author    Michaël Zasso <targos@protonmail.com>  2019-03-12 09:01:49 +0100
committer Michaël Zasso <targos@protonmail.com>  2019-03-14 18:49:21 +0100
commit    7b48713334469818661fe276cf571de9c7899f2d (patch)
tree      4dbda49ac88db76ce09dc330a0cb587e68e139ba
parent    8549ac09b256666cf5275224ec58fab9939ff32e (diff)
download  android-node-v8-7b48713334469818661fe276cf571de9c7899f2d.tar.gz
          android-node-v8-7b48713334469818661fe276cf571de9c7899f2d.tar.bz2
          android-node-v8-7b48713334469818661fe276cf571de9c7899f2d.zip
deps: update V8 to 7.3.492.25
PR-URL: https://github.com/nodejs/node/pull/25852
Reviewed-By: Ujjwal Sharma <usharma1998@gmail.com>
Reviewed-By: Matteo Collina <matteo.collina@gmail.com>
Reviewed-By: Ali Ijaz Sheikh <ofrobots@google.com>
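For reference: the version bump itself lands in deps/v8/include/v8-version.h (the 6-line change in the file list below). Assuming that header's usual layout (the macro names here are V8's convention, not quoted from this diff), the version string 7.3.492.25 decomposes as major.minor.build.patch:

    /* Values inferred from the version string 7.3.492.25. */
    #define V8_MAJOR_VERSION 7
    #define V8_MINOR_VERSION 3
    #define V8_BUILD_NUMBER 492
    #define V8_PATCH_LEVEL 25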
-rw-r--r--  deps/v8/.clang-tidy | 6
-rw-r--r--  deps/v8/.gitignore | 3
-rw-r--r--  deps/v8/.vpython | 23
-rw-r--r--  deps/v8/AUTHORS | 11
-rw-r--r--  deps/v8/BUILD.gn | 877
-rw-r--r--  deps/v8/ChangeLog | 5080
-rw-r--r--  deps/v8/DEPS | 80
-rw-r--r--  deps/v8/OWNERS | 3
-rw-r--r--  deps/v8/PRESUBMIT.py | 73
-rw-r--r--  deps/v8/base/trace_event/common/trace_event_common.h | 12
-rw-r--r--  deps/v8/benchmarks/base.js | 1
-rw-r--r--  deps/v8/benchmarks/micro/slice-perf.js | 83
-rw-r--r--  deps/v8/benchmarks/spinning-balls/v.js | 1
-rw-r--r--  deps/v8/gni/v8.gni | 2
-rw-r--r--  deps/v8/include/OWNERS | 1
-rw-r--r--  deps/v8/include/PRESUBMIT.py | 29
-rw-r--r--  deps/v8/include/libplatform/libplatform.h | 2
-rw-r--r--  deps/v8/include/v8-inspector.h | 3
-rw-r--r--  deps/v8/include/v8-internal.h | 231
-rw-r--r--  deps/v8/include/v8-platform.h | 38
-rw-r--r--  deps/v8/include/v8-profiler.h | 32
-rw-r--r--  deps/v8/include/v8-util.h | 32
-rw-r--r--  deps/v8/include/v8-version.h | 6
-rw-r--r--  deps/v8/include/v8-wasm-trap-handler-posix.h | 31
-rw-r--r--  deps/v8/include/v8-wasm-trap-handler-win.h | 28
-rw-r--r--  deps/v8/include/v8.h | 805
-rw-r--r--  deps/v8/include/v8config.h | 77
-rw-r--r--  deps/v8/infra/config/cq.cfg | 46
-rw-r--r--  deps/v8/infra/mb/mb_config.pyl | 92
-rw-r--r--  deps/v8/infra/testing/builders.pyl | 249
-rw-r--r--  deps/v8/snapshot_toolchain.gni | 11
-rw-r--r--  deps/v8/src/DEPS | 2
-rw-r--r--  deps/v8/src/OWNERS | 1
-rw-r--r--  deps/v8/src/PRESUBMIT.py | 29
-rw-r--r--  deps/v8/src/accessors.cc | 23
-rw-r--r--  deps/v8/src/address-map.cc | 20
-rw-r--r--  deps/v8/src/address-map.h | 14
-rw-r--r--  deps/v8/src/allocation-site-scopes-inl.h | 11
-rw-r--r--  deps/v8/src/allocation-site-scopes.h | 12
-rw-r--r--  deps/v8/src/allocation.cc | 70
-rw-r--r--  deps/v8/src/allocation.h | 11
-rw-r--r--  deps/v8/src/api-arguments-inl.h | 32
-rw-r--r--  deps/v8/src/api-arguments.cc | 52
-rw-r--r--  deps/v8/src/api-arguments.h | 46
-rw-r--r--  deps/v8/src/api-inl.h | 39
-rw-r--r--  deps/v8/src/api-natives.cc | 75
-rw-r--r--  deps/v8/src/api-natives.h | 1
-rw-r--r--  deps/v8/src/api.cc | 1726
-rw-r--r--  deps/v8/src/api.h | 228
-rw-r--r--  deps/v8/src/arguments-inl.h | 4
-rw-r--r--  deps/v8/src/arguments.cc | 1
-rw-r--r--  deps/v8/src/arguments.h | 69
-rw-r--r--  deps/v8/src/arm/assembler-arm-inl.h | 35
-rw-r--r--  deps/v8/src/arm/assembler-arm.cc | 250
-rw-r--r--  deps/v8/src/arm/assembler-arm.h | 376
-rw-r--r--  deps/v8/src/arm/code-stubs-arm.cc | 594
-rw-r--r--  deps/v8/src/arm/code-stubs-arm.h | 31
-rw-r--r--  deps/v8/src/arm/codegen-arm.cc | 303
-rw-r--r--  deps/v8/src/arm/constants-arm.h | 18
-rw-r--r--  deps/v8/src/arm/cpu-arm.cc | 3
-rw-r--r--  deps/v8/src/arm/deoptimizer-arm.cc | 95
-rw-r--r--  deps/v8/src/arm/disasm-arm.cc | 12
-rw-r--r--  deps/v8/src/arm/frame-constants-arm.cc | 9
-rw-r--r--  deps/v8/src/arm/frame-constants-arm.h | 6
-rw-r--r--  deps/v8/src/arm/interface-descriptors-arm.cc | 21
-rw-r--r--  deps/v8/src/arm/macro-assembler-arm.cc | 429
-rw-r--r--  deps/v8/src/arm/macro-assembler-arm.h | 119
-rw-r--r--  deps/v8/src/arm/register-arm.h | 369
-rw-r--r--  deps/v8/src/arm/simulator-arm.cc | 129
-rw-r--r--  deps/v8/src/arm/simulator-arm.h | 22
-rw-r--r--  deps/v8/src/arm64/assembler-arm64-inl.h | 46
-rw-r--r--  deps/v8/src/arm64/assembler-arm64.cc | 169
-rw-r--r--  deps/v8/src/arm64/assembler-arm64.h | 722
-rw-r--r--  deps/v8/src/arm64/code-stubs-arm64.cc | 623
-rw-r--r--  deps/v8/src/arm64/code-stubs-arm64.h | 28
-rw-r--r--  deps/v8/src/arm64/codegen-arm64.cc | 25
-rw-r--r--  deps/v8/src/arm64/constants-arm64.h | 190
-rw-r--r--  deps/v8/src/arm64/cpu-arm64.cc | 8
-rw-r--r--  deps/v8/src/arm64/deoptimizer-arm64.cc | 79
-rw-r--r--  deps/v8/src/arm64/disasm-arm64.cc | 6
-rw-r--r--  deps/v8/src/arm64/frame-constants-arm64.h | 2
-rw-r--r--  deps/v8/src/arm64/instructions-arm64-constants.cc | 9
-rw-r--r--  deps/v8/src/arm64/instructions-arm64.cc | 291
-rw-r--r--  deps/v8/src/arm64/instructions-arm64.h | 55
-rw-r--r--  deps/v8/src/arm64/interface-descriptors-arm64.cc | 22
-rw-r--r--  deps/v8/src/arm64/macro-assembler-arm64-inl.h | 8
-rw-r--r--  deps/v8/src/arm64/macro-assembler-arm64.cc | 396
-rw-r--r--  deps/v8/src/arm64/macro-assembler-arm64.h | 109
-rw-r--r--  deps/v8/src/arm64/register-arm64.cc | 298
-rw-r--r--  deps/v8/src/arm64/register-arm64.h | 752
-rw-r--r--  deps/v8/src/arm64/simulator-arm64.cc | 157
-rw-r--r--  deps/v8/src/arm64/simulator-arm64.h | 22
-rw-r--r--  deps/v8/src/arm64/simulator-logic-arm64.cc | 11
-rw-r--r--  deps/v8/src/arm64/utils-arm64.cc | 2
-rw-r--r--  deps/v8/src/arm64/utils-arm64.h | 2
-rw-r--r--  deps/v8/src/asmjs/OWNERS | 1
-rw-r--r--  deps/v8/src/asmjs/asm-js.cc | 90
-rw-r--r--  deps/v8/src/asmjs/asm-js.h | 3
-rw-r--r--  deps/v8/src/asmjs/asm-parser.cc | 31
-rw-r--r--  deps/v8/src/asmjs/asm-parser.h | 3
-rw-r--r--  deps/v8/src/asmjs/asm-scanner.cc | 15
-rw-r--r--  deps/v8/src/asmjs/asm-types.cc | 19
-rw-r--r--  deps/v8/src/asmjs/asm-types.h | 6
-rw-r--r--  deps/v8/src/asmjs/switch-logic.cc | 65
-rw-r--r--  deps/v8/src/asmjs/switch-logic.h | 33
-rw-r--r--  deps/v8/src/assembler-arch-inl.h | 30
-rw-r--r--  deps/v8/src/assembler.cc | 321
-rw-r--r--  deps/v8/src/assembler.h | 373
-rw-r--r--  deps/v8/src/assert-scope.cc | 59
-rw-r--r--  deps/v8/src/assert-scope.h | 39
-rw-r--r--  deps/v8/src/ast/ast-traversal-visitor.h | 46
-rw-r--r--  deps/v8/src/ast/ast-value-factory.cc | 13
-rw-r--r--  deps/v8/src/ast/ast-value-factory.h | 23
-rw-r--r--  deps/v8/src/ast/ast.cc | 57
-rw-r--r--  deps/v8/src/ast/ast.h | 823
-rw-r--r--  deps/v8/src/ast/context-slot-cache.cc | 84
-rw-r--r--  deps/v8/src/ast/context-slot-cache.h | 112
-rw-r--r--  deps/v8/src/ast/prettyprinter.cc | 155
-rw-r--r--  deps/v8/src/ast/prettyprinter.h | 14
-rw-r--r--  deps/v8/src/ast/scopes-inl.h | 66
-rw-r--r--  deps/v8/src/ast/scopes.cc | 1081
-rw-r--r--  deps/v8/src/ast/scopes.h | 465
-rw-r--r--  deps/v8/src/ast/source-range-ast-visitor.cc | 15
-rw-r--r--  deps/v8/src/ast/variables.h | 12
-rw-r--r--  deps/v8/src/async-hooks-wrapper.cc | 5
-rw-r--r--  deps/v8/src/bailout-reason.h | 169
-rw-r--r--  deps/v8/src/base/adapters.h | 2
-rw-r--r--  deps/v8/src/base/atomic-utils.h | 317
-rw-r--r--  deps/v8/src/base/atomicops.h | 5
-rw-r--r--  deps/v8/src/base/atomicops_internals_portable.h | 15
-rw-r--r--  deps/v8/src/base/atomicops_internals_std.h | 18
-rw-r--r--  deps/v8/src/base/bits.cc | 2
-rw-r--r--  deps/v8/src/base/bounded-page-allocator.cc | 26
-rw-r--r--  deps/v8/src/base/bounded-page-allocator.h | 12
-rw-r--r--  deps/v8/src/base/build_config.h | 4
-rw-r--r--  deps/v8/src/base/compiler-specific.h | 8
-rw-r--r--  deps/v8/src/base/debug/stack_trace_win.cc | 16
-rw-r--r--  deps/v8/src/base/enum-set.h | 65
-rw-r--r--  deps/v8/src/base/functional.h | 47
-rw-r--r--  deps/v8/src/base/ieee754.cc | 72
-rw-r--r--  deps/v8/src/base/lazy-instance.h | 41
-rw-r--r--  deps/v8/src/base/macros.h | 12
-rw-r--r--  deps/v8/src/base/optional.h | 848
-rw-r--r--  deps/v8/src/base/overflowing-math.h | 89
-rw-r--r--  deps/v8/src/base/page-allocator.cc | 4
-rw-r--r--  deps/v8/src/base/page-allocator.h | 2
-rw-r--r--  deps/v8/src/base/platform/OWNERS | 1
-rw-r--r--  deps/v8/src/base/platform/condition-variable.cc | 2
-rw-r--r--  deps/v8/src/base/platform/condition-variable.h | 3
-rw-r--r--  deps/v8/src/base/platform/mutex.h | 5
-rw-r--r--  deps/v8/src/base/platform/platform-aix.cc | 20
-rw-r--r--  deps/v8/src/base/platform/platform-cygwin.cc | 27
-rw-r--r--  deps/v8/src/base/platform/platform-fuchsia.cc | 13
-rw-r--r--  deps/v8/src/base/platform/platform-posix.cc | 79
-rw-r--r--  deps/v8/src/base/platform/platform-win32.cc | 56
-rw-r--r--  deps/v8/src/base/platform/platform.h | 13
-rw-r--r--  deps/v8/src/base/platform/time.cc | 63
-rw-r--r--  deps/v8/src/base/platform/time.h | 67
-rw-r--r--  deps/v8/src/base/region-allocator.cc | 10
-rw-r--r--  deps/v8/src/base/region-allocator.h | 3
-rw-r--r--  deps/v8/src/base/small-vector.h | 146
-rw-r--r--  deps/v8/src/base/threaded-list.h | 34
-rw-r--r--  deps/v8/src/base/utils/random-number-generator.cc | 7
-rw-r--r--  deps/v8/src/basic-block-profiler.cc | 11
-rw-r--r--  deps/v8/src/bit-vector.h | 4
-rw-r--r--  deps/v8/src/bootstrapper.cc | 1951
-rw-r--r--  deps/v8/src/bootstrapper.h | 23
-rw-r--r--  deps/v8/src/builtins/arguments.tq | 44
-rw-r--r--  deps/v8/src/builtins/arm/builtins-arm.cc | 889
-rw-r--r--  deps/v8/src/builtins/arm64/builtins-arm64.cc | 923
-rw-r--r--  deps/v8/src/builtins/array-copywithin.tq | 14
-rw-r--r--  deps/v8/src/builtins/array-filter.tq | 244
-rw-r--r--  deps/v8/src/builtins/array-foreach.tq | 183
-rw-r--r--  deps/v8/src/builtins/array-join.tq | 660
-rw-r--r--  deps/v8/src/builtins/array-lastindexof.tq | 29
-rw-r--r--  deps/v8/src/builtins/array-of.tq | 54
-rw-r--r--  deps/v8/src/builtins/array-reverse.tq | 63
-rw-r--r--  deps/v8/src/builtins/array-slice.tq | 212
-rw-r--r--  deps/v8/src/builtins/array-splice.tq | 61
-rw-r--r--  deps/v8/src/builtins/array-unshift.tq | 26
-rw-r--r--  deps/v8/src/builtins/array.tq | 52
-rw-r--r--  deps/v8/src/builtins/base.tq | 1017
-rw-r--r--  deps/v8/src/builtins/builtins-api.cc | 87
-rw-r--r--  deps/v8/src/builtins/builtins-arguments-gen.cc | 149
-rw-r--r--  deps/v8/src/builtins/builtins-arguments-gen.h | 13
-rw-r--r--  deps/v8/src/builtins/builtins-array-gen.cc | 955
-rw-r--r--  deps/v8/src/builtins/builtins-array-gen.h | 24
-rw-r--r--  deps/v8/src/builtins/builtins-array.cc | 97
-rw-r--r--  deps/v8/src/builtins/builtins-arraybuffer.cc | 21
-rw-r--r--  deps/v8/src/builtins/builtins-async-function-gen.cc | 282
-rw-r--r--  deps/v8/src/builtins/builtins-async-gen.cc | 300
-rw-r--r--  deps/v8/src/builtins/builtins-async-generator-gen.cc | 33
-rw-r--r--  deps/v8/src/builtins/builtins-bigint-gen.cc | 46
-rw-r--r--  deps/v8/src/builtins/builtins-bigint.cc | 6
-rw-r--r--  deps/v8/src/builtins/builtins-boolean-gen.cc | 1
-rw-r--r--  deps/v8/src/builtins/builtins-call-gen.cc | 77
-rw-r--r--  deps/v8/src/builtins/builtins-callsite.cc | 18
-rw-r--r--  deps/v8/src/builtins/builtins-collections-gen.cc | 712
-rw-r--r--  deps/v8/src/builtins/builtins-collections-gen.h | 26
-rw-r--r--  deps/v8/src/builtins/builtins-collections.cc | 1
-rw-r--r--  deps/v8/src/builtins/builtins-console-gen.cc | 1
-rw-r--r--  deps/v8/src/builtins/builtins-console.cc | 2
-rw-r--r--  deps/v8/src/builtins/builtins-constructor-gen.cc | 172
-rw-r--r--  deps/v8/src/builtins/builtins-constructor.h | 7
-rw-r--r--  deps/v8/src/builtins/builtins-conversion-gen.cc | 3
-rw-r--r--  deps/v8/src/builtins/builtins-data-view-gen.h | 24
-rw-r--r--  deps/v8/src/builtins/builtins-date-gen.cc | 4
-rw-r--r--  deps/v8/src/builtins/builtins-date.cc | 55
-rw-r--r--  deps/v8/src/builtins/builtins-definitions.h | 310
-rw-r--r--  deps/v8/src/builtins/builtins-descriptors.h | 8
-rw-r--r--  deps/v8/src/builtins/builtins-error.cc | 13
-rw-r--r--  deps/v8/src/builtins/builtins-extras-utils.cc | 93
-rw-r--r--  deps/v8/src/builtins/builtins-function-gen.cc | 27
-rw-r--r--  deps/v8/src/builtins/builtins-function.cc | 2
-rw-r--r--  deps/v8/src/builtins/builtins-handler-gen.cc | 389
-rw-r--r--  deps/v8/src/builtins/builtins-internal-gen.cc | 522
-rw-r--r--  deps/v8/src/builtins/builtins-interpreter-gen.cc | 18
-rw-r--r--  deps/v8/src/builtins/builtins-interpreter.cc | 49
-rw-r--r--  deps/v8/src/builtins/builtins-intl-gen.cc | 14
-rw-r--r--  deps/v8/src/builtins/builtins-intl.cc | 678
-rw-r--r--  deps/v8/src/builtins/builtins-iterator-gen.cc | 111
-rw-r--r--  deps/v8/src/builtins/builtins-iterator-gen.h | 26
-rw-r--r--  deps/v8/src/builtins/builtins-lazy-gen.cc | 66
-rw-r--r--  deps/v8/src/builtins/builtins-microtask-queue-gen.cc | 545
-rw-r--r--  deps/v8/src/builtins/builtins-object-gen.cc | 106
-rw-r--r--  deps/v8/src/builtins/builtins-object-gen.h | 14
-rw-r--r--  deps/v8/src/builtins/builtins-object.cc | 18
-rw-r--r--  deps/v8/src/builtins/builtins-promise-gen.cc | 350
-rw-r--r--  deps/v8/src/builtins/builtins-promise-gen.h | 66
-rw-r--r--  deps/v8/src/builtins/builtins-promise.cc | 3
-rw-r--r--  deps/v8/src/builtins/builtins-promise.h | 75
-rw-r--r--  deps/v8/src/builtins/builtins-proxy-gen.cc | 24
-rw-r--r--  deps/v8/src/builtins/builtins-regexp-gen.cc | 425
-rw-r--r--  deps/v8/src/builtins/builtins-regexp-gen.h | 44
-rw-r--r--  deps/v8/src/builtins/builtins-sharedarraybuffer-gen.cc | 4
-rw-r--r--  deps/v8/src/builtins/builtins-sharedarraybuffer.cc | 6
-rw-r--r--  deps/v8/src/builtins/builtins-string-gen.cc | 203
-rw-r--r--  deps/v8/src/builtins/builtins-string-gen.h | 7
-rw-r--r--  deps/v8/src/builtins/builtins-string.cc | 30
-rw-r--r--  deps/v8/src/builtins/builtins-symbol.cc | 2
-rw-r--r--  deps/v8/src/builtins/builtins-test-gen.h | 22
-rw-r--r--  deps/v8/src/builtins/builtins-trace.cc | 4
-rw-r--r--  deps/v8/src/builtins/builtins-typed-array-gen.cc | 393
-rw-r--r--  deps/v8/src/builtins/builtins-typed-array-gen.h | 53
-rw-r--r--  deps/v8/src/builtins/builtins-typed-array.cc | 11
-rw-r--r--  deps/v8/src/builtins/builtins-utils-gen.h | 42
-rw-r--r--  deps/v8/src/builtins/builtins-utils.h | 54
-rw-r--r--  deps/v8/src/builtins/builtins-wasm-gen.cc | 169
-rw-r--r--  deps/v8/src/builtins/builtins-weak-refs.cc | 169
-rw-r--r--  deps/v8/src/builtins/builtins.cc | 353
-rw-r--r--  deps/v8/src/builtins/builtins.h | 102
-rw-r--r--  deps/v8/src/builtins/collections.tq | 57
-rw-r--r--  deps/v8/src/builtins/constants-table-builder.cc | 29
-rw-r--r--  deps/v8/src/builtins/constants-table-builder.h | 2
-rw-r--r--  deps/v8/src/builtins/data-view.tq | 95
-rw-r--r--  deps/v8/src/builtins/extras-utils.tq | 24
-rw-r--r--  deps/v8/src/builtins/frames.tq | 150
-rw-r--r--  deps/v8/src/builtins/growable-fixed-array-gen.cc | 5
-rw-r--r--  deps/v8/src/builtins/ia32/builtins-ia32.cc | 1308
-rw-r--r--  deps/v8/src/builtins/iterator.tq | 44
-rw-r--r--  deps/v8/src/builtins/mips/OWNERS | 3
-rw-r--r--  deps/v8/src/builtins/mips/builtins-mips.cc | 1343
-rw-r--r--  deps/v8/src/builtins/mips64/OWNERS | 5
-rw-r--r--  deps/v8/src/builtins/mips64/builtins-mips64.cc | 864
-rw-r--r--  deps/v8/src/builtins/object-fromentries.tq | 69
-rw-r--r--  deps/v8/src/builtins/object.tq | 12
-rw-r--r--  deps/v8/src/builtins/ppc/OWNERS | 5
-rw-r--r--  deps/v8/src/builtins/ppc/builtins-ppc.cc | 909
-rw-r--r--  deps/v8/src/builtins/s390/OWNERS | 5
-rw-r--r--  deps/v8/src/builtins/s390/builtins-s390.cc | 927
-rw-r--r--  deps/v8/src/builtins/setup-builtins-internal.cc | 141
-rw-r--r--  deps/v8/src/builtins/typed-array-createtypedarray.tq | 73
-rw-r--r--  deps/v8/src/builtins/typed-array.tq | 34
-rw-r--r--  deps/v8/src/builtins/x64/builtins-x64.cc | 1324
-rw-r--r--  deps/v8/src/cancelable-task.cc | 56
-rw-r--r--  deps/v8/src/cancelable-task.h | 145
-rw-r--r--  deps/v8/src/char-predicates-inl.h | 97
-rw-r--r--  deps/v8/src/char-predicates.cc | 6
-rw-r--r--  deps/v8/src/char-predicates.h | 71
-rw-r--r--  deps/v8/src/code-comments.cc | 102
-rw-r--r--  deps/v8/src/code-comments.h | 68
-rw-r--r--  deps/v8/src/code-events.h | 70
-rw-r--r--  deps/v8/src/code-factory.cc | 323
-rw-r--r--  deps/v8/src/code-factory.h | 23
-rw-r--r--  deps/v8/src/code-reference.cc | 123
-rw-r--r--  deps/v8/src/code-reference.h | 17
-rw-r--r--  deps/v8/src/code-stub-assembler.cc | 2210
-rw-r--r--  deps/v8/src/code-stub-assembler.h | 797
-rw-r--r--  deps/v8/src/code-stubs-utils.h | 49
-rw-r--r--  deps/v8/src/code-stubs.cc | 466
-rw-r--r--  deps/v8/src/code-stubs.h | 670
-rw-r--r--  deps/v8/src/codegen.cc | 32
-rw-r--r--  deps/v8/src/codegen.h | 25
-rw-r--r--  deps/v8/src/compilation-cache.cc | 20
-rw-r--r--  deps/v8/src/compilation-cache.h | 4
-rw-r--r--  deps/v8/src/compilation-statistics.cc | 6
-rw-r--r--  deps/v8/src/compiler-dispatcher/compiler-dispatcher-job.cc | 19
-rw-r--r--  deps/v8/src/compiler-dispatcher/compiler-dispatcher-job.h | 80
-rw-r--r--  deps/v8/src/compiler-dispatcher/compiler-dispatcher-tracer.cc | 127
-rw-r--r--  deps/v8/src/compiler-dispatcher/compiler-dispatcher-tracer.h | 83
-rw-r--r--  deps/v8/src/compiler-dispatcher/compiler-dispatcher.cc | 464
-rw-r--r--  deps/v8/src/compiler-dispatcher/compiler-dispatcher.h | 89
-rw-r--r--  deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc | 34
-rw-r--r--  deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.h | 8
-rw-r--r--  deps/v8/src/compiler-dispatcher/unoptimized-compile-job.cc | 91
-rw-r--r--  deps/v8/src/compiler-dispatcher/unoptimized-compile-job.h | 71
-rw-r--r--  deps/v8/src/compiler.cc | 316
-rw-r--r--  deps/v8/src/compiler.h | 14
-rw-r--r--  deps/v8/src/compiler/OWNERS | 8
-rw-r--r--  deps/v8/src/compiler/access-builder.cc | 136
-rw-r--r--  deps/v8/src/compiler/access-builder.h | 13
-rw-r--r--  deps/v8/src/compiler/access-info.cc | 51
-rw-r--r--  deps/v8/src/compiler/access-info.h | 9
-rw-r--r--  deps/v8/src/compiler/allocation-builder-inl.h | 48
-rw-r--r--  deps/v8/src/compiler/allocation-builder.h | 22
-rw-r--r--  deps/v8/src/compiler/backend/OWNERS | 6
-rw-r--r--  deps/v8/src/compiler/backend/arm/code-generator-arm.cc (renamed from deps/v8/src/compiler/arm/code-generator-arm.cc) | 117
-rw-r--r--  deps/v8/src/compiler/backend/arm/instruction-codes-arm.h (renamed from deps/v8/src/compiler/arm/instruction-codes-arm.h) | 6
-rw-r--r--  deps/v8/src/compiler/backend/arm/instruction-scheduler-arm.cc (renamed from deps/v8/src/compiler/arm/instruction-scheduler-arm.cc) | 6
-rw-r--r--  deps/v8/src/compiler/backend/arm/instruction-selector-arm.cc (renamed from deps/v8/src/compiler/arm/instruction-selector-arm.cc) | 65
-rw-r--r--  deps/v8/src/compiler/backend/arm/unwinding-info-writer-arm.cc (renamed from deps/v8/src/compiler/arm/unwinding-info-writer-arm.cc) | 29
-rw-r--r--  deps/v8/src/compiler/backend/arm/unwinding-info-writer-arm.h (renamed from deps/v8/src/compiler/arm/unwinding-info-writer-arm.h) | 7
-rw-r--r--  deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc (renamed from deps/v8/src/compiler/arm64/code-generator-arm64.cc) | 116
-rw-r--r--  deps/v8/src/compiler/backend/arm64/instruction-codes-arm64.h (renamed from deps/v8/src/compiler/arm64/instruction-codes-arm64.h) | 6
-rw-r--r--  deps/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc (renamed from deps/v8/src/compiler/arm64/instruction-scheduler-arm64.cc) | 6
-rw-r--r--  deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc (renamed from deps/v8/src/compiler/arm64/instruction-selector-arm64.cc) | 74
-rw-r--r--  deps/v8/src/compiler/backend/arm64/unwinding-info-writer-arm64.cc (renamed from deps/v8/src/compiler/arm64/unwinding-info-writer-arm64.cc) | 31
-rw-r--r--  deps/v8/src/compiler/backend/arm64/unwinding-info-writer-arm64.h (renamed from deps/v8/src/compiler/arm64/unwinding-info-writer-arm64.h) | 7
-rw-r--r--  deps/v8/src/compiler/backend/code-generator-impl.h (renamed from deps/v8/src/compiler/code-generator-impl.h) | 11
-rw-r--r--  deps/v8/src/compiler/backend/code-generator.cc (renamed from deps/v8/src/compiler/code-generator.cc) | 243
-rw-r--r--  deps/v8/src/compiler/backend/code-generator.h (renamed from deps/v8/src/compiler/code-generator.h) | 26
-rw-r--r--  deps/v8/src/compiler/backend/frame-elider.cc (renamed from deps/v8/src/compiler/frame-elider.cc) | 11
-rw-r--r--  deps/v8/src/compiler/backend/frame-elider.h (renamed from deps/v8/src/compiler/frame-elider.h) | 10
-rw-r--r--  deps/v8/src/compiler/backend/gap-resolver.cc (renamed from deps/v8/src/compiler/gap-resolver.cc) | 54
-rw-r--r--  deps/v8/src/compiler/backend/gap-resolver.h (renamed from deps/v8/src/compiler/gap-resolver.h) | 8
-rw-r--r--  deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc (renamed from deps/v8/src/compiler/ia32/code-generator-ia32.cc) | 476
-rw-r--r--  deps/v8/src/compiler/backend/ia32/instruction-codes-ia32.h (renamed from deps/v8/src/compiler/ia32/instruction-codes-ia32.h) | 6
-rw-r--r--  deps/v8/src/compiler/backend/ia32/instruction-scheduler-ia32.cc (renamed from deps/v8/src/compiler/ia32/instruction-scheduler-ia32.cc) | 10
-rw-r--r--  deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc (renamed from deps/v8/src/compiler/ia32/instruction-selector-ia32.cc) | 144
-rw-r--r--  deps/v8/src/compiler/backend/instruction-codes.h (renamed from deps/v8/src/compiler/instruction-codes.h) | 28
-rw-r--r--  deps/v8/src/compiler/backend/instruction-scheduler.cc (renamed from deps/v8/src/compiler/instruction-scheduler.cc) | 29
-rw-r--r--  deps/v8/src/compiler/backend/instruction-scheduler.h (renamed from deps/v8/src/compiler/instruction-scheduler.h) | 30
-rw-r--r--  deps/v8/src/compiler/backend/instruction-selector-impl.h (renamed from deps/v8/src/compiler/instruction-selector-impl.h) | 12
-rw-r--r--  deps/v8/src/compiler/backend/instruction-selector.cc (renamed from deps/v8/src/compiler/instruction-selector.cc) | 135
-rw-r--r--  deps/v8/src/compiler/backend/instruction-selector.h (renamed from deps/v8/src/compiler/instruction-selector.h) | 16
-rw-r--r--  deps/v8/src/compiler/backend/instruction.cc (renamed from deps/v8/src/compiler/instruction.cc) | 284
-rw-r--r--  deps/v8/src/compiler/backend/instruction.h (renamed from deps/v8/src/compiler/instruction.h) | 157
-rw-r--r--  deps/v8/src/compiler/backend/jump-threading.cc (renamed from deps/v8/src/compiler/jump-threading.cc) | 21
-rw-r--r--  deps/v8/src/compiler/backend/jump-threading.h (renamed from deps/v8/src/compiler/jump-threading.h) | 8
-rw-r--r--  deps/v8/src/compiler/backend/live-range-separator.cc (renamed from deps/v8/src/compiler/live-range-separator.cc) | 77
-rw-r--r--  deps/v8/src/compiler/backend/live-range-separator.h (renamed from deps/v8/src/compiler/live-range-separator.h) | 9
-rw-r--r--  deps/v8/src/compiler/backend/mips/OWNERS | 3
-rw-r--r--  deps/v8/src/compiler/backend/mips/code-generator-mips.cc (renamed from deps/v8/src/compiler/mips/code-generator-mips.cc) | 332
-rw-r--r--  deps/v8/src/compiler/backend/mips/instruction-codes-mips.h (renamed from deps/v8/src/compiler/mips/instruction-codes-mips.h) | 7
-rw-r--r--  deps/v8/src/compiler/backend/mips/instruction-scheduler-mips.cc (renamed from deps/v8/src/compiler/mips/instruction-scheduler-mips.cc) | 9
-rw-r--r--  deps/v8/src/compiler/backend/mips/instruction-selector-mips.cc (renamed from deps/v8/src/compiler/mips/instruction-selector-mips.cc) | 202
-rw-r--r--  deps/v8/src/compiler/backend/mips64/OWNERS | 3
-rw-r--r--  deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc (renamed from deps/v8/src/compiler/mips64/code-generator-mips64.cc) | 248
-rw-r--r--  deps/v8/src/compiler/backend/mips64/instruction-codes-mips64.h (renamed from deps/v8/src/compiler/mips64/instruction-codes-mips64.h) | 7
-rw-r--r--  deps/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc (renamed from deps/v8/src/compiler/mips64/instruction-scheduler-mips64.cc) | 8
-rw-r--r--  deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc (renamed from deps/v8/src/compiler/mips64/instruction-selector-mips64.cc) | 236
-rw-r--r--  deps/v8/src/compiler/backend/move-optimizer.cc (renamed from deps/v8/src/compiler/move-optimizer.cc) | 9
-rw-r--r--  deps/v8/src/compiler/backend/move-optimizer.h (renamed from deps/v8/src/compiler/move-optimizer.h) | 8
-rw-r--r--  deps/v8/src/compiler/backend/ppc/OWNERS | 4
-rw-r--r--  deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc (renamed from deps/v8/src/compiler/ppc/code-generator-ppc.cc) | 457
-rw-r--r--  deps/v8/src/compiler/backend/ppc/instruction-codes-ppc.h | 208
-rw-r--r--  deps/v8/src/compiler/backend/ppc/instruction-scheduler-ppc.cc (renamed from deps/v8/src/compiler/ppc/instruction-scheduler-ppc.cc) | 98
-rw-r--r--  deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc (renamed from deps/v8/src/compiler/ppc/instruction-selector-ppc.cc) | 449
-rw-r--r--  deps/v8/src/compiler/backend/register-allocator-verifier.cc (renamed from deps/v8/src/compiler/register-allocator-verifier.cc) | 11
-rw-r--r--  deps/v8/src/compiler/backend/register-allocator-verifier.h (renamed from deps/v8/src/compiler/register-allocator-verifier.h) | 8
-rw-r--r--  deps/v8/src/compiler/backend/register-allocator.cc (renamed from deps/v8/src/compiler/register-allocator.cc) | 723
-rw-r--r--  deps/v8/src/compiler/backend/register-allocator.h (renamed from deps/v8/src/compiler/register-allocator.h) | 154
-rw-r--r--  deps/v8/src/compiler/backend/s390/OWNERS | 4
-rw-r--r--  deps/v8/src/compiler/backend/s390/code-generator-s390.cc (renamed from deps/v8/src/compiler/s390/code-generator-s390.cc) | 1313
-rw-r--r--  deps/v8/src/compiler/backend/s390/instruction-codes-s390.h | 217
-rw-r--r--  deps/v8/src/compiler/backend/s390/instruction-scheduler-s390.cc (renamed from deps/v8/src/compiler/s390/instruction-scheduler-s390.cc) | 12
-rw-r--r--  deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc (renamed from deps/v8/src/compiler/s390/instruction-selector-s390.cc) | 179
-rw-r--r--  deps/v8/src/compiler/backend/unwinding-info-writer.h (renamed from deps/v8/src/compiler/unwinding-info-writer.h) | 14
-rw-r--r--  deps/v8/src/compiler/backend/x64/code-generator-x64.cc (renamed from deps/v8/src/compiler/x64/code-generator-x64.cc) | 595
-rw-r--r--  deps/v8/src/compiler/backend/x64/instruction-codes-x64.h (renamed from deps/v8/src/compiler/x64/instruction-codes-x64.h) | 43
-rw-r--r--  deps/v8/src/compiler/backend/x64/instruction-scheduler-x64.cc (renamed from deps/v8/src/compiler/x64/instruction-scheduler-x64.cc) | 43
-rw-r--r--  deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc (renamed from deps/v8/src/compiler/x64/instruction-selector-x64.cc) | 625
-rw-r--r--  deps/v8/src/compiler/backend/x64/unwinding-info-writer-x64.cc (renamed from deps/v8/src/compiler/x64/unwinding-info-writer-x64.cc) | 37
-rw-r--r--  deps/v8/src/compiler/backend/x64/unwinding-info-writer-x64.h (renamed from deps/v8/src/compiler/x64/unwinding-info-writer-x64.h) | 7
-rw-r--r--  deps/v8/src/compiler/basic-block-instrumentor.cc | 5
-rw-r--r--  deps/v8/src/compiler/bytecode-analysis.cc | 1
-rw-r--r--  deps/v8/src/compiler/bytecode-graph-builder.cc | 48
-rw-r--r--  deps/v8/src/compiler/bytecode-graph-builder.h | 7
-rw-r--r--  deps/v8/src/compiler/code-assembler.cc | 306
-rw-r--r--  deps/v8/src/compiler/code-assembler.h | 219
-rw-r--r--  deps/v8/src/compiler/common-node-cache.cc | 2
-rw-r--r--  deps/v8/src/compiler/common-operator-reducer.cc | 16
-rw-r--r--  deps/v8/src/compiler/common-operator-reducer.h | 7
-rw-r--r--  deps/v8/src/compiler/common-operator.cc | 51
-rw-r--r--  deps/v8/src/compiler/common-operator.h | 12
-rw-r--r--  deps/v8/src/compiler/compilation-dependencies.cc | 187
-rw-r--r--  deps/v8/src/compiler/compilation-dependencies.h | 6
-rw-r--r--  deps/v8/src/compiler/constant-folding-reducer.cc | 8
-rw-r--r--  deps/v8/src/compiler/constant-folding-reducer.h | 6
-rw-r--r--  deps/v8/src/compiler/dead-code-elimination.cc | 9
-rw-r--r--  deps/v8/src/compiler/dead-code-elimination.h | 2
-rw-r--r--  deps/v8/src/compiler/effect-control-linearizer.cc | 250
-rw-r--r--  deps/v8/src/compiler/effect-control-linearizer.h | 9
-rw-r--r--  deps/v8/src/compiler/escape-analysis-reducer.cc | 8
-rw-r--r--  deps/v8/src/compiler/escape-analysis.cc | 8
-rw-r--r--  deps/v8/src/compiler/escape-analysis.h | 6
-rw-r--r--  deps/v8/src/compiler/frame-states.cc | 9
-rw-r--r--  deps/v8/src/compiler/frame-states.h | 3
-rw-r--r--  deps/v8/src/compiler/frame.cc | 4
-rw-r--r--  deps/v8/src/compiler/frame.h | 19
-rw-r--r--  deps/v8/src/compiler/graph-assembler.cc | 6
-rw-r--r--  deps/v8/src/compiler/graph-assembler.h | 3
-rw-r--r--  deps/v8/src/compiler/graph-visualizer.cc | 146
-rw-r--r--  deps/v8/src/compiler/graph-visualizer.h | 5
-rw-r--r--  deps/v8/src/compiler/int64-lowering.cc | 9
-rw-r--r--  deps/v8/src/compiler/js-call-reducer.cc | 1358
-rw-r--r--  deps/v8/src/compiler/js-call-reducer.h | 43
-rw-r--r--  deps/v8/src/compiler/js-context-specialization.cc | 4
-rw-r--r--  deps/v8/src/compiler/js-context-specialization.h | 14
-rw-r--r--  deps/v8/src/compiler/js-create-lowering.cc | 150
-rw-r--r--  deps/v8/src/compiler/js-create-lowering.h | 10
-rw-r--r--  deps/v8/src/compiler/js-generic-lowering.cc | 9
-rw-r--r--  deps/v8/src/compiler/js-graph.cc | 15
-rw-r--r--  deps/v8/src/compiler/js-graph.h | 1
-rw-r--r--  deps/v8/src/compiler/js-heap-broker.cc | 697
-rw-r--r--  deps/v8/src/compiler/js-heap-broker.h | 222
-rw-r--r--  deps/v8/src/compiler/js-heap-copy-reducer.cc | 37
-rw-r--r--  deps/v8/src/compiler/js-inlining-heuristic.cc | 62
-rw-r--r--  deps/v8/src/compiler/js-inlining-heuristic.h | 6
-rw-r--r--  deps/v8/src/compiler/js-inlining.cc | 51
-rw-r--r--  deps/v8/src/compiler/js-inlining.h | 7
-rw-r--r--  deps/v8/src/compiler/js-intrinsic-lowering.cc | 74
-rw-r--r--  deps/v8/src/compiler/js-intrinsic-lowering.h | 10
-rw-r--r--  deps/v8/src/compiler/js-native-context-specialization.cc | 649
-rw-r--r--  deps/v8/src/compiler/js-native-context-specialization.h | 21
-rw-r--r--  deps/v8/src/compiler/js-operator.cc | 128
-rw-r--r--  deps/v8/src/compiler/js-operator.h | 7
-rw-r--r--  deps/v8/src/compiler/js-type-hint-lowering.cc | 1
-rw-r--r--  deps/v8/src/compiler/js-typed-lowering.cc | 231
-rw-r--r--  deps/v8/src/compiler/js-typed-lowering.h | 15
-rw-r--r--  deps/v8/src/compiler/linkage.cc | 39
-rw-r--r--  deps/v8/src/compiler/linkage.h | 29
-rw-r--r--  deps/v8/src/compiler/load-elimination.cc | 18
-rw-r--r--  deps/v8/src/compiler/machine-graph.cc | 2
-rw-r--r--  deps/v8/src/compiler/machine-operator-reducer.cc | 67
-rw-r--r--  deps/v8/src/compiler/machine-operator-reducer.h | 4
-rw-r--r--  deps/v8/src/compiler/machine-operator.cc | 11
-rw-r--r--  deps/v8/src/compiler/machine-operator.h | 23
-rw-r--r--  deps/v8/src/compiler/memory-optimizer.cc | 46
-rw-r--r--  deps/v8/src/compiler/mips/OWNERS | 2
-rw-r--r--  deps/v8/src/compiler/mips64/OWNERS | 2
-rw-r--r--  deps/v8/src/compiler/node-matchers.h | 10
-rw-r--r--  deps/v8/src/compiler/node-origin-table.h | 2
-rw-r--r--  deps/v8/src/compiler/node-properties.cc | 82
-rw-r--r--  deps/v8/src/compiler/node-properties.h | 11
-rw-r--r--  deps/v8/src/compiler/node.cc | 4
-rw-r--r--  deps/v8/src/compiler/node.h | 1
-rw-r--r--  deps/v8/src/compiler/opcodes.h | 54
-rw-r--r--  deps/v8/src/compiler/operation-typer.cc | 228
-rw-r--r--  deps/v8/src/compiler/operation-typer.h | 7
-rw-r--r--  deps/v8/src/compiler/operator-properties.cc | 7
-rw-r--r--  deps/v8/src/compiler/osr.cc | 10
-rw-r--r--  deps/v8/src/compiler/pipeline.cc | 761
-rw-r--r--  deps/v8/src/compiler/pipeline.h | 29
-rw-r--r--  deps/v8/src/compiler/ppc/OWNERS | 7
-rw-r--r--  deps/v8/src/compiler/ppc/instruction-codes-ppc.h | 188
-rw-r--r--  deps/v8/src/compiler/property-access-builder.cc | 38
-rw-r--r--  deps/v8/src/compiler/property-access-builder.h | 10
-rw-r--r--  deps/v8/src/compiler/raw-machine-assembler.cc | 363
-rw-r--r--  deps/v8/src/compiler/raw-machine-assembler.h | 90
-rw-r--r--  deps/v8/src/compiler/redundancy-elimination.cc | 23
-rw-r--r--  deps/v8/src/compiler/representation-change.cc | 102
-rw-r--r--  deps/v8/src/compiler/representation-change.h | 14
-rw-r--r--  deps/v8/src/compiler/s390/OWNERS | 7
-rw-r--r--  deps/v8/src/compiler/s390/instruction-codes-s390.h | 225
-rw-r--r--  deps/v8/src/compiler/schedule.cc | 25
-rw-r--r--  deps/v8/src/compiler/scheduler.cc | 28
-rw-r--r--  deps/v8/src/compiler/serializer-for-background-compilation.cc | 521
-rw-r--r--  deps/v8/src/compiler/serializer-for-background-compilation.h | 183
-rw-r--r--  deps/v8/src/compiler/simd-scalar-lowering.cc | 9
-rw-r--r--  deps/v8/src/compiler/simplified-lowering.cc | 341
-rw-r--r--  deps/v8/src/compiler/simplified-lowering.h | 7
-rw-r--r--  deps/v8/src/compiler/simplified-operator-reducer.cc | 17
-rw-r--r--  deps/v8/src/compiler/simplified-operator-reducer.h | 7
-rw-r--r--  deps/v8/src/compiler/simplified-operator.cc | 109
-rw-r--r--  deps/v8/src/compiler/simplified-operator.h | 11
-rw-r--r--  deps/v8/src/compiler/store-store-elimination.cc | 2
-rw-r--r--  deps/v8/src/compiler/type-cache.cc | 9
-rw-r--r--  deps/v8/src/compiler/type-cache.h | 9
-rw-r--r--  deps/v8/src/compiler/type-narrowing-reducer.cc | 6
-rw-r--r--  deps/v8/src/compiler/type-narrowing-reducer.h | 3
-rw-r--r--  deps/v8/src/compiler/typed-optimization.cc | 208
-rw-r--r--  deps/v8/src/compiler/typed-optimization.h | 16
-rw-r--r--  deps/v8/src/compiler/typer.cc | 186
-rw-r--r--  deps/v8/src/compiler/typer.h | 8
-rw-r--r--  deps/v8/src/compiler/types.cc | 30
-rw-r--r--  deps/v8/src/compiler/types.h | 10
-rw-r--r--  deps/v8/src/compiler/verifier.cc | 46
-rw-r--r--  deps/v8/src/compiler/wasm-compiler.cc | 1543
-rw-r--r--  deps/v8/src/compiler/wasm-compiler.h | 149
-rw-r--r--  deps/v8/src/constant-pool.cc | 214
-rw-r--r--  deps/v8/src/constant-pool.h | 159
-rw-r--r--  deps/v8/src/constants-arch.h | 28
-rw-r--r--  deps/v8/src/contexts-inl.h | 109
-rw-r--r--  deps/v8/src/contexts.cc | 147
-rw-r--r--  deps/v8/src/contexts.h | 384
-rw-r--r--  deps/v8/src/conversions-inl.h | 27
-rw-r--r--  deps/v8/src/conversions.cc | 126
-rw-r--r--  deps/v8/src/conversions.h | 40
-rw-r--r--  deps/v8/src/counters-inl.h | 2
-rw-r--r--  deps/v8/src/counters.cc | 27
-rw-r--r--  deps/v8/src/counters.h | 172
-rw-r--r--  deps/v8/src/cpu-features.h | 124
-rw-r--r--  deps/v8/src/d8-js.cc (renamed from deps/v8/src/d8.js) | 11
-rw-r--r--  deps/v8/src/d8-platforms.cc | 309
-rw-r--r--  deps/v8/src/d8-platforms.h | 29
-rw-r--r--  deps/v8/src/d8.cc | 265
-rw-r--r--  deps/v8/src/d8.h | 8
-rw-r--r--  deps/v8/src/date.cc | 18
-rw-r--r--  deps/v8/src/date.h | 5
-rw-r--r--  deps/v8/src/dateparser-inl.h | 10
-rw-r--r--  deps/v8/src/dateparser.cc | 8
-rw-r--r--  deps/v8/src/dateparser.h | 17
-rw-r--r--  deps/v8/src/debug/debug-coverage.cc | 57
-rw-r--r--  deps/v8/src/debug/debug-coverage.h | 1
-rw-r--r--  deps/v8/src/debug/debug-evaluate.cc | 217
-rw-r--r--  deps/v8/src/debug/debug-evaluate.h | 4
-rw-r--r--  deps/v8/src/debug/debug-interface.h | 48
-rw-r--r--  deps/v8/src/debug/debug-property-iterator.cc | 213
-rw-r--r--  deps/v8/src/debug/debug-property-iterator.h | 62
-rw-r--r--  deps/v8/src/debug/debug-scopes.cc | 50
-rw-r--r--  deps/v8/src/debug/debug-stack-trace-iterator.cc | 2
-rw-r--r--  deps/v8/src/debug/debug-type-profile.cc | 14
-rw-r--r--  deps/v8/src/debug/debug-type-profile.h | 1
-rw-r--r--  deps/v8/src/debug/debug.cc | 164
-rw-r--r--  deps/v8/src/debug/debug.h | 19
-rw-r--r--  deps/v8/src/debug/ia32/debug-ia32.cc | 4
-rw-r--r--  deps/v8/src/debug/liveedit.cc | 60
-rw-r--r--  deps/v8/src/debug/mips/OWNERS | 5
-rw-r--r--  deps/v8/src/debug/mips64/OWNERS | 5
-rw-r--r--  deps/v8/src/debug/ppc/OWNERS | 5
-rw-r--r--  deps/v8/src/debug/s390/OWNERS | 5
-rw-r--r--  deps/v8/src/debug/x64/debug-x64.cc | 8
-rw-r--r--  deps/v8/src/deoptimize-reason.h | 4
-rw-r--r--  deps/v8/src/deoptimizer.cc | 538
-rw-r--r--  deps/v8/src/deoptimizer.h | 166
-rw-r--r--  deps/v8/src/detachable-vector.cc | 19
-rw-r--r--  deps/v8/src/detachable-vector.h | 109
-rw-r--r--  deps/v8/src/disasm.h | 4
-rw-r--r--  deps/v8/src/disassembler.cc | 136
-rw-r--r--  deps/v8/src/eh-frame.cc | 2
-rw-r--r--  deps/v8/src/eh-frame.h | 3
-rw-r--r--  deps/v8/src/elements-inl.h | 2
-rw-r--r--  deps/v8/src/elements-kind.cc | 52
-rw-r--r--  deps/v8/src/elements-kind.h | 15
-rw-r--r--  deps/v8/src/elements.cc | 811
-rw-r--r--  deps/v8/src/elements.h | 64
-rw-r--r--  deps/v8/src/execution.cc | 371
-rw-r--r--  deps/v8/src/execution.h | 17
-rw-r--r--  deps/v8/src/extensions/statistics-extension.cc | 14
-rw-r--r--  deps/v8/src/external-reference-table.cc | 160
-rw-r--r--  deps/v8/src/external-reference-table.h | 46
-rw-r--r--  deps/v8/src/external-reference.cc | 746
-rw-r--r--  deps/v8/src/external-reference.h | 42
-rw-r--r--  deps/v8/src/feedback-vector-inl.h | 94
-rw-r--r--  deps/v8/src/feedback-vector.cc | 248
-rw-r--r--  deps/v8/src/feedback-vector.h | 114
-rw-r--r--  deps/v8/src/field-index-inl.h | 19
-rw-r--r--  deps/v8/src/field-index.h | 19
-rw-r--r--  deps/v8/src/field-type.cc | 45
-rw-r--r--  deps/v8/src/field-type.h | 40
-rw-r--r--  deps/v8/src/fixed-dtoa.cc | 2
-rw-r--r--  deps/v8/src/flag-definitions.h | 259
-rw-r--r--  deps/v8/src/flags.cc | 62
-rw-r--r--  deps/v8/src/flags.h | 1
-rw-r--r--  deps/v8/src/frame-constants.h | 92
-rw-r--r--  deps/v8/src/frames-inl.h | 77
-rw-r--r--  deps/v8/src/frames.cc | 477
-rw-r--r--  deps/v8/src/frames.h | 169
-rw-r--r--  deps/v8/src/futex-emulation.cc | 104
-rw-r--r--  deps/v8/src/futex-emulation.h | 26
-rw-r--r--  deps/v8/src/gdb-jit.cc | 40
-rw-r--r--  deps/v8/src/global-handles.cc | 830
-rw-r--r--  deps/v8/src/global-handles.h | 197
-rw-r--r--  deps/v8/src/globals.h | 450
-rw-r--r--  deps/v8/src/handler-table.cc | 6
-rw-r--r--  deps/v8/src/handler-table.h | 8
-rw-r--r--  deps/v8/src/handles-inl.h | 57
-rw-r--r--  deps/v8/src/handles.cc | 42
-rw-r--r--  deps/v8/src/handles.h | 78
-rw-r--r--  deps/v8/src/heap-symbols.h | 81
-rw-r--r--  deps/v8/src/heap/array-buffer-collector.cc | 8
-rw-r--r--  deps/v8/src/heap/array-buffer-tracker-inl.h | 24
-rw-r--r--  deps/v8/src/heap/array-buffer-tracker.cc | 41
-rw-r--r--  deps/v8/src/heap/array-buffer-tracker.h | 24
-rw-r--r--  deps/v8/src/heap/barrier.h | 26
-rw-r--r--  deps/v8/src/heap/code-stats.cc | 76
-rw-r--r--  deps/v8/src/heap/code-stats.h | 11
-rw-r--r--  deps/v8/src/heap/concurrent-marking.cc | 592
-rw-r--r--  deps/v8/src/heap/concurrent-marking.h | 29
-rw-r--r--  deps/v8/src/heap/embedder-tracing.cc | 68
-rw-r--r--  deps/v8/src/heap/embedder-tracing.h | 78
-rw-r--r--  deps/v8/src/heap/factory-inl.h | 9
-rw-r--r--  deps/v8/src/heap/factory.cc | 940
-rw-r--r--  deps/v8/src/heap/factory.h | 143
-rw-r--r--  deps/v8/src/heap/gc-tracer.cc | 94
-rw-r--r--  deps/v8/src/heap/gc-tracer.h | 10
-rw-r--r--  deps/v8/src/heap/heap-inl.h | 290
-rw-r--r--  deps/v8/src/heap/heap-write-barrier-inl.h | 104
-rw-r--r--  deps/v8/src/heap/heap-write-barrier.h | 32
-rw-r--r--  deps/v8/src/heap/heap.cc | 1507
-rw-r--r--  deps/v8/src/heap/heap.h | 651
-rw-r--r--  deps/v8/src/heap/incremental-marking-inl.h | 22
-rw-r--r--  deps/v8/src/heap/incremental-marking-job.cc | 47
-rw-r--r--  deps/v8/src/heap/incremental-marking-job.h | 23
-rw-r--r--  deps/v8/src/heap/incremental-marking.cc | 491
-rw-r--r--  deps/v8/src/heap/incremental-marking.h | 56
-rw-r--r--  deps/v8/src/heap/invalidated-slots-inl.h | 4
-rw-r--r--  deps/v8/src/heap/invalidated-slots.cc | 1
-rw-r--r--  deps/v8/src/heap/invalidated-slots.h | 7
-rw-r--r--  deps/v8/src/heap/item-parallel-job.cc | 2
-rw-r--r--  deps/v8/src/heap/local-allocator-inl.h | 6
-rw-r--r--  deps/v8/src/heap/local-allocator.h | 6
-rw-r--r--  deps/v8/src/heap/mark-compact-inl.h | 433
-rw-r--r--  deps/v8/src/heap/mark-compact.cc | 1599
-rw-r--r--  deps/v8/src/heap/mark-compact.h | 332
-rw-r--r--  deps/v8/src/heap/marking.h | 8
-rw-r--r--  deps/v8/src/heap/memory-reducer.cc | 1
-rw-r--r--  deps/v8/src/heap/memory-reducer.h | 6
-rw-r--r--  deps/v8/src/heap/object-stats.cc | 345
-rw-r--r--  deps/v8/src/heap/object-stats.h | 3
-rw-r--r--  deps/v8/src/heap/objects-visiting-inl.h | 106
-rw-r--r--  deps/v8/src/heap/objects-visiting.cc | 80
-rw-r--r--  deps/v8/src/heap/objects-visiting.h | 143
-rw-r--r--  deps/v8/src/heap/remembered-set.h | 131
-rw-r--r--  deps/v8/src/heap/scavenge-job.cc | 1
-rw-r--r--  deps/v8/src/heap/scavenger-inl.h | 260
-rw-r--r--  deps/v8/src/heap/scavenger.cc | 131
-rw-r--r--  deps/v8/src/heap/scavenger.h | 104
-rw-r--r--  deps/v8/src/heap/setup-heap-internal.cc | 213
-rw-r--r--  deps/v8/src/heap/slot-set.cc | 99
-rw-r--r--  deps/v8/src/heap/slot-set.h | 291
-rw-r--r--  deps/v8/src/heap/spaces-inl.h | 129
-rw-r--r--  deps/v8/src/heap/spaces.cc | 687
-rw-r--r--  deps/v8/src/heap/spaces.h | 447
-rw-r--r--  deps/v8/src/heap/store-buffer.cc | 29
-rw-r--r--  deps/v8/src/heap/store-buffer.h | 7
-rw-r--r--  deps/v8/src/heap/stress-marking-observer.cc | 2
-rw-r--r--  deps/v8/src/heap/sweeper.cc | 43
-rw-r--r--  deps/v8/src/heap/sweeper.h | 14
-rw-r--r--  deps/v8/src/heap/worklist.h | 15
-rw-r--r--  deps/v8/src/ia32/assembler-ia32-inl.h | 27
-rw-r--r--  deps/v8/src/ia32/assembler-ia32.cc | 176
-rw-r--r--  deps/v8/src/ia32/assembler-ia32.h | 217
-rw-r--r--  deps/v8/src/ia32/code-stubs-ia32.cc | 514
-rw-r--r--  deps/v8/src/ia32/codegen-ia32.cc | 467
-rw-r--r--  deps/v8/src/ia32/constants-ia32.h | 4
-rw-r--r--  deps/v8/src/ia32/cpu-ia32.cc | 3
-rw-r--r--  deps/v8/src/ia32/deoptimizer-ia32.cc | 56
-rw-r--r--  deps/v8/src/ia32/frame-constants-ia32.h | 22
-rw-r--r--  deps/v8/src/ia32/interface-descriptors-ia32.cc | 22
-rw-r--r--  deps/v8/src/ia32/macro-assembler-ia32.cc | 619
-rw-r--r--  deps/v8/src/ia32/macro-assembler-ia32.h | 208
-rw-r--r--  deps/v8/src/ia32/register-ia32.h | 166
-rw-r--r--  deps/v8/src/ia32/simulator-ia32.cc | 7
-rw-r--r--  deps/v8/src/ia32/simulator-ia32.h | 10
-rw-r--r--  deps/v8/src/ic/accessor-assembler.cc | 163
-rw-r--r--  deps/v8/src/ic/accessor-assembler.h | 5
-rw-r--r--  deps/v8/src/ic/call-optimization.cc | 14
-rw-r--r--  deps/v8/src/ic/call-optimization.h | 9
-rw-r--r--  deps/v8/src/ic/handler-configuration-inl.h | 8
-rw-r--r--  deps/v8/src/ic/handler-configuration.cc | 14
-rw-r--r--  deps/v8/src/ic/handler-configuration.h | 8
-rw-r--r--  deps/v8/src/ic/ic-inl.h | 21
-rw-r--r--  deps/v8/src/ic/ic-stats.cc | 26
-rw-r--r--  deps/v8/src/ic/ic-stats.h | 11
-rw-r--r--  deps/v8/src/ic/ic.cc | 383
-rw-r--r--  deps/v8/src/ic/ic.h | 74
-rw-r--r--  deps/v8/src/ic/keyed-store-generic.cc | 409
-rw-r--r--  deps/v8/src/ic/keyed-store-generic.h | 2
-rw-r--r--  deps/v8/src/ic/stub-cache.cc | 78
-rw-r--r--  deps/v8/src/ic/stub-cache.h | 30
-rw-r--r--  deps/v8/src/identity-map.cc | 66
-rw-r--r--  deps/v8/src/identity-map.h | 37
-rw-r--r--  deps/v8/src/inspector/BUILD.gn | 45
-rw-r--r--  deps/v8/src/inspector/DEPS | 1
-rw-r--r--  deps/v8/src/inspector/OWNERS | 3
-rw-r--r--  deps/v8/src/inspector/PRESUBMIT.py | 67
-rw-r--r--  deps/v8/src/inspector/build/check_injected_script_source.py | 88
-rw-r--r--  deps/v8/src/inspector/build/closure-compiler.tar.gz.sha1 | 1
-rwxr-xr-x  deps/v8/src/inspector/build/compile-scripts.py | 150
-rwxr-xr-x  deps/v8/src/inspector/build/generate_protocol_externs.py | 246
-rwxr-xr-x  deps/v8/src/inspector/build/rjsmin.py | 295
-rw-r--r--  deps/v8/src/inspector/build/xxd.py | 28
-rw-r--r--  deps/v8/src/inspector/custom-preview.cc | 388
-rw-r--r--  deps/v8/src/inspector/custom-preview.h | 22
-rw-r--r--  deps/v8/src/inspector/injected-script-source.js | 1116
-rw-r--r--  deps/v8/src/inspector/injected-script.cc | 470
-rw-r--r--  deps/v8/src/inspector/injected-script.h | 57
-rw-r--r--  deps/v8/src/inspector/injected_script_externs.js | 122
-rw-r--r--  deps/v8/src/inspector/inspected-context.cc | 32
-rw-r--r--  deps/v8/src/inspector/inspected-context.h | 10
-rw-r--r--  deps/v8/src/inspector/inspector_protocol_config.json | 1
-rw-r--r--  deps/v8/src/inspector/js_protocol.json | 61
-rw-r--r--  deps/v8/src/inspector/js_protocol.pdl | 34
-rw-r--r--  deps/v8/src/inspector/string-16.cc | 16
-rw-r--r--  deps/v8/src/inspector/string-16.h | 10
-rw-r--r--  deps/v8/src/inspector/string-util.cc | 4
-rw-r--r--  deps/v8/src/inspector/string-util.h | 14
-rw-r--r--  deps/v8/src/inspector/v8-console-message.cc | 30
-rw-r--r--  deps/v8/src/inspector/v8-console.cc | 5
-rw-r--r--  deps/v8/src/inspector/v8-debugger-agent-impl.cc | 41
-rw-r--r--  deps/v8/src/inspector/v8-debugger-agent-impl.h | 2
-rw-r--r--  deps/v8/src/inspector/v8-debugger-script.cc | 5
-rw-r--r--  deps/v8/src/inspector/v8-debugger.cc | 304
-rw-r--r--  deps/v8/src/inspector/v8-debugger.h | 26
-rw-r--r--  deps/v8/src/inspector/v8-function-call.cc | 115
-rw-r--r--  deps/v8/src/inspector/v8-function-call.h | 65
-rw-r--r--  deps/v8/src/inspector/v8-heap-profiler-agent-impl.cc | 15
-rw-r--r--  deps/v8/src/inspector/v8-injected-script-host.cc | 427
-rw-r--r--  deps/v8/src/inspector/v8-injected-script-host.h | 53
-rw-r--r--  deps/v8/src/inspector/v8-inspector-impl.cc | 31
-rw-r--r--  deps/v8/src/inspector/v8-inspector-impl.h | 10
-rw-r--r--  deps/v8/src/inspector/v8-inspector-session-impl.cc | 15
-rw-r--r--  deps/v8/src/inspector/v8-inspector-session-impl.h | 4
-rw-r--r--  deps/v8/src/inspector/v8-internal-value-type.cc | 75
-rw-r--r--  deps/v8/src/inspector/v8-internal-value-type.h | 23
-rw-r--r--  deps/v8/src/inspector/v8-runtime-agent-impl.cc | 123
-rw-r--r--  deps/v8/src/inspector/v8-stack-trace-impl.cc | 8
-rw-r--r--  deps/v8/src/inspector/v8-stack-trace-impl.h | 3
-rw-r--r--  deps/v8/src/inspector/v8-value-utils.cc | 103
-rw-r--r--  deps/v8/src/inspector/v8-value-utils.h | 3
-rw-r--r--  deps/v8/src/inspector/value-mirror.cc | 1617
-rw-r--r--  deps/v8/src/inspector/value-mirror.h | 78
-rw-r--r--  deps/v8/src/inspector/wasm-translation.cc | 104
-rw-r--r--  deps/v8/src/inspector/wasm-translation.h | 9
-rw-r--r--  deps/v8/src/instruction-stream.cc | 84
-rw-r--r--  deps/v8/src/instruction-stream.h | 39
-rw-r--r--  deps/v8/src/interface-descriptors.cc | 59
-rw-r--r--  deps/v8/src/interface-descriptors.h | 249
-rw-r--r--  deps/v8/src/interpreter/bytecode-array-accessor.cc | 10
-rw-r--r--  deps/v8/src/interpreter/bytecode-array-accessor.h | 7
-rw-r--r--  deps/v8/src/interpreter/bytecode-array-builder.cc | 14
-rw-r--r--  deps/v8/src/interpreter/bytecode-array-builder.h | 11
-rw-r--r--  deps/v8/src/interpreter/bytecode-array-writer.cc | 2
-rw-r--r--  deps/v8/src/interpreter/bytecode-array-writer.h | 2
-rw-r--r--  deps/v8/src/interpreter/bytecode-flags.cc | 7
-rw-r--r--  deps/v8/src/interpreter/bytecode-flags.h | 3
-rw-r--r--  deps/v8/src/interpreter/bytecode-generator.cc | 1613
-rw-r--r--  deps/v8/src/interpreter/bytecode-generator.h | 144
-rw-r--r--  deps/v8/src/interpreter/bytecode-register.cc | 12
-rw-r--r--  deps/v8/src/interpreter/bytecode-register.h | 2
-rw-r--r--  deps/v8/src/interpreter/bytecodes.h | 13
-rw-r--r--  deps/v8/src/interpreter/constant-array-builder.cc | 12
-rw-r--r--  deps/v8/src/interpreter/constant-array-builder.h | 21
-rw-r--r--  deps/v8/src/interpreter/interpreter-assembler.cc | 78
-rw-r--r--  deps/v8/src/interpreter/interpreter-assembler.h | 9
-rw-r--r--  deps/v8/src/interpreter/interpreter-generator.cc | 418
-rw-r--r--  deps/v8/src/interpreter/interpreter-intrinsics-generator.cc | 56
-rw-r--r--  deps/v8/src/interpreter/interpreter-intrinsics.h | 45
-rw-r--r--  deps/v8/src/interpreter/interpreter.cc | 102
-rw-r--r--  deps/v8/src/interpreter/interpreter.h | 19
-rw-r--r--  deps/v8/src/intl.cc | 420
-rw-r--r--  deps/v8/src/intl.h | 82
-rw-r--r--  deps/v8/src/isolate-allocator.cc | 160
-rw-r--r--  deps/v8/src/isolate-allocator.h | 63
-rw-r--r--  deps/v8/src/isolate-data.h | 217
-rw-r--r--  deps/v8/src/isolate-inl.h | 75
-rw-r--r--  deps/v8/src/isolate.cc | 1366
-rw-r--r--  deps/v8/src/isolate.h | 644
-rw-r--r--  deps/v8/src/js/OWNERS | 14
-rw-r--r--  deps/v8/src/js/array.js | 515
-rw-r--r--  deps/v8/src/js/intl.js | 450
-rw-r--r--  deps/v8/src/js/macros.py | 85
-rw-r--r--  deps/v8/src/js/prologue.js | 168
-rw-r--r--  deps/v8/src/js/typedarray.js | 93
-rw-r--r--  deps/v8/src/json-parser.cc | 63
-rw-r--r--  deps/v8/src/json-stringifier.cc | 106
-rw-r--r--  deps/v8/src/keys.cc | 55
-rw-r--r--  deps/v8/src/keys.h | 4
-rw-r--r--  deps/v8/src/layout-descriptor-inl.h | 71
-rw-r--r--  deps/v8/src/layout-descriptor.cc | 48
-rw-r--r--  deps/v8/src/layout-descriptor.h | 44
-rw-r--r--  deps/v8/src/libplatform/default-foreground-task-runner.cc | 21
-rw-r--r--  deps/v8/src/libplatform/default-foreground-task-runner.h | 10
-rw-r--r--  deps/v8/src/libplatform/default-platform.cc | 14
-rw-r--r--  deps/v8/src/libplatform/default-worker-threads-task-runner.cc | 6
-rw-r--r--  deps/v8/src/libplatform/task-queue.cc | 10
-rw-r--r--  deps/v8/src/libplatform/tracing/trace-buffer.cc | 6
-rw-r--r--  deps/v8/src/libplatform/tracing/tracing-controller.cc | 12
-rw-r--r--  deps/v8/src/libsampler/sampler.cc | 266
-rw-r--r--  deps/v8/src/libsampler/sampler.h | 117
-rw-r--r--  deps/v8/src/locked-queue-inl.h | 8
-rw-r--r--  deps/v8/src/log-inl.h | 2
-rw-r--r--  deps/v8/src/log-utils.cc | 18
-rw-r--r--  deps/v8/src/log-utils.h | 14
-rw-r--r--  deps/v8/src/log.cc | 399
-rw-r--r--  deps/v8/src/log.h | 168
-rw-r--r--  deps/v8/src/lookup-cache-inl.h | 10
-rw-r--r--  deps/v8/src/lookup-cache.cc | 4
-rw-r--r--  deps/v8/src/lookup-cache.h | 16
-rw-r--r--  deps/v8/src/lookup-inl.h | 51
-rw-r--r--  deps/v8/src/lookup.cc | 142
-rw-r--r--  deps/v8/src/lookup.h | 64
-rw-r--r--  deps/v8/src/lsan.h | 31
-rw-r--r--  deps/v8/src/machine-type.h | 18
-rw-r--r--  deps/v8/src/macro-assembler-inl.h | 1
-rw-r--r--  deps/v8/src/macro-assembler.h | 26
-rw-r--r--  deps/v8/src/map-updater.cc | 57
-rw-r--r--  deps/v8/src/map-updater.h | 6
-rw-r--r--  deps/v8/src/math-random.cc | 16
-rw-r--r--  deps/v8/src/math-random.h | 6
-rw-r--r--  deps/v8/src/maybe-handles-inl.h | 18
-rw-r--r--  deps/v8/src/maybe-handles.h | 25
-rw-r--r--  deps/v8/src/memcopy.cc | 71
-rw-r--r--  deps/v8/src/memcopy.h | 492
-rw-r--r--  deps/v8/src/message-template.h | 576
-rw-r--r--  deps/v8/src/messages.cc | 105
-rw-r--r--  deps/v8/src/messages.h | 554
-rw-r--r--  deps/v8/src/microtask-queue.cc | 226
-rw-r--r--  deps/v8/src/microtask-queue.h | 124
-rw-r--r--  deps/v8/src/mips/OWNERS | 5
-rw-r--r--  deps/v8/src/mips/assembler-mips-inl.h | 95
-rw-r--r--  deps/v8/src/mips/assembler-mips.cc | 343
-rw-r--r--  deps/v8/src/mips/assembler-mips.h | 431
-rw-r--r--  deps/v8/src/mips/code-stubs-mips.cc | 596
-rw-r--r--  deps/v8/src/mips/code-stubs-mips.h | 31
-rw-r--r--  deps/v8/src/mips/codegen-mips.cc | 587
-rw-r--r--  deps/v8/src/mips/constants-mips.h | 8
-rw-r--r--  deps/v8/src/mips/cpu-mips.cc | 5
-rw-r--r--  deps/v8/src/mips/deoptimizer-mips.cc | 103
-rw-r--r--  deps/v8/src/mips/disasm-mips.cc | 22
-rw-r--r--  deps/v8/src/mips/frame-constants-mips.h | 8
-rw-r--r--  deps/v8/src/mips/interface-descriptors-mips.cc | 48
-rw-r--r--  deps/v8/src/mips/macro-assembler-mips.cc | 338
-rw-r--r--  deps/v8/src/mips/macro-assembler-mips.h | 144
-rw-r--r--  deps/v8/src/mips/register-mips.h | 382
-rw-r--r--  deps/v8/src/mips/simulator-mips.cc | 309
-rw-r--r--  deps/v8/src/mips/simulator-mips.h | 99
-rw-r--r--  deps/v8/src/mips64/OWNERS | 5
-rw-r--r--  deps/v8/src/mips64/assembler-mips64-inl.h | 24
-rw-r--r--  deps/v8/src/mips64/assembler-mips64.cc | 115
-rw-r--r--  deps/v8/src/mips64/assembler-mips64.h | 390
-rw-r--r--  deps/v8/src/mips64/code-stubs-mips64.cc | 602
-rw-r--r--  deps/v8/src/mips64/code-stubs-mips64.h | 31
-rw-r--r--  deps/v8/src/mips64/codegen-mips64.cc | 587
-rw-r--r--  deps/v8/src/mips64/cpu-mips64.cc | 5
-rw-r--r--  deps/v8/src/mips64/deoptimizer-mips64.cc | 102
-rw-r--r--  deps/v8/src/mips64/frame-constants-mips64.h | 4
-rw-r--r--  deps/v8/src/mips64/interface-descriptors-mips64.cc | 48
-rw-r--r--  deps/v8/src/mips64/macro-assembler-mips64.cc | 250
-rw-r--r--  deps/v8/src/mips64/macro-assembler-mips64.h | 135
-rw-r--r--  deps/v8/src/mips64/register-mips64.h | 389
-rw-r--r--  deps/v8/src/mips64/simulator-mips64.cc | 378
-rw-r--r--  deps/v8/src/mips64/simulator-mips64.h | 102
-rw-r--r--  deps/v8/src/objects-body-descriptors-inl.h | 581
-rw-r--r--  deps/v8/src/objects-body-descriptors.h | 72
-rw-r--r--  deps/v8/src/objects-debug.cc | 739
-rw-r--r--  deps/v8/src/objects-definitions.h | 30
-rw-r--r--  deps/v8/src/objects-inl.h | 1272
-rw-r--r--  deps/v8/src/objects-printer.cc | 1029
-rw-r--r--  deps/v8/src/objects.cc | 3060
-rw-r--r--  deps/v8/src/objects.h | 1434
-rw-r--r--  deps/v8/src/objects/allocation-site-inl.h | 17
-rw-r--r--  deps/v8/src/objects/allocation-site.h | 35
-rw-r--r--  deps/v8/src/objects/api-callbacks-inl.h | 9
-rw-r--r--  deps/v8/src/objects/api-callbacks.h | 76
-rw-r--r--  deps/v8/src/objects/arguments-inl.h | 26
-rw-r--r--  deps/v8/src/objects/arguments.h | 70
-rw-r--r--  deps/v8/src/objects/bigint.cc | 134
-rw-r--r--  deps/v8/src/objects/bigint.h | 71
-rw-r--r--  deps/v8/src/objects/builtin-function-id.h | 7
-rw-r--r--  deps/v8/src/objects/cell-inl.h (renamed from deps/v8/src/objects/microtask-queue-inl.h) | 23
-rw-r--r--  deps/v8/src/objects/cell.h | 50
-rw-r--r--  deps/v8/src/objects/code-inl.h | 258
-rw-r--r--  deps/v8/src/objects/code.h | 284
-rw-r--r--  deps/v8/src/objects/compilation-cache-inl.h | 31
-rw-r--r--  deps/v8/src/objects/compilation-cache.h | 66
-rw-r--r--  deps/v8/src/objects/data-handler-inl.h | 6
-rw-r--r--  deps/v8/src/objects/data-handler.h | 22
-rw-r--r--  deps/v8/src/objects/debug-objects-inl.h | 23
-rw-r--r--  deps/v8/src/objects/debug-objects.cc | 18
-rw-r--r--  deps/v8/src/objects/debug-objects.h | 49
-rw-r--r--  deps/v8/src/objects/descriptor-array-inl.h | 226
-rw-r--r--  deps/v8/src/objects/descriptor-array.h | 192
-rw-r--r--  deps/v8/src/objects/dictionary-inl.h | 214
-rw-r--r--  deps/v8/src/objects/dictionary.h | 77
-rw-r--r--  deps/v8/src/objects/embedder-data-array-inl.h | 39
-rw-r--r--  deps/v8/src/objects/embedder-data-array.cc | 27
-rw-r--r--  deps/v8/src/objects/embedder-data-array.h | 78
-rw-r--r--  deps/v8/src/objects/embedder-data-slot-inl.h | 127
-rw-r--r--  deps/v8/src/objects/embedder-data-slot.h | 82
-rw-r--r--  deps/v8/src/objects/feedback-cell-inl.h | 29
-rw-r--r--  deps/v8/src/objects/feedback-cell.h | 51
-rw-r--r--  deps/v8/src/objects/fixed-array-inl.h | 355
-rw-r--r--  deps/v8/src/objects/fixed-array.h | 252
-rw-r--r--  deps/v8/src/objects/foreign-inl.h | 41
-rw-r--r--  deps/v8/src/objects/foreign.h | 59
-rw-r--r--  deps/v8/src/objects/frame-array-inl.h | 9
-rw-r--r--  deps/v8/src/objects/frame-array.h | 9
-rw-r--r--  deps/v8/src/objects/free-space-inl.h | 64
-rw-r--r--  deps/v8/src/objects/free-space.h | 61
-rw-r--r--  deps/v8/src/objects/hash-table-inl.h | 49
-rw-r--r--  deps/v8/src/objects/hash-table.h | 64
-rw-r--r--  deps/v8/src/objects/heap-number-inl.h | 56
-rw-r--r--  deps/v8/src/objects/heap-number.h | 89
-rw-r--r--  deps/v8/src/objects/heap-object-inl.h | 43
-rw-r--r--  deps/v8/src/objects/heap-object.h | 214
-rw-r--r--  deps/v8/src/objects/instance-type-inl.h | 82
-rw-r--r--  deps/v8/src/objects/instance-type.h | 571
-rw-r--r--  deps/v8/src/objects/intl-objects-inl.h | 29
-rw-r--r--  deps/v8/src/objects/intl-objects.cc | 1561
-rw-r--r--  deps/v8/src/objects/intl-objects.h | 178
-rw-r--r--  deps/v8/src/objects/js-array-buffer-inl.h | 52
-rw-r--r--  deps/v8/src/objects/js-array-buffer.cc | 31
-rw-r--r--  deps/v8/src/objects/js-array-buffer.h | 108
-rw-r--r--  deps/v8/src/objects/js-array-inl.h | 7
-rw-r--r--  deps/v8/src/objects/js-array.h | 57
-rw-r--r--  deps/v8/src/objects/js-break-iterator-inl.h | 4
-rw-r--r--  deps/v8/src/objects/js-break-iterator.cc | 127
-rw-r--r--  deps/v8/src/objects/js-break-iterator.h | 47
-rw-r--r--  deps/v8/src/objects/js-collator-inl.h | 2
-rw-r--r--  deps/v8/src/objects/js-collator.cc | 288
-rw-r--r--  deps/v8/src/objects/js-collator.h | 17
-rw-r--r--  deps/v8/src/objects/js-collection-inl.h | 32
-rw-r--r--  deps/v8/src/objects/js-collection.h | 53
-rw-r--r--  deps/v8/src/objects/js-date-time-format-inl.h | 13
-rw-r--r--  deps/v8/src/objects/js-date-time-format.cc | 551
-rw-r--r--  deps/v8/src/objects/js-date-time-format.h | 43
-rw-r--r--  deps/v8/src/objects/js-generator-inl.h | 17
-rw-r--r--  deps/v8/src/objects/js-generator.h | 97
-rw-r--r--  deps/v8/src/objects/js-list-format-inl.h | 2
-rw-r--r--  deps/v8/src/objects/js-list-format.cc | 370
-rw-r--r--  deps/v8/src/objects/js-list-format.h | 25
-rw-r--r--  deps/v8/src/objects/js-locale-inl.h | 46
-rw-r--r--  deps/v8/src/objects/js-locale.cc | 569
-rw-r--r--  deps/v8/src/objects/js-locale.h | 120
-rw-r--r--  deps/v8/src/objects/js-number-format-inl.h | 2
-rw-r--r--  deps/v8/src/objects/js-number-format.cc | 251
-rw-r--r--  deps/v8/src/objects/js-number-format.h | 34
-rw-r--r--  deps/v8/src/objects/js-objects-inl.h | 339
-rw-r--r--  deps/v8/src/objects/js-objects.h | 427
-rw-r--r--  deps/v8/src/objects/js-plural-rules-inl.h | 15
-rw-r--r--  deps/v8/src/objects/js-plural-rules.cc | 105
-rw-r--r--  deps/v8/src/objects/js-plural-rules.h | 46
-rw-r--r--  deps/v8/src/objects/js-promise-inl.h | 5
-rw-r--r--  deps/v8/src/objects/js-promise.h | 21
-rw-r--r--  deps/v8/src/objects/js-proxy-inl.h | 2
-rw-r--r--  deps/v8/src/objects/js-proxy.h | 32
-rw-r--r--  deps/v8/src/objects/js-regexp-inl.h | 38
-rw-r--r--  deps/v8/src/objects/js-regexp-string-iterator-inl.h | 2
-rw-r--r--  deps/v8/src/objects/js-regexp-string-iterator.h | 20
-rw-r--r--  deps/v8/src/objects/js-regexp.h | 93
-rw-r--r--  deps/v8/src/objects/js-relative-time-format-inl.h | 4
-rw-r--r--  deps/v8/src/objects/js-relative-time-format.cc | 183
-rw-r--r--  deps/v8/src/objects/js-relative-time-format.h | 23
-rw-r--r--  deps/v8/src/objects/js-segment-iterator-inl.h | 53
-rw-r--r--  deps/v8/src/objects/js-segment-iterator.cc | 290
-rw-r--r--  deps/v8/src/objects/js-segment-iterator.h | 112
-rw-r--r--  deps/v8/src/objects/js-segmenter-inl.h | 13
-rw-r--r--  deps/v8/src/objects/js-segmenter.cc | 149
-rw-r--r--  deps/v8/src/objects/js-segmenter.h | 49
-rw-r--r--  deps/v8/src/objects/js-weak-refs-inl.h | 178
-rw-r--r--  deps/v8/src/objects/js-weak-refs.h | 182
-rw-r--r--  deps/v8/src/objects/literal-objects-inl.h | 10
-rw-r--r--  deps/v8/src/objects/literal-objects.cc | 70
-rw-r--r--  deps/v8/src/objects/literal-objects.h | 23
-rw-r--r--  deps/v8/src/objects/managed.cc | 4
-rw-r--r--  deps/v8/src/objects/managed.h | 13
-rw-r--r--  deps/v8/src/objects/map-inl.h | 216
-rw-r--r--  deps/v8/src/objects/map.h | 286
-rw-r--r--  deps/v8/src/objects/maybe-object-inl.h | 97
-rw-r--r--  deps/v8/src/objects/maybe-object.h | 102
-rw-r--r--  deps/v8/src/objects/microtask-inl.h | 7
-rw-r--r--  deps/v8/src/objects/microtask-queue.cc | 40
-rw-r--r--  deps/v8/src/objects/microtask-queue.h | 55
-rw-r--r--  deps/v8/src/objects/microtask.h | 34
-rw-r--r--  deps/v8/src/objects/module-inl.h | 23
-rw-r--r--  deps/v8/src/objects/module.cc | 24
-rw-r--r--  deps/v8/src/objects/module.h | 113
-rw-r--r--  deps/v8/src/objects/name-inl.h | 39
-rw-r--r--  deps/v8/src/objects/name.h | 31
-rw-r--r--  deps/v8/src/objects/object-macros-undef.h | 30
-rw-r--r--  deps/v8/src/objects/object-macros.h | 331
-rw-r--r--  deps/v8/src/objects/oddball-inl.h | 54
-rw-r--r--  deps/v8/src/objects/oddball.h | 97
-rw-r--r--  deps/v8/src/objects/ordered-hash-table-inl.h | 150
-rw-r--r--  deps/v8/src/objects/ordered-hash-table.cc | 632
-rw-r--r--  deps/v8/src/objects/ordered-hash-table.h | 534
-rw-r--r--  deps/v8/src/objects/promise-inl.h | 11
-rw-r--r--  deps/v8/src/objects/promise.h | 94
-rw-r--r--  deps/v8/src/objects/property-array-inl.h | 55
-rw-r--r--  deps/v8/src/objects/property-array.h | 27
-rw-r--r--  deps/v8/src/objects/property-cell-inl.h | 40
-rw-r--r--  deps/v8/src/objects/property-cell.h | 81
-rw-r--r--  deps/v8/src/objects/property-descriptor-object-inl.h | 1
-rw-r--r--  deps/v8/src/objects/property-descriptor-object.h | 2
-rw-r--r--  deps/v8/src/objects/prototype-info-inl.h | 14
-rw-r--r--  deps/v8/src/objects/prototype-info.h | 44
-rw-r--r--  deps/v8/src/objects/regexp-match-info.h | 25
-rw-r--r--  deps/v8/src/objects/scope-info.cc | 87
-rw-r--r--  deps/v8/src/objects/scope-info.h | 106
-rw-r--r--  deps/v8/src/objects/script-inl.h | 17
-rw-r--r--  deps/v8/src/objects/script.h | 52
-rw-r--r--  deps/v8/src/objects/shared-function-info-inl.h | 385
-rw-r--r--  deps/v8/src/objects/shared-function-info.h | 390
-rw-r--r--  deps/v8/src/objects/slots-atomic-inl.h | 100
-rw-r--r--  deps/v8/src/objects/slots-inl.h | 126
-rw-r--r--  deps/v8/src/objects/slots.h | 181
-rw-r--r--  deps/v8/src/objects/smi-inl.h | 25
-rw-r--r--  deps/v8/src/objects/smi.h | 107
-rw-r--r--  deps/v8/src/objects/stack-frame-info-inl.h | 4
-rw-r--r--  deps/v8/src/objects/stack-frame-info.h | 33
-rw-r--r--  deps/v8/src/objects/string-inl.h | 270
-rw-r--r--  deps/v8/src/objects/string-table-inl.h | 69
-rw-r--r--  deps/v8/src/objects/string-table.h | 27
-rw-r--r--  deps/v8/src/objects/string.h | 212
-rw-r--r--  deps/v8/src/objects/struct-inl.h | 71
-rw-r--r--  deps/v8/src/objects/struct.h | 127
-rw-r--r--  deps/v8/src/objects/template-objects.h | 4
-rw-r--r--  deps/v8/src/objects/templates-inl.h | 108
-rw-r--r--  deps/v8/src/objects/templates.h | 189
-rw-r--r--  deps/v8/src/optimized-compilation-info.cc | 43
-rw-r--r--  deps/v8/src/optimized-compilation-info.h | 51
-rw-r--r--  deps/v8/src/parsing/duplicate-finder.h | 36
-rw-r--r--  deps/v8/src/parsing/expression-classifier.h | 568
-rw-r--r--  deps/v8/src/parsing/expression-scope-reparenter.cc | 6
-rw-r--r--  deps/v8/src/parsing/expression-scope.h | 710
-rw-r--r--  deps/v8/src/parsing/func-name-inferrer.cc | 33
-rw-r--r--  deps/v8/src/parsing/func-name-inferrer.h | 65
-rw-r--r--  deps/v8/src/parsing/keywords-gen.h | 177
-rw-r--r--  deps/v8/src/parsing/keywords.txt | 64
-rw-r--r--  deps/v8/src/parsing/parse-info.cc | 35
-rw-r--r--  deps/v8/src/parsing/parse-info.h | 90
-rw-r--r--  deps/v8/src/parsing/parser-base.h | 4704
-rw-r--r--  deps/v8/src/parsing/parser.cc | 2395
-rw-r--r--  deps/v8/src/parsing/parser.h | 457
-rw-r--r--  deps/v8/src/parsing/parsing.cc | 3
-rw-r--r--  deps/v8/src/parsing/pattern-rewriter.cc | 700
-rw-r--r--  deps/v8/src/parsing/preparse-data-impl.h | 234
-rw-r--r--  deps/v8/src/parsing/preparse-data.cc | 716
-rw-r--r--  deps/v8/src/parsing/preparse-data.h | 275
-rw-r--r--  deps/v8/src/parsing/preparsed-scope-data-impl.h | 259
-rw-r--r--  deps/v8/src/parsing/preparsed-scope-data.cc | 737
-rw-r--r--  deps/v8/src/parsing/preparsed-scope-data.h | 214
-rw-r--r--  deps/v8/src/parsing/preparser.cc | 411
-rw-r--r--  deps/v8/src/parsing/preparser.h | 853
-rw-r--r--  deps/v8/src/parsing/rewriter.cc | 79
-rw-r--r--  deps/v8/src/parsing/rewriter.h | 10
-rw-r--r--  deps/v8/src/parsing/scanner-character-streams.cc | 56
-rw-r--r--  deps/v8/src/parsing/scanner-inl.h | 772
-rw-r--r--  deps/v8/src/parsing/scanner.cc | 227
-rw-r--r--  deps/v8/src/parsing/scanner.h | 245
-rw-r--r--  deps/v8/src/parsing/token.cc | 29
-rw-r--r--  deps/v8/src/parsing/token.h | 192
-rw-r--r--  deps/v8/src/pending-compilation-error-handler.cc | 32
-rw-r--r--  deps/v8/src/pending-compilation-error-handler.h | 33
-rw-r--r--  deps/v8/src/perf-jit.cc | 32
-rw-r--r--  deps/v8/src/perf-jit.h | 20
-rw-r--r--  deps/v8/src/pointer-with-payload.h | 104
-rw-r--r--  deps/v8/src/ppc/OWNERS | 5
-rw-r--r--  deps/v8/src/ppc/assembler-ppc-inl.h | 27
-rw-r--r--  deps/v8/src/ppc/assembler-ppc.cc | 134
-rw-r--r--  deps/v8/src/ppc/assembler-ppc.h | 355
-rw-r--r--  deps/v8/src/ppc/code-stubs-ppc.cc | 621
-rw-r--r--  deps/v8/src/ppc/code-stubs-ppc.h | 31
-rw-r--r--  deps/v8/src/ppc/codegen-ppc.cc | 55
-rw-r--r--  deps/v8/src/ppc/constants-ppc.h | 30
-rw-r--r--  deps/v8/src/ppc/cpu-ppc.cc | 6
-rw-r--r--  deps/v8/src/ppc/deoptimizer-ppc.cc | 53
-rw-r--r--  deps/v8/src/ppc/disasm-ppc.cc | 10
-rw-r--r--  deps/v8/src/ppc/frame-constants-ppc.cc | 8
-rw-r--r--  deps/v8/src/ppc/frame-constants-ppc.h | 2
-rw-r--r--  deps/v8/src/ppc/interface-descriptors-ppc.cc | 21
-rw-r--r--  deps/v8/src/ppc/macro-assembler-ppc.cc | 437
-rw-r--r--  deps/v8/src/ppc/macro-assembler-ppc.h | 132
-rw-r--r--  deps/v8/src/ppc/register-ppc.h | 321
-rw-r--r--  deps/v8/src/ppc/simulator-ppc.cc | 563
-rw-r--r--  deps/v8/src/ppc/simulator-ppc.h | 167
-rw-r--r--  deps/v8/src/profiler/allocation-tracker.cc | 28
-rw-r--r--  deps/v8/src/profiler/allocation-tracker.h | 4
-rw-r--r--  deps/v8/src/profiler/circular-queue.h | 6
-rw-r--r--  deps/v8/src/profiler/cpu-profiler-inl.h | 6
-rw-r--r--  deps/v8/src/profiler/cpu-profiler.cc | 147
-rw-r--r--  deps/v8/src/profiler/cpu-profiler.h | 89
-rw-r--r--  deps/v8/src/profiler/heap-profiler.cc | 20
-rw-r--r--  deps/v8/src/profiler/heap-profiler.h | 2
-rw-r--r--  deps/v8/src/profiler/heap-snapshot-generator.cc | 485
-rw-r--r--  deps/v8/src/profiler/heap-snapshot-generator.h | 152
-rw-r--r--  deps/v8/src/profiler/profile-generator.cc | 148
-rw-r--r--  deps/v8/src/profiler/profile-generator.h | 56
-rw-r--r--  deps/v8/src/profiler/profiler-listener.cc | 188
-rw-r--r--  deps/v8/src/profiler/profiler-listener.h | 37
-rw-r--r--  deps/v8/src/profiler/sampling-heap-profiler.cc | 85
-rw-r--r--  deps/v8/src/profiler/sampling-heap-profiler.h | 101
-rw-r--r--  deps/v8/src/profiler/strings-storage.cc | 8
-rw-r--r--  deps/v8/src/profiler/strings-storage.h | 4
-rw-r--r--  deps/v8/src/profiler/tick-sample.cc | 22
-rw-r--r--  deps/v8/src/profiler/tick-sample.h | 2
-rw-r--r--  deps/v8/src/profiler/tracing-cpu-profiler.cc | 10
-rw-r--r--  deps/v8/src/property-descriptor.cc | 4
-rw-r--r--  deps/v8/src/property-details.h | 28
-rw-r--r--  deps/v8/src/property.cc | 3
-rw-r--r--  deps/v8/src/prototype-inl.h | 16
-rw-r--r--  deps/v8/src/prototype.h | 10
-rw-r--r--  deps/v8/src/ptr-compr-inl.h | 243
-rw-r--r--  deps/v8/src/ptr-compr.h | 145
-rw-r--r--  deps/v8/src/regexp/arm/regexp-macro-assembler-arm.cc | 41
-rw-r--r--  deps/v8/src/regexp/arm/regexp-macro-assembler-arm.h | 6
-rw-r--r--  deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc | 51
-rw-r--r--  deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.h | 9
-rw-r--r--  deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.cc | 39
-rw-r--r--  deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.h | 7
-rw-r--r--  deps/v8/src/regexp/interpreter-irregexp.cc | 2
-rw-r--r--  deps/v8/src/regexp/jsregexp-inl.h | 4
-rw-r--r--  deps/v8/src/regexp/jsregexp.cc | 58
-rw-r--r--  deps/v8/src/regexp/jsregexp.h | 36
-rw-r--r--  deps/v8/src/regexp/mips/OWNERS | 5
-rw-r--r--  deps/v8/src/regexp/mips/regexp-macro-assembler-mips.cc | 52
-rw-r--r--  deps/v8/src/regexp/mips/regexp-macro-assembler-mips.h | 6
-rw-r--r--  deps/v8/src/regexp/mips64/OWNERS | 5
-rw-r--r--deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.cc52
-rw-r--r--deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.h5
-rw-r--r--deps/v8/src/regexp/ppc/OWNERS5
-rw-r--r--deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc46
-rw-r--r--deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.h5
-rw-r--r--deps/v8/src/regexp/regexp-macro-assembler-irregexp-inl.h2
-rw-r--r--deps/v8/src/regexp/regexp-macro-assembler.cc62
-rw-r--r--deps/v8/src/regexp/regexp-macro-assembler.h18
-rw-r--r--deps/v8/src/regexp/regexp-parser.cc15
-rw-r--r--deps/v8/src/regexp/regexp-stack.cc4
-rw-r--r--deps/v8/src/regexp/regexp-utils.cc41
-rw-r--r--deps/v8/src/regexp/regexp-utils.h3
-rw-r--r--deps/v8/src/regexp/s390/OWNERS5
-rw-r--r--deps/v8/src/regexp/s390/regexp-macro-assembler-s390.cc38
-rw-r--r--deps/v8/src/regexp/s390/regexp-macro-assembler-s390.h5
-rw-r--r--deps/v8/src/regexp/x64/regexp-macro-assembler-x64.cc49
-rw-r--r--deps/v8/src/regexp/x64/regexp-macro-assembler-x64.h29
-rw-r--r--deps/v8/src/register-arch.h31
-rw-r--r--deps/v8/src/register-configuration.cc142
-rw-r--r--deps/v8/src/register-configuration.h35
-rw-r--r--deps/v8/src/register.h126
-rw-r--r--deps/v8/src/reloc-info.cc86
-rw-r--r--deps/v8/src/reloc-info.h48
-rw-r--r--deps/v8/src/roots-inl.h81
-rw-r--r--deps/v8/src/roots.cc15
-rw-r--r--deps/v8/src/roots.h249
-rw-r--r--deps/v8/src/runtime-profiler.cc70
-rw-r--r--deps/v8/src/runtime-profiler.h17
-rw-r--r--deps/v8/src/runtime/runtime-array.cc108
-rw-r--r--deps/v8/src/runtime/runtime-atomics.cc71
-rw-r--r--deps/v8/src/runtime/runtime-classes.cc114
-rw-r--r--deps/v8/src/runtime/runtime-collections.cc1
-rw-r--r--deps/v8/src/runtime/runtime-compiler.cc23
-rw-r--r--deps/v8/src/runtime/runtime-date.cc2
-rw-r--r--deps/v8/src/runtime/runtime-debug.cc43
-rw-r--r--deps/v8/src/runtime/runtime-forin.cc1
-rw-r--r--deps/v8/src/runtime/runtime-function.cc4
-rw-r--r--deps/v8/src/runtime/runtime-futex.cc4
-rw-r--r--deps/v8/src/runtime/runtime-generator.cc50
-rw-r--r--deps/v8/src/runtime/runtime-internal.cc213
-rw-r--r--deps/v8/src/runtime/runtime-interpreter.cc22
-rw-r--r--deps/v8/src/runtime/runtime-intl.cc82
-rw-r--r--deps/v8/src/runtime/runtime-literals.cc66
-rw-r--r--deps/v8/src/runtime/runtime-numbers.cc6
-rw-r--r--deps/v8/src/runtime/runtime-object.cc147
-rw-r--r--deps/v8/src/runtime/runtime-operators.cc1
-rw-r--r--deps/v8/src/runtime/runtime-promise.cc94
-rw-r--r--deps/v8/src/runtime/runtime-proxy.cc1
-rw-r--r--deps/v8/src/runtime/runtime-regexp.cc97
-rw-r--r--deps/v8/src/runtime/runtime-scopes.cc175
-rw-r--r--deps/v8/src/runtime/runtime-strings.cc69
-rw-r--r--deps/v8/src/runtime/runtime-symbol.cc8
-rw-r--r--deps/v8/src/runtime/runtime-test.cc154
-rw-r--r--deps/v8/src/runtime/runtime-typedarray.cc21
-rw-r--r--deps/v8/src/runtime/runtime-utils.h37
-rw-r--r--deps/v8/src/runtime/runtime-wasm.cc233
-rw-r--r--deps/v8/src/runtime/runtime-weak-refs.cc28
-rw-r--r--deps/v8/src/runtime/runtime.cc13
-rw-r--r--deps/v8/src/runtime/runtime.h161
-rw-r--r--deps/v8/src/s390/OWNERS5
-rw-r--r--deps/v8/src/s390/assembler-s390-inl.h30
-rw-r--r--deps/v8/src/s390/assembler-s390.cc141
-rw-r--r--deps/v8/src/s390/assembler-s390.h319
-rw-r--r--deps/v8/src/s390/code-stubs-s390.cc655
-rw-r--r--deps/v8/src/s390/code-stubs-s390.h31
-rw-r--r--deps/v8/src/s390/codegen-s390.cc52
-rw-r--r--deps/v8/src/s390/constants-s390.h3
-rw-r--r--deps/v8/src/s390/cpu-s390.cc5
-rw-r--r--deps/v8/src/s390/deoptimizer-s390.cc61
-rw-r--r--deps/v8/src/s390/disasm-s390.cc8
-rw-r--r--deps/v8/src/s390/frame-constants-s390.cc8
-rw-r--r--deps/v8/src/s390/frame-constants-s390.h4
-rw-r--r--deps/v8/src/s390/interface-descriptors-s390.cc21
-rw-r--r--deps/v8/src/s390/macro-assembler-s390.cc404
-rw-r--r--deps/v8/src/s390/macro-assembler-s390.h118
-rw-r--r--deps/v8/src/s390/register-s390.h281
-rw-r--r--deps/v8/src/s390/simulator-s390.cc52
-rw-r--r--deps/v8/src/s390/simulator-s390.h4
-rw-r--r--deps/v8/src/safepoint-table.cc19
-rw-r--r--deps/v8/src/safepoint-table.h72
-rw-r--r--deps/v8/src/setup-isolate-full.cc4
-rw-r--r--deps/v8/src/setup-isolate.h2
-rw-r--r--deps/v8/src/signature.h20
-rw-r--r--deps/v8/src/simulator-base.cc5
-rw-r--r--deps/v8/src/simulator-base.h6
-rw-r--r--deps/v8/src/simulator.h59
-rw-r--r--deps/v8/src/snapshot/builtin-deserializer-allocator.cc176
-rw-r--r--deps/v8/src/snapshot/builtin-deserializer-allocator.h127
-rw-r--r--deps/v8/src/snapshot/builtin-deserializer.cc167
-rw-r--r--deps/v8/src/snapshot/builtin-deserializer.h83
-rw-r--r--deps/v8/src/snapshot/builtin-serializer-allocator.cc61
-rw-r--r--deps/v8/src/snapshot/builtin-serializer-allocator.h50
-rw-r--r--deps/v8/src/snapshot/builtin-serializer.cc110
-rw-r--r--deps/v8/src/snapshot/builtin-serializer.h59
-rw-r--r--deps/v8/src/snapshot/code-serializer.cc138
-rw-r--r--deps/v8/src/snapshot/code-serializer.h44
-rw-r--r--deps/v8/src/snapshot/deserializer-allocator.cc (renamed from deps/v8/src/snapshot/default-deserializer-allocator.cc)115
-rw-r--r--deps/v8/src/snapshot/deserializer-allocator.h (renamed from deps/v8/src/snapshot/default-deserializer-allocator.h)34
-rw-r--r--deps/v8/src/snapshot/deserializer.cc467
-rw-r--r--deps/v8/src/snapshot/deserializer.h89
-rw-r--r--deps/v8/src/snapshot/embedded-data.cc332
-rw-r--r--deps/v8/src/snapshot/embedded-data.h134
-rw-r--r--deps/v8/src/snapshot/embedded-empty.cc20
-rw-r--r--deps/v8/src/snapshot/embedded-file-writer.cc645
-rw-r--r--deps/v8/src/snapshot/embedded-file-writer.h424
-rw-r--r--deps/v8/src/snapshot/macros.h82
-rw-r--r--deps/v8/src/snapshot/mksnapshot.cc225
-rw-r--r--deps/v8/src/snapshot/natives-common.cc19
-rw-r--r--deps/v8/src/snapshot/natives-external.cc26
-rw-r--r--deps/v8/src/snapshot/natives.h25
-rw-r--r--deps/v8/src/snapshot/object-deserializer.cc38
-rw-r--r--deps/v8/src/snapshot/object-deserializer.h8
-rw-r--r--deps/v8/src/snapshot/partial-deserializer.cc7
-rw-r--r--deps/v8/src/snapshot/partial-deserializer.h2
-rw-r--r--deps/v8/src/snapshot/partial-serializer.cc174
-rw-r--r--deps/v8/src/snapshot/partial-serializer.h20
-rw-r--r--deps/v8/src/snapshot/read-only-deserializer.cc60
-rw-r--r--deps/v8/src/snapshot/read-only-deserializer.h34
-rw-r--r--deps/v8/src/snapshot/read-only-serializer.cc105
-rw-r--r--deps/v8/src/snapshot/read-only-serializer.h46
-rw-r--r--deps/v8/src/snapshot/references.h4
-rw-r--r--deps/v8/src/snapshot/roots-serializer.cc67
-rw-r--r--deps/v8/src/snapshot/roots-serializer.h64
-rw-r--r--deps/v8/src/snapshot/serializer-allocator.cc (renamed from deps/v8/src/snapshot/default-serializer-allocator.cc)33
-rw-r--r--deps/v8/src/snapshot/serializer-allocator.h (renamed from deps/v8/src/snapshot/default-serializer-allocator.h)16
-rw-r--r--deps/v8/src/snapshot/serializer-common.cc26
-rw-r--r--deps/v8/src/snapshot/serializer-common.h63
-rw-r--r--deps/v8/src/snapshot/serializer.cc421
-rw-r--r--deps/v8/src/snapshot/serializer.h143
-rw-r--r--deps/v8/src/snapshot/snapshot-common.cc453
-rw-r--r--deps/v8/src/snapshot/snapshot-external.cc6
-rw-r--r--deps/v8/src/snapshot/snapshot-source-sink.cc3
-rw-r--r--deps/v8/src/snapshot/snapshot-source-sink.h2
-rw-r--r--deps/v8/src/snapshot/snapshot.h125
-rw-r--r--deps/v8/src/snapshot/startup-deserializer.cc74
-rw-r--r--deps/v8/src/snapshot/startup-deserializer.h15
-rw-r--r--deps/v8/src/snapshot/startup-serializer.cc149
-rw-r--r--deps/v8/src/snapshot/startup-serializer.h89
-rw-r--r--deps/v8/src/source-position-table.cc43
-rw-r--r--deps/v8/src/source-position-table.h22
-rw-r--r--deps/v8/src/source-position.cc34
-rw-r--r--deps/v8/src/source-position.h86
-rw-r--r--deps/v8/src/string-builder-inl.h31
-rw-r--r--deps/v8/src/string-builder.cc36
-rw-r--r--deps/v8/src/string-constants.h1
-rw-r--r--deps/v8/src/string-hasher-inl.h99
-rw-r--r--deps/v8/src/string-hasher.h11
-rw-r--r--deps/v8/src/string-stream.cc66
-rw-r--r--deps/v8/src/string-stream.h29
-rw-r--r--deps/v8/src/task-utils.cc64
-rw-r--r--deps/v8/src/task-utils.h33
-rw-r--r--deps/v8/src/thread-id.cc38
-rw-r--r--deps/v8/src/thread-id.h73
-rw-r--r--deps/v8/src/torque-assembler.h58
-rw-r--r--deps/v8/src/torque/ast.h339
-rw-r--r--deps/v8/src/torque/cfg.cc89
-rw-r--r--deps/v8/src/torque/cfg.h45
-rw-r--r--deps/v8/src/torque/csa-generator.cc463
-rw-r--r--deps/v8/src/torque/csa-generator.h13
-rw-r--r--deps/v8/src/torque/declarable.cc98
-rw-r--r--deps/v8/src/torque/declarable.h430
-rw-r--r--deps/v8/src/torque/declaration-visitor.cc782
-rw-r--r--deps/v8/src/torque/declaration-visitor.h146
-rw-r--r--deps/v8/src/torque/declarations.cc475
-rw-r--r--deps/v8/src/torque/declarations.h286
-rw-r--r--deps/v8/src/torque/earley-parser.h4
-rw-r--r--deps/v8/src/torque/file-visitor.cc60
-rw-r--r--deps/v8/src/torque/file-visitor.h60
-rw-r--r--deps/v8/src/torque/global-context.h137
-rw-r--r--deps/v8/src/torque/implementation-visitor.cc2116
-rw-r--r--deps/v8/src/torque/implementation-visitor.h316
-rw-r--r--deps/v8/src/torque/instructions.cc144
-rw-r--r--deps/v8/src/torque/instructions.h143
-rw-r--r--deps/v8/src/torque/scope.cc51
-rw-r--r--deps/v8/src/torque/scope.h169
-rw-r--r--deps/v8/src/torque/source-positions.h6
-rw-r--r--deps/v8/src/torque/torque-parser.cc482
-rw-r--r--deps/v8/src/torque/torque-parser.h2
-rw-r--r--deps/v8/src/torque/torque.cc36
-rw-r--r--deps/v8/src/torque/type-oracle.h104
-rw-r--r--deps/v8/src/torque/types.cc200
-rw-r--r--deps/v8/src/torque/types.h281
-rw-r--r--deps/v8/src/torque/utils.cc38
-rw-r--r--deps/v8/src/torque/utils.h41
-rw-r--r--deps/v8/src/tracing/OWNERS1
-rw-r--r--deps/v8/src/tracing/traced-value.cc24
-rw-r--r--deps/v8/src/tracing/tracing-category-observer.cc5
-rw-r--r--deps/v8/src/transitions-inl.h106
-rw-r--r--deps/v8/src/transitions.cc191
-rw-r--r--deps/v8/src/transitions.h129
-rw-r--r--deps/v8/src/trap-handler/DEPS7
-rw-r--r--deps/v8/src/trap-handler/handler-inside-posix.cc (renamed from deps/v8/src/trap-handler/handler-inside-linux.cc)54
-rw-r--r--deps/v8/src/trap-handler/handler-inside-posix.h31
-rw-r--r--deps/v8/src/trap-handler/handler-inside-win.cc77
-rw-r--r--deps/v8/src/trap-handler/handler-inside-win.h27
-rw-r--r--deps/v8/src/trap-handler/handler-inside.cc20
-rw-r--r--deps/v8/src/trap-handler/handler-outside-posix.cc (renamed from deps/v8/src/trap-handler/handler-outside-linux.cc)25
-rw-r--r--deps/v8/src/trap-handler/handler-outside-win.cc39
-rw-r--r--deps/v8/src/trap-handler/handler-outside.cc17
-rw-r--r--deps/v8/src/trap-handler/handler-shared.cc19
-rw-r--r--deps/v8/src/trap-handler/trap-handler-internal.h13
-rw-r--r--deps/v8/src/trap-handler/trap-handler.h38
-rw-r--r--deps/v8/src/turbo-assembler.cc71
-rw-r--r--deps/v8/src/turbo-assembler.h63
-rw-r--r--deps/v8/src/type-hints.cc2
-rw-r--r--deps/v8/src/type-hints.h1
-rw-r--r--deps/v8/src/unicode-cache-inl.h43
-rw-r--r--deps/v8/src/unicode-cache.h17
-rw-r--r--deps/v8/src/unicode-decoder.h2
-rw-r--r--deps/v8/src/unoptimized-compilation-info.cc5
-rw-r--r--deps/v8/src/unoptimized-compilation-info.h14
-rw-r--r--deps/v8/src/unwinder.cc98
-rw-r--r--deps/v8/src/uri.cc33
-rw-r--r--deps/v8/src/utils-inl.h16
-rw-r--r--deps/v8/src/utils.cc66
-rw-r--r--deps/v8/src/utils.h613
-rw-r--r--deps/v8/src/v8.cc3
-rw-r--r--deps/v8/src/v8threads.cc5
-rw-r--r--deps/v8/src/v8threads.h2
-rw-r--r--deps/v8/src/value-serializer.cc107
-rw-r--r--deps/v8/src/value-serializer.h32
-rw-r--r--deps/v8/src/vector.h45
-rw-r--r--deps/v8/src/visitors.h45
-rw-r--r--deps/v8/src/vm-state-inl.h4
-rw-r--r--deps/v8/src/wasm/OWNERS5
-rw-r--r--deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h1402
-rw-r--r--deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h52
-rw-r--r--deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h219
-rw-r--r--deps/v8/src/wasm/baseline/liftoff-assembler-defs.h27
-rw-r--r--deps/v8/src/wasm/baseline/liftoff-assembler.cc530
-rw-r--r--deps/v8/src/wasm/baseline/liftoff-assembler.h74
-rw-r--r--deps/v8/src/wasm/baseline/liftoff-compiler.cc631
-rw-r--r--deps/v8/src/wasm/baseline/liftoff-compiler.h18
-rw-r--r--deps/v8/src/wasm/baseline/liftoff-register.h60
-rw-r--r--deps/v8/src/wasm/baseline/mips/OWNERS3
-rw-r--r--deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h95
-rw-r--r--deps/v8/src/wasm/baseline/mips64/OWNERS3
-rw-r--r--deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h54
-rw-r--r--deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h22
-rw-r--r--deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h22
-rw-r--r--deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h104
-rw-r--r--deps/v8/src/wasm/compilation-environment.h127
-rw-r--r--deps/v8/src/wasm/decoder.h95
-rw-r--r--deps/v8/src/wasm/function-body-decoder-impl.h1734
-rw-r--r--deps/v8/src/wasm/function-body-decoder.cc821
-rw-r--r--deps/v8/src/wasm/function-body-decoder.h13
-rw-r--r--deps/v8/src/wasm/function-compiler.cc233
-rw-r--r--deps/v8/src/wasm/function-compiler.h123
-rw-r--r--deps/v8/src/wasm/graph-builder-interface.cc879
-rw-r--r--deps/v8/src/wasm/graph-builder-interface.h36
-rw-r--r--deps/v8/src/wasm/js-to-wasm-wrapper-cache-inl.h41
-rw-r--r--deps/v8/src/wasm/jump-table-assembler.cc16
-rw-r--r--deps/v8/src/wasm/jump-table-assembler.h5
-rw-r--r--deps/v8/src/wasm/module-compiler.cc2797
-rw-r--r--deps/v8/src/wasm/module-compiler.h82
-rw-r--r--deps/v8/src/wasm/module-decoder.cc422
-rw-r--r--deps/v8/src/wasm/module-decoder.h2
-rw-r--r--deps/v8/src/wasm/module-instantiate.cc1537
-rw-r--r--deps/v8/src/wasm/module-instantiate.h35
-rw-r--r--deps/v8/src/wasm/signature-map.cc20
-rw-r--r--deps/v8/src/wasm/signature-map.h3
-rw-r--r--deps/v8/src/wasm/streaming-decoder.cc301
-rw-r--r--deps/v8/src/wasm/streaming-decoder.h154
-rw-r--r--deps/v8/src/wasm/value-type.h16
-rw-r--r--deps/v8/src/wasm/wasm-code-manager.cc376
-rw-r--r--deps/v8/src/wasm/wasm-code-manager.h136
-rw-r--r--deps/v8/src/wasm/wasm-constants.h22
-rw-r--r--deps/v8/src/wasm/wasm-debug.cc66
-rw-r--r--deps/v8/src/wasm/wasm-engine.cc155
-rw-r--r--deps/v8/src/wasm/wasm-engine.h24
-rw-r--r--deps/v8/src/wasm/wasm-external-refs.cc13
-rw-r--r--deps/v8/src/wasm/wasm-external-refs.h4
-rw-r--r--deps/v8/src/wasm/wasm-feature-flags.h4
-rw-r--r--deps/v8/src/wasm/wasm-import-wrapper-cache-inl.h52
-rw-r--r--deps/v8/src/wasm/wasm-interpreter.cc138
-rw-r--r--deps/v8/src/wasm/wasm-js.cc521
-rw-r--r--deps/v8/src/wasm/wasm-js.h4
-rw-r--r--deps/v8/src/wasm/wasm-limits.h17
-rw-r--r--deps/v8/src/wasm/wasm-linkage.h48
-rw-r--r--deps/v8/src/wasm/wasm-memory.cc43
-rw-r--r--deps/v8/src/wasm/wasm-memory.h12
-rw-r--r--deps/v8/src/wasm/wasm-module.cc25
-rw-r--r--deps/v8/src/wasm/wasm-module.h35
-rw-r--r--deps/v8/src/wasm/wasm-objects-inl.h95
-rw-r--r--deps/v8/src/wasm/wasm-objects.cc542
-rw-r--r--deps/v8/src/wasm/wasm-objects.h359
-rw-r--r--deps/v8/src/wasm/wasm-opcodes.cc60
-rw-r--r--deps/v8/src/wasm/wasm-opcodes.h399
-rw-r--r--deps/v8/src/wasm/wasm-result.cc23
-rw-r--r--deps/v8/src/wasm/wasm-result.h121
-rw-r--r--deps/v8/src/wasm/wasm-serialization.cc174
-rw-r--r--deps/v8/src/wasm/wasm-serialization.h7
-rw-r--r--deps/v8/src/wasm/wasm-text.cc22
-rw-r--r--deps/v8/src/x64/assembler-x64-inl.h18
-rw-r--r--deps/v8/src/x64/assembler-x64.cc167
-rw-r--r--deps/v8/src/x64/assembler-x64.h435
-rw-r--r--deps/v8/src/x64/code-stubs-x64.cc581
-rw-r--r--deps/v8/src/x64/codegen-x64.cc45
-rw-r--r--deps/v8/src/x64/cpu-x64.cc3
-rw-r--r--deps/v8/src/x64/deoptimizer-x64.cc87
-rw-r--r--deps/v8/src/x64/disasm-x64.cc13
-rw-r--r--deps/v8/src/x64/eh-frame-x64.cc4
-rw-r--r--deps/v8/src/x64/frame-constants-x64.cc5
-rw-r--r--deps/v8/src/x64/frame-constants-x64.h29
-rw-r--r--deps/v8/src/x64/interface-descriptors-x64.cc21
-rw-r--r--deps/v8/src/x64/macro-assembler-x64.cc732
-rw-r--r--deps/v8/src/x64/macro-assembler-x64.h208
-rw-r--r--deps/v8/src/x64/register-x64.h224
-rw-r--r--deps/v8/src/x64/simulator-x64.cc7
-rw-r--r--deps/v8/src/x64/simulator-x64.h10
-rw-r--r--deps/v8/src/zone/accounting-allocator.cc8
-rw-r--r--deps/v8/src/zone/zone-allocator.h17
-rw-r--r--deps/v8/src/zone/zone-chunk-list.h2
-rw-r--r--deps/v8/src/zone/zone-handle-set.h37
-rw-r--r--deps/v8/src/zone/zone-list-inl.h10
-rw-r--r--deps/v8/src/zone/zone.cc9
-rw-r--r--deps/v8/src/zone/zone.h90
-rw-r--r--deps/v8/test/BUILD.gn3
-rw-r--r--deps/v8/test/benchmarks/csuite/README.md43
-rwxr-xr-xdeps/v8/test/benchmarks/csuite/benchmark.py220
-rwxr-xr-xdeps/v8/test/benchmarks/csuite/compare-baseline.py264
-rwxr-xr-xdeps/v8/test/benchmarks/csuite/csuite.py154
-rw-r--r--deps/v8/test/benchmarks/csuite/run-kraken.js (renamed from deps/v8/test/mjsunit/regress/regress-85177.js)61
-rw-r--r--deps/v8/test/benchmarks/csuite/sunspider-standalone-driver.js75
-rw-r--r--deps/v8/test/cctest/BUILD.gn56
-rw-r--r--deps/v8/test/cctest/OWNERS3
-rw-r--r--deps/v8/test/cctest/assembler-helper-arm.cc2
-rw-r--r--deps/v8/test/cctest/assembler-helper-arm.h10
-rw-r--r--deps/v8/test/cctest/cctest.cc24
-rw-r--r--deps/v8/test/cctest/cctest.h30
-rw-r--r--deps/v8/test/cctest/cctest.status151
-rw-r--r--deps/v8/test/cctest/compiler/c-signature.h6
-rw-r--r--deps/v8/test/cctest/compiler/call-tester.h9
-rw-r--r--deps/v8/test/cctest/compiler/code-assembler-tester.h8
-rw-r--r--deps/v8/test/cctest/compiler/codegen-tester.cc81
-rw-r--r--deps/v8/test/cctest/compiler/codegen-tester.h6
-rw-r--r--deps/v8/test/cctest/compiler/function-tester.cc12
-rw-r--r--deps/v8/test/cctest/compiler/graph-builder-tester.h10
-rw-r--r--deps/v8/test/cctest/compiler/test-basic-block-profiler.cc12
-rw-r--r--deps/v8/test/cctest/compiler/test-branch-combine.cc29
-rw-r--r--deps/v8/test/cctest/compiler/test-code-assembler.cc48
-rw-r--r--deps/v8/test/cctest/compiler/test-code-generator.cc51
-rw-r--r--deps/v8/test/cctest/compiler/test-gap-resolver.cc20
-rw-r--r--deps/v8/test/cctest/compiler/test-instruction-scheduler.cc12
-rw-r--r--deps/v8/test/cctest/compiler/test-instruction.cc4
-rw-r--r--deps/v8/test/cctest/compiler/test-js-context-specialization.cc48
-rw-r--r--deps/v8/test/cctest/compiler/test-jump-threading.cc7
-rw-r--r--deps/v8/test/cctest/compiler/test-linkage.cc1
-rw-r--r--deps/v8/test/cctest/compiler/test-machine-operator-reducer.cc39
-rw-r--r--deps/v8/test/cctest/compiler/test-multiple-return.cc7
-rw-r--r--deps/v8/test/cctest/compiler/test-representation-change.cc98
-rw-r--r--deps/v8/test/cctest/compiler/test-run-bytecode-graph-builder.cc31
-rw-r--r--deps/v8/test/cctest/compiler/test-run-load-store.cc215
-rw-r--r--deps/v8/test/cctest/compiler/test-run-machops.cc93
-rw-r--r--deps/v8/test/cctest/compiler/test-run-native-calls.cc7
-rw-r--r--deps/v8/test/cctest/compiler/test-run-retpoline.cc1
-rw-r--r--deps/v8/test/cctest/compiler/test-run-stubs.cc240
-rw-r--r--deps/v8/test/cctest/compiler/test-run-tail-calls.cc1
-rw-r--r--deps/v8/test/cctest/compiler/test-run-unwinding-info.cc1
-rw-r--r--deps/v8/test/cctest/compiler/value-helper.h2
-rw-r--r--deps/v8/test/cctest/heap/heap-tester.h5
-rw-r--r--deps/v8/test/cctest/heap/heap-utils.cc31
-rw-r--r--deps/v8/test/cctest/heap/test-alloc.cc4
-rw-r--r--deps/v8/test/cctest/heap/test-array-buffer-tracker.cc33
-rw-r--r--deps/v8/test/cctest/heap/test-compaction.cc79
-rw-r--r--deps/v8/test/cctest/heap/test-concurrent-marking.cc14
-rw-r--r--deps/v8/test/cctest/heap/test-embedder-tracing.cc7
-rw-r--r--deps/v8/test/cctest/heap/test-external-string-tracker.cc2
-rw-r--r--deps/v8/test/cctest/heap/test-heap.cc958
-rw-r--r--deps/v8/test/cctest/heap/test-invalidated-slots.cc70
-rw-r--r--deps/v8/test/cctest/heap/test-lab.cc5
-rw-r--r--deps/v8/test/cctest/heap/test-mark-compact.cc23
-rw-r--r--deps/v8/test/cctest/heap/test-page-promotion.cc21
-rw-r--r--deps/v8/test/cctest/heap/test-spaces.cc120
-rw-r--r--deps/v8/test/cctest/heap/test-weak-references.cc61
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode-expectations-printer.cc6
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode-expectations-printer.h4
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/ArrayLiterals.golden31
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/AsyncGenerators.golden457
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/BasicLoops.golden2
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/BreakableBlocks.golden4
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/CallAndSpread.golden33
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/ClassDeclarations.golden168
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/CompoundExpressions.golden22
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/CountOperators.golden40
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/CreateRestParameter.golden28
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/Delete.golden52
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/DestructuringAssignment.golden467
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/DoExpression.golden71
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/ForAwaitOf.golden1192
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/ForIn.golden20
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/ForOf.golden586
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/ForOfLoop.golden1450
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/GenerateTestUndetectable.golden64
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/Generators.golden190
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/IIFEWithOneshotOpt.golden177
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/IIFEWithoutOneshotOpt.golden5
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/NewAndSpread.golden114
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/ObjectLiterals.golden80
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateClassFields.golden91
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyLoadStoreWithoutOneShot.golden10
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/StandardForLoop.golden226
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/SuperCallAndSpread.golden37
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/TopLevelObjectLiterals.golden5
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/WithStatement.golden5
-rw-r--r--deps/v8/test/cctest/interpreter/generate-bytecode-expectations.cc22
-rw-r--r--deps/v8/test/cctest/interpreter/interpreter-tester.cc6
-rw-r--r--deps/v8/test/cctest/interpreter/interpreter-tester.h9
-rw-r--r--deps/v8/test/cctest/interpreter/test-bytecode-generator.cc83
-rw-r--r--deps/v8/test/cctest/interpreter/test-interpreter.cc221
-rw-r--r--deps/v8/test/cctest/libsampler/test-sampler.cc109
-rw-r--r--deps/v8/test/cctest/parsing/test-parse-decision.cc3
-rw-r--r--deps/v8/test/cctest/parsing/test-preparser.cc179
-rw-r--r--deps/v8/test/cctest/parsing/test-scanner-streams.cc2
-rw-r--r--deps/v8/test/cctest/parsing/test-scanner.cc36
-rw-r--r--deps/v8/test/cctest/profiler-extension.cc7
-rw-r--r--deps/v8/test/cctest/scope-test-helper.h2
-rw-r--r--deps/v8/test/cctest/test-accessor-assembler.cc13
-rw-r--r--deps/v8/test/cctest/test-accessors.cc2
-rw-r--r--deps/v8/test/cctest/test-api-interceptors.cc3
-rw-r--r--deps/v8/test/cctest/test-api.cc1709
-rw-r--r--deps/v8/test/cctest/test-api.h2
-rw-r--r--deps/v8/test/cctest/test-assembler-arm.cc106
-rw-r--r--deps/v8/test/cctest/test-assembler-arm64.cc710
-rw-r--r--deps/v8/test/cctest/test-assembler-ia32.cc74
-rw-r--r--deps/v8/test/cctest/test-assembler-mips.cc259
-rw-r--r--deps/v8/test/cctest/test-assembler-mips64.cc297
-rw-r--r--deps/v8/test/cctest/test-assembler-ppc.cc61
-rw-r--r--deps/v8/test/cctest/test-assembler-s390.cc35
-rw-r--r--deps/v8/test/cctest/test-assembler-x64.cc215
-rw-r--r--deps/v8/test/cctest/test-code-layout.cc2
-rw-r--r--deps/v8/test/cctest/test-code-stub-assembler.cc156
-rw-r--r--deps/v8/test/cctest/test-code-stubs-arm.cc166
-rw-r--r--deps/v8/test/cctest/test-code-stubs-arm64.cc180
-rw-r--r--deps/v8/test/cctest/test-code-stubs-ia32.cc148
-rw-r--r--deps/v8/test/cctest/test-code-stubs-mips.cc181
-rw-r--r--deps/v8/test/cctest/test-code-stubs-mips64.cc179
-rw-r--r--deps/v8/test/cctest/test-code-stubs-x64.cc147
-rw-r--r--deps/v8/test/cctest/test-code-stubs.cc204
-rw-r--r--deps/v8/test/cctest/test-code-stubs.h59
-rw-r--r--deps/v8/test/cctest/test-compiler.cc218
-rw-r--r--deps/v8/test/cctest/test-constantpool.cc6
-rw-r--r--deps/v8/test/cctest/test-conversions.cc342
-rw-r--r--deps/v8/test/cctest/test-cpu-profiler.cc333
-rw-r--r--deps/v8/test/cctest/test-date.cc23
-rw-r--r--deps/v8/test/cctest/test-debug.cc46
-rw-r--r--deps/v8/test/cctest/test-decls.cc2
-rw-r--r--deps/v8/test/cctest/test-dictionary.cc20
-rw-r--r--deps/v8/test/cctest/test-disasm-arm.cc22
-rw-r--r--deps/v8/test/cctest/test-disasm-arm64.cc67
-rw-r--r--deps/v8/test/cctest/test-disasm-ia32.cc9
-rw-r--r--deps/v8/test/cctest/test-disasm-mips.cc21
-rw-r--r--deps/v8/test/cctest/test-disasm-mips64.cc13
-rw-r--r--deps/v8/test/cctest/test-disasm-ppc.cc5
-rw-r--r--deps/v8/test/cctest/test-disasm-s390.cc18
-rw-r--r--deps/v8/test/cctest/test-disasm-x64.cc9
-rw-r--r--deps/v8/test/cctest/test-elements-kind.cc10
-rw-r--r--deps/v8/test/cctest/test-experimental-extra.js14
-rw-r--r--deps/v8/test/cctest/test-extra.js9
-rw-r--r--deps/v8/test/cctest/test-factory.cc46
-rw-r--r--deps/v8/test/cctest/test-feedback-vector.cc35
-rw-r--r--deps/v8/test/cctest/test-field-type-tracking.cc59
-rw-r--r--deps/v8/test/cctest/test-flags.cc79
-rw-r--r--deps/v8/test/cctest/test-func-name-inference.cc2
-rw-r--r--deps/v8/test/cctest/test-fuzz-arm64.cc20
-rw-r--r--deps/v8/test/cctest/test-global-handles.cc80
-rw-r--r--deps/v8/test/cctest/test-hashmap.cc9
-rw-r--r--deps/v8/test/cctest/test-heap-profiler.cc94
-rw-r--r--deps/v8/test/cctest/test-icache.cc192
-rw-r--r--deps/v8/test/cctest/test-identity-map.cc8
-rw-r--r--deps/v8/test/cctest/test-inobject-slack-tracking.cc26
-rw-r--r--deps/v8/test/cctest/test-intl.cc63
-rw-r--r--deps/v8/test/cctest/test-isolate-independent-builtins.cc206
-rw-r--r--deps/v8/test/cctest/test-js-weak-refs.cc477
-rw-r--r--deps/v8/test/cctest/test-log-stack-tracer.cc16
-rw-r--r--deps/v8/test/cctest/test-log.cc387
-rw-r--r--deps/v8/test/cctest/test-macro-assembler-arm.cc24
-rw-r--r--deps/v8/test/cctest/test-macro-assembler-mips.cc52
-rw-r--r--deps/v8/test/cctest/test-macro-assembler-mips64.cc53
-rw-r--r--deps/v8/test/cctest/test-macro-assembler-x64.cc281
-rw-r--r--deps/v8/test/cctest/test-mementos.cc9
-rw-r--r--deps/v8/test/cctest/test-object.cc62
-rw-r--r--deps/v8/test/cctest/test-orderedhashtable.cc725
-rw-r--r--deps/v8/test/cctest/test-parsing.cc1285
-rw-r--r--deps/v8/test/cctest/test-poison-disasm-arm.cc220
-rw-r--r--deps/v8/test/cctest/test-profile-generator.cc12
-rw-r--r--deps/v8/test/cctest/test-regexp.cc25
-rw-r--r--deps/v8/test/cctest/test-roots.cc29
-rw-r--r--deps/v8/test/cctest/test-serialize.cc651
-rw-r--r--deps/v8/test/cctest/test-smi-lexicographic-compare.cc15
-rw-r--r--deps/v8/test/cctest/test-strings.cc166
-rw-r--r--deps/v8/test/cctest/test-sync-primitives-arm.cc6
-rw-r--r--deps/v8/test/cctest/test-sync-primitives-arm64.cc12
-rw-r--r--deps/v8/test/cctest/test-thread-termination.cc80
-rw-r--r--deps/v8/test/cctest/test-traced-value.cc28
-rw-r--r--deps/v8/test/cctest/test-transitions.cc57
-rw-r--r--deps/v8/test/cctest/test-transitions.h2
-rw-r--r--deps/v8/test/cctest/test-unboxed-doubles.cc91
-rw-r--r--deps/v8/test/cctest/test-unwinder.cc543
-rw-r--r--deps/v8/test/cctest/test-usecounters.cc34
-rw-r--r--deps/v8/test/cctest/test-utils-arm64.cc16
-rw-r--r--deps/v8/test/cctest/test-utils-arm64.h7
-rw-r--r--deps/v8/test/cctest/torque/test-torque.cc187
-rw-r--r--deps/v8/test/cctest/trace-extension.cc40
-rw-r--r--deps/v8/test/cctest/wasm/OWNERS2
-rw-r--r--deps/v8/test/cctest/wasm/test-c-wasm-entry.cc13
-rw-r--r--deps/v8/test/cctest/wasm/test-jump-table-assembler.cc106
-rw-r--r--deps/v8/test/cctest/wasm/test-run-wasm-64.cc26
-rw-r--r--deps/v8/test/cctest/wasm/test-run-wasm-atomics64.cc106
-rw-r--r--deps/v8/test/cctest/wasm/test-run-wasm-interpreter.cc6
-rw-r--r--deps/v8/test/cctest/wasm/test-run-wasm-js.cc1
-rw-r--r--deps/v8/test/cctest/wasm/test-run-wasm-module.cc16
-rw-r--r--deps/v8/test/cctest/wasm/test-run-wasm-simd.cc205
-rw-r--r--deps/v8/test/cctest/wasm/test-run-wasm.cc65
-rw-r--r--deps/v8/test/cctest/wasm/test-streaming-compilation.cc160
-rw-r--r--deps/v8/test/cctest/wasm/test-wasm-breakpoints.cc8
-rw-r--r--deps/v8/test/cctest/wasm/test-wasm-codegen.cc12
-rw-r--r--deps/v8/test/cctest/wasm/test-wasm-import-wrapper-cache.cc127
-rw-r--r--deps/v8/test/cctest/wasm/test-wasm-interpreter-entry.cc5
-rw-r--r--deps/v8/test/cctest/wasm/test-wasm-serialization.cc62
-rw-r--r--deps/v8/test/cctest/wasm/test-wasm-shared-engine.cc12
-rw-r--r--deps/v8/test/cctest/wasm/wasm-run-utils.cc62
-rw-r--r--deps/v8/test/cctest/wasm/wasm-run-utils.h13
-rw-r--r--deps/v8/test/common/assembler-tester.h95
-rw-r--r--deps/v8/test/common/wasm/test-signatures.h3
-rw-r--r--deps/v8/test/common/wasm/wasm-macro-gen.h32
-rw-r--r--deps/v8/test/common/wasm/wasm-module-runner.cc43
-rw-r--r--deps/v8/test/common/wasm/wasm-module-runner.h43
-rw-r--r--deps/v8/test/debugger/debug/es6/debug-promises/evaluate-across-microtasks.js2
-rw-r--r--deps/v8/test/debugger/debug/es6/debug-promises/proxy-as-promise.js2
-rw-r--r--deps/v8/test/debugger/debug/es6/debug-promises/reject-in-constructor-opt.js8
-rw-r--r--deps/v8/test/debugger/debug/es6/debug-step-destructuring-bind.js108
-rw-r--r--deps/v8/test/debugger/debug/es6/debug-stepin-default-parameters.js2
-rw-r--r--deps/v8/test/debugger/debug/es8/async-debug-builtin-predictions.js2
-rw-r--r--deps/v8/test/debugger/debug/es8/async-debug-caught-exception-cases.js2
-rw-r--r--deps/v8/test/debugger/debug/es8/async-debug-caught-exception.js14
-rw-r--r--deps/v8/test/debugger/debug/es8/async-debug-step-abort-at-break.js2
-rw-r--r--deps/v8/test/debugger/debug/es8/async-debug-step-continue-at-break.js8
-rw-r--r--deps/v8/test/debugger/debug/es8/async-debug-step-in-and-out.js8
-rw-r--r--deps/v8/test/debugger/debug/es8/async-debug-step-in-out-out.js2
-rw-r--r--deps/v8/test/debugger/debug/es8/async-debug-step-in.js8
-rw-r--r--deps/v8/test/debugger/debug/es8/async-debug-step-nested.js8
-rw-r--r--deps/v8/test/debugger/debug/es8/async-debug-step-next-constant.js8
-rw-r--r--deps/v8/test/debugger/debug/es8/async-debug-step-next.js8
-rw-r--r--deps/v8/test/debugger/debug/es8/async-debug-step-out.js2
-rw-r--r--deps/v8/test/debugger/debug/es8/debug-async-break-on-stack.js4
-rw-r--r--deps/v8/test/debugger/debug/es8/debug-async-break.js4
-rw-r--r--deps/v8/test/debugger/debug/es8/debug-async-liveedit.js4
-rw-r--r--deps/v8/test/debugger/debug/es8/promise-finally.js2
-rw-r--r--deps/v8/test/debugger/debug/lazy-deopt-then-flush-bytecode.js48
-rw-r--r--deps/v8/test/debugger/debug/side-effect/debug-evaluate-no-side-effect-builtins.js5
-rw-r--r--deps/v8/test/debugger/debugger.status22
-rw-r--r--deps/v8/test/debugger/regress/regress-5610.js9
-rw-r--r--deps/v8/test/fuzzer/BUILD.gn9
-rw-r--r--deps/v8/test/fuzzer/fuzzer.status8
-rw-r--r--deps/v8/test/fuzzer/multi-return.cc8
-rw-r--r--deps/v8/test/fuzzer/parser.cc2
-rw-r--r--deps/v8/test/fuzzer/regexp-builtins.cc2
-rw-r--r--deps/v8/test/fuzzer/testcfg.py5
-rw-r--r--deps/v8/test/fuzzer/wasm-code.cc3
-rw-r--r--deps/v8/test/fuzzer/wasm-compile.cc38
-rw-r--r--deps/v8/test/fuzzer/wasm-data-section.cc11
-rw-r--r--deps/v8/test/fuzzer/wasm-function-sigs-section.cc11
-rw-r--r--deps/v8/test/fuzzer/wasm-fuzzer-common.cc159
-rw-r--r--deps/v8/test/fuzzer/wasm-fuzzer-common.h6
-rw-r--r--deps/v8/test/fuzzer/wasm-globals-section.cc11
-rw-r--r--deps/v8/test/fuzzer/wasm-imports-section.cc11
-rw-r--r--deps/v8/test/fuzzer/wasm-memory-section.cc11
-rw-r--r--deps/v8/test/fuzzer/wasm-names-section.cc12
-rw-r--r--deps/v8/test/fuzzer/wasm-types-section.cc11
-rw-r--r--deps/v8/test/fuzzer/wasm_globals_section/foo0
-rw-r--r--deps/v8/test/fuzzer/wasm_imports_section/foo0
-rw-r--r--deps/v8/test/fuzzer/wasm_memory_section/foo0
-rw-r--r--deps/v8/test/fuzzer/wasm_names_section/foo0
-rw-r--r--deps/v8/test/fuzzer/wasm_types_section/foo0
-rw-r--r--deps/v8/test/inspector/BUILD.gn1
-rw-r--r--deps/v8/test/inspector/PRESUBMIT.py25
-rw-r--r--deps/v8/test/inspector/cpu-profiler/coverage-block.js1
-rw-r--r--deps/v8/test/inspector/cpu-profiler/coverage.js1
-rw-r--r--deps/v8/test/inspector/debugger/async-chains-expected.txt6
-rw-r--r--deps/v8/test/inspector/debugger/async-for-await-of-promise-stack-expected.txt8
-rw-r--r--deps/v8/test/inspector/debugger/async-function-step-out-expected.txt6
-rw-r--r--deps/v8/test/inspector/debugger/async-function-step-out-optimized-expected.txt215
-rw-r--r--deps/v8/test/inspector/debugger/async-function-step-out-optimized.js187
-rw-r--r--deps/v8/test/inspector/debugger/async-stack-created-frame-expected.txt6
-rw-r--r--deps/v8/test/inspector/debugger/async-stack-for-promise-expected.txt6
-rw-r--r--deps/v8/test/inspector/debugger/async-stack-load-more-expected.txt2
-rw-r--r--deps/v8/test/inspector/debugger/break-locations-await-expected.txt60
-rw-r--r--deps/v8/test/inspector/debugger/break-locations-var-init-expected.txt56
-rw-r--r--deps/v8/test/inspector/debugger/break-locations-var-init-optimized-expected.txt200
-rw-r--r--deps/v8/test/inspector/debugger/break-locations-var-init-optimized.js76
-rw-r--r--deps/v8/test/inspector/debugger/class-fields-scopes-expected.txt977
-rw-r--r--deps/v8/test/inspector/debugger/class-fields-scopes.js80
-rw-r--r--deps/v8/test/inspector/debugger/eval-scopes-expected.txt6
-rw-r--r--deps/v8/test/inspector/debugger/for-of-loops-expected.txt12
-rw-r--r--deps/v8/test/inspector/debugger/get-possible-breakpoints-expected.txt20
-rw-r--r--deps/v8/test/inspector/debugger/get-possible-breakpoints-master-expected.txt6
-rw-r--r--deps/v8/test/inspector/debugger/max-async-call-chain-depth-expected.txt2
-rw-r--r--deps/v8/test/inspector/debugger/object-preview-internal-properties-expected.txt53
-rw-r--r--deps/v8/test/inspector/debugger/object-preview-internal-properties.js2
-rw-r--r--deps/v8/test/inspector/debugger/pause-on-async-call-set-timeout-expected.txt (renamed from deps/v8/test/inspector/debugger/schedule-step-into-async-set-timeout-expected.txt)2
-rw-r--r--deps/v8/test/inspector/debugger/pause-on-async-call-set-timeout.js (renamed from deps/v8/test/inspector/debugger/schedule-step-into-async-set-timeout.js)41
-rw-r--r--deps/v8/test/inspector/debugger/pause-on-oom.js2
-rw-r--r--deps/v8/test/inspector/debugger/promise-chain-when-limit-hit-expected.txt36
-rw-r--r--deps/v8/test/inspector/debugger/schedule-step-into-async-expected.txt191
-rw-r--r--deps/v8/test/inspector/debugger/schedule-step-into-async.js160
-rw-r--r--deps/v8/test/inspector/debugger/scope-skip-variables-with-empty-name-expected.txt6
-rw-r--r--deps/v8/test/inspector/debugger/script-origin-stack-expected.txt33
-rw-r--r--deps/v8/test/inspector/debugger/script-origin-stack.js15
-rw-r--r--deps/v8/test/inspector/debugger/set-script-source-2-expected.txt10
-rw-r--r--deps/v8/test/inspector/debugger/step-into-break-on-async-call-expected.txt4
-rw-r--r--deps/v8/test/inspector/debugger/step-out-async-await-expected.txt18
-rw-r--r--deps/v8/test/inspector/debugger/step-snapshot-expected.txt6
-rw-r--r--deps/v8/test/inspector/debugger/step-snapshot.js1
-rw-r--r--deps/v8/test/inspector/debugger/stepping-ignores-injected-script-expected.txt2
-rw-r--r--deps/v8/test/inspector/debugger/stepping-ignores-injected-script.js21
-rw-r--r--deps/v8/test/inspector/debugger/stepping-with-exposed-injected-script-expected.txt2
-rw-r--r--deps/v8/test/inspector/debugger/stepping-with-exposed-injected-script.js22
-rw-r--r--deps/v8/test/inspector/debugger/terminate-execution-on-pause-expected.txt55
-rw-r--r--deps/v8/test/inspector/debugger/terminate-execution-on-pause.js51
-rw-r--r--deps/v8/test/inspector/debugger/wasm-reset-context-group-expected.txt18
-rw-r--r--deps/v8/test/inspector/debugger/wasm-reset-context-group.js63
-rw-r--r--deps/v8/test/inspector/debugger/wasm-scope-info-expected.txt176
-rw-r--r--deps/v8/test/inspector/debugger/wasm-scope-info.js48
-rw-r--r--deps/v8/test/inspector/debugger/wasm-set-breakpoint-expected.txt112
-rw-r--r--deps/v8/test/inspector/debugger/wasm-set-breakpoint.js167
-rw-r--r--deps/v8/test/inspector/heap-profiler/sampling-heap-profiler-expected.txt1
-rw-r--r--deps/v8/test/inspector/heap-profiler/sampling-heap-profiler.js9
-rw-r--r--deps/v8/test/inspector/inspector-test.cc108
-rw-r--r--deps/v8/test/inspector/inspector.status38
-rw-r--r--deps/v8/test/inspector/isolate-data.cc8
-rw-r--r--deps/v8/test/inspector/isolate-data.h1
-rw-r--r--deps/v8/test/inspector/protocol-test.js73
-rw-r--r--deps/v8/test/inspector/runtime/console-messages-limits-expected.txt2
-rw-r--r--deps/v8/test/inspector/runtime/console-methods-expected.txt167
-rw-r--r--deps/v8/test/inspector/runtime/console-methods.js7
-rw-r--r--deps/v8/test/inspector/runtime/console-table-expected.txt385
-rw-r--r--deps/v8/test/inspector/runtime/console-table.js102
-rw-r--r--deps/v8/test/inspector/runtime/custom-preview-expected.txt250
-rw-r--r--deps/v8/test/inspector/runtime/custom-preview.js133
-rw-r--r--deps/v8/test/inspector/runtime/es6-module-expected.txt6
-rw-r--r--deps/v8/test/inspector/runtime/evaluate-run-microtasks-expected.txt2
-rw-r--r--deps/v8/test/inspector/runtime/evaluate-with-generate-preview.js1
-rw-r--r--deps/v8/test/inspector/runtime/exception-thrown-expected.txt10
-rw-r--r--deps/v8/test/inspector/runtime/get-properties-expected.txt20
-rw-r--r--deps/v8/test/inspector/runtime/get-properties-on-proxy-expected.txt12
-rw-r--r--deps/v8/test/inspector/runtime/get-properties.js5
-rw-r--r--deps/v8/test/inspector/runtime/internal-properties-entries-expected.txt205
-rw-r--r--deps/v8/test/inspector/runtime/internal-properties-entries.js3
-rw-r--r--deps/v8/test/inspector/runtime/internal-properties-expected.txt252
-rw-r--r--deps/v8/test/inspector/runtime/remote-object-expected.txt2965
-rw-r--r--deps/v8/test/inspector/runtime/remote-object-get-properties-expected.txt138
-rw-r--r--deps/v8/test/inspector/runtime/remote-object-get-properties.js61
-rw-r--r--deps/v8/test/inspector/runtime/remote-object.js635
-rw-r--r--deps/v8/test/inspector/runtime/runtime-get-properties-and-accessor-expected.txt14
-rw-r--r--deps/v8/test/inspector/runtime/runtime-restore-expected.txt4
-rw-r--r--deps/v8/test/inspector/runtime/stable-object-id-expected.txt15
-rw-r--r--deps/v8/test/inspector/runtime/stable-object-id.js87
-rw-r--r--deps/v8/test/inspector/runtime/terminate-execution-expected.txt2
-rw-r--r--deps/v8/test/inspector/sessions/runtime-console-api-called-expected.txt8
-rw-r--r--deps/v8/test/inspector/sessions/runtime-evaluate-exception-expected.txt16
-rw-r--r--deps/v8/test/inspector/sessions/runtime-evaluate-exception.js4
-rw-r--r--deps/v8/test/inspector/sessions/runtime-remote-object-expected.txt18
-rw-r--r--deps/v8/test/inspector/testcfg.py1
-rw-r--r--deps/v8/test/inspector/type-profiler/type-profile-disable.js2
-rw-r--r--deps/v8/test/inspector/type-profiler/type-profile-start-stop.js2
-rw-r--r--deps/v8/test/inspector/type-profiler/type-profile-with-classes.js2
-rw-r--r--deps/v8/test/inspector/type-profiler/type-profile-with-to-string-tag.js2
-rw-r--r--deps/v8/test/inspector/type-profiler/type-profile.js2
-rw-r--r--deps/v8/test/intl/break-iterator/default-locale.js3
-rw-r--r--deps/v8/test/intl/break-iterator/wellformed-unsupported-locale.js32
-rw-r--r--deps/v8/test/intl/collator/check-co-option.js33
-rw-r--r--deps/v8/test/intl/collator/check-kf-option.js36
-rw-r--r--deps/v8/test/intl/collator/check-kn-option.js29
-rw-r--r--deps/v8/test/intl/collator/constructor-order.js30
-rw-r--r--deps/v8/test/intl/collator/default-locale.js4
-rw-r--r--deps/v8/test/intl/collator/options.js4
-rw-r--r--deps/v8/test/intl/collator/wellformed-unsupported-locale.js32
-rw-r--r--deps/v8/test/intl/date-format/check-ca-option.js51
-rw-r--r--deps/v8/test/intl/date-format/check-hc-option.js41
-rw-r--r--deps/v8/test/intl/date-format/check-nu-option.js59
-rw-r--r--deps/v8/test/intl/date-format/default-locale.js3
-rw-r--r--deps/v8/test/intl/date-format/property-override.js20
-rw-r--r--deps/v8/test/intl/date-format/wellformed-unsupported-locale.js32
-rw-r--r--deps/v8/test/intl/default_locale.js17
-rw-r--r--deps/v8/test/intl/general/CanonicalizeLocaleListTakeLocale.js101
-rw-r--r--deps/v8/test/intl/general/invalid-locale.js15
-rw-r--r--deps/v8/test/intl/intl.status21
-rw-r--r--deps/v8/test/intl/list-format/constructor-order.js21
-rw-r--r--deps/v8/test/intl/list-format/constructor.js32
-rw-r--r--deps/v8/test/intl/list-format/format-en.js35
-rw-r--r--deps/v8/test/intl/list-format/format-to-parts.js16
-rw-r--r--deps/v8/test/intl/list-format/format.js9
-rw-r--r--deps/v8/test/intl/list-format/resolved-options.js47
-rw-r--r--deps/v8/test/intl/locale/locale-canonicalization.js2
-rw-r--r--deps/v8/test/intl/locale/locale-constructor.js8
-rw-r--r--deps/v8/test/intl/locale/property.js22
-rw-r--r--deps/v8/test/intl/number-format/check-nu-option.js59
-rw-r--r--deps/v8/test/intl/number-format/constructor-order.js42
-rw-r--r--deps/v8/test/intl/number-format/default-locale.js3
-rw-r--r--deps/v8/test/intl/number-format/wellformed-unsupported-locale.js32
-rw-r--r--deps/v8/test/intl/plural-rules/constructor-order.js33
-rw-r--r--deps/v8/test/intl/regexp-assert.js19
-rw-r--r--deps/v8/test/intl/regexp-prepare.js5
-rw-r--r--deps/v8/test/intl/regress-7481.js39
-rw-r--r--deps/v8/test/intl/regress-8432.js42
-rw-r--r--deps/v8/test/intl/regress-8469.js87
-rw-r--r--deps/v8/test/intl/regress-8525.js27
-rw-r--r--deps/v8/test/intl/regress-8657.js7
-rw-r--r--deps/v8/test/intl/regress-895942.js6
-rw-r--r--deps/v8/test/intl/regress-900013.js9
-rw-r--r--deps/v8/test/intl/regress-903566.js32
-rw-r--r--deps/v8/test/intl/regress-917151.js11
-rw-r--r--deps/v8/test/intl/regress-925216.js10
-rw-r--r--deps/v8/test/intl/relative-time-format/constructor-order.js21
-rw-r--r--deps/v8/test/intl/relative-time-format/constructor.js24
-rw-r--r--deps/v8/test/intl/relative-time-format/format-to-parts-en.js41
-rw-r--r--deps/v8/test/intl/relative-time-format/resolved-options-nu.js97
-rw-r--r--deps/v8/test/intl/relative-time-format/resolved-options.js18
-rw-r--r--deps/v8/test/intl/segmenter/check-lb-option.js29
-rw-r--r--deps/v8/test/intl/segmenter/constructor-order.js20
-rw-r--r--deps/v8/test/intl/segmenter/constructor.js48
-rw-r--r--deps/v8/test/intl/segmenter/resolved-options.js299
-rw-r--r--deps/v8/test/intl/segmenter/segment-grapheme-following.js38
-rw-r--r--deps/v8/test/intl/segmenter/segment-grapheme-iterable.js45
-rw-r--r--deps/v8/test/intl/segmenter/segment-grapheme-next.js40
-rw-r--r--deps/v8/test/intl/segmenter/segment-grapheme-preceding.js44
-rw-r--r--deps/v8/test/intl/segmenter/segment-grapheme.js29
-rw-r--r--deps/v8/test/intl/segmenter/segment-iterator-breakType.js11
-rw-r--r--deps/v8/test/intl/segmenter/segment-iterator-following.js9
-rw-r--r--deps/v8/test/intl/segmenter/segment-iterator-ownPropertyDescriptor.js91
-rw-r--r--deps/v8/test/intl/segmenter/segment-iterator-position.js11
-rw-r--r--deps/v8/test/intl/segmenter/segment-iterator-preceding.js9
-rw-r--r--deps/v8/test/intl/segmenter/segment-iterator.js12
-rw-r--r--deps/v8/test/intl/segmenter/segment-sentence-following.js38
-rw-r--r--deps/v8/test/intl/segmenter/segment-sentence-iterable.js45
-rw-r--r--deps/v8/test/intl/segmenter/segment-sentence-next.js40
-rw-r--r--deps/v8/test/intl/segmenter/segment-sentence-preceding.js44
-rw-r--r--deps/v8/test/intl/segmenter/segment-sentence.js29
-rw-r--r--deps/v8/test/intl/segmenter/segment-word-following.js38
-rw-r--r--deps/v8/test/intl/segmenter/segment-word-iterable.js45
-rw-r--r--deps/v8/test/intl/segmenter/segment-word-next.js40
-rw-r--r--deps/v8/test/intl/segmenter/segment-word-preceding.js44
-rw-r--r--deps/v8/test/intl/segmenter/segment-word.js29
-rw-r--r--deps/v8/test/intl/segmenter/segment.js32
-rw-r--r--deps/v8/test/intl/segmenter/subclassing.js17
-rw-r--r--deps/v8/test/intl/segmenter/supported-locale.js8
-rw-r--r--deps/v8/test/intl/testcfg.py5
-rw-r--r--deps/v8/test/js-perf-test/Array/from.js99
-rw-r--r--deps/v8/test/js-perf-test/Array/reduce.js2
-rw-r--r--deps/v8/test/js-perf-test/ArrayIndexOfIncludesPolymorphic/indexof-includes-polymorphic.js41
-rw-r--r--deps/v8/test/js-perf-test/ArrayIndexOfIncludesPolymorphic/run.js23
-rw-r--r--deps/v8/test/js-perf-test/ArrayLiteralInitialSpreadLargeDoubleHoley/run.js158
-rw-r--r--deps/v8/test/js-perf-test/ArrayLiteralInitialSpreadLargeDoublePacked/run.js155
-rw-r--r--deps/v8/test/js-perf-test/ArrayLiteralInitialSpreadLargePacked/run.js3
-rw-r--r--deps/v8/test/js-perf-test/ArrayLiteralInitialSpreadLargeSmiMap/run.js94
-rw-r--r--deps/v8/test/js-perf-test/ArrayLiteralInitialSpreadLargeSmiSet/run.js121
-rw-r--r--deps/v8/test/js-perf-test/ArrayLiteralInitialSpreadSmallDoubleHoley/run.js159
-rw-r--r--deps/v8/test/js-perf-test/ArrayLiteralInitialSpreadSmallDoublePacked/run.js158
-rw-r--r--deps/v8/test/js-perf-test/ArrayLiteralInitialSpreadSmallSmiMap/run.js93
-rw-r--r--deps/v8/test/js-perf-test/ArrayLiteralInitialSpreadSmallSmiSet/run.js120
-rw-r--r--deps/v8/test/js-perf-test/AsyncAwait/baseline-babel-es2017.js4
-rw-r--r--deps/v8/test/js-perf-test/AsyncAwait/baseline-naive-promises.js4
-rw-r--r--deps/v8/test/js-perf-test/AsyncAwait/native.js4
-rw-r--r--deps/v8/test/js-perf-test/Dates/run.js20
-rw-r--r--deps/v8/test/js-perf-test/Dates/toLocaleString.js20
-rw-r--r--deps/v8/test/js-perf-test/Inspector/debugger.js2
-rw-r--r--deps/v8/test/js-perf-test/JSTests.json243
-rw-r--r--deps/v8/test/js-perf-test/Modules/run.js6
-rw-r--r--deps/v8/test/js-perf-test/Numbers/run.js1
-rw-r--r--deps/v8/test/js-perf-test/Numbers/toLocaleString.js14
-rw-r--r--deps/v8/test/js-perf-test/RegExp/base.js3
-rw-r--r--deps/v8/test/js-perf-test/StringIterators/string-iterator.js24
-rw-r--r--deps/v8/test/js-perf-test/Strings/string-localeCompare.js19
-rw-r--r--deps/v8/test/js-perf-test/Strings/string-startswith.js78
-rw-r--r--deps/v8/test/js-perf-test/TypedArrays/base.js26
-rw-r--r--deps/v8/test/js-perf-test/TypedArrays/join-bigint.js8
-rw-r--r--deps/v8/test/js-perf-test/TypedArrays/join-float.js8
-rw-r--r--deps/v8/test/js-perf-test/TypedArrays/join-int.js8
-rw-r--r--deps/v8/test/js-perf-test/TypedArrays/join-sep-bigint.js8
-rw-r--r--deps/v8/test/js-perf-test/TypedArrays/join-sep-float.js8
-rw-r--r--deps/v8/test/js-perf-test/TypedArrays/join-sep-int.js8
-rw-r--r--deps/v8/test/js-perf-test/TypedArrays/join.js38
-rw-r--r--deps/v8/test/js-perf-test/TypedArrays/sort.js23
-rw-r--r--deps/v8/test/js-perf-test/base.js3
-rw-r--r--deps/v8/test/memory/Memory.json4
-rw-r--r--deps/v8/test/message/empty.js (renamed from deps/v8/test/fuzzer/wasm_data_section/foo)0
-rw-r--r--deps/v8/test/message/empty.out (renamed from deps/v8/test/fuzzer/wasm_function_sigs_section/foo)0
-rw-r--r--deps/v8/test/message/fail/arrow-formal-parameters.out2
-rw-r--r--deps/v8/test/message/fail/arrow-param-after-rest-2.out2
-rw-r--r--deps/v8/test/message/fail/arrow-param-after-rest.out2
-rw-r--r--deps/v8/test/message/fail/arrow-two-rest-params.out2
-rw-r--r--deps/v8/test/message/fail/call-async.js8
-rw-r--r--deps/v8/test/message/fail/call-async.out6
-rw-r--r--deps/v8/test/message/fail/call-await.js8
-rw-r--r--deps/v8/test/message/fail/call-await.out7
-rw-r--r--deps/v8/test/message/fail/call-let.js (renamed from deps/v8/test/mjsunit/regress/regress-408036.js)7
-rw-r--r--deps/v8/test/message/fail/call-let.out6
-rw-r--r--deps/v8/test/message/fail/call-static.js8
-rw-r--r--deps/v8/test/message/fail/call-static.out6
-rw-r--r--deps/v8/test/message/fail/call-yield.js8
-rw-r--r--deps/v8/test/message/fail/call-yield.out6
-rw-r--r--deps/v8/test/message/fail/class-fields-private-source-positions.js12
-rw-r--r--deps/v8/test/message/fail/class-fields-private-source-positions.out5
-rw-r--r--deps/v8/test/message/fail/class-fields-private-throw-early-2.js (renamed from deps/v8/test/mjsunit/mjsunit_suppressions.js)13
-rw-r--r--deps/v8/test/message/fail/class-fields-private-throw-early-2.out6
-rw-r--r--deps/v8/test/message/fail/class-fields-private-throw-early.js11
-rw-r--r--deps/v8/test/message/fail/class-fields-private-throw-early.out4
-rw-r--r--deps/v8/test/message/fail/class-fields-private-throw-read.js12
-rw-r--r--deps/v8/test/message/fail/class-fields-private-throw-read.out6
-rw-r--r--deps/v8/test/message/fail/class-fields-private-throw-write.js12
-rw-r--r--deps/v8/test/message/fail/class-fields-private-throw-write.out6
-rw-r--r--deps/v8/test/message/fail/class-fields-throw.out2
-rw-r--r--deps/v8/test/message/fail/destructuring-undefined-computed-property.out2
-rw-r--r--deps/v8/test/message/fail/destructuring-undefined-number-property.out2
-rw-r--r--deps/v8/test/message/fail/destructuring-undefined-string-property.out2
-rw-r--r--deps/v8/test/message/fail/directive.js7
-rw-r--r--deps/v8/test/message/fail/directive.out4
-rw-r--r--deps/v8/test/message/fail/get-iterator1.out4
-rw-r--r--deps/v8/test/message/fail/invalid-spread-2.out6
-rw-r--r--deps/v8/test/message/fail/isvar.js31
-rw-r--r--deps/v8/test/message/fail/isvar.out4
-rw-r--r--deps/v8/test/message/fail/list-format-style-narrow.js7
-rw-r--r--deps/v8/test/message/fail/list-format-style-narrow.out8
-rw-r--r--deps/v8/test/message/fail/modules-duplicate-export5.js9
-rw-r--r--deps/v8/test/message/fail/modules-duplicate-export5.out5
-rw-r--r--deps/v8/test/message/fail/modules-export-illformed-class.js7
-rw-r--r--deps/v8/test/message/fail/modules-export-illformed-class.out5
-rw-r--r--deps/v8/test/message/fail/object-rest-assignment-pattern.out2
-rw-r--r--deps/v8/test/message/fail/object-rest-binding-pattern.out2
-rw-r--r--deps/v8/test/message/fail/param-arrow-redeclaration-as-let.js7
-rw-r--r--deps/v8/test/message/fail/param-arrow-redeclaration-as-let.out5
-rw-r--r--deps/v8/test/message/fail/param-async-arrow-redeclaration-as-let.js7
-rw-r--r--deps/v8/test/message/fail/param-async-arrow-redeclaration-as-let.out5
-rw-r--r--deps/v8/test/message/fail/param-async-function-redeclaration-as-let.js7
-rw-r--r--deps/v8/test/message/fail/param-async-function-redeclaration-as-let.out5
-rw-r--r--deps/v8/test/message/fail/param-function-redeclaration-as-let.js7
-rw-r--r--deps/v8/test/message/fail/param-function-redeclaration-as-let.out5
-rw-r--r--deps/v8/test/message/fail/try-catch-lexical-conflict-preparser.js10
-rw-r--r--deps/v8/test/message/fail/try-catch-lexical-conflict-preparser.out4
-rw-r--r--deps/v8/test/message/fail/unparenthesized-exponentiation-expression.js7
-rw-r--r--deps/v8/test/message/fail/unparenthesized-exponentiation-expression.out4
-rw-r--r--deps/v8/test/message/fail/weak-refs-makecell1.js8
-rw-r--r--deps/v8/test/message/fail/weak-refs-makecell1.out6
-rw-r--r--deps/v8/test/message/fail/weak-refs-makecell2.js9
-rw-r--r--deps/v8/test/message/fail/weak-refs-makecell2.out6
-rw-r--r--deps/v8/test/message/fail/weak-refs-weakfactory1.js7
-rw-r--r--deps/v8/test/message/fail/weak-refs-weakfactory1.out6
-rw-r--r--deps/v8/test/message/fail/weak-refs-weakfactory2.js7
-rw-r--r--deps/v8/test/message/fail/weak-refs-weakfactory2.out6
-rw-r--r--deps/v8/test/message/message.status14
-rw-r--r--deps/v8/test/message/mjsunit/fail/assert-promise-result-rejects-with-throw-empty.out2
-rw-r--r--deps/v8/test/message/mjsunit/fail/assert-promise-result-rejects.out4
-rw-r--r--deps/v8/test/message/mjsunit/fail/assert-promise-result-resolves.out4
-rw-r--r--deps/v8/test/message/regress/fail/regress-8409.js5
-rw-r--r--deps/v8/test/message/regress/fail/regress-8409.out4
-rw-r--r--deps/v8/test/message/regress/fail/regress-900383.js8
-rw-r--r--deps/v8/test/message/regress/fail/regress-900383.out4
-rw-r--r--deps/v8/test/message/wasm-function-name-async.out4
-rw-r--r--deps/v8/test/message/wasm-function-name-streaming.out4
-rw-r--r--deps/v8/test/message/wasm-module-and-function-name-async.out4
-rw-r--r--deps/v8/test/message/wasm-module-and-function-name-streaming.out4
-rw-r--r--deps/v8/test/message/wasm-module-name-async.out4
-rw-r--r--deps/v8/test/message/wasm-module-name-streaming.out4
-rw-r--r--deps/v8/test/message/wasm-no-name-async.out4
-rw-r--r--deps/v8/test/message/wasm-no-name-streaming.out4
-rw-r--r--deps/v8/test/mjsunit/BUILD.gn3
-rw-r--r--deps/v8/test/mjsunit/array-from-large-set.js11
-rw-r--r--deps/v8/test/mjsunit/array-functions-prototype-misc.js14
-rw-r--r--deps/v8/test/mjsunit/array-indexing-receiver.js18
-rw-r--r--deps/v8/test/mjsunit/array-join-element-tostring-prototype-side-effects.js14
-rw-r--r--deps/v8/test/mjsunit/array-join-element-tostring-side-effects.js152
-rw-r--r--deps/v8/test/mjsunit/array-join-index-getter-side-effects.js108
-rw-r--r--deps/v8/test/mjsunit/array-join-invalid-string-length.js63
-rw-r--r--deps/v8/test/mjsunit/array-join-nesting.js16
-rw-r--r--deps/v8/test/mjsunit/array-join-nonarray-length-getter-side-effects.js29
-rw-r--r--deps/v8/test/mjsunit/array-join-separator-tostring-side-effects.js197
-rw-r--r--deps/v8/test/mjsunit/array-join.js14
-rw-r--r--deps/v8/test/mjsunit/array-natives-elements.js12
-rw-r--r--deps/v8/test/mjsunit/array-sort.js21
-rw-r--r--deps/v8/test/mjsunit/array-tolocalestring.js72
-rw-r--r--deps/v8/test/mjsunit/arrow-with.js7
-rw-r--r--deps/v8/test/mjsunit/asm/regress-913822.js25
-rw-r--r--deps/v8/test/mjsunit/asm/regress-920076.js13
-rw-r--r--deps/v8/test/mjsunit/async-hooks/async-await-tree.js26
-rw-r--r--deps/v8/test/mjsunit/async-hooks/chained-promises.js8
-rw-r--r--deps/v8/test/mjsunit/async-hooks/execution-order.js48
-rw-r--r--deps/v8/test/mjsunit/async-hooks/promises-async-await.js4
-rw-r--r--deps/v8/test/mjsunit/async-stack-traces-prepare-stacktrace-4.js39
-rw-r--r--deps/v8/test/mjsunit/async-stack-traces-promise-all.js38
-rw-r--r--deps/v8/test/mjsunit/async-stack-traces.js31
-rw-r--r--deps/v8/test/mjsunit/code-coverage-ad-hoc.js2
-rw-r--r--deps/v8/test/mjsunit/code-coverage-block-noopt.js2
-rw-r--r--deps/v8/test/mjsunit/code-coverage-block-opt.js6
-rw-r--r--deps/v8/test/mjsunit/code-coverage-block.js57
-rw-r--r--deps/v8/test/mjsunit/code-coverage-class-fields.js3
-rw-r--r--deps/v8/test/mjsunit/code-coverage-precise.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/abstract-equal-receiver.js177
-rw-r--r--deps/v8/test/mjsunit/compiler/abstract-equal-undetectable.js119
-rw-r--r--deps/v8/test/mjsunit/compiler/array-every.js18
-rw-r--r--deps/v8/test/mjsunit/compiler/array-find.js18
-rw-r--r--deps/v8/test/mjsunit/compiler/array-findindex.js18
-rw-r--r--deps/v8/test/mjsunit/compiler/array-multiple-receiver-maps.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/array-some.js18
-rw-r--r--deps/v8/test/mjsunit/compiler/dataview-get.js4
-rw-r--r--deps/v8/test/mjsunit/compiler/dataview-neutered.js38
-rw-r--r--deps/v8/test/mjsunit/compiler/deopt-inlined-smi.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/instance-of-overridden-has-instance.js106
-rw-r--r--deps/v8/test/mjsunit/compiler/int64.js40
-rw-r--r--deps/v8/test/mjsunit/compiler/lazy-deopt-async-function-resolve.js27
-rw-r--r--deps/v8/test/mjsunit/compiler/native-context-specialization-hole-check.js5
-rw-r--r--deps/v8/test/mjsunit/compiler/number-max.js14
-rw-r--r--deps/v8/test/mjsunit/compiler/number-min.js14
-rw-r--r--deps/v8/test/mjsunit/compiler/number-modulus.js230
-rw-r--r--deps/v8/test/mjsunit/compiler/number-multiply.js59
-rw-r--r--deps/v8/test/mjsunit/compiler/osr-assert.js9
-rw-r--r--deps/v8/test/mjsunit/compiler/promise-resolve-stable-maps.js61
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-8380.js32
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-902608.js16
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-905555-2.js25
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-905555.js25
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-910838.js20
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-913232.js14
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-919754.js15
-rw-r--r--deps/v8/test/mjsunit/compiler/strict-equal-number.js16
-rw-r--r--deps/v8/test/mjsunit/compiler/strict-equal-receiver.js152
-rw-r--r--deps/v8/test/mjsunit/d8/d8-arguments.js7
-rw-r--r--deps/v8/test/mjsunit/date.js10
-rw-r--r--deps/v8/test/mjsunit/elements-kind.js15
-rw-r--r--deps/v8/test/mjsunit/es6/array-iterator-detached.js8
-rw-r--r--deps/v8/test/mjsunit/es6/array-iterator-turbo.js2
-rw-r--r--deps/v8/test/mjsunit/es6/array-prototype-values.js2
-rw-r--r--deps/v8/test/mjsunit/es6/array-spread-large-holey.js17
-rw-r--r--deps/v8/test/mjsunit/es6/block-eval-var-over-let.js24
-rw-r--r--deps/v8/test/mjsunit/es6/block-sloppy-function.js7
-rw-r--r--deps/v8/test/mjsunit/es6/classes.js154
-rw-r--r--deps/v8/test/mjsunit/es6/collections-constructor-custom-iterator.js8
-rw-r--r--deps/v8/test/mjsunit/es6/destructuring-assignment.js55
-rw-r--r--deps/v8/test/mjsunit/es6/destructuring.js10
-rw-r--r--deps/v8/test/mjsunit/es6/map-iterator-1.js23
-rw-r--r--deps/v8/test/mjsunit/es6/map-iterator-10.js34
-rw-r--r--deps/v8/test/mjsunit/es6/map-iterator-11.js35
-rw-r--r--deps/v8/test/mjsunit/es6/map-iterator-2.js20
-rw-r--r--deps/v8/test/mjsunit/es6/map-iterator-3.js22
-rw-r--r--deps/v8/test/mjsunit/es6/map-iterator-4.js22
-rw-r--r--deps/v8/test/mjsunit/es6/map-iterator-5.js22
-rw-r--r--deps/v8/test/mjsunit/es6/map-iterator-6.js20
-rw-r--r--deps/v8/test/mjsunit/es6/map-iterator-7.js22
-rw-r--r--deps/v8/test/mjsunit/es6/map-iterator-8.js32
-rw-r--r--deps/v8/test/mjsunit/es6/map-iterator-9.js30
-rw-r--r--deps/v8/test/mjsunit/es6/microtask-delivery.js2
-rw-r--r--deps/v8/test/mjsunit/es6/regress/regress-5929-1.js2
-rw-r--r--deps/v8/test/mjsunit/es6/regress/regress-7706.js34
-rw-r--r--deps/v8/test/mjsunit/es6/regress/regress-cr895860.js14
-rw-r--r--deps/v8/test/mjsunit/es6/set-iterator-1.js23
-rw-r--r--deps/v8/test/mjsunit/es6/set-iterator-10.js34
-rw-r--r--deps/v8/test/mjsunit/es6/set-iterator-11.js33
-rw-r--r--deps/v8/test/mjsunit/es6/set-iterator-2.js21
-rw-r--r--deps/v8/test/mjsunit/es6/set-iterator-3.js23
-rw-r--r--deps/v8/test/mjsunit/es6/set-iterator-4.js23
-rw-r--r--deps/v8/test/mjsunit/es6/set-iterator-5.js23
-rw-r--r--deps/v8/test/mjsunit/es6/set-iterator-6.js21
-rw-r--r--deps/v8/test/mjsunit/es6/set-iterator-7.js23
-rw-r--r--deps/v8/test/mjsunit/es6/set-iterator-8.js31
-rw-r--r--deps/v8/test/mjsunit/es6/set-iterator-9.js31
-rw-r--r--deps/v8/test/mjsunit/es6/string-iterator.js9
-rw-r--r--deps/v8/test/mjsunit/es6/typedarray-construct-by-array-like.js4
-rw-r--r--deps/v8/test/mjsunit/es6/typedarray-copywithin.js4
-rw-r--r--deps/v8/test/mjsunit/es6/typedarray-every.js14
-rw-r--r--deps/v8/test/mjsunit/es6/typedarray-fill.js4
-rw-r--r--deps/v8/test/mjsunit/es6/typedarray-filter.js2
-rw-r--r--deps/v8/test/mjsunit/es6/typedarray-find.js4
-rw-r--r--deps/v8/test/mjsunit/es6/typedarray-findindex.js4
-rw-r--r--deps/v8/test/mjsunit/es6/typedarray-foreach.js10
-rw-r--r--deps/v8/test/mjsunit/es6/typedarray-from-detached-typedarray.js23
-rw-r--r--deps/v8/test/mjsunit/es6/typedarray-from-next-overridden.js29
-rw-r--r--deps/v8/test/mjsunit/es6/typedarray-from-nonfunction-iterator.js11
-rw-r--r--deps/v8/test/mjsunit/es6/typedarray-from.js2
-rw-r--r--deps/v8/test/mjsunit/es6/typedarray-indexing.js6
-rw-r--r--deps/v8/test/mjsunit/es6/typedarray-iteration.js6
-rw-r--r--deps/v8/test/mjsunit/es6/typedarray-map.js2
-rw-r--r--deps/v8/test/mjsunit/es6/typedarray-neutered.js4
-rw-r--r--deps/v8/test/mjsunit/es6/typedarray-reduce.js4
-rw-r--r--deps/v8/test/mjsunit/es6/typedarray-reverse.js2
-rw-r--r--deps/v8/test/mjsunit/es6/typedarray-slice.js4
-rw-r--r--deps/v8/test/mjsunit/es6/typedarray-sort.js2
-rw-r--r--deps/v8/test/mjsunit/es6/typedarray-tostring.js2
-rw-r--r--deps/v8/test/mjsunit/es6/typedarray.js34
-rw-r--r--deps/v8/test/mjsunit/es6/unscopables.js6
-rw-r--r--deps/v8/test/mjsunit/es7/array-includes-receiver.js18
-rw-r--r--deps/v8/test/mjsunit/es7/array-includes.js28
-rw-r--r--deps/v8/test/mjsunit/es8/async-arrow-default-function-await.js5
-rw-r--r--deps/v8/test/mjsunit/es8/async-arrow-lexical-arguments.js2
-rw-r--r--deps/v8/test/mjsunit/es8/async-arrow-lexical-new.target.js2
-rw-r--r--deps/v8/test/mjsunit/es8/async-arrow-lexical-super.js2
-rw-r--r--deps/v8/test/mjsunit/es8/async-arrow-lexical-this.js2
-rw-r--r--deps/v8/test/mjsunit/es8/async-await-basic.js30
-rw-r--r--deps/v8/test/mjsunit/es8/async-await-no-constructor.js2
-rw-r--r--deps/v8/test/mjsunit/es8/async-await-species.js2
-rw-r--r--deps/v8/test/mjsunit/es8/async-destructuring.js6
-rw-r--r--deps/v8/test/mjsunit/es8/async-function-stacktrace.js36
-rw-r--r--deps/v8/test/mjsunit/es8/async-function-try-finally.js4
-rw-r--r--deps/v8/test/mjsunit/es9/regress/regress-904167.js14
-rw-r--r--deps/v8/test/mjsunit/for-of-in-catch-duplicate-decl.js5
-rw-r--r--deps/v8/test/mjsunit/harmony/async-from-sync-iterator.js8
-rw-r--r--deps/v8/test/mjsunit/harmony/async-generators-basic.js34
-rw-r--r--deps/v8/test/mjsunit/harmony/bigint/as-int-n.js4
-rw-r--r--deps/v8/test/mjsunit/harmony/bigint/misc.js8
-rw-r--r--deps/v8/test/mjsunit/harmony/bigint/regressions.js4
-rw-r--r--deps/v8/test/mjsunit/harmony/default-parameter-do-expression.js21
-rw-r--r--deps/v8/test/mjsunit/harmony/do-expressions-arrow-param-scope.js95
-rw-r--r--deps/v8/test/mjsunit/harmony/do-expressions-control.js109
-rw-r--r--deps/v8/test/mjsunit/harmony/do-expressions.js305
-rw-r--r--deps/v8/test/mjsunit/harmony/for-await-of.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/generators.js10
-rw-r--r--deps/v8/test/mjsunit/harmony/hashbang-eval.js11
-rw-r--r--deps/v8/test/mjsunit/harmony/import-from-compilation-errored.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/import-from-evaluation-errored.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/import-from-fetch-errored.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/import-from-instantiation-errored.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/modules-import-1.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/modules-import-10.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/modules-import-11.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/modules-import-12.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/modules-import-13.js4
-rw-r--r--deps/v8/test/mjsunit/harmony/modules-import-14.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/modules-import-15.js6
-rw-r--r--deps/v8/test/mjsunit/harmony/modules-import-16.js6
-rw-r--r--deps/v8/test/mjsunit/harmony/modules-import-17.js11
-rw-r--r--deps/v8/test/mjsunit/harmony/modules-import-2.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/modules-import-3.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/modules-import-5.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/modules-import-6.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/modules-import-7.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/modules-import-8.js6
-rw-r--r--deps/v8/test/mjsunit/harmony/modules-import-9.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/modules-skip-13.js6
-rw-r--r--deps/v8/test/mjsunit/harmony/object-fromentries.js439
-rw-r--r--deps/v8/test/mjsunit/harmony/private-fields-static.js356
-rw-r--r--deps/v8/test/mjsunit/harmony/public-static-class-fields.js13
-rw-r--r--deps/v8/test/mjsunit/harmony/regress/regress-4658.js21
-rw-r--r--deps/v8/test/mjsunit/harmony/regress/regress-4755.js45
-rw-r--r--deps/v8/test/mjsunit/harmony/regress/regress-4904.js24
-rw-r--r--deps/v8/test/mjsunit/harmony/regress/regress-546967.js16
-rw-r--r--deps/v8/test/mjsunit/harmony/regress/regress-897436.js12
-rw-r--r--deps/v8/test/mjsunit/harmony/regress/regress-912504.js11
-rw-r--r--deps/v8/test/mjsunit/harmony/regress/regress-crbug-578038.js16
-rw-r--r--deps/v8/test/mjsunit/harmony/string-matchAll-deleted-matchAll.js9
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/basics.js214
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/cleanup-doesnt-iterate-all-cells.js89
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/cleanup-from-different-realm.js34
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/cleanup-is-a-microtask.js56
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/cleanup-proxy-from-different-realm.js34
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/cleanupsome-cleared-weakcell.js35
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/cleanupsome-weakcell.js33
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/clear-after-cleanup.js46
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/clear-before-cleanup.js40
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/clear-called-twice.js39
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/clear-clears-factory-pointer.js49
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/clear-inside-cleanup1.js41
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/clear-inside-cleanup2.js40
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/clear-inside-cleanup3.js41
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/clear-inside-cleanup4.js48
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/clear-when-cleanup-already-scheduled.js36
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/factory-scheduled-for-cleanup-multiple-times.js71
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/iterating-weak-cells.js49
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/multiple-dirty-weak-factories.js44
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/two-weakrefs.js48
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/weak-cell-basics.js36
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/weak-factory-keeps-weak-cells-alive.js39
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/weakcell-and-weakref.js45
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/weakref-creation-keeps-alive.js27
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/weakref-deref-keeps-alive.js45
-rw-r--r--deps/v8/test/mjsunit/json.js4
-rw-r--r--deps/v8/test/mjsunit/messages.js12
-rw-r--r--deps/v8/test/mjsunit/mjsunit.js15
-rw-r--r--deps/v8/test/mjsunit/mjsunit.status137
-rw-r--r--deps/v8/test/mjsunit/modules-export-star-as1.js10
-rw-r--r--deps/v8/test/mjsunit/modules-export-star-as2.js19
-rw-r--r--deps/v8/test/mjsunit/modules-export-star-as3.js15
-rw-r--r--deps/v8/test/mjsunit/modules-imports8.js11
-rw-r--r--deps/v8/test/mjsunit/modules-namespace1.js6
-rw-r--r--deps/v8/test/mjsunit/modules-skip-8.js5
-rw-r--r--deps/v8/test/mjsunit/modules-skip-9.js7
-rw-r--r--deps/v8/test/mjsunit/neuter-twice.js4
-rw-r--r--deps/v8/test/mjsunit/object-seal.js4
-rw-r--r--deps/v8/test/mjsunit/opt-elements-kind.js14
-rw-r--r--deps/v8/test/mjsunit/osr-elements-kind.js14
-rw-r--r--deps/v8/test/mjsunit/parallel-compile-tasks.js70
-rw-r--r--deps/v8/test/mjsunit/regexp-override-exec.js19
-rw-r--r--deps/v8/test/mjsunit/regexp-override-symbol-match-all.js12
-rw-r--r--deps/v8/test/mjsunit/regexp-override-symbol-match.js10
-rw-r--r--deps/v8/test/mjsunit/regexp-override-symbol-replace.js10
-rw-r--r--deps/v8/test/mjsunit/regexp-override-symbol-search.js10
-rw-r--r--deps/v8/test/mjsunit/regexp-override-symbol-split.js10
-rw-r--r--deps/v8/test/mjsunit/regexp.js16
-rw-r--r--deps/v8/test/mjsunit/regress-906893.js21
-rw-r--r--deps/v8/test/mjsunit/regress-918763.js14
-rw-r--r--deps/v8/test/mjsunit/regress-regexp-functional-replace-slow.js7
-rw-r--r--deps/v8/test/mjsunit/regress-v8-8445-2.js27
-rw-r--r--deps/v8/test/mjsunit/regress-v8-8445.js17
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1199637.js25
-rw-r--r--deps/v8/test/mjsunit/regress/regress-2618.js6
-rw-r--r--deps/v8/test/mjsunit/regress/regress-2989.js7
-rw-r--r--deps/v8/test/mjsunit/regress/regress-334.js17
-rw-r--r--deps/v8/test/mjsunit/regress/regress-336820.js15
-rw-r--r--deps/v8/test/mjsunit/regress/regress-353004.js18
-rw-r--r--deps/v8/test/mjsunit/regress/regress-4964.js12
-rw-r--r--deps/v8/test/mjsunit/regress/regress-5405.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-5691.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-6711.js21
-rw-r--r--deps/v8/test/mjsunit/regress/regress-682349.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-707410.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-740694.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-748069.js3
-rw-r--r--deps/v8/test/mjsunit/regress/regress-7773.js71
-rw-r--r--deps/v8/test/mjsunit/regress/regress-778668.js17
-rw-r--r--deps/v8/test/mjsunit/regress/regress-797581.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-800651.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-813440.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-8241.js6
-rw-r--r--deps/v8/test/mjsunit/regress/regress-8377.js24
-rw-r--r--deps/v8/test/mjsunit/regress/regress-8384.js64
-rw-r--r--deps/v8/test/mjsunit/regress/regress-840106.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-852765.js10
-rw-r--r--deps/v8/test/mjsunit/regress/regress-8607.js8
-rw-r--r--deps/v8/test/mjsunit/regress/regress-8630.js32
-rw-r--r--deps/v8/test/mjsunit/regress/regress-8659.js5
-rw-r--r--deps/v8/test/mjsunit/regress/regress-8708.js10
-rw-r--r--deps/v8/test/mjsunit/regress/regress-896326.js11
-rw-r--r--deps/v8/test/mjsunit/regress/regress-897512.js3
-rw-r--r--deps/v8/test/mjsunit/regress/regress-897815.js16
-rw-r--r--deps/v8/test/mjsunit/regress/regress-898812.js5
-rw-r--r--deps/v8/test/mjsunit/regress/regress-898936.js5
-rw-r--r--deps/v8/test/mjsunit/regress/regress-899115.js13
-rw-r--r--deps/v8/test/mjsunit/regress/regress-899133.js5
-rw-r--r--deps/v8/test/mjsunit/regress/regress-899474.js5
-rw-r--r--deps/v8/test/mjsunit/regress/regress-899537.js5
-rw-r--r--deps/v8/test/mjsunit/regress/regress-900085.js7
-rw-r--r--deps/v8/test/mjsunit/regress/regress-900585.js5
-rw-r--r--deps/v8/test/mjsunit/regress/regress-900786.js5
-rw-r--r--deps/v8/test/mjsunit/regress/regress-901633.js24
-rw-r--r--deps/v8/test/mjsunit/regress/regress-901798.js14
-rw-r--r--deps/v8/test/mjsunit/regress/regress-902552.js11
-rw-r--r--deps/v8/test/mjsunit/regress/regress-902810.js5
-rw-r--r--deps/v8/test/mjsunit/regress/regress-903527.js5
-rw-r--r--deps/v8/test/mjsunit/regress/regress-903697.js12
-rw-r--r--deps/v8/test/mjsunit/regress/regress-903874.js6
-rw-r--r--deps/v8/test/mjsunit/regress/regress-904255.js5
-rw-r--r--deps/v8/test/mjsunit/regress/regress-904275.js30
-rw-r--r--deps/v8/test/mjsunit/regress/regress-904417.js18
-rw-r--r--deps/v8/test/mjsunit/regress/regress-904707.js14
-rw-r--r--deps/v8/test/mjsunit/regress/regress-905587.js5
-rw-r--r--deps/v8/test/mjsunit/regress/regress-905907.js10
-rw-r--r--deps/v8/test/mjsunit/regress/regress-906406.js7
-rw-r--r--deps/v8/test/mjsunit/regress/regress-907479.js23
-rw-r--r--deps/v8/test/mjsunit/regress/regress-907575.js6
-rw-r--r--deps/v8/test/mjsunit/regress/regress-907669.js5
-rw-r--r--deps/v8/test/mjsunit/regress/regress-908231.js8
-rw-r--r--deps/v8/test/mjsunit/regress/regress-908250.js5
-rw-r--r--deps/v8/test/mjsunit/regress/regress-908975.js6
-rw-r--r--deps/v8/test/mjsunit/regress/regress-913844.js7
-rw-r--r--deps/v8/test/mjsunit/regress/regress-917215.js6
-rw-r--r--deps/v8/test/mjsunit/regress/regress-917755.js12
-rw-r--r--deps/v8/test/mjsunit/regress/regress-917988.js31
-rw-r--r--deps/v8/test/mjsunit/regress/regress-919340.js17
-rw-r--r--deps/v8/test/mjsunit/regress/regress-919710.js5
-rw-r--r--deps/v8/test/mjsunit/regress/regress-921382.js5
-rw-r--r--deps/v8/test/mjsunit/regress/regress-923723.js14
-rw-r--r--deps/v8/test/mjsunit/regress/regress-cntl-descriptors-enum.js11
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-178790.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-546968.js14
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-691323.js6
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-772056.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-781116-1.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-781116-2.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-800032.js5
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-806388.js5
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-860788.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-867776.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-895199.js17
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-896181.js14
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-896700.js10
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-897098.js8
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-897404.js16
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-897406.js14
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-898785.js11
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-898974.js34
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-899464.js7
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-899535.js6
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-900674.js12
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-902395.js37
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-902610.js11
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-902672.js8
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-905457.js49
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-906043.js33
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-906220.js12
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-906870.js49
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-908309.js27
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-909614.js9
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-911416.js5
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-913212.js11
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-913296.js13
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-915783.js35
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-916288.js5
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-917076.js20
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-917980.js33
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-920184.js14
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-923264.js27
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-923265.js9
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-923705.js15
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-926651.js12
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-930580.js9
-rw-r--r--deps/v8/test/mjsunit/regress/regress-directive.js10
-rw-r--r--deps/v8/test/mjsunit/regress/regress-loop-var-assign-without-block-scope.js14
-rw-r--r--deps/v8/test/mjsunit/regress/regress-osr-in-case-label.js18
-rw-r--r--deps/v8/test/mjsunit/regress/regress-osr-in-literal.js30
-rw-r--r--deps/v8/test/mjsunit/regress/regress-preparse-inner-arrow-duplicate-parameter.js5
-rw-r--r--deps/v8/test/mjsunit/regress/regress-sloppy-block-function-hoisting-dynamic.js6
-rw-r--r--deps/v8/test/mjsunit/regress/regress-v8-8357.js31
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-02256.js2
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-651961.js2
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-680938.js4
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-688876.js2
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-699485.js2
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-702460.js50
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-710844.js2
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-734108.js2
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-7353.js2
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-736584.js2
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-739768.js4
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-763439.js2
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-7914.js2
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-803788.js2
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-808980.js2
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-816226.js2
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-817380.js2
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-825087a.js2
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-825087b.js2
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-834619.js2
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-834693.js2
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-8505.js204
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-8533.js85
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-894307.js16
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-894374.js20
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-898932.js13
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-905815.js27
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-910824.js37
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-913804.js17
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-916869.js14
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-917412.js34
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-917588.js26
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-917588b.js55
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-918149.js12
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-918284.js21
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-918917.js22
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-919308.js37
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-919533.js25
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-922432.js21
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-922670.js32
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-922933.js52
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-924905.js18
-rw-r--r--deps/v8/test/mjsunit/samevalue.js2
-rw-r--r--deps/v8/test/mjsunit/shared-function-tier-up-turbo.js4
-rw-r--r--deps/v8/test/mjsunit/smi-ops-inlined.js2
-rw-r--r--deps/v8/test/mjsunit/spread-large-array.js13
-rw-r--r--deps/v8/test/mjsunit/spread-large-map.js17
-rw-r--r--deps/v8/test/mjsunit/spread-large-set.js21
-rw-r--r--deps/v8/test/mjsunit/spread-large-string.js13
-rw-r--r--deps/v8/test/mjsunit/stack-traces-class-fields.js16
-rw-r--r--deps/v8/test/mjsunit/test-async.js4
-rw-r--r--deps/v8/test/mjsunit/testcfg.py25
-rw-r--r--deps/v8/test/mjsunit/try-catch-default-destructuring.js24
-rw-r--r--deps/v8/test/mjsunit/type-profile/regress-707223.js2
-rw-r--r--deps/v8/test/mjsunit/unicodelctest-no-optimization.js1
-rw-r--r--deps/v8/test/mjsunit/unicodelctest.js1
-rw-r--r--deps/v8/test/mjsunit/wasm/OWNERS2
-rw-r--r--deps/v8/test/mjsunit/wasm/anyfunc.js43
-rw-r--r--deps/v8/test/mjsunit/wasm/anyref-globals.js313
-rw-r--r--deps/v8/test/mjsunit/wasm/anyref.js70
-rw-r--r--deps/v8/test/mjsunit/wasm/asm-wasm-math-intrinsic.js295
-rw-r--r--deps/v8/test/mjsunit/wasm/async-compile.js21
-rw-r--r--deps/v8/test/mjsunit/wasm/atomics-stress.js554
-rw-r--r--deps/v8/test/mjsunit/wasm/atomics64-stress.js596
-rw-r--r--deps/v8/test/mjsunit/wasm/bigint.js207
-rw-r--r--deps/v8/test/mjsunit/wasm/bounds-check-turbofan.js24
-rw-r--r--deps/v8/test/mjsunit/wasm/bulk-memory.js375
-rw-r--r--deps/v8/test/mjsunit/wasm/code-space-exhaustion.js18
-rw-r--r--deps/v8/test/mjsunit/wasm/compiled-module-serialization.js6
-rw-r--r--deps/v8/test/mjsunit/wasm/errors.js7
-rw-r--r--deps/v8/test/mjsunit/wasm/exceptions-anyref.js101
-rw-r--r--deps/v8/test/mjsunit/wasm/exceptions-rethrow.js121
-rw-r--r--deps/v8/test/mjsunit/wasm/exceptions-shared.js16
-rw-r--r--deps/v8/test/mjsunit/wasm/exceptions-simd.js51
-rw-r--r--deps/v8/test/mjsunit/wasm/exceptions-utils.js28
-rw-r--r--deps/v8/test/mjsunit/wasm/exceptions.js292
-rw-r--r--deps/v8/test/mjsunit/wasm/export-mutable-global.js2
-rw-r--r--deps/v8/test/mjsunit/wasm/futex.js310
-rw-r--r--deps/v8/test/mjsunit/wasm/graceful_shutdown.js2
-rw-r--r--deps/v8/test/mjsunit/wasm/grow-memory-detaching.js2
-rw-r--r--deps/v8/test/mjsunit/wasm/grow-memory-in-branch.js52
-rw-r--r--deps/v8/test/mjsunit/wasm/grow-memory-in-call.js60
-rw-r--r--deps/v8/test/mjsunit/wasm/grow-memory-in-loop.js28
-rw-r--r--deps/v8/test/mjsunit/wasm/grow-memory.js120
-rw-r--r--deps/v8/test/mjsunit/wasm/huge-memory.js25
-rw-r--r--deps/v8/test/mjsunit/wasm/import-memory.js26
-rw-r--r--deps/v8/test/mjsunit/wasm/import-mutable-global.js2
-rw-r--r--deps/v8/test/mjsunit/wasm/import-table.js36
-rw-r--r--deps/v8/test/mjsunit/wasm/indirect-calls.js216
-rw-r--r--deps/v8/test/mjsunit/wasm/indirect-tables.js53
-rw-r--r--deps/v8/test/mjsunit/wasm/instance-memory-gc-stress.js2
-rw-r--r--deps/v8/test/mjsunit/wasm/interpreter-mixed.js4
-rw-r--r--deps/v8/test/mjsunit/wasm/interpreter.js27
-rw-r--r--deps/v8/test/mjsunit/wasm/js-api.js91
-rw-r--r--deps/v8/test/mjsunit/wasm/jsapi-harness.js139
-rw-r--r--deps/v8/test/mjsunit/wasm/large-offset.js8
-rw-r--r--deps/v8/test/mjsunit/wasm/liftoff.js3
-rw-r--r--deps/v8/test/mjsunit/wasm/loop-rotation.js77
-rw-r--r--deps/v8/test/mjsunit/wasm/memory-instance-validation.js2
-rw-r--r--deps/v8/test/mjsunit/wasm/memory.js18
-rw-r--r--deps/v8/test/mjsunit/wasm/mutable-globals.js6
-rw-r--r--deps/v8/test/mjsunit/wasm/streaming-compile.js2
-rw-r--r--deps/v8/test/mjsunit/wasm/streaming-error-position.js2
-rw-r--r--deps/v8/test/mjsunit/wasm/streaming-trap-location.js2
-rw-r--r--deps/v8/test/mjsunit/wasm/table-copy.js275
-rw-r--r--deps/v8/test/mjsunit/wasm/table-grow.js12
-rw-r--r--deps/v8/test/mjsunit/wasm/table.js53
-rw-r--r--deps/v8/test/mjsunit/wasm/wasm-constants.js58
-rw-r--r--deps/v8/test/mjsunit/wasm/wasm-math-intrinsic.js257
-rw-r--r--deps/v8/test/mjsunit/wasm/wasm-module-builder.js173
-rw-r--r--deps/v8/test/mkgrokdump/mkgrokdump.cc12
-rw-r--r--deps/v8/test/mkgrokdump/mkgrokdump.status6
-rw-r--r--deps/v8/test/test262/BUILD.gn3
-rw-r--r--deps/v8/test/test262/detachArrayBuffer.js2
-rw-r--r--deps/v8/test/test262/harness-adapt-donotevaluate.js12
-rw-r--r--deps/v8/test/test262/test262.status1221
-rw-r--r--deps/v8/test/test262/testcfg.py72
-rw-r--r--deps/v8/test/torque/test-torque.tq212
-rw-r--r--deps/v8/test/unittests/BUILD.gn31
-rw-r--r--deps/v8/test/unittests/api/isolate-unittest.cc4
-rw-r--r--deps/v8/test/unittests/asmjs/asm-types-unittest.cc2
-rw-r--r--deps/v8/test/unittests/asmjs/switch-logic-unittest.cc89
-rw-r--r--deps/v8/test/unittests/assembler/turbo-assembler-arm-unittest.cc22
-rw-r--r--deps/v8/test/unittests/assembler/turbo-assembler-arm64-unittest.cc22
-rw-r--r--deps/v8/test/unittests/assembler/turbo-assembler-ia32-unittest.cc22
-rw-r--r--deps/v8/test/unittests/assembler/turbo-assembler-mips-unittest.cc22
-rw-r--r--deps/v8/test/unittests/assembler/turbo-assembler-mips64-unittest.cc22
-rw-r--r--deps/v8/test/unittests/assembler/turbo-assembler-ppc-unittest.cc26
-rw-r--r--deps/v8/test/unittests/assembler/turbo-assembler-s390-unittest.cc22
-rw-r--r--deps/v8/test/unittests/assembler/turbo-assembler-x64-unittest.cc22
-rw-r--r--deps/v8/test/unittests/background-compile-task-unittest.cc (renamed from deps/v8/test/unittests/compiler-dispatcher/unoptimized-compile-job-unittest.cc)180
-rw-r--r--deps/v8/test/unittests/base/atomic-utils-unittest.cc18
-rw-r--r--deps/v8/test/unittests/base/ieee754-unittest.cc9
-rw-r--r--deps/v8/test/unittests/base/logging-unittest.cc3
-rw-r--r--deps/v8/test/unittests/base/platform/condition-variable-unittest.cc30
-rw-r--r--deps/v8/test/unittests/base/platform/mutex-unittest.cc8
-rw-r--r--deps/v8/test/unittests/base/threaded-list-unittest.cc31
-rw-r--r--deps/v8/test/unittests/cancelable-tasks-unittest.cc330
-rw-r--r--deps/v8/test/unittests/char-predicates-unittest.cc217
-rw-r--r--deps/v8/test/unittests/code-stub-assembler-unittest.cc10
-rw-r--r--deps/v8/test/unittests/compiler-dispatcher/compiler-dispatcher-tracer-unittest.cc47
-rw-r--r--deps/v8/test/unittests/compiler-dispatcher/compiler-dispatcher-unittest.cc475
-rw-r--r--deps/v8/test/unittests/compiler-dispatcher/optimizing-compile-dispatcher-unittest.cc3
-rw-r--r--deps/v8/test/unittests/compiler/arm/instruction-selector-arm-unittest.cc2
-rw-r--r--deps/v8/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc16
-rw-r--r--deps/v8/test/unittests/compiler/backend/instruction-selector-unittest.cc (renamed from deps/v8/test/unittests/compiler/instruction-selector-unittest.cc)35
-rw-r--r--deps/v8/test/unittests/compiler/backend/instruction-selector-unittest.h (renamed from deps/v8/test/unittests/compiler/instruction-selector-unittest.h)6
-rw-r--r--deps/v8/test/unittests/compiler/backend/instruction-sequence-unittest.cc (renamed from deps/v8/test/unittests/compiler/instruction-sequence-unittest.cc)87
-rw-r--r--deps/v8/test/unittests/compiler/backend/instruction-sequence-unittest.h (renamed from deps/v8/test/unittests/compiler/instruction-sequence-unittest.h)17
-rw-r--r--deps/v8/test/unittests/compiler/backend/instruction-unittest.cc (renamed from deps/v8/test/unittests/compiler/instruction-unittest.cc)2
-rw-r--r--deps/v8/test/unittests/compiler/code-assembler-unittest.cc38
-rw-r--r--deps/v8/test/unittests/compiler/constant-folding-reducer-unittest.cc44
-rw-r--r--deps/v8/test/unittests/compiler/graph-unittest.cc13
-rw-r--r--deps/v8/test/unittests/compiler/graph-unittest.h7
-rw-r--r--deps/v8/test/unittests/compiler/ia32/instruction-selector-ia32-unittest.cc2
-rw-r--r--deps/v8/test/unittests/compiler/js-call-reducer-unittest.cc25
-rw-r--r--deps/v8/test/unittests/compiler/js-create-lowering-unittest.cc6
-rw-r--r--deps/v8/test/unittests/compiler/js-native-context-specialization-unittest.cc12
-rw-r--r--deps/v8/test/unittests/compiler/js-typed-lowering-unittest.cc5
-rw-r--r--deps/v8/test/unittests/compiler/live-range-builder.h78
-rw-r--r--deps/v8/test/unittests/compiler/machine-operator-reducer-unittest.cc22
-rw-r--r--deps/v8/test/unittests/compiler/machine-operator-unittest.cc3
-rw-r--r--deps/v8/test/unittests/compiler/mips/OWNERS5
-rw-r--r--deps/v8/test/unittests/compiler/mips/instruction-selector-mips-unittest.cc2
-rw-r--r--deps/v8/test/unittests/compiler/mips64/OWNERS5
-rw-r--r--deps/v8/test/unittests/compiler/mips64/instruction-selector-mips64-unittest.cc2
-rw-r--r--deps/v8/test/unittests/compiler/node-test-utils.cc11
-rw-r--r--deps/v8/test/unittests/compiler/node-test-utils.h10
-rw-r--r--deps/v8/test/unittests/compiler/persistent-unittest.cc24
-rw-r--r--deps/v8/test/unittests/compiler/ppc/OWNERS3
-rw-r--r--deps/v8/test/unittests/compiler/ppc/instruction-selector-ppc-unittest.cc2
-rw-r--r--deps/v8/test/unittests/compiler/redundancy-elimination-unittest.cc207
-rw-r--r--deps/v8/test/unittests/compiler/regalloc/live-range-unittest.cc60
-rw-r--r--deps/v8/test/unittests/compiler/regalloc/move-optimizer-unittest.cc11
-rw-r--r--deps/v8/test/unittests/compiler/regalloc/register-allocator-unittest.cc24
-rw-r--r--deps/v8/test/unittests/compiler/s390/OWNERS3
-rw-r--r--deps/v8/test/unittests/compiler/s390/instruction-selector-s390-unittest.cc2
-rw-r--r--deps/v8/test/unittests/compiler/simplified-lowering-unittest.cc6
-rw-r--r--deps/v8/test/unittests/compiler/simplified-operator-reducer-unittest.cc5
-rw-r--r--deps/v8/test/unittests/compiler/typed-optimization-unittest.cc3
-rw-r--r--deps/v8/test/unittests/compiler/typer-unittest.cc34
-rw-r--r--deps/v8/test/unittests/compiler/x64/instruction-selector-x64-unittest.cc108
-rw-r--r--deps/v8/test/unittests/compiler/zone-stats-unittest.cc2
-rw-r--r--deps/v8/test/unittests/conversions-unittest.cc76
-rw-r--r--deps/v8/test/unittests/detachable-vector-unittest.cc60
-rw-r--r--deps/v8/test/unittests/heap/barrier-unittest.cc22
-rw-r--r--deps/v8/test/unittests/heap/embedder-tracing-unittest.cc83
-rw-r--r--deps/v8/test/unittests/heap/gc-tracer-unittest.cc26
-rw-r--r--deps/v8/test/unittests/heap/heap-unittest.cc36
-rw-r--r--deps/v8/test/unittests/heap/marking-unittest.cc8
-rw-r--r--deps/v8/test/unittests/heap/slot-set-unittest.cc95
-rw-r--r--deps/v8/test/unittests/heap/spaces-unittest.cc21
-rw-r--r--deps/v8/test/unittests/heap/unmapper-unittest.cc299
-rw-r--r--deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc11
-rw-r--r--deps/v8/test/unittests/interpreter/bytecode-array-iterator-unittest.cc7
-rw-r--r--deps/v8/test/unittests/interpreter/bytecode-array-random-iterator-unittest.cc43
-rw-r--r--deps/v8/test/unittests/interpreter/bytecode-decoder-unittest.cc7
-rw-r--r--deps/v8/test/unittests/interpreter/bytecodes-unittest.cc2
-rw-r--r--deps/v8/test/unittests/interpreter/constant-array-builder-unittest.cc16
-rw-r--r--deps/v8/test/unittests/microtask-queue-unittest.cc187
-rw-r--r--deps/v8/test/unittests/objects/microtask-queue-unittest.cc55
-rw-r--r--deps/v8/test/unittests/register-configuration-unittest.cc16
-rw-r--r--deps/v8/test/unittests/regress/regress-crbug-938251-unittest.cc20
-rw-r--r--deps/v8/test/unittests/test-helpers.cc2
-rw-r--r--deps/v8/test/unittests/test-helpers.h1
-rw-r--r--deps/v8/test/unittests/test-utils.cc105
-rw-r--r--deps/v8/test/unittests/test-utils.h295
-rw-r--r--deps/v8/test/unittests/torque/torque-unittest.cc21
-rw-r--r--deps/v8/test/unittests/unicode-unittest.cc3
-rw-r--r--deps/v8/test/unittests/unittests.status7
-rw-r--r--deps/v8/test/unittests/utils-unittest.cc68
-rw-r--r--deps/v8/test/unittests/value-serializer-unittest.cc218
-rw-r--r--deps/v8/test/unittests/wasm/OWNERS2
-rw-r--r--deps/v8/test/unittests/wasm/decoder-unittest.cc4
-rw-r--r--deps/v8/test/unittests/wasm/function-body-decoder-unittest.cc337
-rw-r--r--deps/v8/test/unittests/wasm/module-decoder-unittest.cc1939
-rw-r--r--deps/v8/test/unittests/wasm/streaming-decoder-unittest.cc82
-rw-r--r--deps/v8/test/unittests/wasm/trap-handler-posix-unittest.cc (renamed from deps/v8/test/unittests/wasm/trap-handler-unittest.cc)4
-rw-r--r--deps/v8/test/unittests/wasm/trap-handler-win-unittest.cc93
-rw-r--r--deps/v8/test/unittests/wasm/trap-handler-x64-unittest.cc478
-rw-r--r--deps/v8/test/unittests/wasm/wasm-code-manager-unittest.cc137
-rw-r--r--deps/v8/test/unittests/wasm/wasm-compiler-unittest.cc71
-rw-r--r--deps/v8/test/wasm-js/BUILD.gn17
-rw-r--r--deps/v8/test/wasm-js/LICENSE.testharness30
-rw-r--r--deps/v8/test/wasm-js/testcfg.py74
-rw-r--r--deps/v8/test/wasm-js/testharness-after.js16
-rw-r--r--deps/v8/test/wasm-js/testharness.js148
-rw-r--r--deps/v8/test/wasm-js/wasm-js.status28
-rw-r--r--deps/v8/test/wasm-spec-tests/tests.tar.gz.sha12
-rw-r--r--deps/v8/test/wasm-spec-tests/wasm-spec-tests.status7
-rw-r--r--deps/v8/test/webkit/array-holes-expected.txt2
-rw-r--r--deps/v8/test/webkit/array-holes.js2
-rw-r--r--deps/v8/test/webkit/class-syntax-semicolon-expected.txt3
-rw-r--r--deps/v8/test/webkit/class-syntax-semicolon.js3
-rw-r--r--deps/v8/test/webkit/fast/js/kde/delete-expected.txt2
-rw-r--r--deps/v8/test/webkit/fast/js/kde/delete.js2
-rw-r--r--deps/v8/test/webkit/fast/js/kde/exceptions.js2
-rw-r--r--deps/v8/test/webkit/fast/js/toString-overrides-expected.txt4
-rw-r--r--deps/v8/test/webkit/fast/regex/toString-expected.txt8
-rw-r--r--deps/v8/test/webkit/nested-functions.js2
-rw-r--r--deps/v8/test/webkit/propertyIsEnumerable-expected.txt2
-rw-r--r--deps/v8/test/webkit/propertyIsEnumerable.js2
-rw-r--r--deps/v8/test/webkit/resources/JSON-stringify.js30
-rw-r--r--deps/v8/test/webkit/resources/json2-es5-compat.js12
-rw-r--r--deps/v8/test/webkit/run-json-stringify-expected.txt29
-rw-r--r--deps/v8/test/webkit/webkit.status19
-rwxr-xr-xdeps/v8/third_party/binutils/download.py15
-rw-r--r--deps/v8/third_party/googletest/BUILD.gn12
-rw-r--r--deps/v8/third_party/googletest/README.chromium2
-rwxr-xr-xdeps/v8/third_party/inspector_protocol/CodeGenerator.py6
-rwxr-xr-xdeps/v8/third_party/inspector_protocol/ConvertProtocolToJSON.py10
-rw-r--r--deps/v8/third_party/inspector_protocol/README.md18
-rw-r--r--deps/v8/third_party/inspector_protocol/README.v82
-rwxr-xr-xdeps/v8/third_party/inspector_protocol/check_protocol_compatibility.py5
-rwxr-xr-xdeps/v8/third_party/inspector_protocol/code_generator.py19
-rwxr-xr-xdeps/v8/third_party/inspector_protocol/convert_protocol_to_json.py22
-rw-r--r--deps/v8/third_party/inspector_protocol/lib/Allocator_h.template7
-rw-r--r--deps/v8/third_party/inspector_protocol/lib/Collections_h.template0
-rw-r--r--deps/v8/third_party/inspector_protocol/lib/DispatcherBase_cpp.template16
-rw-r--r--deps/v8/third_party/inspector_protocol/lib/Forward_h.template1
-rw-r--r--deps/v8/third_party/inspector_protocol/lib/Maybe_h.template9
-rw-r--r--deps/v8/third_party/inspector_protocol/lib/Object_h.template2
-rw-r--r--deps/v8/third_party/inspector_protocol/lib/ValueConversions_h.template22
-rw-r--r--deps/v8/third_party/inspector_protocol/pdl.py21
-rw-r--r--deps/v8/third_party/v8/builtins/array-sort.tq187
-rw-r--r--deps/v8/tools/BUILD.gn1
-rw-r--r--deps/v8/tools/PRESUBMIT.py (renamed from deps/v8/tools/unittests/PRESUBMIT.py)3
-rw-r--r--deps/v8/tools/blink_tests/TestExpectations7
-rw-r--r--deps/v8/tools/callstats.html14
-rwxr-xr-xdeps/v8/tools/callstats.py2
-rwxr-xr-xdeps/v8/tools/check-unused-symbols.sh24
-rw-r--r--deps/v8/tools/clusterfuzz/v8_mock_archs.js17
-rwxr-xr-xdeps/v8/tools/deprecation_stats.py105
-rwxr-xr-xdeps/v8/tools/dev/gm.py59
-rw-r--r--deps/v8/tools/gdbinit33
-rwxr-xr-xdeps/v8/tools/gen-keywords-gen-h.py252
-rw-r--r--deps/v8/tools/gen-postmortem-metadata.py26
-rwxr-xr-xdeps/v8/tools/generate-header-include-checks.py16
-rw-r--r--deps/v8/tools/heap-stats/categories.js28
-rw-r--r--deps/v8/tools/heap-stats/index.html1
-rw-r--r--deps/v8/tools/heap-stats/trace-file-reader.html8
-rw-r--r--deps/v8/tools/heap-stats/trace-file-reader.js5
-rw-r--r--deps/v8/tools/ic-explorer.html23
-rw-r--r--deps/v8/tools/ic-processor.js10
-rwxr-xr-xdeps/v8/tools/js2c.py239
-rwxr-xr-xdeps/v8/tools/jsfunfuzz/fuzz-harness.sh19
-rw-r--r--deps/v8/tools/jsfunfuzz/jsfunfuzz.tar.gz.sha12
-rw-r--r--deps/v8/tools/jsmin.py298
-rwxr-xr-xdeps/v8/tools/linux-tick-processor4
-rwxr-xr-xdeps/v8/tools/locs.py376
-rwxr-xr-xdeps/v8/tools/map-processor2
-rw-r--r--deps/v8/tools/map-processor.html432
-rw-r--r--deps/v8/tools/map-processor.js26
-rw-r--r--deps/v8/tools/mb/docs/design_spec.md2
-rw-r--r--deps/v8/tools/mb/docs/user_guide.md4
-rw-r--r--deps/v8/tools/node/README.md12
-rwxr-xr-xdeps/v8/tools/node/backport_node.py126
-rwxr-xr-xdeps/v8/tools/node/fetch_deps.py2
-rwxr-xr-xdeps/v8/tools/node/test_backport_node.py70
-rwxr-xr-xdeps/v8/tools/node/update_node.py3
-rwxr-xr-xdeps/v8/tools/parse-processor2
-rw-r--r--deps/v8/tools/parse-processor.html27
-rwxr-xr-xdeps/v8/tools/plot-timer-events2
-rw-r--r--deps/v8/tools/profview/profview.js15
-rwxr-xr-xdeps/v8/tools/release/auto_roll.py5
-rw-r--r--deps/v8/tools/release/common_includes.py2
-rwxr-xr-xdeps/v8/tools/release/create_release.py11
-rwxr-xr-xdeps/v8/tools/release/filter_build_files.py1
-rw-r--r--deps/v8/tools/release/git_recipes.py8
-rwxr-xr-xdeps/v8/tools/release/test_scripts.py9
-rwxr-xr-xdeps/v8/tools/run-clang-tidy.py3
-rw-r--r--deps/v8/tools/run-tests.py.vpython32
-rwxr-xr-xdeps/v8/tools/run_perf.py60
-rw-r--r--deps/v8/tools/sanitizers/tsan_suppressions.txt4
-rw-r--r--deps/v8/tools/snapshot/asm_to_inline_asm.py31
-rw-r--r--deps/v8/tools/testrunner/base_runner.py80
-rw-r--r--deps/v8/tools/testrunner/local/android.py21
-rw-r--r--deps/v8/tools/testrunner/local/fake_testsuite/fake_testsuite.status5
-rw-r--r--deps/v8/tools/testrunner/local/fake_testsuite/testcfg.py23
-rw-r--r--deps/v8/tools/testrunner/local/statusfile.py3
-rw-r--r--deps/v8/tools/testrunner/local/testsuite.py119
-rwxr-xr-xdeps/v8/tools/testrunner/local/testsuite_unittest.py128
-rwxr-xr-xdeps/v8/tools/testrunner/num_fuzzer.py36
-rw-r--r--deps/v8/tools/testrunner/objects/testcase.py9
-rw-r--r--deps/v8/tools/testrunner/outproc/test262.py35
-rwxr-xr-xdeps/v8/tools/testrunner/standard_runner.py11
-rw-r--r--deps/v8/tools/testrunner/test_config.py2
-rw-r--r--deps/v8/tools/testrunner/testproc/fuzzer.py11
-rw-r--r--deps/v8/tools/testrunner/testproc/progress.py25
-rw-r--r--deps/v8/tools/tick-processor.html6
-rwxr-xr-xdeps/v8/tools/torque/format-torque.py128
-rw-r--r--deps/v8/tools/torque/vim-torque/syntax/torque.vim8
-rw-r--r--deps/v8/tools/torque/vscode-torque/syntaxes/torque.tmLanguage.json4
-rwxr-xr-xdeps/v8/tools/try_perf.py54
-rw-r--r--deps/v8/tools/turbolizer/README.md7
-rwxr-xr-xdeps/v8/tools/turbolizer/deploy.sh18
-rw-r--r--deps/v8/tools/turbolizer/expand-all.jpgbin2839 -> 0 bytes
-rw-r--r--deps/v8/tools/turbolizer/img/hide-selected-icon.png (renamed from deps/v8/tools/turbolizer/hide-selected.png)bin3681 -> 3681 bytes
-rw-r--r--deps/v8/tools/turbolizer/img/hide-unselected-icon.png (renamed from deps/v8/tools/turbolizer/hide-unselected.png)bin3701 -> 3701 bytes
-rw-r--r--deps/v8/tools/turbolizer/img/layout-icon.png (renamed from deps/v8/tools/turbolizer/layout-icon.png)bin4577 -> 4577 bytes
-rw-r--r--deps/v8/tools/turbolizer/img/show-all-icon.pngbin0 -> 4915 bytes
-rw-r--r--deps/v8/tools/turbolizer/img/show-control-icon.pngbin0 -> 5749 bytes
-rw-r--r--deps/v8/tools/turbolizer/img/toggle-hide-dead-icon.png (renamed from deps/v8/tools/turbolizer/live.png)bin3730 -> 3730 bytes
-rw-r--r--deps/v8/tools/turbolizer/img/toggle-types-icon.png (renamed from deps/v8/tools/turbolizer/types.png)bin753 -> 753 bytes
-rw-r--r--deps/v8/tools/turbolizer/img/zoom-selection-icon.png (renamed from deps/v8/tools/turbolizer/search.png)bin3751 -> 3751 bytes
-rw-r--r--deps/v8/tools/turbolizer/index.html103
-rw-r--r--deps/v8/tools/turbolizer/info-view.html119
-rw-r--r--deps/v8/tools/turbolizer/package-lock.json2390
-rw-r--r--deps/v8/tools/turbolizer/package.json25
-rw-r--r--deps/v8/tools/turbolizer/rollup.config.js24
-rw-r--r--deps/v8/tools/turbolizer/src/code-view.ts105
-rw-r--r--deps/v8/tools/turbolizer/src/disassembly-view.ts364
-rw-r--r--deps/v8/tools/turbolizer/src/edge.ts70
-rw-r--r--deps/v8/tools/turbolizer/src/graph-layout.ts342
-rw-r--r--deps/v8/tools/turbolizer/src/graph-view.ts818
-rw-r--r--deps/v8/tools/turbolizer/src/graph.ts107
-rw-r--r--deps/v8/tools/turbolizer/src/graphmultiview.ts110
-rw-r--r--deps/v8/tools/turbolizer/src/info-view.ts17
-rw-r--r--deps/v8/tools/turbolizer/src/lang-disassembly.ts14
-rw-r--r--deps/v8/tools/turbolizer/src/node-label.ts86
-rw-r--r--deps/v8/tools/turbolizer/src/node.ts166
-rw-r--r--deps/v8/tools/turbolizer/src/resizer.ts199
-rw-r--r--deps/v8/tools/turbolizer/src/schedule-view.ts100
-rw-r--r--deps/v8/tools/turbolizer/src/selection-broker.ts44
-rw-r--r--deps/v8/tools/turbolizer/src/selection-handler.ts25
-rw-r--r--deps/v8/tools/turbolizer/src/selection.ts18
-rw-r--r--deps/v8/tools/turbolizer/src/sequence-view.ts93
-rw-r--r--deps/v8/tools/turbolizer/src/source-resolver.ts251
-rw-r--r--deps/v8/tools/turbolizer/src/tabs.ts114
-rw-r--r--deps/v8/tools/turbolizer/src/text-view.ts133
-rw-r--r--deps/v8/tools/turbolizer/src/turbo-visualizer.ts305
-rw-r--r--deps/v8/tools/turbolizer/src/util.ts71
-rw-r--r--deps/v8/tools/turbolizer/src/view.ts39
-rw-r--r--deps/v8/tools/turbolizer/tabs.css55
-rw-r--r--deps/v8/tools/turbolizer/test/source-resolver-test.ts10
-rw-r--r--deps/v8/tools/turbolizer/tsconfig.json67
-rw-r--r--deps/v8/tools/turbolizer/tsconfig.test.json6
-rw-r--r--deps/v8/tools/turbolizer/tslint.json45
-rw-r--r--deps/v8/tools/turbolizer/turbo-visualizer.css652
-rw-r--r--deps/v8/tools/ubsan/blacklist.txt6
-rwxr-xr-xdeps/v8/tools/unittests/run_perf_test.py27
-rwxr-xr-xdeps/v8/tools/unittests/run_tests_test.py175
-rw-r--r--deps/v8/tools/unittests/testdata/testroot1/v8_build_config.json6
-rw-r--r--deps/v8/tools/unittests/testdata/testroot2/v8_build_config.json6
-rwxr-xr-xdeps/v8/tools/update-object-macros-undef.py46
-rwxr-xr-xdeps/v8/tools/v8_presubmit.py28
-rw-r--r--deps/v8/tools/v8heapconst.py654
-rwxr-xr-xdeps/v8/tools/wasm/update-wasm-spec-tests.sh8
-rw-r--r--deps/v8/tools/wasm/wasm-import-profiler-end.js6
-rw-r--r--deps/v8/tools/wasm/wasm-import-profiler.js131
-rw-r--r--deps/v8/tools/whitespace.txt4
2732 files changed, 168404 insertions, 117132 deletions
diff --git a/deps/v8/.clang-tidy b/deps/v8/.clang-tidy
index 31d7ddc750..80e243c5a0 100644
--- a/deps/v8/.clang-tidy
+++ b/deps/v8/.clang-tidy
@@ -4,13 +4,13 @@
modernize-redundant-void-arg,
modernize-replace-random-shuffle,
modernize-shrink-to-fit,
- modernize-use-auto,
+ # modernize-use-auto,
modernize-use-bool-literals,
modernize-use-equals-default,
- modernize-use-equals-delete,
+ # modernize-use-equals-delete,
modernize-use-nullptr,
modernize-use-override,
- google-build-explicit-make-pair,
+ # google-build-explicit-make-pair,
google-explicit-constructor,
google-readability-casting'
WarningsAsErrors: ''
diff --git a/deps/v8/.gitignore b/deps/v8/.gitignore
index 7f09c89e36..6cf6ab4e91 100644
--- a/deps/v8/.gitignore
+++ b/deps/v8/.gitignore
@@ -32,6 +32,7 @@
.project
.pydevproject
.settings
+.torquelint-cache
.vscode
/_*
/build
@@ -51,7 +52,7 @@
/test/mozilla/data
/test/test262/data
/test/test262/harness
-/test/wasm-js
+/test/wasm-js/data
/test/wasm-spec-tests/tests
/test/wasm-spec-tests/tests.tar.gz
/third_party/*
diff --git a/deps/v8/.vpython b/deps/v8/.vpython
index 398cef1ad5..f8d3b7278a 100644
--- a/deps/v8/.vpython
+++ b/deps/v8/.vpython
@@ -43,3 +43,26 @@ wheel: <
platform: "win_amd64"
>
>
+
+# Used by:
+# tools/unittests/run_perf_test.py
+wheel: <
+ name: "infra/python/wheels/coverage/${vpython_platform}"
+ version: "version:4.3.4"
+>
+wheel: <
+ name: "infra/python/wheels/six-py2_py3"
+ version: "version:1.10.0"
+>
+wheel: <
+ name: "infra/python/wheels/pbr-py2_py3"
+ version: "version:3.0.0"
+>
+wheel: <
+ name: "infra/python/wheels/funcsigs-py2_py3"
+ version: "version:1.0.2"
+>
+wheel: <
+ name: "infra/python/wheels/mock-py2_py3"
+ version: "version:2.0.0"
+>
diff --git a/deps/v8/AUTHORS b/deps/v8/AUTHORS
index 50d5d1acef..7adf005528 100644
--- a/deps/v8/AUTHORS
+++ b/deps/v8/AUTHORS
@@ -15,6 +15,7 @@ NVIDIA Corporation <*@nvidia.com>
BlackBerry Limited <*@blackberry.com>
Opera Software ASA <*@opera.com>
Intel Corporation <*@intel.com>
+Microsoft <*@microsoft.com>
MIPS Technologies, Inc. <*@mips.com>
Imagination Technologies, LLC <*@imgtec.com>
Wave Computing, Inc. <*@wavecomp.com>
@@ -39,6 +40,7 @@ Cloudflare, Inc. <*@cloudflare.com>
Aaron Bieber <deftly@gmail.com>
Abdulla Kamar <abdulla.kamar@gmail.com>
Akinori MUSHA <knu@FreeBSD.org>
+Alessandro Pignotti <alessandro@leaningtech.com>
Alex Kodat <akodat@rocketsoftware.com>
Alexander Botero-Lowry <alexbl@FreeBSD.org>
Alexander Karpinsky <homm86@gmail.com>
@@ -50,7 +52,8 @@ Andrew Paprocki <andrew@ishiboo.com>
Andrei Kashcha <anvaka@gmail.com>
Anna Henningsen <anna@addaleax.net>
Bangfu Tao <bangfu.tao@samsung.com>
-Ben Coe <bencoe@gmail.com>
+Daniel Shelton <d1.shelton@samsung.com>
+Ben Coe <ben@npmjs.com>
Ben Newman <ben@meteor.com>
Ben Noordhuis <info@bnoordhuis.nl>
Benjamin Tan <demoneaux@gmail.com>
@@ -65,6 +68,7 @@ Colin Ihrig <cjihrig@gmail.com>
Daniel Andersson <kodandersson@gmail.com>
Daniel Bevenius <daniel.bevenius@gmail.com>
Daniel James <dnljms@gmail.com>
+Deepak Mohan <hop2deep@gmail.com>
Deon Dior <diaoyuanjie@gmail.com>
Dominic Farolini <domfarolino@gmail.com>
Douglas Crosher <dtc-v8@scieneer.com>
@@ -109,10 +113,11 @@ Maciej Małecki <me@mmalecki.com>
Marcin Cieślak <saper@marcincieslak.com>
Marcin Wiącek <marcin@mwiacek.com>
Mateusz Czeladka <mateusz.szczap@gmail.com>
-Matheus Marchini <matheus@sthima.com.br>
+Matheus Marchini <mat@mmarchini.me>
Mathias Bynens <mathias@qiwi.be>
Matt Hanselman <mjhanselman@gmail.com>
Matthew Sporleder <msporleder@gmail.com>
+Maxim Mazurok <maxim@mazurok.com>
Maxim Mossienko <maxim.mossienko@gmail.com>
Michael Lutz <michi@icosahedron.de>
Michael Smith <mike@w3.org>
@@ -143,7 +148,6 @@ Rick Waldron <waldron.rick@gmail.com>
Rob Wu <rob@robwu.nl>
Robert Mustacchi <rm@fingolfin.org>
Robert Nagy <robert.nagy@gmail.com>
-Ruben Bridgewater <ruben@bridgewater.de>
Ryan Dahl <ry@tinyclouds.org>
Sakthipriyan Vairamani (thefourtheye) <thechargingvolcano@gmail.com>
Sander Mathijs van Veen <sander@leaningtech.com>
@@ -162,6 +166,7 @@ Victor Costan <costan@gmail.com>
Vlad Burlik <vladbph@gmail.com>
Vladimir Krivosheev <develar@gmail.com>
Vladimir Shutoff <vovan@shutoff.ru>
+Wenlu Wang <kingwenlu@gmail.com>
Wiktor Garbacz <wiktor.garbacz@gmail.com>
Xiaoyin Liu <xiaoyin.l@outlook.com>
Yannic Bonenberger <contact@yannic-bonenberger.com>
diff --git a/deps/v8/BUILD.gn b/deps/v8/BUILD.gn
index 10e1a28b0a..16e0b60ca7 100644
--- a/deps/v8/BUILD.gn
+++ b/deps/v8/BUILD.gn
@@ -18,6 +18,15 @@ if (is_android) {
import("gni/v8.gni")
import("snapshot_toolchain.gni")
+# Specifies whether the target build is a simulator build. The target cpu is
+# compared with the v8 target cpu so that simulator builds used to create
+# cross-compile snapshots are not affected.
+is_target_simulator = (target_cpu != v8_target_cpu && !v8_multi_arch_build) ||
+ (current_cpu != v8_current_cpu && v8_multi_arch_build)
+
+# For faster Windows builds. See https://crbug.com/v8/8475.
+emit_builtins_as_inline_asm = is_win && is_clang
+
declare_args() {
# Print to stdout on Android.
v8_android_log_stdout = false
@@ -32,6 +41,11 @@ declare_args() {
# Sets -DV8_ENABLE_FUTURE.
v8_enable_future = false
+ # Lite mode disables a number of performance optimizations to reduce memory
+ # at the cost of performance.
+ # Sets -DV8_LITE_MODE.
+ v8_enable_lite_mode = false
+
# Sets -DVERIFY_HEAP.
v8_enable_verify_heap = ""
@@ -72,15 +86,17 @@ declare_args() {
v8_enable_fast_mksnapshot = false
# Enable embedded builtins.
- # TODO(jgruber,v8:6666): Support ia32 and maybe MSVC.
- v8_enable_embedded_builtins = v8_use_snapshot && v8_current_cpu != "x86" &&
- !is_aix && (!is_win || is_clang)
+ v8_enable_embedded_builtins = true
+
+ # Enable code comments for builtins in the snapshot (impacts performance).
+ v8_enable_snapshot_code_comments = false
# Enable code-generation-time checking of types in the CodeStubAssembler.
v8_enable_verify_csa = false
# Enable pointer compression (sets -DV8_COMPRESS_POINTERS).
v8_enable_pointer_compression = false
+ v8_enable_31bit_smis_on_64bit_arch = false
# The interpreted regexp engine exists as a platform-independent alternative
# in which the regular expression is compiled to a bytecode.
@@ -107,10 +123,6 @@ declare_args() {
# Enables various testing features.
v8_enable_test_features = ""
- # Build the snapshot with unwinding information for perf.
- # Sets -dV8_USE_SNAPSHOT_WITH_UNWINDING_INFO.
- v8_perf_prof_unwinding_info = false
-
# With post mortem support enabled, metadata is embedded into libv8 that
# describes various parameters of the VM for use by debuggers. See
# tools/gen-postmortem-metadata.py for details.
@@ -140,12 +152,6 @@ declare_args() {
# This default is used by cctests. Projects using V8 will want to override.
v8_extra_library_files = [ "//test/cctest/test-extra.js" ]
- # Like v8_extra_library_files but for experimental features.
- #
- # This default is used by cctests. Projects using V8 will want to override.
- v8_experimental_extra_library_files =
- [ "//test/cctest/test-experimental-extra.js" ]
-
v8_enable_gdbjit =
((v8_current_cpu == "x86" || v8_current_cpu == "x64") &&
(is_linux || is_mac)) || (v8_current_cpu == "ppc64" && is_linux)
@@ -155,7 +161,12 @@ declare_args() {
v8_check_microtasks_scopes_consistency = ""
# Enable mitigations for executing untrusted code.
- v8_untrusted_code_mitigations = true
+ # Disabled by default on ia32 due to conflicting requirements with embedded
+ # builtins. Enabled by default on Android, which doesn't support site
+ # isolation in Chrome, and on simulator builds, which test code generation
+ # for these platforms.
+ v8_untrusted_code_mitigations =
+ v8_current_cpu != "x86" && (is_android || is_target_simulator)
# Enable minor mark compact.
v8_enable_minor_mc = true
@@ -201,17 +212,13 @@ if (v8_check_microtasks_scopes_consistency == "") {
v8_enable_debugging_features || dcheck_always_on
}
-assert(!v8_enable_embedded_builtins || v8_use_snapshot,
- "Embedded builtins only work with snapshots")
-assert(
- v8_current_cpu != "x86" || !v8_enable_embedded_builtins ||
- !v8_untrusted_code_mitigations,
- "Embedded builtins on ia32 and untrusted code mitigations are incompatible")
+assert(v8_current_cpu != "x86" || !v8_untrusted_code_mitigations,
+ "Untrusted code mitigations are unsupported on ia32")
-# Specifies if the target build is a simulator build. Comparing target cpu
-# with v8 target cpu to not affect simulator builds for making cross-compile
-# snapshots.
-is_target_simulator = target_cpu != v8_target_cpu
+assert(!v8_enable_lite_mode || v8_enable_embedded_builtins,
+ "Lite mode requires embedded builtins")
+assert(!v8_enable_lite_mode || v8_use_snapshot,
+ "Lite mode requires a snapshot build")
v8_random_seed = "314159265"
v8_toolset_for_shell = "host"
@@ -227,10 +234,10 @@ config("internal_config") {
"$target_gen_dir",
]
- defines = []
+ configs = [ "//build/config/compiler:wexit_time_destructors" ]
if (is_component_build) {
- defines += [ "BUILDING_V8_SHARED" ]
+ defines = [ "BUILDING_V8_SHARED" ]
}
}
@@ -317,6 +324,13 @@ config("features") {
if (v8_enable_future) {
defines += [ "V8_ENABLE_FUTURE" ]
}
+ if (v8_enable_lite_mode) {
+ defines += [ "V8_LITE_MODE" ]
+
+ # TODO(v8:7777): Remove the define once the --jitless runtime flag does
+ # everything we need.
+ defines += [ "V8_JITLESS_MODE" ]
+ }
if (v8_enable_gdbjit) {
defines += [ "ENABLE_GDB_JIT_INTERFACE" ]
}
@@ -329,6 +343,9 @@ config("features") {
if (v8_enable_pointer_compression) {
defines += [ "V8_COMPRESS_POINTERS" ]
}
+ if (v8_enable_31bit_smis_on_64bit_arch) {
+ defines += [ "V8_31BIT_SMIS_ON_64BIT_ARCH" ]
+ }
if (v8_enable_object_print) {
defines += [ "OBJECT_PRINT" ]
}
@@ -354,7 +371,7 @@ config("features") {
if (v8_enable_v8_checks) {
defines += [ "V8_ENABLE_CHECKS" ]
}
- if (v8_interpreted_regexp) {
+ if (v8_interpreted_regexp || v8_enable_lite_mode) {
defines += [ "V8_INTERPRETED_REGEXP" ]
}
if (v8_deprecation_warnings) {
@@ -371,9 +388,6 @@ config("features") {
}
if (v8_use_snapshot) {
defines += [ "V8_USE_SNAPSHOT" ]
- if (v8_perf_prof_unwinding_info) {
- defines += [ "V8_USE_SNAPSHOT_WITH_UNWINDING_INFO" ]
- }
}
if (v8_use_external_startup_data) {
defines += [ "V8_USE_EXTERNAL_STARTUP_DATA" ]
@@ -385,10 +399,7 @@ config("features") {
defines += [ "V8_CHECK_MICROTASKS_SCOPES_CONSISTENCY" ]
}
if (v8_enable_embedded_builtins) {
- defines += [
- "V8_EMBEDDED_BUILTINS",
- "V8_EMBEDDED_BYTECODE_HANDLERS",
- ]
+ defines += [ "V8_EMBEDDED_BUILTINS" ]
}
if (v8_use_multi_snapshots) {
defines += [ "V8_MULTI_SNAPSHOTS" ]
@@ -667,44 +678,35 @@ config("v8_gcov_coverage_ldflags") {
# Actions
#
-action("js2c") {
- visibility = [ ":*" ] # Only targets in this file can depend on this.
-
- script = "tools/js2c.py"
-
- # The script depends on this other script, this rule causes a rebuild if it
- # changes.
- inputs = [
- "tools/jsmin.py",
- ]
-
- # NOSORT
- sources = [
- "src/js/macros.py",
- "src/messages.h",
- "src/js/prologue.js",
- "src/js/array.js",
- "src/js/typedarray.js",
- ]
-
- outputs = [
- "$target_gen_dir/libraries.cc",
- ]
-
- if (v8_enable_i18n_support) {
- sources += [ "src/js/intl.js" ]
+# Only for Windows clang builds. Converts the embedded.S file produced by
+# mksnapshot into an embedded.cc file with corresponding inline assembly.
+template("asm_to_inline_asm") {
+ name = target_name
+ if (name == "default") {
+ suffix = ""
+ } else {
+ suffix = "_$name"
}
- args = [
- rebase_path("$target_gen_dir/libraries.cc", root_build_dir),
- "CORE",
- ] + rebase_path(sources, root_build_dir)
+ action("asm_to_inline_asm_" + name) {
+ visibility = [ ":*" ] # Only targets in this file can depend on this.
- if (v8_use_external_startup_data) {
- outputs += [ "$target_gen_dir/libraries.bin" ]
+ assert(emit_builtins_as_inline_asm)
+
+ script = "tools/snapshot/asm_to_inline_asm.py"
+ deps = [
+ ":run_mksnapshot_" + name,
+ ]
+ sources = [
+ "$target_gen_dir/embedded${suffix}.S",
+ ]
+ outputs = [
+ "$target_gen_dir/embedded${suffix}.cc",
+ ]
+ args = invoker.args
args += [
- "--startup_blob",
- rebase_path("$target_gen_dir/libraries.bin", root_build_dir),
+ rebase_path("$target_gen_dir/embedded${suffix}.S", root_build_dir),
+ rebase_path("$target_gen_dir/embedded${suffix}.cc", root_build_dir),
]
}
}
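# Illustrative use of the template above, mirroring the "default" and
# "trusted" invocations that appear later in this file:
#   asm_to_inline_asm("default") {
#     args = []
#   }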
@@ -714,12 +716,6 @@ action("js2c_extras") {
script = "tools/js2c.py"
- # The script depends on this other script, this rule causes a rebuild if it
- # changes.
- inputs = [
- "tools/jsmin.py",
- ]
-
sources = v8_extra_library_files
outputs = [
@@ -740,58 +736,6 @@ action("js2c_extras") {
}
}
-action("js2c_experimental_extras") {
- visibility = [ ":*" ] # Only targets in this file can depend on this.
-
- script = "tools/js2c.py"
-
- # The script depends on this other script, this rule causes a rebuild if it
- # changes.
- inputs = [
- "tools/jsmin.py",
- ]
-
- sources = v8_experimental_extra_library_files
-
- outputs = [
- "$target_gen_dir/experimental-extras-libraries.cc",
- ]
-
- args = [
- rebase_path("$target_gen_dir/experimental-extras-libraries.cc",
- root_build_dir),
- "EXPERIMENTAL_EXTRAS",
- ] + rebase_path(sources, root_build_dir)
-
- if (v8_use_external_startup_data) {
- outputs += [ "$target_gen_dir/libraries_experimental_extras.bin" ]
- args += [
- "--startup_blob",
- rebase_path("$target_gen_dir/libraries_experimental_extras.bin",
- root_build_dir),
- ]
- }
-}
-
-action("d8_js2c") {
- visibility = [ ":*" ] # Only targets in this file can depend on this.
-
- script = "tools/js2c.py"
-
- # NOSORT
- inputs = [
- "src/d8.js",
- "src/js/macros.py",
- ]
-
- outputs = [
- "$target_gen_dir/d8-js.cc",
- ]
-
- args = rebase_path(outputs, root_build_dir) + [ "D8" ] +
- rebase_path(inputs, root_build_dir)
-}
-
if (is_android && enable_java_templates) {
android_assets("v8_external_startup_data_assets") {
if (v8_use_external_startup_data) {
@@ -820,16 +764,12 @@ if (v8_use_external_startup_data) {
visibility = [ ":*" ] # Only targets in this file can depend on this.
deps = [
- ":js2c",
- ":js2c_experimental_extras",
":js2c_extras",
]
# NOSORT
sources = [
- "$target_gen_dir/libraries.bin",
"$target_gen_dir/libraries_extras.bin",
- "$target_gen_dir/libraries_experimental_extras.bin",
]
outputs = [
@@ -862,18 +802,29 @@ action("postmortem-metadata") {
"src/objects-inl.h",
"src/objects/allocation-site-inl.h",
"src/objects/allocation-site.h",
+ "src/objects/cell-inl.h",
+ "src/objects/cell.h",
"src/objects/code-inl.h",
"src/objects/code.h",
"src/objects/data-handler.h",
"src/objects/data-handler-inl.h",
+ "src/objects/feedback-cell.h",
+ "src/objects/feedback-cell-inl.h",
"src/objects/fixed-array-inl.h",
"src/objects/fixed-array.h",
+ "src/objects/heap-number-inl.h",
+ "src/objects/heap-number.h",
+ "src/objects/heap-object-inl.h",
+ "src/objects/heap-object.h",
+ "src/objects/instance-type.h",
"src/objects/js-array-inl.h",
"src/objects/js-array.h",
"src/objects/js-array-buffer-inl.h",
"src/objects/js-array-buffer.h",
"src/objects/js-objects-inl.h",
"src/objects/js-objects.h",
+ "src/objects/js-promise-inl.h",
+ "src/objects/js-promise.h",
"src/objects/js-regexp-inl.h",
"src/objects/js-regexp.h",
"src/objects/js-regexp-string-iterator-inl.h",
@@ -882,6 +833,8 @@ action("postmortem-metadata") {
"src/objects/map-inl.h",
"src/objects/name.h",
"src/objects/name-inl.h",
+ "src/objects/oddball-inl.h",
+ "src/objects/oddball.h",
"src/objects/scope-info.h",
"src/objects/script.h",
"src/objects/script-inl.h",
@@ -889,6 +842,8 @@ action("postmortem-metadata") {
"src/objects/shared-function-info-inl.h",
"src/objects/string.h",
"src/objects/string-inl.h",
+ "src/objects/struct.h",
+ "src/objects/struct-inl.h",
]
outputs = [
@@ -901,24 +856,41 @@ action("postmortem-metadata") {
torque_files = [
"src/builtins/base.tq",
+ "src/builtins/frames.tq",
+ "src/builtins/arguments.tq",
"src/builtins/array.tq",
"src/builtins/array-copywithin.tq",
+ "src/builtins/array-filter.tq",
"src/builtins/array-foreach.tq",
+ "src/builtins/array-join.tq",
"src/builtins/array-lastindexof.tq",
+ "src/builtins/array-of.tq",
"src/builtins/array-reverse.tq",
+ "src/builtins/array-slice.tq",
"src/builtins/array-splice.tq",
"src/builtins/array-unshift.tq",
- "src/builtins/typed-array.tq",
+ "src/builtins/collections.tq",
"src/builtins/data-view.tq",
+ "src/builtins/extras-utils.tq",
+ "src/builtins/object.tq",
+ "src/builtins/object-fromentries.tq",
+ "src/builtins/iterator.tq",
+ "src/builtins/typed-array.tq",
+ "src/builtins/typed-array-createtypedarray.tq",
"test/torque/test-torque.tq",
"third_party/v8/builtins/array-sort.tq",
]
-torque_modules = [
+torque_namespaces = [
"base",
+ "arguments",
"array",
+ "collections",
+ "iterator",
+ "object",
"typed-array",
"data-view",
+ "extras-utils",
"test",
]
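# Each namespace listed above maps to one generated source/header pair, e.g.
# "array" yields torque-generated/builtins-array-from-dsl-gen.cc and .h (see
# the foreach over torque_namespaces in run_torque below).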
@@ -940,10 +912,10 @@ action("run_torque") {
outputs = [
"$target_gen_dir/torque-generated/builtin-definitions-from-dsl.h",
]
- foreach(module, torque_modules) {
+ foreach(namespace, torque_namespaces) {
outputs += [
- "$target_gen_dir/torque-generated/builtins-$module-from-dsl-gen.cc",
- "$target_gen_dir/torque-generated/builtins-$module-from-dsl-gen.h",
+ "$target_gen_dir/torque-generated/builtins-$namespace-from-dsl-gen.cc",
+ "$target_gen_dir/torque-generated/builtins-$namespace-from-dsl-gen.h",
]
}
@@ -960,20 +932,6 @@ action("run_torque") {
}
}
-v8_header_set("torque_generated_core") {
- visibility = [ ":*" ] # Only targets in this file can depend on this.
-
- deps = [
- ":run_torque",
- ]
-
- sources = [
- "$target_gen_dir/torque-generated/builtin-definitions-from-dsl.h",
- ]
-
- configs = [ ":internal_config" ]
-}
-
v8_source_set("torque_generated_initializers") {
visibility = [ ":*" ] # Only targets in this file can depend on this.
@@ -989,10 +947,10 @@ v8_source_set("torque_generated_initializers") {
}
sources = []
- foreach(module, torque_modules) {
+ foreach(namespace, torque_namespaces) {
sources += [
- "$target_gen_dir/torque-generated/builtins-$module-from-dsl-gen.cc",
- "$target_gen_dir/torque-generated/builtins-$module-from-dsl-gen.h",
+ "$target_gen_dir/torque-generated/builtins-$namespace-from-dsl-gen.cc",
+ "$target_gen_dir/torque-generated/builtins-$namespace-from-dsl-gen.h",
]
}
@@ -1013,14 +971,15 @@ action("generate_bytecode_builtins_list") {
":bytecode_builtins_list_generator($v8_generator_toolchain)",
"root_out_dir") + "/bytecode_builtins_list_generator",
root_build_dir),
- rebase_path("$target_gen_dir/builtins-generated/bytecodes-builtins-list.h"),
+ rebase_path("$target_gen_dir/builtins-generated/bytecodes-builtins-list.h",
+ root_build_dir),
]
}
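# Note on the fix above: rebase_path(path, base) rewrites a path relative to
# the given base. Passing root_build_dir explicitly keeps the argument
# relative to the build directory, which is the form GN action scripts expect.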
# Template to generate different V8 snapshots based on different runtime flags.
# Can be invoked with run_mksnapshot(<name>). The target will resolve to
# run_mksnapshot_<name>. If <name> is "default", no file suffixes will be used.
-# Otherwise files are suffixed, e.g. embedded_<name>.cc and
+# Otherwise files are suffixed, e.g. embedded_<name>.S and
# snapshot_blob_<name>.bin.
#
# The template exposes the variables:
@@ -1059,10 +1018,10 @@ template("run_mksnapshot") {
args += invoker.args
if (v8_enable_embedded_builtins) {
- outputs += [ "$target_gen_dir/embedded${suffix}.cc" ]
+ outputs += [ "$target_gen_dir/embedded${suffix}.S" ]
args += [
"--embedded_src",
- rebase_path("$target_gen_dir/embedded${suffix}.cc", root_build_dir),
+ rebase_path("$target_gen_dir/embedded${suffix}.S", root_build_dir),
]
if (invoker.embedded_variant != "") {
args += [
@@ -1086,10 +1045,6 @@ template("run_mksnapshot") {
]
}
- if (v8_perf_prof_unwinding_info) {
- args += [ "--perf-prof-unwinding-info" ]
- }
-
if (v8_use_external_startup_data) {
outputs += [ "$root_out_dir/snapshot_blob${suffix}.bin" ]
data += [ "$root_out_dir/snapshot_blob${suffix}.bin" ]
@@ -1110,6 +1065,10 @@ template("run_mksnapshot") {
args += [ rebase_path(v8_embed_script, root_build_dir) ]
}
+ if (v8_enable_snapshot_code_comments) {
+ args += [ "--code-comments" ]
+ }
+
if (v8_enable_fast_mksnapshot) {
args += [
"--no-turbo-rewrite-far-jumps",
@@ -1131,6 +1090,11 @@ if (v8_use_snapshot) {
embedded_variant = "Default"
}
}
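# On Windows clang builds (emit_builtins_as_inline_asm), the embedded.S file
# emitted by mksnapshot cannot be assembled directly, so it is converted to an
# embedded.cc with inline assembly via the asm_to_inline_asm template defined
# earlier in this file.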
+ if (emit_builtins_as_inline_asm) {
+ asm_to_inline_asm("default") {
+ args = []
+ }
+ }
if (v8_use_multi_snapshots) {
run_mksnapshot("trusted") {
args = [ "--no-untrusted-code-mitigations" ]
@@ -1138,6 +1102,11 @@ if (v8_use_snapshot) {
embedded_variant = "Trusted"
}
}
+ if (emit_builtins_as_inline_asm) {
+ asm_to_inline_asm("trusted") {
+ args = []
+ }
+ }
}
}
@@ -1166,6 +1135,10 @@ action("v8_dump_build_config") {
"v8_enable_verify_predictable=$v8_enable_verify_predictable",
"v8_target_cpu=\"$v8_target_cpu\"",
"v8_use_snapshot=$v8_use_snapshot",
+ "v8_enable_embedded_builtins=$v8_enable_embedded_builtins",
+ "v8_enable_verify_csa=$v8_enable_verify_csa",
+ "v8_enable_lite_mode=$v8_enable_lite_mode",
+ "v8_enable_pointer_compression=$v8_enable_pointer_compression",
]
if (v8_current_cpu == "mips" || v8_current_cpu == "mipsel" ||
@@ -1203,29 +1176,16 @@ v8_source_set("v8_nosnapshot") {
visibility = [ ":*" ] # Only targets in this file can depend on this.
deps = [
- ":js2c",
- ":js2c_experimental_extras",
":js2c_extras",
":v8_base",
]
sources = [
- "$target_gen_dir/experimental-extras-libraries.cc",
"$target_gen_dir/extras-libraries.cc",
- "$target_gen_dir/libraries.cc",
"src/snapshot/embedded-empty.cc",
"src/snapshot/snapshot-empty.cc",
]
- if (use_jumbo_build == true) {
- jumbo_excluded_sources = [
- # TODO(mostynb@vewd.com): don't exclude these http://crbug.com/752428
- # Generated source, contains same variable names as libraries.cc
- "$target_gen_dir/experimental-extras-libraries.cc",
- "$target_gen_dir/libraries.cc",
- ]
- }
-
configs = [ ":internal_config" ]
}
@@ -1239,8 +1199,6 @@ if (v8_use_snapshot && !v8_use_external_startup_data) {
]
deps = [
- ":js2c",
- ":js2c_experimental_extras",
":js2c_extras",
":v8_base",
]
@@ -1254,28 +1212,20 @@ if (v8_use_snapshot && !v8_use_external_startup_data) {
public = []
sources = [
- "$target_gen_dir/experimental-extras-libraries.cc",
"$target_gen_dir/extras-libraries.cc",
- "$target_gen_dir/libraries.cc",
"$target_gen_dir/snapshot.cc",
"src/setup-isolate-deserialize.cc",
]
- if (v8_enable_embedded_builtins) {
+ if (v8_enable_embedded_builtins && emit_builtins_as_inline_asm) {
+ deps += [ ":asm_to_inline_asm_default" ]
sources += [ "$target_gen_dir/embedded.cc" ]
+ } else if (v8_enable_embedded_builtins) {
+ sources += [ "$target_gen_dir/embedded.S" ]
} else {
sources += [ "src/snapshot/embedded-empty.cc" ]
}
- if (use_jumbo_build == true) {
- jumbo_excluded_sources = [
- # TODO(mostynb@vewd.com): don't exclude these http://crbug.com/752428
- # Generated source, contains same variable names as libraries.cc
- "$target_gen_dir/experimental-extras-libraries.cc",
- "$target_gen_dir/libraries.cc",
- ]
- }
-
configs = [ ":internal_config" ]
}
}
@@ -1285,8 +1235,6 @@ if (v8_use_snapshot && v8_use_external_startup_data) {
visibility = [ ":*" ] # Only targets in this file can depend on this.
deps = [
- ":js2c",
- ":js2c_experimental_extras",
":js2c_extras",
":v8_base",
]
@@ -1308,16 +1256,28 @@ if (v8_use_snapshot && v8_use_external_startup_data) {
    # Do not publish any headers, to avoid adding a build dependency.
public = []
- if (v8_enable_embedded_builtins) {
+ if (v8_enable_embedded_builtins && emit_builtins_as_inline_asm) {
+ deps += [ ":asm_to_inline_asm_default" ]
sources += [ "$target_gen_dir/embedded.cc" ]
if (v8_use_multi_snapshots) {
+ deps += [ ":asm_to_inline_asm_trusted" ]
sources += [ "$target_gen_dir/embedded_trusted.cc" ]
if (use_jumbo_build == true) {
+ jumbo_excluded_sources = [ "$target_gen_dir/embedded_trusted.cc" ]
+ }
+ }
+ } else if (v8_enable_embedded_builtins) {
+ sources += [ "$target_gen_dir/embedded.S" ]
+
+ if (v8_use_multi_snapshots) {
+ sources += [ "$target_gen_dir/embedded_trusted.S" ]
+
+ if (use_jumbo_build == true) {
jumbo_excluded_sources = [
- # Duplicated symbols with embedded.cc
- "$target_gen_dir/embedded_trusted.cc",
+ # Duplicated symbols with embedded.S
+ "$target_gen_dir/embedded_trusted.S",
]
}
}
@@ -1351,6 +1311,7 @@ v8_source_set("v8_initializers") {
"src/builtins/builtins-async-gen.h",
"src/builtins/builtins-async-generator-gen.cc",
"src/builtins/builtins-async-iterator-gen.cc",
+ "src/builtins/builtins-bigint-gen.cc",
"src/builtins/builtins-boolean-gen.cc",
"src/builtins/builtins-call-gen.cc",
"src/builtins/builtins-call-gen.h",
@@ -1377,6 +1338,7 @@ v8_source_set("v8_initializers") {
"src/builtins/builtins-lazy-gen.h",
"src/builtins/builtins-math-gen.cc",
"src/builtins/builtins-math-gen.h",
+ "src/builtins/builtins-microtask-queue-gen.cc",
"src/builtins/builtins-number-gen.cc",
"src/builtins/builtins-object-gen.cc",
"src/builtins/builtins-promise-gen.cc",
@@ -1397,6 +1359,8 @@ v8_source_set("v8_initializers") {
"src/builtins/growable-fixed-array-gen.cc",
"src/builtins/growable-fixed-array-gen.h",
"src/builtins/setup-builtins-internal.cc",
+ "src/code-stub-assembler.cc",
+ "src/code-stub-assembler.h",
"src/heap/setup-heap-internal.cc",
"src/ic/accessor-assembler.cc",
"src/ic/accessor-assembler.h",
@@ -1418,9 +1382,10 @@ v8_source_set("v8_initializers") {
"src/builtins/builtins-async-iterator-gen.cc",
"src/builtins/builtins-async-generator-gen.cc",
- # This source file takes an unusually large amount of time to
- # compile. Build it separately to avoid bottlenecks.
+ # These source files take an unusually large amount of time to
+ # compile. Build them separately to avoid bottlenecks.
"src/builtins/builtins-regexp-gen.cc",
+ "src/code-stub-assembler.cc",
]
}
@@ -1516,6 +1481,14 @@ v8_header_set("v8_headers") {
"include/v8config.h",
]
+ if (is_linux || is_mac) {
+ sources += [ "include/v8-wasm-trap-handler-posix.h" ]
+ }
+
+ if (is_win) {
+ sources += [ "include/v8-wasm-trap-handler-win.h" ]
+ }
+
deps = [
":v8_version",
]
@@ -1539,6 +1512,7 @@ v8_source_set("v8_base") {
"include/v8-profiler.h",
"include/v8-testing.h",
"include/v8-util.h",
+ "include/v8-wasm-trap-handler-posix.h",
"include/v8.h",
"include/v8config.h",
"src/accessors.cc",
@@ -1569,8 +1543,6 @@ v8_source_set("v8_base") {
"src/asmjs/asm-scanner.h",
"src/asmjs/asm-types.cc",
"src/asmjs/asm-types.h",
- "src/asmjs/switch-logic.cc",
- "src/asmjs/switch-logic.h",
"src/assembler-arch-inl.h",
"src/assembler-arch.h",
"src/assembler-inl.h",
@@ -1586,13 +1558,10 @@ v8_source_set("v8_base") {
"src/ast/ast-value-factory.h",
"src/ast/ast.cc",
"src/ast/ast.h",
- "src/ast/context-slot-cache.cc",
- "src/ast/context-slot-cache.h",
"src/ast/modules.cc",
"src/ast/modules.h",
"src/ast/prettyprinter.cc",
"src/ast/prettyprinter.h",
- "src/ast/scopes-inl.h",
"src/ast/scopes.cc",
"src/ast/scopes.h",
"src/ast/source-range-ast-visitor.cc",
@@ -1627,16 +1596,17 @@ v8_source_set("v8_base") {
"src/builtins/builtins-definitions.h",
"src/builtins/builtins-descriptors.h",
"src/builtins/builtins-error.cc",
+ "src/builtins/builtins-extras-utils.cc",
"src/builtins/builtins-function.cc",
"src/builtins/builtins-global.cc",
"src/builtins/builtins-internal.cc",
- "src/builtins/builtins-interpreter.cc",
"src/builtins/builtins-intl.cc",
"src/builtins/builtins-json.cc",
"src/builtins/builtins-math.cc",
"src/builtins/builtins-number.cc",
"src/builtins/builtins-object.cc",
"src/builtins/builtins-promise.cc",
+ "src/builtins/builtins-promise.h",
"src/builtins/builtins-reflect.cc",
"src/builtins/builtins-regexp.cc",
"src/builtins/builtins-sharedarraybuffer.cc",
@@ -1646,6 +1616,7 @@ v8_source_set("v8_base") {
"src/builtins/builtins-typed-array.cc",
"src/builtins/builtins-utils-inl.h",
"src/builtins/builtins-utils.h",
+ "src/builtins/builtins-weak-refs.cc",
"src/builtins/builtins.cc",
"src/builtins/builtins.h",
"src/builtins/constants-table-builder.cc",
@@ -1659,34 +1630,23 @@ v8_source_set("v8_base") {
"src/char-predicates.cc",
"src/char-predicates.h",
"src/checks.h",
+ "src/code-comments.cc",
+ "src/code-comments.h",
"src/code-events.h",
"src/code-factory.cc",
"src/code-factory.h",
"src/code-reference.cc",
"src/code-reference.h",
- "src/code-stub-assembler.cc",
- "src/code-stub-assembler.h",
- "src/code-stubs-utils.h",
- "src/code-stubs.cc",
- "src/code-stubs.h",
"src/code-tracer.h",
- "src/codegen.cc",
- "src/codegen.h",
"src/collector.h",
"src/compilation-cache.cc",
"src/compilation-cache.h",
"src/compilation-statistics.cc",
"src/compilation-statistics.h",
- "src/compiler-dispatcher/compiler-dispatcher-job.cc",
- "src/compiler-dispatcher/compiler-dispatcher-job.h",
- "src/compiler-dispatcher/compiler-dispatcher-tracer.cc",
- "src/compiler-dispatcher/compiler-dispatcher-tracer.h",
"src/compiler-dispatcher/compiler-dispatcher.cc",
"src/compiler-dispatcher/compiler-dispatcher.h",
"src/compiler-dispatcher/optimizing-compile-dispatcher.cc",
"src/compiler-dispatcher/optimizing-compile-dispatcher.h",
- "src/compiler-dispatcher/unoptimized-compile-job.cc",
- "src/compiler-dispatcher/unoptimized-compile-job.h",
"src/compiler.cc",
"src/compiler.h",
"src/compiler/access-builder.cc",
@@ -1695,7 +1655,34 @@ v8_source_set("v8_base") {
"src/compiler/access-info.h",
"src/compiler/all-nodes.cc",
"src/compiler/all-nodes.h",
+ "src/compiler/allocation-builder-inl.h",
"src/compiler/allocation-builder.h",
+ "src/compiler/backend/code-generator-impl.h",
+ "src/compiler/backend/code-generator.cc",
+ "src/compiler/backend/code-generator.h",
+ "src/compiler/backend/frame-elider.cc",
+ "src/compiler/backend/frame-elider.h",
+ "src/compiler/backend/gap-resolver.cc",
+ "src/compiler/backend/gap-resolver.h",
+ "src/compiler/backend/instruction-codes.h",
+ "src/compiler/backend/instruction-scheduler.cc",
+ "src/compiler/backend/instruction-scheduler.h",
+ "src/compiler/backend/instruction-selector-impl.h",
+ "src/compiler/backend/instruction-selector.cc",
+ "src/compiler/backend/instruction-selector.h",
+ "src/compiler/backend/instruction.cc",
+ "src/compiler/backend/instruction.h",
+ "src/compiler/backend/jump-threading.cc",
+ "src/compiler/backend/jump-threading.h",
+ "src/compiler/backend/live-range-separator.cc",
+ "src/compiler/backend/live-range-separator.h",
+ "src/compiler/backend/move-optimizer.cc",
+ "src/compiler/backend/move-optimizer.h",
+ "src/compiler/backend/register-allocator-verifier.cc",
+ "src/compiler/backend/register-allocator-verifier.h",
+ "src/compiler/backend/register-allocator.cc",
+ "src/compiler/backend/register-allocator.h",
+ "src/compiler/backend/unwinding-info-writer.h",
"src/compiler/basic-block-instrumentor.cc",
"src/compiler/basic-block-instrumentor.h",
"src/compiler/branch-elimination.cc",
@@ -1711,9 +1698,6 @@ v8_source_set("v8_base") {
"src/compiler/checkpoint-elimination.h",
"src/compiler/code-assembler.cc",
"src/compiler/code-assembler.h",
- "src/compiler/code-generator-impl.h",
- "src/compiler/code-generator.cc",
- "src/compiler/code-generator.h",
"src/compiler/common-node-cache.cc",
"src/compiler/common-node-cache.h",
"src/compiler/common-operator-reducer.cc",
@@ -1739,15 +1723,11 @@ v8_source_set("v8_base") {
"src/compiler/escape-analysis-reducer.h",
"src/compiler/escape-analysis.cc",
"src/compiler/escape-analysis.h",
- "src/compiler/frame-elider.cc",
- "src/compiler/frame-elider.h",
"src/compiler/frame-states.cc",
"src/compiler/frame-states.h",
"src/compiler/frame.cc",
"src/compiler/frame.h",
"src/compiler/functional-list.h",
- "src/compiler/gap-resolver.cc",
- "src/compiler/gap-resolver.h",
"src/compiler/graph-assembler.cc",
"src/compiler/graph-assembler.h",
"src/compiler/graph-reducer.cc",
@@ -1758,14 +1738,6 @@ v8_source_set("v8_base") {
"src/compiler/graph-visualizer.h",
"src/compiler/graph.cc",
"src/compiler/graph.h",
- "src/compiler/instruction-codes.h",
- "src/compiler/instruction-scheduler.cc",
- "src/compiler/instruction-scheduler.h",
- "src/compiler/instruction-selector-impl.h",
- "src/compiler/instruction-selector.cc",
- "src/compiler/instruction-selector.h",
- "src/compiler/instruction.cc",
- "src/compiler/instruction.h",
"src/compiler/int64-lowering.cc",
"src/compiler/int64-lowering.h",
"src/compiler/js-call-reducer.cc",
@@ -1796,12 +1768,8 @@ v8_source_set("v8_base") {
"src/compiler/js-type-hint-lowering.h",
"src/compiler/js-typed-lowering.cc",
"src/compiler/js-typed-lowering.h",
- "src/compiler/jump-threading.cc",
- "src/compiler/jump-threading.h",
"src/compiler/linkage.cc",
"src/compiler/linkage.h",
- "src/compiler/live-range-separator.cc",
- "src/compiler/live-range-separator.h",
"src/compiler/load-elimination.cc",
"src/compiler/load-elimination.h",
"src/compiler/loop-analysis.cc",
@@ -1820,8 +1788,6 @@ v8_source_set("v8_base") {
"src/compiler/machine-operator.h",
"src/compiler/memory-optimizer.cc",
"src/compiler/memory-optimizer.h",
- "src/compiler/move-optimizer.cc",
- "src/compiler/move-optimizer.h",
"src/compiler/node-aux-data.h",
"src/compiler/node-cache.cc",
"src/compiler/node-cache.h",
@@ -1859,10 +1825,6 @@ v8_source_set("v8_base") {
"src/compiler/redundancy-elimination.h",
"src/compiler/refs-map.cc",
"src/compiler/refs-map.h",
- "src/compiler/register-allocator-verifier.cc",
- "src/compiler/register-allocator-verifier.h",
- "src/compiler/register-allocator.cc",
- "src/compiler/register-allocator.h",
"src/compiler/representation-change.cc",
"src/compiler/representation-change.h",
"src/compiler/schedule.cc",
@@ -1871,6 +1833,8 @@ v8_source_set("v8_base") {
"src/compiler/scheduler.h",
"src/compiler/select-lowering.cc",
"src/compiler/select-lowering.h",
+ "src/compiler/serializer-for-background-compilation.cc",
+ "src/compiler/serializer-for-background-compilation.h",
"src/compiler/simd-scalar-lowering.cc",
"src/compiler/simd-scalar-lowering.h",
"src/compiler/simplified-lowering.cc",
@@ -1893,7 +1857,6 @@ v8_source_set("v8_base") {
"src/compiler/typer.h",
"src/compiler/types.cc",
"src/compiler/types.h",
- "src/compiler/unwinding-info-writer.h",
"src/compiler/value-numbering-reducer.cc",
"src/compiler/value-numbering-reducer.h",
"src/compiler/verifier.cc",
@@ -1902,6 +1865,9 @@ v8_source_set("v8_base") {
"src/compiler/wasm-compiler.h",
"src/compiler/zone-stats.cc",
"src/compiler/zone-stats.h",
+ "src/constant-pool.cc",
+ "src/constant-pool.h",
+ "src/constants-arch.h",
"src/contexts-inl.h",
"src/contexts.cc",
"src/contexts.h",
@@ -1911,6 +1877,7 @@ v8_source_set("v8_base") {
"src/counters-inl.h",
"src/counters.cc",
"src/counters.h",
+ "src/cpu-features.h",
"src/date.cc",
"src/date.h",
"src/dateparser-inl.h",
@@ -1923,6 +1890,8 @@ v8_source_set("v8_base") {
"src/debug/debug-frames.cc",
"src/debug/debug-frames.h",
"src/debug/debug-interface.h",
+ "src/debug/debug-property-iterator.cc",
+ "src/debug/debug-property-iterator.h",
"src/debug/debug-scope-iterator.cc",
"src/debug/debug-scope-iterator.h",
"src/debug/debug-scopes.cc",
@@ -1940,6 +1909,7 @@ v8_source_set("v8_base") {
"src/deoptimize-reason.h",
"src/deoptimizer.cc",
"src/deoptimizer.h",
+ "src/detachable-vector.cc",
"src/detachable-vector.h",
"src/disasm.h",
"src/disassembler.cc",
@@ -2061,6 +2031,7 @@ v8_source_set("v8_base") {
"src/heap/scavenger-inl.h",
"src/heap/scavenger.cc",
"src/heap/scavenger.h",
+ "src/heap/slot-set.cc",
"src/heap/slot-set.h",
"src/heap/spaces-inl.h",
"src/heap/spaces.cc",
@@ -2091,8 +2062,6 @@ v8_source_set("v8_base") {
"src/icu_util.h",
"src/identity-map.cc",
"src/identity-map.h",
- "src/instruction-stream.cc",
- "src/instruction-stream.h",
"src/interface-descriptors.cc",
"src/interface-descriptors.h",
"src/interpreter/block-coverage-builder.h",
@@ -2140,8 +2109,9 @@ v8_source_set("v8_base") {
"src/interpreter/interpreter-intrinsics.h",
"src/interpreter/interpreter.cc",
"src/interpreter/interpreter.h",
- "src/intl.cc",
- "src/intl.h",
+ "src/isolate-allocator.cc",
+ "src/isolate-allocator.h",
+ "src/isolate-data.h",
"src/isolate-inl.h",
"src/isolate.cc",
"src/isolate.h",
@@ -2168,7 +2138,6 @@ v8_source_set("v8_base") {
"src/lookup-inl.h",
"src/lookup.cc",
"src/lookup.h",
- "src/lsan.h",
"src/machine-type.cc",
"src/machine-type.h",
"src/macro-assembler-inl.h",
@@ -2179,8 +2148,13 @@ v8_source_set("v8_base") {
"src/math-random.h",
"src/maybe-handles-inl.h",
"src/maybe-handles.h",
+ "src/memcopy.cc",
+ "src/memcopy.h",
+ "src/message-template.h",
"src/messages.cc",
"src/messages.h",
+ "src/microtask-queue.cc",
+ "src/microtask-queue.h",
"src/msan.h",
"src/objects-body-descriptors-inl.h",
"src/objects-body-descriptors.h",
@@ -2196,6 +2170,8 @@ v8_source_set("v8_base") {
"src/objects/bigint.cc",
"src/objects/bigint.h",
"src/objects/builtin-function-id.h",
+ "src/objects/cell-inl.h",
+ "src/objects/cell.h",
"src/objects/code-inl.h",
"src/objects/code.h",
"src/objects/compilation-cache-inl.h",
@@ -2203,15 +2179,29 @@ v8_source_set("v8_base") {
"src/objects/debug-objects-inl.h",
"src/objects/debug-objects.cc",
"src/objects/debug-objects.h",
+ "src/objects/descriptor-array-inl.h",
"src/objects/descriptor-array.h",
+ "src/objects/dictionary-inl.h",
"src/objects/dictionary.h",
+ "src/objects/embedder-data-array-inl.h",
+ "src/objects/embedder-data-array.cc",
+ "src/objects/embedder-data-array.h",
+ "src/objects/embedder-data-slot-inl.h",
+ "src/objects/embedder-data-slot.h",
+ "src/objects/feedback-cell-inl.h",
+ "src/objects/feedback-cell.h",
"src/objects/fixed-array-inl.h",
"src/objects/fixed-array.h",
"src/objects/frame-array-inl.h",
"src/objects/frame-array.h",
"src/objects/hash-table-inl.h",
"src/objects/hash-table.h",
- "src/objects/intl-objects-inl.h",
+ "src/objects/heap-number-inl.h",
+ "src/objects/heap-number.h",
+ "src/objects/heap-object-inl.h",
+ "src/objects/heap-object.h",
+ "src/objects/instance-type-inl.h",
+ "src/objects/instance-type.h",
"src/objects/intl-objects.cc",
"src/objects/intl-objects.h",
"src/objects/js-array-buffer-inl.h",
@@ -2257,9 +2247,14 @@ v8_source_set("v8_base") {
"src/objects/js-relative-time-format-inl.h",
"src/objects/js-relative-time-format.cc",
"src/objects/js-relative-time-format.h",
+ "src/objects/js-segment-iterator-inl.h",
+ "src/objects/js-segment-iterator.cc",
+ "src/objects/js-segment-iterator.h",
"src/objects/js-segmenter-inl.h",
"src/objects/js-segmenter.cc",
"src/objects/js-segmenter.h",
+ "src/objects/js-weak-refs-inl.h",
+ "src/objects/js-weak-refs.h",
"src/objects/literal-objects-inl.h",
"src/objects/literal-objects.cc",
"src/objects/literal-objects.h",
@@ -2270,9 +2265,6 @@ v8_source_set("v8_base") {
"src/objects/maybe-object-inl.h",
"src/objects/maybe-object.h",
"src/objects/microtask-inl.h",
- "src/objects/microtask-queue-inl.h",
- "src/objects/microtask-queue.cc",
- "src/objects/microtask-queue.h",
"src/objects/microtask.h",
"src/objects/module-inl.h",
"src/objects/module.cc",
@@ -2281,6 +2273,8 @@ v8_source_set("v8_base") {
"src/objects/name.h",
"src/objects/object-macros-undef.h",
"src/objects/object-macros.h",
+ "src/objects/oddball-inl.h",
+ "src/objects/oddball.h",
"src/objects/ordered-hash-table-inl.h",
"src/objects/ordered-hash-table.cc",
"src/objects/ordered-hash-table.h",
@@ -2288,6 +2282,8 @@ v8_source_set("v8_base") {
"src/objects/promise.h",
"src/objects/property-array-inl.h",
"src/objects/property-array.h",
+ "src/objects/property-cell-inl.h",
+ "src/objects/property-cell.h",
"src/objects/property-descriptor-object-inl.h",
"src/objects/property-descriptor-object.h",
"src/objects/prototype-info-inl.h",
@@ -2299,11 +2295,17 @@ v8_source_set("v8_base") {
"src/objects/script.h",
"src/objects/shared-function-info-inl.h",
"src/objects/shared-function-info.h",
+ "src/objects/slots-atomic-inl.h",
+ "src/objects/slots-inl.h",
+ "src/objects/slots.h",
"src/objects/stack-frame-info-inl.h",
"src/objects/stack-frame-info.h",
"src/objects/string-inl.h",
+ "src/objects/string-table-inl.h",
"src/objects/string-table.h",
"src/objects/string.h",
+ "src/objects/struct-inl.h",
+ "src/objects/struct.h",
"src/objects/template-objects.cc",
"src/objects/template-objects.h",
"src/objects/templates-inl.h",
@@ -2312,10 +2314,9 @@ v8_source_set("v8_base") {
"src/optimized-compilation-info.h",
"src/ostreams.cc",
"src/ostreams.h",
- "src/parsing/duplicate-finder.h",
- "src/parsing/expression-classifier.h",
"src/parsing/expression-scope-reparenter.cc",
"src/parsing/expression-scope-reparenter.h",
+ "src/parsing/expression-scope.h",
"src/parsing/func-name-inferrer.cc",
"src/parsing/func-name-inferrer.h",
"src/parsing/parse-info.cc",
@@ -2326,9 +2327,9 @@ v8_source_set("v8_base") {
"src/parsing/parsing.cc",
"src/parsing/parsing.h",
"src/parsing/pattern-rewriter.cc",
- "src/parsing/preparsed-scope-data-impl.h",
- "src/parsing/preparsed-scope-data.cc",
- "src/parsing/preparsed-scope-data.h",
+ "src/parsing/preparse-data-impl.h",
+ "src/parsing/preparse-data.cc",
+ "src/parsing/preparse-data.h",
"src/parsing/preparser-logger.h",
"src/parsing/preparser.cc",
"src/parsing/preparser.h",
@@ -2344,6 +2345,7 @@ v8_source_set("v8_base") {
"src/pending-compilation-error-handler.h",
"src/perf-jit.cc",
"src/perf-jit.h",
+ "src/pointer-with-payload.h",
"src/profiler/allocation-tracker.cc",
"src/profiler/allocation-tracker.h",
"src/profiler/circular-queue-inl.h",
@@ -2377,6 +2379,8 @@ v8_source_set("v8_base") {
"src/property.cc",
"src/property.h",
"src/prototype.h",
+ "src/ptr-compr-inl.h",
+ "src/ptr-compr.h",
"src/regexp/bytecodes-irregexp.h",
"src/regexp/interpreter-irregexp.cc",
"src/regexp/interpreter-irregexp.h",
@@ -2400,8 +2404,10 @@ v8_source_set("v8_base") {
"src/regexp/regexp-stack.h",
"src/regexp/regexp-utils.cc",
"src/regexp/regexp-utils.h",
+ "src/register-arch.h",
"src/register-configuration.cc",
"src/register-configuration.h",
+ "src/register.h",
"src/reglist.h",
"src/reloc-info.cc",
"src/reloc-info.h",
@@ -2440,6 +2446,7 @@ v8_source_set("v8_base") {
"src/runtime/runtime-typedarray.cc",
"src/runtime/runtime-utils.h",
"src/runtime/runtime-wasm.cc",
+ "src/runtime/runtime-weak-refs.cc",
"src/runtime/runtime.cc",
"src/runtime/runtime.h",
"src/safepoint-table.cc",
@@ -2449,23 +2456,14 @@ v8_source_set("v8_base") {
"src/simulator-base.cc",
"src/simulator-base.h",
"src/simulator.h",
- "src/snapshot/builtin-deserializer-allocator.cc",
- "src/snapshot/builtin-deserializer-allocator.h",
- "src/snapshot/builtin-deserializer.cc",
- "src/snapshot/builtin-deserializer.h",
- "src/snapshot/builtin-serializer-allocator.cc",
- "src/snapshot/builtin-serializer-allocator.h",
- "src/snapshot/builtin-serializer.cc",
- "src/snapshot/builtin-serializer.h",
"src/snapshot/code-serializer.cc",
"src/snapshot/code-serializer.h",
- "src/snapshot/default-deserializer-allocator.cc",
- "src/snapshot/default-deserializer-allocator.h",
- "src/snapshot/default-serializer-allocator.cc",
- "src/snapshot/default-serializer-allocator.h",
+ "src/snapshot/deserializer-allocator.cc",
+ "src/snapshot/deserializer-allocator.h",
"src/snapshot/deserializer.cc",
"src/snapshot/deserializer.h",
- "src/snapshot/macros.h",
+ "src/snapshot/embedded-data.cc",
+ "src/snapshot/embedded-data.h",
"src/snapshot/natives-common.cc",
"src/snapshot/natives.h",
"src/snapshot/object-deserializer.cc",
@@ -2474,7 +2472,15 @@ v8_source_set("v8_base") {
"src/snapshot/partial-deserializer.h",
"src/snapshot/partial-serializer.cc",
"src/snapshot/partial-serializer.h",
+ "src/snapshot/read-only-deserializer.cc",
+ "src/snapshot/read-only-deserializer.h",
+ "src/snapshot/read-only-serializer.cc",
+ "src/snapshot/read-only-serializer.h",
"src/snapshot/references.h",
+ "src/snapshot/roots-serializer.cc",
+ "src/snapshot/roots-serializer.h",
+ "src/snapshot/serializer-allocator.cc",
+ "src/snapshot/serializer-allocator.h",
"src/snapshot/serializer-common.cc",
"src/snapshot/serializer-common.h",
"src/snapshot/serializer.cc",
@@ -2508,10 +2514,13 @@ v8_source_set("v8_base") {
"src/string-stream.h",
"src/strtod.cc",
"src/strtod.h",
+ "src/task-utils.cc",
+ "src/task-utils.h",
"src/third_party/siphash/halfsiphash.cc",
"src/third_party/siphash/halfsiphash.h",
"src/third_party/utf8-decoder/utf8-decoder.h",
- "src/torque-assembler.h",
+ "src/thread-id.cc",
+ "src/thread-id.h",
"src/tracing/trace-event.cc",
"src/tracing/trace-event.h",
"src/tracing/traced-value.cc",
@@ -2530,7 +2539,6 @@ v8_source_set("v8_base") {
"src/turbo-assembler.h",
"src/type-hints.cc",
"src/type-hints.h",
- "src/unicode-cache-inl.h",
"src/unicode-cache.h",
"src/unicode-decoder.cc",
"src/unicode-decoder.h",
@@ -2539,6 +2547,7 @@ v8_source_set("v8_base") {
"src/unicode.h",
"src/unoptimized-compilation-info.cc",
"src/unoptimized-compilation-info.h",
+ "src/unwinder.cc",
"src/uri.cc",
"src/uri.h",
"src/utils-inl.h",
@@ -2566,12 +2575,16 @@ v8_source_set("v8_base") {
"src/wasm/baseline/liftoff-compiler.cc",
"src/wasm/baseline/liftoff-compiler.h",
"src/wasm/baseline/liftoff-register.h",
+ "src/wasm/compilation-environment.h",
"src/wasm/decoder.h",
"src/wasm/function-body-decoder-impl.h",
"src/wasm/function-body-decoder.cc",
"src/wasm/function-body-decoder.h",
"src/wasm/function-compiler.cc",
"src/wasm/function-compiler.h",
+ "src/wasm/graph-builder-interface.cc",
+ "src/wasm/graph-builder-interface.h",
+ "src/wasm/js-to-wasm-wrapper-cache-inl.h",
"src/wasm/jump-table-assembler.cc",
"src/wasm/jump-table-assembler.h",
"src/wasm/leb-helper.h",
@@ -2583,6 +2596,8 @@ v8_source_set("v8_base") {
"src/wasm/module-compiler.h",
"src/wasm/module-decoder.cc",
"src/wasm/module-decoder.h",
+ "src/wasm/module-instantiate.cc",
+ "src/wasm/module-instantiate.h",
"src/wasm/object-access.h",
"src/wasm/signature-map.cc",
"src/wasm/signature-map.h",
@@ -2600,6 +2615,7 @@ v8_source_set("v8_base") {
"src/wasm/wasm-feature-flags.h",
"src/wasm/wasm-features.cc",
"src/wasm/wasm-features.h",
+ "src/wasm/wasm-import-wrapper-cache-inl.h",
"src/wasm/wasm-interpreter.cc",
"src/wasm/wasm-interpreter.h",
"src/wasm/wasm-js.cc",
@@ -2653,7 +2669,6 @@ v8_source_set("v8_base") {
# These source files take an unusually large amount of time to
# compile. Build them separately to avoid bottlenecks.
"src/api.cc",
- "src/code-stub-assembler.cc",
"src/elements.cc",
"src/heap/heap.cc",
"src/objects.cc",
@@ -2663,16 +2678,14 @@ v8_source_set("v8_base") {
if (v8_current_cpu == "x86") {
sources += [ ### gcmole(arch:ia32) ###
- "src/compiler/ia32/code-generator-ia32.cc",
- "src/compiler/ia32/instruction-codes-ia32.h",
- "src/compiler/ia32/instruction-scheduler-ia32.cc",
- "src/compiler/ia32/instruction-selector-ia32.cc",
+ "src/compiler/backend/ia32/code-generator-ia32.cc",
+ "src/compiler/backend/ia32/instruction-codes-ia32.h",
+ "src/compiler/backend/ia32/instruction-scheduler-ia32.cc",
+ "src/compiler/backend/ia32/instruction-selector-ia32.cc",
"src/debug/ia32/debug-ia32.cc",
"src/ia32/assembler-ia32-inl.h",
"src/ia32/assembler-ia32.cc",
"src/ia32/assembler-ia32.h",
- "src/ia32/code-stubs-ia32.cc",
- "src/ia32/codegen-ia32.cc",
"src/ia32/constants-ia32.h",
"src/ia32/cpu-ia32.cc",
"src/ia32/deoptimizer-ia32.cc",
@@ -2682,8 +2695,7 @@ v8_source_set("v8_base") {
"src/ia32/interface-descriptors-ia32.cc",
"src/ia32/macro-assembler-ia32.cc",
"src/ia32/macro-assembler-ia32.h",
- "src/ia32/simulator-ia32.cc",
- "src/ia32/simulator-ia32.h",
+ "src/ia32/register-ia32.h",
"src/ia32/sse-instr.h",
"src/regexp/ia32/regexp-macro-assembler-ia32.cc",
"src/regexp/ia32/regexp-macro-assembler-ia32.h",
@@ -2691,12 +2703,12 @@ v8_source_set("v8_base") {
]
} else if (v8_current_cpu == "x64") {
sources += [ ### gcmole(arch:x64) ###
- "src/compiler/x64/code-generator-x64.cc",
- "src/compiler/x64/instruction-codes-x64.h",
- "src/compiler/x64/instruction-scheduler-x64.cc",
- "src/compiler/x64/instruction-selector-x64.cc",
- "src/compiler/x64/unwinding-info-writer-x64.cc",
- "src/compiler/x64/unwinding-info-writer-x64.h",
+ "src/compiler/backend/x64/code-generator-x64.cc",
+ "src/compiler/backend/x64/instruction-codes-x64.h",
+ "src/compiler/backend/x64/instruction-scheduler-x64.cc",
+ "src/compiler/backend/x64/instruction-selector-x64.cc",
+ "src/compiler/backend/x64/unwinding-info-writer-x64.cc",
+ "src/compiler/backend/x64/unwinding-info-writer-x64.h",
"src/debug/x64/debug-x64.cc",
"src/regexp/x64/regexp-macro-assembler-x64.cc",
"src/regexp/x64/regexp-macro-assembler-x64.h",
@@ -2705,8 +2717,6 @@ v8_source_set("v8_base") {
"src/x64/assembler-x64-inl.h",
"src/x64/assembler-x64.cc",
"src/x64/assembler-x64.h",
- "src/x64/code-stubs-x64.cc",
- "src/x64/codegen-x64.cc",
"src/x64/constants-x64.h",
"src/x64/cpu-x64.cc",
"src/x64/deoptimizer-x64.cc",
@@ -2717,27 +2727,28 @@ v8_source_set("v8_base") {
"src/x64/interface-descriptors-x64.cc",
"src/x64/macro-assembler-x64.cc",
"src/x64/macro-assembler-x64.h",
- "src/x64/simulator-x64.cc",
- "src/x64/simulator-x64.h",
+ "src/x64/register-x64.h",
"src/x64/sse-instr.h",
]
- if (is_linux) {
+ if (is_linux || is_mac) {
sources += [
- "src/trap-handler/handler-inside-linux.cc",
- "src/trap-handler/handler-outside-linux.cc",
+ "src/trap-handler/handler-inside-posix.cc",
+ "src/trap-handler/handler-inside-posix.h",
+ "src/trap-handler/handler-outside-posix.cc",
]
}
if (is_win) {
- sources += [ "src/trap-handler/handler-outside-win.cc" ]
+ sources += [
+ "src/trap-handler/handler-inside-win.cc",
+ "src/trap-handler/handler-inside-win.h",
+ "src/trap-handler/handler-outside-win.cc",
+ ]
}
} else if (v8_current_cpu == "arm") {
sources += [ ### gcmole(arch:arm) ###
"src/arm/assembler-arm-inl.h",
"src/arm/assembler-arm.cc",
"src/arm/assembler-arm.h",
- "src/arm/code-stubs-arm.cc",
- "src/arm/code-stubs-arm.h",
- "src/arm/codegen-arm.cc",
"src/arm/constants-arm.cc",
"src/arm/constants-arm.h",
"src/arm/cpu-arm.cc",
@@ -2749,14 +2760,15 @@ v8_source_set("v8_base") {
"src/arm/interface-descriptors-arm.cc",
"src/arm/macro-assembler-arm.cc",
"src/arm/macro-assembler-arm.h",
+ "src/arm/register-arm.h",
"src/arm/simulator-arm.cc",
"src/arm/simulator-arm.h",
- "src/compiler/arm/code-generator-arm.cc",
- "src/compiler/arm/instruction-codes-arm.h",
- "src/compiler/arm/instruction-scheduler-arm.cc",
- "src/compiler/arm/instruction-selector-arm.cc",
- "src/compiler/arm/unwinding-info-writer-arm.cc",
- "src/compiler/arm/unwinding-info-writer-arm.h",
+ "src/compiler/backend/arm/code-generator-arm.cc",
+ "src/compiler/backend/arm/instruction-codes-arm.h",
+ "src/compiler/backend/arm/instruction-scheduler-arm.cc",
+ "src/compiler/backend/arm/instruction-selector-arm.cc",
+ "src/compiler/backend/arm/unwinding-info-writer-arm.cc",
+ "src/compiler/backend/arm/unwinding-info-writer-arm.h",
"src/debug/arm/debug-arm.cc",
"src/regexp/arm/regexp-macro-assembler-arm.cc",
"src/regexp/arm/regexp-macro-assembler-arm.h",
@@ -2767,9 +2779,6 @@ v8_source_set("v8_base") {
"src/arm64/assembler-arm64-inl.h",
"src/arm64/assembler-arm64.cc",
"src/arm64/assembler-arm64.h",
- "src/arm64/code-stubs-arm64.cc",
- "src/arm64/code-stubs-arm64.h",
- "src/arm64/codegen-arm64.cc",
"src/arm64/constants-arm64.h",
"src/arm64/cpu-arm64.cc",
"src/arm64/decoder-arm64-inl.h",
@@ -2790,17 +2799,19 @@ v8_source_set("v8_base") {
"src/arm64/macro-assembler-arm64-inl.h",
"src/arm64/macro-assembler-arm64.cc",
"src/arm64/macro-assembler-arm64.h",
+ "src/arm64/register-arm64.cc",
+ "src/arm64/register-arm64.h",
"src/arm64/simulator-arm64.cc",
"src/arm64/simulator-arm64.h",
"src/arm64/simulator-logic-arm64.cc",
"src/arm64/utils-arm64.cc",
"src/arm64/utils-arm64.h",
- "src/compiler/arm64/code-generator-arm64.cc",
- "src/compiler/arm64/instruction-codes-arm64.h",
- "src/compiler/arm64/instruction-scheduler-arm64.cc",
- "src/compiler/arm64/instruction-selector-arm64.cc",
- "src/compiler/arm64/unwinding-info-writer-arm64.cc",
- "src/compiler/arm64/unwinding-info-writer-arm64.h",
+ "src/compiler/backend/arm64/code-generator-arm64.cc",
+ "src/compiler/backend/arm64/instruction-codes-arm64.h",
+ "src/compiler/backend/arm64/instruction-scheduler-arm64.cc",
+ "src/compiler/backend/arm64/instruction-selector-arm64.cc",
+ "src/compiler/backend/arm64/unwinding-info-writer-arm64.cc",
+ "src/compiler/backend/arm64/unwinding-info-writer-arm64.h",
"src/debug/arm64/debug-arm64.cc",
"src/regexp/arm64/regexp-macro-assembler-arm64.cc",
"src/regexp/arm64/regexp-macro-assembler-arm64.h",
@@ -2815,17 +2826,14 @@ v8_source_set("v8_base") {
}
} else if (v8_current_cpu == "mips" || v8_current_cpu == "mipsel") {
sources += [ ### gcmole(arch:mipsel) ###
- "src/compiler/mips/code-generator-mips.cc",
- "src/compiler/mips/instruction-codes-mips.h",
- "src/compiler/mips/instruction-scheduler-mips.cc",
- "src/compiler/mips/instruction-selector-mips.cc",
+ "src/compiler/backend/mips/code-generator-mips.cc",
+ "src/compiler/backend/mips/instruction-codes-mips.h",
+ "src/compiler/backend/mips/instruction-scheduler-mips.cc",
+ "src/compiler/backend/mips/instruction-selector-mips.cc",
"src/debug/mips/debug-mips.cc",
"src/mips/assembler-mips-inl.h",
"src/mips/assembler-mips.cc",
"src/mips/assembler-mips.h",
- "src/mips/code-stubs-mips.cc",
- "src/mips/code-stubs-mips.h",
- "src/mips/codegen-mips.cc",
"src/mips/constants-mips.cc",
"src/mips/constants-mips.h",
"src/mips/cpu-mips.cc",
@@ -2836,6 +2844,7 @@ v8_source_set("v8_base") {
"src/mips/interface-descriptors-mips.cc",
"src/mips/macro-assembler-mips.cc",
"src/mips/macro-assembler-mips.h",
+ "src/mips/register-mips.h",
"src/mips/simulator-mips.cc",
"src/mips/simulator-mips.h",
"src/regexp/mips/regexp-macro-assembler-mips.cc",
@@ -2844,17 +2853,14 @@ v8_source_set("v8_base") {
]
} else if (v8_current_cpu == "mips64" || v8_current_cpu == "mips64el") {
sources += [ ### gcmole(arch:mips64el) ###
- "src/compiler/mips64/code-generator-mips64.cc",
- "src/compiler/mips64/instruction-codes-mips64.h",
- "src/compiler/mips64/instruction-scheduler-mips64.cc",
- "src/compiler/mips64/instruction-selector-mips64.cc",
+ "src/compiler/backend/mips64/code-generator-mips64.cc",
+ "src/compiler/backend/mips64/instruction-codes-mips64.h",
+ "src/compiler/backend/mips64/instruction-scheduler-mips64.cc",
+ "src/compiler/backend/mips64/instruction-selector-mips64.cc",
"src/debug/mips64/debug-mips64.cc",
"src/mips64/assembler-mips64-inl.h",
"src/mips64/assembler-mips64.cc",
"src/mips64/assembler-mips64.h",
- "src/mips64/code-stubs-mips64.cc",
- "src/mips64/code-stubs-mips64.h",
- "src/mips64/codegen-mips64.cc",
"src/mips64/constants-mips64.cc",
"src/mips64/constants-mips64.h",
"src/mips64/cpu-mips64.cc",
@@ -2865,6 +2871,7 @@ v8_source_set("v8_base") {
"src/mips64/interface-descriptors-mips64.cc",
"src/mips64/macro-assembler-mips64.cc",
"src/mips64/macro-assembler-mips64.h",
+ "src/mips64/register-mips64.h",
"src/mips64/simulator-mips64.cc",
"src/mips64/simulator-mips64.h",
"src/regexp/mips64/regexp-macro-assembler-mips64.cc",
@@ -2873,17 +2880,14 @@ v8_source_set("v8_base") {
]
} else if (v8_current_cpu == "ppc" || v8_current_cpu == "ppc64") {
sources += [ ### gcmole(arch:ppc) ###
- "src/compiler/ppc/code-generator-ppc.cc",
- "src/compiler/ppc/instruction-codes-ppc.h",
- "src/compiler/ppc/instruction-scheduler-ppc.cc",
- "src/compiler/ppc/instruction-selector-ppc.cc",
+ "src/compiler/backend/ppc/code-generator-ppc.cc",
+ "src/compiler/backend/ppc/instruction-codes-ppc.h",
+ "src/compiler/backend/ppc/instruction-scheduler-ppc.cc",
+ "src/compiler/backend/ppc/instruction-selector-ppc.cc",
"src/debug/ppc/debug-ppc.cc",
"src/ppc/assembler-ppc-inl.h",
"src/ppc/assembler-ppc.cc",
"src/ppc/assembler-ppc.h",
- "src/ppc/code-stubs-ppc.cc",
- "src/ppc/code-stubs-ppc.h",
- "src/ppc/codegen-ppc.cc",
"src/ppc/constants-ppc.cc",
"src/ppc/constants-ppc.h",
"src/ppc/cpu-ppc.cc",
@@ -2894,6 +2898,7 @@ v8_source_set("v8_base") {
"src/ppc/interface-descriptors-ppc.cc",
"src/ppc/macro-assembler-ppc.cc",
"src/ppc/macro-assembler-ppc.h",
+ "src/ppc/register-ppc.h",
"src/ppc/simulator-ppc.cc",
"src/ppc/simulator-ppc.h",
"src/regexp/ppc/regexp-macro-assembler-ppc.cc",
@@ -2902,19 +2907,16 @@ v8_source_set("v8_base") {
]
} else if (v8_current_cpu == "s390" || v8_current_cpu == "s390x") {
sources += [ ### gcmole(arch:s390) ###
- "src/compiler/s390/code-generator-s390.cc",
- "src/compiler/s390/instruction-codes-s390.h",
- "src/compiler/s390/instruction-scheduler-s390.cc",
- "src/compiler/s390/instruction-selector-s390.cc",
+ "src/compiler/backend/s390/code-generator-s390.cc",
+ "src/compiler/backend/s390/instruction-codes-s390.h",
+ "src/compiler/backend/s390/instruction-scheduler-s390.cc",
+ "src/compiler/backend/s390/instruction-selector-s390.cc",
"src/debug/s390/debug-s390.cc",
"src/regexp/s390/regexp-macro-assembler-s390.cc",
"src/regexp/s390/regexp-macro-assembler-s390.h",
"src/s390/assembler-s390-inl.h",
"src/s390/assembler-s390.cc",
"src/s390/assembler-s390.h",
- "src/s390/code-stubs-s390.cc",
- "src/s390/code-stubs-s390.h",
- "src/s390/codegen-s390.cc",
"src/s390/constants-s390.cc",
"src/s390/constants-s390.h",
"src/s390/cpu-s390.cc",
@@ -2925,6 +2927,7 @@ v8_source_set("v8_base") {
"src/s390/interface-descriptors-s390.cc",
"src/s390/macro-assembler-s390.cc",
"src/s390/macro-assembler-s390.h",
+ "src/s390/register-s390.h",
"src/s390/simulator-s390.cc",
"src/s390/simulator-s390.h",
"src/wasm/baseline/s390/liftoff-assembler-s390.h",
@@ -2936,7 +2939,7 @@ v8_source_set("v8_base") {
defines = []
deps = [
":generate_bytecode_builtins_list",
- ":torque_generated_core",
+ ":run_torque",
":v8_headers",
":v8_libbase",
":v8_libsampler",
@@ -2954,9 +2957,6 @@ v8_source_set("v8_base") {
sources -= [
"src/builtins/builtins-intl.cc",
"src/char-predicates.cc",
- "src/intl.cc",
- "src/intl.h",
- "src/objects/intl-objects-inl.h",
"src/objects/intl-objects.cc",
"src/objects/intl-objects.h",
"src/objects/js-break-iterator-inl.h",
@@ -2983,6 +2983,9 @@ v8_source_set("v8_base") {
"src/objects/js-relative-time-format-inl.h",
"src/objects/js-relative-time-format.cc",
"src/objects/js-relative-time-format.h",
+ "src/objects/js-segment-iterator-inl.h",
+ "src/objects/js-segment-iterator.cc",
+ "src/objects/js-segment-iterator.h",
"src/objects/js-segmenter-inl.h",
"src/objects/js-segmenter.cc",
"src/objects/js-segmenter.h",
@@ -3030,8 +3033,6 @@ v8_source_set("torque_base") {
"src/torque/implementation-visitor.h",
"src/torque/instructions.cc",
"src/torque/instructions.h",
- "src/torque/scope.cc",
- "src/torque/scope.h",
"src/torque/source-positions.cc",
"src/torque/source-positions.h",
"src/torque/torque-parser.cc",
@@ -3076,6 +3077,7 @@ v8_component("v8_libbase") {
"src/base/debug/stack_trace.h",
"src/base/division-by-constant.cc",
"src/base/division-by-constant.h",
+ "src/base/enum-set.h",
"src/base/export-template.h",
"src/base/file-utils.cc",
"src/base/file-utils.h",
@@ -3099,6 +3101,7 @@ v8_component("v8_libbase") {
"src/base/once.cc",
"src/base/once.h",
"src/base/optional.h",
+ "src/base/overflowing-math.h",
"src/base/page-allocator.cc",
"src/base/page-allocator.h",
"src/base/platform/condition-variable.cc",
@@ -3118,6 +3121,7 @@ v8_component("v8_libbase") {
"src/base/safe_conversions_impl.h",
"src/base/safe_math.h",
"src/base/safe_math_impl.h",
+ "src/base/small-vector.h",
"src/base/sys-info.cc",
"src/base/sys-info.h",
"src/base/template-utils.h",
@@ -3229,6 +3233,14 @@ v8_component("v8_libbase") {
data_deps += [ "//build/win:runtime_libs" ]
}
+ if (v8_current_cpu == "mips" || v8_current_cpu == "mips64") {
+ # Add runtime libs for mips.
+ data += [
+ "tools/mips_toolchain/sysroot/usr/lib/",
+ "tools/mips_toolchain/mips-mti-linux-gnu/lib",
+ ]
+ }
+
if (is_tsan && !build_with_chromium) {
data += [ "tools/sanitizers/tsan_suppressions.txt" ]
}
@@ -3368,6 +3380,8 @@ if (v8_use_snapshot && current_toolchain == v8_snapshot_toolchain) {
visibility = [ ":*" ] # Only targets in this file can depend on this.
sources = [
+ "src/snapshot/embedded-file-writer.cc",
+ "src/snapshot/embedded-file-writer.h",
"src/snapshot/mksnapshot.cc",
]
@@ -3381,6 +3395,10 @@ if (v8_use_snapshot && current_toolchain == v8_snapshot_toolchain) {
":v8_nosnapshot",
"//build/win:default_exe_manifest",
]
+
+ if (target_os == "fuchsia") {
+ defines = [ "V8_TARGET_OS_FUCHSIA" ]
+ }
}
}
@@ -3458,8 +3476,12 @@ group("v8_archive") {
deps = [
":d8",
- "test/cctest:cctest",
]
+
+ if (!is_win) {
+    # On Windows, cctest doesn't link with v8_static_library.
+ deps += [ "test/cctest:cctest" ]
+ }
}
# TODO(dglazkov): Remove the "!build_with_chromium" condition once this clause
@@ -3491,14 +3513,7 @@ group("v8_fuzzers") {
":v8_simple_wasm_async_fuzzer",
":v8_simple_wasm_code_fuzzer",
":v8_simple_wasm_compile_fuzzer",
- ":v8_simple_wasm_data_section_fuzzer",
- ":v8_simple_wasm_function_sigs_section_fuzzer",
":v8_simple_wasm_fuzzer",
- ":v8_simple_wasm_globals_section_fuzzer",
- ":v8_simple_wasm_imports_section_fuzzer",
- ":v8_simple_wasm_memory_section_fuzzer",
- ":v8_simple_wasm_names_section_fuzzer",
- ":v8_simple_wasm_types_section_fuzzer",
]
}
@@ -3568,11 +3583,13 @@ if (is_component_build) {
v8_executable("d8") {
sources = [
- "$target_gen_dir/d8-js.cc",
"src/async-hooks-wrapper.cc",
"src/async-hooks-wrapper.h",
"src/d8-console.cc",
"src/d8-console.h",
+ "src/d8-js.cc",
+ "src/d8-platforms.cc",
+ "src/d8-platforms.h",
"src/d8.cc",
"src/d8.h",
]
@@ -3585,7 +3602,6 @@ v8_executable("d8") {
]
deps = [
- ":d8_js2c",
":v8",
":v8_libbase",
":v8_libplatform",
@@ -3787,7 +3803,7 @@ v8_source_set("wasm_module_runner") {
deps = [
":generate_bytecode_builtins_list",
- ":torque_generated_core",
+ ":run_torque",
]
if (v8_enable_i18n_support) {
@@ -3871,7 +3887,7 @@ v8_source_set("lib_wasm_fuzzer_common") {
deps = [
":generate_bytecode_builtins_list",
- ":torque_generated_core",
+ ":run_torque",
]
if (v8_enable_i18n_support) {
@@ -3886,129 +3902,10 @@ v8_source_set("lib_wasm_fuzzer_common") {
]
}
-v8_source_set("wasm_types_section_fuzzer") {
- sources = [
- "test/fuzzer/wasm-types-section.cc",
- ]
-
- deps = [
- ":fuzzer_support",
- ":lib_wasm_fuzzer_common",
- ":wasm_module_runner",
- ]
-
- configs = [
- ":external_config",
- ":internal_config_base",
- ]
-}
-
-v8_fuzzer("wasm_types_section_fuzzer") {
-}
-
-v8_source_set("wasm_names_section_fuzzer") {
- sources = [
- "test/fuzzer/wasm-names-section.cc",
- ]
-
- deps = [
- ":fuzzer_support",
- ":lib_wasm_fuzzer_common",
- ":wasm_module_runner",
- ]
-
- configs = [
- ":external_config",
- ":internal_config_base",
- ]
-}
-
-v8_fuzzer("wasm_names_section_fuzzer") {
-}
-
-v8_source_set("wasm_globals_section_fuzzer") {
- sources = [
- "test/fuzzer/wasm-globals-section.cc",
- ]
-
- deps = [
- ":fuzzer_support",
- ":lib_wasm_fuzzer_common",
- ":wasm_module_runner",
- ]
-
- configs = [
- ":external_config",
- ":internal_config_base",
- ]
-}
-
-v8_fuzzer("wasm_globals_section_fuzzer") {
-}
-
-v8_source_set("wasm_imports_section_fuzzer") {
- sources = [
- "test/fuzzer/wasm-imports-section.cc",
- ]
-
- deps = [
- ":fuzzer_support",
- ":lib_wasm_fuzzer_common",
- ":wasm_module_runner",
- ]
-
- configs = [
- ":external_config",
- ":internal_config_base",
- ]
-}
-
-v8_fuzzer("wasm_imports_section_fuzzer") {
-}
-
-v8_source_set("wasm_function_sigs_section_fuzzer") {
- sources = [
- "test/fuzzer/wasm-function-sigs-section.cc",
- ]
-
- deps = [
- ":fuzzer_support",
- ":lib_wasm_fuzzer_common",
- ":wasm_module_runner",
- ]
-
- configs = [
- ":external_config",
- ":internal_config_base",
- ]
-}
-
-v8_fuzzer("wasm_function_sigs_section_fuzzer") {
-}
-
-v8_source_set("wasm_memory_section_fuzzer") {
- sources = [
- "test/fuzzer/wasm-memory-section.cc",
- ]
-
- deps = [
- ":fuzzer_support",
- ":lib_wasm_fuzzer_common",
- ":wasm_module_runner",
- ]
-
- configs = [
- ":external_config",
- ":internal_config_base",
- ]
-}
-
-v8_fuzzer("wasm_memory_section_fuzzer") {
-}
-
-v8_source_set("wasm_data_section_fuzzer") {
+v8_source_set("wasm_compile_fuzzer") {
sources = [
- "test/fuzzer/wasm-data-section.cc",
+ "test/common/wasm/test-signatures.h",
+ "test/fuzzer/wasm-compile.cc",
]
deps = [
@@ -4023,26 +3920,18 @@ v8_source_set("wasm_data_section_fuzzer") {
]
}
-v8_fuzzer("wasm_data_section_fuzzer") {
+v8_fuzzer("wasm_compile_fuzzer") {
}
-v8_source_set("wasm_compile_fuzzer") {
- sources = [
- "test/common/wasm/test-signatures.h",
- "test/fuzzer/wasm-compile.cc",
- ]
+# Target to build all generated .cc files.
+group("v8_generated_cc_files") {
+ testonly = true
deps = [
- ":fuzzer_support",
- ":lib_wasm_fuzzer_common",
- ":wasm_module_runner",
- ]
-
- configs = [
- ":external_config",
- ":internal_config_base",
+ ":generate_bytecode_builtins_list",
+ ":js2c_extras",
+ ":run_torque",
+ "src/inspector:v8_generated_cc_files",
+ "test/cctest:v8_generated_cc_files",
]
}
-
-v8_fuzzer("wasm_compile_fuzzer") {
-}
diff --git a/deps/v8/ChangeLog b/deps/v8/ChangeLog
index a3377ab473..62b3ace776 100644
--- a/deps/v8/ChangeLog
+++ b/deps/v8/ChangeLog
@@ -1,3 +1,5083 @@
+2019-01-23: Version 7.3.492
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-23: Version 7.3.491
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-23: Version 7.3.490
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-23: Version 7.3.489
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-23: Version 7.3.488
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-22: Version 7.3.487
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-22: Version 7.3.486
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-22: Version 7.3.485
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-22: Version 7.3.484
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-22: Version 7.3.483
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-22: Version 7.3.482
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-22: Version 7.3.481
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-22: Version 7.3.480
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-22: Version 7.3.479
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-22: Version 7.3.478
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-22: Version 7.3.477
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-22: Version 7.3.476
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-22: Version 7.3.475
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-22: Version 7.3.474
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-22: Version 7.3.473
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-22: Version 7.3.472
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-21: Version 7.3.471
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-21: Version 7.3.470
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-21: Version 7.3.469
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-21: Version 7.3.468
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-21: Version 7.3.467
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-21: Version 7.3.466
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-21: Version 7.3.465
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-21: Version 7.3.464
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-21: Version 7.3.463
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-21: Version 7.3.462
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-21: Version 7.3.461
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-21: Version 7.3.460
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-21: Version 7.3.459
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-21: Version 7.3.458
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-21: Version 7.3.457
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-21: Version 7.3.456
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-21: Version 7.3.455
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-21: Version 7.3.454
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-21: Version 7.3.453
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-21: Version 7.3.452
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-18: Version 7.3.451
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-18: Version 7.3.450
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-18: Version 7.3.449
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-18: Version 7.3.448
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-18: Version 7.3.447
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-18: Version 7.3.446
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-18: Version 7.3.445
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-18: Version 7.3.444
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-18: Version 7.3.443
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-18: Version 7.3.442
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-18: Version 7.3.441
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-18: Version 7.3.440
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-18: Version 7.3.439
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-17: Version 7.3.438
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-17: Version 7.3.437
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-17: Version 7.3.436
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-17: Version 7.3.435
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-17: Version 7.3.434
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-17: Version 7.3.433
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-17: Version 7.3.432
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-17: Version 7.3.431
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-17: Version 7.3.430
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-17: Version 7.3.429
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-16: Version 7.3.428
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-16: Version 7.3.427
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-16: Version 7.3.426
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-16: Version 7.3.425
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-16: Version 7.3.424
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-16: Version 7.3.423
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-16: Version 7.3.422
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-16: Version 7.3.421
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-16: Version 7.3.420
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-16: Version 7.3.419
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-16: Version 7.3.418
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-16: Version 7.3.417
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-16: Version 7.3.416
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-16: Version 7.3.415
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-16: Version 7.3.414
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-16: Version 7.3.413
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-15: Version 7.3.412
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-15: Version 7.3.411
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-15: Version 7.3.410
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-15: Version 7.3.409
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-15: Version 7.3.408
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-15: Version 7.3.407
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-15: Version 7.3.406
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-15: Version 7.3.405
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-15: Version 7.3.404
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-15: Version 7.3.403
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-15: Version 7.3.402
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-15: Version 7.3.401
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-15: Version 7.3.400
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-14: Version 7.3.399
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-14: Version 7.3.398
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-14: Version 7.3.397
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-14: Version 7.3.396
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-14: Version 7.3.395
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-14: Version 7.3.394
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-14: Version 7.3.393
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-14: Version 7.3.392
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-14: Version 7.3.391
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-14: Version 7.3.390
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-14: Version 7.3.389
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-14: Version 7.3.388
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-14: Version 7.3.387
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-14: Version 7.3.386
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-14: Version 7.3.385
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-14: Version 7.3.384
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-14: Version 7.3.383
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-14: Version 7.3.382
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-13: Version 7.3.381
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-12: Version 7.3.380
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-12: Version 7.3.379
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-12: Version 7.3.378
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-11: Version 7.3.377
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-11: Version 7.3.376
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-11: Version 7.3.375
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-11: Version 7.3.374
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-11: Version 7.3.373
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-11: Version 7.3.372
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-11: Version 7.3.371
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-11: Version 7.3.370
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-11: Version 7.3.369
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-11: Version 7.3.368
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-11: Version 7.3.367
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-11: Version 7.3.366
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-11: Version 7.3.365
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-11: Version 7.3.364
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-11: Version 7.3.363
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-11: Version 7.3.362
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-10: Version 7.3.361
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-10: Version 7.3.360
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-10: Version 7.3.359
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-10: Version 7.3.358
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-10: Version 7.3.357
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-10: Version 7.3.356
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-10: Version 7.3.355
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-09: Version 7.3.354
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-09: Version 7.3.353
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-09: Version 7.3.352
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-09: Version 7.3.351
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-09: Version 7.3.350
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-09: Version 7.3.349
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-08: Version 7.3.348
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-08: Version 7.3.347
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-08: Version 7.3.346
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-08: Version 7.3.345
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-08: Version 7.3.344
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-08: Version 7.3.343
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-08: Version 7.3.342
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-08: Version 7.3.341
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-08: Version 7.3.340
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-08: Version 7.3.339
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-08: Version 7.3.338
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-08: Version 7.3.337
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-08: Version 7.3.336
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-07: Version 7.3.335
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-07: Version 7.3.334
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-07: Version 7.3.333
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-07: Version 7.3.332
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-07: Version 7.3.331
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-07: Version 7.3.330
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-07: Version 7.3.329
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-07: Version 7.3.328
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-07: Version 7.3.327
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-07: Version 7.3.326
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-07: Version 7.3.325
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-07: Version 7.3.324
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-07: Version 7.3.323
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-07: Version 7.3.322
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-07: Version 7.3.321
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-07: Version 7.3.320
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-07: Version 7.3.319
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-07: Version 7.3.318
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-07: Version 7.3.317
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-07: Version 7.3.316
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-07: Version 7.3.315
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-07: Version 7.3.314
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-07: Version 7.3.313
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-07: Version 7.3.312
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-07: Version 7.3.311
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-07: Version 7.3.310
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-07: Version 7.3.309
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-07: Version 7.3.308
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-07: Version 7.3.307
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-07: Version 7.3.306
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-07: Version 7.3.305
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-06: Version 7.3.304
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-04: Version 7.3.303
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-04: Version 7.3.302
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-04: Version 7.3.301
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-04: Version 7.3.300
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-04: Version 7.3.299
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-04: Version 7.3.298
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-04: Version 7.3.297
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-04: Version 7.3.296
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-04: Version 7.3.295
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-04: Version 7.3.294
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-04: Version 7.3.293
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-04: Version 7.3.292
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-04: Version 7.3.291
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-04: Version 7.3.290
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-04: Version 7.3.289
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-04: Version 7.3.288
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-03: Version 7.3.287
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-03: Version 7.3.286
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-03: Version 7.3.285
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-03: Version 7.3.284
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-03: Version 7.3.283
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-03: Version 7.3.282
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-03: Version 7.3.281
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-03: Version 7.3.280
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-03: Version 7.3.279
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-03: Version 7.3.278
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-03: Version 7.3.277
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-03: Version 7.3.276
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-03: Version 7.3.275
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-03: Version 7.3.274
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-03: Version 7.3.273
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-03: Version 7.3.272
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-03: Version 7.3.271
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-02: Version 7.3.270
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-02: Version 7.3.269
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-02: Version 7.3.268
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-02: Version 7.3.267
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-02: Version 7.3.266
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-02: Version 7.3.265
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-02: Version 7.3.264
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-02: Version 7.3.263
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-02: Version 7.3.262
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-02: Version 7.3.261
+
+ Performance and stability improvements on all platforms.
+
+
+2019-01-01: Version 7.3.260
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-31: Version 7.3.259
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-31: Version 7.3.258
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-30: Version 7.3.257
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-29: Version 7.3.256
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-28: Version 7.3.255
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-28: Version 7.3.254
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-28: Version 7.3.253
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-28: Version 7.3.252
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-27: Version 7.3.251
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-27: Version 7.3.250
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-27: Version 7.3.249
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-27: Version 7.3.248
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-27: Version 7.3.247
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-27: Version 7.3.246
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-27: Version 7.3.245
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-27: Version 7.3.244
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-24: Version 7.3.243
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-24: Version 7.3.242
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-24: Version 7.3.241
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-24: Version 7.3.240
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-24: Version 7.3.239
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-24: Version 7.3.238
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-24: Version 7.3.237
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-23: Version 7.3.236
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-22: Version 7.3.235
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-21: Version 7.3.234
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-21: Version 7.3.233
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-21: Version 7.3.232
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-21: Version 7.3.231
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-21: Version 7.3.230
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-21: Version 7.3.229
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-21: Version 7.3.228
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-21: Version 7.3.227
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-21: Version 7.3.226
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-21: Version 7.3.225
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-21: Version 7.3.224
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-21: Version 7.3.223
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-20: Version 7.3.222
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-20: Version 7.3.221
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-20: Version 7.3.220
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-20: Version 7.3.219
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-20: Version 7.3.218
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-20: Version 7.3.217
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-20: Version 7.3.216
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-20: Version 7.3.215
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-20: Version 7.3.214
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-20: Version 7.3.213
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-19: Version 7.3.212
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-19: Version 7.3.211
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-19: Version 7.3.210
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-19: Version 7.3.209
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-19: Version 7.3.208
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-19: Version 7.3.207
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-19: Version 7.3.206
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-19: Version 7.3.205
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-19: Version 7.3.204
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-19: Version 7.3.203
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-19: Version 7.3.202
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-19: Version 7.3.201
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-19: Version 7.3.200
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-19: Version 7.3.199
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-19: Version 7.3.198
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-19: Version 7.3.197
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-18: Version 7.3.196
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-18: Version 7.3.195
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-18: Version 7.3.194
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-18: Version 7.3.193
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-18: Version 7.3.192
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-18: Version 7.3.191
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-18: Version 7.3.190
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-18: Version 7.3.189
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-18: Version 7.3.188
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-18: Version 7.3.187
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-18: Version 7.3.186
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-18: Version 7.3.185
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-18: Version 7.3.184
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-18: Version 7.3.183
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-18: Version 7.3.182
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-18: Version 7.3.181
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-18: Version 7.3.180
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-18: Version 7.3.179
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-18: Version 7.3.178
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-18: Version 7.3.177
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-18: Version 7.3.176
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-18: Version 7.3.175
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-17: Version 7.3.174
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-17: Version 7.3.173
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-17: Version 7.3.172
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-17: Version 7.3.171
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-17: Version 7.3.170
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-17: Version 7.3.169
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-17: Version 7.3.168
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-17: Version 7.3.167
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-17: Version 7.3.166
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-17: Version 7.3.165
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-17: Version 7.3.164
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-17: Version 7.3.163
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-17: Version 7.3.162
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-17: Version 7.3.161
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-17: Version 7.3.160
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-17: Version 7.3.159
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-17: Version 7.3.158
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-14: Version 7.3.157
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-14: Version 7.3.156
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-14: Version 7.3.155
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-14: Version 7.3.154
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-14: Version 7.3.153
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-14: Version 7.3.152
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-14: Version 7.3.151
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-14: Version 7.3.150
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-14: Version 7.3.149
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-14: Version 7.3.148
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-14: Version 7.3.147
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-14: Version 7.3.146
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-14: Version 7.3.145
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-14: Version 7.3.144
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-14: Version 7.3.143
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-14: Version 7.3.142
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-14: Version 7.3.141
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-14: Version 7.3.140
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-13: Version 7.3.139
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-13: Version 7.3.138
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-13: Version 7.3.137
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-13: Version 7.3.136
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-13: Version 7.3.135
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-13: Version 7.3.134
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-13: Version 7.3.133
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-13: Version 7.3.132
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-13: Version 7.3.131
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-13: Version 7.3.130
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-13: Version 7.3.129
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-13: Version 7.3.128
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-13: Version 7.3.127
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-13: Version 7.3.126
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-13: Version 7.3.125
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-13: Version 7.3.124
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-12: Version 7.3.123
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-12: Version 7.3.122
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-12: Version 7.3.121
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-12: Version 7.3.120
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-12: Version 7.3.119
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-12: Version 7.3.118
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-12: Version 7.3.117
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-12: Version 7.3.116
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-12: Version 7.3.115
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-12: Version 7.3.114
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-12: Version 7.3.113
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-12: Version 7.3.112
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-12: Version 7.3.111
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-12: Version 7.3.110
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-12: Version 7.3.109
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-12: Version 7.3.108
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-12: Version 7.3.107
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-12: Version 7.3.106
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-12: Version 7.3.105
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-12: Version 7.3.104
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-11: Version 7.3.103
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-11: Version 7.3.102
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-11: Version 7.3.101
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-11: Version 7.3.100
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-11: Version 7.3.99
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-11: Version 7.3.98
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-11: Version 7.3.97
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-11: Version 7.3.96
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-11: Version 7.3.95
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-11: Version 7.3.94
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-11: Version 7.3.93
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-11: Version 7.3.92
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-11: Version 7.3.91
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-11: Version 7.3.90
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-11: Version 7.3.89
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-11: Version 7.3.88
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-11: Version 7.3.87
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-11: Version 7.3.86
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-11: Version 7.3.85
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-11: Version 7.3.84
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-10: Version 7.3.83
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-10: Version 7.3.82
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-10: Version 7.3.81
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-10: Version 7.3.80
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-10: Version 7.3.79
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-10: Version 7.3.78
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-10: Version 7.3.77
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-10: Version 7.3.76
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-08: Version 7.3.75
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-07: Version 7.3.74
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-07: Version 7.3.73
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-07: Version 7.3.72
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-07: Version 7.3.71
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-07: Version 7.3.70
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-07: Version 7.3.69
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-07: Version 7.3.68
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-07: Version 7.3.67
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-07: Version 7.3.66
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-07: Version 7.3.65
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-07: Version 7.3.64
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-07: Version 7.3.63
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-07: Version 7.3.62
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-07: Version 7.3.61
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-07: Version 7.3.60
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-07: Version 7.3.59
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-06: Version 7.3.58
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-06: Version 7.3.57
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-06: Version 7.3.56
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-06: Version 7.3.55
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-06: Version 7.3.54
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-06: Version 7.3.53
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-06: Version 7.3.52
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-06: Version 7.3.51
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-06: Version 7.3.50
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-06: Version 7.3.49
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-06: Version 7.3.48
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-06: Version 7.3.47
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-06: Version 7.3.46
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-05: Version 7.3.45
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-05: Version 7.3.44
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-05: Version 7.3.43
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-05: Version 7.3.42
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-05: Version 7.3.41
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-05: Version 7.3.40
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-05: Version 7.3.39
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-05: Version 7.3.38
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-05: Version 7.3.37
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-05: Version 7.3.36
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-05: Version 7.3.35
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-05: Version 7.3.34
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-05: Version 7.3.33
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-05: Version 7.3.32
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-05: Version 7.3.31
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-05: Version 7.3.30
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-05: Version 7.3.29
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-05: Version 7.3.28
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-04: Version 7.3.27
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-04: Version 7.3.26
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-04: Version 7.3.25
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-04: Version 7.3.24
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-04: Version 7.3.23
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-04: Version 7.3.22
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-04: Version 7.3.21
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-04: Version 7.3.20
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-04: Version 7.3.19
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-04: Version 7.3.18
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-04: Version 7.3.17
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-04: Version 7.3.16
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-04: Version 7.3.15
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-04: Version 7.3.14
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-04: Version 7.3.13
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-04: Version 7.3.12
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-03: Version 7.3.11
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-03: Version 7.3.10
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-03: Version 7.3.9
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-03: Version 7.3.8
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-03: Version 7.3.7
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-03: Version 7.3.6
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-03: Version 7.3.5
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-03: Version 7.3.4
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-03: Version 7.3.3
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-03: Version 7.3.2
+
+ Performance and stability improvements on all platforms.
+
+
+2018-12-03: Version 7.3.1
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-29: Version 7.2.505
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-29: Version 7.2.504
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-29: Version 7.2.503
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-28: Version 7.2.502
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-28: Version 7.2.501
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-28: Version 7.2.500
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-28: Version 7.2.499
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-28: Version 7.2.498
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-28: Version 7.2.497
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-28: Version 7.2.496
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-28: Version 7.2.495
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-28: Version 7.2.494
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-28: Version 7.2.493
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-28: Version 7.2.492
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-27: Version 7.2.491
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-27: Version 7.2.490
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-27: Version 7.2.489
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-27: Version 7.2.488
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-27: Version 7.2.487
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-27: Version 7.2.486
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-27: Version 7.2.485
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-27: Version 7.2.484
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-27: Version 7.2.483
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-27: Version 7.2.482
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-27: Version 7.2.481
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-27: Version 7.2.480
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-27: Version 7.2.479
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-27: Version 7.2.478
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-26: Version 7.2.477
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-26: Version 7.2.476
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-26: Version 7.2.475
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-26: Version 7.2.474
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-26: Version 7.2.473
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-26: Version 7.2.472
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-26: Version 7.2.471
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-26: Version 7.2.470
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-26: Version 7.2.469
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-24: Version 7.2.468
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-24: Version 7.2.467
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-24: Version 7.2.466
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-24: Version 7.2.465
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-24: Version 7.2.464
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-24: Version 7.2.463
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-23: Version 7.2.462
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-23: Version 7.2.461
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-23: Version 7.2.460
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-23: Version 7.2.459
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-23: Version 7.2.458
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-22: Version 7.2.457
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-22: Version 7.2.456
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-22: Version 7.2.455
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-22: Version 7.2.454
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-22: Version 7.2.453
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-22: Version 7.2.452
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-22: Version 7.2.451
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-22: Version 7.2.450
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-22: Version 7.2.449
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-22: Version 7.2.448
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-22: Version 7.2.447
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-22: Version 7.2.446
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-22: Version 7.2.445
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-22: Version 7.2.444
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-22: Version 7.2.443
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-22: Version 7.2.442
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-22: Version 7.2.441
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-22: Version 7.2.440
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-22: Version 7.2.439
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-22: Version 7.2.438
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-22: Version 7.2.437
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-22: Version 7.2.436
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-22: Version 7.2.435
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-22: Version 7.2.434
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-22: Version 7.2.433
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-22: Version 7.2.432
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-22: Version 7.2.431
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-22: Version 7.2.430
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-22: Version 7.2.429
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-22: Version 7.2.428
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-22: Version 7.2.427
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-22: Version 7.2.426
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-22: Version 7.2.425
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-21: Version 7.2.424
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-21: Version 7.2.423
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-21: Version 7.2.422
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-21: Version 7.2.421
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-21: Version 7.2.420
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-21: Version 7.2.419
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-21: Version 7.2.418
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-21: Version 7.2.417
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-21: Version 7.2.416
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-21: Version 7.2.415
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-21: Version 7.2.414
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-21: Version 7.2.413
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-21: Version 7.2.412
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-21: Version 7.2.411
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-21: Version 7.2.410
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-21: Version 7.2.409
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-21: Version 7.2.408
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-21: Version 7.2.407
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-21: Version 7.2.406
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-21: Version 7.2.405
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-21: Version 7.2.404
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-21: Version 7.2.403
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-21: Version 7.2.402
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-21: Version 7.2.401
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-21: Version 7.2.400
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-21: Version 7.2.399
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-21: Version 7.2.398
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-21: Version 7.2.397
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-20: Version 7.2.396
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-20: Version 7.2.395
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-20: Version 7.2.394
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-20: Version 7.2.393
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-20: Version 7.2.392
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-20: Version 7.2.391
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-20: Version 7.2.390
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-20: Version 7.2.389
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-20: Version 7.2.388
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-19: Version 7.2.387
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-19: Version 7.2.386
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-19: Version 7.2.385
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-19: Version 7.2.384
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-19: Version 7.2.383
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-19: Version 7.2.382
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-19: Version 7.2.381
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-19: Version 7.2.380
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-19: Version 7.2.379
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-18: Version 7.2.378
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-18: Version 7.2.377
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-16: Version 7.2.376
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-16: Version 7.2.375
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-16: Version 7.2.374
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-16: Version 7.2.373
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-16: Version 7.2.372
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-16: Version 7.2.371
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-16: Version 7.2.370
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-16: Version 7.2.369
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-16: Version 7.2.368
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-16: Version 7.2.367
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-16: Version 7.2.366
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-16: Version 7.2.365
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-16: Version 7.2.364
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-16: Version 7.2.363
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-16: Version 7.2.362
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-15: Version 7.2.361
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-15: Version 7.2.360
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-15: Version 7.2.359
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-15: Version 7.2.358
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-15: Version 7.2.357
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-15: Version 7.2.356
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-15: Version 7.2.355
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-15: Version 7.2.354
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-15: Version 7.2.353
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-15: Version 7.2.352
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-15: Version 7.2.351
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-15: Version 7.2.350
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-15: Version 7.2.349
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-15: Version 7.2.348
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-15: Version 7.2.347
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-15: Version 7.2.346
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-15: Version 7.2.345
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-15: Version 7.2.344
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-15: Version 7.2.343
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-15: Version 7.2.342
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-15: Version 7.2.341
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-15: Version 7.2.340
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-15: Version 7.2.339
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-14: Version 7.2.338
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-14: Version 7.2.337
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-14: Version 7.2.336
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-14: Version 7.2.335
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-14: Version 7.2.334
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-14: Version 7.2.333
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-14: Version 7.2.332
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-14: Version 7.2.331
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-14: Version 7.2.330
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-14: Version 7.2.329
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-14: Version 7.2.328
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-14: Version 7.2.327
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-14: Version 7.2.326
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-14: Version 7.2.325
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-14: Version 7.2.324
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-14: Version 7.2.323
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-14: Version 7.2.322
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-13: Version 7.2.321
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-13: Version 7.2.320
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-13: Version 7.2.319
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-13: Version 7.2.318
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-13: Version 7.2.317
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-13: Version 7.2.316
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-13: Version 7.2.315
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-13: Version 7.2.314
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-13: Version 7.2.313
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-13: Version 7.2.312
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-13: Version 7.2.311
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-13: Version 7.2.310
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-12: Version 7.2.309
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-12: Version 7.2.308
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-12: Version 7.2.307
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-12: Version 7.2.306
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-12: Version 7.2.305
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-12: Version 7.2.304
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-12: Version 7.2.303
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-12: Version 7.2.302
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-12: Version 7.2.301
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-12: Version 7.2.300
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-12: Version 7.2.299
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-12: Version 7.2.298
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-12: Version 7.2.297
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-12: Version 7.2.296
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-12: Version 7.2.295
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-12: Version 7.2.294
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-09: Version 7.2.293
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-09: Version 7.2.292
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-09: Version 7.2.291
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-09: Version 7.2.290
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-09: Version 7.2.289
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-09: Version 7.2.288
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-09: Version 7.2.287
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-09: Version 7.2.286
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-09: Version 7.2.285
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-09: Version 7.2.284
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-08: Version 7.2.283
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-08: Version 7.2.282
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-08: Version 7.2.281
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-08: Version 7.2.280
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-08: Version 7.2.279
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-08: Version 7.2.278
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-08: Version 7.2.277
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-08: Version 7.2.276
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-08: Version 7.2.275
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-08: Version 7.2.274
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-08: Version 7.2.273
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-07: Version 7.2.272
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-07: Version 7.2.271
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-07: Version 7.2.270
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-07: Version 7.2.269
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-07: Version 7.2.268
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-07: Version 7.2.267
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-07: Version 7.2.266
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-07: Version 7.2.265
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-07: Version 7.2.264
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-07: Version 7.2.263
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-07: Version 7.2.262
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-07: Version 7.2.261
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-07: Version 7.2.260
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-06: Version 7.2.259
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-06: Version 7.2.258
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-06: Version 7.2.257
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-06: Version 7.2.256
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-06: Version 7.2.255
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-06: Version 7.2.254
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-06: Version 7.2.253
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-06: Version 7.2.252
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-06: Version 7.2.251
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-06: Version 7.2.250
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-06: Version 7.2.249
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-06: Version 7.2.248
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-06: Version 7.2.247
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-06: Version 7.2.246
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-06: Version 7.2.245
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-06: Version 7.2.244
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-06: Version 7.2.243
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-05: Version 7.2.242
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-05: Version 7.2.241
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-05: Version 7.2.240
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-05: Version 7.2.239
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-04: Version 7.2.238
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-04: Version 7.2.237
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-03: Version 7.2.236
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-03: Version 7.2.235
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-03: Version 7.2.234
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-02: Version 7.2.233
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-02: Version 7.2.232
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-02: Version 7.2.231
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-02: Version 7.2.230
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-02: Version 7.2.229
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-02: Version 7.2.228
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-02: Version 7.2.227
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-02: Version 7.2.226
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-02: Version 7.2.225
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-02: Version 7.2.224
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-02: Version 7.2.223
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-02: Version 7.2.222
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-02: Version 7.2.221
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-02: Version 7.2.220
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-01: Version 7.2.219
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-01: Version 7.2.218
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-01: Version 7.2.217
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-01: Version 7.2.216
+
+ Performance and stability improvements on all platforms.
+
+
+2018-11-01: Version 7.2.215
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-31: Version 7.2.214
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-31: Version 7.2.213
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-31: Version 7.2.212
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-31: Version 7.2.211
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-31: Version 7.2.210
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-31: Version 7.2.209
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-31: Version 7.2.208
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-31: Version 7.2.207
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-31: Version 7.2.206
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-31: Version 7.2.205
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-31: Version 7.2.204
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-31: Version 7.2.203
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-31: Version 7.2.202
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-31: Version 7.2.201
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-31: Version 7.2.200
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-31: Version 7.2.199
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-31: Version 7.2.198
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-31: Version 7.2.197
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-31: Version 7.2.196
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-30: Version 7.2.195
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-30: Version 7.2.194
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-29: Version 7.2.193
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-29: Version 7.2.192
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-29: Version 7.2.191
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-29: Version 7.2.190
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-29: Version 7.2.189
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-29: Version 7.2.188
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-29: Version 7.2.187
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-29: Version 7.2.186
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-29: Version 7.2.185
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-29: Version 7.2.184
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-29: Version 7.2.183
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-29: Version 7.2.182
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-29: Version 7.2.181
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-29: Version 7.2.180
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-29: Version 7.2.179
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-29: Version 7.2.178
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-29: Version 7.2.177
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-29: Version 7.2.176
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-29: Version 7.2.175
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-29: Version 7.2.174
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-29: Version 7.2.173
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-29: Version 7.2.172
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-29: Version 7.2.171
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-29: Version 7.2.170
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-29: Version 7.2.169
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-29: Version 7.2.168
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-29: Version 7.2.167
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-29: Version 7.2.166
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-29: Version 7.2.165
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-28: Version 7.2.164
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-27: Version 7.2.163
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-26: Version 7.2.162
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-26: Version 7.2.161
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-26: Version 7.2.160
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-26: Version 7.2.159
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-26: Version 7.2.158
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-26: Version 7.2.157
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-26: Version 7.2.156
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-26: Version 7.2.155
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-26: Version 7.2.154
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-26: Version 7.2.153
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-26: Version 7.2.152
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-26: Version 7.2.151
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-26: Version 7.2.150
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-26: Version 7.2.149
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-26: Version 7.2.148
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-26: Version 7.2.147
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-26: Version 7.2.146
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-26: Version 7.2.145
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-26: Version 7.2.144
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-26: Version 7.2.143
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-26: Version 7.2.142
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-25: Version 7.2.141
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-25: Version 7.2.140
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-25: Version 7.2.139
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-25: Version 7.2.138
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-25: Version 7.2.137
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-25: Version 7.2.136
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-25: Version 7.2.135
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-25: Version 7.2.134
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-25: Version 7.2.133
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-25: Version 7.2.132
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-25: Version 7.2.131
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-25: Version 7.2.130
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-25: Version 7.2.129
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-25: Version 7.2.128
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-25: Version 7.2.127
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-25: Version 7.2.126
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-25: Version 7.2.125
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-25: Version 7.2.124
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-25: Version 7.2.123
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-25: Version 7.2.122
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-24: Version 7.2.121
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-24: Version 7.2.120
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-24: Version 7.2.119
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-24: Version 7.2.118
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-24: Version 7.2.117
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-24: Version 7.2.116
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-24: Version 7.2.115
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-24: Version 7.2.114
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-24: Version 7.2.113
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-24: Version 7.2.112
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-24: Version 7.2.111
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-24: Version 7.2.110
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-24: Version 7.2.109
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-24: Version 7.2.108
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-24: Version 7.2.107
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-24: Version 7.2.106
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-24: Version 7.2.105
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-24: Version 7.2.104
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-24: Version 7.2.103
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-24: Version 7.2.102
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-24: Version 7.2.101
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-24: Version 7.2.100
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-24: Version 7.2.99
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-24: Version 7.2.98
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-24: Version 7.2.97
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-24: Version 7.2.96
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-24: Version 7.2.95
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-23: Version 7.2.94
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-23: Version 7.2.93
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-23: Version 7.2.92
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-23: Version 7.2.91
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-23: Version 7.2.90
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-23: Version 7.2.89
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-23: Version 7.2.88
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-23: Version 7.2.87
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-23: Version 7.2.86
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-23: Version 7.2.85
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-23: Version 7.2.84
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-23: Version 7.2.83
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-23: Version 7.2.82
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-23: Version 7.2.81
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-23: Version 7.2.80
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-23: Version 7.2.79
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-23: Version 7.2.78
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-23: Version 7.2.77
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-23: Version 7.2.76
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-23: Version 7.2.75
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-22: Version 7.2.74
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-22: Version 7.2.73
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-22: Version 7.2.72
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-22: Version 7.2.71
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-22: Version 7.2.70
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-22: Version 7.2.69
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-22: Version 7.2.68
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-22: Version 7.2.67
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-22: Version 7.2.66
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-22: Version 7.2.65
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-22: Version 7.2.64
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-22: Version 7.2.63
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-22: Version 7.2.62
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-22: Version 7.2.61
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-22: Version 7.2.60
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-22: Version 7.2.59
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-22: Version 7.2.58
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-22: Version 7.2.57
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-22: Version 7.2.56
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-22: Version 7.2.55
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-22: Version 7.2.54
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-22: Version 7.2.53
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-22: Version 7.2.52
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-20: Version 7.2.51
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-20: Version 7.2.50
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-19: Version 7.2.49
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-19: Version 7.2.48
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-19: Version 7.2.47
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-19: Version 7.2.46
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-19: Version 7.2.45
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-19: Version 7.2.44
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-19: Version 7.2.43
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-18: Version 7.2.42
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-18: Version 7.2.41
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-18: Version 7.2.40
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-17: Version 7.2.39
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-17: Version 7.2.38
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-17: Version 7.2.37
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-17: Version 7.2.36
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-17: Version 7.2.35
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-17: Version 7.2.34
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-17: Version 7.2.33
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-17: Version 7.2.32
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-17: Version 7.2.31
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-17: Version 7.2.30
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-17: Version 7.2.29
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-17: Version 7.2.28
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-17: Version 7.2.27
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-17: Version 7.2.26
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-17: Version 7.2.25
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-16: Version 7.2.24
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-16: Version 7.2.23
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-16: Version 7.2.22
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-16: Version 7.2.21
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-16: Version 7.2.20
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-16: Version 7.2.19
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-16: Version 7.2.18
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-16: Version 7.2.17
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-16: Version 7.2.16
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-15: Version 7.2.15
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-15: Version 7.2.14
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-15: Version 7.2.13
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-15: Version 7.2.12
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-15: Version 7.2.11
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-15: Version 7.2.10
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-15: Version 7.2.9
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-15: Version 7.2.8
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-15: Version 7.2.7
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-15: Version 7.2.6
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-15: Version 7.2.5
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-15: Version 7.2.4
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-14: Version 7.2.3
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-13: Version 7.2.2
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-13: Version 7.2.1
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-11: Version 7.1.321
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-10: Version 7.1.320
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-10: Version 7.1.319
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-10: Version 7.1.318
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-10: Version 7.1.317
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-10: Version 7.1.316
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-10: Version 7.1.315
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-10: Version 7.1.314
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-10: Version 7.1.313
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-10: Version 7.1.312
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-09: Version 7.1.311
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-09: Version 7.1.310
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-09: Version 7.1.309
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-09: Version 7.1.308
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-09: Version 7.1.307
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-09: Version 7.1.306
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-09: Version 7.1.305
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-09: Version 7.1.304
+
+ Performance and stability improvements on all platforms.
+
+
+2018-10-09: Version 7.1.303
+
+ Performance and stability improvements on all platforms.
+
+
2018-10-09: Version 7.1.302

        Performance and stability improvements on all platforms.
diff --git a/deps/v8/DEPS b/deps/v8/DEPS
index a87c01e49d..ec6045a90a 100644
--- a/deps/v8/DEPS
+++ b/deps/v8/DEPS
@@ -11,32 +11,32 @@ vars = {
'check_v8_header_includes': False,
# luci-go CIPD package version.
- 'luci_go': 'git_revision:fdf05508e8a66c773a41521e0243c9d11b9a2a1c',
+ 'luci_go': 'git_revision:25958d48e89e980e2a97daeddc977fb5e2e1fb8c',
}
deps = {
'v8/build':
- Var('chromium_url') + '/chromium/src/build.git' + '@' + 'a7674eacc34947257c78fe6ba5cf0da17f60696c',
+ Var('chromium_url') + '/chromium/src/build.git' + '@' + 'f2ca77c3aa839107f36fed20dac81fe8b71b060e',
'v8/third_party/depot_tools':
- Var('chromium_url') + '/chromium/tools/depot_tools.git' + '@' + '71e3be7a50c21faeee91ed99a8d5addfb7594e7c',
+ Var('chromium_url') + '/chromium/tools/depot_tools.git' + '@' + 'f7971436824dd8eeb9b0cf19dabc3e32b369a904',
'v8/third_party/icu':
- Var('chromium_url') + '/chromium/deps/icu.git' + '@' + 'c52a2a250d6c5f5cbdd015dff36af7c5d0ae1150',
+ Var('chromium_url') + '/chromium/deps/icu.git' + '@' + '07e7295d964399ee7bee16a3ac7ca5a053b2cf0a',
'v8/third_party/instrumented_libraries':
- Var('chromium_url') + '/chromium/src/third_party/instrumented_libraries.git' + '@' + 'a90cbf3b4216430a437991fb53ede8e048dea454',
+ Var('chromium_url') + '/chromium/src/third_party/instrumented_libraries.git' + '@' + 'a959e4f0cb643003f2d75d179cede449979e3e77',
'v8/buildtools':
- Var('chromium_url') + '/chromium/buildtools.git' + '@' + '2dff9c9c74e9d732e6fe57c84ef7fd044cc45d96',
+ Var('chromium_url') + '/chromium/buildtools.git' + '@' + '2f02e1f363b1af2715536f38e239853f04ec1497',
'v8/base/trace_event/common':
- Var('chromium_url') + '/chromium/src/base/trace_event/common.git' + '@' + '211b3ed9d0481b4caddbee1322321b86a483ca1f',
+ Var('chromium_url') + '/chromium/src/base/trace_event/common.git' + '@' + 'e31a1706337ccb9a658b37d29a018c81695c6518',
'v8/third_party/android_ndk': {
'url': Var('chromium_url') + '/android_ndk.git' + '@' + '4e2cea441bfd43f0863d14f57b1e1844260b9884',
'condition': 'checkout_android',
},
'v8/third_party/android_tools': {
- 'url': Var('chromium_url') + '/android_tools.git' + '@' + '130499e25286f4d56acafa252fee09f3cc595c49',
+ 'url': Var('chromium_url') + '/android_tools.git' + '@' + 'e958d6ea74442d4e0849bb8a018d215a0e78981d',
'condition': 'checkout_android',
},
'v8/third_party/catapult': {
- 'url': Var('chromium_url') + '/catapult.git' + '@' + '9ec8468cfde0868ce5f3893e819087278c5af988',
+ 'url': Var('chromium_url') + '/catapult.git' + '@' + 'dd2de388fc4e3e8fa97a97515ec35c5b3834b753',
'condition': 'checkout_android',
},
'v8/third_party/colorama/src': {
@@ -44,23 +44,23 @@ deps = {
'condition': 'checkout_android',
},
'v8/third_party/fuchsia-sdk': {
- 'url': Var('chromium_url') + '/chromium/src/third_party/fuchsia-sdk.git' + '@' + '6e1868c9083769d489d3fc25657339d50c22b1d8',
+ 'url': Var('chromium_url') + '/chromium/src/third_party/fuchsia-sdk.git' + '@' + '5157be49c92d031a74192ee993f32a2a28c8b1c3',
'condition': 'checkout_fuchsia',
},
'v8/third_party/googletest/src':
- Var('chromium_url') + '/external/github.com/google/googletest.git' + '@' + '2e68926a9d4929e9289373cd49e40ddcb9a628f7',
+ Var('chromium_url') + '/external/github.com/google/googletest.git' + '@' + '9518a57428ae0a7ed450c1361768e84a2a38af5a',
'v8/third_party/jinja2':
Var('chromium_url') + '/chromium/src/third_party/jinja2.git' + '@' + 'b41863e42637544c2941b574c7877d3e1f663e25',
'v8/third_party/markupsafe':
Var('chromium_url') + '/chromium/src/third_party/markupsafe.git' + '@' + '8f45f5cfa0009d2a70589bcda0349b8cb2b72783',
'v8/tools/swarming_client':
- Var('chromium_url') + '/infra/luci/client-py.git' + '@' + '0e3e1c4dc4e79f25a5b58fcbc135dc93183c0c54',
+ Var('chromium_url') + '/infra/luci/client-py.git' + '@' + 'd50a88f50782ba29076061b94c7b9d08a6c7e424',
'v8/test/benchmarks/data':
Var('chromium_url') + '/v8/deps/third_party/benchmarks.git' + '@' + '05d7188267b4560491ff9155c5ee13e207ecd65f',
'v8/test/mozilla/data':
Var('chromium_url') + '/v8/deps/third_party/mozilla-tests.git' + '@' + 'f6c578a10ea707b1a8ab0b88943fe5115ce2b9be',
'v8/test/test262/data':
- Var('chromium_url') + '/external/github.com/tc39/test262.git' + '@' + '00cfe1628cc03164dcf03f01ba9c84376e9be735',
+ Var('chromium_url') + '/external/github.com/tc39/test262.git' + '@' + '4f1155c566a222238fd86f179c6635ecb4c289bb',
'v8/test/test262/harness':
Var('chromium_url') + '/external/github.com/test262-utils/test262-harness-py.git' + '@' + '0f2acdd882c84cff43b9d60df7574a1901e2cdcd',
'v8/third_party/qemu-linux-x64': {
@@ -84,7 +84,7 @@ deps = {
'dep_type': 'cipd',
},
'v8/tools/clang':
- Var('chromium_url') + '/chromium/src/tools/clang.git' + '@' + '7792d28b069af6dd3a86d1ba83b7f5c4ede605dc',
+ Var('chromium_url') + '/chromium/src/tools/clang.git' + '@' + '3a16568a56486d7d032b8ec7b8dae892413a9a7a',
'v8/tools/luci-go': {
'packages': [
{
@@ -103,8 +103,8 @@ deps = {
'condition': 'host_cpu != "s390"',
'dep_type': 'cipd',
},
- 'v8/test/wasm-js':
- Var('chromium_url') + '/external/github.com/WebAssembly/spec.git' + '@' + 'db9cd40808a90ecc5f4a23e88fb375c8f60b8d52',
+ 'v8/test/wasm-js/data':
+ Var('chromium_url') + '/external/github.com/WebAssembly/spec.git' + '@' + 'b42efa9b07c5544079c31f6088a66bead617559c',
}
recursedeps = [
@@ -209,43 +209,6 @@ hooks = [
'--platform=linux*',
],
},
- # Pull luci-go binaries (isolate, swarming) using checked-in hashes.
- {
- 'name': 'luci-go_win',
- 'pattern': '.',
- 'condition': 'host_os == "win"',
- 'action': [ 'download_from_google_storage',
- '--no_resume',
- '--platform=win32',
- '--no_auth',
- '--bucket', 'chromium-luci',
- '-d', 'v8/tools/luci-go/win64',
- ],
- },
- {
- 'name': 'luci-go_mac',
- 'pattern': '.',
- 'condition': 'host_os == "mac"',
- 'action': [ 'download_from_google_storage',
- '--no_resume',
- '--platform=darwin',
- '--no_auth',
- '--bucket', 'chromium-luci',
- '-d', 'v8/tools/luci-go/mac64',
- ],
- },
- {
- 'name': 'luci-go_linux',
- 'pattern': '.',
- 'condition': 'host_os == "linux"',
- 'action': [ 'download_from_google_storage',
- '--no_resume',
- '--platform=linux*',
- '--no_auth',
- '--bucket', 'chromium-luci',
- '-d', 'v8/tools/luci-go/linux64',
- ],
- },
# Pull GN using checked-in hashes.
{
'name': 'gn_win',
@@ -295,17 +258,6 @@ hooks = [
],
},
{
- 'name': 'closure_compiler',
- 'pattern': '.',
- 'action': [ 'download_from_google_storage',
- '--no_resume',
- '--no_auth',
- '-u',
- '--bucket', 'chromium-v8-closure-compiler',
- '-s', 'v8/src/inspector/build/closure-compiler.tar.gz.sha1',
- ],
- },
- {
'name': 'sysroot_arm',
'pattern': '.',
'condition': '(checkout_linux and checkout_arm)',
diff --git a/deps/v8/OWNERS b/deps/v8/OWNERS
index d32f721382..e41066126a 100644
--- a/deps/v8/OWNERS
+++ b/deps/v8/OWNERS
@@ -4,12 +4,10 @@ aseemgarg@chromium.org
bbudge@chromium.org
binji@chromium.org
bmeurer@chromium.org
-bradnelson@chromium.org
cbruni@chromium.org
clemensh@chromium.org
danno@chromium.org
delphick@chromium.org
-eholk@chromium.org
gdeepti@chromium.org
gsathya@chromium.org
hablich@chromium.org
@@ -19,7 +17,6 @@ ishell@chromium.org
jarin@chromium.org
jgruber@chromium.org
jkummerow@chromium.org
-kschimpf@chromium.org
leszeks@chromium.org
machenbach@chromium.org
mathias@chromium.org
diff --git a/deps/v8/PRESUBMIT.py b/deps/v8/PRESUBMIT.py
index e48d51d8e5..8aea920ef4 100644
--- a/deps/v8/PRESUBMIT.py
+++ b/deps/v8/PRESUBMIT.py
@@ -77,7 +77,7 @@ def _V8PresubmitChecks(input_api, output_api):
sys.path.append(input_api.os_path.join(
input_api.PresubmitLocalPath(), 'tools'))
from v8_presubmit import CppLintProcessor
- from v8_presubmit import TorqueFormatProcessor
+ from v8_presubmit import TorqueLintProcessor
from v8_presubmit import SourceProcessor
from v8_presubmit import StatusFilesProcessor
@@ -96,7 +96,7 @@ def _V8PresubmitChecks(input_api, output_api):
if not CppLintProcessor().RunOnFiles(
input_api.AffectedFiles(file_filter=FilterFile, include_deletes=False)):
results.append(output_api.PresubmitError("C++ lint check failed"))
- if not TorqueFormatProcessor().RunOnFiles(
+ if not TorqueLintProcessor().RunOnFiles(
input_api.AffectedFiles(file_filter=FilterTorqueFile,
include_deletes=False)):
results.append(output_api.PresubmitError("Torque format check failed"))
@@ -330,6 +330,7 @@ def _CommonChecks(input_api, output_api):
_CheckNoInlineHeaderIncludesInNormalHeaders(input_api, output_api))
results.extend(_CheckJSONFiles(input_api, output_api))
results.extend(_CheckMacroUndefs(input_api, output_api))
+ results.extend(_CheckNoexceptAnnotations(input_api, output_api))
results.extend(input_api.RunTests(
input_api.canned_checks.CheckVPythonSpec(input_api, output_api)))
return results
@@ -450,6 +451,58 @@ def _CheckMacroUndefs(input_api, output_api):
return []
+def _CheckNoexceptAnnotations(input_api, output_api):
+ """
+ Checks that user-defined copy and move constructors and assignment operators
+ are marked V8_NOEXCEPT.
+
+ This is required for standard containers to pick the right constructors. Our
+ macros (like MOVE_ONLY_WITH_DEFAULT_CONSTRUCTORS) add this automatically.
+ Omitting it in some places can result in confusing compiler errors if such a
+ class is mixed with other classes that do have the annotation.
+
+ TODO(clemensh): This check should eventually be enabled for all files via
+ tools/presubmit.py (https://crbug.com/v8/8616).
+ """
+
+ def FilterFile(affected_file):
+ return input_api.FilterSourceFile(
+ affected_file,
+ white_list=(r'src/.*', r'test/.*'))
+
+
+ # matches any class name.
+ class_name = r'\b([A-Z][A-Za-z0-9_:]*)(?:::\1)?'
+ # initial class name is potentially followed by this to declare an assignment
+ # operator.
+ potential_assignment = r'(?:&\s+(?:\1::)?operator=)?\s*'
+ # matches an argument list that contains only a reference to a class named
+ # like the first capture group, potentially const.
+ single_class_ref_arg = r'\(\s*(?:const\s+)?\1(?:::\1)?&&?[^,;)]*\)'
+ # matches anything but a sequence of whitespaces followed by either
+ # V8_NOEXCEPT or "= delete".
+ not_followed_by_noexcept = r'(?!\s+(?:V8_NOEXCEPT|=\s+delete)\b)'
+ full_pattern = r'^.*?' + class_name + potential_assignment + \
+ single_class_ref_arg + not_followed_by_noexcept + '.*?$'
+ regexp = input_api.re.compile(full_pattern, re.MULTILINE)
+
+ errors = []
+ for f in input_api.AffectedFiles(file_filter=FilterFile,
+ include_deletes=False):
+ with open(f.LocalPath()) as fh:
+ for match in re.finditer(regexp, fh.read()):
+ errors.append('in {}: {}'.format(f.LocalPath(),
+ match.group().strip()))
+
+ if errors:
+ return [output_api.PresubmitPromptOrNotify(
+ 'Copy constructors, move constructors, copy assignment operators and '
+ 'move assignment operators should be marked V8_NOEXCEPT.\n'
+ 'Please report false positives on https://crbug.com/v8/8616.',
+ errors)]
+ return []
+
+
def CheckChangeOnUpload(input_api, output_api):
results = []
results.extend(_CommonChecks(input_api, output_api))
@@ -466,19 +519,3 @@ def CheckChangeOnCommit(input_api, output_api):
input_api, output_api,
json_url='http://v8-status.appspot.com/current?format=json'))
return results
-
-def PostUploadHook(cl, change, output_api):
- """git cl upload will call this hook after the issue is created/modified.
-
- This hook adds a noi18n bot if the patch affects Intl.
- """
- def affects_intl(f):
- return 'intl' in f.LocalPath() or 'test262' in f.LocalPath()
- if not change.AffectedFiles(file_filter=affects_intl):
- return []
- return output_api.EnsureCQIncludeTrybotsAreAdded(
- cl,
- [
- 'luci.v8.try:v8_linux_noi18n_rel_ng'
- ],
- 'Automatically added noi18n trybots to run tests on CQ.')
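The new noexcept check matters because standard containers move elements only
via std::move_if_noexcept: a move constructor that is not noexcept makes
std::vector copy on reallocation to preserve the strong exception guarantee.
A minimal sketch of the rule being enforced, with plain noexcept standing in
for the V8_NOEXCEPT macro (whose definition is not part of this diff):

    #include <vector>

    class Buffer {
     public:
      Buffer() = default;
      Buffer(const Buffer&) = default;
      // Without noexcept here, std::vector would copy elements on
      // reallocation instead of moving them.
      Buffer(Buffer&&) noexcept = default;
      Buffer& operator=(Buffer&&) noexcept = default;
    };

    int main() {
      std::vector<Buffer> v(4);
      v.resize(1024);  // grows by moving, not copying, thanks to noexcept
      return 0;
    }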
diff --git a/deps/v8/base/trace_event/common/trace_event_common.h b/deps/v8/base/trace_event/common/trace_event_common.h
index e2a5ca0c8d..f9ef03f5ba 100644
--- a/deps/v8/base/trace_event/common/trace_event_common.h
+++ b/deps/v8/base/trace_event/common/trace_event_common.h
@@ -374,6 +374,10 @@
INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_MARK, category_group, name, \
TRACE_EVENT_FLAG_COPY)
+#define TRACE_EVENT_COPY_MARK1(category_group, name, arg1_name, arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_MARK, category_group, name, \
+ TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val)
+
#define TRACE_EVENT_COPY_MARK_WITH_TIMESTAMP(category_group, name, timestamp) \
INTERNAL_TRACE_EVENT_ADD_WITH_TIMESTAMP( \
TRACE_EVENT_PHASE_MARK, category_group, name, timestamp, \
@@ -1005,14 +1009,6 @@
} \
} while (0)
-// Macro to explicitly warm up a given category group. This could be useful in
-// cases where we want to initialize a category group before any trace events
-// for that category group is reported. For example, to have a category group
-// always show up in the "record categories" list for manually selecting
-// settings in about://tracing.
-#define TRACE_EVENT_WARMUP_CATEGORY(category_group) \
- INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group)
-
// Macro to efficiently determine, through polling, if a new trace has begun.
#define TRACE_EVENT_IS_NEW_TRACE(ret) \
do { \
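The added TRACE_EVENT_COPY_MARK1 is the one-argument variant of
TRACE_EVENT_COPY_MARK: same MARK phase and string-copying flag, plus a single
name/value pair. An illustrative call site (hypothetical names; it compiles
only where the embedder's tracing glue defines the INTERNAL_TRACE_EVENT_ADD
machinery):

    // Hypothetical call site for the new one-argument MARK variant.
    void OnEpochBoundary(int epoch) {
      TRACE_EVENT_COPY_MARK1("v8.example", "EpochBoundary", "epoch", epoch);
    }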
diff --git a/deps/v8/benchmarks/base.js b/deps/v8/benchmarks/base.js
index 62c37e1208..f84c6d84f7 100644
--- a/deps/v8/benchmarks/base.js
+++ b/deps/v8/benchmarks/base.js
@@ -87,6 +87,7 @@ Math.random = (function() {
var seed = 49734321;
return function() {
// Robert Jenkins' 32 bit integer hash function.
+ seed = seed & 0xffffffff;
seed = ((seed + 0x7ed55d16) + (seed << 12)) & 0xffffffff;
seed = ((seed ^ 0xc761c23c) ^ (seed >>> 19)) & 0xffffffff;
seed = ((seed + 0x165667b1) + (seed << 5)) & 0xffffffff;
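The added masking line pins the stored seed to 32 bits before the mixing steps
that follow. For comparison, a C++ rendering of the same Robert Jenkins 32-bit
mix, where uint32_t makes each "& 0xffffffff" implicit and ">>" on an unsigned
value matches JavaScript's ">>>" (the steps beyond this hunk follow the
standard published mix, not the diff):

    #include <cstdint>
    #include <cstdio>

    uint32_t JenkinsMix(uint32_t seed) {
      seed = (seed + 0x7ed55d16) + (seed << 12);
      seed = (seed ^ 0xc761c23c) ^ (seed >> 19);
      seed = (seed + 0x165667b1) + (seed << 5);
      seed = (seed + 0xd3a2646c) ^ (seed << 9);
      seed = (seed + 0xfd7046c5) + (seed << 3);
      seed = (seed ^ 0xb55a4f09) ^ (seed >> 16);
      return seed;
    }

    int main() {
      uint32_t seed = 49734321;  // the benchmark's seed constant
      std::printf("%u\n", JenkinsMix(seed));
      return 0;
    }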
diff --git a/deps/v8/benchmarks/micro/slice-perf.js b/deps/v8/benchmarks/micro/slice-perf.js
new file mode 100644
index 0000000000..300d212666
--- /dev/null
+++ b/deps/v8/benchmarks/micro/slice-perf.js
@@ -0,0 +1,83 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+const kIterations = 1000000;
+const kIterationShort = 10000;
+const kArraySize = 64;
+
+let smi_array = [];
+for (let i = 0; i < kArraySize; ++i) smi_array[i] = Math.floor(Math.random() * 100);
+
+let start = performance.now();
+for (let x = 0; x < kIterations; ++x) {
+ smi_array.slice(0);
+}
+let stop = performance.now();
+print("smi_array copy: " + (Math.floor((stop - start)*10)/10) + " ms");
+
+start = performance.now();
+for (let x = 0; x < kIterations; ++x) {
+ smi_array.slice(x % kArraySize);
+}
+stop = performance.now();
+print("smi_array: " + (Math.floor((stop - start)*10)/10) + " ms");
+
+let double_array = [];
+for (let i = 0; i < kArraySize; ++i) double_array[i] = Math.random() * 100;
+start = performance.now();
+for (let x = 0; x < kIterations; ++x) {
+ double_array.slice(x % kArraySize);
+}
+stop = performance.now();
+print("double_array: " + (Math.floor((stop - start)*10)/10) + " ms");
+
+let object_array = [];
+for (let i = 0; i < kArraySize; ++i) object_array[i] = new Object();
+start = performance.now();
+for (let x = 0; x < kIterations; ++x) {
+ object_array.slice(x % kArraySize);
+}
+stop = performance.now();
+print("object_array: " + (Math.floor((stop - start)*10)/10) + " ms");
+
+let dictionary_array = [];
+for (let i = 0; i < kArraySize; ++i) dictionary_array[i] = new Object();
+dictionary_array[100000] = new Object();
+start = performance.now();
+for (let x = 0; x < kIterationShort; ++x) {
+ dictionary_array.slice(x % kArraySize);
+}
+stop = performance.now();
+print("dictionary: " + (Math.floor((stop - start)*10)/10) + " ms");
+
+let arguments_array;
+function sloppy() {
+ arguments_array = arguments;
+}
+sloppy.apply(null, smi_array);
+start = performance.now();
+for (let x = 0; x < kIterations; ++x) {
+ let r = Array.prototype.slice.call(arguments_array, x % kArraySize);
+}
+stop = performance.now();
+print("arguments_array (sloppy): " + (Math.floor((stop - start)*10)/10) + " ms");
+
+function sloppy2 (a) {
+ arguments_array = arguments;
+}
+sloppy2.apply(null, smi_array);
+start = performance.now();
+for (let x = 0; x < kIterations; ++x) {
+ Array.prototype.slice.call(arguments_array, x % kArraySize);
+}
+stop = performance.now();
+print("arguments_array (fast aliased): " + (Math.floor((stop - start)*10)/10) + " ms");
+
+delete arguments_array[5];
+start = performance.now();
+for (let x = 0; x < kIterationShort; ++x) {
+ Array.prototype.slice.call(arguments_array, x % kArraySize);
+}
+stop = performance.now();
+print("arguments_array (slow aliased): " + (Math.floor((stop - start)*10)/10) + " ms");
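As a usage note: this micro-benchmark relies on print() and performance.now(),
both provided by V8's d8 shell, so it can presumably be run standalone as
"d8 deps/v8/benchmarks/micro/slice-perf.js". The dictionary and slow-aliased
cases use kIterationShort because those paths are expected to be much slower.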
diff --git a/deps/v8/benchmarks/spinning-balls/v.js b/deps/v8/benchmarks/spinning-balls/v.js
index 5ae11948d6..177ab44aec 100644
--- a/deps/v8/benchmarks/spinning-balls/v.js
+++ b/deps/v8/benchmarks/spinning-balls/v.js
@@ -136,6 +136,7 @@ Math.random = (function() {
var seed = 49734321;
return function() {
// Robert Jenkins' 32 bit integer hash function.
+ seed = seed & 0xffffffff;
seed = ((seed + 0x7ed55d16) + (seed << 12)) & 0xffffffff;
seed = ((seed ^ 0xc761c23c) ^ (seed >>> 19)) & 0xffffffff;
seed = ((seed + 0x165667b1) + (seed << 5)) & 0xffffffff;
diff --git a/deps/v8/gni/v8.gni b/deps/v8/gni/v8.gni
index 6cb248f160..1ed8e0382a 100644
--- a/deps/v8/gni/v8.gni
+++ b/deps/v8/gni/v8.gni
@@ -32,7 +32,7 @@ declare_args() {
v8_enable_backtrace = ""
# Enable the snapshot feature, for fast context creation.
- # http://v8project.blogspot.com/2015/09/custom-startup-snapshots.html
+ # https://v8.dev/blog/custom-startup-snapshots
# TODO(thakis): Make snapshots work in 64-bit win/cross builds,
# https://803591
v8_use_snapshot = !(is_win && host_os != "win" && target_cpu == "x64")
diff --git a/deps/v8/include/OWNERS b/deps/v8/include/OWNERS
index d20fb79fe1..7953cfe133 100644
--- a/deps/v8/include/OWNERS
+++ b/deps/v8/include/OWNERS
@@ -5,6 +5,7 @@ danno@chromium.org
ulan@chromium.org
yangguo@chromium.org
+per-file v8-internal.h=file://OWNERS
per-file v8-inspector.h=dgozman@chromium.org
per-file v8-inspector.h=pfeldman@chromium.org
per-file v8-inspector.h=kozyatinskiy@chromium.org
diff --git a/deps/v8/include/PRESUBMIT.py b/deps/v8/include/PRESUBMIT.py
deleted file mode 100644
index 8002e4dcac..0000000000
--- a/deps/v8/include/PRESUBMIT.py
+++ /dev/null
@@ -1,29 +0,0 @@
-# Copyright 2017 the V8 project authors. All rights reserved.')
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-"""Presubmit script for //v8/include
-
-See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
-for more details about the presubmit API built into depot_tools.
-"""
-
-import os
-
-
-def PostUploadHook(cl, change, output_api):
- """git cl upload will call this hook after the issue is created/modified.
-
- This hook adds extra try bots to the CL description in order to run layout
- tests in addition to CQ try bots.
- """
- def header_filter(f):
- return '.h' in os.path.split(f.LocalPath())[1]
- if not change.AffectedFiles(file_filter=header_filter):
- return []
- return output_api.EnsureCQIncludeTrybotsAreAdded(
- cl,
- [
- 'luci.chromium.try:linux_chromium_rel_ng'
- ],
- 'Automatically added layout test trybots to run tests on CQ.')
diff --git a/deps/v8/include/libplatform/libplatform.h b/deps/v8/include/libplatform/libplatform.h
index 2b167cb9e5..13c0db9a85 100644
--- a/deps/v8/include/libplatform/libplatform.h
+++ b/deps/v8/include/libplatform/libplatform.h
@@ -41,7 +41,7 @@ V8_PLATFORM_EXPORT std::unique_ptr<v8::Platform> NewDefaultPlatform(
InProcessStackDumping::kDisabled,
std::unique_ptr<v8::TracingController> tracing_controller = {});
-V8_PLATFORM_EXPORT V8_DEPRECATE_SOON(
+V8_PLATFORM_EXPORT V8_DEPRECATED(
"Use NewDefaultPlatform instead",
v8::Platform* CreateDefaultPlatform(
int thread_pool_size = 0,
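With CreateDefaultPlatform now fully deprecated, embedders go through
NewDefaultPlatform, which returns an owning std::unique_ptr. A minimal
initialization sketch against this API (isolate creation and script execution
elided):

    #include <libplatform/libplatform.h>
    #include <v8.h>

    int main() {
      std::unique_ptr<v8::Platform> platform =
          v8::platform::NewDefaultPlatform();
      v8::V8::InitializePlatform(platform.get());
      v8::V8::Initialize();
      // ... create an Isolate, compile and run scripts ...
      v8::V8::Dispose();
      v8::V8::ShutdownPlatform();
      return 0;
    }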
diff --git a/deps/v8/include/v8-inspector.h b/deps/v8/include/v8-inspector.h
index f0a8b5f163..702013588c 100644
--- a/deps/v8/include/v8-inspector.h
+++ b/deps/v8/include/v8-inspector.h
@@ -245,8 +245,7 @@ class V8_EXPORT V8Inspector {
virtual void contextCreated(const V8ContextInfo&) = 0;
virtual void contextDestroyed(v8::Local<v8::Context>) = 0;
virtual void resetContextGroup(int contextGroupId) = 0;
- virtual v8::MaybeLocal<v8::Context> contextById(int groupId,
- v8::Maybe<int> contextId) = 0;
+ virtual v8::MaybeLocal<v8::Context> contextById(int contextId) = 0;
// Various instrumentation.
virtual void idleStarted() = 0;
diff --git a/deps/v8/include/v8-internal.h b/deps/v8/include/v8-internal.h
index 80f7367bfe..7f9c27ebb9 100644
--- a/deps/v8/include/v8-internal.h
+++ b/deps/v8/include/v8-internal.h
@@ -20,15 +20,19 @@ class Isolate;
namespace internal {
-class Object;
+class Isolate;
+
+typedef uintptr_t Address;
+static const Address kNullAddress = 0;
/**
* Configuration of tagging scheme.
*/
-const int kApiPointerSize = sizeof(void*); // NOLINT
-const int kApiDoubleSize = sizeof(double); // NOLINT
-const int kApiIntSize = sizeof(int); // NOLINT
-const int kApiInt64Size = sizeof(int64_t); // NOLINT
+const int kApiSystemPointerSize = sizeof(void*);
+const int kApiTaggedSize = kApiSystemPointerSize;
+const int kApiDoubleSize = sizeof(double);
+const int kApiIntSize = sizeof(int);
+const int kApiInt64Size = sizeof(int64_t);
// Tag information for HeapObject.
const int kHeapObjectTag = 1;
@@ -44,33 +48,20 @@ const intptr_t kSmiTagMask = (1 << kSmiTagSize) - 1;
template <size_t tagged_ptr_size>
struct SmiTagging;
-template <int kSmiShiftSize>
-V8_INLINE internal::Object* IntToSmi(int value) {
- int smi_shift_bits = kSmiTagSize + kSmiShiftSize;
- intptr_t tagged_value =
- (static_cast<intptr_t>(value) << smi_shift_bits) | kSmiTag;
- return reinterpret_cast<internal::Object*>(tagged_value);
-}
-
// Smi constants for systems where tagged pointer is a 32-bit value.
template <>
struct SmiTagging<4> {
enum { kSmiShiftSize = 0, kSmiValueSize = 31 };
- static int SmiShiftSize() { return kSmiShiftSize; }
- static int SmiValueSize() { return kSmiValueSize; }
- V8_INLINE static int SmiToInt(const internal::Object* value) {
+ V8_INLINE static int SmiToInt(const internal::Address value) {
int shift_bits = kSmiTagSize + kSmiShiftSize;
- // Throw away top 32 bits and shift down (requires >> to be sign extending).
- return static_cast<int>(reinterpret_cast<intptr_t>(value)) >> shift_bits;
- }
- V8_INLINE static internal::Object* IntToSmi(int value) {
- return internal::IntToSmi<kSmiShiftSize>(value);
+ // Shift down (requires >> to be sign extending).
+ return static_cast<int>(static_cast<intptr_t>(value)) >> shift_bits;
}
V8_INLINE static constexpr bool IsValidSmi(intptr_t value) {
// To be representable as a tagged small integer, the two
// most-significant bits of 'value' must be either 00 or 11 due to
// sign-extension. To check this we add 01 to the two
- // most-significant bits, and check if the most-significant bit is 0
+ // most-significant bits, and check if the most-significant bit is 0.
//
// CAUTION: The original code below:
// bool result = ((value + 0x40000000) & 0x80000000) == 0;
@@ -86,15 +77,10 @@ struct SmiTagging<4> {
template <>
struct SmiTagging<8> {
enum { kSmiShiftSize = 31, kSmiValueSize = 32 };
- static int SmiShiftSize() { return kSmiShiftSize; }
- static int SmiValueSize() { return kSmiValueSize; }
- V8_INLINE static int SmiToInt(const internal::Object* value) {
+ V8_INLINE static int SmiToInt(const internal::Address value) {
int shift_bits = kSmiTagSize + kSmiShiftSize;
// Shift down and throw away top 32 bits.
- return static_cast<int>(reinterpret_cast<intptr_t>(value) >> shift_bits);
- }
- V8_INLINE static internal::Object* IntToSmi(int value) {
- return internal::IntToSmi<kSmiShiftSize>(value);
+ return static_cast<int>(static_cast<intptr_t>(value) >> shift_bits);
}
V8_INLINE static constexpr bool IsValidSmi(intptr_t value) {
// To be representable as a long smi, the value must be a 32-bit integer.
@@ -102,13 +88,16 @@ struct SmiTagging<8> {
}
};
-#if V8_COMPRESS_POINTERS
+#if defined(V8_COMPRESS_POINTERS)
static_assert(
- kApiPointerSize == kApiInt64Size,
+ kApiSystemPointerSize == kApiInt64Size,
"Pointer compression can be enabled only for 64-bit architectures");
-typedef SmiTagging<4> PlatformSmiTagging;
+#endif
+
+#if defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH)
+typedef SmiTagging<kApiIntSize> PlatformSmiTagging;
#else
-typedef SmiTagging<kApiPointerSize> PlatformSmiTagging;
+typedef SmiTagging<kApiSystemPointerSize> PlatformSmiTagging;
#endif
const int kSmiShiftSize = PlatformSmiTagging::kSmiShiftSize;
@@ -118,6 +107,11 @@ const int kSmiMaxValue = -(kSmiMinValue + 1);
constexpr bool SmiValuesAre31Bits() { return kSmiValueSize == 31; }
constexpr bool SmiValuesAre32Bits() { return kSmiValueSize == 32; }
+V8_INLINE static constexpr internal::Address IntToSmi(int value) {
+ return (static_cast<Address>(value) << (kSmiTagSize + kSmiShiftSize)) |
+ kSmiTag;
+}
+
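The encode/decode pair above can be sanity-checked outside the header. A standalone sketch of the 32-bit scheme (kSmiTagSize = 1, kSmiShiftSize = 0), using local copies of the constants rather than the V8 headers:

    #include <cassert>
    #include <cstdint>

    int main() {
      using Address = uintptr_t;
      const Address kSmiTag = 0;
      const int kSmiTagSize = 1, kSmiShiftSize = 0;
      int value = -42;
      // Encode: shift the payload past the tag bits, then or in the tag.
      Address smi =
          (static_cast<Address>(value) << (kSmiTagSize + kSmiShiftSize)) | kSmiTag;
      // Decode: truncate to 32 bits, then sign-extending right shift.
      int decoded = static_cast<int>(static_cast<intptr_t>(smi)) >>
                    (kSmiTagSize + kSmiShiftSize);
      assert(decoded == value);
      return 0;
    }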
/**
* This class exports constants and functionality from within v8 that
* is necessary to implement inline functions in the v8 api. Don't
@@ -128,30 +122,40 @@ class Internals {
// These values match non-compiler-dependent values defined within
// the implementation of v8.
static const int kHeapObjectMapOffset = 0;
- static const int kMapInstanceTypeOffset = 1 * kApiPointerSize + kApiIntSize;
- static const int kStringResourceOffset =
- 1 * kApiPointerSize + 2 * kApiIntSize;
-
- static const int kOddballKindOffset = 4 * kApiPointerSize + kApiDoubleSize;
- static const int kForeignAddressOffset = kApiPointerSize;
- static const int kJSObjectHeaderSize = 3 * kApiPointerSize;
- static const int kFixedArrayHeaderSize = 2 * kApiPointerSize;
- static const int kContextHeaderSize = 2 * kApiPointerSize;
- static const int kContextEmbedderDataIndex = 5;
+ static const int kMapInstanceTypeOffset = 1 * kApiTaggedSize + kApiIntSize;
+ static const int kStringResourceOffset = 1 * kApiTaggedSize + 2 * kApiIntSize;
+
+ static const int kOddballKindOffset = 4 * kApiTaggedSize + kApiDoubleSize;
+ static const int kForeignAddressOffset = kApiTaggedSize;
+ static const int kJSObjectHeaderSize = 3 * kApiTaggedSize;
+ static const int kJSObjectHeaderSizeForEmbedderFields =
+ (kJSObjectHeaderSize + kApiSystemPointerSize - 1) &
+ -kApiSystemPointerSize;
+ static const int kFixedArrayHeaderSize = 2 * kApiTaggedSize;
+ static const int kEmbedderDataArrayHeaderSize = 2 * kApiTaggedSize;
+ static const int kEmbedderDataSlotSize =
+#ifdef V8_COMPRESS_POINTERS
+ 2 *
+#endif
+ kApiSystemPointerSize;
+ static const int kNativeContextEmbedderDataOffset = 7 * kApiTaggedSize;
static const int kFullStringRepresentationMask = 0x0f;
static const int kStringEncodingMask = 0x8;
static const int kExternalTwoByteRepresentationTag = 0x02;
static const int kExternalOneByteRepresentationTag = 0x0a;
- static const int kIsolateEmbedderDataOffset = 0 * kApiPointerSize;
- static const int kExternalMemoryOffset = 4 * kApiPointerSize;
+ static const uint32_t kNumIsolateDataSlots = 4;
+
+ static const int kIsolateEmbedderDataOffset = 0;
+ static const int kExternalMemoryOffset =
+ kNumIsolateDataSlots * kApiTaggedSize;
static const int kExternalMemoryLimitOffset =
kExternalMemoryOffset + kApiInt64Size;
static const int kExternalMemoryAtLastMarkCompactOffset =
kExternalMemoryLimitOffset + kApiInt64Size;
- static const int kIsolateRootsOffset = kExternalMemoryLimitOffset +
- kApiInt64Size + kApiInt64Size +
- kApiPointerSize + kApiPointerSize;
+ static const int kIsolateRootsOffset =
+ kExternalMemoryAtLastMarkCompactOffset + kApiInt64Size;
+
static const int kUndefinedValueRootIndex = 4;
static const int kTheHoleValueRootIndex = 5;
static const int kNullValueRootIndex = 6;
@@ -159,8 +163,8 @@ class Internals {
static const int kFalseValueRootIndex = 8;
static const int kEmptyStringRootIndex = 9;
- static const int kNodeClassIdOffset = 1 * kApiPointerSize;
- static const int kNodeFlagsOffset = 1 * kApiPointerSize + 3;
+ static const int kNodeClassIdOffset = 1 * kApiTaggedSize;
+ static const int kNodeFlagsOffset = 1 * kApiTaggedSize + 3;
static const int kNodeStateMask = 0x7;
static const int kNodeStateIsWeakValue = 2;
static const int kNodeStateIsPendingValue = 3;
@@ -178,8 +182,6 @@ class Internals {
static const int kUndefinedOddballKind = 5;
static const int kNullOddballKind = 3;
- static const uint32_t kNumIsolateDataSlots = 4;
-
  // Soft limit for AdjustAmountOfExternalAllocatedMemory. Trigger an
// incremental GC once the external memory reaches this limit.
static constexpr int kExternalAllocationSoftLimit = 64 * 1024 * 1024;
@@ -191,32 +193,30 @@ class Internals {
#endif
}
- V8_INLINE static bool HasHeapObjectTag(const internal::Object* value) {
- return ((reinterpret_cast<intptr_t>(value) & kHeapObjectTagMask) ==
- kHeapObjectTag);
+ V8_INLINE static bool HasHeapObjectTag(const internal::Address value) {
+ return (value & kHeapObjectTagMask) == static_cast<Address>(kHeapObjectTag);
}
- V8_INLINE static int SmiValue(const internal::Object* value) {
+ V8_INLINE static int SmiValue(const internal::Address value) {
return PlatformSmiTagging::SmiToInt(value);
}
- V8_INLINE static internal::Object* IntToSmi(int value) {
- return PlatformSmiTagging::IntToSmi(value);
+ V8_INLINE static constexpr internal::Address IntToSmi(int value) {
+ return internal::IntToSmi(value);
}
V8_INLINE static constexpr bool IsValidSmi(intptr_t value) {
return PlatformSmiTagging::IsValidSmi(value);
}
- V8_INLINE static int GetInstanceType(const internal::Object* obj) {
- typedef internal::Object O;
- O* map = ReadField<O*>(obj, kHeapObjectMapOffset);
- return ReadField<uint16_t>(map, kMapInstanceTypeOffset);
+ V8_INLINE static int GetInstanceType(const internal::Address obj) {
+ typedef internal::Address A;
+ A map = ReadTaggedPointerField(obj, kHeapObjectMapOffset);
+ return ReadRawField<uint16_t>(map, kMapInstanceTypeOffset);
}
- V8_INLINE static int GetOddballKind(const internal::Object* obj) {
- typedef internal::Object O;
- return SmiValue(ReadField<O*>(obj, kOddballKindOffset));
+ V8_INLINE static int GetOddballKind(const internal::Address obj) {
+ return SmiValue(ReadTaggedSignedField(obj, kOddballKindOffset));
}
V8_INLINE static bool IsExternalTwoByteString(int instance_type) {
@@ -224,67 +224,120 @@ class Internals {
return representation == kExternalTwoByteRepresentationTag;
}
- V8_INLINE static uint8_t GetNodeFlag(internal::Object** obj, int shift) {
+ V8_INLINE static uint8_t GetNodeFlag(internal::Address* obj, int shift) {
uint8_t* addr = reinterpret_cast<uint8_t*>(obj) + kNodeFlagsOffset;
return *addr & static_cast<uint8_t>(1U << shift);
}
- V8_INLINE static void UpdateNodeFlag(internal::Object** obj, bool value,
+ V8_INLINE static void UpdateNodeFlag(internal::Address* obj, bool value,
int shift) {
uint8_t* addr = reinterpret_cast<uint8_t*>(obj) + kNodeFlagsOffset;
uint8_t mask = static_cast<uint8_t>(1U << shift);
*addr = static_cast<uint8_t>((*addr & ~mask) | (value << shift));
}
- V8_INLINE static uint8_t GetNodeState(internal::Object** obj) {
+ V8_INLINE static uint8_t GetNodeState(internal::Address* obj) {
uint8_t* addr = reinterpret_cast<uint8_t*>(obj) + kNodeFlagsOffset;
return *addr & kNodeStateMask;
}
- V8_INLINE static void UpdateNodeState(internal::Object** obj, uint8_t value) {
+ V8_INLINE static void UpdateNodeState(internal::Address* obj, uint8_t value) {
uint8_t* addr = reinterpret_cast<uint8_t*>(obj) + kNodeFlagsOffset;
*addr = static_cast<uint8_t>((*addr & ~kNodeStateMask) | value);
}
V8_INLINE static void SetEmbedderData(v8::Isolate* isolate, uint32_t slot,
void* data) {
- uint8_t* addr = reinterpret_cast<uint8_t*>(isolate) +
- kIsolateEmbedderDataOffset + slot * kApiPointerSize;
+ internal::Address addr = reinterpret_cast<internal::Address>(isolate) +
+ kIsolateEmbedderDataOffset +
+ slot * kApiSystemPointerSize;
*reinterpret_cast<void**>(addr) = data;
}
V8_INLINE static void* GetEmbedderData(const v8::Isolate* isolate,
uint32_t slot) {
- const uint8_t* addr = reinterpret_cast<const uint8_t*>(isolate) +
- kIsolateEmbedderDataOffset + slot * kApiPointerSize;
+ internal::Address addr = reinterpret_cast<internal::Address>(isolate) +
+ kIsolateEmbedderDataOffset +
+ slot * kApiSystemPointerSize;
return *reinterpret_cast<void* const*>(addr);
}
- V8_INLINE static internal::Object** GetRoot(v8::Isolate* isolate, int index) {
- uint8_t* addr = reinterpret_cast<uint8_t*>(isolate) + kIsolateRootsOffset;
- return reinterpret_cast<internal::Object**>(addr + index * kApiPointerSize);
+ V8_INLINE static internal::Address* GetRoot(v8::Isolate* isolate, int index) {
+ internal::Address addr = reinterpret_cast<internal::Address>(isolate) +
+ kIsolateRootsOffset +
+ index * kApiSystemPointerSize;
+ return reinterpret_cast<internal::Address*>(addr);
}
template <typename T>
- V8_INLINE static T ReadField(const internal::Object* ptr, int offset) {
- const uint8_t* addr =
- reinterpret_cast<const uint8_t*>(ptr) + offset - kHeapObjectTag;
+ V8_INLINE static T ReadRawField(internal::Address heap_object_ptr,
+ int offset) {
+ internal::Address addr = heap_object_ptr + offset - kHeapObjectTag;
return *reinterpret_cast<const T*>(addr);
}
+ V8_INLINE static internal::Address ReadTaggedPointerField(
+ internal::Address heap_object_ptr, int offset) {
+#ifdef V8_COMPRESS_POINTERS
+ int32_t value = ReadRawField<int32_t>(heap_object_ptr, offset);
+ internal::Address root = GetRootFromOnHeapAddress(heap_object_ptr);
+ return root + static_cast<internal::Address>(static_cast<intptr_t>(value));
+#else
+ return ReadRawField<internal::Address>(heap_object_ptr, offset);
+#endif
+ }
+
+ V8_INLINE static internal::Address ReadTaggedSignedField(
+ internal::Address heap_object_ptr, int offset) {
+#ifdef V8_COMPRESS_POINTERS
+ int32_t value = ReadRawField<int32_t>(heap_object_ptr, offset);
+ return static_cast<internal::Address>(static_cast<intptr_t>(value));
+#else
+ return ReadRawField<internal::Address>(heap_object_ptr, offset);
+#endif
+ }
+
+ V8_INLINE static internal::Address ReadTaggedAnyField(
+ internal::Address heap_object_ptr, int offset) {
+#ifdef V8_COMPRESS_POINTERS
+ int32_t value = ReadRawField<int32_t>(heap_object_ptr, offset);
+ internal::Address root_mask = static_cast<internal::Address>(
+ -static_cast<intptr_t>(value & kSmiTagMask));
+ internal::Address root_or_zero =
+ root_mask & GetRootFromOnHeapAddress(heap_object_ptr);
+ return root_or_zero +
+ static_cast<internal::Address>(static_cast<intptr_t>(value));
+#else
+ return ReadRawField<internal::Address>(heap_object_ptr, offset);
+#endif
+ }
+
+#ifdef V8_COMPRESS_POINTERS
+ static constexpr size_t kPtrComprHeapReservationSize = size_t{1} << 32;
+ static constexpr size_t kPtrComprIsolateRootBias =
+ kPtrComprHeapReservationSize / 2;
+ static constexpr size_t kPtrComprIsolateRootAlignment = size_t{1} << 32;
+
+ V8_INLINE static internal::Address GetRootFromOnHeapAddress(
+ internal::Address addr) {
+ return (addr + kPtrComprIsolateRootBias) &
+ -static_cast<intptr_t>(kPtrComprIsolateRootAlignment);
+ }
+
+#else
+
template <typename T>
V8_INLINE static T ReadEmbedderData(const v8::Context* context, int index) {
- typedef internal::Object O;
+ typedef internal::Address A;
typedef internal::Internals I;
- O* ctx = *reinterpret_cast<O* const*>(context);
- int embedder_data_offset =
- I::kContextHeaderSize +
- (internal::kApiPointerSize * I::kContextEmbedderDataIndex);
- O* embedder_data = I::ReadField<O*>(ctx, embedder_data_offset);
+ A ctx = *reinterpret_cast<const A*>(context);
+ A embedder_data =
+ I::ReadTaggedPointerField(ctx, I::kNativeContextEmbedderDataOffset);
int value_offset =
- I::kFixedArrayHeaderSize + (internal::kApiPointerSize * index);
- return I::ReadField<T>(embedder_data, value_offset);
+ I::kEmbedderDataArrayHeaderSize + (I::kEmbedderDataSlotSize * index);
+ return I::ReadRawField<T>(embedder_data, value_offset);
}
+#endif // V8_COMPRESS_POINTERS
};
// Only perform cast check for types derived from v8::Data since
@@ -310,6 +363,10 @@ V8_INLINE void PerformCastCheck(T* data) {
CastCheck<std::is_base_of<Data, T>::value>::Perform(data);
}
+// {obj} must be the raw tagged pointer representation of a HeapObject
+// that's guaranteed to never be in ReadOnlySpace.
+V8_EXPORT internal::Isolate* IsolateFromNeverReadOnlySpaceObject(Address obj);
+
} // namespace internal
} // namespace v8
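One detail worth calling out from the pointer-compression readers above: ReadTaggedAnyField decompresses without a branch by exploiting kSmiTag == 0, so the isolate root is added back only when the low tag bit says "heap object". A standalone sketch of that mask trick with stand-in values (assumes a 64-bit host):

    #include <cassert>
    #include <cstdint>

    using Address = uintptr_t;
    const Address kSmiTagMask = 1;  // low bit: 0 = Smi, 1 = heap object

    // Branch-free decompression: Smis pass through unchanged, heap-object
    // offsets get the isolate root added back.
    Address Decompress(Address root, int32_t value) {
      Address root_mask =
          static_cast<Address>(-static_cast<intptr_t>(value & kSmiTagMask));
      Address root_or_zero = root_mask & root;
      return root_or_zero + static_cast<Address>(static_cast<intptr_t>(value));
    }

    int main() {
      Address root = Address{1} << 32;  // stand-in isolate root
      assert(Decompress(root, 42 << 1) == static_cast<Address>(42 << 1));  // Smi
      assert(Decompress(root, 0x1001) == root + 0x1001);  // tagged pointer
      return 0;
    }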
diff --git a/deps/v8/include/v8-platform.h b/deps/v8/include/v8-platform.h
index d983c30249..fc008979f6 100644
--- a/deps/v8/include/v8-platform.h
+++ b/deps/v8/include/v8-platform.h
@@ -54,6 +54,15 @@ class TaskRunner {
virtual void PostTask(std::unique_ptr<Task> task) = 0;
/**
+ * Schedules a task to be invoked by this TaskRunner. The TaskRunner
+ * implementation takes ownership of |task|. The |task| cannot be nested
+ * within other task executions.
+ *
+ * Requires that |TaskRunner::NonNestableTasksEnabled()| is true.
+ */
+ virtual void PostNonNestableTask(std::unique_ptr<Task> task) {}
+
+ /**
* Schedules a task to be invoked by this TaskRunner. The task is scheduled
* after the given number of seconds |delay_in_seconds|. The TaskRunner
* implementation takes ownership of |task|.
@@ -64,7 +73,7 @@ class TaskRunner {
/**
* Schedules an idle task to be invoked by this TaskRunner. The task is
* scheduled when the embedder is idle. Requires that
- * TaskRunner::SupportsIdleTasks(isolate) is true. Idle tasks may be reordered
+ * |TaskRunner::IdleTasksEnabled()| is true. Idle tasks may be reordered
* relative to other task types and may be starved for an arbitrarily long
* time if no idle time is available. The TaskRunner implementation takes
* ownership of |task|.
@@ -76,6 +85,11 @@ class TaskRunner {
*/
virtual bool IdleTasksEnabled() = 0;
+ /**
+ * Returns true if non-nestable tasks are enabled for this TaskRunner.
+ */
+ virtual bool NonNestableTasksEnabled() const { return false; }
+
TaskRunner() = default;
virtual ~TaskRunner() = default;
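A sketch of a TaskRunner opting into the new non-nestable queue. SimpleTaskRunner and its queues are hypothetical; the embedder's run loop is assumed to drain non_nestable_ only at the outermost loop level:

    #include <memory>
    #include <queue>
    #include "v8-platform.h"

    class SimpleTaskRunner : public v8::TaskRunner {
     public:
      void PostTask(std::unique_ptr<v8::Task> task) override {
        nestable_.push(std::move(task));
      }
      void PostNonNestableTask(std::unique_ptr<v8::Task> task) override {
        non_nestable_.push(std::move(task));
      }
      void PostDelayedTask(std::unique_ptr<v8::Task> task,
                           double /*delay_in_seconds*/) override {
        PostTask(std::move(task));  // sketch: delays not modeled
      }
      void PostIdleTask(std::unique_ptr<v8::IdleTask>) override {}
      bool IdleTasksEnabled() override { return false; }
      bool NonNestableTasksEnabled() const override { return true; }

     private:
      std::queue<std::unique_ptr<v8::Task>> nestable_;
      std::queue<std::unique_ptr<v8::Task>> non_nestable_;
    };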
@@ -236,6 +250,13 @@ class PageAllocator {
*/
virtual bool SetPermissions(void* address, size_t length,
Permission permissions) = 0;
+
+ /**
+ * Frees memory in the given [address, address + size) range. address and size
+ * should be operating system page-aligned. The next write to this
+ * memory area brings the memory transparently back.
+ */
+ virtual bool DiscardSystemPages(void* address, size_t size) { return true; }
};
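On POSIX systems, one plausible way to satisfy this contract is madvise(MADV_DONTNEED), which drops the physical pages but keeps the mapping, so the next access faults zero-filled pages back in. A hedged sketch, not V8's default allocator, with Linux-flavored semantics assumed:

    #include <sys/mman.h>
    #include <cstddef>

    // Hypothetical PageAllocator::DiscardSystemPages body for a POSIX embedder.
    bool DiscardSystemPagesPosix(void* address, size_t size) {
      // address and size must be OS page-aligned, as the contract requires.
      return madvise(address, size, MADV_DONTNEED) == 0;
    }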
/**
@@ -311,6 +332,15 @@ class Platform {
}
/**
+ * Schedules a task to be invoked with low-priority on a worker thread.
+ */
+ virtual void CallLowPriorityTaskOnWorkerThread(std::unique_ptr<Task> task) {
+ // Embedders may optionally override this to process these tasks in a low
+ // priority pool.
+ CallOnWorkerThread(std::move(task));
+ }
+
+ /**
* Schedules a task to be invoked on a worker thread after |delay_in_seconds|
* expires.
*/
@@ -388,6 +418,12 @@ class Platform {
*/
virtual TracingController* GetTracingController() = 0;
+ /**
+ * Tells the embedder to generate and upload a crashdump during an unexpected
+ * but non-critical scenario.
+ */
+ virtual void DumpWithoutCrashing() {}
+
protected:
/**
* Default implementation of current wall-clock time in milliseconds
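A fragment showing how an embedder's Platform might pick up the two new hooks. This is not a complete Platform (the pure-virtual members are elided), and low_priority_pool_ and crash_reporter_ are hypothetical embedder objects:

    // Inside a hypothetical v8::Platform subclass:
    void CallLowPriorityTaskOnWorkerThread(
        std::unique_ptr<v8::Task> task) override {
      // Route to a dedicated low-priority pool instead of the default
      // CallOnWorkerThread fallback.
      low_priority_pool_.Post(std::move(task));
    }

    void DumpWithoutCrashing() override {
      // Capture a minidump via the embedder's reporter, then keep running.
      crash_reporter_.CaptureDump();
    }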
diff --git a/deps/v8/include/v8-profiler.h b/deps/v8/include/v8-profiler.h
index f30688582d..94d3fcfcf6 100644
--- a/deps/v8/include/v8-profiler.h
+++ b/deps/v8/include/v8-profiler.h
@@ -612,6 +612,11 @@ class V8_EXPORT AllocationProfile {
int column_number;
/**
+ * Unique id of the node.
+ */
+ uint32_t node_id;
+
+ /**
* List of callees called from this node for which we have sampled
* allocations. The lifetime of the children is scoped to the containing
* AllocationProfile.
@@ -625,11 +630,38 @@ class V8_EXPORT AllocationProfile {
};
/**
+ * Represent a single sample recorded for an allocation.
+ */
+ struct Sample {
+ /**
+ * id of the node in the profile tree.
+ */
+ uint32_t node_id;
+
+ /**
+ * Size of the sampled allocation object.
+ */
+ size_t size;
+
+ /**
+ * The number of objects of such size that were sampled.
+ */
+ unsigned int count;
+
+ /**
+ * Unique time-ordered id of the allocation sample. Can be used to track
+ * what samples were added or removed between two snapshots.
+ */
+ uint64_t sample_id;
+ };
+
+ /**
* Returns the root node of the call-graph. The root node corresponds to an
* empty JS call-stack. The lifetime of the returned Node* is scoped to the
* containing AllocationProfile.
*/
virtual Node* GetRootNode() = 0;
+ virtual const std::vector<Sample>& GetSamples() = 0;
virtual ~AllocationProfile() = default;
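With ids on both nodes and samples, live sampled allocations can be attributed to call sites and diffed across snapshots via sample_id. A usage sketch, assuming sampling was started earlier with HeapProfiler::StartSamplingHeapProfiler():

    #include <cstdio>
    #include <memory>
    #include "v8-profiler.h"

    void PrintLiveSamples(v8::HeapProfiler* heap_profiler) {
      std::unique_ptr<v8::AllocationProfile> profile(
          heap_profiler->GetAllocationProfile());
      if (!profile) return;
      for (const v8::AllocationProfile::Sample& s : profile->GetSamples()) {
        // node_id matches AllocationProfile::Node::node_id in the call tree.
        std::printf("node=%u size=%zu count=%u sample=%llu\n", s.node_id,
                    s.size, s.count,
                    static_cast<unsigned long long>(s.sample_id));
      }
    }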
diff --git a/deps/v8/include/v8-util.h b/deps/v8/include/v8-util.h
index 96c9acbbdc..7f12ead16b 100644
--- a/deps/v8/include/v8-util.h
+++ b/deps/v8/include/v8-util.h
@@ -25,13 +25,11 @@ enum PersistentContainerCallbackType {
kNotWeak,
// These correspond to v8::WeakCallbackType
kWeakWithParameter,
- kWeakWithInternalFields,
- kWeak = kWeakWithParameter // For backwards compatibility. Deprecate.
+ kWeakWithInternalFields
};
-
/**
- * A default trait implemenation for PersistentValueMap which uses std::map
+ * A default trait implementation for PersistentValueMap which uses std::map
* as a backing map.
*
* Users will have to implement their own weak callbacks & dispose traits.
@@ -203,7 +201,7 @@ class PersistentValueMapBase {
void RegisterExternallyReferencedObject(K& key) {
assert(Contains(key));
V8::RegisterExternallyReferencedObject(
- reinterpret_cast<internal::Object**>(FromVal(Traits::Get(&impl_, key))),
+ reinterpret_cast<internal::Address*>(FromVal(Traits::Get(&impl_, key))),
reinterpret_cast<internal::Isolate*>(GetIsolate()));
}
@@ -289,7 +287,10 @@ class PersistentValueMapBase {
}
protected:
- explicit PersistentValueMapBase(Isolate* isolate) : isolate_(isolate) {}
+ explicit PersistentValueMapBase(Isolate* isolate)
+ : isolate_(isolate), label_(nullptr) {}
+ PersistentValueMapBase(Isolate* isolate, const char* label)
+ : isolate_(isolate), label_(label) {}
~PersistentValueMapBase() { Clear(); }
@@ -331,6 +332,10 @@ class PersistentValueMapBase {
p.Reset();
}
+ void AnnotateStrongRetainer(Global<V>* persistent) {
+ persistent->AnnotateStrongRetainer(label_);
+ }
+
private:
PersistentValueMapBase(PersistentValueMapBase&);
void operator=(PersistentValueMapBase&);
@@ -340,13 +345,14 @@ class PersistentValueMapBase {
bool hasValue = value != kPersistentContainerNotFound;
if (hasValue) {
returnValue->SetInternal(
- *reinterpret_cast<internal::Object**>(FromVal(value)));
+ *reinterpret_cast<internal::Address*>(FromVal(value)));
}
return hasValue;
}
Isolate* isolate_;
typename Traits::Impl impl_;
+ const char* label_;
};
@@ -355,6 +361,8 @@ class PersistentValueMap : public PersistentValueMapBase<K, V, Traits> {
public:
explicit PersistentValueMap(Isolate* isolate)
: PersistentValueMapBase<K, V, Traits>(isolate) {}
+ PersistentValueMap(Isolate* isolate, const char* label)
+ : PersistentValueMapBase<K, V, Traits>(isolate, label) {}
typedef
typename PersistentValueMapBase<K, V, Traits>::PersistentValueReference
@@ -382,7 +390,9 @@ class PersistentValueMap : public PersistentValueMapBase<K, V, Traits> {
* by the Traits class.
*/
Global<V> SetUnique(const K& key, Global<V>* persistent) {
- if (Traits::kCallbackType != kNotWeak) {
+ if (Traits::kCallbackType == kNotWeak) {
+ this->AnnotateStrongRetainer(persistent);
+ } else {
WeakCallbackType callback_type =
Traits::kCallbackType == kWeakWithInternalFields
? WeakCallbackType::kInternalFields
@@ -427,6 +437,8 @@ class GlobalValueMap : public PersistentValueMapBase<K, V, Traits> {
public:
explicit GlobalValueMap(Isolate* isolate)
: PersistentValueMapBase<K, V, Traits>(isolate) {}
+ GlobalValueMap(Isolate* isolate, const char* label)
+ : PersistentValueMapBase<K, V, Traits>(isolate, label) {}
typedef
typename PersistentValueMapBase<K, V, Traits>::PersistentValueReference
@@ -454,7 +466,9 @@ class GlobalValueMap : public PersistentValueMapBase<K, V, Traits> {
* by the Traits class.
*/
Global<V> SetUnique(const K& key, Global<V>* persistent) {
- if (Traits::kCallbackType != kNotWeak) {
+ if (Traits::kCallbackType == kNotWeak) {
+ this->AnnotateStrongRetainer(persistent);
+ } else {
WeakCallbackType callback_type =
Traits::kCallbackType == kWeakWithInternalFields
? WeakCallbackType::kInternalFields
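Usage sketch of the new labeled map constructors shown above: strong values held by the map are annotated in heap snapshots with the given label rather than appearing as anonymous strong roots. The key type and label here are arbitrary:

    #include <string>
    #include "v8.h"
    #include "v8-util.h"

    void BuildLabeledCache(v8::Isolate* isolate, v8::Local<v8::Object> value) {
      v8::GlobalValueMap<std::string, v8::Object,
                         v8::DefaultGlobalMapTraits<std::string, v8::Object>>
          cache(isolate, "embedder-object-cache");
      // kNotWeak traits => SetUnique() calls AnnotateStrongRetainer(label_).
      cache.Set("answer", value);
    }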
diff --git a/deps/v8/include/v8-version.h b/deps/v8/include/v8-version.h
index 114e57c58e..abf640228f 100644
--- a/deps/v8/include/v8-version.h
+++ b/deps/v8/include/v8-version.h
@@ -9,9 +9,9 @@
// NOTE these macros are used by some of the tool scripts and the build
// system so their names cannot be changed without changing the scripts.
#define V8_MAJOR_VERSION 7
-#define V8_MINOR_VERSION 1
-#define V8_BUILD_NUMBER 302
-#define V8_PATCH_LEVEL 33
+#define V8_MINOR_VERSION 3
+#define V8_BUILD_NUMBER 492
+#define V8_PATCH_LEVEL 25
// Use 1 for candidates and 0 otherwise.
// (Boolean macro values are not supported by all preprocessors.)
diff --git a/deps/v8/include/v8-wasm-trap-handler-posix.h b/deps/v8/include/v8-wasm-trap-handler-posix.h
new file mode 100644
index 0000000000..998d0a41bb
--- /dev/null
+++ b/deps/v8/include/v8-wasm-trap-handler-posix.h
@@ -0,0 +1,31 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_WASM_TRAP_HANDLER_POSIX_H_
+#define V8_WASM_TRAP_HANDLER_POSIX_H_
+
+#include <signal.h>
+
+#include "v8config.h" // NOLINT(build/include)
+
+namespace v8 {
+/**
+ * This function determines whether a memory access violation was an
+ * out-of-bounds memory access in WebAssembly. If so, it modifies the context
+ * parameter, adds a return address where execution can continue after the
+ * signal handling, and returns true. Otherwise, it returns false.
+ *
+ * The parameters to this function correspond to those passed to a Posix signal
+ * handler. Use this function only on Linux and Mac.
+ *
+ * \param sig_code The signal code, e.g. SIGSEGV.
+ * \param info A pointer to the siginfo_t struct provided to the signal handler.
+ * \param context A pointer to a ucontext_t struct provided to the signal
+ * handler.
+ */
+V8_EXPORT bool TryHandleWebAssemblyTrapPosix(int sig_code, siginfo_t* info,
+ void* context);
+
+} // namespace v8
+#endif // V8_WASM_TRAP_HANDLER_POSIX_H_
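A typical integration gives V8 first refusal on SIGSEGV and falls back to the default disposition otherwise. A sketch, assuming V8 was configured with wasm trap handling enabled:

    #include <signal.h>
    #include "v8-wasm-trap-handler-posix.h"

    void SegvHandler(int sig_code, siginfo_t* info, void* context) {
      if (v8::TryHandleWebAssemblyTrapPosix(sig_code, info, context)) {
        return;  // context was patched; execution resumes in V8's trap stub
      }
      // Not a wasm out-of-bounds access: re-raise with the default handler.
      signal(SIGSEGV, SIG_DFL);
      raise(SIGSEGV);
    }

    void InstallSegvHandler() {
      struct sigaction action = {};
      action.sa_sigaction = SegvHandler;
      action.sa_flags = SA_SIGINFO;
      sigaction(SIGSEGV, &action, nullptr);
    }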
diff --git a/deps/v8/include/v8-wasm-trap-handler-win.h b/deps/v8/include/v8-wasm-trap-handler-win.h
new file mode 100644
index 0000000000..0185df6401
--- /dev/null
+++ b/deps/v8/include/v8-wasm-trap-handler-win.h
@@ -0,0 +1,28 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_WASM_TRAP_HANDLER_WIN_H_
+#define V8_WASM_TRAP_HANDLER_WIN_H_
+
+#include <windows.h>
+
+#include "v8config.h" // NOLINT(build/include)
+
+namespace v8 {
+/**
+ * This function determines whether a memory access violation was an
+ * out-of-bounds memory access in WebAssembly. If so, it modifies the
+ * exception parameter, adds a return address where execution can continue
+ * after the exception handling, and returns true. Otherwise, it returns
+ * false.
+ *
+ * The parameter to this function corresponds to the one passed to a Windows
+ * vectored exception handler. Use this function only on Windows.
+ *
+ * \param exception An EXCEPTION_POINTERS* as provided to the exception handler.
+ */
+V8_EXPORT bool TryHandleWebAssemblyTrapWindows(EXCEPTION_POINTERS* exception);
+
+} // namespace v8
+#endif // V8_WASM_TRAP_HANDLER_WIN_H_
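The Windows counterpart hooks a first-chance vectored exception handler; a sketch:

    #include <windows.h>
    #include "v8-wasm-trap-handler-win.h"

    LONG WINAPI WasmTrapFilter(EXCEPTION_POINTERS* exception) {
      if (v8::TryHandleWebAssemblyTrapWindows(exception)) {
        return EXCEPTION_CONTINUE_EXECUTION;  // exception context was patched
      }
      return EXCEPTION_CONTINUE_SEARCH;  // not ours; keep searching handlers
    }

    void InstallWasmTrapFilter() {
      AddVectoredExceptionHandler(/*First=*/1, WasmTrapFilter);
    }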
diff --git a/deps/v8/include/v8.h b/deps/v8/include/v8.h
index 0f0eb2e739..b23114f4ff 100644
--- a/deps/v8/include/v8.h
+++ b/deps/v8/include/v8.h
@@ -81,7 +81,7 @@ class Private;
class Uint32;
class Utils;
class Value;
-class WasmCompiledModule;
+class WasmModuleObject;
template <class T> class Local;
template <class T>
class MaybeLocal;
@@ -118,7 +118,6 @@ class HeapObject;
class Isolate;
class LocalEmbedderHeapTracer;
class NeverReadOnlySpaceObject;
-class Object;
struct ScriptStreamingData;
template<typename T> class CustomArguments;
class PropertyCallbackArguments;
@@ -212,8 +211,8 @@ class Local {
*/
template <class S>
V8_INLINE bool operator==(const Local<S>& that) const {
- internal::Object** a = reinterpret_cast<internal::Object**>(this->val_);
- internal::Object** b = reinterpret_cast<internal::Object**>(that.val_);
+ internal::Address* a = reinterpret_cast<internal::Address*>(this->val_);
+ internal::Address* b = reinterpret_cast<internal::Address*>(that.val_);
if (a == nullptr) return b == nullptr;
if (b == nullptr) return false;
return *a == *b;
@@ -221,8 +220,8 @@ class Local {
template <class S> V8_INLINE bool operator==(
const PersistentBase<S>& that) const {
- internal::Object** a = reinterpret_cast<internal::Object**>(this->val_);
- internal::Object** b = reinterpret_cast<internal::Object**>(that.val_);
+ internal::Address* a = reinterpret_cast<internal::Address*>(this->val_);
+ internal::Address* b = reinterpret_cast<internal::Address*>(that.val_);
if (a == nullptr) return b == nullptr;
if (b == nullptr) return false;
return *a == *b;
@@ -477,8 +476,8 @@ template <class T> class PersistentBase {
template <class S>
V8_INLINE bool operator==(const PersistentBase<S>& that) const {
- internal::Object** a = reinterpret_cast<internal::Object**>(this->val_);
- internal::Object** b = reinterpret_cast<internal::Object**>(that.val_);
+ internal::Address* a = reinterpret_cast<internal::Address*>(this->val_);
+ internal::Address* b = reinterpret_cast<internal::Address*>(that.val_);
if (a == nullptr) return b == nullptr;
if (b == nullptr) return false;
return *a == *b;
@@ -486,8 +485,8 @@ template <class T> class PersistentBase {
template <class S>
V8_INLINE bool operator==(const Local<S>& that) const {
- internal::Object** a = reinterpret_cast<internal::Object**>(this->val_);
- internal::Object** b = reinterpret_cast<internal::Object**>(that.val_);
+ internal::Address* a = reinterpret_cast<internal::Address*>(this->val_);
+ internal::Address* b = reinterpret_cast<internal::Address*>(that.val_);
if (a == nullptr) return b == nullptr;
if (b == nullptr) return false;
return *a == *b;
@@ -569,7 +568,9 @@ template <class T> class PersistentBase {
V8_INLINE bool IsIndependent() const);
/** Checks if the handle holds the only reference to an object. */
- V8_INLINE bool IsNearDeath() const;
+ V8_DEPRECATE_SOON(
+ "Garbage collection internal state should not be relied on.",
+ V8_INLINE bool IsNearDeath() const);
/** Returns true if the handle's reference is weak. */
V8_INLINE bool IsWeak() const;
@@ -696,7 +697,7 @@ template <class T, class M> class Persistent : public PersistentBase<T> {
V8_INLINE Persistent(const Persistent<S, M2>& that) : PersistentBase<T>(0) {
Copy(that);
}
- V8_INLINE Persistent& operator=(const Persistent& that) { // NOLINT
+ V8_INLINE Persistent& operator=(const Persistent& that) {
Copy(that);
return *this;
}
@@ -780,7 +781,7 @@ class Global : public PersistentBase<T> {
/**
* Move constructor.
*/
- V8_INLINE Global(Global&& other) : PersistentBase<T>(other.val_) { // NOLINT
+ V8_INLINE Global(Global&& other) : PersistentBase<T>(other.val_) {
other.val_ = nullptr;
}
V8_INLINE ~Global() { this->Reset(); }
@@ -859,8 +860,8 @@ class V8_EXPORT HandleScope {
void Initialize(Isolate* isolate);
- static internal::Object** CreateHandle(internal::Isolate* isolate,
- internal::Object* value);
+ static internal::Address* CreateHandle(internal::Isolate* isolate,
+ internal::Address value);
private:
// Declaring operator new and delete as deleted is not spec compliant.
@@ -870,19 +871,15 @@ class V8_EXPORT HandleScope {
void operator delete(void*, size_t);
void operator delete[](void*, size_t);
- // Uses heap_object to obtain the current Isolate.
- static internal::Object** CreateHandle(
- internal::NeverReadOnlySpaceObject* heap_object, internal::Object* value);
-
internal::Isolate* isolate_;
- internal::Object** prev_next_;
- internal::Object** prev_limit_;
+ internal::Address* prev_next_;
+ internal::Address* prev_limit_;
// Local::New uses CreateHandle with an Isolate* parameter.
template<class F> friend class Local;
// Object::GetInternalField and Context::GetEmbedderData use CreateHandle with
- // a HeapObject* in their shortcuts.
+ // a HeapObject in their shortcuts.
friend class Object;
friend class Context;
};
@@ -903,8 +900,8 @@ class V8_EXPORT EscapableHandleScope : public HandleScope {
*/
template <class T>
V8_INLINE Local<T> Escape(Local<T> value) {
- internal::Object** slot =
- Escape(reinterpret_cast<internal::Object**>(*value));
+ internal::Address* slot =
+ Escape(reinterpret_cast<internal::Address*>(*value));
return Local<T>(reinterpret_cast<T*>(slot));
}
@@ -924,8 +921,8 @@ class V8_EXPORT EscapableHandleScope : public HandleScope {
void operator delete(void*, size_t);
void operator delete[](void*, size_t);
- internal::Object** Escape(internal::Object** escape_value);
- internal::Object** escape_slot_;
+ internal::Address* Escape(internal::Address* escape_value);
+ internal::Address* escape_slot_;
};
/**
@@ -950,7 +947,7 @@ class V8_EXPORT SealHandleScope {
void operator delete[](void*, size_t);
internal::Isolate* const isolate_;
- internal::Object** prev_limit_;
+ internal::Address* prev_limit_;
int prev_sealed_level_;
};
@@ -1393,10 +1390,6 @@ class V8_EXPORT ScriptCompiler {
StreamedSource(ExternalSourceStream* source_stream, Encoding encoding);
~StreamedSource();
- V8_DEPRECATED("No longer used", const CachedData* GetCachedData() const) {
- return nullptr;
- }
-
internal::ScriptStreamingData* impl() const { return impl_.get(); }
// Prevent copying.
@@ -1426,10 +1419,6 @@ class V8_EXPORT ScriptCompiler {
enum CompileOptions {
kNoCompileOptions = 0,
- kProduceParserCache,
- kConsumeParserCache,
- kProduceCodeCache,
- kProduceFullCodeCache,
kConsumeCodeCache,
kEagerCompile
};
@@ -1827,8 +1816,18 @@ struct SampleInfo {
};
struct MemoryRange {
- const void* start;
- size_t length_in_bytes;
+ const void* start = nullptr;
+ size_t length_in_bytes = 0;
+};
+
+struct JSEntryStub {
+ MemoryRange code;
+};
+
+struct UnwindState {
+ MemoryRange code_range;
+ MemoryRange embedded_code_range;
+ JSEntryStub js_entry_stub;
};
/**
@@ -1840,12 +1839,10 @@ class V8_EXPORT JSON {
* Tries to parse the string |json_string| and returns it as value if
* successful.
*
+ * \param context The context in which to parse and create the value.
* \param json_string The string to parse.
* \return The corresponding value if successfully parsed.
*/
- static V8_DEPRECATED("Use the maybe version taking context",
- MaybeLocal<Value> Parse(Isolate* isolate,
- Local<String> json_string));
static V8_WARN_UNUSED_RESULT MaybeLocal<Value> Parse(
Local<Context> context, Local<String> json_string);
@@ -1903,7 +1900,7 @@ class V8_EXPORT ValueSerializer {
Isolate* isolate, Local<SharedArrayBuffer> shared_array_buffer);
virtual Maybe<uint32_t> GetWasmModuleTransferId(
- Isolate* isolate, Local<WasmCompiledModule> module);
+ Isolate* isolate, Local<WasmModuleObject> module);
/**
* Allocates memory for the buffer of at least the size provided. The actual
* size (which may be greater or equal) is written to |actual_size|. If no
@@ -1942,12 +1939,6 @@ class V8_EXPORT ValueSerializer {
Local<Value> value);
/**
- * Returns the stored data. This serializer should not be used once the buffer
- * is released. The contents are undefined if a previous write has failed.
- */
- V8_DEPRECATED("Use Release()", std::vector<uint8_t> ReleaseBuffer());
-
- /**
* Returns the stored data (allocated using the delegate's
* ReallocateBufferMemory) and its size. This serializer should not be used
* once the buffer is released. The contents are undefined if a previous write
@@ -1963,13 +1954,6 @@ class V8_EXPORT ValueSerializer {
void TransferArrayBuffer(uint32_t transfer_id,
Local<ArrayBuffer> array_buffer);
- /**
- * Similar to TransferArrayBuffer, but for SharedArrayBuffer.
- */
- V8_DEPRECATED("Use Delegate::GetSharedArrayBufferId",
- void TransferSharedArrayBuffer(
- uint32_t transfer_id,
- Local<SharedArrayBuffer> shared_array_buffer));
/**
* Indicate whether to treat ArrayBufferView objects as host objects,
@@ -2020,10 +2004,10 @@ class V8_EXPORT ValueDeserializer {
virtual MaybeLocal<Object> ReadHostObject(Isolate* isolate);
/**
- * Get a WasmCompiledModule given a transfer_id previously provided
+ * Get a WasmModuleObject given a transfer_id previously provided
* by ValueSerializer::GetWasmModuleTransferId
*/
- virtual MaybeLocal<WasmCompiledModule> GetWasmModuleFromId(
+ virtual MaybeLocal<WasmModuleObject> GetWasmModuleFromId(
Isolate* isolate, uint32_t transfer_id);
/**
@@ -2531,7 +2515,7 @@ enum class NewStringType {
*/
class V8_EXPORT String : public Name {
public:
- static constexpr int kMaxLength = internal::kApiPointerSize == 4
+ static constexpr int kMaxLength = internal::kApiTaggedSize == 4
? (1 << 28) - 16
: internal::kSmiMaxValue / 2 - 24;
@@ -2765,7 +2749,7 @@ class V8_EXPORT String : public Name {
};
/** Allocates a new string from UTF-8 data.*/
- static V8_DEPRECATE_SOON(
+ static V8_DEPRECATED(
"Use maybe version",
Local<String> NewFromUtf8(Isolate* isolate, const char* data,
NewStringType type = kNormalString,
@@ -2915,8 +2899,6 @@ class V8_EXPORT String : public Name {
ExternalStringResource* GetExternalStringResourceSlow() const;
ExternalStringResourceBase* GetExternalStringResourceBaseSlow(
String::Encoding* encoding_out) const;
- const ExternalOneByteStringResource* GetExternalOneByteStringResourceSlow()
- const;
static void CheckCast(v8::Value* obj);
};
@@ -3237,6 +3219,10 @@ class V8_EXPORT Object : public Value {
public:
V8_DEPRECATE_SOON("Use maybe version",
bool Set(Local<Value> key, Local<Value> value));
+ /**
+ * Set only returns Just(true) or Empty(), so if it should never fail, use
+ * result.Check().
+ */
V8_WARN_UNUSED_RESULT Maybe<bool> Set(Local<Context> context,
Local<Value> key, Local<Value> value);
@@ -3307,7 +3293,6 @@ class V8_EXPORT Object : public Value {
V8_WARN_UNUSED_RESULT MaybeLocal<Value> GetOwnPropertyDescriptor(
Local<Context> context, Local<Name> key);
- V8_DEPRECATE_SOON("Use maybe version", bool Has(Local<Value> key));
/**
* Object::Has() calls the abstract operation HasProperty(O, P) described
* in ECMA-262, 7.3.10. Has() returns
@@ -3326,7 +3311,6 @@ class V8_EXPORT Object : public Value {
V8_WARN_UNUSED_RESULT Maybe<bool> Has(Local<Context> context,
Local<Value> key);
- V8_DEPRECATE_SOON("Use maybe version", bool Delete(Local<Value> key));
V8_WARN_UNUSED_RESULT Maybe<bool> Delete(Local<Context> context,
Local<Value> key);
@@ -3397,7 +3381,7 @@ class V8_EXPORT Object : public Value {
* array returned by this method contains the same values as would
* be enumerated by a for-in statement over this object.
*/
- V8_DEPRECATE_SOON("Use maybe version", Local<Array> GetPropertyNames());
+ V8_DEPRECATED("Use maybe version", Local<Array> GetPropertyNames());
V8_WARN_UNUSED_RESULT MaybeLocal<Array> GetPropertyNames(
Local<Context> context);
V8_WARN_UNUSED_RESULT MaybeLocal<Array> GetPropertyNames(
@@ -3410,7 +3394,7 @@ class V8_EXPORT Object : public Value {
* the returned array doesn't contain the names of properties from
* prototype objects.
*/
- V8_DEPRECATE_SOON("Use maybe version", Local<Array> GetOwnPropertyNames());
+ V8_DEPRECATED("Use maybe version", Local<Array> GetOwnPropertyNames());
V8_WARN_UNUSED_RESULT MaybeLocal<Array> GetOwnPropertyNames(
Local<Context> context);
@@ -3509,8 +3493,8 @@ class V8_EXPORT Object : public Value {
Local<Name> key);
V8_WARN_UNUSED_RESULT Maybe<bool> HasOwnProperty(Local<Context> context,
uint32_t index);
- V8_DEPRECATE_SOON("Use maybe version",
- bool HasRealNamedProperty(Local<String> key));
+ V8_DEPRECATED("Use maybe version",
+ bool HasRealNamedProperty(Local<String> key));
/**
* Use HasRealNamedProperty() if you want to check if an object has an own
* property without causing side effects, i.e., without calling interceptors.
@@ -3526,12 +3510,12 @@ class V8_EXPORT Object : public Value {
*/
V8_WARN_UNUSED_RESULT Maybe<bool> HasRealNamedProperty(Local<Context> context,
Local<Name> key);
- V8_DEPRECATE_SOON("Use maybe version",
- bool HasRealIndexedProperty(uint32_t index));
+ V8_DEPRECATED("Use maybe version",
+ bool HasRealIndexedProperty(uint32_t index));
V8_WARN_UNUSED_RESULT Maybe<bool> HasRealIndexedProperty(
Local<Context> context, uint32_t index);
- V8_DEPRECATE_SOON("Use maybe version",
- bool HasRealNamedCallbackProperty(Local<String> key));
+ V8_DEPRECATED("Use maybe version",
+ bool HasRealNamedCallbackProperty(Local<String> key));
V8_WARN_UNUSED_RESULT Maybe<bool> HasRealNamedCallbackProperty(
Local<Context> context, Local<Name> key);
@@ -3647,6 +3631,18 @@ class V8_EXPORT Object : public Value {
static Local<Object> New(Isolate* isolate);
+ /**
+ * Creates a JavaScript object with the given properties, and the given
+ * prototype_or_null (which can be any JavaScript value; if it's null,
+ * the newly created object won't have a prototype at all). This is
+ * similar to Object.create(). All properties will be created as
+ * enumerable, configurable and writable properties.
+ */
+ static Local<Object> New(Isolate* isolate, Local<Value> prototype_or_null,
+ Local<Name>* names, Local<Value>* values,
+ size_t length);
+
V8_INLINE static Object* Cast(Value* obj);
private:
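A usage sketch of the new overload, building a prototype-less dictionary object in the spirit of Object.create(null):

    #include "v8.h"

    v8::Local<v8::Object> MakeDict(v8::Isolate* isolate) {
      v8::Local<v8::Name> names[] = {
          v8::String::NewFromUtf8(isolate, "a",
                                  v8::NewStringType::kInternalized)
              .ToLocalChecked(),
          v8::String::NewFromUtf8(isolate, "b",
                                  v8::NewStringType::kInternalized)
              .ToLocalChecked()};
      v8::Local<v8::Value> values[] = {v8::Integer::New(isolate, 1),
                                       v8::Integer::New(isolate, 2)};
      // Null prototype: lookups never reach Object.prototype.
      return v8::Object::New(isolate, v8::Null(isolate), names, values, 2);
    }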
@@ -3760,8 +3756,8 @@ class ReturnValue {
}
// Local setters
template <typename S>
- V8_INLINE V8_DEPRECATE_SOON("Use Global<> instead",
- void Set(const Persistent<S>& handle));
+ V8_INLINE V8_DEPRECATED("Use Global<> instead",
+ void Set(const Persistent<S>& handle));
template <typename S>
V8_INLINE void Set(const Global<S>& handle);
template <typename S>
@@ -3793,10 +3789,10 @@ class ReturnValue {
template<class F> friend class PropertyCallbackInfo;
template <class F, class G, class H>
friend class PersistentValueMapBase;
- V8_INLINE void SetInternal(internal::Object* value) { *value_ = value; }
- V8_INLINE internal::Object* GetDefaultValue();
- V8_INLINE explicit ReturnValue(internal::Object** slot);
- internal::Object** value_;
+ V8_INLINE void SetInternal(internal::Address value) { *value_ = value; }
+ V8_INLINE internal::Address GetDefaultValue();
+ V8_INLINE explicit ReturnValue(internal::Address* slot);
+ internal::Address* value_;
};
@@ -3850,10 +3846,10 @@ class FunctionCallbackInfo {
static const int kDataIndex = 4;
static const int kNewTargetIndex = 5;
- V8_INLINE FunctionCallbackInfo(internal::Object** implicit_args,
- internal::Object** values, int length);
- internal::Object** implicit_args_;
- internal::Object** values_;
+ V8_INLINE FunctionCallbackInfo(internal::Address* implicit_args,
+ internal::Address* values, int length);
+ internal::Address* implicit_args_;
+ internal::Address* values_;
int length_;
};
@@ -3965,8 +3961,8 @@ class PropertyCallbackInfo {
static const int kDataIndex = 5;
static const int kThisIndex = 6;
- V8_INLINE PropertyCallbackInfo(internal::Object** args) : args_(args) {}
- internal::Object** args_;
+ V8_INLINE PropertyCallbackInfo(internal::Address* args) : args_(args) {}
+ internal::Address* args_;
};
@@ -3988,10 +3984,11 @@ class V8_EXPORT Function : public Object {
Local<Value> data = Local<Value>(), int length = 0,
ConstructorBehavior behavior = ConstructorBehavior::kAllow,
SideEffectType side_effect_type = SideEffectType::kHasSideEffect);
- static V8_DEPRECATE_SOON(
- "Use maybe version",
- Local<Function> New(Isolate* isolate, FunctionCallback callback,
- Local<Value> data = Local<Value>(), int length = 0));
+ static V8_DEPRECATED("Use maybe version",
+ Local<Function> New(Isolate* isolate,
+ FunctionCallback callback,
+ Local<Value> data = Local<Value>(),
+ int length = 0));
V8_WARN_UNUSED_RESULT MaybeLocal<Object> NewInstance(
Local<Context> context, int argc, Local<Value> argv[]) const;
@@ -4010,9 +4007,9 @@ class V8_EXPORT Function : public Object {
Local<Context> context, int argc, Local<Value> argv[],
SideEffectType side_effect_type = SideEffectType::kHasSideEffect) const;
- V8_DEPRECATE_SOON("Use maybe version",
- Local<Value> Call(Local<Value> recv, int argc,
- Local<Value> argv[]));
+ V8_DEPRECATED("Use maybe version",
+ Local<Value> Call(Local<Value> recv, int argc,
+ Local<Value> argv[]));
V8_WARN_UNUSED_RESULT MaybeLocal<Value> Call(Local<Context> context,
Local<Value> recv, int argc,
Local<Value> argv[]);
@@ -4129,6 +4126,10 @@ class V8_EXPORT Promise : public Object {
V8_WARN_UNUSED_RESULT MaybeLocal<Promise> Then(Local<Context> context,
Local<Function> handler);
+ V8_WARN_UNUSED_RESULT MaybeLocal<Promise> Then(Local<Context> context,
+ Local<Function> on_fulfilled,
+ Local<Function> on_rejected);
+
/**
* Returns true if the promise has at least one derived promise, and
* therefore resolve/reject handlers (including default handler).
@@ -4146,6 +4147,11 @@ class V8_EXPORT Promise : public Object {
*/
PromiseState State();
+ /**
+ * Marks this promise as handled to avoid reporting unhandled rejections.
+ */
+ void MarkAsHandled();
+
V8_INLINE static Promise* Cast(Value* obj);
static const int kEmbedderFieldCount = V8_PROMISE_INTERNAL_FIELD_COUNT;
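A usage sketch combining the two additions; the handlers would be built elsewhere, e.g. via Function::New:

    #include "v8.h"

    v8::MaybeLocal<v8::Promise> ChainHandled(
        v8::Local<v8::Context> context, v8::Local<v8::Promise> promise,
        v8::Local<v8::Function> on_fulfilled,
        v8::Local<v8::Function> on_rejected) {
      // Suppress unhandled-rejection reporting for this promise, then attach
      // both handlers in a single Then() call.
      promise->MarkAsHandled();
      return promise->Then(context, on_fulfilled, on_rejected);
    }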
@@ -4188,8 +4194,16 @@ class V8_EXPORT PropertyDescriptor {
// GenericDescriptor
PropertyDescriptor();
+ // DataDescriptor (implicit / DEPRECATED)
+ // Templatized such that the explicit constructor is chosen first.
+ // TODO(clemensh): Remove after 7.3 branch.
+ template <std::nullptr_t = nullptr>
+ V8_DEPRECATED(
+ "Use explicit constructor",
+ PropertyDescriptor(Local<Value> value)); // NOLINT(runtime/explicit)
+
// DataDescriptor
- PropertyDescriptor(Local<Value> value);
+ explicit PropertyDescriptor(Local<Value> value);
// DataDescriptor with writable property
PropertyDescriptor(Local<Value> value, bool writable);
@@ -4228,6 +4242,11 @@ class V8_EXPORT PropertyDescriptor {
PrivateData* private_;
};
+// TODO(clemensh): Remove after 7.3 branch.
+template <std::nullptr_t>
+PropertyDescriptor::PropertyDescriptor(Local<Value> value)
+ : PropertyDescriptor(value) {}
+
/**
* An instance of the built-in Proxy constructor (ECMA-262, 6th Edition,
* 26.2.1).
@@ -4253,25 +4272,97 @@ class V8_EXPORT Proxy : public Object {
static void CheckCast(Value* obj);
};
-// TODO(mtrofin): rename WasmCompiledModule to WasmModuleObject, for
-// consistency with internal APIs.
-class V8_EXPORT WasmCompiledModule : public Object {
+/**
+ * Points to an unowned contiguous buffer holding a known number of elements.
+ *
+ * This is similar to std::span (under consideration for C++20), but does not
+ * require advanced C++ support. In the (far) future, this may be replaced with
+ * or aliased to std::span.
+ *
+ * To facilitate future migration, this class exposes a subset of the interface
+ * implemented by std::span.
+ */
+template <typename T>
+class V8_EXPORT MemorySpan {
+ public:
+ /** The default constructor creates an empty span. */
+ constexpr MemorySpan() = default;
+
+ constexpr MemorySpan(T* data, size_t size) : data_(data), size_(size) {}
+
+ /** Returns a pointer to the beginning of the buffer. */
+ constexpr T* data() const { return data_; }
+ /** Returns the number of elements that the buffer holds. */
+ constexpr size_t size() const { return size_; }
+
+ private:
+ T* data_ = nullptr;
+ size_t size_ = 0;
+};
+
+/**
+ * An owned byte buffer with associated size.
+ */
+struct OwnedBuffer {
+ std::unique_ptr<const uint8_t[]> buffer;
+ size_t size = 0;
+ OwnedBuffer(std::unique_ptr<const uint8_t[]> buffer, size_t size)
+ : buffer(std::move(buffer)), size(size) {}
+ OwnedBuffer() = default;
+};
+
+// Wrapper around a compiled WebAssembly module, which is potentially shared by
+// different WasmModuleObjects.
+class V8_EXPORT CompiledWasmModule {
+ public:
+ /**
+ * Serialize the compiled module. The serialized data does not include the
+ * wire bytes.
+ */
+ OwnedBuffer Serialize();
+
+ /**
+ * Get the (wasm-encoded) wire bytes that were used to compile this module.
+ */
+ MemorySpan<const uint8_t> GetWireBytesRef();
+
+ private:
+ explicit CompiledWasmModule(std::shared_ptr<internal::wasm::NativeModule>);
+ friend class Utils;
+
+ const std::shared_ptr<internal::wasm::NativeModule> native_module_;
+};
+
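A caching sketch with the new types and the GetCompiledModule() accessor added below; WriteToCache is a hypothetical storage API. The wire bytes are persisted separately since Serialize() omits them:

    #include <cstddef>
    #include <cstdint>
    #include "v8.h"

    void WriteToCache(const char* key, const uint8_t* data,
                      size_t size);  // hypothetical storage API

    void CacheModule(v8::Local<v8::WasmModuleObject> module_object) {
      v8::CompiledWasmModule compiled = module_object->GetCompiledModule();
      v8::OwnedBuffer serialized = compiled.Serialize();
      v8::MemorySpan<const uint8_t> wire = compiled.GetWireBytesRef();
      WriteToCache("code", serialized.buffer.get(), serialized.size);
      WriteToCache("wire", wire.data(), wire.size());
    }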
+// An instance of WebAssembly.Module.
+class V8_EXPORT WasmModuleObject : public Object {
public:
- typedef std::pair<std::unique_ptr<const uint8_t[]>, size_t> SerializedModule;
+ // TODO(clemensh): Remove after 7.3 branch.
+ V8_DEPRECATED("Use OwnedBuffer", typedef)
+ std::pair<std::unique_ptr<const uint8_t[]>, size_t> SerializedModule;
/**
* An unowned reference to a byte buffer.
+ * TODO(clemensh): Remove after 7.3 branch.
*/
- struct BufferReference {
+ V8_DEPRECATED("Use MemorySpan<const uint8_t>", struct) BufferReference {
const uint8_t* start;
size_t size;
BufferReference(const uint8_t* start, size_t size)
: start(start), size(size) {}
+
+ // Implicit conversion to and from MemorySpan<const uint8_t>.
+ BufferReference(MemorySpan<const uint8_t> span) // NOLINT(runtime/explicit)
+ : start(span.data()), size(span.size()) {}
+ operator MemorySpan<const uint8_t>() const {
+ return MemorySpan<const uint8_t>{start, size};
+ }
};
/**
* An opaque, native heap object for transferring wasm modules. It
* supports move semantics, and does not support copy semantics.
+ * TODO(wasm): Merge this with CompiledWasmModule once code sharing is always
+ * enabled.
*/
class TransferrableModule final {
public:
@@ -4283,8 +4374,7 @@ class V8_EXPORT WasmCompiledModule : public Object {
private:
typedef std::shared_ptr<internal::wasm::NativeModule> SharedModule;
- typedef std::pair<std::unique_ptr<const uint8_t[]>, size_t> OwnedBuffer;
- friend class WasmCompiledModule;
+ friend class WasmModuleObject;
explicit TransferrableModule(SharedModule shared_module)
: shared_module_(std::move(shared_module)) {}
TransferrableModule(OwnedBuffer serialized, OwnedBuffer bytes)
@@ -4303,64 +4393,87 @@ class V8_EXPORT WasmCompiledModule : public Object {
TransferrableModule GetTransferrableModule();
/**
- * Efficiently re-create a WasmCompiledModule, without recompiling, from
+ * Efficiently re-create a WasmModuleObject, without recompiling, from
* a TransferrableModule.
*/
- static MaybeLocal<WasmCompiledModule> FromTransferrableModule(
+ static MaybeLocal<WasmModuleObject> FromTransferrableModule(
Isolate* isolate, const TransferrableModule&);
/**
* Get the wasm-encoded bytes that were used to compile this module.
*/
- BufferReference GetWasmWireBytesRef();
+ V8_DEPRECATED("Use CompiledWasmModule::GetWireBytesRef()",
+ BufferReference GetWasmWireBytesRef());
+
+ /**
+ * Get the compiled module for this module object. The compiled module can be
+ * shared by several module objects.
+ */
+ CompiledWasmModule GetCompiledModule();
/**
* Serialize the compiled module. The serialized data does not include the
* uncompiled bytes.
*/
- SerializedModule Serialize();
+ V8_DEPRECATED("Use CompiledWasmModule::Serialize()",
+ SerializedModule Serialize());
/**
* If possible, deserialize the module, otherwise compile it from the provided
* uncompiled bytes.
*/
- static MaybeLocal<WasmCompiledModule> DeserializeOrCompile(
- Isolate* isolate, BufferReference serialized_module,
- BufferReference wire_bytes);
- V8_INLINE static WasmCompiledModule* Cast(Value* obj);
+ static MaybeLocal<WasmModuleObject> DeserializeOrCompile(
+ Isolate* isolate, MemorySpan<const uint8_t> serialized_module,
+ MemorySpan<const uint8_t> wire_bytes);
+ V8_INLINE static WasmModuleObject* Cast(Value* obj);
private:
- static MaybeLocal<WasmCompiledModule> Deserialize(
- Isolate* isolate, BufferReference serialized_module,
- BufferReference wire_bytes);
- static MaybeLocal<WasmCompiledModule> Compile(Isolate* isolate,
- const uint8_t* start,
- size_t length);
- static BufferReference AsReference(
- const TransferrableModule::OwnedBuffer& buff) {
- return {buff.first.get(), buff.second};
+ static MaybeLocal<WasmModuleObject> Deserialize(
+ Isolate* isolate, MemorySpan<const uint8_t> serialized_module,
+ MemorySpan<const uint8_t> wire_bytes);
+ static MaybeLocal<WasmModuleObject> Compile(Isolate* isolate,
+ const uint8_t* start,
+ size_t length);
+ static MemorySpan<const uint8_t> AsReference(const OwnedBuffer& buff) {
+ return {buff.buffer.get(), buff.size};
}
- WasmCompiledModule();
+ WasmModuleObject();
static void CheckCast(Value* obj);
};
+V8_DEPRECATED("Use WasmModuleObject",
+ typedef WasmModuleObject WasmCompiledModule);
+
/**
* The V8 interface for WebAssembly streaming compilation. When streaming
* compilation is initiated, V8 passes a {WasmStreaming} object to the embedder
- * such that the embedder can pass the input butes for streaming compilation to
+ * such that the embedder can pass the input bytes for streaming compilation to
* V8.
*/
class V8_EXPORT WasmStreaming final {
public:
class WasmStreamingImpl;
- WasmStreaming(std::unique_ptr<WasmStreamingImpl> impl);
+ /**
+ * Client to receive streaming event notifications.
+ */
+ class Client {
+ public:
+ virtual ~Client() = default;
+ /**
+ * Passes the fully compiled module to the client. This can be used to
+ * implement code caching.
+ */
+ virtual void OnModuleCompiled(CompiledWasmModule compiled_module) = 0;
+ };
+
+ explicit WasmStreaming(std::unique_ptr<WasmStreamingImpl> impl);
~WasmStreaming();
/**
- * Pass a new chunck of bytes to WebAssembly streaming compilation.
+ * Pass a new chunk of bytes to WebAssembly streaming compilation.
* The buffer passed into {OnBytesReceived} is owned by the caller.
*/
void OnBytesReceived(const uint8_t* bytes, size_t size);
@@ -4380,6 +4493,21 @@ class V8_EXPORT WasmStreaming final {
void Abort(MaybeLocal<Value> exception);
/**
+ * Passes previously compiled module bytes. This must be called before
+ * {OnBytesReceived}, {Finish}, or {Abort}. Returns true if the module bytes
+ * can be used, false otherwise. The buffer passed via {bytes} and {size}
+ * is owned by the caller. If {SetCompiledModuleBytes} returns true, the
+ * buffer must remain valid until either {Finish} or {Abort} completes.
+ */
+ bool SetCompiledModuleBytes(const uint8_t* bytes, size_t size);
+
+ /**
+ * Sets the client object that will receive streaming event notifications.
+ * This must be called before {OnBytesReceived}, {Finish}, or {Abort}.
+ */
+ void SetClient(std::shared_ptr<Client> client);
+
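Putting the streaming additions together, a code-caching client sketch; StoreInCache is hypothetical, and the cached bytes would come from a previous OnModuleCompiled:

    #include <cstddef>
    #include <cstdint>
    #include <memory>
    #include "v8.h"

    void StoreInCache(const uint8_t* data, size_t size);  // hypothetical

    class CachingClient : public v8::WasmStreaming::Client {
     public:
      void OnModuleCompiled(v8::CompiledWasmModule compiled_module) override {
        v8::OwnedBuffer buf = compiled_module.Serialize();
        StoreInCache(buf.buffer.get(), buf.size);
      }
    };

    void SetUpStreaming(v8::WasmStreaming* streaming, const uint8_t* cached,
                        size_t cached_size) {
      // Both calls must precede OnBytesReceived/Finish/Abort.
      if (cached != nullptr)
        streaming->SetCompiledModuleBytes(cached, cached_size);
      streaming->SetClient(std::make_shared<CachingClient>());
    }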
+ /**
* Unpacks a {WasmStreaming} object wrapped in a {Managed} for the embedder.
* Since the embedder is on the other side of the API, it cannot unpack the
* {Managed} itself.
@@ -4588,17 +4716,26 @@ class V8_EXPORT ArrayBuffer : public Object {
bool IsExternal() const;
/**
- * Returns true if this ArrayBuffer may be neutered.
+ * Returns true if this ArrayBuffer may be detached.
*/
- bool IsNeuterable() const;
+ bool IsDetachable() const;
+
+ // TODO(913887): fix the use of 'neuter' in the API.
+ V8_DEPRECATE_SOON("Use IsDetachable() instead.",
+ inline bool IsNeuterable() const) {
+ return IsDetachable();
+ }
/**
- * Neuters this ArrayBuffer and all its views (typed arrays).
- * Neutering sets the byte length of the buffer and all typed arrays to zero,
+ * Detaches this ArrayBuffer and all its views (typed arrays).
+ * Detaching sets the byte length of the buffer and all typed arrays to zero,
* preventing JavaScript from ever accessing underlying backing store.
- * ArrayBuffer should have been externalized and must be neuterable.
+ * ArrayBuffer should have been externalized and must be detachable.
*/
- void Neuter();
+ void Detach();
+
+ // TODO(913887): fix the use of 'neuter' in the API.
+ V8_DEPRECATE_SOON("Use Detach() instead.", inline void Neuter()) { Detach(); }
/**
* Make this ArrayBuffer external. The pointer to underlying memory block
@@ -5045,8 +5182,6 @@ class V8_EXPORT SharedArrayBuffer : public Object {
*/
class V8_EXPORT Date : public Object {
public:
- static V8_DEPRECATED("Use maybe version.",
- Local<Value> New(Isolate* isolate, double time));
static V8_WARN_UNUSED_RESULT MaybeLocal<Value> New(Local<Context> context,
double time);
@@ -5695,7 +5830,7 @@ class V8_EXPORT FunctionTemplate : public Template {
SideEffectType side_effect_type = SideEffectType::kHasSideEffect);
/** Returns the unique function instance in the current execution context.*/
- V8_DEPRECATE_SOON("Use maybe version", Local<Function> GetFunction());
+ V8_DEPRECATED("Use maybe version", Local<Function> GetFunction());
V8_WARN_UNUSED_RESULT MaybeLocal<Function> GetFunction(
Local<Context> context);
@@ -5992,7 +6127,7 @@ class V8_EXPORT ObjectTemplate : public Template {
size_t index);
/** Creates a new instance of this template.*/
- V8_DEPRECATE_SOON("Use maybe version", Local<Object> NewInstance());
+ V8_DEPRECATED("Use maybe version", Local<Object> NewInstance());
V8_WARN_UNUSED_RESULT MaybeLocal<Object> NewInstance(Local<Context> context);
/**
@@ -6212,7 +6347,7 @@ class V8_EXPORT AccessorSignature : public Data {
// --- Extensions ---
-V8_DEPRECATE_SOON("Implementation detail", class)
+V8_DEPRECATED("Implementation detail", class)
V8_EXPORT ExternalOneByteStringResourceImpl
: public String::ExternalOneByteStringResource {
public:
@@ -6306,18 +6441,6 @@ class V8_EXPORT ResourceConstraints {
void ConfigureDefaults(uint64_t physical_memory,
uint64_t virtual_memory_limit);
- // Returns the max semi-space size in MB.
- V8_DEPRECATED("Use max_semi_space_size_in_kb()",
- size_t max_semi_space_size()) {
- return max_semi_space_size_in_kb_ / 1024;
- }
-
- // Sets the max semi-space size in MB.
- V8_DEPRECATED("Use set_max_semi_space_size_in_kb(size_t limit_in_kb)",
- void set_max_semi_space_size(size_t limit_in_mb)) {
- max_semi_space_size_in_kb_ = limit_in_mb * 1024;
- }
-
// Returns the max semi-space size in KB.
size_t max_semi_space_size_in_kb() const {
return max_semi_space_size_in_kb_;
@@ -6332,14 +6455,6 @@ class V8_EXPORT ResourceConstraints {
void set_max_old_space_size(size_t limit_in_mb) {
max_old_space_size_ = limit_in_mb;
}
- V8_DEPRECATED("max_executable_size_ is subsumed by max_old_space_size_",
- size_t max_executable_size() const) {
- return max_executable_size_;
- }
- V8_DEPRECATED("max_executable_size_ is subsumed by max_old_space_size_",
- void set_max_executable_size(size_t limit_in_mb)) {
- max_executable_size_ = limit_in_mb;
- }
uint32_t* stack_limit() const { return stack_limit_; }
// Sets an address beyond which the VM's stack may not grow.
void set_stack_limit(uint32_t* value) { stack_limit_ = value; }
@@ -6356,7 +6471,6 @@ class V8_EXPORT ResourceConstraints {
// The remaining limits are in MB
size_t max_old_space_size_;
- size_t max_executable_size_;
uint32_t* stack_limit_;
size_t code_range_size_;
size_t max_zone_pool_size_;
@@ -6463,10 +6577,12 @@ typedef void (*HostInitializeImportMetaObjectCallback)(Local<Context> context,
* PrepareStackTraceCallback is called when the stack property of an error is
* first accessed. The return value will be used as the stack value. If this
 * callback is registered, the |Error.prepareStackTrace| API will be disabled.
+ * |sites| is an array of call sites, specified in
+ * https://github.com/v8/v8/wiki/Stack-Trace-API
*/
typedef MaybeLocal<Value> (*PrepareStackTraceCallback)(Local<Context> context,
Local<Value> error,
- Local<StackTrace> trace);
+ Local<Array> sites);
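A pass-through callback sketch under the new signature; a real embedder would format |sites| into a string, and registration via Isolate::SetPrepareStackTraceCallback is assumed here:

    #include "v8.h"

    v8::MaybeLocal<v8::Value> PrepareStackTrace(v8::Local<v8::Context> context,
                                                v8::Local<v8::Value> error,
                                                v8::Local<v8::Array> sites) {
      // Return the call-site array verbatim; formatting elided.
      return sites;
    }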
/**
* PromiseHook with type kInit is called when a new promise is
@@ -6761,21 +6877,6 @@ class V8_EXPORT HeapCodeStatistics {
class RetainedObjectInfo;
-
-/**
- * FunctionEntryHook is the type of the profile entry hook called at entry to
- * any generated function when function-level profiling is enabled.
- *
- * \param function the address of the function that's being entered.
- * \param return_addr_location points to a location on stack where the machine
- * return address resides. This can be used to identify the caller of
- * \p function, and/or modified to divert execution when \p function exits.
- *
- * \note the entry hook must not cause garbage collection.
- */
-typedef void (*FunctionEntryHook)(uintptr_t function,
- uintptr_t return_addr_location);
-
/**
* A JIT code event is issued each time code is added, moved or removed.
*
@@ -6937,15 +7038,6 @@ class V8_EXPORT EmbedderHeapTracer {
kEmpty,
};
- enum ForceCompletionAction { FORCE_COMPLETION, DO_NOT_FORCE_COMPLETION };
-
- struct AdvanceTracingActions {
- explicit AdvanceTracingActions(ForceCompletionAction force_completion_)
- : force_completion(force_completion_) {}
-
- ForceCompletionAction force_completion;
- };
-
virtual ~EmbedderHeapTracer() = default;
/**
@@ -6963,25 +7055,6 @@ class V8_EXPORT EmbedderHeapTracer {
virtual void TracePrologue() = 0;
/**
- * Called to make a tracing step in the embedder.
- *
- * The embedder is expected to trace its heap starting from wrappers reported
- * by RegisterV8References method, and report back all reachable wrappers.
- * Furthermore, the embedder is expected to stop tracing by the given
- * deadline.
- *
- * Returns true if there is still work to do.
- *
- * Note: Only one of the AdvanceTracing methods needs to be overriden by the
- * embedder.
- */
- V8_DEPRECATED("Use void AdvanceTracing(deadline_in_ms)",
- virtual bool AdvanceTracing(double deadline_in_ms,
- AdvanceTracingActions actions)) {
- return false;
- }
-
- /**
* Called to advance tracing in the embedder.
*
* The embedder is expected to trace its heap starting from wrappers reported
@@ -6990,17 +7063,14 @@ class V8_EXPORT EmbedderHeapTracer {
* deadline. A deadline of infinity means that tracing should be finished.
*
 * Returns |true| if tracing is done, and |false| otherwise.
- *
- * Note: Only one of the AdvanceTracing methods needs to be overriden by the
- * embedder.
*/
- virtual bool AdvanceTracing(double deadline_in_ms);
+ virtual bool AdvanceTracing(double deadline_in_ms) = 0;
/*
 * Returns true if there is no more tracing work to be done (see AdvanceTracing)
* and false otherwise.
*/
- virtual bool IsTracingDone();
+ virtual bool IsTracingDone() = 0;
/**
* Called at the end of a GC cycle.
@@ -7012,13 +7082,8 @@ class V8_EXPORT EmbedderHeapTracer {
/**
* Called upon entering the final marking pause. No more incremental marking
* steps will follow this call.
- *
- * Note: Only one of the EnterFinalPause methods needs to be overriden by the
- * embedder.
*/
- V8_DEPRECATED("Use void EnterFinalPause(EmbedderStackState)",
- virtual void EnterFinalPause()) {}
- virtual void EnterFinalPause(EmbedderStackState stack_state);
+ virtual void EnterFinalPause(EmbedderStackState stack_state) = 0;
/**
* Called when tracing is aborted.
@@ -7026,8 +7091,8 @@ class V8_EXPORT EmbedderHeapTracer {
* The embedder is expected to throw away all intermediate data and reset to
* the initial state.
*/
- V8_DEPRECATE_SOON("Obsolete as V8 will not abort tracing anymore.",
- virtual void AbortTracing()) {}
+ V8_DEPRECATED("Obsolete as V8 will not abort tracing anymore.",
+ virtual void AbortTracing()) {}
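With AdvanceTracing(), IsTracingDone(), and EnterFinalPause() now pure virtual, every embedder tracer must override them. A minimal no-op sketch (class name and installation via Isolate::SetEmbedderHeapTracer() are illustrative):

#include <utility>
#include <vector>

class NoopTracer final : public v8::EmbedderHeapTracer {
 public:
  void RegisterV8References(
      const std::vector<std::pair<void*, void*>>& refs) override {}
  void TracePrologue() override {}
  bool AdvanceTracing(double deadline_in_ms) override {
    return true;  // all (zero) embedder-side marking work is done
  }
  bool IsTracingDone() override { return true; }
  void TraceEpilogue() override {}
  void EnterFinalPause(EmbedderStackState stack_state) override {}
};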
/*
* Called by the embedder to request immediate finalization of the currently
@@ -7053,13 +7118,6 @@ class V8_EXPORT EmbedderHeapTracer {
*/
v8::Isolate* isolate() const { return isolate_; }
- /**
- * Returns the number of wrappers that are still to be traced by the embedder.
- */
- V8_DEPRECATED("Use IsTracingDone", virtual size_t NumberOfWrappersToTrace()) {
- return 0;
- }
-
protected:
v8::Isolate* isolate_ = nullptr;
@@ -7069,6 +7127,10 @@ class V8_EXPORT EmbedderHeapTracer {
/**
* Callback and supporting data used in SnapshotCreator to implement embedder
* logic to serialize internal fields.
+ * Internal fields that directly reference V8 objects are serialized without
+ * calling this callback. Internal fields that contain aligned pointers are
+ * serialized by this callback if it returns a non-zero result. Otherwise the
+ * field is serialized verbatim.
*/
struct SerializeInternalFieldsCallback {
typedef StartupData (*CallbackFunction)(Local<Object> holder, int index,
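A sketch of such a callback (payload and field layout are assumptions; the callback would typically be handed to SnapshotCreator::AddContext()):

v8::StartupData SerializeField(v8::Local<v8::Object> holder, int index,
                               void* data) {
  // Zero-sized result: tell V8 to serialize this field verbatim.
  if (index != 0) return {nullptr, 0};
  // Hypothetical fixed payload standing in for an encoded aligned pointer.
  static const char payload[] = "my-field-v1";
  return {payload, sizeof(payload)};
}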
@@ -7114,8 +7176,7 @@ class V8_EXPORT Isolate {
*/
struct CreateParams {
CreateParams()
- : entry_hook(nullptr),
- code_event_handler(nullptr),
+ : code_event_handler(nullptr),
snapshot_blob(nullptr),
counter_lookup_callback(nullptr),
create_histogram_callback(nullptr),
@@ -7126,16 +7187,6 @@ class V8_EXPORT Isolate {
only_terminate_in_safe_scope(false) {}
/**
- * The optional entry_hook allows the host application to provide the
- * address of a function that's invoked on entry to every V8-generated
- * function. Note that entry_hook is invoked at the very start of each
- * generated function.
- * An entry_hook can only be provided in no-snapshot builds; in snapshot
- * builds it must be nullptr.
- */
- FunctionEntryHook entry_hook;
-
- /**
* Allows the host application to provide the address of a function that is
* notified each time code is added, moved or removed.
*/
@@ -7220,7 +7271,7 @@ class V8_EXPORT Isolate {
*/
class V8_EXPORT DisallowJavascriptExecutionScope {
public:
- enum OnFailure { CRASH_ON_FAILURE, THROW_ON_FAILURE };
+ enum OnFailure { CRASH_ON_FAILURE, THROW_ON_FAILURE, DUMP_ON_FAILURE };
DisallowJavascriptExecutionScope(Isolate* isolate, OnFailure on_failure);
~DisallowJavascriptExecutionScope();
@@ -7232,7 +7283,7 @@ class V8_EXPORT Isolate {
const DisallowJavascriptExecutionScope&) = delete;
private:
- bool on_failure_;
+ OnFailure on_failure_;
void* internal_;
};
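Usage sketch for the new mode (|isolate| assumed; the existing modes keep their semantics):

{
  v8::Isolate::DisallowJavascriptExecutionScope no_js(
      isolate,
      v8::Isolate::DisallowJavascriptExecutionScope::DUMP_ON_FAILURE);
  // Any JavaScript execution attempted here triggers the DUMP action rather
  // than crashing (CRASH_ON_FAILURE) or throwing (THROW_ON_FAILURE).
}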
@@ -7254,6 +7305,7 @@ class V8_EXPORT Isolate {
private:
void* internal_throws_;
void* internal_assert_;
+ void* internal_dump_;
};
/**
@@ -7377,6 +7429,12 @@ class V8_EXPORT Isolate {
kDateToLocaleString = 66,
kDateToLocaleDateString = 67,
kDateToLocaleTimeString = 68,
+ kAttemptOverrideReadOnlyOnPrototypeSloppy = 69,
+ kAttemptOverrideReadOnlyOnPrototypeStrict = 70,
+ kOptimizedFunctionWithOneShotBytecode = 71,
+ kRegExpMatchIsTrueishOnNonJSRegExp = 72,
+ kRegExpMatchIsFalseishOnJSRegExp = 73,
+ kDateGetTimezoneOffset = 74,
// If you add new values here, you'll also need to update Chromium's:
// web_feature.mojom, UseCounterCallback.cpp, and enums.xml. V8 changes to
@@ -7642,9 +7700,6 @@ class V8_EXPORT Isolate {
*/
void SetIdle(bool is_idle);
- /** Returns the ArrayBuffer::Allocator used in this isolate. */
- ArrayBuffer::Allocator* GetArrayBufferAllocator();
-
/** Returns true if this isolate has a current context. */
bool InContext();
@@ -7655,7 +7710,8 @@ class V8_EXPORT Isolate {
Local<Context> GetCurrentContext();
/** Returns the last context entered through V8's C++ API. */
- Local<Context> GetEnteredContext();
+ V8_DEPRECATED("Use GetEnteredOrMicrotaskContext().",
+ Local<Context> GetEnteredContext());
/**
* Returns either the last context entered through V8's C++ API, or the
@@ -7781,7 +7837,7 @@ class V8_EXPORT Isolate {
*/
typedef void (*AtomicsWaitCallback)(AtomicsWaitEvent event,
Local<SharedArrayBuffer> array_buffer,
- size_t offset_in_bytes, int32_t value,
+ size_t offset_in_bytes, int64_t value,
double timeout_in_ms,
AtomicsWaitWakeHandle* stop_handle,
void* data);
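A callback matching the widened signature, where int64_t presumably accommodates 64-bit waits as well (the wake-on-start policy is purely illustrative; registration is via Isolate::SetAtomicsWaitCallback()):

void OnAtomicsWait(v8::Isolate::AtomicsWaitEvent event,
                   v8::Local<v8::SharedArrayBuffer> array_buffer,
                   size_t offset_in_bytes, int64_t value,
                   double timeout_in_ms,
                   v8::Isolate::AtomicsWaitWakeHandle* stop_handle,
                   void* data) {
  if (event == v8::Isolate::AtomicsWaitEvent::kStartWait &&
      stop_handle != nullptr) {
    stop_handle->Wake();  // e.g. refuse to block while shutting down
  }
}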
@@ -8130,13 +8186,9 @@ class V8_EXPORT Isolate {
void GetCodeRange(void** start, size_t* length_in_bytes);
/**
- * Returns a memory range containing the code for V8's embedded functions
- * (e.g. builtins) which are shared across isolates.
- *
- * If embedded builtins are disabled, then the memory range will be a null
- * pointer with 0 length.
+ * Returns the UnwindState necessary for use with the Unwinder API.
*/
- MemoryRange GetEmbeddedCodeRange();
+ UnwindState GetUnwindState();
/** Set the callback to invoke in case of fatal errors. */
void SetFatalErrorHandler(FatalErrorCallback that);
@@ -8162,6 +8214,14 @@ class V8_EXPORT Isolate {
size_t heap_limit);
/**
+ * If the heap limit was changed by the NearHeapLimitCallback, then the
+ * initial heap limit will be restored once the heap size falls below the
+ * given threshold percentage of the initial heap limit.
+   * The threshold percentage is a number in the (0.0, 1.0) range.
+ */
+ void AutomaticallyRestoreInitialHeapLimit(double threshold_percent = 0.5);
+
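Taken together with AddNearHeapLimitCallback() above, a sketch of the intended pattern (callback body and threshold are illustrative):

size_t NearHeapLimit(void* data, size_t current_heap_limit,
                     size_t initial_heap_limit) {
  // Grant temporary headroom instead of hitting OOM immediately.
  return current_heap_limit * 2;
}

// During isolate setup:
isolate->AddNearHeapLimitCallback(NearHeapLimit, nullptr);
// Restore the initial limit once usage drops below 50% of it.
isolate->AutomaticallyRestoreInitialHeapLimit(0.5);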
+ /**
* Set the callback to invoke to check if code generation from
* strings should be allowed.
*/
@@ -8182,7 +8242,9 @@ class V8_EXPORT Isolate {
void SetWasmModuleCallback(ExtensionCallback callback);
void SetWasmInstanceCallback(ExtensionCallback callback);
- void SetWasmCompileStreamingCallback(ApiImplementationCallback callback);
+ V8_DEPRECATED(
+ "The callback set in SetWasmStreamingCallback is used now",
+ void SetWasmCompileStreamingCallback(ApiImplementationCallback callback));
void SetWasmStreamingCallback(WasmStreamingCallback callback);
@@ -8257,7 +8319,9 @@ class V8_EXPORT Isolate {
* garbage collection but is free to visit an arbitrary superset of these
* objects.
*/
- void VisitHandlesForPartialDependence(PersistentHandleVisitor* visitor);
+ V8_DEPRECATE_SOON(
+ "Use VisitHandlesWithClassIds",
+ void VisitHandlesForPartialDependence(PersistentHandleVisitor* visitor));
/**
* Iterates through all the persistent handles in the current isolate's heap
@@ -8294,7 +8358,7 @@ class V8_EXPORT Isolate {
template <class K, class V, class Traits>
friend class PersistentValueMapBase;
- internal::Object** GetDataFromSnapshotOnce(size_t index);
+ internal::Address* GetDataFromSnapshotOnce(size_t index);
void ReportExternalAllocationLimitReached();
void CheckMemoryPressure();
};
@@ -8476,15 +8540,17 @@ class V8_EXPORT V8 {
* \param context The third argument passed to the Linux signal handler, which
* points to a ucontext_t structure.
*/
- static bool TryHandleSignal(int signal_number, void* info, void* context);
+ V8_DEPRECATE_SOON("Use TryHandleWebAssemblyTrapPosix",
+ static bool TryHandleSignal(int signal_number, void* info,
+ void* context));
#endif // V8_OS_POSIX
/**
* Enable the default signal handler rather than using one provided by the
* embedder.
*/
- V8_DEPRECATE_SOON("Use EnableWebAssemblyTrapHandler",
- static bool RegisterDefaultSignalHandler());
+ V8_DEPRECATED("Use EnableWebAssemblyTrapHandler",
+ static bool RegisterDefaultSignalHandler());
/**
* Activate trap-based bounds checking for WebAssembly.
@@ -8497,26 +8563,20 @@ class V8_EXPORT V8 {
private:
V8();
- static internal::Object** GlobalizeReference(internal::Isolate* isolate,
- internal::Object** handle);
- static internal::Object** CopyPersistent(internal::Object** handle);
- static void DisposeGlobal(internal::Object** global_handle);
- static void MakeWeak(internal::Object** location, void* data,
+ static internal::Address* GlobalizeReference(internal::Isolate* isolate,
+ internal::Address* handle);
+ static internal::Address* CopyPersistent(internal::Address* handle);
+ static void DisposeGlobal(internal::Address* global_handle);
+ static void MakeWeak(internal::Address* location, void* data,
WeakCallbackInfo<void>::Callback weak_callback,
WeakCallbackType type);
- static void MakeWeak(internal::Object** location, void* data,
- // Must be 0 or -1.
- int internal_field_index1,
- // Must be 1 or -1.
- int internal_field_index2,
- WeakCallbackInfo<void>::Callback weak_callback);
- static void MakeWeak(internal::Object*** location_addr);
- static void* ClearWeak(internal::Object** location);
- static void AnnotateStrongRetainer(internal::Object** location,
+ static void MakeWeak(internal::Address** location_addr);
+ static void* ClearWeak(internal::Address* location);
+ static void AnnotateStrongRetainer(internal::Address* location,
const char* label);
static Value* Eternalize(Isolate* isolate, Value* handle);
- static void RegisterExternallyReferencedObject(internal::Object** object,
+ static void RegisterExternallyReferencedObject(internal::Address* location,
internal::Isolate* isolate);
template <class K, class V, class T>
@@ -8637,8 +8697,8 @@ class V8_EXPORT SnapshotCreator {
void operator=(const SnapshotCreator&) = delete;
private:
- size_t AddData(Local<Context> context, internal::Object* object);
- size_t AddData(internal::Object* object);
+ size_t AddData(Local<Context> context, internal::Address object);
+ size_t AddData(internal::Address object);
void* data_;
};
@@ -8665,6 +8725,14 @@ class Maybe {
V8_INLINE T ToChecked() const { return FromJust(); }
/**
+   * Shorthand for ToChecked() that doesn't return a value. To be used where
+   * the actual value of the Maybe is not needed, as with Object::Set.
+ */
+ V8_INLINE void Check() const {
+ if (V8_UNLIKELY(!IsJust())) V8::FromJustIsNothing();
+ }
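Usage sketch (|object|, |context|, |key|, and |value| assumed):

v8::Maybe<bool> result = object->Set(context, key, value);
result.Check();  // fatal via V8::FromJustIsNothing() if the Maybe is nothing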
+
+ /**
* Converts this Maybe<> to a value of type T. If this Maybe<> is
* nothing (empty), |false| is returned and |out| is left untouched.
*/
@@ -9159,7 +9227,7 @@ class V8_EXPORT Context {
* stack.
* https://html.spec.whatwg.org/multipage/webappapis.html#backup-incumbent-settings-object-stack
*/
- class V8_EXPORT BackupIncumbentScope {
+ class V8_EXPORT BackupIncumbentScope final {
public:
/**
* |backup_incumbent_context| is pushed onto the backup incumbent settings
@@ -9168,10 +9236,20 @@ class V8_EXPORT Context {
explicit BackupIncumbentScope(Local<Context> backup_incumbent_context);
~BackupIncumbentScope();
+ /**
+   * Returns an address that is comparable with JS stack addresses. Note that
+   * the JS stack may be allocated separately from the native stack. See also
+ * |TryCatch::JSStackComparableAddress| for details.
+ */
+ uintptr_t JSStackComparableAddress() const {
+ return js_stack_comparable_address_;
+ }
+
private:
friend class internal::Isolate;
Local<Context> backup_incumbent_context_;
+ uintptr_t js_stack_comparable_address_ = 0;
const BackupIncumbentScope* prev_ = nullptr;
};
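Usage sketch (|incumbent| is an assumed Local<Context> captured earlier):

v8::Context::BackupIncumbentScope incumbent_scope(incumbent);
// Comparable against JS stack addresses, e.g. values obtained from
// |TryCatch::JSStackComparableAddress|.
uintptr_t mark = incumbent_scope.JSStackComparableAddress();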
@@ -9181,7 +9259,7 @@ class V8_EXPORT Context {
friend class Object;
friend class Function;
- internal::Object** GetDataFromSnapshotOnce(size_t index);
+ internal::Address* GetDataFromSnapshotOnce(size_t index);
Local<Value> SlowGetEmbedderData(int index);
void* SlowGetAlignedPointerFromEmbedderData(int index);
};
@@ -9310,6 +9388,55 @@ class V8_EXPORT Locker {
internal::Isolate* isolate_;
};
+/**
+ * Various helpers for skipping over V8 frames in a given stack.
+ *
+ * The unwinder API is only supported on the x64 architecture.
+ */
+class V8_EXPORT Unwinder {
+ public:
+ /**
+ * Attempt to unwind the stack to the most recent C++ frame. This function is
+ * signal-safe and does not access any V8 state and thus doesn't require an
+ * Isolate.
+ *
+ * The unwinder needs to know the location of the JS Entry Stub (a piece of
+ * code that is run when C++ code calls into generated JS code). This is used
+ * for edge cases where the current frame is being constructed or torn down
+ * when the stack sample occurs.
+ *
+ * The unwinder also needs the virtual memory range of all possible V8 code
+ * objects. There are two ranges required - the heap code range and the range
+ * for code embedded in the binary. The V8 API provides all required inputs
+ * via an UnwindState object through the Isolate::GetUnwindState() API. These
+ * values will not change after Isolate initialization, so the same
+ * |unwind_state| can be used for multiple calls.
+ *
+ * \param unwind_state Input state for the Isolate that the stack comes from.
+ * \param register_state The current registers. This is an in-out param that
+ * will be overwritten with the register values after unwinding, on success.
+ * \param stack_base The resulting stack pointer and frame pointer values are
+ * bounds-checked against the stack_base and the original stack pointer value
+ * to ensure that they are valid locations in the given stack. If these values
+ * or any intermediate frame pointer values used during unwinding are ever out
+ * of these bounds, unwinding will fail.
+ *
+ * \return True on success.
+ */
+ static bool TryUnwindV8Frames(const UnwindState& unwind_state,
+ RegisterState* register_state,
+ const void* stack_base);
+
+ /**
+ * Whether the PC is within the V8 code range represented by code_range or
+ * embedded_code_range in |unwind_state|.
+ *
+ * If this returns false, then calling TryUnwindV8Frames() with the same PC
+ * and unwind_state will always fail. If it returns true, then unwinding may
+ * (but will not necessarily) succeed.
+ */
+ static bool PCIsInV8(const UnwindState& unwind_state, void* pc);
+};
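A sampling-profiler sketch (filling |register_state| from the signal context and supplying |stack_base| are the embedder's job and are assumed here):

void Sample(v8::Isolate* isolate, v8::RegisterState* register_state,
            const void* stack_base) {
  // Stable after isolate initialization (see above), so cache it once.
  static const v8::UnwindState unwind_state = isolate->GetUnwindState();
  if (v8::Unwinder::PCIsInV8(unwind_state, register_state->pc) &&
      v8::Unwinder::TryUnwindV8Frames(unwind_state, register_state,
                                      stack_base)) {
    // register_state now describes the most recent C++ frame; a native
    // unwinder can take over from here.
  }
}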
// --- Implementation ---
@@ -9328,7 +9455,7 @@ template <class T>
Local<T> Local<T>::New(Isolate* isolate, T* that) {
if (that == nullptr) return Local<T>();
T* that_ptr = that;
- internal::Object** p = reinterpret_cast<internal::Object**>(that_ptr);
+ internal::Address* p = reinterpret_cast<internal::Address*>(that_ptr);
return Local<T>(reinterpret_cast<T*>(HandleScope::CreateHandle(
reinterpret_cast<internal::Isolate*>(isolate), *p)));
}
@@ -9371,7 +9498,7 @@ void* WeakCallbackInfo<T>::GetInternalField(int index) const {
template <class T>
T* PersistentBase<T>::New(Isolate* isolate, T* that) {
if (that == nullptr) return nullptr;
- internal::Object** p = reinterpret_cast<internal::Object**>(that);
+ internal::Address* p = reinterpret_cast<internal::Address*>(that);
return reinterpret_cast<T*>(
V8::GlobalizeReference(reinterpret_cast<internal::Isolate*>(isolate),
p));
@@ -9384,7 +9511,7 @@ void Persistent<T, M>::Copy(const Persistent<S, M2>& that) {
TYPE_CHECK(T, S);
this->Reset();
if (that.IsEmpty()) return;
- internal::Object** p = reinterpret_cast<internal::Object**>(that.val_);
+ internal::Address* p = reinterpret_cast<internal::Address*>(that.val_);
this->val_ = reinterpret_cast<T*>(V8::CopyPersistent(p));
M::Copy(that, this);
}
@@ -9393,7 +9520,7 @@ template <class T>
bool PersistentBase<T>::IsIndependent() const {
typedef internal::Internals I;
if (this->IsEmpty()) return false;
- return I::GetNodeFlag(reinterpret_cast<internal::Object**>(this->val_),
+ return I::GetNodeFlag(reinterpret_cast<internal::Address*>(this->val_),
I::kNodeIsIndependentShift);
}
@@ -9402,7 +9529,7 @@ bool PersistentBase<T>::IsNearDeath() const {
typedef internal::Internals I;
if (this->IsEmpty()) return false;
uint8_t node_state =
- I::GetNodeState(reinterpret_cast<internal::Object**>(this->val_));
+ I::GetNodeState(reinterpret_cast<internal::Address*>(this->val_));
return node_state == I::kNodeStateIsNearDeathValue ||
node_state == I::kNodeStateIsPendingValue;
}
@@ -9412,15 +9539,15 @@ template <class T>
bool PersistentBase<T>::IsWeak() const {
typedef internal::Internals I;
if (this->IsEmpty()) return false;
- return I::GetNodeState(reinterpret_cast<internal::Object**>(this->val_)) ==
- I::kNodeStateIsWeakValue;
+ return I::GetNodeState(reinterpret_cast<internal::Address*>(this->val_)) ==
+ I::kNodeStateIsWeakValue;
}
template <class T>
void PersistentBase<T>::Reset() {
if (this->IsEmpty()) return;
- V8::DisposeGlobal(reinterpret_cast<internal::Object**>(this->val_));
+ V8::DisposeGlobal(reinterpret_cast<internal::Address*>(this->val_));
val_ = nullptr;
}
@@ -9452,25 +9579,25 @@ V8_INLINE void PersistentBase<T>::SetWeak(
P* parameter, typename WeakCallbackInfo<P>::Callback callback,
WeakCallbackType type) {
typedef typename WeakCallbackInfo<void>::Callback Callback;
- V8::MakeWeak(reinterpret_cast<internal::Object**>(this->val_), parameter,
+ V8::MakeWeak(reinterpret_cast<internal::Address*>(this->val_), parameter,
reinterpret_cast<Callback>(callback), type);
}
template <class T>
void PersistentBase<T>::SetWeak() {
- V8::MakeWeak(reinterpret_cast<internal::Object***>(&this->val_));
+ V8::MakeWeak(reinterpret_cast<internal::Address**>(&this->val_));
}
template <class T>
template <typename P>
P* PersistentBase<T>::ClearWeak() {
return reinterpret_cast<P*>(
- V8::ClearWeak(reinterpret_cast<internal::Object**>(this->val_)));
+ V8::ClearWeak(reinterpret_cast<internal::Address*>(this->val_)));
}
template <class T>
void PersistentBase<T>::AnnotateStrongRetainer(const char* label) {
- V8::AnnotateStrongRetainer(reinterpret_cast<internal::Object**>(this->val_),
+ V8::AnnotateStrongRetainer(reinterpret_cast<internal::Address*>(this->val_),
label);
}
@@ -9478,7 +9605,7 @@ template <class T>
void PersistentBase<T>::RegisterExternalReference(Isolate* isolate) const {
if (IsEmpty()) return;
V8::RegisterExternallyReferencedObject(
- reinterpret_cast<internal::Object**>(this->val_),
+ reinterpret_cast<internal::Address*>(this->val_),
reinterpret_cast<internal::Isolate*>(isolate));
}
@@ -9486,7 +9613,7 @@ template <class T>
void PersistentBase<T>::MarkIndependent() {
typedef internal::Internals I;
if (this->IsEmpty()) return;
- I::UpdateNodeFlag(reinterpret_cast<internal::Object**>(this->val_), true,
+ I::UpdateNodeFlag(reinterpret_cast<internal::Address*>(this->val_), true,
I::kNodeIsIndependentShift);
}
@@ -9494,7 +9621,7 @@ template <class T>
void PersistentBase<T>::MarkActive() {
typedef internal::Internals I;
if (this->IsEmpty()) return;
- I::UpdateNodeFlag(reinterpret_cast<internal::Object**>(this->val_), true,
+ I::UpdateNodeFlag(reinterpret_cast<internal::Address*>(this->val_), true,
I::kNodeIsActiveShift);
}
@@ -9503,7 +9630,7 @@ template <class T>
void PersistentBase<T>::SetWrapperClassId(uint16_t class_id) {
typedef internal::Internals I;
if (this->IsEmpty()) return;
- internal::Object** obj = reinterpret_cast<internal::Object**>(this->val_);
+ internal::Address* obj = reinterpret_cast<internal::Address*>(this->val_);
uint8_t* addr = reinterpret_cast<uint8_t*>(obj) + I::kNodeClassIdOffset;
*reinterpret_cast<uint16_t*>(addr) = class_id;
}
@@ -9513,14 +9640,13 @@ template <class T>
uint16_t PersistentBase<T>::WrapperClassId() const {
typedef internal::Internals I;
if (this->IsEmpty()) return 0;
- internal::Object** obj = reinterpret_cast<internal::Object**>(this->val_);
+ internal::Address* obj = reinterpret_cast<internal::Address*>(this->val_);
uint8_t* addr = reinterpret_cast<uint8_t*>(obj) + I::kNodeClassIdOffset;
return *reinterpret_cast<uint16_t*>(addr);
}
-
-template<typename T>
-ReturnValue<T>::ReturnValue(internal::Object** slot) : value_(slot) {}
+template <typename T>
+ReturnValue<T>::ReturnValue(internal::Address* slot) : value_(slot) {}
template<typename T>
template<typename S>
@@ -9529,7 +9655,7 @@ void ReturnValue<T>::Set(const Persistent<S>& handle) {
if (V8_UNLIKELY(handle.IsEmpty())) {
*value_ = GetDefaultValue();
} else {
- *value_ = *reinterpret_cast<internal::Object**>(*handle);
+ *value_ = *reinterpret_cast<internal::Address*>(*handle);
}
}
@@ -9540,7 +9666,7 @@ void ReturnValue<T>::Set(const Global<S>& handle) {
if (V8_UNLIKELY(handle.IsEmpty())) {
*value_ = GetDefaultValue();
} else {
- *value_ = *reinterpret_cast<internal::Object**>(*handle);
+ *value_ = *reinterpret_cast<internal::Address*>(*handle);
}
}
@@ -9551,7 +9677,7 @@ void ReturnValue<T>::Set(const Local<S> handle) {
if (V8_UNLIKELY(handle.IsEmpty())) {
*value_ = GetDefaultValue();
} else {
- *value_ = *reinterpret_cast<internal::Object**>(*handle);
+ *value_ = *reinterpret_cast<internal::Address*>(*handle);
}
}
@@ -9639,15 +9765,15 @@ void ReturnValue<T>::Set(S* whatever) {
TYPE_CHECK(S*, Primitive);
}
-template<typename T>
-internal::Object* ReturnValue<T>::GetDefaultValue() {
+template <typename T>
+internal::Address ReturnValue<T>::GetDefaultValue() {
// Default value is always the pointer below value_ on the stack.
return value_[-1];
}
template <typename T>
-FunctionCallbackInfo<T>::FunctionCallbackInfo(internal::Object** implicit_args,
- internal::Object** values,
+FunctionCallbackInfo<T>::FunctionCallbackInfo(internal::Address* implicit_args,
+ internal::Address* values,
int length)
: implicit_args_(implicit_args), values_(values), length_(length) {}
@@ -9816,20 +9942,22 @@ AccessorSignature* AccessorSignature::Cast(Data* data) {
}
Local<Value> Object::GetInternalField(int index) {
-#ifndef V8_ENABLE_CHECKS
- typedef internal::Object O;
+#if !defined(V8_ENABLE_CHECKS) && !defined(V8_COMPRESS_POINTERS)
+ typedef internal::Address A;
typedef internal::Internals I;
- O* obj = *reinterpret_cast<O**>(this);
+ A obj = *reinterpret_cast<A*>(this);
// Fast path: If the object is a plain JSObject, which is the common case, we
// know where to find the internal fields and can return the value directly.
auto instance_type = I::GetInstanceType(obj);
if (instance_type == I::kJSObjectType ||
instance_type == I::kJSApiObjectType ||
instance_type == I::kJSSpecialApiObjectType) {
- int offset = I::kJSObjectHeaderSize + (internal::kApiPointerSize * index);
- O* value = I::ReadField<O*>(obj, offset);
- O** result = HandleScope::CreateHandle(
- reinterpret_cast<internal::NeverReadOnlySpaceObject*>(obj), value);
+ int offset = I::kJSObjectHeaderSizeForEmbedderFields +
+ (I::kEmbedderDataSlotSize * index);
+ A value = I::ReadTaggedAnyField(obj, offset);
+ internal::Isolate* isolate =
+ internal::IsolateFromNeverReadOnlySpaceObject(obj);
+ A* result = HandleScope::CreateHandle(isolate, value);
return Local<Value>(reinterpret_cast<Value*>(result));
}
#endif
@@ -9838,18 +9966,19 @@ Local<Value> Object::GetInternalField(int index) {
void* Object::GetAlignedPointerFromInternalField(int index) {
-#ifndef V8_ENABLE_CHECKS
- typedef internal::Object O;
+#if !defined(V8_ENABLE_CHECKS) && !defined(V8_COMPRESS_POINTERS)
+ typedef internal::Address A;
typedef internal::Internals I;
- O* obj = *reinterpret_cast<O**>(this);
+ A obj = *reinterpret_cast<A*>(this);
// Fast path: If the object is a plain JSObject, which is the common case, we
// know where to find the internal fields and can return the value directly.
auto instance_type = I::GetInstanceType(obj);
if (V8_LIKELY(instance_type == I::kJSObjectType ||
instance_type == I::kJSApiObjectType ||
instance_type == I::kJSSpecialApiObjectType)) {
- int offset = I::kJSObjectHeaderSize + (internal::kApiPointerSize * index);
- return I::ReadField<void*>(obj, offset);
+ int offset = I::kJSObjectHeaderSizeForEmbedderFields +
+ (I::kEmbedderDataSlotSize * index);
+ return I::ReadRawField<void*>(obj, offset);
}
#endif
return SlowGetAlignedPointerFromInternalField(index);
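Embedder-side sketch of the accessors this fast path serves (|isolate|, |context|, and |my_state| assumed):

v8::Local<v8::ObjectTemplate> templ = v8::ObjectTemplate::New(isolate);
templ->SetInternalFieldCount(2);
v8::Local<v8::Object> obj = templ->NewInstance(context).ToLocalChecked();

obj->SetInternalField(0, v8::Integer::New(isolate, 42));
obj->SetAlignedPointerInInternalField(1, my_state);

v8::Local<v8::Value> tagged = obj->GetInternalField(0);  // fast path above
void* raw = obj->GetAlignedPointerFromInternalField(1);  // fast path above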
@@ -9864,7 +9993,7 @@ String* String::Cast(v8::Value* value) {
Local<String> String::Empty(Isolate* isolate) {
- typedef internal::Object* S;
+ typedef internal::Address S;
typedef internal::Internals I;
I::CheckInitialized(isolate);
S* slot = I::GetRoot(isolate, I::kEmptyStringRootIndex);
@@ -9873,13 +10002,13 @@ Local<String> String::Empty(Isolate* isolate) {
String::ExternalStringResource* String::GetExternalStringResource() const {
- typedef internal::Object O;
+ typedef internal::Address A;
typedef internal::Internals I;
- O* obj = *reinterpret_cast<O* const*>(this);
+ A obj = *reinterpret_cast<const A*>(this);
ExternalStringResource* result;
if (I::IsExternalTwoByteString(I::GetInstanceType(obj))) {
- void* value = I::ReadField<void*>(obj, I::kStringResourceOffset);
+ void* value = I::ReadRawField<void*>(obj, I::kStringResourceOffset);
result = reinterpret_cast<String::ExternalStringResource*>(value);
} else {
result = GetExternalStringResourceSlow();
@@ -9893,15 +10022,15 @@ String::ExternalStringResource* String::GetExternalStringResource() const {
String::ExternalStringResourceBase* String::GetExternalStringResourceBase(
String::Encoding* encoding_out) const {
- typedef internal::Object O;
+ typedef internal::Address A;
typedef internal::Internals I;
- O* obj = *reinterpret_cast<O* const*>(this);
+ A obj = *reinterpret_cast<const A*>(this);
int type = I::GetInstanceType(obj) & I::kFullStringRepresentationMask;
*encoding_out = static_cast<Encoding>(type & I::kStringEncodingMask);
ExternalStringResourceBase* resource;
if (type == I::kExternalOneByteRepresentationTag ||
type == I::kExternalTwoByteRepresentationTag) {
- void* value = I::ReadField<void*>(obj, I::kStringResourceOffset);
+ void* value = I::ReadRawField<void*>(obj, I::kStringResourceOffset);
resource = static_cast<ExternalStringResourceBase*>(value);
} else {
resource = GetExternalStringResourceBaseSlow(encoding_out);
@@ -9922,9 +10051,9 @@ bool Value::IsUndefined() const {
}
bool Value::QuickIsUndefined() const {
- typedef internal::Object O;
+ typedef internal::Address A;
typedef internal::Internals I;
- O* obj = *reinterpret_cast<O* const*>(this);
+ A obj = *reinterpret_cast<const A*>(this);
if (!I::HasHeapObjectTag(obj)) return false;
if (I::GetInstanceType(obj) != I::kOddballType) return false;
return (I::GetOddballKind(obj) == I::kUndefinedOddballKind);
@@ -9940,9 +10069,9 @@ bool Value::IsNull() const {
}
bool Value::QuickIsNull() const {
- typedef internal::Object O;
+ typedef internal::Address A;
typedef internal::Internals I;
- O* obj = *reinterpret_cast<O* const*>(this);
+ A obj = *reinterpret_cast<const A*>(this);
if (!I::HasHeapObjectTag(obj)) return false;
if (I::GetInstanceType(obj) != I::kOddballType) return false;
return (I::GetOddballKind(obj) == I::kNullOddballKind);
@@ -9957,9 +10086,9 @@ bool Value::IsNullOrUndefined() const {
}
bool Value::QuickIsNullOrUndefined() const {
- typedef internal::Object O;
+ typedef internal::Address A;
typedef internal::Internals I;
- O* obj = *reinterpret_cast<O* const*>(this);
+ A obj = *reinterpret_cast<const A*>(this);
if (!I::HasHeapObjectTag(obj)) return false;
if (I::GetInstanceType(obj) != I::kOddballType) return false;
int kind = I::GetOddballKind(obj);
@@ -9975,9 +10104,9 @@ bool Value::IsString() const {
}
bool Value::QuickIsString() const {
- typedef internal::Object O;
+ typedef internal::Address A;
typedef internal::Internals I;
- O* obj = *reinterpret_cast<O* const*>(this);
+ A obj = *reinterpret_cast<const A*>(this);
if (!I::HasHeapObjectTag(obj)) return false;
return (I::GetInstanceType(obj) < I::kFirstNonstringType);
}
@@ -10159,11 +10288,11 @@ Proxy* Proxy::Cast(v8::Value* value) {
return static_cast<Proxy*>(value);
}
-WasmCompiledModule* WasmCompiledModule::Cast(v8::Value* value) {
+WasmModuleObject* WasmModuleObject::Cast(v8::Value* value) {
#ifdef V8_ENABLE_CHECKS
CheckCast(value);
#endif
- return static_cast<WasmCompiledModule*>(value);
+ return static_cast<WasmModuleObject*>(value);
}
Promise::Resolver* Promise::Resolver::Cast(v8::Value* value) {
@@ -10352,7 +10481,7 @@ bool PropertyCallbackInfo<T>::ShouldThrowOnError() const {
Local<Primitive> Undefined(Isolate* isolate) {
- typedef internal::Object* S;
+ typedef internal::Address S;
typedef internal::Internals I;
I::CheckInitialized(isolate);
S* slot = I::GetRoot(isolate, I::kUndefinedValueRootIndex);
@@ -10361,7 +10490,7 @@ Local<Primitive> Undefined(Isolate* isolate) {
Local<Primitive> Null(Isolate* isolate) {
- typedef internal::Object* S;
+ typedef internal::Address S;
typedef internal::Internals I;
I::CheckInitialized(isolate);
S* slot = I::GetRoot(isolate, I::kNullValueRootIndex);
@@ -10370,7 +10499,7 @@ Local<Primitive> Null(Isolate* isolate) {
Local<Boolean> True(Isolate* isolate) {
- typedef internal::Object* S;
+ typedef internal::Address S;
typedef internal::Internals I;
I::CheckInitialized(isolate);
S* slot = I::GetRoot(isolate, I::kTrueValueRootIndex);
@@ -10379,7 +10508,7 @@ Local<Boolean> True(Isolate* isolate) {
Local<Boolean> False(Isolate* isolate) {
- typedef internal::Object* S;
+ typedef internal::Address S;
typedef internal::Internals I;
I::CheckInitialized(isolate);
S* slot = I::GetRoot(isolate, I::kFalseValueRootIndex);
@@ -10414,7 +10543,7 @@ MaybeLocal<T> Isolate::GetDataFromSnapshotOnce(size_t index) {
int64_t Isolate::AdjustAmountOfExternalAllocatedMemory(
int64_t change_in_bytes) {
typedef internal::Internals I;
- const int64_t kMemoryReducerActivationLimit = 32 * 1024 * 1024;
+ constexpr int64_t kMemoryReducerActivationLimit = 32 * 1024 * 1024;
int64_t* external_memory = reinterpret_cast<int64_t*>(
reinterpret_cast<uint8_t*>(this) + I::kExternalMemoryOffset);
int64_t* external_memory_limit = reinterpret_cast<int64_t*>(
@@ -10422,15 +10551,14 @@ int64_t Isolate::AdjustAmountOfExternalAllocatedMemory(
int64_t* external_memory_at_last_mc =
reinterpret_cast<int64_t*>(reinterpret_cast<uint8_t*>(this) +
I::kExternalMemoryAtLastMarkCompactOffset);
- const int64_t amount = *external_memory + change_in_bytes;
+ const int64_t amount = *external_memory + change_in_bytes;
*external_memory = amount;
int64_t allocation_diff_since_last_mc =
- *external_memory_at_last_mc - *external_memory;
- allocation_diff_since_last_mc = allocation_diff_since_last_mc < 0
- ? -allocation_diff_since_last_mc
- : allocation_diff_since_last_mc;
+ *external_memory - *external_memory_at_last_mc;
+ // Only check memory pressure and potentially trigger GC if the amount of
+ // external memory increased.
if (allocation_diff_since_last_mc > kMemoryReducerActivationLimit) {
CheckMemoryPressure();
}
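Embedder-side sketch of the reporting calls this logic serves (|isolate| and |buffer_size| assumed):

// On allocating external backing memory:
isolate->AdjustAmountOfExternalAllocatedMemory(
    static_cast<int64_t>(buffer_size));
// Only growth beyond the 32 MB activation limit since the last mark-compact
// triggers CheckMemoryPressure() now; shrinkage no longer does.

// On freeing it:
isolate->AdjustAmountOfExternalAllocatedMemory(
    -static_cast<int64_t>(buffer_size));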
@@ -10446,12 +10574,13 @@ int64_t Isolate::AdjustAmountOfExternalAllocatedMemory(
}
Local<Value> Context::GetEmbedderData(int index) {
-#ifndef V8_ENABLE_CHECKS
- typedef internal::Object O;
+#if !defined(V8_ENABLE_CHECKS) && !defined(V8_COMPRESS_POINTERS)
+ typedef internal::Address A;
typedef internal::Internals I;
- auto* context = *reinterpret_cast<internal::NeverReadOnlySpaceObject**>(this);
- O** result =
- HandleScope::CreateHandle(context, I::ReadEmbedderData<O*>(this, index));
+ internal::Isolate* isolate = internal::IsolateFromNeverReadOnlySpaceObject(
+ *reinterpret_cast<A*>(this));
+ A* result =
+ HandleScope::CreateHandle(isolate, I::ReadEmbedderData<A>(this, index));
return Local<Value>(reinterpret_cast<Value*>(result));
#else
return SlowGetEmbedderData(index);
@@ -10460,7 +10589,7 @@ Local<Value> Context::GetEmbedderData(int index) {
void* Context::GetAlignedPointerFromEmbedderData(int index) {
-#ifndef V8_ENABLE_CHECKS
+#if !defined(V8_ENABLE_CHECKS) && !defined(V8_COMPRESS_POINTERS)
typedef internal::Internals I;
return I::ReadEmbedderData<void*>(this, index);
#else
@@ -10478,14 +10607,14 @@ MaybeLocal<T> Context::GetDataFromSnapshotOnce(size_t index) {
template <class T>
size_t SnapshotCreator::AddData(Local<Context> context, Local<T> object) {
T* object_ptr = *object;
- internal::Object** p = reinterpret_cast<internal::Object**>(object_ptr);
+ internal::Address* p = reinterpret_cast<internal::Address*>(object_ptr);
return AddData(context, *p);
}
template <class T>
size_t SnapshotCreator::AddData(Local<T> object) {
T* object_ptr = *object;
- internal::Object** p = reinterpret_cast<internal::Object**>(object_ptr);
+ internal::Address* p = reinterpret_cast<internal::Address*>(object_ptr);
return AddData(*p);
}
diff --git a/deps/v8/include/v8config.h b/deps/v8/include/v8config.h
index 93c4629825..e30a582e8f 100644
--- a/deps/v8/include/v8config.h
+++ b/deps/v8/include/v8config.h
@@ -161,14 +161,8 @@
//
// C++11 feature detection
//
-// V8_HAS_CXX11_ALIGNAS - alignas specifier supported
-// V8_HAS_CXX11_ALIGNOF - alignof(type) operator supported
-//
// Compiler-specific feature detection
//
-// V8_HAS___ALIGNOF - __alignof(type) operator supported
-// V8_HAS___ALIGNOF__ - __alignof__(type) operator supported
-// V8_HAS_ATTRIBUTE_ALIGNED - __attribute__((aligned(n))) supported
// V8_HAS_ATTRIBUTE_ALWAYS_INLINE - __attribute__((always_inline))
// supported
// V8_HAS_ATTRIBUTE_DEPRECATED - __attribute__((deprecated)) supported
@@ -188,7 +182,6 @@
// V8_HAS_BUILTIN_SADD_OVERFLOW - __builtin_sadd_overflow() supported
// V8_HAS_BUILTIN_SSUB_OVERFLOW - __builtin_ssub_overflow() supported
// V8_HAS_BUILTIN_UADD_OVERFLOW - __builtin_uadd_overflow() supported
-// V8_HAS_DECLSPEC_ALIGN - __declspec(align(n)) supported
// V8_HAS_DECLSPEC_DEPRECATED - __declspec(deprecated) supported
// V8_HAS_DECLSPEC_NOINLINE - __declspec(noinline) supported
// V8_HAS_DECLSPEC_SELECTANY - __declspec(selectany) supported
@@ -207,11 +200,6 @@
# define V8_CC_GNU 1
#endif
-// Clang defines __alignof__ as alias for __alignof
-# define V8_HAS___ALIGNOF 1
-# define V8_HAS___ALIGNOF__ V8_HAS___ALIGNOF
-
-# define V8_HAS_ATTRIBUTE_ALIGNED (__has_attribute(aligned))
# define V8_HAS_ATTRIBUTE_ALWAYS_INLINE (__has_attribute(always_inline))
# define V8_HAS_ATTRIBUTE_DEPRECATED (__has_attribute(deprecated))
# define V8_HAS_ATTRIBUTE_DEPRECATED_MESSAGE \
@@ -234,7 +222,9 @@
# define V8_HAS_BUILTIN_SSUB_OVERFLOW (__has_builtin(__builtin_ssub_overflow))
# define V8_HAS_BUILTIN_UADD_OVERFLOW (__has_builtin(__builtin_uadd_overflow))
-# define V8_HAS_CXX11_ALIGNAS (__has_feature(cxx_alignas))
+# if __cplusplus >= 201402L
+# define V8_CAN_HAVE_DCHECK_IN_CONSTEXPR 1
+# endif
#elif defined(__GNUC__)
@@ -250,9 +240,6 @@
# endif
# define V8_CC_MINGW (V8_CC_MINGW32 || V8_CC_MINGW64)
-# define V8_HAS___ALIGNOF__ (V8_GNUC_PREREQ(4, 3, 0))
-
-# define V8_HAS_ATTRIBUTE_ALIGNED (V8_GNUC_PREREQ(2, 95, 0))
// always_inline is available in gcc 4.0 but not very reliable until 4.4.
// Works around "sorry, unimplemented: inlining failed" build errors with
// older compilers.
@@ -271,17 +258,11 @@
# define V8_HAS_BUILTIN_FRAME_ADDRESS (V8_GNUC_PREREQ(2, 96, 0))
# define V8_HAS_BUILTIN_POPCOUNT (V8_GNUC_PREREQ(3, 4, 0))
-# if __cplusplus >= 201103L
-# define V8_HAS_CXX11_ALIGNAS (V8_GNUC_PREREQ(4, 8, 0))
-# define V8_HAS_CXX11_ALIGNOF (V8_GNUC_PREREQ(4, 8, 0))
-# endif
#endif
#if defined(_MSC_VER)
# define V8_CC_MSVC 1
-# define V8_HAS___ALIGNOF 1
-# define V8_HAS_DECLSPEC_ALIGN 1
# define V8_HAS_DECLSPEC_DEPRECATED 1
# define V8_HAS_DECLSPEC_NOINLINE 1
# define V8_HAS_DECLSPEC_SELECTANY 1
@@ -359,58 +340,6 @@
#endif
-// This macro allows to specify memory alignment for structs, classes, etc.
-// Use like:
-// class V8_ALIGNED(16) MyClass { ... };
-// V8_ALIGNED(32) int array[42];
-#if V8_HAS_CXX11_ALIGNAS
-# define V8_ALIGNED(n) alignas(n)
-#elif V8_HAS_ATTRIBUTE_ALIGNED
-# define V8_ALIGNED(n) __attribute__((aligned(n)))
-#elif V8_HAS_DECLSPEC_ALIGN
-# define V8_ALIGNED(n) __declspec(align(n))
-#else
-# define V8_ALIGNED(n) /* NOT SUPPORTED */
-#endif
-
-
-// This macro is similar to V8_ALIGNED(), but takes a type instead of size
-// in bytes. If the compiler does not supports using the alignment of the
-// |type|, it will align according to the |alignment| instead. For example,
-// Visual Studio C++ cannot combine __declspec(align) and __alignof. The
-// |alignment| must be a literal that is used as a kind of worst-case fallback
-// alignment.
-// Use like:
-// struct V8_ALIGNAS(AnotherClass, 16) NewClass { ... };
-// V8_ALIGNAS(double, 8) int array[100];
-#if V8_HAS_CXX11_ALIGNAS
-# define V8_ALIGNAS(type, alignment) alignas(type)
-#elif V8_HAS___ALIGNOF__ && V8_HAS_ATTRIBUTE_ALIGNED
-# define V8_ALIGNAS(type, alignment) __attribute__((aligned(__alignof__(type))))
-#else
-# define V8_ALIGNAS(type, alignment) V8_ALIGNED(alignment)
-#endif
-
-
-// This macro returns alignment in bytes (an integer power of two) required for
-// any instance of the given type, which is either complete type, an array type,
-// or a reference type.
-// Use like:
-// size_t alignment = V8_ALIGNOF(double);
-#if V8_HAS_CXX11_ALIGNOF
-# define V8_ALIGNOF(type) alignof(type)
-#elif V8_HAS___ALIGNOF
-# define V8_ALIGNOF(type) __alignof(type)
-#elif V8_HAS___ALIGNOF__
-# define V8_ALIGNOF(type) __alignof__(type)
-#else
-// Note that alignment of a type within a struct can be less than the
-// alignment of the type stand-alone (because of ancient ABIs), so this
-// should only be used as a last resort.
-namespace v8 { template <typename T> class AlignOfHelper { char c; T t; }; }
-# define V8_ALIGNOF(type) (sizeof(::v8::AlignOfHelper<type>) - sizeof(type))
-#endif
-
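With the fallbacks removed, call sites are presumably expected to use the C++11 keywords directly, e.g.:

class alignas(16) SimdBuffer { char bytes[64]; };
static_assert(alignof(SimdBuffer) == 16, "alignas must be honored");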
// Annotate a function indicating the caller must examine the return value.
// Use like:
// int foo() V8_WARN_UNUSED_RESULT;
diff --git a/deps/v8/infra/config/cq.cfg b/deps/v8/infra/config/cq.cfg
index 53ea0cdd44..e58723719e 100644
--- a/deps/v8/infra/config/cq.cfg
+++ b/deps/v8/infra/config/cq.cfg
@@ -42,10 +42,13 @@ verifiers {
name: "v8_linux64_rel_ng_triggered"
triggered_by: "v8_linux64_rel_ng"
}
+  # TODO(machenbach): Figure out if the bot should be removed or if
+  # its functionality should be revived.
builders {
name: "v8_linux64_sanitizer_coverage_rel"
- experiment_percentage: 100
+ experiment_percentage: 10
}
+ builders { name: "v8_linux64_shared_compile_rel" }
builders { name: "v8_linux64_verify_csa_rel_ng" }
builders {
name: "v8_linux64_verify_csa_rel_ng_triggered"
@@ -77,7 +80,6 @@ verifiers {
name: "v8_linux_rel_ng_triggered"
triggered_by: "v8_linux_rel_ng"
}
- builders { name: "v8_linux_shared_compile_rel" }
builders { name: "v8_linux_verify_csa_rel_ng" }
builders {
name: "v8_linux_verify_csa_rel_ng_triggered"
@@ -89,7 +91,10 @@ verifiers {
triggered_by: "v8_mac64_rel_ng"
}
builders { name: "v8_node_linux64_rel" }
- builders { name: "v8_presubmit" }
+ builders {
+ name: "v8_presubmit"
+ disable_reuse: true
+ }
builders { name: "v8_win64_msvc_compile_rel" }
builders { name: "v8_win64_rel_ng" }
builders {
@@ -107,21 +112,44 @@ verifiers {
name: "v8_win_rel_ng_triggered"
triggered_by: "v8_win_rel_ng"
}
+ builders {
+ name: "v8_linux_noi18n_rel_ng"
+ path_regexp: ".*intl.*"
+ path_regexp: ".*test262.*"
+ }
}
- # TODO(machenbach): Remove after testing in practice and migrate to
- # PRESUBMIT.py scripts.
+
buckets {
name: "luci.chromium.try"
builders {
- name: "cast_shell_android"
- experiment_percentage: 20
+ name: "linux_chromium_rel_ng"
+ path_regexp: "include/.+\\.h"
+ path_regexp: "src/api\\.cc"
+ path_regexp: "src/message-template\\.h"
}
builders {
- name: "cast_shell_linux"
+ name: "linux_chromium_headless_rel"
+ path_regexp: "src/inspector/.+"
+ path_regexp: "test/inspector/.+"
+ }
+ builders {
+ name: "linux-blink-rel"
+ path_regexp: "src/inspector/.+"
+ path_regexp: "test/inspector/.+"
+ }
+  # TODO(machenbach): Uncomment path_regexp after testing; currently,
+  # path_regexp cannot be combined with experiment_percentage. See
+  # crbug.com/v8/8058 for details.
+ builders {
+ name: "cast_shell_android"
+ #path_regexp: "include/.+\\.h"
+ #path_regexp: "src/api\\.cc"
experiment_percentage: 20
}
builders {
- name: "linux-chromeos-rel"
+ name: "cast_shell_linux"
+ #path_regexp: "include/.+\\.h"
+ #path_regexp: "src/api\\.cc"
experiment_percentage: 20
}
}
diff --git a/deps/v8/infra/mb/mb_config.pyl b/deps/v8/infra/mb/mb_config.pyl
index 124524c552..8a6cba71e0 100644
--- a/deps/v8/infra/mb/mb_config.pyl
+++ b/deps/v8/infra/mb/mb_config.pyl
@@ -62,6 +62,7 @@
'V8 Linux64 - custom snapshot - debug builder': 'debug_x64_custom',
'V8 Linux64 - internal snapshot': 'release_x64_internal',
'V8 Linux64 - debug - header includes': 'debug_x64_header_includes',
+ 'V8 Linux64 - shared': 'release_x64_shared_verify_heap',
'V8 Linux64 - verify csa': 'release_x64_verify_csa',
# Jumbo.
'V8 Linux64 Jumbo': 'release_x64_jumbo',
@@ -91,8 +92,11 @@
'V8 Linux gcc 4.8': 'release_x86_gcc',
'V8 Linux64 gcc 4.8 - debug': 'debug_x64_gcc',
# FYI.
- 'V8 Linux - embedded builtins': 'release_x86_embedded_builtins',
- 'V8 Linux - embedded builtins - debug': 'debug_x86_embedded_builtins',
+ 'V8 Linux64 - pointer compression': 'release_x64_pointer_compression',
+ 'V8 Linux64 - arm64 - sim - pointer compression - builder':
+ 'release_simulate_arm64_pointer_compression',
+ 'V8 Linux - noembed': 'release_x86_noembed',
+ 'V8 Linux - noembed - debug': 'debug_x86_noembed',
'V8 Fuchsia': 'release_x64_fuchsia',
'V8 Fuchsia - debug': 'debug_x64_fuchsia',
'V8 Linux64 - cfi': 'release_x64_cfi',
@@ -120,8 +124,6 @@
'V8 Clusterfuzz Linux64 - release builder':
'release_x64_correctness_fuzzer',
'V8 Clusterfuzz Linux64 - debug builder': 'debug_x64',
- 'V8 Clusterfuzz Linux64 - nosnap release builder': 'release_x64_no_snap',
- 'V8 Clusterfuzz Linux64 - nosnap debug builder': 'debug_x64_no_snap',
'V8 Clusterfuzz Linux64 ASAN no inline - release builder':
'release_x64_asan_symbolized_edge_verify_heap',
'V8 Clusterfuzz Linux64 ASAN - debug builder': 'debug_x64_asan_edge',
@@ -136,6 +138,8 @@
'V8 Clusterfuzz Linux MSAN chained origins':
'release_simulate_arm64_msan_edge',
'V8 Clusterfuzz Linux64 TSAN - release builder': 'release_x64_tsan',
+ 'V8 Clusterfuzz Linux64 UBSan - release builder':
+ 'release_x64_ubsan_recover_edge',
'V8 Clusterfuzz Linux64 UBSanVptr - release builder':
'release_x64_ubsan_vptr_recover_edge',
},
@@ -146,6 +150,8 @@
'V8 Android Arm - builder': 'release_android_arm',
'V8 Linux - arm - sim': 'release_simulate_arm',
'V8 Linux - arm - sim - debug': 'debug_simulate_arm',
+ 'V8 Linux - arm - sim - lite': 'release_simulate_arm_lite',
+ 'V8 Linux - arm - sim - lite - debug': 'debug_simulate_arm_lite',
# Arm64.
'V8 Android Arm64 - builder': 'release_android_arm64',
'V8 Android Arm64 - debug builder': 'debug_android_arm64',
@@ -189,7 +195,7 @@
'v8_android_arm64_compile_dbg': 'debug_android_arm64',
'v8_android_arm64_n5x_rel_ng': 'release_android_arm64',
'v8_fuchsia_rel_ng': 'release_x64_fuchsia_trybot',
- 'v8_linux_embedded_builtins_rel_ng': 'release_x86_embedded_builtins_trybot',
+ 'v8_linux_noembed_rel_ng': 'release_x86_noembed_trybot',
'v8_linux_rel_ng': 'release_x86_gcmole_trybot',
'v8_linux_optional_rel_ng': 'release_x86_trybot',
'v8_linux_verify_csa_rel_ng': 'release_x86_verify_csa',
@@ -202,12 +208,16 @@
'v8_linux_gcc_compile_rel': 'release_x86_gcc_minimal_symbols',
'v8_linux_gcc_rel': 'release_x86_gcc_minimal_symbols',
'v8_linux_shared_compile_rel': 'release_x86_shared_verify_heap',
+ 'v8_linux64_arm64_pointer_compression_rel_ng':
+ 'release_simulate_arm64_pointer_compression',
'v8_linux64_compile_rel_xg': 'release_x64_test_features_trybot',
'v8_linux64_dbg_ng': 'debug_x64_trybot',
'v8_linux64_gcc_compile_dbg': 'debug_x64_gcc',
'v8_linux64_header_includes_dbg': 'debug_x64_header_includes',
'v8_linux64_fyi_rel_ng': 'release_x64_test_features_trybot',
+ 'v8_linux64_pointer_compression_rel_ng': 'release_x64_pointer_compression',
'v8_linux64_rel_ng': 'release_x64_test_features_trybot',
+ 'v8_linux64_shared_compile_rel': 'release_x64_shared_verify_heap',
'v8_linux64_verify_csa_rel_ng': 'release_x64_verify_csa',
'v8_linux64_asan_rel_ng': 'release_x64_asan_minimal_symbols',
'v8_linux64_cfi_rel_ng': 'release_x64_cfi',
@@ -219,7 +229,8 @@
'v8_linux64_tsan_rel': 'release_x64_tsan_minimal_symbols',
'v8_linux64_tsan_isolates_rel_ng':
'release_x64_tsan_minimal_symbols',
- 'v8_linux64_ubsan_rel_ng': 'release_x64_ubsan_vptr_minimal_symbols',
+ 'v8_linux64_ubsan_rel_ng': 'release_x64_ubsan_minimal_symbols',
+ 'v8_linux64_ubsan_vptr_rel_ng': 'release_x64_ubsan_vptr_minimal_symbols',
'v8_odroid_arm_rel_ng': 'release_arm',
# TODO(machenbach): Remove after switching to x64 on infra side.
'v8_win_dbg': 'debug_x86_trybot',
@@ -239,6 +250,7 @@
'v8_mac64_asan_rel': 'release_x64_asan_no_lsan',
'v8_mips_compile_rel': 'release_mips_no_snap_no_i18n',
'v8_linux_arm_rel_ng': 'release_simulate_arm_trybot',
+ 'v8_linux_arm_lite_rel_ng': 'release_simulate_arm_lite_trybot',
'v8_linux_arm_dbg': 'debug_simulate_arm',
'v8_linux_arm_armv8a_rel': 'release_simulate_arm_trybot',
'v8_linux_arm_armv8a_dbg': 'debug_simulate_arm',
@@ -330,6 +342,8 @@
'debug_bot', 'simulate_arm'],
'debug_simulate_arm_asan_edge': [
'debug_bot', 'simulate_arm', 'asan', 'edge'],
+ 'debug_simulate_arm_lite': [
+ 'debug_bot', 'simulate_arm', 'v8_enable_lite_mode'],
'debug_simulate_arm64': [
'debug_bot', 'simulate_arm64'],
'debug_simulate_arm64_asan_edge': [
@@ -341,10 +355,17 @@
# Release configs for simulators.
'release_simulate_arm': [
'release_bot', 'simulate_arm'],
+ 'release_simulate_arm_lite': [
+ 'release_bot', 'simulate_arm', 'v8_enable_lite_mode'],
'release_simulate_arm_trybot': [
'release_trybot', 'simulate_arm'],
+ 'release_simulate_arm_lite_trybot': [
+ 'release_trybot', 'simulate_arm', 'v8_enable_lite_mode'],
'release_simulate_arm64': [
'release_bot', 'simulate_arm64'],
+ 'release_simulate_arm64_pointer_compression': [
+ 'release_bot', 'simulate_arm64', 'dcheck_always_on',
+ 'v8_enable_slow_dchecks', 'v8_enable_pointer_compression'],
'release_simulate_arm64_msan': [
'release_bot', 'simulate_arm64', 'msan'],
'release_simulate_arm64_msan_minimal_symbols': [
@@ -427,8 +448,9 @@
'release_trybot', 'x64', 'jumbo_limited'],
'release_x64_minimal_symbols': [
'release_bot', 'x64', 'minimal_symbols'],
- 'release_x64_no_snap': [
- 'release_bot', 'x64', 'v8_snapshot_none'],
+ 'release_x64_pointer_compression': [
+ 'release_bot', 'x64', 'dcheck_always_on', 'v8_enable_slow_dchecks',
+ 'v8_enable_pointer_compression'],
'release_x64_trybot': [
'release_trybot', 'x64'],
'release_x64_test_features_trybot': [
@@ -439,12 +461,18 @@
'release_bot', 'x64', 'tsan', 'minimal_symbols'],
'release_x64_ubsan': [
'release_bot', 'x64', 'ubsan'],
+ 'release_x64_ubsan_minimal_symbols': [
+ 'release_bot', 'x64', 'ubsan', 'minimal_symbols'],
+ 'release_x64_ubsan_recover_edge': [
+ 'release_bot', 'x64', 'edge', 'ubsan_recover'],
'release_x64_ubsan_vptr': [
'release_bot', 'x64', 'ubsan_vptr'],
- 'release_x64_ubsan_vptr_recover_edge': [
- 'release_bot', 'x64', 'edge', 'ubsan_vptr_recover'],
'release_x64_ubsan_vptr_minimal_symbols': [
'release_bot', 'x64', 'ubsan_vptr', 'minimal_symbols'],
+ 'release_x64_ubsan_vptr_recover_edge': [
+ 'release_bot', 'x64', 'edge', 'ubsan_vptr_recover'],
+ 'release_x64_shared_verify_heap': [
+ 'release_bot', 'x64', 'shared', 'v8_verify_heap'],
'release_x64_verify_csa': [
'release_bot', 'x64', 'dcheck_always_on',
'v8_enable_slow_dchecks', 'v8_verify_csa'],
@@ -471,17 +499,14 @@
'debug_bot', 'x64', 'jumbo_limited'],
'debug_x64_minimal_symbols': [
'debug_bot', 'x64', 'minimal_symbols'],
- 'debug_x64_no_snap': [
- 'debug_bot', 'x64', 'v8_snapshot_none'],
'debug_x64_trybot': [
'debug_trybot', 'x64'],
# Debug configs for x86.
'debug_x86': [
'debug_bot', 'x86'],
- 'debug_x86_embedded_builtins': [
- 'debug_bot', 'x86', 'v8_enable_embedded_builtins',
- 'v8_no_untrusted_code_mitigations'],
+ 'debug_x86_noembed': [
+ 'debug_bot', 'x86', 'v8_no_enable_embedded_builtins'],
'debug_x86_minimal_symbols': [
'debug_bot', 'x86', 'minimal_symbols'],
'debug_x86_no_i18n': [
@@ -501,12 +526,10 @@
# Release configs for x86.
'release_x86': [
'release_bot', 'x86'],
- 'release_x86_embedded_builtins': [
- 'release_bot', 'x86', 'v8_enable_embedded_builtins',
- 'v8_no_untrusted_code_mitigations'],
- 'release_x86_embedded_builtins_trybot': [
- 'release_trybot', 'x86', 'v8_enable_embedded_builtins',
- 'v8_no_untrusted_code_mitigations'],
+ 'release_x86_noembed': [
+ 'release_bot', 'x86', 'v8_no_enable_embedded_builtins'],
+ 'release_x86_noembed_trybot': [
+ 'release_trybot', 'x86', 'v8_no_enable_embedded_builtins'],
'release_x86_gcc': [
'release_bot', 'x86', 'gcc'],
'release_x86_gcc_minimal_symbols': [
@@ -656,7 +679,10 @@
},
'mips_bundled_toolchain': {
- 'gn_args': 'custom_toolchain="//tools/toolchain:mips-bundled"',
+ 'gn_args': 'custom_toolchain="tools/toolchain:mips-bundled" '
+ 'ldso_path="tools/mips_toolchain/sysroot/usr/lib/ld.so.1" '
+ 'gcc_target_rpath="tools/mips_toolchain/sysroot/usr/lib:'
+ 'tools/mips_toolchain/mips-mti-linux-gnu/lib:\$ORIGIN/."',
},
'msan': {
@@ -746,6 +772,12 @@
'gn_args': 'is_ubsan=true is_ubsan_no_recover=true',
},
+ 'ubsan_recover': {
+ 'mixins': ['v8_enable_test_features'],
+ # Ubsan with recovery.
+ 'gn_args': 'is_ubsan=true is_ubsan_no_recover=false',
+ },
+
'ubsan_vptr': {
'mixins': ['v8_enable_test_features'],
# TODO(krasin): Remove is_ubsan_no_recover=true when
@@ -769,14 +801,22 @@
'gn_args': 'v8_correctness_fuzzer=true v8_multi_arch_build=true',
},
- 'v8_enable_embedded_builtins': {
- 'gn_args': 'v8_enable_embedded_builtins=true',
+ 'v8_no_enable_embedded_builtins': {
+ 'gn_args': 'v8_enable_embedded_builtins=false',
+ },
+
+ 'v8_enable_lite_mode': {
+ 'gn_args': 'v8_enable_lite_mode=true',
},
'v8_enable_slow_dchecks': {
'gn_args': 'v8_enable_slow_dchecks=true',
},
+ 'v8_enable_pointer_compression': {
+ 'gn_args': 'v8_enable_pointer_compression=true',
+ },
+
'v8_enable_test_features': {
'gn_args': 'v8_enable_test_features=true',
},
@@ -818,10 +858,6 @@
'gn_args': 'v8_use_snapshot=false',
},
- 'v8_no_untrusted_code_mitigations': {
- 'gn_args': 'v8_untrusted_code_mitigations=false',
- },
-
'v8_verify_heap': {
'gn_args': 'v8_enable_verify_heap=true',
},
diff --git a/deps/v8/infra/testing/builders.pyl b/deps/v8/infra/testing/builders.pyl
index 31aef9c321..f15358405c 100644
--- a/deps/v8/infra/testing/builders.pyl
+++ b/deps/v8/infra/testing/builders.pyl
@@ -127,7 +127,7 @@
{'name': 'mozilla', 'variant': 'extra'},
{'name': 'optimize_for_size'},
{'name': 'test262_variants', 'shards': 4},
- {'name': 'test262_variants', 'variant': 'extra', 'shards': 2},
+ {'name': 'test262_variants', 'variant': 'extra', 'shards': 3},
{'name': 'v8testing', 'shards': 2},
{'name': 'v8testing', 'suffix': 'isolates', 'test_args': ['--isolates'], 'shards': 2},
{'name': 'v8testing', 'variant': 'extra'},
@@ -270,6 +270,11 @@
{'name': 'v8testing', 'shards': 5},
],
},
+ 'v8_linux64_pointer_compression_rel_ng_triggered': {
+ 'tests': [
+ {'name': 'v8testing', 'shards': 3},
+ ],
+ },
'v8_linux64_rel_ng_triggered': {
'swarming_dimensions' : {
'cpu': 'x86-64-avx2',
@@ -280,7 +285,7 @@
{'name': 'mjsunit_sp_frame_access'},
{'name': 'optimize_for_size'},
{'name': 'test262_variants', 'shards': 4},
- {'name': 'test262_variants', 'variant': 'extra', 'shards': 2},
+ {'name': 'test262_variants', 'variant': 'extra', 'shards': 3},
{'name': 'v8initializers'},
{'name': 'v8testing', 'shards': 2},
{'name': 'v8testing', 'variant': 'extra'},
@@ -299,6 +304,11 @@
{'name': 'v8testing', 'shards': 2},
],
},
+ 'v8_linux64_sanitizer_coverage_rel': {
+ 'tests': [
+ {'name': 'v8testing', 'shards': 3},
+ ],
+ },
'v8_linux64_tsan_rel': {
'tests': [
{'name': 'benchmarks'},
@@ -319,6 +329,11 @@
{'name': 'v8testing', 'shards': 2},
],
},
+ 'v8_linux64_ubsan_vptr_rel_ng_triggered': {
+ 'tests': [
+ {'name': 'v8testing', 'shards': 2},
+ ],
+ },
'v8_linux64_verify_csa_rel_ng_triggered': {
'tests': [
{'name': 'v8testing', 'shards': 2},
@@ -329,10 +344,10 @@
'v8_linux_arm64_dbg': {
'tests': [
{'name': 'mjsunit_sp_frame_access'},
- {'name': 'mozilla'},
- {'name': 'test262'},
- {'name': 'v8testing', 'shards': 7},
- {'name': 'v8testing', 'variant': 'extra', 'shards': 3},
+ {'name': 'mozilla', 'shards': 2},
+ {'name': 'test262', 'shards': 2},
+ {'name': 'v8testing', 'shards': 10},
+ {'name': 'v8testing', 'variant': 'extra', 'shards': 6},
],
},
'v8_linux_arm64_gc_stress_dbg': {
@@ -349,6 +364,16 @@
{'name': 'v8testing', 'variant': 'extra', 'shards': 6},
],
},
+ 'v8_linux64_arm64_pointer_compression_rel_ng_triggered': {
+ 'swarming_task_attrs': {
+ 'expiration': 14400,
+ 'hard_timeout': 3600,
+ 'priority': 35,
+ },
+ 'tests': [
+ {'name': 'v8testing', 'shards': 7},
+ ],
+ },
##############################################################################
# Odroids with native arm
'v8_odroid_arm_rel_ng_triggered': {
@@ -481,7 +506,7 @@
},
'tests': [
{'name': 'mozilla'},
- {'name': 'test262'},
+ {'name': 'test262', 'shards': 2},
{'name': 'v8testing', 'shards': 2},
{'name': 'v8testing', 'variant': 'extra'},
],
@@ -563,8 +588,8 @@
{'name': 'mozilla', 'variant': 'extra'},
{'name': 'optimize_for_size'},
{'name': 'test262_variants', 'shards': 6},
- {'name': 'test262_variants', 'variant': 'code_serializer', 'shards': 1},
- {'name': 'test262_variants', 'variant': 'extra', 'shards': 2},
+ {'name': 'test262_variants', 'variant': 'code_serializer', 'shards': 2},
+ {'name': 'test262_variants', 'variant': 'extra', 'shards': 3},
{'name': 'v8testing', 'shards': 3},
{
'name': 'v8testing',
@@ -620,8 +645,13 @@
],
},
'V8 Linux - full debug': {
+ 'swarming_task_attrs': {
+ 'expiration': 14400,
+ 'hard_timeout': 3600,
+ 'priority': 35,
+ },
'tests': [
- {'name': 'v8testing', 'variant': 'default'},
+ {'name': 'v8testing', 'variant': 'default', 'shards': 3},
],
},
'V8 Linux - gc stress': {
@@ -664,7 +694,7 @@
},
'tests': [
{'name': 'mozilla', 'variant': 'default'},
- {'name': 'test262', 'variant': 'default', 'shards': 2},
+ {'name': 'test262', 'variant': 'default', 'shards': 3},
{'name': 'v8testing', 'variant': 'default', 'shards': 3},
],
},
@@ -763,7 +793,7 @@
{'name': 'mozilla', 'variant': 'extra'},
{'name': 'optimize_for_size'},
{'name': 'test262_variants', 'shards': 5},
- {'name': 'test262_variants', 'variant': 'extra', 'shards': 2},
+ {'name': 'test262_variants', 'variant': 'extra', 'shards': 3},
{'name': 'v8testing', 'shards': 2},
{'name': 'v8testing', 'variant': 'extra'},
{'name': 'v8testing', 'variant': 'minor_mc', 'shards': 1},
@@ -815,6 +845,18 @@
{'name': 'v8testing'},
],
},
+ 'V8 Linux64 - pointer compression': {
+ 'tests': [
+ {'name': 'v8testing', 'shards': 2},
+ ],
+ },
+ 'V8 Linux64 - shared': {
+ 'tests': [
+ {'name': 'mozilla'},
+ {'name': 'test262'},
+ {'name': 'v8testing'},
+ ],
+ },
'V8 Linux64 - verify csa': {
'tests': [
{'name': 'v8testing'},
@@ -876,12 +918,15 @@
},
'V8 Linux64 TSAN - isolates': {
'tests': [
- {'name': 'v8testing', 'test_args': ['--isolates'], 'shards': 5},
+ {'name': 'v8testing', 'test_args': ['--isolates'], 'shards': 7},
],
},
'V8 Linux64 UBSan': {
'tests': [
+ {'name': 'mozilla'},
+ {'name': 'test262', 'shards': 2},
{'name': 'v8testing'},
+ {'name': 'v8testing', 'variant': 'extra'},
],
},
'V8 Linux64 UBSanVptr': {
@@ -897,7 +942,7 @@
'tests': [
{'name': 'mozilla'},
{'name': 'test262'},
- {'name': 'v8testing'},
+ {'name': 'v8testing', 'shards': 2},
{'name': 'v8testing', 'variant': 'extra'},
],
},
@@ -908,7 +953,7 @@
},
'tests': [
{'name': 'mozilla'},
- {'name': 'test262'},
+ {'name': 'test262', 'shards': 2},
{'name': 'v8testing', 'shards': 3},
{'name': 'v8testing', 'variant': 'extra'},
],
@@ -1037,7 +1082,9 @@
'cores': '8',
'os': 'Ubuntu-16.04',
'cpu': 'armv7l-32-ODROID-XU4',
- }
+ },
+ # Less parallelism to prevent OOMs in benchmarks.
+ 'test_args': ['-j2'],
},
{
'name': 'optimize_for_size',
@@ -1179,7 +1226,7 @@
{'name': 'mjsunit_sp_frame_access'},
{'name': 'mozilla'},
{'name': 'test262'},
- {'name': 'v8testing', 'shards': 7},
+ {'name': 'v8testing', 'shards': 8},
{'name': 'v8testing', 'variant': 'extra', 'shards': 3},
# Armv8-a.
{
@@ -1196,7 +1243,7 @@
'name': 'v8testing',
'suffix': 'armv8-a',
'test_args': ['--extra-flags', '--enable-armv8'],
- 'shards': 7
+ 'shards': 8
},
# Novfp3.
{
@@ -1216,7 +1263,7 @@
'suffix': 'novfp3',
'variant': 'default',
'test_args': ['--novfp3'],
- 'shards': 7
+ 'shards': 8
},
],
},
@@ -1240,10 +1287,14 @@
],
},
'V8 Linux - arm64 - sim - debug': {
+ # TODO(machenbach): Remove longer timeout when this builder scales better.
+ 'swarming_task_attrs': {
+ 'hard_timeout': 3600,
+ },
'tests': [
{'name': 'mjsunit_sp_frame_access'},
- {'name': 'mozilla'},
- {'name': 'test262'},
+ {'name': 'mozilla', 'shards': 2},
+ {'name': 'test262', 'shards': 2},
{'name': 'v8testing', 'shards': 10},
{'name': 'v8testing', 'variant': 'extra', 'shards': 6},
],
@@ -1304,6 +1355,16 @@
{'name': 'v8testing', 'shards': 3},
],
},
+ 'V8 Linux64 - arm64 - sim - pointer compression': {
+ 'swarming_task_attrs': {
+ 'expiration': 14400,
+ 'hard_timeout': 3600,
+ 'priority': 35,
+ },
+ 'tests': [
+ {'name': 'v8testing', 'shards': 7},
+ ],
+ },
'V8 Mips - big endian - nosnap': {
'swarming_dimensions': {
'cpu': 'mips-32',
@@ -1463,50 +1524,9 @@
},
],
},
- 'V8 NumFuzz - nosnap': {
- 'swarming_task_attrs': {
- 'expiration': 14400,
- 'hard_timeout': 3600,
- 'priority': 35,
- },
- 'tests': [
- {
- 'name': 'numfuzz',
- 'suffix': 'interrupt-budget',
- 'test_args': [
- '--total-timeout-sec=2100',
- '--stress-interrupt-budget=10',
- '--stress-deopt=5',
- ]
- },
- ],
- },
- 'V8 NumFuzz - nosnap debug': {
- 'swarming_task_attrs': {
- 'expiration': 14400,
- 'hard_timeout': 3600,
- 'priority': 35,
- },
- 'tests': [
- {
- 'name': 'numfuzz',
- 'suffix': 'interrupt-budget',
- 'test_args': [
- '--total-timeout-sec=2100',
- '--stress-interrupt-budget=10',
- '--stress-deopt=5',
- ]
- },
- ],
- },
##############################################################################
- # Clusterfuzz.
+ # Branches.
'V8 Linux - beta branch': {
- 'swarming_task_attrs': {
- 'expiration': 14400,
- 'hard_timeout': 5400,
- 'priority': 35,
- },
'tests': [
{'name': 'mozilla'},
{'name': 'test262'},
@@ -1514,11 +1534,6 @@
],
},
'V8 Linux - beta branch - debug': {
- 'swarming_task_attrs': {
- 'expiration': 14400,
- 'hard_timeout': 5400,
- 'priority': 35,
- },
'tests': [
{'name': 'mozilla'},
{'name': 'test262'},
@@ -1526,11 +1541,6 @@
],
},
'V8 Linux - stable branch': {
- 'swarming_task_attrs': {
- 'expiration': 14400,
- 'hard_timeout': 5400,
- 'priority': 35,
- },
'tests': [
{'name': 'mozilla'},
{'name': 'test262'},
@@ -1538,11 +1548,6 @@
],
},
'V8 Linux - stable branch - debug': {
- 'swarming_task_attrs': {
- 'expiration': 14400,
- 'hard_timeout': 5400,
- 'priority': 35,
- },
'tests': [
{'name': 'mozilla'},
{'name': 'test262'},
@@ -1550,11 +1555,6 @@
],
},
'V8 Linux64 - beta branch': {
- 'swarming_task_attrs': {
- 'expiration': 14400,
- 'hard_timeout': 5400,
- 'priority': 35,
- },
'tests': [
{'name': 'mozilla'},
{'name': 'test262'},
@@ -1562,11 +1562,6 @@
],
},
'V8 Linux64 - beta branch - debug': {
- 'swarming_task_attrs': {
- 'expiration': 14400,
- 'hard_timeout': 5400,
- 'priority': 35,
- },
'tests': [
{'name': 'mozilla'},
{'name': 'test262'},
@@ -1574,11 +1569,6 @@
],
},
'V8 Linux64 - stable branch': {
- 'swarming_task_attrs': {
- 'expiration': 14400,
- 'hard_timeout': 5400,
- 'priority': 35,
- },
'tests': [
{'name': 'mozilla'},
{'name': 'test262'},
@@ -1586,11 +1576,6 @@
],
},
'V8 Linux64 - stable branch - debug': {
- 'swarming_task_attrs': {
- 'expiration': 14400,
- 'hard_timeout': 5400,
- 'priority': 35,
- },
'tests': [
{'name': 'mozilla'},
{'name': 'test262'},
@@ -1598,129 +1583,69 @@
],
},
'V8 arm - sim - beta branch': {
- 'swarming_task_attrs': {
- 'expiration': 14400,
- 'hard_timeout': 5400,
- 'priority': 35,
- },
'tests': [
{'name': 'mozilla'},
{'name': 'test262'},
- {'name': 'v8testing', 'shards': 2},
+ {'name': 'v8testing', 'shards': 4},
],
},
'V8 arm - sim - beta branch - debug': {
- 'swarming_task_attrs': {
- 'expiration': 14400,
- 'hard_timeout': 5400,
- 'priority': 35,
- },
'tests': [
{'name': 'mozilla'},
{'name': 'test262'},
- {'name': 'v8testing', 'shards': 3},
+ {'name': 'v8testing', 'shards': 8},
],
},
'V8 arm - sim - stable branch': {
- 'swarming_task_attrs': {
- 'expiration': 14400,
- 'hard_timeout': 5400,
- 'priority': 35,
- },
'tests': [
{'name': 'mozilla'},
{'name': 'test262'},
- {'name': 'v8testing', 'shards': 2},
+ {'name': 'v8testing', 'shards': 4},
],
},
'V8 arm - sim - stable branch - debug': {
- 'swarming_task_attrs': {
- 'expiration': 14400,
- 'hard_timeout': 5400,
- 'priority': 35,
- },
'tests': [
{'name': 'mozilla'},
{'name': 'test262'},
- {'name': 'v8testing', 'shards': 3},
+ {'name': 'v8testing', 'shards': 8},
],
},
'V8 mips64el - sim - beta branch': {
- 'swarming_task_attrs': {
- 'expiration': 14400,
- 'hard_timeout': 5400,
- 'priority': 35,
- },
'tests': [
{'name': 'unittests'},
],
},
'V8 mips64el - sim - stable branch': {
- 'swarming_task_attrs': {
- 'expiration': 14400,
- 'hard_timeout': 5400,
- 'priority': 35,
- },
'tests': [
{'name': 'unittests'},
],
},
'V8 mipsel - sim - beta branch': {
- 'swarming_task_attrs': {
- 'expiration': 14400,
- 'hard_timeout': 5400,
- 'priority': 35,
- },
'tests': [
{'name': 'v8testing', 'shards': 4},
],
},
'V8 mipsel - sim - stable branch': {
- 'swarming_task_attrs': {
- 'expiration': 14400,
- 'hard_timeout': 5400,
- 'priority': 35,
- },
'tests': [
{'name': 'v8testing', 'shards': 4},
],
},
'V8 ppc64 - sim - beta branch': {
- 'swarming_task_attrs': {
- 'expiration': 14400,
- 'hard_timeout': 5400,
- 'priority': 35,
- },
'tests': [
{'name': 'unittests'},
],
},
'V8 ppc64 - sim - stable branch': {
- 'swarming_task_attrs': {
- 'expiration': 14400,
- 'hard_timeout': 5400,
- 'priority': 35,
- },
'tests': [
{'name': 'unittests'},
],
},
'V8 s390x - sim - beta branch': {
- 'swarming_task_attrs': {
- 'expiration': 14400,
- 'hard_timeout': 5400,
- 'priority': 35,
- },
'tests': [
{'name': 'unittests'},
],
},
'V8 s390x - sim - stable branch': {
- 'swarming_task_attrs': {
- 'expiration': 14400,
- 'hard_timeout': 5400,
- 'priority': 35,
- },
'tests': [
{'name': 'unittests'},
],
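The shard-count bumps throughout this file are simple budget arithmetic: a suite's total wall-clock time divided across its swarming shards has to fit under the task's hard_timeout. A back-of-the-envelope sketch in C++, with purely hypothetical runtimes (the real numbers come from builder telemetry, not from this patch):

#include <cmath>
#include <cstdio>

int main() {
  const double suite_minutes = 420.0;    // hypothetical total suite runtime
  const double shard_budget_min = 60.0;  // mirrors a hard_timeout of 3600 s
  // Smallest shard count that keeps each shard under the per-task timeout.
  const int shards =
      static_cast<int>(std::ceil(suite_minutes / shard_budget_min));
  std::printf("need at least %d shards\n", shards);  // prints 7
}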
diff --git a/deps/v8/snapshot_toolchain.gni b/deps/v8/snapshot_toolchain.gni
index 756413e5ce..f1c6909439 100644
--- a/deps/v8/snapshot_toolchain.gni
+++ b/deps/v8/snapshot_toolchain.gni
@@ -83,7 +83,13 @@ if (v8_snapshot_toolchain == "") {
if (v8_current_cpu == "x64" || v8_current_cpu == "x86") {
_cpus = v8_current_cpu
} else if (v8_current_cpu == "arm64" || v8_current_cpu == "mips64el") {
- _cpus = "x64_v8_${v8_current_cpu}"
+ if (is_win && v8_current_cpu == "arm64") {
+ # Set _cpus to blank for Windows ARM64 so that host_toolchain can be
+ # selected as the snapshot toolchain later.
+ _cpus = ""
+ } else {
+ _cpus = "x64_v8_${v8_current_cpu}"
+ }
} else if (v8_current_cpu == "arm" || v8_current_cpu == "mipsel") {
_cpus = "x86_v8_${v8_current_cpu}"
} else {
@@ -94,6 +100,9 @@ if (v8_snapshot_toolchain == "") {
if (_cpus != "") {
v8_snapshot_toolchain = "//build/toolchain/${host_os}:${_clang}${_cpus}"
+ } else if (is_win && v8_current_cpu == "arm64") {
+ # Cross-compile Windows arm64 with the Windows x64 toolchain.
+ v8_snapshot_toolchain = host_toolchain
}
}
}
diff --git a/deps/v8/src/DEPS b/deps/v8/src/DEPS
index 99873803c9..f8190e8fd9 100644
--- a/deps/v8/src/DEPS
+++ b/deps/v8/src/DEPS
@@ -27,6 +27,8 @@ include_rules = [
"+src/interpreter/interpreter-generator.h",
"+src/interpreter/setup-interpreter.h",
"-src/trap-handler",
+ "+src/trap-handler/handler-inside-posix.h",
+ "+src/trap-handler/handler-inside-win.h",
"+src/trap-handler/trap-handler.h",
"+testing/gtest/include/gtest/gtest_prod.h",
"-src/libplatform",
diff --git a/deps/v8/src/OWNERS b/deps/v8/src/OWNERS
index 5f6fedcd36..abad5274c8 100644
--- a/deps/v8/src/OWNERS
+++ b/deps/v8/src/OWNERS
@@ -2,7 +2,6 @@ per-file intl.*=cira@chromium.org
per-file intl.*=mnita@google.com
per-file intl.*=jshin@chromium.org
per-file typing-asm.*=aseemgarg@chromium.org
-per-file typing-asm.*=bradnelson@chromium.org
per-file objects-body-descriptors*=hpayer@chromium.org
per-file objects-body-descriptors*=mlippautz@chromium.org
per-file objects-body-descriptors*=ulan@chromium.org
diff --git a/deps/v8/src/PRESUBMIT.py b/deps/v8/src/PRESUBMIT.py
deleted file mode 100644
index b97eefaeb0..0000000000
--- a/deps/v8/src/PRESUBMIT.py
+++ /dev/null
@@ -1,29 +0,0 @@
-# Copyright 2017 the V8 project authors. All rights reserved.')
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-"""Presubmit script for //v8/src
-
-See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
-for more details about the presubmit API built into depot_tools.
-"""
-
-import os
-
-
-def PostUploadHook(cl, change, output_api):
- """git cl upload will call this hook after the issue is created/modified.
-
- This hook adds extra try bots to the CL description in order to run layout
- tests in addition to CQ try bots.
- """
- def is_api_cc(f):
- return 'api.cc' == os.path.split(f.LocalPath())[1]
- if not change.AffectedFiles(file_filter=is_api_cc):
- return []
- return output_api.EnsureCQIncludeTrybotsAreAdded(
- cl,
- [
- 'luci.chromium.try:linux_chromium_rel_ng'
- ],
- 'Automatically added layout test trybots to run tests on CQ.')
diff --git a/deps/v8/src/accessors.cc b/deps/v8/src/accessors.cc
index 226178394d..a368bbd81e 100644
--- a/deps/v8/src/accessors.cc
+++ b/deps/v8/src/accessors.cc
@@ -6,6 +6,7 @@
#include "src/api-inl.h"
#include "src/contexts.h"
+#include "src/counters.h"
#include "src/deoptimizer.h"
#include "src/execution.h"
#include "src/frames-inl.h"
@@ -133,7 +134,7 @@ void Accessors::ArgumentsIteratorGetter(
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
DisallowHeapAllocation no_allocation;
HandleScope scope(isolate);
- Object* result = isolate->native_context()->array_values_iterator();
+ Object result = isolate->native_context()->array_values_iterator();
info.GetReturnValue().Set(Utils::ToLocal(Handle<Object>(result, isolate)));
}
@@ -156,8 +157,8 @@ void Accessors::ArrayLengthGetter(
RuntimeCallCounterId::kArrayLengthGetter);
DisallowHeapAllocation no_allocation;
HandleScope scope(isolate);
- JSArray* holder = JSArray::cast(*Utils::OpenHandle(*info.Holder()));
- Object* result = holder->length();
+ JSArray holder = JSArray::cast(*Utils::OpenHandle(*info.Holder()));
+ Object result = holder->length();
info.GetReturnValue().Set(Utils::ToLocal(Handle<Object>(result, isolate)));
}
@@ -235,7 +236,7 @@ void Accessors::ModuleNamespaceEntryGetter(
v8::Local<v8::Name> name, const v8::PropertyCallbackInfo<v8::Value>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
HandleScope scope(isolate);
- JSModuleNamespace* holder =
+ JSModuleNamespace holder =
JSModuleNamespace::cast(*Utils::OpenHandle(*info.Holder()));
Handle<Object> result;
if (!holder
@@ -291,13 +292,13 @@ void Accessors::StringLengthGetter(
// v8::Object, but internally we have callbacks on entities which are higher
// in the hierarchy, in this case for String values.
- Object* value = *Utils::OpenHandle(*v8::Local<v8::Value>(info.This()));
+ Object value = *Utils::OpenHandle(*v8::Local<v8::Value>(info.This()));
if (!value->IsString()) {
// Not a string value. That means that we either got a String wrapper or
// a Value with a String wrapper in its prototype chain.
value = JSValue::cast(*Utils::OpenHandle(*info.Holder()))->value();
}
- Object* result = Smi::FromInt(String::cast(value)->length());
+ Object result = Smi::FromInt(String::cast(value)->length());
info.GetReturnValue().Set(Utils::ToLocal(Handle<Object>(result, isolate)));
}
@@ -496,7 +497,7 @@ Handle<JSObject> GetFrameArguments(Isolate* isolate,
// Copy the parameters to the arguments object.
DCHECK(array->length() == length);
for (int i = 0; i < length; i++) {
- Object* value = frame->GetParameter(i);
+ Object value = frame->GetParameter(i);
if (value->IsTheHole(isolate)) {
// Generators currently use holes as dummy arguments when resuming. We
// must not leak those.
@@ -560,13 +561,11 @@ Handle<AccessorInfo> Accessors::MakeFunctionArgumentsInfo(Isolate* isolate) {
// Accessors::FunctionCaller
//
-
-static inline bool AllowAccessToFunction(Context* current_context,
- JSFunction* function) {
+static inline bool AllowAccessToFunction(Context current_context,
+ JSFunction function) {
return current_context->HasSameSecurityTokenAs(function->context());
}
-
class FrameFunctionIterator {
public:
explicit FrameFunctionIterator(Isolate* isolate)
@@ -795,7 +794,7 @@ MaybeHandle<JSReceiver> ClearInternalStackTrace(Isolate* isolate,
Handle<JSObject> error) {
RETURN_ON_EXCEPTION(
isolate,
- JSReceiver::SetProperty(
+ Object::SetProperty(
isolate, error, isolate->factory()->stack_trace_symbol(),
isolate->factory()->undefined_value(), LanguageMode::kStrict),
JSReceiver);
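The pervasive Object* → Object rewrites in accessors.cc (and in the files below) come from V8 7.3's migration of heap references from raw C++ pointers to small value types wrapping a single tagged word. A minimal standalone model of the idea; the real v8::internal::Object carries far more machinery, so this is only a sketch:

#include <cstdint>
#include <cstdio>

using Address = uintptr_t;

// A tagged reference passed around by value instead of by raw pointer.
class Object {
 public:
  Object() : ptr_(0) {}                       // the null/empty reference
  explicit Object(Address ptr) : ptr_(ptr) {}
  Address ptr() const { return ptr_; }
  bool is_null() const { return ptr_ == 0; }

 private:
  Address ptr_;  // the tagged word is the object's entire state
};

int main() {
  Object o(0x42);
  Object copy = o;  // copying moves one word; no indirection involved
  std::printf("%d\n", copy.ptr() == o.ptr());  // prints 1
}

With this representation, is_null() replaces the nullptr checks that the old pointer type allowed, which is exactly the substitution the hunks above make.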
diff --git a/deps/v8/src/address-map.cc b/deps/v8/src/address-map.cc
index ad71a25a99..112e3134b8 100644
--- a/deps/v8/src/address-map.cc
+++ b/deps/v8/src/address-map.cc
@@ -6,6 +6,7 @@
#include "src/heap/heap.h"
#include "src/isolate.h"
#include "src/objects-inl.h"
+#include "src/objects/heap-object-inl.h"
namespace v8 {
namespace internal {
@@ -14,16 +15,16 @@ RootIndexMap::RootIndexMap(Isolate* isolate) {
map_ = isolate->root_index_map();
if (map_ != nullptr) return;
map_ = new HeapObjectToIndexHashMap();
- for (RootIndex root_index = RootIndex::kFirstStrongRoot;
- root_index <= RootIndex::kLastStrongRoot; ++root_index) {
- Object* root = isolate->heap()->root(root_index);
+ for (RootIndex root_index = RootIndex::kFirstStrongOrReadOnlyRoot;
+ root_index <= RootIndex::kLastStrongOrReadOnlyRoot; ++root_index) {
+ Object root = isolate->root(root_index);
if (!root->IsHeapObject()) continue;
// Omit root entries that can be written after initialization. They must
// not be referenced through the root list in the snapshot.
// Since we map the raw address of a root item to its root list index, the
// raw address must be constant, i.e. the object must be immovable.
- if (isolate->heap()->RootCanBeTreatedAsConstant(root_index)) {
- HeapObject* heap_object = HeapObject::cast(root);
+ if (RootsTable::IsImmortalImmovable(root_index)) {
+ HeapObject heap_object = HeapObject::cast(root);
Maybe<uint32_t> maybe_index = map_->Get(heap_object);
uint32_t index = static_cast<uint32_t>(root_index);
if (maybe_index.IsJust()) {
@@ -32,15 +33,14 @@ RootIndexMap::RootIndexMap(Isolate* isolate) {
} else {
map_->Set(heap_object, index);
}
- } else {
- // Immortal immovable root objects are constant and allocated on the first
- // page of old space. Non-constant roots cannot be immortal immovable. The
- // root index map contains all immortal immmovable root objects.
- CHECK(!Heap::RootIsImmortalImmovable(root_index));
}
}
isolate->set_root_index_map(map_);
}
+bool RootIndexMap::Lookup(Address obj, RootIndex* out_root_list) const {
+ return Lookup(HeapObject::cast(Object(obj)), out_root_list);
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/address-map.h b/deps/v8/src/address-map.h
index f7a1cc2ad9..72ba97a4ec 100644
--- a/deps/v8/src/address-map.h
+++ b/deps/v8/src/address-map.h
@@ -8,7 +8,8 @@
#include "include/v8.h"
#include "src/assert-scope.h"
#include "src/base/hashmap.h"
-#include "src/objects.h"
+#include "src/objects/heap-object.h"
+#include "src/roots.h"
namespace v8 {
namespace internal {
@@ -44,20 +45,20 @@ inline uintptr_t PointerToIndexHashMap<Address>::Key(Address value) {
return static_cast<uintptr_t>(value);
}
-template <typename Type>
-inline uintptr_t PointerToIndexHashMap<Type>::Key(Type value) {
- return reinterpret_cast<uintptr_t>(value);
+template <>
+inline uintptr_t PointerToIndexHashMap<HeapObject>::Key(HeapObject value) {
+ return value.ptr();
}
class AddressToIndexHashMap : public PointerToIndexHashMap<Address> {};
-class HeapObjectToIndexHashMap : public PointerToIndexHashMap<HeapObject*> {};
+class HeapObjectToIndexHashMap : public PointerToIndexHashMap<HeapObject> {};
class RootIndexMap {
public:
explicit RootIndexMap(Isolate* isolate);
// Returns true on successful lookup and sets *|out_root_list|.
- bool Lookup(HeapObject* obj, RootIndex* out_root_list) {
+ bool Lookup(HeapObject obj, RootIndex* out_root_list) const {
Maybe<uint32_t> maybe_index = map_->Get(obj);
if (maybe_index.IsJust()) {
*out_root_list = static_cast<RootIndex>(maybe_index.FromJust());
@@ -65,6 +66,7 @@ class RootIndexMap {
}
return false;
}
+ bool Lookup(Address obj, RootIndex* out_root_list) const;
private:
HeapObjectToIndexHashMap* map_;
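The specialization change above keys the hash map on the tagged word (value.ptr()) instead of reinterpret_casting a pointer. A hedged sketch of the same keying strategy on top of the standard library; the class name and the optional-style return are illustrative, not V8's:

#include <cstdint>
#include <optional>
#include <unordered_map>

using Address = uintptr_t;

// Maps an address-like key to a small index, as RootIndexMap does for roots.
class AddressToIndexMap {
 public:
  void Set(Address key, uint32_t index) { map_[key] = index; }

  // Plays the role of the Maybe<uint32_t>-returning Get in the diff.
  std::optional<uint32_t> Get(Address key) const {
    auto it = map_.find(key);
    if (it == map_.end()) return std::nullopt;
    return it->second;
  }

 private:
  std::unordered_map<Address, uint32_t> map_;
};

int main() {
  AddressToIndexMap map;
  map.Set(0x1000, 7);
  return map.Get(0x1000).value_or(0) == 7 ? 0 : 1;
}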
diff --git a/deps/v8/src/allocation-site-scopes-inl.h b/deps/v8/src/allocation-site-scopes-inl.h
index e114bb3885..6500e9efdc 100644
--- a/deps/v8/src/allocation-site-scopes-inl.h
+++ b/deps/v8/src/allocation-site-scopes-inl.h
@@ -12,12 +12,19 @@
namespace v8 {
namespace internal {
+void AllocationSiteContext::InitializeTraversal(Handle<AllocationSite> site) {
+ top_ = site;
+ // {current_} is updated in place to not create unnecessary Handles, hence
+ // we initially need a separate handle.
+ current_ = Handle<AllocationSite>::New(*top_, isolate());
+}
+
Handle<AllocationSite> AllocationSiteUsageContext::EnterNewScope() {
if (top().is_null()) {
InitializeTraversal(top_site_);
} else {
// Advance current site
- Object* nested_site = current()->nested_site();
+ Object nested_site = current()->nested_site();
// Something is wrong if we advance to the end of the list here.
update_current_site(AllocationSite::cast(nested_site));
}
@@ -38,7 +45,7 @@ bool AllocationSiteUsageContext::ShouldCreateMemento(Handle<JSObject> object) {
if (FLAG_trace_creation_allocation_sites) {
PrintF("*** Creating Memento for %s %p\n",
object->IsJSArray() ? "JSArray" : "JSObject",
- static_cast<void*>(*object));
+ reinterpret_cast<void*>(object->ptr()));
}
return true;
}
diff --git a/deps/v8/src/allocation-site-scopes.h b/deps/v8/src/allocation-site-scopes.h
index 0a729948db..b6bc6448fb 100644
--- a/deps/v8/src/allocation-site-scopes.h
+++ b/deps/v8/src/allocation-site-scopes.h
@@ -7,6 +7,7 @@
#include "src/handles.h"
#include "src/objects.h"
+#include "src/objects/allocation-site.h"
#include "src/objects/map.h"
namespace v8 {
@@ -28,16 +29,11 @@ class AllocationSiteContext {
Isolate* isolate() { return isolate_; }
protected:
- void update_current_site(AllocationSite* site) {
- *(current_.location()) = site;
+ void update_current_site(AllocationSite site) {
+ *(current_.location()) = site->ptr();
}
- void InitializeTraversal(Handle<AllocationSite> site) {
- top_ = site;
- // {current_} is updated in place to not create unnecessary Handles, hence
- // we initially need a separate handle.
- current_ = Handle<AllocationSite>::New(*top_, isolate());
- }
+ inline void InitializeTraversal(Handle<AllocationSite> site);
private:
Isolate* isolate_;
diff --git a/deps/v8/src/allocation.cc b/deps/v8/src/allocation.cc
index 6327a9c965..4be8fb4084 100644
--- a/deps/v8/src/allocation.cc
+++ b/deps/v8/src/allocation.cc
@@ -11,8 +11,9 @@
#include "src/base/lsan-page-allocator.h"
#include "src/base/page-allocator.h"
#include "src/base/platform/platform.h"
-#include "src/utils.h"
+#include "src/memcopy.h"
#include "src/v8.h"
+#include "src/vector.h"
#if V8_LIBC_BIONIC
#include <malloc.h> // NOLINT
@@ -37,29 +38,34 @@ void* AlignedAllocInternal(size_t size, size_t alignment) {
return ptr;
}
-// TODO(bbudge) Simplify this once all embedders implement a page allocator.
-struct InitializePageAllocator {
- static void Construct(void* page_allocator_ptr_arg) {
- auto page_allocator_ptr =
- reinterpret_cast<v8::PageAllocator**>(page_allocator_ptr_arg);
- v8::PageAllocator* page_allocator =
- V8::GetCurrentPlatform()->GetPageAllocator();
- if (page_allocator == nullptr) {
- static v8::base::PageAllocator default_allocator;
- page_allocator = &default_allocator;
+class PageAllocatorInitializer {
+ public:
+ PageAllocatorInitializer() {
+ page_allocator_ = V8::GetCurrentPlatform()->GetPageAllocator();
+ if (page_allocator_ == nullptr) {
+ static base::LeakyObject<base::PageAllocator> default_page_allocator;
+ page_allocator_ = default_page_allocator.get();
}
#if defined(LEAK_SANITIZER)
- {
- static v8::base::LsanPageAllocator lsan_allocator(page_allocator);
- page_allocator = &lsan_allocator;
- }
+ static base::LeakyObject<base::LsanPageAllocator> lsan_allocator(
+ page_allocator_);
+ page_allocator_ = lsan_allocator.get();
#endif
- *page_allocator_ptr = page_allocator;
}
+
+ PageAllocator* page_allocator() const { return page_allocator_; }
+
+ void SetPageAllocatorForTesting(PageAllocator* allocator) {
+ page_allocator_ = allocator;
+ }
+
+ private:
+ PageAllocator* page_allocator_;
};
-static base::LazyInstance<v8::PageAllocator*, InitializePageAllocator>::type
- page_allocator = LAZY_INSTANCE_INITIALIZER;
+DEFINE_LAZY_LEAKY_OBJECT_GETTER(PageAllocatorInitializer,
+ GetPageTableInitializer);
+
// We will attempt allocation this many times. After each failure, we call
// OnCriticalMemoryPressure to try to free some memory.
const int kAllocationTries = 2;
@@ -67,8 +73,15 @@ const int kAllocationTries = 2;
} // namespace
v8::PageAllocator* GetPlatformPageAllocator() {
- DCHECK_NOT_NULL(page_allocator.Get());
- return page_allocator.Get();
+ DCHECK_NOT_NULL(GetPageTableInitializer()->page_allocator());
+ return GetPageTableInitializer()->page_allocator();
+}
+
+v8::PageAllocator* SetPlatformPageAllocatorForTesting(
+ v8::PageAllocator* new_page_allocator) {
+ v8::PageAllocator* old_page_allocator = GetPlatformPageAllocator();
+ GetPageTableInitializer()->SetPageAllocatorForTesting(new_page_allocator);
+ return old_page_allocator;
}
void* Malloced::New(size_t size) {
@@ -111,7 +124,7 @@ void* AllocWithRetry(size_t size) {
}
void* AlignedAlloc(size_t size, size_t alignment) {
- DCHECK_LE(V8_ALIGNOF(void*), alignment);
+ DCHECK_LE(alignof(void*), alignment);
DCHECK(base::bits::IsPowerOfTwo(alignment));
void* result = nullptr;
for (int i = 0; i < kAllocationTries; ++i) {
@@ -155,7 +168,7 @@ void* AllocatePages(v8::PageAllocator* page_allocator, void* address,
PageAllocator::Permission access) {
DCHECK_NOT_NULL(page_allocator);
DCHECK_EQ(address, AlignedAddress(address, alignment));
- DCHECK_EQ(0UL, size & (page_allocator->AllocatePageSize() - 1));
+ DCHECK(IsAligned(size, page_allocator->AllocatePageSize()));
void* result = nullptr;
for (int i = 0; i < kAllocationTries; ++i) {
result = page_allocator->AllocatePages(address, size, alignment, access);
@@ -169,7 +182,7 @@ void* AllocatePages(v8::PageAllocator* page_allocator, void* address,
bool FreePages(v8::PageAllocator* page_allocator, void* address,
const size_t size) {
DCHECK_NOT_NULL(page_allocator);
- DCHECK_EQ(0UL, size & (page_allocator->AllocatePageSize() - 1));
+ DCHECK(IsAligned(size, page_allocator->AllocatePageSize()));
return page_allocator->FreePages(address, size);
}
@@ -177,6 +190,7 @@ bool ReleasePages(v8::PageAllocator* page_allocator, void* address, size_t size,
size_t new_size) {
DCHECK_NOT_NULL(page_allocator);
DCHECK_LT(new_size, size);
+ DCHECK(IsAligned(new_size, page_allocator->CommitPageSize()));
return page_allocator->ReleasePages(address, size, new_size);
}
@@ -209,12 +223,14 @@ VirtualMemory::VirtualMemory(v8::PageAllocator* page_allocator, size_t size,
void* hint, size_t alignment)
: page_allocator_(page_allocator) {
DCHECK_NOT_NULL(page_allocator);
+ DCHECK(IsAligned(size, page_allocator_->CommitPageSize()));
size_t page_size = page_allocator_->AllocatePageSize();
alignment = RoundUp(alignment, page_size);
- size = RoundUp(size, page_size);
- Address address = reinterpret_cast<Address>(AllocatePages(
- page_allocator_, hint, size, alignment, PageAllocator::kNoAccess));
+ Address address = reinterpret_cast<Address>(
+ AllocatePages(page_allocator_, hint, RoundUp(size, page_size), alignment,
+ PageAllocator::kNoAccess));
if (address != kNullAddress) {
+ DCHECK(IsAligned(address, alignment));
region_ = base::AddressRegion(address, size);
}
}
@@ -241,7 +257,7 @@ bool VirtualMemory::SetPermissions(Address address, size_t size,
size_t VirtualMemory::Release(Address free_start) {
DCHECK(IsReserved());
- DCHECK(IsAddressAligned(free_start, page_allocator_->CommitPageSize()));
+ DCHECK(IsAligned(free_start, page_allocator_->CommitPageSize()));
// Notice: Order is important here. The VirtualMemory object might live
// inside the allocated region.
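DEFINE_LAZY_LEAKY_OBJECT_GETTER replaces the old LazyInstance plumbing with an object constructed on first use and deliberately never destroyed, sidestepping static-destruction-order issues at process exit. A standalone sketch of the pattern; the names paraphrase src/base rather than quote it:

#include <new>

// Constructs T inside static storage on first use and never runs ~T().
template <typename T>
class LeakyObject {
 public:
  template <typename... Args>
  explicit LeakyObject(Args&&... args) {
    new (&storage_) T(static_cast<Args&&>(args)...);  // placement-new, no delete
  }
  T* get() { return reinterpret_cast<T*>(&storage_); }

 private:
  alignas(T) unsigned char storage_[sizeof(T)];
};

struct PageAllocator { /* platform-specific state elided */ };

PageAllocator* GetPageAllocator() {
  // Thread-safe lazy construction via the function-local static; the
  // wrapped PageAllocator is intentionally leaked at shutdown.
  static LeakyObject<PageAllocator> instance;
  return instance.get();
}

int main() { return GetPageAllocator() != nullptr ? 0 : 1; }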
diff --git a/deps/v8/src/allocation.h b/deps/v8/src/allocation.h
index 3a21310af8..df6e416e70 100644
--- a/deps/v8/src/allocation.h
+++ b/deps/v8/src/allocation.h
@@ -86,6 +86,13 @@ void AlignedFree(void *ptr);
// Returns the platform page allocator instance. Guaranteed to be a valid pointer.
V8_EXPORT_PRIVATE v8::PageAllocator* GetPlatformPageAllocator();
+// Sets the given page allocator as the platform page allocator and returns
+// the current one. This function *must* be used only for testing purposes.
+// It is not thread-safe and the testing infrastructure should ensure that
+// the tests do not modify the value simultaneously.
+V8_EXPORT_PRIVATE v8::PageAllocator* SetPlatformPageAllocatorForTesting(
+ v8::PageAllocator* page_allocator);
+
// Gets the page granularity for AllocatePages and FreePages. Addresses returned
// by AllocatePages and AllocatePage are aligned to this size.
V8_EXPORT_PRIVATE size_t AllocatePageSize();
@@ -160,7 +167,7 @@ class V8_EXPORT_PRIVATE VirtualMemory final {
// Reserves virtual memory containing an area of the given size that is
// aligned per |alignment| rounded up to the |page_allocator|'s allocate page
- // size.
+ // size. The |size| must be aligned to the |page_allocator|'s commit page size.
// This may not be at the position returned by address().
VirtualMemory(v8::PageAllocator* page_allocator, size_t size, void* hint,
size_t alignment = 1);
@@ -170,6 +177,8 @@ class V8_EXPORT_PRIVATE VirtualMemory final {
VirtualMemory(v8::PageAllocator* page_allocator, Address address, size_t size)
: page_allocator_(page_allocator), region_(address, size) {
DCHECK_NOT_NULL(page_allocator);
+ DCHECK(IsAligned(address, page_allocator->AllocatePageSize()));
+ DCHECK(IsAligned(size, page_allocator->CommitPageSize()));
}
// Releases the reserved memory, if any, controlled by this VirtualMemory
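SetPlatformPageAllocatorForTesting returns the allocator it displaces, so a test can install a mock and restore the platform default afterwards. A sketch of that save-and-restore discipline; MockPageAllocator is an assumed test double, and only the include path and function signature come from the header changed above:

#include "src/allocation.h"  // declares SetPlatformPageAllocatorForTesting

// Hypothetical helper showing the intended swap-and-restore contract.
void RunWithMockAllocator(v8::PageAllocator* mock) {
  v8::PageAllocator* old_allocator =
      v8::internal::SetPlatformPageAllocatorForTesting(mock);
  // ... exercise code that calls GetPlatformPageAllocator() ...
  // Restore the real allocator so later tests see the platform default.
  v8::internal::SetPlatformPageAllocatorForTesting(old_allocator);
}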
diff --git a/deps/v8/src/api-arguments-inl.h b/deps/v8/src/api-arguments-inl.h
index 1e5d6b2aaa..1e2e9ed807 100644
--- a/deps/v8/src/api-arguments-inl.h
+++ b/deps/v8/src/api-arguments-inl.h
@@ -10,6 +10,7 @@
#include "src/api-inl.h"
#include "src/debug/debug.h"
#include "src/objects/api-callbacks.h"
+#include "src/objects/slots-inl.h"
#include "src/tracing/trace-event.h"
#include "src/vm-state-inl.h"
@@ -20,27 +21,32 @@ CustomArgumentsBase::CustomArgumentsBase(Isolate* isolate)
: Relocatable(isolate) {}
template <typename T>
+CustomArguments<T>::~CustomArguments() {
+ slot_at(kReturnValueOffset).store(Object(kHandleZapValue));
+}
+
+template <typename T>
template <typename V>
Handle<V> CustomArguments<T>::GetReturnValue(Isolate* isolate) {
// Check the ReturnValue.
- Object** handle = &this->begin()[kReturnValueOffset];
+ FullObjectSlot slot = slot_at(kReturnValueOffset);
// Nothing was set, return empty handle as per previous behaviour.
- if ((*handle)->IsTheHole(isolate)) return Handle<V>();
- Handle<V> result = Handle<V>::cast(Handle<Object>(handle));
+ if ((*slot)->IsTheHole(isolate)) return Handle<V>();
+ Handle<V> result = Handle<V>::cast(Handle<Object>(slot.location()));
result->VerifyApiCallResultType();
return result;
}
-inline JSObject* PropertyCallbackArguments::holder() {
- return JSObject::cast(this->begin()[T::kHolderIndex]);
+inline JSObject PropertyCallbackArguments::holder() {
+ return JSObject::cast(*slot_at(T::kHolderIndex));
}
-inline Object* PropertyCallbackArguments::receiver() {
- return Object::cast(this->begin()[T::kThisIndex]);
+inline Object PropertyCallbackArguments::receiver() {
+ return *slot_at(T::kThisIndex);
}
-inline JSObject* FunctionCallbackArguments::holder() {
- return JSObject::cast(this->begin()[T::kHolderIndex]);
+inline JSObject FunctionCallbackArguments::holder() {
+ return JSObject::cast(*slot_at(T::kHolderIndex));
}
#define FOR_EACH_CALLBACK(F) \
@@ -61,7 +67,7 @@ inline JSObject* FunctionCallbackArguments::holder() {
} \
VMState<EXTERNAL> state(ISOLATE); \
ExternalCallbackScope call_scope(ISOLATE, FUNCTION_ADDR(F)); \
- PropertyCallbackInfo<API_RETURN_TYPE> callback_info(begin());
+ PropertyCallbackInfo<API_RETURN_TYPE> callback_info(values_);
#define PREPARE_CALLBACK_INFO_FAIL_SIDE_EFFECT_CHECK(ISOLATE, F, RETURN_VALUE, \
API_RETURN_TYPE) \
@@ -70,7 +76,7 @@ inline JSObject* FunctionCallbackArguments::holder() {
} \
VMState<EXTERNAL> state(ISOLATE); \
ExternalCallbackScope call_scope(ISOLATE, FUNCTION_ADDR(F)); \
- PropertyCallbackInfo<API_RETURN_TYPE> callback_info(begin());
+ PropertyCallbackInfo<API_RETURN_TYPE> callback_info(values_);
#define CREATE_NAMED_CALLBACK(FUNCTION, TYPE, RETURN_TYPE, API_RETURN_TYPE, \
INFO_FOR_SIDE_EFFECT) \
@@ -121,7 +127,7 @@ FOR_EACH_CALLBACK(CREATE_INDEXED_CALLBACK)
#undef FOR_EACH_CALLBACK
#undef CREATE_INDEXED_CALLBACK
-Handle<Object> FunctionCallbackArguments::Call(CallHandlerInfo* handler) {
+Handle<Object> FunctionCallbackArguments::Call(CallHandlerInfo handler) {
Isolate* isolate = this->isolate();
LOG(isolate, ApiObjectAccess("call", holder()));
RuntimeCallTimerScope timer(isolate, RuntimeCallCounterId::kFunctionCallback);
@@ -136,7 +142,7 @@ Handle<Object> FunctionCallbackArguments::Call(CallHandlerInfo* handler) {
}
VMState<EXTERNAL> state(isolate);
ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f));
- FunctionCallbackInfo<v8::Value> info(begin(), argv_, argc_);
+ FunctionCallbackInfo<v8::Value> info(values_, argv_, argc_);
f(info);
return GetReturnValue<Object>(isolate);
}
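The begin()/Object** accesses above give way to FullObjectSlot, a typed view over one full-pointer-width slot that loads and stores tagged values. A simplified standalone model, ignoring write barriers and pointer compression:

#include <cstdint>

using Address = uintptr_t;

struct Object {  // value-type tagged reference, as sketched earlier
  Address ptr = 0;
};

// A typed view over one full-word slot inside a raw Address buffer.
class FullObjectSlot {
 public:
  explicit FullObjectSlot(Address* location) : location_(location) {}
  Object operator*() const { return Object{*location_}; }     // load
  void store(Object value) const { *location_ = value.ptr; }  // store
  Address* location() const { return location_; }

 private:
  Address* location_;
};

int main() {
  Address values[4] = {};           // like CustomArguments::values_
  FullObjectSlot slot(&values[2]);  // like slot_at(2)
  slot.store(Object{42});
  return (*slot).ptr == 42 ? 0 : 1;
}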
diff --git a/deps/v8/src/api-arguments.cc b/deps/v8/src/api-arguments.cc
index 4b290d9dab..b706050b30 100644
--- a/deps/v8/src/api-arguments.cc
+++ b/deps/v8/src/api-arguments.cc
@@ -10,44 +10,42 @@ namespace v8 {
namespace internal {
PropertyCallbackArguments::PropertyCallbackArguments(Isolate* isolate,
- Object* data, Object* self,
- JSObject* holder,
+ Object data, Object self,
+ JSObject holder,
ShouldThrow should_throw)
: Super(isolate) {
- Object** values = this->begin();
- values[T::kThisIndex] = self;
- values[T::kHolderIndex] = holder;
- values[T::kDataIndex] = data;
- values[T::kIsolateIndex] = reinterpret_cast<Object*>(isolate);
- values[T::kShouldThrowOnErrorIndex] =
- Smi::FromInt(should_throw == kThrowOnError ? 1 : 0);
+ slot_at(T::kThisIndex).store(self);
+ slot_at(T::kHolderIndex).store(holder);
+ slot_at(T::kDataIndex).store(data);
+ slot_at(T::kIsolateIndex).store(Object(reinterpret_cast<Address>(isolate)));
+ slot_at(T::kShouldThrowOnErrorIndex)
+ .store(Smi::FromInt(should_throw == kThrowOnError ? 1 : 0));
// Here the hole is set as default value.
// It cannot escape into js as it's removed in Call below.
- HeapObject* the_hole = ReadOnlyRoots(isolate).the_hole_value();
- values[T::kReturnValueDefaultValueIndex] = the_hole;
- values[T::kReturnValueIndex] = the_hole;
- DCHECK(values[T::kHolderIndex]->IsHeapObject());
- DCHECK(values[T::kIsolateIndex]->IsSmi());
+ HeapObject the_hole = ReadOnlyRoots(isolate).the_hole_value();
+ slot_at(T::kReturnValueDefaultValueIndex).store(the_hole);
+ slot_at(T::kReturnValueIndex).store(the_hole);
+ DCHECK((*slot_at(T::kHolderIndex))->IsHeapObject());
+ DCHECK((*slot_at(T::kIsolateIndex))->IsSmi());
}
FunctionCallbackArguments::FunctionCallbackArguments(
- internal::Isolate* isolate, internal::Object* data,
- internal::HeapObject* callee, internal::Object* holder,
- internal::HeapObject* new_target, internal::Object** argv, int argc)
+ internal::Isolate* isolate, internal::Object data,
+ internal::HeapObject callee, internal::Object holder,
+ internal::HeapObject new_target, internal::Address* argv, int argc)
: Super(isolate), argv_(argv), argc_(argc) {
- Object** values = begin();
- values[T::kDataIndex] = data;
- values[T::kHolderIndex] = holder;
- values[T::kNewTargetIndex] = new_target;
- values[T::kIsolateIndex] = reinterpret_cast<internal::Object*>(isolate);
+ slot_at(T::kDataIndex).store(data);
+ slot_at(T::kHolderIndex).store(holder);
+ slot_at(T::kNewTargetIndex).store(new_target);
+ slot_at(T::kIsolateIndex).store(Object(reinterpret_cast<Address>(isolate)));
// Here the hole is set as default value.
// It cannot escape into js as it's removed in Call below.
- HeapObject* the_hole = ReadOnlyRoots(isolate).the_hole_value();
- values[T::kReturnValueDefaultValueIndex] = the_hole;
- values[T::kReturnValueIndex] = the_hole;
- DCHECK(values[T::kHolderIndex]->IsHeapObject());
- DCHECK(values[T::kIsolateIndex]->IsSmi());
+ HeapObject the_hole = ReadOnlyRoots(isolate).the_hole_value();
+ slot_at(T::kReturnValueDefaultValueIndex).store(the_hole);
+ slot_at(T::kReturnValueIndex).store(the_hole);
+ DCHECK((*slot_at(T::kHolderIndex))->IsHeapObject());
+ DCHECK((*slot_at(T::kIsolateIndex))->IsSmi());
}
} // namespace internal
diff --git a/deps/v8/src/api-arguments.h b/deps/v8/src/api-arguments.h
index d8fc2b49ab..6b025bdbb3 100644
--- a/deps/v8/src/api-arguments.h
+++ b/deps/v8/src/api-arguments.h
@@ -8,6 +8,7 @@
#include "src/api.h"
#include "src/debug/debug.h"
#include "src/isolate.h"
+#include "src/objects/slots.h"
#include "src/visitors.h"
namespace v8 {
@@ -26,14 +27,11 @@ class CustomArguments : public CustomArgumentsBase {
public:
static const int kReturnValueOffset = T::kReturnValueIndex;
- ~CustomArguments() override {
- this->begin()[kReturnValueOffset] =
- reinterpret_cast<Object*>(kHandleZapValue);
- }
+ ~CustomArguments() override;
inline void IterateInstance(RootVisitor* v) override {
- v->VisitRootPointers(Root::kRelocatable, nullptr, values_,
- values_ + T::kArgsLength);
+ v->VisitRootPointers(Root::kRelocatable, nullptr, slot_at(0),
+ slot_at(T::kArgsLength));
}
protected:
@@ -44,11 +42,17 @@ class CustomArguments : public CustomArgumentsBase {
Handle<V> GetReturnValue(Isolate* isolate);
inline Isolate* isolate() {
- return reinterpret_cast<Isolate*>(this->begin()[T::kIsolateIndex]);
+ return reinterpret_cast<Isolate*>((*slot_at(T::kIsolateIndex)).ptr());
}
- inline Object** begin() { return values_; }
- Object* values_[T::kArgsLength];
+ inline FullObjectSlot slot_at(int index) {
+ // This allows index == T::kArgsLength so "one past the end" slots
+ // can be retrieved for iterating purposes.
+ DCHECK_LE(static_cast<unsigned>(index),
+ static_cast<unsigned>(T::kArgsLength));
+ return FullObjectSlot(values_ + index);
+ }
+ Address values_[T::kArgsLength];
};
// Note: Calling args.Call() sets the return value on args. For multiple
@@ -67,8 +71,8 @@ class PropertyCallbackArguments
static const int kIsolateIndex = T::kIsolateIndex;
static const int kShouldThrowOnErrorIndex = T::kShouldThrowOnErrorIndex;
- PropertyCallbackArguments(Isolate* isolate, Object* data, Object* self,
- JSObject* holder, ShouldThrow should_throw);
+ PropertyCallbackArguments(Isolate* isolate, Object data, Object self,
+ JSObject holder, ShouldThrow should_throw);
// -------------------------------------------------------------------------
// Accessor Callbacks
@@ -135,8 +139,8 @@ class PropertyCallbackArguments
GenericNamedPropertyGetterCallback f, Handle<Name> name,
Handle<Object> info, Handle<Object> receiver = Handle<Object>());
- inline JSObject* holder();
- inline Object* receiver();
+ inline JSObject holder();
+ inline Object receiver();
// Don't copy PropertyCallbackArguments, because they would both have the
// same prev_ pointer.
@@ -156,11 +160,11 @@ class FunctionCallbackArguments
static const int kIsolateIndex = T::kIsolateIndex;
static const int kNewTargetIndex = T::kNewTargetIndex;
- FunctionCallbackArguments(internal::Isolate* isolate, internal::Object* data,
- internal::HeapObject* callee,
- internal::Object* holder,
- internal::HeapObject* new_target,
- internal::Object** argv, int argc);
+ FunctionCallbackArguments(internal::Isolate* isolate, internal::Object data,
+ internal::HeapObject callee,
+ internal::Object holder,
+ internal::HeapObject new_target,
+ internal::Address* argv, int argc);
/*
* The following Call function wraps the calling of all callbacks to handle
@@ -170,12 +174,12 @@ class FunctionCallbackArguments
* and used if it's been set to anything inside the callback.
* New style callbacks always use the return value.
*/
- inline Handle<Object> Call(CallHandlerInfo* handler);
+ inline Handle<Object> Call(CallHandlerInfo handler);
private:
- inline JSObject* holder();
+ inline JSObject holder();
- internal::Object** argv_;
+ internal::Address* argv_;
int argc_;
};
diff --git a/deps/v8/src/api-inl.h b/deps/v8/src/api-inl.h
index 5758729dd3..024dc88537 100644
--- a/deps/v8/src/api-inl.h
+++ b/deps/v8/src/api-inl.h
@@ -6,13 +6,15 @@
#define V8_API_INL_H_
#include "src/api.h"
+#include "src/handles-inl.h"
#include "src/objects-inl.h"
+#include "src/objects/foreign-inl.h"
#include "src/objects/stack-frame-info.h"
namespace v8 {
template <typename T>
-inline T ToCData(v8::internal::Object* obj) {
+inline T ToCData(v8::internal::Object obj) {
STATIC_ASSERT(sizeof(T) == sizeof(v8::internal::Address));
if (obj == v8::internal::Smi::kZero) return nullptr;
return reinterpret_cast<T>(
@@ -20,7 +22,7 @@ inline T ToCData(v8::internal::Object* obj) {
}
template <>
-inline v8::internal::Address ToCData(v8::internal::Object* obj) {
+inline v8::internal::Address ToCData(v8::internal::Object obj) {
if (obj == v8::internal::Smi::kZero) return v8::internal::kNullAddress;
return v8::internal::Foreign::cast(obj)->foreign_address();
}
@@ -108,14 +110,17 @@ MAKE_TO_LOCAL(ScriptOrModuleToLocal, Script, ScriptOrModule)
// Implementations of OpenHandle
-#define MAKE_OPEN_HANDLE(From, To) \
- v8::internal::Handle<v8::internal::To> Utils::OpenHandle( \
- const v8::From* that, bool allow_empty_handle) { \
- DCHECK(allow_empty_handle || that != nullptr); \
- DCHECK(that == nullptr || \
- (*reinterpret_cast<v8::internal::Object* const*>(that))->Is##To()); \
- return v8::internal::Handle<v8::internal::To>( \
- reinterpret_cast<v8::internal::To**>(const_cast<v8::From*>(that))); \
+#define MAKE_OPEN_HANDLE(From, To) \
+ v8::internal::Handle<v8::internal::To> Utils::OpenHandle( \
+ const v8::From* that, bool allow_empty_handle) { \
+ DCHECK(allow_empty_handle || that != nullptr); \
+ DCHECK(that == nullptr || \
+ v8::internal::Object( \
+ *reinterpret_cast<const v8::internal::Address*>(that)) \
+ ->Is##To()); \
+ return v8::internal::Handle<v8::internal::To>( \
+ reinterpret_cast<v8::internal::Address*>( \
+ const_cast<v8::From*>(that))); \
}
OPEN_HANDLE_LIST(MAKE_OPEN_HANDLE)
@@ -125,12 +130,20 @@ OPEN_HANDLE_LIST(MAKE_OPEN_HANDLE)
namespace internal {
-Handle<Context> HandleScopeImplementer::MicrotaskContext() {
- if (microtask_context_) return Handle<Context>(microtask_context_, isolate_);
+Handle<Context> HandleScopeImplementer::LastEnteredContext() {
+ DCHECK_EQ(entered_contexts_.size(), is_microtask_context_.size());
+
+ for (size_t i = 0; i < entered_contexts_.size(); ++i) {
+ size_t j = entered_contexts_.size() - i - 1;
+ if (!is_microtask_context_.at(j)) {
+ return Handle<Context>(entered_contexts_.at(j), isolate_);
+ }
+ }
+
return Handle<Context>::null();
}
-Handle<Context> HandleScopeImplementer::LastEnteredContext() {
+Handle<Context> HandleScopeImplementer::LastEnteredOrMicrotaskContext() {
if (entered_contexts_.empty()) return Handle<Context>::null();
return Handle<Context>(entered_contexts_.back(), isolate_);
}
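LastEnteredContext now scans the entered-context stack from the top and skips entries flagged as microtask contexts, returning the innermost genuinely entered context. The control flow in isolation, with contexts reduced to plain ints:

#include <optional>
#include <vector>

// Returns the most recently entered context that is not a microtask context.
// is_microtask runs parallel to contexts, as the two vectors do in the diff.
std::optional<int> LastEnteredContext(const std::vector<int>& contexts,
                                      const std::vector<bool>& is_microtask) {
  for (size_t i = contexts.size(); i-- > 0;) {  // scan innermost-first
    if (!is_microtask[i]) return contexts[i];
  }
  return std::nullopt;  // only microtask contexts (or nothing) were entered
}

int main() {
  std::vector<int> contexts = {1, 2, 3};
  std::vector<bool> is_microtask = {false, false, true};
  return LastEnteredContext(contexts, is_microtask).value_or(-1) == 2 ? 0 : 1;
}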
diff --git a/deps/v8/src/api-natives.cc b/deps/v8/src/api-natives.cc
index 11b63d56d8..d0088bbf1c 100644
--- a/deps/v8/src/api-natives.cc
+++ b/deps/v8/src/api-natives.cc
@@ -7,9 +7,10 @@
#include "src/api-inl.h"
#include "src/isolate-inl.h"
#include "src/lookup.h"
-#include "src/messages.h"
+#include "src/message-template.h"
#include "src/objects/api-callbacks.h"
#include "src/objects/hash-table-inl.h"
+#include "src/objects/property-cell.h"
#include "src/objects/templates.h"
namespace v8 {
@@ -161,8 +162,7 @@ class AccessCheckDisableScope {
Handle<JSObject> obj_;
};
-
-Object* GetIntrinsic(Isolate* isolate, v8::Intrinsic intrinsic) {
+Object GetIntrinsic(Isolate* isolate, v8::Intrinsic intrinsic) {
Handle<Context> native_context = isolate->native_context();
DCHECK(!native_context.is_null());
switch (intrinsic) {
@@ -172,10 +172,9 @@ Object* GetIntrinsic(Isolate* isolate, v8::Intrinsic intrinsic) {
V8_INTRINSICS_LIST(GET_INTRINSIC_VALUE)
#undef GET_INTRINSIC_VALUE
}
- return nullptr;
+ return Object();
}
-
template <typename TemplateInfoT>
MaybeHandle<JSObject> ConfigureInstance(Isolate* isolate, Handle<JSObject> obj,
Handle<TemplateInfoT> data,
@@ -186,9 +185,9 @@ MaybeHandle<JSObject> ConfigureInstance(Isolate* isolate, Handle<JSObject> obj,
// Walk the inheritance chain and copy all accessors to current object.
int max_number_of_properties = 0;
- TemplateInfoT* info = *data;
- while (info != nullptr) {
- Object* props = info->property_accessors();
+ TemplateInfoT info = *data;
+ while (!info.is_null()) {
+ Object props = info->property_accessors();
if (!props->IsUndefined(isolate)) {
max_number_of_properties += TemplateList::cast(props)->length();
}
@@ -201,10 +200,10 @@ MaybeHandle<JSObject> ConfigureInstance(Isolate* isolate, Handle<JSObject> obj,
Handle<FixedArray> array =
isolate->factory()->NewFixedArray(max_number_of_properties);
- for (Handle<TemplateInfoT> temp(*data, isolate); *temp != nullptr;
+ for (Handle<TemplateInfoT> temp(*data, isolate); !temp->is_null();
temp = handle(temp->GetParent(isolate), isolate)) {
// Accumulate accessors.
- Object* maybe_properties = temp->property_accessors();
+ Object maybe_properties = temp->property_accessors();
if (!maybe_properties->IsUndefined(isolate)) {
valid_descriptors = AccessorInfo::AppendUnique(
isolate, handle(maybe_properties, isolate), array,
@@ -222,7 +221,7 @@ MaybeHandle<JSObject> ConfigureInstance(Isolate* isolate, Handle<JSObject> obj,
}
}
- Object* maybe_property_list = data->property_list();
+ Object maybe_property_list = data->property_list();
if (maybe_property_list->IsUndefined(isolate)) return obj;
Handle<TemplateList> properties(TemplateList::cast(maybe_property_list),
isolate);
@@ -231,7 +230,7 @@ MaybeHandle<JSObject> ConfigureInstance(Isolate* isolate, Handle<JSObject> obj,
int i = 0;
for (int c = 0; c < data->number_of_properties(); c++) {
auto name = handle(Name::cast(properties->get(i++)), isolate);
- Object* bit = properties->get(i++);
+ Object bit = properties->get(i++);
if (bit->IsSmi()) {
PropertyDetails details(Smi::cast(bit));
PropertyAttributes attributes = details.attributes();
@@ -347,12 +346,12 @@ void UncacheTemplateInstantiation(Isolate* isolate, int serial_number,
}
}
-bool IsSimpleInstantiation(Isolate* isolate, ObjectTemplateInfo* info,
- JSReceiver* new_target) {
+bool IsSimpleInstantiation(Isolate* isolate, ObjectTemplateInfo info,
+ JSReceiver new_target) {
DisallowHeapAllocation no_gc;
if (!new_target->IsJSFunction()) return false;
- JSFunction* fun = JSFunction::cast(new_target);
+ JSFunction fun = JSFunction::cast(new_target);
if (fun->shared()->function_data() != info->constructor()) return false;
if (info->immutable_proto()) return false;
return fun->context()->native_context() == isolate->raw_native_context();
@@ -383,7 +382,7 @@ MaybeHandle<JSObject> InstantiateObject(Isolate* isolate,
}
if (constructor.is_null()) {
- Object* maybe_constructor_info = info->constructor();
+ Object maybe_constructor_info = info->constructor();
if (maybe_constructor_info->IsUndefined(isolate)) {
constructor = isolate->object_function();
} else {
@@ -432,7 +431,7 @@ MaybeHandle<JSObject> InstantiateObject(Isolate* isolate,
namespace {
MaybeHandle<Object> GetInstancePrototype(Isolate* isolate,
- Object* function_template) {
+ Object function_template) {
// Enter a new scope. Recursion could otherwise create a lot of handles.
HandleScope scope(isolate);
Handle<JSFunction> parent_instance;
@@ -467,9 +466,9 @@ MaybeHandle<JSFunction> InstantiateFunction(Isolate* isolate,
}
Handle<Object> prototype;
if (!data->remove_prototype()) {
- Object* prototype_templ = data->prototype_template();
+ Object prototype_templ = data->GetPrototypeTemplate();
if (prototype_templ->IsUndefined(isolate)) {
- Object* prototype_provider_templ = data->prototype_provider_template();
+ Object prototype_provider_templ = data->GetPrototypeProviderTemplate();
if (prototype_provider_templ->IsUndefined(isolate)) {
prototype = isolate->factory()->NewJSObject(isolate->object_function());
} else {
@@ -486,7 +485,7 @@ MaybeHandle<JSFunction> InstantiateFunction(Isolate* isolate,
Handle<JSReceiver>(), data->hidden_prototype(), true),
JSFunction);
}
- Object* parent = data->parent_template();
+ Object parent = data->GetParentTemplate();
if (!parent->IsUndefined(isolate)) {
Handle<Object> parent_prototype;
ASSIGN_RETURN_ON_EXCEPTION(isolate, parent_prototype,
@@ -498,8 +497,8 @@ MaybeHandle<JSFunction> InstantiateFunction(Isolate* isolate,
}
InstanceType function_type =
(!data->needs_access_check() &&
- data->named_property_handler()->IsUndefined(isolate) &&
- data->indexed_property_handler()->IsUndefined(isolate))
+ data->GetNamedPropertyHandler()->IsUndefined(isolate) &&
+ data->GetIndexedPropertyHandler()->IsUndefined(isolate))
? JS_API_OBJECT_TYPE
: JS_SPECIAL_API_OBJECT_TYPE;
@@ -526,7 +525,7 @@ MaybeHandle<JSFunction> InstantiateFunction(Isolate* isolate,
void AddPropertyToPropertyList(Isolate* isolate, Handle<TemplateInfo> templ,
int length, Handle<Object>* data) {
- Object* maybe_list = templ->property_list();
+ Object maybe_list = templ->property_list();
Handle<TemplateList> list;
if (maybe_list->IsUndefined(isolate)) {
list = TemplateList::New(isolate, length);
@@ -570,7 +569,8 @@ MaybeHandle<JSObject> ApiNatives::InstantiateRemoteObject(
FunctionTemplateInfo::cast(data->constructor()), isolate);
Handle<Map> object_map = isolate->factory()->NewMap(
JS_SPECIAL_API_OBJECT_TYPE,
- JSObject::kHeaderSize + data->embedder_field_count() * kPointerSize,
+ JSObject::kHeaderSize +
+ data->embedder_field_count() * kEmbedderDataSlotSize,
TERMINAL_FAST_ELEMENTS_KIND);
object_map->SetConstructor(*constructor);
object_map->set_is_access_check_needed(true);
@@ -620,7 +620,7 @@ void ApiNatives::AddAccessorProperty(Isolate* isolate,
void ApiNatives::AddNativeDataProperty(Isolate* isolate,
Handle<TemplateInfo> info,
Handle<AccessorInfo> property) {
- Object* maybe_list = info->property_accessors();
+ Object maybe_list = info->property_accessors();
Handle<TemplateList> list;
if (maybe_list->IsUndefined(isolate)) {
list = TemplateList::New(isolate, 1);
@@ -662,7 +662,7 @@ Handle<JSFunction> ApiNatives::CreateApiFunction(
if (prototype->IsTheHole(isolate)) {
prototype = isolate->factory()->NewFunctionPrototype(result);
- } else if (obj->prototype_provider_template()->IsUndefined(isolate)) {
+ } else if (obj->GetPrototypeProviderTemplate()->IsUndefined(isolate)) {
JSObject::AddProperty(isolate, Handle<JSObject>::cast(prototype),
isolate->factory()->constructor_string(), result,
DONT_ENUM);
@@ -670,17 +670,17 @@ Handle<JSFunction> ApiNatives::CreateApiFunction(
int embedder_field_count = 0;
bool immutable_proto = false;
- if (!obj->instance_template()->IsUndefined(isolate)) {
- Handle<ObjectTemplateInfo> instance_template = Handle<ObjectTemplateInfo>(
- ObjectTemplateInfo::cast(obj->instance_template()), isolate);
- embedder_field_count = instance_template->embedder_field_count();
- immutable_proto = instance_template->immutable_proto();
+ if (!obj->GetInstanceTemplate()->IsUndefined(isolate)) {
+ Handle<ObjectTemplateInfo> GetInstanceTemplate = Handle<ObjectTemplateInfo>(
+ ObjectTemplateInfo::cast(obj->GetInstanceTemplate()), isolate);
+ embedder_field_count = GetInstanceTemplate->embedder_field_count();
+ immutable_proto = GetInstanceTemplate->immutable_proto();
}
// JS_FUNCTION_TYPE requires information about the prototype slot.
DCHECK_NE(JS_FUNCTION_TYPE, type);
- int instance_size =
- JSObject::GetHeaderSize(type) + kPointerSize * embedder_field_count;
+ int instance_size = JSObject::GetHeaderSize(type) +
+ kEmbedderDataSlotSize * embedder_field_count;
Handle<Map> map = isolate->factory()->NewMap(type, instance_size,
TERMINAL_FAST_ELEMENTS_KIND);
@@ -693,7 +693,7 @@ Handle<JSFunction> ApiNatives::CreateApiFunction(
// undetectable and callable. If we ever see the need to have an object
// that is undetectable but not callable, we need to update the types.h
// to allow encoding this.
- CHECK(!obj->instance_call_handler()->IsUndefined(isolate));
+ CHECK(!obj->GetInstanceCallHandler()->IsUndefined(isolate));
map->set_is_undetectable(true);
}
@@ -704,17 +704,18 @@ Handle<JSFunction> ApiNatives::CreateApiFunction(
}
// Set interceptor information in the map.
- if (!obj->named_property_handler()->IsUndefined(isolate)) {
+ if (!obj->GetNamedPropertyHandler()->IsUndefined(isolate)) {
map->set_has_named_interceptor(true);
map->set_may_have_interesting_symbols(true);
}
- if (!obj->indexed_property_handler()->IsUndefined(isolate)) {
+ if (!obj->GetIndexedPropertyHandler()->IsUndefined(isolate)) {
map->set_has_indexed_interceptor(true);
}
// Mark instance as callable in the map.
- if (!obj->instance_call_handler()->IsUndefined(isolate)) {
+ if (!obj->GetInstanceCallHandler()->IsUndefined(isolate)) {
map->set_is_callable(true);
+ map->set_is_constructor(!obj->undetectable());
}
if (immutable_proto) map->set_is_immutable_proto(true);
diff --git a/deps/v8/src/api-natives.h b/deps/v8/src/api-natives.h
index ff6cdc6c86..9a9ae50da8 100644
--- a/deps/v8/src/api-natives.h
+++ b/deps/v8/src/api-natives.h
@@ -16,6 +16,7 @@ namespace v8 {
namespace internal {
// Forward declarations.
+enum InstanceType : uint16_t;
class ObjectTemplateInfo;
class TemplateInfo;
diff --git a/deps/v8/src/api.cc b/deps/v8/src/api.cc
index 40e8b41e69..b1f9c99860 100644
--- a/deps/v8/src/api.cc
+++ b/deps/v8/src/api.cc
@@ -26,12 +26,13 @@
#include "src/bootstrapper.h"
#include "src/builtins/builtins-utils.h"
#include "src/char-predicates-inl.h"
-#include "src/code-stubs.h"
#include "src/compiler-dispatcher/compiler-dispatcher.h"
#include "src/compiler.h"
#include "src/contexts.h"
#include "src/conversions-inl.h"
#include "src/counters.h"
+#include "src/cpu-features.h"
+#include "src/date.h"
#include "src/debug/debug-coverage.h"
#include "src/debug/debug-evaluate.h"
#include "src/debug/debug-type-profile.h"
@@ -49,15 +50,23 @@
#include "src/json-parser.h"
#include "src/json-stringifier.h"
#include "src/messages.h"
+#include "src/microtask-queue.h"
#include "src/objects-inl.h"
#include "src/objects/api-callbacks.h"
+#include "src/objects/embedder-data-array-inl.h"
+#include "src/objects/embedder-data-slot-inl.h"
+#include "src/objects/hash-table-inl.h"
+#include "src/objects/heap-object.h"
#include "src/objects/js-array-inl.h"
#include "src/objects/js-collection-inl.h"
#include "src/objects/js-generator-inl.h"
#include "src/objects/js-promise-inl.h"
#include "src/objects/js-regexp-inl.h"
#include "src/objects/module-inl.h"
+#include "src/objects/oddball.h"
#include "src/objects/ordered-hash-table-inl.h"
+#include "src/objects/slots.h"
+#include "src/objects/smi.h"
#include "src/objects/stack-frame-info-inl.h"
#include "src/objects/templates.h"
#include "src/parsing/parse-info.h"
@@ -76,15 +85,16 @@
#include "src/runtime-profiler.h"
#include "src/runtime/runtime.h"
#include "src/simulator.h"
-#include "src/snapshot/builtin-serializer.h"
#include "src/snapshot/code-serializer.h"
#include "src/snapshot/natives.h"
+#include "src/snapshot/partial-serializer.h"
+#include "src/snapshot/read-only-serializer.h"
#include "src/snapshot/snapshot.h"
+#include "src/snapshot/startup-serializer.h"
#include "src/startup-data-util.h"
#include "src/string-hasher.h"
#include "src/tracing/trace-event.h"
#include "src/trap-handler/trap-handler.h"
-#include "src/unicode-cache-inl.h"
#include "src/unicode-inl.h"
#include "src/v8.h"
#include "src/v8threads.h"
@@ -97,6 +107,18 @@
#include "src/wasm/wasm-result.h"
#include "src/wasm/wasm-serialization.h"
+#if V8_OS_LINUX || V8_OS_MACOSX
+#include <signal.h>
+#include "include/v8-wasm-trap-handler-posix.h"
+#include "src/trap-handler/handler-inside-posix.h"
+#endif
+
+#if V8_OS_WIN
+#include <windows.h>
+#include "include/v8-wasm-trap-handler-win.h"
+#include "src/trap-handler/handler-inside-win.h"
+#endif
+
namespace v8 {
/*
@@ -217,7 +239,7 @@ namespace v8 {
namespace {
Local<Context> ContextFromNeverReadOnlySpaceObject(
- i::Handle<i::NeverReadOnlySpaceObject> obj) {
+ i::Handle<i::JSReceiver> obj) {
return reinterpret_cast<v8::Isolate*>(obj->GetIsolate())->GetCurrentContext();
}
@@ -231,10 +253,11 @@ class InternalEscapableScope : public v8::EscapableHandleScope {
#ifdef V8_CHECK_MICROTASKS_SCOPES_CONSISTENCY
void CheckMicrotasksScopesConsistency(i::Isolate* isolate) {
auto handle_scope_implementer = isolate->handle_scope_implementer();
+ auto* microtask_queue = isolate->default_microtask_queue();
if (handle_scope_implementer->microtasks_policy() ==
v8::MicrotasksPolicy::kScoped) {
- DCHECK(handle_scope_implementer->GetMicrotasksScopeDepth() ||
- !handle_scope_implementer->DebugMicrotasksScopeDepthIsZero());
+ DCHECK(microtask_queue->GetMicrotasksScopeDepth() ||
+ !microtask_queue->DebugMicrotasksScopeDepthIsZero());
}
}
#endif
@@ -253,14 +276,12 @@ class CallDepthScope {
? i::InterruptsScope::kRunInterrupts
: i::InterruptsScope::kPostponeInterrupts)
: i::InterruptsScope::kNoop) {
- // TODO(dcarney): remove this when blink stops crashing.
- DCHECK(!isolate_->external_caught_exception());
isolate_->handle_scope_implementer()->IncrementCallDepth();
isolate_->set_next_v8_call_is_safe_for_termination(false);
if (!context.IsEmpty()) {
i::Handle<i::Context> env = Utils::OpenHandle(*context);
i::HandleScopeImplementer* impl = isolate->handle_scope_implementer();
- if (isolate->context() != nullptr &&
+ if (!isolate->context().is_null() &&
isolate->context()->native_context() == env->native_context()) {
context_ = Local<Context>();
} else {
@@ -289,8 +310,10 @@ class CallDepthScope {
escaped_ = true;
auto handle_scope_implementer = isolate_->handle_scope_implementer();
handle_scope_implementer->DecrementCallDepth();
- bool call_depth_is_zero = handle_scope_implementer->CallDepthIsZero();
- isolate_->OptionalRescheduleException(call_depth_is_zero);
+ bool clear_exception =
+ handle_scope_implementer->CallDepthIsZero() &&
+ isolate_->thread_local_top()->try_catch_handler() == nullptr;
+ isolate_->OptionalRescheduleException(clear_exception);
}
private:
@@ -343,20 +366,19 @@ void i::V8::FatalProcessOutOfMemory(i::Isolate* isolate, const char* location,
i::HeapStats heap_stats;
if (isolate == nullptr) {
- isolate = Isolate::Current();
+ isolate = Isolate::TryGetCurrent();
}
if (isolate == nullptr) {
- // On a background thread -> we cannot retrieve memory information from the
- // Isolate. Write easy-to-recognize values on the stack.
+ // If the Isolate is not available for the current thread, we cannot retrieve
+ // memory information from the Isolate. Write easy-to-recognize values on
+ // the stack.
memset(last_few_messages, 0x0BADC0DE, Heap::kTraceRingBufferSize + 1);
memset(js_stacktrace, 0x0BADC0DE, Heap::kStacktraceBufferSize + 1);
memset(&heap_stats, 0xBADC0DE, sizeof(heap_stats));
- // Note that the embedder's oom handler won't be called in this case. We
- // just crash.
- FATAL(
- "API fatal error handler returned after process out of memory on the "
- "background thread");
+ // Note that the embedder's oom handler is also not available and therefore
+ // won't be called in this case. We just crash.
+ FATAL("Fatal process out of memory: %s", location);
UNREACHABLE();
}
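The memsets above plant sentinel bytes so a crash dump shows an unmistakable pattern where real heap statistics would normally sit. Note that memset truncates its value argument to a single byte, so 0x0BADC0DE actually fills memory with repeating 0xDE. A tiny illustration:

#include <cstdio>
#include <cstring>

int main() {
  unsigned int stats[4];
  // Only the low byte of 0x0BADC0DE survives the memset, so every word
  // reads back as 0xDEDEDEDE -- easy to spot in a minidump.
  std::memset(stats, 0x0BADC0DE, sizeof(stats));
  std::printf("%08X\n", stats[0]);  // prints DEDEDEDE
}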
@@ -387,6 +409,8 @@ void i::V8::FatalProcessOutOfMemory(i::Isolate* isolate, const char* location,
heap_stats.map_space_capacity = &map_space_capacity;
size_t lo_space_size;
heap_stats.lo_space_size = &lo_space_size;
+ size_t code_lo_space_size;
+ heap_stats.code_lo_space_size = &code_lo_space_size;
size_t global_handle_count;
heap_stats.global_handle_count = &global_handle_count;
size_t weak_global_handle_count;
@@ -563,8 +587,8 @@ SnapshotCreator::SnapshotCreator(Isolate* isolate,
SnapshotCreator::SnapshotCreator(const intptr_t* external_references,
StartupData* existing_snapshot)
- : SnapshotCreator(reinterpret_cast<Isolate*>(new i::Isolate()),
- external_references, existing_snapshot) {}
+ : SnapshotCreator(Isolate::Allocate(), external_references,
+ existing_snapshot) {}
SnapshotCreator::~SnapshotCreator() {
SnapshotCreatorData* data = SnapshotCreatorData::cast(data_);
@@ -608,13 +632,13 @@ size_t SnapshotCreator::AddTemplate(Local<Template> template_obj) {
return AddData(template_obj);
}
-size_t SnapshotCreator::AddData(i::Object* object) {
- DCHECK_NOT_NULL(object);
+size_t SnapshotCreator::AddData(i::Address object) {
+ DCHECK_NE(object, i::kNullAddress);
SnapshotCreatorData* data = SnapshotCreatorData::cast(data_);
DCHECK(!data->created_);
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(data->isolate_);
i::HandleScope scope(isolate);
- i::Handle<i::Object> obj(object, isolate);
+ i::Handle<i::Object> obj(i::Object(object), isolate);
i::Handle<i::ArrayList> list;
if (!isolate->heap()->serialized_objects()->IsArrayList()) {
list = i::ArrayList::New(isolate, 1);
@@ -628,13 +652,13 @@ size_t SnapshotCreator::AddData(i::Object* object) {
return index;
}
-size_t SnapshotCreator::AddData(Local<Context> context, i::Object* object) {
- DCHECK_NOT_NULL(object);
+size_t SnapshotCreator::AddData(Local<Context> context, i::Address object) {
+ DCHECK_NE(object, i::kNullAddress);
DCHECK(!SnapshotCreatorData::cast(data_)->created_);
i::Handle<i::Context> ctx = Utils::OpenHandle(*context);
i::Isolate* isolate = ctx->GetIsolate();
i::HandleScope scope(isolate);
- i::Handle<i::Object> obj(object, isolate);
+ i::Handle<i::Object> obj(i::Object(object), isolate);
i::Handle<i::ArrayList> list;
if (!ctx->serialized_objects()->IsArrayList()) {
list = i::ArrayList::New(isolate, 1);
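
// A minimal sketch of the public, type-safe wrapper over these Address-based
// overloads (assumes a live SnapshotCreator `creator`, a Local<Context>
// `context`, and a Local<FunctionTemplate> `my_template`; all hypothetical):
size_t index = creator.AddData(context, my_template);
// ... and in a process that later deserialized the blob:
v8::MaybeLocal<v8::FunctionTemplate> restored =
    context->GetDataFromSnapshotOnce<v8::FunctionTemplate>(index);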
@@ -731,20 +755,32 @@ StartupData SnapshotCreator::CreateBlob(
// We have to iterate the heap and collect handles to each clearable SFI,
// before we disable allocation, since we have to allocate UncompiledDatas
// to be able to recompile them.
+ //
+ // Compiled irregexp code is also flushed by collecting and clearing any
+ // seen JSRegExp objects.
i::HandleScope scope(isolate);
std::vector<i::Handle<i::SharedFunctionInfo>> sfis_to_clear;
- i::HeapIterator heap_iterator(isolate->heap());
- while (i::HeapObject* current_obj = heap_iterator.next()) {
- if (current_obj->IsSharedFunctionInfo()) {
- i::SharedFunctionInfo* shared =
- i::SharedFunctionInfo::cast(current_obj);
- if (shared->CanDiscardCompiled()) {
- sfis_to_clear.emplace_back(shared, isolate);
+ { // Heap allocation is disallowed within this scope.
+ i::HeapIterator heap_iterator(isolate->heap());
+ for (i::HeapObject current_obj = heap_iterator.next();
+ !current_obj.is_null(); current_obj = heap_iterator.next()) {
+ if (current_obj->IsSharedFunctionInfo()) {
+ i::SharedFunctionInfo shared =
+ i::SharedFunctionInfo::cast(current_obj);
+ if (shared->CanDiscardCompiled()) {
+ sfis_to_clear.emplace_back(shared, isolate);
+ }
+ } else if (current_obj->IsJSRegExp()) {
+ i::JSRegExp regexp = i::JSRegExp::cast(current_obj);
+ if (regexp->HasCompiledCode()) {
+ regexp->DiscardCompiledCodeForSerialization();
+ }
}
}
}
- i::AllowHeapAllocation allocate_for_discard;
+
+ // Must happen after heap iteration since SFI::DiscardCompiled may allocate.
for (i::Handle<i::SharedFunctionInfo> shared : sfis_to_clear) {
i::SharedFunctionInfo::DiscardCompiled(isolate, shared);
}
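
// A hedged sketch of the embedder call that reaches this flushing code;
// FunctionCodeHandling::kClear is what requests discarding compiled code:
v8::SnapshotCreator creator;
{
  v8::Isolate* isolate = creator.GetIsolate();
  v8::HandleScope scope(isolate);
  creator.SetDefaultContext(v8::Context::New(isolate));
}
v8::StartupData blob =
    creator.CreateBlob(v8::SnapshotCreator::FunctionCodeHandling::kClear);
// blob.data (blob.raw_size bytes) is now owned by the caller.
delete[] blob.data;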
@@ -753,7 +789,7 @@ StartupData SnapshotCreator::CreateBlob(
i::DisallowHeapAllocation no_gc_from_here_on;
int num_contexts = num_additional_contexts + 1;
- std::vector<i::Context*> contexts;
+ std::vector<i::Context> contexts;
contexts.reserve(num_contexts);
{
i::HandleScope scope(isolate);
@@ -773,16 +809,17 @@ StartupData SnapshotCreator::CreateBlob(
CHECK(handle_checker.CheckGlobalAndEternalHandles());
i::HeapIterator heap_iterator(isolate->heap());
- while (i::HeapObject* current_obj = heap_iterator.next()) {
+ for (i::HeapObject current_obj = heap_iterator.next(); !current_obj.is_null();
+ current_obj = heap_iterator.next()) {
if (current_obj->IsJSFunction()) {
- i::JSFunction* fun = i::JSFunction::cast(current_obj);
+ i::JSFunction fun = i::JSFunction::cast(current_obj);
// Complete in-object slack tracking for all functions.
fun->CompleteInobjectSlackTrackingIfActive();
// Also, clear out feedback vectors, or any optimized code.
- if (fun->has_feedback_vector()) {
- fun->feedback_cell()->set_value(
+ if (!fun->raw_feedback_cell()->value()->IsUndefined()) {
+ fun->raw_feedback_cell()->set_value(
i::ReadOnlyRoots(isolate).undefined_value());
fun->set_code(isolate->builtins()->builtin(i::Builtins::kCompileLazy));
}
@@ -790,12 +827,15 @@ StartupData SnapshotCreator::CreateBlob(
DCHECK(fun->shared()->HasWasmExportedFunctionData() ||
fun->shared()->HasBuiltinId() ||
fun->shared()->IsApiFunction() ||
- fun->shared()->HasUncompiledDataWithoutPreParsedScope());
+ fun->shared()->HasUncompiledDataWithoutPreparseData());
}
}
}
- i::StartupSerializer startup_serializer(isolate);
+ i::ReadOnlySerializer read_only_serializer(isolate);
+ read_only_serializer.SerializeReadOnlyRoots();
+
+ i::StartupSerializer startup_serializer(isolate, &read_only_serializer);
startup_serializer.SerializeStrongReferences();
// Serialize each context with a new partial serializer.
@@ -816,19 +856,17 @@ StartupData SnapshotCreator::CreateBlob(
context_snapshots.push_back(new i::SnapshotData(&partial_serializer));
}
- // Builtin serialization places additional objects into the partial snapshot
- // cache and thus needs to happen before SerializeWeakReferencesAndDeferred
- // is called below.
- i::BuiltinSerializer builtin_serializer(isolate, &startup_serializer);
- builtin_serializer.SerializeBuiltinsAndHandlers();
-
startup_serializer.SerializeWeakReferencesAndDeferred();
can_be_rehashed = can_be_rehashed && startup_serializer.can_be_rehashed();
+ read_only_serializer.FinalizeSerialization();
+ can_be_rehashed = can_be_rehashed && read_only_serializer.can_be_rehashed();
+
+ i::SnapshotData read_only_snapshot(&read_only_serializer);
i::SnapshotData startup_snapshot(&startup_serializer);
- i::BuiltinSnapshotData builtin_snapshot(&builtin_serializer);
- StartupData result = i::Snapshot::CreateSnapshotBlob(
- &startup_snapshot, &builtin_snapshot, context_snapshots, can_be_rehashed);
+ StartupData result =
+ i::Snapshot::CreateSnapshotBlob(&startup_snapshot, &read_only_snapshot,
+ context_snapshots, can_be_rehashed);
// Delete heap-allocated context snapshot instances.
for (const auto context_snapshot : context_snapshots) {
@@ -955,72 +993,52 @@ void SetResourceConstraints(i::Isolate* isolate,
}
}
-
-i::Object** V8::GlobalizeReference(i::Isolate* isolate, i::Object** obj) {
+i::Address* V8::GlobalizeReference(i::Isolate* isolate, i::Address* obj) {
LOG_API(isolate, Persistent, New);
i::Handle<i::Object> result = isolate->global_handles()->Create(*obj);
#ifdef VERIFY_HEAP
if (i::FLAG_verify_heap) {
- (*obj)->ObjectVerify(isolate);
+ i::Object(*obj)->ObjectVerify(isolate);
}
#endif // VERIFY_HEAP
return result.location();
}
-
-i::Object** V8::CopyPersistent(i::Object** obj) {
+i::Address* V8::CopyPersistent(i::Address* obj) {
i::Handle<i::Object> result = i::GlobalHandles::CopyGlobal(obj);
return result.location();
}
-void V8::RegisterExternallyReferencedObject(i::Object** object,
+void V8::RegisterExternallyReferencedObject(i::Address* location,
i::Isolate* isolate) {
- isolate->heap()->RegisterExternallyReferencedObject(object);
-}
-
-void V8::MakeWeak(i::Object** location, void* parameter,
- int embedder_field_index1, int embedder_field_index2,
- WeakCallbackInfo<void>::Callback weak_callback) {
- WeakCallbackType type = WeakCallbackType::kParameter;
- if (embedder_field_index1 == 0) {
- if (embedder_field_index2 == 1) {
- type = WeakCallbackType::kInternalFields;
- } else {
- DCHECK_EQ(embedder_field_index2, -1);
- type = WeakCallbackType::kInternalFields;
- }
- } else {
- DCHECK_EQ(embedder_field_index1, -1);
- DCHECK_EQ(embedder_field_index2, -1);
- }
- i::GlobalHandles::MakeWeak(location, parameter, weak_callback, type);
+ isolate->heap()->RegisterExternallyReferencedObject(location);
}
-void V8::MakeWeak(i::Object** location, void* parameter,
+void V8::MakeWeak(i::Address* location, void* parameter,
WeakCallbackInfo<void>::Callback weak_callback,
WeakCallbackType type) {
i::GlobalHandles::MakeWeak(location, parameter, weak_callback, type);
}
-void V8::MakeWeak(i::Object*** location_addr) {
+void V8::MakeWeak(i::Address** location_addr) {
i::GlobalHandles::MakeWeak(location_addr);
}
-void* V8::ClearWeak(i::Object** location) {
+void* V8::ClearWeak(i::Address* location) {
return i::GlobalHandles::ClearWeakness(location);
}
-void V8::AnnotateStrongRetainer(i::Object** location, const char* label) {
+void V8::AnnotateStrongRetainer(i::Address* location, const char* label) {
i::GlobalHandles::AnnotateStrongRetainer(location, label);
}
-void V8::DisposeGlobal(i::Object** location) {
+void V8::DisposeGlobal(i::Address* location) {
i::GlobalHandles::Destroy(location);
}
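
// A minimal sketch of the weak-handle pattern these entry points back
// (Wrapper is a hypothetical embedder type):
struct Wrapper {
  v8::Global<v8::Object> handle;
};

void Attach(v8::Isolate* isolate, v8::Local<v8::Object> obj, Wrapper* w) {
  w->handle.Reset(isolate, obj);
  w->handle.SetWeak(
      w,
      [](const v8::WeakCallbackInfo<Wrapper>& info) {
        Wrapper* wrapper = info.GetParameter();
        wrapper->handle.Reset();  // Clear the dying handle before freeing.
        delete wrapper;
      },
      v8::WeakCallbackType::kParameter);
}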
Value* V8::Eternalize(Isolate* v8_isolate, Value* value) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
- i::Object* object = *Utils::OpenHandle(value);
+ i::Object object = *Utils::OpenHandle(value);
int index = -1;
isolate->eternal_handles()->Create(isolate, object, &index);
return reinterpret_cast<Value*>(
@@ -1087,32 +1105,23 @@ int HandleScope::NumberOfHandles(Isolate* isolate) {
reinterpret_cast<i::Isolate*>(isolate));
}
-
-i::Object** HandleScope::CreateHandle(i::Isolate* isolate, i::Object* value) {
+i::Address* HandleScope::CreateHandle(i::Isolate* isolate, i::Address value) {
return i::HandleScope::CreateHandle(isolate, value);
}
-i::Object** HandleScope::CreateHandle(
- i::NeverReadOnlySpaceObject* writable_object, i::Object* value) {
- DCHECK(reinterpret_cast<i::HeapObject*>(writable_object)->IsHeapObject());
- return i::HandleScope::CreateHandle(writable_object->GetIsolate(), value);
-}
-
-
EscapableHandleScope::EscapableHandleScope(Isolate* v8_isolate) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
escape_slot_ =
- CreateHandle(isolate, i::ReadOnlyRoots(isolate).the_hole_value());
+ CreateHandle(isolate, i::ReadOnlyRoots(isolate).the_hole_value()->ptr());
Initialize(v8_isolate);
}
-
-i::Object** EscapableHandleScope::Escape(i::Object** escape_value) {
+i::Address* EscapableHandleScope::Escape(i::Address* escape_value) {
i::Heap* heap = reinterpret_cast<i::Isolate*>(GetIsolate())->heap();
- Utils::ApiCheck((*escape_slot_)->IsTheHole(heap->isolate()),
+ Utils::ApiCheck(i::Object(*escape_slot_)->IsTheHole(heap->isolate()),
"EscapableHandleScope::Escape", "Escape value set twice");
if (escape_value == nullptr) {
- *escape_slot_ = i::ReadOnlyRoots(heap).undefined_value();
+ *escape_slot_ = i::ReadOnlyRoots(heap).undefined_value()->ptr();
return nullptr;
}
*escape_slot_ = *escape_value;
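
// The hole/undefined bookkeeping above enforces the single-Escape contract;
// a minimal usage sketch:
v8::Local<v8::Object> MakeObject(v8::Isolate* isolate) {
  v8::EscapableHandleScope scope(isolate);
  v8::Local<v8::Object> obj = v8::Object::New(isolate);
  return scope.Escape(obj);  // A second Escape() would trip the ApiCheck.
}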
@@ -1154,7 +1163,7 @@ void Context::Enter() {
i::Isolate* isolate = env->GetIsolate();
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
i::HandleScopeImplementer* impl = isolate->handle_scope_implementer();
- impl->EnterContext(env);
+ impl->EnterContext(*env);
impl->SaveContext(isolate->context());
isolate->set_context(*env);
}
@@ -1164,8 +1173,7 @@ void Context::Exit() {
i::Isolate* isolate = env->GetIsolate();
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
i::HandleScopeImplementer* impl = isolate->handle_scope_implementer();
- if (!Utils::ApiCheck(impl->LastEnteredContextWas(env),
- "v8::Context::Exit()",
+ if (!Utils::ApiCheck(impl->LastEnteredContextWas(*env), "v8::Context::Exit()",
"Cannot exit non-entered context")) {
return;
}
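
// Enter()/Exit() are normally paired via the RAII wrapper; a sketch
// (assumes an initialized `isolate`):
v8::Local<v8::Context> context = v8::Context::New(isolate);
{
  v8::Context::Scope context_scope(context);  // calls context->Enter()
  // ... run scripts against `context` ...
}  // ~Scope() calls context->Exit(); the LastEnteredContextWas check only
   // fires when hand-written Enter/Exit calls are mismatched.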
@@ -1180,6 +1188,10 @@ Context::BackupIncumbentScope::BackupIncumbentScope(
i::Handle<i::Context> env = Utils::OpenHandle(*backup_incumbent_context_);
i::Isolate* isolate = env->GetIsolate();
+
+ js_stack_comparable_address_ =
+ i::SimulatorStack::RegisterJSStackComparableAddress(isolate);
+
prev_ = isolate->top_backup_incumbent_scope();
isolate->set_top_backup_incumbent_scope(this);
}
@@ -1187,26 +1199,17 @@ Context::BackupIncumbentScope::BackupIncumbentScope(
Context::BackupIncumbentScope::~BackupIncumbentScope() {
i::Handle<i::Context> env = Utils::OpenHandle(*backup_incumbent_context_);
i::Isolate* isolate = env->GetIsolate();
- isolate->set_top_backup_incumbent_scope(prev_);
-}
-
-static void* DecodeSmiToAligned(i::Object* value, const char* location) {
- Utils::ApiCheck(value->IsSmi(), location, "Not a Smi");
- return reinterpret_cast<void*>(value);
-}
+ i::SimulatorStack::UnregisterJSStackComparableAddress(isolate);
-static i::Smi* EncodeAlignedAsSmi(void* value, const char* location) {
- i::Smi* smi = reinterpret_cast<i::Smi*>(value);
- Utils::ApiCheck(smi->IsSmi(), location, "Pointer is not aligned");
- return smi;
+ isolate->set_top_backup_incumbent_scope(prev_);
}
+STATIC_ASSERT(i::Internals::kEmbedderDataSlotSize == i::kEmbedderDataSlotSize);
-static i::Handle<i::FixedArray> EmbedderDataFor(Context* context,
- int index,
- bool can_grow,
- const char* location) {
+static i::Handle<i::EmbedderDataArray> EmbedderDataFor(Context* context,
+ int index, bool can_grow,
+ const char* location) {
i::Handle<i::Context> env = Utils::OpenHandle(context);
i::Isolate* isolate = env->GetIsolate();
bool ok =
@@ -1214,15 +1217,16 @@ static i::Handle<i::FixedArray> EmbedderDataFor(Context* context,
location,
"Not a native context") &&
Utils::ApiCheck(index >= 0, location, "Negative index");
- if (!ok) return i::Handle<i::FixedArray>();
- i::Handle<i::FixedArray> data(env->embedder_data(), isolate);
+ if (!ok) return i::Handle<i::EmbedderDataArray>();
+ // TODO(ishell): remove cast once embedder_data slot has a proper type.
+ i::Handle<i::EmbedderDataArray> data(
+ i::EmbedderDataArray::cast(env->embedder_data()), isolate);
if (index < data->length()) return data;
- if (!Utils::ApiCheck(can_grow, location, "Index too large")) {
- return i::Handle<i::FixedArray>();
+ if (!Utils::ApiCheck(can_grow && index < i::EmbedderDataArray::kMaxLength,
+ location, "Index too large")) {
+ return i::Handle<i::EmbedderDataArray>();
}
- int new_size = index + 1;
- int grow_by = new_size - data->length();
- data = isolate->factory()->CopyFixedArrayAndGrow(data, grow_by);
+ data = i::EmbedderDataArray::EnsureCapacity(isolate, data, index);
env->set_embedder_data(*data);
return data;
}
@@ -1230,26 +1234,30 @@ static i::Handle<i::FixedArray> EmbedderDataFor(Context* context,
uint32_t Context::GetNumberOfEmbedderDataFields() {
i::Handle<i::Context> context = Utils::OpenHandle(this);
CHECK(context->IsNativeContext());
- return static_cast<uint32_t>(context->embedder_data()->length());
+ // TODO(ishell): remove cast once embedder_data slot has a proper type.
+ return static_cast<uint32_t>(
+ i::EmbedderDataArray::cast(context->embedder_data())->length());
}
v8::Local<v8::Value> Context::SlowGetEmbedderData(int index) {
const char* location = "v8::Context::GetEmbedderData()";
- i::Handle<i::FixedArray> data = EmbedderDataFor(this, index, false, location);
+ i::Handle<i::EmbedderDataArray> data =
+ EmbedderDataFor(this, index, false, location);
if (data.is_null()) return Local<Value>();
- i::Handle<i::Object> result(
- data->get(index),
- reinterpret_cast<i::Isolate*>(Utils::OpenHandle(this)->GetIsolate()));
+ i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ i::Handle<i::Object> result(i::EmbedderDataSlot(*data, index).load_tagged(),
+ isolate);
return Utils::ToLocal(result);
}
void Context::SetEmbedderData(int index, v8::Local<Value> value) {
const char* location = "v8::Context::SetEmbedderData()";
- i::Handle<i::FixedArray> data = EmbedderDataFor(this, index, true, location);
+ i::Handle<i::EmbedderDataArray> data =
+ EmbedderDataFor(this, index, true, location);
if (data.is_null()) return;
i::Handle<i::Object> val = Utils::OpenHandle(*value);
- data->set(index, *val);
+ i::EmbedderDataSlot::store_tagged(*data, index, *val);
DCHECK_EQ(*Utils::OpenHandle(*value),
*Utils::OpenHandle(*GetEmbedderData(index)));
}
@@ -1257,16 +1265,22 @@ void Context::SetEmbedderData(int index, v8::Local<Value> value) {
void* Context::SlowGetAlignedPointerFromEmbedderData(int index) {
const char* location = "v8::Context::GetAlignedPointerFromEmbedderData()";
- i::Handle<i::FixedArray> data = EmbedderDataFor(this, index, false, location);
+ i::Handle<i::EmbedderDataArray> data =
+ EmbedderDataFor(this, index, false, location);
if (data.is_null()) return nullptr;
- return DecodeSmiToAligned(data->get(index), location);
+ void* result;
+ Utils::ApiCheck(i::EmbedderDataSlot(*data, index).ToAlignedPointer(&result),
+ location, "Pointer is not aligned");
+ return result;
}
void Context::SetAlignedPointerInEmbedderData(int index, void* value) {
const char* location = "v8::Context::SetAlignedPointerInEmbedderData()";
- i::Handle<i::FixedArray> data = EmbedderDataFor(this, index, true, location);
- data->set(index, EncodeAlignedAsSmi(value, location));
+ i::Handle<i::EmbedderDataArray> data =
+ EmbedderDataFor(this, index, true, location);
+ bool ok = i::EmbedderDataSlot(*data, index).store_aligned_pointer(value);
+ Utils::ApiCheck(ok, location, "Pointer is not aligned");
DCHECK_EQ(value, GetAlignedPointerFromEmbedderData(index));
}
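
// The embedder's view of these slots, as a sketch; the stored pointer must
// be at least two-byte aligned so the slot can hold it smi-style
// (PerContextData is a hypothetical embedder type):
struct PerContextData { int id; };
static PerContextData per_context{42};
context->SetAlignedPointerInEmbedderData(1, &per_context);
auto* data = static_cast<PerContextData*>(
    context->GetAlignedPointerFromEmbedderData(1));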
@@ -1340,13 +1354,14 @@ static Local<ObjectTemplate> ObjectTemplateNew(
Local<ObjectTemplate> FunctionTemplate::PrototypeTemplate() {
i::Isolate* i_isolate = Utils::OpenHandle(this)->GetIsolate();
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
- i::Handle<i::Object> result(Utils::OpenHandle(this)->prototype_template(),
+ i::Handle<i::Object> result(Utils::OpenHandle(this)->GetPrototypeTemplate(),
i_isolate);
if (result->IsUndefined(i_isolate)) {
// Do not cache prototype objects.
result = Utils::OpenHandle(
*ObjectTemplateNew(i_isolate, Local<FunctionTemplate>(), true));
- Utils::OpenHandle(this)->set_prototype_template(*result);
+ i::FunctionTemplateInfo::SetPrototypeTemplate(
+ i_isolate, Utils::OpenHandle(this), result);
}
return ToApiHandle<ObjectTemplate>(result);
}
@@ -1357,9 +1372,10 @@ void FunctionTemplate::SetPrototypeProviderTemplate(
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
i::Handle<i::Object> result = Utils::OpenHandle(*prototype_provider);
auto info = Utils::OpenHandle(this);
- CHECK(info->prototype_template()->IsUndefined(i_isolate));
- CHECK(info->parent_template()->IsUndefined(i_isolate));
- info->set_prototype_provider_template(*result);
+ CHECK(info->GetPrototypeTemplate()->IsUndefined(i_isolate));
+ CHECK(info->GetParentTemplate()->IsUndefined(i_isolate));
+ i::FunctionTemplateInfo::SetPrototypeProviderTemplate(i_isolate, info,
+ result);
}
static void EnsureNotInstantiated(i::Handle<i::FunctionTemplateInfo> info,
@@ -1374,8 +1390,9 @@ void FunctionTemplate::Inherit(v8::Local<FunctionTemplate> value) {
EnsureNotInstantiated(info, "v8::FunctionTemplate::Inherit");
i::Isolate* i_isolate = info->GetIsolate();
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
- CHECK(info->prototype_provider_template()->IsUndefined(i_isolate));
- info->set_parent_template(*Utils::OpenHandle(*value));
+ CHECK(info->GetPrototypeProviderTemplate()->IsUndefined(i_isolate));
+ i::FunctionTemplateInfo::SetParentTemplate(i_isolate, info,
+ Utils::OpenHandle(*value));
}
static Local<FunctionTemplate> FunctionTemplateNew(
@@ -1429,10 +1446,10 @@ Local<FunctionTemplate> FunctionTemplate::New(
MaybeLocal<FunctionTemplate> FunctionTemplate::FromSnapshot(Isolate* isolate,
size_t index) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- i::FixedArray* serialized_objects = i_isolate->heap()->serialized_objects();
+ i::FixedArray serialized_objects = i_isolate->heap()->serialized_objects();
int int_index = static_cast<int>(index);
if (int_index < serialized_objects->length()) {
- i::Object* info = serialized_objects->get(int_index);
+ i::Object info = serialized_objects->get(int_index);
if (info->IsFunctionTemplateInfo()) {
return Utils::ToLocal(i::Handle<i::FunctionTemplateInfo>(
i::FunctionTemplateInfo::cast(info), i_isolate));
@@ -1541,13 +1558,14 @@ Local<ObjectTemplate> FunctionTemplate::InstanceTemplate() {
}
i::Isolate* isolate = handle->GetIsolate();
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
- if (handle->instance_template()->IsUndefined(isolate)) {
+ if (handle->GetInstanceTemplate()->IsUndefined(isolate)) {
Local<ObjectTemplate> templ =
ObjectTemplate::New(isolate, ToApiHandle<FunctionTemplate>(handle));
- handle->set_instance_template(*Utils::OpenHandle(*templ));
+ i::FunctionTemplateInfo::SetInstanceTemplate(isolate, handle,
+ Utils::OpenHandle(*templ));
}
i::Handle<i::ObjectTemplateInfo> result(
- i::ObjectTemplateInfo::cast(handle->instance_template()), isolate);
+ i::ObjectTemplateInfo::cast(handle->GetInstanceTemplate()), isolate);
return Utils::ToLocal(result);
}
@@ -1644,10 +1662,10 @@ Local<ObjectTemplate> ObjectTemplate::New(
MaybeLocal<ObjectTemplate> ObjectTemplate::FromSnapshot(Isolate* isolate,
size_t index) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- i::FixedArray* serialized_objects = i_isolate->heap()->serialized_objects();
+ i::FixedArray serialized_objects = i_isolate->heap()->serialized_objects();
int int_index = static_cast<int>(index);
if (int_index < serialized_objects->length()) {
- i::Object* info = serialized_objects->get(int_index);
+ i::Object info = serialized_objects->get(int_index);
if (info->IsObjectTemplateInfo()) {
return Utils::ToLocal(i::Handle<i::ObjectTemplateInfo>(
i::ObjectTemplateInfo::cast(info), i_isolate));
@@ -1661,15 +1679,16 @@ MaybeLocal<ObjectTemplate> ObjectTemplate::FromSnapshot(Isolate* isolate,
static i::Handle<i::FunctionTemplateInfo> EnsureConstructor(
i::Isolate* isolate,
ObjectTemplate* object_template) {
- i::Object* obj = Utils::OpenHandle(object_template)->constructor();
+ i::Object obj = Utils::OpenHandle(object_template)->constructor();
if (!obj->IsUndefined(isolate)) {
- i::FunctionTemplateInfo* info = i::FunctionTemplateInfo::cast(obj);
+ i::FunctionTemplateInfo info = i::FunctionTemplateInfo::cast(obj);
return i::Handle<i::FunctionTemplateInfo>(info, isolate);
}
Local<FunctionTemplate> templ =
FunctionTemplate::New(reinterpret_cast<Isolate*>(isolate));
i::Handle<i::FunctionTemplateInfo> constructor = Utils::OpenHandle(*templ);
- constructor->set_instance_template(*Utils::OpenHandle(object_template));
+ i::FunctionTemplateInfo::SetInstanceTemplate(
+ isolate, constructor, Utils::OpenHandle(object_template));
Utils::OpenHandle(object_template)->set_constructor(*constructor);
return constructor;
}
@@ -1843,7 +1862,7 @@ static void ObjectTemplateSetNamedPropertyHandler(
auto obj =
CreateNamedInterceptorInfo(isolate, getter, setter, query, descriptor,
remover, enumerator, definer, data, flags);
- cons->set_named_property_handler(*obj);
+ i::FunctionTemplateInfo::SetNamedPropertyHandler(isolate, cons, obj);
}
void ObjectTemplate::SetHandler(
@@ -1879,15 +1898,15 @@ void ObjectTemplate::SetAccessCheckCallback(AccessCheckCallback callback,
i::Handle<i::AccessCheckInfo>::cast(struct_info);
SET_FIELD_WRAPPED(isolate, info, set_callback, callback);
- info->set_named_interceptor(nullptr);
- info->set_indexed_interceptor(nullptr);
+ info->set_named_interceptor(i::Object());
+ info->set_indexed_interceptor(i::Object());
if (data.IsEmpty()) {
data = v8::Undefined(reinterpret_cast<v8::Isolate*>(isolate));
}
info->set_data(*Utils::OpenHandle(*data));
- cons->set_access_check_info(*info);
+ i::FunctionTemplateInfo::SetAccessCheckInfo(isolate, cons, info);
cons->set_needs_access_check(true);
}
@@ -1926,7 +1945,7 @@ void ObjectTemplate::SetAccessCheckCallbackAndHandler(
}
info->set_data(*Utils::OpenHandle(*data));
- cons->set_access_check_info(*info);
+ i::FunctionTemplateInfo::SetAccessCheckInfo(isolate, cons, info);
cons->set_needs_access_check(true);
}
@@ -1941,7 +1960,7 @@ void ObjectTemplate::SetHandler(
isolate, config.getter, config.setter, config.query, config.descriptor,
config.deleter, config.enumerator, config.definer, config.data,
config.flags);
- cons->set_indexed_property_handler(*obj);
+ i::FunctionTemplateInfo::SetIndexedPropertyHandler(isolate, cons, obj);
}
void ObjectTemplate::SetCallAsFunctionHandler(FunctionCallback callback,
@@ -1958,7 +1977,7 @@ void ObjectTemplate::SetCallAsFunctionHandler(FunctionCallback callback,
data = v8::Undefined(reinterpret_cast<v8::Isolate*>(isolate));
}
obj->set_data(*Utils::OpenHandle(*data));
- cons->set_instance_call_handler(*obj);
+ i::FunctionTemplateInfo::SetInstanceCallHandler(isolate, cons, obj);
}
int ObjectTemplate::InternalFieldCount() {
@@ -2064,7 +2083,7 @@ Local<Value> UnboundScript::GetScriptName() {
i::Isolate* isolate = obj->GetIsolate();
LOG_API(isolate, UnboundScript, GetName);
if (obj->script()->IsScript()) {
- i::Object* name = i::Script::cast(obj->script())->name();
+ i::Object name = i::Script::cast(obj->script())->name();
return Utils::ToLocal(i::Handle<i::Object>(name, isolate));
} else {
return Local<String>();
@@ -2078,7 +2097,7 @@ Local<Value> UnboundScript::GetSourceURL() {
i::Isolate* isolate = obj->GetIsolate();
LOG_API(isolate, UnboundScript, GetSourceURL);
if (obj->script()->IsScript()) {
- i::Object* url = i::Script::cast(obj->script())->source_url();
+ i::Object url = i::Script::cast(obj->script())->source_url();
return Utils::ToLocal(i::Handle<i::Object>(url, isolate));
} else {
return Local<String>();
@@ -2092,7 +2111,7 @@ Local<Value> UnboundScript::GetSourceMappingURL() {
i::Isolate* isolate = obj->GetIsolate();
LOG_API(isolate, UnboundScript, GetSourceMappingURL);
if (obj->script()->IsScript()) {
- i::Object* url = i::Script::cast(obj->script())->source_mapping_url();
+ i::Object url = i::Script::cast(obj->script())->source_mapping_url();
return Utils::ToLocal(i::Handle<i::Object>(url, isolate));
} else {
return Local<String>();
@@ -2138,7 +2157,7 @@ Local<PrimitiveArray> ScriptOrModule::GetHostDefinedOptions() {
Local<UnboundScript> Script::GetUnboundScript() {
i::Handle<i::Object> obj = Utils::OpenHandle(this);
- i::SharedFunctionInfo* sfi = i::JSFunction::cast(*obj)->shared();
+ i::SharedFunctionInfo sfi = i::JSFunction::cast(*obj)->shared();
i::Isolate* isolate = sfi->GetIsolate();
return ToApiHandle<UnboundScript>(i::handle(sfi, isolate));
}
@@ -2331,18 +2350,6 @@ MaybeLocal<UnboundScript> ScriptCompiler::CompileUnboundInternal(
ENTER_V8_NO_SCRIPT(isolate, v8_isolate->GetCurrentContext(), ScriptCompiler,
CompileUnbound, MaybeLocal<UnboundScript>(),
InternalEscapableScope);
- // ProduceParserCache, ProduceCodeCache, ProduceFullCodeCache and
- // ConsumeParserCache are not supported. They are present only for
- // backward compatibility. All these options behave as kNoCompileOptions.
- if (options == kConsumeParserCache) {
- // We do not support parser caches anymore. Just set cached_data to
- // rejected to signal an error.
- options = kNoCompileOptions;
- source->cached_data->rejected = true;
- } else if (options == kProduceParserCache || options == kProduceCodeCache ||
- options == kProduceFullCodeCache) {
- options = kNoCompileOptions;
- }
i::ScriptData* script_data = nullptr;
if (options == kConsumeCodeCache) {
@@ -2417,44 +2424,28 @@ MaybeLocal<Module> ScriptCompiler::CompileModule(
return ToApiHandle<Module>(i_isolate->factory()->NewModule(shared));
}
-
-class IsIdentifierHelper {
- public:
- IsIdentifierHelper() : is_identifier_(false), first_char_(true) {}
-
- bool Check(i::String* string) {
- i::ConsString* cons_string = i::String::VisitFlat(this, string, 0);
- if (cons_string == nullptr) return is_identifier_;
- // We don't support cons strings here.
- return false;
- }
- void VisitOneByteString(const uint8_t* chars, int length) {
- for (int i = 0; i < length; ++i) {
- if (first_char_) {
- first_char_ = false;
- is_identifier_ = unicode_cache_.IsIdentifierStart(chars[0]);
- } else {
- is_identifier_ &= unicode_cache_.IsIdentifierPart(chars[i]);
- }
+namespace {
+bool IsIdentifier(i::Isolate* isolate, i::Handle<i::String> string) {
+ string = i::String::Flatten(isolate, string);
+ const int length = string->length();
+ if (length == 0) return false;
+ if (!i::IsIdentifierStart(string->Get(0))) return false;
+ i::DisallowHeapAllocation no_gc;
+ i::String::FlatContent flat = string->GetFlatContent(no_gc);
+ if (flat.IsOneByte()) {
+ auto vector = flat.ToOneByteVector();
+ for (int i = 1; i < length; i++) {
+ if (!i::IsIdentifierPart(vector[i])) return false;
}
- }
- void VisitTwoByteString(const uint16_t* chars, int length) {
- for (int i = 0; i < length; ++i) {
- if (first_char_) {
- first_char_ = false;
- is_identifier_ = unicode_cache_.IsIdentifierStart(chars[0]);
- } else {
- is_identifier_ &= unicode_cache_.IsIdentifierPart(chars[i]);
- }
+ } else {
+ auto vector = flat.ToUC16Vector();
+ for (int i = 1; i < length; i++) {
+ if (!i::IsIdentifierPart(vector[i])) return false;
}
}
-
- private:
- bool is_identifier_;
- bool first_char_;
- i::UnicodeCache unicode_cache_;
- DISALLOW_COPY_AND_ASSIGN(IsIdentifierHelper);
-};
+ return true;
+}
+} // anonymous namespace
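
// The IsIdentifier check above is what rejects malformed argument names in
// the public API; a sketch (assumes `isolate` and an entered `context`):
v8::ScriptCompiler::Source source(
    v8::String::NewFromUtf8(isolate, "return x + 1;",
                            v8::NewStringType::kNormal)
        .ToLocalChecked());
v8::Local<v8::String> args[] = {
    v8::String::NewFromUtf8(isolate, "x", v8::NewStringType::kNormal)
        .ToLocalChecked()};  // "x" passes IsIdentifier; "1x" would not.
v8::Local<v8::Function> fn;
if (v8::ScriptCompiler::CompileFunctionInContext(context, &source, 1, args, 0,
                                                 nullptr)
        .ToLocal(&fn)) {
  // fn behaves like: function(x) { return x + 1; }
}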
MaybeLocal<Function> ScriptCompiler::CompileFunctionInContext(
Local<Context> v8_context, Source* source, size_t arguments_count,
@@ -2479,9 +2470,8 @@ MaybeLocal<Function> ScriptCompiler::CompileFunctionInContext(
i::Handle<i::FixedArray> arguments_list =
isolate->factory()->NewFixedArray(static_cast<int>(arguments_count));
for (int i = 0; i < static_cast<int>(arguments_count); i++) {
- IsIdentifierHelper helper;
i::Handle<i::String> argument = Utils::OpenHandle(*arguments[i]);
- if (!helper.Check(*argument)) return Local<Function>();
+ if (!IsIdentifier(isolate, argument)) return Local<Function>();
arguments_list->set(i, *argument);
}
@@ -2636,9 +2626,8 @@ v8::TryCatch::TryCatch(v8::Isolate* isolate)
has_terminated_(false) {
ResetInternal();
// Special handling for simulators which have a separate JS stack.
- js_stack_comparable_address_ =
- reinterpret_cast<void*>(i::SimulatorStack::RegisterCTryCatch(
- isolate_, i::GetCurrentStackPosition()));
+ js_stack_comparable_address_ = reinterpret_cast<void*>(
+ i::SimulatorStack::RegisterJSStackComparableAddress(isolate_));
isolate_->RegisterTryCatchHandler(this);
}
@@ -2657,7 +2646,7 @@ v8::TryCatch::~TryCatch() {
isolate_->RestorePendingMessageFromTryCatch(this);
}
isolate_->UnregisterTryCatchHandler(this);
- i::SimulatorStack::UnregisterCTryCatch(isolate_);
+ i::SimulatorStack::UnregisterJSStackComparableAddress(isolate_);
reinterpret_cast<Isolate*>(isolate_)->ThrowException(exc);
DCHECK(!isolate_->thread_local_top()->rethrowing_message_);
} else {
@@ -2668,7 +2657,7 @@ v8::TryCatch::~TryCatch() {
isolate_->CancelScheduledExceptionFromTryCatch(this);
}
isolate_->UnregisterTryCatchHandler(this);
- i::SimulatorStack::UnregisterCTryCatch(isolate_);
+ i::SimulatorStack::UnregisterJSStackComparableAddress(isolate_);
}
}
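
// Typical embedder usage of the handler registered above; a sketch assuming
// `isolate`, `context`, a compiled `script`, and <cstdio>:
{
  v8::TryCatch try_catch(isolate);
  v8::Local<v8::Value> result;
  if (!script->Run(context).ToLocal(&result)) {
    // try_catch.HasCaught() is true here.
    v8::String::Utf8Value error(isolate, try_catch.Exception());
    fprintf(stderr, "uncaught: %s\n", *error);
  }
}  // ~TryCatch unregisters the handler and may rethrow, as above.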
@@ -2678,7 +2667,8 @@ void v8::TryCatch::operator delete(void*, size_t) { base::OS::Abort(); }
void v8::TryCatch::operator delete[](void*, size_t) { base::OS::Abort(); }
bool v8::TryCatch::HasCaught() const {
- return !reinterpret_cast<i::Object*>(exception_)->IsTheHole(isolate_);
+ return !i::Object(reinterpret_cast<i::Address>(exception_))
+ ->IsTheHole(isolate_);
}
@@ -2702,7 +2692,7 @@ v8::Local<v8::Value> v8::TryCatch::ReThrow() {
v8::Local<Value> v8::TryCatch::Exception() const {
if (HasCaught()) {
// Check for out of memory exception.
- i::Object* exception = reinterpret_cast<i::Object*>(exception_);
+ i::Object exception(reinterpret_cast<i::Address>(exception_));
return v8::Utils::ToLocal(i::Handle<i::Object>(exception, isolate_));
} else {
return v8::Local<Value>();
@@ -2712,7 +2702,7 @@ v8::Local<Value> v8::TryCatch::Exception() const {
MaybeLocal<Value> v8::TryCatch::StackTrace(Local<Context> context) const {
if (!HasCaught()) return v8::Local<Value>();
- i::Object* raw_obj = reinterpret_cast<i::Object*>(exception_);
+ i::Object raw_obj(reinterpret_cast<i::Address>(exception_));
if (!raw_obj->IsJSObject()) return v8::Local<Value>();
PREPARE_FOR_EXECUTION(context, TryCatch, StackTrace, Value);
i::Handle<i::JSObject> obj(i::JSObject::cast(raw_obj), isolate_);
@@ -2730,7 +2720,7 @@ MaybeLocal<Value> v8::TryCatch::StackTrace(Local<Context> context) const {
v8::Local<v8::Message> v8::TryCatch::Message() const {
- i::Object* message = reinterpret_cast<i::Object*>(message_obj_);
+ i::Object message(reinterpret_cast<i::Address>(message_obj_));
DCHECK(message->IsJSMessageObject() || message->IsTheHole(isolate_));
if (HasCaught() && !message->IsTheHole(isolate_)) {
return v8::Utils::MessageToLocal(i::Handle<i::Object>(message, isolate_));
@@ -2752,9 +2742,9 @@ void v8::TryCatch::Reset() {
void v8::TryCatch::ResetInternal() {
- i::Object* the_hole = i::ReadOnlyRoots(isolate_).the_hole_value();
- exception_ = the_hole;
- message_obj_ = the_hole;
+ i::Object the_hole = i::ReadOnlyRoots(isolate_).the_hole_value();
+ exception_ = reinterpret_cast<void*>(the_hole->ptr());
+ message_obj_ = reinterpret_cast<void*>(the_hole->ptr());
}
@@ -2994,20 +2984,6 @@ bool StackFrame::IsWasm() const { return Utils::OpenHandle(this)->is_wasm(); }
// --- J S O N ---
-MaybeLocal<Value> JSON::Parse(Isolate* v8_isolate, Local<String> json_string) {
- PREPARE_FOR_EXECUTION(v8_isolate->GetCurrentContext(), JSON, Parse, Value);
- i::Handle<i::String> string = Utils::OpenHandle(*json_string);
- i::Handle<i::String> source = i::String::Flatten(isolate, string);
- i::Handle<i::Object> undefined = isolate->factory()->undefined_value();
- auto maybe = source->IsSeqOneByteString()
- ? i::JsonParser<true>::Parse(isolate, source, undefined)
- : i::JsonParser<false>::Parse(isolate, source, undefined);
- Local<Value> result;
- has_pending_exception = !ToLocal<Value>(maybe, &result);
- RETURN_ON_FAILED_EXECUTION(Value);
- RETURN_ESCAPED(result);
-}
-
MaybeLocal<Value> JSON::Parse(Local<Context> context,
Local<String> json_string) {
PREPARE_FOR_EXECUTION(context, JSON, Parse, Value);
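
// With the Isolate-only overload removed, callers use the context-taking
// form; a short sketch (assumes a Local<String> `json_string`):
v8::Local<v8::Value> parsed;
if (!v8::JSON::Parse(context, json_string).ToLocal(&parsed)) {
  // Syntax error: an exception is now pending on the isolate.
}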
@@ -3064,7 +3040,7 @@ Maybe<uint32_t> ValueSerializer::Delegate::GetSharedArrayBufferId(
}
Maybe<uint32_t> ValueSerializer::Delegate::GetWasmModuleTransferId(
- Isolate* v8_isolate, Local<WasmCompiledModule> module) {
+ Isolate* v8_isolate, Local<WasmModuleObject> module) {
return Nothing<uint32_t>();
}
@@ -3113,10 +3089,6 @@ Maybe<bool> ValueSerializer::WriteValue(Local<Context> context,
return result;
}
-std::vector<uint8_t> ValueSerializer::ReleaseBuffer() {
- return private_->serializer.ReleaseBuffer();
-}
-
std::pair<uint8_t*, size_t> ValueSerializer::Release() {
return private_->serializer.Release();
}
@@ -3127,12 +3099,6 @@ void ValueSerializer::TransferArrayBuffer(uint32_t transfer_id,
Utils::OpenHandle(*array_buffer));
}
-void ValueSerializer::TransferSharedArrayBuffer(
- uint32_t transfer_id, Local<SharedArrayBuffer> shared_array_buffer) {
- private_->serializer.TransferArrayBuffer(
- transfer_id, Utils::OpenHandle(*shared_array_buffer));
-}
-
void ValueSerializer::WriteUint32(uint32_t value) {
private_->serializer.WriteUint32(value);
}
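
// With ReleaseBuffer() gone, Release() is how callers take ownership of the
// wire bytes; a sketch (assumes `isolate`, `context`, and a `value`):
v8::ValueSerializer serializer(isolate);
serializer.WriteHeader();
if (serializer.WriteValue(context, value).FromMaybe(false)) {
  std::pair<uint8_t*, size_t> buffer = serializer.Release();
  // ... ship buffer.first / buffer.second across the wire ...
  free(buffer.first);  // the default delegate allocates via malloc/realloc
}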
@@ -3158,13 +3124,13 @@ MaybeLocal<Object> ValueDeserializer::Delegate::ReadHostObject(
return MaybeLocal<Object>();
}
-MaybeLocal<WasmCompiledModule> ValueDeserializer::Delegate::GetWasmModuleFromId(
+MaybeLocal<WasmModuleObject> ValueDeserializer::Delegate::GetWasmModuleFromId(
Isolate* v8_isolate, uint32_t id) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
isolate->ScheduleThrow(*isolate->factory()->NewError(
isolate->error_function(),
i::MessageTemplate::kDataCloneDeserializationError));
- return MaybeLocal<WasmCompiledModule>();
+ return MaybeLocal<WasmModuleObject>();
}
MaybeLocal<SharedArrayBuffer>
@@ -3644,6 +3610,10 @@ MaybeLocal<Uint32> Value::ToUint32(Local<Context> context) const {
RETURN_ESCAPED(result);
}
+i::Isolate* i::IsolateFromNeverReadOnlySpaceObject(i::Address obj) {
+ return i::NeverReadOnlySpaceObject::GetIsolate(
+ i::HeapObject::cast(i::Object(obj)));
+}
void i::Internals::CheckInitializedImpl(v8::Isolate* external_isolate) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(external_isolate);
@@ -3776,10 +3746,10 @@ void v8::Proxy::CheckCast(Value* that) {
"Could not convert to proxy");
}
-void v8::WasmCompiledModule::CheckCast(Value* that) {
+void v8::WasmModuleObject::CheckCast(Value* that) {
Utils::ApiCheck(that->IsWebAssemblyCompiledModule(),
- "v8::WasmCompiledModule::Cast",
- "Could not convert to wasm compiled module");
+ "v8::WasmModuleObject::Cast",
+ "Could not convert to wasm module object");
}
void v8::ArrayBuffer::CheckCast(Value* that) {
@@ -4411,7 +4381,7 @@ MaybeLocal<Array> v8::Object::GetPropertyNames(
accumulator.GetKeys(static_cast<i::GetKeysConversion>(key_conversion));
DCHECK(self->map()->EnumLength() == i::kInvalidEnumCacheSentinel ||
self->map()->EnumLength() == 0 ||
- self->map()->instance_descriptors()->GetEnumCache()->keys() != *value);
+ self->map()->instance_descriptors()->enum_cache()->keys() != *value);
auto result = isolate->factory()->NewJSArrayWithElements(value);
RETURN_ESCAPED(Utils::ToLocal(result));
}
@@ -4497,11 +4467,6 @@ Maybe<bool> v8::Object::Delete(Local<Context> context, Local<Value> key) {
}
}
-bool v8::Object::Delete(v8::Local<Value> key) {
- auto context = ContextFromNeverReadOnlySpaceObject(Utils::OpenHandle(this));
- return Delete(context, key).FromMaybe(false);
-}
-
Maybe<bool> v8::Object::DeletePrivate(Local<Context> context,
Local<Private> key) {
auto isolate = reinterpret_cast<i::Isolate*>(context->GetIsolate());
@@ -4541,12 +4506,6 @@ Maybe<bool> v8::Object::Has(Local<Context> context, Local<Value> key) {
}
-bool v8::Object::Has(v8::Local<Value> key) {
- auto context = ContextFromNeverReadOnlySpaceObject(Utils::OpenHandle(this));
- return Has(context, key).FromMaybe(false);
-}
-
-
Maybe<bool> v8::Object::HasPrivate(Local<Context> context, Local<Private> key) {
return HasOwnProperty(context, Local<Name>(reinterpret_cast<Name*>(*key)));
}
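
// The bool-returning Delete/Has overloads removed above have Maybe-based,
// context-taking replacements; a sketch:
v8::Maybe<bool> deleted = obj->Delete(context, key);
if (obj->Has(context, key).FromMaybe(false)) {
  // The property is still present.
}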
@@ -4897,7 +4856,7 @@ MaybeLocal<Value> Object::CallAsFunction(Local<Context> context,
i::TimerEventScope<i::TimerEventExecute> timer_scope(isolate);
auto self = Utils::OpenHandle(this);
auto recv_obj = Utils::OpenHandle(*recv);
- STATIC_ASSERT(sizeof(v8::Local<v8::Value>) == sizeof(i::Object**));
+ STATIC_ASSERT(sizeof(v8::Local<v8::Value>) == sizeof(i::Handle<i::Object>));
i::Handle<i::Object>* args = reinterpret_cast<i::Handle<i::Object>*>(argv);
Local<Value> result;
has_pending_exception = !ToLocal<Value>(
@@ -4915,7 +4874,7 @@ MaybeLocal<Value> Object::CallAsConstructor(Local<Context> context, int argc,
InternalEscapableScope);
i::TimerEventScope<i::TimerEventExecute> timer_scope(isolate);
auto self = Utils::OpenHandle(this);
- STATIC_ASSERT(sizeof(v8::Local<v8::Value>) == sizeof(i::Object**));
+ STATIC_ASSERT(sizeof(v8::Local<v8::Value>) == sizeof(i::Handle<i::Object>));
i::Handle<i::Object>* args = reinterpret_cast<i::Handle<i::Object>*>(argv);
Local<Value> result;
has_pending_exception = !ToLocal<Value>(
@@ -4961,17 +4920,17 @@ MaybeLocal<Object> Function::NewInstanceWithSideEffectType(
InternalEscapableScope);
i::TimerEventScope<i::TimerEventExecute> timer_scope(isolate);
auto self = Utils::OpenHandle(this);
- STATIC_ASSERT(sizeof(v8::Local<v8::Value>) == sizeof(i::Object**));
+ STATIC_ASSERT(sizeof(v8::Local<v8::Value>) == sizeof(i::Handle<i::Object>));
bool should_set_has_no_side_effect =
side_effect_type == SideEffectType::kHasNoSideEffect &&
isolate->debug_execution_mode() == i::DebugInfo::kSideEffects;
if (should_set_has_no_side_effect) {
CHECK(self->IsJSFunction() &&
i::JSFunction::cast(*self)->shared()->IsApiFunction());
- i::Object* obj =
+ i::Object obj =
i::JSFunction::cast(*self)->shared()->get_api_func_data()->call_code();
if (obj->IsCallHandlerInfo()) {
- i::CallHandlerInfo* handler_info = i::CallHandlerInfo::cast(obj);
+ i::CallHandlerInfo handler_info = i::CallHandlerInfo::cast(obj);
if (!handler_info->IsSideEffectFreeCallHandlerInfo()) {
handler_info->SetNextCallHasNoSideEffect();
}
@@ -4982,10 +4941,10 @@ MaybeLocal<Object> Function::NewInstanceWithSideEffectType(
has_pending_exception = !ToLocal<Object>(
i::Execution::New(isolate, self, self, argc, args), &result);
if (should_set_has_no_side_effect) {
- i::Object* obj =
+ i::Object obj =
i::JSFunction::cast(*self)->shared()->get_api_func_data()->call_code();
if (obj->IsCallHandlerInfo()) {
- i::CallHandlerInfo* handler_info = i::CallHandlerInfo::cast(obj);
+ i::CallHandlerInfo handler_info = i::CallHandlerInfo::cast(obj);
if (has_pending_exception) {
// Restore the map if an exception prevented restoration.
handler_info->NextCallHasNoSideEffect();
@@ -5012,7 +4971,7 @@ MaybeLocal<v8::Value> Function::Call(Local<Context> context,
Utils::ApiCheck(!self.is_null(), "v8::Function::Call",
"Function to be called is a null pointer");
i::Handle<i::Object> recv_obj = Utils::OpenHandle(*recv);
- STATIC_ASSERT(sizeof(v8::Local<v8::Value>) == sizeof(i::Object**));
+ STATIC_ASSERT(sizeof(v8::Local<v8::Value>) == sizeof(i::Handle<i::Object>));
i::Handle<i::Object>* args = reinterpret_cast<i::Handle<i::Object>*>(argv);
Local<Value> result;
has_pending_exception = !ToLocal<Value>(
@@ -5215,9 +5174,9 @@ static inline const uint16_t* Align(const uint16_t* chars) {
class ContainsOnlyOneByteHelper {
public:
ContainsOnlyOneByteHelper() : is_one_byte_(true) {}
- bool Check(i::String* string) {
- i::ConsString* cons_string = i::String::VisitFlat(this, string, 0);
- if (cons_string == nullptr) return is_one_byte_;
+ bool Check(i::String string) {
+ i::ConsString cons_string = i::String::VisitFlat(this, string, 0);
+ if (cons_string.is_null()) return is_one_byte_;
return CheckCons(cons_string);
}
void VisitOneByteString(const uint8_t* chars, int length) {
@@ -5256,20 +5215,18 @@ class ContainsOnlyOneByteHelper {
}
private:
- bool CheckCons(i::ConsString* cons_string) {
+ bool CheckCons(i::ConsString cons_string) {
while (true) {
// Check left side if flat.
- i::String* left = cons_string->first();
- i::ConsString* left_as_cons =
- i::String::VisitFlat(this, left, 0);
+ i::String left = cons_string->first();
+ i::ConsString left_as_cons = i::String::VisitFlat(this, left, 0);
if (!is_one_byte_) return false;
// Check right side if flat.
- i::String* right = cons_string->second();
- i::ConsString* right_as_cons =
- i::String::VisitFlat(this, right, 0);
+ i::String right = cons_string->second();
+ i::ConsString right_as_cons = i::String::VisitFlat(this, right, 0);
if (!is_one_byte_) return false;
// Standard recurse/iterate trick.
- if (left_as_cons != nullptr && right_as_cons != nullptr) {
+ if (!left_as_cons.is_null() && !right_as_cons.is_null()) {
if (left->length() < right->length()) {
CheckCons(left_as_cons);
cons_string = right_as_cons;
@@ -5282,12 +5239,12 @@ class ContainsOnlyOneByteHelper {
continue;
}
// Descend left in place.
- if (left_as_cons != nullptr) {
+ if (!left_as_cons.is_null()) {
cons_string = left_as_cons;
continue;
}
// Descend right in place.
- if (right_as_cons != nullptr) {
+ if (!right_as_cons.is_null()) {
cons_string = right_as_cons;
continue;
}
@@ -5314,7 +5271,7 @@ int String::Utf8Length(Isolate* isolate) const {
int length = str->length();
if (length == 0) return 0;
i::DisallowHeapAllocation no_gc;
- i::String::FlatContent flat = str->GetFlatContent();
+ i::String::FlatContent flat = str->GetFlatContent(no_gc);
DCHECK(flat.IsFlat());
int utf8_length = 0;
if (flat.IsOneByte()) {
@@ -5332,204 +5289,133 @@ int String::Utf8Length(Isolate* isolate) const {
return utf8_length;
}
-class Utf8WriterVisitor {
- public:
- Utf8WriterVisitor(
- char* buffer,
- int capacity,
- bool skip_capacity_check,
- bool replace_invalid_utf8)
- : early_termination_(false),
- last_character_(unibrow::Utf16::kNoPreviousCharacter),
- buffer_(buffer),
- start_(buffer),
- capacity_(capacity),
- skip_capacity_check_(capacity == -1 || skip_capacity_check),
- replace_invalid_utf8_(replace_invalid_utf8),
- utf16_chars_read_(0) {
- }
-
- static int WriteEndCharacter(uint16_t character,
- int last_character,
- int remaining,
- char* const buffer,
- bool replace_invalid_utf8) {
- DCHECK_GT(remaining, 0);
- // We can't use a local buffer here because Encode needs to modify
- // previous characters in the stream. We know, however, that
- // exactly one character will be advanced.
- if (unibrow::Utf16::IsSurrogatePair(last_character, character)) {
- int written = unibrow::Utf8::Encode(buffer, character, last_character,
- replace_invalid_utf8);
- DCHECK_EQ(written, 1);
- return written;
- }
- // Use a scratch buffer to check the required characters.
- char temp_buffer[unibrow::Utf8::kMaxEncodedSize];
- // Can't encode using last_character as gcc has array bounds issues.
- int written = unibrow::Utf8::Encode(temp_buffer, character,
- unibrow::Utf16::kNoPreviousCharacter,
- replace_invalid_utf8);
- // Won't fit.
- if (written > remaining) return 0;
- // Copy over the character from temp_buffer.
- for (int j = 0; j < written; j++) {
- buffer[j] = temp_buffer[j];
+namespace {
+// Writes the flat content of a string to a buffer. This is done in two phases.
+// The first phase calculates a pessimistic estimate (writable_length) on how
+// many code units can be safely written without exceeding the buffer capacity
+// and without leaving a lone surrogate. The estimated number of code units
+// is then written out in one go, and the reported byte usage is used to
+// correct the estimate. This is repeated until the estimate becomes <= 0 or
+// all code units have been written out. The second phase writes out code
+// units until the buffer capacity is reached, would be exceeded by the next
+// unit, or all code units have been written out.
+template <typename Char>
+static int WriteUtf8Impl(i::Vector<const Char> string, char* write_start,
+ int write_capacity, int options,
+ int* utf16_chars_read_out) {
+ bool write_null = !(options & v8::String::NO_NULL_TERMINATION);
+ bool replace_invalid_utf8 = (options & v8::String::REPLACE_INVALID_UTF8);
+ char* current_write = write_start;
+ const Char* read_start = string.start();
+ int read_index = 0;
+ int read_length = string.length();
+ int prev_char = unibrow::Utf16::kNoPreviousCharacter;
+ // Do a fast loop where there is no exit capacity check.
+ // Need enough space to write everything but one character.
+ STATIC_ASSERT(unibrow::Utf16::kMaxExtraUtf8BytesForOneUtf16CodeUnit == 3);
+ static const int kMaxSizePerChar = sizeof(Char) == 1 ? 2 : 3;
+ while (read_index < read_length) {
+ int up_to = read_length;
+ if (write_capacity != -1) {
+ int remaining_capacity =
+ write_capacity - static_cast<int>(current_write - write_start);
+ int writable_length =
+ (remaining_capacity - kMaxSizePerChar) / kMaxSizePerChar;
+ // Need to drop into slow loop.
+ if (writable_length <= 0) break;
+ up_to = std::min(up_to, read_index + writable_length);
}
- return written;
- }
-
- // Visit writes out a group of code units (chars) of a v8::String to the
- // internal buffer_. This is done in two phases. The first phase calculates a
- // pessimistic estimate (writable_length) on how many code units can be safely
- // written without exceeding the buffer capacity and without writing the last
- // code unit (it could be a lead surrogate). The estimated number of code
- // units is then written out in one go, and the reported byte usage is used
- // to correct the estimate. This is repeated until the estimate becomes <= 0
- // or all code units have been written out. The second phase writes out code
- // units until the buffer capacity is reached, would be exceeded by the next
- // unit, or all units have been written out.
- template<typename Char>
- void Visit(const Char* chars, const int length) {
- DCHECK(!early_termination_);
- if (length == 0) return;
- // Copy state to stack.
- char* buffer = buffer_;
- int last_character = sizeof(Char) == 1
- ? unibrow::Utf16::kNoPreviousCharacter
- : last_character_;
- int i = 0;
- // Do a fast loop where there is no exit capacity check.
- while (true) {
- int fast_length;
- if (skip_capacity_check_) {
- fast_length = length;
+ // Write the characters to the stream.
+ if (sizeof(Char) == 1) {
+ // Simply memcpy if we only have ASCII characters.
+ uint8_t char_mask = 0;
+ for (int i = read_index; i < up_to; i++) char_mask |= read_start[i];
+ if ((char_mask & 0x80) == 0) {
+ int copy_length = up_to - read_index;
+ memcpy(current_write, read_start + read_index, copy_length);
+ current_write += copy_length;
+ read_index = up_to;
} else {
- int remaining_capacity = capacity_ - static_cast<int>(buffer - start_);
- // Need enough space to write everything but one character.
- STATIC_ASSERT(unibrow::Utf16::kMaxExtraUtf8BytesForOneUtf16CodeUnit ==
- 3);
- int max_size_per_char = sizeof(Char) == 1 ? 2 : 3;
- int writable_length =
- (remaining_capacity - max_size_per_char)/max_size_per_char;
- // Need to drop into slow loop.
- if (writable_length <= 0) break;
- fast_length = i + writable_length;
- if (fast_length > length) fast_length = length;
- }
- // Write the characters to the stream.
- if (sizeof(Char) == 1) {
- for (; i < fast_length; i++) {
- buffer += unibrow::Utf8::EncodeOneByte(
- buffer, static_cast<uint8_t>(*chars++));
- DCHECK(capacity_ == -1 || (buffer - start_) <= capacity_);
- }
- } else {
- for (; i < fast_length; i++) {
- uint16_t character = *chars++;
- buffer += unibrow::Utf8::Encode(buffer, character, last_character,
- replace_invalid_utf8_);
- last_character = character;
- DCHECK(capacity_ == -1 || (buffer - start_) <= capacity_);
+ for (; read_index < up_to; read_index++) {
+ current_write += unibrow::Utf8::EncodeOneByte(
+ current_write, static_cast<uint8_t>(read_start[read_index]));
+ DCHECK(write_capacity == -1 ||
+ (current_write - write_start) <= write_capacity);
}
}
- // Array is fully written. Exit.
- if (fast_length == length) {
- // Write state back out to object.
- last_character_ = last_character;
- buffer_ = buffer;
- utf16_chars_read_ += length;
- return;
+ } else {
+ for (; read_index < up_to; read_index++) {
+ uint16_t character = read_start[read_index];
+ current_write += unibrow::Utf8::Encode(current_write, character,
+ prev_char, replace_invalid_utf8);
+ prev_char = character;
+ DCHECK(write_capacity == -1 ||
+ (current_write - write_start) <= write_capacity);
}
}
- DCHECK(!skip_capacity_check_);
- // Slow loop. Must check capacity on each iteration.
- int remaining_capacity = capacity_ - static_cast<int>(buffer - start_);
+ }
+ if (read_index < read_length) {
+ DCHECK_NE(-1, write_capacity);
+ // Aborted due to limited capacity. Check capacity on each iteration.
+ int remaining_capacity =
+ write_capacity - static_cast<int>(current_write - write_start);
DCHECK_GE(remaining_capacity, 0);
- for (; i < length && remaining_capacity > 0; i++) {
- uint16_t character = *chars++;
- // remaining_capacity is <= 3 bytes at this point, so we do not write out
- // an unmatched lead surrogate.
- if (replace_invalid_utf8_ && unibrow::Utf16::IsLeadSurrogate(character)) {
- early_termination_ = true;
- break;
- }
- int written = WriteEndCharacter(character,
- last_character,
- remaining_capacity,
- buffer,
- replace_invalid_utf8_);
- if (written == 0) {
- early_termination_ = true;
- break;
+ for (; read_index < read_length && remaining_capacity > 0; read_index++) {
+ uint32_t character = read_start[read_index];
+ int written = 0;
+ // We can't use a local buffer here because Encode needs to modify
+ // previous characters in the stream. We know, however, that
+ // exactly one character will be advanced.
+ if (unibrow::Utf16::IsSurrogatePair(prev_char, character)) {
+ written = unibrow::Utf8::Encode(current_write, character, prev_char,
+ replace_invalid_utf8);
+ DCHECK_EQ(written, 1);
+ } else {
+ // Use a scratch buffer to check the required characters.
+ char temp_buffer[unibrow::Utf8::kMaxEncodedSize];
+ // Encoding a surrogate pair to Utf8 always takes 4 bytes.
+ static const int kSurrogatePairEncodedSize =
+ static_cast<int>(unibrow::Utf8::kMaxEncodedSize);
+ // For REPLACE_INVALID_UTF8, catch the case where we cut off in the
+ // middle of a surrogate pair. Abort before encoding the pair instead.
+ if (replace_invalid_utf8 &&
+ remaining_capacity < kSurrogatePairEncodedSize &&
+ unibrow::Utf16::IsLeadSurrogate(character) &&
+ read_index + 1 < read_length &&
+ unibrow::Utf16::IsTrailSurrogate(read_start[read_index + 1])) {
+ write_null = false;
+ break;
+ }
+ // Can't encode using prev_char as gcc has array bounds issues.
+ written = unibrow::Utf8::Encode(temp_buffer, character,
+ unibrow::Utf16::kNoPreviousCharacter,
+ replace_invalid_utf8);
+ if (written > remaining_capacity) {
+ // Won't fit. Abort and do not null-terminate the result.
+ write_null = false;
+ break;
+ }
+ // Copy over the character from temp_buffer.
+ for (int i = 0; i < written; i++) current_write[i] = temp_buffer[i];
}
- buffer += written;
- remaining_capacity -= written;
- last_character = character;
- }
- // Write state back out to object.
- last_character_ = last_character;
- buffer_ = buffer;
- utf16_chars_read_ += i;
- }
-
- inline bool IsDone() {
- return early_termination_;
- }
- inline void VisitOneByteString(const uint8_t* chars, int length) {
- Visit(chars, length);
- }
-
- inline void VisitTwoByteString(const uint16_t* chars, int length) {
- Visit(chars, length);
- }
-
- int CompleteWrite(bool write_null, int* utf16_chars_read_out) {
- // Write out number of utf16 characters written to the stream.
- if (utf16_chars_read_out != nullptr) {
- *utf16_chars_read_out = utf16_chars_read_;
- }
- // Only null terminate if all of the string was written and there's space.
- if (write_null &&
- !early_termination_ &&
- (capacity_ == -1 || (buffer_ - start_) < capacity_)) {
- *buffer_++ = '\0';
+ current_write += written;
+ remaining_capacity -= written;
+ prev_char = character;
}
- return static_cast<int>(buffer_ - start_);
}
- private:
- bool early_termination_;
- int last_character_;
- char* buffer_;
- char* const start_;
- int capacity_;
- bool const skip_capacity_check_;
- bool const replace_invalid_utf8_;
- int utf16_chars_read_;
- DISALLOW_IMPLICIT_CONSTRUCTORS(Utf8WriterVisitor);
-};
-
+ // Write out number of utf16 characters written to the stream.
+ if (utf16_chars_read_out != nullptr) *utf16_chars_read_out = read_index;
-static bool RecursivelySerializeToUtf8(i::String* current,
- Utf8WriterVisitor* writer,
- int recursion_budget) {
- while (!writer->IsDone()) {
- i::ConsString* cons_string = i::String::VisitFlat(writer, current);
- if (cons_string == nullptr) return true; // Leaf node.
- if (recursion_budget <= 0) return false;
- // Must write the left branch first.
- i::String* first = cons_string->first();
- bool success = RecursivelySerializeToUtf8(first,
- writer,
- recursion_budget - 1);
- if (!success) return false;
- // Inline tail recurse for right branch.
- current = cons_string->second();
+ // Only null-terminate if there's space.
+ if (write_null && (write_capacity == -1 ||
+ (current_write - write_start) < write_capacity)) {
+ *current_write++ = '\0';
}
- return true;
+ return static_cast<int>(current_write - write_start);
}
+} // anonymous namespace
int String::WriteUtf8(Isolate* v8_isolate, char* buffer, int capacity,
int* nchars_ref, int options) const {
@@ -5537,43 +5423,16 @@ int String::WriteUtf8(Isolate* v8_isolate, char* buffer, int capacity,
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
LOG_API(isolate, String, WriteUtf8);
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
- str = i::String::Flatten(isolate, str); // Flatten the string for efficiency.
- const int string_length = str->length();
- bool write_null = !(options & NO_NULL_TERMINATION);
- bool replace_invalid_utf8 = (options & REPLACE_INVALID_UTF8);
- int max16BitCodeUnitSize = unibrow::Utf8::kMax16BitCodeUnitSize;
- // First check if we can just write the string without checking capacity.
- if (capacity == -1 || capacity / max16BitCodeUnitSize >= string_length) {
- Utf8WriterVisitor writer(buffer, capacity, true, replace_invalid_utf8);
- const int kMaxRecursion = 100;
- bool success = RecursivelySerializeToUtf8(*str, &writer, kMaxRecursion);
- if (success) return writer.CompleteWrite(write_null, nchars_ref);
- } else if (capacity >= string_length) {
- // First check that the buffer is large enough.
- int utf8_bytes = Utf8Length(v8_isolate);
- if (utf8_bytes <= capacity) {
- // one-byte fast path.
- if (utf8_bytes == string_length) {
- WriteOneByte(v8_isolate, reinterpret_cast<uint8_t*>(buffer), 0,
- capacity, options);
- if (nchars_ref != nullptr) *nchars_ref = string_length;
- if (write_null && (utf8_bytes+1 <= capacity)) {
- return string_length + 1;
- }
- return string_length;
- }
- if (write_null && (utf8_bytes+1 > capacity)) {
- options |= NO_NULL_TERMINATION;
- }
- // Recurse once without a capacity limit.
- // This will get into the first branch above.
- // TODO(dcarney) Check max left rec. in Utf8Length and fall through.
- return WriteUtf8(v8_isolate, buffer, -1, nchars_ref, options);
- }
+ str = i::String::Flatten(isolate, str);
+ i::DisallowHeapAllocation no_gc;
+ i::String::FlatContent content = str->GetFlatContent(no_gc);
+ if (content.IsOneByte()) {
+ return WriteUtf8Impl<uint8_t>(content.ToOneByteVector(), buffer, capacity,
+ options, nchars_ref);
+ } else {
+ return WriteUtf8Impl<uint16_t>(content.ToUC16Vector(), buffer, capacity,
+ options, nchars_ref);
}
- Utf8WriterVisitor writer(buffer, capacity, false, replace_invalid_utf8);
- i::String::VisitFlat(&writer, *str);
- return writer.CompleteWrite(write_null, nchars_ref);
}
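
// The caller's view of the rewritten UTF-8 path; a sketch assuming
// `isolate`, a Local<String> `str`, and <vector>:
int nchars = 0;
std::vector<char> buf(str->Utf8Length(isolate) + 1);
int written =
    str->WriteUtf8(isolate, buf.data(), static_cast<int>(buf.size()), &nchars,
                   v8::String::REPLACE_INVALID_UTF8);
// `written` counts the NUL terminator unless NO_NULL_TERMINATION was passed
// or the buffer ran out of room for it.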
template <typename CharType>
@@ -5627,7 +5486,7 @@ bool v8::String::IsExternalOneByte() const {
void v8::String::VerifyExternalStringResource(
v8::String::ExternalStringResource* value) const {
i::DisallowHeapAllocation no_allocation;
- i::String* str = *Utils::OpenHandle(this);
+ i::String str = *Utils::OpenHandle(this);
const v8::String::ExternalStringResource* expected;
if (str->IsThinString()) {
@@ -5646,7 +5505,7 @@ void v8::String::VerifyExternalStringResource(
void v8::String::VerifyExternalStringResourceBase(
v8::String::ExternalStringResourceBase* value, Encoding encoding) const {
i::DisallowHeapAllocation no_allocation;
- i::String* str = *Utils::OpenHandle(this);
+ i::String str = *Utils::OpenHandle(this);
const v8::String::ExternalStringResourceBase* expected;
Encoding expectedEncoding;
@@ -5674,18 +5533,17 @@ void v8::String::VerifyExternalStringResourceBase(
String::ExternalStringResource* String::GetExternalStringResourceSlow() const {
i::DisallowHeapAllocation no_allocation;
typedef internal::Internals I;
- ExternalStringResource* result = nullptr;
- i::String* str = *Utils::OpenHandle(this);
+ i::String str = *Utils::OpenHandle(this);
if (str->IsThinString()) {
str = i::ThinString::cast(str)->actual();
}
if (i::StringShape(str).IsExternalTwoByte()) {
- void* value = I::ReadField<void*>(str, I::kStringResourceOffset);
- result = reinterpret_cast<String::ExternalStringResource*>(value);
+ void* value = I::ReadRawField<void*>(str.ptr(), I::kStringResourceOffset);
+ return reinterpret_cast<String::ExternalStringResource*>(value);
}
- return result;
+ return nullptr;
}
String::ExternalStringResourceBase* String::GetExternalStringResourceBaseSlow(
@@ -5693,48 +5551,36 @@ String::ExternalStringResourceBase* String::GetExternalStringResourceBaseSlow(
i::DisallowHeapAllocation no_allocation;
typedef internal::Internals I;
ExternalStringResourceBase* resource = nullptr;
- i::String* str = *Utils::OpenHandle(this);
+ i::String str = *Utils::OpenHandle(this);
if (str->IsThinString()) {
str = i::ThinString::cast(str)->actual();
}
- int type = I::GetInstanceType(str) & I::kFullStringRepresentationMask;
+ internal::Address string = str.ptr();
+ int type = I::GetInstanceType(string) & I::kFullStringRepresentationMask;
*encoding_out = static_cast<Encoding>(type & I::kStringEncodingMask);
if (i::StringShape(str).IsExternalOneByte() ||
i::StringShape(str).IsExternalTwoByte()) {
- void* value = I::ReadField<void*>(str, I::kStringResourceOffset);
+ void* value = I::ReadRawField<void*>(string, I::kStringResourceOffset);
resource = static_cast<ExternalStringResourceBase*>(value);
}
return resource;
}
-const String::ExternalOneByteStringResource*
-String::GetExternalOneByteStringResourceSlow() const {
- i::DisallowHeapAllocation no_allocation;
- i::String* str = *Utils::OpenHandle(this);
-
- if (str->IsThinString()) {
- str = i::ThinString::cast(str)->actual();
- }
-
- if (i::StringShape(str).IsExternalOneByte()) {
- const void* resource = i::ExternalOneByteString::cast(str)->resource();
- return reinterpret_cast<const ExternalOneByteStringResource*>(resource);
- }
- return nullptr;
-}
-
const v8::String::ExternalOneByteStringResource*
v8::String::GetExternalOneByteStringResource() const {
i::DisallowHeapAllocation no_allocation;
- i::String* str = *Utils::OpenHandle(this);
+ i::String str = *Utils::OpenHandle(this);
if (i::StringShape(str).IsExternalOneByte()) {
- const void* resource = i::ExternalOneByteString::cast(str)->resource();
- return reinterpret_cast<const ExternalOneByteStringResource*>(resource);
- } else {
- return GetExternalOneByteStringResourceSlow();
+ return i::ExternalOneByteString::cast(str)->resource();
+ } else if (str->IsThinString()) {
+ str = i::ThinString::cast(str)->actual();
+ if (i::StringShape(str).IsExternalOneByte()) {
+ return i::ExternalOneByteString::cast(str)->resource();
+ }
}
+ return nullptr;
}
@@ -5744,12 +5590,18 @@ Local<Value> Symbol::Name() const {
i::Isolate* isolate;
if (!i::Isolate::FromWritableHeapObject(*sym, &isolate)) {
// If the Symbol is in RO_SPACE, then its name must be too. Since RO_SPACE
- // objects are immovable we can use the Handle(T**) constructor with the
- // address of the name field in the Symbol object without needing an
+ // objects are immovable we can use the Handle(Address*) constructor with
+ // the address of the name field in the Symbol object without needing an
// isolate.
- i::Handle<i::HeapObject> ro_name(reinterpret_cast<i::HeapObject**>(
+#ifdef V8_COMPRESS_POINTERS
+ // Compressed fields can't serve as handle locations.
+ // TODO(ishell): get Isolate as a parameter.
+ isolate = i::Isolate::Current();
+#else
+ i::Handle<i::HeapObject> ro_name(reinterpret_cast<i::Address*>(
sym->GetFieldAddress(i::Symbol::kNameOffset)));
return Utils::ToLocal(ro_name);
+#endif
}
i::Handle<i::Object> name(sym->name(), isolate);
@@ -5822,9 +5674,8 @@ Local<Value> v8::Object::SlowGetInternalField(int index) {
i::Handle<i::JSReceiver> obj = Utils::OpenHandle(this);
const char* location = "v8::Object::GetInternalField()";
if (!InternalFieldOK(obj, index, location)) return Local<Value>();
- i::Handle<i::Object> value(
- i::Handle<i::JSObject>::cast(obj)->GetEmbedderField(index),
- obj->GetIsolate());
+ i::Handle<i::Object> value(i::JSObject::cast(*obj)->GetEmbedderField(index),
+ obj->GetIsolate());
return Utils::ToLocal(value);
}
@@ -5840,16 +5691,20 @@ void* v8::Object::SlowGetAlignedPointerFromInternalField(int index) {
i::Handle<i::JSReceiver> obj = Utils::OpenHandle(this);
const char* location = "v8::Object::GetAlignedPointerFromInternalField()";
if (!InternalFieldOK(obj, index, location)) return nullptr;
- return DecodeSmiToAligned(
- i::Handle<i::JSObject>::cast(obj)->GetEmbedderField(index), location);
+ void* result;
+ Utils::ApiCheck(i::EmbedderDataSlot(i::JSObject::cast(*obj), index)
+ .ToAlignedPointer(&result),
+ location, "Unaligned pointer");
+ return result;
}
void v8::Object::SetAlignedPointerInInternalField(int index, void* value) {
i::Handle<i::JSReceiver> obj = Utils::OpenHandle(this);
const char* location = "v8::Object::SetAlignedPointerInInternalField()";
if (!InternalFieldOK(obj, index, location)) return;
- i::Handle<i::JSObject>::cast(obj)->SetEmbedderField(
- index, EncodeAlignedAsSmi(value, location));
+ Utils::ApiCheck(i::EmbedderDataSlot(i::JSObject::cast(*obj), index)
+ .store_aligned_pointer(value),
+ location, "Unaligned pointer");
DCHECK_EQ(value, GetAlignedPointerFromInternalField(index));
}
@@ -5858,8 +5713,8 @@ void v8::Object::SetAlignedPointerInInternalFields(int argc, int indices[],
i::Handle<i::JSReceiver> obj = Utils::OpenHandle(this);
const char* location = "v8::Object::SetAlignedPointerInInternalFields()";
i::DisallowHeapAllocation no_gc;
- i::JSObject* object = i::JSObject::cast(*obj);
- int nof_embedder_fields = object->GetEmbedderFieldCount();
+ i::JSObject js_obj = i::JSObject::cast(*obj);
+ int nof_embedder_fields = js_obj->GetEmbedderFieldCount();
for (int i = 0; i < argc; i++) {
int index = indices[i];
if (!Utils::ApiCheck(index < nof_embedder_fields, location,
@@ -5867,21 +5722,22 @@ void v8::Object::SetAlignedPointerInInternalFields(int argc, int indices[],
return;
}
void* value = values[i];
- object->SetEmbedderField(index, EncodeAlignedAsSmi(value, location));
+ Utils::ApiCheck(
+ i::EmbedderDataSlot(js_obj, index).store_aligned_pointer(value),
+ location, "Unaligned pointer");
DCHECK_EQ(value, GetAlignedPointerFromInternalField(index));
}
}
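Embedder (internal) fields now go through i::EmbedderDataSlot, which can fail for pointers whose low bit is set; the new ApiCheck surfaces that as "Unaligned pointer". The public surface is unchanged. A hedged sketch, assuming an object created from a template with at least one internal field and a hypothetical Wrapped type:

    #include <v8.h>

    struct Wrapped { int payload; };  // hypothetical embedder type

    void Attach(v8::Local<v8::Object> obj, Wrapped* native) {
      // `native` must be at least 2-byte aligned, or the ApiCheck fires.
      obj->SetAlignedPointerInInternalField(0, native);
    }

    Wrapped* Fetch(v8::Local<v8::Object> obj) {
      return static_cast<Wrapped*>(obj->GetAlignedPointerFromInternalField(0));
    }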
-static void* ExternalValue(i::Object* obj) {
+static void* ExternalValue(i::Object obj) {
// Obscure semantics for undefined, but somehow checked in our unit tests...
if (obj->IsUndefined()) {
return nullptr;
}
- i::Object* foreign = i::JSObject::cast(obj)->GetEmbedderField(0);
+ i::Object foreign = i::JSObject::cast(obj)->GetEmbedderField(0);
return reinterpret_cast<void*>(i::Foreign::cast(foreign)->foreign_address());
}
-
// --- E n v i r o n m e n t ---
@@ -5903,15 +5759,29 @@ bool v8::V8::Initialize() {
return true;
}
-#if V8_OS_POSIX
-bool V8::TryHandleSignal(int signum, void* info, void* context) {
-#if V8_OS_LINUX && V8_TARGET_ARCH_X64 && !V8_OS_ANDROID
- return v8::internal::trap_handler::TryHandleSignal(
- signum, static_cast<siginfo_t*>(info), static_cast<ucontext_t*>(context));
-#else // V8_OS_LINUX && V8_TARGET_ARCH_X64 && !V8_OS_ANDROID
+#if V8_OS_LINUX || V8_OS_MACOSX
+bool TryHandleWebAssemblyTrapPosix(int sig_code, siginfo_t* info,
+ void* context) {
+#if V8_TARGET_ARCH_X64 && !V8_OS_ANDROID
+ return i::trap_handler::TryHandleSignal(sig_code, info, context);
+#else
return false;
#endif
}
+
+bool V8::TryHandleSignal(int signum, void* info, void* context) {
+ return TryHandleWebAssemblyTrapPosix(
+ signum, reinterpret_cast<siginfo_t*>(info), context);
+}
+#endif
+
+#if V8_OS_WIN
+bool TryHandleWebAssemblyTrapWindows(EXCEPTION_POINTERS* exception) {
+#if V8_TARGET_ARCH_X64
+ return i::trap_handler::TryHandleWasmTrap(exception);
+#endif
+ return false;
+}
#endif
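The POSIX-only V8::TryHandleSignal is generalized into TryHandleWebAssemblyTrapPosix (Linux and macOS) and TryHandleWebAssemblyTrapWindows, with the old entry point kept as a shim. A sketch, assuming Linux/x64 and the new v8-wasm-trap-handler-posix.h header (the header name is an assumption of this sketch):

    #include <signal.h>
    #include <cstdlib>
    #include <v8-wasm-trap-handler-posix.h>

    // SIGSEGV handler installed with sigaction(SA_SIGINFO).
    void SegvHandler(int signum, siginfo_t* info, void* context) {
      // Let V8 check whether the fault is a WebAssembly out-of-bounds trap;
      // if so it redirects the PC to its landing pad and we simply return.
      if (v8::TryHandleWebAssemblyTrapPosix(signum, info, context)) return;
      std::abort();  // not a wasm trap: fall back to crash handling
    }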
bool V8::RegisterDefaultSignalHandler() {
@@ -5932,7 +5802,6 @@ void v8::V8::SetReturnAddressLocationResolver(
i::StackFrame::SetReturnAddressLocationResolver(return_address_resolver);
}
-
bool v8::V8::Dispose() {
i::V8::TearDown();
#ifdef V8_USE_EXTERNAL_STARTUP_DATA
@@ -6058,8 +5927,8 @@ static i::Handle<ObjectType> CreateEnvironment(
// Set the global template to be the prototype template of
// global proxy template.
- proxy_constructor->set_prototype_template(
- *Utils::OpenHandle(*global_template));
+ i::FunctionTemplateInfo::SetPrototypeTemplate(
+ isolate, proxy_constructor, Utils::OpenHandle(*global_template));
proxy_template->SetInternalFieldCount(
global_template->InternalFieldCount());
@@ -6067,32 +5936,37 @@ static i::Handle<ObjectType> CreateEnvironment(
// Migrate security handlers from global_template to
// proxy_template. Temporarily removing access check
// information from the global template.
- if (!global_constructor->access_check_info()->IsUndefined(isolate)) {
- proxy_constructor->set_access_check_info(
- global_constructor->access_check_info());
+ if (!global_constructor->GetAccessCheckInfo()->IsUndefined(isolate)) {
+ i::FunctionTemplateInfo::SetAccessCheckInfo(
+ isolate, proxy_constructor,
+ i::handle(global_constructor->GetAccessCheckInfo(), isolate));
proxy_constructor->set_needs_access_check(
global_constructor->needs_access_check());
global_constructor->set_needs_access_check(false);
- global_constructor->set_access_check_info(
- i::ReadOnlyRoots(isolate).undefined_value());
+ i::FunctionTemplateInfo::SetAccessCheckInfo(
+ isolate, global_constructor,
+ i::ReadOnlyRoots(isolate).undefined_value_handle());
}
// Same for other interceptors. If the global constructor has
// interceptors, we need to replace them temporarily with noop
// interceptors, so the map is correctly marked as having interceptors,
// but we don't invoke any.
- if (!global_constructor->named_property_handler()->IsUndefined(isolate)) {
+ if (!global_constructor->GetNamedPropertyHandler()->IsUndefined(
+ isolate)) {
named_interceptor =
- handle(global_constructor->named_property_handler(), isolate);
- global_constructor->set_named_property_handler(
- i::ReadOnlyRoots(isolate).noop_interceptor_info());
+ handle(global_constructor->GetNamedPropertyHandler(), isolate);
+ i::FunctionTemplateInfo::SetNamedPropertyHandler(
+ isolate, global_constructor,
+ i::ReadOnlyRoots(isolate).noop_interceptor_info_handle());
}
- if (!global_constructor->indexed_property_handler()->IsUndefined(
+ if (!global_constructor->GetIndexedPropertyHandler()->IsUndefined(
isolate)) {
indexed_interceptor =
- handle(global_constructor->indexed_property_handler(), isolate);
- global_constructor->set_indexed_property_handler(
- i::ReadOnlyRoots(isolate).noop_interceptor_info());
+ handle(global_constructor->GetIndexedPropertyHandler(), isolate);
+ i::FunctionTemplateInfo::SetIndexedPropertyHandler(
+ isolate, global_constructor,
+ i::ReadOnlyRoots(isolate).noop_interceptor_info_handle());
}
}
@@ -6111,12 +5985,15 @@ static i::Handle<ObjectType> CreateEnvironment(
if (!maybe_global_template.IsEmpty()) {
DCHECK(!global_constructor.is_null());
DCHECK(!proxy_constructor.is_null());
- global_constructor->set_access_check_info(
- proxy_constructor->access_check_info());
+ i::FunctionTemplateInfo::SetAccessCheckInfo(
+ isolate, global_constructor,
+ i::handle(proxy_constructor->GetAccessCheckInfo(), isolate));
global_constructor->set_needs_access_check(
proxy_constructor->needs_access_check());
- global_constructor->set_named_property_handler(*named_interceptor);
- global_constructor->set_indexed_property_handler(*indexed_interceptor);
+ i::FunctionTemplateInfo::SetNamedPropertyHandler(
+ isolate, global_constructor, named_interceptor);
+ i::FunctionTemplateInfo::SetIndexedPropertyHandler(
+ isolate, global_constructor, indexed_interceptor);
}
}
// Leave V8.
@@ -6186,9 +6063,9 @@ MaybeLocal<Object> v8::Context::NewRemoteContext(
"v8::Context::NewRemoteContext",
"Global template needs to have access checks enabled.");
i::Handle<i::AccessCheckInfo> access_check_info = i::handle(
- i::AccessCheckInfo::cast(global_constructor->access_check_info()),
+ i::AccessCheckInfo::cast(global_constructor->GetAccessCheckInfo()),
isolate);
- Utils::ApiCheck(access_check_info->named_interceptor() != nullptr,
+ Utils::ApiCheck(access_check_info->named_interceptor() != i::Object(),
"v8::Context::NewRemoteContext",
"Global template needs to have access check handlers.");
i::Handle<i::JSGlobalProxy> global_proxy =
@@ -6219,7 +6096,7 @@ void v8::Context::UseDefaultSecurityToken() {
Local<Value> v8::Context::GetSecurityToken() {
i::Handle<i::Context> env = Utils::OpenHandle(this);
i::Isolate* isolate = env->GetIsolate();
- i::Object* security_token = env->security_token();
+ i::Object security_token = env->security_token();
i::Handle<i::Object> token_handle(security_token, isolate);
return Utils::ToLocal(token_handle);
}
@@ -6284,11 +6161,11 @@ void Context::SetErrorMessageForCodeGenerationFromStrings(Local<String> error) {
}
namespace {
-i::Object** GetSerializedDataFromFixedArray(i::Isolate* isolate,
- i::FixedArray* list, size_t index) {
+i::Address* GetSerializedDataFromFixedArray(i::Isolate* isolate,
+ i::FixedArray list, size_t index) {
if (index < static_cast<size_t>(list->length())) {
int int_index = static_cast<int>(index);
- i::Object* object = list->get(int_index);
+ i::Object object = list->get(int_index);
if (!object->IsTheHole(isolate)) {
list->set_the_hole(isolate, int_index);
// Shrink the list so that the last element is not the hole (unless it's
@@ -6304,10 +6181,10 @@ i::Object** GetSerializedDataFromFixedArray(i::Isolate* isolate,
}
} // anonymous namespace
-i::Object** Context::GetDataFromSnapshotOnce(size_t index) {
+i::Address* Context::GetDataFromSnapshotOnce(size_t index) {
auto context = Utils::OpenHandle(this);
i::Isolate* i_isolate = context->GetIsolate();
- i::FixedArray* list = context->serialized_objects();
+ i::FixedArray list = context->serialized_objects();
return GetSerializedDataFromFixedArray(i_isolate, list, index);
}
@@ -6323,7 +6200,9 @@ MaybeLocal<v8::Object> ObjectTemplate::NewInstance(Local<Context> context) {
Local<v8::Object> ObjectTemplate::NewInstance() {
- auto context = ContextFromNeverReadOnlySpaceObject(Utils::OpenHandle(this));
+ Local<Context> context =
+ reinterpret_cast<v8::Isolate*>(Utils::OpenHandle(this)->GetIsolate())
+ ->GetCurrentContext();
RETURN_TO_LOCAL_UNCHECKED(NewInstance(context), Object);
}
@@ -6363,7 +6242,9 @@ MaybeLocal<v8::Function> FunctionTemplate::GetFunction(Local<Context> context) {
Local<v8::Function> FunctionTemplate::GetFunction() {
- auto context = ContextFromNeverReadOnlySpaceObject(Utils::OpenHandle(this));
+ Local<Context> context =
+ reinterpret_cast<v8::Isolate*>(Utils::OpenHandle(this)->GetIsolate())
+ ->GetCurrentContext();
RETURN_TO_LOCAL_UNCHECKED(GetFunction(context), Function);
}
@@ -6378,8 +6259,8 @@ MaybeLocal<v8::Object> FunctionTemplate::NewRemoteInstance() {
"v8::FunctionTemplate::NewRemoteInstance",
"InstanceTemplate needs to have access checks enabled.");
i::Handle<i::AccessCheckInfo> access_check_info = i::handle(
- i::AccessCheckInfo::cast(constructor->access_check_info()), isolate);
- Utils::ApiCheck(access_check_info->named_interceptor() != nullptr,
+ i::AccessCheckInfo::cast(constructor->GetAccessCheckInfo()), isolate);
+ Utils::ApiCheck(access_check_info->named_interceptor() != i::Object(),
"v8::FunctionTemplate::NewRemoteInstance",
"InstanceTemplate needs to have access check handlers.");
i::Handle<i::JSObject> object;
@@ -6617,7 +6498,7 @@ Local<String> v8::String::NewExternal(
bool v8::String::MakeExternal(v8::String::ExternalStringResource* resource) {
i::DisallowHeapAllocation no_allocation;
- i::String* obj = *Utils::OpenHandle(this);
+ i::String obj = *Utils::OpenHandle(this);
if (obj->IsThinString()) {
obj = i::ThinString::cast(obj)->actual();
@@ -6646,7 +6527,7 @@ bool v8::String::MakeExternal(
v8::String::ExternalOneByteStringResource* resource) {
i::DisallowHeapAllocation no_allocation;
- i::String* obj = *Utils::OpenHandle(this);
+ i::String obj = *Utils::OpenHandle(this);
if (obj->IsThinString()) {
obj = i::ThinString::cast(obj)->actual();
@@ -6673,7 +6554,7 @@ bool v8::String::MakeExternal(
bool v8::String::CanMakeExternal() {
i::DisallowHeapAllocation no_allocation;
- i::String* obj = *Utils::OpenHandle(this);
+ i::String obj = *Utils::OpenHandle(this);
if (obj->IsThinString()) {
obj = i::ThinString::cast(obj)->actual();
@@ -6708,6 +6589,64 @@ Local<v8::Object> v8::Object::New(Isolate* isolate) {
return Utils::ToLocal(obj);
}
+Local<v8::Object> v8::Object::New(Isolate* isolate,
+ Local<Value> prototype_or_null,
+ Local<Name>* names, Local<Value>* values,
+ size_t length) {
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ i::Handle<i::Object> proto = Utils::OpenHandle(*prototype_or_null);
+ if (!Utils::ApiCheck(proto->IsNull() || proto->IsJSReceiver(),
+ "v8::Object::New", "prototype must be null or object")) {
+ return Local<v8::Object>();
+ }
+ LOG_API(i_isolate, Object, New);
+ ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
+
+ // We assume that this API is mostly used to create objects with named
+ // properties, and so we default to creating a properties backing store
+ // large enough to hold all of them, while we start with no elements
+ // (see http://bit.ly/v8-fast-object-create-cpp for the motivation).
+ i::Handle<i::NameDictionary> properties =
+ i::NameDictionary::New(i_isolate, static_cast<int>(length));
+ i::Handle<i::FixedArrayBase> elements =
+ i_isolate->factory()->empty_fixed_array();
+ for (size_t i = 0; i < length; ++i) {
+ i::Handle<i::Name> name = Utils::OpenHandle(*names[i]);
+ i::Handle<i::Object> value = Utils::OpenHandle(*values[i]);
+
+ // See if the {name} is a valid array index, in which case we need to
+ // add the {name}/{value} pair to the {elements}, otherwise they end
+ // up in the {properties} backing store.
+ uint32_t index;
+ if (name->AsArrayIndex(&index)) {
+ // If this is the first element, allocate a proper
+ // dictionary elements backing store for {elements}.
+ if (!elements->IsNumberDictionary()) {
+ elements =
+ i::NumberDictionary::New(i_isolate, static_cast<int>(length));
+ }
+ elements = i::NumberDictionary::Set(
+ i_isolate, i::Handle<i::NumberDictionary>::cast(elements), index,
+ value);
+ } else {
+ // Internalize the {name} first.
+ name = i_isolate->factory()->InternalizeName(name);
+ int const entry = properties->FindEntry(i_isolate, name);
+ if (entry == i::NameDictionary::kNotFound) {
+ // Add the {name}/{value} pair as a new entry.
+ properties = i::NameDictionary::Add(i_isolate, properties, name, value,
+ i::PropertyDetails::Empty());
+ } else {
+ // Overwrite the {entry} with the {value}.
+ properties->ValueAtPut(entry, *value);
+ }
+ }
+ }
+ i::Handle<i::JSObject> obj =
+ i_isolate->factory()->NewSlowJSObjectWithPropertiesAndElements(
+ proto, properties, elements);
+ return Utils::ToLocal(obj);
+}
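A hedged usage sketch of the new overload: build a dictionary-mode object with a null prototype and two named properties in a single call (the property names and values here are illustrative only):

    #include <v8.h>

    v8::Local<v8::Object> MakeConfig(v8::Isolate* isolate) {
      v8::Local<v8::Name> names[] = {
          v8::String::NewFromUtf8(isolate, "host", v8::NewStringType::kNormal)
              .ToLocalChecked(),
          v8::String::NewFromUtf8(isolate, "port", v8::NewStringType::kNormal)
              .ToLocalChecked()};
      v8::Local<v8::Value> values[] = {
          v8::String::NewFromUtf8(isolate, "localhost",
                                  v8::NewStringType::kNormal)
              .ToLocalChecked(),
          v8::Integer::New(isolate, 8080)};
      // Array-index-like names would land in the elements backing store.
      return v8::Object::New(isolate, v8::Null(isolate), names, values, 2);
    }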
Local<v8::Value> v8::NumberObject::New(Isolate* isolate, double value) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
@@ -6827,12 +6766,6 @@ MaybeLocal<v8::Value> v8::Date::New(Local<Context> context, double time) {
}
-Local<v8::Value> v8::Date::New(Isolate* isolate, double time) {
- auto context = isolate->GetCurrentContext();
- RETURN_TO_LOCAL_UNCHECKED(New(context, time), Value);
-}
-
-
double v8::Date::ValueOf() const {
i::Handle<i::Object> obj = Utils::OpenHandle(this);
i::Handle<i::JSDate> jsdate = i::Handle<i::JSDate>::cast(obj);
@@ -6847,17 +6780,14 @@ void v8::Date::DateTimeConfigurationChangeNotification(Isolate* isolate) {
LOG_API(i_isolate, Date, DateTimeConfigurationChangeNotification);
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
i_isolate->date_cache()->ResetDateCache();
- if (!i_isolate->eternal_handles()->Exists(
- i::EternalHandles::DATE_CACHE_VERSION)) {
- return;
- }
- i::Handle<i::FixedArray> date_cache_version =
- i::Handle<i::FixedArray>::cast(i_isolate->eternal_handles()->GetSingleton(
- i::EternalHandles::DATE_CACHE_VERSION));
- DCHECK_EQ(1, date_cache_version->length());
- CHECK(date_cache_version->get(0)->IsSmi());
- date_cache_version->set(
- 0, i::Smi::FromInt(i::Smi::ToInt(date_cache_version->get(0)) + 1));
+#ifdef V8_INTL_SUPPORT
+ i_isolate->clear_cached_icu_object(
+ i::Isolate::ICUObjectCacheType::kDefaultSimpleDateFormat);
+ i_isolate->clear_cached_icu_object(
+ i::Isolate::ICUObjectCacheType::kDefaultSimpleDateFormatForTime);
+ i_isolate->clear_cached_icu_object(
+ i::Isolate::ICUObjectCacheType::kDefaultSimpleDateFormatForDate);
+#endif // V8_INTL_SUPPORT
}
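With V8_INTL_SUPPORT the notification now drops the cached ICU date formatters instead of bumping the old DATE_CACHE_VERSION eternal handle. Embedder usage is unchanged; a minimal sketch:

    // Called after the embedder observes a host time-zone change.
    void OnTimeZoneChanged(v8::Isolate* isolate) {
      v8::Date::DateTimeConfigurationChangeNotification(isolate);
    }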
@@ -6931,7 +6861,7 @@ Local<v8::Array> v8::Array::New(Isolate* isolate, Local<Value>* elements,
uint32_t v8::Array::Length() const {
i::Handle<i::JSArray> obj = Utils::OpenHandle(this);
- i::Object* length = obj->length();
+ i::Object length = obj->length();
if (length->IsSmi()) {
return i::Smi::ToInt(length);
} else {
@@ -7028,12 +6958,7 @@ enum class MapAsArrayKind {
kValues = i::JS_MAP_VALUE_ITERATOR_TYPE
};
-enum class SetAsArrayKind {
- kEntries = i::JS_SET_KEY_VALUE_ITERATOR_TYPE,
- kValues = i::JS_SET_VALUE_ITERATOR_TYPE
-};
-
-i::Handle<i::JSArray> MapAsArray(i::Isolate* isolate, i::Object* table_obj,
+i::Handle<i::JSArray> MapAsArray(i::Isolate* isolate, i::Object table_obj,
int offset, MapAsArrayKind kind) {
i::Factory* factory = isolate->factory();
i::Handle<i::OrderedHashMap> table(i::OrderedHashMap::cast(table_obj),
@@ -7049,9 +6974,9 @@ i::Handle<i::JSArray> MapAsArray(i::Isolate* isolate, i::Object* table_obj,
int result_index = 0;
{
i::DisallowHeapAllocation no_gc;
- i::Oddball* the_hole = i::ReadOnlyRoots(isolate).the_hole_value();
+ i::Oddball the_hole = i::ReadOnlyRoots(isolate).the_hole_value();
for (int i = offset; i < capacity; ++i) {
- i::Object* key = table->KeyAt(i);
+ i::Object key = table->KeyAt(i);
if (key == the_hole) continue;
if (collect_keys) result->set(result_index++, key);
if (collect_values) result->set(result_index++, table->ValueAt(i));
@@ -7141,26 +7066,24 @@ Maybe<bool> Set::Delete(Local<Context> context, Local<Value> key) {
}
namespace {
-i::Handle<i::JSArray> SetAsArray(i::Isolate* isolate, i::Object* table_obj,
- int offset, SetAsArrayKind kind) {
+i::Handle<i::JSArray> SetAsArray(i::Isolate* isolate, i::Object table_obj,
+ int offset) {
i::Factory* factory = isolate->factory();
i::Handle<i::OrderedHashSet> table(i::OrderedHashSet::cast(table_obj),
isolate);
// Elements skipped by |offset| may already be deleted.
int capacity = table->UsedCapacity();
- const bool collect_key_values = kind == SetAsArrayKind::kEntries;
- int max_length = (capacity - offset) * (collect_key_values ? 2 : 1);
+ int max_length = capacity - offset;
if (max_length == 0) return factory->NewJSArray(0);
i::Handle<i::FixedArray> result = factory->NewFixedArray(max_length);
int result_index = 0;
{
i::DisallowHeapAllocation no_gc;
- i::Oddball* the_hole = i::ReadOnlyRoots(isolate).the_hole_value();
+ i::Oddball the_hole = i::ReadOnlyRoots(isolate).the_hole_value();
for (int i = offset; i < capacity; ++i) {
- i::Object* key = table->KeyAt(i);
+ i::Object key = table->KeyAt(i);
if (key == the_hole) continue;
result->set(result_index++, key);
- if (collect_key_values) result->set(result_index++, key);
}
}
DCHECK_GE(max_length, result_index);
@@ -7176,8 +7099,7 @@ Local<Array> Set::AsArray() const {
i::Isolate* isolate = obj->GetIsolate();
LOG_API(isolate, Set, AsArray);
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
- return Utils::ToLocal(
- SetAsArray(isolate, obj->table(), 0, SetAsArrayKind::kValues));
+ return Utils::ToLocal(SetAsArray(isolate, obj->table(), 0));
}
@@ -7262,6 +7184,20 @@ MaybeLocal<Promise> Promise::Then(Local<Context> context,
RETURN_ESCAPED(Local<Promise>::Cast(Utils::ToLocal(result)));
}
+MaybeLocal<Promise> Promise::Then(Local<Context> context,
+ Local<Function> on_fulfilled,
+ Local<Function> on_rejected) {
+ PREPARE_FOR_EXECUTION(context, Promise, Then, Promise);
+ auto self = Utils::OpenHandle(this);
+ i::Handle<i::Object> argv[] = {Utils::OpenHandle(*on_fulfilled),
+ Utils::OpenHandle(*on_rejected)};
+ i::Handle<i::Object> result;
+ has_pending_exception = !i::Execution::Call(isolate, isolate->promise_then(),
+ self, arraysize(argv), argv)
+ .ToHandle(&result);
+ RETURN_ON_FAILED_EXECUTION(Promise);
+ RETURN_ESCAPED(Local<Promise>::Cast(Utils::ToLocal(result)));
+}
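A short sketch of the new two-callback overload, equivalent to promise.then(onFulfilled, onRejected) in JS; the function handles are assumed to already exist:

    v8::MaybeLocal<v8::Promise> Chain(v8::Local<v8::Context> context,
                                      v8::Local<v8::Promise> promise,
                                      v8::Local<v8::Function> on_fulfilled,
                                      v8::Local<v8::Function> on_rejected) {
      return promise->Then(context, on_fulfilled, on_rejected);
    }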
bool Promise::HasHandler() {
i::Handle<i::JSReceiver> promise = Utils::OpenHandle(this);
@@ -7294,6 +7230,11 @@ Promise::PromiseState Promise::State() {
return static_cast<PromiseState>(js_promise->status());
}
+void Promise::MarkAsHandled() {
+ i::Handle<i::JSPromise> js_promise = Utils::OpenHandle(this);
+ js_promise->set_has_handler(true);
+}
+
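MarkAsHandled sets the promise's has_handler bit directly, so a rejection the embedder tracks by other means is not reported as unhandled. Sketch:

    void AdoptPromise(v8::Local<v8::Promise> promise) {
      // Marks the promise as having a handler; a later rejection is then
      // treated as handled rather than surfaced as an unhandled rejection.
      promise->MarkAsHandled();
    }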
Local<Value> Proxy::GetTarget() {
i::Handle<i::JSProxy> self = Utils::OpenHandle(this);
i::Handle<i::Object> target(self->target(), self->GetIsolate());
@@ -7332,39 +7273,64 @@ MaybeLocal<Proxy> Proxy::New(Local<Context> context, Local<Object> local_target,
RETURN_ESCAPED(result);
}
-WasmCompiledModule::BufferReference WasmCompiledModule::GetWasmWireBytesRef() {
- i::Handle<i::WasmModuleObject> obj =
- i::Handle<i::WasmModuleObject>::cast(Utils::OpenHandle(this));
- i::Vector<const uint8_t> bytes_vec = obj->native_module()->wire_bytes();
+CompiledWasmModule::CompiledWasmModule(
+ std::shared_ptr<internal::wasm::NativeModule> native_module)
+ : native_module_(std::move(native_module)) {
+ CHECK_NOT_NULL(native_module_);
+}
+
+OwnedBuffer CompiledWasmModule::Serialize() {
+ i::wasm::WasmSerializer wasm_serializer(native_module_.get());
+ size_t buffer_size = wasm_serializer.GetSerializedNativeModuleSize();
+ std::unique_ptr<uint8_t[]> buffer(new uint8_t[buffer_size]);
+ if (!wasm_serializer.SerializeNativeModule({buffer.get(), buffer_size}))
+ return {};
+ return {std::move(buffer), buffer_size};
+}
+
+MemorySpan<const uint8_t> CompiledWasmModule::GetWireBytesRef() {
+ i::Vector<const uint8_t> bytes_vec = native_module_->wire_bytes();
return {bytes_vec.start(), bytes_vec.size()};
}
-WasmCompiledModule::TransferrableModule
-WasmCompiledModule::GetTransferrableModule() {
+WasmModuleObject::BufferReference WasmModuleObject::GetWasmWireBytesRef() {
+ return GetCompiledModule().GetWireBytesRef();
+}
+
+WasmModuleObject::TransferrableModule
+WasmModuleObject::GetTransferrableModule() {
if (i::FLAG_wasm_shared_code) {
i::Handle<i::WasmModuleObject> obj =
i::Handle<i::WasmModuleObject>::cast(Utils::OpenHandle(this));
- return TransferrableModule(obj->managed_native_module()->get());
+ return TransferrableModule(obj->shared_native_module());
} else {
- WasmCompiledModule::SerializedModule serialized_module = Serialize();
- BufferReference wire_bytes_ref = GetWasmWireBytesRef();
- size_t wire_size = wire_bytes_ref.size;
+ CompiledWasmModule compiled_module = GetCompiledModule();
+ OwnedBuffer serialized_module = compiled_module.Serialize();
+ MemorySpan<const uint8_t> wire_bytes_ref =
+ compiled_module.GetWireBytesRef();
+ size_t wire_size = wire_bytes_ref.size();
std::unique_ptr<uint8_t[]> wire_bytes_copy(new uint8_t[wire_size]);
- memcpy(wire_bytes_copy.get(), wire_bytes_ref.start, wire_size);
+ memcpy(wire_bytes_copy.get(), wire_bytes_ref.data(), wire_size);
return TransferrableModule(std::move(serialized_module),
{std::move(wire_bytes_copy), wire_size});
}
}
-MaybeLocal<WasmCompiledModule> WasmCompiledModule::FromTransferrableModule(
+CompiledWasmModule WasmModuleObject::GetCompiledModule() {
+ i::Handle<i::WasmModuleObject> obj =
+ i::Handle<i::WasmModuleObject>::cast(Utils::OpenHandle(this));
+ return Utils::Convert(obj->shared_native_module());
+}
+
+MaybeLocal<WasmModuleObject> WasmModuleObject::FromTransferrableModule(
Isolate* isolate,
- const WasmCompiledModule::TransferrableModule& transferrable_module) {
+ const WasmModuleObject::TransferrableModule& transferrable_module) {
if (i::FLAG_wasm_shared_code) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
i::Handle<i::WasmModuleObject> module_object =
i_isolate->wasm_engine()->ImportNativeModule(
i_isolate, transferrable_module.shared_module_);
- return Local<WasmCompiledModule>::Cast(
+ return Local<WasmModuleObject>::Cast(
Utils::ToLocal(i::Handle<i::JSObject>::cast(module_object)));
} else {
return Deserialize(isolate, AsReference(transferrable_module.serialized_),
@@ -7372,60 +7338,54 @@ MaybeLocal<WasmCompiledModule> WasmCompiledModule::FromTransferrableModule(
}
}
-WasmCompiledModule::SerializedModule WasmCompiledModule::Serialize() {
- i::Handle<i::WasmModuleObject> obj =
- i::Handle<i::WasmModuleObject>::cast(Utils::OpenHandle(this));
- i::wasm::NativeModule* native_module = obj->native_module();
- i::wasm::WasmSerializer wasm_serializer(obj->GetIsolate(), native_module);
- size_t buffer_size = wasm_serializer.GetSerializedNativeModuleSize();
- std::unique_ptr<uint8_t[]> buffer(new uint8_t[buffer_size]);
- if (wasm_serializer.SerializeNativeModule({buffer.get(), buffer_size}))
- return {std::move(buffer), buffer_size};
- return {};
+WasmModuleObject::SerializedModule WasmModuleObject::Serialize() {
+ // TODO(clemensh): Deprecated; remove after M-73 branch.
+ OwnedBuffer serialized = GetCompiledModule().Serialize();
+ return {std::move(serialized.buffer), serialized.size};
}
-MaybeLocal<WasmCompiledModule> WasmCompiledModule::Deserialize(
- Isolate* isolate, WasmCompiledModule::BufferReference serialized_module,
- WasmCompiledModule::BufferReference wire_bytes) {
+MaybeLocal<WasmModuleObject> WasmModuleObject::Deserialize(
+ Isolate* isolate, MemorySpan<const uint8_t> serialized_module,
+ MemorySpan<const uint8_t> wire_bytes) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
i::MaybeHandle<i::WasmModuleObject> maybe_module_object =
i::wasm::DeserializeNativeModule(
- i_isolate, {serialized_module.start, serialized_module.size},
- {wire_bytes.start, wire_bytes.size});
+ i_isolate, {serialized_module.data(), serialized_module.size()},
+ {wire_bytes.data(), wire_bytes.size()});
i::Handle<i::WasmModuleObject> module_object;
if (!maybe_module_object.ToHandle(&module_object)) {
- return MaybeLocal<WasmCompiledModule>();
+ return MaybeLocal<WasmModuleObject>();
}
- return Local<WasmCompiledModule>::Cast(
+ return Local<WasmModuleObject>::Cast(
Utils::ToLocal(i::Handle<i::JSObject>::cast(module_object)));
}
-MaybeLocal<WasmCompiledModule> WasmCompiledModule::DeserializeOrCompile(
- Isolate* isolate, WasmCompiledModule::BufferReference serialized_module,
- WasmCompiledModule::BufferReference wire_bytes) {
- MaybeLocal<WasmCompiledModule> ret =
+MaybeLocal<WasmModuleObject> WasmModuleObject::DeserializeOrCompile(
+ Isolate* isolate, MemorySpan<const uint8_t> serialized_module,
+ MemorySpan<const uint8_t> wire_bytes) {
+ MaybeLocal<WasmModuleObject> ret =
Deserialize(isolate, serialized_module, wire_bytes);
if (!ret.IsEmpty()) {
return ret;
}
- return Compile(isolate, wire_bytes.start, wire_bytes.size);
+ return Compile(isolate, wire_bytes.data(), wire_bytes.size());
}
-MaybeLocal<WasmCompiledModule> WasmCompiledModule::Compile(Isolate* isolate,
- const uint8_t* start,
- size_t length) {
+MaybeLocal<WasmModuleObject> WasmModuleObject::Compile(Isolate* isolate,
+ const uint8_t* start,
+ size_t length) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- i::wasm::ErrorThrower thrower(i_isolate, "WasmCompiledModule::Compile()");
+ i::wasm::ErrorThrower thrower(i_isolate, "WasmModuleObject::Compile()");
if (!i::wasm::IsWasmCodegenAllowed(i_isolate, i_isolate->native_context())) {
- return MaybeLocal<WasmCompiledModule>();
+ return MaybeLocal<WasmModuleObject>();
}
auto enabled_features = i::wasm::WasmFeaturesFromIsolate(i_isolate);
i::MaybeHandle<i::JSObject> maybe_compiled =
i_isolate->wasm_engine()->SyncCompile(
i_isolate, enabled_features, &thrower,
i::wasm::ModuleWireBytes(start, start + length));
- if (maybe_compiled.is_null()) return MaybeLocal<WasmCompiledModule>();
- return Local<WasmCompiledModule>::Cast(
+ if (maybe_compiled.is_null()) return MaybeLocal<WasmModuleObject>();
+ return Local<WasmModuleObject>::Cast(
Utils::ToLocal(maybe_compiled.ToHandleChecked()));
}
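With the rename, compiled code is reachable through CompiledWasmModule, which shares the NativeModule and can outlive the JS-level WasmModuleObject. A hedged round-trip sketch built only from the entry points defined above (error handling elided):

    v8::MaybeLocal<v8::WasmModuleObject> RoundTrip(
        v8::Isolate* target_isolate, v8::Local<v8::WasmModuleObject> module) {
      v8::CompiledWasmModule compiled = module->GetCompiledModule();
      v8::OwnedBuffer serialized = compiled.Serialize();  // may be empty
      v8::MemorySpan<const uint8_t> wire = compiled.GetWireBytesRef();
      return v8::WasmModuleObject::DeserializeOrCompile(
          target_isolate, {serialized.buffer.get(), serialized.size}, wire);
    }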
@@ -7440,7 +7400,7 @@ class AsyncCompilationResolver : public i::wasm::CompilationResultResolver {
*Utils::OpenHandle(*promise))) {}
~AsyncCompilationResolver() override {
- i::GlobalHandles::Destroy(i::Handle<i::Object>::cast(promise_).location());
+ i::GlobalHandles::Destroy(promise_.location());
}
void OnCompilationSucceeded(i::Handle<i::WasmModuleObject> result) override {
@@ -7487,12 +7447,10 @@ bool v8::ArrayBuffer::IsExternal() const {
return Utils::OpenHandle(this)->is_external();
}
-
-bool v8::ArrayBuffer::IsNeuterable() const {
- return Utils::OpenHandle(this)->is_neuterable();
+bool v8::ArrayBuffer::IsDetachable() const {
+ return Utils::OpenHandle(this)->is_detachable();
}
-
v8::ArrayBuffer::Contents v8::ArrayBuffer::Externalize() {
i::Handle<i::JSArrayBuffer> self = Utils::OpenHandle(this);
i::Isolate* isolate = self->GetIsolate();
@@ -7549,21 +7507,18 @@ v8::ArrayBuffer::Contents v8::ArrayBuffer::GetContents() {
return contents;
}
-
-void v8::ArrayBuffer::Neuter() {
+void v8::ArrayBuffer::Detach() {
i::Handle<i::JSArrayBuffer> obj = Utils::OpenHandle(this);
i::Isolate* isolate = obj->GetIsolate();
- Utils::ApiCheck(obj->is_external(),
- "v8::ArrayBuffer::Neuter",
- "Only externalized ArrayBuffers can be neutered");
- Utils::ApiCheck(obj->is_neuterable(), "v8::ArrayBuffer::Neuter",
- "Only neuterable ArrayBuffers can be neutered");
- LOG_API(isolate, ArrayBuffer, Neuter);
+ Utils::ApiCheck(obj->is_external(), "v8::ArrayBuffer::Detach",
+ "Only externalized ArrayBuffers can be detached");
+ Utils::ApiCheck(obj->is_detachable(), "v8::ArrayBuffer::Detach",
+ "Only detachable ArrayBuffers can be detached");
+ LOG_API(isolate, ArrayBuffer, Detach);
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
- obj->Neuter();
+ obj->Detach();
}
-
size_t v8::ArrayBuffer::ByteLength() const {
i::Handle<i::JSArrayBuffer> obj = Utils::OpenHandle(this);
return obj->byte_length();
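Neuter/IsNeuterable become Detach/IsDetachable, tracking the spec's terminology; the preconditions (externalized and detachable) are unchanged. Sketch:

    // Take ownership of the backing store, then detach the JS-visible buffer.
    void StealBackingStore(v8::Local<v8::ArrayBuffer> ab) {
      v8::ArrayBuffer::Contents contents = ab->Externalize();
      if (ab->IsDetachable()) ab->Detach();  // JS now sees byteLength == 0
      // ... use contents.Data()/contents.ByteLength(); the embedder must
      // eventually free the memory via its ArrayBuffer::Allocator.
    }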
@@ -7654,19 +7609,19 @@ bool v8::ArrayBufferView::HasBuffer() const {
size_t v8::ArrayBufferView::ByteOffset() {
i::Handle<i::JSArrayBufferView> obj = Utils::OpenHandle(this);
- return obj->WasNeutered() ? 0 : obj->byte_offset();
+ return obj->WasDetached() ? 0 : obj->byte_offset();
}
size_t v8::ArrayBufferView::ByteLength() {
i::Handle<i::JSArrayBufferView> obj = Utils::OpenHandle(this);
- return obj->WasNeutered() ? 0 : obj->byte_length();
+ return obj->WasDetached() ? 0 : obj->byte_length();
}
size_t v8::TypedArray::Length() {
i::Handle<i::JSTypedArray> obj = Utils::OpenHandle(this);
- return obj->WasNeutered() ? 0 : obj->length_value();
+ return obj->WasDetached() ? 0 : obj->length_value();
}
static_assert(v8::TypedArray::kMaxLength == i::Smi::kMaxValue,
@@ -8007,23 +7962,18 @@ void Isolate::SetIdle(bool is_idle) {
isolate->SetIdle(is_idle);
}
-ArrayBuffer::Allocator* Isolate::GetArrayBufferAllocator() {
- i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
- return isolate->array_buffer_allocator();
-}
-
bool Isolate::InContext() {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
- return isolate->context() != nullptr;
+ return !isolate->context().is_null();
}
v8::Local<v8::Context> Isolate::GetCurrentContext() {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
- i::Context* context = isolate->context();
- if (context == nullptr) return Local<Context>();
- i::Context* native_context = context->native_context();
- if (native_context == nullptr) return Local<Context>();
+ i::Context context = isolate->context();
+ if (context.is_null()) return Local<Context>();
+ i::Context native_context = context->native_context();
+ if (native_context.is_null()) return Local<Context>();
return Utils::ToLocal(i::Handle<i::Context>(native_context, isolate));
}
@@ -8038,14 +7988,10 @@ v8::Local<v8::Context> Isolate::GetEnteredContext() {
v8::Local<v8::Context> Isolate::GetEnteredOrMicrotaskContext() {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
- i::Handle<i::Object> last;
- if (isolate->handle_scope_implementer()
- ->MicrotaskContextIsLastEnteredContext()) {
- last = isolate->handle_scope_implementer()->MicrotaskContext();
- } else {
- last = isolate->handle_scope_implementer()->LastEnteredContext();
- }
+ i::Handle<i::Object> last =
+ isolate->handle_scope_implementer()->LastEnteredOrMicrotaskContext();
if (last.is_null()) return Local<Context>();
+ DCHECK(last->IsNativeContext());
return Utils::ToLocal(i::Handle<i::Context>::cast(last));
}
@@ -8180,7 +8126,7 @@ Isolate* Isolate::GetCurrent() {
// static
Isolate* Isolate::Allocate() {
- return reinterpret_cast<Isolate*>(new i::Isolate());
+ return reinterpret_cast<Isolate*>(i::Isolate::New());
}
// static
@@ -8195,15 +8141,6 @@ void Isolate::Initialize(Isolate* isolate,
} else {
i_isolate->set_snapshot_blob(i::Snapshot::DefaultSnapshotBlob());
}
- if (params.entry_hook) {
-#ifdef V8_USE_SNAPSHOT
- // Setting a FunctionEntryHook is only supported in no-snapshot builds.
- Utils::ApiCheck(
- false, "v8::Isolate::New",
- "Setting a FunctionEntryHook is only supported in no-snapshot builds.");
-#endif
- i_isolate->set_function_entry_hook(params.entry_hook);
- }
auto code_event_handler = params.code_event_handler;
#ifdef ENABLE_GDB_JIT_INTERFACE
if (code_event_handler == nullptr && i::FLAG_gdbjit) {
@@ -8234,7 +8171,7 @@ void Isolate::Initialize(Isolate* isolate,
SetResourceConstraints(i_isolate, params.constraints);
// TODO(jochen): Once we got rid of Isolate::Current(), we can remove this.
Isolate::Scope isolate_scope(isolate);
- if (params.entry_hook || !i::Snapshot::Initialize(i_isolate)) {
+ if (!i::Snapshot::Initialize(i_isolate)) {
// If snapshot data was provided and we failed to deserialize it must
// have been corrupted.
if (i_isolate->snapshot_blob() != nullptr) {
@@ -8267,7 +8204,7 @@ void Isolate::Dispose() {
"Disposing the isolate that is entered by a thread.")) {
return;
}
- isolate->TearDown();
+ i::Isolate::Delete(isolate);
}
void Isolate::DumpAndResetStats() {
@@ -8321,22 +8258,41 @@ Isolate::DisallowJavascriptExecutionScope::DisallowJavascriptExecutionScope(
Isolate::DisallowJavascriptExecutionScope::OnFailure on_failure)
: on_failure_(on_failure) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- if (on_failure_ == CRASH_ON_FAILURE) {
- internal_ = reinterpret_cast<void*>(
- new i::DisallowJavascriptExecution(i_isolate));
- } else {
- DCHECK_EQ(THROW_ON_FAILURE, on_failure);
- internal_ = reinterpret_cast<void*>(
- new i::ThrowOnJavascriptExecution(i_isolate));
+ switch (on_failure_) {
+ case CRASH_ON_FAILURE:
+ internal_ = reinterpret_cast<void*>(
+ new i::DisallowJavascriptExecution(i_isolate));
+ break;
+ case THROW_ON_FAILURE:
+ DCHECK_EQ(THROW_ON_FAILURE, on_failure);
+ internal_ =
+ reinterpret_cast<void*>(new i::ThrowOnJavascriptExecution(i_isolate));
+ break;
+ case DUMP_ON_FAILURE:
+ internal_ =
+ reinterpret_cast<void*>(new i::DumpOnJavascriptExecution(i_isolate));
+ break;
+ default:
+ UNREACHABLE();
+ break;
}
}
Isolate::DisallowJavascriptExecutionScope::~DisallowJavascriptExecutionScope() {
- if (on_failure_ == CRASH_ON_FAILURE) {
- delete reinterpret_cast<i::DisallowJavascriptExecution*>(internal_);
- } else {
- delete reinterpret_cast<i::ThrowOnJavascriptExecution*>(internal_);
+ switch (on_failure_) {
+ case CRASH_ON_FAILURE:
+ delete reinterpret_cast<i::DisallowJavascriptExecution*>(internal_);
+ break;
+ case THROW_ON_FAILURE:
+ delete reinterpret_cast<i::ThrowOnJavascriptExecution*>(internal_);
+ break;
+ case DUMP_ON_FAILURE:
+ delete reinterpret_cast<i::DumpOnJavascriptExecution*>(internal_);
+ break;
+ default:
+ UNREACHABLE();
+ break;
}
}
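The scope gains a third mode, DUMP_ON_FAILURE, alongside the existing crash and throw modes. Sketch:

    void NoJsSection(v8::Isolate* isolate) {
      v8::Isolate::DisallowJavascriptExecutionScope no_js(
          isolate,
          v8::Isolate::DisallowJavascriptExecutionScope::DUMP_ON_FAILURE);
      // ... work that must not re-enter JavaScript; a violation produces a
      // dump instead of crashing or throwing ...
    }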
@@ -8348,12 +8304,15 @@ Isolate::AllowJavascriptExecutionScope::AllowJavascriptExecutionScope(
new i::AllowJavascriptExecution(i_isolate));
internal_throws_ = reinterpret_cast<void*>(
new i::NoThrowOnJavascriptExecution(i_isolate));
+ internal_dump_ =
+ reinterpret_cast<void*>(new i::NoDumpOnJavascriptExecution(i_isolate));
}
Isolate::AllowJavascriptExecutionScope::~AllowJavascriptExecutionScope() {
delete reinterpret_cast<i::AllowJavascriptExecution*>(internal_assert_);
delete reinterpret_cast<i::NoThrowOnJavascriptExecution*>(internal_throws_);
+ delete reinterpret_cast<i::NoDumpOnJavascriptExecution*>(internal_dump_);
}
@@ -8361,12 +8320,12 @@ Isolate::SuppressMicrotaskExecutionScope::SuppressMicrotaskExecutionScope(
Isolate* isolate)
: isolate_(reinterpret_cast<i::Isolate*>(isolate)) {
isolate_->handle_scope_implementer()->IncrementCallDepth();
- isolate_->handle_scope_implementer()->IncrementMicrotasksSuppressions();
+ isolate_->default_microtask_queue()->IncrementMicrotasksSuppressions();
}
Isolate::SuppressMicrotaskExecutionScope::~SuppressMicrotaskExecutionScope() {
- isolate_->handle_scope_implementer()->DecrementMicrotasksSuppressions();
+ isolate_->default_microtask_queue()->DecrementMicrotasksSuppressions();
isolate_->handle_scope_implementer()->DecrementCallDepth();
}
@@ -8380,9 +8339,9 @@ Isolate::SafeForTerminationScope::~SafeForTerminationScope() {
isolate_->set_next_v8_call_is_safe_for_termination(prev_value_);
}
-i::Object** Isolate::GetDataFromSnapshotOnce(size_t index) {
+i::Address* Isolate::GetDataFromSnapshotOnce(size_t index) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(this);
- i::FixedArray* list = i_isolate->heap()->serialized_objects();
+ i::FixedArray list = i_isolate->heap()->serialized_objects();
return GetSerializedDataFromFixedArray(i_isolate, list, index);
}
@@ -8402,7 +8361,7 @@ void Isolate::GetHeapStatistics(HeapStatistics* heap_statistics) {
heap_statistics->malloced_memory_ =
isolate->allocator()->GetCurrentMemoryUsage() +
isolate->wasm_engine()->allocator()->GetCurrentMemoryUsage();
- heap_statistics->external_memory_ = isolate->heap()->external_memory();
+ heap_statistics->external_memory_ = isolate->heap()->backing_store_bytes();
heap_statistics->peak_malloced_memory_ =
isolate->allocator()->GetMaxMemoryUsage() +
isolate->wasm_engine()->allocator()->GetMaxMemoryUsage();
@@ -8500,9 +8459,7 @@ void Isolate::GetStackSample(const RegisterState& state, void** frames,
size_t Isolate::NumberOfPhantomHandleResetsSinceLastCall() {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
- size_t result = isolate->global_handles()->NumberOfPhantomHandleResets();
- isolate->global_handles()->ResetNumberOfPhantomHandleResets();
- return result;
+ return isolate->global_handles()->GetAndResetGlobalHandleResetCount();
}
void Isolate::SetEventLogger(LogEventCallback that) {
@@ -8562,14 +8519,15 @@ void Isolate::SetPromiseRejectCallback(PromiseRejectCallback callback) {
void Isolate::RunMicrotasks() {
DCHECK_NE(MicrotasksPolicy::kScoped, GetMicrotasksPolicy());
- reinterpret_cast<i::Isolate*>(this)->RunMicrotasks();
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+ isolate->default_microtask_queue()->RunMicrotasks(isolate);
}
void Isolate::EnqueueMicrotask(Local<Function> function) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
i::Handle<i::CallableTask> microtask = isolate->factory()->NewCallableTask(
Utils::OpenHandle(*function), isolate->native_context());
- isolate->EnqueueMicrotask(microtask);
+ isolate->default_microtask_queue()->EnqueueMicrotask(*microtask);
}
void Isolate::EnqueueMicrotask(MicrotaskCallback callback, void* data) {
@@ -8578,7 +8536,7 @@ void Isolate::EnqueueMicrotask(MicrotaskCallback callback, void* data) {
i::Handle<i::CallbackTask> microtask = isolate->factory()->NewCallbackTask(
isolate->factory()->NewForeign(reinterpret_cast<i::Address>(callback)),
isolate->factory()->NewForeign(reinterpret_cast<i::Address>(data)));
- isolate->EnqueueMicrotask(microtask);
+ isolate->default_microtask_queue()->EnqueueMicrotask(*microtask);
}
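Microtask bookkeeping moves from the Isolate and HandleScopeImplementer onto a per-isolate default MicrotaskQueue; the public calls are unchanged. Sketch (valid only when the microtasks policy is not kScoped, per the DCHECK above):

    void QueueAndDrain(v8::Isolate* isolate, v8::Local<v8::Function> task) {
      isolate->EnqueueMicrotask(task);  // lands on the default queue
      isolate->RunMicrotasks();         // drains that same queue
    }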
@@ -8599,14 +8557,15 @@ void Isolate::AddMicrotasksCompletedCallback(
MicrotasksCompletedCallback callback) {
DCHECK(callback);
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
- isolate->AddMicrotasksCompletedCallback(callback);
+ isolate->default_microtask_queue()->AddMicrotasksCompletedCallback(callback);
}
void Isolate::RemoveMicrotasksCompletedCallback(
MicrotasksCompletedCallback callback) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
- isolate->RemoveMicrotasksCompletedCallback(callback);
+ isolate->default_microtask_queue()->RemoveMicrotasksCompletedCallback(
+ callback);
}
@@ -8654,8 +8613,8 @@ void Isolate::LowMemoryNotification() {
}
{
i::HeapIterator iterator(isolate->heap());
- i::HeapObject* obj;
- while ((obj = iterator.next()) != nullptr) {
+ for (i::HeapObject obj = iterator.next(); !obj.is_null();
+ obj = iterator.next()) {
if (obj->IsAbstractCode()) {
i::AbstractCode::cast(obj)->DropStackFrameCache();
}
@@ -8695,8 +8654,6 @@ void Isolate::MemoryPressureNotification(MemoryPressureLevel level) {
: i::ThreadId::Current().Equals(isolate->thread_id());
isolate->heap()->MemoryPressureNotification(level, on_isolate_thread);
isolate->allocator()->MemoryPressureNotification(level);
- isolate->compiler_dispatcher()->MemoryPressureNotification(level,
- on_isolate_thread);
}
void Isolate::EnableMemorySavingsMode() {
@@ -8747,10 +8704,24 @@ void Isolate::GetCodeRange(void** start, size_t* length_in_bytes) {
*length_in_bytes = code_range.size();
}
-MemoryRange Isolate::GetEmbeddedCodeRange() {
+UnwindState Isolate::GetUnwindState() {
+ UnwindState unwind_state;
+ void* code_range_start;
+ GetCodeRange(&code_range_start, &unwind_state.code_range.length_in_bytes);
+ unwind_state.code_range.start = code_range_start;
+
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
- return {reinterpret_cast<const void*>(isolate->embedded_blob()),
- isolate->embedded_blob_size()};
+ unwind_state.embedded_code_range.start =
+ reinterpret_cast<const void*>(isolate->embedded_blob());
+ unwind_state.embedded_code_range.length_in_bytes =
+ isolate->embedded_blob_size();
+
+ i::Code js_entry = isolate->heap()->builtin(i::Builtins::kJSEntry);
+ unwind_state.js_entry_stub.code.start =
+ reinterpret_cast<const void*>(js_entry->InstructionStart());
+ unwind_state.js_entry_stub.code.length_in_bytes = js_entry->InstructionSize();
+
+ return unwind_state;
}
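GetEmbeddedCodeRange is folded into the broader GetUnwindState, which also exposes the code range and the JSEntry stub range so a sampling profiler can classify PCs without re-entering V8. A hedged sketch:

    #include <cstdint>

    bool PcIsInV8(v8::Isolate* isolate, const void* pc) {
      v8::UnwindState state = isolate->GetUnwindState();
      auto contains = [pc](const v8::MemoryRange& r) {
        auto p = reinterpret_cast<uintptr_t>(pc);
        auto s = reinterpret_cast<uintptr_t>(r.start);
        return p >= s && p < s + r.length_in_bytes;
      };
      return contains(state.code_range) ||
             contains(state.embedded_code_range) ||
             contains(state.js_entry_stub.code);
    }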
#define CALLBACK_SETTER(ExternalName, Type, InternalName) \
@@ -8790,6 +8761,13 @@ void Isolate::RemoveNearHeapLimitCallback(v8::NearHeapLimitCallback callback,
isolate->heap()->RemoveNearHeapLimitCallback(callback, heap_limit);
}
+void Isolate::AutomaticallyRestoreInitialHeapLimit(double threshold_percent) {
+ DCHECK_GT(threshold_percent, 0.0);
+ DCHECK_LT(threshold_percent, 1.0);
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+ isolate->heap()->AutomaticallyRestoreInitialHeapLimit(threshold_percent);
+}
+
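A sketch of the new knob; the fraction must lie strictly between 0 and 1 (per the DCHECKs above), and the exact restore trigger is internal to the heap:

    void ConfigureHeapLimitRecovery(v8::Isolate* isolate) {
      // After a NearHeapLimitCallback raises the limit, let V8 restore the
      // initial limit automatically once usage drops far enough below it.
      isolate->AutomaticallyRestoreInitialHeapLimit(0.5);
    }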
bool Isolate::IsDead() {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
return isolate->IsDead();
@@ -8824,11 +8802,11 @@ void Isolate::RemoveMessageListeners(MessageCallback that) {
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
i::HandleScope scope(isolate);
i::DisallowHeapAllocation no_gc;
- i::TemplateList* listeners = isolate->heap()->message_listeners();
+ i::TemplateList listeners = isolate->heap()->message_listeners();
for (int i = 0; i < listeners->length(); i++) {
if (listeners->get(i)->IsUndefined(isolate)) continue; // skip deleted ones
- i::FixedArray* listener = i::FixedArray::cast(listeners->get(i));
- i::Foreign* callback_obj = i::Foreign::cast(listener->get(0));
+ i::FixedArray listener = i::FixedArray::cast(listeners->get(i));
+ i::Foreign callback_obj = i::Foreign::cast(listener->get(0));
if (callback_obj->foreign_address() == FUNCTION_ADDR(that)) {
listeners->set(i, i::ReadOnlyRoots(isolate).undefined_value());
}
@@ -8892,48 +8870,48 @@ void Isolate::SetAllowAtomicsWait(bool allow) {
MicrotasksScope::MicrotasksScope(Isolate* isolate, MicrotasksScope::Type type)
: isolate_(reinterpret_cast<i::Isolate*>(isolate)),
run_(type == MicrotasksScope::kRunMicrotasks) {
- auto handle_scope_implementer = isolate_->handle_scope_implementer();
- if (run_) handle_scope_implementer->IncrementMicrotasksScopeDepth();
+ auto* microtask_queue = isolate_->default_microtask_queue();
+ if (run_) microtask_queue->IncrementMicrotasksScopeDepth();
#ifdef DEBUG
- if (!run_) handle_scope_implementer->IncrementDebugMicrotasksScopeDepth();
+ if (!run_) microtask_queue->IncrementDebugMicrotasksScopeDepth();
#endif
}
MicrotasksScope::~MicrotasksScope() {
auto handle_scope_implementer = isolate_->handle_scope_implementer();
+ auto* microtask_queue = isolate_->default_microtask_queue();
if (run_) {
- handle_scope_implementer->DecrementMicrotasksScopeDepth();
+ microtask_queue->DecrementMicrotasksScopeDepth();
if (MicrotasksPolicy::kScoped ==
handle_scope_implementer->microtasks_policy()) {
PerformCheckpoint(reinterpret_cast<Isolate*>(isolate_));
}
}
#ifdef DEBUG
- if (!run_) handle_scope_implementer->DecrementDebugMicrotasksScopeDepth();
+ if (!run_) microtask_queue->DecrementDebugMicrotasksScopeDepth();
#endif
}
void MicrotasksScope::PerformCheckpoint(Isolate* v8Isolate) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8Isolate);
- if (IsExecutionTerminatingCheck(isolate)) return;
- auto handle_scope_implementer = isolate->handle_scope_implementer();
- if (!handle_scope_implementer->GetMicrotasksScopeDepth() &&
- !handle_scope_implementer->HasMicrotasksSuppressions()) {
- isolate->RunMicrotasks();
+ auto* microtask_queue = isolate->default_microtask_queue();
+ if (!microtask_queue->GetMicrotasksScopeDepth() &&
+ !microtask_queue->HasMicrotasksSuppressions()) {
+ microtask_queue->RunMicrotasks(isolate);
}
}
int MicrotasksScope::GetCurrentDepth(Isolate* v8Isolate) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8Isolate);
- return isolate->handle_scope_implementer()->GetMicrotasksScopeDepth();
+ return isolate->default_microtask_queue()->GetMicrotasksScopeDepth();
}
bool MicrotasksScope::IsRunningMicrotasks(Isolate* v8Isolate) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8Isolate);
- return isolate->IsRunningMicrotasks();
+ return isolate->default_microtask_queue()->IsRunningMicrotasks();
}
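Scope-depth tracking likewise moves to the default MicrotaskQueue; behavior under MicrotasksPolicy::kScoped is unchanged from the embedder's view. Sketch:

    void CallWithCheckpoint(v8::Isolate* isolate) {
      v8::MicrotasksScope scope(isolate, v8::MicrotasksScope::kRunMicrotasks);
      // ... call into JS; with MicrotasksPolicy::kScoped, pending microtasks
      // run when the outermost kRunMicrotasks scope unwinds ...
    }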
String::Utf8Value::Utf8Value(v8::Isolate* isolate, v8::Local<v8::Value> obj)
@@ -8979,7 +8957,7 @@ String::Value::~Value() {
i::Isolate* isolate = i::Isolate::Current(); \
LOG_API(isolate, NAME, New); \
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate); \
- i::Object* error; \
+ i::Object error; \
{ \
i::HandleScope scope(isolate); \
i::Handle<i::String> message = Utils::OpenHandle(*raw_message); \
@@ -9027,7 +9005,7 @@ void debug::SetContextId(Local<Context> context, int id) {
}
int debug::GetContextId(Local<Context> context) {
- i::Object* value = Utils::OpenHandle(*context)->debug_context_id();
+ i::Object value = Utils::OpenHandle(*context)->debug_context_id();
return (value->IsSmi()) ? i::Smi::ToInt(value) : 0;
}
@@ -9148,7 +9126,7 @@ std::vector<int> debug::Script::LineEnds() const {
isolate);
std::vector<int> result(line_ends->length());
for (int i = 0; i < line_ends->length(); ++i) {
- i::Smi* line_end = i::Smi::cast(line_ends->get(i));
+ i::Smi line_end = i::Smi::cast(line_ends->get(i));
result[i] = line_end->value();
}
return result;
@@ -9188,7 +9166,7 @@ Maybe<int> debug::Script::ContextId() const {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
i::HandleScope handle_scope(isolate);
i::Handle<i::Script> script = Utils::OpenHandle(this);
- i::Object* value = script->context_data();
+ i::Object value = script->context_data();
if (value->IsSmi()) return Just(i::Smi::ToInt(value));
return Nothing<int>();
}
@@ -9230,7 +9208,7 @@ bool debug::Script::GetPossibleBreakpoints(
i::Handle<i::Script> script = Utils::OpenHandle(this);
if (script->type() == i::Script::TYPE_WASM &&
this->SourceMappingURL().IsEmpty()) {
- i::WasmModuleObject* module_object =
+ i::WasmModuleObject module_object =
i::WasmModuleObject::cast(script->wasm_module_object());
return module_object->GetPossibleBreakpoints(start, end, locations);
}
@@ -9357,7 +9335,7 @@ int debug::WasmScript::NumFunctions() const {
i::DisallowHeapAllocation no_gc;
i::Handle<i::Script> script = Utils::OpenHandle(this);
DCHECK_EQ(i::Script::TYPE_WASM, script->type());
- i::WasmModuleObject* module_object =
+ i::WasmModuleObject module_object =
i::WasmModuleObject::cast(script->wasm_module_object());
const i::wasm::WasmModule* module = module_object->module();
DCHECK_GE(i::kMaxInt, module->functions.size());
@@ -9368,7 +9346,7 @@ int debug::WasmScript::NumImportedFunctions() const {
i::DisallowHeapAllocation no_gc;
i::Handle<i::Script> script = Utils::OpenHandle(this);
DCHECK_EQ(i::Script::TYPE_WASM, script->type());
- i::WasmModuleObject* module_object =
+ i::WasmModuleObject module_object =
i::WasmModuleObject::cast(script->wasm_module_object());
const i::wasm::WasmModule* module = module_object->module();
DCHECK_GE(i::kMaxInt, module->num_imported_functions);
@@ -9380,7 +9358,7 @@ std::pair<int, int> debug::WasmScript::GetFunctionRange(
i::DisallowHeapAllocation no_gc;
i::Handle<i::Script> script = Utils::OpenHandle(this);
DCHECK_EQ(i::Script::TYPE_WASM, script->type());
- i::WasmModuleObject* module_object =
+ i::WasmModuleObject module_object =
i::WasmModuleObject::cast(script->wasm_module_object());
const i::wasm::WasmModule* module = module_object->module();
DCHECK_LE(0, function_index);
@@ -9396,7 +9374,7 @@ uint32_t debug::WasmScript::GetFunctionHash(int function_index) {
i::DisallowHeapAllocation no_gc;
i::Handle<i::Script> script = Utils::OpenHandle(this);
DCHECK_EQ(i::Script::TYPE_WASM, script->type());
- i::WasmModuleObject* module_object =
+ i::WasmModuleObject module_object =
i::WasmModuleObject::cast(script->wasm_module_object());
const i::wasm::WasmModule* module = module_object->module();
DCHECK_LE(0, function_index);
@@ -9415,7 +9393,7 @@ debug::WasmDisassembly debug::WasmScript::DisassembleFunction(
i::DisallowHeapAllocation no_gc;
i::Handle<i::Script> script = Utils::OpenHandle(this);
DCHECK_EQ(i::Script::TYPE_WASM, script->type());
- i::WasmModuleObject* module_object =
+ i::WasmModuleObject module_object =
i::WasmModuleObject::cast(script->wasm_module_object());
return module_object->DisassembleFunction(function_index);
}
@@ -9449,8 +9427,8 @@ void debug::GetLoadedScripts(v8::Isolate* v8_isolate,
{
i::DisallowHeapAllocation no_gc;
i::Script::Iterator iterator(isolate);
- i::Script* script;
- while ((script = iterator.Next()) != nullptr) {
+ for (i::Script script = iterator.Next(); !script.is_null();
+ script = iterator.Next()) {
if (!script->IsUserJavaScript()) continue;
if (script->HasValidSource()) {
i::HandleScope handle_scope(isolate);
@@ -9501,7 +9479,8 @@ void debug::ResetBlackboxedStateCache(Isolate* v8_isolate,
i::DisallowHeapAllocation no_gc;
i::SharedFunctionInfo::ScriptIterator iter(isolate,
*Utils::OpenHandle(*script));
- while (i::SharedFunctionInfo* info = iter.Next()) {
+ for (i::SharedFunctionInfo info = iter.Next(); !info.is_null();
+ info = iter.Next()) {
if (info->HasDebugInfo()) {
info->GetDebugInfo()->set_computed_debug_is_blackboxed(false);
}
@@ -9512,7 +9491,7 @@ int debug::EstimatedValueSize(Isolate* v8_isolate, v8::Local<v8::Value> value) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
i::Handle<i::Object> object = Utils::OpenHandle(*value);
- if (object->IsSmi()) return i::kPointerSize;
+ if (object->IsSmi()) return i::kTaggedSize;
CHECK(object->IsHeapObject());
return i::Handle<i::HeapObject>::cast(object)->Size();
}
@@ -9537,22 +9516,21 @@ v8::MaybeLocal<v8::Array> v8::Object::PreviewEntries(bool* is_key_value) {
i::Handle<i::JSWeakCollection>::cast(object), 0));
}
if (object->IsJSMapIterator()) {
- i::Handle<i::JSMapIterator> it = i::Handle<i::JSMapIterator>::cast(object);
+ i::Handle<i::JSMapIterator> iterator =
+ i::Handle<i::JSMapIterator>::cast(object);
MapAsArrayKind const kind =
- static_cast<MapAsArrayKind>(it->map()->instance_type());
+ static_cast<MapAsArrayKind>(iterator->map()->instance_type());
*is_key_value = kind == MapAsArrayKind::kEntries;
- if (!it->HasMore()) return v8::Array::New(v8_isolate);
- return Utils::ToLocal(
- MapAsArray(isolate, it->table(), i::Smi::ToInt(it->index()), kind));
+ if (!iterator->HasMore()) return v8::Array::New(v8_isolate);
+ return Utils::ToLocal(MapAsArray(isolate, iterator->table(),
+ i::Smi::ToInt(iterator->index()), kind));
}
if (object->IsJSSetIterator()) {
i::Handle<i::JSSetIterator> it = i::Handle<i::JSSetIterator>::cast(object);
- SetAsArrayKind const kind =
- static_cast<SetAsArrayKind>(it->map()->instance_type());
- *is_key_value = kind == SetAsArrayKind::kEntries;
+ *is_key_value = false;
if (!it->HasMore()) return v8::Array::New(v8_isolate);
return Utils::ToLocal(
- SetAsArray(isolate, it->table(), i::Smi::ToInt(it->index()), kind));
+ SetAsArray(isolate, it->table(), i::Smi::ToInt(it->index())));
}
return v8::MaybeLocal<v8::Array>();
}
@@ -9563,20 +9541,8 @@ Local<Function> debug::GetBuiltin(Isolate* v8_isolate, Builtin builtin) {
i::HandleScope handle_scope(isolate);
i::Builtins::Name builtin_id;
switch (builtin) {
- case kObjectKeys:
- builtin_id = i::Builtins::kObjectKeys;
- break;
- case kObjectGetPrototypeOf:
- builtin_id = i::Builtins::kObjectGetPrototypeOf;
- break;
- case kObjectGetOwnPropertyDescriptor:
- builtin_id = i::Builtins::kObjectGetOwnPropertyDescriptor;
- break;
- case kObjectGetOwnPropertyNames:
- builtin_id = i::Builtins::kObjectGetOwnPropertyNames;
- break;
- case kObjectGetOwnPropertySymbols:
- builtin_id = i::Builtins::kObjectGetOwnPropertySymbols;
+ case kStringToLowerCase:
+ builtin_id = i::Builtins::kStringPrototypeToLocaleLowerCase;
break;
default:
UNREACHABLE();
@@ -9584,10 +9550,11 @@ Local<Function> debug::GetBuiltin(Isolate* v8_isolate, Builtin builtin) {
i::Handle<i::String> name = isolate->factory()->empty_string();
i::NewFunctionArgs args = i::NewFunctionArgs::ForBuiltinWithoutPrototype(
- name, builtin_id, i::LanguageMode::kSloppy);
+ name, builtin_id, i::LanguageMode::kStrict);
i::Handle<i::JSFunction> fun = isolate->factory()->NewFunction(args);
- fun->shared()->DontAdaptArguments();
+ fun->shared()->set_internal_formal_parameter_count(0);
+ fun->shared()->set_length(0);
return Utils::ToLocal(handle_scope.CloseAndEscape(fun));
}
@@ -9604,8 +9571,11 @@ debug::ConsoleCallArguments::ConsoleCallArguments(
debug::ConsoleCallArguments::ConsoleCallArguments(
internal::BuiltinArguments& args)
- : v8::FunctionCallbackInfo<v8::Value>(nullptr, &args[0] - 1,
- args.length() - 1) {}
+ : v8::FunctionCallbackInfo<v8::Value>(
+ nullptr,
+ // Drop the first argument (receiver, i.e. the "console" object).
+ args.address_of_arg_at(args.length() > 1 ? 1 : 0),
+ args.length() - 1) {}
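// Worked example (illustrative): for a JS call console.log("a", "b"), the
// BuiltinArguments hold [console, "a", "b"], so the constructor above exposes
// a FunctionCallbackInfo of length 2 starting at "a", skipping the receiver.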
int debug::GetStackFrameId(v8::Local<v8::StackFrame> frame) {
return Utils::OpenHandle(*frame)->id();
@@ -9625,7 +9595,7 @@ v8::Local<v8::StackTrace> debug::GetDetailedStackTrace(
MaybeLocal<debug::Script> debug::GeneratorObject::Script() {
i::Handle<i::JSGeneratorObject> obj = Utils::OpenHandle(this);
- i::Object* maybe_script = obj->function()->shared()->script();
+ i::Object maybe_script = obj->function()->shared()->script();
if (!maybe_script->IsScript()) return MaybeLocal<debug::Script>();
i::Handle<i::Script> script(i::Script::cast(maybe_script), obj->GetIsolate());
return ToApiHandle<debug::Script>(script);
@@ -9639,7 +9609,7 @@ Local<Function> debug::GeneratorObject::Function() {
debug::Location debug::GeneratorObject::SuspendedLocation() {
i::Handle<i::JSGeneratorObject> obj = Utils::OpenHandle(this);
CHECK(obj->is_suspended());
- i::Object* maybe_script = obj->function()->shared()->script();
+ i::Object maybe_script = obj->function()->shared()->script();
if (!maybe_script->IsScript()) return debug::Location();
i::Handle<i::Script> script(i::Script::cast(maybe_script), obj->GetIsolate());
i::Script::PositionInfo info;
@@ -9696,7 +9666,7 @@ void debug::GlobalLexicalScopeNames(
i::Handle<i::ScopeInfo> scope_info(context->scope_info(), isolate);
int local_count = scope_info->ContextLocalCount();
for (int j = 0; j < local_count; ++j) {
- i::String* name = scope_info->ContextLocalName(j);
+ i::String name = scope_info->ContextLocalName(j);
if (i::ScopeInfo::VariableIsSynthetic(name)) continue;
names->Append(Utils::ToLocal(handle(name, isolate)));
}
@@ -9709,41 +9679,6 @@ void debug::SetReturnValue(v8::Isolate* v8_isolate,
isolate->debug()->set_return_value(*Utils::OpenHandle(*value));
}
-int debug::GetNativeAccessorDescriptor(v8::Local<v8::Context> context,
- v8::Local<v8::Object> v8_object,
- v8::Local<v8::Name> v8_name) {
- i::Handle<i::JSReceiver> object = Utils::OpenHandle(*v8_object);
- i::Handle<i::Name> name = Utils::OpenHandle(*v8_name);
- uint32_t index;
- if (name->AsArrayIndex(&index)) {
- return static_cast<int>(debug::NativeAccessorType::None);
- }
- i::LookupIterator it = i::LookupIterator(object->GetIsolate(), object, name,
- i::LookupIterator::OWN);
- if (!it.IsFound()) return static_cast<int>(debug::NativeAccessorType::None);
- if (it.state() != i::LookupIterator::ACCESSOR) {
- return static_cast<int>(debug::NativeAccessorType::None);
- }
- i::Handle<i::Object> structure = it.GetAccessors();
- if (!structure->IsAccessorInfo()) {
- return static_cast<int>(debug::NativeAccessorType::None);
- }
- auto isolate = reinterpret_cast<i::Isolate*>(context->GetIsolate());
- int result = 0;
-#define IS_BUILTIN_ACESSOR(_, name, ...) \
- if (*structure == *isolate->factory()->name##_accessor()) \
- result |= static_cast<int>(debug::NativeAccessorType::IsBuiltin);
- ACCESSOR_INFO_LIST_GENERATOR(IS_BUILTIN_ACESSOR, /* not used */)
-#undef IS_BUILTIN_ACESSOR
- i::Handle<i::AccessorInfo> accessor_info =
- i::Handle<i::AccessorInfo>::cast(structure);
- if (accessor_info->getter())
- result |= static_cast<int>(debug::NativeAccessorType::HasGetter);
- if (accessor_info->setter())
- result |= static_cast<int>(debug::NativeAccessorType::HasSetter);
- return result;
-}
-
int64_t debug::GetNextRandomInt64(v8::Isolate* v8_isolate) {
return reinterpret_cast<i::Isolate*>(v8_isolate)
->random_number_generator()
@@ -10418,7 +10353,6 @@ AllocationProfile* HeapProfiler::GetAllocationProfile() {
return reinterpret_cast<i::HeapProfiler*>(this)->GetAllocationProfile();
}
-
void HeapProfiler::DeleteAllHeapSnapshots() {
reinterpret_cast<i::HeapProfiler*>(this)->DeleteAllSnapshots();
}
@@ -10545,46 +10479,13 @@ void EmbedderHeapTracer::GarbageCollectionForTesting(
kGCCallbackFlagForced);
}
-bool EmbedderHeapTracer::AdvanceTracing(double deadline_in_ms) {
-#if __clang__
-#pragma clang diagnostic push
-#pragma clang diagnostic ignored "-Wdeprecated"
-#endif
- return !this->AdvanceTracing(
- deadline_in_ms, AdvanceTracingActions(std::isinf(deadline_in_ms)
- ? FORCE_COMPLETION
- : DO_NOT_FORCE_COMPLETION));
-#if __clang__
-#pragma clang diagnostic pop
-#endif
-}
-
-void EmbedderHeapTracer::EnterFinalPause(EmbedderStackState stack_state) {
-#if __clang__
-#pragma clang diagnostic push
-#pragma clang diagnostic ignored "-Wdeprecated"
-#endif
- this->EnterFinalPause();
-#if __clang__
-#pragma clang diagnostic pop
-#endif
-}
-
-bool EmbedderHeapTracer::IsTracingDone() {
-// TODO(mlippautz): Implement using "return true" after removing the deprecated
-// call.
-#if __clang__
-#pragma clang diagnostic push
-#pragma clang diagnostic ignored "-Wdeprecated"
-#endif
- return NumberOfWrappersToTrace() == 0;
-#if __clang__
-#pragma clang diagnostic pop
-#endif
-}
-
namespace internal {
+const size_t HandleScopeImplementer::kEnteredContextsOffset =
+ offsetof(HandleScopeImplementer, entered_contexts_);
+const size_t HandleScopeImplementer::kIsMicrotaskContextOffset =
+ offsetof(HandleScopeImplementer, is_microtask_context_);
+
void HandleScopeImplementer::FreeThreadResources() {
Free();
}
@@ -10619,19 +10520,23 @@ void HandleScopeImplementer::IterateThis(RootVisitor* v) {
#endif
// Iterate over all handles in the blocks except for the last.
for (int i = static_cast<int>(blocks()->size()) - 2; i >= 0; --i) {
- Object** block = blocks()->at(i);
+ Address* block = blocks()->at(i);
+ // Cast possibly-unrelated pointers to plain Address before comparing them
+ // to avoid undefined behavior.
if (last_handle_before_deferred_block_ != nullptr &&
- (last_handle_before_deferred_block_ <= &block[kHandleBlockSize]) &&
- (last_handle_before_deferred_block_ >= block)) {
- v->VisitRootPointers(Root::kHandleScope, nullptr, block,
- last_handle_before_deferred_block_);
+ (reinterpret_cast<Address>(last_handle_before_deferred_block_) <=
+ reinterpret_cast<Address>(&block[kHandleBlockSize])) &&
+ (reinterpret_cast<Address>(last_handle_before_deferred_block_) >=
+ reinterpret_cast<Address>(block))) {
+ v->VisitRootPointers(Root::kHandleScope, nullptr, FullObjectSlot(block),
+ FullObjectSlot(last_handle_before_deferred_block_));
DCHECK(!found_block_before_deferred);
#ifdef DEBUG
found_block_before_deferred = true;
#endif
} else {
- v->VisitRootPointers(Root::kHandleScope, nullptr, block,
- &block[kHandleBlockSize]);
+ v->VisitRootPointers(Root::kHandleScope, nullptr, FullObjectSlot(block),
+ FullObjectSlot(&block[kHandleBlockSize]));
}
}
@@ -10640,21 +10545,19 @@ void HandleScopeImplementer::IterateThis(RootVisitor* v) {
// Iterate over live handles in the last block (if any).
if (!blocks()->empty()) {
- v->VisitRootPointers(Root::kHandleScope, nullptr, blocks()->back(),
- handle_scope_data_.next);
+ v->VisitRootPointers(Root::kHandleScope, nullptr,
+ FullObjectSlot(blocks()->back()),
+ FullObjectSlot(handle_scope_data_.next));
}
- DetachableVector<Context*>* context_lists[2] = {&saved_contexts_,
- &entered_contexts_};
+ DetachableVector<Context>* context_lists[2] = {&saved_contexts_,
+ &entered_contexts_};
for (unsigned i = 0; i < arraysize(context_lists); i++) {
+ context_lists[i]->shrink_to_fit();
if (context_lists[i]->empty()) continue;
- Object** start = reinterpret_cast<Object**>(&context_lists[i]->front());
+ FullObjectSlot start(&context_lists[i]->front());
v->VisitRootPointers(Root::kHandleScope, nullptr, start,
- start + context_lists[i]->size());
- }
- if (microtask_context_) {
- v->VisitRootPointer(Root::kHandleScope, nullptr,
- reinterpret_cast<Object**>(&microtask_context_));
+ start + static_cast<int>(context_lists[i]->size()));
}
}
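// Visitation contract assumed above (a sketch; the exact declaration lives
// elsewhere in V8):
//
//   void RootVisitor::VisitRootPointers(Root root, const char* description,
//                                       FullObjectSlot start,
//                                       FullObjectSlot end);
//
// where [start, end) is a range of full-width object slots, replacing the
// former raw Object** begin/end pair.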
@@ -10671,14 +10574,13 @@ char* HandleScopeImplementer::Iterate(RootVisitor* v, char* storage) {
return storage + ArchiveSpacePerThread();
}
-
-DeferredHandles* HandleScopeImplementer::Detach(Object** prev_limit) {
+DeferredHandles* HandleScopeImplementer::Detach(Address* prev_limit) {
DeferredHandles* deferred =
new DeferredHandles(isolate()->handle_scope_data()->next, isolate());
while (!blocks_.empty()) {
- Object** block_start = blocks_.back();
- Object** block_limit = &block_start[kHandleBlockSize];
+ Address* block_start = blocks_.back();
+ Address* block_limit = &block_start[kHandleBlockSize];
// We should not need to check for SealHandleScope here. Assert this.
DCHECK(prev_limit == block_limit ||
!(block_start <= prev_limit && prev_limit <= block_limit));
@@ -10720,15 +10622,23 @@ DeferredHandles::~DeferredHandles() {
void DeferredHandles::Iterate(RootVisitor* v) {
DCHECK(!blocks_.empty());
- DCHECK((first_block_limit_ >= blocks_.front()) &&
- (first_block_limit_ <= &(blocks_.front())[kHandleBlockSize]));
+ // Comparing pointers that do not point into the same array is undefined
+ // behavior, which means if we didn't cast everything to plain Address
+ // before comparing, the compiler would be allowed to assume that all
+ // comparisons evaluate to true and drop the entire check.
+ DCHECK((reinterpret_cast<Address>(first_block_limit_) >=
+ reinterpret_cast<Address>(blocks_.front())) &&
+ (reinterpret_cast<Address>(first_block_limit_) <=
+ reinterpret_cast<Address>(&(blocks_.front())[kHandleBlockSize])));
- v->VisitRootPointers(Root::kHandleScope, nullptr, blocks_.front(),
- first_block_limit_);
+ v->VisitRootPointers(Root::kHandleScope, nullptr,
+ FullObjectSlot(blocks_.front()),
+ FullObjectSlot(first_block_limit_));
for (size_t i = 1; i < blocks_.size(); i++) {
- v->VisitRootPointers(Root::kHandleScope, nullptr, blocks_[i],
- &blocks_[i][kHandleBlockSize]);
+ v->VisitRootPointers(Root::kHandleScope, nullptr,
+ FullObjectSlot(blocks_[i]),
+ FullObjectSlot(&blocks_[i][kHandleBlockSize]));
}
}
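// A minimal sketch of the UB-free range check used above (the helper name
// IsWithinBlock is hypothetical, not part of this file):
//
//   bool IsWithinBlock(Address* p, Address* begin, Address* end) {
//     Address pa = reinterpret_cast<Address>(p);
//     return reinterpret_cast<Address>(begin) <= pa &&
//            pa <= reinterpret_cast<Address>(end);
//   }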
diff --git a/deps/v8/src/api.h b/deps/v8/src/api.h
index e5f5c7da70..d9a0efbf2a 100644
--- a/deps/v8/src/api.h
+++ b/deps/v8/src/api.h
@@ -24,6 +24,10 @@
namespace v8 {
+namespace internal {
+class JSArrayBufferView;
+} // namespace internal
+
// Constants used in the implementation of the API. The most natural thing
// would usually be to place these with the classes that use them, but
// we want to keep them out of v8.h because it is an externally
@@ -37,10 +41,10 @@ class Consts {
};
template <typename T>
-inline T ToCData(v8::internal::Object* obj);
+inline T ToCData(v8::internal::Object obj);
template <>
-inline v8::internal::Address ToCData(v8::internal::Object* obj);
+inline v8::internal::Address ToCData(v8::internal::Object obj);
template <typename T>
inline v8::internal::Handle<v8::internal::Object> FromCData(
@@ -231,22 +235,22 @@ class Utils {
static inline Local<ScriptOrModule> ScriptOrModuleToLocal(
v8::internal::Handle<v8::internal::Script> obj);
-#define DECLARE_OPEN_HANDLE(From, To) \
- static inline v8::internal::Handle<v8::internal::To> \
- OpenHandle(const From* that, bool allow_empty_handle = false);
+#define DECLARE_OPEN_HANDLE(From, To) \
+ static inline v8::internal::Handle<v8::internal::To> OpenHandle( \
+ const From* that, bool allow_empty_handle = false);
-OPEN_HANDLE_LIST(DECLARE_OPEN_HANDLE)
+ OPEN_HANDLE_LIST(DECLARE_OPEN_HANDLE)
#undef DECLARE_OPEN_HANDLE
-template <class From, class To>
-static inline Local<To> Convert(v8::internal::Handle<From> obj);
+ template <class From, class To>
+ static inline Local<To> Convert(v8::internal::Handle<From> obj);
-template <class T>
-static inline v8::internal::Handle<v8::internal::Object> OpenPersistent(
- const v8::Persistent<T>& persistent) {
- return v8::internal::Handle<v8::internal::Object>(
- reinterpret_cast<v8::internal::Object**>(persistent.val_));
+ template <class T>
+ static inline v8::internal::Handle<v8::internal::Object> OpenPersistent(
+ const v8::Persistent<T>& persistent) {
+ return v8::internal::Handle<v8::internal::Object>(
+ reinterpret_cast<v8::internal::Address*>(persistent.val_));
}
template <class T>
@@ -260,11 +264,15 @@ static inline v8::internal::Handle<v8::internal::Object> OpenPersistent(
return OpenHandle(*handle);
}
+ static inline CompiledWasmModule Convert(
+ std::shared_ptr<i::wasm::NativeModule> native_module) {
+ return CompiledWasmModule{std::move(native_module)};
+ }
+
private:
static void ReportApiFailure(const char* location, const char* message);
};
-
template <class T>
inline T* ToApi(v8::internal::Handle<v8::internal::Object> obj) {
return reinterpret_cast<T*>(obj.location());
@@ -295,7 +303,7 @@ class V8_EXPORT_PRIVATE DeferredHandles {
~DeferredHandles();
private:
- DeferredHandles(Object** first_block_limit, Isolate* isolate)
+ DeferredHandles(Address* first_block_limit, Isolate* isolate)
: next_(nullptr),
previous_(nullptr),
first_block_limit_(first_block_limit),
@@ -305,10 +313,10 @@ class V8_EXPORT_PRIVATE DeferredHandles {
void Iterate(RootVisitor* v);
- std::vector<Object**> blocks_;
+ std::vector<Address*> blocks_;
DeferredHandles* next_;
DeferredHandles* previous_;
- Object** first_block_limit_;
+ Address* first_block_limit_;
Isolate* isolate_;
friend class HandleScopeImplementer;
@@ -327,18 +335,26 @@ class V8_EXPORT_PRIVATE DeferredHandles {
// data.
class HandleScopeImplementer {
public:
+ class EnteredContextRewindScope {
+ public:
+ explicit EnteredContextRewindScope(HandleScopeImplementer* hsi)
+ : hsi_(hsi), saved_entered_context_count_(hsi->EnteredContextCount()) {}
+
+ ~EnteredContextRewindScope() {
+ DCHECK_LE(saved_entered_context_count_, hsi_->EnteredContextCount());
+ while (saved_entered_context_count_ < hsi_->EnteredContextCount())
+ hsi_->LeaveContext();
+ }
+
+ private:
+ HandleScopeImplementer* hsi_;
+ size_t saved_entered_context_count_;
+ };
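// Illustrative use of the scope above (the call site is hypothetical): any
// contexts pushed while the scope is alive are popped when it dies:
//
//   {
//     EnteredContextRewindScope rewind(isolate->handle_scope_implementer());
//     // ... EnterContext(...) / EnterMicrotaskContext(...) ...
//   }  // entered contexts beyond the saved count are unwound here.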
+
explicit HandleScopeImplementer(Isolate* isolate)
: isolate_(isolate),
- microtask_context_(nullptr),
spare_(nullptr),
call_depth_(0),
- microtasks_depth_(0),
- microtasks_suppressions_(0),
- entered_contexts_count_(0),
- entered_context_count_during_microtasks_(0),
-#ifdef DEBUG
- debug_microtasks_depth_(0),
-#endif
microtasks_policy_(v8::MicrotasksPolicy::kAuto),
last_handle_before_deferred_block_(nullptr) {
}
@@ -357,75 +373,51 @@ class HandleScopeImplementer {
void Iterate(v8::internal::RootVisitor* v);
static char* Iterate(v8::internal::RootVisitor* v, char* data);
- inline internal::Object** GetSpareOrNewBlock();
- inline void DeleteExtensions(internal::Object** prev_limit);
+ inline internal::Address* GetSpareOrNewBlock();
+ inline void DeleteExtensions(internal::Address* prev_limit);
// Call depth represents nested v8 api calls.
inline void IncrementCallDepth() {call_depth_++;}
inline void DecrementCallDepth() {call_depth_--;}
inline bool CallDepthIsZero() { return call_depth_ == 0; }
- // Microtasks scope depth represents nested scopes controlling microtasks
- // invocation, which happens when depth reaches zero.
- inline void IncrementMicrotasksScopeDepth() {microtasks_depth_++;}
- inline void DecrementMicrotasksScopeDepth() {microtasks_depth_--;}
- inline int GetMicrotasksScopeDepth() { return microtasks_depth_; }
-
- // Possibly nested microtasks suppression scopes prevent microtasks
- // from running.
- inline void IncrementMicrotasksSuppressions() {microtasks_suppressions_++;}
- inline void DecrementMicrotasksSuppressions() {microtasks_suppressions_--;}
- inline bool HasMicrotasksSuppressions() { return !!microtasks_suppressions_; }
-
-#ifdef DEBUG
- // In debug we check that calls not intended to invoke microtasks are
- // still correctly wrapped with microtask scopes.
- inline void IncrementDebugMicrotasksScopeDepth() {debug_microtasks_depth_++;}
- inline void DecrementDebugMicrotasksScopeDepth() {debug_microtasks_depth_--;}
- inline bool DebugMicrotasksScopeDepthIsZero() {
- return debug_microtasks_depth_ == 0;
- }
-#endif
+ inline void EnterContext(Context context);
+ inline void LeaveContext();
+ inline bool LastEnteredContextWas(Context context);
+ inline size_t EnteredContextCount() const { return entered_contexts_.size(); }
+
+ inline void EnterMicrotaskContext(Context context);
inline void set_microtasks_policy(v8::MicrotasksPolicy policy);
inline v8::MicrotasksPolicy microtasks_policy() const;
- inline void EnterContext(Handle<Context> context);
- inline void LeaveContext();
- inline bool LastEnteredContextWas(Handle<Context> context);
-
// Returns the last entered context or an empty handle if no
// contexts have been entered.
inline Handle<Context> LastEnteredContext();
+ inline Handle<Context> LastEnteredOrMicrotaskContext();
- inline void EnterMicrotaskContext(Handle<Context> context);
- inline void LeaveMicrotaskContext();
- inline Handle<Context> MicrotaskContext();
- inline bool MicrotaskContextIsLastEnteredContext() const {
- return microtask_context_ &&
- entered_context_count_during_microtasks_ == entered_contexts_.size();
- }
-
- inline void SaveContext(Context* context);
- inline Context* RestoreContext();
+ inline void SaveContext(Context context);
+ inline Context RestoreContext();
inline bool HasSavedContexts();
- inline DetachableVector<Object**>* blocks() { return &blocks_; }
+ inline DetachableVector<Address*>* blocks() { return &blocks_; }
Isolate* isolate() const { return isolate_; }
- void ReturnBlock(Object** block) {
+ void ReturnBlock(Address* block) {
DCHECK_NOT_NULL(block);
if (spare_ != nullptr) DeleteArray(spare_);
spare_ = block;
}
+ static const size_t kEnteredContextsOffset;
+ static const size_t kIsMicrotaskContextOffset;
+
private:
void ResetAfterArchive() {
blocks_.detach();
entered_contexts_.detach();
+ is_microtask_context_.detach();
saved_contexts_.detach();
- microtask_context_ = nullptr;
- entered_context_count_during_microtasks_ = 0;
spare_ = nullptr;
last_handle_before_deferred_block_ = nullptr;
call_depth_ = 0;
@@ -434,11 +426,12 @@ class HandleScopeImplementer {
void Free() {
DCHECK(blocks_.empty());
DCHECK(entered_contexts_.empty());
+ DCHECK(is_microtask_context_.empty());
DCHECK(saved_contexts_.empty());
- DCHECK(!microtask_context_);
blocks_.free();
entered_contexts_.free();
+ is_microtask_context_.free();
saved_contexts_.free();
if (spare_ != nullptr) {
DeleteArray(spare_);
@@ -448,26 +441,27 @@ class HandleScopeImplementer {
}
void BeginDeferredScope();
- DeferredHandles* Detach(Object** prev_limit);
+ DeferredHandles* Detach(Address* prev_limit);
Isolate* isolate_;
- DetachableVector<Object**> blocks_;
+ DetachableVector<Address*> blocks_;
+
// Used as a stack to keep track of entered contexts.
- DetachableVector<Context*> entered_contexts_;
+ // If the |i|th item of |entered_contexts_| was added by
+ // EnterMicrotaskContext, `is_microtask_context_[i]` is 1.
+ // TODO(tzik): Remove |is_microtask_context_| after the deprecated
+ // v8::Isolate::GetEnteredContext() is removed.
+ DetachableVector<Context> entered_contexts_;
+ DetachableVector<int8_t> is_microtask_context_;
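// Invariant kept by the two vectors above (see EnterContext and
// EnterMicrotaskContext below): they always have equal size. For example,
// after EnterContext(a); EnterMicrotaskContext(b); they hold {a, b} and
// {0, 1} respectively.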
+
// Used as a stack to keep track of saved contexts.
- DetachableVector<Context*> saved_contexts_;
- Context* microtask_context_;
- Object** spare_;
+ DetachableVector<Context> saved_contexts_;
+ Address* spare_;
int call_depth_;
- int microtasks_depth_;
- int microtasks_suppressions_;
- size_t entered_contexts_count_;
- size_t entered_context_count_during_microtasks_;
-#ifdef DEBUG
- int debug_microtasks_depth_;
-#endif
+
v8::MicrotasksPolicy microtasks_policy_;
- Object** last_handle_before_deferred_block_;
+
+ Address* last_handle_before_deferred_block_;
// This is only used for threading support.
HandleScopeData handle_scope_data_;
@@ -482,21 +476,6 @@ class HandleScopeImplementer {
DISALLOW_COPY_AND_ASSIGN(HandleScopeImplementer);
};
-class HandleScopeImplementerOffsets {
- public:
- enum Offsets {
- kMicrotaskContext = offsetof(HandleScopeImplementer, microtask_context_),
- kEnteredContexts = offsetof(HandleScopeImplementer, entered_contexts_),
- kEnteredContextsCount =
- offsetof(HandleScopeImplementer, entered_contexts_count_),
- kEnteredContextCountDuringMicrotasks = offsetof(
- HandleScopeImplementer, entered_context_count_during_microtasks_)
- };
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(HandleScopeImplementerOffsets);
-};
-
const int kHandleBlockSize = v8::internal::KB - 2; // fit in one page
@@ -510,14 +489,12 @@ v8::MicrotasksPolicy HandleScopeImplementer::microtasks_policy() const {
return microtasks_policy_;
}
-
-void HandleScopeImplementer::SaveContext(Context* context) {
+void HandleScopeImplementer::SaveContext(Context context) {
saved_contexts_.push_back(context);
}
-
-Context* HandleScopeImplementer::RestoreContext() {
- Context* last_context = saved_contexts_.back();
+Context HandleScopeImplementer::RestoreContext() {
+ Context last_context = saved_contexts_.back();
saved_contexts_.pop_back();
return last_context;
}
@@ -527,49 +504,50 @@ bool HandleScopeImplementer::HasSavedContexts() {
return !saved_contexts_.empty();
}
-
-void HandleScopeImplementer::EnterContext(Handle<Context> context) {
- entered_contexts_.push_back(*context);
- entered_contexts_count_ = entered_contexts_.size();
+void HandleScopeImplementer::EnterContext(Context context) {
+ DCHECK_EQ(entered_contexts_.size(), is_microtask_context_.size());
+ entered_contexts_.push_back(context);
+ is_microtask_context_.push_back(0);
}
void HandleScopeImplementer::LeaveContext() {
+ DCHECK(!entered_contexts_.empty());
+ DCHECK_EQ(entered_contexts_.size(), is_microtask_context_.size());
entered_contexts_.pop_back();
- entered_contexts_count_ = entered_contexts_.size();
+ is_microtask_context_.pop_back();
}
-bool HandleScopeImplementer::LastEnteredContextWas(Handle<Context> context) {
- return !entered_contexts_.empty() && entered_contexts_.back() == *context;
+bool HandleScopeImplementer::LastEnteredContextWas(Context context) {
+ return !entered_contexts_.empty() && entered_contexts_.back() == context;
}
-void HandleScopeImplementer::EnterMicrotaskContext(Handle<Context> context) {
- DCHECK(!microtask_context_);
- microtask_context_ = *context;
- entered_context_count_during_microtasks_ = entered_contexts_.size();
-}
-
-void HandleScopeImplementer::LeaveMicrotaskContext() {
- microtask_context_ = nullptr;
- entered_context_count_during_microtasks_ = 0;
+void HandleScopeImplementer::EnterMicrotaskContext(Context context) {
+ DCHECK_EQ(entered_contexts_.size(), is_microtask_context_.size());
+ entered_contexts_.push_back(context);
+ is_microtask_context_.push_back(1);
}
// If there's a spare block, use it for growing the current scope.
-internal::Object** HandleScopeImplementer::GetSpareOrNewBlock() {
- internal::Object** block =
+internal::Address* HandleScopeImplementer::GetSpareOrNewBlock() {
+ internal::Address* block =
(spare_ != nullptr) ? spare_
- : NewArray<internal::Object*>(kHandleBlockSize);
+ : NewArray<internal::Address>(kHandleBlockSize);
spare_ = nullptr;
return block;
}
-
-void HandleScopeImplementer::DeleteExtensions(internal::Object** prev_limit) {
+void HandleScopeImplementer::DeleteExtensions(internal::Address* prev_limit) {
while (!blocks_.empty()) {
- internal::Object** block_start = blocks_.back();
- internal::Object** block_limit = block_start + kHandleBlockSize;
+ internal::Address* block_start = blocks_.back();
+ internal::Address* block_limit = block_start + kHandleBlockSize;
// SealHandleScope may make prev_limit point inside the block.
- if (block_start <= prev_limit && prev_limit <= block_limit) {
+ // Cast possibly-unrelated pointers to plain Address before comparing them
+ // to avoid undefined behavior.
+ if (reinterpret_cast<Address>(block_start) <=
+ reinterpret_cast<Address>(prev_limit) &&
+ reinterpret_cast<Address>(prev_limit) <=
+ reinterpret_cast<Address>(block_limit)) {
#ifdef ENABLE_HANDLE_ZAPPING
internal::HandleScope::ZapRange(prev_limit, block_limit);
#endif
diff --git a/deps/v8/src/arguments-inl.h b/deps/v8/src/arguments-inl.h
index c1a18ab82f..ad2b5ca87c 100644
--- a/deps/v8/src/arguments-inl.h
+++ b/deps/v8/src/arguments-inl.h
@@ -18,7 +18,9 @@ Handle<S> Arguments::at(int index) {
return Handle<S>::cast(at<Object>(index));
}
-int Arguments::smi_at(int index) { return Smi::ToInt((*this)[index]); }
+int Arguments::smi_at(int index) {
+ return Smi::ToInt(Object(*address_of_arg_at(index)));
+}
double Arguments::number_at(int index) { return (*this)[index]->Number(); }
diff --git a/deps/v8/src/arguments.cc b/deps/v8/src/arguments.cc
index d246aadb95..815f5de577 100644
--- a/deps/v8/src/arguments.cc
+++ b/deps/v8/src/arguments.cc
@@ -3,7 +3,6 @@
// found in the LICENSE file.
#include "src/arguments.h"
-#include "src/objects-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/arguments.h b/deps/v8/src/arguments.h
index db1ee5467c..920ef7c65d 100644
--- a/deps/v8/src/arguments.h
+++ b/deps/v8/src/arguments.h
@@ -6,7 +6,9 @@
#define V8_ARGUMENTS_H_
#include "src/allocation.h"
+#include "src/handles.h"
#include "src/objects.h"
+#include "src/objects/slots.h"
#include "src/tracing/trace-event.h"
namespace v8 {
@@ -20,7 +22,7 @@ namespace internal {
// that inside the C++ function, the parameters passed can
// be accessed conveniently:
//
-// Object* Runtime_function(Arguments args) {
+// Object Runtime_function(Arguments args) {
// ... use args[i] here ...
// }
//
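// A fuller sketch under the new value-type convention (Runtime_Identity is a
// hypothetical example, not a real runtime function):
//
//   RUNTIME_FUNCTION(Runtime_Identity) {
//     HandleScope scope(isolate);
//     DCHECK_EQ(1, args.length());
//     return args[0];  // an Object; the macro converts it to an Address.
//   }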
@@ -29,17 +31,12 @@ namespace internal {
class Arguments {
public:
- Arguments(int length, Object** arguments)
+ Arguments(int length, Address* arguments)
: length_(length), arguments_(arguments) {
DCHECK_GE(length_, 0);
}
- Object*& operator[] (int index) {
- DCHECK_GE(index, 0);
- DCHECK_LT(static_cast<uint32_t>(index), static_cast<uint32_t>(length_));
- return *(reinterpret_cast<Object**>(reinterpret_cast<intptr_t>(arguments_) -
- index * kPointerSize));
- }
+ Object operator[](int index) { return Object(*address_of_arg_at(index)); }
template <class S = Object>
inline Handle<S> at(int index);
@@ -48,24 +45,35 @@ class Arguments {
inline double number_at(int index);
- // Get the total number of arguments including the receiver.
- int length() const { return static_cast<int>(length_); }
+ inline void set_at(int index, Object value) {
+ *address_of_arg_at(index) = value->ptr();
+ }
- Object** arguments() { return arguments_; }
+ inline FullObjectSlot slot_at(int index) {
+ return FullObjectSlot(address_of_arg_at(index));
+ }
- Object** lowest_address() { return &this->operator[](length() - 1); }
+ inline Address* address_of_arg_at(int index) {
+ DCHECK_LT(static_cast<uint32_t>(index), static_cast<uint32_t>(length_));
+ return reinterpret_cast<Address*>(reinterpret_cast<Address>(arguments_) -
+ index * kSystemPointerSize);
+ }
+
+ // Get the total number of arguments including the receiver.
+ int length() const { return static_cast<int>(length_); }
- Object** highest_address() { return &this->operator[](0); }
+ // Arguments on the stack are in reverse order (compared to an array).
+ FullObjectSlot first_slot() { return slot_at(length() - 1); }
+ FullObjectSlot last_slot() { return slot_at(0); }
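// Layout sketch (illustrative): arguments are stored at decreasing addresses,
// with arguments_ pointing at slot 0. For length() == 3:
//
//   address_of_arg_at(0) == arguments_                            // highest
//   address_of_arg_at(2) == arguments_ - 2 * kSystemPointerSize   // lowest
//
// hence first_slot() (index length() - 1) is the lowest address and
// last_slot() (index 0) the highest.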
private:
intptr_t length_;
- Object** arguments_;
+ Address* arguments_;
};
template <>
inline Handle<Object> Arguments::at(int index) {
- Object** value = &((*this)[index]);
- return Handle<Object>(value);
+ return Handle<Object>(address_of_arg_at(index));
}
double ClobberDoubleRegisters(double x1, double x2, double x3, double x4);
@@ -78,33 +86,40 @@ double ClobberDoubleRegisters(double x1, double x2, double x3, double x4);
// TODO(cbruni): add global flag to check whether any tracing events have been
// enabled.
-#define RUNTIME_FUNCTION_RETURNS_TYPE(Type, Name) \
- static V8_INLINE Type __RT_impl_##Name(Arguments args, Isolate* isolate); \
+#define RUNTIME_FUNCTION_RETURNS_TYPE(Type, InternalType, Convert, Name) \
+ static V8_INLINE InternalType __RT_impl_##Name(Arguments args, \
+ Isolate* isolate); \
\
- V8_NOINLINE static Type Stats_##Name(int args_length, Object** args_object, \
+ V8_NOINLINE static Type Stats_##Name(int args_length, Address* args_object, \
Isolate* isolate) { \
RuntimeCallTimerScope timer(isolate, RuntimeCallCounterId::k##Name); \
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.runtime"), \
"V8.Runtime_" #Name); \
Arguments args(args_length, args_object); \
- return __RT_impl_##Name(args, isolate); \
+ return Convert(__RT_impl_##Name(args, isolate)); \
} \
\
- Type Name(int args_length, Object** args_object, Isolate* isolate) { \
- DCHECK(isolate->context() == nullptr || isolate->context()->IsContext()); \
+ Type Name(int args_length, Address* args_object, Isolate* isolate) { \
+ DCHECK(isolate->context().is_null() || isolate->context()->IsContext()); \
CLOBBER_DOUBLE_REGISTERS(); \
if (V8_UNLIKELY(FLAG_runtime_stats)) { \
return Stats_##Name(args_length, args_object, isolate); \
} \
Arguments args(args_length, args_object); \
- return __RT_impl_##Name(args, isolate); \
+ return Convert(__RT_impl_##Name(args, isolate)); \
} \
\
- static Type __RT_impl_##Name(Arguments args, Isolate* isolate)
+ static InternalType __RT_impl_##Name(Arguments args, Isolate* isolate)
+
+#define CONVERT_OBJECT(x) (x)->ptr()
+#define CONVERT_OBJECTPAIR(x) (x)
+
+#define RUNTIME_FUNCTION(Name) \
+ RUNTIME_FUNCTION_RETURNS_TYPE(Address, Object, CONVERT_OBJECT, Name)
-#define RUNTIME_FUNCTION(Name) RUNTIME_FUNCTION_RETURNS_TYPE(Object*, Name)
-#define RUNTIME_FUNCTION_RETURN_PAIR(Name) \
- RUNTIME_FUNCTION_RETURNS_TYPE(ObjectPair, Name)
+#define RUNTIME_FUNCTION_RETURN_PAIR(Name) \
+ RUNTIME_FUNCTION_RETURNS_TYPE(ObjectPair, ObjectPair, CONVERT_OBJECTPAIR, \
+ Name)
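// Expansion sketch for RUNTIME_FUNCTION(Runtime_Foo) (illustrative name):
// the externally visible symbol becomes
//
//   Address Runtime_Foo(int args_length, Address* args_object,
//                       Isolate* isolate);
//
// while the body the author writes is the Object-returning
// __RT_impl_Runtime_Foo, whose result is unwrapped by CONVERT_OBJECT,
// i.e. (result)->ptr().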
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/arm/assembler-arm-inl.h b/deps/v8/src/arm/assembler-arm-inl.h
index 68ea6f3210..041c030933 100644
--- a/deps/v8/src/arm/assembler-arm-inl.h
+++ b/deps/v8/src/arm/assembler-arm-inl.h
@@ -42,6 +42,7 @@
#include "src/assembler.h"
#include "src/debug/debug.h"
#include "src/objects-inl.h"
+#include "src/objects/smi.h"
namespace v8 {
namespace internal {
@@ -75,14 +76,15 @@ Address RelocInfo::target_address() {
}
Address RelocInfo::target_address_address() {
- DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_) || IsWasmCall(rmode_) ||
- IsEmbeddedObject(rmode_) || IsExternalReference(rmode_) ||
- IsOffHeapTarget(rmode_));
+ DCHECK(HasTargetAddressAddress());
if (Assembler::IsMovW(Memory<int32_t>(pc_))) {
return pc_;
- } else {
- DCHECK(Assembler::IsLdrPcImmediateOffset(Memory<int32_t>(pc_)));
+ } else if (Assembler::IsLdrPcImmediateOffset(Memory<int32_t>(pc_))) {
return constant_pool_entry_address();
+ } else {
+ DCHECK(Assembler::IsBOrBlPcImmediateOffset(Memory<int32_t>(pc_)));
+ DCHECK(IsRelativeCodeTarget(rmode_));
+ return pc_;
}
}
@@ -97,34 +99,32 @@ int RelocInfo::target_address_size() {
return kPointerSize;
}
-HeapObject* RelocInfo::target_object() {
+HeapObject RelocInfo::target_object() {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- return HeapObject::cast(reinterpret_cast<Object*>(
- Assembler::target_address_at(pc_, constant_pool_)));
+ return HeapObject::cast(
+ Object(Assembler::target_address_at(pc_, constant_pool_)));
}
Handle<HeapObject> RelocInfo::target_object_handle(Assembler* origin) {
if (IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT) {
- return Handle<HeapObject>(reinterpret_cast<HeapObject**>(
+ return Handle<HeapObject>(reinterpret_cast<Address*>(
Assembler::target_address_at(pc_, constant_pool_)));
}
DCHECK(IsRelativeCodeTarget(rmode_));
return origin->relative_code_target_object_handle_at(pc_);
}
-void RelocInfo::set_target_object(Heap* heap, HeapObject* target,
+void RelocInfo::set_target_object(Heap* heap, HeapObject target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- Assembler::set_target_address_at(pc_, constant_pool_,
- reinterpret_cast<Address>(target),
+ Assembler::set_target_address_at(pc_, constant_pool_, target->ptr(),
icache_flush_mode);
- if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != nullptr) {
+ if (write_barrier_mode == UPDATE_WRITE_BARRIER && !host().is_null()) {
WriteBarrierForCode(host(), this, target);
}
}
-
Address RelocInfo::target_external_reference() {
DCHECK(rmode_ == EXTERNAL_REFERENCE);
return Assembler::target_address_at(pc_, constant_pool_);
@@ -213,8 +213,8 @@ Operand::Operand(const ExternalReference& f)
value_.immediate = static_cast<int32_t>(f.address());
}
-Operand::Operand(Smi* value) : rmode_(RelocInfo::NONE) {
- value_.immediate = reinterpret_cast<intptr_t>(value);
+Operand::Operand(Smi value) : rmode_(RelocInfo::NONE) {
+ value_.immediate = static_cast<intptr_t>(value.ptr());
}
Operand::Operand(Register rm) : rm_(rm), shift_op_(LSL), shift_imm_(0) {}
@@ -297,7 +297,8 @@ Address Assembler::return_address_from_call_start(Address pc) {
}
void Assembler::deserialization_set_special_target_at(
- Address constant_pool_entry, Code* code, Address target) {
+ Address constant_pool_entry, Code code, Address target) {
+ DCHECK(!Builtins::IsIsolateIndependentBuiltin(code));
Memory<Address>(constant_pool_entry) = target;
}
diff --git a/deps/v8/src/arm/assembler-arm.cc b/deps/v8/src/arm/assembler-arm.cc
index 758fcd1a68..a994b6907d 100644
--- a/deps/v8/src/arm/assembler-arm.cc
+++ b/deps/v8/src/arm/assembler-arm.cc
@@ -42,7 +42,6 @@
#include "src/assembler-inl.h"
#include "src/base/bits.h"
#include "src/base/cpu.h"
-#include "src/code-stubs.h"
#include "src/deoptimizer.h"
#include "src/macro-assembler.h"
#include "src/objects-inl.h"
@@ -339,23 +338,6 @@ bool RelocInfo::IsInConstantPool() {
return Assembler::is_constant_pool_load(pc_);
}
-int RelocInfo::GetDeoptimizationId(Isolate* isolate, DeoptimizeKind kind) {
- DCHECK(IsRuntimeEntry(rmode_));
- return Deoptimizer::GetDeoptimizationId(isolate, target_address(), kind);
-}
-
-void RelocInfo::set_js_to_wasm_address(Address address,
- ICacheFlushMode icache_flush_mode) {
- DCHECK_EQ(rmode_, JS_TO_WASM_CALL);
- Assembler::set_target_address_at(pc_, constant_pool_, address,
- icache_flush_mode);
-}
-
-Address RelocInfo::js_to_wasm_address() const {
- DCHECK_EQ(rmode_, JS_TO_WASM_CALL);
- return Assembler::target_address_at(pc_, constant_pool_);
-}
-
uint32_t RelocInfo::wasm_call_tag() const {
DCHECK(rmode_ == WASM_CALL || rmode_ == WASM_STUB_CALL);
return static_cast<uint32_t>(
@@ -411,13 +393,6 @@ Operand Operand::EmbeddedNumber(double value) {
return result;
}
-Operand Operand::EmbeddedCode(CodeStub* stub) {
- Operand result(0, RelocInfo::CODE_TARGET);
- result.is_heap_object_request_ = true;
- result.value_.heap_object_request = HeapObjectRequest(stub);
- return result;
-}
-
Operand Operand::EmbeddedStringConstant(const StringConstantBase* str) {
Operand result(0, RelocInfo::EMBEDDED_OBJECT);
result.is_heap_object_request_ = true;
@@ -488,10 +463,6 @@ void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
object =
isolate->factory()->NewHeapNumber(request.heap_number(), TENURED);
break;
- case HeapObjectRequest::kCodeStub:
- request.code_stub()->set_isolate(isolate);
- object = request.code_stub()->GetCode();
- break;
case HeapObjectRequest::kStringConstant: {
const StringConstantBase* str = request.string();
CHECK_NOT_NULL(str);
@@ -499,7 +470,7 @@ void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
break;
}
}
- Address pc = reinterpret_cast<Address>(buffer_) + request.offset();
+ Address pc = reinterpret_cast<Address>(buffer_start_) + request.offset();
Memory<Address>(constant_pool_entry_address(pc, 0 /* unused */)) =
object.address();
}
@@ -517,6 +488,11 @@ const Instr kPopRegPattern = al | B26 | L | 4 | PostIndex | sp.code() * B16;
// ldr rd, [pc, #offset]
const Instr kLdrPCImmedMask = 15 * B24 | 7 * B20 | 15 * B16;
const Instr kLdrPCImmedPattern = 5 * B24 | L | pc.code() * B16;
+// Pc-relative call or jump to a signed imm24 offset.
+// bl pc + #offset
+// b pc + #offset
+const Instr kBOrBlPCImmedMask = 0xE * B24;
+const Instr kBOrBlPCImmedPattern = 0xA * B24;
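+// Both encodings share the ARM layout cond(31:28) 101(27:25) L(24) imm24:
+// the mask 0xE * B24 selects bits 27:25 and the pattern 0xA * B24 requires
+// them to be 0b101, leaving the link bit (bit 24) as a don't-care so that
+// b and bl both match.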
// vldr dd, [pc, #offset]
const Instr kVldrDPCMask = 15 * B24 | 3 * B20 | 15 * B16 | 15 * B8;
const Instr kVldrDPCPattern = 13 * B24 | L | pc.code() * B16 | 11 * B8;
@@ -552,20 +528,17 @@ const Instr kLdrRegFpNegOffsetPattern =
const Instr kStrRegFpNegOffsetPattern = al | B26 | NegOffset | fp.code() * B16;
const Instr kLdrStrInstrTypeMask = 0xFFFF0000;
-Assembler::Assembler(const AssemblerOptions& options, void* buffer,
- int buffer_size)
- : AssemblerBase(options, buffer, buffer_size),
+Assembler::Assembler(const AssemblerOptions& options,
+ std::unique_ptr<AssemblerBuffer> buffer)
+ : AssemblerBase(options, std::move(buffer)),
pending_32_bit_constants_(),
- pending_64_bit_constants_(),
scratch_register_list_(ip.bit()) {
pending_32_bit_constants_.reserve(kMinNumPendingConstants);
- pending_64_bit_constants_.reserve(kMinNumPendingConstants);
- reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);
+ reloc_info_writer.Reposition(buffer_start_ + buffer_->size(), pc_);
next_buffer_check_ = 0;
const_pool_blocked_nesting_ = 0;
no_const_pool_before_ = 0;
first_const_pool_32_use_ = -1;
- first_const_pool_64_use_ = -1;
last_bound_pos_ = 0;
if (CpuFeatures::IsSupported(VFP32DREGS)) {
// Register objects tend to be abstracted and survive between scopes, so
@@ -588,23 +561,24 @@ Assembler::~Assembler() {
void Assembler::GetCode(Isolate* isolate, CodeDesc* desc) {
// Emit constant pool if necessary.
- int constant_pool_offset = 0;
CheckConstPool(true, false);
DCHECK(pending_32_bit_constants_.empty());
- DCHECK(pending_64_bit_constants_.empty());
+
+ int code_comments_size = WriteCodeComments();
AllocateAndInstallRequestedHeapObjects(isolate);
// Set up code descriptor.
- desc->buffer = buffer_;
- desc->buffer_size = buffer_size_;
+ desc->buffer = buffer_start_;
+ desc->buffer_size = buffer_->size();
desc->instr_size = pc_offset();
- desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
- desc->constant_pool_size =
- (constant_pool_offset ? desc->instr_size - constant_pool_offset : 0);
+ desc->reloc_size =
+ (buffer_start_ + desc->buffer_size) - reloc_info_writer.pos();
+ desc->constant_pool_size = 0;
desc->origin = this;
desc->unwinding_info_size = 0;
desc->unwinding_info = nullptr;
+ desc->code_comments_size = code_comments_size;
}
@@ -761,6 +735,9 @@ bool Assembler::IsLdrPcImmediateOffset(Instr instr) {
return (instr & kLdrPCImmedMask) == kLdrPCImmedPattern;
}
+bool Assembler::IsBOrBlPcImmediateOffset(Instr instr) {
+ return (instr & kBOrBlPCImmedMask) == kBOrBlPCImmedPattern;
+}
bool Assembler::IsVldrDPcImmediateOffset(Instr instr) {
// Check the instruction is indeed a
@@ -883,8 +860,8 @@ void Assembler::target_at_put(int pos, int target_pos) {
if (is_uint8(target24)) {
// If the target fits in a byte then only patch with a mov
// instruction.
- PatchingAssembler patcher(options(),
- reinterpret_cast<byte*>(buffer_ + pos), 1);
+ PatchingAssembler patcher(
+ options(), reinterpret_cast<byte*>(buffer_start_ + pos), 1);
patcher.mov(dst, Operand(target24));
} else {
uint16_t target16_0 = target24 & kImm16Mask;
@@ -892,13 +869,13 @@ void Assembler::target_at_put(int pos, int target_pos) {
if (CpuFeatures::IsSupported(ARMv7)) {
// Patch with movw/movt.
if (target16_1 == 0) {
- PatchingAssembler patcher(options(),
- reinterpret_cast<byte*>(buffer_ + pos), 1);
+ PatchingAssembler patcher(
+ options(), reinterpret_cast<byte*>(buffer_start_ + pos), 1);
CpuFeatureScope scope(&patcher, ARMv7);
patcher.movw(dst, target16_0);
} else {
- PatchingAssembler patcher(options(),
- reinterpret_cast<byte*>(buffer_ + pos), 2);
+ PatchingAssembler patcher(
+ options(), reinterpret_cast<byte*>(buffer_start_ + pos), 2);
CpuFeatureScope scope(&patcher, ARMv7);
patcher.movw(dst, target16_0);
patcher.movt(dst, target16_1);
@@ -909,13 +886,13 @@ void Assembler::target_at_put(int pos, int target_pos) {
uint8_t target8_1 = target16_0 >> 8;
uint8_t target8_2 = target16_1 & kImm8Mask;
if (target8_2 == 0) {
- PatchingAssembler patcher(options(),
- reinterpret_cast<byte*>(buffer_ + pos), 2);
+ PatchingAssembler patcher(
+ options(), reinterpret_cast<byte*>(buffer_start_ + pos), 2);
patcher.mov(dst, Operand(target8_0));
patcher.orr(dst, dst, Operand(target8_1 << 8));
} else {
- PatchingAssembler patcher(options(),
- reinterpret_cast<byte*>(buffer_ + pos), 3);
+ PatchingAssembler patcher(
+ options(), reinterpret_cast<byte*>(buffer_start_ + pos), 3);
patcher.mov(dst, Operand(target8_0));
patcher.orr(dst, dst, Operand(target8_1 << 8));
patcher.orr(dst, dst, Operand(target8_2 << 16));
@@ -1167,7 +1144,7 @@ void Assembler::Move32BitImmediate(Register rd, const Operand& x,
DCHECK(!x.MustOutputRelocInfo(this));
UseScratchRegisterScope temps(this);
// Re-use the destination register as a scratch if possible.
- Register target = rd != pc ? rd : temps.Acquire();
+ Register target = rd != pc && rd != sp ? rd : temps.Acquire();
uint32_t imm32 = static_cast<uint32_t>(x.immediate());
movw(target, imm32 & 0xFFFF, cond);
movt(target, imm32 >> 16, cond);
@@ -1241,8 +1218,9 @@ void Assembler::AddrMode1(Instr instr, Register rd, Register rn,
// it first to a scratch register and change the original instruction to
// use it.
// Re-use the destination register if possible.
- Register scratch =
- (rd.is_valid() && rd != rn && rd != pc) ? rd : temps.Acquire();
+ Register scratch = (rd.is_valid() && rd != rn && rd != pc && rd != sp)
+ ? rd
+ : temps.Acquire();
mov(scratch, x, LeaveCC, cond);
AddrMode1(instr, rd, rn, Operand(scratch));
}
@@ -1307,8 +1285,9 @@ void Assembler::AddrMode2(Instr instr, Register rd, const MemOperand& x) {
UseScratchRegisterScope temps(this);
// Allow re-using rd for load instructions if possible.
bool is_load = (instr & L) == L;
- Register scratch =
- (is_load && rd != x.rn_ && rd != pc) ? rd : temps.Acquire();
+ Register scratch = (is_load && rd != x.rn_ && rd != pc && rd != sp)
+ ? rd
+ : temps.Acquire();
mov(scratch, Operand(x.offset_), LeaveCC,
Instruction::ConditionField(instr));
AddrMode2(instr, rd, MemOperand(x.rn_, scratch, x.am_));
@@ -1347,8 +1326,9 @@ void Assembler::AddrMode3(Instr instr, Register rd, const MemOperand& x) {
// register.
UseScratchRegisterScope temps(this);
// Allow re-using rd for load instructions if possible.
- Register scratch =
- (is_load && rd != x.rn_ && rd != pc) ? rd : temps.Acquire();
+ Register scratch = (is_load && rd != x.rn_ && rd != pc && rd != sp)
+ ? rd
+ : temps.Acquire();
mov(scratch, Operand(x.offset_), LeaveCC,
Instruction::ConditionField(instr));
AddrMode3(instr, rd, MemOperand(x.rn_, scratch, x.am_));
@@ -1362,7 +1342,7 @@ void Assembler::AddrMode3(Instr instr, Register rd, const MemOperand& x) {
UseScratchRegisterScope temps(this);
// Allow re-using rd for load instructions if possible.
Register scratch =
- (is_load && rd != x.rn_ && rd != pc) ? rd : temps.Acquire();
+ (is_load && rd != x.rn_ && rd != pc && rd != sp) ? rd : temps.Acquire();
mov(scratch, Operand(x.rm_, x.shift_op_, x.shift_imm_), LeaveCC,
Instruction::ConditionField(instr));
AddrMode3(instr, rd, MemOperand(x.rn_, scratch, x.am_));
@@ -1510,6 +1490,10 @@ void Assembler::eor(Register dst, Register src1, const Operand& src2,
AddrMode1(cond | EOR | s, dst, src1, src2);
}
+void Assembler::eor(Register dst, Register src1, Register src2, SBit s,
+ Condition cond) {
+ AddrMode1(cond | EOR | s, dst, src1, Operand(src2));
+}
void Assembler::sub(Register dst, Register src1, const Operand& src2,
SBit s, Condition cond) {
@@ -5041,40 +5025,34 @@ void Assembler::RecordConstPool(int size) {
void Assembler::GrowBuffer() {
- if (!own_buffer_) FATAL("external code buffer is too small");
+ DCHECK_EQ(buffer_start_, buffer_->start());
// Compute new buffer size.
- CodeDesc desc; // the new buffer
- if (buffer_size_ < 1 * MB) {
- desc.buffer_size = 2*buffer_size_;
- } else {
- desc.buffer_size = buffer_size_ + 1*MB;
- }
+ int old_size = buffer_->size();
+ int new_size = std::min(2 * old_size, old_size + 1 * MB);
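+  // i.e. the buffer doubles while it is under 1 MB and then grows in 1 MB
+  // steps (for example 512 KB -> 1 MB -> 2 MB -> 3 MB -> ...).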
// Some internal data structures overflow for very large buffers,
// they must ensure that kMaximalBufferSize is not too large.
- if (desc.buffer_size > kMaximalBufferSize) {
+ if (new_size > kMaximalBufferSize) {
V8::FatalProcessOutOfMemory(nullptr, "Assembler::GrowBuffer");
}
// Set up new buffer.
- desc.buffer = NewArray<byte>(desc.buffer_size);
-
- desc.instr_size = pc_offset();
- desc.reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
- desc.origin = this;
+ std::unique_ptr<AssemblerBuffer> new_buffer = buffer_->Grow(new_size);
+ DCHECK_EQ(new_size, new_buffer->size());
+ byte* new_start = new_buffer->start();
// Copy the data.
- int pc_delta = desc.buffer - buffer_;
- int rc_delta = (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_);
- MemMove(desc.buffer, buffer_, desc.instr_size);
+ int pc_delta = new_start - buffer_start_;
+ int rc_delta = (new_start + new_size) - (buffer_start_ + old_size);
+ size_t reloc_size = (buffer_start_ + old_size) - reloc_info_writer.pos();
+ MemMove(new_start, buffer_start_, pc_offset());
MemMove(reloc_info_writer.pos() + rc_delta, reloc_info_writer.pos(),
- desc.reloc_size);
+ reloc_size);
// Switch buffers.
- DeleteArray(buffer_);
- buffer_ = desc.buffer;
- buffer_size_ = desc.buffer_size;
+ buffer_ = std::move(new_buffer);
+ buffer_start_ = new_start;
pc_ += pc_delta;
reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
reloc_info_writer.last_pc() + pc_delta);
@@ -5089,7 +5067,6 @@ void Assembler::db(uint8_t data) {
// db is used to write raw data. The constant pool should be emitted or
// blocked before using db.
DCHECK(is_const_pool_blocked() || pending_32_bit_constants_.empty());
- DCHECK(is_const_pool_blocked() || pending_64_bit_constants_.empty());
CheckBuffer();
*reinterpret_cast<uint8_t*>(pc_) = data;
pc_ += sizeof(uint8_t);
@@ -5100,7 +5077,6 @@ void Assembler::dd(uint32_t data) {
// dd is used to write raw data. The constant pool should be emitted or
// blocked before using dd.
DCHECK(is_const_pool_blocked() || pending_32_bit_constants_.empty());
- DCHECK(is_const_pool_blocked() || pending_64_bit_constants_.empty());
CheckBuffer();
*reinterpret_cast<uint32_t*>(pc_) = data;
pc_ += sizeof(uint32_t);
@@ -5111,7 +5087,6 @@ void Assembler::dq(uint64_t value) {
// dq is used to write raw data. The constant pool should be emitted or
// blocked before using dq.
DCHECK(is_const_pool_blocked() || pending_32_bit_constants_.empty());
- DCHECK(is_const_pool_blocked() || pending_64_bit_constants_.empty());
CheckBuffer();
*reinterpret_cast<uint64_t*>(pc_) = value;
pc_ += sizeof(uint64_t);
@@ -5120,13 +5095,13 @@ void Assembler::dq(uint64_t value) {
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
if (!ShouldRecordRelocInfo(rmode)) return;
DCHECK_GE(buffer_space(), kMaxRelocSize); // too late to grow buffer here
- RelocInfo rinfo(reinterpret_cast<Address>(pc_), rmode, data, nullptr);
+ RelocInfo rinfo(reinterpret_cast<Address>(pc_), rmode, data, Code());
reloc_info_writer.Write(&rinfo);
}
void Assembler::ConstantPoolAddEntry(int position, RelocInfo::Mode rmode,
intptr_t value) {
- DCHECK(rmode != RelocInfo::COMMENT && rmode != RelocInfo::CONST_POOL);
+ DCHECK(rmode != RelocInfo::CONST_POOL);
// We can share CODE_TARGETs because we don't patch the code objects anymore,
// and we make sure we emit only one reloc info for them (thus delta patching)
// will apply the delta only once. At the moment, we do not dedup code targets
@@ -5173,11 +5148,7 @@ void Assembler::BlockConstPoolFor(int instructions) {
#ifdef DEBUG
int start = pc_limit + kInstrSize + 2 * kPointerSize;
DCHECK(pending_32_bit_constants_.empty() ||
- (start - first_const_pool_32_use_ +
- pending_64_bit_constants_.size() * kDoubleSize <
- kMaxDistToIntPool));
- DCHECK(pending_64_bit_constants_.empty() ||
- (start - first_const_pool_64_use_ < kMaxDistToFPPool));
+ (start < first_const_pool_32_use_ + kMaxDistToIntPool));
#endif
no_const_pool_before_ = pc_limit;
}
@@ -5199,7 +5170,7 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
}
// There is nothing to do if there are no pending constant pool entries.
- if (pending_32_bit_constants_.empty() && pending_64_bit_constants_.empty()) {
+ if (pending_32_bit_constants_.empty()) {
// Calculate the offset of the next check.
next_buffer_check_ = pc_offset() + kCheckPoolInterval;
return;
@@ -5212,19 +5183,6 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
int size_up_to_marker = jump_instr + kInstrSize;
int estimated_size_after_marker =
pending_32_bit_constants_.size() * kPointerSize;
- bool has_int_values = !pending_32_bit_constants_.empty();
- bool has_fp_values = !pending_64_bit_constants_.empty();
- bool require_64_bit_align = false;
- if (has_fp_values) {
- require_64_bit_align =
- !IsAligned(reinterpret_cast<intptr_t>(pc_ + size_up_to_marker),
- kDoubleAlignment);
- if (require_64_bit_align) {
- estimated_size_after_marker += kInstrSize;
- }
- estimated_size_after_marker +=
- pending_64_bit_constants_.size() * kDoubleSize;
- }
int estimated_size = size_up_to_marker + estimated_size_after_marker;
// We emit a constant pool when:
@@ -5236,35 +5194,18 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
// * the instruction doesn't require a jump after itself to jump over the
// constant pool, and we're getting close to running out of range.
if (!force_emit) {
- DCHECK(has_fp_values || has_int_values);
+ DCHECK(!pending_32_bit_constants_.empty());
bool need_emit = false;
- if (has_fp_values) {
- // The 64-bit constants are always emitted before the 32-bit constants, so
- // we can ignore the effect of the 32-bit constants on estimated_size.
- int dist64 = pc_offset() + estimated_size -
- pending_32_bit_constants_.size() * kPointerSize -
- first_const_pool_64_use_;
- if ((dist64 >= kMaxDistToFPPool - kCheckPoolInterval) ||
- (!require_jump && (dist64 >= kMaxDistToFPPool / 2))) {
- need_emit = true;
- }
- }
- if (has_int_values) {
- int dist32 = pc_offset() + estimated_size - first_const_pool_32_use_;
- if ((dist32 >= kMaxDistToIntPool - kCheckPoolInterval) ||
- (!require_jump && (dist32 >= kMaxDistToIntPool / 2))) {
- need_emit = true;
- }
+ int dist32 = pc_offset() + estimated_size - first_const_pool_32_use_;
+ if ((dist32 >= kMaxDistToIntPool - kCheckPoolInterval) ||
+ (!require_jump && (dist32 >= kMaxDistToIntPool / 2))) {
+ need_emit = true;
}
if (!need_emit) return;
}
// Deduplicate constants.
int size_after_marker = estimated_size_after_marker;
- for (size_t i = 0; i < pending_64_bit_constants_.size(); i++) {
- ConstantPoolEntry& entry = pending_64_bit_constants_[i];
- if (entry.is_merged()) size_after_marker -= kDoubleSize;
- }
for (size_t i = 0; i < pending_32_bit_constants_.size(); i++) {
ConstantPoolEntry& entry = pending_32_bit_constants_[i];
@@ -5296,40 +5237,6 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
emit(kConstantPoolMarker |
EncodeConstantPoolLength(size_after_marker / kPointerSize));
- if (require_64_bit_align) {
- emit(kConstantPoolMarker);
- }
-
- // Emit 64-bit constant pool entries first: their range is smaller than
- // 32-bit entries.
- for (size_t i = 0; i < pending_64_bit_constants_.size(); i++) {
- ConstantPoolEntry& entry = pending_64_bit_constants_[i];
-
- Instr instr = instr_at(entry.position());
- // Instruction to patch must be 'vldr rd, [pc, #offset]' with offset == 0.
- DCHECK((IsVldrDPcImmediateOffset(instr) &&
- GetVldrDRegisterImmediateOffset(instr) == 0));
-
- int delta = pc_offset() - entry.position() - Instruction::kPcLoadDelta;
- DCHECK(is_uint10(delta));
-
- if (entry.is_merged()) {
- ConstantPoolEntry& merged =
- pending_64_bit_constants_[entry.merged_index()];
- DCHECK(entry.value64() == merged.value64());
- Instr merged_instr = instr_at(merged.position());
- DCHECK(IsVldrDPcImmediateOffset(merged_instr));
- delta = GetVldrDRegisterImmediateOffset(merged_instr);
- delta += merged.position() - entry.position();
- }
- instr_at_put(entry.position(),
- SetVldrDRegisterImmediateOffset(instr, delta));
- if (!entry.is_merged()) {
- DCHECK(IsAligned(reinterpret_cast<intptr_t>(pc_), kDoubleAlignment));
- dq(entry.value64());
- }
- }
-
// Emit 32-bit constant pool entries.
for (size_t i = 0; i < pending_32_bit_constants_.size(); i++) {
ConstantPoolEntry& entry = pending_32_bit_constants_[i];
@@ -5366,10 +5273,8 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
}
pending_32_bit_constants_.clear();
- pending_64_bit_constants_.clear();
first_const_pool_32_use_ = -1;
- first_const_pool_64_use_ = -1;
RecordComment("]");
@@ -5387,22 +5292,29 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
PatchingAssembler::PatchingAssembler(const AssemblerOptions& options,
byte* address, int instructions)
- : Assembler(options, address, instructions * kInstrSize + kGap) {
- DCHECK_EQ(reloc_info_writer.pos(), buffer_ + buffer_size_);
+ : Assembler(options, ExternalAssemblerBuffer(
+ address, instructions * kInstrSize + kGap)) {
+ DCHECK_EQ(reloc_info_writer.pos(), buffer_start_ + buffer_->size());
}
PatchingAssembler::~PatchingAssembler() {
// Check that we don't have any pending constant pools.
DCHECK(pending_32_bit_constants_.empty());
- DCHECK(pending_64_bit_constants_.empty());
// Check that the code was patched as expected.
- DCHECK_EQ(pc_, buffer_ + buffer_size_ - kGap);
- DCHECK_EQ(reloc_info_writer.pos(), buffer_ + buffer_size_);
+ DCHECK_EQ(pc_, buffer_start_ + buffer_->size() - kGap);
+ DCHECK_EQ(reloc_info_writer.pos(), buffer_start_ + buffer_->size());
}
void PatchingAssembler::Emit(Address addr) { emit(static_cast<Instr>(addr)); }
+void PatchingAssembler::PadWithNops() {
+ DCHECK_LE(pc_, buffer_start_ + buffer_->size() - kGap);
+ while (pc_ < buffer_start_ + buffer_->size() - kGap) {
+ nop();
+ }
+}
+
UseScratchRegisterScope::UseScratchRegisterScope(Assembler* assembler)
: assembler_(assembler),
old_available_(*assembler->GetScratchRegisterList()),
diff --git a/deps/v8/src/arm/assembler-arm.h b/deps/v8/src/arm/assembler-arm.h
index 1bfa58b853..0c14a67707 100644
--- a/deps/v8/src/arm/assembler-arm.h
+++ b/deps/v8/src/arm/assembler-arm.h
@@ -44,331 +44,15 @@
#include <vector>
#include "src/arm/constants-arm.h"
+#include "src/arm/register-arm.h"
#include "src/assembler.h"
#include "src/boxed-float.h"
+#include "src/constant-pool.h"
#include "src/double.h"
namespace v8 {
namespace internal {
-// clang-format off
-#define GENERAL_REGISTERS(V) \
- V(r0) V(r1) V(r2) V(r3) V(r4) V(r5) V(r6) V(r7) \
- V(r8) V(r9) V(r10) V(fp) V(ip) V(sp) V(lr) V(pc)
-
-#define ALLOCATABLE_GENERAL_REGISTERS(V) \
- V(r0) V(r1) V(r2) V(r3) V(r4) V(r5) V(r6) V(r7) \
- V(r8) V(r9)
-
-#define FLOAT_REGISTERS(V) \
- V(s0) V(s1) V(s2) V(s3) V(s4) V(s5) V(s6) V(s7) \
- V(s8) V(s9) V(s10) V(s11) V(s12) V(s13) V(s14) V(s15) \
- V(s16) V(s17) V(s18) V(s19) V(s20) V(s21) V(s22) V(s23) \
- V(s24) V(s25) V(s26) V(s27) V(s28) V(s29) V(s30) V(s31)
-
-#define LOW_DOUBLE_REGISTERS(V) \
- V(d0) V(d1) V(d2) V(d3) V(d4) V(d5) V(d6) V(d7) \
- V(d8) V(d9) V(d10) V(d11) V(d12) V(d13) V(d14) V(d15)
-
-#define NON_LOW_DOUBLE_REGISTERS(V) \
- V(d16) V(d17) V(d18) V(d19) V(d20) V(d21) V(d22) V(d23) \
- V(d24) V(d25) V(d26) V(d27) V(d28) V(d29) V(d30) V(d31)
-
-#define DOUBLE_REGISTERS(V) \
- LOW_DOUBLE_REGISTERS(V) NON_LOW_DOUBLE_REGISTERS(V)
-
-#define SIMD128_REGISTERS(V) \
- V(q0) V(q1) V(q2) V(q3) V(q4) V(q5) V(q6) V(q7) \
- V(q8) V(q9) V(q10) V(q11) V(q12) V(q13) V(q14) V(q15)
-
-#define ALLOCATABLE_DOUBLE_REGISTERS(V) \
- V(d0) V(d1) V(d2) V(d3) V(d4) V(d5) V(d6) V(d7) \
- V(d8) V(d9) V(d10) V(d11) V(d12) \
- V(d16) V(d17) V(d18) V(d19) V(d20) V(d21) V(d22) V(d23) \
- V(d24) V(d25) V(d26) V(d27) V(d28) V(d29) V(d30) V(d31)
-
-#define ALLOCATABLE_NO_VFP32_DOUBLE_REGISTERS(V) \
- V(d0) V(d1) V(d2) V(d3) V(d4) V(d5) V(d6) V(d7) \
- V(d8) V(d9) V(d10) V(d11) V(d12) V(d15)
-
-#define C_REGISTERS(V) \
- V(cr0) V(cr1) V(cr2) V(cr3) V(cr4) V(cr5) V(cr6) V(cr7) \
- V(cr8) V(cr9) V(cr10) V(cr11) V(cr12) V(cr15)
-// clang-format on
-
-// The ARM ABI does not specify the usage of register r9, which may be reserved
-// as the static base or thread register on some platforms, in which case we
-// leave it alone. Adjust the value of kR9Available accordingly:
-const int kR9Available = 1; // 1 if available to us, 0 if reserved
-
-// Register list in load/store instructions
-// Note that the bit values must match those used in actual instruction encoding
-const int kNumRegs = 16;
-
-// Caller-saved/arguments registers
-const RegList kJSCallerSaved =
- 1 << 0 | // r0 a1
- 1 << 1 | // r1 a2
- 1 << 2 | // r2 a3
- 1 << 3; // r3 a4
-
-const int kNumJSCallerSaved = 4;
-
-// Callee-saved registers preserved when switching from C to JavaScript
-const RegList kCalleeSaved =
- 1 << 4 | // r4 v1
- 1 << 5 | // r5 v2
- 1 << 6 | // r6 v3
- 1 << 7 | // r7 v4 (cp in JavaScript code)
- 1 << 8 | // r8 v5 (pp in JavaScript code)
- kR9Available << 9 | // r9 v6
- 1 << 10 | // r10 v7
- 1 << 11; // r11 v8 (fp in JavaScript code)
-
-// When calling into C++ (only for C++ calls that can't cause a GC).
-// The call code will take care of lr, fp, etc.
-const RegList kCallerSaved =
- 1 << 0 | // r0
- 1 << 1 | // r1
- 1 << 2 | // r2
- 1 << 3 | // r3
- 1 << 9; // r9
-
-const int kNumCalleeSaved = 7 + kR9Available;
-
-// Double registers d8 to d15 are callee-saved.
-const int kNumDoubleCalleeSaved = 8;
-
-// Number of registers for which space is reserved in safepoints. Must be a
-// multiple of 8.
-// TODO(regis): Only 8 registers may actually be sufficient. Revisit.
-const int kNumSafepointRegisters = 16;
-
-// Define the list of registers actually saved at safepoints.
-// Note that the number of saved registers may be smaller than the reserved
-// space, i.e. kNumSafepointSavedRegisters <= kNumSafepointRegisters.
-const RegList kSafepointSavedRegisters = kJSCallerSaved | kCalleeSaved;
-const int kNumSafepointSavedRegisters = kNumJSCallerSaved + kNumCalleeSaved;
-
-enum RegisterCode {
-#define REGISTER_CODE(R) kRegCode_##R,
- GENERAL_REGISTERS(REGISTER_CODE)
-#undef REGISTER_CODE
- kRegAfterLast
-};
-
-class Register : public RegisterBase<Register, kRegAfterLast> {
- friend class RegisterBase;
- explicit constexpr Register(int code) : RegisterBase(code) {}
-};
-
-ASSERT_TRIVIALLY_COPYABLE(Register);
-static_assert(sizeof(Register) == sizeof(int),
- "Register can efficiently be passed by value");
-
-// r7: context register
-#define DECLARE_REGISTER(R) \
- constexpr Register R = Register::from_code<kRegCode_##R>();
-GENERAL_REGISTERS(DECLARE_REGISTER)
-#undef DECLARE_REGISTER
-constexpr Register no_reg = Register::no_reg();
-
-constexpr bool kPadArguments = false;
-constexpr bool kSimpleFPAliasing = false;
-constexpr bool kSimdMaskRegisters = false;
-
-enum SwVfpRegisterCode {
-#define REGISTER_CODE(R) kSwVfpCode_##R,
- FLOAT_REGISTERS(REGISTER_CODE)
-#undef REGISTER_CODE
- kSwVfpAfterLast
-};
-
-// Representation of a list of non-overlapping VFP registers. This list
-// represents the data layout of VFP registers as a bitfield:
-// S registers cover 1 bit
-// D registers cover 2 bits
-// Q registers cover 4 bits
-//
-// This way, we make sure no registers in the list ever overlap. However, a list
-// may represent multiple different sets of registers,
-// e.g. [d0 s2 s3] <=> [s0 s1 d1].
-typedef uint64_t VfpRegList;
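The non-overlap property described above is easy to sanity-check from the bit
masks each register kind contributes (they mirror the ToVfpRegList() bodies
below). A minimal compilable sketch, illustrative rather than V8 code:

    #include <cstdint>

    using VfpRegList = uint64_t;

    constexpr VfpRegList SBits(int code) { return uint64_t{0x1} << code; }
    constexpr VfpRegList DBits(int code) { return uint64_t{0x3} << (code * 2); }
    constexpr VfpRegList QBits(int code) { return uint64_t{0xF} << (code * 4); }

    // d1 occupies exactly the bits of the two S registers it aliases, and
    // q0 occupies exactly the bits of d0 and d1.
    static_assert(DBits(1) == (SBits(2) | SBits(3)), "d1 <=> s2:s3");
    static_assert(QBits(0) == (DBits(0) | DBits(1)), "q0 <=> d0:d1");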
-
-// Single word VFP register.
-class SwVfpRegister : public RegisterBase<SwVfpRegister, kSwVfpAfterLast> {
- public:
- static constexpr int kSizeInBytes = 4;
-
- static void split_code(int reg_code, int* vm, int* m) {
- DCHECK(from_code(reg_code).is_valid());
- *m = reg_code & 0x1;
- *vm = reg_code >> 1;
- }
- void split_code(int* vm, int* m) const { split_code(code(), vm, m); }
- VfpRegList ToVfpRegList() const {
- DCHECK(is_valid());
- // Each bit in the list corresponds to a S register.
- return uint64_t{0x1} << code();
- }
-
- private:
- friend class RegisterBase;
- explicit constexpr SwVfpRegister(int code) : RegisterBase(code) {}
-};
-
-ASSERT_TRIVIALLY_COPYABLE(SwVfpRegister);
-static_assert(sizeof(SwVfpRegister) == sizeof(int),
- "SwVfpRegister can efficiently be passed by value");
-
-typedef SwVfpRegister FloatRegister;
-
-enum DoubleRegisterCode {
-#define REGISTER_CODE(R) kDoubleCode_##R,
- DOUBLE_REGISTERS(REGISTER_CODE)
-#undef REGISTER_CODE
- kDoubleAfterLast
-};
-
-// Double word VFP register.
-class DwVfpRegister : public RegisterBase<DwVfpRegister, kDoubleAfterLast> {
- public:
- static constexpr int kSizeInBytes = 8;
-
- inline static int NumRegisters();
-
- static void split_code(int reg_code, int* vm, int* m) {
- DCHECK(from_code(reg_code).is_valid());
- *m = (reg_code & 0x10) >> 4;
- *vm = reg_code & 0x0F;
- }
- void split_code(int* vm, int* m) const { split_code(code(), vm, m); }
- VfpRegList ToVfpRegList() const {
- DCHECK(is_valid());
- // A D register overlaps two S registers.
- return uint64_t{0x3} << (code() * 2);
- }
-
- private:
- friend class RegisterBase;
- friend class LowDwVfpRegister;
- explicit constexpr DwVfpRegister(int code) : RegisterBase(code) {}
-};
-
-ASSERT_TRIVIALLY_COPYABLE(DwVfpRegister);
-static_assert(sizeof(DwVfpRegister) == sizeof(int),
- "DwVfpRegister can efficiently be passed by value");
-
-typedef DwVfpRegister DoubleRegister;
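The split_code() helpers above encode the VFP rule that a 5-bit register code
is stored in the instruction as a 4-bit field plus a separate M (or D)
extension bit. A worked round-trip check (illustrative, not V8 code; d23 is
an arbitrary example):

    #include <cassert>

    int main() {
      int code = 23;                    // d23
      int m = (code & 0x10) >> 4;       // extension bit: 1
      int vm = code & 0x0F;             // 4-bit field: 7
      assert(m == 1 && vm == 7);
      assert(((m << 4) | vm) == code);  // the split round-trips
      return 0;
    }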
-
-
-// Double word VFP register d0-15.
-class LowDwVfpRegister
- : public RegisterBase<LowDwVfpRegister, kDoubleCode_d16> {
- public:
- constexpr operator DwVfpRegister() const { return DwVfpRegister(reg_code_); }
-
- SwVfpRegister low() const { return SwVfpRegister::from_code(code() * 2); }
- SwVfpRegister high() const {
- return SwVfpRegister::from_code(code() * 2 + 1);
- }
- VfpRegList ToVfpRegList() const {
- DCHECK(is_valid());
- // A D register overlaps two S registers.
- return uint64_t{0x3} << (code() * 2);
- }
-
- private:
- friend class RegisterBase;
- explicit constexpr LowDwVfpRegister(int code) : RegisterBase(code) {}
-};
-
-enum Simd128RegisterCode {
-#define REGISTER_CODE(R) kSimd128Code_##R,
- SIMD128_REGISTERS(REGISTER_CODE)
-#undef REGISTER_CODE
- kSimd128AfterLast
-};
-
-// Quad word NEON register.
-class QwNeonRegister : public RegisterBase<QwNeonRegister, kSimd128AfterLast> {
- public:
- static void split_code(int reg_code, int* vm, int* m) {
- DCHECK(from_code(reg_code).is_valid());
- int encoded_code = reg_code << 1;
- *m = (encoded_code & 0x10) >> 4;
- *vm = encoded_code & 0x0F;
- }
- void split_code(int* vm, int* m) const { split_code(code(), vm, m); }
- DwVfpRegister low() const { return DwVfpRegister::from_code(code() * 2); }
- DwVfpRegister high() const {
- return DwVfpRegister::from_code(code() * 2 + 1);
- }
- VfpRegList ToVfpRegList() const {
- DCHECK(is_valid());
- // A Q register overlaps four S registers.
- return uint64_t{0xf} << (code() * 4);
- }
-
- private:
- friend class RegisterBase;
- explicit constexpr QwNeonRegister(int code) : RegisterBase(code) {}
-};
-
-
-typedef QwNeonRegister QuadRegister;
-
-typedef QwNeonRegister Simd128Register;
-
-enum CRegisterCode {
-#define REGISTER_CODE(R) kCCode_##R,
- C_REGISTERS(REGISTER_CODE)
-#undef REGISTER_CODE
- kCAfterLast
-};
-
-// Coprocessor register
-class CRegister : public RegisterBase<CRegister, kCAfterLast> {
- friend class RegisterBase;
- explicit constexpr CRegister(int code) : RegisterBase(code) {}
-};
-
-// Support for the VFP registers s0 to s31 (d0 to d15).
-// Note that "s(N):s(N+1)" is the same as "d(N/2)".
-#define DECLARE_FLOAT_REGISTER(R) \
- constexpr SwVfpRegister R = SwVfpRegister::from_code<kSwVfpCode_##R>();
-FLOAT_REGISTERS(DECLARE_FLOAT_REGISTER)
-#undef DECLARE_FLOAT_REGISTER
-
-#define DECLARE_LOW_DOUBLE_REGISTER(R) \
- constexpr LowDwVfpRegister R = LowDwVfpRegister::from_code<kDoubleCode_##R>();
-LOW_DOUBLE_REGISTERS(DECLARE_LOW_DOUBLE_REGISTER)
-#undef DECLARE_LOW_DOUBLE_REGISTER
-
-#define DECLARE_DOUBLE_REGISTER(R) \
- constexpr DwVfpRegister R = DwVfpRegister::from_code<kDoubleCode_##R>();
-NON_LOW_DOUBLE_REGISTERS(DECLARE_DOUBLE_REGISTER)
-#undef DECLARE_DOUBLE_REGISTER
-
-constexpr DwVfpRegister no_dreg = DwVfpRegister::no_reg();
-
-#define DECLARE_SIMD128_REGISTER(R) \
- constexpr Simd128Register R = Simd128Register::from_code<kSimd128Code_##R>();
-SIMD128_REGISTERS(DECLARE_SIMD128_REGISTER)
-#undef DECLARE_SIMD128_REGISTER
-
-// Aliases for double registers.
-constexpr LowDwVfpRegister kFirstCalleeSavedDoubleReg = d8;
-constexpr LowDwVfpRegister kLastCalleeSavedDoubleReg = d15;
-constexpr LowDwVfpRegister kDoubleRegZero = d13;
-
-constexpr CRegister no_creg = CRegister::no_reg();
-
-#define DECLARE_C_REGISTER(R) \
- constexpr CRegister R = CRegister::from_code<kCCode_##R>();
-C_REGISTERS(DECLARE_C_REGISTER)
-#undef DECLARE_C_REGISTER
-
// Coprocessor number
enum Coprocessor {
p0 = 0,
@@ -401,7 +85,7 @@ class Operand {
V8_INLINE static Operand Zero();
V8_INLINE explicit Operand(const ExternalReference& f);
explicit Operand(Handle<HeapObject> handle);
- V8_INLINE explicit Operand(Smi* value);
+ V8_INLINE explicit Operand(Smi value);
// rm
V8_INLINE explicit Operand(Register rm);
@@ -424,7 +108,6 @@ class Operand {
explicit Operand(Register rm, ShiftOp shift_op, Register rs);
static Operand EmbeddedNumber(double number); // Smi or HeapNumber.
- static Operand EmbeddedCode(CodeStub* stub);
static Operand EmbeddedStringConstant(const StringConstantBase* str);
// Return true if this is a register operand.
@@ -613,17 +296,16 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// for a detailed comment on the layout (globals.h).
//
// If the provided buffer is nullptr, the assembler allocates and grows its
- // own buffer, and buffer_size determines the initial buffer size. The buffer
- // is owned by the assembler and deallocated upon destruction of the
- // assembler.
- //
- // If the provided buffer is not nullptr, the assembler uses the provided
- // buffer for code generation and assumes its size to be buffer_size. If the
- // buffer is too small, a fatal error occurs. No deallocation of the buffer is
- // done upon destruction of the assembler.
- Assembler(const AssemblerOptions& options, void* buffer, int buffer_size);
+ // own buffer. Otherwise it takes ownership of the provided buffer.
+ explicit Assembler(const AssemblerOptions&,
+ std::unique_ptr<AssemblerBuffer> = {});
+
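The new ownership contract is simpler than the old raw-pointer one: a null
(default-constructed) buffer means "allocate and grow internally", while a
caller-supplied buffer is owned by the assembler from then on. A stand-alone
model of that rule (illustrative; not the real V8 classes):

    #include <memory>
    #include <vector>

    struct Buffer { std::vector<unsigned char> bytes; };

    class Asm {
     public:
      explicit Asm(std::unique_ptr<Buffer> buffer = {})
          : buffer_(buffer ? std::move(buffer)
                           : std::make_unique<Buffer>()) {}

     private:
      std::unique_ptr<Buffer> buffer_;  // owned here in both cases
    };

    int main() {
      Asm grows_internally{};                    // assembler-owned buffer
      Asm external{std::make_unique<Buffer>()};  // ownership transferred
      return 0;
    }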
virtual ~Assembler();
+ virtual void AbortedCodeGeneration() {
+ pending_32_bit_constants_.clear();
+ }
+
// GetCode emits any pending (non-emitted) code and fills the descriptor
// desc. GetCode() is idempotent; it returns the same result if no other
// Assembler functions are invoked in between GetCode() calls.
@@ -678,7 +360,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// This sets the branch destination (which is in the constant pool on ARM).
// This is for calls and branches within generated code.
inline static void deserialization_set_special_target_at(
- Address constant_pool_entry, Code* code, Address target);
+ Address constant_pool_entry, Code code, Address target);
// Get the size of the special target encoded at 'location'.
inline static int deserialization_special_target_size(Address location);
@@ -736,6 +418,8 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void eor(Register dst, Register src1, const Operand& src2,
SBit s = LeaveCC, Condition cond = al);
+ void eor(Register dst, Register src1, Register src2, SBit s = LeaveCC,
+ Condition cond = al);
void sub(Register dst, Register src1, const Operand& src2,
SBit s = LeaveCC, Condition cond = al);
@@ -1408,10 +1092,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
DISALLOW_IMPLICIT_CONSTRUCTORS(BlockConstPoolScope);
};
- // Record a comment relocation entry that can be used by a disassembler.
- // Use --code-comments to enable.
- void RecordComment(const char* msg);
-
// Record a deoptimization reason that can be used by a log or cpu profiler.
// Use --trace-deopt to enable.
void RecordDeoptReason(DeoptimizeReason reason, SourcePosition position,
@@ -1446,9 +1126,11 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void dp(uintptr_t data) { dd(data); }
// Read/patch instructions
- Instr instr_at(int pos) { return *reinterpret_cast<Instr*>(buffer_ + pos); }
+ Instr instr_at(int pos) {
+ return *reinterpret_cast<Instr*>(buffer_start_ + pos);
+ }
void instr_at_put(int pos, Instr instr) {
- *reinterpret_cast<Instr*>(buffer_ + pos) = instr;
+ *reinterpret_cast<Instr*>(buffer_start_ + pos) = instr;
}
static Instr instr_at(Address pc) { return *reinterpret_cast<Instr*>(pc); }
static void instr_at_put(Address pc, Instr instr) {
@@ -1475,6 +1157,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
static bool IsStrRegFpNegOffset(Instr instr);
static bool IsLdrRegFpNegOffset(Instr instr);
static bool IsLdrPcImmediateOffset(Instr instr);
+ static bool IsBOrBlPcImmediateOffset(Instr instr);
static bool IsVldrDPcImmediateOffset(Instr instr);
static bool IsBlxReg(Instr instr);
static bool IsBlxIp(Instr instr);
@@ -1500,13 +1183,10 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// PC-relative loads, thereby defining a maximum distance between the
// instruction and the accessed constant.
static constexpr int kMaxDistToIntPool = 4 * KB;
- static constexpr int kMaxDistToFPPool = 1 * KB;
// All relocations could be integer; it therefore acts as the limit.
static constexpr int kMinNumPendingConstants = 4;
static constexpr int kMaxNumPending32Constants =
kMaxDistToIntPool / kInstrSize;
- static constexpr int kMaxNumPending64Constants =
- kMaxDistToFPPool / kInstrSize;
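With the 64-bit pool gone, only the integer limit remains. As a worked example
of that bound (assuming the usual 4-byte ARM kInstrSize): kMaxDistToIntPool /
kInstrSize = 4096 / 4 = 1024, so at most 1024 32-bit entries can be pending
before the pool has to be emitted.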
// Postpone the generation of the constant pool for the specified number of
// instructions.
@@ -1521,13 +1201,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
}
}
- void PatchConstantPoolAccessInstruction(int pc_offset, int offset,
- ConstantPoolEntry::Access access,
- ConstantPoolEntry::Type type) {
- // No embedded constant pool support.
- UNREACHABLE();
- }
-
// Move a 32-bit immediate into a register, potentially via the constant pool.
void Move32BitImmediate(Register rd, const Operand& x, Condition cond = al);
@@ -1564,11 +1237,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
int start = pc_offset() + kInstrSize + 2 * kPointerSize;
// Check the constant pool hasn't been blocked for too long.
DCHECK(pending_32_bit_constants_.empty() ||
- (start + pending_64_bit_constants_.size() * kDoubleSize <
- static_cast<size_t>(first_const_pool_32_use_ +
- kMaxDistToIntPool)));
- DCHECK(pending_64_bit_constants_.empty() ||
- (start < (first_const_pool_64_use_ + kMaxDistToFPPool)));
+ (start < first_const_pool_32_use_ + kMaxDistToIntPool));
#endif
// Two cases:
// * no_const_pool_before_ >= next_buffer_check_ and the emission is
@@ -1619,7 +1288,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// The buffers of pending constant pool entries.
std::vector<ConstantPoolEntry> pending_32_bit_constants_;
- std::vector<ConstantPoolEntry> pending_64_bit_constants_;
// Scratch registers available for use by the Assembler.
RegList scratch_register_list_;
@@ -1655,7 +1323,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// Keep track of the first instruction requiring a constant pool entry
// since the previous constant pool was emitted.
int first_const_pool_32_use_;
- int first_const_pool_64_use_;
// The bound position, before this we cannot do instruction elimination.
int last_bound_pos_;
@@ -1688,6 +1355,8 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
intptr_t value);
void AllocateAndInstallRequestedHeapObjects(Isolate* isolate);
+ int WriteCodeComments();
+
friend class RelocInfo;
friend class BlockConstPoolScope;
friend class EnsureSpace;
@@ -1706,6 +1375,7 @@ class PatchingAssembler : public Assembler {
~PatchingAssembler();
void Emit(Address addr);
+ void PadWithNops();
};
// This scope utility allows scratch registers to be managed safely. The
diff --git a/deps/v8/src/arm/code-stubs-arm.cc b/deps/v8/src/arm/code-stubs-arm.cc
deleted file mode 100644
index c7eaef1325..0000000000
--- a/deps/v8/src/arm/code-stubs-arm.cc
+++ /dev/null
@@ -1,594 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#if V8_TARGET_ARCH_ARM
-
-#include "src/api-arguments-inl.h"
-#include "src/assembler-inl.h"
-#include "src/base/bits.h"
-#include "src/bootstrapper.h"
-#include "src/code-stubs.h"
-#include "src/counters.h"
-#include "src/double.h"
-#include "src/frame-constants.h"
-#include "src/frames.h"
-#include "src/heap/heap-inl.h"
-#include "src/ic/ic.h"
-#include "src/ic/stub-cache.h"
-#include "src/isolate.h"
-#include "src/objects/api-callbacks.h"
-#include "src/objects/regexp-match-info.h"
-#include "src/regexp/jsregexp.h"
-#include "src/regexp/regexp-macro-assembler.h"
-#include "src/runtime/runtime.h"
-
-#include "src/arm/code-stubs-arm.h" // Cannot be the first include.
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm)
-
-void JSEntryStub::Generate(MacroAssembler* masm) {
- // r0: code entry
- // r1: function
- // r2: receiver
- // r3: argc
- // [sp+0]: argv
-
- Label invoke, handler_entry, exit;
-
- {
- NoRootArrayScope no_root_array(masm);
-
- ProfileEntryHookStub::MaybeCallEntryHook(masm);
-
- // Called from C, so do not pop argc and args on exit (preserve sp)
- // No need to save register-passed args
- // Save callee-saved registers (incl. cp and fp), sp, and lr
- __ stm(db_w, sp, kCalleeSaved | lr.bit());
-
- // Save callee-saved vfp registers.
- __ vstm(db_w, sp, kFirstCalleeSavedDoubleReg, kLastCalleeSavedDoubleReg);
- // Set up the reserved register for 0.0.
- __ vmov(kDoubleRegZero, Double(0.0));
-
- __ InitializeRootRegister();
- }
-
- // Get address of argv, see stm above.
- // r0: code entry
- // r1: function
- // r2: receiver
- // r3: argc
-
- // Set up argv in r4.
- int offset_to_argv = (kNumCalleeSaved + 1) * kPointerSize;
- offset_to_argv += kNumDoubleCalleeSaved * kDoubleSize;
- __ ldr(r4, MemOperand(sp, offset_to_argv));
-
- // Push a frame with special values setup to mark it as an entry frame.
- // r0: code entry
- // r1: function
- // r2: receiver
- // r3: argc
- // r4: argv
- StackFrame::Type marker = type();
- __ mov(r7, Operand(StackFrame::TypeToMarker(marker)));
- __ mov(r6, Operand(StackFrame::TypeToMarker(marker)));
- __ mov(r5, Operand(ExternalReference::Create(
- IsolateAddressId::kCEntryFPAddress, isolate())));
- __ ldr(r5, MemOperand(r5));
- {
- UseScratchRegisterScope temps(masm);
- Register scratch = temps.Acquire();
-
- // Push a bad frame pointer to fail if it is used.
- __ mov(scratch, Operand(-1));
- __ stm(db_w, sp, r5.bit() | r6.bit() | r7.bit() | scratch.bit());
- }
-
- Register scratch = r6;
-
- // Set up frame pointer for the frame to be pushed.
- __ add(fp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));
-
- // If this is the outermost JS call, set js_entry_sp value.
- Label non_outermost_js;
- ExternalReference js_entry_sp =
- ExternalReference::Create(IsolateAddressId::kJSEntrySPAddress, isolate());
- __ mov(r5, Operand(ExternalReference(js_entry_sp)));
- __ ldr(scratch, MemOperand(r5));
- __ cmp(scratch, Operand::Zero());
- __ b(ne, &non_outermost_js);
- __ str(fp, MemOperand(r5));
- __ mov(scratch, Operand(StackFrame::OUTERMOST_JSENTRY_FRAME));
- Label cont;
- __ b(&cont);
- __ bind(&non_outermost_js);
- __ mov(scratch, Operand(StackFrame::INNER_JSENTRY_FRAME));
- __ bind(&cont);
- __ push(scratch);
-
- // Jump to a faked try block that does the invoke, with a faked catch
- // block that sets the pending exception.
- __ jmp(&invoke);
-
- // Block literal pool emission whilst taking the position of the handler
- // entry. This avoids making the assumption that literal pools are always
- // emitted after an instruction is emitted, rather than before.
- {
- Assembler::BlockConstPoolScope block_const_pool(masm);
- __ bind(&handler_entry);
- handler_offset_ = handler_entry.pos();
- // Caught exception: Store result (exception) in the pending exception
- // field in the JSEnv and return a failure sentinel. Coming in here the
- // fp will be invalid because the PushStackHandler below sets it to 0 to
- // signal the existence of the JSEntry frame.
- __ mov(scratch,
- Operand(ExternalReference::Create(
- IsolateAddressId::kPendingExceptionAddress, isolate())));
- }
- __ str(r0, MemOperand(scratch));
- __ LoadRoot(r0, RootIndex::kException);
- __ b(&exit);
-
- // Invoke: Link this frame into the handler chain.
- __ bind(&invoke);
- // Must preserve r0-r4, r5-r6 are available.
- __ PushStackHandler();
- // If an exception not caught by another handler occurs, this handler
- // returns control to the code after the bl(&invoke) above, which
- // restores all kCalleeSaved registers (including cp and fp) to their
- // saved values before returning a failure to C.
-
- // Invoke the function by calling through JS entry trampoline builtin.
- // Notice that we cannot store a reference to the trampoline code directly in
- // this stub, because runtime stubs are not traversed when doing GC.
-
- // Expected registers by Builtins::JSEntryTrampoline
- // r0: code entry
- // r1: function
- // r2: receiver
- // r3: argc
- // r4: argv
- __ Call(EntryTrampoline(), RelocInfo::CODE_TARGET);
-
- // Unlink this frame from the handler chain.
- __ PopStackHandler();
-
- __ bind(&exit); // r0 holds result
- // Check if the current stack frame is marked as the outermost JS frame.
- Label non_outermost_js_2;
- __ pop(r5);
- __ cmp(r5, Operand(StackFrame::OUTERMOST_JSENTRY_FRAME));
- __ b(ne, &non_outermost_js_2);
- __ mov(r6, Operand::Zero());
- __ mov(r5, Operand(ExternalReference(js_entry_sp)));
- __ str(r6, MemOperand(r5));
- __ bind(&non_outermost_js_2);
-
- // Restore the top frame descriptors from the stack.
- __ pop(r3);
- __ mov(scratch, Operand(ExternalReference::Create(
- IsolateAddressId::kCEntryFPAddress, isolate())));
- __ str(r3, MemOperand(scratch));
-
- // Reset the stack to the callee saved registers.
- __ add(sp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));
-
- // Restore callee-saved registers and return.
-#ifdef DEBUG
- if (FLAG_debug_code) {
- __ mov(lr, Operand(pc));
- }
-#endif
-
- // Restore callee-saved vfp registers.
- __ vldm(ia_w, sp, kFirstCalleeSavedDoubleReg, kLastCalleeSavedDoubleReg);
-
- __ ldm(ia_w, sp, kCalleeSaved | pc.bit());
-}
-
-void DirectCEntryStub::Generate(MacroAssembler* masm) {
- // Place the return address on the stack, making the call
- // GC safe. The RegExp backend also relies on this.
- __ str(lr, MemOperand(sp, 0));
- __ blx(ip); // Call the C++ function.
- __ ldr(pc, MemOperand(sp, 0));
-}
-
-
-void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
- Register target) {
- if (FLAG_embedded_builtins) {
- if (masm->root_array_available() &&
- isolate()->ShouldLoadConstantsFromRootList()) {
- // This is basically an inlined version of Call(Handle<Code>) that loads
- // the code object into lr instead of ip.
- __ Move(ip, target);
- __ IndirectLoadConstant(lr, GetCode());
- __ add(lr, lr, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ blx(lr);
- return;
- }
- }
- intptr_t code =
- reinterpret_cast<intptr_t>(GetCode().location());
- __ Move(ip, target);
- __ mov(lr, Operand(code, RelocInfo::CODE_TARGET));
- __ blx(lr); // Call the stub.
-}
-
-
-void ProfileEntryHookStub::MaybeCallEntryHookDelayed(TurboAssembler* tasm,
- Zone* zone) {
- if (tasm->isolate()->function_entry_hook() != nullptr) {
- tasm->MaybeCheckConstPool();
- PredictableCodeSizeScope predictable(
- tasm, TurboAssembler::kCallStubSize + 2 * kInstrSize);
- tasm->push(lr);
- tasm->CallStubDelayed(new (zone) ProfileEntryHookStub(nullptr));
- tasm->pop(lr);
- }
-}
-
-void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
- if (masm->isolate()->function_entry_hook() != nullptr) {
- ProfileEntryHookStub stub(masm->isolate());
- masm->MaybeCheckConstPool();
- PredictableCodeSizeScope predictable(
- masm, TurboAssembler::kCallStubSize + 2 * kInstrSize);
- __ push(lr);
- __ CallStub(&stub);
- __ pop(lr);
- }
-}
-
-
-void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
- // The entry hook is a "push lr" instruction, followed by a call.
- const int32_t kReturnAddressDistanceFromFunctionStart = 3 * kInstrSize;
-
- // This should contain all kCallerSaved registers.
- const RegList kSavedRegs =
- 1 << 0 | // r0
- 1 << 1 | // r1
- 1 << 2 | // r2
- 1 << 3 | // r3
- 1 << 5 | // r5
- 1 << 9; // r9
- // We also save lr, so the count here is one higher than the mask indicates.
- const int32_t kNumSavedRegs = 7;
-
- DCHECK_EQ(kCallerSaved & kSavedRegs, kCallerSaved);
-
- // Save all caller-save registers as this may be called from anywhere.
- __ stm(db_w, sp, kSavedRegs | lr.bit());
-
- // Compute the function's address for the first argument.
- __ sub(r0, lr, Operand(kReturnAddressDistanceFromFunctionStart));
-
- // The caller's return address is above the saved temporaries.
- // Grab that for the second argument to the hook.
- __ add(r1, sp, Operand(kNumSavedRegs * kPointerSize));
-
- // Align the stack if necessary.
- int frame_alignment = masm->ActivationFrameAlignment();
- if (frame_alignment > kPointerSize) {
- __ mov(r5, sp);
- DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
- __ and_(sp, sp, Operand(-frame_alignment));
- }
-
- {
- UseScratchRegisterScope temps(masm);
- Register scratch = temps.Acquire();
-
-#if V8_HOST_ARCH_ARM
- int32_t entry_hook =
- reinterpret_cast<int32_t>(isolate()->function_entry_hook());
- __ mov(scratch, Operand(entry_hook));
-#else
- // Under the simulator we need to indirect the entry hook through a
- // trampoline function at a known address.
- // It additionally takes an isolate as a third parameter
- __ mov(r2, Operand(ExternalReference::isolate_address(isolate())));
-
- ApiFunction dispatcher(FUNCTION_ADDR(EntryHookTrampoline));
- __ mov(scratch, Operand(ExternalReference::Create(
- &dispatcher, ExternalReference::BUILTIN_CALL)));
-#endif
- __ Call(scratch);
- }
-
- // Restore the stack pointer if needed.
- if (frame_alignment > kPointerSize) {
- __ mov(sp, r5);
- }
-
- // Also pop pc to get Ret(0).
- __ ldm(ia_w, sp, kSavedRegs | pc.bit());
-}
-
-static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
- return ref0.address() - ref1.address();
-}
-
-
-// Calls an API function. Allocates HandleScope, extracts returned value
-// from handle and propagates exceptions. Restores context. stack_space
-// - space to be unwound on exit (includes the space for the JS call
-// arguments and the additional space allocated for the fast call).
-static void CallApiFunctionAndReturn(MacroAssembler* masm,
- Register function_address,
- ExternalReference thunk_ref,
- int stack_space,
- MemOperand* stack_space_operand,
- MemOperand return_value_operand) {
- Isolate* isolate = masm->isolate();
- ExternalReference next_address =
- ExternalReference::handle_scope_next_address(isolate);
- const int kNextOffset = 0;
- const int kLimitOffset = AddressOffset(
- ExternalReference::handle_scope_limit_address(isolate), next_address);
- const int kLevelOffset = AddressOffset(
- ExternalReference::handle_scope_level_address(isolate), next_address);
-
- DCHECK(function_address == r1 || function_address == r2);
-
- Label profiler_disabled;
- Label end_profiler_check;
- __ Move(r9, ExternalReference::is_profiling_address(isolate));
- __ ldrb(r9, MemOperand(r9, 0));
- __ cmp(r9, Operand(0));
- __ b(eq, &profiler_disabled);
-
- // Additional parameter is the address of the actual callback.
- __ Move(r3, thunk_ref);
- __ jmp(&end_profiler_check);
-
- __ bind(&profiler_disabled);
- __ Move(r3, function_address);
- __ bind(&end_profiler_check);
-
- // Allocate HandleScope in callee-save registers.
- __ Move(r9, next_address);
- __ ldr(r4, MemOperand(r9, kNextOffset));
- __ ldr(r5, MemOperand(r9, kLimitOffset));
- __ ldr(r6, MemOperand(r9, kLevelOffset));
- __ add(r6, r6, Operand(1));
- __ str(r6, MemOperand(r9, kLevelOffset));
-
- if (FLAG_log_timer_events) {
- FrameScope frame(masm, StackFrame::MANUAL);
- __ PushSafepointRegisters();
- __ PrepareCallCFunction(1);
- __ Move(r0, ExternalReference::isolate_address(isolate));
- __ CallCFunction(ExternalReference::log_enter_external_function(), 1);
- __ PopSafepointRegisters();
- }
-
- // Native call returns to the DirectCEntry stub which redirects to the
- // return address pushed on stack (could have moved after GC).
- // DirectCEntry stub itself is generated early and never moves.
- DirectCEntryStub stub(isolate);
- stub.GenerateCall(masm, r3);
-
- if (FLAG_log_timer_events) {
- FrameScope frame(masm, StackFrame::MANUAL);
- __ PushSafepointRegisters();
- __ PrepareCallCFunction(1);
- __ Move(r0, ExternalReference::isolate_address(isolate));
- __ CallCFunction(ExternalReference::log_leave_external_function(), 1);
- __ PopSafepointRegisters();
- }
-
- Label promote_scheduled_exception;
- Label delete_allocated_handles;
- Label leave_exit_frame;
- Label return_value_loaded;
-
- // Load the value from ReturnValue.
- __ ldr(r0, return_value_operand);
- __ bind(&return_value_loaded);
- // No more valid handles (the result handle was the last one). Restore
- // previous handle scope.
- __ str(r4, MemOperand(r9, kNextOffset));
- if (__ emit_debug_code()) {
- __ ldr(r1, MemOperand(r9, kLevelOffset));
- __ cmp(r1, r6);
- __ Check(eq, AbortReason::kUnexpectedLevelAfterReturnFromApiCall);
- }
- __ sub(r6, r6, Operand(1));
- __ str(r6, MemOperand(r9, kLevelOffset));
- __ ldr(r6, MemOperand(r9, kLimitOffset));
- __ cmp(r5, r6);
- __ b(ne, &delete_allocated_handles);
-
- // Leave the API exit frame.
- __ bind(&leave_exit_frame);
- // LeaveExitFrame expects unwind space to be in a register.
- if (stack_space_operand != nullptr) {
- __ ldr(r4, *stack_space_operand);
- } else {
- __ mov(r4, Operand(stack_space));
- }
- __ LeaveExitFrame(false, r4, stack_space_operand != nullptr);
-
- // Check if the function scheduled an exception.
- __ LoadRoot(r4, RootIndex::kTheHoleValue);
- __ Move(r6, ExternalReference::scheduled_exception_address(isolate));
- __ ldr(r5, MemOperand(r6));
- __ cmp(r4, r5);
- __ b(ne, &promote_scheduled_exception);
-
- __ mov(pc, lr);
-
- // Re-throw by promoting a scheduled exception.
- __ bind(&promote_scheduled_exception);
- __ TailCallRuntime(Runtime::kPromoteScheduledException);
-
- // HandleScope limit has changed. Delete allocated extensions.
- __ bind(&delete_allocated_handles);
- __ str(r5, MemOperand(r9, kLimitOffset));
- __ mov(r4, r0);
- __ PrepareCallCFunction(1);
- __ Move(r0, ExternalReference::isolate_address(isolate));
- __ CallCFunction(ExternalReference::delete_handle_scope_extensions(), 1);
- __ mov(r0, r4);
- __ jmp(&leave_exit_frame);
-}
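The HandleScope bookkeeping this function performs around the native call can
be summarized in ordinary C++. A simplified model (illustrative only; field
names follow the comments above, and the error paths are omitted):

    struct HandleScopeData {
      void* next;   // next free handle slot
      void* limit;  // end of the current handle block
      int level;    // scope nesting depth
    };

    template <typename Callback>
    void CallWithHandleScope(HandleScopeData& data, Callback callback) {
      void* saved_next = data.next;
      void* saved_limit = data.limit;
      data.level++;            // enter the scope
      callback();              // the API function may allocate handles
      data.next = saved_next;  // drop handles the callback created
      data.level--;            // leave the scope
      if (data.limit != saved_limit) {
        // The callback grew the scope; the real code restores the limit and
        // additionally deletes the allocated extensions.
        data.limit = saved_limit;
      }
    }

    int main() {
      HandleScopeData data{nullptr, nullptr, 0};
      CallWithHandleScope(data, [] { /* pretend API work */ });
      return 0;
    }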
-
-void CallApiCallbackStub::Generate(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r4 : call_data
- // -- r2 : holder
- // -- r1 : api_function_address
- // -- cp : context
- // --
- // -- sp[0] : last argument
- // -- ...
- // -- sp[(argc - 1) * 4] : first argument
- // -- sp[argc * 4] : receiver
- // -----------------------------------
-
- Register call_data = r4;
- Register holder = r2;
- Register api_function_address = r1;
-
- typedef FunctionCallbackArguments FCA;
-
- STATIC_ASSERT(FCA::kArgsLength == 6);
- STATIC_ASSERT(FCA::kNewTargetIndex == 5);
- STATIC_ASSERT(FCA::kDataIndex == 4);
- STATIC_ASSERT(FCA::kReturnValueOffset == 3);
- STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
- STATIC_ASSERT(FCA::kIsolateIndex == 1);
- STATIC_ASSERT(FCA::kHolderIndex == 0);
-
- // new target
- __ PushRoot(RootIndex::kUndefinedValue);
-
- // call data
- __ push(call_data);
-
- Register scratch0 = call_data;
- Register scratch1 = r5;
- __ LoadRoot(scratch0, RootIndex::kUndefinedValue);
- // return value
- __ push(scratch0);
- // return value default
- __ push(scratch0);
- // isolate
- __ Move(scratch1, ExternalReference::isolate_address(masm->isolate()));
- __ push(scratch1);
- // holder
- __ push(holder);
-
- // Prepare arguments.
- __ mov(scratch0, sp);
-
- // Allocate the v8::Arguments structure in the arguments' space since
- // it's not controlled by GC.
- const int kApiStackSpace = 3;
-
- FrameScope frame_scope(masm, StackFrame::MANUAL);
- __ EnterExitFrame(false, kApiStackSpace);
-
- DCHECK(api_function_address != r0 && scratch0 != r0);
- // r0 = FunctionCallbackInfo&
- // The arguments are after the return address.
- __ add(r0, sp, Operand(1 * kPointerSize));
- // FunctionCallbackInfo::implicit_args_
- __ str(scratch0, MemOperand(r0, 0 * kPointerSize));
- // FunctionCallbackInfo::values_
- __ add(scratch1, scratch0,
- Operand((FCA::kArgsLength - 1 + argc()) * kPointerSize));
- __ str(scratch1, MemOperand(r0, 1 * kPointerSize));
- // FunctionCallbackInfo::length_ = argc
- __ mov(scratch0, Operand(argc()));
- __ str(scratch0, MemOperand(r0, 2 * kPointerSize));
-
- ExternalReference thunk_ref = ExternalReference::invoke_function_callback();
-
- AllowExternalCallThatCantCauseGC scope(masm);
- // Stores return the first JS argument.
- int return_value_offset = 2 + FCA::kReturnValueOffset;
- MemOperand return_value_operand(fp, return_value_offset * kPointerSize);
- const int stack_space = argc() + FCA::kArgsLength + 1;
- MemOperand* stack_space_operand = nullptr;
-
- CallApiFunctionAndReturn(masm, api_function_address, thunk_ref, stack_space,
- stack_space_operand, return_value_operand);
-}
-
-
-void CallApiGetterStub::Generate(MacroAssembler* masm) {
- // Build v8::PropertyCallbackInfo::args_ array on the stack and push property
- // name below the exit frame to make GC aware of them.
- STATIC_ASSERT(PropertyCallbackArguments::kShouldThrowOnErrorIndex == 0);
- STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 1);
- STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 2);
- STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 3);
- STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 4);
- STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 5);
- STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 6);
- STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 7);
-
- Register receiver = ApiGetterDescriptor::ReceiverRegister();
- Register holder = ApiGetterDescriptor::HolderRegister();
- Register callback = ApiGetterDescriptor::CallbackRegister();
- Register scratch = r4;
- DCHECK(!AreAliased(receiver, holder, callback, scratch));
-
- Register api_function_address = r2;
-
- __ push(receiver);
- // Push data from AccessorInfo.
- __ ldr(scratch, FieldMemOperand(callback, AccessorInfo::kDataOffset));
- __ push(scratch);
- __ LoadRoot(scratch, RootIndex::kUndefinedValue);
- __ Push(scratch, scratch);
- __ Move(scratch, ExternalReference::isolate_address(isolate()));
- __ Push(scratch, holder);
- __ Push(Smi::kZero); // should_throw_on_error -> false
- __ ldr(scratch, FieldMemOperand(callback, AccessorInfo::kNameOffset));
- __ push(scratch);
- // v8::PropertyCallbackInfo::args_ array and name handle.
- const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
-
- // Load address of v8::PropertyAccessorInfo::args_ array and name handle.
- __ mov(r0, sp); // r0 = Handle<Name>
- __ add(r1, r0, Operand(1 * kPointerSize)); // r1 = v8::PCI::args_
-
- const int kApiStackSpace = 1;
- FrameScope frame_scope(masm, StackFrame::MANUAL);
- __ EnterExitFrame(false, kApiStackSpace);
-
- // Create a v8::PropertyCallbackInfo object on the stack and initialize
- // its args_ field.
- __ str(r1, MemOperand(sp, 1 * kPointerSize));
- __ add(r1, sp, Operand(1 * kPointerSize)); // r1 = v8::PropertyCallbackInfo&
-
- ExternalReference thunk_ref =
- ExternalReference::invoke_accessor_getter_callback();
-
- __ ldr(scratch, FieldMemOperand(callback, AccessorInfo::kJsGetterOffset));
- __ ldr(api_function_address,
- FieldMemOperand(scratch, Foreign::kForeignAddressOffset));
-
- // +3 is to skip prolog, return address and name handle.
- MemOperand return_value_operand(
- fp, (PropertyCallbackArguments::kReturnValueOffset + 3) * kPointerSize);
- CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
- kStackUnwindSpace, nullptr, return_value_operand);
-}
-
-#undef __
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_TARGET_ARCH_ARM
diff --git a/deps/v8/src/arm/code-stubs-arm.h b/deps/v8/src/arm/code-stubs-arm.h
deleted file mode 100644
index a9b82210e0..0000000000
--- a/deps/v8/src/arm/code-stubs-arm.h
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_ARM_CODE_STUBS_ARM_H_
-#define V8_ARM_CODE_STUBS_ARM_H_
-
-namespace v8 {
-namespace internal {
-
-// Trampoline stub to call into native code. To call safely into native code
-// in the presence of compacting GC (which can move code objects) we need to
-// keep the code which called into native pinned in memory. Currently the
-// simplest approach is to generate such a stub early enough so it can never be
-// moved by GC.
-class DirectCEntryStub: public PlatformCodeStub {
- public:
- explicit DirectCEntryStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
- void GenerateCall(MacroAssembler* masm, Register target);
-
- private:
- Movability NeedsImmovableCode() override { return kImmovable; }
-
- DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
- DEFINE_PLATFORM_CODE_STUB(DirectCEntry, PlatformCodeStub);
-};
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_ARM_CODE_STUBS_ARM_H_
diff --git a/deps/v8/src/arm/codegen-arm.cc b/deps/v8/src/arm/codegen-arm.cc
deleted file mode 100644
index 7dc4ced321..0000000000
--- a/deps/v8/src/arm/codegen-arm.cc
+++ /dev/null
@@ -1,303 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#if V8_TARGET_ARCH_ARM
-
-#include <memory>
-
-#include "src/arm/assembler-arm-inl.h"
-#include "src/arm/simulator-arm.h"
-#include "src/codegen.h"
-#include "src/macro-assembler.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ masm.
-
-#if defined(V8_HOST_ARCH_ARM)
-
-MemCopyUint8Function CreateMemCopyUint8Function(MemCopyUint8Function stub) {
-#if defined(USE_SIMULATOR)
- return stub;
-#else
- v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
- size_t allocated = 0;
- byte* buffer = AllocatePage(page_allocator,
- page_allocator->GetRandomMmapAddr(), &allocated);
- if (buffer == nullptr) return stub;
-
- MacroAssembler masm(AssemblerOptions{}, buffer, static_cast<int>(allocated));
-
- Register dest = r0;
- Register src = r1;
- Register chars = r2;
- Register temp1 = r3;
- Label less_4;
-
- if (CpuFeatures::IsSupported(NEON)) {
- CpuFeatureScope scope(&masm, NEON);
- Label loop, less_256, less_128, less_64, less_32, _16_or_less, _8_or_less;
- Label size_less_than_8;
- __ pld(MemOperand(src, 0));
-
- __ cmp(chars, Operand(8));
- __ b(lt, &size_less_than_8);
- __ cmp(chars, Operand(32));
- __ b(lt, &less_32);
- if (CpuFeatures::dcache_line_size() == 32) {
- __ pld(MemOperand(src, 32));
- }
- __ cmp(chars, Operand(64));
- __ b(lt, &less_64);
- __ pld(MemOperand(src, 64));
- if (CpuFeatures::dcache_line_size() == 32) {
- __ pld(MemOperand(src, 96));
- }
- __ cmp(chars, Operand(128));
- __ b(lt, &less_128);
- __ pld(MemOperand(src, 128));
- if (CpuFeatures::dcache_line_size() == 32) {
- __ pld(MemOperand(src, 160));
- }
- __ pld(MemOperand(src, 192));
- if (CpuFeatures::dcache_line_size() == 32) {
- __ pld(MemOperand(src, 224));
- }
- __ cmp(chars, Operand(256));
- __ b(lt, &less_256);
- __ sub(chars, chars, Operand(256));
-
- __ bind(&loop);
- __ pld(MemOperand(src, 256));
- __ vld1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(src, PostIndex));
- if (CpuFeatures::dcache_line_size() == 32) {
- __ pld(MemOperand(src, 256));
- }
- __ vld1(Neon8, NeonListOperand(d4, 4), NeonMemOperand(src, PostIndex));
- __ sub(chars, chars, Operand(64), SetCC);
- __ vst1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(dest, PostIndex));
- __ vst1(Neon8, NeonListOperand(d4, 4), NeonMemOperand(dest, PostIndex));
- __ b(ge, &loop);
- __ add(chars, chars, Operand(256));
-
- __ bind(&less_256);
- __ vld1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(src, PostIndex));
- __ vld1(Neon8, NeonListOperand(d4, 4), NeonMemOperand(src, PostIndex));
- __ sub(chars, chars, Operand(128));
- __ vst1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(dest, PostIndex));
- __ vst1(Neon8, NeonListOperand(d4, 4), NeonMemOperand(dest, PostIndex));
- __ vld1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(src, PostIndex));
- __ vld1(Neon8, NeonListOperand(d4, 4), NeonMemOperand(src, PostIndex));
- __ vst1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(dest, PostIndex));
- __ vst1(Neon8, NeonListOperand(d4, 4), NeonMemOperand(dest, PostIndex));
- __ cmp(chars, Operand(64));
- __ b(lt, &less_64);
-
- __ bind(&less_128);
- __ vld1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(src, PostIndex));
- __ vld1(Neon8, NeonListOperand(d4, 4), NeonMemOperand(src, PostIndex));
- __ sub(chars, chars, Operand(64));
- __ vst1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(dest, PostIndex));
- __ vst1(Neon8, NeonListOperand(d4, 4), NeonMemOperand(dest, PostIndex));
-
- __ bind(&less_64);
- __ cmp(chars, Operand(32));
- __ b(lt, &less_32);
- __ vld1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(src, PostIndex));
- __ vst1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(dest, PostIndex));
- __ sub(chars, chars, Operand(32));
-
- __ bind(&less_32);
- __ cmp(chars, Operand(16));
- __ b(le, &_16_or_less);
- __ vld1(Neon8, NeonListOperand(d0, 2), NeonMemOperand(src, PostIndex));
- __ vst1(Neon8, NeonListOperand(d0, 2), NeonMemOperand(dest, PostIndex));
- __ sub(chars, chars, Operand(16));
-
- __ bind(&_16_or_less);
- __ cmp(chars, Operand(8));
- __ b(le, &_8_or_less);
- __ vld1(Neon8, NeonListOperand(d0), NeonMemOperand(src, PostIndex));
- __ vst1(Neon8, NeonListOperand(d0), NeonMemOperand(dest, PostIndex));
- __ sub(chars, chars, Operand(8));
-
- // Do a last copy which may overlap with the previous copy (up to 8 bytes).
- __ bind(&_8_or_less);
- __ rsb(chars, chars, Operand(8));
- __ sub(src, src, Operand(chars));
- __ sub(dest, dest, Operand(chars));
- __ vld1(Neon8, NeonListOperand(d0), NeonMemOperand(src));
- __ vst1(Neon8, NeonListOperand(d0), NeonMemOperand(dest));
-
- __ Ret();
-
- __ bind(&size_less_than_8);
-
- __ bic(temp1, chars, Operand(0x3), SetCC);
- __ b(&less_4, eq);
- __ ldr(temp1, MemOperand(src, 4, PostIndex));
- __ str(temp1, MemOperand(dest, 4, PostIndex));
- } else {
- UseScratchRegisterScope temps(&masm);
- Register temp2 = temps.Acquire();
- Label loop;
-
- __ bic(temp2, chars, Operand(0x3), SetCC);
- __ b(&less_4, eq);
- __ add(temp2, dest, temp2);
-
- __ bind(&loop);
- __ ldr(temp1, MemOperand(src, 4, PostIndex));
- __ str(temp1, MemOperand(dest, 4, PostIndex));
- __ cmp(dest, temp2);
- __ b(&loop, ne);
- }
-
- __ bind(&less_4);
- __ mov(chars, Operand(chars, LSL, 31), SetCC);
- // bit0 => Z (ne), bit1 => C (cs)
- __ ldrh(temp1, MemOperand(src, 2, PostIndex), cs);
- __ strh(temp1, MemOperand(dest, 2, PostIndex), cs);
- __ ldrb(temp1, MemOperand(src), ne);
- __ strb(temp1, MemOperand(dest), ne);
- __ Ret();
-
- CodeDesc desc;
- masm.GetCode(nullptr, &desc);
- DCHECK(!RelocInfo::RequiresRelocationAfterCodegen(desc));
-
- Assembler::FlushICache(buffer, allocated);
- CHECK(SetPermissions(page_allocator, buffer, allocated,
- PageAllocator::kReadExecute));
- return FUNCTION_CAST<MemCopyUint8Function>(buffer);
-#endif
-}
-
-
-// Convert 8-bit to 16-bit characters. The number of characters to copy must be at least 8.
-MemCopyUint16Uint8Function CreateMemCopyUint16Uint8Function(
- MemCopyUint16Uint8Function stub) {
-#if defined(USE_SIMULATOR)
- return stub;
-#else
- v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
- size_t allocated = 0;
- byte* buffer = AllocatePage(page_allocator,
- page_allocator->GetRandomMmapAddr(), &allocated);
- if (buffer == nullptr) return stub;
-
- MacroAssembler masm(AssemblerOptions{}, buffer, static_cast<int>(allocated));
-
- Register dest = r0;
- Register src = r1;
- Register chars = r2;
- if (CpuFeatures::IsSupported(NEON)) {
- CpuFeatureScope scope(&masm, NEON);
- Register temp = r3;
- Label loop;
-
- __ bic(temp, chars, Operand(0x7));
- __ sub(chars, chars, Operand(temp));
- __ add(temp, dest, Operand(temp, LSL, 1));
-
- __ bind(&loop);
- __ vld1(Neon8, NeonListOperand(d0), NeonMemOperand(src, PostIndex));
- __ vmovl(NeonU8, q0, d0);
- __ vst1(Neon16, NeonListOperand(d0, 2), NeonMemOperand(dest, PostIndex));
- __ cmp(dest, temp);
- __ b(&loop, ne);
-
- // Do a last copy which will overlap with the previous copy (1 to 8 bytes).
- __ rsb(chars, chars, Operand(8));
- __ sub(src, src, Operand(chars));
- __ sub(dest, dest, Operand(chars, LSL, 1));
- __ vld1(Neon8, NeonListOperand(d0), NeonMemOperand(src));
- __ vmovl(NeonU8, q0, d0);
- __ vst1(Neon16, NeonListOperand(d0, 2), NeonMemOperand(dest));
- __ Ret();
- } else {
- UseScratchRegisterScope temps(&masm);
-
- Register temp1 = r3;
- Register temp2 = temps.Acquire();
- Register temp3 = lr;
- Register temp4 = r4;
- Label loop;
- Label not_two;
-
- __ Push(lr, r4);
- __ bic(temp2, chars, Operand(0x3));
- __ add(temp2, dest, Operand(temp2, LSL, 1));
-
- __ bind(&loop);
- __ ldr(temp1, MemOperand(src, 4, PostIndex));
- __ uxtb16(temp3, temp1);
- __ uxtb16(temp4, temp1, 8);
- __ pkhbt(temp1, temp3, Operand(temp4, LSL, 16));
- __ str(temp1, MemOperand(dest));
- __ pkhtb(temp1, temp4, Operand(temp3, ASR, 16));
- __ str(temp1, MemOperand(dest, 4));
- __ add(dest, dest, Operand(8));
- __ cmp(dest, temp2);
- __ b(&loop, ne);
-
- __ mov(chars, Operand(chars, LSL, 31), SetCC); // bit0 => ne, bit1 => cs
- __ b(&not_two, cc);
- __ ldrh(temp1, MemOperand(src, 2, PostIndex));
- __ uxtb(temp3, temp1, 8);
- __ mov(temp3, Operand(temp3, LSL, 16));
- __ uxtab(temp3, temp3, temp1);
- __ str(temp3, MemOperand(dest, 4, PostIndex));
- __ bind(&not_two);
- __ ldrb(temp1, MemOperand(src), ne);
- __ strh(temp1, MemOperand(dest), ne);
- __ Pop(pc, r4);
- }
-
- CodeDesc desc;
- masm.GetCode(nullptr, &desc);
-
- Assembler::FlushICache(buffer, allocated);
- CHECK(SetPermissions(page_allocator, buffer, allocated,
- PageAllocator::kReadExecute));
- return FUNCTION_CAST<MemCopyUint16Uint8Function>(buffer);
-#endif
-}
-#endif
-
-UnaryMathFunction CreateSqrtFunction() {
-#if defined(USE_SIMULATOR)
- return nullptr;
-#else
- v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
- size_t allocated = 0;
- byte* buffer = AllocatePage(page_allocator,
- page_allocator->GetRandomMmapAddr(), &allocated);
- if (buffer == nullptr) return nullptr;
-
- MacroAssembler masm(AssemblerOptions{}, buffer, static_cast<int>(allocated));
-
- __ MovFromFloatParameter(d0);
- __ vsqrt(d0, d0);
- __ MovToFloatResult(d0);
- __ Ret();
-
- CodeDesc desc;
- masm.GetCode(nullptr, &desc);
- DCHECK(!RelocInfo::RequiresRelocationAfterCodegen(desc));
-
- Assembler::FlushICache(buffer, allocated);
- CHECK(SetPermissions(page_allocator, buffer, allocated,
- PageAllocator::kReadExecute));
- return FUNCTION_CAST<UnaryMathFunction>(buffer);
-#endif
-}
-
-#undef __
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_TARGET_ARCH_ARM
diff --git a/deps/v8/src/arm/constants-arm.h b/deps/v8/src/arm/constants-arm.h
index b012340418..fa9791a0e0 100644
--- a/deps/v8/src/arm/constants-arm.h
+++ b/deps/v8/src/arm/constants-arm.h
@@ -36,21 +36,23 @@ inline int DecodeConstantPoolLength(int instr) {
}
// Number of registers in normal ARM mode.
-const int kNumRegisters = 16;
+constexpr int kNumRegisters = 16;
+constexpr int kRegSizeInBitsLog2 = 5;
// VFP support.
-const int kNumVFPSingleRegisters = 32;
-const int kNumVFPDoubleRegisters = 32;
-const int kNumVFPRegisters = kNumVFPSingleRegisters + kNumVFPDoubleRegisters;
+constexpr int kNumVFPSingleRegisters = 32;
+constexpr int kNumVFPDoubleRegisters = 32;
+constexpr int kNumVFPRegisters =
+ kNumVFPSingleRegisters + kNumVFPDoubleRegisters;
// PC is register 15.
-const int kPCRegister = 15;
-const int kNoRegister = -1;
+constexpr int kPCRegister = 15;
+constexpr int kNoRegister = -1;
// Used in embedded constant pool builder - max reach in bits for
// various load instructions (unsigned)
-const int kLdrMaxReachBits = 12;
-const int kVldrMaxReachBits = 10;
+constexpr int kLdrMaxReachBits = 12;
+constexpr int kVldrMaxReachBits = 10;
// Actual value of root register is offset from the root array's start
// to take advantage of negative displacement values. Loads allow a uint12
diff --git a/deps/v8/src/arm/cpu-arm.cc b/deps/v8/src/arm/cpu-arm.cc
index f5d2ab19d1..2eb1aee63d 100644
--- a/deps/v8/src/arm/cpu-arm.cc
+++ b/deps/v8/src/arm/cpu-arm.cc
@@ -14,8 +14,7 @@
#if V8_TARGET_ARCH_ARM
-#include "src/assembler.h"
-#include "src/macro-assembler.h"
+#include "src/cpu-features.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/arm/deoptimizer-arm.cc b/deps/v8/src/arm/deoptimizer-arm.cc
index 032f610edc..edfb9c6096 100644
--- a/deps/v8/src/arm/deoptimizer-arm.cc
+++ b/deps/v8/src/arm/deoptimizer-arm.cc
@@ -4,6 +4,7 @@
#include "src/assembler-inl.h"
#include "src/deoptimizer.h"
+#include "src/macro-assembler.h"
#include "src/objects-inl.h"
#include "src/register-configuration.h"
#include "src/safepoint-table.h"
@@ -11,14 +12,14 @@
namespace v8 {
namespace internal {
-const int Deoptimizer::table_entry_size_ = 8;
-
-#define __ masm()->
+#define __ masm->
// This code tries to be close to ia32 code so that any changes can be
// easily ported.
-void Deoptimizer::TableEntryGenerator::Generate() {
- GeneratePrologue();
+void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
+ Isolate* isolate,
+ DeoptimizeKind deopt_kind) {
+ NoRootArrayScope no_root_array(masm);
// Save all general purpose registers before messing with them.
const int kNumberOfRegisters = Register::kNumRegisters;
@@ -32,9 +33,9 @@ void Deoptimizer::TableEntryGenerator::Generate() {
// Save all allocatable VFP registers before messing with them.
{
// We use a run-time check for VFP32DREGS.
- CpuFeatureScope scope(masm(), VFP32DREGS,
+ CpuFeatureScope scope(masm, VFP32DREGS,
CpuFeatureScope::kDontCheckSupported);
- UseScratchRegisterScope temps(masm());
+ UseScratchRegisterScope temps(masm);
Register scratch = temps.Acquire();
// Check CPU flags for number of registers, setting the Z condition flag.
@@ -56,25 +57,24 @@ void Deoptimizer::TableEntryGenerator::Generate() {
__ stm(db_w, sp, restored_regs | sp.bit() | lr.bit() | pc.bit());
{
- UseScratchRegisterScope temps(masm());
+ UseScratchRegisterScope temps(masm);
Register scratch = temps.Acquire();
__ mov(scratch, Operand(ExternalReference::Create(
- IsolateAddressId::kCEntryFPAddress, isolate())));
+ IsolateAddressId::kCEntryFPAddress, isolate)));
__ str(fp, MemOperand(scratch));
}
const int kSavedRegistersAreaSize =
(kNumberOfRegisters * kPointerSize) + kDoubleRegsSize + kFloatRegsSize;
- // Get the bailout id from the stack.
- __ ldr(r2, MemOperand(sp, kSavedRegistersAreaSize));
+ // The bailout id is passed as r10 by the caller.
+ __ mov(r2, r10);
// Get the address of the location in the code object (r3) (return
// address for lazy deoptimization) and compute the fp-to-sp delta in
// register r4.
__ mov(r3, lr);
- // Correct one word for bailout id.
- __ add(r4, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
+ __ add(r4, sp, Operand(kSavedRegistersAreaSize));
__ sub(r4, fp, r4);
// Allocate a new deoptimizer object.
@@ -86,15 +86,15 @@ void Deoptimizer::TableEntryGenerator::Generate() {
__ JumpIfSmi(r1, &context_check);
__ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ bind(&context_check);
- __ mov(r1, Operand(static_cast<int>(deopt_kind())));
+ __ mov(r1, Operand(static_cast<int>(deopt_kind)));
// r2: bailout id already loaded.
// r3: code address or 0 already loaded.
__ str(r4, MemOperand(sp, 0 * kPointerSize)); // Fp-to-sp delta.
- __ mov(r5, Operand(ExternalReference::isolate_address(isolate())));
+ __ mov(r5, Operand(ExternalReference::isolate_address(isolate)));
__ str(r5, MemOperand(sp, 1 * kPointerSize)); // Isolate.
// Call Deoptimizer::New().
{
- AllowExternalCallThatCantCauseGC scope(masm());
+ AllowExternalCallThatCantCauseGC scope(masm);
__ CallCFunction(ExternalReference::new_deoptimizer_function(), 6);
}
@@ -134,8 +134,8 @@ void Deoptimizer::TableEntryGenerator::Generate() {
__ str(r2, MemOperand(r1, dst_offset));
}
- // Remove the bailout id and the saved registers from the stack.
- __ add(sp, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
+ // Remove the saved registers from the stack.
+ __ add(sp, sp, Operand(kSavedRegistersAreaSize));
// Compute a pointer to the unwinding limit in register r2; that is
// the first stack slot not part of the input frame.
@@ -163,7 +163,7 @@ void Deoptimizer::TableEntryGenerator::Generate() {
__ PrepareCallCFunction(1);
// Call Deoptimizer::ComputeOutputFrames().
{
- AllowExternalCallThatCantCauseGC scope(masm());
+ AllowExternalCallThatCantCauseGC scope(masm);
__ CallCFunction(ExternalReference::compute_output_frames_function(), 1);
}
__ pop(r0); // Restore deoptimizer object (class Deoptimizer).
@@ -221,12 +221,10 @@ void Deoptimizer::TableEntryGenerator::Generate() {
// Restore the registers from the stack.
__ ldm(ia_w, sp, restored_regs); // all but pc registers.
- __ InitializeRootRegister();
-
// Remove sp, lr and pc.
__ Drop(3);
{
- UseScratchRegisterScope temps(masm());
+ UseScratchRegisterScope temps(masm);
Register scratch = temps.Acquire();
__ pop(scratch); // get continuation, leave pc on stack
__ pop(lr);
@@ -235,59 +233,6 @@ void Deoptimizer::TableEntryGenerator::Generate() {
__ stop("Unreachable.");
}
-
-void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
- // Create a sequence of deoptimization entries.
- // Note that registers are still live when jumping to an entry.
-
- // We need to be able to generate immediates up to kMaxNumberOfEntries. On
- // ARMv7, we can use movw (with a maximum immediate of 0xFFFF). On ARMv6, we
- // need two instructions.
- STATIC_ASSERT((kMaxNumberOfEntries - 1) <= 0xFFFF);
- UseScratchRegisterScope temps(masm());
- Register scratch = temps.Acquire();
- if (CpuFeatures::IsSupported(ARMv7)) {
- CpuFeatureScope scope(masm(), ARMv7);
- Label done;
- for (int i = 0; i < count(); i++) {
- int start = masm()->pc_offset();
- USE(start);
- __ movw(scratch, i);
- __ b(&done);
- DCHECK_EQ(table_entry_size_, masm()->pc_offset() - start);
- }
- __ bind(&done);
- } else {
- // We want to keep table_entry_size_ == 8 (since this is the common case),
- // but we need two instructions to load most immediates over 0xFF. To handle
- // this, we set the low byte in the main table, and then set the high byte
- // in a separate table if necessary.
- Label high_fixes[256];
- int high_fix_max = (count() - 1) >> 8;
- DCHECK_GT(arraysize(high_fixes), static_cast<size_t>(high_fix_max));
- for (int i = 0; i < count(); i++) {
- int start = masm()->pc_offset();
- USE(start);
- __ mov(scratch, Operand(i & 0xFF)); // Set the low byte.
- __ b(&high_fixes[i >> 8]); // Jump to the secondary table.
- DCHECK_EQ(table_entry_size_, masm()->pc_offset() - start);
- }
- // Generate the secondary table, to set the high byte.
- for (int high = 1; high <= high_fix_max; high++) {
- __ bind(&high_fixes[high]);
- __ orr(scratch, scratch, Operand(high << 8));
- // If this isn't the last entry, emit a branch to the end of the table.
- // The last entry can just fall through.
- if (high < high_fix_max) __ b(&high_fixes[0]);
- }
- // Bind high_fixes[0] last, for indices like 0x00**. This case requires no
- // fix-up, so for (common) small tables we can jump here, then just fall
- // through with no additional branch.
- __ bind(&high_fixes[0]);
- }
- __ push(scratch);
-}
-
bool Deoptimizer::PadTopOfStackRegister() { return false; }
void FrameDescription::SetCallerPc(unsigned offset, intptr_t value) {
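The GeneratePrologue removed above built the deoptimization entry table with a
low-byte/high-byte fix-up scheme on pre-ARMv7: each entry loads only i & 0xFF,
then branches into a secondary table that ORs in i >> 8. A host-side sketch of
that reconstruction (plain standalone C++, not V8 code), checking it is
lossless for every index the two tables can represent:

    #include <cassert>
    #include <cstdint>

    // Entry i executes: mov scratch, #(i & 0xFF); b high_fixes[i >> 8].
    // The fix-up entry for a given high byte executes:
    //   orr scratch, scratch, #(high << 8).
    uint32_t ReconstructEntryIndex(uint32_t low_byte, uint32_t high) {
      uint32_t scratch = low_byte;
      scratch |= high << 8;
      return scratch;
    }

    int main() {
      for (uint32_t i = 0; i < 0x300; ++i) {
        assert(ReconstructEntryIndex(i & 0xFF, i >> 8) == i);
      }
      return 0;
    }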
diff --git a/deps/v8/src/arm/disasm-arm.cc b/deps/v8/src/arm/disasm-arm.cc
index 0c6ef132f8..3f82f43e84 100644
--- a/deps/v8/src/arm/disasm-arm.cc
+++ b/deps/v8/src/arm/disasm-arm.cc
@@ -30,18 +30,15 @@
#if V8_TARGET_ARCH_ARM
+#include "src/arm/assembler-arm.h"
#include "src/arm/constants-arm.h"
#include "src/base/bits.h"
#include "src/base/platform/platform.h"
#include "src/disasm.h"
-#include "src/macro-assembler.h"
-
namespace v8 {
namespace internal {
-const auto GetRegConfig = RegisterConfiguration::Default;
-
//------------------------------------------------------------------------------
// Decoder decodes and disassembles instructions into an output buffer.
@@ -685,8 +682,9 @@ int Decoder::FormatOption(Instruction* instr, const char* format) {
return -1;
}
}
- out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%p",
- static_cast<void*>(addr));
+ out_buffer_pos_ +=
+ SNPrintF(out_buffer_ + out_buffer_pos_, "0x%08" PRIxPTR,
+ reinterpret_cast<uintptr_t>(addr));
return 1;
}
case 'S':
@@ -2677,7 +2675,7 @@ const char* NameConverter::NameOfConstant(byte* addr) const {
const char* NameConverter::NameOfCPURegister(int reg) const {
- return v8::internal::GetRegConfig()->GetGeneralRegisterName(reg);
+ return RegisterName(i::Register::from_code(reg));
}
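The disassembler change above swaps "%p" for a fixed-width PRIxPTR format;
"%p" output is implementation-defined, so the new form gives identical,
zero-padded addresses on every platform. A small standalone C++ comparison
(using snprintf in place of V8's SNPrintF wrapper):

    #include <cinttypes>
    #include <cstdint>
    #include <cstdio>

    int main() {
      void* addr = reinterpret_cast<void*>(0xBEEF);
      char buf[32];
      snprintf(buf, sizeof(buf), "%p", addr);  // form varies by libc
      printf("%s\n", buf);
      snprintf(buf, sizeof(buf), "0x%08" PRIxPTR,
               reinterpret_cast<uintptr_t>(addr));
      printf("%s\n", buf);  // always zero-padded, e.g. "0x0000beef"
    }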
diff --git a/deps/v8/src/arm/frame-constants-arm.cc b/deps/v8/src/arm/frame-constants-arm.cc
index bb4cb5dd76..f1cb8211b8 100644
--- a/deps/v8/src/arm/frame-constants-arm.cc
+++ b/deps/v8/src/arm/frame-constants-arm.cc
@@ -4,15 +4,12 @@
#if V8_TARGET_ARCH_ARM
-#include "src/assembler.h"
+#include "src/arm/frame-constants-arm.h"
+
+#include "src/assembler-inl.h"
#include "src/frame-constants.h"
#include "src/macro-assembler.h"
-#include "src/arm/assembler-arm-inl.h"
-#include "src/arm/assembler-arm.h"
-#include "src/arm/frame-constants-arm.h"
-#include "src/arm/macro-assembler-arm.h"
-
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/arm/frame-constants-arm.h b/deps/v8/src/arm/frame-constants-arm.h
index 73e171009d..af6f045667 100644
--- a/deps/v8/src/arm/frame-constants-arm.h
+++ b/deps/v8/src/arm/frame-constants-arm.h
@@ -13,8 +13,14 @@ namespace internal {
class EntryFrameConstants : public AllStatic {
public:
+ // This is the offset to where JSEntry pushes the current value of
+ // Isolate::c_entry_fp onto the stack.
static constexpr int kCallerFPOffset =
-(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize);
+
+ // Stack offsets for arguments passed to JSEntry.
+ static constexpr int kArgcOffset = +0 * kSystemPointerSize;
+ static constexpr int kArgvOffset = +1 * kSystemPointerSize;
};
class ExitFrameConstants : public TypedFrameConstants {
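The two constants added to EntryFrameConstants describe where JSEntry finds
its stack-passed arguments. A minimal sketch of what they resolve to, assuming
32-bit ARM with kSystemPointerSize == 4 (illustrative, not V8 code):

    // On entry to JSEntry: argc at [sp + 0], argv at [sp + 4].
    constexpr int kSystemPointerSize = 4;
    constexpr int kArgcOffset = +0 * kSystemPointerSize;
    constexpr int kArgvOffset = +1 * kSystemPointerSize;
    static_assert(kArgcOffset == 0 && kArgvOffset == 4,
                  "JSEntry argument slot layout");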
diff --git a/deps/v8/src/arm/interface-descriptors-arm.cc b/deps/v8/src/arm/interface-descriptors-arm.cc
index f3be7a7c4a..887a183182 100644
--- a/deps/v8/src/arm/interface-descriptors-arm.cc
+++ b/deps/v8/src/arm/interface-descriptors-arm.cc
@@ -6,6 +6,8 @@
#include "src/interface-descriptors.h"
+#include "src/frames.h"
+
namespace v8 {
namespace internal {
@@ -70,12 +72,6 @@ void TypeofDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void CallFunctionDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {r1};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
void CallTrampolineDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// r0 : number of arguments
@@ -207,10 +203,9 @@ void ArgumentsAdaptorDescriptor::InitializePlatformSpecific(
void ApiCallbackDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
- JavaScriptFrame::context_register(), // callee context
- r4, // call_data
- r2, // holder
- r1, // api_function_address
+ JavaScriptFrame::context_register(), // kTargetContext
+ r1, // kApiFunctionAddress
+ r2, // kArgc
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
@@ -262,6 +257,12 @@ void FrameDropperTrampolineDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+void RunMicrotasksEntryDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {r0, r1};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
} // namespace internal
} // namespace v8
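Each InitializePlatformSpecific override above is just an ordered register
assignment for one call descriptor; the new RunMicrotasksEntryDescriptor pins
its two parameters to r0 and r1. A standalone model of the pattern, with
simplified stand-ins for Register and CallInterfaceDescriptorData:

    #include <cstddef>
    #include <vector>

    struct Register { int code; };

    struct CallDescriptorData {
      std::vector<Register> param_registers;
      void InitializePlatformSpecific(size_t count, const Register* regs) {
        // Parameter i will be passed in param_registers[i].
        param_registers.assign(regs, regs + count);
      }
    };

    int main() {
      constexpr Register r0{0}, r1{1};
      Register registers[] = {r0, r1};
      CallDescriptorData data;
      data.InitializePlatformSpecific(2, registers);
      return 0;
    }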
diff --git a/deps/v8/src/arm/macro-assembler-arm.cc b/deps/v8/src/arm/macro-assembler-arm.cc
index cdf9dad1d9..acf96b31c2 100644
--- a/deps/v8/src/arm/macro-assembler-arm.cc
+++ b/deps/v8/src/arm/macro-assembler-arm.cc
@@ -13,39 +13,28 @@
#include "src/bootstrapper.h"
#include "src/callable.h"
#include "src/code-factory.h"
-#include "src/code-stubs.h"
#include "src/counters.h"
#include "src/debug/debug.h"
#include "src/double.h"
#include "src/external-reference-table.h"
#include "src/frames-inl.h"
-#include "src/instruction-stream.h"
+#include "src/macro-assembler.h"
#include "src/objects-inl.h"
#include "src/register-configuration.h"
#include "src/runtime/runtime.h"
+#include "src/snapshot/embedded-data.h"
#include "src/snapshot/snapshot.h"
#include "src/wasm/wasm-code-manager.h"
+// Satisfy cpplint check, but don't include platform-specific header. It is
+// included recursively via macro-assembler.h.
+#if 0
#include "src/arm/macro-assembler-arm.h"
+#endif
namespace v8 {
namespace internal {
-MacroAssembler::MacroAssembler(Isolate* isolate,
- const AssemblerOptions& options, void* buffer,
- int size, CodeObjectRequired create_code_object)
- : TurboAssembler(isolate, options, buffer, size, create_code_object) {
- if (create_code_object == CodeObjectRequired::kYes) {
- // Unlike TurboAssembler, which can be used off the main thread and may not
- // allocate, macro assembler creates its own copy of the self-reference
- // marker in order to disambiguate between self-references during nested
- // code generation (e.g.: codegen of the current object triggers stub
- // compilation through CodeStub::GetCode()).
- code_object_ = Handle<HeapObject>::New(
- *isolate->factory()->NewSelfReferenceMarker(), isolate);
- }
-}
-
int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
Register exclusion1,
Register exclusion2,
@@ -129,8 +118,7 @@ int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
void TurboAssembler::LoadFromConstantsTable(Register destination,
int constant_index) {
- DCHECK(isolate()->heap()->RootCanBeTreatedAsConstant(
- RootIndex::kBuiltinsConstantsTable));
+ DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kBuiltinsConstantsTable));
// The ldr call below could end up clobbering ip when the offset does not fit
// into 12 bits (and thus needs to be loaded from the constant pool). In that
@@ -185,36 +173,45 @@ void TurboAssembler::Jump(Address target, RelocInfo::Mode rmode,
void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
Condition cond) {
DCHECK(RelocInfo::IsCodeTarget(rmode));
- if (FLAG_embedded_builtins) {
- int builtin_index = Builtins::kNoBuiltinId;
- bool target_is_isolate_independent_builtin =
- isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
- Builtins::IsIsolateIndependent(builtin_index);
- if (target_is_isolate_independent_builtin &&
- options().use_pc_relative_calls_and_jumps) {
- int32_t code_target_index = AddCodeTarget(code);
- b(code_target_index * kInstrSize, cond, RelocInfo::RELATIVE_CODE_TARGET);
- return;
- } else if (root_array_available_ && options().isolate_independent_code) {
- UseScratchRegisterScope temps(this);
- Register scratch = temps.Acquire();
- IndirectLoadConstant(scratch, code);
- add(scratch, scratch, Operand(Code::kHeaderSize - kHeapObjectTag));
- Jump(scratch, cond);
- return;
- } else if (target_is_isolate_independent_builtin &&
- options().inline_offheap_trampolines) {
- // Inline the trampoline.
- RecordCommentForOffHeapTrampoline(builtin_index);
- EmbeddedData d = EmbeddedData::FromBlob();
- Address entry = d.InstructionStartOfBuiltin(builtin_index);
- // Use ip directly instead of using UseScratchRegisterScope, as we do not
- // preserve scratch registers across calls.
- mov(ip, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
- Jump(ip, cond);
- return;
- }
+ DCHECK_IMPLIES(options().isolate_independent_code,
+ Builtins::IsIsolateIndependentBuiltin(*code));
+ DCHECK_IMPLIES(options().use_pc_relative_calls_and_jumps,
+ Builtins::IsIsolateIndependentBuiltin(*code));
+
+ int builtin_index = Builtins::kNoBuiltinId;
+ bool target_is_isolate_independent_builtin =
+ isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
+ Builtins::IsIsolateIndependent(builtin_index);
+
+ if (options().use_pc_relative_calls_and_jumps &&
+ target_is_isolate_independent_builtin) {
+ int32_t code_target_index = AddCodeTarget(code);
+ b(code_target_index * kInstrSize, cond, RelocInfo::RELATIVE_CODE_TARGET);
+ return;
+ } else if (root_array_available_ && options().isolate_independent_code) {
+ // This branch is taken only for specific cctests, where we force isolate
+ // creation at runtime. At this point, Code space isn't restricted to a
+ // size s.t. pc-relative calls may be used.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ int offset = code->builtin_index() * kSystemPointerSize +
+ IsolateData::builtin_entry_table_offset();
+ ldr(scratch, MemOperand(kRootRegister, offset));
+ Jump(scratch, cond);
+ return;
+ } else if (options().inline_offheap_trampolines &&
+ target_is_isolate_independent_builtin) {
+ // Inline the trampoline.
+ RecordCommentForOffHeapTrampoline(builtin_index);
+ EmbeddedData d = EmbeddedData::FromBlob();
+ Address entry = d.InstructionStartOfBuiltin(builtin_index);
+ // Use ip directly instead of using UseScratchRegisterScope, as we do not
+ // preserve scratch registers across calls.
+ mov(ip, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
+ Jump(ip, cond);
+ return;
}
+
// 'code' is always generated ARM code, never THUMB code
Jump(static_cast<intptr_t>(code.address()), rmode, cond);
}
@@ -264,40 +261,139 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
Condition cond, TargetAddressStorageMode mode,
bool check_constant_pool) {
DCHECK(RelocInfo::IsCodeTarget(rmode));
- if (FLAG_embedded_builtins) {
- int builtin_index = Builtins::kNoBuiltinId;
- bool target_is_isolate_independent_builtin =
- isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
- Builtins::IsIsolateIndependent(builtin_index);
- if (target_is_isolate_independent_builtin &&
- options().use_pc_relative_calls_and_jumps) {
- int32_t code_target_index = AddCodeTarget(code);
- bl(code_target_index * kInstrSize, cond, RelocInfo::RELATIVE_CODE_TARGET);
- return;
- } else if (root_array_available_ && options().isolate_independent_code) {
- // Use ip directly instead of using UseScratchRegisterScope, as we do not
- // preserve scratch registers across calls.
- IndirectLoadConstant(ip, code);
- add(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
- Call(ip, cond);
- return;
- } else if (target_is_isolate_independent_builtin &&
- options().inline_offheap_trampolines) {
- // Inline the trampoline.
- RecordCommentForOffHeapTrampoline(builtin_index);
- EmbeddedData d = EmbeddedData::FromBlob();
- Address entry = d.InstructionStartOfBuiltin(builtin_index);
- // Use ip directly instead of using UseScratchRegisterScope, as we do not
- // preserve scratch registers across calls.
- mov(ip, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
- Call(ip, cond);
- return;
- }
+ DCHECK_IMPLIES(options().isolate_independent_code,
+ Builtins::IsIsolateIndependentBuiltin(*code));
+ DCHECK_IMPLIES(options().use_pc_relative_calls_and_jumps,
+ Builtins::IsIsolateIndependentBuiltin(*code));
+
+ int builtin_index = Builtins::kNoBuiltinId;
+ bool target_is_isolate_independent_builtin =
+ isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
+ Builtins::IsIsolateIndependent(builtin_index);
+
+ if (target_is_isolate_independent_builtin &&
+ options().use_pc_relative_calls_and_jumps) {
+ int32_t code_target_index = AddCodeTarget(code);
+ bl(code_target_index * kInstrSize, cond, RelocInfo::RELATIVE_CODE_TARGET);
+ return;
+ } else if (root_array_available_ && options().isolate_independent_code) {
+ // This branch is taken only for specific cctests, where we force isolate
+ // creation at runtime. At this point, Code space isn't restricted to a
+ // size s.t. pc-relative calls may be used.
+ int offset = code->builtin_index() * kSystemPointerSize +
+ IsolateData::builtin_entry_table_offset();
+ ldr(ip, MemOperand(kRootRegister, offset));
+ Call(ip, cond);
+ return;
+ } else if (target_is_isolate_independent_builtin &&
+ options().inline_offheap_trampolines) {
+ // Inline the trampoline.
+ RecordCommentForOffHeapTrampoline(builtin_index);
+ EmbeddedData d = EmbeddedData::FromBlob();
+ Address entry = d.InstructionStartOfBuiltin(builtin_index);
+ // Use ip directly instead of using UseScratchRegisterScope, as we do not
+ // preserve scratch registers across calls.
+ mov(ip, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
+ Call(ip, cond);
+ return;
}
+
// 'code' is always generated ARM code, never THUMB code
Call(code.address(), rmode, cond, mode);
}
+void TurboAssembler::CallBuiltinPointer(Register builtin_pointer) {
+ STATIC_ASSERT(kSystemPointerSize == 4);
+ STATIC_ASSERT(kSmiShiftSize == 0);
+ STATIC_ASSERT(kSmiTagSize == 1);
+ STATIC_ASSERT(kSmiTag == 0);
+
+ // The builtin_pointer register contains the builtin index as a Smi.
+ // Untagging is folded into the indexing operand below.
+ mov(builtin_pointer,
+ Operand(builtin_pointer, LSL, kSystemPointerSizeLog2 - kSmiTagSize));
+ add(builtin_pointer, builtin_pointer,
+ Operand(IsolateData::builtin_entry_table_offset()));
+ ldr(builtin_pointer, MemOperand(kRootRegister, builtin_pointer));
+ Call(builtin_pointer);
+}
+
+void TurboAssembler::LoadCodeObjectEntry(Register destination,
+ Register code_object) {
+ // Code objects are called differently depending on whether we are generating
+ // builtin code (which will later be embedded into the binary) or compiling
+ // user JS code at runtime.
+ // * Builtin code runs in --jitless mode and thus must not call into on-heap
+ // Code targets. Instead, we dispatch through the builtins entry table.
+ // * Codegen at runtime does not have this restriction and we can use the
+ // shorter, branchless instruction sequence. The assumption here is that
+ // targets are usually generated code and not builtin Code objects.
+
+ if (options().isolate_independent_code) {
+ DCHECK(root_array_available());
+ Label if_code_is_builtin, out;
+
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+
+ DCHECK(!AreAliased(destination, scratch));
+ DCHECK(!AreAliased(code_object, scratch));
+
+ // Check whether the Code object is a builtin. If so, call its (off-heap)
+ // entry point directly without going through the (on-heap) trampoline.
+ // Otherwise, just call the Code object as always.
+
+ ldr(scratch, FieldMemOperand(code_object, Code::kBuiltinIndexOffset));
+ cmp(scratch, Operand(Builtins::kNoBuiltinId));
+ b(ne, &if_code_is_builtin);
+
+ // A non-builtin Code object, the entry point is at
+ // Code::raw_instruction_start().
+ add(destination, code_object, Operand(Code::kHeaderSize - kHeapObjectTag));
+ jmp(&out);
+
+ // A builtin Code object, the entry point is loaded from the builtin entry
+ // table.
+ // The builtin index is loaded in scratch.
+ bind(&if_code_is_builtin);
+ lsl(destination, scratch, Operand(kSystemPointerSizeLog2));
+ add(destination, destination, kRootRegister);
+ ldr(destination,
+ MemOperand(destination, IsolateData::builtin_entry_table_offset()));
+
+ bind(&out);
+ } else {
+ add(destination, code_object, Operand(Code::kHeaderSize - kHeapObjectTag));
+ }
+}
+
+void TurboAssembler::CallCodeObject(Register code_object) {
+ LoadCodeObjectEntry(code_object, code_object);
+ Call(code_object);
+}
+
+void TurboAssembler::JumpCodeObject(Register code_object) {
+ LoadCodeObjectEntry(code_object, code_object);
+ Jump(code_object);
+}
+
+void TurboAssembler::StoreReturnAddressAndCall(Register target) {
+ // This generates the final instruction sequence for calls to C functions
+ // once an exit frame has been constructed.
+ //
+ // Note that this assumes the caller code (i.e. the Code object currently
+ // being generated) is immovable or that the callee function cannot trigger
+ // GC, since the callee function will return to it.
+
+ // Compute the return address in lr to return to after the jump below. The pc
+ // is already at '+ 8' from the current instruction; but return is after three
+ // instructions, so add another 4 to pc to get the return address.
+ Assembler::BlockConstPoolScope block_const_pool(this);
+ add(lr, pc, Operand(4));
+ str(lr, MemOperand(sp));
+ Call(target);
+}
+
void TurboAssembler::Ret(Condition cond) { bx(lr, cond); }
void TurboAssembler::Drop(int count, Condition cond) {
@@ -324,14 +420,14 @@ void TurboAssembler::Push(Handle<HeapObject> handle) {
push(scratch);
}
-void TurboAssembler::Push(Smi* smi) {
+void TurboAssembler::Push(Smi smi) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
mov(scratch, Operand(smi));
push(scratch);
}
-void TurboAssembler::Move(Register dst, Smi* smi) { mov(dst, Operand(smi)); }
+void TurboAssembler::Move(Register dst, Smi smi) { mov(dst, Operand(smi)); }
void TurboAssembler::Move(Register dst, Handle<HeapObject> value) {
if (FLAG_embedded_builtins) {
@@ -529,7 +625,8 @@ void MacroAssembler::Store(Register src,
void TurboAssembler::LoadRoot(Register destination, RootIndex index,
Condition cond) {
- ldr(destination, MemOperand(kRootRegister, RootRegisterOffset(index)), cond);
+ ldr(destination,
+ MemOperand(kRootRegister, RootRegisterOffsetForRootIndex(index)), cond);
}
@@ -600,25 +697,43 @@ void TurboAssembler::RestoreRegisters(RegList registers) {
void TurboAssembler::CallRecordWriteStub(
Register object, Register address,
RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode) {
+ CallRecordWriteStub(
+ object, address, remembered_set_action, fp_mode,
+ isolate()->builtins()->builtin_handle(Builtins::kRecordWrite),
+ kNullAddress);
+}
+
+void TurboAssembler::CallRecordWriteStub(
+ Register object, Register address,
+ RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode,
+ Address wasm_target) {
+ CallRecordWriteStub(object, address, remembered_set_action, fp_mode,
+ Handle<Code>::null(), wasm_target);
+}
+
+void TurboAssembler::CallRecordWriteStub(
+ Register object, Register address,
+ RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode,
+ Handle<Code> code_target, Address wasm_target) {
+ DCHECK_NE(code_target.is_null(), wasm_target == kNullAddress);
// TODO(albertnetymk): For now we ignore remembered_set_action and fp_mode,
// i.e. always emit remember set and save FP registers in RecordWriteStub. If
// large performance regression is observed, we should use these values to
// avoid unnecessary work.
- Callable const callable =
- Builtins::CallableFor(isolate(), Builtins::kRecordWrite);
- RegList registers = callable.descriptor().allocatable_registers();
+ RecordWriteDescriptor descriptor;
+ RegList registers = descriptor.allocatable_registers();
SaveRegisters(registers);
- Register object_parameter(callable.descriptor().GetRegisterParameter(
- RecordWriteDescriptor::kObject));
+ Register object_parameter(
+ descriptor.GetRegisterParameter(RecordWriteDescriptor::kObject));
Register slot_parameter(
- callable.descriptor().GetRegisterParameter(RecordWriteDescriptor::kSlot));
- Register remembered_set_parameter(callable.descriptor().GetRegisterParameter(
- RecordWriteDescriptor::kRememberedSet));
- Register fp_mode_parameter(callable.descriptor().GetRegisterParameter(
- RecordWriteDescriptor::kFPMode));
+ descriptor.GetRegisterParameter(RecordWriteDescriptor::kSlot));
+ Register remembered_set_parameter(
+ descriptor.GetRegisterParameter(RecordWriteDescriptor::kRememberedSet));
+ Register fp_mode_parameter(
+ descriptor.GetRegisterParameter(RecordWriteDescriptor::kFPMode));
Push(object);
Push(address);
@@ -628,7 +743,11 @@ void TurboAssembler::CallRecordWriteStub(
Move(remembered_set_parameter, Smi::FromEnum(remembered_set_action));
Move(fp_mode_parameter, Smi::FromEnum(fp_mode));
- Call(callable.code(), RelocInfo::CODE_TARGET);
+ if (code_target.is_null()) {
+ Call(wasm_target, RelocInfo::WASM_STUB_CALL);
+ } else {
+ Call(code_target, RelocInfo::CODE_TARGET);
+ }
RestoreRegisters(registers);
}
@@ -1529,12 +1648,11 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
// call sites.
Register code = kJavaScriptCallCodeStartRegister;
ldr(code, FieldMemOperand(function, JSFunction::kCodeOffset));
- add(code, code, Operand(Code::kHeaderSize - kHeapObjectTag));
if (flag == CALL_FUNCTION) {
- Call(code);
+ CallCodeObject(code);
} else {
DCHECK(flag == JUMP_FUNCTION);
- Jump(code);
+ JumpCodeObject(code);
}
// Continue here if InvokePrologue does handle the invocation due to
@@ -1597,10 +1715,10 @@ void MacroAssembler::PushStackHandler() {
STATIC_ASSERT(StackHandlerConstants::kSize == 2 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
- Push(Smi::kZero); // Padding.
+ Push(Smi::zero()); // Padding.
// Link the current handler as the next handler.
- mov(r6, Operand(ExternalReference::Create(IsolateAddressId::kHandlerAddress,
- isolate())));
+ Move(r6,
+ ExternalReference::Create(IsolateAddressId::kHandlerAddress, isolate()));
ldr(r5, MemOperand(r6));
push(r5);
// Set this new handler as the current one.
@@ -1613,8 +1731,8 @@ void MacroAssembler::PopStackHandler() {
Register scratch = temps.Acquire();
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
pop(r1);
- mov(scratch, Operand(ExternalReference::Create(
- IsolateAddressId::kHandlerAddress, isolate())));
+ Move(scratch,
+ ExternalReference::Create(IsolateAddressId::kHandlerAddress, isolate()));
str(r1, MemOperand(scratch));
add(sp, sp, Operand(StackHandlerConstants::kSize - kPointerSize));
}
@@ -1647,49 +1765,6 @@ void MacroAssembler::CompareRoot(Register obj, RootIndex index) {
cmp(obj, scratch);
}
-void MacroAssembler::CallStub(CodeStub* stub,
- Condition cond) {
- DCHECK(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
- Call(stub->GetCode(), RelocInfo::CODE_TARGET, cond, CAN_INLINE_TARGET_ADDRESS,
- false);
-}
-
-void TurboAssembler::CallStubDelayed(CodeStub* stub) {
- DCHECK(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
-
- // Block constant pool for the call instruction sequence.
- BlockConstPoolScope block_const_pool(this);
-
-#ifdef DEBUG
- Label start;
- bind(&start);
-#endif
-
- // Call sequence on V7 or later may be :
- // movw ip, #... @ call address low 16
- // movt ip, #... @ call address high 16
- // blx ip
- // @ return address
- // Or for pre-V7 or values that may be back-patched
- // to avoid ICache flushes:
- // ldr ip, [pc, #...] @ call address
- // blx ip
- // @ return address
-
- mov(ip, Operand::EmbeddedCode(stub));
- blx(ip, al);
-
- DCHECK_EQ(kCallStubSize, SizeOfCodeGeneratedSince(&start));
-}
-
-void MacroAssembler::TailCallStub(CodeStub* stub, Condition cond) {
- Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond);
-}
-
-bool TurboAssembler::AllowThisStubCall(CodeStub* stub) {
- return has_frame() || !stub->SometimesSetsUpAFrame();
-}
-
void MacroAssembler::TryDoubleToInt32Exact(Register result,
DwVfpRegister double_input,
LowDwVfpRegister double_scratch) {
@@ -1761,8 +1836,7 @@ void TurboAssembler::CallRuntimeWithCEntry(Runtime::FunctionId fid,
mov(r0, Operand(f->nargs));
Move(r1, ExternalReference::Create(f));
DCHECK(!AreAliased(centry, r0, r1));
- add(centry, centry, Operand(Code::kHeaderSize - kHeapObjectTag));
- Call(centry);
+ CallCodeObject(centry);
}
void MacroAssembler::CallRuntime(const Runtime::Function* f,
@@ -1818,7 +1892,7 @@ void MacroAssembler::JumpToInstructionStream(Address entry) {
void MacroAssembler::LoadWeakValue(Register out, Register in,
Label* target_if_cleared) {
- cmp(in, Operand(kClearedWeakHeapObject));
+ cmp(in, Operand(kClearedWeakHeapObjectLower32));
b(eq, target_if_cleared);
and_(out, in, Operand(~kWeakHeapObjectMask));
@@ -1852,6 +1926,10 @@ void TurboAssembler::Assert(Condition cond, AbortReason reason) {
Check(cond, reason);
}
+void TurboAssembler::AssertUnreachable(AbortReason reason) {
+ if (emit_debug_code()) Abort(reason);
+}
+
void TurboAssembler::Check(Condition cond, AbortReason reason) {
Label L;
b(cond, &L);
@@ -1901,6 +1979,10 @@ void TurboAssembler::Abort(AbortReason reason) {
// will not return here
}
+void MacroAssembler::LoadGlobalProxy(Register dst) {
+ LoadNativeContextSlot(Context::GLOBAL_PROXY_INDEX, dst);
+}
+
void MacroAssembler::LoadNativeContextSlot(int index, Register dst) {
ldr(dst, NativeContextMemOperand());
ldr(dst, ContextMemOperand(dst, index));
@@ -1908,10 +1990,8 @@ void MacroAssembler::LoadNativeContextSlot(int index, Register dst) {
void TurboAssembler::InitializeRootRegister() {
- ExternalReference roots_array_start =
- ExternalReference::roots_array_start(isolate());
- mov(kRootRegister, Operand(roots_array_start));
- add(kRootRegister, kRootRegister, Operand(kRootRegisterBias));
+ ExternalReference isolate_root = ExternalReference::isolate_root(isolate());
+ mov(kRootRegister, Operand(isolate_root));
}
void MacroAssembler::SmiTag(Register reg, SBit s) {
@@ -2034,6 +2114,10 @@ void MacroAssembler::AssertGeneratorObject(Register object) {
CompareInstanceType(map, instance_type, JS_GENERATOR_OBJECT_TYPE);
b(eq, &do_check);
+ // Check if JSAsyncFunctionObject (See MacroAssembler::CompareInstanceType)
+ cmp(instance_type, Operand(JS_ASYNC_FUNCTION_OBJECT_TYPE));
+ b(eq, &do_check);
+
// Check if JSAsyncGeneratorObject (See MacroAssembler::CompareInstanceType)
cmp(instance_type, Operand(JS_ASYNC_GENERATOR_OBJECT_TYPE));
@@ -2335,10 +2419,37 @@ void TurboAssembler::CallCFunctionHelper(Register function,
}
#endif
+ // Save the frame pointer and PC so that the stack layout remains iterable,
+ // even without an ExitFrame which normally exists between JS and C frames.
+ if (isolate() != nullptr) {
+ Register scratch = r4;
+ Push(scratch);
+
+ Move(scratch, ExternalReference::fast_c_call_caller_pc_address(isolate()));
+ str(pc, MemOperand(scratch));
+ Move(scratch, ExternalReference::fast_c_call_caller_fp_address(isolate()));
+ str(fp, MemOperand(scratch));
+ Pop(scratch);
+ }
+
// Just call directly. The function called cannot cause a GC, or
// allow preemption, so the return address in the link register
// stays correct.
Call(function);
+
+ if (isolate() != nullptr) {
+ // We don't unset the PC; the FP is the source of truth.
+ Register scratch1 = r4;
+ Register scratch2 = r5;
+ Push(scratch1);
+ Push(scratch2);
+ Move(scratch1, ExternalReference::fast_c_call_caller_fp_address(isolate()));
+ mov(scratch2, Operand::Zero());
+ str(scratch2, MemOperand(scratch1));
+ Pop(scratch2);
+ Pop(scratch1);
+ }
+
int stack_passed_arguments = CalculateStackPassedWords(
num_reg_arguments, num_double_arguments);
if (ActivationFrameAlignment() > kPointerSize) {
@@ -2390,6 +2501,26 @@ void TurboAssembler::ResetSpeculationPoisonRegister() {
mov(kSpeculationPoisonRegister, Operand(-1));
}
+void TurboAssembler::CallForDeoptimization(Address target, int deopt_id) {
+ NoRootArrayScope no_root_array(this);
+
+ // Save the deopt id in r10 (we don't need the roots array from now on).
+ DCHECK_LE(deopt_id, 0xFFFF);
+ if (CpuFeatures::IsSupported(ARMv7)) {
+ // On ARMv7, we can use movw (with a maximum immediate of 0xFFFF)
+ movw(r10, deopt_id);
+ } else {
+ // On ARMv6, we might need two instructions.
+ mov(r10, Operand(deopt_id & 0xFF)); // Set the low byte.
+ if (deopt_id >= 0xFF) {
+ orr(r10, r10, Operand(deopt_id & 0xFF00)); // Set the high byte.
+ }
+ }
+
+ Call(target, RelocInfo::RUNTIME_ENTRY);
+ CheckConstPool(false, false);
+}
+
} // namespace internal
} // namespace v8
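CallBuiltinPointer above receives the builtin index as a Smi and folds the
untag into the address computation: with a 1-bit Smi tag and 4-byte table
entries, smi << (kSystemPointerSizeLog2 - kSmiTagSize) equals
index * kSystemPointerSize. A host-side check of that arithmetic, mirroring
the 32-bit STATIC_ASSERTs in the function (plain C++, not V8 code):

    #include <cassert>
    #include <cstdint>

    constexpr int kSmiTagSize = 1;             // Smi stores (index << 1)
    constexpr int kSystemPointerSizeLog2 = 2;  // 4-byte entry table slots

    int main() {
      for (uint32_t index = 0; index < 1500; ++index) {
        uint32_t smi = index << kSmiTagSize;  // tag
        // Untag and scale in a single shift, as the mov/LSL above does.
        uint32_t offset = smi << (kSystemPointerSizeLog2 - kSmiTagSize);
        assert(offset == index * 4);
      }
      return 0;
    }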
diff --git a/deps/v8/src/arm/macro-assembler-arm.h b/deps/v8/src/arm/macro-assembler-arm.h
index ef75c3fe4c..29fa10cfea 100644
--- a/deps/v8/src/arm/macro-assembler-arm.h
+++ b/deps/v8/src/arm/macro-assembler-arm.h
@@ -2,43 +2,21 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#ifndef INCLUDED_FROM_MACRO_ASSEMBLER_H
+#error This header must be included via macro-assembler.h
+#endif
+
#ifndef V8_ARM_MACRO_ASSEMBLER_ARM_H_
#define V8_ARM_MACRO_ASSEMBLER_ARM_H_
#include "src/arm/assembler-arm.h"
-#include "src/assembler.h"
#include "src/bailout-reason.h"
+#include "src/contexts.h"
#include "src/globals.h"
-#include "src/turbo-assembler.h"
namespace v8 {
namespace internal {
-// Give alias names to registers for calling conventions.
-constexpr Register kReturnRegister0 = r0;
-constexpr Register kReturnRegister1 = r1;
-constexpr Register kReturnRegister2 = r2;
-constexpr Register kJSFunctionRegister = r1;
-constexpr Register kContextRegister = r7;
-constexpr Register kAllocateSizeRegister = r1;
-constexpr Register kSpeculationPoisonRegister = r9;
-constexpr Register kInterpreterAccumulatorRegister = r0;
-constexpr Register kInterpreterBytecodeOffsetRegister = r5;
-constexpr Register kInterpreterBytecodeArrayRegister = r6;
-constexpr Register kInterpreterDispatchTableRegister = r8;
-
-constexpr Register kJavaScriptCallArgCountRegister = r0;
-constexpr Register kJavaScriptCallCodeStartRegister = r2;
-constexpr Register kJavaScriptCallTargetRegister = kJSFunctionRegister;
-constexpr Register kJavaScriptCallNewTargetRegister = r3;
-constexpr Register kJavaScriptCallExtraArg1Register = r2;
-
-constexpr Register kOffHeapTrampolineRegister = ip;
-constexpr Register kRuntimeCallFunctionRegister = r1;
-constexpr Register kRuntimeCallArgCountRegister = r0;
-constexpr Register kRuntimeCallArgvRegister = r2;
-constexpr Register kWasmInstanceRegister = r3;
-
// ----------------------------------------------------------------------------
// Static helper functions
@@ -47,11 +25,6 @@ inline MemOperand FieldMemOperand(Register object, int offset) {
return MemOperand(object, offset - kHeapObjectTag);
}
-
-// Give alias names to registers
-constexpr Register cp = r7; // JavaScript context pointer.
-constexpr Register kRootRegister = r10; // Roots array pointer.
-
enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
enum LinkRegisterStatus { kLRHasNotBeenSaved, kLRHasBeenSaved };
@@ -71,14 +44,9 @@ enum TargetAddressStorageMode {
class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
public:
- TurboAssembler(const AssemblerOptions& options, void* buffer, int buffer_size)
- : TurboAssemblerBase(options, buffer, buffer_size) {}
-
- TurboAssembler(Isolate* isolate, const AssemblerOptions& options,
- void* buffer, int buffer_size,
- CodeObjectRequired create_code_object)
- : TurboAssemblerBase(isolate, options, buffer, buffer_size,
- create_code_object) {}
+ template <typename... Args>
+ explicit TurboAssembler(Args&&... args)
+ : TurboAssemblerBase(std::forward<Args>(args)...) {}
// Activation support.
void EnterFrame(StackFrame::Type type,
@@ -101,7 +69,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void Push(Register src) { push(src); }
void Push(Handle<HeapObject> handle);
- void Push(Smi* smi);
+ void Push(Smi smi);
// Push two registers. Pushes leftmost register first (to highest address).
void Push(Register src1, Register src2, Condition cond = al) {
@@ -277,17 +245,19 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void MovFromFloatResult(DwVfpRegister dst);
// Calls Abort(msg) if the condition cond is not satisfied.
- // Use --debug_code to enable.
+ // Use --debug-code to enable.
void Assert(Condition cond, AbortReason reason);
+ // Like Assert(), but without condition.
+ // Use --debug-code to enable.
+ void AssertUnreachable(AbortReason reason);
+
// Like Assert(), but always enabled.
void Check(Condition cond, AbortReason reason);
// Print a message to stdout and abort execution.
void Abort(AbortReason msg);
- inline bool AllowThisStubCall(CodeStub* stub);
-
void LslPair(Register dst_low, Register dst_high, Register src_low,
Register src_high, Register shift);
void LslPair(Register dst_low, Register dst_high, Register src_low,
@@ -306,9 +276,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void LoadRootRegisterOffset(Register destination, intptr_t offset) override;
void LoadRootRelative(Register destination, int32_t offset) override;
- static constexpr int kCallStubSize = 2 * kInstrSize;
- void CallStubDelayed(CodeStub* stub);
-
// Call a runtime routine. This expects {centry} to contain a fitting CEntry
// builtin for the target runtime function and uses an indirect call.
void CallRuntimeWithCEntry(Runtime::FunctionId fid, Register centry);
@@ -324,14 +291,20 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
bool check_constant_pool = true);
void Call(Label* target);
+ void CallBuiltinPointer(Register builtin_pointer) override;
+
+ void LoadCodeObjectEntry(Register destination, Register code_object) override;
+ void CallCodeObject(Register code_object) override;
+ void JumpCodeObject(Register code_object) override;
+
+ // Generates an instruction sequence s.t. the return address points to the
+ // instruction following the call.
+ // The return address on the stack is used by frame iteration.
+ void StoreReturnAddressAndCall(Register target);
+
// This should only be used when assembling a deoptimizer call because of
// the CheckConstPool invocation, which is only needed for deoptimization.
- void CallForDeoptimization(Address target, int deopt_id,
- RelocInfo::Mode rmode) {
- USE(deopt_id);
- Call(target, rmode);
- CheckConstPool(false, false);
- }
+ void CallForDeoptimization(Address target, int deopt_id);
// Emit code to discard a non-negative number of pointer-sized elements
// from the stack, clobbering only the sp register.
@@ -379,6 +352,9 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void CallRecordWriteStub(Register object, Register address,
RememberedSetAction remembered_set_action,
SaveFPRegsMode fp_mode);
+ void CallRecordWriteStub(Register object, Register address,
+ RememberedSetAction remembered_set_action,
+ SaveFPRegsMode fp_mode, Address wasm_target);
// Does a runtime check for 16/32 FP registers. Either way, pushes 32 double
// values to location, saving [d0..(d15|d31)].
@@ -444,7 +420,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
SwVfpRegister src_lane, int lane);
// Register move. May do nothing if the registers are identical.
- void Move(Register dst, Smi* smi);
+ void Move(Register dst, Smi smi);
void Move(Register dst, Handle<HeapObject> value);
void Move(Register dst, ExternalReference reference);
void Move(Register dst, Register src, Condition cond = al);
@@ -499,8 +475,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// the JS bitwise operations. See ECMA-262 9.5: ToInt32. Goes to 'done' if it
// succeeds, otherwise falls through if result is saturated. On return
// 'result' either holds answer, or is clobbered on fall through.
- //
- // Only public for the test code in test-code-stubs-arm.cc.
void TryInlineTruncateDoubleToI(Register result, DwVfpRegister input,
Label* done);
@@ -563,21 +537,19 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void CallCFunctionHelper(Register function, int num_reg_arguments,
int num_double_arguments);
+
+ void CallRecordWriteStub(Register object, Register address,
+ RememberedSetAction remembered_set_action,
+ SaveFPRegsMode fp_mode, Handle<Code> code_target,
+ Address wasm_target);
};
// MacroAssembler implements a collection of frequently used macros.
-class MacroAssembler : public TurboAssembler {
+class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
public:
- MacroAssembler(const AssemblerOptions& options, void* buffer, int size)
- : TurboAssembler(options, buffer, size) {}
-
- MacroAssembler(Isolate* isolate, void* buffer, int size,
- CodeObjectRequired create_code_object)
- : MacroAssembler(isolate, AssemblerOptions::Default(isolate), buffer,
- size, create_code_object) {}
-
- MacroAssembler(Isolate* isolate, const AssemblerOptions& options,
- void* buffer, int size, CodeObjectRequired create_code_object);
+ template <typename... Args>
+ explicit MacroAssembler(Args&&... args)
+ : TurboAssembler(std::forward<Args>(args)...) {}
void Mls(Register dst, Register src1, Register src2, Register srcA,
Condition cond = al);
@@ -655,9 +627,7 @@ class MacroAssembler : public TurboAssembler {
bool argument_count_is_length = false);
// Load the global proxy from the current context.
- void LoadGlobalProxy(Register dst) {
- LoadNativeContextSlot(Context::GLOBAL_PROXY_INDEX, dst);
- }
+ void LoadGlobalProxy(Register dst);
void LoadNativeContextSlot(int index, Register dst);
@@ -748,13 +718,6 @@ class MacroAssembler : public TurboAssembler {
// ---------------------------------------------------------------------------
// Runtime calls
- // Call a code stub.
- void CallStub(CodeStub* stub,
- Condition cond = al);
-
- // Call a code stub.
- void TailCallStub(CodeStub* stub, Condition cond = al);
-
// Call a runtime routine.
void CallRuntime(const Runtime::Function* f,
int num_arguments,
@@ -862,6 +825,8 @@ class MacroAssembler : public TurboAssembler {
// Needs access to SafepointRegisterStackIndex for compiled frame
// traversal.
friend class StandardFrame;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(MacroAssembler);
};
// -----------------------------------------------------------------------------
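Both MacroAssembler constructors (and the TurboAssembler ones earlier in the
header) collapse into a single perfect-forwarding template, so every
base-class constructor signature keeps working without being restated. A
minimal illustration of the idiom with placeholder names, not V8 classes:

    #include <utility>

    struct BaseAssembler {
      explicit BaseAssembler(int options) {}
      BaseAssembler(int options, void* buffer, int size) {}
    };

    struct DerivedAssembler : BaseAssembler {
      // One constructor forwards any argument list to the base class.
      template <typename... Args>
      explicit DerivedAssembler(Args&&... args)
          : BaseAssembler(std::forward<Args>(args)...) {}
    };

    int main() {
      DerivedAssembler a(0);           // -> BaseAssembler(int)
      char buf[64];
      DerivedAssembler b(0, buf, 64);  // -> BaseAssembler(int, void*, int)
      return 0;
    }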
diff --git a/deps/v8/src/arm/register-arm.h b/deps/v8/src/arm/register-arm.h
new file mode 100644
index 0000000000..4767e50661
--- /dev/null
+++ b/deps/v8/src/arm/register-arm.h
@@ -0,0 +1,369 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_ARM_REGISTER_ARM_H_
+#define V8_ARM_REGISTER_ARM_H_
+
+#include "src/register.h"
+#include "src/reglist.h"
+
+namespace v8 {
+namespace internal {
+
+// clang-format off
+#define GENERAL_REGISTERS(V) \
+ V(r0) V(r1) V(r2) V(r3) V(r4) V(r5) V(r6) V(r7) \
+ V(r8) V(r9) V(r10) V(fp) V(ip) V(sp) V(lr) V(pc)
+
+#define ALLOCATABLE_GENERAL_REGISTERS(V) \
+ V(r0) V(r1) V(r2) V(r3) V(r4) V(r5) V(r6) V(r7) \
+ V(r8) V(r9)
+
+#define FLOAT_REGISTERS(V) \
+ V(s0) V(s1) V(s2) V(s3) V(s4) V(s5) V(s6) V(s7) \
+ V(s8) V(s9) V(s10) V(s11) V(s12) V(s13) V(s14) V(s15) \
+ V(s16) V(s17) V(s18) V(s19) V(s20) V(s21) V(s22) V(s23) \
+ V(s24) V(s25) V(s26) V(s27) V(s28) V(s29) V(s30) V(s31)
+
+#define LOW_DOUBLE_REGISTERS(V) \
+ V(d0) V(d1) V(d2) V(d3) V(d4) V(d5) V(d6) V(d7) \
+ V(d8) V(d9) V(d10) V(d11) V(d12) V(d13) V(d14) V(d15)
+
+#define NON_LOW_DOUBLE_REGISTERS(V) \
+ V(d16) V(d17) V(d18) V(d19) V(d20) V(d21) V(d22) V(d23) \
+ V(d24) V(d25) V(d26) V(d27) V(d28) V(d29) V(d30) V(d31)
+
+#define DOUBLE_REGISTERS(V) \
+ LOW_DOUBLE_REGISTERS(V) NON_LOW_DOUBLE_REGISTERS(V)
+
+#define SIMD128_REGISTERS(V) \
+ V(q0) V(q1) V(q2) V(q3) V(q4) V(q5) V(q6) V(q7) \
+ V(q8) V(q9) V(q10) V(q11) V(q12) V(q13) V(q14) V(q15)
+
+#define ALLOCATABLE_DOUBLE_REGISTERS(V) \
+ V(d0) V(d1) V(d2) V(d3) V(d4) V(d5) V(d6) V(d7) \
+ V(d8) V(d9) V(d10) V(d11) V(d12) \
+ V(d16) V(d17) V(d18) V(d19) V(d20) V(d21) V(d22) V(d23) \
+ V(d24) V(d25) V(d26) V(d27) V(d28) V(d29) V(d30) V(d31)
+
+#define ALLOCATABLE_NO_VFP32_DOUBLE_REGISTERS(V) \
+ V(d0) V(d1) V(d2) V(d3) V(d4) V(d5) V(d6) V(d7) \
+ V(d8) V(d9) V(d10) V(d11) V(d12) V(d15)
+
+#define C_REGISTERS(V) \
+ V(cr0) V(cr1) V(cr2) V(cr3) V(cr4) V(cr5) V(cr6) V(cr7) \
+ V(cr8) V(cr9) V(cr10) V(cr11) V(cr12) V(cr15)
+// clang-format on
+
+// The ARM ABI does not specify the usage of register r9, which may be reserved
+// as the static base or thread register on some platforms, in which case we
+// leave it alone. Adjust the value of kR9Available accordingly:
+const int kR9Available = 1; // 1 if available to us, 0 if reserved
+
+// Register list in load/store instructions
+// Note that the bit values must match those used in actual instruction encoding
+const int kNumRegs = 16;
+
+// Caller-saved/arguments registers
+const RegList kJSCallerSaved = 1 << 0 | // r0 a1
+ 1 << 1 | // r1 a2
+ 1 << 2 | // r2 a3
+ 1 << 3; // r3 a4
+
+const int kNumJSCallerSaved = 4;
+
+// Callee-saved registers preserved when switching from C to JavaScript
+const RegList kCalleeSaved = 1 << 4 | // r4 v1
+ 1 << 5 | // r5 v2
+ 1 << 6 | // r6 v3
+ 1 << 7 | // r7 v4 (cp in JavaScript code)
+ 1 << 8 | // r8 v5 (pp in JavaScript code)
+ kR9Available << 9 | // r9 v6
+ 1 << 10 | // r10 v7
+ 1 << 11; // r11 v8 (fp in JavaScript code)
+
+// When calling into C++ (only for C++ calls that can't cause a GC).
+// The call code will take care of lr, fp, etc.
+const RegList kCallerSaved = 1 << 0 | // r0
+ 1 << 1 | // r1
+ 1 << 2 | // r2
+ 1 << 3 | // r3
+ 1 << 9; // r9
+
+const int kNumCalleeSaved = 7 + kR9Available;
+
+// Double registers d8 to d15 are callee-saved.
+const int kNumDoubleCalleeSaved = 8;
+
+// Number of registers for which space is reserved in safepoints. Must be a
+// multiple of 8.
+// TODO(regis): Only 8 registers may actually be sufficient. Revisit.
+const int kNumSafepointRegisters = 16;
+
+// Define the list of registers actually saved at safepoints.
+// Note that the number of saved registers may be smaller than the reserved
+// space, i.e. kNumSafepointSavedRegisters <= kNumSafepointRegisters.
+const RegList kSafepointSavedRegisters = kJSCallerSaved | kCalleeSaved;
+const int kNumSafepointSavedRegisters = kNumJSCallerSaved + kNumCalleeSaved;
+
+enum RegisterCode {
+#define REGISTER_CODE(R) kRegCode_##R,
+ GENERAL_REGISTERS(REGISTER_CODE)
+#undef REGISTER_CODE
+ kRegAfterLast
+};
+
+class Register : public RegisterBase<Register, kRegAfterLast> {
+ friend class RegisterBase;
+
+ explicit constexpr Register(int code) : RegisterBase(code) {}
+};
+
+ASSERT_TRIVIALLY_COPYABLE(Register);
+static_assert(sizeof(Register) == sizeof(int),
+ "Register can efficiently be passed by value");
+
+// r7: context register
+#define DECLARE_REGISTER(R) \
+ constexpr Register R = Register::from_code<kRegCode_##R>();
+GENERAL_REGISTERS(DECLARE_REGISTER)
+#undef DECLARE_REGISTER
+constexpr Register no_reg = Register::no_reg();
+
+constexpr bool kPadArguments = false;
+constexpr bool kSimpleFPAliasing = false;
+constexpr bool kSimdMaskRegisters = false;
+
+enum SwVfpRegisterCode {
+#define REGISTER_CODE(R) kSwVfpCode_##R,
+ FLOAT_REGISTERS(REGISTER_CODE)
+#undef REGISTER_CODE
+ kSwVfpAfterLast
+};
+
+// Representation of a list of non-overlapping VFP registers. This list
+// represents the data layout of VFP registers as a bitfield:
+// S registers cover 1 bit
+// D registers cover 2 bits
+// Q registers cover 4 bits
+//
+// This way, we make sure no registers in the list ever overlap. However, a list
+// may represent multiple different sets of registers,
+// e.g. [d0 s2 s3] <=> [s0 s1 d1].
+typedef uint64_t VfpRegList;
+
+// Single word VFP register.
+class SwVfpRegister : public RegisterBase<SwVfpRegister, kSwVfpAfterLast> {
+ public:
+ static constexpr int kSizeInBytes = 4;
+
+ static void split_code(int reg_code, int* vm, int* m) {
+ DCHECK(from_code(reg_code).is_valid());
+ *m = reg_code & 0x1;
+ *vm = reg_code >> 1;
+ }
+ void split_code(int* vm, int* m) const { split_code(code(), vm, m); }
+ VfpRegList ToVfpRegList() const {
+ DCHECK(is_valid());
+ // Each bit in the list corresponds to a S register.
+ return uint64_t{0x1} << code();
+ }
+
+ private:
+ friend class RegisterBase;
+ explicit constexpr SwVfpRegister(int code) : RegisterBase(code) {}
+};
+
+ASSERT_TRIVIALLY_COPYABLE(SwVfpRegister);
+static_assert(sizeof(SwVfpRegister) == sizeof(int),
+ "SwVfpRegister can efficiently be passed by value");
+
+typedef SwVfpRegister FloatRegister;
+
+enum DoubleRegisterCode {
+#define REGISTER_CODE(R) kDoubleCode_##R,
+ DOUBLE_REGISTERS(REGISTER_CODE)
+#undef REGISTER_CODE
+ kDoubleAfterLast
+};
+
+// Double word VFP register.
+class DwVfpRegister : public RegisterBase<DwVfpRegister, kDoubleAfterLast> {
+ public:
+ static constexpr int kSizeInBytes = 8;
+
+ inline static int NumRegisters();
+
+ static void split_code(int reg_code, int* vm, int* m) {
+ DCHECK(from_code(reg_code).is_valid());
+ *m = (reg_code & 0x10) >> 4;
+ *vm = reg_code & 0x0F;
+ }
+ void split_code(int* vm, int* m) const { split_code(code(), vm, m); }
+ VfpRegList ToVfpRegList() const {
+ DCHECK(is_valid());
+ // A D register overlaps two S registers.
+ return uint64_t{0x3} << (code() * 2);
+ }
+
+ private:
+ friend class RegisterBase;
+ friend class LowDwVfpRegister;
+ explicit constexpr DwVfpRegister(int code) : RegisterBase(code) {}
+};
+
+ASSERT_TRIVIALLY_COPYABLE(DwVfpRegister);
+static_assert(sizeof(DwVfpRegister) == sizeof(int),
+ "DwVfpRegister can efficiently be passed by value");
+
+typedef DwVfpRegister DoubleRegister;
+
+// Double word VFP register d0-15.
+class LowDwVfpRegister
+ : public RegisterBase<LowDwVfpRegister, kDoubleCode_d16> {
+ public:
+ constexpr operator DwVfpRegister() const { return DwVfpRegister(reg_code_); }
+
+ SwVfpRegister low() const { return SwVfpRegister::from_code(code() * 2); }
+ SwVfpRegister high() const {
+ return SwVfpRegister::from_code(code() * 2 + 1);
+ }
+ VfpRegList ToVfpRegList() const {
+ DCHECK(is_valid());
+ // A D register overlaps two S registers.
+ return uint64_t{0x3} << (code() * 2);
+ }
+
+ private:
+ friend class RegisterBase;
+ explicit constexpr LowDwVfpRegister(int code) : RegisterBase(code) {}
+};
+
+enum Simd128RegisterCode {
+#define REGISTER_CODE(R) kSimd128Code_##R,
+ SIMD128_REGISTERS(REGISTER_CODE)
+#undef REGISTER_CODE
+ kSimd128AfterLast
+};
+
+// Quad word NEON register.
+class QwNeonRegister : public RegisterBase<QwNeonRegister, kSimd128AfterLast> {
+ public:
+ static void split_code(int reg_code, int* vm, int* m) {
+ DCHECK(from_code(reg_code).is_valid());
+ int encoded_code = reg_code << 1;
+ *m = (encoded_code & 0x10) >> 4;
+ *vm = encoded_code & 0x0F;
+ }
+ void split_code(int* vm, int* m) const { split_code(code(), vm, m); }
+ DwVfpRegister low() const { return DwVfpRegister::from_code(code() * 2); }
+ DwVfpRegister high() const {
+ return DwVfpRegister::from_code(code() * 2 + 1);
+ }
+ VfpRegList ToVfpRegList() const {
+ DCHECK(is_valid());
+ // A Q register overlaps four S registers.
+ return uint64_t{0xf} << (code() * 4);
+ }
+
+ private:
+ friend class RegisterBase;
+ explicit constexpr QwNeonRegister(int code) : RegisterBase(code) {}
+};
+
+typedef QwNeonRegister QuadRegister;
+
+typedef QwNeonRegister Simd128Register;
+
+enum CRegisterCode {
+#define REGISTER_CODE(R) kCCode_##R,
+ C_REGISTERS(REGISTER_CODE)
+#undef REGISTER_CODE
+ kCAfterLast
+};
+
+// Coprocessor register
+class CRegister : public RegisterBase<CRegister, kCAfterLast> {
+ friend class RegisterBase;
+ explicit constexpr CRegister(int code) : RegisterBase(code) {}
+};
+
+// Support for the VFP registers s0 to s31 (d0 to d15).
+// Note that "s(N):s(N+1)" is the same as "d(N/2)".
+#define DECLARE_FLOAT_REGISTER(R) \
+ constexpr SwVfpRegister R = SwVfpRegister::from_code<kSwVfpCode_##R>();
+FLOAT_REGISTERS(DECLARE_FLOAT_REGISTER)
+#undef DECLARE_FLOAT_REGISTER
+
+#define DECLARE_LOW_DOUBLE_REGISTER(R) \
+ constexpr LowDwVfpRegister R = LowDwVfpRegister::from_code<kDoubleCode_##R>();
+LOW_DOUBLE_REGISTERS(DECLARE_LOW_DOUBLE_REGISTER)
+#undef DECLARE_LOW_DOUBLE_REGISTER
+
+#define DECLARE_DOUBLE_REGISTER(R) \
+ constexpr DwVfpRegister R = DwVfpRegister::from_code<kDoubleCode_##R>();
+NON_LOW_DOUBLE_REGISTERS(DECLARE_DOUBLE_REGISTER)
+#undef DECLARE_DOUBLE_REGISTER
+
+constexpr DwVfpRegister no_dreg = DwVfpRegister::no_reg();
+
+#define DECLARE_SIMD128_REGISTER(R) \
+ constexpr Simd128Register R = Simd128Register::from_code<kSimd128Code_##R>();
+SIMD128_REGISTERS(DECLARE_SIMD128_REGISTER)
+#undef DECLARE_SIMD128_REGISTER
+
+// Aliases for double registers.
+constexpr LowDwVfpRegister kFirstCalleeSavedDoubleReg = d8;
+constexpr LowDwVfpRegister kLastCalleeSavedDoubleReg = d15;
+constexpr LowDwVfpRegister kDoubleRegZero = d13;
+
+constexpr CRegister no_creg = CRegister::no_reg();
+
+#define DECLARE_C_REGISTER(R) \
+ constexpr CRegister R = CRegister::from_code<kCCode_##R>();
+C_REGISTERS(DECLARE_C_REGISTER)
+#undef DECLARE_C_REGISTER
+
+// Define {RegisterName} methods for the register types.
+DEFINE_REGISTER_NAMES(Register, GENERAL_REGISTERS);
+DEFINE_REGISTER_NAMES(SwVfpRegister, FLOAT_REGISTERS);
+DEFINE_REGISTER_NAMES(DwVfpRegister, DOUBLE_REGISTERS);
+DEFINE_REGISTER_NAMES(LowDwVfpRegister, LOW_DOUBLE_REGISTERS);
+DEFINE_REGISTER_NAMES(QwNeonRegister, SIMD128_REGISTERS);
+DEFINE_REGISTER_NAMES(CRegister, C_REGISTERS);
+
+// Give alias names to registers for calling conventions.
+constexpr Register kReturnRegister0 = r0;
+constexpr Register kReturnRegister1 = r1;
+constexpr Register kReturnRegister2 = r2;
+constexpr Register kJSFunctionRegister = r1;
+constexpr Register kContextRegister = r7;
+constexpr Register kAllocateSizeRegister = r1;
+constexpr Register kSpeculationPoisonRegister = r9;
+constexpr Register kInterpreterAccumulatorRegister = r0;
+constexpr Register kInterpreterBytecodeOffsetRegister = r5;
+constexpr Register kInterpreterBytecodeArrayRegister = r6;
+constexpr Register kInterpreterDispatchTableRegister = r8;
+
+constexpr Register kJavaScriptCallArgCountRegister = r0;
+constexpr Register kJavaScriptCallCodeStartRegister = r2;
+constexpr Register kJavaScriptCallTargetRegister = kJSFunctionRegister;
+constexpr Register kJavaScriptCallNewTargetRegister = r3;
+constexpr Register kJavaScriptCallExtraArg1Register = r2;
+
+constexpr Register kOffHeapTrampolineRegister = ip;
+constexpr Register kRuntimeCallFunctionRegister = r1;
+constexpr Register kRuntimeCallArgCountRegister = r0;
+constexpr Register kRuntimeCallArgvRegister = r2;
+constexpr Register kWasmInstanceRegister = r3;
+constexpr Register kWasmCompileLazyFuncIndexRegister = r4;
+
+// Give alias names to registers
+constexpr Register cp = r7; // JavaScript context pointer.
+constexpr Register kRootRegister = r10; // Roots array pointer.
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_ARM_REGISTER_ARM_H_
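The VfpRegList comment in the new header encodes register aliasing directly in
the bit layout: one bit per S register, two per D, four per Q, so overlapping
registers share bits. A host-side check of that invariant, mirroring the three
ToVfpRegList implementations (plain C++, not V8 code):

    #include <cassert>
    #include <cstdint>

    using VfpRegList = uint64_t;

    VfpRegList SBits(int code) { return uint64_t{0x1} << code; }        // s<n>
    VfpRegList DBits(int code) { return uint64_t{0x3} << (code * 2); }  // d<n>
    VfpRegList QBits(int code) { return uint64_t{0xF} << (code * 4); }  // q<n>

    int main() {
      // [d0 s2 s3] <=> [s0 s1 d1], exactly as the header's comment states.
      assert((DBits(0) | SBits(2) | SBits(3)) ==
             (SBits(0) | SBits(1) | DBits(1)));
      // A Q register covers precisely its two D-register halves.
      assert(QBits(2) == (DBits(4) | DBits(5)));
      return 0;
    }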
diff --git a/deps/v8/src/arm/simulator-arm.cc b/deps/v8/src/arm/simulator-arm.cc
index e9d74104d3..0ee54c8f5b 100644
--- a/deps/v8/src/arm/simulator-arm.cc
+++ b/deps/v8/src/arm/simulator-arm.cc
@@ -2,31 +2,30 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/arm/simulator-arm.h"
+
+#if defined(USE_SIMULATOR)
+
#include <stdarg.h>
#include <stdlib.h>
#include <cmath>
-#if V8_TARGET_ARCH_ARM
-
#include "src/arm/constants-arm.h"
-#include "src/arm/simulator-arm.h"
#include "src/assembler-inl.h"
#include "src/base/bits.h"
-#include "src/codegen.h"
+#include "src/base/lazy-instance.h"
#include "src/disasm.h"
#include "src/macro-assembler.h"
#include "src/objects-inl.h"
+#include "src/ostreams.h"
#include "src/runtime/runtime-utils.h"
-#if defined(USE_SIMULATOR)
-
// Only build the simulator if not compiling for real ARM hardware.
namespace v8 {
namespace internal {
-// static
-base::LazyInstance<Simulator::GlobalMonitor>::type Simulator::global_monitor_ =
- LAZY_INSTANCE_INITIALIZER;
+DEFINE_LAZY_LEAKY_OBJECT_GETTER(Simulator::GlobalMonitor,
+ Simulator::GlobalMonitor::Get);
// This macro provides a platform independent use of sscanf. The reason for
// SScanF not being implemented in a platform independent way through
@@ -243,10 +242,8 @@ void ArmDebugger::Debug() {
if (strcmp(arg1, "all") == 0) {
for (int i = 0; i < kNumRegisters; i++) {
value = GetRegisterValue(i);
- PrintF(
- "%3s: 0x%08x %10d",
- RegisterConfiguration::Default()->GetGeneralRegisterName(i),
- value, value);
+ PrintF("%3s: 0x%08x %10d", RegisterName(Register::from_code(i)),
+ value, value);
if ((argc == 3 && strcmp(arg2, "fp") == 0) &&
i < 8 &&
(i % 2) == 0) {
@@ -287,7 +284,7 @@ void ArmDebugger::Debug() {
int32_t value;
StdoutStream os;
if (GetValue(arg1, &value)) {
- Object* obj = reinterpret_cast<Object*>(value);
+ Object obj(value);
os << arg1 << ": \n";
#ifdef DEBUG
obj->Print(os);
@@ -331,14 +328,12 @@ void ArmDebugger::Debug() {
while (cur < end) {
PrintF(" 0x%08" V8PRIxPTR ": 0x%08x %10d",
reinterpret_cast<intptr_t>(cur), *cur, *cur);
- HeapObject* obj = reinterpret_cast<HeapObject*>(*cur);
- int value = *cur;
+ Object obj(*cur);
Heap* current_heap = sim_->isolate_->heap();
- if (((value & 1) == 0) ||
- current_heap->ContainsSlow(obj->address())) {
+ if (obj.IsSmi() || current_heap->Contains(HeapObject::cast(obj))) {
PrintF(" (");
- if ((value & 1) == 0) {
- PrintF("smi %d", value / 2);
+ if (obj.IsSmi()) {
+ PrintF("smi %d", Smi::ToInt(obj));
} else {
obj->ShortPrint();
}
@@ -695,7 +690,7 @@ Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
}
Simulator::~Simulator() {
- global_monitor_.Pointer()->RemoveProcessor(&global_monitor_processor_);
+ GlobalMonitor::Get()->RemoveProcessor(&global_monitor_processor_);
free(stack_);
}
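The simulator's global monitor moves from base::LazyInstance to
DEFINE_LAZY_LEAKY_OBJECT_GETTER: a construct-on-first-use singleton that is
deliberately never destroyed. A simplified sketch of what such a getter boils
down to (illustrative only; the real macro lives in src/base/lazy-instance.h):

    struct GlobalMonitor {
      static GlobalMonitor* Get() {
        // Function-local static init is thread-safe in C++11; the instance is
        // intentionally leaked so it outlives every Simulator at shutdown.
        static GlobalMonitor* instance = new GlobalMonitor();
        return instance;
      }
    };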
@@ -933,17 +928,16 @@ void Simulator::TrashCallerSaveRegisters() {
int Simulator::ReadW(int32_t addr) {
// All supported ARM targets allow unaligned accesses, so we don't need to
// check the alignment here.
- base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
+ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
local_monitor_.NotifyLoad(addr);
intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
return *ptr;
}
int Simulator::ReadExW(int32_t addr) {
- base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
+ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
local_monitor_.NotifyLoadExcl(addr, TransactionSize::Word);
- global_monitor_.Pointer()->NotifyLoadExcl_Locked(addr,
- &global_monitor_processor_);
+ GlobalMonitor::Get()->NotifyLoadExcl_Locked(addr, &global_monitor_processor_);
intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
return *ptr;
}
@@ -951,18 +945,17 @@ int Simulator::ReadExW(int32_t addr) {
void Simulator::WriteW(int32_t addr, int value) {
// All supported ARM targets allow unaligned accesses, so we don't need to
// check the alignment here.
- base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
+ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
local_monitor_.NotifyStore(addr);
- global_monitor_.Pointer()->NotifyStore_Locked(addr,
- &global_monitor_processor_);
+ GlobalMonitor::Get()->NotifyStore_Locked(addr, &global_monitor_processor_);
intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
*ptr = value;
}
int Simulator::WriteExW(int32_t addr, int value) {
- base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
+ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
if (local_monitor_.NotifyStoreExcl(addr, TransactionSize::Word) &&
- global_monitor_.Pointer()->NotifyStoreExcl_Locked(
+ GlobalMonitor::Get()->NotifyStoreExcl_Locked(
addr, &global_monitor_processor_)) {
intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
*ptr = value;
@@ -975,7 +968,7 @@ int Simulator::WriteExW(int32_t addr, int value) {
uint16_t Simulator::ReadHU(int32_t addr) {
// All supported ARM targets allow unaligned accesses, so we don't need to
// check the alignment here.
- base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
+ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
local_monitor_.NotifyLoad(addr);
uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
return *ptr;
@@ -984,17 +977,16 @@ uint16_t Simulator::ReadHU(int32_t addr) {
int16_t Simulator::ReadH(int32_t addr) {
// All supported ARM targets allow unaligned accesses, so we don't need to
// check the alignment here.
- base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
+ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
local_monitor_.NotifyLoad(addr);
int16_t* ptr = reinterpret_cast<int16_t*>(addr);
return *ptr;
}
uint16_t Simulator::ReadExHU(int32_t addr) {
- base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
+ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
local_monitor_.NotifyLoadExcl(addr, TransactionSize::HalfWord);
- global_monitor_.Pointer()->NotifyLoadExcl_Locked(addr,
- &global_monitor_processor_);
+ GlobalMonitor::Get()->NotifyLoadExcl_Locked(addr, &global_monitor_processor_);
uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
return *ptr;
}
@@ -1002,10 +994,9 @@ uint16_t Simulator::ReadExHU(int32_t addr) {
void Simulator::WriteH(int32_t addr, uint16_t value) {
// All supported ARM targets allow unaligned accesses, so we don't need to
// check the alignment here.
- base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
+ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
local_monitor_.NotifyStore(addr);
- global_monitor_.Pointer()->NotifyStore_Locked(addr,
- &global_monitor_processor_);
+ GlobalMonitor::Get()->NotifyStore_Locked(addr, &global_monitor_processor_);
uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
*ptr = value;
}
@@ -1013,18 +1004,17 @@ void Simulator::WriteH(int32_t addr, uint16_t value) {
void Simulator::WriteH(int32_t addr, int16_t value) {
// All supported ARM targets allow unaligned accesses, so we don't need to
// check the alignment here.
- base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
+ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
local_monitor_.NotifyStore(addr);
- global_monitor_.Pointer()->NotifyStore_Locked(addr,
- &global_monitor_processor_);
+ GlobalMonitor::Get()->NotifyStore_Locked(addr, &global_monitor_processor_);
int16_t* ptr = reinterpret_cast<int16_t*>(addr);
*ptr = value;
}
int Simulator::WriteExH(int32_t addr, uint16_t value) {
- base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
+ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
if (local_monitor_.NotifyStoreExcl(addr, TransactionSize::HalfWord) &&
- global_monitor_.Pointer()->NotifyStoreExcl_Locked(
+ GlobalMonitor::Get()->NotifyStoreExcl_Locked(
addr, &global_monitor_processor_)) {
uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
*ptr = value;
@@ -1035,50 +1025,47 @@ int Simulator::WriteExH(int32_t addr, uint16_t value) {
}
uint8_t Simulator::ReadBU(int32_t addr) {
- base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
+ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
local_monitor_.NotifyLoad(addr);
uint8_t* ptr = reinterpret_cast<uint8_t*>(addr);
return *ptr;
}
int8_t Simulator::ReadB(int32_t addr) {
- base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
+ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
local_monitor_.NotifyLoad(addr);
int8_t* ptr = reinterpret_cast<int8_t*>(addr);
return *ptr;
}
uint8_t Simulator::ReadExBU(int32_t addr) {
- base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
+ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
local_monitor_.NotifyLoadExcl(addr, TransactionSize::Byte);
- global_monitor_.Pointer()->NotifyLoadExcl_Locked(addr,
- &global_monitor_processor_);
+ GlobalMonitor::Get()->NotifyLoadExcl_Locked(addr, &global_monitor_processor_);
uint8_t* ptr = reinterpret_cast<uint8_t*>(addr);
return *ptr;
}
void Simulator::WriteB(int32_t addr, uint8_t value) {
- base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
+ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
local_monitor_.NotifyStore(addr);
- global_monitor_.Pointer()->NotifyStore_Locked(addr,
- &global_monitor_processor_);
+ GlobalMonitor::Get()->NotifyStore_Locked(addr, &global_monitor_processor_);
uint8_t* ptr = reinterpret_cast<uint8_t*>(addr);
*ptr = value;
}
void Simulator::WriteB(int32_t addr, int8_t value) {
- base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
+ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
local_monitor_.NotifyStore(addr);
- global_monitor_.Pointer()->NotifyStore_Locked(addr,
- &global_monitor_processor_);
+ GlobalMonitor::Get()->NotifyStore_Locked(addr, &global_monitor_processor_);
int8_t* ptr = reinterpret_cast<int8_t*>(addr);
*ptr = value;
}
int Simulator::WriteExB(int32_t addr, uint8_t value) {
- base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
+ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
if (local_monitor_.NotifyStoreExcl(addr, TransactionSize::Byte) &&
- global_monitor_.Pointer()->NotifyStoreExcl_Locked(
+ GlobalMonitor::Get()->NotifyStoreExcl_Locked(
addr, &global_monitor_processor_)) {
uint8_t* ptr = reinterpret_cast<uint8_t*>(addr);
*ptr = value;
@@ -1091,17 +1078,16 @@ int Simulator::WriteExB(int32_t addr, uint8_t value) {
int32_t* Simulator::ReadDW(int32_t addr) {
// All supported ARM targets allow unaligned accesses, so we don't need to
// check the alignment here.
- base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
+ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
local_monitor_.NotifyLoad(addr);
int32_t* ptr = reinterpret_cast<int32_t*>(addr);
return ptr;
}
int32_t* Simulator::ReadExDW(int32_t addr) {
- base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
+ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
local_monitor_.NotifyLoadExcl(addr, TransactionSize::DoubleWord);
- global_monitor_.Pointer()->NotifyLoadExcl_Locked(addr,
- &global_monitor_processor_);
+ GlobalMonitor::Get()->NotifyLoadExcl_Locked(addr, &global_monitor_processor_);
int32_t* ptr = reinterpret_cast<int32_t*>(addr);
return ptr;
}
@@ -1109,19 +1095,18 @@ int32_t* Simulator::ReadExDW(int32_t addr) {
void Simulator::WriteDW(int32_t addr, int32_t value1, int32_t value2) {
// All supported ARM targets allow unaligned accesses, so we don't need to
// check the alignment here.
- base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
+ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
local_monitor_.NotifyStore(addr);
- global_monitor_.Pointer()->NotifyStore_Locked(addr,
- &global_monitor_processor_);
+ GlobalMonitor::Get()->NotifyStore_Locked(addr, &global_monitor_processor_);
int32_t* ptr = reinterpret_cast<int32_t*>(addr);
*ptr++ = value1;
*ptr = value2;
}
int Simulator::WriteExDW(int32_t addr, int32_t value1, int32_t value2) {
- base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
+ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
if (local_monitor_.NotifyStoreExcl(addr, TransactionSize::DoubleWord) &&
- global_monitor_.Pointer()->NotifyStoreExcl_Locked(
+ GlobalMonitor::Get()->NotifyStoreExcl_Locked(
addr, &global_monitor_processor_)) {
intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
*ptr++ = value1;
@@ -3212,15 +3197,14 @@ void Simulator::DecodeTypeVFP(Instruction* instr) {
DecodeVCMP(instr);
} else if (((instr->Opc2Value() == 0x1)) && (instr->Opc3Value() == 0x3)) {
// vsqrt
- lazily_initialize_fast_sqrt();
if (instr->SzValue() == 0x1) {
double dm_value = get_double_from_d_register(vm).get_scalar();
- double dd_value = fast_sqrt(dm_value);
+ double dd_value = std::sqrt(dm_value);
dd_value = canonicalizeNaN(dd_value);
set_d_register_from_double(vd, dd_value);
} else {
float sm_value = get_float_from_s_register(m).get_scalar();
- float sd_value = fast_sqrt(sm_value);
+ float sd_value = std::sqrt(sm_value);
sd_value = canonicalizeNaN(sd_value);
set_s_register_from_float(d, sd_value);
}
@@ -5282,10 +5266,9 @@ void Simulator::DecodeSpecialCondition(Instruction* instr) {
src[i] = bit_cast<uint32_t>(result);
}
} else {
- lazily_initialize_fast_sqrt();
for (int i = 0; i < 4; i++) {
float radicand = bit_cast<float>(src[i]);
- float result = 1.0f / fast_sqrt(radicand);
+ float result = 1.0f / std::sqrt(radicand);
result = canonicalizeNaN(result);
src[i] = bit_cast<uint32_t>(result);
}
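
Both the scalar vsqrt case above and this vrsqrte loop drop the lazily initialized fast_sqrt helper in favor of std::sqrt. One lane of the resulting computation, as a self-contained sketch with bit_cast approximated by memcpy (NaN canonicalization omitted):

    #include <cmath>
    #include <cstdint>
    #include <cstring>

    uint32_t RecipSqrtLane(uint32_t lane_bits) {
      float radicand;
      std::memcpy(&radicand, &lane_bits, sizeof radicand);  // bit_cast stand-in
      float result = 1.0f / std::sqrt(radicand);
      uint32_t out;
      std::memcpy(&out, &result, sizeof out);
      return out;
    }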
@@ -6019,8 +6002,6 @@ bool Simulator::GlobalMonitor::Processor::NotifyStoreExcl_Locked(
return false;
}
-Simulator::GlobalMonitor::GlobalMonitor() : head_(nullptr) {}
-
void Simulator::GlobalMonitor::NotifyLoadExcl_Locked(int32_t addr,
Processor* processor) {
processor->NotifyLoadExcl_Locked(addr);
@@ -6071,7 +6052,7 @@ void Simulator::GlobalMonitor::PrependProcessor_Locked(Processor* processor) {
}
void Simulator::GlobalMonitor::RemoveProcessor(Processor* processor) {
- base::LockGuard<base::Mutex> lock_guard(&mutex);
+ base::MutexGuard lock_guard(&mutex);
if (!IsProcessorInLinkedList_Locked(processor)) {
return;
}
@@ -6092,5 +6073,3 @@ void Simulator::GlobalMonitor::RemoveProcessor(Processor* processor) {
} // namespace v8
#endif // USE_SIMULATOR
-
-#endif // V8_TARGET_ARCH_ARM
diff --git a/deps/v8/src/arm/simulator-arm.h b/deps/v8/src/arm/simulator-arm.h
index 69e5cdbe3d..273281416a 100644
--- a/deps/v8/src/arm/simulator-arm.h
+++ b/deps/v8/src/arm/simulator-arm.h
@@ -12,17 +12,18 @@
#ifndef V8_ARM_SIMULATOR_ARM_H_
#define V8_ARM_SIMULATOR_ARM_H_
-#include "src/allocation.h"
-#include "src/base/lazy-instance.h"
-#include "src/base/platform/mutex.h"
-#include "src/boxed-float.h"
+// globals.h defines USE_SIMULATOR.
+#include "src/globals.h"
#if defined(USE_SIMULATOR)
// Running with a simulator.
+#include "src/allocation.h"
#include "src/arm/constants-arm.h"
-#include "src/assembler.h"
#include "src/base/hashmap.h"
+#include "src/base/lazy-instance.h"
+#include "src/base/platform/mutex.h"
+#include "src/boxed-float.h"
#include "src/simulator-base.h"
namespace v8 {
@@ -446,8 +447,6 @@ class Simulator : public SimulatorBase {
class GlobalMonitor {
public:
- GlobalMonitor();
-
class Processor {
public:
Processor();
@@ -483,16 +482,21 @@ class Simulator : public SimulatorBase {
// Called when the simulator is destroyed.
void RemoveProcessor(Processor* processor);
+ static GlobalMonitor* Get();
+
private:
+ // Private constructor. Call {GlobalMonitor::Get()} to get the singleton.
+ GlobalMonitor() = default;
+ friend class base::LeakyObject<GlobalMonitor>;
+
bool IsProcessorInLinkedList_Locked(Processor* processor) const;
void PrependProcessor_Locked(Processor* processor);
- Processor* head_;
+ Processor* head_ = nullptr;
};
LocalMonitor local_monitor_;
GlobalMonitor::Processor global_monitor_processor_;
- static base::LazyInstance<GlobalMonitor>::type global_monitor_;
};
} // namespace internal
diff --git a/deps/v8/src/arm64/assembler-arm64-inl.h b/deps/v8/src/arm64/assembler-arm64-inl.h
index 5a163b06fd..253fb984f4 100644
--- a/deps/v8/src/arm64/assembler-arm64-inl.h
+++ b/deps/v8/src/arm64/assembler-arm64-inl.h
@@ -9,6 +9,7 @@
#include "src/assembler.h"
#include "src/debug/debug.h"
#include "src/objects-inl.h"
+#include "src/objects/smi.h"
namespace v8 {
namespace internal {
@@ -89,14 +90,14 @@ inline void CPURegList::Remove(const CPURegister& other1,
inline void CPURegList::Combine(int code) {
DCHECK(IsValid());
DCHECK(CPURegister::Create(code, size_, type_).IsValid());
- list_ |= (1UL << code);
+ list_ |= (1ULL << code);
}
inline void CPURegList::Remove(int code) {
DCHECK(IsValid());
DCHECK(CPURegister::Create(code, size_, type_).IsValid());
- list_ &= ~(1UL << code);
+ list_ &= ~(1ULL << code);
}
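
The 1UL to 1ULL changes in Combine and Remove matter on LLP64 targets such as 64-bit Windows, where unsigned long is only 32 bits: shifting a 32-bit 1 by a register code of 32 or more is undefined behavior and can never set the upper half of a 64-bit RegList. A minimal illustration:

    #include <cstdint>

    using RegList = uint64_t;

    void CombineSketch(RegList* list, int code) {
      // 1ULL is at least 64 bits everywhere, so the shift is well defined
      // for all register codes 0..63; 1UL would truncate on LLP64 hosts.
      *list |= (1ULL << code);
    }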
@@ -212,13 +213,12 @@ struct ImmediateInitializer {
}
};
-
-template<>
-struct ImmediateInitializer<Smi*> {
+template <>
+struct ImmediateInitializer<Smi> {
static const bool kIsIntType = false;
- static inline RelocInfo::Mode rmode_for(Smi* t) { return RelocInfo::NONE; }
- static inline int64_t immediate_for(Smi* t) {;
- return reinterpret_cast<int64_t>(t);
+ static inline RelocInfo::Mode rmode_for(Smi t) { return RelocInfo::NONE; }
+ static inline int64_t immediate_for(Smi t) {
+ return static_cast<int64_t>(t.ptr());
}
};
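
The specialization moves from Smi* to Smi because Smi is now a value type that carries the tagged word directly instead of being a reinterpret_cast pointer. A simplified stand-in for the shape this template assumes (the real class enforces tagging invariants):

    #include <cstdint>

    using Address = uintptr_t;

    class SmiSketch {
     public:
      constexpr explicit SmiSketch(Address ptr) : ptr_(ptr) {}
      constexpr Address ptr() const { return ptr_; }  // the raw tagged word
     private:
      Address ptr_;
    };

    // immediate_for then needs no cast through a pointer type:
    int64_t ImmediateFor(SmiSketch t) { return static_cast<int64_t>(t.ptr()); }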
@@ -340,8 +340,6 @@ Operand Operand::ToExtendedRegister() const {
Immediate Operand::immediate_for_heap_object_request() const {
DCHECK((heap_object_request().kind() == HeapObjectRequest::kHeapNumber &&
immediate_.rmode() == RelocInfo::EMBEDDED_OBJECT) ||
- (heap_object_request().kind() == HeapObjectRequest::kCodeStub &&
- immediate_.rmode() == RelocInfo::CODE_TARGET) ||
(heap_object_request().kind() == HeapObjectRequest::kStringConstant &&
immediate_.rmode() == RelocInfo::EMBEDDED_OBJECT));
return immediate_;
@@ -547,7 +545,7 @@ Address Assembler::target_address_at(Address pc, Address constant_pool) {
Handle<Code> Assembler::code_target_object_handle_at(Address pc) {
Instruction* instr = reinterpret_cast<Instruction*>(pc);
if (instr->IsLdrLiteralX()) {
- return Handle<Code>(reinterpret_cast<Code**>(
+ return Handle<Code>(reinterpret_cast<Address*>(
Assembler::target_address_at(pc, 0 /* unused */)));
} else {
DCHECK(instr->IsBranchAndLink() || instr->IsUnconditionalBranch());
@@ -590,7 +588,7 @@ int Assembler::deserialization_special_target_size(Address location) {
}
void Assembler::deserialization_set_special_target_at(Address location,
- Code* code,
+ Code code,
Address target) {
Instruction* instr = reinterpret_cast<Instruction*>(location);
if (instr->IsBranchAndLink() || instr->IsUnconditionalBranch()) {
@@ -658,9 +656,7 @@ Address RelocInfo::target_address() {
}
Address RelocInfo::target_address_address() {
- DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_) || IsWasmCall(rmode_) ||
- IsEmbeddedObject(rmode_) || IsExternalReference(rmode_) ||
- IsOffHeapTarget(rmode_));
+ DCHECK(HasTargetAddressAddress());
Instruction* instr = reinterpret_cast<Instruction*>(pc_);
// Read the address of the word containing the target_address in an
// instruction stream.
@@ -679,7 +675,7 @@ Address RelocInfo::target_address_address() {
return constant_pool_entry_address();
} else {
DCHECK(instr->IsBranchAndLink() || instr->IsUnconditionalBranch());
- return reinterpret_cast<Address>(pc_);
+ return pc_;
}
}
@@ -689,15 +685,15 @@ Address RelocInfo::constant_pool_entry_address() {
return Assembler::target_pointer_address_at(pc_);
}
-HeapObject* RelocInfo::target_object() {
+HeapObject RelocInfo::target_object() {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- return HeapObject::cast(reinterpret_cast<Object*>(
- Assembler::target_address_at(pc_, constant_pool_)));
+ return HeapObject::cast(
+ Object(Assembler::target_address_at(pc_, constant_pool_)));
}
Handle<HeapObject> RelocInfo::target_object_handle(Assembler* origin) {
if (rmode_ == EMBEDDED_OBJECT) {
- return Handle<HeapObject>(reinterpret_cast<HeapObject**>(
+ return Handle<HeapObject>(reinterpret_cast<Address*>(
Assembler::target_address_at(pc_, constant_pool_)));
} else {
DCHECK(IsCodeTarget(rmode_));
@@ -705,19 +701,17 @@ Handle<HeapObject> RelocInfo::target_object_handle(Assembler* origin) {
}
}
-void RelocInfo::set_target_object(Heap* heap, HeapObject* target,
+void RelocInfo::set_target_object(Heap* heap, HeapObject target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- Assembler::set_target_address_at(pc_, constant_pool_,
- reinterpret_cast<Address>(target),
+ Assembler::set_target_address_at(pc_, constant_pool_, target->ptr(),
icache_flush_mode);
- if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != nullptr) {
+ if (write_barrier_mode == UPDATE_WRITE_BARRIER && !host().is_null()) {
WriteBarrierForCode(host(), this, target);
}
}
-
Address RelocInfo::target_external_reference() {
DCHECK(rmode_ == EXTERNAL_REFERENCE);
return Assembler::target_address_at(pc_, constant_pool_);
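
The switch from reinterpret_cast<Code**> and reinterpret_cast<HeapObject**> to reinterpret_cast<Address*> when constructing handles reflects the same pointer-to-value migration: a handle's location slot now holds a raw tagged Address rather than a pointer to a C++ object. A sketch under that assumption, with hypothetical names:

    #include <cstdint>

    using Address = uintptr_t;

    template <typename T>
    class HandleSketch {
     public:
      explicit HandleSketch(Address* location) : location_(location) {}
      Address address() const { return *location_; }  // tagged word in the slot
     private:
      Address* location_;
    };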
@@ -1131,7 +1125,7 @@ const Register& Assembler::AppropriateZeroRegFor(const CPURegister& reg) const {
inline void Assembler::CheckBufferSpace() {
- DCHECK(pc_ < (buffer_ + buffer_size_));
+ DCHECK_LT(pc_, buffer_start_ + buffer_->size());
if (buffer_space() < kGap) {
GrowBuffer();
}
diff --git a/deps/v8/src/arm64/assembler-arm64.cc b/deps/v8/src/arm64/assembler-arm64.cc
index eb581b472b..be0a4a9519 100644
--- a/deps/v8/src/arm64/assembler-arm64.cc
+++ b/deps/v8/src/arm64/assembler-arm64.cc
@@ -33,7 +33,6 @@
#include "src/arm64/assembler-arm64-inl.h"
#include "src/base/bits.h"
#include "src/base/cpu.h"
-#include "src/code-stubs.h"
#include "src/frame-constants.h"
#include "src/register-configuration.h"
#include "src/string-constants.h"
@@ -68,7 +67,7 @@ CPURegister CPURegList::PopLowestIndex() {
return NoCPUReg;
}
int index = CountTrailingZeros(list_, kRegListSizeInBits);
- DCHECK((1 << index) & list_);
+ DCHECK((1LL << index) & list_);
Remove(index);
return CPURegister::Create(index, size_, type_);
}
@@ -81,7 +80,7 @@ CPURegister CPURegList::PopHighestIndex() {
}
int index = CountLeadingZeros(list_, kRegListSizeInBits);
index = kRegListSizeInBits - 1 - index;
- DCHECK((1 << index) & list_);
+ DCHECK((1LL << index) & list_);
Remove(index);
return CPURegister::Create(index, size_, type_);
}
@@ -110,8 +109,14 @@ CPURegList CPURegList::GetCalleeSavedV(int size) {
CPURegList CPURegList::GetCallerSaved(int size) {
+#if defined(V8_OS_WIN)
+ // x18 is reserved as a platform register on Windows arm64.
+ // Registers x0-x17 and lr (x30) are caller-saved.
+ CPURegList list = CPURegList(CPURegister::kRegister, size, 0, 17);
+#else
// Registers x0-x18 and lr (x30) are caller-saved.
CPURegList list = CPURegList(CPURegister::kRegister, size, 0, 18);
+#endif
list.Combine(lr);
return list;
}
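
Excluding x18 here (and from the safepoint list below) follows the Windows arm64 platform-register convention: the OS reserves x18 (commonly documented as holding the TEB pointer), so generated code must not clobber it. The compile-time split reduces to:

    #if defined(V8_OS_WIN)
    constexpr int kLastCallerSavedGpCode = 17;  // x0-x17; x18 is reserved
    #else
    constexpr int kLastCallerSavedGpCode = 18;  // x0-x18
    #endif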
@@ -144,9 +149,13 @@ CPURegList CPURegList::GetSafepointSavedRegisters() {
list.Remove(16);
list.Remove(17);
+// Don't add x18 to the safepoint list on Windows arm64 because it is
+// reserved as a platform register.
+#if !defined(V8_OS_WIN)
// Add x18 to the safepoint list, as although it's not in kJSCallerSaved, it
// is a caller-saved register according to the procedure call standard.
list.Combine(18);
+#endif
// Add the link register (x30) to the safepoint list.
list.Combine(30);
@@ -181,29 +190,6 @@ bool RelocInfo::IsInConstantPool() {
return instr->IsLdrLiteralX();
}
-int RelocInfo::GetDeoptimizationId(Isolate* isolate, DeoptimizeKind kind) {
- DCHECK(IsRuntimeEntry(rmode_));
- Instruction* movz_instr = reinterpret_cast<Instruction*>(pc_)->preceding();
- DCHECK(movz_instr->IsMovz());
- uint64_t imm = static_cast<uint64_t>(movz_instr->ImmMoveWide())
- << (16 * movz_instr->ShiftMoveWide());
- DCHECK_LE(imm, INT_MAX);
-
- return static_cast<int>(imm);
-}
-
-void RelocInfo::set_js_to_wasm_address(Address address,
- ICacheFlushMode icache_flush_mode) {
- DCHECK_EQ(rmode_, JS_TO_WASM_CALL);
- Assembler::set_target_address_at(pc_, constant_pool_, address,
- icache_flush_mode);
-}
-
-Address RelocInfo::js_to_wasm_address() const {
- DCHECK_EQ(rmode_, JS_TO_WASM_CALL);
- return Assembler::target_address_at(pc_, constant_pool_);
-}
-
uint32_t RelocInfo::wasm_call_tag() const {
DCHECK(rmode_ == WASM_CALL || rmode_ == WASM_STUB_CALL);
Instruction* instr = reinterpret_cast<Instruction*>(pc_);
@@ -334,8 +320,7 @@ bool ConstPool::AddSharedEntry(SharedEntryMap& entry_map, uint64_t data,
// Constant Pool.
bool ConstPool::RecordEntry(intptr_t data, RelocInfo::Mode mode) {
- DCHECK(mode != RelocInfo::COMMENT && mode != RelocInfo::CONST_POOL &&
- mode != RelocInfo::VENEER_POOL &&
+ DCHECK(mode != RelocInfo::CONST_POOL && mode != RelocInfo::VENEER_POOL &&
mode != RelocInfo::DEOPT_SCRIPT_OFFSET &&
mode != RelocInfo::DEOPT_INLINING_ID &&
mode != RelocInfo::DEOPT_REASON && mode != RelocInfo::DEOPT_ID);
@@ -506,11 +491,11 @@ MemOperand::PairResult MemOperand::AreConsistentForPair(
}
// Step two: check that the offsets are contiguous and that the range
// is OK for ldp/stp.
- if ((operandB.offset() == operandA.offset() + (1 << access_size_log2)) &&
+ if ((operandB.offset() == operandA.offset() + (1LL << access_size_log2)) &&
is_int7(operandA.offset() >> access_size_log2)) {
return kPairAB;
}
- if ((operandA.offset() == operandB.offset() + (1 << access_size_log2)) &&
+ if ((operandA.offset() == operandB.offset() + (1LL << access_size_log2)) &&
is_int7(operandB.offset() >> access_size_log2)) {
return kPairBA;
}
@@ -548,9 +533,9 @@ void ConstPool::EmitEntries() {
// Assembler
-Assembler::Assembler(const AssemblerOptions& options, void* buffer,
- int buffer_size)
- : AssemblerBase(options, buffer, buffer_size),
+Assembler::Assembler(const AssemblerOptions& options,
+ std::unique_ptr<AssemblerBuffer> buffer)
+ : AssemblerBase(options, std::move(buffer)),
constpool_(this),
unresolved_branches_() {
const_pool_blocked_nesting_ = 0;
@@ -558,7 +543,6 @@ Assembler::Assembler(const AssemblerOptions& options, void* buffer,
Reset();
}
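
The constructor no longer takes a raw (buffer, size) pair guarded by an own_buffer_ flag; ownership is now expressed in the type. A sketch of the model, with stand-in names:

    #include <cstdint>
    #include <memory>

    struct BufferSketch {  // stand-in for V8's AssemblerBuffer interface
      virtual ~BufferSketch() = default;
      virtual uint8_t* start() = 0;
      virtual int size() = 0;
    };

    class AssemblerSketch {
     public:
      // Takes ownership: the buffer is released exactly once, when the
      // assembler is destroyed, with no manual flag tracking.
      explicit AssemblerSketch(std::unique_ptr<BufferSketch> buffer)
          : buffer_(std::move(buffer)) {}
     private:
      std::unique_ptr<BufferSketch> buffer_;
    };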
-
Assembler::~Assembler() {
DCHECK(constpool_.IsEmpty());
DCHECK_EQ(const_pool_blocked_nesting_, 0);
@@ -568,15 +552,15 @@ Assembler::~Assembler() {
void Assembler::Reset() {
#ifdef DEBUG
- DCHECK((pc_ >= buffer_) && (pc_ < buffer_ + buffer_size_));
+ DCHECK((pc_ >= buffer_start_) && (pc_ < buffer_start_ + buffer_->size()));
DCHECK_EQ(const_pool_blocked_nesting_, 0);
DCHECK_EQ(veneer_pool_blocked_nesting_, 0);
DCHECK(unresolved_branches_.empty());
- memset(buffer_, 0, pc_ - buffer_);
+ memset(buffer_start_, 0, pc_ - buffer_start_);
#endif
- pc_ = buffer_;
+ pc_ = buffer_start_;
ReserveCodeTargetSpace(64);
- reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);
+ reloc_info_writer.Reposition(buffer_start_ + buffer_->size(), pc_);
constpool_.Clear();
next_constant_pool_check_ = 0;
next_veneer_pool_check_ = kMaxInt;
@@ -586,7 +570,7 @@ void Assembler::Reset() {
void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
DCHECK_IMPLIES(isolate == nullptr, heap_object_requests_.empty());
for (auto& request : heap_object_requests_) {
- Address pc = reinterpret_cast<Address>(buffer_) + request.offset();
+ Address pc = reinterpret_cast<Address>(buffer_start_) + request.offset();
switch (request.kind()) {
case HeapObjectRequest::kHeapNumber: {
Handle<HeapObject> object =
@@ -594,15 +578,6 @@ void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
set_target_address_at(pc, 0 /* unused */, object.address());
break;
}
- case HeapObjectRequest::kCodeStub: {
- request.code_stub()->set_isolate(isolate);
- Instruction* instr = reinterpret_cast<Instruction*>(pc);
- DCHECK(instr->IsBranchAndLink() || instr->IsUnconditionalBranch());
- DCHECK_EQ(instr->ImmPCOffset() % kInstrSize, 0);
- UpdateCodeTarget(instr->ImmPCOffset() >> kInstrSizeLog2,
- request.code_stub()->GetCode());
- break;
- }
case HeapObjectRequest::kStringConstant: {
const StringConstantBase* str = request.string();
CHECK_NOT_NULL(str);
@@ -619,20 +594,22 @@ void Assembler::GetCode(Isolate* isolate, CodeDesc* desc) {
CheckConstPool(true, false);
DCHECK(constpool_.IsEmpty());
+ int code_comments_size = WriteCodeComments();
+
AllocateAndInstallRequestedHeapObjects(isolate);
// Set up code descriptor.
if (desc) {
- desc->buffer = reinterpret_cast<byte*>(buffer_);
- desc->buffer_size = buffer_size_;
+ desc->buffer = buffer_start_;
+ desc->buffer_size = buffer_->size();
desc->instr_size = pc_offset();
- desc->reloc_size =
- static_cast<int>((reinterpret_cast<byte*>(buffer_) + buffer_size_) -
- reloc_info_writer.pos());
+ desc->reloc_size = static_cast<int>((buffer_start_ + desc->buffer_size) -
+ reloc_info_writer.pos());
desc->origin = this;
desc->constant_pool_size = 0;
desc->unwinding_info_size = 0;
desc->unwinding_info = nullptr;
+ desc->code_comments_size = code_comments_size;
}
}
@@ -644,6 +621,10 @@ void Assembler::Align(int m) {
}
}
+void Assembler::CodeTargetAlign() {
+ // Preferred alignment of jump targets on some ARM chips.
+ Align(8);
+}
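
CodeTargetAlign gains a real body: jump targets are padded to an 8-byte boundary. Alignment of a pc offset is just modular padding; as a sketch, assuming Align emits one filler byte per increment:

    // Number of filler bytes needed to reach the next multiple of m
    // (m a power of two); CodeTargetAlign() asks for m == 8.
    int PaddingFor(int pc_offset, int m) {
      return (m - (pc_offset % m)) % m;
    }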
void Assembler::CheckLabelLinkChain(Label const * label) {
#ifdef DEBUG
@@ -696,7 +677,7 @@ void Assembler::RemoveBranchFromLabelLinkChain(Instruction* branch,
label->Unuse();
} else {
label->link_to(
- static_cast<int>(reinterpret_cast<byte*>(next_link) - buffer_));
+ static_cast<int>(reinterpret_cast<byte*>(next_link) - buffer_start_));
}
} else if (branch == next_link) {
@@ -1116,6 +1097,12 @@ void Assembler::adr(const Register& rd, Label* label) {
}
+void Assembler::nop(NopMarkerTypes n) {
+ DCHECK((FIRST_NOP_MARKER <= n) && (n <= LAST_NOP_MARKER));
+ mov(Register::XRegFromCode(n), Register::XRegFromCode(n));
+}
+
+
void Assembler::add(const Register& rd,
const Register& rn,
const Operand& operand) {
@@ -1719,13 +1706,6 @@ Operand Operand::EmbeddedNumber(double number) {
return result;
}
-Operand Operand::EmbeddedCode(CodeStub* stub) {
- Operand result(0, RelocInfo::CODE_TARGET);
- result.heap_object_request_.emplace(stub);
- DCHECK(result.IsHeapObjectRequest());
- return result;
-}
-
Operand Operand::EmbeddedStringConstant(const StringConstantBase* str) {
Operand result(0, RelocInfo::EMBEDDED_OBJECT);
result.heap_object_request_.emplace(str);
@@ -3925,7 +3905,7 @@ void Assembler::dcptr(Label* label) {
// In this case, label->pos() returns the offset of the label from the
// start of the buffer.
internal_reference_positions_.push_back(pc_offset());
- dc64(reinterpret_cast<uintptr_t>(buffer_ + label->pos()));
+ dc64(reinterpret_cast<uintptr_t>(buffer_start_ + label->pos()));
} else {
int32_t offset;
if (label->is_linked()) {
@@ -4002,16 +3982,16 @@ void Assembler::MoveWide(const Register& rd, uint64_t imm, int shift,
// Calculate a new immediate and shift combination to encode the immediate
// argument.
shift = 0;
- if ((imm & ~0xFFFFUL) == 0) {
+ if ((imm & ~0xFFFFULL) == 0) {
// Nothing to do.
- } else if ((imm & ~(0xFFFFUL << 16)) == 0) {
+ } else if ((imm & ~(0xFFFFULL << 16)) == 0) {
imm >>= 16;
shift = 1;
- } else if ((imm & ~(0xFFFFUL << 32)) == 0) {
+ } else if ((imm & ~(0xFFFFULL << 32)) == 0) {
DCHECK(rd.Is64Bits());
imm >>= 32;
shift = 2;
- } else if ((imm & ~(0xFFFFUL << 48)) == 0) {
+ } else if ((imm & ~(0xFFFFULL << 48)) == 0) {
DCHECK(rd.Is64Bits());
imm >>= 48;
shift = 3;
@@ -4710,45 +4690,33 @@ bool Assembler::IsImmFP64(double imm) {
void Assembler::GrowBuffer() {
- if (!own_buffer_) FATAL("external code buffer is too small");
-
// Compute new buffer size.
- CodeDesc desc; // the new buffer
- if (buffer_size_ < 1 * MB) {
- desc.buffer_size = 2 * buffer_size_;
- } else {
- desc.buffer_size = buffer_size_ + 1 * MB;
- }
+ int old_size = buffer_->size();
+ int new_size = std::min(2 * old_size, old_size + 1 * MB);
// Some internal data structures overflow for very large buffers;
// they must ensure that kMaximalBufferSize is not too large.
- if (desc.buffer_size > kMaximalBufferSize) {
+ if (new_size > kMaximalBufferSize) {
V8::FatalProcessOutOfMemory(nullptr, "Assembler::GrowBuffer");
}
- byte* buffer = reinterpret_cast<byte*>(buffer_);
-
// Set up new buffer.
- desc.buffer = NewArray<byte>(desc.buffer_size);
- desc.origin = this;
-
- desc.instr_size = pc_offset();
- desc.reloc_size =
- static_cast<int>((buffer + buffer_size_) - reloc_info_writer.pos());
+ std::unique_ptr<AssemblerBuffer> new_buffer = buffer_->Grow(new_size);
+ DCHECK_EQ(new_size, new_buffer->size());
+ byte* new_start = new_buffer->start();
// Copy the data.
- intptr_t pc_delta = desc.buffer - buffer;
- intptr_t rc_delta = (desc.buffer + desc.buffer_size) -
- (buffer + buffer_size_);
- memmove(desc.buffer, buffer, desc.instr_size);
- memmove(reloc_info_writer.pos() + rc_delta,
- reloc_info_writer.pos(), desc.reloc_size);
+ intptr_t pc_delta = new_start - buffer_start_;
+ intptr_t rc_delta = (new_start + new_size) - (buffer_start_ + old_size);
+ size_t reloc_size = (buffer_start_ + old_size) - reloc_info_writer.pos();
+ memmove(new_start, buffer_start_, pc_offset());
+ memmove(reloc_info_writer.pos() + rc_delta, reloc_info_writer.pos(),
+ reloc_size);
// Switch buffers.
- DeleteArray(buffer_);
- buffer_ = desc.buffer;
- buffer_size_ = desc.buffer_size;
- pc_ = pc_ + pc_delta;
+ buffer_ = std::move(new_buffer);
+ buffer_start_ = new_start;
+ pc_ += pc_delta;
reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
reloc_info_writer.last_pc() + pc_delta);
@@ -4758,7 +4726,7 @@ void Assembler::GrowBuffer() {
// Relocate internal references.
for (auto pos : internal_reference_positions_) {
- intptr_t* p = reinterpret_cast<intptr_t*>(buffer_ + pos);
+ intptr_t* p = reinterpret_cast<intptr_t*>(buffer_start_ + pos);
*p += pc_delta;
}
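
The rewritten GrowBuffer collapses the old two-branch sizing rule into one expression: std::min picks the smaller increment, so buffers double while they are under 1 MB and then grow linearly by 1 MB. Stand-alone form:

    #include <algorithm>

    constexpr int MB = 1 << 20;

    // 2*n < n + 1 MB exactly when n < 1 MB, so: double small buffers,
    // add 1 MB to large ones.
    int NextBufferSize(int old_size) {
      return std::min(2 * old_size, old_size + 1 * MB);
    }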
@@ -4767,15 +4735,14 @@ void Assembler::GrowBuffer() {
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data,
ConstantPoolMode constant_pool_mode) {
- if ((rmode == RelocInfo::COMMENT) ||
- (rmode == RelocInfo::INTERNAL_REFERENCE) ||
+ if ((rmode == RelocInfo::INTERNAL_REFERENCE) ||
(rmode == RelocInfo::CONST_POOL) || (rmode == RelocInfo::VENEER_POOL) ||
(rmode == RelocInfo::DEOPT_SCRIPT_OFFSET) ||
(rmode == RelocInfo::DEOPT_INLINING_ID) ||
(rmode == RelocInfo::DEOPT_REASON) || (rmode == RelocInfo::DEOPT_ID)) {
// Adjust code for new modes.
- DCHECK(RelocInfo::IsComment(rmode) || RelocInfo::IsDeoptReason(rmode) ||
- RelocInfo::IsDeoptId(rmode) || RelocInfo::IsDeoptPosition(rmode) ||
+ DCHECK(RelocInfo::IsDeoptReason(rmode) || RelocInfo::IsDeoptId(rmode) ||
+ RelocInfo::IsDeoptPosition(rmode) ||
RelocInfo::IsInternalReference(rmode) ||
RelocInfo::IsConstPool(rmode) || RelocInfo::IsVeneerPool(rmode));
// These modes do not need an entry in the constant pool.
@@ -4792,7 +4759,7 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data,
if (!ShouldRecordRelocInfo(rmode)) return;
// We do not try to reuse pool constants.
- RelocInfo rinfo(reinterpret_cast<Address>(pc_), rmode, data, nullptr);
+ RelocInfo rinfo(reinterpret_cast<Address>(pc_), rmode, data, Code());
DCHECK_GE(buffer_space(), kMaxRelocSize); // too late to grow buffer here
reloc_info_writer.Write(&rinfo);
@@ -4894,8 +4861,8 @@ bool Assembler::ShouldEmitVeneer(int max_reachable_pc, int margin) {
void Assembler::RecordVeneerPool(int location_offset, int size) {
- RelocInfo rinfo(reinterpret_cast<Address>(buffer_) + location_offset,
- RelocInfo::VENEER_POOL, static_cast<intptr_t>(size), nullptr);
+ RelocInfo rinfo(reinterpret_cast<Address>(buffer_start_) + location_offset,
+ RelocInfo::VENEER_POOL, static_cast<intptr_t>(size), Code());
reloc_info_writer.Write(&rinfo);
}
diff --git a/deps/v8/src/arm64/assembler-arm64.h b/deps/v8/src/arm64/assembler-arm64.h
index 0432708fd1..54e46c74dd 100644
--- a/deps/v8/src/arm64/assembler-arm64.h
+++ b/deps/v8/src/arm64/assembler-arm64.h
@@ -12,659 +12,23 @@
#include "src/arm64/constants-arm64.h"
#include "src/arm64/instructions-arm64.h"
+#include "src/arm64/register-arm64.h"
#include "src/assembler.h"
#include "src/base/optional.h"
+#include "src/constant-pool.h"
#include "src/globals.h"
#include "src/utils.h"
+// The Windows arm64 SDK defines mvn as the NEON intrinsic neon_not, which
+// is not used here.
+#if defined(V8_OS_WIN) && defined(mvn)
+#undef mvn
+#endif
namespace v8 {
namespace internal {
// -----------------------------------------------------------------------------
-// Registers.
-// clang-format off
-#define GENERAL_REGISTER_CODE_LIST(R) \
- R(0) R(1) R(2) R(3) R(4) R(5) R(6) R(7) \
- R(8) R(9) R(10) R(11) R(12) R(13) R(14) R(15) \
- R(16) R(17) R(18) R(19) R(20) R(21) R(22) R(23) \
- R(24) R(25) R(26) R(27) R(28) R(29) R(30) R(31)
-
-#define GENERAL_REGISTERS(R) \
- R(x0) R(x1) R(x2) R(x3) R(x4) R(x5) R(x6) R(x7) \
- R(x8) R(x9) R(x10) R(x11) R(x12) R(x13) R(x14) R(x15) \
- R(x16) R(x17) R(x18) R(x19) R(x20) R(x21) R(x22) R(x23) \
- R(x24) R(x25) R(x26) R(x27) R(x28) R(x29) R(x30) R(x31)
-
-#define ALLOCATABLE_GENERAL_REGISTERS(R) \
- R(x0) R(x1) R(x2) R(x3) R(x4) R(x5) R(x6) R(x7) \
- R(x8) R(x9) R(x10) R(x11) R(x12) R(x13) R(x14) R(x15) \
- R(x18) R(x19) R(x20) R(x21) R(x22) R(x23) R(x24) R(x25) \
- R(x27) R(x28)
-
-#define FLOAT_REGISTERS(V) \
- V(s0) V(s1) V(s2) V(s3) V(s4) V(s5) V(s6) V(s7) \
- V(s8) V(s9) V(s10) V(s11) V(s12) V(s13) V(s14) V(s15) \
- V(s16) V(s17) V(s18) V(s19) V(s20) V(s21) V(s22) V(s23) \
- V(s24) V(s25) V(s26) V(s27) V(s28) V(s29) V(s30) V(s31)
-
-#define DOUBLE_REGISTERS(R) \
- R(d0) R(d1) R(d2) R(d3) R(d4) R(d5) R(d6) R(d7) \
- R(d8) R(d9) R(d10) R(d11) R(d12) R(d13) R(d14) R(d15) \
- R(d16) R(d17) R(d18) R(d19) R(d20) R(d21) R(d22) R(d23) \
- R(d24) R(d25) R(d26) R(d27) R(d28) R(d29) R(d30) R(d31)
-
-#define SIMD128_REGISTERS(V) \
- V(q0) V(q1) V(q2) V(q3) V(q4) V(q5) V(q6) V(q7) \
- V(q8) V(q9) V(q10) V(q11) V(q12) V(q13) V(q14) V(q15) \
- V(q16) V(q17) V(q18) V(q19) V(q20) V(q21) V(q22) V(q23) \
- V(q24) V(q25) V(q26) V(q27) V(q28) V(q29) V(q30) V(q31)
-
-// Register d29 could be allocated, but we keep an even length list here, in
-// order to make stack alignment easier for save and restore.
-#define ALLOCATABLE_DOUBLE_REGISTERS(R) \
- R(d0) R(d1) R(d2) R(d3) R(d4) R(d5) R(d6) R(d7) \
- R(d8) R(d9) R(d10) R(d11) R(d12) R(d13) R(d14) R(d16) \
- R(d17) R(d18) R(d19) R(d20) R(d21) R(d22) R(d23) R(d24) \
- R(d25) R(d26) R(d27) R(d28)
-// clang-format on
-
-constexpr int kRegListSizeInBits = sizeof(RegList) * kBitsPerByte;
-
-const int kNumRegs = kNumberOfRegisters;
-// Registers x0-x17 are caller-saved.
-const int kNumJSCallerSaved = 18;
-const RegList kJSCallerSaved = 0x3ffff;
-
-// Number of registers for which space is reserved in safepoints. Must be a
-// multiple of eight.
-// TODO(all): Refine this number.
-const int kNumSafepointRegisters = 32;
-
-// Define the list of registers actually saved at safepoints.
-// Note that the number of saved registers may be smaller than the reserved
-// space, i.e. kNumSafepointSavedRegisters <= kNumSafepointRegisters.
-#define kSafepointSavedRegisters CPURegList::GetSafepointSavedRegisters().list()
-#define kNumSafepointSavedRegisters \
- CPURegList::GetSafepointSavedRegisters().Count()
-
-// Some CPURegister methods can return Register and VRegister types, so we
-// need to declare them in advance.
-class Register;
-class VRegister;
-
-enum RegisterCode {
-#define REGISTER_CODE(R) kRegCode_##R,
- GENERAL_REGISTERS(REGISTER_CODE)
-#undef REGISTER_CODE
- kRegAfterLast
-};
-
-class CPURegister : public RegisterBase<CPURegister, kRegAfterLast> {
- public:
- enum RegisterType {
- kRegister,
- kVRegister,
- kNoRegister
- };
-
- static constexpr CPURegister no_reg() {
- return CPURegister{0, 0, kNoRegister};
- }
-
- template <int code, int size, RegisterType type>
- static constexpr CPURegister Create() {
- static_assert(IsValid(code, size, type), "Cannot create invalid registers");
- return CPURegister{code, size, type};
- }
-
- static CPURegister Create(int code, int size, RegisterType type) {
- DCHECK(IsValid(code, size, type));
- return CPURegister{code, size, type};
- }
-
- RegisterType type() const { return reg_type_; }
- int SizeInBits() const {
- DCHECK(IsValid());
- return reg_size_;
- }
- int SizeInBytes() const {
- DCHECK(IsValid());
- DCHECK_EQ(SizeInBits() % 8, 0);
- return reg_size_ / 8;
- }
- bool Is8Bits() const {
- DCHECK(IsValid());
- return reg_size_ == 8;
- }
- bool Is16Bits() const {
- DCHECK(IsValid());
- return reg_size_ == 16;
- }
- bool Is32Bits() const {
- DCHECK(IsValid());
- return reg_size_ == 32;
- }
- bool Is64Bits() const {
- DCHECK(IsValid());
- return reg_size_ == 64;
- }
- bool Is128Bits() const {
- DCHECK(IsValid());
- return reg_size_ == 128;
- }
- bool IsValid() const { return reg_type_ != kNoRegister; }
- bool IsNone() const { return reg_type_ == kNoRegister; }
- bool Is(const CPURegister& other) const {
- return Aliases(other) && (reg_size_ == other.reg_size_);
- }
- bool Aliases(const CPURegister& other) const {
- return (reg_code_ == other.reg_code_) && (reg_type_ == other.reg_type_);
- }
-
- bool IsZero() const;
- bool IsSP() const;
-
- bool IsRegister() const { return reg_type_ == kRegister; }
- bool IsVRegister() const { return reg_type_ == kVRegister; }
-
- bool IsFPRegister() const { return IsS() || IsD(); }
-
- bool IsW() const { return IsRegister() && Is32Bits(); }
- bool IsX() const { return IsRegister() && Is64Bits(); }
-
- // These assertions ensure that the size and type of the register are as
- // described. They do not consider the number of lanes that make up a vector.
- // So, for example, Is8B() implies IsD(), and Is1D() implies IsD, but IsD()
- // does not imply Is1D() or Is8B().
- // Check the number of lanes, ie. the format of the vector, using methods such
- // as Is8B(), Is1D(), etc. in the VRegister class.
- bool IsV() const { return IsVRegister(); }
- bool IsB() const { return IsV() && Is8Bits(); }
- bool IsH() const { return IsV() && Is16Bits(); }
- bool IsS() const { return IsV() && Is32Bits(); }
- bool IsD() const { return IsV() && Is64Bits(); }
- bool IsQ() const { return IsV() && Is128Bits(); }
-
- Register Reg() const;
- VRegister VReg() const;
-
- Register X() const;
- Register W() const;
- VRegister V() const;
- VRegister B() const;
- VRegister H() const;
- VRegister D() const;
- VRegister S() const;
- VRegister Q() const;
-
- bool IsSameSizeAndType(const CPURegister& other) const;
-
- bool is(const CPURegister& other) const { return Is(other); }
- bool is_valid() const { return IsValid(); }
-
- protected:
- int reg_size_;
- RegisterType reg_type_;
-
- friend class RegisterBase;
-
- constexpr CPURegister(int code, int size, RegisterType type)
- : RegisterBase(code), reg_size_(size), reg_type_(type) {}
-
- static constexpr bool IsValidRegister(int code, int size) {
- return (size == kWRegSizeInBits || size == kXRegSizeInBits) &&
- (code < kNumberOfRegisters || code == kSPRegInternalCode);
- }
-
- static constexpr bool IsValidVRegister(int code, int size) {
- return (size == kBRegSizeInBits || size == kHRegSizeInBits ||
- size == kSRegSizeInBits || size == kDRegSizeInBits ||
- size == kQRegSizeInBits) &&
- code < kNumberOfVRegisters;
- }
-
- static constexpr bool IsValid(int code, int size, RegisterType type) {
- return (type == kRegister && IsValidRegister(code, size)) ||
- (type == kVRegister && IsValidVRegister(code, size));
- }
-
- static constexpr bool IsNone(int code, int size, RegisterType type) {
- return type == kNoRegister && code == 0 && size == 0;
- }
-};
-
-ASSERT_TRIVIALLY_COPYABLE(CPURegister);
-
-class Register : public CPURegister {
- public:
- static constexpr Register no_reg() { return Register(CPURegister::no_reg()); }
-
- template <int code, int size>
- static constexpr Register Create() {
- return Register(CPURegister::Create<code, size, CPURegister::kRegister>());
- }
-
- static Register Create(int code, int size) {
- return Register(CPURegister::Create(code, size, CPURegister::kRegister));
- }
-
- static Register XRegFromCode(unsigned code);
- static Register WRegFromCode(unsigned code);
-
- static Register from_code(int code) {
- // Always return an X register.
- return Register::Create(code, kXRegSizeInBits);
- }
-
- template <int code>
- static Register from_code() {
- // Always return an X register.
- return Register::Create<code, kXRegSizeInBits>();
- }
-
- private:
- constexpr explicit Register(const CPURegister& r) : CPURegister(r) {}
-};
-
-ASSERT_TRIVIALLY_COPYABLE(Register);
-
-constexpr bool kPadArguments = true;
-constexpr bool kSimpleFPAliasing = true;
-constexpr bool kSimdMaskRegisters = false;
-
-enum DoubleRegisterCode {
-#define REGISTER_CODE(R) kDoubleCode_##R,
- DOUBLE_REGISTERS(REGISTER_CODE)
-#undef REGISTER_CODE
- kDoubleAfterLast
-};
-
-class VRegister : public CPURegister {
- public:
- static constexpr VRegister no_reg() {
- return VRegister(CPURegister::no_reg(), 0);
- }
-
- template <int code, int size, int lane_count = 1>
- static constexpr VRegister Create() {
- static_assert(IsValidLaneCount(lane_count), "Invalid lane count");
- return VRegister(CPURegister::Create<code, size, kVRegister>(), lane_count);
- }
-
- static VRegister Create(int code, int size, int lane_count = 1) {
- DCHECK(IsValidLaneCount(lane_count));
- return VRegister(CPURegister::Create(code, size, CPURegister::kVRegister),
- lane_count);
- }
-
- static VRegister Create(int reg_code, VectorFormat format) {
- int reg_size = RegisterSizeInBitsFromFormat(format);
- int reg_count = IsVectorFormat(format) ? LaneCountFromFormat(format) : 1;
- return VRegister::Create(reg_code, reg_size, reg_count);
- }
-
- static VRegister BRegFromCode(unsigned code);
- static VRegister HRegFromCode(unsigned code);
- static VRegister SRegFromCode(unsigned code);
- static VRegister DRegFromCode(unsigned code);
- static VRegister QRegFromCode(unsigned code);
- static VRegister VRegFromCode(unsigned code);
-
- VRegister V8B() const {
- return VRegister::Create(code(), kDRegSizeInBits, 8);
- }
- VRegister V16B() const {
- return VRegister::Create(code(), kQRegSizeInBits, 16);
- }
- VRegister V4H() const {
- return VRegister::Create(code(), kDRegSizeInBits, 4);
- }
- VRegister V8H() const {
- return VRegister::Create(code(), kQRegSizeInBits, 8);
- }
- VRegister V2S() const {
- return VRegister::Create(code(), kDRegSizeInBits, 2);
- }
- VRegister V4S() const {
- return VRegister::Create(code(), kQRegSizeInBits, 4);
- }
- VRegister V2D() const {
- return VRegister::Create(code(), kQRegSizeInBits, 2);
- }
- VRegister V1D() const {
- return VRegister::Create(code(), kDRegSizeInBits, 1);
- }
-
- bool Is8B() const { return (Is64Bits() && (lane_count_ == 8)); }
- bool Is16B() const { return (Is128Bits() && (lane_count_ == 16)); }
- bool Is4H() const { return (Is64Bits() && (lane_count_ == 4)); }
- bool Is8H() const { return (Is128Bits() && (lane_count_ == 8)); }
- bool Is2S() const { return (Is64Bits() && (lane_count_ == 2)); }
- bool Is4S() const { return (Is128Bits() && (lane_count_ == 4)); }
- bool Is1D() const { return (Is64Bits() && (lane_count_ == 1)); }
- bool Is2D() const { return (Is128Bits() && (lane_count_ == 2)); }
-
- // For consistency, we assert the number of lanes of these scalar registers,
- // even though there are no vectors of equivalent total size with which they
- // could alias.
- bool Is1B() const {
- DCHECK(!(Is8Bits() && IsVector()));
- return Is8Bits();
- }
- bool Is1H() const {
- DCHECK(!(Is16Bits() && IsVector()));
- return Is16Bits();
- }
- bool Is1S() const {
- DCHECK(!(Is32Bits() && IsVector()));
- return Is32Bits();
- }
-
- bool IsLaneSizeB() const { return LaneSizeInBits() == kBRegSizeInBits; }
- bool IsLaneSizeH() const { return LaneSizeInBits() == kHRegSizeInBits; }
- bool IsLaneSizeS() const { return LaneSizeInBits() == kSRegSizeInBits; }
- bool IsLaneSizeD() const { return LaneSizeInBits() == kDRegSizeInBits; }
-
- bool IsScalar() const { return lane_count_ == 1; }
- bool IsVector() const { return lane_count_ > 1; }
-
- bool IsSameFormat(const VRegister& other) const {
- return (reg_size_ == other.reg_size_) && (lane_count_ == other.lane_count_);
- }
-
- int LaneCount() const { return lane_count_; }
-
- unsigned LaneSizeInBytes() const { return SizeInBytes() / lane_count_; }
-
- unsigned LaneSizeInBits() const { return LaneSizeInBytes() * 8; }
-
- static constexpr int kMaxNumRegisters = kNumberOfVRegisters;
- STATIC_ASSERT(kMaxNumRegisters == kDoubleAfterLast);
-
- static VRegister from_code(int code) {
- // Always return a D register.
- return VRegister::Create(code, kDRegSizeInBits);
- }
-
- private:
- int lane_count_;
-
- constexpr explicit VRegister(const CPURegister& r, int lane_count)
- : CPURegister(r), lane_count_(lane_count) {}
-
- static constexpr bool IsValidLaneCount(int lane_count) {
- return base::bits::IsPowerOfTwo(lane_count) && lane_count <= 16;
- }
-};
-
-ASSERT_TRIVIALLY_COPYABLE(VRegister);
-
-// No*Reg is used to indicate an unused argument, or an error case. Note that
-// these all compare equal (using the Is() method). The Register and VRegister
-// variants are provided for convenience.
-constexpr Register NoReg = Register::no_reg();
-constexpr VRegister NoVReg = VRegister::no_reg();
-constexpr CPURegister NoCPUReg = CPURegister::no_reg();
-constexpr Register no_reg = NoReg;
-constexpr VRegister no_dreg = NoVReg;
-
-#define DEFINE_REGISTER(register_class, name, ...) \
- constexpr register_class name = register_class::Create<__VA_ARGS__>()
-#define ALIAS_REGISTER(register_class, alias, name) \
- constexpr register_class alias = name
-
-#define DEFINE_REGISTERS(N) \
- DEFINE_REGISTER(Register, w##N, N, kWRegSizeInBits); \
- DEFINE_REGISTER(Register, x##N, N, kXRegSizeInBits);
-GENERAL_REGISTER_CODE_LIST(DEFINE_REGISTERS)
-#undef DEFINE_REGISTERS
-
-DEFINE_REGISTER(Register, wsp, kSPRegInternalCode, kWRegSizeInBits);
-DEFINE_REGISTER(Register, sp, kSPRegInternalCode, kXRegSizeInBits);
-
-#define DEFINE_VREGISTERS(N) \
- DEFINE_REGISTER(VRegister, b##N, N, kBRegSizeInBits); \
- DEFINE_REGISTER(VRegister, h##N, N, kHRegSizeInBits); \
- DEFINE_REGISTER(VRegister, s##N, N, kSRegSizeInBits); \
- DEFINE_REGISTER(VRegister, d##N, N, kDRegSizeInBits); \
- DEFINE_REGISTER(VRegister, q##N, N, kQRegSizeInBits); \
- DEFINE_REGISTER(VRegister, v##N, N, kQRegSizeInBits);
-GENERAL_REGISTER_CODE_LIST(DEFINE_VREGISTERS)
-#undef DEFINE_VREGISTERS
-
-#undef DEFINE_REGISTER
-
-// Registers aliases.
-ALIAS_REGISTER(VRegister, v8_, v8); // Avoid conflicts with namespace v8.
-ALIAS_REGISTER(Register, ip0, x16);
-ALIAS_REGISTER(Register, ip1, x17);
-ALIAS_REGISTER(Register, wip0, w16);
-ALIAS_REGISTER(Register, wip1, w17);
-// Root register.
-ALIAS_REGISTER(Register, kRootRegister, x26);
-ALIAS_REGISTER(Register, rr, x26);
-// Context pointer register.
-ALIAS_REGISTER(Register, cp, x27);
-ALIAS_REGISTER(Register, fp, x29);
-ALIAS_REGISTER(Register, lr, x30);
-ALIAS_REGISTER(Register, xzr, x31);
-ALIAS_REGISTER(Register, wzr, w31);
-
-// Register used for padding stack slots.
-ALIAS_REGISTER(Register, padreg, x31);
-
-// Keeps the 0 double value.
-ALIAS_REGISTER(VRegister, fp_zero, d15);
-// MacroAssembler fixed V Registers.
-ALIAS_REGISTER(VRegister, fp_fixed1, d28);
-ALIAS_REGISTER(VRegister, fp_fixed2, d29);
-
-// MacroAssembler scratch V registers.
-ALIAS_REGISTER(VRegister, fp_scratch, d30);
-ALIAS_REGISTER(VRegister, fp_scratch1, d30);
-ALIAS_REGISTER(VRegister, fp_scratch2, d31);
-
-#undef ALIAS_REGISTER
-
-// AreAliased returns true if any of the named registers overlap. Arguments set
-// to NoReg are ignored. The system stack pointer may be specified.
-bool AreAliased(const CPURegister& reg1,
- const CPURegister& reg2,
- const CPURegister& reg3 = NoReg,
- const CPURegister& reg4 = NoReg,
- const CPURegister& reg5 = NoReg,
- const CPURegister& reg6 = NoReg,
- const CPURegister& reg7 = NoReg,
- const CPURegister& reg8 = NoReg);
-
-// AreSameSizeAndType returns true if all of the specified registers have the
-// same size, and are of the same type. The system stack pointer may be
-// specified. Arguments set to NoReg are ignored, as are any subsequent
-// arguments. At least one argument (reg1) must be valid (not NoCPUReg).
-bool AreSameSizeAndType(
- const CPURegister& reg1, const CPURegister& reg2 = NoCPUReg,
- const CPURegister& reg3 = NoCPUReg, const CPURegister& reg4 = NoCPUReg,
- const CPURegister& reg5 = NoCPUReg, const CPURegister& reg6 = NoCPUReg,
- const CPURegister& reg7 = NoCPUReg, const CPURegister& reg8 = NoCPUReg);
-
-// AreSameFormat returns true if all of the specified VRegisters have the same
-// vector format. Arguments set to NoVReg are ignored, as are any subsequent
-// arguments. At least one argument (reg1) must be valid (not NoVReg).
-bool AreSameFormat(const VRegister& reg1, const VRegister& reg2,
- const VRegister& reg3 = NoVReg,
- const VRegister& reg4 = NoVReg);
-
-// AreConsecutive returns true if all of the specified VRegisters are
-// consecutive in the register file. Arguments may be set to NoVReg, and if so,
-// subsequent arguments must also be NoVReg. At least one argument (reg1) must
-// be valid (not NoVReg).
-bool AreConsecutive(const VRegister& reg1, const VRegister& reg2,
- const VRegister& reg3 = NoVReg,
- const VRegister& reg4 = NoVReg);
-
-typedef VRegister FloatRegister;
-typedef VRegister DoubleRegister;
-typedef VRegister Simd128Register;
-
-// -----------------------------------------------------------------------------
-// Lists of registers.
-class CPURegList {
- public:
- template <typename... CPURegisters>
- explicit CPURegList(CPURegister reg0, CPURegisters... regs)
- : list_(CPURegister::ListOf(reg0, regs...)),
- size_(reg0.SizeInBits()),
- type_(reg0.type()) {
- DCHECK(AreSameSizeAndType(reg0, regs...));
- DCHECK(IsValid());
- }
-
- CPURegList(CPURegister::RegisterType type, int size, RegList list)
- : list_(list), size_(size), type_(type) {
- DCHECK(IsValid());
- }
-
- CPURegList(CPURegister::RegisterType type, int size, int first_reg,
- int last_reg)
- : size_(size), type_(type) {
- DCHECK(
- ((type == CPURegister::kRegister) && (last_reg < kNumberOfRegisters)) ||
- ((type == CPURegister::kVRegister) &&
- (last_reg < kNumberOfVRegisters)));
- DCHECK(last_reg >= first_reg);
- list_ = (1UL << (last_reg + 1)) - 1;
- list_ &= ~((1UL << first_reg) - 1);
- DCHECK(IsValid());
- }
-
- CPURegister::RegisterType type() const {
- DCHECK(IsValid());
- return type_;
- }
-
- RegList list() const {
- DCHECK(IsValid());
- return list_;
- }
-
- inline void set_list(RegList new_list) {
- DCHECK(IsValid());
- list_ = new_list;
- }
-
- // Combine another CPURegList into this one. Registers that already exist in
- // this list are left unchanged. The type and size of the registers in the
- // 'other' list must match those in this list.
- void Combine(const CPURegList& other);
-
- // Remove every register in the other CPURegList from this one. Registers that
- // do not exist in this list are ignored. The type of the registers in the
- // 'other' list must match those in this list.
- void Remove(const CPURegList& other);
-
- // Variants of Combine and Remove which take CPURegisters.
- void Combine(const CPURegister& other);
- void Remove(const CPURegister& other1,
- const CPURegister& other2 = NoCPUReg,
- const CPURegister& other3 = NoCPUReg,
- const CPURegister& other4 = NoCPUReg);
-
- // Variants of Combine and Remove which take a single register by its code;
- // the type and size of the register is inferred from this list.
- void Combine(int code);
- void Remove(int code);
-
- // Remove all callee-saved registers from the list. This can be useful when
- // preparing registers for an AAPCS64 function call, for example.
- void RemoveCalleeSaved();
-
- CPURegister PopLowestIndex();
- CPURegister PopHighestIndex();
-
- // AAPCS64 callee-saved registers.
- static CPURegList GetCalleeSaved(int size = kXRegSizeInBits);
- static CPURegList GetCalleeSavedV(int size = kDRegSizeInBits);
-
- // AAPCS64 caller-saved registers. Note that this includes lr.
- // TODO(all): Determine how we handle d8-d15 being callee-saved, but the top
- // 64-bits being caller-saved.
- static CPURegList GetCallerSaved(int size = kXRegSizeInBits);
- static CPURegList GetCallerSavedV(int size = kDRegSizeInBits);
-
- // Registers saved as safepoints.
- static CPURegList GetSafepointSavedRegisters();
-
- bool IsEmpty() const {
- DCHECK(IsValid());
- return list_ == 0;
- }
-
- bool IncludesAliasOf(const CPURegister& other1,
- const CPURegister& other2 = NoCPUReg,
- const CPURegister& other3 = NoCPUReg,
- const CPURegister& other4 = NoCPUReg) const {
- DCHECK(IsValid());
- RegList list = 0;
- if (!other1.IsNone() && (other1.type() == type_)) list |= other1.bit();
- if (!other2.IsNone() && (other2.type() == type_)) list |= other2.bit();
- if (!other3.IsNone() && (other3.type() == type_)) list |= other3.bit();
- if (!other4.IsNone() && (other4.type() == type_)) list |= other4.bit();
- return (list_ & list) != 0;
- }
-
- int Count() const {
- DCHECK(IsValid());
- return CountSetBits(list_, kRegListSizeInBits);
- }
-
- int RegisterSizeInBits() const {
- DCHECK(IsValid());
- return size_;
- }
-
- int RegisterSizeInBytes() const {
- int size_in_bits = RegisterSizeInBits();
- DCHECK_EQ(size_in_bits % kBitsPerByte, 0);
- return size_in_bits / kBitsPerByte;
- }
-
- int TotalSizeInBytes() const {
- DCHECK(IsValid());
- return RegisterSizeInBytes() * Count();
- }
-
- private:
- RegList list_;
- int size_;
- CPURegister::RegisterType type_;
-
- bool IsValid() const {
- constexpr RegList kValidRegisters{0x8000000ffffffff};
- constexpr RegList kValidVRegisters{0x0000000ffffffff};
- switch (type_) {
- case CPURegister::kRegister:
- return (list_ & kValidRegisters) == list_;
- case CPURegister::kVRegister:
- return (list_ & kValidVRegisters) == list_;
- case CPURegister::kNoRegister:
- return list_ == 0;
- default:
- UNREACHABLE();
- }
- }
-};
-
-
-// AAPCS64 callee-saved registers.
-#define kCalleeSaved CPURegList::GetCalleeSaved()
-#define kCalleeSavedV CPURegList::GetCalleeSavedV()
-
-// AAPCS64 caller-saved registers. Note that this includes lr.
-#define kCallerSaved CPURegList::GetCallerSaved()
-#define kCallerSavedV CPURegList::GetCallerSavedV()
-
-// -----------------------------------------------------------------------------
// Immediates.
class Immediate {
public:
@@ -693,7 +57,7 @@ class Immediate {
// -----------------------------------------------------------------------------
// Operands.
constexpr int kSmiShift = kSmiTagSize + kSmiShiftSize;
-constexpr uint64_t kSmiShiftMask = (1UL << kSmiShift) - 1;
+constexpr uint64_t kSmiShiftMask = (1ULL << kSmiShift) - 1;
// Represents an operand in a machine instruction.
class Operand {
@@ -717,7 +81,6 @@ class Operand {
unsigned shift_amount = 0);
static Operand EmbeddedNumber(double number); // Smi or HeapNumber.
- static Operand EmbeddedCode(CodeStub* stub);
static Operand EmbeddedStringConstant(const StringConstantBase* str);
inline bool IsHeapObjectRequest() const;
@@ -891,15 +254,9 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// for a detailed comment on the layout (globals.h).
//
// If the provided buffer is nullptr, the assembler allocates and grows its
- // own buffer, and buffer_size determines the initial buffer size. The buffer
- // is owned by the assembler and deallocated upon destruction of the
- // assembler.
- //
- // If the provided buffer is not nullptr, the assembler uses the provided
- // buffer for code generation and assumes its size to be buffer_size. If the
- // buffer is too small, a fatal error occurs. No deallocation of the buffer is
- // done upon destruction of the assembler.
- Assembler(const AssemblerOptions& options, void* buffer, int buffer_size);
+ // own buffer. Otherwise it takes ownership of the provided buffer.
+ explicit Assembler(const AssemblerOptions&,
+ std::unique_ptr<AssemblerBuffer> = {});
virtual ~Assembler();
@@ -930,6 +287,8 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// Insert the smallest number of zero bytes possible to align the pc offset
// to a multiple of m. m must be a power of 2 (>= 2).
void DataAlign(int m);
+ // Aligns code to something that's optimal for a jump target on this platform.
+ void CodeTargetAlign();
inline void Unreachable();
@@ -991,7 +350,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// an immediate branch or the address of an entry in the constant pool.
// This is for calls and branches within generated code.
inline static void deserialization_set_special_target_at(Address location,
- Code* code,
+ Code code,
Address target);
// Get the size of the special target encoded at 'location'.
@@ -1008,28 +367,17 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// forwards in memory after a target is resolved and written.
static constexpr int kSpecialTargetSize = 0;
- // The sizes of the call sequences emitted by MacroAssembler::Call.
- //
- // A "near" call is encoded in a BL immediate instruction:
- // bl target
- //
- // whereas a "far" call will be encoded like this:
- // ldr temp, =target
- // blr temp
- static constexpr int kNearCallSize = 1 * kInstrSize;
- static constexpr int kFarCallSize = 2 * kInstrSize;
-
// Size of the generated code in bytes
uint64_t SizeOfGeneratedCode() const {
- DCHECK((pc_ >= buffer_) && (pc_ < (buffer_ + buffer_size_)));
- return pc_ - buffer_;
+ DCHECK((pc_ >= buffer_start_) && (pc_ < (buffer_start_ + buffer_->size())));
+ return pc_ - buffer_start_;
}
// Return the code size generated from label to the current position.
uint64_t SizeOfCodeGeneratedSince(const Label* label) {
DCHECK(label->is_bound());
- DCHECK(pc_offset() >= label->pos());
- DCHECK(pc_offset() < buffer_size_);
+ DCHECK_GE(pc_offset(), label->pos());
+ DCHECK_LT(pc_offset(), buffer_->size());
return pc_offset() - label->pos();
}
@@ -1077,9 +425,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
EndBlockVeneerPool();
}
- // Debugging ----------------------------------------------------------------
- void RecordComment(const char* msg);
-
// Record a deoptimization reason that can be used by a log or cpu profiler.
// Use --trace-deopt to enable.
void RecordDeoptReason(DeoptimizeReason reason, SourcePosition position,
@@ -1744,10 +1089,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
LAST_NOP_MARKER = ADR_FAR_NOP
};
- void nop(NopMarkerTypes n) {
- DCHECK((FIRST_NOP_MARKER <= n) && (n <= LAST_NOP_MARKER));
- mov(Register::XRegFromCode(n), Register::XRegFromCode(n));
- }
+ void nop(NopMarkerTypes n);
// Add.
void add(const VRegister& vd, const VRegister& vn, const VRegister& vm);
@@ -2863,15 +2205,11 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
Instruction* pc() const { return Instruction::Cast(pc_); }
Instruction* InstructionAt(ptrdiff_t offset) const {
- return reinterpret_cast<Instruction*>(buffer_ + offset);
+ return reinterpret_cast<Instruction*>(buffer_start_ + offset);
}
ptrdiff_t InstructionOffset(Instruction* instr) const {
- return reinterpret_cast<byte*>(instr) - buffer_;
- }
-
- static const char* GetSpecialRegisterName(int code) {
- return (code == kSPRegInternalCode) ? "sp" : "UNKNOWN";
+ return reinterpret_cast<byte*>(instr) - buffer_start_;
}
// Register encoding.
@@ -3169,13 +2507,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// Check if is time to emit a constant pool.
void CheckConstPool(bool force_emit, bool require_jump);
- void PatchConstantPoolAccessInstruction(int pc_offset, int offset,
- ConstantPoolEntry::Access access,
- ConstantPoolEntry::Type type) {
- // No embedded constant pool support.
- UNREACHABLE();
- }
-
// Returns true if we should emit a veneer as soon as possible for a branch
// which can at most reach to specified pc.
bool ShouldEmitVeneer(int max_reachable_pc,
@@ -3419,7 +2750,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void Emit(Instr instruction) {
STATIC_ASSERT(sizeof(*pc_) == 1);
STATIC_ASSERT(sizeof(instruction) == kInstrSize);
- DCHECK((pc_ + sizeof(instruction)) <= (buffer_ + buffer_size_));
+ DCHECK_LE(pc_ + sizeof(instruction), buffer_start_ + buffer_->size());
memcpy(pc_, &instruction, sizeof(instruction));
pc_ += sizeof(instruction);
@@ -3429,7 +2760,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// Emit data inline in the instruction stream.
void EmitData(void const * data, unsigned size) {
DCHECK_EQ(sizeof(*pc_), 1);
- DCHECK((pc_ + size) <= (buffer_ + buffer_size_));
+ DCHECK_LE(pc_ + size, buffer_start_ + buffer_->size());
// TODO(all): Somehow record that we have data here. Then we can
// disassemble it correctly.
@@ -3586,6 +2917,8 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void AllocateAndInstallRequestedHeapObjects(Isolate* isolate);
+ int WriteCodeComments();
+
friend class EnsureSpace;
friend class ConstPool;
};
@@ -3603,7 +2936,8 @@ class PatchingAssembler : public Assembler {
// Note that the instruction cache will not be flushed.
PatchingAssembler(const AssemblerOptions& options, byte* start,
unsigned count)
- : Assembler(options, start, count * kInstrSize + kGap) {
+ : Assembler(options,
+ ExternalAssemblerBuffer(start, count * kInstrSize + kGap)) {
// Block constant pool emission.
StartBlockPools();
}
@@ -3613,7 +2947,7 @@ class PatchingAssembler : public Assembler {
DCHECK(is_const_pool_blocked());
EndBlockPools();
// Verify we have generated the number of instructions we expected.
- DCHECK((pc_offset() + kGap) == buffer_size_);
+ DCHECK_EQ(pc_offset() + kGap, buffer_->size());
// Verify no relocation information has been emitted.
DCHECK(IsConstPoolEmpty());
}
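// Hedged usage sketch for the updated constructor: the patcher overwrites
// exactly `count` instructions in place, and the destructor's DCHECK above
// verifies that pc_offset() + kGap matches the buffer size. The target
// address and replacement instruction here are hypothetical:
//   PatchingAssembler patcher(options, pc, 1);  // room for one instruction
//   patcher.b(0);  // emit the single replacement instruction
//   // ~PatchingAssembler() then checks the count and that no pools leaked.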
diff --git a/deps/v8/src/arm64/code-stubs-arm64.cc b/deps/v8/src/arm64/code-stubs-arm64.cc
deleted file mode 100644
index 9b8114c9bf..0000000000
--- a/deps/v8/src/arm64/code-stubs-arm64.cc
+++ /dev/null
@@ -1,623 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#if V8_TARGET_ARCH_ARM64
-
-#include "src/api-arguments.h"
-#include "src/arm64/assembler-arm64-inl.h"
-#include "src/arm64/macro-assembler-arm64-inl.h"
-#include "src/bootstrapper.h"
-#include "src/code-stubs.h"
-#include "src/counters.h"
-#include "src/frame-constants.h"
-#include "src/frames.h"
-#include "src/heap/heap-inl.h"
-#include "src/ic/ic.h"
-#include "src/ic/stub-cache.h"
-#include "src/isolate.h"
-#include "src/objects/api-callbacks.h"
-#include "src/objects/regexp-match-info.h"
-#include "src/regexp/jsregexp.h"
-#include "src/regexp/regexp-macro-assembler.h"
-#include "src/runtime/runtime.h"
-
-#include "src/arm64/code-stubs-arm64.h" // Cannot be the first include.
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm)
-
-// This is the entry point from C++. 5 arguments are provided in x0-x4.
-// See use of the JSEntryFunction for example in src/execution.cc.
-// Input:
-// x0: code entry.
-// x1: function.
-// x2: receiver.
-// x3: argc.
-// x4: argv.
-// Output:
-// x0: result.
-void JSEntryStub::Generate(MacroAssembler* masm) {
- Label invoke, handler_entry, exit;
-
- Register code_entry = x0;
-
- {
- NoRootArrayScope no_root_array(masm);
-
- // Enable instruction instrumentation. This only works on the simulator, and
- // will have no effect on the model or real hardware.
- __ EnableInstrumentation();
-
- __ PushCalleeSavedRegisters();
-
- ProfileEntryHookStub::MaybeCallEntryHook(masm);
-
- // Set up the reserved register for 0.0.
- __ Fmov(fp_zero, 0.0);
-
- // Initialize the root array register
- __ InitializeRootRegister();
- }
-
- // Build an entry frame (see layout below).
- StackFrame::Type marker = type();
- int64_t bad_frame_pointer = -1L; // Bad frame pointer to fail if it is used.
- __ Mov(x13, bad_frame_pointer);
- __ Mov(x12, StackFrame::TypeToMarker(marker));
- __ Mov(x11, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress,
- isolate()));
- __ Ldr(x10, MemOperand(x11));
-
- __ Push(x13, x12, xzr, x10);
- // Set up fp.
- __ Sub(fp, sp, EntryFrameConstants::kCallerFPOffset);
-
- // Push the JS entry frame marker. Also set js_entry_sp if this is the
- // outermost JS call.
- Label non_outermost_js, done;
- ExternalReference js_entry_sp =
- ExternalReference::Create(IsolateAddressId::kJSEntrySPAddress, isolate());
- __ Mov(x10, js_entry_sp);
- __ Ldr(x11, MemOperand(x10));
-
- // Select between the inner and outermost frame marker, based on the JS entry
- // sp. We assert that the inner marker is zero, so we can use xzr to save a
- // move instruction.
- DCHECK_EQ(StackFrame::INNER_JSENTRY_FRAME, 0);
- __ Cmp(x11, 0); // If x11 is zero, this is the outermost frame.
- __ Csel(x12, xzr, StackFrame::OUTERMOST_JSENTRY_FRAME, ne);
- __ B(ne, &done);
- __ Str(fp, MemOperand(x10));
-
- __ Bind(&done);
- __ Push(x12, padreg);
-
- // The frame set up looks like this:
- // sp[0] : padding.
- // sp[1] : JS entry frame marker.
- // sp[2] : C entry FP.
- // sp[3] : stack frame marker.
- // sp[4] : stack frame marker.
- // sp[5] : bad frame pointer 0xFFF...FF <- fp points here.
-
- // Jump to a faked try block that does the invoke, with a faked catch
- // block that sets the pending exception.
- __ B(&invoke);
-
- // Prevent the constant pool from being emitted between the record of the
- // handler_entry position and the first instruction of the sequence here.
- // There is no risk because Assembler::Emit() emits the instruction before
- // checking for constant pool emission, but we do not want to depend on
- // that.
- {
- Assembler::BlockPoolsScope block_pools(masm);
- __ bind(&handler_entry);
- handler_offset_ = handler_entry.pos();
- // Caught exception: Store result (exception) in the pending exception
- // field in the JSEnv and return a failure sentinel. Coming in here the
- // fp will be invalid because the PushTryHandler below sets it to 0 to
- // signal the existence of the JSEntry frame.
- __ Mov(x10, Operand(ExternalReference::Create(
- IsolateAddressId::kPendingExceptionAddress, isolate())));
- }
- __ Str(code_entry, MemOperand(x10));
- __ LoadRoot(x0, RootIndex::kException);
- __ B(&exit);
-
- // Invoke: Link this frame into the handler chain.
- __ Bind(&invoke);
-
- // Push new stack handler.
- static_assert(StackHandlerConstants::kSize == 2 * kPointerSize,
- "Unexpected offset for StackHandlerConstants::kSize");
- static_assert(StackHandlerConstants::kNextOffset == 0 * kPointerSize,
- "Unexpected offset for StackHandlerConstants::kNextOffset");
-
- // Link the current handler as the next handler.
- __ Mov(x11, ExternalReference::Create(IsolateAddressId::kHandlerAddress,
- isolate()));
- __ Ldr(x10, MemOperand(x11));
- __ Push(padreg, x10);
-
- // Set this new handler as the current one.
- {
- UseScratchRegisterScope temps(masm);
- Register scratch = temps.AcquireX();
- __ Mov(scratch, sp);
- __ Str(scratch, MemOperand(x11));
- }
-
- // If an exception not caught by another handler occurs, this handler
- // returns control to the code after the B(&invoke) above, which
- // restores all callee-saved registers (including cp and fp) to their
- // saved values before returning a failure to C.
-
- // Invoke the function by calling through the JS entry trampoline builtin.
- // Notice that we cannot store a reference to the trampoline code directly in
- // this stub, because runtime stubs are not traversed when doing GC.
-
- // Expected registers by Builtins::JSEntryTrampoline
- // x0: code entry.
- // x1: function.
- // x2: receiver.
- // x3: argc.
- // x4: argv.
- __ Call(EntryTrampoline(), RelocInfo::CODE_TARGET);
-
- // Pop the stack handler and unlink this frame from the handler chain.
- static_assert(StackHandlerConstants::kNextOffset == 0 * kPointerSize,
- "Unexpected offset for StackHandlerConstants::kNextOffset");
- __ Pop(x10, padreg);
- __ Mov(x11, ExternalReference::Create(IsolateAddressId::kHandlerAddress,
- isolate()));
- __ Drop(StackHandlerConstants::kSlotCount - 2);
- __ Str(x10, MemOperand(x11));
-
- __ Bind(&exit);
- // x0 holds the result.
- // The stack pointer points to the top of the entry frame pushed on entry from
- // C++ (at the beginning of this stub):
- // sp[0] : padding.
- // sp[1] : JS entry frame marker.
- // sp[2] : C entry FP.
- // sp[3] : stack frame marker.
- // sp[4] : stack frame marker.
- // sp[5] : bad frame pointer 0xFFF...FF <- fp points here.
-
- // Check if the current stack frame is marked as the outermost JS frame.
- Label non_outermost_js_2;
- {
- Register c_entry_fp = x11;
- __ PeekPair(x10, c_entry_fp, 1 * kPointerSize);
- __ Cmp(x10, StackFrame::OUTERMOST_JSENTRY_FRAME);
- __ B(ne, &non_outermost_js_2);
- __ Mov(x12, js_entry_sp);
- __ Str(xzr, MemOperand(x12));
- __ Bind(&non_outermost_js_2);
-
- // Restore the top frame descriptors from the stack.
- __ Mov(x12, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress,
- isolate()));
- __ Str(c_entry_fp, MemOperand(x12));
- }
-
- // Reset the stack to the callee saved registers.
- static_assert(EntryFrameConstants::kFixedFrameSize % (2 * kPointerSize) == 0,
- "Size of entry frame is not a multiple of 16 bytes");
- __ Drop(EntryFrameConstants::kFixedFrameSize / kPointerSize);
- // Restore the callee-saved registers and return.
- __ PopCalleeSavedRegisters();
- __ Ret();
-}
-
-// The entry hook is a Push (stp) instruction, followed by a near call.
-static const unsigned int kProfileEntryHookCallSize =
- (1 * kInstrSize) + Assembler::kNearCallSize;
-
-void ProfileEntryHookStub::MaybeCallEntryHookDelayed(TurboAssembler* tasm,
- Zone* zone) {
- if (tasm->isolate()->function_entry_hook() != nullptr) {
- Assembler::BlockConstPoolScope no_const_pools(tasm);
- DontEmitDebugCodeScope no_debug_code(tasm);
- Label entry_hook_call_start;
- tasm->Bind(&entry_hook_call_start);
- tasm->Push(padreg, lr);
- tasm->CallStubDelayed(new (zone) ProfileEntryHookStub(nullptr));
- DCHECK_EQ(tasm->SizeOfCodeGeneratedSince(&entry_hook_call_start),
- kProfileEntryHookCallSize);
- tasm->Pop(lr, padreg);
- }
-}
-
-void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
- if (masm->isolate()->function_entry_hook() != nullptr) {
- ProfileEntryHookStub stub(masm->isolate());
- Assembler::BlockConstPoolScope no_const_pools(masm);
- DontEmitDebugCodeScope no_debug_code(masm);
- Label entry_hook_call_start;
- __ Bind(&entry_hook_call_start);
- __ Push(padreg, lr);
- __ CallStub(&stub);
- DCHECK_EQ(masm->SizeOfCodeGeneratedSince(&entry_hook_call_start),
- kProfileEntryHookCallSize);
- __ Pop(lr, padreg);
- }
-}
-
-
-void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
- HardAbortScope hard_aborts(masm);
-
- // Save all kCallerSaved registers (including lr), since this can be called
- // from anywhere.
- // TODO(jbramley): What about FP registers?
- __ PushCPURegList(kCallerSaved);
- DCHECK(kCallerSaved.IncludesAliasOf(lr));
- const int kNumSavedRegs = kCallerSaved.Count();
- DCHECK_EQ(kNumSavedRegs % 2, 0);
-
- // Compute the function's address as the first argument.
- __ Sub(x0, lr, kProfileEntryHookCallSize);
-
-#if V8_HOST_ARCH_ARM64
- uintptr_t entry_hook =
- reinterpret_cast<uintptr_t>(isolate()->function_entry_hook());
- __ Mov(x10, entry_hook);
-#else
- // Under the simulator we need to indirect the entry hook through a trampoline
- // function at a known address.
- ApiFunction dispatcher(FUNCTION_ADDR(EntryHookTrampoline));
- __ Mov(x10, Operand(ExternalReference::Create(
- &dispatcher, ExternalReference::BUILTIN_CALL)));
- // It additionally takes an isolate as a third parameter
- __ Mov(x2, ExternalReference::isolate_address(isolate()));
-#endif
-
- // The caller's return address is above the saved temporaries.
- // Grab its location for the second argument to the hook.
- __ SlotAddress(x1, kNumSavedRegs);
-
- {
- // Create a dummy frame, as CallCFunction requires this.
- FrameScope frame(masm, StackFrame::MANUAL);
- __ CallCFunction(x10, 2, 0);
- }
-
- __ PopCPURegList(kCallerSaved);
- __ Ret();
-}
-
-
-void DirectCEntryStub::Generate(MacroAssembler* masm) {
- // Put return address on the stack (accessible to GC through exit frame pc).
- __ Poke(lr, 0);
- // Call the C++ function.
- __ Blr(x10);
- // Return to calling code.
- __ Peek(lr, 0);
- __ AssertFPCRState();
- __ Ret();
-}
-
-void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
- Register target) {
- // Branch to the stub.
- __ Mov(x10, target);
- __ Call(GetCode(), RelocInfo::CODE_TARGET);
-}
-
-// The number of registers that CallApiFunctionAndReturn will need to save on
-// the stack. The space for these registers needs to be allocated in the
-// ExitFrame before calling CallApiFunctionAndReturn.
-static const int kCallApiFunctionSpillSpace = 4;
-
-
-static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
- return static_cast<int>(ref0.address() - ref1.address());
-}
-
-// Calls an API function. Allocates HandleScope, extracts returned value
-// from handle and propagates exceptions.
-// 'stack_space' is the space to be unwound on exit (it includes the JS call
-// arguments space and the additional space allocated for the fast call).
-// 'spill_offset' is the offset from the stack pointer where
-// CallApiFunctionAndReturn can spill registers.
-static void CallApiFunctionAndReturn(MacroAssembler* masm,
- Register function_address,
- ExternalReference thunk_ref,
- int stack_space, int spill_offset,
- MemOperand return_value_operand) {
- ASM_LOCATION("CallApiFunctionAndReturn");
- Isolate* isolate = masm->isolate();
- ExternalReference next_address =
- ExternalReference::handle_scope_next_address(isolate);
- const int kNextOffset = 0;
- const int kLimitOffset = AddressOffset(
- ExternalReference::handle_scope_limit_address(isolate), next_address);
- const int kLevelOffset = AddressOffset(
- ExternalReference::handle_scope_level_address(isolate), next_address);
-
- DCHECK(function_address.is(x1) || function_address.is(x2));
-
- Label profiler_disabled;
- Label end_profiler_check;
- __ Mov(x10, ExternalReference::is_profiling_address(isolate));
- __ Ldrb(w10, MemOperand(x10));
- __ Cbz(w10, &profiler_disabled);
- __ Mov(x3, thunk_ref);
- __ B(&end_profiler_check);
-
- __ Bind(&profiler_disabled);
- __ Mov(x3, function_address);
- __ Bind(&end_profiler_check);
-
- // Save the callee-save registers we are going to use.
- // TODO(all): Is this necessary? ARM doesn't do it.
- STATIC_ASSERT(kCallApiFunctionSpillSpace == 4);
- __ Poke(x19, (spill_offset + 0) * kXRegSize);
- __ Poke(x20, (spill_offset + 1) * kXRegSize);
- __ Poke(x21, (spill_offset + 2) * kXRegSize);
- __ Poke(x22, (spill_offset + 3) * kXRegSize);
-
- // Allocate HandleScope in callee-save registers.
- // We will need to restore the HandleScope after the call to the API function,
- // so allocate it in callee-save registers, which C code preserves.
- Register handle_scope_base = x22;
- Register next_address_reg = x19;
- Register limit_reg = x20;
- Register level_reg = w21;
-
- __ Mov(handle_scope_base, next_address);
- __ Ldr(next_address_reg, MemOperand(handle_scope_base, kNextOffset));
- __ Ldr(limit_reg, MemOperand(handle_scope_base, kLimitOffset));
- __ Ldr(level_reg, MemOperand(handle_scope_base, kLevelOffset));
- __ Add(level_reg, level_reg, 1);
- __ Str(level_reg, MemOperand(handle_scope_base, kLevelOffset));
-
- if (FLAG_log_timer_events) {
- FrameScope frame(masm, StackFrame::MANUAL);
- __ PushSafepointRegisters();
- __ Mov(x0, ExternalReference::isolate_address(isolate));
- __ CallCFunction(ExternalReference::log_enter_external_function(), 1);
- __ PopSafepointRegisters();
- }
-
- // Native call returns to the DirectCEntry stub which redirects to the
- // return address pushed on stack (could have moved after GC).
- // DirectCEntry stub itself is generated early and never moves.
- DirectCEntryStub stub(isolate);
- stub.GenerateCall(masm, x3);
-
- if (FLAG_log_timer_events) {
- FrameScope frame(masm, StackFrame::MANUAL);
- __ PushSafepointRegisters();
- __ Mov(x0, ExternalReference::isolate_address(isolate));
- __ CallCFunction(ExternalReference::log_leave_external_function(), 1);
- __ PopSafepointRegisters();
- }
-
- Label promote_scheduled_exception;
- Label delete_allocated_handles;
- Label leave_exit_frame;
- Label return_value_loaded;
-
- // Load value from ReturnValue.
- __ Ldr(x0, return_value_operand);
- __ Bind(&return_value_loaded);
- // No more valid handles (the result handle was the last one). Restore
- // previous handle scope.
- __ Str(next_address_reg, MemOperand(handle_scope_base, kNextOffset));
- if (__ emit_debug_code()) {
- __ Ldr(w1, MemOperand(handle_scope_base, kLevelOffset));
- __ Cmp(w1, level_reg);
- __ Check(eq, AbortReason::kUnexpectedLevelAfterReturnFromApiCall);
- }
- __ Sub(level_reg, level_reg, 1);
- __ Str(level_reg, MemOperand(handle_scope_base, kLevelOffset));
- __ Ldr(x1, MemOperand(handle_scope_base, kLimitOffset));
- __ Cmp(limit_reg, x1);
- __ B(ne, &delete_allocated_handles);
-
- // Leave the API exit frame.
- __ Bind(&leave_exit_frame);
- // Restore callee-saved registers.
- __ Peek(x19, (spill_offset + 0) * kXRegSize);
- __ Peek(x20, (spill_offset + 1) * kXRegSize);
- __ Peek(x21, (spill_offset + 2) * kXRegSize);
- __ Peek(x22, (spill_offset + 3) * kXRegSize);
-
- __ LeaveExitFrame(false, x1, x5);
-
- // Check if the function scheduled an exception.
- __ Mov(x5, ExternalReference::scheduled_exception_address(isolate));
- __ Ldr(x5, MemOperand(x5));
- __ JumpIfNotRoot(x5, RootIndex::kTheHoleValue, &promote_scheduled_exception);
-
- __ DropSlots(stack_space);
- __ Ret();
-
- // Re-throw by promoting a scheduled exception.
- __ Bind(&promote_scheduled_exception);
- __ TailCallRuntime(Runtime::kPromoteScheduledException);
-
- // HandleScope limit has changed. Delete allocated extensions.
- __ Bind(&delete_allocated_handles);
- __ Str(limit_reg, MemOperand(handle_scope_base, kLimitOffset));
- // Save the return value in a callee-save register.
- Register saved_result = x19;
- __ Mov(saved_result, x0);
- __ Mov(x0, ExternalReference::isolate_address(isolate));
- __ CallCFunction(ExternalReference::delete_handle_scope_extensions(), 1);
- __ Mov(x0, saved_result);
- __ B(&leave_exit_frame);
-}
-
-void CallApiCallbackStub::Generate(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- x4 : call_data
- // -- x2 : holder
- // -- x1 : api_function_address
- // -- cp : context
- // --
- // -- sp[0] : last argument
- // -- ...
- // -- sp[(argc - 1) * 8] : first argument
- // -- sp[argc * 8] : receiver
- // -----------------------------------
-
- Register call_data = x4;
- Register holder = x2;
- Register api_function_address = x1;
-
- typedef FunctionCallbackArguments FCA;
-
- STATIC_ASSERT(FCA::kArgsLength == 6);
- STATIC_ASSERT(FCA::kNewTargetIndex == 5);
- STATIC_ASSERT(FCA::kDataIndex == 4);
- STATIC_ASSERT(FCA::kReturnValueOffset == 3);
- STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
- STATIC_ASSERT(FCA::kIsolateIndex == 1);
- STATIC_ASSERT(FCA::kHolderIndex == 0);
-
- Register undef = x7;
- __ LoadRoot(undef, RootIndex::kUndefinedValue);
-
- // Push new target, call data.
- __ Push(undef, call_data);
-
- Register isolate_reg = x5;
- __ Mov(isolate_reg, ExternalReference::isolate_address(masm->isolate()));
-
- // FunctionCallbackArguments:
- // return value, return value default, isolate, holder.
- __ Push(undef, undef, isolate_reg, holder);
-
- // Prepare arguments.
- Register args = x6;
- __ Mov(args, sp);
-
- // Allocate the v8::Arguments structure in the arguments' space, since it's
- // not controlled by GC.
- const int kApiStackSpace = 3;
-
- // Allocate space so that CallApiFunctionAndReturn can store some scratch
- // registers on the stack.
- const int kCallApiFunctionSpillSpace = 4;
-
- FrameScope frame_scope(masm, StackFrame::MANUAL);
- __ EnterExitFrame(false, x10, kApiStackSpace + kCallApiFunctionSpillSpace);
-
- DCHECK(!AreAliased(x0, api_function_address));
- // x0 = FunctionCallbackInfo&
- // Arguments is after the return address.
- __ SlotAddress(x0, 1);
- // FunctionCallbackInfo::implicit_args_ and FunctionCallbackInfo::values_
- __ Add(x10, args, Operand((FCA::kArgsLength - 1 + argc()) * kPointerSize));
- __ Stp(args, x10, MemOperand(x0, 0 * kPointerSize));
- // FunctionCallbackInfo::length_ = argc
- __ Mov(x10, argc());
- __ Str(x10, MemOperand(x0, 2 * kPointerSize));
-
- ExternalReference thunk_ref = ExternalReference::invoke_function_callback();
-
- AllowExternalCallThatCantCauseGC scope(masm);
- // Stores return the first js argument
- int return_value_offset = 2 + FCA::kReturnValueOffset;
- MemOperand return_value_operand(fp, return_value_offset * kPointerSize);
- // The number of arguments might be odd, but will be padded when calling the
- // stub. We do not round up stack_space to account for odd argc here; this
- // will be done in CallApiFunctionAndReturn.
- const int stack_space = (argc() + 1) + FCA::kArgsLength;
-
- // The current frame needs to be aligned.
- DCHECK_EQ((stack_space - (argc() + 1)) % 2, 0);
- const int spill_offset = 1 + kApiStackSpace;
- CallApiFunctionAndReturn(masm, api_function_address, thunk_ref, stack_space,
- spill_offset, return_value_operand);
-}
-
-
-void CallApiGetterStub::Generate(MacroAssembler* masm) {
- STATIC_ASSERT(PropertyCallbackArguments::kShouldThrowOnErrorIndex == 0);
- STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 1);
- STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 2);
- STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 3);
- STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 4);
- STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 5);
- STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 6);
- STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 7);
-
- Register receiver = ApiGetterDescriptor::ReceiverRegister();
- Register holder = ApiGetterDescriptor::HolderRegister();
- Register callback = ApiGetterDescriptor::CallbackRegister();
- Register data = x4;
- Register undef = x5;
- Register isolate_address = x6;
- Register name = x7;
- DCHECK(!AreAliased(receiver, holder, callback, data, undef, isolate_address,
- name));
-
- __ Ldr(data, FieldMemOperand(callback, AccessorInfo::kDataOffset));
- __ LoadRoot(undef, RootIndex::kUndefinedValue);
- __ Mov(isolate_address, ExternalReference::isolate_address(isolate()));
- __ Ldr(name, FieldMemOperand(callback, AccessorInfo::kNameOffset));
-
- // PropertyCallbackArguments:
- // receiver, data, return value, return value default, isolate, holder,
- // should_throw_on_error
- // These are followed by the property name, which is also pushed below the
- // exit frame to make the GC aware of it.
- __ Push(receiver, data, undef, undef, isolate_address, holder, xzr, name);
-
- // v8::PropertyCallbackInfo::args_ array and name handle.
- static const int kStackUnwindSpace =
- PropertyCallbackArguments::kArgsLength + 1;
- static_assert(kStackUnwindSpace % 2 == 0,
- "slots must be a multiple of 2 for stack pointer alignment");
-
- // Load address of v8::PropertyAccessorInfo::args_ array and name handle.
- __ Mov(x0, sp); // x0 = Handle<Name>
- __ Add(x1, x0, 1 * kPointerSize); // x1 = v8::PCI::args_
-
- const int kApiStackSpace = 1;
-
- // Allocate space so that CallApiFunctionAndReturn can store some scratch
- // registers on the stack.
- const int kCallApiFunctionSpillSpace = 4;
-
- FrameScope frame_scope(masm, StackFrame::MANUAL);
- __ EnterExitFrame(false, x10, kApiStackSpace + kCallApiFunctionSpillSpace);
-
- // Create the v8::PropertyCallbackInfo object on the stack and initialize
- // its args_ field.
- __ Poke(x1, 1 * kPointerSize);
- __ SlotAddress(x1, 1);
- // x1 = v8::PropertyCallbackInfo&
-
- ExternalReference thunk_ref =
- ExternalReference::invoke_accessor_getter_callback();
-
- Register api_function_address = x2;
- Register js_getter = x4;
- __ Ldr(js_getter, FieldMemOperand(callback, AccessorInfo::kJsGetterOffset));
- __ Ldr(api_function_address,
- FieldMemOperand(js_getter, Foreign::kForeignAddressOffset));
-
- const int spill_offset = 1 + kApiStackSpace;
- // +3 is to skip prolog, return address and name handle.
- MemOperand return_value_operand(
- fp, (PropertyCallbackArguments::kReturnValueOffset + 3) * kPointerSize);
- CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
- kStackUnwindSpace, spill_offset,
- return_value_operand);
-}
-
-#undef __
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_TARGET_ARCH_ARM64
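// Reader's note: the deleted CallApiFunctionAndReturn above implemented the
// HandleScope protocol its comments describe. As a sketch, with register
// assignments taken from the deleted code:
//   x19 = handle_scope.next;  x20 = handle_scope.limit;
//   w21 = ++handle_scope.level;                  // open an implicit scope
//   call the callback (directly, or via the profiling thunk chosen into x3);
//   x0  = *return_value_operand;                 // fetch the ReturnValue
//   handle_scope.next = x19;  --handle_scope.level;
//   if (handle_scope.limit != x20) delete_handle_scope_extensions();
//   if (the scheduled exception is not the_hole) PromoteScheduledException();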
diff --git a/deps/v8/src/arm64/code-stubs-arm64.h b/deps/v8/src/arm64/code-stubs-arm64.h
deleted file mode 100644
index 14c4a988ac..0000000000
--- a/deps/v8/src/arm64/code-stubs-arm64.h
+++ /dev/null
@@ -1,28 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_ARM64_CODE_STUBS_ARM64_H_
-#define V8_ARM64_CODE_STUBS_ARM64_H_
-
-namespace v8 {
-namespace internal {
-
-// Helper to call C++ functions from generated code. The caller must prepare
-// the exit frame before doing the call with GenerateCall.
-class DirectCEntryStub: public PlatformCodeStub {
- public:
- explicit DirectCEntryStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
- void GenerateCall(MacroAssembler* masm, Register target);
-
- private:
- Movability NeedsImmovableCode() override { return kImmovable; }
-
- DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
- DEFINE_PLATFORM_CODE_STUB(DirectCEntry, PlatformCodeStub);
-};
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_ARM64_CODE_STUBS_ARM64_H_
diff --git a/deps/v8/src/arm64/codegen-arm64.cc b/deps/v8/src/arm64/codegen-arm64.cc
deleted file mode 100644
index 180e3f54b7..0000000000
--- a/deps/v8/src/arm64/codegen-arm64.cc
+++ /dev/null
@@ -1,25 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#if V8_TARGET_ARCH_ARM64
-
-#include "src/arm64/assembler-arm64-inl.h"
-#include "src/arm64/macro-assembler-arm64-inl.h"
-#include "src/arm64/simulator-arm64.h"
-#include "src/codegen.h"
-#include "src/macro-assembler.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm)
-
-UnaryMathFunction CreateSqrtFunction() { return nullptr; }
-
-#undef __
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_TARGET_ARCH_ARM64
diff --git a/deps/v8/src/arm64/constants-arm64.h b/deps/v8/src/arm64/constants-arm64.h
index 1d238e2d32..c93aad9f61 100644
--- a/deps/v8/src/arm64/constants-arm64.h
+++ b/deps/v8/src/arm64/constants-arm64.h
@@ -8,12 +8,16 @@
#include "src/base/macros.h"
#include "src/globals.h"
-// Assert that this is an LP64 system.
+// Assert that this is an LP64 system, or LLP64 on Windows.
STATIC_ASSERT(sizeof(int) == sizeof(int32_t));
+#if defined(V8_OS_WIN)
+STATIC_ASSERT(sizeof(1L) == sizeof(int32_t));
+#else
STATIC_ASSERT(sizeof(long) == sizeof(int64_t)); // NOLINT(runtime/int)
+STATIC_ASSERT(sizeof(1L) == sizeof(int64_t));
+#endif
STATIC_ASSERT(sizeof(void *) == sizeof(int64_t));
STATIC_ASSERT(sizeof(1) == sizeof(int32_t));
-STATIC_ASSERT(sizeof(1L) == sizeof(int64_t));
// Get the standard printf format macros for C99 stdint types.
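// Background for the relaxed assertion, summarized from the C data models
// rather than from this patch: LP64 and LLP64 differ only in the width of
// long (sizes in bytes):
//
//            int  long  long long  void*
//   LP64      4     8       8        8     (Linux, macOS)
//   LLP64     4     4       8        8     (Windows)
//
// Hence sizeof(1L) == sizeof(int32_t) on Windows, which is exactly what the
// new V8_OS_WIN branch asserts.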
@@ -77,17 +81,17 @@ const int64_t kDRegMask = 0xffffffffffffffffL;
// TODO(all) check if the expression below works on all compilers or if it
// triggers an overflow error.
const int64_t kDSignBit = 63;
-const int64_t kDSignMask = 0x1L << kDSignBit;
+const int64_t kDSignMask = 0x1LL << kDSignBit;
const int64_t kSSignBit = 31;
-const int64_t kSSignMask = 0x1L << kSSignBit;
+const int64_t kSSignMask = 0x1LL << kSSignBit;
const int64_t kXSignBit = 63;
-const int64_t kXSignMask = 0x1L << kXSignBit;
+const int64_t kXSignMask = 0x1LL << kXSignBit;
const int64_t kWSignBit = 31;
-const int64_t kWSignMask = 0x1L << kWSignBit;
+const int64_t kWSignMask = 0x1LL << kWSignBit;
const int64_t kDQuietNanBit = 51;
-const int64_t kDQuietNanMask = 0x1L << kDQuietNanBit;
+const int64_t kDQuietNanMask = 0x1LL << kDQuietNanBit;
const int64_t kSQuietNanBit = 22;
-const int64_t kSQuietNanMask = 0x1L << kSQuietNanBit;
+const int64_t kSQuietNanMask = 0x1LL << kSQuietNanBit;
const int64_t kByteMask = 0xffL;
const int64_t kHalfWordMask = 0xffffL;
const int64_t kWordMask = 0xffffffffL;
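// Why the LL suffixes above matter: on LLP64, 0x1L is a 32-bit long, so
// shifting it by 63 exceeds the operand width (undefined behavior) and can
// never set bit 63. With LL the operand is 64 bits and the shift count is in
// range on both data models:
//   int64_t bad  = 0x1L  << kDSignBit;  // UB where long is 32 bits (Windows)
//   int64_t good = 0x1LL << kDSignBit;  // 64-bit operand everywhere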
@@ -107,8 +111,6 @@ const unsigned kRegCodeMask = 0x1f;
const unsigned kShiftAmountWRegMask = 0x1f;
const unsigned kShiftAmountXRegMask = 0x3f;
// Standard machine types defined by AAPCS64.
-const unsigned kByteSize = 8;
-const unsigned kByteSizeInBytes = kByteSize >> 3;
const unsigned kHalfWordSize = 16;
const unsigned kHalfWordSizeLog2 = 4;
const unsigned kHalfWordSizeInBytes = kHalfWordSize >> 3;
@@ -273,7 +275,7 @@ V_(Flags, 31, 28, Bits, uint32_t) \
V_(N, 31, 31, Bits, bool) \
V_(Z, 30, 30, Bits, bool) \
V_(C, 29, 29, Bits, bool) \
-V_(V, 28, 28, Bits, uint32_t) \
+V_(V, 28, 28, Bits, bool) \
M_(NZCV, Flags_mask) \
\
/* FPCR */ \
@@ -445,14 +447,14 @@ enum SystemRegister {
const uint32_t kUnallocatedInstruction = 0xffffffff;
// Generic fields.
-enum GenericInstrField {
+enum GenericInstrField : uint32_t {
SixtyFourBits = 0x80000000,
ThirtyTwoBits = 0x00000000,
FP32 = 0x00000000,
FP64 = 0x00400000
};
-enum NEONFormatField {
+enum NEONFormatField : uint32_t {
NEONFormatFieldMask = 0x40C00000,
NEON_Q = 0x40000000,
NEON_8B = 0x00000000,
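// Why these enums gain an explicit underlying type: enumerators such as
// SixtyFourBits = 0x80000000 exceed INT_MAX, so without ": uint32_t" the
// underlying type is implementation-defined and bitwise combinations can
// promote differently across compilers (MSVC being the newly added one).
// A self-contained sketch, assuming <cstdint>:
//   enum PlainOp { kTop = 0x80000000 };                  // type varies
//   enum FixedOp : uint32_t { kTopFixed = 0x80000000 };  // always 32 bits
//   static_assert(sizeof(FixedOp) == sizeof(uint32_t), "encoding field");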
@@ -465,14 +467,14 @@ enum NEONFormatField {
NEON_2D = 0x00C00000 | NEON_Q
};
-enum NEONFPFormatField {
+enum NEONFPFormatField : uint32_t {
NEONFPFormatFieldMask = 0x40400000,
NEON_FP_2S = FP32,
NEON_FP_4S = FP32 | NEON_Q,
NEON_FP_2D = FP64 | NEON_Q
};
-enum NEONLSFormatField {
+enum NEONLSFormatField : uint32_t {
NEONLSFormatFieldMask = 0x40000C00,
LS_NEON_8B = 0x00000000,
LS_NEON_16B = LS_NEON_8B | NEON_Q,
@@ -484,7 +486,7 @@ enum NEONLSFormatField {
LS_NEON_2D = LS_NEON_1D | NEON_Q
};
-enum NEONScalarFormatField {
+enum NEONScalarFormatField : uint32_t {
NEONScalarFormatFieldMask = 0x00C00000,
NEONScalar = 0x10000000,
NEON_B = 0x00000000,
@@ -494,7 +496,7 @@ enum NEONScalarFormatField {
};
// PC relative addressing.
-enum PCRelAddressingOp {
+enum PCRelAddressingOp : uint32_t {
PCRelAddressingFixed = 0x10000000,
PCRelAddressingFMask = 0x1F000000,
PCRelAddressingMask = 0x9F000000,
@@ -504,7 +506,7 @@ enum PCRelAddressingOp {
// Add/sub (immediate, shifted and extended.)
const int kSFOffset = 31;
-enum AddSubOp {
+enum AddSubOp : uint32_t {
AddSubOpMask = 0x60000000,
AddSubSetFlagsBit = 0x20000000,
ADD = 0x00000000,
@@ -519,7 +521,7 @@ enum AddSubOp {
V(SUB), \
V(SUBS)
-enum AddSubImmediateOp {
+enum AddSubImmediateOp : uint32_t {
AddSubImmediateFixed = 0x11000000,
AddSubImmediateFMask = 0x1F000000,
AddSubImmediateMask = 0xFF000000,
@@ -530,7 +532,7 @@ enum AddSubImmediateOp {
#undef ADD_SUB_IMMEDIATE
};
-enum AddSubShiftedOp {
+enum AddSubShiftedOp : uint32_t {
AddSubShiftedFixed = 0x0B000000,
AddSubShiftedFMask = 0x1F200000,
AddSubShiftedMask = 0xFF200000,
@@ -541,7 +543,7 @@ enum AddSubShiftedOp {
#undef ADD_SUB_SHIFTED
};
-enum AddSubExtendedOp {
+enum AddSubExtendedOp : uint32_t {
AddSubExtendedFixed = 0x0B200000,
AddSubExtendedFMask = 0x1F200000,
AddSubExtendedMask = 0xFFE00000,
@@ -553,7 +555,7 @@ enum AddSubExtendedOp {
};
// Add/sub with carry.
-enum AddSubWithCarryOp {
+enum AddSubWithCarryOp : uint32_t {
AddSubWithCarryFixed = 0x1A000000,
AddSubWithCarryFMask = 0x1FE00000,
AddSubWithCarryMask = 0xFFE0FC00,
@@ -571,7 +573,7 @@ enum AddSubWithCarryOp {
// Logical (immediate and shifted register).
-enum LogicalOp {
+enum LogicalOp : uint32_t {
LogicalOpMask = 0x60200000,
NOT = 0x00200000,
AND = 0x00000000,
@@ -585,7 +587,7 @@ enum LogicalOp {
};
// Logical immediate.
-enum LogicalImmediateOp {
+enum LogicalImmediateOp : uint32_t {
LogicalImmediateFixed = 0x12000000,
LogicalImmediateFMask = 0x1F800000,
LogicalImmediateMask = 0xFF800000,
@@ -600,7 +602,7 @@ enum LogicalImmediateOp {
};
// Logical shifted register.
-enum LogicalShiftedOp {
+enum LogicalShiftedOp : uint32_t {
LogicalShiftedFixed = 0x0A000000,
LogicalShiftedFMask = 0x1F000000,
LogicalShiftedMask = 0xFF200000,
@@ -631,7 +633,7 @@ enum LogicalShiftedOp {
};
// Move wide immediate.
-enum MoveWideImmediateOp {
+enum MoveWideImmediateOp : uint32_t {
MoveWideImmediateFixed = 0x12800000,
MoveWideImmediateFMask = 0x1F800000,
MoveWideImmediateMask = 0xFF800000,
@@ -648,7 +650,7 @@ enum MoveWideImmediateOp {
// Bitfield.
const int kBitfieldNOffset = 22;
-enum BitfieldOp {
+enum BitfieldOp : uint32_t {
BitfieldFixed = 0x13000000,
BitfieldFMask = 0x1F800000,
BitfieldMask = 0xFF800000,
@@ -665,7 +667,7 @@ enum BitfieldOp {
};
// Extract.
-enum ExtractOp {
+enum ExtractOp : uint32_t {
ExtractFixed = 0x13800000,
ExtractFMask = 0x1F800000,
ExtractMask = 0xFFA00000,
@@ -675,7 +677,7 @@ enum ExtractOp {
};
// Unconditional branch.
-enum UnconditionalBranchOp {
+enum UnconditionalBranchOp : uint32_t {
UnconditionalBranchFixed = 0x14000000,
UnconditionalBranchFMask = 0x7C000000,
UnconditionalBranchMask = 0xFC000000,
@@ -684,7 +686,7 @@ enum UnconditionalBranchOp {
};
// Unconditional branch to register.
-enum UnconditionalBranchToRegisterOp {
+enum UnconditionalBranchToRegisterOp : uint32_t {
UnconditionalBranchToRegisterFixed = 0xD6000000,
UnconditionalBranchToRegisterFMask = 0xFE000000,
UnconditionalBranchToRegisterMask = 0xFFFFFC1F,
@@ -694,7 +696,7 @@ enum UnconditionalBranchToRegisterOp {
};
// Compare and branch.
-enum CompareBranchOp {
+enum CompareBranchOp : uint32_t {
CompareBranchFixed = 0x34000000,
CompareBranchFMask = 0x7E000000,
CompareBranchMask = 0xFF000000,
@@ -707,7 +709,7 @@ enum CompareBranchOp {
};
// Test and branch.
-enum TestBranchOp {
+enum TestBranchOp : uint32_t {
TestBranchFixed = 0x36000000,
TestBranchFMask = 0x7E000000,
TestBranchMask = 0x7F000000,
@@ -716,7 +718,7 @@ enum TestBranchOp {
};
// Conditional branch.
-enum ConditionalBranchOp {
+enum ConditionalBranchOp : uint32_t {
ConditionalBranchFixed = 0x54000000,
ConditionalBranchFMask = 0xFE000000,
ConditionalBranchMask = 0xFF000010,
@@ -728,12 +730,12 @@ enum ConditionalBranchOp {
// and CR fields to encode parameters. To handle this cleanly, the system
// instructions are split into more than one enum.
-enum SystemOp {
+enum SystemOp : uint32_t {
SystemFixed = 0xD5000000,
SystemFMask = 0xFFC00000
};
-enum SystemSysRegOp {
+enum SystemSysRegOp : uint32_t {
SystemSysRegFixed = 0xD5100000,
SystemSysRegFMask = 0xFFD00000,
SystemSysRegMask = 0xFFF00000,
@@ -741,7 +743,7 @@ enum SystemSysRegOp {
MSR = SystemSysRegFixed | 0x00000000
};
-enum SystemHintOp {
+enum SystemHintOp : uint32_t {
SystemHintFixed = 0xD503201F,
SystemHintFMask = 0xFFFFF01F,
SystemHintMask = 0xFFFFF01F,
@@ -749,7 +751,7 @@ enum SystemHintOp {
};
// Exception.
-enum ExceptionOp {
+enum ExceptionOp : uint32_t {
ExceptionFixed = 0xD4000000,
ExceptionFMask = 0xFF000000,
ExceptionMask = 0xFFE0001F,
@@ -765,7 +767,7 @@ enum ExceptionOp {
// Code used to spot hlt instructions that should not be hit.
const int kHltBadCode = 0xbad;
-enum MemBarrierOp {
+enum MemBarrierOp : uint32_t {
MemBarrierFixed = 0xD503309F,
MemBarrierFMask = 0xFFFFF09F,
MemBarrierMask = 0xFFFFF0FF,
@@ -775,13 +777,13 @@ enum MemBarrierOp {
};
// Any load or store (including pair).
-enum LoadStoreAnyOp {
+enum LoadStoreAnyOp : uint32_t {
LoadStoreAnyFMask = 0x0a000000,
LoadStoreAnyFixed = 0x08000000
};
// Any load pair or store pair.
-enum LoadStorePairAnyOp {
+enum LoadStorePairAnyOp : uint32_t {
LoadStorePairAnyFMask = 0x3a000000,
LoadStorePairAnyFixed = 0x28000000
};
@@ -794,7 +796,7 @@ enum LoadStorePairAnyOp {
V(LDP, q, 0x84400000)
// Load/store pair (post, pre and offset.)
-enum LoadStorePairOp {
+enum LoadStorePairOp : uint32_t {
LoadStorePairMask = 0xC4400000,
LoadStorePairLBit = 1 << 22,
#define LOAD_STORE_PAIR(A, B, C) \
@@ -803,7 +805,7 @@ enum LoadStorePairOp {
#undef LOAD_STORE_PAIR
};
-enum LoadStorePairPostIndexOp {
+enum LoadStorePairPostIndexOp : uint32_t {
LoadStorePairPostIndexFixed = 0x28800000,
LoadStorePairPostIndexFMask = 0x3B800000,
LoadStorePairPostIndexMask = 0xFFC00000,
@@ -813,7 +815,7 @@ enum LoadStorePairPostIndexOp {
#undef LOAD_STORE_PAIR_POST_INDEX
};
-enum LoadStorePairPreIndexOp {
+enum LoadStorePairPreIndexOp : uint32_t {
LoadStorePairPreIndexFixed = 0x29800000,
LoadStorePairPreIndexFMask = 0x3B800000,
LoadStorePairPreIndexMask = 0xFFC00000,
@@ -823,7 +825,7 @@ enum LoadStorePairPreIndexOp {
#undef LOAD_STORE_PAIR_PRE_INDEX
};
-enum LoadStorePairOffsetOp {
+enum LoadStorePairOffsetOp : uint32_t {
LoadStorePairOffsetFixed = 0x29000000,
LoadStorePairOffsetFMask = 0x3B800000,
LoadStorePairOffsetMask = 0xFFC00000,
@@ -834,7 +836,7 @@ enum LoadStorePairOffsetOp {
};
// Load literal.
-enum LoadLiteralOp {
+enum LoadLiteralOp : uint32_t {
LoadLiteralFixed = 0x18000000,
LoadLiteralFMask = 0x3B000000,
LoadLiteralMask = 0xFF000000,
@@ -876,7 +878,7 @@ enum LoadLiteralOp {
// clang-format on
// Load/store unscaled offset.
-enum LoadStoreUnscaledOffsetOp {
+enum LoadStoreUnscaledOffsetOp : uint32_t {
LoadStoreUnscaledOffsetFixed = 0x38000000,
LoadStoreUnscaledOffsetFMask = 0x3B200C00,
LoadStoreUnscaledOffsetMask = 0xFFE00C00,
@@ -887,7 +889,7 @@ enum LoadStoreUnscaledOffsetOp {
};
// Load/store (post, pre, offset and unsigned.)
-enum LoadStoreOp {
+enum LoadStoreOp : uint32_t {
LoadStoreMask = 0xC4C00000,
#define LOAD_STORE(A, B, C, D) A##B##_##C = D
LOAD_STORE_OP_LIST(LOAD_STORE),
@@ -896,7 +898,7 @@ enum LoadStoreOp {
};
// Load/store post index.
-enum LoadStorePostIndex {
+enum LoadStorePostIndex : uint32_t {
LoadStorePostIndexFixed = 0x38000400,
LoadStorePostIndexFMask = 0x3B200C00,
LoadStorePostIndexMask = 0xFFE00C00,
@@ -907,7 +909,7 @@ enum LoadStorePostIndex {
};
// Load/store pre index.
-enum LoadStorePreIndex {
+enum LoadStorePreIndex : uint32_t {
LoadStorePreIndexFixed = 0x38000C00,
LoadStorePreIndexFMask = 0x3B200C00,
LoadStorePreIndexMask = 0xFFE00C00,
@@ -918,7 +920,7 @@ enum LoadStorePreIndex {
};
// Load/store unsigned offset.
-enum LoadStoreUnsignedOffset {
+enum LoadStoreUnsignedOffset : uint32_t {
LoadStoreUnsignedOffsetFixed = 0x39000000,
LoadStoreUnsignedOffsetFMask = 0x3B000000,
LoadStoreUnsignedOffsetMask = 0xFFC00000,
@@ -930,7 +932,7 @@ enum LoadStoreUnsignedOffset {
};
// Load/store register offset.
-enum LoadStoreRegisterOffset {
+enum LoadStoreRegisterOffset : uint32_t {
LoadStoreRegisterOffsetFixed = 0x38200800,
LoadStoreRegisterOffsetFMask = 0x3B200C00,
LoadStoreRegisterOffsetMask = 0xFFE00C00,
@@ -942,7 +944,7 @@ enum LoadStoreRegisterOffset {
};
// Load/store acquire/release.
-enum LoadStoreAcquireReleaseOp {
+enum LoadStoreAcquireReleaseOp : uint32_t {
LoadStoreAcquireReleaseFixed = 0x08000000,
LoadStoreAcquireReleaseFMask = 0x3F000000,
LoadStoreAcquireReleaseMask = 0xCFC08000,
@@ -965,14 +967,14 @@ enum LoadStoreAcquireReleaseOp {
};
// Conditional compare.
-enum ConditionalCompareOp {
+enum ConditionalCompareOp : uint32_t {
ConditionalCompareMask = 0x60000000,
CCMN = 0x20000000,
CCMP = 0x60000000
};
// Conditional compare register.
-enum ConditionalCompareRegisterOp {
+enum ConditionalCompareRegisterOp : uint32_t {
ConditionalCompareRegisterFixed = 0x1A400000,
ConditionalCompareRegisterFMask = 0x1FE00800,
ConditionalCompareRegisterMask = 0xFFE00C10,
@@ -983,7 +985,7 @@ enum ConditionalCompareRegisterOp {
};
// Conditional compare immediate.
-enum ConditionalCompareImmediateOp {
+enum ConditionalCompareImmediateOp : uint32_t {
ConditionalCompareImmediateFixed = 0x1A400800,
ConditionalCompareImmediateFMask = 0x1FE00800,
ConditionalCompareImmediateMask = 0xFFE00C10,
@@ -994,7 +996,7 @@ enum ConditionalCompareImmediateOp {
};
// Conditional select.
-enum ConditionalSelectOp {
+enum ConditionalSelectOp : uint32_t {
ConditionalSelectFixed = 0x1A800000,
ConditionalSelectFMask = 0x1FE00000,
ConditionalSelectMask = 0xFFE00C00,
@@ -1013,7 +1015,7 @@ enum ConditionalSelectOp {
};
// Data processing 1 source.
-enum DataProcessing1SourceOp {
+enum DataProcessing1SourceOp : uint32_t {
DataProcessing1SourceFixed = 0x5AC00000,
DataProcessing1SourceFMask = 0x5FE00000,
DataProcessing1SourceMask = 0xFFFFFC00,
@@ -1036,7 +1038,7 @@ enum DataProcessing1SourceOp {
};
// Data processing 2 source.
-enum DataProcessing2SourceOp {
+enum DataProcessing2SourceOp : uint32_t {
DataProcessing2SourceFixed = 0x1AC00000,
DataProcessing2SourceFMask = 0x5FE00000,
DataProcessing2SourceMask = 0xFFE0FC00,
@@ -1069,7 +1071,7 @@ enum DataProcessing2SourceOp {
};
// Data processing 3 source.
-enum DataProcessing3SourceOp {
+enum DataProcessing3SourceOp : uint32_t {
DataProcessing3SourceFixed = 0x1B000000,
DataProcessing3SourceFMask = 0x1F000000,
DataProcessing3SourceMask = 0xFFE08000,
@@ -1088,7 +1090,7 @@ enum DataProcessing3SourceOp {
};
// Floating point compare.
-enum FPCompareOp {
+enum FPCompareOp : uint32_t {
FPCompareFixed = 0x1E202000,
FPCompareFMask = 0x5F203C00,
FPCompareMask = 0xFFE0FC1F,
@@ -1105,7 +1107,7 @@ enum FPCompareOp {
};
// Floating point conditional compare.
-enum FPConditionalCompareOp {
+enum FPConditionalCompareOp : uint32_t {
FPConditionalCompareFixed = 0x1E200400,
FPConditionalCompareFMask = 0x5F200C00,
FPConditionalCompareMask = 0xFFE00C10,
@@ -1118,7 +1120,7 @@ enum FPConditionalCompareOp {
};
// Floating point conditional select.
-enum FPConditionalSelectOp {
+enum FPConditionalSelectOp : uint32_t {
FPConditionalSelectFixed = 0x1E200C00,
FPConditionalSelectFMask = 0x5F200C00,
FPConditionalSelectMask = 0xFFE00C00,
@@ -1128,7 +1130,7 @@ enum FPConditionalSelectOp {
};
// Floating point immediate.
-enum FPImmediateOp {
+enum FPImmediateOp : uint32_t {
FPImmediateFixed = 0x1E201000,
FPImmediateFMask = 0x5F201C00,
FPImmediateMask = 0xFFE01C00,
@@ -1137,7 +1139,7 @@ enum FPImmediateOp {
};
// Floating point data processing 1 source.
-enum FPDataProcessing1SourceOp {
+enum FPDataProcessing1SourceOp : uint32_t {
FPDataProcessing1SourceFixed = 0x1E204000,
FPDataProcessing1SourceFMask = 0x5F207C00,
FPDataProcessing1SourceMask = 0xFFFFFC00,
@@ -1183,7 +1185,7 @@ enum FPDataProcessing1SourceOp {
};
// Floating point data processing 2 source.
-enum FPDataProcessing2SourceOp {
+enum FPDataProcessing2SourceOp : uint32_t {
FPDataProcessing2SourceFixed = 0x1E200800,
FPDataProcessing2SourceFMask = 0x5F200C00,
FPDataProcessing2SourceMask = 0xFFE0FC00,
@@ -1217,7 +1219,7 @@ enum FPDataProcessing2SourceOp {
};
// Floating point data processing 3 source.
-enum FPDataProcessing3SourceOp {
+enum FPDataProcessing3SourceOp : uint32_t {
FPDataProcessing3SourceFixed = 0x1F000000,
FPDataProcessing3SourceFMask = 0x5F000000,
FPDataProcessing3SourceMask = 0xFFE08000,
@@ -1232,7 +1234,7 @@ enum FPDataProcessing3SourceOp {
};
// Conversion between floating point and integer.
-enum FPIntegerConvertOp {
+enum FPIntegerConvertOp : uint32_t {
FPIntegerConvertFixed = 0x1E200000,
FPIntegerConvertFMask = 0x5F20FC00,
FPIntegerConvertMask = 0xFFFFFC00,
@@ -1305,7 +1307,7 @@ enum FPIntegerConvertOp {
};
// Conversion between fixed point and floating point.
-enum FPFixedPointConvertOp {
+enum FPFixedPointConvertOp : uint32_t {
FPFixedPointConvertFixed = 0x1E000000,
FPFixedPointConvertFMask = 0x5F200000,
FPFixedPointConvertMask = 0xFFFF0000,
@@ -1332,7 +1334,7 @@ enum FPFixedPointConvertOp {
};
// NEON instructions with two register operands.
-enum NEON2RegMiscOp {
+enum NEON2RegMiscOp : uint32_t {
NEON2RegMiscFixed = 0x0E200800,
NEON2RegMiscFMask = 0x9F3E0C00,
NEON2RegMiscMask = 0xBF3FFC00,
@@ -1414,7 +1416,7 @@ enum NEON2RegMiscOp {
};
// NEON instructions with three same-type operands.
-enum NEON3SameOp {
+enum NEON3SameOp : uint32_t {
NEON3SameFixed = 0x0E200400,
NEON3SameFMask = 0x9F200400,
NEON3SameMask = 0xBF20FC00,
@@ -1510,7 +1512,7 @@ enum NEON3SameOp {
};
// NEON instructions with three different-type operands.
-enum NEON3DifferentOp {
+enum NEON3DifferentOp : uint32_t {
NEON3DifferentFixed = 0x0E200000,
NEON3DifferentFMask = 0x9F200C00,
NEON3DifferentMask = 0xFF20FC00,
@@ -1569,7 +1571,7 @@ enum NEON3DifferentOp {
};
// NEON instructions operating across vectors.
-enum NEONAcrossLanesOp {
+enum NEONAcrossLanesOp : uint32_t {
NEONAcrossLanesFixed = 0x0E300800,
NEONAcrossLanesFMask = 0x9F3E0C00,
NEONAcrossLanesMask = 0xBF3FFC00,
@@ -1593,7 +1595,7 @@ enum NEONAcrossLanesOp {
};
// NEON instructions with indexed element operand.
-enum NEONByIndexedElementOp {
+enum NEONByIndexedElementOp : uint32_t {
NEONByIndexedElementFixed = 0x0F000000,
NEONByIndexedElementFMask = 0x9F000400,
NEONByIndexedElementMask = 0xBF00F400,
@@ -1622,7 +1624,7 @@ enum NEONByIndexedElementOp {
};
// NEON modified immediate.
-enum NEONModifiedImmediateOp {
+enum NEONModifiedImmediateOp : uint32_t {
NEONModifiedImmediateFixed = 0x0F000400,
NEONModifiedImmediateFMask = 0x9FF80400,
NEONModifiedImmediateOpBit = 0x20000000,
@@ -1633,14 +1635,14 @@ enum NEONModifiedImmediateOp {
};
// NEON extract.
-enum NEONExtractOp {
+enum NEONExtractOp : uint32_t {
NEONExtractFixed = 0x2E000000,
NEONExtractFMask = 0xBF208400,
NEONExtractMask = 0xBFE08400,
NEON_EXT = NEONExtractFixed | 0x00000000
};
-enum NEONLoadStoreMultiOp {
+enum NEONLoadStoreMultiOp : uint32_t {
NEONLoadStoreMultiL = 0x00400000,
NEONLoadStoreMulti1_1v = 0x00007000,
NEONLoadStoreMulti1_2v = 0x0000A000,
@@ -1652,7 +1654,7 @@ enum NEONLoadStoreMultiOp {
};
// NEON load/store multiple structures.
-enum NEONLoadStoreMultiStructOp {
+enum NEONLoadStoreMultiStructOp : uint32_t {
NEONLoadStoreMultiStructFixed = 0x0C000000,
NEONLoadStoreMultiStructFMask = 0xBFBF0000,
NEONLoadStoreMultiStructMask = 0xBFFFF000,
@@ -1676,7 +1678,7 @@ enum NEONLoadStoreMultiStructOp {
};
// NEON load/store multiple structures with post-index addressing.
-enum NEONLoadStoreMultiStructPostIndexOp {
+enum NEONLoadStoreMultiStructPostIndexOp : uint32_t {
NEONLoadStoreMultiStructPostIndexFixed = 0x0C800000,
NEONLoadStoreMultiStructPostIndexFMask = 0xBFA00000,
NEONLoadStoreMultiStructPostIndexMask = 0xBFE0F000,
@@ -1697,7 +1699,7 @@ enum NEONLoadStoreMultiStructPostIndexOp {
NEON_ST4_post = NEON_ST4 | NEONLoadStoreMultiStructPostIndex
};
-enum NEONLoadStoreSingleOp {
+enum NEONLoadStoreSingleOp : uint32_t {
NEONLoadStoreSingle1 = 0x00000000,
NEONLoadStoreSingle2 = 0x00200000,
NEONLoadStoreSingle3 = 0x00002000,
@@ -1712,7 +1714,7 @@ enum NEONLoadStoreSingleOp {
};
// NEON load/store single structure.
-enum NEONLoadStoreSingleStructOp {
+enum NEONLoadStoreSingleStructOp : uint32_t {
NEONLoadStoreSingleStructFixed = 0x0D000000,
NEONLoadStoreSingleStructFMask = 0xBF9F0000,
NEONLoadStoreSingleStructMask = 0xBFFFE000,
@@ -1777,7 +1779,7 @@ enum NEONLoadStoreSingleStructOp {
};
// NEON load/store single structure with post-index addressing.
-enum NEONLoadStoreSingleStructPostIndexOp {
+enum NEONLoadStoreSingleStructPostIndexOp : uint32_t {
NEONLoadStoreSingleStructPostIndexFixed = 0x0D800000,
NEONLoadStoreSingleStructPostIndexFMask = 0xBF800000,
NEONLoadStoreSingleStructPostIndexMask = 0xBFE0E000,
@@ -1824,7 +1826,7 @@ enum NEONLoadStoreSingleStructPostIndexOp {
};
// NEON register copy.
-enum NEONCopyOp {
+enum NEONCopyOp : uint32_t {
NEONCopyFixed = 0x0E000400,
NEONCopyFMask = 0x9FE08400,
NEONCopyMask = 0x3FE08400,
@@ -1843,7 +1845,7 @@ enum NEONCopyOp {
};
// NEON scalar instructions with indexed element operand.
-enum NEONScalarByIndexedElementOp {
+enum NEONScalarByIndexedElementOp : uint32_t {
NEONScalarByIndexedElementFixed = 0x5F000000,
NEONScalarByIndexedElementFMask = 0xDF000400,
NEONScalarByIndexedElementMask = 0xFF00F400,
@@ -1866,7 +1868,7 @@ enum NEONScalarByIndexedElementOp {
};
// NEON shift immediate.
-enum NEONShiftImmediateOp {
+enum NEONShiftImmediateOp : uint32_t {
NEONShiftImmediateFixed = 0x0F000400,
NEONShiftImmediateFMask = 0x9F800400,
NEONShiftImmediateMask = 0xBF80FC00,
@@ -1902,7 +1904,7 @@ enum NEONShiftImmediateOp {
};
// NEON scalar register copy.
-enum NEONScalarCopyOp {
+enum NEONScalarCopyOp : uint32_t {
NEONScalarCopyFixed = 0x5E000400,
NEONScalarCopyFMask = 0xDFE08400,
NEONScalarCopyMask = 0xFFE0FC00,
@@ -1910,7 +1912,7 @@ enum NEONScalarCopyOp {
};
// NEON scalar pairwise instructions.
-enum NEONScalarPairwiseOp {
+enum NEONScalarPairwiseOp : uint32_t {
NEONScalarPairwiseFixed = 0x5E300800,
NEONScalarPairwiseFMask = 0xDF3E0C00,
NEONScalarPairwiseMask = 0xFFB1F800,
@@ -1923,7 +1925,7 @@ enum NEONScalarPairwiseOp {
};
// NEON scalar shift immediate.
-enum NEONScalarShiftImmediateOp {
+enum NEONScalarShiftImmediateOp : uint32_t {
NEONScalarShiftImmediateFixed = 0x5F000400,
NEONScalarShiftImmediateFMask = 0xDF800400,
NEONScalarShiftImmediateMask = 0xFF80FC00,
@@ -1954,7 +1956,7 @@ enum NEONScalarShiftImmediateOp {
};
// NEON table.
-enum NEONTableOp {
+enum NEONTableOp : uint32_t {
NEONTableFixed = 0x0E000000,
NEONTableFMask = 0xBF208C00,
NEONTableExt = 0x00001000,
@@ -1970,7 +1972,7 @@ enum NEONTableOp {
};
// NEON perm.
-enum NEONPermOp {
+enum NEONPermOp : uint32_t {
NEONPermFixed = 0x0E000800,
NEONPermFMask = 0xBF208C00,
NEONPermMask = 0x3F20FC00,
@@ -1983,7 +1985,7 @@ enum NEONPermOp {
};
// NEON scalar instructions with two register operands.
-enum NEONScalar2RegMiscOp {
+enum NEONScalar2RegMiscOp : uint32_t {
NEONScalar2RegMiscFixed = 0x5E200800,
NEONScalar2RegMiscFMask = 0xDF3E0C00,
NEONScalar2RegMiscMask = NEON_Q | NEONScalar | NEON2RegMiscMask,
@@ -2030,7 +2032,7 @@ enum NEONScalar2RegMiscOp {
};
// NEON scalar instructions with three same-type operands.
-enum NEONScalar3SameOp {
+enum NEONScalar3SameOp : uint32_t {
NEONScalar3SameFixed = 0x5E200400,
NEONScalar3SameFMask = 0xDF200400,
NEONScalar3SameMask = 0xFF20FC00,
@@ -2073,7 +2075,7 @@ enum NEONScalar3SameOp {
};
// NEON scalar instructions with three different-type operands.
-enum NEONScalar3DiffOp {
+enum NEONScalar3DiffOp : uint32_t {
NEONScalar3DiffFixed = 0x5E200000,
NEONScalar3DiffFMask = 0xDF200C00,
NEONScalar3DiffMask = NEON_Q | NEONScalar | NEON3DifferentMask,
@@ -2084,12 +2086,12 @@ enum NEONScalar3DiffOp {
// Unimplemented and unallocated instructions. These are defined to make fixed
// bit assertion easier.
-enum UnimplementedOp {
+enum UnimplementedOp : uint32_t {
UnimplementedFixed = 0x00000000,
UnimplementedFMask = 0x00000000
};
-enum UnallocatedOp {
+enum UnallocatedOp : uint32_t {
UnallocatedFixed = 0x00000000,
UnallocatedFMask = 0x00000000
};
diff --git a/deps/v8/src/arm64/cpu-arm64.cc b/deps/v8/src/arm64/cpu-arm64.cc
index 26ec06e094..379d7647d7 100644
--- a/deps/v8/src/arm64/cpu-arm64.cc
+++ b/deps/v8/src/arm64/cpu-arm64.cc
@@ -7,7 +7,7 @@
#if V8_TARGET_ARCH_ARM64
#include "src/arm64/utils-arm64.h"
-#include "src/assembler.h"
+#include "src/cpu-features.h"
namespace v8 {
namespace internal {
@@ -15,7 +15,7 @@ namespace internal {
class CacheLineSizes {
public:
CacheLineSizes() {
-#ifdef USE_SIMULATOR
+#if defined(USE_SIMULATOR) || defined(V8_OS_WIN)
cache_type_register_ = 0;
#else
// Copy the content of the cache type register to a core register.
@@ -38,7 +38,9 @@ class CacheLineSizes {
};
void CpuFeatures::FlushICache(void* address, size_t length) {
-#ifdef V8_HOST_ARCH_ARM64
+#if defined(V8_OS_WIN)
+ FlushInstructionCache(GetCurrentProcess(), address, length);
+#elif defined(V8_HOST_ARCH_ARM64)
// The code below assumes user space cache operations are allowed. The goal
// of this routine is to make sure the code generated is visible to the I
// side of the CPU.
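// The new Windows branch uses the documented Win32 primitive, whose
// signature is FlushInstructionCache(HANDLE process, LPCVOID base,
// SIZE_T size); it performs the required I/D-cache maintenance for the
// range. A plausible reason for bypassing the mrs/dc/ic sequence below is
// that MSVC does not support GNU-style inline assembly (an inference, not
// stated by this patch).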
diff --git a/deps/v8/src/arm64/deoptimizer-arm64.cc b/deps/v8/src/arm64/deoptimizer-arm64.cc
index cb8925f779..4b6aa1bf93 100644
--- a/deps/v8/src/arm64/deoptimizer-arm64.cc
+++ b/deps/v8/src/arm64/deoptimizer-arm64.cc
@@ -10,11 +10,10 @@
#include "src/register-configuration.h"
#include "src/safepoint-table.h"
-
namespace v8 {
namespace internal {
-#define __ masm()->
+#define __ masm->
namespace {
@@ -69,11 +68,24 @@ void RestoreRegList(MacroAssembler* masm, const CPURegList& reg_list,
Register src = temps.AcquireX();
masm->Add(src, src_base, src_offset);
+#if defined(V8_OS_WIN)
+ // x18 is reserved as platform register on Windows.
+ restore_list.Remove(x18);
+#endif
+
// Restore every register in restore_list from src.
while (!restore_list.IsEmpty()) {
CPURegister reg0 = restore_list.PopLowestIndex();
CPURegister reg1 = restore_list.PopLowestIndex();
int offset0 = reg0.code() * reg_size;
+
+#if defined(V8_OS_WIN)
+ if (reg1 == NoCPUReg) {
+ masm->Ldr(reg0, MemOperand(src, offset0));
+ break;
+ }
+#endif
+
int offset1 = reg1.code() * reg_size;
// Pair up adjacent loads, otherwise read them separately.
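// Why the single-load fallback is needed: removing x18 can leave an odd
// number of registers in restore_list, in which case the second
// PopLowestIndex() yields NoCPUReg. Shape of the loop after this change,
// as a sketch:
//   while (!restore_list.IsEmpty()) {
//     CPURegister reg0 = restore_list.PopLowestIndex();
//     CPURegister reg1 = restore_list.PopLowestIndex();  // may be NoCPUReg
//     if (reg1 == NoCPUReg) { masm->Ldr(reg0, ...); break; }
//     // otherwise load reg0 and reg1, pairing adjacent slots with Ldp
//   }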
@@ -87,8 +99,10 @@ void RestoreRegList(MacroAssembler* masm, const CPURegList& reg_list,
}
} // namespace
-void Deoptimizer::TableEntryGenerator::Generate() {
- GeneratePrologue();
+void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
+ Isolate* isolate,
+ DeoptimizeKind deopt_kind) {
+ NoRootArrayScope no_root_array(masm);
// TODO(all): This code needs to be revisited. We probably only need to save
// caller-saved registers here. Callee-saved registers can be stored directly
@@ -116,7 +130,7 @@ void Deoptimizer::TableEntryGenerator::Generate() {
__ PushCPURegList(saved_registers);
__ Mov(x3, Operand(ExternalReference::Create(
- IsolateAddressId::kCEntryFPAddress, isolate())));
+ IsolateAddressId::kCEntryFPAddress, isolate)));
__ Str(fp, MemOperand(x3));
const int kSavedRegistersAreaSize =
@@ -129,18 +143,17 @@ void Deoptimizer::TableEntryGenerator::Generate() {
const int kDoubleRegistersOffset =
kFloatRegistersOffset + saved_float_registers.Count() * kSRegSize;
- // Get the bailout id from the stack.
+ // The bailout id was passed by the caller in x26.
Register bailout_id = x2;
- __ Peek(bailout_id, kSavedRegistersAreaSize);
+ __ Mov(bailout_id, x26);
Register code_object = x3;
Register fp_to_sp = x4;
// Get the address of the location in the code object. This is the return
// address for lazy deoptimization.
__ Mov(code_object, lr);
- // Compute the fp-to-sp delta, adding two words for alignment padding and
- // bailout id.
- __ Add(fp_to_sp, sp, kSavedRegistersAreaSize + (2 * kPointerSize));
+ // Compute the fp-to-sp delta.
+ __ Add(fp_to_sp, sp, kSavedRegistersAreaSize);
__ Sub(fp_to_sp, fp, fp_to_sp);
// Allocate a new deoptimizer object.
@@ -155,16 +168,16 @@ void Deoptimizer::TableEntryGenerator::Generate() {
__ Tst(x1, kSmiTagMask);
__ CzeroX(x0, eq);
- __ Mov(x1, static_cast<int>(deopt_kind()));
+ __ Mov(x1, static_cast<int>(deopt_kind));
// Following arguments are already loaded:
// - x2: bailout id
// - x3: code object address
// - x4: fp-to-sp delta
- __ Mov(x5, ExternalReference::isolate_address(isolate()));
+ __ Mov(x5, ExternalReference::isolate_address(isolate));
{
// Call Deoptimizer::New().
- AllowExternalCallThatCantCauseGC scope(masm());
+ AllowExternalCallThatCantCauseGC scope(masm);
__ CallCFunction(ExternalReference::new_deoptimizer_function(), 6);
}
@@ -175,22 +188,22 @@ void Deoptimizer::TableEntryGenerator::Generate() {
__ Ldr(x1, MemOperand(deoptimizer, Deoptimizer::input_offset()));
// Copy core registers into the input frame.
- CopyRegListToFrame(masm(), x1, FrameDescription::registers_offset(),
+ CopyRegListToFrame(masm, x1, FrameDescription::registers_offset(),
saved_registers, x2, x3);
// Copy double registers to the input frame.
- CopyRegListToFrame(masm(), x1, FrameDescription::double_registers_offset(),
+ CopyRegListToFrame(masm, x1, FrameDescription::double_registers_offset(),
saved_double_registers, x2, x3, kDoubleRegistersOffset);
// Copy float registers to the input frame.
// TODO(arm): these are the lower 32 bits of the double registers stored
// above, so we shouldn't need to store them again.
- CopyRegListToFrame(masm(), x1, FrameDescription::float_registers_offset(),
+ CopyRegListToFrame(masm, x1, FrameDescription::float_registers_offset(),
saved_float_registers, w2, w3, kFloatRegistersOffset);
- // Remove the padding, bailout id and the saved registers from the stack.
+ // Remove the saved registers from the stack.
DCHECK_EQ(kSavedRegistersAreaSize % kXRegSize, 0);
- __ Drop(2 + (kSavedRegistersAreaSize / kXRegSize));
+ __ Drop(kSavedRegistersAreaSize / kXRegSize);
// Compute a pointer to the unwinding limit in register x2; that is
// the first stack slot not part of the input frame.
@@ -211,13 +224,13 @@ void Deoptimizer::TableEntryGenerator::Generate() {
__ Push(padreg, x0); // Preserve deoptimizer object across call.
{
// Call Deoptimizer::ComputeOutputFrames().
- AllowExternalCallThatCantCauseGC scope(masm());
+ AllowExternalCallThatCantCauseGC scope(masm);
__ CallCFunction(ExternalReference::compute_output_frames_function(), 1);
}
__ Pop(x4, padreg); // Restore deoptimizer object (class Deoptimizer).
{
- UseScratchRegisterScope temps(masm());
+ UseScratchRegisterScope temps(masm);
Register scratch = temps.AcquireX();
__ Ldr(scratch, MemOperand(x4, Deoptimizer::caller_frame_top_offset()));
__ Mov(sp, scratch);
@@ -248,7 +261,7 @@ void Deoptimizer::TableEntryGenerator::Generate() {
__ B(lt, &outer_push_loop);
__ Ldr(x1, MemOperand(x4, Deoptimizer::input_offset()));
- RestoreRegList(masm(), saved_double_registers, x1,
+ RestoreRegList(masm, saved_double_registers, x1,
FrameDescription::double_registers_offset());
// TODO(all): ARM copies a lot (if not all) of the last output frame onto the
@@ -264,38 +277,16 @@ void Deoptimizer::TableEntryGenerator::Generate() {
Register last_output_frame = lr;
__ Mov(last_output_frame, current_frame);
- RestoreRegList(masm(), saved_registers, last_output_frame,
+ RestoreRegList(masm, saved_registers, last_output_frame,
FrameDescription::registers_offset());
Register continuation = x7;
__ Ldr(continuation, MemOperand(last_output_frame,
FrameDescription::continuation_offset()));
__ Ldr(lr, MemOperand(last_output_frame, FrameDescription::pc_offset()));
- __ InitializeRootRegister();
__ Br(continuation);
}
-// Size of an entry of the second level deopt table. Since we do not generate
-// a table for ARM64, the size is zero.
-const int Deoptimizer::table_entry_size_ = 0 * kInstrSize;
-
-void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
- UseScratchRegisterScope temps(masm());
- // The MacroAssembler will have put the deoptimization id in x16, the first
- // temp register allocated. We can't assert that the id is in there, but we
- // can check that x16 is the first allocated temp and that the value it contains
- // is in the expected range.
- Register entry_id = temps.AcquireX();
- DCHECK(entry_id.Is(x16));
- __ Push(padreg, entry_id);
-
- if (__ emit_debug_code()) {
- // Ensure the entry_id looks sensible, i.e. 0 <= entry_id < count().
- __ Cmp(entry_id, count());
- __ Check(lo, AbortReason::kOffsetOutOfRange);
- }
-}
-
bool Deoptimizer::PadTopOfStackRegister() { return true; }
void FrameDescription::SetCallerPc(unsigned offset, intptr_t value) {
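Editorial aside: the hunks above replace the generated entry table (note table_entry_size_ and TableEntryGenerator::GeneratePrologue being deleted) with a single stub. The bailout id now arrives in x26, set by CallForDeoptimization later in this patch, so the entry no longer pushes alignment padding and the id, and the fp-to-sp computation drops the matching 2 * kPointerSize term. A standalone sketch (not part of the patch; all values invented) checking that both conventions describe the same frame distance:

#include <cassert>
#include <cstdint>

int main() {
  const uint64_t kPointerSize = 8;
  const uint64_t kSavedRegistersAreaSize = 656;  // invented example value
  const uint64_t fp = 0x8000;                    // invented frame pointer
  // New scheme: only the saved registers sit between sp and the frame.
  const uint64_t sp_new = fp - 0x400;            // invented stack pointer
  // Old scheme: padding + bailout id were also pushed, lowering sp by two
  // words, and the delta computation skipped them again.
  const uint64_t sp_old = sp_new - 2 * kPointerSize;
  const uint64_t delta_old =
      fp - (sp_old + kSavedRegistersAreaSize + 2 * kPointerSize);
  const uint64_t delta_new = fp - (sp_new + kSavedRegistersAreaSize);
  assert(delta_old == delta_new);  // same fp-to-sp distance either way
  return 0;
}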
diff --git a/deps/v8/src/arm64/disasm-arm64.cc b/deps/v8/src/arm64/disasm-arm64.cc
index 4c7ce77e4a..eec2cbf138 100644
--- a/deps/v8/src/arm64/disasm-arm64.cc
+++ b/deps/v8/src/arm64/disasm-arm64.cc
@@ -14,7 +14,6 @@
#include "src/arm64/utils-arm64.h"
#include "src/base/platform/platform.h"
#include "src/disasm.h"
-#include "src/macro-assembler.h"
namespace v8 {
namespace internal {
@@ -3744,7 +3743,7 @@ int DisassemblingDecoder::SubstituteImmediateField(Instruction* instr,
uint64_t imm8 = instr->ImmNEONabcdefgh();
uint64_t imm = 0;
for (int i = 0; i < 8; ++i) {
- if (imm8 & (1 << i)) {
+ if (imm8 & (1ULL << i)) {
imm |= (UINT64_C(0xFF) << (8 * i));
}
}
@@ -3892,10 +3891,9 @@ int DisassemblingDecoder::SubstitutePCRelAddressField(Instruction* instr,
char sign = '+';
if (offset < 0) {
- offset = -offset;
sign = '-';
}
- AppendToOutput("#%c0x%x (addr %p)", sign, offset,
+ AppendToOutput("#%c0x%x (addr %p)", sign, Abs(offset),
instr->InstructionAtOffset(offset, Instruction::NO_CHECK));
return 13;
}
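Editorial aside: the old code negated offset in place before printing, so the subsequent InstructionAtOffset(offset, ...) call received a value with the wrong sign; the fix prints Abs(offset) and leaves offset itself untouched. A standalone sketch of the corrected pattern (not part of the patch; the Abs helper here is local, not V8's):

#include <cstdint>
#include <cstdio>

static int32_t Abs(int32_t v) { return v < 0 ? -v : v; }  // local helper

void PrintPCRelAddress(int32_t offset, const uint8_t* pc) {
  char sign = offset < 0 ? '-' : '+';
  // Print the magnitude, but keep the signed offset for the address
  // computation, mirroring the InstructionAtOffset() call in the patch.
  std::printf("#%c0x%x (addr %p)\n", sign,
              static_cast<unsigned>(Abs(offset)),
              static_cast<const void*>(pc + offset));
}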
diff --git a/deps/v8/src/arm64/frame-constants-arm64.h b/deps/v8/src/arm64/frame-constants-arm64.h
index 96fc72f126..13a879e8bd 100644
--- a/deps/v8/src/arm64/frame-constants-arm64.h
+++ b/deps/v8/src/arm64/frame-constants-arm64.h
@@ -35,6 +35,8 @@ namespace internal {
//
class EntryFrameConstants : public AllStatic {
public:
+ // This is the offset to where JSEntry pushes the current value of
+ // Isolate::c_entry_fp onto the stack.
static constexpr int kCallerFPOffset = -3 * kPointerSize;
static constexpr int kFixedFrameSize = 6 * kPointerSize;
};
diff --git a/deps/v8/src/arm64/instructions-arm64-constants.cc b/deps/v8/src/arm64/instructions-arm64-constants.cc
index 0a15287417..7559946cb1 100644
--- a/deps/v8/src/arm64/instructions-arm64-constants.cc
+++ b/deps/v8/src/arm64/instructions-arm64-constants.cc
@@ -3,6 +3,7 @@
// found in the LICENSE file.
#include <cstdint>
+#include "include/v8config.h"
namespace v8 {
namespace internal {
@@ -21,6 +22,10 @@ namespace internal {
// then move this code back into instructions-arm64.cc with the same types
// that client code uses.
+#if defined(V8_OS_WIN)
+extern "C" {
+#endif
+
extern const uint16_t kFP16PositiveInfinity = 0x7C00;
extern const uint16_t kFP16NegativeInfinity = 0xFC00;
extern const uint32_t kFP32PositiveInfinity = 0x7F800000;
@@ -42,5 +47,9 @@ extern const uint64_t kFP64DefaultNaN = 0x7FF8000000000000UL;
extern const uint32_t kFP32DefaultNaN = 0x7FC00000;
extern const uint16_t kFP16DefaultNaN = 0x7E00;
+#if defined(V8_OS_WIN)
+} // end of extern "C"
+#endif
+
} // namespace internal
} // namespace v8
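Editorial aside: wrapping these definitions in extern "C" on Windows gives the constants unmangled, C-compatible linkage, presumably so the declarations in instructions-arm64.h (changed the same way later in this patch) resolve to the same symbols regardless of how the Windows toolchain mangles const objects. A minimal sketch of the pattern outside V8 (names invented, not part of the patch):

// constants.h (invented example)
#include <cstdint>

#if defined(_WIN32)
extern "C" {
#endif

extern const uint16_t kMyFP16PositiveInfinity;  // defined as 0x7C00 in a .cc

#if defined(_WIN32)
}  // extern "C"
#endif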
diff --git a/deps/v8/src/arm64/instructions-arm64.cc b/deps/v8/src/arm64/instructions-arm64.cc
index 503f31050f..11b59a9e9b 100644
--- a/deps/v8/src/arm64/instructions-arm64.cc
+++ b/deps/v8/src/arm64/instructions-arm64.cc
@@ -72,7 +72,7 @@ static uint64_t RotateRight(uint64_t value,
unsigned int width) {
DCHECK_LE(width, 64);
rotate &= 63;
- return ((value & ((1UL << rotate) - 1UL)) << (width - rotate)) |
+ return ((value & ((1ULL << rotate) - 1ULL)) << (width - rotate)) |
(value >> rotate);
}
@@ -83,7 +83,7 @@ static uint64_t RepeatBitsAcrossReg(unsigned reg_size,
DCHECK((width == 2) || (width == 4) || (width == 8) || (width == 16) ||
(width == 32));
DCHECK((reg_size == kWRegSizeInBits) || (reg_size == kXRegSizeInBits));
- uint64_t result = value & ((1UL << width) - 1UL);
+ uint64_t result = value & ((1ULL << width) - 1ULL);
for (unsigned i = width; i < reg_size; i *= 2) {
result |= (result << i);
}
@@ -121,7 +121,7 @@ uint64_t Instruction::ImmLogical() {
if (imm_s == 0x3F) {
return 0;
}
- uint64_t bits = (1UL << (imm_s + 1)) - 1;
+ uint64_t bits = (1ULL << (imm_s + 1)) - 1;
return RotateRight(bits, imm_r, 64);
} else {
if ((imm_s >> 1) == 0x1F) {
@@ -133,7 +133,7 @@ uint64_t Instruction::ImmLogical() {
if ((imm_s & mask) == mask) {
return 0;
}
- uint64_t bits = (1UL << ((imm_s & mask) + 1)) - 1;
+ uint64_t bits = (1ULL << ((imm_s & mask) + 1)) - 1;
return RepeatBitsAcrossReg(reg_size,
RotateRight(bits, imm_r & mask, width),
width);
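Editorial aside: the UL to ULL changes in this file matter on LLP64 platforms such as Windows, where unsigned long is only 32 bits wide: 1UL << rotate with rotate >= 32 is undefined behavior and cannot produce the intended 64-bit mask, while unsigned long long is guaranteed to be at least 64 bits. A standalone illustration (not part of the patch):

#include <cstdint>
#include <cstdio>

int main() {
  unsigned rotate = 40;
  // On LLP64 (e.g. Windows), 1UL << 40 would shift past the width of
  // unsigned long; 1ULL is wide enough for the full 64-bit mask.
  uint64_t mask = (1ULL << rotate) - 1ULL;  // 40 low bits set
  std::printf("%016llx\n", static_cast<unsigned long long>(mask));
  return 0;
}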
@@ -343,289 +343,6 @@ uint64_t InstructionSequence::InlineData() const {
return payload;
}
-VectorFormat VectorFormatHalfWidth(VectorFormat vform) {
- DCHECK(vform == kFormat8H || vform == kFormat4S || vform == kFormat2D ||
- vform == kFormatH || vform == kFormatS || vform == kFormatD);
- switch (vform) {
- case kFormat8H:
- return kFormat8B;
- case kFormat4S:
- return kFormat4H;
- case kFormat2D:
- return kFormat2S;
- case kFormatH:
- return kFormatB;
- case kFormatS:
- return kFormatH;
- case kFormatD:
- return kFormatS;
- default:
- UNREACHABLE();
- }
-}
-
-VectorFormat VectorFormatDoubleWidth(VectorFormat vform) {
- DCHECK(vform == kFormat8B || vform == kFormat4H || vform == kFormat2S ||
- vform == kFormatB || vform == kFormatH || vform == kFormatS);
- switch (vform) {
- case kFormat8B:
- return kFormat8H;
- case kFormat4H:
- return kFormat4S;
- case kFormat2S:
- return kFormat2D;
- case kFormatB:
- return kFormatH;
- case kFormatH:
- return kFormatS;
- case kFormatS:
- return kFormatD;
- default:
- UNREACHABLE();
- }
-}
-
-VectorFormat VectorFormatFillQ(VectorFormat vform) {
- switch (vform) {
- case kFormatB:
- case kFormat8B:
- case kFormat16B:
- return kFormat16B;
- case kFormatH:
- case kFormat4H:
- case kFormat8H:
- return kFormat8H;
- case kFormatS:
- case kFormat2S:
- case kFormat4S:
- return kFormat4S;
- case kFormatD:
- case kFormat1D:
- case kFormat2D:
- return kFormat2D;
- default:
- UNREACHABLE();
- }
-}
-
-VectorFormat VectorFormatHalfWidthDoubleLanes(VectorFormat vform) {
- switch (vform) {
- case kFormat4H:
- return kFormat8B;
- case kFormat8H:
- return kFormat16B;
- case kFormat2S:
- return kFormat4H;
- case kFormat4S:
- return kFormat8H;
- case kFormat1D:
- return kFormat2S;
- case kFormat2D:
- return kFormat4S;
- default:
- UNREACHABLE();
- }
-}
-
-VectorFormat VectorFormatDoubleLanes(VectorFormat vform) {
- DCHECK(vform == kFormat8B || vform == kFormat4H || vform == kFormat2S);
- switch (vform) {
- case kFormat8B:
- return kFormat16B;
- case kFormat4H:
- return kFormat8H;
- case kFormat2S:
- return kFormat4S;
- default:
- UNREACHABLE();
- }
-}
-
-VectorFormat VectorFormatHalfLanes(VectorFormat vform) {
- DCHECK(vform == kFormat16B || vform == kFormat8H || vform == kFormat4S);
- switch (vform) {
- case kFormat16B:
- return kFormat8B;
- case kFormat8H:
- return kFormat4H;
- case kFormat4S:
- return kFormat2S;
- default:
- UNREACHABLE();
- }
-}
-
-VectorFormat ScalarFormatFromLaneSize(int laneSize) {
- switch (laneSize) {
- case 8:
- return kFormatB;
- case 16:
- return kFormatH;
- case 32:
- return kFormatS;
- case 64:
- return kFormatD;
- default:
- UNREACHABLE();
- }
-}
-
-VectorFormat ScalarFormatFromFormat(VectorFormat vform) {
- return ScalarFormatFromLaneSize(LaneSizeInBitsFromFormat(vform));
-}
-
-unsigned RegisterSizeInBytesFromFormat(VectorFormat vform) {
- return RegisterSizeInBitsFromFormat(vform) / 8;
-}
-
-unsigned RegisterSizeInBitsFromFormat(VectorFormat vform) {
- DCHECK_NE(vform, kFormatUndefined);
- switch (vform) {
- case kFormatB:
- return kBRegSizeInBits;
- case kFormatH:
- return kHRegSizeInBits;
- case kFormatS:
- return kSRegSizeInBits;
- case kFormatD:
- return kDRegSizeInBits;
- case kFormat8B:
- case kFormat4H:
- case kFormat2S:
- case kFormat1D:
- return kDRegSizeInBits;
- default:
- return kQRegSizeInBits;
- }
-}
-
-unsigned LaneSizeInBitsFromFormat(VectorFormat vform) {
- DCHECK_NE(vform, kFormatUndefined);
- switch (vform) {
- case kFormatB:
- case kFormat8B:
- case kFormat16B:
- return 8;
- case kFormatH:
- case kFormat4H:
- case kFormat8H:
- return 16;
- case kFormatS:
- case kFormat2S:
- case kFormat4S:
- return 32;
- case kFormatD:
- case kFormat1D:
- case kFormat2D:
- return 64;
- default:
- UNREACHABLE();
- }
-}
-
-int LaneSizeInBytesFromFormat(VectorFormat vform) {
- return LaneSizeInBitsFromFormat(vform) / 8;
-}
-
-int LaneSizeInBytesLog2FromFormat(VectorFormat vform) {
- DCHECK_NE(vform, kFormatUndefined);
- switch (vform) {
- case kFormatB:
- case kFormat8B:
- case kFormat16B:
- return 0;
- case kFormatH:
- case kFormat4H:
- case kFormat8H:
- return 1;
- case kFormatS:
- case kFormat2S:
- case kFormat4S:
- return 2;
- case kFormatD:
- case kFormat1D:
- case kFormat2D:
- return 3;
- default:
- UNREACHABLE();
- }
-}
-
-int LaneCountFromFormat(VectorFormat vform) {
- DCHECK_NE(vform, kFormatUndefined);
- switch (vform) {
- case kFormat16B:
- return 16;
- case kFormat8B:
- case kFormat8H:
- return 8;
- case kFormat4H:
- case kFormat4S:
- return 4;
- case kFormat2S:
- case kFormat2D:
- return 2;
- case kFormat1D:
- case kFormatB:
- case kFormatH:
- case kFormatS:
- case kFormatD:
- return 1;
- default:
- UNREACHABLE();
- }
-}
-
-int MaxLaneCountFromFormat(VectorFormat vform) {
- DCHECK_NE(vform, kFormatUndefined);
- switch (vform) {
- case kFormatB:
- case kFormat8B:
- case kFormat16B:
- return 16;
- case kFormatH:
- case kFormat4H:
- case kFormat8H:
- return 8;
- case kFormatS:
- case kFormat2S:
- case kFormat4S:
- return 4;
- case kFormatD:
- case kFormat1D:
- case kFormat2D:
- return 2;
- default:
- UNREACHABLE();
- }
-}
-
-// Does 'vform' indicate a vector format or a scalar format?
-bool IsVectorFormat(VectorFormat vform) {
- DCHECK_NE(vform, kFormatUndefined);
- switch (vform) {
- case kFormatB:
- case kFormatH:
- case kFormatS:
- case kFormatD:
- return false;
- default:
- return true;
- }
-}
-
-int64_t MaxIntFromFormat(VectorFormat vform) {
- return INT64_MAX >> (64 - LaneSizeInBitsFromFormat(vform));
-}
-
-int64_t MinIntFromFormat(VectorFormat vform) {
- return INT64_MIN >> (64 - LaneSizeInBitsFromFormat(vform));
-}
-
-uint64_t MaxUintFromFormat(VectorFormat vform) {
- return UINT64_MAX >> (64 - LaneSizeInBitsFromFormat(vform));
-}
-
NEONFormatDecoder::NEONFormatDecoder(const Instruction* instr) {
instrbits_ = instr->InstructionBits();
SetFormatMaps(IntegerFormatMap());
diff --git a/deps/v8/src/arm64/instructions-arm64.h b/deps/v8/src/arm64/instructions-arm64.h
index 9ea15e55ad..6f46e4b88c 100644
--- a/deps/v8/src/arm64/instructions-arm64.h
+++ b/deps/v8/src/arm64/instructions-arm64.h
@@ -6,18 +6,24 @@
#define V8_ARM64_INSTRUCTIONS_ARM64_H_
#include "src/arm64/constants-arm64.h"
+#include "src/arm64/register-arm64.h"
#include "src/arm64/utils-arm64.h"
-#include "src/assembler.h"
#include "src/globals.h"
#include "src/utils.h"
namespace v8 {
namespace internal {
+struct AssemblerOptions;
+
// ISA constants. --------------------------------------------------------------
typedef uint32_t Instr;
+#if defined(V8_OS_WIN)
+extern "C" {
+#endif
+
extern const float16 kFP16PositiveInfinity;
extern const float16 kFP16NegativeInfinity;
extern const float kFP32PositiveInfinity;
@@ -39,6 +45,10 @@ extern const double kFP64DefaultNaN;
extern const float kFP32DefaultNaN;
extern const float16 kFP16DefaultNaN;
+#if defined(V8_OS_WIN)
+} // end of extern "C"
+#endif
+
unsigned CalcLSDataSize(LoadStoreOp op);
unsigned CalcLSPairDataSize(LoadStorePairOp op);
@@ -444,49 +454,6 @@ class Instruction {
void SetBranchImmTarget(Instruction* target);
};
-// Functions for handling NEON vector format information.
-enum VectorFormat {
- kFormatUndefined = 0xffffffff,
- kFormat8B = NEON_8B,
- kFormat16B = NEON_16B,
- kFormat4H = NEON_4H,
- kFormat8H = NEON_8H,
- kFormat2S = NEON_2S,
- kFormat4S = NEON_4S,
- kFormat1D = NEON_1D,
- kFormat2D = NEON_2D,
-
- // Scalar formats. We add the scalar bit to distinguish between scalar and
- // vector enumerations; the bit is always set in the encoding of scalar ops
- // and always clear for vector ops. Although kFormatD and kFormat1D appear
- // to be the same, their meaning is subtly different. The first is a scalar
- // operation, the second a vector operation that only affects one lane.
- kFormatB = NEON_B | NEONScalar,
- kFormatH = NEON_H | NEONScalar,
- kFormatS = NEON_S | NEONScalar,
- kFormatD = NEON_D | NEONScalar
-};
-
-VectorFormat VectorFormatHalfWidth(VectorFormat vform);
-VectorFormat VectorFormatDoubleWidth(VectorFormat vform);
-VectorFormat VectorFormatDoubleLanes(VectorFormat vform);
-VectorFormat VectorFormatHalfLanes(VectorFormat vform);
-VectorFormat ScalarFormatFromLaneSize(int lanesize);
-VectorFormat VectorFormatHalfWidthDoubleLanes(VectorFormat vform);
-VectorFormat VectorFormatFillQ(VectorFormat vform);
-VectorFormat ScalarFormatFromFormat(VectorFormat vform);
-unsigned RegisterSizeInBitsFromFormat(VectorFormat vform);
-unsigned RegisterSizeInBytesFromFormat(VectorFormat vform);
-int LaneSizeInBytesFromFormat(VectorFormat vform);
-unsigned LaneSizeInBitsFromFormat(VectorFormat vform);
-int LaneSizeInBytesLog2FromFormat(VectorFormat vform);
-int LaneCountFromFormat(VectorFormat vform);
-int MaxLaneCountFromFormat(VectorFormat vform);
-bool IsVectorFormat(VectorFormat vform);
-int64_t MaxIntFromFormat(VectorFormat vform);
-int64_t MinIntFromFormat(VectorFormat vform);
-uint64_t MaxUintFromFormat(VectorFormat vform);
-
// Where Instruction looks at instructions generated by the Assembler,
// InstructionSequence looks at instructions sequences generated by the
// MacroAssembler.
diff --git a/deps/v8/src/arm64/interface-descriptors-arm64.cc b/deps/v8/src/arm64/interface-descriptors-arm64.cc
index 905cc51a57..ad79b1ec2b 100644
--- a/deps/v8/src/arm64/interface-descriptors-arm64.cc
+++ b/deps/v8/src/arm64/interface-descriptors-arm64.cc
@@ -6,6 +6,8 @@
#include "src/interface-descriptors.h"
+#include "src/frames.h"
+
namespace v8 {
namespace internal {
@@ -70,13 +72,6 @@ void TypeofDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void CallFunctionDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // x1 function the function to call
- Register registers[] = {x1};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
void CallTrampolineDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// x1: target
@@ -212,10 +207,9 @@ void ArgumentsAdaptorDescriptor::InitializePlatformSpecific(
void ApiCallbackDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
- JavaScriptFrame::context_register(), // callee context
- x4, // call_data
- x2, // holder
- x1, // api_function_address
+ JavaScriptFrame::context_register(), // kTargetContext
+ x1, // kApiFunctionAddress
+ x2, // kArgc
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
@@ -267,6 +261,12 @@ void FrameDropperTrampolineDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+void RunMicrotasksEntryDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {x0, x1};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/arm64/macro-assembler-arm64-inl.h b/deps/v8/src/arm64/macro-assembler-arm64-inl.h
index 62594241ec..ae055f40ab 100644
--- a/deps/v8/src/arm64/macro-assembler-arm64-inl.h
+++ b/deps/v8/src/arm64/macro-assembler-arm64-inl.h
@@ -1021,10 +1021,8 @@ void TurboAssembler::Uxtw(const Register& rd, const Register& rn) {
}
void TurboAssembler::InitializeRootRegister() {
- ExternalReference roots_array_start =
- ExternalReference::roots_array_start(isolate());
- Mov(kRootRegister, Operand(roots_array_start));
- Add(kRootRegister, kRootRegister, kRootRegisterBias);
+ ExternalReference isolate_root = ExternalReference::isolate_root(isolate());
+ Mov(kRootRegister, Operand(isolate_root));
}
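Editorial aside: after this change kRootRegister holds the Isolate root directly, so the roots table (and, further down in this patch, the builtin entry table) is reachable as a fixed offset from a single base with one load. A standalone sketch of the addressing scheme (not part of the patch; the struct layout is invented for illustration):

#include <cstdint>

struct IsolateData {          // invented layout, for illustration only
  uint64_t roots[16];         // roots table
  uint64_t builtin_entries[8];
};

uint64_t LoadRoot(const IsolateData* root_register, int index) {
  // Equivalent of Ldr(dst, MemOperand(kRootRegister, offset)), where offset
  // plays the role of RootRegisterOffsetForRootIndex(index).
  return root_register->roots[index];
}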
@@ -1174,7 +1172,7 @@ void TurboAssembler::Push(Handle<HeapObject> handle) {
Push(padreg, tmp);
}
-void TurboAssembler::Push(Smi* smi) {
+void TurboAssembler::Push(Smi smi) {
UseScratchRegisterScope temps(this);
Register tmp = temps.AcquireX();
Mov(tmp, Operand(smi));
diff --git a/deps/v8/src/arm64/macro-assembler-arm64.cc b/deps/v8/src/arm64/macro-assembler-arm64.cc
index 97a75e5758..48cd13d5fc 100644
--- a/deps/v8/src/arm64/macro-assembler-arm64.cc
+++ b/deps/v8/src/arm64/macro-assembler-arm64.cc
@@ -10,39 +10,27 @@
#include "src/bootstrapper.h"
#include "src/callable.h"
#include "src/code-factory.h"
-#include "src/code-stubs.h"
+#include "src/counters.h"
#include "src/debug/debug.h"
#include "src/external-reference-table.h"
#include "src/frame-constants.h"
#include "src/frames-inl.h"
-#include "src/heap/heap-inl.h"
-#include "src/instruction-stream.h"
+#include "src/macro-assembler-inl.h"
#include "src/register-configuration.h"
#include "src/runtime/runtime.h"
+#include "src/snapshot/embedded-data.h"
#include "src/snapshot/snapshot.h"
#include "src/wasm/wasm-code-manager.h"
-#include "src/arm64/macro-assembler-arm64-inl.h"
-#include "src/arm64/macro-assembler-arm64.h" // Cannot be the first include
+// Satisfy cpplint check, but don't include platform-specific header. It is
+// included recursively via macro-assembler.h.
+#if 0
+#include "src/arm64/macro-assembler-arm64.h"
+#endif
namespace v8 {
namespace internal {
-MacroAssembler::MacroAssembler(Isolate* isolate,
- const AssemblerOptions& options, void* buffer,
- int size, CodeObjectRequired create_code_object)
- : TurboAssembler(isolate, options, buffer, size, create_code_object) {
- if (create_code_object == CodeObjectRequired::kYes) {
- // Unlike TurboAssembler, which can be used off the main thread and may not
- // allocate, macro assembler creates its own copy of the self-reference
- // marker in order to disambiguate between self-references during nested
- // code generation (e.g.: codegen of the current object triggers stub
- // compilation through CodeStub::GetCode()).
- code_object_ = Handle<HeapObject>::New(
- *isolate->factory()->NewSelfReferenceMarker(), isolate);
- }
-}
-
CPURegList TurboAssembler::DefaultTmpList() { return CPURegList(ip0, ip1); }
CPURegList TurboAssembler::DefaultFPTmpList() {
@@ -53,13 +41,27 @@ int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
Register exclusion) const {
int bytes = 0;
auto list = kCallerSaved;
- DCHECK_EQ(list.Count() % 2, 0);
// We only allow one exclusion register, so if the list is of even length
// before exclusions, it must still be afterwards, to maintain alignment.
// Therefore, we can ignore the exclusion register in the computation.
// However, we leave it in the argument list to mirror the prototype for
// Push/PopCallerSaved().
+
+#if defined(V8_OS_WIN)
+ // X18 is excluded from the caller-saved register list on Windows ARM64,
+ // which leaves an odd number of caller-saved registers. padreg is used
+ // accordingly to maintain the alignment.
+ DCHECK_EQ(list.Count() % 2, 1);
+ if (exclusion.Is(no_reg)) {
+ bytes += kXRegSizeInBits / 8;
+ } else {
+ bytes -= kXRegSizeInBits / 8;
+ }
+#else
+ DCHECK_EQ(list.Count() % 2, 0);
USE(exclusion);
+#endif
+
bytes += list.Count() * kXRegSizeInBits / 8;
if (fp_mode == kSaveFPRegs) {
@@ -73,12 +75,24 @@ int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode,
Register exclusion) {
int bytes = 0;
auto list = kCallerSaved;
- DCHECK_EQ(list.Count() % 2, 0);
+
+#if defined(V8_OS_WIN)
+ // X18 is excluded from the caller-saved register list on Windows ARM64;
+ // use padreg accordingly to maintain alignment.
+ if (!exclusion.Is(no_reg)) {
+ list.Remove(exclusion);
+ } else {
+ list.Combine(padreg);
+ }
+#else
if (!exclusion.Is(no_reg)) {
// Replace the excluded register with padding to maintain alignment.
list.Remove(exclusion);
list.Combine(padreg);
}
+#endif
+
+ DCHECK_EQ(list.Count() % 2, 0);
PushCPURegList(list);
bytes += list.Count() * kXRegSizeInBits / 8;
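Editorial aside: the Windows branch exists because excluding x18 leaves an odd number of 8-byte caller-saved registers, while AArch64 requires sp to stay 16-byte aligned, so padreg is combined into the list to restore an even count. A standalone check of that arithmetic (not part of the patch):

#include <cassert>

int main() {
  const int kXRegSizeInBytes = 8;
  int caller_saved = 11;  // hypothetical odd count after excluding x18
  int pushed = caller_saved + (caller_saved % 2);  // pair with padreg if odd
  assert((pushed * kXRegSizeInBytes) % 16 == 0);   // sp stays 16-byte aligned
  return 0;
}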
@@ -99,12 +113,24 @@ int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion) {
}
auto list = kCallerSaved;
- DCHECK_EQ(list.Count() % 2, 0);
+
+#if defined(V8_OS_WIN)
+ // X18 is excluded from the caller-saved register list on Windows ARM64;
+ // use padreg accordingly to maintain alignment.
+ if (!exclusion.Is(no_reg)) {
+ list.Remove(exclusion);
+ } else {
+ list.Combine(padreg);
+ }
+#else
if (!exclusion.Is(no_reg)) {
// Replace the excluded register with padding to maintain alignment.
list.Remove(exclusion);
list.Combine(padreg);
}
+#endif
+
+ DCHECK_EQ(list.Count() % 2, 0);
PopCPURegList(list);
bytes += list.Count() * kXRegSizeInBits / 8;
@@ -315,7 +341,7 @@ void TurboAssembler::Mov(const Register& rd, const Operand& operand,
return;
} else if (operand.ImmediateRMode() == RelocInfo::EMBEDDED_OBJECT) {
Handle<HeapObject> x(
- reinterpret_cast<HeapObject**>(operand.ImmediateValue()));
+ reinterpret_cast<Address*>(operand.ImmediateValue()));
IndirectLoadConstant(rd, x);
return;
}
@@ -360,6 +386,10 @@ void TurboAssembler::Mov(const Register& rd, const Operand& operand,
}
}
+void TurboAssembler::Mov(const Register& rd, Smi smi) {
+ return Mov(rd, Operand(smi));
+}
+
void TurboAssembler::Movi16bitHelper(const VRegister& vd, uint64_t imm) {
DCHECK(is_uint16(imm));
int byte1 = (imm & 0xFF);
@@ -1519,7 +1549,8 @@ void TurboAssembler::CanonicalizeNaN(const VRegister& dst,
void TurboAssembler::LoadRoot(Register destination, RootIndex index) {
// TODO(jbramley): Most root values are constants, and can be synthesized
// without a load. Refer to the ARM back end for details.
- Ldr(destination, MemOperand(kRootRegister, RootRegisterOffset(index)));
+ Ldr(destination,
+ MemOperand(kRootRegister, RootRegisterOffsetForRootIndex(index)));
}
@@ -1532,7 +1563,7 @@ void MacroAssembler::LoadObject(Register result, Handle<Object> object) {
}
}
-void TurboAssembler::Move(Register dst, Smi* src) { Mov(dst, src); }
+void TurboAssembler::Move(Register dst, Smi src) { Mov(dst, src); }
void TurboAssembler::Swap(Register lhs, Register rhs) {
DCHECK(lhs.IsSameSizeAndType(rhs));
@@ -1632,6 +1663,10 @@ void MacroAssembler::AssertGeneratorObject(Register object) {
CompareInstanceType(temp, temp, JS_GENERATOR_OBJECT_TYPE);
B(eq, &do_check);
+ // Check if JSAsyncFunctionObject
+ Cmp(temp, JS_ASYNC_FUNCTION_OBJECT_TYPE);
+ B(eq, &do_check);
+
// Check if JSAsyncGeneratorObject
Cmp(temp, JS_ASYNC_GENERATOR_OBJECT_TYPE);
@@ -1664,27 +1699,6 @@ void TurboAssembler::AssertPositiveOrZero(Register value) {
}
}
-void TurboAssembler::CallStubDelayed(CodeStub* stub) {
- DCHECK(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
- BlockPoolsScope scope(this);
-#ifdef DEBUG
- Label start;
- Bind(&start);
-#endif
- Operand operand = Operand::EmbeddedCode(stub);
- near_call(operand.heap_object_request());
- DCHECK_EQ(kNearCallSize, SizeOfCodeGeneratedSince(&start));
-}
-
-void MacroAssembler::CallStub(CodeStub* stub) {
- DCHECK(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
- Call(stub->GetCode(), RelocInfo::CODE_TARGET);
-}
-
-void MacroAssembler::TailCallStub(CodeStub* stub) {
- Jump(stub->GetCode(), RelocInfo::CODE_TARGET);
-}
-
void TurboAssembler::CallRuntimeWithCEntry(Runtime::FunctionId fid,
Register centry) {
const Runtime::Function* f = Runtime::FunctionForId(fid);
@@ -1695,8 +1709,7 @@ void TurboAssembler::CallRuntimeWithCEntry(Runtime::FunctionId fid,
Mov(x0, f->nargs);
Mov(x1, ExternalReference::Create(f));
DCHECK(!AreAliased(centry, x0, x1));
- Add(centry, centry, Operand(Code::kHeaderSize - kHeapObjectTag));
- Call(centry);
+ CallCodeObject(centry);
}
void MacroAssembler::CallRuntime(const Runtime::Function* f,
@@ -1792,10 +1805,38 @@ void TurboAssembler::CallCFunction(Register function, int num_of_reg_args,
DCHECK_LE(num_of_double_args + num_of_reg_args, 2);
}
+ // Save the frame pointer and PC so that the stack layout remains iterable,
+ // even without an ExitFrame, which normally exists between JS and C frames.
+ if (isolate() != nullptr) {
+ Register scratch1 = x4;
+ Register scratch2 = x5;
+ Push(scratch1, scratch2);
+
+ Label get_pc;
+ Bind(&get_pc);
+ Adr(scratch2, &get_pc);
+
+ Mov(scratch1, ExternalReference::fast_c_call_caller_pc_address(isolate()));
+ Str(scratch2, MemOperand(scratch1));
+ Mov(scratch1, ExternalReference::fast_c_call_caller_fp_address(isolate()));
+ Str(fp, MemOperand(scratch1));
+
+ Pop(scratch2, scratch1);
+ }
+
// Call directly. The function called cannot cause a GC, or allow preemption,
// so the return address in the link register stays correct.
Call(function);
+ if (isolate() != nullptr) {
+ // We don't unset the PC; the FP is the source of truth.
+ Register scratch = x4;
+ Push(scratch, xzr);
+ Mov(scratch, ExternalReference::fast_c_call_caller_fp_address(isolate()));
+ Str(xzr, MemOperand(scratch));
+ Pop(xzr, scratch);
+ }
+
if (num_of_reg_args > kRegisterPassedArguments) {
// Drop the register passed arguments.
int claim_slots = RoundUp(num_of_reg_args - kRegisterPassedArguments, 2);
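Editorial aside: the new bookkeeping around the call records the caller's pc and fp in per-Isolate slots so a stack walker or sampling profiler can bridge the JS-to-C transition even though no ExitFrame exists. A standalone sketch of the protocol (not part of the patch; the globals stand in for the Isolate slots and are invented):

#include <cstdint>

// Stand-ins for Isolate::fast_c_call_caller_{pc,fp}_address (invented).
static uintptr_t g_fast_c_call_caller_pc = 0;
static uintptr_t g_fast_c_call_caller_fp = 0;

void CallFastCFunction(void (*fn)(), uintptr_t caller_fp,
                       uintptr_t caller_pc) {
  g_fast_c_call_caller_pc = caller_pc;  // Str(scratch2, ...) in the patch
  g_fast_c_call_caller_fp = caller_fp;  // Str(fp, ...)
  fn();
  // Only fp is cleared afterwards; as the patch comment says, the fp slot
  // is the source of truth for whether a fast C call is in progress.
  g_fast_c_call_caller_fp = 0;
}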
@@ -1805,8 +1846,7 @@ void TurboAssembler::CallCFunction(Register function, int num_of_reg_args,
void TurboAssembler::LoadFromConstantsTable(Register destination,
int constant_index) {
- DCHECK(isolate()->heap()->RootCanBeTreatedAsConstant(
- RootIndex::kBuiltinsConstantsTable));
+ DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kBuiltinsConstantsTable));
LoadRoot(destination, RootIndex::kBuiltinsConstantsTable);
Ldr(destination,
FieldMemOperand(destination,
@@ -1880,37 +1920,26 @@ void TurboAssembler::Jump(Address target, RelocInfo::Mode rmode,
void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
Condition cond) {
DCHECK(RelocInfo::IsCodeTarget(rmode));
- if (FLAG_embedded_builtins) {
- if (root_array_available_ && options().isolate_independent_code &&
- !Builtins::IsIsolateIndependentBuiltin(*code)) {
- // Calls to embedded targets are initially generated as standard
- // pc-relative calls below. When creating the embedded blob, call offsets
- // are patched up to point directly to the off-heap instruction start.
- // Note: It is safe to dereference {code} above since code generation
- // for builtins and code stubs happens on the main thread.
+ DCHECK_IMPLIES(options().isolate_independent_code,
+ Builtins::IsIsolateIndependentBuiltin(*code));
+
+ if (options().inline_offheap_trampolines) {
+ int builtin_index = Builtins::kNoBuiltinId;
+ if (isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
+ Builtins::IsIsolateIndependent(builtin_index)) {
+ // Inline the trampoline.
+ RecordCommentForOffHeapTrampoline(builtin_index);
+ CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
UseScratchRegisterScope temps(this);
Register scratch = temps.AcquireX();
- IndirectLoadConstant(scratch, code);
- Add(scratch, scratch, Operand(Code::kHeaderSize - kHeapObjectTag));
+ EmbeddedData d = EmbeddedData::FromBlob();
+ Address entry = d.InstructionStartOfBuiltin(builtin_index);
+ Ldr(scratch, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
Jump(scratch, cond);
return;
- } else if (options().inline_offheap_trampolines) {
- int builtin_index = Builtins::kNoBuiltinId;
- if (isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
- Builtins::IsIsolateIndependent(builtin_index)) {
- // Inline the trampoline.
- RecordCommentForOffHeapTrampoline(builtin_index);
- CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
- UseScratchRegisterScope temps(this);
- Register scratch = temps.AcquireX();
- EmbeddedData d = EmbeddedData::FromBlob();
- Address entry = d.InstructionStartOfBuiltin(builtin_index);
- Ldr(scratch, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
- Jump(scratch, cond);
- return;
- }
}
}
+
if (CanUseNearCallOrJump(rmode)) {
JumpHelper(static_cast<int64_t>(AddCodeTarget(code)), rmode, cond);
} else {
@@ -1936,39 +1965,27 @@ void TurboAssembler::Call(Address target, RelocInfo::Mode rmode) {
}
void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode) {
+ DCHECK_IMPLIES(options().isolate_independent_code,
+ Builtins::IsIsolateIndependentBuiltin(*code));
BlockPoolsScope scope(this);
- if (FLAG_embedded_builtins) {
- if (root_array_available_ && options().isolate_independent_code &&
- !Builtins::IsIsolateIndependentBuiltin(*code)) {
- // Calls to embedded targets are initially generated as standard
- // pc-relative calls below. When creating the embedded blob, call offsets
- // are patched up to point directly to the off-heap instruction start.
- // Note: It is safe to dereference {code} above since code generation
- // for builtins and code stubs happens on the main thread.
+ if (options().inline_offheap_trampolines) {
+ int builtin_index = Builtins::kNoBuiltinId;
+ if (isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
+ Builtins::IsIsolateIndependent(builtin_index)) {
+ // Inline the trampoline.
+ RecordCommentForOffHeapTrampoline(builtin_index);
+ CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
UseScratchRegisterScope temps(this);
Register scratch = temps.AcquireX();
- IndirectLoadConstant(scratch, code);
- Add(scratch, scratch, Operand(Code::kHeaderSize - kHeapObjectTag));
+ EmbeddedData d = EmbeddedData::FromBlob();
+ Address entry = d.InstructionStartOfBuiltin(builtin_index);
+ Ldr(scratch, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
Call(scratch);
return;
- } else if (options().inline_offheap_trampolines) {
- int builtin_index = Builtins::kNoBuiltinId;
- if (isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
- Builtins::IsIsolateIndependent(builtin_index)) {
- // Inline the trampoline.
- RecordCommentForOffHeapTrampoline(builtin_index);
- CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
- UseScratchRegisterScope temps(this);
- Register scratch = temps.AcquireX();
- EmbeddedData d = EmbeddedData::FromBlob();
- Address entry = d.InstructionStartOfBuiltin(builtin_index);
- Ldr(scratch, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
- Call(scratch);
- return;
- }
}
}
+
if (CanUseNearCallOrJump(rmode)) {
near_call(AddCodeTarget(code), rmode);
} else {
@@ -1983,6 +2000,108 @@ void TurboAssembler::Call(ExternalReference target) {
Call(temp);
}
+void TurboAssembler::CallBuiltinPointer(Register builtin_pointer) {
+ STATIC_ASSERT(kSystemPointerSize == 8);
+ STATIC_ASSERT(kSmiShiftSize == 31);
+ STATIC_ASSERT(kSmiTagSize == 1);
+ STATIC_ASSERT(kSmiTag == 0);
+
+ // The builtin_pointer register contains the builtin index as a Smi.
+ // Untagging is folded into the indexing operand below.
+ Asr(builtin_pointer, builtin_pointer, kSmiShift - kSystemPointerSizeLog2);
+ Add(builtin_pointer, builtin_pointer,
+ IsolateData::builtin_entry_table_offset());
+ Ldr(builtin_pointer, MemOperand(kRootRegister, builtin_pointer));
+ Call(builtin_pointer);
+}
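// Editorial note (not part of the patch): the Asr above folds Smi-untagging
// into the index scaling. Per the STATIC_ASSERTs, kSmiShift (that is,
// kSmiTagSize + kSmiShiftSize) is 32 and kSystemPointerSizeLog2 is 3, so a
// builtin index n, stored as the Smi n << 32, becomes (n << 32) >> 29, which
// equals n * 8: the byte offset of entry n in the pointer-sized entry table.
// For example, index 5: (5ULL << 32) >> 29 == 40 == 5 * kSystemPointerSize.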
+
+void TurboAssembler::LoadCodeObjectEntry(Register destination,
+ Register code_object) {
+ // Code objects are called differently depending on whether we are generating
+ // builtin code (which will later be embedded into the binary) or compiling
+ // user JS code at runtime.
+ // * Builtin code runs in --jitless mode and thus must not call into on-heap
+ // Code targets. Instead, we dispatch through the builtins entry table.
+ // * Codegen at runtime does not have this restriction and we can use the
+ // shorter, branchless instruction sequence. The assumption here is that
+ // targets are usually generated code and not builtin Code objects.
+
+ if (options().isolate_independent_code) {
+ DCHECK(root_array_available());
+ Label if_code_is_builtin, out;
+
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.AcquireX();
+
+ DCHECK(!AreAliased(destination, scratch));
+ DCHECK(!AreAliased(code_object, scratch));
+
+ // Check whether the Code object is a builtin. If so, call its (off-heap)
+ // entry point directly without going through the (on-heap) trampoline.
+ // Otherwise, just call the Code object as always.
+
+ Ldrsw(scratch, FieldMemOperand(code_object, Code::kBuiltinIndexOffset));
+ Cmp(scratch, Operand(Builtins::kNoBuiltinId));
+ B(ne, &if_code_is_builtin);
+
+ // A non-builtin Code object, the entry point is at
+ // Code::raw_instruction_start().
+ Add(destination, code_object, Code::kHeaderSize - kHeapObjectTag);
+ B(&out);
+
+ // A builtin Code object, the entry point is loaded from the builtin entry
+ // table.
+ // The builtin index is loaded in scratch.
+ bind(&if_code_is_builtin);
+ Lsl(destination, scratch, kSystemPointerSizeLog2);
+ Add(destination, destination, kRootRegister);
+ Ldr(destination,
+ MemOperand(destination, IsolateData::builtin_entry_table_offset()));
+
+ bind(&out);
+ } else {
+ Add(destination, code_object, Code::kHeaderSize - kHeapObjectTag);
+ }
+}
+
+void TurboAssembler::CallCodeObject(Register code_object) {
+ LoadCodeObjectEntry(code_object, code_object);
+ Call(code_object);
+}
+
+void TurboAssembler::JumpCodeObject(Register code_object) {
+ LoadCodeObjectEntry(code_object, code_object);
+ Jump(code_object);
+}
+
+void TurboAssembler::StoreReturnAddressAndCall(Register target) {
+ // This generates the final instruction sequence for calls to C functions
+ // once an exit frame has been constructed.
+ //
+ // Note that this assumes the caller code (i.e. the Code object currently
+ // being generated) is immovable or that the callee function cannot trigger
+ // GC, since the callee function will return to it.
+
+ UseScratchRegisterScope temps(this);
+ Register scratch1 = temps.AcquireX();
+
+ Label return_location;
+ Adr(scratch1, &return_location);
+ Poke(scratch1, 0);
+
+ if (emit_debug_code()) {
+ // Verify that the slot below fp[kSPOffset]-8 points to the return location.
+ Register scratch2 = temps.AcquireX();
+ Ldr(scratch2, MemOperand(fp, ExitFrameConstants::kSPOffset));
+ Ldr(scratch2, MemOperand(scratch2, -static_cast<int64_t>(kXRegSize)));
+ Cmp(scratch2, scratch1);
+ Check(eq, AbortReason::kReturnAddressNotFoundInFrame);
+ }
+
+ Blr(target);
+ Bind(&return_location);
+}
+
void TurboAssembler::IndirectCall(Address target, RelocInfo::Mode rmode) {
UseScratchRegisterScope temps(this);
Register temp = temps.AcquireX();
@@ -1994,31 +2113,24 @@ bool TurboAssembler::IsNearCallOffset(int64_t offset) {
return is_int26(offset);
}
-void TurboAssembler::CallForDeoptimization(Address target, int deopt_id,
- RelocInfo::Mode rmode) {
- DCHECK_EQ(rmode, RelocInfo::RUNTIME_ENTRY);
-
+void TurboAssembler::CallForDeoptimization(Address target, int deopt_id) {
BlockPoolsScope scope(this);
+ NoRootArrayScope no_root_array(this);
+
#ifdef DEBUG
Label start;
Bind(&start);
#endif
- // The deoptimizer requires the deoptimization id to be in x16.
- UseScratchRegisterScope temps(this);
- Register temp = temps.AcquireX();
- DCHECK(temp.Is(x16));
  // Make sure that the deopt id can be encoded in 16 bits, so that it can be
  // encoded in a single movz instruction with a zero shift.
DCHECK(is_uint16(deopt_id));
- movz(temp, deopt_id);
+ movz(x26, deopt_id);
int64_t offset = static_cast<int64_t>(target) -
static_cast<int64_t>(options().code_range_start);
DCHECK_EQ(offset % kInstrSize, 0);
offset = offset / static_cast<int>(kInstrSize);
DCHECK(IsNearCallOffset(offset));
near_call(static_cast<int>(offset), RelocInfo::RUNTIME_ENTRY);
-
- DCHECK_EQ(kNearCallSize + kInstrSize, SizeOfCodeGeneratedSince(&start));
}
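Editorial aside: the deopt id now travels to the entry stub in x26, which GenerateDeoptimizationEntries (earlier in this patch) copies into x2, and movz with a zero shift encodes exactly 16 immediate bits, which is what the is_uint16 DCHECK enforces. A one-line standalone statement of that bound (not part of the patch):

#include <cstdint>

// movz xN, #imm, lsl #0 can materialize any id below 2^16.
constexpr bool FitsInMovzImm16(uint64_t id) { return id < 65536; }
static_assert(FitsInMovzImm16(65535) && !FitsInMovzImm16(65536), "16-bit bound");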
void MacroAssembler::TryRepresentDoubleAsInt(Register as_int, VRegister value,
@@ -2241,12 +2353,11 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
// call sites.
Register code = kJavaScriptCallCodeStartRegister;
Ldr(code, FieldMemOperand(function, JSFunction::kCodeOffset));
- Add(code, code, Operand(Code::kHeaderSize - kHeapObjectTag));
if (flag == CALL_FUNCTION) {
- Call(code);
+ CallCodeObject(code);
} else {
DCHECK(flag == JUMP_FUNCTION);
- Jump(code);
+ JumpCodeObject(code);
}
}
@@ -2527,9 +2638,14 @@ void MacroAssembler::LeaveExitFrame(bool restore_doubles,
Pop(fp, lr);
}
+void MacroAssembler::LoadGlobalProxy(Register dst) {
+ LoadNativeContextSlot(Context::GLOBAL_PROXY_INDEX, dst);
+}
+
void MacroAssembler::LoadWeakValue(Register out, Register in,
Label* target_if_cleared) {
- CompareAndBranch(in, Operand(kClearedWeakHeapObject), eq, target_if_cleared);
+ CompareAndBranch(in.W(), Operand(kClearedWeakHeapObjectLower32), eq,
+ target_if_cleared);
and_(out, in, Operand(~kWeakHeapObjectMask));
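// Editorial note (not part of the patch): a cleared weak reference is now
// recognized by its lower 32 bits alone (kClearedWeakHeapObjectLower32),
// which is why the comparison uses in.W(), the 32-bit view of the register.
// In plain C++ the check is essentially:
//   bool IsCleared(uint64_t v, uint32_t cleared_lower32) {
//     return static_cast<uint32_t>(v) == cleared_lower32;
//   }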
}
@@ -2658,10 +2774,6 @@ void MacroAssembler::TestAndSplit(const Register& reg,
}
}
-bool TurboAssembler::AllowThisStubCall(CodeStub* stub) {
- return has_frame() || !stub->SometimesSetsUpAFrame();
-}
-
void MacroAssembler::PopSafepointRegisters() {
const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
DCHECK_GE(num_unsaved, 0);
@@ -2803,25 +2915,43 @@ void TurboAssembler::RestoreRegisters(RegList registers) {
void TurboAssembler::CallRecordWriteStub(
Register object, Register address,
RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode) {
+ CallRecordWriteStub(
+ object, address, remembered_set_action, fp_mode,
+ isolate()->builtins()->builtin_handle(Builtins::kRecordWrite),
+ kNullAddress);
+}
+
+void TurboAssembler::CallRecordWriteStub(
+ Register object, Register address,
+ RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode,
+ Address wasm_target) {
+ CallRecordWriteStub(object, address, remembered_set_action, fp_mode,
+ Handle<Code>::null(), wasm_target);
+}
+
+void TurboAssembler::CallRecordWriteStub(
+ Register object, Register address,
+ RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode,
+ Handle<Code> code_target, Address wasm_target) {
+ DCHECK_NE(code_target.is_null(), wasm_target == kNullAddress);
  // TODO(albertnetymk): For now we ignore remembered_set_action and fp_mode,
  // i.e. we always emit the remembered set and save FP registers in
  // RecordWriteStub. If a large performance regression is observed, we should
  // use these values to avoid unnecessary work.
- Callable const callable =
- Builtins::CallableFor(isolate(), Builtins::kRecordWrite);
- RegList registers = callable.descriptor().allocatable_registers();
+ RecordWriteDescriptor descriptor;
+ RegList registers = descriptor.allocatable_registers();
SaveRegisters(registers);
- Register object_parameter(callable.descriptor().GetRegisterParameter(
- RecordWriteDescriptor::kObject));
+ Register object_parameter(
+ descriptor.GetRegisterParameter(RecordWriteDescriptor::kObject));
Register slot_parameter(
- callable.descriptor().GetRegisterParameter(RecordWriteDescriptor::kSlot));
- Register remembered_set_parameter(callable.descriptor().GetRegisterParameter(
- RecordWriteDescriptor::kRememberedSet));
- Register fp_mode_parameter(callable.descriptor().GetRegisterParameter(
- RecordWriteDescriptor::kFPMode));
+ descriptor.GetRegisterParameter(RecordWriteDescriptor::kSlot));
+ Register remembered_set_parameter(
+ descriptor.GetRegisterParameter(RecordWriteDescriptor::kRememberedSet));
+ Register fp_mode_parameter(
+ descriptor.GetRegisterParameter(RecordWriteDescriptor::kFPMode));
Push(object, address);
@@ -2829,7 +2959,11 @@ void TurboAssembler::CallRecordWriteStub(
Mov(remembered_set_parameter, Smi::FromEnum(remembered_set_action));
Mov(fp_mode_parameter, Smi::FromEnum(fp_mode));
- Call(callable.code(), RelocInfo::CODE_TARGET);
+ if (code_target.is_null()) {
+ Call(wasm_target, RelocInfo::WASM_STUB_CALL);
+ } else {
+ Call(code_target, RelocInfo::CODE_TARGET);
+ }
RestoreRegisters(registers);
}
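Editorial aside: CallRecordWriteStub now has two public overloads, one taking an on-heap Code target and one taking a raw wasm stub address, both funneling into a private helper whose DCHECK insists that exactly one kind of target was supplied. A standalone sketch of that dispatch shape (not part of the patch; types pared down to the essentials):

#include <cassert>
#include <cstdint>

using Address = uintptr_t;
constexpr Address kNullAddress = 0;

struct CodeHandle {
  bool is_null() const { return null_; }
  bool null_ = true;
};

void CallRecordWrite(CodeHandle code_target, Address wasm_target) {
  // Exactly one of the two targets must be provided.
  assert(code_target.is_null() != (wasm_target == kNullAddress));
  if (code_target.is_null()) {
    // Call(wasm_target, RelocInfo::WASM_STUB_CALL) in the real code.
  } else {
    // Call(code_target, RelocInfo::CODE_TARGET) in the real code.
  }
}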
diff --git a/deps/v8/src/arm64/macro-assembler-arm64.h b/deps/v8/src/arm64/macro-assembler-arm64.h
index 8648ff0439..ba1885a248 100644
--- a/deps/v8/src/arm64/macro-assembler-arm64.h
+++ b/deps/v8/src/arm64/macro-assembler-arm64.h
@@ -2,6 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#ifndef INCLUDED_FROM_MACRO_ASSEMBLER_H
+#error This header must be included via macro-assembler.h
+#endif
+
#ifndef V8_ARM64_MACRO_ASSEMBLER_ARM64_H_
#define V8_ARM64_MACRO_ASSEMBLER_ARM64_H_
@@ -11,7 +15,6 @@
#include "src/bailout-reason.h"
#include "src/base/bits.h"
#include "src/globals.h"
-#include "src/turbo-assembler.h"
// Simulator specific helpers.
#if USE_SIMULATOR
@@ -41,31 +44,6 @@
namespace v8 {
namespace internal {
-// Give alias names to registers for calling conventions.
-constexpr Register kReturnRegister0 = x0;
-constexpr Register kReturnRegister1 = x1;
-constexpr Register kReturnRegister2 = x2;
-constexpr Register kJSFunctionRegister = x1;
-constexpr Register kContextRegister = cp;
-constexpr Register kAllocateSizeRegister = x1;
-constexpr Register kSpeculationPoisonRegister = x18;
-constexpr Register kInterpreterAccumulatorRegister = x0;
-constexpr Register kInterpreterBytecodeOffsetRegister = x19;
-constexpr Register kInterpreterBytecodeArrayRegister = x20;
-constexpr Register kInterpreterDispatchTableRegister = x21;
-
-constexpr Register kJavaScriptCallArgCountRegister = x0;
-constexpr Register kJavaScriptCallCodeStartRegister = x2;
-constexpr Register kJavaScriptCallTargetRegister = kJSFunctionRegister;
-constexpr Register kJavaScriptCallNewTargetRegister = x3;
-constexpr Register kJavaScriptCallExtraArg1Register = x2;
-
-constexpr Register kOffHeapTrampolineRegister = ip0;
-constexpr Register kRuntimeCallFunctionRegister = x1;
-constexpr Register kRuntimeCallArgCountRegister = x0;
-constexpr Register kRuntimeCallArgvRegister = x11;
-constexpr Register kWasmInstanceRegister = x7;
-
#define LS_MACRO_LIST(V) \
V(Ldrb, Register&, rt, LDRB_w) \
V(Strb, Register&, rt, STRB_w) \
@@ -180,14 +158,9 @@ enum PreShiftImmMode {
class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
public:
- TurboAssembler(const AssemblerOptions& options, void* buffer, int buffer_size)
- : TurboAssemblerBase(options, buffer, buffer_size) {}
-
- TurboAssembler(Isolate* isolate, const AssemblerOptions& options,
- void* buffer, int buffer_size,
- CodeObjectRequired create_code_object)
- : TurboAssemblerBase(isolate, options, buffer, buffer_size,
- create_code_object) {}
+ template <typename... Args>
+ explicit TurboAssembler(Args&&... args)
+ : TurboAssemblerBase(std::forward<Args>(args)...) {}
#if DEBUG
void set_allow_macro_instructions(bool value) {
@@ -196,14 +169,14 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
bool allow_macro_instructions() const { return allow_macro_instructions_; }
#endif
- // We should not use near calls or jumps for JS->WASM calls and calls to
- // external references, since the code spaces are not guaranteed to be close
- // to each other.
+ // We should not use near calls or jumps for calls to external references,
+ // since the code spaces are not guaranteed to be close to each other.
bool CanUseNearCallOrJump(RelocInfo::Mode rmode) {
- return rmode != RelocInfo::JS_TO_WASM_CALL &&
- rmode != RelocInfo::EXTERNAL_REFERENCE;
+ return rmode != RelocInfo::EXTERNAL_REFERENCE;
}
+ static bool IsNearCallOffset(int64_t offset);
+
// Activation support.
void EnterFrame(StackFrame::Type type);
void EnterFrame(StackFrame::Type type, bool load_constant_pool_pointer_reg) {
@@ -222,6 +195,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
DCHECK(allow_macro_instructions());
mov(vd, vd_index, vn, vn_index);
}
+ void Mov(const Register& rd, Smi smi);
void Mov(const VRegister& vd, const VRegister& vn, int index) {
DCHECK(allow_macro_instructions());
mov(vd, vn, index);
@@ -237,7 +211,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// This is required for compatibility with architecture independent code.
// Remove if not needed.
- void Move(Register dst, Smi* src);
+ void Move(Register dst, Smi src);
// Register swap. Note that the register operands should be distinct.
void Swap(Register lhs, Register rhs);
@@ -543,9 +517,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
inline void Isb();
inline void Csdb();
- bool AllowThisStubCall(CodeStub* stub);
- void CallStubDelayed(CodeStub* stub);
-
// Call a runtime routine. This expects {centry} to contain a fitting CEntry
// builtin for the target runtime function and uses an indirect call.
void CallRuntimeWithCEntry(Runtime::FunctionId fid, Register centry);
@@ -759,7 +730,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// This is a convenience method for pushing a single Handle<Object>.
inline void Push(Handle<HeapObject> object);
- inline void Push(Smi* smi);
+ inline void Push(Smi smi);
// Aliases of Push and Pop, required for V8 compatibility.
inline void push(Register src) { Push(src); }
@@ -771,6 +742,9 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void CallRecordWriteStub(Register object, Register address,
RememberedSetAction remembered_set_action,
SaveFPRegsMode fp_mode);
+ void CallRecordWriteStub(Register object, Register address,
+ RememberedSetAction remembered_set_action,
+ SaveFPRegsMode fp_mode, Address wasm_target);
// Alternative forms of Push and Pop, taking a RegList or CPURegList that
// specifies the registers that are to be pushed or popped. Higher-numbered
@@ -867,8 +841,18 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// Generate an indirect call (for when a direct call's range is not adequate).
void IndirectCall(Address target, RelocInfo::Mode rmode);
- void CallForDeoptimization(Address target, int deopt_id,
- RelocInfo::Mode rmode);
+ void CallBuiltinPointer(Register builtin_pointer) override;
+
+ void LoadCodeObjectEntry(Register destination, Register code_object) override;
+ void CallCodeObject(Register code_object) override;
+ void JumpCodeObject(Register code_object) override;
+
+ // Generates an instruction sequence such that the return address points to
+ // the instruction following the call.
+ // The return address on the stack is used by frame iteration.
+ void StoreReturnAddressAndCall(Register target);
+
+ void CallForDeoptimization(Address target, int deopt_id);
// Calls a C function.
// The called function is not allowed to trigger a
@@ -1140,8 +1124,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
//
// On successful conversion, the least significant 32 bits of the result are
// equivalent to the ECMA-262 operation "ToInt32".
- //
- // Only public for the test code in test-code-stubs-arm64.cc.
void TryConvertDoubleToInt64(Register result, DoubleRegister input,
Label* done);
@@ -1233,7 +1215,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
bool allow_macro_instructions_ = true;
#endif
-
// Scratch registers available for use by the MacroAssembler.
CPURegList tmp_list_ = DefaultTmpList();
CPURegList fptmp_list_ = DefaultFPTmpList();
@@ -1259,22 +1240,19 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void LoadStorePairMacro(const CPURegister& rt, const CPURegister& rt2,
const MemOperand& addr, LoadStorePairOp op);
- static bool IsNearCallOffset(int64_t offset);
void JumpHelper(int64_t offset, RelocInfo::Mode rmode, Condition cond = al);
+
+ void CallRecordWriteStub(Register object, Register address,
+ RememberedSetAction remembered_set_action,
+ SaveFPRegsMode fp_mode, Handle<Code> code_target,
+ Address wasm_target);
};
-class MacroAssembler : public TurboAssembler {
+class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
public:
- MacroAssembler(const AssemblerOptions& options, void* buffer, int size)
- : TurboAssembler(options, buffer, size) {}
-
- MacroAssembler(Isolate* isolate, void* buffer, int size,
- CodeObjectRequired create_code_object)
- : MacroAssembler(isolate, AssemblerOptions::Default(isolate), buffer,
- size, create_code_object) {}
-
- MacroAssembler(Isolate* isolate, const AssemblerOptions& options,
- void* buffer, int size, CodeObjectRequired create_code_object);
+ template <typename... Args>
+ explicit MacroAssembler(Args&&... args)
+ : TurboAssembler(std::forward<Args>(args)...) {}
// Instruction set functions ------------------------------------------------
// Logical macros.
@@ -1727,9 +1705,6 @@ class MacroAssembler : public TurboAssembler {
// ---- Calling / Jumping helpers ----
- void CallStub(CodeStub* stub);
- void TailCallStub(CodeStub* stub);
-
void CallRuntime(const Runtime::Function* f,
int num_arguments,
SaveFPRegsMode save_doubles = kDontSaveFPRegs);
@@ -1896,9 +1871,7 @@ class MacroAssembler : public TurboAssembler {
const Register& scratch2);
// Load the global proxy from the current context.
- void LoadGlobalProxy(Register dst) {
- LoadNativeContextSlot(Context::GLOBAL_PROXY_INDEX, dst);
- }
+ void LoadGlobalProxy(Register dst);
// ---------------------------------------------------------------------------
// In-place weak references.
@@ -2021,6 +1994,8 @@ class MacroAssembler : public TurboAssembler {
  // branch instructions with a range of +-128MB. If that becomes too little
// (!), the mechanism can be extended to generate special veneers for really
// far targets.
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(MacroAssembler);
};
diff --git a/deps/v8/src/arm64/register-arm64.cc b/deps/v8/src/arm64/register-arm64.cc
new file mode 100644
index 0000000000..cf1b320624
--- /dev/null
+++ b/deps/v8/src/arm64/register-arm64.cc
@@ -0,0 +1,298 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#if V8_TARGET_ARCH_ARM64
+
+#include "src/arm64/register-arm64.h"
+
+namespace v8 {
+namespace internal {
+
+VectorFormat VectorFormatHalfWidth(VectorFormat vform) {
+ DCHECK(vform == kFormat8H || vform == kFormat4S || vform == kFormat2D ||
+ vform == kFormatH || vform == kFormatS || vform == kFormatD);
+ switch (vform) {
+ case kFormat8H:
+ return kFormat8B;
+ case kFormat4S:
+ return kFormat4H;
+ case kFormat2D:
+ return kFormat2S;
+ case kFormatH:
+ return kFormatB;
+ case kFormatS:
+ return kFormatH;
+ case kFormatD:
+ return kFormatS;
+ default:
+ UNREACHABLE();
+ }
+}
+
+VectorFormat VectorFormatDoubleWidth(VectorFormat vform) {
+ DCHECK(vform == kFormat8B || vform == kFormat4H || vform == kFormat2S ||
+ vform == kFormatB || vform == kFormatH || vform == kFormatS);
+ switch (vform) {
+ case kFormat8B:
+ return kFormat8H;
+ case kFormat4H:
+ return kFormat4S;
+ case kFormat2S:
+ return kFormat2D;
+ case kFormatB:
+ return kFormatH;
+ case kFormatH:
+ return kFormatS;
+ case kFormatS:
+ return kFormatD;
+ default:
+ UNREACHABLE();
+ }
+}
+
+VectorFormat VectorFormatFillQ(VectorFormat vform) {
+ switch (vform) {
+ case kFormatB:
+ case kFormat8B:
+ case kFormat16B:
+ return kFormat16B;
+ case kFormatH:
+ case kFormat4H:
+ case kFormat8H:
+ return kFormat8H;
+ case kFormatS:
+ case kFormat2S:
+ case kFormat4S:
+ return kFormat4S;
+ case kFormatD:
+ case kFormat1D:
+ case kFormat2D:
+ return kFormat2D;
+ default:
+ UNREACHABLE();
+ }
+}
+
+VectorFormat VectorFormatHalfWidthDoubleLanes(VectorFormat vform) {
+ switch (vform) {
+ case kFormat4H:
+ return kFormat8B;
+ case kFormat8H:
+ return kFormat16B;
+ case kFormat2S:
+ return kFormat4H;
+ case kFormat4S:
+ return kFormat8H;
+ case kFormat1D:
+ return kFormat2S;
+ case kFormat2D:
+ return kFormat4S;
+ default:
+ UNREACHABLE();
+ }
+}
+
+VectorFormat VectorFormatDoubleLanes(VectorFormat vform) {
+ DCHECK(vform == kFormat8B || vform == kFormat4H || vform == kFormat2S);
+ switch (vform) {
+ case kFormat8B:
+ return kFormat16B;
+ case kFormat4H:
+ return kFormat8H;
+ case kFormat2S:
+ return kFormat4S;
+ default:
+ UNREACHABLE();
+ }
+}
+
+VectorFormat VectorFormatHalfLanes(VectorFormat vform) {
+ DCHECK(vform == kFormat16B || vform == kFormat8H || vform == kFormat4S);
+ switch (vform) {
+ case kFormat16B:
+ return kFormat8B;
+ case kFormat8H:
+ return kFormat4H;
+ case kFormat4S:
+ return kFormat2S;
+ default:
+ UNREACHABLE();
+ }
+}
+
+VectorFormat ScalarFormatFromLaneSize(int laneSize) {
+ switch (laneSize) {
+ case 8:
+ return kFormatB;
+ case 16:
+ return kFormatH;
+ case 32:
+ return kFormatS;
+ case 64:
+ return kFormatD;
+ default:
+ UNREACHABLE();
+ }
+}
+
+VectorFormat ScalarFormatFromFormat(VectorFormat vform) {
+ return ScalarFormatFromLaneSize(LaneSizeInBitsFromFormat(vform));
+}
+
+unsigned RegisterSizeInBytesFromFormat(VectorFormat vform) {
+ return RegisterSizeInBitsFromFormat(vform) / 8;
+}
+
+unsigned RegisterSizeInBitsFromFormat(VectorFormat vform) {
+ DCHECK_NE(vform, kFormatUndefined);
+ switch (vform) {
+ case kFormatB:
+ return kBRegSizeInBits;
+ case kFormatH:
+ return kHRegSizeInBits;
+ case kFormatS:
+ return kSRegSizeInBits;
+ case kFormatD:
+ return kDRegSizeInBits;
+ case kFormat8B:
+ case kFormat4H:
+ case kFormat2S:
+ case kFormat1D:
+ return kDRegSizeInBits;
+ default:
+ return kQRegSizeInBits;
+ }
+}
+
+unsigned LaneSizeInBitsFromFormat(VectorFormat vform) {
+ DCHECK_NE(vform, kFormatUndefined);
+ switch (vform) {
+ case kFormatB:
+ case kFormat8B:
+ case kFormat16B:
+ return 8;
+ case kFormatH:
+ case kFormat4H:
+ case kFormat8H:
+ return 16;
+ case kFormatS:
+ case kFormat2S:
+ case kFormat4S:
+ return 32;
+ case kFormatD:
+ case kFormat1D:
+ case kFormat2D:
+ return 64;
+ default:
+ UNREACHABLE();
+ }
+}
+
+int LaneSizeInBytesFromFormat(VectorFormat vform) {
+ return LaneSizeInBitsFromFormat(vform) / 8;
+}
+
+int LaneSizeInBytesLog2FromFormat(VectorFormat vform) {
+ DCHECK_NE(vform, kFormatUndefined);
+ switch (vform) {
+ case kFormatB:
+ case kFormat8B:
+ case kFormat16B:
+ return 0;
+ case kFormatH:
+ case kFormat4H:
+ case kFormat8H:
+ return 1;
+ case kFormatS:
+ case kFormat2S:
+ case kFormat4S:
+ return 2;
+ case kFormatD:
+ case kFormat1D:
+ case kFormat2D:
+ return 3;
+ default:
+ UNREACHABLE();
+ }
+}
+
+int LaneCountFromFormat(VectorFormat vform) {
+ DCHECK_NE(vform, kFormatUndefined);
+ switch (vform) {
+ case kFormat16B:
+ return 16;
+ case kFormat8B:
+ case kFormat8H:
+ return 8;
+ case kFormat4H:
+ case kFormat4S:
+ return 4;
+ case kFormat2S:
+ case kFormat2D:
+ return 2;
+ case kFormat1D:
+ case kFormatB:
+ case kFormatH:
+ case kFormatS:
+ case kFormatD:
+ return 1;
+ default:
+ UNREACHABLE();
+ }
+}
+
+int MaxLaneCountFromFormat(VectorFormat vform) {
+ DCHECK_NE(vform, kFormatUndefined);
+ switch (vform) {
+ case kFormatB:
+ case kFormat8B:
+ case kFormat16B:
+ return 16;
+ case kFormatH:
+ case kFormat4H:
+ case kFormat8H:
+ return 8;
+ case kFormatS:
+ case kFormat2S:
+ case kFormat4S:
+ return 4;
+ case kFormatD:
+ case kFormat1D:
+ case kFormat2D:
+ return 2;
+ default:
+ UNREACHABLE();
+ }
+}
+
+// Does 'vform' indicate a vector format or a scalar format?
+bool IsVectorFormat(VectorFormat vform) {
+ DCHECK_NE(vform, kFormatUndefined);
+ switch (vform) {
+ case kFormatB:
+ case kFormatH:
+ case kFormatS:
+ case kFormatD:
+ return false;
+ default:
+ return true;
+ }
+}
+
+int64_t MaxIntFromFormat(VectorFormat vform) {
+ return INT64_MAX >> (64 - LaneSizeInBitsFromFormat(vform));
+}
+
+int64_t MinIntFromFormat(VectorFormat vform) {
+ return INT64_MIN >> (64 - LaneSizeInBitsFromFormat(vform));
+}
+
+uint64_t MaxUintFromFormat(VectorFormat vform) {
+ return UINT64_MAX >> (64 - LaneSizeInBitsFromFormat(vform));
+}
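+
+// Illustrative example (editor's sketch, not part of the original patch): for
+// a 16-bit lane format such as kFormat8H, the shifts above yield
+//   MaxIntFromFormat  == INT64_MAX  >> 48 ==  0x7FFF  (32767)
+//   MinIntFromFormat  == INT64_MIN  >> 48 == -0x8000  (-32768)
+//   MaxUintFromFormat == UINT64_MAX >> 48 ==  0xFFFF  (65535)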
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_TARGET_ARCH_ARM64
diff --git a/deps/v8/src/arm64/register-arm64.h b/deps/v8/src/arm64/register-arm64.h
new file mode 100644
index 0000000000..77310213f2
--- /dev/null
+++ b/deps/v8/src/arm64/register-arm64.h
@@ -0,0 +1,752 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_ARM64_REGISTER_ARM64_H_
+#define V8_ARM64_REGISTER_ARM64_H_
+
+#include "src/arm64/utils-arm64.h"
+#include "src/globals.h"
+#include "src/register.h"
+#include "src/reglist.h"
+
+namespace v8 {
+namespace internal {
+
+// -----------------------------------------------------------------------------
+// Registers.
+// clang-format off
+#define GENERAL_REGISTER_CODE_LIST(R) \
+ R(0) R(1) R(2) R(3) R(4) R(5) R(6) R(7) \
+ R(8) R(9) R(10) R(11) R(12) R(13) R(14) R(15) \
+ R(16) R(17) R(18) R(19) R(20) R(21) R(22) R(23) \
+ R(24) R(25) R(26) R(27) R(28) R(29) R(30) R(31)
+
+#define GENERAL_REGISTERS(R) \
+ R(x0) R(x1) R(x2) R(x3) R(x4) R(x5) R(x6) R(x7) \
+ R(x8) R(x9) R(x10) R(x11) R(x12) R(x13) R(x14) R(x15) \
+ R(x16) R(x17) R(x18) R(x19) R(x20) R(x21) R(x22) R(x23) \
+ R(x24) R(x25) R(x26) R(x27) R(x28) R(x29) R(x30) R(x31)
+
+#if defined(V8_OS_WIN)
+// x18 is reserved as a platform register on Windows ARM64.
+#define ALLOCATABLE_GENERAL_REGISTERS(R) \
+ R(x0) R(x1) R(x2) R(x3) R(x4) R(x5) R(x6) R(x7) \
+ R(x8) R(x9) R(x10) R(x11) R(x12) R(x13) R(x14) R(x15) \
+ R(x19) R(x20) R(x21) R(x22) R(x23) R(x24) R(x25) \
+ R(x27) R(x28)
+#else
+#define ALLOCATABLE_GENERAL_REGISTERS(R) \
+ R(x0) R(x1) R(x2) R(x3) R(x4) R(x5) R(x6) R(x7) \
+ R(x8) R(x9) R(x10) R(x11) R(x12) R(x13) R(x14) R(x15) \
+ R(x18) R(x19) R(x20) R(x21) R(x22) R(x23) R(x24) R(x25) \
+ R(x27) R(x28)
+#endif
+
+#define FLOAT_REGISTERS(V) \
+ V(s0) V(s1) V(s2) V(s3) V(s4) V(s5) V(s6) V(s7) \
+ V(s8) V(s9) V(s10) V(s11) V(s12) V(s13) V(s14) V(s15) \
+ V(s16) V(s17) V(s18) V(s19) V(s20) V(s21) V(s22) V(s23) \
+ V(s24) V(s25) V(s26) V(s27) V(s28) V(s29) V(s30) V(s31)
+
+#define DOUBLE_REGISTERS(R) \
+ R(d0) R(d1) R(d2) R(d3) R(d4) R(d5) R(d6) R(d7) \
+ R(d8) R(d9) R(d10) R(d11) R(d12) R(d13) R(d14) R(d15) \
+ R(d16) R(d17) R(d18) R(d19) R(d20) R(d21) R(d22) R(d23) \
+ R(d24) R(d25) R(d26) R(d27) R(d28) R(d29) R(d30) R(d31)
+
+#define SIMD128_REGISTERS(V) \
+ V(q0) V(q1) V(q2) V(q3) V(q4) V(q5) V(q6) V(q7) \
+ V(q8) V(q9) V(q10) V(q11) V(q12) V(q13) V(q14) V(q15) \
+ V(q16) V(q17) V(q18) V(q19) V(q20) V(q21) V(q22) V(q23) \
+ V(q24) V(q25) V(q26) V(q27) V(q28) V(q29) V(q30) V(q31)
+
+// Register d29 could be allocated, but we keep an even-length list here to
+// make stack alignment easier for save and restore.
+#define ALLOCATABLE_DOUBLE_REGISTERS(R) \
+ R(d0) R(d1) R(d2) R(d3) R(d4) R(d5) R(d6) R(d7) \
+ R(d8) R(d9) R(d10) R(d11) R(d12) R(d13) R(d14) R(d16) \
+ R(d17) R(d18) R(d19) R(d20) R(d21) R(d22) R(d23) R(d24) \
+ R(d25) R(d26) R(d27) R(d28)
+// clang-format on
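+
+// (Editor's note, inferred from the aliases declared later in this file:
+// d15 is reserved as fp_zero and d30/d31 as MacroAssembler scratch registers,
+// so they are absent from the allocatable list above; d29, aliased as
+// fp_fixed2, is also left out to preserve the even list length noted above.)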
+
+constexpr int kRegListSizeInBits = sizeof(RegList) * kBitsPerByte;
+
+const int kNumRegs = kNumberOfRegisters;
+// Registers x0-x17 are caller-saved.
+const int kNumJSCallerSaved = 18;
+const RegList kJSCallerSaved = 0x3ffff;
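+// (Editor's note: 0x3ffff sets bits 0 through 17, one bit per caller-saved
+// register x0-x17, matching kNumJSCallerSaved == 18.)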
+
+// Number of registers for which space is reserved in safepoints. Must be a
+// multiple of eight.
+// TODO(all): Refine this number.
+const int kNumSafepointRegisters = 32;
+
+// Define the list of registers actually saved at safepoints.
+// Note that the number of saved registers may be smaller than the reserved
+// space, i.e. kNumSafepointSavedRegisters <= kNumSafepointRegisters.
+#define kSafepointSavedRegisters CPURegList::GetSafepointSavedRegisters().list()
+#define kNumSafepointSavedRegisters \
+ CPURegList::GetSafepointSavedRegisters().Count()
+
+// Some CPURegister methods can return Register and VRegister types, so we
+// need to declare them in advance.
+class Register;
+class VRegister;
+
+enum RegisterCode {
+#define REGISTER_CODE(R) kRegCode_##R,
+ GENERAL_REGISTERS(REGISTER_CODE)
+#undef REGISTER_CODE
+ kRegAfterLast
+};
+
+class CPURegister : public RegisterBase<CPURegister, kRegAfterLast> {
+ public:
+ enum RegisterType { kRegister, kVRegister, kNoRegister };
+
+ static constexpr CPURegister no_reg() {
+ return CPURegister{0, 0, kNoRegister};
+ }
+
+ template <int code, int size, RegisterType type>
+ static constexpr CPURegister Create() {
+ static_assert(IsValid(code, size, type), "Cannot create invalid registers");
+ return CPURegister{code, size, type};
+ }
+
+ static CPURegister Create(int code, int size, RegisterType type) {
+ DCHECK(IsValid(code, size, type));
+ return CPURegister{code, size, type};
+ }
+
+ RegisterType type() const { return reg_type_; }
+ int SizeInBits() const {
+ DCHECK(IsValid());
+ return reg_size_;
+ }
+ int SizeInBytes() const {
+ DCHECK(IsValid());
+ DCHECK_EQ(SizeInBits() % 8, 0);
+ return reg_size_ / 8;
+ }
+ bool Is8Bits() const {
+ DCHECK(IsValid());
+ return reg_size_ == 8;
+ }
+ bool Is16Bits() const {
+ DCHECK(IsValid());
+ return reg_size_ == 16;
+ }
+ bool Is32Bits() const {
+ DCHECK(IsValid());
+ return reg_size_ == 32;
+ }
+ bool Is64Bits() const {
+ DCHECK(IsValid());
+ return reg_size_ == 64;
+ }
+ bool Is128Bits() const {
+ DCHECK(IsValid());
+ return reg_size_ == 128;
+ }
+ bool IsValid() const { return reg_type_ != kNoRegister; }
+ bool IsNone() const { return reg_type_ == kNoRegister; }
+ bool Is(const CPURegister& other) const {
+ return Aliases(other) && (reg_size_ == other.reg_size_);
+ }
+ bool Aliases(const CPURegister& other) const {
+ return (reg_code_ == other.reg_code_) && (reg_type_ == other.reg_type_);
+ }
+
+ bool IsZero() const;
+ bool IsSP() const;
+
+ bool IsRegister() const { return reg_type_ == kRegister; }
+ bool IsVRegister() const { return reg_type_ == kVRegister; }
+
+ bool IsFPRegister() const { return IsS() || IsD(); }
+
+ bool IsW() const { return IsRegister() && Is32Bits(); }
+ bool IsX() const { return IsRegister() && Is64Bits(); }
+
+ // These assertions ensure that the size and type of the register are as
+ // described. They do not consider the number of lanes that make up a vector.
+  // So, for example, Is8B() implies IsD(), and Is1D() implies IsD(), but
+  // IsD() does not imply Is1D() or Is8B().
+  // Check the number of lanes, i.e. the format of the vector, using methods
+  // such as Is8B(), Is1D(), etc. in the VRegister class.
+ bool IsV() const { return IsVRegister(); }
+ bool IsB() const { return IsV() && Is8Bits(); }
+ bool IsH() const { return IsV() && Is16Bits(); }
+ bool IsS() const { return IsV() && Is32Bits(); }
+ bool IsD() const { return IsV() && Is64Bits(); }
+ bool IsQ() const { return IsV() && Is128Bits(); }
+
+ Register Reg() const;
+ VRegister VReg() const;
+
+ Register X() const;
+ Register W() const;
+ VRegister V() const;
+ VRegister B() const;
+ VRegister H() const;
+ VRegister D() const;
+ VRegister S() const;
+ VRegister Q() const;
+
+ bool IsSameSizeAndType(const CPURegister& other) const;
+
+ bool is(const CPURegister& other) const { return Is(other); }
+ bool is_valid() const { return IsValid(); }
+
+ protected:
+ int reg_size_;
+ RegisterType reg_type_;
+
+#if defined(V8_OS_WIN) && !defined(__clang__)
+  // MSVC has problems parsing a template base class as a friend class.
+ friend RegisterBase;
+#else
+ friend class RegisterBase;
+#endif
+
+ constexpr CPURegister(int code, int size, RegisterType type)
+ : RegisterBase(code), reg_size_(size), reg_type_(type) {}
+
+ static constexpr bool IsValidRegister(int code, int size) {
+ return (size == kWRegSizeInBits || size == kXRegSizeInBits) &&
+ (code < kNumberOfRegisters || code == kSPRegInternalCode);
+ }
+
+ static constexpr bool IsValidVRegister(int code, int size) {
+ return (size == kBRegSizeInBits || size == kHRegSizeInBits ||
+ size == kSRegSizeInBits || size == kDRegSizeInBits ||
+ size == kQRegSizeInBits) &&
+ code < kNumberOfVRegisters;
+ }
+
+ static constexpr bool IsValid(int code, int size, RegisterType type) {
+ return (type == kRegister && IsValidRegister(code, size)) ||
+ (type == kVRegister && IsValidVRegister(code, size));
+ }
+
+ static constexpr bool IsNone(int code, int size, RegisterType type) {
+ return type == kNoRegister && code == 0 && size == 0;
+ }
+};
+
+ASSERT_TRIVIALLY_COPYABLE(CPURegister);
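+
+// Usage sketch (editor's illustration, not part of the original patch):
+//   constexpr CPURegister c =
+//       CPURegister::Create<0, kXRegSizeInBits, CPURegister::kRegister>();
+// The static_assert inside the templated Create() rejects invalid
+// <code, size, type> combinations at compile time; the non-template overload
+// checks the same constraints at runtime via DCHECK.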
+
+class Register : public CPURegister {
+ public:
+ static constexpr Register no_reg() { return Register(CPURegister::no_reg()); }
+
+ template <int code, int size>
+ static constexpr Register Create() {
+ return Register(CPURegister::Create<code, size, CPURegister::kRegister>());
+ }
+
+ static Register Create(int code, int size) {
+ return Register(CPURegister::Create(code, size, CPURegister::kRegister));
+ }
+
+ static Register XRegFromCode(unsigned code);
+ static Register WRegFromCode(unsigned code);
+
+ static Register from_code(int code) {
+ // Always return an X register.
+ return Register::Create(code, kXRegSizeInBits);
+ }
+
+ template <int code>
+ static Register from_code() {
+ // Always return an X register.
+ return Register::Create<code, kXRegSizeInBits>();
+ }
+
+ static const char* GetSpecialRegisterName(int code) {
+ return (code == kSPRegInternalCode) ? "sp" : "UNKNOWN";
+ }
+
+ private:
+ constexpr explicit Register(const CPURegister& r) : CPURegister(r) {}
+};
+
+ASSERT_TRIVIALLY_COPYABLE(Register);
+
+constexpr bool kPadArguments = true;
+constexpr bool kSimpleFPAliasing = true;
+constexpr bool kSimdMaskRegisters = false;
+
+enum DoubleRegisterCode {
+#define REGISTER_CODE(R) kDoubleCode_##R,
+ DOUBLE_REGISTERS(REGISTER_CODE)
+#undef REGISTER_CODE
+ kDoubleAfterLast
+};
+
+// Functions for handling NEON vector format information.
+enum VectorFormat {
+ kFormatUndefined = 0xffffffff,
+ kFormat8B = NEON_8B,
+ kFormat16B = NEON_16B,
+ kFormat4H = NEON_4H,
+ kFormat8H = NEON_8H,
+ kFormat2S = NEON_2S,
+ kFormat4S = NEON_4S,
+ kFormat1D = NEON_1D,
+ kFormat2D = NEON_2D,
+
+ // Scalar formats. We add the scalar bit to distinguish between scalar and
+ // vector enumerations; the bit is always set in the encoding of scalar ops
+ // and always clear for vector ops. Although kFormatD and kFormat1D appear
+ // to be the same, their meaning is subtly different. The first is a scalar
+ // operation, the second a vector operation that only affects one lane.
+ kFormatB = NEON_B | NEONScalar,
+ kFormatH = NEON_H | NEONScalar,
+ kFormatS = NEON_S | NEONScalar,
+ kFormatD = NEON_D | NEONScalar
+};
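+
+// Illustrative contrast (editor's note, not part of the original patch),
+// using the helpers declared below:
+//   IsVectorFormat(kFormatD)        // false: scalar double operation
+//   IsVectorFormat(kFormat1D)       // true:  one-lane vector operation
+//   LaneCountFromFormat(kFormatD)   // 1
+//   LaneCountFromFormat(kFormat1D)  // 1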
+
+VectorFormat VectorFormatHalfWidth(VectorFormat vform);
+VectorFormat VectorFormatDoubleWidth(VectorFormat vform);
+VectorFormat VectorFormatDoubleLanes(VectorFormat vform);
+VectorFormat VectorFormatHalfLanes(VectorFormat vform);
+VectorFormat ScalarFormatFromLaneSize(int lanesize);
+VectorFormat VectorFormatHalfWidthDoubleLanes(VectorFormat vform);
+VectorFormat VectorFormatFillQ(VectorFormat vform);
+VectorFormat ScalarFormatFromFormat(VectorFormat vform);
+unsigned RegisterSizeInBitsFromFormat(VectorFormat vform);
+unsigned RegisterSizeInBytesFromFormat(VectorFormat vform);
+int LaneSizeInBytesFromFormat(VectorFormat vform);
+unsigned LaneSizeInBitsFromFormat(VectorFormat vform);
+int LaneSizeInBytesLog2FromFormat(VectorFormat vform);
+int LaneCountFromFormat(VectorFormat vform);
+int MaxLaneCountFromFormat(VectorFormat vform);
+bool IsVectorFormat(VectorFormat vform);
+int64_t MaxIntFromFormat(VectorFormat vform);
+int64_t MinIntFromFormat(VectorFormat vform);
+uint64_t MaxUintFromFormat(VectorFormat vform);
+
+class VRegister : public CPURegister {
+ public:
+ static constexpr VRegister no_reg() {
+ return VRegister(CPURegister::no_reg(), 0);
+ }
+
+ template <int code, int size, int lane_count = 1>
+ static constexpr VRegister Create() {
+ static_assert(IsValidLaneCount(lane_count), "Invalid lane count");
+ return VRegister(CPURegister::Create<code, size, kVRegister>(), lane_count);
+ }
+
+ static VRegister Create(int code, int size, int lane_count = 1) {
+ DCHECK(IsValidLaneCount(lane_count));
+ return VRegister(CPURegister::Create(code, size, CPURegister::kVRegister),
+ lane_count);
+ }
+
+ static VRegister Create(int reg_code, VectorFormat format) {
+ int reg_size = RegisterSizeInBitsFromFormat(format);
+ int reg_count = IsVectorFormat(format) ? LaneCountFromFormat(format) : 1;
+ return VRegister::Create(reg_code, reg_size, reg_count);
+ }
+
+ static VRegister BRegFromCode(unsigned code);
+ static VRegister HRegFromCode(unsigned code);
+ static VRegister SRegFromCode(unsigned code);
+ static VRegister DRegFromCode(unsigned code);
+ static VRegister QRegFromCode(unsigned code);
+ static VRegister VRegFromCode(unsigned code);
+
+ VRegister V8B() const {
+ return VRegister::Create(code(), kDRegSizeInBits, 8);
+ }
+ VRegister V16B() const {
+ return VRegister::Create(code(), kQRegSizeInBits, 16);
+ }
+ VRegister V4H() const {
+ return VRegister::Create(code(), kDRegSizeInBits, 4);
+ }
+ VRegister V8H() const {
+ return VRegister::Create(code(), kQRegSizeInBits, 8);
+ }
+ VRegister V2S() const {
+ return VRegister::Create(code(), kDRegSizeInBits, 2);
+ }
+ VRegister V4S() const {
+ return VRegister::Create(code(), kQRegSizeInBits, 4);
+ }
+ VRegister V2D() const {
+ return VRegister::Create(code(), kQRegSizeInBits, 2);
+ }
+ VRegister V1D() const {
+ return VRegister::Create(code(), kDRegSizeInBits, 1);
+ }
+
+ bool Is8B() const { return (Is64Bits() && (lane_count_ == 8)); }
+ bool Is16B() const { return (Is128Bits() && (lane_count_ == 16)); }
+ bool Is4H() const { return (Is64Bits() && (lane_count_ == 4)); }
+ bool Is8H() const { return (Is128Bits() && (lane_count_ == 8)); }
+ bool Is2S() const { return (Is64Bits() && (lane_count_ == 2)); }
+ bool Is4S() const { return (Is128Bits() && (lane_count_ == 4)); }
+ bool Is1D() const { return (Is64Bits() && (lane_count_ == 1)); }
+ bool Is2D() const { return (Is128Bits() && (lane_count_ == 2)); }
+
+ // For consistency, we assert the number of lanes of these scalar registers,
+ // even though there are no vectors of equivalent total size with which they
+ // could alias.
+ bool Is1B() const {
+ DCHECK(!(Is8Bits() && IsVector()));
+ return Is8Bits();
+ }
+ bool Is1H() const {
+ DCHECK(!(Is16Bits() && IsVector()));
+ return Is16Bits();
+ }
+ bool Is1S() const {
+ DCHECK(!(Is32Bits() && IsVector()));
+ return Is32Bits();
+ }
+
+ bool IsLaneSizeB() const { return LaneSizeInBits() == kBRegSizeInBits; }
+ bool IsLaneSizeH() const { return LaneSizeInBits() == kHRegSizeInBits; }
+ bool IsLaneSizeS() const { return LaneSizeInBits() == kSRegSizeInBits; }
+ bool IsLaneSizeD() const { return LaneSizeInBits() == kDRegSizeInBits; }
+
+ bool IsScalar() const { return lane_count_ == 1; }
+ bool IsVector() const { return lane_count_ > 1; }
+
+ bool IsSameFormat(const VRegister& other) const {
+ return (reg_size_ == other.reg_size_) && (lane_count_ == other.lane_count_);
+ }
+
+ int LaneCount() const { return lane_count_; }
+
+ unsigned LaneSizeInBytes() const { return SizeInBytes() / lane_count_; }
+
+ unsigned LaneSizeInBits() const { return LaneSizeInBytes() * 8; }
+
+ static constexpr int kMaxNumRegisters = kNumberOfVRegisters;
+ STATIC_ASSERT(kMaxNumRegisters == kDoubleAfterLast);
+
+ static VRegister from_code(int code) {
+ // Always return a D register.
+ return VRegister::Create(code, kDRegSizeInBits);
+ }
+
+ private:
+ int lane_count_;
+
+ constexpr explicit VRegister(const CPURegister& r, int lane_count)
+ : CPURegister(r), lane_count_(lane_count) {}
+
+ static constexpr bool IsValidLaneCount(int lane_count) {
+ return base::bits::IsPowerOfTwo(lane_count) && lane_count <= 16;
+ }
+};
+
+ASSERT_TRIVIALLY_COPYABLE(VRegister);
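+
+// Usage sketch (editor's illustration, not part of the original patch): the
+// same register code can be reinterpreted in different formats.
+//   VRegister v = VRegister::Create(0, kFormat8B);  // d0 viewed as 8x8 bits
+//   v.Is8B();            // true: 64-bit register with 8 lanes
+//   v.LaneSizeInBits();  // 8
+//   v.V16B().Is16B();    // true: same code, 128-bit (Q-sized) view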
+
+// No*Reg is used to indicate an unused argument, or an error case. Note that
+// these all compare equal (using the Is() method). The Register and VRegister
+// variants are provided for convenience.
+constexpr Register NoReg = Register::no_reg();
+constexpr VRegister NoVReg = VRegister::no_reg();
+constexpr CPURegister NoCPUReg = CPURegister::no_reg();
+constexpr Register no_reg = NoReg;
+constexpr VRegister no_dreg = NoVReg;
+
+#define DEFINE_REGISTER(register_class, name, ...) \
+ constexpr register_class name = register_class::Create<__VA_ARGS__>()
+#define ALIAS_REGISTER(register_class, alias, name) \
+ constexpr register_class alias = name
+
+#define DEFINE_REGISTERS(N) \
+ DEFINE_REGISTER(Register, w##N, N, kWRegSizeInBits); \
+ DEFINE_REGISTER(Register, x##N, N, kXRegSizeInBits);
+GENERAL_REGISTER_CODE_LIST(DEFINE_REGISTERS)
+#undef DEFINE_REGISTERS
+
+DEFINE_REGISTER(Register, wsp, kSPRegInternalCode, kWRegSizeInBits);
+DEFINE_REGISTER(Register, sp, kSPRegInternalCode, kXRegSizeInBits);
+
+#define DEFINE_VREGISTERS(N) \
+ DEFINE_REGISTER(VRegister, b##N, N, kBRegSizeInBits); \
+ DEFINE_REGISTER(VRegister, h##N, N, kHRegSizeInBits); \
+ DEFINE_REGISTER(VRegister, s##N, N, kSRegSizeInBits); \
+ DEFINE_REGISTER(VRegister, d##N, N, kDRegSizeInBits); \
+ DEFINE_REGISTER(VRegister, q##N, N, kQRegSizeInBits); \
+ DEFINE_REGISTER(VRegister, v##N, N, kQRegSizeInBits);
+GENERAL_REGISTER_CODE_LIST(DEFINE_VREGISTERS)
+#undef DEFINE_VREGISTERS
+
+#undef DEFINE_REGISTER
+
+// Register aliases.
+ALIAS_REGISTER(VRegister, v8_, v8); // Avoid conflicts with namespace v8.
+ALIAS_REGISTER(Register, ip0, x16);
+ALIAS_REGISTER(Register, ip1, x17);
+ALIAS_REGISTER(Register, wip0, w16);
+ALIAS_REGISTER(Register, wip1, w17);
+// Root register.
+ALIAS_REGISTER(Register, kRootRegister, x26);
+ALIAS_REGISTER(Register, rr, x26);
+// Context pointer register.
+ALIAS_REGISTER(Register, cp, x27);
+ALIAS_REGISTER(Register, fp, x29);
+ALIAS_REGISTER(Register, lr, x30);
+ALIAS_REGISTER(Register, xzr, x31);
+ALIAS_REGISTER(Register, wzr, w31);
+
+// Register used for padding stack slots.
+ALIAS_REGISTER(Register, padreg, x31);
+
+// Keeps the 0 double value.
+ALIAS_REGISTER(VRegister, fp_zero, d15);
+// MacroAssembler fixed V Registers.
+ALIAS_REGISTER(VRegister, fp_fixed1, d28);
+ALIAS_REGISTER(VRegister, fp_fixed2, d29);
+
+// MacroAssembler scratch V registers.
+ALIAS_REGISTER(VRegister, fp_scratch, d30);
+ALIAS_REGISTER(VRegister, fp_scratch1, d30);
+ALIAS_REGISTER(VRegister, fp_scratch2, d31);
+
+#undef ALIAS_REGISTER
+
+// AreAliased returns true if any of the named registers overlap. Arguments set
+// to NoReg are ignored. The system stack pointer may be specified.
+bool AreAliased(const CPURegister& reg1, const CPURegister& reg2,
+ const CPURegister& reg3 = NoReg,
+ const CPURegister& reg4 = NoReg,
+ const CPURegister& reg5 = NoReg,
+ const CPURegister& reg6 = NoReg,
+ const CPURegister& reg7 = NoReg,
+ const CPURegister& reg8 = NoReg);
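+
+// For instance (editor's note, not part of the original patch):
+// AreAliased(x0, w0) is true because both name register code 0, while
+// AreAliased(x0, x1) is false; arguments left as the NoReg default are
+// skipped entirely.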
+
+// AreSameSizeAndType returns true if all of the specified registers have the
+// same size, and are of the same type. The system stack pointer may be
+// specified. Arguments set to NoReg are ignored, as are any subsequent
+// arguments. At least one argument (reg1) must be valid (not NoCPUReg).
+bool AreSameSizeAndType(
+ const CPURegister& reg1, const CPURegister& reg2 = NoCPUReg,
+ const CPURegister& reg3 = NoCPUReg, const CPURegister& reg4 = NoCPUReg,
+ const CPURegister& reg5 = NoCPUReg, const CPURegister& reg6 = NoCPUReg,
+ const CPURegister& reg7 = NoCPUReg, const CPURegister& reg8 = NoCPUReg);
+
+// AreSameFormat returns true if all of the specified VRegisters have the same
+// vector format. Arguments set to NoVReg are ignored, as are any subsequent
+// arguments. At least one argument (reg1) must be valid (not NoVReg).
+bool AreSameFormat(const VRegister& reg1, const VRegister& reg2,
+ const VRegister& reg3 = NoVReg,
+ const VRegister& reg4 = NoVReg);
+
+// AreConsecutive returns true if all of the specified VRegisters are
+// consecutive in the register file. Arguments may be set to NoVReg, and if so,
+// subsequent arguments must also be NoVReg. At least one argument (reg1) must
+// be valid (not NoVReg).
+bool AreConsecutive(const VRegister& reg1, const VRegister& reg2,
+ const VRegister& reg3 = NoVReg,
+ const VRegister& reg4 = NoVReg);
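+
+// E.g. (editor's note, not part of the original patch):
+// AreSameFormat(v0.V4S(), v1.V4S()) holds, and AreConsecutive(v3, v4, v5)
+// holds because register codes 3, 4 and 5 are adjacent in the register file.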
+
+typedef VRegister FloatRegister;
+typedef VRegister DoubleRegister;
+typedef VRegister Simd128Register;
+
+// -----------------------------------------------------------------------------
+// Lists of registers.
+class CPURegList {
+ public:
+ template <typename... CPURegisters>
+ explicit CPURegList(CPURegister reg0, CPURegisters... regs)
+ : list_(CPURegister::ListOf(reg0, regs...)),
+ size_(reg0.SizeInBits()),
+ type_(reg0.type()) {
+ DCHECK(AreSameSizeAndType(reg0, regs...));
+ DCHECK(IsValid());
+ }
+
+ CPURegList(CPURegister::RegisterType type, int size, RegList list)
+ : list_(list), size_(size), type_(type) {
+ DCHECK(IsValid());
+ }
+
+ CPURegList(CPURegister::RegisterType type, int size, int first_reg,
+ int last_reg)
+ : size_(size), type_(type) {
+ DCHECK(
+ ((type == CPURegister::kRegister) && (last_reg < kNumberOfRegisters)) ||
+ ((type == CPURegister::kVRegister) &&
+ (last_reg < kNumberOfVRegisters)));
+ DCHECK(last_reg >= first_reg);
+ list_ = (1ULL << (last_reg + 1)) - 1;
+ list_ &= ~((1ULL << first_reg) - 1);
+ DCHECK(IsValid());
+ }
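+
+  // (Editor's example, not part of the original patch: first_reg = 0 and
+  // last_reg = 3 give list_ = (1ULL << 4) - 1 = 0b1111, i.e. {x0..x3}; the
+  // second mask then clears any bits below first_reg.)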
+
+ CPURegister::RegisterType type() const {
+ DCHECK(IsValid());
+ return type_;
+ }
+
+ RegList list() const {
+ DCHECK(IsValid());
+ return list_;
+ }
+
+ inline void set_list(RegList new_list) {
+ DCHECK(IsValid());
+ list_ = new_list;
+ }
+
+ // Combine another CPURegList into this one. Registers that already exist in
+ // this list are left unchanged. The type and size of the registers in the
+ // 'other' list must match those in this list.
+ void Combine(const CPURegList& other);
+
+ // Remove every register in the other CPURegList from this one. Registers that
+ // do not exist in this list are ignored. The type of the registers in the
+ // 'other' list must match those in this list.
+ void Remove(const CPURegList& other);
+
+ // Variants of Combine and Remove which take CPURegisters.
+ void Combine(const CPURegister& other);
+ void Remove(const CPURegister& other1, const CPURegister& other2 = NoCPUReg,
+ const CPURegister& other3 = NoCPUReg,
+ const CPURegister& other4 = NoCPUReg);
+
+ // Variants of Combine and Remove which take a single register by its code;
+  // the type and size of the register are inferred from this list.
+ void Combine(int code);
+ void Remove(int code);
+
+ // Remove all callee-saved registers from the list. This can be useful when
+ // preparing registers for an AAPCS64 function call, for example.
+ void RemoveCalleeSaved();
+
+ CPURegister PopLowestIndex();
+ CPURegister PopHighestIndex();
+
+ // AAPCS64 callee-saved registers.
+ static CPURegList GetCalleeSaved(int size = kXRegSizeInBits);
+ static CPURegList GetCalleeSavedV(int size = kDRegSizeInBits);
+
+ // AAPCS64 caller-saved registers. Note that this includes lr.
+ // TODO(all): Determine how we handle d8-d15 being callee-saved, but the top
+  // 64 bits being caller-saved.
+ static CPURegList GetCallerSaved(int size = kXRegSizeInBits);
+ static CPURegList GetCallerSavedV(int size = kDRegSizeInBits);
+
+ // Registers saved as safepoints.
+ static CPURegList GetSafepointSavedRegisters();
+
+ bool IsEmpty() const {
+ DCHECK(IsValid());
+ return list_ == 0;
+ }
+
+ bool IncludesAliasOf(const CPURegister& other1,
+ const CPURegister& other2 = NoCPUReg,
+ const CPURegister& other3 = NoCPUReg,
+ const CPURegister& other4 = NoCPUReg) const {
+ DCHECK(IsValid());
+ RegList list = 0;
+ if (!other1.IsNone() && (other1.type() == type_)) list |= other1.bit();
+ if (!other2.IsNone() && (other2.type() == type_)) list |= other2.bit();
+ if (!other3.IsNone() && (other3.type() == type_)) list |= other3.bit();
+ if (!other4.IsNone() && (other4.type() == type_)) list |= other4.bit();
+ return (list_ & list) != 0;
+ }
+
+ int Count() const {
+ DCHECK(IsValid());
+ return CountSetBits(list_, kRegListSizeInBits);
+ }
+
+ int RegisterSizeInBits() const {
+ DCHECK(IsValid());
+ return size_;
+ }
+
+ int RegisterSizeInBytes() const {
+ int size_in_bits = RegisterSizeInBits();
+ DCHECK_EQ(size_in_bits % kBitsPerByte, 0);
+ return size_in_bits / kBitsPerByte;
+ }
+
+ int TotalSizeInBytes() const {
+ DCHECK(IsValid());
+ return RegisterSizeInBytes() * Count();
+ }
+
+ private:
+ RegList list_;
+ int size_;
+ CPURegister::RegisterType type_;
+
+ bool IsValid() const {
+ constexpr RegList kValidRegisters{0x8000000ffffffff};
+ constexpr RegList kValidVRegisters{0x0000000ffffffff};
+ switch (type_) {
+ case CPURegister::kRegister:
+ return (list_ & kValidRegisters) == list_;
+ case CPURegister::kVRegister:
+ return (list_ & kValidVRegisters) == list_;
+ case CPURegister::kNoRegister:
+ return list_ == 0;
+ default:
+ UNREACHABLE();
+ }
+ }
+};
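+
+// Usage sketch (editor's illustration, not part of the original patch):
+//   CPURegList saved(x0, x1, x2);
+//   saved.Combine(x3);                            // {x0, x1, x2, x3}
+//   CPURegister lowest = saved.PopLowestIndex();  // x0; {x1, x2, x3} remain
+//   saved.Remove(x2);                             // {x1, x3}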
+
+// AAPCS64 callee-saved registers.
+#define kCalleeSaved CPURegList::GetCalleeSaved()
+#define kCalleeSavedV CPURegList::GetCalleeSavedV()
+
+// AAPCS64 caller-saved registers. Note that this includes lr.
+#define kCallerSaved CPURegList::GetCallerSaved()
+#define kCallerSavedV CPURegList::GetCallerSavedV()
+
+// Define a {RegisterName} method for {CPURegister}.
+DEFINE_REGISTER_NAMES(CPURegister, GENERAL_REGISTERS);
+
+// Give alias names to registers for calling conventions.
+constexpr Register kReturnRegister0 = x0;
+constexpr Register kReturnRegister1 = x1;
+constexpr Register kReturnRegister2 = x2;
+constexpr Register kJSFunctionRegister = x1;
+constexpr Register kContextRegister = cp;
+constexpr Register kAllocateSizeRegister = x1;
+
+#if defined(V8_OS_WIN)
+// x18 is reserved as a platform register on Windows ARM64.
+constexpr Register kSpeculationPoisonRegister = x23;
+#else
+constexpr Register kSpeculationPoisonRegister = x18;
+#endif
+
+constexpr Register kInterpreterAccumulatorRegister = x0;
+constexpr Register kInterpreterBytecodeOffsetRegister = x19;
+constexpr Register kInterpreterBytecodeArrayRegister = x20;
+constexpr Register kInterpreterDispatchTableRegister = x21;
+
+constexpr Register kJavaScriptCallArgCountRegister = x0;
+constexpr Register kJavaScriptCallCodeStartRegister = x2;
+constexpr Register kJavaScriptCallTargetRegister = kJSFunctionRegister;
+constexpr Register kJavaScriptCallNewTargetRegister = x3;
+constexpr Register kJavaScriptCallExtraArg1Register = x2;
+
+constexpr Register kOffHeapTrampolineRegister = ip0;
+constexpr Register kRuntimeCallFunctionRegister = x1;
+constexpr Register kRuntimeCallArgCountRegister = x0;
+constexpr Register kRuntimeCallArgvRegister = x11;
+constexpr Register kWasmInstanceRegister = x7;
+constexpr Register kWasmCompileLazyFuncIndexRegister = x8;
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_ARM64_REGISTER_ARM64_H_
diff --git a/deps/v8/src/arm64/simulator-arm64.cc b/deps/v8/src/arm64/simulator-arm64.cc
index 5df4361c1b..aa36de4afa 100644
--- a/deps/v8/src/arm64/simulator-arm64.cc
+++ b/deps/v8/src/arm64/simulator-arm64.cc
@@ -2,17 +2,18 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/arm64/simulator-arm64.h"
+
+#if defined(USE_SIMULATOR)
+
#include <stdlib.h>
#include <cmath>
#include <cstdarg>
#include <type_traits>
-#if V8_TARGET_ARCH_ARM64
-
#include "src/arm64/decoder-arm64-inl.h"
-#include "src/arm64/simulator-arm64.h"
#include "src/assembler-inl.h"
-#include "src/codegen.h"
+#include "src/base/lazy-instance.h"
#include "src/disasm.h"
#include "src/macro-assembler.h"
#include "src/objects-inl.h"
@@ -22,8 +23,6 @@
namespace v8 {
namespace internal {
-#if defined(USE_SIMULATOR)
-
// This macro provides a platform independent use of sscanf. The reason for
// SScanF not being implemented in a platform independent way through
@@ -58,9 +57,8 @@ TEXT_COLOUR clr_debug_number = FLAG_log_colour ? COLOUR_BOLD(YELLOW) : "";
TEXT_COLOUR clr_debug_message = FLAG_log_colour ? COLOUR(YELLOW) : "";
TEXT_COLOUR clr_printf = FLAG_log_colour ? COLOUR(GREEN) : "";
-// static
-base::LazyInstance<Simulator::GlobalMonitor>::type Simulator::global_monitor_ =
- LAZY_INSTANCE_INITIALIZER;
+DEFINE_LAZY_LEAKY_OBJECT_GETTER(Simulator::GlobalMonitor,
+ Simulator::GlobalMonitor::Get);
// This is basically the same as PrintF, with a guard for FLAG_trace_sim.
void Simulator::TraceSim(const char* format, ...) {
@@ -277,7 +275,7 @@ uintptr_t Simulator::StackLimit(uintptr_t c_limit) const {
// The simulator uses a separate JS stack. If we have exhausted the C stack,
// we also drop down the JS limit to reflect the exhaustion on the JS stack.
if (GetCurrentStackPosition() < c_limit) {
- return reinterpret_cast<uintptr_t>(get_sp());
+ return get_sp();
}
// Otherwise the limit is the JS stack. Leave a safety margin of 1024 bytes
@@ -332,7 +330,7 @@ void Simulator::Init(FILE* stream) {
stack_limit_ = stack_ + stack_protection_size_;
uintptr_t tos = stack_ + stack_size_ - stack_protection_size_;
// The stack pointer must be 16-byte aligned.
- set_sp(tos & ~0xFUL);
+ set_sp(tos & ~0xFULL);
stream_ = stream;
print_disasm_ = new PrintDisassembler(stream_);
@@ -362,13 +360,13 @@ void Simulator::ResetState() {
set_lr(kEndOfSimAddress);
// Reset debug helpers.
- breakpoints_.empty();
+ breakpoints_.clear();
break_on_next_ = false;
}
Simulator::~Simulator() {
- global_monitor_.Pointer()->RemoveProcessor(&global_monitor_processor_);
+ GlobalMonitor::Get()->RemoveProcessor(&global_monitor_processor_);
delete[] reinterpret_cast<byte*>(stack_);
if (FLAG_log_instruction_stats) {
delete instrument_;
@@ -403,6 +401,14 @@ void Simulator::RunFrom(Instruction* start) {
// uses the ObjectPair structure.
// The simulator assumes all runtime calls return two 64-bits values. If they
// don't, register x1 is clobbered. This is fine because x1 is caller-saved.
+#if defined(V8_OS_WIN)
+typedef int64_t (*SimulatorRuntimeCall_ReturnPtr)(int64_t arg0, int64_t arg1,
+ int64_t arg2, int64_t arg3,
+ int64_t arg4, int64_t arg5,
+ int64_t arg6, int64_t arg7,
+ int64_t arg8);
+#endif
+
typedef ObjectPair (*SimulatorRuntimeCall)(int64_t arg0, int64_t arg1,
int64_t arg2, int64_t arg3,
int64_t arg4, int64_t arg5,
@@ -464,12 +470,51 @@ void Simulator::DoRuntimeCall(Instruction* instr) {
break;
case ExternalReference::BUILTIN_CALL:
+#if defined(V8_OS_WIN)
+ {
+ // Object f(v8::internal::Arguments).
+ TraceSim("Type: BUILTIN_CALL\n");
+
+      // When this simulator runs on a Windows x64 host, a function with an
+      // ObjectPair return type receives an implicit pointer to caller-allocated
+      // memory for the ObjectPair as its return value. This diverges from the
+      // calling convention for functions that return a primitive type, so the
+      // two cases cannot share an implementation.
+
+ // We don't know how many arguments are being passed, but we can
+ // pass 8 without touching the stack. They will be ignored by the
+ // host function if they aren't used.
+ TraceSim(
+ "Arguments: "
+ "0x%016" PRIx64 ", 0x%016" PRIx64
+ ", "
+ "0x%016" PRIx64 ", 0x%016" PRIx64
+ ", "
+ "0x%016" PRIx64 ", 0x%016" PRIx64
+ ", "
+ "0x%016" PRIx64 ", 0x%016" PRIx64
+ ", "
+ "0x%016" PRIx64,
+ arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8);
+
+ SimulatorRuntimeCall_ReturnPtr target =
+ reinterpret_cast<SimulatorRuntimeCall_ReturnPtr>(external);
+
+ int64_t result =
+ target(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8);
+ TraceSim("Returned: 0x%16\n", result);
+#ifdef DEBUG
+ CorruptAllCallerSavedCPURegisters();
+#endif
+ set_xreg(0, result);
+
+ break;
+ }
+#endif
case ExternalReference::BUILTIN_CALL_PAIR: {
- // Object* f(v8::internal::Arguments) or
+ // Object f(v8::internal::Arguments) or
// ObjectPair f(v8::internal::Arguments).
TraceSim("Type: BUILTIN_CALL\n");
- SimulatorRuntimeCall target =
- reinterpret_cast<SimulatorRuntimeCall>(external);
// We don't know how many arguments are being passed, but we can
// pass 8 without touching the stack. They will be ignored by the
@@ -486,15 +531,17 @@ void Simulator::DoRuntimeCall(Instruction* instr) {
", "
"0x%016" PRIx64,
arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8);
+ SimulatorRuntimeCall target =
+ reinterpret_cast<SimulatorRuntimeCall>(external);
ObjectPair result =
target(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8);
- TraceSim("Returned: {%p, %p}\n", static_cast<void*>(result.x),
- static_cast<void*>(result.y));
+ TraceSim("Returned: {%p, %p}\n", reinterpret_cast<void*>(result.x),
+ reinterpret_cast<void*>(result.y));
#ifdef DEBUG
CorruptAllCallerSavedCPURegisters();
#endif
- set_xreg(0, reinterpret_cast<int64_t>(result.x));
- set_xreg(1, reinterpret_cast<int64_t>(result.y));
+ set_xreg(0, static_cast<int64_t>(result.x));
+ set_xreg(1, static_cast<int64_t>(result.y));
break;
}
@@ -1489,7 +1536,7 @@ void Simulator::VisitUnconditionalBranchToRegister(Instruction* instr) {
void Simulator::VisitTestBranch(Instruction* instr) {
unsigned bit_pos = (instr->ImmTestBranchBit5() << 5) |
instr->ImmTestBranchBit40();
- bool take_branch = ((xreg(instr->Rt()) & (1UL << bit_pos)) == 0);
+ bool take_branch = ((xreg(instr->Rt()) & (1ULL << bit_pos)) == 0);
switch (instr->Mask(TestBranchMask)) {
case TBZ: break;
case TBNZ: take_branch = !take_branch; break;
@@ -1731,12 +1778,12 @@ void Simulator::LoadStoreHelper(Instruction* instr,
uintptr_t stack = 0;
{
- base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
+ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
if (instr->IsLoad()) {
local_monitor_.NotifyLoad();
} else {
local_monitor_.NotifyStore();
- global_monitor_.Pointer()->NotifyStore_Locked(&global_monitor_processor_);
+ GlobalMonitor::Get()->NotifyStore_Locked(&global_monitor_processor_);
}
}
@@ -1858,19 +1905,19 @@ void Simulator::LoadStorePairHelper(Instruction* instr,
unsigned rt = instr->Rt();
unsigned rt2 = instr->Rt2();
unsigned addr_reg = instr->Rn();
- size_t access_size = 1 << instr->SizeLSPair();
+ size_t access_size = 1ULL << instr->SizeLSPair();
int64_t offset = instr->ImmLSPair() * access_size;
uintptr_t address = LoadStoreAddress(addr_reg, offset, addrmode);
uintptr_t address2 = address + access_size;
uintptr_t stack = 0;
{
- base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
+ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
if (instr->IsLoad()) {
local_monitor_.NotifyLoad();
} else {
local_monitor_.NotifyStore();
- global_monitor_.Pointer()->NotifyStore_Locked(&global_monitor_processor_);
+ GlobalMonitor::Get()->NotifyStore_Locked(&global_monitor_processor_);
}
}
@@ -2016,7 +2063,7 @@ void Simulator::VisitLoadLiteral(Instruction* instr) {
unsigned rt = instr->Rt();
{
- base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
+ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
local_monitor_.NotifyLoad();
}
@@ -2107,12 +2154,12 @@ void Simulator::VisitLoadStoreAcquireRelease(Instruction* instr) {
unsigned access_size = 1 << instr->LoadStoreXSizeLog2();
uintptr_t address = LoadStoreAddress(rn, 0, AddrMode::Offset);
DCHECK_EQ(address % access_size, 0);
- base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
+ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
if (is_load != 0) {
if (is_exclusive) {
local_monitor_.NotifyLoadExcl(address, get_transaction_size(access_size));
- global_monitor_.Pointer()->NotifyLoadExcl_Locked(
- address, &global_monitor_processor_);
+ GlobalMonitor::Get()->NotifyLoadExcl_Locked(address,
+ &global_monitor_processor_);
} else {
local_monitor_.NotifyLoad();
}
@@ -2144,7 +2191,7 @@ void Simulator::VisitLoadStoreAcquireRelease(Instruction* instr) {
DCHECK_NE(rs, rn);
if (local_monitor_.NotifyStoreExcl(address,
get_transaction_size(access_size)) &&
- global_monitor_.Pointer()->NotifyStoreExcl_Locked(
+ GlobalMonitor::Get()->NotifyStoreExcl_Locked(
address, &global_monitor_processor_)) {
switch (op) {
case STLXR_b:
@@ -2169,7 +2216,7 @@ void Simulator::VisitLoadStoreAcquireRelease(Instruction* instr) {
}
} else {
local_monitor_.NotifyStore();
- global_monitor_.Pointer()->NotifyStore_Locked(&global_monitor_processor_);
+ GlobalMonitor::Get()->NotifyStore_Locked(&global_monitor_processor_);
switch (op) {
case STLR_b:
MemoryWrite<uint8_t>(address, wreg(rt));
@@ -2266,7 +2313,7 @@ void Simulator::VisitConditionalSelect(Instruction* instr) {
break;
case CSNEG_w:
case CSNEG_x:
- new_val = -new_val;
+ new_val = (uint64_t)(-(int64_t)new_val);
break;
default: UNIMPLEMENTED();
}
@@ -2396,14 +2443,14 @@ static int64_t MultiplyHighSigned(int64_t u, int64_t v) {
uint64_t u0, v0, w0;
int64_t u1, v1, w1, w2, t;
- u0 = u & 0xFFFFFFFFL;
+ u0 = u & 0xFFFFFFFFLL;
u1 = u >> 32;
- v0 = v & 0xFFFFFFFFL;
+ v0 = v & 0xFFFFFFFFLL;
v1 = v >> 32;
w0 = u0 * v0;
t = u1 * v0 + (w0 >> 32);
- w1 = t & 0xFFFFFFFFL;
+ w1 = t & 0xFFFFFFFFLL;
w2 = t >> 32;
w1 = u0 * v1 + w1;
@@ -2458,7 +2505,7 @@ void Simulator::BitfieldHelper(Instruction* instr) {
mask = diff < reg_size - 1 ? (static_cast<T>(1) << (diff + 1)) - 1
: static_cast<T>(-1);
} else {
- uint64_t umask = ((1L << (S + 1)) - 1);
+ uint64_t umask = ((1LL << (S + 1)) - 1);
umask = (umask >> R) | (umask << (reg_size - R));
mask = static_cast<T>(umask);
diff += reg_size;
@@ -2973,7 +3020,11 @@ void Simulator::VisitSystem(Instruction* instr) {
default: UNIMPLEMENTED();
}
} else if (instr->Mask(MemBarrierFMask) == MemBarrierFixed) {
+#if defined(V8_OS_WIN)
+ MemoryBarrier();
+#else
__sync_synchronize();
+#endif
} else {
UNIMPLEMENTED();
}
@@ -3186,7 +3237,7 @@ void Simulator::Debug() {
int64_t value;
StdoutStream os;
if (GetValue(arg1, &value)) {
- Object* obj = reinterpret_cast<Object*>(value);
+ Object obj(value);
os << arg1 << ": \n";
#ifdef DEBUG
obj->Print(os);
@@ -3239,16 +3290,12 @@ void Simulator::Debug() {
while (cur < end) {
PrintF(" 0x%016" PRIx64 ": 0x%016" PRIx64 " %10" PRId64,
reinterpret_cast<uint64_t>(cur), *cur, *cur);
- HeapObject* obj = reinterpret_cast<HeapObject*>(*cur);
- int64_t value = *cur;
+ Object obj(*cur);
Heap* current_heap = isolate_->heap();
- if (((value & 1) == 0) ||
- current_heap->ContainsSlow(obj->address())) {
+ if (obj.IsSmi() || current_heap->Contains(HeapObject::cast(obj))) {
PrintF(" (");
- if ((value & kSmiTagMask) == 0) {
- DCHECK(SmiValuesAre32Bits() || SmiValuesAre31Bits());
- int32_t untagged = (value >> kSmiShift) & 0xFFFFFFFF;
- PrintF("smi %" PRId32, untagged);
+ if (obj.IsSmi()) {
+ PrintF("smi %" PRId32, Smi::ToInt(obj));
} else {
obj->ShortPrint();
}
@@ -4483,12 +4530,12 @@ void Simulator::NEONLoadStoreMultiStructHelper(const Instruction* instr,
}
{
- base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
+ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
if (log_read) {
local_monitor_.NotifyLoad();
} else {
local_monitor_.NotifyStore();
- global_monitor_.Pointer()->NotifyStore_Locked(&global_monitor_processor_);
+ GlobalMonitor::Get()->NotifyStore_Locked(&global_monitor_processor_);
}
}
@@ -4729,12 +4776,12 @@ void Simulator::NEONLoadStoreSingleStructHelper(const Instruction* instr,
}
{
- base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
+ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
if (do_load) {
local_monitor_.NotifyLoad();
} else {
local_monitor_.NotifyStore();
- global_monitor_.Pointer()->NotifyStore_Locked(&global_monitor_processor_);
+ GlobalMonitor::Get()->NotifyStore_Locked(&global_monitor_processor_);
}
}
@@ -4797,7 +4844,7 @@ void Simulator::VisitNEONModifiedImmediate(Instruction* instr) {
vform = q ? kFormat2D : kFormat1D;
imm = 0;
for (int i = 0; i < 8; ++i) {
- if (imm8 & (1 << i)) {
+ if (imm8 & (1ULL << i)) {
imm |= (UINT64_C(0xFF) << (8 * i));
}
}
@@ -5812,8 +5859,6 @@ bool Simulator::GlobalMonitor::Processor::NotifyStoreExcl_Locked(
return false;
}
-Simulator::GlobalMonitor::GlobalMonitor() : head_(nullptr) {}
-
void Simulator::GlobalMonitor::NotifyLoadExcl_Locked(uintptr_t addr,
Processor* processor) {
processor->NotifyLoadExcl_Locked(addr);
@@ -5863,7 +5908,7 @@ void Simulator::GlobalMonitor::PrependProcessor_Locked(Processor* processor) {
}
void Simulator::GlobalMonitor::RemoveProcessor(Processor* processor) {
- base::LockGuard<base::Mutex> lock_guard(&mutex);
+ base::MutexGuard lock_guard(&mutex);
if (!IsProcessorInLinkedList_Locked(processor)) {
return;
}
@@ -5880,9 +5925,7 @@ void Simulator::GlobalMonitor::RemoveProcessor(Processor* processor) {
processor->next_ = nullptr;
}
-#endif // USE_SIMULATOR
-
} // namespace internal
} // namespace v8
-#endif // V8_TARGET_ARCH_ARM64
+#endif // USE_SIMULATOR
diff --git a/deps/v8/src/arm64/simulator-arm64.h b/deps/v8/src/arm64/simulator-arm64.h
index c97a759d1b..586d65b341 100644
--- a/deps/v8/src/arm64/simulator-arm64.h
+++ b/deps/v8/src/arm64/simulator-arm64.h
@@ -5,6 +5,11 @@
#ifndef V8_ARM64_SIMULATOR_ARM64_H_
#define V8_ARM64_SIMULATOR_ARM64_H_
+// globals.h defines USE_SIMULATOR.
+#include "src/globals.h"
+
+#if defined(USE_SIMULATOR)
+
#include <stdarg.h>
#include <vector>
@@ -15,15 +20,12 @@
#include "src/arm64/instrument-arm64.h"
#include "src/assembler.h"
#include "src/base/compiler-specific.h"
-#include "src/globals.h"
#include "src/simulator-base.h"
#include "src/utils.h"
namespace v8 {
namespace internal {
-#if defined(USE_SIMULATOR)
-
// Assemble the specified IEEE-754 components into the target type and apply
// appropriate rounding.
// sign: 0 = positive, 1 = negative
@@ -2228,8 +2230,6 @@ class Simulator : public DecoderVisitor, public SimulatorBase {
class GlobalMonitor {
public:
- GlobalMonitor();
-
class Processor {
public:
Processor();
@@ -2265,16 +2265,21 @@ class Simulator : public DecoderVisitor, public SimulatorBase {
// Called when the simulator is destroyed.
void RemoveProcessor(Processor* processor);
+ static GlobalMonitor* Get();
+
private:
+ // Private constructor. Call {GlobalMonitor::Get()} to get the singleton.
+ GlobalMonitor() = default;
+ friend class base::LeakyObject<GlobalMonitor>;
+
bool IsProcessorInLinkedList_Locked(Processor* processor) const;
void PrependProcessor_Locked(Processor* processor);
- Processor* head_;
+ Processor* head_ = nullptr;
};
LocalMonitor local_monitor_;
GlobalMonitor::Processor global_monitor_processor_;
- static base::LazyInstance<GlobalMonitor>::type global_monitor_;
private:
void Init(FILE* stream);
@@ -2356,9 +2361,8 @@ inline float Simulator::FPDefaultNaN<float>() {
return kFP32DefaultNaN;
}
-#endif // defined(USE_SIMULATOR)
-
} // namespace internal
} // namespace v8
+#endif // defined(USE_SIMULATOR)
#endif // V8_ARM64_SIMULATOR_ARM64_H_
diff --git a/deps/v8/src/arm64/simulator-logic-arm64.cc b/deps/v8/src/arm64/simulator-logic-arm64.cc
index 9ee5ea6cc8..e23f194414 100644
--- a/deps/v8/src/arm64/simulator-logic-arm64.cc
+++ b/deps/v8/src/arm64/simulator-logic-arm64.cc
@@ -2,16 +2,15 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#if V8_TARGET_ARCH_ARM64
+#include "src/arm64/simulator-arm64.h"
+
+#if defined(USE_SIMULATOR)
#include <cmath>
-#include "src/arm64/simulator-arm64.h"
namespace v8 {
namespace internal {
-#if defined(USE_SIMULATOR)
-
namespace {
// See FPRound for a description of this function.
@@ -4183,9 +4182,7 @@ LogicVRegister Simulator::ucvtf(VectorFormat vform, LogicVRegister dst,
return dst;
}
-#endif // USE_SIMULATOR
-
} // namespace internal
} // namespace v8
-#endif // V8_TARGET_ARCH_ARM64
+#endif // USE_SIMULATOR
diff --git a/deps/v8/src/arm64/utils-arm64.cc b/deps/v8/src/arm64/utils-arm64.cc
index f8804d8b93..6d200be18a 100644
--- a/deps/v8/src/arm64/utils-arm64.cc
+++ b/deps/v8/src/arm64/utils-arm64.cc
@@ -123,7 +123,7 @@ int HighestSetBitPosition(uint64_t value) {
uint64_t LargestPowerOf2Divisor(uint64_t value) {
- return value & -value;
+ return value & (-(int64_t)value);
}
diff --git a/deps/v8/src/arm64/utils-arm64.h b/deps/v8/src/arm64/utils-arm64.h
index 920a84dbdf..f57dc86173 100644
--- a/deps/v8/src/arm64/utils-arm64.h
+++ b/deps/v8/src/arm64/utils-arm64.h
@@ -44,7 +44,7 @@ int MaskToBit(uint64_t mask);
template <typename T>
T ReverseBytes(T value, int block_bytes_log2) {
DCHECK((sizeof(value) == 4) || (sizeof(value) == 8));
- DCHECK((1U << block_bytes_log2) <= sizeof(value));
+ DCHECK((1ULL << block_bytes_log2) <= sizeof(value));
// Split the 64-bit value into an 8-bit array, where b[0] is the least
// significant byte, and b[7] is the most significant.
uint8_t bytes[8];
diff --git a/deps/v8/src/asmjs/OWNERS b/deps/v8/src/asmjs/OWNERS
index e8a7f1683b..d4103ae0c1 100644
--- a/deps/v8/src/asmjs/OWNERS
+++ b/deps/v8/src/asmjs/OWNERS
@@ -1,7 +1,6 @@
set noparent
ahaas@chromium.org
-bradnelson@chromium.org
clemensh@chromium.org
mstarzinger@chromium.org
titzer@chromium.org
diff --git a/deps/v8/src/asmjs/asm-js.cc b/deps/v8/src/asmjs/asm-js.cc
index aea4c0a21b..c242c56389 100644
--- a/deps/v8/src/asmjs/asm-js.cc
+++ b/deps/v8/src/asmjs/asm-js.cc
@@ -11,11 +11,14 @@
#include "src/base/optional.h"
#include "src/base/platform/elapsed-timer.h"
#include "src/compiler.h"
+#include "src/counters.h"
#include "src/execution.h"
#include "src/handles.h"
#include "src/heap/factory.h"
#include "src/isolate.h"
+#include "src/message-template.h"
#include "src/objects-inl.h"
+#include "src/objects/heap-number-inl.h"
#include "src/parsing/parse-info.h"
#include "src/parsing/scanner-character-streams.h"
#include "src/parsing/scanner.h"
@@ -34,16 +37,11 @@ namespace internal {
const char* const AsmJs::kSingleFunctionName = "__single_function__";
namespace {
-enum WasmDataEntries {
- kWasmDataCompiledModule,
- kWasmDataUsesBitSet,
- kWasmDataEntryCount,
-};
Handle<Object> StdlibMathMember(Isolate* isolate, Handle<JSReceiver> stdlib,
Handle<Name> name) {
Handle<Name> math_name(
- isolate->factory()->InternalizeOneByteString(STATIC_CHAR_VECTOR("Math")));
+ isolate->factory()->InternalizeOneByteString(StaticCharVector("Math")));
Handle<Object> math = JSReceiver::GetDataProperty(stdlib, math_name);
if (!math->IsJSReceiver()) return isolate->factory()->undefined_value();
Handle<JSReceiver> math_receiver = Handle<JSReceiver>::cast(math);
@@ -54,51 +52,51 @@ Handle<Object> StdlibMathMember(Isolate* isolate, Handle<JSReceiver> stdlib,
bool AreStdlibMembersValid(Isolate* isolate, Handle<JSReceiver> stdlib,
wasm::AsmJsParser::StdlibSet members,
bool* is_typed_array) {
- if (members.Contains(wasm::AsmJsParser::StandardMember::kInfinity)) {
+ if (members.contains(wasm::AsmJsParser::StandardMember::kInfinity)) {
members.Remove(wasm::AsmJsParser::StandardMember::kInfinity);
Handle<Name> name = isolate->factory()->Infinity_string();
Handle<Object> value = JSReceiver::GetDataProperty(stdlib, name);
if (!value->IsNumber() || !std::isinf(value->Number())) return false;
}
- if (members.Contains(wasm::AsmJsParser::StandardMember::kNaN)) {
+ if (members.contains(wasm::AsmJsParser::StandardMember::kNaN)) {
members.Remove(wasm::AsmJsParser::StandardMember::kNaN);
Handle<Name> name = isolate->factory()->NaN_string();
Handle<Object> value = JSReceiver::GetDataProperty(stdlib, name);
if (!value->IsNaN()) return false;
}
-#define STDLIB_MATH_FUNC(fname, FName, ignore1, ignore2) \
- if (members.Contains(wasm::AsmJsParser::StandardMember::kMath##FName)) { \
- members.Remove(wasm::AsmJsParser::StandardMember::kMath##FName); \
- Handle<Name> name(isolate->factory()->InternalizeOneByteString( \
- STATIC_CHAR_VECTOR(#fname))); \
- Handle<Object> value = StdlibMathMember(isolate, stdlib, name); \
- if (!value->IsJSFunction()) return false; \
- SharedFunctionInfo* shared = Handle<JSFunction>::cast(value)->shared(); \
- if (!shared->HasBuiltinId() || \
- shared->builtin_id() != Builtins::kMath##FName) { \
- return false; \
- } \
- DCHECK_EQ(shared->GetCode(), \
- isolate->builtins()->builtin(Builtins::kMath##FName)); \
+#define STDLIB_MATH_FUNC(fname, FName, ignore1, ignore2) \
+ if (members.contains(wasm::AsmJsParser::StandardMember::kMath##FName)) { \
+ members.Remove(wasm::AsmJsParser::StandardMember::kMath##FName); \
+ Handle<Name> name(isolate->factory()->InternalizeOneByteString( \
+ StaticCharVector(#fname))); \
+ Handle<Object> value = StdlibMathMember(isolate, stdlib, name); \
+ if (!value->IsJSFunction()) return false; \
+ SharedFunctionInfo shared = Handle<JSFunction>::cast(value)->shared(); \
+ if (!shared->HasBuiltinId() || \
+ shared->builtin_id() != Builtins::kMath##FName) { \
+ return false; \
+ } \
+ DCHECK_EQ(shared->GetCode(), \
+ isolate->builtins()->builtin(Builtins::kMath##FName)); \
}
STDLIB_MATH_FUNCTION_LIST(STDLIB_MATH_FUNC)
#undef STDLIB_MATH_FUNC
#define STDLIB_MATH_CONST(cname, const_value) \
- if (members.Contains(wasm::AsmJsParser::StandardMember::kMath##cname)) { \
+ if (members.contains(wasm::AsmJsParser::StandardMember::kMath##cname)) { \
members.Remove(wasm::AsmJsParser::StandardMember::kMath##cname); \
Handle<Name> name(isolate->factory()->InternalizeOneByteString( \
- STATIC_CHAR_VECTOR(#cname))); \
+ StaticCharVector(#cname))); \
Handle<Object> value = StdlibMathMember(isolate, stdlib, name); \
if (!value->IsNumber() || value->Number() != const_value) return false; \
}
STDLIB_MATH_VALUE_LIST(STDLIB_MATH_CONST)
#undef STDLIB_MATH_CONST
#define STDLIB_ARRAY_TYPE(fname, FName) \
- if (members.Contains(wasm::AsmJsParser::StandardMember::k##FName)) { \
+ if (members.contains(wasm::AsmJsParser::StandardMember::k##FName)) { \
members.Remove(wasm::AsmJsParser::StandardMember::k##FName); \
*is_typed_array = true; \
Handle<Name> name(isolate->factory()->InternalizeOneByteString( \
- STATIC_CHAR_VECTOR(#FName))); \
+ StaticCharVector(#FName))); \
Handle<Object> value = JSReceiver::GetDataProperty(stdlib, name); \
if (!value->IsJSFunction()) return false; \
Handle<JSFunction> func = Handle<JSFunction>::cast(value); \
@@ -114,12 +112,12 @@ bool AreStdlibMembersValid(Isolate* isolate, Handle<JSReceiver> stdlib,
STDLIB_ARRAY_TYPE(float64_array_fun, Float64Array)
#undef STDLIB_ARRAY_TYPE
// All members accounted for.
- DCHECK(members.IsEmpty());
+ DCHECK(members.empty());
return true;
}
void Report(Handle<Script> script, int position, Vector<const char> text,
- MessageTemplate::Template message_template,
+ MessageTemplate message_template,
v8::Isolate::MessageErrorLevel level) {
Isolate* isolate = script->GetIsolate();
MessageLocation location(script, position, position);
@@ -280,23 +278,19 @@ UnoptimizedCompilationJob::Status AsmJsCompilationJob::FinalizeJobImpl(
Handle<HeapNumber> uses_bitset =
isolate->factory()->NewHeapNumberFromBits(stdlib_uses_.ToIntegral());
+ // The result is a compiled module and serialized standard library uses.
wasm::ErrorThrower thrower(isolate, "AsmJs::Compile");
- Handle<WasmModuleObject> compiled =
+ Handle<AsmWasmData> result =
isolate->wasm_engine()
->SyncCompileTranslatedAsmJs(
isolate, &thrower,
wasm::ModuleWireBytes(module_->begin(), module_->end()),
- parse_info()->script(),
- Vector<const byte>(asm_offsets_->begin(), asm_offsets_->size()))
+ Vector<const byte>(asm_offsets_->begin(), asm_offsets_->size()),
+ uses_bitset)
.ToHandleChecked();
DCHECK(!thrower.error());
compile_time_ = compile_timer.Elapsed().InMillisecondsF();
- // The result is a compiled module and serialized standard library uses.
- Handle<FixedArray> result =
- isolate->factory()->NewFixedArray(kWasmDataEntryCount);
- result->set(kWasmDataCompiledModule, *compiled);
- result->set(kWasmDataUsesBitSet, *uses_bitset);
compilation_info()->SetAsmWasmData(result);
RecordHistograms(isolate);
@@ -334,10 +328,8 @@ namespace {
inline bool IsValidAsmjsMemorySize(size_t size) {
// Enforce asm.js spec minimum size.
if (size < (1u << 12u)) return false;
- // Enforce engine-limited maximum allocation size.
- if (size > wasm::kV8MaxWasmMemoryBytes) return false;
- // Enforce flag-limited maximum allocation size.
- if (size > (FLAG_wasm_max_mem_pages * uint64_t{wasm::kWasmPageSize})) {
+ // Enforce engine-limited and flag-limited maximum allocation size.
+ if (size > wasm::max_mem_pages() * uint64_t{wasm::kWasmPageSize}) {
return false;
}
// Enforce power-of-2 sizes for 2^12 - 2^24.
@@ -354,25 +346,29 @@ inline bool IsValidAsmjsMemorySize(size_t size) {
MaybeHandle<Object> AsmJs::InstantiateAsmWasm(Isolate* isolate,
Handle<SharedFunctionInfo> shared,
- Handle<FixedArray> wasm_data,
+ Handle<AsmWasmData> wasm_data,
Handle<JSReceiver> stdlib,
Handle<JSReceiver> foreign,
Handle<JSArrayBuffer> memory) {
base::ElapsedTimer instantiate_timer;
instantiate_timer.Start();
- Handle<HeapNumber> uses_bitset(
- HeapNumber::cast(wasm_data->get(kWasmDataUsesBitSet)), isolate);
- Handle<WasmModuleObject> module(
- WasmModuleObject::cast(wasm_data->get(kWasmDataCompiledModule)), isolate);
+ Handle<HeapNumber> uses_bitset(wasm_data->uses_bitset(), isolate);
Handle<Script> script(Script::cast(shared->script()), isolate);
+
+ // Allocate the WasmModuleObject.
+ Handle<WasmModuleObject> module =
+ isolate->wasm_engine()->FinalizeTranslatedAsmJs(isolate, wasm_data,
+ script);
+
// TODO(mstarzinger): The position currently points to the module definition
// but should instead point to the instantiation site (more intuitive).
int position = shared->StartPosition();
// Check that all used stdlib members are valid.
bool stdlib_use_of_typed_array_present = false;
- wasm::AsmJsParser::StdlibSet stdlib_uses(uses_bitset->value_as_bits());
- if (!stdlib_uses.IsEmpty()) { // No checking needed if no uses.
+ wasm::AsmJsParser::StdlibSet stdlib_uses =
+ wasm::AsmJsParser::StdlibSet::FromIntegral(uses_bitset->value_as_bits());
+ if (!stdlib_uses.empty()) { // No checking needed if no uses.
if (stdlib.is_null()) {
ReportInstantiationFailure(script, position, "Requires standard library");
return MaybeHandle<Object>();
diff --git a/deps/v8/src/asmjs/asm-js.h b/deps/v8/src/asmjs/asm-js.h
index 1a87ce99b4..05707cad98 100644
--- a/deps/v8/src/asmjs/asm-js.h
+++ b/deps/v8/src/asmjs/asm-js.h
@@ -13,6 +13,7 @@ namespace v8 {
namespace internal {
class AccountingAllocator;
+class AsmWasmData;
class FunctionLiteral;
class JSArrayBuffer;
class ParseInfo;
@@ -27,7 +28,7 @@ class AsmJs {
AccountingAllocator* allocator);
static MaybeHandle<Object> InstantiateAsmWasm(Isolate* isolate,
Handle<SharedFunctionInfo>,
- Handle<FixedArray> wasm_data,
+ Handle<AsmWasmData> wasm_data,
Handle<JSReceiver> stdlib,
Handle<JSReceiver> foreign,
Handle<JSArrayBuffer> memory);
diff --git a/deps/v8/src/asmjs/asm-parser.cc b/deps/v8/src/asmjs/asm-parser.cc
index fee309d9fb..df86bf5b9a 100644
--- a/deps/v8/src/asmjs/asm-parser.cc
+++ b/deps/v8/src/asmjs/asm-parser.cc
@@ -12,6 +12,7 @@
#include "src/asmjs/asm-js.h"
#include "src/asmjs/asm-types.h"
#include "src/base/optional.h"
+#include "src/base/overflowing-math.h"
#include "src/flags.h"
#include "src/parsing/scanner.h"
#include "src/wasm/wasm-limits.h"
@@ -343,7 +344,7 @@ void AsmJsParser::ValidateModule() {
RECURSE(ValidateModuleParameters());
EXPECT_TOKEN('{');
EXPECT_TOKEN(TOK(UseAsm));
- SkipSemicolon();
+ RECURSE(SkipSemicolon());
RECURSE(ValidateModuleVars());
while (Peek(TOK(function))) {
RECURSE(ValidateFunction());
@@ -1498,10 +1499,15 @@ AsmType* AsmJsParser::AssignmentExpression() {
FAILn("Illegal type stored to heap view");
}
if (heap_type->IsA(AsmType::Float32Array()) &&
- value->IsA(AsmType::Double())) {
+ value->IsA(AsmType::DoubleQ())) {
// Assignment to a float32 heap can be used to convert doubles.
current_function_builder_->Emit(kExprF32ConvertF64);
}
+ if (heap_type->IsA(AsmType::Float64Array()) &&
+ value->IsA(AsmType::FloatQ())) {
+ // Assignment to a float64 heap can be used to convert floats.
+ current_function_builder_->Emit(kExprF64ConvertF32);
+ }
ret = value;
#define V(array_type, wasmload, wasmstore, type) \
if (heap_type->IsA(AsmType::array_type())) { \
@@ -1559,7 +1565,8 @@ AsmType* AsmJsParser::UnaryExpression() {
if (CheckForUnsigned(&uvalue)) {
// TODO(bradnelson): was supposed to be 0x7FFFFFFF, check errata.
if (uvalue <= 0x80000000) {
- current_function_builder_->EmitI32Const(-static_cast<int32_t>(uvalue));
+ current_function_builder_->EmitI32Const(
+ base::NegateWithWraparound(static_cast<int32_t>(uvalue)));
} else {
FAILn("Integer numeric literal out of range.");
}
@@ -1637,6 +1644,7 @@ AsmType* AsmJsParser::UnaryExpression() {
// 6.8.8 MultiplicativeExpression
AsmType* AsmJsParser::MultiplicativeExpression() {
+ AsmType* a;
uint32_t uvalue;
if (CheckForUnsignedBelow(0x100000, &uvalue)) {
if (Check('*')) {
@@ -1649,8 +1657,10 @@ AsmType* AsmJsParser::MultiplicativeExpression() {
current_function_builder_->EmitI32Const(value);
current_function_builder_->Emit(kExprI32Mul);
return AsmType::Intish();
+ } else {
+ scanner_.Rewind();
+ RECURSEn(a = UnaryExpression());
}
- scanner_.Rewind();
} else if (Check('-')) {
if (CheckForUnsignedBelow(0x100000, &uvalue)) {
int32_t value = -static_cast<int32_t>(uvalue);
@@ -1664,12 +1674,14 @@ AsmType* AsmJsParser::MultiplicativeExpression() {
current_function_builder_->Emit(kExprI32Mul);
return AsmType::Intish();
}
- return AsmType::Signed();
+ a = AsmType::Signed();
+ } else {
+ scanner_.Rewind();
+ RECURSEn(a = UnaryExpression());
}
- scanner_.Rewind();
+ } else {
+ RECURSEn(a = UnaryExpression());
}
- AsmType* a;
- RECURSEn(a = UnaryExpression());
for (;;) {
if (Check('*')) {
uint32_t uvalue;
@@ -1987,7 +1999,8 @@ AsmType* AsmJsParser::BitwiseORExpression() {
// Remember whether the first operand to this OR-expression has requested
// deferred validation of the |0 annotation.
// NOTE: This has to happen here to work recursively.
- bool requires_zero = call_coercion_deferred_->IsExactly(AsmType::Signed());
+ bool requires_zero =
+ AsmType::IsExactly(call_coercion_deferred_, AsmType::Signed());
call_coercion_deferred_ = nullptr;
// TODO(bradnelson): Make it prettier.
bool zero = false;
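The new include of src/base/overflowing-math.h and the EmitI32Const change above replace a plain negation, which is undefined behaviour when uvalue == 0x80000000 (the result, INT32_MIN, has no positive counterpart in int32_t). A sketch of the wraparound technique; only the helper name comes from the diff, the body here is an assumption:

    #include <cassert>
    #include <cstdint>

    // Two's-complement negation via unsigned arithmetic: well-defined for
    // every input, including INT32_MIN, where plain -v would overflow (UB).
    int32_t NegateWithWraparound(int32_t v) {
      return static_cast<int32_t>(0u - static_cast<uint32_t>(v));
    }

    int main() {
      assert(NegateWithWraparound(1) == -1);
      assert(NegateWithWraparound(INT32_MIN) == INT32_MIN);  // wraps, no UB
    }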
diff --git a/deps/v8/src/asmjs/asm-parser.h b/deps/v8/src/asmjs/asm-parser.h
index ac8a05a028..dd8392ddcf 100644
--- a/deps/v8/src/asmjs/asm-parser.h
+++ b/deps/v8/src/asmjs/asm-parser.h
@@ -10,6 +10,7 @@
#include "src/asmjs/asm-scanner.h"
#include "src/asmjs/asm-types.h"
+#include "src/base/enum-set.h"
#include "src/wasm/wasm-module-builder.h"
#include "src/zone/zone-containers.h"
@@ -47,7 +48,7 @@ class AsmJsParser {
};
// clang-format on
- typedef EnumSet<StandardMember, uint64_t> StdlibSet;
+ using StdlibSet = base::EnumSet<StandardMember, uint64_t>;
explicit AsmJsParser(Zone* zone, uintptr_t stack_limit,
Utf16CharacterStream* stream);
diff --git a/deps/v8/src/asmjs/asm-scanner.cc b/deps/v8/src/asmjs/asm-scanner.cc
index c7144e3be6..448f8a77d3 100644
--- a/deps/v8/src/asmjs/asm-scanner.cc
+++ b/deps/v8/src/asmjs/asm-scanner.cc
@@ -8,7 +8,6 @@
#include "src/conversions.h"
#include "src/flags.h"
#include "src/parsing/scanner.h"
-#include "src/unicode-cache.h"
namespace v8 {
namespace internal {
@@ -308,11 +307,8 @@ void AsmJsScanner::ConsumeNumber(uc32 ch) {
return;
}
// Decode numbers.
- UnicodeCache cache;
double_value_ = StringToDouble(
- &cache,
- Vector<const uint8_t>(reinterpret_cast<const uint8_t*>(number.data()),
- static_cast<int>(number.size())),
+ Vector<const uint8_t>::cast(VectorOf(number)),
ALLOW_HEX | ALLOW_OCTAL | ALLOW_BINARY | ALLOW_IMPLICIT_OCTAL);
if (std::isnan(double_value_)) {
// Check if string to number conversion didn't consume all the characters.
@@ -353,6 +349,9 @@ bool AsmJsScanner::ConsumeCComment() {
return true;
}
}
+ if (ch == '\n') {
+ preceded_by_newline_ = true;
+ }
if (ch == kEndOfInput) {
return false;
}
@@ -362,7 +361,11 @@ bool AsmJsScanner::ConsumeCComment() {
void AsmJsScanner::ConsumeCPPComment() {
for (;;) {
uc32 ch = stream_->Advance();
- if (ch == '\n' || ch == kEndOfInput) {
+ if (ch == '\n') {
+ preceded_by_newline_ = true;
+ return;
+ }
+ if (ch == kEndOfInput) {
return;
}
}
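Both comment consumers now record when a comment spans a newline, so the parser can treat such a comment like a line terminator (relevant for automatic semicolon insertion). A standalone sketch of the same bookkeeping, with hypothetical names:

    #include <cassert>
    #include <cstddef>
    #include <string>

    // Consumes a C-style comment body (cursor already past "/*") and records
    // whether it contained a newline, mirroring preceded_by_newline_.
    bool ConsumeCComment(const std::string& src, size_t* pos,
                         bool* preceded_by_newline) {
      while (*pos + 1 < src.size()) {
        if (src[*pos] == '\n') *preceded_by_newline = true;
        if (src[*pos] == '*' && src[*pos + 1] == '/') {
          *pos += 2;
          return true;
        }
        ++*pos;
      }
      return false;  // end of input inside the comment
    }

    int main() {
      std::string s = "/* line1\nline2 */x";
      size_t pos = 2;  // cursor after "/*"
      bool newline = false;
      assert(ConsumeCComment(s, &pos, &newline));
      assert(newline && s[pos] == 'x');
    }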
diff --git a/deps/v8/src/asmjs/asm-types.cc b/deps/v8/src/asmjs/asm-types.cc
index 5ec242769b..656f92a2dd 100644
--- a/deps/v8/src/asmjs/asm-types.cc
+++ b/deps/v8/src/asmjs/asm-types.cc
@@ -38,11 +38,12 @@ std::string AsmType::Name() {
return this->AsCallableType()->Name();
}
-bool AsmType::IsExactly(AsmType* that) {
- // TODO(jpp): maybe this can become this == that.
- AsmValueType* avt = this->AsValueType();
+bool AsmType::IsExactly(AsmType* x, AsmType* y) {
+ // TODO(jpp): maybe this can become x == y.
+ if (x == nullptr) return y == nullptr;
+ AsmValueType* avt = x->AsValueType();
if (avt != nullptr) {
- AsmValueType* tavt = that->AsValueType();
+ AsmValueType* tavt = y->AsValueType();
if (tavt == nullptr) {
return false;
}
@@ -51,7 +52,7 @@ bool AsmType::IsExactly(AsmType* that) {
// TODO(jpp): is it useful to allow non-value types to be tested with
// IsExactly?
- return that == this;
+ return x == y;
}
bool AsmType::IsA(AsmType* that) {
@@ -200,7 +201,7 @@ class AsmMinMaxType final : public AsmCallableType {
bool CanBeInvokedWith(AsmType* return_type,
const ZoneVector<AsmType*>& args) override {
- if (!return_type_->IsExactly(return_type)) {
+ if (!AsmType::IsExactly(return_type_, return_type)) {
return false;
}
@@ -239,7 +240,7 @@ bool AsmFunctionType::IsA(AsmType* other) {
if (that == nullptr) {
return false;
}
- if (!return_type_->IsExactly(that->return_type_)) {
+ if (!AsmType::IsExactly(return_type_, that->return_type_)) {
return false;
}
@@ -248,7 +249,7 @@ bool AsmFunctionType::IsA(AsmType* other) {
}
for (size_t ii = 0; ii < args_.size(); ++ii) {
- if (!args_[ii]->IsExactly(that->args_[ii])) {
+ if (!AsmType::IsExactly(args_[ii], that->args_[ii])) {
return false;
}
}
@@ -258,7 +259,7 @@ bool AsmFunctionType::IsA(AsmType* other) {
bool AsmFunctionType::CanBeInvokedWith(AsmType* return_type,
const ZoneVector<AsmType*>& args) {
- if (!return_type_->IsExactly(return_type)) {
+ if (!AsmType::IsExactly(return_type_, return_type)) {
return false;
}
diff --git a/deps/v8/src/asmjs/asm-types.h b/deps/v8/src/asmjs/asm-types.h
index 061d465def..fb044a95f9 100644
--- a/deps/v8/src/asmjs/asm-types.h
+++ b/deps/v8/src/asmjs/asm-types.h
@@ -214,9 +214,9 @@ class V8_EXPORT_PRIVATE AsmType {
static AsmType* MinMaxType(Zone* zone, AsmType* dest, AsmType* src);
std::string Name();
- // IsExactly returns true if this is the exact same type as that. For
- // non-value types (e.g., callables), this returns this == that.
- bool IsExactly(AsmType* that);
+ // IsExactly returns true if x is the exact same type as y. For
+ // non-value types (e.g., callables), this returns x == y.
+ static bool IsExactly(AsmType* x, AsmType* y);
// IsA is used to query whether this is an instance of that (i.e., if this is
// a type derived from that.) For non-value types (e.g., callables), this
// returns this == that.
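Making IsExactly a static two-argument predicate lets call sites such as BitwiseORExpression compare a possibly-null call_coercion_deferred_ without a guard at every use. The shape of the change, sketched standalone (the id comparison stands in for the real value-type check):

    #include <cassert>

    struct Type {
      // Before: bool IsExactly(Type* that);  -- unusable on a null receiver.
      // After: a static predicate that treats null as a comparable value.
      static bool IsExactly(Type* x, Type* y) {
        if (x == nullptr) return y == nullptr;
        if (y == nullptr) return false;
        return x->id == y->id;  // stand-in for the value-type comparison
      }
      int id;
    };

    int main() {
      Type a{1}, b{1}, c{2};
      assert(Type::IsExactly(&a, &b));
      assert(!Type::IsExactly(&a, &c));
      assert(Type::IsExactly(nullptr, nullptr));  // safe where a member
      assert(!Type::IsExactly(nullptr, &a));      // call would crash
    }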
diff --git a/deps/v8/src/asmjs/switch-logic.cc b/deps/v8/src/asmjs/switch-logic.cc
deleted file mode 100644
index e12b3a33f1..0000000000
--- a/deps/v8/src/asmjs/switch-logic.cc
+++ /dev/null
@@ -1,65 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include <algorithm>
-
-#include "src/asmjs/switch-logic.h"
-
-namespace v8 {
-namespace internal {
-namespace wasm {
-
-namespace {
-CaseNode* CreateBst(ZoneVector<CaseNode*>* nodes, size_t begin, size_t end) {
- if (end < begin) {
- return nullptr;
- } else if (end == begin) {
- return nodes->at(begin);
- } else {
- size_t root_index = (begin + end) / 2;
- CaseNode* root = nodes->at(root_index);
- if (root_index != 0) {
- root->left = CreateBst(nodes, begin, root_index - 1);
- }
- root->right = CreateBst(nodes, root_index + 1, end);
- return root;
- }
-}
-} // namespace
-
-CaseNode* OrderCases(ZoneVector<int>* cases, Zone* zone) {
- const int max_distance = 2;
- const int min_size = 4;
- if (cases->empty()) {
- return nullptr;
- }
- std::sort(cases->begin(), cases->end());
- ZoneVector<size_t> table_breaks(zone);
- for (size_t i = 1; i < cases->size(); ++i) {
- if (cases->at(i) - cases->at(i - 1) > max_distance) {
- table_breaks.push_back(i);
- }
- }
- table_breaks.push_back(cases->size());
- ZoneVector<CaseNode*> nodes(zone);
- size_t curr_pos = 0;
- for (size_t i = 0; i < table_breaks.size(); ++i) {
- size_t break_pos = table_breaks[i];
- if (break_pos - curr_pos >= min_size) {
- int begin = cases->at(curr_pos);
- int end = cases->at(break_pos - 1);
- nodes.push_back(new (zone) CaseNode(begin, end));
- curr_pos = break_pos;
- } else {
- for (; curr_pos < break_pos; curr_pos++) {
- nodes.push_back(new (zone)
- CaseNode(cases->at(curr_pos), cases->at(curr_pos)));
- }
- }
- }
- return CreateBst(&nodes, 0, nodes.size() - 1);
-}
-} // namespace wasm
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/asmjs/switch-logic.h b/deps/v8/src/asmjs/switch-logic.h
deleted file mode 100644
index f770ddc33d..0000000000
--- a/deps/v8/src/asmjs/switch-logic.h
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_ASMJS_SWITCH_LOGIC_H_
-#define V8_ASMJS_SWITCH_LOGIC_H_
-
-#include "src/globals.h"
-#include "src/zone/zone-containers.h"
-#include "src/zone/zone.h"
-
-namespace v8 {
-namespace internal {
-namespace wasm {
-
-struct CaseNode : public ZoneObject {
- const int begin;
- const int end;
- CaseNode* left;
- CaseNode* right;
- CaseNode(int begin, int end) : begin(begin), end(end) {
- left = nullptr;
- right = nullptr;
- }
-};
-
-V8_EXPORT_PRIVATE CaseNode* OrderCases(ZoneVector<int>* cases, Zone* zone);
-
-} // namespace wasm
-} // namespace internal
-} // namespace v8
-
-#endif // V8_ASMJS_SWITCH_LOGIC_H_
diff --git a/deps/v8/src/assembler-arch-inl.h b/deps/v8/src/assembler-arch-inl.h
deleted file mode 100644
index 443c6ee1ae..0000000000
--- a/deps/v8/src/assembler-arch-inl.h
+++ /dev/null
@@ -1,30 +0,0 @@
-// Copyright 2018 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_ASSEMBLER_ARCH_INL_H_
-#define V8_ASSEMBLER_ARCH_INL_H_
-
-#include "src/assembler-inl.h"
-
-#if V8_TARGET_ARCH_IA32
-#include "src/ia32/assembler-ia32-inl.h"
-#elif V8_TARGET_ARCH_X64
-#include "src/x64/assembler-x64-inl.h"
-#elif V8_TARGET_ARCH_ARM64
-#include "src/arm64/assembler-arm64-inl.h"
-#elif V8_TARGET_ARCH_ARM
-#include "src/arm/assembler-arm-inl.h"
-#elif V8_TARGET_ARCH_PPC
-#include "src/ppc/assembler-ppc-inl.h"
-#elif V8_TARGET_ARCH_MIPS
-#include "src/mips/assembler-mips-inl.h"
-#elif V8_TARGET_ARCH_MIPS64
-#include "src/mips64/assembler-mips64-inl.h"
-#elif V8_TARGET_ARCH_S390
-#include "src/s390/assembler-s390-inl.h"
-#else
-#error Unknown architecture.
-#endif
-
-#endif // V8_ASSEMBLER_ARCH_INL_H_
diff --git a/deps/v8/src/assembler.cc b/deps/v8/src/assembler.cc
index 2037a0ec8f..383d6f67fe 100644
--- a/deps/v8/src/assembler.cc
+++ b/deps/v8/src/assembler.cc
@@ -35,16 +35,16 @@
#include "src/assembler.h"
#include "src/assembler-inl.h"
-#include "src/code-stubs.h"
#include "src/deoptimizer.h"
#include "src/disassembler.h"
-#include "src/instruction-stream.h"
#include "src/isolate.h"
#include "src/ostreams.h"
#include "src/simulator.h" // For flushing instruction cache.
+#include "src/snapshot/embedded-data.h"
#include "src/snapshot/serializer-common.h"
#include "src/snapshot/snapshot.h"
#include "src/string-constants.h"
+#include "src/vector.h"
namespace v8 {
namespace internal {
@@ -65,17 +65,20 @@ AssemblerOptions AssemblerOptions::EnableV8AgnosticCode() const {
AssemblerOptions AssemblerOptions::Default(
Isolate* isolate, bool explicitly_support_serialization) {
AssemblerOptions options;
- bool serializer =
+ const bool serializer =
isolate->serializer_enabled() || explicitly_support_serialization;
+ const bool generating_embedded_builtin =
+ isolate->ShouldLoadConstantsFromRootList();
options.record_reloc_info_for_serialization = serializer;
- options.enable_root_array_delta_access = !serializer;
+ options.enable_root_array_delta_access =
+ !serializer && !generating_embedded_builtin;
#ifdef USE_SIMULATOR
// Don't generate simulator specific code if we are building a snapshot, which
// might be run on real hardware.
options.enable_simulator_code = !serializer;
#endif
- options.inline_offheap_trampolines = !serializer;
-
+ options.inline_offheap_trampolines =
+ FLAG_embedded_builtins && !serializer && !generating_embedded_builtin;
#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
const base::AddressRegion& code_range =
isolate->heap()->memory_allocator()->code_range();
@@ -85,35 +88,84 @@ AssemblerOptions AssemblerOptions::Default(
return options;
}
+namespace {
+
+class DefaultAssemblerBuffer : public AssemblerBuffer {
+ public:
+ explicit DefaultAssemblerBuffer(int size)
+ : buffer_(OwnedVector<uint8_t>::New(size)) {
+#ifdef DEBUG
+ ZapCode(reinterpret_cast<Address>(buffer_.start()), size);
+#endif
+ }
+
+ byte* start() const override { return buffer_.start(); }
+
+ int size() const override { return static_cast<int>(buffer_.size()); }
+
+ std::unique_ptr<AssemblerBuffer> Grow(int new_size) override {
+ DCHECK_LT(size(), new_size);
+ return base::make_unique<DefaultAssemblerBuffer>(new_size);
+ }
+
+ private:
+ OwnedVector<uint8_t> buffer_;
+};
+
+class ExternalAssemblerBufferImpl : public AssemblerBuffer {
+ public:
+ ExternalAssemblerBufferImpl(byte* start, int size)
+ : start_(start), size_(size) {}
+
+ byte* start() const override { return start_; }
+
+ int size() const override { return size_; }
+
+ std::unique_ptr<AssemblerBuffer> Grow(int new_size) override {
+ FATAL("Cannot grow external assembler buffer");
+ }
+
+ private:
+ byte* const start_;
+ const int size_;
+};
+
+} // namespace
+
+std::unique_ptr<AssemblerBuffer> ExternalAssemblerBuffer(void* start,
+ int size) {
+ return base::make_unique<ExternalAssemblerBufferImpl>(
+ reinterpret_cast<byte*>(start), size);
+}
+
+std::unique_ptr<AssemblerBuffer> NewAssemblerBuffer(int size) {
+ return base::make_unique<DefaultAssemblerBuffer>(size);
+}
+
// -----------------------------------------------------------------------------
// Implementation of AssemblerBase
-AssemblerBase::AssemblerBase(const AssemblerOptions& options, void* buffer,
- int buffer_size)
- : options_(options),
+AssemblerBase::AssemblerBase(const AssemblerOptions& options,
+ std::unique_ptr<AssemblerBuffer> buffer)
+ : buffer_(std::move(buffer)),
+ options_(options),
enabled_cpu_features_(0),
emit_debug_code_(FLAG_debug_code),
predictable_code_size_(false),
constant_pool_available_(false),
jump_optimization_info_(nullptr) {
- own_buffer_ = buffer == nullptr;
- if (buffer_size == 0) buffer_size = kMinimalBufferSize;
- DCHECK_GT(buffer_size, 0);
- if (own_buffer_) buffer = NewArray<byte>(buffer_size);
- buffer_ = static_cast<byte*>(buffer);
- buffer_size_ = buffer_size;
- pc_ = buffer_;
+ if (!buffer_) buffer_ = NewAssemblerBuffer(kMinimalBufferSize);
+ buffer_start_ = buffer_->start();
+ pc_ = buffer_start_;
}
-AssemblerBase::~AssemblerBase() {
- if (own_buffer_) DeleteArray(buffer_);
-}
+AssemblerBase::~AssemblerBase() = default;
void AssemblerBase::FlushICache(void* start, size_t size) {
if (size == 0) return;
#if defined(USE_SIMULATOR)
- base::LockGuard<base::Mutex> lock_guard(Simulator::i_cache_mutex());
+ base::MutexGuard lock_guard(Simulator::i_cache_mutex());
Simulator::FlushICache(Simulator::i_cache(), start, size);
#else
CpuFeatures::FlushICache(start, size);
@@ -122,7 +174,7 @@ void AssemblerBase::FlushICache(void* start, size_t size) {
void AssemblerBase::Print(Isolate* isolate) {
StdoutStream os;
- v8::internal::Disassembler::Decode(isolate, &os, buffer_, pc_);
+ v8::internal::Disassembler::Decode(isolate, &os, buffer_start_, pc_);
}
// -----------------------------------------------------------------------------
@@ -164,214 +216,12 @@ unsigned CpuFeatures::supported_ = 0;
unsigned CpuFeatures::icache_line_size_ = 0;
unsigned CpuFeatures::dcache_line_size_ = 0;
-ConstantPoolBuilder::ConstantPoolBuilder(int ptr_reach_bits,
- int double_reach_bits) {
- info_[ConstantPoolEntry::INTPTR].entries.reserve(64);
- info_[ConstantPoolEntry::INTPTR].regular_reach_bits = ptr_reach_bits;
- info_[ConstantPoolEntry::DOUBLE].regular_reach_bits = double_reach_bits;
-}
-
-ConstantPoolEntry::Access ConstantPoolBuilder::NextAccess(
- ConstantPoolEntry::Type type) const {
- const PerTypeEntryInfo& info = info_[type];
-
- if (info.overflow()) return ConstantPoolEntry::OVERFLOWED;
-
- int dbl_count = info_[ConstantPoolEntry::DOUBLE].regular_count;
- int dbl_offset = dbl_count * kDoubleSize;
- int ptr_count = info_[ConstantPoolEntry::INTPTR].regular_count;
- int ptr_offset = ptr_count * kPointerSize + dbl_offset;
-
- if (type == ConstantPoolEntry::DOUBLE) {
- // Double overflow detection must take into account the reach for both types
- int ptr_reach_bits = info_[ConstantPoolEntry::INTPTR].regular_reach_bits;
- if (!is_uintn(dbl_offset, info.regular_reach_bits) ||
- (ptr_count > 0 &&
- !is_uintn(ptr_offset + kDoubleSize - kPointerSize, ptr_reach_bits))) {
- return ConstantPoolEntry::OVERFLOWED;
- }
- } else {
- DCHECK(type == ConstantPoolEntry::INTPTR);
- if (!is_uintn(ptr_offset, info.regular_reach_bits)) {
- return ConstantPoolEntry::OVERFLOWED;
- }
- }
-
- return ConstantPoolEntry::REGULAR;
-}
-
-ConstantPoolEntry::Access ConstantPoolBuilder::AddEntry(
- ConstantPoolEntry& entry, ConstantPoolEntry::Type type) {
- DCHECK(!emitted_label_.is_bound());
- PerTypeEntryInfo& info = info_[type];
- const int entry_size = ConstantPoolEntry::size(type);
- bool merged = false;
-
- if (entry.sharing_ok()) {
- // Try to merge entries
- std::vector<ConstantPoolEntry>::iterator it = info.shared_entries.begin();
- int end = static_cast<int>(info.shared_entries.size());
- for (int i = 0; i < end; i++, it++) {
- if ((entry_size == kPointerSize) ? entry.value() == it->value()
- : entry.value64() == it->value64()) {
- // Merge with found entry.
- entry.set_merged_index(i);
- merged = true;
- break;
- }
- }
- }
-
- // By definition, merged entries have regular access.
- DCHECK(!merged || entry.merged_index() < info.regular_count);
- ConstantPoolEntry::Access access =
- (merged ? ConstantPoolEntry::REGULAR : NextAccess(type));
-
- // Enforce an upper bound on search time by limiting the search to
- // unique sharable entries which fit in the regular section.
- if (entry.sharing_ok() && !merged && access == ConstantPoolEntry::REGULAR) {
- info.shared_entries.push_back(entry);
- } else {
- info.entries.push_back(entry);
- }
-
- // We're done if we found a match or have already triggered the
- // overflow state.
- if (merged || info.overflow()) return access;
-
- if (access == ConstantPoolEntry::REGULAR) {
- info.regular_count++;
- } else {
- info.overflow_start = static_cast<int>(info.entries.size()) - 1;
- }
-
- return access;
-}
-
-void ConstantPoolBuilder::EmitSharedEntries(Assembler* assm,
- ConstantPoolEntry::Type type) {
- PerTypeEntryInfo& info = info_[type];
- std::vector<ConstantPoolEntry>& shared_entries = info.shared_entries;
- const int entry_size = ConstantPoolEntry::size(type);
- int base = emitted_label_.pos();
- DCHECK_GT(base, 0);
- int shared_end = static_cast<int>(shared_entries.size());
- std::vector<ConstantPoolEntry>::iterator shared_it = shared_entries.begin();
- for (int i = 0; i < shared_end; i++, shared_it++) {
- int offset = assm->pc_offset() - base;
- shared_it->set_offset(offset); // Save offset for merged entries.
- if (entry_size == kPointerSize) {
- assm->dp(shared_it->value());
- } else {
- assm->dq(shared_it->value64());
- }
- DCHECK(is_uintn(offset, info.regular_reach_bits));
-
- // Patch load sequence with correct offset.
- assm->PatchConstantPoolAccessInstruction(shared_it->position(), offset,
- ConstantPoolEntry::REGULAR, type);
- }
-}
-
-void ConstantPoolBuilder::EmitGroup(Assembler* assm,
- ConstantPoolEntry::Access access,
- ConstantPoolEntry::Type type) {
- PerTypeEntryInfo& info = info_[type];
- const bool overflow = info.overflow();
- std::vector<ConstantPoolEntry>& entries = info.entries;
- std::vector<ConstantPoolEntry>& shared_entries = info.shared_entries;
- const int entry_size = ConstantPoolEntry::size(type);
- int base = emitted_label_.pos();
- DCHECK_GT(base, 0);
- int begin;
- int end;
-
- if (access == ConstantPoolEntry::REGULAR) {
- // Emit any shared entries first
- EmitSharedEntries(assm, type);
- }
-
- if (access == ConstantPoolEntry::REGULAR) {
- begin = 0;
- end = overflow ? info.overflow_start : static_cast<int>(entries.size());
- } else {
- DCHECK(access == ConstantPoolEntry::OVERFLOWED);
- if (!overflow) return;
- begin = info.overflow_start;
- end = static_cast<int>(entries.size());
- }
-
- std::vector<ConstantPoolEntry>::iterator it = entries.begin();
- if (begin > 0) std::advance(it, begin);
- for (int i = begin; i < end; i++, it++) {
- // Update constant pool if necessary and get the entry's offset.
- int offset;
- ConstantPoolEntry::Access entry_access;
- if (!it->is_merged()) {
- // Emit new entry
- offset = assm->pc_offset() - base;
- entry_access = access;
- if (entry_size == kPointerSize) {
- assm->dp(it->value());
- } else {
- assm->dq(it->value64());
- }
- } else {
- // Retrieve offset from shared entry.
- offset = shared_entries[it->merged_index()].offset();
- entry_access = ConstantPoolEntry::REGULAR;
- }
-
- DCHECK(entry_access == ConstantPoolEntry::OVERFLOWED ||
- is_uintn(offset, info.regular_reach_bits));
-
- // Patch load sequence with correct offset.
- assm->PatchConstantPoolAccessInstruction(it->position(), offset,
- entry_access, type);
- }
-}
-
-// Emit and return position of pool. Zero implies no constant pool.
-int ConstantPoolBuilder::Emit(Assembler* assm) {
- bool emitted = emitted_label_.is_bound();
- bool empty = IsEmpty();
-
- if (!emitted) {
- // Mark start of constant pool. Align if necessary.
- if (!empty) assm->DataAlign(kDoubleSize);
- assm->bind(&emitted_label_);
- if (!empty) {
- // Emit in groups based on access and type.
- // Emit doubles first for alignment purposes.
- EmitGroup(assm, ConstantPoolEntry::REGULAR, ConstantPoolEntry::DOUBLE);
- EmitGroup(assm, ConstantPoolEntry::REGULAR, ConstantPoolEntry::INTPTR);
- if (info_[ConstantPoolEntry::DOUBLE].overflow()) {
- assm->DataAlign(kDoubleSize);
- EmitGroup(assm, ConstantPoolEntry::OVERFLOWED,
- ConstantPoolEntry::DOUBLE);
- }
- if (info_[ConstantPoolEntry::INTPTR].overflow()) {
- EmitGroup(assm, ConstantPoolEntry::OVERFLOWED,
- ConstantPoolEntry::INTPTR);
- }
- }
- }
-
- return !empty ? emitted_label_.pos() : 0;
-}
-
HeapObjectRequest::HeapObjectRequest(double heap_number, int offset)
: kind_(kHeapNumber), offset_(offset) {
value_.heap_number = heap_number;
DCHECK(!IsSmiDouble(value_.heap_number));
}
-HeapObjectRequest::HeapObjectRequest(CodeStub* code_stub, int offset)
- : kind_(kCodeStub), offset_(offset) {
- value_.code_stub = code_stub;
- DCHECK_NOT_NULL(value_.code_stub);
-}
-
HeapObjectRequest::HeapObjectRequest(const StringConstantBase* string,
int offset)
: kind_(kStringConstant), offset_(offset) {
@@ -390,17 +240,13 @@ void Assembler::RecordDeoptReason(DeoptimizeReason reason,
RecordRelocInfo(RelocInfo::DEOPT_ID, id);
}
-void Assembler::RecordComment(const char* msg) {
- if (FLAG_code_comments) {
- EnsureSpace ensure_space(this);
- RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg));
- }
-}
-
void Assembler::DataAlign(int m) {
DCHECK(m >= 2 && base::bits::IsPowerOfTwo(m));
while ((pc_offset() & (m - 1)) != 0) {
- db(0);
+ // Pad with 0xcc (= int3 on ia32 and x64); the primary motivation is that
+ // the disassembler expects to find valid instructions, but this is also
+ // nice from a security point of view.
+ db(0xcc);
}
}
@@ -438,5 +284,18 @@ void AssemblerBase::UpdateCodeTarget(intptr_t code_target_index,
code_targets_[code_target_index] = code;
}
+void AssemblerBase::ReserveCodeTargetSpace(size_t num_of_code_targets) {
+ code_targets_.reserve(num_of_code_targets);
+}
+
+int Assembler::WriteCodeComments() {
+ if (!FLAG_code_comments || code_comments_writer_.entry_count() == 0) return 0;
+ int offset = pc_offset();
+ code_comments_writer_.Emit(this);
+ int size = pc_offset() - offset;
+ DCHECK_EQ(size, code_comments_writer_.section_size());
+ return size;
+}
+
} // namespace internal
} // namespace v8
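The buffer_/buffer_size_/own_buffer_ triple is replaced by the AssemblerBuffer abstraction: a growable, heap-backed default and a fixed-size external variant that refuses to grow. A compilable sketch of the same ownership pattern, simplified from the classes above (std::vector stands in for OwnedVector, and the 0xcc fill mirrors the new DataAlign padding):

    #include <cstdint>
    #include <cstdlib>
    #include <cstring>
    #include <memory>
    #include <vector>

    using byte = uint8_t;

    // Growable-vs-external buffer ownership, mirroring AssemblerBuffer.
    class Buffer {
     public:
      virtual ~Buffer() = default;
      virtual byte* start() = 0;
      virtual int size() const = 0;
      // Returns a larger, uninitialized buffer; the caller copies old data
      // over while the old buffer is still readable.
      virtual std::unique_ptr<Buffer> Grow(int new_size) = 0;
    };

    // Heap-backed and growable, like DefaultAssemblerBuffer.
    class GrowableBuffer : public Buffer {
     public:
      explicit GrowableBuffer(int size) : storage_(size) {}
      byte* start() override { return storage_.data(); }
      int size() const override { return static_cast<int>(storage_.size()); }
      std::unique_ptr<Buffer> Grow(int new_size) override {
        return std::make_unique<GrowableBuffer>(new_size);
      }

     private:
      std::vector<byte> storage_;
    };

    // Wraps caller-owned memory and refuses to grow, like
    // ExternalAssemblerBufferImpl.
    class FixedBuffer : public Buffer {
     public:
      FixedBuffer(byte* start, int size) : start_(start), size_(size) {}
      byte* start() override { return start_; }
      int size() const override { return size_; }
      std::unique_ptr<Buffer> Grow(int) override { std::abort(); }

     private:
      byte* const start_;
      const int size_;
    };

    int main() {
      std::unique_ptr<Buffer> buf = std::make_unique<GrowableBuffer>(16);
      std::memset(buf->start(), 0xcc, buf->size());  // int3 fill, as in DataAlign
      std::unique_ptr<Buffer> bigger = buf->Grow(32);
      std::memcpy(bigger->start(), buf->start(), buf->size());
      buf = std::move(bigger);
      return buf->size() == 32 ? 0 : 1;
    }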
diff --git a/deps/v8/src/assembler.h b/deps/v8/src/assembler.h
index a2a1c73191..69ab58cdb4 100644
--- a/deps/v8/src/assembler.h
+++ b/deps/v8/src/assembler.h
@@ -36,20 +36,15 @@
#define V8_ASSEMBLER_H_
#include <forward_list>
-#include <iosfwd>
-#include <map>
-#include "src/allocation.h"
-#include "src/code-reference.h"
-#include "src/contexts.h"
+#include "src/code-comments.h"
+#include "src/cpu-features.h"
#include "src/deoptimize-reason.h"
-#include "src/double.h"
#include "src/external-reference.h"
#include "src/flags.h"
#include "src/globals.h"
-#include "src/label.h"
+#include "src/handles.h"
#include "src/objects.h"
-#include "src/register-configuration.h"
#include "src/reglist.h"
#include "src/reloc-info.h"
@@ -97,10 +92,9 @@ class JumpOptimizationInfo {
class HeapObjectRequest {
public:
explicit HeapObjectRequest(double heap_number, int offset = -1);
- explicit HeapObjectRequest(CodeStub* code_stub, int offset = -1);
explicit HeapObjectRequest(const StringConstantBase* string, int offset = -1);
- enum Kind { kHeapNumber, kCodeStub, kStringConstant };
+ enum Kind { kHeapNumber, kStringConstant };
Kind kind() const { return kind_; }
double heap_number() const {
@@ -108,11 +102,6 @@ class HeapObjectRequest {
return value_.heap_number;
}
- CodeStub* code_stub() const {
- DCHECK_EQ(kind(), kCodeStub);
- return value_.code_stub;
- }
-
const StringConstantBase* string() const {
DCHECK_EQ(kind(), kStringConstant);
return value_.string;
@@ -134,7 +123,6 @@ class HeapObjectRequest {
union {
double heap_number;
- CodeStub* code_stub;
const StringConstantBase* string;
} value_;
@@ -186,9 +174,32 @@ struct V8_EXPORT_PRIVATE AssemblerOptions {
Isolate* isolate, bool explicitly_support_serialization = false);
};
+class AssemblerBuffer {
+ public:
+ virtual ~AssemblerBuffer() = default;
+ virtual byte* start() const = 0;
+ virtual int size() const = 0;
+ // Return a grown copy of this buffer. The contained data is uninitialized.
+ // The data in {this} will still be read afterwards (until {this} is
+ // destructed), but not written.
+ virtual std::unique_ptr<AssemblerBuffer> Grow(int new_size)
+ V8_WARN_UNUSED_RESULT = 0;
+};
+
+// Allocate an AssemblerBuffer which uses an existing buffer. This buffer cannot
+// grow, so it must be large enough for all code emitted by the Assembler.
+V8_EXPORT_PRIVATE
+std::unique_ptr<AssemblerBuffer> ExternalAssemblerBuffer(void* buffer,
+ int size);
+
+// Allocate a new growable AssemblerBuffer with a given initial size.
+V8_EXPORT_PRIVATE
+std::unique_ptr<AssemblerBuffer> NewAssemblerBuffer(int size);
+
class V8_EXPORT_PRIVATE AssemblerBase : public Malloced {
public:
- AssemblerBase(const AssemblerOptions& options, void* buffer, int buffer_size);
+ AssemblerBase(const AssemblerOptions& options,
+ std::unique_ptr<AssemblerBuffer>);
virtual ~AssemblerBase();
const AssemblerOptions& options() const { return options_; }
@@ -228,11 +239,13 @@ class V8_EXPORT_PRIVATE AssemblerBase : public Malloced {
jump_optimization_info_ = jump_opt;
}
+ void FinalizeJumpOptimizationInfo() {}
+
// Overwrite a host NaN with a quiet target NaN. Used by mksnapshot for
// cross-snapshotting.
- static void QuietNaN(HeapObject* nan) { }
+ static void QuietNaN(HeapObject nan) {}
- int pc_offset() const { return static_cast<int>(pc_ - buffer_); }
+ int pc_offset() const { return static_cast<int>(pc_ - buffer_start_); }
// This function is called when code generation is aborted, so that
// the assembler could clean up internal data structures.
@@ -241,6 +254,14 @@ class V8_EXPORT_PRIVATE AssemblerBase : public Malloced {
// Debugging
void Print(Isolate* isolate);
+ // Record an inline code comment that can be used by a disassembler.
+ // Use --code-comments to enable.
+ void RecordComment(const char* msg) {
+ if (FLAG_code_comments) {
+ code_comments_writer_.Add(pc_offset(), std::string(msg));
+ }
+ }
+
static const int kMinimalBufferSize = 4*KB;
static void FlushICache(void* start, size_t size);
@@ -248,9 +269,6 @@ class V8_EXPORT_PRIVATE AssemblerBase : public Malloced {
return FlushICache(reinterpret_cast<void*>(start), size);
}
- // Used to print the name of some special registers.
- static const char* GetSpecialRegisterName(int code) { return "UNKNOWN"; }
-
protected:
// Add 'target' to the {code_targets_} vector, if necessary, and return the
// offset at which it is stored.
@@ -259,15 +277,12 @@ class V8_EXPORT_PRIVATE AssemblerBase : public Malloced {
// Update to the code target at {code_target_index} to {target}.
void UpdateCodeTarget(intptr_t code_target_index, Handle<Code> target);
// Reserves space in the code target vector.
- void ReserveCodeTargetSpace(size_t num_of_code_targets) {
- code_targets_.reserve(num_of_code_targets);
- }
+ void ReserveCodeTargetSpace(size_t num_of_code_targets);
- // The buffer into which code and relocation info are generated. It could
- // either be owned by the assembler or be provided externally.
- byte* buffer_;
- int buffer_size_;
- bool own_buffer_;
+ // The buffer into which code and relocation info are generated.
+ std::unique_ptr<AssemblerBuffer> buffer_;
+ // Cached from {buffer_->start()}, for faster access.
+ byte* buffer_start_;
std::forward_list<HeapObjectRequest> heap_object_requests_;
// The program counter, which points into the buffer above and moves forward.
// TODO(jkummerow): This should probably have type {Address}.
@@ -299,6 +314,8 @@ class V8_EXPORT_PRIVATE AssemblerBase : public Malloced {
return true;
}
+ CodeCommentsWriter code_comments_writer_;
+
private:
// Before we copy code into the code space, we sometimes cannot encode
// call/jump code targets as we normally would, as the difference between the
@@ -380,302 +397,6 @@ class CpuFeatureScope {
#endif
};
-// CpuFeatures keeps track of which features are supported by the target CPU.
-// Supported features must be enabled by a CpuFeatureScope before use.
-// Example:
-// if (assembler->IsSupported(SSE3)) {
-// CpuFeatureScope fscope(assembler, SSE3);
-// // Generate code containing SSE3 instructions.
-// } else {
-// // Generate alternative code.
-// }
-class CpuFeatures : public AllStatic {
- public:
- static void Probe(bool cross_compile) {
- STATIC_ASSERT(NUMBER_OF_CPU_FEATURES <= kBitsPerInt);
- if (initialized_) return;
- initialized_ = true;
- ProbeImpl(cross_compile);
- }
-
- static unsigned SupportedFeatures() {
- Probe(false);
- return supported_;
- }
-
- static bool IsSupported(CpuFeature f) {
- return (supported_ & (1u << f)) != 0;
- }
-
- static inline bool SupportsOptimizer();
-
- static inline bool SupportsWasmSimd128();
-
- static inline unsigned icache_line_size() {
- DCHECK_NE(icache_line_size_, 0);
- return icache_line_size_;
- }
-
- static inline unsigned dcache_line_size() {
- DCHECK_NE(dcache_line_size_, 0);
- return dcache_line_size_;
- }
-
- static void PrintTarget();
- static void PrintFeatures();
-
- private:
- friend class ExternalReference;
- friend class AssemblerBase;
- // Flush instruction cache.
- static void FlushICache(void* start, size_t size);
-
- // Platform-dependent implementation.
- static void ProbeImpl(bool cross_compile);
-
- static unsigned supported_;
- static unsigned icache_line_size_;
- static unsigned dcache_line_size_;
- static bool initialized_;
- DISALLOW_COPY_AND_ASSIGN(CpuFeatures);
-};
-
-// -----------------------------------------------------------------------------
-// Utility functions
-
-// Computes pow(x, y) with the special cases in the spec for Math.pow.
-double power_helper(double x, double y);
-double power_double_int(double x, int y);
-double power_double_double(double x, double y);
-
-
-// -----------------------------------------------------------------------------
-// Constant pool support
-
-class ConstantPoolEntry {
- public:
- ConstantPoolEntry() = default;
- ConstantPoolEntry(int position, intptr_t value, bool sharing_ok,
- RelocInfo::Mode rmode = RelocInfo::NONE)
- : position_(position),
- merged_index_(sharing_ok ? SHARING_ALLOWED : SHARING_PROHIBITED),
- value_(value),
- rmode_(rmode) {}
- ConstantPoolEntry(int position, Double value,
- RelocInfo::Mode rmode = RelocInfo::NONE)
- : position_(position),
- merged_index_(SHARING_ALLOWED),
- value64_(value.AsUint64()),
- rmode_(rmode) {}
-
- int position() const { return position_; }
- bool sharing_ok() const { return merged_index_ != SHARING_PROHIBITED; }
- bool is_merged() const { return merged_index_ >= 0; }
- int merged_index() const {
- DCHECK(is_merged());
- return merged_index_;
- }
- void set_merged_index(int index) {
- DCHECK(sharing_ok());
- merged_index_ = index;
- DCHECK(is_merged());
- }
- int offset() const {
- DCHECK_GE(merged_index_, 0);
- return merged_index_;
- }
- void set_offset(int offset) {
- DCHECK_GE(offset, 0);
- merged_index_ = offset;
- }
- intptr_t value() const { return value_; }
- uint64_t value64() const { return value64_; }
- RelocInfo::Mode rmode() const { return rmode_; }
-
- enum Type { INTPTR, DOUBLE, NUMBER_OF_TYPES };
-
- static int size(Type type) {
- return (type == INTPTR) ? kPointerSize : kDoubleSize;
- }
-
- enum Access { REGULAR, OVERFLOWED };
-
- private:
- int position_;
- int merged_index_;
- union {
- intptr_t value_;
- uint64_t value64_;
- };
- // TODO(leszeks): The way we use this, it could probably be packed into
- // merged_index_ if size is a concern.
- RelocInfo::Mode rmode_;
- enum { SHARING_PROHIBITED = -2, SHARING_ALLOWED = -1 };
-};
-
-
-// -----------------------------------------------------------------------------
-// Embedded constant pool support
-
-class ConstantPoolBuilder {
- public:
- ConstantPoolBuilder(int ptr_reach_bits, int double_reach_bits);
-
- // Add pointer-sized constant to the embedded constant pool
- ConstantPoolEntry::Access AddEntry(int position, intptr_t value,
- bool sharing_ok) {
- ConstantPoolEntry entry(position, value, sharing_ok);
- return AddEntry(entry, ConstantPoolEntry::INTPTR);
- }
-
- // Add double constant to the embedded constant pool
- ConstantPoolEntry::Access AddEntry(int position, Double value) {
- ConstantPoolEntry entry(position, value);
- return AddEntry(entry, ConstantPoolEntry::DOUBLE);
- }
-
- // Add double constant to the embedded constant pool
- ConstantPoolEntry::Access AddEntry(int position, double value) {
- return AddEntry(position, Double(value));
- }
-
- // Previews the access type required for the next new entry to be added.
- ConstantPoolEntry::Access NextAccess(ConstantPoolEntry::Type type) const;
-
- bool IsEmpty() {
- return info_[ConstantPoolEntry::INTPTR].entries.empty() &&
- info_[ConstantPoolEntry::INTPTR].shared_entries.empty() &&
- info_[ConstantPoolEntry::DOUBLE].entries.empty() &&
- info_[ConstantPoolEntry::DOUBLE].shared_entries.empty();
- }
-
- // Emit the constant pool. Invoke only after all entries have been
- // added and all instructions have been emitted.
- // Returns position of the emitted pool (zero implies no constant pool).
- int Emit(Assembler* assm);
-
- // Returns the label associated with the start of the constant pool.
- // Linking to this label in the function prologue may provide an
- // efficient means of constant pool pointer register initialization
- // on some architectures.
- inline Label* EmittedPosition() { return &emitted_label_; }
-
- private:
- ConstantPoolEntry::Access AddEntry(ConstantPoolEntry& entry,
- ConstantPoolEntry::Type type);
- void EmitSharedEntries(Assembler* assm, ConstantPoolEntry::Type type);
- void EmitGroup(Assembler* assm, ConstantPoolEntry::Access access,
- ConstantPoolEntry::Type type);
-
- struct PerTypeEntryInfo {
- PerTypeEntryInfo() : regular_count(0), overflow_start(-1) {}
- bool overflow() const {
- return (overflow_start >= 0 &&
- overflow_start < static_cast<int>(entries.size()));
- }
- int regular_reach_bits;
- int regular_count;
- int overflow_start;
- std::vector<ConstantPoolEntry> entries;
- std::vector<ConstantPoolEntry> shared_entries;
- };
-
- Label emitted_label_; // Records pc_offset of emitted pool
- PerTypeEntryInfo info_[ConstantPoolEntry::NUMBER_OF_TYPES];
-};
-
-// Base type for CPU Registers.
-//
-// 1) We would prefer to use an enum for registers, but enum values are
-// assignment-compatible with int, which has caused code-generation bugs.
-//
-// 2) By not using an enum, we are possibly preventing the compiler from
-// doing certain constant folds, which may significantly reduce the
-// code generated for some assembly instructions (because they boil down
-// to a few constants). If this is a problem, we could change the code
-// such that we use an enum in optimized mode, and the class in debug
-// mode. This way we get the compile-time error checking in debug mode
-// and best performance in optimized code.
-template <typename SubType, int kAfterLastRegister>
-class RegisterBase {
- // Internal enum class; used for calling constexpr methods, where we need to
- // pass an integral type as template parameter.
- enum class RegisterCode : int { kFirst = 0, kAfterLast = kAfterLastRegister };
-
- public:
- static constexpr int kCode_no_reg = -1;
- static constexpr int kNumRegisters = kAfterLastRegister;
-
- static constexpr SubType no_reg() { return SubType{kCode_no_reg}; }
-
- template <int code>
- static constexpr SubType from_code() {
- static_assert(code >= 0 && code < kNumRegisters, "must be valid reg code");
- return SubType{code};
- }
-
- constexpr operator RegisterCode() const {
- return static_cast<RegisterCode>(reg_code_);
- }
-
- template <RegisterCode reg_code>
- static constexpr int code() {
- static_assert(
- reg_code >= RegisterCode::kFirst && reg_code < RegisterCode::kAfterLast,
- "must be valid reg");
- return static_cast<int>(reg_code);
- }
-
- template <RegisterCode reg_code>
- static constexpr RegList bit() {
- return RegList{1} << code<reg_code>();
- }
-
- static SubType from_code(int code) {
- DCHECK_LE(0, code);
- DCHECK_GT(kNumRegisters, code);
- return SubType{code};
- }
-
- // Constexpr version (pass registers as template parameters).
- template <RegisterCode... reg_codes>
- static constexpr RegList ListOf() {
- return CombineRegLists(RegisterBase::bit<reg_codes>()...);
- }
-
- // Non-constexpr version (pass registers as method parameters).
- template <typename... Register>
- static RegList ListOf(Register... regs) {
- return CombineRegLists(regs.bit()...);
- }
-
- bool is_valid() const { return reg_code_ != kCode_no_reg; }
-
- int code() const {
- DCHECK(is_valid());
- return reg_code_;
- }
-
- RegList bit() const { return RegList{1} << code(); }
-
- inline constexpr bool operator==(SubType other) const {
- return reg_code_ == other.reg_code_;
- }
- inline constexpr bool operator!=(SubType other) const {
- return reg_code_ != other.reg_code_;
- }
-
- protected:
- explicit constexpr RegisterBase(int code) : reg_code_(code) {}
- int reg_code_;
-};
-
-template <typename SubType, int kAfterLastRegister>
-inline std::ostream& operator<<(std::ostream& os,
- RegisterBase<SubType, kAfterLastRegister> reg) {
- return reg.is_valid() ? os << "r" << reg.code() : os << "<invalid reg>";
-}
-
} // namespace internal
} // namespace v8
#endif // V8_ASSEMBLER_H_
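RecordComment no longer emits a RelocInfo::COMMENT entry inline; comments are buffered as (pc offset, text) pairs and flushed once by WriteCodeComments after all code is emitted, so they never perturb the instruction stream. A minimal sketch of that collect-then-emit scheme, assuming a hypothetical table type in place of CodeCommentsWriter:

    #include <cstddef>
    #include <cstdio>
    #include <string>
    #include <utility>
    #include <vector>

    // Buffers (pc offset, text) pairs while code is emitted, then writes
    // them out once as a trailing section.
    class CommentTable {
     public:
      void Add(int pc_offset, std::string text) {
        entries_.emplace_back(pc_offset, std::move(text));
      }
      size_t entry_count() const { return entries_.size(); }
      void Emit() const {
        for (const auto& e : entries_)
          std::printf("0x%04x  ;; %s\n", e.first, e.second.c_str());
      }

     private:
      std::vector<std::pair<int, std::string>> entries_;
    };

    int main() {
      CommentTable comments;
      comments.Add(0x00, "-- prologue --");
      comments.Add(0x18, "call runtime");
      if (comments.entry_count() > 0) comments.Emit();  // cf. WriteCodeComments
    }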
diff --git a/deps/v8/src/assert-scope.cc b/deps/v8/src/assert-scope.cc
index 643967411f..114942f1d3 100644
--- a/deps/v8/src/assert-scope.cc
+++ b/deps/v8/src/assert-scope.cc
@@ -14,20 +14,9 @@ namespace internal {
namespace {
-struct PerThreadAssertKeyConstructTrait final {
- static void Construct(void* key_arg) {
- auto key = reinterpret_cast<base::Thread::LocalStorageKey*>(key_arg);
- *key = base::Thread::CreateThreadLocalKey();
- }
-};
-
-
-typedef base::LazyStaticInstance<base::Thread::LocalStorageKey,
- PerThreadAssertKeyConstructTrait>::type
- PerThreadAssertKey;
-
-
-PerThreadAssertKey kPerThreadAssertKey;
+DEFINE_LAZY_LEAKY_OBJECT_GETTER(base::Thread::LocalStorageKey,
+ GetPerThreadAssertKey,
+ base::Thread::CreateThreadLocalKey());
} // namespace
@@ -54,10 +43,10 @@ class PerThreadAssertData final {
static PerThreadAssertData* GetCurrent() {
return reinterpret_cast<PerThreadAssertData*>(
- base::Thread::GetThreadLocal(kPerThreadAssertKey.Get()));
+ base::Thread::GetThreadLocal(*GetPerThreadAssertKey()));
}
static void SetCurrent(PerThreadAssertData* data) {
- base::Thread::SetThreadLocal(kPerThreadAssertKey.Get(), data);
+ base::Thread::SetThreadLocal(*GetPerThreadAssertKey(), data);
}
private:
@@ -67,45 +56,43 @@ class PerThreadAssertData final {
DISALLOW_COPY_AND_ASSIGN(PerThreadAssertData);
};
-
template <PerThreadAssertType kType, bool kAllow>
-PerThreadAssertScope<kType, kAllow>::PerThreadAssertScope()
- : data_(PerThreadAssertData::GetCurrent()) {
- if (data_ == nullptr) {
- data_ = new PerThreadAssertData();
- PerThreadAssertData::SetCurrent(data_);
+PerThreadAssertScope<kType, kAllow>::PerThreadAssertScope() {
+ PerThreadAssertData* current_data = PerThreadAssertData::GetCurrent();
+ if (current_data == nullptr) {
+ current_data = new PerThreadAssertData();
+ PerThreadAssertData::SetCurrent(current_data);
}
- data_->IncrementLevel();
- old_state_ = data_->Get(kType);
- data_->Set(kType, kAllow);
+ data_and_old_state_.update(current_data, current_data->Get(kType));
+ current_data->IncrementLevel();
+ current_data->Set(kType, kAllow);
}
-
template <PerThreadAssertType kType, bool kAllow>
PerThreadAssertScope<kType, kAllow>::~PerThreadAssertScope() {
- if (data_ == nullptr) return;
+ if (data() == nullptr) return;
Release();
}
template <PerThreadAssertType kType, bool kAllow>
void PerThreadAssertScope<kType, kAllow>::Release() {
- DCHECK_NOT_NULL(data_);
- data_->Set(kType, old_state_);
- if (data_->DecrementLevel()) {
+ auto* current_data = data();
+ DCHECK_NOT_NULL(current_data);
+ current_data->Set(kType, old_state());
+ if (current_data->DecrementLevel()) {
PerThreadAssertData::SetCurrent(nullptr);
- delete data_;
+ delete current_data;
}
- data_ = nullptr;
+ set_data(nullptr);
}
// static
template <PerThreadAssertType kType, bool kAllow>
bool PerThreadAssertScope<kType, kAllow>::IsAllowed() {
- PerThreadAssertData* data = PerThreadAssertData::GetCurrent();
- return data == nullptr || data->Get(kType);
+ PerThreadAssertData* current_data = PerThreadAssertData::GetCurrent();
+ return current_data == nullptr || current_data->Get(kType);
}
-
template <PerIsolateAssertType kType, bool kAllow>
class PerIsolateAssertScope<kType, kAllow>::DataBit
: public BitField<bool, kType, 1> {};
@@ -151,6 +138,8 @@ template class PerIsolateAssertScope<JAVASCRIPT_EXECUTION_ASSERT, false>;
template class PerIsolateAssertScope<JAVASCRIPT_EXECUTION_ASSERT, true>;
template class PerIsolateAssertScope<JAVASCRIPT_EXECUTION_THROWS, false>;
template class PerIsolateAssertScope<JAVASCRIPT_EXECUTION_THROWS, true>;
+template class PerIsolateAssertScope<JAVASCRIPT_EXECUTION_DUMP, false>;
+template class PerIsolateAssertScope<JAVASCRIPT_EXECUTION_DUMP, true>;
template class PerIsolateAssertScope<DEOPTIMIZATION_ASSERT, false>;
template class PerIsolateAssertScope<DEOPTIMIZATION_ASSERT, true>;
template class PerIsolateAssertScope<COMPILATION_ASSERT, false>;
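DEFINE_LAZY_LEAKY_OBJECT_GETTER replaces the hand-rolled LazyStaticInstance trait with the C++11 function-local-static idiom: initialized once, thread-safe, and deliberately leaked so no destructor runs at an unpredictable shutdown time. A plausible expansion, sketched under the assumption that the macro boils down to a magic static (the real definition lives in V8's base library):

    #include <cassert>

    struct LocalStorageKey { int id; };
    static LocalStorageKey CreateThreadLocalKey() { return LocalStorageKey{42}; }

    // Assumed expansion of DEFINE_LAZY_LEAKY_OBJECT_GETTER(T, Get, init...):
    // a function-local static pointer, initialized exactly once and leaked.
    LocalStorageKey* GetPerThreadAssertKey() {
      static LocalStorageKey* key = new LocalStorageKey(CreateThreadLocalKey());
      return key;
    }

    int main() {
      assert(GetPerThreadAssertKey() == GetPerThreadAssertKey());  // one object
      assert(GetPerThreadAssertKey()->id == 42);
    }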
diff --git a/deps/v8/src/assert-scope.h b/deps/v8/src/assert-scope.h
index b64f95dfa5..0a41af7f6a 100644
--- a/deps/v8/src/assert-scope.h
+++ b/deps/v8/src/assert-scope.h
@@ -6,8 +6,10 @@
#define V8_ASSERT_SCOPE_H_
#include <stdint.h>
+
#include "src/base/macros.h"
#include "src/globals.h"
+#include "src/pointer-with-payload.h"
namespace v8 {
namespace internal {
@@ -16,6 +18,10 @@ namespace internal {
class Isolate;
class PerThreadAssertData;
+template <>
+struct PointerWithPayloadTraits<PerThreadAssertData> {
+ static constexpr int value = 1;
+};
enum PerThreadAssertType {
HEAP_ALLOCATION_ASSERT,
@@ -29,6 +35,7 @@ enum PerThreadAssertType {
enum PerIsolateAssertType {
JAVASCRIPT_EXECUTION_ASSERT,
JAVASCRIPT_EXECUTION_THROWS,
+ JAVASCRIPT_EXECUTION_DUMP,
DEOPTIMIZATION_ASSERT,
COMPILATION_ASSERT,
NO_EXCEPTION_ASSERT
@@ -45,8 +52,21 @@ class PerThreadAssertScope {
void Release();
private:
- PerThreadAssertData* data_;
- bool old_state_;
+ PointerWithPayload<PerThreadAssertData, bool, 1> data_and_old_state_;
+
+ V8_INLINE void set_data(PerThreadAssertData* data) {
+ data_and_old_state_.SetPointer(data);
+ }
+
+ V8_INLINE PerThreadAssertData* data() const {
+ return data_and_old_state_.GetPointer();
+ }
+
+ V8_INLINE void set_old_state(bool old_state) {
+ return data_and_old_state_.SetPayload(old_state);
+ }
+
+ V8_INLINE bool old_state() const { return data_and_old_state_.GetPayload(); }
DISALLOW_COPY_AND_ASSIGN(PerThreadAssertScope);
};
@@ -112,6 +132,13 @@ typedef PerThreadAssertScopeDebugOnly<HANDLE_ALLOCATION_ASSERT, true>
// Scope to document where we do not expect any allocation and GC.
typedef PerThreadAssertScopeDebugOnly<HEAP_ALLOCATION_ASSERT, false>
DisallowHeapAllocation;
+#ifdef DEBUG
+#define DISALLOW_HEAP_ALLOCATION(name) DisallowHeapAllocation name
+#define DISALLOW_HEAP_ALLOCATION_REF(name) const DisallowHeapAllocation& name
+#else
+#define DISALLOW_HEAP_ALLOCATION(name)
+#define DISALLOW_HEAP_ALLOCATION_REF(name)
+#endif
// Scope to introduce an exception to DisallowHeapAllocation.
typedef PerThreadAssertScopeDebugOnly<HEAP_ALLOCATION_ASSERT, true>
@@ -174,6 +201,14 @@ typedef PerIsolateAssertScope<JAVASCRIPT_EXECUTION_THROWS, false>
typedef PerIsolateAssertScope<JAVASCRIPT_EXECUTION_THROWS, true>
NoThrowOnJavascriptExecution;
+// Scope in which javascript execution causes dumps.
+typedef PerIsolateAssertScope<JAVASCRIPT_EXECUTION_DUMP, false>
+ DumpOnJavascriptExecution;
+
+// Scope in which javascript execution does not cause dumps.
+typedef PerIsolateAssertScope<JAVASCRIPT_EXECUTION_DUMP, true>
+ NoDumpOnJavascriptExecution;
+
// Scope to document where we do not expect deoptimization.
typedef PerIsolateAssertScopeDebugOnly<DEOPTIMIZATION_ASSERT, false>
DisallowDeoptimization;
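PointerWithPayload packs the old bool old_state_ into the low bit of the data_ pointer, saving a word per scope; the PointerWithPayloadTraits specialization above advertises that one alignment bit is free. A standalone sketch of the trick, assuming at least 2-byte-aligned pointees:

    #include <cassert>
    #include <cstdint>

    // Stores a T* and a bool in one word by hiding the bool in the pointer's
    // low bit, which is always zero for suitably aligned objects.
    template <typename T>
    class PointerWithBool {
     public:
      void update(T* ptr, bool payload) {
        raw_ = reinterpret_cast<uintptr_t>(ptr) | static_cast<uintptr_t>(payload);
      }
      void SetPointer(T* ptr) { update(ptr, GetPayload()); }
      T* GetPointer() const { return reinterpret_cast<T*>(raw_ & ~uintptr_t{1}); }
      bool GetPayload() const { return raw_ & 1; }

     private:
      uintptr_t raw_ = 0;
    };

    int main() {
      alignas(2) static int data = 7;
      PointerWithBool<int> p;
      p.update(&data, true);
      assert(p.GetPointer() == &data && p.GetPayload());
      p.SetPointer(nullptr);  // payload survives pointer updates
      assert(p.GetPointer() == nullptr && p.GetPayload());
    }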
diff --git a/deps/v8/src/ast/ast-traversal-visitor.h b/deps/v8/src/ast/ast-traversal-visitor.h
index 640de541b5..21986789ba 100644
--- a/deps/v8/src/ast/ast-traversal-visitor.h
+++ b/deps/v8/src/ast/ast-traversal-visitor.h
@@ -41,7 +41,7 @@ class AstTraversalVisitor : public AstVisitor<Subclass> {
// Iteration left-to-right.
void VisitDeclarations(Declaration::List* declarations);
- void VisitStatements(ZonePtrList<Statement>* statements);
+ void VisitStatements(const ZonePtrList<Statement>* statements);
// Individual nodes
#define DECLARE_VISIT(type) void Visit##type(type* node);
@@ -112,11 +112,10 @@ void AstTraversalVisitor<Subclass>::VisitDeclarations(
template <class Subclass>
void AstTraversalVisitor<Subclass>::VisitStatements(
- ZonePtrList<Statement>* stmts) {
+ const ZonePtrList<Statement>* stmts) {
for (int i = 0; i < stmts->length(); ++i) {
Statement* stmt = stmts->at(i);
RECURSE(Visit(stmt));
- if (stmt->IsJump()) break;
}
}
@@ -205,7 +204,7 @@ void AstTraversalVisitor<Subclass>::VisitSwitchStatement(
Expression* label = clause->label();
RECURSE(Visit(label));
}
- ZonePtrList<Statement>* stmts = clause->statements();
+ const ZonePtrList<Statement>* stmts = clause->statements();
RECURSE(VisitStatements(stmts));
}
}
@@ -244,17 +243,15 @@ template <class Subclass>
void AstTraversalVisitor<Subclass>::VisitForInStatement(ForInStatement* stmt) {
PROCESS_NODE(stmt);
RECURSE(Visit(stmt->each()));
- RECURSE(Visit(stmt->enumerable()));
+ RECURSE(Visit(stmt->subject()));
RECURSE(Visit(stmt->body()));
}
template <class Subclass>
void AstTraversalVisitor<Subclass>::VisitForOfStatement(ForOfStatement* stmt) {
PROCESS_NODE(stmt);
- RECURSE(Visit(stmt->assign_iterator()));
- RECURSE(Visit(stmt->next_result()));
- RECURSE(Visit(stmt->result_done()));
- RECURSE(Visit(stmt->assign_each()));
+ RECURSE(Visit(stmt->each()));
+ RECURSE(Visit(stmt->subject()));
RECURSE(Visit(stmt->body()));
}
@@ -330,7 +327,7 @@ void AstTraversalVisitor<Subclass>::VisitRegExpLiteral(RegExpLiteral* expr) {
template <class Subclass>
void AstTraversalVisitor<Subclass>::VisitObjectLiteral(ObjectLiteral* expr) {
PROCESS_EXPRESSION(expr);
- ZonePtrList<ObjectLiteralProperty>* props = expr->properties();
+ const ZonePtrList<ObjectLiteralProperty>* props = expr->properties();
for (int i = 0; i < props->length(); ++i) {
ObjectLiteralProperty* prop = props->at(i);
RECURSE_EXPRESSION(Visit(prop->key()));
@@ -341,7 +338,7 @@ void AstTraversalVisitor<Subclass>::VisitObjectLiteral(ObjectLiteral* expr) {
template <class Subclass>
void AstTraversalVisitor<Subclass>::VisitArrayLiteral(ArrayLiteral* expr) {
PROCESS_EXPRESSION(expr);
- ZonePtrList<Expression>* values = expr->values();
+ const ZonePtrList<Expression>* values = expr->values();
for (int i = 0; i < values->length(); ++i) {
Expression* value = values->at(i);
RECURSE_EXPRESSION(Visit(value));
@@ -404,7 +401,7 @@ template <class Subclass>
void AstTraversalVisitor<Subclass>::VisitCall(Call* expr) {
PROCESS_EXPRESSION(expr);
RECURSE_EXPRESSION(Visit(expr->expression()));
- ZonePtrList<Expression>* args = expr->arguments();
+ const ZonePtrList<Expression>* args = expr->arguments();
for (int i = 0; i < args->length(); ++i) {
Expression* arg = args->at(i);
RECURSE_EXPRESSION(Visit(arg));
@@ -415,7 +412,7 @@ template <class Subclass>
void AstTraversalVisitor<Subclass>::VisitCallNew(CallNew* expr) {
PROCESS_EXPRESSION(expr);
RECURSE_EXPRESSION(Visit(expr->expression()));
- ZonePtrList<Expression>* args = expr->arguments();
+ const ZonePtrList<Expression>* args = expr->arguments();
for (int i = 0; i < args->length(); ++i) {
Expression* arg = args->at(i);
RECURSE_EXPRESSION(Visit(arg));
@@ -425,7 +422,7 @@ void AstTraversalVisitor<Subclass>::VisitCallNew(CallNew* expr) {
template <class Subclass>
void AstTraversalVisitor<Subclass>::VisitCallRuntime(CallRuntime* expr) {
PROCESS_EXPRESSION(expr);
- ZonePtrList<Expression>* args = expr->arguments();
+ const ZonePtrList<Expression>* args = expr->arguments();
for (int i = 0; i < args->length(); ++i) {
Expression* arg = args->at(i);
RECURSE_EXPRESSION(Visit(arg));
@@ -484,8 +481,8 @@ void AstTraversalVisitor<Subclass>::VisitClassLiteral(ClassLiteral* expr) {
if (expr->static_fields_initializer() != nullptr) {
RECURSE_EXPRESSION(Visit(expr->static_fields_initializer()));
}
- if (expr->instance_fields_initializer_function() != nullptr) {
- RECURSE_EXPRESSION(Visit(expr->instance_fields_initializer_function()));
+ if (expr->instance_members_initializer_function() != nullptr) {
+ RECURSE_EXPRESSION(Visit(expr->instance_members_initializer_function()));
}
ZonePtrList<ClassLiteral::Property>* props = expr->properties();
for (int i = 0; i < props->length(); ++i) {
@@ -498,8 +495,8 @@ void AstTraversalVisitor<Subclass>::VisitClassLiteral(ClassLiteral* expr) {
}
template <class Subclass>
-void AstTraversalVisitor<Subclass>::VisitInitializeClassFieldsStatement(
- InitializeClassFieldsStatement* stmt) {
+void AstTraversalVisitor<Subclass>::VisitInitializeClassMembersStatement(
+ InitializeClassMembersStatement* stmt) {
PROCESS_NODE(stmt);
ZonePtrList<ClassLiteral::Property>* props = stmt->fields();
for (int i = 0; i < props->length(); ++i) {
@@ -533,12 +530,6 @@ void AstTraversalVisitor<Subclass>::VisitEmptyParentheses(
}
template <class Subclass>
-void AstTraversalVisitor<Subclass>::VisitGetIterator(GetIterator* expr) {
- PROCESS_EXPRESSION(expr);
- RECURSE_EXPRESSION(Visit(expr->iterable()));
-}
-
-template <class Subclass>
void AstTraversalVisitor<Subclass>::VisitGetTemplateObject(
GetTemplateObject* expr) {
PROCESS_EXPRESSION(expr);
@@ -577,13 +568,6 @@ void AstTraversalVisitor<Subclass>::VisitSuperCallReference(
RECURSE_EXPRESSION(VisitVariableProxy(expr->this_function_var()));
}
-template <class Subclass>
-void AstTraversalVisitor<Subclass>::VisitRewritableExpression(
- RewritableExpression* expr) {
- PROCESS_EXPRESSION(expr);
- RECURSE(Visit(expr->expression()));
-}
-
#undef PROCESS_NODE
#undef PROCESS_EXPRESSION
#undef RECURSE_EXPRESSION
diff --git a/deps/v8/src/ast/ast-value-factory.cc b/deps/v8/src/ast/ast-value-factory.cc
index 67ea77bfbf..2a35097f9c 100644
--- a/deps/v8/src/ast/ast-value-factory.cc
+++ b/deps/v8/src/ast/ast-value-factory.cc
@@ -59,7 +59,7 @@ class AstRawStringInternalizationKey : public StringTableKey {
explicit AstRawStringInternalizationKey(const AstRawString* string)
: StringTableKey(string->hash_field()), string_(string) {}
- bool IsMatch(Object* other) override {
+ bool IsMatch(Object other) override {
if (string_->is_one_byte())
return String::cast(other)->IsOneByteEqualTo(string_->literal_bytes_);
return String::cast(other)->IsTwoByteEqualTo(
@@ -208,9 +208,9 @@ AstStringConstants::AstStringConstants(Isolate* isolate, uint64_t hash_seed)
AstRawString* AstValueFactory::GetOneByteStringInternal(
Vector<const uint8_t> literal) {
- if (literal.length() == 1 && IsInRange(literal[0], 'a', 'z')) {
- int key = literal[0] - 'a';
- if (one_character_strings_[key] == nullptr) {
+ if (literal.length() == 1 && literal[0] < kMaxOneCharStringValue) {
+ int key = literal[0];
+ if (V8_UNLIKELY(one_character_strings_[key] == nullptr)) {
uint32_t hash_field = StringHasher::HashSequentialString<uint8_t>(
literal.start(), literal.length(), hash_seed_);
one_character_strings_[key] = GetString(hash_field, true, literal);
@@ -232,7 +232,7 @@ AstRawString* AstValueFactory::GetTwoByteStringInternal(
const AstRawString* AstValueFactory::GetString(Handle<String> literal) {
AstRawString* result = nullptr;
DisallowHeapAllocation no_gc;
- String::FlatContent content = literal->GetFlatContent();
+ String::FlatContent content = literal->GetFlatContent(no_gc);
if (content.IsOneByte()) {
result = GetOneByteStringInternal(content.ToOneByteVector());
} else {
@@ -247,9 +247,6 @@ const AstRawString* AstValueFactory::CloneFromOtherFactory(
const AstRawString* result = GetString(
raw_string->hash_field(), raw_string->is_one_byte(),
Vector<const byte>(raw_string->raw_data(), raw_string->byte_length()));
- // Check we weren't trying to clone a string that was already in this
- // ast-value-factory.
- DCHECK_NE(result, raw_string);
return result;
}
diff --git a/deps/v8/src/ast/ast-value-factory.h b/deps/v8/src/ast/ast-value-factory.h
index 726d961362..472527bebe 100644
--- a/deps/v8/src/ast/ast-value-factory.h
+++ b/deps/v8/src/ast/ast-value-factory.h
@@ -35,7 +35,6 @@
#include "src/globals.h"
#include "src/heap/factory.h"
#include "src/isolate.h"
-#include "src/utils.h"
// Ast(Raw|Cons)String and AstValueFactory are for storing strings and
// values independent of the V8 heap and internalizing them later. During
@@ -107,11 +106,11 @@ class AstRawString final : public ZoneObject {
#endif
}
- // {string_} is stored as String** instead of a Handle<String> so it can be
+ // {string_} is stored as Address* instead of a Handle<String> so it can be
// stored in a union with {next_}.
union {
AstRawString* next_;
- String** string_;
+ Address* string_;
};
Vector<const byte> literal_bytes_; // Memory owned by Zone.
@@ -163,12 +162,12 @@ class AstConsString final : public ZoneObject {
AstConsString* next() const { return next_; }
AstConsString** next_location() { return &next_; }
- // {string_} is stored as String** instead of a Handle<String> so it can be
+ // {string_} is stored as Address* instead of a Handle<String> so it can be
// stored in a union with {next_}.
void set_string(Handle<String> string) { string_ = string.location(); }
union {
AstConsString* next_;
- String** string_;
+ Address* string_;
};
struct Segment {
@@ -195,8 +194,10 @@ class AstBigInt {
// For generating constants.
#define AST_STRING_CONSTANTS(F) \
+ F(anonymous, "anonymous") \
F(anonymous_function, "(anonymous function)") \
F(arguments, "arguments") \
+ F(as, "as") \
F(async, "async") \
F(await, "await") \
F(bigint, "bigint") \
@@ -214,29 +215,34 @@ class AstBigInt {
F(dot_catch, ".catch") \
F(empty, "") \
F(eval, "eval") \
+ F(from, "from") \
F(function, "function") \
+ F(get, "get") \
F(get_space, "get ") \
F(length, "length") \
F(let, "let") \
+ F(meta, "meta") \
F(name, "name") \
F(native, "native") \
F(new_target, ".new.target") \
F(next, "next") \
F(number, "number") \
F(object, "object") \
+ F(of, "of") \
+ F(private_constructor, "#constructor") \
F(proto, "__proto__") \
F(prototype, "prototype") \
F(return, "return") \
+ F(set, "set") \
F(set_space, "set ") \
F(star_default_star, "*default*") \
F(string, "string") \
F(symbol, "symbol") \
+ F(target, "target") \
F(this, "this") \
F(this_function, ".this_function") \
F(throw, "throw") \
F(undefined, "undefined") \
- F(use_asm, "use asm") \
- F(use_strict, "use strict") \
F(value, "value")
class AstStringConstants final {
@@ -356,7 +362,8 @@ class AstValueFactory {
const AstConsString* empty_cons_string_;
// Caches one-character strings (for minified code).
- AstRawString* one_character_strings_[26];
+ static const int kMaxOneCharStringValue = 128;
+ AstRawString* one_character_strings_[kMaxOneCharStringValue];
Zone* zone_;
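The single-character string cache grows from the 26 lowercase letters to every one-byte character below 128 (kMaxOneCharStringValue), so minified identifiers such as 'A' or '$' now hit it too, and the V8_UNLIKELY hint marks the fill path as cold. The lookup pattern, sketched standalone with hypothetical types:

    #include <cassert>
    #include <cstdint>
    #include <memory>
    #include <string>

    constexpr int kMaxOneCharStringValue = 128;

    // One lazily filled slot per one-byte character value below 128,
    // mirroring one_character_strings_[kMaxOneCharStringValue].
    std::unique_ptr<std::string> one_char_cache[kMaxOneCharStringValue];

    const std::string* GetOneCharString(uint8_t c) {
      if (c >= kMaxOneCharStringValue) return nullptr;  // caller falls back
      if (one_char_cache[c] == nullptr) {               // cold fill path
        one_char_cache[c] =
            std::make_unique<std::string>(1, static_cast<char>(c));
      }
      return one_char_cache[c].get();
    }

    int main() {
      const std::string* s = GetOneCharString('$');
      assert(s != nullptr && *s == "$");
      assert(GetOneCharString('$') == s);  // second lookup hits the cache
    }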
diff --git a/deps/v8/src/ast/ast.cc b/deps/v8/src/ast/ast.cc
index 617a26b937..1c1802d602 100644
--- a/deps/v8/src/ast/ast.cc
+++ b/deps/v8/src/ast/ast.cc
@@ -12,7 +12,6 @@
#include "src/base/hashmap.h"
#include "src/builtins/builtins-constructor.h"
#include "src/builtins/builtins.h"
-#include "src/code-stubs.h"
#include "src/contexts.h"
#include "src/conversions-inl.h"
#include "src/double.h"
@@ -71,15 +70,6 @@ IterationStatement* AstNode::AsIterationStatement() {
}
}
-BreakableStatement* AstNode::AsBreakableStatement() {
- switch (node_type()) {
- BREAKABLE_NODE_LIST(RETURN_NODE);
- ITERATION_NODE_LIST(RETURN_NODE);
- default:
- return nullptr;
- }
-}
-
MaterializedLiteral* AstNode::AsMaterializedLiteral() {
switch (node_type()) {
LITERAL_NODE_LIST(RETURN_NODE);
@@ -142,10 +132,6 @@ bool Expression::ToBooleanIsFalse() const {
}
bool Expression::IsValidReferenceExpression() const {
- // We don't want expressions wrapped inside RewritableExpression to be
- // considered as valid reference expressions, as they will be rewritten
- // to something (most probably involving a do expression).
- if (IsRewritableExpression()) return false;
return IsProperty() ||
(IsVariableProxy() && AsVariableProxy()->IsValidReferenceExpression());
}
@@ -165,26 +151,6 @@ bool Expression::IsAccessorFunctionDefinition() const {
return IsFunctionLiteral() && IsAccessorFunction(AsFunctionLiteral()->kind());
}
-bool Statement::IsJump() const {
- switch (node_type()) {
-#define JUMP_NODE_LIST(V) \
- V(Block) \
- V(ExpressionStatement) \
- V(ContinueStatement) \
- V(BreakStatement) \
- V(ReturnStatement) \
- V(IfStatement)
-#define GENERATE_CASE(Node) \
- case k##Node: \
- return static_cast<const Node*>(this)->IsJump();
- JUMP_NODE_LIST(GENERATE_CASE)
-#undef GENERATE_CASE
-#undef JUMP_NODE_LIST
- default:
- return false;
- }
-}
-
VariableProxy::VariableProxy(Variable* var, int start_position)
: Expression(start_position, kVariableProxy),
raw_name_(var->raw_name()),
@@ -326,17 +292,19 @@ ObjectLiteralProperty::ObjectLiteralProperty(AstValueFactory* ast_value_factory,
}
bool LiteralProperty::NeedsSetFunctionName() const {
- return is_computed_name_ && (value_->IsAnonymousFunctionDefinition() ||
- value_->IsConciseMethodDefinition() ||
- value_->IsAccessorFunctionDefinition());
+ return is_computed_name() && (value_->IsAnonymousFunctionDefinition() ||
+ value_->IsConciseMethodDefinition() ||
+ value_->IsAccessorFunctionDefinition());
}
ClassLiteralProperty::ClassLiteralProperty(Expression* key, Expression* value,
Kind kind, bool is_static,
- bool is_computed_name)
+ bool is_computed_name,
+ bool is_private)
: LiteralProperty(key, value, is_computed_name),
kind_(kind),
is_static_(is_static),
+ is_private_(is_private),
private_or_computed_name_var_(nullptr) {}
bool ObjectLiteral::Property::IsCompileTimeValue() const {
@@ -644,7 +612,7 @@ void ArrayLiteral::BuildBoilerplateDescription(Isolate* isolate) {
bool ArrayLiteral::IsFastCloningSupported() const {
return depth() <= 1 &&
- values()->length() <=
+ values_.length() <=
ConstructorBuiltins::kMaximumClonedShallowArrayElements;
}
@@ -742,7 +710,7 @@ static bool IsCommutativeOperationWithSmiLiteral(Token::Value op) {
// Check for the pattern: x + 1.
static bool MatchSmiLiteralOperation(Expression* left, Expression* right,
- Expression** expr, Smi** literal) {
+ Expression** expr, Smi* literal) {
if (right->IsSmiLiteral()) {
*expr = left;
*literal = right->AsLiteral()->AsSmiLiteral();
@@ -752,7 +720,7 @@ static bool MatchSmiLiteralOperation(Expression* left, Expression* right,
}
bool BinaryOperation::IsSmiLiteralOperation(Expression** subexpr,
- Smi** literal) {
+ Smi* literal) {
return MatchSmiLiteralOperation(left_, right_, subexpr, literal) ||
(IsCommutativeOperationWithSmiLiteral(op()) &&
MatchSmiLiteralOperation(right_, left_, subexpr, literal));
@@ -860,8 +828,11 @@ Call::CallType Call::GetCallType() const {
return OTHER_CALL;
}
-CaseClause::CaseClause(Expression* label, ZonePtrList<Statement>* statements)
- : label_(label), statements_(statements) {}
+CaseClause::CaseClause(Zone* zone, Expression* label,
+ const ScopedPtrList<Statement>& statements)
+ : label_(label), statements_(0, nullptr) {
+ statements.CopyTo(&statements_, zone);
+}
bool Literal::IsPropertyName() const {
if (type() != kString) return false;
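The CaseClause change above follows a pattern repeated throughout this patch (Block, Call, CallNew, CallRuntime, ObjectLiteral, ArrayLiteral, FunctionLiteral): nodes now own an inline, exactly-sized list that is bulk-copied from a temporary ScopedPtrList. A toy sketch of the handoff:

#include <vector>

template <typename T>
struct ToyZoneList {
  std::vector<T*> items;  // stands in for the zone-owned ZonePtrList<T>
};

template <typename T>
class ToyScopedList {
 public:
  void Add(T* t) { buffer_.push_back(t); }

  // One bulk copy into final, exactly-sized storage; the scoped buffer
  // is then free to be reused for the next node being built.
  void CopyTo(ToyZoneList<T>* out) const { out->items = buffer_; }

 private:
  std::vector<T*> buffer_;
};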
diff --git a/deps/v8/src/ast/ast.h b/deps/v8/src/ast/ast.h
index 6cc2cbc8ec..4f9f083d12 100644
--- a/deps/v8/src/ast/ast.h
+++ b/deps/v8/src/ast/ast.h
@@ -16,6 +16,7 @@
#include "src/isolate.h"
#include "src/label.h"
#include "src/objects/literal-objects.h"
+#include "src/objects/smi.h"
#include "src/parsing/token.h"
#include "src/runtime/runtime.h"
@@ -64,7 +65,7 @@ namespace internal {
V(TryCatchStatement) \
V(TryFinallyStatement) \
V(DebuggerStatement) \
- V(InitializeClassFieldsStatement)
+ V(InitializeClassMembersStatement)
#define LITERAL_NODE_LIST(V) \
V(RegExpLiteral) \
@@ -88,14 +89,12 @@ namespace internal {
V(DoExpression) \
V(EmptyParentheses) \
V(FunctionLiteral) \
- V(GetIterator) \
V(GetTemplateObject) \
V(ImportCallExpression) \
V(Literal) \
V(NativeFunctionLiteral) \
V(Property) \
V(ResolvedProperty) \
- V(RewritableExpression) \
V(Spread) \
V(StoreInArrayLiteral) \
V(SuperCallReference) \
@@ -108,6 +107,8 @@ namespace internal {
V(Yield) \
V(YieldStar)
+#define FAILURE_NODE_LIST(V) V(FailureExpression)
+
#define AST_NODE_LIST(V) \
DECLARATION_NODE_LIST(V) \
STATEMENT_NODE_LIST(V) \
@@ -122,17 +123,21 @@ class Expression;
class IterationStatement;
class MaterializedLiteral;
class NestedVariableDeclaration;
-class ProducedPreParsedScopeData;
+class ProducedPreparseData;
class Statement;
#define DEF_FORWARD_DECLARATION(type) class type;
AST_NODE_LIST(DEF_FORWARD_DECLARATION)
+FAILURE_NODE_LIST(DEF_FORWARD_DECLARATION)
#undef DEF_FORWARD_DECLARATION
class AstNode: public ZoneObject {
public:
#define DECLARE_TYPE_ENUM(type) k##type,
- enum NodeType : uint8_t { AST_NODE_LIST(DECLARE_TYPE_ENUM) };
+ enum NodeType : uint8_t {
+ AST_NODE_LIST(DECLARE_TYPE_ENUM) /* , */
+ FAILURE_NODE_LIST(DECLARE_TYPE_ENUM)
+ };
#undef DECLARE_TYPE_ENUM
void* operator new(size_t size, Zone* zone) { return zone->New(size); }
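The placement operator new above is the zone-allocation pattern the whole AST relies on; a self-contained toy (which ignores alignment and never reuses memory, unlike a real bump allocator) shows the shape:

#include <cstddef>
#include <vector>

class ToyZone {
 public:
  void* New(size_t size) {
    blocks_.emplace_back(size);
    return blocks_.back().data();
  }

 private:
  std::vector<std::vector<char>> blocks_;
};

struct ToyAstNode {
  void* operator new(size_t size, ToyZone* zone) { return zone->New(size); }
  int position = 0;
};

// Usage: ToyZone zone; auto* n = new (&zone) ToyAstNode();
// Dropping the zone frees every node at once; no destructors run.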
@@ -151,9 +156,9 @@ class AstNode: public ZoneObject {
V8_INLINE type* As##type(); \
V8_INLINE const type* As##type() const;
AST_NODE_LIST(DECLARE_NODE_FUNCTIONS)
+ FAILURE_NODE_LIST(DECLARE_NODE_FUNCTIONS)
#undef DECLARE_NODE_FUNCTIONS
- BreakableStatement* AsBreakableStatement();
IterationStatement* AsIterationStatement();
MaterializedLiteral* AsMaterializedLiteral();
@@ -175,10 +180,6 @@ class AstNode: public ZoneObject {
class Statement : public AstNode {
- public:
- bool IsEmpty() { return AsEmptyStatement() != nullptr; }
- bool IsJump() const;
-
protected:
Statement(int position, NodeType type) : AstNode(position, type) {}
@@ -243,10 +244,39 @@ class Expression : public AstNode {
bool IsCompileTimeValue();
+ bool IsPattern() {
+ STATIC_ASSERT(kObjectLiteral + 1 == kArrayLiteral);
+ return IsInRange(node_type(), kObjectLiteral, kArrayLiteral);
+ }
+
+ bool is_parenthesized() const {
+ return IsParenthesizedField::decode(bit_field_);
+ }
+
+ void mark_parenthesized() {
+ bit_field_ = IsParenthesizedField::update(bit_field_, true);
+ }
+
+ void clear_parenthesized() {
+ bit_field_ = IsParenthesizedField::update(bit_field_, false);
+ }
+
+ private:
+ class IsParenthesizedField
+ : public BitField<bool, AstNode::kNextBitFieldIndex, 1> {};
+
protected:
- Expression(int pos, NodeType type) : AstNode(pos, type) {}
+ Expression(int pos, NodeType type) : AstNode(pos, type) {
+ DCHECK(!is_parenthesized());
+ }
- static const uint8_t kNextBitFieldIndex = AstNode::kNextBitFieldIndex;
+ static const uint8_t kNextBitFieldIndex = IsParenthesizedField::kNext;
+};
+
+class FailureExpression : public Expression {
+ private:
+ friend class AstNodeFactory;
+ FailureExpression() : Expression(kNoSourcePosition, kFailureExpression) {}
};
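IsPattern() above leans on enum adjacency so one unsigned comparison replaces two equality checks, with the STATIC_ASSERT pinning the layout. A reduced sketch:

#include <cstdint>

enum ToyNodeType : uint8_t { kObjectLiteral, kArrayLiteral, kRegExpLiteral };

constexpr bool IsInRange(uint8_t value, uint8_t lo, uint8_t hi) {
  // Single unsigned compare; underflow wraps, so values below lo fail too.
  return static_cast<uint8_t>(value - lo) <= static_cast<uint8_t>(hi - lo);
}

static_assert(kObjectLiteral + 1 == kArrayLiteral, "kinds must stay adjacent");

constexpr bool IsPattern(ToyNodeType t) {
  return IsInRange(t, kObjectLiteral, kArrayLiteral);
}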
// V8's notion of BreakableStatement does not correspond to the notion of
@@ -309,14 +339,15 @@ class Block : public BreakableStatement {
inline ZonePtrList<const AstRawString>* labels() const;
- bool IsJump() const {
- return !statements_.is_empty() && statements_.last()->IsJump() &&
- labels() == nullptr; // Good enough as an approximation...
- }
-
Scope* scope() const { return scope_; }
void set_scope(Scope* scope) { scope_ = scope; }
+ void InitializeStatements(const ScopedPtrList<Statement>& statements,
+ Zone* zone) {
+ DCHECK_EQ(0, statements_.length());
+ statements.CopyTo(&statements_, zone);
+ }
+
private:
friend class AstNodeFactory;
@@ -337,6 +368,9 @@ class Block : public BreakableStatement {
bit_field_ |= IgnoreCompletionField::encode(ignore_completion_value) |
IsLabeledField::encode(labels != nullptr);
}
+
+ Block(ZonePtrList<const AstRawString>* labels, bool ignore_completion_value)
+ : Block(nullptr, labels, 0, ignore_completion_value) {}
};
class LabeledBlock final : public Block {
@@ -352,6 +386,10 @@ class LabeledBlock final : public Block {
DCHECK_GT(labels->length(), 0);
}
+ LabeledBlock(ZonePtrList<const AstRawString>* labels,
+ bool ignore_completion_value)
+ : LabeledBlock(nullptr, labels, 0, ignore_completion_value) {}
+
ZonePtrList<const AstRawString>* labels_;
};
@@ -385,14 +423,14 @@ class Declaration : public AstNode {
public:
typedef base::ThreadedList<Declaration> List;
- VariableProxy* proxy() const { return proxy_; }
+ Variable* var() const { return var_; }
+ void set_var(Variable* var) { var_ = var; }
protected:
- Declaration(VariableProxy* proxy, int pos, NodeType type)
- : AstNode(pos, type), proxy_(proxy), next_(nullptr) {}
+ Declaration(int pos, NodeType type) : AstNode(pos, type), next_(nullptr) {}
private:
- VariableProxy* proxy_;
+ Variable* var_;
// Declarations list threaded through the declarations.
Declaration** next() { return &next_; }
Declaration* next_;
@@ -411,8 +449,8 @@ class VariableDeclaration : public Declaration {
: public BitField<bool, Declaration::kNextBitFieldIndex, 1> {};
protected:
- VariableDeclaration(VariableProxy* proxy, int pos, bool is_nested = false)
- : Declaration(proxy, pos, kVariableDeclaration) {
+ explicit VariableDeclaration(int pos, bool is_nested = false)
+ : Declaration(pos, kVariableDeclaration) {
bit_field_ = IsNestedField::update(bit_field_, is_nested);
}
@@ -429,8 +467,8 @@ class NestedVariableDeclaration final : public VariableDeclaration {
private:
friend class AstNodeFactory;
- NestedVariableDeclaration(VariableProxy* proxy, Scope* scope, int pos)
- : VariableDeclaration(proxy, pos, true), scope_(scope) {}
+ NestedVariableDeclaration(Scope* scope, int pos)
+ : VariableDeclaration(pos, true), scope_(scope) {}
// Nested scope from which the declaration originated.
Scope* scope_;
@@ -445,16 +483,26 @@ inline NestedVariableDeclaration* VariableDeclaration::AsNested() {
class FunctionDeclaration final : public Declaration {
public:
FunctionLiteral* fun() const { return fun_; }
+ bool declares_sloppy_block_function() const {
+ return DeclaresSloppyBlockFunction::decode(bit_field_);
+ }
private:
friend class AstNodeFactory;
- FunctionDeclaration(VariableProxy* proxy, FunctionLiteral* fun, int pos)
- : Declaration(proxy, pos, kFunctionDeclaration), fun_(fun) {
- DCHECK_NOT_NULL(fun);
+ class DeclaresSloppyBlockFunction
+ : public BitField<bool, Declaration::kNextBitFieldIndex, 1> {};
+
+ FunctionDeclaration(FunctionLiteral* fun, bool declares_sloppy_block_function,
+ int pos)
+ : Declaration(pos, kFunctionDeclaration), fun_(fun) {
+ bit_field_ = DeclaresSloppyBlockFunction::update(
+ bit_field_, declares_sloppy_block_function);
}
FunctionLiteral* fun_;
+
+ static const uint8_t kNextBitFieldIndex = DeclaresSloppyBlockFunction::kNext;
};
@@ -566,7 +614,7 @@ class ForStatement final : public IterationStatement {
Statement* next_;
};
-
+// Shared class for for-in and for-of statements.
class ForEachStatement : public IterationStatement {
public:
enum VisitMode {
@@ -580,125 +628,59 @@ class ForEachStatement : public IterationStatement {
return mode == ITERATE ? "for-of" : "for-in";
}
- protected:
- ForEachStatement(ZonePtrList<const AstRawString>* labels,
- ZonePtrList<const AstRawString>* own_labels, int pos,
- NodeType type)
- : IterationStatement(labels, own_labels, pos, type) {}
-};
-
-
-class ForInStatement final : public ForEachStatement {
- public:
void Initialize(Expression* each, Expression* subject, Statement* body) {
- ForEachStatement::Initialize(body);
+ IterationStatement::Initialize(body);
each_ = each;
subject_ = subject;
}
- Expression* enumerable() const {
- return subject();
- }
-
Expression* each() const { return each_; }
Expression* subject() const { return subject_; }
- enum ForInType { FAST_FOR_IN, SLOW_FOR_IN };
- ForInType for_in_type() const { return ForInTypeField::decode(bit_field_); }
-
- private:
+ protected:
friend class AstNodeFactory;
- ForInStatement(ZonePtrList<const AstRawString>* labels,
- ZonePtrList<const AstRawString>* own_labels, int pos)
- : ForEachStatement(labels, own_labels, pos, kForInStatement),
+ ForEachStatement(ZonePtrList<const AstRawString>* labels,
+ ZonePtrList<const AstRawString>* own_labels, int pos,
+ NodeType type)
+ : IterationStatement(labels, own_labels, pos, type),
each_(nullptr),
- subject_(nullptr) {
- bit_field_ = ForInTypeField::update(bit_field_, SLOW_FOR_IN);
- }
+ subject_(nullptr) {}
Expression* each_;
Expression* subject_;
-
- class ForInTypeField
- : public BitField<ForInType, ForEachStatement::kNextBitFieldIndex, 1> {};
};
+class ForInStatement final : public ForEachStatement {
+ private:
+ friend class AstNodeFactory;
+
+ ForInStatement(ZonePtrList<const AstRawString>* labels,
+ ZonePtrList<const AstRawString>* own_labels, int pos)
+ : ForEachStatement(labels, own_labels, pos, kForInStatement) {}
+};
+enum class IteratorType { kNormal, kAsync };
class ForOfStatement final : public ForEachStatement {
public:
- void Initialize(Statement* body, Variable* iterator,
- Expression* assign_iterator, Expression* assign_next,
- Expression* next_result, Expression* result_done,
- Expression* assign_each) {
- ForEachStatement::Initialize(body);
- iterator_ = iterator;
- assign_iterator_ = assign_iterator;
- assign_next_ = assign_next;
- next_result_ = next_result;
- result_done_ = result_done;
- assign_each_ = assign_each;
- }
-
- Variable* iterator() const {
- return iterator_;
- }
-
- // iterator = subject[Symbol.iterator]()
- Expression* assign_iterator() const {
- return assign_iterator_;
- }
-
- // iteratorRecord.next = iterator.next
- Expression* assign_next() const { return assign_next_; }
-
- // result = iterator.next() // with type check
- Expression* next_result() const {
- return next_result_;
- }
-
- // result.done
- Expression* result_done() const {
- return result_done_;
- }
-
- // each = result.value
- Expression* assign_each() const {
- return assign_each_;
- }
-
- void set_assign_iterator(Expression* e) { assign_iterator_ = e; }
- void set_assign_next(Expression* e) { assign_next_ = e; }
- void set_next_result(Expression* e) { next_result_ = e; }
- void set_result_done(Expression* e) { result_done_ = e; }
- void set_assign_each(Expression* e) { assign_each_ = e; }
+ IteratorType type() const { return type_; }
private:
friend class AstNodeFactory;
ForOfStatement(ZonePtrList<const AstRawString>* labels,
- ZonePtrList<const AstRawString>* own_labels, int pos)
+ ZonePtrList<const AstRawString>* own_labels, int pos,
+ IteratorType type)
: ForEachStatement(labels, own_labels, pos, kForOfStatement),
- iterator_(nullptr),
- assign_iterator_(nullptr),
- next_result_(nullptr),
- result_done_(nullptr),
- assign_each_(nullptr) {}
+ type_(type) {}
- Variable* iterator_;
- Expression* assign_iterator_;
- Expression* assign_next_;
- Expression* next_result_;
- Expression* result_done_;
- Expression* assign_each_;
+ IteratorType type_;
};
-
class ExpressionStatement final : public Statement {
public:
void set_expression(Expression* e) { expression_ = e; }
Expression* expression() const { return expression_; }
- bool IsJump() const { return expression_->IsThrow(); }
private:
friend class AstNodeFactory;
@@ -711,9 +693,6 @@ class ExpressionStatement final : public Statement {
class JumpStatement : public Statement {
- public:
- bool IsJump() const { return true; }
-
protected:
JumpStatement(int pos, NodeType type) : Statement(pos, type) {}
};
@@ -804,15 +783,16 @@ class CaseClause final : public ZoneObject {
DCHECK(!is_default());
return label_;
}
- ZonePtrList<Statement>* statements() const { return statements_; }
+ ZonePtrList<Statement>* statements() { return &statements_; }
private:
friend class AstNodeFactory;
- CaseClause(Expression* label, ZonePtrList<Statement>* statements);
+ CaseClause(Zone* zone, Expression* label,
+ const ScopedPtrList<Statement>& statements);
Expression* label_;
- ZonePtrList<Statement>* statements_;
+ ZonePtrList<Statement> statements_;
};
@@ -848,8 +828,8 @@ class SwitchStatement final : public BreakableStatement {
// given if-statement has a then- or an else-part containing code.
class IfStatement final : public Statement {
public:
- bool HasThenStatement() const { return !then_statement()->IsEmpty(); }
- bool HasElseStatement() const { return !else_statement()->IsEmpty(); }
+ bool HasThenStatement() const { return !then_statement_->IsEmptyStatement(); }
+ bool HasElseStatement() const { return !else_statement_->IsEmptyStatement(); }
Expression* condition() const { return condition_; }
Statement* then_statement() const { return then_statement_; }
@@ -858,11 +838,6 @@ class IfStatement final : public Statement {
void set_then_statement(Statement* s) { then_statement_ = s; }
void set_else_statement(Statement* s) { else_statement_ = s; }
- bool IsJump() const {
- return HasThenStatement() && then_statement()->IsJump()
- && HasElseStatement() && else_statement()->IsJump();
- }
-
private:
friend class AstNodeFactory;
@@ -990,7 +965,7 @@ class DebuggerStatement final : public Statement {
class EmptyStatement final : public Statement {
private:
friend class AstNodeFactory;
- explicit EmptyStatement(int pos) : Statement(pos, kEmptyStatement) {}
+ EmptyStatement() : Statement(kNoSourcePosition, kEmptyStatement) {}
};
@@ -1006,9 +981,8 @@ class SloppyBlockFunctionStatement final : public Statement {
private:
friend class AstNodeFactory;
- explicit SloppyBlockFunctionStatement(Statement* statement)
- : Statement(kNoSourcePosition, kSloppyBlockFunctionStatement),
- statement_(statement) {}
+ SloppyBlockFunctionStatement(int pos, Statement* statement)
+ : Statement(pos, kSloppyBlockFunctionStatement), statement_(statement) {}
Statement* statement_;
};
@@ -1046,7 +1020,7 @@ class Literal final : public Expression {
return string_;
}
- Smi* AsSmiLiteral() const {
+ Smi AsSmiLiteral() const {
DCHECK_EQ(kSmi, type());
return Smi::FromInt(smi_);
}
@@ -1261,19 +1235,20 @@ class AggregateLiteral : public MaterializedLiteral {
// Common supertype for ObjectLiteralProperty and ClassLiteralProperty
class LiteralProperty : public ZoneObject {
public:
- Expression* key() const { return key_; }
+ Expression* key() const { return key_and_is_computed_name_.GetPointer(); }
Expression* value() const { return value_; }
- bool is_computed_name() const { return is_computed_name_; }
+ bool is_computed_name() const {
+ return key_and_is_computed_name_.GetPayload();
+ }
bool NeedsSetFunctionName() const;
protected:
LiteralProperty(Expression* key, Expression* value, bool is_computed_name)
- : key_(key), value_(value), is_computed_name_(is_computed_name) {}
+ : key_and_is_computed_name_(key, is_computed_name), value_(value) {}
- Expression* key_;
+ PointerWithPayload<Expression, bool, 1> key_and_is_computed_name_;
Expression* value_;
- bool is_computed_name_;
};
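PointerWithPayload packs the is_computed_name flag into the low bit of the key pointer, saving a word per property. A standalone sketch, under the assumption that the pointee is at least 2-byte aligned so the low bit is always free:

#include <cstdint>

template <typename PointerType>
class ToyPointerWithPayload {
 public:
  ToyPointerWithPayload(PointerType* ptr, bool payload)
      : raw_(reinterpret_cast<uintptr_t>(ptr) | (payload ? 1u : 0u)) {}

  PointerType* GetPointer() const {
    return reinterpret_cast<PointerType*>(raw_ & ~uintptr_t{1});
  }
  bool GetPayload() const { return (raw_ & 1u) != 0; }

 private:
  uintptr_t raw_;  // pointer bits plus one flag bit
};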
// Property is used for passing information
@@ -1327,7 +1302,7 @@ class ObjectLiteral final : public AggregateLiteral {
return boilerplate_description_;
}
int properties_count() const { return boilerplate_properties_; }
- ZonePtrList<Property>* properties() const { return properties_; }
+ const ZonePtrList<Property>* properties() const { return &properties_; }
bool has_elements() const { return HasElementsField::decode(bit_field_); }
bool has_rest_property() const {
return HasRestPropertyField::decode(bit_field_);
@@ -1402,16 +1377,17 @@ class ObjectLiteral final : public AggregateLiteral {
private:
friend class AstNodeFactory;
- ObjectLiteral(ZonePtrList<Property>* properties,
+ ObjectLiteral(Zone* zone, const ScopedPtrList<Property>& properties,
uint32_t boilerplate_properties, int pos,
bool has_rest_property)
: AggregateLiteral(pos, kObjectLiteral),
boilerplate_properties_(boilerplate_properties),
- properties_(properties) {
+ properties_(0, nullptr) {
bit_field_ |= HasElementsField::encode(false) |
HasRestPropertyField::encode(has_rest_property) |
FastElementsField::encode(false) |
HasNullPrototypeField::encode(false);
+ properties.CopyTo(&properties_, zone);
}
void InitFlagsForPendingNullPrototype(int i);
@@ -1428,7 +1404,7 @@ class ObjectLiteral final : public AggregateLiteral {
uint32_t boilerplate_properties_;
Handle<ObjectBoilerplateDescription> boilerplate_description_;
- ZoneList<Property*>* properties_;
+ ZoneList<Property*> properties_;
class HasElementsField
: public BitField<bool, AggregateLiteral::kNextBitFieldIndex, 1> {};
@@ -1474,7 +1450,7 @@ class ArrayLiteral final : public AggregateLiteral {
return boilerplate_description_;
}
- ZonePtrList<Expression>* values() const { return values_; }
+ const ZonePtrList<Expression>* values() const { return &values_; }
int first_spread_index() const { return first_spread_index_; }
@@ -1504,14 +1480,17 @@ class ArrayLiteral final : public AggregateLiteral {
private:
friend class AstNodeFactory;
- ArrayLiteral(ZonePtrList<Expression>* values, int first_spread_index, int pos)
+ ArrayLiteral(Zone* zone, const ScopedPtrList<Expression>& values,
+ int first_spread_index, int pos)
: AggregateLiteral(pos, kArrayLiteral),
first_spread_index_(first_spread_index),
- values_(values) {}
+ values_(0, nullptr) {
+ values.CopyTo(&values_, zone);
+ }
int first_spread_index_;
Handle<ArrayBoilerplateDescription> boilerplate_description_;
- ZonePtrList<Expression>* values_;
+ ZonePtrList<Expression> values_;
};
enum class HoleCheckMode { kRequired, kElided };
@@ -1537,6 +1516,10 @@ class VariableProxy final : public Expression {
var_ = v;
}
+ Scanner::Location location() {
+ return Scanner::Location(position(), position() + raw_name()->length());
+ }
+
bool is_this() const { return IsThisField::decode(bit_field_); }
bool is_assigned() const { return IsAssignedField::decode(bit_field_); }
@@ -1569,22 +1552,36 @@ class VariableProxy final : public Expression {
HoleCheckModeField::update(bit_field_, HoleCheckMode::kRequired);
}
- bool is_private_field() const { return IsPrivateField::decode(bit_field_); }
- void set_is_private_field() {
- bit_field_ = IsPrivateField::update(bit_field_, true);
+ bool IsPrivateName() const {
+ return raw_name()->length() > 0 && raw_name()->FirstCharacter() == '#';
}
// Bind this proxy to the variable var.
void BindTo(Variable* var);
V8_INLINE VariableProxy* next_unresolved() { return next_unresolved_; }
+ V8_INLINE bool is_removed_from_unresolved() const {
+ return IsRemovedFromUnresolvedField::decode(bit_field_);
+ }
+
+ void mark_removed_from_unresolved() {
+ bit_field_ = IsRemovedFromUnresolvedField::update(bit_field_, true);
+ }
- // Provides an access type for the ThreadedList used by the PreParsers
- // expressions, lists, and formal parameters.
- struct PreParserNext {
- static VariableProxy** next(VariableProxy* t) {
- return t->pre_parser_expr_next();
+ // Provides filtered access to the unresolved variable proxy threaded list.
+ struct UnresolvedNext {
+ static VariableProxy** filter(VariableProxy** t) {
+ VariableProxy** n = t;
+ // Skip over possibly removed values.
+ while (*n != nullptr && (*n)->is_removed_from_unresolved()) {
+ n = (*n)->next();
+ }
+ return n;
}
+
+ static VariableProxy** start(VariableProxy** head) { return filter(head); }
+
+ static VariableProxy** next(VariableProxy* t) { return filter(t->next()); }
};
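UnresolvedNext implements tombstone-based removal: a proxy is never unlinked, only marked, and traversal skips marked nodes. A reduced sketch of the same filtering:

struct ToyProxy {
  ToyProxy* next = nullptr;
  bool removed = false;  // tombstone; set instead of unlinking
};

// Advance past tombstoned entries; removal stays O(1) with no back links.
ToyProxy** Filter(ToyProxy** slot) {
  while (*slot != nullptr && (*slot)->removed) slot = &(*slot)->next;
  return slot;
}

// Usage:
//   for (ToyProxy** p = Filter(&head); *p != nullptr;
//        p = Filter(&(*p)->next)) { /* visit *p */ }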
private:
@@ -1596,13 +1593,12 @@ class VariableProxy final : public Expression {
int start_position)
: Expression(start_position, kVariableProxy),
raw_name_(name),
- next_unresolved_(nullptr),
- pre_parser_expr_next_(nullptr) {
+ next_unresolved_(nullptr) {
bit_field_ |= IsThisField::encode(variable_kind == THIS_VARIABLE) |
IsAssignedField::encode(false) |
IsResolvedField::encode(false) |
- HoleCheckModeField::encode(HoleCheckMode::kElided) |
- IsPrivateField::encode(false);
+ IsRemovedFromUnresolvedField::encode(false) |
+ HoleCheckModeField::encode(HoleCheckMode::kElided);
}
explicit VariableProxy(const VariableProxy* copy_from);
@@ -1611,10 +1607,12 @@ class VariableProxy final : public Expression {
};
class IsAssignedField : public BitField<bool, IsThisField::kNext, 1> {};
class IsResolvedField : public BitField<bool, IsAssignedField::kNext, 1> {};
- class IsNewTargetField : public BitField<bool, IsResolvedField::kNext, 1> {};
+ class IsRemovedFromUnresolvedField
+ : public BitField<bool, IsResolvedField::kNext, 1> {};
+ class IsNewTargetField
+ : public BitField<bool, IsRemovedFromUnresolvedField::kNext, 1> {};
class HoleCheckModeField
: public BitField<HoleCheckMode, IsNewTargetField::kNext, 1> {};
- class IsPrivateField : public BitField<bool, HoleCheckModeField::kNext, 1> {};
union {
const AstRawString* raw_name_; // if !is_resolved_
@@ -1624,16 +1622,14 @@ class VariableProxy final : public Expression {
V8_INLINE VariableProxy** next() { return &next_unresolved_; }
VariableProxy* next_unresolved_;
- VariableProxy** pre_parser_expr_next() { return &pre_parser_expr_next_; }
- VariableProxy* pre_parser_expr_next_;
-
friend base::ThreadedListTraits<VariableProxy>;
};
-// Left-hand side can only be a property, a global or a (parameter or local)
-// slot.
-enum LhsKind {
- VARIABLE,
+// Assignments to a property will use one of several types of property access.
+// Otherwise, the assignment is to a non-property (a global, a local slot, a
+// parameter slot, or a destructuring pattern).
+enum AssignType {
+ NON_PROPERTY,
NAMED_PROPERTY,
KEYED_PROPERTY,
NAMED_SUPER_PROPERTY,
@@ -1650,8 +1646,8 @@ class Property final : public Expression {
bool IsSuperAccess() { return obj()->IsSuperPropertyReference(); }
// Returns the properties assign type.
- static LhsKind GetAssignType(Property* property) {
- if (property == nullptr) return VARIABLE;
+ static AssignType GetAssignType(Property* property) {
+ if (property == nullptr) return NON_PROPERTY;
bool super_access = property->IsSuperAccess();
return (property->key()->IsPropertyName())
? (super_access ? NAMED_SUPER_PROPERTY : NAMED_PROPERTY)
@@ -1692,7 +1688,7 @@ class ResolvedProperty final : public Expression {
class Call final : public Expression {
public:
Expression* expression() const { return expression_; }
- ZonePtrList<Expression>* arguments() const { return arguments_; }
+ const ZonePtrList<Expression>* arguments() const { return &arguments_; }
bool is_possibly_eval() const {
return IsPossiblyEvalField::decode(bit_field_);
@@ -1703,7 +1699,7 @@ class Call final : public Expression {
}
bool only_last_arg_is_spread() {
- return !arguments_->is_empty() && arguments_->last()->IsSpread();
+ return !arguments_.is_empty() && arguments_.last()->IsSpread();
}
enum CallType {
@@ -1731,19 +1727,27 @@ class Call final : public Expression {
private:
friend class AstNodeFactory;
- Call(Expression* expression, ZonePtrList<Expression>* arguments, int pos,
+ Call(Zone* zone, Expression* expression,
+ const ScopedPtrList<Expression>& arguments, int pos,
PossiblyEval possibly_eval)
- : Expression(pos, kCall), expression_(expression), arguments_(arguments) {
+ : Expression(pos, kCall),
+ expression_(expression),
+ arguments_(0, nullptr) {
bit_field_ |=
IsPossiblyEvalField::encode(possibly_eval == IS_POSSIBLY_EVAL) |
IsTaggedTemplateField::encode(false);
+ arguments.CopyTo(&arguments_, zone);
}
- Call(Expression* expression, ZonePtrList<Expression>* arguments, int pos,
+ Call(Zone* zone, Expression* expression,
+ const ScopedPtrList<Expression>& arguments, int pos,
TaggedTemplateTag tag)
- : Expression(pos, kCall), expression_(expression), arguments_(arguments) {
+ : Expression(pos, kCall),
+ expression_(expression),
+ arguments_(0, nullptr) {
bit_field_ |= IsPossiblyEvalField::encode(false) |
IsTaggedTemplateField::encode(true);
+ arguments.CopyTo(&arguments_, zone);
}
class IsPossiblyEvalField
@@ -1752,29 +1756,32 @@ class Call final : public Expression {
: public BitField<bool, IsPossiblyEvalField::kNext, 1> {};
Expression* expression_;
- ZonePtrList<Expression>* arguments_;
+ ZonePtrList<Expression> arguments_;
};
class CallNew final : public Expression {
public:
Expression* expression() const { return expression_; }
- ZonePtrList<Expression>* arguments() const { return arguments_; }
+ const ZonePtrList<Expression>* arguments() const { return &arguments_; }
bool only_last_arg_is_spread() {
- return !arguments_->is_empty() && arguments_->last()->IsSpread();
+ return !arguments_.is_empty() && arguments_.last()->IsSpread();
}
private:
friend class AstNodeFactory;
- CallNew(Expression* expression, ZonePtrList<Expression>* arguments, int pos)
+ CallNew(Zone* zone, Expression* expression,
+ const ScopedPtrList<Expression>& arguments, int pos)
: Expression(pos, kCallNew),
expression_(expression),
- arguments_(arguments) {}
+ arguments_(0, nullptr) {
+ arguments.CopyTo(&arguments_, zone);
+ }
Expression* expression_;
- ZonePtrList<Expression>* arguments_;
+ ZonePtrList<Expression> arguments_;
};
// The CallRuntime class does not represent any official JavaScript
@@ -1783,7 +1790,7 @@ class CallNew final : public Expression {
// implemented in JavaScript.
class CallRuntime final : public Expression {
public:
- ZonePtrList<Expression>* arguments() const { return arguments_; }
+ const ZonePtrList<Expression>* arguments() const { return &arguments_; }
bool is_jsruntime() const { return function_ == nullptr; }
int context_index() const {
@@ -1800,20 +1807,25 @@ class CallRuntime final : public Expression {
private:
friend class AstNodeFactory;
- CallRuntime(const Runtime::Function* function,
- ZonePtrList<Expression>* arguments, int pos)
+ CallRuntime(Zone* zone, const Runtime::Function* function,
+ const ScopedPtrList<Expression>& arguments, int pos)
: Expression(pos, kCallRuntime),
function_(function),
- arguments_(arguments) {}
- CallRuntime(int context_index, ZonePtrList<Expression>* arguments, int pos)
+ arguments_(0, nullptr) {
+ arguments.CopyTo(&arguments_, zone);
+ }
+ CallRuntime(Zone* zone, int context_index,
+ const ScopedPtrList<Expression>& arguments, int pos)
: Expression(pos, kCallRuntime),
context_index_(context_index),
function_(nullptr),
- arguments_(arguments) {}
+ arguments_(0, nullptr) {
+ arguments.CopyTo(&arguments_, zone);
+ }
int context_index_;
const Runtime::Function* function_;
- ZonePtrList<Expression>* arguments_;
+ ZonePtrList<Expression> arguments_;
};
@@ -1846,7 +1858,7 @@ class BinaryOperation final : public Expression {
// Returns true if one side is a Smi literal, returning the other side's
// sub-expression in |subexpr| and the literal Smi in |literal|.
- bool IsSmiLiteralOperation(Expression** subexpr, Smi** literal);
+ bool IsSmiLiteralOperation(Expression** subexpr, Smi* literal);
private:
friend class AstNodeFactory;
@@ -2090,58 +2102,6 @@ class CompoundAssignment final : public Assignment {
BinaryOperation* binary_operation_;
};
-// The RewritableExpression class is a wrapper for AST nodes that wait
-// for some potential rewriting. However, even if such nodes are indeed
-// rewritten, the RewritableExpression wrapper nodes will survive in the
-// final AST and should be just ignored, i.e., they should be treated as
-// equivalent to the wrapped nodes. For this reason and to simplify later
-// phases, RewritableExpressions are considered as exceptions of AST nodes
-// in the following sense:
-//
-// 1. IsRewritableExpression and AsRewritableExpression behave as usual.
-// 2. All other Is* and As* methods are practically delegated to the
-// wrapped node, i.e. IsArrayLiteral() will return true iff the
-// wrapped node is an array literal.
-//
-// Furthermore, an invariant that should be respected is that the wrapped
-// node is not a RewritableExpression.
-class RewritableExpression final : public Expression {
- public:
- Expression* expression() const { return expr_; }
- bool is_rewritten() const { return IsRewrittenField::decode(bit_field_); }
- void set_rewritten() {
- bit_field_ = IsRewrittenField::update(bit_field_, true);
- }
-
- void Rewrite(Expression* new_expression) {
- DCHECK(!is_rewritten());
- DCHECK_NOT_NULL(new_expression);
- DCHECK(!new_expression->IsRewritableExpression());
- expr_ = new_expression;
- set_rewritten();
- }
-
- Scope* scope() const { return scope_; }
- void set_scope(Scope* scope) { scope_ = scope; }
-
- private:
- friend class AstNodeFactory;
-
- RewritableExpression(Expression* expression, Scope* scope)
- : Expression(expression->position(), kRewritableExpression),
- expr_(expression),
- scope_(scope) {
- bit_field_ |= IsRewrittenField::encode(false);
- DCHECK(!expression->IsRewritableExpression());
- }
-
- Expression* expr_;
- Scope* scope_;
-
- class IsRewrittenField
- : public BitField<bool, Expression::kNextBitFieldIndex, 1> {};
-};
-
// There are several types of Suspend node:
//
// Yield
@@ -2232,9 +2192,11 @@ class FunctionLiteral final : public Expression {
enum IdType { kIdTypeInvalid = -1, kIdTypeTopLevel = 0 };
- enum ParameterFlag { kNoDuplicateParameters, kHasDuplicateParameters };
-
- enum EagerCompileHint { kShouldEagerCompile, kShouldLazyCompile };
+ enum ParameterFlag : uint8_t {
+ kNoDuplicateParameters,
+ kHasDuplicateParameters
+ };
+ enum EagerCompileHint : uint8_t { kShouldEagerCompile, kShouldLazyCompile };
// Empty handle means that the function does not have a shared name (i.e.
// the name will be set dynamically after creation of the function closure).
@@ -2246,7 +2208,7 @@ class FunctionLiteral final : public Expression {
const AstConsString* raw_name() const { return raw_name_; }
void set_raw_name(const AstConsString* name) { raw_name_ = name; }
DeclarationScope* scope() const { return scope_; }
- ZonePtrList<Statement>* body() const { return body_; }
+ ZonePtrList<Statement>* body() { return &body_; }
void set_function_token_position(int pos) { function_token_position_ = pos; }
int function_token_position() const { return function_token_position_; }
int start_position() const;
@@ -2259,8 +2221,10 @@ class FunctionLiteral final : public Expression {
return function_type() == kAnonymousExpression;
}
- void mark_as_iife() { bit_field_ = IIFEBit::update(bit_field_, true); }
- bool is_iife() const { return IIFEBit::decode(bit_field_); }
+ void mark_as_oneshot_iife() {
+ bit_field_ = OneshotIIFEBit::update(bit_field_, true);
+ }
+ bool is_oneshot_iife() const { return OneshotIIFEBit::decode(bit_field_); }
bool is_toplevel() const {
return function_literal_id() == FunctionLiteral::kIdTypeTopLevel;
}
@@ -2271,7 +2235,7 @@ class FunctionLiteral final : public Expression {
int expected_property_count() {
// Not valid for lazy functions.
- DCHECK_NOT_NULL(body_);
+ DCHECK(ShouldEagerCompile());
return expected_property_count_;
}
int parameter_count() { return parameter_count_; }
@@ -2311,7 +2275,7 @@ class FunctionLiteral final : public Expression {
bool has_duplicate_parameters() const {
// Not valid for lazy functions.
- DCHECK_NOT_NULL(body_);
+ DCHECK(ShouldEagerCompile());
return HasDuplicateParameters::decode(bit_field_);
}
@@ -2356,28 +2320,29 @@ class FunctionLiteral final : public Expression {
function_literal_id_ = function_literal_id;
}
- void set_requires_instance_fields_initializer(bool value) {
- bit_field_ = RequiresInstanceFieldsInitializer::update(bit_field_, value);
+ void set_requires_instance_members_initializer(bool value) {
+ bit_field_ = RequiresInstanceMembersInitializer::update(bit_field_, value);
}
- bool requires_instance_fields_initializer() const {
- return RequiresInstanceFieldsInitializer::decode(bit_field_);
+ bool requires_instance_members_initializer() const {
+ return RequiresInstanceMembersInitializer::decode(bit_field_);
}
- ProducedPreParsedScopeData* produced_preparsed_scope_data() const {
- return produced_preparsed_scope_data_;
+ ProducedPreparseData* produced_preparse_data() const {
+ return produced_preparse_data_;
}
private:
friend class AstNodeFactory;
- FunctionLiteral(
- Zone* zone, const AstRawString* name, AstValueFactory* ast_value_factory,
- DeclarationScope* scope, ZonePtrList<Statement>* body,
- int expected_property_count, int parameter_count, int function_length,
- FunctionType function_type, ParameterFlag has_duplicate_parameters,
- EagerCompileHint eager_compile_hint, int position, bool has_braces,
- int function_literal_id,
- ProducedPreParsedScopeData* produced_preparsed_scope_data = nullptr)
+ FunctionLiteral(Zone* zone, const AstRawString* name,
+ AstValueFactory* ast_value_factory, DeclarationScope* scope,
+ const ScopedPtrList<Statement>& body,
+ int expected_property_count, int parameter_count,
+ int function_length, FunctionType function_type,
+ ParameterFlag has_duplicate_parameters,
+ EagerCompileHint eager_compile_hint, int position,
+ bool has_braces, int function_literal_id,
+ ProducedPreparseData* produced_preparse_data = nullptr)
: Expression(position, kFunctionLiteral),
expected_property_count_(expected_property_count),
parameter_count_(parameter_count),
@@ -2387,18 +2352,18 @@ class FunctionLiteral final : public Expression {
function_literal_id_(function_literal_id),
raw_name_(name ? ast_value_factory->NewConsString(name) : nullptr),
scope_(scope),
- body_(body),
+ body_(0, nullptr),
raw_inferred_name_(ast_value_factory->empty_cons_string()),
- produced_preparsed_scope_data_(produced_preparsed_scope_data) {
- bit_field_ |= FunctionTypeBits::encode(function_type) |
- Pretenure::encode(false) |
- HasDuplicateParameters::encode(has_duplicate_parameters ==
- kHasDuplicateParameters) |
- DontOptimizeReasonField::encode(BailoutReason::kNoReason) |
- RequiresInstanceFieldsInitializer::encode(false) |
- HasBracesField::encode(has_braces) | IIFEBit::encode(false);
+ produced_preparse_data_(produced_preparse_data) {
+ bit_field_ |=
+ FunctionTypeBits::encode(function_type) | Pretenure::encode(false) |
+ HasDuplicateParameters::encode(has_duplicate_parameters ==
+ kHasDuplicateParameters) |
+ DontOptimizeReasonField::encode(BailoutReason::kNoReason) |
+ RequiresInstanceMembersInitializer::encode(false) |
+ HasBracesField::encode(has_braces) | OneshotIIFEBit::encode(false);
if (eager_compile_hint == kShouldEagerCompile) SetShouldEagerCompile();
- DCHECK_EQ(body == nullptr, expected_property_count < 0);
+ body.CopyTo(&body_, zone);
}
class FunctionTypeBits
@@ -2407,11 +2372,11 @@ class FunctionLiteral final : public Expression {
class HasDuplicateParameters : public BitField<bool, Pretenure::kNext, 1> {};
class DontOptimizeReasonField
: public BitField<BailoutReason, HasDuplicateParameters::kNext, 8> {};
- class RequiresInstanceFieldsInitializer
+ class RequiresInstanceMembersInitializer
: public BitField<bool, DontOptimizeReasonField::kNext, 1> {};
class HasBracesField
- : public BitField<bool, RequiresInstanceFieldsInitializer::kNext, 1> {};
- class IIFEBit : public BitField<bool, HasBracesField::kNext, 1> {};
+ : public BitField<bool, RequiresInstanceMembersInitializer::kNext, 1> {};
+ class OneshotIIFEBit : public BitField<bool, HasBracesField::kNext, 1> {};
int expected_property_count_;
int parameter_count_;
@@ -2422,37 +2387,44 @@ class FunctionLiteral final : public Expression {
const AstConsString* raw_name_;
DeclarationScope* scope_;
- ZonePtrList<Statement>* body_;
+ ZonePtrList<Statement> body_;
const AstConsString* raw_inferred_name_;
Handle<String> inferred_name_;
- ProducedPreParsedScopeData* produced_preparsed_scope_data_;
+ ProducedPreparseData* produced_preparse_data_;
};
// Property is used for passing information
// about a class literal's properties from the parser to the code generator.
class ClassLiteralProperty final : public LiteralProperty {
public:
- enum Kind : uint8_t { METHOD, GETTER, SETTER, PUBLIC_FIELD, PRIVATE_FIELD };
+ enum Kind : uint8_t { METHOD, GETTER, SETTER, FIELD };
Kind kind() const { return kind_; }
bool is_static() const { return is_static_; }
+ bool is_private() const { return is_private_; }
+
void set_computed_name_var(Variable* var) {
- DCHECK_EQ(PUBLIC_FIELD, kind());
+ DCHECK_EQ(FIELD, kind());
+ DCHECK(!is_private());
private_or_computed_name_var_ = var;
}
+
Variable* computed_name_var() const {
- DCHECK_EQ(PUBLIC_FIELD, kind());
+ DCHECK_EQ(FIELD, kind());
+ DCHECK(!is_private());
return private_or_computed_name_var_;
}
- void set_private_field_name_var(Variable* var) {
- DCHECK_EQ(PRIVATE_FIELD, kind());
+ void set_private_name_var(Variable* var) {
+ DCHECK_EQ(FIELD, kind());
+ DCHECK(is_private());
private_or_computed_name_var_ = var;
}
- Variable* private_field_name_var() const {
- DCHECK_EQ(PRIVATE_FIELD, kind());
+ Variable* private_name_var() const {
+ DCHECK_EQ(FIELD, kind());
+ DCHECK(is_private());
return private_or_computed_name_var_;
}
@@ -2460,14 +2432,15 @@ class ClassLiteralProperty final : public LiteralProperty {
friend class AstNodeFactory;
ClassLiteralProperty(Expression* key, Expression* value, Kind kind,
- bool is_static, bool is_computed_name);
+ bool is_static, bool is_computed_name, bool is_private);
Kind kind_;
bool is_static_;
+ bool is_private_;
Variable* private_or_computed_name_var_;
};
-class InitializeClassFieldsStatement final : public Statement {
+class InitializeClassMembersStatement final : public Statement {
public:
typedef ClassLiteralProperty Property;
@@ -2476,8 +2449,8 @@ class InitializeClassFieldsStatement final : public Statement {
private:
friend class AstNodeFactory;
- InitializeClassFieldsStatement(ZonePtrList<Property>* fields, int pos)
- : Statement(pos, kInitializeClassFieldsStatement), fields_(fields) {}
+ InitializeClassMembersStatement(ZonePtrList<Property>* fields, int pos)
+ : Statement(pos, kInitializeClassMembersStatement), fields_(fields) {}
ZonePtrList<Property>* fields_;
};
@@ -2511,8 +2484,8 @@ class ClassLiteral final : public Expression {
return static_fields_initializer_;
}
- FunctionLiteral* instance_fields_initializer_function() const {
- return instance_fields_initializer_function_;
+ FunctionLiteral* instance_members_initializer_function() const {
+ return instance_members_initializer_function_;
}
private:
@@ -2521,7 +2494,7 @@ class ClassLiteral final : public Expression {
ClassLiteral(Scope* scope, Variable* class_variable, Expression* extends,
FunctionLiteral* constructor, ZonePtrList<Property>* properties,
FunctionLiteral* static_fields_initializer,
- FunctionLiteral* instance_fields_initializer_function,
+ FunctionLiteral* instance_members_initializer_function,
int start_position, int end_position,
bool has_name_static_property, bool has_static_computed_names,
bool is_anonymous)
@@ -2533,8 +2506,8 @@ class ClassLiteral final : public Expression {
constructor_(constructor),
properties_(properties),
static_fields_initializer_(static_fields_initializer),
- instance_fields_initializer_function_(
- instance_fields_initializer_function) {
+ instance_members_initializer_function_(
+ instance_members_initializer_function) {
bit_field_ |= HasNameStaticProperty::encode(has_name_static_property) |
HasStaticComputedNames::encode(has_static_computed_names) |
IsAnonymousExpression::encode(is_anonymous);
@@ -2547,7 +2520,7 @@ class ClassLiteral final : public Expression {
FunctionLiteral* constructor_;
ZonePtrList<Property>* properties_;
FunctionLiteral* static_fields_initializer_;
- FunctionLiteral* instance_fields_initializer_function_;
+ FunctionLiteral* instance_members_initializer_function_;
class HasNameStaticProperty
: public BitField<bool, Expression::kNextBitFieldIndex, 1> {};
class HasStaticComputedNames
@@ -2652,48 +2625,9 @@ class EmptyParentheses final : public Expression {
private:
friend class AstNodeFactory;
- explicit EmptyParentheses(int pos) : Expression(pos, kEmptyParentheses) {}
-};
-
-// Represents the spec operation `GetIterator()`
-// (defined at https://tc39.github.io/ecma262/#sec-getiterator). Ignition
-// desugars this into a LoadIC / JSLoadNamed, CallIC, and a type-check to
-// validate return value of the Symbol.iterator() call.
-enum class IteratorType { kNormal, kAsync };
-class GetIterator final : public Expression {
- public:
- IteratorType hint() const { return hint_; }
-
- Expression* iterable() const { return iterable_; }
-
- Expression* iterable_for_call_printer() const {
- return destructured_iterable_ != nullptr ? destructured_iterable_
- : iterable_;
+ explicit EmptyParentheses(int pos) : Expression(pos, kEmptyParentheses) {
+ mark_parenthesized();
}
-
- private:
- friend class AstNodeFactory;
-
- GetIterator(Expression* iterable, Expression* destructured_iterable,
- IteratorType hint, int pos)
- : Expression(pos, kGetIterator),
- hint_(hint),
- iterable_(iterable),
- destructured_iterable_(destructured_iterable) {}
-
- GetIterator(Expression* iterable, IteratorType hint, int pos)
- : Expression(pos, kGetIterator),
- hint_(hint),
- iterable_(iterable),
- destructured_iterable_(nullptr) {}
-
- IteratorType hint_;
- Expression* iterable_;
-
- // iterable_ is the variable proxy, while destructured_iterable_ points to
- // the raw value stored in the variable proxy. This is only used for
- // pretty printing error messages.
- Expression* destructured_iterable_;
};
// Represents the spec operation `GetTemplateObject(templateLiteral)`
@@ -2757,15 +2691,14 @@ class AstVisitor {
for (Declaration* decl : *declarations) Visit(decl);
}
- void VisitStatements(ZonePtrList<Statement>* statements) {
+ void VisitStatements(const ZonePtrList<Statement>* statements) {
for (int i = 0; i < statements->length(); i++) {
Statement* stmt = statements->at(i);
Visit(stmt);
- if (stmt->IsJump()) break;
}
}
- void VisitExpressions(ZonePtrList<Expression>* expressions) {
+ void VisitExpressions(const ZonePtrList<Expression>* expressions) {
for (int i = 0; i < expressions->length(); i++) {
// The variable statement visiting code may pass null expressions
// to this code. Maybe this should be handled by introducing an
@@ -2784,9 +2717,14 @@ class AstVisitor {
case AstNode::k##NodeType: \
return this->impl()->Visit##NodeType(static_cast<NodeType*>(node));
-#define GENERATE_AST_VISITOR_SWITCH() \
- switch (node->node_type()) { \
- AST_NODE_LIST(GENERATE_VISIT_CASE) \
+#define GENERATE_FAILURE_CASE(NodeType) \
+ case AstNode::k##NodeType: \
+ UNREACHABLE();
+
+#define GENERATE_AST_VISITOR_SWITCH() \
+ switch (node->node_type()) { \
+ AST_NODE_LIST(GENERATE_VISIT_CASE) \
+ FAILURE_NODE_LIST(GENERATE_FAILURE_CASE) \
}
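The visitor switch above is generated from the same node list as the enum, so it stays exhaustive by construction, with failure-only kinds hitting UNREACHABLE(). A reduced sketch of the technique:

#include <cstdio>
#include <cstdlib>

#define TOY_NODE_LIST(V) V(Literal) V(Call)

enum ToyType : int {
#define DECLARE_TYPE(T) k##T,
  TOY_NODE_LIST(DECLARE_TYPE)
#undef DECLARE_TYPE
  kFailure  // has a tag but must never be visited
};

struct ToyVisitor {
  void VisitLiteral() { std::puts("literal"); }
  void VisitCall() { std::puts("call"); }

  void Visit(ToyType t) {
    switch (t) {
#define GENERATE_CASE(T) \
  case k##T:             \
    return Visit##T();
      TOY_NODE_LIST(GENERATE_CASE)
#undef GENERATE_CASE
      case kFailure:
        std::abort();  // stands in for UNREACHABLE()
    }
  }
};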
#define DEFINE_AST_VISITOR_SUBCLASS_MEMBERS() \
@@ -2839,32 +2777,45 @@ class AstVisitor {
class AstNodeFactory final {
public:
AstNodeFactory(AstValueFactory* ast_value_factory, Zone* zone)
- : zone_(zone), ast_value_factory_(ast_value_factory) {}
+ : zone_(zone),
+ ast_value_factory_(ast_value_factory),
+ empty_statement_(new (zone) class EmptyStatement()),
+ failure_expression_(new (zone) class FailureExpression()) {}
+ AstNodeFactory* ast_node_factory() { return this; }
AstValueFactory* ast_value_factory() const { return ast_value_factory_; }
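Because EmptyStatement and FailureExpression now carry no per-instance state (both are pinned to kNoSourcePosition), the factory allocates each once and hands out the shared node. A toy sketch of that design choice:

struct ToyEmptyStatement {};

class ToyFactory {
 public:
  // Always the same instance, living as long as the factory; callers must
  // treat it as immutable, which is what makes the sharing safe.
  ToyEmptyStatement* EmptyStatement() { return &empty_; }

 private:
  ToyEmptyStatement empty_;
};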
- VariableDeclaration* NewVariableDeclaration(VariableProxy* proxy, int pos) {
- return new (zone_) VariableDeclaration(proxy, pos);
+ VariableDeclaration* NewVariableDeclaration(int pos) {
+ return new (zone_) VariableDeclaration(pos);
}
- NestedVariableDeclaration* NewNestedVariableDeclaration(VariableProxy* proxy,
- Scope* scope,
+ NestedVariableDeclaration* NewNestedVariableDeclaration(Scope* scope,
int pos) {
- return new (zone_) NestedVariableDeclaration(proxy, scope, pos);
+ return new (zone_) NestedVariableDeclaration(scope, pos);
}
- FunctionDeclaration* NewFunctionDeclaration(VariableProxy* proxy,
- FunctionLiteral* fun, int pos) {
- return new (zone_) FunctionDeclaration(proxy, fun, pos);
+ FunctionDeclaration* NewFunctionDeclaration(FunctionLiteral* fun,
+ bool is_sloppy_block_function,
+ int pos) {
+ return new (zone_) FunctionDeclaration(fun, is_sloppy_block_function, pos);
}
- Block* NewBlock(int capacity, bool ignore_completion_value,
- ZonePtrList<const AstRawString>* labels = nullptr) {
+ Block* NewBlock(int capacity, bool ignore_completion_value) {
+ return new (zone_) Block(zone_, nullptr, capacity, ignore_completion_value);
+ }
+
+ Block* NewBlock(bool ignore_completion_value,
+ ZonePtrList<const AstRawString>* labels) {
return labels != nullptr
- ? new (zone_) LabeledBlock(zone_, labels, capacity,
- ignore_completion_value)
- : new (zone_)
- Block(zone_, labels, capacity, ignore_completion_value);
+ ? new (zone_) LabeledBlock(labels, ignore_completion_value)
+ : new (zone_) Block(labels, ignore_completion_value);
+ }
+
+ Block* NewBlock(bool ignore_completion_value,
+ const ScopedPtrList<Statement>& statements) {
+ Block* result = NewBlock(ignore_completion_value, nullptr);
+ result->InitializeStatements(statements, zone_);
+ return result;
}
#define STATEMENT_WITH_LABELS(NodeType) \
@@ -2892,7 +2843,8 @@ class AstNodeFactory final {
return new (zone_) ForInStatement(labels, own_labels, pos);
}
case ForEachStatement::ITERATE: {
- return new (zone_) ForOfStatement(labels, own_labels, pos);
+ return new (zone_)
+ ForOfStatement(labels, own_labels, pos, IteratorType::kNormal);
}
}
UNREACHABLE();
@@ -2900,8 +2852,8 @@ class AstNodeFactory final {
ForOfStatement* NewForOfStatement(ZonePtrList<const AstRawString>* labels,
ZonePtrList<const AstRawString>* own_labels,
- int pos) {
- return new (zone_) ForOfStatement(labels, own_labels, pos);
+ int pos, IteratorType type) {
+ return new (zone_) ForOfStatement(labels, own_labels, pos, type);
}
ExpressionStatement* NewExpressionStatement(Expression* expression, int pos) {
@@ -2980,21 +2932,25 @@ class AstNodeFactory final {
return new (zone_) DebuggerStatement(pos);
}
- EmptyStatement* NewEmptyStatement(int pos) {
- return new (zone_) EmptyStatement(pos);
+ class EmptyStatement* EmptyStatement() {
+ return empty_statement_;
}
- SloppyBlockFunctionStatement* NewSloppyBlockFunctionStatement() {
- return new (zone_)
- SloppyBlockFunctionStatement(NewEmptyStatement(kNoSourcePosition));
+ class FailureExpression* FailureExpression() {
+ return failure_expression_;
+ }
+
+ SloppyBlockFunctionStatement* NewSloppyBlockFunctionStatement(int pos) {
+ return new (zone_) SloppyBlockFunctionStatement(pos, EmptyStatement());
}
CaseClause* NewCaseClause(Expression* label,
- ZonePtrList<Statement>* statements) {
- return new (zone_) CaseClause(label, statements);
+ const ScopedPtrList<Statement>& statements) {
+ return new (zone_) CaseClause(zone_, label, statements);
}
Literal* NewStringLiteral(const AstRawString* string, int pos) {
+ DCHECK_NOT_NULL(string);
return new (zone_) Literal(string, pos);
}
@@ -3030,10 +2986,10 @@ class AstNodeFactory final {
}
ObjectLiteral* NewObjectLiteral(
- ZonePtrList<ObjectLiteral::Property>* properties,
+ const ScopedPtrList<ObjectLiteral::Property>& properties,
uint32_t boilerplate_properties, int pos, bool has_rest_property) {
- return new (zone_) ObjectLiteral(properties, boilerplate_properties, pos,
- has_rest_property);
+ return new (zone_) ObjectLiteral(zone_, properties, boilerplate_properties,
+ pos, has_rest_property);
}
ObjectLiteral::Property* NewObjectLiteralProperty(
@@ -3055,13 +3011,14 @@ class AstNodeFactory final {
return new (zone_) RegExpLiteral(pattern, flags, pos);
}
- ArrayLiteral* NewArrayLiteral(ZonePtrList<Expression>* values, int pos) {
- return new (zone_) ArrayLiteral(values, -1, pos);
+ ArrayLiteral* NewArrayLiteral(const ScopedPtrList<Expression>& values,
+ int pos) {
+ return new (zone_) ArrayLiteral(zone_, values, -1, pos);
}
- ArrayLiteral* NewArrayLiteral(ZonePtrList<Expression>* values,
+ ArrayLiteral* NewArrayLiteral(const ScopedPtrList<Expression>& values,
int first_spread_index, int pos) {
- return new (zone_) ArrayLiteral(values, first_spread_index, pos);
+ return new (zone_) ArrayLiteral(zone_, values, first_spread_index, pos);
}
VariableProxy* NewVariableProxy(Variable* var,
@@ -3095,35 +3052,40 @@ class AstNodeFactory final {
return new (zone_) ResolvedProperty(obj, property, pos);
}
- Call* NewCall(Expression* expression, ZonePtrList<Expression>* arguments,
- int pos, Call::PossiblyEval possibly_eval = Call::NOT_EVAL) {
- return new (zone_) Call(expression, arguments, pos, possibly_eval);
+ Call* NewCall(Expression* expression,
+ const ScopedPtrList<Expression>& arguments, int pos,
+ Call::PossiblyEval possibly_eval = Call::NOT_EVAL) {
+ return new (zone_) Call(zone_, expression, arguments, pos, possibly_eval);
}
Call* NewTaggedTemplate(Expression* expression,
- ZonePtrList<Expression>* arguments, int pos) {
+ const ScopedPtrList<Expression>& arguments, int pos) {
return new (zone_)
- Call(expression, arguments, pos, Call::TaggedTemplateTag::kTrue);
+ Call(zone_, expression, arguments, pos, Call::TaggedTemplateTag::kTrue);
}
CallNew* NewCallNew(Expression* expression,
- ZonePtrList<Expression>* arguments, int pos) {
- return new (zone_) CallNew(expression, arguments, pos);
+ const ScopedPtrList<Expression>& arguments, int pos) {
+ return new (zone_) CallNew(zone_, expression, arguments, pos);
}
CallRuntime* NewCallRuntime(Runtime::FunctionId id,
- ZonePtrList<Expression>* arguments, int pos) {
- return new (zone_) CallRuntime(Runtime::FunctionForId(id), arguments, pos);
+ const ScopedPtrList<Expression>& arguments,
+ int pos) {
+ return new (zone_)
+ CallRuntime(zone_, Runtime::FunctionForId(id), arguments, pos);
}
CallRuntime* NewCallRuntime(const Runtime::Function* function,
- ZonePtrList<Expression>* arguments, int pos) {
- return new (zone_) CallRuntime(function, arguments, pos);
+ const ScopedPtrList<Expression>& arguments,
+ int pos) {
+ return new (zone_) CallRuntime(zone_, function, arguments, pos);
}
CallRuntime* NewCallRuntime(int context_index,
- ZonePtrList<Expression>* arguments, int pos) {
- return new (zone_) CallRuntime(context_index, arguments, pos);
+ const ScopedPtrList<Expression>& arguments,
+ int pos) {
+ return new (zone_) CallRuntime(zone_, context_index, arguments, pos);
}
UnaryOperation* NewUnaryOperation(Token::Value op,
@@ -3176,12 +3138,6 @@ class AstNodeFactory final {
Conditional(condition, then_expression, else_expression, position);
}
- RewritableExpression* NewRewritableExpression(Expression* expression,
- Scope* scope) {
- DCHECK_NOT_NULL(expression);
- return new (zone_) RewritableExpression(expression, scope);
- }
-
Assignment* NewAssignment(Token::Value op,
Expression* target,
Expression* value,
@@ -3210,7 +3166,6 @@ class AstNodeFactory final {
}
YieldStar* NewYieldStar(Expression* expression, int pos) {
- DCHECK_NOT_NULL(expression);
return new (zone_) YieldStar(expression, pos);
}
@@ -3225,27 +3180,26 @@ class AstNodeFactory final {
FunctionLiteral* NewFunctionLiteral(
const AstRawString* name, DeclarationScope* scope,
- ZonePtrList<Statement>* body, int expected_property_count,
+ const ScopedPtrList<Statement>& body, int expected_property_count,
int parameter_count, int function_length,
FunctionLiteral::ParameterFlag has_duplicate_parameters,
FunctionLiteral::FunctionType function_type,
FunctionLiteral::EagerCompileHint eager_compile_hint, int position,
bool has_braces, int function_literal_id,
- ProducedPreParsedScopeData* produced_preparsed_scope_data = nullptr) {
+ ProducedPreparseData* produced_preparse_data = nullptr) {
return new (zone_) FunctionLiteral(
zone_, name, ast_value_factory_, scope, body, expected_property_count,
parameter_count, function_length, function_type,
has_duplicate_parameters, eager_compile_hint, position, has_braces,
- function_literal_id, produced_preparsed_scope_data);
+ function_literal_id, produced_preparse_data);
}
// Creates a FunctionLiteral representing a top-level script, the
// result of an eval (top-level or otherwise), or the result of calling
// the Function constructor.
- FunctionLiteral* NewScriptOrEvalFunctionLiteral(DeclarationScope* scope,
- ZonePtrList<Statement>* body,
- int expected_property_count,
- int parameter_count) {
+ FunctionLiteral* NewScriptOrEvalFunctionLiteral(
+ DeclarationScope* scope, const ScopedPtrList<Statement>& body,
+ int expected_property_count, int parameter_count) {
return new (zone_) FunctionLiteral(
zone_, ast_value_factory_->empty_string(), ast_value_factory_, scope,
body, expected_property_count, parameter_count, parameter_count,
@@ -3257,9 +3211,9 @@ class AstNodeFactory final {
ClassLiteral::Property* NewClassLiteralProperty(
Expression* key, Expression* value, ClassLiteralProperty::Kind kind,
- bool is_static, bool is_computed_name) {
- return new (zone_)
- ClassLiteral::Property(key, value, kind, is_static, is_computed_name);
+ bool is_static, bool is_computed_name, bool is_private) {
+ return new (zone_) ClassLiteral::Property(key, value, kind, is_static,
+ is_computed_name, is_private);
}
ClassLiteral* NewClassLiteral(
@@ -3267,12 +3221,12 @@ class AstNodeFactory final {
FunctionLiteral* constructor,
ZonePtrList<ClassLiteral::Property>* properties,
FunctionLiteral* static_fields_initializer,
- FunctionLiteral* instance_fields_initializer_function, int start_position,
- int end_position, bool has_name_static_property,
+ FunctionLiteral* instance_members_initializer_function,
+ int start_position, int end_position, bool has_name_static_property,
bool has_static_computed_names, bool is_anonymous) {
return new (zone_) ClassLiteral(
scope, variable, extends, constructor, properties,
- static_fields_initializer, instance_fields_initializer_function,
+ static_fields_initializer, instance_members_initializer_function,
start_position, end_position, has_name_static_property,
has_static_computed_names, is_anonymous);
}
@@ -3310,17 +3264,6 @@ class AstNodeFactory final {
return new (zone_) EmptyParentheses(pos);
}
- GetIterator* NewGetIterator(Expression* iterable,
- Expression* destructured_iterable,
- IteratorType hint, int pos) {
- return new (zone_) GetIterator(iterable, destructured_iterable, hint, pos);
- }
-
- GetIterator* NewGetIterator(Expression* iterable, IteratorType hint,
- int pos) {
- return new (zone_) GetIterator(iterable, hint, pos);
- }
-
GetTemplateObject* NewGetTemplateObject(
const ZonePtrList<const AstRawString>* cooked_strings,
const ZonePtrList<const AstRawString>* raw_strings, int pos) {
@@ -3337,9 +3280,9 @@ class AstNodeFactory final {
return new (zone_) ImportCallExpression(args, pos);
}
- InitializeClassFieldsStatement* NewInitializeClassFieldsStatement(
+ InitializeClassMembersStatement* NewInitializeClassMembersStatement(
ZonePtrList<ClassLiteral::Property>* args, int pos) {
- return new (zone_) InitializeClassFieldsStatement(args, pos);
+ return new (zone_) InitializeClassMembersStatement(args, pos);
}
Zone* zone() const { return zone_; }
@@ -3351,6 +3294,8 @@ class AstNodeFactory final {
// See ParseFunctionLiteral in parser.cc for preconditions.
Zone* zone_;
AstValueFactory* ast_value_factory_;
+ class EmptyStatement* empty_statement_;
+ class FailureExpression* failure_expression_;
};
@@ -3358,40 +3303,18 @@ class AstNodeFactory final {
// Inline functions for AstNode.
#define DECLARE_NODE_FUNCTIONS(type) \
- bool AstNode::Is##type() const { \
- NodeType mine = node_type(); \
- if (mine == AstNode::kRewritableExpression && \
- AstNode::k##type != AstNode::kRewritableExpression) \
- mine = reinterpret_cast<const RewritableExpression*>(this) \
- ->expression() \
- ->node_type(); \
- return mine == AstNode::k##type; \
- } \
+ bool AstNode::Is##type() const { return node_type() == AstNode::k##type; } \
type* AstNode::As##type() { \
- NodeType mine = node_type(); \
- AstNode* result = this; \
- if (mine == AstNode::kRewritableExpression && \
- AstNode::k##type != AstNode::kRewritableExpression) { \
- result = \
- reinterpret_cast<const RewritableExpression*>(this)->expression(); \
- mine = result->node_type(); \
- } \
- return mine == AstNode::k##type ? reinterpret_cast<type*>(result) \
- : nullptr; \
+ return node_type() == AstNode::k##type ? reinterpret_cast<type*>(this) \
+ : nullptr; \
} \
const type* AstNode::As##type() const { \
- NodeType mine = node_type(); \
- const AstNode* result = this; \
- if (mine == AstNode::kRewritableExpression && \
- AstNode::k##type != AstNode::kRewritableExpression) { \
- result = \
- reinterpret_cast<const RewritableExpression*>(this)->expression(); \
- mine = result->node_type(); \
- } \
- return mine == AstNode::k##type ? reinterpret_cast<const type*>(result) \
- : nullptr; \
+ return node_type() == AstNode::k##type \
+ ? reinterpret_cast<const type*>(this) \
+ : nullptr; \
}
AST_NODE_LIST(DECLARE_NODE_FUNCTIONS)
+FAILURE_NODE_LIST(DECLARE_NODE_FUNCTIONS)
#undef DECLARE_NODE_FUNCTIONS
} // namespace internal
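
For orientation, the ClassLiteral changes above collapse the old PUBLIC_FIELD and PRIVATE_FIELD property kinds into a single FIELD kind carrying a new is_private flag, and rename the instance "fields" initializer to an instance "members" initializer. As a rough JavaScript sketch (assuming the class-fields syntax this V8 line parses), the properties of a class body map onto ClassLiteral::Property like so:

  class Counter {
    #count = 0;                          // kind FIELD, is_private
    static kind = "counter";             // kind FIELD, is_static
    get value() { return this.#count; }  // kind GETTER
    increment() { this.#count += 1; }    // kind METHOD
  }
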
diff --git a/deps/v8/src/ast/context-slot-cache.cc b/deps/v8/src/ast/context-slot-cache.cc
deleted file mode 100644
index 88d53713c2..0000000000
--- a/deps/v8/src/ast/context-slot-cache.cc
+++ /dev/null
@@ -1,84 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/ast/context-slot-cache.h"
-
-#include <stdlib.h>
-
-#include "src/ast/scopes.h"
-#include "src/bootstrapper.h"
-#include "src/objects-inl.h"
-
-namespace v8 {
-namespace internal {
-
-int ContextSlotCache::Hash(Object* data, String* name) {
- // Uses only lower 32 bits if pointers are larger.
- uintptr_t addr_hash =
- static_cast<uint32_t>(reinterpret_cast<uintptr_t>(data)) >> 2;
- return static_cast<int>((addr_hash ^ name->Hash()) % kLength);
-}
-
-int ContextSlotCache::Lookup(Object* data, String* name, VariableMode* mode,
- InitializationFlag* init_flag,
- MaybeAssignedFlag* maybe_assigned_flag) {
- int index = Hash(data, name);
- DCHECK(name->IsInternalizedString());
- Key& key = keys_[index];
- if (key.data == data && key.name == name) {
- Value result(values_[index]);
- if (mode != nullptr) *mode = result.mode();
- if (init_flag != nullptr) *init_flag = result.initialization_flag();
- if (maybe_assigned_flag != nullptr)
- *maybe_assigned_flag = result.maybe_assigned_flag();
- return result.index() + kNotFound;
- }
- return kNotFound;
-}
-
-void ContextSlotCache::Update(Handle<Object> data, Handle<String> name,
- VariableMode mode, InitializationFlag init_flag,
- MaybeAssignedFlag maybe_assigned_flag,
- int slot_index) {
- DCHECK(name->IsInternalizedString());
- DCHECK_LT(kNotFound, slot_index);
- int index = Hash(*data, *name);
- Key& key = keys_[index];
- key.data = *data;
- key.name = *name;
- // Please note value only takes a uint as index.
- values_[index] =
- Value(mode, init_flag, maybe_assigned_flag, slot_index - kNotFound).raw();
-#ifdef DEBUG
- ValidateEntry(data, name, mode, init_flag, maybe_assigned_flag, slot_index);
-#endif
-}
-
-void ContextSlotCache::Clear() {
- for (int index = 0; index < kLength; index++) keys_[index].data = nullptr;
-}
-
-#ifdef DEBUG
-
-void ContextSlotCache::ValidateEntry(Handle<Object> data, Handle<String> name,
- VariableMode mode,
- InitializationFlag init_flag,
- MaybeAssignedFlag maybe_assigned_flag,
- int slot_index) {
- DCHECK(name->IsInternalizedString());
- int index = Hash(*data, *name);
- Key& key = keys_[index];
- DCHECK_EQ(key.data, *data);
- DCHECK_EQ(key.name, *name);
- Value result(values_[index]);
- DCHECK_EQ(result.mode(), mode);
- DCHECK_EQ(result.initialization_flag(), init_flag);
- DCHECK_EQ(result.maybe_assigned_flag(), maybe_assigned_flag);
- DCHECK_EQ(result.index() + kNotFound, slot_index);
-}
-
-#endif // DEBUG
-
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/ast/context-slot-cache.h b/deps/v8/src/ast/context-slot-cache.h
deleted file mode 100644
index bf4a6d709e..0000000000
--- a/deps/v8/src/ast/context-slot-cache.h
+++ /dev/null
@@ -1,112 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_AST_CONTEXT_SLOT_CACHE_H_
-#define V8_AST_CONTEXT_SLOT_CACHE_H_
-
-#include "src/allocation.h"
-#include "src/ast/modules.h"
-
-namespace v8 {
-namespace internal {
-
-// Cache for mapping (data, property name) into context slot index.
-// The cache contains both positive and negative results.
-// Slot index equals -1 means the property is absent.
-// Cleared at startup and prior to mark sweep collection.
-class ContextSlotCache {
- public:
- // Lookup context slot index for (data, name).
- // If absent, kNotFound is returned.
- int Lookup(Object* data, String* name, VariableMode* mode,
- InitializationFlag* init_flag,
- MaybeAssignedFlag* maybe_assigned_flag);
-
- // Update an element in the cache.
- void Update(Handle<Object> data, Handle<String> name, VariableMode mode,
- InitializationFlag init_flag,
- MaybeAssignedFlag maybe_assigned_flag, int slot_index);
-
- // Clear the cache.
- void Clear();
-
- static const int kNotFound = -2;
-
- private:
- ContextSlotCache() {
- for (int i = 0; i < kLength; ++i) {
- keys_[i].data = nullptr;
- keys_[i].name = nullptr;
- values_[i] = static_cast<uint32_t>(kNotFound);
- }
- }
-
- inline static int Hash(Object* data, String* name);
-
-#ifdef DEBUG
- void ValidateEntry(Handle<Object> data, Handle<String> name,
- VariableMode mode, InitializationFlag init_flag,
- MaybeAssignedFlag maybe_assigned_flag, int slot_index);
-#endif
-
- static const int kLength = 256;
- struct Key {
- Object* data;
- String* name;
- };
-
- struct Value {
- Value(VariableMode mode, InitializationFlag init_flag,
- MaybeAssignedFlag maybe_assigned_flag, int index) {
- DCHECK(ModeField::is_valid(mode));
- DCHECK(InitField::is_valid(init_flag));
- DCHECK(MaybeAssignedField::is_valid(maybe_assigned_flag));
- DCHECK(IndexField::is_valid(index));
- value_ = ModeField::encode(mode) | IndexField::encode(index) |
- InitField::encode(init_flag) |
- MaybeAssignedField::encode(maybe_assigned_flag);
- DCHECK(mode == this->mode());
- DCHECK(init_flag == this->initialization_flag());
- DCHECK(maybe_assigned_flag == this->maybe_assigned_flag());
- DCHECK(index == this->index());
- }
-
- explicit inline Value(uint32_t value) : value_(value) {}
-
- uint32_t raw() { return value_; }
-
- VariableMode mode() { return ModeField::decode(value_); }
-
- InitializationFlag initialization_flag() {
- return InitField::decode(value_);
- }
-
- MaybeAssignedFlag maybe_assigned_flag() {
- return MaybeAssignedField::decode(value_);
- }
-
- int index() { return IndexField::decode(value_); }
-
- // Bit fields in value_ (type, shift, size). Must be public so the
- // constants can be embedded in generated code.
- class ModeField : public BitField<VariableMode, 0, 4> {};
- class InitField : public BitField<InitializationFlag, 4, 1> {};
- class MaybeAssignedField : public BitField<MaybeAssignedFlag, 5, 1> {};
- class IndexField : public BitField<int, 6, 32 - 6> {};
-
- private:
- uint32_t value_;
- };
-
- Key keys_[kLength];
- uint32_t values_[kLength];
-
- friend class Isolate;
- DISALLOW_COPY_AND_ASSIGN(ContextSlotCache);
-};
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_AST_CONTEXT_SLOT_CACHE_H_
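
For reference, the deleted cache packed a variable's mode, initialization flag, maybe-assigned flag, and slot index into a single uint32 using the BitField layout above (4 + 1 + 1 + 26 bits). A minimal stand-alone sketch of that packing, with hypothetical helper names and plain shifts in place of V8's BitField templates:

  const pack = (mode, init, assigned, index) =>
    ((mode & 0xf) | (init << 4) | (assigned << 5) | (index << 6)) >>> 0;
  const modeOf = (value) => value & 0xf;
  const indexOf = (value) => value >>> 6;

  const raw = pack(2, 1, 0, 7);            // e.g. mode 2, initialized, slot 7
  console.log(modeOf(raw), indexOf(raw));  // 2 7
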
diff --git a/deps/v8/src/ast/prettyprinter.cc b/deps/v8/src/ast/prettyprinter.cc
index f9c2243099..a53d07064d 100644
--- a/deps/v8/src/ast/prettyprinter.cc
+++ b/deps/v8/src/ast/prettyprinter.cc
@@ -161,16 +161,30 @@ void CallPrinter::VisitForStatement(ForStatement* node) {
void CallPrinter::VisitForInStatement(ForInStatement* node) {
Find(node->each());
- Find(node->enumerable());
+ Find(node->subject());
Find(node->body());
}
void CallPrinter::VisitForOfStatement(ForOfStatement* node) {
- Find(node->assign_iterator());
- Find(node->next_result());
- Find(node->result_done());
- Find(node->assign_each());
+ Find(node->each());
+
+ // Check the subject's position in case there was a GetIterator error.
+ bool was_found = false;
+ if (node->subject()->position() == position_) {
+ is_async_iterator_error_ = node->type() == IteratorType::kAsync;
+ is_iterator_error_ = !is_async_iterator_error_;
+ was_found = !found_;
+ if (was_found) {
+ found_ = true;
+ }
+ }
+ Find(node->subject(), true);
+ if (was_found) {
+ done_ = true;
+ found_ = false;
+ }
+
Find(node->body());
}
@@ -205,8 +219,8 @@ void CallPrinter::VisitClassLiteral(ClassLiteral* node) {
}
}
-void CallPrinter::VisitInitializeClassFieldsStatement(
- InitializeClassFieldsStatement* node) {
+void CallPrinter::VisitInitializeClassMembersStatement(
+ InitializeClassMembersStatement* node) {
for (int i = 0; i < node->fields()->length(); i++) {
Find(node->fields()->at(i)->value());
}
@@ -285,7 +299,24 @@ void CallPrinter::VisitVariableProxy(VariableProxy* node) {
void CallPrinter::VisitAssignment(Assignment* node) {
Find(node->target());
- Find(node->value());
+ if (node->target()->IsArrayLiteral()) {
+ // Special-case the visit for destructuring array assignment.
+ bool was_found = false;
+ if (node->value()->position() == position_) {
+ is_iterator_error_ = true;
+ was_found = !found_;
+ if (was_found) {
+ found_ = true;
+ }
+ }
+ Find(node->value(), true);
+ if (was_found) {
+ done_ = true;
+ found_ = false;
+ }
+ } else {
+ Find(node->value());
+ }
}
void CallPrinter::VisitCompoundAssignment(CompoundAssignment* node) {
@@ -347,7 +378,7 @@ void CallPrinter::VisitCall(Call* node) {
found_ = true;
}
Find(node->expression(), true);
- if (!was_found) Print("(...)");
+ if (!was_found && !is_iterator_error_) Print("(...)");
FindArguments(node->arguments());
if (was_found) {
done_ = true;
@@ -371,7 +402,7 @@ void CallPrinter::VisitCallNew(CallNew* node) {
}
found_ = true;
}
- Find(node->expression(), was_found);
+ Find(node->expression(), was_found || is_iterator_error_);
FindArguments(node->arguments());
if (was_found) {
done_ = true;
@@ -455,23 +486,6 @@ void CallPrinter::VisitEmptyParentheses(EmptyParentheses* node) {
UNREACHABLE();
}
-void CallPrinter::VisitGetIterator(GetIterator* node) {
- bool was_found = false;
- if (node->position() == position_) {
- is_async_iterator_error_ = node->hint() == IteratorType::kAsync;
- is_iterator_error_ = !is_async_iterator_error_;
- was_found = !found_;
- if (was_found) {
- found_ = true;
- }
- }
- Find(node->iterable_for_call_printer(), true);
- if (was_found) {
- done_ = true;
- found_ = false;
- }
-}
-
void CallPrinter::VisitGetTemplateObject(GetTemplateObject* node) {}
void CallPrinter::VisitTemplateLiteral(TemplateLiteral* node) {
@@ -497,18 +511,14 @@ void CallPrinter::VisitSuperCallReference(SuperCallReference* node) {
}
-void CallPrinter::VisitRewritableExpression(RewritableExpression* node) {
- Find(node->expression());
-}
-
-void CallPrinter::FindStatements(ZonePtrList<Statement>* statements) {
+void CallPrinter::FindStatements(const ZonePtrList<Statement>* statements) {
if (statements == nullptr) return;
for (int i = 0; i < statements->length(); i++) {
Find(statements->at(i));
}
}
-void CallPrinter::FindArguments(ZonePtrList<Expression>* arguments) {
+void CallPrinter::FindArguments(const ZonePtrList<Expression>* arguments) {
if (found_) return;
for (int i = 0; i < arguments->length(); i++) {
Find(arguments->at(i));
@@ -742,8 +752,9 @@ void AstPrinter::PrintLiteralWithModeIndented(const char* info, Variable* var,
} else {
EmbeddedVector<char, 256> buf;
int pos =
- SNPrintF(buf, "%s (%p) (mode = %s", info, reinterpret_cast<void*>(var),
- VariableMode2String(var->mode()));
+ SNPrintF(buf, "%s (%p) (mode = %s, assigned = %s", info,
+ reinterpret_cast<void*>(var), VariableMode2String(var->mode()),
+ var->maybe_assigned() == kMaybeAssigned ? "true" : "false");
SNPrintF(buf + pos, ")");
PrintLiteralIndented(buf.start(), value, true);
}
@@ -772,13 +783,15 @@ const char* AstPrinter::PrintProgram(FunctionLiteral* program) {
{ IndentedScope indent(this, "FUNC", program->position());
PrintIndented("KIND");
Print(" %d\n", program->kind());
+ PrintIndented("LITERAL ID");
+ Print(" %d\n", program->function_literal_id());
PrintIndented("SUSPEND COUNT");
Print(" %d\n", program->suspend_count());
PrintLiteralIndented("NAME", program->raw_name(), true);
if (program->raw_inferred_name()) {
PrintLiteralIndented("INFERRED NAME", program->raw_inferred_name(), true);
}
- if (program->requires_instance_fields_initializer()) {
+ if (program->requires_instance_members_initializer()) {
Print(" REQUIRES INSTANCE FIELDS INITIALIZER\n");
}
PrintParameters(program->scope());
@@ -813,13 +826,13 @@ void AstPrinter::PrintParameters(DeclarationScope* scope) {
}
}
-void AstPrinter::PrintStatements(ZonePtrList<Statement>* statements) {
+void AstPrinter::PrintStatements(const ZonePtrList<Statement>* statements) {
for (int i = 0; i < statements->length(); i++) {
Visit(statements->at(i));
}
}
-void AstPrinter::PrintArguments(ZonePtrList<Expression>* arguments) {
+void AstPrinter::PrintArguments(const ZonePtrList<Expression>* arguments) {
for (int i = 0; i < arguments->length(); i++) {
Visit(arguments->at(i));
}
@@ -837,15 +850,15 @@ void AstPrinter::VisitBlock(Block* node) {
// TODO(svenpanne) Start with IndentedScope.
void AstPrinter::VisitVariableDeclaration(VariableDeclaration* node) {
- PrintLiteralWithModeIndented("VARIABLE", node->proxy()->var(),
- node->proxy()->raw_name());
+ PrintLiteralWithModeIndented("VARIABLE", node->var(),
+ node->var()->raw_name());
}
// TODO(svenpanne) Start with IndentedScope.
void AstPrinter::VisitFunctionDeclaration(FunctionDeclaration* node) {
PrintIndented("FUNCTION ");
- PrintLiteral(node->proxy()->raw_name(), true);
+ PrintLiteral(node->var()->raw_name(), true);
Print(" = function ");
PrintLiteral(node->fun()->raw_name(), false);
Print("\n");
@@ -955,7 +968,7 @@ void AstPrinter::VisitForInStatement(ForInStatement* node) {
PrintLabelsIndented(node->labels());
PrintLabelsIndented(node->own_labels(), "OWN ");
PrintIndentedVisit("FOR", node->each());
- PrintIndentedVisit("IN", node->enumerable());
+ PrintIndentedVisit("IN", node->subject());
PrintIndentedVisit("BODY", node->body());
}
@@ -964,10 +977,17 @@ void AstPrinter::VisitForOfStatement(ForOfStatement* node) {
IndentedScope indent(this, "FOR OF", node->position());
PrintLabelsIndented(node->labels());
PrintLabelsIndented(node->own_labels(), "OWN ");
- PrintIndentedVisit("INIT", node->assign_iterator());
- PrintIndentedVisit("NEXT", node->next_result());
- PrintIndentedVisit("DONE", node->result_done());
- PrintIndentedVisit("EACH", node->assign_each());
+ const char* for_type;
+ switch (node->type()) {
+ case IteratorType::kNormal:
+ for_type = "FOR";
+ break;
+ case IteratorType::kAsync:
+ for_type = "FOR AWAIT";
+ break;
+ }
+ PrintIndentedVisit(for_type, node->each());
+ PrintIndentedVisit("OF", node->subject());
PrintIndentedVisit("BODY", node->body());
}
@@ -1016,12 +1036,14 @@ void AstPrinter::VisitDebuggerStatement(DebuggerStatement* node) {
void AstPrinter::VisitFunctionLiteral(FunctionLiteral* node) {
IndentedScope indent(this, "FUNC LITERAL", node->position());
+ PrintIndented("LITERAL ID");
+ Print(" %d\n", node->function_literal_id());
PrintLiteralIndented("NAME", node->raw_name(), false);
PrintLiteralIndented("INFERRED NAME", node->raw_inferred_name(), false);
- PrintParameters(node->scope());
// We don't want to see the function literal in this case: it
// will be printed via PrintProgram when the code for it is
// generated.
+ // PrintParameters(node->scope());
// PrintStatements(node->body());
}
@@ -1036,21 +1058,21 @@ void AstPrinter::VisitClassLiteral(ClassLiteral* node) {
PrintIndentedVisit("STATIC FIELDS INITIALIZER",
node->static_fields_initializer());
}
- if (node->instance_fields_initializer_function() != nullptr) {
- PrintIndentedVisit("INSTANCE FIELDS INITIALIZER",
- node->instance_fields_initializer_function());
+ if (node->instance_members_initializer_function() != nullptr) {
+ PrintIndentedVisit("INSTANCE ELEMENTS INITIALIZER",
+ node->instance_members_initializer_function());
}
PrintClassProperties(node->properties());
}
-void AstPrinter::VisitInitializeClassFieldsStatement(
- InitializeClassFieldsStatement* node) {
- IndentedScope indent(this, "INITIALIZE CLASS FIELDS", node->position());
+void AstPrinter::VisitInitializeClassMembersStatement(
+ InitializeClassMembersStatement* node) {
+ IndentedScope indent(this, "INITIALIZE CLASS ELEMENTS", node->position());
PrintClassProperties(node->fields());
}
void AstPrinter::PrintClassProperties(
- ZonePtrList<ClassLiteral::Property>* properties) {
+ const ZonePtrList<ClassLiteral::Property>* properties) {
for (int i = 0; i < properties->length(); i++) {
ClassLiteral::Property* property = properties->at(i);
const char* prop_kind = nullptr;
@@ -1064,16 +1086,13 @@ void AstPrinter::PrintClassProperties(
case ClassLiteral::Property::SETTER:
prop_kind = "SETTER";
break;
- case ClassLiteral::Property::PUBLIC_FIELD:
- prop_kind = "PUBLIC FIELD";
- break;
- case ClassLiteral::Property::PRIVATE_FIELD:
- prop_kind = "PRIVATE FIELD";
+ case ClassLiteral::Property::FIELD:
+ prop_kind = "FIELD";
break;
}
EmbeddedVector<char, 128> buf;
- SNPrintF(buf, "PROPERTY%s - %s", property->is_static() ? " - STATIC" : "",
- prop_kind);
+ SNPrintF(buf, "PROPERTY%s%s - %s", property->is_static() ? " - STATIC" : "",
+ property->is_private() ? " - PRIVATE" : " - PUBLIC", prop_kind);
IndentedScope prop(this, buf.start());
PrintIndentedVisit("KEY", properties->at(i)->key());
PrintIndentedVisit("VALUE", properties->at(i)->value());
@@ -1129,7 +1148,7 @@ void AstPrinter::VisitObjectLiteral(ObjectLiteral* node) {
}
void AstPrinter::PrintObjectProperties(
- ZonePtrList<ObjectLiteral::Property>* properties) {
+ const ZonePtrList<ObjectLiteral::Property>* properties) {
for (int i = 0; i < properties->length(); i++) {
ObjectLiteral::Property* property = properties->at(i);
const char* prop_kind = nullptr;
@@ -1252,7 +1271,7 @@ void AstPrinter::VisitProperty(Property* node) {
IndentedScope indent(this, buf.start(), node->position());
Visit(node->obj());
- LhsKind property_kind = Property::GetAssignType(node);
+ AssignType property_kind = Property::GetAssignType(node);
if (property_kind == NAMED_PROPERTY ||
property_kind == NAMED_SUPER_PROPERTY) {
PrintLiteralIndented("NAME", node->key()->AsLiteral(), false);
@@ -1350,11 +1369,6 @@ void AstPrinter::VisitEmptyParentheses(EmptyParentheses* node) {
IndentedScope indent(this, "()", node->position());
}
-void AstPrinter::VisitGetIterator(GetIterator* node) {
- IndentedScope indent(this, "GET-ITERATOR", node->position());
- Visit(node->iterable());
-}
-
void AstPrinter::VisitGetTemplateObject(GetTemplateObject* node) {
IndentedScope indent(this, "GET-TEMPLATE-OBJECT", node->position());
}
@@ -1392,11 +1406,6 @@ void AstPrinter::VisitSuperCallReference(SuperCallReference* node) {
}
-void AstPrinter::VisitRewritableExpression(RewritableExpression* node) {
- Visit(node->expression());
-}
-
-
#endif // DEBUG
} // namespace internal
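
The CallPrinter changes above compensate for the removal of the GetIterator AST node: iterator errors are now detected by comparing the error position against the for-of subject, or against the value of a destructuring array assignment, so messages such as "x is not iterable" still point at the offending expression. Inputs that exercise the two new paths (a sketch; exact message text may vary):

  for (const x of 42) {}         // for-of path: TypeError, 42 is not iterable
  const [a, b] = { length: 2 };  // assignment path: object is not iterable
  // A for await over a non-async iterable takes the IteratorType::kAsync branch.
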
diff --git a/deps/v8/src/ast/prettyprinter.h b/deps/v8/src/ast/prettyprinter.h
index 71019fe264..e6f2766915 100644
--- a/deps/v8/src/ast/prettyprinter.h
+++ b/deps/v8/src/ast/prettyprinter.h
@@ -59,8 +59,8 @@ class CallPrinter final : public AstVisitor<CallPrinter> {
protected:
void PrintLiteral(Handle<Object> value, bool quote);
void PrintLiteral(const AstRawString* value, bool quote);
- void FindStatements(ZonePtrList<Statement>* statements);
- void FindArguments(ZonePtrList<Expression>* arguments);
+ void FindStatements(const ZonePtrList<Statement>* statements);
+ void FindArguments(const ZonePtrList<Expression>* arguments);
};
@@ -98,10 +98,10 @@ class AstPrinter final : public AstVisitor<AstPrinter> {
void PrintIndented(const char* txt);
void PrintIndentedVisit(const char* s, AstNode* node);
- void PrintStatements(ZonePtrList<Statement>* statements);
+ void PrintStatements(const ZonePtrList<Statement>* statements);
void PrintDeclarations(Declaration::List* declarations);
void PrintParameters(DeclarationScope* scope);
- void PrintArguments(ZonePtrList<Expression>* arguments);
+ void PrintArguments(const ZonePtrList<Expression>* arguments);
void PrintCaseClause(CaseClause* clause);
void PrintLiteralIndented(const char* info, Literal* literal, bool quote);
void PrintLiteralIndented(const char* info, const AstRawString* value,
@@ -112,8 +112,10 @@ class AstPrinter final : public AstVisitor<AstPrinter> {
const AstRawString* value);
void PrintLabelsIndented(ZonePtrList<const AstRawString>* labels,
const char* prefix = "");
- void PrintObjectProperties(ZonePtrList<ObjectLiteral::Property>* properties);
- void PrintClassProperties(ZonePtrList<ClassLiteral::Property>* properties);
+ void PrintObjectProperties(
+ const ZonePtrList<ObjectLiteral::Property>* properties);
+ void PrintClassProperties(
+ const ZonePtrList<ClassLiteral::Property>* properties);
void inc_indent() { indent_++; }
void dec_indent() { indent_--; }
diff --git a/deps/v8/src/ast/scopes-inl.h b/deps/v8/src/ast/scopes-inl.h
deleted file mode 100644
index a70166c5ca..0000000000
--- a/deps/v8/src/ast/scopes-inl.h
+++ /dev/null
@@ -1,66 +0,0 @@
-// Copyright 2018 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_AST_SCOPES_INL_H_
-#define V8_AST_SCOPES_INL_H_
-
-#include "src/ast/scopes.h"
-
-namespace v8 {
-namespace internal {
-
-template <typename T>
-void Scope::ResolveScopesThenForEachVariable(DeclarationScope* max_outer_scope,
- T variable_proxy_stackvisitor,
- ParseInfo* info) {
- // Module variables must be allocated before variable resolution
- // to ensure that UpdateNeedsHoleCheck() can detect import variables.
- if (info != nullptr && is_module_scope()) {
- AsModuleScope()->AllocateModuleVariables();
- }
- // Lazy parsed declaration scopes are already partially analyzed. If there are
- // unresolved references remaining, they just need to be resolved in outer
- // scopes.
- Scope* lookup =
- is_declaration_scope() && AsDeclarationScope()->was_lazily_parsed()
- ? outer_scope()
- : this;
-
- for (VariableProxy *proxy = unresolved_list_.first(), *next = nullptr;
- proxy != nullptr; proxy = next) {
- next = proxy->next_unresolved();
-
- DCHECK(!proxy->is_resolved());
- Variable* var =
- lookup->LookupRecursive(info, proxy, max_outer_scope->outer_scope());
- if (var == nullptr) {
- variable_proxy_stackvisitor(proxy);
- } else if (var != Scope::kDummyPreParserVariable &&
- var != Scope::kDummyPreParserLexicalVariable) {
- if (info != nullptr) {
- // In this case we need to leave scopes in a way that they can be
- // allocated. If we resolved variables from lazy parsed scopes, we need
- // to context allocate the var.
- ResolveTo(info, proxy, var);
- if (!var->is_dynamic() && lookup != this) var->ForceContextAllocation();
- } else {
- var->set_is_used();
- if (proxy->is_assigned()) var->set_maybe_assigned();
- }
- }
- }
-
- // Clear unresolved_list_ as it's in an inconsistent state.
- unresolved_list_.Clear();
-
- for (Scope* scope = inner_scope_; scope != nullptr; scope = scope->sibling_) {
- scope->ResolveScopesThenForEachVariable(max_outer_scope,
- variable_proxy_stackvisitor, info);
- }
-}
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_AST_SCOPES_INL_H_
diff --git a/deps/v8/src/ast/scopes.cc b/deps/v8/src/ast/scopes.cc
index e9fb195609..28869cd94a 100644
--- a/deps/v8/src/ast/scopes.cc
+++ b/deps/v8/src/ast/scopes.cc
@@ -8,29 +8,21 @@
#include "src/accessors.h"
#include "src/ast/ast.h"
-#include "src/ast/scopes-inl.h"
#include "src/base/optional.h"
#include "src/bootstrapper.h"
#include "src/counters.h"
-#include "src/messages.h"
+#include "src/message-template.h"
#include "src/objects-inl.h"
#include "src/objects/module-inl.h"
#include "src/objects/scope-info.h"
#include "src/parsing/parse-info.h"
-#include "src/parsing/preparsed-scope-data.h"
+#include "src/parsing/parser.h"
+#include "src/parsing/preparse-data.h"
#include "src/zone/zone-list-inl.h"
namespace v8 {
namespace internal {
-namespace {
-bool IsLexical(Variable* variable) {
- if (variable == Scope::kDummyPreParserLexicalVariable) return true;
- if (variable == Scope::kDummyPreParserVariable) return false;
- return IsLexicalVariableMode(variable->mode());
-}
-} // namespace
-
// ----------------------------------------------------------------------------
// Implementation of LocalsMap
//
@@ -48,34 +40,20 @@ Variable* VariableMap::Declare(Zone* zone, Scope* scope,
VariableKind kind,
InitializationFlag initialization_flag,
MaybeAssignedFlag maybe_assigned_flag,
- bool* added) {
+ bool* was_added) {
// AstRawStrings are unambiguous, i.e., the same string is always represented
// by the same AstRawString*.
// FIXME(marja): fix the type of Lookup.
Entry* p =
ZoneHashMap::LookupOrInsert(const_cast<AstRawString*>(name), name->Hash(),
ZoneAllocationPolicy(zone));
- if (added) *added = p->value == nullptr;
- if (p->value == nullptr) {
- // The variable has not been declared yet -> insert it.
- DCHECK_EQ(name, p->key);
- p->value = new (zone) Variable(scope, name, mode, kind, initialization_flag,
- maybe_assigned_flag);
- }
- return reinterpret_cast<Variable*>(p->value);
-}
-
-Variable* VariableMap::DeclareName(Zone* zone, const AstRawString* name,
- VariableMode mode) {
- Entry* p =
- ZoneHashMap::LookupOrInsert(const_cast<AstRawString*>(name), name->Hash(),
- ZoneAllocationPolicy(zone));
- if (p->value == nullptr) {
+ *was_added = p->value == nullptr;
+ if (*was_added) {
// The variable has not been declared yet -> insert it.
DCHECK_EQ(name, p->key);
- p->value = mode == VariableMode::kVar
- ? Scope::kDummyPreParserVariable
- : Scope::kDummyPreParserLexicalVariable;
+ Variable* variable = new (zone) Variable(
+ scope, name, mode, kind, initialization_flag, maybe_assigned_flag);
+ p->value = variable;
}
return reinterpret_cast<Variable*>(p->value);
}
@@ -149,24 +127,6 @@ Scope::Scope(Zone* zone, Scope* outer_scope, ScopeType scope_type)
outer_scope_->AddInnerScope(this);
}
-Scope::Snapshot::Snapshot(Scope* scope)
- : outer_scope_(scope),
- top_inner_scope_(scope->inner_scope_),
- top_unresolved_(scope->unresolved_list_.first()),
- top_local_(scope->GetClosureScope()->locals_.end()),
- top_decl_(scope->GetClosureScope()->decls_.end()),
- outer_scope_calls_eval_(scope->scope_calls_eval_) {
- // Reset in order to record eval calls during this Snapshot's lifetime.
- outer_scope_->scope_calls_eval_ = false;
-}
-
-Scope::Snapshot::~Snapshot() {
- // Restore previous calls_eval bit if needed.
- if (outer_scope_calls_eval_) {
- outer_scope_->scope_calls_eval_ = true;
- }
-}
-
DeclarationScope::DeclarationScope(Zone* zone,
AstValueFactory* ast_value_factory)
: Scope(zone), function_kind_(kNormalFunction), params_(4, zone) {
@@ -175,7 +135,7 @@ DeclarationScope::DeclarationScope(Zone* zone,
// Make sure that if we don't find the global 'this', it won't be declared as
// a regular dynamic global by predeclaring it with the right variable kind.
- DeclareDynamicGlobal(ast_value_factory->this_string(), THIS_VARIABLE);
+ DeclareDynamicGlobal(ast_value_factory->this_string(), THIS_VARIABLE, this);
}
DeclarationScope::DeclarationScope(Zone* zone, Scope* outer_scope,
@@ -188,13 +148,6 @@ DeclarationScope::DeclarationScope(Zone* zone, Scope* outer_scope,
SetDefaults();
}
-bool DeclarationScope::IsDeclaredParameter(const AstRawString* name) {
- // If IsSimpleParameterList is false, duplicate parameters are not allowed,
- // however `arguments` may be allowed if function is not strict code. Thus,
- // the assumptions explained above do not hold.
- return params_.Contains(variables_.Lookup(name));
-}
-
ModuleScope::ModuleScope(DeclarationScope* script_scope,
AstValueFactory* ast_value_factory)
: DeclarationScope(ast_value_factory->zone(), script_scope, MODULE_SCOPE,
@@ -267,7 +220,7 @@ Scope::Scope(Zone* zone, ScopeType scope_type, Handle<ScopeInfo> scope_info)
num_heap_slots_ = scope_info->ContextLength();
DCHECK_LE(Context::MIN_CONTEXT_SLOTS, num_heap_slots_);
// We don't really need to use the preparsed scope data; this is just to
- // shorten the recursion in SetMustUsePreParsedScopeData.
+ // shorten the recursion in SetMustUsePreparseData.
must_use_preparsed_scope_data_ = true;
}
@@ -294,22 +247,22 @@ Scope::Scope(Zone* zone, const AstRawString* catch_variable_name,
// Cache the catch variable, even though it's also available via the
// scope_info, as the parser expects that a catch scope always has the catch
// variable as first and only variable.
+ bool was_added;
Variable* variable =
Declare(zone, catch_variable_name, VariableMode::kVar, NORMAL_VARIABLE,
- kCreatedInitialized, maybe_assigned);
+ kCreatedInitialized, maybe_assigned, &was_added);
+ DCHECK(was_added);
AllocateHeapSlot(variable);
}
void DeclarationScope::SetDefaults() {
is_declaration_scope_ = true;
has_simple_parameters_ = true;
- asm_module_ = false;
+ is_asm_module_ = false;
force_eager_compilation_ = false;
has_arguments_parameter_ = false;
scope_uses_super_property_ = false;
has_rest_ = false;
- has_promise_ = false;
- has_generator_object_ = false;
sloppy_block_function_map_ = nullptr;
receiver_ = nullptr;
new_target_ = nullptr;
@@ -319,7 +272,7 @@ void DeclarationScope::SetDefaults() {
should_eager_compile_ = false;
was_lazily_parsed_ = false;
is_skipped_function_ = false;
- preparsed_scope_data_builder_ = nullptr;
+ preparse_data_builder_ = nullptr;
#ifdef DEBUG
DeclarationScope* outer_declaration_scope =
outer_scope_ ? outer_scope_->GetDeclarationScope() : nullptr;
@@ -374,12 +327,10 @@ void DeclarationScope::set_should_eager_compile() {
should_eager_compile_ = !was_lazily_parsed_;
}
-void DeclarationScope::set_asm_module() {
- asm_module_ = true;
-}
+void DeclarationScope::set_is_asm_module() { is_asm_module_ = true; }
bool Scope::IsAsmModule() const {
- return is_function_scope() && AsDeclarationScope()->asm_module();
+ return is_function_scope() && AsDeclarationScope()->is_asm_module();
}
bool Scope::ContainsAsmModule() const {
@@ -398,7 +349,7 @@ bool Scope::ContainsAsmModule() const {
}
Scope* Scope::DeserializeScopeChain(Isolate* isolate, Zone* zone,
- ScopeInfo* scope_info,
+ ScopeInfo scope_info,
DeclarationScope* script_scope,
AstValueFactory* ast_value_factory,
DeserializationMode deserialization_mode) {
@@ -406,7 +357,7 @@ Scope* Scope::DeserializeScopeChain(Isolate* isolate, Zone* zone,
Scope* current_scope = nullptr;
Scope* innermost_scope = nullptr;
Scope* outer_scope = nullptr;
- while (scope_info) {
+ while (!scope_info.is_null()) {
if (scope_info->scope_type() == WITH_SCOPE) {
// For scope analysis, debug-evaluate is equivalent to a with scope.
outer_scope =
@@ -429,8 +380,9 @@ Scope* Scope::DeserializeScopeChain(Isolate* isolate, Zone* zone,
} else if (scope_info->scope_type() == FUNCTION_SCOPE) {
outer_scope = new (zone)
DeclarationScope(zone, FUNCTION_SCOPE, handle(scope_info, isolate));
- if (scope_info->IsAsmModule())
- outer_scope->AsDeclarationScope()->set_asm_module();
+ if (scope_info->IsAsmModule()) {
+ outer_scope->AsDeclarationScope()->set_is_asm_module();
+ }
} else if (scope_info->scope_type() == EVAL_SCOPE) {
outer_scope = new (zone)
DeclarationScope(zone, EVAL_SCOPE, handle(scope_info, isolate));
@@ -450,7 +402,7 @@ Scope* Scope::DeserializeScopeChain(Isolate* isolate, Zone* zone,
DCHECK_EQ(scope_info->ContextLocalCount(), 1);
DCHECK_EQ(scope_info->ContextLocalMode(0), VariableMode::kVar);
DCHECK_EQ(scope_info->ContextLocalInitFlag(0), kCreatedInitialized);
- String* name = scope_info->ContextLocalName(0);
+ String name = scope_info->ContextLocalName(0);
MaybeAssignedFlag maybe_assigned =
scope_info->ContextLocalMaybeAssignedFlag(0);
outer_scope = new (zone)
@@ -466,7 +418,16 @@ Scope* Scope::DeserializeScopeChain(Isolate* isolate, Zone* zone,
current_scope = outer_scope;
if (innermost_scope == nullptr) innermost_scope = current_scope;
scope_info = scope_info->HasOuterScopeInfo() ? scope_info->OuterScopeInfo()
- : nullptr;
+ : ScopeInfo();
+ }
+
+ if (deserialization_mode == DeserializationMode::kIncludingVariables &&
+ script_scope->scope_info_.is_null()) {
+ Handle<ScriptContextTable> table(
+ isolate->native_context()->script_context_table(), isolate);
+ Handle<Context> first = ScriptContextTable::GetContext(isolate, table, 0);
+ Handle<ScopeInfo> scope_info(first->scope_info(), isolate);
+ script_scope->SetScriptScopeInfo(scope_info);
}
if (innermost_scope == nullptr) return script_scope;
@@ -519,7 +480,11 @@ void DeclarationScope::HoistSloppyBlockFunctions(AstNodeFactory* factory) {
SloppyBlockFunctionMap* map = sloppy_block_function_map();
if (map == nullptr) return;
- const bool has_simple_parameters = HasSimpleParameters();
+ // In case of complex parameters, the current scope is the body scope and the
+ // parameters are stored in the outer scope.
+ Scope* parameter_scope = HasSimpleParameters() ? this : outer_scope_;
+ DCHECK(parameter_scope->is_function_scope() || is_eval_scope() ||
+ is_script_scope());
// The declarations need to be added in the order they were seen,
// so accumulate declared names sorted by index.
@@ -534,21 +499,9 @@ void DeclarationScope::HoistSloppyBlockFunctions(AstNodeFactory* factory) {
// or parameter,
// Check if there's a conflict with a parameter.
- // This depends on the fact that functions always have a scope solely to
- // hold complex parameters, and the names local to that scope are
- // precisely the names of the parameters. IsDeclaredParameter(name) does
- // not hold for names declared by complex parameters, nor are those
- // bindings necessarily declared lexically, so we have to check for them
- // explicitly. On the other hand, if there are not complex parameters,
- // it is sufficient to just check IsDeclaredParameter.
- if (!has_simple_parameters) {
- if (outer_scope_->LookupLocal(name) != nullptr) {
- continue;
- }
- } else {
- if (IsDeclaredParameter(name)) {
- continue;
- }
+ Variable* maybe_parameter = parameter_scope->LookupLocal(name);
+ if (maybe_parameter != nullptr && maybe_parameter->is_parameter()) {
+ continue;
}
bool declaration_queued = false;
@@ -575,8 +528,8 @@ void DeclarationScope::HoistSloppyBlockFunctions(AstNodeFactory* factory) {
// example, that does not prevent hoisting of the function in
// `{ let e; try {} catch (e) { function e(){} } }`
do {
- var = query_scope->LookupLocal(name);
- if (var != nullptr && IsLexical(var)) {
+ var = query_scope->LookupInScopeOrScopeInfo(name);
+ if (var != nullptr && IsLexicalVariableMode(var->mode())) {
should_hoist = false;
break;
}
@@ -592,12 +545,12 @@ void DeclarationScope::HoistSloppyBlockFunctions(AstNodeFactory* factory) {
if (factory) {
DCHECK(!is_being_lazily_parsed_);
+ int pos = delegate->position();
Assignment* assignment = factory->NewAssignment(
- Token::ASSIGN, NewUnresolved(factory, name),
- delegate->scope()->NewUnresolved(factory, name), kNoSourcePosition);
+ Token::ASSIGN, NewUnresolved(factory, name, pos),
+ delegate->scope()->NewUnresolved(factory, name, pos), pos);
assignment->set_lookup_hoisting_mode(LookupHoistingMode::kLegacySloppy);
- Statement* statement =
- factory->NewExpressionStatement(assignment, kNoSourcePosition);
+ Statement* statement = factory->NewExpressionStatement(assignment, pos);
delegate->set_statement(statement);
}
}
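
These hoisting hunks drop the old IsDeclaredParameter bookkeeping in favor of a direct lookup in the parameter scope (the outer scope when the parameter list is complex): a sloppy-mode block function is still not hoisted past a parameter of the same name. A sketch of the preserved behavior:

  function f(g = 0) {    // complex parameter list: params live outside the body scope
    { function g() {} }  // block function conflicts with parameter g
    return g;            // not hoisted, so this is still 0
  }
  console.log(f());      // 0
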
@@ -610,46 +563,20 @@ void DeclarationScope::HoistSloppyBlockFunctions(AstNodeFactory* factory) {
if (factory) {
DCHECK(!is_being_lazily_parsed_);
VariableProxy* proxy = factory->NewVariableProxy(name, NORMAL_VARIABLE);
- auto declaration =
- factory->NewVariableDeclaration(proxy, kNoSourcePosition);
+ auto declaration = factory->NewVariableDeclaration(kNoSourcePosition);
+ bool was_added;
// Based on the preceding checks, it doesn't matter what we pass as
// sloppy_mode_block_scope_function_redefinition.
bool ok = true;
- DeclareVariable(declaration, VariableMode::kVar,
+ DeclareVariable(declaration, proxy, VariableMode::kVar, NORMAL_VARIABLE,
Variable::DefaultInitializationFlag(VariableMode::kVar),
- nullptr, &ok);
+ &was_added, nullptr, &ok);
DCHECK(ok);
} else {
DCHECK(is_being_lazily_parsed_);
- Variable* var = DeclareVariableName(name, VariableMode::kVar);
- if (var != kDummyPreParserVariable &&
- var != kDummyPreParserLexicalVariable) {
- DCHECK(FLAG_preparser_scope_analysis);
- var->set_maybe_assigned();
- }
- }
- }
-}
-
-void DeclarationScope::AttachOuterScopeInfo(ParseInfo* info, Isolate* isolate) {
- DCHECK(scope_info_.is_null());
- Handle<ScopeInfo> outer_scope_info;
- if (info->maybe_outer_scope_info().ToHandle(&outer_scope_info)) {
- // If we have a scope info we will potentially need to lookup variable names
- // on the scope info as internalized strings, so make sure ast_value_factory
- // is internalized.
- info->ast_value_factory()->Internalize(isolate);
- if (outer_scope()) {
- DeclarationScope* script_scope = new (info->zone())
- DeclarationScope(info->zone(), info->ast_value_factory());
- info->set_script_scope(script_scope);
- ReplaceOuterScope(Scope::DeserializeScopeChain(
- isolate, info->zone(), *outer_scope_info, script_scope,
- info->ast_value_factory(),
- Scope::DeserializationMode::kIncludingVariables));
- } else {
- DCHECK_EQ(outer_scope_info->scope_type(), SCRIPT_SCOPE);
- SetScriptScopeInfo(outer_scope_info);
+ bool was_added;
+ Variable* var = DeclareVariableName(name, VariableMode::kVar, &was_added);
+ var->set_maybe_assigned();
}
}
}
@@ -686,10 +613,9 @@ bool DeclarationScope::Analyze(ParseInfo* info) {
scope->set_should_eager_compile();
if (scope->must_use_preparsed_scope_data_) {
- DCHECK(FLAG_preparser_scope_analysis);
DCHECK_EQ(scope->scope_type_, ScopeType::FUNCTION_SCOPE);
allow_deref.emplace();
- info->consumed_preparsed_scope_data()->RestoreScopeAllocationData(scope);
+ info->consumed_preparse_data()->RestoreScopeAllocationData(scope);
}
if (!scope->AllocateVariables(info)) return false;
@@ -712,11 +638,14 @@ void DeclarationScope::DeclareThis(AstValueFactory* ast_value_factory) {
DCHECK(has_this_declaration());
bool derived_constructor = IsDerivedConstructor(function_kind_);
+ bool was_added;
Variable* var =
Declare(zone(), ast_value_factory->this_string(),
derived_constructor ? VariableMode::kConst : VariableMode::kVar,
THIS_VARIABLE,
- derived_constructor ? kNeedsInitialization : kCreatedInitialized);
+ derived_constructor ? kNeedsInitialization : kCreatedInitialized,
+ kNotAssigned, &was_added);
+ DCHECK(was_added);
receiver_ = var;
}
@@ -724,14 +653,14 @@ void DeclarationScope::DeclareArguments(AstValueFactory* ast_value_factory) {
DCHECK(is_function_scope());
DCHECK(!is_arrow_scope());
- arguments_ = LookupLocal(ast_value_factory->arguments_string());
- if (arguments_ == nullptr) {
- // Declare 'arguments' variable which exists in all non arrow functions.
- // Note that it might never be accessed, in which case it won't be
- // allocated during variable allocation.
- arguments_ = Declare(zone(), ast_value_factory->arguments_string(),
- VariableMode::kVar);
- } else if (IsLexical(arguments_)) {
+ // Declare the 'arguments' variable, which exists in all non-arrow functions. Note
+ // that it might never be accessed, in which case it won't be allocated during
+ // variable allocation.
+ bool was_added;
+ arguments_ =
+ Declare(zone(), ast_value_factory->arguments_string(), VariableMode::kVar,
+ NORMAL_VARIABLE, kCreatedInitialized, kNotAssigned, &was_added);
+ if (!was_added && IsLexicalVariableMode(arguments_->mode())) {
// Check if there's lexically declared variable named arguments to avoid
// redeclaration. See ES#sec-functiondeclarationinstantiation, step 20.
arguments_ = nullptr;
@@ -744,29 +673,35 @@ void DeclarationScope::DeclareDefaultFunctionVariables(
DCHECK(!is_arrow_scope());
DeclareThis(ast_value_factory);
+ bool was_added;
new_target_ = Declare(zone(), ast_value_factory->new_target_string(),
- VariableMode::kConst);
+ VariableMode::kConst, NORMAL_VARIABLE,
+ kCreatedInitialized, kNotAssigned, &was_added);
+ DCHECK(was_added);
if (IsConciseMethod(function_kind_) || IsClassConstructor(function_kind_) ||
IsAccessorFunction(function_kind_)) {
- EnsureRareData()->this_function =
- Declare(zone(), ast_value_factory->this_function_string(),
- VariableMode::kConst);
+ EnsureRareData()->this_function = Declare(
+ zone(), ast_value_factory->this_function_string(), VariableMode::kConst,
+ NORMAL_VARIABLE, kCreatedInitialized, kNotAssigned, &was_added);
+ DCHECK(was_added);
}
}
-Variable* DeclarationScope::DeclareFunctionVar(const AstRawString* name) {
+Variable* DeclarationScope::DeclareFunctionVar(const AstRawString* name,
+ Scope* cache) {
DCHECK(is_function_scope());
DCHECK_NULL(function_);
- DCHECK_NULL(variables_.Lookup(name));
+ if (cache == nullptr) cache = this;
+ DCHECK_NULL(cache->variables_.Lookup(name));
VariableKind kind = is_sloppy(language_mode()) ? SLOPPY_FUNCTION_NAME_VARIABLE
: NORMAL_VARIABLE;
function_ = new (zone())
Variable(this, name, VariableMode::kConst, kind, kCreatedInitialized);
if (calls_sloppy_eval()) {
- NonLocal(name, VariableMode::kDynamic);
+ cache->NonLocal(name, VariableMode::kDynamic);
} else {
- variables_.Add(zone(), function_);
+ cache->variables_.Add(zone(), function_);
}
return function_;
}
@@ -779,40 +714,14 @@ Variable* DeclarationScope::DeclareGeneratorObjectVar(
Variable* result = EnsureRareData()->generator_object =
NewTemporary(name, kNotAssigned);
result->set_is_used();
- has_generator_object_ = true;
return result;
}
-Variable* DeclarationScope::DeclarePromiseVar(const AstRawString* name) {
- DCHECK(is_function_scope());
- DCHECK_NULL(promise_var());
- Variable* result = EnsureRareData()->promise = NewTemporary(name);
- result->set_is_used();
- has_promise_ = true;
- return result;
-}
-
-bool Scope::HasBeenRemoved() const {
- if (sibling() == this) {
- DCHECK_NULL(inner_scope_);
- DCHECK(is_block_scope());
- return true;
- }
- return false;
-}
-
-Scope* Scope::GetUnremovedScope() {
- Scope* scope = this;
- while (scope != nullptr && scope->HasBeenRemoved()) {
- scope = scope->outer_scope();
- }
- DCHECK_NOT_NULL(scope);
- return scope;
-}
-
Scope* Scope::FinalizeBlockScope() {
DCHECK(is_block_scope());
- DCHECK(!HasBeenRemoved());
+#ifdef DEBUG
+ DCHECK_NE(sibling_, this);
+#endif
if (variables_.occupancy() > 0 ||
(is_declaration_scope() && AsDeclarationScope()->calls_sloppy_eval())) {
@@ -852,8 +761,9 @@ Scope* Scope::FinalizeBlockScope() {
num_heap_slots_ = 0;
// Mark scope as removed by making it its own sibling.
+#ifdef DEBUG
sibling_ = this;
- DCHECK(HasBeenRemoved());
+#endif
return nullptr;
}
@@ -865,25 +775,13 @@ void DeclarationScope::AddLocal(Variable* var) {
locals_.Add(var);
}
-Variable* Scope::Declare(Zone* zone, const AstRawString* name,
- VariableMode mode, VariableKind kind,
- InitializationFlag initialization_flag,
- MaybeAssignedFlag maybe_assigned_flag) {
- bool added;
- Variable* var =
- variables_.Declare(zone, this, name, mode, kind, initialization_flag,
- maybe_assigned_flag, &added);
- if (added) locals_.Add(var);
- return var;
-}
-
-void Scope::Snapshot::Reparent(DeclarationScope* new_parent) const {
- DCHECK_EQ(new_parent, outer_scope_->inner_scope_);
- DCHECK_EQ(new_parent->outer_scope_, outer_scope_);
+void Scope::Snapshot::Reparent(DeclarationScope* new_parent) {
+ DCHECK(!IsCleared());
+ DCHECK_EQ(new_parent, outer_scope_and_calls_eval_.GetPointer()->inner_scope_);
+ DCHECK_EQ(new_parent->outer_scope_, outer_scope_and_calls_eval_.GetPointer());
DCHECK_EQ(new_parent, new_parent->GetClosureScope());
DCHECK_NULL(new_parent->inner_scope_);
DCHECK(new_parent->unresolved_list_.is_empty());
- DCHECK(new_parent->locals_.is_empty());
Scope* inner_scope = new_parent->sibling_;
if (inner_scope != top_inner_scope_) {
for (; inner_scope->sibling() != top_inner_scope_;
@@ -905,52 +803,33 @@ void Scope::Snapshot::Reparent(DeclarationScope* new_parent) const {
new_parent->sibling_ = top_inner_scope_;
}
- if (outer_scope_->unresolved_list_.first() != top_unresolved_) {
- // If the marked VariableProxy (snapshoted) is not the first, we need to
- // find it and move all VariableProxys up to that point into the new_parent,
- // then we restore the snapshoted state by reinitializing the outer_scope
- // list.
- {
- auto iter = outer_scope_->unresolved_list_.begin();
- while (*iter != top_unresolved_) {
- ++iter;
- }
- outer_scope_->unresolved_list_.Rewind(iter);
- }
-
- new_parent->unresolved_list_ = std::move(outer_scope_->unresolved_list_);
- outer_scope_->unresolved_list_.ReinitializeHead(top_unresolved_);
- }
+ Scope* outer_scope_ = outer_scope_and_calls_eval_.GetPointer();
+ new_parent->unresolved_list_.MoveTail(&outer_scope_->unresolved_list_,
+ top_unresolved_);
- // TODO(verwaest): This currently only moves do-expression declared variables
- // in default arguments that weren't already previously declared with the same
- // name in the closure-scope. See
- // test/mjsunit/harmony/default-parameter-do-expression.js.
+ // Move temporaries allocated for complex parameter initializers.
DeclarationScope* outer_closure = outer_scope_->GetClosureScope();
-
- new_parent->locals_.MoveTail(outer_closure->locals(), top_local_);
- for (Variable* local : new_parent->locals_) {
- DCHECK(local->mode() == VariableMode::kTemporary ||
- local->mode() == VariableMode::kVar);
+ for (auto it = top_local_; it != outer_closure->locals()->end(); ++it) {
+ Variable* local = *it;
+ DCHECK_EQ(VariableMode::kTemporary, local->mode());
DCHECK_EQ(local->scope(), local->scope()->GetClosureScope());
DCHECK_NE(local->scope(), new_parent);
local->set_scope(new_parent);
- if (local->mode() == VariableMode::kVar) {
- outer_closure->variables_.Remove(local);
- new_parent->variables_.Add(new_parent->zone(), local);
- }
}
+ new_parent->locals_.MoveTail(outer_closure->locals(), top_local_);
outer_closure->locals_.Rewind(top_local_);
- outer_closure->decls_.Rewind(top_decl_);
// Move eval calls since Snapshot's creation into new_parent.
- if (outer_scope_->scope_calls_eval_) {
+ if (outer_scope_and_calls_eval_->scope_calls_eval_) {
new_parent->scope_calls_eval_ = true;
new_parent->inner_scope_calls_eval_ = true;
}
- // Reset the outer_scope's eval state. It will be restored to its
- // original value as necessary in the destructor of this class.
- outer_scope_->scope_calls_eval_ = false;
+
+ // We are in the arrow function case: any "calls eval" bit recorded since the
+ // snapshot is intended for the inner scope, so we simply restore the outer
+ // scope's original flag.
+ RestoreEvalFlag();
+ Clear();
}
void Scope::ReplaceOuterScope(Scope* outer) {
@@ -962,7 +841,10 @@ void Scope::ReplaceOuterScope(Scope* outer) {
outer_scope_ = outer;
}
-Variable* Scope::LookupInScopeInfo(const AstRawString* name) {
+Variable* Scope::LookupInScopeInfo(const AstRawString* name, Scope* cache) {
+ DCHECK(!scope_info_.is_null());
+ DCHECK_NULL(cache->variables_.Lookup(name));
+
Handle<String> name_handle = name->string();
// The Scope is backed up by ScopeInfo. This means it cannot operate in a
// heap-independent mode, and all strings must be internalized immediately. So
@@ -992,10 +874,10 @@ Variable* Scope::LookupInScopeInfo(const AstRawString* name) {
if (!found) {
index = scope_info_->FunctionContextSlotIndex(*name_handle);
if (index < 0) return nullptr; // Nowhere found.
- Variable* var = AsDeclarationScope()->DeclareFunctionVar(name);
+ Variable* var = AsDeclarationScope()->DeclareFunctionVar(name, cache);
DCHECK_EQ(VariableMode::kConst, var->mode());
var->AllocateTo(VariableLocation::CONTEXT, index);
- return variables_.Lookup(name);
+ return cache->variables_.Lookup(name);
}
VariableKind kind = NORMAL_VARIABLE;
@@ -1006,23 +888,20 @@ Variable* Scope::LookupInScopeInfo(const AstRawString* name) {
// TODO(marja, rossberg): Correctly declare FUNCTION, CLASS, NEW_TARGET, and
// ARGUMENTS bindings as their corresponding VariableKind.
- Variable* var = variables_.Declare(zone(), this, name, mode, kind, init_flag,
- maybe_assigned_flag);
+ bool was_added;
+ Variable* var =
+ cache->variables_.Declare(zone(), this, name, mode, kind, init_flag,
+ maybe_assigned_flag, &was_added);
+ DCHECK(was_added);
var->AllocateTo(location, index);
return var;
}
-Variable* Scope::Lookup(const AstRawString* name) {
- for (Scope* scope = this; scope != nullptr; scope = scope->outer_scope()) {
- Variable* var = scope->LookupLocal(name);
- if (var != nullptr) return var;
- }
- return nullptr;
-}
-
-Variable* DeclarationScope::DeclareParameter(
- const AstRawString* name, VariableMode mode, bool is_optional, bool is_rest,
- bool* is_duplicate, AstValueFactory* ast_value_factory, int position) {
+Variable* DeclarationScope::DeclareParameter(const AstRawString* name,
+ VariableMode mode,
+ bool is_optional, bool is_rest,
+ AstValueFactory* ast_value_factory,
+ int position) {
DCHECK(!already_resolved_);
DCHECK(is_function_scope() || is_module_scope());
DCHECK(!has_rest_);
@@ -1033,53 +912,37 @@ Variable* DeclarationScope::DeclareParameter(
if (mode == VariableMode::kTemporary) {
var = NewTemporary(name);
} else {
+ var = LookupLocal(name);
DCHECK_EQ(mode, VariableMode::kVar);
- var = Declare(zone(), name, mode);
- // TODO(wingo): Avoid O(n^2) check.
- if (is_duplicate != nullptr) {
- *is_duplicate = *is_duplicate || IsDeclaredParameter(name);
- }
+ DCHECK(var->is_parameter());
}
has_rest_ = is_rest;
var->set_initializer_position(position);
params_.Add(var, zone());
+ if (!is_rest) ++num_parameters_;
if (name == ast_value_factory->arguments_string()) {
has_arguments_parameter_ = true;
}
+ // Params are automatically marked as used to make sure that the debugger and
+ // function.arguments see them.
+ // TODO(verwaest): Reevaluate whether we always need to do this, since
+ // strict-mode function.arguments does not make the arguments available.
+ var->set_is_used();
return var;
}
-Variable* DeclarationScope::DeclareParameterName(
- const AstRawString* name, bool is_rest, AstValueFactory* ast_value_factory,
- bool declare_as_local, bool add_parameter) {
+void DeclarationScope::RecordParameter(bool is_rest) {
DCHECK(!already_resolved_);
DCHECK(is_function_scope() || is_module_scope());
- DCHECK(!has_rest_ || is_rest);
DCHECK(is_being_lazily_parsed_);
+ DCHECK(!has_rest_);
has_rest_ = is_rest;
- if (name == ast_value_factory->arguments_string()) {
- has_arguments_parameter_ = true;
- }
- if (FLAG_preparser_scope_analysis) {
- Variable* var;
- if (declare_as_local) {
- var = Declare(zone(), name, VariableMode::kVar);
- } else {
- var = new (zone()) Variable(this, name, VariableMode::kTemporary,
- NORMAL_VARIABLE, kCreatedInitialized);
- }
- if (add_parameter) {
- params_.Add(var, zone());
- }
- return var;
- }
- DeclareVariableName(name, VariableMode::kVar);
- return nullptr;
+ if (!is_rest) ++num_parameters_;
}
Variable* Scope::DeclareLocal(const AstRawString* name, VariableMode mode,
- InitializationFlag init_flag, VariableKind kind,
- MaybeAssignedFlag maybe_assigned_flag) {
+ VariableKind kind, bool* was_added,
+ InitializationFlag init_flag) {
DCHECK(!already_resolved_);
// This function handles VariableMode::kVar, VariableMode::kLet, and
// VariableMode::kConst modes. VariableMode::kDynamic variables are
@@ -1090,11 +953,14 @@ Variable* Scope::DeclareLocal(const AstRawString* name, VariableMode mode,
mode == VariableMode::kVar || mode == VariableMode::kLet ||
mode == VariableMode::kConst);
DCHECK(!GetDeclarationScope()->was_lazily_parsed());
- return Declare(zone(), name, mode, kind, init_flag, maybe_assigned_flag);
+ return Declare(zone(), name, mode, kind, init_flag, kNotAssigned, was_added);
}
+// TODO(leszeks): Avoid passing the proxy into here; pass the raw_name alone
+// instead.
Variable* Scope::DeclareVariable(
- Declaration* declaration, VariableMode mode, InitializationFlag init,
+ Declaration* declaration, VariableProxy* proxy, VariableMode mode,
+ VariableKind kind, InitializationFlag init, bool* was_added,
bool* sloppy_mode_block_scope_function_redefinition, bool* ok) {
DCHECK(IsDeclaredVariableMode(mode));
DCHECK(!already_resolved_);
@@ -1103,18 +969,16 @@ Variable* Scope::DeclareVariable(
if (mode == VariableMode::kVar && !is_declaration_scope()) {
return GetDeclarationScope()->DeclareVariable(
- declaration, mode, init, sloppy_mode_block_scope_function_redefinition,
- ok);
+ declaration, proxy, mode, kind, init, was_added,
+ sloppy_mode_block_scope_function_redefinition, ok);
}
DCHECK(!is_catch_scope());
DCHECK(!is_with_scope());
DCHECK(is_declaration_scope() ||
(IsLexicalVariableMode(mode) && is_block_scope()));
- VariableProxy* proxy = declaration->proxy();
DCHECK_NOT_NULL(proxy->raw_name());
const AstRawString* name = proxy->raw_name();
- bool is_function_declaration = declaration->IsFunctionDeclaration();
// Pessimistically assume that top-level variables will be assigned.
//
@@ -1127,71 +991,55 @@ Variable* Scope::DeclareVariable(
if (mode != VariableMode::kConst) proxy->set_is_assigned();
}
- Variable* var = nullptr;
- if (is_eval_scope() && is_sloppy(language_mode()) &&
- mode == VariableMode::kVar) {
- // In a var binding in a sloppy direct eval, pollute the enclosing scope
- // with this new binding by doing the following:
- // The proxy is bound to a lookup variable to force a dynamic declaration
- // using the DeclareEvalVar or DeclareEvalFunction runtime functions.
- var = new (zone())
- Variable(this, name, mode, NORMAL_VARIABLE, init, kMaybeAssigned);
- var->AllocateTo(VariableLocation::LOOKUP, -1);
- } else {
- // Declare the variable in the declaration scope.
- var = LookupLocal(name);
- if (var == nullptr) {
+ Variable* var = LookupLocal(name);
+ // Declare the variable in the declaration scope.
+ *was_added = var == nullptr;
+ if (V8_LIKELY(*was_added)) {
+ if (V8_UNLIKELY(is_eval_scope() && is_sloppy(language_mode()) &&
+ mode == VariableMode::kVar)) {
+ // In a var binding in a sloppy direct eval, pollute the enclosing scope
+ // with this new binding by doing the following:
+ // The proxy is bound to a lookup variable to force a dynamic declaration
+ // using the DeclareEvalVar or DeclareEvalFunction runtime functions.
+ DCHECK_EQ(NORMAL_VARIABLE, kind);
+ var = NonLocal(proxy->raw_name(), VariableMode::kDynamic);
+ } else {
// Declare the name.
- VariableKind kind = NORMAL_VARIABLE;
- if (is_function_declaration) {
- kind = FUNCTION_VARIABLE;
- }
- var = DeclareLocal(name, mode, init, kind, kNotAssigned);
- } else if (IsLexicalVariableMode(mode) ||
- IsLexicalVariableMode(var->mode())) {
- // Allow duplicate function decls for web compat, see bug 4693.
- bool duplicate_allowed = false;
- if (is_sloppy(language_mode()) && is_function_declaration &&
- var->is_function()) {
- DCHECK(IsLexicalVariableMode(mode) &&
- IsLexicalVariableMode(var->mode()));
- // If the duplication is allowed, then the var will show up
- // in the SloppyBlockFunctionMap and the new FunctionKind
- // will be a permitted duplicate.
- FunctionKind function_kind =
- declaration->AsFunctionDeclaration()->fun()->kind();
- SloppyBlockFunctionMap* map =
- GetDeclarationScope()->sloppy_block_function_map();
- duplicate_allowed = map != nullptr &&
- map->Lookup(const_cast<AstRawString*>(name),
- name->Hash()) != nullptr &&
- !IsAsyncFunction(function_kind) &&
- !IsGeneratorFunction(function_kind);
- }
- if (duplicate_allowed) {
- *sloppy_mode_block_scope_function_redefinition = true;
- } else {
- // The name was declared in this scope before; check for conflicting
- // re-declarations. We have a conflict if either of the declarations
- // is not a var (in script scope, we also have to ignore legacy const
- // for compatibility). There is similar code in runtime.cc in the
- // Declare functions. The function CheckConflictingVarDeclarations
- // checks for var and let bindings from different scopes whereas this
- // is a check for conflicting declarations within the same scope. This
- // check also covers the special case
- //
- // function () { let x; { var x; } }
- //
- // because the var declaration is hoisted to the function scope where
- // 'x' is already bound.
- DCHECK(IsDeclaredVariableMode(var->mode()));
- // In harmony we treat re-declarations as early errors. See
- // ES5 16 for a definition of early errors.
- *ok = false;
- return nullptr;
- }
- } else if (mode == VariableMode::kVar) {
- var->set_maybe_assigned();
+ var = DeclareLocal(name, mode, kind, was_added, init);
+ DCHECK(*was_added);
+ }
+ } else {
+ var->set_maybe_assigned();
+ if (V8_UNLIKELY(IsLexicalVariableMode(mode) ||
+ IsLexicalVariableMode(var->mode()))) {
+ // The name was declared in this scope before; check for conflicting
+ // re-declarations. We have a conflict if either of the declarations is
+ // not a var (in script scope, we also have to ignore legacy const for
+ // compatibility). There is similar code in runtime.cc in the Declare
+ // functions. The function CheckConflictingVarDeclarations checks for
+ // var and let bindings from different scopes whereas this is a check
+ // for conflicting declarations within the same scope. This check also
+ // covers the special case
+ //
+ // function () { let x; { var x; } }
+ //
+ // because the var declaration is hoisted to the function scope where
+ // 'x' is already bound.
+ //
+ // In harmony we treat re-declarations as early errors. See ES5 16 for a
+ // definition of early errors.
+ //
+ // Allow duplicate function decls for web compat, see bug 4693. If the
+ // duplication is allowed, then the var will show up in the
+ // SloppyBlockFunctionMap.
+ SloppyBlockFunctionMap* map =
+ GetDeclarationScope()->sloppy_block_function_map();
+ *ok =
+ map != nullptr && declaration->IsFunctionDeclaration() &&
+ declaration->AsFunctionDeclaration()
+ ->declares_sloppy_block_function() &&
+ map->Lookup(const_cast<AstRawString*>(name), name->Hash()) != nullptr;
+ *sloppy_mode_block_scope_function_redefinition = *ok;
}
}
DCHECK_NOT_NULL(var);
@@ -1206,18 +1054,21 @@ Variable* Scope::DeclareVariable(
// semantic issue, but it may be a performance issue since it may
// lead to repeated DeclareEvalVar or DeclareEvalFunction calls.
decls_.Add(declaration);
+ declaration->set_var(var);
proxy->BindTo(var);
return var;
}
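
For reference, the two branches above correspond to standard JavaScript
semantics. A minimal sketch (illustrative snippets, not part of the patch;
the line marked SyntaxError fails to parse):

    // A 'var' in a sloppy direct eval pollutes the calling scope, which is
    // why the V8_UNLIKELY branch declares it as a dynamic lookup variable:
    function f() {
      eval("var x = 1;");  // sloppy direct eval
      return x;            // 1, 'x' leaked into f's scope at runtime
    }

    // A lexical binding conflicting with a hoisted 'var' in the same scope
    // is the early error described in the comment above:
    function g() { let y; { var y; } }  // SyntaxError
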
Variable* Scope::DeclareVariableName(const AstRawString* name,
- VariableMode mode) {
+ VariableMode mode, bool* was_added,
+ VariableKind kind) {
DCHECK(IsDeclaredVariableMode(mode));
DCHECK(!already_resolved_);
DCHECK(GetDeclarationScope()->is_being_lazily_parsed());
if (mode == VariableMode::kVar && !is_declaration_scope()) {
- return GetDeclarationScope()->DeclareVariableName(name, mode);
+ return GetDeclarationScope()->DeclareVariableName(name, mode, was_added,
+ kind);
}
DCHECK(!is_with_scope());
DCHECK(!is_eval_scope());
@@ -1225,52 +1076,46 @@ Variable* Scope::DeclareVariableName(const AstRawString* name,
DCHECK(scope_info_.is_null());
// Declare the variable in the declaration scope.
- if (FLAG_preparser_scope_analysis) {
- Variable* var = LookupLocal(name);
- DCHECK_NE(var, kDummyPreParserLexicalVariable);
- DCHECK_NE(var, kDummyPreParserVariable);
- if (var == nullptr) {
- var = DeclareLocal(name, mode);
- } else if (IsLexicalVariableMode(mode) ||
- IsLexicalVariableMode(var->mode())) {
+ Variable* var = DeclareLocal(name, mode, kind, was_added);
+ if (!*was_added) {
+ if (IsLexicalVariableMode(mode) || IsLexicalVariableMode(var->mode())) {
    // Duplicate functions are allowed in sloppy mode, but if this is not
// a function declaration, it's an error. This is an error PreParser
- // hasn't previously detected. TODO(marja): Investigate whether we can now
- // start returning this error.
- } else if (mode == VariableMode::kVar) {
- var->set_maybe_assigned();
+ // hasn't previously detected.
+ return nullptr;
}
- var->set_is_used();
- return var;
- } else {
- return variables_.DeclareName(zone(), name, mode);
+ if (mode == VariableMode::kVar) var->set_maybe_assigned();
}
+ var->set_is_used();
+ return var;
}
-void Scope::DeclareCatchVariableName(const AstRawString* name) {
+Variable* Scope::DeclareCatchVariableName(const AstRawString* name) {
DCHECK(!already_resolved_);
- DCHECK(GetDeclarationScope()->is_being_lazily_parsed());
DCHECK(is_catch_scope());
DCHECK(scope_info_.is_null());
- if (FLAG_preparser_scope_analysis) {
- Declare(zone(), name, VariableMode::kVar);
- } else {
- variables_.DeclareName(zone(), name, VariableMode::kVar);
- }
+ bool was_added;
+ Variable* result = Declare(zone(), name, VariableMode::kVar, NORMAL_VARIABLE,
+ kCreatedInitialized, kNotAssigned, &was_added);
+ DCHECK(was_added);
+ return result;
}
void Scope::AddUnresolved(VariableProxy* proxy) {
DCHECK(!already_resolved_);
DCHECK(!proxy->is_resolved());
- unresolved_list_.AddFront(proxy);
+ unresolved_list_.Add(proxy);
}
Variable* DeclarationScope::DeclareDynamicGlobal(const AstRawString* name,
- VariableKind kind) {
+ VariableKind kind,
+ Scope* cache) {
DCHECK(is_script_scope());
- return variables_.Declare(zone(), this, name, VariableMode::kDynamicGlobal,
- kind);
+ bool was_added;
+ return cache->variables_.Declare(
+ zone(), this, name, VariableMode::kDynamicGlobal, kind,
+ kCreatedInitialized, kNotAssigned, &was_added);
// TODO(neis): Mark variable as maybe-assigned?
}
@@ -1278,6 +1123,11 @@ bool Scope::RemoveUnresolved(VariableProxy* var) {
return unresolved_list_.Remove(var);
}
+void Scope::DeleteUnresolved(VariableProxy* var) {
+ DCHECK(unresolved_list_.Contains(var));
+ var->mark_removed_from_unresolved();
+}
+
Variable* Scope::NewTemporary(const AstRawString* name) {
return NewTemporary(name, kMaybeAssigned);
}
@@ -1294,53 +1144,46 @@ Variable* Scope::NewTemporary(const AstRawString* name,
Declaration* Scope::CheckConflictingVarDeclarations() {
for (Declaration* decl : decls_) {
- VariableMode mode = decl->proxy()->var()->mode();
-
// Lexical vs lexical conflicts within the same scope have already been
// captured in Parser::Declare. The only conflicts we still need to check
- // are lexical vs nested var, or any declarations within a declaration
- // block scope vs lexical declarations in its surrounding (function) scope.
- Scope* current = this;
+ // are lexical vs nested var.
+ Scope* current = nullptr;
if (decl->IsVariableDeclaration() &&
decl->AsVariableDeclaration()->AsNested() != nullptr) {
- DCHECK_EQ(mode, VariableMode::kVar);
current = decl->AsVariableDeclaration()->AsNested()->scope();
- } else if (IsLexicalVariableMode(mode)) {
- if (!is_block_scope()) continue;
- DCHECK(is_declaration_scope());
- DCHECK_EQ(outer_scope()->scope_type(), FUNCTION_SCOPE);
- current = outer_scope();
+ } else if (is_eval_scope() && is_sloppy(language_mode())) {
+ if (IsLexicalVariableMode(decl->var()->mode())) continue;
+ current = outer_scope_;
}
-
+ if (current == nullptr) continue;
+ DCHECK(decl->var()->mode() == VariableMode::kVar ||
+ decl->var()->mode() == VariableMode::kDynamic);
// Iterate through all scopes until and including the declaration scope.
while (true) {
// There is a conflict if there exists a non-VAR binding.
Variable* other_var =
- current->variables_.Lookup(decl->proxy()->raw_name());
+ current->LookupInScopeOrScopeInfo(decl->var()->raw_name());
if (other_var != nullptr && IsLexicalVariableMode(other_var->mode())) {
return decl;
}
- if (current->is_declaration_scope()) break;
+ if (current->is_declaration_scope() &&
+ !(current->is_eval_scope() && is_sloppy(current->language_mode()))) {
+ break;
+ }
current = current->outer_scope();
}
}
return nullptr;
}
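
The sloppy-eval arm added above covers conflicts that only become visible
when the eval code is compiled against its calling scope (illustrative):

    let z = 1;
    // The 'var' declared by the sloppy direct eval conflicts with the
    // lexical 'z' in the calling scope chain, so compiling the eval code
    // throws a SyntaxError at runtime:
    eval("var z = 2;");
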
-Declaration* Scope::CheckLexDeclarationsConflictingWith(
- const ZonePtrList<const AstRawString>& names) {
- DCHECK(is_block_scope());
- for (int i = 0; i < names.length(); ++i) {
- Variable* var = LookupLocal(names.at(i));
- if (var != nullptr) {
- // Conflict; find and return its declaration.
- DCHECK(IsLexicalVariableMode(var->mode()));
- const AstRawString* name = names.at(i);
- for (Declaration* decl : decls_) {
- if (decl->proxy()->raw_name() == name) return decl;
- }
- DCHECK(false);
- }
+const AstRawString* Scope::FindVariableDeclaredIn(Scope* scope,
+ VariableMode mode_limit) {
+ const VariableMap& variables = scope->variables_;
+ for (ZoneHashMap::Entry* p = variables.Start(); p != nullptr;
+ p = variables.Next(p)) {
+ const AstRawString* name = static_cast<const AstRawString*>(p->key);
+ Variable* var = LookupLocal(name);
+ if (var != nullptr && var->mode() <= mode_limit) return name;
}
return nullptr;
}
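
FindVariableDeclaredIn replaces the removed list-based check; the patterns
it catches look like this (illustrative, both lines fail to parse):

    try {} catch (e) { let e; }   // SyntaxError: 'e' redeclared
    function f([e]) { let e; }    // SyntaxError: parameter shadowed by 'let'
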
@@ -1467,14 +1310,82 @@ Scope* Scope::GetOuterScopeWithContext() {
return scope;
}
+void Scope::CollectNonLocals(DeclarationScope* max_outer_scope,
+ Isolate* isolate, ParseInfo* info,
+ Handle<StringSet>* non_locals) {
+ // Module variables must be allocated before variable resolution
+ // to ensure that UpdateNeedsHoleCheck() can detect import variables.
+ if (is_module_scope()) AsModuleScope()->AllocateModuleVariables();
+
+ // Lazy parsed declaration scopes are already partially analyzed. If there are
+ // unresolved references remaining, they just need to be resolved in outer
+ // scopes.
+ Scope* lookup =
+ is_declaration_scope() && AsDeclarationScope()->was_lazily_parsed()
+ ? outer_scope()
+ : this;
+
+ for (VariableProxy* proxy : unresolved_list_) {
+ DCHECK(!proxy->is_resolved());
+ Variable* var =
+ Lookup<kParsedScope>(proxy, lookup, max_outer_scope->outer_scope());
+ if (var == nullptr) {
+ *non_locals = StringSet::Add(isolate, *non_locals, proxy->name());
+ } else {
+ // In this case we need to leave scopes in a way that they can be
+ // allocated. If we resolved variables from lazy parsed scopes, we need
+ // to context allocate the var.
+ ResolveTo(info, proxy, var);
+ if (!var->is_dynamic() && lookup != this) var->ForceContextAllocation();
+ }
+ }
+
+ // Clear unresolved_list_ as it's in an inconsistent state.
+ unresolved_list_.Clear();
+
+ for (Scope* scope = inner_scope_; scope != nullptr; scope = scope->sibling_) {
+ scope->CollectNonLocals(max_outer_scope, isolate, info, non_locals);
+ }
+}
+
+void Scope::AnalyzePartially(DeclarationScope* max_outer_scope,
+ AstNodeFactory* ast_node_factory,
+ UnresolvedList* new_unresolved_list) {
+ DCHECK_IMPLIES(is_declaration_scope(),
+ !AsDeclarationScope()->was_lazily_parsed());
+
+ for (VariableProxy* proxy = unresolved_list_.first(); proxy != nullptr;
+ proxy = proxy->next_unresolved()) {
+ DCHECK(!proxy->is_resolved());
+ Variable* var =
+ Lookup<kParsedScope>(proxy, this, max_outer_scope->outer_scope());
+ if (var == nullptr) {
+ // Don't copy unresolved references to the script scope, unless it's a
+ // reference to a private name or method. In that case keep it so we
+ // can fail later.
+ if (!max_outer_scope->outer_scope()->is_script_scope() ||
+ proxy->IsPrivateName()) {
+ VariableProxy* copy = ast_node_factory->CopyVariableProxy(proxy);
+ new_unresolved_list->Add(copy);
+ }
+ } else {
+ var->set_is_used();
+ if (proxy->is_assigned()) var->set_maybe_assigned();
+ }
+ }
+
+ // Clear unresolved_list_ as it's in an inconsistent state.
+ unresolved_list_.Clear();
+
+ for (Scope* scope = inner_scope_; scope != nullptr; scope = scope->sibling_) {
+ scope->AnalyzePartially(max_outer_scope, ast_node_factory,
+ new_unresolved_list);
+ }
+}
+
Handle<StringSet> DeclarationScope::CollectNonLocals(
Isolate* isolate, ParseInfo* info, Handle<StringSet> non_locals) {
- ResolveScopesThenForEachVariable(this,
- [=, &non_locals](VariableProxy* proxy) {
- non_locals = StringSet::Add(
- isolate, non_locals, proxy->name());
- },
- info);
+ Scope::CollectNonLocals(this, isolate, info, &non_locals);
return non_locals;
}
@@ -1483,11 +1394,7 @@ void DeclarationScope::ResetAfterPreparsing(AstValueFactory* ast_value_factory,
DCHECK(is_function_scope());
// Reset all non-trivial members.
- if (!aborted || !IsArrowFunction(function_kind_)) {
- // Do not remove parameters when lazy parsing an Arrow Function has failed,
- // as the formal parameters are not re-parsed.
- params_.Clear();
- }
+ params_.Clear();
decls_.Clear();
locals_.Clear();
inner_scope_ = nullptr;
@@ -1495,8 +1402,6 @@ void DeclarationScope::ResetAfterPreparsing(AstValueFactory* ast_value_factory,
sloppy_block_function_map_ = nullptr;
rare_data_ = nullptr;
has_rest_ = false;
- has_promise_ = false;
- has_generator_object_ = false;
DCHECK_NE(zone_, ast_value_factory->zone());
zone_->ReleaseMemory();
@@ -1506,6 +1411,7 @@ void DeclarationScope::ResetAfterPreparsing(AstValueFactory* ast_value_factory,
zone_ = ast_value_factory->zone();
variables_.Reset(ZoneAllocationPolicy(zone_));
if (!IsArrowFunction(function_kind_)) {
+ has_simple_parameters_ = true;
DeclareDefaultFunctionVariables(ast_value_factory);
}
} else {
@@ -1522,54 +1428,40 @@ void DeclarationScope::ResetAfterPreparsing(AstValueFactory* ast_value_factory,
was_lazily_parsed_ = !aborted;
}
-void Scope::SavePreParsedScopeData() {
- DCHECK(FLAG_preparser_scope_analysis);
- if (PreParsedScopeDataBuilder::ScopeIsSkippableFunctionScope(this)) {
- AsDeclarationScope()->SavePreParsedScopeDataForDeclarationScope();
+void Scope::SavePreparseData(Parser* parser) {
+ if (PreparseDataBuilder::ScopeIsSkippableFunctionScope(this)) {
+ AsDeclarationScope()->SavePreparseDataForDeclarationScope(parser);
}
for (Scope* scope = inner_scope_; scope != nullptr; scope = scope->sibling_) {
- scope->SavePreParsedScopeData();
+ scope->SavePreparseData(parser);
}
}
-void DeclarationScope::SavePreParsedScopeDataForDeclarationScope() {
- if (preparsed_scope_data_builder_ != nullptr) {
- DCHECK(FLAG_preparser_scope_analysis);
- preparsed_scope_data_builder_->SaveScopeAllocationData(this);
- }
+void DeclarationScope::SavePreparseDataForDeclarationScope(Parser* parser) {
+ if (preparse_data_builder_ == nullptr) return;
+ preparse_data_builder_->SaveScopeAllocationData(this, parser);
}
-void DeclarationScope::AnalyzePartially(AstNodeFactory* ast_node_factory) {
+void DeclarationScope::AnalyzePartially(Parser* parser,
+ AstNodeFactory* ast_node_factory) {
DCHECK(!force_eager_compilation_);
- base::ThreadedList<VariableProxy> new_unresolved_list;
+ UnresolvedList new_unresolved_list;
if (!IsArrowFunction(function_kind_) &&
(!outer_scope_->is_script_scope() ||
- (FLAG_preparser_scope_analysis &&
- preparsed_scope_data_builder_ != nullptr &&
- preparsed_scope_data_builder_->ContainsInnerFunctions()))) {
+ (preparse_data_builder_ != nullptr &&
+ preparse_data_builder_->HasInnerFunctions()))) {
// Try to resolve unresolved variables for this Scope and migrate those
// which cannot be resolved inside. It doesn't make sense to try to resolve
// them in the outer Scopes here, because they are incomplete.
- ResolveScopesThenForEachVariable(
- this, [=, &new_unresolved_list](VariableProxy* proxy) {
- // Don't copy unresolved references to the script scope, unless it's a
- // reference to a private field. In that case keep it so we can fail
- // later.
- if (!outer_scope_->is_script_scope() || proxy->is_private_field()) {
- VariableProxy* copy = ast_node_factory->CopyVariableProxy(proxy);
- new_unresolved_list.AddFront(copy);
- }
- });
+ Scope::AnalyzePartially(this, ast_node_factory, &new_unresolved_list);
// Migrate function_ to the right Zone.
if (function_ != nullptr) {
function_ = ast_node_factory->CopyVariable(function_);
}
- if (FLAG_preparser_scope_analysis) {
- SavePreParsedScopeData();
- }
+ SavePreparseData(parser);
}
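
As a sketch of what partial analysis migrates, consider a lazily parsed
function with a free reference (the name 'phantom' is hypothetical, used
only for illustration):

    function outer() {     // parsed lazily, then analyzed partially
      var v = 1;
      return v + phantom;  // 'v' resolves locally; 'phantom' is free
    }
    // 'phantom' cannot be resolved inside 'outer', so the proxy is copied to
    // the new unresolved list, unless the enclosing scope is the script
    // scope and the reference is not a private name.
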
#ifdef DEBUG
@@ -1668,10 +1560,6 @@ void PrintMap(int indent, const char* label, VariableMap* map, bool locals,
for (VariableMap::Entry* p = map->Start(); p != nullptr; p = map->Next(p)) {
Variable* var = reinterpret_cast<Variable*>(p->value);
if (var == function_var) continue;
- if (var == Scope::kDummyPreParserVariable ||
- var == Scope::kDummyPreParserLexicalVariable) {
- continue;
- }
bool local = !IsDynamicVariableMode(var->mode());
if ((locals ? local : !local) &&
(var->is_used() || !var->IsUnallocated())) {
@@ -1691,10 +1579,11 @@ void DeclarationScope::PrintParameters() {
for (int i = 0; i < params_.length(); i++) {
if (i > 0) PrintF(", ");
const AstRawString* name = params_[i]->raw_name();
- if (name->IsEmpty())
+ if (name->IsEmpty()) {
PrintF(".%p", reinterpret_cast<void*>(params_[i]));
- else
+ } else {
PrintName(name);
+ }
}
PrintF(")");
}
@@ -1823,107 +1712,175 @@ void Scope::CheckZones() {
Variable* Scope::NonLocal(const AstRawString* name, VariableMode mode) {
// Declare a new non-local.
DCHECK(IsDynamicVariableMode(mode));
- Variable* var = variables_.Declare(zone(), nullptr, name, mode);
+ bool was_added;
+ Variable* var =
+ variables_.Declare(zone(), this, name, mode, NORMAL_VARIABLE,
+ kCreatedInitialized, kNotAssigned, &was_added);
// Allocate it by giving it a dynamic lookup.
var->AllocateTo(VariableLocation::LOOKUP, -1);
return var;
}
-Variable* Scope::LookupRecursive(ParseInfo* info, VariableProxy* proxy,
- Scope* outer_scope_end) {
- DCHECK_NE(outer_scope_end, this);
- // Short-cut: whenever we find a debug-evaluate scope, just look everything up
- // dynamically. Debug-evaluate doesn't properly create scope info for the
- // lookups it does. It may not have a valid 'this' declaration, and anything
- // accessed through debug-evaluate might invalidly resolve to stack-allocated
- // variables.
- // TODO(yangguo): Remove once debug-evaluate creates proper ScopeInfo for the
- // scopes in which it's evaluating.
- if (is_debug_evaluate_scope_)
- return NonLocal(proxy->raw_name(), VariableMode::kDynamic);
-
- // Try to find the variable in this scope.
- Variable* var = LookupLocal(proxy->raw_name());
-
- // We found a variable and we are done. (Even if there is an 'eval' in this
- // scope which introduces the same variable again, the resulting variable
- // remains the same.)
- if (var != nullptr) return var;
-
- if (outer_scope_ == outer_scope_end) {
- // We may just be trying to find all free variables. In that case, don't
- // declare them in the outer scope.
- if (!is_script_scope()) return nullptr;
-
- if (proxy->is_private_field()) {
- info->pending_error_handler()->ReportMessageAt(
- proxy->position(), proxy->position() + 1,
- MessageTemplate::kInvalidPrivateFieldAccess, proxy->raw_name(),
- kSyntaxError);
- return nullptr;
+// static
+template <Scope::ScopeLookupMode mode>
+Variable* Scope::Lookup(VariableProxy* proxy, Scope* scope,
+ Scope* outer_scope_end, Scope* entry_point,
+ bool force_context_allocation) {
+ if (mode == kDeserializedScope) {
+ Variable* var = entry_point->variables_.Lookup(proxy->raw_name());
+ if (var != nullptr) return var;
+ }
+
+ while (true) {
+ DCHECK_IMPLIES(mode == kParsedScope, !scope->is_debug_evaluate_scope_);
+ // Short-cut: whenever we find a debug-evaluate scope, just look everything
+ // up dynamically. Debug-evaluate doesn't properly create scope info for the
+ // lookups it does. It may not have a valid 'this' declaration, and anything
+ // accessed through debug-evaluate might invalidly resolve to
+ // stack-allocated variables.
+ // TODO(yangguo): Remove once debug-evaluate creates proper ScopeInfo for
+ // the scopes in which it's evaluating.
+ if (mode == kDeserializedScope &&
+ V8_UNLIKELY(scope->is_debug_evaluate_scope_)) {
+ return entry_point->NonLocal(proxy->raw_name(), VariableMode::kDynamic);
}
- // No binding has been found. Declare a variable on the global object.
- return AsDeclarationScope()->DeclareDynamicGlobal(proxy->raw_name(),
- NORMAL_VARIABLE);
- }
+ // Try to find the variable in this scope.
+ Variable* var = mode == kParsedScope ? scope->LookupLocal(proxy->raw_name())
+ : scope->LookupInScopeInfo(
+ proxy->raw_name(), entry_point);
- DCHECK(!is_script_scope());
+ // We found a variable and we are done. (Even if there is an 'eval' in this
+ // scope which introduces the same variable again, the resulting variable
+ // remains the same.)
+ if (var != nullptr) {
+ if (mode == kParsedScope && force_context_allocation &&
+ !var->is_dynamic()) {
+ var->ForceContextAllocation();
+ }
+ return var;
+ }
- var = outer_scope_->LookupRecursive(info, proxy, outer_scope_end);
+ if (scope->outer_scope_ == outer_scope_end) break;
- // The variable could not be resolved statically.
- if (var == nullptr) return var;
+ DCHECK(!scope->is_script_scope());
+ if (V8_UNLIKELY(scope->is_with_scope())) {
+ return LookupWith(proxy, scope, outer_scope_end, entry_point,
+ force_context_allocation);
+ }
+ if (V8_UNLIKELY(scope->is_declaration_scope() &&
+ scope->AsDeclarationScope()->calls_sloppy_eval())) {
+ return LookupSloppyEval(proxy, scope, outer_scope_end, entry_point,
+ force_context_allocation);
+ }
- // TODO(marja): Separate LookupRecursive for preparsed scopes better.
- if (var == kDummyPreParserVariable || var == kDummyPreParserLexicalVariable) {
- DCHECK(GetDeclarationScope()->is_being_lazily_parsed());
- DCHECK(FLAG_lazy_inner_functions);
- return var;
+ force_context_allocation |= scope->is_function_scope();
+ scope = scope->outer_scope_;
+ // TODO(verwaest): Separate through AnalyzePartially.
+ if (mode == kParsedScope && !scope->scope_info_.is_null()) {
+ return Lookup<kDeserializedScope>(proxy, scope, outer_scope_end, scope);
+ }
}
- if (is_function_scope() && !var->is_dynamic()) {
- var->ForceContextAllocation();
- }
- // "this" can't be shadowed by "eval"-introduced bindings or by "with"
- // scopes.
- // TODO(wingo): There are other variables in this category; add them.
- if (var->is_this()) return var;
-
- if (is_with_scope()) {
- // The current scope is a with scope, so the variable binding can not be
- // statically resolved. However, note that it was necessary to do a lookup
- // in the outer scope anyway, because if a binding exists in an outer
- // scope, the associated variable has to be marked as potentially being
- // accessed from inside of an inner with scope (the property may not be in
- // the 'with' object).
- if (!var->is_dynamic() && var->IsUnallocated()) {
- DCHECK(!already_resolved_);
- var->set_is_used();
- var->ForceContextAllocation();
- if (proxy->is_assigned()) var->set_maybe_assigned();
- }
- return NonLocal(proxy->raw_name(), VariableMode::kDynamic);
+ // We may just be trying to find all free variables. In that case, don't
+ // declare them in the outer scope.
+ // TODO(marja): Separate Lookup for preparsed scopes better.
+ if (mode == kParsedScope && !scope->is_script_scope()) {
+ return nullptr;
}
+ if (V8_UNLIKELY(proxy->IsPrivateName())) return nullptr;
- if (is_declaration_scope() && AsDeclarationScope()->calls_sloppy_eval()) {
- // A variable binding may have been found in an outer scope, but the current
- // scope makes a sloppy 'eval' call, so the found variable may not be the
- // correct one (the 'eval' may introduce a binding with the same name). In
- // that case, change the lookup result to reflect this situation. Only
- // scopes that can host var bindings (declaration scopes) need be considered
- // here (this excludes block and catch scopes), and variable lookups at
- // script scope are always dynamic.
- if (var->IsGlobalObjectProperty()) {
- return NonLocal(proxy->raw_name(), VariableMode::kDynamicGlobal);
- }
+ // No binding has been found. Declare a variable on the global object.
+ return scope->AsDeclarationScope()->DeclareDynamicGlobal(
+ proxy->raw_name(), NORMAL_VARIABLE,
+ mode == kDeserializedScope ? entry_point : scope);
+}
- if (var->is_dynamic()) return var;
+template Variable* Scope::Lookup<Scope::kParsedScope>(
+ VariableProxy* proxy, Scope* scope, Scope* outer_scope_end,
+ Scope* entry_point, bool force_context_allocation);
+template Variable* Scope::Lookup<Scope::kDeserializedScope>(
+ VariableProxy* proxy, Scope* scope, Scope* outer_scope_end,
+ Scope* entry_point, bool force_context_allocation);
- Variable* invalidated = var;
- var = NonLocal(proxy->raw_name(), VariableMode::kDynamicLocal);
- var->set_local_if_not_shadowed(invalidated);
+namespace {
+bool CanBeShadowed(Scope* scope, Variable* var) {
+ if (var == nullptr) return false;
+
+ // "this" can't be shadowed by "eval"-introduced bindings or by "with" scopes.
+ // TODO(wingo): There are other variables in this category; add them.
+ return !var->is_this();
+}
+}  // namespace
+
+Variable* Scope::LookupWith(VariableProxy* proxy, Scope* scope,
+ Scope* outer_scope_end, Scope* entry_point,
+ bool force_context_allocation) {
+ DCHECK(scope->is_with_scope());
+
+ Variable* var =
+ scope->outer_scope_->scope_info_.is_null()
+ ? Lookup<kParsedScope>(proxy, scope->outer_scope_, outer_scope_end,
+ nullptr, force_context_allocation)
+ : Lookup<kDeserializedScope>(proxy, scope->outer_scope_,
+ outer_scope_end, entry_point);
+
+ if (!CanBeShadowed(scope, var)) return var;
+
+ // The current scope is a with scope, so the variable binding can not be
+ // statically resolved. However, note that it was necessary to do a lookup
+ // in the outer scope anyway, because if a binding exists in an outer
+ // scope, the associated variable has to be marked as potentially being
+ // accessed from inside of an inner with scope (the property may not be in
+ // the 'with' object).
+ if (!var->is_dynamic() && var->IsUnallocated()) {
+ DCHECK(!scope->already_resolved_);
+ var->set_is_used();
+ var->ForceContextAllocation();
+ if (proxy->is_assigned()) var->set_maybe_assigned();
}
+ if (entry_point != nullptr) entry_point->variables_.Remove(var);
+ Scope* target = entry_point == nullptr ? scope : entry_point;
+ return target->NonLocal(proxy->raw_name(), VariableMode::kDynamic);
+}
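
The dynamic lookup produced here reflects how 'with' (sloppy mode only)
defeats static resolution (illustrative):

    var x = 1;
    function f(obj) {
      with (obj) {
        // 'x' cannot be bound statically: it resolves to obj.x when that
        // property exists, and to the outer 'x' otherwise.
        return x;
      }
    }
    f({});        // 1
    f({ x: 2 });  // 2
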
+
+Variable* Scope::LookupSloppyEval(VariableProxy* proxy, Scope* scope,
+ Scope* outer_scope_end, Scope* entry_point,
+ bool force_context_allocation) {
+ DCHECK(scope->is_declaration_scope() &&
+ scope->AsDeclarationScope()->calls_sloppy_eval());
+
+ // If we're compiling eval, it's possible that the outer scope is the first
+ // ScopeInfo-backed scope.
+ Scope* entry = entry_point == nullptr ? scope->outer_scope_ : entry_point;
+ Variable* var =
+ scope->outer_scope_->scope_info_.is_null()
+ ? Lookup<kParsedScope>(proxy, scope->outer_scope_, outer_scope_end,
+ nullptr, force_context_allocation)
+ : Lookup<kDeserializedScope>(proxy, scope->outer_scope_,
+ outer_scope_end, entry);
+ if (!CanBeShadowed(scope, var)) return var;
+
+ // A variable binding may have been found in an outer scope, but the current
+ // scope makes a sloppy 'eval' call, so the found variable may not be the
+ // correct one (the 'eval' may introduce a binding with the same name). In
+ // that case, change the lookup result to reflect this situation. Only
+ // scopes that can host var bindings (declaration scopes) need be considered
+ // here (this excludes block and catch scopes), and variable lookups at
+ // script scope are always dynamic.
+ if (var->IsGlobalObjectProperty()) {
+ Scope* target = entry_point == nullptr ? scope : entry_point;
+ return target->NonLocal(proxy->raw_name(), VariableMode::kDynamicGlobal);
+ }
+
+ if (var->is_dynamic()) return var;
+
+ Variable* invalidated = var;
+ if (entry_point != nullptr) entry_point->variables_.Remove(invalidated);
+
+ Scope* target = entry_point == nullptr ? scope : entry_point;
+ var = target->NonLocal(proxy->raw_name(), VariableMode::kDynamicLocal);
+ var->set_local_if_not_shadowed(invalidated);
return var;
}
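
The invalidation above mirrors what a sloppy direct eval can do to an outer
binding at runtime (illustrative):

    var x = 1;
    function f(code) {
      eval(code);  // may or may not introduce a new 'x'
      return x;    // so 'x' is looked up dynamically (kDynamicLocal above)
    }
    f(";");           // 1, the outer 'x'
    f("var x = 2;");  // 2, the eval-introduced 'x' shadows it
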
@@ -1931,9 +1888,13 @@ Variable* Scope::LookupRecursive(ParseInfo* info, VariableProxy* proxy,
bool Scope::ResolveVariable(ParseInfo* info, VariableProxy* proxy) {
DCHECK(info->script_scope()->is_script_scope());
DCHECK(!proxy->is_resolved());
- Variable* var = LookupRecursive(info, proxy, nullptr);
+ Variable* var = Lookup<kParsedScope>(proxy, this, nullptr);
if (var == nullptr) {
- DCHECK(proxy->is_private_field());
+ DCHECK(proxy->IsPrivateName());
+ info->pending_error_handler()->ReportMessageAt(
+ proxy->position(), proxy->position() + 1,
+ MessageTemplate::kInvalidPrivateFieldResolution, proxy->raw_name(),
+ kSyntaxError);
return false;
}
ResolveTo(info, proxy, var);
@@ -2041,9 +2002,13 @@ bool Scope::ResolveVariablesRecursively(ParseInfo* info) {
if (is_declaration_scope() && AsDeclarationScope()->was_lazily_parsed()) {
DCHECK_EQ(variables_.occupancy(), 0);
for (VariableProxy* proxy : unresolved_list_) {
- Variable* var = outer_scope()->LookupRecursive(info, proxy, nullptr);
+ Variable* var = Lookup<kParsedScope>(proxy, outer_scope(), nullptr);
if (var == nullptr) {
- DCHECK(proxy->is_private_field());
+ info->pending_error_handler()->ReportMessageAt(
+ proxy->position(), proxy->position() + 1,
+ MessageTemplate::kInvalidPrivateFieldResolution, proxy->raw_name(),
+ kSyntaxError);
+ DCHECK(proxy->IsPrivateName());
return false;
}
if (!var->is_dynamic()) {
@@ -2068,9 +2033,6 @@ bool Scope::ResolveVariablesRecursively(ParseInfo* info) {
}
bool Scope::MustAllocate(Variable* var) {
- if (var == kDummyPreParserLexicalVariable || var == kDummyPreParserVariable) {
- return true;
- }
DCHECK(var->location() != VariableLocation::MODULE);
// Give var a read/write use if there is a chance it might be accessed
// via an eval() call. This is only possible if the variable has a
@@ -2143,6 +2105,7 @@ void DeclarationScope::AllocateParameterLocals() {
// order is relevant!
for (int i = num_parameters() - 1; i >= 0; --i) {
Variable* var = params_[i];
+ DCHECK_NOT_NULL(var);
DCHECK(!has_rest_ || var != rest_parameter());
DCHECK_EQ(this, var->scope());
if (has_mapped_arguments) {
@@ -2178,24 +2141,6 @@ void DeclarationScope::AllocateReceiver() {
AllocateParameter(receiver(), -1);
}
-void DeclarationScope::AllocatePromise() {
- if (!has_promise_) return;
- DCHECK_NOT_NULL(promise_var());
- DCHECK_EQ(this, promise_var()->scope());
- AllocateStackSlot(promise_var());
- DCHECK_EQ(VariableLocation::LOCAL, promise_var()->location());
- DCHECK_EQ(kPromiseVarIndex, promise_var()->index());
-}
-
-void DeclarationScope::AllocateGeneratorObject() {
- if (!has_generator_object_) return;
- DCHECK_NOT_NULL(generator_object_var());
- DCHECK_EQ(this, generator_object_var()->scope());
- AllocateStackSlot(generator_object_var());
- DCHECK_EQ(VariableLocation::LOCAL, generator_object_var()->location());
- DCHECK_EQ(kGeneratorObjectVarIndex, generator_object_var()->index());
-}
-
void Scope::AllocateNonParameterLocal(Variable* var) {
DCHECK(var->scope() == this);
if (var->IsUnallocated() && MustAllocate(var)) {
@@ -2257,26 +2202,12 @@ void ModuleScope::AllocateModuleVariables() {
void Scope::AllocateVariablesRecursively() {
DCHECK(!already_resolved_);
- DCHECK_IMPLIES(!FLAG_preparser_scope_analysis, num_stack_slots_ == 0);
// Don't allocate variables of preparsed scopes.
if (is_declaration_scope() && AsDeclarationScope()->was_lazily_parsed()) {
return;
}
- // Make sure to allocate the .promise (for async functions) or
- // .generator_object (for async generators) first, so that it
- // get's the required stack slot 0 in case it's needed. See
- // http://bit.ly/v8-zero-cost-async-stack-traces for details.
- if (is_function_scope()) {
- FunctionKind kind = GetClosureScope()->function_kind();
- if (IsAsyncGeneratorFunction(kind)) {
- AsDeclarationScope()->AllocateGeneratorObject();
- } else if (IsAsyncFunction(kind)) {
- AsDeclarationScope()->AllocatePromise();
- }
- }
-
// Allocate variables for inner scopes.
for (Scope* scope = inner_scope_; scope != nullptr; scope = scope->sibling_) {
scope->AllocateVariablesRecursively();
@@ -2299,8 +2230,10 @@ void Scope::AllocateVariablesRecursively() {
// scope and for a function scope that makes an 'eval' call we need a context,
// even if no local variables were statically allocated in the scope.
// Likewise for modules and function scopes representing asm.js modules.
+ // Also force a context, if the scope is stricter than the outer scope.
bool must_have_context =
is_with_scope() || is_module_scope() || IsAsmModule() ||
+ ForceContextForLanguageMode() ||
(is_function_scope() && AsDeclarationScope()->calls_sloppy_eval()) ||
(is_block_scope() && is_declaration_scope() &&
AsDeclarationScope()->calls_sloppy_eval());
@@ -2365,14 +2298,6 @@ void DeclarationScope::AllocateScopeInfos(ParseInfo* info, Isolate* isolate) {
}
}
-int Scope::StackLocalCount() const {
- Variable* function =
- is_function_scope() ? AsDeclarationScope()->function_var() : nullptr;
- return num_stack_slots() -
- (function != nullptr && function->IsStackLocal() ? 1 : 0);
-}
-
-
int Scope::ContextLocalCount() const {
if (num_heap_slots() == 0) return 0;
Variable* function =
@@ -2383,9 +2308,5 @@ int Scope::ContextLocalCount() const {
(is_function_var_in_context ? 1 : 0);
}
-void* const Scope::kDummyPreParserVariable = reinterpret_cast<void*>(0x1);
-void* const Scope::kDummyPreParserLexicalVariable =
- reinterpret_cast<void*>(0x2);
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/ast/scopes.h b/deps/v8/src/ast/scopes.h
index 0b88cc027c..971cfc519b 100644
--- a/deps/v8/src/ast/scopes.h
+++ b/deps/v8/src/ast/scopes.h
@@ -10,6 +10,7 @@
#include "src/base/hashmap.h"
#include "src/globals.h"
#include "src/objects.h"
+#include "src/pointer-with-payload.h"
#include "src/zone/zone.h"
namespace v8 {
@@ -20,7 +21,8 @@ class AstValueFactory;
class AstRawString;
class Declaration;
class ParseInfo;
-class PreParsedScopeDataBuilder;
+class Parser;
+class PreparseDataBuilder;
class SloppyBlockFunctionStatement;
class Statement;
class StringSet;
@@ -31,17 +33,10 @@ class VariableMap: public ZoneHashMap {
public:
explicit VariableMap(Zone* zone);
- Variable* Declare(
- Zone* zone, Scope* scope, const AstRawString* name, VariableMode mode,
- VariableKind kind = NORMAL_VARIABLE,
- InitializationFlag initialization_flag = kCreatedInitialized,
- MaybeAssignedFlag maybe_assigned_flag = kNotAssigned,
- bool* added = nullptr);
-
- // Records that "name" exists (if not recorded yet) but doesn't create a
- // Variable. Useful for preparsing.
- Variable* DeclareName(Zone* zone, const AstRawString* name,
- VariableMode mode);
+ Variable* Declare(Zone* zone, Scope* scope, const AstRawString* name,
+ VariableMode mode, VariableKind kind,
+ InitializationFlag initialization_flag,
+ MaybeAssignedFlag maybe_assigned_flag, bool* was_added);
Variable* Lookup(const AstRawString* name);
void Remove(Variable* var);
@@ -57,10 +52,12 @@ class SloppyBlockFunctionMap : public ZoneHashMap {
Delegate(Scope* scope, SloppyBlockFunctionStatement* statement, int index)
: scope_(scope), statement_(statement), next_(nullptr), index_(index) {}
void set_statement(Statement* statement);
+
void set_next(Delegate* next) { next_ = next; }
Delegate* next() const { return next_; }
Scope* scope() const { return scope_; }
int index() const { return index_; }
+ int position() const { return statement_->position(); }
private:
Scope* scope_;
@@ -77,6 +74,13 @@ class SloppyBlockFunctionMap : public ZoneHashMap {
int count_;
};
+class Scope;
+
+template <>
+struct PointerWithPayloadTraits<Scope> {
+ static constexpr int value = 1;
+};
+
// Global invariants after AST construction: Each reference (i.e. identifier)
// to a JavaScript variable (including global properties) is represented by a
// VariableProxy node. Immediately after AST construction and before variable
@@ -104,6 +108,9 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
}
#endif
+ typedef base::ThreadedList<VariableProxy, VariableProxy::UnresolvedNext>
+ UnresolvedList;
+
// TODO(verwaest): Is this needed on Scope?
int num_parameters() const;
@@ -114,24 +121,62 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
class Snapshot final {
public:
- explicit Snapshot(Scope* scope);
- ~Snapshot();
+ Snapshot()
+ : outer_scope_and_calls_eval_(nullptr, false),
+ top_unresolved_(),
+ top_local_() {
+ DCHECK(IsCleared());
+ }
+ inline explicit Snapshot(Scope* scope);
+
+ ~Snapshot() {
+    // If we're still active, there was no arrow function. In that case the
+    // outer scope calls eval if it already called eval before this snapshot
+    // started, or if the code parsed during the snapshot called eval.
+ if (!IsCleared() && outer_scope_and_calls_eval_.GetPayload()) {
+ RestoreEvalFlag();
+ }
+ }
- void Reparent(DeclarationScope* new_parent) const;
+ void RestoreEvalFlag() {
+ outer_scope_and_calls_eval_->scope_calls_eval_ =
+ outer_scope_and_calls_eval_.GetPayload();
+ }
+
+ void Reparent(DeclarationScope* new_parent);
+ bool IsCleared() const {
+ return outer_scope_and_calls_eval_.GetPointer() == nullptr;
+ }
+
+ void Clear() {
+ outer_scope_and_calls_eval_.SetPointer(nullptr);
+#ifdef DEBUG
+ outer_scope_and_calls_eval_.SetPayload(false);
+ top_inner_scope_ = nullptr;
+ top_local_ = base::ThreadedList<Variable>::Iterator();
+ top_unresolved_ = UnresolvedList::Iterator();
+#endif
+ }
private:
- Scope* outer_scope_;
+  // While the snapshot is active, calls_eval caches whether the outer scope
+  // called eval. Upon move assignment we store whether the new inner scope
+  // calls eval into the move target's calls_eval bit, and restore the
+  // calls_eval flag on the outer scope.
+ PointerWithPayload<Scope, bool, 1> outer_scope_and_calls_eval_;
Scope* top_inner_scope_;
- VariableProxy* top_unresolved_;
+ UnresolvedList::Iterator top_unresolved_;
base::ThreadedList<Variable>::Iterator top_local_;
- base::ThreadedList<Declaration>::Iterator top_decl_;
- const bool outer_scope_calls_eval_;
+
+ // Disallow copy and move.
+ Snapshot(const Snapshot&) = delete;
+ Snapshot(Snapshot&&) = delete;
};
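
Snapshots are taken while the parser cannot yet tell what it is parsing; the
classic case is a parenthesized expression that may turn out to be an arrow
function parameter list (an illustrative sketch of the ambiguity; the actual
call sites live in the parser):

    // While scanning '(a = eval("1"))' it is unknown whether this is a
    // parenthesized assignment or arrow parameters, so the eval flag is
    // tracked through a Snapshot and restored, or reparented, once '=>' is
    // seen (or not).
    var f = (a = eval("1")) => a;
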
enum class DeserializationMode { kIncludingVariables, kScopesOnly };
static Scope* DeserializeScopeChain(Isolate* isolate, Zone* zone,
- ScopeInfo* scope_info,
+ ScopeInfo scope_info,
DeclarationScope* script_scope,
AstValueFactory* ast_value_factory,
DeserializationMode deserialization_mode);
@@ -141,11 +186,6 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
// tree and its children are reparented.
Scope* FinalizeBlockScope();
- bool HasBeenRemoved() const;
-
- // Find the first scope that hasn't been removed.
- Scope* GetUnremovedScope();
-
// Inserts outer_scope into this scope's scope chain (and removes this
// from the current outer_scope_'s inner scope list).
// Assumes outer_scope_ is non-null.
@@ -153,13 +193,13 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
Zone* zone() const { return zone_; }
- void SetMustUsePreParsedScopeData() {
+ void SetMustUsePreparseData() {
if (must_use_preparsed_scope_data_) {
return;
}
must_use_preparsed_scope_data_ = true;
if (outer_scope_) {
- outer_scope_->SetMustUsePreParsedScopeData();
+ outer_scope_->SetMustUsePreparseData();
}
}
@@ -173,32 +213,29 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
// Lookup a variable in this scope. Returns the variable or nullptr if not
// found.
Variable* LookupLocal(const AstRawString* name) {
- Variable* result = variables_.Lookup(name);
- if (result != nullptr || scope_info_.is_null()) return result;
- return LookupInScopeInfo(name);
+ DCHECK(scope_info_.is_null());
+ return variables_.Lookup(name);
}
- Variable* LookupInScopeInfo(const AstRawString* name);
-
- // Lookup a variable in this scope or outer scopes.
- // Returns the variable or nullptr if not found.
- Variable* Lookup(const AstRawString* name);
+ Variable* LookupInScopeInfo(const AstRawString* name, Scope* cache);
// Declare a local variable in this scope. If the variable has been
// declared before, the previously declared variable is returned.
Variable* DeclareLocal(const AstRawString* name, VariableMode mode,
- InitializationFlag init_flag = kCreatedInitialized,
- VariableKind kind = NORMAL_VARIABLE,
- MaybeAssignedFlag maybe_assigned_flag = kNotAssigned);
+ VariableKind kind, bool* was_added,
+ InitializationFlag init_flag = kCreatedInitialized);
- Variable* DeclareVariable(Declaration* declaration, VariableMode mode,
- InitializationFlag init,
+ Variable* DeclareVariable(Declaration* declaration, VariableProxy* proxy,
+ VariableMode mode, VariableKind kind,
+ InitializationFlag init, bool* was_added,
bool* sloppy_mode_block_scope_function_redefinition,
bool* ok);
- // The return value is meaningful only if FLAG_preparser_scope_analysis is on.
- Variable* DeclareVariableName(const AstRawString* name, VariableMode mode);
- void DeclareCatchVariableName(const AstRawString* name);
+ // Returns nullptr if there was a declaration conflict.
+ Variable* DeclareVariableName(const AstRawString* name, VariableMode mode,
+ bool* was_added,
+ VariableKind kind = NORMAL_VARIABLE);
+ Variable* DeclareCatchVariableName(const AstRawString* name);
// Declarations list.
base::ThreadedList<Declaration>* declarations() { return &decls_; }
@@ -207,8 +244,7 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
// Create a new unresolved variable.
VariableProxy* NewUnresolved(AstNodeFactory* factory,
- const AstRawString* name,
- int start_pos = kNoSourcePosition,
+ const AstRawString* name, int start_pos,
VariableKind kind = NORMAL_VARIABLE) {
// Note that we must not share the unresolved variables with
// the same name because they may be removed selectively via
@@ -222,14 +258,20 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
void AddUnresolved(VariableProxy* proxy);
- // Remove a unresolved variable. During parsing, an unresolved variable
- // may have been added optimistically, but then only the variable name
- // was used (typically for labels). If the variable was not declared, the
- // addition introduced a new unresolved variable which may end up being
- // allocated globally as a "ghost" variable. RemoveUnresolved removes
- // such a variable again if it was added; otherwise this is a no-op.
+  // Removes an unresolved variable from the list so it can be re-added to
+ // another list. This is used to reparent parameter initializers that contain
+ // sloppy eval.
bool RemoveUnresolved(VariableProxy* var);
+ // Deletes an unresolved variable. The variable proxy cannot be reused for
+ // another list later. During parsing, an unresolved variable may have been
+ // added optimistically, but then only the variable name was used (typically
+ // for labels and arrow function parameters). If the variable was not
+ // declared, the addition introduced a new unresolved variable which may end
+ // up being allocated globally as a "ghost" variable. DeleteUnresolved removes
+ // such a variable again if it was added; otherwise this is a no-op.
+ void DeleteUnresolved(VariableProxy* var);
+
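
An example of a proxy that is added optimistically and then deleted again
(illustrative):

    // 'target' is first parsed as an identifier reference, creating an
    // unresolved VariableProxy; once the ':' reveals a label, the proxy is
    // deleted so it is not allocated as a "ghost" global.
    target: for (;;) { break target; }
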
// Creates a new temporary variable in this scope's TemporaryScope. The
// name is only used for printing and cannot be used to find the variable.
// In particular, the only way to get hold of the temporary is by keeping the
@@ -246,13 +288,13 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
// scope over a let binding of the same name.
Declaration* CheckConflictingVarDeclarations();
- // Check if the scope has a conflicting lexical declaration that has a name in
- // the given list. This is used to catch patterns like
- // `try{}catch(e){let e;}`,
- // which is an error even though the two 'e's are declared in different
- // scopes.
- Declaration* CheckLexDeclarationsConflictingWith(
- const ZonePtrList<const AstRawString>& names);
+  // Find a variable with (variable->mode() <= |mode_limit|) that was declared
+  // in |scope|. This is used to catch patterns like `try{}catch(e){let e;}`
+  // and `function([e]) { let e }`, which are errors even though the two 'e's
+  // are declared in different scopes. Returns the first duplicate variable
+  // name if there is one, nullptr otherwise.
+ const AstRawString* FindVariableDeclaredIn(Scope* scope,
+ VariableMode mode_limit);
// ---------------------------------------------------------------------------
// Scope-specific info.
@@ -267,9 +309,7 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
inner_scope_calls_eval_ = true;
for (Scope* scope = outer_scope(); scope != nullptr;
scope = scope->outer_scope()) {
- if (scope->inner_scope_calls_eval_) {
- return;
- }
+ if (scope->inner_scope_calls_eval_) return;
scope->inner_scope_calls_eval_ = true;
}
}
@@ -358,12 +398,26 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
bool ContainsAsmModule() const;
// Does this scope have the potential to execute declarations non-linearly?
bool is_nonlinear() const { return scope_nonlinear_; }
+  // Returns whether we need to force a context because the current scope is
+  // stricter than the outer scope. We need this to properly track the language
+  // mode using the context. This is required by ICs, which look up the
+  // language mode from the context.
+ bool ForceContextForLanguageMode() const {
+ // For function scopes we need not force a context since the language mode
+ // can be obtained from the closure. Script scopes always have a context.
+ if (scope_type_ == FUNCTION_SCOPE || scope_type_ == SCRIPT_SCOPE) {
+ return false;
+ }
+ DCHECK_NOT_NULL(outer_scope_);
+ return (language_mode() > outer_scope_->language_mode());
+ }
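
An example of a scope that is stricter than its outer scope and therefore
needs a context under this rule (illustrative; assumes class scopes are the
stricter-than-outer case):

    function sloppy() {
      // The class scope is strict even though the enclosing function is
      // sloppy, so the language mode must be recoverable from the context.
      class C { m() { return this; } }
      return C;
    }
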
// Whether this needs to be represented by a runtime context.
bool NeedsContext() const {
// Catch scopes always have heap slots.
DCHECK_IMPLIES(is_catch_scope(), num_heap_slots() > 0);
DCHECK_IMPLIES(is_with_scope(), num_heap_slots() > 0);
+ DCHECK_IMPLIES(ForceContextForLanguageMode(), num_heap_slots() > 0);
return num_heap_slots() > 0;
}
@@ -402,7 +456,6 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
int num_stack_slots() const { return num_stack_slots_; }
int num_heap_slots() const { return num_heap_slots_; }
- int StackLocalCount() const;
int ContextLocalCount() const;
// Determine if we can parse a function literal in this scope lazily without
@@ -477,8 +530,19 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
return false;
}
- static void* const kDummyPreParserVariable;
- static void* const kDummyPreParserLexicalVariable;
+ Variable* LookupInScopeOrScopeInfo(const AstRawString* name) {
+ Variable* var = variables_.Lookup(name);
+ if (var != nullptr || scope_info_.is_null()) return var;
+ return LookupInScopeInfo(name, this);
+ }
+
+ Variable* LookupForTesting(const AstRawString* name) {
+ for (Scope* scope = this; scope != nullptr; scope = scope->outer_scope()) {
+ Variable* var = scope->LookupInScopeOrScopeInfo(name);
+ if (var != nullptr) return var;
+ }
+ return nullptr;
+ }
protected:
explicit Scope(Zone* zone);
@@ -488,11 +552,15 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
}
private:
- Variable* Declare(
- Zone* zone, const AstRawString* name, VariableMode mode,
- VariableKind kind = NORMAL_VARIABLE,
- InitializationFlag initialization_flag = kCreatedInitialized,
- MaybeAssignedFlag maybe_assigned_flag = kNotAssigned);
+ Variable* Declare(Zone* zone, const AstRawString* name, VariableMode mode,
+ VariableKind kind, InitializationFlag initialization_flag,
+ MaybeAssignedFlag maybe_assigned_flag, bool* was_added) {
+ Variable* result =
+ variables_.Declare(zone, this, name, mode, kind, initialization_flag,
+ maybe_assigned_flag, was_added);
+ if (*was_added) locals_.Add(result);
+ return result;
+ }
// This method should only be invoked on scopes created during parsing (i.e.,
// not deserialized from a context). Also, since NeedsContext() is only
@@ -504,8 +572,80 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
MaybeAssignedFlag maybe_assigned);
// Walk the scope chain to find DeclarationScopes; call
- // SavePreParsedScopeDataForDeclarationScope for each.
- void SavePreParsedScopeData();
+ // SavePreparseDataForDeclarationScope for each.
+ void SavePreparseData(Parser* parser);
+
+ // Create a non-local variable with a given name.
+ // These variables are looked up dynamically at runtime.
+ Variable* NonLocal(const AstRawString* name, VariableMode mode);
+
+ enum ScopeLookupMode {
+ kParsedScope,
+ kDeserializedScope,
+ };
+
+ // Variable resolution.
+ // Lookup a variable reference given by name starting with this scope, and
+ // stopping when reaching the outer_scope_end scope. If the code is executed
+ // because of a call to 'eval', the context parameter should be set to the
+ // calling context of 'eval'.
+ template <ScopeLookupMode mode>
+ static Variable* Lookup(VariableProxy* proxy, Scope* scope,
+ Scope* outer_scope_end, Scope* entry_point = nullptr,
+ bool force_context_allocation = false);
+ static Variable* LookupWith(VariableProxy* proxy, Scope* scope,
+ Scope* outer_scope_end, Scope* entry_point,
+ bool force_context_allocation);
+ static Variable* LookupSloppyEval(VariableProxy* proxy, Scope* scope,
+ Scope* outer_scope_end, Scope* entry_point,
+ bool force_context_allocation);
+ void ResolveTo(ParseInfo* info, VariableProxy* proxy, Variable* var);
+ V8_WARN_UNUSED_RESULT bool ResolveVariable(ParseInfo* info,
+ VariableProxy* proxy);
+ V8_WARN_UNUSED_RESULT bool ResolveVariablesRecursively(ParseInfo* info);
+
+ // Finds free variables of this scope. This mutates the unresolved variables
+ // list along the way, so full resolution cannot be done afterwards.
+ void AnalyzePartially(DeclarationScope* max_outer_scope,
+ AstNodeFactory* ast_node_factory,
+ UnresolvedList* new_unresolved_list);
+ void CollectNonLocals(DeclarationScope* max_outer_scope, Isolate* isolate,
+ ParseInfo* info, Handle<StringSet>* non_locals);
+
+ // Predicates.
+ bool MustAllocate(Variable* var);
+ bool MustAllocateInContext(Variable* var);
+
+ // Variable allocation.
+ void AllocateStackSlot(Variable* var);
+ void AllocateHeapSlot(Variable* var);
+ void AllocateNonParameterLocal(Variable* var);
+ void AllocateDeclaredGlobal(Variable* var);
+ void AllocateNonParameterLocalsAndDeclaredGlobals();
+ void AllocateVariablesRecursively();
+
+ void AllocateScopeInfosRecursively(Isolate* isolate,
+ MaybeHandle<ScopeInfo> outer_scope);
+ void AllocateDebuggerScopeInfos(Isolate* isolate,
+ MaybeHandle<ScopeInfo> outer_scope);
+
+ // Construct a scope based on the scope info.
+ Scope(Zone* zone, ScopeType type, Handle<ScopeInfo> scope_info);
+
+ // Construct a catch scope with a binding for the name.
+ Scope(Zone* zone, const AstRawString* catch_variable_name,
+ MaybeAssignedFlag maybe_assigned, Handle<ScopeInfo> scope_info);
+
+ void AddInnerScope(Scope* inner_scope) {
+ inner_scope->sibling_ = inner_scope_;
+ inner_scope_ = inner_scope;
+ inner_scope->outer_scope_ = this;
+ }
+
+ void SetDefaults();
+
+ friend class DeclarationScope;
+ friend class ScopeTestHelper;
Zone* zone_;
@@ -525,7 +665,7 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
base::ThreadedList<Variable> locals_;
// Unresolved variables referred to from this scope. The proxies themselves
// form a linked list of all unresolved proxies.
- base::ThreadedList<VariableProxy> unresolved_list_;
+ UnresolvedList unresolved_list_;
// Declarations.
base::ThreadedList<Declaration> decls_;
@@ -577,65 +717,6 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
bool is_declaration_scope_ : 1;
bool must_use_preparsed_scope_data_ : 1;
-
- // Create a non-local variable with a given name.
- // These variables are looked up dynamically at runtime.
- Variable* NonLocal(const AstRawString* name, VariableMode mode);
-
- // Variable resolution.
- // Lookup a variable reference given by name recursively starting with this
- // scope, and stopping when reaching the outer_scope_end scope. If the code is
- // executed because of a call to 'eval', the context parameter should be set
- // to the calling context of 'eval'.
- Variable* LookupRecursive(ParseInfo* info, VariableProxy* proxy,
- Scope* outer_scope_end);
- void ResolveTo(ParseInfo* info, VariableProxy* proxy, Variable* var);
- V8_WARN_UNUSED_RESULT bool ResolveVariable(ParseInfo* info,
- VariableProxy* proxy);
- V8_WARN_UNUSED_RESULT bool ResolveVariablesRecursively(ParseInfo* info);
-
- // Finds free variables of this scope. This mutates the unresolved variables
- // list along the way, so full resolution cannot be done afterwards.
- // If a ParseInfo* is passed, non-free variables will be resolved.
- template <typename T>
- void ResolveScopesThenForEachVariable(DeclarationScope* max_outer_scope,
- T variable_proxy_stackvisitor,
- ParseInfo* info = nullptr);
-
- // Predicates.
- bool MustAllocate(Variable* var);
- bool MustAllocateInContext(Variable* var);
-
- // Variable allocation.
- void AllocateStackSlot(Variable* var);
- void AllocateHeapSlot(Variable* var);
- void AllocateNonParameterLocal(Variable* var);
- void AllocateDeclaredGlobal(Variable* var);
- void AllocateNonParameterLocalsAndDeclaredGlobals();
- void AllocateVariablesRecursively();
-
- void AllocateScopeInfosRecursively(Isolate* isolate,
- MaybeHandle<ScopeInfo> outer_scope);
- void AllocateDebuggerScopeInfos(Isolate* isolate,
- MaybeHandle<ScopeInfo> outer_scope);
-
- // Construct a scope based on the scope info.
- Scope(Zone* zone, ScopeType type, Handle<ScopeInfo> scope_info);
-
- // Construct a catch scope with a binding for the name.
- Scope(Zone* zone, const AstRawString* catch_variable_name,
- MaybeAssignedFlag maybe_assigned, Handle<ScopeInfo> scope_info);
-
- void AddInnerScope(Scope* inner_scope) {
- inner_scope->sibling_ = inner_scope_;
- inner_scope_ = inner_scope;
- inner_scope->outer_scope_ = this;
- }
-
- void SetDefaults();
-
- friend class DeclarationScope;
- friend class ScopeTestHelper;
};
class V8_EXPORT_PRIVATE DeclarationScope : public Scope {
@@ -647,8 +728,6 @@ class V8_EXPORT_PRIVATE DeclarationScope : public Scope {
// Creates a script scope.
DeclarationScope(Zone* zone, AstValueFactory* ast_value_factory);
- bool IsDeclaredParameter(const AstRawString* name);
-
FunctionKind function_kind() const { return function_kind_; }
bool is_arrow_scope() const {
@@ -672,11 +751,21 @@ class V8_EXPORT_PRIVATE DeclarationScope : public Scope {
}
bool calls_sloppy_eval() const {
- return scope_calls_eval_ && is_sloppy(language_mode());
+ // TODO(delphick): Calculate this when setting and change the name of
+ // scope_calls_eval_.
+ return !is_script_scope() && scope_calls_eval_ &&
+ is_sloppy(language_mode());
}
bool was_lazily_parsed() const { return was_lazily_parsed_; }
+ Variable* LookupInModule(const AstRawString* name) {
+ DCHECK(is_module_scope());
+ Variable* var = variables_.Lookup(name);
+ DCHECK_NOT_NULL(var);
+ return var;
+ }
+
#ifdef DEBUG
void set_is_being_lazily_parsed(bool is_being_lazily_parsed) {
is_being_lazily_parsed_ = is_being_lazily_parsed;
@@ -699,11 +788,11 @@ class V8_EXPORT_PRIVATE DeclarationScope : public Scope {
scope_info_ = scope_info;
}
- bool asm_module() const { return asm_module_; }
- void set_asm_module();
+ bool is_asm_module() const { return is_asm_module_; }
+ void set_is_asm_module();
bool should_ban_arguments() const {
- return IsClassFieldsInitializerFunction(function_kind());
+ return IsClassMembersInitializerFunction(function_kind());
}
void DeclareThis(AstValueFactory* ast_value_factory);
@@ -718,32 +807,29 @@ class V8_EXPORT_PRIVATE DeclarationScope : public Scope {
// declared in the scope. It will add a variable for {name} to {variables_};
// either the function variable itself, or a non-local in case the function
// calls sloppy eval.
- Variable* DeclareFunctionVar(const AstRawString* name);
+ Variable* DeclareFunctionVar(const AstRawString* name,
+ Scope* cache = nullptr);
// Declare some special internal variables which must be accessible to
// Ignition without ScopeInfo.
Variable* DeclareGeneratorObjectVar(const AstRawString* name);
- Variable* DeclarePromiseVar(const AstRawString* name);
// Declare a parameter in this scope. When there are duplicated
// parameters the rightmost one 'wins'. However, the implementation
// expects all parameters to be declared and from left to right.
Variable* DeclareParameter(const AstRawString* name, VariableMode mode,
- bool is_optional, bool is_rest, bool* is_duplicate,
+ bool is_optional, bool is_rest,
AstValueFactory* ast_value_factory, int position);
- // Declares that a parameter with the name exists. Creates a Variable and
- // returns it if FLAG_preparser_scope_analysis is on.
- Variable* DeclareParameterName(const AstRawString* name, bool is_rest,
- AstValueFactory* ast_value_factory,
- bool declare_local, bool add_parameter);
+ // Makes sure that num_parameters_ and has_rest_ are correct for the preparser.
+ void RecordParameter(bool is_rest);
// Declare an implicit global variable in this scope which must be a
// script scope. The variable was introduced (possibly from an inner
// scope) by a reference to an unresolved variable with no intervening
// with statements or eval calls.
Variable* DeclareDynamicGlobal(const AstRawString* name,
- VariableKind variable_kind);
+ VariableKind variable_kind, Scope* cache);
// The variable corresponding to the 'this' value.
Variable* receiver() {
@@ -774,30 +860,11 @@ class V8_EXPORT_PRIVATE DeclarationScope : public Scope {
return GetRareVariable(RareVariable::kGeneratorObject);
}
- // For async generators, the .generator_object variable is always
- // allocated to a fixed stack slot, such that the stack trace
- // construction logic can access it.
- static constexpr int kGeneratorObjectVarIndex = 0;
-
- // The variable holding the promise returned from async functions.
- // Only valid for function scopes in async functions (i.e. not
- // for async generators).
- Variable* promise_var() const {
- DCHECK(is_function_scope());
- DCHECK(IsAsyncFunction(function_kind_));
- if (IsAsyncGeneratorFunction(function_kind_)) return nullptr;
- return GetRareVariable(RareVariable::kPromise);
- }
-
- // For async functions, the .promise variable is always allocated
- // to a fixed stack slot, such that the stack trace construction
- // logic can access it.
- static constexpr int kPromiseVarIndex = 0;
-
// Parameters. The left-most parameter has index 0.
// Only valid for function and module scopes.
Variable* parameter(int index) const {
DCHECK(is_function_scope() || is_module_scope());
+ DCHECK(!is_being_lazily_parsed_);
return params_[index];
}
@@ -806,9 +873,7 @@ class V8_EXPORT_PRIVATE DeclarationScope : public Scope {
// function foo(a, b) {} ==> 2
// function foo(a, b, ...c) {} ==> 2
// function foo(a, b, c = 1) {} ==> 3
- int num_parameters() const {
- return has_rest_ ? params_.length() - 1 : params_.length();
- }
+ int num_parameters() const { return num_parameters_; }
// The function's rest parameter (nullptr if there is none).
Variable* rest_parameter() const {
@@ -827,6 +892,15 @@ class V8_EXPORT_PRIVATE DeclarationScope : public Scope {
has_simple_parameters_ = false;
}
+ void MakeParametersNonSimple() {
+ SetHasNonSimpleParameters();
+ for (ZoneHashMap::Entry* p = variables_.Start(); p != nullptr;
+ p = variables_.Next(p)) {
+ Variable* var = reinterpret_cast<Variable*>(p->value);
+ if (var->is_parameter()) var->MakeParameterNonSimple();
+ }
+ }
+
// Returns whether the arguments object aliases formal parameters.
CreateArgumentsType GetArgumentsType() const {
DCHECK(is_function_scope());
@@ -872,15 +946,11 @@ class V8_EXPORT_PRIVATE DeclarationScope : public Scope {
return sloppy_block_function_map_;
}
- // Replaces the outer scope with the outer_scope_info in |info| if there is
- // one.
- void AttachOuterScopeInfo(ParseInfo* info, Isolate* isolate);
-
// Compute top scope and allocate variables. For lazy compilation the top
// scope only contains the single lazily compiled function, so this
// doesn't re-allocate variables repeatedly.
//
- // Returns false if private fields can not be resolved and
+ // Returns false if private names can not be resolved and
// ParseInfo's pending_error_handler will be populated with an
// error. Otherwise, returns true.
V8_WARN_UNUSED_RESULT
@@ -891,7 +961,7 @@ class V8_EXPORT_PRIVATE DeclarationScope : public Scope {
// this records variables which cannot be resolved inside the Scope (we don't
// yet know what they will resolve to since the outer Scopes are incomplete)
// and recreates them with the correct Zone with ast_node_factory.
- void AnalyzePartially(AstNodeFactory* ast_node_factory);
+ void AnalyzePartially(Parser* parser, AstNodeFactory* ast_node_factory);
// Allocate ScopeInfos for top scope and any inner scopes that need them.
// Does nothing if ScopeInfo is already allocated.
@@ -921,8 +991,6 @@ class V8_EXPORT_PRIVATE DeclarationScope : public Scope {
void AllocateLocals();
void AllocateParameterLocals();
void AllocateReceiver();
- void AllocatePromise();
- void AllocateGeneratorObject();
void ResetAfterPreparsing(AstValueFactory* ast_value_factory, bool aborted);
@@ -941,16 +1009,15 @@ class V8_EXPORT_PRIVATE DeclarationScope : public Scope {
// Save data describing the context allocation of the variables in this scope
// and its subscopes (except scopes at the laziness boundary). The data is
- // saved in produced_preparsed_scope_data_.
- void SavePreParsedScopeDataForDeclarationScope();
+ // saved in produced_preparse_data_.
+ void SavePreparseDataForDeclarationScope(Parser* parser);
- void set_preparsed_scope_data_builder(
- PreParsedScopeDataBuilder* preparsed_scope_data_builder) {
- preparsed_scope_data_builder_ = preparsed_scope_data_builder;
+ void set_preparse_data_builder(PreparseDataBuilder* preparse_data_builder) {
+ preparse_data_builder_ = preparse_data_builder;
}
- PreParsedScopeDataBuilder* preparsed_scope_data_builder() const {
- return preparsed_scope_data_builder_;
+ PreparseDataBuilder* preparse_data_builder() const {
+ return preparse_data_builder_;
}
private:
@@ -965,24 +1032,17 @@ class V8_EXPORT_PRIVATE DeclarationScope : public Scope {
// parameter is the context in which eval was called. In all other
// cases the context parameter is an empty handle.
//
- // Returns false if private fields can not be resolved.
+ // Returns false if private names can not be resolved.
bool AllocateVariables(ParseInfo* info);
void SetDefaults();
- // If the scope is a function scope, this is the function kind.
- const FunctionKind function_kind_;
-
bool has_simple_parameters_ : 1;
// This scope contains an "use asm" annotation.
- bool asm_module_ : 1;
+ bool is_asm_module_ : 1;
bool force_eager_compilation_ : 1;
// This function scope has a rest parameter.
bool has_rest_ : 1;
- // This function scope has a .promise variable.
- bool has_promise_ : 1;
- // This function scope has a .generator_object variable.
- bool has_generator_object_ : 1;
// This scope has a parameter called "arguments".
bool has_arguments_parameter_ : 1;
// This scope uses "super" property ('super.foo').
@@ -996,6 +1056,11 @@ class V8_EXPORT_PRIVATE DeclarationScope : public Scope {
bool is_skipped_function_ : 1;
bool has_inferred_function_name_ : 1;
+ int num_parameters_ = 0;
+
+ // If the scope is a function scope, this is the function kind.
+ const FunctionKind function_kind_;
+
// Parameter list in source order.
ZonePtrList<Variable> params_;
// Map of function names to lists of functions defined in sloppy blocks
@@ -1010,7 +1075,7 @@ class V8_EXPORT_PRIVATE DeclarationScope : public Scope {
Variable* arguments_;
// For producing the scope allocation data during preparsing.
- PreParsedScopeDataBuilder* preparsed_scope_data_builder_;
+ PreparseDataBuilder* preparse_data_builder_;
struct RareData : public ZoneObject {
// Convenience variable; Subclass constructor only
@@ -1019,14 +1084,11 @@ class V8_EXPORT_PRIVATE DeclarationScope : public Scope {
// Generator object, if any; generator function scopes and module scopes
// only.
Variable* generator_object = nullptr;
- // Promise, if any; async function scopes only.
- Variable* promise = nullptr;
};
enum class RareVariable {
kThisFunction = offsetof(RareData, this_function),
kGeneratorObject = offsetof(RareData, generator_object),
- kPromise = offsetof(RareData, promise)
};
V8_INLINE RareData* EnsureRareData() {
@@ -1055,6 +1117,15 @@ class V8_EXPORT_PRIVATE DeclarationScope : public Scope {
RareData* rare_data_ = nullptr;
};
+Scope::Snapshot::Snapshot(Scope* scope)
+ : outer_scope_and_calls_eval_(scope, scope->scope_calls_eval_),
+ top_inner_scope_(scope->inner_scope_),
+ top_unresolved_(scope->unresolved_list_.end()),
+ top_local_(scope->GetClosureScope()->locals_.end()) {
+ // Reset in order to record eval calls during this Snapshot's lifetime.
+ outer_scope_and_calls_eval_.GetPointer()->scope_calls_eval_ = false;
+}
+
class ModuleScope final : public DeclarationScope {
public:
ModuleScope(DeclarationScope* script_scope,
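The inlined Snapshot constructor above follows a save-and-record pattern: it remembers the scope's current inner-scope, unresolved-list and locals tails, then clears scope_calls_eval_ so that eval calls made while the snapshot is alive can be told apart from earlier ones. A minimal sketch of the same pattern, with hypothetical names rather than V8's API:

    // Save-and-record sketch: the snapshot clears a sticky flag on
    // construction so events during its lifetime stand out, then ORs the
    // saved state back when it goes out of scope.
    class EvalTracker {
     public:
      class Snapshot {
       public:
        explicit Snapshot(EvalTracker* t) : tracker_(t), saved_(t->calls_eval_) {
          tracker_->calls_eval_ = false;  // record fresh eval calls only
        }
        ~Snapshot() { tracker_->calls_eval_ |= saved_; }
        bool saw_eval() const { return tracker_->calls_eval_; }

       private:
        EvalTracker* tracker_;
        bool saved_;
      };

      void RecordEvalCall() { calls_eval_ = true; }

     private:
      bool calls_eval_ = false;
    };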
diff --git a/deps/v8/src/ast/source-range-ast-visitor.cc b/deps/v8/src/ast/source-range-ast-visitor.cc
index f3a3dbcd9b..442b23718c 100644
--- a/deps/v8/src/ast/source-range-ast-visitor.cc
+++ b/deps/v8/src/ast/source-range-ast-visitor.cc
@@ -53,21 +53,10 @@ bool SourceRangeAstVisitor::VisitNode(AstNode* node) {
void SourceRangeAstVisitor::MaybeRemoveLastContinuationRange(
ZonePtrList<Statement>* statements) {
- if (statements == nullptr || statements->is_empty()) return;
+ if (statements->is_empty()) return;
Statement* last_statement = statements->last();
- AstNodeSourceRanges* last_range = nullptr;
-
- if (last_statement->IsExpressionStatement() &&
- last_statement->AsExpressionStatement()->expression()->IsThrow()) {
- // For ThrowStatement, source range is tied to Throw expression not
- // ExpressionStatement.
- last_range = source_range_map_->Find(
- last_statement->AsExpressionStatement()->expression());
- } else {
- last_range = source_range_map_->Find(last_statement);
- }
-
+ AstNodeSourceRanges* last_range = source_range_map_->Find(last_statement);
if (last_range == nullptr) return;
if (last_range->HasRange(SourceRangeKind::kContinuation)) {
diff --git a/deps/v8/src/ast/variables.h b/deps/v8/src/ast/variables.h
index d33062973b..13a444536d 100644
--- a/deps/v8/src/ast/variables.h
+++ b/deps/v8/src/ast/variables.h
@@ -131,12 +131,13 @@ class Variable final : public ZoneObject {
return kind() != SLOPPY_FUNCTION_NAME_VARIABLE || is_strict(language_mode);
}
- bool is_function() const { return kind() == FUNCTION_VARIABLE; }
bool is_this() const { return kind() == THIS_VARIABLE; }
bool is_sloppy_function_name() const {
return kind() == SLOPPY_FUNCTION_NAME_VARIABLE;
}
+ bool is_parameter() const { return kind() == PARAMETER_VARIABLE; }
+
Variable* local_if_not_shadowed() const {
DCHECK(mode() == VariableMode::kDynamicLocal &&
local_if_not_shadowed_ != nullptr);
@@ -175,6 +176,13 @@ class Variable final : public ZoneObject {
index_ = index;
}
+ void MakeParameterNonSimple() {
+ DCHECK(is_parameter());
+ bit_field_ = VariableModeField::update(bit_field_, VariableMode::kLet);
+ bit_field_ =
+ InitializationFlagField::update(bit_field_, kNeedsInitialization);
+ }
+
static InitializationFlag DefaultInitializationFlag(VariableMode mode) {
DCHECK(IsDeclaredVariableMode(mode));
return mode == VariableMode::kVar ? kCreatedInitialized
@@ -199,7 +207,7 @@ class Variable final : public ZoneObject {
class VariableModeField : public BitField16<VariableMode, 0, 3> {};
class VariableKindField
- : public BitField16<VariableKind, VariableModeField::kNext, 3> {};
+ : public BitField16<VariableKind, VariableModeField::kNext, 2> {};
class LocationField
: public BitField16<VariableLocation, VariableKindField::kNext, 3> {};
class ForceContextAllocationField
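MakeParameterNonSimple rewrites two packed fields in place through the BitField16 helpers, and VariableKindField can shrink from 3 bits to 2 presumably because dropping FUNCTION_VARIABLE leaves at most four VariableKind values. A self-contained sketch of the BitField pattern these helpers follow (an assumption about the helper, not a copy of V8's implementation):

    #include <cstdint>

    // Minimal BitField16 sketch: encode() shifts a value into its field,
    // update() rewrites only that field, decode() extracts it; kNext lets
    // the following field start right after this one.
    template <typename T, int kShift, int kSize>
    struct BitField16 {
      static constexpr uint16_t kMask =
          static_cast<uint16_t>(((1u << kSize) - 1) << kShift);
      static constexpr int kNext = kShift + kSize;
      static constexpr uint16_t encode(T value) {
        return static_cast<uint16_t>(static_cast<uint32_t>(value) << kShift);
      }
      static constexpr uint16_t update(uint16_t previous, T value) {
        return static_cast<uint16_t>((previous & ~kMask) | encode(value));
      }
      static constexpr T decode(uint16_t value) {
        return static_cast<T>((value & kMask) >> kShift);
      }
    };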
diff --git a/deps/v8/src/async-hooks-wrapper.cc b/deps/v8/src/async-hooks-wrapper.cc
index fd724af9c9..bff7965171 100644
--- a/deps/v8/src/async-hooks-wrapper.cc
+++ b/deps/v8/src/async-hooks-wrapper.cc
@@ -142,8 +142,13 @@ void AsyncHooks::ShellPromiseHook(PromiseHookType type, Local<Promise> promise,
Local<Integer> async_id =
Integer::New(hooks->isolate_, hooks->current_async_id);
+ CHECK(!promise
+ ->HasPrivate(currentContext,
+ hooks->async_id_smb.Get(hooks->isolate_))
+ .ToChecked());
promise->SetPrivate(currentContext,
hooks->async_id_smb.Get(hooks->isolate_), async_id);
+
if (parent->IsPromise()) {
Local<Promise> parent_promise = parent.As<Promise>();
Local<Value> parent_async_id =
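The new CHECK asserts that the promise has not already been tagged with the async-id private symbol before SetPrivate installs it. For reference, the private-property round trip looks roughly like this (a sketch only; the key name and helper are illustrative, not d8's actual code):

    #include <cassert>
    #include "v8.h"

    // Hypothetical sketch: tag a promise with a private symbol once, and
    // verify it was not tagged before.
    void TagPromise(v8::Isolate* isolate, v8::Local<v8::Context> context,
                    v8::Local<v8::Promise> promise) {
      v8::Local<v8::Private> key = v8::Private::ForApi(
          isolate, v8::String::NewFromUtf8(isolate, "async_id",
                                           v8::NewStringType::kNormal)
                       .ToLocalChecked());
      assert(!promise->HasPrivate(context, key).ToChecked());
      promise->SetPrivate(context, key, v8::Integer::New(isolate, 42))
          .ToChecked();
    }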
diff --git a/deps/v8/src/bailout-reason.h b/deps/v8/src/bailout-reason.h
index 78f5665a38..a5f14c611e 100644
--- a/deps/v8/src/bailout-reason.h
+++ b/deps/v8/src/bailout-reason.h
@@ -10,93 +10,86 @@
namespace v8 {
namespace internal {
-#define ABORT_MESSAGES_LIST(V) \
- V(kNoReason, "no reason") \
- \
- V(k32BitValueInRegisterIsNotZeroExtended, \
- "32 bit value in register is not zero-extended") \
- V(kAPICallReturnedInvalidObject, "API call returned invalid object") \
- V(kAllocatingNonEmptyPackedArray, "Allocating non-empty packed array") \
- V(kAllocationIsNotDoubleAligned, "Allocation is not double aligned") \
- V(kCodeObjectNotProperlyPatched, "Code object not properly patched") \
- V(kExpectedAllocationSite, "Expected allocation site") \
- V(kExpectedFeedbackVector, "Expected feedback vector") \
- V(kExpectedOptimizationSentinel, \
- "Expected optimized code cell or optimization sentinel") \
- V(kExpectedUndefinedOrCell, "Expected undefined or cell in register") \
- V(kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry, \
- "The function_data field should be a BytecodeArray on interpreter entry") \
- V(kInputStringTooLong, "Input string too long") \
- V(kInvalidBytecode, "Invalid bytecode") \
- V(kInvalidBytecodeAdvance, "Cannot advance current bytecode, ") \
- V(kInvalidElementsKindForInternalArrayOrInternalPackedArray, \
- "Invalid ElementsKind for InternalArray or InternalPackedArray") \
- V(kInvalidHandleScopeLevel, "Invalid HandleScope level") \
- V(kInvalidJumpTableIndex, "Invalid jump table index") \
- V(kInvalidParametersAndRegistersInGenerator, \
- "invalid parameters and registers in generator") \
- V(kInvalidSharedFunctionInfoData, "Invalid SharedFunctionInfo data") \
- V(kMissingBytecodeArray, "Missing bytecode array from function") \
- V(kObjectNotTagged, "The object is not tagged") \
- V(kObjectTagged, "The object is tagged") \
- V(kOffsetOutOfRange, "Offset out of range") \
- V(kOperandIsASmi, "Operand is a smi") \
- V(kOperandIsASmiAndNotABoundFunction, \
- "Operand is a smi and not a bound function") \
- V(kOperandIsASmiAndNotAConstructor, \
- "Operand is a smi and not a constructor") \
- V(kOperandIsASmiAndNotAFixedArray, "Operand is a smi and not a fixed array") \
- V(kOperandIsASmiAndNotAFunction, "Operand is a smi and not a function") \
- V(kOperandIsASmiAndNotAGeneratorObject, \
- "Operand is a smi and not a generator object") \
- V(kOperandIsNotABoundFunction, "Operand is not a bound function") \
- V(kOperandIsNotAConstructor, "Operand is not a constructor") \
- V(kOperandIsNotAFixedArray, "Operand is not a fixed array") \
- V(kOperandIsNotAFunction, "Operand is not a function") \
- V(kOperandIsNotAGeneratorObject, "Operand is not a generator object") \
- V(kOperandIsNotASmi, "Operand is not a smi") \
- V(kReceivedInvalidReturnAddress, "Received invalid return address") \
- V(kRegisterDidNotMatchExpectedRoot, "Register did not match expected root") \
- V(kRegisterWasClobbered, "Register was clobbered") \
- V(kReturnAddressNotFoundInFrame, "Return address not found in frame") \
- V(kShouldNotDirectlyEnterOsrFunction, \
- "Should not directly enter OSR-compiled function") \
- V(kStackAccessBelowStackPointer, "Stack access below stack pointer") \
- V(kStackFrameTypesMustMatch, "Stack frame types must match") \
- V(kTheStackWasCorruptedByMacroAssemblerCall, \
- "The stack was corrupted by MacroAssembler::Call()") \
- V(kUnalignedCellInWriteBarrier, "Unaligned cell in write barrier") \
- V(kUnexpectedElementsKindInArrayConstructor, \
- "Unexpected ElementsKind in array constructor") \
- V(kUnexpectedFPCRMode, "Unexpected FPCR mode.") \
- V(kUnexpectedFunctionIDForInvokeIntrinsic, \
- "Unexpected runtime function id for the InvokeIntrinsic bytecode") \
- V(kUnexpectedInitialMapForArrayFunction, \
- "Unexpected initial map for Array function") \
- V(kUnexpectedInitialMapForArrayFunction1, \
- "Unexpected initial map for Array function (1)") \
- V(kUnexpectedInitialMapForArrayFunction2, \
- "Unexpected initial map for Array function (2)") \
- V(kUnexpectedInitialMapForInternalArrayFunction, \
- "Unexpected initial map for InternalArray function") \
- V(kUnexpectedLevelAfterReturnFromApiCall, \
- "Unexpected level after return from api call") \
- V(kUnexpectedNegativeValue, "Unexpected negative value") \
- V(kUnexpectedReturnFromFrameDropper, \
- "Unexpectedly returned from dropping frames") \
- V(kUnexpectedReturnFromThrow, "Unexpectedly returned from a throw") \
- V(kUnexpectedReturnFromWasmTrap, \
- "Should not return after throwing a wasm trap") \
- V(kUnexpectedStackPointer, "The stack pointer is not the expected value") \
- V(kUnexpectedValue, "Unexpected value") \
- V(kUnsupportedModuleOperation, "Unsupported module operation") \
- V(kUnsupportedNonPrimitiveCompare, "Unsupported non-primitive compare") \
- V(kWrongAddressOrValuePassedToRecordWrite, \
- "Wrong address or value passed to RecordWrite") \
- V(kWrongArgumentCountForInvokeIntrinsic, \
- "Wrong number of arguments for intrinsic") \
- V(kWrongFunctionCodeStart, "Wrong value in code start register passed") \
- V(kWrongFunctionContext, "Wrong context passed to function")
+#define ABORT_MESSAGES_LIST(V) \
+ V(kNoReason, "no reason") \
+ \
+ V(k32BitValueInRegisterIsNotZeroExtended, \
+ "32 bit value in register is not zero-extended") \
+ V(kAPICallReturnedInvalidObject, "API call returned invalid object") \
+ V(kAllocatingNonEmptyPackedArray, "Allocating non-empty packed array") \
+ V(kAllocationIsNotDoubleAligned, "Allocation is not double aligned") \
+ V(kExpectedOptimizationSentinel, \
+ "Expected optimized code cell or optimization sentinel") \
+ V(kExpectedUndefinedOrCell, "Expected undefined or cell in register") \
+ V(kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry, \
+ "The function_data field should be a BytecodeArray on interpreter entry") \
+ V(kInputStringTooLong, "Input string too long") \
+ V(kInvalidBytecode, "Invalid bytecode") \
+ V(kInvalidBytecodeAdvance, "Cannot advance current bytecode, ") \
+ V(kInvalidElementsKindForInternalPackedArray, \
+ "Invalid ElementsKind for InternalPackedArray") \
+ V(kInvalidHandleScopeLevel, "Invalid HandleScope level") \
+ V(kInvalidJumpTableIndex, "Invalid jump table index") \
+ V(kInvalidParametersAndRegistersInGenerator, \
+ "invalid parameters and registers in generator") \
+ V(kMissingBytecodeArray, "Missing bytecode array from function") \
+ V(kObjectNotTagged, "The object is not tagged") \
+ V(kObjectTagged, "The object is tagged") \
+ V(kOffsetOutOfRange, "Offset out of range") \
+ V(kOperandIsASmi, "Operand is a smi") \
+ V(kOperandIsASmiAndNotABoundFunction, \
+ "Operand is a smi and not a bound function") \
+ V(kOperandIsASmiAndNotAConstructor, \
+ "Operand is a smi and not a constructor") \
+ V(kOperandIsASmiAndNotAFunction, "Operand is a smi and not a function") \
+ V(kOperandIsASmiAndNotAGeneratorObject, \
+ "Operand is a smi and not a generator object") \
+ V(kOperandIsNotABoundFunction, "Operand is not a bound function") \
+ V(kOperandIsNotAConstructor, "Operand is not a constructor") \
+ V(kOperandIsNotAFixedArray, "Operand is not a fixed array") \
+ V(kOperandIsNotAFunction, "Operand is not a function") \
+ V(kOperandIsNotAGeneratorObject, "Operand is not a generator object") \
+ V(kOperandIsNotASmi, "Operand is not a smi") \
+ V(kReceivedInvalidReturnAddress, "Received invalid return address") \
+ V(kRegisterDidNotMatchExpectedRoot, "Register did not match expected root") \
+ V(kRegisterWasClobbered, "Register was clobbered") \
+ V(kReturnAddressNotFoundInFrame, "Return address not found in frame") \
+ V(kShouldNotDirectlyEnterOsrFunction, \
+ "Should not directly enter OSR-compiled function") \
+ V(kStackAccessBelowStackPointer, "Stack access below stack pointer") \
+ V(kStackFrameTypesMustMatch, "Stack frame types must match") \
+ V(kUnalignedCellInWriteBarrier, "Unaligned cell in write barrier") \
+ V(kUnexpectedElementsKindInArrayConstructor, \
+ "Unexpected ElementsKind in array constructor") \
+ V(kUnexpectedFPCRMode, "Unexpected FPCR mode.") \
+ V(kUnexpectedFunctionIDForInvokeIntrinsic, \
+ "Unexpected runtime function id for the InvokeIntrinsic bytecode") \
+ V(kUnexpectedInitialMapForArrayFunction, \
+ "Unexpected initial map for Array function") \
+ V(kUnexpectedInitialMapForInternalArrayFunction, \
+ "Unexpected initial map for InternalArray function") \
+ V(kUnexpectedLevelAfterReturnFromApiCall, \
+ "Unexpected level after return from api call") \
+ V(kUnexpectedNegativeValue, "Unexpected negative value") \
+ V(kUnexpectedReturnFromFrameDropper, \
+ "Unexpectedly returned from dropping frames") \
+ V(kUnexpectedReturnFromThrow, "Unexpectedly returned from a throw") \
+ V(kUnexpectedReturnFromWasmTrap, \
+ "Should not return after throwing a wasm trap") \
+ V(kUnexpectedStackPointer, "The stack pointer is not the expected value") \
+ V(kUnexpectedValue, "Unexpected value") \
+ V(kUnsupportedModuleOperation, "Unsupported module operation") \
+ V(kUnsupportedNonPrimitiveCompare, "Unsupported non-primitive compare") \
+ V(kWrongAddressOrValuePassedToRecordWrite, \
+ "Wrong address or value passed to RecordWrite") \
+ V(kWrongArgumentCountForInvokeIntrinsic, \
+ "Wrong number of arguments for intrinsic") \
+ V(kWrongFunctionCodeStart, "Wrong value in code start register passed") \
+ V(kWrongFunctionContext, "Wrong context passed to function") \
+ V(kWrongNumberOfArgumentsForInternalPackedArray, \
+ "Wrong number of arguments for InternalPackedArray") \
+ V(kUnexpectedThreadInWasmSet, "thread_in_wasm flag was already set") \
+ V(kUnexpectedThreadInWasmUnset, "thread_in_wasm flag was not set")
#define BAILOUT_MESSAGES_LIST(V) \
V(kNoReason, "no reason") \
@@ -113,7 +106,7 @@ namespace internal {
V(kNotEnoughVirtualRegistersRegalloc, \
"Not enough virtual registers (regalloc)") \
V(kOptimizationDisabled, "Optimization disabled") \
- V(kOptimizationDisabledForTest, "Optimization disabled for test")
+ V(kNeverOptimize, "Optimization is always disabled")
#define ERROR_MESSAGES_CONSTANTS(C, T) C,
enum class BailoutReason : uint8_t {
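Both lists use the X-macro pattern: each V(constant, message) row is expanded once into an enum constant, as above, and once more elsewhere into a parallel message table. A tiny self-contained demo of the pattern:

    #include <cstdint>

    // One list, two expansions: the same rows define the enum and the
    // parallel message table, so they can never drift apart.
    #define DEMO_MESSAGES_LIST(V)  \
      V(kFirst, "first message")   \
      V(kSecond, "second message")

    #define AS_CONSTANT(C, T) C,
    enum class DemoReason : uint8_t { DEMO_MESSAGES_LIST(AS_CONSTANT) kCount };
    #undef AS_CONSTANT

    #define AS_MESSAGE(C, T) T,
    static const char* kDemoMessages[] = {DEMO_MESSAGES_LIST(AS_MESSAGE)};
    #undef AS_MESSAGE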
diff --git a/deps/v8/src/base/adapters.h b/deps/v8/src/base/adapters.h
index 6eeaed140b..92c500085d 100644
--- a/deps/v8/src/base/adapters.h
+++ b/deps/v8/src/base/adapters.h
@@ -23,7 +23,7 @@ class ReversedAdapter {
std::reverse_iterator<decltype(std::begin(std::declval<T>()))>;
explicit ReversedAdapter(T& t) : t_(t) {}
- ReversedAdapter(const ReversedAdapter& ra) = default;
+ ReversedAdapter(const ReversedAdapter& ra) V8_NOEXCEPT = default;
// TODO(clemensh): Use std::rbegin/std::rend once we have C++14 support.
Iterator begin() const { return Iterator(std::end(t_)); }
diff --git a/deps/v8/src/base/atomic-utils.h b/deps/v8/src/base/atomic-utils.h
index 90681b8a35..2c6fd23b71 100644
--- a/deps/v8/src/base/atomic-utils.h
+++ b/deps/v8/src/base/atomic-utils.h
@@ -28,29 +28,6 @@ class AtomicValue {
return cast_helper<T>::to_return_type(base::Acquire_Load(&value_));
}
- V8_INLINE bool TrySetValue(T old_value, T new_value) {
- return base::Release_CompareAndSwap(
- &value_, cast_helper<T>::to_storage_type(old_value),
- cast_helper<T>::to_storage_type(new_value)) ==
- cast_helper<T>::to_storage_type(old_value);
- }
-
- V8_INLINE void SetBits(T bits, T mask) {
- DCHECK_EQ(bits & ~mask, static_cast<T>(0));
- T old_value;
- T new_value;
- do {
- old_value = Value();
- new_value = (old_value & ~mask) | bits;
- } while (!TrySetValue(old_value, new_value));
- }
-
- V8_INLINE void SetBit(int bit) {
- SetBits(static_cast<T>(1) << bit, static_cast<T>(1) << bit);
- }
-
- V8_INLINE void ClearBit(int bit) { SetBits(0, 1 << bit); }
-
V8_INLINE void SetValue(T new_value) {
base::Release_Store(&value_, cast_helper<T>::to_storage_type(new_value));
}
@@ -81,122 +58,57 @@ class AtomicValue {
base::AtomicWord value_;
};
-class AsAtomic32 {
+// Provides atomic operations for values stored at some address.
+template <typename TAtomicStorageType>
+class AsAtomicImpl {
public:
- template <typename T>
- static T Acquire_Load(T* addr) {
- STATIC_ASSERT(sizeof(T) <= sizeof(base::Atomic32));
- return to_return_type<T>(base::Acquire_Load(to_storage_addr(addr)));
- }
+ using AtomicStorageType = TAtomicStorageType;
template <typename T>
- static T Relaxed_Load(T* addr) {
- STATIC_ASSERT(sizeof(T) <= sizeof(base::Atomic32));
- return to_return_type<T>(base::Relaxed_Load(to_storage_addr(addr)));
- }
-
- template <typename T>
- static void Release_Store(T* addr,
- typename std::remove_reference<T>::type new_value) {
- STATIC_ASSERT(sizeof(T) <= sizeof(base::Atomic32));
- base::Release_Store(to_storage_addr(addr), to_storage_type(new_value));
- }
-
- template <typename T>
- static void Relaxed_Store(T* addr,
- typename std::remove_reference<T>::type new_value) {
- STATIC_ASSERT(sizeof(T) <= sizeof(base::Atomic32));
- base::Relaxed_Store(to_storage_addr(addr), to_storage_type(new_value));
- }
-
- template <typename T>
- static T Release_CompareAndSwap(
- T* addr, typename std::remove_reference<T>::type old_value,
- typename std::remove_reference<T>::type new_value) {
- STATIC_ASSERT(sizeof(T) <= sizeof(base::Atomic32));
- return to_return_type<T>(base::Release_CompareAndSwap(
- to_storage_addr(addr), to_storage_type(old_value),
- to_storage_type(new_value)));
- }
-
- // Atomically sets bits selected by the mask to the given value.
- // Returns false if the bits are already set as needed.
- template <typename T>
- static bool SetBits(T* addr, T bits, T mask) {
- STATIC_ASSERT(sizeof(T) <= sizeof(base::Atomic32));
- DCHECK_EQ(bits & ~mask, static_cast<T>(0));
- T old_value;
- T new_value;
- do {
- old_value = Relaxed_Load(addr);
- if ((old_value & mask) == bits) return false;
- new_value = (old_value & ~mask) | bits;
- } while (Release_CompareAndSwap(addr, old_value, new_value) != old_value);
- return true;
- }
-
- private:
- template <typename T>
- static base::Atomic32 to_storage_type(T value) {
- return static_cast<base::Atomic32>(value);
- }
- template <typename T>
- static T to_return_type(base::Atomic32 value) {
- return static_cast<T>(value);
- }
- template <typename T>
- static base::Atomic32* to_storage_addr(T* value) {
- return reinterpret_cast<base::Atomic32*>(value);
- }
- template <typename T>
- static const base::Atomic32* to_storage_addr(const T* value) {
- return reinterpret_cast<const base::Atomic32*>(value);
- }
-};
-
-class AsAtomicWord {
- public:
- template <typename T>
static T Acquire_Load(T* addr) {
- STATIC_ASSERT(sizeof(T) <= sizeof(base::AtomicWord));
- return to_return_type<T>(base::Acquire_Load(to_storage_addr(addr)));
+ STATIC_ASSERT(sizeof(T) <= sizeof(AtomicStorageType));
+ return cast_helper<T>::to_return_type(
+ base::Acquire_Load(to_storage_addr(addr)));
}
template <typename T>
static T Relaxed_Load(T* addr) {
- STATIC_ASSERT(sizeof(T) <= sizeof(base::AtomicWord));
- return to_return_type<T>(base::Relaxed_Load(to_storage_addr(addr)));
+ STATIC_ASSERT(sizeof(T) <= sizeof(AtomicStorageType));
+ return cast_helper<T>::to_return_type(
+ base::Relaxed_Load(to_storage_addr(addr)));
}
template <typename T>
static void Release_Store(T* addr,
typename std::remove_reference<T>::type new_value) {
- STATIC_ASSERT(sizeof(T) <= sizeof(base::AtomicWord));
- base::Release_Store(to_storage_addr(addr), to_storage_type(new_value));
+ STATIC_ASSERT(sizeof(T) <= sizeof(AtomicStorageType));
+ base::Release_Store(to_storage_addr(addr),
+ cast_helper<T>::to_storage_type(new_value));
}
template <typename T>
static void Relaxed_Store(T* addr,
typename std::remove_reference<T>::type new_value) {
- STATIC_ASSERT(sizeof(T) <= sizeof(base::AtomicWord));
- base::Relaxed_Store(to_storage_addr(addr), to_storage_type(new_value));
+ STATIC_ASSERT(sizeof(T) <= sizeof(AtomicStorageType));
+ base::Relaxed_Store(to_storage_addr(addr),
+ cast_helper<T>::to_storage_type(new_value));
}
template <typename T>
static T Release_CompareAndSwap(
T* addr, typename std::remove_reference<T>::type old_value,
typename std::remove_reference<T>::type new_value) {
- STATIC_ASSERT(sizeof(T) <= sizeof(base::AtomicWord));
- return to_return_type<T>(base::Release_CompareAndSwap(
- to_storage_addr(addr), to_storage_type(old_value),
- to_storage_type(new_value)));
+ STATIC_ASSERT(sizeof(T) <= sizeof(AtomicStorageType));
+ return cast_helper<T>::to_return_type(base::Release_CompareAndSwap(
+ to_storage_addr(addr), cast_helper<T>::to_storage_type(old_value),
+ cast_helper<T>::to_storage_type(new_value)));
}
// Atomically sets bits selected by the mask to the given value.
// Returns false if the bits are already set as needed.
template <typename T>
static bool SetBits(T* addr, T bits, T mask) {
- STATIC_ASSERT(sizeof(T) <= sizeof(base::AtomicWord));
+ STATIC_ASSERT(sizeof(T) <= sizeof(AtomicStorageType));
DCHECK_EQ(bits & ~mask, static_cast<T>(0));
T old_value;
T new_value;
@@ -209,173 +121,50 @@ class AsAtomicWord {
}
private:
- template <typename T>
- static base::AtomicWord to_storage_type(T value) {
- return static_cast<base::AtomicWord>(value);
- }
- template <typename T>
- static T to_return_type(base::AtomicWord value) {
- return static_cast<T>(value);
- }
- template <typename T>
- static base::AtomicWord* to_storage_addr(T* value) {
- return reinterpret_cast<base::AtomicWord*>(value);
- }
- template <typename T>
- static const base::AtomicWord* to_storage_addr(const T* value) {
- return reinterpret_cast<const base::AtomicWord*>(value);
- }
-};
-
-class AsAtomic8 {
- public:
- template <typename T>
- static T Acquire_Load(T* addr) {
- STATIC_ASSERT(sizeof(T) <= sizeof(base::Atomic8));
- return to_return_type<T>(base::Acquire_Load(to_storage_addr(addr)));
- }
-
- template <typename T>
- static T Relaxed_Load(T* addr) {
- STATIC_ASSERT(sizeof(T) <= sizeof(base::Atomic8));
- return to_return_type<T>(base::Relaxed_Load(to_storage_addr(addr)));
- }
-
- template <typename T>
- static void Release_Store(T* addr,
- typename std::remove_reference<T>::type new_value) {
- STATIC_ASSERT(sizeof(T) <= sizeof(base::Atomic8));
- base::Release_Store(to_storage_addr(addr), to_storage_type(new_value));
- }
-
- template <typename T>
- static void Relaxed_Store(T* addr,
- typename std::remove_reference<T>::type new_value) {
- STATIC_ASSERT(sizeof(T) <= sizeof(base::Atomic8));
- base::Relaxed_Store(to_storage_addr(addr), to_storage_type(new_value));
- }
+ template <typename U>
+ struct cast_helper {
+ static AtomicStorageType to_storage_type(U value) {
+ return static_cast<AtomicStorageType>(value);
+ }
+ static U to_return_type(AtomicStorageType value) {
+ return static_cast<U>(value);
+ }
+ };
- template <typename T>
- static T Release_CompareAndSwap(
- T* addr, typename std::remove_reference<T>::type old_value,
- typename std::remove_reference<T>::type new_value) {
- STATIC_ASSERT(sizeof(T) <= sizeof(base::Atomic8));
- return to_return_type<T>(base::Release_CompareAndSwap(
- to_storage_addr(addr), to_storage_type(old_value),
- to_storage_type(new_value)));
- }
+ template <typename U>
+ struct cast_helper<U*> {
+ static AtomicStorageType to_storage_type(U* value) {
+ return reinterpret_cast<AtomicStorageType>(value);
+ }
+ static U* to_return_type(AtomicStorageType value) {
+ return reinterpret_cast<U*>(value);
+ }
+ };
- private:
template <typename T>
- static base::Atomic8 to_storage_type(T value) {
- return static_cast<base::Atomic8>(value);
+ static AtomicStorageType* to_storage_addr(T* value) {
+ return reinterpret_cast<AtomicStorageType*>(value);
}
template <typename T>
- static T to_return_type(base::Atomic8 value) {
- return static_cast<T>(value);
- }
- template <typename T>
- static base::Atomic8* to_storage_addr(T* value) {
- return reinterpret_cast<base::Atomic8*>(value);
- }
- template <typename T>
- static const base::Atomic8* to_storage_addr(const T* value) {
- return reinterpret_cast<const base::Atomic8*>(value);
+ static const AtomicStorageType* to_storage_addr(const T* value) {
+ return reinterpret_cast<const AtomicStorageType*>(value);
}
};
-class AsAtomicPointer {
- public:
- template <typename T>
- static T Acquire_Load(T* addr) {
- STATIC_ASSERT(sizeof(T) <= sizeof(base::AtomicWord));
- return to_return_type<T>(base::Acquire_Load(to_storage_addr(addr)));
- }
-
- template <typename T>
- static T Relaxed_Load(T* addr) {
- STATIC_ASSERT(sizeof(T) <= sizeof(base::AtomicWord));
- return to_return_type<T>(base::Relaxed_Load(to_storage_addr(addr)));
- }
-
- template <typename T>
- static void Release_Store(T* addr,
- typename std::remove_reference<T>::type new_value) {
- STATIC_ASSERT(sizeof(T) <= sizeof(base::AtomicWord));
- base::Release_Store(to_storage_addr(addr), to_storage_type(new_value));
- }
-
- template <typename T>
- static void Relaxed_Store(T* addr,
- typename std::remove_reference<T>::type new_value) {
- STATIC_ASSERT(sizeof(T) <= sizeof(base::AtomicWord));
- base::Relaxed_Store(to_storage_addr(addr), to_storage_type(new_value));
- }
+using AsAtomic8 = AsAtomicImpl<base::Atomic8>;
+using AsAtomic32 = AsAtomicImpl<base::Atomic32>;
+using AsAtomicWord = AsAtomicImpl<base::AtomicWord>;
+// This is similar to AsAtomicWord, but it explicitly deletes the operations
+// that give atomic access to the bit representation of stored values.
+template <typename TAtomicStorageType>
+class AsAtomicPointerImpl : public AsAtomicImpl<TAtomicStorageType> {
+ public:
template <typename T>
- static T Release_CompareAndSwap(
- T* addr, typename std::remove_reference<T>::type old_value,
- typename std::remove_reference<T>::type new_value) {
- STATIC_ASSERT(sizeof(T) <= sizeof(base::AtomicWord));
- return to_return_type<T>(base::Release_CompareAndSwap(
- to_storage_addr(addr), to_storage_type(old_value),
- to_storage_type(new_value)));
- }
-
- private:
- template <typename T>
- static base::AtomicWord to_storage_type(T value) {
- return reinterpret_cast<base::AtomicWord>(value);
- }
- template <typename T>
- static T to_return_type(base::AtomicWord value) {
- return reinterpret_cast<T>(value);
- }
- template <typename T>
- static base::AtomicWord* to_storage_addr(T* value) {
- return reinterpret_cast<base::AtomicWord*>(value);
- }
- template <typename T>
- static const base::AtomicWord* to_storage_addr(const T* value) {
- return reinterpret_cast<const base::AtomicWord*>(value);
- }
+ static bool SetBits(T* addr, T bits, T mask) = delete;
};
-// This class is intended to be used as a wrapper for elements of an array
-// that is passed in to STL functions such as std::sort. It ensures that
-// elements accesses are atomic.
-// Usage example:
-// Object** given_array;
-// AtomicElement<Object*>* wrapped =
-// reinterpret_cast<AtomicElement<Object*>(given_array);
-// std::sort(wrapped, wrapped + given_length, cmp);
-// where the cmp function uses the value() accessor to compare the elements.
-template <typename T>
-class AtomicElement {
- public:
- AtomicElement(const AtomicElement<T>& other) {
- AsAtomicPointer::Relaxed_Store(
- &value_, AsAtomicPointer::Relaxed_Load(&other.value_));
- }
-
- void operator=(const AtomicElement<T>& other) {
- AsAtomicPointer::Relaxed_Store(
- &value_, AsAtomicPointer::Relaxed_Load(&other.value_));
- }
-
- T value() const { return AsAtomicPointer::Relaxed_Load(&value_); }
-
- bool operator<(const AtomicElement<T>& other) const {
- return value() < other.value();
- }
-
- bool operator==(const AtomicElement<T>& other) const {
- return value() == other.value();
- }
-
- private:
- T value_;
-};
+using AsAtomicPointer = AsAtomicPointerImpl<base::AtomicWord>;
template <typename T,
typename = typename std::enable_if<std::is_unsigned<T>::value>::type>
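The SetBits retry loop kept by AsAtomicImpl is the classic compare-and-swap update: reload, compute the new word, and retry until the CAS observes the value it started from. The same loop over std::atomic, as a standalone sketch:

    #include <atomic>
    #include <cstdint>

    // Set the bits selected by |mask| to |bits|; return false when the
    // masked bits already hold the desired value.
    bool SetBits(std::atomic<uint32_t>& word, uint32_t bits, uint32_t mask) {
      uint32_t old_value = word.load(std::memory_order_relaxed);
      for (;;) {
        if ((old_value & mask) == bits) return false;
        uint32_t new_value = (old_value & ~mask) | bits;
        // On failure, compare_exchange_weak refreshes old_value with the
        // currently stored word, so the loop retries against fresh state.
        if (word.compare_exchange_weak(old_value, new_value,
                                       std::memory_order_release,
                                       std::memory_order_relaxed)) {
          return true;
        }
      }
    }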
diff --git a/deps/v8/src/base/atomicops.h b/deps/v8/src/base/atomicops.h
index c4c28f70c5..4c7a5ebe0f 100644
--- a/deps/v8/src/base/atomicops.h
+++ b/deps/v8/src/base/atomicops.h
@@ -40,6 +40,7 @@ namespace v8 {
namespace base {
typedef char Atomic8;
+typedef int16_t Atomic16;
typedef int32_t Atomic32;
#if defined(V8_HOST_ARCH_64_BIT)
// We need to be able to go between Atomic64 and AtomicWord implicitly. This
@@ -65,6 +66,8 @@ typedef intptr_t AtomicWord;
// Always return the old value of "*ptr"
//
// This routine implies no memory barriers.
+Atomic16 Relaxed_CompareAndSwap(volatile Atomic16* ptr, Atomic16 old_value,
+ Atomic16 new_value);
Atomic32 Relaxed_CompareAndSwap(volatile Atomic32* ptr, Atomic32 old_value,
Atomic32 new_value);
@@ -97,10 +100,12 @@ Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
void SeqCst_MemoryFence();
void Relaxed_Store(volatile Atomic8* ptr, Atomic8 value);
+void Relaxed_Store(volatile Atomic16* ptr, Atomic16 value);
void Relaxed_Store(volatile Atomic32* ptr, Atomic32 value);
void Release_Store(volatile Atomic32* ptr, Atomic32 value);
Atomic8 Relaxed_Load(volatile const Atomic8* ptr);
+Atomic16 Relaxed_Load(volatile const Atomic16* ptr);
Atomic32 Relaxed_Load(volatile const Atomic32* ptr);
Atomic32 Acquire_Load(volatile const Atomic32* ptr);
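As the comment above notes, the compare-and-swap entry points always return the old value of *ptr, whether or not the swap happened; callers detect success by comparing the result with the expected old value. The same contract in std::atomic terms (a sketch, not the V8 implementation):

    #include <atomic>
    #include <cstdint>

    // CAS mirroring the contract above: returns the value actually observed
    // in *ptr; the swap happened iff the result equals old_value.
    int16_t Relaxed_CompareAndSwap(std::atomic<int16_t>* ptr, int16_t old_value,
                                   int16_t new_value) {
      ptr->compare_exchange_strong(old_value, new_value,
                                   std::memory_order_relaxed);
      return old_value;  // rewritten with the observed value on failure
    }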
diff --git a/deps/v8/src/base/atomicops_internals_portable.h b/deps/v8/src/base/atomicops_internals_portable.h
index bd79558313..2e8ee1d41b 100644
--- a/deps/v8/src/base/atomicops_internals_portable.h
+++ b/deps/v8/src/base/atomicops_internals_portable.h
@@ -50,6 +50,13 @@ inline void SeqCst_MemoryFence() {
#endif
}
+inline Atomic16 Relaxed_CompareAndSwap(volatile Atomic16* ptr,
+ Atomic16 old_value, Atomic16 new_value) {
+ __atomic_compare_exchange_n(ptr, &old_value, new_value, false,
+ __ATOMIC_RELAXED, __ATOMIC_RELAXED);
+ return old_value;
+}
+
inline Atomic32 Relaxed_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value, Atomic32 new_value) {
__atomic_compare_exchange_n(ptr, &old_value, new_value, false,
@@ -98,6 +105,10 @@ inline void Relaxed_Store(volatile Atomic8* ptr, Atomic8 value) {
__atomic_store_n(ptr, value, __ATOMIC_RELAXED);
}
+inline void Relaxed_Store(volatile Atomic16* ptr, Atomic16 value) {
+ __atomic_store_n(ptr, value, __ATOMIC_RELAXED);
+}
+
inline void Relaxed_Store(volatile Atomic32* ptr, Atomic32 value) {
__atomic_store_n(ptr, value, __ATOMIC_RELAXED);
}
@@ -110,6 +121,10 @@ inline Atomic8 Relaxed_Load(volatile const Atomic8* ptr) {
return __atomic_load_n(ptr, __ATOMIC_RELAXED);
}
+inline Atomic16 Relaxed_Load(volatile const Atomic16* ptr) {
+ return __atomic_load_n(ptr, __ATOMIC_RELAXED);
+}
+
inline Atomic32 Relaxed_Load(volatile const Atomic32* ptr) {
return __atomic_load_n(ptr, __ATOMIC_RELAXED);
}
diff --git a/deps/v8/src/base/atomicops_internals_std.h b/deps/v8/src/base/atomicops_internals_std.h
index 4ce7b461e0..6c3d842449 100644
--- a/deps/v8/src/base/atomicops_internals_std.h
+++ b/deps/v8/src/base/atomicops_internals_std.h
@@ -28,6 +28,14 @@ inline void SeqCst_MemoryFence() {
std::atomic_thread_fence(std::memory_order_seq_cst);
}
+inline Atomic16 Relaxed_CompareAndSwap(volatile Atomic16* ptr,
+ Atomic16 old_value, Atomic16 new_value) {
+ std::atomic_compare_exchange_strong_explicit(
+ helper::to_std_atomic(ptr), &old_value, new_value,
+ std::memory_order_relaxed, std::memory_order_relaxed);
+ return old_value;
+}
+
inline Atomic32 Relaxed_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value, Atomic32 new_value) {
std::atomic_compare_exchange_strong_explicit(
@@ -86,6 +94,11 @@ inline void Relaxed_Store(volatile Atomic8* ptr, Atomic8 value) {
std::memory_order_relaxed);
}
+inline void Relaxed_Store(volatile Atomic16* ptr, Atomic16 value) {
+ std::atomic_store_explicit(helper::to_std_atomic(ptr), value,
+ std::memory_order_relaxed);
+}
+
inline void Relaxed_Store(volatile Atomic32* ptr, Atomic32 value) {
std::atomic_store_explicit(helper::to_std_atomic(ptr), value,
std::memory_order_relaxed);
@@ -101,6 +114,11 @@ inline Atomic8 Relaxed_Load(volatile const Atomic8* ptr) {
std::memory_order_relaxed);
}
+inline Atomic16 Relaxed_Load(volatile const Atomic16* ptr) {
+ return std::atomic_load_explicit(helper::to_std_atomic_const(ptr),
+ std::memory_order_relaxed);
+}
+
inline Atomic32 Relaxed_Load(volatile const Atomic32* ptr) {
return std::atomic_load_explicit(helper::to_std_atomic_const(ptr),
std::memory_order_relaxed);
diff --git a/deps/v8/src/base/bits.cc b/deps/v8/src/base/bits.cc
index fedbdb2d2d..c0db7d0457 100644
--- a/deps/v8/src/base/bits.cc
+++ b/deps/v8/src/base/bits.cc
@@ -61,7 +61,7 @@ int32_t SignedMulHighAndAdd32(int32_t lhs, int32_t rhs, int32_t acc) {
int32_t SignedDiv32(int32_t lhs, int32_t rhs) {
if (rhs == 0) return 0;
- if (rhs == -1) return -lhs;
+ if (rhs == -1) return lhs == std::numeric_limits<int32_t>::min() ? lhs : -lhs;
return lhs / rhs;
}
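The guard matters because negating INT32_MIN overflows: two's complement has no +2^31, so both -lhs and lhs / -1 are undefined behavior for that single input. The fixed function pins the result to INT32_MIN, which is also what wrap-around negation would produce:

    #include <cstdint>
    #include <limits>

    int32_t SignedDiv32(int32_t lhs, int32_t rhs);  // as patched above

    bool CheckEdgeCases() {
      const int32_t kMin = std::numeric_limits<int32_t>::min();
      return SignedDiv32(kMin, -1) == kMin &&  // would otherwise overflow
             SignedDiv32(7, -1) == -7 && SignedDiv32(7, 0) == 0;
    }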
diff --git a/deps/v8/src/base/bounded-page-allocator.cc b/deps/v8/src/base/bounded-page-allocator.cc
index ca9dde25f7..9b01f89428 100644
--- a/deps/v8/src/base/bounded-page-allocator.cc
+++ b/deps/v8/src/base/bounded-page-allocator.cc
@@ -28,7 +28,7 @@ size_t BoundedPageAllocator::size() const { return region_allocator_.size(); }
void* BoundedPageAllocator::AllocatePages(void* hint, size_t size,
size_t alignment,
PageAllocator::Permission access) {
- LockGuard<Mutex> guard(&mutex_);
+ MutexGuard guard(&mutex_);
CHECK(IsAligned(alignment, region_allocator_.page_size()));
// Region allocator does not support alignments bigger than its own
@@ -45,8 +45,22 @@ void* BoundedPageAllocator::AllocatePages(void* hint, size_t size,
return reinterpret_cast<void*>(address);
}
+bool BoundedPageAllocator::AllocatePagesAt(Address address, size_t size,
+ PageAllocator::Permission access) {
+ CHECK(IsAligned(address, allocate_page_size_));
+ CHECK(IsAligned(size, allocate_page_size_));
+ CHECK(region_allocator_.contains(address, size));
+
+ if (!region_allocator_.AllocateRegionAt(address, size)) {
+ return false;
+ }
+ CHECK(page_allocator_->SetPermissions(reinterpret_cast<void*>(address), size,
+ access));
+ return true;
+}
+
bool BoundedPageAllocator::FreePages(void* raw_address, size_t size) {
- LockGuard<Mutex> guard(&mutex_);
+ MutexGuard guard(&mutex_);
Address address = reinterpret_cast<Address>(raw_address);
size_t freed_size = region_allocator_.FreeRegion(address);
@@ -72,13 +86,13 @@ bool BoundedPageAllocator::ReleasePages(void* raw_address, size_t size,
{
// There must be an allocated region at given |address| of a size not
// smaller than |size|.
- LockGuard<Mutex> guard(&mutex_);
+ MutexGuard guard(&mutex_);
CHECK_EQ(allocated_size, region_allocator_.CheckRegion(address));
}
#endif
if (new_allocated_size < allocated_size) {
- LockGuard<Mutex> guard(&mutex_);
+ MutexGuard guard(&mutex_);
region_allocator_.TrimRegion(address, new_allocated_size);
}
@@ -97,5 +111,9 @@ bool BoundedPageAllocator::SetPermissions(void* address, size_t size,
return page_allocator_->SetPermissions(address, size, access);
}
+bool BoundedPageAllocator::DiscardSystemPages(void* address, size_t size) {
+ return page_allocator_->DiscardSystemPages(address, size);
+}
+
} // namespace base
} // namespace v8
diff --git a/deps/v8/src/base/bounded-page-allocator.h b/deps/v8/src/base/bounded-page-allocator.h
index e3d928618b..b1289c4c62 100644
--- a/deps/v8/src/base/bounded-page-allocator.h
+++ b/deps/v8/src/base/bounded-page-allocator.h
@@ -53,15 +53,19 @@ class V8_BASE_EXPORT BoundedPageAllocator : public v8::PageAllocator {
return page_allocator_->GetRandomMmapAddr();
}
- void* AllocatePages(void* address, size_t size, size_t alignment,
- PageAllocator::Permission access) override;
+ void* AllocatePages(void* hint, size_t size, size_t alignment,
+ Permission access) override;
+
+ // Allocates pages at given address, returns true on success.
+ bool AllocatePagesAt(Address address, size_t size, Permission access);
bool FreePages(void* address, size_t size) override;
bool ReleasePages(void* address, size_t size, size_t new_size) override;
- bool SetPermissions(void* address, size_t size,
- PageAllocator::Permission access) override;
+ bool SetPermissions(void* address, size_t size, Permission access) override;
+
+ bool DiscardSystemPages(void* address, size_t size) override;
private:
v8::base::Mutex mutex_;
diff --git a/deps/v8/src/base/build_config.h b/deps/v8/src/base/build_config.h
index 695e67a618..88073dd520 100644
--- a/deps/v8/src/base/build_config.h
+++ b/deps/v8/src/base/build_config.h
@@ -21,7 +21,7 @@
#elif defined(_M_IX86) || defined(__i386__)
#define V8_HOST_ARCH_IA32 1
#define V8_HOST_ARCH_32_BIT 1
-#elif defined(__AARCH64EL__)
+#elif defined(__AARCH64EL__) || defined(_M_ARM64)
#define V8_HOST_ARCH_ARM64 1
#define V8_HOST_ARCH_64_BIT 1
#elif defined(__ARMEL__)
@@ -83,7 +83,7 @@
#define V8_TARGET_ARCH_X64 1
#elif defined(_M_IX86) || defined(__i386__)
#define V8_TARGET_ARCH_IA32 1
-#elif defined(__AARCH64EL__)
+#elif defined(__AARCH64EL__) || defined(_M_ARM64)
#define V8_TARGET_ARCH_ARM64 1
#elif defined(__ARMEL__)
#define V8_TARGET_ARCH_ARM 1
diff --git a/deps/v8/src/base/compiler-specific.h b/deps/v8/src/base/compiler-specific.h
index 8a5b9e6a60..46859bff85 100644
--- a/deps/v8/src/base/compiler-specific.h
+++ b/deps/v8/src/base/compiler-specific.h
@@ -94,9 +94,11 @@
// Allowing the use of noexcept by removing the keyword on older compilers that
// do not support adding noexcept to default members.
-#if ((!defined(V8_CC_GNU) && !defined(V8_TARGET_ARCH_MIPS) && \
- !defined(V8_TARGET_ARCH_MIPS64) && !defined(V8_TARGET_ARCH_PPC) && \
- !defined(V8_TARGET_ARCH_PPC64)) || \
+// Disabled on MSVC because constructors of standard containers are not noexcept
+// there.
+#if ((!defined(V8_CC_GNU) && !defined(V8_CC_MSVC) && \
+ !defined(V8_TARGET_ARCH_MIPS) && !defined(V8_TARGET_ARCH_MIPS64) && \
+ !defined(V8_TARGET_ARCH_PPC) && !defined(V8_TARGET_ARCH_PPC64)) || \
(defined(__clang__) && __cplusplus > 201300L))
#define V8_NOEXCEPT noexcept
#else
diff --git a/deps/v8/src/base/debug/stack_trace_win.cc b/deps/v8/src/base/debug/stack_trace_win.cc
index 6b22131233..090137b9f0 100644
--- a/deps/v8/src/base/debug/stack_trace_win.cc
+++ b/deps/v8/src/base/debug/stack_trace_win.cc
@@ -7,6 +7,13 @@
#include "src/base/debug/stack_trace.h"
+// This file can't use "src/base/win32-headers.h" because it defines symbols
+// that lead to compilation errors, but `NOMINMAX` must still be defined to
+// keep <windows.h> from defining the `min` and `max` macros.
+#ifndef NOMINMAX
+#define NOMINMAX
+#endif
+
#include <windows.h>
#include <dbghelp.h>
#include <Shlwapi.h>
@@ -189,10 +196,19 @@ void StackTrace::InitTrace(const CONTEXT* context_record) {
STACKFRAME64 stack_frame;
memset(&stack_frame, 0, sizeof(stack_frame));
#if defined(_WIN64)
+#if defined(_M_X64)
int machine_type = IMAGE_FILE_MACHINE_AMD64;
stack_frame.AddrPC.Offset = context_record->Rip;
stack_frame.AddrFrame.Offset = context_record->Rbp;
stack_frame.AddrStack.Offset = context_record->Rsp;
+#elif defined(_M_ARM64)
+ int machine_type = IMAGE_FILE_MACHINE_ARM64;
+ stack_frame.AddrPC.Offset = context_record->Pc;
+ stack_frame.AddrFrame.Offset = context_record->Fp;
+ stack_frame.AddrStack.Offset = context_record->Sp;
+#else
+#error Unsupported Arch
+#endif
#else
int machine_type = IMAGE_FILE_MACHINE_I386;
stack_frame.AddrPC.Offset = context_record->Eip;
diff --git a/deps/v8/src/base/enum-set.h b/deps/v8/src/base/enum-set.h
new file mode 100644
index 0000000000..fac7c30ae0
--- /dev/null
+++ b/deps/v8/src/base/enum-set.h
@@ -0,0 +1,65 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BASE_ENUM_SET_H_
+#define V8_BASE_ENUM_SET_H_
+
+#include <type_traits>
+
+#include "src/base/logging.h"
+
+namespace v8 {
+namespace base {
+
+// A poor man's version of STL's bitset: A bit set of enums E (without explicit
+// values), fitting into an integral type T.
+template <class E, class T = int>
+class EnumSet {
+ static_assert(std::is_enum<E>::value, "EnumSet can only be used with enums");
+
+ public:
+ constexpr EnumSet() = default;
+
+ EnumSet(std::initializer_list<E> init) {
+ for (E e : init) Add(e);
+ }
+
+ bool empty() const { return bits_ == 0; }
+ bool contains(E element) const { return (bits_ & Mask(element)) != 0; }
+ bool contains_any(const EnumSet& set) const {
+ return (bits_ & set.bits_) != 0;
+ }
+ void Add(E element) { bits_ |= Mask(element); }
+ void Add(const EnumSet& set) { bits_ |= set.bits_; }
+ void Remove(E element) { bits_ &= ~Mask(element); }
+ void Remove(const EnumSet& set) { bits_ &= ~set.bits_; }
+ void RemoveAll() { bits_ = 0; }
+ void Intersect(const EnumSet& set) { bits_ &= set.bits_; }
+ T ToIntegral() const { return bits_; }
+ bool operator==(const EnumSet& set) const { return bits_ == set.bits_; }
+ bool operator!=(const EnumSet& set) const { return bits_ != set.bits_; }
+ EnumSet operator|(const EnumSet& set) const {
+ return EnumSet(bits_ | set.bits_);
+ }
+ EnumSet operator&(const EnumSet& set) const {
+ return EnumSet(bits_ & set.bits_);
+ }
+
+ static constexpr EnumSet FromIntegral(T bits) { return EnumSet{bits}; }
+
+ private:
+ explicit constexpr EnumSet(T bits) : bits_(bits) {}
+
+ static T Mask(E element) {
+ DCHECK_GT(sizeof(T) * 8, static_cast<int>(element));
+ return T{1} << static_cast<typename std::underlying_type<E>::type>(element);
+ }
+
+ T bits_ = 0;
+};
+
+} // namespace base
+} // namespace v8
+
+#endif // V8_BASE_ENUM_SET_H_
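A hypothetical usage sketch of the new EnumSet (the enum and its values below are illustrative, not from V8):

    #include <cstdint>
    #include "src/base/enum-set.h"

    enum class Feature { kThreads, kSimd, kTailCalls };
    using Features = v8::base::EnumSet<Feature, uint8_t>;

    bool Demo() {
      Features enabled{Feature::kThreads, Feature::kSimd};
      enabled.Remove(Feature::kSimd);
      // Round-trip through the integral representation.
      Features copy = Features::FromIntegral(enabled.ToIntegral());
      return enabled.contains(Feature::kThreads) && copy == enabled;
    }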
diff --git a/deps/v8/src/base/functional.h b/deps/v8/src/base/functional.h
index 1b632c6cc2..7bf6b4bac0 100644
--- a/deps/v8/src/base/functional.h
+++ b/deps/v8/src/base/functional.h
@@ -137,18 +137,17 @@ V8_INLINE size_t hash_value(std::pair<T1, T2> const& v) {
return hash_combine(v.first, v.second);
}
-
template <typename T>
-struct hash : public std::unary_function<T, size_t> {
+struct hash {
V8_INLINE size_t operator()(T const& v) const { return hash_value(v); }
};
-#define V8_BASE_HASH_SPECIALIZE(type) \
- template <> \
- struct hash<type> : public std::unary_function<type, size_t> { \
- V8_INLINE size_t operator()(type const v) const { \
- return ::v8::base::hash_value(v); \
- } \
+#define V8_BASE_HASH_SPECIALIZE(type) \
+ template <> \
+ struct hash<type> { \
+ V8_INLINE size_t operator()(type const v) const { \
+ return ::v8::base::hash_value(v); \
+ } \
};
V8_BASE_HASH_SPECIALIZE(bool)
V8_BASE_HASH_SPECIALIZE(signed char)
@@ -166,7 +165,7 @@ V8_BASE_HASH_SPECIALIZE(double)
#undef V8_BASE_HASH_SPECIALIZE
template <typename T>
-struct hash<T*> : public std::unary_function<T*, size_t> {
+struct hash<T*> {
V8_INLINE size_t operator()(T* const v) const {
return ::v8::base::hash_value(v);
}
@@ -181,10 +180,10 @@ struct hash<T*> : public std::unary_function<T*, size_t> {
// hash data structure based on the bitwise representation of types.
template <typename T>
-struct bit_equal_to : public std::binary_function<T, T, bool> {};
+struct bit_equal_to {};
template <typename T>
-struct bit_hash : public std::unary_function<T, size_t> {};
+struct bit_hash {};
#define V8_BASE_BIT_SPECIALIZE_TRIVIAL(type) \
template <> \
@@ -203,19 +202,19 @@ V8_BASE_BIT_SPECIALIZE_TRIVIAL(long long) // NOLINT(runtime/int)
V8_BASE_BIT_SPECIALIZE_TRIVIAL(unsigned long long) // NOLINT(runtime/int)
#undef V8_BASE_BIT_SPECIALIZE_TRIVIAL
-#define V8_BASE_BIT_SPECIALIZE_BIT_CAST(type, btype) \
- template <> \
- struct bit_equal_to<type> : public std::binary_function<type, type, bool> { \
- V8_INLINE bool operator()(type lhs, type rhs) const { \
- return bit_cast<btype>(lhs) == bit_cast<btype>(rhs); \
- } \
- }; \
- template <> \
- struct bit_hash<type> : public std::unary_function<type, size_t> { \
- V8_INLINE size_t operator()(type v) const { \
- hash<btype> h; \
- return h(bit_cast<btype>(v)); \
- } \
+#define V8_BASE_BIT_SPECIALIZE_BIT_CAST(type, btype) \
+ template <> \
+ struct bit_equal_to<type> { \
+ V8_INLINE bool operator()(type lhs, type rhs) const { \
+ return bit_cast<btype>(lhs) == bit_cast<btype>(rhs); \
+ } \
+ }; \
+ template <> \
+ struct bit_hash<type> { \
+ V8_INLINE size_t operator()(type v) const { \
+ hash<btype> h; \
+ return h(bit_cast<btype>(v)); \
+ } \
};
V8_BASE_BIT_SPECIALIZE_BIT_CAST(float, uint32_t)
V8_BASE_BIT_SPECIALIZE_BIT_CAST(double, uint64_t)
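Dropping the std::unary_function and std::binary_function bases is a C++17 cleanup: both were deprecated in C++11 and removed in C++17, and the typedefs they contributed are unused here. Call sites are unaffected, e.g.:

    #include <unordered_set>
    #include "src/base/functional.h"

    // v8::base::hash still plugs into the standard containers; the removed
    // base classes were never required for that.
    std::unordered_set<int, v8::base::hash<int>> seen;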
diff --git a/deps/v8/src/base/ieee754.cc b/deps/v8/src/base/ieee754.cc
index 8c5641569d..d9846b7254 100644
--- a/deps/v8/src/base/ieee754.cc
+++ b/deps/v8/src/base/ieee754.cc
@@ -20,6 +20,7 @@
#include "src/base/build_config.h"
#include "src/base/macros.h"
+#include "src/base/overflowing-math.h"
namespace v8 {
namespace base {
@@ -945,7 +946,7 @@ double acos(double x) {
else
return pi + 2.0 * pio2_lo; /* acos(-1)= pi */
}
- return (x - x) / (x - x); /* acos(|x|>1) is NaN */
+ return std::numeric_limits<double>::signaling_NaN(); // acos(|x|>1) is NaN
}
if (ix < 0x3FE00000) { /* |x| < 0.5 */
if (ix <= 0x3C600000) return pio2_hi + pio2_lo; /*if|x|<2**-57*/
@@ -998,7 +999,7 @@ double acosh(double x) {
uint32_t lx;
EXTRACT_WORDS(hx, lx, x);
if (hx < 0x3FF00000) { /* x < 1 */
- return (x - x) / (x - x);
+ return std::numeric_limits<double>::signaling_NaN();
} else if (hx >= 0x41B00000) { /* x > 2**28 */
if (hx >= 0x7FF00000) { /* x is inf or NaN */
return x + x;
@@ -1072,9 +1073,10 @@ double asin(double x) {
if (ix >= 0x3FF00000) { /* |x|>= 1 */
uint32_t lx;
GET_LOW_WORD(lx, x);
- if (((ix - 0x3FF00000) | lx) == 0) /* asin(1)=+-pi/2 with inexact */
+ if (((ix - 0x3FF00000) | lx) == 0) { /* asin(1)=+-pi/2 with inexact */
return x * pio2_hi + x * pio2_lo;
- return (x - x) / (x - x); /* asin(|x|>1) is NaN */
+ }
+ return std::numeric_limits<double>::signaling_NaN(); // asin(|x|>1) is NaN
} else if (ix < 0x3FE00000) { /* |x|<0.5 */
if (ix < 0x3E400000) { /* if |x| < 2**-27 */
if (huge + x > one) return x; /* return x with inexact if x!=0*/
@@ -1298,11 +1300,13 @@ double atan2(double y, double x) {
ix = hx & 0x7FFFFFFF;
EXTRACT_WORDS(hy, ly, y);
iy = hy & 0x7FFFFFFF;
- if (((ix | ((lx | -static_cast<int32_t>(lx)) >> 31)) > 0x7FF00000) ||
- ((iy | ((ly | -static_cast<int32_t>(ly)) >> 31)) > 0x7FF00000)) {
+ if (((ix | ((lx | NegateWithWraparound<int32_t>(lx)) >> 31)) > 0x7FF00000) ||
+ ((iy | ((ly | NegateWithWraparound<int32_t>(ly)) >> 31)) > 0x7FF00000)) {
return x + y; /* x or y is NaN */
}
- if (((hx - 0x3FF00000) | lx) == 0) return atan(y); /* x=1.0 */
+ if ((SubWithWraparound(hx, 0x3FF00000) | lx) == 0) {
+ return atan(y); /* x=1.0 */
+ }
m = ((hy >> 31) & 1) | ((hx >> 30) & 2); /* 2*sign(x)+sign(y) */
/* when y = 0 */
@@ -1609,9 +1613,14 @@ double atanh(double x) {
uint32_t lx;
EXTRACT_WORDS(hx, lx, x);
ix = hx & 0x7FFFFFFF;
- if ((ix | ((lx | -static_cast<int32_t>(lx)) >> 31)) > 0x3FF00000) /* |x|>1 */
- return (x - x) / (x - x);
- if (ix == 0x3FF00000) return x / zero;
+ if ((ix | ((lx | NegateWithWraparound<int32_t>(lx)) >> 31)) > 0x3FF00000) {
+ /* |x|>1 */
+ return std::numeric_limits<double>::signaling_NaN();
+ }
+ if (ix == 0x3FF00000) {
+ return x > 0 ? std::numeric_limits<double>::infinity()
+ : -std::numeric_limits<double>::infinity();
+ }
if (ix < 0x3E300000 && (huge + x) > zero) return x; /* x<2**-28 */
SET_HIGH_WORD(x, ix);
if (ix < 0x3FE00000) { /* x < 0.5 */
@@ -1690,7 +1699,6 @@ double log(double x) {
Lg7 = 1.479819860511658591e-01; /* 3FC2F112 DF3E5244 */
static const double zero = 0.0;
- static volatile double vzero = 0.0;
double hfsq, f, s, z, R, w, t1, t2, dk;
int32_t k, hx, i, j;
@@ -1700,9 +1708,12 @@ double log(double x) {
k = 0;
if (hx < 0x00100000) { /* x < 2**-1022 */
- if (((hx & 0x7FFFFFFF) | lx) == 0)
- return -two54 / vzero; /* log(+-0)=-inf */
- if (hx < 0) return (x - x) / zero; /* log(-#) = NaN */
+ if (((hx & 0x7FFFFFFF) | lx) == 0) {
+ return -std::numeric_limits<double>::infinity(); /* log(+-0)=-inf */
+ }
+ if (hx < 0) {
+ return std::numeric_limits<double>::signaling_NaN(); /* log(-#) = NaN */
+ }
k -= 54;
x *= two54; /* subnormal number, scale up x */
GET_HIGH_WORD(hx, x);
@@ -1833,7 +1844,6 @@ double log1p(double x) {
Lp7 = 1.479819860511658591e-01; /* 3FC2F112 DF3E5244 */
static const double zero = 0.0;
- static volatile double vzero = 0.0;
double hfsq, f, c, s, z, R, u;
int32_t k, hx, hu, ax;
@@ -1845,9 +1855,9 @@ double log1p(double x) {
if (hx < 0x3FDA827A) { /* 1+x < sqrt(2)+ */
if (ax >= 0x3FF00000) { /* x <= -1.0 */
if (x == -1.0)
- return -two54 / vzero; /* log1p(-1)=+inf */
+ return -std::numeric_limits<double>::infinity(); /* log1p(-1)=+inf */
else
- return (x - x) / (x - x); /* log1p(x<-1)=NaN */
+ return std::numeric_limits<double>::signaling_NaN(); // log1p(x<-1)=NaN
}
if (ax < 0x3E200000) { /* |x| < 2**-29 */
if (two54 + x > zero /* raise inexact */
@@ -2016,9 +2026,6 @@ double log2(double x) {
ivln2hi = 1.44269504072144627571e+00, /* 0x3FF71547, 0x65200000 */
ivln2lo = 1.67517131648865118353e-10; /* 0x3DE705FC, 0x2EEFA200 */
- static const double zero = 0.0;
- static volatile double vzero = 0.0;
-
double f, hfsq, hi, lo, r, val_hi, val_lo, w, y;
int32_t i, k, hx;
uint32_t lx;
@@ -2027,15 +2034,18 @@ double log2(double x) {
k = 0;
if (hx < 0x00100000) { /* x < 2**-1022 */
- if (((hx & 0x7FFFFFFF) | lx) == 0)
- return -two54 / vzero; /* log(+-0)=-inf */
- if (hx < 0) return (x - x) / zero; /* log(-#) = NaN */
+ if (((hx & 0x7FFFFFFF) | lx) == 0) {
+ return -std::numeric_limits<double>::infinity(); /* log(+-0)=-inf */
+ }
+ if (hx < 0) {
+ return std::numeric_limits<double>::signaling_NaN(); /* log(-#) = NaN */
+ }
k -= 54;
x *= two54; /* subnormal number, scale up x */
GET_HIGH_WORD(hx, x);
}
if (hx >= 0x7FF00000) return x + x;
- if (hx == 0x3FF00000 && lx == 0) return zero; /* log(1) = +0 */
+ if (hx == 0x3FF00000 && lx == 0) return 0.0; /* log(1) = +0 */
k += (hx >> 20) - 1023;
hx &= 0x000FFFFF;
i = (hx + 0x95F64) & 0x100000;
@@ -2123,9 +2133,6 @@ double log10(double x) {
log10_2hi = 3.01029995663611771306e-01, /* 0x3FD34413, 0x509F6000 */
log10_2lo = 3.69423907715893078616e-13; /* 0x3D59FEF3, 0x11F12B36 */
- static const double zero = 0.0;
- static volatile double vzero = 0.0;
-
double y;
int32_t i, k, hx;
uint32_t lx;
@@ -2134,16 +2141,19 @@ double log10(double x) {
k = 0;
if (hx < 0x00100000) { /* x < 2**-1022 */
- if (((hx & 0x7FFFFFFF) | lx) == 0)
- return -two54 / vzero; /* log(+-0)=-inf */
- if (hx < 0) return (x - x) / zero; /* log(-#) = NaN */
+ if (((hx & 0x7FFFFFFF) | lx) == 0) {
+ return -std::numeric_limits<double>::infinity(); /* log(+-0)=-inf */
+ }
+ if (hx < 0) {
+ return std::numeric_limits<double>::quiet_NaN(); /* log(-#) = NaN */
+ }
k -= 54;
x *= two54; /* subnormal number, scale up x */
GET_HIGH_WORD(hx, x);
GET_LOW_WORD(lx, x);
}
if (hx >= 0x7FF00000) return x + x;
- if (hx == 0x3FF00000 && lx == 0) return zero; /* log(1) = +0 */
+ if (hx == 0x3FF00000 && lx == 0) return 0.0; /* log(1) = +0 */
k += (hx >> 20) - 1023;
i = (k & 0x80000000) >> 31;
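A note on the fdlibm changes above: negating or subtracting signed integers can overflow (e.g. for INT32_MIN), which is undefined behavior in C++, so those expressions are rewritten in terms of the *WithWraparound helpers introduced later in this patch (src/base/overflowing-math.h). A minimal standalone sketch of the idea, simplified from the templated helper and assuming two's-complement representation:

#include <stdint.h>
#include <limits>

// Simplified sketch of the helper: negate on the unsigned representation,
// where wraparound is well defined, then cast back to the signed type.
int32_t NegateWithWraparoundSketch(int32_t a) {
  return static_cast<int32_t>(-static_cast<uint32_t>(a));
}

int main() {
  uint32_t lx = 0x80000000u;  // low word whose int32 value is INT32_MIN
  // Old code: -static_cast<int32_t>(lx) negates INT32_MIN, which is signed
  // overflow and hence undefined behavior in C++.
  int32_t n = NegateWithWraparoundSketch(static_cast<int32_t>(lx));
  // Two's-complement wraparound maps -INT32_MIN back to INT32_MIN.
  return n == std::numeric_limits<int32_t>::min() ? 0 : 1;
}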
diff --git a/deps/v8/src/base/lazy-instance.h b/deps/v8/src/base/lazy-instance.h
index e965382b8d..bebb4e1bdc 100644
--- a/deps/v8/src/base/lazy-instance.h
+++ b/deps/v8/src/base/lazy-instance.h
@@ -68,6 +68,8 @@
#ifndef V8_BASE_LAZY_INSTANCE_H_
#define V8_BASE_LAZY_INSTANCE_H_
+#include <type_traits>
+
#include "src/base/macros.h"
#include "src/base/once.h"
@@ -92,12 +94,8 @@ struct LeakyInstanceTrait {
template <typename T>
struct StaticallyAllocatedInstanceTrait {
- // 16-byte alignment fallback to be on the safe side here.
- struct V8_ALIGNAS(T, 16) StorageType {
- char x[sizeof(T)];
- };
-
- STATIC_ASSERT(V8_ALIGNOF(StorageType) >= V8_ALIGNOF(T));
+ using StorageType =
+ typename std::aligned_storage<sizeof(T), alignof(T)>::type;
static T* MutableInstance(StorageType* storage) {
return reinterpret_cast<T*>(storage);
@@ -112,7 +110,7 @@ struct StaticallyAllocatedInstanceTrait {
template <typename T>
struct DynamicallyAllocatedInstanceTrait {
- typedef T* StorageType;
+ using StorageType = T*;
static T* MutableInstance(StorageType* storage) {
return *storage;
@@ -165,7 +163,7 @@ template <typename T, typename AllocationTrait, typename CreateTrait,
typename InitOnceTrait, typename DestroyTrait /* not used yet. */>
struct LazyInstanceImpl {
public:
- typedef typename AllocationTrait::StorageType StorageType;
+ using StorageType = typename AllocationTrait::StorageType;
private:
static void InitInstance(void* storage) {
@@ -226,6 +224,33 @@ struct LazyDynamicInstance {
CreateTrait, InitOnceTrait, DestroyTrait> type;
};
+// LeakyObject<T> wraps an object of type T, which is initialized in the
+// constructor but never destructed. Thus LeakyObject<T> is trivially
+// destructible and can be used in static (lazily initialized) variables.
+template <typename T>
+class LeakyObject {
+ public:
+ template <typename... Args>
+ explicit LeakyObject(Args&&... args) {
+ new (&storage_) T(std::forward<Args>(args)...);
+ }
+
+ T* get() { return reinterpret_cast<T*>(&storage_); }
+
+ private:
+ typename std::aligned_storage<sizeof(T), alignof(T)>::type storage_;
+
+ DISALLOW_COPY_AND_ASSIGN(LeakyObject);
+};
+
+// Define a function which returns a pointer to a lazily initialized and never
+// destructed object of type T.
+#define DEFINE_LAZY_LEAKY_OBJECT_GETTER(T, FunctionName, ...) \
+ T* FunctionName() { \
+ static ::v8::base::LeakyObject<T> object{__VA_ARGS__}; \
+ return object.get(); \
+ }
+
} // namespace base
} // namespace v8
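For illustration, a minimal sketch of how the new LeakyObject machinery is meant to be used; the Counter type and GetCounter name are hypothetical, while a real call site appears later in this patch in platform-posix.cc:

#include "src/base/lazy-instance.h"

class Counter {
 public:
  explicit Counter(int start) : value_(start) {}
  int Next() { return value_++; }

 private:
  int value_;
};

// Expands to `Counter* GetCounter()`, which constructs a function-local
// static LeakyObject<Counter> with the given arguments on first call and
// never runs the destructor afterwards.
DEFINE_LAZY_LEAKY_OBJECT_GETTER(Counter, GetCounter, 42)

int Demo() { return GetCounter()->Next(); }  // 42 on the first call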
diff --git a/deps/v8/src/base/macros.h b/deps/v8/src/base/macros.h
index 8a2efe61a9..8a088ffc40 100644
--- a/deps/v8/src/base/macros.h
+++ b/deps/v8/src/base/macros.h
@@ -109,19 +109,15 @@ V8_INLINE Dest bit_cast(Source const& source) {
}
// Explicitly declare the assignment operator as deleted.
-#define DISALLOW_ASSIGN(TypeName) TypeName& operator=(const TypeName&) = delete;
+#define DISALLOW_ASSIGN(TypeName) TypeName& operator=(const TypeName&) = delete
// Explicitly declare the copy constructor and assignment operator as deleted.
+// This also deletes the implicit move constructor and implicit move assignment
+// operator, but they can still be defined manually.
#define DISALLOW_COPY_AND_ASSIGN(TypeName) \
TypeName(const TypeName&) = delete; \
DISALLOW_ASSIGN(TypeName)
-// Explicitly declare all copy/move constructors and assignments as deleted.
-#define DISALLOW_COPY_AND_MOVE_AND_ASSIGN(TypeName) \
- TypeName(TypeName&&) = delete; \
- TypeName& operator=(TypeName&&) = delete; \
- DISALLOW_COPY_AND_ASSIGN(TypeName)
-
// Explicitly declare all implicit constructors as deleted, namely the
// default constructor, copy constructor and operator= functions.
// This is especially useful for classes containing only static methods.
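To illustrate the updated comment: a class using DISALLOW_COPY_AND_ASSIGN can still opt back into move semantics by defaulting the move operations manually (Buffer is a hypothetical example type):

class Buffer {
 public:
  Buffer() = default;
  // The macro below deletes the copy operations, which also suppresses the
  // implicit moves; restore the moves explicitly.
  Buffer(Buffer&&) = default;
  Buffer& operator=(Buffer&&) = default;

 private:
  DISALLOW_COPY_AND_ASSIGN(Buffer);
};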
@@ -385,7 +381,7 @@ constexpr inline T RoundUp(T x) {
}
template <typename T, typename U>
-inline bool IsAligned(T value, U alignment) {
+constexpr inline bool IsAligned(T value, U alignment) {
return (value & (alignment - 1)) == 0;
}
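Making IsAligned constexpr lets alignment checks participate in constant expressions. A small sketch, assuming IsAligned lives in v8::base like the rest of this header:

// Compile-time layout checks become possible with the constexpr qualifier.
static_assert(v8::base::IsAligned(64, 16), "64 is 16-byte aligned");
static_assert(!v8::base::IsAligned(10, 4), "10 is not 4-byte aligned");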
diff --git a/deps/v8/src/base/optional.h b/deps/v8/src/base/optional.h
index 7dfef2d31f..9cadc0fb0b 100644
--- a/deps/v8/src/base/optional.h
+++ b/deps/v8/src/base/optional.h
@@ -10,6 +10,7 @@
#define V8_BASE_OPTIONAL_H_
#include <type_traits>
+#include <utility>
#include "src/base/logging.h"
@@ -34,33 +35,45 @@ constexpr in_place_t in_place = {};
// http://en.cppreference.com/w/cpp/utility/optional/nullopt
constexpr nullopt_t nullopt(0);
+// Forward declaration, which is referred to by the following helpers.
+template <typename T>
+class Optional;
+
namespace internal {
template <typename T, bool = std::is_trivially_destructible<T>::value>
-struct OptionalStorage {
+struct OptionalStorageBase {
// Initializing |empty_| here instead of using default member initializing
// to avoid errors in g++ 4.8.
- constexpr OptionalStorage() : empty_('\0') {}
-
- constexpr explicit OptionalStorage(const T& value)
- : is_null_(false), value_(value) {}
+ constexpr OptionalStorageBase() : empty_('\0') {}
- // TODO(alshabalin): Can't use 'constexpr' with std::move until C++14.
- explicit OptionalStorage(T&& value)
- : is_null_(false), value_(std::move(value)) {}
-
- // TODO(alshabalin): Can't use 'constexpr' with std::forward until C++14.
template <class... Args>
- explicit OptionalStorage(base::in_place_t, Args&&... args)
- : is_null_(false), value_(std::forward<Args>(args)...) {}
+ constexpr explicit OptionalStorageBase(in_place_t, Args&&... args)
+ : is_populated_(true), value_(std::forward<Args>(args)...) {}
// When T is not trivially destructible we must call its
// destructor before deallocating its memory.
- ~OptionalStorage() {
- if (!is_null_) value_.~T();
+  // Note that this hides the (implicitly declared) move constructor, which
+  // would be used for the constexpr move constructor in OptionalStorage<T>.
+  // That is needed iff T is trivially move constructible. However, the
+  // current is_trivially_{copy,move}_constructible implementation requires
+  // is_trivially_destructible (which looks like a bug, cf:
+  // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=51452 and
+  // http://cplusplus.github.io/LWG/lwg-active.html#2116), so it is not
+  // necessary for this case at the moment. Please also see the destructor
+  // comment in the "is_trivially_destructible = true" specialization below.
+ ~OptionalStorageBase() {
+ if (is_populated_) value_.~T();
+ }
+
+ template <class... Args>
+ void Init(Args&&... args) {
+ DCHECK(!is_populated_);
+ ::new (&value_) T(std::forward<Args>(args)...);
+ is_populated_ = true;
}
- bool is_null_ = true;
+ bool is_populated_ = false;
union {
// |empty_| exists so that the union will always be initialized, even when
// it doesn't contain a value. Union members must be initialized for the
@@ -71,29 +84,37 @@ struct OptionalStorage {
};
template <typename T>
-struct OptionalStorage<T, true> {
+struct OptionalStorageBase<T, true /* trivially destructible */> {
// Initializing |empty_| here instead of using default member initializing
// to avoid errors in g++ 4.8.
- constexpr OptionalStorage() : empty_('\0') {}
-
- constexpr explicit OptionalStorage(const T& value)
- : is_null_(false), value_(value) {}
-
- // TODO(alshabalin): Can't use 'constexpr' with std::move until C++14.
- explicit OptionalStorage(T&& value)
- : is_null_(false), value_(std::move(value)) {}
+ constexpr OptionalStorageBase() : empty_('\0') {}
- // TODO(alshabalin): Can't use 'constexpr' with std::forward until C++14.
template <class... Args>
- explicit OptionalStorage(base::in_place_t, Args&&... args)
- : is_null_(false), value_(std::forward<Args>(args)...) {}
+ constexpr explicit OptionalStorageBase(in_place_t, Args&&... args)
+ : is_populated_(true), value_(std::forward<Args>(args)...) {}
// When T is trivially destructible (i.e. its destructor does nothing) there
- // is no need to call it. Explicitly defaulting the destructor means it's not
- // user-provided. Those two together make this destructor trivial.
- ~OptionalStorage() = default;
+  // is no need to call it. The implicitly defined destructor is trivial,
+  // because both members (a bool and a union containing only variants which
+  // are trivially destructible) are trivially destructible.
+  // An explicitly defaulted destructor would also be trivial, but do not use
+  // it here, because it would hide the implicit move constructor, which is
+  // needed to implement the constexpr move constructor in OptionalStorage iff
+  // T is trivially move constructible. Note that if T is trivially move
+  // constructible, the move constructor of OptionalStorageBase<T> is also
+  // implicitly defined and is a trivial move constructor. If T is not
+  // trivially move constructible, declaring neither a move constructor nor a
+  // destructor here effectively deletes the move constructor, which works
+  // because no move constructor of OptionalStorage refers to it in that case.
- bool is_null_ = true;
+ template <class... Args>
+ void Init(Args&&... args) {
+ DCHECK(!is_populated_);
+ ::new (&value_) T(std::forward<Args>(args)...);
+ is_populated_ = true;
+ }
+
+ bool is_populated_ = false;
union {
// |empty_| exists so that the union will always be initialized, even when
// it doesn't contain a value. Union members must be initialized for the
@@ -103,8 +124,289 @@ struct OptionalStorage<T, true> {
};
};
+// Implement conditional constexpr copy and move constructors. These are
+// constexpr iff is_trivially_{copy,move}_constructible<T>::value is true,
+// respectively. If so, the corresponding constructor is defined as
+// "= default;", which generates a constexpr constructor (in this case, the
+// condition for constexpr-ness is satisfied because the base class also has
+// compiler-generated constexpr {copy,move} constructors). Note that
+// placement-new is prohibited in constexpr contexts.
+#if defined(__GNUC__) && __GNUC__ < 5
+// gcc <5 does not implement std::is_trivially_copy_constructible.
+// Conservatively assume false for this configuration.
+// TODO(clemensh): Remove this once we drop support for gcc <5.
+#define TRIVIALLY_COPY_CONSTRUCTIBLE(T) false
+#define TRIVIALLY_MOVE_CONSTRUCTIBLE(T) false
+#else
+#define TRIVIALLY_COPY_CONSTRUCTIBLE(T) \
+ std::is_trivially_copy_constructible<T>::value
+#define TRIVIALLY_MOVE_CONSTRUCTIBLE(T) \
+ std::is_trivially_move_constructible<T>::value
+#endif
+template <typename T, bool = TRIVIALLY_COPY_CONSTRUCTIBLE(T),
+ bool = TRIVIALLY_MOVE_CONSTRUCTIBLE(T)>
+#undef TRIVIALLY_COPY_CONSTRUCTIBLE
+#undef TRIVIALLY_MOVE_CONSTRUCTIBLE
+struct OptionalStorage : OptionalStorageBase<T> {
+  // This is the case where T is neither trivially copy nor trivially move
+  // constructible. The other cases are defined below as specializations.
+
+  // Accessing the members of a template base class requires explicit
+  // using declarations.
+ using OptionalStorageBase<T>::is_populated_;
+ using OptionalStorageBase<T>::value_;
+ using OptionalStorageBase<T>::Init;
+
+ // Inherit constructors (specifically, the in_place constructor).
+ using OptionalStorageBase<T>::OptionalStorageBase;
+
+  // A user-declared constructor suppresses the implicit default constructor,
+  // so define it explicitly.
+ OptionalStorage() = default;
+
+ OptionalStorage(const OptionalStorage& other) V8_NOEXCEPT {
+ if (other.is_populated_) Init(other.value_);
+ }
+
+ OptionalStorage(OptionalStorage&& other) V8_NOEXCEPT {
+ if (other.is_populated_) Init(std::move(other.value_));
+ }
+};
+
+template <typename T>
+struct OptionalStorage<T, true /* trivially copy constructible */,
+ false /* trivially move constructible */>
+ : OptionalStorageBase<T> {
+ using OptionalStorageBase<T>::is_populated_;
+ using OptionalStorageBase<T>::value_;
+ using OptionalStorageBase<T>::Init;
+ using OptionalStorageBase<T>::OptionalStorageBase;
+
+ OptionalStorage() = default;
+ OptionalStorage(const OptionalStorage& other) V8_NOEXCEPT = default;
+
+ OptionalStorage(OptionalStorage&& other) V8_NOEXCEPT {
+ if (other.is_populated_) Init(std::move(other.value_));
+ }
+};
+
+template <typename T>
+struct OptionalStorage<T, false /* trivially copy constructible */,
+ true /* trivially move constructible */>
+ : OptionalStorageBase<T> {
+ using OptionalStorageBase<T>::is_populated_;
+ using OptionalStorageBase<T>::value_;
+ using OptionalStorageBase<T>::Init;
+ using OptionalStorageBase<T>::OptionalStorageBase;
+
+ OptionalStorage() = default;
+ OptionalStorage(OptionalStorage&& other) V8_NOEXCEPT = default;
+
+ OptionalStorage(const OptionalStorage& other) V8_NOEXCEPT {
+ if (other.is_populated_) Init(other.value_);
+ }
+};
+
+template <typename T>
+struct OptionalStorage<T, true /* trivially copy constructible */,
+ true /* trivially move constructible */>
+ : OptionalStorageBase<T> {
+  // If T is both trivially copy and move constructible, no user-defined
+  // constructors are necessary, so simply inheriting the constructors from
+  // the base class works.
+ using OptionalStorageBase<T>::OptionalStorageBase;
+};
+
+// Base class to support conditionally usable copy-/move- constructors
+// and assign operators.
+template <typename T>
+class OptionalBase {
+  // This class provides the implementation rather than a public API, so
+  // everything should be hidden. We would normally use composition, but we
+  // cannot in this case because of C++ language restrictions.
+ protected:
+ constexpr OptionalBase() = default;
+ constexpr OptionalBase(const OptionalBase& other) V8_NOEXCEPT = default;
+ constexpr OptionalBase(OptionalBase&& other) V8_NOEXCEPT = default;
+
+ template <class... Args>
+ constexpr explicit OptionalBase(in_place_t, Args&&... args)
+ : storage_(in_place, std::forward<Args>(args)...) {}
+
+ // Implementation of converting constructors.
+ template <typename U>
+ explicit OptionalBase(const OptionalBase<U>& other) V8_NOEXCEPT {
+ if (other.storage_.is_populated_) storage_.Init(other.storage_.value_);
+ }
+
+ template <typename U>
+ explicit OptionalBase(OptionalBase<U>&& other) V8_NOEXCEPT {
+ if (other.storage_.is_populated_)
+ storage_.Init(std::move(other.storage_.value_));
+ }
+
+ ~OptionalBase() = default;
+
+ OptionalBase& operator=(const OptionalBase& other) V8_NOEXCEPT {
+ CopyAssign(other);
+ return *this;
+ }
+
+ OptionalBase& operator=(OptionalBase&& other) V8_NOEXCEPT {
+ MoveAssign(std::move(other));
+ return *this;
+ }
+
+ template <typename U>
+ void CopyAssign(const OptionalBase<U>& other) {
+ if (other.storage_.is_populated_)
+ InitOrAssign(other.storage_.value_);
+ else
+ FreeIfNeeded();
+ }
+
+ template <typename U>
+ void MoveAssign(OptionalBase<U>&& other) {
+ if (other.storage_.is_populated_)
+ InitOrAssign(std::move(other.storage_.value_));
+ else
+ FreeIfNeeded();
+ }
+
+ template <typename U>
+ void InitOrAssign(U&& value) {
+ if (storage_.is_populated_)
+ storage_.value_ = std::forward<U>(value);
+ else
+ storage_.Init(std::forward<U>(value));
+ }
+
+ void FreeIfNeeded() {
+ if (!storage_.is_populated_) return;
+ storage_.value_.~T();
+ storage_.is_populated_ = false;
+ }
+
+  // To implement the converting constructors, allow access to OptionalBase
+  // instantiations for other types.
+ template <typename U>
+ friend class OptionalBase;
+
+ OptionalStorage<T> storage_;
+};
+
+// The following {Copy,Move}{Constructible,Assignable} structs are helpers to
+// implement constructor/assign-operator overloading. Specifically, if T is
+// not movable but copyable, Optional<T>'s move constructor should not
+// participate in overload resolution. This inheritance trick implements that.
+template <bool is_copy_constructible>
+struct CopyConstructible {};
+
+template <>
+struct CopyConstructible<false> {
+ constexpr CopyConstructible() = default;
+ constexpr CopyConstructible(const CopyConstructible&) = delete;
+ constexpr CopyConstructible(CopyConstructible&&) V8_NOEXCEPT = default;
+ CopyConstructible& operator=(const CopyConstructible&) V8_NOEXCEPT = default;
+ CopyConstructible& operator=(CopyConstructible&&) V8_NOEXCEPT = default;
+};
+
+template <bool is_move_constructible>
+struct MoveConstructible {};
+
+template <>
+struct MoveConstructible<false> {
+ constexpr MoveConstructible() = default;
+ constexpr MoveConstructible(const MoveConstructible&) V8_NOEXCEPT = default;
+ constexpr MoveConstructible(MoveConstructible&&) = delete;
+ MoveConstructible& operator=(const MoveConstructible&) V8_NOEXCEPT = default;
+ MoveConstructible& operator=(MoveConstructible&&) V8_NOEXCEPT = default;
+};
+
+template <bool is_copy_assignable>
+struct CopyAssignable {};
+
+template <>
+struct CopyAssignable<false> {
+ constexpr CopyAssignable() = default;
+ constexpr CopyAssignable(const CopyAssignable&) V8_NOEXCEPT = default;
+ constexpr CopyAssignable(CopyAssignable&&) V8_NOEXCEPT = default;
+ CopyAssignable& operator=(const CopyAssignable&) = delete;
+ CopyAssignable& operator=(CopyAssignable&&) V8_NOEXCEPT = default;
+};
+
+template <bool is_move_assignable>
+struct MoveAssignable {};
+
+template <>
+struct MoveAssignable<false> {
+ constexpr MoveAssignable() = default;
+ constexpr MoveAssignable(const MoveAssignable&) V8_NOEXCEPT = default;
+ constexpr MoveAssignable(MoveAssignable&&) V8_NOEXCEPT = default;
+ MoveAssignable& operator=(const MoveAssignable&) V8_NOEXCEPT = default;
+ MoveAssignable& operator=(MoveAssignable&&) = delete;
+};
+
+// Helper to conditionally enable converting constructors and assign operators.
+template <typename T, typename U>
+struct IsConvertibleFromOptional
+ : std::integral_constant<
+ bool, std::is_constructible<T, Optional<U>&>::value ||
+ std::is_constructible<T, const Optional<U>&>::value ||
+ std::is_constructible<T, Optional<U>&&>::value ||
+ std::is_constructible<T, const Optional<U>&&>::value ||
+ std::is_convertible<Optional<U>&, T>::value ||
+ std::is_convertible<const Optional<U>&, T>::value ||
+ std::is_convertible<Optional<U>&&, T>::value ||
+ std::is_convertible<const Optional<U>&&, T>::value> {};
+
+template <typename T, typename U>
+struct IsAssignableFromOptional
+ : std::integral_constant<
+ bool, IsConvertibleFromOptional<T, U>::value ||
+ std::is_assignable<T&, Optional<U>&>::value ||
+ std::is_assignable<T&, const Optional<U>&>::value ||
+ std::is_assignable<T&, Optional<U>&&>::value ||
+ std::is_assignable<T&, const Optional<U>&&>::value> {};
+
+// Forward compatibility for C++17.
+// Introduce an extra nested namespace to avoid leaking the using std::swap
+// declaration.
+namespace swappable_impl {
+using std::swap;
+
+struct IsSwappableImpl {
+ // Tests if swap can be called. Check<T&>(0) returns true_type iff swap
+ // is available for T. Otherwise, Check's overload resolution falls back
+  // to Check(...) declared below thanks to SFINAE, so it returns false_type.
+ template <typename T>
+ static auto Check(int i)
+ -> decltype(swap(std::declval<T>(), std::declval<T>()), std::true_type());
+
+ template <typename T>
+ static std::false_type Check(...);
+};
+} // namespace swappable_impl
+
+template <typename T>
+struct IsSwappable : decltype(swappable_impl::IsSwappableImpl::Check<T&>(0)) {};
+
+// Forward compatibility for C++20.
+template <typename T>
+using RemoveCvRefT =
+ typename std::remove_cv<typename std::remove_reference<T>::type>::type;
+
} // namespace internal
+// On Windows, the empty base class optimization does not apply by default,
+// which means that even if the base class is an empty struct, it still
+// consumes one byte for its body. __declspec(empty_bases) enables the
+// optimization. See:
+// https://blogs.msdn.microsoft.com/vcblog/2016/03/30/optimizing-the-layout-of-empty-base-classes-in-vs2015-update-2-3/
+#ifdef OS_WIN
+#define OPTIONAL_DECLSPEC_EMPTY_BASES __declspec(empty_bases)
+#else
+#define OPTIONAL_DECLSPEC_EMPTY_BASES
+#endif
+
// base::Optional is a Chromium version of the C++17 optional class:
// std::optional documentation:
// http://en.cppreference.com/w/cpp/utility/optional
@@ -112,128 +414,213 @@ struct OptionalStorage<T, true> {
// https://chromium.googlesource.com/chromium/src/+/master/docs/optional.md
//
// These are the differences between the specification and the implementation:
-// - The constructor and emplace method using initializer_list are not
-// implemented because 'initializer_list' is banned from Chromium.
// - Constructors do not use 'constexpr' as it is a C++14 extension.
// - 'constexpr' might be missing in some places for reasons specified locally.
// - No exceptions are thrown, because they are banned from Chromium.
+// All copy/move constructors or assignment operators are marked V8_NOEXCEPT.
// - All the non-members are in the 'base' namespace instead of 'std'.
+//
+// Note that T cannot have a constructor T(Optional<T>) etc. Optional<T>
+// inspects T's constructors (specifically via IsConvertibleFromOptional), and
+// the check whether T is constructible from Optional<T> is then recursive, so
+// it does not work. As of Feb 2018, the C++17 std::optional implementations
+// in both clang and gcc have the same limitation. MSVC's SFINAE appears to
+// behave differently, but it reports an error, too.
template <typename T>
-class Optional {
+class OPTIONAL_DECLSPEC_EMPTY_BASES Optional
+ : public internal::OptionalBase<T>,
+ public internal::CopyConstructible<std::is_copy_constructible<T>::value>,
+ public internal::MoveConstructible<std::is_move_constructible<T>::value>,
+ public internal::CopyAssignable<std::is_copy_constructible<T>::value &&
+ std::is_copy_assignable<T>::value>,
+ public internal::MoveAssignable<std::is_move_constructible<T>::value &&
+ std::is_move_assignable<T>::value> {
public:
+#undef OPTIONAL_DECLSPEC_EMPTY_BASES
using value_type = T;
+ // Defer default/copy/move constructor implementation to OptionalBase.
constexpr Optional() = default;
+ constexpr Optional(const Optional& other) V8_NOEXCEPT = default;
+ constexpr Optional(Optional&& other) V8_NOEXCEPT = default;
+
+ constexpr Optional(nullopt_t) {} // NOLINT(runtime/explicit)
+
+ // Converting copy constructor. "explicit" only if
+ // std::is_convertible<const U&, T>::value is false. It is implemented by
+  // declaring two near-identical constructors whose enable_if conditions
+  // differ, so that exactly one is chosen, thanks to SFINAE.
+ template <typename U,
+ typename std::enable_if<
+ std::is_constructible<T, const U&>::value &&
+ !internal::IsConvertibleFromOptional<T, U>::value &&
+ std::is_convertible<const U&, T>::value,
+ bool>::type = false>
+ Optional(const Optional<U>& other) V8_NOEXCEPT
+ : internal::OptionalBase<T>(other) {}
+
+ template <typename U,
+ typename std::enable_if<
+ std::is_constructible<T, const U&>::value &&
+ !internal::IsConvertibleFromOptional<T, U>::value &&
+ !std::is_convertible<const U&, T>::value,
+ bool>::type = false>
+ explicit Optional(const Optional<U>& other) V8_NOEXCEPT
+ : internal::OptionalBase<T>(other) {}
+
+ // Converting move constructor. Similar to converting copy constructor,
+ // declaring two (explicit and non-explicit) constructors.
+ template <typename U,
+ typename std::enable_if<
+ std::is_constructible<T, U&&>::value &&
+ !internal::IsConvertibleFromOptional<T, U>::value &&
+ std::is_convertible<U&&, T>::value,
+ bool>::type = false>
+ Optional(Optional<U>&& other) V8_NOEXCEPT
+ : internal::OptionalBase<T>(std::move(other)) {}
+
+ template <typename U,
+ typename std::enable_if<
+ std::is_constructible<T, U&&>::value &&
+ !internal::IsConvertibleFromOptional<T, U>::value &&
+ !std::is_convertible<U&&, T>::value,
+ bool>::type = false>
+ explicit Optional(Optional<U>&& other) V8_NOEXCEPT
+ : internal::OptionalBase<T>(std::move(other)) {}
- constexpr Optional(base::nullopt_t) {} // NOLINT(runtime/explicit)
-
- Optional(const Optional& other) {
- if (!other.storage_.is_null_) Init(other.value());
- }
-
- Optional(Optional&& other) V8_NOEXCEPT {
- if (!other.storage_.is_null_) Init(std::move(other.value()));
- }
-
- constexpr Optional(const T& value) // NOLINT(runtime/explicit)
- : storage_(value) {}
-
- // TODO(alshabalin): Can't use 'constexpr' with std::move until C++14.
- Optional(T&& value) // NOLINT(runtime/explicit)
- : storage_(std::move(value)) {}
-
- // TODO(alshabalin): Can't use 'constexpr' with std::forward until C++14.
template <class... Args>
- explicit Optional(base::in_place_t, Args&&... args)
- : storage_(base::in_place, std::forward<Args>(args)...) {}
+ constexpr explicit Optional(in_place_t, Args&&... args)
+ : internal::OptionalBase<T>(in_place, std::forward<Args>(args)...) {}
+
+ template <class U, class... Args,
+ class = typename std::enable_if<std::is_constructible<
+ value_type, std::initializer_list<U>&, Args...>::value>::type>
+ constexpr explicit Optional(in_place_t, std::initializer_list<U> il,
+ Args&&... args)
+ : internal::OptionalBase<T>(in_place, il, std::forward<Args>(args)...) {}
+
+ // Forward value constructor. Similar to converting constructors,
+ // conditionally explicit.
+ template <
+ typename U = value_type,
+ typename std::enable_if<
+ std::is_constructible<T, U&&>::value &&
+ !std::is_same<internal::RemoveCvRefT<U>, in_place_t>::value &&
+ !std::is_same<internal::RemoveCvRefT<U>, Optional<T>>::value &&
+ std::is_convertible<U&&, T>::value,
+ bool>::type = false>
+ constexpr Optional(U&& value) // NOLINT(runtime/explicit)
+ : internal::OptionalBase<T>(in_place, std::forward<U>(value)) {}
+
+ template <
+ typename U = value_type,
+ typename std::enable_if<
+ std::is_constructible<T, U&&>::value &&
+ !std::is_same<internal::RemoveCvRefT<U>, in_place_t>::value &&
+ !std::is_same<internal::RemoveCvRefT<U>, Optional<T>>::value &&
+ !std::is_convertible<U&&, T>::value,
+ bool>::type = false>
+ constexpr explicit Optional(U&& value)
+ : internal::OptionalBase<T>(in_place, std::forward<U>(value)) {}
~Optional() = default;
- Optional& operator=(base::nullopt_t) {
+ // Defer copy-/move- assign operator implementation to OptionalBase.
+ Optional& operator=(const Optional& other) V8_NOEXCEPT = default;
+ Optional& operator=(Optional&& other) V8_NOEXCEPT = default;
+
+ Optional& operator=(nullopt_t) {
FreeIfNeeded();
return *this;
}
- Optional& operator=(const Optional& other) {
- if (other.storage_.is_null_) {
- FreeIfNeeded();
- return *this;
- }
-
- InitOrAssign(other.value());
+ // Perfect-forwarded assignment.
+ template <typename U>
+ typename std::enable_if<
+ !std::is_same<internal::RemoveCvRefT<U>, Optional<T>>::value &&
+ std::is_constructible<T, U>::value &&
+ std::is_assignable<T&, U>::value &&
+ (!std::is_scalar<T>::value ||
+ !std::is_same<typename std::decay<U>::type, T>::value),
+ Optional&>::type
+ operator=(U&& value) V8_NOEXCEPT {
+ InitOrAssign(std::forward<U>(value));
return *this;
}
- Optional& operator=(Optional&& other) V8_NOEXCEPT {
- if (other.storage_.is_null_) {
- FreeIfNeeded();
- return *this;
- }
-
- InitOrAssign(std::move(other.value()));
+ // Copy assign the state of other.
+ template <typename U>
+ typename std::enable_if<!internal::IsAssignableFromOptional<T, U>::value &&
+ std::is_constructible<T, const U&>::value &&
+ std::is_assignable<T&, const U&>::value,
+ Optional&>::type
+ operator=(const Optional<U>& other) V8_NOEXCEPT {
+ CopyAssign(other);
return *this;
}
- template <class U>
- typename std::enable_if<std::is_same<std::decay<U>, T>::value,
+ // Move assign the state of other.
+ template <typename U>
+ typename std::enable_if<!internal::IsAssignableFromOptional<T, U>::value &&
+ std::is_constructible<T, U>::value &&
+ std::is_assignable<T&, U>::value,
Optional&>::type
- operator=(U&& value) {
- InitOrAssign(std::forward<U>(value));
+ operator=(Optional<U>&& other) V8_NOEXCEPT {
+ MoveAssign(std::move(other));
return *this;
}
- // TODO(mlamouri): can't use 'constexpr' with CHECK.
const T* operator->() const {
- CHECK(!storage_.is_null_);
- return &value();
+ DCHECK(storage_.is_populated_);
+ return &storage_.value_;
}
- // TODO(mlamouri): using 'constexpr' here breaks compiler that assume it was
- // meant to be 'constexpr const'.
T* operator->() {
- CHECK(!storage_.is_null_);
- return &value();
+ DCHECK(storage_.is_populated_);
+ return &storage_.value_;
}
- constexpr const T& operator*() const & { return value(); }
+ const T& operator*() const & {
+ DCHECK(storage_.is_populated_);
+ return storage_.value_;
+ }
- // TODO(mlamouri): using 'constexpr' here breaks compiler that assume it was
- // meant to be 'constexpr const'.
- T& operator*() & { return value(); }
+ T& operator*() & {
+ DCHECK(storage_.is_populated_);
+ return storage_.value_;
+ }
- constexpr const T&& operator*() const && { return std::move(value()); }
+ const T&& operator*() const && {
+ DCHECK(storage_.is_populated_);
+ return std::move(storage_.value_);
+ }
- // TODO(mlamouri): using 'constexpr' here breaks compiler that assume it was
- // meant to be 'constexpr const'.
- T&& operator*() && { return std::move(value()); }
+ T&& operator*() && {
+ DCHECK(storage_.is_populated_);
+ return std::move(storage_.value_);
+ }
- constexpr explicit operator bool() const { return !storage_.is_null_; }
+ constexpr explicit operator bool() const { return storage_.is_populated_; }
- constexpr bool has_value() const { return !storage_.is_null_; }
+ constexpr bool has_value() const { return storage_.is_populated_; }
- // TODO(mlamouri): using 'constexpr' here breaks compiler that assume it was
- // meant to be 'constexpr const'.
T& value() & {
- CHECK(!storage_.is_null_);
+ CHECK(storage_.is_populated_);
return storage_.value_;
}
- // TODO(mlamouri): can't use 'constexpr' with CHECK.
const T& value() const & {
- CHECK(!storage_.is_null_);
+ CHECK(storage_.is_populated_);
return storage_.value_;
}
- // TODO(mlamouri): using 'constexpr' here breaks compiler that assume it was
- // meant to be 'constexpr const'.
T&& value() && {
- CHECK(!storage_.is_null_);
+ CHECK(storage_.is_populated_);
return std::move(storage_.value_);
}
- // TODO(mlamouri): can't use 'constexpr' with CHECK.
const T&& value() const && {
- CHECK(!storage_.is_null_);
+ CHECK(storage_.is_populated_);
return std::move(storage_.value_);
}
@@ -244,8 +631,9 @@ class Optional {
// "T must be copy constructible");
static_assert(std::is_convertible<U, T>::value,
"U must be convertible to T");
- return storage_.is_null_ ? static_cast<T>(std::forward<U>(default_value))
- : value();
+ return storage_.is_populated_
+ ? storage_.value_
+ : static_cast<T>(std::forward<U>(default_value));
}
template <class U>
@@ -255,25 +643,26 @@ class Optional {
// "T must be move constructible");
static_assert(std::is_convertible<U, T>::value,
"U must be convertible to T");
- return storage_.is_null_ ? static_cast<T>(std::forward<U>(default_value))
- : std::move(value());
+ return storage_.is_populated_
+ ? std::move(storage_.value_)
+ : static_cast<T>(std::forward<U>(default_value));
}
void swap(Optional& other) {
- if (storage_.is_null_ && other.storage_.is_null_) return;
+ if (!storage_.is_populated_ && !other.storage_.is_populated_) return;
- if (storage_.is_null_ != other.storage_.is_null_) {
- if (storage_.is_null_) {
- Init(std::move(other.storage_.value_));
- other.FreeIfNeeded();
- } else {
- other.Init(std::move(storage_.value_));
+ if (storage_.is_populated_ != other.storage_.is_populated_) {
+ if (storage_.is_populated_) {
+ other.storage_.Init(std::move(storage_.value_));
FreeIfNeeded();
+ } else {
+ storage_.Init(std::move(other.storage_.value_));
+ other.FreeIfNeeded();
}
return;
}
- CHECK(!storage_.is_null_ && !other.storage_.is_null_);
+ DCHECK(storage_.is_populated_ && other.storage_.is_populated_);
using std::swap;
swap(**this, *other);
}
@@ -281,202 +670,196 @@ class Optional {
void reset() { FreeIfNeeded(); }
template <class... Args>
- void emplace(Args&&... args) {
+ T& emplace(Args&&... args) {
FreeIfNeeded();
- Init(std::forward<Args>(args)...);
- }
-
- private:
- void Init(const T& value) {
- CHECK(storage_.is_null_);
- new (&storage_.value_) T(value);
- storage_.is_null_ = false;
- }
-
- void Init(T&& value) {
- CHECK(storage_.is_null_);
- new (&storage_.value_) T(std::move(value));
- storage_.is_null_ = false;
- }
-
- template <class... Args>
- void Init(Args&&... args) {
- CHECK(storage_.is_null_);
- new (&storage_.value_) T(std::forward<Args>(args)...);
- storage_.is_null_ = false;
- }
-
- void InitOrAssign(const T& value) {
- if (storage_.is_null_)
- Init(value);
- else
- storage_.value_ = value;
- }
-
- void InitOrAssign(T&& value) {
- if (storage_.is_null_)
- Init(std::move(value));
- else
- storage_.value_ = std::move(value);
+ storage_.Init(std::forward<Args>(args)...);
+ return storage_.value_;
}
- void FreeIfNeeded() {
- if (storage_.is_null_) return;
- storage_.value_.~T();
- storage_.is_null_ = true;
+ template <class U, class... Args>
+ typename std::enable_if<
+ std::is_constructible<T, std::initializer_list<U>&, Args&&...>::value,
+ T&>::type
+ emplace(std::initializer_list<U> il, Args&&... args) {
+ FreeIfNeeded();
+ storage_.Init(il, std::forward<Args>(args)...);
+ return storage_.value_;
}
- internal::OptionalStorage<T> storage_;
+ private:
+  // Accessing a template base class's protected members requires explicit
+  // using declarations.
+ using internal::OptionalBase<T>::CopyAssign;
+ using internal::OptionalBase<T>::FreeIfNeeded;
+ using internal::OptionalBase<T>::InitOrAssign;
+ using internal::OptionalBase<T>::MoveAssign;
+ using internal::OptionalBase<T>::storage_;
};
-template <class T>
-constexpr bool operator==(const Optional<T>& lhs, const Optional<T>& rhs) {
- return !!lhs != !!rhs ? false : lhs == nullopt || (*lhs == *rhs);
+// The following defines the comparison operators. The definitions follow
+// http://en.cppreference.com/w/cpp/utility/optional/operator_cmp
+// except that casting to bool is replaced by has_value() to meet the
+// chromium style guide.
+template <class T, class U>
+bool operator==(const Optional<T>& lhs, const Optional<U>& rhs) {
+ if (lhs.has_value() != rhs.has_value()) return false;
+ if (!lhs.has_value()) return true;
+ return *lhs == *rhs;
}
-template <class T>
-constexpr bool operator!=(const Optional<T>& lhs, const Optional<T>& rhs) {
- return !(lhs == rhs);
+template <class T, class U>
+bool operator!=(const Optional<T>& lhs, const Optional<U>& rhs) {
+ if (lhs.has_value() != rhs.has_value()) return true;
+ if (!lhs.has_value()) return false;
+ return *lhs != *rhs;
}
-template <class T>
-constexpr bool operator<(const Optional<T>& lhs, const Optional<T>& rhs) {
- return rhs == nullopt ? false : (lhs == nullopt ? true : *lhs < *rhs);
+template <class T, class U>
+bool operator<(const Optional<T>& lhs, const Optional<U>& rhs) {
+ if (!rhs.has_value()) return false;
+ if (!lhs.has_value()) return true;
+ return *lhs < *rhs;
}
-template <class T>
-constexpr bool operator<=(const Optional<T>& lhs, const Optional<T>& rhs) {
- return !(rhs < lhs);
+template <class T, class U>
+bool operator<=(const Optional<T>& lhs, const Optional<U>& rhs) {
+ if (!lhs.has_value()) return true;
+ if (!rhs.has_value()) return false;
+ return *lhs <= *rhs;
}
-template <class T>
-constexpr bool operator>(const Optional<T>& lhs, const Optional<T>& rhs) {
- return rhs < lhs;
+template <class T, class U>
+bool operator>(const Optional<T>& lhs, const Optional<U>& rhs) {
+ if (!lhs.has_value()) return false;
+ if (!rhs.has_value()) return true;
+ return *lhs > *rhs;
}
-template <class T>
-constexpr bool operator>=(const Optional<T>& lhs, const Optional<T>& rhs) {
- return !(lhs < rhs);
+template <class T, class U>
+bool operator>=(const Optional<T>& lhs, const Optional<U>& rhs) {
+ if (!rhs.has_value()) return true;
+ if (!lhs.has_value()) return false;
+ return *lhs >= *rhs;
}
template <class T>
-constexpr bool operator==(const Optional<T>& opt, base::nullopt_t) {
+constexpr bool operator==(const Optional<T>& opt, nullopt_t) {
return !opt;
}
template <class T>
-constexpr bool operator==(base::nullopt_t, const Optional<T>& opt) {
+constexpr bool operator==(nullopt_t, const Optional<T>& opt) {
return !opt;
}
template <class T>
-constexpr bool operator!=(const Optional<T>& opt, base::nullopt_t) {
- return !!opt;
+constexpr bool operator!=(const Optional<T>& opt, nullopt_t) {
+ return opt.has_value();
}
template <class T>
-constexpr bool operator!=(base::nullopt_t, const Optional<T>& opt) {
- return !!opt;
+constexpr bool operator!=(nullopt_t, const Optional<T>& opt) {
+ return opt.has_value();
}
template <class T>
-constexpr bool operator<(const Optional<T>& opt, base::nullopt_t) {
+constexpr bool operator<(const Optional<T>& opt, nullopt_t) {
return false;
}
template <class T>
-constexpr bool operator<(base::nullopt_t, const Optional<T>& opt) {
- return !!opt;
+constexpr bool operator<(nullopt_t, const Optional<T>& opt) {
+ return opt.has_value();
}
template <class T>
-constexpr bool operator<=(const Optional<T>& opt, base::nullopt_t) {
+constexpr bool operator<=(const Optional<T>& opt, nullopt_t) {
return !opt;
}
template <class T>
-constexpr bool operator<=(base::nullopt_t, const Optional<T>& opt) {
+constexpr bool operator<=(nullopt_t, const Optional<T>& opt) {
return true;
}
template <class T>
-constexpr bool operator>(const Optional<T>& opt, base::nullopt_t) {
- return !!opt;
+constexpr bool operator>(const Optional<T>& opt, nullopt_t) {
+ return opt.has_value();
}
template <class T>
-constexpr bool operator>(base::nullopt_t, const Optional<T>& opt) {
+constexpr bool operator>(nullopt_t, const Optional<T>& opt) {
return false;
}
template <class T>
-constexpr bool operator>=(const Optional<T>& opt, base::nullopt_t) {
+constexpr bool operator>=(const Optional<T>& opt, nullopt_t) {
return true;
}
template <class T>
-constexpr bool operator>=(base::nullopt_t, const Optional<T>& opt) {
+constexpr bool operator>=(nullopt_t, const Optional<T>& opt) {
return !opt;
}
-template <class T>
-constexpr bool operator==(const Optional<T>& opt, const T& value) {
- return opt != nullopt ? *opt == value : false;
+template <class T, class U>
+constexpr bool operator==(const Optional<T>& opt, const U& value) {
+ return opt.has_value() ? *opt == value : false;
}
-template <class T>
-constexpr bool operator==(const T& value, const Optional<T>& opt) {
- return opt == value;
+template <class T, class U>
+constexpr bool operator==(const U& value, const Optional<T>& opt) {
+ return opt.has_value() ? value == *opt : false;
}
-template <class T>
-constexpr bool operator!=(const Optional<T>& opt, const T& value) {
- return !(opt == value);
+template <class T, class U>
+constexpr bool operator!=(const Optional<T>& opt, const U& value) {
+ return opt.has_value() ? *opt != value : true;
}
-template <class T>
-constexpr bool operator!=(const T& value, const Optional<T>& opt) {
- return !(opt == value);
+template <class T, class U>
+constexpr bool operator!=(const U& value, const Optional<T>& opt) {
+ return opt.has_value() ? value != *opt : true;
}
-template <class T>
-constexpr bool operator<(const Optional<T>& opt, const T& value) {
- return opt != nullopt ? *opt < value : true;
+template <class T, class U>
+constexpr bool operator<(const Optional<T>& opt, const U& value) {
+ return opt.has_value() ? *opt < value : true;
}
-template <class T>
-constexpr bool operator<(const T& value, const Optional<T>& opt) {
- return opt != nullopt ? value < *opt : false;
+template <class T, class U>
+constexpr bool operator<(const U& value, const Optional<T>& opt) {
+ return opt.has_value() ? value < *opt : false;
}
-template <class T>
-constexpr bool operator<=(const Optional<T>& opt, const T& value) {
- return !(opt > value);
+template <class T, class U>
+constexpr bool operator<=(const Optional<T>& opt, const U& value) {
+ return opt.has_value() ? *opt <= value : true;
}
-template <class T>
-constexpr bool operator<=(const T& value, const Optional<T>& opt) {
- return !(value > opt);
+template <class T, class U>
+constexpr bool operator<=(const U& value, const Optional<T>& opt) {
+ return opt.has_value() ? value <= *opt : false;
}
-template <class T>
-constexpr bool operator>(const Optional<T>& opt, const T& value) {
- return value < opt;
+template <class T, class U>
+constexpr bool operator>(const Optional<T>& opt, const U& value) {
+ return opt.has_value() ? *opt > value : false;
}
-template <class T>
-constexpr bool operator>(const T& value, const Optional<T>& opt) {
- return opt < value;
+template <class T, class U>
+constexpr bool operator>(const U& value, const Optional<T>& opt) {
+ return opt.has_value() ? value > *opt : true;
}
-template <class T>
-constexpr bool operator>=(const Optional<T>& opt, const T& value) {
- return !(opt < value);
+template <class T, class U>
+constexpr bool operator>=(const Optional<T>& opt, const U& value) {
+ return opt.has_value() ? *opt >= value : false;
}
-template <class T>
-constexpr bool operator>=(const T& value, const Optional<T>& opt) {
- return !(value < opt);
+template <class T, class U>
+constexpr bool operator>=(const U& value, const Optional<T>& opt) {
+ return opt.has_value() ? value >= *opt : true;
}
template <class T>
@@ -484,8 +867,25 @@ constexpr Optional<typename std::decay<T>::type> make_optional(T&& value) {
return Optional<typename std::decay<T>::type>(std::forward<T>(value));
}
+template <class T, class... Args>
+constexpr Optional<T> make_optional(Args&&... args) {
+ return Optional<T>(in_place, std::forward<Args>(args)...);
+}
+
+template <class T, class U, class... Args>
+constexpr Optional<T> make_optional(std::initializer_list<U> il,
+ Args&&... args) {
+ return Optional<T>(in_place, il, std::forward<Args>(args)...);
+}
+
+// Partial specialization of a function template is not allowed. Adding
+// overloads to the std namespace is also not allowed (although specializing
+// templates in std is). Thus, this swap() overload is defined in the base
+// namespace instead.
template <class T>
-void swap(Optional<T>& lhs, Optional<T>& rhs) {
+typename std::enable_if<std::is_move_constructible<T>::value &&
+ internal::IsSwappable<T>::value>::type
+swap(Optional<T>& lhs, Optional<T>& rhs) {
lhs.swap(rhs);
}
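Taken together, the rewrite gives base::Optional converting constructors, perfect-forwarded assignment, initializer_list support for in_place construction, heterogeneous comparisons, and a value-returning emplace. A usage sketch under those assumptions:

#include <string>
#include <vector>

#include "src/base/optional.h"

using v8::base::in_place;
using v8::base::nullopt;
using v8::base::Optional;

void Demo() {
  // Converting construction: Optional<const char*> -> Optional<std::string>.
  Optional<const char*> raw("hello");
  Optional<std::string> str(raw);

  // in_place construction with an initializer_list, newly supported.
  Optional<std::vector<int>> v(in_place, {1, 2, 3});

  // emplace() now returns a reference to the freshly constructed value.
  std::vector<int>& ref = v.emplace(5, 0);  // five zeroes
  ref.push_back(7);

  // Heterogeneous comparison against a plain value.
  bool eq = (str == "hello");  // true
  (void)eq;

  str = nullopt;  // clears the optional
}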
diff --git a/deps/v8/src/base/overflowing-math.h b/deps/v8/src/base/overflowing-math.h
new file mode 100644
index 0000000000..fa197a886c
--- /dev/null
+++ b/deps/v8/src/base/overflowing-math.h
@@ -0,0 +1,89 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BASE_OVERFLOWING_MATH_H_
+#define V8_BASE_OVERFLOWING_MATH_H_
+
+#include <stdint.h>
+
+#include <cmath>
+#include <limits>
+#include <type_traits>
+
+#include "src/base/macros.h"
+
+namespace v8 {
+namespace base {
+
+// Helpers for performing overflowing arithmetic operations without relying
+// on C++ undefined behavior.
+#define ASSERT_SIGNED_INTEGER_TYPE(Type) \
+ static_assert(std::is_integral<Type>::value && std::is_signed<Type>::value, \
+ "use this for signed integer types");
+#define OP_WITH_WRAPAROUND(Name, OP) \
+ template <typename signed_type> \
+ inline signed_type Name##WithWraparound(signed_type a, signed_type b) { \
+ ASSERT_SIGNED_INTEGER_TYPE(signed_type); \
+ typedef typename std::make_unsigned<signed_type>::type unsigned_type; \
+ unsigned_type a_unsigned = static_cast<unsigned_type>(a); \
+ unsigned_type b_unsigned = static_cast<unsigned_type>(b); \
+ unsigned_type result = a_unsigned OP b_unsigned; \
+ return static_cast<signed_type>(result); \
+ }
+
+OP_WITH_WRAPAROUND(Add, +)
+OP_WITH_WRAPAROUND(Sub, -)
+OP_WITH_WRAPAROUND(Mul, *)
+
+// 16-bit integers are special due to C++'s implicit conversion rules.
+// See https://bugs.llvm.org/show_bug.cgi?id=25580.
+template <>
+inline int16_t MulWithWraparound(int16_t a, int16_t b) {
+ uint32_t a_unsigned = static_cast<uint32_t>(a);
+ uint32_t b_unsigned = static_cast<uint32_t>(b);
+ uint32_t result = a_unsigned * b_unsigned;
+ return static_cast<int16_t>(static_cast<uint16_t>(result));
+}
+
+#undef OP_WITH_WRAPAROUND
+
+template <typename signed_type>
+inline signed_type NegateWithWraparound(signed_type a) {
+ ASSERT_SIGNED_INTEGER_TYPE(signed_type);
+ if (a == std::numeric_limits<signed_type>::min()) return a;
+ return -a;
+}
+
+template <typename signed_type>
+inline signed_type ShlWithWraparound(signed_type a, signed_type b) {
+ ASSERT_SIGNED_INTEGER_TYPE(signed_type);
+ typedef typename std::make_unsigned<signed_type>::type unsigned_type;
+ const unsigned_type kMask = (sizeof(a) * 8) - 1;
+ return static_cast<signed_type>(static_cast<unsigned_type>(a) << (b & kMask));
+}
+
+#undef ASSERT_SIGNED_INTEGER_TYPE
+
+// Returns the quotient x/y, avoiding C++ undefined behavior if y == 0.
+template <typename T>
+inline T Divide(T x, T y) {
+ if (y != 0) return x / y;
+ if (x == 0 || x != x) return std::numeric_limits<T>::quiet_NaN();
+ if ((x >= 0) == (std::signbit(y) == 0)) {
+ return std::numeric_limits<T>::infinity();
+ }
+ return -std::numeric_limits<T>::infinity();
+}
+
+inline float Recip(float a) { return Divide(1.0f, a); }
+
+inline float RecipSqrt(float a) {
+ if (a != 0) return 1.0f / std::sqrt(a);
+ if (std::signbit(a) == 0) return std::numeric_limits<float>::infinity();
+ return -std::numeric_limits<float>::infinity();
+}
+
+} // namespace base
+} // namespace v8
+
+#endif // V8_BASE_OVERFLOWING_MATH_H_
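A short usage sketch of the new header; the comments show the two's-complement results the helpers are specified to produce:

#include <limits>

#include "src/base/overflowing-math.h"

void Demo() {
  using namespace v8::base;
  constexpr int32_t kMax = std::numeric_limits<int32_t>::max();
  constexpr int32_t kMin = std::numeric_limits<int32_t>::min();

  // Plain signed overflow is undefined behavior; these helpers compute on
  // the unsigned representation, so the result wraps around.
  int32_t a = AddWithWraparound(kMax, int32_t{1});         // kMin
  int32_t b = SubWithWraparound(kMin, int32_t{1});         // kMax
  int32_t c = MulWithWraparound(kMax, int32_t{2});         // -2
  int32_t d = NegateWithWraparound(kMin);                  // kMin (unchanged)
  int32_t e = ShlWithWraparound(int32_t{1}, int32_t{31});  // kMin

  // The floating-point helpers avoid dividing by zero directly.
  float f = Divide(1.0f, 0.0f);  // +infinity
  float g = Recip(-0.0f);        // -infinity
  (void)a; (void)b; (void)c; (void)d; (void)e; (void)f; (void)g;
}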
diff --git a/deps/v8/src/base/page-allocator.cc b/deps/v8/src/base/page-allocator.cc
index c25104739d..b339f528d2 100644
--- a/deps/v8/src/base/page-allocator.cc
+++ b/deps/v8/src/base/page-allocator.cc
@@ -58,5 +58,9 @@ bool PageAllocator::SetPermissions(void* address, size_t size,
address, size, static_cast<base::OS::MemoryPermission>(access));
}
+bool PageAllocator::DiscardSystemPages(void* address, size_t size) {
+ return base::OS::DiscardSystemPages(address, size);
+}
+
} // namespace base
} // namespace v8
diff --git a/deps/v8/src/base/page-allocator.h b/deps/v8/src/base/page-allocator.h
index 68e17db494..ced1156cca 100644
--- a/deps/v8/src/base/page-allocator.h
+++ b/deps/v8/src/base/page-allocator.h
@@ -36,6 +36,8 @@ class V8_BASE_EXPORT PageAllocator
bool SetPermissions(void* address, size_t size,
PageAllocator::Permission access) override;
+ bool DiscardSystemPages(void* address, size_t size) override;
+
private:
const size_t allocate_page_size_;
const size_t commit_page_size_;
diff --git a/deps/v8/src/base/platform/OWNERS b/deps/v8/src/base/platform/OWNERS
index cbaed6105d..7f64f4dedb 100644
--- a/deps/v8/src/base/platform/OWNERS
+++ b/deps/v8/src/base/platform/OWNERS
@@ -2,6 +2,7 @@ set noparent
hpayer@chromium.org
mlippautz@chromium.org
+ulan@chromium.org
per-file platform-fuchsia.cc=wez@chromium.org
diff --git a/deps/v8/src/base/platform/condition-variable.cc b/deps/v8/src/base/platform/condition-variable.cc
index 165651aae1..5ea70835ee 100644
--- a/deps/v8/src/base/platform/condition-variable.cc
+++ b/deps/v8/src/base/platform/condition-variable.cc
@@ -41,7 +41,7 @@ ConditionVariable::~ConditionVariable() {
// Darwin kernel. http://crbug.com/517681.
{
Mutex lock;
- LockGuard<Mutex> l(&lock);
+ MutexGuard l(&lock);
struct timespec ts;
ts.tv_sec = 0;
ts.tv_nsec = 1;
diff --git a/deps/v8/src/base/platform/condition-variable.h b/deps/v8/src/base/platform/condition-variable.h
index af00c6e5d5..2cdcb07b59 100644
--- a/deps/v8/src/base/platform/condition-variable.h
+++ b/deps/v8/src/base/platform/condition-variable.h
@@ -79,7 +79,6 @@ class V8_BASE_EXPORT ConditionVariable final {
DISALLOW_COPY_AND_ASSIGN(ConditionVariable);
};
-
// POD ConditionVariable initialized lazily (i.e. the first time Pointer() is
// called).
// Usage:
@@ -87,7 +86,7 @@ class V8_BASE_EXPORT ConditionVariable final {
// LAZY_CONDITION_VARIABLE_INITIALIZER;
//
// void my_function() {
-// LockGuard<Mutex> lock_guard(&my_mutex);
+// MutexGuard lock_guard(&my_mutex);
// my_condvar.Pointer()->Wait(&my_mutex);
// }
typedef LazyStaticInstance<
diff --git a/deps/v8/src/base/platform/mutex.h b/deps/v8/src/base/platform/mutex.h
index 6b4158f079..a69eee0bc6 100644
--- a/deps/v8/src/base/platform/mutex.h
+++ b/deps/v8/src/base/platform/mutex.h
@@ -92,13 +92,12 @@ class V8_BASE_EXPORT Mutex final {
DISALLOW_COPY_AND_ASSIGN(Mutex);
};
-
// POD Mutex initialized lazily (i.e. the first time Pointer() is called).
// Usage:
// static LazyMutex my_mutex = LAZY_MUTEX_INITIALIZER;
//
// void my_function() {
-// LockGuard<Mutex> guard(my_mutex.Pointer());
+// MutexGuard guard(my_mutex.Pointer());
// // Do something.
// }
//
@@ -225,6 +224,8 @@ class LockGuard final {
DISALLOW_COPY_AND_ASSIGN(LockGuard);
};
+using MutexGuard = LockGuard<Mutex>;
+
} // namespace base
} // namespace v8
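The new MutexGuard alias carries no behavior change; the remainder of this patch rewrites LockGuard<Mutex> call sites to use it. A minimal sketch of the pattern (g_mutex and NextId are hypothetical names):

#include "src/base/platform/mutex.h"

namespace {

v8::base::Mutex g_mutex;
int g_counter = 0;

int NextId() {
  // MutexGuard is just LockGuard<Mutex>: it locks in the constructor and
  // unlocks in the destructor (RAII).
  v8::base::MutexGuard guard(&g_mutex);
  return ++g_counter;
}

}  // namespace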
diff --git a/deps/v8/src/base/platform/platform-aix.cc b/deps/v8/src/base/platform/platform-aix.cc
index b4bba251aa..406462cdd2 100644
--- a/deps/v8/src/base/platform/platform-aix.cc
+++ b/deps/v8/src/base/platform/platform-aix.cc
@@ -36,6 +36,20 @@ namespace v8 {
namespace base {
+int64_t get_gmt_offset(const tm& localtm) {
+  // Replacement for the tm->tm_gmtoff field in glibc.
+  // Returns seconds east of UTC, taking DST into account.
+ struct timeval tv;
+ struct timezone tz;
+ int ret_code = gettimeofday(&tv, &tz);
+ // 0 = success, -1 = failure
+ DCHECK_NE(ret_code, -1);
+ if (ret_code == -1) {
+ return 0;
+ }
+ return (-tz.tz_minuteswest * 60) + (localtm.tm_isdst > 0 ? 3600 : 0);
+}
+
class AIXTimezoneCache : public PosixTimezoneCache {
const char* LocalTimezone(double time) override;
@@ -54,13 +68,15 @@ const char* AIXTimezoneCache::LocalTimezone(double time_ms) {
}
double AIXTimezoneCache::LocalTimeOffset(double time_ms, bool is_utc) {
- // On AIX, struct tm does not contain a tm_gmtoff field.
+  // On AIX, struct tm does not contain a tm_gmtoff field; use the
+  // get_gmt_offset helper function instead.
time_t utc = time(nullptr);
DCHECK_NE(utc, -1);
struct tm tm;
struct tm* loc = localtime_r(&utc, &tm);
DCHECK_NOT_NULL(loc);
- return static_cast<double>((mktime(loc) - utc) * msPerSecond);
+ return static_cast<double>(get_gmt_offset(*loc) * msPerSecond -
+ (loc->tm_isdst > 0 ? 3600 * msPerSecond : 0));
}
TimezoneCache* OS::CreateTimezoneCache() { return new AIXTimezoneCache(); }
diff --git a/deps/v8/src/base/platform/platform-cygwin.cc b/deps/v8/src/base/platform/platform-cygwin.cc
index c5d94fc6ba..f2976301ed 100644
--- a/deps/v8/src/base/platform/platform-cygwin.cc
+++ b/deps/v8/src/base/platform/platform-cygwin.cc
@@ -172,6 +172,33 @@ bool OS::SetPermissions(void* address, size_t size, MemoryPermission access) {
}
// static
+bool OS::DiscardSystemPages(void* address, size_t size) {
+  // On Windows, discarded pages are not returned to the system immediately
+  // and are not guaranteed to be zeroed when returned to the application.
+ using DiscardVirtualMemoryFunction =
+ DWORD(WINAPI*)(PVOID virtualAddress, SIZE_T size);
+ static std::atomic<DiscardVirtualMemoryFunction> discard_virtual_memory(
+ reinterpret_cast<DiscardVirtualMemoryFunction>(-1));
+ if (discard_virtual_memory ==
+ reinterpret_cast<DiscardVirtualMemoryFunction>(-1))
+ discard_virtual_memory =
+ reinterpret_cast<DiscardVirtualMemoryFunction>(GetProcAddress(
+ GetModuleHandle(L"Kernel32.dll"), "DiscardVirtualMemory"));
+ // Use DiscardVirtualMemory when available because it releases faster than
+ // MEM_RESET.
+ DiscardVirtualMemoryFunction discard_function = discard_virtual_memory.load();
+ if (discard_function) {
+ DWORD ret = discard_function(address, size);
+ if (!ret) return true;
+ }
+ // DiscardVirtualMemory is buggy in Win10 SP0, so fall back to MEM_RESET on
+ // failure.
+ void* ptr = VirtualAlloc(address, size, MEM_RESET, PAGE_READWRITE);
+ CHECK(ptr);
+ return ptr;
+}
+
+// static
bool OS::HasLazyCommits() {
// TODO(alph): implement for the platform.
return false;
diff --git a/deps/v8/src/base/platform/platform-fuchsia.cc b/deps/v8/src/base/platform/platform-fuchsia.cc
index 713ee404bd..f6123437ec 100644
--- a/deps/v8/src/base/platform/platform-fuchsia.cc
+++ b/deps/v8/src/base/platform/platform-fuchsia.cc
@@ -49,7 +49,7 @@ void* OS::Allocate(void* address, size_t size, size_t alignment,
size_t request_size = size + (alignment - page_size);
zx_handle_t vmo;
- if (zx_vmo_create(request_size, 0, &vmo) != ZX_OK) {
+ if (zx_vmo_create(request_size, ZX_VMO_NON_RESIZABLE, &vmo) != ZX_OK) {
return nullptr;
}
static const char kVirtualMemoryName[] = "v8-virtualmem";
@@ -57,6 +57,11 @@ void* OS::Allocate(void* address, size_t size, size_t alignment,
strlen(kVirtualMemoryName));
uintptr_t reservation;
uint32_t prot = GetProtectionFromMemoryPermission(access);
+ if ((prot & ZX_VM_FLAG_PERM_EXECUTE) != 0) {
+ if (zx_vmo_replace_as_executable(vmo, ZX_HANDLE_INVALID, &vmo) != ZX_OK) {
+ return nullptr;
+ }
+ }
zx_status_t status = zx_vmar_map(zx_vmar_root_self(), prot, 0, vmo, 0,
request_size, &reservation);
// Either the vmo is now referenced by the vmar, or we failed and are bailing,
@@ -120,6 +125,12 @@ bool OS::SetPermissions(void* address, size_t size, MemoryPermission access) {
}
// static
+bool OS::DiscardSystemPages(void* address, size_t size) {
+ // TODO(hpayer): Does Fuchsia have madvise?
+ return true;
+}
+
+// static
bool OS::HasLazyCommits() {
// TODO(scottmg): Port, https://crbug.com/731217.
return false;
diff --git a/deps/v8/src/base/platform/platform-posix.cc b/deps/v8/src/base/platform/platform-posix.cc
index c93974bcfc..e7edbf5f58 100644
--- a/deps/v8/src/base/platform/platform-posix.cc
+++ b/deps/v8/src/base/platform/platform-posix.cc
@@ -92,8 +92,8 @@ bool g_hard_abort = false;
const char* g_gc_fake_mmap = nullptr;
-static LazyInstance<RandomNumberGenerator>::type
- platform_random_number_generator = LAZY_INSTANCE_INITIALIZER;
+DEFINE_LAZY_LEAKY_OBJECT_GETTER(RandomNumberGenerator,
+ GetPlatformRandomNumberGenerator);
static LazyMutex rng_mutex = LAZY_MUTEX_INITIALIZER;
#if !V8_OS_FUCHSIA
@@ -145,32 +145,6 @@ void* Allocate(void* address, size_t size, OS::MemoryPermission access) {
return result;
}
-int ReclaimInaccessibleMemory(void* address, size_t size) {
-#if defined(OS_MACOSX)
- // On OSX, MADV_FREE_REUSABLE has comparable behavior to MADV_FREE, but also
- // marks the pages with the reusable bit, which allows both Activity Monitor
- // and memory-infra to correctly track the pages.
- int ret = madvise(address, size, MADV_FREE_REUSABLE);
-#elif defined(_AIX) || defined(V8_OS_SOLARIS)
- int ret = madvise(reinterpret_cast<caddr_t>(address), size, MADV_FREE);
-#else
- int ret = madvise(address, size, MADV_FREE);
-#endif
- if (ret != 0 && errno == ENOSYS)
- return 0; // madvise is not available on all systems.
- if (ret != 0 && errno == EINVAL) {
- // MADV_FREE only works on Linux 4.5+ . If request failed, retry with older
- // MADV_DONTNEED . Note that MADV_FREE being defined at compile time doesn't
- // imply runtime support.
-#if defined(_AIX) || defined(V8_OS_SOLARIS)
- ret = madvise(reinterpret_cast<caddr_t>(address), size, MADV_DONTNEED);
-#else
- ret = madvise(address, size, MADV_DONTNEED);
-#endif
- }
- return ret;
-}
-
#endif // !V8_OS_FUCHSIA
} // namespace
@@ -213,8 +187,8 @@ size_t OS::CommitPageSize() {
// static
void OS::SetRandomMmapSeed(int64_t seed) {
if (seed) {
- LockGuard<Mutex> guard(rng_mutex.Pointer());
- platform_random_number_generator.Pointer()->SetSeed(seed);
+ MutexGuard guard(rng_mutex.Pointer());
+ GetPlatformRandomNumberGenerator()->SetSeed(seed);
}
}
@@ -222,9 +196,8 @@ void OS::SetRandomMmapSeed(int64_t seed) {
void* OS::GetRandomMmapAddr() {
uintptr_t raw_addr;
{
- LockGuard<Mutex> guard(rng_mutex.Pointer());
- platform_random_number_generator.Pointer()->NextBytes(&raw_addr,
- sizeof(raw_addr));
+ MutexGuard guard(rng_mutex.Pointer());
+ GetPlatformRandomNumberGenerator()->NextBytes(&raw_addr, sizeof(raw_addr));
}
#if defined(V8_USE_ADDRESS_SANITIZER) || defined(MEMORY_SANITIZER) || \
defined(THREAD_SANITIZER) || defined(LEAK_SANITIZER)
@@ -356,7 +329,7 @@ bool OS::SetPermissions(void* address, size_t size, MemoryPermission access) {
int ret = mprotect(address, size, prot);
if (ret == 0 && access == OS::MemoryPermission::kNoAccess) {
// This is advisory; ignore errors and continue execution.
- ReclaimInaccessibleMemory(address, size);
+ USE(DiscardSystemPages(address, size));
}
// For accounting purposes, we want to call MADV_FREE_REUSE on macOS after
@@ -373,6 +346,34 @@ bool OS::SetPermissions(void* address, size_t size, MemoryPermission access) {
return ret == 0;
}
+bool OS::DiscardSystemPages(void* address, size_t size) {
+ DCHECK_EQ(0, reinterpret_cast<uintptr_t>(address) % CommitPageSize());
+ DCHECK_EQ(0, size % CommitPageSize());
+#if defined(OS_MACOSX)
+ // On OSX, MADV_FREE_REUSABLE has comparable behavior to MADV_FREE, but also
+ // marks the pages with the reusable bit, which allows both Activity Monitor
+ // and memory-infra to correctly track the pages.
+ int ret = madvise(address, size, MADV_FREE_REUSABLE);
+#elif defined(_AIX) || defined(V8_OS_SOLARIS)
+ int ret = madvise(reinterpret_cast<caddr_t>(address), size, MADV_FREE);
+#else
+ int ret = madvise(address, size, MADV_FREE);
+#endif
+ if (ret != 0 && errno == ENOSYS)
+ return true; // madvise is not available on all systems.
+ if (ret != 0 && errno == EINVAL) {
+// MADV_FREE only works on Linux 4.5+. If the request failed, retry with the
+// older MADV_DONTNEED. Note that MADV_FREE being defined at compile time
+// doesn't imply runtime support.
+#if defined(_AIX) || defined(V8_OS_SOLARIS)
+ ret = madvise(reinterpret_cast<caddr_t>(address), size, MADV_DONTNEED);
+#else
+ ret = madvise(address, size, MADV_DONTNEED);
+#endif
+ }
+ return ret == 0;
+}
+
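// Illustrative sketch, not part of this patch: the fallback chain above as a
// standalone helper. MADV_FREE requires Linux 4.5+, so EINVAL retries with the
// older MADV_DONTNEED; ENOSYS means madvise itself is missing and the call is
// treated as a successful no-op. Assumes <sys/mman.h> and <cerrno>.
static bool DiscardPagesSketch(void* address, size_t size) {
  int ret = madvise(address, size, MADV_FREE);
  if (ret != 0 && errno == ENOSYS) return true;  // No madvise at runtime.
  if (ret != 0 && errno == EINVAL) {
    ret = madvise(address, size, MADV_DONTNEED);  // Pre-4.5 kernels.
  }
  return ret == 0;
}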
// static
bool OS::HasLazyCommits() {
#if V8_OS_AIX || V8_OS_LINUX || V8_OS_MACOSX
@@ -447,7 +448,8 @@ OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
if (FILE* file = fopen(name, "r+")) {
if (fseek(file, 0, SEEK_END) == 0) {
long size = ftell(file); // NOLINT(runtime/int)
- if (size >= 0) {
+ if (size == 0) return new PosixMemoryMappedFile(file, nullptr, 0);
+ if (size > 0) {
void* const memory =
mmap(OS::GetRandomMmapAddr(), size, PROT_READ | PROT_WRITE,
MAP_SHARED, fileno(file), 0);
@@ -466,6 +468,7 @@ OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name,
size_t size, void* initial) {
if (FILE* file = fopen(name, "w+")) {
+    if (size == 0) return new PosixMemoryMappedFile(file, nullptr, 0);
size_t result = fwrite(initial, 1, size, file);
if (result == size && !ferror(file)) {
void* memory = mmap(OS::GetRandomMmapAddr(), result,
@@ -481,7 +484,7 @@ OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name,
PosixMemoryMappedFile::~PosixMemoryMappedFile() {
- if (memory_) CHECK(OS::Free(memory_, size_));
+ if (memory_) CHECK(OS::Free(memory_, RoundUp(size_, OS::AllocatePageSize())));
fclose(file_);
}
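// Illustrative sketch, not part of this patch: OS::Free releases whole
// allocation pages, while size_ need not be page-aligned, hence the RoundUp
// above. The usual power-of-two rounding it relies on (<cstddef> assumed):
constexpr size_t RoundUpSketch(size_t size, size_t page) {
  return (size + page - 1) & ~(page - 1);  // page must be a power of two.
}
static_assert(RoundUpSketch(5000, 4096) == 8192, "rounds up to the next page");
static_assert(RoundUpSketch(4096, 4096) == 4096, "aligned sizes unchanged");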
@@ -738,7 +741,7 @@ static void* ThreadEntry(void* arg) {
// We take the lock here to make sure that pthread_create finished first since
// we don't know which thread will run first (the original thread or the new
// one).
- { LockGuard<Mutex> lock_guard(&thread->data()->thread_creation_mutex_); }
+ { MutexGuard lock_guard(&thread->data()->thread_creation_mutex_); }
SetThreadName(thread->name());
DCHECK_NE(thread->data()->thread_, kNoThread);
thread->NotifyStartedAndRun();
@@ -773,7 +776,7 @@ void Thread::Start() {
DCHECK_EQ(0, result);
}
{
- LockGuard<Mutex> lock_guard(&data_->thread_creation_mutex_);
+ MutexGuard lock_guard(&data_->thread_creation_mutex_);
result = pthread_create(&data_->thread_, &attr, ThreadEntry, this);
}
DCHECK_EQ(0, result);
diff --git a/deps/v8/src/base/platform/platform-win32.cc b/deps/v8/src/base/platform/platform-win32.cc
index 11a008e6c6..45aabf390f 100644
--- a/deps/v8/src/base/platform/platform-win32.cc
+++ b/deps/v8/src/base/platform/platform-win32.cc
@@ -689,8 +689,8 @@ void OS::StrNCpy(char* dest, int length, const char* src, size_t n) {
#undef _TRUNCATE
#undef STRUNCATE
-static LazyInstance<RandomNumberGenerator>::type
- platform_random_number_generator = LAZY_INSTANCE_INITIALIZER;
+DEFINE_LAZY_LEAKY_OBJECT_GETTER(RandomNumberGenerator,
+ GetPlatformRandomNumberGenerator);
static LazyMutex rng_mutex = LAZY_MUTEX_INITIALIZER;
void OS::Initialize(bool hard_abort, const char* const gc_fake_mmap) {
@@ -723,8 +723,8 @@ size_t OS::CommitPageSize() {
// static
void OS::SetRandomMmapSeed(int64_t seed) {
if (seed) {
- LockGuard<Mutex> guard(rng_mutex.Pointer());
- platform_random_number_generator.Pointer()->SetSeed(seed);
+ MutexGuard guard(rng_mutex.Pointer());
+ GetPlatformRandomNumberGenerator()->SetSeed(seed);
}
}
@@ -744,9 +744,8 @@ void* OS::GetRandomMmapAddr() {
#endif
uintptr_t address;
{
- LockGuard<Mutex> guard(rng_mutex.Pointer());
- platform_random_number_generator.Pointer()->NextBytes(&address,
- sizeof(address));
+ MutexGuard guard(rng_mutex.Pointer());
+ GetPlatformRandomNumberGenerator()->NextBytes(&address, sizeof(address));
}
address <<= kPageSizeBits;
address += kAllocationRandomAddressMin;
@@ -883,6 +882,33 @@ bool OS::SetPermissions(void* address, size_t size, MemoryPermission access) {
}
// static
+bool OS::DiscardSystemPages(void* address, size_t size) {
+  // On Windows, discarded pages are not returned to the system immediately
+  // and are not guaranteed to be zeroed when returned to the application.
+ using DiscardVirtualMemoryFunction =
+ DWORD(WINAPI*)(PVOID virtualAddress, SIZE_T size);
+ static std::atomic<DiscardVirtualMemoryFunction> discard_virtual_memory(
+ reinterpret_cast<DiscardVirtualMemoryFunction>(-1));
+ if (discard_virtual_memory ==
+ reinterpret_cast<DiscardVirtualMemoryFunction>(-1))
+ discard_virtual_memory =
+ reinterpret_cast<DiscardVirtualMemoryFunction>(GetProcAddress(
+ GetModuleHandle(L"Kernel32.dll"), "DiscardVirtualMemory"));
+ // Use DiscardVirtualMemory when available because it releases faster than
+ // MEM_RESET.
+ DiscardVirtualMemoryFunction discard_function = discard_virtual_memory.load();
+ if (discard_function) {
+ DWORD ret = discard_function(address, size);
+ if (!ret) return true;
+ }
+ // DiscardVirtualMemory is buggy in Win10 SP0, so fall back to MEM_RESET on
+ // failure.
+ void* ptr = VirtualAlloc(address, size, MEM_RESET, PAGE_READWRITE);
+ CHECK(ptr);
+ return ptr;
+}
+
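// Illustrative sketch, not part of this patch: the same resolve-then-fallback
// idea without the atomic cache. DiscardVirtualMemory only exists on
// Windows 8.1+, so it is looked up at runtime; ERROR_SUCCESS is 0. Assumes
// <windows.h>.
static bool DiscardSketch(void* address, size_t size) {
  using DiscardFn = DWORD(WINAPI*)(PVOID, SIZE_T);
  static DiscardFn fn = reinterpret_cast<DiscardFn>(GetProcAddress(
      GetModuleHandleW(L"Kernel32.dll"), "DiscardVirtualMemory"));
  if (fn != nullptr && fn(address, size) == ERROR_SUCCESS) return true;
  // MEM_RESET keeps the pages mapped but tells the OS their contents are
  // no longer needed.
  return VirtualAlloc(address, size, MEM_RESET, PAGE_READWRITE) != nullptr;
}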
+// static
bool OS::HasLazyCommits() {
// TODO(alph): implement for the platform.
return false;
@@ -943,20 +969,21 @@ class Win32MemoryMappedFile final : public OS::MemoryMappedFile {
// static
OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
- // Open a physical file
+ // Open a physical file.
HANDLE file = CreateFileA(name, GENERIC_READ | GENERIC_WRITE,
FILE_SHARE_READ | FILE_SHARE_WRITE, nullptr,
OPEN_EXISTING, 0, nullptr);
if (file == INVALID_HANDLE_VALUE) return nullptr;
DWORD size = GetFileSize(file, nullptr);
+ if (size == 0) return new Win32MemoryMappedFile(file, nullptr, nullptr, 0);
- // Create a file mapping for the physical file
+ // Create a file mapping for the physical file.
HANDLE file_mapping =
CreateFileMapping(file, nullptr, PAGE_READWRITE, 0, size, nullptr);
if (file_mapping == nullptr) return nullptr;
- // Map a view of the file into memory
+ // Map a view of the file into memory.
void* memory = MapViewOfFile(file_mapping, FILE_MAP_ALL_ACCESS, 0, 0, size);
return new Win32MemoryMappedFile(file, file_mapping, memory, size);
}
@@ -965,16 +992,17 @@ OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
// static
OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name,
size_t size, void* initial) {
- // Open a physical file
+ // Open a physical file.
HANDLE file = CreateFileA(name, GENERIC_READ | GENERIC_WRITE,
FILE_SHARE_READ | FILE_SHARE_WRITE, nullptr,
OPEN_ALWAYS, 0, nullptr);
if (file == nullptr) return nullptr;
- // Create a file mapping for the physical file
+ if (size == 0) return new Win32MemoryMappedFile(file, nullptr, nullptr, 0);
+ // Create a file mapping for the physical file.
HANDLE file_mapping = CreateFileMapping(file, nullptr, PAGE_READWRITE, 0,
static_cast<DWORD>(size), nullptr);
if (file_mapping == nullptr) return nullptr;
- // Map a view of the file into memory
+ // Map a view of the file into memory.
void* memory = MapViewOfFile(file_mapping, FILE_MAP_ALL_ACCESS, 0, 0, size);
if (memory) memmove(memory, initial, size);
return new Win32MemoryMappedFile(file, file_mapping, memory, size);
@@ -983,7 +1011,7 @@ OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name,
Win32MemoryMappedFile::~Win32MemoryMappedFile() {
if (memory_) UnmapViewOfFile(memory_);
- CloseHandle(file_mapping_);
+ if (file_mapping_) CloseHandle(file_mapping_);
CloseHandle(file_);
}
diff --git a/deps/v8/src/base/platform/platform.h b/deps/v8/src/base/platform/platform.h
index f9d01edf00..e51d5cd547 100644
--- a/deps/v8/src/base/platform/platform.h
+++ b/deps/v8/src/base/platform/platform.h
@@ -55,17 +55,17 @@ inline intptr_t InternalGetExistingThreadLocal(intptr_t index) {
const intptr_t kTibExtraTlsOffset = 0xF94;
const intptr_t kMaxInlineSlots = 64;
const intptr_t kMaxSlots = kMaxInlineSlots + 1024;
- const intptr_t kPointerSize = sizeof(void*);
+ const intptr_t kSystemPointerSize = sizeof(void*);
DCHECK(0 <= index && index < kMaxSlots);
USE(kMaxSlots);
if (index < kMaxInlineSlots) {
- return static_cast<intptr_t>(__readfsdword(kTibInlineTlsOffset +
- kPointerSize * index));
+ return static_cast<intptr_t>(
+ __readfsdword(kTibInlineTlsOffset + kSystemPointerSize * index));
}
intptr_t extra = static_cast<intptr_t>(__readfsdword(kTibExtraTlsOffset));
DCHECK_NE(extra, 0);
- return *reinterpret_cast<intptr_t*>(extra +
- kPointerSize * (index - kMaxInlineSlots));
+ return *reinterpret_cast<intptr_t*>(extra + kSystemPointerSize *
+ (index - kMaxInlineSlots));
}
#elif defined(__APPLE__) && (V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64)
@@ -273,6 +273,9 @@ class V8_BASE_EXPORT OS {
V8_WARN_UNUSED_RESULT static bool SetPermissions(void* address, size_t size,
MemoryPermission access);
+ V8_WARN_UNUSED_RESULT static bool DiscardSystemPages(void* address,
+ size_t size);
+
static const int msPerSecond = 1000;
#if V8_OS_POSIX
diff --git a/deps/v8/src/base/platform/time.cc b/deps/v8/src/base/platform/time.cc
index 1ab56f42b5..2e10f539f4 100644
--- a/deps/v8/src/base/platform/time.cc
+++ b/deps/v8/src/base/platform/time.cc
@@ -132,36 +132,6 @@ V8_INLINE uint64_t QPCNowRaw() {
namespace v8 {
namespace base {
-TimeDelta TimeDelta::FromDays(int days) {
- return TimeDelta(days * Time::kMicrosecondsPerDay);
-}
-
-
-TimeDelta TimeDelta::FromHours(int hours) {
- return TimeDelta(hours * Time::kMicrosecondsPerHour);
-}
-
-
-TimeDelta TimeDelta::FromMinutes(int minutes) {
- return TimeDelta(minutes * Time::kMicrosecondsPerMinute);
-}
-
-
-TimeDelta TimeDelta::FromSeconds(int64_t seconds) {
- return TimeDelta(seconds * Time::kMicrosecondsPerSecond);
-}
-
-
-TimeDelta TimeDelta::FromMilliseconds(int64_t milliseconds) {
- return TimeDelta(milliseconds * Time::kMicrosecondsPerMillisecond);
-}
-
-
-TimeDelta TimeDelta::FromNanoseconds(int64_t nanoseconds) {
- return TimeDelta(nanoseconds / Time::kNanosecondsPerMicrosecond);
-}
-
-
int TimeDelta::InDays() const {
if (IsMax()) {
// Preserve max to prevent overflow.
@@ -302,7 +272,7 @@ class Clock final {
// Time between resampling the un-granular clock for this API (1 minute).
const TimeDelta kMaxElapsedTime = TimeDelta::FromMinutes(1);
- LockGuard<Mutex> lock_guard(&mutex_);
+ MutexGuard lock_guard(&mutex_);
// Determine current time and ticks.
TimeTicks ticks = GetSystemTicks();
@@ -321,7 +291,7 @@ class Clock final {
}
Time NowFromSystemTime() {
- LockGuard<Mutex> lock_guard(&mutex_);
+ MutexGuard lock_guard(&mutex_);
initial_ticks_ = GetSystemTicks();
initial_time_ = GetSystemTime();
return initial_time_;
@@ -343,21 +313,13 @@ class Clock final {
Mutex mutex_;
};
+namespace {
+DEFINE_LAZY_LEAKY_OBJECT_GETTER(Clock, GetClock);
+}  // namespace
-static LazyStaticInstance<Clock, DefaultConstructTrait<Clock>,
- ThreadSafeInitOnceTrait>::type clock =
- LAZY_STATIC_INSTANCE_INITIALIZER;
-
-
-Time Time::Now() {
- return clock.Pointer()->Now();
-}
-
-
-Time Time::NowFromSystemTime() {
- return clock.Pointer()->NowFromSystemTime();
-}
+Time Time::Now() { return GetClock()->Now(); }
+Time Time::NowFromSystemTime() { return GetClock()->NowFromSystemTime(); }
// Time between windows epoch and standard epoch.
static const int64_t kTimeToEpochInMicroseconds = int64_t{11644473600000000};
@@ -829,6 +791,12 @@ void ThreadTicks::WaitUntilInitializedWin() {
::Sleep(10);
}
+#ifdef V8_HOST_ARCH_ARM64
+#define ReadCycleCounter() _ReadStatusReg(ARM64_PMCCNTR_EL0)
+#else
+#define ReadCycleCounter() __rdtsc()
+#endif
+
double ThreadTicks::TSCTicksPerSecond() {
DCHECK(IsSupported());
@@ -849,12 +817,12 @@ double ThreadTicks::TSCTicksPerSecond() {
// The first time that this function is called, make an initial reading of the
// TSC and the performance counter.
- static const uint64_t tsc_initial = __rdtsc();
+ static const uint64_t tsc_initial = ReadCycleCounter();
static const uint64_t perf_counter_initial = QPCNowRaw();
  // Make another reading of the TSC and the performance counter every time
// that this function is called.
- uint64_t tsc_now = __rdtsc();
+ uint64_t tsc_now = ReadCycleCounter();
uint64_t perf_counter_now = QPCNowRaw();
// Reset the thread priority.
@@ -887,6 +855,7 @@ double ThreadTicks::TSCTicksPerSecond() {
return tsc_ticks_per_second;
}
+#undef ReadCycleCounter
#endif // V8_OS_WIN
} // namespace base
diff --git a/deps/v8/src/base/platform/time.h b/deps/v8/src/base/platform/time.h
index 9e99166487..fd8ef10b55 100644
--- a/deps/v8/src/base/platform/time.h
+++ b/deps/v8/src/base/platform/time.h
@@ -39,6 +39,25 @@ template<class TimeClass>
class TimeBase;
}
+class TimeConstants {
+ public:
+ static constexpr int64_t kHoursPerDay = 24;
+ static constexpr int64_t kMillisecondsPerSecond = 1000;
+ static constexpr int64_t kMillisecondsPerDay =
+ kMillisecondsPerSecond * 60 * 60 * kHoursPerDay;
+ static constexpr int64_t kMicrosecondsPerMillisecond = 1000;
+ static constexpr int64_t kMicrosecondsPerSecond =
+ kMicrosecondsPerMillisecond * kMillisecondsPerSecond;
+ static constexpr int64_t kMicrosecondsPerMinute = kMicrosecondsPerSecond * 60;
+ static constexpr int64_t kMicrosecondsPerHour = kMicrosecondsPerMinute * 60;
+ static constexpr int64_t kMicrosecondsPerDay =
+ kMicrosecondsPerHour * kHoursPerDay;
+ static constexpr int64_t kMicrosecondsPerWeek = kMicrosecondsPerDay * 7;
+ static constexpr int64_t kNanosecondsPerMicrosecond = 1000;
+ static constexpr int64_t kNanosecondsPerSecond =
+ kNanosecondsPerMicrosecond * kMicrosecondsPerSecond;
+};
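// Illustrative sketch, not part of this patch: the constants above chain into
// one another, so their relationships can be verified at compile time. A
// self-contained mirror (<cstdint> assumed):
constexpr int64_t kUsPerMsSketch = 1000;
constexpr int64_t kUsPerSecondSketch = kUsPerMsSketch * 1000;
constexpr int64_t kUsPerDaySketch = kUsPerSecondSketch * 60 * 60 * 24;
static_assert(kUsPerDaySketch == int64_t{86400000000},
              "24 hours in microseconds");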
+
// -----------------------------------------------------------------------------
// TimeDelta
//
@@ -50,15 +69,27 @@ class V8_BASE_EXPORT TimeDelta final {
constexpr TimeDelta() : delta_(0) {}
// Converts units of time to TimeDeltas.
- static TimeDelta FromDays(int days);
- static TimeDelta FromHours(int hours);
- static TimeDelta FromMinutes(int minutes);
- static TimeDelta FromSeconds(int64_t seconds);
- static TimeDelta FromMilliseconds(int64_t milliseconds);
- static TimeDelta FromMicroseconds(int64_t microseconds) {
+ static constexpr TimeDelta FromDays(int days) {
+ return TimeDelta(days * TimeConstants::kMicrosecondsPerDay);
+ }
+ static constexpr TimeDelta FromHours(int hours) {
+ return TimeDelta(hours * TimeConstants::kMicrosecondsPerHour);
+ }
+ static constexpr TimeDelta FromMinutes(int minutes) {
+ return TimeDelta(minutes * TimeConstants::kMicrosecondsPerMinute);
+ }
+ static constexpr TimeDelta FromSeconds(int64_t seconds) {
+ return TimeDelta(seconds * TimeConstants::kMicrosecondsPerSecond);
+ }
+ static constexpr TimeDelta FromMilliseconds(int64_t milliseconds) {
+ return TimeDelta(milliseconds * TimeConstants::kMicrosecondsPerMillisecond);
+ }
+ static constexpr TimeDelta FromMicroseconds(int64_t microseconds) {
return TimeDelta(microseconds);
}
- static TimeDelta FromNanoseconds(int64_t nanoseconds);
+ static constexpr TimeDelta FromNanoseconds(int64_t nanoseconds) {
+ return TimeDelta(nanoseconds / TimeConstants::kNanosecondsPerMicrosecond);
+ }
// Returns the maximum time delta, which should be greater than any reasonable
// time delta we might compare it to. Adding or subtracting the maximum time
@@ -105,8 +136,6 @@ class V8_BASE_EXPORT TimeDelta final {
static TimeDelta FromTimespec(struct timespec ts);
struct timespec ToTimespec() const;
- TimeDelta& operator=(const TimeDelta& other) = default;
-
// Computations with other deltas.
TimeDelta operator+(const TimeDelta& other) const {
return TimeDelta(delta_ + other.delta_);
@@ -201,25 +230,9 @@ namespace time_internal {
// classes. Each subclass provides for strong type-checking to ensure
// semantically meaningful comparison/math of time values from the same clock
// source or timeline.
-template<class TimeClass>
-class TimeBase {
+template <class TimeClass>
+class TimeBase : public TimeConstants {
public:
- static constexpr int64_t kHoursPerDay = 24;
- static constexpr int64_t kMillisecondsPerSecond = 1000;
- static constexpr int64_t kMillisecondsPerDay =
- kMillisecondsPerSecond * 60 * 60 * kHoursPerDay;
- static constexpr int64_t kMicrosecondsPerMillisecond = 1000;
- static constexpr int64_t kMicrosecondsPerSecond =
- kMicrosecondsPerMillisecond * kMillisecondsPerSecond;
- static constexpr int64_t kMicrosecondsPerMinute = kMicrosecondsPerSecond * 60;
- static constexpr int64_t kMicrosecondsPerHour = kMicrosecondsPerMinute * 60;
- static constexpr int64_t kMicrosecondsPerDay =
- kMicrosecondsPerHour * kHoursPerDay;
- static constexpr int64_t kMicrosecondsPerWeek = kMicrosecondsPerDay * 7;
- static constexpr int64_t kNanosecondsPerMicrosecond = 1000;
- static constexpr int64_t kNanosecondsPerSecond =
- kNanosecondsPerMicrosecond * kMicrosecondsPerSecond;
-
#if V8_OS_WIN
// To avoid overflow in QPC to Microseconds calculations, since we multiply
// by kMicrosecondsPerSecond, then the QPC value should not exceed
diff --git a/deps/v8/src/base/region-allocator.cc b/deps/v8/src/base/region-allocator.cc
index 46ceca1857..6905b83410 100644
--- a/deps/v8/src/base/region-allocator.cc
+++ b/deps/v8/src/base/region-allocator.cc
@@ -258,6 +258,16 @@ size_t RegionAllocator::CheckRegion(Address address) {
return region->size();
}
+bool RegionAllocator::IsFree(Address address, size_t size) {
+ CHECK(contains(address, size));
+ AllRegionsSet::iterator region_iter = FindRegion(address);
+ if (region_iter == all_regions_.end()) {
+ return true;
+ }
+ Region* region = *region_iter;
+ return !region->is_used() && region->contains(address, size);
+}
+
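// Illustrative sketch, not part of this patch: hypothetical use of the new
// IsFree, assuming this class's usual RegionAllocator(begin, size, page_size)
// constructor and AllocateRegion(size) member.
void IsFreeExampleSketch() {
  RegionAllocator ra(0x10000, 0x10000, 0x1000);
  bool before = ra.IsFree(0x10000, 0x1000);  // true: nothing allocated yet.
  RegionAllocator::Address a = ra.AllocateRegion(0x2000);
  bool after = ra.IsFree(a, 0x1000);         // false: region now in use.
  (void)before;
  (void)after;
}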
void RegionAllocator::Region::Print(std::ostream& os) const {
std::ios::fmtflags flags = os.flags(std::ios::hex | std::ios::showbase);
os << "[" << begin() << ", " << end() << "), size: " << size();
diff --git a/deps/v8/src/base/region-allocator.h b/deps/v8/src/base/region-allocator.h
index fb51472fa9..6cf8889530 100644
--- a/deps/v8/src/base/region-allocator.h
+++ b/deps/v8/src/base/region-allocator.h
@@ -60,6 +60,9 @@ class V8_BASE_EXPORT RegionAllocator final {
// otherwise 0.
size_t CheckRegion(Address address);
+  // Returns true if there are no pages allocated in the given region.
+ bool IsFree(Address address, size_t size);
+
Address begin() const { return whole_region_.begin(); }
Address end() const { return whole_region_.end(); }
size_t size() const { return whole_region_.size(); }
diff --git a/deps/v8/src/base/small-vector.h b/deps/v8/src/base/small-vector.h
new file mode 100644
index 0000000000..5138e65ab5
--- /dev/null
+++ b/deps/v8/src/base/small-vector.h
@@ -0,0 +1,146 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BASE_SMALL_VECTOR_H_
+#define V8_BASE_SMALL_VECTOR_H_
+
+#include <type_traits>
+
+#include "src/base/bits.h"
+#include "src/base/macros.h"
+
+namespace v8 {
+namespace base {
+
+// Minimal SmallVector implementation. Uses inline storage first, switches to
+// malloc when it overflows.
+template <typename T, size_t kInlineSize>
+class SmallVector {
+  // Currently supports only trivially copyable and trivially destructible
+  // data types, as it uses memcpy to copy elements and never calls
+  // destructors.
+ ASSERT_TRIVIALLY_COPYABLE(T);
+ STATIC_ASSERT(std::is_trivially_destructible<T>::value);
+
+ public:
+ SmallVector() = default;
+ SmallVector(const SmallVector& other) V8_NOEXCEPT { *this = other; }
+ SmallVector(SmallVector&& other) V8_NOEXCEPT { *this = std::move(other); }
+
+ ~SmallVector() {
+ if (is_big()) free(begin_);
+ }
+
+ SmallVector& operator=(const SmallVector& other) V8_NOEXCEPT {
+ if (this == &other) return *this;
+ size_t other_size = other.size();
+ if (capacity() < other_size) {
+ // Create large-enough heap-allocated storage.
+ if (is_big()) free(begin_);
+ begin_ = reinterpret_cast<T*>(malloc(sizeof(T) * other_size));
+ end_of_storage_ = begin_ + other_size;
+ }
+ memcpy(begin_, other.begin_, sizeof(T) * other_size);
+ end_ = begin_ + other_size;
+ return *this;
+ }
+
+ SmallVector& operator=(SmallVector&& other) V8_NOEXCEPT {
+ if (this == &other) return *this;
+ if (other.is_big()) {
+ if (is_big()) free(begin_);
+ begin_ = other.begin_;
+ end_ = other.end_;
+ end_of_storage_ = other.end_of_storage_;
+ other.reset();
+ } else {
+ DCHECK_GE(capacity(), other.size()); // Sanity check.
+ size_t other_size = other.size();
+ memcpy(begin_, other.begin_, sizeof(T) * other_size);
+ end_ = begin_ + other_size;
+ }
+ return *this;
+ }
+
+ T* data() const { return begin_; }
+ T* begin() const { return begin_; }
+ T* end() const { return end_; }
+ size_t size() const { return end_ - begin_; }
+ bool empty() const { return end_ == begin_; }
+ size_t capacity() const { return end_of_storage_ - begin_; }
+
+ T& back() {
+ DCHECK_NE(0, size());
+ return end_[-1];
+ }
+
+ T& operator[](size_t index) {
+ DCHECK_GT(size(), index);
+ return begin_[index];
+ }
+
+ const T& operator[](size_t index) const {
+ DCHECK_GT(size(), index);
+ return begin_[index];
+ }
+
+ template <typename... Args>
+ void emplace_back(Args&&... args) {
+ if (V8_UNLIKELY(end_ == end_of_storage_)) Grow();
+ new (end_) T(std::forward<Args>(args)...);
+ ++end_;
+ }
+
+ void pop_back(size_t count = 1) {
+ DCHECK_GE(size(), count);
+ end_ -= count;
+ }
+
+ void resize_no_init(size_t new_size) {
+ // Resizing without initialization is safe if T is trivially copyable.
+ ASSERT_TRIVIALLY_COPYABLE(T);
+ if (new_size > capacity()) Grow(new_size);
+ end_ = begin_ + new_size;
+ }
+
+ // Clear without freeing any storage.
+ void clear() { end_ = begin_; }
+
+ // Clear and go back to inline storage.
+ void reset() {
+ begin_ = inline_storage_begin();
+ end_ = begin_;
+ end_of_storage_ = begin_ + kInlineSize;
+ }
+
+ private:
+ T* begin_ = inline_storage_begin();
+ T* end_ = begin_;
+ T* end_of_storage_ = begin_ + kInlineSize;
+ typename std::aligned_storage<sizeof(T) * kInlineSize, alignof(T)>::type
+ inline_storage_;
+
+ void Grow(size_t min_capacity = 0) {
+ size_t in_use = end_ - begin_;
+ size_t new_capacity =
+ base::bits::RoundUpToPowerOfTwo(std::max(min_capacity, 2 * capacity()));
+ T* new_storage = reinterpret_cast<T*>(malloc(sizeof(T) * new_capacity));
+ memcpy(new_storage, begin_, sizeof(T) * in_use);
+ if (is_big()) free(begin_);
+ begin_ = new_storage;
+ end_ = new_storage + in_use;
+ end_of_storage_ = new_storage + new_capacity;
+ }
+
+ bool is_big() const { return begin_ != inline_storage_begin(); }
+
+ T* inline_storage_begin() { return reinterpret_cast<T*>(&inline_storage_); }
+ const T* inline_storage_begin() const {
+ return reinterpret_cast<const T*>(&inline_storage_);
+ }
+};
+
+} // namespace base
+} // namespace v8
+
+#endif // V8_BASE_SMALL_VECTOR_H_
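// Illustrative sketch, not part of this patch: hypothetical usage of the
// header above. Elements live in the inline buffer up to kInlineSize, then
// transparently move to malloc'ed storage.
inline void SmallVectorExampleSketch() {
  v8::base::SmallVector<int, 8> vec;
  for (int i = 0; i < 20; ++i) vec.emplace_back(i);  // Grows past inline.
  int last = vec.back();  // 19.
  vec.pop_back(5);        // size() is now 15.
  vec.reset();            // Empty again, back on the inline buffer.
  (void)last;
}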
diff --git a/deps/v8/src/base/threaded-list.h b/deps/v8/src/base/threaded-list.h
index d54bcb8f70..9504a8a93d 100644
--- a/deps/v8/src/base/threaded-list.h
+++ b/deps/v8/src/base/threaded-list.h
@@ -16,6 +16,8 @@ namespace base {
template <typename T>
struct ThreadedListTraits {
static T** next(T* t) { return t->next(); }
+ static T** start(T** t) { return t; }
+ static T* const* start(T* const* t) { return t; }
};
// Represents a linked list that threads through the nodes in the linked list.
@@ -44,21 +46,6 @@ class ThreadedListBase final : public BaseClass {
head_ = v;
}
- // Reinitializing the head to a new node, this costs O(n).
- void ReinitializeHead(T* v) {
- head_ = v;
- T* current = v;
- if (current != nullptr) { // Find tail
- T* tmp;
- while ((tmp = *TLTraits::next(current))) {
- current = tmp;
- }
- tail_ = TLTraits::next(current);
- } else {
- tail_ = &head_;
- }
- }
-
void DropHead() {
DCHECK_NOT_NULL(head_);
@@ -68,7 +55,16 @@ class ThreadedListBase final : public BaseClass {
*TLTraits::next(old_head) = nullptr;
}
+ bool Contains(T* v) {
+ for (Iterator it = begin(); it != end(); ++it) {
+ if (*it == v) return true;
+ }
+ return false;
+ }
+
void Append(ThreadedListBase&& list) {
+ if (list.is_empty()) return;
+
*tail_ = list.head_;
tail_ = list.tail_;
list.Clear();
@@ -150,7 +146,7 @@ class ThreadedListBase final : public BaseClass {
bool operator!=(const Iterator& other) const {
return entry_ != other.entry_;
}
- T* operator*() { return *entry_; }
+ T*& operator*() { return *entry_; }
T* operator->() { return *entry_; }
Iterator& operator=(T* entry) {
T* next = *TLTraits::next(*entry_);
@@ -159,6 +155,8 @@ class ThreadedListBase final : public BaseClass {
return *this;
}
+ Iterator() : entry_(nullptr) {}
+
private:
explicit Iterator(T** entry) : entry_(entry) {}
@@ -196,10 +194,10 @@ class ThreadedListBase final : public BaseClass {
friend class ThreadedListBase;
};
- Iterator begin() { return Iterator(&head_); }
+ Iterator begin() { return Iterator(TLTraits::start(&head_)); }
Iterator end() { return Iterator(tail_); }
- ConstIterator begin() const { return ConstIterator(&head_); }
+ ConstIterator begin() const { return ConstIterator(TLTraits::start(&head_)); }
ConstIterator end() const { return ConstIterator(tail_); }
  // Rewinds the list's tail to the reset point, i.e., cutting off the rest of
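// Illustrative sketch, not part of this patch: a minimal node type for the
// default ThreadedListTraits above only needs a next() accessor returning T**.
// The usage below assumes the list's usual Add(T*) member.
struct NodeSketch {
  NodeSketch* next_ = nullptr;
  NodeSketch** next() { return &next_; }
};
// ThreadedList<NodeSketch> list;
// NodeSketch a, b;
// list.Add(&a);
// list.Add(&b);
// bool has_a = list.Contains(&a);  // true, via the new Contains().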
diff --git a/deps/v8/src/base/utils/random-number-generator.cc b/deps/v8/src/base/utils/random-number-generator.cc
index a3313f4e88..3b38858192 100644
--- a/deps/v8/src/base/utils/random-number-generator.cc
+++ b/deps/v8/src/base/utils/random-number-generator.cc
@@ -23,14 +23,15 @@ static RandomNumberGenerator::EntropySource entropy_source = nullptr;
// static
void RandomNumberGenerator::SetEntropySource(EntropySource source) {
- LockGuard<Mutex> lock_guard(entropy_mutex.Pointer());
+ MutexGuard lock_guard(entropy_mutex.Pointer());
entropy_source = source;
}
RandomNumberGenerator::RandomNumberGenerator() {
// Check if embedder supplied an entropy source.
- { LockGuard<Mutex> lock_guard(entropy_mutex.Pointer());
+ {
+ MutexGuard lock_guard(entropy_mutex.Pointer());
if (entropy_source != nullptr) {
int64_t seed;
if (entropy_source(reinterpret_cast<unsigned char*>(&seed),
@@ -90,7 +91,7 @@ int RandomNumberGenerator::NextInt(int max) {
while (true) {
int rnd = Next(31);
int val = rnd % max;
- if (rnd - val + (max - 1) >= 0) {
+ if (std::numeric_limits<int>::max() - (rnd - val) >= (max - 1)) {
return val;
}
}
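// Illustrative sketch, not part of this patch: NextInt(max) rejection-samples
// so every residue mod max is equally likely. rnd is uniform over [0, 2^31);
// rnd - val is the start of rnd's bucket of size max, and the value is kept
// only if that bucket fits entirely below 2^31. The rewritten test is the
// overflow-safe form of "rnd - val + (max - 1) <= INT_MAX". Assumes <limits>.
static bool InCompleteBucketSketch(int rnd, int max) {
  int val = rnd % max;
  return std::numeric_limits<int>::max() - (rnd - val) >= (max - 1);
}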
diff --git a/deps/v8/src/basic-block-profiler.cc b/deps/v8/src/basic-block-profiler.cc
index d79dbcdfa8..47fd633098 100644
--- a/deps/v8/src/basic-block-profiler.cc
+++ b/deps/v8/src/basic-block-profiler.cc
@@ -13,14 +13,7 @@
namespace v8 {
namespace internal {
-namespace {
-base::LazyInstance<BasicBlockProfiler>::type kBasicBlockProfiler =
- LAZY_INSTANCE_INITIALIZER;
-}
-
-BasicBlockProfiler* BasicBlockProfiler::Get() {
- return kBasicBlockProfiler.Pointer();
-}
+DEFINE_LAZY_LEAKY_OBJECT_GETTER(BasicBlockProfiler, BasicBlockProfiler::Get);
BasicBlockProfiler::Data::Data(size_t n_blocks)
: n_blocks_(n_blocks),
@@ -66,7 +59,7 @@ void BasicBlockProfiler::Data::ResetCounts() {
}
BasicBlockProfiler::Data* BasicBlockProfiler::NewData(size_t n_blocks) {
- base::LockGuard<base::Mutex> lock(&data_list_mutex_);
+ base::MutexGuard lock(&data_list_mutex_);
Data* data = new Data(n_blocks);
data_list_.push_back(data);
return data;
diff --git a/deps/v8/src/bit-vector.h b/deps/v8/src/bit-vector.h
index 5be3198cc6..af4bbc9f0d 100644
--- a/deps/v8/src/bit-vector.h
+++ b/deps/v8/src/bit-vector.h
@@ -66,8 +66,8 @@ class BitVector : public ZoneObject {
};
static const int kDataLengthForInline = 1;
- static const int kDataBits = kPointerSize * 8;
- static const int kDataBitShift = kPointerSize == 8 ? 6 : 5;
+ static const int kDataBits = kSystemPointerSize * 8;
+ static const int kDataBitShift = kSystemPointerSize == 8 ? 6 : 5;
static const uintptr_t kOne = 1; // This saves some static_casts.
BitVector() : length_(0), data_length_(kDataLengthForInline), data_(0) {}
diff --git a/deps/v8/src/bootstrapper.cc b/deps/v8/src/bootstrapper.cc
index d9104863aa..23399546ee 100644
--- a/deps/v8/src/bootstrapper.cc
+++ b/deps/v8/src/bootstrapper.cc
@@ -8,8 +8,8 @@
#include "src/api-inl.h"
#include "src/api-natives.h"
#include "src/base/ieee754.h"
-#include "src/code-stubs.h"
#include "src/compiler.h"
+#include "src/counters.h"
#include "src/debug/debug.h"
#include "src/extensions/externalize-string-extension.h"
#include "src/extensions/free-buffer-extension.h"
@@ -42,8 +42,12 @@
#include "src/objects/js-regexp.h"
#ifdef V8_INTL_SUPPORT
#include "src/objects/js-relative-time-format.h"
+#include "src/objects/js-segment-iterator.h"
#include "src/objects/js-segmenter.h"
#endif // V8_INTL_SUPPORT
+#include "src/objects/js-weak-refs.h"
+#include "src/objects/property-cell.h"
+#include "src/objects/slots-inl.h"
#include "src/objects/templates.h"
#include "src/snapshot/natives.h"
#include "src/snapshot/snapshot.h"
@@ -54,13 +58,17 @@ namespace internal {
void SourceCodeCache::Initialize(Isolate* isolate, bool create_heap_objects) {
cache_ = create_heap_objects ? ReadOnlyRoots(isolate).empty_fixed_array()
- : nullptr;
+ : FixedArray();
+}
+
+void SourceCodeCache::Iterate(RootVisitor* v) {
+ v->VisitRootPointer(Root::kExtensions, nullptr, FullObjectSlot(&cache_));
}
bool SourceCodeCache::Lookup(Isolate* isolate, Vector<const char> name,
Handle<SharedFunctionInfo>* handle) {
for (int i = 0; i < cache_->length(); i += 2) {
- SeqOneByteString* str = SeqOneByteString::cast(cache_->get(i));
+ SeqOneByteString str = SeqOneByteString::cast(cache_->get(i));
if (str->IsUtf8EqualTo(name)) {
*handle = Handle<SharedFunctionInfo>(
SharedFunctionInfo::cast(cache_->get(i + 1)), isolate);
@@ -159,8 +167,7 @@ class Genesis {
Genesis(Isolate* isolate, MaybeHandle<JSGlobalProxy> maybe_global_proxy,
v8::Local<v8::ObjectTemplate> global_proxy_template,
size_t context_snapshot_index,
- v8::DeserializeEmbedderFieldsCallback embedder_fields_deserializer,
- GlobalContextType context_type);
+ v8::DeserializeEmbedderFieldsCallback embedder_fields_deserializer);
Genesis(Isolate* isolate, MaybeHandle<JSGlobalProxy> maybe_global_proxy,
v8::Local<v8::ObjectTemplate> global_proxy_template);
~Genesis() = default;
@@ -219,11 +226,12 @@ class Genesis {
void InstallGlobalThisBinding();
// New context initialization. Used for creating a context from scratch.
void InitializeGlobal(Handle<JSGlobalObject> global_object,
- Handle<JSFunction> empty_function,
- GlobalContextType context_type);
+ Handle<JSFunction> empty_function);
void InitializeExperimentalGlobal();
+ void InitializeIteratorFunctions();
+ void InitializeCallSiteBuiltins();
// Depending on the situation, expose and/or get rid of the utils object.
- void ConfigureUtilsObject(GlobalContextType context_type);
+ void ConfigureUtilsObject();
#define DECLARE_FEATURE_INITIALIZATION(id, descr) \
void InitializeGlobal_##id();
@@ -239,18 +247,15 @@ class Genesis {
};
Handle<JSFunction> CreateArrayBuffer(Handle<String> name,
ArrayBufferKind array_buffer_kind);
- Handle<JSFunction> InstallInternalArray(Handle<JSObject> target,
- const char* name,
- ElementsKind elements_kind);
- bool InstallNatives(GlobalContextType context_type);
+ void InstallInternalPackedArrayFunction(Handle<JSObject> prototype,
+ const char* name);
+ void InstallInternalPackedArray(Handle<JSObject> target, const char* name);
+ bool InstallNatives();
Handle<JSFunction> InstallTypedArray(const char* name,
ElementsKind elements_kind);
bool InstallExtraNatives();
- bool InstallExperimentalExtraNatives();
- bool InstallDebuggerNatives();
void InstallBuiltinFunctionIds();
- void InstallExperimentalBuiltinFunctionIds();
void InitializeNormalizedMapCaches();
enum ExtensionTraversalState {
@@ -298,8 +303,6 @@ class Genesis {
void TransferNamedProperties(Handle<JSObject> from, Handle<JSObject> to);
void TransferIndexedProperties(Handle<JSObject> from, Handle<JSObject> to);
- static bool CallUtilsFunction(Isolate* isolate, const char* name);
-
static bool CompileExtension(Isolate* isolate, v8::Extension* extension);
Isolate* isolate_;
@@ -327,21 +330,19 @@ Handle<Context> Bootstrapper::CreateEnvironment(
MaybeHandle<JSGlobalProxy> maybe_global_proxy,
v8::Local<v8::ObjectTemplate> global_proxy_template,
v8::ExtensionConfiguration* extensions, size_t context_snapshot_index,
- v8::DeserializeEmbedderFieldsCallback embedder_fields_deserializer,
- GlobalContextType context_type) {
+ v8::DeserializeEmbedderFieldsCallback embedder_fields_deserializer) {
HandleScope scope(isolate_);
Handle<Context> env;
{
Genesis genesis(isolate_, maybe_global_proxy, global_proxy_template,
- context_snapshot_index, embedder_fields_deserializer,
- context_type);
+ context_snapshot_index, embedder_fields_deserializer);
env = genesis.result();
if (env.is_null() || !InstallExtensions(env, extensions)) {
return Handle<Context>();
}
}
- // Log all maps created during bootstrapping.
- if (FLAG_trace_maps) LOG(isolate_, LogMaps());
+ LogAllMaps();
+ isolate_->heap()->NotifyBootstrapComplete();
return scope.CloseAndEscape(env);
}
@@ -355,11 +356,18 @@ Handle<JSGlobalProxy> Bootstrapper::NewRemoteContext(
global_proxy = genesis.global_proxy();
if (global_proxy.is_null()) return Handle<JSGlobalProxy>();
}
- // Log all maps created during bootstrapping.
- if (FLAG_trace_maps) LOG(isolate_, LogMaps());
+ LogAllMaps();
return scope.CloseAndEscape(global_proxy);
}
+void Bootstrapper::LogAllMaps() {
+ if (!FLAG_trace_maps || isolate_->initialized_from_snapshot()) return;
+  // Log all created Map objects that are on the heap. For snapshots, the Map
+ // logging happens during deserialization in order to avoid printing Maps
+ // multiple times during partial deserialization.
+ LOG(isolate_, LogAllMaps());
+}
+
void Bootstrapper::DetachGlobal(Handle<Context> env) {
isolate_->counters()->errors_thrown_per_context()->AddSample(
env->GetErrorsThrown());
@@ -398,43 +406,20 @@ V8_NOINLINE Handle<SharedFunctionInfo> SimpleCreateBuiltinSharedFunctionInfo(
return shared;
}
-V8_NOINLINE void InstallFunction(Isolate* isolate, Handle<JSObject> target,
- Handle<Name> property_name,
- Handle<JSFunction> function,
- Handle<String> function_name,
- PropertyAttributes attributes = DONT_ENUM) {
- JSObject::AddProperty(isolate, target, property_name, function, attributes);
-}
-
-V8_NOINLINE void InstallFunction(Isolate* isolate, Handle<JSObject> target,
- Handle<JSFunction> function, Handle<Name> name,
- PropertyAttributes attributes = DONT_ENUM) {
- Handle<String> name_string =
- Name::ToFunctionName(isolate, name).ToHandleChecked();
- InstallFunction(isolate, target, name, function, name_string, attributes);
-}
-
V8_NOINLINE Handle<JSFunction> CreateFunction(
Isolate* isolate, Handle<String> name, InstanceType type, int instance_size,
- int inobject_properties, MaybeHandle<Object> maybe_prototype,
+ int inobject_properties, Handle<Object> prototype,
Builtins::Name builtin_id) {
- Handle<Object> prototype;
Handle<JSFunction> result;
- if (maybe_prototype.ToHandle(&prototype)) {
- NewFunctionArgs args = NewFunctionArgs::ForBuiltinWithPrototype(
- name, prototype, type, instance_size, inobject_properties, builtin_id,
- IMMUTABLE);
+ NewFunctionArgs args = NewFunctionArgs::ForBuiltinWithPrototype(
+ name, prototype, type, instance_size, inobject_properties, builtin_id,
+ IMMUTABLE);
- result = isolate->factory()->NewFunction(args);
- // Make the JSFunction's prototype object fast.
- JSObject::MakePrototypesFast(handle(result->prototype(), isolate),
- kStartAtReceiver, isolate);
- } else {
- NewFunctionArgs args = NewFunctionArgs::ForBuiltinWithoutPrototype(
- name, builtin_id, LanguageMode::kStrict);
- result = isolate->factory()->NewFunction(args);
- }
+ result = isolate->factory()->NewFunction(args);
+ // Make the JSFunction's prototype object fast.
+ JSObject::MakePrototypesFast(handle(result->prototype(), isolate),
+ kStartAtReceiver, isolate);
// Make the resulting JSFunction object fast.
JSObject::MakePrototypesFast(result, kStartAtReceiver, isolate);
@@ -442,37 +427,45 @@ V8_NOINLINE Handle<JSFunction> CreateFunction(
return result;
}
+V8_NOINLINE Handle<JSFunction> CreateFunction(
+ Isolate* isolate, const char* name, InstanceType type, int instance_size,
+ int inobject_properties, Handle<Object> prototype,
+ Builtins::Name builtin_id) {
+ return CreateFunction(
+ isolate, isolate->factory()->InternalizeUtf8String(name), type,
+ instance_size, inobject_properties, prototype, builtin_id);
+}
+
V8_NOINLINE Handle<JSFunction> InstallFunction(
- Isolate* isolate, Handle<JSObject> target, Handle<Name> name,
+ Isolate* isolate, Handle<JSObject> target, Handle<String> name,
InstanceType type, int instance_size, int inobject_properties,
- MaybeHandle<Object> maybe_prototype, Builtins::Name call,
- PropertyAttributes attributes) {
- Handle<String> name_string =
- Name::ToFunctionName(isolate, name).ToHandleChecked();
- Handle<JSFunction> function =
- CreateFunction(isolate, name_string, type, instance_size,
- inobject_properties, maybe_prototype, call);
- InstallFunction(isolate, target, name, function, name_string, attributes);
+ Handle<Object> prototype, Builtins::Name call) {
+ Handle<JSFunction> function = CreateFunction(
+ isolate, name, type, instance_size, inobject_properties, prototype, call);
+ JSObject::AddProperty(isolate, target, name, function, DONT_ENUM);
return function;
}
V8_NOINLINE Handle<JSFunction> InstallFunction(
Isolate* isolate, Handle<JSObject> target, const char* name,
InstanceType type, int instance_size, int inobject_properties,
- MaybeHandle<Object> maybe_prototype, Builtins::Name call) {
- PropertyAttributes attributes = DONT_ENUM;
- return InstallFunction(
- isolate, target, isolate->factory()->InternalizeUtf8String(name), type,
- instance_size, inobject_properties, maybe_prototype, call, attributes);
+ Handle<Object> prototype, Builtins::Name call) {
+ return InstallFunction(isolate, target,
+ isolate->factory()->InternalizeUtf8String(name), type,
+ instance_size, inobject_properties, prototype, call);
}
V8_NOINLINE Handle<JSFunction> SimpleCreateFunction(Isolate* isolate,
Handle<String> name,
Builtins::Name call,
int len, bool adapt) {
- Handle<JSFunction> fun =
- CreateFunction(isolate, name, JS_OBJECT_TYPE, JSObject::kHeaderSize, 0,
- MaybeHandle<JSObject>(), call);
+ NewFunctionArgs args = NewFunctionArgs::ForBuiltinWithoutPrototype(
+ name, call, LanguageMode::kStrict);
+ Handle<JSFunction> fun = isolate->factory()->NewFunction(args);
+ // Make the resulting JSFunction object fast.
+ JSObject::MakePrototypesFast(fun, kStartAtReceiver, isolate);
+ fun->shared()->set_native(true);
+
if (adapt) {
fun->shared()->set_internal_formal_parameter_count(len);
} else {
@@ -482,65 +475,54 @@ V8_NOINLINE Handle<JSFunction> SimpleCreateFunction(Isolate* isolate,
return fun;
}
-V8_NOINLINE Handle<JSFunction> SimpleInstallFunction(
- Isolate* isolate, Handle<JSObject> base, Handle<Name> property_name,
- Handle<String> function_name, Builtins::Name call, int len, bool adapt,
- PropertyAttributes attrs = DONT_ENUM,
- BuiltinFunctionId id = BuiltinFunctionId::kInvalidBuiltinFunctionId) {
+V8_NOINLINE Handle<JSFunction> InstallFunctionWithBuiltinId(
+ Isolate* isolate, Handle<JSObject> base, const char* name,
+ Builtins::Name call, int len, bool adapt, BuiltinFunctionId id) {
+ DCHECK_NE(BuiltinFunctionId::kInvalidBuiltinFunctionId, id);
+ Handle<String> internalized_name =
+ isolate->factory()->InternalizeUtf8String(name);
Handle<JSFunction> fun =
- SimpleCreateFunction(isolate, function_name, call, len, adapt);
- if (id != BuiltinFunctionId::kInvalidBuiltinFunctionId) {
- fun->shared()->set_builtin_function_id(id);
- }
- InstallFunction(isolate, base, fun, property_name, attrs);
+ SimpleCreateFunction(isolate, internalized_name, call, len, adapt);
+ fun->shared()->set_builtin_function_id(id);
+ JSObject::AddProperty(isolate, base, internalized_name, fun, DONT_ENUM);
return fun;
}
V8_NOINLINE Handle<JSFunction> SimpleInstallFunction(
- Isolate* isolate, Handle<JSObject> base, Handle<String> name,
- Builtins::Name call, int len, bool adapt,
- PropertyAttributes attrs = DONT_ENUM,
- BuiltinFunctionId id = BuiltinFunctionId::kInvalidBuiltinFunctionId) {
- return SimpleInstallFunction(isolate, base, name, name, call, len, adapt,
- attrs, id);
-}
-
-V8_NOINLINE Handle<JSFunction> SimpleInstallFunction(
- Isolate* isolate, Handle<JSObject> base, Handle<Name> property_name,
- const char* function_name, Builtins::Name call, int len, bool adapt,
- PropertyAttributes attrs = DONT_ENUM,
- BuiltinFunctionId id = BuiltinFunctionId::kInvalidBuiltinFunctionId) {
- return SimpleInstallFunction(
- isolate, base, property_name,
- isolate->factory()->InternalizeUtf8String(function_name), call, len,
- adapt, attrs, id);
-}
-
-V8_NOINLINE Handle<JSFunction> SimpleInstallFunction(
Isolate* isolate, Handle<JSObject> base, const char* name,
Builtins::Name call, int len, bool adapt,
- PropertyAttributes attrs = DONT_ENUM,
- BuiltinFunctionId id = BuiltinFunctionId::kInvalidBuiltinFunctionId) {
+ PropertyAttributes attrs = DONT_ENUM) {
  // Although the function name does not have to be internalized, the property
  // name will be internalized during property addition anyway, so do it now.
- return SimpleInstallFunction(isolate, base,
- isolate->factory()->InternalizeUtf8String(name),
- call, len, adapt, attrs, id);
+ Handle<String> internalized_name =
+ isolate->factory()->InternalizeUtf8String(name);
+ Handle<JSFunction> fun =
+ SimpleCreateFunction(isolate, internalized_name, call, len, adapt);
+ JSObject::AddProperty(isolate, base, internalized_name, fun, attrs);
+ return fun;
}
-V8_NOINLINE Handle<JSFunction> SimpleInstallFunction(
- Isolate* isolate, Handle<JSObject> base, const char* name,
- Builtins::Name call, int len, bool adapt, BuiltinFunctionId id) {
- return SimpleInstallFunction(isolate, base, name, call, len, adapt, DONT_ENUM,
- id);
+V8_NOINLINE Handle<JSFunction> InstallFunctionAtSymbol(
+ Isolate* isolate, Handle<JSObject> base, Handle<Symbol> symbol,
+ const char* symbol_string, Builtins::Name call, int len, bool adapt,
+ PropertyAttributes attrs = DONT_ENUM,
+ BuiltinFunctionId id = BuiltinFunctionId::kInvalidBuiltinFunctionId) {
+ Handle<String> internalized_symbol =
+ isolate->factory()->InternalizeUtf8String(symbol_string);
+ Handle<JSFunction> fun =
+ SimpleCreateFunction(isolate, internalized_symbol, call, len, adapt);
+ if (id != BuiltinFunctionId::kInvalidBuiltinFunctionId) {
+ fun->shared()->set_builtin_function_id(id);
+ }
+ JSObject::AddProperty(isolate, base, symbol, fun, attrs);
+ return fun;
}
V8_NOINLINE void SimpleInstallGetterSetter(Isolate* isolate,
Handle<JSObject> base,
Handle<String> name,
Builtins::Name call_getter,
- Builtins::Name call_setter,
- PropertyAttributes attribs) {
+ Builtins::Name call_setter) {
Handle<String> getter_name =
Name::ToFunctionName(isolate, name, isolate->factory()->get_string())
.ToHandleChecked();
@@ -553,7 +535,15 @@ V8_NOINLINE void SimpleInstallGetterSetter(Isolate* isolate,
Handle<JSFunction> setter =
SimpleCreateFunction(isolate, setter_name, call_setter, 1, true);
- JSObject::DefineAccessor(base, name, getter, setter, attribs).Check();
+ JSObject::DefineAccessor(base, name, getter, setter, DONT_ENUM).Check();
+}
+
+void SimpleInstallGetterSetter(Isolate* isolate, Handle<JSObject> base,
+ const char* name, Builtins::Name call_getter,
+ Builtins::Name call_setter) {
+ SimpleInstallGetterSetter(isolate, base,
+ isolate->factory()->InternalizeUtf8String(name),
+ call_getter, call_setter);
}
V8_NOINLINE Handle<JSFunction> SimpleInstallGetter(
@@ -597,6 +587,14 @@ V8_NOINLINE void InstallConstant(Isolate* isolate, Handle<JSObject> holder,
static_cast<PropertyAttributes>(DONT_DELETE | DONT_ENUM | READ_ONLY));
}
+V8_NOINLINE void InstallTrueValuedProperty(Isolate* isolate,
+ Handle<JSObject> holder,
+ const char* name) {
+ JSObject::AddProperty(isolate, holder,
+ isolate->factory()->InternalizeUtf8String(name),
+ isolate->factory()->true_value(), NONE);
+}
+
V8_NOINLINE void InstallSpeciesGetter(Isolate* isolate,
Handle<JSFunction> constructor) {
Factory* factory = isolate->factory();
@@ -607,6 +605,19 @@ V8_NOINLINE void InstallSpeciesGetter(Isolate* isolate,
true);
}
+V8_NOINLINE void InstallToStringTag(Isolate* isolate, Handle<JSObject> holder,
+ Handle<String> value) {
+ JSObject::AddProperty(isolate, holder,
+ isolate->factory()->to_string_tag_symbol(), value,
+ static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
+}
+
+void InstallToStringTag(Isolate* isolate, Handle<JSObject> holder,
+ const char* value) {
+ InstallToStringTag(isolate, holder,
+ isolate->factory()->InternalizeUtf8String(value));
+}
+
} // namespace
Handle<JSFunction> Genesis::CreateEmptyFunction() {
@@ -763,8 +774,7 @@ void Genesis::CreateObjectFunction(Handle<JSFunction> empty_function) {
// --- O b j e c t ---
int inobject_properties = JSObject::kInitialGlobalObjectUnusedPropertiesCount;
- int instance_size =
- JSObject::kHeaderSize + kPointerSize * inobject_properties;
+ int instance_size = JSObject::kHeaderSize + kTaggedSize * inobject_properties;
Handle<JSFunction> object_fun = CreateFunction(
isolate_, factory->Object_string(), JS_OBJECT_TYPE, instance_size,
@@ -775,7 +785,7 @@ void Genesis::CreateObjectFunction(Handle<JSFunction> empty_function) {
{
// Finish setting up Object function's initial map.
- Map* initial_map = object_fun->initial_map();
+ Map initial_map = object_fun->initial_map();
initial_map->set_elements_kind(HOLEY_ELEMENTS);
}
@@ -827,7 +837,7 @@ Handle<Map> CreateNonConstructorMap(Isolate* isolate, Handle<Map> source_map,
// Re-set the unused property fields after changing the instance size.
// TODO(ulan): Do not change instance size after map creation.
int unused_property_fields = map->UnusedPropertyFields();
- map->set_instance_size(map->instance_size() + kPointerSize);
+ map->set_instance_size(map->instance_size() + kTaggedSize);
// The prototype slot shifts the in-object properties area by one slot.
map->SetInObjectPropertiesStartInWords(
map->GetInObjectPropertiesStartInWords() + 1);
@@ -846,9 +856,9 @@ void Genesis::CreateIteratorMaps(Handle<JSFunction> empty) {
Handle<JSObject> iterator_prototype =
factory()->NewJSObject(isolate()->object_function(), TENURED);
- SimpleInstallFunction(isolate(), iterator_prototype,
- factory()->iterator_symbol(), "[Symbol.iterator]",
- Builtins::kReturnReceiver, 0, true);
+ InstallFunctionAtSymbol(isolate(), iterator_prototype,
+ factory()->iterator_symbol(), "[Symbol.iterator]",
+ Builtins::kReturnReceiver, 0, true);
native_context()->set_initial_iterator_prototype(*iterator_prototype);
Handle<JSObject> generator_object_prototype =
@@ -860,10 +870,8 @@ void Genesis::CreateIteratorMaps(Handle<JSFunction> empty) {
factory()->NewJSObject(isolate()->object_function(), TENURED);
JSObject::ForceSetPrototype(generator_function_prototype, empty);
- JSObject::AddProperty(isolate(), generator_function_prototype,
- factory()->to_string_tag_symbol(),
- factory()->InternalizeUtf8String("GeneratorFunction"),
- static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
+ InstallToStringTag(isolate(), generator_function_prototype,
+ "GeneratorFunction");
JSObject::AddProperty(isolate(), generator_function_prototype,
factory()->prototype_string(),
generator_object_prototype,
@@ -873,10 +881,7 @@ void Genesis::CreateIteratorMaps(Handle<JSFunction> empty) {
factory()->constructor_string(),
generator_function_prototype,
static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
- JSObject::AddProperty(isolate(), generator_object_prototype,
- factory()->to_string_tag_symbol(),
- factory()->InternalizeUtf8String("Generator"),
- static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
+ InstallToStringTag(isolate(), generator_object_prototype, "Generator");
SimpleInstallFunction(isolate(), generator_object_prototype, "next",
Builtins::kGeneratorPrototypeNext, 1, false);
SimpleInstallFunction(isolate(), generator_object_prototype, "return",
@@ -934,7 +939,7 @@ void Genesis::CreateAsyncIteratorMaps(Handle<JSFunction> empty) {
Handle<JSObject> async_iterator_prototype =
factory()->NewJSObject(isolate()->object_function(), TENURED);
- SimpleInstallFunction(
+ InstallFunctionAtSymbol(
isolate(), async_iterator_prototype, factory()->async_iterator_symbol(),
"[Symbol.asyncIterator]", Builtins::kReturnReceiver, 0, true);
@@ -942,21 +947,17 @@ void Genesis::CreateAsyncIteratorMaps(Handle<JSFunction> empty) {
// proposal-async-iteration/#sec-%asyncfromsynciteratorprototype%-object
Handle<JSObject> async_from_sync_iterator_prototype =
factory()->NewJSObject(isolate()->object_function(), TENURED);
- SimpleInstallFunction(isolate(), async_from_sync_iterator_prototype,
- factory()->next_string(),
+ SimpleInstallFunction(isolate(), async_from_sync_iterator_prototype, "next",
Builtins::kAsyncFromSyncIteratorPrototypeNext, 1, true);
- SimpleInstallFunction(
- isolate(), async_from_sync_iterator_prototype, factory()->return_string(),
- Builtins::kAsyncFromSyncIteratorPrototypeReturn, 1, true);
- SimpleInstallFunction(
- isolate(), async_from_sync_iterator_prototype, factory()->throw_string(),
- Builtins::kAsyncFromSyncIteratorPrototypeThrow, 1, true);
+ SimpleInstallFunction(isolate(), async_from_sync_iterator_prototype, "return",
+ Builtins::kAsyncFromSyncIteratorPrototypeReturn, 1,
+ true);
+ SimpleInstallFunction(isolate(), async_from_sync_iterator_prototype, "throw",
+ Builtins::kAsyncFromSyncIteratorPrototypeThrow, 1,
+ true);
- JSObject::AddProperty(
- isolate(), async_from_sync_iterator_prototype,
- factory()->to_string_tag_symbol(),
- factory()->InternalizeUtf8String("Async-from-Sync Iterator"),
- static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
+ InstallToStringTag(isolate(), async_from_sync_iterator_prototype,
+ "Async-from-Sync Iterator");
JSObject::ForceSetPrototype(async_from_sync_iterator_prototype,
async_iterator_prototype);
@@ -969,9 +970,6 @@ void Genesis::CreateAsyncIteratorMaps(Handle<JSFunction> empty) {
*async_from_sync_iterator_map);
// Async Generators
- Handle<String> AsyncGeneratorFunction_string =
- factory()->NewStringFromAsciiChecked("AsyncGeneratorFunction", TENURED);
-
Handle<JSObject> async_generator_object_prototype =
factory()->NewJSObject(isolate()->object_function(), TENURED);
Handle<JSObject> async_generator_function_prototype =
@@ -992,10 +990,8 @@ void Genesis::CreateAsyncIteratorMaps(Handle<JSFunction> empty) {
factory()->constructor_string(),
async_generator_function_prototype,
static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
- JSObject::AddProperty(isolate(), async_generator_function_prototype,
- factory()->to_string_tag_symbol(),
- AsyncGeneratorFunction_string,
- static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
+ InstallToStringTag(isolate(), async_generator_function_prototype,
+ "AsyncGeneratorFunction");
// %AsyncGeneratorPrototype%
JSObject::ForceSetPrototype(async_generator_object_prototype,
@@ -1003,10 +999,8 @@ void Genesis::CreateAsyncIteratorMaps(Handle<JSFunction> empty) {
native_context()->set_initial_async_generator_prototype(
*async_generator_object_prototype);
- JSObject::AddProperty(isolate(), async_generator_object_prototype,
- factory()->to_string_tag_symbol(),
- factory()->InternalizeUtf8String("AsyncGenerator"),
- static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
+ InstallToStringTag(isolate(), async_generator_object_prototype,
+ "AsyncGenerator");
SimpleInstallFunction(isolate(), async_generator_object_prototype, "next",
Builtins::kAsyncGeneratorPrototypeNext, 1, false);
SimpleInstallFunction(isolate(), async_generator_object_prototype, "return",
@@ -1058,30 +1052,27 @@ void Genesis::CreateAsyncFunctionMaps(Handle<JSFunction> empty) {
factory()->NewJSObject(isolate()->object_function(), TENURED);
JSObject::ForceSetPrototype(async_function_prototype, empty);
- JSObject::AddProperty(isolate(), async_function_prototype,
- factory()->to_string_tag_symbol(),
- factory()->InternalizeUtf8String("AsyncFunction"),
- static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
+ InstallToStringTag(isolate(), async_function_prototype, "AsyncFunction");
- Handle<Map> map;
- map = CreateNonConstructorMap(
- isolate(), isolate()->strict_function_without_prototype_map(),
- async_function_prototype, "AsyncFunction");
+ Handle<Map> map =
+ Map::Copy(isolate(), isolate()->strict_function_without_prototype_map(),
+ "AsyncFunction");
+ Map::SetPrototype(isolate(), map, async_function_prototype);
native_context()->set_async_function_map(*map);
- map = CreateNonConstructorMap(isolate(), isolate()->method_with_name_map(),
- async_function_prototype,
- "AsyncFunction with name");
+ map = Map::Copy(isolate(), isolate()->method_with_name_map(),
+ "AsyncFunction with name");
+ Map::SetPrototype(isolate(), map, async_function_prototype);
native_context()->set_async_function_with_name_map(*map);
- map = CreateNonConstructorMap(
- isolate(), isolate()->method_with_home_object_map(),
- async_function_prototype, "AsyncFunction with home object");
+ map = Map::Copy(isolate(), isolate()->method_with_home_object_map(),
+ "AsyncFunction with home object");
+ Map::SetPrototype(isolate(), map, async_function_prototype);
native_context()->set_async_function_with_home_object_map(*map);
- map = CreateNonConstructorMap(
- isolate(), isolate()->method_with_name_and_home_object_map(),
- async_function_prototype, "AsyncFunction with name and home object");
+ map = Map::Copy(isolate(), isolate()->method_with_name_and_home_object_map(),
+ "AsyncFunction with name and home object");
+ Map::SetPrototype(isolate(), map, async_function_prototype);
native_context()->set_async_function_with_name_and_home_object_map(*map);
}
@@ -1116,13 +1107,13 @@ void Genesis::CreateJSProxyMaps() {
Descriptor d = Descriptor::DataField(isolate(), factory()->proxy_string(),
JSProxyRevocableResult::kProxyIndex,
NONE, Representation::Tagged());
- map->AppendDescriptor(&d);
+ map->AppendDescriptor(isolate(), &d);
}
{ // revoke
Descriptor d = Descriptor::DataField(
isolate(), factory()->revoke_string(),
JSProxyRevocableResult::kRevokeIndex, NONE, Representation::Tagged());
- map->AppendDescriptor(&d);
+ map->AppendDescriptor(isolate(), &d);
}
Map::SetPrototype(isolate(), map, isolate()->initial_object_prototype());
@@ -1136,7 +1127,7 @@ namespace {
void ReplaceAccessors(Isolate* isolate, Handle<Map> map, Handle<String> name,
PropertyAttributes attributes,
Handle<AccessorPair> accessor_pair) {
- DescriptorArray* descriptors = map->instance_descriptors();
+ DescriptorArray descriptors = map->instance_descriptors();
int idx = descriptors->SearchWithCache(isolate, *name, *map);
Descriptor d = Descriptor::AccessorConstant(name, accessor_pair, attributes);
descriptors->Replace(idx, &d);
@@ -1157,14 +1148,14 @@ void Genesis::AddRestrictedFunctionProperties(Handle<JSFunction> empty) {
accessors);
}
-static void AddToWeakNativeContextList(Isolate* isolate, Context* context) {
+static void AddToWeakNativeContextList(Isolate* isolate, Context context) {
DCHECK(context->IsNativeContext());
Heap* heap = isolate->heap();
#ifdef DEBUG
{ // NOLINT
DCHECK(context->next_context_link()->IsUndefined(isolate));
// Check that context is not in the list yet.
- for (Object* current = heap->native_contexts_list();
+ for (Object current = heap->native_contexts_list();
!current->IsUndefined(isolate);
current = Context::cast(current)->next_context_link()) {
DCHECK(current != context);
@@ -1238,7 +1229,7 @@ Handle<JSGlobalObject> Genesis::CreateNewGlobals(
Handle<FunctionTemplateInfo> global_constructor =
Handle<FunctionTemplateInfo>(
FunctionTemplateInfo::cast(data->constructor()), isolate());
- Handle<Object> proto_template(global_constructor->prototype_template(),
+ Handle<Object> proto_template(global_constructor->GetPrototypeTemplate(),
isolate());
if (!proto_template->IsUndefined(isolate())) {
js_global_object_template =
@@ -1361,7 +1352,7 @@ static void InstallError(Isolate* isolate, Handle<JSObject> global,
Handle<JSFunction> error_fun = InstallFunction(
isolate, global, name, JS_ERROR_TYPE, JSObject::kHeaderSize, 0,
- factory->the_hole_value(), Builtins::kErrorConstructor, DONT_ENUM);
+ factory->the_hole_value(), Builtins::kErrorConstructor);
error_fun->shared()->DontAdaptArguments();
error_fun->shared()->set_length(1);
@@ -1384,15 +1375,15 @@ static void InstallError(Isolate* isolate, Handle<JSObject> global,
if (context_index == Context::ERROR_FUNCTION_INDEX) {
Handle<JSFunction> to_string_fun =
- SimpleInstallFunction(isolate, prototype, factory->toString_string(),
+ SimpleInstallFunction(isolate, prototype, "toString",
Builtins::kErrorPrototypeToString, 0, true);
isolate->native_context()->set_error_to_string(*to_string_fun);
isolate->native_context()->set_initial_error_prototype(*prototype);
} else {
DCHECK(isolate->native_context()->error_to_string()->IsJSFunction());
- InstallFunction(isolate, prototype, isolate->error_to_string(),
- factory->toString_string(), DONT_ENUM);
+ JSObject::AddProperty(isolate, prototype, factory->toString_string(),
+ isolate->error_to_string(), DONT_ENUM);
Handle<JSFunction> global_error = isolate->error_function();
CHECK(JSReceiver::SetPrototype(error_fun, global_error, false,
@@ -1412,7 +1403,7 @@ static void InstallError(Isolate* isolate, Handle<JSObject> global,
Handle<AccessorInfo> info = factory->error_stack_accessor();
Descriptor d = Descriptor::AccessorConstant(handle(info->name(), isolate),
info, DONT_ENUM);
- initial_map->AppendDescriptor(&d);
+ initial_map->AppendDescriptor(isolate, &d);
}
}
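
Editor's note: alias properties (a second name for an already-installed function) are now added with plain JSObject::AddProperty rather than the removed InstallFunction overload; the same pattern recurs below for Date.prototype.toGMTString and String.prototype.trimLeft/trimRight. The shape of the pattern, taken from the Date hunk further down:

// Install the canonical method once, then expose the *same* function object
// under the legacy alias; only the property name differs.
Handle<JSFunction> to_utc_string = SimpleInstallFunction(
    isolate, prototype, "toUTCString", Builtins::kDatePrototypeToUTCString, 0,
    false);
JSObject::AddProperty(isolate, prototype, "toGMTString", to_utc_string,
                      DONT_ENUM);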
@@ -1433,12 +1424,11 @@ void InstallMakeError(Isolate* isolate, int builtin_id, int context_index) {
// This is only called if we are not using snapshots. The equivalent
// work in the snapshot case is done in HookUpGlobalObject.
void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
- Handle<JSFunction> empty_function,
- GlobalContextType context_type) {
+ Handle<JSFunction> empty_function) {
// --- N a t i v e C o n t e x t ---
// Use the empty scope info.
native_context()->set_scope_info(empty_function->shared()->scope_info());
- native_context()->set_previous(nullptr);
+ native_context()->set_previous(Context());
// Set extension and global object.
native_context()->set_extension(*global_object);
// Security setup: Set the security token of the native context to the global
@@ -1459,12 +1449,12 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
JSObject::AddProperty(isolate_, global_object, object_name, object_function,
DONT_ENUM);
- SimpleInstallFunction(isolate_, object_function, factory->assign_string(),
+ SimpleInstallFunction(isolate_, object_function, "assign",
Builtins::kObjectAssign, 2, false);
SimpleInstallFunction(isolate_, object_function, "getOwnPropertyDescriptor",
Builtins::kObjectGetOwnPropertyDescriptor, 2, false);
SimpleInstallFunction(isolate_, object_function,
- factory->getOwnPropertyDescriptors_string(),
+ "getOwnPropertyDescriptors",
Builtins::kObjectGetOwnPropertyDescriptors, 1, false);
SimpleInstallFunction(isolate_, object_function, "getOwnPropertyNames",
Builtins::kObjectGetOwnPropertyNames, 1, true);
@@ -1478,8 +1468,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Builtins::kObjectSeal, 1, false);
Handle<JSFunction> object_create = SimpleInstallFunction(
- isolate_, object_function, factory->create_string(),
- Builtins::kObjectCreate, 2, false);
+ isolate_, object_function, "create", Builtins::kObjectCreate, 2, false);
native_context()->set_object_create(*object_create);
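
Editor's note: throughout this file, call sites switch from pre-internalized factory strings (factory->assign_string(), factory->create_string(), ...) to plain C string literals, which implies a const char* convenience overload that internalizes on the fly. A plausible sketch of that overload; the forwarding body is an assumption, though InternalizeUtf8String is used this way elsewhere in the diff:

// Assumed shape of the const char* overload: internalize, then forward to
// the existing Handle<String> variant.
Handle<JSFunction> SimpleInstallFunction(Isolate* isolate,
                                         Handle<JSObject> base,
                                         const char* name, Builtins::Name call,
                                         int len, bool adapt) {
  return SimpleInstallFunction(isolate, base,
                               isolate->factory()->InternalizeUtf8String(name),
                               call, len, adapt);
}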
Handle<JSFunction> object_define_properties =
@@ -1487,9 +1476,9 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Builtins::kObjectDefineProperties, 2, true);
native_context()->set_object_define_properties(*object_define_properties);
- Handle<JSFunction> object_define_property = SimpleInstallFunction(
- isolate_, object_function, factory->defineProperty_string(),
- Builtins::kObjectDefineProperty, 3, true);
+ Handle<JSFunction> object_define_property =
+ SimpleInstallFunction(isolate_, object_function, "defineProperty",
+ Builtins::kObjectDefineProperty, 3, true);
native_context()->set_object_define_property(*object_define_property);
SimpleInstallFunction(isolate_, object_function, "freeze",
@@ -1515,9 +1504,9 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Handle<JSFunction> object_keys = SimpleInstallFunction(
isolate_, object_function, "keys", Builtins::kObjectKeys, 1, true);
native_context()->set_object_keys(*object_keys);
- SimpleInstallFunction(isolate_, object_function, factory->entries_string(),
+ SimpleInstallFunction(isolate_, object_function, "entries",
Builtins::kObjectEntries, 1, true);
- SimpleInstallFunction(isolate_, object_function, factory->values_string(),
+ SimpleInstallFunction(isolate_, object_function, "values",
Builtins::kObjectValues, 1, true);
SimpleInstallFunction(isolate_, isolate_->initial_object_prototype(),
@@ -1541,20 +1530,17 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
SimpleInstallFunction(
isolate_, isolate_->initial_object_prototype(), "propertyIsEnumerable",
Builtins::kObjectPrototypePropertyIsEnumerable, 1, false);
- Handle<JSFunction> object_to_string =
- SimpleInstallFunction(isolate_, isolate_->initial_object_prototype(),
- factory->toString_string(),
- Builtins::kObjectPrototypeToString, 0, true);
+ Handle<JSFunction> object_to_string = SimpleInstallFunction(
+ isolate_, isolate_->initial_object_prototype(), "toString",
+ Builtins::kObjectPrototypeToString, 0, true);
native_context()->set_object_to_string(*object_to_string);
- Handle<JSFunction> object_value_of = SimpleInstallFunction(
- isolate_, isolate_->initial_object_prototype(), "valueOf",
- Builtins::kObjectPrototypeValueOf, 0, true);
- native_context()->set_object_value_of(*object_value_of);
+ SimpleInstallFunction(isolate_, isolate_->initial_object_prototype(),
+ "valueOf", Builtins::kObjectPrototypeValueOf, 0,
+ true);
- SimpleInstallGetterSetter(isolate_, isolate_->initial_object_prototype(),
- factory->proto_string(),
- Builtins::kObjectPrototypeGetProto,
- Builtins::kObjectPrototypeSetProto, DONT_ENUM);
+ SimpleInstallGetterSetter(
+ isolate_, isolate_->initial_object_prototype(), factory->proto_string(),
+ Builtins::kObjectPrototypeGetProto, Builtins::kObjectPrototypeSetProto);
SimpleInstallFunction(isolate_, isolate_->initial_object_prototype(),
"toLocaleString",
@@ -1580,17 +1566,17 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
// Setup the methods on the %FunctionPrototype%.
JSObject::AddProperty(isolate_, prototype, factory->constructor_string(),
function_fun, DONT_ENUM);
- SimpleInstallFunction(isolate_, prototype, factory->apply_string(),
+ SimpleInstallFunction(isolate_, prototype, "apply",
Builtins::kFunctionPrototypeApply, 2, false);
- SimpleInstallFunction(isolate_, prototype, factory->bind_string(),
+ SimpleInstallFunction(isolate_, prototype, "bind",
Builtins::kFastFunctionPrototypeBind, 1, false);
- SimpleInstallFunction(isolate_, prototype, factory->call_string(),
+ SimpleInstallFunction(isolate_, prototype, "call",
Builtins::kFunctionPrototypeCall, 1, false);
- SimpleInstallFunction(isolate_, prototype, factory->toString_string(),
+ SimpleInstallFunction(isolate_, prototype, "toString",
Builtins::kFunctionPrototypeToString, 0, false);
// Install the @@hasInstance function.
- Handle<JSFunction> has_instance = SimpleInstallFunction(
+ Handle<JSFunction> has_instance = InstallFunctionAtSymbol(
isolate_, prototype, factory->has_instance_symbol(),
"[Symbol.hasInstance]", Builtins::kFunctionPrototypeHasInstance, 1,
true,
@@ -1625,16 +1611,6 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
}
{ // --- A s y n c G e n e r a t o r ---
- Handle<JSFunction> await_caught =
- SimpleCreateFunction(isolate_, factory->empty_string(),
- Builtins::kAsyncGeneratorAwaitCaught, 1, false);
- native_context()->set_async_generator_await_caught(*await_caught);
-
- Handle<JSFunction> await_uncaught =
- SimpleCreateFunction(isolate_, factory->empty_string(),
- Builtins::kAsyncGeneratorAwaitUncaught, 1, false);
- native_context()->set_async_generator_await_uncaught(*await_uncaught);
-
Handle<SharedFunctionInfo> info = SimpleCreateSharedFunctionInfo(
isolate_, Builtins::kAsyncGeneratorAwaitResolveClosure,
factory->empty_string(), 1);
@@ -1668,6 +1644,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
*info);
}
+ Handle<JSFunction> array_prototype_to_string_fun;
{ // --- A r r a y ---
Handle<JSFunction> array_function = InstallFunction(
isolate_, global, "Array", JS_ARRAY_TYPE, JSArray::kSize, 0,
@@ -1694,7 +1671,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
{ // Add length.
Descriptor d = Descriptor::AccessorConstant(
factory->length_string(), factory->array_length_accessor(), attribs);
- initial_map->AppendDescriptor(&d);
+ initial_map->AppendDescriptor(isolate(), &d);
}
InstallWithIntrinsicDefaultProto(isolate_, array_function,
@@ -1751,23 +1728,37 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Builtins::kArrayPrototypeSlice, 2, false);
SimpleInstallFunction(isolate_, proto, "sort",
Builtins::kArrayPrototypeSort, 1, false);
- SimpleInstallFunction(isolate_, proto, "splice", Builtins::kArraySplice, 2,
- false);
+ SimpleInstallFunction(isolate_, proto, "splice",
+ Builtins::kArrayPrototypeSplice, 2, false);
SimpleInstallFunction(isolate_, proto, "includes", Builtins::kArrayIncludes,
1, false);
SimpleInstallFunction(isolate_, proto, "indexOf", Builtins::kArrayIndexOf,
1, false);
- SimpleInstallFunction(isolate_, proto, "keys",
- Builtins::kArrayPrototypeKeys, 0, true,
- BuiltinFunctionId::kArrayKeys);
- SimpleInstallFunction(isolate_, proto, "entries",
- Builtins::kArrayPrototypeEntries, 0, true,
- BuiltinFunctionId::kArrayEntries);
- SimpleInstallFunction(isolate_, proto, factory->iterator_symbol(), "values",
- Builtins::kArrayPrototypeValues, 0, true, DONT_ENUM,
- BuiltinFunctionId::kArrayValues);
- SimpleInstallFunction(isolate_, proto, "forEach", Builtins::kArrayForEach,
- 1, false);
+ SimpleInstallFunction(isolate_, proto, "join",
+ Builtins::kArrayPrototypeJoin, 1, false);
+
+ { // Set up iterator-related properties.
+ Handle<JSFunction> keys = InstallFunctionWithBuiltinId(
+ isolate_, proto, "keys", Builtins::kArrayPrototypeKeys, 0, true,
+ BuiltinFunctionId::kArrayKeys);
+ native_context()->set_array_keys_iterator(*keys);
+
+ Handle<JSFunction> entries = InstallFunctionWithBuiltinId(
+ isolate_, proto, "entries", Builtins::kArrayPrototypeEntries, 0, true,
+ BuiltinFunctionId::kArrayEntries);
+ native_context()->set_array_entries_iterator(*entries);
+
+ Handle<JSFunction> values = InstallFunctionWithBuiltinId(
+ isolate_, proto, "values", Builtins::kArrayPrototypeValues, 0, true,
+ BuiltinFunctionId::kArrayValues);
+ JSObject::AddProperty(isolate_, proto, factory->iterator_symbol(), values,
+ DONT_ENUM);
+ native_context()->set_array_values_iterator(*values);
+ }
+
+ Handle<JSFunction> for_each_fun = SimpleInstallFunction(
+ isolate_, proto, "forEach", Builtins::kArrayForEach, 1, false);
+ native_context()->set_array_for_each_iterator(*for_each_fun);
SimpleInstallFunction(isolate_, proto, "filter", Builtins::kArrayFilter, 1,
false);
SimpleInstallFunction(isolate_, proto, "map", Builtins::kArrayMap, 1,
@@ -1780,6 +1771,30 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
false);
SimpleInstallFunction(isolate_, proto, "reduceRight",
Builtins::kArrayReduceRight, 1, false);
+ SimpleInstallFunction(isolate_, proto, "toLocaleString",
+ Builtins::kArrayPrototypeToLocaleString, 0, false);
+ array_prototype_to_string_fun =
+ SimpleInstallFunction(isolate_, proto, "toString",
+ Builtins::kArrayPrototypeToString, 0, false);
+
+ Handle<JSObject> unscopables = factory->NewJSObjectWithNullProto();
+ InstallTrueValuedProperty(isolate_, unscopables, "copyWithin");
+ InstallTrueValuedProperty(isolate_, unscopables, "entries");
+ InstallTrueValuedProperty(isolate_, unscopables, "fill");
+ InstallTrueValuedProperty(isolate_, unscopables, "find");
+ InstallTrueValuedProperty(isolate_, unscopables, "findIndex");
+ InstallTrueValuedProperty(isolate_, unscopables, "flat");
+ InstallTrueValuedProperty(isolate_, unscopables, "flatMap");
+ InstallTrueValuedProperty(isolate_, unscopables, "includes");
+ InstallTrueValuedProperty(isolate_, unscopables, "keys");
+ InstallTrueValuedProperty(isolate_, unscopables, "values");
+ JSObject::MigrateSlowToFast(unscopables, 0, "Bootstrapping");
+ JSObject::AddProperty(
+ isolate_, proto, factory->unscopables_symbol(), unscopables,
+ static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
+
+ Handle<Map> map(proto->map(), isolate_);
+ Map::SetShouldBeFastPrototypeMap(map, true, isolate_);
}
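
Editor's note: the Array section gains the @@unscopables object required by the `with`-statement semantics of ES2015+, plus toString/toLocaleString and a fast-prototype map hint. InstallTrueValuedProperty is new here; from its call sites, a plausible sketch (the body is an assumption):

// Plausible body of InstallTrueValuedProperty: each unscopables entry is an
// ordinary data property whose value is `true`, per CreateDataProperty
// semantics (writable, enumerable, configurable).
void InstallTrueValuedProperty(Isolate* isolate, Handle<JSObject> holder,
                               const char* name) {
  JSObject::AddProperty(isolate, holder, name,
                        isolate->factory()->true_value(), NONE);
}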
{ // --- A r r a y I t e r a t o r ---
@@ -1790,14 +1805,12 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
factory->NewJSObject(isolate_->object_function(), TENURED);
JSObject::ForceSetPrototype(array_iterator_prototype, iterator_prototype);
- JSObject::AddProperty(
- isolate_, array_iterator_prototype, factory->to_string_tag_symbol(),
- factory->ArrayIterator_string(),
- static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
+ InstallToStringTag(isolate_, array_iterator_prototype,
+ factory->ArrayIterator_string());
- SimpleInstallFunction(isolate_, array_iterator_prototype, "next",
- Builtins::kArrayIteratorPrototypeNext, 0, true,
- BuiltinFunctionId::kArrayIteratorNext);
+ InstallFunctionWithBuiltinId(isolate_, array_iterator_prototype, "next",
+ Builtins::kArrayIteratorPrototypeNext, 0, true,
+ BuiltinFunctionId::kArrayIteratorNext);
Handle<JSFunction> array_iterator_function =
CreateFunction(isolate_, factory->ArrayIterator_string(),
@@ -1861,72 +1874,39 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Handle<JSFunction> parse_float_fun =
SimpleInstallFunction(isolate_, number_fun, "parseFloat",
Builtins::kNumberParseFloat, 1, true);
- JSObject::AddProperty(isolate_, global_object,
- factory->InternalizeUtf8String("parseFloat"),
+ JSObject::AddProperty(isolate_, global_object, "parseFloat",
parse_float_fun, DONT_ENUM);
// Install Number.parseInt and Global.parseInt.
Handle<JSFunction> parse_int_fun = SimpleInstallFunction(
isolate_, number_fun, "parseInt", Builtins::kNumberParseInt, 2, true);
- JSObject::AddProperty(isolate_, global_object,
- factory->InternalizeUtf8String("parseInt"),
- parse_int_fun, DONT_ENUM);
+ JSObject::AddProperty(isolate_, global_object, "parseInt", parse_int_fun,
+ DONT_ENUM);
// Install Number constants
- double kMaxValue = 1.7976931348623157e+308;
- double kMinValue = 5e-324;
- double kMinSafeInteger = -kMaxSafeInteger;
- double kEPS = 2.220446049250313e-16;
-
- Handle<Object> infinity = factory->infinity_value();
- Handle<Object> nan = factory->nan_value();
- Handle<String> nan_name = factory->InternalizeUtf8String("NaN");
-
- JSObject::AddProperty(
- isolate_, number_fun, factory->InternalizeUtf8String("MAX_VALUE"),
- factory->NewNumber(kMaxValue),
- static_cast<PropertyAttributes>(DONT_DELETE | DONT_ENUM | READ_ONLY));
- JSObject::AddProperty(
- isolate_, number_fun, factory->InternalizeUtf8String("MIN_VALUE"),
- factory->NewNumber(kMinValue),
- static_cast<PropertyAttributes>(DONT_DELETE | DONT_ENUM | READ_ONLY));
- JSObject::AddProperty(
- isolate_, number_fun, nan_name, nan,
- static_cast<PropertyAttributes>(DONT_DELETE | DONT_ENUM | READ_ONLY));
- JSObject::AddProperty(
- isolate_, number_fun,
- factory->InternalizeUtf8String("NEGATIVE_INFINITY"),
- factory->NewNumber(-V8_INFINITY),
- static_cast<PropertyAttributes>(DONT_DELETE | DONT_ENUM | READ_ONLY));
- JSObject::AddProperty(
- isolate_, number_fun,
- factory->InternalizeUtf8String("POSITIVE_INFINITY"), infinity,
- static_cast<PropertyAttributes>(DONT_DELETE | DONT_ENUM | READ_ONLY));
- JSObject::AddProperty(
- isolate_, number_fun,
- factory->InternalizeUtf8String("MAX_SAFE_INTEGER"),
- factory->NewNumber(kMaxSafeInteger),
- static_cast<PropertyAttributes>(DONT_DELETE | DONT_ENUM | READ_ONLY));
- JSObject::AddProperty(
- isolate_, number_fun,
- factory->InternalizeUtf8String("MIN_SAFE_INTEGER"),
- factory->NewNumber(kMinSafeInteger),
- static_cast<PropertyAttributes>(DONT_DELETE | DONT_ENUM | READ_ONLY));
- JSObject::AddProperty(
- isolate_, number_fun, factory->InternalizeUtf8String("EPSILON"),
- factory->NewNumber(kEPS),
- static_cast<PropertyAttributes>(DONT_DELETE | DONT_ENUM | READ_ONLY));
-
- JSObject::AddProperty(
- isolate_, global, factory->InternalizeUtf8String("Infinity"), infinity,
- static_cast<PropertyAttributes>(DONT_DELETE | DONT_ENUM | READ_ONLY));
- JSObject::AddProperty(
- isolate_, global, nan_name, nan,
- static_cast<PropertyAttributes>(DONT_DELETE | DONT_ENUM | READ_ONLY));
- JSObject::AddProperty(
- isolate_, global, factory->InternalizeUtf8String("undefined"),
- factory->undefined_value(),
- static_cast<PropertyAttributes>(DONT_DELETE | DONT_ENUM | READ_ONLY));
+ const double kMaxValue = 1.7976931348623157e+308;
+ const double kMinValue = 5e-324;
+ const double kMinSafeInteger = -kMaxSafeInteger;
+ const double kEPS = 2.220446049250313e-16;
+
+ InstallConstant(isolate_, number_fun, "MAX_VALUE",
+ factory->NewNumber(kMaxValue));
+ InstallConstant(isolate_, number_fun, "MIN_VALUE",
+ factory->NewNumber(kMinValue));
+ InstallConstant(isolate_, number_fun, "NaN", factory->nan_value());
+ InstallConstant(isolate_, number_fun, "NEGATIVE_INFINITY",
+ factory->NewNumber(-V8_INFINITY));
+ InstallConstant(isolate_, number_fun, "POSITIVE_INFINITY",
+ factory->infinity_value());
+ InstallConstant(isolate_, number_fun, "MAX_SAFE_INTEGER",
+ factory->NewNumber(kMaxSafeInteger));
+ InstallConstant(isolate_, number_fun, "MIN_SAFE_INTEGER",
+ factory->NewNumber(kMinSafeInteger));
+ InstallConstant(isolate_, number_fun, "EPSILON", factory->NewNumber(kEPS));
+
+ InstallConstant(isolate_, global, "Infinity", factory->infinity_value());
+ InstallConstant(isolate_, global, "NaN", factory->nan_value());
+ InstallConstant(isolate_, global, "undefined", factory->undefined_value());
}
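
Editor's note: the long run of JSObject::AddProperty calls with DONT_DELETE | DONT_ENUM | READ_ONLY collapses into InstallConstant. The removed lines show exactly what the helper must do, so this reconstruction is well grounded:

// Reconstruction of InstallConstant from the expanded calls it replaces: a
// non-writable, non-enumerable, non-configurable data property.
void InstallConstant(Isolate* isolate, Handle<JSObject> holder,
                     const char* name, Handle<Object> value) {
  JSObject::AddProperty(
      isolate, holder, isolate->factory()->InternalizeUtf8String(name), value,
      static_cast<PropertyAttributes>(DONT_DELETE | DONT_ENUM | READ_ONLY));
}

The Math constants below (E, SQRT2, and friends, visible as context lines) already used this helper; the Number and global constants are simply brought in line with it.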
{ // --- B o o l e a n ---
@@ -1977,7 +1957,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
{ // Add length.
Descriptor d = Descriptor::AccessorConstant(
factory->length_string(), factory->string_length_accessor(), attribs);
- string_map->AppendDescriptor(&d);
+ string_map->AppendDescriptor(isolate(), &d);
}
// Install the String.fromCharCode function.
@@ -2091,17 +2071,15 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Handle<JSFunction> trim_start_fun =
SimpleInstallFunction(isolate_, prototype, "trimStart",
Builtins::kStringPrototypeTrimStart, 0, false);
- JSObject::AddProperty(isolate_, prototype,
- factory->InternalizeUtf8String("trimLeft"),
- trim_start_fun, DONT_ENUM);
+ JSObject::AddProperty(isolate_, prototype, "trimLeft", trim_start_fun,
+ DONT_ENUM);
// Install `String.prototype.trimEnd` with `trimRight` alias.
Handle<JSFunction> trim_end_fun =
SimpleInstallFunction(isolate_, prototype, "trimEnd",
Builtins::kStringPrototypeTrimEnd, 0, false);
- JSObject::AddProperty(isolate_, prototype,
- factory->InternalizeUtf8String("trimRight"),
- trim_end_fun, DONT_ENUM);
+ JSObject::AddProperty(isolate_, prototype, "trimRight", trim_end_fun,
+ DONT_ENUM);
SimpleInstallFunction(isolate_, prototype, "toLocaleLowerCase",
Builtins::kStringPrototypeToLocaleLowerCase, 0,
@@ -2123,10 +2101,10 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
SimpleInstallFunction(isolate_, prototype, "valueOf",
Builtins::kStringPrototypeValueOf, 0, true);
- SimpleInstallFunction(isolate_, prototype, factory->iterator_symbol(),
- "[Symbol.iterator]",
- Builtins::kStringPrototypeIterator, 0, true,
- DONT_ENUM, BuiltinFunctionId::kStringIterator);
+ InstallFunctionAtSymbol(isolate_, prototype, factory->iterator_symbol(),
+ "[Symbol.iterator]",
+ Builtins::kStringPrototypeIterator, 0, true,
+ DONT_ENUM, BuiltinFunctionId::kStringIterator);
}
{ // --- S t r i n g I t e r a t o r ---
@@ -2137,14 +2115,11 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
factory->NewJSObject(isolate_->object_function(), TENURED);
JSObject::ForceSetPrototype(string_iterator_prototype, iterator_prototype);
- JSObject::AddProperty(
- isolate_, string_iterator_prototype, factory->to_string_tag_symbol(),
- factory->InternalizeUtf8String("String Iterator"),
- static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
+ InstallToStringTag(isolate_, string_iterator_prototype, "String Iterator");
- SimpleInstallFunction(isolate_, string_iterator_prototype, "next",
- Builtins::kStringIteratorPrototypeNext, 0, true,
- BuiltinFunctionId::kStringIteratorNext);
+ InstallFunctionWithBuiltinId(isolate_, string_iterator_prototype, "next",
+ Builtins::kStringIteratorPrototypeNext, 0,
+ true, BuiltinFunctionId::kStringIteratorNext);
Handle<JSFunction> string_iterator_function = CreateFunction(
isolate_, factory->InternalizeUtf8String("StringIterator"),
@@ -2198,32 +2173,21 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Handle<JSObject> prototype(JSObject::cast(symbol_fun->instance_prototype()),
isolate());
- // Install the @@toStringTag property on the {prototype}.
- JSObject::AddProperty(
- isolate_, prototype, factory->to_string_tag_symbol(),
- factory->InternalizeUtf8String("Symbol"),
- static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
+ InstallToStringTag(isolate_, prototype, "Symbol");
// Install the Symbol.prototype methods.
- SimpleInstallFunction(isolate_, prototype, "toString",
- Builtins::kSymbolPrototypeToString, 0, true,
- BuiltinFunctionId::kSymbolPrototypeToString);
- SimpleInstallFunction(isolate_, prototype, "valueOf",
- Builtins::kSymbolPrototypeValueOf, 0, true,
- BuiltinFunctionId::kSymbolPrototypeValueOf);
+ InstallFunctionWithBuiltinId(isolate_, prototype, "toString",
+ Builtins::kSymbolPrototypeToString, 0, true,
+ BuiltinFunctionId::kSymbolPrototypeToString);
+ InstallFunctionWithBuiltinId(isolate_, prototype, "valueOf",
+ Builtins::kSymbolPrototypeValueOf, 0, true,
+ BuiltinFunctionId::kSymbolPrototypeValueOf);
// Install the @@toPrimitive function.
- Handle<JSFunction> to_primitive = InstallFunction(
- isolate_, prototype, factory->to_primitive_symbol(), JS_OBJECT_TYPE,
- JSObject::kHeaderSize, 0, MaybeHandle<JSObject>(),
- Builtins::kSymbolPrototypeToPrimitive,
+ InstallFunctionAtSymbol(
+ isolate_, prototype, factory->to_primitive_symbol(),
+ "[Symbol.toPrimitive]", Builtins::kSymbolPrototypeToPrimitive, 1, true,
static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
-
- // Set the expected parameters for @@toPrimitive to 1; required by builtin.
- to_primitive->shared()->set_internal_formal_parameter_count(1);
-
- // Set the length for the function to satisfy ECMA-262.
- to_primitive->shared()->set_length(1);
}
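
Editor's note: InstallFunctionAtSymbol replaces the expanded @@toPrimitive install; the deleted lines spell out what it must cover: create the function under a readable debug name like "[Symbol.toPrimitive]", add it at the symbol key, and satisfy the ECMA-262 length and the builtin's expected parameter count. A reconstruction under those assumptions (the default attribute and the delegation to SimpleCreateFunction, which presumably sets length/parameter count when adapt is true, are inferred):

Handle<JSFunction> InstallFunctionAtSymbol(
    Isolate* isolate, Handle<JSObject> base, Handle<Symbol> symbol,
    const char* symbol_string, Builtins::Name call, int len, bool adapt,
    PropertyAttributes attrs = DONT_ENUM) {
  // The string name is only for stack traces and Function.prototype.toString;
  // the property key is the symbol.
  Handle<String> name =
      isolate->factory()->InternalizeUtf8String(symbol_string);
  Handle<JSFunction> fun =
      SimpleCreateFunction(isolate, name, call, len, adapt);
  JSObject::AddProperty(isolate, base, symbol, fun, attrs);
  return fun;
}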
{ // --- D a t e ---
@@ -2259,8 +2223,8 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Handle<JSFunction> to_utc_string =
SimpleInstallFunction(isolate_, prototype, "toUTCString",
Builtins::kDatePrototypeToUTCString, 0, false);
- InstallFunction(isolate_, prototype, to_utc_string,
- factory->InternalizeUtf8String("toGMTString"), DONT_ENUM);
+ JSObject::AddProperty(isolate_, prototype, "toGMTString", to_utc_string,
+ DONT_ENUM);
SimpleInstallFunction(isolate_, prototype, "getDate",
Builtins::kDatePrototypeGetDate, 0, true);
SimpleInstallFunction(isolate_, prototype, "setDate",
@@ -2354,17 +2318,10 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
#endif // V8_INTL_SUPPORT
// Install the @@toPrimitive function.
- Handle<JSFunction> to_primitive = InstallFunction(
- isolate_, prototype, factory->to_primitive_symbol(), JS_OBJECT_TYPE,
- JSObject::kHeaderSize, 0, MaybeHandle<JSObject>(),
- Builtins::kDatePrototypeToPrimitive,
+ InstallFunctionAtSymbol(
+ isolate_, prototype, factory->to_primitive_symbol(),
+ "[Symbol.toPrimitive]", Builtins::kDatePrototypeToPrimitive, 1, true,
static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
-
- // Set the expected parameters for @@toPrimitive to 1; required by builtin.
- to_primitive->shared()->set_internal_formal_parameter_count(1);
-
- // Set the length for the function to satisfy ECMA-262.
- to_primitive->shared()->set_length(1);
}
{
@@ -2388,42 +2345,43 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
InstallSpeciesGetter(isolate_, promise_fun);
- SimpleInstallFunction(isolate_, promise_fun, "all", Builtins::kPromiseAll,
- 1, true);
+ Handle<JSFunction> promise_all = InstallFunctionWithBuiltinId(
+ isolate_, promise_fun, "all", Builtins::kPromiseAll, 1, true,
+ BuiltinFunctionId::kPromiseAll);
+ native_context()->set_promise_all(*promise_all);
- SimpleInstallFunction(isolate_, promise_fun, "race", Builtins::kPromiseRace,
- 1, true);
+ InstallFunctionWithBuiltinId(isolate_, promise_fun, "race",
+ Builtins::kPromiseRace, 1, true,
+ BuiltinFunctionId::kPromiseRace);
- SimpleInstallFunction(isolate_, promise_fun, "resolve",
- Builtins::kPromiseResolveTrampoline, 1, true);
+ InstallFunctionWithBuiltinId(isolate_, promise_fun, "resolve",
+ Builtins::kPromiseResolveTrampoline, 1, true,
+ BuiltinFunctionId::kPromiseResolve);
- SimpleInstallFunction(isolate_, promise_fun, "reject",
- Builtins::kPromiseReject, 1, true);
+ InstallFunctionWithBuiltinId(isolate_, promise_fun, "reject",
+ Builtins::kPromiseReject, 1, true,
+ BuiltinFunctionId::kPromiseReject);
// Setup %PromisePrototype%.
Handle<JSObject> prototype(
JSObject::cast(promise_fun->instance_prototype()), isolate());
native_context()->set_promise_prototype(*prototype);
- // Install the @@toStringTag property on the {prototype}.
- JSObject::AddProperty(
- isolate_, prototype, factory->to_string_tag_symbol(),
- factory->Promise_string(),
- static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
+ InstallToStringTag(isolate_, prototype, factory->Promise_string());
- Handle<JSFunction> promise_then = SimpleInstallFunction(
- isolate_, prototype, isolate_->factory()->then_string(),
- Builtins::kPromisePrototypeThen, 2, true);
+ Handle<JSFunction> promise_then = InstallFunctionWithBuiltinId(
+ isolate_, prototype, "then", Builtins::kPromisePrototypeThen, 2, true,
+ BuiltinFunctionId::kPromisePrototypeThen);
native_context()->set_promise_then(*promise_then);
- Handle<JSFunction> promise_catch =
- SimpleInstallFunction(isolate_, prototype, "catch",
- Builtins::kPromisePrototypeCatch, 1, true);
+ Handle<JSFunction> promise_catch = InstallFunctionWithBuiltinId(
+ isolate_, prototype, "catch", Builtins::kPromisePrototypeCatch, 1, true,
+ BuiltinFunctionId::kPromisePrototypeCatch);
native_context()->set_promise_catch(*promise_catch);
- SimpleInstallFunction(isolate_, prototype, "finally",
- Builtins::kPromisePrototypeFinally, 1, true,
- DONT_ENUM);
+ InstallFunctionWithBuiltinId(isolate_, prototype, "finally",
+ Builtins::kPromisePrototypeFinally, 1, true,
+ BuiltinFunctionId::kPromisePrototypeFinally);
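
Editor's note: Promise.all/race/resolve/reject and then/catch/finally now go through InstallFunctionWithBuiltinId, which additionally tags the function with a BuiltinFunctionId so the optimizing compiler can recognize it. A plausible sketch; the exact setter name on SharedFunctionInfo is an assumption:

// Assumed shape: install as usual, then record the BuiltinFunctionId on the
// shared function info for later identification by the compiler.
Handle<JSFunction> InstallFunctionWithBuiltinId(
    Isolate* isolate, Handle<JSObject> base, const char* name,
    Builtins::Name call, int len, bool adapt, BuiltinFunctionId id) {
  Handle<JSFunction> fun =
      SimpleInstallFunction(isolate, base, name, call, len, adapt);
  fun->shared()->set_builtin_function_id(id);
  return fun;
}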
{
Handle<SharedFunctionInfo> info = SimpleCreateSharedFunctionInfo(
@@ -2514,7 +2472,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
// Builtin functions for RegExp.prototype.
Handle<JSFunction> regexp_fun = InstallFunction(
isolate_, global, "RegExp", JS_REGEXP_TYPE,
- JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize,
+ JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kTaggedSize,
JSRegExp::kInObjectFieldCount, factory->the_hole_value(),
Builtins::kRegExpConstructor);
InstallWithIntrinsicDefaultProto(isolate_, regexp_fun,
@@ -2528,11 +2486,16 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
// Setup %RegExpPrototype%.
Handle<JSObject> prototype(
JSObject::cast(regexp_fun->instance_prototype()), isolate());
+ native_context()->set_regexp_prototype(*prototype);
{
- Handle<JSFunction> fun = SimpleInstallFunction(
- isolate_, prototype, factory->exec_string(),
- Builtins::kRegExpPrototypeExec, 1, true, DONT_ENUM);
+ Handle<JSFunction> fun =
+ SimpleInstallFunction(isolate_, prototype, "exec",
+ Builtins::kRegExpPrototypeExec, 1, true);
+ // Check that index of "exec" function in JSRegExp is correct.
+ DCHECK_EQ(JSRegExp::kExecFunctionDescriptorIndex,
+ prototype->map()->LastAdded());
+
native_context()->set_regexp_exec_function(*fun);
}
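
Editor's note: two details in this hunk. First, RegExp's instance size switches from kPointerSize to kTaggedSize: in-object fields are tagged slots, and under pointer compression a tagged slot can be narrower than a machine pointer, so sizing must count tagged words. Second, "exec" is now pinned to a known descriptor slot via the DCHECK. Illustrative arithmetic for the sizing change only:

// Instance size now counts tagged slots, not machine pointers; the two
// differ once pointer compression shrinks tagged values.
int instance_size =
    JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kTaggedSize;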
@@ -2554,29 +2517,35 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Builtins::kRegExpPrototypeUnicodeGetter, true);
SimpleInstallFunction(isolate_, prototype, "compile",
- Builtins::kRegExpPrototypeCompile, 2, true,
- DONT_ENUM);
- SimpleInstallFunction(isolate_, prototype, factory->toString_string(),
- Builtins::kRegExpPrototypeToString, 0, false,
- DONT_ENUM);
+ Builtins::kRegExpPrototypeCompile, 2, true);
+ SimpleInstallFunction(isolate_, prototype, "toString",
+ Builtins::kRegExpPrototypeToString, 0, false);
SimpleInstallFunction(isolate_, prototype, "test",
- Builtins::kRegExpPrototypeTest, 1, true, DONT_ENUM);
-
- SimpleInstallFunction(isolate_, prototype, factory->match_symbol(),
- "[Symbol.match]", Builtins::kRegExpPrototypeMatch,
- 1, true);
-
- SimpleInstallFunction(isolate_, prototype, factory->replace_symbol(),
- "[Symbol.replace]",
- Builtins::kRegExpPrototypeReplace, 2, false);
-
- SimpleInstallFunction(isolate_, prototype, factory->search_symbol(),
- "[Symbol.search]", Builtins::kRegExpPrototypeSearch,
- 1, true);
-
- SimpleInstallFunction(isolate_, prototype, factory->split_symbol(),
- "[Symbol.split]", Builtins::kRegExpPrototypeSplit,
- 2, false);
+ Builtins::kRegExpPrototypeTest, 1, true);
+
+ InstallFunctionAtSymbol(isolate_, prototype, factory->match_symbol(),
+ "[Symbol.match]", Builtins::kRegExpPrototypeMatch,
+ 1, true);
+ DCHECK_EQ(JSRegExp::kSymbolMatchFunctionDescriptorIndex,
+ prototype->map()->LastAdded());
+
+ InstallFunctionAtSymbol(isolate_, prototype, factory->replace_symbol(),
+ "[Symbol.replace]",
+ Builtins::kRegExpPrototypeReplace, 2, false);
+ DCHECK_EQ(JSRegExp::kSymbolReplaceFunctionDescriptorIndex,
+ prototype->map()->LastAdded());
+
+ InstallFunctionAtSymbol(isolate_, prototype, factory->search_symbol(),
+ "[Symbol.search]",
+ Builtins::kRegExpPrototypeSearch, 1, true);
+ DCHECK_EQ(JSRegExp::kSymbolSearchFunctionDescriptorIndex,
+ prototype->map()->LastAdded());
+
+ InstallFunctionAtSymbol(isolate_, prototype, factory->split_symbol(),
+ "[Symbol.split]", Builtins::kRegExpPrototypeSplit,
+ 2, false);
+ DCHECK_EQ(JSRegExp::kSymbolSplitFunctionDescriptorIndex,
+ prototype->map()->LastAdded());
Handle<Map> prototype_map(prototype->map(), isolate());
Map::SetShouldBeFastPrototypeMap(prototype_map, true, isolate_);
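
Editor's note: two changes run through this hunk. The explicit DONT_ENUM arguments disappear, which suggests the install helpers now default to the spec-mandated non-enumerable method attributes. And each symbol-keyed RegExp method is pinned to a fixed descriptor slot with a DCHECK, so fast paths that recognize an unmodified RegExp.prototype by descriptor index stay valid. The pin-and-check pattern:

InstallFunctionAtSymbol(isolate, prototype, factory->match_symbol(),
                        "[Symbol.match]", Builtins::kRegExpPrototypeMatch, 1,
                        true);
// The install order is now load-bearing: the descriptor index of the method
// just added must equal the constant the fast path compares against.
DCHECK_EQ(JSRegExp::kSymbolMatchFunctionDescriptorIndex,
          prototype->map()->LastAdded());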
@@ -2593,50 +2562,45 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
// Static properties set by a successful match.
- const PropertyAttributes no_enum = DONT_ENUM;
SimpleInstallGetterSetter(isolate_, regexp_fun, factory->input_string(),
Builtins::kRegExpInputGetter,
- Builtins::kRegExpInputSetter, no_enum);
- SimpleInstallGetterSetter(
- isolate_, regexp_fun, factory->InternalizeUtf8String("$_"),
- Builtins::kRegExpInputGetter, Builtins::kRegExpInputSetter, no_enum);
-
- SimpleInstallGetterSetter(
- isolate_, regexp_fun, factory->InternalizeUtf8String("lastMatch"),
- Builtins::kRegExpLastMatchGetter, Builtins::kEmptyFunction, no_enum);
- SimpleInstallGetterSetter(
- isolate_, regexp_fun, factory->InternalizeUtf8String("$&"),
- Builtins::kRegExpLastMatchGetter, Builtins::kEmptyFunction, no_enum);
-
- SimpleInstallGetterSetter(
- isolate_, regexp_fun, factory->InternalizeUtf8String("lastParen"),
- Builtins::kRegExpLastParenGetter, Builtins::kEmptyFunction, no_enum);
- SimpleInstallGetterSetter(
- isolate_, regexp_fun, factory->InternalizeUtf8String("$+"),
- Builtins::kRegExpLastParenGetter, Builtins::kEmptyFunction, no_enum);
-
- SimpleInstallGetterSetter(isolate_, regexp_fun,
- factory->InternalizeUtf8String("leftContext"),
+ Builtins::kRegExpInputSetter);
+ SimpleInstallGetterSetter(isolate_, regexp_fun, "$_",
+ Builtins::kRegExpInputGetter,
+ Builtins::kRegExpInputSetter);
+
+ SimpleInstallGetterSetter(isolate_, regexp_fun, "lastMatch",
+ Builtins::kRegExpLastMatchGetter,
+ Builtins::kEmptyFunction);
+ SimpleInstallGetterSetter(isolate_, regexp_fun, "$&",
+ Builtins::kRegExpLastMatchGetter,
+ Builtins::kEmptyFunction);
+
+ SimpleInstallGetterSetter(isolate_, regexp_fun, "lastParen",
+ Builtins::kRegExpLastParenGetter,
+ Builtins::kEmptyFunction);
+ SimpleInstallGetterSetter(isolate_, regexp_fun, "$+",
+ Builtins::kRegExpLastParenGetter,
+ Builtins::kEmptyFunction);
+
+ SimpleInstallGetterSetter(isolate_, regexp_fun, "leftContext",
Builtins::kRegExpLeftContextGetter,
- Builtins::kEmptyFunction, no_enum);
- SimpleInstallGetterSetter(isolate_, regexp_fun,
- factory->InternalizeUtf8String("$`"),
+ Builtins::kEmptyFunction);
+ SimpleInstallGetterSetter(isolate_, regexp_fun, "$`",
Builtins::kRegExpLeftContextGetter,
- Builtins::kEmptyFunction, no_enum);
+ Builtins::kEmptyFunction);
- SimpleInstallGetterSetter(isolate_, regexp_fun,
- factory->InternalizeUtf8String("rightContext"),
+ SimpleInstallGetterSetter(isolate_, regexp_fun, "rightContext",
Builtins::kRegExpRightContextGetter,
- Builtins::kEmptyFunction, no_enum);
- SimpleInstallGetterSetter(isolate_, regexp_fun,
- factory->InternalizeUtf8String("$'"),
+ Builtins::kEmptyFunction);
+ SimpleInstallGetterSetter(isolate_, regexp_fun, "$'",
Builtins::kRegExpRightContextGetter,
- Builtins::kEmptyFunction, no_enum);
+ Builtins::kEmptyFunction);
-#define INSTALL_CAPTURE_GETTER(i) \
- SimpleInstallGetterSetter( \
- isolate_, regexp_fun, factory->InternalizeUtf8String("$" #i), \
- Builtins::kRegExpCapture##i##Getter, Builtins::kEmptyFunction, no_enum)
+#define INSTALL_CAPTURE_GETTER(i) \
+ SimpleInstallGetterSetter(isolate_, regexp_fun, "$" #i, \
+ Builtins::kRegExpCapture##i##Getter, \
+ Builtins::kEmptyFunction)
INSTALL_CAPTURE_GETTER(1);
INSTALL_CAPTURE_GETTER(2);
INSTALL_CAPTURE_GETTER(3);
@@ -2662,7 +2626,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Descriptor d = Descriptor::DataField(isolate(), factory->lastIndex_string(),
JSRegExp::kLastIndexFieldIndex,
writable, Representation::Tagged());
- initial_map->AppendDescriptor(&d);
+ initial_map->AppendDescriptor(isolate(), &d);
{ // Internal: RegExpInternalMatch
Handle<JSFunction> function =
@@ -2747,28 +2711,26 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
}
// Initialize the embedder data slot.
- native_context()->set_embedder_data(*factory->empty_fixed_array());
+ // TODO(ishell): microtask queue pointer will be moved from native context
+ // to the embedder data array so we don't need an empty embedder data array.
+ Handle<EmbedderDataArray> embedder_data = factory->NewEmbedderDataArray(0);
+ native_context()->set_embedder_data(*embedder_data);
{ // -- J S O N
- Handle<String> name = factory->InternalizeUtf8String("JSON");
Handle<JSObject> json_object =
factory->NewJSObject(isolate_->object_function(), TENURED);
- JSObject::AddProperty(isolate_, global, name, json_object, DONT_ENUM);
+ JSObject::AddProperty(isolate_, global, "JSON", json_object, DONT_ENUM);
SimpleInstallFunction(isolate_, json_object, "parse", Builtins::kJsonParse,
2, false);
SimpleInstallFunction(isolate_, json_object, "stringify",
Builtins::kJsonStringify, 3, true);
- JSObject::AddProperty(
- isolate_, json_object, factory->to_string_tag_symbol(),
- factory->InternalizeUtf8String("JSON"),
- static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
+ InstallToStringTag(isolate_, json_object, "JSON");
}
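
Editor's note: InstallToStringTag folds away the dozen expanded @@toStringTag installs in this file (JSON and Math here, Promise, Map, Set, BigInt, DataView, and the Intl prototypes below). The removed lines show the body exactly; a Handle<String> overload evidently also exists, given the factory->Promise_string() and factory->Map_string() call sites:

// Reconstruction of InstallToStringTag from the expanded calls it replaces:
// a read-only, non-enumerable @@toStringTag string on the holder.
void InstallToStringTag(Isolate* isolate, Handle<JSObject> holder,
                        const char* value) {
  JSObject::AddProperty(
      isolate, holder, isolate->factory()->to_string_tag_symbol(),
      isolate->factory()->InternalizeUtf8String(value),
      static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
}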
{ // -- M a t h
- Handle<String> name = factory->InternalizeUtf8String("Math");
Handle<JSObject> math =
factory->NewJSObject(isolate_->object_function(), TENURED);
- JSObject::AddProperty(isolate_, global, name, math, DONT_ENUM);
+ JSObject::AddProperty(isolate_, global, "Math", math, DONT_ENUM);
SimpleInstallFunction(isolate_, math, "abs", Builtins::kMathAbs, 1, true);
SimpleInstallFunction(isolate_, math, "acos", Builtins::kMathAcos, 1, true);
SimpleInstallFunction(isolate_, math, "acosh", Builtins::kMathAcosh, 1,
@@ -2839,10 +2801,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
factory->NewNumber(std::sqrt(0.5)));
InstallConstant(isolate_, math, "SQRT2",
factory->NewNumber(std::sqrt(2.0)));
- JSObject::AddProperty(
- isolate_, math, factory->to_string_tag_symbol(),
- factory->InternalizeUtf8String("Math"),
- static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
+ InstallToStringTag(isolate_, math, "Math");
}
{ // -- C o n s o l e
@@ -2903,18 +2862,17 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Builtins::kConsoleTimeStamp, 1, false, NONE);
SimpleInstallFunction(isolate_, console, "context",
Builtins::kConsoleContext, 1, true, NONE);
- JSObject::AddProperty(
- isolate_, console, factory->to_string_tag_symbol(),
- factory->InternalizeUtf8String("Object"),
- static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
+ InstallToStringTag(isolate_, console, "Object");
}
#ifdef V8_INTL_SUPPORT
{ // -- I n t l
- Handle<String> name = factory->InternalizeUtf8String("Intl");
Handle<JSObject> intl =
factory->NewJSObject(isolate_->object_function(), TENURED);
- JSObject::AddProperty(isolate_, global, name, intl, DONT_ENUM);
+ JSObject::AddProperty(isolate_, global, "Intl", intl, DONT_ENUM);
+
+ SimpleInstallFunction(isolate(), intl, "getCanonicalLocales",
+ Builtins::kIntlGetCanonicalLocales, 1, false);
{
Handle<JSFunction> date_time_format_constructor = InstallFunction(
@@ -2927,9 +2885,6 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
isolate_, date_time_format_constructor,
Context::INTL_DATE_TIME_FORMAT_FUNCTION_INDEX);
- native_context()->set_intl_date_time_format_function(
- *date_time_format_constructor);
-
SimpleInstallFunction(
isolate(), date_time_format_constructor, "supportedLocalesOf",
Builtins::kDateTimeFormatSupportedLocalesOf, 1, false);
@@ -2937,11 +2892,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Handle<JSObject> prototype(
JSObject::cast(date_time_format_constructor->prototype()), isolate_);
- // Install the @@toStringTag property on the {prototype}.
- JSObject::AddProperty(
- isolate_, prototype, factory->to_string_tag_symbol(),
- factory->Object_string(),
- static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
+ InstallToStringTag(isolate_, prototype, factory->Object_string());
SimpleInstallFunction(isolate_, prototype, "resolvedOptions",
Builtins::kDateTimeFormatPrototypeResolvedOptions,
@@ -2974,11 +2925,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Handle<JSObject> prototype(
JSObject::cast(number_format_constructor->prototype()), isolate_);
- // Install the @@toStringTag property on the {prototype}.
- JSObject::AddProperty(
- isolate_, prototype, factory->to_string_tag_symbol(),
- factory->Object_string(),
- static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
+ InstallToStringTag(isolate_, prototype, factory->Object_string());
SimpleInstallFunction(isolate_, prototype, "resolvedOptions",
Builtins::kNumberFormatPrototypeResolvedOptions, 0,
@@ -3007,11 +2954,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Handle<JSObject> prototype(
JSObject::cast(collator_constructor->prototype()), isolate_);
- // Install the @@toStringTag property on the {prototype}.
- JSObject::AddProperty(
- isolate_, prototype, factory->to_string_tag_symbol(),
- factory->Object_string(),
- static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
+ InstallToStringTag(isolate_, prototype, factory->Object_string());
SimpleInstallFunction(isolate_, prototype, "resolvedOptions",
Builtins::kCollatorPrototypeResolvedOptions, 0,
@@ -3028,9 +2971,6 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
JSV8BreakIterator::kSize, 0, factory->the_hole_value(),
Builtins::kV8BreakIteratorConstructor);
v8_break_iterator_constructor->shared()->DontAdaptArguments();
- InstallWithIntrinsicDefaultProto(
- isolate_, v8_break_iterator_constructor,
- Context::INTL_V8_BREAK_ITERATOR_FUNCTION_INDEX);
SimpleInstallFunction(
isolate_, v8_break_iterator_constructor, "supportedLocalesOf",
@@ -3039,11 +2979,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Handle<JSObject> prototype(
JSObject::cast(v8_break_iterator_constructor->prototype()), isolate_);
- // Install the @@toStringTag property on the {prototype}.
- JSObject::AddProperty(
- isolate_, prototype, factory->to_string_tag_symbol(),
- factory->Object_string(),
- static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
+ InstallToStringTag(isolate_, prototype, factory->Object_string());
SimpleInstallFunction(isolate_, prototype, "resolvedOptions",
Builtins::kV8BreakIteratorPrototypeResolvedOptions,
@@ -3076,9 +3012,6 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
JSPluralRules::kSize, 0, factory->the_hole_value(),
Builtins::kPluralRulesConstructor);
plural_rules_constructor->shared()->DontAdaptArguments();
- InstallWithIntrinsicDefaultProto(
- isolate_, plural_rules_constructor,
- Context::INTL_PLURAL_RULES_FUNCTION_INDEX);
SimpleInstallFunction(isolate(), plural_rules_constructor,
"supportedLocalesOf",
@@ -3087,11 +3020,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Handle<JSObject> prototype(
JSObject::cast(plural_rules_constructor->prototype()), isolate_);
- // Install the @@toStringTag property on the {prototype}.
- JSObject::AddProperty(
- isolate_, prototype, factory->to_string_tag_symbol(),
- factory->Object_string(),
- static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
+ InstallToStringTag(isolate_, prototype, factory->Object_string());
SimpleInstallFunction(isolate_, prototype, "resolvedOptions",
Builtins::kPluralRulesPrototypeResolvedOptions, 0,
@@ -3164,8 +3093,8 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
{ // -- T y p e d A r r a y
Handle<JSFunction> typed_array_fun = CreateFunction(
isolate_, factory->InternalizeUtf8String("TypedArray"),
- JS_TYPED_ARRAY_TYPE, JSTypedArray::kSize, 0, factory->the_hole_value(),
- Builtins::kTypedArrayBaseConstructor);
+ JS_TYPED_ARRAY_TYPE, JSTypedArray::kHeaderSize, 0,
+ factory->the_hole_value(), Builtins::kTypedArrayBaseConstructor);
typed_array_fun->shared()->set_native(false);
typed_array_fun->shared()->set_length(0);
InstallSpeciesGetter(isolate_, typed_array_fun);
@@ -3199,15 +3128,15 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
BuiltinFunctionId::kTypedArrayToStringTag);
// Install "keys", "values" and "entries" methods on the {prototype}.
- SimpleInstallFunction(isolate_, prototype, "entries",
- Builtins::kTypedArrayPrototypeEntries, 0, true,
- BuiltinFunctionId::kTypedArrayEntries);
+ InstallFunctionWithBuiltinId(isolate_, prototype, "entries",
+ Builtins::kTypedArrayPrototypeEntries, 0, true,
+ BuiltinFunctionId::kTypedArrayEntries);
- SimpleInstallFunction(isolate_, prototype, "keys",
- Builtins::kTypedArrayPrototypeKeys, 0, true,
- BuiltinFunctionId::kTypedArrayKeys);
+ InstallFunctionWithBuiltinId(isolate_, prototype, "keys",
+ Builtins::kTypedArrayPrototypeKeys, 0, true,
+ BuiltinFunctionId::kTypedArrayKeys);
- Handle<JSFunction> values = SimpleInstallFunction(
+ Handle<JSFunction> values = InstallFunctionWithBuiltinId(
isolate_, prototype, "values", Builtins::kTypedArrayPrototypeValues, 0,
true, BuiltinFunctionId::kTypedArrayValues);
JSObject::AddProperty(isolate_, prototype, factory->iterator_symbol(),
@@ -3232,6 +3161,8 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Builtins::kTypedArrayPrototypeIncludes, 1, false);
SimpleInstallFunction(isolate_, prototype, "indexOf",
Builtins::kTypedArrayPrototypeIndexOf, 1, false);
+ SimpleInstallFunction(isolate_, prototype, "join",
+ Builtins::kTypedArrayPrototypeJoin, 1, false);
SimpleInstallFunction(isolate_, prototype, "lastIndexOf",
Builtins::kTypedArrayPrototypeLastIndexOf, 1, false);
SimpleInstallFunction(isolate_, prototype, "map",
@@ -3252,6 +3183,11 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Builtins::kTypedArrayPrototypeSort, 1, false);
SimpleInstallFunction(isolate_, prototype, "subarray",
Builtins::kTypedArrayPrototypeSubArray, 2, false);
+ SimpleInstallFunction(isolate_, prototype, "toLocaleString",
+ Builtins::kTypedArrayPrototypeToLocaleString, 0,
+ false);
+ JSObject::AddProperty(isolate_, prototype, factory->toString_string(),
+ array_prototype_to_string_fun, DONT_ENUM);
}
{ // -- T y p e d A r r a y s
@@ -3280,11 +3216,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Handle<JSObject> prototype(
JSObject::cast(data_view_fun->instance_prototype()), isolate());
- // Install the @@toStringTag property on the {prototype}.
- JSObject::AddProperty(
- isolate_, prototype, factory->to_string_tag_symbol(),
- factory->InternalizeUtf8String("DataView"),
- static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
+ InstallToStringTag(isolate_, prototype, "DataView");
// Install the "buffer", "byteOffset" and "byteLength" getters
// on the {prototype}.
@@ -3355,11 +3287,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Handle<JSObject> prototype(JSObject::cast(js_map_fun->instance_prototype()),
isolate());
- // Install the @@toStringTag property on the {prototype}.
- JSObject::AddProperty(
- isolate_, prototype, factory->to_string_tag_symbol(),
- factory->Map_string(),
- static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
+ InstallToStringTag(isolate_, prototype, factory->Map_string());
Handle<JSFunction> map_get = SimpleInstallFunction(
isolate_, prototype, "get", Builtins::kMapPrototypeGet, 1, true);
@@ -3367,6 +3295,9 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Handle<JSFunction> map_set = SimpleInstallFunction(
isolate_, prototype, "set", Builtins::kMapPrototypeSet, 2, true);
+ // Check that index of "set" function in JSCollection is correct.
+ DCHECK_EQ(JSCollection::kAddFunctionDescriptorIndex,
+ prototype->map()->LastAdded());
native_context()->set_map_set(*map_set);
Handle<JSFunction> map_has = SimpleInstallFunction(
@@ -3435,10 +3366,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
SimpleInstallFunction(isolate_, prototype, "valueOf",
Builtins::kBigIntPrototypeValueOf, 0, false);
// @@toStringTag
- JSObject::AddProperty(
- isolate_, prototype, factory->to_string_tag_symbol(),
- factory->BigInt_string(),
- static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
+ InstallToStringTag(isolate_, prototype, factory->BigInt_string());
}
{ // -- S e t
@@ -3456,11 +3384,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Handle<JSObject> prototype(JSObject::cast(js_set_fun->instance_prototype()),
isolate());
- // Install the @@toStringTag property on the {prototype}.
- JSObject::AddProperty(
- isolate_, prototype, factory->to_string_tag_symbol(),
- factory->Set_string(),
- static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
+ InstallToStringTag(isolate_, prototype, factory->Set_string());
Handle<JSFunction> set_has = SimpleInstallFunction(
isolate_, prototype, "has", Builtins::kSetPrototypeHas, 1, true);
@@ -3468,6 +3392,9 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Handle<JSFunction> set_add = SimpleInstallFunction(
isolate_, prototype, "add", Builtins::kSetPrototypeAdd, 1, true);
+ // Check that index of "add" function in JSCollection is correct.
+ DCHECK_EQ(JSCollection::kAddFunctionDescriptorIndex,
+ prototype->map()->LastAdded());
native_context()->set_set_add(*set_add);
Handle<JSFunction> set_delete = SimpleInstallFunction(
@@ -3491,6 +3418,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
values, DONT_ENUM);
native_context()->set_initial_set_prototype_map(prototype->map());
+ native_context()->set_initial_set_prototype(*prototype);
InstallSpeciesGetter(isolate_, js_set_fun);
}
@@ -3510,7 +3438,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Descriptor::DataField(isolate(), factory->to_string_tag_symbol(),
JSModuleNamespace::kToStringTagFieldIndex,
attribs, Representation::Tagged());
- map->AppendDescriptor(&d);
+ map->AppendDescriptor(isolate(), &d);
}
}
@@ -3524,14 +3452,14 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Descriptor d = Descriptor::DataField(isolate(), factory->value_string(),
JSIteratorResult::kValueIndex, NONE,
Representation::Tagged());
- map->AppendDescriptor(&d);
+ map->AppendDescriptor(isolate(), &d);
}
{ // done
Descriptor d = Descriptor::DataField(isolate(), factory->done_string(),
JSIteratorResult::kDoneIndex, NONE,
Representation::Tagged());
- map->AppendDescriptor(&d);
+ map->AppendDescriptor(isolate(), &d);
}
map->SetConstructor(native_context()->object_function());
@@ -3558,16 +3486,18 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Handle<JSFunction> weakmap_get = SimpleInstallFunction(
isolate_, prototype, "get", Builtins::kWeakMapGet, 1, true);
native_context()->set_weakmap_get(*weakmap_get);
- SimpleInstallFunction(isolate_, prototype, "has", Builtins::kWeakMapHas, 1,
- true);
+
Handle<JSFunction> weakmap_set = SimpleInstallFunction(
isolate_, prototype, "set", Builtins::kWeakMapPrototypeSet, 2, true);
+ // Check that index of "set" function in JSWeakCollection is correct.
+ DCHECK_EQ(JSWeakCollection::kAddFunctionDescriptorIndex,
+ prototype->map()->LastAdded());
+
native_context()->set_weakmap_set(*weakmap_set);
+ SimpleInstallFunction(isolate_, prototype, "has", Builtins::kWeakMapHas, 1,
+ true);
- JSObject::AddProperty(
- isolate_, prototype, factory->to_string_tag_symbol(),
- factory->InternalizeUtf8String("WeakMap"),
- static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
+ InstallToStringTag(isolate_, prototype, "WeakMap");
native_context()->set_initial_weakmap_prototype_map(prototype->map());
}
@@ -3591,14 +3521,17 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Builtins::kWeakSetPrototypeDelete, 1, true);
SimpleInstallFunction(isolate_, prototype, "has", Builtins::kWeakSetHas, 1,
true);
+
Handle<JSFunction> weakset_add = SimpleInstallFunction(
isolate_, prototype, "add", Builtins::kWeakSetPrototypeAdd, 1, true);
+ // Check that index of "add" function in JSWeakCollection is correct.
+ DCHECK_EQ(JSWeakCollection::kAddFunctionDescriptorIndex,
+ prototype->map()->LastAdded());
+
native_context()->set_weakset_add(*weakset_add);
- JSObject::AddProperty(
- isolate_, prototype, factory->to_string_tag_symbol(),
- factory->InternalizeUtf8String("WeakSet"),
- static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
+ InstallToStringTag(isolate_, prototype,
+ factory->InternalizeUtf8String("WeakSet"));
native_context()->set_initial_weakset_prototype_map(prototype->map());
}
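
Editor's note: Map, Set, WeakMap, and WeakSet all receive the same treatment in this diff: the mutating method ("set" or "add") is installed so that it lands at a known descriptor slot, and a DCHECK asserts the slot index. The WeakMap hunk above even reorders "has" after "set" to make the index come out right. The shared pattern:

// Install the mutating method at a fixed descriptor slot and assert the
// index, so fast paths keyed on the prototype map's last-added descriptor
// remain correct.
Handle<JSFunction> set_add = SimpleInstallFunction(
    isolate, prototype, "add", Builtins::kSetPrototypeAdd, 1, true);
DCHECK_EQ(JSCollection::kAddFunctionDescriptorIndex,
          prototype->map()->LastAdded());
native_context->set_set_add(*set_add);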
@@ -3623,8 +3556,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
proxy_function->shared()->set_length(2);
native_context()->set_proxy_function(*proxy_function);
- InstallFunction(isolate_, global, name, proxy_function,
- factory->Object_string());
+ JSObject::AddProperty(isolate_, global, name, proxy_function, DONT_ENUM);
DCHECK(!proxy_function->has_prototype_property());
@@ -3644,45 +3576,41 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
factory->NewJSObject(isolate_->object_function(), TENURED);
JSObject::AddProperty(isolate_, global, reflect_string, reflect, DONT_ENUM);
- Handle<JSFunction> define_property = SimpleInstallFunction(
- isolate_, reflect, factory->defineProperty_string(),
- Builtins::kReflectDefineProperty, 3, true);
+ Handle<JSFunction> define_property =
+ SimpleInstallFunction(isolate_, reflect, "defineProperty",
+ Builtins::kReflectDefineProperty, 3, true);
native_context()->set_reflect_define_property(*define_property);
- Handle<JSFunction> delete_property = SimpleInstallFunction(
- isolate_, reflect, factory->deleteProperty_string(),
- Builtins::kReflectDeleteProperty, 2, true);
+ Handle<JSFunction> delete_property =
+ SimpleInstallFunction(isolate_, reflect, "deleteProperty",
+ Builtins::kReflectDeleteProperty, 2, true);
native_context()->set_reflect_delete_property(*delete_property);
- Handle<JSFunction> apply =
- SimpleInstallFunction(isolate_, reflect, factory->apply_string(),
- Builtins::kReflectApply, 3, false);
+ Handle<JSFunction> apply = SimpleInstallFunction(
+ isolate_, reflect, "apply", Builtins::kReflectApply, 3, false);
native_context()->set_reflect_apply(*apply);
- Handle<JSFunction> construct =
- SimpleInstallFunction(isolate_, reflect, factory->construct_string(),
- Builtins::kReflectConstruct, 2, false);
+ Handle<JSFunction> construct = SimpleInstallFunction(
+ isolate_, reflect, "construct", Builtins::kReflectConstruct, 2, false);
native_context()->set_reflect_construct(*construct);
- SimpleInstallFunction(isolate_, reflect, factory->get_string(),
- Builtins::kReflectGet, 2, false);
- SimpleInstallFunction(isolate_, reflect,
- factory->getOwnPropertyDescriptor_string(),
+ SimpleInstallFunction(isolate_, reflect, "get", Builtins::kReflectGet, 2,
+ false);
+ SimpleInstallFunction(isolate_, reflect, "getOwnPropertyDescriptor",
Builtins::kReflectGetOwnPropertyDescriptor, 2, true);
- SimpleInstallFunction(isolate_, reflect, factory->getPrototypeOf_string(),
+ SimpleInstallFunction(isolate_, reflect, "getPrototypeOf",
Builtins::kReflectGetPrototypeOf, 1, true);
- SimpleInstallFunction(isolate_, reflect, factory->has_string(),
- Builtins::kReflectHas, 2, true);
- SimpleInstallFunction(isolate_, reflect, factory->isExtensible_string(),
+ SimpleInstallFunction(isolate_, reflect, "has", Builtins::kReflectHas, 2,
+ true);
+ SimpleInstallFunction(isolate_, reflect, "isExtensible",
Builtins::kReflectIsExtensible, 1, true);
- SimpleInstallFunction(isolate_, reflect, factory->ownKeys_string(),
+ SimpleInstallFunction(isolate_, reflect, "ownKeys",
Builtins::kReflectOwnKeys, 1, true);
- SimpleInstallFunction(isolate_, reflect,
- factory->preventExtensions_string(),
+ SimpleInstallFunction(isolate_, reflect, "preventExtensions",
Builtins::kReflectPreventExtensions, 1, true);
- SimpleInstallFunction(isolate_, reflect, factory->set_string(),
- Builtins::kReflectSet, 3, false);
- SimpleInstallFunction(isolate_, reflect, factory->setPrototypeOf_string(),
+ SimpleInstallFunction(isolate_, reflect, "set", Builtins::kReflectSet, 3,
+ false);
+ SimpleInstallFunction(isolate_, reflect, "setPrototypeOf",
Builtins::kReflectSetPrototypeOf, 2, true);
}
@@ -3702,14 +3630,14 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Descriptor d = Descriptor::AccessorConstant(
factory->length_string(), factory->bound_function_length_accessor(),
roc_attribs);
- map->AppendDescriptor(&d);
+ map->AppendDescriptor(isolate(), &d);
}
{ // name
Descriptor d = Descriptor::AccessorConstant(
factory->name_string(), factory->bound_function_name_accessor(),
roc_attribs);
- map->AppendDescriptor(&d);
+ map->AppendDescriptor(isolate(), &d);
}
native_context()->set_bound_function_without_constructor_map(*map);
@@ -3735,14 +3663,14 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Descriptor::DataField(isolate(), factory->length_string(),
JSSloppyArgumentsObject::kLengthIndex,
DONT_ENUM, Representation::Tagged());
- map->AppendDescriptor(&d);
+ map->AppendDescriptor(isolate(), &d);
}
{ // callee
Descriptor d =
Descriptor::DataField(isolate(), factory->callee_string(),
JSSloppyArgumentsObject::kCalleeIndex,
DONT_ENUM, Representation::Tagged());
- map->AppendDescriptor(&d);
+ map->AppendDescriptor(isolate(), &d);
}
// @@iterator method is added later.
@@ -3789,12 +3717,12 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Descriptor::DataField(isolate(), factory->length_string(),
JSStrictArgumentsObject::kLengthIndex,
DONT_ENUM, Representation::Tagged());
- map->AppendDescriptor(&d);
+ map->AppendDescriptor(isolate(), &d);
}
{ // callee
Descriptor d = Descriptor::AccessorConstant(factory->callee_string(),
callee, attributes);
- map->AppendDescriptor(&d);
+ map->AppendDescriptor(isolate(), &d);
}
// @@iterator method is added later.
@@ -3885,23 +3813,6 @@ void Genesis::InitializeExperimentalGlobal() {
#undef FEATURE_INITIALIZE_GLOBAL
}
-bool Bootstrapper::CompileBuiltin(Isolate* isolate, int index) {
- Vector<const char> name = Natives::GetScriptName(index);
- Handle<String> source_code =
- isolate->bootstrapper()->GetNativeSource(CORE, index);
-
- // We pass in extras_utils so that builtin code can set it up for later use
- // by actual extras code, compiled with CompileExtraBuiltin.
- Handle<Object> global = isolate->global_object();
- Handle<Object> utils = isolate->natives_utils_object();
- Handle<Object> extras_utils = isolate->extras_utils_object();
- Handle<Object> args[] = {global, utils, extras_utils};
-
- return Bootstrapper::CompileNative(isolate, name, source_code,
- arraysize(args), args, NATIVES_CODE);
-}
-
-
bool Bootstrapper::CompileExtraBuiltin(Isolate* isolate, int index) {
HandleScope scope(isolate);
Vector<const char> name = ExtraNatives::GetScriptName(index);
@@ -3916,20 +3827,6 @@ bool Bootstrapper::CompileExtraBuiltin(Isolate* isolate, int index) {
}
-bool Bootstrapper::CompileExperimentalExtraBuiltin(Isolate* isolate,
- int index) {
- HandleScope scope(isolate);
- Vector<const char> name = ExperimentalExtraNatives::GetScriptName(index);
- Handle<String> source_code =
- isolate->bootstrapper()->GetNativeSource(EXPERIMENTAL_EXTRAS, index);
- Handle<Object> global = isolate->global_object();
- Handle<Object> binding = isolate->extras_binding_object();
- Handle<Object> extras_utils = isolate->extras_utils_object();
- Handle<Object> args[] = {global, binding, extras_utils};
- return Bootstrapper::CompileNative(isolate, name, source_code,
- arraysize(args), args, EXTENSION_CODE);
-}
-
bool Bootstrapper::CompileNative(Isolate* isolate, Vector<const char> name,
Handle<String> source, int argc,
Handle<Object> argv[],
@@ -3970,19 +3867,6 @@ bool Bootstrapper::CompileNative(Isolate* isolate, Vector<const char> name,
}
-bool Genesis::CallUtilsFunction(Isolate* isolate, const char* name) {
- Handle<JSObject> utils =
- Handle<JSObject>::cast(isolate->natives_utils_object());
- Handle<String> name_string = isolate->factory()->InternalizeUtf8String(name);
- Handle<Object> fun = JSObject::GetDataProperty(utils, name_string);
- Handle<Object> receiver = isolate->factory()->undefined_value();
- Handle<Object> args[] = {utils};
- return !Execution::TryCall(isolate, fun, receiver, 1, args,
- Execution::MessageHandling::kKeepPending, nullptr)
- .is_null();
-}
-
-
bool Genesis::CompileExtension(Isolate* isolate, v8::Extension* extension) {
Factory* factory = isolate->factory();
HandleScope scope(isolate);
@@ -4060,73 +3944,29 @@ static Handle<JSObject> ResolveBuiltinIdHolder(Isolate* isolate,
return Handle<JSObject>::cast(value);
}
-void Genesis::ConfigureUtilsObject(GlobalContextType context_type) {
- switch (context_type) {
- // We still need the utils object to find debug functions.
- case DEBUG_CONTEXT:
- return;
- // Expose the natives in global if a valid name for it is specified.
- case FULL_CONTEXT: {
- // We still need the utils object after deserialization.
- if (isolate()->serializer_enabled()) return;
- if (FLAG_expose_natives_as == nullptr) break;
- if (strlen(FLAG_expose_natives_as) == 0) break;
- HandleScope scope(isolate());
- Handle<String> natives_key =
- factory()->InternalizeUtf8String(FLAG_expose_natives_as);
- uint32_t dummy_index;
- if (natives_key->AsArrayIndex(&dummy_index)) break;
- Handle<Object> utils = isolate()->natives_utils_object();
- Handle<JSObject> global = isolate()->global_object();
- JSObject::AddProperty(isolate(), global, natives_key, utils, DONT_ENUM);
- break;
- }
- }
+void Genesis::ConfigureUtilsObject() {
+ // We still need the utils object after deserialization.
+ if (isolate()->serializer_enabled()) return;
// The utils object can be removed for cases that reach this point.
- HeapObject* undefined = ReadOnlyRoots(heap()).undefined_value();
- native_context()->set_natives_utils_object(undefined);
+ HeapObject undefined = ReadOnlyRoots(heap()).undefined_value();
native_context()->set_extras_utils_object(undefined);
}
-
-void Bootstrapper::ExportFromRuntime(Isolate* isolate,
- Handle<JSObject> container) {
+void Genesis::InitializeIteratorFunctions() {
+ Isolate* isolate = isolate_;
Factory* factory = isolate->factory();
HandleScope scope(isolate);
Handle<NativeContext> native_context = isolate->native_context();
-#define EXPORT_PRIVATE_SYMBOL(_, NAME) \
- Handle<String> NAME##_name = factory->InternalizeUtf8String(#NAME); \
- JSObject::AddProperty(isolate, container, NAME##_name, factory->NAME(), NONE);
- PRIVATE_SYMBOL_LIST_GENERATOR(EXPORT_PRIVATE_SYMBOL, /* not used */)
-#undef EXPORT_PRIVATE_SYMBOL
-
-#define EXPORT_PUBLIC_SYMBOL(_, NAME, DESCRIPTION) \
- Handle<String> NAME##_name = factory->InternalizeUtf8String(#NAME); \
- JSObject::AddProperty(isolate, container, NAME##_name, factory->NAME(), NONE);
- PUBLIC_SYMBOL_LIST_GENERATOR(EXPORT_PUBLIC_SYMBOL, /* not used */)
- WELL_KNOWN_SYMBOL_LIST_GENERATOR(EXPORT_PUBLIC_SYMBOL, /* not used */)
-#undef EXPORT_PUBLIC_SYMBOL
-
Handle<JSObject> iterator_prototype(
native_context->initial_iterator_prototype(), isolate);
- JSObject::AddProperty(isolate, container,
- factory->InternalizeUtf8String("IteratorPrototype"),
- iterator_prototype, NONE);
-
- {
+ { // -- G e n e r a t o r
PrototypeIterator iter(isolate, native_context->generator_function_map());
Handle<JSObject> generator_function_prototype(iter.GetCurrent<JSObject>(),
isolate);
-
- JSObject::AddProperty(
- isolate, container,
- factory->InternalizeUtf8String("GeneratorFunctionPrototype"),
- generator_function_prototype, NONE);
-
- Handle<JSFunction> generator_function_function = InstallFunction(
- isolate, container, "GeneratorFunction", JS_FUNCTION_TYPE,
+ Handle<JSFunction> generator_function_function = CreateFunction(
+ isolate, "GeneratorFunction", JS_FUNCTION_TYPE,
JSFunction::kSizeWithPrototype, 0, generator_function_prototype,
Builtins::kGeneratorFunctionConstructor);
generator_function_function->set_prototype_or_initial_map(
@@ -4148,14 +3988,14 @@ void Bootstrapper::ExportFromRuntime(Isolate* isolate,
*generator_function_function);
}
- {
+ { // -- A s y n c G e n e r a t o r
PrototypeIterator iter(isolate,
native_context->async_generator_function_map());
Handle<JSObject> async_generator_function_prototype(
iter.GetCurrent<JSObject>(), isolate);
- Handle<JSFunction> async_generator_function_function = InstallFunction(
- isolate, container, "AsyncGeneratorFunction", JS_FUNCTION_TYPE,
+ Handle<JSFunction> async_generator_function_function = CreateFunction(
+ isolate, "AsyncGeneratorFunction", JS_FUNCTION_TYPE,
JSFunction::kSizeWithPrototype, 0, async_generator_function_prototype,
Builtins::kAsyncGeneratorFunctionConstructor);
async_generator_function_function->set_prototype_or_initial_map(
@@ -4179,27 +4019,23 @@ void Bootstrapper::ExportFromRuntime(Isolate* isolate,
}
{ // -- S e t I t e r a t o r
- Handle<String> name = factory->SetIterator_string();
-
// Setup %SetIteratorPrototype%.
Handle<JSObject> prototype =
factory->NewJSObject(isolate->object_function(), TENURED);
JSObject::ForceSetPrototype(prototype, iterator_prototype);
- // Install the @@toStringTag property on the {prototype}.
- JSObject::AddProperty(
- isolate, prototype, factory->to_string_tag_symbol(), name,
- static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
+ InstallToStringTag(isolate, prototype, factory->SetIterator_string());
// Install the next function on the {prototype}.
- SimpleInstallFunction(isolate, prototype, "next",
- Builtins::kSetIteratorPrototypeNext, 0, true,
- BuiltinFunctionId::kSetIteratorNext);
+ InstallFunctionWithBuiltinId(isolate, prototype, "next",
+ Builtins::kSetIteratorPrototypeNext, 0, true,
+ BuiltinFunctionId::kSetIteratorNext);
+ native_context->set_initial_set_iterator_prototype(*prototype);
// Setup SetIterator constructor.
- Handle<JSFunction> set_iterator_function = InstallFunction(
- isolate, container, "SetIterator", JS_SET_VALUE_ITERATOR_TYPE,
- JSSetIterator::kSize, 0, prototype, Builtins::kIllegal);
+ Handle<JSFunction> set_iterator_function =
+ CreateFunction(isolate, "SetIterator", JS_SET_VALUE_ITERATOR_TYPE,
+ JSSetIterator::kSize, 0, prototype, Builtins::kIllegal);
set_iterator_function->shared()->set_native(false);
Handle<Map> set_value_iterator_map(set_iterator_function->initial_map(),
@@ -4214,27 +4050,23 @@ void Bootstrapper::ExportFromRuntime(Isolate* isolate,
}
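// InstallFunctionWithBuiltinId supersedes the SimpleInstallFunction overload
// that took a BuiltinFunctionId. A plausible sketch, inferred only from the
// call sites in this patch: install the function as usual, then record the
// id on its SharedFunctionInfo so optimizing compilers can recognize it.
V8_NOINLINE Handle<JSFunction> InstallFunctionWithBuiltinId(
    Isolate* isolate, Handle<JSObject> base, const char* name,
    Builtins::Name call, int len, bool adapt, BuiltinFunctionId id) {
  Handle<JSFunction> fun =
      SimpleInstallFunction(isolate, base, name, call, len, adapt);
  fun->shared()->set_builtin_function_id(id);
  return fun;
}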
{ // -- M a p I t e r a t o r
- Handle<String> name = factory->MapIterator_string();
-
// Setup %MapIteratorPrototype%.
Handle<JSObject> prototype =
factory->NewJSObject(isolate->object_function(), TENURED);
JSObject::ForceSetPrototype(prototype, iterator_prototype);
- // Install the @@toStringTag property on the {prototype}.
- JSObject::AddProperty(
- isolate, prototype, factory->to_string_tag_symbol(), name,
- static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
+ InstallToStringTag(isolate, prototype, factory->MapIterator_string());
// Install the next function on the {prototype}.
- SimpleInstallFunction(isolate, prototype, "next",
- Builtins::kMapIteratorPrototypeNext, 0, true,
- BuiltinFunctionId::kMapIteratorNext);
+ InstallFunctionWithBuiltinId(isolate, prototype, "next",
+ Builtins::kMapIteratorPrototypeNext, 0, true,
+ BuiltinFunctionId::kMapIteratorNext);
+ native_context->set_initial_map_iterator_prototype(*prototype);
// Setup MapIterator constructor.
- Handle<JSFunction> map_iterator_function = InstallFunction(
- isolate, container, "MapIterator", JS_MAP_KEY_ITERATOR_TYPE,
- JSMapIterator::kSize, 0, prototype, Builtins::kIllegal);
+ Handle<JSFunction> map_iterator_function =
+ CreateFunction(isolate, "MapIterator", JS_MAP_KEY_ITERATOR_TYPE,
+ JSMapIterator::kSize, 0, prototype, Builtins::kIllegal);
map_iterator_function->shared()->set_native(false);
Handle<Map> map_key_iterator_map(map_iterator_function->initial_map(),
@@ -4259,8 +4091,8 @@ void Bootstrapper::ExportFromRuntime(Isolate* isolate,
Handle<JSObject> async_function_prototype(iter.GetCurrent<JSObject>(),
isolate);
- Handle<JSFunction> async_function_constructor = InstallFunction(
- isolate, container, "AsyncFunction", JS_FUNCTION_TYPE,
+ Handle<JSFunction> async_function_constructor = CreateFunction(
+ isolate, "AsyncFunction", JS_FUNCTION_TYPE,
JSFunction::kSizeWithPrototype, 0, async_function_prototype,
Builtins::kAsyncFunctionConstructor);
async_function_constructor->set_prototype_or_initial_map(
@@ -4279,19 +4111,15 @@ void Bootstrapper::ExportFromRuntime(Isolate* isolate,
JSFunction::SetPrototype(async_function_constructor,
async_function_prototype);
- {
- Handle<JSFunction> function =
- SimpleCreateFunction(isolate, factory->empty_string(),
- Builtins::kAsyncFunctionAwaitCaught, 2, false);
- native_context->set_async_function_await_caught(*function);
- }
-
- {
- Handle<JSFunction> function =
- SimpleCreateFunction(isolate, factory->empty_string(),
- Builtins::kAsyncFunctionAwaitUncaught, 2, false);
- native_context->set_async_function_await_uncaught(*function);
- }
+ // Async functions don't have a prototype, but they use generator objects
+ // under the hood to model the suspend/resume (in await). Instead of using
+ // the "prototype" / initial_map machinery (like for (async) generators),
+ // there's one global (per native context) map here that is used for the
+ // async function generator objects. These objects never escape to user
+  // JavaScript anyway.
+ Handle<Map> async_function_object_map = factory->NewMap(
+ JS_ASYNC_FUNCTION_OBJECT_TYPE, JSAsyncFunctionObject::kSize);
+ native_context->set_async_function_object_map(*async_function_object_map);
{
Handle<SharedFunctionInfo> info = SimpleCreateSharedFunctionInfo(
@@ -4306,84 +4134,73 @@ void Bootstrapper::ExportFromRuntime(Isolate* isolate,
factory->empty_string(), 1);
native_context->set_async_function_await_resolve_shared_fun(*info);
}
+ }
+}
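// With ExportFromRuntime gone, these constructors are no longer exported on
// a container object consumed by JS natives; they are created directly and
// stored only in the native context. Rough relationship between the two
// helpers (assumed shape): InstallFunction is CreateFunction plus a property
// add on the target object.
Handle<JSFunction> InstallFunction(Isolate* isolate, Handle<JSObject> target,
                                   const char* name, InstanceType type,
                                   int instance_size, int inobject_properties,
                                   Handle<HeapObject> prototype,
                                   Builtins::Name call) {
  Handle<JSFunction> fun =
      CreateFunction(isolate, name, type, instance_size, inobject_properties,
                     prototype, call);
  JSObject::AddProperty(isolate, target,
                        isolate->factory()->InternalizeUtf8String(name), fun,
                        DONT_ENUM);
  return fun;
}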
- {
- Handle<JSFunction> function =
- SimpleCreateFunction(isolate, factory->empty_string(),
- Builtins::kAsyncFunctionPromiseCreate, 0, false);
- native_context->set_async_function_promise_create(*function);
- }
+void Genesis::InitializeCallSiteBuiltins() {
+ Factory* factory = isolate()->factory();
+ HandleScope scope(isolate());
+ // -- C a l l S i t e
+ // Builtin functions for CallSite.
- {
- Handle<JSFunction> function = SimpleCreateFunction(
- isolate, factory->empty_string(),
- Builtins::kAsyncFunctionPromiseRelease, 2, false);
- native_context->set_async_function_promise_release(*function);
- }
- }
+ // CallSites are a special case; the constructor is for our private use
+  // only, so we set it up as a builtin that throws. Internally, we use
+ // CallSiteUtils::Construct to create CallSite objects.
- { // -- C a l l S i t e
- // Builtin functions for CallSite.
+ Handle<JSFunction> callsite_fun = CreateFunction(
+ isolate(), "CallSite", JS_OBJECT_TYPE, JSObject::kHeaderSize, 0,
+ factory->the_hole_value(), Builtins::kUnsupportedThrower);
+ callsite_fun->shared()->DontAdaptArguments();
+ isolate()->native_context()->set_callsite_function(*callsite_fun);
- // CallSites are a special case; the constructor is for our private use
- // only, therefore we set it up as a builtin that throws. Internally, we use
- // CallSiteUtils::Construct to create CallSite objects.
+ // Setup CallSite.prototype.
+ Handle<JSObject> prototype(JSObject::cast(callsite_fun->instance_prototype()),
+ isolate());
- Handle<JSFunction> callsite_fun = InstallFunction(
- isolate, container, "CallSite", JS_OBJECT_TYPE, JSObject::kHeaderSize,
- 0, factory->the_hole_value(), Builtins::kUnsupportedThrower);
- callsite_fun->shared()->DontAdaptArguments();
- isolate->native_context()->set_callsite_function(*callsite_fun);
+ struct FunctionInfo {
+ const char* name;
+ Builtins::Name id;
+ };
- {
- // Setup CallSite.prototype.
- Handle<JSObject> prototype(
- JSObject::cast(callsite_fun->instance_prototype()), isolate);
-
- struct FunctionInfo {
- const char* name;
- Builtins::Name id;
- };
-
- FunctionInfo infos[] = {
- {"getColumnNumber", Builtins::kCallSitePrototypeGetColumnNumber},
- {"getEvalOrigin", Builtins::kCallSitePrototypeGetEvalOrigin},
- {"getFileName", Builtins::kCallSitePrototypeGetFileName},
- {"getFunction", Builtins::kCallSitePrototypeGetFunction},
- {"getFunctionName", Builtins::kCallSitePrototypeGetFunctionName},
- {"getLineNumber", Builtins::kCallSitePrototypeGetLineNumber},
- {"getMethodName", Builtins::kCallSitePrototypeGetMethodName},
- {"getPosition", Builtins::kCallSitePrototypeGetPosition},
- {"getScriptNameOrSourceURL",
- Builtins::kCallSitePrototypeGetScriptNameOrSourceURL},
- {"getThis", Builtins::kCallSitePrototypeGetThis},
- {"getTypeName", Builtins::kCallSitePrototypeGetTypeName},
- {"isAsync", Builtins::kCallSitePrototypeIsAsync},
- {"isConstructor", Builtins::kCallSitePrototypeIsConstructor},
- {"isEval", Builtins::kCallSitePrototypeIsEval},
- {"isNative", Builtins::kCallSitePrototypeIsNative},
- {"isToplevel", Builtins::kCallSitePrototypeIsToplevel},
- {"toString", Builtins::kCallSitePrototypeToString}};
-
- PropertyAttributes attrs =
- static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY);
-
- Handle<JSFunction> fun;
- for (const FunctionInfo& info : infos) {
- SimpleInstallFunction(isolate, prototype, info.name, info.id, 0, true,
- attrs);
- }
- }
+ FunctionInfo infos[] = {
+ {"getColumnNumber", Builtins::kCallSitePrototypeGetColumnNumber},
+ {"getEvalOrigin", Builtins::kCallSitePrototypeGetEvalOrigin},
+ {"getFileName", Builtins::kCallSitePrototypeGetFileName},
+ {"getFunction", Builtins::kCallSitePrototypeGetFunction},
+ {"getFunctionName", Builtins::kCallSitePrototypeGetFunctionName},
+ {"getLineNumber", Builtins::kCallSitePrototypeGetLineNumber},
+ {"getMethodName", Builtins::kCallSitePrototypeGetMethodName},
+ {"getPosition", Builtins::kCallSitePrototypeGetPosition},
+ {"getPromiseIndex", Builtins::kCallSitePrototypeGetPromiseIndex},
+ {"getScriptNameOrSourceURL",
+ Builtins::kCallSitePrototypeGetScriptNameOrSourceURL},
+ {"getThis", Builtins::kCallSitePrototypeGetThis},
+ {"getTypeName", Builtins::kCallSitePrototypeGetTypeName},
+ {"isAsync", Builtins::kCallSitePrototypeIsAsync},
+ {"isConstructor", Builtins::kCallSitePrototypeIsConstructor},
+ {"isEval", Builtins::kCallSitePrototypeIsEval},
+ {"isNative", Builtins::kCallSitePrototypeIsNative},
+ {"isPromiseAll", Builtins::kCallSitePrototypeIsPromiseAll},
+ {"isToplevel", Builtins::kCallSitePrototypeIsToplevel},
+ {"toString", Builtins::kCallSitePrototypeToString}};
+
+ PropertyAttributes attrs =
+ static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY);
+
+ Handle<JSFunction> fun;
+ for (const FunctionInfo& info : infos) {
+ SimpleInstallFunction(isolate(), prototype, info.name, info.id, 0, true,
+ attrs);
}
}
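// The table now also carries getPromiseIndex and isPromiseAll, added for
// async stack traces. CallSite objects only reach user code through the
// non-standard Error.prepareStackTrace hook; a sketch of exercising the new
// entries from JS (behavior hedged from the builtin names alone):
//
//   Error.prepareStackTrace = (error, frames) =>
//       frames.map((f) => (f.isPromiseAll() ? f.getPromiseIndex() : null));
//   new Error().stack;  // invokes the hook with CallSite frames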
-
#define EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(id) \
void Genesis::InitializeGlobal_##id() {}
-EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_do_expressions)
+EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_namespace_exports)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_public_fields)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_private_fields)
+EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_private_methods)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_static_fields)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_class_fields)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_dynamic_import)
@@ -4392,6 +4209,7 @@ EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_numeric_separator)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_json_stringify)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_regexp_sequence)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_await_optimization)
+EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_hashbang)
#undef EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE
@@ -4410,46 +4228,13 @@ void Genesis::InitializeGlobal_harmony_sharedarraybuffer() {
if (!FLAG_harmony_sharedarraybuffer) return;
Handle<JSGlobalObject> global(native_context()->global_object(), isolate());
- Factory* factory = isolate()->factory();
- {
- Handle<String> name = factory->InternalizeUtf8String("SharedArrayBuffer");
- JSObject::AddProperty(isolate_, global, name,
- isolate()->shared_array_buffer_fun(), DONT_ENUM);
- }
-
- {
- Handle<String> name = factory->InternalizeUtf8String("Atomics");
- JSObject::AddProperty(isolate_, global, name, isolate()->atomics_object(),
- DONT_ENUM);
- JSObject::AddProperty(
- isolate_, isolate()->atomics_object(), factory->to_string_tag_symbol(),
- name, static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
- }
-}
-
-void Genesis::InitializeGlobal_harmony_array_prototype_values() {
- if (!FLAG_harmony_array_prototype_values) return;
- Handle<JSFunction> array_constructor(native_context()->array_function(),
- isolate());
- Handle<JSObject> array_prototype(
- JSObject::cast(array_constructor->instance_prototype()), isolate());
- Handle<Object> values_iterator =
- JSObject::GetProperty(isolate(), array_prototype,
- factory()->iterator_symbol())
- .ToHandleChecked();
- DCHECK(values_iterator->IsJSFunction());
- JSObject::AddProperty(isolate(), array_prototype, factory()->values_string(),
- values_iterator, DONT_ENUM);
+ JSObject::AddProperty(isolate_, global, "SharedArrayBuffer",
+ isolate()->shared_array_buffer_fun(), DONT_ENUM);
- Handle<Object> unscopables =
- JSObject::GetProperty(isolate(), array_prototype,
- factory()->unscopables_symbol())
- .ToHandleChecked();
- DCHECK(unscopables->IsJSObject());
- JSObject::AddProperty(isolate(), Handle<JSObject>::cast(unscopables),
- factory()->values_string(), factory()->true_value(),
- NONE);
+ JSObject::AddProperty(isolate_, global, "Atomics",
+ isolate()->atomics_object(), DONT_ENUM);
+ InstallToStringTag(isolate_, isolate()->atomics_object(), "Atomics");
}
void Genesis::InitializeGlobal_harmony_array_flat() {
@@ -4459,9 +4244,9 @@ void Genesis::InitializeGlobal_harmony_array_flat() {
Handle<JSObject> array_prototype(
JSObject::cast(array_constructor->instance_prototype()), isolate());
SimpleInstallFunction(isolate(), array_prototype, "flat",
- Builtins::kArrayPrototypeFlat, 0, false, DONT_ENUM);
+ Builtins::kArrayPrototypeFlat, 0, false);
SimpleInstallFunction(isolate(), array_prototype, "flatMap",
- Builtins::kArrayPrototypeFlatMap, 1, false, DONT_ENUM);
+ Builtins::kArrayPrototypeFlatMap, 1, false);
}
void Genesis::InitializeGlobal_harmony_symbol_description() {
@@ -4494,12 +4279,14 @@ void Genesis::InitializeGlobal_harmony_string_matchall() {
isolate());
Handle<JSObject> regexp_prototype(
JSObject::cast(regexp_fun->instance_prototype()), isolate());
- SimpleInstallFunction(isolate(), regexp_prototype,
- factory()->match_all_symbol(), "[Symbol.matchAll]",
- Builtins::kRegExpPrototypeMatchAll, 1, true);
+ InstallFunctionAtSymbol(isolate(), regexp_prototype,
+ factory()->match_all_symbol(), "[Symbol.matchAll]",
+ Builtins::kRegExpPrototypeMatchAll, 1, true);
Handle<Map> regexp_prototype_map(regexp_prototype->map(), isolate());
Map::SetShouldBeFastPrototypeMap(regexp_prototype_map, true, isolate());
native_context()->set_regexp_prototype_map(*regexp_prototype_map);
+ DCHECK_EQ(JSRegExp::kSymbolMatchAllFunctionDescriptorIndex,
+ regexp_prototype->map()->LastAdded());
}
{ // --- R e g E x p S t r i n g I t e r a t o r ---
@@ -4511,20 +4298,17 @@ void Genesis::InitializeGlobal_harmony_string_matchall() {
JSObject::ForceSetPrototype(regexp_string_iterator_prototype,
iterator_prototype);
- JSObject::AddProperty(
- isolate(), regexp_string_iterator_prototype,
- factory()->to_string_tag_symbol(),
- factory()->InternalizeUtf8String("RegExp String Iterator"),
- static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
+ InstallToStringTag(isolate(), regexp_string_iterator_prototype,
+ "RegExp String Iterator");
SimpleInstallFunction(isolate(), regexp_string_iterator_prototype, "next",
Builtins::kRegExpStringIteratorPrototypeNext, 0,
true);
Handle<JSFunction> regexp_string_iterator_function = CreateFunction(
- isolate(), factory()->InternalizeUtf8String("RegExpStringIterator"),
- JS_REGEXP_STRING_ITERATOR_TYPE, JSRegExpStringIterator::kSize, 0,
- regexp_string_iterator_prototype, Builtins::kIllegal);
+ isolate(), "RegExpStringIterator", JS_REGEXP_STRING_ITERATOR_TYPE,
+ JSRegExpStringIterator::kSize, 0, regexp_string_iterator_prototype,
+ Builtins::kIllegal);
regexp_string_iterator_function->shared()->set_native(false);
native_context()->set_initial_regexp_string_iterator_prototype_map(
regexp_string_iterator_function->initial_map());
@@ -4538,6 +4322,121 @@ void Genesis::InitializeGlobal_harmony_string_matchall() {
}
}
+void Genesis::InitializeGlobal_harmony_weak_refs() {
+ if (!FLAG_harmony_weak_refs) return;
+
+ Factory* factory = isolate()->factory();
+ Handle<JSGlobalObject> global(native_context()->global_object(), isolate());
+
+ {
+ // Create %WeakFactoryPrototype%
+ Handle<String> weak_factory_name = factory->WeakFactory_string();
+ Handle<JSObject> weak_factory_prototype =
+ factory->NewJSObject(isolate()->object_function(), TENURED);
+
+ // Create %WeakFactory%
+ Handle<JSFunction> weak_factory_fun =
+ CreateFunction(isolate(), weak_factory_name, JS_WEAK_FACTORY_TYPE,
+ JSWeakFactory::kSize, 0, weak_factory_prototype,
+ Builtins::kWeakFactoryConstructor);
+
+ weak_factory_fun->shared()->DontAdaptArguments();
+ weak_factory_fun->shared()->set_length(1);
+
+ // Install the "constructor" property on the prototype.
+ JSObject::AddProperty(isolate(), weak_factory_prototype,
+ factory->constructor_string(), weak_factory_fun,
+ DONT_ENUM);
+
+ InstallToStringTag(isolate(), weak_factory_prototype, weak_factory_name);
+
+ JSObject::AddProperty(isolate(), global, weak_factory_name,
+ weak_factory_fun, DONT_ENUM);
+
+ SimpleInstallFunction(isolate(), weak_factory_prototype, "makeCell",
+ Builtins::kWeakFactoryMakeCell, 2, false);
+
+ SimpleInstallFunction(isolate(), weak_factory_prototype, "cleanupSome",
+ Builtins::kWeakFactoryCleanupSome, 0, false);
+ }
+ {
+ // Create %WeakCellPrototype%
+ Handle<Map> weak_cell_map =
+ factory->NewMap(JS_WEAK_CELL_TYPE, JSWeakCell::kSize);
+ native_context()->set_js_weak_cell_map(*weak_cell_map);
+
+ Handle<JSObject> weak_cell_prototype =
+ factory->NewJSObject(isolate()->object_function(), TENURED);
+ Map::SetPrototype(isolate(), weak_cell_map, weak_cell_prototype);
+
+ InstallToStringTag(isolate(), weak_cell_prototype,
+ factory->WeakCell_string());
+
+ SimpleInstallGetter(isolate(), weak_cell_prototype,
+ factory->InternalizeUtf8String("holdings"),
+ Builtins::kWeakCellHoldingsGetter, false);
+ SimpleInstallFunction(isolate(), weak_cell_prototype, "clear",
+ Builtins::kWeakCellClear, 0, false);
+
+ // Create %WeakRefPrototype%
+ Handle<Map> weak_ref_map =
+ factory->NewMap(JS_WEAK_REF_TYPE, JSWeakRef::kSize);
+ DCHECK(weak_ref_map->IsJSObjectMap());
+ native_context()->set_js_weak_ref_map(*weak_ref_map);
+
+ Handle<JSObject> weak_ref_prototype =
+ factory->NewJSObject(isolate()->object_function(), TENURED);
+ Map::SetPrototype(isolate(), weak_ref_map, weak_ref_prototype);
+ JSObject::ForceSetPrototype(weak_ref_prototype, weak_cell_prototype);
+
+ InstallToStringTag(isolate(), weak_ref_prototype,
+ factory->WeakRef_string());
+
+ SimpleInstallFunction(isolate(), weak_ref_prototype, "deref",
+ Builtins::kWeakRefDeref, 0, false);
+
+ // Create %WeakRef%
+ Handle<String> weak_ref_name = factory->InternalizeUtf8String("WeakRef");
+ Handle<JSFunction> weak_ref_fun = CreateFunction(
+ isolate(), weak_ref_name, JS_WEAK_REF_TYPE, JSWeakRef::kSize, 0,
+ weak_ref_prototype, Builtins::kWeakRefConstructor);
+
+ weak_ref_fun->shared()->DontAdaptArguments();
+ weak_ref_fun->shared()->set_length(1);
+
+ // Install the "constructor" property on the prototype.
+ JSObject::AddProperty(isolate(), weak_ref_prototype,
+ factory->constructor_string(), weak_ref_fun,
+ DONT_ENUM);
+
+ JSObject::AddProperty(isolate(), global, weak_ref_name, weak_ref_fun,
+ DONT_ENUM);
+ }
+
+ {
+ // Create cleanup iterator for JSWeakFactory.
+ Handle<JSObject> iterator_prototype(
+ native_context()->initial_iterator_prototype(), isolate());
+
+ Handle<JSObject> cleanup_iterator_prototype =
+ factory->NewJSObject(isolate()->object_function(), TENURED);
+ JSObject::ForceSetPrototype(cleanup_iterator_prototype, iterator_prototype);
+
+ InstallToStringTag(isolate(), cleanup_iterator_prototype,
+ "JSWeakFactoryCleanupIterator");
+
+ SimpleInstallFunction(isolate(), cleanup_iterator_prototype, "next",
+ Builtins::kWeakFactoryCleanupIteratorNext, 0, true);
+ Handle<Map> cleanup_iterator_map =
+ factory->NewMap(JS_WEAK_FACTORY_CLEANUP_ITERATOR_TYPE,
+ JSWeakFactoryCleanupIterator::kSize);
+ Map::SetPrototype(isolate(), cleanup_iterator_map,
+ cleanup_iterator_prototype);
+ native_context()->set_js_weak_factory_cleanup_iterator_map(
+ *cleanup_iterator_map);
+ }
+}
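// JS surface wired up above (early WeakRefs proposal, before the
// FinalizationRegistry naming; semantics hedged from the builtin names and
// arities alone):
//
//   const factory = new WeakFactory(cleanup);         // length 1
//   const cell = factory.makeCell(target, holdings);  // length 2
//   cell.holdings;      // getter on %WeakCellPrototype%
//   cell.clear();
//   factory.cleanupSome();
//   const ref = new WeakRef(target);  // %WeakRefPrototype% chains to
//   ref.deref();                      // %WeakCellPrototype% (see above)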
+
#ifdef V8_INTL_SUPPORT
void Genesis::InitializeGlobal_harmony_intl_list_format() {
if (!FLAG_harmony_intl_list_format) return;
@@ -4562,10 +4461,7 @@ void Genesis::InitializeGlobal_harmony_intl_list_format() {
Handle<JSObject> prototype(
JSObject::cast(list_format_fun->instance_prototype()), isolate());
- // Install the @@toStringTag property on the {prototype}.
- JSObject::AddProperty(isolate(), prototype, factory()->to_string_tag_symbol(),
- factory()->InternalizeUtf8String("Intl.ListFormat"),
- static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
+ InstallToStringTag(isolate(), prototype, "Intl.ListFormat");
SimpleInstallFunction(isolate(), prototype, "resolvedOptions",
Builtins::kListFormatPrototypeResolvedOptions, 0,
@@ -4598,10 +4494,7 @@ void Genesis::InitializeGlobal_harmony_locale() {
Handle<JSObject> prototype(JSObject::cast(locale_fun->instance_prototype()),
isolate());
- // Install the @@toStringTag property on the {prototype}.
- JSObject::AddProperty(isolate(), prototype, factory()->to_string_tag_symbol(),
- factory()->InternalizeUtf8String("Intl.Locale"),
- static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
+ InstallToStringTag(isolate(), prototype, "Intl.Locale");
SimpleInstallFunction(isolate(), prototype, "toString",
Builtins::kLocalePrototypeToString, 0, false);
@@ -4668,11 +4561,7 @@ void Genesis::InitializeGlobal_harmony_intl_relative_time_format() {
JSObject::cast(relative_time_format_fun->instance_prototype()),
isolate());
- // Install the @@toStringTag property on the {prototype}.
- JSObject::AddProperty(
- isolate(), prototype, factory()->to_string_tag_symbol(),
- factory()->InternalizeUtf8String("Intl.RelativeTimeFormat"),
- static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
+ InstallToStringTag(isolate(), prototype, "Intl.RelativeTimeFormat");
SimpleInstallFunction(isolate(), prototype, "resolvedOptions",
Builtins::kRelativeTimeFormatPrototypeResolvedOptions,
@@ -4702,30 +4591,84 @@ void Genesis::InitializeGlobal_harmony_intl_segmenter() {
SimpleInstallFunction(isolate(), segmenter_fun, "supportedLocalesOf",
Builtins::kSegmenterSupportedLocalesOf, 1, false);
- // Setup %SegmenterPrototype%.
- Handle<JSObject> prototype(
- JSObject::cast(segmenter_fun->instance_prototype()), isolate());
+ {
+ // Setup %SegmenterPrototype%.
+ Handle<JSObject> prototype(
+ JSObject::cast(segmenter_fun->instance_prototype()), isolate());
- // Install the @@toStringTag property on the {prototype}.
- JSObject::AddProperty(isolate(), prototype, factory()->to_string_tag_symbol(),
- factory()->NewStringFromStaticChars("Intl.Segmenter"),
- static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
+ InstallToStringTag(isolate(), prototype, "Intl.Segmenter");
- SimpleInstallFunction(isolate(), prototype, "resolvedOptions",
- Builtins::kSegmenterPrototypeResolvedOptions, 0, false);
+ SimpleInstallFunction(isolate(), prototype, "resolvedOptions",
+ Builtins::kSegmenterPrototypeResolvedOptions, 0,
+ false);
+
+ SimpleInstallFunction(isolate(), prototype, "segment",
+ Builtins::kSegmenterPrototypeSegment, 1, false);
+ }
+
+ {
+ // Setup %SegmentIteratorPrototype%.
+ Handle<JSObject> iterator_prototype(
+ native_context()->initial_iterator_prototype(), isolate());
+
+ Handle<JSObject> prototype =
+ factory()->NewJSObject(isolate()->object_function(), TENURED);
+ JSObject::ForceSetPrototype(prototype, iterator_prototype);
+
+ InstallToStringTag(isolate(), prototype,
+ factory()->SegmentIterator_string());
+
+ SimpleInstallFunction(isolate(), prototype, "next",
+ Builtins::kSegmentIteratorPrototypeNext, 0, false);
+
+ SimpleInstallFunction(isolate(), prototype, "following",
+ Builtins::kSegmentIteratorPrototypeFollowing, 0,
+ false);
+
+ SimpleInstallFunction(isolate(), prototype, "preceding",
+ Builtins::kSegmentIteratorPrototypePreceding, 0,
+ false);
+
+ SimpleInstallGetter(isolate(), prototype,
+ factory()->InternalizeUtf8String("index"),
+ Builtins::kSegmentIteratorPrototypeIndex, false);
+
+ SimpleInstallGetter(isolate(), prototype,
+ factory()->InternalizeUtf8String("breakType"),
+ Builtins::kSegmentIteratorPrototypeBreakType, false);
+
+ // Setup SegmentIterator constructor.
+ Handle<String> name_string =
+ Name::ToFunctionName(
+ isolate(),
+ isolate()->factory()->InternalizeUtf8String("SegmentIterator"))
+ .ToHandleChecked();
+ Handle<JSFunction> segment_iterator_fun = CreateFunction(
+ isolate(), name_string, JS_INTL_SEGMENT_ITERATOR_TYPE,
+ JSSegmentIterator::kSize, 0, prototype, Builtins::kIllegal);
+ segment_iterator_fun->shared()->set_native(false);
+
+ Handle<Map> segment_iterator_map(segment_iterator_fun->initial_map(),
+ isolate());
+ native_context()->set_intl_segment_iterator_map(*segment_iterator_map);
+ }
}
#endif // V8_INTL_SUPPORT
+void Genesis::InitializeGlobal_harmony_object_from_entries() {
+ if (!FLAG_harmony_object_from_entries) return;
+ SimpleInstallFunction(isolate(), isolate()->object_function(), "fromEntries",
+ Builtins::kObjectFromEntries, 1, false);
+}
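// The function installed above is the standard Object.fromEntries (length 1):
//
//   Object.fromEntries([["a", 1], ["b", 2]]);   // => { a: 1, b: 2 }
//   Object.fromEntries(new Map([["k", "v"]]));  // => { k: "v" }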
+
Handle<JSFunction> Genesis::CreateArrayBuffer(
Handle<String> name, ArrayBufferKind array_buffer_kind) {
// Create the %ArrayBufferPrototype%
// Setup the {prototype} with the given {name} for @@toStringTag.
Handle<JSObject> prototype =
factory()->NewJSObject(isolate()->object_function(), TENURED);
- JSObject::AddProperty(isolate(), prototype, factory()->to_string_tag_symbol(),
- name,
- static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
+ InstallToStringTag(isolate(), prototype, name);
// Allocate the constructor with the given {prototype}.
Handle<JSFunction> array_buffer_fun =
@@ -4741,10 +4684,9 @@ Handle<JSFunction> Genesis::CreateArrayBuffer(
switch (array_buffer_kind) {
case ARRAY_BUFFER:
- SimpleInstallFunction(isolate(), array_buffer_fun,
- factory()->isView_string(),
- Builtins::kArrayBufferIsView, 1, true, DONT_ENUM,
- BuiltinFunctionId::kArrayBufferIsView);
+ InstallFunctionWithBuiltinId(isolate(), array_buffer_fun, "isView",
+ Builtins::kArrayBufferIsView, 1, true,
+ BuiltinFunctionId::kArrayBufferIsView);
// Install the "byteLength" getter on the {prototype}.
SimpleInstallGetter(isolate(), prototype, factory()->byte_length_string(),
@@ -4771,10 +4713,19 @@ Handle<JSFunction> Genesis::CreateArrayBuffer(
return array_buffer_fun;
}
+void Genesis::InstallInternalPackedArrayFunction(Handle<JSObject> prototype,
+ const char* function_name) {
+ Handle<JSObject> array_prototype(native_context()->initial_array_prototype(),
+ isolate());
+ Handle<Object> func =
+ JSReceiver::GetProperty(isolate(), array_prototype, function_name)
+ .ToHandleChecked();
+ JSObject::AddProperty(isolate(), prototype, function_name, func,
+ ALL_ATTRIBUTES_MASK);
+}
-Handle<JSFunction> Genesis::InstallInternalArray(Handle<JSObject> target,
- const char* name,
- ElementsKind elements_kind) {
+void Genesis::InstallInternalPackedArray(Handle<JSObject> target,
+ const char* name) {
// --- I n t e r n a l A r r a y ---
// An array constructor on the builtins object that works like
// the public Array constructor, except that its prototype
@@ -4791,7 +4742,7 @@ Handle<JSFunction> Genesis::InstallInternalArray(Handle<JSObject> target,
Handle<Map> original_map(array_function->initial_map(), isolate());
Handle<Map> initial_map = Map::Copy(isolate(), original_map, "InternalArray");
- initial_map->set_elements_kind(elements_kind);
+ initial_map->set_elements_kind(PACKED_ELEMENTS);
JSFunction::SetInitialMap(array_function, initial_map, prototype);
// Make "length" magic on instances.
@@ -4804,60 +4755,92 @@ Handle<JSFunction> Genesis::InstallInternalArray(Handle<JSObject> target,
Descriptor d = Descriptor::AccessorConstant(
factory()->length_string(), factory()->array_length_accessor(),
attribs);
- initial_map->AppendDescriptor(&d);
+ initial_map->AppendDescriptor(isolate(), &d);
}
- return array_function;
+ JSObject::NormalizeProperties(
+ prototype, KEEP_INOBJECT_PROPERTIES, 6,
+ "OptimizeInternalPackedArrayPrototypeForAdding");
+ InstallInternalPackedArrayFunction(prototype, "push");
+ InstallInternalPackedArrayFunction(prototype, "pop");
+ InstallInternalPackedArrayFunction(prototype, "shift");
+ InstallInternalPackedArrayFunction(prototype, "unshift");
+ InstallInternalPackedArrayFunction(prototype, "splice");
+ InstallInternalPackedArrayFunction(prototype, "slice");
+
+ JSObject::ForceSetPrototype(prototype, factory()->null_value());
+ JSObject::MigrateSlowToFast(prototype, 0, "Bootstrapping");
}
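// Net effect, for orientation: the internal prototype becomes a fast-mode,
// null-prototype object whose push/pop/shift/unshift/splice/slice are
// read-only, non-configurable copies (ALL_ATTRIBUTES_MASK) of the public
// Array.prototype methods, so monkey-patching Array.prototype in user code
// cannot reach arrays used internally by extras.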
-bool Genesis::InstallNatives(GlobalContextType context_type) {
+bool Genesis::InstallNatives() {
HandleScope scope(isolate());
- // Set up the utils object as shared container between native scripts.
- Handle<JSObject> utils = factory()->NewJSObject(isolate()->object_function());
- JSObject::NormalizeProperties(utils, CLEAR_INOBJECT_PROPERTIES, 16,
- "utils container for native scripts");
- native_context()->set_natives_utils_object(*utils);
-
// Set up the extras utils object as a shared container between native
// scripts and extras. (Extras consume things added there by native scripts.)
Handle<JSObject> extras_utils =
factory()->NewJSObject(isolate()->object_function());
native_context()->set_extras_utils_object(*extras_utils);
- InstallInternalArray(extras_utils, "InternalPackedArray", PACKED_ELEMENTS);
+ InstallInternalPackedArray(extras_utils, "InternalPackedArray");
+
+ // Extras need the ability to store private state on their objects without
+ // exposing it to the outside world.
+ SimpleInstallFunction(isolate_, extras_utils, "createPrivateSymbol",
+ Builtins::kExtrasUtilsCreatePrivateSymbol, 1, false);
+
+ SimpleInstallFunction(isolate_, extras_utils, "uncurryThis",
+ Builtins::kExtrasUtilsUncurryThis, 1, false);
+
+ SimpleInstallFunction(isolate_, extras_utils, "markPromiseAsHandled",
+ Builtins::kExtrasUtilsMarkPromiseAsHandled, 1, false);
+
+ SimpleInstallFunction(isolate_, extras_utils, "promiseState",
+ Builtins::kExtrasUtilsPromiseState, 1, false);
+
+  // [[PromiseState]] values (for extrasUtils.promiseState()).
+  // These values should be kept in sync with PromiseStatus in globals.h.
+ JSObject::AddProperty(
+ isolate(), extras_utils, "kPROMISE_PENDING",
+ factory()->NewNumberFromInt(static_cast<int>(Promise::kPending)),
+ DONT_ENUM);
+ JSObject::AddProperty(
+ isolate(), extras_utils, "kPROMISE_FULFILLED",
+ factory()->NewNumberFromInt(static_cast<int>(Promise::kFulfilled)),
+ DONT_ENUM);
+ JSObject::AddProperty(
+ isolate(), extras_utils, "kPROMISE_REJECTED",
+ factory()->NewNumberFromInt(static_cast<int>(Promise::kRejected)),
+ DONT_ENUM);
// v8.createPromise(parent)
Handle<JSFunction> promise_internal_constructor =
SimpleCreateFunction(isolate(), factory()->empty_string(),
Builtins::kPromiseInternalConstructor, 1, true);
promise_internal_constructor->shared()->set_native(false);
- InstallFunction(isolate(), extras_utils, promise_internal_constructor,
- factory()->InternalizeUtf8String("createPromise"));
+ JSObject::AddProperty(isolate(), extras_utils, "createPromise",
+ promise_internal_constructor, DONT_ENUM);
// v8.rejectPromise(promise, reason)
Handle<JSFunction> promise_internal_reject =
SimpleCreateFunction(isolate(), factory()->empty_string(),
Builtins::kPromiseInternalReject, 2, true);
promise_internal_reject->shared()->set_native(false);
- InstallFunction(isolate(), extras_utils, promise_internal_reject,
- factory()->InternalizeUtf8String("rejectPromise"));
+ JSObject::AddProperty(isolate(), extras_utils, "rejectPromise",
+ promise_internal_reject, DONT_ENUM);
// v8.resolvePromise(promise, resolution)
Handle<JSFunction> promise_internal_resolve =
SimpleCreateFunction(isolate(), factory()->empty_string(),
Builtins::kPromiseInternalResolve, 2, true);
promise_internal_resolve->shared()->set_native(false);
- InstallFunction(isolate(), extras_utils, promise_internal_resolve,
- factory()->InternalizeUtf8String("resolvePromise"));
+ JSObject::AddProperty(isolate(), extras_utils, "resolvePromise",
+ promise_internal_resolve, DONT_ENUM);
- InstallFunction(isolate(), extras_utils, isolate()->is_promise(),
- factory()->InternalizeUtf8String("isPromise"));
+ JSObject::AddProperty(isolate(), extras_utils, "isPromise",
+ isolate()->is_promise(), DONT_ENUM);
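// Extras-side view of the bindings installed above (sketch; extras scripts
// receive this object as their |extrasUtils| argument during bootstrap):
//
//   const p = extrasUtils.createPromise();
//   extrasUtils.resolvePromise(p, 42);
//   extrasUtils.isPromise(p);  // true
//   extrasUtils.promiseState(p) === extrasUtils.kPROMISE_FULFILLED;  // true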
- int builtin_index = Natives::GetDebuggerCount();
- // Only run prologue.js at this point.
- DCHECK_EQ(builtin_index, Natives::GetIndex("prologue"));
- if (!Bootstrapper::CompileBuiltin(isolate(), builtin_index++)) return false;
+ JSObject::MigrateSlowToFast(Handle<JSObject>::cast(extras_utils), 0,
+ "Bootstrapping");
{
// Builtin function for OpaqueReference -- a JSValue-based object,
@@ -4871,26 +4854,6 @@ bool Genesis::InstallNatives(GlobalContextType context_type) {
native_context()->set_opaque_reference_function(*opaque_reference_fun);
}
- // InternalArrays should not use Smi-Only array optimizations. There are too
- // many places in the C++ runtime code (e.g. RegEx) that assume that
- // elements in InternalArrays can be set to non-Smi values without going
- // through a common bottleneck that would make the SMI_ONLY -> FAST_ELEMENT
- // transition easy to trap. Moreover, they rarely are smi-only.
- {
- HandleScope scope(isolate());
- Handle<JSObject> utils =
- Handle<JSObject>::cast(isolate()->natives_utils_object());
- Handle<JSFunction> array_function =
- InstallInternalArray(utils, "InternalArray", HOLEY_ELEMENTS);
- native_context()->set_internal_array_function(*array_function);
- }
-
- // Run the rest of the native scripts.
- while (builtin_index < Natives::GetBuiltinsCount()) {
- if (!Bootstrapper::CompileBuiltin(isolate(), builtin_index++)) return false;
- }
-
- if (!CallUtilsFunction(isolate(), "PostNatives")) return false;
auto fast_template_instantiations_cache = isolate()->factory()->NewFixedArray(
TemplateInfo::kFastTemplateInstantiationsCacheSize);
native_context()->set_fast_template_instantiations_cache(
@@ -4916,7 +4879,7 @@ bool Genesis::InstallNatives(GlobalContextType context_type) {
// and the String function has been set up.
Handle<JSFunction> string_function(native_context()->string_function(),
isolate());
- JSObject* string_function_prototype =
+ JSObject string_function_prototype =
JSObject::cast(string_function->initial_map()->prototype());
DCHECK(string_function_prototype->HasFastProperties());
native_context()->set_string_function_prototype_map(
@@ -4926,52 +4889,51 @@ bool Genesis::InstallNatives(GlobalContextType context_type) {
handle(native_context()->global_object(), isolate());
// Install Global.decodeURI.
- SimpleInstallFunction(isolate(), global_object, "decodeURI",
- Builtins::kGlobalDecodeURI, 1, false,
- BuiltinFunctionId::kGlobalDecodeURI);
+ InstallFunctionWithBuiltinId(isolate(), global_object, "decodeURI",
+ Builtins::kGlobalDecodeURI, 1, false,
+ BuiltinFunctionId::kGlobalDecodeURI);
// Install Global.decodeURIComponent.
- SimpleInstallFunction(isolate(), global_object, "decodeURIComponent",
- Builtins::kGlobalDecodeURIComponent, 1, false,
- BuiltinFunctionId::kGlobalDecodeURIComponent);
+ InstallFunctionWithBuiltinId(isolate(), global_object, "decodeURIComponent",
+ Builtins::kGlobalDecodeURIComponent, 1, false,
+ BuiltinFunctionId::kGlobalDecodeURIComponent);
// Install Global.encodeURI.
- SimpleInstallFunction(isolate(), global_object, "encodeURI",
- Builtins::kGlobalEncodeURI, 1, false,
- BuiltinFunctionId::kGlobalEncodeURI);
+ InstallFunctionWithBuiltinId(isolate(), global_object, "encodeURI",
+ Builtins::kGlobalEncodeURI, 1, false,
+ BuiltinFunctionId::kGlobalEncodeURI);
// Install Global.encodeURIComponent.
- SimpleInstallFunction(isolate(), global_object, "encodeURIComponent",
- Builtins::kGlobalEncodeURIComponent, 1, false,
- BuiltinFunctionId::kGlobalEncodeURIComponent);
+ InstallFunctionWithBuiltinId(isolate(), global_object, "encodeURIComponent",
+ Builtins::kGlobalEncodeURIComponent, 1, false,
+ BuiltinFunctionId::kGlobalEncodeURIComponent);
// Install Global.escape.
- SimpleInstallFunction(isolate(), global_object, "escape",
- Builtins::kGlobalEscape, 1, false,
- BuiltinFunctionId::kGlobalEscape);
+ InstallFunctionWithBuiltinId(isolate(), global_object, "escape",
+ Builtins::kGlobalEscape, 1, false,
+ BuiltinFunctionId::kGlobalEscape);
// Install Global.unescape.
- SimpleInstallFunction(isolate(), global_object, "unescape",
- Builtins::kGlobalUnescape, 1, false,
- BuiltinFunctionId::kGlobalUnescape);
+ InstallFunctionWithBuiltinId(isolate(), global_object, "unescape",
+ Builtins::kGlobalUnescape, 1, false,
+ BuiltinFunctionId::kGlobalUnescape);
// Install Global.eval.
{
Handle<JSFunction> eval = SimpleInstallFunction(
- isolate(), global_object, factory()->eval_string(),
- Builtins::kGlobalEval, 1, false);
+ isolate(), global_object, "eval", Builtins::kGlobalEval, 1, false);
native_context()->set_global_eval_fun(*eval);
}
// Install Global.isFinite
- SimpleInstallFunction(isolate(), global_object, "isFinite",
- Builtins::kGlobalIsFinite, 1, true,
- BuiltinFunctionId::kGlobalIsFinite);
+ InstallFunctionWithBuiltinId(isolate(), global_object, "isFinite",
+ Builtins::kGlobalIsFinite, 1, true,
+ BuiltinFunctionId::kGlobalIsFinite);
// Install Global.isNaN
- SimpleInstallFunction(isolate(), global_object, "isNaN",
- Builtins::kGlobalIsNaN, 1, true,
- BuiltinFunctionId::kGlobalIsNaN);
+ InstallFunctionWithBuiltinId(isolate(), global_object, "isNaN",
+ Builtins::kGlobalIsNaN, 1, true,
+ BuiltinFunctionId::kGlobalIsNaN);
// Install Array builtin functions.
{
@@ -4981,7 +4943,7 @@ bool Genesis::InstallNatives(GlobalContextType context_type) {
isolate());
// Verification of important array prototype properties.
- Object* length = proto->length();
+ Object length = proto->length();
CHECK(length->IsSmi());
CHECK_EQ(Smi::ToInt(length), 0);
CHECK(proto->HasSmiOrObjectElements());
@@ -4990,16 +4952,6 @@ bool Genesis::InstallNatives(GlobalContextType context_type) {
proto->set_elements(ReadOnlyRoots(heap()).empty_fixed_array());
}
- // Install InternalArray.prototype.concat
- {
- Handle<JSFunction> array_constructor(
- native_context()->internal_array_function(), isolate());
- Handle<JSObject> proto(JSObject::cast(array_constructor->prototype()),
- isolate());
- SimpleInstallFunction(isolate(), proto, "concat", Builtins::kArrayConcat, 1,
- false);
- }
-
InstallBuiltinFunctionIds();
// Create a map for accessor property descriptors (a variant of JSObject
@@ -5017,28 +4969,28 @@ bool Genesis::InstallNatives(GlobalContextType context_type) {
Descriptor::DataField(isolate(), factory()->get_string(),
JSAccessorPropertyDescriptor::kGetIndex, NONE,
Representation::Tagged());
- map->AppendDescriptor(&d);
+ map->AppendDescriptor(isolate(), &d);
}
{ // set
Descriptor d =
Descriptor::DataField(isolate(), factory()->set_string(),
JSAccessorPropertyDescriptor::kSetIndex, NONE,
Representation::Tagged());
- map->AppendDescriptor(&d);
+ map->AppendDescriptor(isolate(), &d);
}
{ // enumerable
Descriptor d =
Descriptor::DataField(isolate(), factory()->enumerable_string(),
JSAccessorPropertyDescriptor::kEnumerableIndex,
NONE, Representation::Tagged());
- map->AppendDescriptor(&d);
+ map->AppendDescriptor(isolate(), &d);
}
{ // configurable
Descriptor d = Descriptor::DataField(
isolate(), factory()->configurable_string(),
JSAccessorPropertyDescriptor::kConfigurableIndex, NONE,
Representation::Tagged());
- map->AppendDescriptor(&d);
+ map->AppendDescriptor(isolate(), &d);
}
Map::SetPrototype(isolate(), map, isolate()->initial_object_prototype());
@@ -5063,28 +5015,28 @@ bool Genesis::InstallNatives(GlobalContextType context_type) {
Descriptor::DataField(isolate(), factory()->value_string(),
JSDataPropertyDescriptor::kValueIndex, NONE,
Representation::Tagged());
- map->AppendDescriptor(&d);
+ map->AppendDescriptor(isolate(), &d);
}
{ // writable
Descriptor d =
Descriptor::DataField(isolate(), factory()->writable_string(),
JSDataPropertyDescriptor::kWritableIndex, NONE,
Representation::Tagged());
- map->AppendDescriptor(&d);
+ map->AppendDescriptor(isolate(), &d);
}
{ // enumerable
Descriptor d =
Descriptor::DataField(isolate(), factory()->enumerable_string(),
JSDataPropertyDescriptor::kEnumerableIndex,
NONE, Representation::Tagged());
- map->AppendDescriptor(&d);
+ map->AppendDescriptor(isolate(), &d);
}
{ // configurable
Descriptor d =
Descriptor::DataField(isolate(), factory()->configurable_string(),
JSDataPropertyDescriptor::kConfigurableIndex,
NONE, Representation::Tagged());
- map->AppendDescriptor(&d);
+ map->AppendDescriptor(isolate(), &d);
}
Map::SetPrototype(isolate(), map, isolate()->initial_object_prototype());
@@ -5121,7 +5073,7 @@ bool Genesis::InstallNatives(GlobalContextType context_type) {
// length descriptor.
{
- JSFunction* array_function = native_context()->array_function();
+ JSFunction array_function = native_context()->array_function();
Handle<DescriptorArray> array_descriptors(
array_function->initial_map()->instance_descriptors(), isolate());
Handle<String> length = factory()->length_string();
@@ -5131,7 +5083,7 @@ bool Genesis::InstallNatives(GlobalContextType context_type) {
Descriptor d = Descriptor::AccessorConstant(
length, handle(array_descriptors->GetStrongValue(old), isolate()),
array_descriptors->GetDetails(old).attributes());
- initial_map->AppendDescriptor(&d);
+ initial_map->AppendDescriptor(isolate(), &d);
}
// index descriptor.
@@ -5139,7 +5091,7 @@ bool Genesis::InstallNatives(GlobalContextType context_type) {
Descriptor d = Descriptor::DataField(isolate(), factory()->index_string(),
JSRegExpResult::kIndexIndex, NONE,
Representation::Tagged());
- initial_map->AppendDescriptor(&d);
+ initial_map->AppendDescriptor(isolate(), &d);
}
// input descriptor.
@@ -5147,7 +5099,7 @@ bool Genesis::InstallNatives(GlobalContextType context_type) {
Descriptor d = Descriptor::DataField(isolate(), factory()->input_string(),
JSRegExpResult::kInputIndex, NONE,
Representation::Tagged());
- initial_map->AppendDescriptor(&d);
+ initial_map->AppendDescriptor(isolate(), &d);
}
// groups descriptor.
@@ -5155,7 +5107,7 @@ bool Genesis::InstallNatives(GlobalContextType context_type) {
Descriptor d = Descriptor::DataField(
isolate(), factory()->groups_string(), JSRegExpResult::kGroupsIndex,
NONE, Representation::Tagged());
- initial_map->AppendDescriptor(&d);
+ initial_map->AppendDescriptor(isolate(), &d);
}
native_context()->set_regexp_result_map(*initial_map);
@@ -5171,7 +5123,7 @@ bool Genesis::InstallNatives(GlobalContextType context_type) {
arguments_iterator, attribs);
Handle<Map> map(native_context()->sloppy_arguments_map(), isolate());
Map::EnsureDescriptorSlack(isolate(), map, 1);
- map->AppendDescriptor(&d);
+ map->AppendDescriptor(isolate(), &d);
}
{
Descriptor d = Descriptor::AccessorConstant(factory()->iterator_symbol(),
@@ -5179,7 +5131,7 @@ bool Genesis::InstallNatives(GlobalContextType context_type) {
Handle<Map> map(native_context()->fast_aliased_arguments_map(),
isolate());
Map::EnsureDescriptorSlack(isolate(), map, 1);
- map->AppendDescriptor(&d);
+ map->AppendDescriptor(isolate(), &d);
}
{
Descriptor d = Descriptor::AccessorConstant(factory()->iterator_symbol(),
@@ -5187,14 +5139,14 @@ bool Genesis::InstallNatives(GlobalContextType context_type) {
Handle<Map> map(native_context()->slow_aliased_arguments_map(),
isolate());
Map::EnsureDescriptorSlack(isolate(), map, 1);
- map->AppendDescriptor(&d);
+ map->AppendDescriptor(isolate(), &d);
}
{
Descriptor d = Descriptor::AccessorConstant(factory()->iterator_symbol(),
arguments_iterator, attribs);
Handle<Map> map(native_context()->strict_arguments_map(), isolate());
Map::EnsureDescriptorSlack(isolate(), map, 1);
- map->AppendDescriptor(&d);
+ map->AppendDescriptor(isolate(), &d);
}
}
@@ -5217,8 +5169,7 @@ bool Genesis::InstallExtraNatives() {
native_context()->set_extras_binding_object(*extras_binding);
- for (int i = ExtraNatives::GetDebuggerCount();
- i < ExtraNatives::GetBuiltinsCount(); i++) {
+ for (int i = 0; i < ExtraNatives::GetBuiltinsCount(); i++) {
if (!Bootstrapper::CompileExtraBuiltin(isolate(), i)) return false;
}
@@ -5226,24 +5177,6 @@ bool Genesis::InstallExtraNatives() {
}
-bool Genesis::InstallExperimentalExtraNatives() {
- for (int i = ExperimentalExtraNatives::GetDebuggerCount();
- i < ExperimentalExtraNatives::GetBuiltinsCount(); i++) {
- if (!Bootstrapper::CompileExperimentalExtraBuiltin(isolate(), i))
- return false;
- }
-
- return true;
-}
-
-
-bool Genesis::InstallDebuggerNatives() {
- for (int i = 0; i < Natives::GetDebuggerCount(); ++i) {
- if (!Bootstrapper::CompileBuiltin(isolate(), i)) return false;
- }
- return true;
-}
-
static void InstallBuiltinFunctionId(Isolate* isolate, Handle<JSObject> holder,
const char* function_name,
BuiltinFunctionId id) {
@@ -5459,9 +5392,9 @@ bool Genesis::ConfigureGlobalObjects(
Handle<FunctionTemplateInfo> proxy_constructor(
FunctionTemplateInfo::cast(global_proxy_data->constructor()),
isolate());
- if (!proxy_constructor->prototype_template()->IsUndefined(isolate())) {
+ if (!proxy_constructor->GetPrototypeTemplate()->IsUndefined(isolate())) {
Handle<ObjectTemplateInfo> global_object_data(
- ObjectTemplateInfo::cast(proxy_constructor->prototype_template()),
+ ObjectTemplateInfo::cast(proxy_constructor->GetPrototypeTemplate()),
isolate());
if (!ConfigureApiObject(global_object, global_object_data)) return false;
}
@@ -5500,16 +5433,22 @@ bool Genesis::ConfigureApiObject(Handle<JSObject> object,
MaybeHandle<JSObject> maybe_obj =
ApiNatives::InstantiateObject(object->GetIsolate(), object_template);
- Handle<JSObject> obj;
- if (!maybe_obj.ToHandle(&obj)) {
+ Handle<JSObject> instantiated_template;
+ if (!maybe_obj.ToHandle(&instantiated_template)) {
DCHECK(isolate()->has_pending_exception());
isolate()->clear_pending_exception();
return false;
}
- TransferObject(obj, object);
+ TransferObject(instantiated_template, object);
return true;
}
+static bool PropertyAlreadyExists(Isolate* isolate, Handle<JSObject> to,
+ Handle<Name> key) {
+ LookupIterator it(isolate, to, key, LookupIterator::OWN_SKIP_INTERCEPTOR);
+ CHECK_NE(LookupIterator::ACCESS_CHECK, it.state());
+ return it.IsFound();
+}
void Genesis::TransferNamedProperties(Handle<JSObject> from,
Handle<JSObject> to) {
@@ -5527,6 +5466,8 @@ void Genesis::TransferNamedProperties(Handle<JSObject> from,
if (details.kind() == kData) {
HandleScope inner(isolate());
Handle<Name> key = Handle<Name>(descs->GetKey(i), isolate());
+      // If the property is already there, we skip it.
+ if (PropertyAlreadyExists(isolate(), to, key)) continue;
FieldIndex index = FieldIndex::ForDescriptor(from->map(), i);
Handle<Object> value =
JSObject::FastPropertyAt(from, details.representation(), index);
@@ -5543,17 +5484,16 @@ void Genesis::TransferNamedProperties(Handle<JSObject> from,
DCHECK(!FLAG_track_constant_fields);
HandleScope inner(isolate());
Handle<Name> key = Handle<Name>(descs->GetKey(i), isolate());
+        // If the property is already there, we skip it.
+ if (PropertyAlreadyExists(isolate(), to, key)) continue;
Handle<Object> value(descs->GetStrongValue(i), isolate());
JSObject::AddProperty(isolate(), to, key, value,
details.attributes());
} else {
DCHECK_EQ(kAccessor, details.kind());
Handle<Name> key(descs->GetKey(i), isolate());
- LookupIterator it(isolate(), to, key,
- LookupIterator::OWN_SKIP_INTERCEPTOR);
- CHECK_NE(LookupIterator::ACCESS_CHECK, it.state());
- // If the property is already there we skip it
- if (it.IsFound()) continue;
+        // If the property is already there, we skip it.
+ if (PropertyAlreadyExists(isolate(), to, key)) continue;
HandleScope inner(isolate());
DCHECK(!to->HasFastProperties());
// Add to dictionary.
@@ -5572,13 +5512,10 @@ void Genesis::TransferNamedProperties(Handle<JSObject> from,
GlobalDictionary::IterationIndices(isolate(), properties);
for (int i = 0; i < indices->length(); i++) {
int index = Smi::ToInt(indices->get(i));
- // If the property is already there we skip it.
Handle<PropertyCell> cell(properties->CellAt(index), isolate());
Handle<Name> key(cell->name(), isolate());
- LookupIterator it(isolate(), to, key,
- LookupIterator::OWN_SKIP_INTERCEPTOR);
- CHECK_NE(LookupIterator::ACCESS_CHECK, it.state());
- if (it.IsFound()) continue;
+      // If the property is already there, we skip it.
+ if (PropertyAlreadyExists(isolate(), to, key)) continue;
// Set the property.
Handle<Object> value(cell->value(), isolate());
if (value->IsTheHole(isolate())) continue;
@@ -5595,15 +5532,12 @@ void Genesis::TransferNamedProperties(Handle<JSObject> from,
ReadOnlyRoots roots(isolate());
for (int i = 0; i < key_indices->length(); i++) {
int key_index = Smi::ToInt(key_indices->get(i));
- Object* raw_key = properties->KeyAt(key_index);
+ Object raw_key = properties->KeyAt(key_index);
DCHECK(properties->IsKey(roots, raw_key));
DCHECK(raw_key->IsName());
- // If the property is already there we skip it.
Handle<Name> key(Name::cast(raw_key), isolate());
- LookupIterator it(isolate(), to, key,
- LookupIterator::OWN_SKIP_INTERCEPTOR);
- CHECK_NE(LookupIterator::ACCESS_CHECK, it.state());
- if (it.IsFound()) continue;
+      // If the property is already there, we skip it.
+ if (PropertyAlreadyExists(isolate(), to, key)) continue;
// Set the property.
Handle<Object> value =
Handle<Object>(properties->ValueAt(key_index), isolate());
@@ -5641,14 +5575,13 @@ void Genesis::TransferObject(Handle<JSObject> from, Handle<JSObject> to) {
JSObject::ForceSetPrototype(to, proto);
}
-
Genesis::Genesis(
Isolate* isolate, MaybeHandle<JSGlobalProxy> maybe_global_proxy,
v8::Local<v8::ObjectTemplate> global_proxy_template,
size_t context_snapshot_index,
- v8::DeserializeEmbedderFieldsCallback embedder_fields_deserializer,
- GlobalContextType context_type)
+ v8::DeserializeEmbedderFieldsCallback embedder_fields_deserializer)
: isolate_(isolate), active_(isolate->bootstrapper()) {
+ RuntimeCallTimerScope rcs_timer(isolate, RuntimeCallCounterId::kGenesis);
result_ = Handle<Context>::null();
global_proxy_ = Handle<JSGlobalProxy>::null();
@@ -5666,7 +5599,7 @@ Genesis::Genesis(
// The global proxy function to reinitialize this global proxy is in the
// context that is yet to be deserialized. We need to prepare a global
// proxy of the correct size.
- Object* size = isolate->heap()->serialized_global_proxy_sizes()->get(
+ Object size = isolate->heap()->serialized_global_proxy_sizes()->get(
static_cast<int>(context_snapshot_index) - 1);
instance_size = Smi::ToInt(size);
} else {
@@ -5725,10 +5658,12 @@ Genesis::Genesis(
CreateAsyncFunctionMaps(empty_function);
Handle<JSGlobalObject> global_object =
CreateNewGlobals(global_proxy_template, global_proxy);
- InitializeGlobal(global_object, empty_function, context_type);
+ InitializeGlobal(global_object, empty_function);
InitializeNormalizedMapCaches();
+ InitializeIteratorFunctions();
+ InitializeCallSiteBuiltins();
- if (!InstallNatives(context_type)) return;
+ if (!InstallNatives()) return;
if (!InstallExtraNatives()) return;
if (!ConfigureGlobalObjects(global_proxy_template)) return;
@@ -5736,43 +5671,35 @@ Genesis::Genesis(
if (FLAG_profile_deserialization) {
double ms = timer.Elapsed().InMillisecondsF();
- i::PrintF("[Initializing context from scratch took %0.3f ms]\n", ms);
+ PrintF("[Initializing context from scratch took %0.3f ms]\n", ms);
}
}
+ native_context()->set_microtask_queue(isolate->default_microtask_queue());
+
// Install experimental natives. Do not include them into the
// snapshot as we should be able to turn them off at runtime. Re-installing
// them after they have already been deserialized would also fail.
- if (context_type == FULL_CONTEXT) {
if (!isolate->serializer_enabled()) {
InitializeExperimentalGlobal();
- if (FLAG_experimental_extras) {
- if (!InstallExperimentalExtraNatives()) return;
- }
-
// Store String.prototype's map again in case it has been changed by
// experimental natives.
Handle<JSFunction> string_function(native_context()->string_function(),
isolate);
- JSObject* string_function_prototype =
+ JSObject string_function_prototype =
JSObject::cast(string_function->initial_map()->prototype());
DCHECK(string_function_prototype->HasFastProperties());
native_context()->set_string_function_prototype_map(
string_function_prototype->map());
}
- } else if (context_type == DEBUG_CONTEXT) {
- DCHECK(!isolate->serializer_enabled());
- InitializeExperimentalGlobal();
- if (!InstallDebuggerNatives()) return;
- }
if (FLAG_disallow_code_generation_from_strings) {
native_context()->set_allow_code_gen_from_strings(
ReadOnlyRoots(isolate).false_value());
}
- ConfigureUtilsObject(context_type);
+ ConfigureUtilsObject();
// We created new functions, which may require debug instrumentation.
if (isolate->debug()->is_active()) {
@@ -5809,7 +5736,7 @@ Genesis::Genesis(Isolate* isolate,
FunctionTemplateInfo::cast(global_proxy_data->constructor()), isolate);
Handle<ObjectTemplateInfo> global_object_template(
- ObjectTemplateInfo::cast(global_constructor->prototype_template()),
+ ObjectTemplateInfo::cast(global_constructor->GetPrototypeTemplate()),
isolate);
Handle<JSObject> global_object =
ApiNatives::InstantiateRemoteObject(
diff --git a/deps/v8/src/bootstrapper.h b/deps/v8/src/bootstrapper.h
index 4ad02eb836..6deff78097 100644
--- a/deps/v8/src/bootstrapper.h
+++ b/deps/v8/src/bootstrapper.h
@@ -6,7 +6,9 @@
#define V8_BOOTSTRAPPER_H_
#include "src/heap/factory.h"
+#include "src/objects/fixed-array.h"
#include "src/objects/shared-function-info.h"
+#include "src/objects/slots.h"
#include "src/snapshot/natives.h"
#include "src/visitors.h"
@@ -14,20 +16,17 @@ namespace v8 {
namespace internal {
// A SourceCodeCache uses a FixedArray to store pairs of
-// (OneByteString*, JSFunction*), mapping names of native code files
+// (OneByteString, SharedFunctionInfo), mapping names of native code files
// (array.js, etc.) to precompiled functions. Instead of mapping
// names to functions it might make sense to let the JS2C tool
// generate an index for each native JS file.
class SourceCodeCache final {
public:
- explicit SourceCodeCache(Script::Type type) : type_(type), cache_(nullptr) {}
+ explicit SourceCodeCache(Script::Type type) : type_(type) {}
void Initialize(Isolate* isolate, bool create_heap_objects);
- void Iterate(RootVisitor* v) {
- v->VisitRootPointer(Root::kExtensions, nullptr,
- bit_cast<Object**, FixedArray**>(&cache_));
- }
+ void Iterate(RootVisitor* v);
bool Lookup(Isolate* isolate, Vector<const char> name,
Handle<SharedFunctionInfo>* handle);
@@ -37,11 +36,10 @@ class SourceCodeCache final {
private:
Script::Type type_;
- FixedArray* cache_;
+ FixedArray cache_;
DISALLOW_COPY_AND_ASSIGN(SourceCodeCache);
};
-enum GlobalContextType { FULL_CONTEXT, DEBUG_CONTEXT };
// The Bootstrapper is the public interface for creating a JavaScript global
// context.
@@ -60,8 +58,7 @@ class Bootstrapper final {
MaybeHandle<JSGlobalProxy> maybe_global_proxy,
v8::Local<v8::ObjectTemplate> global_object_template,
v8::ExtensionConfiguration* extensions, size_t context_snapshot_index,
- v8::DeserializeEmbedderFieldsCallback embedder_fields_deserializer,
- GlobalContextType context_type = FULL_CONTEXT);
+ v8::DeserializeEmbedderFieldsCallback embedder_fields_deserializer);
Handle<JSGlobalProxy> NewRemoteContext(
MaybeHandle<JSGlobalProxy> maybe_global_proxy,
@@ -94,13 +91,13 @@ class Bootstrapper final {
static bool CompileNative(Isolate* isolate, Vector<const char> name,
Handle<String> source, int argc,
Handle<Object> argv[], NativesFlag natives_flag);
- static bool CompileBuiltin(Isolate* isolate, int index);
static bool CompileExtraBuiltin(Isolate* isolate, int index);
static bool CompileExperimentalExtraBuiltin(Isolate* isolate, int index);
- static void ExportFromRuntime(Isolate* isolate, Handle<JSObject> container);
-
private:
+ // Log newly created Map objects if no snapshot was used.
+ void LogAllMaps();
+
Isolate* isolate_;
typedef int NestingCounterType;
NestingCounterType nesting_;
diff --git a/deps/v8/src/builtins/arguments.tq b/deps/v8/src/builtins/arguments.tq
new file mode 100644
index 0000000000..b758153155
--- /dev/null
+++ b/deps/v8/src/builtins/arguments.tq
@@ -0,0 +1,44 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+namespace arguments {
+
+ struct ArgumentsInfo {
+ frame: Frame;
+ argument_count: bint;
+ formal_parameter_count: bint;
+ }
+
+ // Calculates and returns the frame pointer, argument count and formal
+ // parameter count to be used to access a function's parameters, taking
+ // argument adapter frames into account.
+ //
+ // TODO(danno):
+  // This macro should only be used in builtins that can be called from
+ // interpreted or JITted code, not from CSA/Torque builtins (the number of
+ // returned formal parameters would be wrong).
+ // It is difficult to actually check/assert this, since interpreted or JITted
+  // frames are StandardFrames, but so are hand-written builtins. Doing a
+  // more refined check would be prohibitively expensive.
+ macro GetArgumentsFrameAndCount(implicit context: Context)(f: JSFunction):
+ ArgumentsInfo {
+ let frame: Frame = LoadParentFramePointer();
+ assert(frame.function == f);
+
+ const shared: SharedFunctionInfo = f.shared_function_info;
+ const formalParameterCount: bint =
+ Convert<bint>(shared.formal_parameter_count);
+ let argumentCount: bint = formalParameterCount;
+
+ const adaptor: ArgumentsAdaptorFrame =
+ Cast<ArgumentsAdaptorFrame>(frame.caller)
+ otherwise return ArgumentsInfo{frame, argumentCount, formalParameterCount};
+
+ return ArgumentsInfo{
+ adaptor,
+ Convert<bint>(adaptor.length),
+ formalParameterCount
+ };
+ }
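+
+  // Illustrative usage sketch (not part of this patch): callers destructure
+  // the returned struct directly, e.g.
+  //   const info: ArgumentsInfo = GetArgumentsFrameAndCount(f);
+  //   const count: bint = info.argument_count;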
+}
diff --git a/deps/v8/src/builtins/arm/builtins-arm.cc b/deps/v8/src/builtins/arm/builtins-arm.cc
index 92cb6df45d..6fdd93821d 100644
--- a/deps/v8/src/builtins/arm/builtins-arm.cc
+++ b/deps/v8/src/builtins/arm/builtins-arm.cc
@@ -4,16 +4,21 @@
#if V8_TARGET_ARCH_ARM
-#include "src/assembler-inl.h"
+#include "src/api-arguments.h"
#include "src/code-factory.h"
-#include "src/code-stubs.h"
#include "src/counters.h"
#include "src/debug/debug.h"
#include "src/deoptimizer.h"
#include "src/frame-constants.h"
#include "src/frames.h"
+#include "src/macro-assembler-inl.h"
#include "src/objects-inl.h"
+#include "src/objects/cell.h"
+#include "src/objects/foreign.h"
+#include "src/objects/heap-number.h"
#include "src/objects/js-generator.h"
+#include "src/objects/smi.h"
+#include "src/register-configuration.h"
#include "src/runtime/runtime.h"
#include "src/wasm/wasm-objects.h"
@@ -92,8 +97,7 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
__ SmiUntag(r0, r0);
}
static_assert(kJavaScriptCallCodeStartRegister == r2, "ABI mismatch");
- __ add(r2, r2, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(r2);
+ __ JumpCodeObject(r2);
}
namespace {
@@ -482,8 +486,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ Move(r1, r4);
static_assert(kJavaScriptCallCodeStartRegister == r2, "ABI mismatch");
__ ldr(r2, FieldMemOperand(r1, JSFunction::kCodeOffset));
- __ add(r2, r2, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(r2);
+ __ JumpCodeObject(r2);
}
__ bind(&prepare_step_in_if_stepping);
@@ -522,16 +525,221 @@ void Builtins::Generate_ConstructedNonConstructable(MacroAssembler* masm) {
__ CallRuntime(Runtime::kThrowConstructedNonConstructable);
}
+namespace {
+
+// Total size of the stack space pushed by JSEntryVariant.
+// JSEntryTrampoline uses this to access on stack arguments passed to
+// JSEntryVariant.
+constexpr int kPushedStackSpace = kNumCalleeSaved * kPointerSize +
+ kPointerSize /* LR */ +
+ kNumDoubleCalleeSaved * kDoubleSize +
+ 4 * kPointerSize /* r5, r6, r7, scratch */ +
+ EntryFrameConstants::kCallerFPOffset;
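+// The prologue generated below tracks the same total in a local
+// |pushed_stack_space| counter and DCHECKs that it matches this constant
+// right before calling the entry trampoline.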
+
+// Called with the native C calling convention. The corresponding function
+// signature is either:
+//
+// using JSEntryFunction = GeneratedCode<Address(
+// Address root_register_value, Address new_target, Address target,
+// Address receiver, intptr_t argc, Address** argv)>;
+// or
+// using JSEntryFunction = GeneratedCode<Address(
+// Address root_register_value, MicrotaskQueue* microtask_queue)>;
+void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
+ Builtins::Name entry_trampoline) {
+ // The register state is either:
+ // r0: root_register_value
+ // r1: code entry
+ // r2: function
+ // r3: receiver
+ // [sp + 0 * kSystemPointerSize]: argc
+ // [sp + 1 * kSystemPointerSize]: argv
+ // or
+ // r0: root_register_value
+ // r1: microtask_queue
+ // Preserve all but r0 and pass them to entry_trampoline.
+ Label invoke, handler_entry, exit;
+
+ // Update |pushed_stack_space| when we manipulate the stack.
+ int pushed_stack_space = EntryFrameConstants::kCallerFPOffset;
+ {
+ NoRootArrayScope no_root_array(masm);
+
+ // Called from C, so do not pop argc and args on exit (preserve sp)
+ // No need to save register-passed args
+ // Save callee-saved registers (incl. cp and fp), sp, and lr
+ __ stm(db_w, sp, kCalleeSaved | lr.bit());
+ pushed_stack_space +=
+ kNumCalleeSaved * kPointerSize + kPointerSize /* LR */;
+
+ // Save callee-saved vfp registers.
+ __ vstm(db_w, sp, kFirstCalleeSavedDoubleReg, kLastCalleeSavedDoubleReg);
+ pushed_stack_space += kNumDoubleCalleeSaved * kDoubleSize;
+
+ // Set up the reserved register for 0.0.
+ __ vmov(kDoubleRegZero, Double(0.0));
+
+ // Initialize the root register.
+ // C calling convention. The first argument is passed in r0.
+ __ mov(kRootRegister, r0);
+ }
+
+ // Push a frame with special values setup to mark it as an entry frame.
+ // r0: root_register_value
+ __ mov(r7, Operand(StackFrame::TypeToMarker(type)));
+ __ mov(r6, Operand(StackFrame::TypeToMarker(type)));
+ __ Move(r5, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress,
+ masm->isolate()));
+ __ ldr(r5, MemOperand(r5));
+ {
+ UseScratchRegisterScope temps(masm);
+ Register scratch = temps.Acquire();
+
+ // Push a bad frame pointer to fail if it is used.
+ __ mov(scratch, Operand(-1));
+ __ stm(db_w, sp, r5.bit() | r6.bit() | r7.bit() | scratch.bit());
+ pushed_stack_space += 4 * kPointerSize /* r5, r6, r7, scratch */;
+ }
+
+ Register scratch = r6;
+
+ // Set up frame pointer for the frame to be pushed.
+ __ add(fp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));
+
+ // If this is the outermost JS call, set js_entry_sp value.
+ Label non_outermost_js;
+ ExternalReference js_entry_sp = ExternalReference::Create(
+ IsolateAddressId::kJSEntrySPAddress, masm->isolate());
+ __ Move(r5, js_entry_sp);
+ __ ldr(scratch, MemOperand(r5));
+ __ cmp(scratch, Operand::Zero());
+ __ b(ne, &non_outermost_js);
+ __ str(fp, MemOperand(r5));
+ __ mov(scratch, Operand(StackFrame::OUTERMOST_JSENTRY_FRAME));
+ Label cont;
+ __ b(&cont);
+ __ bind(&non_outermost_js);
+ __ mov(scratch, Operand(StackFrame::INNER_JSENTRY_FRAME));
+ __ bind(&cont);
+ __ push(scratch);
+
+ // Jump to a faked try block that does the invoke, with a faked catch
+ // block that sets the pending exception.
+ __ jmp(&invoke);
+
+ // Block literal pool emission whilst taking the position of the handler
+ // entry. This avoids making the assumption that literal pools are always
+ // emitted after an instruction is emitted, rather than before.
+ {
+ Assembler::BlockConstPoolScope block_const_pool(masm);
+ __ bind(&handler_entry);
+
+ // Store the current pc as the handler offset. It's used later to create the
+ // handler table.
+ masm->isolate()->builtins()->SetJSEntryHandlerOffset(handler_entry.pos());
+
+ // Caught exception: Store result (exception) in the pending exception
+ // field in the JSEnv and return a failure sentinel. Coming in here the
+ // fp will be invalid because the PushStackHandler below sets it to 0 to
+ // signal the existence of the JSEntry frame.
+ __ Move(scratch,
+ ExternalReference::Create(
+ IsolateAddressId::kPendingExceptionAddress, masm->isolate()));
+ }
+ __ str(r0, MemOperand(scratch));
+ __ LoadRoot(r0, RootIndex::kException);
+ __ b(&exit);
+
+ // Invoke: Link this frame into the handler chain.
+ __ bind(&invoke);
+ // Must preserve r0-r4, r5-r6 are available.
+ __ PushStackHandler();
+ // If an exception not caught by another handler occurs, this handler
+ // returns control to the code after the bl(&invoke) above, which
+ // restores all kCalleeSaved registers (including cp and fp) to their
+ // saved values before returning a failure to C.
+ //
+ // Invoke the function by calling through JS entry trampoline builtin and
+ // pop the faked function when we return.
+ Handle<Code> trampoline_code =
+ masm->isolate()->builtins()->builtin_handle(entry_trampoline);
+ DCHECK_EQ(kPushedStackSpace, pushed_stack_space);
+ __ Call(trampoline_code, RelocInfo::CODE_TARGET);
+
+ // Unlink this frame from the handler chain.
+ __ PopStackHandler();
+
+ __ bind(&exit); // r0 holds result
+ // Check if the current stack frame is marked as the outermost JS frame.
+ Label non_outermost_js_2;
+ __ pop(r5);
+ __ cmp(r5, Operand(StackFrame::OUTERMOST_JSENTRY_FRAME));
+ __ b(ne, &non_outermost_js_2);
+ __ mov(r6, Operand::Zero());
+ __ Move(r5, js_entry_sp);
+ __ str(r6, MemOperand(r5));
+ __ bind(&non_outermost_js_2);
+
+ // Restore the top frame descriptors from the stack.
+ __ pop(r3);
+ __ Move(scratch, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress,
+ masm->isolate()));
+ __ str(r3, MemOperand(scratch));
+
+ // Reset the stack to the callee saved registers.
+ __ add(sp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));
+
+ // Restore callee-saved registers and return.
+#ifdef DEBUG
+ if (FLAG_debug_code) {
+ __ mov(lr, Operand(pc));
+ }
+#endif
+
+ // Restore callee-saved vfp registers.
+ __ vldm(ia_w, sp, kFirstCalleeSavedDoubleReg, kLastCalleeSavedDoubleReg);
+
+ __ ldm(ia_w, sp, kCalleeSaved | pc.bit());
+}
+
+} // namespace
+
+void Builtins::Generate_JSEntry(MacroAssembler* masm) {
+ Generate_JSEntryVariant(masm, StackFrame::ENTRY,
+ Builtins::kJSEntryTrampoline);
+}
+
+void Builtins::Generate_JSConstructEntry(MacroAssembler* masm) {
+ Generate_JSEntryVariant(masm, StackFrame::CONSTRUCT_ENTRY,
+ Builtins::kJSConstructEntryTrampoline);
+}
+
+void Builtins::Generate_JSRunMicrotasksEntry(MacroAssembler* masm) {
+ Generate_JSEntryVariant(masm, StackFrame::ENTRY,
+ Builtins::kRunMicrotasksTrampoline);
+}
+
static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
bool is_construct) {
// Called from Generate_JS_Entry
- // r0: new.target
- // r1: function
- // r2: receiver
- // r3: argc
- // r4: argv
+ // r0: root_register_value
+ // r1: new.target
+ // r2: function
+ // r3: receiver
+ // [fp + kPushedStackSpace + 0 * kSystemPointerSize]: argc
+ // [fp + kPushedStackSpace + 1 * kSystemPointerSize]: argv
// r5-r6, r8 and cp may be clobbered
- ProfileEntryHookStub::MaybeCallEntryHook(masm);
+
+ __ ldr(r0,
+ MemOperand(fp, kPushedStackSpace + EntryFrameConstants::kArgcOffset));
+ __ ldr(r4,
+ MemOperand(fp, kPushedStackSpace + EntryFrameConstants::kArgvOffset));
+
+ // r1: new.target
+ // r2: function
+ // r3: receiver
+ // r0: argc
+ // r4: argv
// Enter an internal frame.
{
@@ -544,12 +752,12 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ ldr(cp, MemOperand(cp));
// Push the function and the receiver onto the stack.
- __ Push(r1, r2);
+ __ Push(r2, r3);
// Check if we have enough stack space to push all arguments.
- // Clobbers r2.
+ // Clobbers r3.
Label enough_stack_space, stack_overflow;
- Generate_StackOverflowCheck(masm, r3, r2, &stack_overflow);
+ Generate_StackOverflowCheck(masm, r0, r3, &stack_overflow);
__ b(&enough_stack_space);
__ bind(&stack_overflow);
__ CallRuntime(Runtime::kThrowStackOverflow);
@@ -558,37 +766,39 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ bind(&enough_stack_space);
- // Remember new.target.
- __ mov(r5, r0);
-
// Copy arguments to the stack in a loop.
- // r1: function
- // r3: argc
+ // r1: new.target
+ // r2: function
+ // r0: argc
// r4: argv, i.e. points to first arg
Label loop, entry;
- __ add(r2, r4, Operand(r3, LSL, kPointerSizeLog2));
- // r2 points past last arg.
+ __ add(r3, r4, Operand(r0, LSL, kPointerSizeLog2));
+  // r3 points past last arg.
__ b(&entry);
__ bind(&loop);
- __ ldr(r0, MemOperand(r4, kPointerSize, PostIndex)); // read next parameter
- __ ldr(r0, MemOperand(r0)); // dereference handle
- __ push(r0); // push parameter
+ __ ldr(r5, MemOperand(r4, kPointerSize, PostIndex)); // read next parameter
+ __ ldr(r5, MemOperand(r5)); // dereference handle
+ __ push(r5); // push parameter
__ bind(&entry);
- __ cmp(r4, r2);
+ __ cmp(r4, r3);
__ b(ne, &loop);
- // Setup new.target and argc.
- __ mov(r0, Operand(r3));
- __ mov(r3, Operand(r5));
+ // Setup new.target and function.
+ __ mov(r3, r1);
+ __ mov(r1, r2);
+ // r0: argc
+ // r1: function
+ // r3: new.target
// Initialize all JavaScript callee-saved registers, since they will be seen
// by the garbage collector as part of handlers.
__ LoadRoot(r4, RootIndex::kUndefinedValue);
- __ mov(r5, Operand(r4));
- __ mov(r6, Operand(r4));
- __ mov(r8, Operand(r4));
+ __ mov(r2, r4);
+ __ mov(r5, r4);
+ __ mov(r6, r4);
+ __ mov(r8, r4);
if (kR9Available == 1) {
- __ mov(r9, Operand(r4));
+ __ mov(r9, r4);
}
// Invoke the code.
@@ -614,6 +824,16 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
Generate_JSEntryTrampolineHelper(masm, true);
}
+void Builtins::Generate_RunMicrotasksTrampoline(MacroAssembler* masm) {
+ // This expects two C++ function parameters passed by Invoke() in
+ // execution.cc.
+ // r0: root_register_value
+ // r1: microtask_queue
+
+ __ mov(RunMicrotasksDescriptor::MicrotaskQueueRegister(), r1);
+ __ Jump(BUILTIN_CODE(masm->isolate(), RunMicrotasks), RelocInfo::CODE_TARGET);
+}
+
static void ReplaceClosureCodeWithOptimizedCode(
MacroAssembler* masm, Register optimized_code, Register closure,
Register scratch1, Register scratch2, Register scratch3) {
@@ -688,6 +908,9 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
Operand(Smi::FromEnum(OptimizationMarker::kNone)));
__ b(eq, &fallthrough);
+ // TODO(v8:8394): The logging of first execution will break if
+ // feedback vectors are not allocated. We need to find a different way of
+ // logging these events if required.
TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry,
OptimizationMarker::kLogFirstExecution,
Runtime::kFunctionFirstExecution);
@@ -736,8 +959,7 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure,
scratch2, scratch3, feedback_vector);
static_assert(kJavaScriptCallCodeStartRegister == r2, "ABI mismatch");
- __ add(r2, optimized_code_entry,
- Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ LoadCodeObjectEntry(r2, optimized_code_entry);
__ Jump(r2);
// Optimized code slot contains deoptimized code, evict it and re-enter the
@@ -822,32 +1044,38 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
// The function builds an interpreter frame. See InterpreterFrameConstants in
// frames.h for its layout.
void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
- ProfileEntryHookStub::MaybeCallEntryHook(masm);
-
Register closure = r1;
Register feedback_vector = r2;
+ // Get the bytecode array from the function object and load it into
+ // kInterpreterBytecodeArrayRegister.
+ __ ldr(r0, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+ __ ldr(kInterpreterBytecodeArrayRegister,
+ FieldMemOperand(r0, SharedFunctionInfo::kFunctionDataOffset));
+ GetSharedFunctionInfoBytecode(masm, kInterpreterBytecodeArrayRegister, r4);
+
+ // The bytecode array could have been flushed from the shared function info,
+ // if so, call into CompileLazy.
+ Label compile_lazy;
+ __ CompareObjectType(kInterpreterBytecodeArrayRegister, r0, no_reg,
+ BYTECODE_ARRAY_TYPE);
+ __ b(ne, &compile_lazy);
+
// Load the feedback vector from the closure.
__ ldr(feedback_vector,
FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
__ ldr(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
+
+ Label push_stack_frame;
+ // Check if feedback vector is valid. If valid, check for optimized code
+ // and update invocation count. Otherwise, setup the stack frame.
+ __ CompareRoot(feedback_vector, RootIndex::kUndefinedValue);
+ __ b(eq, &push_stack_frame);
+
// Read off the optimized code slot in the feedback vector, and if there
// is optimized code or an optimization marker, call that instead.
MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, r4, r6, r5);
- // Open a frame scope to indicate that there is a frame on the stack. The
- // MANUAL indicates that the scope shouldn't actually generate code to set up
- // the frame (that is done below).
- FrameScope frame_scope(masm, StackFrame::MANUAL);
- __ PushStandardFrame(closure);
-
- // Get the bytecode array from the function object and load it into
- // kInterpreterBytecodeArrayRegister.
- __ ldr(r0, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
- __ ldr(kInterpreterBytecodeArrayRegister,
- FieldMemOperand(r0, SharedFunctionInfo::kFunctionDataOffset));
- GetSharedFunctionInfoBytecode(masm, kInterpreterBytecodeArrayRegister, r4);
-
// Increment invocation count for the function.
__ ldr(r9, FieldMemOperand(feedback_vector,
FeedbackVector::kInvocationCountOffset));
@@ -855,16 +1083,12 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ str(r9, FieldMemOperand(feedback_vector,
FeedbackVector::kInvocationCountOffset));
- // Check function data field is actually a BytecodeArray object.
- if (FLAG_debug_code) {
- __ SmiTst(kInterpreterBytecodeArrayRegister);
- __ Assert(
- ne, AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
- __ CompareObjectType(kInterpreterBytecodeArrayRegister, r0, no_reg,
- BYTECODE_ARRAY_TYPE);
- __ Assert(
- eq, AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
- }
+ // Open a frame scope to indicate that there is a frame on the stack. The
+ // MANUAL indicates that the scope shouldn't actually generate code to set up
+ // the frame (that is done below).
+ __ bind(&push_stack_frame);
+ FrameScope frame_scope(masm, StackFrame::MANUAL);
+ __ PushStandardFrame(closure);
// Reset code age.
__ mov(r9, Operand(BytecodeArray::kNoAgeBytecodeAge));
@@ -923,9 +1147,9 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// handler at the current bytecode offset.
Label do_dispatch;
__ bind(&do_dispatch);
- __ mov(kInterpreterDispatchTableRegister,
- Operand(ExternalReference::interpreter_dispatch_table_address(
- masm->isolate())));
+ __ Move(
+ kInterpreterDispatchTableRegister,
+ ExternalReference::interpreter_dispatch_table_address(masm->isolate()));
__ ldrb(r4, MemOperand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister));
__ ldr(
@@ -957,6 +1181,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// The return value is in r0.
LeaveInterpreterFrame(masm, r2);
__ Jump(lr);
+
+ __ bind(&compile_lazy);
+ GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
+ __ bkpt(0); // Should not return.
}
static void Generate_InterpreterPushArgs(MacroAssembler* masm,
@@ -1083,12 +1311,14 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
// Set the return address to the correct point in the interpreter entry
// trampoline.
Label builtin_trampoline, trampoline_loaded;
- Smi* interpreter_entry_return_pc_offset(
+ Smi interpreter_entry_return_pc_offset(
masm->isolate()->heap()->interpreter_entry_return_pc_offset());
DCHECK_NE(interpreter_entry_return_pc_offset, Smi::kZero);
- // If the SFI function_data is an InterpreterData, get the trampoline stored
- // in it, otherwise get the trampoline from the builtins list.
+ // If the SFI function_data is an InterpreterData, the function will have a
+ // custom copy of the interpreter entry trampoline for profiling. If so,
+ // get the custom trampoline, otherwise grab the entry address of the global
+ // trampoline.
__ ldr(r2, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
__ ldr(r2, FieldMemOperand(r2, JSFunction::kSharedFunctionInfoOffset));
__ ldr(r2, FieldMemOperand(r2, SharedFunctionInfo::kFunctionDataOffset));
@@ -1099,14 +1329,17 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
__ ldr(r2,
FieldMemOperand(r2, InterpreterData::kInterpreterTrampolineOffset));
+ __ add(r2, r2, Operand(Code::kHeaderSize - kHeapObjectTag));
__ b(&trampoline_loaded);
__ bind(&builtin_trampoline);
- __ Move(r2, BUILTIN_CODE(masm->isolate(), InterpreterEntryTrampoline));
+ __ Move(r2, ExternalReference::
+ address_of_interpreter_entry_trampoline_instruction_start(
+ masm->isolate()));
+ __ ldr(r2, MemOperand(r2));
__ bind(&trampoline_loaded);
- __ add(lr, r2, Operand(interpreter_entry_return_pc_offset->value() +
- Code::kHeaderSize - kHeapObjectTag));
+ __ add(lr, r2, Operand(interpreter_entry_return_pc_offset->value()));
// Initialize the dispatch table register.
__ Move(
@@ -1245,8 +1478,7 @@ void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
// which has be reset to the compile lazy builtin.
static_assert(kJavaScriptCallCodeStartRegister == r2, "ABI mismatch");
__ ldr(r2, FieldMemOperand(r1, JSFunction::kCodeOffset));
- __ add(r2, r2, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(r2);
+ __ JumpCodeObject(r2);
}
namespace {
@@ -1326,7 +1558,7 @@ void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
// If the code object is null, just return to the caller.
Label skip;
- __ cmp(r0, Operand(Smi::kZero));
+ __ cmp(r0, Operand(Smi::zero()));
__ b(ne, &skip);
__ Ret();
@@ -1546,7 +1778,7 @@ static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
__ mov(r4, Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
__ stm(db_w, sp, r0.bit() | r1.bit() | r4.bit() |
fp.bit() | lr.bit());
- __ Push(Smi::kZero); // Padding.
+ __ Push(Smi::zero()); // Padding.
__ add(fp, sp,
Operand(ArgumentsAdaptorFrameConstants::kFixedFrameSizeFromFp));
}
@@ -2171,8 +2403,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// r3 : new target (passed through to callee)
static_assert(kJavaScriptCallCodeStartRegister == r2, "ABI mismatch");
__ ldr(r2, FieldMemOperand(r1, JSFunction::kCodeOffset));
- __ add(r2, r2, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Call(r2);
+ __ CallCodeObject(r2);
// Store offset of return address for deoptimizer.
masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(masm->pc_offset());
@@ -2187,8 +2418,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ bind(&dont_adapt_arguments);
static_assert(kJavaScriptCallCodeStartRegister == r2, "ABI mismatch");
__ ldr(r2, FieldMemOperand(r1, JSFunction::kCodeOffset));
- __ add(r2, r2, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(r2);
+ __ JumpCodeObject(r2);
__ bind(&stack_overflow);
{
@@ -2199,9 +2429,10 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
}
void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
- // The function index was put in r4 by the jump table trampoline.
+ // The function index was put in a register by the jump table trampoline.
// Convert to Smi for the runtime call.
- __ SmiTag(r4, r4);
+ __ SmiTag(kWasmCompileLazyFuncIndexRegister,
+ kWasmCompileLazyFuncIndexRegister);
{
HardAbortScope hard_abort(masm); // Avoid calls to Abort.
FrameAndConstantPoolScope scope(masm, StackFrame::WASM_COMPILE_LAZY);
@@ -2219,13 +2450,13 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
// Pass instance and function index as explicit arguments to the runtime
// function.
__ push(kWasmInstanceRegister);
- __ push(r4);
+ __ push(kWasmCompileLazyFuncIndexRegister);
// Load the correct CEntry builtin from the instance object.
__ ldr(r2, FieldMemOperand(kWasmInstanceRegister,
WasmInstanceObject::kCEntryStubOffset));
// Initialize the JavaScript context with 0. CEntry will use it to
// set the current context on the isolate.
- __ Move(cp, Smi::kZero);
+ __ Move(cp, Smi::zero());
__ CallRuntimeWithCEntry(Runtime::kWasmCompileLazy, r2);
// The entrypoint address is the return value.
__ mov(r8, kReturnRegister0);
@@ -2250,7 +2481,6 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
//
// If argv_mode == kArgvInRegister:
// r2: pointer to the first argument
- ProfileEntryHookStub::MaybeCallEntryHook(masm);
__ mov(r5, Operand(r1));
@@ -2295,21 +2525,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
// Call C built-in.
// r0 = argc, r1 = argv, r2 = isolate
__ Move(r2, ExternalReference::isolate_address(masm->isolate()));
-
- // To let the GC traverse the return address of the exit frames, we need to
- // know where the return address is. CEntry is unmovable, so
- // we can store the address on the stack to be able to find it again and
- // we never have to restore it, because it will not change.
- // Compute the return address in lr to return to after the jump below. Pc is
- // already at '+ 8' from the current instruction but return is after three
- // instructions so add another 4 to pc to get the return address.
- {
- // Prevent literal pool emission before return address.
- Assembler::BlockConstPoolScope block_const_pool(masm);
- __ add(lr, pc, Operand(4));
- __ str(lr, MemOperand(sp));
- __ Call(r5);
- }
+ __ StoreReturnAddressAndCall(r5);
// Result returned in r0 or r1:r0 - do not destroy these registers!
@@ -2565,43 +2781,6 @@ void Builtins::Generate_MathPowInternal(MacroAssembler* masm) {
__ Ret();
}
-namespace {
-
-void GenerateInternalArrayConstructorCase(MacroAssembler* masm,
- ElementsKind kind) {
- // Load undefined into the allocation site parameter as required by
- // ArrayNArgumentsConstructor.
- __ LoadRoot(kJavaScriptCallExtraArg1Register, RootIndex::kUndefinedValue);
-
- __ cmp(r0, Operand(1));
-
- __ Jump(CodeFactory::InternalArrayNoArgumentConstructor(masm->isolate(), kind)
- .code(),
- RelocInfo::CODE_TARGET, lo);
-
- Handle<Code> code = BUILTIN_CODE(masm->isolate(), ArrayNArgumentsConstructor);
- __ Jump(code, RelocInfo::CODE_TARGET, hi);
-
- if (IsFastPackedElementsKind(kind)) {
- // We might need to create a holey array
- // look at the first argument
- __ ldr(r3, MemOperand(sp, 0));
- __ cmp(r3, Operand::Zero());
-
- __ Jump(CodeFactory::InternalArraySingleArgumentConstructor(
- masm->isolate(), GetHoleyElementsKind(kind))
- .code(),
- RelocInfo::CODE_TARGET, ne);
- }
-
- __ Jump(
- CodeFactory::InternalArraySingleArgumentConstructor(masm->isolate(), kind)
- .code(),
- RelocInfo::CODE_TARGET);
-}
-
-} // namespace
-
void Builtins::Generate_InternalArrayConstructorImpl(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : argc
@@ -2621,34 +2800,432 @@ void Builtins::Generate_InternalArrayConstructorImpl(MacroAssembler* masm) {
__ Assert(ne, AbortReason::kUnexpectedInitialMapForArrayFunction);
__ CompareObjectType(r3, r3, r4, MAP_TYPE);
__ Assert(eq, AbortReason::kUnexpectedInitialMapForArrayFunction);
- }
- // Figure out the right elements kind
- __ ldr(r3, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
- // Load the map's "bit field 2" into |result|. We only need the first byte,
- // but the following bit field extraction takes care of that anyway.
- __ ldr(r3, FieldMemOperand(r3, Map::kBitField2Offset));
- // Retrieve elements_kind from bit field 2.
- __ DecodeField<Map::ElementsKindBits>(r3);
+ // Figure out the right elements kind
+ __ ldr(r3, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
+ // Load the map's "bit field 2" into |result|. We only need the first byte,
+ // but the following bit field extraction takes care of that anyway.
+ __ ldr(r3, FieldMemOperand(r3, Map::kBitField2Offset));
+ // Retrieve elements_kind from bit field 2.
+ __ DecodeField<Map::ElementsKindBits>(r3);
- if (FLAG_debug_code) {
- Label done;
+ // Initial elements kind should be packed elements.
__ cmp(r3, Operand(PACKED_ELEMENTS));
- __ b(eq, &done);
- __ cmp(r3, Operand(HOLEY_ELEMENTS));
- __ Assert(
- eq,
- AbortReason::kInvalidElementsKindForInternalArrayOrInternalPackedArray);
- __ bind(&done);
+ __ Assert(eq, AbortReason::kInvalidElementsKindForInternalPackedArray);
+
+ // No arguments should be passed.
+ __ cmp(r0, Operand(0));
+ __ Assert(eq, AbortReason::kWrongNumberOfArgumentsForInternalPackedArray);
+ }
+
+ __ Jump(
+ BUILTIN_CODE(masm->isolate(), InternalArrayNoArgumentConstructor_Packed),
+ RelocInfo::CODE_TARGET);
+}
+
+namespace {
+
+int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
+ return ref0.address() - ref1.address();
+}
+
+// Calls an API function. Allocates HandleScope, extracts returned value
+// from handle and propagates exceptions. Restores context. stack_space
+// - space to be unwound on exit (includes the call JS arguments space and
+// the additional space allocated for the fast call).
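+// Exactly one of |stack_space| and |stack_space_operand| is used: callers
+// pass either a non-zero immediate, or a MemOperand to load the amount from
+// (see the DCHECKs ahead of LeaveExitFrame below).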
+void CallApiFunctionAndReturn(MacroAssembler* masm, Register function_address,
+ ExternalReference thunk_ref, int stack_space,
+ MemOperand* stack_space_operand,
+ MemOperand return_value_operand) {
+ Isolate* isolate = masm->isolate();
+ ExternalReference next_address =
+ ExternalReference::handle_scope_next_address(isolate);
+ const int kNextOffset = 0;
+ const int kLimitOffset = AddressOffset(
+ ExternalReference::handle_scope_limit_address(isolate), next_address);
+ const int kLevelOffset = AddressOffset(
+ ExternalReference::handle_scope_level_address(isolate), next_address);
+
+ DCHECK(function_address == r1 || function_address == r2);
+
+ Label profiler_disabled;
+ Label end_profiler_check;
+ __ Move(r9, ExternalReference::is_profiling_address(isolate));
+ __ ldrb(r9, MemOperand(r9, 0));
+ __ cmp(r9, Operand(0));
+ __ b(eq, &profiler_disabled);
+
+ // Additional parameter is the address of the actual callback.
+ __ Move(r3, thunk_ref);
+ __ jmp(&end_profiler_check);
+
+ __ bind(&profiler_disabled);
+ __ Move(r3, function_address);
+ __ bind(&end_profiler_check);
+
+ // Allocate HandleScope in callee-save registers.
+ __ Move(r9, next_address);
+ __ ldr(r4, MemOperand(r9, kNextOffset));
+ __ ldr(r5, MemOperand(r9, kLimitOffset));
+ __ ldr(r6, MemOperand(r9, kLevelOffset));
+ __ add(r6, r6, Operand(1));
+ __ str(r6, MemOperand(r9, kLevelOffset));
+
+ if (FLAG_log_timer_events) {
+ FrameScope frame(masm, StackFrame::MANUAL);
+ __ PushSafepointRegisters();
+ __ PrepareCallCFunction(1);
+ __ Move(r0, ExternalReference::isolate_address(isolate));
+ __ CallCFunction(ExternalReference::log_enter_external_function(), 1);
+ __ PopSafepointRegisters();
+ }
+
+ __ StoreReturnAddressAndCall(r3);
+
+ if (FLAG_log_timer_events) {
+ FrameScope frame(masm, StackFrame::MANUAL);
+ __ PushSafepointRegisters();
+ __ PrepareCallCFunction(1);
+ __ Move(r0, ExternalReference::isolate_address(isolate));
+ __ CallCFunction(ExternalReference::log_leave_external_function(), 1);
+ __ PopSafepointRegisters();
+ }
+
+ Label promote_scheduled_exception;
+ Label delete_allocated_handles;
+ Label leave_exit_frame;
+ Label return_value_loaded;
+
+  // Load value from ReturnValue.
+ __ ldr(r0, return_value_operand);
+ __ bind(&return_value_loaded);
+ // No more valid handles (the result handle was the last one). Restore
+ // previous handle scope.
+ __ str(r4, MemOperand(r9, kNextOffset));
+ if (__ emit_debug_code()) {
+ __ ldr(r1, MemOperand(r9, kLevelOffset));
+ __ cmp(r1, r6);
+ __ Check(eq, AbortReason::kUnexpectedLevelAfterReturnFromApiCall);
}
+ __ sub(r6, r6, Operand(1));
+ __ str(r6, MemOperand(r9, kLevelOffset));
+ __ ldr(r6, MemOperand(r9, kLimitOffset));
+ __ cmp(r5, r6);
+ __ b(ne, &delete_allocated_handles);
+
+ // Leave the API exit frame.
+ __ bind(&leave_exit_frame);
+ // LeaveExitFrame expects unwind space to be in a register.
+ if (stack_space_operand == nullptr) {
+ DCHECK_NE(stack_space, 0);
+ __ mov(r4, Operand(stack_space));
+ } else {
+ DCHECK_EQ(stack_space, 0);
+ __ ldr(r4, *stack_space_operand);
+ }
+ __ LeaveExitFrame(false, r4, stack_space_operand != nullptr);
- Label fast_elements_case;
- __ cmp(r3, Operand(PACKED_ELEMENTS));
- __ b(eq, &fast_elements_case);
- GenerateInternalArrayConstructorCase(masm, HOLEY_ELEMENTS);
+ // Check if the function scheduled an exception.
+ __ LoadRoot(r4, RootIndex::kTheHoleValue);
+ __ Move(r6, ExternalReference::scheduled_exception_address(isolate));
+ __ ldr(r5, MemOperand(r6));
+ __ cmp(r4, r5);
+ __ b(ne, &promote_scheduled_exception);
- __ bind(&fast_elements_case);
- GenerateInternalArrayConstructorCase(masm, PACKED_ELEMENTS);
+ __ mov(pc, lr);
+
+ // Re-throw by promoting a scheduled exception.
+ __ bind(&promote_scheduled_exception);
+ __ TailCallRuntime(Runtime::kPromoteScheduledException);
+
+ // HandleScope limit has changed. Delete allocated extensions.
+ __ bind(&delete_allocated_handles);
+ __ str(r5, MemOperand(r9, kLimitOffset));
+ __ mov(r4, r0);
+ __ PrepareCallCFunction(1);
+ __ Move(r0, ExternalReference::isolate_address(isolate));
+ __ CallCFunction(ExternalReference::delete_handle_scope_extensions(), 1);
+ __ mov(r0, r4);
+ __ jmp(&leave_exit_frame);
+}
+
+} // namespace
+
+void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- cp : kTargetContext
+ // -- r1 : kApiFunctionAddress
+ // -- r2 : kArgc
+ // --
+ // -- sp[0] : last argument
+ // -- ...
+ // -- sp[(argc - 1) * 4] : first argument
+ // -- sp[(argc + 0) * 4] : receiver
+ // -- sp[(argc + 1) * 4] : kHolder
+ // -- sp[(argc + 2) * 4] : kCallData
+ // -----------------------------------
+
+ Register api_function_address = r1;
+ Register argc = r2;
+ Register scratch = r4;
+ Register index = r5; // For indexing MemOperands.
+
+ DCHECK(!AreAliased(api_function_address, argc, scratch, index));
+
+ // Stack offsets (without argc).
+ static constexpr int kReceiverOffset = 0;
+ static constexpr int kHolderOffset = kReceiverOffset + 1;
+ static constexpr int kCallDataOffset = kHolderOffset + 1;
+
+ // Extra stack arguments are: the receiver, kHolder, kCallData.
+ static constexpr int kExtraStackArgumentCount = 3;
+
+ typedef FunctionCallbackArguments FCA;
+
+ STATIC_ASSERT(FCA::kArgsLength == 6);
+ STATIC_ASSERT(FCA::kNewTargetIndex == 5);
+ STATIC_ASSERT(FCA::kDataIndex == 4);
+ STATIC_ASSERT(FCA::kReturnValueOffset == 3);
+ STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
+ STATIC_ASSERT(FCA::kIsolateIndex == 1);
+ STATIC_ASSERT(FCA::kHolderIndex == 0);
+
+ // Set up FunctionCallbackInfo's implicit_args on the stack as follows:
+ //
+ // Target state:
+ // sp[0 * kPointerSize]: kHolder
+ // sp[1 * kPointerSize]: kIsolate
+ // sp[2 * kPointerSize]: undefined (kReturnValueDefaultValue)
+ // sp[3 * kPointerSize]: undefined (kReturnValue)
+ // sp[4 * kPointerSize]: kData
+ // sp[5 * kPointerSize]: undefined (kNewTarget)
+
+ // Reserve space on the stack.
+ __ sub(sp, sp, Operand(FCA::kArgsLength * kPointerSize));
+
+ // kHolder.
+ __ add(index, argc, Operand(FCA::kArgsLength + kHolderOffset));
+ __ ldr(scratch, MemOperand(sp, index, LSL, kPointerSizeLog2));
+ __ str(scratch, MemOperand(sp, 0 * kPointerSize));
+
+ // kIsolate.
+ __ Move(scratch, ExternalReference::isolate_address(masm->isolate()));
+ __ str(scratch, MemOperand(sp, 1 * kPointerSize));
+
+ // kReturnValueDefaultValue, kReturnValue, and kNewTarget.
+ __ LoadRoot(scratch, RootIndex::kUndefinedValue);
+ __ str(scratch, MemOperand(sp, 2 * kPointerSize));
+ __ str(scratch, MemOperand(sp, 3 * kPointerSize));
+ __ str(scratch, MemOperand(sp, 5 * kPointerSize));
+
+ // kData.
+ __ add(index, argc, Operand(FCA::kArgsLength + kCallDataOffset));
+ __ ldr(scratch, MemOperand(sp, index, LSL, kPointerSizeLog2));
+ __ str(scratch, MemOperand(sp, 4 * kPointerSize));
+
+ // Keep a pointer to kHolder (= implicit_args) in a scratch register.
+ // We use it below to set up the FunctionCallbackInfo object.
+ __ mov(scratch, sp);
+
+ // Allocate the v8::Arguments structure in the arguments' space since
+ // it's not controlled by GC.
+ static constexpr int kApiStackSpace = 4;
+ static constexpr bool kDontSaveDoubles = false;
+ FrameScope frame_scope(masm, StackFrame::MANUAL);
+ __ EnterExitFrame(kDontSaveDoubles, kApiStackSpace);
+
+ // FunctionCallbackInfo::implicit_args_ (points at kHolder as set up above).
+ // Arguments are after the return address (pushed by EnterExitFrame()).
+ __ str(scratch, MemOperand(sp, 1 * kPointerSize));
+
+ // FunctionCallbackInfo::values_ (points at the first varargs argument passed
+ // on the stack).
+ __ add(scratch, scratch, Operand((FCA::kArgsLength - 1) * kPointerSize));
+ __ add(scratch, scratch, Operand(argc, LSL, kPointerSizeLog2));
+ __ str(scratch, MemOperand(sp, 2 * kPointerSize));
+
+ // FunctionCallbackInfo::length_.
+ __ str(argc, MemOperand(sp, 3 * kPointerSize));
+
+ // We also store the number of bytes to drop from the stack after returning
+ // from the API function here.
+ __ mov(scratch,
+ Operand((FCA::kArgsLength + kExtraStackArgumentCount) * kPointerSize));
+ __ add(scratch, scratch, Operand(argc, LSL, kPointerSizeLog2));
+ __ str(scratch, MemOperand(sp, 4 * kPointerSize));
+
+ // v8::InvocationCallback's argument.
+ __ add(r0, sp, Operand(1 * kPointerSize));
+
+ ExternalReference thunk_ref = ExternalReference::invoke_function_callback();
+
+ // There are two stack slots above the arguments we constructed on the stack.
+ // TODO(jgruber): Document what these arguments are.
+ static constexpr int kStackSlotsAboveFCA = 2;
+ MemOperand return_value_operand(
+ fp, (kStackSlotsAboveFCA + FCA::kReturnValueOffset) * kPointerSize);
+
+ static constexpr int kUseStackSpaceOperand = 0;
+ MemOperand stack_space_operand(sp, 4 * kPointerSize);
+
+ AllowExternalCallThatCantCauseGC scope(masm);
+ CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
+ kUseStackSpaceOperand, &stack_space_operand,
+ return_value_operand);
+}
+
+void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
+ // Build v8::PropertyCallbackInfo::args_ array on the stack and push property
+ // name below the exit frame to make GC aware of them.
+ STATIC_ASSERT(PropertyCallbackArguments::kShouldThrowOnErrorIndex == 0);
+ STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 1);
+ STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 2);
+ STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 3);
+ STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 4);
+ STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 5);
+ STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 6);
+ STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 7);
+
+ Register receiver = ApiGetterDescriptor::ReceiverRegister();
+ Register holder = ApiGetterDescriptor::HolderRegister();
+ Register callback = ApiGetterDescriptor::CallbackRegister();
+ Register scratch = r4;
+ DCHECK(!AreAliased(receiver, holder, callback, scratch));
+
+ Register api_function_address = r2;
+
+ __ push(receiver);
+ // Push data from AccessorInfo.
+ __ ldr(scratch, FieldMemOperand(callback, AccessorInfo::kDataOffset));
+ __ push(scratch);
+ __ LoadRoot(scratch, RootIndex::kUndefinedValue);
+ __ Push(scratch, scratch);
+ __ Move(scratch, ExternalReference::isolate_address(masm->isolate()));
+ __ Push(scratch, holder);
+ __ Push(Smi::zero()); // should_throw_on_error -> false
+ __ ldr(scratch, FieldMemOperand(callback, AccessorInfo::kNameOffset));
+ __ push(scratch);
+ // v8::PropertyCallbackInfo::args_ array and name handle.
+ const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
+
+  // Load address of v8::PropertyCallbackInfo::args_ array and name handle.
+ __ mov(r0, sp); // r0 = Handle<Name>
+ __ add(r1, r0, Operand(1 * kPointerSize)); // r1 = v8::PCI::args_
+
+ const int kApiStackSpace = 1;
+ FrameScope frame_scope(masm, StackFrame::MANUAL);
+ __ EnterExitFrame(false, kApiStackSpace);
+
+ // Create v8::PropertyCallbackInfo object on the stack and initialize
+  // its args_ field.
+ __ str(r1, MemOperand(sp, 1 * kPointerSize));
+ __ add(r1, sp, Operand(1 * kPointerSize)); // r1 = v8::PropertyCallbackInfo&
+
+ ExternalReference thunk_ref =
+ ExternalReference::invoke_accessor_getter_callback();
+
+ __ ldr(scratch, FieldMemOperand(callback, AccessorInfo::kJsGetterOffset));
+ __ ldr(api_function_address,
+ FieldMemOperand(scratch, Foreign::kForeignAddressOffset));
+
+ // +3 is to skip prolog, return address and name handle.
+ MemOperand return_value_operand(
+ fp, (PropertyCallbackArguments::kReturnValueOffset + 3) * kPointerSize);
+ MemOperand* const kUseStackSpaceConstant = nullptr;
+ CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
+ kStackUnwindSpace, kUseStackSpaceConstant,
+ return_value_operand);
+}
+
+void Builtins::Generate_DirectCEntry(MacroAssembler* masm) {
+ // The sole purpose of DirectCEntry is for movable callers (e.g. any general
+ // purpose Code object) to be able to call into C functions that may trigger
+ // GC and thus move the caller.
+ //
+ // DirectCEntry places the return address on the stack (updated by the GC),
+ // making the call GC safe. The irregexp backend relies on this.
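+  //
+  // Caller contract (sketch, inferred from the code below): the C function
+  // address has been loaded into ip, and a slot for the return address has
+  // been reserved at sp[0].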
+
+ __ str(lr, MemOperand(sp, 0)); // Store the return address.
+ __ blx(ip); // Call the C++ function.
+ __ ldr(pc, MemOperand(sp, 0)); // Return to calling code.
+}
+
+void Builtins::Generate_MemCopyUint8Uint8(MacroAssembler* masm) {
+ Register dest = r0;
+ Register src = r1;
+ Register chars = r2;
+ Register temp1 = r3;
+ Label less_4;
+
+ {
+ UseScratchRegisterScope temps(masm);
+ Register temp2 = temps.Acquire();
+ Label loop;
+
+ __ bic(temp2, chars, Operand(0x3), SetCC);
+ __ b(&less_4, eq);
+ __ add(temp2, dest, temp2);
+
+ __ bind(&loop);
+ __ ldr(temp1, MemOperand(src, 4, PostIndex));
+ __ str(temp1, MemOperand(dest, 4, PostIndex));
+ __ cmp(dest, temp2);
+ __ b(&loop, ne);
+ }
+
+ __ bind(&less_4);
+ __ mov(chars, Operand(chars, LSL, 31), SetCC);
+ // bit0 => Z (ne), bit1 => C (cs)
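+  // (LSL #31 moves bit0 into the sign bit, so ne is taken iff one trailing
+  // byte remains, and shifts bit1 into the carry, so cs is taken iff a
+  // trailing halfword remains; the conditional accesses below copy them.)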
+ __ ldrh(temp1, MemOperand(src, 2, PostIndex), cs);
+ __ strh(temp1, MemOperand(dest, 2, PostIndex), cs);
+ __ ldrb(temp1, MemOperand(src), ne);
+ __ strb(temp1, MemOperand(dest), ne);
+ __ Ret();
+}
+
+void Builtins::Generate_MemCopyUint16Uint8(MacroAssembler* masm) {
+ Register dest = r0;
+ Register src = r1;
+ Register chars = r2;
+
+ {
+ UseScratchRegisterScope temps(masm);
+
+ Register temp1 = r3;
+ Register temp2 = temps.Acquire();
+ Register temp3 = lr;
+ Register temp4 = r4;
+ Label loop;
+ Label not_two;
+
+ __ Push(lr, r4);
+ __ bic(temp2, chars, Operand(0x3));
+ __ add(temp2, dest, Operand(temp2, LSL, 1));
+
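+    // Each iteration widens four source bytes into four halfwords: uxtb16
+    // extracts bytes 0/2 and (with ror #8) bytes 1/3, and pkhbt/pkhtb re-pack
+    // them pairwise before the two stores write 8 destination bytes.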
+ __ bind(&loop);
+ __ ldr(temp1, MemOperand(src, 4, PostIndex));
+ __ uxtb16(temp3, temp1);
+ __ uxtb16(temp4, temp1, 8);
+ __ pkhbt(temp1, temp3, Operand(temp4, LSL, 16));
+ __ str(temp1, MemOperand(dest));
+ __ pkhtb(temp1, temp4, Operand(temp3, ASR, 16));
+ __ str(temp1, MemOperand(dest, 4));
+ __ add(dest, dest, Operand(8));
+ __ cmp(dest, temp2);
+ __ b(&loop, ne);
+
+ __ mov(chars, Operand(chars, LSL, 31), SetCC); // bit0 => ne, bit1 => cs
+ __ b(&not_two, cc);
+ __ ldrh(temp1, MemOperand(src, 2, PostIndex));
+ __ uxtb(temp3, temp1, 8);
+ __ mov(temp3, Operand(temp3, LSL, 16));
+ __ uxtab(temp3, temp3, temp1);
+ __ str(temp3, MemOperand(dest, 4, PostIndex));
+ __ bind(&not_two);
+ __ ldrb(temp1, MemOperand(src), ne);
+ __ strh(temp1, MemOperand(dest), ne);
+ __ Pop(pc, r4);
+ }
}
#undef __
diff --git a/deps/v8/src/builtins/arm64/builtins-arm64.cc b/deps/v8/src/builtins/arm64/builtins-arm64.cc
index 0d51d9decf..8fadff4768 100644
--- a/deps/v8/src/builtins/arm64/builtins-arm64.cc
+++ b/deps/v8/src/builtins/arm64/builtins-arm64.cc
@@ -4,16 +4,21 @@
#if V8_TARGET_ARCH_ARM64
-#include "src/arm64/macro-assembler-arm64-inl.h"
+#include "src/api-arguments.h"
#include "src/code-factory.h"
-#include "src/code-stubs.h"
#include "src/counters.h"
#include "src/debug/debug.h"
#include "src/deoptimizer.h"
#include "src/frame-constants.h"
#include "src/frames.h"
+#include "src/macro-assembler-inl.h"
#include "src/objects-inl.h"
+#include "src/objects/cell.h"
+#include "src/objects/foreign.h"
+#include "src/objects/heap-number.h"
#include "src/objects/js-generator.h"
+#include "src/objects/smi.h"
+#include "src/register-configuration.h"
#include "src/runtime/runtime.h"
#include "src/wasm/wasm-objects.h"
@@ -42,7 +47,6 @@ void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) {
// -- sp[...]: constructor arguments
// -----------------------------------
ASM_LOCATION("Builtins::Generate_InternalArrayConstructor");
- Label generic_array_code;
if (FLAG_debug_code) {
// Initial map for the builtin InternalArray functions should be maps.
@@ -83,15 +87,12 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
}
static_assert(kJavaScriptCallCodeStartRegister == x2, "ABI mismatch");
- __ Add(x2, x2, Code::kHeaderSize - kHeapObjectTag);
- __ Br(x2);
+ __ JumpCodeObject(x2);
}
namespace {
void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
- Label post_instantiation_deopt_entry;
-
// ----------- S t a t e -------------
// -- x0 : number of arguments
// -- x1 : constructor function
@@ -194,8 +195,8 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
__ Ret();
}
-static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
- Label* stack_overflow) {
+void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
+ Label* stack_overflow) {
UseScratchRegisterScope temps(masm);
Register scratch = temps.AcquireX();
@@ -210,6 +211,26 @@ static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
// Check if the arguments will overflow the stack.
__ Cmp(scratch, Operand(num_args, LSL, kPointerSizeLog2));
__ B(le, stack_overflow);
+
+#if defined(V8_OS_WIN)
+  // Simulate _chkstk to extend the stack guard page on Windows ARM64.
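+  // Windows commits stack pages one guard page at a time, so every page
+  // between sp and the new limit must be touched in order before use.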
+ const int kPageSize = 4096;
+ Label chkstk, chkstk_done;
+ Register probe = temps.AcquireX();
+
+ __ Sub(scratch, sp, Operand(num_args, LSL, kPointerSizeLog2));
+ __ Mov(probe, sp);
+
+ // Loop start of stack probe.
+ __ Bind(&chkstk);
+ __ Sub(probe, probe, kPageSize);
+ __ Cmp(probe, scratch);
+ __ B(lo, &chkstk_done);
+ __ Ldrb(xzr, MemOperand(probe));
+ __ B(&chkstk);
+
+ __ Bind(&chkstk_done);
+#endif
}
} // namespace
@@ -416,6 +437,17 @@ void Builtins::Generate_ConstructedNonConstructable(MacroAssembler* masm) {
__ CallRuntime(Runtime::kThrowConstructedNonConstructable);
}
+static void GetSharedFunctionInfoBytecode(MacroAssembler* masm,
+ Register sfi_data,
+ Register scratch1) {
+ Label done;
+ __ CompareObjectType(sfi_data, scratch1, scratch1, INTERPRETER_DATA_TYPE);
+ __ B(ne, &done);
+ __ Ldr(sfi_data,
+ FieldMemOperand(sfi_data, InterpreterData::kBytecodeArrayOffset));
+ __ Bind(&done);
+}
+
// static
void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// ----------- S t a t e -------------
@@ -507,13 +539,9 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// Underlying function needs to have bytecode available.
if (FLAG_debug_code) {
- Label check_has_bytecode_array;
__ Ldr(x3, FieldMemOperand(x4, JSFunction::kSharedFunctionInfoOffset));
__ Ldr(x3, FieldMemOperand(x3, SharedFunctionInfo::kFunctionDataOffset));
- __ CompareObjectType(x3, x0, x0, INTERPRETER_DATA_TYPE);
- __ B(ne, &check_has_bytecode_array);
- __ Ldr(x3, FieldMemOperand(x3, InterpreterData::kBytecodeArrayOffset));
- __ Bind(&check_has_bytecode_array);
+ GetSharedFunctionInfoBytecode(masm, x3, x0);
__ CompareObjectType(x3, x3, x3, BYTECODE_ARRAY_TYPE);
__ Assert(eq, AbortReason::kMissingBytecodeArray);
}
@@ -530,8 +558,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ Mov(x1, x4);
static_assert(kJavaScriptCallCodeStartRegister == x2, "ABI mismatch");
__ Ldr(x2, FieldMemOperand(x1, JSFunction::kCodeOffset));
- __ Add(x2, x2, Code::kHeaderSize - kHeapObjectTag);
- __ Jump(x2);
+ __ JumpCodeObject(x2);
}
__ Bind(&prepare_step_in_if_stepping);
@@ -564,26 +591,235 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
}
}
+namespace {
+
+// Called with the native C calling convention. The corresponding function
+// signature is either:
+//
+// using JSEntryFunction = GeneratedCode<Address(
+// Address root_register_value, Address new_target, Address target,
+// Address receiver, intptr_t argc, Address** argv)>;
+// or
+// using JSEntryFunction = GeneratedCode<Address(
+// Address root_register_value, MicrotaskQueue* microtask_queue)>;
+//
+// Input is either:
+// x0: root_register_value.
+// x1: new_target.
+// x2: target.
+// x3: receiver.
+// x4: argc.
+// x5: argv.
+// or
+// x0: root_register_value.
+// x1: microtask_queue.
+// Output:
+// x0: result.
+void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
+ Builtins::Name entry_trampoline) {
+ Label invoke, handler_entry, exit;
+
+ {
+ NoRootArrayScope no_root_array(masm);
+
+ // Enable instruction instrumentation. This only works on the simulator, and
+ // will have no effect on the model or real hardware.
+ __ EnableInstrumentation();
+
+ __ PushCalleeSavedRegisters();
+
+ // Set up the reserved register for 0.0.
+ __ Fmov(fp_zero, 0.0);
+
+ // Initialize the root register.
+ // C calling convention. The first argument is passed in x0.
+ __ Mov(kRootRegister, x0);
+ }
+
+ // Build an entry frame (see layout below).
+ int64_t bad_frame_pointer = -1L; // Bad frame pointer to fail if it is used.
+ __ Mov(x13, bad_frame_pointer);
+ __ Mov(x12, StackFrame::TypeToMarker(type));
+ __ Mov(x11, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress,
+ masm->isolate()));
+ __ Ldr(x10, MemOperand(x11));
+
+ __ Push(x13, x12, xzr, x10);
+ // Set up fp.
+ __ Sub(fp, sp, EntryFrameConstants::kCallerFPOffset);
+
+ // Push the JS entry frame marker. Also set js_entry_sp if this is the
+ // outermost JS call.
+ Label non_outermost_js, done;
+ ExternalReference js_entry_sp = ExternalReference::Create(
+ IsolateAddressId::kJSEntrySPAddress, masm->isolate());
+ __ Mov(x10, js_entry_sp);
+ __ Ldr(x11, MemOperand(x10));
+
+ // Select between the inner and outermost frame marker, based on the JS entry
+ // sp. We assert that the inner marker is zero, so we can use xzr to save a
+ // move instruction.
+ DCHECK_EQ(StackFrame::INNER_JSENTRY_FRAME, 0);
+ __ Cmp(x11, 0); // If x11 is zero, this is the outermost frame.
+ __ Csel(x12, xzr, StackFrame::OUTERMOST_JSENTRY_FRAME, ne);
+ __ B(ne, &done);
+ __ Str(fp, MemOperand(x10));
+
+ __ Bind(&done);
+ __ Push(x12, padreg);
+
+  // The frame setup looks like this:
+ // sp[0] : padding.
+ // sp[1] : JS entry frame marker.
+ // sp[2] : C entry FP.
+ // sp[3] : stack frame marker.
+ // sp[4] : stack frame marker.
+ // sp[5] : bad frame pointer 0xFFF...FF <- fp points here.
+
+ // Jump to a faked try block that does the invoke, with a faked catch
+ // block that sets the pending exception.
+ __ B(&invoke);
+
+ // Prevent the constant pool from being emitted between the record of the
+ // handler_entry position and the first instruction of the sequence here.
+ // There is no risk because Assembler::Emit() emits the instruction before
+ // checking for constant pool emission, but we do not want to depend on
+ // that.
+ {
+ Assembler::BlockPoolsScope block_pools(masm);
+ __ bind(&handler_entry);
+
+ // Store the current pc as the handler offset. It's used later to create the
+ // handler table.
+ masm->isolate()->builtins()->SetJSEntryHandlerOffset(handler_entry.pos());
+
+ // Caught exception: Store result (exception) in the pending exception
+ // field in the JSEnv and return a failure sentinel. Coming in here the
+ // fp will be invalid because the PushTryHandler below sets it to 0 to
+ // signal the existence of the JSEntry frame.
+ __ Mov(x10,
+ ExternalReference::Create(IsolateAddressId::kPendingExceptionAddress,
+ masm->isolate()));
+ }
+ __ Str(x0, MemOperand(x10));
+ __ LoadRoot(x0, RootIndex::kException);
+ __ B(&exit);
+
+ // Invoke: Link this frame into the handler chain.
+ __ Bind(&invoke);
+
+ // Push new stack handler.
+ static_assert(StackHandlerConstants::kSize == 2 * kPointerSize,
+ "Unexpected offset for StackHandlerConstants::kSize");
+ static_assert(StackHandlerConstants::kNextOffset == 0 * kPointerSize,
+ "Unexpected offset for StackHandlerConstants::kNextOffset");
+
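+  // Conceptually, the handler pushed here is a singly linked list node kept
+  // on the machine stack; a sketch, with StackHandlerConstants authoritative:
+  //
+  //   struct StackHandler {
+  //     StackHandler* next;  // kNextOffset == 0
+  //     void* padding;       // keeps sp 16-byte aligned on arm64
+  //   };
+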
+ // Link the current handler as the next handler.
+ __ Mov(x11, ExternalReference::Create(IsolateAddressId::kHandlerAddress,
+ masm->isolate()));
+ __ Ldr(x10, MemOperand(x11));
+ __ Push(padreg, x10);
+
+ // Set this new handler as the current one.
+ {
+ UseScratchRegisterScope temps(masm);
+ Register scratch = temps.AcquireX();
+ __ Mov(scratch, sp);
+ __ Str(scratch, MemOperand(x11));
+ }
+
+ // If an exception not caught by another handler occurs, this handler
+ // returns control to the code after the B(&invoke) above, which
+ // restores all callee-saved registers (including cp and fp) to their
+ // saved values before returning a failure to C.
+ //
+ // Invoke the function by calling through JS entry trampoline builtin and
+ // pop the faked function when we return.
+ Handle<Code> trampoline_code =
+ masm->isolate()->builtins()->builtin_handle(entry_trampoline);
+ __ Call(trampoline_code, RelocInfo::CODE_TARGET);
+
+ // Pop the stack handler and unlink this frame from the handler chain.
+ static_assert(StackHandlerConstants::kNextOffset == 0 * kPointerSize,
+ "Unexpected offset for StackHandlerConstants::kNextOffset");
+ __ Pop(x10, padreg);
+ __ Mov(x11, ExternalReference::Create(IsolateAddressId::kHandlerAddress,
+ masm->isolate()));
+ __ Drop(StackHandlerConstants::kSlotCount - 2);
+ __ Str(x10, MemOperand(x11));
+
+ __ Bind(&exit);
+ // x0 holds the result.
+ // The stack pointer points to the top of the entry frame pushed on entry from
+ // C++ (at the beginning of this stub):
+ // sp[0] : padding.
+ // sp[1] : JS entry frame marker.
+ // sp[2] : C entry FP.
+ // sp[3] : stack frame marker.
+ // sp[4] : stack frame marker.
+ // sp[5] : bad frame pointer 0xFFF...FF <- fp points here.
+
+ // Check if the current stack frame is marked as the outermost JS frame.
+ Label non_outermost_js_2;
+ {
+ Register c_entry_fp = x11;
+ __ PeekPair(x10, c_entry_fp, 1 * kPointerSize);
+ __ Cmp(x10, StackFrame::OUTERMOST_JSENTRY_FRAME);
+ __ B(ne, &non_outermost_js_2);
+ __ Mov(x12, js_entry_sp);
+ __ Str(xzr, MemOperand(x12));
+ __ Bind(&non_outermost_js_2);
+
+ // Restore the top frame descriptors from the stack.
+ __ Mov(x12, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress,
+ masm->isolate()));
+ __ Str(c_entry_fp, MemOperand(x12));
+ }
+
+ // Reset the stack to the callee saved registers.
+ static_assert(EntryFrameConstants::kFixedFrameSize % (2 * kPointerSize) == 0,
+ "Size of entry frame is not a multiple of 16 bytes");
+ __ Drop(EntryFrameConstants::kFixedFrameSize / kPointerSize);
+ // Restore the callee-saved registers and return.
+ __ PopCalleeSavedRegisters();
+ __ Ret();
+}
+
+} // namespace
+
+void Builtins::Generate_JSEntry(MacroAssembler* masm) {
+ Generate_JSEntryVariant(masm, StackFrame::ENTRY,
+ Builtins::kJSEntryTrampoline);
+}
+
+void Builtins::Generate_JSConstructEntry(MacroAssembler* masm) {
+ Generate_JSEntryVariant(masm, StackFrame::CONSTRUCT_ENTRY,
+ Builtins::kJSConstructEntryTrampoline);
+}
+
+void Builtins::Generate_JSRunMicrotasksEntry(MacroAssembler* masm) {
+ Generate_JSEntryVariant(masm, StackFrame::ENTRY,
+ Builtins::kRunMicrotasksTrampoline);
+}
+
// Input:
-// x0: new.target.
-// x1: function.
-// x2: receiver.
-// x3: argc.
-// x4: argv.
+// x1: new.target.
+// x2: function.
+// x3: receiver.
+// x4: argc.
+// x5: argv.
// Output:
// x0: result.
static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
bool is_construct) {
- Register new_target = x0;
- Register function = x1;
- Register receiver = x2;
- Register argc = x3;
- Register argv = x4;
+ Register new_target = x1;
+ Register function = x2;
+ Register receiver = x3;
+ Register argc = x4;
+ Register argv = x5;
Register scratch = x10;
Register slots_to_claim = x11;
- ProfileEntryHookStub::MaybeCallEntryHook(masm);
-
{
// Enter an internal frame.
FrameScope scope(masm, StackFrame::INTERNAL);
@@ -619,8 +855,8 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ Stp(receiver, function, MemOperand(scratch));
// Copy arguments to the stack in a loop, in reverse order.
- // x3: argc.
- // x4: argv.
+ // x4: argc.
+ // x5: argv.
Label loop, done;
// Skip the argument set up if we have no arguments.
@@ -642,15 +878,16 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ Bind(&done);
- __ Mov(scratch, argc);
- __ Mov(argc, new_target);
- __ Mov(new_target, scratch);
+ __ Mov(x0, argc);
+ __ Mov(x3, new_target);
+ __ Mov(x1, function);
// x0: argc.
+ // x1: function.
// x3: new.target.
// Initialize all JavaScript callee-saved registers, since they will be seen
// by the garbage collector as part of handlers.
- // The original values have been saved in JSEntryStub::GenerateBody().
+ // The original values have been saved in JSEntry.
__ LoadRoot(x19, RootIndex::kUndefinedValue);
__ Mov(x20, x19);
__ Mov(x21, x19);
@@ -685,6 +922,16 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
Generate_JSEntryTrampolineHelper(masm, true);
}
+void Builtins::Generate_RunMicrotasksTrampoline(MacroAssembler* masm) {
+ // This expects two C++ function parameters passed by Invoke() in
+ // execution.cc.
+ // x0: root_register_value
+ // x1: microtask_queue
+
+ __ Mov(RunMicrotasksDescriptor::MicrotaskQueueRegister(), x1);
+ __ Jump(BUILTIN_CODE(masm->isolate(), RunMicrotasks), RelocInfo::CODE_TARGET);
+}
+
static void ReplaceClosureCodeWithOptimizedCode(
MacroAssembler* masm, Register optimized_code, Register closure,
Register scratch1, Register scratch2, Register scratch3) {
@@ -763,6 +1010,9 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
Operand(Smi::FromEnum(OptimizationMarker::kNone)), eq,
&fallthrough);
+ // TODO(v8:8394): The logging of first execution will break if
+ // feedback vectors are not allocated. We need to find a different way of
+ // logging these events if required.
TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry,
OptimizationMarker::kLogFirstExecution,
Runtime::kFunctionFirstExecution);
@@ -811,8 +1061,7 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure,
scratch2, scratch3, feedback_vector);
static_assert(kJavaScriptCallCodeStartRegister == x2, "ABI mismatch");
- __ Add(x2, optimized_code_entry,
- Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ LoadCodeObjectEntry(x2, optimized_code_entry);
__ Jump(x2);
// Optimized code slot contains deoptimized code, evict it and re-enter the
@@ -895,58 +1144,54 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
// The function builds an interpreter frame. See InterpreterFrameConstants in
// frames.h for its layout.
void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
- ProfileEntryHookStub::MaybeCallEntryHook(masm);
-
Register closure = x1;
Register feedback_vector = x2;
+ // Get the bytecode array from the function object and load it into
+ // kInterpreterBytecodeArrayRegister.
+ __ Ldr(x0, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+ __ Ldr(kInterpreterBytecodeArrayRegister,
+ FieldMemOperand(x0, SharedFunctionInfo::kFunctionDataOffset));
+ GetSharedFunctionInfoBytecode(masm, kInterpreterBytecodeArrayRegister, x11);
+
+ // The bytecode array could have been flushed from the shared function info,
+ // if so, call into CompileLazy.
+ Label compile_lazy;
+ __ CompareObjectType(kInterpreterBytecodeArrayRegister, x0, x0,
+ BYTECODE_ARRAY_TYPE);
+ __ B(ne, &compile_lazy);
+
// Load the feedback vector from the closure.
__ Ldr(feedback_vector,
FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
__ Ldr(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
+
+ Label push_stack_frame;
+  // Check if the feedback vector is valid. If valid, check for optimized
+  // code and update the invocation count. Otherwise, set up the stack frame.
+ __ CompareRoot(feedback_vector, RootIndex::kUndefinedValue);
+ __ B(eq, &push_stack_frame);
+
// Read off the optimized code slot in the feedback vector, and if there
// is optimized code or an optimization marker, call that instead.
MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, x7, x4, x5);
+ // Increment invocation count for the function.
+  // MaybeTailCallOptimizedCodeSlot preserves feedback_vector, so it is safe
+  // to reuse it here.
+ __ Ldr(w10, FieldMemOperand(feedback_vector,
+ FeedbackVector::kInvocationCountOffset));
+ __ Add(w10, w10, Operand(1));
+ __ Str(w10, FieldMemOperand(feedback_vector,
+ FeedbackVector::kInvocationCountOffset));
+
// Open a frame scope to indicate that there is a frame on the stack. The
// MANUAL indicates that the scope shouldn't actually generate code to set up
// the frame (that is done below).
+ __ Bind(&push_stack_frame);
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ Push(lr, fp, cp, closure);
__ Add(fp, sp, StandardFrameConstants::kFixedFrameSizeFromFp);
- // Get the bytecode array from the function object and load it into
- // kInterpreterBytecodeArrayRegister.
- Label has_bytecode_array;
- __ Ldr(x0, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
- __ Ldr(kInterpreterBytecodeArrayRegister,
- FieldMemOperand(x0, SharedFunctionInfo::kFunctionDataOffset));
- __ CompareObjectType(kInterpreterBytecodeArrayRegister, x11, x11,
- INTERPRETER_DATA_TYPE);
- __ B(ne, &has_bytecode_array);
- __ Ldr(kInterpreterBytecodeArrayRegister,
- FieldMemOperand(kInterpreterBytecodeArrayRegister,
- InterpreterData::kBytecodeArrayOffset));
- __ Bind(&has_bytecode_array);
-
- // Increment invocation count for the function.
- __ Ldr(x11, FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
- __ Ldr(x11, FieldMemOperand(x11, Cell::kValueOffset));
- __ Ldr(w10, FieldMemOperand(x11, FeedbackVector::kInvocationCountOffset));
- __ Add(w10, w10, Operand(1));
- __ Str(w10, FieldMemOperand(x11, FeedbackVector::kInvocationCountOffset));
-
- // Check function data field is actually a BytecodeArray object.
- if (FLAG_debug_code) {
- __ AssertNotSmi(
- kInterpreterBytecodeArrayRegister,
- AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
- __ CompareObjectType(kInterpreterBytecodeArrayRegister, x0, x0,
- BYTECODE_ARRAY_TYPE);
- __ Assert(
- eq, AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
- }
-
// Reset code age.
__ Mov(x10, Operand(BytecodeArray::kNoAgeBytecodeAge));
__ Strb(x10, FieldMemOperand(kInterpreterBytecodeArrayRegister,
@@ -1009,9 +1254,15 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Mov(
kInterpreterDispatchTableRegister,
ExternalReference::interpreter_dispatch_table_address(masm->isolate()));
+#if defined(V8_OS_WIN)
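+  // On Windows, x18 is reserved as the platform register, so use x23 as the
+  // scratch register for the bytecode instead.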
+ __ Ldrb(x23, MemOperand(kInterpreterBytecodeArrayRegister,
+ kInterpreterBytecodeOffsetRegister));
+ __ Mov(x1, Operand(x23, LSL, kPointerSizeLog2));
+#else
__ Ldrb(x18, MemOperand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister));
__ Mov(x1, Operand(x18, LSL, kPointerSizeLog2));
+#endif
__ Ldr(kJavaScriptCallCodeStartRegister,
MemOperand(kInterpreterDispatchTableRegister, x1));
__ Call(kJavaScriptCallCodeStartRegister);
@@ -1040,6 +1291,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// The return value is in x0.
LeaveInterpreterFrame(masm, x2);
__ Ret();
+
+ __ bind(&compile_lazy);
+ GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
+ __ Unreachable(); // Should not return.
}
static void Generate_InterpreterPushArgs(MacroAssembler* masm,
@@ -1194,12 +1449,14 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
// Set the return address to the correct point in the interpreter entry
// trampoline.
Label builtin_trampoline, trampoline_loaded;
- Smi* interpreter_entry_return_pc_offset(
+ Smi interpreter_entry_return_pc_offset(
masm->isolate()->heap()->interpreter_entry_return_pc_offset());
DCHECK_NE(interpreter_entry_return_pc_offset, Smi::kZero);
- // If the SFI function_data is an InterpreterData, get the trampoline stored
- // in it, otherwise get the trampoline from the builtins list.
+ // If the SFI function_data is an InterpreterData, the function will have a
+ // custom copy of the interpreter entry trampoline for profiling. If so,
+ // get the custom trampoline, otherwise grab the entry address of the global
+ // trampoline.
__ Ldr(x1, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
__ Ldr(x1, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
__ Ldr(x1, FieldMemOperand(x1, SharedFunctionInfo::kFunctionDataOffset));
@@ -1210,14 +1467,17 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
__ Ldr(x1,
FieldMemOperand(x1, InterpreterData::kInterpreterTrampolineOffset));
+ __ Add(x1, x1, Operand(Code::kHeaderSize - kHeapObjectTag));
__ B(&trampoline_loaded);
__ Bind(&builtin_trampoline);
- __ LoadObject(x1, BUILTIN_CODE(masm->isolate(), InterpreterEntryTrampoline));
+ __ Mov(x1, ExternalReference::
+ address_of_interpreter_entry_trampoline_instruction_start(
+ masm->isolate()));
+ __ Ldr(x1, MemOperand(x1));
__ Bind(&trampoline_loaded);
- __ Add(lr, x1, Operand(interpreter_entry_return_pc_offset->value() +
- Code::kHeaderSize - kHeapObjectTag));
+ __ Add(lr, x1, Operand(interpreter_entry_return_pc_offset->value()));
// Initialize the dispatch table register.
__ Mov(
@@ -1245,9 +1505,15 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
__ SmiUntag(kInterpreterBytecodeOffsetRegister);
// Dispatch to the target bytecode.
+#if defined(V8_OS_WIN)
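+  // On Windows, x18 is reserved as the platform register, so use x23 as the
+  // scratch register for the bytecode instead.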
+ __ Ldrb(x23, MemOperand(kInterpreterBytecodeArrayRegister,
+ kInterpreterBytecodeOffsetRegister));
+ __ Mov(x1, Operand(x23, LSL, kPointerSizeLog2));
+#else
__ Ldrb(x18, MemOperand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister));
__ Mov(x1, Operand(x18, LSL, kPointerSizeLog2));
+#endif
__ Ldr(kJavaScriptCallCodeStartRegister,
MemOperand(kInterpreterDispatchTableRegister, x1));
__ Jump(kJavaScriptCallCodeStartRegister);
@@ -1316,7 +1582,7 @@ void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
Label at_least_one_arg;
Label three_args;
- DCHECK_NULL(Smi::kZero);
+ DCHECK_EQ(0, Smi::kZero.ptr());
__ Cbnz(argc, &at_least_one_arg);
// No arguments.
@@ -1372,8 +1638,7 @@ void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
// On failure, tail call back to regular js by re-calling the function
// which has been reset to the compile lazy builtin.
__ Ldr(x4, FieldMemOperand(new_target, JSFunction::kCodeOffset));
- __ Add(x4, x4, Code::kHeaderSize - kHeapObjectTag);
- __ Jump(x4);
+ __ JumpCodeObject(x4);
}
namespace {
@@ -1429,8 +1694,7 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
__ Pop(fp, lr);
// Call builtin.
- __ Add(builtin, builtin, Code::kHeaderSize - kHeapObjectTag);
- __ Br(builtin);
+ __ JumpCodeObject(builtin);
}
} // namespace
@@ -1478,7 +1742,7 @@ void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
// If the code object is null, just return to the caller.
Label skip;
- __ CompareAndBranch(x0, Smi::kZero, ne, &skip);
+ __ CompareAndBranch(x0, Smi::zero(), ne, &skip);
__ Ret();
__ Bind(&skip);
@@ -2630,8 +2894,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// x3 : new target (passed through to callee)
static_assert(kJavaScriptCallCodeStartRegister == x2, "ABI mismatch");
__ Ldr(x2, FieldMemOperand(function, JSFunction::kCodeOffset));
- __ Add(x2, x2, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Call(x2);
+ __ CallCodeObject(x2);
// Store offset of return address for deoptimizer.
masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(masm->pc_offset());
@@ -2645,8 +2908,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ Bind(&dont_adapt_arguments);
static_assert(kJavaScriptCallCodeStartRegister == x2, "ABI mismatch");
__ Ldr(x2, FieldMemOperand(function, JSFunction::kCodeOffset));
- __ Add(x2, x2, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(x2);
+ __ JumpCodeObject(x2);
__ Bind(&stack_overflow);
__ RecordComment("-- Stack overflow --");
@@ -2660,8 +2922,10 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
// The function index was put in w8 by the jump table trampoline.
// Sign extend and convert to Smi for the runtime call.
- __ sxtw(x8, w8);
- __ SmiTag(x8, x8);
+ __ sxtw(kWasmCompileLazyFuncIndexRegister,
+ kWasmCompileLazyFuncIndexRegister.W());
+ __ SmiTag(kWasmCompileLazyFuncIndexRegister,
+ kWasmCompileLazyFuncIndexRegister);
{
HardAbortScope hard_abort(masm); // Avoid calls to Abort.
FrameScope scope(masm, StackFrame::WASM_COMPILE_LAZY);
@@ -2678,13 +2942,13 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
// Pass instance and function index as explicit arguments to the runtime
// function.
- __ Push(kWasmInstanceRegister, x8);
+ __ Push(kWasmInstanceRegister, kWasmCompileLazyFuncIndexRegister);
// Load the correct CEntry builtin from the instance object.
__ Ldr(x2, FieldMemOperand(kWasmInstanceRegister,
WasmInstanceObject::kCEntryStubOffset));
// Initialize the JavaScript context with 0. CEntry will use it to
// set the current context on the isolate.
- __ Mov(cp, Smi::kZero);
+ __ Mov(cp, Smi::zero());
__ CallRuntimeWithCEntry(Runtime::kWasmCompileLazy, x2);
// The entrypoint address is the return value.
__ mov(x8, kReturnRegister0);
@@ -2708,7 +2972,6 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
HardAbortScope hard_aborts(masm);
ASM_LOCATION("CEntry::Generate entry");
- ProfileEntryHookStub::MaybeCallEntryHook(masm);
// Register parameters:
// x0: argc (including receiver, untagged)
@@ -2812,24 +3075,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
__ Mov(x1, argv);
__ Mov(x2, ExternalReference::isolate_address(masm->isolate()));
- Label return_location;
- __ Adr(x12, &return_location);
- __ Poke(x12, 0);
-
- if (__ emit_debug_code()) {
- // Verify that the slot below fp[kSPOffset]-8 points to the return location
- // (currently in x12).
- UseScratchRegisterScope temps(masm);
- Register temp = temps.AcquireX();
- __ Ldr(temp, MemOperand(fp, ExitFrameConstants::kSPOffset));
- __ Ldr(temp, MemOperand(temp, -static_cast<int64_t>(kXRegSize)));
- __ Cmp(temp, x12);
- __ Check(eq, AbortReason::kReturnAddressNotFoundInFrame);
- }
-
- // Call the builtin.
- __ Blr(target);
- __ Bind(&return_location);
+ __ StoreReturnAddressAndCall(target);
// Result returned in x0 or x1:x0 - do not destroy these registers!
@@ -2968,7 +3214,7 @@ void Builtins::Generate_DoubleToI(MacroAssembler* masm) {
// Isolate the mantissa bits, and set the implicit '1'.
Register mantissa = scratch2;
__ Ubfx(mantissa, result, 0, HeapNumber::kMantissaBits);
- __ Orr(mantissa, mantissa, 1UL << HeapNumber::kMantissaBits);
+ __ Orr(mantissa, mantissa, 1ULL << HeapNumber::kMantissaBits);
// Negate the mantissa if necessary.
__ Tst(result, kXSignMask);
@@ -3078,54 +3324,6 @@ void Builtins::Generate_MathPowInternal(MacroAssembler* masm) {
__ Ret();
}
-namespace {
-
-void GenerateInternalArrayConstructorCase(MacroAssembler* masm,
- ElementsKind kind) {
- Label zero_case, n_case;
- Register argc = x0;
-
- __ Cbz(argc, &zero_case);
- __ CompareAndBranch(argc, 1, ne, &n_case);
-
- // One argument.
- if (IsFastPackedElementsKind(kind)) {
- Label packed_case;
-
- // We might need to create a holey array; look at the first argument.
- __ Peek(x10, 0);
- __ Cbz(x10, &packed_case);
-
- __ Jump(CodeFactory::InternalArraySingleArgumentConstructor(
- masm->isolate(), GetHoleyElementsKind(kind))
- .code(),
- RelocInfo::CODE_TARGET);
-
- __ Bind(&packed_case);
- }
-
- __ Jump(
- CodeFactory::InternalArraySingleArgumentConstructor(masm->isolate(), kind)
- .code(),
- RelocInfo::CODE_TARGET);
-
- __ Bind(&zero_case);
- // No arguments.
- __ Jump(CodeFactory::InternalArrayNoArgumentConstructor(masm->isolate(), kind)
- .code(),
- RelocInfo::CODE_TARGET);
-
- __ Bind(&n_case);
- // N arguments.
- // Load undefined into the allocation site parameter as required by
- // ArrayNArgumentsConstructor.
- __ LoadRoot(kJavaScriptCallExtraArg1Register, RootIndex::kUndefinedValue);
- Handle<Code> code = BUILTIN_CODE(masm->isolate(), ArrayNArgumentsConstructor);
- __ Jump(code, RelocInfo::CODE_TARGET);
-}
-
-} // namespace
-
void Builtins::Generate_InternalArrayConstructorImpl(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- x0 : argc
@@ -3150,31 +3348,404 @@ void Builtins::Generate_InternalArrayConstructorImpl(MacroAssembler* masm) {
__ Bind(&unexpected_map);
__ Abort(AbortReason::kUnexpectedInitialMapForArrayFunction);
__ Bind(&map_ok);
+
+ Register kind = w3;
+ // Figure out the right elements kind
+ __ Ldr(x10, FieldMemOperand(constructor,
+ JSFunction::kPrototypeOrInitialMapOffset));
+
+ // Retrieve elements_kind from map.
+ __ LoadElementsKindFromMap(kind, x10);
+
+ // Initial elements kind should be packed elements.
+ __ Cmp(kind, PACKED_ELEMENTS);
+ __ Assert(eq, AbortReason::kInvalidElementsKindForInternalPackedArray);
+
+ // No arguments should be passed.
+ __ Cmp(x0, 0);
+ __ Assert(eq, AbortReason::kWrongNumberOfArgumentsForInternalPackedArray);
}
- Register kind = w3;
- // Figure out the right elements kind
- __ Ldr(x10, FieldMemOperand(constructor,
- JSFunction::kPrototypeOrInitialMapOffset));
+ __ Jump(
+ BUILTIN_CODE(masm->isolate(), InternalArrayNoArgumentConstructor_Packed),
+ RelocInfo::CODE_TARGET);
+}
- // Retrieve elements_kind from map.
- __ LoadElementsKindFromMap(kind, x10);
+namespace {
- if (FLAG_debug_code) {
- Label done;
- __ Cmp(x3, PACKED_ELEMENTS);
- __ Ccmp(x3, HOLEY_ELEMENTS, ZFlag, ne);
- __ Assert(
- eq,
- AbortReason::kInvalidElementsKindForInternalArrayOrInternalPackedArray);
+// The number of registers that CallApiFunctionAndReturn will need to save on
+// the stack. The space for these registers needs to be allocated in the
+// ExitFrame before calling CallApiFunctionAndReturn.
+constexpr int kCallApiFunctionSpillSpace = 4;
+
+int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
+ return static_cast<int>(ref0.address() - ref1.address());
+}
+
+// Calls an API function. Allocates HandleScope, extracts returned value
+// from handle and propagates exceptions.
+// 'stack_space' is the space to be unwound on exit (includes the call JS
+// arguments space and the additional space allocated for the fast call).
+// 'spill_offset' is the offset from the stack pointer where
+// CallApiFunctionAndReturn can spill registers.
+void CallApiFunctionAndReturn(MacroAssembler* masm, Register function_address,
+ ExternalReference thunk_ref, int stack_space,
+ MemOperand* stack_space_operand, int spill_offset,
+ MemOperand return_value_operand) {
+ ASM_LOCATION("CallApiFunctionAndReturn");
+ Isolate* isolate = masm->isolate();
+ ExternalReference next_address =
+ ExternalReference::handle_scope_next_address(isolate);
+ const int kNextOffset = 0;
+ const int kLimitOffset = AddressOffset(
+ ExternalReference::handle_scope_limit_address(isolate), next_address);
+ const int kLevelOffset = AddressOffset(
+ ExternalReference::handle_scope_level_address(isolate), next_address);
+
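+  // The offsets above address the isolate's handle scope data relative to
+  // its 'next' field; roughly, as a sketch (the real definition lives
+  // elsewhere in the isolate):
+  //
+  //   struct HandleScopeData {
+  //     Address* next;   // next free handle slot
+  //     Address* limit;  // end of the current handle block
+  //     int level;       // nesting depth of open HandleScopes
+  //   };
+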
+ DCHECK(function_address.is(x1) || function_address.is(x2));
+
+ Label profiler_disabled;
+ Label end_profiler_check;
+ __ Mov(x10, ExternalReference::is_profiling_address(isolate));
+ __ Ldrb(w10, MemOperand(x10));
+ __ Cbz(w10, &profiler_disabled);
+ __ Mov(x3, thunk_ref);
+ __ B(&end_profiler_check);
+
+ __ Bind(&profiler_disabled);
+ __ Mov(x3, function_address);
+ __ Bind(&end_profiler_check);
+
+ // Save the callee-save registers we are going to use.
+ // TODO(all): Is this necessary? ARM doesn't do it.
+ STATIC_ASSERT(kCallApiFunctionSpillSpace == 4);
+ __ Poke(x19, (spill_offset + 0) * kXRegSize);
+ __ Poke(x20, (spill_offset + 1) * kXRegSize);
+ __ Poke(x21, (spill_offset + 2) * kXRegSize);
+ __ Poke(x22, (spill_offset + 3) * kXRegSize);
+
+ // Allocate HandleScope in callee-save registers.
+  // We will need to restore the HandleScope after the call to the API
+  // function; by allocating it in callee-save registers, it will be
+  // preserved by C code.
+ Register handle_scope_base = x22;
+ Register next_address_reg = x19;
+ Register limit_reg = x20;
+ Register level_reg = w21;
+
+ __ Mov(handle_scope_base, next_address);
+ __ Ldr(next_address_reg, MemOperand(handle_scope_base, kNextOffset));
+ __ Ldr(limit_reg, MemOperand(handle_scope_base, kLimitOffset));
+ __ Ldr(level_reg, MemOperand(handle_scope_base, kLevelOffset));
+ __ Add(level_reg, level_reg, 1);
+ __ Str(level_reg, MemOperand(handle_scope_base, kLevelOffset));
+
+ if (FLAG_log_timer_events) {
+ FrameScope frame(masm, StackFrame::MANUAL);
+ __ PushSafepointRegisters();
+ __ Mov(x0, ExternalReference::isolate_address(isolate));
+ __ CallCFunction(ExternalReference::log_enter_external_function(), 1);
+ __ PopSafepointRegisters();
+ }
+
+ __ Mov(x10, x3); // TODO(arm64): Load target into x10 directly.
+ __ StoreReturnAddressAndCall(x10);
+
+ if (FLAG_log_timer_events) {
+ FrameScope frame(masm, StackFrame::MANUAL);
+ __ PushSafepointRegisters();
+ __ Mov(x0, ExternalReference::isolate_address(isolate));
+ __ CallCFunction(ExternalReference::log_leave_external_function(), 1);
+ __ PopSafepointRegisters();
+ }
+
+ Label promote_scheduled_exception;
+ Label delete_allocated_handles;
+ Label leave_exit_frame;
+ Label return_value_loaded;
+
+ // Load value from ReturnValue.
+ __ Ldr(x0, return_value_operand);
+ __ Bind(&return_value_loaded);
+ // No more valid handles (the result handle was the last one). Restore
+ // previous handle scope.
+ __ Str(next_address_reg, MemOperand(handle_scope_base, kNextOffset));
+ if (__ emit_debug_code()) {
+ __ Ldr(w1, MemOperand(handle_scope_base, kLevelOffset));
+ __ Cmp(w1, level_reg);
+ __ Check(eq, AbortReason::kUnexpectedLevelAfterReturnFromApiCall);
+ }
+ __ Sub(level_reg, level_reg, 1);
+ __ Str(level_reg, MemOperand(handle_scope_base, kLevelOffset));
+ __ Ldr(x1, MemOperand(handle_scope_base, kLimitOffset));
+ __ Cmp(limit_reg, x1);
+ __ B(ne, &delete_allocated_handles);
+
+ // Leave the API exit frame.
+ __ Bind(&leave_exit_frame);
+ // Restore callee-saved registers.
+ __ Peek(x19, (spill_offset + 0) * kXRegSize);
+ __ Peek(x20, (spill_offset + 1) * kXRegSize);
+ __ Peek(x21, (spill_offset + 2) * kXRegSize);
+ __ Peek(x22, (spill_offset + 3) * kXRegSize);
+
+ if (stack_space_operand != nullptr) {
+ DCHECK_EQ(stack_space, 0);
+ // Load the number of stack slots to drop before LeaveExitFrame modifies sp.
+ __ Ldr(x19, *stack_space_operand);
}
- Label fast_elements_case;
- __ CompareAndBranch(kind, PACKED_ELEMENTS, eq, &fast_elements_case);
- GenerateInternalArrayConstructorCase(masm, HOLEY_ELEMENTS);
+ __ LeaveExitFrame(false, x1, x5);
+
+ // Check if the function scheduled an exception.
+ __ Mov(x5, ExternalReference::scheduled_exception_address(isolate));
+ __ Ldr(x5, MemOperand(x5));
+ __ JumpIfNotRoot(x5, RootIndex::kTheHoleValue, &promote_scheduled_exception);
- __ Bind(&fast_elements_case);
- GenerateInternalArrayConstructorCase(masm, PACKED_ELEMENTS);
+ if (stack_space_operand == nullptr) {
+ DCHECK_NE(stack_space, 0);
+ __ DropSlots(stack_space);
+ } else {
+ DCHECK_EQ(stack_space, 0);
+ __ DropArguments(x19);
+ }
+
+ __ Ret();
+
+ // Re-throw by promoting a scheduled exception.
+ __ Bind(&promote_scheduled_exception);
+ __ TailCallRuntime(Runtime::kPromoteScheduledException);
+
+ // HandleScope limit has changed. Delete allocated extensions.
+ __ Bind(&delete_allocated_handles);
+ __ Str(limit_reg, MemOperand(handle_scope_base, kLimitOffset));
+ // Save the return value in a callee-save register.
+ Register saved_result = x19;
+ __ Mov(saved_result, x0);
+ __ Mov(x0, ExternalReference::isolate_address(isolate));
+ __ CallCFunction(ExternalReference::delete_handle_scope_extensions(), 1);
+ __ Mov(x0, saved_result);
+ __ B(&leave_exit_frame);
+}
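+
+// Usage note: both callers below (Generate_CallApiCallback and
+// Generate_CallApiGetter) enter an exit frame with kCallApiFunctionSpillSpace
+// extra slots so that the spills at 'spill_offset' have somewhere to live.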
+
+} // namespace
+
+void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- cp : kTargetContext
+ // -- r1 : kApiFunctionAddress
+ // -- r2 : kArgc
+ // --
+ // -- sp[0] : last argument
+ // -- ...
+ // -- sp[(argc - 1) * 8] : first argument
+ // -- sp[(argc + 0) * 8] : receiver
+ // -- sp[(argc + 1) * 8] : kHolder
+ // -- sp[(argc + 2) * 8] : kCallData
+ // -----------------------------------
+
+ Register api_function_address = x1;
+ Register argc = x2;
+ Register scratch = x4;
+ Register index = x5; // For indexing MemOperands.
+
+ DCHECK(!AreAliased(api_function_address, argc, scratch, index));
+
+ // Stack offsets (without argc).
+ static constexpr int kReceiverOffset = 0;
+ static constexpr int kHolderOffset = kReceiverOffset + 1;
+ static constexpr int kCallDataOffset = kHolderOffset + 1;
+
+ // Extra stack arguments are: the receiver, kHolder, kCallData.
+ static constexpr int kExtraStackArgumentCount = 3;
+
+ typedef FunctionCallbackArguments FCA;
+
+ STATIC_ASSERT(FCA::kArgsLength == 6);
+ STATIC_ASSERT(FCA::kNewTargetIndex == 5);
+ STATIC_ASSERT(FCA::kDataIndex == 4);
+ STATIC_ASSERT(FCA::kReturnValueOffset == 3);
+ STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
+ STATIC_ASSERT(FCA::kIsolateIndex == 1);
+ STATIC_ASSERT(FCA::kHolderIndex == 0);
+
+ // Set up FunctionCallbackInfo's implicit_args on the stack as follows:
+ //
+ // Target state:
+ // sp[0 * kPointerSize]: kHolder
+ // sp[1 * kPointerSize]: kIsolate
+ // sp[2 * kPointerSize]: undefined (kReturnValueDefaultValue)
+ // sp[3 * kPointerSize]: undefined (kReturnValue)
+ // sp[4 * kPointerSize]: kData
+ // sp[5 * kPointerSize]: undefined (kNewTarget)
+
+ // Reserve space on the stack.
+ __ Sub(sp, sp, Operand(FCA::kArgsLength * kPointerSize));
+
+ // kHolder.
+ __ Add(index, argc, Operand(FCA::kArgsLength + kHolderOffset));
+ __ Ldr(scratch, MemOperand(sp, index, LSL, kPointerSizeLog2));
+ __ Str(scratch, MemOperand(sp, 0 * kPointerSize));
+
+ // kIsolate.
+ __ Mov(scratch, ExternalReference::isolate_address(masm->isolate()));
+ __ Str(scratch, MemOperand(sp, 1 * kPointerSize));
+
+ // kReturnValueDefaultValue, kReturnValue, and kNewTarget.
+ __ LoadRoot(scratch, RootIndex::kUndefinedValue);
+ __ Str(scratch, MemOperand(sp, 2 * kPointerSize));
+ __ Str(scratch, MemOperand(sp, 3 * kPointerSize));
+ __ Str(scratch, MemOperand(sp, 5 * kPointerSize));
+
+ // kData.
+ __ Add(index, argc, Operand(FCA::kArgsLength + kCallDataOffset));
+ __ Ldr(scratch, MemOperand(sp, index, LSL, kPointerSizeLog2));
+ __ Str(scratch, MemOperand(sp, 4 * kPointerSize));
+
+ // Keep a pointer to kHolder (= implicit_args) in a scratch register.
+ // We use it below to set up the FunctionCallbackInfo object.
+ __ Mov(scratch, sp);
+
+  // Allocate the FunctionCallbackInfo structure in the arguments' space,
+  // since it's not controlled by GC.
+ static constexpr int kApiStackSpace = 4;
+ static constexpr bool kDontSaveDoubles = false;
+
+ FrameScope frame_scope(masm, StackFrame::MANUAL);
+ __ EnterExitFrame(kDontSaveDoubles, x10,
+ kApiStackSpace + kCallApiFunctionSpillSpace);
+
+ // FunctionCallbackInfo::implicit_args_ (points at kHolder as set up above).
+ // Arguments are after the return address (pushed by EnterExitFrame()).
+ __ Str(scratch, MemOperand(sp, 1 * kPointerSize));
+
+ // FunctionCallbackInfo::values_ (points at the first varargs argument passed
+ // on the stack).
+ __ Add(scratch, scratch, Operand((FCA::kArgsLength - 1) * kPointerSize));
+ __ Add(scratch, scratch, Operand(argc, LSL, kPointerSizeLog2));
+ __ Str(scratch, MemOperand(sp, 2 * kPointerSize));
+
+ // FunctionCallbackInfo::length_.
+ __ Str(argc, MemOperand(sp, 3 * kPointerSize));
+
+ // We also store the number of slots to drop from the stack after returning
+ // from the API function here.
+ // Note: Unlike on other architectures, this stores the number of slots to
+ // drop, not the number of bytes. arm64 must always drop a slot count that is
+ // a multiple of two, and related helper functions (DropArguments) expect a
+ // register containing the slot count.
+ __ Add(scratch, argc, Operand(FCA::kArgsLength + kExtraStackArgumentCount));
+ __ Str(scratch, MemOperand(sp, 4 * kPointerSize));
+
+ // v8::InvocationCallback's argument.
+ DCHECK(!AreAliased(x0, api_function_address));
+ __ add(x0, sp, Operand(1 * kPointerSize));
+
+ ExternalReference thunk_ref = ExternalReference::invoke_function_callback();
+
+ // The current frame needs to be aligned.
+ DCHECK_EQ(FCA::kArgsLength % 2, 0);
+
+ // There are two stack slots above the arguments we constructed on the stack.
+ // TODO(jgruber): Document what these arguments are.
+ static constexpr int kStackSlotsAboveFCA = 2;
+ MemOperand return_value_operand(
+ fp, (kStackSlotsAboveFCA + FCA::kReturnValueOffset) * kPointerSize);
+
+ static constexpr int kSpillOffset = 1 + kApiStackSpace;
+ static constexpr int kUseStackSpaceOperand = 0;
+ MemOperand stack_space_operand(sp, 4 * kPointerSize);
+
+ AllowExternalCallThatCantCauseGC scope(masm);
+ CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
+ kUseStackSpaceOperand, &stack_space_operand,
+ kSpillOffset, return_value_operand);
+}
+
+void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
+ STATIC_ASSERT(PropertyCallbackArguments::kShouldThrowOnErrorIndex == 0);
+ STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 1);
+ STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 2);
+ STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 3);
+ STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 4);
+ STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 5);
+ STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 6);
+ STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 7);
+
+ Register receiver = ApiGetterDescriptor::ReceiverRegister();
+ Register holder = ApiGetterDescriptor::HolderRegister();
+ Register callback = ApiGetterDescriptor::CallbackRegister();
+ Register data = x4;
+ Register undef = x5;
+ Register isolate_address = x6;
+ Register name = x7;
+ DCHECK(!AreAliased(receiver, holder, callback, data, undef, isolate_address,
+ name));
+
+ __ Ldr(data, FieldMemOperand(callback, AccessorInfo::kDataOffset));
+ __ LoadRoot(undef, RootIndex::kUndefinedValue);
+ __ Mov(isolate_address, ExternalReference::isolate_address(masm->isolate()));
+ __ Ldr(name, FieldMemOperand(callback, AccessorInfo::kNameOffset));
+
+ // PropertyCallbackArguments:
+ // receiver, data, return value, return value default, isolate, holder,
+ // should_throw_on_error
+ // These are followed by the property name, which is also pushed below the
+ // exit frame to make the GC aware of it.
+ __ Push(receiver, data, undef, undef, isolate_address, holder, xzr, name);
+
+ // v8::PropertyCallbackInfo::args_ array and name handle.
+ static const int kStackUnwindSpace =
+ PropertyCallbackArguments::kArgsLength + 1;
+ static_assert(kStackUnwindSpace % 2 == 0,
+ "slots must be a multiple of 2 for stack pointer alignment");
+
+ // Load address of v8::PropertyAccessorInfo::args_ array and name handle.
+ __ Mov(x0, sp); // x0 = Handle<Name>
+ __ Add(x1, x0, 1 * kPointerSize); // x1 = v8::PCI::args_
+
+ const int kApiStackSpace = 1;
+
+ FrameScope frame_scope(masm, StackFrame::MANUAL);
+ __ EnterExitFrame(false, x10, kApiStackSpace + kCallApiFunctionSpillSpace);
+
+  // Create a v8::PropertyCallbackInfo object on the stack and initialize
+  // its args_ field.
+ __ Poke(x1, 1 * kPointerSize);
+ __ SlotAddress(x1, 1);
+ // x1 = v8::PropertyCallbackInfo&
+
+ ExternalReference thunk_ref =
+ ExternalReference::invoke_accessor_getter_callback();
+
+ Register api_function_address = x2;
+ Register js_getter = x4;
+ __ Ldr(js_getter, FieldMemOperand(callback, AccessorInfo::kJsGetterOffset));
+ __ Ldr(api_function_address,
+ FieldMemOperand(js_getter, Foreign::kForeignAddressOffset));
+
+ const int spill_offset = 1 + kApiStackSpace;
+ // +3 is to skip prolog, return address and name handle.
+ MemOperand return_value_operand(
+ fp, (PropertyCallbackArguments::kReturnValueOffset + 3) * kPointerSize);
+ MemOperand* const kUseStackSpaceConstant = nullptr;
+ CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
+ kStackUnwindSpace, kUseStackSpaceConstant,
+ spill_offset, return_value_operand);
+}
+
+void Builtins::Generate_DirectCEntry(MacroAssembler* masm) {
+ // The sole purpose of DirectCEntry is for movable callers (e.g. any general
+ // purpose Code object) to be able to call into C functions that may trigger
+ // GC and thus move the caller.
+ //
+ // DirectCEntry places the return address on the stack (updated by the GC),
+ // making the call GC safe. The irregexp backend relies on this.
+
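+  // The expected usage, as a sketch: the caller reserves the stack slot at
+  // sp[0], loads the target C function into x10, and calls this builtin; the
+  // return address stored below can then be relocated by the GC if the
+  // calling code object moves during the call.
+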
+ __ Poke(lr, 0); // Store the return address.
+ __ Blr(x10); // Call the C++ function.
+ __ Peek(lr, 0); // Return to calling code.
+ __ AssertFPCRState();
+ __ Ret();
}
#undef __
diff --git a/deps/v8/src/builtins/array-copywithin.tq b/deps/v8/src/builtins/array-copywithin.tq
index 6b9ba934a4..d492992232 100644
--- a/deps/v8/src/builtins/array-copywithin.tq
+++ b/deps/v8/src/builtins/array-copywithin.tq
@@ -2,19 +2,19 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-module array {
+namespace array {
macro ConvertToRelativeIndex(index: Number, length: Number): Number {
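    // For example, with length == 10: index -2 maps to 8, index -15 clamps
    // to 0, and index 12 clamps to 10.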
return index < 0 ? Max(index + length, 0) : Min(index, length);
}
// https://tc39.github.io/ecma262/#sec-array.prototype.copyWithin
- javascript builtin ArrayPrototypeCopyWithin(
+ transitioning javascript builtin ArrayPrototypeCopyWithin(
context: Context, receiver: Object, ...arguments): Object {
// 1. Let O be ? ToObject(this value).
const object: JSReceiver = ToObject_Inline(context, receiver);
// 2. Let len be ? ToLength(? Get(O, "length")).
- const length: Number = GetLengthProperty(context, object);
+ const length: Number = GetLengthProperty(object);
// 3. Let relativeTarget be ? ToInteger(target).
const relativeTarget: Number = ToInteger_Inline(context, arguments[0]);
@@ -63,18 +63,18 @@ module array {
// a. Let fromKey be ! ToString(from).
// b. Let toKey be ! ToString(to).
// c. Let fromPresent be ? HasProperty(O, fromKey).
- const fromPresent: Boolean = HasProperty(context, object, from);
+ const fromPresent: Boolean = HasProperty(object, from);
// d. If fromPresent is true, then.
if (fromPresent == True) {
// i. Let fromVal be ? Get(O, fromKey).
- const fromVal: Object = GetProperty(context, object, from);
+ const fromVal: Object = GetProperty(object, from);
// ii. Perform ? Set(O, toKey, fromVal, true).
- SetProperty(context, object, to, fromVal);
+ SetProperty(object, to, fromVal);
} else {
// i. Perform ? DeletePropertyOrThrow(O, toKey).
- DeleteProperty(context, object, to, kStrict);
+ DeleteProperty(object, to, kStrict);
}
// f. Let from be from + direction.
diff --git a/deps/v8/src/builtins/array-filter.tq b/deps/v8/src/builtins/array-filter.tq
new file mode 100644
index 0000000000..222e4e291b
--- /dev/null
+++ b/deps/v8/src/builtins/array-filter.tq
@@ -0,0 +1,244 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+namespace array {
+ transitioning javascript builtin
+ ArrayFilterLoopEagerDeoptContinuation(implicit context: Context)(
+ receiver: Object, callback: Object, thisArg: Object, array: Object,
+ initialK: Object, length: Object, initialTo: Object): Object {
+ // All continuation points in the optimized filter implementation are
+ // after the ToObject(O) call that ensures we are dealing with a
+ // JSReceiver.
+ //
+ // Also, this great mass of casts is necessary because the signature
+ // of Torque javascript builtins requires Object type for all parameters
+ // other than {context}.
+ const jsreceiver: JSReceiver =
+ Cast<JSReceiver>(receiver) otherwise unreachable;
+ const callbackfn: Callable = Cast<Callable>(callback) otherwise unreachable;
+ const outputArray: JSReceiver =
+ Cast<JSReceiver>(array) otherwise unreachable;
+ const numberK: Number = Cast<Number>(initialK) otherwise unreachable;
+ const numberTo: Number = Cast<Number>(initialTo) otherwise unreachable;
+ const numberLength: Number = Cast<Number>(length) otherwise unreachable;
+
+ return ArrayFilterLoopContinuation(
+ jsreceiver, callbackfn, thisArg, outputArray, jsreceiver, numberK,
+ numberLength, numberTo);
+ }
+
+ transitioning javascript builtin
+ ArrayFilterLoopLazyDeoptContinuation(implicit context: Context)(
+ receiver: Object, callback: Object, thisArg: Object, array: Object,
+ initialK: Object, length: Object, valueK: Object, initialTo: Object,
+ result: Object): Object {
+ // All continuation points in the optimized filter implementation are
+ // after the ToObject(O) call that ensures we are dealing with a
+ // JSReceiver.
+ const jsreceiver: JSReceiver =
+ Cast<JSReceiver>(receiver) otherwise unreachable;
+ const callbackfn: Callable = Cast<Callable>(callback) otherwise unreachable;
+ const outputArray: JSReceiver =
+ Cast<JSReceiver>(array) otherwise unreachable;
+ let numberK: Number = Cast<Number>(initialK) otherwise unreachable;
+ let numberTo: Number = Cast<Number>(initialTo) otherwise unreachable;
+ const numberLength: Number = Cast<Number>(length) otherwise unreachable;
+
+ // This custom lazy deopt point is right after the callback. filter() needs
+ // to pick up at the next step, which is setting the callback result in
+ // the output array. After incrementing k and to, we can glide into the loop
+ // continuation builtin.
+ if (ToBoolean(result)) {
+ CreateDataProperty(outputArray, numberTo, valueK);
+ numberTo = numberTo + 1;
+ }
+
+ numberK = numberK + 1;
+
+ return ArrayFilterLoopContinuation(
+ jsreceiver, callbackfn, thisArg, outputArray, jsreceiver, numberK,
+ numberLength, numberTo);
+ }
+
+ transitioning builtin ArrayFilterLoopContinuation(implicit context: Context)(
+ receiver: JSReceiver, callbackfn: Callable, thisArg: Object,
+ array: JSReceiver, o: JSReceiver, initialK: Number, length: Number,
+ initialTo: Number): Object {
+ let to: Number = initialTo;
+ // 5. Let k be 0.
+ // 6. Repeat, while k < len
+ for (let k: Number = initialK; k < length; k++) {
+ // 6a. Let Pk be ! ToString(k).
+      // k is guaranteed to be a non-negative integer, hence ToString is
+ // side-effect free and HasProperty/GetProperty do the conversion inline.
+
+ // 6b. Let kPresent be ? HasProperty(O, Pk).
+ const kPresent: Boolean = HasProperty_Inline(o, k);
+
+ // 6c. If kPresent is true, then
+ if (kPresent == True) {
+ // 6c. i. Let kValue be ? Get(O, Pk).
+ const kValue: Object = GetProperty(o, k);
+
+ // 6c. ii. Perform ? Call(callbackfn, T, <kValue, k, O>).
+ const result: Object = Call(context, callbackfn, thisArg, kValue, k, o);
+
+ // iii. If selected is true, then...
+ if (ToBoolean(result)) {
+ // 1. Perform ? CreateDataPropertyOrThrow(A, ToString(to), kValue).
+ CreateDataProperty(array, to, kValue);
+ // 2. Increase to by 1.
+ to = to + 1;
+ }
+ }
+
+ // 6d. Increase k by 1. (done by the loop).
+ }
+ return array;
+ }
+
+ transitioning macro
+ FilterVisitAllElements<FixedArrayType: type>(implicit context: Context)(
+ kind: constexpr ElementsKind, o: JSArray, len: Smi, callbackfn: Callable,
+ thisArg: Object, a: JSArray) labels Bailout(Smi, Smi) {
+ let k: Smi = 0;
+ let to: Smi = 0;
+ const fastOWitness: FastJSArrayWitness =
+ MakeWitness(Cast<FastJSArray>(o) otherwise goto Bailout(k, to));
+ const fastAWitness: FastJSArrayWitness =
+ MakeWitness(Cast<FastJSArray>(a) otherwise goto Bailout(k, to));
+
+ // Build a fast loop over the smi array.
+ for (; k < len; k++) {
+ let fastO: FastJSArray =
+ Testify(fastOWitness) otherwise goto Bailout(k, to);
+
+ // Ensure that we haven't walked beyond a possibly updated length.
+ if (k >= fastO.length) goto Bailout(k, to);
+
+ try {
+ const value: Object =
+ LoadElementNoHole<FixedArrayType>(fastO, k) otherwise FoundHole;
+ const result: Object =
+ Call(context, callbackfn, thisArg, value, k, fastO);
+ if (ToBoolean(result)) {
+ try {
+ // Since the call to {callbackfn} is observable, we can't
+ // use the Bailout label until we've successfully stored.
+ // Hence the {SlowStore} label.
+ const fastA: FastJSArray =
+ Testify(fastAWitness) otherwise SlowStore;
+ if (fastA.length != to) goto SlowStore;
+ BuildAppendJSArray(kind, fastA, value)
+ otherwise SlowStore;
+ }
+ label SlowStore {
+ CreateDataProperty(a, to, value);
+ }
+ to = to + 1;
+ }
+ }
+ label FoundHole {}
+ }
+ }
+
+ transitioning macro FastArrayFilter(implicit context: Context)(
+ o: JSReceiver, len: Number, callbackfn: Callable, thisArg: Object,
+ array: JSReceiver): Object
+ labels Bailout(Smi, Smi) {
+ let k: Smi = 0;
+ let to: Smi = 0;
+ const smiLen: Smi = Cast<Smi>(len) otherwise goto Bailout(k, to);
+ const fastArray: FastJSArray =
+ Cast<FastJSArray>(array) otherwise goto Bailout(k, to);
+ let fastO: FastJSArray = Cast<FastJSArray>(o) otherwise goto Bailout(k, to);
+ EnsureArrayPushable(fastArray.map) otherwise goto Bailout(k, to);
+ const elementsKind: ElementsKind = fastO.map.elements_kind;
+ if (IsElementsKindLessThanOrEqual(elementsKind, HOLEY_SMI_ELEMENTS)) {
+ FilterVisitAllElements<FixedArray>(
+ HOLEY_SMI_ELEMENTS, fastO, smiLen, callbackfn, thisArg, fastArray)
+ otherwise Bailout;
+ } else if (IsElementsKindLessThanOrEqual(elementsKind, HOLEY_ELEMENTS)) {
+ FilterVisitAllElements<FixedArray>(
+ HOLEY_ELEMENTS, fastO, smiLen, callbackfn, thisArg, fastArray)
+ otherwise Bailout;
+ } else {
+ assert(IsDoubleElementsKind(elementsKind));
+ FilterVisitAllElements<FixedDoubleArray>(
+ HOLEY_DOUBLE_ELEMENTS, fastO, smiLen, callbackfn, thisArg, fastArray)
+ otherwise Bailout;
+ }
+ return array;
+ }
+
+  // This macro creates a 0-length array with the ElementsKind of the
+  // receiver if possible; otherwise it bails out, so that the caller knows
+  // the slow path must be taken.
+ macro FastFilterSpeciesCreate(implicit context: Context)(
+ receiver: JSReceiver): JSReceiver labels Slow {
+ const len: Smi = 0;
+ if (IsArraySpeciesProtectorCellInvalid()) goto Slow;
+ const o: FastJSArray = Cast<FastJSArray>(receiver) otherwise Slow;
+ const newMap: Map =
+ LoadJSArrayElementsMap(o.map.elements_kind, LoadNativeContext(context));
+ return AllocateJSArray(PACKED_SMI_ELEMENTS, newMap, len, len);
+ }
+
+ // https://tc39.github.io/ecma262/#sec-array.prototype.filter
+ transitioning javascript builtin
+ ArrayFilter(implicit context: Context)(receiver: Object, ...arguments):
+ Object {
+ try {
+ if (IsNullOrUndefined(receiver)) {
+ goto NullOrUndefinedError;
+ }
+
+ // 1. Let O be ? ToObject(this value).
+ const o: JSReceiver = ToObject_Inline(context, receiver);
+
+ // 2. Let len be ? ToLength(? Get(O, "length")).
+ const len: Number = GetLengthProperty(o);
+
+ // 3. If IsCallable(callbackfn) is false, throw a TypeError exception.
+ if (arguments.length == 0) {
+ goto TypeError;
+ }
+ const callbackfn: Callable =
+ Cast<Callable>(arguments[0]) otherwise TypeError;
+
+ // 4. If thisArg is present, let T be thisArg; else let T be undefined.
+ const thisArg: Object = arguments.length > 1 ? arguments[1] : Undefined;
+ let array: JSReceiver;
+
+ // Special cases.
+ let k: Number = 0;
+ let to: Number = 0;
+ try {
+ array = FastFilterSpeciesCreate(o) otherwise SlowSpeciesCreate;
+
+ try {
+ return FastArrayFilter(o, len, callbackfn, thisArg, array)
+ otherwise Bailout;
+ }
+ label Bailout(kValue: Smi, toValue: Smi) deferred {
+ k = kValue;
+ to = toValue;
+ }
+ }
+ label SlowSpeciesCreate {
+ array = ArraySpeciesCreate(context, receiver, 0);
+ }
+
+ return ArrayFilterLoopContinuation(
+ o, callbackfn, thisArg, array, o, k, len, to);
+ }
+ label TypeError deferred {
+ ThrowTypeError(context, kCalledNonCallable, arguments[0]);
+ }
+ label NullOrUndefinedError deferred {
+ ThrowTypeError(
+ context, kCalledOnNullOrUndefined, 'Array.prototype.filter');
+ }
+ }
+}
diff --git a/deps/v8/src/builtins/array-foreach.tq b/deps/v8/src/builtins/array-foreach.tq
index 5a189a517f..7967058e6b 100644
--- a/deps/v8/src/builtins/array-foreach.tq
+++ b/deps/v8/src/builtins/array-foreach.tq
@@ -2,10 +2,49 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-module array {
- macro ArrayForEachTorqueContinuation(
- context: Context, o: JSReceiver, len: Number, callbackfn: Callable,
- thisArg: Object, initialK: Number): Object {
+namespace array {
+ transitioning javascript builtin
+ ArrayForEachLoopEagerDeoptContinuation(implicit context: Context)(
+ receiver: Object, callback: Object, thisArg: Object, initialK: Object,
+ length: Object): Object {
+    // All continuation points in the optimized forEach implementation are
+ // after the ToObject(O) call that ensures we are dealing with a
+ // JSReceiver.
+ const jsreceiver: JSReceiver =
+ Cast<JSReceiver>(receiver) otherwise unreachable;
+ const callbackfn: Callable = Cast<Callable>(callback) otherwise unreachable;
+ const numberK: Number = Cast<Number>(initialK) otherwise unreachable;
+ const numberLength: Number = Cast<Number>(length) otherwise unreachable;
+
+ return ArrayForEachLoopContinuation(
+ jsreceiver, callbackfn, thisArg, Undefined, jsreceiver, numberK,
+ numberLength, Undefined);
+ }
+
+ transitioning javascript builtin
+ ArrayForEachLoopLazyDeoptContinuation(implicit context: Context)(
+ receiver: Object, callback: Object, thisArg: Object, initialK: Object,
+ length: Object, result: Object): Object {
+    // All continuation points in the optimized forEach implementation are
+ // after the ToObject(O) call that ensures we are dealing with a
+ // JSReceiver.
+ const jsreceiver: JSReceiver =
+ Cast<JSReceiver>(receiver) otherwise unreachable;
+ const callbackfn: Callable = Cast<Callable>(callback) otherwise unreachable;
+ const numberK: Number = Cast<Number>(initialK) otherwise unreachable;
+ const numberLength: Number = Cast<Number>(length) otherwise unreachable;
+
+ return ArrayForEachLoopContinuation(
+ jsreceiver, callbackfn, thisArg, Undefined, jsreceiver, numberK,
+ numberLength, Undefined);
+ }
+
+ transitioning builtin ArrayForEachLoopContinuation(implicit context: Context)(
+ receiver: JSReceiver, callbackfn: Callable, thisArg: Object,
+ array: Object, o: JSReceiver, initialK: Number, len: Number,
+ to: Object): Object {
+ // variables {array} and {to} are ignored.
+
// 5. Let k be 0.
// 6. Repeat, while k < len
for (let k: Number = initialK; k < len; k = k + 1) {
@@ -14,12 +53,12 @@ module array {
// side-effect free and HasProperty/GetProperty do the conversion inline.
// 6b. Let kPresent be ? HasProperty(O, Pk).
- const kPresent: Boolean = HasProperty_Inline(context, o, k);
+ const kPresent: Boolean = HasProperty_Inline(o, k);
// 6c. If kPresent is true, then
if (kPresent == True) {
// 6c. i. Let kValue be ? Get(O, Pk).
- const kValue: Object = GetProperty(context, o, k);
+ const kValue: Object = GetProperty(o, k);
// 6c. ii. Perform ? Call(callbackfn, T, <kValue, k, O>).
Call(context, callbackfn, thisArg, kValue, k, o);
@@ -30,114 +69,50 @@ module array {
return Undefined;
}
- javascript builtin ArrayForEachLoopEagerDeoptContinuation(
- context: Context, receiver: Object, callback: Object, thisArg: Object,
- initialK: Object, length: Object): Object {
- // The unsafe Cast is safe because all continuation points in forEach are
- // after the ToObject(O) call that ensures we are dealing with a
- // JSReceiver.
- const jsreceiver: JSReceiver = UnsafeCast<JSReceiver>(receiver);
- return ArrayForEachLoopContinuation(
- context, jsreceiver, callback, thisArg, Undefined, jsreceiver, initialK,
- length, Undefined);
- }
-
- javascript builtin ArrayForEachLoopLazyDeoptContinuation(
- context: Context, receiver: Object, callback: Object, thisArg: Object,
- initialK: Object, length: Object, result: Object): Object {
- // The unsafe Cast is safe because all continuation points in forEach are
- // after the ToObject(O) call that ensures we are dealing with a
- // JSReceiver.
- const jsreceiver: JSReceiver = UnsafeCast<JSReceiver>(receiver);
- return ArrayForEachLoopContinuation(
- context, jsreceiver, callback, thisArg, Undefined, jsreceiver, initialK,
- length, Undefined);
- }
+ transitioning macro VisitAllElements<FixedArrayType: type>(implicit context:
+ Context)(
+ o: JSArray, len: Smi, callbackfn: Callable, thisArg: Object) labels
+ Bailout(Smi) {
+ let k: Smi = 0;
+ const fastOWitness: FastJSArrayWitness =
+ MakeWitness(Cast<FastJSArray>(o) otherwise goto Bailout(k));
- builtin ArrayForEachLoopContinuation(
- context: Context, receiver: JSReceiver, callback: Object, thisArg: Object,
- array: Object, object: Object, initialK: Object, length: Object,
- to: Object): Object {
- try {
- const callbackfn: Callable =
- Cast<Callable>(callback) otherwise Unexpected;
- const k: Number = Cast<Number>(initialK) otherwise Unexpected;
- const numberLength: Number = Cast<Number>(length) otherwise Unexpected;
+ // Build a fast loop over the smi array.
+ for (; k < len; k++) {
+ let fastO: FastJSArray = Testify(fastOWitness) otherwise goto Bailout(k);
- return ArrayForEachTorqueContinuation(
- context, receiver, numberLength, callbackfn, thisArg, k);
- }
- label Unexpected deferred {
- unreachable;
- }
- }
-
- macro VisitAllElements<FixedArrayType: type>(
- context: Context, a: JSArray, len: Smi, callbackfn: Callable,
- thisArg: Object): void
- labels Bailout(Smi) {
- let k: Smi = 0;
- const map: Map = a.map;
+ // Ensure that we haven't walked beyond a possibly updated length.
+ if (k >= fastO.length) goto Bailout(k);
- try {
- // Build a fast loop over the smi array.
- for (; k < len; k = k + 1) {
- // Ensure that the map didn't change.
- if (map != a.map) goto Slow;
- // Ensure that we haven't walked beyond a possibly updated length.
- if (k >= a.length) goto Slow;
-
- try {
- const value: Object =
- LoadElementNoHole<FixedArrayType>(a, k) otherwise FoundHole;
- Call(context, callbackfn, thisArg, value, k, a);
- }
- label FoundHole {
- // If we found the hole, we need to bail out if the initial
- // array prototype has had elements inserted. This is preferable
- // to walking the prototype chain looking for elements.
-
- if (IsNoElementsProtectorCellInvalid()) goto Bailout(k);
- }
+ try {
+ const value: Object =
+ LoadElementNoHole<FixedArrayType>(fastO, k) otherwise FoundHole;
+ Call(context, callbackfn, thisArg, value, k, fastO);
}
- }
- label Slow deferred {
- goto Bailout(k);
+ label FoundHole {}
}
}
- macro FastArrayForEach(
- context: Context, o: JSReceiver, len: Number, callbackfn: Callable,
- thisArg: Object): Object
+ transitioning macro FastArrayForEach(implicit context: Context)(
+ o: JSReceiver, len: Number, callbackfn: Callable, thisArg: Object): Object
labels Bailout(Smi) {
let k: Smi = 0;
- try {
- const smiLen: Smi = Cast<Smi>(len) otherwise Slow;
- const a: JSArray = Cast<JSArray>(o) otherwise Slow;
- const map: Map = a.map;
-
- if (!IsPrototypeInitialArrayPrototype(context, map)) goto Slow;
- const elementsKind: ElementsKind = map.elements_kind;
- if (!IsFastElementsKind(elementsKind)) goto Slow;
-
- if (IsElementsKindGreaterThan(elementsKind, HOLEY_ELEMENTS)) {
- VisitAllElements<FixedDoubleArray>(
- context, a, smiLen, callbackfn, thisArg)
- otherwise Bailout;
- } else {
- VisitAllElements<FixedArray>(context, a, smiLen, callbackfn, thisArg)
- otherwise Bailout;
- }
- }
- label Slow deferred {
- goto Bailout(k);
+ const smiLen: Smi = Cast<Smi>(len) otherwise goto Bailout(k);
+ let fastO: FastJSArray = Cast<FastJSArray>(o) otherwise goto Bailout(k);
+ const elementsKind: ElementsKind = fastO.map.elements_kind;
+ if (IsElementsKindGreaterThan(elementsKind, HOLEY_ELEMENTS)) {
+ VisitAllElements<FixedDoubleArray>(fastO, smiLen, callbackfn, thisArg)
+ otherwise Bailout;
+ } else {
+ VisitAllElements<FixedArray>(fastO, smiLen, callbackfn, thisArg)
+ otherwise Bailout;
}
return Undefined;
}
// https://tc39.github.io/ecma262/#sec-array.prototype.foreach
- javascript builtin ArrayForEach(
- context: Context, receiver: Object, ...arguments): Object {
+ transitioning javascript builtin
+ ArrayForEach(context: Context, receiver: Object, ...arguments): Object {
try {
if (IsNullOrUndefined(receiver)) {
goto NullOrUndefinedError;
@@ -147,7 +122,7 @@ module array {
const o: JSReceiver = ToObject_Inline(context, receiver);
// 2. Let len be ? ToLength(? Get(O, "length")).
- const len: Number = GetLengthProperty(context, o);
+ const len: Number = GetLengthProperty(o);
// 3. If IsCallable(callbackfn) is false, throw a TypeError exception.
if (arguments.length == 0) {
@@ -162,15 +137,15 @@ module array {
// Special cases.
let k: Number = 0;
try {
- return FastArrayForEach(context, o, len, callbackfn, thisArg)
+ return FastArrayForEach(o, len, callbackfn, thisArg)
otherwise Bailout;
}
label Bailout(kValue: Smi) deferred {
k = kValue;
}
- return ArrayForEachTorqueContinuation(
- context, o, len, callbackfn, thisArg, k);
+ return ArrayForEachLoopContinuation(
+ o, callbackfn, thisArg, Undefined, o, k, len, Undefined);
}
label TypeError deferred {
ThrowTypeError(context, kCalledNonCallable, arguments[0]);
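The forEach rewrite above follows a pattern used throughout these Torque files: run a tight loop while the receiver can be treated as a FastJSArray, and on any invalidation bail out with the current index so a generic continuation resumes at exactly that element. A minimal TypeScript sketch of that control flow, with illustrative names (fastForEach, genericContinuation) that do not exist in V8:

```typescript
type Callback = (value: unknown, index: number, receiver: unknown) => void;

// Fast path: walk a dense array while it still looks safe to do so.
// Returns the index at which it had to stop, or -1 when it finished.
function fastForEach(arr: unknown[], len: number, cb: Callback): number {
  for (let k = 0; k < len; k++) {
    // Mirror of the witness re-check: bail out if the array shrank.
    if (k >= arr.length) return k;
    cb(arr[k], k, arr);
  }
  return -1;
}

// Generic continuation: resume from index k with property lookups,
// as ArrayForEachLoopContinuation does after a bailout.
function genericContinuation(o: Record<number, unknown>, len: number,
                             k: number, cb: Callback): void {
  for (; k < len; k++) {
    if (k in o) cb(o[k], k, o);
  }
}

function forEachSketch(arr: unknown[], cb: Callback): void {
  const len = arr.length;
  const stoppedAt = fastForEach(arr, len, cb);
  if (stoppedAt >= 0) genericContinuation(arr, len, stoppedAt, cb);
}

// The callback shrinks the array mid-iteration; iteration then finishes
// on the generic path without revisiting earlier indices.
forEachSketch([1, 2, 3, 4], (_v, i, r) => {
  if (i === 0) (r as unknown[]).length = 2;
});
```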
diff --git a/deps/v8/src/builtins/array-join.tq b/deps/v8/src/builtins/array-join.tq
new file mode 100644
index 0000000000..16ac7a7104
--- /dev/null
+++ b/deps/v8/src/builtins/array-join.tq
@@ -0,0 +1,660 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+namespace array {
+ type LoadJoinElementFn = builtin(Context, JSReceiver, Number) => Object;
+
+  // Fast C call that writes the elements of a fixed array (see
+  // Buffer.fixedArray) out to a single sequential string.
+ extern macro
+ ArrayBuiltinsAssembler::CallJSArrayArrayJoinConcatToSequentialString(
+ FixedArray, intptr, String, String): String;
+
+ transitioning builtin LoadJoinElement<T: type>(
+ context: Context, receiver: JSReceiver, k: Number): Object {
+ return GetProperty(receiver, k);
+ }
+
+ LoadJoinElement<DictionaryElements>(
+ context: Context, receiver: JSReceiver, k: Number): Object {
+ const array: JSArray = UnsafeCast<JSArray>(receiver);
+ const dict: NumberDictionary = UnsafeCast<NumberDictionary>(array.elements);
+ try {
+ return BasicLoadNumberDictionaryElement(dict, Signed(Convert<uintptr>(k)))
+ otherwise IfNoData, IfHole;
+ }
+ label IfNoData deferred {
+ return GetProperty(receiver, k);
+ }
+ label IfHole {
+ return kEmptyString;
+ }
+ }
+
+ LoadJoinElement<FastSmiOrObjectElements>(
+ context: Context, receiver: JSReceiver, k: Number): Object {
+ const array: JSArray = UnsafeCast<JSArray>(receiver);
+ const fixedArray: FixedArray = UnsafeCast<FixedArray>(array.elements);
+ const element: Object = fixedArray[UnsafeCast<Smi>(k)];
+ return element == Hole ? kEmptyString : element;
+ }
+
+ LoadJoinElement<FastDoubleElements>(
+ context: Context, receiver: JSReceiver, k: Number): Object {
+ const array: JSArray = UnsafeCast<JSArray>(receiver);
+ const fixedDoubleArray: FixedDoubleArray =
+ UnsafeCast<FixedDoubleArray>(array.elements);
+ try {
+ const element: float64 = LoadDoubleWithHoleCheck(
+ fixedDoubleArray, UnsafeCast<Smi>(k)) otherwise IfHole;
+ return AllocateHeapNumberWithValue(element);
+ }
+ label IfHole {
+ return kEmptyString;
+ }
+ }
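All three fast LoadJoinElement specializations map a hole directly to the empty string, and this is observable from JavaScript: holes contribute nothing to the joined result except their separators. For example, in TypeScript:

```typescript
// Holes contribute only their separators to the joined string.
const sparse = [1, , 3];            // index 1 is a hole
console.log(sparse.join('-'));      // "1--3"

// undefined and null elements are likewise rendered as empty strings.
console.log([undefined, 'a', null].join('-'));  // "-a-"
```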
+
+ builtin LoadJoinTypedElement<T: type>(
+ context: Context, receiver: JSReceiver, k: Number): Object {
+ const typedArray: JSTypedArray = UnsafeCast<JSTypedArray>(receiver);
+ assert(!IsDetachedBuffer(typedArray.buffer));
+ return typed_array::LoadFixedTypedArrayElementAsTagged(
+ typedArray.data_ptr, UnsafeCast<Smi>(k),
+ typed_array::KindForArrayType<T>(), SMI_PARAMETERS);
+ }
+
+ transitioning builtin ConvertToLocaleString(
+ context: Context, element: Object, locales: Object,
+ options: Object): String {
+ if (IsNullOrUndefined(element)) return kEmptyString;
+
+ const prop: Object = GetProperty(element, 'toLocaleString');
+ try {
+ const callable: Callable = Cast<Callable>(prop) otherwise TypeError;
+ let result: Object;
+ if (IsNullOrUndefined(locales)) {
+ result = Call(context, callable, element);
+ } else if (IsNullOrUndefined(options)) {
+ result = Call(context, callable, element, locales);
+ } else {
+ result = Call(context, callable, element, locales, options);
+ }
+ return ToString_Inline(context, result);
+ }
+ label TypeError {
+ ThrowTypeError(context, kCalledNonCallable, prop);
+ }
+ }
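ConvertToLocaleString invokes each element's own toLocaleString, forwarding locales and options only when they were supplied, and then coerces the result to a string. A hedged TypeScript sketch of that three-way dispatch (convertToLocaleString is an illustrative stand-in for the builtin):

```typescript
function convertToLocaleString(element: unknown, locales?: string,
                               options?: object): string {
  if (element === null || element === undefined) return '';
  const fn = (element as { toLocaleString: unknown }).toLocaleString;
  if (typeof fn !== 'function') throw new TypeError('not callable');
  // Forward only the arguments that were actually supplied.
  let result: unknown;
  if (locales === undefined) {
    result = fn.call(element);
  } else if (options === undefined) {
    result = fn.call(element, locales);
  } else {
    result = fn.call(element, locales, options);
  }
  return String(result);  // ToString_Inline on the callback's result
}

console.log(convertToLocaleString(1234.5, 'en-US'));  // e.g. "1,234.5"
```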
+
+  // Verifies that the current elements accessor for the JSArray can still be
+  // safely used (see LoadJoinElement<ElementsAccessor>).
+ macro CannotUseSameArrayAccessor<T: type>(implicit context: Context)(
+ loadFn: LoadJoinElementFn, receiver: JSReceiver, originalMap: Map,
+ originalLen: Number): never
+ labels Cannot, Can;
+
+ CannotUseSameArrayAccessor<JSArray>(implicit context: Context)(
+ loadFn: LoadJoinElementFn, receiver: JSReceiver, originalMap: Map,
+ originalLen: Number): never
+ labels Cannot, Can {
+ if (loadFn == LoadJoinElement<GenericElementsAccessor>) goto Can;
+
+ const array: JSArray = UnsafeCast<JSArray>(receiver);
+ if (originalMap != array.map) goto Cannot;
+ if (originalLen != array.length) goto Cannot;
+ if (IsNoElementsProtectorCellInvalid()) goto Cannot;
+ goto Can;
+ }
+
+ CannotUseSameArrayAccessor<JSTypedArray>(implicit context: Context)(
+ loadFn: LoadJoinElementFn, receiver: JSReceiver, initialMap: Map,
+ initialLen: Number): never
+ labels Cannot, Can {
+ const typedArray: JSTypedArray = UnsafeCast<JSTypedArray>(receiver);
+ if (IsDetachedBuffer(typedArray.buffer)) goto Cannot;
+ goto Can;
+ }
+
+ // Calculates the running total length of the resulting string. If the
+ // calculated length exceeds the maximum string length (see
+ // String::kMaxLength), throws a range error.
+ macro AddStringLength(implicit context: Context)(lenA: intptr, lenB: intptr):
+ intptr {
+ try {
+ const length: intptr = TryIntPtrAdd(lenA, lenB) otherwise IfOverflow;
+ if (length > kStringMaxLength) goto IfOverflow;
+ return length;
+ }
+ label IfOverflow deferred {
+ ThrowInvalidStringLength(context);
+ }
+ }
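AddStringLength is an overflow-checked addition capped at String::kMaxLength, with both failure modes funneled into the same invalid-string-length error. A small TypeScript analogue; the constant below is a placeholder, since the real kMaxLength is platform-dependent:

```typescript
const K_STRING_MAX_LENGTH = 2 ** 30 - 25;  // placeholder, not V8's exact cap

function addStringLength(lenA: number, lenB: number): number {
  const length = lenA + lenB;
  // A single cap check covers both the overflow case and the
  // too-long case handled by the Torque macro.
  if (!Number.isSafeInteger(length) || length > K_STRING_MAX_LENGTH) {
    throw new RangeError('Invalid string length');
  }
  return length;
}

console.log(addStringLength(10, 20));  // 30
```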
+
+  // Stores an element in a fixed array and returns the fixed array. If the
+  // fixed array is not large enough, creates and returns a new, larger fixed
+  // array that contains all previously stored elements and the new element.
+ macro StoreAndGrowFixedArray<T: type>(
+ fixedArray: FixedArray, index: intptr, element: T): FixedArray {
+ const length: intptr = fixedArray.length_intptr;
+ assert(index <= length);
+ if (index < length) {
+ fixedArray[index] = element;
+ return fixedArray;
+ } else
+ deferred {
+ const newLength: intptr = CalculateNewElementsCapacity(length);
+ assert(index < newLength);
+ const newfixedArray: FixedArray =
+ ExtractFixedArray(fixedArray, 0, length, newLength, kFixedArrays);
+ newfixedArray[index] = element;
+ return newfixedArray;
+ }
+ }
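StoreAndGrowFixedArray gives amortized O(1) appends: in-bounds stores write directly, and out-of-bounds stores copy into a larger backing store. A TypeScript sketch; the growth formula used here (old + old/2 + 16) is assumed from V8's usual new-capacity heuristic rather than taken from this file:

```typescript
// Grow-on-demand store, mirroring StoreAndGrowFixedArray.
function storeAndGrow<T>(backing: T[], capacity: number, index: number,
                         element: T): { backing: T[]; capacity: number } {
  if (index < capacity) {
    backing[index] = element;
    return { backing, capacity };
  }
  // Assumed growth heuristic: old + old/2 + 16.
  const newCapacity = capacity + (capacity >> 1) + 16;
  const grown = backing.slice();  // copy all previously stored elements
  grown[index] = element;
  return { backing: grown, capacity: newCapacity };
}

let store = { backing: [] as number[], capacity: 0 };
for (let i = 0; i < 40; i++) {
  store = storeAndGrow(store.backing, store.capacity, i, i);
}
console.log(store.capacity);  // 40, after growing 0 -> 16 -> 40
```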
+
+  // Contains the information necessary to create a single, separator-delimited,
+  // flattened one- or two-byte string.
+  // The buffer is maintained and updated by BufferInit(), BufferAdd(), and
+  // BufferAddSeparators().
+ struct Buffer {
+ // Fixed array holding elements that are either:
+ // 1) String result of `ToString(next)`.
+ // 2) Smi representing the number of consecutive separators.
+    // `BufferJoin()` iterates over these entries and writes them out to a
+    // single flat string.
+ //
+    // To save space and reduce reads and writes, separators are only written
+    // out explicitly at the beginning, at the end, or when more than one
+    // occurs in a row; a single separator between two strings is implicit.
+ //
+ // No hole example
+ // receiver: ['hello', 'world']
+ // fixedArray: ['hello', 'world']
+ //
+ // Hole example
+ // receiver: [<hole>, 'hello', <hole>, 'world', <hole>]
+ // fixedArray: [1, 'hello', 2, 'world', 1]
+ fixedArray: FixedArray;
+
+ // Index to insert a new entry into `fixedArray`.
+ index: intptr;
+
+ // Running total of the resulting string length.
+ totalStringLength: intptr;
+
+ // `true` if the separator and all strings in the buffer are one-byte,
+ // otherwise `false`.
+ isOneByte: bool;
+ }
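The encoding described above can be reproduced mechanically: one separator is owed before every element except the first, but a count is materialized only at the start, at the end, or when more than one separator accumulates. A TypeScript model of how the fixedArray entries come about (encode is illustrative only):

```typescript
type Entry = string | number;  // string element, or count of separators

function encode(elements: (string | undefined)[]): Entry[] {
  const out: Entry[] = [];
  let nofSeparators = 0;
  for (let k = 0; k < elements.length; k++) {
    if (k > 0) nofSeparators++;  // one separator before each non-first element
    const e = elements[k];
    if (e === undefined || e === '') continue;  // holes add only separators
    // Write the count only at the beginning or for runs longer than one.
    if (nofSeparators > 0 && (out.length === 0 || nofSeparators > 1)) {
      out.push(nofSeparators);
    }
    out.push(e);
    nofSeparators = 0;
  }
  if (nofSeparators > 0) out.push(nofSeparators);  // trailing separators
  return out;
}

// receiver: [<hole>, 'hello', <hole>, 'world', <hole>]
console.log(encode([undefined, 'hello', undefined, 'world', undefined]));
// -> [ 1, 'hello', 2, 'world', 1 ]
console.log(encode(['hello', 'world']));  // -> [ 'hello', 'world' ]
```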
+
+ macro BufferInit(len: uintptr, sep: String): Buffer {
+ const cappedBufferSize: intptr = len > kMaxNewSpaceFixedArrayElements ?
+ kMaxNewSpaceFixedArrayElements :
+ Signed(len);
+ assert(cappedBufferSize > 0);
+ const fixedArray: FixedArray = AllocateZeroedFixedArray(cappedBufferSize);
+ const isOneByte: bool = HasOnlyOneByteChars(sep.instanceType);
+ return Buffer{fixedArray, 0, 0, isOneByte};
+ }
+
+ macro BufferAdd(implicit context: Context)(
+ initialBuffer: Buffer, str: String, nofSeparators: intptr,
+ separatorLength: intptr): Buffer {
+ let buffer: Buffer = initialBuffer;
+    // Add separators if necessary (at the beginning, or when more than one
+    // separator has accumulated).
+ const writeSeparators: bool = buffer.index == 0 | nofSeparators > 1;
+ buffer = BufferAddSeparators(
+ buffer, nofSeparators, separatorLength, writeSeparators);
+
+ const totalStringLength: intptr =
+ AddStringLength(buffer.totalStringLength, str.length);
+ let index: intptr = buffer.index;
+ const fixedArray: FixedArray =
+ StoreAndGrowFixedArray(buffer.fixedArray, index++, str);
+ const isOneByte: bool =
+ HasOnlyOneByteChars(str.instanceType) & buffer.isOneByte;
+ return Buffer{fixedArray, index, totalStringLength, isOneByte};
+ }
+
+ macro BufferAddSeparators(implicit context: Context)(
+ buffer: Buffer, nofSeparators: intptr, separatorLength: intptr,
+ write: bool): Buffer {
+ if (nofSeparators == 0 || separatorLength == 0) return buffer;
+
+ const nofSeparatorsInt: intptr = nofSeparators;
+ const sepsLen: intptr = separatorLength * nofSeparatorsInt;
+ // Detect integer overflow
+ // TODO(tebbi): Replace with overflow-checked multiplication.
+ if (sepsLen / separatorLength != nofSeparatorsInt) deferred {
+ ThrowInvalidStringLength(context);
+ }
+
+ const totalStringLength: intptr =
+ AddStringLength(buffer.totalStringLength, sepsLen);
+ let index: intptr = buffer.index;
+ let fixedArray: FixedArray = buffer.fixedArray;
+ if (write) deferred {
+ fixedArray = StoreAndGrowFixedArray(
+ buffer.fixedArray, index++, Convert<Smi>(nofSeparatorsInt));
+ }
+ return Buffer{fixedArray, index, totalStringLength, buffer.isOneByte};
+ }
+
+ macro BufferJoin(implicit context: Context)(buffer: Buffer, sep: String):
+ String {
+ assert(IsValidPositiveSmi(buffer.totalStringLength));
+ if (buffer.totalStringLength == 0) return kEmptyString;
+
+ // Fast path when there's only one buffer element.
+ if (buffer.index == 1) {
+ const fixedArray: FixedArray = buffer.fixedArray;
+ typeswitch (fixedArray[0]) {
+ // When the element is a string, just return it and completely avoid
+ // allocating another string.
+ case (str: String): {
+ return str;
+ }
+
+ // When the element is a smi, use StringRepeat to quickly build a memory
+ // efficient separator repeated string.
+ case (nofSeparators: Number): {
+ return StringRepeat(context, sep, nofSeparators);
+ }
+ case (obj: Object): {
+ unreachable;
+ }
+ }
+ }
+
+ const length: uint32 = Convert<uint32>(Unsigned(buffer.totalStringLength));
+ const r: String = buffer.isOneByte ? AllocateSeqOneByteString(length) :
+ AllocateSeqTwoByteString(length);
+ return CallJSArrayArrayJoinConcatToSequentialString(
+ buffer.fixedArray, buffer.index, sep, r);
+ }
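Both single-entry fast paths in BufferJoin are observable from JavaScript: a lone surviving string is returned as-is, and a lone separator count becomes a repeated-separator string, which is exactly what joining an all-holes array produces:

```typescript
// A join that produces only separators hits the StringRepeat fast path:
console.log(new Array(4).join('-'));  // "---" (three separators, no elements)

// A single surviving element is returned without building a new string:
console.log(['only'].join('+'));      // "only"
```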
+
+ transitioning macro ArrayJoinImpl<T: type>(implicit context: Context)(
+ receiver: JSReceiver, sep: String, lengthNumber: Number,
+ useToLocaleString: constexpr bool, locales: Object, options: Object,
+ initialLoadFn: LoadJoinElementFn): String {
+ const initialMap: Map = receiver.map;
+ const len: uintptr = Convert<uintptr>(lengthNumber);
+ const separatorLength: intptr = sep.length;
+ let nofSeparators: intptr = 0;
+ let loadFn: LoadJoinElementFn = initialLoadFn;
+ let buffer: Buffer = BufferInit(len, sep);
+
+ // 6. Let k be 0.
+ let k: uintptr = 0;
+
+ // 7. Repeat, while k < len
+ while (k < len) {
+ if (CannotUseSameArrayAccessor<T>(
+ loadFn, receiver, initialMap, lengthNumber))
+ deferred {
+ loadFn = LoadJoinElement<GenericElementsAccessor>;
+ }
+
+ if (k > 0) {
+ // a. If k > 0, let R be the string-concatenation of R and sep.
+ nofSeparators = nofSeparators + 1;
+ }
+
+ // b. Let element be ? Get(O, ! ToString(k)).
+ const element: Object = loadFn(context, receiver, Convert<Number>(k++));
+
+ // c. If element is undefined or null, let next be the empty String;
+ // otherwise, let next be ? ToString(element).
+ let next: String;
+ if constexpr (useToLocaleString) {
+ next = ConvertToLocaleString(context, element, locales, options);
+ if (next == kEmptyString) continue;
+ } else {
+ typeswitch (element) {
+ case (str: String): {
+ if (str == kEmptyString) continue;
+ next = str;
+ }
+ case (num: Number): {
+ next = NumberToString(num);
+ }
+ case (obj: HeapObject): {
+ if (IsNullOrUndefined(obj)) continue;
+ next = ToString(context, obj);
+ }
+ }
+ }
+
+ // d. Set R to the string-concatenation of R and next.
+ buffer = BufferAdd(buffer, next, nofSeparators, separatorLength);
+ nofSeparators = 0;
+ }
+
+ // Add any separators at the end.
+ buffer = BufferAddSeparators(buffer, nofSeparators, separatorLength, true);
+
+ // 8. Return R.
+ return BufferJoin(buffer, sep);
+ }
+
+ transitioning macro ArrayJoin<T: type>(implicit context: Context)(
+ useToLocaleString: constexpr bool, receiver: JSReceiver, sep: String,
+ lenNumber: Number, locales: Object, options: Object): Object;
+
+ ArrayJoin<JSArray>(implicit context: Context)(
+ useToLocaleString: constexpr bool, receiver: JSReceiver, sep: String,
+ lenNumber: Number, locales: Object, options: Object): Object {
+ const map: Map = receiver.map;
+ const kind: ElementsKind = map.elements_kind;
+ let loadFn: LoadJoinElementFn;
+
+ try {
+ const array: JSArray = Cast<JSArray>(receiver) otherwise IfSlowPath;
+ if (array.length != lenNumber) goto IfSlowPath;
+ if (!IsPrototypeInitialArrayPrototype(map)) goto IfSlowPath;
+ if (IsNoElementsProtectorCellInvalid()) goto IfSlowPath;
+
+ if (IsElementsKindLessThanOrEqual(kind, HOLEY_ELEMENTS)) {
+ loadFn = LoadJoinElement<FastSmiOrObjectElements>;
+ } else if (IsElementsKindLessThanOrEqual(kind, HOLEY_DOUBLE_ELEMENTS)) {
+ loadFn = LoadJoinElement<FastDoubleElements>;
+ } else if (kind == DICTIONARY_ELEMENTS)
+ deferred {
+ const dict: NumberDictionary =
+ UnsafeCast<NumberDictionary>(array.elements);
+ const nofElements: Smi = GetNumberDictionaryNumberOfElements(dict);
+ if (nofElements == 0) {
+ if (sep == kEmptyString) return kEmptyString;
+ try {
+ const nofSeparators: Smi =
+ Cast<Smi>(lenNumber - 1) otherwise IfNotSmi;
+ return StringRepeat(context, sep, nofSeparators);
+ }
+ label IfNotSmi {
+ ThrowInvalidStringLength(context);
+ }
+ } else {
+ loadFn = LoadJoinElement<DictionaryElements>;
+ }
+ }
+ else {
+ goto IfSlowPath;
+ }
+ }
+ label IfSlowPath {
+ loadFn = LoadJoinElement<GenericElementsAccessor>;
+ }
+ return ArrayJoinImpl<JSArray>(
+ receiver, sep, lenNumber, useToLocaleString, locales, options, loadFn);
+ }
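The dictionary-mode branch special-cases a dictionary with no own elements: the result is simply len - 1 separators built with StringRepeat rather than a per-index loop. Whether or not the engine keeps a given array in dictionary mode internally, the observable result is the same:

```typescript
// An array with length but no stored elements joins to pure separators.
const sparse: unknown[] = [];
sparse.length = 6;
console.log(sparse.join('.'));  // "....." (length - 1 = 5 separators)
```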
+
+ ArrayJoin<JSTypedArray>(implicit context: Context)(
+ useToLocaleString: constexpr bool, receiver: JSReceiver, sep: String,
+ lenNumber: Number, locales: Object, options: Object): Object {
+ const map: Map = receiver.map;
+ const kind: ElementsKind = map.elements_kind;
+ let loadFn: LoadJoinElementFn;
+
+ if (IsElementsKindGreaterThan(kind, UINT32_ELEMENTS)) {
+ if (kind == INT32_ELEMENTS) {
+ loadFn = LoadJoinTypedElement<FixedInt32Array>;
+ } else if (kind == FLOAT32_ELEMENTS) {
+ loadFn = LoadJoinTypedElement<FixedFloat32Array>;
+ } else if (kind == FLOAT64_ELEMENTS) {
+ loadFn = LoadJoinTypedElement<FixedFloat64Array>;
+ } else if (kind == UINT8_CLAMPED_ELEMENTS) {
+ loadFn = LoadJoinTypedElement<FixedUint8ClampedArray>;
+ } else if (kind == BIGUINT64_ELEMENTS) {
+ loadFn = LoadJoinTypedElement<FixedBigUint64Array>;
+ } else if (kind == BIGINT64_ELEMENTS) {
+ loadFn = LoadJoinTypedElement<FixedBigInt64Array>;
+ } else {
+ unreachable;
+ }
+ } else {
+ if (kind == UINT8_ELEMENTS) {
+ loadFn = LoadJoinTypedElement<FixedUint8Array>;
+ } else if (kind == INT8_ELEMENTS) {
+ loadFn = LoadJoinTypedElement<FixedInt8Array>;
+ } else if (kind == UINT16_ELEMENTS) {
+ loadFn = LoadJoinTypedElement<FixedUint16Array>;
+ } else if (kind == INT16_ELEMENTS) {
+ loadFn = LoadJoinTypedElement<FixedInt16Array>;
+ } else if (kind == UINT32_ELEMENTS) {
+ loadFn = LoadJoinTypedElement<FixedUint32Array>;
+ } else {
+ unreachable;
+ }
+ }
+ return ArrayJoinImpl<JSTypedArray>(
+ receiver, sep, lenNumber, useToLocaleString, locales, options, loadFn);
+ }
+
+ // The Join Stack detects cyclical calls to Array Join builtins
+ // (Array.p.join(), Array.p.toString(), Array.p.toLocaleString()). This
+  // FixedArray holds a stack of the receivers currently being joined.
+ // CycleProtectedArrayJoin() is responsible for calling JoinStackPush and
+ // JoinStackPop when visiting and leaving a receiver, respectively.
+ const kMinJoinStackSize:
+ constexpr int31 generates 'JSArray::kMinJoinStackSize';
+ macro LoadJoinStack(implicit context: Context)(): FixedArray
+ labels IfUninitialized {
+ const nativeContext: NativeContext = LoadNativeContext(context);
+ const stack: HeapObject =
+ UnsafeCast<HeapObject>(nativeContext[ARRAY_JOIN_STACK_INDEX]);
+ if (stack == Undefined) goto IfUninitialized;
+ assert(IsFixedArray(stack));
+ return UnsafeCast<FixedArray>(stack);
+ }
+
+ macro SetJoinStack(implicit context: Context)(stack: FixedArray): void {
+ const nativeContext: NativeContext = LoadNativeContext(context);
+ nativeContext[ARRAY_JOIN_STACK_INDEX] = stack;
+ }
+
+ // Adds a receiver to the stack. The FixedArray will automatically grow to
+ // accommodate the receiver. If the receiver already exists on the stack,
+ // this indicates a cyclical call and False is returned.
+ builtin JoinStackPush(implicit context: Context)(
+ stack: FixedArray, receiver: JSReceiver): Boolean {
+ const capacity: intptr = stack.length_intptr;
+ for (let i: intptr = 0; i < capacity; i++) {
+ const previouslyVisited: Object = stack[i];
+
+ // Add `receiver` to the first open slot
+ if (previouslyVisited == Hole) {
+ stack[i] = receiver;
+ return True;
+ }
+
+ // Detect cycles
+ if (receiver == previouslyVisited) return False;
+ }
+
+ // If no open slots were found, grow the stack and add receiver to the end.
+ const newStack: FixedArray =
+ StoreAndGrowFixedArray(stack, capacity, receiver);
+ SetJoinStack(newStack);
+ return True;
+ }
+
+ // Fast path the common non-nested calls. If the receiver is not already on
+ // the stack, add it to the stack and go to ReceiverAdded. Otherwise go to
+ // ReceiverNotAdded.
+ macro JoinStackPushInline(implicit context: Context)(receiver: JSReceiver):
+ never
+ labels ReceiverAdded, ReceiverNotAdded {
+ try {
+ const stack: FixedArray = LoadJoinStack()
+ otherwise IfUninitialized;
+ if (stack[0] == Hole) {
+ stack[0] = receiver;
+ } else if (JoinStackPush(stack, receiver) == False)
+ deferred {
+ goto ReceiverNotAdded;
+ }
+ }
+ label IfUninitialized {
+ const stack: FixedArray =
+ AllocateFixedArrayWithHoles(kMinJoinStackSize, kNone);
+ stack[0] = receiver;
+ SetJoinStack(stack);
+ }
+ goto ReceiverAdded;
+ }
+
+  // Removes a receiver from the stack. The FixedArray will automatically
+  // shrink to JSArray::kMinJoinStackSize once the stack becomes empty.
+ builtin JoinStackPop(implicit context: Context)(
+ stack: FixedArray, receiver: JSReceiver): Object {
+ const len: intptr = stack.length_intptr;
+ for (let i: intptr = 0; i < len; i++) {
+ if (stack[i] == receiver) {
+ // Shrink the Join Stack if the stack will be empty and is larger than
+ // the minimum size.
+ if (i == 0 && len > kMinJoinStackSize) deferred {
+ const newStack: FixedArray =
+ AllocateFixedArrayWithHoles(kMinJoinStackSize, kNone);
+ SetJoinStack(newStack);
+ }
+ else {
+ stack[i] = Hole;
+ }
+ return Undefined;
+ }
+ }
+ unreachable;
+ }
+
+ // Fast path the common non-nested calls.
+ macro JoinStackPopInline(implicit context: Context)(receiver: JSReceiver) {
+ const stack: FixedArray = LoadJoinStack()
+ otherwise unreachable;
+ const len: intptr = stack.length_intptr;
+
+ // Builtin call was not nested (receiver is the first entry) and
+ // did not contain other nested arrays that expanded the stack.
+ if (stack[0] == receiver && len == kMinJoinStackSize) {
+ StoreFixedArrayElement(stack, 0, Hole, SKIP_WRITE_BARRIER);
+ } else
+ deferred {
+ JoinStackPop(stack, receiver);
+ }
+ }
+
+ // Main entry point for all builtins using Array Join functionality.
+ transitioning macro CycleProtectedArrayJoin<T: type>(implicit context:
+ Context)(
+ useToLocaleString: constexpr bool, o: JSReceiver, len: Number,
+ sepObj: Object, locales: Object, options: Object): Object {
+ // 3. If separator is undefined, let sep be the single-element String ",".
+ // 4. Else, let sep be ? ToString(separator).
+ let sep: String =
+ sepObj == Undefined ? ',' : ToString_Inline(context, sepObj);
+
+ // If the receiver is not empty and not already being joined, continue with
+ // the normal join algorithm.
+ if (len > 0 && JoinStackPushInline(o)) {
+ try {
+ const result: Object =
+ ArrayJoin<T>(useToLocaleString, o, sep, len, locales, options);
+ JoinStackPopInline(o);
+ return result;
+ } catch (e) deferred {
+ JoinStackPopInline(o);
+ ReThrow(context, e);
+ }
+ } else {
+ return kEmptyString;
+ }
+ }
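The net effect of the join stack is that a receiver that is already being joined renders as the empty string instead of recursing forever. The ECMAScript algorithm itself would not terminate here; the early return shown is V8's cycle-breaking behavior:

```typescript
// A self-referential array would recurse forever without cycle detection;
// the nested occurrence joins to the empty string instead (V8 behavior).
const a: unknown[] = [1, 2];
a.push(a);
console.log(a.join('-'));  // "1-2-"
```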
+
+ // https://tc39.github.io/ecma262/#sec-array.prototype.join
+ transitioning javascript builtin
+ ArrayPrototypeJoin(context: Context, receiver: Object, ...arguments): Object {
+ const separator: Object = arguments[0];
+
+ // 1. Let O be ? ToObject(this value).
+ const o: JSReceiver = ToObject_Inline(context, receiver);
+
+ // 2. Let len be ? ToLength(? Get(O, "length")).
+ const len: Number = GetLengthProperty(o);
+
+ // Only handle valid array lengths. Although the spec allows larger values,
+ // this matches historical V8 behavior.
+ if (len > kMaxArrayIndex + 1) ThrowTypeError(context, kInvalidArrayLength);
+
+ return CycleProtectedArrayJoin<JSArray>(
+ false, o, len, separator, Undefined, Undefined);
+ }
+
+ // https://tc39.github.io/ecma262/#sec-array.prototype.tolocalestring
+ transitioning javascript builtin ArrayPrototypeToLocaleString(
+ context: Context, receiver: Object, ...arguments): Object {
+ const locales: Object = arguments[0];
+ const options: Object = arguments[1];
+
+ // 1. Let O be ? ToObject(this value).
+ const o: JSReceiver = ToObject_Inline(context, receiver);
+
+ // 2. Let len be ? ToLength(? Get(O, "length")).
+ const len: Number = GetLengthProperty(o);
+
+ // Only handle valid array lengths. Although the spec allows larger values,
+ // this matches historical V8 behavior.
+ if (len > kMaxArrayIndex + 1) ThrowTypeError(context, kInvalidArrayLength);
+
+ return CycleProtectedArrayJoin<JSArray>(
+ true, o, len, ',', locales, options);
+ }
+
+ // https://tc39.github.io/ecma262/#sec-array.prototype.tostring
+ transitioning javascript builtin ArrayPrototypeToString(
+ context: Context, receiver: Object, ...arguments): Object {
+ // 1. Let array be ? ToObject(this value).
+ const array: JSReceiver = ToObject_Inline(context, receiver);
+
+ // 2. Let func be ? Get(array, "join").
+ const prop: Object = GetProperty(array, 'join');
+ try {
+ // 3. If IsCallable(func) is false, let func be the intrinsic function
+ // %ObjProto_toString%.
+ const func: Callable = Cast<Callable>(prop) otherwise NotCallable;
+
+ // 4. Return ? Call(func, array).
+ return Call(context, func, array);
+ }
+ label NotCallable {
+ return ObjectToString(context, array);
+ }
+ }
+
+ // https://tc39.github.io/ecma262/#sec-%typedarray%.prototype.join
+ transitioning javascript builtin TypedArrayPrototypeJoin(
+ context: Context, receiver: Object, ...arguments): Object {
+ const separator: Object = arguments[0];
+
+ // Spec: ValidateTypedArray is applied to the this value prior to evaluating
+ // the algorithm.
+ const typedArray: JSTypedArray = typed_array::ValidateTypedArray(
+ context, receiver, '%TypedArray%.prototype.join');
+ const length: Smi = typedArray.length;
+
+ return CycleProtectedArrayJoin<JSTypedArray>(
+ false, typedArray, length, separator, Undefined, Undefined);
+ }
+
+ // https://tc39.github.io/ecma262/#sec-%typedarray%.prototype.tolocalestring
+ transitioning javascript builtin TypedArrayPrototypeToLocaleString(
+ context: Context, receiver: Object, ...arguments): Object {
+ const locales: Object = arguments[0];
+ const options: Object = arguments[1];
+
+ // Spec: ValidateTypedArray is applied to the this value prior to evaluating
+ // the algorithm.
+ const typedArray: JSTypedArray = typed_array::ValidateTypedArray(
+ context, receiver, '%TypedArray%.prototype.toLocaleString');
+ const length: Smi = typedArray.length;
+
+ return CycleProtectedArrayJoin<JSTypedArray>(
+ true, typedArray, length, ',', locales, options);
+ }
+}
diff --git a/deps/v8/src/builtins/array-lastindexof.tq b/deps/v8/src/builtins/array-lastindexof.tq
index 056220092e..967d640e8f 100644
--- a/deps/v8/src/builtins/array-lastindexof.tq
+++ b/deps/v8/src/builtins/array-lastindexof.tq
@@ -2,12 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-module array {
+namespace array {
macro LoadWithHoleCheck<Elements: type>(
elements: FixedArrayBase, index: Smi): Object
labels IfHole;
- LoadWithHoleCheck<FixedArray>(elements: FixedArrayBase, index: Smi): Object
+ LoadWithHoleCheck<FixedArray>(implicit context: Context)(
+ elements: FixedArrayBase, index: Smi): Object
labels IfHole {
const elements: FixedArray = UnsafeCast<FixedArray>(elements);
const element: Object = elements[index];
@@ -15,8 +16,8 @@ module array {
return element;
}
- LoadWithHoleCheck<FixedDoubleArray>(elements: FixedArrayBase, index: Smi):
- Object
+ LoadWithHoleCheck<FixedDoubleArray>(implicit context: Context)(
+ elements: FixedArrayBase, index: Smi): Object
labels IfHole {
const elements: FixedDoubleArray = UnsafeCast<FixedDoubleArray>(elements);
const element: float64 = LoadDoubleWithHoleCheck(elements, index)
@@ -44,7 +45,7 @@ module array {
const same: Boolean = StrictEqual(searchElement, element);
if (same == True) {
- assert(IsFastJSArray(array, context));
+ assert(Is<FastJSArray>(array));
return k;
}
}
@@ -53,7 +54,7 @@ module array {
--k;
}
- assert(IsFastJSArray(array, context));
+ assert(Is<FastJSArray>(array));
return -1;
}
@@ -83,10 +84,8 @@ module array {
context: Context, receiver: JSReceiver, searchElement: Object,
from: Number): Object
labels Slow {
- EnsureFastJSArray(context, receiver) otherwise Slow;
- const array: JSArray = UnsafeCast<JSArray>(receiver);
-
- const length: Smi = array.length_fast;
+ const array: FastJSArray = Cast<FastJSArray>(receiver) otherwise Slow;
+ const length: Smi = array.length;
if (length == 0) return SmiConstant(-1);
const fromSmi: Smi = Cast<Smi>(from) otherwise Slow;
@@ -100,7 +99,7 @@ module array {
context, array, fromSmi, searchElement);
}
- macro GenericArrayLastIndexOf(
+ transitioning macro GenericArrayLastIndexOf(
context: Context, object: JSReceiver, searchElement: Object,
from: Number): Object {
let k: Number = from;
@@ -108,12 +107,12 @@ module array {
// 7. Repeat, while k >= 0.
while (k >= 0) {
// a. Let kPresent be ? HasProperty(O, ! ToString(k)).
- const kPresent: Boolean = HasProperty(context, object, k);
+ const kPresent: Boolean = HasProperty(object, k);
// b. If kPresent is true, then.
if (kPresent == True) {
// i. Let elementK be ? Get(O, ! ToString(k)).
- const element: Object = GetProperty(context, object, k);
+ const element: Object = GetProperty(object, k);
// ii. Let same be the result of performing Strict Equality Comparison
// searchElement === elementK.
@@ -132,13 +131,13 @@ module array {
}
// https://tc39.github.io/ecma262/#sec-array.prototype.lastIndexOf
- javascript builtin ArrayPrototypeLastIndexOf(
+ transitioning javascript builtin ArrayPrototypeLastIndexOf(
context: Context, receiver: Object, ...arguments): Object {
// 1. Let O be ? ToObject(this value).
const object: JSReceiver = ToObject_Inline(context, receiver);
// 2. Let len be ? ToLength(? Get(O, "length")).
- const length: Number = GetLengthProperty(context, object);
+ const length: Number = GetLengthProperty(object);
// 3. If len is 0, return -1.
if (length == SmiConstant(0)) return SmiConstant(-1);
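The generic path retained above still performs HasProperty before each Get, so holes are skipped rather than compared as undefined:

```typescript
const arr = [1, , 1];                     // index 1 is a hole
console.log(arr.lastIndexOf(1));          // 2
console.log(arr.lastIndexOf(undefined));  // -1: the hole is never compared
```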
diff --git a/deps/v8/src/builtins/array-of.tq b/deps/v8/src/builtins/array-of.tq
new file mode 100644
index 0000000000..6434dbc8c8
--- /dev/null
+++ b/deps/v8/src/builtins/array-of.tq
@@ -0,0 +1,54 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+namespace array {
+ // https://tc39.github.io/ecma262/#sec-array.of
+ transitioning javascript builtin
+ ArrayOf(implicit context: Context)(receiver: Object, ...arguments): Object {
+ // 1. Let len be the actual number of arguments passed to this function.
+ const len: Smi = Convert<Smi>(arguments.length);
+
+ // 2. Let items be the List of arguments passed to this function.
+ const items: constexpr Arguments = arguments;
+
+ // 3. Let C be the this value.
+ const c: Object = receiver;
+
+ let a: JSReceiver;
+
+ // 4. If IsConstructor(C) is true, then
+ typeswitch (c) {
+ case (c: Constructor): {
+ // a. Let A be ? Construct(C, « len »).
+ a = Construct(c, len);
+ }
+ case (Object): {
+ // a. Let A be ? ArrayCreate(len).
+ a = ArrayCreate(len);
+ }
+ }
+
+ // 6. Let k be 0.
+ let k: Smi = 0;
+
+ // 7. Repeat, while k < len
+ while (k < len) {
+ // a. Let kValue be items[k].
+ let kValue: Object = items[Convert<intptr>(k)];
+
+ // b. Let Pk be ! ToString(k).
+ // c. Perform ? CreateDataPropertyOrThrow(A, Pk, kValue).
+ CreateDataProperty(a, k, kValue);
+
+ // d. Increase k by 1.
+ k++;
+ }
+
+ // 8. Perform ? Set(A, "length", len, true).
+ SetPropertyLength(a, len);
+
+ // 9. Return A.
+ return a;
+ }
+}
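Array.of constructs through its this value whenever that value is a constructor, which is what makes it inheritable by Array subclasses. A TypeScript sketch of the algorithm (arrayOfSketch is an illustrative stand-in, and plain assignment stands in for CreateDataProperty):

```typescript
// Sketch of the Array.of algorithm: construct via `this` when possible.
function arrayOfSketch(this: unknown, ...items: unknown[]): unknown[] {
  const len = items.length;
  const a: unknown[] = typeof this === 'function'
      ? new (this as new (n: number) => unknown[])(len)  // ? Construct(C, « len »)
      : new Array(len);                                  // ? ArrayCreate(len)
  for (let k = 0; k < len; k++) a[k] = items[k];  // CreateDataProperty(A, k, ...)
  a.length = len;                                 // ? Set(A, "length", len, true)
  return a;
}

class MyArray extends Array {}
const m = arrayOfSketch.call(MyArray, 1, 2, 3);
console.log(m instanceof MyArray, m.length);  // true 3
```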
diff --git a/deps/v8/src/builtins/array-reverse.tq b/deps/v8/src/builtins/array-reverse.tq
index 327ef12402..dddad7b42c 100644
--- a/deps/v8/src/builtins/array-reverse.tq
+++ b/deps/v8/src/builtins/array-reverse.tq
@@ -2,23 +2,23 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-module array {
+namespace array {
macro LoadElement<ElementsAccessor: type, T: type>(
elements: FixedArrayBase, index: Smi): T;
- LoadElement<FastPackedSmiElements, Smi>(
+ LoadElement<FastPackedSmiElements, Smi>(implicit context: Context)(
elements: FixedArrayBase, index: Smi): Smi {
const elems: FixedArray = UnsafeCast<FixedArray>(elements);
return UnsafeCast<Smi>(elems[index]);
}
- LoadElement<FastPackedObjectElements, Object>(
+ LoadElement<FastPackedObjectElements, Object>(implicit context: Context)(
elements: FixedArrayBase, index: Smi): Object {
const elems: FixedArray = UnsafeCast<FixedArray>(elements);
return elems[index];
}
- LoadElement<FastPackedDoubleElements, float64>(
+ LoadElement<FastPackedDoubleElements, float64>(implicit context: Context)(
elements: FixedArrayBase, index: Smi): float64 {
try {
const elems: FixedDoubleArray = UnsafeCast<FixedDoubleArray>(elements);
@@ -32,25 +32,24 @@ module array {
}
macro StoreElement<ElementsAccessor: type, T: type>(
- elements: FixedArrayBase, index: Smi, value: T);
+ implicit context:
+ Context)(elements: FixedArrayBase, index: Smi, value: T);
- StoreElement<FastPackedSmiElements, Smi>(
+ StoreElement<FastPackedSmiElements, Smi>(implicit context: Context)(
elements: FixedArrayBase, index: Smi, value: Smi) {
const elems: FixedArray = UnsafeCast<FixedArray>(elements);
StoreFixedArrayElementSmi(elems, index, value, SKIP_WRITE_BARRIER);
}
- StoreElement<FastPackedObjectElements, Object>(
+ StoreElement<FastPackedObjectElements, Object>(implicit context: Context)(
elements: FixedArrayBase, index: Smi, value: Object) {
const elems: FixedArray = UnsafeCast<FixedArray>(elements);
elems[index] = value;
}
- StoreElement<FastPackedDoubleElements, float64>(
+ StoreElement<FastPackedDoubleElements, float64>(implicit context: Context)(
elements: FixedArrayBase, index: Smi, value: float64) {
const elems: FixedDoubleArray = UnsafeCast<FixedDoubleArray>(elements);
-
- assert(value == Float64SilenceNaN(value));
StoreFixedDoubleArrayElementWithSmiIndex(elems, index, value);
}
@@ -58,26 +57,27 @@ module array {
// whether a property is present, so we can simply swap them using fast
// FixedArray loads/stores.
macro FastPackedArrayReverse<Accessor: type, T: type>(
- elements: FixedArrayBase, length: Smi) {
+ implicit context: Context)(elements: FixedArrayBase, length: Smi) {
let lower: Smi = 0;
let upper: Smi = length - 1;
while (lower < upper) {
const lowerValue: T = LoadElement<Accessor, T>(elements, lower);
const upperValue: T = LoadElement<Accessor, T>(elements, upper);
- StoreElement<Accessor, T>(elements, lower, upperValue);
- StoreElement<Accessor, T>(elements, upper, lowerValue);
+ StoreElement<Accessor>(elements, lower, upperValue);
+ StoreElement<Accessor>(elements, upper, lowerValue);
++lower;
--upper;
}
}
- macro GenericArrayReverse(context: Context, receiver: Object): Object {
+ transitioning macro GenericArrayReverse(context: Context, receiver: Object):
+ Object {
// 1. Let O be ? ToObject(this value).
const object: JSReceiver = ToObject_Inline(context, receiver);
// 2. Let len be ? ToLength(? Get(O, "length")).
- const length: Number = GetLengthProperty(context, object);
+ const length: Number = GetLengthProperty(object);
// 3. Let middle be floor(len / 2).
// 4. Let lower be 0.
@@ -96,42 +96,42 @@ module array {
// b. Let upperP be ! ToString(upper).
// c. Let lowerP be ! ToString(lower).
// d. Let lowerExists be ? HasProperty(O, lowerP).
- const lowerExists: Boolean = HasProperty(context, object, lower);
+ const lowerExists: Boolean = HasProperty(object, lower);
// e. If lowerExists is true, then.
if (lowerExists == True) {
// i. Let lowerValue be ? Get(O, lowerP).
- lowerValue = GetProperty(context, object, lower);
+ lowerValue = GetProperty(object, lower);
}
// f. Let upperExists be ? HasProperty(O, upperP).
- const upperExists: Boolean = HasProperty(context, object, upper);
+ const upperExists: Boolean = HasProperty(object, upper);
// g. If upperExists is true, then.
if (upperExists == True) {
// i. Let upperValue be ? Get(O, upperP).
- upperValue = GetProperty(context, object, upper);
+ upperValue = GetProperty(object, upper);
}
// h. If lowerExists is true and upperExists is true, then
if (lowerExists == True && upperExists == True) {
// i. Perform ? Set(O, lowerP, upperValue, true).
- SetProperty(context, object, lower, upperValue);
+ SetProperty(object, lower, upperValue);
// ii. Perform ? Set(O, upperP, lowerValue, true).
- SetProperty(context, object, upper, lowerValue);
+ SetProperty(object, upper, lowerValue);
} else if (lowerExists == False && upperExists == True) {
// i. Perform ? Set(O, lowerP, upperValue, true).
- SetProperty(context, object, lower, upperValue);
+ SetProperty(object, lower, upperValue);
// ii. Perform ? DeletePropertyOrThrow(O, upperP).
- DeleteProperty(context, object, upper, kStrict);
+ DeleteProperty(object, upper, kStrict);
} else if (lowerExists == True && upperExists == False) {
// i. Perform ? DeletePropertyOrThrow(O, lowerP).
- DeleteProperty(context, object, lower, kStrict);
+ DeleteProperty(object, lower, kStrict);
// ii. Perform ? Set(O, upperP, lowerValue, true).
- SetProperty(context, object, upper, lowerValue);
+ SetProperty(object, upper, lowerValue);
}
// l. Increase lower by 1.
@@ -143,28 +143,29 @@ module array {
return object;
}
- macro TryFastPackedArrayReverse(receiver: Object) labels Slow {
- const array: JSArray = Cast<JSArray>(receiver) otherwise Slow;
+ macro TryFastPackedArrayReverse(implicit context: Context)(receiver: Object)
+ labels Slow {
+ const array: FastJSArray = Cast<FastJSArray>(receiver) otherwise Slow;
const kind: ElementsKind = array.map.elements_kind;
if (kind == PACKED_SMI_ELEMENTS) {
EnsureWriteableFastElements(array);
FastPackedArrayReverse<FastPackedSmiElements, Smi>(
- array.elements, array.length_fast);
+ array.elements, array.length);
} else if (kind == PACKED_ELEMENTS) {
EnsureWriteableFastElements(array);
FastPackedArrayReverse<FastPackedObjectElements, Object>(
- array.elements, array.length_fast);
+ array.elements, array.length);
} else if (kind == PACKED_DOUBLE_ELEMENTS) {
FastPackedArrayReverse<FastPackedDoubleElements, float64>(
- array.elements, array.length_fast);
+ array.elements, array.length);
} else {
goto Slow;
}
}
// https://tc39.github.io/ecma262/#sec-array.prototype.reverse
- javascript builtin ArrayPrototypeReverse(
+ transitioning javascript builtin ArrayPrototypeReverse(
context: Context, receiver: Object, ...arguments): Object {
try {
TryFastPackedArrayReverse(receiver) otherwise Baseline;
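The packed fast path is a plain two-pointer swap over the backing store, which is safe because packed element kinds guarantee every slot is present. FastPackedArrayReverse's loop translates almost verbatim to TypeScript:

```typescript
// Two-pointer in-place reverse, as in FastPackedArrayReverse.
function packedReverse<T>(elements: T[]): T[] {
  let lower = 0;
  let upper = elements.length - 1;
  while (lower < upper) {
    const lowerValue = elements[lower];
    const upperValue = elements[upper];
    elements[lower] = upperValue;
    elements[upper] = lowerValue;
    ++lower;
    --upper;
  }
  return elements;
}

console.log(packedReverse([1, 2, 3, 4]));  // [ 4, 3, 2, 1 ]
```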
diff --git a/deps/v8/src/builtins/array-slice.tq b/deps/v8/src/builtins/array-slice.tq
new file mode 100644
index 0000000000..615b4b7073
--- /dev/null
+++ b/deps/v8/src/builtins/array-slice.tq
@@ -0,0 +1,212 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+namespace array {
+ macro HandleSimpleArgumentsSlice(
+ context: Context, args: JSArgumentsObjectWithLength, start: Smi,
+ count: Smi): JSArray
+ labels Bailout {
+ // If the resulting array doesn't fit in new space, use the slow path.
+ if (count >= kMaxNewSpaceFixedArrayElements) goto Bailout;
+
+ const end: Smi = start + count;
+ const sourceElements: FixedArray =
+ Cast<FixedArray>(args.elements) otherwise Bailout;
+ if (SmiAbove(end, sourceElements.length)) goto Bailout;
+
+ const arrayMap: Map = LoadJSArrayElementsMap(HOLEY_ELEMENTS, context);
+ const result: JSArray =
+ AllocateJSArray(HOLEY_ELEMENTS, arrayMap, count, count);
+ const newElements: FixedArray =
+ Cast<FixedArray>(result.elements) otherwise Bailout;
+ CopyElements(
+ PACKED_ELEMENTS, newElements, 0, sourceElements, Convert<intptr>(start),
+ Convert<intptr>(count));
+ return result;
+ }
+
+ macro HandleFastAliasedSloppyArgumentsSlice(
+ context: Context, args: JSArgumentsObjectWithLength, start: Smi,
+ count: Smi): JSArray
+ labels Bailout {
+ // If the resulting array doesn't fit in new space, use the slow path.
+ if (count >= kMaxNewSpaceFixedArrayElements) goto Bailout;
+
+ const sloppyElements: SloppyArgumentsElements =
+ Cast<SloppyArgumentsElements>(args.elements) otherwise Bailout;
+ const sloppyElementsLength: Smi = sloppyElements.length;
+ const parameterMapLength: Smi =
+ sloppyElementsLength - kSloppyArgumentsParameterMapStart;
+
+    // Check to make sure that the extraction will not access outside the
+    // defined arguments.
+ const end: Smi = start + count;
+ const unmappedElements: FixedArray =
+ Cast<FixedArray>(sloppyElements[kSloppyArgumentsArgumentsIndex])
+ otherwise Bailout;
+ const unmappedElementsLength: Smi = unmappedElements.length;
+ if (SmiAbove(end, unmappedElementsLength)) goto Bailout;
+
+ const argumentsContext: Context =
+ UnsafeCast<Context>(sloppyElements[kSloppyArgumentsContextIndex]);
+
+ const arrayMap: Map = LoadJSArrayElementsMap(HOLEY_ELEMENTS, context);
+ const result: JSArray =
+ AllocateJSArray(HOLEY_ELEMENTS, arrayMap, count, count);
+
+ let indexOut: Smi = 0;
+ const resultElements: FixedArray = UnsafeCast<FixedArray>(result.elements);
+ const to: Smi = SmiMin(parameterMapLength, end);
+
+    // Fill in the part of the result that maps to context-mapped parameters.
+ for (let current: Smi = start; current < to; ++current) {
+ const e: Object =
+ sloppyElements[current + kSloppyArgumentsParameterMapStart];
+ const newElement: Object = e != Hole ?
+ argumentsContext[UnsafeCast<Smi>(e)] :
+ unmappedElements[current];
+ StoreFixedArrayElementSmi(
+ resultElements, indexOut++, newElement, SKIP_WRITE_BARRIER);
+ }
+
+ // Fill in the rest of the result that contains the unmapped parameters
+ // above the formal parameters.
+ const unmappedFrom: Smi = SmiMin(SmiMax(parameterMapLength, start), end);
+ const restCount: Smi = end - unmappedFrom;
+ CopyElements(
+ PACKED_ELEMENTS, resultElements, Convert<intptr>(indexOut),
+ unmappedElements, Convert<intptr>(unmappedFrom),
+ Convert<intptr>(restCount));
+ return result;
+ }
+
+ macro HandleFastSlice(
+ context: Context, o: Object, startNumber: Number,
+ countNumber: Number): JSArray
+ labels Bailout {
+ const start: Smi = Cast<Smi>(startNumber) otherwise Bailout;
+ const count: Smi = Cast<Smi>(countNumber) otherwise Bailout;
+ assert(start >= 0);
+
+ typeswitch (o) {
+ case (a: FastJSArrayForCopy): {
+ // It's possible to modify the array length from a valueOf
+ // callback between the original array length read and this
+ // point. That can change the length of the array backing store,
+ // in the worst case, making it smaller than the region that needs
+ // to be copied out. Therefore, re-check the length before calling
+ // the appropriate fast path. See regress-785804.js
+ if (SmiAbove(start + count, a.length)) goto Bailout;
+ return ExtractFastJSArray(context, a, start, count);
+ }
+ case (a: JSArgumentsObjectWithLength): {
+ const nativeContext: NativeContext = LoadNativeContext(context);
+ const map: Map = a.map;
+ if (IsFastAliasedArgumentsMap(map)) {
+ return HandleFastAliasedSloppyArgumentsSlice(context, a, start, count)
+ otherwise Bailout;
+ } else if (IsStrictArgumentsMap(map) || IsSloppyArgumentsMap(map)) {
+ return HandleSimpleArgumentsSlice(context, a, start, count)
+ otherwise Bailout;
+ }
+ }
+ case (Object): {
+ }
+ }
+ goto Bailout;
+ }
+
+ // https://tc39.github.io/ecma262/#sec-array.prototype.slice
+ transitioning javascript builtin
+ ArrayPrototypeSlice(context: Context, receiver: Object, ...arguments):
+ Object {
+ // Handle array cloning case if the receiver is a fast array.
+ if (arguments.length == 0) {
+ typeswitch (receiver) {
+ case (a: FastJSArrayForCopy): {
+ return CloneFastJSArray(context, a);
+ }
+ case (Object): {
+ }
+ }
+ }
+
+ // 1. Let O be ? ToObject(this value).
+ const o: JSReceiver = ToObject_Inline(context, receiver);
+
+ // 2. Let len be ? ToLength(? Get(O, "length")).
+ const len: Number = GetLengthProperty(o);
+
+ // 3. Let relativeStart be ? ToInteger(start).
+ const start: Object = arguments[0];
+ const relativeStart: Number = ToInteger_Inline(context, start);
+
+ // 4. If relativeStart < 0, let k be max((len + relativeStart), 0);
+ // else let k be min(relativeStart, len).
+ let k: Number = relativeStart < 0 ? Max((len + relativeStart), 0) :
+ Min(relativeStart, len);
+
+ // 5. If end is undefined, let relativeEnd be len;
+ // else let relativeEnd be ? ToInteger(end).
+ const end: Object = arguments[1];
+ const relativeEnd: Number =
+ end == Undefined ? len : ToInteger_Inline(context, end);
+
+ // 6. If relativeEnd < 0, let final be max((len + relativeEnd), 0);
+ // else let final be min(relativeEnd, len).
+ const final: Number =
+ relativeEnd < 0 ? Max((len + relativeEnd), 0) : Min(relativeEnd, len);
+
+ // 7. Let count be max(final - k, 0).
+ const count: Number = Max(final - k, 0);
+
+ assert(0 <= k);
+ assert(k <= len);
+ assert(0 <= final);
+ assert(final <= len);
+ assert(0 <= count);
+ assert(count <= len);
+
+ try {
+ return HandleFastSlice(context, o, k, count) otherwise Slow;
+ }
+ label Slow {}
+
+ // 8. Let A be ? ArraySpeciesCreate(O, count).
+ const a: JSReceiver = ArraySpeciesCreate(context, o, count);
+
+ // 9. Let n be 0.
+ let n: Number = 0;
+
+ // 10. Repeat, while k < final
+ while (k < final) {
+ // a. Let Pk be ! ToString(k).
+ let pK: Number = k;
+
+ // b. Let kPresent be ? HasProperty(O, Pk).
+ const fromPresent: Boolean = HasProperty(o, pK);
+
+ // c. If kPresent is true, then
+ if (fromPresent == True) {
+ // i. Let kValue be ? Get(O, Pk).
+ const kValue: Object = GetProperty(o, pK);
+
+ // ii. Perform ? CreateDataPropertyOrThrow(A, ! ToString(n), kValue).
+ CreateDataProperty(a, n, kValue);
+ }
+
+ // d. Increase k by 1.
+ k++;
+
+ // e. Increase n by 1.
+ n++;
+ }
+
+ // 11. Perform ? Set(A, "length", n, true).
+ SetProperty(a, kLengthString, n);
+
+ // 12. Return A.
+ return a;
+ }
+}
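Spec steps 3 through 7 reduce to two clamps and a count, which is what the assert block at the end checks. A TypeScript sketch of just that index arithmetic (sliceBounds is illustrative):

```typescript
// Compute [k, final) and count for Array.prototype.slice (spec steps 3-7).
function sliceBounds(len: number, relativeStart: number,
                     relativeEnd: number = len):
    { k: number; final: number; count: number } {
  const k = relativeStart < 0 ? Math.max(len + relativeStart, 0)
                              : Math.min(relativeStart, len);
  const final = relativeEnd < 0 ? Math.max(len + relativeEnd, 0)
                                : Math.min(relativeEnd, len);
  const count = Math.max(final - k, 0);
  return { k, final, count };
}

console.log(sliceBounds(5, -2));    // { k: 3, final: 5, count: 2 }
console.log(sliceBounds(5, 4, 2));  // { k: 4, final: 2, count: 0 }
```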
diff --git a/deps/v8/src/builtins/array-splice.tq b/deps/v8/src/builtins/array-splice.tq
index 5746f4cdf6..7307f45f34 100644
--- a/deps/v8/src/builtins/array-splice.tq
+++ b/deps/v8/src/builtins/array-splice.tq
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-module array {
+namespace array {
// Given {elements}, we want to create a non-zero length array of type
// FixedArrayType. Most of this behavior is outsourced to ExtractFixedArray(),
// but the special case of wanting to have a FixedDoubleArray when given a
@@ -11,14 +11,14 @@ module array {
elements: FixedArrayBase, first: Smi, count: Smi,
capacity: Smi): FixedArrayType;
- Extract<FixedArray>(
+ Extract<FixedArray>(implicit context: Context)(
elements: FixedArrayBase, first: Smi, count: Smi,
capacity: Smi): FixedArray {
return UnsafeCast<FixedArray>(
ExtractFixedArray(elements, first, count, capacity));
}
- Extract<FixedDoubleArray>(
+ Extract<FixedDoubleArray>(implicit context: Context)(
elements: FixedArrayBase, first: Smi, count: Smi,
capacity: Smi): FixedDoubleArray {
if (elements == kEmptyFixedArray) {
@@ -51,11 +51,11 @@ module array {
Convert<intptr>(srcIndex), Convert<intptr>(count));
}
- macro FastSplice<FixedArrayType: type, ElementType: type>(
+ macro FastSplice<FixedArrayType: type, ElementType: type>(implicit context:
+ Context)(
args: constexpr Arguments, a: JSArray, length: Smi, newLength: Smi,
lengthDelta: Smi, actualStart: Smi, insertCount: Smi,
- actualDeleteCount: Smi): void
- labels Bailout {
+ actualDeleteCount: Smi): void labels Bailout {
// Make sure elements are writable.
EnsureWriteableFastElements(a);
@@ -106,7 +106,7 @@ module array {
a.length = newLength;
}
- macro FastArraySplice(
+ transitioning macro FastArraySplice(
context: Context, args: constexpr Arguments, o: JSReceiver,
originalLengthNumber: Number, actualStartNumber: Number, insertCount: Smi,
actualDeleteCountNumber: Number): Object
@@ -122,7 +122,7 @@ module array {
const a: JSArray = Cast<JSArray>(o) otherwise Bailout;
const map: Map = a.map;
- if (!IsPrototypeInitialArrayPrototype(context, map)) goto Bailout;
+ if (!IsPrototypeInitialArrayPrototype(map)) goto Bailout;
if (IsNoElementsProtectorCellInvalid()) goto Bailout;
if (IsArraySpeciesProtectorCellInvalid()) goto Bailout;
@@ -177,7 +177,7 @@ module array {
return deletedResult;
}
- macro FillDeletedElementsArray(
+ transitioning macro FillDeletedElementsArray(
context: Context, o: JSReceiver, actualStart: Number,
actualDeleteCount: Number, a: JSReceiver): Object {
// 10. Let k be 0.
@@ -189,28 +189,28 @@ module array {
const from: Number = actualStart + k;
// b. Let fromPresent be ? HasProperty(O, from).
- const fromPresent: Boolean = HasProperty(context, o, from);
+ const fromPresent: Boolean = HasProperty(o, from);
// c. If fromPresent is true, then
if (fromPresent == True) {
// i. Let fromValue be ? Get(O, from).
- const fromValue: Object = GetProperty(context, o, from);
+ const fromValue: Object = GetProperty(o, from);
// ii. Perform ? CreateDataPropertyOrThrow(A, ! ToString(k), fromValue).
- CreateDataProperty(context, a, k, fromValue);
+ CreateDataProperty(a, k, fromValue);
}
// d. Increment k by 1.
k++;
}
// 12. Perform ? Set(A, "length", actualDeleteCount, true).
- SetProperty(context, a, kLengthString, actualDeleteCount);
+ SetProperty(a, kLengthString, actualDeleteCount);
return a;
}
// HandleForwardCase implements step 15. "If itemCount < actualDeleteCount,
// then...""
- macro HandleForwardCase(
+ transitioning macro HandleForwardCase(
context: Context, o: JSReceiver, len: Number, itemCount: Number,
actualStart: Number, actualDeleteCount: Number): void {
// 15. If itemCount < actualDeleteCount, then
@@ -225,20 +225,20 @@ module array {
const to: Number = k + itemCount;
// iii. Let fromPresent be ? HasProperty(O, from).
- const fromPresent: Boolean = HasProperty(context, o, from);
+ const fromPresent: Boolean = HasProperty(o, from);
// iv. If fromPresent is true, then
if (fromPresent == True) {
// 1. Let fromValue be ? Get(O, from).
- const fromValue: Object = GetProperty(context, o, from);
+ const fromValue: Object = GetProperty(o, from);
// 2. Perform ? Set(O, to, fromValue, true).
- SetProperty(context, o, to, fromValue);
+ SetProperty(o, to, fromValue);
// v. Else fromPresent is false,
} else {
// 1. Perform ? DeletePropertyOrThrow(O, to).
- DeleteProperty(context, o, to, kStrict);
+ DeleteProperty(o, to, kStrict);
}
// vi. Increase k by 1.
k++;
@@ -250,7 +250,7 @@ module array {
// d. Repeat, while k > (len - actualDeleteCount + itemCount)
while (k > (len - actualDeleteCount + itemCount)) {
// i. Perform ? DeletePropertyOrThrow(O, ! ToString(k - 1)).
- DeleteProperty(context, o, k - 1, kStrict);
+ DeleteProperty(o, k - 1, kStrict);
// ii. Decrease k by 1.
k--;
}
@@ -258,7 +258,7 @@ module array {
// HandleBackwardCase implements step 16. "Else if itemCount >
// actualDeleteCount, then..."
- macro HandleBackwardCase(
+ transitioning macro HandleBackwardCase(
context: Context, o: JSReceiver, len: Number, itemCount: Number,
actualStart: Number, actualDeleteCount: Number): void {
// 16. Else if itemCount > actualDeleteCount, then
@@ -274,20 +274,20 @@ module array {
const to: Number = k + itemCount - 1;
// iii. Let fromPresent be ? HasProperty(O, from).
- const fromPresent: Boolean = HasProperty(context, o, from);
+ const fromPresent: Boolean = HasProperty(o, from);
// iv. If fromPresent is true, then
if (fromPresent == True) {
// 1. Let fromValue be ? Get(O, from).
- const fromValue: Object = GetProperty(context, o, from);
+ const fromValue: Object = GetProperty(o, from);
// 2. Perform ? Set(O, to, fromValue, true).
- SetProperty(context, o, to, fromValue);
+ SetProperty(o, to, fromValue);
// v. Else fromPresent is false,
} else {
// 1. Perform ? DeletePropertyOrThrow(O, to).
- DeleteProperty(context, o, to, kStrict);
+ DeleteProperty(o, to, kStrict);
}
// vi. Decrease k by 1.
@@ -295,7 +295,7 @@ module array {
}
}
- macro SlowSplice(
+ transitioning macro SlowSplice(
context: Context, arguments: constexpr Arguments, o: JSReceiver,
len: Number, actualStart: Number, insertCount: Smi,
actualDeleteCount: Number): Object {
@@ -334,7 +334,7 @@ module array {
if (arguments.length > 2) {
for (let e: Object of arguments [2: ]) {
// b. Perform ? Set(O, ! ToString(k), E, true).
- SetProperty(context, o, k, e);
+ SetProperty(o, k, e);
// c. Increase k by 1.
k = k + 1;
@@ -343,19 +343,20 @@ module array {
// 19. Perform ? Set(O, "length", len - actualDeleteCount + itemCount,
// true).
- SetProperty(context, o, kLengthString, len - actualDeleteCount + itemCount);
+ SetProperty(o, kLengthString, len - actualDeleteCount + itemCount);
return a;
}
// https://tc39.github.io/ecma262/#sec-array.prototype.splice
- javascript builtin ArraySplice(
- context: Context, receiver: Object, ...arguments): Object {
+ transitioning javascript builtin
+ ArrayPrototypeSplice(context: Context, receiver: Object, ...arguments):
+ Object {
// 1. Let O be ? ToObject(this value).
const o: JSReceiver = ToObject(context, receiver);
// 2. Let len be ? ToLength(? Get(O, "length")).
- const len: Number = GetLengthProperty(context, o);
+ const len: Number = GetLengthProperty(o);
// 3. Let relativeStart be ? ToInteger(start).
const start: Object = arguments[0];
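The two shift helpers in this file move the array's tail in opposite directions so that no element is overwritten before it is read: removals shift left-to-right (HandleForwardCase) and net insertions shift right-to-left (HandleBackwardCase). A compact TypeScript sketch of that direction choice (shiftTail is illustrative, and the caller is assumed to write the inserted items and fix up length afterwards):

```typescript
// Move the tail of `o` when splice changes the element count.
function shiftTail(o: unknown[], len: number, start: number,
                   deleteCount: number, itemCount: number): void {
  if (itemCount < deleteCount) {
    // Fewer items than deletions: walk left-to-right.
    for (let k = start; k < len - deleteCount; k++) {
      o[k + itemCount] = o[k + deleteCount];
    }
    o.length = len - deleteCount + itemCount;  // drop the leftover slots
  } else if (itemCount > deleteCount) {
    // More items than deletions: walk right-to-left.
    for (let k = len - deleteCount; k > start; k--) {
      o[k + itemCount - 1] = o[k + deleteCount - 1];
    }
  }
}

const a = [0, 1, 2, 3, 4];
shiftTail(a, a.length, 1, 2, 0);  // the move performed by a.splice(1, 2)
console.log(a);                   // [ 0, 3, 4 ]
```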
diff --git a/deps/v8/src/builtins/array-unshift.tq b/deps/v8/src/builtins/array-unshift.tq
index 3595b139a4..7d7647427a 100644
--- a/deps/v8/src/builtins/array-unshift.tq
+++ b/deps/v8/src/builtins/array-unshift.tq
@@ -2,15 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-module array {
+namespace array {
extern builtin ArrayUnshift(Context, JSFunction, Object, int32);
macro TryFastArrayUnshift(
- context: Context, receiver: Object, arguments: constexpr Arguments):
- never
+ context: Context, receiver: Object, arguments: constexpr Arguments): never
labels Slow {
- EnsureFastJSArray(context, receiver) otherwise Slow;
- const array: JSArray = UnsafeCast<JSArray>(receiver);
+ const array: FastJSArray = Cast<FastJSArray>(receiver) otherwise Slow;
EnsureWriteableFastElements(array);
const map: Map = array.map;
@@ -22,14 +20,14 @@ module array {
Convert<int32>(arguments.length));
}
- macro GenericArrayUnshift(
+ transitioning macro GenericArrayUnshift(
context: Context, receiver: Object,
arguments: constexpr Arguments): Number {
// 1. Let O be ? ToObject(this value).
const object: JSReceiver = ToObject_Inline(context, receiver);
// 2. Let len be ? ToLength(? Get(O, "length")).
- const length: Number = GetLengthProperty(context, object);
+ const length: Number = GetLengthProperty(object);
// 3. Let argCount be the number of actual arguments.
const argCount: Smi = Convert<Smi>(arguments.length);
@@ -53,18 +51,18 @@ module array {
const to: Number = k + argCount - 1;
// iii. Let fromPresent be ? HasProperty(O, from).
- const fromPresent: Boolean = HasProperty(context, object, from);
+ const fromPresent: Boolean = HasProperty(object, from);
// iv. If fromPresent is true, then
if (fromPresent == True) {
// 1. Let fromValue be ? Get(O, from).
- const fromValue: Object = GetProperty(context, object, from);
+ const fromValue: Object = GetProperty(object, from);
// 2. Perform ? Set(O, to, fromValue, true).
- SetProperty(context, object, to, fromValue);
+ SetProperty(object, to, fromValue);
} else {
// 1. Perform ? DeletePropertyOrThrow(O, to).
- DeleteProperty(context, object, to, kStrict);
+ DeleteProperty(object, to, kStrict);
}
// vi. Decrease k by 1.
@@ -79,7 +77,7 @@ module array {
// f. Repeat, while items is not empty
while (j < argCount) {
// ii .Perform ? Set(O, ! ToString(j), E, true).
- SetProperty(context, object, j, arguments[Convert<intptr>(j)]);
+ SetProperty(object, j, arguments[Convert<intptr>(j)]);
// iii. Increase j by 1.
++j;
@@ -88,14 +86,14 @@ module array {
// 5. Perform ? Set(O, "length", len + argCount, true).
const newLength: Number = length + argCount;
- SetProperty(context, object, kLengthString, newLength);
+ SetProperty(object, kLengthString, newLength);
// 6. Return length + argCount.
return newLength;
}
// https://tc39.github.io/ecma262/#sec-array.prototype.unshift
- javascript builtin ArrayPrototypeUnshift(
+ transitioning javascript builtin ArrayPrototypeUnshift(
context: Context, receiver: Object, ...arguments): Object {
try {
TryFastArrayUnshift(context, receiver, arguments) otherwise Baseline;
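GenericArrayUnshift needs nothing beyond a length property and indexed access: it shifts existing properties up by argCount and then writes the new items, which is why unshift also works when applied to a plain array-like:

```typescript
// The generic path only needs `length` plus indexed properties.
const arrayLike: { length: number; [i: number]: string } =
    { length: 2, 0: 'b', 1: 'c' };
Array.prototype.unshift.call(arrayLike, 'a');
console.log(arrayLike);  // { '0': 'a', '1': 'b', '2': 'c', length: 3 }
```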
diff --git a/deps/v8/src/builtins/array.tq b/deps/v8/src/builtins/array.tq
index df4387878d..8e3b3ea704 100644
--- a/deps/v8/src/builtins/array.tq
+++ b/deps/v8/src/builtins/array.tq
@@ -2,7 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-module array {
+#include 'src/builtins/builtins-array-gen.h'
+
+namespace array {
// Naming convention from elements.cc. We have a similar intent but implement
// fastpaths using generics instead of using a class hierarchy for elements
// kinds specific implementations.
@@ -14,7 +16,7 @@ module array {
type FastDoubleElements;
type DictionaryElements;
- macro EnsureWriteableFastElements(array: JSArray) {
+ macro EnsureWriteableFastElements(implicit context: Context)(array: JSArray) {
assert(IsFastElementsKind(array.map.elements_kind));
const elements: FixedArrayBase = array.elements;
@@ -24,13 +26,13 @@ module array {
// extract FixedArrays and don't have to worry about FixedDoubleArrays.
assert(IsFastSmiOrTaggedElementsKind(array.map.elements_kind));
- const length: Smi = array.length_fast;
+ const length: Smi = Cast<Smi>(array.length) otherwise unreachable;
array.elements =
ExtractFixedArray(elements, 0, length, length, kFixedArrays);
assert(array.elements.map != kCOWMap);
}
- macro IsJSArray(o: Object): bool {
+ macro IsJSArray(implicit context: Context)(o: Object): bool {
try {
const array: JSArray = Cast<JSArray>(o) otherwise NotArray;
return true;
@@ -40,6 +42,46 @@ module array {
}
}
+ macro LoadElementOrUndefined(a: FixedArray, i: Smi): Object {
+ const e: Object = a[i];
+ return e == Hole ? Undefined : e;
+ }
+
+ macro LoadElementOrUndefined(a: FixedArray, i: intptr): Object {
+ const e: Object = a[i];
+ return e == Hole ? Undefined : e;
+ }
+
+ macro LoadElementOrUndefined(a: FixedArray, i: constexpr int31): Object {
+ return LoadElementOrUndefined(a, Convert<intptr>(i));
+ }
+
+ macro LoadElementOrUndefined(a: FixedDoubleArray, i: Smi): NumberOrUndefined {
+ try {
+ const f: float64 = LoadDoubleWithHoleCheck(a, i) otherwise IfHole;
+ return AllocateHeapNumberWithValue(f);
+ }
+ label IfHole {
+ return Undefined;
+ }
+ }
+
+ macro LoadElementOrUndefined(a: FixedDoubleArray, i: intptr):
+ NumberOrUndefined {
+ try {
+ const f: float64 = LoadDoubleWithHoleCheck(a, i) otherwise IfHole;
+ return AllocateHeapNumberWithValue(f);
+ }
+ label IfHole {
+ return Undefined;
+ }
+ }
+
+ macro LoadElementOrUndefined(a: FixedDoubleArray, i: constexpr int31):
+ NumberOrUndefined {
+ return LoadElementOrUndefined(a, Convert<intptr>(i));
+ }
+
macro StoreArrayHole(elements: FixedDoubleArray, k: Smi): void {
StoreFixedDoubleArrayHoleSmi(elements, k);
}
@@ -66,4 +108,6 @@ module array {
StoreArrayHole(newElements, to);
}
}
+
+ extern macro SetPropertyLength(implicit context: Context)(Object, Number);
}
diff --git a/deps/v8/src/builtins/base.tq b/deps/v8/src/builtins/base.tq
index 3f5029834d..7887fa1383 100644
--- a/deps/v8/src/builtins/base.tq
+++ b/deps/v8/src/builtins/base.tq
@@ -2,14 +2,30 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include 'src/builtins/builtins-utils-gen.h'
+#include 'src/builtins/builtins.h'
+#include 'src/code-factory.h'
+#include 'src/elements-kind.h'
+#include 'src/heap/factory-inl.h'
+#include 'src/objects.h'
+#include 'src/objects/arguments.h'
+#include 'src/objects/bigint.h'
+
type Arguments constexpr 'CodeStubArguments*';
-type void generates 'void';
-type never generates 'void';
+type void;
+type never;
+
+type Tagged generates 'TNode<Object>' constexpr 'ObjectPtr';
+type Smi extends Tagged generates 'TNode<Smi>' constexpr 'Smi';
+
+// A Smi that is greater than or equal to 0. See TaggedIsPositiveSmi.
+type PositiveSmi extends Smi generates 'TNode<Smi>';
+
+class HeapObject extends Tagged {
+ map_untyped: Tagged;
+}
-type Tagged generates 'TNode<Object>';
-type Smi extends Tagged generates 'TNode<Smi>';
-type HeapObject extends Tagged generates 'TNode<HeapObject>';
-type Object = Smi|HeapObject;
+type Object = Smi | HeapObject;
type int32 generates 'TNode<Int32T>' constexpr 'int32_t';
type uint32 generates 'TNode<Uint32T>' constexpr 'uint32_t';
type int64 generates 'TNode<Int64T>' constexpr 'int64_t';
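This hunk begins moving base.tq from opaque `type` declarations to Torque `class` declarations: a `type ... generates ... constexpr ...` clause only names the CSA and C++ representations, whereas a `class` additionally declares typed fields for which Torque generates accessors. A hedged sketch of what a class field buys (hypothetical helper; JSObject and its `elements` field are declared in a later hunk of this file):

    macro ElementsOf(o: JSObject): FixedArrayBase {
      // Field access on a class-typed value compiles to a typed load.
      return o.elements;
    }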
@@ -18,54 +34,136 @@ type uintptr generates 'TNode<UintPtrT>' constexpr 'uintptr_t';
type float32 generates 'TNode<Float32T>' constexpr 'float';
type float64 generates 'TNode<Float64T>' constexpr 'double';
type bool generates 'TNode<BoolT>' constexpr 'bool';
+type bint generates 'TNode<BInt>' constexpr 'BInt';
type string constexpr 'const char*';
-type int31 extends int32 generates 'TNode<Int32T>' constexpr 'int31_t';
+type int31 extends int32
+ generates 'TNode<Int32T>' constexpr 'int31_t';
type RawPtr generates 'TNode<RawPtrT>' constexpr 'void*';
type AbstractCode extends HeapObject generates 'TNode<AbstractCode>';
type Code extends AbstractCode generates 'TNode<Code>';
-type JSReceiver extends HeapObject generates 'TNode<JSReceiver>';
+type BuiltinPtr extends Smi generates 'TNode<BuiltinPtr>';
type Context extends HeapObject generates 'TNode<Context>';
type NativeContext extends Context generates 'TNode<Context>';
type String extends HeapObject generates 'TNode<String>';
type Oddball extends HeapObject generates 'TNode<Oddball>';
type HeapNumber extends HeapObject generates 'TNode<HeapNumber>';
-type Number = Smi|HeapNumber;
+type Number = Smi | HeapNumber;
type BigInt extends HeapObject generates 'TNode<BigInt>';
-type Numeric = Number|BigInt;
-type Boolean extends Oddball generates 'TNode<Oddball>';
-type JSProxy extends JSReceiver generates 'TNode<JSProxy>';
-type JSObject extends JSReceiver generates 'TNode<JSObject>';
-type JSArgumentsObjectWithLength extends JSObject
- generates 'TNode<JSArgumentsObjectWithLength>';
-type JSArray extends JSArgumentsObjectWithLength
- generates 'TNode<JSArray>';
-type JSFunction extends JSObject generates 'TNode<JSFunction>';
-type JSBoundFunction extends JSObject generates 'TNode<JSBoundFunction>';
-type Callable = JSFunction|JSBoundFunction|JSProxy;
+type Numeric = Number | BigInt;
+
type Map extends HeapObject generates 'TNode<Map>';
+// The accessors for HeapObject's map cannot be declared before Map
+// is declared because forward declarations are not (yet) supported.
+// TODO(danno): Make circular references in classes possible. One way to do that
+// would be to pre-process all class declarations and create bindings for them
+// with an uninitialized class type, and then process them later properly.
+extern operator '.map' macro LoadMap(HeapObject): Map;
+extern transitioning operator '.map=' macro StoreMap(HeapObject, Map);
+
+// This intrinsic should never be called from Torque code. It's used internally
+// by the 'new' operator and only declared here because it's simpler than
+// building the definition from C++.
+intrinsic %Allocate<Class: type>(size: intptr): Class;
+
type FixedArrayBase extends HeapObject generates 'TNode<FixedArrayBase>';
type FixedArray extends FixedArrayBase generates 'TNode<FixedArray>';
type FixedDoubleArray extends FixedArrayBase
generates 'TNode<FixedDoubleArray>';
+
+class JSReceiver extends HeapObject {
+ properties_or_hash: Object;
+}
+
+type Constructor extends JSReceiver generates 'TNode<JSReceiver>';
+type JSProxy extends JSReceiver generates 'TNode<JSProxy>';
+
+class JSObject extends JSReceiver {
+ elements: FixedArrayBase;
+}
+
+class JSArgumentsObjectWithLength extends JSObject {
+ length: Object;
+}
+
+class JSArray extends JSObject {
+ constructor(implicit context: Context)() {
+ super(
+ GetFastPackedSmiElementsJSArrayMap(), kEmptyFixedArray,
+ kEmptyFixedArray);
+ this.length = 0;
+ }
+ IsEmpty(): bool {
+ return this.length == 0;
+ }
+ length: Number;
+}
+
+// A HeapObject with a JSArray map, and either fast packed elements, or fast
+// holey elements when the global NoElementsProtector is not invalidated.
+transient type FastJSArray extends JSArray
+ generates 'TNode<JSArray>';
+
+// A FastJSArray when the global ArraySpeciesProtector is not invalidated.
+transient type FastJSArrayForCopy extends FastJSArray
+ generates 'TNode<JSArray>';
+
+// A FastJSArray when the global ArrayIteratorProtector is not invalidated.
+transient type FastJSArrayWithNoCustomIteration extends FastJSArray
+ generates 'TNode<JSArray>';
+
+type SharedFunctionInfo extends HeapObject
+ generates 'TNode<SharedFunctionInfo>';
+
+class JSFunction extends JSObject {
+ shared_function_info: SharedFunctionInfo;
+ context: Context;
+ feedback_cell: Smi;
+ weak code: Code;
+ weak prototype_or_initial_map: JSReceiver | Map;
+}
+
+extern operator '.formal_parameter_count'
+ macro LoadSharedFunctionInfoFormalParameterCount(SharedFunctionInfo): int32;
+
+class JSBoundFunction extends JSObject {
+ bound_target_function: JSReceiver;
+ bound_this: Object;
+ bound_arguments: FixedArray;
+}
+
+type Callable = JSFunction | JSBoundFunction | JSProxy;
type FixedTypedArrayBase extends FixedArrayBase
generates 'TNode<FixedTypedArrayBase>';
type FixedTypedArray extends FixedTypedArrayBase
generates 'TNode<FixedTypedArray>';
+type SloppyArgumentsElements extends FixedArray
+ generates 'TNode<FixedArray>';
type NumberDictionary extends HeapObject
generates 'TNode<NumberDictionary>';
+// RawObjectCasts should *never* be used anywhere in Torque code except
+// in Torque-based UnsafeCast operators preceded by an appropriate
+// type assert().
+intrinsic %RawObjectCast<A: type>(o: Object): A;
+intrinsic %RawPointerCast<A: type>(p: RawPtr): A;
+intrinsic %RawConstexprCast<To: type, From: type>(f: From): To;
+
type NativeContextSlot generates 'TNode<IntPtrT>' constexpr 'int32_t';
-const FAST_ALIASED_ARGUMENTS_MAP_INDEX: constexpr NativeContextSlot
- generates 'Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX';
-const SLOW_ALIASED_ARGUMENTS_MAP_INDEX: constexpr NativeContextSlot
- generates 'Context::SLOW_ALIASED_ARGUMENTS_MAP_INDEX';
-const STRICT_ARGUMENTS_MAP_INDEX: constexpr NativeContextSlot
- generates 'Context::STRICT_ARGUMENTS_MAP_INDEX';
-const SLOPPY_ARGUMENTS_MAP_INDEX: constexpr NativeContextSlot
- generates 'Context::SLOPPY_ARGUMENTS_MAP_INDEX';
+const ARRAY_BUFFER_FUN_INDEX: constexpr NativeContextSlot
+ generates 'Context::ARRAY_BUFFER_FUN_INDEX';
+const ARRAY_JOIN_STACK_INDEX: constexpr NativeContextSlot
+ generates 'Context::ARRAY_JOIN_STACK_INDEX';
+const OBJECT_FUNCTION_INDEX: constexpr NativeContextSlot
+ generates 'Context::OBJECT_FUNCTION_INDEX';
+const ITERATOR_RESULT_MAP_INDEX: constexpr NativeContextSlot
+ generates 'Context::ITERATOR_RESULT_MAP_INDEX';
+const JS_ARRAY_PACKED_SMI_ELEMENTS_MAP_INDEX: constexpr NativeContextSlot
+ generates 'Context::JS_ARRAY_PACKED_SMI_ELEMENTS_MAP_INDEX';
extern operator '[]' macro LoadContextElement(
NativeContext, NativeContextSlot): Object;
+extern operator '[]=' macro StoreContextElement(
+ NativeContext, NativeContextSlot, Object): void;
extern operator '[]' macro LoadContextElement(Context, intptr): Object;
extern operator '[]' macro LoadContextElement(Context, Smi): Object;
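The renamed NativeContextSlot constants are read through the `[]` operator declared just above; the patch itself uses this pattern in GetObjectFunction further down. A hypothetical variant for one of the new slots:

    macro GetIteratorResultMap(implicit context: Context)(): Map {
      return UnsafeCast<Map>(
          LoadNativeContext(context)[ITERATOR_RESULT_MAP_INDEX]);
    }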
@@ -79,17 +177,23 @@ type JSDataView extends JSArrayBufferView generates 'TNode<JSDataView>';
type InstanceType generates 'TNode<Int32T>' constexpr 'InstanceType';
type ElementsKind generates 'TNode<Int32T>' constexpr 'ElementsKind';
-type LanguageMode generates 'TNode<Smi>' constexpr 'LanguageMode';
+type LanguageMode extends Tagged
+ generates 'TNode<Smi>' constexpr 'LanguageMode';
type ExtractFixedArrayFlags
- generates 'TNode<Smi>' constexpr 'ExtractFixedArrayFlags';
-type ParameterMode generates 'TNode<Int32T>' constexpr 'ParameterMode';
+ generates 'TNode<Smi>'
+ constexpr 'CodeStubAssembler::ExtractFixedArrayFlags';
+type ParameterMode
+ generates 'TNode<Int32T>' constexpr 'ParameterMode';
type RootIndex generates 'TNode<Int32T>' constexpr 'RootIndex';
type WriteBarrierMode
generates 'TNode<Int32T>' constexpr 'WriteBarrierMode';
-type MessageTemplate constexpr 'MessageTemplate::Template';
+type MessageTemplate constexpr 'MessageTemplate';
+type ToIntegerTruncationMode
+constexpr 'CodeStubAssembler::ToIntegerTruncationMode';
+type AllocationFlags constexpr 'AllocationFlags';
-type ToIntegerTruncationMode constexpr 'ToIntegerTruncationMode';
+const kSmiTagSize: constexpr int31 generates 'kSmiTagSize';
const NO_ELEMENTS: constexpr ElementsKind generates 'NO_ELEMENTS';
@@ -126,6 +230,14 @@ const BIGUINT64_ELEMENTS:
const BIGINT64_ELEMENTS:
constexpr ElementsKind generates 'BIGINT64_ELEMENTS';
+const kNone:
+ constexpr AllocationFlags generates 'CodeStubAssembler::kNone';
+const kDoubleAlignment:
+ constexpr AllocationFlags generates 'kDoubleAlignment';
+const kPretenured: constexpr AllocationFlags generates 'kPretenured';
+const kAllowLargeObjectAllocation:
+ constexpr AllocationFlags generates 'kAllowLargeObjectAllocation';
+
type FixedUint8Array extends FixedTypedArray;
type FixedInt8Array extends FixedTypedArray;
type FixedUint16Array extends FixedTypedArray;
@@ -139,16 +251,18 @@ type FixedBigUint64Array extends FixedTypedArray;
type FixedBigInt64Array extends FixedTypedArray;
const kFixedDoubleArrays: constexpr ExtractFixedArrayFlags
- generates 'ExtractFixedArrayFlag::kFixedDoubleArrays';
+ generates 'CodeStubAssembler::ExtractFixedArrayFlag::kFixedDoubleArrays';
const kAllFixedArrays: constexpr ExtractFixedArrayFlags
- generates 'ExtractFixedArrayFlag::kAllFixedArrays';
+ generates 'CodeStubAssembler::ExtractFixedArrayFlag::kAllFixedArrays';
const kFixedArrays: constexpr ExtractFixedArrayFlags
- generates 'ExtractFixedArrayFlag::kFixedArrays';
+ generates 'CodeStubAssembler::ExtractFixedArrayFlag::kFixedArrays';
const kFixedCOWArrayMapRootIndex:
constexpr RootIndex generates 'RootIndex::kFixedCOWArrayMap';
const kEmptyFixedArrayRootIndex:
constexpr RootIndex generates 'RootIndex::kEmptyFixedArray';
+const kTheHoleValueRootIndex:
+ constexpr RootIndex generates 'RootIndex::kTheHoleValue';
const kInvalidArrayLength: constexpr MessageTemplate
generates 'MessageTemplate::kInvalidArrayLength';
@@ -156,11 +270,37 @@ const kCalledNonCallable: constexpr MessageTemplate
generates 'MessageTemplate::kCalledNonCallable';
const kCalledOnNullOrUndefined: constexpr MessageTemplate
generates 'MessageTemplate::kCalledOnNullOrUndefined';
-
+const kInvalidTypedArrayLength: constexpr MessageTemplate
+ generates 'MessageTemplate::kInvalidTypedArrayLength';
+const kIteratorValueNotAnObject: constexpr MessageTemplate
+ generates 'MessageTemplate::kIteratorValueNotAnObject';
+const kNotIterable: constexpr MessageTemplate
+ generates 'MessageTemplate::kNotIterable';
+
+const kMaxArrayIndex:
+ constexpr uint32 generates 'JSArray::kMaxArrayIndex';
+const kTypedArrayMaxByteLength:
+ constexpr uintptr generates 'FixedTypedArrayBase::kMaxByteLength';
const kMaxSafeInteger: constexpr float64 generates 'kMaxSafeInteger';
+const kStringMaxLength: constexpr int31 generates 'String::kMaxLength';
+const kFixedArrayMaxLength:
+ constexpr int31 generates 'FixedArray::kMaxLength';
+
+const kMaxRegularHeapObjectSize: constexpr int31
+ generates 'kMaxRegularHeapObjectSize';
+
+const kMaxNewSpaceFixedArrayElements: constexpr int31
+ generates 'FixedArray::kMaxRegularLength';
+const kSloppyArgumentsArgumentsIndex: constexpr int31
+ generates 'SloppyArgumentsElements::kArgumentsIndex';
+const kSloppyArgumentsContextIndex: constexpr int31
+ generates 'SloppyArgumentsElements::kContextIndex';
+const kSloppyArgumentsParameterMapStart: constexpr int31
+ generates 'SloppyArgumentsElements::kParameterMapStart';
const kTruncateMinusZero: constexpr ToIntegerTruncationMode
- generates 'ToIntegerTruncationMode::kTruncateMinusZero';
+  generates 'CodeStubAssembler::ToIntegerTruncationMode::kTruncateMinusZero';
const kNotTypedArray: constexpr MessageTemplate
generates 'MessageTemplate::kNotTypedArray';
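These MessageTemplate constants pair with the ThrowRangeError/ThrowTypeError externs declared later in this hunk series; a macro whose body ends in such a call is typed `never`. A minimal hypothetical sketch:

    macro FailWithInvalidLength(implicit context: Context)(): never {
      ThrowRangeError(context, kInvalidArrayLength);
    }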
@@ -175,20 +315,31 @@ const kInvalidDataViewAccessorOffset: constexpr MessageTemplate
const kStrictReadOnlyProperty: constexpr MessageTemplate
generates 'MessageTemplate::kStrictReadOnlyProperty';
-extern macro TheHoleConstant(): Oddball;
-extern macro NullConstant(): Oddball;
-extern macro UndefinedConstant(): Oddball;
-extern macro TrueConstant(): Boolean;
-extern macro FalseConstant(): Boolean;
+type Hole extends Oddball generates 'TNode<Oddball>';
+type Null extends Oddball generates 'TNode<Oddball>';
+type Undefined extends Oddball generates 'TNode<Oddball>';
+type True extends Oddball generates 'TNode<Oddball>';
+type False extends Oddball generates 'TNode<Oddball>';
+type Boolean = True | False;
+
+type NumberOrUndefined = Number | Undefined;
+
+extern macro TheHoleConstant(): Hole;
+extern macro NullConstant(): Null;
+extern macro UndefinedConstant(): Undefined;
+extern macro TrueConstant(): True;
+extern macro FalseConstant(): False;
extern macro Int32TrueConstant(): bool;
extern macro Int32FalseConstant(): bool;
+extern macro EmptyStringConstant(): String;
extern macro LengthStringConstant(): String;
-const Hole: Oddball = TheHoleConstant();
-const Null: Oddball = NullConstant();
-const Undefined: Oddball = UndefinedConstant();
-const True: Boolean = TrueConstant();
-const False: Boolean = FalseConstant();
+const Hole: Hole = TheHoleConstant();
+const Null: Null = NullConstant();
+const Undefined: Undefined = UndefinedConstant();
+const True: True = TrueConstant();
+const False: False = FalseConstant();
+const kEmptyString: String = EmptyStringConstant();
const kLengthString: String = LengthStringConstant();
const true: constexpr bool generates 'true';
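Note the two levels here: `true`/`false` are constexpr bool (untagged CSA values), while the new `True`/`False` constants are tagged oddballs whose union forms the heap `Boolean` type. A hypothetical bridging macro, assuming the declarations above:

    macro SelectBoolean(b: bool): Boolean {
      return b ? True : False;
    }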
@@ -197,9 +348,10 @@ const false: constexpr bool generates 'false';
const kStrict: constexpr LanguageMode generates 'LanguageMode::kStrict';
const kSloppy: constexpr LanguageMode generates 'LanguageMode::kSloppy';
-const SMI_PARAMETERS: constexpr ParameterMode generates 'SMI_PARAMETERS';
-const INTPTR_PARAMETERS:
- constexpr ParameterMode generates 'INTPTR_PARAMETERS';
+const SMI_PARAMETERS: constexpr ParameterMode
+ generates 'CodeStubAssembler::SMI_PARAMETERS';
+const INTPTR_PARAMETERS: constexpr ParameterMode
+ generates 'CodeStubAssembler::INTPTR_PARAMETERS';
const SKIP_WRITE_BARRIER:
constexpr WriteBarrierMode generates 'SKIP_WRITE_BARRIER';
@@ -218,23 +370,47 @@ extern macro ToInteger_Inline(
Context, Object, constexpr ToIntegerTruncationMode): Number;
extern macro ToLength_Inline(Context, Object): Number;
extern macro ToNumber_Inline(Context, Object): Number;
+extern macro ToSmiIndex(implicit context: Context)(Object): PositiveSmi
+ labels IfRangeError;
+extern macro ToSmiLength(implicit context: Context)(Object): PositiveSmi
+ labels IfRangeError;
extern macro ToString_Inline(Context, Object): String;
-extern macro GetProperty(Context, Object, Object): Object;
-extern builtin SetProperty(Context, Object, Object, Object);
-extern builtin DeleteProperty(Context, Object, Object, LanguageMode);
-extern builtin HasProperty(Context, JSReceiver, Object): Boolean;
-extern macro HasProperty_Inline(Context, JSReceiver, Object): Boolean;
+extern transitioning macro GetProperty(implicit context: Context)(
+ Object, Object): Object;
+extern transitioning builtin SetProperty(implicit context: Context)(
+ Object, Object, Object);
+extern transitioning builtin SetPropertyInLiteral(implicit context: Context)(
+ Object, Object, Object);
+extern transitioning builtin DeleteProperty(implicit context: Context)(
+ Object, Object, LanguageMode);
+extern transitioning builtin HasProperty(implicit context: Context)(
+ JSReceiver, Object): Boolean;
+extern transitioning macro HasProperty_Inline(implicit context: Context)(
+ JSReceiver, Object): Boolean;
extern macro ThrowRangeError(Context, constexpr MessageTemplate): never;
+extern macro ThrowRangeError(Context, constexpr MessageTemplate, Object): never;
extern macro ThrowTypeError(Context, constexpr MessageTemplate): never;
+extern macro ThrowTypeError(
+ Context, constexpr MessageTemplate, constexpr string): never;
extern macro ThrowTypeError(Context, constexpr MessageTemplate, Object): never;
extern macro ThrowTypeError(
Context, constexpr MessageTemplate, Object, Object, Object): never;
extern macro ArraySpeciesCreate(Context, Object, Number): JSReceiver;
-extern macro InternalArrayCreate(Context, Number): JSArray;
+extern macro ArrayCreate(implicit context: Context)(Number): JSArray;
+extern macro BuildAppendJSArray(
+ constexpr ElementsKind, FastJSArray, Object): void labels Bailout;
+
extern macro EnsureArrayPushable(Map): ElementsKind
labels Bailout;
extern macro EnsureArrayLengthWritable(Map) labels Bailout;
+// TODO: Reduce duplication once varargs are supported in macros.
+extern macro Construct(implicit context: Context)(
+ Constructor, Object): JSReceiver;
+extern macro Construct(implicit context: Context)(
+ Constructor, Object, Object): JSReceiver;
+extern macro Construct(implicit context: Context)(
+ Constructor, Object, Object, Object): JSReceiver;
extern builtin ToObject(Context, Object): JSReceiver;
extern macro ToObject_Inline(Context, Object): JSReceiver;
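These declarations adopt the new `(implicit context: Context)` parameter syntax: a call site inside a macro that itself has an implicit context no longer spells the context argument. A hypothetical sketch, mirroring the GetLengthProperty change at the end of this file:

    transitioning macro LengthOf(implicit context: Context)(o: Object): Object {
      // The implicit context in scope is forwarded to GetProperty.
      return GetProperty(o, kLengthString);
    }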
@@ -243,9 +419,15 @@ extern macro IsTheHole(Object): bool;
extern macro IsString(HeapObject): bool;
extern builtin ToString(Context, Object): String;
-extern runtime NormalizeElements(Context, JSObject);
-extern runtime TransitionElementsKindWithKind(Context, JSObject, Smi);
-extern runtime CreateDataProperty(Context, JSReceiver, Object, Object);
+extern transitioning runtime NormalizeElements(Context, JSObject);
+extern transitioning runtime TransitionElementsKindWithKind(
+ Context, JSObject, Smi);
+extern transitioning runtime CreateDataProperty(implicit context: Context)(
+ JSReceiver, Object, Object);
+
+extern macro LoadBufferObject(RawPtr, constexpr int32): Object;
+extern macro LoadBufferPointer(RawPtr, constexpr int32): RawPtr;
+extern macro LoadBufferSmi(RawPtr, constexpr int32): Smi;
extern macro LoadRoot(constexpr RootIndex): Object;
extern macro StoreRoot(constexpr RootIndex, Object): Object;
@@ -256,11 +438,20 @@ extern builtin StringLessThan(Context, String, String): Boolean;
extern macro StrictEqual(Object, Object): Boolean;
extern macro SmiLexicographicCompare(Smi, Smi): Smi;
+extern runtime ReThrow(Context, Object): never;
+extern runtime ThrowInvalidStringLength(Context): never;
+
+extern operator '==' macro WordEqual(RawPtr, RawPtr): bool;
+extern operator '!=' macro WordNotEqual(RawPtr, RawPtr): bool;
extern operator '<' macro Int32LessThan(int32, int32): bool;
+extern operator '<' macro Uint32LessThan(uint32, uint32): bool;
extern operator '>' macro Int32GreaterThan(int32, int32): bool;
+extern operator '>' macro Uint32GreaterThan(uint32, uint32): bool;
extern operator '<=' macro Int32LessThanOrEqual(int32, int32): bool;
+extern operator '<=' macro Uint32LessThanOrEqual(uint32, uint32): bool;
extern operator '>=' macro Int32GreaterThanOrEqual(int32, int32): bool;
+extern operator '>=' macro Uint32GreaterThanOrEqual(uint32, uint32): bool;
extern operator '==' macro SmiEqual(Smi, Smi): bool;
extern operator '!=' macro SmiNotEqual(Smi, Smi): bool;
@@ -276,9 +467,18 @@ operator '!=' macro ElementsKindNotEqual(
k1: ElementsKind, k2: ElementsKind): bool {
return !ElementsKindEqual(k1, k2);
}
+extern macro IsElementsKindLessThanOrEqual(
+ ElementsKind, constexpr ElementsKind): bool;
+extern macro IsElementsKindGreaterThan(
+ ElementsKind, constexpr ElementsKind): bool;
extern macro IsFastElementsKind(constexpr ElementsKind): constexpr bool;
extern macro IsDoubleElementsKind(constexpr ElementsKind): constexpr bool;
+extern macro IsFastAliasedArgumentsMap(implicit context: Context)(Map): bool;
+extern macro IsSlowAliasedArgumentsMap(implicit context: Context)(Map): bool;
+extern macro IsSloppyArgumentsMap(implicit context: Context)(Map): bool;
+extern macro IsStrictArgumentsMap(implicit context: Context)(Map): bool;
+
extern macro SmiAbove(Smi, Smi): bool;
extern operator '==' macro WordEqual(intptr, intptr): bool;
@@ -286,17 +486,24 @@ extern operator '==' macro WordEqual(uintptr, uintptr): bool;
extern operator '!=' macro WordNotEqual(intptr, intptr): bool;
extern operator '!=' macro WordNotEqual(uintptr, uintptr): bool;
extern operator '<' macro IntPtrLessThan(intptr, intptr): bool;
+extern operator '<' macro UintPtrLessThan(uintptr, uintptr): bool;
extern operator '>' macro IntPtrGreaterThan(intptr, intptr): bool;
+extern operator '>' macro UintPtrGreaterThan(uintptr, uintptr): bool;
extern operator '<=' macro IntPtrLessThanOrEqual(intptr, intptr): bool;
+extern operator '<=' macro UintPtrLessThanOrEqual(uintptr, uintptr): bool;
extern operator '>=' macro IntPtrGreaterThanOrEqual(intptr, intptr): bool;
-extern operator '>' macro UintPtrGreaterThan(uintptr, uintptr): bool;
extern operator '>=' macro UintPtrGreaterThanOrEqual(uintptr, uintptr): bool;
extern operator '==' macro Float64Equal(float64, float64): bool;
extern operator '!=' macro Float64NotEqual(float64, float64): bool;
extern operator '>' macro Float64GreaterThan(float64, float64): bool;
-extern operator '==' macro BranchIfNumberEqual(Number, Number): never
+extern macro BranchIfNumberEqual(Number, Number): never
+ labels Taken, NotTaken;
+operator '==' macro IsNumberEqual(a: Number, b: Number): bool {
+ return (BranchIfNumberEqual(a, b)) ? true : false;
+}
+extern operator '!=' macro BranchIfNumberNotEqual(Number, Number): never
labels Taken, NotTaken;
extern operator '<' macro BranchIfNumberLessThan(Number, Number): never
labels Taken, NotTaken;
@@ -305,8 +512,8 @@ extern operator '<=' macro BranchIfNumberLessThanOrEqual(Number, Number): never
extern operator '>' macro BranchIfNumberGreaterThan(Number, Number): never
labels Taken, NotTaken;
-extern operator '>=' macro BranchIfNumberGreaterThanOrEqual(Number, Number):
- never
+extern operator '>=' macro BranchIfNumberGreaterThanOrEqual(
+ Number, Number): never
labels Taken, NotTaken;
extern operator '==' macro WordEqual(Object, Object): bool;
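With BranchIfNumberEqual demoted from an operator to a plain macro, `==` on Number now resolves to the IsNumberEqual wrapper above, so comparisons yield a bool value rather than a branch. A hypothetical sketch:

    macro SameNumber(a: Number, b: Number): bool {
      return a == b;  // expands through IsNumberEqual / BranchIfNumberEqual
    }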
@@ -316,18 +523,27 @@ extern operator '+' macro SmiAdd(Smi, Smi): Smi;
extern operator '-' macro SmiSub(Smi, Smi): Smi;
extern operator '&' macro SmiAnd(Smi, Smi): Smi;
extern operator '|' macro SmiOr(Smi, Smi): Smi;
-extern operator '>>>' macro SmiShr(Smi, constexpr int31): Smi;
extern operator '<<' macro SmiShl(Smi, constexpr int31): Smi;
+extern operator '>>' macro SmiSar(Smi, constexpr int31): Smi;
extern operator '+' macro IntPtrAdd(intptr, intptr): intptr;
-extern operator '+' macro UintPtrAdd(uintptr, uintptr): uintptr;
extern operator '-' macro IntPtrSub(intptr, intptr): intptr;
-extern operator '>>>' macro WordShr(uintptr, uintptr): uintptr;
+extern operator '*' macro IntPtrMul(intptr, intptr): intptr;
+extern operator '/' macro IntPtrDiv(intptr, intptr): intptr;
extern operator '<<' macro WordShl(intptr, intptr): intptr;
+extern operator '>>' macro WordSar(intptr, intptr): intptr;
extern operator '&' macro WordAnd(intptr, intptr): intptr;
+extern operator '|' macro WordOr(intptr, intptr): intptr;
+
+extern operator '+' macro UintPtrAdd(uintptr, uintptr): uintptr;
+extern operator '-' macro UintPtrSub(uintptr, uintptr): uintptr;
+extern operator '>>>' macro WordShr(uintptr, uintptr): uintptr;
extern operator '&' macro WordAnd(uintptr, uintptr): uintptr;
+extern operator '|' macro WordOr(uintptr, uintptr): uintptr;
extern operator '+' macro Int32Add(int32, int32): int32;
+extern operator '+' macro ConstexprUint32Add(
+ constexpr uint32, constexpr int32): constexpr uint32;
extern operator '-' macro Int32Sub(int32, int32): int32;
extern operator '*' macro Int32Mul(int32, int32): int32;
extern operator '%' macro Int32Mod(int32, int32): int32;
@@ -345,6 +561,8 @@ extern operator '<<' macro Word32Shl(int32, int32): int32;
extern operator '<<' macro Word32Shl(uint32, uint32): uint32;
extern operator '|' macro Word32Or(int32, int32): int32;
extern operator '|' macro Word32Or(uint32, uint32): uint32;
+extern operator '&' macro Word32And(bool, bool): bool;
+extern operator '|' macro Word32Or(bool, bool): bool;
extern operator '+' macro Float64Add(float64, float64): float64;
@@ -359,15 +577,19 @@ macro Max(x: Number, y: Number): Number {
return NumberMax(x, y);
}
+extern operator '<<' macro ConstexprUintPtrShl(
+ constexpr uintptr, constexpr int31): constexpr uintptr;
+extern operator '>>>' macro ConstexprUintPtrShr(
+ constexpr uintptr, constexpr int31): constexpr uintptr;
+
extern macro SmiMax(Smi, Smi): Smi;
extern macro SmiMin(Smi, Smi): Smi;
+extern macro SmiMul(Smi, Smi): Number;
extern operator '!' macro ConstexprBoolNot(constexpr bool): constexpr bool;
extern operator '!' macro Word32BinaryNot(bool): bool;
extern operator '!' macro IsFalse(Boolean): bool;
-extern operator '.map' macro LoadMap(HeapObject): Map;
-extern operator '.map=' macro StoreMap(HeapObject, Map);
extern operator '.instanceType' macro LoadInstanceType(HeapObject):
InstanceType;
@@ -380,13 +602,18 @@ extern operator '[]' macro GetArgumentValue(
extern macro TaggedIsSmi(Object): bool;
extern macro TaggedIsNotSmi(Object): bool;
extern macro TaggedIsPositiveSmi(Object): bool;
+extern macro IsValidPositiveSmi(intptr): bool;
extern macro HeapObjectToJSDataView(HeapObject): JSDataView
labels CastError;
+extern macro HeapObjectToJSTypedArray(HeapObject): JSTypedArray
+ labels CastError;
extern macro TaggedToHeapObject(Object): HeapObject
labels CastError;
extern macro TaggedToSmi(Object): Smi
labels CastError;
+extern macro TaggedToPositiveSmi(Object): PositiveSmi
+ labels CastError;
extern macro HeapObjectToJSArray(HeapObject): JSArray
labels CastError;
extern macro HeapObjectToCallable(HeapObject): Callable
@@ -395,52 +622,213 @@ extern macro HeapObjectToFixedArray(HeapObject): FixedArray
labels CastError;
extern macro HeapObjectToFixedDoubleArray(HeapObject): FixedDoubleArray
labels CastError;
+extern macro HeapObjectToString(HeapObject): String
+ labels CastError;
+extern macro HeapObjectToConstructor(HeapObject): Constructor
+ labels CastError;
+extern macro HeapObjectToHeapNumber(HeapObject): HeapNumber
+ labels CastError;
+extern macro HeapObjectToSloppyArgumentsElements(HeapObject):
+ SloppyArgumentsElements
+ labels CastError;
extern macro TaggedToNumber(Object): Number
labels CastError;
macro CastHeapObject<A: type>(o: HeapObject): A
labels CastError;
+
CastHeapObject<HeapObject>(o: HeapObject): HeapObject
labels CastError {
return o;
}
+
CastHeapObject<FixedArray>(o: HeapObject): FixedArray
labels CastError {
return HeapObjectToFixedArray(o) otherwise CastError;
}
+
CastHeapObject<FixedDoubleArray>(o: HeapObject): FixedDoubleArray
labels CastError {
return HeapObjectToFixedDoubleArray(o) otherwise CastError;
}
+
+CastHeapObject<SloppyArgumentsElements>(o: HeapObject): SloppyArgumentsElements
+ labels CastError {
+ return HeapObjectToSloppyArgumentsElements(o) otherwise CastError;
+}
+
CastHeapObject<JSDataView>(o: HeapObject): JSDataView
labels CastError {
return HeapObjectToJSDataView(o) otherwise CastError;
}
+
+CastHeapObject<JSTypedArray>(o: HeapObject): JSTypedArray
+ labels CastError {
+ if (IsJSTypedArray(o)) return %RawObjectCast<JSTypedArray>(o);
+ goto CastError;
+}
+
CastHeapObject<Callable>(o: HeapObject): Callable
labels CastError {
return HeapObjectToCallable(o) otherwise CastError;
}
+
CastHeapObject<JSArray>(o: HeapObject): JSArray
labels CastError {
return HeapObjectToJSArray(o) otherwise CastError;
}
-macro Cast<A: type>(o: HeapObject): A
+CastHeapObject<Context>(o: HeapObject): Context
+ labels CastError {
+ if (IsContext(o)) return %RawObjectCast<Context>(o);
+ goto CastError;
+}
+
+CastHeapObject<JSObject>(o: HeapObject): JSObject
+ labels CastError {
+ if (IsJSObject(o)) return %RawObjectCast<JSObject>(o);
+ goto CastError;
+}
+
+CastHeapObject<NumberDictionary>(o: HeapObject): NumberDictionary
+ labels CastError {
+ if (IsNumberDictionary(o)) return %RawObjectCast<NumberDictionary>(o);
+ goto CastError;
+}
+
+CastHeapObject<FixedTypedArrayBase>(o: HeapObject): FixedTypedArrayBase
+ labels CastError {
+ if (IsFixedTypedArray(o)) return %RawObjectCast<FixedTypedArrayBase>(o);
+ goto CastError;
+}
+
+CastHeapObject<String>(o: HeapObject): String
+ labels CastError {
+ return HeapObjectToString(o) otherwise CastError;
+}
+
+CastHeapObject<Constructor>(o: HeapObject): Constructor
+ labels CastError {
+ return HeapObjectToConstructor(o) otherwise CastError;
+}
+
+CastHeapObject<HeapNumber>(o: HeapObject): HeapNumber
+ labels CastError {
+ if (IsHeapNumber(o)) return %RawObjectCast<HeapNumber>(o);
+ goto CastError;
+}
+
+CastHeapObject<Map>(implicit context: Context)(o: HeapObject): Map
+ labels CastError {
+ if (IsMap(o)) return %RawObjectCast<Map>(o);
+ goto CastError;
+}
+
+CastHeapObject<JSArgumentsObjectWithLength>(implicit context: Context)(
+ o: HeapObject): JSArgumentsObjectWithLength
+ labels CastError {
+ const map: Map = o.map;
+ try {
+ if (IsFastAliasedArgumentsMap(map)) goto True;
+ if (IsSloppyArgumentsMap(map)) goto True;
+ if (IsStrictArgumentsMap(map)) goto True;
+ if (IsSlowAliasedArgumentsMap(map)) goto True;
+ goto CastError;
+ }
+ label True {
+ return %RawObjectCast<JSArgumentsObjectWithLength>(o);
+ }
+}
+
+CastHeapObject<FastJSArray>(implicit context: Context)(o: HeapObject):
+ FastJSArray
+ labels CastError {
+ const map: Map = o.map;
+ if (!IsJSArrayMap(map)) goto CastError;
+
+  // Bail out if the receiver has slow elements.
+ const elementsKind: ElementsKind = LoadMapElementsKind(map);
+ if (!IsFastElementsKind(elementsKind)) goto CastError;
+
+ // Verify that our prototype is the initial array prototype.
+ if (!IsPrototypeInitialArrayPrototype(map)) goto CastError;
+
+ if (IsNoElementsProtectorCellInvalid()) goto CastError;
+ return %RawObjectCast<FastJSArray>(o);
+}
+
+struct FastJSArrayWitness {
+ array: HeapObject;
+ map: Map;
+}
+
+macro MakeWitness(array: FastJSArray): FastJSArrayWitness {
+ return FastJSArrayWitness{array, array.map};
+}
+
+macro Testify(witness: FastJSArrayWitness): FastJSArray labels CastError {
+ if (witness.array.map != witness.map) goto CastError;
+ // We don't need to check elements kind or whether the prototype
+ // has changed away from the default JSArray prototype, because
+ // if the map remains the same then those properties hold.
+ //
+ // However, we have to make sure there are no elements in the
+ // prototype chain.
+ if (IsNoElementsProtectorCellInvalid()) goto CastError;
+ return %RawObjectCast<FastJSArray>(witness.array);
+}
+
+CastHeapObject<FastJSArrayForCopy>(implicit context: Context)(o: HeapObject):
+ FastJSArrayForCopy
+ labels CastError {
+ if (IsArraySpeciesProtectorCellInvalid()) goto CastError;
+ const a: FastJSArray = Cast<FastJSArray>(o) otherwise CastError;
+ return %RawObjectCast<FastJSArrayForCopy>(o);
+}
+
+CastHeapObject<FastJSArrayWithNoCustomIteration>(implicit context: Context)(
+ o: HeapObject): FastJSArrayWithNoCustomIteration
+ labels CastError {
+ if (IsArrayIteratorProtectorCellInvalid()) goto CastError;
+ const a: FastJSArray = Cast<FastJSArray>(o) otherwise CastError;
+ return %RawObjectCast<FastJSArrayWithNoCustomIteration>(o);
+}
+
+CastHeapObject<JSReceiver>(implicit context: Context)(o: HeapObject): JSReceiver
+ labels CastError {
+ if (IsJSReceiver(o)) return %RawObjectCast<JSReceiver>(o);
+ goto CastError;
+}
+
+CastHeapObject<JSFunction>(implicit context: Context)(o: HeapObject): JSFunction
+ labels CastError {
+ if (IsJSFunction(o)) return %RawObjectCast<JSFunction>(o);
+ goto CastError;
+}
+
+macro Cast<A: type>(implicit context: Context)(o: HeapObject): A
labels CastError {
return CastHeapObject<A>(o) otherwise CastError;
}
// CastHeapObject allows this default implementation to be non-recursive.
// Otherwise the generated CSA code might run into infinite recursion.
-macro Cast<A: type>(o: Object): A
+macro Cast<A: type>(implicit context: Context)(o: Object): A
labels CastError {
return CastHeapObject<A>(TaggedToHeapObject(o) otherwise CastError)
otherwise CastError;
}
+
Cast<Smi>(o: Object): Smi
labels CastError {
return TaggedToSmi(o) otherwise CastError;
}
+
+Cast<PositiveSmi>(o: Object): PositiveSmi
+ labels CastError {
+ return TaggedToPositiveSmi(o) otherwise CastError;
+}
+
Cast<Number>(o: Object): Number
labels CastError {
return TaggedToNumber(o) otherwise CastError;
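The FastJSArrayWitness struct introduced above lets fast paths revalidate a FastJSArray with a single map comparison after an operation that may have transitioned it: MakeWitness snapshots the map, and Testify re-checks it plus the no-elements protector. A hypothetical consumer, assuming a witness taken before some transitioning call:

    macro RevalidatedLength(witness: FastJSArrayWitness): Smi
        labels Bailout {
      const arr: FastJSArray = Testify(witness) otherwise Bailout;
      return arr.length;
    }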
@@ -471,321 +859,258 @@ extern macro ChangeInt32ToIntPtr(int32): intptr; // Sign-extends.
extern macro ChangeUint32ToWord(uint32): uintptr; // Doesn't sign-extend.
extern macro LoadNativeContext(Context): NativeContext;
extern macro LoadJSArrayElementsMap(constexpr ElementsKind, Context): Map;
+extern macro LoadJSArrayElementsMap(ElementsKind, Context): Map;
+extern macro ChangeNonnegativeNumberToUintPtr(Number): uintptr;
extern macro NumberConstant(constexpr float64): Number;
extern macro NumberConstant(constexpr int32): Number;
+extern macro NumberConstant(constexpr uint32): Number;
extern macro IntPtrConstant(constexpr int31): intptr;
extern macro IntPtrConstant(constexpr int32): intptr;
extern macro Int32Constant(constexpr int31): int31;
extern macro Int32Constant(constexpr int32): int32;
extern macro Float64Constant(constexpr int31): float64;
extern macro SmiConstant(constexpr int31): Smi;
+extern macro SmiConstant(constexpr Smi): Smi;
extern macro BoolConstant(constexpr bool): bool;
extern macro StringConstant(constexpr string): String;
extern macro LanguageModeConstant(constexpr LanguageMode): LanguageMode;
extern macro Int32Constant(constexpr ElementsKind): ElementsKind;
extern macro IntPtrConstant(constexpr NativeContextSlot): NativeContextSlot;
+extern macro IntPtrConstant(constexpr intptr): intptr;
+
+extern macro BitcastWordToTaggedSigned(intptr): Smi;
+extern macro BitcastWordToTaggedSigned(uintptr): Smi;
+extern macro BitcastWordToTagged(intptr): Object;
+extern macro BitcastWordToTagged(uintptr): Object;
+extern macro BitcastTaggedToWord(Tagged): intptr;
-macro FromConstexpr<A: type>(o: constexpr int31): A;
-FromConstexpr<intptr>(i: constexpr int31): intptr {
- return IntPtrConstant(i);
+intrinsic %FromConstexpr<To: type, From: type>(b: From): To;
+macro FromConstexpr<To: type, From: type>(o: From): To;
+FromConstexpr<int31, constexpr int31>(i: constexpr int31): int31 {
+ return %FromConstexpr<int31>(i);
}
-FromConstexpr<int31>(i: constexpr int31): int31 {
- return Int32Constant(i);
+FromConstexpr<int32, constexpr int31>(i: constexpr int31): int32 {
+ return %FromConstexpr<int32>(i);
}
-FromConstexpr<int32>(i: constexpr int31): int32 {
- return Int32Constant(i);
+FromConstexpr<int32, constexpr int32>(i: constexpr int32): int32 {
+ return %FromConstexpr<int32>(i);
}
-FromConstexpr<uint32>(i: constexpr int31): uint32 {
- return Unsigned(Int32Constant(i));
+FromConstexpr<intptr, constexpr int31>(i: constexpr int31): intptr {
+ return %FromConstexpr<intptr>(i);
}
-FromConstexpr<uintptr>(i: constexpr int31): uintptr {
- return ChangeUint32ToWord(i);
+FromConstexpr<intptr, constexpr int32>(i: constexpr int32): intptr {
+ return %FromConstexpr<intptr>(i);
}
-FromConstexpr<Smi>(i: constexpr int31): Smi {
- return SmiConstant(i);
+FromConstexpr<intptr, constexpr intptr>(i: constexpr intptr): intptr {
+ return %FromConstexpr<intptr>(i);
}
-FromConstexpr<Number>(i: constexpr int31): Number {
- return SmiConstant(i);
+FromConstexpr<uintptr, constexpr uintptr>(i: constexpr uintptr): uintptr {
+ return %FromConstexpr<uintptr>(i);
}
-FromConstexpr<float64>(i: constexpr int31): float64 {
- return Float64Constant(i);
+FromConstexpr<Smi, constexpr int31>(i: constexpr int31): Smi {
+ return %FromConstexpr<Smi>(i);
+}
+FromConstexpr<String, constexpr string>(s: constexpr string): String {
+ return %FromConstexpr<String>(s);
}
-macro FromConstexpr<A: type>(o: constexpr int32): A;
-FromConstexpr<intptr>(i: constexpr int32): intptr {
- return IntPtrConstant(i);
+FromConstexpr<Number, constexpr uint32>(i: constexpr uint32): Number {
+ return %FromConstexpr<Number>(i);
}
-FromConstexpr<int32>(i: constexpr int32): int32 {
- return Int32Constant(i);
+FromConstexpr<Number, constexpr int32>(i: constexpr int32): Number {
+ return %FromConstexpr<Number>(i);
}
-FromConstexpr<Number>(i: constexpr int32): Number {
- return NumberConstant(i);
+FromConstexpr<Number, constexpr float64>(f: constexpr float64): Number {
+ return %FromConstexpr<Number>(f);
}
-macro FromConstexpr<A: type>(o: constexpr float64): A;
-FromConstexpr<Number>(f: constexpr float64): Number {
- return NumberConstant(f);
+FromConstexpr<Number, constexpr int31>(i: constexpr int31): Number {
+ return %FromConstexpr<Number>(i);
}
-macro FromConstexpr<A: type>(b: constexpr bool): A;
-FromConstexpr<bool>(b: constexpr bool): bool {
+FromConstexpr<Number, constexpr Smi>(s: constexpr Smi): Number {
+ return SmiConstant(s);
+}
+FromConstexpr<Smi, constexpr Smi>(s: constexpr Smi): Smi {
+ return SmiConstant(s);
+}
+FromConstexpr<uint32, constexpr int31>(i: constexpr int31): uint32 {
+ return Unsigned(Int32Constant(i));
+}
+FromConstexpr<uintptr, constexpr int31>(i: constexpr int31): uintptr {
+ return ChangeUint32ToWord(i);
+}
+FromConstexpr<float64, constexpr int31>(i: constexpr int31): float64 {
+ return Float64Constant(i);
+}
+FromConstexpr<bool, constexpr bool>(b: constexpr bool): bool {
return BoolConstant(b);
}
-macro FromConstexpr<A: type>(l: constexpr LanguageMode): A;
-FromConstexpr<LanguageMode>(b: constexpr LanguageMode): LanguageMode {
- return LanguageModeConstant(b);
+FromConstexpr<LanguageMode, constexpr LanguageMode>(m: constexpr LanguageMode):
+ LanguageMode {
+ return %RawObjectCast<LanguageMode>(%FromConstexpr<Smi>(m));
}
-macro FromConstexpr<A: type>(e: constexpr ElementsKind): A;
-FromConstexpr<ElementsKind>(e: constexpr ElementsKind): ElementsKind {
+FromConstexpr<ElementsKind, constexpr ElementsKind>(e: constexpr ElementsKind):
+ ElementsKind {
return Int32Constant(e);
}
-macro FromConstexpr<A: type>(s: constexpr string): A;
-FromConstexpr<String>(s: constexpr string): String {
- return StringConstant(s);
-}
-FromConstexpr<Object>(s: constexpr string): Object {
+FromConstexpr<Object, constexpr string>(s: constexpr string): Object {
return StringConstant(s);
}
-macro FromConstexpr<A: type>(e: constexpr NativeContextSlot): A;
-FromConstexpr<NativeContextSlot>(c: constexpr NativeContextSlot):
- NativeContextSlot {
+FromConstexpr<NativeContextSlot, constexpr NativeContextSlot>(
+ c: constexpr NativeContextSlot): NativeContextSlot {
return IntPtrConstant(c);
}
-macro Convert<A: type>(i: constexpr int31): A {
+macro Convert<To: type, From: type>(i: From): To {
return i;
}
-extern macro ConvertElementsKindToInt(ElementsKind): int32;
-macro Convert<A: type>(elementsKind: ElementsKind): A;
-Convert<int32>(elementsKind: ElementsKind): int32 {
+extern macro ConvertElementsKindToInt(ElementsKind): int32;
+Convert<int32, ElementsKind>(elementsKind: ElementsKind): int32 {
return ConvertElementsKindToInt(elementsKind);
}
-
-macro Convert<A: type>(i: int32): A;
-Convert<Number>(i: int32): Number {
+Convert<Number, int32>(i: int32): Number {
return ChangeInt32ToTagged(i);
}
-Convert<intptr>(i: int32): intptr {
+Convert<intptr, int32>(i: int32): intptr {
return ChangeInt32ToIntPtr(i);
}
-Convert<Smi>(i: int32): Smi {
+Convert<Smi, int32>(i: int32): Smi {
return SmiFromInt32(i);
}
-macro Convert<A: type>(ui: uint32): A;
-Convert<Number>(ui: uint32): Number {
+Convert<Number, uint32>(ui: uint32): Number {
return ChangeUint32ToTagged(ui);
}
-Convert<Smi>(ui: uint32): Smi {
+Convert<Smi, uint32>(ui: uint32): Smi {
return SmiFromInt32(Signed(ui));
}
-Convert<uintptr>(ui: uint32): uintptr {
+Convert<uintptr, uint32>(ui: uint32): uintptr {
return ChangeUint32ToWord(ui);
}
-macro Convert<A: type>(i: intptr): A;
-Convert<int32>(i: intptr): int32 {
+Convert<int32, intptr>(i: intptr): int32 {
return TruncateIntPtrToInt32(i);
}
-Convert<Smi>(i: intptr): Smi {
+Convert<Smi, intptr>(i: intptr): Smi {
return SmiTag(i);
}
-macro Convert<A: type>(ui: uintptr): A;
-Convert<uint32>(ui: uintptr): uint32 {
+Convert<uint32, uintptr>(ui: uintptr): uint32 {
return Unsigned(TruncateIntPtrToInt32(Signed(ui)));
}
-macro Convert<A: type>(s: Smi): A;
-Convert<intptr>(s: Smi): intptr {
+Convert<intptr, Smi>(s: Smi): intptr {
return SmiUntag(s);
}
-Convert<int32>(s: Smi): int32 {
+Convert<int32, Smi>(s: Smi): int32 {
return SmiToInt32(s);
}
-macro Convert<A: type>(h: HeapNumber): A;
-Convert<float64>(h: HeapNumber): float64 {
+Convert<float64, HeapNumber>(h: HeapNumber): float64 {
return LoadHeapNumberValue(h);
}
-macro Convert<A: type>(n: Number): A;
-Convert<float64>(n: Number): float64 {
+Convert<float64, Number>(n: Number): float64 {
return ChangeNumberToFloat64(n);
}
-macro Convert<A: type>(f: float32): A;
-Convert<float64>(f: float32): float64 {
+Convert<uintptr, Number>(n: Number): uintptr {
+ return ChangeNonnegativeNumberToUintPtr(n);
+}
+Convert<float64, float32>(f: float32): float64 {
return ChangeFloat32ToFloat64(f);
}
-macro Convert<A: type>(d: float64): A;
-Convert<Number>(d: float64): Number {
+Convert<Number, float64>(d: float64): Number {
return AllocateHeapNumberWithValue(d);
}
-Convert<float64>(ui: uintptr): float64 {
+Convert<float64, uintptr>(ui: uintptr): float64 {
return ChangeUintPtrToFloat64(ui);
}
-Convert<Number>(ui: uintptr): Number {
+Convert<Number, uintptr>(ui: uintptr): Number {
return ChangeUintPtrToTagged(ui);
}
-Convert<uintptr>(d: float64): uintptr {
+Convert<uintptr, float64>(d: float64): uintptr {
return ChangeFloat64ToUintPtr(d);
}
-macro Convert<A: type>(r: RawPtr): A;
-Convert<uintptr>(r: RawPtr): uintptr {
+Convert<uintptr, intptr>(i: intptr): uintptr {
+ return Unsigned(i);
+}
+Convert<uintptr, RawPtr>(r: RawPtr): uintptr {
return Unsigned(r);
}
-Convert<intptr>(r: RawPtr): intptr {
+Convert<intptr, RawPtr>(r: RawPtr): intptr {
return Signed(r);
}
-
-extern macro UnsafeCastNumberToHeapNumber(Number): HeapNumber;
-extern macro UnsafeCastObjectToFixedArrayBase(Object): FixedArrayBase;
-extern macro UnsafeCastObjectToFixedArray(Object): FixedArray;
-extern macro UnsafeCastObjectToContext(Object): Context;
-extern macro UnsafeCastObjectToFixedDoubleArray(Object): FixedDoubleArray;
-extern macro UnsafeCastObjectToHeapNumber(Object): HeapNumber;
-extern macro UnsafeCastObjectToCallable(Object): Callable;
-extern macro UnsafeCastObjectToSmi(Object): Smi;
-extern macro UnsafeCastObjectToNumber(Object): Number;
-extern macro UnsafeCastObjectToHeapObject(Object): HeapObject;
-extern macro UnsafeCastObjectToJSArray(Object): JSArray;
-extern macro UnsafeCastObjectToFixedTypedArrayBase(Object): FixedTypedArrayBase;
-extern macro UnsafeCastObjectToNumberDictionary(Object): NumberDictionary;
-extern macro UnsafeCastObjectToJSReceiver(Object): JSReceiver;
-extern macro UnsafeCastObjectToJSObject(Object): JSObject;
-extern macro UnsafeCastObjectToMap(Object): Map;
-
-macro UnsafeCast<A: type>(n: Number): A;
-UnsafeCast<HeapNumber>(n: Number): HeapNumber {
- return UnsafeCastNumberToHeapNumber(n);
-}
-macro UnsafeCast<A: type>(o: Object): A;
-UnsafeCast<Object>(o: Object): Object {
- return o;
+Convert<bint, int32>(v: int32): bint {
+ return IntPtrToBInt(Convert<intptr>(v));
}
-UnsafeCast<FixedArray>(o: Object): FixedArray {
- return UnsafeCastObjectToFixedArray(o);
+extern macro IntPtrToBInt(intptr): bint;
+Convert<bint, intptr>(v: intptr): bint {
+ return IntPtrToBInt(v);
}
-UnsafeCast<FixedDoubleArray>(o: Object): FixedDoubleArray {
- return UnsafeCastObjectToFixedDoubleArray(o);
-}
-UnsafeCast<HeapNumber>(o: Object): HeapNumber {
- return UnsafeCastObjectToHeapNumber(o);
-}
-UnsafeCast<Callable>(o: Object): Callable {
- return UnsafeCastObjectToCallable(o);
-}
-UnsafeCast<Smi>(o: Object): Smi {
- return UnsafeCastObjectToSmi(o);
-}
-UnsafeCast<Number>(o: Object): Number {
- return UnsafeCastObjectToNumber(o);
-}
-UnsafeCast<HeapObject>(o: Object): HeapObject {
- return UnsafeCastObjectToHeapObject(o);
-}
-UnsafeCast<JSArray>(o: Object): JSArray {
- return UnsafeCastObjectToJSArray(o);
-}
-UnsafeCast<FixedTypedArrayBase>(o: Object): FixedTypedArrayBase {
- return UnsafeCastObjectToFixedTypedArrayBase(o);
-}
-UnsafeCast<NumberDictionary>(o: Object): NumberDictionary {
- return UnsafeCastObjectToNumberDictionary(o);
-}
-UnsafeCast<JSReceiver>(o: Object): JSReceiver {
- return UnsafeCastObjectToJSReceiver(o);
-}
-UnsafeCast<JSObject>(o: Object): JSObject {
- return UnsafeCastObjectToJSObject(o);
-}
-UnsafeCast<Map>(o: Object): Map {
- return UnsafeCastObjectToMap(o);
-}
-UnsafeCast<FixedArrayBase>(o: Object): FixedArrayBase {
- return UnsafeCastObjectToFixedArrayBase(o);
-}
-UnsafeCast<Context>(o: Object): Context {
- return UnsafeCastObjectToContext(o);
+extern macro SmiToBInt(Smi): bint;
+Convert<bint, Smi>(v: Smi): bint {
+ return SmiToBInt(v);
}
-// RawCasts should *never* be used anywhere in Torque code except for
-// in Torque-based UnsafeCast operators preceeded by an appropriate
-// type check().
-extern macro RawCastObjectToJSArgumentsObjectWithLength(Object):
- JSArgumentsObjectWithLength;
-
-macro BranchIfJSArgumentsObjectWithLength(context: Context, o: Object): never
+macro BranchIf<A: type, B: type>(implicit context: Context)(o: B): never
labels True, False {
- const heapObject: HeapObject = Cast<HeapObject>(o) otherwise False;
- const map: Map = heapObject.map;
- const nativeContext: NativeContext = LoadNativeContext(context);
- if (map == nativeContext[FAST_ALIASED_ARGUMENTS_MAP_INDEX]) goto True;
- if (map == nativeContext[SLOW_ALIASED_ARGUMENTS_MAP_INDEX]) goto True;
- if (map == nativeContext[STRICT_ARGUMENTS_MAP_INDEX]) goto True;
- if (map != nativeContext[SLOPPY_ARGUMENTS_MAP_INDEX]) goto False;
+ Cast<A>(o) otherwise False;
goto True;
}
-macro UnsafeCast<A: type>(context: Context, o: Object): A;
-UnsafeCast<JSArgumentsObjectWithLength>(
- context: Context, o: Object): JSArgumentsObjectWithLength {
- assert(BranchIfJSArgumentsObjectWithLength(context, o));
- return RawCastObjectToJSArgumentsObjectWithLength(o);
+macro BranchIfNot<A: type, B: type>(implicit context: Context)(o: B): never
+ labels True, False {
+ Cast<A>(o) otherwise True;
+ goto False;
}
-macro Cast<A: type>(context: Context, o: Object): A
- labels CastError;
-Cast<JSArgumentsObjectWithLength>(context: Context, o: Object):
- JSArgumentsObjectWithLength
- labels CastError {
- if (BranchIfJSArgumentsObjectWithLength(context, o)) {
- return UnsafeCast<JSArgumentsObjectWithLength>(context, o);
- } else {
- goto CastError;
- }
+macro Is<A: type, B: type>(implicit context: Context)(o: B): bool {
+ return (BranchIf<A, B>(o)) ? true : false;
}
-const kCOWMap: Map = UnsafeCast<Map>(LoadRoot(kFixedCOWArrayMapRootIndex));
-const kEmptyFixedArray: FixedArrayBase =
- UnsafeCast<FixedArrayBase>(LoadRoot(kEmptyFixedArrayRootIndex));
-
-extern macro BranchIfFastJSArray(Object, Context): never
- labels Taken, NotTaken;
-extern macro BranchIfNotFastJSArray(Object, Context): never
- labels Taken, NotTaken;
+macro UnsafeCast<A: type>(implicit context: Context)(o: Object): A {
+ assert(Is<A>(o));
+ return %RawObjectCast<A>(o);
+}
-macro EnsureFastJSArray(context: Context, object: Object) labels Bailout {
- if (BranchIfNotFastJSArray(object, context)) goto Bailout;
+UnsafeCast<Object>(o: Object): Object {
+ return o;
}
-extern macro IsPrototypeInitialArrayPrototype(Context, Map): bool;
+const kCOWMap: Map = %RawObjectCast<Map>(LoadRoot(kFixedCOWArrayMapRootIndex));
+const kEmptyFixedArray: FixedArrayBase =
+ %RawObjectCast<FixedArrayBase>(LoadRoot(kEmptyFixedArrayRootIndex));
+
+extern macro IsPrototypeInitialArrayPrototype(implicit context: Context)(Map):
+ bool;
extern macro IsNoElementsProtectorCellInvalid(): bool;
+extern macro IsArrayIteratorProtectorCellInvalid(): bool;
extern macro IsArraySpeciesProtectorCellInvalid(): bool;
extern macro IsTypedArraySpeciesProtectorCellInvalid(): bool;
extern macro IsPromiseSpeciesProtectorCellInvalid(): bool;
-extern operator '.buffer' macro LoadTypedArrayBuffer(JSTypedArray):
- JSArrayBuffer;
+extern operator '.buffer' macro
+TypedArrayBuiltinsAssembler::LoadTypedArrayBuffer(JSTypedArray): JSArrayBuffer;
-extern operator '.data_ptr' macro LoadDataPtr(JSTypedArray): RawPtr;
+extern operator '.data_ptr' macro TypedArrayBuiltinsAssembler::LoadDataPtr(
+ JSTypedArray): RawPtr;
extern operator '.elements_kind' macro LoadMapElementsKind(Map): ElementsKind;
extern operator '.elements_kind' macro LoadElementsKind(JSTypedArray):
ElementsKind;
-extern operator '.elements' macro LoadElements(JSObject): FixedArrayBase;
-extern operator '.elements=' macro StoreElements(JSObject, FixedArrayBase);
-
extern operator '.length' macro LoadJSTypedArrayLength(JSTypedArray): Smi;
-extern operator '.length' macro LoadJSArrayLength(JSArray): Number;
-extern operator '.length' macro LoadJSArgumentsObjectWithLength(
- JSArgumentsObjectWithLength): Object;
-extern operator '.length_fast' macro LoadFastJSArrayLength(JSArray): Smi;
-extern operator '.length=' macro StoreJSArrayLength(JSArray, Smi);
+extern operator '.length' macro LoadFastJSArrayLength(FastJSArray): Smi;
extern operator '.length' macro LoadFixedArrayBaseLength(FixedArrayBase): Smi;
+extern operator '.length_intptr' macro LoadAndUntagFixedArrayBaseLength(
+ FixedArrayBase): intptr;
extern operator '[]' macro LoadFixedArrayElement(FixedArray, intptr): Object;
extern operator '[]' macro LoadFixedArrayElement(FixedArray, Smi): Object;
extern operator '[]' macro LoadFixedArrayElement(
FixedArray, constexpr int31): Object;
extern operator '[]=' macro StoreFixedArrayElement(
- FixedArray, intptr, Object): void;
+ FixedArray, intptr, Smi): void;
+extern operator '[]=' macro StoreFixedArrayElement(
+ FixedArray, intptr, HeapObject): void;
+extern operator '[]=' macro StoreFixedArrayElement(
+ FixedArray, constexpr int31, Smi): void;
extern operator '[]=' macro StoreFixedArrayElement(
- FixedArray, constexpr int31, Object): void;
+ FixedArray, constexpr int31, HeapObject): void;
extern operator '[]=' macro StoreFixedArrayElementSmi(
FixedArray, Smi, Object): void;
operator '[]=' macro StoreFixedDoubleArrayNumber(
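Convert and FromConstexpr now carry explicit <To, From> type parameters in their definitions, but call sites still name only the destination type and let the source be inferred. A hypothetical round trip using conversions defined above:

    macro SmiToWordAndBack(s: Smi): Smi {
      const i: intptr = Convert<intptr>(s);  // resolves Convert<intptr, Smi>
      return Convert<Smi>(i);                // resolves Convert<Smi, intptr>
    }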
@@ -803,15 +1128,19 @@ extern macro Float64SilenceNaN(float64): float64;
extern macro StoreFixedDoubleArrayElement(
FixedDoubleArray, Object, float64, constexpr ParameterMode);
+extern macro StoreFixedArrayElement(
+ FixedArray, intptr, Object, constexpr WriteBarrierMode): void;
+
macro StoreFixedDoubleArrayElementWithSmiIndex(
array: FixedDoubleArray, index: Smi, value: float64) {
StoreFixedDoubleArrayElement(array, index, value, SMI_PARAMETERS);
}
+extern macro GetNumberDictionaryNumberOfElements(NumberDictionary): Smi;
extern macro BasicLoadNumberDictionaryElement(NumberDictionary, intptr): Object
labels NotData, IfHole;
extern macro BasicStoreNumberDictionaryElement(NumberDictionary, intptr, Object)
-labels NotData, IfHole, ReadOnly;
+ labels NotData, IfHole, ReadOnly;
extern macro IsFastElementsKind(ElementsKind): bool;
extern macro IsDoubleElementsKind(ElementsKind): bool;
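BasicLoadNumberDictionaryElement reports its outcome through the NotData and IfHole labels; callers bind them with try/label blocks. A hypothetical consumer that flattens both failure cases to Undefined:

    macro LoadDataElementOrUndefined(
        dict: NumberDictionary, index: intptr): Object {
      try {
        return BasicLoadNumberDictionaryElement(dict, index)
            otherwise NotData, IfHole;
      }
      label NotData {
        return Undefined;
      }
      label IfHole {
        return Undefined;
      }
    }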
@@ -843,9 +1172,11 @@ macro AllowNonNumberElements(kind: ElementsKind): ElementsKind {
extern macro AllocateZeroedFixedArray(intptr): FixedArray;
extern macro AllocateZeroedFixedDoubleArray(intptr): FixedDoubleArray;
-
extern macro CalculateNewElementsCapacity(Smi): Smi;
+extern macro CalculateNewElementsCapacity(intptr): intptr;
+extern macro AllocateFixedArrayWithHoles(
+ intptr, constexpr AllocationFlags): FixedArray;
extern macro CopyFixedArrayElements(
constexpr ElementsKind, FixedArray, constexpr ElementsKind, FixedArray,
intptr, intptr, intptr): void;
@@ -855,30 +1186,53 @@ extern macro CopyFixedArrayElements(
extern macro AllocateJSArray(constexpr ElementsKind, Map, intptr, Smi): JSArray;
extern macro AllocateJSArray(constexpr ElementsKind, Map, Smi, Smi): JSArray;
-extern macro IsElementsKindGreaterThan(
- ElementsKind, constexpr ElementsKind): bool;
+
+extern macro AllocateJSObjectFromMap(Map): JSObject;
extern operator '[]=' macro StoreFixedDoubleArrayElementSmi(
FixedDoubleArray, Smi, float64): void;
extern macro LoadDoubleWithHoleCheck(FixedDoubleArray, Smi): float64
labels IfHole;
+extern macro LoadDoubleWithHoleCheck(FixedDoubleArray, intptr): float64
+ labels IfHole;
extern macro StoreFixedDoubleArrayHoleSmi(FixedDoubleArray, Smi): void;
-extern macro Call(Context, Callable, Object): Object;
-extern macro Call(Context, Callable, Object, Object): Object;
-extern macro Call(Context, Callable, Object, Object, Object): Object;
-extern macro Call(Context, Callable, Object, Object, Object, Object): Object;
-extern macro Call(
+macro GetObjectFunction(implicit context: Context)(): JSFunction {
+ return UnsafeCast<JSFunction>(
+ LoadNativeContext(context)[OBJECT_FUNCTION_INDEX]);
+}
+macro GetArrayBufferFunction(implicit context: Context)(): JSFunction {
+ return UnsafeCast<JSFunction>(
+ LoadNativeContext(context)[ARRAY_BUFFER_FUN_INDEX]);
+}
+
+macro GetFastPackedSmiElementsJSArrayMap(implicit context: Context)(): Map {
+ return UnsafeCast<Map>(
+ LoadNativeContext(context)[JS_ARRAY_PACKED_SMI_ELEMENTS_MAP_INDEX]);
+}
+
+extern transitioning macro Call(Context, Callable, Object): Object;
+extern transitioning macro Call(Context, Callable, Object, Object): Object;
+extern transitioning macro Call(
+ Context, Callable, Object, Object, Object): Object;
+extern transitioning macro Call(
+ Context, Callable, Object, Object, Object, Object): Object;
+extern transitioning macro Call(
Context, Callable, Object, Object, Object, Object, Object): Object;
-extern macro Call(
+extern transitioning macro Call(
Context, Callable, Object, Object, Object, Object, Object, Object): Object;
+extern builtin CloneFastJSArray(Context, FastJSArrayForCopy): JSArray;
extern macro ExtractFixedArray(FixedArrayBase, Smi, Smi, Smi): FixedArrayBase;
extern macro ExtractFixedArray(
FixedArrayBase, Smi, Smi, Smi,
constexpr ExtractFixedArrayFlags): FixedArrayBase;
+extern macro ExtractFixedArray(
+ FixedArray, intptr, intptr, intptr,
+ constexpr ExtractFixedArrayFlags): FixedArray;
+
extern builtin ExtractFastJSArray(Context, JSArray, Smi, Smi): JSArray;
extern macro MoveElements(
@@ -914,7 +1268,8 @@ macro TorqueCopyElements(
macro LoadElementNoHole<T: type>(a: JSArray, index: Smi): Object
labels IfHole;
-LoadElementNoHole<FixedArray>(a: JSArray, index: Smi): Object
+LoadElementNoHole<FixedArray>(implicit context: Context)(
+ a: JSArray, index: Smi): Object
labels IfHole {
try {
let elements: FixedArray =
@@ -930,7 +1285,8 @@ LoadElementNoHole<FixedArray>(a: JSArray, index: Smi): Object
}
}
-LoadElementNoHole<FixedDoubleArray>(a: JSArray, index: Smi): Object
+LoadElementNoHole<FixedDoubleArray>(implicit context: Context)(
+ a: JSArray, index: Smi): Object
labels IfHole {
try {
let elements: FixedDoubleArray =
@@ -943,23 +1299,30 @@ LoadElementNoHole<FixedDoubleArray>(a: JSArray, index: Smi): Object
}
}
-extern macro TransitionElementsKind(JSObject, Map, ElementsKind, ElementsKind):
- void
- labels Bailout;
+extern macro TransitionElementsKind(
+ JSObject, Map, ElementsKind, ElementsKind): void labels Bailout;
extern macro IsCallable(HeapObject): bool;
extern macro IsJSArray(HeapObject): bool;
+extern macro IsMap(HeapObject): bool;
+extern macro IsJSFunction(HeapObject): bool;
+extern macro IsJSObject(HeapObject): bool;
+extern macro IsJSTypedArray(HeapObject): bool;
+extern macro IsNumberDictionary(HeapObject): bool;
+extern macro IsFixedTypedArray(HeapObject): bool;
+extern macro IsContext(HeapObject): bool;
extern macro IsJSReceiver(HeapObject): bool;
extern macro TaggedIsCallable(Object): bool;
extern macro IsDetachedBuffer(JSArrayBuffer): bool;
extern macro IsHeapNumber(HeapObject): bool;
extern macro IsFixedArray(HeapObject): bool;
extern macro IsNumber(Object): bool;
+extern macro IsJSArrayMap(Map): bool;
extern macro IsExtensibleMap(Map): bool;
extern macro IsCustomElementsReceiverInstanceType(int32): bool;
-extern macro IsFastJSArray(Object, Context): bool;
+extern macro IsFastJSArrayWithNoCustomIteration(implicit context: Context)(
+ Object): bool;
extern macro Typeof(Object): Object;
-extern macro LoadTargetFromFrame(): JSFunction;
// Return true iff number is NaN.
macro NumberIsNaN(number: Number): bool {
@@ -974,6 +1337,7 @@ macro NumberIsNaN(number: Number): bool {
}
}
+extern macro GotoIfForceSlowPath() labels Taken;
extern macro BranchIfToBooleanIsTrue(Object): never
labels Taken, NotTaken;
@@ -999,20 +1363,79 @@ macro ToIndex(input: Object, context: Context): Number
return value;
}
-macro GetLengthProperty(context: Context, o: Object): Number {
+transitioning macro GetLengthProperty(implicit context: Context)(o: Object):
+ Number {
try {
- return (Cast<JSArray>(o) otherwise CheckArgs).length;
- }
- label CheckArgs {
- const a: JSArgumentsObjectWithLength =
- Cast<JSArgumentsObjectWithLength>(context, o) otherwise Slow;
- const length: Object = a.length;
- return Cast<Smi>(length) otherwise goto ToLength(length);
- }
- label Slow deferred {
- goto ToLength(GetProperty(context, o, kLengthString));
+ typeswitch (o) {
+ case (a: JSArray): {
+ return a.length;
+ }
+ case (a: JSArgumentsObjectWithLength): {
+ goto ToLength(a.length);
+ }
+ case (Object): deferred {
+ goto ToLength(GetProperty(o, kLengthString));
+ }
+ }
}
label ToLength(length: Object) deferred {
return ToLength_Inline(context, length);
}
}
+
+extern macro NumberToString(Number): String;
+extern macro HasOnlyOneByteChars(InstanceType): bool;
+extern macro AllocateSeqOneByteString(implicit context: Context)(uint32):
+ String;
+extern macro AllocateSeqTwoByteString(implicit context: Context)(uint32):
+ String;
+extern macro TryIntPtrAdd(intptr, intptr): intptr
+ labels IfOverflow;
+
+extern builtin ObjectToString(Context, Object): Object;
+extern builtin StringRepeat(Context, String, Number): String;
+
+struct KeyValuePair {
+ key: Object;
+ value: Object;
+}
+
+// Macro definitions for compatibility that expose functionality to the CSA
+// using "legacy" APIs. In Torque code, these should not be used.
+macro IsFastJSArray(o: Object, context: Context): bool {
+ try {
+ // Long-term, it's likely not a good idea to have this slow-path test here,
+ // since it fundamentally breaks the type system.
+ GotoIfForceSlowPath() otherwise ForceSlow;
+ }
+ label ForceSlow {
+ return false;
+ }
+
+ return Is<FastJSArray>(o);
+}
+
+macro BranchIfFastJSArray(o: Object, context: Context): never
+ labels True, False {
+ // Long-term, it's likely not a good idea to have this slow-path test here,
+ // since it fundamentally breaks the type system.
+ GotoIfForceSlowPath() otherwise False;
+ BranchIf<FastJSArray>(o) otherwise True, False;
+}
+
+macro BranchIfNotFastJSArray(o: Object, context: Context): never
+ labels True, False {
+ BranchIfNot<FastJSArray>(o) otherwise True, False;
+}
+
+macro BranchIfFastJSArrayForCopy(o: Object, context: Context): never
+ labels True, False {
+ // Long-term, it's likely not a good idea to have this slow-path test here,
+ // since it fundamentally breaks the type system.
+ GotoIfForceSlowPath() otherwise False;
+ BranchIf<FastJSArrayForCopy>(o) otherwise True, False;
+}
+
+macro IsFastJSArrayWithNoCustomIteration(context: Context, o: Object): bool {
+ return Is<FastJSArrayWithNoCustomIteration>(o);
+}
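
The new GetLengthProperty above replaces the old Cast/label chain with a typeswitch: a JSArray's length is returned directly, arguments objects jump to a ToLength coercion, and the generic case defers to a GetProperty slow path. A rough C++17 analog of that dispatch, with simplified stand-in types and a toy ToLength (all names below are illustrative, not V8 API):

    #include <cstdio>
    #include <variant>

    struct JSArray           { double length; };          // length already valid
    struct JSArgumentsObject { double length; };          // length needs ToLength
    struct GenericObject     { double length_property; }; // generic slow path
    using Object = std::variant<JSArray, JSArgumentsObject, GenericObject>;

    // Toy ToLength: clamp to [0, 2^53 - 1]; NaN and negatives map to 0.
    double ToLength(double v) {
      if (!(v > 0)) return 0;
      const double kMax = 9007199254740991.0;  // 2^53 - 1
      return v > kMax ? kMax : v;
    }

    double GetLengthProperty(const Object& o) {
      if (auto* a = std::get_if<JSArray>(&o)) return a->length;
      if (auto* a = std::get_if<JSArgumentsObject>(&o)) return ToLength(a->length);
      return ToLength(std::get<GenericObject>(o).length_property);  // deferred
    }

    int main() {
      std::printf("%.0f\n", GetLengthProperty(Object{JSArray{3.0}}));        // 3
      std::printf("%.0f\n", GetLengthProperty(Object{GenericObject{-5.0}})); // 0
    }
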
diff --git a/deps/v8/src/builtins/builtins-api.cc b/deps/v8/src/builtins/builtins-api.cc
index 8bc5c0b5ac..e1c76c0fd9 100644
--- a/deps/v8/src/builtins/builtins-api.cc
+++ b/deps/v8/src/builtins/builtins-api.cc
@@ -21,27 +21,27 @@ namespace {
// Returns the holder JSObject if the function can legally be called with this
 // receiver. Returns a null JSReceiver if the call is illegal.
// TODO(dcarney): CallOptimization duplicates this logic, merge.
-JSReceiver* GetCompatibleReceiver(Isolate* isolate, FunctionTemplateInfo* info,
- JSReceiver* receiver) {
- Object* recv_type = info->signature();
+JSReceiver GetCompatibleReceiver(Isolate* isolate, FunctionTemplateInfo info,
+ JSReceiver receiver) {
+ Object recv_type = info->signature();
// No signature, return holder.
if (!recv_type->IsFunctionTemplateInfo()) return receiver;
// A Proxy cannot have been created from the signature template.
- if (!receiver->IsJSObject()) return nullptr;
+ if (!receiver->IsJSObject()) return JSReceiver();
- JSObject* js_obj_receiver = JSObject::cast(receiver);
- FunctionTemplateInfo* signature = FunctionTemplateInfo::cast(recv_type);
+ JSObject js_obj_receiver = JSObject::cast(receiver);
+ FunctionTemplateInfo signature = FunctionTemplateInfo::cast(recv_type);
// Check the receiver. Fast path for receivers with no hidden prototypes.
if (signature->IsTemplateFor(js_obj_receiver)) return receiver;
- if (!js_obj_receiver->map()->has_hidden_prototype()) return nullptr;
+ if (!js_obj_receiver->map()->has_hidden_prototype()) return JSReceiver();
for (PrototypeIterator iter(isolate, js_obj_receiver, kStartAtPrototype,
PrototypeIterator::END_AT_NON_HIDDEN);
!iter.IsAtEnd(); iter.Advance()) {
- JSObject* current = iter.GetCurrent<JSObject>();
+ JSObject current = iter.GetCurrent<JSObject>();
if (signature->IsTemplateFor(current)) return current;
}
- return nullptr;
+ return JSReceiver();
}
template <bool is_construct>
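
The GetCompatibleReceiver rewrite above is part of the migration from Object* pointers to value types: tagged references become small structs wrapping a raw Address, so the "no holder" result is a default-constructed JSReceiver tested with is_null() rather than a raw nullptr. A toy sketch of that pattern (simplified stand-ins, not the real V8 classes):

    #include <cassert>
    #include <cstdint>

    using Address = uintptr_t;

    struct JSReceiver {
      Address ptr_ = 0;                 // 0 plays the role of the null sentinel
      JSReceiver() = default;
      explicit JSReceiver(Address a) : ptr_(a) {}
      bool is_null() const { return ptr_ == 0; }
    };

    JSReceiver GetCompatibleReceiver(bool compatible, JSReceiver receiver) {
      return compatible ? receiver : JSReceiver();   // was: return nullptr;
    }

    int main() {
      JSReceiver r(0x1000);
      assert(!GetCompatibleReceiver(true, r).is_null());
      assert(GetCompatibleReceiver(false, r).is_null());
    }
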
@@ -50,23 +50,24 @@ V8_WARN_UNUSED_RESULT MaybeHandle<Object> HandleApiCallHelper(
Handle<HeapObject> new_target, Handle<FunctionTemplateInfo> fun_data,
Handle<Object> receiver, BuiltinArguments args) {
Handle<JSReceiver> js_receiver;
- JSReceiver* raw_holder;
+ JSReceiver raw_holder;
if (is_construct) {
DCHECK(args.receiver()->IsTheHole(isolate));
- if (fun_data->instance_template()->IsUndefined(isolate)) {
+ if (fun_data->GetInstanceTemplate()->IsUndefined(isolate)) {
v8::Local<ObjectTemplate> templ =
ObjectTemplate::New(reinterpret_cast<v8::Isolate*>(isolate),
ToApiHandle<v8::FunctionTemplate>(fun_data));
- fun_data->set_instance_template(*Utils::OpenHandle(*templ));
+ FunctionTemplateInfo::SetInstanceTemplate(isolate, fun_data,
+ Utils::OpenHandle(*templ));
}
Handle<ObjectTemplateInfo> instance_template(
- ObjectTemplateInfo::cast(fun_data->instance_template()), isolate);
+ ObjectTemplateInfo::cast(fun_data->GetInstanceTemplate()), isolate);
ASSIGN_RETURN_ON_EXCEPTION(
isolate, js_receiver,
ApiNatives::InstantiateObject(isolate, instance_template,
Handle<JSReceiver>::cast(new_target)),
Object);
- args[0] = *js_receiver;
+ args.set_at(0, *js_receiver);
DCHECK_EQ(*js_receiver, *args.receiver());
raw_holder = *js_receiver;
@@ -89,22 +90,21 @@ V8_WARN_UNUSED_RESULT MaybeHandle<Object> HandleApiCallHelper(
raw_holder = GetCompatibleReceiver(isolate, *fun_data, *js_receiver);
- if (raw_holder == nullptr) {
+ if (raw_holder.is_null()) {
// This function cannot be called with the given receiver. Abort!
THROW_NEW_ERROR(
isolate, NewTypeError(MessageTemplate::kIllegalInvocation), Object);
}
}
- Object* raw_call_data = fun_data->call_code();
+ Object raw_call_data = fun_data->call_code();
if (!raw_call_data->IsUndefined(isolate)) {
DCHECK(raw_call_data->IsCallHandlerInfo());
- CallHandlerInfo* call_data = CallHandlerInfo::cast(raw_call_data);
- Object* data_obj = call_data->data();
-
+ CallHandlerInfo call_data = CallHandlerInfo::cast(raw_call_data);
+ Object data_obj = call_data->data();
FunctionCallbackArguments custom(isolate, data_obj, *function, raw_holder,
- *new_target, &args[0] - 1,
+ *new_target, args.address_of_arg_at(1),
args.length() - 1);
Handle<Object> result = custom.Call(call_data);
@@ -146,13 +146,13 @@ namespace {
class RelocatableArguments : public BuiltinArguments, public Relocatable {
public:
- RelocatableArguments(Isolate* isolate, int length, Object** arguments)
+ RelocatableArguments(Isolate* isolate, int length, Address* arguments)
: BuiltinArguments(length, arguments), Relocatable(isolate) {}
inline void IterateInstance(RootVisitor* v) override {
if (length() == 0) return;
- v->VisitRootPointers(Root::kRelocatable, nullptr, lowest_address(),
- highest_address() + 1);
+ v->VisitRootPointers(Root::kRelocatable, nullptr, first_slot(),
+ last_slot() + 1);
}
private:
@@ -167,6 +167,8 @@ MaybeHandle<Object> Builtins::InvokeApiFunction(Isolate* isolate,
Handle<Object> receiver,
int argc, Handle<Object> args[],
Handle<HeapObject> new_target) {
+ RuntimeCallTimerScope timer(isolate,
+ RuntimeCallCounterId::kInvokeApiFunction);
DCHECK(function->IsFunctionTemplateInfo() ||
(function->IsJSFunction() &&
JSFunction::cast(*function)->shared()->IsApiFunction()));
@@ -208,25 +210,25 @@ MaybeHandle<Object> Builtins::InvokeApiFunction(Isolate* isolate,
// Construct BuiltinArguments object:
// new target, function, arguments reversed, receiver.
const int kBufferSize = 32;
- Object* small_argv[kBufferSize];
- Object** argv;
+ Address small_argv[kBufferSize];
+ Address* argv;
const int frame_argc = argc + BuiltinArguments::kNumExtraArgsWithReceiver;
if (frame_argc <= kBufferSize) {
argv = small_argv;
} else {
- argv = new Object*[frame_argc];
+ argv = new Address[frame_argc];
}
int cursor = frame_argc - 1;
- argv[cursor--] = *receiver;
+ argv[cursor--] = receiver->ptr();
for (int i = 0; i < argc; ++i) {
- argv[cursor--] = *args[i];
+ argv[cursor--] = args[i]->ptr();
}
DCHECK_EQ(cursor, BuiltinArguments::kPaddingOffset);
argv[BuiltinArguments::kPaddingOffset] =
- ReadOnlyRoots(isolate).the_hole_value();
- argv[BuiltinArguments::kArgcOffset] = Smi::FromInt(frame_argc);
- argv[BuiltinArguments::kTargetOffset] = *function;
- argv[BuiltinArguments::kNewTargetOffset] = *new_target;
+ ReadOnlyRoots(isolate).the_hole_value()->ptr();
+ argv[BuiltinArguments::kArgcOffset] = Smi::FromInt(frame_argc)->ptr();
+ argv[BuiltinArguments::kTargetOffset] = function->ptr();
+ argv[BuiltinArguments::kNewTargetOffset] = new_target->ptr();
MaybeHandle<Object> result;
{
RelocatableArguments arguments(isolate, frame_argc, &argv[frame_argc - 1]);
@@ -245,15 +247,15 @@ MaybeHandle<Object> Builtins::InvokeApiFunction(Isolate* isolate,
// Helper function to handle calls to non-function objects created through the
// API. The object can be called as either a constructor (using new) or just as
// a function (without new).
-V8_WARN_UNUSED_RESULT static Object* HandleApiCallAsFunctionOrConstructor(
+V8_WARN_UNUSED_RESULT static Object HandleApiCallAsFunctionOrConstructor(
Isolate* isolate, bool is_construct_call, BuiltinArguments args) {
Handle<Object> receiver = args.receiver();
// Get the object called.
- JSObject* obj = JSObject::cast(*receiver);
+ JSObject obj = JSObject::cast(*receiver);
// Set the new target.
- HeapObject* new_target;
+ HeapObject new_target;
if (is_construct_call) {
// TODO(adamk): This should be passed through in args instead of
// being patched in here. We need to set a non-undefined value
@@ -267,21 +269,20 @@ V8_WARN_UNUSED_RESULT static Object* HandleApiCallAsFunctionOrConstructor(
// Get the invocation callback from the function descriptor that was
// used to create the called object.
DCHECK(obj->map()->is_callable());
- JSFunction* constructor = JSFunction::cast(obj->map()->GetConstructor());
- // TODO(ishell): turn this back to a DCHECK.
- CHECK(constructor->shared()->IsApiFunction());
- Object* handler =
- constructor->shared()->get_api_func_data()->instance_call_handler();
+ JSFunction constructor = JSFunction::cast(obj->map()->GetConstructor());
+ DCHECK(constructor->shared()->IsApiFunction());
+ Object handler =
+ constructor->shared()->get_api_func_data()->GetInstanceCallHandler();
DCHECK(!handler->IsUndefined(isolate));
- CallHandlerInfo* call_data = CallHandlerInfo::cast(handler);
+ CallHandlerInfo call_data = CallHandlerInfo::cast(handler);
// Get the data for the call and perform the callback.
- Object* result;
+ Object result;
{
HandleScope scope(isolate);
LOG(isolate, ApiObjectAccess("call non-function", obj));
FunctionCallbackArguments custom(isolate, call_data->data(), constructor,
- obj, new_target, &args[0] - 1,
+ obj, new_target, args.address_of_arg_at(1),
args.length() - 1);
Handle<Object> result_handle = custom.Call(call_data);
if (result_handle.is_null()) {
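
Note how InvokeApiFunction above now fills the builtin frame with raw tagged Address words obtained via ->ptr(), instead of Object* slots, with the receiver and arguments stored in reverse order. A toy sketch of that fill order (Handle and the buffer layout here are simplified assumptions, not the V8 API):

    #include <cstdint>
    #include <cstdio>

    using Address = uintptr_t;

    // Toy stand-in for Handle<Object>: ptr() yields the raw tagged word.
    struct Handle { Address value; Address ptr() const { return value; } };

    int main() {
      const int argc = 2;
      Handle receiver{0x10}, args[] = {{0x20}, {0x30}};
      Address argv[argc + 1];
      int cursor = argc;                 // receiver sits past the reversed args
      argv[cursor--] = receiver.ptr();
      for (int i = 0; i < argc; ++i) argv[cursor--] = args[i].ptr();
      for (Address a : argv)             // prints 0x30 0x20 0x10
        std::printf("%#lx\n", static_cast<unsigned long>(a));
    }
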
diff --git a/deps/v8/src/builtins/builtins-arguments-gen.cc b/deps/v8/src/builtins/builtins-arguments-gen.cc
index 0e22db2598..21831e9f46 100644
--- a/deps/v8/src/builtins/builtins-arguments-gen.cc
+++ b/deps/v8/src/builtins/builtins-arguments-gen.cc
@@ -20,52 +20,6 @@ namespace internal {
typedef compiler::Node Node;
std::tuple<Node*, Node*, Node*>
-ArgumentsBuiltinsAssembler::GetArgumentsFrameAndCount(Node* function,
- ParameterMode mode) {
- CSA_ASSERT(this, HasInstanceType(function, JS_FUNCTION_TYPE));
-
- VARIABLE(frame_ptr, MachineType::PointerRepresentation());
- frame_ptr.Bind(LoadParentFramePointer());
- CSA_ASSERT(this,
- WordEqual(function,
- LoadBufferObject(frame_ptr.value(),
- StandardFrameConstants::kFunctionOffset,
- MachineType::Pointer())));
- VARIABLE(argument_count, ParameterRepresentation(mode));
- VariableList list({&frame_ptr, &argument_count}, zone());
- Label done_argument_count(this, list);
-
- // Determine the number of passed parameters, which is either the count stored
- // in an arguments adapter frame or fetched from the shared function info.
- Node* frame_ptr_above = LoadBufferObject(
- frame_ptr.value(), StandardFrameConstants::kCallerFPOffset,
- MachineType::Pointer());
- Node* shared =
- LoadObjectField(function, JSFunction::kSharedFunctionInfoOffset);
- CSA_SLOW_ASSERT(this, HasInstanceType(shared, SHARED_FUNCTION_INFO_TYPE));
- Node* formal_parameter_count =
- LoadObjectField(shared, SharedFunctionInfo::kFormalParameterCountOffset,
- MachineType::Uint16());
- formal_parameter_count = Int32ToParameter(formal_parameter_count, mode);
-
- argument_count.Bind(formal_parameter_count);
- Node* marker_or_function = LoadBufferObject(
- frame_ptr_above, CommonFrameConstants::kContextOrFrameTypeOffset);
- GotoIf(
- MarkerIsNotFrameType(marker_or_function, StackFrame::ARGUMENTS_ADAPTOR),
- &done_argument_count);
- Node* adapted_parameter_count = LoadBufferObject(
- frame_ptr_above, ArgumentsAdaptorFrameConstants::kLengthOffset);
- frame_ptr.Bind(frame_ptr_above);
- argument_count.Bind(TaggedToParameter(adapted_parameter_count, mode));
- Goto(&done_argument_count);
-
- BIND(&done_argument_count);
- return std::tuple<Node*, Node*, Node*>(
- frame_ptr.value(), argument_count.value(), formal_parameter_count);
-}
-
-std::tuple<Node*, Node*, Node*>
ArgumentsBuiltinsAssembler::AllocateArgumentsObject(Node* map,
Node* arguments_count,
Node* parameter_map_count,
@@ -82,11 +36,11 @@ ArgumentsBuiltinsAssembler::AllocateArgumentsObject(Node* map,
}
bool empty = IsIntPtrOrSmiConstantZero(arguments_count, mode);
DCHECK_IMPLIES(empty, parameter_map_count == nullptr);
- Node* size =
+ TNode<IntPtrT> size =
empty ? IntPtrConstant(base_size)
: ElementOffsetFromIndex(element_count, PACKED_ELEMENTS, mode,
base_size + FixedArray::kHeaderSize);
- Node* result = Allocate(size);
+ TNode<Object> result = Allocate(size);
Comment("Initialize arguments object");
StoreMapNoWriteBarrier(result, map);
Node* empty_fixed_array = LoadRoot(RootIndex::kEmptyFixedArray);
@@ -96,7 +50,7 @@ ArgumentsBuiltinsAssembler::AllocateArgumentsObject(Node* map,
smi_arguments_count);
Node* arguments = nullptr;
if (!empty) {
- arguments = InnerAllocate(result, elements_offset);
+ arguments = InnerAllocate(CAST(result), elements_offset);
StoreObjectFieldNoWriteBarrier(arguments, FixedArray::kLengthOffset,
smi_arguments_count);
Node* fixed_array_map = LoadRoot(RootIndex::kFixedArrayMap);
@@ -104,9 +58,9 @@ ArgumentsBuiltinsAssembler::AllocateArgumentsObject(Node* map,
}
Node* parameter_map = nullptr;
if (parameter_map_count != nullptr) {
- Node* parameter_map_offset = ElementOffsetFromIndex(
+ TNode<IntPtrT> parameter_map_offset = ElementOffsetFromIndex(
arguments_count, PACKED_ELEMENTS, mode, FixedArray::kHeaderSize);
- parameter_map = InnerAllocate(arguments, parameter_map_offset);
+ parameter_map = InnerAllocate(CAST(arguments), parameter_map_offset);
StoreObjectFieldNoWriteBarrier(result, JSArray::kElementsOffset,
parameter_map);
Node* sloppy_elements_map =
@@ -147,7 +101,7 @@ Node* ArgumentsBuiltinsAssembler::ConstructParametersObjectFromArgs(
[this, elements, &offset](Node* arg) {
StoreNoWriteBarrier(MachineRepresentation::kTagged,
elements, offset.value(), arg);
- Increment(&offset, kPointerSize);
+ Increment(&offset, kSystemPointerSize);
},
first_arg, nullptr, param_mode);
return result;
@@ -155,22 +109,19 @@ Node* ArgumentsBuiltinsAssembler::ConstructParametersObjectFromArgs(
Node* ArgumentsBuiltinsAssembler::EmitFastNewRestParameter(Node* context,
Node* function) {
- Node* frame_ptr;
- Node* argument_count;
- Node* formal_parameter_count;
-
ParameterMode mode = OptimalParameterMode();
Node* zero = IntPtrOrSmiConstant(0, mode);
- std::tie(frame_ptr, argument_count, formal_parameter_count) =
- GetArgumentsFrameAndCount(function, mode);
+ ArgumentsBuiltinsFromDSLAssembler::ArgumentsInfo info =
+ GetArgumentsFrameAndCount(CAST(context),
+ UncheckedCast<JSFunction>(function));
VARIABLE(result, MachineRepresentation::kTagged);
Label no_rest_parameters(this), runtime(this, Label::kDeferred),
done(this, &result);
Node* rest_count =
- IntPtrOrSmiSub(argument_count, formal_parameter_count, mode);
+ IntPtrOrSmiSub(info.argument_count, info.formal_parameter_count, mode);
Node* const native_context = LoadNativeContext(context);
Node* const array_map =
LoadJSArrayElementsMap(PACKED_ELEMENTS, native_context);
@@ -183,8 +134,8 @@ Node* ArgumentsBuiltinsAssembler::EmitFastNewRestParameter(Node* context,
// Allocate the Rest JSArray and the elements together and fill in the
// contents with the arguments above |formal_parameter_count|.
result.Bind(ConstructParametersObjectFromArgs(
- array_map, frame_ptr, argument_count, formal_parameter_count, rest_count,
- mode, JSArray::kSize));
+ array_map, info.frame, info.argument_count, info.formal_parameter_count,
+ rest_count, mode, JSArray::kSize));
Goto(&done);
BIND(&no_rest_parameters);
@@ -213,27 +164,24 @@ Node* ArgumentsBuiltinsAssembler::EmitFastNewStrictArguments(Node* context,
VARIABLE(result, MachineRepresentation::kTagged);
Label done(this, &result), empty(this), runtime(this, Label::kDeferred);
- Node* frame_ptr;
- Node* argument_count;
- Node* formal_parameter_count;
-
ParameterMode mode = OptimalParameterMode();
Node* zero = IntPtrOrSmiConstant(0, mode);
- std::tie(frame_ptr, argument_count, formal_parameter_count) =
- GetArgumentsFrameAndCount(function, mode);
+ ArgumentsBuiltinsFromDSLAssembler::ArgumentsInfo info =
+ GetArgumentsFrameAndCount(CAST(context),
+ UncheckedCast<JSFunction>(function));
GotoIfFixedArraySizeDoesntFitInNewSpace(
- argument_count, &runtime,
+ info.argument_count, &runtime,
JSStrictArgumentsObject::kSize + FixedArray::kHeaderSize, mode);
Node* const native_context = LoadNativeContext(context);
Node* const map =
LoadContextElement(native_context, Context::STRICT_ARGUMENTS_MAP_INDEX);
- GotoIf(WordEqual(argument_count, zero), &empty);
+ GotoIf(WordEqual(info.argument_count, zero), &empty);
result.Bind(ConstructParametersObjectFromArgs(
- map, frame_ptr, argument_count, zero, argument_count, mode,
+ map, info.frame, info.argument_count, zero, info.argument_count, mode,
JSStrictArgumentsObject::kSize));
Goto(&done);
@@ -260,9 +208,6 @@ Node* ArgumentsBuiltinsAssembler::EmitFastNewStrictArguments(Node* context,
Node* ArgumentsBuiltinsAssembler::EmitFastNewSloppyArguments(Node* context,
Node* function) {
- Node* frame_ptr;
- Node* argument_count;
- Node* formal_parameter_count;
VARIABLE(result, MachineRepresentation::kTagged);
ParameterMode mode = OptimalParameterMode();
@@ -271,25 +216,26 @@ Node* ArgumentsBuiltinsAssembler::EmitFastNewSloppyArguments(Node* context,
Label done(this, &result), empty(this), no_parameters(this),
runtime(this, Label::kDeferred);
- std::tie(frame_ptr, argument_count, formal_parameter_count) =
- GetArgumentsFrameAndCount(function, mode);
+ ArgumentsBuiltinsFromDSLAssembler::ArgumentsInfo info =
+ GetArgumentsFrameAndCount(CAST(context),
+ UncheckedCast<JSFunction>(function));
- GotoIf(WordEqual(argument_count, zero), &empty);
+ GotoIf(WordEqual(info.argument_count, zero), &empty);
- GotoIf(WordEqual(formal_parameter_count, zero), &no_parameters);
+ GotoIf(WordEqual(info.formal_parameter_count, zero), &no_parameters);
{
Comment("Mapped parameter JSSloppyArgumentsObject");
Node* mapped_count =
- IntPtrOrSmiMin(argument_count, formal_parameter_count, mode);
+ IntPtrOrSmiMin(info.argument_count, info.formal_parameter_count, mode);
Node* parameter_map_size =
IntPtrOrSmiAdd(mapped_count, IntPtrOrSmiConstant(2, mode), mode);
// Verify that the overall allocation will fit in new space.
Node* elements_allocated =
- IntPtrOrSmiAdd(argument_count, parameter_map_size, mode);
+ IntPtrOrSmiAdd(info.argument_count, parameter_map_size, mode);
GotoIfFixedArraySizeDoesntFitInNewSpace(
elements_allocated, &runtime,
JSSloppyArgumentsObject::kSize + FixedArray::kHeaderSize * 2, mode);
@@ -301,8 +247,8 @@ Node* ArgumentsBuiltinsAssembler::EmitFastNewSloppyArguments(Node* context,
Node* elements;
Node* map_array;
std::tie(argument_object, elements, map_array) =
- AllocateArgumentsObject(map, argument_count, parameter_map_size, mode,
- JSSloppyArgumentsObject::kSize);
+ AllocateArgumentsObject(map, info.argument_count, parameter_map_size,
+ mode, JSSloppyArgumentsObject::kSize);
StoreObjectFieldNoWriteBarrier(
argument_object, JSSloppyArgumentsObject::kCalleeOffset, function);
StoreFixedArrayElement(CAST(map_array), 0, context, SKIP_WRITE_BARRIER);
@@ -310,24 +256,24 @@ Node* ArgumentsBuiltinsAssembler::EmitFastNewSloppyArguments(Node* context,
Comment("Fill in non-mapped parameters");
Node* argument_offset =
- ElementOffsetFromIndex(argument_count, PACKED_ELEMENTS, mode,
+ ElementOffsetFromIndex(info.argument_count, PACKED_ELEMENTS, mode,
FixedArray::kHeaderSize - kHeapObjectTag);
Node* mapped_offset =
ElementOffsetFromIndex(mapped_count, PACKED_ELEMENTS, mode,
FixedArray::kHeaderSize - kHeapObjectTag);
- CodeStubArguments arguments(this, argument_count, frame_ptr, mode);
+ CodeStubArguments arguments(this, info.argument_count, info.frame, mode);
VARIABLE(current_argument, MachineType::PointerRepresentation());
- current_argument.Bind(arguments.AtIndexPtr(argument_count, mode));
+ current_argument.Bind(arguments.AtIndexPtr(info.argument_count, mode));
VariableList var_list1({&current_argument}, zone());
mapped_offset = BuildFastLoop(
var_list1, argument_offset, mapped_offset,
[this, elements, &current_argument](Node* offset) {
- Increment(&current_argument, kPointerSize);
+ Increment(&current_argument, kSystemPointerSize);
Node* arg = LoadBufferObject(current_argument.value(), 0);
StoreNoWriteBarrier(MachineRepresentation::kTagged, elements, offset,
arg);
},
- -kPointerSize, INTPTR_PARAMETERS);
+ -kTaggedSize, INTPTR_PARAMETERS);
// Copy the parameter slots and the holes in the arguments.
// We need to fill in mapped_count slots. They index the context,
@@ -341,28 +287,27 @@ Node* ArgumentsBuiltinsAssembler::EmitFastNewSloppyArguments(Node* context,
VARIABLE(context_index, OptimalParameterRepresentation());
context_index.Bind(IntPtrOrSmiSub(
IntPtrOrSmiAdd(IntPtrOrSmiConstant(Context::MIN_CONTEXT_SLOTS, mode),
- formal_parameter_count, mode),
+ info.formal_parameter_count, mode),
mapped_count, mode));
Node* the_hole = TheHoleConstant();
VariableList var_list2({&context_index}, zone());
- const int kParameterMapHeaderSize =
- FixedArray::kHeaderSize + 2 * kPointerSize;
+ const int kParameterMapHeaderSize = FixedArray::OffsetOfElementAt(2);
Node* adjusted_map_array = IntPtrAdd(
BitcastTaggedToWord(map_array),
IntPtrConstant(kParameterMapHeaderSize - FixedArray::kHeaderSize));
Node* zero_offset = ElementOffsetFromIndex(
zero, PACKED_ELEMENTS, mode, FixedArray::kHeaderSize - kHeapObjectTag);
- BuildFastLoop(var_list2, mapped_offset, zero_offset,
- [this, the_hole, elements, adjusted_map_array, &context_index,
- mode](Node* offset) {
- StoreNoWriteBarrier(MachineRepresentation::kTagged,
- elements, offset, the_hole);
- StoreNoWriteBarrier(
- MachineRepresentation::kTagged, adjusted_map_array,
- offset, ParameterToTagged(context_index.value(), mode));
- Increment(&context_index, 1, mode);
- },
- -kPointerSize, INTPTR_PARAMETERS);
+ BuildFastLoop(
+ var_list2, mapped_offset, zero_offset,
+ [=, &context_index](Node* offset) {
+ StoreNoWriteBarrier(MachineRepresentation::kTagged, elements, offset,
+ the_hole);
+ StoreNoWriteBarrier(MachineRepresentation::kTagged,
+ adjusted_map_array, offset,
+ ParameterToTagged(context_index.value(), mode));
+ Increment(&context_index, 1, mode);
+ },
+ -kTaggedSize, INTPTR_PARAMETERS);
result.Bind(argument_object);
Goto(&done);
@@ -372,13 +317,13 @@ Node* ArgumentsBuiltinsAssembler::EmitFastNewSloppyArguments(Node* context,
{
Comment("No parameters JSSloppyArgumentsObject");
GotoIfFixedArraySizeDoesntFitInNewSpace(
- argument_count, &runtime,
+ info.argument_count, &runtime,
JSSloppyArgumentsObject::kSize + FixedArray::kHeaderSize, mode);
Node* const native_context = LoadNativeContext(context);
Node* const map =
LoadContextElement(native_context, Context::SLOPPY_ARGUMENTS_MAP_INDEX);
result.Bind(ConstructParametersObjectFromArgs(
- map, frame_ptr, argument_count, zero, argument_count, mode,
+ map, info.frame, info.argument_count, zero, info.argument_count, mode,
JSSloppyArgumentsObject::kSize));
StoreObjectFieldNoWriteBarrier(
result.value(), JSSloppyArgumentsObject::kCalleeOffset, function);
diff --git a/deps/v8/src/builtins/builtins-arguments-gen.h b/deps/v8/src/builtins/builtins-arguments-gen.h
index 438d10ab59..0f921c1ca6 100644
--- a/deps/v8/src/builtins/builtins-arguments-gen.h
+++ b/deps/v8/src/builtins/builtins-arguments-gen.h
@@ -6,6 +6,7 @@
#define V8_BUILTINS_BUILTINS_ARGUMENTS_GEN_H_
#include "src/code-stub-assembler.h"
+#include "torque-generated/builtins-arguments-from-dsl-gen.h"
namespace v8 {
namespace internal {
@@ -14,23 +15,17 @@ typedef compiler::Node Node;
typedef compiler::CodeAssemblerState CodeAssemblerState;
typedef compiler::CodeAssemblerLabel CodeAssemblerLabel;
-class ArgumentsBuiltinsAssembler : public CodeStubAssembler {
+class ArgumentsBuiltinsAssembler : public CodeStubAssembler,
+ public ArgumentsBuiltinsFromDSLAssembler {
public:
explicit ArgumentsBuiltinsAssembler(CodeAssemblerState* state)
- : CodeStubAssembler(state) {}
+ : CodeStubAssembler(state), ArgumentsBuiltinsFromDSLAssembler(state) {}
Node* EmitFastNewStrictArguments(Node* context, Node* function);
Node* EmitFastNewSloppyArguments(Node* context, Node* function);
Node* EmitFastNewRestParameter(Node* context, Node* function);
private:
- // Calculates and returns the frame pointer, argument count and formal
- // parameter count to be used to access a function's parameters, taking
- // argument adapter frames into account. The tuple is of the form:
- // <frame_ptr, # parameters actually passed, formal parameter count>
- std::tuple<Node*, Node*, Node*> GetArgumentsFrameAndCount(Node* function,
- ParameterMode mode);
-
 // Allocates an arguments object (either rest, strict or sloppy) together with the
// FixedArray elements for the arguments and a parameter map (for sloppy
// arguments only). A tuple is returned with pointers to the arguments object,
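
GetArgumentsFrameAndCount moves out of this hand-written class into Torque-generated code, and the assembler picks it up by inheriting from ArgumentsBuiltinsFromDSLAssembler alongside CodeStubAssembler, both constructed over the same CodeAssemblerState. A minimal sketch of that mixin shape (toy types with hard-coded ArgumentsInfo values, not the real assemblers):

    #include <cstdio>

    struct CodeAssemblerState { int id; };

    struct ArgumentsBuiltinsFromDSLAssembler {
      explicit ArgumentsBuiltinsFromDSLAssembler(CodeAssemblerState* s)
          : state_(s) {}
      struct ArgumentsInfo { int frame, argument_count, formal_parameter_count; };
      // Stand-in for the Torque-generated helper.
      ArgumentsInfo GetArgumentsFrameAndCount() { return {1, 3, 2}; }
      CodeAssemblerState* state_;
    };

    struct CodeStubAssembler {
      explicit CodeStubAssembler(CodeAssemblerState* s) : state_(s) {}
      CodeAssemblerState* state_;
    };

    struct ArgumentsBuiltinsAssembler : CodeStubAssembler,
                                        ArgumentsBuiltinsFromDSLAssembler {
      explicit ArgumentsBuiltinsAssembler(CodeAssemblerState* s)
          : CodeStubAssembler(s), ArgumentsBuiltinsFromDSLAssembler(s) {}
    };

    int main() {
      CodeAssemblerState state{0};
      ArgumentsBuiltinsAssembler a(&state);
      auto info = a.GetArgumentsFrameAndCount();
      std::printf("argc=%d formal=%d\n", info.argument_count,
                  info.formal_parameter_count);
    }
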
diff --git a/deps/v8/src/builtins/builtins-array-gen.cc b/deps/v8/src/builtins/builtins-array-gen.cc
index 4aebe2e02b..db58ecf152 100644
--- a/deps/v8/src/builtins/builtins-array-gen.cc
+++ b/deps/v8/src/builtins/builtins-array-gen.cc
@@ -13,15 +13,17 @@
#include "src/frame-constants.h"
#include "src/heap/factory-inl.h"
#include "src/objects/arguments-inl.h"
+#include "src/objects/property-cell.h"
namespace v8 {
namespace internal {
using Node = compiler::Node;
+using IteratorRecord = IteratorBuiltinsFromDSLAssembler::IteratorRecord;
ArrayBuiltinsAssembler::ArrayBuiltinsAssembler(
compiler::CodeAssemblerState* state)
- : BaseBuiltinsFromDSLAssembler(state),
+ : CodeStubAssembler(state),
k_(this, MachineRepresentation::kTagged),
a_(this, MachineRepresentation::kTagged),
to_(this, MachineRepresentation::kTagged, SmiConstant(0)),
@@ -124,79 +126,6 @@ Node* ArrayBuiltinsAssembler::FindProcessor(Node* k_value, Node* k) {
BIND(&ok);
}
- void ArrayBuiltinsAssembler::FilterResultGenerator() {
- // 7. Let A be ArraySpeciesCreate(O, 0).
- // This version of ArraySpeciesCreate will create with the correct
- // ElementsKind in the fast case.
- GenerateArraySpeciesCreate();
- }
-
- Node* ArrayBuiltinsAssembler::FilterProcessor(Node* k_value, Node* k) {
- // ii. Let selected be ToBoolean(? Call(callbackfn, T, kValue, k, O)).
- Node* selected = CallJS(CodeFactory::Call(isolate()), context(),
- callbackfn(), this_arg(), k_value, k, o());
- Label true_continue(this, &to_), false_continue(this);
- BranchIfToBooleanIsTrue(selected, &true_continue, &false_continue);
- BIND(&true_continue);
- // iii. If selected is true, then...
- {
- Label after_work(this, &to_);
- Node* kind = nullptr;
-
- // If a() is a JSArray, we can have a fast path.
- Label fast(this);
- Label runtime(this);
- Label object_push_pre(this), object_push(this), double_push(this);
- BranchIfFastJSArray(a(), context(), &fast, &runtime);
-
- BIND(&fast);
- {
- GotoIf(WordNotEqual(LoadJSArrayLength(a()), to_.value()), &runtime);
- kind = EnsureArrayPushable(LoadMap(a()), &runtime);
- GotoIf(IsElementsKindGreaterThan(kind, HOLEY_SMI_ELEMENTS),
- &object_push_pre);
-
- BuildAppendJSArray(HOLEY_SMI_ELEMENTS, a(), k_value, &runtime);
- Goto(&after_work);
- }
-
- BIND(&object_push_pre);
- {
- Branch(IsElementsKindGreaterThan(kind, HOLEY_ELEMENTS), &double_push,
- &object_push);
- }
-
- BIND(&object_push);
- {
- BuildAppendJSArray(HOLEY_ELEMENTS, a(), k_value, &runtime);
- Goto(&after_work);
- }
-
- BIND(&double_push);
- {
- BuildAppendJSArray(HOLEY_DOUBLE_ELEMENTS, a(), k_value, &runtime);
- Goto(&after_work);
- }
-
- BIND(&runtime);
- {
- // 1. Perform ? CreateDataPropertyOrThrow(A, ToString(to), kValue).
- CallRuntime(Runtime::kCreateDataProperty, context(), a(), to_.value(),
- k_value);
- Goto(&after_work);
- }
-
- BIND(&after_work);
- {
- // 2. Increase to by 1.
- to_.Bind(NumberInc(to_.value()));
- Goto(&false_continue);
- }
- }
- BIND(&false_continue);
- return a();
- }
-
void ArrayBuiltinsAssembler::MapResultGenerator() {
GenerateArraySpeciesCreate(len_);
}
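
The removed FilterProcessor above picked its append routine by comparing the result array's ElementsKind against HOLEY_SMI_ELEMENTS and HOLEY_ELEMENTS, widening from Smi to object or double storage as needed. A small sketch of that ordering-based dispatch (the enum is an illustrative subset of V8's ElementsKind sequence):

    #include <cstdio>

    enum ElementsKind {
      PACKED_SMI_ELEMENTS, HOLEY_SMI_ELEMENTS,
      PACKED_ELEMENTS, HOLEY_ELEMENTS,
      PACKED_DOUBLE_ELEMENTS, HOLEY_DOUBLE_ELEMENTS
    };

    // Mirrors the IsElementsKindGreaterThan branches in the removed code.
    const char* PushRoutineFor(ElementsKind kind) {
      if (kind <= HOLEY_SMI_ELEMENTS) return "smi push";
      if (kind <= HOLEY_ELEMENTS) return "object push";
      return "double push";
    }

    int main() {
      std::printf("%s\n", PushRoutineFor(HOLEY_ELEMENTS));         // object push
      std::printf("%s\n", PushRoutineFor(HOLEY_DOUBLE_ELEMENTS));  // double push
    }
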
@@ -208,7 +137,7 @@ Node* ArrayBuiltinsAssembler::FindProcessor(Node* k_value, Node* k) {
const char* method_name = "%TypedArray%.prototype.map";
TypedArrayBuiltinsAssembler typedarray_asm(state());
- TNode<JSTypedArray> a = typedarray_asm.SpeciesCreateByLength(
+ TNode<JSTypedArray> a = typedarray_asm.TypedArraySpeciesCreateByLength(
context(), original_array, length, method_name);
// In the Spec and our current implementation, the length check is already
// performed in TypedArraySpeciesCreate.
@@ -297,10 +226,9 @@ Node* ArrayBuiltinsAssembler::FindProcessor(Node* k_value, Node* k) {
const ElementsKind kFromKind = HOLEY_SMI_ELEMENTS;
const ElementsKind kToKind = HOLEY_DOUBLE_ELEMENTS;
- const bool kIsJSArray = true;
Label transition_in_runtime(this, Label::kDeferred);
- TransitionElementsKind(a(), double_map, kFromKind, kToKind, kIsJSArray,
+ TransitionElementsKind(a(), double_map, kFromKind, kToKind,
&transition_in_runtime);
Goto(&array_double);
@@ -360,7 +288,7 @@ Node* ArrayBuiltinsAssembler::FindProcessor(Node* k_value, Node* k) {
num_value = ToNumber_Inline(context(), mapped_value);
}
// The only way how this can bailout is because of a detached buffer.
- EmitElementStore(a(), k, num_value, false, source_elements_kind_,
+ EmitElementStore(a(), k, num_value, source_elements_kind_,
KeyedAccessStoreMode::STANDARD_STORE, &detached,
context());
Goto(&done);
@@ -385,7 +313,7 @@ Node* ArrayBuiltinsAssembler::FindProcessor(Node* k_value, Node* k) {
CSA_ASSERT(this, Word32BinaryNot(IsFixedDoubleArray(array)));
TNode<IntPtrT> length = SmiToIntPtr(smi_length);
- TNode<WordT> byte_length = TimesPointerSize(length);
+ TNode<WordT> byte_length = TimesTaggedSize(length);
CSA_ASSERT(this, UintPtrLessThan(length, byte_length));
static const int32_t fa_base_data_offset =
@@ -799,9 +727,7 @@ Node* ArrayBuiltinsAssembler::FindProcessor(Node* k_value, Node* k) {
BIND(&switch_on_elements_kind);
TNode<Smi> smi_len = CAST(len());
// Select by ElementsKind
- Node* o_map = LoadMap(o());
- Node* bit_field2 = LoadMapBitField2(o_map);
- Node* kind = DecodeWord32<Map::ElementsKindBits>(bit_field2);
+ Node* kind = LoadElementsKind(o());
Branch(IsElementsKindGreaterThan(kind, HOLEY_ELEMENTS),
&maybe_double_elements, &fast_elements);
@@ -834,54 +760,6 @@ Node* ArrayBuiltinsAssembler::FindProcessor(Node* k_value, Node* k) {
}
// Perform ArraySpeciesCreate (ES6 #sec-arrayspeciescreate).
- // This version is specialized to create a zero length array
- // of the elements kind of the input array.
- void ArrayBuiltinsAssembler::GenerateArraySpeciesCreate() {
- Label runtime(this, Label::kDeferred), done(this);
-
- TNode<Smi> len = SmiConstant(0);
- TNode<Map> original_map = LoadMap(o());
- GotoIfNot(
- InstanceTypeEqual(LoadMapInstanceType(original_map), JS_ARRAY_TYPE),
- &runtime);
-
- GotoIfNot(IsPrototypeInitialArrayPrototype(context(), original_map),
- &runtime);
-
- Node* species_protector = ArraySpeciesProtectorConstant();
- Node* value =
- LoadObjectField(species_protector, PropertyCell::kValueOffset);
- TNode<Smi> const protector_invalid =
- SmiConstant(Isolate::kProtectorInvalid);
- GotoIf(WordEqual(value, protector_invalid), &runtime);
-
- // Respect the ElementsKind of the input array.
- TNode<Int32T> elements_kind = LoadMapElementsKind(original_map);
- GotoIfNot(IsFastElementsKind(elements_kind), &runtime);
- TNode<Context> native_context = LoadNativeContext(context());
- TNode<Map> array_map =
- LoadJSArrayElementsMap(elements_kind, native_context);
- TNode<JSArray> array =
- CAST(AllocateJSArray(GetInitialFastElementsKind(), array_map, len, len,
- nullptr, CodeStubAssembler::SMI_PARAMETERS));
- a_.Bind(array);
-
- Goto(&done);
-
- BIND(&runtime);
- {
- // 5. Let A be ? ArraySpeciesCreate(O, len).
- Node* constructor =
- CallRuntime(Runtime::kArraySpeciesConstructor, context(), o());
- a_.Bind(ConstructJS(CodeFactory::Construct(isolate()), context(),
- constructor, len));
- Goto(&fully_spec_compliant_);
- }
-
- BIND(&done);
- }
-
- // Perform ArraySpeciesCreate (ES6 #sec-arrayspeciescreate).
void ArrayBuiltinsAssembler::GenerateArraySpeciesCreate(TNode<Number> len) {
Label runtime(this, Label::kDeferred), done(this);
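
Both versions of GenerateArraySpeciesCreate gate their inline-allocation fast path on the array-species protector cell. A protector is a one-way flag: it starts valid and is invalidated the first time user code patches Symbol.species or the Array prototype, after which every fast path falls through to the runtime. A toy sketch of that idea (the 1/0 encoding is illustrative; only the compare-against-invalid shape is taken from the code above):

    #include <cstdio>

    struct ProtectorCell { int value = 1; };   // 1 = valid, 0 = invalid
    const int kProtectorInvalid = 0;

    bool CanUseFastSpeciesCreate(const ProtectorCell& species) {
      return species.value != kProtectorInvalid;
    }

    int main() {
      ProtectorCell species;
      std::printf("%d\n", CanUseFastSpeciesCreate(species));  // 1: fast path
      species.value = kProtectorInvalid;   // e.g. Symbol.species was patched
      std::printf("%d\n", CanUseFastSpeciesCreate(species));  // 0: runtime
    }
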
@@ -900,8 +778,8 @@ Node* ArrayBuiltinsAssembler::FindProcessor(Node* k_value, Node* k) {
GotoIf(WordEqual(value, protector_invalid), &runtime);
GotoIfNot(TaggedIsPositiveSmi(len), &runtime);
- GotoIf(
- SmiAbove(CAST(len), SmiConstant(JSArray::kInitialMaxFastElementArray)),
+ GotoIfNot(
+ IsValidFastJSArrayCapacity(len, CodeStubAssembler::SMI_PARAMETERS),
&runtime);
// We need to be conservative and start with holey because the builtins
@@ -912,18 +790,18 @@ Node* ArrayBuiltinsAssembler::FindProcessor(Node* k_value, Node* k) {
TNode<Context> native_context = LoadNativeContext(context());
TNode<Map> array_map =
LoadJSArrayElementsMap(elements_kind, native_context);
- a_.Bind(AllocateJSArray(PACKED_SMI_ELEMENTS, array_map, len, len, nullptr,
- CodeStubAssembler::SMI_PARAMETERS));
+ a_.Bind(AllocateJSArray(PACKED_SMI_ELEMENTS, array_map, len, CAST(len),
+ nullptr, CodeStubAssembler::SMI_PARAMETERS,
+ kAllowLargeObjectAllocation));
Goto(&done);
BIND(&runtime);
{
// 5. Let A be ? ArraySpeciesCreate(O, len).
- Node* constructor =
- CallRuntime(Runtime::kArraySpeciesConstructor, context(), o());
- a_.Bind(ConstructJS(CodeFactory::Construct(isolate()), context(),
- constructor, len));
+ TNode<JSReceiver> constructor =
+ CAST(CallRuntime(Runtime::kArraySpeciesConstructor, context(), o()));
+ a_.Bind(Construct(context(), constructor, len));
Goto(&fully_spec_compliant_);
}
@@ -955,7 +833,7 @@ TF_BUILTIN(ArrayPrototypePop, CodeStubAssembler) {
{
TNode<JSArray> array_receiver = CAST(receiver);
CSA_ASSERT(this, TaggedIsPositiveSmi(LoadJSArrayLength(array_receiver)));
- Node* length =
+ TNode<IntPtrT> length =
LoadAndUntagObjectField(array_receiver, JSArray::kLengthOffset);
Label return_undefined(this), fast_elements(this);
GotoIf(IntPtrEqual(length, IntPtrConstant(0)), &return_undefined);
@@ -964,15 +842,15 @@ TF_BUILTIN(ArrayPrototypePop, CodeStubAssembler) {
EnsureArrayLengthWritable(LoadMap(array_receiver), &runtime);
// 3) Check that the elements backing store isn't copy-on-write.
- Node* elements = LoadElements(array_receiver);
+ TNode<FixedArrayBase> elements = LoadElements(array_receiver);
GotoIf(WordEqual(LoadMap(elements), LoadRoot(RootIndex::kFixedCOWArrayMap)),
&runtime);
- Node* new_length = IntPtrSub(length, IntPtrConstant(1));
+ TNode<IntPtrT> new_length = IntPtrSub(length, IntPtrConstant(1));
// 4) Check that we're not supposed to shrink the backing store, as
// implemented in elements.cc:ElementsAccessorBase::SetLengthImpl.
- Node* capacity = SmiUntag(LoadFixedArrayBaseLength(elements));
+ TNode<IntPtrT> capacity = SmiUntag(LoadFixedArrayBaseLength(elements));
GotoIf(IntPtrLessThan(
IntPtrAdd(IntPtrAdd(new_length, new_length),
IntPtrConstant(JSObject::kMinAddedElementsCapacity)),
@@ -987,26 +865,10 @@ TF_BUILTIN(ArrayPrototypePop, CodeStubAssembler) {
Int32Constant(TERMINAL_FAST_ELEMENTS_KIND)),
&fast_elements);
- Node* value = LoadFixedDoubleArrayElement(
- elements, new_length, MachineType::Float64(), 0, INTPTR_PARAMETERS,
- &return_undefined);
-
- int32_t header_size = FixedDoubleArray::kHeaderSize - kHeapObjectTag;
- Node* offset = ElementOffsetFromIndex(new_length, HOLEY_DOUBLE_ELEMENTS,
- INTPTR_PARAMETERS, header_size);
- if (Is64()) {
- Node* double_hole = Int64Constant(kHoleNanInt64);
- StoreNoWriteBarrier(MachineRepresentation::kWord64, elements, offset,
- double_hole);
- } else {
- STATIC_ASSERT(kHoleNanLower32 == kHoleNanUpper32);
- Node* double_hole = Int32Constant(kHoleNanLower32);
- StoreNoWriteBarrier(MachineRepresentation::kWord32, elements, offset,
- double_hole);
- StoreNoWriteBarrier(MachineRepresentation::kWord32, elements,
- IntPtrAdd(offset, IntPtrConstant(kPointerSize)),
- double_hole);
- }
+ Node* value = LoadFixedDoubleArrayElement(CAST(elements), new_length,
+ &return_undefined);
+
+ StoreFixedDoubleArrayHole(CAST(elements), new_length);
args.PopAndReturn(AllocateHeapNumberWithValue(value));
BIND(&fast_elements);
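
The pop fast path above now calls StoreFixedDoubleArrayHole instead of open-coding the hole-NaN stores. A standalone sketch of what that helper abstracts, assuming the 0xFFF7FFFF half-word pattern for the hole NaN (the diff itself only guarantees kHoleNanLower32 == kHoleNanUpper32):

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    // Illustrative hole bit pattern: both 32-bit halves equal.
    const uint64_t kHoleNanInt64 = 0xfff7fffffff7ffffULL;

    void StoreDoubleHole(double* slot) {
    #if UINTPTR_MAX == 0xffffffffffffffffULL
      std::memcpy(slot, &kHoleNanInt64, 8);            // one 64-bit store
    #else
      const uint32_t half = 0xfff7ffffu;               // lower == upper half
      std::memcpy(slot, &half, 4);
      std::memcpy(reinterpret_cast<char*>(slot) + 4, &half, 4);
    #endif
    }

    int main() {
      double d = 1.0;
      StoreDoubleHole(&d);
      uint64_t bits;
      std::memcpy(&bits, &d, 8);
      std::printf("%#llx\n", static_cast<unsigned long long>(bits));
    }
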
@@ -1085,9 +947,7 @@ TF_BUILTIN(ArrayPrototypePush, CodeStubAssembler) {
Increment(&arg_index);
// The runtime SetProperty call could have converted the array to dictionary
// mode, which must be detected to abort the fast-path.
- Node* map = LoadMap(array_receiver);
- Node* bit_field2 = LoadMapBitField2(map);
- Node* kind = DecodeWord32<Map::ElementsKindBits>(bit_field2);
+ Node* kind = LoadElementsKind(array_receiver);
GotoIf(Word32Equal(kind, Int32Constant(DICTIONARY_ELEMENTS)),
&default_label);
@@ -1131,9 +991,7 @@ TF_BUILTIN(ArrayPrototypePush, CodeStubAssembler) {
Increment(&arg_index);
// The runtime SetProperty call could have converted the array to dictionary
// mode, which must be detected to abort the fast-path.
- Node* map = LoadMap(array_receiver);
- Node* bit_field2 = LoadMapBitField2(map);
- Node* kind = DecodeWord32<Map::ElementsKindBits>(bit_field2);
+ Node* kind = LoadElementsKind(array_receiver);
GotoIf(Word32Equal(kind, Int32Constant(DICTIONARY_ELEMENTS)),
&default_label);
Goto(&object_push);
@@ -1163,341 +1021,6 @@ TF_BUILTIN(ArrayPrototypePush, CodeStubAssembler) {
}
}
-class ArrayPrototypeSliceCodeStubAssembler : public CodeStubAssembler {
- public:
- explicit ArrayPrototypeSliceCodeStubAssembler(
- compiler::CodeAssemblerState* state)
- : CodeStubAssembler(state) {}
-
- Node* HandleFastSlice(TNode<Context> context, Node* array, Node* from,
- Node* count, Label* slow) {
- VARIABLE(result, MachineRepresentation::kTagged);
- Label done(this);
-
- GotoIf(TaggedIsNotSmi(from), slow);
- GotoIf(TaggedIsNotSmi(count), slow);
-
- Label try_fast_arguments(this), try_simple_slice(this);
-
- Node* map = LoadMap(array);
- GotoIfNot(IsJSArrayMap(map), &try_fast_arguments);
-
- // Check prototype chain if receiver does not have packed elements
- GotoIfNot(IsPrototypeInitialArrayPrototype(context, map), slow);
-
- GotoIf(IsNoElementsProtectorCellInvalid(), slow);
-
- GotoIf(IsArraySpeciesProtectorCellInvalid(), slow);
-
- // Bailout if receiver has slow elements.
- Node* elements_kind = LoadMapElementsKind(map);
- GotoIfNot(IsFastElementsKind(elements_kind), &try_simple_slice);
-
- // Make sure that the length hasn't been changed by side-effect.
- Node* array_length = LoadJSArrayLength(array);
- GotoIf(TaggedIsNotSmi(array_length), slow);
- GotoIf(SmiAbove(SmiAdd(CAST(from), CAST(count)), CAST(array_length)), slow);
-
- CSA_ASSERT(this, SmiGreaterThanOrEqual(CAST(from), SmiConstant(0)));
-
- result.Bind(CallBuiltin(Builtins::kExtractFastJSArray, context, array, from,
- count));
- Goto(&done);
-
- BIND(&try_fast_arguments);
-
- Node* const native_context = LoadNativeContext(context);
- Node* const fast_aliased_arguments_map = LoadContextElement(
- native_context, Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX);
- GotoIf(WordNotEqual(map, fast_aliased_arguments_map), &try_simple_slice);
-
- TNode<SloppyArgumentsElements> sloppy_elements = CAST(LoadElements(array));
- TNode<Smi> sloppy_elements_length =
- LoadFixedArrayBaseLength(sloppy_elements);
- TNode<Smi> parameter_map_length =
- SmiSub(sloppy_elements_length,
- SmiConstant(SloppyArgumentsElements::kParameterMapStart));
- VARIABLE(index_out, MachineType::PointerRepresentation());
-
- int max_fast_elements =
- (kMaxRegularHeapObjectSize - FixedArray::kHeaderSize - JSArray::kSize -
- AllocationMemento::kSize) /
- kPointerSize;
- GotoIf(SmiAboveOrEqual(CAST(count), SmiConstant(max_fast_elements)),
- &try_simple_slice);
-
- GotoIf(SmiLessThan(CAST(from), SmiConstant(0)), slow);
-
- TNode<Smi> end = SmiAdd(CAST(from), CAST(count));
-
- TNode<FixedArray> unmapped_elements = CAST(LoadFixedArrayElement(
- sloppy_elements, SloppyArgumentsElements::kArgumentsIndex));
- TNode<Smi> unmapped_elements_length =
- LoadFixedArrayBaseLength(unmapped_elements);
-
- GotoIf(SmiAbove(end, unmapped_elements_length), slow);
-
- Node* array_map = LoadJSArrayElementsMap(HOLEY_ELEMENTS, native_context);
- result.Bind(AllocateJSArray(HOLEY_ELEMENTS, array_map, count, count,
- nullptr, SMI_PARAMETERS));
-
- index_out.Bind(IntPtrConstant(0));
- TNode<FixedArray> result_elements = CAST(LoadElements(result.value()));
- TNode<Smi> from_mapped = SmiMin(parameter_map_length, CAST(from));
- TNode<Smi> to = SmiMin(parameter_map_length, end);
- Node* arguments_context = LoadFixedArrayElement(
- sloppy_elements, SloppyArgumentsElements::kContextIndex);
- VariableList var_list({&index_out}, zone());
- BuildFastLoop(
- var_list, from_mapped, to,
- [this, result_elements, arguments_context, sloppy_elements,
- unmapped_elements, &index_out](Node* current) {
- Node* context_index = LoadFixedArrayElement(
- sloppy_elements, current,
- kPointerSize * SloppyArgumentsElements::kParameterMapStart,
- SMI_PARAMETERS);
- Label is_the_hole(this), done(this);
- GotoIf(IsTheHole(context_index), &is_the_hole);
- Node* mapped_argument =
- LoadContextElement(arguments_context, SmiUntag(context_index));
- StoreFixedArrayElement(result_elements, index_out.value(),
- mapped_argument, SKIP_WRITE_BARRIER);
- Goto(&done);
- BIND(&is_the_hole);
- Node* argument = LoadFixedArrayElement(unmapped_elements, current, 0,
- SMI_PARAMETERS);
- StoreFixedArrayElement(result_elements, index_out.value(), argument,
- SKIP_WRITE_BARRIER);
- Goto(&done);
- BIND(&done);
- index_out.Bind(IntPtrAdd(index_out.value(), IntPtrConstant(1)));
- },
- 1, SMI_PARAMETERS, IndexAdvanceMode::kPost);
-
- TNode<Smi> unmapped_from =
- SmiMin(SmiMax(parameter_map_length, CAST(from)), end);
-
- BuildFastLoop(
- var_list, unmapped_from, end,
- [this, unmapped_elements, result_elements, &index_out](Node* current) {
- Node* argument = LoadFixedArrayElement(unmapped_elements, current, 0,
- SMI_PARAMETERS);
- StoreFixedArrayElement(result_elements, index_out.value(), argument,
- SKIP_WRITE_BARRIER);
- index_out.Bind(IntPtrAdd(index_out.value(), IntPtrConstant(1)));
- },
- 1, SMI_PARAMETERS, IndexAdvanceMode::kPost);
-
- Goto(&done);
-
- BIND(&try_simple_slice);
- Node* simple_result = CallRuntime(Runtime::kTrySliceSimpleNonFastElements,
- context, array, from, count);
- GotoIfNumber(simple_result, slow);
- result.Bind(simple_result);
-
- Goto(&done);
-
- BIND(&done);
- return result.value();
- }
-
- void CopyOneElement(TNode<Context> context, Node* o, Node* a, Node* p_k,
- Variable& n) {
- // b. Let kPresent be HasProperty(O, Pk).
- // c. ReturnIfAbrupt(kPresent).
- TNode<Oddball> k_present = HasProperty(context, o, p_k, kHasProperty);
-
- // d. If kPresent is true, then
- Label done_element(this);
- GotoIf(IsFalse(k_present), &done_element);
-
- // i. Let kValue be Get(O, Pk).
- // ii. ReturnIfAbrupt(kValue).
- Node* k_value = GetProperty(context, o, p_k);
-
- // iii. Let status be CreateDataPropertyOrThrow(A, ToString(n), kValue).
- // iv. ReturnIfAbrupt(status).
- CallRuntime(Runtime::kCreateDataProperty, context, a, n.value(), k_value);
-
- Goto(&done_element);
- BIND(&done_element);
- }
-};
-
-TF_BUILTIN(ArrayPrototypeSlice, ArrayPrototypeSliceCodeStubAssembler) {
- Node* const argc =
- ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- Label slow(this, Label::kDeferred), fast_elements_kind(this);
-
- CodeStubArguments args(this, argc);
- TNode<Object> receiver = args.GetReceiver();
-
- TVARIABLE(JSReceiver, o);
- VARIABLE(len, MachineRepresentation::kTagged);
- Label length_done(this), generic_length(this), check_arguments_length(this),
- load_arguments_length(this);
-
- GotoIf(TaggedIsSmi(receiver), &generic_length);
- GotoIfNot(IsJSArray(CAST(receiver)), &check_arguments_length);
-
- TNode<JSArray> array_receiver = CAST(receiver);
- o = array_receiver;
- len.Bind(LoadJSArrayLength(array_receiver));
-
- // Check for the array clone case. There can be no arguments to slice, the
- // array prototype chain must be intact and have no elements, and the array
- // has to have fast elements.
- GotoIf(WordNotEqual(argc, IntPtrConstant(0)), &length_done);
-
- Label clone(this);
- BranchIfFastJSArrayForCopy(receiver, context, &clone, &length_done);
- BIND(&clone);
-
- args.PopAndReturn(
- CallBuiltin(Builtins::kCloneFastJSArray, context, receiver));
-
- BIND(&check_arguments_length);
-
- Node* map = LoadMap(array_receiver);
- Node* native_context = LoadNativeContext(context);
- GotoIfContextElementEqual(map, native_context,
- Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX,
- &load_arguments_length);
- GotoIfContextElementEqual(map, native_context,
- Context::SLOW_ALIASED_ARGUMENTS_MAP_INDEX,
- &load_arguments_length);
- GotoIfContextElementEqual(map, native_context,
- Context::STRICT_ARGUMENTS_MAP_INDEX,
- &load_arguments_length);
- GotoIfContextElementEqual(map, native_context,
- Context::SLOPPY_ARGUMENTS_MAP_INDEX,
- &load_arguments_length);
-
- Goto(&generic_length);
-
- BIND(&load_arguments_length);
- Node* arguments_length = LoadObjectField(
- array_receiver, JSArgumentsObjectWithLength::kLengthOffset);
- GotoIf(TaggedIsNotSmi(arguments_length), &generic_length);
- o = CAST(receiver);
- len.Bind(arguments_length);
- Goto(&length_done);
-
- BIND(&generic_length);
- // 1. Let O be ToObject(this value).
- // 2. ReturnIfAbrupt(O).
- o = ToObject_Inline(context, receiver);
-
- // 3. Let len be ToLength(Get(O, "length")).
- // 4. ReturnIfAbrupt(len).
- len.Bind(ToLength_Inline(
- context,
- GetProperty(context, o.value(), isolate()->factory()->length_string())));
- Goto(&length_done);
-
- BIND(&length_done);
-
- // 5. Let relativeStart be ToInteger(start).
- // 6. ReturnIfAbrupt(relativeStart).
- TNode<Object> arg0 = args.GetOptionalArgumentValue(0, SmiConstant(0));
- Node* relative_start = ToInteger_Inline(context, arg0);
-
- // 7. If relativeStart < 0, let k be max((len + relativeStart),0);
- // else let k be min(relativeStart, len.value()).
- VARIABLE(k, MachineRepresentation::kTagged);
- Label relative_start_positive(this), relative_start_done(this);
- GotoIfNumberGreaterThanOrEqual(relative_start, SmiConstant(0),
- &relative_start_positive);
- k.Bind(NumberMax(NumberAdd(len.value(), relative_start), NumberConstant(0)));
- Goto(&relative_start_done);
- BIND(&relative_start_positive);
- k.Bind(NumberMin(relative_start, len.value()));
- Goto(&relative_start_done);
- BIND(&relative_start_done);
-
- // 8. If end is undefined, let relativeEnd be len;
- // else let relativeEnd be ToInteger(end).
- // 9. ReturnIfAbrupt(relativeEnd).
- TNode<Object> end = args.GetOptionalArgumentValue(1, UndefinedConstant());
- Label end_undefined(this), end_done(this);
- VARIABLE(relative_end, MachineRepresentation::kTagged);
- GotoIf(WordEqual(end, UndefinedConstant()), &end_undefined);
- relative_end.Bind(ToInteger_Inline(context, end));
- Goto(&end_done);
- BIND(&end_undefined);
- relative_end.Bind(len.value());
- Goto(&end_done);
- BIND(&end_done);
-
- // 10. If relativeEnd < 0, let final be max((len + relativeEnd),0);
- // else let final be min(relativeEnd, len).
- VARIABLE(final, MachineRepresentation::kTagged);
- Label relative_end_positive(this), relative_end_done(this);
- GotoIfNumberGreaterThanOrEqual(relative_end.value(), NumberConstant(0),
- &relative_end_positive);
- final.Bind(NumberMax(NumberAdd(len.value(), relative_end.value()),
- NumberConstant(0)));
- Goto(&relative_end_done);
- BIND(&relative_end_positive);
- final.Bind(NumberMin(relative_end.value(), len.value()));
- Goto(&relative_end_done);
- BIND(&relative_end_done);
-
- // 11. Let count be max(final - k, 0).
- Node* count =
- NumberMax(NumberSub(final.value(), k.value()), NumberConstant(0));
-
- // Handle FAST_ELEMENTS
- Label non_fast(this);
- Node* fast_result =
- HandleFastSlice(context, o.value(), k.value(), count, &non_fast);
- args.PopAndReturn(fast_result);
-
- // 12. Let A be ArraySpeciesCreate(O, count).
- // 13. ReturnIfAbrupt(A).
- BIND(&non_fast);
-
- Node* constructor =
- CallRuntime(Runtime::kArraySpeciesConstructor, context, o.value());
- Node* a = ConstructJS(CodeFactory::Construct(isolate()), context, constructor,
- count);
-
- // 14. Let n be 0.
- VARIABLE(n, MachineRepresentation::kTagged);
- n.Bind(SmiConstant(0));
-
- Label loop(this, {&k, &n});
- Label after_loop(this);
- Goto(&loop);
- BIND(&loop);
- {
- // 15. Repeat, while k < final
- GotoIfNumberGreaterThanOrEqual(k.value(), final.value(), &after_loop);
-
- Node* p_k = k.value(); // ToString(context, k.value()) is no-op
-
- CopyOneElement(context, o.value(), a, p_k, n);
-
- // e. Increase k by 1.
- k.Bind(NumberInc(k.value()));
-
- // f. Increase n by 1.
- n.Bind(NumberInc(n.value()));
-
- Goto(&loop);
- }
-
- BIND(&after_loop);
-
- // 16. Let setStatus be Set(A, "length", n, true).
- // 17. ReturnIfAbrupt(setStatus).
- SetPropertyStrict(context, CAST(a), CodeStubAssembler::LengthStringConstant(),
- CAST(n.value()));
- args.PopAndReturn(a);
-}
-
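
The CSA ArrayPrototypeSlice builtin deleted above (consistent with the CloneFastJSArray/ExtractFastJSArray externs and BranchIfFastJSArrayForCopy compatibility macro declared earlier in this diff, its replacement is generated from Torque) implemented the spec's index clamping in steps 5-11: negative indices count from the end, and count = max(final - k, 0). A sketch of just that clamping as ordinary C++:

    #include <algorithm>
    #include <cstdio>

    void ClampSliceRange(double len, double relative_start, double relative_end,
                         double* k, double* count) {
      *k = relative_start < 0 ? std::max(len + relative_start, 0.0)
                              : std::min(relative_start, len);
      double final = relative_end < 0 ? std::max(len + relative_end, 0.0)
                                      : std::min(relative_end, len);
      *count = std::max(final - *k, 0.0);
    }

    int main() {
      double k, count;
      ClampSliceRange(5, -2, 5, &k, &count);       // arr.slice(-2) on length 5
      std::printf("k=%g count=%g\n", k, count);    // k=3 count=2
    }
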
TF_BUILTIN(ArrayPrototypeShift, CodeStubAssembler) {
TNode<Int32T> argc =
UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount));
@@ -1532,18 +1055,18 @@ TF_BUILTIN(ArrayPrototypeShift, CodeStubAssembler) {
// read-only length.
EnsureArrayLengthWritable(LoadMap(array_receiver), &runtime);
- Node* length =
+ TNode<IntPtrT> length =
LoadAndUntagObjectField(array_receiver, JSArray::kLengthOffset);
Label return_undefined(this), fast_elements_tagged(this),
fast_elements_smi(this);
GotoIf(IntPtrEqual(length, IntPtrConstant(0)), &return_undefined);
// 3) Check that the elements backing store isn't copy-on-write.
- Node* elements = LoadElements(array_receiver);
+ TNode<FixedArrayBase> elements = LoadElements(array_receiver);
GotoIf(WordEqual(LoadMap(elements), LoadRoot(RootIndex::kFixedCOWArrayMap)),
&runtime);
- Node* new_length = IntPtrSub(length, IntPtrConstant(1));
+ TNode<IntPtrT> new_length = IntPtrSub(length, IntPtrConstant(1));
// 4) Check that we're not supposed to right-trim the backing store, as
// implemented in elements.cc:ElementsAccessorBase::SetLengthImpl.
@@ -1563,6 +1086,8 @@ TF_BUILTIN(ArrayPrototypeShift, CodeStubAssembler) {
StoreObjectFieldNoWriteBarrier(array_receiver, JSArray::kLengthOffset,
SmiTag(new_length));
+ TNode<IntPtrT> element_zero = IntPtrConstant(0);
+ TNode<IntPtrT> element_one = IntPtrConstant(1);
TNode<Int32T> elements_kind = LoadElementsKind(array_receiver);
GotoIf(
Int32LessThanOrEqual(elements_kind, Int32Constant(HOLEY_SMI_ELEMENTS)),
@@ -1580,37 +1105,13 @@ TF_BUILTIN(ArrayPrototypeShift, CodeStubAssembler) {
Label move_elements(this);
result.Bind(AllocateHeapNumberWithValue(LoadFixedDoubleArrayElement(
- elements, IntPtrConstant(0), MachineType::Float64(), 0,
- INTPTR_PARAMETERS, &move_elements)));
+ CAST(elements), element_zero, &move_elements)));
Goto(&move_elements);
BIND(&move_elements);
- int32_t header_size = FixedDoubleArray::kHeaderSize - kHeapObjectTag;
- Node* memmove =
- ExternalConstant(ExternalReference::libc_memmove_function());
- Node* start = IntPtrAdd(
- BitcastTaggedToWord(elements),
- ElementOffsetFromIndex(IntPtrConstant(0), HOLEY_DOUBLE_ELEMENTS,
- INTPTR_PARAMETERS, header_size));
- CallCFunction3(MachineType::AnyTagged(), MachineType::Pointer(),
- MachineType::Pointer(), MachineType::UintPtr(), memmove,
- start, IntPtrAdd(start, IntPtrConstant(kDoubleSize)),
- IntPtrMul(new_length, IntPtrConstant(kDoubleSize)));
- Node* offset = ElementOffsetFromIndex(new_length, HOLEY_DOUBLE_ELEMENTS,
- INTPTR_PARAMETERS, header_size);
- if (Is64()) {
- Node* double_hole = Int64Constant(kHoleNanInt64);
- StoreNoWriteBarrier(MachineRepresentation::kWord64, elements, offset,
- double_hole);
- } else {
- STATIC_ASSERT(kHoleNanLower32 == kHoleNanUpper32);
- Node* double_hole = Int32Constant(kHoleNanLower32);
- StoreNoWriteBarrier(MachineRepresentation::kWord32, elements, offset,
- double_hole);
- StoreNoWriteBarrier(MachineRepresentation::kWord32, elements,
- IntPtrAdd(offset, IntPtrConstant(kPointerSize)),
- double_hole);
- }
+ MoveElements(HOLEY_DOUBLE_ELEMENTS, elements, element_zero, element_one,
+ new_length);
+ StoreFixedDoubleArrayHole(CAST(elements), new_length);
args.PopAndReturn(result.value());
}
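
The shift fast path above collapses the explicit memmove call plus the hand-rolled hole store into MoveElements and StoreFixedDoubleArrayHole. In plain C++ the double-elements case amounts to the following (toy code, with a plain array standing in for the FixedDoubleArray backing store):

    #include <cstdio>
    #include <cstring>

    int main() {
      double elements[] = {1.5, 2.5, 3.5, 4.5};
      const int len = 4, new_length = len - 1;
      // MoveElements(HOLEY_DOUBLE_ELEMENTS, elements, dst=0, src=1, new_length)
      std::memmove(&elements[0], &elements[1], new_length * sizeof(double));
      elements[new_length] = -1;  // stand-in for StoreFixedDoubleArrayHole
      for (double d : elements) std::printf("%g ", d);  // 2.5 3.5 4.5 -1
      std::printf("\n");
    }
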
@@ -1618,15 +1119,8 @@ TF_BUILTIN(ArrayPrototypeShift, CodeStubAssembler) {
{
TNode<FixedArray> elements_fixed_array = CAST(elements);
Node* value = LoadFixedArrayElement(elements_fixed_array, 0);
- BuildFastLoop(
- IntPtrConstant(0), new_length,
- [&](Node* index) {
- StoreFixedArrayElement(
- elements_fixed_array, index,
- LoadFixedArrayElement(elements_fixed_array,
- IntPtrAdd(index, IntPtrConstant(1))));
- },
- 1, ParameterMode::INTPTR_PARAMETERS, IndexAdvanceMode::kPost);
+ MoveElements(HOLEY_ELEMENTS, elements, element_zero, element_one,
+ new_length);
StoreFixedArrayElement(elements_fixed_array, new_length,
TheHoleConstant());
GotoIf(WordEqual(value, TheHoleConstant()), &return_undefined);
@@ -1637,16 +1131,8 @@ TF_BUILTIN(ArrayPrototypeShift, CodeStubAssembler) {
{
TNode<FixedArray> elements_fixed_array = CAST(elements);
Node* value = LoadFixedArrayElement(elements_fixed_array, 0);
- BuildFastLoop(
- IntPtrConstant(0), new_length,
- [&](Node* index) {
- StoreFixedArrayElement(
- elements_fixed_array, index,
- LoadFixedArrayElement(elements_fixed_array,
- IntPtrAdd(index, IntPtrConstant(1))),
- SKIP_WRITE_BARRIER);
- },
- 1, ParameterMode::INTPTR_PARAMETERS, IndexAdvanceMode::kPost);
+ MoveElements(HOLEY_SMI_ELEMENTS, elements, element_zero, element_one,
+ new_length);
StoreFixedArrayElement(elements_fixed_array, new_length,
TheHoleConstant());
GotoIf(WordEqual(value, TheHoleConstant()), &return_undefined);
@@ -1686,8 +1172,8 @@ TF_BUILTIN(CloneFastJSArray, ArrayBuiltinsAssembler) {
TNode<JSArray> array = CAST(Parameter(Descriptor::kSource));
CSA_ASSERT(this,
- Word32Or(Word32BinaryNot(IsHoleyFastElementsKind(
- LoadMapElementsKind(LoadMap(array)))),
+ Word32Or(Word32BinaryNot(
+ IsHoleyFastElementsKind(LoadElementsKind(array))),
Word32BinaryNot(IsNoElementsProtectorCellInvalid())));
ParameterMode mode = OptimalParameterMode();
@@ -1706,8 +1192,8 @@ TF_BUILTIN(CloneFastJSArrayFillingHoles, ArrayBuiltinsAssembler) {
TNode<JSArray> array = CAST(Parameter(Descriptor::kSource));
CSA_ASSERT(this,
- Word32Or(Word32BinaryNot(IsHoleyFastElementsKind(
- LoadMapElementsKind(LoadMap(array)))),
+ Word32Or(Word32BinaryNot(
+ IsHoleyFastElementsKind(LoadElementsKind(array))),
Word32BinaryNot(IsNoElementsProtectorCellInvalid())));
ParameterMode mode = OptimalParameterMode();
@@ -1922,8 +1408,7 @@ class ArrayPopulatorAssembler : public CodeStubAssembler {
BIND(&is_constructor);
{
- array = CAST(
- ConstructJS(CodeFactory::Construct(isolate()), context, receiver));
+ array = Construct(context, CAST(receiver));
Goto(&done);
}
@@ -1934,9 +1419,9 @@ class ArrayPopulatorAssembler : public CodeStubAssembler {
TNode<Map> array_map = CAST(LoadContextElement(
context, Context::JS_ARRAY_PACKED_SMI_ELEMENTS_MAP_INDEX));
- array = CAST(AllocateJSArray(PACKED_SMI_ELEMENTS, array_map,
- SmiConstant(0), SmiConstant(0), nullptr,
- ParameterMode::SMI_PARAMETERS));
+ array = AllocateJSArray(PACKED_SMI_ELEMENTS, array_map, SmiConstant(0),
+ SmiConstant(0), nullptr,
+ ParameterMode::SMI_PARAMETERS);
Goto(&done);
}
@@ -1955,95 +1440,19 @@ class ArrayPopulatorAssembler : public CodeStubAssembler {
BIND(&is_constructor);
{
- array = CAST(ConstructJS(CodeFactory::Construct(isolate()), context,
- receiver, length));
+ array = Construct(context, CAST(receiver), length);
Goto(&done);
}
BIND(&is_not_constructor);
{
- Label allocate_js_array(this);
-
- Label next(this), runtime(this, Label::kDeferred);
- TNode<Smi> limit = SmiConstant(JSArray::kInitialMaxFastElementArray);
- CSA_ASSERT_BRANCH(this, [=](Label* ok, Label* not_ok) {
- BranchIfNumberRelationalComparison(Operation::kGreaterThanOrEqual,
- length, SmiConstant(0), ok, not_ok);
- });
- // This check also transitively covers the case where length is too big
- // to be representable by a SMI and so is not usable with
- // AllocateJSArray.
- BranchIfNumberRelationalComparison(Operation::kGreaterThanOrEqual, length,
- limit, &runtime, &next);
-
- BIND(&runtime);
- {
- TNode<Context> native_context = LoadNativeContext(context);
- TNode<JSFunction> array_function = CAST(
- LoadContextElement(native_context, Context::ARRAY_FUNCTION_INDEX));
- array = CallRuntime(Runtime::kNewArray, context, array_function, length,
- array_function, UndefinedConstant());
- Goto(&done);
- }
-
- BIND(&next);
- CSA_ASSERT(this, TaggedIsSmi(length));
-
- TNode<Map> array_map = CAST(LoadContextElement(
- context, Context::JS_ARRAY_PACKED_SMI_ELEMENTS_MAP_INDEX));
-
- // TODO(delphick): Consider using
- // AllocateUninitializedJSArrayWithElements to avoid initializing an
- // array and then writing over it.
- array = CAST(AllocateJSArray(PACKED_SMI_ELEMENTS, array_map, length,
- SmiConstant(0), nullptr,
- ParameterMode::SMI_PARAMETERS));
+ array = ArrayCreate(context, length);
Goto(&done);
}
BIND(&done);
return array.value();
}
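
The ArrayCreate call above subsumes the allocation dispatch the removed block open-coded. A hedged sketch of that policy (the limit is V8's JSArray::kInitialMaxFastElementArray; it is a parameter here rather than a quoted value):

    // Returns true when the runtime must allocate: lengths at or above the
    // fast-allocation limit (which transitively includes any length too
    // large to be a Smi) cannot use AllocateJSArray. The caller has already
    // asserted that length is non-negative.
    inline bool UseRuntimeNewArray(double length, double initial_max_fast) {
      return length >= initial_max_fast;
    }
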
-
- void GenerateSetLength(TNode<Context> context, TNode<Object> array,
- TNode<Number> length) {
- Label fast(this), runtime(this), done(this);
-    // There's no need to set the length if
-    // 1) the array is a fast JS array and
-    // 2) the new length is equal to the old length,
-    // as the set is not observable. Otherwise fall back to the runtime.
-
- // 1) Check that the array has fast elements.
-    // TODO(delphick): Consider changing this since it does an unnecessary
- // check for SMIs.
- // TODO(delphick): Also we could hoist this to after the array construction
- // and copy the args into array in the same way as the Array constructor.
- BranchIfFastJSArray(array, context, &fast, &runtime);
-
- BIND(&fast);
- {
- TNode<JSArray> fast_array = CAST(array);
-
- TNode<Smi> length_smi = CAST(length);
- TNode<Smi> old_length = LoadFastJSArrayLength(fast_array);
- CSA_ASSERT(this, TaggedIsPositiveSmi(old_length));
-
- // 2) If the created array's length matches the required length, then
- // there's nothing else to do. Otherwise use the runtime to set the
- // property as that will insert holes into excess elements or shrink
- // the backing store as appropriate.
- Branch(SmiNotEqual(length_smi, old_length), &runtime, &done);
- }
-
- BIND(&runtime);
- {
- SetPropertyStrict(context, array,
- CodeStubAssembler::LengthStringConstant(), length);
- Goto(&done);
- }
-
- BIND(&done);
- }
};
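
The removed GenerateSetLength survives as CodeStubAssembler::SetPropertyLength (called further down). A minimal sketch of the observation it relies on (names hypothetical):

    // Skipping the length store is only valid when it is unobservable: a
    // fast JSArray whose length already equals the requested one. Any other
    // case must go through the runtime, which inserts holes into excess
    // elements or shrinks the backing store as appropriate.
    inline bool NeedsRuntimeSetLength(bool is_fast_js_array,
                                      double old_length, double new_length) {
      return !(is_fast_js_array && old_length == new_length);
    }
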
// ES #sec-array.from
@@ -2053,7 +1462,29 @@ TF_BUILTIN(ArrayFrom, ArrayPopulatorAssembler) {
UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount));
CodeStubArguments args(this, ChangeInt32ToIntPtr(argc));
+ TNode<Object> items = args.GetOptionalArgumentValue(0);
+ TNode<Object> receiver = args.GetReceiver();
+
+ Label fast_iterate(this), normal_iterate(this);
+
+ // Use fast path if:
+ // * |items| is the only argument, and
+ // * the receiver is the Array function.
+ GotoIfNot(Word32Equal(argc, Int32Constant(1)), &normal_iterate);
+ TNode<Object> array_function = LoadContextElement(
+ LoadNativeContext(context), Context::ARRAY_FUNCTION_INDEX);
+ Branch(WordEqual(array_function, receiver), &fast_iterate, &normal_iterate);
+
+ BIND(&fast_iterate);
+ {
+ IteratorBuiltinsAssembler iterator_assembler(state());
+ TVARIABLE(Object, var_fast_result);
+ iterator_assembler.FastIterableToList(context, items, &var_fast_result,
+ &normal_iterate);
+ args.PopAndReturn(var_fast_result.value());
+ }
+ BIND(&normal_iterate);
TNode<Object> map_function = args.GetOptionalArgumentValue(1);
// If map_function is not undefined, then ensure it's callable else throw.
@@ -2072,7 +1503,6 @@ TF_BUILTIN(ArrayFrom, ArrayPopulatorAssembler) {
Label iterable(this), not_iterable(this), finished(this), if_exception(this);
TNode<Object> this_arg = args.GetOptionalArgumentValue(2);
- TNode<Object> items = args.GetOptionalArgumentValue(0);
  // The spec doesn't require ToObject to be called directly on the iterable
  // branch, but it's part of GetMethod, which is in the spec.
TNode<JSReceiver> array_like = ToObject_Inline(context, items);
@@ -2109,7 +1539,7 @@ TF_BUILTIN(ArrayFrom, ArrayPopulatorAssembler) {
}
  // Construct the output array with zero length.
- array = ConstructArrayLike(context, args.GetReceiver());
+ array = ConstructArrayLike(context, receiver);
// Actually get the iterator and throw if the iterator method does not yield
// one.
@@ -2125,8 +1555,8 @@ TF_BUILTIN(ArrayFrom, ArrayPopulatorAssembler) {
BIND(&loop);
{
// Loop while iterator is not done.
- TNode<Object> next = CAST(iterator_assembler.IteratorStep(
- context, iterator_record, &loop_done, fast_iterator_result_map));
+ TNode<Object> next = iterator_assembler.IteratorStep(
+ context, iterator_record, &loop_done, fast_iterator_result_map);
TVARIABLE(Object, value,
CAST(iterator_assembler.IteratorValue(
context, next, fast_iterator_result_map)));
@@ -2181,7 +1611,7 @@ TF_BUILTIN(ArrayFrom, ArrayPopulatorAssembler) {
// Close the iterator, rethrowing either the passed exception or
// exceptions thrown during the close.
iterator_assembler.IteratorCloseOnException(context, iterator_record,
- &var_exception);
+ var_exception.value());
}
}
@@ -2193,7 +1623,7 @@ TF_BUILTIN(ArrayFrom, ArrayPopulatorAssembler) {
// Construct an array using the receiver as constructor with the same length
// as the input array.
- array = ConstructArrayLike(context, args.GetReceiver(), length.value());
+ array = ConstructArrayLike(context, receiver, length.value());
TVARIABLE(Number, index, SmiConstant(0));
@@ -2235,36 +1665,10 @@ TF_BUILTIN(ArrayFrom, ArrayPopulatorAssembler) {
BIND(&finished);
// Finally set the length on the output and return it.
- GenerateSetLength(context, array.value(), length.value());
+ SetPropertyLength(context, array.value(), length.value());
args.PopAndReturn(array.value());
}
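
A sketch of the guard the new fast path installs above (pointer identity stands in for the WordEqual check; names hypothetical):

    // Array.from may only take the iterable-to-list shortcut when no mapper
    // or thisArg was passed and the receiver is the unmodified Array
    // constructor, so neither mapping nor subclassing is observable.
    inline bool CanUseFastIterableToList(int argc, const void* receiver,
                                         const void* array_function) {
      return argc == 1 && receiver == array_function;
    }
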
-// ES #sec-array.of
-TF_BUILTIN(ArrayOf, ArrayPopulatorAssembler) {
- TNode<Int32T> argc =
- UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount));
- TNode<Smi> length = SmiFromInt32(argc);
-
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
-
- CodeStubArguments args(this, length, nullptr, ParameterMode::SMI_PARAMETERS);
-
- TNode<Object> array = ConstructArrayLike(context, args.GetReceiver(), length);
-
- // TODO(delphick): Avoid using CreateDataProperty on the fast path.
- BuildFastLoop(SmiConstant(0), length,
- [=](Node* index) {
- CallRuntime(
- Runtime::kCreateDataProperty, context,
- static_cast<Node*>(array), index,
- args.AtIndex(index, ParameterMode::SMI_PARAMETERS));
- },
- 1, ParameterMode::SMI_PARAMETERS, IndexAdvanceMode::kPost);
-
- GenerateSetLength(context, array, length);
- args.PopAndReturn(array);
-}
-
// ES #sec-get-%typedarray%.prototype.find
TF_BUILTIN(TypedArrayPrototypeFind, ArrayBuiltinsAssembler) {
TNode<IntPtrT> argc =
@@ -2724,101 +2128,6 @@ TF_BUILTIN(TypedArrayPrototypeReduceRight, ArrayBuiltinsAssembler) {
ForEachDirection::kReverse);
}
-TF_BUILTIN(ArrayFilterLoopContinuation, ArrayBuiltinsAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- Node* callbackfn = Parameter(Descriptor::kCallbackFn);
- Node* this_arg = Parameter(Descriptor::kThisArg);
- Node* array = Parameter(Descriptor::kArray);
- TNode<JSReceiver> object = CAST(Parameter(Descriptor::kObject));
- Node* initial_k = Parameter(Descriptor::kInitialK);
- TNode<Number> len = CAST(Parameter(Descriptor::kLength));
- Node* to = Parameter(Descriptor::kTo);
-
- InitIteratingArrayBuiltinLoopContinuation(context, receiver, callbackfn,
- this_arg, array, object, initial_k,
- len, to);
-
- GenerateIteratingArrayBuiltinLoopContinuation(
- &ArrayBuiltinsAssembler::FilterProcessor,
- &ArrayBuiltinsAssembler::NullPostLoopAction, MissingPropertyMode::kSkip);
-}
-
-TF_BUILTIN(ArrayFilterLoopEagerDeoptContinuation, ArrayBuiltinsAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- Node* callbackfn = Parameter(Descriptor::kCallbackFn);
- Node* this_arg = Parameter(Descriptor::kThisArg);
- Node* array = Parameter(Descriptor::kArray);
- Node* initial_k = Parameter(Descriptor::kInitialK);
- TNode<Number> len = CAST(Parameter(Descriptor::kLength));
- Node* to = Parameter(Descriptor::kTo);
-
- Return(CallBuiltin(Builtins::kArrayFilterLoopContinuation, context, receiver,
- callbackfn, this_arg, array, receiver, initial_k, len,
- to));
-}
-
-TF_BUILTIN(ArrayFilterLoopLazyDeoptContinuation, ArrayBuiltinsAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- Node* callbackfn = Parameter(Descriptor::kCallbackFn);
- Node* this_arg = Parameter(Descriptor::kThisArg);
- Node* array = Parameter(Descriptor::kArray);
- Node* initial_k = Parameter(Descriptor::kInitialK);
- TNode<Number> len = CAST(Parameter(Descriptor::kLength));
- Node* value_k = Parameter(Descriptor::kValueK);
- Node* result = Parameter(Descriptor::kResult);
-
- VARIABLE(to, MachineRepresentation::kTagged, Parameter(Descriptor::kTo));
-
- // This custom lazy deopt point is right after the callback. filter() needs
- // to pick up at the next step, which is setting the callback result in
- // the output array. After incrementing k and to, we can glide into the loop
- // continuation builtin.
-
- Label true_continue(this, &to), false_continue(this);
-
- // iii. If selected is true, then...
- BranchIfToBooleanIsTrue(result, &true_continue, &false_continue);
- BIND(&true_continue);
- {
- // 1. Perform ? CreateDataPropertyOrThrow(A, ToString(to), kValue).
- CallRuntime(Runtime::kCreateDataProperty, context, array, to.value(),
- value_k);
- // 2. Increase to by 1.
- to.Bind(NumberInc(to.value()));
- Goto(&false_continue);
- }
- BIND(&false_continue);
-
- // Increment k.
- initial_k = NumberInc(initial_k);
-
- Return(CallBuiltin(Builtins::kArrayFilterLoopContinuation, context, receiver,
- callbackfn, this_arg, array, receiver, initial_k, len,
- to.value()));
-}
-
-TF_BUILTIN(ArrayFilter, ArrayBuiltinsAssembler) {
- TNode<IntPtrT> argc =
- ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
- CodeStubArguments args(this, argc);
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> receiver = args.GetReceiver();
- Node* callbackfn = args.GetOptionalArgumentValue(0);
- Node* this_arg = args.GetOptionalArgumentValue(1);
-
- InitIteratingArrayBuiltinBody(context, receiver, callbackfn, this_arg, argc);
-
- GenerateIteratingArrayBuiltinBody(
- "Array.prototype.filter", &ArrayBuiltinsAssembler::FilterResultGenerator,
- &ArrayBuiltinsAssembler::FilterProcessor,
- &ArrayBuiltinsAssembler::NullPostLoopAction,
- Builtins::CallableFor(isolate(), Builtins::kArrayFilterLoopContinuation),
- MissingPropertyMode::kSkip);
-}
-
TF_BUILTIN(ArrayMapLoopContinuation, ArrayBuiltinsAssembler) {
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
@@ -3601,6 +2910,7 @@ TF_BUILTIN(ArrayIteratorPrototypeNext, CodeStubAssembler) {
Label if_hole(this, Label::kDeferred);
TNode<Int32T> elements_kind = LoadMapElementsKind(array_map);
TNode<FixedArrayBase> elements = LoadElements(CAST(array));
+ GotoIfForceSlowPath(&if_generic);
var_value.Bind(LoadFixedArrayBaseElementAsTagged(
elements, Signed(ChangeUint32ToWord(index32)), elements_kind,
&if_generic, &if_hole));
@@ -3676,7 +2986,7 @@ TF_BUILTIN(ArrayIteratorPrototypeNext, CodeStubAssembler) {
// If {array} is a JSTypedArray, the {index} must always be a Smi.
CSA_ASSERT(this, TaggedIsSmi(index));
- // Check that the {array}s buffer wasn't neutered.
+  // Check that the {array}'s buffer wasn't detached.
ThrowIfArrayBufferViewBufferIsDetached(context, CAST(array), method_name);
// If we go outside of the {length}, we don't need to update the
@@ -3879,6 +3189,10 @@ TF_BUILTIN(FlattenIntoArray, ArrayFlattenAssembler) {
Node* const start = Parameter(Descriptor::kStart);
Node* const depth = Parameter(Descriptor::kDepth);
+  // FlattenIntoArray might get called recursively; since it has stub
+  // linkage, check the stack for overflow manually.
+ PerformStackCheck(CAST(context));
+
Return(
FlattenIntoArray(context, target, source, source_length, start, depth));
}
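
A self-contained sketch of why the added PerformStackCheck matters (illustrative only; a plain counter stands in for the real stack-limit probe):

    #include <stdexcept>
    #include <vector>

    struct Node {
      double number = 0;           // leaf payload
      std::vector<Node> elements;  // non-empty means a nested "array"
    };

    // Recursive flatten with an explicit guard standing in for
    // PerformStackCheck: stub-linkage builtins skip the usual JS frame
    // checks, so unbounded recursion must be caught manually.
    void FlattenInto(std::vector<double>* target, const Node& source,
                     int depth, int* stack_budget) {
      if (--*stack_budget < 0)
        throw std::range_error("Maximum call stack size exceeded");
      for (const Node& element : source.elements) {
        if (depth > 0 && !element.elements.empty()) {
          FlattenInto(target, element, depth - 1, stack_budget);
        } else {
          target->push_back(element.number);
        }
      }
    }
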
@@ -3900,38 +3214,37 @@ TF_BUILTIN(FlatMapIntoArray, ArrayFlattenAssembler) {
// https://tc39.github.io/proposal-flatMap/#sec-Array.prototype.flat
TF_BUILTIN(ArrayPrototypeFlat, CodeStubAssembler) {
- Node* const argc =
+ TNode<IntPtrT> const argc =
ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
CodeStubArguments args(this, argc);
- Node* const context = Parameter(Descriptor::kContext);
- Node* const receiver = args.GetReceiver();
- Node* const depth = args.GetOptionalArgumentValue(0);
+ TNode<Context> const context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> const receiver = args.GetReceiver();
+ TNode<Object> const depth = args.GetOptionalArgumentValue(0);
// 1. Let O be ? ToObject(this value).
- Node* const o = ToObject_Inline(CAST(context), CAST(receiver));
+ TNode<JSReceiver> const o = ToObject_Inline(context, receiver);
// 2. Let sourceLen be ? ToLength(? Get(O, "length")).
- Node* const source_length =
+ TNode<Number> const source_length =
ToLength_Inline(context, GetProperty(context, o, LengthStringConstant()));
// 3. Let depthNum be 1.
- VARIABLE(var_depth_num, MachineRepresentation::kTagged, SmiConstant(1));
+ TVARIABLE(Number, var_depth_num, SmiConstant(1));
// 4. If depth is not undefined, then
Label done(this);
GotoIf(IsUndefined(depth), &done);
{
// a. Set depthNum to ? ToInteger(depth).
- var_depth_num.Bind(ToInteger_Inline(context, depth));
+ var_depth_num = ToInteger_Inline(context, depth);
Goto(&done);
}
BIND(&done);
// 5. Let A be ? ArraySpeciesCreate(O, 0).
- Node* const constructor =
- CallRuntime(Runtime::kArraySpeciesConstructor, context, o);
- Node* const a = ConstructJS(CodeFactory::Construct(isolate()), context,
- constructor, SmiConstant(0));
+ TNode<JSReceiver> const constructor =
+ CAST(CallRuntime(Runtime::kArraySpeciesConstructor, context, o));
+ Node* const a = Construct(context, constructor, SmiConstant(0));
// 6. Perform ? FlattenIntoArray(A, O, sourceLen, 0, depthNum).
CallBuiltin(Builtins::kFlattenIntoArray, context, a, o, source_length,
@@ -3943,33 +3256,32 @@ TF_BUILTIN(ArrayPrototypeFlat, CodeStubAssembler) {
// https://tc39.github.io/proposal-flatMap/#sec-Array.prototype.flatMap
TF_BUILTIN(ArrayPrototypeFlatMap, CodeStubAssembler) {
- Node* const argc =
+ TNode<IntPtrT> const argc =
ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
CodeStubArguments args(this, argc);
- Node* const context = Parameter(Descriptor::kContext);
- Node* const receiver = args.GetReceiver();
- Node* const mapper_function = args.GetOptionalArgumentValue(0);
+ TNode<Context> const context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> const receiver = args.GetReceiver();
+ TNode<Object> const mapper_function = args.GetOptionalArgumentValue(0);
// 1. Let O be ? ToObject(this value).
- Node* const o = ToObject_Inline(CAST(context), CAST(receiver));
+ TNode<JSReceiver> const o = ToObject_Inline(context, receiver);
// 2. Let sourceLen be ? ToLength(? Get(O, "length")).
- Node* const source_length =
+ TNode<Number> const source_length =
ToLength_Inline(context, GetProperty(context, o, LengthStringConstant()));
// 3. If IsCallable(mapperFunction) is false, throw a TypeError exception.
Label if_not_callable(this, Label::kDeferred);
GotoIf(TaggedIsSmi(mapper_function), &if_not_callable);
- GotoIfNot(IsCallable(mapper_function), &if_not_callable);
+ GotoIfNot(IsCallable(CAST(mapper_function)), &if_not_callable);
// 4. If thisArg is present, let T be thisArg; else let T be undefined.
- Node* const t = args.GetOptionalArgumentValue(1);
+ TNode<Object> const t = args.GetOptionalArgumentValue(1);
// 5. Let A be ? ArraySpeciesCreate(O, 0).
- Node* const constructor =
- CallRuntime(Runtime::kArraySpeciesConstructor, context, o);
- Node* const a = ConstructJS(CodeFactory::Construct(isolate()), context,
- constructor, SmiConstant(0));
+ TNode<JSReceiver> const constructor =
+ CAST(CallRuntime(Runtime::kArraySpeciesConstructor, context, o));
+ TNode<JSReceiver> const a = Construct(context, constructor, SmiConstant(0));
// 6. Perform ? FlattenIntoArray(A, O, sourceLen, 0, 1, mapperFunction, T).
CallBuiltin(Builtins::kFlatMapIntoArray, context, a, o, source_length,
@@ -4208,7 +3520,7 @@ void ArrayBuiltinsAssembler::GenerateConstructor(
TailCallRuntime(Runtime::kAbort, context, reason);
} else {
int element_size =
- IsDoubleElementsKind(elements_kind) ? kDoubleSize : kPointerSize;
+ IsDoubleElementsKind(elements_kind) ? kDoubleSize : kTaggedSize;
int max_fast_elements =
(kMaxRegularHeapObjectSize - FixedArray::kHeaderSize - JSArray::kSize -
AllocationMemento::kSize) /
@@ -4219,8 +3531,8 @@ void ArrayBuiltinsAssembler::GenerateConstructor(
BIND(&small_smi_size);
{
- Node* array = AllocateJSArray(
- elements_kind, array_map, array_size, array_size,
+ TNode<JSArray> array = AllocateJSArray(
+ elements_kind, CAST(array_map), array_size, CAST(array_size),
mode == DONT_TRACK_ALLOCATION_SITE ? nullptr : allocation_site,
CodeStubAssembler::SMI_PARAMETERS);
Return(array);
@@ -4242,8 +3554,8 @@ void ArrayBuiltinsAssembler::GenerateArrayNoArgumentConstructor(
AllocationSite::ShouldTrack(kind) && mode != DISABLE_ALLOCATION_SITES;
Node* allocation_site =
track_allocation_site ? Parameter(Descriptor::kAllocationSite) : nullptr;
- Node* array_map = LoadJSArrayElementsMap(kind, native_context);
- Node* array = AllocateJSArray(
+ TNode<Map> array_map = LoadJSArrayElementsMap(kind, native_context);
+ TNode<JSArray> array = AllocateJSArray(
kind, array_map, IntPtrConstant(JSArray::kPreallocatedArrayElements),
SmiConstant(0), allocation_site);
Return(array);
@@ -4299,31 +3611,6 @@ TF_BUILTIN(ArrayNArgumentsConstructor, ArrayBuiltinsAssembler) {
maybe_allocation_site);
}
-void ArrayBuiltinsAssembler::GenerateInternalArrayNoArgumentConstructor(
- ElementsKind kind) {
- typedef ArrayNoArgumentConstructorDescriptor Descriptor;
- Node* array_map = LoadObjectField(Parameter(Descriptor::kFunction),
- JSFunction::kPrototypeOrInitialMapOffset);
- Node* array = AllocateJSArray(
- kind, array_map, IntPtrConstant(JSArray::kPreallocatedArrayElements),
- SmiConstant(0));
- Return(array);
-}
-
-void ArrayBuiltinsAssembler::GenerateInternalArraySingleArgumentConstructor(
- ElementsKind kind) {
- typedef ArraySingleArgumentConstructorDescriptor Descriptor;
- Node* context = Parameter(Descriptor::kContext);
- Node* function = Parameter(Descriptor::kFunction);
- Node* array_map =
- LoadObjectField(function, JSFunction::kPrototypeOrInitialMapOffset);
- Node* array_size = Parameter(Descriptor::kArraySizeSmiParameter);
- Node* allocation_site = UndefinedConstant();
-
- GenerateConstructor(context, function, array_map, array_size, allocation_site,
- kind, DONT_TRACK_ALLOCATION_SITE);
-}
-
#define GENERATE_ARRAY_CTOR(name, kind_camel, kind_caps, mode_camel, \
mode_caps) \
TF_BUILTIN(Array##name##Constructor_##kind_camel##_##mode_camel, \
@@ -4369,18 +3656,16 @@ GENERATE_ARRAY_CTOR(SingleArgument, HoleyDouble, HOLEY_DOUBLE_ELEMENTS,
#undef GENERATE_ARRAY_CTOR
-#define GENERATE_INTERNAL_ARRAY_CTOR(name, kind_camel, kind_caps) \
- TF_BUILTIN(InternalArray##name##Constructor_##kind_camel, \
- ArrayBuiltinsAssembler) { \
- GenerateInternalArray##name##Constructor(kind_caps); \
- }
-
-GENERATE_INTERNAL_ARRAY_CTOR(NoArgument, Packed, PACKED_ELEMENTS);
-GENERATE_INTERNAL_ARRAY_CTOR(NoArgument, Holey, HOLEY_ELEMENTS);
-GENERATE_INTERNAL_ARRAY_CTOR(SingleArgument, Packed, PACKED_ELEMENTS);
-GENERATE_INTERNAL_ARRAY_CTOR(SingleArgument, Holey, HOLEY_ELEMENTS);
-
-#undef GENERATE_INTERNAL_ARRAY_CTOR
+TF_BUILTIN(InternalArrayNoArgumentConstructor_Packed, ArrayBuiltinsAssembler) {
+ typedef ArrayNoArgumentConstructorDescriptor Descriptor;
+ TNode<Map> array_map =
+ CAST(LoadObjectField(Parameter(Descriptor::kFunction),
+ JSFunction::kPrototypeOrInitialMapOffset));
+ TNode<JSArray> array = AllocateJSArray(
+ PACKED_ELEMENTS, array_map,
+ IntPtrConstant(JSArray::kPreallocatedArrayElements), SmiConstant(0));
+ Return(array);
+}
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/builtins/builtins-array-gen.h b/deps/v8/src/builtins/builtins-array-gen.h
index a73c072cee..86fc09f8b4 100644
--- a/deps/v8/src/builtins/builtins-array-gen.h
+++ b/deps/v8/src/builtins/builtins-array-gen.h
@@ -5,12 +5,12 @@
#ifndef V8_BUILTINS_BUILTINS_ARRAY_GEN_H_
#define V8_BUILTINS_BUILTINS_ARRAY_GEN_H_
-#include "torque-generated/builtins-base-from-dsl-gen.h"
+#include "src/code-stub-assembler.h"
namespace v8 {
namespace internal {
-class ArrayBuiltinsAssembler : public BaseBuiltinsFromDSLAssembler {
+class ArrayBuiltinsAssembler : public CodeStubAssembler {
public:
explicit ArrayBuiltinsAssembler(compiler::CodeAssemblerState* state);
@@ -72,6 +72,23 @@ class ArrayBuiltinsAssembler : public BaseBuiltinsFromDSLAssembler {
void FillFixedArrayWithSmiZero(TNode<FixedArray> array,
TNode<Smi> smi_length);
+ TNode<String> CallJSArrayArrayJoinConcatToSequentialString(
+ TNode<FixedArray> fixed_array, TNode<IntPtrT> length, TNode<String> sep,
+ TNode<String> dest) {
+ TNode<ExternalReference> func = ExternalConstant(
+ ExternalReference::jsarray_array_join_concat_to_sequential_string());
+ TNode<ExternalReference> isolate_ptr =
+ ExternalConstant(ExternalReference::isolate_address(isolate()));
+ return UncheckedCast<String>(
+ CallCFunction5(MachineType::AnyTagged(), // <return> String
+ MachineType::Pointer(), // Isolate*
+ MachineType::AnyTagged(), // FixedArray fixed_array
+ MachineType::IntPtr(), // intptr_t length
+ MachineType::AnyTagged(), // String sep
+ MachineType::AnyTagged(), // String dest
+ func, isolate_ptr, fixed_array, length, sep, dest));
+ }
+
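
The five MachineTypes above pin down the shape of the callee; a hedged, hypothetical declaration of the C function behind the ExternalReference (the real one lives in V8's runtime, and the tagged parameters are raw object words in practice):

    #include <cstdint>

    extern "C" void* JSArrayArrayJoinConcatToSequentialString(
        void* isolate,           // MachineType::Pointer()   -> Isolate*
        void* fixed_array,       // MachineType::AnyTagged() -> FixedArray
        std::intptr_t length,    // MachineType::IntPtr()
        void* separator,         // MachineType::AnyTagged() -> String sep
        void* dest);             // MachineType::AnyTagged() -> String dest
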
protected:
TNode<Context> context() { return context_; }
TNode<Object> receiver() { return receiver_; }
@@ -142,9 +159,6 @@ class ArrayBuiltinsAssembler : public BaseBuiltinsFromDSLAssembler {
TNode<Object> new_target, TNode<Int32T> argc,
TNode<HeapObject> maybe_allocation_site);
- void GenerateInternalArrayNoArgumentConstructor(ElementsKind kind);
- void GenerateInternalArraySingleArgumentConstructor(ElementsKind kind);
-
private:
static ElementsKind ElementsKindForInstanceType(InstanceType type);
diff --git a/deps/v8/src/builtins/builtins-array.cc b/deps/v8/src/builtins/builtins-array.cc
index 1e9de3dbe3..9774f24fe0 100644
--- a/deps/v8/src/builtins/builtins-array.cc
+++ b/deps/v8/src/builtins/builtins-array.cc
@@ -5,7 +5,6 @@
#include "src/builtins/builtins-utils-inl.h"
#include "src/builtins/builtins.h"
#include "src/code-factory.h"
-#include "src/code-stub-assembler.h"
#include "src/contexts.h"
#include "src/counters.h"
#include "src/debug/debug.h"
@@ -16,6 +15,7 @@
#include "src/objects-inl.h"
#include "src/objects/hash-table-inl.h"
#include "src/objects/js-array-inl.h"
+#include "src/objects/smi.h"
#include "src/prototype.h"
namespace v8 {
@@ -24,28 +24,27 @@ namespace internal {
namespace {
inline bool IsJSArrayFastElementMovingAllowed(Isolate* isolate,
- JSArray* receiver) {
+ JSArray receiver) {
return JSObject::PrototypeHasNoElements(isolate, receiver);
}
-inline bool HasSimpleElements(JSObject* current) {
+inline bool HasSimpleElements(JSObject current) {
return !current->map()->IsCustomElementsReceiverMap() &&
!current->GetElementsAccessor()->HasAccessors(current);
}
-inline bool HasOnlySimpleReceiverElements(Isolate* isolate,
- JSObject* receiver) {
+inline bool HasOnlySimpleReceiverElements(Isolate* isolate, JSObject receiver) {
// Check that we have no accessors on the receiver's elements.
if (!HasSimpleElements(receiver)) return false;
return JSObject::PrototypeHasNoElements(isolate, receiver);
}
-inline bool HasOnlySimpleElements(Isolate* isolate, JSReceiver* receiver) {
+inline bool HasOnlySimpleElements(Isolate* isolate, JSReceiver receiver) {
DisallowHeapAllocation no_gc;
PrototypeIterator iter(isolate, receiver, kStartAtReceiver);
for (; !iter.IsAtEnd(); iter.Advance()) {
if (iter.GetCurrent()->IsJSProxy()) return false;
- JSObject* current = iter.GetCurrent<JSObject>();
+ JSObject current = iter.GetCurrent<JSObject>();
if (!HasSimpleElements(current)) return false;
}
return true;
@@ -70,7 +69,7 @@ void MatchArrayElementsKindToArguments(Isolate* isolate, Handle<JSArray> array,
DisallowHeapAllocation no_gc;
int last_arg_index = std::min(first_arg_index + num_arguments, args_length);
for (int i = first_arg_index; i < last_arg_index; i++) {
- Object* arg = (*args)[i];
+ Object arg = (*args)[i];
if (arg->IsHeapObject()) {
if (arg->IsHeapNumber()) {
target_kind = PACKED_DOUBLE_ELEMENTS;
@@ -180,10 +179,10 @@ V8_WARN_UNUSED_RESULT MaybeHandle<Object> SetLengthProperty(
isolate->factory()->NewNumber(length), LanguageMode::kStrict);
}
-V8_WARN_UNUSED_RESULT Object* GenericArrayFill(Isolate* isolate,
- Handle<JSReceiver> receiver,
- Handle<Object> value,
- double start, double end) {
+V8_WARN_UNUSED_RESULT Object GenericArrayFill(Isolate* isolate,
+ Handle<JSReceiver> receiver,
+ Handle<Object> value,
+ double start, double end) {
// 7. Repeat, while k < final.
while (start < end) {
// a. Let Pk be ! ToString(k).
@@ -296,8 +295,8 @@ BUILTIN(ArrayPrototypeFill) {
}
namespace {
-V8_WARN_UNUSED_RESULT Object* GenericArrayPush(Isolate* isolate,
- BuiltinArguments* args) {
+V8_WARN_UNUSED_RESULT Object GenericArrayPush(Isolate* isolate,
+ BuiltinArguments* args) {
// 1. Let O be ? ToObject(this value).
Handle<JSReceiver> receiver;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
@@ -389,8 +388,8 @@ BUILTIN(ArrayPush) {
namespace {
-V8_WARN_UNUSED_RESULT Object* GenericArrayPop(Isolate* isolate,
- BuiltinArguments* args) {
+V8_WARN_UNUSED_RESULT Object GenericArrayPop(Isolate* isolate,
+ BuiltinArguments* args) {
// 1. Let O be ? ToObject(this value).
Handle<JSReceiver> receiver;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
@@ -409,7 +408,7 @@ V8_WARN_UNUSED_RESULT Object* GenericArrayPop(Isolate* isolate,
RETURN_FAILURE_ON_EXCEPTION(
isolate, Object::SetProperty(
isolate, receiver, isolate->factory()->length_string(),
- Handle<Smi>(Smi::kZero, isolate), LanguageMode::kStrict));
+ Handle<Smi>(Smi::zero(), isolate), LanguageMode::kStrict));
// b. Return undefined.
return ReadOnlyRoots(isolate).undefined_value();
@@ -425,8 +424,7 @@ V8_WARN_UNUSED_RESULT Object* GenericArrayPop(Isolate* isolate,
// c. Let element be ? Get(O, index).
Handle<Object> element;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, element,
- JSReceiver::GetPropertyOrElement(isolate, receiver, index));
+ isolate, element, Object::GetPropertyOrElement(isolate, receiver, index));
// d. Perform ? DeletePropertyOrThrow(O, index).
MAYBE_RETURN(JSReceiver::DeletePropertyOrElement(receiver, index,
@@ -491,9 +489,9 @@ V8_WARN_UNUSED_RESULT bool CanUseFastArrayShift(Isolate* isolate,
return !JSArray::HasReadOnlyLength(array);
}
-V8_WARN_UNUSED_RESULT Object* GenericArrayShift(Isolate* isolate,
- Handle<JSReceiver> receiver,
- double length) {
+V8_WARN_UNUSED_RESULT Object GenericArrayShift(Isolate* isolate,
+ Handle<JSReceiver> receiver,
+ double length) {
// 4. Let first be ? Get(O, "0").
Handle<Object> first;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, first,
@@ -733,9 +731,9 @@ class ArrayConcatVisitor {
isolate_->factory()->NewNumber(static_cast<double>(index_offset_));
RETURN_ON_EXCEPTION(
isolate_,
- JSReceiver::SetProperty(isolate_, result,
- isolate_->factory()->length_string(), length,
- LanguageMode::kStrict),
+ Object::SetProperty(isolate_, result,
+ isolate_->factory()->length_string(), length,
+ LanguageMode::kStrict),
JSReceiver);
return result;
}
@@ -772,7 +770,7 @@ class ArrayConcatVisitor {
inline void clear_storage() { GlobalHandles::Destroy(storage_.location()); }
- inline void set_storage(FixedArray* storage) {
+ inline void set_storage(FixedArray storage) {
DCHECK(is_fixed_array());
DCHECK(has_simple_elements());
storage_ = isolate_->global_handles()->Create(storage);
@@ -818,7 +816,7 @@ uint32_t EstimateElementCount(Isolate* isolate, Handle<JSArray> array) {
// a 32-bit signed integer.
DCHECK_GE(static_cast<int32_t>(FixedArray::kMaxLength), 0);
int fast_length = static_cast<int>(length);
- FixedArray* elements = FixedArray::cast(array->elements());
+ FixedArray elements = FixedArray::cast(array->elements());
for (int i = 0; i < fast_length; i++) {
if (!elements->get(i)->IsTheHole(isolate)) element_count++;
}
@@ -834,18 +832,18 @@ uint32_t EstimateElementCount(Isolate* isolate, Handle<JSArray> array) {
DCHECK_EQ(FixedArray::cast(array->elements())->length(), 0);
break;
}
- FixedDoubleArray* elements = FixedDoubleArray::cast(array->elements());
+ FixedDoubleArray elements = FixedDoubleArray::cast(array->elements());
for (int i = 0; i < fast_length; i++) {
if (!elements->is_the_hole(i)) element_count++;
}
break;
}
case DICTIONARY_ELEMENTS: {
- NumberDictionary* dictionary = NumberDictionary::cast(array->elements());
+ NumberDictionary dictionary = NumberDictionary::cast(array->elements());
int capacity = dictionary->Capacity();
ReadOnlyRoots roots(isolate);
for (int i = 0; i < capacity; i++) {
- Object* key = dictionary->KeyAt(i);
+ Object key = dictionary->KeyAt(i);
if (dictionary->IsKey(roots, key)) {
element_count++;
}
@@ -880,7 +878,7 @@ void CollectElementIndices(Isolate* isolate, Handle<JSObject> object,
case HOLEY_SMI_ELEMENTS:
case HOLEY_ELEMENTS: {
DisallowHeapAllocation no_gc;
- FixedArray* elements = FixedArray::cast(object->elements());
+ FixedArray elements = FixedArray::cast(object->elements());
uint32_t length = static_cast<uint32_t>(elements->length());
if (range < length) length = range;
for (uint32_t i = 0; i < length; i++) {
@@ -909,11 +907,11 @@ void CollectElementIndices(Isolate* isolate, Handle<JSObject> object,
}
case DICTIONARY_ELEMENTS: {
DisallowHeapAllocation no_gc;
- NumberDictionary* dict = NumberDictionary::cast(object->elements());
+ NumberDictionary dict = NumberDictionary::cast(object->elements());
uint32_t capacity = dict->Capacity();
ReadOnlyRoots roots(isolate);
FOR_WITH_HANDLE_SCOPE(isolate, uint32_t, j = 0, j, j < capacity, j++, {
- Object* k = dict->KeyAt(j);
+ Object k = dict->KeyAt(j);
if (!dict->IsKey(roots, k)) continue;
DCHECK(k->IsNumber());
uint32_t index = static_cast<uint32_t>(k->Number());
@@ -928,8 +926,7 @@ void CollectElementIndices(Isolate* isolate, Handle<JSObject> object,
TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE
{
- uint32_t length = static_cast<uint32_t>(
- FixedArrayBase::cast(object->elements())->length());
+ uint32_t length = static_cast<uint32_t>(object->elements()->length());
if (range <= length) {
length = range;
// We will add all indices, so we might as well clear it first
@@ -945,8 +942,8 @@ void CollectElementIndices(Isolate* isolate, Handle<JSObject> object,
case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
case SLOW_SLOPPY_ARGUMENTS_ELEMENTS: {
DisallowHeapAllocation no_gc;
- FixedArrayBase* elements = object->elements();
- JSObject* raw_object = *object;
+ FixedArrayBase elements = object->elements();
+ JSObject raw_object = *object;
ElementsAccessor* accessor = object->GetElementsAccessor();
for (uint32_t i = 0; i < range; i++) {
if (accessor->HasElement(raw_object, i, elements)) {
@@ -1177,8 +1174,8 @@ static Maybe<bool> IsConcatSpreadable(Isolate* isolate, Handle<Object> obj) {
return Object::IsArray(obj);
}
-Object* Slow_ArrayConcat(BuiltinArguments* args, Handle<Object> species,
- Isolate* isolate) {
+Object Slow_ArrayConcat(BuiltinArguments* args, Handle<Object> species,
+ Isolate* isolate) {
int argument_count = args->length();
bool is_array_species = *species == isolate->context()->array_function();
@@ -1193,7 +1190,7 @@ Object* Slow_ArrayConcat(BuiltinArguments* args, Handle<Object> species,
uint32_t estimate_result_length = 0;
uint32_t estimate_nof = 0;
FOR_WITH_HANDLE_SCOPE(isolate, int, i = 0, i, i < argument_count, i++, {
- Handle<Object> obj((*args)[i], isolate);
+ Handle<Object> obj = args->at(i);
uint32_t length_estimate;
uint32_t element_estimate;
if (obj->IsJSArray()) {
@@ -1242,7 +1239,7 @@ Object* Slow_ArrayConcat(BuiltinArguments* args, Handle<Object> species,
Handle<FixedDoubleArray> double_storage =
Handle<FixedDoubleArray>::cast(storage);
for (int i = 0; i < argument_count; i++) {
- Handle<Object> obj((*args)[i], isolate);
+ Handle<Object> obj = args->at(i);
if (obj->IsSmi()) {
double_storage->set(j, Smi::ToInt(*obj));
j++;
@@ -1251,14 +1248,14 @@ Object* Slow_ArrayConcat(BuiltinArguments* args, Handle<Object> species,
j++;
} else {
DisallowHeapAllocation no_gc;
- JSArray* array = JSArray::cast(*obj);
+ JSArray array = JSArray::cast(*obj);
uint32_t length = static_cast<uint32_t>(array->length()->Number());
switch (array->GetElementsKind()) {
case HOLEY_DOUBLE_ELEMENTS:
case PACKED_DOUBLE_ELEMENTS: {
// Empty array is FixedArray but not FixedDoubleArray.
if (length == 0) break;
- FixedDoubleArray* elements =
+ FixedDoubleArray elements =
FixedDoubleArray::cast(array->elements());
for (uint32_t i = 0; i < length; i++) {
if (elements->is_the_hole(i)) {
@@ -1278,10 +1275,10 @@ Object* Slow_ArrayConcat(BuiltinArguments* args, Handle<Object> species,
}
case HOLEY_SMI_ELEMENTS:
case PACKED_SMI_ELEMENTS: {
- Object* the_hole = ReadOnlyRoots(isolate).the_hole_value();
- FixedArray* elements(FixedArray::cast(array->elements()));
+ Object the_hole = ReadOnlyRoots(isolate).the_hole_value();
+ FixedArray elements(FixedArray::cast(array->elements()));
for (uint32_t i = 0; i < length; i++) {
- Object* element = elements->get(i);
+ Object element = elements->get(i);
if (element == the_hole) {
failure = true;
break;
@@ -1332,7 +1329,7 @@ Object* Slow_ArrayConcat(BuiltinArguments* args, Handle<Object> species,
ArrayConcatVisitor visitor(isolate, storage, fast_case);
for (int i = 0; i < argument_count; i++) {
- Handle<Object> obj((*args)[i], isolate);
+ Handle<Object> obj = args->at(i);
Maybe<bool> spreadable = IsConcatSpreadable(isolate, obj);
MAYBE_RETURN(spreadable, ReadOnlyRoots(isolate).exception());
if (spreadable.FromJust()) {
@@ -1360,7 +1357,7 @@ Object* Slow_ArrayConcat(BuiltinArguments* args, Handle<Object> species,
bool IsSimpleArray(Isolate* isolate, Handle<JSArray> obj) {
DisallowHeapAllocation no_gc;
- Map* map = obj->map();
+ Map map = obj->map();
// If there is only the 'length' property we are fine.
if (map->prototype() ==
isolate->native_context()->initial_array_prototype() &&
@@ -1390,7 +1387,7 @@ MaybeHandle<JSArray> Fast_ArrayConcat(Isolate* isolate,
// Iterate through all the arguments performing checks
// and calculating total length.
for (int i = 0; i < n_arguments; i++) {
- Object* arg = (*args)[i];
+ Object arg = (*args)[i];
if (!arg->IsJSArray()) return MaybeHandle<JSArray>();
if (!HasOnlySimpleReceiverElements(isolate, JSObject::cast(arg))) {
return MaybeHandle<JSArray>();
@@ -1430,7 +1427,7 @@ BUILTIN(ArrayConcat) {
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, receiver,
Object::ToObject(isolate, args.receiver(), "Array.prototype.concat"));
- args[0] = *receiver;
+ args.set_at(0, *receiver);
Handle<JSArray> result_array;
diff --git a/deps/v8/src/builtins/builtins-arraybuffer.cc b/deps/v8/src/builtins/builtins-arraybuffer.cc
index a4de98eb97..c4146a359a 100644
--- a/deps/v8/src/builtins/builtins-arraybuffer.cc
+++ b/deps/v8/src/builtins/builtins-arraybuffer.cc
@@ -23,13 +23,13 @@ namespace internal {
}
// -----------------------------------------------------------------------------
-// ES6 section 21.1 ArrayBuffer Objects
+// ES#sec-arraybuffer-objects
namespace {
-Object* ConstructBuffer(Isolate* isolate, Handle<JSFunction> target,
- Handle<JSReceiver> new_target, Handle<Object> length,
- bool initialize) {
+Object ConstructBuffer(Isolate* isolate, Handle<JSFunction> target,
+ Handle<JSReceiver> new_target, Handle<Object> length,
+ bool initialize) {
Handle<JSObject> result;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, result,
@@ -37,6 +37,7 @@ Object* ConstructBuffer(Isolate* isolate, Handle<JSFunction> target,
size_t byte_length;
if (!TryNumberToSize(*length, &byte_length) ||
byte_length > JSArrayBuffer::kMaxByteLength) {
+ JSArrayBuffer::SetupAsEmpty(Handle<JSArrayBuffer>::cast(result), isolate);
THROW_NEW_ERROR_RETURN_FAILURE(
isolate, NewRangeError(MessageTemplate::kInvalidArrayBufferLength));
}
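
A minimal sketch of the invariant the added SetupAsEmpty call preserves (illustrative shape, not V8's layout): the result object already exists when the length check fails, so it must be left well-formed before the RangeError unwinds.

    #include <cstddef>

    struct BufferSketch {
      void* backing_store = nullptr;
      std::size_t byte_length = 0;
    };

    // Returns false (caller throws RangeError) only after resetting the
    // result to a valid empty state, so a later GC walk never sees
    // uninitialized fields.
    inline bool SetupBuffer(BufferSketch* result, std::size_t requested,
                            std::size_t max_byte_length) {
      if (requested > max_byte_length) {
        *result = BufferSketch{};
        return false;
      }
      result->byte_length = requested;
      return true;
    }
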
@@ -117,12 +118,12 @@ BUILTIN(SharedArrayBufferPrototypeGetByteLength) {
BUILTIN(ArrayBufferIsView) {
SealHandleScope shs(isolate);
DCHECK_EQ(2, args.length());
- Object* arg = args[1];
+ Object arg = args[1];
return isolate->heap()->ToBoolean(arg->IsJSArrayBufferView());
}
-static Object* SliceHelper(BuiltinArguments args, Isolate* isolate,
- const char* kMethodName, bool is_shared) {
+static Object SliceHelper(BuiltinArguments args, Isolate* isolate,
+ const char* kMethodName, bool is_shared) {
HandleScope scope(isolate);
Handle<Object> start = args.at(1);
Handle<Object> end = args.atOrUndefined(isolate, 2);
@@ -136,7 +137,7 @@ static Object* SliceHelper(BuiltinArguments args, Isolate* isolate,
CHECK_SHARED(is_shared, array_buffer, kMethodName);
// * [AB] If IsDetachedBuffer(buffer) is true, throw a TypeError exception.
- if (!is_shared && array_buffer->was_neutered()) {
+ if (!is_shared && array_buffer->was_detached()) {
THROW_NEW_ERROR_RETURN_FAILURE(
isolate, NewTypeError(MessageTemplate::kDetachedOperation,
isolate->factory()->NewStringFromAsciiChecked(
@@ -222,7 +223,7 @@ static Object* SliceHelper(BuiltinArguments args, Isolate* isolate,
CHECK_SHARED(is_shared, new_array_buffer, kMethodName);
// * [AB] If IsDetachedBuffer(new) is true, throw a TypeError exception.
- if (!is_shared && new_array_buffer->was_neutered()) {
+ if (!is_shared && new_array_buffer->was_detached()) {
THROW_NEW_ERROR_RETURN_FAILURE(
isolate, NewTypeError(MessageTemplate::kDetachedOperation,
isolate->factory()->NewStringFromAsciiChecked(
@@ -253,7 +254,7 @@ static Object* SliceHelper(BuiltinArguments args, Isolate* isolate,
// * [AB] NOTE: Side-effects of the above steps may have detached O.
// * [AB] If IsDetachedBuffer(O) is true, throw a TypeError exception.
- if (!is_shared && array_buffer->was_neutered()) {
+ if (!is_shared && array_buffer->was_detached()) {
THROW_NEW_ERROR_RETURN_FAILURE(
isolate, NewTypeError(MessageTemplate::kDetachedOperation,
isolate->factory()->NewStringFromAsciiChecked(
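
The was_neutered to was_detached rename is mechanical, but the repeated checks it touches are not redundant; a sketch of the pattern (names hypothetical):

    // User-visible steps (species constructors, ToInteger on arguments) can
    // run arbitrary JS that detaches the buffer, so every step that touches
    // the backing store re-checks before proceeding. Shared buffers cannot
    // be detached, hence the is_shared exemption.
    inline bool MustThrowDetachedOperation(bool is_shared, bool was_detached) {
      return !is_shared && was_detached;
    }
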
diff --git a/deps/v8/src/builtins/builtins-async-function-gen.cc b/deps/v8/src/builtins/builtins-async-function-gen.cc
index 74d6077764..eb17d743a7 100644
--- a/deps/v8/src/builtins/builtins-async-function-gen.cc
+++ b/deps/v8/src/builtins/builtins-async-function-gen.cc
@@ -8,6 +8,7 @@
#include "src/code-stub-assembler.h"
#include "src/objects-inl.h"
#include "src/objects/js-generator.h"
+#include "src/objects/js-promise.h"
namespace v8 {
namespace internal {
@@ -18,9 +19,8 @@ class AsyncFunctionBuiltinsAssembler : public AsyncBuiltinsAssembler {
: AsyncBuiltinsAssembler(state) {}
protected:
- void AsyncFunctionAwait(Node* const context, Node* const generator,
- Node* const awaited, Node* const outer_promise,
- const bool is_predicted_as_caught);
+ template <typename Descriptor>
+ void AsyncFunctionAwait(const bool is_predicted_as_caught);
void AsyncFunctionAwaitResumeClosure(
Node* const context, Node* const sent_value,
@@ -33,28 +33,27 @@ void AsyncFunctionBuiltinsAssembler::AsyncFunctionAwaitResumeClosure(
DCHECK(resume_mode == JSGeneratorObject::kNext ||
resume_mode == JSGeneratorObject::kThrow);
- Node* const generator = LoadContextElement(context, Context::EXTENSION_INDEX);
- CSA_SLOW_ASSERT(this, HasInstanceType(generator, JS_GENERATOR_OBJECT_TYPE));
+ TNode<JSAsyncFunctionObject> async_function_object =
+ CAST(LoadContextElement(context, Context::EXTENSION_INDEX));
// Inline version of GeneratorPrototypeNext / GeneratorPrototypeReturn with
// unnecessary runtime checks removed.
- // TODO(jgruber): Refactor to reuse code from builtins-generator.cc.
- // Ensure that the generator is neither closed nor running.
+ // Ensure that the {async_function_object} is neither closed nor running.
CSA_SLOW_ASSERT(
- this,
- SmiGreaterThan(CAST(LoadObjectField(
- generator, JSGeneratorObject::kContinuationOffset)),
- SmiConstant(JSGeneratorObject::kGeneratorClosed)));
+ this, SmiGreaterThan(
+ LoadObjectField<Smi>(async_function_object,
+ JSGeneratorObject::kContinuationOffset),
+ SmiConstant(JSGeneratorObject::kGeneratorClosed)));
- // Remember the {resume_mode} for the {generator}.
- StoreObjectFieldNoWriteBarrier(generator,
+ // Remember the {resume_mode} for the {async_function_object}.
+ StoreObjectFieldNoWriteBarrier(async_function_object,
JSGeneratorObject::kResumeModeOffset,
SmiConstant(resume_mode));
// Resume the {receiver} using our trampoline.
Callable callable = CodeFactory::ResumeGenerator(isolate());
- CallStub(callable, context, sent_value, generator);
+ CallStub(callable, context, sent_value, async_function_object);
// The resulting Promise is a throwaway, so it doesn't matter what it
// resolves to. What is important is that we don't end up keeping the
@@ -62,6 +61,166 @@ void AsyncFunctionBuiltinsAssembler::AsyncFunctionAwaitResumeClosure(
// of ResumeGenerator, as that would create a memory leak.
}
+TF_BUILTIN(AsyncFunctionEnter, AsyncFunctionBuiltinsAssembler) {
+ TNode<JSFunction> closure = CAST(Parameter(Descriptor::kClosure));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+
+ // Compute the number of registers and parameters.
+ TNode<SharedFunctionInfo> shared = LoadObjectField<SharedFunctionInfo>(
+ closure, JSFunction::kSharedFunctionInfoOffset);
+ TNode<IntPtrT> formal_parameter_count = ChangeInt32ToIntPtr(
+ LoadObjectField(shared, SharedFunctionInfo::kFormalParameterCountOffset,
+ MachineType::Uint16()));
+ TNode<BytecodeArray> bytecode_array =
+ LoadSharedFunctionInfoBytecodeArray(shared);
+ TNode<IntPtrT> frame_size = ChangeInt32ToIntPtr(LoadObjectField(
+ bytecode_array, BytecodeArray::kFrameSizeOffset, MachineType::Int32()));
+ TNode<IntPtrT> parameters_and_register_length =
+ Signed(IntPtrAdd(WordSar(frame_size, IntPtrConstant(kTaggedSizeLog2)),
+ formal_parameter_count));
+
+ // Allocate space for the promise, the async function object
+ // and the register file.
+ TNode<IntPtrT> size = IntPtrAdd(
+ IntPtrConstant(JSPromise::kSizeWithEmbedderFields +
+ JSAsyncFunctionObject::kSize + FixedArray::kHeaderSize),
+ Signed(WordShl(parameters_and_register_length,
+ IntPtrConstant(kTaggedSizeLog2))));
+ TNode<HeapObject> base = AllocateInNewSpace(size);
+
+ // Initialize the register file.
+ TNode<FixedArray> parameters_and_registers = UncheckedCast<FixedArray>(
+ InnerAllocate(base, JSAsyncFunctionObject::kSize +
+ JSPromise::kSizeWithEmbedderFields));
+ StoreMapNoWriteBarrier(parameters_and_registers, RootIndex::kFixedArrayMap);
+ StoreObjectFieldNoWriteBarrier(parameters_and_registers,
+ FixedArray::kLengthOffset,
+ SmiFromIntPtr(parameters_and_register_length));
+ FillFixedArrayWithValue(HOLEY_ELEMENTS, parameters_and_registers,
+ IntPtrConstant(0), parameters_and_register_length,
+ RootIndex::kUndefinedValue);
+
+ // Initialize the promise.
+ TNode<Context> native_context = LoadNativeContext(context);
+ TNode<JSFunction> promise_function =
+ CAST(LoadContextElement(native_context, Context::PROMISE_FUNCTION_INDEX));
+ TNode<Map> promise_map = LoadObjectField<Map>(
+ promise_function, JSFunction::kPrototypeOrInitialMapOffset);
+ TNode<JSPromise> promise = UncheckedCast<JSPromise>(
+ InnerAllocate(base, JSAsyncFunctionObject::kSize));
+ StoreMapNoWriteBarrier(promise, promise_map);
+ StoreObjectFieldRoot(promise, JSPromise::kPropertiesOrHashOffset,
+ RootIndex::kEmptyFixedArray);
+ StoreObjectFieldRoot(promise, JSPromise::kElementsOffset,
+ RootIndex::kEmptyFixedArray);
+ PromiseInit(promise);
+
+ // Initialize the async function object.
+ TNode<Map> async_function_object_map = CAST(LoadContextElement(
+ native_context, Context::ASYNC_FUNCTION_OBJECT_MAP_INDEX));
+ TNode<JSAsyncFunctionObject> async_function_object =
+ UncheckedCast<JSAsyncFunctionObject>(base);
+ StoreMapNoWriteBarrier(async_function_object, async_function_object_map);
+ StoreObjectFieldRoot(async_function_object,
+ JSAsyncFunctionObject::kPropertiesOrHashOffset,
+ RootIndex::kEmptyFixedArray);
+ StoreObjectFieldRoot(async_function_object,
+ JSAsyncFunctionObject::kElementsOffset,
+ RootIndex::kEmptyFixedArray);
+ StoreObjectFieldNoWriteBarrier(
+ async_function_object, JSAsyncFunctionObject::kFunctionOffset, closure);
+ StoreObjectFieldNoWriteBarrier(
+ async_function_object, JSAsyncFunctionObject::kContextOffset, context);
+ StoreObjectFieldNoWriteBarrier(
+ async_function_object, JSAsyncFunctionObject::kReceiverOffset, receiver);
+ StoreObjectFieldNoWriteBarrier(async_function_object,
+ JSAsyncFunctionObject::kInputOrDebugPosOffset,
+ SmiConstant(0));
+ StoreObjectFieldNoWriteBarrier(async_function_object,
+ JSAsyncFunctionObject::kResumeModeOffset,
+ SmiConstant(JSAsyncFunctionObject::kNext));
+ StoreObjectFieldNoWriteBarrier(
+ async_function_object, JSAsyncFunctionObject::kContinuationOffset,
+ SmiConstant(JSAsyncFunctionObject::kGeneratorExecuting));
+ StoreObjectFieldNoWriteBarrier(
+ async_function_object,
+ JSAsyncFunctionObject::kParametersAndRegistersOffset,
+ parameters_and_registers);
+ StoreObjectFieldNoWriteBarrier(
+ async_function_object, JSAsyncFunctionObject::kPromiseOffset, promise);
+
+ // Fire promise hooks if enabled and push the Promise under construction
+ // in an async function on the catch prediction stack to handle exceptions
+ // thrown before the first await.
+ Label if_instrumentation(this, Label::kDeferred),
+ if_instrumentation_done(this);
+ Branch(IsPromiseHookEnabledOrDebugIsActiveOrHasAsyncEventDelegate(),
+ &if_instrumentation, &if_instrumentation_done);
+ BIND(&if_instrumentation);
+ {
+ CallRuntime(Runtime::kDebugAsyncFunctionEntered, context, promise);
+ Goto(&if_instrumentation_done);
+ }
+ BIND(&if_instrumentation_done);
+
+ Return(async_function_object);
+}
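
A sketch of the single-chunk layout AsyncFunctionEnter carves out of one AllocateInNewSpace call (all sizes are illustrative placeholders, not V8's constants):

    #include <cstddef>

    constexpr std::size_t kAsyncFunctionObjectSize = 64;  // placeholder
    constexpr std::size_t kPromiseSize = 48;              // placeholder
    constexpr std::size_t kFixedArrayHeaderSize = 16;     // placeholder
    constexpr std::size_t kTaggedSize = 8;                // placeholder

    // Chunk layout: [ async function object | promise | register file ].
    // The InnerAllocate calls in the builtin correspond to these offsets.
    constexpr std::size_t PromiseOffset() { return kAsyncFunctionObjectSize; }
    constexpr std::size_t RegisterFileOffset() {
      return kAsyncFunctionObjectSize + kPromiseSize;
    }
    constexpr std::size_t TotalSize(std::size_t parameters_and_registers) {
      return RegisterFileOffset() + kFixedArrayHeaderSize +
             parameters_and_registers * kTaggedSize;
    }
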
+
+TF_BUILTIN(AsyncFunctionReject, AsyncFunctionBuiltinsAssembler) {
+ TNode<JSAsyncFunctionObject> async_function_object =
+ CAST(Parameter(Descriptor::kAsyncFunctionObject));
+ TNode<Object> reason = CAST(Parameter(Descriptor::kReason));
+ TNode<Oddball> can_suspend = CAST(Parameter(Descriptor::kCanSuspend));
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<JSPromise> promise = LoadObjectField<JSPromise>(
+ async_function_object, JSAsyncFunctionObject::kPromiseOffset);
+
+ // Reject the {promise} for the given {reason}, disabling the
+ // additional debug event for the rejection since a debug event
+  // already happened for the exception that got us here.
+ CallBuiltin(Builtins::kRejectPromise, context, promise, reason,
+ FalseConstant());
+
+ Label if_debugging(this, Label::kDeferred);
+ GotoIf(HasAsyncEventDelegate(), &if_debugging);
+ GotoIf(IsDebugActive(), &if_debugging);
+ Return(promise);
+
+ BIND(&if_debugging);
+ TailCallRuntime(Runtime::kDebugAsyncFunctionFinished, context, can_suspend,
+ promise);
+}
+
+TF_BUILTIN(AsyncFunctionResolve, AsyncFunctionBuiltinsAssembler) {
+ TNode<JSAsyncFunctionObject> async_function_object =
+ CAST(Parameter(Descriptor::kAsyncFunctionObject));
+ TNode<Object> value = CAST(Parameter(Descriptor::kValue));
+ TNode<Oddball> can_suspend = CAST(Parameter(Descriptor::kCanSuspend));
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<JSPromise> promise = LoadObjectField<JSPromise>(
+ async_function_object, JSAsyncFunctionObject::kPromiseOffset);
+
+ CallBuiltin(Builtins::kResolvePromise, context, promise, value);
+
+ Label if_debugging(this, Label::kDeferred);
+ GotoIf(HasAsyncEventDelegate(), &if_debugging);
+ GotoIf(IsDebugActive(), &if_debugging);
+ Return(promise);
+
+ BIND(&if_debugging);
+ TailCallRuntime(Runtime::kDebugAsyncFunctionFinished, context, can_suspend,
+ promise);
+}
+
+// When resumed from a lazy deoptimization, AsyncFunctionReject and
+// AsyncFunctionResolve are both required to return the promise itself
+// instead of the result of RejectPromise or ResolvePromise respectively.
+TF_BUILTIN(AsyncFunctionLazyDeoptContinuation, AsyncFunctionBuiltinsAssembler) {
+ TNode<JSPromise> promise = CAST(Parameter(Descriptor::kPromise));
+ Return(promise);
+}
+
TF_BUILTIN(AsyncFunctionAwaitRejectClosure, AsyncFunctionBuiltinsAssembler) {
CSA_ASSERT_JS_ARGC_EQ(this, 1);
Node* const sentError = Parameter(Descriptor::kSentError);
@@ -84,31 +243,28 @@ TF_BUILTIN(AsyncFunctionAwaitResolveClosure, AsyncFunctionBuiltinsAssembler) {
// ES#abstract-ops-async-function-await
// AsyncFunctionAwait ( value )
// Shared logic for the core of await. The parser desugars
-// await awaited
+// await value
// into
-// yield AsyncFunctionAwait{Caught,Uncaught}(.generator, awaited, .promise)
-// The 'awaited' parameter is the value; the generator stands in
-// for the asyncContext, and .promise is the larger promise under
-// construction by the enclosing async function.
+// yield AsyncFunctionAwait{Caught,Uncaught}(.generator_object, value)
+// The 'value' parameter is the awaited value; the .generator_object stands in
+// for the asyncContext.
+template <typename Descriptor>
void AsyncFunctionBuiltinsAssembler::AsyncFunctionAwait(
- Node* const context, Node* const generator, Node* const awaited,
- Node* const outer_promise, const bool is_predicted_as_caught) {
- CSA_SLOW_ASSERT(this, HasInstanceType(generator, JS_GENERATOR_OBJECT_TYPE));
- CSA_SLOW_ASSERT(this, HasInstanceType(outer_promise, JS_PROMISE_TYPE));
-
- // TODO(jgruber): AsyncBuiltinsAssembler::Await currently does not reuse
- // the awaited promise if it is already a promise. Reuse is non-spec compliant
- // but part of our old behavior gives us a couple of percent
- // performance boost.
- // TODO(jgruber): Use a faster specialized version of
- // InternalPerformPromiseThen.
+ const bool is_predicted_as_caught) {
+ TNode<JSAsyncFunctionObject> async_function_object =
+ CAST(Parameter(Descriptor::kAsyncFunctionObject));
+ TNode<Object> value = CAST(Parameter(Descriptor::kValue));
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+
+ Node* outer_promise = LoadObjectField(async_function_object,
+ JSAsyncFunctionObject::kPromiseOffset);
Label after_debug_hook(this), call_debug_hook(this, Label::kDeferred);
GotoIf(HasAsyncEventDelegate(), &call_debug_hook);
Goto(&after_debug_hook);
BIND(&after_debug_hook);
- Await(context, generator, awaited, outer_promise,
+ Await(context, async_function_object, value, outer_promise,
Context::ASYNC_FUNCTION_AWAIT_RESOLVE_SHARED_FUN,
Context::ASYNC_FUNCTION_AWAIT_REJECT_SHARED_FUN,
is_predicted_as_caught);
@@ -125,75 +281,15 @@ void AsyncFunctionBuiltinsAssembler::AsyncFunctionAwait(
// Called by the parser from the desugaring of 'await' when catch
// prediction indicates that there is a locally surrounding catch block.
TF_BUILTIN(AsyncFunctionAwaitCaught, AsyncFunctionBuiltinsAssembler) {
- CSA_ASSERT_JS_ARGC_EQ(this, 3);
- Node* const generator = Parameter(Descriptor::kGenerator);
- Node* const awaited = Parameter(Descriptor::kAwaited);
- Node* const outer_promise = Parameter(Descriptor::kOuterPromise);
- Node* const context = Parameter(Descriptor::kContext);
-
static const bool kIsPredictedAsCaught = true;
-
- AsyncFunctionAwait(context, generator, awaited, outer_promise,
- kIsPredictedAsCaught);
+ AsyncFunctionAwait<Descriptor>(kIsPredictedAsCaught);
}
// Called by the parser from the desugaring of 'await' when catch
// prediction indicates no locally surrounding catch block.
TF_BUILTIN(AsyncFunctionAwaitUncaught, AsyncFunctionBuiltinsAssembler) {
- CSA_ASSERT_JS_ARGC_EQ(this, 3);
- Node* const generator = Parameter(Descriptor::kGenerator);
- Node* const awaited = Parameter(Descriptor::kAwaited);
- Node* const outer_promise = Parameter(Descriptor::kOuterPromise);
- Node* const context = Parameter(Descriptor::kContext);
-
static const bool kIsPredictedAsCaught = false;
-
- AsyncFunctionAwait(context, generator, awaited, outer_promise,
- kIsPredictedAsCaught);
-}
-
-TF_BUILTIN(AsyncFunctionPromiseCreate, AsyncFunctionBuiltinsAssembler) {
- CSA_ASSERT_JS_ARGC_EQ(this, 0);
- Node* const context = Parameter(Descriptor::kContext);
-
- Node* const promise = AllocateAndInitJSPromise(context);
-
- Label if_is_debug_active(this, Label::kDeferred);
- GotoIf(IsDebugActive(), &if_is_debug_active);
-
- // Early exit if debug is not active.
- Return(promise);
-
- BIND(&if_is_debug_active);
- {
- // Push the Promise under construction in an async function on
- // the catch prediction stack to handle exceptions thrown before
- // the first await.
- CallRuntime(Runtime::kDebugPushPromise, context, promise);
- Return(promise);
- }
-}
-
-TF_BUILTIN(AsyncFunctionPromiseRelease, AsyncFunctionBuiltinsAssembler) {
- CSA_ASSERT_JS_ARGC_EQ(this, 2);
- Node* const promise = Parameter(Descriptor::kPromise);
- Node* const context = Parameter(Descriptor::kContext);
-
- Label call_debug_instrumentation(this, Label::kDeferred);
- GotoIf(HasAsyncEventDelegate(), &call_debug_instrumentation);
- GotoIf(IsDebugActive(), &call_debug_instrumentation);
-
- // Early exit if debug is not active.
- Return(UndefinedConstant());
-
- BIND(&call_debug_instrumentation);
- {
-    // Pop the Promise under construction in an async function
-    // from the catch prediction stack.
- CallRuntime(Runtime::kDebugAsyncFunctionFinished, context,
- Parameter(Descriptor::kCanSuspend), promise);
- Return(promise);
- }
+ AsyncFunctionAwait<Descriptor>(kIsPredictedAsCaught);
}
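
A compilable sketch of the refactor pattern used above, where each thin builtin instantiates one shared body with its own descriptor (stand-in types; not V8's Descriptor machinery):

    #include <cstdio>

    struct CaughtDescriptor   { static constexpr const char* kName = "caught"; };
    struct UncaughtDescriptor { static constexpr const char* kName = "uncaught"; };

    // The descriptor is a template parameter so the shared body can read
    // its own parameters; the catch prediction stays a constant argument.
    template <typename Descriptor>
    void AsyncFunctionAwaitSketch(bool is_predicted_as_caught) {
      std::printf("%s: predicted-as-caught=%d\n", Descriptor::kName,
                  static_cast<int>(is_predicted_as_caught));
    }

    void AwaitCaught() { AsyncFunctionAwaitSketch<CaughtDescriptor>(true); }
    void AwaitUncaught() { AsyncFunctionAwaitSketch<UncaughtDescriptor>(false); }
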
} // namespace internal
diff --git a/deps/v8/src/builtins/builtins-async-gen.cc b/deps/v8/src/builtins/builtins-async-gen.cc
index dda781b1a8..7ba72844e8 100644
--- a/deps/v8/src/builtins/builtins-async-gen.cc
+++ b/deps/v8/src/builtins/builtins-async-gen.cc
@@ -32,17 +32,15 @@ Node* AsyncBuiltinsAssembler::AwaitOld(Node* context, Node* generator,
static const int kWrappedPromiseOffset =
FixedArray::SizeFor(Context::MIN_CONTEXT_SLOTS);
- static const int kThrowawayPromiseOffset =
- kWrappedPromiseOffset + JSPromise::kSizeWithEmbedderFields;
static const int kResolveClosureOffset =
- kThrowawayPromiseOffset + JSPromise::kSizeWithEmbedderFields;
+ kWrappedPromiseOffset + JSPromise::kSizeWithEmbedderFields;
static const int kRejectClosureOffset =
kResolveClosureOffset + JSFunction::kSizeWithoutPrototype;
static const int kTotalSize =
kRejectClosureOffset + JSFunction::kSizeWithoutPrototype;
- Node* const base = AllocateInNewSpace(kTotalSize);
- Node* const closure_context = base;
+ TNode<HeapObject> base = AllocateInNewSpace(kTotalSize);
+ TNode<Context> closure_context = UncheckedCast<Context>(base);
{
// Initialize the await context, storing the {generator} as extension.
StoreMapNoWriteBarrier(closure_context, RootIndex::kAwaitContextMap);
@@ -70,118 +68,72 @@ Node* AsyncBuiltinsAssembler::AwaitOld(Node* context, Node* generator,
// JSPromise::kSizeWithEmbedderFields.
CSA_ASSERT(this, WordEqual(LoadMapInstanceSizeInWords(promise_map),
IntPtrConstant(JSPromise::kSizeWithEmbedderFields /
- kPointerSize)));
- Node* const wrapped_value = InnerAllocate(base, kWrappedPromiseOffset);
+ kTaggedSize)));
+ TNode<HeapObject> wrapped_value = InnerAllocate(base, kWrappedPromiseOffset);
{
// Initialize Promise
StoreMapNoWriteBarrier(wrapped_value, promise_map);
- InitializeJSObjectFromMap(
- wrapped_value, promise_map,
- IntPtrConstant(JSPromise::kSizeWithEmbedderFields));
+ StoreObjectFieldRoot(wrapped_value, JSPromise::kPropertiesOrHashOffset,
+ RootIndex::kEmptyFixedArray);
+ StoreObjectFieldRoot(wrapped_value, JSPromise::kElementsOffset,
+ RootIndex::kEmptyFixedArray);
PromiseInit(wrapped_value);
}
- Node* const throwaway = InnerAllocate(base, kThrowawayPromiseOffset);
- {
- // Initialize throwawayPromise
- StoreMapNoWriteBarrier(throwaway, promise_map);
- InitializeJSObjectFromMap(
- throwaway, promise_map,
- IntPtrConstant(JSPromise::kSizeWithEmbedderFields));
- PromiseInit(throwaway);
- }
-
- Node* const on_resolve = InnerAllocate(base, kResolveClosureOffset);
- {
- // Initialize resolve handler
- InitializeNativeClosure(closure_context, native_context, on_resolve,
- on_resolve_context_index);
- }
-
- Node* const on_reject = InnerAllocate(base, kRejectClosureOffset);
- {
- // Initialize reject handler
- InitializeNativeClosure(closure_context, native_context, on_reject,
- on_reject_context_index);
- }
-
- {
- // Add PromiseHooks if needed
- Label next(this);
- GotoIfNot(IsPromiseHookEnabledOrHasAsyncEventDelegate(), &next);
- CallRuntime(Runtime::kAwaitPromisesInit, context, wrapped_value,
- outer_promise, throwaway);
- Goto(&next);
- BIND(&next);
- }
+ // Initialize resolve handler
+ TNode<HeapObject> on_resolve = InnerAllocate(base, kResolveClosureOffset);
+ InitializeNativeClosure(closure_context, native_context, on_resolve,
+ on_resolve_context_index);
+
+ // Initialize reject handler
+ TNode<HeapObject> on_reject = InnerAllocate(base, kRejectClosureOffset);
+ InitializeNativeClosure(closure_context, native_context, on_reject,
+ on_reject_context_index);
+
+ VARIABLE(var_throwaway, MachineRepresentation::kTaggedPointer,
+ UndefinedConstant());
+
+ // Deal with PromiseHooks and debug support in the runtime. This
+ // also allocates the throwaway promise, which is only needed in
+ // case of PromiseHooks or debugging.
+ Label if_debugging(this, Label::kDeferred), do_resolve_promise(this);
+ Branch(IsPromiseHookEnabledOrDebugIsActiveOrHasAsyncEventDelegate(),
+ &if_debugging, &do_resolve_promise);
+ BIND(&if_debugging);
+ var_throwaway.Bind(CallRuntime(Runtime::kAwaitPromisesInitOld, context, value,
+ wrapped_value, outer_promise, on_reject,
+ is_predicted_as_caught));
+ Goto(&do_resolve_promise);
+ BIND(&do_resolve_promise);
// Perform ! Call(promiseCapability.[[Resolve]], undefined, « promise »).
CallBuiltin(Builtins::kResolvePromise, context, wrapped_value, value);
- // The Promise will be thrown away and not handled, but it shouldn't trigger
- // unhandled reject events as its work is done
- PromiseSetHasHandler(throwaway);
-
- Label do_perform_promise_then(this);
- GotoIfNot(IsDebugActive(), &do_perform_promise_then);
- {
- Label common(this);
- GotoIf(TaggedIsSmi(value), &common);
- GotoIfNot(HasInstanceType(value, JS_PROMISE_TYPE), &common);
- {
- // Mark the reject handler callback to be a forwarding edge, rather
- // than a meaningful catch handler
- Node* const key =
- HeapConstant(factory()->promise_forwarding_handler_symbol());
- SetPropertyStrict(CAST(context), CAST(on_reject), CAST(key),
- TrueConstant());
-
- GotoIf(IsFalse(is_predicted_as_caught), &common);
- PromiseSetHandledHint(value);
- }
-
- Goto(&common);
- BIND(&common);
- // Mark the dependency to outer Promise in case the throwaway Promise is
- // found on the Promise stack
- CSA_SLOW_ASSERT(this, HasInstanceType(outer_promise, JS_PROMISE_TYPE));
-
- Node* const key = HeapConstant(factory()->promise_handled_by_symbol());
- SetPropertyStrict(CAST(context), CAST(throwaway), CAST(key),
- CAST(outer_promise));
- }
-
- Goto(&do_perform_promise_then);
- BIND(&do_perform_promise_then);
return CallBuiltin(Builtins::kPerformPromiseThen, context, wrapped_value,
- on_resolve, on_reject, throwaway);
+ on_resolve, on_reject, var_throwaway.value());
}
-Node* AsyncBuiltinsAssembler::AwaitOptimized(
- Node* context, Node* generator, Node* value, Node* outer_promise,
- Node* on_resolve_context_index, Node* on_reject_context_index,
- Node* is_predicted_as_caught) {
+Node* AsyncBuiltinsAssembler::AwaitOptimized(Node* context, Node* generator,
+ Node* promise, Node* outer_promise,
+ Node* on_resolve_context_index,
+ Node* on_reject_context_index,
+ Node* is_predicted_as_caught) {
Node* const native_context = LoadNativeContext(context);
- Node* const promise_fun =
- LoadContextElement(native_context, Context::PROMISE_FUNCTION_INDEX);
- CSA_ASSERT(this, IsFunctionWithPrototypeSlotMap(LoadMap(promise_fun)));
- CSA_ASSERT(this, IsConstructor(promise_fun));
+ CSA_ASSERT(this, IsJSPromise(promise));
- static const int kThrowawayPromiseOffset =
- FixedArray::SizeFor(Context::MIN_CONTEXT_SLOTS);
static const int kResolveClosureOffset =
- kThrowawayPromiseOffset + JSPromise::kSizeWithEmbedderFields;
+ FixedArray::SizeFor(Context::MIN_CONTEXT_SLOTS);
static const int kRejectClosureOffset =
kResolveClosureOffset + JSFunction::kSizeWithoutPrototype;
static const int kTotalSize =
kRejectClosureOffset + JSFunction::kSizeWithoutPrototype;
// 2. Let promise be ? PromiseResolve(« promise »).
- Node* const promise =
- CallBuiltin(Builtins::kPromiseResolve, context, promise_fun, value);
+ // Node* const promise =
+ // CallBuiltin(Builtins::kPromiseResolve, context, promise_fun, value);
- Node* const base = AllocateInNewSpace(kTotalSize);
- Node* const closure_context = base;
+ TNode<HeapObject> base = AllocateInNewSpace(kTotalSize);
+ TNode<Context> closure_context = UncheckedCast<Context>(base);
{
// Initialize the await context, storing the {generator} as extension.
StoreMapNoWriteBarrier(closure_context, RootIndex::kAwaitContextMap);
@@ -199,84 +151,34 @@ Node* AsyncBuiltinsAssembler::AwaitOptimized(
closure_context, Context::NATIVE_CONTEXT_INDEX, native_context);
}
- Node* const promise_map =
- LoadObjectField(promise_fun, JSFunction::kPrototypeOrInitialMapOffset);
-  // Assert that the JSPromise map has an instance size of
-  // JSPromise::kSizeWithEmbedderFields.
- CSA_ASSERT(this, WordEqual(LoadMapInstanceSizeInWords(promise_map),
- IntPtrConstant(JSPromise::kSizeWithEmbedderFields /
- kPointerSize)));
- Node* const throwaway = InnerAllocate(base, kThrowawayPromiseOffset);
- {
- // Initialize throwawayPromise
- StoreMapNoWriteBarrier(throwaway, promise_map);
- InitializeJSObjectFromMap(
- throwaway, promise_map,
- IntPtrConstant(JSPromise::kSizeWithEmbedderFields));
- PromiseInit(throwaway);
- }
-
- Node* const on_resolve = InnerAllocate(base, kResolveClosureOffset);
- {
- // Initialize resolve handler
- InitializeNativeClosure(closure_context, native_context, on_resolve,
- on_resolve_context_index);
- }
-
- Node* const on_reject = InnerAllocate(base, kRejectClosureOffset);
- {
- // Initialize reject handler
- InitializeNativeClosure(closure_context, native_context, on_reject,
- on_reject_context_index);
- }
-
- {
- // Add PromiseHooks if needed
- Label next(this);
- GotoIfNot(IsPromiseHookEnabledOrHasAsyncEventDelegate(), &next);
- CallRuntime(Runtime::kAwaitPromisesInit, context, promise, outer_promise,
- throwaway);
- Goto(&next);
- BIND(&next);
- }
-
- // The Promise will be thrown away and not handled, but it shouldn't trigger
- // unhandled reject events as its work is done
- PromiseSetHasHandler(throwaway);
-
- Label do_perform_promise_then(this);
- GotoIfNot(IsDebugActive(), &do_perform_promise_then);
- {
- Label common(this);
- GotoIf(TaggedIsSmi(value), &common);
- GotoIfNot(HasInstanceType(value, JS_PROMISE_TYPE), &common);
- {
- // Mark the reject handler callback to be a forwarding edge, rather
- // than a meaningful catch handler
- Node* const key =
- HeapConstant(factory()->promise_forwarding_handler_symbol());
- SetPropertyStrict(CAST(context), CAST(on_reject), CAST(key),
- TrueConstant());
-
- GotoIf(IsFalse(is_predicted_as_caught), &common);
- PromiseSetHandledHint(value);
- }
-
- Goto(&common);
- BIND(&common);
- // Mark the dependency to outer Promise in case the throwaway Promise is
- // found on the Promise stack
- CSA_SLOW_ASSERT(this, HasInstanceType(outer_promise, JS_PROMISE_TYPE));
-
- Node* const key = HeapConstant(factory()->promise_handled_by_symbol());
- SetPropertyStrict(CAST(context), CAST(throwaway), CAST(key),
- CAST(outer_promise));
- }
-
+ // Initialize resolve handler
+ TNode<HeapObject> on_resolve = InnerAllocate(base, kResolveClosureOffset);
+ InitializeNativeClosure(closure_context, native_context, on_resolve,
+ on_resolve_context_index);
+
+ // Initialize reject handler
+ TNode<HeapObject> on_reject = InnerAllocate(base, kRejectClosureOffset);
+ InitializeNativeClosure(closure_context, native_context, on_reject,
+ on_reject_context_index);
+
+ VARIABLE(var_throwaway, MachineRepresentation::kTaggedPointer,
+ UndefinedConstant());
+
+ // Deal with PromiseHooks and debug support in the runtime. This
+ // also allocates the throwaway promise, which is only needed in
+ // case of PromiseHooks or debugging.
+ Label if_debugging(this, Label::kDeferred), do_perform_promise_then(this);
+ Branch(IsPromiseHookEnabledOrDebugIsActiveOrHasAsyncEventDelegate(),
+ &if_debugging, &do_perform_promise_then);
+ BIND(&if_debugging);
+ var_throwaway.Bind(CallRuntime(Runtime::kAwaitPromisesInit, context, promise,
+ promise, outer_promise, on_reject,
+ is_predicted_as_caught));
Goto(&do_perform_promise_then);
BIND(&do_perform_promise_then);
+
return CallBuiltin(Builtins::kPerformPromiseThen, native_context, promise,
- on_resolve, on_reject, throwaway);
+ on_resolve, on_reject, var_throwaway.value());
}
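In both rewritten paths the throwaway promise is no longer allocated eagerly: var_throwaway starts out undefined and is only filled in by the kAwaitPromisesInit runtime call when PromiseHooks, the debugger, or an async event delegate is active. A hedged sketch of that deferred, debug-only allocation (all names hypothetical):

#include <cstdio>
#include <memory>

struct Promise { /* payload omitted */ };

bool IsPromiseHookEnabledOrDebugIsActiveOrHasAsyncEventDelegate() {
  return false;  // the common, fast case
}

// Models Runtime::kAwaitPromisesInit: only this slow path pays for the
// throwaway promise, so the fast path performs no extra allocation.
std::unique_ptr<Promise> AwaitPromisesInit() {
  return std::make_unique<Promise>();
}

void PerformPromiseThen(Promise* throwaway_or_null) {
  std::printf("throwaway: %s\n", throwaway_or_null ? "allocated" : "none");
}

int main() {
  std::unique_ptr<Promise> throwaway;  // plays the role of var_throwaway
  if (IsPromiseHookEnabledOrDebugIsActiveOrHasAsyncEventDelegate()) {
    throwaway = AwaitPromisesInit();  // deferred, debug-only allocation
  }
  PerformPromiseThen(throwaway.get());
}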
Node* AsyncBuiltinsAssembler::Await(Node* context, Node* generator, Node* value,
@@ -285,16 +187,47 @@ Node* AsyncBuiltinsAssembler::Await(Node* context, Node* generator, Node* value,
Node* on_reject_context_index,
Node* is_predicted_as_caught) {
VARIABLE(result, MachineRepresentation::kTagged);
- Label if_old(this), if_new(this), done(this);
+ Label if_old(this), if_new(this), done(this),
+ if_slow_constructor(this, Label::kDeferred);
STATIC_ASSERT(sizeof(FLAG_harmony_await_optimization) == 1);
-
TNode<Word32T> flag_value = UncheckedCast<Word32T>(Load(
MachineType::Uint8(),
ExternalConstant(
ExternalReference::address_of_harmony_await_optimization_flag())));
-
- Branch(Word32Equal(flag_value, Int32Constant(0)), &if_old, &if_new);
+ GotoIf(Word32Equal(flag_value, Int32Constant(0)), &if_old);
+
+  // We're running with --harmony-await-optimization enabled, which means
+  // we perform the `PromiseResolve(%Promise%, value)` step, avoiding the
+  // creation of unnecessary wrapper promises. Now if {value} is already a
+  // promise with the intrinsic %Promise% constructor as its "constructor",
+  // we don't need to allocate the wrapper promise and can just use the
+  // `AwaitOptimized` logic.
+ GotoIf(TaggedIsSmi(value), &if_old);
+ Node* const value_map = LoadMap(value);
+ GotoIfNot(IsJSPromiseMap(value_map), &if_old);
+  // We can skip the "constructor" lookup on {value} if its [[Prototype]]
+ // is the (initial) Promise.prototype and the @@species protector is
+ // intact, as that guards the lookup path for "constructor" on
+ // JSPromise instances which have the (initial) Promise.prototype.
+ Node* const native_context = LoadNativeContext(context);
+ Node* const promise_prototype =
+ LoadContextElement(native_context, Context::PROMISE_PROTOTYPE_INDEX);
+ GotoIfNot(WordEqual(LoadMapPrototype(value_map), promise_prototype),
+ &if_slow_constructor);
+ Branch(IsPromiseSpeciesProtectorCellInvalid(), &if_slow_constructor, &if_new);
+
+  // At this point, either {value} doesn't have the initial promise
+  // prototype or the promise @@species protector was invalidated, but
+  // {value} could still have %Promise% as its "constructor", so we need
+  // to check that as well.
+ BIND(&if_slow_constructor);
+ {
+ Node* const value_constructor =
+ GetProperty(context, value, isolate()->factory()->constructor_string());
+ Node* const promise_function =
+ LoadContextElement(native_context, Context::PROMISE_FUNCTION_INDEX);
+ Branch(WordEqual(value_constructor, promise_function), &if_new, &if_old);
+ }
BIND(&if_old);
result.Bind(AwaitOld(context, generator, value, outer_promise,
@@ -316,14 +249,14 @@ void AsyncBuiltinsAssembler::InitializeNativeClosure(Node* context,
Node* native_context,
Node* function,
Node* context_index) {
- Node* const function_map = LoadContextElement(
- native_context, Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX);
+ TNode<Map> function_map = CAST(LoadContextElement(
+ native_context, Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX));
// Ensure that we don't have to initialize prototype_or_initial_map field of
// JSFunction.
CSA_ASSERT(this, WordEqual(LoadMapInstanceSizeInWords(function_map),
IntPtrConstant(JSFunction::kSizeWithoutPrototype /
- kPointerSize)));
- STATIC_ASSERT(JSFunction::kSizeWithoutPrototype == 7 * kPointerSize);
+ kTaggedSize)));
+ STATIC_ASSERT(JSFunction::kSizeWithoutPrototype == 7 * kTaggedSize);
StoreMapNoWriteBarrier(function, function_map);
StoreObjectFieldRoot(function, JSObject::kPropertiesOrHashOffset,
RootIndex::kEmptyFixedArray);
@@ -332,13 +265,20 @@ void AsyncBuiltinsAssembler::InitializeNativeClosure(Node* context,
StoreObjectFieldRoot(function, JSFunction::kFeedbackCellOffset,
RootIndex::kManyClosuresCell);
- Node* shared_info = LoadContextElement(native_context, context_index);
- CSA_ASSERT(this, IsSharedFunctionInfo(shared_info));
+ TNode<SharedFunctionInfo> shared_info =
+ CAST(LoadContextElement(native_context, context_index));
StoreObjectFieldNoWriteBarrier(
function, JSFunction::kSharedFunctionInfoOffset, shared_info);
StoreObjectFieldNoWriteBarrier(function, JSFunction::kContextOffset, context);
- Node* const code = GetSharedFunctionInfoCode(shared_info);
+ // For the native closures that are initialized here (for `await`)
+ // we know that their SharedFunctionInfo::function_data() slot
+ // contains a builtin index (as Smi), so there's no need to use
+ // CodeStubAssembler::GetSharedFunctionInfoCode() helper here,
+ // which almost doubles the size of `await` builtins (unnecessarily).
+ TNode<Smi> builtin_id = LoadObjectField<Smi>(
+ shared_info, SharedFunctionInfo::kFunctionDataOffset);
+ TNode<Code> code = LoadBuiltin(builtin_id);
StoreObjectFieldNoWriteBarrier(function, JSFunction::kCodeOffset, code);
}
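The comment above InitializeNativeClosure's code load explains the shortcut: for these await closures the SharedFunctionInfo's function_data is known to be a builtin index, so the Code object is one table load away, instead of the general dispatch in GetSharedFunctionInfoCode. A toy model of that direct lookup, with an invented builtins table:

#include <array>
#include <cassert>
#include <cstdio>

using Code = const char*;

// Stand-in for the isolate's builtins table, indexed by builtin id (the
// Smi stored in SharedFunctionInfo::function_data for these closures).
constexpr std::array<Code, 3> kBuiltins = {
    "AsyncFunctionAwaitResolveClosure",
    "AsyncFunctionAwaitRejectClosure",
    "AsyncGeneratorAwaitResolveClosure",
};

// Models LoadBuiltin(builtin_id): one bounds check plus one indexed load,
// instead of a dispatch over every SharedFunctionInfo::function_data shape.
Code LoadBuiltin(int builtin_id) {
  assert(builtin_id >= 0 && builtin_id < static_cast<int>(kBuiltins.size()));
  return kBuiltins[builtin_id];
}

int main() {
  int function_data = 1;  // known to be a builtin index for await closures
  std::printf("%s\n", LoadBuiltin(function_data));
}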
diff --git a/deps/v8/src/builtins/builtins-async-generator-gen.cc b/deps/v8/src/builtins/builtins-async-generator-gen.cc
index bff2de486a..de19d24bac 100644
--- a/deps/v8/src/builtins/builtins-async-generator-gen.cc
+++ b/deps/v8/src/builtins/builtins-async-generator-gen.cc
@@ -9,6 +9,7 @@
#include "src/code-stub-assembler.h"
#include "src/frames-inl.h"
#include "src/objects/js-generator.h"
+#include "src/objects/js-promise.h"
namespace v8 {
namespace internal {
@@ -237,24 +238,22 @@ void AsyncGeneratorBuiltinsAssembler::AsyncGeneratorAwaitResumeClosure(
template <typename Descriptor>
void AsyncGeneratorBuiltinsAssembler::AsyncGeneratorAwait(bool is_catchable) {
- Node* generator = Parameter(Descriptor::kGenerator);
- Node* value = Parameter(Descriptor::kAwaited);
- Node* context = Parameter(Descriptor::kContext);
+ TNode<JSAsyncGeneratorObject> async_generator_object =
+ CAST(Parameter(Descriptor::kAsyncGeneratorObject));
+ TNode<Object> value = CAST(Parameter(Descriptor::kValue));
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- CSA_SLOW_ASSERT(this, TaggedIsAsyncGenerator(generator));
-
- Node* const request = LoadFirstAsyncGeneratorRequestFromQueue(generator);
- CSA_ASSERT(this, IsNotUndefined(request));
-
- Node* outer_promise =
- LoadObjectField(request, AsyncGeneratorRequest::kPromiseOffset);
+ TNode<AsyncGeneratorRequest> request =
+ CAST(LoadFirstAsyncGeneratorRequestFromQueue(async_generator_object));
+ TNode<JSPromise> outer_promise = LoadObjectField<JSPromise>(
+ request, AsyncGeneratorRequest::kPromiseOffset);
const int resolve_index = Context::ASYNC_GENERATOR_AWAIT_RESOLVE_SHARED_FUN;
const int reject_index = Context::ASYNC_GENERATOR_AWAIT_REJECT_SHARED_FUN;
- SetGeneratorAwaiting(generator);
- Await(context, generator, value, outer_promise, resolve_index, reject_index,
- is_catchable);
+ SetGeneratorAwaiting(async_generator_object);
+ Await(context, async_generator_object, value, outer_promise, resolve_index,
+ reject_index, is_catchable);
Return(UndefinedConstant());
}
@@ -492,9 +491,11 @@ TF_BUILTIN(AsyncGeneratorResolve, AsyncGeneratorBuiltinsAssembler) {
CSA_SLOW_ASSERT(this, TaggedIsAsyncGenerator(generator));
CSA_ASSERT(this, Word32BinaryNot(IsGeneratorAwaiting(generator)));
- // If this assertion fails, the `value` component was not Awaited as it should
- // have been, per https://github.com/tc39/proposal-async-iteration/pull/102/.
- CSA_SLOW_ASSERT(this, TaggedDoesntHaveInstanceType(value, JS_PROMISE_TYPE));
+  // This operation should be called only when the `value` parameter has been
+  // Await-ed. Typically, this means `value` is not a JSPromise value. However,
+  // it may be a JSPromise value whose "then" method has been overridden to a
+  // non-callable value. This can't be checked with assertions, since the
+  // "then" lookup itself is observable, but keep it in mind.
Node* const next = TakeFirstAsyncGeneratorRequestFromQueue(generator);
Node* const promise = LoadPromiseFromAsyncGeneratorRequest(next);
diff --git a/deps/v8/src/builtins/builtins-bigint-gen.cc b/deps/v8/src/builtins/builtins-bigint-gen.cc
new file mode 100644
index 0000000000..9b21320086
--- /dev/null
+++ b/deps/v8/src/builtins/builtins-bigint-gen.cc
@@ -0,0 +1,46 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/builtins/builtins-utils-gen.h"
+#include "src/builtins/builtins.h"
+#include "src/code-stub-assembler.h"
+
+namespace v8 {
+namespace internal {
+
+// https://tc39.github.io/proposal-bigint/#sec-to-big-int64
+TF_BUILTIN(BigIntToI64, CodeStubAssembler) {
+ if (!Is64()) {
+ Unreachable();
+ return;
+ }
+
+ TNode<Object> value = CAST(Parameter(Descriptor::kArgument));
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<BigInt> bigint = ToBigInt(context, value);
+
+ TVARIABLE(UintPtrT, var_low);
+ TVARIABLE(UintPtrT, var_high);
+
+ // 2. Let int64bit be n modulo 2^64.
+ // 3. If int64bit ≥ 2^63, return int64bit - 2^64;
+ BigIntToRawBytes(bigint, &var_low, &var_high);
+ ReturnRaw(var_low.value());
+}
+
+// https://tc39.github.io/proposal-bigint/#sec-bigint-constructor-number-value
+TF_BUILTIN(I64ToBigInt, CodeStubAssembler) {
+ if (!Is64()) {
+ Unreachable();
+ return;
+ }
+
+ TNode<IntPtrT> argument =
+ UncheckedCast<IntPtrT>(Parameter(Descriptor::kArgument));
+
+ Return(BigIntFromInt64(argument));
+}
+
+} // namespace internal
+} // namespace v8
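The spec steps quoted in BigIntToI64 reduce n modulo 2^64 and reinterpret values at or above 2^63 as negative. In plain C++ the unsigned 64-bit type gives the modulo step for free and the signed reinterpretation is a cast; a self-contained illustration (two's-complement behavior assumed, as on all V8 targets):

#include <cstdint>
#include <cstdio>

// Models ToBigInt64: take n modulo 2^64, then map [2^63, 2^64) onto
// the negative range by two's-complement reinterpretation.
int64_t ToInt64(uint64_t low_word) {
  // Storing into uint64_t already performed "n modulo 2^64".
  // If low_word >= 2^63, the cast yields low_word - 2^64.
  return static_cast<int64_t>(low_word);
}

int main() {
  std::printf("%lld\n", (long long)ToInt64(1));                      // 1
  std::printf("%lld\n", (long long)ToInt64(0xFFFFFFFFFFFFFFFFull));  // -1
  std::printf("%lld\n", (long long)ToInt64(1ull << 63));             // INT64_MIN
}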
diff --git a/deps/v8/src/builtins/builtins-bigint.cc b/deps/v8/src/builtins/builtins-bigint.cc
index 2800441ffc..6bf8cd0fd3 100644
--- a/deps/v8/src/builtins/builtins-bigint.cc
+++ b/deps/v8/src/builtins/builtins-bigint.cc
@@ -80,7 +80,7 @@ MaybeHandle<BigInt> ThisBigIntValue(Isolate* isolate, Handle<Object> value,
if (value->IsJSValue()) {
// 2a. Assert: value.[[BigIntData]] is a BigInt value.
// 2b. Return value.[[BigIntData]].
- Object* data = JSValue::cast(*value)->value();
+ Object data = JSValue::cast(*value)->value();
if (data->IsBigInt()) return handle(BigInt::cast(data), isolate);
}
// 3. Throw a TypeError exception.
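The `Object* data` to `Object data` change is part of V8's migration from raw object pointers to value-typed references: Object becomes a one-word class wrapping the tagged address, cheap to copy and free of raw pointer arithmetic. A loose sketch of the idea, not V8's actual class (the Smi tag scheme below is an assumption for illustration):

#include <cstdint>
#include <cstdio>

using Address = uintptr_t;

// Hypothetical value-typed reference: one tagged word, copied by value.
class Object {
 public:
  explicit Object(Address ptr) : ptr_(ptr) {}
  Address ptr() const { return ptr_; }
  bool IsSmi() const { return (ptr_ & 1) == 0; }  // assumed Smi tag scheme
 private:
  Address ptr_;  // the only member: same size and cost as Object*
};

int main() {
  Object data(Address{42} << 1);  // a Smi-tagged 42 under the assumed scheme
  Object copy = data;             // plain word copy, no pointer aliasing
  std::printf("is_smi=%d word_sized=%d\n", (int)copy.IsSmi(),
              (int)(sizeof(Object) == sizeof(void*)));
}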
@@ -92,8 +92,8 @@ MaybeHandle<BigInt> ThisBigIntValue(Isolate* isolate, Handle<Object> value,
BigInt);
}
-Object* BigIntToStringImpl(Handle<Object> receiver, Handle<Object> radix,
- Isolate* isolate, const char* builtin_name) {
+Object BigIntToStringImpl(Handle<Object> receiver, Handle<Object> radix,
+ Isolate* isolate, const char* builtin_name) {
// 1. Let x be ? thisBigIntValue(this value).
Handle<BigInt> x;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
diff --git a/deps/v8/src/builtins/builtins-boolean-gen.cc b/deps/v8/src/builtins/builtins-boolean-gen.cc
index aa3dc1f344..8f723d09cf 100644
--- a/deps/v8/src/builtins/builtins-boolean-gen.cc
+++ b/deps/v8/src/builtins/builtins-boolean-gen.cc
@@ -5,6 +5,7 @@
#include "src/builtins/builtins-utils-gen.h"
#include "src/builtins/builtins.h"
#include "src/code-stub-assembler.h"
+#include "src/objects/oddball.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/builtins/builtins-call-gen.cc b/deps/v8/src/builtins/builtins-call-gen.cc
index e23f13d2b3..24935bfbf4 100644
--- a/deps/v8/src/builtins/builtins-call-gen.cc
+++ b/deps/v8/src/builtins/builtins-call-gen.cc
@@ -7,85 +7,55 @@
#include "src/builtins/builtins-utils-gen.h"
#include "src/builtins/builtins.h"
#include "src/globals.h"
-#include "src/heap/heap-inl.h"
#include "src/isolate.h"
#include "src/macro-assembler.h"
#include "src/objects/arguments.h"
+#include "src/objects/property-cell.h"
namespace v8 {
namespace internal {
void Builtins::Generate_CallFunction_ReceiverIsNullOrUndefined(
MacroAssembler* masm) {
-#ifdef V8_TARGET_ARCH_IA32
- Assembler::SupportsRootRegisterScope supports_root_register(masm);
-#endif
Generate_CallFunction(masm, ConvertReceiverMode::kNullOrUndefined);
}
void Builtins::Generate_CallFunction_ReceiverIsNotNullOrUndefined(
MacroAssembler* masm) {
-#ifdef V8_TARGET_ARCH_IA32
- Assembler::SupportsRootRegisterScope supports_root_register(masm);
-#endif
Generate_CallFunction(masm, ConvertReceiverMode::kNotNullOrUndefined);
}
void Builtins::Generate_CallFunction_ReceiverIsAny(MacroAssembler* masm) {
-#ifdef V8_TARGET_ARCH_IA32
- Assembler::SupportsRootRegisterScope supports_root_register(masm);
-#endif
Generate_CallFunction(masm, ConvertReceiverMode::kAny);
}
void Builtins::Generate_CallBoundFunction(MacroAssembler* masm) {
-#ifdef V8_TARGET_ARCH_IA32
- Assembler::SupportsRootRegisterScope supports_root_register(masm);
-#endif
Generate_CallBoundFunctionImpl(masm);
}
void Builtins::Generate_Call_ReceiverIsNullOrUndefined(MacroAssembler* masm) {
-#ifdef V8_TARGET_ARCH_IA32
- Assembler::SupportsRootRegisterScope supports_root_register(masm);
-#endif
Generate_Call(masm, ConvertReceiverMode::kNullOrUndefined);
}
void Builtins::Generate_Call_ReceiverIsNotNullOrUndefined(
MacroAssembler* masm) {
-#ifdef V8_TARGET_ARCH_IA32
- Assembler::SupportsRootRegisterScope supports_root_register(masm);
-#endif
Generate_Call(masm, ConvertReceiverMode::kNotNullOrUndefined);
}
void Builtins::Generate_Call_ReceiverIsAny(MacroAssembler* masm) {
-#ifdef V8_TARGET_ARCH_IA32
- Assembler::SupportsRootRegisterScope supports_root_register(masm);
-#endif
Generate_Call(masm, ConvertReceiverMode::kAny);
}
void Builtins::Generate_CallVarargs(MacroAssembler* masm) {
-#ifdef V8_TARGET_ARCH_IA32
- Assembler::SupportsRootRegisterScope supports_root_register(masm);
-#endif
Generate_CallOrConstructVarargs(masm, masm->isolate()->builtins()->Call());
}
void Builtins::Generate_CallForwardVarargs(MacroAssembler* masm) {
-#ifdef V8_TARGET_ARCH_IA32
- Assembler::SupportsRootRegisterScope supports_root_register(masm);
-#endif
Generate_CallOrConstructForwardVarargs(masm, CallOrConstructMode::kCall,
masm->isolate()->builtins()->Call());
}
void Builtins::Generate_CallFunctionForwardVarargs(MacroAssembler* masm) {
-#ifdef V8_TARGET_ARCH_IA32
- Assembler::SupportsRootRegisterScope supports_root_register(masm);
-#endif
Generate_CallOrConstructForwardVarargs(
masm, CallOrConstructMode::kCall,
masm->isolate()->builtins()->CallFunction());
@@ -224,6 +194,8 @@ void CallOrConstructBuiltinsAssembler::CallOrConstructWithArrayLike(
TNode<Int32T> length = var_length.value();
{
Label normalize_done(this);
+ CSA_ASSERT(this, Int32LessThanOrEqual(
+ length, Int32Constant(FixedArray::kMaxLength)));
GotoIfNot(Word32Equal(length, Int32Constant(0)), &normalize_done);
// Make sure we don't accidentally pass along the
// empty_fixed_double_array since the tailed-called stubs cannot handle
@@ -267,41 +239,28 @@ void CallOrConstructBuiltinsAssembler::CallOrConstructDoubleVarargs(
TNode<Object> target, SloppyTNode<Object> new_target,
TNode<FixedDoubleArray> elements, TNode<Int32T> length,
TNode<Int32T> args_count, TNode<Context> context, TNode<Int32T> kind) {
- Label if_done(this);
-
const ElementsKind new_kind = PACKED_ELEMENTS;
const WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER;
+ CSA_ASSERT(this, Int32LessThanOrEqual(length,
+ Int32Constant(FixedArray::kMaxLength)));
TNode<IntPtrT> intptr_length = ChangeInt32ToIntPtr(length);
CSA_ASSERT(this, WordNotEqual(intptr_length, IntPtrConstant(0)));
// Allocate a new FixedArray of Objects.
TNode<FixedArray> new_elements = CAST(AllocateFixedArray(
new_kind, intptr_length, CodeStubAssembler::kAllowLargeObjectAllocation));
- Branch(Word32Equal(kind, Int32Constant(HOLEY_DOUBLE_ELEMENTS)),
- [&] {
- // Fill the FixedArray with pointers to HeapObjects.
- CopyFixedArrayElements(HOLEY_DOUBLE_ELEMENTS, elements, new_kind,
- new_elements, intptr_length, intptr_length,
- barrier_mode);
- Goto(&if_done);
- },
- [&] {
- CopyFixedArrayElements(PACKED_DOUBLE_ELEMENTS, elements, new_kind,
- new_elements, intptr_length, intptr_length,
- barrier_mode);
- Goto(&if_done);
- });
-
- BIND(&if_done);
- {
- if (new_target == nullptr) {
- Callable callable = CodeFactory::CallVarargs(isolate());
- TailCallStub(callable, context, target, args_count, length, new_elements);
- } else {
- Callable callable = CodeFactory::ConstructVarargs(isolate());
- TailCallStub(callable, context, target, new_target, args_count, length,
- new_elements);
- }
+ // CopyFixedArrayElements does not distinguish between holey and packed for
+ // its first argument, so we don't need to dispatch on {kind} here.
+ CopyFixedArrayElements(PACKED_DOUBLE_ELEMENTS, elements, new_kind,
+ new_elements, intptr_length, intptr_length,
+ barrier_mode);
+ if (new_target == nullptr) {
+ Callable callable = CodeFactory::CallVarargs(isolate());
+ TailCallStub(callable, context, target, args_count, length, new_elements);
+ } else {
+ Callable callable = CodeFactory::ConstructVarargs(isolate());
+ TailCallStub(callable, context, target, new_target, args_count, length,
+ new_elements);
}
}
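The simplification above leans on CopyFixedArrayElements treating holey and packed double sources identically, which collapses the two lambda branches into a single call. A simplified standalone analogue that copies a double array into boxed slots while handling the hole sentinel uniformly (plain NaN stands in for V8's specific hole bit pattern):

#include <cmath>
#include <cstdio>
#include <optional>
#include <vector>

// Hypothetical hole encoding: NaN marks a hole in the double array.
bool IsHole(double d) { return std::isnan(d); }

// One copy loop covers both PACKED_DOUBLE and HOLEY_DOUBLE sources:
// holes simply become disengaged (undefined-like) boxed slots.
std::vector<std::optional<double>> CopyToTagged(
    const std::vector<double>& src) {
  std::vector<std::optional<double>> dst;
  dst.reserve(src.size());
  for (double d : src) {
    if (IsHole(d)) dst.push_back(std::nullopt);
    else dst.push_back(d);
  }
  return dst;
}

int main() {
  std::vector<double> holey = {1.0, std::nan(""), 3.0};
  for (auto& slot : CopyToTagged(holey))
    std::printf(slot ? "%g " : "hole ", slot.value_or(0));
  std::printf("\n");
}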
@@ -374,6 +333,8 @@ void CallOrConstructBuiltinsAssembler::CallOrConstructWithSpread(
{
TNode<FixedArrayBase> elements = var_elements.value();
TNode<Int32T> length = var_length.value();
+ CSA_ASSERT(this, Int32LessThanOrEqual(
+ length, Int32Constant(FixedArray::kMaxLength)));
if (new_target == nullptr) {
Callable callable = CodeFactory::CallVarargs(isolate());
diff --git a/deps/v8/src/builtins/builtins-callsite.cc b/deps/v8/src/builtins/builtins-callsite.cc
index b580bb181f..7f7699927c 100644
--- a/deps/v8/src/builtins/builtins-callsite.cc
+++ b/deps/v8/src/builtins/builtins-callsite.cc
@@ -24,7 +24,7 @@ namespace internal {
namespace {
-Object* PositiveNumberOrNull(int value, Isolate* isolate) {
+Object PositiveNumberOrNull(int value, Isolate* isolate) {
if (value >= 0) return *isolate->factory()->NewNumberFromInt(value);
return ReadOnlyRoots(isolate).null_value();
}
@@ -110,6 +110,14 @@ BUILTIN(CallSitePrototypeGetPosition) {
return Smi::FromInt(it.Frame()->GetPosition());
}
+BUILTIN(CallSitePrototypeGetPromiseIndex) {
+ HandleScope scope(isolate);
+ CHECK_CALLSITE(recv, "getPromiseIndex");
+ FrameArrayIterator it(isolate, GetFrameArray(isolate, recv),
+ GetFrameIndex(isolate, recv));
+ return PositiveNumberOrNull(it.Frame()->GetPromiseIndex(), isolate);
+}
+
BUILTIN(CallSitePrototypeGetScriptNameOrSourceURL) {
HandleScope scope(isolate);
CHECK_CALLSITE(recv, "getScriptNameOrSourceUrl");
@@ -169,6 +177,14 @@ BUILTIN(CallSitePrototypeIsNative) {
return isolate->heap()->ToBoolean(it.Frame()->IsNative());
}
+BUILTIN(CallSitePrototypeIsPromiseAll) {
+ HandleScope scope(isolate);
+ CHECK_CALLSITE(recv, "isPromiseAll");
+ FrameArrayIterator it(isolate, GetFrameArray(isolate, recv),
+ GetFrameIndex(isolate, recv));
+ return isolate->heap()->ToBoolean(it.Frame()->IsPromiseAll());
+}
+
BUILTIN(CallSitePrototypeIsToplevel) {
HandleScope scope(isolate);
CHECK_CALLSITE(recv, "isToplevel");
diff --git a/deps/v8/src/builtins/builtins-collections-gen.cc b/deps/v8/src/builtins/builtins-collections-gen.cc
index 1ff64b0877..d782f241b0 100644
--- a/deps/v8/src/builtins/builtins-collections-gen.cc
+++ b/deps/v8/src/builtins/builtins-collections-gen.cc
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/builtins/builtins-collections-gen.h"
+
#include "src/builtins/builtins-constructor-gen.h"
#include "src/builtins/builtins-iterator-gen.h"
#include "src/builtins/builtins-utils-gen.h"
@@ -9,6 +11,8 @@
#include "src/heap/factory-inl.h"
#include "src/objects/hash-table-inl.h"
#include "src/objects/js-collection.h"
+#include "torque-generated/builtins-base-from-dsl-gen.h"
+#include "torque-generated/builtins-collections-from-dsl-gen.h"
namespace v8 {
namespace internal {
@@ -19,10 +23,11 @@ using TNode = compiler::TNode<T>;
template <class T>
using TVariable = compiler::TypedCodeAssemblerVariable<T>;
-class BaseCollectionsAssembler : public CodeStubAssembler {
+class BaseCollectionsAssembler : public CodeStubAssembler,
+ public CollectionsBuiltinsFromDSLAssembler {
public:
explicit BaseCollectionsAssembler(compiler::CodeAssemblerState* state)
- : CodeStubAssembler(state) {}
+ : CodeStubAssembler(state), CollectionsBuiltinsFromDSLAssembler(state) {}
virtual ~BaseCollectionsAssembler() = default;
@@ -101,6 +106,16 @@ class BaseCollectionsAssembler : public CodeStubAssembler {
TNode<JSFunction> GetInitialAddFunction(Variant variant,
TNode<Context> native_context);
+ // Checks whether {collection}'s initial add/set function has been modified
+ // (depending on {variant}, loaded from {native_context}).
+ void GotoIfInitialAddFunctionModified(Variant variant,
+ TNode<Context> native_context,
+ TNode<Object> collection,
+ Label* if_modified);
+
+ // Gets root index for the name of the add/set function.
+ RootIndex GetAddFunctionNameIndex(Variant variant);
+
// Retrieves the offset to access the backing table from the collection.
int GetTableOffset(Variant variant);
@@ -117,6 +132,10 @@ class BaseCollectionsAssembler : public CodeStubAssembler {
TNode<Context> native_context,
TNode<Object> collection);
+ // Gets the initial prototype map for given collection {variant}.
+ TNode<Map> GetInitialCollectionPrototype(Variant variant,
+ TNode<Context> native_context);
+
// Loads an element from a fixed array. If the element is the hole, returns
// `undefined`.
TNode<Object> LoadAndNormalizeFixedArrayElement(TNode<FixedArray> elements,
@@ -126,14 +145,6 @@ class BaseCollectionsAssembler : public CodeStubAssembler {
// returns `undefined`.
TNode<Object> LoadAndNormalizeFixedDoubleArrayElement(
TNode<HeapObject> elements, TNode<IntPtrT> index);
-
- // Loads key and value variables with the first and second elements of an
- // array. If the array lacks 2 elements, undefined is used.
- void LoadKeyValue(TNode<Context> context, TNode<Object> maybe_array,
- TVariable<Object>* key, TVariable<Object>* value,
- Label* if_may_have_side_effects = nullptr,
- Label* if_exception = nullptr,
- TVariable<Object>* var_exception = nullptr);
};
void BaseCollectionsAssembler::AddConstructorEntry(
@@ -141,22 +152,23 @@ void BaseCollectionsAssembler::AddConstructorEntry(
TNode<Object> add_function, TNode<Object> key_value,
Label* if_may_have_side_effects, Label* if_exception,
TVariable<Object>* var_exception) {
+ compiler::CodeAssemblerScopedExceptionHandler handler(this, if_exception,
+ var_exception);
CSA_ASSERT(this, Word32BinaryNot(IsTheHole(key_value)));
if (variant == kMap || variant == kWeakMap) {
- TVARIABLE(Object, key);
- TVARIABLE(Object, value);
- LoadKeyValue(context, key_value, &key, &value, if_may_have_side_effects,
- if_exception, var_exception);
- Node* key_n = key.value();
- Node* value_n = value.value();
- Node* ret = CallJS(CodeFactory::Call(isolate()), context, add_function,
- collection, key_n, value_n);
- GotoIfException(ret, if_exception, var_exception);
+ BaseBuiltinsFromDSLAssembler::KeyValuePair pair =
+ if_may_have_side_effects != nullptr
+ ? LoadKeyValuePairNoSideEffects(context, key_value,
+ if_may_have_side_effects)
+ : LoadKeyValuePair(context, key_value);
+ Node* key_n = pair.key;
+ Node* value_n = pair.value;
+ CallJS(CodeFactory::Call(isolate()), context, add_function, collection,
+ key_n, value_n);
} else {
DCHECK(variant == kSet || variant == kWeakSet);
- Node* ret = CallJS(CodeFactory::Call(isolate()), context, add_function,
- collection, key_value);
- GotoIfException(ret, if_exception, var_exception);
+ CallJS(CodeFactory::Call(isolate()), context, add_function, collection,
+ key_value);
}
}
@@ -164,7 +176,7 @@ void BaseCollectionsAssembler::AddConstructorEntries(
Variant variant, TNode<Context> context, TNode<Context> native_context,
TNode<Object> collection, TNode<Object> initial_entries) {
TVARIABLE(BoolT, use_fast_loop,
- IsFastJSArrayWithNoCustomIteration(initial_entries, context));
+ IsFastJSArrayWithNoCustomIteration(context, initial_entries));
TNode<IntPtrT> at_least_space_for =
EstimatedInitialSize(initial_entries, use_fast_loop.value());
Label allocate_table(this, &use_fast_loop), exit(this), fast_loop(this),
@@ -175,9 +187,8 @@ void BaseCollectionsAssembler::AddConstructorEntries(
TNode<Object> table = AllocateTable(variant, context, at_least_space_for);
StoreObjectField(collection, GetTableOffset(variant), table);
GotoIf(IsNullOrUndefined(initial_entries), &exit);
- GotoIfNot(
- HasInitialCollectionPrototype(variant, native_context, collection),
- &slow_loop);
+ GotoIfInitialAddFunctionModified(variant, native_context, collection,
+ &slow_loop);
Branch(use_fast_loop.value(), &fast_loop, &slow_loop);
}
BIND(&fast_loop);
@@ -185,8 +196,8 @@ void BaseCollectionsAssembler::AddConstructorEntries(
TNode<JSArray> initial_entries_jsarray =
UncheckedCast<JSArray>(initial_entries);
#if DEBUG
- CSA_ASSERT(this, IsFastJSArrayWithNoCustomIteration(initial_entries_jsarray,
- context));
+ CSA_ASSERT(this, IsFastJSArrayWithNoCustomIteration(
+ context, initial_entries_jsarray));
TNode<Map> original_initial_entries_map = LoadMap(initial_entries_jsarray);
#endif
@@ -199,8 +210,16 @@ void BaseCollectionsAssembler::AddConstructorEntries(
if (variant == kMap || variant == kWeakMap) {
BIND(&if_may_have_side_effects);
#if DEBUG
- CSA_ASSERT(this, HasInitialCollectionPrototype(variant, native_context,
- collection));
+ {
+ // Check that add/set function has not been modified.
+ Label if_not_modified(this), if_modified(this);
+ GotoIfInitialAddFunctionModified(variant, native_context, collection,
+ &if_modified);
+ Goto(&if_not_modified);
+ BIND(&if_modified);
+ Unreachable();
+ BIND(&if_not_modified);
+ }
CSA_ASSERT(this, WordEqual(original_initial_entries_map,
LoadMap(initial_entries_jsarray)));
#endif
@@ -227,7 +246,7 @@ void BaseCollectionsAssembler::AddConstructorEntriesFromFastJSArray(
CSA_ASSERT(
this,
WordEqual(GetAddFunction(variant, native_context, collection), add_func));
- CSA_ASSERT(this, IsFastJSArrayWithNoCustomIteration(fast_jsarray, context));
+ CSA_ASSERT(this, IsFastJSArrayWithNoCustomIteration(context, fast_jsarray));
TNode<IntPtrT> length = SmiUntag(LoadFastJSArrayLength(fast_jsarray));
CSA_ASSERT(this, IntPtrGreaterThanOrEqual(length, IntPtrConstant(0)));
CSA_ASSERT(
@@ -297,7 +316,8 @@ void BaseCollectionsAssembler::AddConstructorEntriesFromIterable(
TNode<Object> add_func = GetAddFunction(variant, context, collection);
IteratorBuiltinsAssembler iterator_assembler(this->state());
- IteratorRecord iterator = iterator_assembler.GetIterator(context, iterable);
+ IteratorBuiltinsAssembler::IteratorRecord iterator =
+ iterator_assembler.GetIterator(context, iterable);
CSA_ASSERT(this, Word32BinaryNot(IsUndefined(iterator.object)));
@@ -308,8 +328,8 @@ void BaseCollectionsAssembler::AddConstructorEntriesFromIterable(
Goto(&loop);
BIND(&loop);
{
- TNode<Object> next = CAST(iterator_assembler.IteratorStep(
- context, iterator, &exit, fast_iterator_result_map));
+ TNode<Object> next = iterator_assembler.IteratorStep(
+ context, iterator, &exit, fast_iterator_result_map);
TNode<Object> next_value = CAST(iterator_assembler.IteratorValue(
context, next, fast_iterator_result_map));
AddConstructorEntry(variant, context, collection, add_func, next_value,
@@ -319,11 +339,35 @@ void BaseCollectionsAssembler::AddConstructorEntriesFromIterable(
BIND(&if_exception);
{
iterator_assembler.IteratorCloseOnException(context, iterator,
- &var_exception);
+ var_exception.value());
}
BIND(&exit);
}
+RootIndex BaseCollectionsAssembler::GetAddFunctionNameIndex(Variant variant) {
+ switch (variant) {
+ case kMap:
+ case kWeakMap:
+ return RootIndex::kset_string;
+ case kSet:
+ case kWeakSet:
+ return RootIndex::kadd_string;
+ }
+ UNREACHABLE();
+}
+
+void BaseCollectionsAssembler::GotoIfInitialAddFunctionModified(
+ Variant variant, TNode<Context> native_context, TNode<Object> collection,
+ Label* if_modified) {
+ STATIC_ASSERT(JSCollection::kAddFunctionDescriptorIndex ==
+ JSWeakCollection::kAddFunctionDescriptorIndex);
+ GotoIfInitialPrototypePropertyModified(
+ LoadMap(CAST(collection)),
+ GetInitialCollectionPrototype(variant, native_context),
+ JSCollection::kAddFunctionDescriptorIndex,
+ GetAddFunctionNameIndex(variant), if_modified);
+}
+
TNode<Object> BaseCollectionsAssembler::AllocateJSCollection(
TNode<Context> context, TNode<JSFunction> constructor,
TNode<Object> new_target) {
@@ -463,8 +507,8 @@ void BaseCollectionsAssembler::GotoIfNotJSReceiver(Node* const obj,
GotoIfNot(IsJSReceiver(obj), if_not_receiver);
}
-TNode<BoolT> BaseCollectionsAssembler::HasInitialCollectionPrototype(
- Variant variant, TNode<Context> native_context, TNode<Object> collection) {
+TNode<Map> BaseCollectionsAssembler::GetInitialCollectionPrototype(
+ Variant variant, TNode<Context> native_context) {
int initial_prototype_index;
switch (variant) {
case kMap:
@@ -480,12 +524,16 @@ TNode<BoolT> BaseCollectionsAssembler::HasInitialCollectionPrototype(
initial_prototype_index = Context::INITIAL_WEAKSET_PROTOTYPE_MAP_INDEX;
break;
}
- TNode<Map> initial_prototype_map =
- CAST(LoadContextElement(native_context, initial_prototype_index));
+ return CAST(LoadContextElement(native_context, initial_prototype_index));
+}
+
+TNode<BoolT> BaseCollectionsAssembler::HasInitialCollectionPrototype(
+ Variant variant, TNode<Context> native_context, TNode<Object> collection) {
TNode<Map> collection_proto_map =
LoadMap(LoadMapPrototype(LoadMap(CAST(collection))));
- return WordEqual(collection_proto_map, initial_prototype_map);
+ return WordEqual(collection_proto_map,
+ GetInitialCollectionPrototype(variant, native_context));
}
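GotoIfInitialAddFunctionModified narrows the old whole-prototype comparison down to a single question: does the descriptor at kAddFunctionDescriptorIndex on the initial prototype still hold the bootstrap add/set function? A rough model of gating a fast path on one method slot, with invented names:

#include <cstdio>

using AddFunction = void (*)();

void OriginalSetAdd() {}  // the add/set function installed at bootstrap
void UserPatchedAdd() {}  // a user-installed replacement

struct Prototype {
  AddFunction add_slot;  // models the descriptor entry for "add"/"set"
};

// The fast constructor loop stays valid only while the prototype's slot
// still holds the bootstrap function; anything else takes the slow loop.
bool InitialAddFunctionModified(const Prototype& proto) {
  return proto.add_slot != &OriginalSetAdd;
}

int main() {
  Prototype initial{&OriginalSetAdd};
  Prototype patched{&UserPatchedAdd};
  std::printf("initial modified: %d, patched modified: %d\n",
              InitialAddFunctionModified(initial),
              InitialAddFunctionModified(patched));
}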
TNode<Object> BaseCollectionsAssembler::LoadAndNormalizeFixedArrayElement(
@@ -515,114 +563,27 @@ TNode<Object> BaseCollectionsAssembler::LoadAndNormalizeFixedDoubleArrayElement(
return entry.value();
}
-void BaseCollectionsAssembler::LoadKeyValue(
- TNode<Context> context, TNode<Object> maybe_array, TVariable<Object>* key,
- TVariable<Object>* value, Label* if_may_have_side_effects,
- Label* if_exception, TVariable<Object>* var_exception) {
- CSA_ASSERT(this, Word32BinaryNot(IsTheHole(maybe_array)));
-
- Label exit(this), if_fast(this), if_slow(this, Label::kDeferred);
- BranchIfFastJSArray(maybe_array, context, &if_fast, &if_slow);
- BIND(&if_fast);
- {
- TNode<JSArray> array = CAST(maybe_array);
- TNode<Smi> length = LoadFastJSArrayLength(array);
- TNode<FixedArrayBase> elements = LoadElements(array);
- TNode<Int32T> elements_kind = LoadElementsKind(array);
-
- Label if_smiorobjects(this), if_doubles(this);
- Branch(IsFastSmiOrTaggedElementsKind(elements_kind), &if_smiorobjects,
- &if_doubles);
- BIND(&if_smiorobjects);
- {
- Label if_one(this), if_two(this);
- GotoIf(SmiGreaterThan(length, SmiConstant(1)), &if_two);
- GotoIf(SmiEqual(length, SmiConstant(1)), &if_one);
- { // empty array
- *key = UndefinedConstant();
- *value = UndefinedConstant();
- Goto(&exit);
- }
- BIND(&if_one);
- {
- *key = LoadAndNormalizeFixedArrayElement(CAST(elements),
- IntPtrConstant(0));
- *value = UndefinedConstant();
- Goto(&exit);
- }
- BIND(&if_two);
- {
- TNode<FixedArray> elements_fixed_array = CAST(elements);
- *key = LoadAndNormalizeFixedArrayElement(elements_fixed_array,
- IntPtrConstant(0));
- *value = LoadAndNormalizeFixedArrayElement(elements_fixed_array,
- IntPtrConstant(1));
- Goto(&exit);
- }
- }
- BIND(&if_doubles);
- {
- Label if_one(this), if_two(this);
- GotoIf(SmiGreaterThan(length, SmiConstant(1)), &if_two);
- GotoIf(SmiEqual(length, SmiConstant(1)), &if_one);
- { // empty array
- *key = UndefinedConstant();
- *value = UndefinedConstant();
- Goto(&exit);
- }
- BIND(&if_one);
- {
- *key = LoadAndNormalizeFixedDoubleArrayElement(elements,
- IntPtrConstant(0));
- *value = UndefinedConstant();
- Goto(&exit);
- }
- BIND(&if_two);
- {
- *key = LoadAndNormalizeFixedDoubleArrayElement(elements,
- IntPtrConstant(0));
- *value = LoadAndNormalizeFixedDoubleArrayElement(elements,
- IntPtrConstant(1));
- Goto(&exit);
- }
- }
- }
- BIND(&if_slow);
- {
- Label if_notobject(this, Label::kDeferred);
- GotoIfNotJSReceiver(maybe_array, &if_notobject);
- if (if_may_have_side_effects != nullptr) {
- // If the element is not a fast array, we cannot guarantee accessing the
- // key and value won't execute user code that will break fast path
- // assumptions.
- Goto(if_may_have_side_effects);
- } else {
- *key = UncheckedCast<Object>(GetProperty(
- context, maybe_array, isolate()->factory()->zero_string()));
- GotoIfException(key->value(), if_exception, var_exception);
-
- *value = UncheckedCast<Object>(GetProperty(
- context, maybe_array, isolate()->factory()->one_string()));
- GotoIfException(value->value(), if_exception, var_exception);
- Goto(&exit);
- }
- BIND(&if_notobject);
- {
- Node* ret = CallRuntime(
- Runtime::kThrowTypeError, context,
- SmiConstant(MessageTemplate::kIteratorValueNotAnObject), maybe_array);
- GotoIfException(ret, if_exception, var_exception);
- Unreachable();
- }
- }
- BIND(&exit);
-}
-
class CollectionsBuiltinsAssembler : public BaseCollectionsAssembler {
public:
explicit CollectionsBuiltinsAssembler(compiler::CodeAssemblerState* state)
: BaseCollectionsAssembler(state) {}
+ // Check whether |iterable| is a JS_MAP_KEY_ITERATOR_TYPE or
+ // JS_MAP_VALUE_ITERATOR_TYPE object that is not partially consumed and still
+ // has original iteration behavior.
+ void BranchIfIterableWithOriginalKeyOrValueMapIterator(TNode<Object> iterable,
+ TNode<Context> context,
+ Label* if_true,
+ Label* if_false);
+
+ // Check whether |iterable| is a JS_SET_TYPE or JS_SET_VALUE_ITERATOR_TYPE
+  // object that still has original iteration behavior. If it is an iterator,
+  // it also must not have been partially consumed.
+ void BranchIfIterableWithOriginalValueSetIterator(TNode<Object> iterable,
+ TNode<Context> context,
+ Label* if_true,
+ Label* if_false);
+
protected:
template <typename IteratorType>
Node* AllocateJSCollectionIterator(Node* context, int map_index,
@@ -720,6 +681,26 @@ class CollectionsBuiltinsAssembler : public BaseCollectionsAssembler {
Node* const key, Node* const hash,
Node* const number_of_buckets,
Node* const occupancy);
+
+ // Create a JSArray with PACKED_ELEMENTS kind from a Map.prototype.keys() or
+ // Map.prototype.values() iterator. The iterator is assumed to satisfy
+ // IterableWithOriginalKeyOrValueMapIterator. This function will skip the
+ // iterator and iterate directly on the underlying hash table. In the end it
+ // will update the state of the iterator to 'exhausted'.
+ TNode<JSArray> MapIteratorToList(TNode<Context> context,
+ TNode<JSMapIterator> iterator);
+
+ // Create a JSArray with PACKED_ELEMENTS kind from a Set.prototype.keys() or
+ // Set.prototype.values() iterator, or a Set. The |iterable| is assumed to
+ // satisfy IterableWithOriginalValueSetIterator. This function will skip the
+ // iterator and iterate directly on the underlying hash table. In the end, if
+ // |iterable| is an iterator, it will update the state of the iterator to
+ // 'exhausted'.
+ TNode<JSArray> SetOrSetIteratorToList(TNode<Context> context,
+ TNode<Object> iterable);
+
+ void BranchIfMapIteratorProtectorValid(Label* if_true, Label* if_false);
+ void BranchIfSetIteratorProtectorValid(Label* if_true, Label* if_false);
};
template <typename IteratorType>
@@ -841,6 +822,308 @@ void CollectionsBuiltinsAssembler::SameValueZeroSmi(Node* key_smi,
Goto(if_not_same);
}
+void CollectionsBuiltinsAssembler::BranchIfMapIteratorProtectorValid(
+ Label* if_true, Label* if_false) {
+ Node* protector_cell = LoadRoot(RootIndex::kMapIteratorProtector);
+ DCHECK(isolate()->heap()->map_iterator_protector()->IsPropertyCell());
+ Branch(WordEqual(LoadObjectField(protector_cell, PropertyCell::kValueOffset),
+ SmiConstant(Isolate::kProtectorValid)),
+ if_true, if_false);
+}
+
+void CollectionsBuiltinsAssembler::
+ BranchIfIterableWithOriginalKeyOrValueMapIterator(TNode<Object> iterator,
+ TNode<Context> context,
+ Label* if_true,
+ Label* if_false) {
+ Label if_key_or_value_iterator(this), extra_checks(this);
+
+ // Check if iterator is a keys or values JSMapIterator.
+ GotoIf(TaggedIsSmi(iterator), if_false);
+ TNode<Map> iter_map = LoadMap(CAST(iterator));
+ Node* const instance_type = LoadMapInstanceType(iter_map);
+ GotoIf(InstanceTypeEqual(instance_type, JS_MAP_KEY_ITERATOR_TYPE),
+ &if_key_or_value_iterator);
+ Branch(InstanceTypeEqual(instance_type, JS_MAP_VALUE_ITERATOR_TYPE),
+ &if_key_or_value_iterator, if_false);
+
+ BIND(&if_key_or_value_iterator);
+ // Check that the iterator is not partially consumed.
+ Node* const index =
+ LoadObjectField(CAST(iterator), JSMapIterator::kIndexOffset);
+ GotoIfNot(WordEqual(index, SmiConstant(0)), if_false);
+ BranchIfMapIteratorProtectorValid(&extra_checks, if_false);
+
+ BIND(&extra_checks);
+ // Check if the iterator object has the original %MapIteratorPrototype%.
+ Node* const native_context = LoadNativeContext(context);
+ Node* const initial_map_iter_proto = LoadContextElement(
+ native_context, Context::INITIAL_MAP_ITERATOR_PROTOTYPE_INDEX);
+ Node* const map_iter_proto = LoadMapPrototype(iter_map);
+ GotoIfNot(WordEqual(map_iter_proto, initial_map_iter_proto), if_false);
+
+ // Check if the original MapIterator prototype has the original
+ // %IteratorPrototype%.
+ Node* const initial_iter_proto = LoadContextElement(
+ native_context, Context::INITIAL_ITERATOR_PROTOTYPE_INDEX);
+ Node* const iter_proto = LoadMapPrototype(LoadMap(map_iter_proto));
+ Branch(WordEqual(iter_proto, initial_iter_proto), if_true, if_false);
+}
+
+void BranchIfIterableWithOriginalKeyOrValueMapIterator(
+ compiler::CodeAssemblerState* state, TNode<Object> iterable,
+ TNode<Context> context, compiler::CodeAssemblerLabel* if_true,
+ compiler::CodeAssemblerLabel* if_false) {
+ CollectionsBuiltinsAssembler assembler(state);
+ assembler.BranchIfIterableWithOriginalKeyOrValueMapIterator(
+ iterable, context, if_true, if_false);
+}
+
+void CollectionsBuiltinsAssembler::BranchIfSetIteratorProtectorValid(
+ Label* if_true, Label* if_false) {
+ Node* const protector_cell = LoadRoot(RootIndex::kSetIteratorProtector);
+ DCHECK(isolate()->heap()->set_iterator_protector()->IsPropertyCell());
+ Branch(WordEqual(LoadObjectField(protector_cell, PropertyCell::kValueOffset),
+ SmiConstant(Isolate::kProtectorValid)),
+ if_true, if_false);
+}
+
+void CollectionsBuiltinsAssembler::BranchIfIterableWithOriginalValueSetIterator(
+ TNode<Object> iterable, TNode<Context> context, Label* if_true,
+ Label* if_false) {
+ Label if_set(this), if_value_iterator(this), check_protector(this);
+ TVARIABLE(BoolT, var_result);
+
+ GotoIf(TaggedIsSmi(iterable), if_false);
+ TNode<Map> iterable_map = LoadMap(CAST(iterable));
+ Node* const instance_type = LoadMapInstanceType(iterable_map);
+
+ GotoIf(InstanceTypeEqual(instance_type, JS_SET_TYPE), &if_set);
+ Branch(InstanceTypeEqual(instance_type, JS_SET_VALUE_ITERATOR_TYPE),
+ &if_value_iterator, if_false);
+
+ BIND(&if_set);
+ // Check if the set object has the original Set prototype.
+ Node* const initial_set_proto = LoadContextElement(
+ LoadNativeContext(context), Context::INITIAL_SET_PROTOTYPE_INDEX);
+ Node* const set_proto = LoadMapPrototype(iterable_map);
+ GotoIfNot(WordEqual(set_proto, initial_set_proto), if_false);
+ Goto(&check_protector);
+
+ BIND(&if_value_iterator);
+ // Check that the iterator is not partially consumed.
+ Node* const index =
+ LoadObjectField(CAST(iterable), JSSetIterator::kIndexOffset);
+ GotoIfNot(WordEqual(index, SmiConstant(0)), if_false);
+
+ // Check if the iterator object has the original SetIterator prototype.
+ Node* const native_context = LoadNativeContext(context);
+ Node* const initial_set_iter_proto = LoadContextElement(
+ native_context, Context::INITIAL_SET_ITERATOR_PROTOTYPE_INDEX);
+ Node* const set_iter_proto = LoadMapPrototype(iterable_map);
+ GotoIfNot(WordEqual(set_iter_proto, initial_set_iter_proto), if_false);
+
+ // Check if the original SetIterator prototype has the original
+ // %IteratorPrototype%.
+ Node* const initial_iter_proto = LoadContextElement(
+ native_context, Context::INITIAL_ITERATOR_PROTOTYPE_INDEX);
+ Node* const iter_proto = LoadMapPrototype(LoadMap(set_iter_proto));
+ GotoIfNot(WordEqual(iter_proto, initial_iter_proto), if_false);
+ Goto(&check_protector);
+
+ BIND(&check_protector);
+ BranchIfSetIteratorProtectorValid(if_true, if_false);
+}
+
+void BranchIfIterableWithOriginalValueSetIterator(
+ compiler::CodeAssemblerState* state, TNode<Object> iterable,
+ TNode<Context> context, compiler::CodeAssemblerLabel* if_true,
+ compiler::CodeAssemblerLabel* if_false) {
+ CollectionsBuiltinsAssembler assembler(state);
+ assembler.BranchIfIterableWithOriginalValueSetIterator(iterable, context,
+ if_true, if_false);
+}
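Both protector checks above read a protector cell: one word that the runtime flips from kProtectorValid to an invalid state the first time the relevant iterator prototype is mutated, after which every fast path consulting the cell permanently falls back to the generic code. A small standalone model of that one-way switch:

#include <cstdio>

enum ProtectorState { kProtectorInvalid = 0, kProtectorValid = 1 };

// Models e.g. the PropertyCell behind RootIndex::kSetIteratorProtector.
struct ProtectorCell { ProtectorState value = kProtectorValid; };

ProtectorCell set_iterator_protector;

// Called by the runtime when %SetIteratorPrototype% (or a method on it)
// is mutated; the cell never becomes valid again.
void InvalidateSetIteratorProtector() {
  set_iterator_protector.value = kProtectorInvalid;
}

bool SetIteratorProtectorValid() {
  return set_iterator_protector.value == kProtectorValid;
}

int main() {
  std::printf("fast path ok: %d\n", SetIteratorProtectorValid());  // 1
  InvalidateSetIteratorProtector();  // someone patched the prototype
  std::printf("fast path ok: %d\n", SetIteratorProtectorValid());  // 0
}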
+
+TNode<JSArray> CollectionsBuiltinsAssembler::MapIteratorToList(
+ TNode<Context> context, TNode<JSMapIterator> iterator) {
+ // Transition the {iterator} table if necessary.
+ TNode<OrderedHashMap> table;
+ TNode<IntPtrT> index;
+ std::tie(table, index) =
+ TransitionAndUpdate<JSMapIterator, OrderedHashMap>(iterator);
+ CSA_ASSERT(this, IntPtrEqual(index, IntPtrConstant(0)));
+
+ TNode<IntPtrT> size =
+ LoadAndUntagObjectField(table, OrderedHashMap::NumberOfElementsOffset());
+
+ const ElementsKind kind = PACKED_ELEMENTS;
+ TNode<Map> array_map =
+ LoadJSArrayElementsMap(kind, LoadNativeContext(context));
+ TNode<JSArray> array =
+ AllocateJSArray(kind, array_map, size, SmiTag(size), nullptr,
+ INTPTR_PARAMETERS, kAllowLargeObjectAllocation);
+ TNode<FixedArray> elements = CAST(LoadElements(array));
+
+ const int first_element_offset = FixedArray::kHeaderSize - kHeapObjectTag;
+ TNode<IntPtrT> first_to_element_offset =
+ ElementOffsetFromIndex(IntPtrConstant(0), kind, INTPTR_PARAMETERS, 0);
+ VARIABLE(
+ var_offset, MachineType::PointerRepresentation(),
+ IntPtrAdd(first_to_element_offset, IntPtrConstant(first_element_offset)));
+ TVARIABLE(IntPtrT, var_index, index);
+ VariableList vars({&var_index, &var_offset}, zone());
+ Label done(this, {&var_index}), loop(this, vars), continue_loop(this, vars),
+ write_key(this, vars), write_value(this, vars);
+
+ Goto(&loop);
+
+ BIND(&loop);
+ {
+ // Read the next entry from the {table}, skipping holes.
+ TNode<Object> entry_key;
+ TNode<IntPtrT> entry_start_position;
+ TNode<IntPtrT> cur_index;
+ std::tie(entry_key, entry_start_position, cur_index) =
+ NextSkipHoles<OrderedHashMap>(table, var_index.value(), &done);
+
+ // Decide to write key or value.
+ Branch(
+ InstanceTypeEqual(LoadInstanceType(iterator), JS_MAP_KEY_ITERATOR_TYPE),
+ &write_key, &write_value);
+
+ BIND(&write_key);
+ {
+ Store(elements, var_offset.value(), entry_key);
+ Goto(&continue_loop);
+ }
+
+ BIND(&write_value);
+ {
+ CSA_ASSERT(this, InstanceTypeEqual(LoadInstanceType(iterator),
+ JS_MAP_VALUE_ITERATOR_TYPE));
+ TNode<Object> entry_value =
+ LoadFixedArrayElement(table, entry_start_position,
+ (OrderedHashMap::HashTableStartIndex() +
+ OrderedHashMap::kValueOffset) *
+ kTaggedSize);
+
+ Store(elements, var_offset.value(), entry_value);
+ Goto(&continue_loop);
+ }
+
+ BIND(&continue_loop);
+ {
+ // Increment the array offset and continue the loop to the next entry.
+ var_index = cur_index;
+ var_offset.Bind(
+ IntPtrAdd(var_offset.value(), IntPtrConstant(kTaggedSize)));
+ Goto(&loop);
+ }
+ }
+
+ BIND(&done);
+ // Set the {iterator} to exhausted.
+ StoreObjectFieldRoot(iterator, JSMapIterator::kTableOffset,
+ RootIndex::kEmptyOrderedHashMap);
+ StoreObjectFieldNoWriteBarrier(iterator, JSMapIterator::kIndexOffset,
+ SmiTag(var_index.value()));
+ return UncheckedCast<JSArray>(array);
+}
+
+TF_BUILTIN(MapIteratorToList, CollectionsBuiltinsAssembler) {
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<JSMapIterator> iterator = CAST(Parameter(Descriptor::kSource));
+ Return(MapIteratorToList(context, iterator));
+}
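MapIteratorToList sidesteps the iterator protocol entirely: it walks the backing OrderedHashMap directly, skips deleted holes, writes keys or values into a preallocated array, and finally flips the iterator to the exhausted state. A simplified standalone analogue using a vector of optional entries as the hash table (all types hypothetical):

#include <cstdio>
#include <optional>
#include <string>
#include <utility>
#include <vector>

struct MapIterator {
  // Models the backing OrderedHashMap: deleted entries become holes.
  std::vector<std::optional<std::pair<std::string, int>>> table;
  size_t index = 0;       // JSMapIterator::kIndexOffset analogue
  bool want_keys = true;  // key iterator vs value iterator
  bool exhausted = false;
};

// Drains the iterator straight from the table, skipping holes, and
// leaves the iterator in the exhausted state, like the builtin does.
std::vector<std::string> MapIteratorToList(MapIterator& it) {
  std::vector<std::string> out;
  for (; it.index < it.table.size(); ++it.index) {
    auto& entry = it.table[it.index];
    if (!entry) continue;  // skip deleted holes
    out.push_back(it.want_keys ? entry->first
                               : std::to_string(entry->second));
  }
  it.exhausted = true;  // V8 swaps in the empty-table sentinel here
  return out;
}

int main() {
  MapIterator it;
  it.table.push_back(std::make_pair(std::string("a"), 1));
  it.table.push_back(std::nullopt);  // a deleted entry
  it.table.push_back(std::make_pair(std::string("b"), 2));
  for (const auto& k : MapIteratorToList(it)) std::printf("%s ", k.c_str());
  std::printf("\nexhausted: %d\n", (int)it.exhausted);
}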
+
+TNode<JSArray> CollectionsBuiltinsAssembler::SetOrSetIteratorToList(
+ TNode<Context> context, TNode<Object> iterable) {
+ TVARIABLE(OrderedHashSet, var_table);
+ Label if_set(this), if_iterator(this), copy(this);
+
+ Node* const instance_type = LoadInstanceType(CAST(iterable));
+ Branch(InstanceTypeEqual(instance_type, JS_SET_TYPE), &if_set, &if_iterator);
+
+ BIND(&if_set);
+ {
+ // {iterable} is a JSSet.
+ var_table = CAST(LoadObjectField(CAST(iterable), JSSet::kTableOffset));
+ Goto(&copy);
+ }
+
+ BIND(&if_iterator);
+ {
+ // {iterable} is a JSSetIterator.
+ // Transition the {iterable} table if necessary.
+ TNode<OrderedHashSet> iter_table;
+ TNode<IntPtrT> iter_index;
+ std::tie(iter_table, iter_index) =
+ TransitionAndUpdate<JSSetIterator, OrderedHashSet>(CAST(iterable));
+ CSA_ASSERT(this, IntPtrEqual(iter_index, IntPtrConstant(0)));
+ var_table = iter_table;
+ Goto(&copy);
+ }
+
+ BIND(&copy);
+ TNode<OrderedHashSet> table = var_table.value();
+ TNode<IntPtrT> size =
+ LoadAndUntagObjectField(table, OrderedHashMap::NumberOfElementsOffset());
+
+ const ElementsKind kind = PACKED_ELEMENTS;
+ TNode<Map> array_map =
+ LoadJSArrayElementsMap(kind, LoadNativeContext(context));
+ TNode<JSArray> array =
+ AllocateJSArray(kind, array_map, size, SmiTag(size), nullptr,
+ INTPTR_PARAMETERS, kAllowLargeObjectAllocation);
+ TNode<FixedArray> elements = CAST(LoadElements(array));
+
+ const int first_element_offset = FixedArray::kHeaderSize - kHeapObjectTag;
+ TNode<IntPtrT> first_to_element_offset =
+ ElementOffsetFromIndex(IntPtrConstant(0), kind, INTPTR_PARAMETERS, 0);
+ VARIABLE(
+ var_offset, MachineType::PointerRepresentation(),
+ IntPtrAdd(first_to_element_offset, IntPtrConstant(first_element_offset)));
+ TVARIABLE(IntPtrT, var_index, IntPtrConstant(0));
+ Label done(this), finalize(this, {&var_index}),
+ loop(this, {&var_index, &var_offset});
+
+ Goto(&loop);
+
+ BIND(&loop);
+ {
+ // Read the next entry from the {table}, skipping holes.
+ TNode<Object> entry_key;
+ TNode<IntPtrT> entry_start_position;
+ TNode<IntPtrT> cur_index;
+ std::tie(entry_key, entry_start_position, cur_index) =
+ NextSkipHoles<OrderedHashSet>(table, var_index.value(), &finalize);
+
+ Store(elements, var_offset.value(), entry_key);
+
+ var_index = cur_index;
+ var_offset.Bind(IntPtrAdd(var_offset.value(), IntPtrConstant(kTaggedSize)));
+ Goto(&loop);
+ }
+
+ BIND(&finalize);
+ GotoIf(InstanceTypeEqual(instance_type, JS_SET_TYPE), &done);
+ // Set the {iterable} to exhausted if it's an iterator.
+ StoreObjectFieldRoot(iterable, JSSetIterator::kTableOffset,
+ RootIndex::kEmptyOrderedHashSet);
+ StoreObjectFieldNoWriteBarrier(iterable, JSSetIterator::kIndexOffset,
+ SmiTag(var_index.value()));
+ Goto(&done);
+
+ BIND(&done);
+ return UncheckedCast<JSArray>(array);
+}
+
+TF_BUILTIN(SetOrSetIteratorToList, CollectionsBuiltinsAssembler) {
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> object = CAST(Parameter(Descriptor::kSource));
+ Return(SetOrSetIteratorToList(context, object));
+}
+
template <typename CollectionType>
void CollectionsBuiltinsAssembler::FindOrderedHashTableEntryForSmiKey(
Node* table, Node* smi_key, Variable* result, Label* entry_found,
@@ -1008,10 +1291,14 @@ TF_BUILTIN(OrderedHashTableHealIndex, CollectionsBuiltinsAssembler) {
GotoIfNot(SmiLessThan(SmiConstant(0), index), &return_zero);
// Check if the {table} was cleared.
+ STATIC_ASSERT(OrderedHashMap::NumberOfDeletedElementsOffset() ==
+ OrderedHashSet::NumberOfDeletedElementsOffset());
Node* number_of_deleted_elements = LoadAndUntagObjectField(
- table, OrderedHashTableBase::kNumberOfDeletedElementsOffset);
+ table, OrderedHashMap::NumberOfDeletedElementsOffset());
+ STATIC_ASSERT(OrderedHashMap::kClearedTableSentinel ==
+ OrderedHashSet::kClearedTableSentinel);
GotoIf(WordEqual(number_of_deleted_elements,
- IntPtrConstant(OrderedHashTableBase::kClearedTableSentinel)),
+ IntPtrConstant(OrderedHashMap::kClearedTableSentinel)),
&return_zero);
VARIABLE(var_i, MachineType::PointerRepresentation(), IntPtrConstant(0));
@@ -1022,9 +1309,10 @@ TF_BUILTIN(OrderedHashTableHealIndex, CollectionsBuiltinsAssembler) {
{
Node* i = var_i.value();
GotoIfNot(IntPtrLessThan(i, number_of_deleted_elements), &return_index);
+ STATIC_ASSERT(OrderedHashMap::RemovedHolesIndex() ==
+ OrderedHashSet::RemovedHolesIndex());
TNode<Smi> removed_index = CAST(LoadFixedArrayElement(
- CAST(table), i,
- OrderedHashTableBase::kRemovedHolesIndex * kPointerSize));
+ CAST(table), i, OrderedHashMap::RemovedHolesIndex() * kTaggedSize));
GotoIf(SmiGreaterThanOrEqual(removed_index, index), &return_index);
Decrement(&var_index, 1, SMI_PARAMETERS);
Increment(&var_i);
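
A rough C++ sketch of the healing logic in this hunk, under the assumption that the removed-holes list is sorted ascending (which the builtin's forward scan relies on); HealIndex and its parameters are illustrative names, not V8 API.

#include <vector>

// Map an iterator index on the old table to the matching index on the
// rehashed table: every slot removed below {index} shifts it left by
// one. A cleared table (the sentinel checked above) heals to zero.
int HealIndex(int index, const std::vector<int>& removed_indices,
              bool table_was_cleared) {
  if (table_was_cleared || index <= 0) return 0;
  for (int removed : removed_indices) {
    if (removed >= index) break;  // sorted, later removals cannot apply
    --index;
  }
  return index;
}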
@@ -1047,7 +1335,7 @@ CollectionsBuiltinsAssembler::Transition(
TVARIABLE(TableType, var_table, table);
Label if_done(this), if_transition(this, Label::kDeferred);
Branch(TaggedIsSmi(
- LoadObjectField(var_table.value(), TableType::kNextTableOffset)),
+ LoadObjectField(var_table.value(), TableType::NextTableOffset())),
&if_done, &if_transition);
BIND(&if_transition);
@@ -1060,7 +1348,7 @@ CollectionsBuiltinsAssembler::Transition(
TNode<IntPtrT> index = var_index.value();
TNode<Object> next_table =
- LoadObjectField(table, TableType::kNextTableOffset);
+ LoadObjectField(table, TableType::NextTableOffset());
GotoIf(TaggedIsSmi(next_table), &done_loop);
var_table = CAST(next_table);
@@ -1102,11 +1390,11 @@ CollectionsBuiltinsAssembler::NextSkipHoles(TNode<TableType> table,
Label* if_end) {
// Compute the used capacity for the {table}.
TNode<IntPtrT> number_of_buckets =
- LoadAndUntagObjectField(table, TableType::kNumberOfBucketsOffset);
+ LoadAndUntagObjectField(table, TableType::NumberOfBucketsOffset());
TNode<IntPtrT> number_of_elements =
- LoadAndUntagObjectField(table, TableType::kNumberOfElementsOffset);
- TNode<IntPtrT> number_of_deleted_elements =
- LoadAndUntagObjectField(table, TableType::kNumberOfDeletedElementsOffset);
+ LoadAndUntagObjectField(table, TableType::NumberOfElementsOffset());
+ TNode<IntPtrT> number_of_deleted_elements = LoadAndUntagObjectField(
+ table, TableType::NumberOfDeletedElementsOffset());
TNode<IntPtrT> used_capacity =
IntPtrAdd(number_of_elements, number_of_deleted_elements);
@@ -1123,7 +1411,7 @@ CollectionsBuiltinsAssembler::NextSkipHoles(TNode<TableType> table,
number_of_buckets);
entry_key =
LoadFixedArrayElement(table, entry_start_position,
- TableType::kHashTableStartIndex * kPointerSize);
+ TableType::HashTableStartIndex() * kTaggedSize);
Increment(&var_index);
Branch(IsTheHole(entry_key), &loop, &done_loop);
}
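
The same traversal in plain C++, as a hedged sketch: used capacity is live plus deleted entries, and the walk advances until it finds a non-hole key or falls off the end. The std::optional hole model and the name NextSkipHolesToy are illustrative.

#include <cstddef>
#include <optional>
#include <tuple>
#include <vector>

// Returns {key, entry index, next index}, or nullopt when exhausted.
std::optional<std::tuple<int, size_t, size_t>> NextSkipHolesToy(
    const std::vector<std::optional<int>>& entries, size_t used_capacity,
    size_t index) {
  while (index < used_capacity) {
    size_t current = index++;
    if (entries[current].has_value()) {
      return std::make_tuple(*entries[current], current, index);
    }
  }
  return std::nullopt;
}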
@@ -1151,8 +1439,8 @@ TF_BUILTIN(MapPrototypeGet, CollectionsBuiltinsAssembler) {
BIND(&if_found);
Return(LoadFixedArrayElement(
CAST(table), SmiUntag(index),
- (OrderedHashMap::kHashTableStartIndex + OrderedHashMap::kValueOffset) *
- kPointerSize));
+ (OrderedHashMap::HashTableStartIndex() + OrderedHashMap::kValueOffset) *
+ kTaggedSize));
BIND(&if_not_found);
Return(UndefinedConstant());
@@ -1222,8 +1510,8 @@ TF_BUILTIN(MapPrototypeSet, CollectionsBuiltinsAssembler) {
// If we found the entry, we just store the value there.
StoreFixedArrayElement(table, entry_start_position_or_hash.value(), value,
UPDATE_WRITE_BARRIER,
- kPointerSize * (OrderedHashMap::kHashTableStartIndex +
- OrderedHashMap::kValueOffset));
+ kTaggedSize * (OrderedHashMap::HashTableStartIndex() +
+ OrderedHashMap::kValueOffset));
Return(receiver);
Label no_hash(this), add_entry(this), store_new_entry(this);
@@ -1246,14 +1534,14 @@ TF_BUILTIN(MapPrototypeSet, CollectionsBuiltinsAssembler) {
{
// Check we have enough space for the entry.
number_of_buckets.Bind(SmiUntag(CAST(
- LoadFixedArrayElement(table, OrderedHashMap::kNumberOfBucketsIndex))));
+ LoadFixedArrayElement(table, OrderedHashMap::NumberOfBucketsIndex()))));
STATIC_ASSERT(OrderedHashMap::kLoadFactor == 2);
Node* const capacity = WordShl(number_of_buckets.value(), 1);
Node* const number_of_elements = SmiUntag(
- CAST(LoadObjectField(table, OrderedHashMap::kNumberOfElementsOffset)));
+ CAST(LoadObjectField(table, OrderedHashMap::NumberOfElementsOffset())));
Node* const number_of_deleted = SmiUntag(CAST(LoadObjectField(
- table, OrderedHashMap::kNumberOfDeletedElementsOffset)));
+ table, OrderedHashMap::NumberOfDeletedElementsOffset())));
occupancy.Bind(IntPtrAdd(number_of_elements, number_of_deleted));
GotoIf(IntPtrLessThan(occupancy.value(), capacity), &store_new_entry);
@@ -1262,11 +1550,11 @@ TF_BUILTIN(MapPrototypeSet, CollectionsBuiltinsAssembler) {
CallRuntime(Runtime::kMapGrow, context, receiver);
table_var = CAST(LoadObjectField(receiver, JSMap::kTableOffset));
number_of_buckets.Bind(SmiUntag(CAST(LoadFixedArrayElement(
- table_var.value(), OrderedHashMap::kNumberOfBucketsIndex))));
+ table_var.value(), OrderedHashMap::NumberOfBucketsIndex()))));
Node* const new_number_of_elements = SmiUntag(CAST(LoadObjectField(
- table_var.value(), OrderedHashMap::kNumberOfElementsOffset)));
+ table_var.value(), OrderedHashMap::NumberOfElementsOffset())));
Node* const new_number_of_deleted = SmiUntag(CAST(LoadObjectField(
- table_var.value(), OrderedHashMap::kNumberOfDeletedElementsOffset)));
+ table_var.value(), OrderedHashMap::NumberOfDeletedElementsOffset())));
occupancy.Bind(IntPtrAdd(new_number_of_elements, new_number_of_deleted));
Goto(&store_new_entry);
}
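
The capacity check in this hunk reduces to a threshold comparison; a short sketch with illustrative names, relying on the STATIC_ASSERT above that kLoadFactor == 2.

#include <cstddef>

// Occupancy counts live plus deleted entries; capacity is
// kLoadFactor * number_of_buckets. The table must grow once they meet.
bool HasSpaceForNewEntry(size_t number_of_buckets, size_t occupancy) {
  size_t capacity = number_of_buckets << 1;  // kLoadFactor == 2
  return occupancy < capacity;
}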
@@ -1284,29 +1572,30 @@ void CollectionsBuiltinsAssembler::StoreOrderedHashMapNewEntry(
Node* const bucket =
WordAnd(hash, IntPtrSub(number_of_buckets, IntPtrConstant(1)));
Node* const bucket_entry = LoadFixedArrayElement(
- table, bucket, OrderedHashMap::kHashTableStartIndex * kPointerSize);
+ table, bucket, OrderedHashMap::HashTableStartIndex() * kTaggedSize);
// Store the entry elements.
Node* const entry_start = IntPtrAdd(
IntPtrMul(occupancy, IntPtrConstant(OrderedHashMap::kEntrySize)),
number_of_buckets);
StoreFixedArrayElement(table, entry_start, key, UPDATE_WRITE_BARRIER,
- kPointerSize * OrderedHashMap::kHashTableStartIndex);
+ kTaggedSize * OrderedHashMap::HashTableStartIndex());
StoreFixedArrayElement(table, entry_start, value, UPDATE_WRITE_BARRIER,
- kPointerSize * (OrderedHashMap::kHashTableStartIndex +
- OrderedHashMap::kValueOffset));
+ kTaggedSize * (OrderedHashMap::HashTableStartIndex() +
+ OrderedHashMap::kValueOffset));
StoreFixedArrayElement(table, entry_start, bucket_entry, SKIP_WRITE_BARRIER,
- kPointerSize * (OrderedHashMap::kHashTableStartIndex +
- OrderedHashMap::kChainOffset));
+ kTaggedSize * (OrderedHashMap::HashTableStartIndex() +
+ OrderedHashMap::kChainOffset));
// Update the bucket head.
StoreFixedArrayElement(table, bucket, SmiTag(occupancy), SKIP_WRITE_BARRIER,
- OrderedHashMap::kHashTableStartIndex * kPointerSize);
+ OrderedHashMap::HashTableStartIndex() * kTaggedSize);
// Bump the elements count.
TNode<Smi> const number_of_elements =
- CAST(LoadObjectField(table, OrderedHashMap::kNumberOfElementsOffset));
- StoreObjectFieldNoWriteBarrier(table, OrderedHashMap::kNumberOfElementsOffset,
+ CAST(LoadObjectField(table, OrderedHashMap::NumberOfElementsOffset()));
+ StoreObjectFieldNoWriteBarrier(table,
+ OrderedHashMap::NumberOfElementsOffset(),
SmiAdd(number_of_elements, SmiConstant(1)));
}
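
A compact C++ sketch of the chained insertion performed here: buckets hold the index of their newest entry, and each entry remembers the previous head. ToyOrderedMap, ToyEntry and StoreNewEntry are illustrative stand-ins for the CSA code, not V8 types.

#include <cstddef>
#include <cstdint>
#include <vector>

struct ToyEntry {
  int64_t key;
  int64_t value;
  int chain;  // index of the previous bucket head, or -1 for none
};

struct ToyOrderedMap {
  std::vector<int> buckets;       // bucket count is a power of two
  std::vector<ToyEntry> entries;  // kept in insertion order
};

void StoreNewEntry(ToyOrderedMap& map, int64_t key, int64_t value,
                   uint32_t hash) {
  // A power-of-two bucket count turns the modulo into a mask, exactly
  // like the WordAnd above.
  size_t bucket = hash & (map.buckets.size() - 1);
  int previous_head = map.buckets[bucket];
  map.entries.push_back({key, value, previous_head});
  map.buckets[bucket] = static_cast<int>(map.entries.size()) - 1;
}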
@@ -1336,27 +1625,28 @@ TF_BUILTIN(MapPrototypeDelete, CollectionsBuiltinsAssembler) {
// If we found the entry, mark the entry as deleted.
StoreFixedArrayElement(table, entry_start_position_or_hash.value(),
TheHoleConstant(), UPDATE_WRITE_BARRIER,
- kPointerSize * OrderedHashMap::kHashTableStartIndex);
+ kTaggedSize * OrderedHashMap::HashTableStartIndex());
StoreFixedArrayElement(table, entry_start_position_or_hash.value(),
TheHoleConstant(), UPDATE_WRITE_BARRIER,
- kPointerSize * (OrderedHashMap::kHashTableStartIndex +
- OrderedHashMap::kValueOffset));
+ kTaggedSize * (OrderedHashMap::HashTableStartIndex() +
+ OrderedHashMap::kValueOffset));
// Decrement the number of elements, increment the number of deleted elements.
TNode<Smi> const number_of_elements = SmiSub(
- CAST(LoadObjectField(table, OrderedHashMap::kNumberOfElementsOffset)),
+ CAST(LoadObjectField(table, OrderedHashMap::NumberOfElementsOffset())),
SmiConstant(1));
- StoreObjectFieldNoWriteBarrier(table, OrderedHashMap::kNumberOfElementsOffset,
- number_of_elements);
+ StoreObjectFieldNoWriteBarrier(
+ table, OrderedHashMap::NumberOfElementsOffset(), number_of_elements);
TNode<Smi> const number_of_deleted =
SmiAdd(CAST(LoadObjectField(
- table, OrderedHashMap::kNumberOfDeletedElementsOffset)),
+ table, OrderedHashMap::NumberOfDeletedElementsOffset())),
SmiConstant(1));
StoreObjectFieldNoWriteBarrier(
- table, OrderedHashMap::kNumberOfDeletedElementsOffset, number_of_deleted);
+ table, OrderedHashMap::NumberOfDeletedElementsOffset(),
+ number_of_deleted);
- TNode<Smi> const number_of_buckets =
- CAST(LoadFixedArrayElement(table, OrderedHashMap::kNumberOfBucketsIndex));
+ TNode<Smi> const number_of_buckets = CAST(
+ LoadFixedArrayElement(table, OrderedHashMap::NumberOfBucketsIndex()));
  // If there are fewer elements than #buckets / 2, shrink the table.
Label shrink(this);
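
Deletion marks the slots as holes and adjusts the two counters; the shrink trigger itself is one comparison, sketched below (SetPrototypeDelete further down uses the same heuristic).

#include <cstddef>

// With kLoadFactor == 2 the capacity is 2 * number_of_buckets, so this
// shrinks the table once live occupancy falls below roughly a quarter.
bool ShouldShrink(size_t number_of_elements, size_t number_of_buckets) {
  return number_of_elements < number_of_buckets / 2;
}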
@@ -1414,14 +1704,14 @@ TF_BUILTIN(SetPrototypeAdd, CollectionsBuiltinsAssembler) {
{
// Check we have enough space for the entry.
number_of_buckets.Bind(SmiUntag(CAST(
- LoadFixedArrayElement(table, OrderedHashSet::kNumberOfBucketsIndex))));
+ LoadFixedArrayElement(table, OrderedHashSet::NumberOfBucketsIndex()))));
STATIC_ASSERT(OrderedHashSet::kLoadFactor == 2);
Node* const capacity = WordShl(number_of_buckets.value(), 1);
Node* const number_of_elements = SmiUntag(
- CAST(LoadObjectField(table, OrderedHashSet::kNumberOfElementsOffset)));
+ CAST(LoadObjectField(table, OrderedHashSet::NumberOfElementsOffset())));
Node* const number_of_deleted = SmiUntag(CAST(LoadObjectField(
- table, OrderedHashSet::kNumberOfDeletedElementsOffset)));
+ table, OrderedHashSet::NumberOfDeletedElementsOffset())));
occupancy.Bind(IntPtrAdd(number_of_elements, number_of_deleted));
GotoIf(IntPtrLessThan(occupancy.value(), capacity), &store_new_entry);
@@ -1430,11 +1720,11 @@ TF_BUILTIN(SetPrototypeAdd, CollectionsBuiltinsAssembler) {
CallRuntime(Runtime::kSetGrow, context, receiver);
table_var = CAST(LoadObjectField(receiver, JSMap::kTableOffset));
number_of_buckets.Bind(SmiUntag(CAST(LoadFixedArrayElement(
- table_var.value(), OrderedHashSet::kNumberOfBucketsIndex))));
+ table_var.value(), OrderedHashSet::NumberOfBucketsIndex()))));
Node* const new_number_of_elements = SmiUntag(CAST(LoadObjectField(
- table_var.value(), OrderedHashSet::kNumberOfElementsOffset)));
+ table_var.value(), OrderedHashSet::NumberOfElementsOffset())));
Node* const new_number_of_deleted = SmiUntag(CAST(LoadObjectField(
- table_var.value(), OrderedHashSet::kNumberOfDeletedElementsOffset)));
+ table_var.value(), OrderedHashSet::NumberOfDeletedElementsOffset())));
occupancy.Bind(IntPtrAdd(new_number_of_elements, new_number_of_deleted));
Goto(&store_new_entry);
}
@@ -1452,26 +1742,27 @@ void CollectionsBuiltinsAssembler::StoreOrderedHashSetNewEntry(
Node* const bucket =
WordAnd(hash, IntPtrSub(number_of_buckets, IntPtrConstant(1)));
Node* const bucket_entry = LoadFixedArrayElement(
- table, bucket, OrderedHashSet::kHashTableStartIndex * kPointerSize);
+ table, bucket, OrderedHashSet::HashTableStartIndex() * kTaggedSize);
// Store the entry elements.
Node* const entry_start = IntPtrAdd(
IntPtrMul(occupancy, IntPtrConstant(OrderedHashSet::kEntrySize)),
number_of_buckets);
StoreFixedArrayElement(table, entry_start, key, UPDATE_WRITE_BARRIER,
- kPointerSize * OrderedHashSet::kHashTableStartIndex);
+ kTaggedSize * OrderedHashSet::HashTableStartIndex());
StoreFixedArrayElement(table, entry_start, bucket_entry, SKIP_WRITE_BARRIER,
- kPointerSize * (OrderedHashSet::kHashTableStartIndex +
- OrderedHashSet::kChainOffset));
+ kTaggedSize * (OrderedHashSet::HashTableStartIndex() +
+ OrderedHashSet::kChainOffset));
// Update the bucket head.
StoreFixedArrayElement(table, bucket, SmiTag(occupancy), SKIP_WRITE_BARRIER,
- OrderedHashSet::kHashTableStartIndex * kPointerSize);
+ OrderedHashSet::HashTableStartIndex() * kTaggedSize);
// Bump the elements count.
TNode<Smi> const number_of_elements =
- CAST(LoadObjectField(table, OrderedHashSet::kNumberOfElementsOffset));
- StoreObjectFieldNoWriteBarrier(table, OrderedHashSet::kNumberOfElementsOffset,
+ CAST(LoadObjectField(table, OrderedHashSet::NumberOfElementsOffset()));
+ StoreObjectFieldNoWriteBarrier(table,
+ OrderedHashSet::NumberOfElementsOffset(),
SmiAdd(number_of_elements, SmiConstant(1)));
}
@@ -1501,23 +1792,24 @@ TF_BUILTIN(SetPrototypeDelete, CollectionsBuiltinsAssembler) {
// If we found the entry, mark the entry as deleted.
StoreFixedArrayElement(table, entry_start_position_or_hash.value(),
TheHoleConstant(), UPDATE_WRITE_BARRIER,
- kPointerSize * OrderedHashSet::kHashTableStartIndex);
+ kTaggedSize * OrderedHashSet::HashTableStartIndex());
// Decrement the number of elements, increment the number of deleted elements.
TNode<Smi> const number_of_elements = SmiSub(
- CAST(LoadObjectField(table, OrderedHashSet::kNumberOfElementsOffset)),
+ CAST(LoadObjectField(table, OrderedHashSet::NumberOfElementsOffset())),
SmiConstant(1));
- StoreObjectFieldNoWriteBarrier(table, OrderedHashSet::kNumberOfElementsOffset,
- number_of_elements);
+ StoreObjectFieldNoWriteBarrier(
+ table, OrderedHashSet::NumberOfElementsOffset(), number_of_elements);
TNode<Smi> const number_of_deleted =
SmiAdd(CAST(LoadObjectField(
- table, OrderedHashSet::kNumberOfDeletedElementsOffset)),
+ table, OrderedHashSet::NumberOfDeletedElementsOffset())),
SmiConstant(1));
StoreObjectFieldNoWriteBarrier(
- table, OrderedHashSet::kNumberOfDeletedElementsOffset, number_of_deleted);
+ table, OrderedHashSet::NumberOfDeletedElementsOffset(),
+ number_of_deleted);
- TNode<Smi> const number_of_buckets =
- CAST(LoadFixedArrayElement(table, OrderedHashSet::kNumberOfBucketsIndex));
+ TNode<Smi> const number_of_buckets = CAST(
+ LoadFixedArrayElement(table, OrderedHashSet::NumberOfBucketsIndex()));
  // If there are fewer elements than #buckets / 2, shrink the table.
Label shrink(this);
@@ -1546,7 +1838,7 @@ TF_BUILTIN(MapPrototypeGetSize, CollectionsBuiltinsAssembler) {
ThrowIfNotInstanceType(context, receiver, JS_MAP_TYPE,
"get Map.prototype.size");
Node* const table = LoadObjectField(receiver, JSMap::kTableOffset);
- Return(LoadObjectField(table, OrderedHashMap::kNumberOfElementsOffset));
+ Return(LoadObjectField(table, OrderedHashMap::NumberOfElementsOffset()));
}
TF_BUILTIN(MapPrototypeForEach, CollectionsBuiltinsAssembler) {
@@ -1588,8 +1880,8 @@ TF_BUILTIN(MapPrototypeForEach, CollectionsBuiltinsAssembler) {
// Load the entry value as well.
Node* entry_value = LoadFixedArrayElement(
table, entry_start_position,
- (OrderedHashMap::kHashTableStartIndex + OrderedHashMap::kValueOffset) *
- kPointerSize);
+ (OrderedHashMap::HashTableStartIndex() + OrderedHashMap::kValueOffset) *
+ kTaggedSize);
// Invoke the {callback} passing the {entry_key}, {entry_value} and the
// {receiver}.
@@ -1677,8 +1969,8 @@ TF_BUILTIN(MapIteratorPrototypeNext, CollectionsBuiltinsAssembler) {
&return_value);
var_value.Bind(LoadFixedArrayElement(
table, entry_start_position,
- (OrderedHashMap::kHashTableStartIndex + OrderedHashMap::kValueOffset) *
- kPointerSize));
+ (OrderedHashMap::HashTableStartIndex() + OrderedHashMap::kValueOffset) *
+ kTaggedSize));
Branch(InstanceTypeEqual(receiver_instance_type, JS_MAP_VALUE_ITERATOR_TYPE),
&return_value, &return_entry);
@@ -1777,7 +2069,7 @@ TF_BUILTIN(SetPrototypeGetSize, CollectionsBuiltinsAssembler) {
ThrowIfNotInstanceType(context, receiver, JS_SET_TYPE,
"get Set.prototype.size");
Node* const table = LoadObjectField(receiver, JSSet::kTableOffset);
- Return(LoadObjectField(table, OrderedHashSet::kNumberOfElementsOffset));
+ Return(LoadObjectField(table, OrderedHashSet::NumberOfElementsOffset()));
}
TF_BUILTIN(SetPrototypeForEach, CollectionsBuiltinsAssembler) {
diff --git a/deps/v8/src/builtins/builtins-collections-gen.h b/deps/v8/src/builtins/builtins-collections-gen.h
new file mode 100644
index 0000000000..a78ad5a4a7
--- /dev/null
+++ b/deps/v8/src/builtins/builtins-collections-gen.h
@@ -0,0 +1,26 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BUILTINS_BUILTINS_COLLECTIONS_GEN_H_
+#define V8_BUILTINS_BUILTINS_COLLECTIONS_GEN_H_
+
+#include "src/code-stub-assembler.h"
+
+namespace v8 {
+namespace internal {
+
+void BranchIfIterableWithOriginalKeyOrValueMapIterator(
+ compiler::CodeAssemblerState* state, compiler::TNode<Object> iterable,
+ compiler::TNode<Context> context, compiler::CodeAssemblerLabel* if_true,
+ compiler::CodeAssemblerLabel* if_false);
+
+void BranchIfIterableWithOriginalValueSetIterator(
+ compiler::CodeAssemblerState* state, compiler::TNode<Object> iterable,
+ compiler::TNode<Context> context, compiler::CodeAssemblerLabel* if_true,
+ compiler::CodeAssemblerLabel* if_false);
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_BUILTINS_BUILTINS_COLLECTIONS_GEN_H_
diff --git a/deps/v8/src/builtins/builtins-collections.cc b/deps/v8/src/builtins/builtins-collections.cc
index f9b1ebc0ac..be7a47290b 100644
--- a/deps/v8/src/builtins/builtins-collections.cc
+++ b/deps/v8/src/builtins/builtins-collections.cc
@@ -4,6 +4,7 @@
#include "src/builtins/builtins-utils-inl.h"
#include "src/builtins/builtins.h"
+#include "src/counters.h"
#include "src/objects-inl.h"
#include "src/objects/js-collection-inl.h"
diff --git a/deps/v8/src/builtins/builtins-console-gen.cc b/deps/v8/src/builtins/builtins-console-gen.cc
index 249ec10a28..c3a997af9a 100644
--- a/deps/v8/src/builtins/builtins-console-gen.cc
+++ b/deps/v8/src/builtins/builtins-console-gen.cc
@@ -6,6 +6,7 @@
#include "src/builtins/builtins.h"
#include "src/code-stub-assembler.h"
#include "src/frame-constants.h"
+#include "src/macro-assembler.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/builtins/builtins-console.cc b/deps/v8/src/builtins/builtins-console.cc
index d6819d8f66..6b7db301d6 100644
--- a/deps/v8/src/builtins/builtins-console.cc
+++ b/deps/v8/src/builtins/builtins-console.cc
@@ -5,7 +5,9 @@
#include "src/api-inl.h"
#include "src/builtins/builtins-utils-inl.h"
#include "src/builtins/builtins.h"
+#include "src/counters.h"
#include "src/debug/interface-types.h"
+#include "src/log.h"
#include "src/objects-inl.h"
namespace v8 {
diff --git a/deps/v8/src/builtins/builtins-constructor-gen.cc b/deps/v8/src/builtins/builtins-constructor-gen.cc
index d34236bab7..779e96c31f 100644
--- a/deps/v8/src/builtins/builtins-constructor-gen.cc
+++ b/deps/v8/src/builtins/builtins-constructor-gen.cc
@@ -13,32 +13,24 @@
#include "src/code-stub-assembler.h"
#include "src/counters.h"
#include "src/interface-descriptors.h"
+#include "src/macro-assembler.h"
#include "src/objects-inl.h"
namespace v8 {
namespace internal {
void Builtins::Generate_ConstructVarargs(MacroAssembler* masm) {
-#ifdef V8_TARGET_ARCH_IA32
- Assembler::SupportsRootRegisterScope supports_root_register(masm);
-#endif
Generate_CallOrConstructVarargs(masm,
BUILTIN_CODE(masm->isolate(), Construct));
}
void Builtins::Generate_ConstructForwardVarargs(MacroAssembler* masm) {
-#ifdef V8_TARGET_ARCH_IA32
- Assembler::SupportsRootRegisterScope supports_root_register(masm);
-#endif
Generate_CallOrConstructForwardVarargs(
masm, CallOrConstructMode::kConstruct,
BUILTIN_CODE(masm->isolate(), Construct));
}
void Builtins::Generate_ConstructFunctionForwardVarargs(MacroAssembler* masm) {
-#ifdef V8_TARGET_ARCH_IA32
- Assembler::SupportsRootRegisterScope supports_root_register(masm);
-#endif
Generate_CallOrConstructForwardVarargs(
masm, CallOrConstructMode::kConstruct,
BUILTIN_CODE(masm->isolate(), ConstructFunction));
@@ -79,6 +71,7 @@ TF_BUILTIN(FastNewClosure, ConstructorBuiltinsAssembler) {
Node* const feedback_cell_map = LoadMap(feedback_cell);
Label no_closures(this), one_closure(this), cell_done(this);
+ GotoIf(IsNoFeedbackCellMap(feedback_cell_map), &cell_done);
GotoIf(IsNoClosuresCellMap(feedback_cell_map), &no_closures);
GotoIf(IsOneClosureCellMap(feedback_cell_map), &one_closure);
CSA_ASSERT(this, IsManyClosuresCellMap(feedback_cell_map),
@@ -115,9 +108,9 @@ TF_BUILTIN(FastNewClosure, ConstructorBuiltinsAssembler) {
LoadContextElement(native_context, function_map_index);
// Create a new closure from the given function info in new space
- Node* instance_size_in_bytes =
- TimesPointerSize(LoadMapInstanceSizeInWords(function_map));
- Node* const result = Allocate(instance_size_in_bytes);
+ TNode<IntPtrT> instance_size_in_bytes =
+ TimesTaggedSize(LoadMapInstanceSizeInWords(function_map));
+ TNode<Object> result = Allocate(instance_size_in_bytes);
StoreMapNoWriteBarrier(result, function_map);
InitializeJSObjectBodyNoSlackTracking(result, function_map,
instance_size_in_bytes,
@@ -141,14 +134,14 @@ TF_BUILTIN(FastNewClosure, ConstructorBuiltinsAssembler) {
BIND(&done);
}
- STATIC_ASSERT(JSFunction::kSizeWithoutPrototype == 7 * kPointerSize);
+ STATIC_ASSERT(JSFunction::kSizeWithoutPrototype == 7 * kTaggedSize);
StoreObjectFieldNoWriteBarrier(result, JSFunction::kFeedbackCellOffset,
feedback_cell);
StoreObjectFieldNoWriteBarrier(result, JSFunction::kSharedFunctionInfoOffset,
shared_function_info);
StoreObjectFieldNoWriteBarrier(result, JSFunction::kContextOffset, context);
- Handle<Code> lazy_builtin_handle(
- isolate()->builtins()->builtin(Builtins::kCompileLazy), isolate());
+ Handle<Code> lazy_builtin_handle =
+ isolate()->builtins()->builtin_handle(Builtins::kCompileLazy);
Node* lazy_builtin = HeapConstant(lazy_builtin_handle);
StoreObjectFieldNoWriteBarrier(result, JSFunction::kCodeOffset, lazy_builtin);
Return(result);
@@ -232,14 +225,10 @@ Node* ConstructorBuiltinsAssembler::EmitFastNewObject(Node* context,
}
Node* ConstructorBuiltinsAssembler::EmitFastNewFunctionContext(
- Node* scope_info, Node* slots, Node* context, ScopeType scope_type) {
- slots = ChangeUint32ToWord(slots);
-
- // TODO(ishell): Use CSA::OptimalParameterMode() here.
- ParameterMode mode = INTPTR_PARAMETERS;
- Node* min_context_slots = IntPtrConstant(Context::MIN_CONTEXT_SLOTS);
- Node* length = IntPtrAdd(slots, min_context_slots);
- Node* size = GetFixedArrayAllocationSize(length, PACKED_ELEMENTS, mode);
+ Node* scope_info, Node* slots_uint32, Node* context, ScopeType scope_type) {
+ TNode<IntPtrT> slots = Signed(ChangeUint32ToWord(slots_uint32));
+ TNode<IntPtrT> size = ElementOffsetFromIndex(
+ slots, PACKED_ELEMENTS, INTPTR_PARAMETERS, Context::kTodoHeaderSize);
  // Create a new function context from the given scope info in new space
TNode<Context> function_context =
@@ -256,33 +245,34 @@ Node* ConstructorBuiltinsAssembler::EmitFastNewFunctionContext(
default:
UNREACHABLE();
}
+ // Set up the header.
StoreMapNoWriteBarrier(function_context, context_type);
+ TNode<IntPtrT> min_context_slots = IntPtrConstant(Context::MIN_CONTEXT_SLOTS);
+ // TODO(ishell): for now, length also includes MIN_CONTEXT_SLOTS.
+ TNode<IntPtrT> length = IntPtrAdd(slots, min_context_slots);
StoreObjectFieldNoWriteBarrier(function_context, Context::kLengthOffset,
SmiTag(length));
-
- // Set up the fixed slots.
- StoreFixedArrayElement(function_context, Context::SCOPE_INFO_INDEX,
- scope_info, SKIP_WRITE_BARRIER);
- StoreFixedArrayElement(function_context, Context::PREVIOUS_INDEX, context,
- SKIP_WRITE_BARRIER);
- StoreFixedArrayElement(function_context, Context::EXTENSION_INDEX,
- TheHoleConstant(), SKIP_WRITE_BARRIER);
-
- // Copy the native context from the previous context.
- Node* native_context = LoadNativeContext(context);
- StoreFixedArrayElement(function_context, Context::NATIVE_CONTEXT_INDEX,
- native_context, SKIP_WRITE_BARRIER);
-
- // Initialize the rest of the slots to undefined.
- Node* undefined = UndefinedConstant();
- BuildFastFixedArrayForEach(
- function_context, PACKED_ELEMENTS, min_context_slots, length,
- [this, undefined](Node* context, Node* offset) {
- StoreNoWriteBarrier(MachineRepresentation::kTagged, context, offset,
- undefined);
+ StoreObjectFieldNoWriteBarrier(function_context, Context::kScopeInfoOffset,
+ scope_info);
+ StoreObjectFieldNoWriteBarrier(function_context, Context::kPreviousOffset,
+ context);
+ StoreObjectFieldNoWriteBarrier(function_context, Context::kExtensionOffset,
+ TheHoleConstant());
+ TNode<Context> native_context = LoadNativeContext(context);
+ StoreObjectFieldNoWriteBarrier(function_context,
+ Context::kNativeContextOffset, native_context);
+
+  // Initialize the rest of the slots to undefined.
+ TNode<HeapObject> undefined = UndefinedConstant();
+ TNode<IntPtrT> start_offset = IntPtrConstant(Context::kTodoHeaderSize);
+ CodeStubAssembler::VariableList vars(0, zone());
+ BuildFastLoop(
+ vars, start_offset, size,
+ [=](Node* offset) {
+ StoreObjectFieldNoWriteBarrier(
+ function_context, UncheckedCast<IntPtrT>(offset), undefined);
},
- mode);
-
+ kTaggedSize, INTPTR_PARAMETERS, IndexAdvanceMode::kPost);
return function_context;
}
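
A sketch of the layout this hunk moves to: a fixed header written field by field, then the variable slots filled with undefined in one strided loop. The toy types and names below are illustrative, not V8's object model.

#include <cstddef>
#include <vector>

struct ToyContext {
  const void* scope_info;
  const void* previous;
  const void* extension;  // initialized to the hole sentinel
  const void* native_context;
  std::vector<const void*> slots;  // the variable part of the context
};

ToyContext FastNewFunctionContextToy(const void* scope_info,
                                     const void* previous,
                                     const void* native_context,
                                     const void* the_hole,
                                     const void* undefined,
                                     size_t slot_count) {
  ToyContext context{scope_info, previous, the_hole, native_context, {}};
  // Stands in for the BuildFastLoop above: every slot past the header
  // starts out as undefined.
  context.slots.assign(slot_count, undefined);
  return context;
}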
@@ -314,9 +304,9 @@ Node* ConstructorBuiltinsAssembler::EmitCreateRegExpLiteral(
{
Node* boilerplate = literal_site;
CSA_ASSERT(this, IsJSRegExp(boilerplate));
- int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
+ int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kTaggedSize;
Node* copy = Allocate(size);
- for (int offset = 0; offset < size; offset += kPointerSize) {
+ for (int offset = 0; offset < size; offset += kTaggedSize) {
Node* value = LoadObjectField(boilerplate, offset);
StoreObjectFieldNoWriteBarrier(copy, offset, value);
}
@@ -415,10 +405,10 @@ Node* ConstructorBuiltinsAssembler::EmitCreateEmptyArrayLiteral(
TNode<Int32T> kind = LoadElementsKind(allocation_site.value());
TNode<Context> native_context = LoadNativeContext(context);
Comment("LoadJSArrayElementsMap");
- Node* array_map = LoadJSArrayElementsMap(kind, native_context);
- Node* zero = SmiConstant(0);
+ TNode<Map> array_map = LoadJSArrayElementsMap(kind, native_context);
+ TNode<Smi> zero = SmiConstant(0);
Comment("Allocate JSArray");
- Node* result =
+ TNode<JSArray> result =
AllocateJSArray(GetInitialFastElementsKind(), array_map, zero, zero,
allocation_site.value(), ParameterMode::SMI_PARAMETERS);
@@ -501,9 +491,9 @@ Node* ConstructorBuiltinsAssembler::EmitCreateShallowObjectLiteral(
// Ensure new-space allocation for a fresh JSObject so we can skip write
// barriers when copying all object fields.
STATIC_ASSERT(JSObject::kMaxInstanceSize < kMaxRegularHeapObjectSize);
- Node* instance_size =
- TimesPointerSize(LoadMapInstanceSizeInWords(boilerplate_map));
- Node* allocation_size = instance_size;
+ TNode<IntPtrT> instance_size =
+ TimesTaggedSize(LoadMapInstanceSizeInWords(boilerplate_map));
+ TNode<IntPtrT> allocation_size = instance_size;
bool needs_allocation_memento = FLAG_allocation_site_pretenuring;
if (needs_allocation_memento) {
// Prepare for inner-allocating the AllocationMemento.
@@ -511,7 +501,8 @@ Node* ConstructorBuiltinsAssembler::EmitCreateShallowObjectLiteral(
IntPtrAdd(instance_size, IntPtrConstant(AllocationMemento::kSize));
}
- Node* copy = AllocateInNewSpace(allocation_size);
+ TNode<HeapObject> copy =
+ UncheckedCast<HeapObject>(AllocateInNewSpace(allocation_size));
{
Comment("Initialize Literal Copy");
// Initialize Object fields.
@@ -531,8 +522,7 @@ Node* ConstructorBuiltinsAssembler::EmitCreateShallowObjectLiteral(
{
// Copy over in-object properties.
Label continue_with_write_barrier(this), done_init(this);
- VARIABLE(offset, MachineType::PointerRepresentation(),
- IntPtrConstant(JSObject::kHeaderSize));
+ TVARIABLE(IntPtrT, offset, IntPtrConstant(JSObject::kHeaderSize));
// Mutable heap numbers only occur on 32-bit platforms.
bool may_use_mutable_heap_numbers = !FLAG_unbox_double_fields;
{
@@ -541,16 +531,21 @@ Node* ConstructorBuiltinsAssembler::EmitCreateShallowObjectLiteral(
Branch(WordEqual(offset.value(), instance_size), &done_init,
&continue_fast);
BIND(&continue_fast);
- Node* field = LoadObjectField(boilerplate, offset.value());
if (may_use_mutable_heap_numbers) {
+ TNode<Object> field = LoadObjectField(boilerplate, offset.value());
Label store_field(this);
GotoIf(TaggedIsSmi(field), &store_field);
- GotoIf(IsMutableHeapNumber(field), &continue_with_write_barrier);
+ GotoIf(IsMutableHeapNumber(CAST(field)), &continue_with_write_barrier);
Goto(&store_field);
BIND(&store_field);
+ StoreObjectFieldNoWriteBarrier(copy, offset.value(), field);
+ } else {
+ // Copy fields as raw data.
+ TNode<IntPtrT> field =
+ LoadObjectField<IntPtrT>(boilerplate, offset.value());
+ StoreObjectFieldNoWriteBarrier(copy, offset.value(), field);
}
- StoreObjectFieldNoWriteBarrier(copy, offset.value(), field);
- offset.Bind(IntPtrAdd(offset.value(), IntPtrConstant(kPointerSize)));
+ offset = IntPtrAdd(offset.value(), IntPtrConstant(kTaggedSize));
Branch(WordNotEqual(offset.value(), instance_size), &continue_fast,
&done_init);
}
@@ -566,33 +561,36 @@ Node* ConstructorBuiltinsAssembler::EmitCreateShallowObjectLiteral(
BIND(&continue_with_write_barrier);
{
Comment("Copy in-object properties slow");
- BuildFastLoop(offset.value(), instance_size,
- [=](Node* offset) {
- Node* field = LoadObjectField(boilerplate, offset);
- StoreObjectFieldNoWriteBarrier(copy, offset, field);
- },
- kPointerSize, INTPTR_PARAMETERS, IndexAdvanceMode::kPost);
+ BuildFastLoop(
+ offset.value(), instance_size,
+ [=](Node* offset) {
+ // TODO(ishell): value decompression is not necessary here.
+ Node* field = LoadObjectField(boilerplate, offset);
+ StoreObjectFieldNoWriteBarrier(copy, offset, field);
+ },
+ kTaggedSize, INTPTR_PARAMETERS, IndexAdvanceMode::kPost);
Comment("Copy mutable HeapNumber values");
- BuildFastLoop(offset.value(), instance_size,
- [=](Node* offset) {
- Node* field = LoadObjectField(copy, offset);
- Label copy_mutable_heap_number(this, Label::kDeferred),
- continue_loop(this);
- // We only have to clone complex field values.
- GotoIf(TaggedIsSmi(field), &continue_loop);
- Branch(IsMutableHeapNumber(field),
- &copy_mutable_heap_number, &continue_loop);
- BIND(&copy_mutable_heap_number);
- {
- Node* double_value = LoadHeapNumberValue(field);
- Node* mutable_heap_number =
- AllocateMutableHeapNumberWithValue(double_value);
- StoreObjectField(copy, offset, mutable_heap_number);
- Goto(&continue_loop);
- }
- BIND(&continue_loop);
- },
- kPointerSize, INTPTR_PARAMETERS, IndexAdvanceMode::kPost);
+ BuildFastLoop(
+ offset.value(), instance_size,
+ [=](Node* offset) {
+ Node* field = LoadObjectField(copy, offset);
+ Label copy_mutable_heap_number(this, Label::kDeferred),
+ continue_loop(this);
+ // We only have to clone complex field values.
+ GotoIf(TaggedIsSmi(field), &continue_loop);
+ Branch(IsMutableHeapNumber(field), &copy_mutable_heap_number,
+ &continue_loop);
+ BIND(&copy_mutable_heap_number);
+ {
+ Node* double_value = LoadHeapNumberValue(field);
+ Node* mutable_heap_number =
+ AllocateMutableHeapNumberWithValue(double_value);
+ StoreObjectField(copy, offset, mutable_heap_number);
+ Goto(&continue_loop);
+ }
+ BIND(&continue_loop);
+ },
+ kTaggedSize, INTPTR_PARAMETERS, IndexAdvanceMode::kPost);
Goto(&done_init);
}
BIND(&done_init);
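
The second copy loop exists because a mutable heap number is a box rather than an immediate value; a sketch of why the copy must clone the box (std::shared_ptr<double> and the names here are illustrative stand-ins).

#include <memory>
#include <vector>

using MutableNumberBox = std::shared_ptr<double>;

// Copying the pointer would alias the box: a later write to one
// object's field would show through the other. Clone the box instead.
std::vector<MutableNumberBox> CloneMutableNumbers(
    const std::vector<MutableNumberBox>& boilerplate_fields) {
  std::vector<MutableNumberBox> copy;
  copy.reserve(boilerplate_fields.size());
  for (const auto& field : boilerplate_fields) {
    copy.push_back(std::make_shared<double>(*field));
  }
  return copy;
}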
diff --git a/deps/v8/src/builtins/builtins-constructor.h b/deps/v8/src/builtins/builtins-constructor.h
index 0978bf3245..428c8cea8e 100644
--- a/deps/v8/src/builtins/builtins-constructor.h
+++ b/deps/v8/src/builtins/builtins-constructor.h
@@ -31,13 +31,14 @@ class ConstructorBuiltins {
NameDictionary::kMaxRegularCapacity / 3 * 2;
private:
- static const int kMaximumSlots = 0x8000;
+ static const int kMaximumSlots =
+ (kMaxRegularHeapObjectSize - Context::kTodoHeaderSize) / kTaggedSize - 1;
static const int kSmallMaximumSlots = 10;
  // FastNewFunctionContext can only allocate contexts that fit in
  // new space.
- STATIC_ASSERT(((kMaximumSlots + Context::MIN_CONTEXT_SLOTS) * kPointerSize +
- FixedArray::kHeaderSize) < kMaxRegularHeapObjectSize);
+ STATIC_ASSERT(Context::SizeFor(kMaximumSlots + Context::MIN_CONTEXT_SLOTS) <
+ kMaxRegularHeapObjectSize);
};
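
The new bound is derived rather than hard-coded; a sketch of the arithmetic with placeholder constants (the *Toy values are illustrative assumptions, while the real kMaxRegularHeapObjectSize, header size and kTaggedSize come from V8's heap layout).

// Placeholder values, chosen only to make the arithmetic concrete.
constexpr int kTaggedSizeToy = 8;                      // 64-bit tagged slot
constexpr int kMaxRegularHeapObjectSizeToy = 1 << 19;  // illustrative
constexpr int kContextHeaderSizeToy = 5 * kTaggedSizeToy;

// Largest slot count whose context still fits in a regular heap
// object, mirroring the expression above.
constexpr int kMaximumSlotsToy =
    (kMaxRegularHeapObjectSizeToy - kContextHeaderSizeToy) /
        kTaggedSizeToy -
    1;

static_assert(kMaximumSlotsToy > 0, "context must hold at least one slot");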
} // namespace internal
diff --git a/deps/v8/src/builtins/builtins-conversion-gen.cc b/deps/v8/src/builtins/builtins-conversion-gen.cc
index b898056658..20344cf8ef 100644
--- a/deps/v8/src/builtins/builtins-conversion-gen.cc
+++ b/deps/v8/src/builtins/builtins-conversion-gen.cc
@@ -7,6 +7,7 @@
#include "src/code-factory.h"
#include "src/code-stub-assembler.h"
#include "src/objects-inl.h"
+#include "src/objects/oddball.h"
namespace v8 {
namespace internal {
@@ -427,7 +428,7 @@ TF_BUILTIN(ToObject, CodeStubAssembler) {
BIND(&if_wrapjsvalue);
TNode<Context> native_context = LoadNativeContext(context);
- Node* constructor = LoadFixedArrayElement(
+ Node* constructor = LoadContextElement(
native_context, constructor_function_index_var.value());
Node* initial_map =
LoadObjectField(constructor, JSFunction::kPrototypeOrInitialMapOffset);
diff --git a/deps/v8/src/builtins/builtins-data-view-gen.h b/deps/v8/src/builtins/builtins-data-view-gen.h
index 4a55a90eef..3be41ddf94 100644
--- a/deps/v8/src/builtins/builtins-data-view-gen.h
+++ b/deps/v8/src/builtins/builtins-data-view-gen.h
@@ -5,17 +5,17 @@
#ifndef V8_BUILTINS_BUILTINS_DATA_VIEW_GEN_H_
#define V8_BUILTINS_BUILTINS_DATA_VIEW_GEN_H_
+#include "src/code-stub-assembler.h"
#include "src/elements-kind.h"
#include "src/objects/bigint.h"
-#include "torque-generated/builtins-base-from-dsl-gen.h"
namespace v8 {
namespace internal {
-class DataViewBuiltinsAssembler : public BaseBuiltinsFromDSLAssembler {
+class DataViewBuiltinsAssembler : public CodeStubAssembler {
public:
explicit DataViewBuiltinsAssembler(compiler::CodeAssemblerState* state)
- : BaseBuiltinsFromDSLAssembler(state) {}
+ : CodeStubAssembler(state) {}
TNode<Int32T> LoadUint8(TNode<RawPtrT> data_pointer, TNode<UintPtrT> offset) {
return UncheckedCast<Int32T>(
@@ -37,19 +37,19 @@ class DataViewBuiltinsAssembler : public BaseBuiltinsFromDSLAssembler {
return ElementsKindToByteSize(elements_kind);
}
- TNode<IntPtrT> DataViewEncodeBigIntBits(bool sign, int32_t digits) {
- return IntPtrConstant(BigInt::SignBits::encode(sign) |
- BigInt::LengthBits::encode(digits));
+ TNode<Uint32T> DataViewEncodeBigIntBits(bool sign, int32_t digits) {
+ return Unsigned(Int32Constant(BigInt::SignBits::encode(sign) |
+ BigInt::LengthBits::encode(digits)));
}
- TNode<UintPtrT> DataViewDecodeBigIntLength(TNode<BigInt> value) {
- TNode<WordT> bitfield = LoadBigIntBitfield(value);
- return DecodeWord<BigIntBase::LengthBits>(bitfield);
+ TNode<Uint32T> DataViewDecodeBigIntLength(TNode<BigInt> value) {
+ TNode<Word32T> bitfield = LoadBigIntBitfield(value);
+ return DecodeWord32<BigIntBase::LengthBits>(bitfield);
}
- TNode<UintPtrT> DataViewDecodeBigIntSign(TNode<BigInt> value) {
- TNode<WordT> bitfield = LoadBigIntBitfield(value);
- return DecodeWord<BigIntBase::SignBits>(bitfield);
+ TNode<Uint32T> DataViewDecodeBigIntSign(TNode<BigInt> value) {
+ TNode<Word32T> bitfield = LoadBigIntBitfield(value);
+ return DecodeWord32<BigIntBase::SignBits>(bitfield);
}
};
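
These helpers moved from word-sized to 32-bit bitfields; a sketch of the encode/decode pattern with an illustrative field layout (the shift and mask values below are assumptions, not V8's actual SignBits/LengthBits layout).

#include <cstdint>

constexpr uint32_t kSignShift = 0;
constexpr uint32_t kSignMask = 1u;
constexpr uint32_t kLengthShift = 1;
constexpr uint32_t kLengthMask = 0x7FFFFFFFu;

uint32_t EncodeBigIntBits(bool sign, uint32_t digits) {
  return (static_cast<uint32_t>(sign) << kSignShift) |
         ((digits & kLengthMask) << kLengthShift);
}

uint32_t DecodeBigIntLength(uint32_t bitfield) {
  return (bitfield >> kLengthShift) & kLengthMask;
}

bool DecodeBigIntSign(uint32_t bitfield) {
  return ((bitfield >> kSignShift) & kSignMask) != 0;
}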
diff --git a/deps/v8/src/builtins/builtins-date-gen.cc b/deps/v8/src/builtins/builtins-date-gen.cc
index e0cb199920..82a2549b60 100644
--- a/deps/v8/src/builtins/builtins-date-gen.cc
+++ b/deps/v8/src/builtins/builtins-date-gen.cc
@@ -44,8 +44,8 @@ void DateBuiltinsAssembler::Generate_DatePrototype_GetField(Node* context,
Node* cache_stamp = LoadObjectField(receiver, JSDate::kCacheStampOffset);
GotoIf(WordNotEqual(date_cache_stamp, cache_stamp), &stamp_mismatch);
- Return(LoadObjectField(
- receiver, JSDate::kValueOffset + field_index * kPointerSize));
+ Return(LoadObjectField(receiver,
+ JSDate::kValueOffset + field_index * kTaggedSize));
BIND(&stamp_mismatch);
}
diff --git a/deps/v8/src/builtins/builtins-date.cc b/deps/v8/src/builtins/builtins-date.cc
index 91da4d6d7d..dc8cb9e1e5 100644
--- a/deps/v8/src/builtins/builtins-date.cc
+++ b/deps/v8/src/builtins/builtins-date.cc
@@ -5,9 +5,9 @@
#include "src/builtins/builtins-utils-inl.h"
#include "src/builtins/builtins.h"
#include "src/code-factory.h"
-#include "src/code-stub-assembler.h"
#include "src/conversions.h"
#include "src/counters.h"
+#include "src/date.h"
#include "src/dateparser-inl.h"
#include "src/objects-inl.h"
#ifdef V8_INTL_SUPPORT
@@ -114,7 +114,7 @@ double ParseDateTimeString(Isolate* isolate, Handle<String> str) {
Handle<FixedArray> tmp =
isolate->factory()->NewFixedArray(DateParser::OUTPUT_SIZE);
DisallowHeapAllocation no_gc;
- String::FlatContent str_content = str->GetFlatContent();
+ String::FlatContent str_content = str->GetFlatContent(no_gc);
bool result;
if (str_content.IsOneByte()) {
result = DateParser::Parse(isolate, str_content.ToOneByteVector(), *tmp);
@@ -128,13 +128,16 @@ double ParseDateTimeString(Isolate* isolate, Handle<String> str) {
tmp->get(5)->Number(), tmp->get(6)->Number());
double date = MakeDate(day, time);
if (tmp->get(7)->IsNull(isolate)) {
- if (!std::isnan(date)) {
+ if (date >= -DateCache::kMaxTimeBeforeUTCInMs &&
+ date <= DateCache::kMaxTimeBeforeUTCInMs) {
date = isolate->date_cache()->ToUTC(static_cast<int64_t>(date));
+ } else {
+ return std::numeric_limits<double>::quiet_NaN();
}
} else {
date -= tmp->get(7)->Number() * 1000.0;
}
- return date;
+ return DateCache::TimeClip(date);
}
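
A hedged sketch of the new guard: out-of-range local times no longer reach the date cache, and the final result is clipped to the spec's time range. kMaxTimeToy follows ES #sec-timeclip (8.64e15 ms); kMaxTimeBeforeUTCInMsToy is a stand-in for DateCache::kMaxTimeBeforeUTCInMs, not its real value.

#include <cmath>
#include <limits>

constexpr double kMaxTimeToy = 8.64e15;  // ES maximum time value in ms
constexpr double kMaxTimeBeforeUTCInMsToy = kMaxTimeToy + 864e5;

double TimeClipToy(double time) {
  if (!std::isfinite(time) || std::abs(time) > kMaxTimeToy) {
    return std::numeric_limits<double>::quiet_NaN();
  }
  return std::trunc(time) + 0.0;  // also normalizes -0 to +0
}

double LocalToUTCClippedToy(double local_ms, double utc_offset_ms) {
  if (local_ms < -kMaxTimeBeforeUTCInMsToy ||
      local_ms > kMaxTimeBeforeUTCInMsToy) {
    return std::numeric_limits<double>::quiet_NaN();  // guarded branch
  }
  return TimeClipToy(local_ms - utc_offset_ms);
}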
enum ToDateStringMode { kDateOnly, kTimeOnly, kDateAndTime };
@@ -177,8 +180,8 @@ void ToDateString(double time_val, Vector<char> str, DateCache* date_cache,
UNREACHABLE();
}
-Object* SetLocalDateValue(Isolate* isolate, Handle<JSDate> date,
- double time_val) {
+Object SetLocalDateValue(Isolate* isolate, Handle<JSDate> date,
+ double time_val) {
if (time_val >= -DateCache::kMaxTimeBeforeUTCInMs &&
time_val <= DateCache::kMaxTimeBeforeUTCInMs) {
time_val = isolate->date_cache()->ToUTC(static_cast<int64_t>(time_val));
@@ -851,12 +854,11 @@ BUILTIN(DatePrototypeToLocaleDateString) {
RETURN_RESULT_OR_FAILURE(
isolate, JSDateTimeFormat::ToLocaleDateTime(
isolate,
- date, // date
- args.atOrUndefined(isolate, 1), // locales
- args.atOrUndefined(isolate, 2), // options
- JSDateTimeFormat::RequiredOption::kDate, // required
- JSDateTimeFormat::DefaultsOption::kDate, // defaults
- "dateformatdate")); // service
+ date, // date
+ args.atOrUndefined(isolate, 1), // locales
+ args.atOrUndefined(isolate, 2), // options
+ JSDateTimeFormat::RequiredOption::kDate, // required
+ JSDateTimeFormat::DefaultsOption::kDate)); // defaults
}
// ecma402 #sup-date.prototype.tolocalestring
@@ -870,12 +872,11 @@ BUILTIN(DatePrototypeToLocaleString) {
RETURN_RESULT_OR_FAILURE(
isolate, JSDateTimeFormat::ToLocaleDateTime(
isolate,
- date, // date
- args.atOrUndefined(isolate, 1), // locales
- args.atOrUndefined(isolate, 2), // options
- JSDateTimeFormat::RequiredOption::kAny, // required
- JSDateTimeFormat::DefaultsOption::kAll, // defaults
- "dateformatall")); // service
+ date, // date
+ args.atOrUndefined(isolate, 1), // locales
+ args.atOrUndefined(isolate, 2), // options
+ JSDateTimeFormat::RequiredOption::kAny, // required
+ JSDateTimeFormat::DefaultsOption::kAll)); // defaults
}
// ecma402 #sup-date.prototype.tolocaletimestring
@@ -889,12 +890,11 @@ BUILTIN(DatePrototypeToLocaleTimeString) {
RETURN_RESULT_OR_FAILURE(
isolate, JSDateTimeFormat::ToLocaleDateTime(
isolate,
- date, // date
- args.atOrUndefined(isolate, 1), // locales
- args.atOrUndefined(isolate, 2), // options
- JSDateTimeFormat::RequiredOption::kTime, // required
- JSDateTimeFormat::DefaultsOption::kTime, // defaults
- "dateformattime")); // service
+ date, // date
+ args.atOrUndefined(isolate, 1), // locales
+ args.atOrUndefined(isolate, 2), // options
+ JSDateTimeFormat::RequiredOption::kTime, // required
+ JSDateTimeFormat::DefaultsOption::kTime)); // defaults
}
#endif // V8_INTL_SUPPORT
@@ -939,8 +939,11 @@ BUILTIN(DatePrototypeSetYear) {
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, year,
Object::ToNumber(isolate, year));
double m = 0.0, dt = 1.0, y = year->Number();
- if (0.0 <= y && y <= 99.0) {
- y = 1900.0 + DoubleToInteger(y);
+ if (!std::isnan(y)) {
+ double y_int = DoubleToInteger(y);
+ if (0.0 <= y_int && y_int <= 99.0) {
+ y = 1900.0 + y_int;
+ }
}
int time_within_day = 0;
if (!std::isnan(date->value()->Number())) {
diff --git a/deps/v8/src/builtins/builtins-definitions.h b/deps/v8/src/builtins/builtins-definitions.h
index c47fa7b19b..e0abf90f0d 100644
--- a/deps/v8/src/builtins/builtins-definitions.h
+++ b/deps/v8/src/builtins/builtins-definitions.h
@@ -27,12 +27,13 @@ namespace internal {
// Args: name, interface descriptor
// BCH: Bytecode Handlers, with bytecode dispatch linkage.
// Args: name, OperandScale, Bytecode
-// DLH: Deserialize Lazy Handlers, with bytecode dispatch linkage.
-// Args: name, OperandScale
// ASM: Builtin in platform-dependent assembly.
-// Args: name
+// Args: name, interface descriptor
+
+// TODO(jgruber): Remove DummyDescriptor once all ASM builtins have been
+// properly associated with their descriptor.
-#define BUILTIN_LIST_BASE(CPP, API, TFJ, TFC, TFS, TFH, DLH, ASM) \
+#define BUILTIN_LIST_BASE(CPP, API, TFJ, TFC, TFS, TFH, ASM) \
  /* GC write barrier */                                                  \
TFC(RecordWrite, RecordWrite, 1) \
\
@@ -41,41 +42,41 @@ namespace internal {
TFC(AdaptorWithBuiltinExitFrame, CppBuiltinAdaptor, 1) \
\
/* Calls */ \
- ASM(ArgumentsAdaptorTrampoline) \
+ ASM(ArgumentsAdaptorTrampoline, ArgumentsAdaptor) \
/* ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList) */ \
- ASM(CallFunction_ReceiverIsNullOrUndefined) \
- ASM(CallFunction_ReceiverIsNotNullOrUndefined) \
- ASM(CallFunction_ReceiverIsAny) \
+ ASM(CallFunction_ReceiverIsNullOrUndefined, CallTrampoline) \
+ ASM(CallFunction_ReceiverIsNotNullOrUndefined, CallTrampoline) \
+ ASM(CallFunction_ReceiverIsAny, CallTrampoline) \
/* ES6 section 9.4.1.1 [[Call]] ( thisArgument, argumentsList) */ \
- ASM(CallBoundFunction) \
+ ASM(CallBoundFunction, Dummy) \
/* ES6 section 7.3.12 Call(F, V, [argumentsList]) */ \
- ASM(Call_ReceiverIsNullOrUndefined) \
- ASM(Call_ReceiverIsNotNullOrUndefined) \
- ASM(Call_ReceiverIsAny) \
+ ASM(Call_ReceiverIsNullOrUndefined, CallTrampoline) \
+ ASM(Call_ReceiverIsNotNullOrUndefined, CallTrampoline) \
+ ASM(Call_ReceiverIsAny, CallTrampoline) \
\
  /* ES6 section 9.5.12 [[Call]] ( thisArgument, argumentsList ) */       \
TFC(CallProxy, CallTrampoline, 1) \
- ASM(CallVarargs) \
+ ASM(CallVarargs, CallVarargs) \
TFC(CallWithSpread, CallWithSpread, 1) \
TFC(CallWithArrayLike, CallWithArrayLike, 1) \
- ASM(CallForwardVarargs) \
- ASM(CallFunctionForwardVarargs) \
+ ASM(CallForwardVarargs, CallForwardVarargs) \
+ ASM(CallFunctionForwardVarargs, CallForwardVarargs) \
\
/* Construct */ \
/* ES6 section 9.2.2 [[Construct]] ( argumentsList, newTarget) */ \
- ASM(ConstructFunction) \
+ ASM(ConstructFunction, JSTrampoline) \
/* ES6 section 9.4.1.2 [[Construct]] (argumentsList, newTarget) */ \
- ASM(ConstructBoundFunction) \
- ASM(ConstructedNonConstructable) \
+ ASM(ConstructBoundFunction, Dummy) \
+ ASM(ConstructedNonConstructable, Dummy) \
/* ES6 section 7.3.13 Construct (F, [argumentsList], [newTarget]) */ \
- ASM(Construct) \
- ASM(ConstructVarargs) \
+ ASM(Construct, JSTrampoline) \
+ ASM(ConstructVarargs, ConstructVarargs) \
TFC(ConstructWithSpread, ConstructWithSpread, 1) \
TFC(ConstructWithArrayLike, ConstructWithArrayLike, 1) \
- ASM(ConstructForwardVarargs) \
- ASM(ConstructFunctionForwardVarargs) \
- ASM(JSConstructStubGeneric) \
- ASM(JSBuiltinsConstructStub) \
+ ASM(ConstructForwardVarargs, ConstructForwardVarargs) \
+ ASM(ConstructFunctionForwardVarargs, ConstructForwardVarargs) \
+ ASM(JSConstructStubGeneric, Dummy) \
+ ASM(JSBuiltinsConstructStub, Dummy) \
TFC(FastNewObject, FastNewObject, 1) \
TFS(FastNewClosure, kSharedFunctionInfo, kFeedbackCell) \
TFC(FastNewFunctionContextEval, FastNewFunctionContext, 1) \
@@ -89,13 +90,16 @@ namespace internal {
TFC(ConstructProxy, JSTrampoline, 1) \
\
/* Apply and entries */ \
- ASM(JSEntryTrampoline) \
- ASM(JSConstructEntryTrampoline) \
- ASM(ResumeGeneratorTrampoline) \
+ ASM(JSEntry, Dummy) \
+ ASM(JSConstructEntry, Dummy) \
+ ASM(JSRunMicrotasksEntry, RunMicrotasksEntry) \
+ ASM(JSEntryTrampoline, Dummy) \
+ ASM(JSConstructEntryTrampoline, Dummy) \
+ ASM(ResumeGeneratorTrampoline, ResumeGenerator) \
\
/* Stack and interrupt check */ \
- ASM(InterruptCheck) \
- ASM(StackCheck) \
+ ASM(InterruptCheck, Dummy) \
+ ASM(StackCheck, Dummy) \
\
/* String helpers */ \
TFC(StringCharAt, StringAt, 1) \
@@ -114,27 +118,24 @@ namespace internal {
TFS(OrderedHashTableHealIndex, kTable, kIndex) \
\
/* Interpreter */ \
- ASM(InterpreterEntryTrampoline) \
- ASM(InterpreterPushArgsThenCall) \
- ASM(InterpreterPushUndefinedAndArgsThenCall) \
- ASM(InterpreterPushArgsThenCallWithFinalSpread) \
- ASM(InterpreterPushArgsThenConstruct) \
- ASM(InterpreterPushArgsThenConstructArrayFunction) \
- ASM(InterpreterPushArgsThenConstructWithFinalSpread) \
- ASM(InterpreterEnterBytecodeAdvance) \
- ASM(InterpreterEnterBytecodeDispatch) \
- ASM(InterpreterOnStackReplacement) \
+ ASM(InterpreterEntryTrampoline, Dummy) \
+ ASM(InterpreterPushArgsThenCall, InterpreterPushArgsThenCall) \
+ ASM(InterpreterPushUndefinedAndArgsThenCall, InterpreterPushArgsThenCall) \
+ ASM(InterpreterPushArgsThenCallWithFinalSpread, InterpreterPushArgsThenCall) \
+ ASM(InterpreterPushArgsThenConstruct, InterpreterPushArgsThenConstruct) \
+ ASM(InterpreterPushArgsThenConstructArrayFunction, \
+ InterpreterPushArgsThenConstruct) \
+ ASM(InterpreterPushArgsThenConstructWithFinalSpread, \
+ InterpreterPushArgsThenConstruct) \
+ ASM(InterpreterEnterBytecodeAdvance, Dummy) \
+ ASM(InterpreterEnterBytecodeDispatch, Dummy) \
+ ASM(InterpreterOnStackReplacement, ContextOnly) \
\
/* Code life-cycle */ \
TFC(CompileLazy, JSTrampoline, 1) \
TFC(CompileLazyDeoptimizedCode, JSTrampoline, 1) \
- TFC(DeserializeLazy, JSTrampoline, 1) \
- /* The three lazy bytecode handlers do not declare a bytecode. */ \
- DLH(DeserializeLazyHandler, interpreter::OperandScale::kSingle) \
- DLH(DeserializeLazyWideHandler, interpreter::OperandScale::kDouble) \
- DLH(DeserializeLazyExtraWideHandler, interpreter::OperandScale::kQuadruple) \
- ASM(InstantiateAsmJs) \
- ASM(NotifyDeoptimized) \
+ ASM(InstantiateAsmJs, Dummy) \
+ ASM(NotifyDeoptimized, Dummy) \
\
/* Trampolines called when returning from a deoptimization that expects */ \
/* to continue in a JavaScript builtin to finish the functionality of a */ \
@@ -156,12 +157,14 @@ namespace internal {
/* stack parameter to the JavaScript builtin by the "WithResult" */ \
/* trampoline variant. The plain variant is used in EAGER deopt contexts */ \
/* and has no such special handling. */ \
- ASM(ContinueToCodeStubBuiltin) \
- ASM(ContinueToCodeStubBuiltinWithResult) \
- ASM(ContinueToJavaScriptBuiltin) \
- ASM(ContinueToJavaScriptBuiltinWithResult) \
+ ASM(ContinueToCodeStubBuiltin, Dummy) \
+ ASM(ContinueToCodeStubBuiltinWithResult, Dummy) \
+ ASM(ContinueToJavaScriptBuiltin, Dummy) \
+ ASM(ContinueToJavaScriptBuiltinWithResult, Dummy) \
\
/* API callback handling */ \
+ ASM(CallApiCallback, ApiCallback) \
+ ASM(CallApiGetter, ApiGetter) \
API(HandleApiCall) \
API(HandleApiCallAsFunction) \
API(HandleApiCallAsConstructor) \
@@ -178,8 +181,8 @@ namespace internal {
\
/* Debugger */ \
TFJ(DebugBreakTrampoline, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
- ASM(FrameDropperTrampoline) \
- ASM(HandleDebuggerStatement) \
+ ASM(FrameDropperTrampoline, FrameDropperTrampoline) \
+ ASM(HandleDebuggerStatement, ContextOnly) \
\
/* Type conversions */ \
TFC(ToObject, TypeConversion, 1) \
@@ -203,6 +206,8 @@ namespace internal {
TFC(ToLength, TypeConversion, 1) \
TFC(Typeof, Typeof, 1) \
TFC(GetSuperConstructor, Typeof, 1) \
+ TFC(BigIntToI64, BigIntToI64, 1) \
+ TFC(I64ToBigInt, BigIntToWasmI64, 1) \
\
/* Type conversions continuations */ \
TFC(ToBooleanLazyDeoptContinuation, TypeConversionStackParameter, 1) \
@@ -221,9 +226,33 @@ namespace internal {
TFH(StoreGlobalIC_Slow, StoreWithVector) \
TFH(StoreIC_Uninitialized, StoreWithVector) \
TFH(StoreInArrayLiteralIC_Slow, StoreWithVector) \
+ TFH(KeyedLoadIC_SloppyArguments, LoadWithVector) \
+ TFH(LoadIndexedInterceptorIC, LoadWithVector) \
+ TFH(StoreInterceptorIC, StoreWithVector) \
+ TFH(KeyedStoreIC_SloppyArguments_Standard, StoreWithVector) \
+ TFH(KeyedStoreIC_SloppyArguments_GrowNoTransitionHandleCOW, StoreWithVector) \
+ TFH(KeyedStoreIC_SloppyArguments_NoTransitionIgnoreOOB, StoreWithVector) \
+ TFH(KeyedStoreIC_SloppyArguments_NoTransitionHandleCOW, StoreWithVector) \
+ TFH(StoreInArrayLiteralIC_Slow_Standard, StoreWithVector) \
+ TFH(StoreFastElementIC_Standard, StoreWithVector) \
+ TFH(StoreFastElementIC_GrowNoTransitionHandleCOW, StoreWithVector) \
+ TFH(StoreFastElementIC_NoTransitionIgnoreOOB, StoreWithVector) \
+ TFH(StoreFastElementIC_NoTransitionHandleCOW, StoreWithVector) \
+ TFH(StoreInArrayLiteralIC_Slow_GrowNoTransitionHandleCOW, StoreWithVector) \
+ TFH(StoreInArrayLiteralIC_Slow_NoTransitionIgnoreOOB, StoreWithVector) \
+ TFH(StoreInArrayLiteralIC_Slow_NoTransitionHandleCOW, StoreWithVector) \
+ TFH(KeyedStoreIC_Slow_Standard, StoreWithVector) \
+ TFH(KeyedStoreIC_Slow_GrowNoTransitionHandleCOW, StoreWithVector) \
+ TFH(KeyedStoreIC_Slow_NoTransitionIgnoreOOB, StoreWithVector) \
+ TFH(KeyedStoreIC_Slow_NoTransitionHandleCOW, StoreWithVector) \
+ TFH(ElementsTransitionAndStore_Standard, StoreTransition) \
+ TFH(ElementsTransitionAndStore_GrowNoTransitionHandleCOW, StoreTransition) \
+ TFH(ElementsTransitionAndStore_NoTransitionIgnoreOOB, StoreTransition) \
+ TFH(ElementsTransitionAndStore_NoTransitionHandleCOW, StoreTransition) \
\
/* Microtask helpers */ \
TFS(EnqueueMicrotask, kMicrotask) \
+ ASM(RunMicrotasksTrampoline, RunMicrotasksEntry) \
TFC(RunMicrotasks, RunMicrotasks, 1) \
\
/* Object property helpers */ \
@@ -278,15 +307,10 @@ namespace internal {
TFC(ArraySingleArgumentConstructor_HoleyDouble_DisableAllocationSites, \
ArraySingleArgumentConstructor, 1) \
TFC(ArrayNArgumentsConstructor, ArrayNArgumentsConstructor, 1) \
- ASM(InternalArrayConstructor) \
- ASM(InternalArrayConstructorImpl) \
+ ASM(InternalArrayConstructor, Dummy) \
+ ASM(InternalArrayConstructorImpl, Dummy) \
TFC(InternalArrayNoArgumentConstructor_Packed, ArrayNoArgumentConstructor, \
1) \
- TFC(InternalArrayNoArgumentConstructor_Holey, ArrayNoArgumentConstructor, 1) \
- TFC(InternalArraySingleArgumentConstructor_Packed, \
- ArraySingleArgumentConstructor, 1) \
- TFC(InternalArraySingleArgumentConstructor_Holey, \
- ArraySingleArgumentConstructor, 1) \
CPP(ArrayConcat) \
/* ES6 #sec-array.isarray */ \
TFJ(ArrayIsArray, 1, kReceiver, kArg) \
@@ -294,8 +318,6 @@ namespace internal {
CPP(ArrayPrototypeFill) \
/* ES6 #sec-array.from */ \
TFJ(ArrayFrom, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
- /* ES6 #sec-array.of */ \
- TFJ(ArrayOf, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
/* ES7 #sec-array.prototype.includes */ \
TFS(ArrayIncludesSmiOrObject, kElements, kSearchElement, kLength, \
kFromIndex) \
@@ -320,8 +342,6 @@ namespace internal {
/* ES6 #sec-array.prototype.shift */ \
CPP(ArrayShift) \
TFJ(ArrayPrototypeShift, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
- /* ES6 #sec-array.prototype.slice */ \
- TFJ(ArrayPrototypeSlice, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
/* ES6 #sec-array.prototype.unshift */ \
CPP(ArrayUnshift) \
/* Support for Array.from and other array-copying idioms */ \
@@ -344,14 +364,6 @@ namespace internal {
TFJ(ArraySomeLoopLazyDeoptContinuation, 5, kReceiver, kCallbackFn, kThisArg, \
kInitialK, kLength, kResult) \
TFJ(ArraySome, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
- /* ES6 #sec-array.prototype.filter */ \
- TFS(ArrayFilterLoopContinuation, kReceiver, kCallbackFn, kThisArg, kArray, \
- kObject, kInitialK, kLength, kTo) \
- TFJ(ArrayFilter, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
- TFJ(ArrayFilterLoopEagerDeoptContinuation, 6, kReceiver, kCallbackFn, \
- kThisArg, kArray, kInitialK, kLength, kTo) \
- TFJ(ArrayFilterLoopLazyDeoptContinuation, 8, kReceiver, kCallbackFn, \
- kThisArg, kArray, kInitialK, kLength, kValueK, kTo, kResult) \
/* ES6 #sec-array.prototype.foreach */ \
TFS(ArrayMapLoopContinuation, kReceiver, kCallbackFn, kThisArg, kArray, \
kObject, kInitialK, kLength, kTo) \
@@ -427,14 +439,14 @@ namespace internal {
CPP(ArrayBufferPrototypeSlice) \
\
/* AsyncFunction */ \
- TFJ(AsyncFunctionAwaitCaught, 3, kReceiver, kGenerator, kAwaited, \
- kOuterPromise) \
- TFJ(AsyncFunctionAwaitUncaught, 3, kReceiver, kGenerator, kAwaited, \
- kOuterPromise) \
+ TFS(AsyncFunctionEnter, kClosure, kReceiver) \
+ TFS(AsyncFunctionReject, kAsyncFunctionObject, kReason, kCanSuspend) \
+ TFS(AsyncFunctionResolve, kAsyncFunctionObject, kValue, kCanSuspend) \
+ TFC(AsyncFunctionLazyDeoptContinuation, AsyncFunctionStackParameter, 1) \
+ TFS(AsyncFunctionAwaitCaught, kAsyncFunctionObject, kValue) \
+ TFS(AsyncFunctionAwaitUncaught, kAsyncFunctionObject, kValue) \
TFJ(AsyncFunctionAwaitRejectClosure, 1, kReceiver, kSentError) \
TFJ(AsyncFunctionAwaitResolveClosure, 1, kReceiver, kSentValue) \
- TFJ(AsyncFunctionPromiseCreate, 0, kReceiver) \
- TFJ(AsyncFunctionPromiseRelease, 2, kReceiver, kPromise, kCanSuspend) \
\
/* BigInt */ \
CPP(BigIntConstructor) \
@@ -461,6 +473,7 @@ namespace internal {
CPP(CallSitePrototypeGetLineNumber) \
CPP(CallSitePrototypeGetMethodName) \
CPP(CallSitePrototypeGetPosition) \
+ CPP(CallSitePrototypeGetPromiseIndex) \
CPP(CallSitePrototypeGetScriptNameOrSourceURL) \
CPP(CallSitePrototypeGetThis) \
CPP(CallSitePrototypeGetTypeName) \
@@ -468,6 +481,7 @@ namespace internal {
CPP(CallSitePrototypeIsConstructor) \
CPP(CallSitePrototypeIsEval) \
CPP(CallSitePrototypeIsNative) \
+ CPP(CallSitePrototypeIsPromiseAll) \
CPP(CallSitePrototypeIsToplevel) \
CPP(CallSitePrototypeToString) \
\
@@ -581,14 +595,18 @@ namespace internal {
CPP(MakeTypeError) \
CPP(MakeURIError) \
\
+ /* ExtrasUtils */ \
+ CPP(ExtrasUtilsUncurryThis) \
+ CPP(ExtrasUtilsCallReflectApply) \
+ \
/* Function */ \
CPP(FunctionConstructor) \
- ASM(FunctionPrototypeApply) \
+ ASM(FunctionPrototypeApply, Dummy) \
CPP(FunctionPrototypeBind) \
/* ES6 #sec-function.prototype.bind */ \
TFJ(FastFunctionPrototypeBind, \
SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
- ASM(FunctionPrototypeCall) \
+ ASM(FunctionPrototypeCall, Dummy) \
/* ES6 #sec-function.prototype-@@hasinstance */ \
TFJ(FunctionPrototypeHasInstance, 1, kReceiver, kV) \
/* ES6 #sec-function.prototype.tostring */ \
@@ -677,6 +695,7 @@ namespace internal {
TFJ(MapPrototypeValues, 0, kReceiver) \
/* ES #sec-%mapiteratorprototype%.next */ \
TFJ(MapIteratorPrototypeNext, 0, kReceiver) \
+ TFS(MapIteratorToList, kSource) \
\
/* Math */ \
/* ES6 #sec-math.abs */ \
@@ -839,6 +858,7 @@ namespace internal {
/* ES #sec-object.prototype.tolocalestring */ \
TFJ(ObjectPrototypeToLocaleString, 0, kReceiver) \
CPP(ObjectSeal) \
+ TFS(ObjectToString, kReceiver) \
TFJ(ObjectValues, 1, kReceiver, kObject) \
\
/* instanceof */ \
@@ -913,8 +933,8 @@ namespace internal {
TFS(ProxySetProperty, kProxy, kName, kValue, kReceiverValue, kLanguageMode) \
\
/* Reflect */ \
- ASM(ReflectApply) \
- ASM(ReflectConstruct) \
+ ASM(ReflectApply, Dummy) \
+ ASM(ReflectConstruct, Dummy) \
CPP(ReflectDefineProperty) \
CPP(ReflectDeleteProperty) \
CPP(ReflectGet) \
@@ -1010,6 +1030,7 @@ namespace internal {
TFJ(SetPrototypeValues, 0, kReceiver) \
/* ES #sec-%setiteratorprototype%.next */ \
TFJ(SetIteratorPrototypeNext, 0, kReceiver) \
+ TFS(SetOrSetIteratorToList, kSource) \
\
/* SharedArrayBuffer */ \
CPP(SharedArrayBufferPrototypeGetByteLength) \
@@ -1218,10 +1239,14 @@ namespace internal {
TFJ(TypedArrayFrom, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
\
/* Wasm */ \
- ASM(WasmCompileLazy) \
+ ASM(WasmCompileLazy, Dummy) \
TFC(WasmAllocateHeapNumber, AllocateHeapNumber, 1) \
+ TFC(WasmAtomicWake, WasmAtomicWake, 1) \
+ TFC(WasmI32AtomicWait, WasmI32AtomicWait, 1) \
+ TFC(WasmI64AtomicWait, WasmI64AtomicWait, 1) \
TFC(WasmCallJavaScript, CallTrampoline, 1) \
- TFC(WasmGrowMemory, WasmGrowMemory, 1) \
+ TFC(WasmMemoryGrow, WasmMemoryGrow, 1) \
+ TFC(WasmRecordWrite, RecordWrite, 1) \
TFC(WasmStackGuard, NoContext, 1) \
TFC(WasmToNumber, TypeConversion, 1) \
TFC(WasmThrow, WasmThrow, 1) \
@@ -1234,6 +1259,11 @@ namespace internal {
TFS(ThrowWasmTrapFloatUnrepresentable) \
TFS(ThrowWasmTrapFuncInvalid) \
TFS(ThrowWasmTrapFuncSigMismatch) \
+ TFS(ThrowWasmTrapDataSegmentDropped) \
+ TFS(ThrowWasmTrapElemSegmentDropped) \
+ TFS(ThrowWasmTrapTableOutOfBounds) \
+ TFC(BigIntToWasmI64, BigIntToWasmI64, 1) \
+ TFC(WasmBigIntToI64, BigIntToI64, 1) \
\
/* WeakMap */ \
TFJ(WeakMapConstructor, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
@@ -1279,8 +1309,8 @@ namespace internal {
\
/* Await (proposal-async-iteration/#await), with resume behaviour */ \
/* specific to Async Generators. Internal / Not exposed to JS code. */ \
- TFJ(AsyncGeneratorAwaitCaught, 2, kReceiver, kGenerator, kAwaited) \
- TFJ(AsyncGeneratorAwaitUncaught, 2, kReceiver, kGenerator, kAwaited) \
+ TFS(AsyncGeneratorAwaitCaught, kAsyncGeneratorObject, kValue) \
+ TFS(AsyncGeneratorAwaitUncaught, kAsyncGeneratorObject, kValue) \
TFJ(AsyncGeneratorAwaitResolveClosure, 1, kReceiver, kValue) \
TFJ(AsyncGeneratorAwaitRejectClosure, 1, kReceiver, kValue) \
TFJ(AsyncGeneratorYieldResolveClosure, 1, kReceiver, kValue) \
@@ -1302,16 +1332,17 @@ namespace internal {
TFJ(AsyncIteratorValueUnwrap, 1, kReceiver, kValue) \
\
/* CEntry */ \
- ASM(CEntry_Return1_DontSaveFPRegs_ArgvOnStack_NoBuiltinExit) \
- ASM(CEntry_Return1_DontSaveFPRegs_ArgvOnStack_BuiltinExit) \
- ASM(CEntry_Return1_DontSaveFPRegs_ArgvInRegister_NoBuiltinExit) \
- ASM(CEntry_Return1_SaveFPRegs_ArgvOnStack_NoBuiltinExit) \
- ASM(CEntry_Return1_SaveFPRegs_ArgvOnStack_BuiltinExit) \
- ASM(CEntry_Return2_DontSaveFPRegs_ArgvOnStack_NoBuiltinExit) \
- ASM(CEntry_Return2_DontSaveFPRegs_ArgvOnStack_BuiltinExit) \
- ASM(CEntry_Return2_DontSaveFPRegs_ArgvInRegister_NoBuiltinExit) \
- ASM(CEntry_Return2_SaveFPRegs_ArgvOnStack_NoBuiltinExit) \
- ASM(CEntry_Return2_SaveFPRegs_ArgvOnStack_BuiltinExit) \
+ ASM(CEntry_Return1_DontSaveFPRegs_ArgvOnStack_NoBuiltinExit, Dummy) \
+ ASM(CEntry_Return1_DontSaveFPRegs_ArgvOnStack_BuiltinExit, Dummy) \
+ ASM(CEntry_Return1_DontSaveFPRegs_ArgvInRegister_NoBuiltinExit, Dummy) \
+ ASM(CEntry_Return1_SaveFPRegs_ArgvOnStack_NoBuiltinExit, Dummy) \
+ ASM(CEntry_Return1_SaveFPRegs_ArgvOnStack_BuiltinExit, Dummy) \
+ ASM(CEntry_Return2_DontSaveFPRegs_ArgvOnStack_NoBuiltinExit, Dummy) \
+ ASM(CEntry_Return2_DontSaveFPRegs_ArgvOnStack_BuiltinExit, Dummy) \
+ ASM(CEntry_Return2_DontSaveFPRegs_ArgvInRegister_NoBuiltinExit, Dummy) \
+ ASM(CEntry_Return2_SaveFPRegs_ArgvOnStack_NoBuiltinExit, Dummy) \
+ ASM(CEntry_Return2_SaveFPRegs_ArgvOnStack_BuiltinExit, Dummy) \
+ ASM(DirectCEntry, Dummy) \
\
/* String helpers */ \
TFS(StringAdd_CheckNone, kLeft, kRight) \
@@ -1320,17 +1351,28 @@ namespace internal {
TFS(SubString, kString, kFrom, kTo) \
\
/* Miscellaneous */ \
- ASM(CallApiCallback_Argc0) \
- ASM(CallApiCallback_Argc1) \
- ASM(CallApiGetter) \
- ASM(DoubleToI) \
+ ASM(DoubleToI, Dummy) \
TFC(GetProperty, GetProperty, 1) \
TFS(SetProperty, kReceiver, kKey, kValue) \
- ASM(MathPowInternal) \
+ TFS(SetPropertyInLiteral, kReceiver, kKey, kValue) \
+ ASM(MathPowInternal, Dummy) \
+ ASM(MemCopyUint8Uint8, CCall) \
+ ASM(MemCopyUint16Uint8, CCall) \
+ ASM(MemMove, CCall) \
\
/* Trace */ \
CPP(IsTraceCategoryEnabled) \
- CPP(Trace)
+ CPP(Trace) \
+ \
+ /* Weak refs */ \
+ CPP(WeakCellClear) \
+ CPP(WeakCellHoldingsGetter) \
+ CPP(WeakFactoryCleanupIteratorNext) \
+ CPP(WeakFactoryCleanupSome) \
+ CPP(WeakFactoryConstructor) \
+ CPP(WeakFactoryMakeCell) \
+ CPP(WeakRefConstructor) \
+ CPP(WeakRefDeref)
#ifdef V8_INTL_SUPPORT
#define BUILTIN_LIST_INTL(CPP, TFJ, TFS) \
@@ -1361,6 +1403,8 @@ namespace internal {
CPP(DateTimeFormatPrototypeResolvedOptions) \
/* ecma402 #sec-intl.datetimeformat.supportedlocalesof */ \
CPP(DateTimeFormatSupportedLocalesOf) \
+ /* ecma402 #sec-intl.getcanonicallocales */ \
+ CPP(IntlGetCanonicalLocales) \
/* ecma402 #sec-intl-listformat-constructor */ \
CPP(ListFormatConstructor) \
/* ecma402 #sec-intl-list-format.prototype.format */ \
@@ -1419,6 +1463,24 @@ namespace internal {
CPP(RelativeTimeFormatPrototypeResolvedOptions) \
/* ecma402 #sec-intl.RelativeTimeFormat.supportedlocalesof */ \
CPP(RelativeTimeFormatSupportedLocalesOf) \
+ /* ecma402 #sec-Intl.Segmenter */ \
+ CPP(SegmenterConstructor) \
+ /* ecma402 #sec-Intl.Segmenter.prototype.resolvedOptions */ \
+ CPP(SegmenterPrototypeResolvedOptions) \
+ /* ecma402 #sec-Intl.Segmenter.prototype.segment */ \
+ CPP(SegmenterPrototypeSegment) \
+ /* ecma402 #sec-Intl.Segmenter.supportedLocalesOf */ \
+ CPP(SegmenterSupportedLocalesOf) \
+ /* ecma402 #sec-segment-iterator-prototype-breakType */ \
+ CPP(SegmentIteratorPrototypeBreakType) \
+ /* ecma402 #sec-segment-iterator-prototype-following */ \
+ CPP(SegmentIteratorPrototypeFollowing) \
+ /* ecma402 #sec-segment-iterator-prototype-preceding */ \
+ CPP(SegmentIteratorPrototypePreceding) \
+ /* ecma402 #sec-segment-iterator-prototype-index */ \
+ CPP(SegmentIteratorPrototypeIndex) \
+ /* ecma402 #sec-segment-iterator-prototype-next */ \
+ CPP(SegmentIteratorPrototypeNext) \
/* ES #sec-string.prototype.normalize */ \
CPP(StringPrototypeNormalizeIntl) \
/* ecma402 #sup-string.prototype.tolocalelowercase */ \
@@ -1430,12 +1492,6 @@ namespace internal {
/* ES #sec-string.prototype.touppercase */ \
CPP(StringPrototypeToUpperCaseIntl) \
TFS(StringToLowerCaseIntl, kString) \
- /* ecma402 #sec-Intl.Segmenter */ \
- CPP(SegmenterConstructor) \
- /* ecma402 #sec-Intl.Segmenter.prototype.resolvedOptions */ \
- CPP(SegmenterPrototypeResolvedOptions) \
- /* ecma402 #sec-Intl.Segmenter.supportedLocalesOf */ \
- CPP(SegmenterSupportedLocalesOf) \
CPP(V8BreakIteratorConstructor) \
CPP(V8BreakIteratorInternalAdoptText) \
CPP(V8BreakIteratorInternalBreakType) \
@@ -1463,10 +1519,10 @@ namespace internal {
CPP(StringPrototypeToUpperCase)
#endif // V8_INTL_SUPPORT
-#define BUILTIN_LIST(CPP, API, TFJ, TFC, TFS, TFH, BCH, DLH, ASM) \
- BUILTIN_LIST_BASE(CPP, API, TFJ, TFC, TFS, TFH, DLH, ASM) \
- BUILTIN_LIST_FROM_DSL(CPP, API, TFJ, TFC, TFS, TFH, ASM) \
- BUILTIN_LIST_INTL(CPP, TFJ, TFS) \
+#define BUILTIN_LIST(CPP, API, TFJ, TFC, TFS, TFH, BCH, ASM) \
+ BUILTIN_LIST_BASE(CPP, API, TFJ, TFC, TFS, TFH, ASM) \
+ BUILTIN_LIST_FROM_DSL(CPP, API, TFJ, TFC, TFS, TFH, ASM) \
+ BUILTIN_LIST_INTL(CPP, TFJ, TFS) \
BUILTIN_LIST_BYTECODE_HANDLERS(BCH)
// The exceptions thrown in the following builtins are caught
@@ -1492,12 +1548,18 @@ namespace internal {
#define WASM_RUNTIME_STUB_LIST(V, VTRAP) \
FOREACH_WASM_TRAPREASON(VTRAP) \
V(WasmAllocateHeapNumber) \
+ V(WasmAtomicWake) \
+ V(WasmI32AtomicWait) \
+ V(WasmI64AtomicWait) \
V(WasmCallJavaScript) \
- V(WasmGrowMemory) \
+ V(WasmMemoryGrow) \
+ V(WasmRecordWrite) \
V(WasmStackGuard) \
V(WasmToNumber) \
V(WasmThrow) \
- V(DoubleToI)
+ V(DoubleToI) \
+ V(BigIntToWasmI64) \
+ V(WasmBigIntToI64)
// The exceptions thrown in the following builtins are caught internally and will
// not be propagated further or re-thrown
@@ -1507,27 +1569,23 @@ namespace internal {
#define BUILTIN_LIST_C(V) \
BUILTIN_LIST(V, V, IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN, \
- IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN)
+ IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN)
#define BUILTIN_LIST_A(V) \
BUILTIN_LIST(IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN, \
- IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN, \
- V)
+ IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN, V)
#define BUILTIN_LIST_TFS(V) \
BUILTIN_LIST(IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN, \
- V, IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN, \
- IGNORE_BUILTIN)
+ V, IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN)
-#define BUILTIN_LIST_TFJ(V) \
- BUILTIN_LIST(IGNORE_BUILTIN, IGNORE_BUILTIN, V, IGNORE_BUILTIN, \
- IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN, \
- IGNORE_BUILTIN)
+#define BUILTIN_LIST_TFJ(V) \
+ BUILTIN_LIST(IGNORE_BUILTIN, IGNORE_BUILTIN, V, IGNORE_BUILTIN, \
+ IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN)
-#define BUILTIN_LIST_TFC(V) \
- BUILTIN_LIST(IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN, V, \
- IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN, \
- IGNORE_BUILTIN)
+#define BUILTIN_LIST_TFC(V) \
+ BUILTIN_LIST(IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN, V, \
+ IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN)
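These BUILTIN_LIST_* wrappers are the standard X-macro column-selection idiom: BUILTIN_LIST takes one macro argument per builtin flavor, and each wrapper passes V for the column it wants and IGNORE_BUILTIN for everything else. A minimal standalone sketch of the idiom, with illustrative names only:

    #include <cstdio>

    #define IGNORE(...)  // expands to nothing

    // One row per builtin; the first column is CPP-flavored, the second
    // ASM-flavored.
    #define LIST(CPP, ASM) \
      CPP(Foo)             \
      ASM(Bar)

    #define LIST_C(V) LIST(V, IGNORE)  // keep only the CPP column
    #define LIST_A(V) LIST(IGNORE, V)  // keep only the ASM column

    #define PRINT_NAME(Name) std::puts(#Name);

    int main() {
      LIST_C(PRINT_NAME)  // prints: Foo
      LIST_A(PRINT_NAME)  // prints: Bar
    }

Dropping the DLH column is what forces every BUILTIN_LIST_* call site in this patch to lose one IGNORE_BUILTIN argument.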
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/builtins/builtins-descriptors.h b/deps/v8/src/builtins/builtins-descriptors.h
index 2961a61f63..1163730599 100644
--- a/deps/v8/src/builtins/builtins-descriptors.h
+++ b/deps/v8/src/builtins/builtins-descriptors.h
@@ -41,15 +41,19 @@ namespace internal {
#define DEFINE_TFH_INTERFACE_DESCRIPTOR(Name, InterfaceDescriptor) \
typedef InterfaceDescriptor##Descriptor Builtin_##Name##_InterfaceDescriptor;
+#define DEFINE_ASM_INTERFACE_DESCRIPTOR(Name, InterfaceDescriptor) \
+ typedef InterfaceDescriptor##Descriptor Builtin_##Name##_InterfaceDescriptor;
+
BUILTIN_LIST(IGNORE_BUILTIN, IGNORE_BUILTIN, DEFINE_TFJ_INTERFACE_DESCRIPTOR,
DEFINE_TFC_INTERFACE_DESCRIPTOR, DEFINE_TFS_INTERFACE_DESCRIPTOR,
- DEFINE_TFH_INTERFACE_DESCRIPTOR, IGNORE_BUILTIN, IGNORE_BUILTIN,
- IGNORE_BUILTIN)
+ DEFINE_TFH_INTERFACE_DESCRIPTOR, IGNORE_BUILTIN,
+ DEFINE_ASM_INTERFACE_DESCRIPTOR)
#undef DEFINE_TFJ_INTERFACE_DESCRIPTOR
#undef DEFINE_TFC_INTERFACE_DESCRIPTOR
#undef DEFINE_TFS_INTERFACE_DESCRIPTOR
#undef DEFINE_TFH_INTERFACE_DESCRIPTOR
+#undef DEFINE_ASM_INTERFACE_DESCRIPTOR
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/builtins/builtins-error.cc b/deps/v8/src/builtins/builtins-error.cc
index 6f07618172..6defa92ca3 100644
--- a/deps/v8/src/builtins/builtins-error.cc
+++ b/deps/v8/src/builtins/builtins-error.cc
@@ -85,8 +85,8 @@ BUILTIN(ErrorPrototypeToString) {
namespace {
-Object* MakeGenericError(Isolate* isolate, BuiltinArguments args,
- Handle<JSFunction> constructor) {
+Object MakeGenericError(Isolate* isolate, BuiltinArguments args,
+ Handle<JSFunction> constructor) {
Handle<Object> template_index = args.atOrUndefined(isolate, 1);
Handle<Object> arg0 = args.atOrUndefined(isolate, 2);
Handle<Object> arg1 = args.atOrUndefined(isolate, 3);
@@ -95,9 +95,10 @@ Object* MakeGenericError(Isolate* isolate, BuiltinArguments args,
DCHECK(template_index->IsSmi());
RETURN_RESULT_OR_FAILURE(
- isolate, ErrorUtils::MakeGenericError(isolate, constructor,
- Smi::ToInt(*template_index), arg0,
- arg1, arg2, SKIP_NONE));
+ isolate, ErrorUtils::MakeGenericError(
+ isolate, constructor,
+ MessageTemplateFromInt(Smi::ToInt(*template_index)), arg0,
+ arg1, arg2, SKIP_NONE));
}
} // namespace
@@ -126,7 +127,7 @@ BUILTIN(MakeURIError) {
HandleScope scope(isolate);
Handle<JSFunction> constructor = isolate->uri_error_function();
Handle<Object> undefined = isolate->factory()->undefined_value();
- const int template_index = MessageTemplate::kURIMalformed;
+ MessageTemplate template_index = MessageTemplate::kURIMalformed;
RETURN_RESULT_OR_FAILURE(
isolate,
ErrorUtils::MakeGenericError(isolate, constructor, template_index,
diff --git a/deps/v8/src/builtins/builtins-extras-utils.cc b/deps/v8/src/builtins/builtins-extras-utils.cc
new file mode 100644
index 0000000000..31d5fd3069
--- /dev/null
+++ b/deps/v8/src/builtins/builtins-extras-utils.cc
@@ -0,0 +1,93 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/builtins/builtins-utils-inl.h"
+#include "src/builtins/builtins.h"
+#include "src/elements.h"
+
+#include "src/counters.h"
+#include "src/objects-inl.h"
+
+namespace v8 {
+namespace internal {
+
+namespace {
+enum UncurryThisFunctionContextSlot {
+ kFunctionSlot = Context::MIN_CONTEXT_SLOTS,
+ kFunctionContextLength,
+};
+} // namespace
+
+// These functions are key for safe meta-programming:
+// http://wiki.ecmascript.org/doku.php?id=conventions:safe_meta_programming
+//
+// Technically they could all be derived from combinations of
+// Function.prototype.{bind,call,apply} but that introduces lots of layers of
+// indirection.
+//
+// Equivalent to:
+//
+// function uncurryThis(func) {
+// return function(thisArg, ...args) {
+// return %reflect_apply(func, thisArg, args);
+// };
+// };
+//
+BUILTIN(ExtrasUtilsUncurryThis) {
+ HandleScope scope(isolate);
+
+ DCHECK_EQ(2, args.length());
+ Handle<JSFunction> function = args.at<JSFunction>(1);
+ Handle<NativeContext> native_context(isolate->context()->native_context(),
+ isolate);
+ Handle<Context> context = isolate->factory()->NewBuiltinContext(
+ native_context,
+ static_cast<int>(UncurryThisFunctionContextSlot::kFunctionContextLength));
+
+ context->set(static_cast<int>(UncurryThisFunctionContextSlot::kFunctionSlot),
+ *function);
+
+ Handle<SharedFunctionInfo> info =
+ isolate->factory()->NewSharedFunctionInfoForBuiltin(
+ isolate->factory()->empty_string(),
+ Builtins::kExtrasUtilsCallReflectApply, kNormalFunction);
+ info->DontAdaptArguments();
+
+ Handle<Map> map = isolate->strict_function_without_prototype_map();
+ Handle<JSFunction> new_bound_function =
+ isolate->factory()->NewFunctionFromSharedFunctionInfo(map, info, context);
+
+ return *new_bound_function;
+}
+
+BUILTIN(ExtrasUtilsCallReflectApply) {
+ HandleScope scope(isolate);
+ Handle<Context> context(isolate->context(), isolate);
+ Handle<NativeContext> native_context(isolate->context()->native_context(),
+ isolate);
+ Handle<JSFunction> function(
+ JSFunction::cast(context->get(
+ static_cast<int>(UncurryThisFunctionContextSlot::kFunctionSlot))),
+ isolate);
+
+ Handle<Object> this_arg = args.at(1);
+
+ int const rest_args_start = 2;
+ Arguments argv(args.length() - rest_args_start,
+ args.address_of_arg_at(rest_args_start));
+ Handle<JSArray> rest_args_array = isolate->factory()->NewJSArray(0);
+ RETURN_FAILURE_ON_EXCEPTION(
+ isolate, ArrayConstructInitializeElements(rest_args_array, &argv));
+
+ Handle<Object> reflect_apply_args[] = {function, this_arg, rest_args_array};
+ Handle<JSFunction> reflect_apply(native_context->reflect_apply(), isolate);
+ RETURN_RESULT_OR_FAILURE(
+ isolate,
+ Execution::Call(isolate, reflect_apply,
+ isolate->factory()->undefined_value(),
+ arraysize(reflect_apply_args), reflect_apply_args));
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/builtins/builtins-function-gen.cc b/deps/v8/src/builtins/builtins-function-gen.cc
index 2f3c876852..b8fb69256c 100644
--- a/deps/v8/src/builtins/builtins-function-gen.cc
+++ b/deps/v8/src/builtins/builtins-function-gen.cc
@@ -47,12 +47,12 @@ TF_BUILTIN(FastFunctionPrototypeBind, CodeStubAssembler) {
Comment("Check descriptor array length");
TNode<DescriptorArray> descriptors = LoadMapDescriptors(receiver_map);
// Minimum descriptor array length required for fast path.
- const int min_descriptors_length = DescriptorArray::LengthFor(Max(
- JSFunction::kLengthDescriptorIndex, JSFunction::kNameDescriptorIndex));
- TNode<Smi> descriptors_length = LoadWeakFixedArrayLength(descriptors);
- GotoIf(SmiLessThanOrEqual(descriptors_length,
- SmiConstant(min_descriptors_length)),
- &slow);
+ const int min_nof_descriptors = i::Max(JSFunction::kLengthDescriptorIndex,
+ JSFunction::kNameDescriptorIndex);
+ TNode<Int32T> nof_descriptors = LoadNumberOfDescriptors(descriptors);
+ GotoIf(
+ Int32LessThanOrEqual(nof_descriptors, Int32Constant(min_nof_descriptors)),
+ &slow);
// Check whether the length and name properties are still present as
// AccessorInfo objects. In that case, their value can be recomputed even if
@@ -60,24 +60,23 @@ TF_BUILTIN(FastFunctionPrototypeBind, CodeStubAssembler) {
Comment("Check name and length properties");
{
const int length_index = JSFunction::kLengthDescriptorIndex;
- TNode<Name> maybe_length = CAST(LoadWeakFixedArrayElement(
- descriptors, DescriptorArray::ToKeyIndex(length_index)));
+ TNode<Name> maybe_length =
+ LoadKeyByDescriptorEntry(descriptors, length_index);
GotoIf(WordNotEqual(maybe_length, LoadRoot(RootIndex::klength_string)),
&slow);
- TNode<Object> maybe_length_accessor = CAST(LoadWeakFixedArrayElement(
- descriptors, DescriptorArray::ToValueIndex(length_index)));
+ TNode<Object> maybe_length_accessor =
+ LoadValueByDescriptorEntry(descriptors, length_index);
GotoIf(TaggedIsSmi(maybe_length_accessor), &slow);
Node* length_value_map = LoadMap(CAST(maybe_length_accessor));
GotoIfNot(IsAccessorInfoMap(length_value_map), &slow);
const int name_index = JSFunction::kNameDescriptorIndex;
- TNode<Name> maybe_name = CAST(LoadWeakFixedArrayElement(
- descriptors, DescriptorArray::ToKeyIndex(name_index)));
+ TNode<Name> maybe_name = LoadKeyByDescriptorEntry(descriptors, name_index);
GotoIf(WordNotEqual(maybe_name, LoadRoot(RootIndex::kname_string)), &slow);
- TNode<Object> maybe_name_accessor = CAST(LoadWeakFixedArrayElement(
- descriptors, DescriptorArray::ToValueIndex(name_index)));
+ TNode<Object> maybe_name_accessor =
+ LoadValueByDescriptorEntry(descriptors, name_index);
GotoIf(TaggedIsSmi(maybe_name_accessor), &slow);
TNode<Map> name_value_map = LoadMap(CAST(maybe_name_accessor));
GotoIfNot(IsAccessorInfoMap(name_value_map), &slow);
diff --git a/deps/v8/src/builtins/builtins-function.cc b/deps/v8/src/builtins/builtins-function.cc
index 43a3853715..cd68b261cc 100644
--- a/deps/v8/src/builtins/builtins-function.cc
+++ b/deps/v8/src/builtins/builtins-function.cc
@@ -177,7 +177,7 @@ BUILTIN(AsyncGeneratorFunctionConstructor) {
namespace {
-Object* DoFunctionBind(Isolate* isolate, BuiltinArguments args) {
+Object DoFunctionBind(Isolate* isolate, BuiltinArguments args) {
HandleScope scope(isolate);
DCHECK_LE(1, args.length());
if (!args.receiver()->IsCallable()) {
diff --git a/deps/v8/src/builtins/builtins-handler-gen.cc b/deps/v8/src/builtins/builtins-handler-gen.cc
index 8b5dc182cf..aeaa804856 100644
--- a/deps/v8/src/builtins/builtins-handler-gen.cc
+++ b/deps/v8/src/builtins/builtins-handler-gen.cc
@@ -12,6 +12,35 @@
namespace v8 {
namespace internal {
+class HandlerBuiltinsAssembler : public CodeStubAssembler {
+ public:
+ explicit HandlerBuiltinsAssembler(compiler::CodeAssemblerState* state)
+ : CodeStubAssembler(state) {}
+
+ protected:
+ void Generate_KeyedStoreIC_SloppyArguments();
+ void Generate_KeyedStoreIC_Slow();
+ void Generate_StoreInArrayLiteralIC_Slow();
+
+ // Essentially turns runtime elements kinds (TNode<Int32T>) into
+ // compile-time types (int) by dispatching over the runtime type and
+ // emitting a specialized copy of the given case function for each elements
+ // kind. Use with caution. This produces a *lot* of code.
+ typedef std::function<void(ElementsKind)> ElementsKindSwitchCase;
+ void DispatchByElementsKind(TNode<Int32T> elements_kind,
+ const ElementsKindSwitchCase& case_function);
+
+ // Dispatches over all possible combinations of {from,to} elements kinds.
+ typedef std::function<void(ElementsKind, ElementsKind)>
+ ElementsKindTransitionSwitchCase;
+ void DispatchForElementsKindTransition(
+ TNode<Int32T> from_kind, TNode<Int32T> to_kind,
+ const ElementsKindTransitionSwitchCase& case_function);
+
+ void Generate_ElementsTransitionAndStore(KeyedAccessStoreMode store_mode);
+ void Generate_StoreFastElementIC(KeyedAccessStoreMode store_mode);
+};
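Both Dispatch* helpers run at code-generation time: the emitted Switch picks a label from the runtime elements kind, while the C++ case function is invoked once per kind with a compile-time constant, so each branch body is generated fully specialized. A rough plain-C++ analogue of that shape (a sketch, not CSA code):

    #include <cstdio>
    #include <functional>

    enum ElementsKind { PACKED_SMI_ELEMENTS, HOLEY_SMI_ELEMENTS, PACKED_ELEMENTS };

    // The case function sees a constant kind per case, mirroring how the CSA
    // builder calls case_function(KIND) while emitting each switch arm.
    void DispatchByElementsKind(int runtime_kind,
                                const std::function<void(ElementsKind)>& body) {
      switch (runtime_kind) {
        case PACKED_SMI_ELEMENTS: body(PACKED_SMI_ELEMENTS); break;
        case HOLEY_SMI_ELEMENTS:  body(HOLEY_SMI_ELEMENTS);  break;
        case PACKED_ELEMENTS:     body(PACKED_ELEMENTS);     break;
        default: std::puts("unreachable");  // the deferred unknown-type case
      }
    }

    int main() {
      DispatchByElementsKind(PACKED_ELEMENTS, [](ElementsKind kind) {
        std::printf("store specialized for kind %d\n", kind);
      });
    }

Each additional kind multiplies the generated code, which is why the comment warns that this produces a *lot* of code.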
+
TF_BUILTIN(LoadIC_StringLength, CodeStubAssembler) {
Node* string = Parameter(Descriptor::kReceiver);
Return(LoadStringLengthAsSmi(string));
@@ -41,7 +70,8 @@ void Builtins::Generate_StoreIC_Uninitialized(
StoreICUninitializedGenerator::Generate(state);
}
-TF_BUILTIN(KeyedStoreIC_Slow, CodeStubAssembler) {
+void HandlerBuiltinsAssembler::Generate_KeyedStoreIC_Slow() {
+ typedef StoreWithVectorDescriptor Descriptor;
Node* receiver = Parameter(Descriptor::kReceiver);
Node* name = Parameter(Descriptor::kName);
Node* value = Parameter(Descriptor::kValue);
@@ -55,7 +85,29 @@ TF_BUILTIN(KeyedStoreIC_Slow, CodeStubAssembler) {
receiver, name);
}
-TF_BUILTIN(StoreInArrayLiteralIC_Slow, CodeStubAssembler) {
+TF_BUILTIN(KeyedStoreIC_Slow, HandlerBuiltinsAssembler) {
+ Generate_KeyedStoreIC_Slow();
+}
+
+TF_BUILTIN(KeyedStoreIC_Slow_Standard, HandlerBuiltinsAssembler) {
+ Generate_KeyedStoreIC_Slow();
+}
+
+TF_BUILTIN(KeyedStoreIC_Slow_GrowNoTransitionHandleCOW,
+ HandlerBuiltinsAssembler) {
+ Generate_KeyedStoreIC_Slow();
+}
+
+TF_BUILTIN(KeyedStoreIC_Slow_NoTransitionIgnoreOOB, HandlerBuiltinsAssembler) {
+ Generate_KeyedStoreIC_Slow();
+}
+
+TF_BUILTIN(KeyedStoreIC_Slow_NoTransitionHandleCOW, HandlerBuiltinsAssembler) {
+ Generate_KeyedStoreIC_Slow();
+}
+
+void HandlerBuiltinsAssembler::Generate_StoreInArrayLiteralIC_Slow() {
+ typedef StoreWithVectorDescriptor Descriptor;
Node* array = Parameter(Descriptor::kReceiver);
Node* index = Parameter(Descriptor::kName);
Node* value = Parameter(Descriptor::kValue);
@@ -64,6 +116,253 @@ TF_BUILTIN(StoreInArrayLiteralIC_Slow, CodeStubAssembler) {
index);
}
+TF_BUILTIN(StoreInArrayLiteralIC_Slow, HandlerBuiltinsAssembler) {
+ Generate_StoreInArrayLiteralIC_Slow();
+}
+
+TF_BUILTIN(StoreInArrayLiteralIC_Slow_Standard, HandlerBuiltinsAssembler) {
+ Generate_StoreInArrayLiteralIC_Slow();
+}
+
+TF_BUILTIN(StoreInArrayLiteralIC_Slow_GrowNoTransitionHandleCOW,
+ HandlerBuiltinsAssembler) {
+ Generate_StoreInArrayLiteralIC_Slow();
+}
+
+TF_BUILTIN(StoreInArrayLiteralIC_Slow_NoTransitionIgnoreOOB,
+ HandlerBuiltinsAssembler) {
+ Generate_StoreInArrayLiteralIC_Slow();
+}
+
+TF_BUILTIN(StoreInArrayLiteralIC_Slow_NoTransitionHandleCOW,
+ HandlerBuiltinsAssembler) {
+ Generate_StoreInArrayLiteralIC_Slow();
+}
+
+// All possible fast-to-fast transitions. Transitions to dictionary mode are not
+// handled by ElementsTransitionAndStore.
+#define ELEMENTS_KIND_TRANSITIONS(V) \
+ V(PACKED_SMI_ELEMENTS, HOLEY_SMI_ELEMENTS) \
+ V(PACKED_SMI_ELEMENTS, PACKED_DOUBLE_ELEMENTS) \
+ V(PACKED_SMI_ELEMENTS, HOLEY_DOUBLE_ELEMENTS) \
+ V(PACKED_SMI_ELEMENTS, PACKED_ELEMENTS) \
+ V(PACKED_SMI_ELEMENTS, HOLEY_ELEMENTS) \
+ V(HOLEY_SMI_ELEMENTS, HOLEY_DOUBLE_ELEMENTS) \
+ V(HOLEY_SMI_ELEMENTS, HOLEY_ELEMENTS) \
+ V(PACKED_DOUBLE_ELEMENTS, HOLEY_DOUBLE_ELEMENTS) \
+ V(PACKED_DOUBLE_ELEMENTS, PACKED_ELEMENTS) \
+ V(PACKED_DOUBLE_ELEMENTS, HOLEY_ELEMENTS) \
+ V(HOLEY_DOUBLE_ELEMENTS, HOLEY_ELEMENTS) \
+ V(PACKED_ELEMENTS, HOLEY_ELEMENTS)
+
+void HandlerBuiltinsAssembler::DispatchForElementsKindTransition(
+ TNode<Int32T> from_kind, TNode<Int32T> to_kind,
+ const ElementsKindTransitionSwitchCase& case_function) {
+ STATIC_ASSERT(sizeof(ElementsKind) == sizeof(uint8_t));
+
+ Label next(this), if_unknown_type(this, Label::kDeferred);
+
+ int32_t combined_elements_kinds[] = {
+#define ELEMENTS_KINDS_CASE(FROM, TO) (FROM << kBitsPerByte) | TO,
+ ELEMENTS_KIND_TRANSITIONS(ELEMENTS_KINDS_CASE)
+#undef ELEMENTS_KINDS_CASE
+ };
+
+#define ELEMENTS_KINDS_CASE(FROM, TO) Label if_##FROM##_##TO(this);
+ ELEMENTS_KIND_TRANSITIONS(ELEMENTS_KINDS_CASE)
+#undef ELEMENTS_KINDS_CASE
+
+ Label* elements_kind_labels[] = {
+#define ELEMENTS_KINDS_CASE(FROM, TO) &if_##FROM##_##TO,
+ ELEMENTS_KIND_TRANSITIONS(ELEMENTS_KINDS_CASE)
+#undef ELEMENTS_KINDS_CASE
+ };
+ STATIC_ASSERT(arraysize(combined_elements_kinds) ==
+ arraysize(elements_kind_labels));
+
+ TNode<Word32T> combined_elements_kind =
+ Word32Or(Word32Shl(from_kind, Int32Constant(kBitsPerByte)), to_kind);
+
+ Switch(combined_elements_kind, &if_unknown_type, combined_elements_kinds,
+ elements_kind_labels, arraysize(combined_elements_kinds));
+
+#define ELEMENTS_KINDS_CASE(FROM, TO) \
+ BIND(&if_##FROM##_##TO); \
+ { \
+ case_function(FROM, TO); \
+ Goto(&next); \
+ }
+ ELEMENTS_KIND_TRANSITIONS(ELEMENTS_KINDS_CASE)
+#undef ELEMENTS_KINDS_CASE
+
+ BIND(&if_unknown_type);
+ Unreachable();
+
+ BIND(&next);
+}
+
+#undef ELEMENTS_KIND_TRANSITIONS
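Since a switch can only key on one value, the transition variant packs the (from, to) pair into a single word, source kind in the high byte and target kind in the low byte, which works because ElementsKind fits in a uint8_t (the STATIC_ASSERT above). A small standalone check of that encoding, using assumed enum values for illustration:

    #include <cassert>
    #include <cstdint>

    constexpr int kBitsPerByte = 8;

    constexpr int32_t CombineKinds(uint8_t from, uint8_t to) {
      return (from << kBitsPerByte) | to;
    }

    int main() {
      // Assumed values: PACKED_SMI_ELEMENTS = 0, HOLEY_ELEMENTS = 3. The real
      // ordering lives in V8's elements-kind header.
      constexpr uint8_t kPackedSmi = 0, kHoley = 3;
      static_assert(CombineKinds(kPackedSmi, kHoley) == 0x0003, "");
      // The pair is ordered: swapping from/to yields a different key.
      assert(CombineKinds(kHoley, kPackedSmi) == 0x0300);
    }

Because every FROM/TO pair maps to a distinct key, one flat Switch covers all twelve fast-to-fast transitions in ELEMENTS_KIND_TRANSITIONS.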
+
+void HandlerBuiltinsAssembler::Generate_ElementsTransitionAndStore(
+ KeyedAccessStoreMode store_mode) {
+ typedef StoreTransitionDescriptor Descriptor;
+ Node* receiver = Parameter(Descriptor::kReceiver);
+ Node* key = Parameter(Descriptor::kName);
+ Node* value = Parameter(Descriptor::kValue);
+ Node* map = Parameter(Descriptor::kMap);
+ Node* slot = Parameter(Descriptor::kSlot);
+ Node* vector = Parameter(Descriptor::kVector);
+ Node* context = Parameter(Descriptor::kContext);
+
+ Comment("ElementsTransitionAndStore: store_mode=", store_mode);
+
+ Label miss(this);
+
+ if (FLAG_trace_elements_transitions) {
+ // Tracing elements transitions is the job of the runtime.
+ Goto(&miss);
+ } else {
+ // TODO(v8:8481): Pass from_kind and to_kind in feedback vector slots.
+ DispatchForElementsKindTransition(
+ LoadElementsKind(receiver), LoadMapElementsKind(map),
+ [=, &miss](ElementsKind from_kind, ElementsKind to_kind) {
+ TransitionElementsKind(receiver, map, from_kind, to_kind, &miss);
+ EmitElementStore(receiver, key, value, to_kind, store_mode, &miss,
+ context);
+ });
+ Return(value);
+ }
+
+ BIND(&miss);
+ TailCallRuntime(Runtime::kElementsTransitionAndStoreIC_Miss, context,
+ receiver, key, value, map, slot, vector);
+}
+
+TF_BUILTIN(ElementsTransitionAndStore_Standard, HandlerBuiltinsAssembler) {
+ Generate_ElementsTransitionAndStore(STANDARD_STORE);
+}
+
+TF_BUILTIN(ElementsTransitionAndStore_GrowNoTransitionHandleCOW,
+ HandlerBuiltinsAssembler) {
+ Generate_ElementsTransitionAndStore(STORE_AND_GROW_NO_TRANSITION_HANDLE_COW);
+}
+
+TF_BUILTIN(ElementsTransitionAndStore_NoTransitionIgnoreOOB,
+ HandlerBuiltinsAssembler) {
+ Generate_ElementsTransitionAndStore(STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS);
+}
+
+TF_BUILTIN(ElementsTransitionAndStore_NoTransitionHandleCOW,
+ HandlerBuiltinsAssembler) {
+ Generate_ElementsTransitionAndStore(STORE_NO_TRANSITION_HANDLE_COW);
+}
+
+// All elements kinds handled by EmitElementStore. Specifically, this includes
+// fast elements and fixed typed array elements.
+#define ELEMENTS_KINDS(V) \
+ V(PACKED_SMI_ELEMENTS) \
+ V(HOLEY_SMI_ELEMENTS) \
+ V(PACKED_ELEMENTS) \
+ V(HOLEY_ELEMENTS) \
+ V(PACKED_DOUBLE_ELEMENTS) \
+ V(HOLEY_DOUBLE_ELEMENTS) \
+ V(UINT8_ELEMENTS) \
+ V(INT8_ELEMENTS) \
+ V(UINT16_ELEMENTS) \
+ V(INT16_ELEMENTS) \
+ V(UINT32_ELEMENTS) \
+ V(INT32_ELEMENTS) \
+ V(FLOAT32_ELEMENTS) \
+ V(FLOAT64_ELEMENTS) \
+ V(UINT8_CLAMPED_ELEMENTS) \
+ V(BIGUINT64_ELEMENTS) \
+ V(BIGINT64_ELEMENTS)
+
+void HandlerBuiltinsAssembler::DispatchByElementsKind(
+ TNode<Int32T> elements_kind, const ElementsKindSwitchCase& case_function) {
+ Label next(this), if_unknown_type(this, Label::kDeferred);
+
+ int32_t elements_kinds[] = {
+#define ELEMENTS_KINDS_CASE(KIND) KIND,
+ ELEMENTS_KINDS(ELEMENTS_KINDS_CASE)
+#undef ELEMENTS_KINDS_CASE
+ };
+
+#define ELEMENTS_KINDS_CASE(KIND) Label if_##KIND(this);
+ ELEMENTS_KINDS(ELEMENTS_KINDS_CASE)
+#undef ELEMENTS_KINDS_CASE
+
+ Label* elements_kind_labels[] = {
+#define ELEMENTS_KINDS_CASE(KIND) &if_##KIND,
+ ELEMENTS_KINDS(ELEMENTS_KINDS_CASE)
+#undef ELEMENTS_KINDS_CASE
+ };
+ STATIC_ASSERT(arraysize(elements_kinds) == arraysize(elements_kind_labels));
+
+ Switch(elements_kind, &if_unknown_type, elements_kinds, elements_kind_labels,
+ arraysize(elements_kinds));
+
+#define ELEMENTS_KINDS_CASE(KIND) \
+ BIND(&if_##KIND); \
+ { \
+ case_function(KIND); \
+ Goto(&next); \
+ }
+ ELEMENTS_KINDS(ELEMENTS_KINDS_CASE)
+#undef ELEMENTS_KINDS_CASE
+
+ BIND(&if_unknown_type);
+ Unreachable();
+
+ BIND(&next);
+}
+
+#undef ELEMENTS_KINDS
+
+void HandlerBuiltinsAssembler::Generate_StoreFastElementIC(
+ KeyedAccessStoreMode store_mode) {
+ typedef StoreWithVectorDescriptor Descriptor;
+ Node* receiver = Parameter(Descriptor::kReceiver);
+ Node* key = Parameter(Descriptor::kName);
+ Node* value = Parameter(Descriptor::kValue);
+ Node* slot = Parameter(Descriptor::kSlot);
+ Node* vector = Parameter(Descriptor::kVector);
+ Node* context = Parameter(Descriptor::kContext);
+
+ Comment("StoreFastElementStub: store_mode=", store_mode);
+
+ Label miss(this);
+
+ // TODO(v8:8481): Pass elements_kind in feedback vector slots.
+ DispatchByElementsKind(LoadElementsKind(receiver),
+ [=, &miss](ElementsKind elements_kind) {
+ EmitElementStore(receiver, key, value, elements_kind,
+ store_mode, &miss, context);
+ });
+ Return(value);
+
+ BIND(&miss);
+ TailCallRuntime(Runtime::kKeyedStoreIC_Miss, context, value, slot, vector,
+ receiver, key);
+}
+
+TF_BUILTIN(StoreFastElementIC_Standard, HandlerBuiltinsAssembler) {
+ Generate_StoreFastElementIC(STANDARD_STORE);
+}
+
+TF_BUILTIN(StoreFastElementIC_GrowNoTransitionHandleCOW,
+ HandlerBuiltinsAssembler) {
+ Generate_StoreFastElementIC(STORE_AND_GROW_NO_TRANSITION_HANDLE_COW);
+}
+
+TF_BUILTIN(StoreFastElementIC_NoTransitionIgnoreOOB, HandlerBuiltinsAssembler) {
+ Generate_StoreFastElementIC(STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS);
+}
+
+TF_BUILTIN(StoreFastElementIC_NoTransitionHandleCOW, HandlerBuiltinsAssembler) {
+ Generate_StoreFastElementIC(STORE_NO_TRANSITION_HANDLE_COW);
+}
+
TF_BUILTIN(LoadGlobalIC_Slow, CodeStubAssembler) {
Node* name = Parameter(Descriptor::kName);
Node* slot = Parameter(Descriptor::kSlot);
@@ -109,5 +408,91 @@ TF_BUILTIN(StoreGlobalIC_Slow, CodeStubAssembler) {
receiver, name);
}
+TF_BUILTIN(KeyedLoadIC_SloppyArguments, CodeStubAssembler) {
+ Node* receiver = Parameter(Descriptor::kReceiver);
+ Node* key = Parameter(Descriptor::kName);
+ Node* slot = Parameter(Descriptor::kSlot);
+ Node* vector = Parameter(Descriptor::kVector);
+ Node* context = Parameter(Descriptor::kContext);
+
+ Label miss(this);
+
+ Node* result = LoadKeyedSloppyArguments(receiver, key, &miss);
+ Return(result);
+
+ BIND(&miss);
+ {
+ Comment("Miss");
+ TailCallRuntime(Runtime::kKeyedLoadIC_Miss, context, receiver, key, slot,
+ vector);
+ }
+}
+
+void HandlerBuiltinsAssembler::Generate_KeyedStoreIC_SloppyArguments() {
+ typedef StoreWithVectorDescriptor Descriptor;
+ Node* receiver = Parameter(Descriptor::kReceiver);
+ Node* key = Parameter(Descriptor::kName);
+ Node* value = Parameter(Descriptor::kValue);
+ Node* slot = Parameter(Descriptor::kSlot);
+ Node* vector = Parameter(Descriptor::kVector);
+ Node* context = Parameter(Descriptor::kContext);
+
+ Label miss(this);
+
+ StoreKeyedSloppyArguments(receiver, key, value, &miss);
+ Return(value);
+
+ BIND(&miss);
+ TailCallRuntime(Runtime::kKeyedStoreIC_Miss, context, value, slot, vector,
+ receiver, key);
+}
+
+TF_BUILTIN(KeyedStoreIC_SloppyArguments_Standard, HandlerBuiltinsAssembler) {
+ Generate_KeyedStoreIC_SloppyArguments();
+}
+
+TF_BUILTIN(KeyedStoreIC_SloppyArguments_GrowNoTransitionHandleCOW,
+ HandlerBuiltinsAssembler) {
+ Generate_KeyedStoreIC_SloppyArguments();
+}
+
+TF_BUILTIN(KeyedStoreIC_SloppyArguments_NoTransitionIgnoreOOB,
+ HandlerBuiltinsAssembler) {
+ Generate_KeyedStoreIC_SloppyArguments();
+}
+
+TF_BUILTIN(KeyedStoreIC_SloppyArguments_NoTransitionHandleCOW,
+ HandlerBuiltinsAssembler) {
+ Generate_KeyedStoreIC_SloppyArguments();
+}
+
+TF_BUILTIN(StoreInterceptorIC, CodeStubAssembler) {
+ Node* receiver = Parameter(Descriptor::kReceiver);
+ Node* name = Parameter(Descriptor::kName);
+ Node* value = Parameter(Descriptor::kValue);
+ Node* slot = Parameter(Descriptor::kSlot);
+ Node* vector = Parameter(Descriptor::kVector);
+ Node* context = Parameter(Descriptor::kContext);
+ TailCallRuntime(Runtime::kStorePropertyWithInterceptor, context, value, slot,
+ vector, receiver, name);
+}
+
+TF_BUILTIN(LoadIndexedInterceptorIC, CodeStubAssembler) {
+ Node* receiver = Parameter(Descriptor::kReceiver);
+ Node* key = Parameter(Descriptor::kName);
+ Node* slot = Parameter(Descriptor::kSlot);
+ Node* vector = Parameter(Descriptor::kVector);
+ Node* context = Parameter(Descriptor::kContext);
+
+ Label if_keyispositivesmi(this), if_keyisinvalid(this);
+ Branch(TaggedIsPositiveSmi(key), &if_keyispositivesmi, &if_keyisinvalid);
+ BIND(&if_keyispositivesmi);
+ TailCallRuntime(Runtime::kLoadElementWithInterceptor, context, receiver, key);
+
+ BIND(&if_keyisinvalid);
+ TailCallRuntime(Runtime::kKeyedLoadIC_Miss, context, receiver, key, slot,
+ vector);
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/builtins/builtins-internal-gen.cc b/deps/v8/src/builtins/builtins-internal-gen.cc
index 44a18099bf..bae7fd6e1c 100644
--- a/deps/v8/src/builtins/builtins-internal-gen.cc
+++ b/deps/v8/src/builtins/builtins-internal-gen.cc
@@ -6,7 +6,7 @@
#include "src/builtins/builtins-utils-gen.h"
#include "src/builtins/builtins.h"
#include "src/code-stub-assembler.h"
-#include "src/heap/heap-inl.h"
+#include "src/heap/heap-inl.h" // crbug.com/v8/8499
#include "src/ic/accessor-assembler.h"
#include "src/ic/keyed-store-generic.h"
#include "src/macro-assembler.h"
@@ -24,16 +24,10 @@ using TNode = compiler::TNode<T>;
// Interrupt and stack checks.
void Builtins::Generate_InterruptCheck(MacroAssembler* masm) {
-#ifdef V8_TARGET_ARCH_IA32
- Assembler::SupportsRootRegisterScope supports_root_register(masm);
-#endif
masm->TailCallRuntime(Runtime::kInterrupt);
}
void Builtins::Generate_StackCheck(MacroAssembler* masm) {
-#ifdef V8_TARGET_ARCH_IA32
- Assembler::SupportsRootRegisterScope supports_root_register(masm);
-#endif
masm->TailCallRuntime(Runtime::kStackGuard);
}
@@ -151,9 +145,9 @@ TF_BUILTIN(NewArgumentsElements, CodeStubAssembler) {
GotoIf(WordEqual(index, length), &done_loop2);
// Load the parameter at the given {index}.
- TNode<Object> value =
- CAST(Load(MachineType::AnyTagged(), frame,
- TimesPointerSize(IntPtrSub(offset, index))));
+ TNode<Object> value = BitcastWordToTagged(
+ Load(MachineType::Pointer(), frame,
+ TimesSystemPointerSize(IntPtrSub(offset, index))));
// Store the {value} into the {result}.
StoreFixedArrayElement(result, index, value, SKIP_WRITE_BARRIER);
@@ -249,22 +243,23 @@ class RecordWriteCodeStubAssembler : public CodeStubAssembler {
void GetMarkBit(Node* object, Node** cell, Node** mask) {
Node* page = WordAnd(object, IntPtrConstant(~kPageAlignmentMask));
+ Node* bitmap = Load(MachineType::Pointer(), page,
+ IntPtrConstant(MemoryChunk::kMarkBitmapOffset));
{
// Temp variable to calculate cell offset in bitmap.
Node* r0;
- int shift = Bitmap::kBitsPerCellLog2 + kPointerSizeLog2 -
+ int shift = Bitmap::kBitsPerCellLog2 + kTaggedSizeLog2 -
Bitmap::kBytesPerCellLog2;
r0 = WordShr(object, IntPtrConstant(shift));
r0 = WordAnd(r0, IntPtrConstant((kPageAlignmentMask >> shift) &
~(Bitmap::kBytesPerCell - 1)));
- *cell = IntPtrAdd(IntPtrAdd(page, r0),
- IntPtrConstant(MemoryChunk::kHeaderSize));
+ *cell = IntPtrAdd(bitmap, r0);
}
{
// Temp variable to calculate bit offset in cell.
Node* r1;
- r1 = WordShr(object, IntPtrConstant(kPointerSizeLog2));
+ r1 = WordShr(object, IntPtrConstant(kTaggedSizeLog2));
r1 = WordAnd(r1, IntPtrConstant((1 << Bitmap::kBitsPerCellLog2) - 1));
// It seems that the LSB (e.g. cl) is automatically used, so no manual masking
// is needed. Uncomment the following line otherwise.
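For reference, the cell/mask arithmetic above redone as plain C++, under assumed layout constants (8-byte tagged slots, 32-bit bitmap cells, 512 KB pages); the values are illustrative, not authoritative:

    #include <cstdint>

    // Assumed constants for illustration only.
    constexpr int kTaggedSizeLog2 = 3;    // 8-byte tagged slots
    constexpr int kBitsPerCellLog2 = 5;   // 32 mark bits per bitmap cell
    constexpr int kBytesPerCellLog2 = 2;  // 4 bytes per bitmap cell
    constexpr uintptr_t kPageAlignmentMask = (uintptr_t{1} << 19) - 1;

    // Given an object address and the page's mark bitmap base (loaded from the
    // page header, as in the patched code), return the bitmap cell holding the
    // object's mark bit and the single-bit mask selecting it within that cell.
    uint32_t* GetMarkBit(uintptr_t object, uintptr_t bitmap, uint32_t* mask) {
      const int shift = kBitsPerCellLog2 + kTaggedSizeLog2 - kBytesPerCellLog2;
      const uintptr_t cell_offset =
          (object >> shift) & ((kPageAlignmentMask >> shift) &
                               ~uintptr_t{(1u << kBytesPerCellLog2) - 1});
      const uintptr_t bit =
          (object >> kTaggedSizeLog2) & ((1u << kBitsPerCellLog2) - 1);
      *mask = uint32_t{1} << bit;
      return reinterpret_cast<uint32_t*>(bitmap + cell_offset);
    }

The patch's actual change is in the first step: the cell address is now bitmap + offset, with the bitmap pointer read from MemoryChunk::kMarkBitmapOffset, instead of page + kHeaderSize + offset.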
@@ -334,7 +329,7 @@ class RecordWriteCodeStubAssembler : public CodeStubAssembler {
StoreNoWriteBarrier(MachineType::PointerRepresentation(), store_buffer_top,
slot);
Node* new_store_buffer_top =
- IntPtrAdd(store_buffer_top, IntPtrConstant(kPointerSize));
+ IntPtrAdd(store_buffer_top, IntPtrConstant(kSystemPointerSize));
StoreNoWriteBarrier(MachineType::PointerRepresentation(),
store_buffer_top_addr, new_store_buffer_top);
@@ -441,6 +436,7 @@ TF_BUILTIN(RecordWrite, RecordWriteCodeStubAssembler) {
Node* isolate_constant =
ExternalConstant(ExternalReference::isolate_address(isolate()));
Node* fp_mode = Parameter(Descriptor::kFPMode);
+ Node* object = BitcastTaggedToWord(Parameter(Descriptor::kObject));
CallCFunction3WithCallerSavedRegistersMode(
MachineType::Int32(), MachineType::Pointer(), MachineType::Pointer(),
MachineType::Pointer(), function, object, slot, isolate_constant,
@@ -474,7 +470,7 @@ class DeletePropertyBaseAssembler : public AccessorAssembler {
dont_delete);
// Overwrite the entry itself (see NameDictionary::SetEntry).
TNode<HeapObject> filler = TheHoleConstant();
- DCHECK(Heap::RootIsImmortalImmovable(RootIndex::kTheHoleValue));
+ DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kTheHoleValue));
StoreFixedArrayElement(properties, key_index, filler, SKIP_WRITE_BARRIER);
StoreValueByKeyIndex<NameDictionary>(properties, key_index, filler,
SKIP_WRITE_BARRIER);
@@ -629,46 +625,6 @@ class InternalBuiltinsAssembler : public CodeStubAssembler {
explicit InternalBuiltinsAssembler(compiler::CodeAssemblerState* state)
: CodeStubAssembler(state) {}
- TNode<MicrotaskQueue> GetDefaultMicrotaskQueue();
- TNode<IntPtrT> GetPendingMicrotaskCount(
- TNode<MicrotaskQueue> microtask_queue);
- void SetPendingMicrotaskCount(TNode<MicrotaskQueue> microtask_queue,
- TNode<IntPtrT> new_num_tasks);
- TNode<FixedArray> GetQueuedMicrotasks(TNode<MicrotaskQueue> microtask_queue);
- void SetQueuedMicrotasks(TNode<MicrotaskQueue> microtask_queue,
- TNode<FixedArray> new_queue);
-
- TNode<Context> GetCurrentContext();
- void SetCurrentContext(TNode<Context> context);
-
- void EnterMicrotaskContext(TNode<Context> context);
- void LeaveMicrotaskContext();
-
- void RunPromiseHook(Runtime::FunctionId id, TNode<Context> context,
- SloppyTNode<HeapObject> promise_or_capability);
-
- TNode<Object> GetPendingException() {
- auto ref = ExternalReference::Create(kPendingExceptionAddress, isolate());
- return TNode<Object>::UncheckedCast(
- Load(MachineType::AnyTagged(), ExternalConstant(ref)));
- }
- void ClearPendingException() {
- auto ref = ExternalReference::Create(kPendingExceptionAddress, isolate());
- StoreNoWriteBarrier(MachineRepresentation::kTagged, ExternalConstant(ref),
- TheHoleConstant());
- }
-
- TNode<Object> GetScheduledException() {
- auto ref = ExternalReference::scheduled_exception_address(isolate());
- return TNode<Object>::UncheckedCast(
- Load(MachineType::AnyTagged(), ExternalConstant(ref)));
- }
- void ClearScheduledException() {
- auto ref = ExternalReference::scheduled_exception_address(isolate());
- StoreNoWriteBarrier(MachineRepresentation::kTagged, ExternalConstant(ref),
- TheHoleConstant());
- }
-
template <typename Descriptor>
void GenerateAdaptorWithExitFrameType(
Builtins::ExitFrameType exit_frame_type);
@@ -723,403 +679,6 @@ TF_BUILTIN(AdaptorWithBuiltinExitFrame, InternalBuiltinsAssembler) {
GenerateAdaptorWithExitFrameType<Descriptor>(Builtins::BUILTIN_EXIT);
}
-TNode<MicrotaskQueue> InternalBuiltinsAssembler::GetDefaultMicrotaskQueue() {
- return TNode<MicrotaskQueue>::UncheckedCast(
- LoadRoot(RootIndex::kDefaultMicrotaskQueue));
-}
-
-TNode<IntPtrT> InternalBuiltinsAssembler::GetPendingMicrotaskCount(
- TNode<MicrotaskQueue> microtask_queue) {
- TNode<IntPtrT> result = LoadAndUntagObjectField(
- microtask_queue, MicrotaskQueue::kPendingMicrotaskCountOffset);
- return result;
-}
-
-void InternalBuiltinsAssembler::SetPendingMicrotaskCount(
- TNode<MicrotaskQueue> microtask_queue, TNode<IntPtrT> new_num_tasks) {
- StoreObjectField(microtask_queue,
- MicrotaskQueue::kPendingMicrotaskCountOffset,
- SmiFromIntPtr(new_num_tasks));
-}
-
-TNode<FixedArray> InternalBuiltinsAssembler::GetQueuedMicrotasks(
- TNode<MicrotaskQueue> microtask_queue) {
- return LoadObjectField<FixedArray>(microtask_queue,
- MicrotaskQueue::kQueueOffset);
-}
-
-void InternalBuiltinsAssembler::SetQueuedMicrotasks(
- TNode<MicrotaskQueue> microtask_queue, TNode<FixedArray> new_queue) {
- StoreObjectField(microtask_queue, MicrotaskQueue::kQueueOffset, new_queue);
-}
-
-TNode<Context> InternalBuiltinsAssembler::GetCurrentContext() {
- auto ref = ExternalReference::Create(kContextAddress, isolate());
- return TNode<Context>::UncheckedCast(
- Load(MachineType::AnyTagged(), ExternalConstant(ref)));
-}
-
-void InternalBuiltinsAssembler::SetCurrentContext(TNode<Context> context) {
- auto ref = ExternalReference::Create(kContextAddress, isolate());
- StoreNoWriteBarrier(MachineRepresentation::kTagged, ExternalConstant(ref),
- context);
-}
-
-void InternalBuiltinsAssembler::EnterMicrotaskContext(
- TNode<Context> microtask_context) {
- auto ref = ExternalReference::handle_scope_implementer_address(isolate());
- Node* const hsi = Load(MachineType::Pointer(), ExternalConstant(ref));
- StoreNoWriteBarrier(
- MachineType::PointerRepresentation(), hsi,
- IntPtrConstant(HandleScopeImplementerOffsets::kMicrotaskContext),
- BitcastTaggedToWord(microtask_context));
-
- // Load mirrored std::vector length from
- // HandleScopeImplementer::entered_contexts_count_
- auto type = kSizetSize == 8 ? MachineType::Uint64() : MachineType::Uint32();
- Node* entered_contexts_length = Load(
- type, hsi,
- IntPtrConstant(HandleScopeImplementerOffsets::kEnteredContextsCount));
-
- auto rep = kSizetSize == 8 ? MachineRepresentation::kWord64
- : MachineRepresentation::kWord32;
-
- StoreNoWriteBarrier(
- rep, hsi,
- IntPtrConstant(
- HandleScopeImplementerOffsets::kEnteredContextCountDuringMicrotasks),
- entered_contexts_length);
-}
-
-void InternalBuiltinsAssembler::LeaveMicrotaskContext() {
- auto ref = ExternalReference::handle_scope_implementer_address(isolate());
-
- Node* const hsi = Load(MachineType::Pointer(), ExternalConstant(ref));
- StoreNoWriteBarrier(
- MachineType::PointerRepresentation(), hsi,
- IntPtrConstant(HandleScopeImplementerOffsets::kMicrotaskContext),
- IntPtrConstant(0));
- if (kSizetSize == 4) {
- StoreNoWriteBarrier(
- MachineRepresentation::kWord32, hsi,
- IntPtrConstant(HandleScopeImplementerOffsets::
- kEnteredContextCountDuringMicrotasks),
- Int32Constant(0));
- } else {
- StoreNoWriteBarrier(
- MachineRepresentation::kWord64, hsi,
- IntPtrConstant(HandleScopeImplementerOffsets::
- kEnteredContextCountDuringMicrotasks),
- Int64Constant(0));
- }
-}
-
-void InternalBuiltinsAssembler::RunPromiseHook(
- Runtime::FunctionId id, TNode<Context> context,
- SloppyTNode<HeapObject> promise_or_capability) {
- Label hook(this, Label::kDeferred), done_hook(this);
- GotoIf(IsDebugActive(), &hook);
- Branch(IsPromiseHookEnabledOrHasAsyncEventDelegate(), &hook, &done_hook);
- BIND(&hook);
- {
- // Get to the underlying JSPromise instance.
- Node* const promise = Select<HeapObject>(
- IsJSPromise(promise_or_capability),
- [=] { return promise_or_capability; },
- [=] {
- return CAST(LoadObjectField(promise_or_capability,
- PromiseCapability::kPromiseOffset));
- });
- CallRuntime(id, context, promise);
- Goto(&done_hook);
- }
- BIND(&done_hook);
-}
-
-TF_BUILTIN(EnqueueMicrotask, InternalBuiltinsAssembler) {
- Node* microtask = Parameter(Descriptor::kMicrotask);
-
- TNode<MicrotaskQueue> microtask_queue = GetDefaultMicrotaskQueue();
- TNode<IntPtrT> num_tasks = GetPendingMicrotaskCount(microtask_queue);
- TNode<IntPtrT> new_num_tasks = IntPtrAdd(num_tasks, IntPtrConstant(1));
- TNode<FixedArray> queue = GetQueuedMicrotasks(microtask_queue);
- TNode<IntPtrT> queue_length = LoadAndUntagFixedArrayBaseLength(queue);
-
- Label if_append(this), if_grow(this), done(this);
- Branch(WordEqual(num_tasks, queue_length), &if_grow, &if_append);
-
- BIND(&if_grow);
- {
- // Determine the new queue length and check if we need to allocate
- // in large object space (instead of just going to new space, where
- // we also know that we don't need any write barriers for setting
- // up the new queue object).
- Label if_newspace(this), if_lospace(this, Label::kDeferred);
- TNode<IntPtrT> new_queue_length =
- IntPtrMax(IntPtrConstant(8), IntPtrAdd(num_tasks, num_tasks));
- Branch(IntPtrLessThanOrEqual(new_queue_length,
- IntPtrConstant(FixedArray::kMaxRegularLength)),
- &if_newspace, &if_lospace);
-
- BIND(&if_newspace);
- {
- // This is the likely case where the new queue fits into new space,
- // and thus we don't need any write barriers for initializing it.
- TNode<FixedArray> new_queue =
- CAST(AllocateFixedArray(PACKED_ELEMENTS, new_queue_length));
- CopyFixedArrayElements(PACKED_ELEMENTS, queue, new_queue, num_tasks,
- SKIP_WRITE_BARRIER);
- StoreFixedArrayElement(new_queue, num_tasks, microtask,
- SKIP_WRITE_BARRIER);
- FillFixedArrayWithValue(PACKED_ELEMENTS, new_queue, new_num_tasks,
- new_queue_length, RootIndex::kUndefinedValue);
- SetQueuedMicrotasks(microtask_queue, new_queue);
- Goto(&done);
- }
-
- BIND(&if_lospace);
- {
- // The fallback case where the new queue ends up in large object space.
- TNode<FixedArray> new_queue = CAST(AllocateFixedArray(
- PACKED_ELEMENTS, new_queue_length, INTPTR_PARAMETERS,
- AllocationFlag::kAllowLargeObjectAllocation));
- CopyFixedArrayElements(PACKED_ELEMENTS, queue, new_queue, num_tasks);
- StoreFixedArrayElement(new_queue, num_tasks, microtask);
- FillFixedArrayWithValue(PACKED_ELEMENTS, new_queue, new_num_tasks,
- new_queue_length, RootIndex::kUndefinedValue);
- SetQueuedMicrotasks(microtask_queue, new_queue);
- Goto(&done);
- }
- }
-
- BIND(&if_append);
- {
- StoreFixedArrayElement(queue, num_tasks, microtask);
- Goto(&done);
- }
-
- BIND(&done);
- SetPendingMicrotaskCount(microtask_queue, new_num_tasks);
- Return(UndefinedConstant());
-}
-
-TF_BUILTIN(RunMicrotasks, InternalBuiltinsAssembler) {
- // Load the current context from the isolate.
- TNode<Context> current_context = GetCurrentContext();
- TNode<MicrotaskQueue> microtask_queue = GetDefaultMicrotaskQueue();
-
- Label init_queue_loop(this);
- Goto(&init_queue_loop);
- BIND(&init_queue_loop);
- {
- TVARIABLE(IntPtrT, index, IntPtrConstant(0));
- Label loop(this, &index), loop_next(this);
-
- TNode<IntPtrT> num_tasks = GetPendingMicrotaskCount(microtask_queue);
- ReturnIf(IntPtrEqual(num_tasks, IntPtrConstant(0)), UndefinedConstant());
-
- TNode<FixedArray> queue = GetQueuedMicrotasks(microtask_queue);
-
- CSA_ASSERT(this, IntPtrGreaterThanOrEqual(
- LoadAndUntagFixedArrayBaseLength(queue), num_tasks));
- CSA_ASSERT(this, IntPtrGreaterThan(num_tasks, IntPtrConstant(0)));
-
- SetQueuedMicrotasks(microtask_queue, EmptyFixedArrayConstant());
- SetPendingMicrotaskCount(microtask_queue, IntPtrConstant(0));
-
- Goto(&loop);
- BIND(&loop);
- {
- TNode<HeapObject> microtask =
- CAST(LoadFixedArrayElement(queue, index.value()));
- index = IntPtrAdd(index.value(), IntPtrConstant(1));
-
- CSA_ASSERT(this, TaggedIsNotSmi(microtask));
-
- TNode<Map> microtask_map = LoadMap(microtask);
- TNode<Int32T> microtask_type = LoadMapInstanceType(microtask_map);
-
- VARIABLE(var_exception, MachineRepresentation::kTagged,
- TheHoleConstant());
- Label if_exception(this, Label::kDeferred);
- Label is_callable(this), is_callback(this),
- is_promise_fulfill_reaction_job(this),
- is_promise_reject_reaction_job(this),
- is_promise_resolve_thenable_job(this),
- is_unreachable(this, Label::kDeferred);
-
- int32_t case_values[] = {CALLABLE_TASK_TYPE, CALLBACK_TASK_TYPE,
- PROMISE_FULFILL_REACTION_JOB_TASK_TYPE,
- PROMISE_REJECT_REACTION_JOB_TASK_TYPE,
- PROMISE_RESOLVE_THENABLE_JOB_TASK_TYPE};
- Label* case_labels[] = {
- &is_callable, &is_callback, &is_promise_fulfill_reaction_job,
- &is_promise_reject_reaction_job, &is_promise_resolve_thenable_job};
- static_assert(arraysize(case_values) == arraysize(case_labels), "");
- Switch(microtask_type, &is_unreachable, case_values, case_labels,
- arraysize(case_labels));
-
- BIND(&is_callable);
- {
- // Enter the context of the {microtask}.
- TNode<Context> microtask_context =
- LoadObjectField<Context>(microtask, CallableTask::kContextOffset);
- TNode<Context> native_context = LoadNativeContext(microtask_context);
-
- CSA_ASSERT(this, IsNativeContext(native_context));
- EnterMicrotaskContext(microtask_context);
- SetCurrentContext(native_context);
-
- TNode<JSReceiver> callable = LoadObjectField<JSReceiver>(
- microtask, CallableTask::kCallableOffset);
- Node* const result = CallJS(
- CodeFactory::Call(isolate(), ConvertReceiverMode::kNullOrUndefined),
- microtask_context, callable, UndefinedConstant());
- GotoIfException(result, &if_exception, &var_exception);
- LeaveMicrotaskContext();
- SetCurrentContext(current_context);
- Goto(&loop_next);
- }
-
- BIND(&is_callback);
- {
- Node* const microtask_callback =
- LoadObjectField(microtask, CallbackTask::kCallbackOffset);
- Node* const microtask_data =
- LoadObjectField(microtask, CallbackTask::kDataOffset);
-
- // If this turns out to become a bottleneck because of the calls
- // to C++ via CEntry, we can choose to speed them up using a
- // similar mechanism that we use for the CallApiFunction stub,
- // except that calling the MicrotaskCallback is even easier, since
- // it doesn't accept any tagged parameters, doesn't return a value
- // and ignores exceptions.
- //
- // But from our current measurements it doesn't seem to be a
- // serious performance problem, even if the microtask is full
- // of CallHandlerTasks (which is not a realistic use case anyways).
- Node* const result =
- CallRuntime(Runtime::kRunMicrotaskCallback, current_context,
- microtask_callback, microtask_data);
- GotoIfException(result, &if_exception, &var_exception);
- Goto(&loop_next);
- }
-
- BIND(&is_promise_resolve_thenable_job);
- {
- // Enter the context of the {microtask}.
- TNode<Context> microtask_context = LoadObjectField<Context>(
- microtask, PromiseResolveThenableJobTask::kContextOffset);
- TNode<Context> native_context = LoadNativeContext(microtask_context);
- CSA_ASSERT(this, IsNativeContext(native_context));
- EnterMicrotaskContext(microtask_context);
- SetCurrentContext(native_context);
-
- Node* const promise_to_resolve = LoadObjectField(
- microtask, PromiseResolveThenableJobTask::kPromiseToResolveOffset);
- Node* const then = LoadObjectField(
- microtask, PromiseResolveThenableJobTask::kThenOffset);
- Node* const thenable = LoadObjectField(
- microtask, PromiseResolveThenableJobTask::kThenableOffset);
-
- Node* const result =
- CallBuiltin(Builtins::kPromiseResolveThenableJob, native_context,
- promise_to_resolve, thenable, then);
- GotoIfException(result, &if_exception, &var_exception);
- LeaveMicrotaskContext();
- SetCurrentContext(current_context);
- Goto(&loop_next);
- }
-
- BIND(&is_promise_fulfill_reaction_job);
- {
- // Enter the context of the {microtask}.
- TNode<Context> microtask_context = LoadObjectField<Context>(
- microtask, PromiseReactionJobTask::kContextOffset);
- TNode<Context> native_context = LoadNativeContext(microtask_context);
- CSA_ASSERT(this, IsNativeContext(native_context));
- EnterMicrotaskContext(microtask_context);
- SetCurrentContext(native_context);
-
- Node* const argument =
- LoadObjectField(microtask, PromiseReactionJobTask::kArgumentOffset);
- Node* const handler =
- LoadObjectField(microtask, PromiseReactionJobTask::kHandlerOffset);
- Node* const promise_or_capability = LoadObjectField(
- microtask, PromiseReactionJobTask::kPromiseOrCapabilityOffset);
-
- // Run the promise before/debug hook if enabled.
- RunPromiseHook(Runtime::kPromiseHookBefore, microtask_context,
- promise_or_capability);
-
- Node* const result =
- CallBuiltin(Builtins::kPromiseFulfillReactionJob, microtask_context,
- argument, handler, promise_or_capability);
- GotoIfException(result, &if_exception, &var_exception);
-
- // Run the promise after/debug hook if enabled.
- RunPromiseHook(Runtime::kPromiseHookAfter, microtask_context,
- promise_or_capability);
-
- LeaveMicrotaskContext();
- SetCurrentContext(current_context);
- Goto(&loop_next);
- }
-
- BIND(&is_promise_reject_reaction_job);
- {
- // Enter the context of the {microtask}.
- TNode<Context> microtask_context = LoadObjectField<Context>(
- microtask, PromiseReactionJobTask::kContextOffset);
- TNode<Context> native_context = LoadNativeContext(microtask_context);
- CSA_ASSERT(this, IsNativeContext(native_context));
- EnterMicrotaskContext(microtask_context);
- SetCurrentContext(native_context);
-
- Node* const argument =
- LoadObjectField(microtask, PromiseReactionJobTask::kArgumentOffset);
- Node* const handler =
- LoadObjectField(microtask, PromiseReactionJobTask::kHandlerOffset);
- Node* const promise_or_capability = LoadObjectField(
- microtask, PromiseReactionJobTask::kPromiseOrCapabilityOffset);
-
- // Run the promise before/debug hook if enabled.
- RunPromiseHook(Runtime::kPromiseHookBefore, microtask_context,
- promise_or_capability);
-
- Node* const result =
- CallBuiltin(Builtins::kPromiseRejectReactionJob, microtask_context,
- argument, handler, promise_or_capability);
- GotoIfException(result, &if_exception, &var_exception);
-
- // Run the promise after/debug hook if enabled.
- RunPromiseHook(Runtime::kPromiseHookAfter, microtask_context,
- promise_or_capability);
-
- LeaveMicrotaskContext();
- SetCurrentContext(current_context);
- Goto(&loop_next);
- }
-
- BIND(&is_unreachable);
- Unreachable();
-
- BIND(&if_exception);
- {
- // Report unhandled exceptions from microtasks.
- CallRuntime(Runtime::kReportMessage, current_context,
- var_exception.value());
- LeaveMicrotaskContext();
- SetCurrentContext(current_context);
- Goto(&loop_next);
- }
-
- BIND(&loop_next);
- Branch(IntPtrLessThan(index.value(), num_tasks), &loop, &init_queue_loop);
- }
- }
-}
-
TF_BUILTIN(AllocateInNewSpace, CodeStubAssembler) {
TNode<IntPtrT> requested_size =
UncheckedCast<IntPtrT>(Parameter(Descriptor::kRequestedSize));
@@ -1199,42 +758,23 @@ void Builtins::Generate_CEntry_Return2_SaveFPRegs_ArgvOnStack_BuiltinExit(
Generate_CEntry(masm, 2, kSaveFPRegs, kArgvOnStack, true);
}
-void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
-#ifdef V8_TARGET_ARCH_IA32
- Assembler::SupportsRootRegisterScope supports_root_register(masm);
-#endif
- // CallApiGetterStub only exists as a stub to avoid duplicating code between
- // here and code-stubs-<arch>.cc. For example, see CallApiFunctionAndReturn.
- // Here we abuse the instantiated stub to generate code.
- CallApiGetterStub stub(masm->isolate());
- stub.Generate(masm);
+#if !defined(V8_TARGET_ARCH_ARM) && !defined(V8_TARGET_ARCH_MIPS)
+void Builtins::Generate_MemCopyUint8Uint8(MacroAssembler* masm) {
+ masm->Call(BUILTIN_CODE(masm->isolate(), Illegal), RelocInfo::CODE_TARGET);
}
+#endif // !defined(V8_TARGET_ARCH_ARM) && !defined(V8_TARGET_ARCH_MIPS)
-void Builtins::Generate_CallApiCallback_Argc0(MacroAssembler* masm) {
-#ifdef V8_TARGET_ARCH_IA32
- Assembler::SupportsRootRegisterScope supports_root_register(masm);
-#endif
- // The common variants of CallApiCallbackStub (i.e. all that are embedded into
- // the snapshot) are generated as builtins. The rest remain available as code
- // stubs. Here we abuse the instantiated stub to generate code and avoid
- // duplication.
- const int kArgc = 0;
- CallApiCallbackStub stub(masm->isolate(), kArgc);
- stub.Generate(masm);
+#ifndef V8_TARGET_ARCH_ARM
+void Builtins::Generate_MemCopyUint16Uint8(MacroAssembler* masm) {
+ masm->Call(BUILTIN_CODE(masm->isolate(), Illegal), RelocInfo::CODE_TARGET);
}
+#endif // V8_TARGET_ARCH_ARM
-void Builtins::Generate_CallApiCallback_Argc1(MacroAssembler* masm) {
-#ifdef V8_TARGET_ARCH_IA32
- Assembler::SupportsRootRegisterScope supports_root_register(masm);
-#endif
- // The common variants of CallApiCallbackStub (i.e. all that are embedded into
- // the snapshot) are generated as builtins. The rest remain available as code
- // stubs. Here we abuse the instantiated stub to generate code and avoid
- // duplication.
- const int kArgc = 1;
- CallApiCallbackStub stub(masm->isolate(), kArgc);
- stub.Generate(masm);
+#ifndef V8_TARGET_ARCH_IA32
+void Builtins::Generate_MemMove(MacroAssembler* masm) {
+ masm->Call(BUILTIN_CODE(masm->isolate(), Illegal), RelocInfo::CODE_TARGET);
}
+#endif // V8_TARGET_ARCH_IA32
// ES6 [[Get]] operation.
TF_BUILTIN(GetProperty, CodeStubAssembler) {
@@ -1300,5 +840,19 @@ TF_BUILTIN(SetProperty, CodeStubAssembler) {
value, LanguageMode::kStrict);
}
+// ES6 CreateDataProperty(), specialized for the case where objects are still
+// being initialized, and have not yet been made accessible to the user. Thus,
+// any operation here should be unobservable until after the object has been
+// returned.
+TF_BUILTIN(SetPropertyInLiteral, CodeStubAssembler) {
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<JSObject> receiver = CAST(Parameter(Descriptor::kReceiver));
+ TNode<Object> key = CAST(Parameter(Descriptor::kKey));
+ TNode<Object> value = CAST(Parameter(Descriptor::kValue));
+
+ KeyedStoreGenericGenerator::SetPropertyInLiteral(state(), context, receiver,
+ key, value);
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/builtins/builtins-interpreter-gen.cc b/deps/v8/src/builtins/builtins-interpreter-gen.cc
index fa1684c54b..f0d5160330 100644
--- a/deps/v8/src/builtins/builtins-interpreter-gen.cc
+++ b/deps/v8/src/builtins/builtins-interpreter-gen.cc
@@ -10,18 +10,12 @@ namespace v8 {
namespace internal {
void Builtins::Generate_InterpreterPushArgsThenCall(MacroAssembler* masm) {
-#ifdef V8_TARGET_ARCH_IA32
- Assembler::SupportsRootRegisterScope supports_root_register(masm);
-#endif
return Generate_InterpreterPushArgsThenCallImpl(
masm, ConvertReceiverMode::kAny, InterpreterPushArgsMode::kOther);
}
void Builtins::Generate_InterpreterPushUndefinedAndArgsThenCall(
MacroAssembler* masm) {
-#ifdef V8_TARGET_ARCH_IA32
- Assembler::SupportsRootRegisterScope supports_root_register(masm);
-#endif
return Generate_InterpreterPushArgsThenCallImpl(
masm, ConvertReceiverMode::kNullOrUndefined,
InterpreterPushArgsMode::kOther);
@@ -29,36 +23,24 @@ void Builtins::Generate_InterpreterPushUndefinedAndArgsThenCall(
void Builtins::Generate_InterpreterPushArgsThenCallWithFinalSpread(
MacroAssembler* masm) {
-#ifdef V8_TARGET_ARCH_IA32
- Assembler::SupportsRootRegisterScope supports_root_register(masm);
-#endif
return Generate_InterpreterPushArgsThenCallImpl(
masm, ConvertReceiverMode::kAny,
InterpreterPushArgsMode::kWithFinalSpread);
}
void Builtins::Generate_InterpreterPushArgsThenConstruct(MacroAssembler* masm) {
-#ifdef V8_TARGET_ARCH_IA32
- Assembler::SupportsRootRegisterScope supports_root_register(masm);
-#endif
return Generate_InterpreterPushArgsThenConstructImpl(
masm, InterpreterPushArgsMode::kOther);
}
void Builtins::Generate_InterpreterPushArgsThenConstructWithFinalSpread(
MacroAssembler* masm) {
-#ifdef V8_TARGET_ARCH_IA32
- Assembler::SupportsRootRegisterScope supports_root_register(masm);
-#endif
return Generate_InterpreterPushArgsThenConstructImpl(
masm, InterpreterPushArgsMode::kWithFinalSpread);
}
void Builtins::Generate_InterpreterPushArgsThenConstructArrayFunction(
MacroAssembler* masm) {
-#ifdef V8_TARGET_ARCH_IA32
- Assembler::SupportsRootRegisterScope supports_root_register(masm);
-#endif
return Generate_InterpreterPushArgsThenConstructImpl(
masm, InterpreterPushArgsMode::kArrayFunction);
}
diff --git a/deps/v8/src/builtins/builtins-interpreter.cc b/deps/v8/src/builtins/builtins-interpreter.cc
deleted file mode 100644
index d05a8656db..0000000000
--- a/deps/v8/src/builtins/builtins-interpreter.cc
+++ /dev/null
@@ -1,49 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/builtins/builtins-utils.h"
-#include "src/builtins/builtins.h"
-#include "src/globals.h"
-#include "src/handles-inl.h"
-#include "src/objects-inl.h"
-
-namespace v8 {
-namespace internal {
-
-Handle<Code> Builtins::InterpreterPushArgsThenCall(
- ConvertReceiverMode receiver_mode, InterpreterPushArgsMode mode) {
- switch (mode) {
- case InterpreterPushArgsMode::kArrayFunction:
- // There is no special-case handling of calls to Array. They will all go
- // through the kOther case below.
- UNREACHABLE();
- case InterpreterPushArgsMode::kWithFinalSpread:
- return builtin_handle(kInterpreterPushArgsThenCallWithFinalSpread);
- case InterpreterPushArgsMode::kOther:
- switch (receiver_mode) {
- case ConvertReceiverMode::kNullOrUndefined:
- return builtin_handle(kInterpreterPushUndefinedAndArgsThenCall);
- case ConvertReceiverMode::kNotNullOrUndefined:
- case ConvertReceiverMode::kAny:
- return builtin_handle(kInterpreterPushArgsThenCall);
- }
- }
- UNREACHABLE();
-}
-
-Handle<Code> Builtins::InterpreterPushArgsThenConstruct(
- InterpreterPushArgsMode mode) {
- switch (mode) {
- case InterpreterPushArgsMode::kArrayFunction:
- return builtin_handle(kInterpreterPushArgsThenConstructArrayFunction);
- case InterpreterPushArgsMode::kWithFinalSpread:
- return builtin_handle(kInterpreterPushArgsThenConstructWithFinalSpread);
- case InterpreterPushArgsMode::kOther:
- return builtin_handle(kInterpreterPushArgsThenConstruct);
- }
- UNREACHABLE();
-}
-
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/builtins/builtins-intl-gen.cc b/deps/v8/src/builtins/builtins-intl-gen.cc
index 49405141c1..c0d469cfe3 100644
--- a/deps/v8/src/builtins/builtins-intl-gen.cc
+++ b/deps/v8/src/builtins/builtins-intl-gen.cc
@@ -103,22 +103,18 @@ TF_BUILTIN(StringToLowerCaseIntl, IntlBuiltinsAssembler) {
}
// Call into C for case conversion. The signature is:
- // Object* ConvertOneByteToLower(String* src, String* dst, Isolate* isolate);
+ // String ConvertOneByteToLower(String src, String dst);
BIND(&call_c);
{
Node* const src = to_direct.string();
Node* const function_addr =
ExternalConstant(ExternalReference::intl_convert_one_byte_to_lower());
- Node* const isolate_ptr =
- ExternalConstant(ExternalReference::isolate_address(isolate()));
- MachineType type_ptr = MachineType::Pointer();
MachineType type_tagged = MachineType::AnyTagged();
- Node* const result =
- CallCFunction3(type_tagged, type_tagged, type_tagged, type_ptr,
- function_addr, src, dst, isolate_ptr);
+ Node* const result = CallCFunction2(type_tagged, type_tagged, type_tagged,
+ function_addr, src, dst);
Return(result);
}
@@ -188,10 +184,10 @@ void IntlBuiltinsAssembler::ListFormatCommon(TNode<Context> context,
TNode<JSArray> IntlBuiltinsAssembler::AllocateEmptyJSArray(
TNode<Context> context) {
- return CAST(CodeStubAssembler::AllocateJSArray(
+ return CodeStubAssembler::AllocateJSArray(
PACKED_ELEMENTS,
LoadJSArrayElementsMap(PACKED_ELEMENTS, LoadNativeContext(context)),
- SmiConstant(0), SmiConstant(0)));
+ SmiConstant(0), SmiConstant(0));
}
TF_BUILTIN(ListFormatPrototypeFormat, IntlBuiltinsAssembler) {
diff --git a/deps/v8/src/builtins/builtins-intl.cc b/deps/v8/src/builtins/builtins-intl.cc
index 01c8a9ddcd..ca8237f0df 100644
--- a/deps/v8/src/builtins/builtins-intl.cc
+++ b/deps/v8/src/builtins/builtins-intl.cc
@@ -12,9 +12,9 @@
#include "src/builtins/builtins-utils-inl.h"
#include "src/builtins/builtins.h"
+#include "src/counters.h"
#include "src/date.h"
#include "src/elements.h"
-#include "src/intl.h"
#include "src/objects-inl.h"
#include "src/objects/intl-objects.h"
#include "src/objects/js-array-inl.h"
@@ -26,21 +26,12 @@
#include "src/objects/js-number-format-inl.h"
#include "src/objects/js-plural-rules-inl.h"
#include "src/objects/js-relative-time-format-inl.h"
+#include "src/objects/js-segment-iterator-inl.h"
#include "src/objects/js-segmenter-inl.h"
+#include "src/objects/smi.h"
#include "src/property-descriptor.h"
-#include "unicode/datefmt.h"
-#include "unicode/decimfmt.h"
-#include "unicode/fieldpos.h"
-#include "unicode/fpositer.h"
-#include "unicode/listformatter.h"
-#include "unicode/normalizer2.h"
-#include "unicode/numfmt.h"
-#include "unicode/smpdtfmt.h"
-#include "unicode/udat.h"
-#include "unicode/ufieldpositer.h"
-#include "unicode/unistr.h"
-#include "unicode/ustring.h"
+#include "unicode/brkiter.h"
namespace v8 {
namespace internal {
@@ -49,7 +40,7 @@ BUILTIN(StringPrototypeToUpperCaseIntl) {
HandleScope scope(isolate);
TO_THIS_STRING(string, "String.prototype.toUpperCase");
string = String::Flatten(isolate, string);
- RETURN_RESULT_OR_FAILURE(isolate, ConvertCase(string, true, isolate));
+ RETURN_RESULT_OR_FAILURE(isolate, Intl::ConvertToUpper(isolate, string));
}
BUILTIN(StringPrototypeNormalizeIntl) {
@@ -57,77 +48,9 @@ BUILTIN(StringPrototypeNormalizeIntl) {
TO_THIS_STRING(string, "String.prototype.normalize");
Handle<Object> form_input = args.atOrUndefined(isolate, 1);
- const char* form_name;
- UNormalization2Mode form_mode;
- if (form_input->IsUndefined(isolate)) {
- // default is NFC
- form_name = "nfc";
- form_mode = UNORM2_COMPOSE;
- } else {
- Handle<String> form;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, form,
- Object::ToString(isolate, form_input));
-
- if (String::Equals(isolate, form, isolate->factory()->NFC_string())) {
- form_name = "nfc";
- form_mode = UNORM2_COMPOSE;
- } else if (String::Equals(isolate, form,
- isolate->factory()->NFD_string())) {
- form_name = "nfc";
- form_mode = UNORM2_DECOMPOSE;
- } else if (String::Equals(isolate, form,
- isolate->factory()->NFKC_string())) {
- form_name = "nfkc";
- form_mode = UNORM2_COMPOSE;
- } else if (String::Equals(isolate, form,
- isolate->factory()->NFKD_string())) {
- form_name = "nfkc";
- form_mode = UNORM2_DECOMPOSE;
- } else {
- Handle<String> valid_forms =
- isolate->factory()->NewStringFromStaticChars("NFC, NFD, NFKC, NFKD");
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate,
- NewRangeError(MessageTemplate::kNormalizationForm, valid_forms));
- }
- }
- int length = string->length();
- string = String::Flatten(isolate, string);
- icu::UnicodeString result;
- std::unique_ptr<uc16[]> sap;
- UErrorCode status = U_ZERO_ERROR;
- {
- DisallowHeapAllocation no_gc;
- String::FlatContent flat = string->GetFlatContent();
- const UChar* src = GetUCharBufferFromFlat(flat, &sap, length);
- icu::UnicodeString input(false, src, length);
- // Getting a singleton. Should not free it.
- const icu::Normalizer2* normalizer =
- icu::Normalizer2::getInstance(nullptr, form_name, form_mode, status);
- DCHECK(U_SUCCESS(status));
- CHECK_NOT_NULL(normalizer);
- int32_t normalized_prefix_length =
- normalizer->spanQuickCheckYes(input, status);
- // Quick return if the input is already normalized.
- if (length == normalized_prefix_length) return *string;
- icu::UnicodeString unnormalized =
- input.tempSubString(normalized_prefix_length);
- // Read-only alias of the normalized prefix.
- result.setTo(false, input.getBuffer(), normalized_prefix_length);
- // copy-on-write; normalize the suffix and append to |result|.
- normalizer->normalizeSecondAndAppend(result, unnormalized, status);
- }
-
- if (U_FAILURE(status)) {
- THROW_NEW_ERROR_RETURN_FAILURE(isolate,
- NewTypeError(MessageTemplate::kIcuError));
- }
-
- RETURN_RESULT_OR_FAILURE(
- isolate, isolate->factory()->NewStringFromTwoByte(Vector<const uint16_t>(
- reinterpret_cast<const uint16_t*>(result.getBuffer()),
- result.length())));
+ RETURN_RESULT_OR_FAILURE(isolate,
+ Intl::Normalize(isolate, string, form_input));
}
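
The deleted inline body above is the best documentation of what Intl::Normalize now does internally: ICU's quick check lets already-normalized input be returned unchanged, and only the non-normalized suffix is normalized and appended. A standalone sketch of that fast path, assuming ICU headers are available and eliding error handling:

#include <unicode/normalizer2.h>
#include <unicode/unistr.h>

icu::UnicodeString NormalizeNfc(const icu::UnicodeString& input) {
  UErrorCode status = U_ZERO_ERROR;
  // Getting a singleton; it must not be freed.
  const icu::Normalizer2* nfc =
      icu::Normalizer2::getInstance(nullptr, "nfc", UNORM2_COMPOSE, status);
  // Length of the prefix already known to be normalized.
  int32_t ok = nfc->spanQuickCheckYes(input, status);
  if (ok == input.length()) return input;  // quick return, no allocation
  icu::UnicodeString result(false, input.getBuffer(), ok);  // read-only alias
  // Copy-on-write: normalize only the suffix and append it to |result|.
  nfc->normalizeSecondAndAppend(result, input.tempSubString(ok), status);
  return result;
}
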
BUILTIN(V8BreakIteratorSupportedLocalesOf) {
@@ -136,8 +59,9 @@ BUILTIN(V8BreakIteratorSupportedLocalesOf) {
Handle<Object> options = args.atOrUndefined(isolate, 2);
RETURN_RESULT_OR_FAILURE(
- isolate, Intl::SupportedLocalesOf(isolate, ICUService::kBreakIterator,
- locales, options));
+ isolate, Intl::SupportedLocalesOf(
+ isolate, "Intl.v8BreakIterator.supportedLocalesOf",
+ JSV8BreakIterator::GetAvailableLocales(), locales, options));
}
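
This same rewrite repeats for every supportedLocalesOf builtin below: instead of an ICUService enum plus the isolate, callers now pass the method name (used for error messages) and the service's own available-locale set. A hedged, self-contained model of the core lookup; the real Intl::SupportedLocalesOf also canonicalizes tags and implements the ecma402 lookup/best-fit matchers:

#include <iostream>
#include <set>
#include <string>
#include <vector>

std::vector<std::string> SupportedLocalesOf(
    const std::set<std::string>& available,       // per-service locale set
    const std::vector<std::string>& requested) {
  std::vector<std::string> supported;
  for (const auto& tag : requested)
    if (available.count(tag)) supported.push_back(tag);
  return supported;
}

int main() {
  for (const auto& l : SupportedLocalesOf({"de", "fr"}, {"fr", "tlh"}))
    std::cout << l << "\n";  // prints "fr"
}
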
BUILTIN(NumberFormatSupportedLocalesOf) {
@@ -146,8 +70,9 @@ BUILTIN(NumberFormatSupportedLocalesOf) {
Handle<Object> options = args.atOrUndefined(isolate, 2);
RETURN_RESULT_OR_FAILURE(
- isolate, Intl::SupportedLocalesOf(isolate, ICUService::kNumberFormat,
- locales, options));
+ isolate, Intl::SupportedLocalesOf(
+ isolate, "Intl.NumberFormat.supportedLocalesOf",
+ JSNumberFormat::GetAvailableLocales(), locales, options));
}
BUILTIN(NumberFormatPrototypeFormatToParts) {
@@ -188,8 +113,9 @@ BUILTIN(DateTimeFormatSupportedLocalesOf) {
Handle<Object> options = args.atOrUndefined(isolate, 2);
RETURN_RESULT_OR_FAILURE(
- isolate, Intl::SupportedLocalesOf(isolate, ICUService::kDateFormat,
- locales, options));
+ isolate, Intl::SupportedLocalesOf(
+ isolate, "Intl.DateTimeFormat.supportedLocalesOf",
+ JSDateTimeFormat::GetAvailableLocales(), locales, options));
}
BUILTIN(DateTimeFormatPrototypeFormatToParts) {
@@ -256,8 +182,10 @@ Handle<JSFunction> CreateBoundFunction(Isolate* isolate,
 * NumberFormatConstructor
*/
template <class T>
-Object* FormatConstructor(BuiltinArguments args, Isolate* isolate,
- Handle<Object> constructor, const char* method) {
+Object LegacyFormatConstructor(BuiltinArguments args, Isolate* isolate,
+ v8::Isolate::UseCounterFeature feature,
+ Handle<Object> constructor, const char* method) {
+ isolate->CountUsage(feature);
Handle<JSReceiver> new_target;
// 1. If NewTarget is undefined, let newTarget be the active
// function object, else let newTarget be NewTarget.
@@ -276,11 +204,11 @@ Object* FormatConstructor(BuiltinArguments args, Isolate* isolate,
// 2. Let format be ? OrdinaryCreateFromConstructor(newTarget,
// "%<T>Prototype%", ...).
- Handle<JSObject> format_obj;
+ Handle<JSObject> obj;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, format_obj,
+ isolate, obj,
JSObject::New(target, new_target, Handle<AllocationSite>::null()));
- Handle<T> format = Handle<T>::cast(format_obj);
+ Handle<T> format = Handle<T>::cast(obj);
// 3. Perform ? Initialize<T>(Format, locales, options).
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
@@ -330,16 +258,80 @@ Object* FormatConstructor(BuiltinArguments args, Isolate* isolate,
return *format;
}
+/**
+ * Common code shared by ListFormat, RelativeTimeFormat, PluralRules, and
+ * Segmenter
+ */
+template <class T>
+Object DisallowCallConstructor(BuiltinArguments args, Isolate* isolate,
+ v8::Isolate::UseCounterFeature feature,
+ const char* method) {
+ isolate->CountUsage(feature);
+
+ // 1. If NewTarget is undefined, throw a TypeError exception.
+ if (args.new_target()->IsUndefined(isolate)) { // [[Call]]
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate,
+ NewTypeError(MessageTemplate::kConstructorNotFunction,
+ isolate->factory()->NewStringFromAsciiChecked(method)));
+ }
+ // [[Construct]]
+ Handle<JSFunction> target = args.target();
+ Handle<JSReceiver> new_target = Handle<JSReceiver>::cast(args.new_target());
+
+ Handle<JSObject> obj;
+ // 2. Let result be OrdinaryCreateFromConstructor(NewTarget,
+ // "%<T>Prototype%").
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, obj,
+ JSObject::New(target, new_target, Handle<AllocationSite>::null()));
+ Handle<T> result = Handle<T>::cast(obj);
+ result->set_flags(0);
+
+ Handle<Object> locales = args.atOrUndefined(isolate, 1);
+ Handle<Object> options = args.atOrUndefined(isolate, 2);
+
+ // 3. Return Initialize<T>(t, locales, options).
+ RETURN_RESULT_OR_FAILURE(isolate,
+ T::Initialize(isolate, result, locales, options));
+}
+
+/**
+ * Common code shared by Collator and V8BreakIterator
+ */
+template <class T>
+Object CallOrConstructConstructor(BuiltinArguments args, Isolate* isolate) {
+ Handle<JSReceiver> new_target;
+
+ if (args.new_target()->IsUndefined(isolate)) {
+ new_target = args.target();
+ } else {
+ new_target = Handle<JSReceiver>::cast(args.new_target());
+ }
+
+ // [[Construct]]
+ Handle<JSFunction> target = args.target();
+
+ Handle<Object> locales = args.atOrUndefined(isolate, 1);
+ Handle<Object> options = args.atOrUndefined(isolate, 2);
+
+ Handle<JSObject> obj;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, obj,
+ JSObject::New(target, new_target, Handle<AllocationSite>::null()));
+ Handle<T> result = Handle<T>::cast(obj);
+
+ RETURN_RESULT_OR_FAILURE(isolate,
+ T::Initialize(isolate, result, locales, options));
+}
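
Taken together, LegacyFormatConstructor, DisallowCallConstructor, and CallOrConstructConstructor replace a set of near-identical constructor bodies with three templates that differ only in their [[Call]] policy. A plain-C++ sketch of the deduplication pattern, not V8 API:

#include <iostream>

template <class T>
int DisallowCallCtor(bool has_new_target, const char* method) {
  if (!has_new_target) {           // [[Call]]: reject, per the spec
    std::cout << method << " requires 'new'\n";
    return 0;
  }
  return T::Initialize();          // [[Construct]]: create and initialize
}

template <class T>
int CallOrConstructCtor(bool /*has_new_target*/) {
  return T::Initialize();          // plain calls behave like construction
}

struct PluralRulesLike { static int Initialize() { return 1; } };
struct CollatorLike    { static int Initialize() { return 2; } };

int main() {
  DisallowCallCtor<PluralRulesLike>(false, "Intl.PluralRules");   // rejected
  std::cout << CallOrConstructCtor<CollatorLike>(false) << "\n";  // 2
}
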
} // namespace
BUILTIN(NumberFormatConstructor) {
HandleScope scope(isolate);
- isolate->CountUsage(v8::Isolate::UseCounterFeature::kNumberFormat);
-
- return FormatConstructor<JSNumberFormat>(
- args, isolate, isolate->intl_number_format_function(),
- "Intl.NumberFormat");
+ return LegacyFormatConstructor<JSNumberFormat>(
+ args, isolate, v8::Isolate::UseCounterFeature::kNumberFormat,
+ isolate->intl_number_format_function(), "Intl.NumberFormat");
}
BUILTIN(NumberFormatPrototypeResolvedOptions) {
@@ -413,25 +405,22 @@ BUILTIN(NumberFormatInternalFormatNumber) {
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, number_obj,
Object::ToNumber(isolate, value));
- // Spec treats -0 as 0.
- if (number_obj->IsMinusZero()) {
- number_obj = Handle<Smi>(Smi::kZero, isolate);
- }
-
double number = number_obj->Number();
+ icu::NumberFormat* icu_number_format =
+ number_format->icu_number_format()->raw();
+ CHECK_NOT_NULL(icu_number_format);
+
// Return FormatNumber(nf, x).
- RETURN_RESULT_OR_FAILURE(
- isolate, JSNumberFormat::FormatNumber(isolate, number_format, number));
+ RETURN_RESULT_OR_FAILURE(isolate, JSNumberFormat::FormatNumber(
+ isolate, *icu_number_format, number));
}
BUILTIN(DateTimeFormatConstructor) {
HandleScope scope(isolate);
- isolate->CountUsage(v8::Isolate::UseCounterFeature::kDateTimeFormat);
-
- return FormatConstructor<JSDateTimeFormat>(
- args, isolate, isolate->intl_date_time_format_function(),
- "Intl.DateTimeFormat");
+ return LegacyFormatConstructor<JSDateTimeFormat>(
+ args, isolate, v8::Isolate::UseCounterFeature::kDateTimeFormat,
+ isolate->intl_date_time_format_function(), "Intl.DateTimeFormat");
}
BUILTIN(DateTimeFormatPrototypeFormat) {
@@ -485,37 +474,20 @@ BUILTIN(DateTimeFormatInternalFormat) {
isolate, date_format_holder, date));
}
-BUILTIN(ListFormatConstructor) {
+BUILTIN(IntlGetCanonicalLocales) {
HandleScope scope(isolate);
+ Handle<Object> locales = args.atOrUndefined(isolate, 1);
- isolate->CountUsage(v8::Isolate::UseCounterFeature::kListFormat);
-
- // 1. If NewTarget is undefined, throw a TypeError exception.
- if (args.new_target()->IsUndefined(isolate)) { // [[Call]]
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kConstructorNotFunction,
- isolate->factory()->NewStringFromStaticChars(
- "Intl.ListFormat")));
- }
- // [[Construct]]
- Handle<JSFunction> target = args.target();
- Handle<JSReceiver> new_target = Handle<JSReceiver>::cast(args.new_target());
-
- Handle<JSObject> result;
- // 2. Let listFormat be OrdinaryCreateFromConstructor(NewTarget,
- // "%ListFormatPrototype%").
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result,
- JSObject::New(target, new_target, Handle<AllocationSite>::null()));
- Handle<JSListFormat> format = Handle<JSListFormat>::cast(result);
- format->set_flags(0);
+ RETURN_RESULT_OR_FAILURE(isolate,
+ Intl::GetCanonicalLocales(isolate, locales));
+}
- Handle<Object> locales = args.atOrUndefined(isolate, 1);
- Handle<Object> options = args.atOrUndefined(isolate, 2);
+BUILTIN(ListFormatConstructor) {
+ HandleScope scope(isolate);
- // 3. Return InitializeListFormat(listFormat, locales, options).
- RETURN_RESULT_OR_FAILURE(
- isolate, JSListFormat::Initialize(isolate, format, locales, options));
+ return DisallowCallConstructor<JSListFormat>(
+ args, isolate, v8::Isolate::UseCounterFeature::kListFormat,
+ "Intl.ListFormat");
}
BUILTIN(ListFormatPrototypeResolvedOptions) {
@@ -531,8 +503,9 @@ BUILTIN(ListFormatSupportedLocalesOf) {
Handle<Object> options = args.atOrUndefined(isolate, 2);
RETURN_RESULT_OR_FAILURE(
- isolate, Intl::SupportedLocalesOf(isolate, ICUService::kListFormatter,
- locales, options));
+ isolate, Intl::SupportedLocalesOf(
+ isolate, "Intl.ListFormat.supportedLocalesOf",
+ JSListFormat::GetAvailableLocales(), locales, options));
}
namespace {
@@ -558,10 +531,9 @@ MaybeHandle<JSLocale> CreateLocale(Isolate* isolate,
Handle<String> locale_string;
// 8. If Type(tag) is Object and tag has an [[InitializedLocale]] internal
// slot, then
- if (tag->IsJSLocale() && Handle<JSLocale>::cast(tag)->locale()->IsString()) {
+ if (tag->IsJSLocale()) {
// a. Let tag be tag.[[Locale]].
- locale_string =
- Handle<String>(Handle<JSLocale>::cast(tag)->locale(), isolate);
+ locale_string = JSLocale::ToString(isolate, Handle<JSLocale>::cast(tag));
} else { // 9. Else,
// a. Let tag be ? ToString(tag).
ASSIGN_RETURN_ON_EXCEPTION(isolate, locale_string,
@@ -610,26 +582,26 @@ BUILTIN(LocaleConstructor) {
BUILTIN(LocalePrototypeMaximize) {
HandleScope scope(isolate);
- CHECK_RECEIVER(JSLocale, locale_holder, "Intl.Locale.prototype.maximize");
+ CHECK_RECEIVER(JSLocale, locale, "Intl.Locale.prototype.maximize");
Handle<JSFunction> constructor(
isolate->native_context()->intl_locale_function(), isolate);
+ Handle<String> locale_str = JSLocale::ToString(isolate, locale);
RETURN_RESULT_OR_FAILURE(
- isolate,
- CreateLocale(isolate, constructor, constructor,
- JSLocale::Maximize(isolate, locale_holder->locale()),
- isolate->factory()->NewJSObjectWithNullProto()));
+ isolate, CreateLocale(isolate, constructor, constructor,
+ JSLocale::Maximize(isolate, *locale_str),
+ isolate->factory()->NewJSObjectWithNullProto()));
}
BUILTIN(LocalePrototypeMinimize) {
HandleScope scope(isolate);
- CHECK_RECEIVER(JSLocale, locale_holder, "Intl.Locale.prototype.minimize");
+ CHECK_RECEIVER(JSLocale, locale, "Intl.Locale.prototype.minimize");
Handle<JSFunction> constructor(
isolate->native_context()->intl_locale_function(), isolate);
+ Handle<String> locale_str = JSLocale::ToString(isolate, locale);
RETURN_RESULT_OR_FAILURE(
- isolate,
- CreateLocale(isolate, constructor, constructor,
- JSLocale::Minimize(isolate, locale_holder->locale()),
- isolate->factory()->NewJSObjectWithNullProto()));
+ isolate, CreateLocale(isolate, constructor, constructor,
+ JSLocale::Minimize(isolate, *locale_str),
+ isolate->factory()->NewJSObjectWithNullProto()));
}
BUILTIN(RelativeTimeFormatSupportedLocalesOf) {
@@ -639,8 +611,9 @@ BUILTIN(RelativeTimeFormatSupportedLocalesOf) {
RETURN_RESULT_OR_FAILURE(
isolate,
- Intl::SupportedLocalesOf(isolate, ICUService::kRelativeDateTimeFormatter,
- locales, options));
+ Intl::SupportedLocalesOf(
+ isolate, "Intl.RelativeTimeFormat.supportedLocalesOf",
+ JSRelativeTimeFormat::GetAvailableLocales(), locales, options));
}
BUILTIN(RelativeTimeFormatPrototypeFormat) {
@@ -678,125 +651,87 @@ BUILTIN(RelativeTimeFormatPrototypeFormatToParts) {
BUILTIN(LocalePrototypeLanguage) {
HandleScope scope(isolate);
  // CHECK_RECEIVER will cast the receiver to JSLocale.
- CHECK_RECEIVER(JSLocale, locale_holder, "Intl.Locale.prototype.language");
+ CHECK_RECEIVER(JSLocale, locale, "Intl.Locale.prototype.language");
- return locale_holder->language();
+ return *JSLocale::Language(isolate, locale);
}
BUILTIN(LocalePrototypeScript) {
HandleScope scope(isolate);
- CHECK_RECEIVER(JSLocale, locale_holder, "Intl.Locale.prototype.script");
+ CHECK_RECEIVER(JSLocale, locale, "Intl.Locale.prototype.script");
- return locale_holder->script();
+ return *JSLocale::Script(isolate, locale);
}
BUILTIN(LocalePrototypeRegion) {
HandleScope scope(isolate);
- CHECK_RECEIVER(JSLocale, locale_holder, "Intl.Locale.prototype.region");
+ CHECK_RECEIVER(JSLocale, locale, "Intl.Locale.prototype.region");
- return locale_holder->region();
+ return *JSLocale::Region(isolate, locale);
}
BUILTIN(LocalePrototypeBaseName) {
HandleScope scope(isolate);
- CHECK_RECEIVER(JSLocale, locale_holder, "Intl.Locale.prototype.baseName");
+ CHECK_RECEIVER(JSLocale, locale, "Intl.Locale.prototype.baseName");
- return locale_holder->base_name();
+ return *JSLocale::BaseName(isolate, locale);
}
BUILTIN(LocalePrototypeCalendar) {
HandleScope scope(isolate);
- CHECK_RECEIVER(JSLocale, locale_holder, "Intl.Locale.prototype.calendar");
+ CHECK_RECEIVER(JSLocale, locale, "Intl.Locale.prototype.calendar");
- return locale_holder->calendar();
+ return *JSLocale::Calendar(isolate, locale);
}
BUILTIN(LocalePrototypeCaseFirst) {
HandleScope scope(isolate);
- CHECK_RECEIVER(JSLocale, locale_holder, "Intl.Locale.prototype.caseFirst");
+ CHECK_RECEIVER(JSLocale, locale, "Intl.Locale.prototype.caseFirst");
- return *(locale_holder->CaseFirstAsString());
+ return *JSLocale::CaseFirst(isolate, locale);
}
BUILTIN(LocalePrototypeCollation) {
HandleScope scope(isolate);
- CHECK_RECEIVER(JSLocale, locale_holder, "Intl.Locale.prototype.collation");
+ CHECK_RECEIVER(JSLocale, locale, "Intl.Locale.prototype.collation");
- return locale_holder->collation();
+ return *JSLocale::Collation(isolate, locale);
}
BUILTIN(LocalePrototypeHourCycle) {
HandleScope scope(isolate);
- CHECK_RECEIVER(JSLocale, locale_holder, "Intl.Locale.prototype.hourCycle");
+ CHECK_RECEIVER(JSLocale, locale, "Intl.Locale.prototype.hourCycle");
- return *(locale_holder->HourCycleAsString());
+ return *JSLocale::HourCycle(isolate, locale);
}
BUILTIN(LocalePrototypeNumeric) {
HandleScope scope(isolate);
- CHECK_RECEIVER(JSLocale, locale_holder, "Intl.Locale.prototype.numeric");
-
- switch (locale_holder->numeric()) {
- case JSLocale::Numeric::TRUE_VALUE:
- return *(isolate->factory()->true_value());
- case JSLocale::Numeric::FALSE_VALUE:
- return *(isolate->factory()->false_value());
- case JSLocale::Numeric::NOTSET:
- return *(isolate->factory()->undefined_value());
- case JSLocale::Numeric::COUNT:
- UNREACHABLE();
- }
+ CHECK_RECEIVER(JSLocale, locale, "Intl.Locale.prototype.numeric");
+
+ return *JSLocale::Numeric(isolate, locale);
}
BUILTIN(LocalePrototypeNumberingSystem) {
HandleScope scope(isolate);
- CHECK_RECEIVER(JSLocale, locale_holder,
- "Intl.Locale.prototype.numberingSystem");
+ CHECK_RECEIVER(JSLocale, locale, "Intl.Locale.prototype.numberingSystem");
- return locale_holder->numbering_system();
+ return *JSLocale::NumberingSystem(isolate, locale);
}
BUILTIN(LocalePrototypeToString) {
HandleScope scope(isolate);
- CHECK_RECEIVER(JSLocale, locale_holder, "Intl.Locale.prototype.toString");
+ CHECK_RECEIVER(JSLocale, locale, "Intl.Locale.prototype.toString");
- return locale_holder->locale();
+ return *JSLocale::ToString(isolate, locale);
}
BUILTIN(RelativeTimeFormatConstructor) {
HandleScope scope(isolate);
- isolate->CountUsage(v8::Isolate::UseCounterFeature::kRelativeTimeFormat);
-
- // 1. If NewTarget is undefined, throw a TypeError exception.
- if (args.new_target()->IsUndefined(isolate)) { // [[Call]]
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kConstructorNotFunction,
- isolate->factory()->NewStringFromStaticChars(
- "Intl.RelativeTimeFormat")));
- }
- // [[Construct]]
- Handle<JSFunction> target = args.target();
- Handle<JSReceiver> new_target = Handle<JSReceiver>::cast(args.new_target());
-
- Handle<JSObject> result;
- // 2. Let relativeTimeFormat be
- // ! OrdinaryCreateFromConstructor(NewTarget,
- // "%RelativeTimeFormatPrototype%").
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result,
- JSObject::New(target, new_target, Handle<AllocationSite>::null()));
- Handle<JSRelativeTimeFormat> format =
- Handle<JSRelativeTimeFormat>::cast(result);
- format->set_flags(0);
-
- Handle<Object> locales = args.atOrUndefined(isolate, 1);
- Handle<Object> options = args.atOrUndefined(isolate, 2);
-
- // 3. Return ? InitializeRelativeTimeFormat(relativeTimeFormat, locales,
- // options).
- RETURN_RESULT_OR_FAILURE(isolate, JSRelativeTimeFormat::Initialize(
- isolate, format, locales, options));
+ return DisallowCallConstructor<JSRelativeTimeFormat>(
+ args, isolate, v8::Isolate::UseCounterFeature::kRelativeTimeFormat,
+ "Intl.RelativeTimeFormat");
}
BUILTIN(RelativeTimeFormatPrototypeResolvedOptions) {
@@ -833,39 +768,9 @@ BUILTIN(StringPrototypeToLocaleUpperCase) {
BUILTIN(PluralRulesConstructor) {
HandleScope scope(isolate);
- isolate->CountUsage(v8::Isolate::UseCounterFeature::kPluralRules);
-
- // 1. If NewTarget is undefined, throw a TypeError exception.
- if (args.new_target()->IsUndefined(isolate)) { // [[Call]]
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kConstructorNotFunction,
- isolate->factory()->NewStringFromStaticChars(
- "Intl.PluralRules")));
- }
-
- // [[Construct]]
- Handle<JSFunction> target = args.target();
- Handle<JSReceiver> new_target = Handle<JSReceiver>::cast(args.new_target());
-
- Handle<Object> locales = args.atOrUndefined(isolate, 1);
- Handle<Object> options = args.atOrUndefined(isolate, 2);
-
- // 2. Let pluralRules be ? OrdinaryCreateFromConstructor(newTarget,
- // "%PluralRulesPrototype%", « [[InitializedPluralRules]],
- // [[Locale]], [[Type]], [[MinimumIntegerDigits]],
- // [[MinimumFractionDigits]], [[MaximumFractionDigits]],
- // [[MinimumSignificantDigits]], [[MaximumSignificantDigits]] »).
- Handle<JSObject> plural_rules_obj;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, plural_rules_obj,
- JSObject::New(target, new_target, Handle<AllocationSite>::null()));
- Handle<JSPluralRules> plural_rules =
- Handle<JSPluralRules>::cast(plural_rules_obj);
-
- // 3. Return ? InitializePluralRules(pluralRules, locales, options).
- RETURN_RESULT_OR_FAILURE(
- isolate,
- JSPluralRules::Initialize(isolate, plural_rules, locales, options));
+ return DisallowCallConstructor<JSPluralRules>(
+ args, isolate, v8::Isolate::UseCounterFeature::kPluralRules,
+ "Intl.PluralRules");
}
BUILTIN(PluralRulesPrototypeResolvedOptions) {
@@ -902,8 +807,9 @@ BUILTIN(PluralRulesSupportedLocalesOf) {
Handle<Object> options = args.atOrUndefined(isolate, 2);
RETURN_RESULT_OR_FAILURE(
- isolate, Intl::SupportedLocalesOf(isolate, ICUService::kPluralRules,
- locales, options));
+ isolate, Intl::SupportedLocalesOf(
+ isolate, "Intl.PluralRules.supportedLocalesOf",
+ JSPluralRules::GetAvailableLocales(), locales, options));
}
BUILTIN(CollatorConstructor) {
@@ -911,32 +817,7 @@ BUILTIN(CollatorConstructor) {
isolate->CountUsage(v8::Isolate::UseCounterFeature::kCollator);
- Handle<JSReceiver> new_target;
- // 1. If NewTarget is undefined, let newTarget be the active
- // function object, else let newTarget be NewTarget.
- if (args.new_target()->IsUndefined(isolate)) {
- new_target = args.target();
- } else {
- new_target = Handle<JSReceiver>::cast(args.new_target());
- }
-
- // [[Construct]]
- Handle<JSFunction> target = args.target();
-
- Handle<Object> locales = args.atOrUndefined(isolate, 1);
- Handle<Object> options = args.atOrUndefined(isolate, 2);
-
- // 5. Let collator be ? OrdinaryCreateFromConstructor(newTarget,
- // "%CollatorPrototype%", internalSlotsList).
- Handle<JSObject> collator_obj;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, collator_obj,
- JSObject::New(target, new_target, Handle<AllocationSite>::null()));
- Handle<JSCollator> collator = Handle<JSCollator>::cast(collator_obj);
-
- // 6. Return ? InitializeCollator(collator, locales, options).
- RETURN_RESULT_OR_FAILURE(
- isolate, JSCollator::Initialize(isolate, collator, locales, options));
+ return CallOrConstructConstructor<JSCollator>(args, isolate);
}
BUILTIN(CollatorPrototypeResolvedOptions) {
@@ -952,8 +833,9 @@ BUILTIN(CollatorSupportedLocalesOf) {
Handle<Object> options = args.atOrUndefined(isolate, 2);
RETURN_RESULT_OR_FAILURE(
- isolate, Intl::SupportedLocalesOf(isolate, ICUService::kCollator, locales,
- options));
+ isolate, Intl::SupportedLocalesOf(
+ isolate, "Intl.Collator.supportedLocalesOf",
+ JSCollator::GetAvailableLocales(), locales, options));
}
BUILTIN(CollatorPrototypeCompare) {
@@ -991,7 +873,7 @@ BUILTIN(CollatorInternalCompare) {
// 1. Let collator be F.[[Collator]].
// 2. Assert: Type(collator) is Object and collator has an
// [[InitializedCollator]] internal slot.
- Handle<JSCollator> collator_holder = Handle<JSCollator>(
+ Handle<JSCollator> collator = Handle<JSCollator>(
JSCollator::cast(context->get(
static_cast<int>(Intl::BoundFunctionContextSlot::kBoundFunction))),
isolate);
@@ -1011,39 +893,73 @@ BUILTIN(CollatorInternalCompare) {
Object::ToString(isolate, y));
// 7. Return CompareStrings(collator, X, Y).
- return *Intl::CompareStrings(isolate, collator_holder, string_x, string_y);
+ icu::Collator* icu_collator = collator->icu_collator()->raw();
+ CHECK_NOT_NULL(icu_collator);
+ return *Intl::CompareStrings(isolate, *icu_collator, string_x, string_y);
}
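
Intl::CompareStrings now takes the raw icu::Collator directly instead of the JSCollator handle. A sketch of the underlying ICU call this bottoms out in, assuming ICU and eliding error handling:

#include <iostream>
#include <memory>
#include <unicode/coll.h>
#include <unicode/unistr.h>

int main() {
  UErrorCode status = U_ZERO_ERROR;
  std::unique_ptr<icu::Collator> coll(
      icu::Collator::createInstance(icu::Locale("en"), status));
  // UCOL_LESS(-1) / UCOL_EQUAL(0) / UCOL_GREATER(1) match the -1/0/1
  // contract of a JavaScript comparator.
  std::cout << coll->compare(icu::UnicodeString("a"),
                             icu::UnicodeString("b"), status)
            << "\n";  // -1
}
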
-BUILTIN(SegmenterConstructor) {
+// ecma402 #sec-segment-iterator-prototype-breakType
+BUILTIN(SegmentIteratorPrototypeBreakType) {
+ const char* const method = "get %SegmentIteratorPrototype%.breakType";
HandleScope scope(isolate);
- isolate->CountUsage(v8::Isolate::UseCounterFeature::kSegmenter);
+ CHECK_RECEIVER(JSSegmentIterator, segment_iterator, method);
+ return *segment_iterator->BreakType();
+}
- // 1. If NewTarget is undefined, throw a TypeError exception.
- if (args.new_target()->IsUndefined(isolate)) { // [[Call]]
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kConstructorNotFunction,
- isolate->factory()->NewStringFromStaticChars(
- "Intl.Segmenter")));
- }
- // [[Construct]]
- Handle<JSFunction> target = args.target();
- Handle<JSReceiver> new_target = Handle<JSReceiver>::cast(args.new_target());
+// ecma402 #sec-segment-iterator-prototype-following
+BUILTIN(SegmentIteratorPrototypeFollowing) {
+ const char* const method = "%SegmentIteratorPrototype%.following";
+ HandleScope scope(isolate);
+ CHECK_RECEIVER(JSSegmentIterator, segment_iterator, method);
- Handle<JSObject> result;
- // 2. Let segmenter be OrdinaryCreateFromConstructor(NewTarget,
- // "%SegmenterPrototype%").
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result,
- JSObject::New(target, new_target, Handle<AllocationSite>::null()));
- Handle<JSSegmenter> segmenter = Handle<JSSegmenter>::cast(result);
- segmenter->set_flags(0);
+ Handle<Object> from = args.atOrUndefined(isolate, 1);
- Handle<Object> locales = args.atOrUndefined(isolate, 1);
- Handle<Object> options = args.atOrUndefined(isolate, 2);
+ Maybe<bool> success =
+ JSSegmentIterator::Following(isolate, segment_iterator, from);
+ MAYBE_RETURN(success, ReadOnlyRoots(isolate).exception());
+ return *isolate->factory()->ToBoolean(success.FromJust());
+}
- RETURN_RESULT_OR_FAILURE(
- isolate, JSSegmenter::Initialize(isolate, segmenter, locales, options));
+// ecma402 #sec-segment-iterator-prototype-next
+BUILTIN(SegmentIteratorPrototypeNext) {
+ const char* const method = "%SegmentIteratorPrototype%.next";
+ HandleScope scope(isolate);
+ CHECK_RECEIVER(JSSegmentIterator, segment_iterator, method);
+
+ RETURN_RESULT_OR_FAILURE(isolate,
+ JSSegmentIterator::Next(isolate, segment_iterator));
+}
+
+// ecma402 #sec-segment-iterator-prototype-preceding
+BUILTIN(SegmentIteratorPrototypePreceding) {
+ const char* const method = "%SegmentIteratorPrototype%.preceding";
+ HandleScope scope(isolate);
+ CHECK_RECEIVER(JSSegmentIterator, segment_iterator, method);
+
+ Handle<Object> from = args.atOrUndefined(isolate, 1);
+
+ Maybe<bool> success =
+ JSSegmentIterator::Preceding(isolate, segment_iterator, from);
+ MAYBE_RETURN(success, ReadOnlyRoots(isolate).exception());
+ return *isolate->factory()->ToBoolean(success.FromJust());
+}
+
+// ecma402 #sec-segment-iterator-prototype-index
+BUILTIN(SegmentIteratorPrototypeIndex) {
+ const char* const method = "get %SegmentIteratorPrototype%.index";
+ HandleScope scope(isolate);
+
+ CHECK_RECEIVER(JSSegmentIterator, segment_iterator, method);
+ return *JSSegmentIterator::Index(isolate, segment_iterator);
+}
+
+BUILTIN(SegmenterConstructor) {
+ HandleScope scope(isolate);
+
+ return DisallowCallConstructor<JSSegmenter>(
+ args, isolate, v8::Isolate::UseCounterFeature::kSegmenter,
+ "Intl.Segmenter");
}
BUILTIN(SegmenterSupportedLocalesOf) {
@@ -1052,8 +968,9 @@ BUILTIN(SegmenterSupportedLocalesOf) {
Handle<Object> options = args.atOrUndefined(isolate, 2);
RETURN_RESULT_OR_FAILURE(
- isolate, Intl::SupportedLocalesOf(isolate, ICUService::kSegmenter,
- locales, options));
+ isolate, Intl::SupportedLocalesOf(
+ isolate, "Intl.Segmenter.supportedLocalesOf",
+ JSSegmenter::GetAvailableLocales(), locales, options));
}
BUILTIN(SegmenterPrototypeResolvedOptions) {
@@ -1063,32 +980,29 @@ BUILTIN(SegmenterPrototypeResolvedOptions) {
return *JSSegmenter::ResolvedOptions(isolate, segmenter_holder);
}
-BUILTIN(V8BreakIteratorConstructor) {
+// ecma402 #sec-Intl.Segmenter.prototype.segment
+BUILTIN(SegmenterPrototypeSegment) {
HandleScope scope(isolate);
- Handle<JSReceiver> new_target;
-
- if (args.new_target()->IsUndefined(isolate)) {
- new_target = args.target();
- } else {
- new_target = Handle<JSReceiver>::cast(args.new_target());
- }
-
- // [[Construct]]
- Handle<JSFunction> target = args.target();
-
- Handle<Object> locales = args.atOrUndefined(isolate, 1);
- Handle<Object> options = args.atOrUndefined(isolate, 2);
-
- Handle<JSObject> break_iterator_obj;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, break_iterator_obj,
- JSObject::New(target, new_target, Handle<AllocationSite>::null()));
- Handle<JSV8BreakIterator> break_iterator =
- Handle<JSV8BreakIterator>::cast(break_iterator_obj);
+ CHECK_RECEIVER(JSSegmenter, segmenter_holder,
+ "Intl.Segmenter.prototype.segment");
+ Handle<Object> input_text = args.atOrUndefined(isolate, 1);
+ // 3. Let string be ? ToString(string).
+ Handle<String> text;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, text,
+ Object::ToString(isolate, input_text));
+ // 4. Return ? CreateSegmentIterator(segment, string).
RETURN_RESULT_OR_FAILURE(
isolate,
- JSV8BreakIterator::Initialize(isolate, break_iterator, locales, options));
+ JSSegmentIterator::Create(
+ isolate, segmenter_holder->icu_break_iterator()->raw()->clone(),
+ segmenter_holder->granularity(), text));
+}
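
Note the clone() in SegmenterPrototypeSegment: every segment() call gets its own copy of the segmenter's ICU break iterator, so multiple iterators over the same segmenter keep independent positions. A sketch of that design, assuming ICU:

#include <memory>
#include <unicode/brkiter.h>
#include <unicode/locid.h>
#include <unicode/unistr.h>

std::unique_ptr<icu::BreakIterator> MakeSegmentIterator(
    const icu::BreakIterator& segmenter_iterator,
    const icu::UnicodeString& text) {
  // Clone so this iterator's position is independent of the segmenter
  // and of any previously created iterators.
  std::unique_ptr<icu::BreakIterator> it(segmenter_iterator.clone());
  it->setText(text);
  return it;
}

int main() {
  UErrorCode status = U_ZERO_ERROR;
  std::unique_ptr<icu::BreakIterator> base(
      icu::BreakIterator::createWordInstance(icu::Locale("en"), status));
  icu::UnicodeString text("hello world");
  auto it1 = MakeSegmentIterator(*base, text);
  auto it2 = MakeSegmentIterator(*base, text);
  it1->next();  // moving it1 does not affect it2
}
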
+
+BUILTIN(V8BreakIteratorConstructor) {
+ HandleScope scope(isolate);
+
+ return CallOrConstructConstructor<JSV8BreakIterator>(args, isolate);
}
BUILTIN(V8BreakIteratorPrototypeResolvedOptions) {
@@ -1120,7 +1034,7 @@ BUILTIN(V8BreakIteratorInternalAdoptText) {
HandleScope scope(isolate);
Handle<Context> context = Handle<Context>(isolate->context(), isolate);
- Handle<JSV8BreakIterator> break_iterator_holder = Handle<JSV8BreakIterator>(
+ Handle<JSV8BreakIterator> break_iterator = Handle<JSV8BreakIterator>(
JSV8BreakIterator::cast(context->get(
static_cast<int>(Intl::BoundFunctionContextSlot::kBoundFunction))),
isolate);
@@ -1130,7 +1044,7 @@ BUILTIN(V8BreakIteratorInternalAdoptText) {
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, text,
Object::ToString(isolate, input_text));
- JSV8BreakIterator::AdoptText(isolate, break_iterator_holder, text);
+ JSV8BreakIterator::AdoptText(isolate, break_iterator, text);
return ReadOnlyRoots(isolate).undefined_value();
}
@@ -1138,18 +1052,17 @@ BUILTIN(V8BreakIteratorPrototypeFirst) {
const char* const method = "get Intl.v8BreakIterator.prototype.first";
HandleScope scope(isolate);
- CHECK_RECEIVER(JSV8BreakIterator, break_iterator_holder, method);
+ CHECK_RECEIVER(JSV8BreakIterator, break_iterator, method);
- Handle<Object> bound_first(break_iterator_holder->bound_first(), isolate);
+ Handle<Object> bound_first(break_iterator->bound_first(), isolate);
if (!bound_first->IsUndefined(isolate)) {
DCHECK(bound_first->IsJSFunction());
return *bound_first;
}
- Handle<JSFunction> new_bound_first_function =
- CreateBoundFunction(isolate, break_iterator_holder,
- Builtins::kV8BreakIteratorInternalFirst, 0);
- break_iterator_holder->set_bound_first(*new_bound_first_function);
+ Handle<JSFunction> new_bound_first_function = CreateBoundFunction(
+ isolate, break_iterator, Builtins::kV8BreakIteratorInternalFirst, 0);
+ break_iterator->set_bound_first(*new_bound_first_function);
return *new_bound_first_function;
}
@@ -1157,34 +1070,29 @@ BUILTIN(V8BreakIteratorInternalFirst) {
HandleScope scope(isolate);
Handle<Context> context = Handle<Context>(isolate->context(), isolate);
- Handle<JSV8BreakIterator> break_iterator_holder = Handle<JSV8BreakIterator>(
+ Handle<JSV8BreakIterator> break_iterator = Handle<JSV8BreakIterator>(
JSV8BreakIterator::cast(context->get(
static_cast<int>(Intl::BoundFunctionContextSlot::kBoundFunction))),
isolate);
- icu::BreakIterator* break_iterator =
- break_iterator_holder->break_iterator()->raw();
- CHECK_NOT_NULL(break_iterator);
-
- return *isolate->factory()->NewNumberFromInt(break_iterator->first());
+ return *JSV8BreakIterator::First(isolate, break_iterator);
}
BUILTIN(V8BreakIteratorPrototypeNext) {
const char* const method = "get Intl.v8BreakIterator.prototype.next";
HandleScope scope(isolate);
- CHECK_RECEIVER(JSV8BreakIterator, break_iterator_holder, method);
+ CHECK_RECEIVER(JSV8BreakIterator, break_iterator, method);
- Handle<Object> bound_next(break_iterator_holder->bound_next(), isolate);
+ Handle<Object> bound_next(break_iterator->bound_next(), isolate);
if (!bound_next->IsUndefined(isolate)) {
DCHECK(bound_next->IsJSFunction());
return *bound_next;
}
- Handle<JSFunction> new_bound_next_function =
- CreateBoundFunction(isolate, break_iterator_holder,
- Builtins::kV8BreakIteratorInternalNext, 0);
- break_iterator_holder->set_bound_next(*new_bound_next_function);
+ Handle<JSFunction> new_bound_next_function = CreateBoundFunction(
+ isolate, break_iterator, Builtins::kV8BreakIteratorInternalNext, 0);
+ break_iterator->set_bound_next(*new_bound_next_function);
return *new_bound_next_function;
}
@@ -1192,34 +1100,28 @@ BUILTIN(V8BreakIteratorInternalNext) {
HandleScope scope(isolate);
Handle<Context> context = Handle<Context>(isolate->context(), isolate);
- Handle<JSV8BreakIterator> break_iterator_holder = Handle<JSV8BreakIterator>(
+ Handle<JSV8BreakIterator> break_iterator = Handle<JSV8BreakIterator>(
JSV8BreakIterator::cast(context->get(
static_cast<int>(Intl::BoundFunctionContextSlot::kBoundFunction))),
isolate);
-
- icu::BreakIterator* break_iterator =
- break_iterator_holder->break_iterator()->raw();
- CHECK_NOT_NULL(break_iterator);
-
- return *isolate->factory()->NewNumberFromInt(break_iterator->next());
+ return *JSV8BreakIterator::Next(isolate, break_iterator);
}
BUILTIN(V8BreakIteratorPrototypeCurrent) {
const char* const method = "get Intl.v8BreakIterator.prototype.current";
HandleScope scope(isolate);
- CHECK_RECEIVER(JSV8BreakIterator, break_iterator_holder, method);
+ CHECK_RECEIVER(JSV8BreakIterator, break_iterator, method);
- Handle<Object> bound_current(break_iterator_holder->bound_current(), isolate);
+ Handle<Object> bound_current(break_iterator->bound_current(), isolate);
if (!bound_current->IsUndefined(isolate)) {
DCHECK(bound_current->IsJSFunction());
return *bound_current;
}
- Handle<JSFunction> new_bound_current_function =
- CreateBoundFunction(isolate, break_iterator_holder,
- Builtins::kV8BreakIteratorInternalCurrent, 0);
- break_iterator_holder->set_bound_current(*new_bound_current_function);
+ Handle<JSFunction> new_bound_current_function = CreateBoundFunction(
+ isolate, break_iterator, Builtins::kV8BreakIteratorInternalCurrent, 0);
+ break_iterator->set_bound_current(*new_bound_current_function);
return *new_bound_current_function;
}
@@ -1227,35 +1129,28 @@ BUILTIN(V8BreakIteratorInternalCurrent) {
HandleScope scope(isolate);
Handle<Context> context = Handle<Context>(isolate->context(), isolate);
- Handle<JSV8BreakIterator> break_iterator_holder = Handle<JSV8BreakIterator>(
+ Handle<JSV8BreakIterator> break_iterator = Handle<JSV8BreakIterator>(
JSV8BreakIterator::cast(context->get(
static_cast<int>(Intl::BoundFunctionContextSlot::kBoundFunction))),
isolate);
-
- icu::BreakIterator* break_iterator =
- break_iterator_holder->break_iterator()->raw();
- CHECK_NOT_NULL(break_iterator);
-
- return *isolate->factory()->NewNumberFromInt(break_iterator->current());
+ return *JSV8BreakIterator::Current(isolate, break_iterator);
}
BUILTIN(V8BreakIteratorPrototypeBreakType) {
const char* const method = "get Intl.v8BreakIterator.prototype.breakType";
HandleScope scope(isolate);
- CHECK_RECEIVER(JSV8BreakIterator, break_iterator_holder, method);
+ CHECK_RECEIVER(JSV8BreakIterator, break_iterator, method);
- Handle<Object> bound_break_type(break_iterator_holder->bound_break_type(),
- isolate);
+ Handle<Object> bound_break_type(break_iterator->bound_break_type(), isolate);
if (!bound_break_type->IsUndefined(isolate)) {
DCHECK(bound_break_type->IsJSFunction());
return *bound_break_type;
}
- Handle<JSFunction> new_bound_break_type_function =
- CreateBoundFunction(isolate, break_iterator_holder,
- Builtins::kV8BreakIteratorInternalBreakType, 0);
- break_iterator_holder->set_bound_break_type(*new_bound_break_type_function);
+ Handle<JSFunction> new_bound_break_type_function = CreateBoundFunction(
+ isolate, break_iterator, Builtins::kV8BreakIteratorInternalBreakType, 0);
+ break_iterator->set_bound_break_type(*new_bound_break_type_function);
return *new_bound_break_type_function;
}
@@ -1263,30 +1158,11 @@ BUILTIN(V8BreakIteratorInternalBreakType) {
HandleScope scope(isolate);
Handle<Context> context = Handle<Context>(isolate->context(), isolate);
- Handle<JSV8BreakIterator> break_iterator_holder = Handle<JSV8BreakIterator>(
+ Handle<JSV8BreakIterator> break_iterator = Handle<JSV8BreakIterator>(
JSV8BreakIterator::cast(context->get(
static_cast<int>(Intl::BoundFunctionContextSlot::kBoundFunction))),
isolate);
-
- icu::BreakIterator* break_iterator =
- break_iterator_holder->break_iterator()->raw();
- CHECK_NOT_NULL(break_iterator);
-
- int32_t status = break_iterator->getRuleStatus();
- // Keep return values in sync with JavaScript BreakType enum.
- if (status >= UBRK_WORD_NONE && status < UBRK_WORD_NONE_LIMIT) {
- return *isolate->factory()->NewStringFromStaticChars("none");
- } else if (status >= UBRK_WORD_NUMBER && status < UBRK_WORD_NUMBER_LIMIT) {
- return ReadOnlyRoots(isolate).number_string();
- } else if (status >= UBRK_WORD_LETTER && status < UBRK_WORD_LETTER_LIMIT) {
- return *isolate->factory()->NewStringFromStaticChars("letter");
- } else if (status >= UBRK_WORD_KANA && status < UBRK_WORD_KANA_LIMIT) {
- return *isolate->factory()->NewStringFromStaticChars("kana");
- } else if (status >= UBRK_WORD_IDEO && status < UBRK_WORD_IDEO_LIMIT) {
- return *isolate->factory()->NewStringFromStaticChars("ideo");
- } else {
- return *isolate->factory()->NewStringFromStaticChars("unknown");
- }
+ return JSV8BreakIterator::BreakType(isolate, break_iterator);
}
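
The deleted branch ladder above now lives in JSV8BreakIterator::BreakType; the rule-status ranges it checks come straight from ICU's word-break constants. The same mapping as a standalone helper, assuming ICU:

#include <unicode/brkiter.h>
#include <unicode/ubrk.h>

// Keep return values in sync with the JavaScript BreakType enum.
const char* BreakTypeOf(icu::BreakIterator& it) {
  int32_t status = it.getRuleStatus();
  if (status >= UBRK_WORD_NONE && status < UBRK_WORD_NONE_LIMIT) return "none";
  if (status >= UBRK_WORD_NUMBER && status < UBRK_WORD_NUMBER_LIMIT)
    return "number";
  if (status >= UBRK_WORD_LETTER && status < UBRK_WORD_LETTER_LIMIT)
    return "letter";
  if (status >= UBRK_WORD_KANA && status < UBRK_WORD_KANA_LIMIT) return "kana";
  if (status >= UBRK_WORD_IDEO && status < UBRK_WORD_IDEO_LIMIT) return "ideo";
  return "unknown";
}
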
} // namespace internal
diff --git a/deps/v8/src/builtins/builtins-iterator-gen.cc b/deps/v8/src/builtins/builtins-iterator-gen.cc
index 802ed2edb2..ec8cfd1d78 100644
--- a/deps/v8/src/builtins/builtins-iterator-gen.cc
+++ b/deps/v8/src/builtins/builtins-iterator-gen.cc
@@ -5,15 +5,19 @@
#include "src/builtins/builtins-iterator-gen.h"
#include "src/builtins/growable-fixed-array-gen.h"
+#include "src/builtins/builtins-collections-gen.h"
#include "src/builtins/builtins-string-gen.h"
#include "src/builtins/builtins-utils-gen.h"
#include "src/builtins/builtins.h"
#include "src/code-stub-assembler.h"
#include "src/heap/factory-inl.h"
+#include "torque-generated/builtins-base-from-dsl-gen.h"
namespace v8 {
namespace internal {
+typedef IteratorBuiltinsFromDSLAssembler::IteratorRecord IteratorRecord;
+
using compiler::Node;
TNode<Object> IteratorBuiltinsAssembler::GetIteratorMethod(Node* context,
@@ -73,7 +77,7 @@ IteratorRecord IteratorBuiltinsAssembler::GetIterator(Node* context,
}
}
-Node* IteratorBuiltinsAssembler::IteratorStep(
+TNode<Object> IteratorBuiltinsAssembler::IteratorStep(
Node* context, const IteratorRecord& iterator, Label* if_done,
Node* fast_iterator_result_map, Label* if_exception, Variable* exception) {
DCHECK_NOT_NULL(if_done);
@@ -123,7 +127,7 @@ Node* IteratorBuiltinsAssembler::IteratorStep(
}
BIND(&return_result);
- return result;
+ return UncheckedCast<Object>(result);
}
Node* IteratorBuiltinsAssembler::IteratorValue(Node* context, Node* result,
@@ -163,8 +167,7 @@ void IteratorBuiltinsAssembler::IteratorCloseOnException(
// Perform ES #sec-iteratorclose when an exception occurs. This simpler
// algorithm does not include redundant steps which are never reachable from
// the spec IteratorClose algorithm.
- DCHECK_NOT_NULL(if_exception);
- DCHECK_NOT_NULL(exception);
+ DCHECK((if_exception != nullptr && exception != nullptr));
CSA_ASSERT(this, IsNotTheHole(exception->value()));
CSA_ASSERT(this, IsJSReceiver(iterator.object));
@@ -189,12 +192,13 @@ void IteratorBuiltinsAssembler::IteratorCloseOnException(
}
void IteratorBuiltinsAssembler::IteratorCloseOnException(
- Node* context, const IteratorRecord& iterator, Variable* exception) {
+ Node* context, const IteratorRecord& iterator, TNode<Object> exception) {
Label rethrow(this, Label::kDeferred);
- IteratorCloseOnException(context, iterator, &rethrow, exception);
+ TVARIABLE(Object, exception_variable, exception);
+ IteratorCloseOnException(context, iterator, &rethrow, &exception_variable);
BIND(&rethrow);
- CallRuntime(Runtime::kReThrow, context, exception->value());
+ CallRuntime(Runtime::kReThrow, context, exception_variable.value());
Unreachable();
}
@@ -215,7 +219,7 @@ TNode<JSArray> IteratorBuiltinsAssembler::IterableToList(
BIND(&loop_start);
{
// a. Set next to ? IteratorStep(iteratorRecord).
- TNode<Object> next = CAST(IteratorStep(context, iterator_record, &done));
+ TNode<Object> next = IteratorStep(context, iterator_record, &done);
// b. If next is not false, then
// i. Let nextValue be ? IteratorValue(next).
TNode<Object> next_value = CAST(IteratorValue(context, next));
@@ -252,7 +256,7 @@ TF_BUILTIN(IterableToListMayPreserveHoles, IteratorBuiltinsAssembler) {
Label slow_path(this);
- GotoIfNot(IsFastJSArrayWithNoCustomIteration(iterable, context), &slow_path);
+ GotoIfNot(IsFastJSArrayWithNoCustomIteration(context, iterable), &slow_path);
// The fast path will copy holes to the new array.
TailCallBuiltin(Builtins::kCloneFastJSArray, context, iterable);
@@ -261,38 +265,85 @@ TF_BUILTIN(IterableToListMayPreserveHoles, IteratorBuiltinsAssembler) {
TailCallBuiltin(Builtins::kIterableToList, context, iterable, iterator_fn);
}
-// This builtin loads the property Symbol.iterator as the iterator, and has a
-// fast path for fast arrays and another one for strings. These fast paths will
-// only be taken if Symbol.iterator and the Iterator prototype are not modified
-// in a way that changes the original iteration behavior.
-// * In case of fast holey arrays, holes will be converted to undefined to
-// reflect iteration semantics. Note that replacement by undefined is only
-// correct when the NoElements protector is valid.
-TF_BUILTIN(IterableToListWithSymbolLookup, IteratorBuiltinsAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> iterable = CAST(Parameter(Descriptor::kIterable));
-
- Label slow_path(this), check_string(this);
-
- GotoIfForceSlowPath(&slow_path);
+void IteratorBuiltinsAssembler::FastIterableToList(
+ TNode<Context> context, TNode<Object> iterable,
+ TVariable<Object>* var_result, Label* slow) {
+ Label done(this), check_string(this), check_map(this), check_set(this);
- GotoIfNot(IsFastJSArrayWithNoCustomIteration(iterable, context),
+ GotoIfNot(IsFastJSArrayWithNoCustomIteration(context, iterable),
&check_string);
// Fast path for fast JSArray.
- TailCallBuiltin(Builtins::kCloneFastJSArrayFillingHoles, context, iterable);
+ *var_result =
+ CallBuiltin(Builtins::kCloneFastJSArrayFillingHoles, context, iterable);
+ Goto(&done);
BIND(&check_string);
{
+ Label string_maybe_fast_call(this);
StringBuiltinsAssembler string_assembler(state());
- GotoIfNot(string_assembler.IsStringPrimitiveWithNoCustomIteration(iterable,
- context),
- &slow_path);
+ string_assembler.BranchIfStringPrimitiveWithNoCustomIteration(
+ iterable, context, &string_maybe_fast_call, &check_map);
+
+ BIND(&string_maybe_fast_call);
+ TNode<IntPtrT> const length = LoadStringLengthAsWord(CAST(iterable));
+ // Use string length as conservative approximation of number of codepoints.
+ GotoIf(
+ IntPtrGreaterThan(length, IntPtrConstant(JSArray::kMaxFastArrayLength)),
+ slow);
+ *var_result = CallBuiltin(Builtins::kStringToList, context, iterable);
+ Goto(&done);
+ }
+
+ BIND(&check_map);
+ {
+ Label map_fast_call(this);
+ BranchIfIterableWithOriginalKeyOrValueMapIterator(
+ state(), iterable, context, &map_fast_call, &check_set);
- // Fast path for strings.
- TailCallBuiltin(Builtins::kStringToList, context, iterable);
+ BIND(&map_fast_call);
+ *var_result = CallBuiltin(Builtins::kMapIteratorToList, context, iterable);
+ Goto(&done);
+ }
+
+ BIND(&check_set);
+ {
+ Label set_fast_call(this);
+ BranchIfIterableWithOriginalValueSetIterator(state(), iterable, context,
+ &set_fast_call, slow);
+
+ BIND(&set_fast_call);
+ *var_result =
+ CallBuiltin(Builtins::kSetOrSetIteratorToList, context, iterable);
+ Goto(&done);
}
+ BIND(&done);
+}
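
FastIterableToList chains four structural checks, fast array, then primitive string, then map iterator, then set iterator, falling through to the caller's slow label. A plain-C++ sketch of that cascade shape; the real checks are CSA branches over maps and protector cells:

#include <functional>
#include <iostream>
#include <optional>
#include <string>
#include <vector>

using List = std::vector<std::string>;
using Check = std::function<std::optional<List>()>;

List IterableToList(const std::vector<Check>& fast_paths,
                    const std::function<List()>& slow) {
  for (const auto& check : fast_paths)
    if (auto result = check()) return *result;  // first fast path wins
  return slow();  // generic Symbol.iterator protocol
}

int main() {
  auto miss = [] { return std::optional<List>(); };
  auto hit = [] { return std::optional<List>(List{"a", "b"}); };
  std::cout << IterableToList({miss, hit}, [] { return List{}; }).size()
            << "\n";  // 2
}
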
+
+// This builtin loads the property Symbol.iterator as the iterator, and has fast
+// paths for fast arrays, for primitive strings, for sets and set iterators, and
+// for map iterators. These fast paths will only be taken if Symbol.iterator and
+// the Iterator prototype are not modified in a way that changes the original
+// iteration behavior.
+// * In case of fast holey arrays, holes will be converted to undefined to
+// reflect iteration semantics. Note that replacement by undefined is only
+// correct when the NoElements protector is valid.
+// * In case of map/set iterators, there is an additional requirement that the
+// iterator is not partially consumed. To be spec-compliant, after spreading
+// the iterator is set to be exhausted.
+TF_BUILTIN(IterableToListWithSymbolLookup, IteratorBuiltinsAssembler) {
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> iterable = CAST(Parameter(Descriptor::kIterable));
+
+ Label slow_path(this);
+
+ GotoIfForceSlowPath(&slow_path);
+
+ TVARIABLE(Object, var_result);
+ FastIterableToList(context, iterable, &var_result, &slow_path);
+ Return(var_result.value());
+
BIND(&slow_path);
{
TNode<Object> iterator_fn = GetIteratorMethod(context, iterable);
diff --git a/deps/v8/src/builtins/builtins-iterator-gen.h b/deps/v8/src/builtins/builtins-iterator-gen.h
index f61f7f52c0..2c79e9095a 100644
--- a/deps/v8/src/builtins/builtins-iterator-gen.h
+++ b/deps/v8/src/builtins/builtins-iterator-gen.h
@@ -6,16 +6,19 @@
#define V8_BUILTINS_BUILTINS_ITERATOR_GEN_H_
#include "src/code-stub-assembler.h"
+#include "torque-generated/builtins-base-from-dsl-gen.h"
+#include "torque-generated/builtins-iterator-from-dsl-gen.h"
namespace v8 {
namespace internal {
using compiler::Node;
-class IteratorBuiltinsAssembler : public CodeStubAssembler {
+class IteratorBuiltinsAssembler : public CodeStubAssembler,
+ public IteratorBuiltinsFromDSLAssembler {
public:
explicit IteratorBuiltinsAssembler(compiler::CodeAssemblerState* state)
- : CodeStubAssembler(state) {}
+ : CodeStubAssembler(state), IteratorBuiltinsFromDSLAssembler(state) {}
// Returns object[Symbol.iterator].
TNode<Object> GetIteratorMethod(Node* context, Node* object);
@@ -34,10 +37,16 @@ class IteratorBuiltinsAssembler : public CodeStubAssembler {
// iterator result.
// `fast_iterator_result_map` refers to the map for the JSIteratorResult
// object, loaded from the native context.
- Node* IteratorStep(Node* context, const IteratorRecord& iterator,
- Label* if_done, Node* fast_iterator_result_map = nullptr,
- Label* if_exception = nullptr,
- Variable* exception = nullptr);
+ TNode<Object> IteratorStep(Node* context, const IteratorRecord& iterator,
+ Label* if_done,
+ Node* fast_iterator_result_map = nullptr,
+ Label* if_exception = nullptr,
+ Variable* exception = nullptr);
+
+ TNode<Object> IteratorStep(Node* context, const IteratorRecord& iterator,
+ Node* fast_iterator_result_map, Label* if_done) {
+ return IteratorStep(context, iterator, if_done, fast_iterator_result_map);
+ }
// https://tc39.github.io/ecma262/#sec-iteratorvalue
// Return the `value` field from an iterator.
@@ -52,13 +61,16 @@ class IteratorBuiltinsAssembler : public CodeStubAssembler {
void IteratorCloseOnException(Node* context, const IteratorRecord& iterator,
Label* if_exception, Variable* exception);
void IteratorCloseOnException(Node* context, const IteratorRecord& iterator,
- Variable* exception);
+ TNode<Object> exception);
// #sec-iterabletolist
// Build a JSArray by iterating over {iterable} using {iterator_fn},
  // following the ECMAScript operation with the same name.
TNode<JSArray> IterableToList(TNode<Context> context, TNode<Object> iterable,
TNode<Object> iterator_fn);
+
+ void FastIterableToList(TNode<Context> context, TNode<Object> iterable,
+ TVariable<Object>* var_result, Label* slow);
};
} // namespace internal
diff --git a/deps/v8/src/builtins/builtins-lazy-gen.cc b/deps/v8/src/builtins/builtins-lazy-gen.cc
index c11722ec6b..dae32f2d52 100644
--- a/deps/v8/src/builtins/builtins-lazy-gen.cc
+++ b/deps/v8/src/builtins/builtins-lazy-gen.cc
@@ -120,6 +120,13 @@ void LazyBuiltinsAssembler::CompileLazy(TNode<JSFunction> function) {
// First lookup code, maybe we don't need to compile!
Label compile_function(this, Label::kDeferred);
+ // Check the code object for the SFI. If SFI's code entry points to
+ // CompileLazy, then we need to lazy compile regardless of the function or
+ // feedback vector marker.
+ TNode<SharedFunctionInfo> shared =
+ CAST(LoadObjectField(function, JSFunction::kSharedFunctionInfoOffset));
+ TNode<Code> sfi_code = GetSharedFunctionInfoCode(shared, &compile_function);
+
// Compile function if we don't have a valid feedback vector.
TNode<FeedbackVector> feedback_vector =
LoadFeedbackVector(function, &compile_function);
@@ -127,23 +134,14 @@ void LazyBuiltinsAssembler::CompileLazy(TNode<JSFunction> function) {
// Is there an optimization marker or optimized code in the feedback vector?
MaybeTailCallOptimizedCodeSlot(function, feedback_vector);
- // We found no optimized code. Infer the code object needed for the SFI.
- TNode<SharedFunctionInfo> shared =
- CAST(LoadObjectField(function, JSFunction::kSharedFunctionInfoOffset));
- // If code entry points to anything other than CompileLazy, install that,
- // otherwise call runtime to compile the function.
- TNode<Code> code = GetSharedFunctionInfoCode(shared, &compile_function);
-
- CSA_ASSERT(
- this,
- WordNotEqual(code, HeapConstant(BUILTIN_CODE(isolate(), CompileLazy))));
-
- // Install the SFI's code entry.
- StoreObjectField(function, JSFunction::kCodeOffset, code);
- GenerateTailCallToJSCode(code, function);
+ // If not, install the SFI's code entry and jump to that.
+ CSA_ASSERT(this, WordNotEqual(sfi_code, HeapConstant(BUILTIN_CODE(
+ isolate(), CompileLazy))));
+ StoreObjectField(function, JSFunction::kCodeOffset, sfi_code);
+ GenerateTailCallToJSCode(sfi_code, function);
BIND(&compile_function);
- { GenerateTailCallToReturnedCode(Runtime::kCompileLazy, function); }
+ GenerateTailCallToReturnedCode(Runtime::kCompileLazy, function);
}
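
The reordering matters: the SFI's code is now resolved first, so a SharedFunctionInfo whose code entry is still CompileLazy forces compilation even when the function already has a feedback vector. A sketch of the resulting decision order, in plain C++ rather than CSA:

#include <iostream>

enum class Action { kCompile, kTailCallOptimized, kTailCallSfiCode };

Action CompileLazyDecision(bool sfi_code_is_compile_lazy,
                           bool has_feedback_vector,
                           bool has_optimized_code) {
  if (sfi_code_is_compile_lazy) return Action::kCompile;  // checked first now
  if (!has_feedback_vector) return Action::kCompile;
  if (has_optimized_code) return Action::kTailCallOptimized;
  return Action::kTailCallSfiCode;  // install SFI code on the function, jump
}

int main() {
  // Even with a feedback vector, a CompileLazy code entry means compile.
  std::cout << (CompileLazyDecision(true, true, false) == Action::kCompile)
            << "\n";  // 1
}
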
TF_BUILTIN(CompileLazy, LazyBuiltinsAssembler) {
@@ -161,43 +159,5 @@ TF_BUILTIN(CompileLazyDeoptimizedCode, LazyBuiltinsAssembler) {
GenerateTailCallToJSCode(code, function);
}
-// Lazy deserialization design doc: http://goo.gl/dxkYDZ.
-TF_BUILTIN(DeserializeLazy, LazyBuiltinsAssembler) {
- Label deserialize_in_runtime(this, Label::kDeferred);
-
- TNode<JSFunction> function = CAST(Parameter(Descriptor::kTarget));
-
- // Load the builtin id for lazy deserialization from SharedFunctionInfo.
- TNode<SharedFunctionInfo> shared =
- CAST(LoadObjectField(function, JSFunction::kSharedFunctionInfoOffset));
-
- TNode<Smi> sfi_data =
- CAST(LoadObjectField(shared, SharedFunctionInfo::kFunctionDataOffset));
-
- // The builtin may already have been deserialized. If that is the case, it is
- // stored in the builtins table, and we can copy the correct code object to
- // both the shared function info and function without calling into runtime.
- //
- // Otherwise, we need to call into runtime to deserialize.
-
- TNode<Code> code = LoadBuiltin(sfi_data);
-
- // Check if the loaded code object has already been deserialized. This is
- // the case iff it does not equal DeserializeLazy.
- GotoIf(
- WordEqual(code, HeapConstant(BUILTIN_CODE(isolate(), DeserializeLazy))),
- &deserialize_in_runtime);
-
- // If we've reached this spot, the target builtin has been deserialized and
- // we simply need to copy it over to the target function.
- StoreObjectField(function, JSFunction::kCodeOffset, code);
-
- // All copying is done. Jump to the deserialized code object.
- GenerateTailCallToJSCode(code, function);
-
- BIND(&deserialize_in_runtime);
- { GenerateTailCallToReturnedCode(Runtime::kDeserializeLazy, function); }
-}
-
} // namespace internal
} // namespace v8
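
The reordering above hinges on one invariant: if the SharedFunctionInfo's code entry still points at the CompileLazy builtin, the function must be compiled no matter what the function or its feedback vector says, so that check now runs before the feedback-vector load instead of after it. The DeserializeLazy builtin is deleted outright, consistent with builtins being embedded in the binary by this V8 version, which leaves nothing to deserialize lazily. A minimal sketch of the new control flow, using stand-in types rather than V8's CSA API:

    // Stand-in model of the reordered CompileLazy fast path; Code,
    // SharedInfo and Function are editorial types, not V8's.
    struct Code { const char* kind; };
    static Code kCompileLazyStub{"CompileLazy"};
    static Code kBytecode{"bytecode"};

    struct SharedInfo { Code* code = &kCompileLazyStub; };
    struct Function { SharedInfo* shared; Code* code; bool has_feedback_vector; };

    Code* RuntimeCompileLazy(Function* f) {  // models Runtime::kCompileLazy
      f->shared->code = &kBytecode;
      return f->code = &kBytecode;
    }

    Code* CompileLazyModel(Function* f) {
      // 1. Consult the SFI first: still CompileLazy => compile, regardless
      //    of the feedback vector or any optimization marker.
      Code* sfi_code = f->shared->code;
      if (sfi_code == &kCompileLazyStub) return RuntimeCompileLazy(f);
      // 2. No valid feedback vector => also fall back to the runtime.
      if (!f->has_feedback_vector) return RuntimeCompileLazy(f);
      // 3. Otherwise install the SFI's code on the function and run it.
      return f->code = sfi_code;
    }

    int main() {
      SharedInfo shared;
      Function f{&shared, &kCompileLazyStub, /*has_feedback_vector=*/false};
      return CompileLazyModel(&f) == &kBytecode ? 0 : 1;
    }
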
diff --git a/deps/v8/src/builtins/builtins-microtask-queue-gen.cc b/deps/v8/src/builtins/builtins-microtask-queue-gen.cc
new file mode 100644
index 0000000000..0c00777dd3
--- /dev/null
+++ b/deps/v8/src/builtins/builtins-microtask-queue-gen.cc
@@ -0,0 +1,545 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/api.h"
+#include "src/builtins/builtins-utils-gen.h"
+#include "src/code-stub-assembler.h"
+#include "src/microtask-queue.h"
+#include "src/objects/js-weak-refs.h"
+#include "src/objects/microtask-inl.h"
+#include "src/objects/promise.h"
+
+namespace v8 {
+namespace internal {
+
+template <typename T>
+using TNode = compiler::TNode<T>;
+
+class MicrotaskQueueBuiltinsAssembler : public CodeStubAssembler {
+ public:
+ explicit MicrotaskQueueBuiltinsAssembler(compiler::CodeAssemblerState* state)
+ : CodeStubAssembler(state) {}
+
+ TNode<RawPtrT> GetMicrotaskQueue(TNode<Context> context);
+ TNode<RawPtrT> GetMicrotaskRingBuffer(TNode<RawPtrT> microtask_queue);
+ TNode<IntPtrT> GetMicrotaskQueueCapacity(TNode<RawPtrT> microtask_queue);
+ TNode<IntPtrT> GetMicrotaskQueueSize(TNode<RawPtrT> microtask_queue);
+ void SetMicrotaskQueueSize(TNode<RawPtrT> microtask_queue,
+ TNode<IntPtrT> new_size);
+ TNode<IntPtrT> GetMicrotaskQueueStart(TNode<RawPtrT> microtask_queue);
+ void SetMicrotaskQueueStart(TNode<RawPtrT> microtask_queue,
+ TNode<IntPtrT> new_start);
+ TNode<IntPtrT> CalculateRingBufferOffset(TNode<IntPtrT> capacity,
+ TNode<IntPtrT> start,
+ TNode<IntPtrT> index);
+ void RunSingleMicrotask(TNode<Context> current_context,
+ TNode<Microtask> microtask);
+
+ TNode<Context> GetCurrentContext();
+ void SetCurrentContext(TNode<Context> context);
+
+ TNode<IntPtrT> GetEnteredContextCount();
+ void EnterMicrotaskContext(TNode<Context> native_context);
+ void RewindEnteredContext(TNode<IntPtrT> saved_entered_context_count);
+
+ void RunPromiseHook(Runtime::FunctionId id, TNode<Context> context,
+ SloppyTNode<HeapObject> promise_or_capability);
+};
+
+TNode<RawPtrT> MicrotaskQueueBuiltinsAssembler::GetMicrotaskQueue(
+ TNode<Context> native_context) {
+ CSA_ASSERT(this, IsNativeContext(native_context));
+ return LoadObjectField<RawPtrT>(native_context,
+ NativeContext::kMicrotaskQueueOffset);
+}
+
+TNode<RawPtrT> MicrotaskQueueBuiltinsAssembler::GetMicrotaskRingBuffer(
+ TNode<RawPtrT> microtask_queue) {
+ return UncheckedCast<RawPtrT>(
+ Load(MachineType::Pointer(), microtask_queue,
+ IntPtrConstant(MicrotaskQueue::kRingBufferOffset)));
+}
+
+TNode<IntPtrT> MicrotaskQueueBuiltinsAssembler::GetMicrotaskQueueCapacity(
+ TNode<RawPtrT> microtask_queue) {
+ return UncheckedCast<IntPtrT>(
+ Load(MachineType::IntPtr(), microtask_queue,
+ IntPtrConstant(MicrotaskQueue::kCapacityOffset)));
+}
+
+TNode<IntPtrT> MicrotaskQueueBuiltinsAssembler::GetMicrotaskQueueSize(
+ TNode<RawPtrT> microtask_queue) {
+ return UncheckedCast<IntPtrT>(
+ Load(MachineType::IntPtr(), microtask_queue,
+ IntPtrConstant(MicrotaskQueue::kSizeOffset)));
+}
+
+void MicrotaskQueueBuiltinsAssembler::SetMicrotaskQueueSize(
+ TNode<RawPtrT> microtask_queue, TNode<IntPtrT> new_size) {
+ StoreNoWriteBarrier(MachineType::PointerRepresentation(), microtask_queue,
+ IntPtrConstant(MicrotaskQueue::kSizeOffset), new_size);
+}
+
+TNode<IntPtrT> MicrotaskQueueBuiltinsAssembler::GetMicrotaskQueueStart(
+ TNode<RawPtrT> microtask_queue) {
+ return UncheckedCast<IntPtrT>(
+ Load(MachineType::IntPtr(), microtask_queue,
+ IntPtrConstant(MicrotaskQueue::kStartOffset)));
+}
+
+void MicrotaskQueueBuiltinsAssembler::SetMicrotaskQueueStart(
+ TNode<RawPtrT> microtask_queue, TNode<IntPtrT> new_start) {
+ StoreNoWriteBarrier(MachineType::PointerRepresentation(), microtask_queue,
+ IntPtrConstant(MicrotaskQueue::kStartOffset), new_start);
+}
+
+TNode<IntPtrT> MicrotaskQueueBuiltinsAssembler::CalculateRingBufferOffset(
+ TNode<IntPtrT> capacity, TNode<IntPtrT> start, TNode<IntPtrT> index) {
+ return TimesSystemPointerSize(
+ WordAnd(IntPtrAdd(start, index), IntPtrSub(capacity, IntPtrConstant(1))));
+}
+
+void MicrotaskQueueBuiltinsAssembler::RunSingleMicrotask(
+ TNode<Context> current_context, TNode<Microtask> microtask) {
+ CSA_ASSERT(this, TaggedIsNotSmi(microtask));
+
+ StoreRoot(RootIndex::kCurrentMicrotask, microtask);
+ TNode<IntPtrT> saved_entered_context_count = GetEnteredContextCount();
+ TNode<Map> microtask_map = LoadMap(microtask);
+ TNode<Int32T> microtask_type = LoadMapInstanceType(microtask_map);
+
+ VARIABLE(var_exception, MachineRepresentation::kTagged, TheHoleConstant());
+ Label if_exception(this, Label::kDeferred);
+ Label is_callable(this), is_callback(this),
+ is_promise_fulfill_reaction_job(this),
+ is_promise_reject_reaction_job(this),
+ is_promise_resolve_thenable_job(this), is_weak_factory_cleanup_job(this),
+ is_unreachable(this, Label::kDeferred), done(this);
+
+ int32_t case_values[] = {CALLABLE_TASK_TYPE,
+ CALLBACK_TASK_TYPE,
+ PROMISE_FULFILL_REACTION_JOB_TASK_TYPE,
+ PROMISE_REJECT_REACTION_JOB_TASK_TYPE,
+ PROMISE_RESOLVE_THENABLE_JOB_TASK_TYPE,
+ WEAK_FACTORY_CLEANUP_JOB_TASK_TYPE};
+ Label* case_labels[] = {&is_callable,
+ &is_callback,
+ &is_promise_fulfill_reaction_job,
+ &is_promise_reject_reaction_job,
+ &is_promise_resolve_thenable_job,
+ &is_weak_factory_cleanup_job};
+ static_assert(arraysize(case_values) == arraysize(case_labels), "");
+ Switch(microtask_type, &is_unreachable, case_values, case_labels,
+ arraysize(case_labels));
+
+ BIND(&is_callable);
+ {
+ // Enter the context of the {microtask}.
+ TNode<Context> microtask_context =
+ LoadObjectField<Context>(microtask, CallableTask::kContextOffset);
+ TNode<Context> native_context = LoadNativeContext(microtask_context);
+
+ CSA_ASSERT(this, IsNativeContext(native_context));
+ EnterMicrotaskContext(native_context);
+ SetCurrentContext(native_context);
+
+ TNode<JSReceiver> callable =
+ LoadObjectField<JSReceiver>(microtask, CallableTask::kCallableOffset);
+ Node* const result = CallJS(
+ CodeFactory::Call(isolate(), ConvertReceiverMode::kNullOrUndefined),
+ microtask_context, callable, UndefinedConstant());
+ GotoIfException(result, &if_exception, &var_exception);
+ RewindEnteredContext(saved_entered_context_count);
+ SetCurrentContext(current_context);
+ Goto(&done);
+ }
+
+ BIND(&is_callback);
+ {
+ Node* const microtask_callback =
+ LoadObjectField(microtask, CallbackTask::kCallbackOffset);
+ Node* const microtask_data =
+ LoadObjectField(microtask, CallbackTask::kDataOffset);
+
+ // If this turns out to become a bottleneck because of the calls
+ // to C++ via CEntry, we can choose to speed them up using a
+ // similar mechanism that we use for the CallApiFunction stub,
+ // except that calling the MicrotaskCallback is even easier, since
+ // it doesn't accept any tagged parameters, doesn't return a value
+ // and ignores exceptions.
+ //
+ // But from our current measurements it doesn't seem to be a
+ // serious performance problem, even if the microtask queue is full
+ // of CallHandlerTasks (which is not a realistic use case anyways).
+ Node* const result =
+ CallRuntime(Runtime::kRunMicrotaskCallback, current_context,
+ microtask_callback, microtask_data);
+ GotoIfException(result, &if_exception, &var_exception);
+ Goto(&done);
+ }
+
+ BIND(&is_promise_resolve_thenable_job);
+ {
+ // Enter the context of the {microtask}.
+ TNode<Context> microtask_context = LoadObjectField<Context>(
+ microtask, PromiseResolveThenableJobTask::kContextOffset);
+ TNode<Context> native_context = LoadNativeContext(microtask_context);
+ CSA_ASSERT(this, IsNativeContext(native_context));
+ EnterMicrotaskContext(native_context);
+ SetCurrentContext(native_context);
+
+ Node* const promise_to_resolve = LoadObjectField(
+ microtask, PromiseResolveThenableJobTask::kPromiseToResolveOffset);
+ Node* const then =
+ LoadObjectField(microtask, PromiseResolveThenableJobTask::kThenOffset);
+ Node* const thenable = LoadObjectField(
+ microtask, PromiseResolveThenableJobTask::kThenableOffset);
+
+ Node* const result =
+ CallBuiltin(Builtins::kPromiseResolveThenableJob, native_context,
+ promise_to_resolve, thenable, then);
+ GotoIfException(result, &if_exception, &var_exception);
+ RewindEnteredContext(saved_entered_context_count);
+ SetCurrentContext(current_context);
+ Goto(&done);
+ }
+
+ BIND(&is_promise_fulfill_reaction_job);
+ {
+ // Enter the context of the {microtask}.
+ TNode<Context> microtask_context = LoadObjectField<Context>(
+ microtask, PromiseReactionJobTask::kContextOffset);
+ TNode<Context> native_context = LoadNativeContext(microtask_context);
+ CSA_ASSERT(this, IsNativeContext(native_context));
+ EnterMicrotaskContext(native_context);
+ SetCurrentContext(native_context);
+
+ Node* const argument =
+ LoadObjectField(microtask, PromiseReactionJobTask::kArgumentOffset);
+ Node* const handler =
+ LoadObjectField(microtask, PromiseReactionJobTask::kHandlerOffset);
+ Node* const promise_or_capability = LoadObjectField(
+ microtask, PromiseReactionJobTask::kPromiseOrCapabilityOffset);
+
+ // Run the promise before/debug hook if enabled.
+ RunPromiseHook(Runtime::kPromiseHookBefore, microtask_context,
+ promise_or_capability);
+
+ Node* const result =
+ CallBuiltin(Builtins::kPromiseFulfillReactionJob, microtask_context,
+ argument, handler, promise_or_capability);
+ GotoIfException(result, &if_exception, &var_exception);
+
+ // Run the promise after/debug hook if enabled.
+ RunPromiseHook(Runtime::kPromiseHookAfter, microtask_context,
+ promise_or_capability);
+
+ RewindEnteredContext(saved_entered_context_count);
+ SetCurrentContext(current_context);
+ Goto(&done);
+ }
+
+ BIND(&is_promise_reject_reaction_job);
+ {
+ // Enter the context of the {microtask}.
+ TNode<Context> microtask_context = LoadObjectField<Context>(
+ microtask, PromiseReactionJobTask::kContextOffset);
+ TNode<Context> native_context = LoadNativeContext(microtask_context);
+ CSA_ASSERT(this, IsNativeContext(native_context));
+ EnterMicrotaskContext(native_context);
+ SetCurrentContext(native_context);
+
+ Node* const argument =
+ LoadObjectField(microtask, PromiseReactionJobTask::kArgumentOffset);
+ Node* const handler =
+ LoadObjectField(microtask, PromiseReactionJobTask::kHandlerOffset);
+ Node* const promise_or_capability = LoadObjectField(
+ microtask, PromiseReactionJobTask::kPromiseOrCapabilityOffset);
+
+ // Run the promise before/debug hook if enabled.
+ RunPromiseHook(Runtime::kPromiseHookBefore, microtask_context,
+ promise_or_capability);
+
+ Node* const result =
+ CallBuiltin(Builtins::kPromiseRejectReactionJob, microtask_context,
+ argument, handler, promise_or_capability);
+ GotoIfException(result, &if_exception, &var_exception);
+
+ // Run the promise after/debug hook if enabled.
+ RunPromiseHook(Runtime::kPromiseHookAfter, microtask_context,
+ promise_or_capability);
+
+ RewindEnteredContext(saved_entered_context_count);
+ SetCurrentContext(current_context);
+ Goto(&done);
+ }
+
+ BIND(&is_weak_factory_cleanup_job);
+ {
+ // Enter the context of the {weak_factory}.
+ TNode<JSWeakFactory> weak_factory = LoadObjectField<JSWeakFactory>(
+ microtask, WeakFactoryCleanupJobTask::kFactoryOffset);
+ TNode<Context> native_context = LoadObjectField<Context>(
+ weak_factory, JSWeakFactory::kNativeContextOffset);
+ CSA_ASSERT(this, IsNativeContext(native_context));
+ EnterMicrotaskContext(native_context);
+ SetCurrentContext(native_context);
+
+ Node* const result = CallRuntime(Runtime::kWeakFactoryCleanupJob,
+ native_context, weak_factory);
+
+ GotoIfException(result, &if_exception, &var_exception);
+ RewindEnteredContext(saved_entered_context_count);
+ SetCurrentContext(current_context);
+ Goto(&done);
+ }
+
+ BIND(&is_unreachable);
+ Unreachable();
+
+ BIND(&if_exception);
+ {
+ // Report unhandled exceptions from microtasks.
+ CallRuntime(Runtime::kReportMessage, current_context,
+ var_exception.value());
+ RewindEnteredContext(saved_entered_context_count);
+ SetCurrentContext(current_context);
+ Goto(&done);
+ }
+
+ BIND(&done);
+}
+
+TNode<Context> MicrotaskQueueBuiltinsAssembler::GetCurrentContext() {
+ auto ref = ExternalReference::Create(kContextAddress, isolate());
+ return TNode<Context>::UncheckedCast(
+ Load(MachineType::AnyTagged(), ExternalConstant(ref)));
+}
+
+void MicrotaskQueueBuiltinsAssembler::SetCurrentContext(
+ TNode<Context> context) {
+ auto ref = ExternalReference::Create(kContextAddress, isolate());
+ StoreNoWriteBarrier(MachineRepresentation::kTagged, ExternalConstant(ref),
+ context);
+}
+
+TNode<IntPtrT> MicrotaskQueueBuiltinsAssembler::GetEnteredContextCount() {
+ auto ref = ExternalReference::handle_scope_implementer_address(isolate());
+ Node* hsi = Load(MachineType::Pointer(), ExternalConstant(ref));
+
+ using ContextStack = DetachableVector<Context>;
+ TNode<IntPtrT> size_offset =
+ IntPtrConstant(HandleScopeImplementer::kEnteredContextsOffset +
+ ContextStack::kSizeOffset);
+ TNode<IntPtrT> size =
+ UncheckedCast<IntPtrT>(Load(MachineType::IntPtr(), hsi, size_offset));
+ return size;
+}
+
+void MicrotaskQueueBuiltinsAssembler::EnterMicrotaskContext(
+ TNode<Context> native_context) {
+ CSA_ASSERT(this, IsNativeContext(native_context));
+
+ auto ref = ExternalReference::handle_scope_implementer_address(isolate());
+ Node* hsi = Load(MachineType::Pointer(), ExternalConstant(ref));
+
+ using ContextStack = DetachableVector<Context>;
+ TNode<IntPtrT> capacity_offset =
+ IntPtrConstant(HandleScopeImplementer::kEnteredContextsOffset +
+ ContextStack::kCapacityOffset);
+ TNode<IntPtrT> size_offset =
+ IntPtrConstant(HandleScopeImplementer::kEnteredContextsOffset +
+ ContextStack::kSizeOffset);
+
+ TNode<IntPtrT> capacity =
+ UncheckedCast<IntPtrT>(Load(MachineType::IntPtr(), hsi, capacity_offset));
+ TNode<IntPtrT> size =
+ UncheckedCast<IntPtrT>(Load(MachineType::IntPtr(), hsi, size_offset));
+
+ Label if_append(this), if_grow(this, Label::kDeferred), done(this);
+ Branch(WordEqual(size, capacity), &if_grow, &if_append);
+ BIND(&if_append);
+ {
+ TNode<IntPtrT> data_offset =
+ IntPtrConstant(HandleScopeImplementer::kEnteredContextsOffset +
+ ContextStack::kDataOffset);
+ Node* data = Load(MachineType::Pointer(), hsi, data_offset);
+ StoreNoWriteBarrier(MachineType::Pointer().representation(), data,
+ TimesSystemPointerSize(size),
+ BitcastTaggedToWord(native_context));
+
+ TNode<IntPtrT> new_size = IntPtrAdd(size, IntPtrConstant(1));
+ StoreNoWriteBarrier(MachineType::IntPtr().representation(), hsi,
+ size_offset, new_size);
+
+ using FlagStack = DetachableVector<int8_t>;
+ TNode<IntPtrT> flag_data_offset =
+ IntPtrConstant(HandleScopeImplementer::kIsMicrotaskContextOffset +
+ FlagStack::kDataOffset);
+ Node* flag_data = Load(MachineType::Pointer(), hsi, flag_data_offset);
+ StoreNoWriteBarrier(MachineType::Int8().representation(), flag_data, size,
+ BoolConstant(true));
+ StoreNoWriteBarrier(
+ MachineType::IntPtr().representation(), hsi,
+ IntPtrConstant(HandleScopeImplementer::kIsMicrotaskContextOffset +
+ FlagStack::kSizeOffset),
+ new_size);
+
+ Goto(&done);
+ }
+
+ BIND(&if_grow);
+ {
+ Node* function =
+ ExternalConstant(ExternalReference::call_enter_context_function());
+ CallCFunction2(MachineType::Int32(), MachineType::Pointer(),
+ MachineType::Pointer(), function, hsi,
+ BitcastTaggedToWord(native_context));
+ Goto(&done);
+ }
+
+ BIND(&done);
+}
+
+void MicrotaskQueueBuiltinsAssembler::RewindEnteredContext(
+ TNode<IntPtrT> saved_entered_context_count) {
+ auto ref = ExternalReference::handle_scope_implementer_address(isolate());
+ Node* hsi = Load(MachineType::Pointer(), ExternalConstant(ref));
+
+ using ContextStack = DetachableVector<Context>;
+ TNode<IntPtrT> size_offset =
+ IntPtrConstant(HandleScopeImplementer::kEnteredContextsOffset +
+ ContextStack::kSizeOffset);
+
+#ifdef ENABLE_VERIFY_CSA
+ TNode<IntPtrT> size =
+ UncheckedCast<IntPtrT>(Load(MachineType::IntPtr(), hsi, size_offset));
+ CSA_ASSERT(this, IntPtrLessThan(IntPtrConstant(0), size));
+ CSA_ASSERT(this, IntPtrLessThanOrEqual(saved_entered_context_count, size));
+#endif
+
+ StoreNoWriteBarrier(MachineType::IntPtr().representation(), hsi, size_offset,
+ saved_entered_context_count);
+
+ using FlagStack = DetachableVector<int8_t>;
+ StoreNoWriteBarrier(
+ MachineType::IntPtr().representation(), hsi,
+ IntPtrConstant(HandleScopeImplementer::kIsMicrotaskContextOffset +
+ FlagStack::kSizeOffset),
+ saved_entered_context_count);
+}
+
+void MicrotaskQueueBuiltinsAssembler::RunPromiseHook(
+ Runtime::FunctionId id, TNode<Context> context,
+ SloppyTNode<HeapObject> promise_or_capability) {
+ Label hook(this, Label::kDeferred), done_hook(this);
+ Branch(IsPromiseHookEnabledOrDebugIsActiveOrHasAsyncEventDelegate(), &hook,
+ &done_hook);
+ BIND(&hook);
+ {
+ // Get to the underlying JSPromise instance.
+ TNode<HeapObject> promise = Select<HeapObject>(
+ IsPromiseCapability(promise_or_capability),
+ [=] {
+ return CAST(LoadObjectField(promise_or_capability,
+ PromiseCapability::kPromiseOffset));
+ },
+
+ [=] { return promise_or_capability; });
+ GotoIf(IsUndefined(promise), &done_hook);
+ CallRuntime(id, context, promise);
+ Goto(&done_hook);
+ }
+ BIND(&done_hook);
+}
+
+TF_BUILTIN(EnqueueMicrotask, MicrotaskQueueBuiltinsAssembler) {
+ TNode<Microtask> microtask =
+ UncheckedCast<Microtask>(Parameter(Descriptor::kMicrotask));
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Context> native_context = LoadNativeContext(context);
+ TNode<RawPtrT> microtask_queue = GetMicrotaskQueue(native_context);
+
+ TNode<RawPtrT> ring_buffer = GetMicrotaskRingBuffer(microtask_queue);
+ TNode<IntPtrT> capacity = GetMicrotaskQueueCapacity(microtask_queue);
+ TNode<IntPtrT> size = GetMicrotaskQueueSize(microtask_queue);
+ TNode<IntPtrT> start = GetMicrotaskQueueStart(microtask_queue);
+
+ Label if_grow(this, Label::kDeferred);
+ GotoIf(IntPtrEqual(size, capacity), &if_grow);
+
+ // |microtask_queue| has an unused slot to store |microtask|.
+ {
+ StoreNoWriteBarrier(MachineType::PointerRepresentation(), ring_buffer,
+ CalculateRingBufferOffset(capacity, start, size),
+ BitcastTaggedToWord(microtask));
+ StoreNoWriteBarrier(MachineType::PointerRepresentation(), microtask_queue,
+ IntPtrConstant(MicrotaskQueue::kSizeOffset),
+ IntPtrAdd(size, IntPtrConstant(1)));
+ Return(UndefinedConstant());
+ }
+
+ // |microtask_queue| has no space to store |microtask|. Fall back to C++
+ // implementation to grow the buffer.
+ BIND(&if_grow);
+ {
+ Node* isolate_constant =
+ ExternalConstant(ExternalReference::isolate_address(isolate()));
+ Node* function =
+ ExternalConstant(ExternalReference::call_enqueue_microtask_function());
+ CallCFunction3(MachineType::AnyTagged(), MachineType::Pointer(),
+ MachineType::IntPtr(), MachineType::AnyTagged(), function,
+ isolate_constant, microtask_queue, microtask);
+ Return(UndefinedConstant());
+ }
+}
+
+TF_BUILTIN(RunMicrotasks, MicrotaskQueueBuiltinsAssembler) {
+ // Load the current context from the isolate.
+ TNode<Context> current_context = GetCurrentContext();
+
+ TNode<RawPtrT> microtask_queue =
+ UncheckedCast<RawPtrT>(Parameter(Descriptor::kMicrotaskQueue));
+
+ Label loop(this), done(this);
+ Goto(&loop);
+ BIND(&loop);
+
+ TNode<IntPtrT> size = GetMicrotaskQueueSize(microtask_queue);
+
+ // Exit if the queue is empty.
+ GotoIf(WordEqual(size, IntPtrConstant(0)), &done);
+
+ TNode<RawPtrT> ring_buffer = GetMicrotaskRingBuffer(microtask_queue);
+ TNode<IntPtrT> capacity = GetMicrotaskQueueCapacity(microtask_queue);
+ TNode<IntPtrT> start = GetMicrotaskQueueStart(microtask_queue);
+
+ TNode<IntPtrT> offset =
+ CalculateRingBufferOffset(capacity, start, IntPtrConstant(0));
+ TNode<RawPtrT> microtask_pointer =
+ UncheckedCast<RawPtrT>(Load(MachineType::Pointer(), ring_buffer, offset));
+ TNode<Microtask> microtask = CAST(BitcastWordToTagged(microtask_pointer));
+
+ TNode<IntPtrT> new_size = IntPtrSub(size, IntPtrConstant(1));
+ TNode<IntPtrT> new_start = WordAnd(IntPtrAdd(start, IntPtrConstant(1)),
+ IntPtrSub(capacity, IntPtrConstant(1)));
+
+ // Remove |microtask| from |ring_buffer| before running it, since its
+ // invocation may add another microtask into |ring_buffer|.
+ SetMicrotaskQueueSize(microtask_queue, new_size);
+ SetMicrotaskQueueStart(microtask_queue, new_start);
+
+ RunSingleMicrotask(current_context, microtask);
+ Goto(&loop);
+
+ BIND(&done);
+ {
+ // Reset the "current microtask" on the isolate.
+ StoreRoot(RootIndex::kCurrentMicrotask, UndefinedConstant());
+ Return(UndefinedConstant());
+ }
+}
+
+} // namespace internal
+} // namespace v8
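
Two details of the new builtin are worth calling out. The queue itself is a C++ object reached through raw off-heap pointers (RawPtrT plus StoreNoWriteBarrier), so GC write barriers do not apply to these stores; and CalculateRingBufferOffset replaces a modulo with a mask, which is sound only while the queue capacity stays a power of two. A runnable miniature of the wrap-around arithmetic shared by EnqueueMicrotask and the RunMicrotasks dequeue loop (constants are illustrative):

    #include <cassert>
    #include <cstddef>

    constexpr size_t kSystemPointerSize = sizeof(void*);

    // Mirrors CalculateRingBufferOffset: the byte offset of element |index|
    // counted from |start|, wrapping with a mask instead of a modulo.
    // Sound only when |capacity| is a nonzero power of two.
    size_t RingBufferOffset(size_t capacity, size_t start, size_t index) {
      assert(capacity != 0 && (capacity & (capacity - 1)) == 0);
      return ((start + index) & (capacity - 1)) * kSystemPointerSize;
    }

    int main() {
      // capacity 8, start 6: the element at logical index 2 wraps to slot 0.
      assert(RingBufferOffset(8, 6, 2) == 0);
      // The dequeue loop advances the same way: (start + 1) & (capacity - 1).
      size_t start = 7;
      start = (start + 1) & (8 - 1);
      assert(start == 0);
      return 0;
    }
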
diff --git a/deps/v8/src/builtins/builtins-object-gen.cc b/deps/v8/src/builtins/builtins-object-gen.cc
index fbac2e1abc..a6fa78504b 100644
--- a/deps/v8/src/builtins/builtins-object-gen.cc
+++ b/deps/v8/src/builtins/builtins-object-gen.cc
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/builtins/builtins-object-gen.h"
+
#include "src/builtins/builtins-utils-gen.h"
#include "src/builtins/builtins.h"
#include "src/code-stub-assembler.h"
@@ -11,6 +13,7 @@
#include "src/objects/js-generator.h"
#include "src/objects/property-descriptor-object.h"
#include "src/objects/shared-function-info.h"
+#include "src/property-details.h"
namespace v8 {
namespace internal {
@@ -218,9 +221,10 @@ void ObjectEntriesValuesBuiltinsAssembler::GetOwnValuesOrEntries(
BIND(&if_no_properties);
{
Node* native_context = LoadNativeContext(context);
- Node* array_map = LoadJSArrayElementsMap(PACKED_ELEMENTS, native_context);
- Node* empty_array = AllocateJSArray(PACKED_ELEMENTS, array_map,
- IntPtrConstant(0), SmiConstant(0));
+ TNode<Map> array_map =
+ LoadJSArrayElementsMap(PACKED_ELEMENTS, native_context);
+ TNode<JSArray> empty_array = AllocateJSArray(
+ PACKED_ELEMENTS, array_map, IntPtrConstant(0), SmiConstant(0));
Return(empty_array);
}
@@ -317,15 +321,15 @@ TNode<JSArray> ObjectEntriesValuesBuiltinsAssembler::FastGetOwnValuesOrEntries(
// Currently, we will not invoke getters,
// so, map will not be changed.
CSA_ASSERT(this, WordEqual(map, LoadMap(object)));
- TNode<Uint32T> descriptor_index = TNode<Uint32T>::UncheckedCast(
- TruncateIntPtrToInt32(var_descriptor_number.value()));
- Node* next_key = GetKey(descriptors, descriptor_index);
+ TNode<IntPtrT> descriptor_entry = var_descriptor_number.value();
+ Node* next_key = LoadKeyByDescriptorEntry(descriptors, descriptor_entry);
// Skip Symbols.
GotoIf(IsSymbol(next_key), &next_descriptor);
- TNode<Uint32T> details = TNode<Uint32T>::UncheckedCast(
- DescriptorArrayGetDetails(descriptors, descriptor_index));
+ TNode<Uint32T> details =
+ LoadDetailsByDescriptorEntry(descriptors, descriptor_entry);
+
TNode<Uint32T> kind = LoadPropertyKind(details);
// If property is accessor, we escape fast path and call runtime.
@@ -609,8 +613,9 @@ TF_BUILTIN(ObjectKeys, ObjectBuiltinsAssembler) {
Node* array = nullptr;
Node* elements = nullptr;
Node* native_context = LoadNativeContext(context);
- Node* array_map = LoadJSArrayElementsMap(PACKED_ELEMENTS, native_context);
- Node* array_length = SmiTag(object_enum_length);
+ TNode<Map> array_map =
+ LoadJSArrayElementsMap(PACKED_ELEMENTS, native_context);
+ TNode<Smi> array_length = SmiTag(object_enum_length);
std::tie(array, elements) = AllocateUninitializedJSArrayWithElements(
PACKED_ELEMENTS, array_map, array_length, nullptr, object_enum_length,
INTPTR_PARAMETERS);
@@ -640,9 +645,10 @@ TF_BUILTIN(ObjectKeys, ObjectBuiltinsAssembler) {
{
// Wrap the elements into a proper JSArray and return that.
Node* native_context = LoadNativeContext(context);
- Node* array_map = LoadJSArrayElementsMap(PACKED_ELEMENTS, native_context);
- Node* array = AllocateUninitializedJSArrayWithoutElements(
- array_map, var_length.value(), nullptr);
+ TNode<Map> array_map =
+ LoadJSArrayElementsMap(PACKED_ELEMENTS, native_context);
+ TNode<JSArray> array = AllocateUninitializedJSArrayWithoutElements(
+ array_map, CAST(var_length.value()), nullptr);
StoreObjectFieldNoWriteBarrier(array, JSArray::kElementsOffset,
var_elements.value());
Return(array);
@@ -700,8 +706,9 @@ TF_BUILTIN(ObjectGetOwnPropertyNames, ObjectBuiltinsAssembler) {
Node* array = nullptr;
Node* elements = nullptr;
Node* native_context = LoadNativeContext(context);
- Node* array_map = LoadJSArrayElementsMap(PACKED_ELEMENTS, native_context);
- Node* array_length = SmiTag(object_enum_length);
+ TNode<Map> array_map =
+ LoadJSArrayElementsMap(PACKED_ELEMENTS, native_context);
+ TNode<Smi> array_length = SmiTag(object_enum_length);
std::tie(array, elements) = AllocateUninitializedJSArrayWithElements(
PACKED_ELEMENTS, array_map, array_length, nullptr, object_enum_length,
INTPTR_PARAMETERS);
@@ -742,9 +749,10 @@ TF_BUILTIN(ObjectGetOwnPropertyNames, ObjectBuiltinsAssembler) {
{
// Wrap the elements into a proper JSArray and return that.
Node* native_context = LoadNativeContext(context);
- Node* array_map = LoadJSArrayElementsMap(PACKED_ELEMENTS, native_context);
- Node* array = AllocateUninitializedJSArrayWithoutElements(
- array_map, var_length.value(), nullptr);
+ TNode<Map> array_map =
+ LoadJSArrayElementsMap(PACKED_ELEMENTS, native_context);
+ TNode<JSArray> array = AllocateUninitializedJSArrayWithoutElements(
+ array_map, CAST(var_length.value()), nullptr);
StoreObjectFieldNoWriteBarrier(array, JSArray::kElementsOffset,
var_elements.value());
Return(array);
@@ -814,7 +822,13 @@ TF_BUILTIN(ObjectPrototypeIsPrototypeOf, ObjectBuiltinsAssembler) {
}
// ES #sec-object.prototype.tostring
-TF_BUILTIN(ObjectPrototypeToString, ObjectBuiltinsAssembler) {
+TF_BUILTIN(ObjectPrototypeToString, CodeStubAssembler) {
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ Return(CallBuiltin(Builtins::kObjectToString, context, receiver));
+}
+
+TF_BUILTIN(ObjectToString, ObjectBuiltinsAssembler) {
Label checkstringtag(this), if_apiobject(this, Label::kDeferred),
if_arguments(this), if_array(this), if_boolean(this), if_date(this),
if_error(this), if_function(this), if_number(this, Label::kDeferred),
@@ -1042,17 +1056,57 @@ TF_BUILTIN(ObjectPrototypeToString, ObjectBuiltinsAssembler) {
BIND(&if_value);
{
+ Label if_value_is_number(this, Label::kDeferred),
+ if_value_is_boolean(this, Label::kDeferred),
+ if_value_is_symbol(this, Label::kDeferred),
+ if_value_is_bigint(this, Label::kDeferred),
+ if_value_is_string(this, Label::kDeferred);
+
Node* receiver_value = LoadJSValueValue(receiver);
- GotoIf(TaggedIsSmi(receiver_value), &if_number);
+ // We need to start with the object to see if the value was a subclass
+ // which might have interesting properties.
+ var_holder.Bind(receiver);
+ GotoIf(TaggedIsSmi(receiver_value), &if_value_is_number);
Node* receiver_value_map = LoadMap(receiver_value);
- GotoIf(IsHeapNumberMap(receiver_value_map), &if_number);
- GotoIf(IsBooleanMap(receiver_value_map), &if_boolean);
- GotoIf(IsSymbolMap(receiver_value_map), &if_symbol);
+ GotoIf(IsHeapNumberMap(receiver_value_map), &if_value_is_number);
+ GotoIf(IsBooleanMap(receiver_value_map), &if_value_is_boolean);
+ GotoIf(IsSymbolMap(receiver_value_map), &if_value_is_symbol);
Node* receiver_value_instance_type =
LoadMapInstanceType(receiver_value_map);
- GotoIf(IsBigIntInstanceType(receiver_value_instance_type), &if_bigint);
+ GotoIf(IsBigIntInstanceType(receiver_value_instance_type),
+ &if_value_is_bigint);
CSA_ASSERT(this, IsStringInstanceType(receiver_value_instance_type));
- Goto(&if_string);
+ Goto(&if_value_is_string);
+
+ BIND(&if_value_is_number);
+ {
+ var_default.Bind(LoadRoot(RootIndex::knumber_to_string));
+ Goto(&checkstringtag);
+ }
+
+ BIND(&if_value_is_boolean);
+ {
+ var_default.Bind(LoadRoot(RootIndex::kboolean_to_string));
+ Goto(&checkstringtag);
+ }
+
+ BIND(&if_value_is_string);
+ {
+ var_default.Bind(LoadRoot(RootIndex::kstring_to_string));
+ Goto(&checkstringtag);
+ }
+
+ BIND(&if_value_is_bigint);
+ {
+ var_default.Bind(LoadRoot(RootIndex::kobject_to_string));
+ Goto(&checkstringtag);
+ }
+
+ BIND(&if_value_is_symbol);
+ {
+ var_default.Bind(LoadRoot(RootIndex::kobject_to_string));
+ Goto(&checkstringtag);
+ }
}
BIND(&checkstringtag);
@@ -1341,7 +1395,7 @@ TF_BUILTIN(CreateGeneratorObject, ObjectBuiltinsAssembler) {
MachineType::Uint16()));
Node* frame_size = ChangeInt32ToIntPtr(LoadObjectField(
bytecode_array, BytecodeArray::kFrameSizeOffset, MachineType::Int32()));
- Node* size = IntPtrAdd(WordSar(frame_size, IntPtrConstant(kPointerSizeLog2)),
+ Node* size = IntPtrAdd(WordSar(frame_size, IntPtrConstant(kTaggedSizeLog2)),
formal_parameter_count);
Node* parameters_and_registers = AllocateFixedArray(HOLEY_ELEMENTS, size);
FillFixedArrayWithValue(HOLEY_ELEMENTS, parameters_and_registers,
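
Two independent changes ride along in this file. Object.prototype.toString becomes a thin forwarder to a new reusable ObjectToString builtin (with dedicated value-wrapper cases that pick the right default tag before the @@toStringTag check), and the generator-object size computation shifts by kTaggedSizeLog2 instead of kPointerSizeLog2, i.e. it converts the bytecode frame size from bytes into tagged-slot counts, which stays correct once tagged slots can be narrower than machine pointers under pointer compression. A quick check of that byte-to-slot arithmetic, with illustrative constants:

    #include <cassert>

    int main() {
      // Illustrative uncompressed 64-bit values; under pointer compression
      // kTaggedSize (and hence the shift) would be smaller than a pointer.
      constexpr int kTaggedSize = 8;
      constexpr int kTaggedSizeLog2 = 3;

      int frame_size_bytes = 10 * kTaggedSize;  // frame holding 10 registers
      int formal_parameter_count = 2;

      int register_count = frame_size_bytes >> kTaggedSizeLog2;
      int size = register_count + formal_parameter_count;  // array length
      assert(size == 12);
      return 0;
    }
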
diff --git a/deps/v8/src/builtins/builtins-object-gen.h b/deps/v8/src/builtins/builtins-object-gen.h
new file mode 100644
index 0000000000..9489f0d1e0
--- /dev/null
+++ b/deps/v8/src/builtins/builtins-object-gen.h
@@ -0,0 +1,14 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BUILTINS_BUILTINS_OBJECT_GEN_H_
+#define V8_BUILTINS_BUILTINS_OBJECT_GEN_H_
+
+#include "src/code-stub-assembler.h"
+
+namespace v8 {
+namespace internal {} // namespace internal
+} // namespace v8
+
+#endif // V8_BUILTINS_BUILTINS_OBJECT_GEN_H_
diff --git a/deps/v8/src/builtins/builtins-object.cc b/deps/v8/src/builtins/builtins-object.cc
index 7513f60095..75f680844b 100644
--- a/deps/v8/src/builtins/builtins-object.cc
+++ b/deps/v8/src/builtins/builtins-object.cc
@@ -5,10 +5,10 @@
#include "src/builtins/builtins-utils-inl.h"
#include "src/builtins/builtins.h"
#include "src/code-factory.h"
-#include "src/code-stub-assembler.h"
#include "src/counters.h"
#include "src/keys.h"
#include "src/lookup.h"
+#include "src/message-template.h"
#include "src/objects-inl.h"
#include "src/property-descriptor.h"
@@ -26,7 +26,7 @@ BUILTIN(ObjectPrototypePropertyIsEnumerable) {
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, name, Object::ToName(isolate, args.atOrUndefined(isolate, 1)));
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, object, JSReceiver::ToObject(isolate, args.receiver()));
+ isolate, object, Object::ToObject(isolate, args.receiver()));
Maybe<PropertyAttributes> maybe =
JSReceiver::GetOwnPropertyAttributes(object, name);
if (maybe.IsNothing()) return ReadOnlyRoots(isolate).exception();
@@ -59,15 +59,15 @@ BUILTIN(ObjectDefineProperty) {
namespace {
template <AccessorComponent which_accessor>
-Object* ObjectDefineAccessor(Isolate* isolate, Handle<Object> object,
- Handle<Object> name, Handle<Object> accessor) {
+Object ObjectDefineAccessor(Isolate* isolate, Handle<Object> object,
+ Handle<Object> name, Handle<Object> accessor) {
// 1. Let O be ? ToObject(this value).
Handle<JSReceiver> receiver;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, receiver,
Object::ToObject(isolate, object));
// 2. If IsCallable(getter) is false, throw a TypeError exception.
if (!accessor->IsCallable()) {
- MessageTemplate::Template message =
+ MessageTemplate message =
which_accessor == ACCESSOR_GETTER
? MessageTemplate::kObjectGetterExpectingFunction
: MessageTemplate::kObjectSetterExpectingFunction;
@@ -100,8 +100,8 @@ Object* ObjectDefineAccessor(Isolate* isolate, Handle<Object> object,
return ReadOnlyRoots(isolate).undefined_value();
}
-Object* ObjectLookupAccessor(Isolate* isolate, Handle<Object> object,
- Handle<Object> key, AccessorComponent component) {
+Object ObjectLookupAccessor(Isolate* isolate, Handle<Object> object,
+ Handle<Object> key, AccessorComponent component) {
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, object,
Object::ToObject(isolate, object));
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, key,
@@ -309,8 +309,8 @@ BUILTIN(ObjectPrototypeSetProto) {
namespace {
-Object* GetOwnPropertyKeys(Isolate* isolate, BuiltinArguments args,
- PropertyFilter filter) {
+Object GetOwnPropertyKeys(Isolate* isolate, BuiltinArguments args,
+ PropertyFilter filter) {
HandleScope scope(isolate);
Handle<Object> object = args.atOrUndefined(isolate, 1);
Handle<JSReceiver> receiver;
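
The Object* to Object signature changes in this file are part of the wider object-header refactor in this V8 roll: heap references migrate from raw C++ pointers to small value classes wrapping a tagged address, so returning one still copies a single word, but the type no longer pretends to be dereferenceable by the C++ compiler. A toy version of the idea, with editorial names rather than V8's actual definitions:

    #include <cstdint>

    using Address = uintptr_t;

    // One word of state, passed and returned by value; the raw address is
    // only reachable through an explicit accessor.
    class Object {
     public:
      explicit Object(Address ptr) : ptr_(ptr) {}
      Address ptr() const { return ptr_; }

     private:
      Address ptr_;  // tagged pointer or Smi payload
    };

    Object MakeTagged(Address raw) { return Object(raw); }

    int main() {
      Object o = MakeTagged(0x42);  // illustrative tagged word
      return o.ptr() == 0x42 ? 0 : 1;
    }
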
diff --git a/deps/v8/src/builtins/builtins-promise-gen.cc b/deps/v8/src/builtins/builtins-promise-gen.cc
index 1d43217999..39d81ce9dd 100644
--- a/deps/v8/src/builtins/builtins-promise-gen.cc
+++ b/deps/v8/src/builtins/builtins-promise-gen.cc
@@ -6,17 +6,20 @@
#include "src/builtins/builtins-constructor-gen.h"
#include "src/builtins/builtins-iterator-gen.h"
+#include "src/builtins/builtins-promise.h"
#include "src/builtins/builtins-utils-gen.h"
#include "src/builtins/builtins.h"
#include "src/code-factory.h"
#include "src/code-stub-assembler.h"
#include "src/objects-inl.h"
#include "src/objects/js-promise.h"
+#include "src/objects/smi.h"
namespace v8 {
namespace internal {
using compiler::Node;
+using IteratorRecord = IteratorBuiltinsAssembler::IteratorRecord;
Node* PromiseBuiltinsAssembler::AllocateJSPromise(Node* context) {
Node* const native_context = LoadNativeContext(context);
@@ -37,12 +40,12 @@ Node* PromiseBuiltinsAssembler::AllocateJSPromise(Node* context) {
void PromiseBuiltinsAssembler::PromiseInit(Node* promise) {
STATIC_ASSERT(v8::Promise::kPending == 0);
StoreObjectFieldNoWriteBarrier(promise, JSPromise::kReactionsOrResultOffset,
- SmiConstant(Smi::kZero));
+ SmiConstant(Smi::zero()));
StoreObjectFieldNoWriteBarrier(promise, JSPromise::kFlagsOffset,
- SmiConstant(Smi::kZero));
- for (int i = 0; i < v8::Promise::kEmbedderFieldCount; i++) {
- int offset = JSPromise::kSize + i * kPointerSize;
- StoreObjectFieldNoWriteBarrier(promise, offset, SmiConstant(Smi::kZero));
+ SmiConstant(Smi::zero()));
+ for (int offset = JSPromise::kSize;
+ offset < JSPromise::kSizeWithEmbedderFields; offset += kTaggedSize) {
+ StoreObjectFieldNoWriteBarrier(promise, offset, SmiConstant(Smi::zero()));
}
}
@@ -74,8 +77,8 @@ Node* PromiseBuiltinsAssembler::AllocateAndSetJSPromise(
STATIC_ASSERT(JSPromise::kStatusShift == 0);
StoreObjectFieldNoWriteBarrier(instance, JSPromise::kFlagsOffset,
SmiConstant(status));
- for (int i = 0; i < v8::Promise::kEmbedderFieldCount; i++) {
- int offset = JSPromise::kSize + i * kPointerSize;
+ for (int offset = JSPromise::kSize;
+ offset < JSPromise::kSizeWithEmbedderFields; offset += kTaggedSize) {
StoreObjectFieldNoWriteBarrier(instance, offset, SmiConstant(0));
}
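
The loop rewrite just above is behavior-preserving but future-proof: instead of deriving each embedder-field offset as kSize + i * kPointerSize, it walks byte offsets directly from kSize up to kSizeWithEmbedderFields in kTaggedSize steps, so the code stays correct if tagged slots stop matching pointer size. An equivalence check with made-up layout constants:

    #include <cassert>
    #include <vector>

    int main() {
      // Made-up layout constants, for the demonstration only.
      constexpr int kTaggedSize = 8;
      constexpr int kSize = 32;
      constexpr int kEmbedderFieldCount = 3;
      constexpr int kSizeWithEmbedderFields =
          kSize + kEmbedderFieldCount * kTaggedSize;

      std::vector<int> index_based, offset_based;
      for (int i = 0; i < kEmbedderFieldCount; i++)  // old loop shape
        index_based.push_back(kSize + i * kTaggedSize);
      for (int offset = kSize; offset < kSizeWithEmbedderFields;
           offset += kTaggedSize)                    // new loop shape
        offset_based.push_back(offset);
      assert(index_based == offset_based);
      return 0;
    }
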
@@ -114,7 +117,7 @@ TF_BUILTIN(NewPromiseCapability, PromiseBuiltinsAssembler) {
Node* const context = Parameter(Descriptor::kContext);
Node* const constructor = Parameter(Descriptor::kConstructor);
Node* const debug_event = Parameter(Descriptor::kDebugEvent);
- Node* const native_context = LoadNativeContext(context);
+ TNode<Context> const native_context = LoadNativeContext(context);
Label if_not_constructor(this, Label::kDeferred),
if_notcallable(this, Label::kDeferred), if_fast_promise_capability(this),
@@ -164,11 +167,10 @@ TF_BUILTIN(NewPromiseCapability, PromiseBuiltinsAssembler) {
native_context, Context::PROMISE_GET_CAPABILITIES_EXECUTOR_SHARED_FUN);
Node* function_map = LoadContextElement(
native_context, Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX);
- Node* executor = AllocateFunctionWithMapAndContext(
- function_map, executor_info, executor_context);
+ TNode<JSFunction> executor = CAST(AllocateFunctionWithMapAndContext(
+ function_map, executor_info, executor_context));
- Node* promise = ConstructJS(CodeFactory::Construct(isolate()),
- native_context, constructor, executor);
+ Node* promise = Construct(native_context, CAST(constructor), executor);
StoreObjectField(capability, PromiseCapability::kPromiseOffset, promise);
Node* resolve =
@@ -204,19 +206,22 @@ Node* PromiseBuiltinsAssembler::CreatePromiseAllResolveElementContext(
CSA_ASSERT(this, IsNativeContext(native_context));
// TODO(bmeurer): Manually fold this into a single allocation.
- Node* const array_map = LoadContextElement(
- native_context, Context::JS_ARRAY_PACKED_ELEMENTS_MAP_INDEX);
- Node* const values_array = AllocateJSArray(PACKED_ELEMENTS, array_map,
- IntPtrConstant(0), SmiConstant(0));
+ TNode<Map> array_map = CAST(LoadContextElement(
+ native_context, Context::JS_ARRAY_PACKED_ELEMENTS_MAP_INDEX));
+ TNode<JSArray> values_array = AllocateJSArray(
+ PACKED_ELEMENTS, array_map, IntPtrConstant(0), SmiConstant(0));
- Node* const context =
- CreatePromiseContext(native_context, kPromiseAllResolveElementLength);
+ Node* const context = CreatePromiseContext(
+ native_context, PromiseBuiltins::kPromiseAllResolveElementLength);
StoreContextElementNoWriteBarrier(
- context, kPromiseAllResolveElementRemainingSlot, SmiConstant(1));
+ context, PromiseBuiltins::kPromiseAllResolveElementRemainingSlot,
+ SmiConstant(1));
StoreContextElementNoWriteBarrier(
- context, kPromiseAllResolveElementCapabilitySlot, promise_capability);
+ context, PromiseBuiltins::kPromiseAllResolveElementCapabilitySlot,
+ promise_capability);
StoreContextElementNoWriteBarrier(
- context, kPromiseAllResolveElementValuesArraySlot, values_array);
+ context, PromiseBuiltins::kPromiseAllResolveElementValuesArraySlot,
+ values_array);
return context;
}
@@ -244,20 +249,22 @@ Node* PromiseBuiltinsAssembler::CreatePromiseAllResolveElementFunction(
Node* PromiseBuiltinsAssembler::CreatePromiseResolvingFunctionsContext(
Node* promise, Node* debug_event, Node* native_context) {
- Node* const context =
- CreatePromiseContext(native_context, kPromiseContextLength);
- StoreContextElementNoWriteBarrier(context, kPromiseSlot, promise);
- StoreContextElementNoWriteBarrier(context, kAlreadyResolvedSlot,
- FalseConstant());
- StoreContextElementNoWriteBarrier(context, kDebugEventSlot, debug_event);
+ Node* const context = CreatePromiseContext(
+ native_context, PromiseBuiltins::kPromiseContextLength);
+ StoreContextElementNoWriteBarrier(context, PromiseBuiltins::kPromiseSlot,
+ promise);
+ StoreContextElementNoWriteBarrier(
+ context, PromiseBuiltins::kAlreadyResolvedSlot, FalseConstant());
+ StoreContextElementNoWriteBarrier(context, PromiseBuiltins::kDebugEventSlot,
+ debug_event);
return context;
}
Node* PromiseBuiltinsAssembler::CreatePromiseGetCapabilitiesExecutorContext(
Node* promise_capability, Node* native_context) {
- int kContextLength = kCapabilitiesContextLength;
+ int kContextLength = PromiseBuiltins::kCapabilitiesContextLength;
Node* context = CreatePromiseContext(native_context, kContextLength);
- StoreContextElementNoWriteBarrier(context, kCapabilitySlot,
+ StoreContextElementNoWriteBarrier(context, PromiseBuiltins::kCapabilitySlot,
promise_capability);
return context;
}
@@ -318,8 +325,11 @@ void PromiseBuiltinsAssembler::PerformPromiseThen(
Word32Or(IsCallable(on_fulfilled), IsUndefined(on_fulfilled)));
CSA_ASSERT(this, Word32Or(IsCallable(on_rejected), IsUndefined(on_rejected)));
CSA_ASSERT(this, TaggedIsNotSmi(result_promise_or_capability));
- CSA_ASSERT(this, Word32Or(IsJSPromise(result_promise_or_capability),
- IsPromiseCapability(result_promise_or_capability)));
+ CSA_ASSERT(
+ this,
+ Word32Or(Word32Or(IsJSPromise(result_promise_or_capability),
+ IsPromiseCapability(result_promise_or_capability)),
+ IsUndefined(result_promise_or_capability)));
Label if_pending(this), if_notpending(this), done(this);
Node* const status = PromiseStatus(promise);
@@ -373,7 +383,7 @@ void PromiseBuiltinsAssembler::PerformPromiseThen(
Node* microtask = AllocatePromiseReactionJobTask(
var_map.value(), context, argument, var_handler.value(),
result_promise_or_capability);
- CallBuiltin(Builtins::kEnqueueMicrotask, NoContextConstant(), microtask);
+ CallBuiltin(Builtins::kEnqueueMicrotask, context, microtask);
Goto(&done);
}
@@ -390,7 +400,8 @@ TF_BUILTIN(PerformPromiseThen, PromiseBuiltinsAssembler) {
Node* const result_promise = Parameter(Descriptor::kResultPromise);
CSA_ASSERT(this, TaggedIsNotSmi(result_promise));
- CSA_ASSERT(this, IsJSPromise(result_promise));
+ CSA_ASSERT(
+ this, Word32Or(IsJSPromise(result_promise), IsUndefined(result_promise)));
PerformPromiseThen(context, promise, on_fulfilled, on_rejected,
result_promise);
@@ -466,7 +477,7 @@ Node* PromiseBuiltinsAssembler::TriggerPromiseReactions(
{
VARIABLE(var_current, MachineRepresentation::kTagged, reactions);
VARIABLE(var_reversed, MachineRepresentation::kTagged,
- SmiConstant(Smi::kZero));
+ SmiConstant(Smi::zero()));
Label loop(this, {&var_current, &var_reversed}), done_loop(this);
Goto(&loop);
@@ -500,7 +511,8 @@ Node* PromiseBuiltinsAssembler::TriggerPromiseReactions(
// Morph {current} from a PromiseReaction into a PromiseReactionJobTask
// and schedule that on the microtask queue. We try to minimize the number
// of stores here to avoid screwing up the store buffer.
- STATIC_ASSERT(PromiseReaction::kSize == PromiseReactionJobTask::kSize);
+ STATIC_ASSERT(static_cast<int>(PromiseReaction::kSize) ==
+ static_cast<int>(PromiseReactionJobTask::kSize));
if (type == PromiseReaction::kFulfill) {
StoreMapNoWriteBarrier(current,
RootIndex::kPromiseFulfillReactionJobTaskMap);
@@ -508,10 +520,13 @@ Node* PromiseBuiltinsAssembler::TriggerPromiseReactions(
argument);
StoreObjectField(current, PromiseReactionJobTask::kContextOffset,
context);
- STATIC_ASSERT(PromiseReaction::kFulfillHandlerOffset ==
- PromiseReactionJobTask::kHandlerOffset);
- STATIC_ASSERT(PromiseReaction::kPromiseOrCapabilityOffset ==
- PromiseReactionJobTask::kPromiseOrCapabilityOffset);
+ STATIC_ASSERT(
+ static_cast<int>(PromiseReaction::kFulfillHandlerOffset) ==
+ static_cast<int>(PromiseReactionJobTask::kHandlerOffset));
+ STATIC_ASSERT(
+ static_cast<int>(PromiseReaction::kPromiseOrCapabilityOffset) ==
+ static_cast<int>(
+ PromiseReactionJobTask::kPromiseOrCapabilityOffset));
} else {
Node* handler =
LoadObjectField(current, PromiseReaction::kRejectHandlerOffset);
@@ -523,10 +538,12 @@ Node* PromiseBuiltinsAssembler::TriggerPromiseReactions(
context);
StoreObjectField(current, PromiseReactionJobTask::kHandlerOffset,
handler);
- STATIC_ASSERT(PromiseReaction::kPromiseOrCapabilityOffset ==
- PromiseReactionJobTask::kPromiseOrCapabilityOffset);
+ STATIC_ASSERT(
+ static_cast<int>(PromiseReaction::kPromiseOrCapabilityOffset) ==
+ static_cast<int>(
+ PromiseReactionJobTask::kPromiseOrCapabilityOffset));
}
- CallBuiltin(Builtins::kEnqueueMicrotask, NoContextConstant(), current);
+ CallBuiltin(Builtins::kEnqueueMicrotask, context, current);
Goto(&loop);
}
BIND(&done_loop);
@@ -633,6 +650,14 @@ void PromiseBuiltinsAssembler::BranchIfPromiseResolveLookupChainIntact(
Branch(IsPromiseResolveProtectorCellInvalid(), if_slow, if_fast);
}
+void PromiseBuiltinsAssembler::GotoIfNotPromiseResolveLookupChainIntact(
+ Node* native_context, Node* constructor, Label* if_slow) {
+ Label if_fast(this);
+ BranchIfPromiseResolveLookupChainIntact(native_context, constructor, &if_fast,
+ if_slow);
+ BIND(&if_fast);
+}
+
void PromiseBuiltinsAssembler::BranchIfPromiseSpeciesLookupChainIntact(
Node* native_context, Node* promise_map, Label* if_fast, Label* if_slow) {
CSA_ASSERT(this, IsNativeContext(native_context));
@@ -737,22 +762,24 @@ TF_BUILTIN(PromiseCapabilityDefaultReject, PromiseBuiltinsAssembler) {
Node* const context = Parameter(Descriptor::kContext);
// 2. Let promise be F.[[Promise]].
- Node* const promise = LoadContextElement(context, kPromiseSlot);
+ Node* const promise =
+ LoadContextElement(context, PromiseBuiltins::kPromiseSlot);
// 3. Let alreadyResolved be F.[[AlreadyResolved]].
Label if_already_resolved(this, Label::kDeferred);
Node* const already_resolved =
- LoadContextElement(context, kAlreadyResolvedSlot);
+ LoadContextElement(context, PromiseBuiltins::kAlreadyResolvedSlot);
// 4. If alreadyResolved.[[Value]] is true, return undefined.
GotoIf(IsTrue(already_resolved), &if_already_resolved);
// 5. Set alreadyResolved.[[Value]] to true.
- StoreContextElementNoWriteBarrier(context, kAlreadyResolvedSlot,
- TrueConstant());
+ StoreContextElementNoWriteBarrier(
+ context, PromiseBuiltins::kAlreadyResolvedSlot, TrueConstant());
// 6. Return RejectPromise(promise, reason).
- Node* const debug_event = LoadContextElement(context, kDebugEventSlot);
+ Node* const debug_event =
+ LoadContextElement(context, PromiseBuiltins::kDebugEventSlot);
Return(CallBuiltin(Builtins::kRejectPromise, context, promise, reason,
debug_event));
@@ -769,19 +796,20 @@ TF_BUILTIN(PromiseCapabilityDefaultResolve, PromiseBuiltinsAssembler) {
Node* const context = Parameter(Descriptor::kContext);
// 2. Let promise be F.[[Promise]].
- Node* const promise = LoadContextElement(context, kPromiseSlot);
+ Node* const promise =
+ LoadContextElement(context, PromiseBuiltins::kPromiseSlot);
// 3. Let alreadyResolved be F.[[AlreadyResolved]].
Label if_already_resolved(this, Label::kDeferred);
Node* const already_resolved =
- LoadContextElement(context, kAlreadyResolvedSlot);
+ LoadContextElement(context, PromiseBuiltins::kAlreadyResolvedSlot);
// 4. If alreadyResolved.[[Value]] is true, return undefined.
GotoIf(IsTrue(already_resolved), &if_already_resolved);
// 5. Set alreadyResolved.[[Value]] to true.
- StoreContextElementNoWriteBarrier(context, kAlreadyResolvedSlot,
- TrueConstant());
+ StoreContextElementNoWriteBarrier(
+ context, PromiseBuiltins::kAlreadyResolvedSlot, TrueConstant());
// The rest of the logic (and the catch prediction) is
// encapsulated in the dedicated ResolvePromise builtin.
@@ -1082,8 +1110,8 @@ TF_BUILTIN(PromiseResolveThenableJob, PromiseBuiltinsAssembler) {
GotoIfNot(WordEqual(then, promise_then), &if_slow);
Node* const thenable_map = LoadMap(thenable);
GotoIfNot(IsJSPromiseMap(thenable_map), &if_slow);
- GotoIf(IsPromiseHookEnabled(), &if_slow);
- GotoIf(IsDebugActive(), &if_slow);
+ GotoIf(IsPromiseHookEnabledOrDebugIsActiveOrHasAsyncEventDelegate(),
+ &if_slow);
BranchIfPromiseSpeciesLookupChainIntact(native_context, thenable_map,
&if_fast, &if_slow);
@@ -1154,11 +1182,14 @@ void PromiseBuiltinsAssembler::PromiseReactionJob(Node* context, Node* argument,
CSA_ASSERT(this, TaggedIsNotSmi(handler));
CSA_ASSERT(this, Word32Or(IsUndefined(handler), IsCallable(handler)));
CSA_ASSERT(this, TaggedIsNotSmi(promise_or_capability));
- CSA_ASSERT(this, Word32Or(IsJSPromise(promise_or_capability),
- IsPromiseCapability(promise_or_capability)));
+ CSA_ASSERT(this,
+ Word32Or(Word32Or(IsJSPromise(promise_or_capability),
+ IsPromiseCapability(promise_or_capability)),
+ IsUndefined(promise_or_capability)));
VARIABLE(var_handler_result, MachineRepresentation::kTagged, argument);
- Label if_handler_callable(this), if_fulfill(this), if_reject(this);
+ Label if_handler_callable(this), if_fulfill(this), if_reject(this),
+ if_internal(this);
Branch(IsUndefined(handler),
type == PromiseReaction::kFulfill ? &if_fulfill : &if_reject,
&if_handler_callable);
@@ -1170,7 +1201,16 @@ void PromiseBuiltinsAssembler::PromiseReactionJob(Node* context, Node* argument,
context, handler, UndefinedConstant(), argument);
GotoIfException(result, &if_reject, &var_handler_result);
var_handler_result.Bind(result);
- Goto(&if_fulfill);
+ Branch(IsUndefined(promise_or_capability), &if_internal, &if_fulfill);
+ }
+
+ BIND(&if_internal);
+ {
+ // There's no [[Capability]] for this promise reaction job, which
+ // means that this is a specification-internal operation (aka await)
+ // where the result does not matter (see the specification change in
+ // https://github.com/tc39/ecma262/pull/1146 for details).
+ Return(UndefinedConstant());
}
BIND(&if_fulfill);
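
The if_internal branch above implements the await optimization from the spec change linked in the comment: a reaction job may now carry undefined in place of a promise or capability, in which case the handler's result is simply dropped. The widened CSA_ASSERTs earlier in this file (the nested Word32Or accepting undefined) exist for the same reason. Reduced to a predicate, with editorial naming:

    // Editorial model of the widened assert: the reaction job's result slot
    // may be a JSPromise, a PromiseCapability, or undefined (the await case).
    enum class Kind { kJSPromise, kPromiseCapability, kUndefined, kOther };

    bool IsValidResultSlot(Kind k) {
      return k == Kind::kJSPromise || k == Kind::kPromiseCapability ||
             k == Kind::kUndefined;
    }
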
@@ -1371,7 +1411,8 @@ TF_BUILTIN(PromiseGetCapabilitiesExecutor, PromiseBuiltinsAssembler) {
Node* const reject = Parameter(Descriptor::kReject);
Node* const context = Parameter(Descriptor::kContext);
- Node* const capability = LoadContextElement(context, kCapabilitySlot);
+ Node* const capability =
+ LoadContextElement(context, PromiseBuiltins::kCapabilitySlot);
Label if_alreadyinvoked(this, Label::kDeferred);
GotoIfNot(IsUndefined(
@@ -1439,12 +1480,12 @@ TF_BUILTIN(PromiseReject, PromiseBuiltinsAssembler) {
std::pair<Node*, Node*> PromiseBuiltinsAssembler::CreatePromiseFinallyFunctions(
Node* on_finally, Node* constructor, Node* native_context) {
- Node* const promise_context =
- CreatePromiseContext(native_context, kPromiseFinallyContextLength);
- StoreContextElementNoWriteBarrier(promise_context, kOnFinallySlot,
- on_finally);
- StoreContextElementNoWriteBarrier(promise_context, kConstructorSlot,
- constructor);
+ Node* const promise_context = CreatePromiseContext(
+ native_context, PromiseBuiltins::kPromiseFinallyContextLength);
+ StoreContextElementNoWriteBarrier(
+ promise_context, PromiseBuiltins::kOnFinallySlot, on_finally);
+ StoreContextElementNoWriteBarrier(
+ promise_context, PromiseBuiltins::kConstructorSlot, constructor);
Node* const map = LoadContextElement(
native_context, Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX);
Node* const then_finally_info = LoadContextElement(
@@ -1461,15 +1502,16 @@ std::pair<Node*, Node*> PromiseBuiltinsAssembler::CreatePromiseFinallyFunctions(
TF_BUILTIN(PromiseValueThunkFinally, PromiseBuiltinsAssembler) {
Node* const context = Parameter(Descriptor::kContext);
- Node* const value = LoadContextElement(context, kValueSlot);
+ Node* const value = LoadContextElement(context, PromiseBuiltins::kValueSlot);
Return(value);
}
Node* PromiseBuiltinsAssembler::CreateValueThunkFunction(Node* value,
Node* native_context) {
Node* const value_thunk_context = CreatePromiseContext(
- native_context, kPromiseValueThunkOrReasonContextLength);
- StoreContextElementNoWriteBarrier(value_thunk_context, kValueSlot, value);
+ native_context, PromiseBuiltins::kPromiseValueThunkOrReasonContextLength);
+ StoreContextElementNoWriteBarrier(value_thunk_context,
+ PromiseBuiltins::kValueSlot, value);
Node* const map = LoadContextElement(
native_context, Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX);
Node* const value_thunk_info = LoadContextElement(
@@ -1486,7 +1528,8 @@ TF_BUILTIN(PromiseThenFinally, PromiseBuiltinsAssembler) {
Node* const context = Parameter(Descriptor::kContext);
// 1. Let onFinally be F.[[OnFinally]].
- Node* const on_finally = LoadContextElement(context, kOnFinallySlot);
+ Node* const on_finally =
+ LoadContextElement(context, PromiseBuiltins::kOnFinallySlot);
// 2. Assert: IsCallable(onFinally) is true.
CSA_ASSERT(this, IsCallable(on_finally));
@@ -1497,7 +1540,8 @@ TF_BUILTIN(PromiseThenFinally, PromiseBuiltinsAssembler) {
context, on_finally, UndefinedConstant());
// 4. Let C be F.[[Constructor]].
- Node* const constructor = LoadContextElement(context, kConstructorSlot);
+ Node* const constructor =
+ LoadContextElement(context, PromiseBuiltins::kConstructorSlot);
// 5. Assert: IsConstructor(C) is true.
CSA_ASSERT(this, IsConstructor(constructor));
@@ -1517,7 +1561,7 @@ TF_BUILTIN(PromiseThenFinally, PromiseBuiltinsAssembler) {
TF_BUILTIN(PromiseThrowerFinally, PromiseBuiltinsAssembler) {
Node* const context = Parameter(Descriptor::kContext);
- Node* const reason = LoadContextElement(context, kValueSlot);
+ Node* const reason = LoadContextElement(context, PromiseBuiltins::kValueSlot);
CallRuntime(Runtime::kThrow, context, reason);
Unreachable();
}
@@ -1525,8 +1569,9 @@ TF_BUILTIN(PromiseThrowerFinally, PromiseBuiltinsAssembler) {
Node* PromiseBuiltinsAssembler::CreateThrowerFunction(Node* reason,
Node* native_context) {
Node* const thrower_context = CreatePromiseContext(
- native_context, kPromiseValueThunkOrReasonContextLength);
- StoreContextElementNoWriteBarrier(thrower_context, kValueSlot, reason);
+ native_context, PromiseBuiltins::kPromiseValueThunkOrReasonContextLength);
+ StoreContextElementNoWriteBarrier(thrower_context,
+ PromiseBuiltins::kValueSlot, reason);
Node* const map = LoadContextElement(
native_context, Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX);
Node* const thrower_info = LoadContextElement(
@@ -1543,7 +1588,8 @@ TF_BUILTIN(PromiseCatchFinally, PromiseBuiltinsAssembler) {
Node* const context = Parameter(Descriptor::kContext);
// 1. Let onFinally be F.[[OnFinally]].
- Node* const on_finally = LoadContextElement(context, kOnFinallySlot);
+ Node* const on_finally =
+ LoadContextElement(context, PromiseBuiltins::kOnFinallySlot);
// 2. Assert: IsCallable(onFinally) is true.
CSA_ASSERT(this, IsCallable(on_finally));
@@ -1554,7 +1600,8 @@ TF_BUILTIN(PromiseCatchFinally, PromiseBuiltinsAssembler) {
context, on_finally, UndefinedConstant());
// 4. Let C be F.[[Constructor]].
- Node* const constructor = LoadContextElement(context, kConstructorSlot);
+ Node* const constructor =
+ LoadContextElement(context, PromiseBuiltins::kConstructorSlot);
// 5. Assert: IsConstructor(C) is true.
CSA_ASSERT(this, IsConstructor(constructor));
@@ -1689,8 +1736,8 @@ TF_BUILTIN(RejectPromise, PromiseBuiltinsAssembler) {
// the runtime handle this operation, which greatly reduces
// the complexity here and also avoids a couple of back and
// forth between JavaScript and C++ land.
- GotoIf(IsPromiseHookEnabled(), &if_runtime);
- GotoIf(IsDebugActive(), &if_runtime);
+ GotoIf(IsPromiseHookEnabledOrDebugIsActiveOrHasAsyncEventDelegate(),
+ &if_runtime);
// 7. If promise.[[PromiseIsHandled]] is false, perform
// HostPromiseRejectionTracker(promise, "reject").
@@ -1737,8 +1784,8 @@ TF_BUILTIN(ResolvePromise, PromiseBuiltinsAssembler) {
// the runtime handle this operation, which greatly reduces
// the complexity here and also avoids a couple of back and
// forth between JavaScript and C++ land.
- GotoIf(IsPromiseHookEnabled(), &if_runtime);
- GotoIf(IsDebugActive(), &if_runtime);
+ GotoIf(IsPromiseHookEnabledOrDebugIsActiveOrHasAsyncEventDelegate(),
+ &if_runtime);
// 6. If SameValue(resolution, promise) is true, then
// We can use pointer comparison here, since the {promise} is guaranteed
@@ -1837,13 +1884,12 @@ Node* PromiseBuiltinsAssembler::PerformPromiseAll(
Variable* var_exception) {
IteratorBuiltinsAssembler iter_assembler(state());
- Node* const instrumenting = IsDebugActive();
Node* const native_context = LoadNativeContext(context);
// For catch prediction, don't treat the .then calls as handling it;
// instead, recurse outwards.
SetForwardingHandlerIfTrue(
- native_context, instrumenting,
+ native_context, IsDebugActive(),
LoadObjectField(capability, PromiseCapability::kRejectOffset));
Node* const resolve_element_context =
@@ -1873,22 +1919,22 @@ Node* PromiseBuiltinsAssembler::PerformPromiseAll(
native_context, next, fast_iterator_result_map, if_exception,
var_exception);
- // Let nextPromise be ? Invoke(constructor, "resolve", « nextValue »).
- Node* const next_promise =
- InvokeResolve(native_context, constructor, next_value, &close_iterator,
- var_exception);
-
// Check if we reached the limit.
TNode<Smi> const index = var_index.value();
GotoIf(SmiEqual(index, SmiConstant(PropertyArray::HashField::kMax)),
&too_many_elements);
+ // Set index to index + 1.
+ var_index = SmiAdd(index, SmiConstant(1));
+
// Set remainingElementsCount.[[Value]] to
// remainingElementsCount.[[Value]] + 1.
TNode<Smi> const remaining_elements_count = CAST(LoadContextElement(
- resolve_element_context, kPromiseAllResolveElementRemainingSlot));
+ resolve_element_context,
+ PromiseBuiltins::kPromiseAllResolveElementRemainingSlot));
StoreContextElementNoWriteBarrier(
- resolve_element_context, kPromiseAllResolveElementRemainingSlot,
+ resolve_element_context,
+ PromiseBuiltins::kPromiseAllResolveElementRemainingSlot,
SmiAdd(remaining_elements_count, SmiConstant(1)));
// Let resolveElement be CreateBuiltinFunction(steps,
@@ -1905,28 +1951,77 @@ Node* PromiseBuiltinsAssembler::PerformPromiseAll(
Node* const resolve_element_fun = CreatePromiseAllResolveElementFunction(
resolve_element_context, index, native_context);
- // Perform ? Invoke(nextPromise, "then", « resolveElement,
- // resultCapability.[[Reject]] »).
- Node* const then =
- GetProperty(native_context, next_promise, factory()->then_string());
- GotoIfException(then, &close_iterator, var_exception);
+ // We can skip the "resolve" lookup on the {constructor} as well as the
+ // "then" lookup on the result of the "resolve" call, and immediately
+ // chain continuation onto the {next_value} if:
+ //
+ // (a) The {constructor} is the intrinsic %Promise% function, and
+ // looking up "resolve" on {constructor} yields the initial
+ // Promise.resolve() builtin, and
+ // (b) the promise @@species protector cell is valid, meaning that
+ // no one messed with the Symbol.species property on any
+ // intrinsic promise or on the Promise.prototype, and
+ // (c) the {next_value} is a JSPromise whose [[Prototype]] field
+ // contains the intrinsic %PromisePrototype%, and
+ // (d) we're not running with async_hooks or DevTools enabled.
+ //
+ // In that case we also don't need to allocate a chained promise for
+ // the PromiseReaction (aka we can pass undefined to PerformPromiseThen),
+ // since this is only necessary for DevTools and PromiseHooks.
+ Label if_fast(this), if_slow(this);
+ GotoIfNotPromiseResolveLookupChainIntact(native_context, constructor,
+ &if_slow);
+ GotoIf(IsPromiseHookEnabledOrDebugIsActiveOrHasAsyncEventDelegate(),
+ &if_slow);
+ GotoIf(IsPromiseSpeciesProtectorCellInvalid(), &if_slow);
+ GotoIf(TaggedIsSmi(next_value), &if_slow);
+ Node* const next_value_map = LoadMap(next_value);
+ BranchIfPromiseThenLookupChainIntact(native_context, next_value_map,
+ &if_fast, &if_slow);
- Node* const then_call = CallJS(
- CodeFactory::Call(isolate(), ConvertReceiverMode::kNotNullOrUndefined),
- native_context, then, next_promise, resolve_element_fun,
- LoadObjectField(capability, PromiseCapability::kRejectOffset));
- GotoIfException(then_call, &close_iterator, var_exception);
+ BIND(&if_fast);
+ {
+ // Register the PromiseReaction immediately on the {next_value}, not
+ // passing any chained promise since neither async_hooks nor DevTools
+ // are enabled, so there's no use of the resulting promise.
+ PerformPromiseThen(
+ native_context, next_value, resolve_element_fun,
+ LoadObjectField(capability, PromiseCapability::kRejectOffset),
+ UndefinedConstant());
+ Goto(&loop);
+ }
- // For catch prediction, mark that rejections here are semantically
- // handled by the combined Promise.
- SetPromiseHandledByIfTrue(native_context, instrumenting, then_call, [=]() {
- // Load promiseCapability.[[Promise]]
- return LoadObjectField(capability, PromiseCapability::kPromiseOffset);
- });
+ BIND(&if_slow);
+ {
+ // Let nextPromise be ? Invoke(constructor, "resolve", « nextValue »).
+ Node* const next_promise =
+ InvokeResolve(native_context, constructor, next_value,
+ &close_iterator, var_exception);
- // Set index to index + 1.
- var_index = SmiAdd(index, SmiConstant(1));
- Goto(&loop);
+ // Perform ? Invoke(nextPromise, "then", « resolveElement,
+ // resultCapability.[[Reject]] »).
+ Node* const then =
+ GetProperty(native_context, next_promise, factory()->then_string());
+ GotoIfException(then, &close_iterator, var_exception);
+
+ Node* const then_call =
+ CallJS(CodeFactory::Call(isolate(),
+ ConvertReceiverMode::kNotNullOrUndefined),
+ native_context, then, next_promise, resolve_element_fun,
+ LoadObjectField(capability, PromiseCapability::kRejectOffset));
+ GotoIfException(then_call, &close_iterator, var_exception);
+
+ // For catch prediction, mark that rejections here are semantically
+ // handled by the combined Promise.
+ SetPromiseHandledByIfTrue(
+ native_context, IsDebugActive(), then_call, [=]() {
+ // Load promiseCapability.[[Promise]]
+ return LoadObjectField(capability,
+ PromiseCapability::kPromiseOffset);
+ });
+
+ Goto(&loop);
+ }
}
BIND(&too_many_elements);
@@ -1960,11 +2055,13 @@ Node* PromiseBuiltinsAssembler::PerformPromiseAll(
// Set remainingElementsCount.[[Value]] to
// remainingElementsCount.[[Value]] - 1.
TNode<Smi> remaining_elements_count = CAST(LoadContextElement(
- resolve_element_context, kPromiseAllResolveElementRemainingSlot));
+ resolve_element_context,
+ PromiseBuiltins::kPromiseAllResolveElementRemainingSlot));
remaining_elements_count = SmiSub(remaining_elements_count, SmiConstant(1));
- StoreContextElementNoWriteBarrier(resolve_element_context,
- kPromiseAllResolveElementRemainingSlot,
- remaining_elements_count);
+ StoreContextElementNoWriteBarrier(
+ resolve_element_context,
+ PromiseBuiltins::kPromiseAllResolveElementRemainingSlot,
+ remaining_elements_count);
GotoIf(SmiEqual(remaining_elements_count, SmiConstant(0)),
&resolve_promise);
@@ -1973,7 +2070,8 @@ Node* PromiseBuiltinsAssembler::PerformPromiseAll(
// fancy Thenable that calls the resolve callback immediately, so we need
// to handle that correctly here.
Node* const values_array = LoadContextElement(
- resolve_element_context, kPromiseAllResolveElementValuesArraySlot);
+ resolve_element_context,
+ PromiseBuiltins::kPromiseAllResolveElementValuesArraySlot);
Node* const old_elements = LoadElements(values_array);
TNode<Smi> const old_capacity = LoadFixedArrayBaseLength(old_elements);
TNode<Smi> const new_capacity = var_index.value();
@@ -1996,7 +2094,8 @@ Node* PromiseBuiltinsAssembler::PerformPromiseAll(
Node* const resolve =
LoadObjectField(capability, PromiseCapability::kResolveOffset);
Node* const values_array = LoadContextElement(
- resolve_element_context, kPromiseAllResolveElementValuesArraySlot);
+ resolve_element_context,
+ PromiseBuiltins::kPromiseAllResolveElementValuesArraySlot);
Node* const resolve_call = CallJS(
CodeFactory::Call(isolate(), ConvertReceiverMode::kNullOrUndefined),
native_context, resolve, UndefinedConstant(), values_array);
@@ -2079,8 +2178,10 @@ TF_BUILTIN(PromiseAllResolveElementClosure, PromiseBuiltinsAssembler) {
// first time, in which case we make it point to the native context here
// to mark this resolve element closure as done.
GotoIf(IsNativeContext(context), &already_called);
- CSA_ASSERT(this, SmiEqual(LoadFixedArrayBaseLength(context),
- SmiConstant(kPromiseAllResolveElementLength)));
+ CSA_ASSERT(
+ this,
+ SmiEqual(LoadObjectField<Smi>(context, Context::kLengthOffset),
+ SmiConstant(PromiseBuiltins::kPromiseAllResolveElementLength)));
TNode<Context> native_context = LoadNativeContext(context);
StoreObjectField(function, JSFunction::kContextOffset, native_context);
@@ -2093,8 +2194,8 @@ TF_BUILTIN(PromiseAllResolveElementClosure, PromiseBuiltinsAssembler) {
TNode<IntPtrT> index = IntPtrSub(identity_hash, IntPtrConstant(1));
// Check if we need to grow the [[ValuesArray]] to store {value} at {index}.
- TNode<JSArray> values_array = CAST(
- LoadContextElement(context, kPromiseAllResolveElementValuesArraySlot));
+ TNode<JSArray> values_array = CAST(LoadContextElement(
+ context, PromiseBuiltins::kPromiseAllResolveElementValuesArraySlot));
TNode<FixedArray> elements = CAST(LoadElements(values_array));
TNode<IntPtrT> values_length =
LoadAndUntagObjectField(values_array, JSArray::kLengthOffset);
@@ -2153,17 +2254,18 @@ TF_BUILTIN(PromiseAllResolveElementClosure, PromiseBuiltinsAssembler) {
}
BIND(&done);
- TNode<Smi> remaining_elements_count =
- CAST(LoadContextElement(context, kPromiseAllResolveElementRemainingSlot));
+ TNode<Smi> remaining_elements_count = CAST(LoadContextElement(
+ context, PromiseBuiltins::kPromiseAllResolveElementRemainingSlot));
remaining_elements_count = SmiSub(remaining_elements_count, SmiConstant(1));
- StoreContextElement(context, kPromiseAllResolveElementRemainingSlot,
+ StoreContextElement(context,
+ PromiseBuiltins::kPromiseAllResolveElementRemainingSlot,
remaining_elements_count);
GotoIf(SmiEqual(remaining_elements_count, SmiConstant(0)), &resolve_promise);
Return(UndefinedConstant());
BIND(&resolve_promise);
- TNode<PromiseCapability> capability = CAST(
- LoadContextElement(context, kPromiseAllResolveElementCapabilitySlot));
+ TNode<PromiseCapability> capability = CAST(LoadContextElement(
+ context, PromiseBuiltins::kPromiseAllResolveElementCapabilitySlot));
TNode<Object> resolve =
LoadObjectField(capability, PromiseCapability::kResolveOffset);
CallJS(CodeFactory::Call(isolate(), ConvertReceiverMode::kNullOrUndefined),
@@ -2200,14 +2302,12 @@ TF_BUILTIN(PromiseRace, PromiseBuiltinsAssembler) {
Node* const reject =
LoadObjectField(capability, PromiseCapability::kRejectOffset);
- Node* const instrumenting = IsDebugActive();
-
Label close_iterator(this, Label::kDeferred);
Label reject_promise(this, Label::kDeferred);
// For catch prediction, don't treat the .then calls as handling it;
// instead, recurse outwards.
- SetForwardingHandlerIfTrue(context, instrumenting, reject);
+ SetForwardingHandlerIfTrue(context, IsDebugActive(), reject);
// Let iterator be GetIterator(iterable).
// IfAbruptRejectPromise(iterator, promiseCapability).
@@ -2259,7 +2359,7 @@ TF_BUILTIN(PromiseRace, PromiseBuiltinsAssembler) {
// For catch prediction, mark that rejections here are semantically
// handled by the combined Promise.
- SetPromiseHandledByIfTrue(context, instrumenting, then_call, [=]() {
+ SetPromiseHandledByIfTrue(context, IsDebugActive(), then_call, [=]() {
// Load promiseCapability.[[Promise]]
return LoadObjectField(capability, PromiseCapability::kPromiseOffset);
});
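
To make the counter protocol in PerformPromiseAll and PromiseAllResolveElementClosure above easier to follow, here is a minimal standalone C++ model of it. This is a sketch only: the struct, the callback type, and the synchronous flow are invented stand-ins; the real builtins keep this state in the resolve-element context slots.

// Minimal model of the Promise.all remaining-elements protocol (illustrative
// only; all names are invented, the real state lives in CSA context slots).
#include <functional>
#include <iostream>
#include <vector>

struct PromiseAllState {
  // remainingElementsCount starts at 1 so the iteration loop itself holds a
  // "reference"; that reference is dropped once iteration finishes.
  int remaining = 1;
  std::vector<int> values;
  std::function<void(const std::vector<int>&)> resolve;
};

// Mirrors PerformPromiseAll: each element increments the counter before its
// resolve-element closure is registered.
void OnElementRegistered(PromiseAllState& s, size_t index) {
  ++s.remaining;
  if (s.values.size() <= index) s.values.resize(index + 1);
}

// Mirrors PromiseAllResolveElementClosure: store the value, decrement, and
// resolve the combined promise when the count reaches zero.
void OnElementResolved(PromiseAllState& s, size_t index, int value) {
  s.values[index] = value;
  if (--s.remaining == 0) s.resolve(s.values);
}

// Mirrors the end of the iteration loop dropping its own count.
void OnIterationDone(PromiseAllState& s) {
  if (--s.remaining == 0) s.resolve(s.values);
}

int main() {
  PromiseAllState s;
  s.resolve = [](const std::vector<int>& v) {
    for (int x : v) std::cout << x << ' ';
    std::cout << '\n';
  };
  for (size_t i = 0; i < 3; ++i) OnElementRegistered(s, i);
  OnIterationDone(s);  // loop finished; three elements still pending
  for (size_t i = 0; i < 3; ++i) OnElementResolved(s, i, int(i) * 10);
  return 0;
}
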
diff --git a/deps/v8/src/builtins/builtins-promise-gen.h b/deps/v8/src/builtins/builtins-promise-gen.h
index 39b2a24683..8edc2331a5 100644
--- a/deps/v8/src/builtins/builtins-promise-gen.h
+++ b/deps/v8/src/builtins/builtins-promise-gen.h
@@ -6,8 +6,9 @@
#define V8_BUILTINS_BUILTINS_PROMISE_GEN_H_
#include "src/code-stub-assembler.h"
-#include "src/contexts.h"
#include "src/objects/promise.h"
+#include "torque-generated/builtins-base-from-dsl-gen.h"
+#include "torque-generated/builtins-iterator-from-dsl-gen.h"
namespace v8 {
namespace internal {
@@ -16,59 +17,6 @@ typedef compiler::CodeAssemblerState CodeAssemblerState;
class PromiseBuiltinsAssembler : public CodeStubAssembler {
public:
- enum PromiseResolvingFunctionContextSlot {
- // The promise which resolve/reject callbacks fulfill.
- kPromiseSlot = Context::MIN_CONTEXT_SLOTS,
-
- // Whether the callback was already invoked.
- kAlreadyResolvedSlot,
-
- // Whether to trigger a debug event or not. Used in catch
- // prediction.
- kDebugEventSlot,
- kPromiseContextLength,
- };
-
- protected:
- enum PromiseAllResolveElementContextSlots {
- // Remaining elements count
- kPromiseAllResolveElementRemainingSlot = Context::MIN_CONTEXT_SLOTS,
-
- // Promise capability from Promise.all
- kPromiseAllResolveElementCapabilitySlot,
-
- // Values array from Promise.all
- kPromiseAllResolveElementValuesArraySlot,
-
- kPromiseAllResolveElementLength
- };
-
- public:
- enum FunctionContextSlot {
- kCapabilitySlot = Context::MIN_CONTEXT_SLOTS,
-
- kCapabilitiesContextLength,
- };
-
- // This is used by the Promise.prototype.finally builtin to store
- // onFinally callback and the Promise constructor.
- // TODO(gsathya): For native promises we can create a variant of
- // this without extra space for the constructor to save memory.
- enum PromiseFinallyContextSlot {
- kOnFinallySlot = Context::MIN_CONTEXT_SLOTS,
- kConstructorSlot,
-
- kPromiseFinallyContextLength,
- };
-
- // This is used by the ThenFinally and CatchFinally builtins to
- // store the value to return or reason to throw.
- enum PromiseValueThunkOrReasonContextSlot {
- kValueSlot = Context::MIN_CONTEXT_SLOTS,
-
- kPromiseValueThunkOrReasonContextLength,
- };
-
explicit PromiseBuiltinsAssembler(compiler::CodeAssemblerState* state)
: CodeStubAssembler(state) {}
// These allocate and initialize a promise with pending state and
@@ -144,6 +92,9 @@ class PromiseBuiltinsAssembler : public CodeStubAssembler {
void BranchIfPromiseResolveLookupChainIntact(Node* native_context,
Node* constructor,
Label* if_fast, Label* if_slow);
+ void GotoIfNotPromiseResolveLookupChainIntact(Node* native_context,
+ Node* constructor,
+ Label* if_slow);
  // We can shortcut the SpeciesConstructor on {promise_map} if its
// [[Prototype]] is the (initial) Promise.prototype and the @@species
@@ -177,9 +128,10 @@ class PromiseBuiltinsAssembler : public CodeStubAssembler {
Node* CreateThrowerFunction(Node* reason, Node* native_context);
- Node* PerformPromiseAll(Node* context, Node* constructor, Node* capability,
- const IteratorRecord& record, Label* if_exception,
- Variable* var_exception);
+ Node* PerformPromiseAll(
+ Node* context, Node* constructor, Node* capability,
+ const IteratorBuiltinsFromDSLAssembler::IteratorRecord& record,
+ Label* if_exception, Variable* var_exception);
void SetForwardingHandlerIfTrue(Node* context, Node* condition,
const NodeGenerator& object);
diff --git a/deps/v8/src/builtins/builtins-promise.cc b/deps/v8/src/builtins/builtins-promise.cc
index 88c0632c15..0f18d8fb45 100644
--- a/deps/v8/src/builtins/builtins-promise.cc
+++ b/deps/v8/src/builtins/builtins-promise.cc
@@ -2,8 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/builtins/builtins-promise.h"
+
#include "src/builtins/builtins-utils-inl.h"
#include "src/builtins/builtins.h"
+#include "src/counters.h"
#include "src/objects-inl.h"
namespace v8 {
diff --git a/deps/v8/src/builtins/builtins-promise.h b/deps/v8/src/builtins/builtins-promise.h
new file mode 100644
index 0000000000..66545feafe
--- /dev/null
+++ b/deps/v8/src/builtins/builtins-promise.h
@@ -0,0 +1,75 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BUILTINS_BUILTINS_PROMISE_H_
+#define V8_BUILTINS_BUILTINS_PROMISE_H_
+
+#include "src/contexts.h"
+
+namespace v8 {
+namespace internal {
+
+class PromiseBuiltins {
+ public:
+ enum PromiseResolvingFunctionContextSlot {
+ // The promise which resolve/reject callbacks fulfill.
+ kPromiseSlot = Context::MIN_CONTEXT_SLOTS,
+
+ // Whether the callback was already invoked.
+ kAlreadyResolvedSlot,
+
+ // Whether to trigger a debug event or not. Used in catch
+ // prediction.
+ kDebugEventSlot,
+ kPromiseContextLength,
+ };
+
+ // TODO(bmeurer): Move this to a proper context map in contexts.h?
+ // Similar to the AwaitContext that we introduced for await closures.
+ enum PromiseAllResolveElementContextSlots {
+ // Remaining elements count
+ kPromiseAllResolveElementRemainingSlot = Context::MIN_CONTEXT_SLOTS,
+
+ // Promise capability from Promise.all
+ kPromiseAllResolveElementCapabilitySlot,
+
+ // Values array from Promise.all
+ kPromiseAllResolveElementValuesArraySlot,
+
+ kPromiseAllResolveElementLength
+ };
+
+ enum FunctionContextSlot {
+ kCapabilitySlot = Context::MIN_CONTEXT_SLOTS,
+
+ kCapabilitiesContextLength,
+ };
+
+ // This is used by the Promise.prototype.finally builtin to store
+  // the onFinally callback and the Promise constructor.
+ // TODO(gsathya): For native promises we can create a variant of
+ // this without extra space for the constructor to save memory.
+ enum PromiseFinallyContextSlot {
+ kOnFinallySlot = Context::MIN_CONTEXT_SLOTS,
+ kConstructorSlot,
+
+ kPromiseFinallyContextLength,
+ };
+
+ // This is used by the ThenFinally and CatchFinally builtins to
+ // store the value to return or reason to throw.
+ enum PromiseValueThunkOrReasonContextSlot {
+ kValueSlot = Context::MIN_CONTEXT_SLOTS,
+
+ kPromiseValueThunkOrReasonContextLength,
+ };
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(PromiseBuiltins);
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_BUILTINS_BUILTINS_PROMISE_H_
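
A short sketch of how slot enums like these are consumed: a synthetic context behaves like a fixed-size tagged array whose payload starts after the standard header slots. Tagged and kMinContextSlots below are simplified stand-ins for the real V8 types.

// Illustrative only: a promise resolving-function context modeled as a
// fixed array indexed by the enum above. Stand-in types, not V8 API.
#include <array>
#include <cassert>
#include <cstdint>

using Tagged = std::intptr_t;        // stand-in for a tagged V8 value
constexpr int kMinContextSlots = 2;  // stand-in for Context::MIN_CONTEXT_SLOTS

enum PromiseResolvingFunctionContextSlot {
  kPromiseSlot = kMinContextSlots,
  kAlreadyResolvedSlot,
  kDebugEventSlot,
  kPromiseContextLength,
};

using PromiseContext = std::array<Tagged, kPromiseContextLength>;

int main() {
  PromiseContext ctx{};
  ctx[kPromiseSlot] = 0x1234;     // the promise the callbacks fulfill
  ctx[kAlreadyResolvedSlot] = 0;  // callback not yet invoked
  ctx[kDebugEventSlot] = 1;       // trigger a debug event in catch prediction
  // A resolving function checks-and-sets kAlreadyResolvedSlot so the
  // resolve/reject pair can only fire once, mirroring the CSA builtins.
  assert(ctx[kAlreadyResolvedSlot] == 0);
  ctx[kAlreadyResolvedSlot] = 1;
  return 0;
}
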
diff --git a/deps/v8/src/builtins/builtins-proxy-gen.cc b/deps/v8/src/builtins/builtins-proxy-gen.cc
index f0d891910a..02b4d0b71e 100644
--- a/deps/v8/src/builtins/builtins-proxy-gen.cc
+++ b/deps/v8/src/builtins/builtins-proxy-gen.cc
@@ -81,24 +81,24 @@ Node* ProxiesCodeStubAssembler::AllocateJSArrayForCodeStubArguments(
kAllowLargeObjectAllocation);
elements.Bind(allocated_elements);
- VARIABLE(index, MachineType::PointerRepresentation(),
- IntPtrConstant(FixedArrayBase::kHeaderSize - kHeapObjectTag));
- VariableList list({&index}, zone());
+ TVARIABLE(IntPtrT, offset,
+ IntPtrConstant(FixedArrayBase::kHeaderSize - kHeapObjectTag));
+ VariableList list({&offset}, zone());
GotoIf(SmiGreaterThan(length, SmiConstant(FixedArray::kMaxRegularLength)),
&if_large_object);
- args.ForEach(list, [=, &index](Node* arg) {
+ args.ForEach(list, [=, &offset](Node* arg) {
StoreNoWriteBarrier(MachineRepresentation::kTagged, allocated_elements,
- index.value(), arg);
- Increment(&index, kPointerSize);
+ offset.value(), arg);
+ Increment(&offset, kTaggedSize);
});
Goto(&allocate_js_array);
BIND(&if_large_object);
{
- args.ForEach(list, [=, &index](Node* arg) {
- Store(allocated_elements, index.value(), arg);
- Increment(&index, kPointerSize);
+ args.ForEach(list, [=, &offset](Node* arg) {
+ Store(allocated_elements, offset.value(), arg);
+ Increment(&offset, kTaggedSize);
});
Goto(&allocate_js_array);
}
@@ -113,8 +113,10 @@ Node* ProxiesCodeStubAssembler::AllocateJSArrayForCodeStubArguments(
BIND(&allocate_js_array);
// Allocate the result JSArray.
Node* native_context = LoadNativeContext(context);
- Node* array_map = LoadJSArrayElementsMap(PACKED_ELEMENTS, native_context);
- Node* array = AllocateUninitializedJSArrayWithoutElements(array_map, length);
+ TNode<Map> array_map =
+ LoadJSArrayElementsMap(PACKED_ELEMENTS, native_context);
+ TNode<JSArray> array =
+ AllocateUninitializedJSArrayWithoutElements(array_map, length);
StoreObjectFieldNoWriteBarrier(array, JSObject::kElementsOffset,
elements.value());
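
The kPointerSize-to-kTaggedSize switch in this hunk (and in several regexp hunks below) prepares for pointer compression, where a tagged slot can be narrower than a machine pointer. A hedged sketch of the offset arithmetic, with invented constant values:

// Sketch of tagged-slot offset arithmetic (constants invented for
// illustration; under pointer compression kTaggedSize can be 4 on a 64-bit
// target while kPointerSize stays 8).
#include <cstdint>
#include <iostream>

constexpr int kHeapObjectTag = 1;          // heap pointers carry tag bit 1
constexpr int kTaggedSize = 4;             // compressed tagged slot
constexpr int kFixedArrayHeaderSize = 16;  // map + length, for example

// Untagged offset of element |i| from a FixedArray's tagged pointer.
constexpr intptr_t ElementOffset(int i) {
  return kFixedArrayHeaderSize - kHeapObjectTag +
         static_cast<intptr_t>(i) * kTaggedSize;
}

int main() {
  // The ForEach loops above start at the header offset and bump the offset
  // by kTaggedSize per argument, exactly like this:
  for (int i = 0; i < 3; ++i) std::cout << ElementOffset(i) << '\n';
  return 0;
}
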
diff --git a/deps/v8/src/builtins/builtins-regexp-gen.cc b/deps/v8/src/builtins/builtins-regexp-gen.cc
index 30717f41de..08b8e0457c 100644
--- a/deps/v8/src/builtins/builtins-regexp-gen.cc
+++ b/deps/v8/src/builtins/builtins-regexp-gen.cc
@@ -58,8 +58,8 @@ TNode<JSRegExpResult> RegExpBuiltinsAssembler::AllocateRegExpResult(
// The folded allocation.
- Node* result = Allocate(total_size);
- Node* elements = InnerAllocate(result, kElementsOffset);
+ TNode<HeapObject> result = Allocate(total_size);
+ TNode<HeapObject> elements = InnerAllocate(result, kElementsOffset);
// Initialize the JSRegExpResult.
@@ -82,7 +82,7 @@ TNode<JSRegExpResult> RegExpBuiltinsAssembler::AllocateRegExpResult(
DCHECK(!IsDoubleElementsKind(elements_kind));
const RootIndex map_index = RootIndex::kFixedArrayMap;
- DCHECK(Heap::RootIsImmortalImmovable(map_index));
+ DCHECK(RootsTable::IsImmortalImmovable(map_index));
StoreMapNoWriteBarrier(elements, map_index);
StoreObjectFieldNoWriteBarrier(elements, FixedArray::kLengthOffset, length);
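
The Allocate/InnerAllocate pair above is a "folded" allocation: one block is reserved and both the result object and its elements store are carved out of it, so a single allocation suffices. A rough standalone model, with invented sizes and an arena standing in for the heap:

// Illustrative model of a folded allocation; sizes and types are stand-ins.
#include <cstddef>
#include <cstdint>
#include <vector>

struct Arena {
  std::vector<uint8_t> storage;
  uint8_t* Allocate(size_t total) {
    storage.resize(total);
    return storage.data();
  }
};

int main() {
  constexpr size_t kResultSize = 32;    // JSRegExpResult header, say
  constexpr size_t kElementsSize = 64;  // trailing FixedArray, say
  Arena arena;
  uint8_t* result = arena.Allocate(kResultSize + kElementsSize);
  // InnerAllocate(result, kElementsOffset) is just pointer arithmetic into
  // the same block, so no second allocation (or GC check) is needed:
  uint8_t* elements = result + kResultSize;
  (void)elements;
  return 0;
}
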
@@ -118,7 +118,7 @@ TNode<Object> RegExpBuiltinsAssembler::FastLoadLastIndex(
TNode<JSRegExp> regexp) {
// Load the in-object field.
static const int field_offset =
- JSRegExp::kSize + JSRegExp::kLastIndexFieldIndex * kPointerSize;
+ JSRegExp::kSize + JSRegExp::kLastIndexFieldIndex * kTaggedSize;
return LoadObjectField(regexp, field_offset);
}
@@ -139,7 +139,7 @@ TNode<Object> RegExpBuiltinsAssembler::LoadLastIndex(TNode<Context> context,
void RegExpBuiltinsAssembler::FastStoreLastIndex(Node* regexp, Node* value) {
// Store the in-object field.
static const int field_offset =
- JSRegExp::kSize + JSRegExp::kLastIndexFieldIndex * kPointerSize;
+ JSRegExp::kSize + JSRegExp::kLastIndexFieldIndex * kTaggedSize;
StoreObjectField(regexp, field_offset, value);
}
@@ -592,7 +592,7 @@ TNode<HeapObject> RegExpBuiltinsAssembler::RegExpExecInternal(
TNode<Smi> smi_value = SmiFromInt32(value);
StoreNoWriteBarrier(MachineRepresentation::kTagged, match_info,
var_to_offset.value(), smi_value);
- Increment(&var_to_offset, kPointerSize);
+ Increment(&var_to_offset, kTaggedSize);
},
kInt32Size, INTPTR_PARAMETERS, IndexAdvanceMode::kPost);
}
@@ -795,7 +795,7 @@ TNode<HeapObject> RegExpBuiltinsAssembler::RegExpPrototypeExecBody(
}
Node* RegExpBuiltinsAssembler::ThrowIfNotJSReceiver(
- Node* context, Node* maybe_receiver, MessageTemplate::Template msg_template,
+ Node* context, Node* maybe_receiver, MessageTemplate msg_template,
char const* method_name) {
Label out(this), throw_exception(this, Label::kDeferred);
VARIABLE(var_value_map, MachineRepresentation::kTagged);
@@ -907,17 +907,17 @@ Node* RegExpBuiltinsAssembler::IsFastRegExpNoPrototype(Node* const context,
// We use a fairly coarse granularity for this and simply check whether
// the regexp itself is unmodified (i.e. its map has not changed), its
// prototype is unmodified, and lastIndex is a non-negative smi.
-void RegExpBuiltinsAssembler::BranchIfFastRegExp(Node* const context,
- Node* const object,
- Node* const map,
- Label* const if_isunmodified,
- Label* const if_ismodified) {
+void RegExpBuiltinsAssembler::BranchIfFastRegExp(
+ Node* const context, Node* const object, Node* const map,
+ base::Optional<DescriptorIndexAndName> additional_property_to_check,
+ Label* const if_isunmodified, Label* const if_ismodified) {
CSA_ASSERT(this, WordEqual(LoadMap(object), map));
GotoIfForceSlowPath(if_ismodified);
- // TODO(ishell): Update this check once map changes for constant field
- // tracking are landing.
+ // This should only be needed for String.p.(split||matchAll), but we are
+ // conservative here.
+ GotoIf(IsRegExpSpeciesProtectorCellInvalid(), if_ismodified);
Node* const native_context = LoadNativeContext(context);
Node* const regexp_fun =
@@ -930,11 +930,19 @@ void RegExpBuiltinsAssembler::BranchIfFastRegExp(Node* const context,
Node* const initial_proto_initial_map =
LoadContextElement(native_context, Context::REGEXP_PROTOTYPE_MAP_INDEX);
- Node* const proto_map = LoadMap(LoadMapPrototype(map));
- Node* const proto_has_initialmap =
- WordEqual(proto_map, initial_proto_initial_map);
- GotoIfNot(proto_has_initialmap, if_ismodified);
+ DescriptorIndexAndName properties_to_check[2];
+ int property_count = 0;
+ properties_to_check[property_count++] = DescriptorIndexAndName{
+ JSRegExp::kExecFunctionDescriptorIndex, RootIndex::kexec_string};
+ if (additional_property_to_check) {
+ properties_to_check[property_count++] = *additional_property_to_check;
+ }
+
+ GotoIfInitialPrototypePropertiesModified(
+ CAST(map), CAST(initial_proto_initial_map),
+ Vector<DescriptorIndexAndName>(properties_to_check, property_count),
+ if_ismodified);
// The smi check is required to omit ToLength(lastIndex) calls with possible
// user-code execution on the fast path.
@@ -947,8 +955,8 @@ void RegExpBuiltinsAssembler::BranchIfFastRegExp(Node* const context,
Label* const if_isunmodified,
Label* const if_ismodified) {
CSA_ASSERT(this, TaggedIsNotSmi(object));
- BranchIfFastRegExp(context, object, LoadMap(object), if_isunmodified,
- if_ismodified);
+ BranchIfFastRegExp(context, object, LoadMap(object), base::nullopt,
+ if_isunmodified, if_ismodified);
}
TNode<BoolT> RegExpBuiltinsAssembler::IsFastRegExp(SloppyTNode<Context> context,
@@ -1194,20 +1202,20 @@ Node* RegExpBuiltinsAssembler::FlagsGetter(Node* const context,
}
// ES#sec-isregexp IsRegExp ( argument )
-Node* RegExpBuiltinsAssembler::IsRegExp(Node* const context,
- Node* const maybe_receiver) {
+TNode<BoolT> RegExpBuiltinsAssembler::IsRegExp(TNode<Context> context,
+ TNode<Object> maybe_receiver) {
Label out(this), if_isregexp(this);
- VARIABLE(var_result, MachineRepresentation::kWord32, Int32Constant(0));
+ TVARIABLE(BoolT, var_result, Int32FalseConstant());
GotoIf(TaggedIsSmi(maybe_receiver), &out);
- GotoIfNot(IsJSReceiver(maybe_receiver), &out);
+ GotoIfNot(IsJSReceiver(CAST(maybe_receiver)), &out);
- Node* const receiver = maybe_receiver;
+ TNode<JSReceiver> receiver = CAST(maybe_receiver);
// Check @@match.
{
- Node* const value =
+ TNode<Object> value =
GetProperty(context, receiver, isolate()->factory()->match_symbol());
Label match_isundefined(this), match_isnotundefined(this);
@@ -1217,11 +1225,26 @@ Node* RegExpBuiltinsAssembler::IsRegExp(Node* const context,
Branch(IsJSRegExp(receiver), &if_isregexp, &out);
BIND(&match_isnotundefined);
- BranchIfToBooleanIsTrue(value, &if_isregexp, &out);
+ Label match_istrueish(this), match_isfalseish(this);
+ BranchIfToBooleanIsTrue(value, &match_istrueish, &match_isfalseish);
+
+ // The common path. Symbol.match exists, equals the RegExpPrototypeMatch
+ // function (and is thus trueish), and the receiver is a JSRegExp.
+ BIND(&match_istrueish);
+ GotoIf(IsJSRegExp(receiver), &if_isregexp);
+ CallRuntime(Runtime::kIncrementUseCounter, context,
+ SmiConstant(v8::Isolate::kRegExpMatchIsTrueishOnNonJSRegExp));
+ Goto(&if_isregexp);
+
+ BIND(&match_isfalseish);
+ GotoIfNot(IsJSRegExp(receiver), &out);
+ CallRuntime(Runtime::kIncrementUseCounter, context,
+ SmiConstant(v8::Isolate::kRegExpMatchIsFalseishOnJSRegExp));
+ Goto(&out);
}
BIND(&if_isregexp);
- var_result.Bind(Int32Constant(1));
+ var_result = Int32TrueConstant();
Goto(&out);
BIND(&out);
@@ -1263,7 +1286,8 @@ TF_BUILTIN(RegExpPrototypeFlagsGetter, RegExpBuiltinsAssembler) {
TNode<JSReceiver> receiver = CAST(maybe_receiver);
Label if_isfastpath(this), if_isslowpath(this, Label::kDeferred);
- BranchIfFastRegExp(context, receiver, map, &if_isfastpath, &if_isslowpath);
+ BranchIfFastRegExp(context, receiver, map, base::nullopt, &if_isfastpath,
+ &if_isslowpath);
BIND(&if_isfastpath);
Return(FlagsGetter(context, receiver, true));
@@ -1290,7 +1314,7 @@ TF_BUILTIN(RegExpConstructor, RegExpBuiltinsAssembler) {
Node* const regexp_function =
LoadContextElement(native_context, Context::REGEXP_FUNCTION_INDEX);
- Node* const pattern_is_regexp = IsRegExp(context, pattern);
+ TNode<BoolT> pattern_is_regexp = IsRegExp(context, pattern);
{
Label next(this);
@@ -1498,73 +1522,66 @@ TF_BUILTIN(RegExpPrototypeSourceGetter, RegExpBuiltinsAssembler) {
}
// Fast-path implementation for flag checks on an unmodified JSRegExp instance.
-Node* RegExpBuiltinsAssembler::FastFlagGetter(Node* const regexp,
- JSRegExp::Flag flag) {
- TNode<Smi> const flags =
- CAST(LoadObjectField(regexp, JSRegExp::kFlagsOffset));
- TNode<Smi> const mask = SmiConstant(flag);
- return SmiToInt32(SmiAnd(flags, mask));
+TNode<Int32T> RegExpBuiltinsAssembler::FastFlagGetter(TNode<JSRegExp> regexp,
+ JSRegExp::Flag flag) {
+ TNode<Smi> flags = CAST(LoadObjectField(regexp, JSRegExp::kFlagsOffset));
+ TNode<Smi> mask = SmiConstant(flag);
+ return SmiToInt32(SmiShr(SmiAnd(flags, mask), JSRegExp::FlagShiftBits(flag)));
}
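
The updated FastFlagGetter masks out the flag's bit and then shifts it down to bit zero, so callers always see a normalized 0 or 1 instead of the raw mask value. A small sketch of that bit arithmetic (flag positions invented; JSRegExp::FlagShiftBits provides the shift in the real code):

// Sketch of the mask-then-shift flag read; bit assignments are invented.
#include <cassert>
#include <cstdint>

enum Flag : uint32_t {
  kGlobal = 1u << 0,
  kIgnoreCase = 1u << 1,
  kMultiline = 1u << 2,
};

constexpr int FlagShiftBits(Flag f) {
  int shift = 0;
  for (uint32_t v = f; v > 1; v >>= 1) ++shift;
  return shift;
}

constexpr uint32_t FastFlagGetter(uint32_t flags, Flag flag) {
  // Without the shift, reading kMultiline would yield 4, not 1.
  return (flags & flag) >> FlagShiftBits(flag);
}

int main() {
  uint32_t flags = kGlobal | kMultiline;
  assert(FastFlagGetter(flags, kGlobal) == 1);
  assert(FastFlagGetter(flags, kIgnoreCase) == 0);
  assert(FastFlagGetter(flags, kMultiline) == 1);
  return 0;
}
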
// Load through the GetProperty stub.
-Node* RegExpBuiltinsAssembler::SlowFlagGetter(Node* const context,
- Node* const regexp,
- JSRegExp::Flag flag) {
- Factory* factory = isolate()->factory();
-
+TNode<Int32T> RegExpBuiltinsAssembler::SlowFlagGetter(TNode<Context> context,
+ TNode<Object> regexp,
+ JSRegExp::Flag flag) {
Label out(this);
- VARIABLE(var_result, MachineRepresentation::kWord32);
+ TVARIABLE(Int32T, var_result);
Handle<String> name;
switch (flag) {
case JSRegExp::kGlobal:
- name = factory->global_string();
+ name = isolate()->factory()->global_string();
break;
case JSRegExp::kIgnoreCase:
- name = factory->ignoreCase_string();
+ name = isolate()->factory()->ignoreCase_string();
break;
case JSRegExp::kMultiline:
- name = factory->multiline_string();
+ name = isolate()->factory()->multiline_string();
break;
case JSRegExp::kDotAll:
UNREACHABLE(); // Never called for dotAll.
break;
case JSRegExp::kSticky:
- name = factory->sticky_string();
+ name = isolate()->factory()->sticky_string();
break;
case JSRegExp::kUnicode:
- name = factory->unicode_string();
+ name = isolate()->factory()->unicode_string();
break;
default:
UNREACHABLE();
}
- Node* const value = GetProperty(context, regexp, name);
+ TNode<Object> value = GetProperty(context, regexp, name);
Label if_true(this), if_false(this);
BranchIfToBooleanIsTrue(value, &if_true, &if_false);
BIND(&if_true);
- {
- var_result.Bind(Int32Constant(1));
- Goto(&out);
- }
+ var_result = Int32Constant(1);
+ Goto(&out);
BIND(&if_false);
- {
- var_result.Bind(Int32Constant(0));
- Goto(&out);
- }
+ var_result = Int32Constant(0);
+ Goto(&out);
BIND(&out);
return var_result.value();
}
-Node* RegExpBuiltinsAssembler::FlagGetter(Node* const context,
- Node* const regexp,
- JSRegExp::Flag flag,
- bool is_fastpath) {
- return is_fastpath ? FastFlagGetter(regexp, flag)
+TNode<Int32T> RegExpBuiltinsAssembler::FlagGetter(TNode<Context> context,
+ TNode<Object> regexp,
+ JSRegExp::Flag flag,
+ bool is_fastpath) {
+ return is_fastpath ? FastFlagGetter(CAST(regexp), flag)
: SlowFlagGetter(context, regexp, flag);
}
@@ -1582,7 +1599,7 @@ void RegExpBuiltinsAssembler::FlagGetter(Node* context, Node* receiver,
BIND(&if_isunmodifiedjsregexp);
{
// Refer to JSRegExp's flag property on the fast-path.
- Node* const is_flag_set = FastFlagGetter(receiver, flag);
+ Node* const is_flag_set = FastFlagGetter(CAST(receiver), flag);
Return(SelectBooleanConstant(is_flag_set));
}
@@ -1846,7 +1863,7 @@ void RegExpBuiltinsAssembler::RegExpPrototypeMatchBody(Node* const context,
if (is_fastpath) CSA_ASSERT(this, IsFastRegExp(context, regexp));
Node* const is_global =
- FlagGetter(context, regexp, JSRegExp::kGlobal, is_fastpath);
+ FlagGetter(CAST(context), CAST(regexp), JSRegExp::kGlobal, is_fastpath);
Label if_isglobal(this), if_isnotglobal(this);
Branch(is_global, &if_isglobal, &if_isnotglobal);
@@ -1862,8 +1879,8 @@ void RegExpBuiltinsAssembler::RegExpPrototypeMatchBody(Node* const context,
BIND(&if_isglobal);
{
- Node* const is_unicode =
- FlagGetter(context, regexp, JSRegExp::kUnicode, is_fastpath);
+ Node* const is_unicode = FlagGetter(CAST(context), CAST(regexp),
+ JSRegExp::kUnicode, is_fastpath);
StoreLastIndex(context, regexp, SmiZero(), is_fastpath);
@@ -1995,178 +2012,167 @@ TF_BUILTIN(RegExpPrototypeMatch, RegExpBuiltinsAssembler) {
RegExpPrototypeMatchBody(context, receiver, string, false);
}
-TNode<Object> RegExpBuiltinsAssembler::MatchAllIterator(
- TNode<Context> context, TNode<Context> native_context,
- TNode<Object> maybe_regexp, TNode<String> string,
- TNode<BoolT> is_fast_regexp, char const* method_name) {
- Label create_iterator(this), if_fast_regexp(this),
- if_slow_regexp(this, Label::kDeferred), if_not_regexp(this);
+void RegExpMatchAllAssembler::Generate(TNode<Context> context,
+ TNode<Context> native_context,
+ TNode<Object> receiver,
+ TNode<Object> maybe_string) {
+ // 1. Let R be the this value.
+ // 2. If Type(R) is not Object, throw a TypeError exception.
+ ThrowIfNotJSReceiver(context, receiver,
+ MessageTemplate::kIncompatibleMethodReceiver,
+ "RegExp.prototype.@@matchAll");
- // 1. Let S be ? ToString(O).
- // Handled by the caller of MatchAllIterator.
- CSA_ASSERT(this, IsString(string));
+ // 3. Let S be ? ToString(O).
+ TNode<String> string = ToString_Inline(context, maybe_string);
TVARIABLE(Object, var_matcher);
TVARIABLE(Int32T, var_global);
TVARIABLE(Int32T, var_unicode);
+ Label create_iterator(this), if_fast_regexp(this),
+ if_slow_regexp(this, Label::kDeferred);
- // 2. If ? IsRegExp(R) is true, then
- GotoIf(is_fast_regexp, &if_fast_regexp);
- Branch(IsRegExp(context, maybe_regexp), &if_slow_regexp, &if_not_regexp);
+ BranchIfFastRegExp(context, receiver, &if_fast_regexp, &if_slow_regexp);
BIND(&if_fast_regexp);
{
- CSA_ASSERT(this, IsFastRegExp(context, maybe_regexp));
- TNode<JSRegExp> fast_regexp = CAST(maybe_regexp);
+ TNode<JSRegExp> fast_regexp = CAST(receiver);
TNode<Object> source =
LoadObjectField(fast_regexp, JSRegExp::kSourceOffset);
- TNode<String> flags = CAST(FlagsGetter(context, fast_regexp, true));
- // c. Let matcher be ? Construct(C, « R, flags »).
+ // 4. Let C be ? SpeciesConstructor(R, %RegExp%).
+ // 5. Let flags be ? ToString(? Get(R, "flags")).
+ // 6. Let matcher be ? Construct(C, « R, flags »).
+ TNode<String> flags = CAST(FlagsGetter(context, fast_regexp, true));
var_matcher = RegExpCreate(context, native_context, source, flags);
CSA_ASSERT(this, IsFastRegExp(context, var_matcher.value()));
- // d. Let global be ? ToBoolean(? Get(matcher, "global")).
- var_global = UncheckedCast<Int32T>(
- FastFlagGetter(var_matcher.value(), JSRegExp::kGlobal));
+ // 7. Let lastIndex be ? ToLength(? Get(R, "lastIndex")).
+ // 8. Perform ? Set(matcher, "lastIndex", lastIndex, true).
+ FastStoreLastIndex(var_matcher.value(), FastLoadLastIndex(fast_regexp));
-    // e. Let fullUnicode be ? ToBoolean(? Get(matcher, "unicode")).
- var_unicode = UncheckedCast<Int32T>(
- FastFlagGetter(var_matcher.value(), JSRegExp::kUnicode));
+ // 9. If flags contains "g", let global be true.
+ // 10. Else, let global be false.
+ var_global = FastFlagGetter(CAST(var_matcher.value()), JSRegExp::kGlobal);
- // f. Let lastIndex be ? ToLength(? Get(R, "lastIndex")).
- // g. Perform ? Set(matcher, "lastIndex", lastIndex, true).
- FastStoreLastIndex(var_matcher.value(), FastLoadLastIndex(fast_regexp));
+ // 11. If flags contains "u", let fullUnicode be true.
+ // 12. Else, let fullUnicode be false.
+ var_unicode = FastFlagGetter(CAST(var_matcher.value()), JSRegExp::kUnicode);
Goto(&create_iterator);
}
+
BIND(&if_slow_regexp);
{
- // a. Let C be ? SpeciesConstructor(R, %RegExp%).
- TNode<Object> regexp_fun =
- LoadContextElement(native_context, Context::REGEXP_FUNCTION_INDEX);
- TNode<Object> species_constructor =
- SpeciesConstructor(native_context, maybe_regexp, regexp_fun);
+ // 4. Let C be ? SpeciesConstructor(R, %RegExp%).
+ TNode<JSFunction> regexp_fun = CAST(
+ LoadContextElement(native_context, Context::REGEXP_FUNCTION_INDEX));
+ TNode<JSReceiver> species_constructor =
+ SpeciesConstructor(native_context, receiver, regexp_fun);
- // b. Let flags be ? ToString(? Get(R, "flags")).
- TNode<Object> flags = GetProperty(context, maybe_regexp,
- isolate()->factory()->flags_string());
+ // 5. Let flags be ? ToString(? Get(R, "flags")).
+ TNode<Object> flags =
+ GetProperty(context, receiver, isolate()->factory()->flags_string());
TNode<String> flags_string = ToString_Inline(context, flags);
- // c. Let matcher be ? Construct(C, « R, flags »).
+ // 6. Let matcher be ? Construct(C, « R, flags »).
var_matcher =
- CAST(ConstructJS(CodeFactory::Construct(isolate()), context,
- species_constructor, maybe_regexp, flags_string));
-
- // d. Let global be ? ToBoolean(? Get(matcher, "global")).
- var_global = UncheckedCast<Int32T>(
- SlowFlagGetter(context, var_matcher.value(), JSRegExp::kGlobal));
-
-    // e. Let fullUnicode be ? ToBoolean(? Get(matcher, "unicode")).
- var_unicode = UncheckedCast<Int32T>(
- SlowFlagGetter(context, var_matcher.value(), JSRegExp::kUnicode));
+ Construct(context, species_constructor, receiver, flags_string);
- // f. Let lastIndex be ? ToLength(? Get(R, "lastIndex")).
- TNode<Number> last_index = UncheckedCast<Number>(
- ToLength_Inline(context, SlowLoadLastIndex(context, maybe_regexp)));
+ // 7. Let lastIndex be ? ToLength(? Get(R, "lastIndex")).
+ TNode<Number> last_index =
+ ToLength_Inline(context, SlowLoadLastIndex(context, receiver));
- // g. Perform ? Set(matcher, "lastIndex", lastIndex, true).
+ // 8. Perform ? Set(matcher, "lastIndex", lastIndex, true).
SlowStoreLastIndex(context, var_matcher.value(), last_index);
+ // 9. If flags contains "g", let global be true.
+ // 10. Else, let global be false.
+ TNode<String> global_char_string = StringConstant("g");
+ TNode<Smi> global_ix =
+ CAST(CallBuiltin(Builtins::kStringIndexOf, context, flags_string,
+ global_char_string, SmiZero()));
+ var_global =
+ SelectInt32Constant(SmiEqual(global_ix, SmiConstant(-1)), 0, 1);
+
+ // 11. If flags contains "u", let fullUnicode be true.
+ // 12. Else, let fullUnicode be false.
+ TNode<String> unicode_char_string = StringConstant("u");
+ TNode<Smi> unicode_ix =
+ CAST(CallBuiltin(Builtins::kStringIndexOf, context, flags_string,
+ unicode_char_string, SmiZero()));
+ var_unicode =
+ SelectInt32Constant(SmiEqual(unicode_ix, SmiConstant(-1)), 0, 1);
Goto(&create_iterator);
}
- // 3. Else,
- BIND(&if_not_regexp);
- {
- // a. Let flags be "g".
- // b. Let matcher be ? RegExpCreate(R, flags).
- var_matcher = RegExpCreate(context, native_context, maybe_regexp,
- StringConstant("g"));
-
- // c. Let global be true.
- var_global = Int32Constant(1);
-
- // d. Let fullUnicode be false.
- var_unicode = Int32Constant(0);
-
-#ifdef DEBUG
- // Assert: ! Get(matcher, "lastIndex") is 0.
- TNode<Object> last_index = SlowLoadLastIndex(context, var_matcher.value());
- CSA_ASSERT(this, WordEqual(SmiZero(), last_index));
-#endif // DEBUG
- Goto(&create_iterator);
- }
- // 4. Return ! CreateRegExpStringIterator(matcher, S, global, fullUnicode).
BIND(&create_iterator);
{
- TNode<Map> map = CAST(LoadContextElement(
- native_context,
- Context::INITIAL_REGEXP_STRING_ITERATOR_PROTOTYPE_MAP_INDEX));
-
- // 4. Let iterator be ObjectCreate(%RegExpStringIteratorPrototype%, «
- // [[IteratingRegExp]], [[IteratedString]], [[Global]], [[Unicode]],
- // [[Done]] »).
- TNode<Object> iterator = CAST(Allocate(JSRegExpStringIterator::kSize));
- StoreMapNoWriteBarrier(iterator, map);
- StoreObjectFieldRoot(iterator,
- JSRegExpStringIterator::kPropertiesOrHashOffset,
- RootIndex::kEmptyFixedArray);
- StoreObjectFieldRoot(iterator, JSRegExpStringIterator::kElementsOffset,
- RootIndex::kEmptyFixedArray);
-
- // 5. Set iterator.[[IteratingRegExp]] to R.
- StoreObjectFieldNoWriteBarrier(
- iterator, JSRegExpStringIterator::kIteratingRegExpOffset,
- var_matcher.value());
-
- // 6. Set iterator.[[IteratedString]] to S.
- StoreObjectFieldNoWriteBarrier(
- iterator, JSRegExpStringIterator::kIteratedStringOffset, string);
+ // 13. Return ! CreateRegExpStringIterator(matcher, S, global, fullUnicode).
+ TNode<Object> iterator =
+ CreateRegExpStringIterator(native_context, var_matcher.value(), string,
+ var_global.value(), var_unicode.value());
+ Return(iterator);
+ }
+}
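
On the slow path above, the matcher may be a user-defined subclass, so "global" and "fullUnicode" are derived by scanning the flags string rather than by reading JSRegExp's flag bits. A minimal sketch of that derivation, using std::string::find in place of the StringIndexOf builtin:

// Illustrative only: deriving matchAll flags from the flags *string*.
#include <cassert>
#include <string>

struct MatchAllFlags {
  int global;   // 0 or 1, like var_global
  int unicode;  // 0 or 1, like var_unicode
};

MatchAllFlags DeriveFlags(const std::string& flags_string) {
  // StringIndexOf(flags, "g") != -1, expressed with std::string::find.
  MatchAllFlags result;
  result.global = flags_string.find('g') != std::string::npos ? 1 : 0;
  result.unicode = flags_string.find('u') != std::string::npos ? 1 : 0;
  return result;
}

int main() {
  assert(DeriveFlags("giu").global == 1);
  assert(DeriveFlags("giu").unicode == 1);
  assert(DeriveFlags("i").global == 0);
  return 0;
}
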
+
+// ES#sec-createregexpstringiterator
+// CreateRegExpStringIterator ( R, S, global, fullUnicode )
+TNode<Object> RegExpMatchAllAssembler::CreateRegExpStringIterator(
+ TNode<Context> native_context, TNode<Object> regexp, TNode<String> string,
+ TNode<Int32T> global, TNode<Int32T> full_unicode) {
+ TNode<Map> map = CAST(LoadContextElement(
+ native_context,
+ Context::INITIAL_REGEXP_STRING_ITERATOR_PROTOTYPE_MAP_INDEX));
+
+ // 4. Let iterator be ObjectCreate(%RegExpStringIteratorPrototype%, «
+ // [[IteratingRegExp]], [[IteratedString]], [[Global]], [[Unicode]],
+ // [[Done]] »).
+ TNode<Object> iterator = Allocate(JSRegExpStringIterator::kSize);
+ StoreMapNoWriteBarrier(iterator, map);
+ StoreObjectFieldRoot(iterator,
+ JSRegExpStringIterator::kPropertiesOrHashOffset,
+ RootIndex::kEmptyFixedArray);
+ StoreObjectFieldRoot(iterator, JSRegExpStringIterator::kElementsOffset,
+ RootIndex::kEmptyFixedArray);
+
+ // 5. Set iterator.[[IteratingRegExp]] to R.
+ StoreObjectFieldNoWriteBarrier(
+ iterator, JSRegExpStringIterator::kIteratingRegExpOffset, regexp);
+
+ // 6. Set iterator.[[IteratedString]] to S.
+ StoreObjectFieldNoWriteBarrier(
+ iterator, JSRegExpStringIterator::kIteratedStringOffset, string);
#ifdef DEBUG
- // Verify global and unicode can be bitwise shifted without masking.
- TNode<Int32T> zero = Int32Constant(0);
- TNode<Int32T> one = Int32Constant(1);
- CSA_ASSERT(this, Word32Or(Word32Equal(var_global.value(), zero),
- Word32Equal(var_global.value(), one)));
- CSA_ASSERT(this, Word32Or(Word32Equal(var_unicode.value(), zero),
- Word32Equal(var_unicode.value(), one)));
+ // Verify global and full_unicode can be bitwise shifted without masking.
+ TNode<Int32T> zero = Int32Constant(0);
+ TNode<Int32T> one = Int32Constant(1);
+ CSA_ASSERT(this,
+ Word32Or(Word32Equal(global, zero), Word32Equal(global, one)));
+ CSA_ASSERT(this, Word32Or(Word32Equal(full_unicode, zero),
+ Word32Equal(full_unicode, one)));
#endif // DEBUG
- // 7. Set iterator.[[Global]] to global.
- // 8. Set iterator.[[Unicode]] to fullUnicode.
- // 9. Set iterator.[[Done]] to false.
- TNode<Word32T> global_flag = Word32Shl(
- var_global.value(), Int32Constant(JSRegExpStringIterator::kGlobalBit));
- TNode<Word32T> unicode_flag =
- Word32Shl(var_unicode.value(),
- Int32Constant(JSRegExpStringIterator::kUnicodeBit));
- TNode<Word32T> iterator_flags = Word32Or(global_flag, unicode_flag);
- StoreObjectFieldNoWriteBarrier(iterator,
- JSRegExpStringIterator::kFlagsOffset,
- SmiFromInt32(Signed(iterator_flags)));
+ // 7. Set iterator.[[Global]] to global.
+ // 8. Set iterator.[[Unicode]] to fullUnicode.
+ // 9. Set iterator.[[Done]] to false.
+ TNode<Word32T> global_flag =
+ Word32Shl(global, Int32Constant(JSRegExpStringIterator::kGlobalBit));
+ TNode<Word32T> unicode_flag = Word32Shl(
+ full_unicode, Int32Constant(JSRegExpStringIterator::kUnicodeBit));
+ TNode<Word32T> iterator_flags = Word32Or(global_flag, unicode_flag);
+ StoreObjectFieldNoWriteBarrier(iterator, JSRegExpStringIterator::kFlagsOffset,
+ SmiFromInt32(Signed(iterator_flags)));
- return iterator;
- }
+ return iterator;
}
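
The flag packing at the end of CreateRegExpStringIterator shifts two known-0/1 inputs to their bit positions and ORs them into one small integer; the DEBUG assertions exist precisely so the shifts need no masking. A sketch with invented bit indices:

// Sketch of the iterator flag packing; bit indices are stand-ins for
// JSRegExpStringIterator::kGlobalBit / kUnicodeBit.
#include <cassert>
#include <cstdint>

constexpr int kGlobalBit = 0;
constexpr int kUnicodeBit = 1;

constexpr uint32_t PackIteratorFlags(uint32_t global, uint32_t full_unicode) {
  // Valid only because both inputs are known to be 0 or 1, which is exactly
  // what the DEBUG assertions above verify before shifting without masking.
  return (global << kGlobalBit) | (full_unicode << kUnicodeBit);
}

int main() {
  assert(PackIteratorFlags(1, 0) == 0b01);
  assert(PackIteratorFlags(1, 1) == 0b11);
  assert(PackIteratorFlags(0, 0) == 0);
  return 0;
}
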
// https://tc39.github.io/proposal-string-matchall/
// RegExp.prototype [ @@matchAll ] ( string )
-TF_BUILTIN(RegExpPrototypeMatchAll, RegExpBuiltinsAssembler) {
+TF_BUILTIN(RegExpPrototypeMatchAll, RegExpMatchAllAssembler) {
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
TNode<Context> native_context = LoadNativeContext(context);
TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- TNode<Object> string = CAST(Parameter(Descriptor::kString));
-
- // 1. Let R be the this value.
- // 2. If Type(R) is not Object, throw a TypeError exception.
- ThrowIfNotJSReceiver(context, receiver,
- MessageTemplate::kIncompatibleMethodReceiver,
- "RegExp.prototype.@@matchAll");
-
- // 3. Return ? MatchAllIterator(R, string).
- Return(MatchAllIterator(
- context, native_context, receiver, ToString_Inline(context, string),
- IsFastRegExp(context, receiver), "RegExp.prototype.@@matchAll"));
+ TNode<Object> maybe_string = CAST(Parameter(Descriptor::kString));
+ Generate(context, native_context, receiver, maybe_string);
}
// Helper that skips a few initial checks and assumes...
@@ -2330,7 +2336,8 @@ void RegExpBuiltinsAssembler::RegExpPrototypeSplitBody(Node* const context,
TNode<String> string,
TNode<Smi> const limit) {
CSA_ASSERT(this, IsFastRegExp(context, regexp));
- CSA_ASSERT(this, Word32BinaryNot(FastFlagGetter(regexp, JSRegExp::kSticky)));
+ CSA_ASSERT(this,
+ Word32BinaryNot(FastFlagGetter(CAST(regexp), JSRegExp::kSticky)));
TNode<IntPtrT> const int_limit = SmiUntag(limit);
@@ -2339,7 +2346,7 @@ void RegExpBuiltinsAssembler::RegExpPrototypeSplitBody(Node* const context,
Node* const allocation_site = nullptr;
Node* const native_context = LoadNativeContext(context);
- Node* const array_map = LoadJSArrayElementsMap(kind, native_context);
+ TNode<Map> array_map = LoadJSArrayElementsMap(kind, native_context);
Label return_empty_array(this, Label::kDeferred);
@@ -2373,12 +2380,12 @@ void RegExpBuiltinsAssembler::RegExpPrototypeSplitBody(Node* const context,
BIND(&return_singleton_array);
{
- Node* const length = SmiConstant(1);
- Node* const capacity = IntPtrConstant(1);
- Node* const result = AllocateJSArray(kind, array_map, capacity, length,
- allocation_site, mode);
+ TNode<Smi> length = SmiConstant(1);
+ TNode<IntPtrT> capacity = IntPtrConstant(1);
+ TNode<JSArray> result = AllocateJSArray(kind, array_map, capacity,
+ length, allocation_site, mode);
- TNode<FixedArray> const fixed_array = CAST(LoadElements(result));
+ TNode<FixedArray> fixed_array = CAST(LoadElements(result));
StoreFixedArrayElement(fixed_array, 0, string);
Return(result);
@@ -2452,7 +2459,7 @@ void RegExpBuiltinsAssembler::RegExpPrototypeSplitBody(Node* const context,
GotoIfNot(SmiEqual(match_to, next_search_from), &next);
GotoIfNot(SmiEqual(match_to, last_matched_until), &next);
- Node* const is_unicode = FastFlagGetter(regexp, JSRegExp::kUnicode);
+ Node* const is_unicode = FastFlagGetter(CAST(regexp), JSRegExp::kUnicode);
Node* const new_next_search_from =
AdvanceStringIndex(string, next_search_from, is_unicode, true);
var_next_search_from = CAST(new_next_search_from);
@@ -2490,10 +2497,10 @@ void RegExpBuiltinsAssembler::RegExpPrototypeSplitBody(Node* const context,
Node* const reg = var_reg.value();
Node* const from = LoadFixedArrayElement(
match_indices, reg,
- RegExpMatchInfo::kFirstCaptureIndex * kPointerSize, mode);
+ RegExpMatchInfo::kFirstCaptureIndex * kTaggedSize, mode);
TNode<Smi> const to = CAST(LoadFixedArrayElement(
match_indices, reg,
- (RegExpMatchInfo::kFirstCaptureIndex + 1) * kPointerSize, mode));
+ (RegExpMatchInfo::kFirstCaptureIndex + 1) * kTaggedSize, mode));
Label select_capture(this), select_undefined(this), store_value(this);
VARIABLE(var_value, MachineRepresentation::kTagged);
@@ -2550,10 +2557,10 @@ void RegExpBuiltinsAssembler::RegExpPrototypeSplitBody(Node* const context,
BIND(&return_empty_array);
{
- Node* const length = SmiZero();
- Node* const capacity = IntPtrZero();
- Node* const result = AllocateJSArray(kind, array_map, capacity, length,
- allocation_site, mode);
+ TNode<Smi> length = SmiZero();
+ TNode<IntPtrT> capacity = IntPtrZero();
+ TNode<JSArray> result = AllocateJSArray(kind, array_map, capacity, length,
+ allocation_site, mode);
Return(result);
}
}
@@ -2675,7 +2682,7 @@ Node* RegExpBuiltinsAssembler::ReplaceGlobalCallableFastPath(
Node* result_array;
{
ElementsKind kind = PACKED_ELEMENTS;
- Node* const array_map = LoadJSArrayElementsMap(kind, native_context);
+ TNode<Map> array_map = LoadJSArrayElementsMap(kind, native_context);
TNode<IntPtrT> capacity = IntPtrConstant(16);
TNode<Smi> length = SmiZero();
Node* const allocation_site = nullptr;
@@ -2875,10 +2882,10 @@ Node* RegExpBuiltinsAssembler::ReplaceSimpleStringFastPath(
if_nofurthermatches(this);
// Is {regexp} global?
- Node* const is_global = FastFlagGetter(regexp, JSRegExp::kGlobal);
+ Node* const is_global = FastFlagGetter(CAST(regexp), JSRegExp::kGlobal);
GotoIfNot(is_global, &loop);
- var_is_unicode.Bind(FastFlagGetter(regexp, JSRegExp::kUnicode));
+ var_is_unicode.Bind(FastFlagGetter(CAST(regexp), JSRegExp::kUnicode));
FastStoreLastIndex(regexp, SmiZero());
Goto(&loop);
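
A sketch of how BranchIfFastRegExp now assembles its property checks: "exec" is always verified against the initial RegExp.prototype map, and callers may supply one additional (descriptor index, name) pair such as Symbol.split. The types below are simplified stand-ins for the CSA ones:

// Illustrative only; mirrors the fixed-size properties_to_check array above.
#include <cstddef>
#include <iostream>
#include <optional>
#include <string>

struct DescriptorIndexAndName {
  int descriptor_index;
  std::string name;
};

void CollectPropertiesToCheck(
    std::optional<DescriptorIndexAndName> additional_property_to_check) {
  DescriptorIndexAndName properties_to_check[2];
  int property_count = 0;
  properties_to_check[property_count++] = {0, "exec"};  // always checked
  if (additional_property_to_check) {
    properties_to_check[property_count++] = *additional_property_to_check;
  }
  for (int i = 0; i < property_count; ++i) {
    std::cout << properties_to_check[i].name << '\n';
  }
}

int main() {
  CollectPropertiesToCheck(std::nullopt);  // just "exec"
  CollectPropertiesToCheck(DescriptorIndexAndName{1, "@@split"});
  return 0;
}
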
diff --git a/deps/v8/src/builtins/builtins-regexp-gen.h b/deps/v8/src/builtins/builtins-regexp-gen.h
index fd0e4b6755..1c2898374e 100644
--- a/deps/v8/src/builtins/builtins-regexp-gen.h
+++ b/deps/v8/src/builtins/builtins-regexp-gen.h
@@ -5,7 +5,9 @@
#ifndef V8_BUILTINS_BUILTINS_REGEXP_GEN_H_
#define V8_BUILTINS_BUILTINS_REGEXP_GEN_H_
+#include "src/base/optional.h"
#include "src/code-stub-assembler.h"
+#include "src/message-template.h"
namespace v8 {
namespace internal {
@@ -15,9 +17,10 @@ class RegExpBuiltinsAssembler : public CodeStubAssembler {
explicit RegExpBuiltinsAssembler(compiler::CodeAssemblerState* state)
: CodeStubAssembler(state) {}
- void BranchIfFastRegExp(Node* const context, Node* const object,
- Node* const map, Label* const if_isunmodified,
- Label* const if_ismodified);
+ void BranchIfFastRegExp(
+ Node* const context, Node* const object, Node* const map,
+ base::Optional<DescriptorIndexAndName> additional_property_to_check,
+ Label* const if_isunmodified, Label* const if_ismodified);
// Create and initialize a RegExp object.
TNode<Object> RegExpCreate(TNode<Context> context,
@@ -27,12 +30,6 @@ class RegExpBuiltinsAssembler : public CodeStubAssembler {
TNode<Object> RegExpCreate(TNode<Context> context, TNode<Map> initial_map,
TNode<Object> regexp_string, TNode<String> flags);
- TNode<Object> MatchAllIterator(TNode<Context> context,
- TNode<Context> native_context,
- TNode<Object> regexp, TNode<String> string,
- TNode<BoolT> is_fast_regexp,
- char const* method_name);
-
protected:
TNode<Smi> SmiZero();
TNode<IntPtrT> IntPtrZero();
@@ -82,7 +79,7 @@ class RegExpBuiltinsAssembler : public CodeStubAssembler {
const bool is_fastpath);
Node* ThrowIfNotJSReceiver(Node* context, Node* maybe_receiver,
- MessageTemplate::Template msg_template,
+ MessageTemplate msg_template,
char const* method_name);
// Analogous to BranchIfFastRegExp, for use in asserts.
@@ -106,15 +103,16 @@ class RegExpBuiltinsAssembler : public CodeStubAssembler {
Node* FlagsGetter(Node* const context, Node* const regexp, bool is_fastpath);
- Node* FastFlagGetter(Node* const regexp, JSRegExp::Flag flag);
- Node* SlowFlagGetter(Node* const context, Node* const regexp,
- JSRegExp::Flag flag);
- Node* FlagGetter(Node* const context, Node* const regexp, JSRegExp::Flag flag,
- bool is_fastpath);
+ TNode<Int32T> FastFlagGetter(TNode<JSRegExp> regexp, JSRegExp::Flag flag);
+ TNode<Int32T> SlowFlagGetter(TNode<Context> context, TNode<Object> regexp,
+ JSRegExp::Flag flag);
+ TNode<Int32T> FlagGetter(TNode<Context> context, TNode<Object> regexp,
+ JSRegExp::Flag flag, bool is_fastpath);
+
void FlagGetter(Node* context, Node* receiver, JSRegExp::Flag flag,
int counter, const char* method_name);
- Node* IsRegExp(Node* const context, Node* const maybe_receiver);
+ TNode<BoolT> IsRegExp(TNode<Context> context, TNode<Object> maybe_receiver);
Node* RegExpInitialize(Node* const context, Node* const regexp,
Node* const maybe_pattern, Node* const maybe_flags);
@@ -144,6 +142,20 @@ class RegExpBuiltinsAssembler : public CodeStubAssembler {
TNode<String> replace_string);
};
+class RegExpMatchAllAssembler : public RegExpBuiltinsAssembler {
+ public:
+ explicit RegExpMatchAllAssembler(compiler::CodeAssemblerState* state)
+ : RegExpBuiltinsAssembler(state) {}
+
+ TNode<Object> CreateRegExpStringIterator(TNode<Context> native_context,
+ TNode<Object> regexp,
+ TNode<String> string,
+ TNode<Int32T> global,
+ TNode<Int32T> full_unicode);
+ void Generate(TNode<Context> context, TNode<Context> native_context,
+ TNode<Object> receiver, TNode<Object> maybe_string);
+};
+
} // namespace internal
} // namespace v8
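
A minimal sketch of the assembler layering this header introduces: RegExpMatchAllAssembler derives from RegExpBuiltinsAssembler so that both RegExp.prototype[@@matchAll] and String.prototype.matchAll can share the iterator-creation helpers. Everything below is a stand-in shape, not the real CSA classes:

// Illustrative class shape only; CodeAssemblerState is a stand-in.
struct CodeAssemblerState {};

class RegExpBuiltinsAssembler {
 public:
  explicit RegExpBuiltinsAssembler(CodeAssemblerState* state)
      : state_(state) {}

 protected:
  CodeAssemblerState* state_;  // shared helpers would hang off this
};

class RegExpMatchAllAssembler : public RegExpBuiltinsAssembler {
 public:
  explicit RegExpMatchAllAssembler(CodeAssemblerState* state)
      : RegExpBuiltinsAssembler(state) {}
  // Generate() and CreateRegExpStringIterator() reuse base-class helpers,
  // which is why a subclass is used instead of free functions.
  void Generate() {}
};

int main() {
  CodeAssemblerState state;
  RegExpMatchAllAssembler(&state).Generate();
  return 0;
}
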
diff --git a/deps/v8/src/builtins/builtins-sharedarraybuffer-gen.cc b/deps/v8/src/builtins/builtins-sharedarraybuffer-gen.cc
index 4befb13d7c..d92e988aef 100644
--- a/deps/v8/src/builtins/builtins-sharedarraybuffer-gen.cc
+++ b/deps/v8/src/builtins/builtins-sharedarraybuffer-gen.cc
@@ -104,7 +104,7 @@ Node* SharedArrayBufferBuiltinsAssembler::ConvertTaggedAtomicIndexToWord32(
// The |number_index| output parameter is used only for architectures that
// don't currently have a TF implementation and forward to runtime functions
// instead; they expect the value has already been coerced to an integer.
- *number_index = ToSmiIndex(CAST(tagged), CAST(context), &range_error);
+ *number_index = ToSmiIndex(CAST(context), CAST(tagged), &range_error);
var_result.Bind(SmiToInt32(*number_index));
Goto(&done);
@@ -134,7 +134,7 @@ void SharedArrayBufferBuiltinsAssembler::DebugSanityCheckAtomicIndex(
Node* array, Node* index_word, Node* context) {
// In Debug mode, we re-validate the index as a sanity check because
// ToInteger above calls out to JavaScript. A SharedArrayBuffer can't be
- // neutered and the TypedArray length can't change either, so skipping this
+ // detached and the TypedArray length can't change either, so skipping this
// check in Release mode is safe.
CSA_ASSERT(this,
Uint32LessThan(index_word,
diff --git a/deps/v8/src/builtins/builtins-sharedarraybuffer.cc b/deps/v8/src/builtins/builtins-sharedarraybuffer.cc
index 859d634cc9..0918f1e27b 100644
--- a/deps/v8/src/builtins/builtins-sharedarraybuffer.cc
+++ b/deps/v8/src/builtins/builtins-sharedarraybuffer.cc
@@ -74,7 +74,7 @@ V8_WARN_UNUSED_RESULT Maybe<size_t> ValidateAtomicAccess(
size_t access_index;
if (!TryNumberToSize(*access_index_obj, &access_index) ||
- typed_array->WasNeutered() ||
+ typed_array->WasDetached() ||
access_index >= typed_array->length_value()) {
isolate->Throw(*isolate->factory()->NewRangeError(
MessageTemplate::kInvalidAtomicAccessIndex));
@@ -183,8 +183,8 @@ BUILTIN(AtomicsWait) {
Handle<JSArrayBuffer> array_buffer = sta->GetBuffer();
size_t addr = (i << 2) + sta->byte_offset();
- return FutexEmulation::Wait(isolate, array_buffer, addr, value_int32,
- timeout_number);
+ return FutexEmulation::WaitJs(isolate, array_buffer, addr, value_int32,
+ timeout_number);
}
} // namespace internal
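
A rough standalone model of the ValidateAtomicAccess logic above: an atomic access is rejected when the index does not convert cleanly, the buffer was detached, or the index is out of bounds. The view type is invented, and std::nullopt stands in for throwing the RangeError:

// Illustrative only; mirrors the checks in ValidateAtomicAccess.
#include <cstddef>
#include <optional>

struct TypedArrayView {
  bool detached = false;
  size_t length = 0;
};

// Returns the validated index, or std::nullopt where the real code throws
// MessageTemplate::kInvalidAtomicAccessIndex.
std::optional<size_t> ValidateAtomicAccess(const TypedArrayView& ta,
                                           double index) {
  if (index < 0 ||
      static_cast<double>(static_cast<size_t>(index)) != index) {
    return std::nullopt;  // TryNumberToSize failed
  }
  size_t access_index = static_cast<size_t>(index);
  if (ta.detached || access_index >= ta.length) return std::nullopt;
  return access_index;
}

int main() {
  TypedArrayView ta{false, 8};
  return ValidateAtomicAccess(ta, 3).has_value() ? 0 : 1;
}
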
diff --git a/deps/v8/src/builtins/builtins-string-gen.cc b/deps/v8/src/builtins/builtins-string-gen.cc
index 574f425a0a..085ffcfafa 100644
--- a/deps/v8/src/builtins/builtins-string-gen.cc
+++ b/deps/v8/src/builtins/builtins-string-gen.cc
@@ -10,6 +10,7 @@
#include "src/code-factory.h"
#include "src/heap/factory-inl.h"
#include "src/objects.h"
+#include "src/objects/property-cell.h"
namespace v8 {
namespace internal {
@@ -1057,36 +1058,13 @@ void StringBuiltinsAssembler::RequireObjectCoercible(Node* const context,
void StringBuiltinsAssembler::MaybeCallFunctionAtSymbol(
Node* const context, Node* const object, Node* const maybe_string,
- Handle<Symbol> symbol, const NodeFunction0& regexp_call,
- const NodeFunction1& generic_call) {
+ Handle<Symbol> symbol, DescriptorIndexAndName symbol_index,
+ const NodeFunction0& regexp_call, const NodeFunction1& generic_call) {
Label out(this);
// Smis definitely don't have an attached symbol.
GotoIf(TaggedIsSmi(object), &out);
- Node* const object_map = LoadMap(object);
-
- // Skip the slow lookup for Strings.
- {
- Label next(this);
-
- GotoIfNot(IsStringInstanceType(LoadMapInstanceType(object_map)), &next);
-
- Node* const native_context = LoadNativeContext(context);
- Node* const initial_proto_initial_map = LoadContextElement(
- native_context, Context::STRING_FUNCTION_PROTOTYPE_MAP_INDEX);
-
- Node* const string_fun =
- LoadContextElement(native_context, Context::STRING_FUNCTION_INDEX);
- Node* const initial_map =
- LoadObjectField(string_fun, JSFunction::kPrototypeOrInitialMapOffset);
- Node* const proto_map = LoadMap(LoadMapPrototype(initial_map));
-
- Branch(WordEqual(proto_map, initial_proto_initial_map), &out, &next);
-
- BIND(&next);
- }
-
// Take the fast path for RegExps.
// There's two conditions: {object} needs to be a fast regexp, and
// {maybe_string} must be a string (we can't call ToString on the fast path
@@ -1098,8 +1076,8 @@ void StringBuiltinsAssembler::MaybeCallFunctionAtSymbol(
GotoIfNot(IsString(maybe_string), &slow_lookup);
RegExpBuiltinsAssembler regexp_asm(state());
- regexp_asm.BranchIfFastRegExp(context, object, object_map, &stub_call,
- &slow_lookup);
+ regexp_asm.BranchIfFastRegExp(context, object, LoadMap(object),
+ symbol_index, &stub_call, &slow_lookup);
BIND(&stub_call);
// TODO(jgruber): Add a no-JS scope once it exists.
@@ -1257,7 +1235,6 @@ TF_BUILTIN(StringRepeat, StringBuiltinsAssembler) {
CSA_ASSERT(this, IsString(string));
CSA_ASSERT(this, Word32BinaryNot(IsEmptyString(string)));
CSA_ASSERT(this, TaggedIsPositiveSmi(count));
- CSA_ASSERT(this, SmiLessThanOrEqual(count, SmiConstant(String::kMaxLength)));
// The string is repeated with the following algorithm:
// let n = count;
@@ -1314,6 +1291,8 @@ TF_BUILTIN(StringPrototypeReplace, StringBuiltinsAssembler) {
MaybeCallFunctionAtSymbol(
context, search, receiver, isolate()->factory()->replace_symbol(),
+ DescriptorIndexAndName{JSRegExp::kSymbolReplaceFunctionDescriptorIndex,
+ RootIndex::kreplace_symbol},
[=]() {
Return(CallBuiltin(Builtins::kRegExpReplace, context, search, receiver,
replace));
@@ -1464,18 +1443,25 @@ class StringMatchSearchAssembler : public StringBuiltinsAssembler {
Builtins::Name builtin;
Handle<Symbol> symbol;
+ DescriptorIndexAndName property_to_check;
if (variant == kMatch) {
builtin = Builtins::kRegExpMatchFast;
symbol = isolate()->factory()->match_symbol();
+ property_to_check =
+ DescriptorIndexAndName{JSRegExp::kSymbolMatchFunctionDescriptorIndex,
+ RootIndex::kmatch_symbol};
} else {
builtin = Builtins::kRegExpSearchFast;
symbol = isolate()->factory()->search_symbol();
+ property_to_check =
+ DescriptorIndexAndName{JSRegExp::kSymbolSearchFunctionDescriptorIndex,
+ RootIndex::ksearch_symbol};
}
RequireObjectCoercible(context, receiver, method_name);
MaybeCallFunctionAtSymbol(
- context, maybe_regexp, receiver, symbol,
+ context, maybe_regexp, receiver, symbol, property_to_check,
[=] { Return(CallBuiltin(builtin, context, maybe_regexp, receiver)); },
[=](Node* fn) {
Callable call_callable = CodeFactory::Call(isolate());
@@ -1496,8 +1482,8 @@ class StringMatchSearchAssembler : public StringBuiltinsAssembler {
context, initial_map, maybe_regexp, EmptyStringConstant());
Label fast_path(this), slow_path(this);
- regexp_asm.BranchIfFastRegExp(context, regexp, initial_map, &fast_path,
- &slow_path);
+ regexp_asm.BranchIfFastRegExp(context, regexp, initial_map,
+ property_to_check, &fast_path, &slow_path);
BIND(&fast_path);
Return(CallBuiltin(builtin, context, regexp, receiver_string));
@@ -1535,48 +1521,41 @@ TF_BUILTIN(StringPrototypeMatchAll, StringBuiltinsAssembler) {
RequireObjectCoercible(context, receiver, method_name);
// 2. If regexp is neither undefined nor null, then
- Label return_match_all_iterator(this),
- tostring_and_return_match_all_iterator(this, Label::kDeferred);
- TVARIABLE(BoolT, var_is_fast_regexp);
- TVARIABLE(String, var_receiver_string);
- GotoIf(IsNullOrUndefined(maybe_regexp),
- &tostring_and_return_match_all_iterator);
- {
- // a. Let matcher be ? GetMethod(regexp, @@matchAll).
- // b. If matcher is not undefined, then
- // i. Return ? Call(matcher, regexp, « O »).
- auto if_regexp_call = [&] {
- // MaybeCallFunctionAtSymbol guarantees fast path is chosen only if
- // maybe_regexp is a fast regexp and receiver is a string.
- var_receiver_string = CAST(receiver);
- CSA_ASSERT(this, IsString(var_receiver_string.value()));
- var_is_fast_regexp = Int32TrueConstant();
- Goto(&return_match_all_iterator);
- };
- auto if_generic_call = [=](Node* fn) {
- Callable call_callable = CodeFactory::Call(isolate());
- Return(CallJS(call_callable, context, fn, maybe_regexp, receiver));
- };
- MaybeCallFunctionAtSymbol(context, maybe_regexp, receiver,
- isolate()->factory()->match_all_symbol(),
- if_regexp_call, if_generic_call);
- Goto(&tostring_and_return_match_all_iterator);
- }
- BIND(&tostring_and_return_match_all_iterator);
- {
- var_receiver_string = ToString_Inline(context, receiver);
- var_is_fast_regexp = Int32FalseConstant();
- Goto(&return_match_all_iterator);
- }
- BIND(&return_match_all_iterator);
- {
- // 3. Return ? MatchAllIterator(regexp, O).
- RegExpBuiltinsAssembler regexp_asm(state());
- TNode<Object> iterator = regexp_asm.MatchAllIterator(
- context, native_context, maybe_regexp, var_receiver_string.value(),
- var_is_fast_regexp.value(), method_name);
- Return(iterator);
- }
+ // a. Let matcher be ? GetMethod(regexp, @@matchAll).
+ // b. If matcher is not undefined, then
+ // i. Return ? Call(matcher, regexp, « O »).
+ auto if_regexp_call = [&] {
+ // MaybeCallFunctionAtSymbol guarantees the fast path is chosen only if
+ // maybe_regexp is a fast regexp and the receiver is a string.
+ TNode<String> s = CAST(receiver);
+
+ RegExpMatchAllAssembler regexp_asm(state());
+ regexp_asm.Generate(context, native_context, maybe_regexp, s);
+ };
+ auto if_generic_call = [=](Node* fn) {
+ Callable call_callable = CodeFactory::Call(isolate());
+ Return(CallJS(call_callable, context, fn, maybe_regexp, receiver));
+ };
+ MaybeCallFunctionAtSymbol(
+ context, maybe_regexp, receiver, isolate()->factory()->match_all_symbol(),
+ DescriptorIndexAndName{JSRegExp::kSymbolMatchAllFunctionDescriptorIndex,
+ RootIndex::kmatch_all_symbol},
+ if_regexp_call, if_generic_call);
+
+ RegExpMatchAllAssembler regexp_asm(state());
+
+ // 3. Let S be ? ToString(O).
+ TNode<String> s = ToString_Inline(context, receiver);
+
+ // 4. Let rx be ? RegExpCreate(R, "g").
+ TNode<Object> rx = regexp_asm.RegExpCreate(context, native_context,
+ maybe_regexp, StringConstant("g"));
+
+ // 5. Return ? Invoke(rx, @@matchAll, « S »).
+ Callable callable = CodeFactory::Call(isolate());
+ TNode<Object> match_all_func =
+ GetProperty(context, rx, isolate()->factory()->match_all_symbol());
+ Return(CallJS(callable, context, match_all_func, rx, s));
}
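As a rough model of the control flow above (names and shape are assumptions; the real MaybeCallFunctionAtSymbol emits CSA branches rather than running C++):

#include <functional>
// Three-way dispatch: fast RegExp path, user-defined @@matchAll method, or the
// spec-default steps 3-5 (ToString, RegExpCreate, Invoke).
void MaybeCallAtSymbolSketch(bool fast_regexp_and_string_receiver,
                             bool has_user_symbol_method,
                             const std::function<void()>& regexp_call,
                             const std::function<void()>& generic_call,
                             const std::function<void()>& spec_default) {
  if (fast_regexp_and_string_receiver) {
    regexp_call();
  } else if (has_user_symbol_method) {
    generic_call();
  } else {
    spec_default();
  }
}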
class StringPadAssembler : public StringBuiltinsAssembler {
@@ -1824,8 +1803,8 @@ TNode<JSArray> StringBuiltinsAssembler::StringToArray(
1, ParameterMode::INTPTR_PARAMETERS, IndexAdvanceMode::kPost);
TNode<Map> array_map = LoadJSArrayElementsMap(PACKED_ELEMENTS, context);
- result_array = CAST(
- AllocateUninitializedJSArrayWithoutElements(array_map, length_smi));
+ result_array =
+ AllocateUninitializedJSArrayWithoutElements(array_map, length_smi);
StoreObjectField(result_array.value(), JSObject::kElementsOffset, elements);
Goto(&done);
@@ -1870,6 +1849,8 @@ TF_BUILTIN(StringPrototypeSplit, StringBuiltinsAssembler) {
MaybeCallFunctionAtSymbol(
context, separator, receiver, isolate()->factory()->split_symbol(),
+ DescriptorIndexAndName{JSRegExp::kSymbolSplitFunctionDescriptorIndex,
+ RootIndex::ksplit_symbol},
[&]() {
args.PopAndReturn(CallBuiltin(Builtins::kRegExpSplit, context,
separator, receiver, limit));
@@ -1902,13 +1883,13 @@ TF_BUILTIN(StringPrototypeSplit, StringBuiltinsAssembler) {
const ElementsKind kind = PACKED_ELEMENTS;
Node* const native_context = LoadNativeContext(context);
- Node* const array_map = LoadJSArrayElementsMap(kind, native_context);
+ TNode<Map> array_map = LoadJSArrayElementsMap(kind, native_context);
- Node* const length = SmiConstant(1);
- Node* const capacity = IntPtrConstant(1);
- Node* const result = AllocateJSArray(kind, array_map, capacity, length);
+ TNode<Smi> length = SmiConstant(1);
+ TNode<IntPtrT> capacity = IntPtrConstant(1);
+ TNode<JSArray> result = AllocateJSArray(kind, array_map, capacity, length);
- TNode<FixedArray> const fixed_array = CAST(LoadElements(result));
+ TNode<FixedArray> fixed_array = CAST(LoadElements(result));
StoreFixedArrayElement(fixed_array, 0, subject_string);
args.PopAndReturn(result);
@@ -1940,11 +1921,11 @@ TF_BUILTIN(StringPrototypeSplit, StringBuiltinsAssembler) {
{
const ElementsKind kind = PACKED_ELEMENTS;
Node* const native_context = LoadNativeContext(context);
- Node* const array_map = LoadJSArrayElementsMap(kind, native_context);
+ TNode<Map> array_map = LoadJSArrayElementsMap(kind, native_context);
- Node* const length = smi_zero;
- Node* const capacity = IntPtrConstant(0);
- Node* const result = AllocateJSArray(kind, array_map, capacity, length);
+ TNode<Smi> length = smi_zero;
+ TNode<IntPtrT> capacity = IntPtrConstant(0);
+ TNode<JSArray> result = AllocateJSArray(kind, array_map, capacity, length);
args.PopAndReturn(result);
}
@@ -2493,50 +2474,40 @@ TF_BUILTIN(StringIteratorPrototypeNext, StringBuiltinsAssembler) {
}
}
-TNode<BoolT> StringBuiltinsAssembler::IsStringPrimitiveWithNoCustomIteration(
- TNode<Object> object, TNode<Context> context) {
- Label if_false(this, Label::kDeferred), exit(this);
- TVARIABLE(BoolT, var_result);
-
- GotoIf(TaggedIsSmi(object), &if_false);
- GotoIfNot(IsString(CAST(object)), &if_false);
+void StringBuiltinsAssembler::BranchIfStringPrimitiveWithNoCustomIteration(
+ TNode<Object> object, TNode<Context> context, Label* if_true,
+ Label* if_false) {
+ GotoIf(TaggedIsSmi(object), if_false);
+ GotoIfNot(IsString(CAST(object)), if_false);
// Check that the String iterator hasn't been modified in a way that would
// affect iteration.
Node* protector_cell = LoadRoot(RootIndex::kStringIteratorProtector);
DCHECK(isolate()->heap()->string_iterator_protector()->IsPropertyCell());
- var_result =
- WordEqual(LoadObjectField(protector_cell, PropertyCell::kValueOffset),
- SmiConstant(Isolate::kProtectorValid));
- Goto(&exit);
-
- BIND(&if_false);
- {
- var_result = Int32FalseConstant();
- Goto(&exit);
- }
-
- BIND(&exit);
- return var_result.value();
+ Branch(WordEqual(LoadObjectField(protector_cell, PropertyCell::kValueOffset),
+ SmiConstant(Isolate::kProtectorValid)),
+ if_true, if_false);
}
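A stand-in sketch of the protector-cell pattern this branch relies on (plain C++; in V8 the cell lives in the root table and is invalidated by the runtime):

// Once String iteration is customized anywhere, the flag flips permanently
// and every later call takes the slow path instead of re-checking properties.
static bool string_iterator_protector_valid = true;
bool HasNoCustomStringIteration(bool is_string_primitive) {
  return is_string_primitive && string_iterator_protector_valid;
}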
+// This function assumes StringPrimitiveWithNoCustomIteration is true.
TNode<JSArray> StringBuiltinsAssembler::StringToList(TNode<Context> context,
TNode<String> string) {
- CSA_ASSERT(this, IsStringPrimitiveWithNoCustomIteration(string, context));
const ElementsKind kind = PACKED_ELEMENTS;
const TNode<IntPtrT> length = LoadStringLengthAsWord(string);
- Node* const array_map =
+ TNode<Map> array_map =
LoadJSArrayElementsMap(kind, LoadNativeContext(context));
- Node* const array = AllocateJSArray(kind, array_map, length, SmiTag(length));
- Node* const elements = LoadElements(array);
+ TNode<JSArray> array =
+ AllocateJSArray(kind, array_map, length, SmiTag(length), nullptr,
+ INTPTR_PARAMETERS, kAllowLargeObjectAllocation);
+ TNode<FixedArrayBase> elements = LoadElements(array);
const int first_element_offset = FixedArray::kHeaderSize - kHeapObjectTag;
TNode<IntPtrT> first_to_element_offset =
ElementOffsetFromIndex(IntPtrConstant(0), kind, INTPTR_PARAMETERS, 0);
- VARIABLE(
- var_offset, MachineType::PointerRepresentation(),
- IntPtrAdd(first_to_element_offset, IntPtrConstant(first_element_offset)));
+ TNode<IntPtrT> first_offset =
+ IntPtrAdd(first_to_element_offset, IntPtrConstant(first_element_offset));
+ TVARIABLE(IntPtrT, var_offset, first_offset);
TVARIABLE(IntPtrT, var_position, IntPtrConstant(0));
Label done(this), next_codepoint(this, {&var_position, &var_offset});
@@ -2557,12 +2528,18 @@ TNode<JSArray> StringBuiltinsAssembler::StringToList(TNode<Context> context,
TNode<IntPtrT> ch_length = LoadStringLengthAsWord(value);
var_position = IntPtrAdd(var_position.value(), ch_length);
// Increment the array offset and continue the loop.
- var_offset.Bind(
- IntPtrAdd(var_offset.value(), IntPtrConstant(kPointerSize)));
+ var_offset = IntPtrAdd(var_offset.value(), IntPtrConstant(kTaggedSize));
Goto(&next_codepoint);
}
BIND(&done);
+ TNode<IntPtrT> new_length = IntPtrDiv(
+ IntPtrSub(var_offset.value(), first_offset), IntPtrConstant(kTaggedSize));
+ CSA_ASSERT(this, IntPtrGreaterThanOrEqual(new_length, IntPtrConstant(0)));
+ CSA_ASSERT(this, IntPtrGreaterThanOrEqual(length, new_length));
+ StoreObjectFieldNoWriteBarrier(array, JSArray::kLengthOffset,
+ SmiTag(new_length));
+
return UncheckedCast<JSArray>(array);
}
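A worked example of the trim above, assuming kTaggedSize is 8: a string of 4 UTF-16 code units containing one surrogate pair yields 3 code points, so 4 slots are allocated but only 3 are written, and the length is recomputed from the offset delta.

constexpr int kTaggedSizeSketch = 8;  // assumption for the example
constexpr int TrimmedLength(int var_offset, int first_offset) {
  return (var_offset - first_offset) / kTaggedSizeSketch;
}
static_assert(TrimmedLength(24, 0) == 3, "three code points were written");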
diff --git a/deps/v8/src/builtins/builtins-string-gen.h b/deps/v8/src/builtins/builtins-string-gen.h
index 2420ad3014..863a008549 100644
--- a/deps/v8/src/builtins/builtins-string-gen.h
+++ b/deps/v8/src/builtins/builtins-string-gen.h
@@ -23,8 +23,10 @@ class StringBuiltinsAssembler : public CodeStubAssembler {
Node* rhs, Node* rhs_instance_type,
TNode<IntPtrT> length, Label* if_equal,
Label* if_not_equal, Label* if_indirect);
- TNode<BoolT> IsStringPrimitiveWithNoCustomIteration(TNode<Object> object,
- TNode<Context> context);
+ void BranchIfStringPrimitiveWithNoCustomIteration(TNode<Object> object,
+ TNode<Context> context,
+ Label* if_true,
+ Label* if_false);
protected:
TNode<JSArray> StringToList(TNode<Context> context, TNode<String> string);
@@ -110,6 +112,7 @@ class StringBuiltinsAssembler : public CodeStubAssembler {
void MaybeCallFunctionAtSymbol(Node* const context, Node* const object,
Node* const maybe_string,
Handle<Symbol> symbol,
+ DescriptorIndexAndName symbol_index,
const NodeFunction0& regexp_call,
const NodeFunction1& generic_call);
};
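A hedged sketch of the pair the new parameter carries (field names are assumptions): it lets callers verify that a map still holds the original @@symbol method by descriptor slot plus root-table symbol index, avoiding a generic property lookup on the fast path.

struct DescriptorIndexAndNameSketch {
  int descriptor_index;  // expected slot in the map's descriptor array
  int name_root_index;   // RootIndex of the symbol that slot must name
};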
diff --git a/deps/v8/src/builtins/builtins-string.cc b/deps/v8/src/builtins/builtins-string.cc
index 7aba998aa4..d656c8769c 100644
--- a/deps/v8/src/builtins/builtins-string.cc
+++ b/deps/v8/src/builtins/builtins-string.cc
@@ -109,9 +109,11 @@ BUILTIN(StringFromCodePoint) {
isolate->factory()->NewRawTwoByteString(
static_cast<int>(one_byte_buffer.size() + two_byte_buffer.size())));
- CopyChars(result->GetChars(), one_byte_buffer.data(), one_byte_buffer.size());
- CopyChars(result->GetChars() + one_byte_buffer.size(), two_byte_buffer.data(),
- two_byte_buffer.size());
+ DisallowHeapAllocation no_gc;
+ CopyChars(result->GetChars(no_gc), one_byte_buffer.data(),
+ one_byte_buffer.size());
+ CopyChars(result->GetChars(no_gc) + one_byte_buffer.size(),
+ two_byte_buffer.data(), two_byte_buffer.size());
return *result;
}
@@ -157,8 +159,8 @@ BUILTIN(StringPrototypeEndsWith) {
search_string = String::Flatten(isolate, search_string);
DisallowHeapAllocation no_gc; // ensure vectors stay valid
- String::FlatContent str_content = str->GetFlatContent();
- String::FlatContent search_content = search_string->GetFlatContent();
+ String::FlatContent str_content = str->GetFlatContent(no_gc);
+ String::FlatContent search_content = search_string->GetFlatContent(no_gc);
if (str_content.IsOneByte() && search_content.IsOneByte()) {
Vector<const uint8_t> str_vector = str_content.ToOneByteVector();
@@ -239,8 +241,8 @@ BUILTIN(StringPrototypeLocaleCompare) {
str2 = String::Flatten(isolate, str2);
DisallowHeapAllocation no_gc;
- String::FlatContent flat1 = str1->GetFlatContent();
- String::FlatContent flat2 = str2->GetFlatContent();
+ String::FlatContent flat1 = str1->GetFlatContent(no_gc);
+ String::FlatContent flat2 = str2->GetFlatContent(no_gc);
for (int i = 0; i < end; i++) {
if (flat1.Get(i) != flat2.Get(i)) {
@@ -348,8 +350,8 @@ inline bool ToUpperOverflows(uc32 character) {
}
template <class Converter>
-V8_WARN_UNUSED_RESULT static Object* ConvertCaseHelper(
- Isolate* isolate, String* string, SeqString* result, int result_length,
+V8_WARN_UNUSED_RESULT static Object ConvertCaseHelper(
+ Isolate* isolate, String string, SeqString result, int result_length,
unibrow::Mapping<Converter, 128>* mapping) {
DisallowHeapAllocation no_gc;
// We try this twice, once with the assumption that the result is no longer
@@ -445,7 +447,7 @@ V8_WARN_UNUSED_RESULT static Object* ConvertCaseHelper(
}
template <class Converter>
-V8_WARN_UNUSED_RESULT static Object* ConvertCase(
+V8_WARN_UNUSED_RESULT static Object ConvertCase(
Handle<String> s, Isolate* isolate,
unibrow::Mapping<Converter, 128>* mapping) {
s = String::Flatten(isolate, s);
@@ -459,16 +461,16 @@ V8_WARN_UNUSED_RESULT static Object* ConvertCase(
// character is also ASCII. This is currently the case, but it
// might break in the future if we implement more context and locale
// dependent upper/lower conversions.
- if (s->IsOneByteRepresentationUnderneath()) {
+ if (String::IsOneByteRepresentationUnderneath(*s)) {
// Same length as input.
Handle<SeqOneByteString> result =
isolate->factory()->NewRawOneByteString(length).ToHandleChecked();
DisallowHeapAllocation no_gc;
- String::FlatContent flat_content = s->GetFlatContent();
+ String::FlatContent flat_content = s->GetFlatContent(no_gc);
DCHECK(flat_content.IsFlat());
bool has_changed_character = false;
int index_to_first_unprocessed = FastAsciiConvert<Converter::kIsToLower>(
- reinterpret_cast<char*>(result->GetChars()),
+ reinterpret_cast<char*>(result->GetChars(no_gc)),
reinterpret_cast<const char*>(flat_content.ToOneByteVector().start()),
length, &has_changed_character);
// If not ASCII, we discard the result and take the 2 byte path.
@@ -483,7 +485,7 @@ V8_WARN_UNUSED_RESULT static Object* ConvertCase(
result = isolate->factory()->NewRawTwoByteString(length).ToHandleChecked();
}
- Object* answer = ConvertCaseHelper(isolate, *s, *result, length, mapping);
+ Object answer = ConvertCaseHelper(isolate, *s, *result, length, mapping);
if (answer->IsException(isolate) || answer->IsString()) return answer;
DCHECK(answer->IsSmi());
diff --git a/deps/v8/src/builtins/builtins-symbol.cc b/deps/v8/src/builtins/builtins-symbol.cc
index 97e0def67c..4e4a9d8db9 100644
--- a/deps/v8/src/builtins/builtins-symbol.cc
+++ b/deps/v8/src/builtins/builtins-symbol.cc
@@ -52,7 +52,7 @@ BUILTIN(SymbolKeyFor) {
}
Handle<Symbol> symbol = Handle<Symbol>::cast(obj);
DisallowHeapAllocation no_gc;
- Object* result;
+ Object result;
if (symbol->is_public()) {
result = symbol->name();
DCHECK(result->IsString());
diff --git a/deps/v8/src/builtins/builtins-test-gen.h b/deps/v8/src/builtins/builtins-test-gen.h
deleted file mode 100644
index 5412beb9bd..0000000000
--- a/deps/v8/src/builtins/builtins-test-gen.h
+++ /dev/null
@@ -1,22 +0,0 @@
-// Copyright 2018 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_BUILTINS_BUILTINS_TEST_GEN_H_
-#define V8_BUILTINS_BUILTINS_TEST_GEN_H_
-
-#include "torque-generated/builtins-base-from-dsl-gen.h"
-
-namespace v8 {
-namespace internal {
-
-class TestBuiltinsAssembler : public BaseBuiltinsFromDSLAssembler {
- public:
- explicit TestBuiltinsAssembler(compiler::CodeAssemblerState* state)
- : BaseBuiltinsFromDSLAssembler(state) {}
-};
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_BUILTINS_BUILTINS_TEST_GEN_H_
diff --git a/deps/v8/src/builtins/builtins-trace.cc b/deps/v8/src/builtins/builtins-trace.cc
index c2b799412f..dc7e709cae 100644
--- a/deps/v8/src/builtins/builtins-trace.cc
+++ b/deps/v8/src/builtins/builtins-trace.cc
@@ -34,7 +34,9 @@ class MaybeUtf8 {
// Why copy? Well, the trace event mechanism requires null-terminated
// strings; the bytes we get from SeqOneByteString are not. buf_ is
// guaranteed to be null terminated.
- memcpy(buf_, Handle<SeqOneByteString>::cast(string)->GetChars(), len);
+ DisallowHeapAllocation no_gc;
+ memcpy(buf_, Handle<SeqOneByteString>::cast(string)->GetChars(no_gc),
+ len);
}
} else {
Local<v8::String> local = Utils::ToLocal(string);
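A minimal illustration of the copy the comment above justifies (buffer sizing is the caller's responsibility in this sketch):

#include <cstring>
void CopyWithTerminator(char* buf, const char* bytes, std::size_t len) {
  std::memcpy(buf, bytes, len);
  buf[len] = '\0';  // trace-event macros expect NUL-terminated C strings
}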
diff --git a/deps/v8/src/builtins/builtins-typed-array-gen.cc b/deps/v8/src/builtins/builtins-typed-array-gen.cc
index 99979b0283..add9a3af2e 100644
--- a/deps/v8/src/builtins/builtins-typed-array-gen.cc
+++ b/deps/v8/src/builtins/builtins-typed-array-gen.cc
@@ -10,6 +10,7 @@
#include "src/builtins/growable-fixed-array-gen.h"
#include "src/handles-inl.h"
#include "src/heap/factory-inl.h"
+#include "torque-generated/builtins-typed-array-from-dsl-gen.h"
namespace v8 {
namespace internal {
@@ -73,8 +74,8 @@ void TypedArrayBuiltinsAssembler::SetupTypedArray(TNode<JSTypedArray> holder,
StoreObjectFieldNoWriteBarrier(holder, JSArrayBufferView::kByteLengthOffset,
byte_length,
MachineType::PointerRepresentation());
- for (int offset = JSTypedArray::kSize;
- offset < JSTypedArray::kSizeWithEmbedderFields; offset += kPointerSize) {
+ for (int offset = JSTypedArray::kHeaderSize;
+ offset < JSTypedArray::kSizeWithEmbedderFields; offset += kTaggedSize) {
StoreObjectField(holder, offset, SmiConstant(0));
}
}
@@ -185,14 +186,18 @@ TF_BUILTIN(TypedArrayInitialize, TypedArrayBuiltinsAssembler) {
empty_fixed_array);
// Setup the ArrayBuffer.
// - Set BitField to 0.
- // - Set IsExternal and IsNeuterable bits of BitFieldSlot.
+ // - Set IsExternal and IsDetachable bits of BitFieldSlot.
// - Set the byte_length field to byte_length.
// - Set backing_store to null/Smi(0).
// - Set all embedder fields to Smi(0).
- StoreObjectFieldNoWriteBarrier(buffer, JSArrayBuffer::kBitFieldSlot,
- SmiConstant(0));
+ if (FIELD_SIZE(JSArrayBuffer::kOptionalPaddingOffset) != 0) {
+ DCHECK_EQ(4, FIELD_SIZE(JSArrayBuffer::kOptionalPaddingOffset));
+ StoreObjectFieldNoWriteBarrier(
+ buffer, JSArrayBuffer::kOptionalPaddingOffset, Int32Constant(0),
+ MachineRepresentation::kWord32);
+ }
int32_t bitfield_value = (1 << JSArrayBuffer::IsExternalBit::kShift) |
- (1 << JSArrayBuffer::IsNeuterableBit::kShift);
+ (1 << JSArrayBuffer::IsDetachableBit::kShift);
StoreObjectFieldNoWriteBarrier(buffer, JSArrayBuffer::kBitFieldOffset,
Int32Constant(bitfield_value),
MachineRepresentation::kWord32);
@@ -202,8 +207,9 @@ TF_BUILTIN(TypedArrayInitialize, TypedArrayBuiltinsAssembler) {
MachineType::PointerRepresentation());
StoreObjectFieldNoWriteBarrier(buffer, JSArrayBuffer::kBackingStoreOffset,
SmiConstant(0));
- for (int i = 0; i < v8::ArrayBuffer::kEmbedderFieldCount; i++) {
- int offset = JSArrayBuffer::kSize + i * kPointerSize;
+ for (int offset = JSArrayBuffer::kHeaderSize;
+ offset < JSArrayBuffer::kSizeWithEmbedderFields;
+ offset += kTaggedSize) {
StoreObjectFieldNoWriteBarrier(buffer, offset, SmiConstant(0));
}
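A sketch of the bitfield value being stored above, with shift positions as assumptions; only the shape (OR of shifted flag bits) is the point:

#include <cstdint>
constexpr std::uint32_t kIsExternalShiftSketch = 0;
constexpr std::uint32_t kIsDetachableShiftSketch = 1;
constexpr std::uint32_t kBitFieldValueSketch =
    (1u << kIsExternalShiftSketch) | (1u << kIsDetachableShiftSketch);
static_assert(kBitFieldValueSketch == 0x3, "both flag bits set");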
@@ -276,8 +282,7 @@ TF_BUILTIN(TypedArrayInitialize, TypedArrayBuiltinsAssembler) {
BIND(&allocate_off_heap);
{
GotoIf(IsFalse(initialize), &allocate_off_heap_no_init);
- var_buffer = CAST(ConstructJS(CodeFactory::Construct(isolate()), context,
- default_constructor, byte_length));
+ var_buffer = CAST(Construct(context, default_constructor, byte_length));
Goto(&attach_buffer);
}
@@ -313,44 +318,6 @@ TF_BUILTIN(TypedArrayInitialize, TypedArrayBuiltinsAssembler) {
Return(UndefinedConstant());
}
-// ES6 #sec-typedarray-length
-void TypedArrayBuiltinsAssembler::ConstructByLength(TNode<Context> context,
- TNode<JSTypedArray> holder,
- TNode<Object> length,
- TNode<Smi> element_size) {
- // TODO(7881): support larger-than-smi typed array lengths
- CSA_ASSERT(this, TaggedIsPositiveSmi(element_size));
-
- Label invalid_length(this, Label::kDeferred), done(this);
-
- TNode<Number> converted_length =
- ToInteger_Inline(context, length, CodeStubAssembler::kTruncateMinusZero);
-
- // The maximum length of a TypedArray is MaxSmi().
- // Note: this is not per spec, but rather a constraint of our current
- // representation (which uses Smis).
- // TODO(7881): support larger-than-smi typed array lengths
- GotoIf(TaggedIsNotSmi(converted_length), &invalid_length);
- // The goto above ensures that byte_length is a Smi.
- TNode<Smi> smi_converted_length = CAST(converted_length);
- GotoIf(SmiLessThan(smi_converted_length, SmiConstant(0)), &invalid_length);
-
- Node* initialize = TrueConstant();
- TNode<JSFunction> default_constructor = CAST(LoadContextElement(
- LoadNativeContext(context), Context::ARRAY_BUFFER_FUN_INDEX));
- CallBuiltin(Builtins::kTypedArrayInitialize, context, holder,
- converted_length, element_size, initialize, default_constructor);
- Goto(&done);
-
- BIND(&invalid_length);
- {
- ThrowRangeError(context, MessageTemplate::kInvalidTypedArrayLength,
- converted_length);
- }
-
- BIND(&done);
-}
-
// ES6 #sec-typedarray-buffer-byteoffset-length
void TypedArrayBuiltinsAssembler::ConstructByArrayBuffer(
TNode<Context> context, TNode<JSTypedArray> holder,
@@ -424,7 +391,7 @@ void TypedArrayBuiltinsAssembler::ConstructByArrayBuffer(
BIND(&length_defined);
{
- TNode<Smi> new_length = ToSmiIndex(length, context, &invalid_length);
+ TNode<Smi> new_length = ToSmiIndex(context, length, &invalid_length);
ThrowIfArrayBufferIsDetached(context, buffer, "Construct");
new_byte_length.Bind(SmiMul(new_length, element_size));
// Reading the byte length must come after the ToIndex operation, which
@@ -445,7 +412,7 @@ void TypedArrayBuiltinsAssembler::ConstructByArrayBuffer(
TNode<Object> raw_length = CallBuiltin(
Builtins::kDivide, context, new_byte_length.value(), element_size);
// Force the result into a Smi, or throw a range error if it doesn't fit.
- TNode<Smi> new_length = ToSmiIndex(raw_length, context, &invalid_length);
+ TNode<Smi> new_length = ToSmiIndex(context, raw_length, &invalid_length);
CallBuiltin(Builtins::kTypedArrayInitializeWithBuffer, context, holder,
new_length, buffer, element_size, offset.value());
@@ -522,7 +489,7 @@ void TypedArrayBuiltinsAssembler::ConstructByTypedArray(
BIND(&if_buffernotshared);
{
buffer_constructor =
- CAST(SpeciesConstructor(context, source_buffer, default_constructor));
+ SpeciesConstructor(context, source_buffer, default_constructor);
// TODO(petermarshall): Throw on detached typedArray.
GotoIfNot(IsDetachedBuffer(source_buffer), &construct);
source_length = SmiConstant(0);
@@ -531,8 +498,10 @@ void TypedArrayBuiltinsAssembler::ConstructByTypedArray(
BIND(&construct);
{
- ConstructByArrayLike(context, holder, typed_array, source_length.value(),
- element_size, buffer_constructor.value());
+ TypedArrayBuiltinsFromDSLAssembler(this->state())
+ .ConstructByArrayLike(context, holder, typed_array,
+ source_length.value(), element_size,
+ buffer_constructor.value());
Goto(&done);
}
@@ -569,75 +538,6 @@ TNode<BoolT> TypedArrayBuiltinsAssembler::ByteLengthIsValid(
return is_valid.value();
}
-void TypedArrayBuiltinsAssembler::ConstructByArrayLike(
- TNode<Context> context, TNode<JSTypedArray> holder,
- TNode<HeapObject> array_like, TNode<Object> initial_length,
- TNode<Smi> element_size, TNode<JSReceiver> buffer_constructor) {
- Label invalid_length(this, Label::kDeferred), fill(this), fast_copy(this),
- detached_check(this), done(this);
-
- // The caller has looked up length on array_like, which is observable.
- TNode<Smi> length = ToSmiLength(initial_length, context, &invalid_length);
-
- Node* initialize = FalseConstant();
- CallBuiltin(Builtins::kTypedArrayInitialize, context, holder, length,
- element_size, initialize, buffer_constructor);
-
- GotoIf(IsJSTypedArray(array_like), &detached_check);
- Goto(&fill);
-
- BIND(&detached_check);
- ThrowIfArrayBufferViewBufferIsDetached(context, CAST(array_like),
- "Construct");
- Goto(&fill);
-
- BIND(&fill);
- GotoIf(SmiEqual(length, SmiConstant(0)), &done);
- TNode<Int32T> holder_kind = LoadElementsKind(holder);
- TNode<Int32T> source_kind = LoadElementsKind(array_like);
- GotoIf(Word32Equal(holder_kind, source_kind), &fast_copy);
-
- // Copy using the elements accessor.
- CallRuntime(Runtime::kTypedArrayCopyElements, context, holder, array_like,
- length);
- Goto(&done);
-
- BIND(&fast_copy);
- {
- Node* holder_data_ptr = LoadDataPtr(holder);
- Node* source_data_ptr = LoadDataPtr(array_like);
-
- // Calculate the byte length. We shouldn't be trying to copy if the typed
- // array was neutered.
- CSA_ASSERT(this, SmiNotEqual(length, SmiConstant(0)));
- CSA_ASSERT(this, Word32Equal(IsDetachedBuffer(LoadObjectField(
- array_like, JSTypedArray::kBufferOffset)),
- Int32Constant(0)));
-
- TNode<Number> byte_length = SmiMul(length, element_size);
- CSA_ASSERT(this, ByteLengthIsValid(byte_length));
- TNode<UintPtrT> byte_length_intptr =
- ChangeNonnegativeNumberToUintPtr(byte_length);
- CSA_ASSERT(this, UintPtrLessThanOrEqual(
- byte_length_intptr,
- IntPtrConstant(FixedTypedArrayBase::kMaxByteLength)));
-
- Node* memcpy = ExternalConstant(ExternalReference::libc_memcpy_function());
- CallCFunction3(MachineType::AnyTagged(), MachineType::Pointer(),
- MachineType::Pointer(), MachineType::UintPtr(), memcpy,
- holder_data_ptr, source_data_ptr, byte_length_intptr);
- Goto(&done);
- }
-
- BIND(&invalid_length);
- {
- ThrowRangeError(context, MessageTemplate::kInvalidTypedArrayLength,
- initial_length);
- }
-
- BIND(&done);
-}
-
void TypedArrayBuiltinsAssembler::ConstructByIterable(
TNode<Context> context, TNode<JSTypedArray> holder,
TNode<JSReceiver> iterable, TNode<JSReceiver> iterator_fn,
@@ -652,8 +552,9 @@ void TypedArrayBuiltinsAssembler::ConstructByIterable(
TNode<JSFunction> default_constructor = CAST(LoadContextElement(
LoadNativeContext(context), Context::ARRAY_BUFFER_FUN_INDEX));
- ConstructByArrayLike(context, holder, array_like, initial_length,
- element_size, default_constructor);
+ TypedArrayBuiltinsFromDSLAssembler(this->state())
+ .ConstructByArrayLike(context, holder, array_like, initial_length,
+ element_size, default_constructor);
}
TF_BUILTIN(TypedArrayBaseConstructor, TypedArrayBuiltinsAssembler) {
@@ -741,8 +642,9 @@ TF_BUILTIN(CreateTypedArray, TypedArrayBuiltinsAssembler) {
TNode<JSFunction> default_constructor = CAST(LoadContextElement(
LoadNativeContext(context), Context::ARRAY_BUFFER_FUN_INDEX));
- ConstructByArrayLike(context, result, array_like, initial_length,
- element_size, default_constructor);
+ TypedArrayBuiltinsFromDSLAssembler(this->state())
+ .ConstructByArrayLike(context, result, array_like, initial_length,
+ element_size, default_constructor);
Goto(&return_result);
}
@@ -754,7 +656,8 @@ TF_BUILTIN(CreateTypedArray, TypedArrayBuiltinsAssembler) {
// a number. https://tc39.github.io/ecma262/#sec-typedarray-length
BIND(&if_arg1isnumber);
{
- ConstructByLength(context, result, arg1, element_size);
+ TypedArrayBuiltinsFromDSLAssembler(this->state())
+ .ConstructByLength(context, result, arg1, element_size);
Goto(&return_result);
}
@@ -801,7 +704,7 @@ TF_BUILTIN(TypedArrayPrototypeByteLength, TypedArrayBuiltinsAssembler) {
// Check if the {receiver} is actually a JSTypedArray.
ThrowIfNotInstanceType(context, receiver, JS_TYPED_ARRAY_TYPE, kMethodName);
- // Default to zero if the {receiver}s buffer was neutered.
+ // Default to zero if the {receiver}'s buffer was detached.
TNode<JSArrayBuffer> receiver_buffer =
LoadJSArrayBufferViewBuffer(CAST(receiver));
TNode<UintPtrT> byte_length = Select<UintPtrT>(
@@ -819,7 +722,7 @@ TF_BUILTIN(TypedArrayPrototypeByteOffset, TypedArrayBuiltinsAssembler) {
// Check if the {receiver} is actually a JSTypedArray.
ThrowIfNotInstanceType(context, receiver, JS_TYPED_ARRAY_TYPE, kMethodName);
- // Default to zero if the {receiver}s buffer was neutered.
+ // Default to zero if the {receiver}'s buffer was detached.
TNode<JSArrayBuffer> receiver_buffer =
LoadJSArrayBufferViewBuffer(CAST(receiver));
TNode<UintPtrT> byte_offset = Select<UintPtrT>(
@@ -837,7 +740,7 @@ TF_BUILTIN(TypedArrayPrototypeLength, TypedArrayBuiltinsAssembler) {
// Check if the {receiver} is actually a JSTypedArray.
ThrowIfNotInstanceType(context, receiver, JS_TYPED_ARRAY_TYPE, kMethodName);
- // Default to zero if the {receiver}s buffer was neutered.
+ // Default to zero if the {receiver}'s buffer was detached.
TNode<JSArrayBuffer> receiver_buffer =
LoadJSArrayBufferViewBuffer(CAST(receiver));
TNode<Smi> length = Select<Smi>(
@@ -871,7 +774,7 @@ TNode<IntPtrT> TypedArrayBuiltinsAssembler::GetTypedArrayElementSize(
return element_size.value();
}
-TNode<Object> TypedArrayBuiltinsAssembler::GetDefaultConstructor(
+TNode<JSFunction> TypedArrayBuiltinsAssembler::GetDefaultConstructor(
TNode<Context> context, TNode<JSTypedArray> exemplar) {
TVARIABLE(IntPtrT, context_slot);
TNode<Word32T> elements_kind = LoadElementsKind(exemplar);
@@ -882,61 +785,81 @@ TNode<Object> TypedArrayBuiltinsAssembler::GetDefaultConstructor(
context_slot = IntPtrConstant(typed_array_function_index);
});
- return LoadContextElement(LoadNativeContext(context), context_slot.value());
+ return CAST(
+ LoadContextElement(LoadNativeContext(context), context_slot.value()));
}
-TNode<Object> TypedArrayBuiltinsAssembler::TypedArraySpeciesConstructor(
- TNode<Context> context, TNode<JSTypedArray> exemplar) {
- TVARIABLE(Object, var_constructor);
- Label slow(this), done(this);
+template <class... TArgs>
+TNode<JSTypedArray> TypedArrayBuiltinsAssembler::TypedArraySpeciesCreate(
+ const char* method_name, TNode<Context> context,
+ TNode<JSTypedArray> exemplar, TArgs... args) {
+ TVARIABLE(JSTypedArray, var_new_typed_array);
+ Label slow(this, Label::kDeferred), done(this);
// Let defaultConstructor be the intrinsic object listed in column one of
// Table 52 for exemplar.[[TypedArrayName]].
- TNode<Object> default_constructor = GetDefaultConstructor(context, exemplar);
+ TNode<JSFunction> default_constructor =
+ GetDefaultConstructor(context, exemplar);
- var_constructor = default_constructor;
- Node* map = LoadMap(exemplar);
+ TNode<Map> map = LoadMap(exemplar);
GotoIfNot(IsPrototypeTypedArrayPrototype(context, map), &slow);
- Branch(IsTypedArraySpeciesProtectorCellInvalid(), &slow, &done);
-
+ GotoIf(IsTypedArraySpeciesProtectorCellInvalid(), &slow);
+ {
+ const size_t argc = sizeof...(args);
+ static_assert(argc >= 1 && argc <= 3,
+ "TypedArraySpeciesCreate called with unexpected arguments");
+ TNode<Object> arg_list[argc] = {args...};
+ TNode<Object> arg0 = argc < 1 ? UndefinedConstant() : arg_list[0];
+ TNode<Object> arg1 = argc < 2 ? UndefinedConstant() : arg_list[1];
+ TNode<Object> arg2 = argc < 3 ? UndefinedConstant() : arg_list[2];
+ var_new_typed_array = UncheckedCast<JSTypedArray>(
+ CallBuiltin(Builtins::kCreateTypedArray, context, default_constructor,
+ default_constructor, arg0, arg1, arg2));
+#ifdef DEBUG
+ // It is assumed that the CreateTypedArray builtin does not produce a
+ // typed array that fails ValidateTypedArray.
+ TNode<JSArrayBuffer> buffer =
+ LoadJSArrayBufferViewBuffer(var_new_typed_array.value());
+ CSA_ASSERT(this, Word32BinaryNot(IsDetachedBuffer(buffer)));
+#endif // DEBUG
+ Goto(&done);
+ }
BIND(&slow);
- var_constructor = SpeciesConstructor(context, exemplar, default_constructor);
- Goto(&done);
-
- BIND(&done);
- return var_constructor.value();
-}
+ {
+ // Let constructor be ? SpeciesConstructor(exemplar, defaultConstructor).
+ TNode<JSReceiver> constructor =
+ SpeciesConstructor(context, exemplar, default_constructor);
-TNode<JSTypedArray> TypedArrayBuiltinsAssembler::SpeciesCreateByArrayBuffer(
- TNode<Context> context, TNode<JSTypedArray> exemplar,
- TNode<JSArrayBuffer> buffer, TNode<Number> byte_offset, TNode<Smi> len,
- const char* method_name) {
- // Let constructor be ? SpeciesConstructor(exemplar, defaultConstructor).
- TNode<Object> constructor = TypedArraySpeciesConstructor(context, exemplar);
+ // Let newTypedArray be ? Construct(constructor, argumentList).
+ TNode<JSReceiver> new_object = Construct(context, constructor, args...);
- // Let newTypedArray be ? Construct(constructor, argumentList).
- TNode<Object> new_object =
- CAST(ConstructJS(CodeFactory::Construct(isolate()), context, constructor,
- buffer, byte_offset, len));
+ // Perform ? ValidateTypedArray(newTypedArray).
+ var_new_typed_array = ValidateTypedArray(context, new_object, method_name);
+ Goto(&done);
+ }
- // Perform ? ValidateTypedArray(newTypedArray).
- return ValidateTypedArray(context, new_object, method_name);
+ BIND(&done);
+ return var_new_typed_array.value();
}
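A plain-C++ model of the pack-padding trick above, with ints standing in for TNode<Object> and 0 for UndefinedConstant() (names assumed):

#include <cstddef>
#include <cstdio>
template <class... TArgs>
void SpeciesCreateSketch(TArgs... args) {
  constexpr std::size_t argc = sizeof...(args);
  static_assert(argc >= 1 && argc <= 3, "unexpected argument count");
  int padded[3] = {0, 0, 0};         // missing slots default to "undefined"
  const int arg_list[] = {args...};  // spread the pack into an array
  for (std::size_t i = 0; i < argc; ++i) padded[i] = arg_list[i];
  std::printf("%d %d %d\n", padded[0], padded[1], padded[2]);
}
// SpeciesCreateSketch(7) prints "7 0 0"; SpeciesCreateSketch(7, 8, 9) prints "7 8 9".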
-TNode<JSTypedArray> TypedArrayBuiltinsAssembler::SpeciesCreateByLength(
+TNode<JSTypedArray>
+TypedArrayBuiltinsAssembler::TypedArraySpeciesCreateByLength(
TNode<Context> context, TNode<JSTypedArray> exemplar, TNode<Smi> len,
const char* method_name) {
CSA_ASSERT(this, TaggedIsPositiveSmi(len));
- // Let constructor be ? SpeciesConstructor(exemplar, defaultConstructor).
- TNode<HeapObject> constructor =
- CAST(TypedArraySpeciesConstructor(context, exemplar));
- return CreateByLength(context, constructor, len, method_name);
+ TNode<JSTypedArray> new_typed_array =
+ TypedArraySpeciesCreate(method_name, context, exemplar, len);
+
+ ThrowIfLengthLessThan(context, new_typed_array, len);
+ return new_typed_array;
}
-TNode<JSTypedArray> TypedArrayBuiltinsAssembler::CreateByLength(
+TNode<JSTypedArray> TypedArrayBuiltinsAssembler::TypedArrayCreateByLength(
TNode<Context> context, TNode<Object> constructor, TNode<Smi> len,
const char* method_name) {
+ CSA_ASSERT(this, TaggedIsPositiveSmi(len));
+
// Let newTypedArray be ? Construct(constructor, argumentList).
TNode<Object> new_object = CAST(ConstructJS(CodeFactory::Construct(isolate()),
context, constructor, len));
@@ -945,15 +868,20 @@ TNode<JSTypedArray> TypedArrayBuiltinsAssembler::CreateByLength(
TNode<JSTypedArray> new_typed_array =
ValidateTypedArray(context, new_object, method_name);
- // If newTypedArray.[[ArrayLength]] < argumentList[0], throw a TypeError
- // exception.
+ ThrowIfLengthLessThan(context, new_typed_array, len);
+ return new_typed_array;
+}
+
+void TypedArrayBuiltinsAssembler::ThrowIfLengthLessThan(
+ TNode<Context> context, TNode<JSTypedArray> typed_array,
+ TNode<Smi> min_length) {
+ // If typed_array.[[ArrayLength]] < min_length, throw a TypeError exception.
Label if_length_is_not_short(this);
- TNode<Smi> new_length = LoadJSTypedArrayLength(new_typed_array);
- GotoIfNot(SmiLessThan(new_length, len), &if_length_is_not_short);
+ TNode<Smi> new_length = LoadJSTypedArrayLength(typed_array);
+ GotoIfNot(SmiLessThan(new_length, min_length), &if_length_is_not_short);
ThrowTypeError(context, MessageTemplate::kTypedArrayTooShort);
BIND(&if_length_is_not_short);
- return new_typed_array;
}
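Restated as ordinary C++, the guard factored out above exists because a user-defined @@species constructor may return a shorter typed array than requested, which the spec treats as a TypeError rather than truncating (a sketch, not the V8 error path):

#include <stdexcept>
void ThrowIfLengthLessThanSketch(int array_length, int min_length) {
  if (array_length < min_length)
    throw std::runtime_error("TypedArray too short");  // kTypedArrayTooShort
}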
TNode<JSArrayBuffer> TypedArrayBuiltinsAssembler::GetBuffer(
@@ -1121,6 +1049,16 @@ void TypedArrayBuiltinsAssembler::CallCMemmove(TNode<IntPtrT> dest_ptr,
dest_ptr, src_ptr, byte_length);
}
+void TypedArrayBuiltinsAssembler::CallCMemcpy(TNode<RawPtrT> dest_ptr,
+ TNode<RawPtrT> src_ptr,
+ TNode<UintPtrT> byte_length) {
+ TNode<ExternalReference> memcpy =
+ ExternalConstant(ExternalReference::libc_memcpy_function());
+ CallCFunction3(MachineType::AnyTagged(), MachineType::Pointer(),
+ MachineType::Pointer(), MachineType::UintPtr(), memcpy,
+ dest_ptr, src_ptr, byte_length);
+}
+
void TypedArrayBuiltinsAssembler::
CallCCopyFastNumberJSArrayElementsToTypedArray(TNode<Context> context,
TNode<JSArray> source,
@@ -1231,7 +1169,7 @@ TF_BUILTIN(TypedArrayPrototypeSet, TypedArrayBuiltinsAssembler) {
GotoIfNot(TaggedIsPositiveSmi(offset_num), &if_offset_is_out_of_bounds);
TNode<Smi> offset_smi = CAST(offset_num);
- // Check the receiver is not neutered.
+ // Check that the receiver's buffer is not detached.
ThrowIfArrayBufferViewBufferIsDetached(context, CAST(receiver), method_name);
// Check the source argument is valid and whether a fast path can be taken.
@@ -1245,7 +1183,7 @@ TF_BUILTIN(TypedArrayPrototypeSet, TypedArrayBuiltinsAssembler) {
// Fast path for a typed array source argument.
BIND(&if_source_is_typed_array);
{
- // Check the source argument is not neutered.
+ // Check that the source argument's buffer is not detached.
ThrowIfArrayBufferViewBufferIsDetached(context, CAST(source), method_name);
SetTypedArraySource(context, CAST(source), CAST(receiver),
@@ -1312,15 +1250,15 @@ TF_BUILTIN(TypedArrayPrototypeSlice, TypedArrayBuiltinsAssembler) {
// Create a result array by invoking TypedArraySpeciesCreate.
TNode<Smi> count = SmiMax(SmiSub(end_index, start_index), SmiConstant(0));
TNode<JSTypedArray> result_array =
- SpeciesCreateByLength(context, source, count, method_name);
+ TypedArraySpeciesCreateByLength(context, source, count, method_name);
// If count is zero, return early.
GotoIf(SmiGreaterThan(count, SmiConstant(0)), &if_count_is_not_zero);
args.PopAndReturn(result_array);
BIND(&if_count_is_not_zero);
- // Check the source array is neutered or not. We don't need to check if the
- // result array is neutered or not since TypedArraySpeciesCreate checked it.
+ // Check whether the source array is detached. We don't need to check the
+ // result array, since TypedArraySpeciesCreate already validated it.
CSA_ASSERT(this, Word32BinaryNot(IsDetachedBuffer(LoadObjectField(
result_array, JSTypedArray::kBufferOffset))));
TNode<JSArrayBuffer> receiver_buffer =
@@ -1455,8 +1393,8 @@ TF_BUILTIN(TypedArrayPrototypeSubArray, TypedArrayBuiltinsAssembler) {
// 16. Let argumentsList be « buffer, beginByteOffset, newLength ».
// 17. Return ? TypedArraySpeciesCreate(O, argumentsList).
- args.PopAndReturn(SpeciesCreateByArrayBuffer(
- context, source, buffer, begin_byte_offset, new_length, method_name));
+ args.PopAndReturn(TypedArraySpeciesCreate(
+ method_name, context, source, buffer, begin_byte_offset, new_length));
}
// ES #sec-get-%typedarray%.prototype-@@tostringtag
@@ -1510,7 +1448,7 @@ void TypedArrayBuiltinsAssembler::GenerateTypedArrayPrototypeIterationMethod(
GotoIf(TaggedIsSmi(receiver), &throw_bad_receiver);
GotoIfNot(IsJSTypedArray(CAST(receiver)), &throw_bad_receiver);
- // Check if the {receiver}'s JSArrayBuffer was neutered.
+ // Check if the {receiver}'s JSArrayBuffer was detached.
ThrowIfArrayBufferViewBufferIsDetached(context, CAST(receiver), method_name);
Return(CreateArrayIterator(context, receiver, kind));
@@ -1557,7 +1495,7 @@ TF_BUILTIN(TypedArrayOf, TypedArrayBuiltinsAssembler) {
CodeStubArguments::ReceiverMode::kHasReceiver);
Label if_not_constructor(this, Label::kDeferred),
- if_neutered(this, Label::kDeferred);
+ if_detached(this, Label::kDeferred);
// 3. Let C be the this value.
// 4. If IsConstructor(C) is false, throw a TypeError exception.
@@ -1566,8 +1504,8 @@ TF_BUILTIN(TypedArrayOf, TypedArrayBuiltinsAssembler) {
GotoIfNot(IsConstructor(CAST(receiver)), &if_not_constructor);
// 5. Let newObj be ? TypedArrayCreate(C, len).
- TNode<JSTypedArray> new_typed_array =
- CreateByLength(context, receiver, SmiTag(length), "%TypedArray%.of");
+ TNode<JSTypedArray> new_typed_array = TypedArrayCreateByLength(
+ context, receiver, SmiTag(length), "%TypedArray%.of");
TNode<Word32T> elements_kind = LoadElementsKind(new_typed_array);
@@ -1590,16 +1528,16 @@ TF_BUILTIN(TypedArrayOf, TypedArrayBuiltinsAssembler) {
if (kind == BIGINT64_ELEMENTS || kind == BIGUINT64_ELEMENTS) {
EmitBigTypedArrayElementStore(new_typed_array, elements,
intptr_index, item, context,
- &if_neutered);
+ &if_detached);
} else {
Node* value =
PrepareValueForWriteToTypedArray(item, kind, context);
- // ToNumber may execute JavaScript code, which could neuter
+ // ToNumber may execute JavaScript code, which could detach
// the array's buffer.
Node* buffer = LoadObjectField(new_typed_array,
JSTypedArray::kBufferOffset);
- GotoIf(IsDetachedBuffer(buffer), &if_neutered);
+ GotoIf(IsDetachedBuffer(buffer), &if_detached);
// GC may move backing store in ToNumber, thus load backing
// store every time in this loop.
@@ -1618,7 +1556,7 @@ TF_BUILTIN(TypedArrayOf, TypedArrayBuiltinsAssembler) {
BIND(&if_not_constructor);
ThrowTypeError(context, MessageTemplate::kNotConstructor, receiver);
- BIND(&if_neutered);
+ BIND(&if_detached);
ThrowTypeError(context, MessageTemplate::kDetachedOperation,
"%TypedArray%.of");
}
@@ -1628,11 +1566,11 @@ TF_BUILTIN(TypedArrayFrom, TypedArrayBuiltinsAssembler) {
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
Label check_iterator(this), from_array_like(this), fast_path(this),
- slow_path(this), create_typed_array(this),
+ slow_path(this), create_typed_array(this), check_typedarray(this),
if_not_constructor(this, Label::kDeferred),
if_map_fn_not_callable(this, Label::kDeferred),
if_iterator_fn_not_callable(this, Label::kDeferred),
- if_neutered(this, Label::kDeferred);
+ if_detached(this, Label::kDeferred);
CodeStubArguments args(
this,
@@ -1651,7 +1589,7 @@ TF_BUILTIN(TypedArrayFrom, TypedArrayBuiltinsAssembler) {
// 3. If mapfn is present and mapfn is not undefined, then
TNode<Object> map_fn = args.GetOptionalArgumentValue(1);
TVARIABLE(BoolT, mapping, Int32FalseConstant());
- GotoIf(IsUndefined(map_fn), &check_iterator);
+ GotoIf(IsUndefined(map_fn), &check_typedarray);
// a. If IsCallable(mapfn) is false, throw a TypeError exception.
// b. Let mapping be true.
@@ -1659,7 +1597,7 @@ TF_BUILTIN(TypedArrayFrom, TypedArrayBuiltinsAssembler) {
GotoIf(TaggedIsSmi(map_fn), &if_map_fn_not_callable);
GotoIfNot(IsCallable(CAST(map_fn)), &if_map_fn_not_callable);
mapping = Int32TrueConstant();
- Goto(&check_iterator);
+ Goto(&check_typedarray);
TVARIABLE(Object, final_source);
TVARIABLE(Smi, final_length);
@@ -1673,13 +1611,66 @@ TF_BUILTIN(TypedArrayFrom, TypedArrayBuiltinsAssembler) {
// (starting at 7.e and 13) because they are essentially identical. We also
// save on code-size this way.
+ // Get the iterator function
+ BIND(&check_typedarray);
+ TNode<Object> iterator_fn =
+ CAST(GetMethod(context, source, isolate()->factory()->iterator_symbol(),
+ &from_array_like));
+ GotoIf(TaggedIsSmi(iterator_fn), &if_iterator_fn_not_callable);
+
+ {
+ // TypedArrays have iterators, so normally we would go through the
+ // IterableToList case below, which would convert the TypedArray to a
+ // JSArray (boxing the values if they won't fit in a Smi).
+ //
+ // However, if we can guarantee that the source object has the built-in
+ // iterator and that the %ArrayIteratorPrototype%.next method has not been
+ // overridden, then we know the behavior of the iterator: returning the
+ // values in the TypedArray sequentially from index 0 to length-1.
+ //
+ // In this case, we can avoid creating the intermediate array and the
+ // associated HeapNumbers, and use the fast path in TypedArrayCopyElements
+ // which uses the same ordering as the default iterator.
+ //
+ // Drop through to the default check_iterator behavior if any of these
+ // checks fail.
+
+ // Check that the source is a TypedArray
+ GotoIf(TaggedIsSmi(source), &check_iterator);
+ GotoIfNot(IsJSTypedArray(CAST(source)), &check_iterator);
+ TNode<JSArrayBuffer> source_buffer =
+ LoadJSArrayBufferViewBuffer(CAST(source));
+ GotoIf(IsDetachedBuffer(source_buffer), &check_iterator);
+
+ // Check that the iterator function is Builtins::kTypedArrayPrototypeValues
+ GotoIfNot(IsJSFunction(CAST(iterator_fn)), &check_iterator);
+ TNode<SharedFunctionInfo> shared_info = LoadObjectField<SharedFunctionInfo>(
+ CAST(iterator_fn), JSFunction::kSharedFunctionInfoOffset);
+ GotoIfNot(
+ WordEqual(LoadObjectField(shared_info,
+ SharedFunctionInfo::kFunctionDataOffset),
+ SmiConstant(Builtins::kTypedArrayPrototypeValues)),
+ &check_iterator);
+ // Check that the ArrayIterator prototype's "next" method hasn't been
+ // overridden
+ TNode<PropertyCell> protector_cell =
+ CAST(LoadRoot(RootIndex::kArrayIteratorProtector));
+ GotoIfNot(
+ WordEqual(LoadObjectField(protector_cell, PropertyCell::kValueOffset),
+ SmiConstant(Isolate::kProtectorValid)),
+ &check_iterator);
+
+ // Source is a TypedArray with unmodified iterator behavior. Use the
+ // source object directly, taking advantage of the special-case code in
+ // TypedArrayCopyElements
+ final_length = LoadJSTypedArrayLength(CAST(source));
+ final_source = source;
+ Goto(&create_typed_array);
+ }
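// A hedged model of the three guards above, with booleans standing in for
// the CSA checks; the fast path is sound only when all of them hold:
bool CanUseTypedArrayFastPath(bool source_is_live_typed_array,
                              bool iterator_is_builtin_values,
                              bool array_iterator_protector_valid) {
  return source_is_live_typed_array && iterator_is_builtin_values &&
         array_iterator_protector_valid;
}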
+
BIND(&check_iterator);
{
// 6. Let usingIterator be ? GetMethod(source, @@iterator).
- TNode<Object> iterator_fn =
- CAST(GetMethod(context, source, isolate()->factory()->iterator_symbol(),
- &from_array_like));
- GotoIf(TaggedIsSmi(iterator_fn), &if_iterator_fn_not_callable);
GotoIfNot(IsCallable(CAST(iterator_fn)), &if_iterator_fn_not_callable);
// We are using the iterator.
@@ -1713,7 +1704,7 @@ TF_BUILTIN(TypedArrayFrom, TypedArrayBuiltinsAssembler) {
// 10. Let len be ? ToLength(? Get(arrayLike, "length")).
TNode<Object> raw_length =
GetProperty(context, final_source.value(), LengthStringConstant());
- final_length = ToSmiLength(raw_length, context, &if_length_not_smi);
+ final_length = ToSmiLength(context, raw_length, &if_length_not_smi);
Goto(&create_typed_array);
BIND(&if_length_not_smi);
@@ -1726,8 +1717,8 @@ TF_BUILTIN(TypedArrayFrom, TypedArrayBuiltinsAssembler) {
BIND(&create_typed_array);
{
// 7c/11. Let targetObj be ? TypedArrayCreate(C, «len»).
- target_obj = CreateByLength(context, receiver, final_length.value(),
- "%TypedArray%.from");
+ target_obj = TypedArrayCreateByLength(
+ context, receiver, final_length.value(), "%TypedArray%.from");
Branch(mapping.value(), &slow_path, &fast_path);
}
@@ -1767,16 +1758,16 @@ TF_BUILTIN(TypedArrayFrom, TypedArrayBuiltinsAssembler) {
if (kind == BIGINT64_ELEMENTS || kind == BIGUINT64_ELEMENTS) {
EmitBigTypedArrayElementStore(target_obj.value(), elements,
intptr_index, mapped_value,
- context, &if_neutered);
+ context, &if_detached);
} else {
Node* const final_value = PrepareValueForWriteToTypedArray(
mapped_value, kind, context);
- // ToNumber may execute JavaScript code, which could neuter
+ // ToNumber may execute JavaScript code, which could detach
// the array's buffer.
Node* buffer = LoadObjectField(target_obj.value(),
JSTypedArray::kBufferOffset);
- GotoIf(IsDetachedBuffer(buffer), &if_neutered);
+ GotoIf(IsDetachedBuffer(buffer), &if_detached);
// GC may move backing store in map_fn, thus load backing
// store in each iteration of this loop.
@@ -1800,7 +1791,7 @@ TF_BUILTIN(TypedArrayFrom, TypedArrayBuiltinsAssembler) {
BIND(&if_iterator_fn_not_callable);
ThrowTypeError(context, MessageTemplate::kIteratorSymbolNonCallable);
- BIND(&if_neutered);
+ BIND(&if_detached);
ThrowTypeError(context, MessageTemplate::kDetachedOperation,
"%TypedArray%.from");
}
@@ -1886,7 +1877,7 @@ TF_BUILTIN(TypedArrayPrototypeFilter, TypedArrayBuiltinsAssembler) {
// 10. Let A be ? TypedArraySpeciesCreate(O, captured).
TNode<JSTypedArray> result_array =
- SpeciesCreateByLength(context, source, captured, method_name);
+ TypedArraySpeciesCreateByLength(context, source, captured, method_name);
// 11. Let n be 0.
// 12. For each element e of kept, do
diff --git a/deps/v8/src/builtins/builtins-typed-array-gen.h b/deps/v8/src/builtins/builtins-typed-array-gen.h
index 1e35ae69a9..a82b32c25c 100644
--- a/deps/v8/src/builtins/builtins-typed-array-gen.h
+++ b/deps/v8/src/builtins/builtins-typed-array-gen.h
@@ -5,29 +5,31 @@
#ifndef V8_BUILTINS_BUILTINS_TYPED_ARRAY_GEN_H_
#define V8_BUILTINS_BUILTINS_TYPED_ARRAY_GEN_H_
-#include "torque-generated/builtins-base-from-dsl-gen.h"
+#include "src/code-stub-assembler.h"
namespace v8 {
namespace internal {
-class TypedArrayBuiltinsAssembler : public BaseBuiltinsFromDSLAssembler {
+class TypedArrayBuiltinsAssembler : public CodeStubAssembler {
public:
explicit TypedArrayBuiltinsAssembler(compiler::CodeAssemblerState* state)
- : BaseBuiltinsFromDSLAssembler(state) {}
+ : CodeStubAssembler(state) {}
- TNode<JSTypedArray> SpeciesCreateByLength(TNode<Context> context,
- TNode<JSTypedArray> exemplar,
- TNode<Smi> len,
- const char* method_name);
+ template <class... TArgs>
+ TNode<JSTypedArray> TypedArraySpeciesCreate(const char* method_name,
+ TNode<Context> context,
+ TNode<JSTypedArray> exemplar,
+ TArgs... args);
+
+ TNode<JSTypedArray> TypedArraySpeciesCreateByLength(
+ TNode<Context> context, TNode<JSTypedArray> exemplar, TNode<Smi> len,
+ const char* method_name);
- protected:
void GenerateTypedArrayPrototypeIterationMethod(TNode<Context> context,
TNode<Object> receiver,
const char* method_name,
IterationKind iteration_kind);
- void ConstructByLength(TNode<Context> context, TNode<JSTypedArray> holder,
- TNode<Object> length, TNode<Smi> element_size);
void ConstructByArrayBuffer(TNode<Context> context,
TNode<JSTypedArray> holder,
TNode<JSArrayBuffer> buffer,
@@ -36,11 +38,6 @@ class TypedArrayBuiltinsAssembler : public BaseBuiltinsFromDSLAssembler {
void ConstructByTypedArray(TNode<Context> context, TNode<JSTypedArray> holder,
TNode<JSTypedArray> typed_array,
TNode<Smi> element_size);
- void ConstructByArrayLike(TNode<Context> context, TNode<JSTypedArray> holder,
- TNode<HeapObject> array_like,
- TNode<Object> initial_length,
- TNode<Smi> element_size,
- TNode<JSReceiver> buffer_constructor);
void ConstructByIterable(TNode<Context> context, TNode<JSTypedArray> holder,
TNode<JSReceiver> iterable,
TNode<JSReceiver> iterator_fn,
@@ -73,22 +70,17 @@ class TypedArrayBuiltinsAssembler : public BaseBuiltinsFromDSLAssembler {
JSTypedArray::kBufferOffset);
}
- TNode<Object> GetDefaultConstructor(TNode<Context> context,
- TNode<JSTypedArray> exemplar);
-
- TNode<Object> TypedArraySpeciesConstructor(TNode<Context> context,
- TNode<JSTypedArray> exemplar);
+ TNode<JSFunction> GetDefaultConstructor(TNode<Context> context,
+ TNode<JSTypedArray> exemplar);
- TNode<JSTypedArray> SpeciesCreateByArrayBuffer(TNode<Context> context,
- TNode<JSTypedArray> exemplar,
- TNode<JSArrayBuffer> buffer,
- TNode<Number> byte_offset,
- TNode<Smi> len,
- const char* method_name);
+ TNode<JSTypedArray> TypedArrayCreateByLength(TNode<Context> context,
+ TNode<Object> constructor,
+ TNode<Smi> len,
+ const char* method_name);
- TNode<JSTypedArray> CreateByLength(TNode<Context> context,
- TNode<Object> constructor, TNode<Smi> len,
- const char* method_name);
+ void ThrowIfLengthLessThan(TNode<Context> context,
+ TNode<JSTypedArray> typed_array,
+ TNode<Smi> min_length);
TNode<JSArrayBuffer> GetBuffer(TNode<Context> context,
TNode<JSTypedArray> array);
@@ -110,6 +102,9 @@ class TypedArrayBuiltinsAssembler : public BaseBuiltinsFromDSLAssembler {
void CallCMemmove(TNode<IntPtrT> dest_ptr, TNode<IntPtrT> src_ptr,
TNode<IntPtrT> byte_length);
+ void CallCMemcpy(TNode<RawPtrT> dest_ptr, TNode<RawPtrT> src_ptr,
+ TNode<UintPtrT> byte_length);
+
void CallCCopyFastNumberJSArrayElementsToTypedArray(
TNode<Context> context, TNode<JSArray> source, TNode<JSTypedArray> dest,
TNode<IntPtrT> source_length, TNode<IntPtrT> offset);
diff --git a/deps/v8/src/builtins/builtins-typed-array.cc b/deps/v8/src/builtins/builtins-typed-array.cc
index 34993faf01..8c913c301d 100644
--- a/deps/v8/src/builtins/builtins-typed-array.cc
+++ b/deps/v8/src/builtins/builtins-typed-array.cc
@@ -7,6 +7,7 @@
#include "src/counters.h"
#include "src/elements.h"
#include "src/objects-inl.h"
+#include "src/objects/heap-number-inl.h"
#include "src/objects/js-array-buffer-inl.h"
namespace v8 {
@@ -86,7 +87,7 @@ BUILTIN(TypedArrayPrototypeCopyWithin) {
// TODO(caitp): throw here, as though the full algorithm were performed (the
// throw would have come from ecma262/#sec-integerindexedelementget)
// (see )
- if (V8_UNLIKELY(array->WasNeutered())) return *array;
+ if (V8_UNLIKELY(array->WasDetached())) return *array;
// Ensure processed indexes are within array bounds
DCHECK_GE(from, 0);
@@ -149,7 +150,7 @@ BUILTIN(TypedArrayPrototypeFill) {
int64_t count = end - start;
if (count <= 0) return *array;
- if (V8_UNLIKELY(array->WasNeutered())) return *array;
+ if (V8_UNLIKELY(array->WasDetached())) return *array;
// Ensure processed indexes are within array bounds
DCHECK_GE(start, 0);
@@ -185,7 +186,7 @@ BUILTIN(TypedArrayPrototypeIncludes) {
}
// TODO(cwhan.tunz): throw. See the above comment in CopyWithin.
- if (V8_UNLIKELY(array->WasNeutered()))
+ if (V8_UNLIKELY(array->WasDetached()))
return ReadOnlyRoots(isolate).false_value();
Handle<Object> search_element = args.atOrUndefined(isolate, 1);
@@ -217,7 +218,7 @@ BUILTIN(TypedArrayPrototypeIndexOf) {
}
// TODO(cwhan.tunz): throw. See the above comment in CopyWithin.
- if (V8_UNLIKELY(array->WasNeutered())) return Smi::FromInt(-1);
+ if (V8_UNLIKELY(array->WasDetached())) return Smi::FromInt(-1);
Handle<Object> search_element = args.atOrUndefined(isolate, 1);
ElementsAccessor* elements = array->GetElementsAccessor();
@@ -252,7 +253,7 @@ BUILTIN(TypedArrayPrototypeLastIndexOf) {
if (index < 0) return Smi::FromInt(-1);
// TODO(cwhan.tunz): throw. See the above comment in CopyWithin.
- if (V8_UNLIKELY(array->WasNeutered())) return Smi::FromInt(-1);
+ if (V8_UNLIKELY(array->WasDetached())) return Smi::FromInt(-1);
Handle<Object> search_element = args.atOrUndefined(isolate, 1);
ElementsAccessor* elements = array->GetElementsAccessor();
diff --git a/deps/v8/src/builtins/builtins-utils-gen.h b/deps/v8/src/builtins/builtins-utils-gen.h
index 9984330980..a9b040b040 100644
--- a/deps/v8/src/builtins/builtins-utils-gen.h
+++ b/deps/v8/src/builtins/builtins-utils-gen.h
@@ -26,27 +26,27 @@ class CodeAssemblerState;
//
// In the body of the builtin function the arguments can be accessed
// as "Parameter(n)".
-#define TF_BUILTIN(Name, AssemblerBase) \
- class Name##Assembler : public AssemblerBase { \
- public: \
- typedef Builtin_##Name##_InterfaceDescriptor Descriptor; \
- \
- explicit Name##Assembler(compiler::CodeAssemblerState* state) \
- : AssemblerBase(state) {} \
- void Generate##Name##Impl(); \
- \
- Node* Parameter(Descriptor::ParameterIndices index) { \
- return CodeAssembler::Parameter(static_cast<int>(index)); \
- } \
- }; \
- void Builtins::Generate_##Name(compiler::CodeAssemblerState* state) { \
- Name##Assembler assembler(state); \
- state->SetInitialDebugInformation(#Name, __FILE__, __LINE__); \
- if (Builtins::KindOf(Builtins::k##Name) == Builtins::TFJ) { \
- assembler.PerformStackCheck(assembler.GetJSContextParameter()); \
- } \
- assembler.Generate##Name##Impl(); \
- } \
+#define TF_BUILTIN(Name, AssemblerBase) \
+ class Name##Assembler : public AssemblerBase { \
+ public: \
+ typedef Builtin_##Name##_InterfaceDescriptor Descriptor; \
+ \
+ explicit Name##Assembler(compiler::CodeAssemblerState* state) \
+ : AssemblerBase(state) {} \
+ void Generate##Name##Impl(); \
+ \
+ Node* Parameter(Descriptor::ParameterIndices index) { \
+ return CodeAssembler::Parameter(static_cast<int>(index)); \
+ } \
+ }; \
+ void Builtins::Generate_##Name(compiler::CodeAssemblerState* state) { \
+ Name##Assembler assembler(state); \
+ state->SetInitialDebugInformation(#Name, __FILE__, __LINE__); \
+ if (Builtins::KindOf(Builtins::k##Name) == Builtins::TFJ) { \
+ assembler.PerformStackCheck(assembler.GetJSContextParameter()); \
+ } \
+ assembler.Generate##Name##Impl(); \
+ } \
void Name##Assembler::Generate##Name##Impl()
} // namespace internal
diff --git a/deps/v8/src/builtins/builtins-utils.h b/deps/v8/src/builtins/builtins-utils.h
index 1ea2093702..283c521067 100644
--- a/deps/v8/src/builtins/builtins-utils.h
+++ b/deps/v8/src/builtins/builtins-utils.h
@@ -17,13 +17,13 @@ namespace internal {
// Arguments object passed to C++ builtins.
class BuiltinArguments : public Arguments {
public:
- BuiltinArguments(int length, Object** arguments)
+ BuiltinArguments(int length, Address* arguments)
: Arguments(length, arguments) {
// Check we have at least the receiver.
DCHECK_LE(1, this->length());
}
- Object*& operator[](int index) {
+ Object operator[](int index) {
DCHECK_LT(index, length());
return Arguments::operator[](index);
}
@@ -66,31 +66,31 @@ class BuiltinArguments : public Arguments {
// through the BuiltinArguments object args.
// TODO(cbruni): add global flag to check whether any tracing events have been
// enabled.
-#define BUILTIN(name) \
- V8_WARN_UNUSED_RESULT static Object* Builtin_Impl_##name( \
- BuiltinArguments args, Isolate* isolate); \
- \
- V8_NOINLINE static Object* Builtin_Impl_Stats_##name( \
- int args_length, Object** args_object, Isolate* isolate) { \
- BuiltinArguments args(args_length, args_object); \
- RuntimeCallTimerScope timer(isolate, \
- RuntimeCallCounterId::kBuiltin_##name); \
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.runtime"), \
- "V8.Builtin_" #name); \
- return Builtin_Impl_##name(args, isolate); \
- } \
- \
- V8_WARN_UNUSED_RESULT Object* Builtin_##name( \
- int args_length, Object** args_object, Isolate* isolate) { \
- DCHECK(isolate->context() == nullptr || isolate->context()->IsContext()); \
- if (V8_UNLIKELY(FLAG_runtime_stats)) { \
- return Builtin_Impl_Stats_##name(args_length, args_object, isolate); \
- } \
- BuiltinArguments args(args_length, args_object); \
- return Builtin_Impl_##name(args, isolate); \
- } \
- \
- V8_WARN_UNUSED_RESULT static Object* Builtin_Impl_##name( \
+#define BUILTIN(name) \
+ V8_WARN_UNUSED_RESULT static Object Builtin_Impl_##name( \
+ BuiltinArguments args, Isolate* isolate); \
+ \
+ V8_NOINLINE static Address Builtin_Impl_Stats_##name( \
+ int args_length, Address* args_object, Isolate* isolate) { \
+ BuiltinArguments args(args_length, args_object); \
+ RuntimeCallTimerScope timer(isolate, \
+ RuntimeCallCounterId::kBuiltin_##name); \
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.runtime"), \
+ "V8.Builtin_" #name); \
+ return Builtin_Impl_##name(args, isolate)->ptr(); \
+ } \
+ \
+ V8_WARN_UNUSED_RESULT Address Builtin_##name( \
+ int args_length, Address* args_object, Isolate* isolate) { \
+ DCHECK(isolate->context().is_null() || isolate->context()->IsContext()); \
+ if (V8_UNLIKELY(FLAG_runtime_stats)) { \
+ return Builtin_Impl_Stats_##name(args_length, args_object, isolate); \
+ } \
+ BuiltinArguments args(args_length, args_object); \
+ return Builtin_Impl_##name(args, isolate)->ptr(); \
+ } \
+ \
+ V8_WARN_UNUSED_RESULT static Object Builtin_Impl_##name( \
BuiltinArguments args, Isolate* isolate)
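A hedged sketch of the migration this macro reflects: builtins now return a value-type Object wrapping one tagged word and unwrap it to a raw Address at the boundary (the real class uses operator-> where this sketch uses a plain member call):

#include <cstdint>
using AddressSketch = std::uintptr_t;
class ObjectSketch {
 public:
  explicit ObjectSketch(AddressSketch ptr) : ptr_(ptr) {}
  AddressSketch ptr() const { return ptr_; }
 private:
  AddressSketch ptr_;  // one tagged word, passed by value, not Object**
};
AddressSketch BuiltinBoundary(ObjectSketch result) { return result.ptr(); }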
// ----------------------------------------------------------------------------
diff --git a/deps/v8/src/builtins/builtins-wasm-gen.cc b/deps/v8/src/builtins/builtins-wasm-gen.cc
index 60be33de20..a79ff81101 100644
--- a/deps/v8/src/builtins/builtins-wasm-gen.cc
+++ b/deps/v8/src/builtins/builtins-wasm-gen.cc
@@ -23,13 +23,13 @@ class WasmBuiltinsAssembler : public CodeStubAssembler {
TNode<Code> LoadBuiltinFromFrame(Builtins::Name id) {
TNode<Object> instance = LoadInstanceFromFrame();
- TNode<IntPtrT> roots = UncheckedCast<IntPtrT>(
+ TNode<IntPtrT> isolate_root = UncheckedCast<IntPtrT>(
Load(MachineType::Pointer(), instance,
- IntPtrConstant(WasmInstanceObject::kRootsArrayAddressOffset -
+ IntPtrConstant(WasmInstanceObject::kIsolateRootOffset -
kHeapObjectTag)));
- TNode<Code> target = UncheckedCast<Code>(Load(
- MachineType::TaggedPointer(), roots,
- IntPtrConstant(Heap::roots_to_builtins_offset() + id * kPointerSize)));
+ TNode<Code> target = UncheckedCast<Code>(
+ Load(MachineType::TaggedPointer(), isolate_root,
+ IntPtrConstant(IsolateData::builtin_slot_offset(id))));
return target;
}
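In the hunk above, the builtin Code object is now located through IsolateData::builtin_slot_offset(id) instead of a roots-table offset; the idea is a flat array of pointer-sized slots at a fixed position relative to the isolate root. A sketch of the arithmetic, where the table offset constant is a made-up placeholder:

#include <cassert>
#include <cstddef>

constexpr size_t kSystemPointerSize = sizeof(void*);
// Hypothetical position of the builtins table inside IsolateData.
constexpr size_t kBuiltinsTableOffset = 128;

constexpr size_t builtin_slot_offset(int builtin_id) {
  return kBuiltinsTableOffset +
         static_cast<size_t>(builtin_id) * kSystemPointerSize;
}

int main() {
  // Slot 0 sits at the table base; each id is one pointer-size further on.
  assert(builtin_slot_offset(0) == kBuiltinsTableOffset);
  assert(builtin_slot_offset(3) ==
         kBuiltinsTableOffset + 3 * kSystemPointerSize);
  return 0;
}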
@@ -66,6 +66,16 @@ TF_BUILTIN(WasmCallJavaScript, WasmBuiltinsAssembler) {
TailCallStub(CallTrampolineDescriptor{}, target, context, function, argc);
}
+TF_BUILTIN(WasmRecordWrite, WasmBuiltinsAssembler) {
+ TNode<Object> object = UncheckedParameter(Descriptor::kObject);
+ TNode<Object> slot = UncheckedParameter(Descriptor::kSlot);
+ TNode<Object> remembered = UncheckedParameter(Descriptor::kRememberedSet);
+ TNode<Object> fp_mode = UncheckedParameter(Descriptor::kFPMode);
+ TNode<Code> target = LoadBuiltinFromFrame(Builtins::kRecordWrite);
+ TailCallStub(RecordWriteDescriptor{}, target, NoContextConstant(), object,
+ slot, remembered, fp_mode);
+}
+
TF_BUILTIN(WasmToNumber, WasmBuiltinsAssembler) {
TNode<Object> context = UncheckedParameter(Descriptor::kContext);
TNode<Object> argument = UncheckedParameter(Descriptor::kArgument);
@@ -88,7 +98,106 @@ TF_BUILTIN(WasmThrow, WasmBuiltinsAssembler) {
TailCallRuntimeWithCEntry(Runtime::kThrow, centry, context, exception);
}
-TF_BUILTIN(WasmGrowMemory, WasmBuiltinsAssembler) {
+TF_BUILTIN(WasmAtomicWake, WasmBuiltinsAssembler) {
+ TNode<Uint32T> address =
+ UncheckedCast<Uint32T>(Parameter(Descriptor::kAddress));
+ TNode<Uint32T> count = UncheckedCast<Uint32T>(Parameter(Descriptor::kCount));
+
+ TNode<Object> instance = LoadInstanceFromFrame();
+ TNode<Code> centry = LoadCEntryFromInstance(instance);
+
+ TNode<Code> target = LoadBuiltinFromFrame(Builtins::kAllocateHeapNumber);
+
+ // TODO(aseemgarg): Use SMIs if possible for address and count
+ TNode<HeapNumber> address_heap = UncheckedCast<HeapNumber>(
+ CallStub(AllocateHeapNumberDescriptor(), target, NoContextConstant()));
+ StoreHeapNumberValue(address_heap, ChangeUint32ToFloat64(address));
+
+ TNode<HeapNumber> count_heap = UncheckedCast<HeapNumber>(
+ CallStub(AllocateHeapNumberDescriptor(), target, NoContextConstant()));
+ StoreHeapNumberValue(count_heap, ChangeUint32ToFloat64(count));
+
+ TNode<Smi> result_smi = UncheckedCast<Smi>(CallRuntimeWithCEntry(
+ Runtime::kWasmAtomicWake, centry, NoContextConstant(), instance,
+ address_heap, count_heap));
+ ReturnRaw(SmiToInt32(result_smi));
+}
+
+TF_BUILTIN(WasmI32AtomicWait, WasmBuiltinsAssembler) {
+ TNode<Uint32T> address =
+ UncheckedCast<Uint32T>(Parameter(Descriptor::kAddress));
+ TNode<Int32T> expected_value =
+ UncheckedCast<Int32T>(Parameter(Descriptor::kExpectedValue));
+ TNode<Float64T> timeout =
+ UncheckedCast<Float64T>(Parameter(Descriptor::kTimeout));
+
+ TNode<Object> instance = LoadInstanceFromFrame();
+ TNode<Code> centry = LoadCEntryFromInstance(instance);
+
+ TNode<Code> target = LoadBuiltinFromFrame(Builtins::kAllocateHeapNumber);
+
+ // TODO(aseemgarg): Use SMIs if possible for address and expected_value
+ TNode<HeapNumber> address_heap = UncheckedCast<HeapNumber>(
+ CallStub(AllocateHeapNumberDescriptor(), target, NoContextConstant()));
+ StoreHeapNumberValue(address_heap, ChangeUint32ToFloat64(address));
+
+ TNode<HeapNumber> expected_value_heap = UncheckedCast<HeapNumber>(
+ CallStub(AllocateHeapNumberDescriptor(), target, NoContextConstant()));
+ StoreHeapNumberValue(expected_value_heap,
+ ChangeInt32ToFloat64(expected_value));
+
+ TNode<HeapNumber> timeout_heap = UncheckedCast<HeapNumber>(
+ CallStub(AllocateHeapNumberDescriptor(), target, NoContextConstant()));
+ StoreHeapNumberValue(timeout_heap, timeout);
+
+ TNode<Smi> result_smi = UncheckedCast<Smi>(CallRuntimeWithCEntry(
+ Runtime::kWasmI32AtomicWait, centry, NoContextConstant(), instance,
+ address_heap, expected_value_heap, timeout_heap));
+ ReturnRaw(SmiToInt32(result_smi));
+}
+
+TF_BUILTIN(WasmI64AtomicWait, WasmBuiltinsAssembler) {
+ TNode<Uint32T> address =
+ UncheckedCast<Uint32T>(Parameter(Descriptor::kAddress));
+ TNode<Uint32T> expected_value_high =
+ UncheckedCast<Uint32T>(Parameter(Descriptor::kExpectedValueHigh));
+ TNode<Uint32T> expected_value_low =
+ UncheckedCast<Uint32T>(Parameter(Descriptor::kExpectedValueLow));
+ TNode<Float64T> timeout =
+ UncheckedCast<Float64T>(Parameter(Descriptor::kTimeout));
+
+ TNode<Object> instance = LoadInstanceFromFrame();
+ TNode<Code> centry = LoadCEntryFromInstance(instance);
+
+ TNode<Code> target = LoadBuiltinFromFrame(Builtins::kAllocateHeapNumber);
+
+ // TODO(aseemgarg): Use SMIs if possible for address and expected_value
+ TNode<HeapNumber> address_heap = UncheckedCast<HeapNumber>(
+ CallStub(AllocateHeapNumberDescriptor(), target, NoContextConstant()));
+ StoreHeapNumberValue(address_heap, ChangeUint32ToFloat64(address));
+
+ TNode<HeapNumber> expected_value_high_heap = UncheckedCast<HeapNumber>(
+ CallStub(AllocateHeapNumberDescriptor(), target, NoContextConstant()));
+ StoreHeapNumberValue(expected_value_high_heap,
+ ChangeUint32ToFloat64(expected_value_high));
+
+ TNode<HeapNumber> expected_value_low_heap = UncheckedCast<HeapNumber>(
+ CallStub(AllocateHeapNumberDescriptor(), target, NoContextConstant()));
+ StoreHeapNumberValue(expected_value_low_heap,
+ ChangeUint32ToFloat64(expected_value_low));
+
+ TNode<HeapNumber> timeout_heap = UncheckedCast<HeapNumber>(
+ CallStub(AllocateHeapNumberDescriptor(), target, NoContextConstant()));
+ StoreHeapNumberValue(timeout_heap, timeout);
+
+ TNode<Smi> result_smi = UncheckedCast<Smi>(CallRuntimeWithCEntry(
+ Runtime::kWasmI64AtomicWait, centry, NoContextConstant(), instance,
+ address_heap, expected_value_high_heap, expected_value_low_heap,
+ timeout_heap));
+ ReturnRaw(SmiToInt32(result_smi));
+}
+
+TF_BUILTIN(WasmMemoryGrow, WasmBuiltinsAssembler) {
TNode<Int32T> num_pages =
UncheckedCast<Int32T>(Parameter(Descriptor::kNumPages));
Label num_pages_out_of_range(this, Label::kDeferred);
@@ -102,7 +211,7 @@ TF_BUILTIN(WasmGrowMemory, WasmBuiltinsAssembler) {
TNode<Code> centry = LoadCEntryFromInstance(instance);
TNode<Object> context = LoadContextFromInstance(instance);
TNode<Smi> ret_smi = UncheckedCast<Smi>(CallRuntimeWithCEntry(
- Runtime::kWasmGrowMemory, centry, context, instance, num_pages_smi));
+ Runtime::kWasmMemoryGrow, centry, context, instance, num_pages_smi));
TNode<Int32T> ret = SmiToInt32(ret_smi);
ReturnRaw(ret);
@@ -110,14 +219,44 @@ TF_BUILTIN(WasmGrowMemory, WasmBuiltinsAssembler) {
ReturnRaw(Int32Constant(-1));
}
-#define DECLARE_ENUM(name) \
- TF_BUILTIN(ThrowWasm##name, WasmBuiltinsAssembler) { \
- TNode<Object> instance = LoadInstanceFromFrame(); \
- TNode<Code> centry = LoadCEntryFromInstance(instance); \
- TNode<Object> context = LoadContextFromInstance(instance); \
- int message_id = wasm::WasmOpcodes::TrapReasonToMessageId(wasm::k##name); \
- TailCallRuntimeWithCEntry(Runtime::kThrowWasmError, centry, context, \
- SmiConstant(message_id)); \
+TF_BUILTIN(BigIntToWasmI64, WasmBuiltinsAssembler) {
+ if (!Is64()) {
+ Unreachable();
+ return;
+ }
+
+ TNode<Code> target = LoadBuiltinFromFrame(Builtins::kI64ToBigInt);
+ TNode<IntPtrT> argument =
+ UncheckedCast<IntPtrT>(Parameter(Descriptor::kArgument));
+
+ TailCallStub(BigIntToWasmI64Descriptor(), target, NoContextConstant(),
+ argument);
+}
+
+TF_BUILTIN(WasmBigIntToI64, WasmBuiltinsAssembler) {
+ if (!Is64()) {
+ Unreachable();
+ return;
+ }
+
+ TNode<Object> context =
+ UncheckedCast<Object>(Parameter(Descriptor::kContext));
+ TNode<Code> target = LoadBuiltinFromFrame(Builtins::kBigIntToI64);
+ TNode<IntPtrT> argument =
+ UncheckedCast<IntPtrT>(Parameter(Descriptor::kArgument));
+
+ TailCallStub(BigIntToI64Descriptor(), target, context, argument);
+}
+
+#define DECLARE_ENUM(name) \
+ TF_BUILTIN(ThrowWasm##name, WasmBuiltinsAssembler) { \
+ TNode<Object> instance = LoadInstanceFromFrame(); \
+ TNode<Code> centry = LoadCEntryFromInstance(instance); \
+ TNode<Object> context = LoadContextFromInstance(instance); \
+ MessageTemplate message_id = \
+ wasm::WasmOpcodes::TrapReasonToMessageId(wasm::k##name); \
+ TailCallRuntimeWithCEntry(Runtime::kThrowWasmError, centry, context, \
+ SmiConstant(static_cast<int>(message_id))); \
}
FOREACH_WASM_TRAPREASON(DECLARE_ENUM)
#undef DECLARE_ENUM
diff --git a/deps/v8/src/builtins/builtins-weak-refs.cc b/deps/v8/src/builtins/builtins-weak-refs.cc
new file mode 100644
index 0000000000..e89deb705b
--- /dev/null
+++ b/deps/v8/src/builtins/builtins-weak-refs.cc
@@ -0,0 +1,169 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/builtins/builtins-utils-inl.h"
+#include "src/counters.h"
+#include "src/objects/js-weak-refs-inl.h"
+
+namespace v8 {
+namespace internal {
+
+BUILTIN(WeakFactoryConstructor) {
+ HandleScope scope(isolate);
+ Handle<JSFunction> target = args.target();
+ if (args.new_target()->IsUndefined(isolate)) { // [[Call]]
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kConstructorNotFunction,
+ handle(target->shared()->Name(), isolate)));
+ }
+ // [[Construct]]
+ Handle<JSReceiver> new_target = Handle<JSReceiver>::cast(args.new_target());
+ Handle<Object> cleanup = args.atOrUndefined(isolate, 1);
+
+ if (!cleanup->IsCallable()) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kWeakRefsCleanupMustBeCallable));
+ }
+
+ Handle<JSObject> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result,
+ JSObject::New(target, new_target, Handle<AllocationSite>::null()));
+
+ Handle<JSWeakFactory> weak_factory = Handle<JSWeakFactory>::cast(result);
+ weak_factory->set_native_context(*isolate->native_context());
+ weak_factory->set_cleanup(*cleanup);
+ weak_factory->set_flags(
+ JSWeakFactory::ScheduledForCleanupField::encode(false));
+ return *weak_factory;
+}
+
+BUILTIN(WeakFactoryMakeCell) {
+ HandleScope scope(isolate);
+ const char* method_name = "WeakFactory.prototype.makeCell";
+
+ CHECK_RECEIVER(JSWeakFactory, weak_factory, method_name);
+
+ Handle<Object> target = args.atOrUndefined(isolate, 1);
+ if (!target->IsJSReceiver()) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate,
+ NewTypeError(MessageTemplate::kWeakRefsMakeCellTargetMustBeObject));
+ }
+ Handle<JSReceiver> target_receiver = Handle<JSReceiver>::cast(target);
+ Handle<Object> holdings = args.atOrUndefined(isolate, 2);
+ if (target->SameValue(*holdings)) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate,
+ NewTypeError(
+ MessageTemplate::kWeakRefsMakeCellTargetAndHoldingsMustNotBeSame));
+ }
+
+ // TODO(marja): Realms.
+
+ Handle<Map> weak_cell_map(isolate->native_context()->js_weak_cell_map(),
+ isolate);
+
+ // Allocate the JSWeakCell object in the old space, because 1) JSWeakCell
+  // weakness handling is only implemented in the old space and 2) they're
+  // supposedly long-lived. TODO(marja): Support JSWeakCells in Scavenger.
+ Handle<JSWeakCell> weak_cell =
+ Handle<JSWeakCell>::cast(isolate->factory()->NewJSObjectFromMap(
+ weak_cell_map, TENURED, Handle<AllocationSite>::null()));
+ weak_cell->set_target(*target_receiver);
+ weak_cell->set_holdings(*holdings);
+ weak_factory->AddWeakCell(*weak_cell);
+ return *weak_cell;
+}
+
+BUILTIN(WeakFactoryCleanupSome) {
+ HandleScope scope(isolate);
+ const char* method_name = "WeakFactory.prototype.cleanupSome";
+
+ CHECK_RECEIVER(JSWeakFactory, weak_factory, method_name);
+
+ // Don't do set_scheduled_for_cleanup(false); we still have the microtask
+ // scheduled and don't want to schedule another one in case the user never
+ // executes microtasks.
+ JSWeakFactory::Cleanup(weak_factory, isolate);
+ return ReadOnlyRoots(isolate).undefined_value();
+}
+
+BUILTIN(WeakFactoryCleanupIteratorNext) {
+ HandleScope scope(isolate);
+ CHECK_RECEIVER(JSWeakFactoryCleanupIterator, iterator, "next");
+
+ Handle<JSWeakFactory> weak_factory(iterator->factory(), isolate);
+ if (!weak_factory->NeedsCleanup()) {
+ return *isolate->factory()->NewJSIteratorResult(
+ handle(ReadOnlyRoots(isolate).undefined_value(), isolate), true);
+ }
+ Handle<JSWeakCell> weak_cell_object =
+ handle(weak_factory->PopClearedCell(isolate), isolate);
+
+ return *isolate->factory()->NewJSIteratorResult(weak_cell_object, false);
+}
+
+BUILTIN(WeakCellHoldingsGetter) {
+ HandleScope scope(isolate);
+ CHECK_RECEIVER(JSWeakCell, weak_cell, "get WeakCell.holdings");
+ return weak_cell->holdings();
+}
+
+BUILTIN(WeakCellClear) {
+ HandleScope scope(isolate);
+ CHECK_RECEIVER(JSWeakCell, weak_cell, "WeakCell.prototype.clear");
+ weak_cell->Clear(isolate);
+ return ReadOnlyRoots(isolate).undefined_value();
+}
+
+BUILTIN(WeakRefConstructor) {
+ HandleScope scope(isolate);
+ Handle<JSFunction> target = args.target();
+ if (args.new_target()->IsUndefined(isolate)) { // [[Call]]
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kConstructorNotFunction,
+ handle(target->shared()->Name(), isolate)));
+ }
+ // [[Construct]]
+ Handle<JSReceiver> new_target = Handle<JSReceiver>::cast(args.new_target());
+ Handle<Object> target_object = args.atOrUndefined(isolate, 1);
+ if (!target_object->IsJSReceiver()) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate,
+ NewTypeError(
+ MessageTemplate::kWeakRefsWeakRefConstructorTargetMustBeObject));
+ }
+ isolate->heap()->AddKeepDuringJobTarget(
+ Handle<JSReceiver>::cast(target_object));
+
+ // TODO(marja): Realms.
+
+ Handle<JSObject> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result,
+ JSObject::New(target, new_target, Handle<AllocationSite>::null()));
+
+ Handle<JSWeakRef> weak_ref = Handle<JSWeakRef>::cast(result);
+ weak_ref->set_target(*target_object);
+ return *weak_ref;
+}
+
+BUILTIN(WeakRefDeref) {
+ HandleScope scope(isolate);
+ CHECK_RECEIVER(JSWeakRef, weak_ref, "WeakRef.prototype.deref");
+ if (weak_ref->target()->IsJSReceiver()) {
+ Handle<JSReceiver> target =
+ handle(JSReceiver::cast(weak_ref->target()), isolate);
+ // AddKeepDuringJobTarget might allocate and cause a GC, but it won't clear
+ // weak_ref since we hold a Handle to its target.
+ isolate->heap()->AddKeepDuringJobTarget(target);
+ } else {
+ DCHECK(weak_ref->target()->IsUndefined(isolate));
+ }
+ return weak_ref->target();
+}
+
+} // namespace internal
+} // namespace v8
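WeakFactoryConstructor and WeakRefConstructor above both follow the standard pattern for C++ constructor builtins: a plain [[Call]] is detected by new_target being undefined and rejected with a TypeError, while [[Construct]] proceeds to allocate. A toy model of that dispatch, with std::optional in place of a tagged undefined value:

#include <optional>
#include <stdexcept>
#include <string>

struct JSReceiver {
  std::string name;
};

std::string ConstructorBuiltin(const std::optional<JSReceiver>& new_target) {
  if (!new_target) {  // [[Call]]: invoked without `new`
    throw std::runtime_error("TypeError: constructor requires new");
  }
  return "constructed via " + new_target->name;  // [[Construct]]
}

int main() {
  try {
    ConstructorBuiltin(std::nullopt);
    return 1;  // should have thrown
  } catch (const std::runtime_error&) {
  }
  return ConstructorBuiltin(JSReceiver{"WeakFactory"}).empty() ? 1 : 0;
}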
diff --git a/deps/v8/src/builtins/builtins.cc b/deps/v8/src/builtins/builtins.cc
index 103f00c56e..129beb2700 100644
--- a/deps/v8/src/builtins/builtins.cc
+++ b/deps/v8/src/builtins/builtins.cc
@@ -8,10 +8,13 @@
#include "src/assembler-inl.h"
#include "src/builtins/builtins-descriptors.h"
#include "src/callable.h"
-#include "src/instruction-stream.h"
+#include "src/code-tracer.h"
#include "src/isolate.h"
#include "src/macro-assembler.h"
#include "src/objects-inl.h"
+#include "src/objects/fixed-array.h"
+#include "src/ostreams.h"
+#include "src/snapshot/embedded-data.h"
#include "src/visitors.h"
namespace v8 {
@@ -19,7 +22,7 @@ namespace internal {
// Forward declarations for C++ builtins.
#define FORWARD_DECLARE(Name) \
- Object* Builtin_##Name(int argc, Object** args, Isolate* isolate);
+ Address Builtin_##Name(int argc, Address* args, Isolate* isolate);
BUILTIN_LIST_C(FORWARD_DECLARE)
#undef FORWARD_DECLARE
@@ -29,34 +32,25 @@ namespace {
struct BuiltinMetadata {
const char* name;
Builtins::Kind kind;
- union {
- Address cpp_entry; // For CPP and API builtins.
- int8_t parameter_count; // For TFJ builtins.
- } kind_specific_data;
+  // For CPP and API builtins it's the cpp_entry address and for TFJ it's a
+  // parameter count.
+ Address cpp_entry_or_parameter_count;
};
-// clang-format off
-#define DECL_CPP(Name, ...) { #Name, Builtins::CPP, \
- { FUNCTION_ADDR(Builtin_##Name) }},
-#define DECL_API(Name, ...) { #Name, Builtins::API, \
- { FUNCTION_ADDR(Builtin_##Name) }},
-#ifdef V8_TARGET_BIG_ENDIAN
-#define DECL_TFJ(Name, Count, ...) { #Name, Builtins::TFJ, \
- { static_cast<Address>(static_cast<uintptr_t>( \
- Count) << (kBitsPerByte * (kPointerSize - 1))) }},
-#else
-#define DECL_TFJ(Name, Count, ...) { #Name, Builtins::TFJ, \
- { static_cast<Address>(Count) }},
-#endif
-#define DECL_TFC(Name, ...) { #Name, Builtins::TFC, {} },
-#define DECL_TFS(Name, ...) { #Name, Builtins::TFS, {} },
-#define DECL_TFH(Name, ...) { #Name, Builtins::TFH, {} },
-#define DECL_BCH(Name, ...) { #Name, Builtins::BCH, {} },
-#define DECL_DLH(Name, ...) { #Name, Builtins::DLH, {} },
-#define DECL_ASM(Name, ...) { #Name, Builtins::ASM, {} },
+#define DECL_CPP(Name, ...) \
+ {#Name, Builtins::CPP, FUNCTION_ADDR(Builtin_##Name)},
+#define DECL_API(Name, ...) \
+ {#Name, Builtins::API, FUNCTION_ADDR(Builtin_##Name)},
+#define DECL_TFJ(Name, Count, ...) \
+ {#Name, Builtins::TFJ, static_cast<Address>(Count)},
+#define DECL_TFC(Name, ...) {#Name, Builtins::TFC, kNullAddress},
+#define DECL_TFS(Name, ...) {#Name, Builtins::TFS, kNullAddress},
+#define DECL_TFH(Name, ...) {#Name, Builtins::TFH, kNullAddress},
+#define DECL_BCH(Name, ...) {#Name, Builtins::BCH, kNullAddress},
+#define DECL_ASM(Name, ...) {#Name, Builtins::ASM, kNullAddress},
const BuiltinMetadata builtin_metadata[] = {
BUILTIN_LIST(DECL_CPP, DECL_API, DECL_TFJ, DECL_TFC, DECL_TFS, DECL_TFH,
- DECL_BCH, DECL_DLH, DECL_ASM)
+ DECL_BCH, DECL_ASM)
};
#undef DECL_CPP
#undef DECL_API
@@ -65,9 +59,7 @@ const BuiltinMetadata builtin_metadata[] = {
#undef DECL_TFS
#undef DECL_TFH
#undef DECL_BCH
-#undef DECL_DLH
#undef DECL_ASM
-// clang-format on
} // namespace
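The hunk above collapses the old kind-specific union into a single Address-sized field: CPP and API entries store a function address, TFJ entries store a small integer parameter count, and the kind tag tells readers which interpretation applies (presumably also why the big-endian byte-shuffling special case could be dropped, since the count is now read with a plain cast rather than through a union's int8_t member). A standalone miniature of the same encoding; the integer/function-pointer casts are implementation-defined but work on mainstream toolchains:

#include <cassert>
#include <cstdint>

using Address = uintptr_t;

enum Kind { CPP, TFJ };

struct BuiltinMetadata {
  const char* name;
  Kind kind;
  Address cpp_entry_or_parameter_count;  // meaning depends on `kind`
};

static Address Builtin_Demo() { return 7; }

int main() {
  const BuiltinMetadata table[] = {
      {"Demo", CPP, reinterpret_cast<Address>(&Builtin_Demo)},
      {"FastPath", TFJ, static_cast<Address>(2)},  // two JS parameters
  };
  // TFJ: reinterpret the field as a count.
  assert(static_cast<int>(table[1].cpp_entry_or_parameter_count) == 2);
  // CPP: reinterpret the field as a callable entry point.
  auto entry =
      reinterpret_cast<Address (*)()>(table[0].cpp_entry_or_parameter_count);
  assert(entry() == 7);
  return 0;
}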
@@ -88,8 +80,8 @@ void Builtins::TearDown() { initialized_ = false; }
const char* Builtins::Lookup(Address pc) {
// Off-heap pc's can be looked up through binary search.
if (FLAG_embedded_builtins) {
- Code* maybe_builtin = InstructionStream::TryLookupCode(isolate_, pc);
- if (maybe_builtin != nullptr) return name(maybe_builtin->builtin_index());
+ Code maybe_builtin = InstructionStream::TryLookupCode(isolate_, pc);
+ if (!maybe_builtin.is_null()) return name(maybe_builtin->builtin_index());
}
// May be called during initialization (disassembler).
@@ -101,18 +93,6 @@ const char* Builtins::Lookup(Address pc) {
return nullptr;
}
-Handle<Code> Builtins::NewFunctionContext(ScopeType scope_type) {
- switch (scope_type) {
- case ScopeType::EVAL_SCOPE:
- return builtin_handle(kFastNewFunctionContextEval);
- case ScopeType::FUNCTION_SCOPE:
- return builtin_handle(kFastNewFunctionContextFunction);
- default:
- UNREACHABLE();
- }
- return Handle<Code>::null();
-}
-
Handle<Code> Builtins::NonPrimitiveToPrimitive(ToPrimitiveHint hint) {
switch (hint) {
case ToPrimitiveHint::kDefault:
@@ -135,22 +115,22 @@ Handle<Code> Builtins::OrdinaryToPrimitive(OrdinaryToPrimitiveHint hint) {
UNREACHABLE();
}
-void Builtins::set_builtin(int index, HeapObject* builtin) {
+void Builtins::set_builtin(int index, Code builtin) {
isolate_->heap()->set_builtin(index, builtin);
}
-Code* Builtins::builtin(int index) { return isolate_->heap()->builtin(index); }
+Code Builtins::builtin(int index) { return isolate_->heap()->builtin(index); }
Handle<Code> Builtins::builtin_handle(int index) {
DCHECK(IsBuiltinId(index));
return Handle<Code>(
- reinterpret_cast<Code**>(isolate_->heap()->builtin_address(index)));
+ reinterpret_cast<Address*>(isolate_->heap()->builtin_address(index)));
}
// static
int Builtins::GetStackParameterCount(Name name) {
DCHECK(Builtins::KindOf(name) == TFJ);
- return builtin_metadata[name].kind_specific_data.parameter_count;
+ return static_cast<int>(builtin_metadata[name].cpp_entry_or_parameter_count);
}
// static
@@ -166,12 +146,11 @@ Callable Builtins::CallableFor(Isolate* isolate, Name name) {
break; \
}
BUILTIN_LIST(IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN, CASE_OTHER,
- CASE_OTHER, CASE_OTHER, IGNORE_BUILTIN, IGNORE_BUILTIN,
- IGNORE_BUILTIN)
+ CASE_OTHER, CASE_OTHER, IGNORE_BUILTIN, CASE_OTHER)
#undef CASE_OTHER
default:
Builtins::Kind kind = Builtins::KindOf(name);
- DCHECK(kind != BCH && kind != DLH);
+ DCHECK_NE(BCH, kind);
if (kind == TFJ || kind == CPP) {
return Callable(code, JSTrampolineDescriptor{});
}
@@ -187,14 +166,42 @@ const char* Builtins::name(int index) {
return builtin_metadata[index].name;
}
+void Builtins::PrintBuiltinCode() {
+ DCHECK(FLAG_print_builtin_code);
+#ifdef ENABLE_DISASSEMBLER
+ for (int i = 0; i < builtin_count; i++) {
+ const char* builtin_name = name(i);
+ Handle<Code> code = builtin_handle(i);
+ if (PassesFilter(CStrVector(builtin_name),
+ CStrVector(FLAG_print_builtin_code_filter))) {
+ CodeTracer::Scope trace_scope(isolate_->GetCodeTracer());
+ OFStream os(trace_scope.file());
+ code->Disassemble(builtin_name, os);
+ os << "\n";
+ }
+ }
+#endif
+}
+
+void Builtins::PrintBuiltinSize() {
+ DCHECK(FLAG_print_builtin_size);
+ for (int i = 0; i < builtin_count; i++) {
+ const char* builtin_name = name(i);
+ const char* kind = KindNameOf(i);
+ Code code = builtin(i);
+ PrintF(stdout, "%s Builtin, %s, %d\n", kind, builtin_name,
+ code->InstructionSize());
+ }
+}
+
// static
Address Builtins::CppEntryOf(int index) {
DCHECK(Builtins::HasCppImplementation(index));
- return builtin_metadata[index].kind_specific_data.cpp_entry;
+ return builtin_metadata[index].cpp_entry_or_parameter_count;
}
// static
-bool Builtins::IsBuiltin(const Code* code) {
+bool Builtins::IsBuiltin(const Code code) {
return Builtins::IsBuiltinId(code->builtin_index());
}
@@ -206,13 +213,13 @@ bool Builtins::IsBuiltinHandle(Handle<HeapObject> maybe_code,
Address end = heap->builtin_address(Builtins::builtin_count);
if (handle_location >= end) return false;
if (handle_location < start) return false;
- *index = static_cast<int>(handle_location - start) >> kPointerSizeLog2;
+ *index = static_cast<int>(handle_location - start) >> kSystemPointerSizeLog2;
DCHECK(Builtins::IsBuiltinId(*index));
return true;
}
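IsBuiltinHandle above classifies a handle by address range: handles into the builtins table lie between the first and one-past-last slot addresses, and the builtin index falls out of the byte distance divided by the slot size (a shift by kSystemPointerSizeLog2, matching the renamed constant). A self-contained sketch of that recovery:

#include <cassert>
#include <cstdint>

using Address = uintptr_t;

constexpr int kSystemPointerSizeLog2 = sizeof(void*) == 8 ? 3 : 2;
constexpr int kBuiltinCount = 4;

bool IsBuiltinHandle(const Address* table, Address handle_location,
                     int* index) {
  const Address start = reinterpret_cast<Address>(table);
  const Address end = reinterpret_cast<Address>(table + kBuiltinCount);
  if (handle_location >= end) return false;
  if (handle_location < start) return false;
  *index = static_cast<int>(handle_location - start) >> kSystemPointerSizeLog2;
  return true;
}

int main() {
  Address table[kBuiltinCount] = {};
  int index = -1;
  assert(IsBuiltinHandle(table, reinterpret_cast<Address>(&table[2]), &index));
  assert(index == 2);  // third slot, zero-based
  return 0;
}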
// static
-bool Builtins::IsIsolateIndependentBuiltin(const Code* code) {
+bool Builtins::IsIsolateIndependentBuiltin(const Code code) {
if (FLAG_embedded_builtins) {
const int builtin_index = code->builtin_index();
return Builtins::IsBuiltinId(builtin_index) &&
@@ -223,151 +230,6 @@ bool Builtins::IsIsolateIndependentBuiltin(const Code* code) {
}
// static
-bool Builtins::IsLazy(int index) {
- DCHECK(IsBuiltinId(index));
-
- if (FLAG_embedded_builtins) {
- // We don't want to lazy-deserialize off-heap builtins.
- if (Builtins::IsIsolateIndependent(index)) return false;
- }
-
- // There are a couple of reasons that builtins can require eager-loading,
- // i.e. deserialization at isolate creation instead of on-demand. For
- // instance:
- // * DeserializeLazy implements lazy loading.
- // * Immovability requirement. This can only conveniently be guaranteed at
- // isolate creation (at runtime, we'd have to allocate in LO space).
- // * To avoid conflicts in SharedFunctionInfo::function_data (Illegal,
- // HandleApiCall, interpreter entry trampolines).
- // * Frequent use makes lazy loading unnecessary (CompileLazy).
- // TODO(wasm): Remove wasm builtins once immovability is no longer required.
- switch (index) {
- case kAbort: // Required by wasm.
- case kArrayEveryLoopEagerDeoptContinuation:
- case kArrayEveryLoopLazyDeoptContinuation:
- case kArrayFilterLoopEagerDeoptContinuation:
- case kArrayFilterLoopLazyDeoptContinuation:
- case kArrayFindIndexLoopAfterCallbackLazyDeoptContinuation:
- case kArrayFindIndexLoopEagerDeoptContinuation:
- case kArrayFindIndexLoopLazyDeoptContinuation:
- case kArrayFindLoopAfterCallbackLazyDeoptContinuation:
- case kArrayFindLoopEagerDeoptContinuation:
- case kArrayFindLoopLazyDeoptContinuation:
- case kArrayForEachLoopEagerDeoptContinuation:
- case kArrayForEachLoopLazyDeoptContinuation:
- case kArrayMapLoopEagerDeoptContinuation:
- case kArrayMapLoopLazyDeoptContinuation:
- case kArrayReduceLoopEagerDeoptContinuation:
- case kArrayReduceLoopLazyDeoptContinuation:
- case kArrayReducePreLoopEagerDeoptContinuation:
- case kArrayReduceRightLoopEagerDeoptContinuation:
- case kArrayReduceRightLoopLazyDeoptContinuation:
- case kArrayReduceRightPreLoopEagerDeoptContinuation:
- case kArraySomeLoopEagerDeoptContinuation:
- case kArraySomeLoopLazyDeoptContinuation:
- case kAsyncFunctionAwaitResolveClosure: // https://crbug.com/v8/7522
- case kAsyncGeneratorAwaitResolveClosure: // https://crbug.com/v8/7522
- case kAsyncGeneratorYieldResolveClosure: // https://crbug.com/v8/7522
- case kAsyncGeneratorAwaitCaught: // https://crbug.com/v8/6786.
- case kAsyncGeneratorAwaitUncaught: // https://crbug.com/v8/6786.
- // CEntry variants must be immovable, whereas lazy deserialization allocates
- // movable code.
- case kCEntry_Return1_DontSaveFPRegs_ArgvOnStack_NoBuiltinExit:
- case kCEntry_Return1_DontSaveFPRegs_ArgvOnStack_BuiltinExit:
- case kCEntry_Return1_DontSaveFPRegs_ArgvInRegister_NoBuiltinExit:
- case kCEntry_Return1_SaveFPRegs_ArgvOnStack_NoBuiltinExit:
- case kCEntry_Return1_SaveFPRegs_ArgvOnStack_BuiltinExit:
- case kCEntry_Return2_DontSaveFPRegs_ArgvOnStack_NoBuiltinExit:
- case kCEntry_Return2_DontSaveFPRegs_ArgvOnStack_BuiltinExit:
- case kCEntry_Return2_DontSaveFPRegs_ArgvInRegister_NoBuiltinExit:
- case kCEntry_Return2_SaveFPRegs_ArgvOnStack_NoBuiltinExit:
- case kCEntry_Return2_SaveFPRegs_ArgvOnStack_BuiltinExit:
- case kCompileLazy:
- case kDebugBreakTrampoline:
- case kDeserializeLazy:
- case kDeserializeLazyHandler:
- case kDeserializeLazyWideHandler:
- case kDeserializeLazyExtraWideHandler:
- case kFunctionPrototypeHasInstance: // https://crbug.com/v8/6786.
- case kHandleApiCall:
- case kIllegal:
- case kIllegalHandler:
- case kInstantiateAsmJs:
- case kInterpreterEnterBytecodeAdvance:
- case kInterpreterEnterBytecodeDispatch:
- case kInterpreterEntryTrampoline:
- case kPromiseConstructorLazyDeoptContinuation:
- case kRecordWrite: // https://crbug.com/chromium/765301.
- case kThrowWasmTrapDivByZero: // Required by wasm.
- case kThrowWasmTrapDivUnrepresentable: // Required by wasm.
- case kThrowWasmTrapFloatUnrepresentable: // Required by wasm.
- case kThrowWasmTrapFuncInvalid: // Required by wasm.
- case kThrowWasmTrapFuncSigMismatch: // Required by wasm.
- case kThrowWasmTrapMemOutOfBounds: // Required by wasm.
- case kThrowWasmTrapRemByZero: // Required by wasm.
- case kThrowWasmTrapUnreachable: // Required by wasm.
- case kToBooleanLazyDeoptContinuation:
- case kToNumber: // Required by wasm.
- case kGenericConstructorLazyDeoptContinuation:
- case kWasmCompileLazy: // Required by wasm.
- case kWasmStackGuard: // Required by wasm.
- return false;
- default:
- // TODO(6624): Extend to other kinds.
- return KindOf(index) == TFJ || KindOf(index) == BCH;
- }
- UNREACHABLE();
-}
-
-// static
-bool Builtins::IsLazyDeserializer(Code* code) {
- return IsLazyDeserializer(code->builtin_index());
-}
-
-// static
-bool Builtins::IsIsolateIndependent(int index) {
- DCHECK(IsBuiltinId(index));
-#ifndef V8_TARGET_ARCH_IA32
- switch (index) {
- // TODO(jgruber): There's currently two blockers for moving
- // InterpreterEntryTrampoline into the binary:
- // 1. InterpreterEnterBytecode calculates a pointer into the middle of
- // InterpreterEntryTrampoline (see interpreter_entry_return_pc_offset).
- // When the builtin is embedded, the pointer would need to be calculated
- // at an offset from the embedded instruction stream (instead of the
- // trampoline code object).
- // 2. We create distinct copies of the trampoline to make it possible to
- // attribute ticks in the interpreter to individual JS functions.
- // See https://crrev.com/c/959081 and InstallBytecodeArray. When the
- // trampoline is embedded, we need to ensure that CopyCode creates a copy
- // of the builtin itself (and not just the trampoline).
- case kInterpreterEntryTrampoline:
- return false;
- default:
- return true;
- }
-#else // V8_TARGET_ARCH_IA32
- // TODO(jgruber, v8:6666): Implement support.
- // ia32 is a work-in-progress. This will let us make builtins
- // isolate-independent one-by-one.
- switch (index) {
- case kContinueToCodeStubBuiltin:
- case kContinueToCodeStubBuiltinWithResult:
- case kContinueToJavaScriptBuiltin:
- case kContinueToJavaScriptBuiltinWithResult:
- case kWasmAllocateHeapNumber:
- case kWasmCallJavaScript:
- case kWasmToNumber:
- case kDoubleToI:
- return true;
- default:
- return false;
- }
-#endif // V8_TARGET_ARCH_IA32
- UNREACHABLE();
-}
-
-// static
bool Builtins::IsWasmRuntimeStub(int index) {
DCHECK(IsBuiltinId(index));
switch (index) {
@@ -384,27 +246,76 @@ bool Builtins::IsWasmRuntimeStub(int index) {
}
// static
+void Builtins::UpdateBuiltinEntryTable(Isolate* isolate) {
+ Heap* heap = isolate->heap();
+ Address* builtin_entry_table = isolate->builtin_entry_table();
+ for (int i = 0; i < builtin_count; i++) {
+ builtin_entry_table[i] = heap->builtin(i)->InstructionStart();
+ }
+}
+
+namespace {
+
+class OffHeapTrampolineGenerator {
+ public:
+ explicit OffHeapTrampolineGenerator(Isolate* isolate)
+ : isolate_(isolate),
+ masm_(isolate, CodeObjectRequired::kYes,
+ ExternalAssemblerBuffer(buffer_, kBufferSize)) {}
+
+ CodeDesc Generate(Address off_heap_entry) {
+ // Generate replacement code that simply tail-calls the off-heap code.
+ DCHECK(!masm_.has_frame());
+ {
+ FrameScope scope(&masm_, StackFrame::NONE);
+ masm_.JumpToInstructionStream(off_heap_entry);
+ }
+
+ CodeDesc desc;
+ masm_.GetCode(isolate_, &desc);
+ return desc;
+ }
+
+ Handle<HeapObject> CodeObject() { return masm_.CodeObject(); }
+
+ private:
+ Isolate* isolate_;
+ // Enough to fit the single jmp.
+ static constexpr int kBufferSize = 256;
+ byte buffer_[kBufferSize];
+ MacroAssembler masm_;
+};
+
+constexpr int OffHeapTrampolineGenerator::kBufferSize;
+
+} // namespace
+
+// static
Handle<Code> Builtins::GenerateOffHeapTrampolineFor(Isolate* isolate,
Address off_heap_entry) {
- DCHECK(isolate->serializer_enabled());
DCHECK_NOT_NULL(isolate->embedded_blob());
DCHECK_NE(0, isolate->embedded_blob_size());
- constexpr size_t buffer_size = 256; // Enough to fit the single jmp.
- byte buffer[buffer_size]; // NOLINT(runtime/arrays)
+ OffHeapTrampolineGenerator generator(isolate);
+ CodeDesc desc = generator.Generate(off_heap_entry);
- // Generate replacement code that simply tail-calls the off-heap code.
- MacroAssembler masm(isolate, buffer, buffer_size, CodeObjectRequired::kYes);
- DCHECK(!masm.has_frame());
- {
- FrameScope scope(&masm, StackFrame::NONE);
- masm.JumpToInstructionStream(off_heap_entry);
- }
-
- CodeDesc desc;
- masm.GetCode(isolate, &desc);
+ return isolate->factory()->NewCode(desc, Code::BUILTIN,
+ generator.CodeObject());
+}
- return isolate->factory()->NewCode(desc, Code::BUILTIN, masm.CodeObject());
+// static
+Handle<ByteArray> Builtins::GenerateOffHeapTrampolineRelocInfo(
+ Isolate* isolate) {
+ OffHeapTrampolineGenerator generator(isolate);
+ // Generate a jump to a dummy address as we're not actually interested in the
+ // generated instruction stream.
+ CodeDesc desc = generator.Generate(kNullAddress);
+
+ Handle<ByteArray> reloc_info =
+ isolate->factory()->NewByteArray(desc.reloc_size, TENURED_READ_ONLY);
+ Code::CopyRelocInfoToByteArray(*reloc_info, desc);
+
+ return reloc_info;
}
// static
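Factoring the emission into OffHeapTrampolineGenerator lets one code path serve two callers: GenerateOffHeapTrampolineFor keeps the generated instructions, while GenerateOffHeapTrampolineRelocInfo generates against a dummy target purely to harvest the relocation layout, which is the same for every target address. A toy illustration of that reuse (the opcode bytes here are invented):

#include <cassert>
#include <cstdint>
#include <vector>

// Toy stand-in for CodeDesc: raw instructions plus relocation offsets.
struct CodeDesc {
  std::vector<uint8_t> instructions;
  std::vector<int> reloc_offsets;
};

class OffHeapTrampolineGenerator {
 public:
  CodeDesc Generate(uintptr_t off_heap_entry) {
    CodeDesc desc;
    desc.instructions.push_back(0xE9);  // pretend "jmp" opcode
    desc.reloc_offsets.push_back(1);    // target word patched at offset 1
    for (int i = 0; i < 4; i++) {
      desc.instructions.push_back(
          static_cast<uint8_t>(off_heap_entry >> (8 * i)));
    }
    return desc;
  }
};

int main() {
  OffHeapTrampolineGenerator generator;
  CodeDesc real = generator.Generate(0x1234);  // actual trampoline
  CodeDesc layout = generator.Generate(0);     // dummy target, reloc only
  assert(real.reloc_offsets == layout.reloc_offsets);
  return 0;
}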
@@ -425,7 +336,6 @@ const char* Builtins::KindNameOf(int index) {
case TFS: return "TFS";
case TFH: return "TFH";
case BCH: return "BCH";
- case DLH: return "DLH";
case ASM: return "ASM";
}
// clang-format on
@@ -446,9 +356,7 @@ bool Builtins::AllowDynamicFunction(Isolate* isolate, Handle<JSFunction> target,
Handle<JSObject> target_global_proxy) {
if (FLAG_allow_unsafe_function_constructor) return true;
HandleScopeImplementer* impl = isolate->handle_scope_implementer();
- Handle<Context> responsible_context =
- impl->MicrotaskContextIsLastEnteredContext() ? impl->MicrotaskContext()
- : impl->LastEnteredContext();
+ Handle<Context> responsible_context = impl->LastEnteredOrMicrotaskContext();
// TODO(jochen): Remove this.
if (responsible_context.is_null()) {
return true;
@@ -457,5 +365,18 @@ bool Builtins::AllowDynamicFunction(Isolate* isolate, Handle<JSFunction> target,
return isolate->MayAccess(responsible_context, target_global_proxy);
}
+Builtins::Name ExampleBuiltinForTorqueFunctionPointerType(
+ size_t function_pointer_type_id) {
+ switch (function_pointer_type_id) {
+#define FUNCTION_POINTER_ID_CASE(id, name) \
+ case id: \
+ return Builtins::k##name;
+ TORQUE_FUNCTION_POINTER_TYPE_TO_BUILTIN_MAP(FUNCTION_POINTER_ID_CASE)
+#undef FUNCTION_POINTER_ID_CASE
+ default:
+ UNREACHABLE();
+ }
+}
+
} // namespace internal
} // namespace v8
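ExampleBuiltinForTorqueFunctionPointerType is a plain X-macro dispatch: the Torque compiler emits TORQUE_FUNCTION_POINTER_TYPE_TO_BUILTIN_MAP, and each list entry expands into a switch case. The shape of the pattern in miniature, with an invented two-entry map standing in for the generated one:

#include <cstddef>
#include <cstdlib>

enum Name { kAbort, kToNumber };

// Stand-in for the Torque-generated map: V(type_id, BuiltinName).
#define DEMO_FUNCTION_POINTER_TYPE_TO_BUILTIN_MAP(V) \
  V(0, Abort)                                        \
  V(1, ToNumber)

Name ExampleBuiltinForFunctionPointerType(size_t id) {
  switch (id) {
#define FUNCTION_POINTER_ID_CASE(id, name) \
  case id:                                 \
    return k##name;
    DEMO_FUNCTION_POINTER_TYPE_TO_BUILTIN_MAP(FUNCTION_POINTER_ID_CASE)
#undef FUNCTION_POINTER_ID_CASE
    default:
      std::abort();  // unreachable for well-formed ids
  }
}

int main() {
  return ExampleBuiltinForFunctionPointerType(1) == kToNumber ? 0 : 1;
}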
diff --git a/deps/v8/src/builtins/builtins.h b/deps/v8/src/builtins/builtins.h
index 9f404a0ac0..7ea440e004 100644
--- a/deps/v8/src/builtins/builtins.h
+++ b/deps/v8/src/builtins/builtins.h
@@ -12,6 +12,7 @@
namespace v8 {
namespace internal {
+class ByteArray;
class Callable;
template <typename T>
class Handle;
@@ -46,7 +47,7 @@ class Builtins {
enum Name : int32_t {
#define DEF_ENUM(Name, ...) k##Name,
BUILTIN_LIST(DEF_ENUM, DEF_ENUM, DEF_ENUM, DEF_ENUM, DEF_ENUM, DEF_ENUM,
- DEF_ENUM, DEF_ENUM, DEF_ENUM)
+ DEF_ENUM, DEF_ENUM)
#undef DEF_ENUM
builtin_count,
@@ -64,7 +65,7 @@ class Builtins {
}
// The different builtin kinds are documented in builtins-definitions.h.
- enum Kind { CPP, API, TFJ, TFC, TFS, TFH, BCH, DLH, ASM };
+ enum Kind { CPP, API, TFJ, TFC, TFS, TFH, BCH, ASM };
static BailoutId GetContinuationBailoutId(Name name);
static Name GetBuiltinFromBailoutId(BailoutId);
@@ -75,16 +76,12 @@ class Builtins {
Handle<Code> NonPrimitiveToPrimitive(
ToPrimitiveHint hint = ToPrimitiveHint::kDefault);
Handle<Code> OrdinaryToPrimitive(OrdinaryToPrimitiveHint hint);
- Handle<Code> InterpreterPushArgsThenCall(ConvertReceiverMode receiver_mode,
- InterpreterPushArgsMode mode);
- Handle<Code> InterpreterPushArgsThenConstruct(InterpreterPushArgsMode mode);
- Handle<Code> NewFunctionContext(ScopeType scope_type);
Handle<Code> JSConstructStubGeneric();
- // Used by BuiltinDeserializer and CreateOffHeapTrampolines in isolate.cc.
- void set_builtin(int index, HeapObject* builtin);
+ // Used by CreateOffHeapTrampolines in isolate.cc.
+ void set_builtin(int index, Code builtin);
- Code* builtin(int index);
+ Code builtin(int index);
V8_EXPORT_PRIVATE Handle<Code> builtin_handle(int index);
V8_EXPORT_PRIVATE static Callable CallableFor(Isolate* isolate, Name name);
@@ -93,6 +90,10 @@ class Builtins {
static const char* name(int index);
+ // Support for --print-builtin-size and --print-builtin-code.
+ void PrintBuiltinCode();
+ void PrintBuiltinSize();
+
// Returns the C++ entry point for builtins implemented in C++, and the null
// Address otherwise.
static Address CppEntryOf(int index);
@@ -105,19 +106,14 @@ class Builtins {
// True, iff the given code object is a builtin. Note that this does not
// necessarily mean that its kind is Code::BUILTIN.
- static bool IsBuiltin(const Code* code);
+ static bool IsBuiltin(const Code code);
// As above, but safe to access off the main thread since the check is done
// by handle location. Similar to Heap::IsRootHandle.
bool IsBuiltinHandle(Handle<HeapObject> maybe_code, int* index) const;
// True, iff the given code object is a builtin with off-heap embedded code.
- static bool IsIsolateIndependentBuiltin(const Code* code);
-
- // Returns true iff the given builtin can be lazy-loaded from the snapshot.
- // This is true in general for most builtins with the exception of a few
- // special cases such as CompileLazy and DeserializeLazy.
- static bool IsLazy(int index);
+ static bool IsIsolateIndependentBuiltin(const Code code);
static constexpr int kFirstWideBytecodeHandler =
kFirstBytecodeHandler + kNumberOfBytecodeHandlers;
@@ -127,36 +123,26 @@ class Builtins {
kNumberOfWideBytecodeHandlers ==
builtin_count);
- // Returns the index of the appropriate lazy deserializer in the builtins
- // table.
- static constexpr int LazyDeserializerForBuiltin(const int index) {
- return index < kFirstWideBytecodeHandler
- ? (index < kFirstBytecodeHandler
- ? Builtins::kDeserializeLazy
- : Builtins::kDeserializeLazyHandler)
- : (index < kFirstExtraWideBytecodeHandler
- ? Builtins::kDeserializeLazyWideHandler
- : Builtins::kDeserializeLazyExtraWideHandler);
+ // True, iff the given builtin contains no isolate-specific code and can be
+ // embedded into the binary.
+ static constexpr bool kAllBuiltinsAreIsolateIndependent = true;
+ static constexpr bool AllBuiltinsAreIsolateIndependent() {
+ return kAllBuiltinsAreIsolateIndependent;
}
-
- static constexpr bool IsLazyDeserializer(int builtin_index) {
- return builtin_index == kDeserializeLazy ||
- builtin_index == kDeserializeLazyHandler ||
- builtin_index == kDeserializeLazyWideHandler ||
- builtin_index == kDeserializeLazyExtraWideHandler;
+ static constexpr bool IsIsolateIndependent(int index) {
+ STATIC_ASSERT(kAllBuiltinsAreIsolateIndependent);
+ return kAllBuiltinsAreIsolateIndependent;
}
- static bool IsLazyDeserializer(Code* code);
-
- // Helper methods used for testing isolate-independent builtins.
- // TODO(jgruber,v8:6666): Remove once all builtins have been migrated.
- static bool IsIsolateIndependent(int index);
-
// Wasm runtime stubs are treated specially by wasm. To guarantee reachability
// through near jumps, their code is completely copied into a fresh off-heap
// area.
static bool IsWasmRuntimeStub(int index);
+ // Updates the table of builtin entry points based on the current contents of
+ // the builtins table.
+ static void UpdateBuiltinEntryTable(Isolate* isolate);
+
bool is_initialized() const { return initialized_; }
// Used by SetupIsolateDelegate and Deserializer.
@@ -188,6 +174,35 @@ class Builtins {
static Handle<Code> GenerateOffHeapTrampolineFor(Isolate* isolate,
Address off_heap_entry);
+  // Generate the RelocInfo ByteArray that would be generated for an off-heap
+ // trampoline.
+ static Handle<ByteArray> GenerateOffHeapTrampolineRelocInfo(Isolate* isolate);
+
+ static bool IsJSEntryVariant(int builtin_index) {
+ switch (builtin_index) {
+ case kJSEntry:
+ case kJSConstructEntry:
+ case kJSRunMicrotasksEntry:
+ return true;
+ default:
+ return false;
+ }
+ UNREACHABLE();
+ }
+
+ int js_entry_handler_offset() const {
+ DCHECK_NE(js_entry_handler_offset_, 0);
+ return js_entry_handler_offset_;
+ }
+
+ void SetJSEntryHandlerOffset(int offset) {
+ // Check the stored offset is either uninitialized or unchanged (we
+ // generate multiple variants of this builtin but they should all have the
+ // same handler offset).
+ CHECK(js_entry_handler_offset_ == 0 || js_entry_handler_offset_ == offset);
+ js_entry_handler_offset_ = offset;
+ }
+
private:
static void Generate_CallFunction(MacroAssembler* masm,
ConvertReceiverMode mode);
@@ -216,8 +231,7 @@ class Builtins {
static void Generate_##Name(compiler::CodeAssemblerState* state);
BUILTIN_LIST(IGNORE_BUILTIN, IGNORE_BUILTIN, DECLARE_TF, DECLARE_TF,
- DECLARE_TF, DECLARE_TF, IGNORE_BUILTIN, IGNORE_BUILTIN,
- DECLARE_ASM)
+ DECLARE_TF, DECLARE_TF, IGNORE_BUILTIN, DECLARE_ASM)
#undef DECLARE_ASM
#undef DECLARE_TF
@@ -225,11 +239,19 @@ class Builtins {
Isolate* isolate_;
bool initialized_ = false;
+  // Stores the offset of the exception handler entry point (the handler_entry
+ // label) in JSEntry and its variants. It's used to generate the handler table
+ // during codegen (mksnapshot-only).
+ int js_entry_handler_offset_ = 0;
+
friend class SetupIsolateDelegate;
DISALLOW_COPY_AND_ASSIGN(Builtins);
};
+Builtins::Name ExampleBuiltinForTorqueFunctionPointerType(
+ size_t function_pointer_type_id);
+
} // namespace internal
} // namespace v8
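SetJSEntryHandlerOffset above encodes a write-once invariant: JSEntry, JSConstructEntry and JSRunMicrotasksEntry are generated separately, but all must report the same handler_entry offset, so the setter accepts only the first write or a byte-identical repeat. The invariant in isolation:

#include <cassert>

class HandlerOffsetSlot {
 public:
  void Set(int offset) {
    // 0 doubles as "uninitialized"; later writers must agree with the first.
    assert(offset_ == 0 || offset_ == offset);
    offset_ = offset;
  }
  int Get() const {
    assert(offset_ != 0);  // must be initialized before use
    return offset_;
  }

 private:
  int offset_ = 0;
};

int main() {
  HandlerOffsetSlot slot;
  slot.Set(128);  // first JSEntry variant records the offset
  slot.Set(128);  // remaining variants confirm the same value
  return slot.Get() == 128 ? 0 : 1;
}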
diff --git a/deps/v8/src/builtins/collections.tq b/deps/v8/src/builtins/collections.tq
new file mode 100644
index 0000000000..e4bc3b758b
--- /dev/null
+++ b/deps/v8/src/builtins/collections.tq
@@ -0,0 +1,57 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include 'src/builtins/builtins-collections-gen.h'
+
+namespace collections {
+ macro LoadKeyValuePairNoSideEffects(implicit context: Context)(o: Object):
+ KeyValuePair
+ labels MayHaveSideEffects {
+ typeswitch (o) {
+ case (a: FastJSArray): {
+ const length: Smi = a.length;
+ typeswitch (a.elements) {
+ case (elements: FixedArray): {
+ return KeyValuePair{
+ length > 0 ? array::LoadElementOrUndefined(elements, 0) :
+ Undefined,
+ length > 1 ? array::LoadElementOrUndefined(elements, 1) :
+ Undefined
+ };
+ }
+ case (elements: FixedDoubleArray): {
+ return KeyValuePair{
+ length > 0 ? array::LoadElementOrUndefined(elements, 0) :
+ Undefined,
+ length > 1 ? array::LoadElementOrUndefined(elements, 1) :
+ Undefined
+ };
+ }
+ case (Object): deferred {
+ unreachable;
+ }
+ }
+ }
+ case (receiver: JSReceiver): {
+ goto MayHaveSideEffects;
+ }
+ case (o: Object): deferred {
+ ThrowTypeError(context, kIteratorValueNotAnObject, o);
+ }
+ }
+ }
+
+ transitioning macro LoadKeyValuePair(implicit context: Context)(o: Object):
+ KeyValuePair {
+ try {
+ return LoadKeyValuePairNoSideEffects(o) otherwise Generic;
+ }
+ label Generic {
+ return KeyValuePair{
+ GetProperty(o, Convert<Smi>(0)),
+ GetProperty(o, Convert<Smi>(1))
+ };
+ }
+ }
+}
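LoadKeyValuePair above splits iterator destructuring into a side-effect-free fast path (direct element loads from a known-plain array) and a generic fallback that performs observable GetProperty calls. The same shape in C++, with toy types in place of Torque's FastJSArray and JSReceiver:

#include <cassert>
#include <optional>
#include <utility>
#include <vector>

using KeyValuePair = std::pair<int, int>;

// Toy object: either a plain array we may read directly, or something whose
// property loads could run observable user code.
struct Object {
  bool is_fast_array;
  std::vector<int> elements;
};

// Fast path: bails out (Torque's `goto MayHaveSideEffects`) unless the
// backing store is fully understood. 0 stands in for Undefined padding.
std::optional<KeyValuePair> LoadKeyValuePairNoSideEffects(const Object& o) {
  if (!o.is_fast_array) return std::nullopt;
  int key = o.elements.size() > 0 ? o.elements[0] : 0;
  int value = o.elements.size() > 1 ? o.elements[1] : 0;
  return KeyValuePair{key, value};
}

// Slow path stand-in for GetProperty(o, 0) / GetProperty(o, 1).
KeyValuePair LoadKeyValuePairGeneric(const Object& o) {
  return {o.elements.at(0), o.elements.at(1)};
}

KeyValuePair LoadKeyValuePair(const Object& o) {
  if (auto pair = LoadKeyValuePairNoSideEffects(o)) return *pair;
  return LoadKeyValuePairGeneric(o);
}

int main() {
  assert((LoadKeyValuePair({true, {1, 2}}) == KeyValuePair{1, 2}));
  assert((LoadKeyValuePair({false, {3, 4}}) == KeyValuePair{3, 4}));
  return 0;
}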
diff --git a/deps/v8/src/builtins/constants-table-builder.cc b/deps/v8/src/builtins/constants-table-builder.cc
index 26995453dd..516fed39d3 100644
--- a/deps/v8/src/builtins/constants-table-builder.cc
+++ b/deps/v8/src/builtins/constants-table-builder.cc
@@ -5,6 +5,9 @@
#include "src/builtins/constants-table-builder.h"
#include "src/heap/heap-inl.h"
+#include "src/isolate.h"
+#include "src/objects/oddball-inl.h"
+#include "src/roots-inl.h"
namespace v8 {
namespace internal {
@@ -18,8 +21,7 @@ BuiltinsConstantsTableBuilder::BuiltinsConstantsTableBuilder(Isolate* isolate)
// And that the initial value of the builtins constants table can be treated
// as a constant, which means that codegen will load it using the root
// register.
- DCHECK(isolate_->heap()->RootCanBeTreatedAsConstant(
- RootIndex::kEmptyFixedArray));
+ DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kEmptyFixedArray));
}
uint32_t BuiltinsConstantsTableBuilder::AddObject(Handle<Object> object) {
@@ -27,7 +29,7 @@ uint32_t BuiltinsConstantsTableBuilder::AddObject(Handle<Object> object) {
// Roots must not be inserted into the constants table as they are already
// accessibly from the root list.
RootIndex root_list_index;
- DCHECK(!isolate_->heap()->IsRootHandle(object, &root_list_index));
+ DCHECK(!isolate_->roots_table().IsRootHandle(object, &root_list_index));
// Not yet finalized.
DCHECK_EQ(ReadOnlyRoots(isolate_).empty_fixed_array(),
@@ -36,8 +38,12 @@ uint32_t BuiltinsConstantsTableBuilder::AddObject(Handle<Object> object) {
// Must be on the main thread.
DCHECK(ThreadId::Current().Equals(isolate_->thread_id()));
- // Must be serializing.
- DCHECK(isolate_->serializer_enabled());
+ // Must be generating embedded builtin code.
+ DCHECK(isolate_->ShouldLoadConstantsFromRootList());
+
+ // All code objects should be loaded through the root register or use
+ // pc-relative addressing.
+ DCHECK(!object->IsCode());
#endif
uint32_t* maybe_key = map_.Find(object);
@@ -57,22 +63,17 @@ void BuiltinsConstantsTableBuilder::PatchSelfReference(
// Roots must not be inserted into the constants table as they are already
  // accessible from the root list.
RootIndex root_list_index;
- DCHECK(!isolate_->heap()->IsRootHandle(code_object, &root_list_index));
+ DCHECK(!isolate_->roots_table().IsRootHandle(code_object, &root_list_index));
// Not yet finalized.
DCHECK_EQ(ReadOnlyRoots(isolate_).empty_fixed_array(),
isolate_->heap()->builtins_constants_table());
- DCHECK(isolate_->serializer_enabled());
+ DCHECK(isolate_->ShouldLoadConstantsFromRootList());
DCHECK(self_reference->IsOddball());
DCHECK(Oddball::cast(*self_reference)->kind() ==
Oddball::kSelfReferenceMarker);
-
- // During indirection generation, we always create a distinct marker for each
- // macro assembler. The canonical marker is only used when not generating a
- // snapshot.
- DCHECK(*self_reference != ReadOnlyRoots(isolate_).self_reference_marker());
#endif
uint32_t key;
@@ -87,7 +88,7 @@ void BuiltinsConstantsTableBuilder::Finalize() {
DCHECK_EQ(ReadOnlyRoots(isolate_).empty_fixed_array(),
isolate_->heap()->builtins_constants_table());
- DCHECK(isolate_->serializer_enabled());
+ DCHECK(isolate_->ShouldLoadConstantsFromRootList());
// An empty map means there's nothing to do.
if (map_.size() == 0) return;
@@ -99,7 +100,7 @@ void BuiltinsConstantsTableBuilder::Finalize() {
ConstantsMap::IteratableScope it_scope(&map_);
for (auto it = it_scope.begin(); it != it_scope.end(); ++it) {
uint32_t index = *it.entry();
- Object* value = it.key();
+ Object value = it.key();
if (value->IsCode() && Code::cast(value)->kind() == Code::BUILTIN) {
// Replace placeholder code objects with the real builtin.
// See also: SetupIsolateDelegate::PopulateWithPlaceholders.
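BuiltinsConstantsTableBuilder::AddObject is a find-or-append over an identity map: an object already in the table yields its existing slot, so every builtin referencing the same constant shares one entry. Its core, reduced to standard containers:

#include <cassert>
#include <cstdint>
#include <unordered_map>
#include <vector>

class ConstantsTableBuilder {
 public:
  uint32_t AddObject(const void* object) {
    auto it = map_.find(object);
    if (it != map_.end()) return it->second;  // deduplicated hit
    uint32_t index = static_cast<uint32_t>(objects_.size());
    objects_.push_back(object);
    map_.emplace(object, index);
    return index;
  }

 private:
  std::unordered_map<const void*, uint32_t> map_;  // identity, not value, keys
  std::vector<const void*> objects_;
};

int main() {
  int a = 0, b = 0;
  ConstantsTableBuilder builder;
  assert(builder.AddObject(&a) == 0);
  assert(builder.AddObject(&b) == 1);
  assert(builder.AddObject(&a) == 0);  // same object, same slot
  return 0;
}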
diff --git a/deps/v8/src/builtins/constants-table-builder.h b/deps/v8/src/builtins/constants-table-builder.h
index 12644d4964..53cf2b4d49 100644
--- a/deps/v8/src/builtins/constants-table-builder.h
+++ b/deps/v8/src/builtins/constants-table-builder.h
@@ -45,7 +45,7 @@ class BuiltinsConstantsTableBuilder final {
typedef IdentityMap<uint32_t, FreeStoreAllocationPolicy> ConstantsMap;
ConstantsMap map_;
- DISALLOW_COPY_AND_ASSIGN(BuiltinsConstantsTableBuilder)
+ DISALLOW_COPY_AND_ASSIGN(BuiltinsConstantsTableBuilder);
};
} // namespace internal
diff --git a/deps/v8/src/builtins/data-view.tq b/deps/v8/src/builtins/data-view.tq
index 1e86f88d83..c354313e29 100644
--- a/deps/v8/src/builtins/data-view.tq
+++ b/deps/v8/src/builtins/data-view.tq
@@ -2,7 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-module data_view {
+#include 'src/builtins/builtins-data-view-gen.h'
+
+namespace data_view {
extern operator '.buffer'
macro LoadJSArrayBufferViewBuffer(JSArrayBufferView): JSArrayBuffer;
extern operator '.byte_length'
@@ -68,8 +70,8 @@ module data_view {
return IsDetachedBuffer(view.buffer);
}
- macro ValidateDataView(
- context: Context, o: Object, method: String): JSDataView {
+ macro ValidateDataView(context: Context, o: Object, method: String):
+ JSDataView {
try {
return Cast<JSDataView>(o) otherwise CastError;
}
@@ -119,8 +121,8 @@ module data_view {
extern macro Float64InsertLowWord32(float64, uint32): float64;
extern macro Float64InsertHighWord32(float64, uint32): float64;
- extern macro LoadUint8(RawPtr, uintptr): uint32;
- extern macro LoadInt8(RawPtr, uintptr): int32;
+ extern macro DataViewBuiltinsAssembler::LoadUint8(RawPtr, uintptr): uint32;
+ extern macro DataViewBuiltinsAssembler::LoadInt8(RawPtr, uintptr): int32;
macro LoadDataView8(
buffer: JSArrayBuffer, offset: uintptr, signed: constexpr bool): Smi {
@@ -219,10 +221,10 @@ module data_view {
}
extern macro AllocateBigInt(intptr): BigInt;
- extern macro StoreBigIntBitfield(BigInt, intptr): void;
+ extern macro StoreBigIntBitfield(BigInt, uint32): void;
extern macro StoreBigIntDigit(BigInt, constexpr int31, uintptr): void;
- extern macro DataViewEncodeBigIntBits(
- constexpr bool, constexpr int31): intptr;
+ extern macro DataViewBuiltinsAssembler::DataViewEncodeBigIntBits(
+ constexpr bool, constexpr int31): uint32;
const kPositiveBigInt: constexpr bool = false;
const kNegativeBigInt: constexpr bool = true;
@@ -345,8 +347,8 @@ module data_view {
return result;
}
- macro MakeBigInt(
- lowWord: uint32, highWord: uint32, signed: constexpr bool): BigInt {
+ macro MakeBigInt(lowWord: uint32, highWord: uint32, signed: constexpr bool):
+ BigInt {
// A BigInt digit has the platform word size, so we only need one digit
// on 64-bit platforms but may need two on 32-bit.
if constexpr (Is64()) {
@@ -385,7 +387,8 @@ module data_view {
extern macro ToSmiIndex(Object, Context): Smi
labels RangeError;
- extern macro DataViewElementSize(constexpr ElementsKind): constexpr int31;
+ extern macro DataViewBuiltinsAssembler::DataViewElementSize(
+ constexpr ElementsKind): constexpr int31;
macro DataViewGet(
context: Context, receiver: Object, offset: Object,
@@ -414,7 +417,7 @@ module data_view {
let viewOffsetWord: uintptr = dataView.byte_offset;
let viewSizeFloat: float64 = Convert<float64>(dataView.byte_length);
- let elementSizeFloat: float64 = Convert<float64>(DataViewElementSize(kind));
+ let elementSizeFloat: float64 = DataViewElementSize(kind);
if (getIndexFloat + elementSizeFloat > viewSizeFloat) {
ThrowRangeError(context, kInvalidDataViewAccessorOffset);
@@ -536,7 +539,8 @@ module data_view {
extern macro TruncateFloat64ToFloat32(float64): float32;
extern macro TruncateFloat64ToWord32(float64): uint32;
- extern macro StoreWord8(RawPtr, uintptr, uint32): void;
+ extern macro DataViewBuiltinsAssembler::StoreWord8(RawPtr, uintptr, uint32):
+ void;
macro StoreDataView8(buffer: JSArrayBuffer, offset: uintptr, value: uint32) {
StoreWord8(buffer.backing_store, offset, value & 0xFF);
@@ -618,8 +622,10 @@ module data_view {
}
}
- extern macro DataViewDecodeBigIntLength(BigInt): uintptr;
- extern macro DataViewDecodeBigIntSign(BigInt): uintptr;
+ extern macro DataViewBuiltinsAssembler::DataViewDecodeBigIntLength(BigInt):
+ uint32;
+ extern macro DataViewBuiltinsAssembler::DataViewDecodeBigIntSign(BigInt):
+ uint32;
extern macro LoadBigIntDigit(BigInt, constexpr int31): uintptr;
  // We might get a BigInt here that is bigger than 64 bits, but we're only
@@ -628,8 +634,8 @@ module data_view {
macro StoreDataViewBigInt(
buffer: JSArrayBuffer, offset: uintptr, bigIntValue: BigInt,
requestedLittleEndian: bool) {
- let length: uintptr = DataViewDecodeBigIntLength(bigIntValue);
- let sign: uintptr = DataViewDecodeBigIntSign(bigIntValue);
+ let length: uint32 = DataViewDecodeBigIntLength(bigIntValue);
+ let sign: uint32 = DataViewDecodeBigIntSign(bigIntValue);
// The 32-bit words that will hold the BigInt's value in
// two's complement representation.
@@ -679,37 +685,50 @@ module data_view {
let littleEndian: bool = ToBoolean(requestedLittleEndian);
let buffer: JSArrayBuffer = dataView.buffer;
- let bigIntValue: BigInt;
- let numValue: Number;
// According to ES6 section 24.2.1.2 SetViewValue, we must perform
// the conversion before doing the bounds check.
if constexpr (kind == BIGUINT64_ELEMENTS || kind == BIGINT64_ELEMENTS) {
- bigIntValue = ToBigInt(context, value);
- } else {
- numValue = ToNumber(context, value);
- }
+ let bigIntValue: BigInt = ToBigInt(context, value);
- if (IsDetachedBuffer(buffer)) {
- ThrowTypeError(
- context, kDetachedOperation, MakeDataViewSetterNameString(kind));
- }
+ if (IsDetachedBuffer(buffer)) {
+ ThrowTypeError(
+ context, kDetachedOperation, MakeDataViewSetterNameString(kind));
+ }
- let getIndexFloat: float64 = Convert<float64>(getIndex);
- let getIndexWord: uintptr = Convert<uintptr>(getIndexFloat);
+ let getIndexFloat: float64 = Convert<float64>(getIndex);
+ let getIndexWord: uintptr = Convert<uintptr>(getIndexFloat);
- let viewOffsetWord: uintptr = dataView.byte_offset;
- let viewSizeFloat: float64 = Convert<float64>(dataView.byte_length);
- let elementSizeFloat: float64 = Convert<float64>(DataViewElementSize(kind));
+ let viewOffsetWord: uintptr = dataView.byte_offset;
+ let viewSizeFloat: float64 = Convert<float64>(dataView.byte_length);
+ let elementSizeFloat: float64 = DataViewElementSize(kind);
- if (getIndexFloat + elementSizeFloat > viewSizeFloat) {
- ThrowRangeError(context, kInvalidDataViewAccessorOffset);
- }
-
- let bufferIndex: uintptr = getIndexWord + viewOffsetWord;
+ if (getIndexFloat + elementSizeFloat > viewSizeFloat) {
+ ThrowRangeError(context, kInvalidDataViewAccessorOffset);
+ }
- if constexpr (kind == BIGUINT64_ELEMENTS || kind == BIGINT64_ELEMENTS) {
+ let bufferIndex: uintptr = getIndexWord + viewOffsetWord;
StoreDataViewBigInt(buffer, bufferIndex, bigIntValue, littleEndian);
} else {
+ let numValue: Number = ToNumber(context, value);
+
+ if (IsDetachedBuffer(buffer)) {
+ ThrowTypeError(
+ context, kDetachedOperation, MakeDataViewSetterNameString(kind));
+ }
+
+ let getIndexFloat: float64 = Convert<float64>(getIndex);
+ let getIndexWord: uintptr = Convert<uintptr>(getIndexFloat);
+
+ let viewOffsetWord: uintptr = dataView.byte_offset;
+ let viewSizeFloat: float64 = Convert<float64>(dataView.byte_length);
+ let elementSizeFloat: float64 = DataViewElementSize(kind);
+
+ if (getIndexFloat + elementSizeFloat > viewSizeFloat) {
+ ThrowRangeError(context, kInvalidDataViewAccessorOffset);
+ }
+
+ let bufferIndex: uintptr = getIndexWord + viewOffsetWord;
+
let doubleValue: float64 = ChangeNumberToFloat64(numValue);
if constexpr (kind == UINT8_ELEMENTS || kind == INT8_ELEMENTS) {
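The restructuring above duplicates the detach and bounds checks into both branches so that each runs after its conversion: per ES6 24.2.1.2 SetViewValue, ToNumber/ToBigInt happen first, and since they can run arbitrary user code (for example a valueOf that detaches the buffer), checking earlier would be unsound. A minimal model of why the order matters:

#include <functional>
#include <stdexcept>

struct Buffer {
  bool detached = false;
};

// Spec order: 1. convert, 2. re-check detachment, 3. bounds check + store.
void SetViewValue(Buffer& buffer, const std::function<double()>& to_number) {
  double value = to_number();  // may run user code that mutates `buffer`
  if (buffer.detached) {
    throw std::runtime_error("TypeError: detached operation");
  }
  (void)value;  // the bounds check and actual store would follow here
}

int main() {
  Buffer buffer;
  try {
    // A valueOf-like conversion that detaches the buffer as a side effect.
    SetViewValue(buffer, [&buffer] { buffer.detached = true; return 1.0; });
    return 1;  // should have thrown
  } catch (const std::runtime_error&) {
    return 0;  // correctly rejected only after the conversion ran
  }
}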
diff --git a/deps/v8/src/builtins/extras-utils.tq b/deps/v8/src/builtins/extras-utils.tq
new file mode 100644
index 0000000000..2b9b79739e
--- /dev/null
+++ b/deps/v8/src/builtins/extras-utils.tq
@@ -0,0 +1,24 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+namespace extras_utils {
+ extern runtime CreatePrivateSymbol(Context, Object): HeapObject;
+ extern runtime PromiseMarkAsHandled(Context, Object): Undefined;
+ extern runtime PromiseStatus(Context, Object): Smi;
+
+ javascript builtin ExtrasUtilsCreatePrivateSymbol(
+ context: Context, receiver: Object, ...arguments): HeapObject {
+ return CreatePrivateSymbol(context, arguments[0]);
+ }
+
+ javascript builtin ExtrasUtilsMarkPromiseAsHandled(
+ context: Context, receiver: Object, ...arguments): Undefined {
+ return PromiseMarkAsHandled(context, arguments[0]);
+ }
+
+ javascript builtin ExtrasUtilsPromiseState(
+ context: Context, receiver: Object, ...arguments): Smi {
+ return PromiseStatus(context, arguments[0]);
+ }
+}
diff --git a/deps/v8/src/builtins/frames.tq b/deps/v8/src/builtins/frames.tq
new file mode 100644
index 0000000000..109991af5a
--- /dev/null
+++ b/deps/v8/src/builtins/frames.tq
@@ -0,0 +1,150 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+type FrameType extends Smi
+ generates 'TNode<Smi>' constexpr 'StackFrame::Type';
+const ARGUMENTS_ADAPTOR_FRAME: constexpr FrameType
+ generates 'StackFrame::ARGUMENTS_ADAPTOR';
+const STUB_FRAME: constexpr FrameType
+ generates 'StackFrame::STUB';
+const kFrameTypeCount:
+ constexpr int31 generates 'StackFrame::NUMBER_OF_TYPES';
+
+FromConstexpr<FrameType, constexpr FrameType>(t: constexpr FrameType):
+ FrameType {
+  // Note that although FrameTypes sometimes masquerade as Smis (their
+  // LSB is a zero), they are not. For efficiency in storing them as a
+  // constant in a frame, they are simply the FrameType value shifted
+ // up by a single bit.
+ const i: constexpr uintptr = %RawConstexprCast<constexpr uintptr>(t)
+ << kSmiTagSize;
+ return %RawObjectCast<FrameType>(BitcastWordToTaggedSigned(i));
+}
+Cast<FrameType>(o: Object): FrameType
+ labels CastError {
+ if (TaggedIsNotSmi(o)) goto CastError;
+ assert(
+ (Convert<uintptr>(BitcastTaggedToWord(o)) >>> kSmiTagSize) <
+ kFrameTypeCount);
+ return %RawObjectCast<FrameType>(o);
+}
+
+type FrameBase extends RawPtr
+ generates 'TNode<RawPtrT>' constexpr 'void*';
+type StandardFrame extends FrameBase
+ generates 'TNode<RawPtrT>' constexpr 'void*';
+type ArgumentsAdaptorFrame extends FrameBase
+ generates 'TNode<RawPtrT>' constexpr 'void*';
+type StubFrame extends FrameBase
+ generates 'TNode<RawPtrT>' constexpr 'void*';
+type Frame = ArgumentsAdaptorFrame | StandardFrame | StubFrame;
+
+extern macro LoadFramePointer(): Frame;
+extern macro LoadParentFramePointer(): Frame;
+
+// Load values from a specified frame at a given offset in bytes.
+macro LoadObjectFromFrame(f: Frame, o: constexpr int32): Object {
+ return LoadBufferObject(f, o);
+}
+macro LoadPointerFromFrame(f: Frame, o: constexpr int32): RawPtr {
+ return LoadBufferPointer(f, o);
+}
+macro LoadSmiFromFrame(f: Frame, o: constexpr int32): Smi {
+ return LoadBufferSmi(f, o);
+}
+
+const kStandardFrameFunctionOffset: constexpr int31
+ generates 'StandardFrameConstants::kFunctionOffset';
+operator '.function' macro LoadFunctionFromFrame(f: Frame): JSFunction {
+ // TODO(danno): Use RawObjectCast here in order to avoid passing the implicit
+ // context, since this accessor is used in legacy CSA code through
+ // LoadTargetFromFrame
+ const result: Object = LoadObjectFromFrame(f, kStandardFrameFunctionOffset);
+ return %RawObjectCast<JSFunction>(result);
+}
+
+const kStandardFrameCallerFPOffset: constexpr int31
+ generates 'StandardFrameConstants::kCallerFPOffset';
+operator '.caller' macro LoadCallerFromFrame(f: Frame): Frame {
+ const result: RawPtr = LoadPointerFromFrame(f, kStandardFrameCallerFPOffset);
+ return %RawPointerCast<Frame>(result);
+}
+
+type ContextOrFrameType = Context | FrameType;
+Cast<ContextOrFrameType>(implicit context: Context)(o: Object):
+ ContextOrFrameType
+ labels CastError {
+ typeswitch (o) {
+ case (c: Context): {
+ return c;
+ }
+ case (t: FrameType): {
+ return t;
+ }
+ case (Object): {
+ goto CastError;
+ }
+ }
+}
+
+const kStandardFrameContextOrFrameTypeOffset: constexpr int31
+ generates 'StandardFrameConstants::kContextOrFrameTypeOffset';
+operator '.context_or_frame_type'
+macro LoadContextOrFrameTypeFromFrame(implicit context: Context)(f: Frame):
+ ContextOrFrameType {
+ return UnsafeCast<ContextOrFrameType>(
+ LoadObjectFromFrame(f, kStandardFrameContextOrFrameTypeOffset));
+}
+
+const kArgumentsAdaptorFrameLengthOffset: constexpr int31
+ generates 'ArgumentsAdaptorFrameConstants::kLengthOffset';
+operator '.length'
+macro LoadLengthFromAdapterFrame(implicit context: Context)(
+ f: ArgumentsAdaptorFrame): Smi {
+ return LoadSmiFromFrame(f, kArgumentsAdaptorFrameLengthOffset);
+}
+
+operator '==' macro FrameTypeEquals(f1: FrameType, f2: FrameType): bool {
+ return WordEqual(f1, f2);
+}
+
+macro Cast<A: type>(implicit context: Context)(o: Frame): A labels CastError;
+Cast<StandardFrame>(implicit context: Context)(f: Frame):
+ StandardFrame labels CastError {
+ const o: HeapObject =
+ Cast<HeapObject>(f.context_or_frame_type) otherwise CastError;
+ // StandardFrames (which include interpreted and JIT-compiled frames),
+ // unlike other frame types, don't have their own type marker stored in
+ // the frame, but rather have the function's context stored where the
+ // type marker is stored for other frame types. From Torque, it would
+ // be quite expensive to do the test required to distinguish interpreter
+ // frames from JITted ones (and other StandardFrame types), so
+  // StandardFrame is the level of granularity supported when iterating the
+ // stack from generated code.
+ // See the descriptions and frame layouts in src/frame-constants.h.
+ if (IsContext(o)) {
+ return %RawPointerCast<StandardFrame>(f);
+ }
+ goto CastError;
+}
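A hedged C++ sketch of the discrimination this cast performs: the marker slot holds either a Smi-tagged FrameType or the function's Context (a HeapObject); IsContextPointer is hypothetical:

    // True when fp points at a StandardFrame, i.e. when the
    // context-or-frame-type slot holds a heap pointer to a Context rather
    // than a Smi-tagged FrameType marker.
    bool IsStandardFrame(Address fp) {
      intptr_t slot = *reinterpret_cast<intptr_t*>(
          fp + StandardFrameConstants::kContextOrFrameTypeOffset);
      if ((slot & kSmiTagMask) == kSmiTag) return false;  // Smi => marker
      return IsContextPointer(slot);  // hypothetical Context check
    }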
+
+Cast<ArgumentsAdaptorFrame>(implicit context: Context)(f: Frame):
+ ArgumentsAdaptorFrame labels CastError {
+ const t: FrameType =
+ Cast<FrameType>(f.context_or_frame_type) otherwise CastError;
+ if (t == ARGUMENTS_ADAPTOR_FRAME) {
+ return %RawPointerCast<ArgumentsAdaptorFrame>(f);
+ }
+ goto CastError;
+}
+
+// Load target function from the current JS frame.
+// This is an alternative way of getting the target function in addition to
+// Parameter(Descriptor::kJSTarget). The latter should be used near the
+// beginning of builtin code while the target value is still in the register
+// and the former should be used in slow paths in order to reduce register
+// pressure on the fast path.
+macro LoadTargetFromFrame(): JSFunction {
+ return LoadFramePointer().function;
+}
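Roughly what LoadTargetFromFrame lowers to, as a hedged C++ sketch; the raw-pointer read ignores GC and write-barrier concerns:

    JSFunction* TargetFromFrame(Address fp) {
      // The function slot sits at a fixed offset from the frame pointer.
      return *reinterpret_cast<JSFunction**>(
          fp + StandardFrameConstants::kFunctionOffset);
    }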
diff --git a/deps/v8/src/builtins/growable-fixed-array-gen.cc b/deps/v8/src/builtins/growable-fixed-array-gen.cc
index 3a155e26f9..7af40748b5 100644
--- a/deps/v8/src/builtins/growable-fixed-array-gen.cc
+++ b/deps/v8/src/builtins/growable-fixed-array-gen.cc
@@ -56,9 +56,8 @@ TNode<JSArray> GrowableFixedArray::ToJSArray(TNode<Context> const context) {
}
TNode<Smi> const result_length = SmiTag(length());
- TNode<JSArray> const result =
- CAST(AllocateUninitializedJSArrayWithoutElements(array_map, result_length,
- nullptr));
+ TNode<JSArray> const result = AllocateUninitializedJSArrayWithoutElements(
+ array_map, result_length, nullptr);
StoreObjectField(result, JSObject::kElementsOffset, var_array_.value());
diff --git a/deps/v8/src/builtins/ia32/builtins-ia32.cc b/deps/v8/src/builtins/ia32/builtins-ia32.cc
index 063daed5e3..8e70a4cd0a 100644
--- a/deps/v8/src/builtins/ia32/builtins-ia32.cc
+++ b/deps/v8/src/builtins/ia32/builtins-ia32.cc
@@ -4,14 +4,22 @@
#if V8_TARGET_ARCH_IA32
+#include "src/api-arguments.h"
#include "src/base/adapters.h"
#include "src/code-factory.h"
+#include "src/counters.h"
#include "src/debug/debug.h"
#include "src/deoptimizer.h"
#include "src/frame-constants.h"
#include "src/frames.h"
+#include "src/macro-assembler-inl.h"
#include "src/objects-inl.h"
+#include "src/objects/cell.h"
+#include "src/objects/foreign.h"
+#include "src/objects/heap-number.h"
#include "src/objects/js-generator.h"
+#include "src/objects/smi.h"
+#include "src/register-configuration.h"
#include "src/wasm/wasm-linkage.h"
#include "src/wasm/wasm-objects.h"
@@ -22,9 +30,8 @@ namespace internal {
void Builtins::Generate_Adaptor(MacroAssembler* masm, Address address,
ExitFrameType exit_frame_type) {
- Assembler::SupportsRootRegisterScope supports_root_register(masm);
- __ mov(kJavaScriptCallExtraArg1Register,
- Immediate(ExternalReference::Create(address)));
+ __ Move(kJavaScriptCallExtraArg1Register,
+ Immediate(ExternalReference::Create(address)));
if (exit_frame_type == BUILTIN_EXIT) {
__ Jump(BUILTIN_CODE(masm->isolate(), AdaptorWithBuiltinExitFrame),
RelocInfo::CODE_TARGET);
@@ -64,14 +71,12 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
}
static_assert(kJavaScriptCallCodeStartRegister == ecx, "ABI mismatch");
- __ lea(ecx, FieldOperand(ecx, Code::kHeaderSize));
- __ jmp(ecx);
+ __ JumpCodeObject(ecx);
}
namespace {
void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
- Assembler::SupportsRootRegisterScope supports_root_register(masm);
// ----------- S t a t e -------------
// -- eax: number of arguments
// -- edi: constructor function
@@ -139,9 +144,9 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
__ ret(0);
}
-static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
- Register scratch, Label* stack_overflow,
- bool include_receiver = false) {
+void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
+ Register scratch, Label* stack_overflow,
+ bool include_receiver = false) {
// Check the stack for overflow. We are not trying to catch
// interruptions (e.g. debug break and preemption) here, so the "real stack
// limit" is checked.
@@ -149,7 +154,7 @@ static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
ExternalReference::address_of_real_stack_limit(masm->isolate());
// Compute the space that is left as a negative number in scratch. If
// we already overflowed, this will be a positive number.
- __ mov(scratch, __ StaticVariable(real_stack_limit));
+ __ mov(scratch, __ ExternalReferenceAsOperand(real_stack_limit, scratch));
__ sub(scratch, esp);
// Add the size of the arguments.
static_assert(kPointerSize == 4,
@@ -167,7 +172,6 @@ static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
// The construct stub for ES5 constructor functions and ES6 class constructors.
void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
- Assembler::SupportsRootRegisterScope supports_root_register(masm);
// ----------- S t a t e -------------
// -- eax: number of arguments (untagged)
// -- edi: constructor function
@@ -204,7 +208,8 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
__ j(not_zero, &not_create_implicit_receiver);
// If not derived class constructor: Allocate the new receiver object.
- __ IncrementCounter(masm->isolate()->counters()->constructed_objects(), 1);
+ __ IncrementCounter(masm->isolate()->counters()->constructed_objects(), 1,
+ eax);
__ Call(BUILTIN_CODE(masm->isolate(), FastNewObject),
RelocInfo::CODE_TARGET);
__ jmp(&post_instantiation_deopt_entry, Label::kNear);
@@ -354,21 +359,149 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
}
void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
- Assembler::SupportsRootRegisterScope supports_root_register(masm);
Generate_JSBuiltinsConstructStubHelper(masm);
}
void Builtins::Generate_ConstructedNonConstructable(MacroAssembler* masm) {
- Assembler::SupportsRootRegisterScope supports_root_register(masm);
FrameScope scope(masm, StackFrame::INTERNAL);
__ push(edi);
__ CallRuntime(Runtime::kThrowConstructedNonConstructable);
}
+namespace {
+
+// Called with the native C calling convention. The corresponding function
+// signature is either:
+//
+// using JSEntryFunction = GeneratedCode<Address(
+// Address root_register_value, Address new_target, Address target,
+// Address receiver, intptr_t argc, Address** argv)>;
+// or
+// using JSEntryFunction = GeneratedCode<Address(
+// Address root_register_value, MicrotaskQueue* microtask_queue)>;
+void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
+ Builtins::Name entry_trampoline) {
+ Label invoke, handler_entry, exit;
+ Label not_outermost_js, not_outermost_js_2;
+
+ { // NOLINT. Scope block confuses linter.
+ NoRootArrayScope uninitialized_root_register(masm);
+
+ // Set up frame.
+ __ push(ebp);
+ __ mov(ebp, esp);
+
+ // Push marker in two places.
+ __ push(Immediate(StackFrame::TypeToMarker(type)));
+ // Reserve a slot for the context. It is filled after the root register has
+ // been set up.
+ __ sub(esp, Immediate(kPointerSize));
+ // Save callee-saved registers (C calling conventions).
+ __ push(edi);
+ __ push(esi);
+ __ push(ebx);
+
+ // Initialize the root register based on the given Isolate* argument.
+ // C calling convention. The first argument is passed on the stack.
+ __ mov(kRootRegister,
+ Operand(ebp, EntryFrameConstants::kRootRegisterValueOffset));
+ }
+
+ // Save copies of the top frame descriptor on the stack.
+ ExternalReference c_entry_fp = ExternalReference::Create(
+ IsolateAddressId::kCEntryFPAddress, masm->isolate());
+ __ push(__ ExternalReferenceAsOperand(c_entry_fp, edi));
+
+ // Store the context address in the previously-reserved slot.
+ ExternalReference context_address = ExternalReference::Create(
+ IsolateAddressId::kContextAddress, masm->isolate());
+ __ mov(edi, __ ExternalReferenceAsOperand(context_address, edi));
+ static constexpr int kOffsetToContextSlot = -2 * kPointerSize;
+ __ mov(Operand(ebp, kOffsetToContextSlot), edi);
+
+ // If this is the outermost JS call, set js_entry_sp value.
+ ExternalReference js_entry_sp = ExternalReference::Create(
+ IsolateAddressId::kJSEntrySPAddress, masm->isolate());
+ __ cmp(__ ExternalReferenceAsOperand(js_entry_sp, edi), Immediate(0));
+ __ j(not_equal, &not_outermost_js, Label::kNear);
+ __ mov(__ ExternalReferenceAsOperand(js_entry_sp, edi), ebp);
+ __ push(Immediate(StackFrame::OUTERMOST_JSENTRY_FRAME));
+ __ jmp(&invoke, Label::kNear);
+ __ bind(&not_outermost_js);
+ __ push(Immediate(StackFrame::INNER_JSENTRY_FRAME));
+
+ // Jump to a faked try block that does the invoke, with a faked catch
+ // block that sets the pending exception.
+ __ jmp(&invoke);
+ __ bind(&handler_entry);
+
+ // Store the current pc as the handler offset. It's used later to create the
+ // handler table.
+ masm->isolate()->builtins()->SetJSEntryHandlerOffset(handler_entry.pos());
+
+ // Caught exception: Store result (exception) in the pending exception
+  // field of the isolate and return a failure sentinel.
+ ExternalReference pending_exception = ExternalReference::Create(
+ IsolateAddressId::kPendingExceptionAddress, masm->isolate());
+ __ mov(__ ExternalReferenceAsOperand(pending_exception, edi), eax);
+ __ Move(eax, masm->isolate()->factory()->exception());
+ __ jmp(&exit);
+
+ // Invoke: Link this frame into the handler chain.
+ __ bind(&invoke);
+ __ PushStackHandler(edi);
+
+ // Invoke the function by calling through JS entry trampoline builtin and
+ // pop the faked function when we return.
+ Handle<Code> trampoline_code =
+ masm->isolate()->builtins()->builtin_handle(entry_trampoline);
+ __ Call(trampoline_code, RelocInfo::CODE_TARGET);
+
+ // Unlink this frame from the handler chain.
+ __ PopStackHandler(edi);
+
+ __ bind(&exit);
+
+ // Check if the current stack frame is marked as the outermost JS frame.
+ __ pop(edi);
+ __ cmp(edi, Immediate(StackFrame::OUTERMOST_JSENTRY_FRAME));
+ __ j(not_equal, &not_outermost_js_2);
+ __ mov(__ ExternalReferenceAsOperand(js_entry_sp, edi), Immediate(0));
+ __ bind(&not_outermost_js_2);
+
+ // Restore the top frame descriptor from the stack.
+ __ pop(__ ExternalReferenceAsOperand(c_entry_fp, edi));
+
+ // Restore callee-saved registers (C calling conventions).
+ __ pop(ebx);
+ __ pop(esi);
+ __ pop(edi);
+ __ add(esp, Immediate(2 * kPointerSize)); // remove markers
+
+ // Restore frame pointer and return.
+ __ pop(ebp);
+ __ ret(0);
+}
+
+} // namespace
+
+void Builtins::Generate_JSEntry(MacroAssembler* masm) {
+ Generate_JSEntryVariant(masm, StackFrame::ENTRY,
+ Builtins::kJSEntryTrampoline);
+}
+
+void Builtins::Generate_JSConstructEntry(MacroAssembler* masm) {
+ Generate_JSEntryVariant(masm, StackFrame::CONSTRUCT_ENTRY,
+ Builtins::kJSConstructEntryTrampoline);
+}
+
+void Builtins::Generate_JSRunMicrotasksEntry(MacroAssembler* masm) {
+ Generate_JSEntryVariant(masm, StackFrame::ENTRY,
+ Builtins::kRunMicrotasksTrampoline);
+}
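A hedged sketch of the C++ call site implied by the signature comment in Generate_JSEntryVariant; the real caller lives in execution.cc, and stub_entry plus the root-register accessor are illustrative:

    using JSEntryFunction = GeneratedCode<Address(
        Address root_register_value, Address new_target, Address target,
        Address receiver, intptr_t argc, Address** argv)>;

    // stub_entry: instruction start of the JSEntry builtin.
    auto entry = JSEntryFunction::FromAddress(isolate, stub_entry);
    // The first argument seeds kRootRegister inside the NoRootArrayScope.
    Address result = entry.Call(isolate->isolate_root(), new_target, target,
                                receiver, argc, argv);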
+
static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
bool is_construct) {
- ProfileEntryHookStub::MaybeCallEntryHook(masm);
-
{
FrameScope scope(masm, StackFrame::INTERNAL);
@@ -378,7 +511,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// Setup the context (we need to use the caller context from the isolate).
ExternalReference context_address = ExternalReference::Create(
IsolateAddressId::kContextAddress, masm->isolate());
- __ mov(esi, __ StaticVariable(context_address));
+ __ mov(esi, __ ExternalReferenceAsOperand(context_address, scratch1));
// Load the previous frame pointer (edx) to access C arguments
__ mov(scratch1, Operand(ebp, 0));
@@ -438,15 +571,22 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
}
void Builtins::Generate_JSEntryTrampoline(MacroAssembler* masm) {
- Assembler::SupportsRootRegisterScope supports_root_register(masm);
Generate_JSEntryTrampolineHelper(masm, false);
}
void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
- Assembler::SupportsRootRegisterScope supports_root_register(masm);
Generate_JSEntryTrampolineHelper(masm, true);
}
+void Builtins::Generate_RunMicrotasksTrampoline(MacroAssembler* masm) {
+  // This expects two C++ function parameters passed by Invoke() in
+  // execution.cc; the microtask_queue argument is read from the entry
+  // frame on the stack rather than from a parameter register.
+ __ mov(RunMicrotasksDescriptor::MicrotaskQueueRegister(),
+ Operand(ebp, EntryFrameConstants::kMicrotaskQueueArgOffset));
+ __ Jump(BUILTIN_CODE(masm->isolate(), RunMicrotasks), RelocInfo::CODE_TARGET);
+}
+
static void GetSharedFunctionInfoBytecode(MacroAssembler* masm,
Register sfi_data,
Register scratch1) {
@@ -462,8 +602,6 @@ static void GetSharedFunctionInfoBytecode(MacroAssembler* masm,
// static
void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
- Assembler::SupportsRootRegisterScope supports_root_register(masm);
-
// ----------- S t a t e -------------
// -- eax : the value to pass to the generator
// -- edx : the JSGeneratorObject to resume
@@ -485,20 +623,20 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
Label stepping_prepared;
ExternalReference debug_hook =
ExternalReference::debug_hook_on_function_call_address(masm->isolate());
- __ cmpb(__ StaticVariable(debug_hook), Immediate(0));
+ __ cmpb(__ ExternalReferenceAsOperand(debug_hook, ecx), Immediate(0));
__ j(not_equal, &prepare_step_in_if_stepping);
// Flood function if we need to continue stepping in the suspended generator.
ExternalReference debug_suspended_generator =
ExternalReference::debug_suspended_generator_address(masm->isolate());
- __ cmp(edx, __ StaticVariable(debug_suspended_generator));
+ __ cmp(edx, __ ExternalReferenceAsOperand(debug_suspended_generator, ecx));
__ j(equal, &prepare_step_in_suspended_generator);
__ bind(&stepping_prepared);
// Check the stack for overflow. We are not trying to catch interruptions
// (i.e. debug break and preemption) here, so check the "real stack limit".
Label stack_overflow;
- __ CompareRoot(esp, ecx, RootIndex::kRealStackLimit);
+ __ CompareRealStackLimit(esp);
__ j(below, &stack_overflow);
// Pop return address.
@@ -516,7 +654,6 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// -----------------------------------
{
- Assembler::AllowExplicitEbxAccessScope root_is_spilled(masm);
__ movd(xmm0, ebx);
// Copy the function arguments from the generator object's register file.
@@ -567,8 +704,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// undefined because generator functions are non-constructable.
static_assert(kJavaScriptCallCodeStartRegister == ecx, "ABI mismatch");
__ mov(ecx, FieldOperand(edi, JSFunction::kCodeOffset));
- __ add(ecx, Immediate(Code::kHeaderSize - kHeapObjectTag));
- __ jmp(ecx);
+ __ JumpCodeObject(ecx);
}
__ bind(&prepare_step_in_if_stepping);
@@ -652,17 +788,15 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
// -- eax : argument count (preserved for callee if needed, and caller)
// -- edx : new target (preserved for callee if needed, and caller)
// -- edi : target function (preserved for callee if needed, and caller)
+ // -- ecx : feedback vector (also used as scratch, value is not preserved)
// -----------------------------------
DCHECK(!AreAliased(eax, edx, edi, scratch));
Label optimized_code_slot_is_weak_ref, fallthrough;
Register closure = edi;
- // Load the feedback vector from the closure.
+ // Scratch contains feedback_vector.
Register feedback_vector = scratch;
- __ mov(feedback_vector,
- FieldOperand(closure, JSFunction::kFeedbackCellOffset));
- __ mov(feedback_vector, FieldOperand(feedback_vector, Cell::kValueOffset));
// Load the optimized code from the feedback vector and re-use the register.
Register optimized_code_entry = scratch;
@@ -682,6 +816,9 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
Immediate(Smi::FromEnum(OptimizationMarker::kNone)));
__ j(equal, &fallthrough);
+ // TODO(v8:8394): The logging of first execution will break if
+ // feedback vectors are not allocated. We need to find a different way of
+ // logging these events if required.
TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry,
OptimizationMarker::kLogFirstExecution,
Runtime::kFunctionFirstExecution);
@@ -729,8 +866,7 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure,
edx, eax);
static_assert(kJavaScriptCallCodeStartRegister == ecx, "ABI mismatch");
- __ Move(ecx, optimized_code_entry);
- __ add(ecx, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ LoadCodeObjectEntry(ecx, optimized_code_entry);
__ pop(edx);
__ pop(eax);
__ jmp(ecx);
@@ -822,19 +958,32 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
// The function builds an interpreter frame. See InterpreterFrameConstants in
// frames.h for its layout.
void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
- Assembler::SupportsRootRegisterScope supports_root_register(masm);
- ProfileEntryHookStub::MaybeCallEntryHook(masm);
+ Register closure = edi;
- __ VerifyRootRegister();
+ // The bytecode array could have been flushed from the shared function info,
+ // if so, call into CompileLazy.
+ Label compile_lazy;
+ __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+ __ mov(ecx, FieldOperand(ecx, SharedFunctionInfo::kFunctionDataOffset));
+ GetSharedFunctionInfoBytecode(masm, ecx, eax);
+ __ CmpObjectType(ecx, BYTECODE_ARRAY_TYPE, eax);
+ __ j(not_equal, &compile_lazy);
- Register closure = edi;
+ Register feedback_vector = ecx;
+ Label push_stack_frame;
+ // Load feedback vector and check if it is valid. If valid, check for
+  // optimized code and update invocation count. Otherwise, set up the stack
+  // frame.
+ __ mov(feedback_vector,
+ FieldOperand(closure, JSFunction::kFeedbackCellOffset));
+ __ mov(feedback_vector, FieldOperand(feedback_vector, Cell::kValueOffset));
+ __ JumpIfRoot(feedback_vector, RootIndex::kUndefinedValue, &push_stack_frame);
// Read off the optimized code slot in the closure's feedback vector, and if
// there is optimized code or an optimization marker, call that instead.
MaybeTailCallOptimizedCodeSlot(masm, ecx);
// Load the feedback vector and increment the invocation count.
- Register feedback_vector = ecx;
__ mov(feedback_vector,
FieldOperand(closure, JSFunction::kFeedbackCellOffset));
__ mov(feedback_vector, FieldOperand(feedback_vector, Cell::kValueOffset));
@@ -843,6 +992,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Open a frame scope to indicate that there is a frame on the stack. The
// MANUAL indicates that the scope shouldn't actually generate code to set
// up the frame (that is done below).
+ __ bind(&push_stack_frame);
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ push(ebp); // Caller's frame pointer.
__ mov(ebp, esp);
@@ -854,9 +1004,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ mov(eax, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
__ mov(kInterpreterBytecodeArrayRegister,
FieldOperand(eax, SharedFunctionInfo::kFunctionDataOffset));
- __ Push(eax);
GetSharedFunctionInfoBytecode(masm, kInterpreterBytecodeArrayRegister, eax);
- __ Pop(eax);
// Check function data field is actually a BytecodeArray object.
if (FLAG_debug_code) {
@@ -889,9 +1037,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
Label ok;
__ mov(eax, esp);
__ sub(eax, frame_size);
- ExternalReference stack_limit =
- ExternalReference::address_of_real_stack_limit(masm->isolate());
- __ cmp(eax, __ StaticVariable(stack_limit));
+ __ CompareRealStackLimit(eax);
__ j(above_equal, &ok);
__ CallRuntime(Runtime::kThrowStackOverflow);
__ bind(&ok);
@@ -899,7 +1045,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// If ok, push undefined as the initial value for all register file entries.
Label loop_header;
Label loop_check;
- __ mov(eax, Immediate(masm->isolate()->factory()->undefined_value()));
+ __ Move(eax, masm->isolate()->factory()->undefined_value());
__ jmp(&loop_check);
__ bind(&loop_header);
// TODO(rmcilroy): Consider doing more than one push per loop iteration.
@@ -930,15 +1076,14 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// handler at the current bytecode offset.
Label do_dispatch;
__ bind(&do_dispatch);
- __ mov(kInterpreterDispatchTableRegister,
- Immediate(ExternalReference::interpreter_dispatch_table_address(
- masm->isolate())));
+ __ Move(kInterpreterDispatchTableRegister,
+ Immediate(ExternalReference::interpreter_dispatch_table_address(
+ masm->isolate())));
__ movzx_b(ecx, Operand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister, times_1, 0));
__ mov(
kJavaScriptCallCodeStartRegister,
Operand(kInterpreterDispatchTableRegister, ecx, times_pointer_size, 0));
- __ VerifyRootRegister();
__ call(kJavaScriptCallCodeStartRegister);
masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(masm->pc_offset());
@@ -962,15 +1107,17 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ bind(&do_return);
// The return value is in eax.
LeaveInterpreterFrame(masm, edx, ecx);
- __ VerifyRootRegister();
__ ret(0);
+
+ __ bind(&compile_lazy);
+ GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
+ __ int3(); // Should not return.
}
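The dispatch sequence above (a movzx_b of the current bytecode, then an indexed load from the dispatch table) in hedged C-like form:

    // Fetch the handler for the bytecode at the current offset; the
    // generated code then calls it via kJavaScriptCallCodeStartRegister.
    Address HandlerFor(const uint8_t* bytecode_array, intptr_t bytecode_offset,
                       Address* dispatch_table) {
      uint8_t bytecode = bytecode_array[bytecode_offset];  // movzx_b
      return dispatch_table[bytecode];                     // times_pointer_size
    }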
static void Generate_InterpreterPushArgs(MacroAssembler* masm,
Register array_limit,
Register start_address) {
- Assembler::SupportsRootRegisterScope supports_root_register(masm);
// ----------- S t a t e -------------
// -- start_address : Pointer to the last argument in the args array.
// -- array_limit : Pointer to one before the first argument in the
@@ -990,7 +1137,6 @@ static void Generate_InterpreterPushArgs(MacroAssembler* masm,
void Builtins::Generate_InterpreterPushArgsThenCallImpl(
MacroAssembler* masm, ConvertReceiverMode receiver_mode,
InterpreterPushArgsMode mode) {
- Assembler::SupportsRootRegisterScope supports_root_register(masm);
DCHECK(mode != InterpreterPushArgsMode::kArrayFunction);
// ----------- S t a t e -------------
// -- eax : the number of arguments (not including the receiver)
@@ -1061,7 +1207,6 @@ void Generate_InterpreterPushZeroAndArgsAndReturnAddress(
MacroAssembler* masm, Register num_args, Register start_addr,
Register scratch1, Register scratch2, int num_slots_to_move,
Label* stack_overflow) {
- Assembler::SupportsRootRegisterScope supports_root_register(masm);
// We have to move return address and the temporary registers above it
// before we can copy arguments onto the stack. To achieve this:
// Step 1: Increment the stack pointer by num_args + 1 (for receiver).
@@ -1121,7 +1266,6 @@ void Generate_InterpreterPushZeroAndArgsAndReturnAddress(
// static
void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
MacroAssembler* masm, InterpreterPushArgsMode mode) {
- Assembler::SupportsRootRegisterScope supports_root_register(masm);
// ----------- S t a t e -------------
// -- eax : the number of arguments (not including the receiver)
// -- ecx : the address of the first argument to be pushed. Subsequent
@@ -1156,10 +1300,11 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
__ Pop(kJavaScriptCallNewTargetRegister);
__ Pop(kJavaScriptCallTargetRegister);
__ PushReturnAddressFrom(eax);
- __ movd(eax, xmm0); // Reload number of arguments.
__ AssertFunction(kJavaScriptCallTargetRegister);
- __ AssertUndefinedOrAllocationSite(kJavaScriptCallExtraArg1Register);
+ __ AssertUndefinedOrAllocationSite(kJavaScriptCallExtraArg1Register, eax);
+
+ __ movd(eax, xmm0); // Reload number of arguments.
__ Jump(BUILTIN_CODE(masm->isolate(), ArrayConstructorImpl),
RelocInfo::CODE_TARGET);
} else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
@@ -1195,14 +1340,16 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
// Set the return address to the correct point in the interpreter entry
// trampoline.
Label builtin_trampoline, trampoline_loaded;
- Smi* interpreter_entry_return_pc_offset(
+ Smi interpreter_entry_return_pc_offset(
masm->isolate()->heap()->interpreter_entry_return_pc_offset());
DCHECK_NE(interpreter_entry_return_pc_offset, Smi::kZero);
static constexpr Register scratch = ecx;
- // If the SFI function_data is an InterpreterData, get the trampoline stored
- // in it, otherwise get the trampoline from the builtins list.
+ // If the SFI function_data is an InterpreterData, the function will have a
+ // custom copy of the interpreter entry trampoline for profiling. If so,
+ // get the custom trampoline, otherwise grab the entry address of the global
+ // trampoline.
__ mov(scratch, Operand(ebp, StandardFrameConstants::kFunctionOffset));
__ mov(scratch, FieldOperand(scratch, JSFunction::kSharedFunctionInfoOffset));
__ mov(scratch,
@@ -1213,21 +1360,26 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
__ mov(scratch,
FieldOperand(scratch, InterpreterData::kInterpreterTrampolineOffset));
+ __ add(scratch, Immediate(Code::kHeaderSize - kHeapObjectTag));
__ jmp(&trampoline_loaded, Label::kNear);
__ bind(&builtin_trampoline);
- __ Move(scratch, BUILTIN_CODE(masm->isolate(), InterpreterEntryTrampoline));
+ __ mov(scratch,
+ __ ExternalReferenceAsOperand(
+ ExternalReference::
+ address_of_interpreter_entry_trampoline_instruction_start(
+ masm->isolate()),
+ scratch));
__ bind(&trampoline_loaded);
__ Pop(eax);
- __ add(scratch, Immediate(interpreter_entry_return_pc_offset->value() +
- Code::kHeaderSize - kHeapObjectTag));
+ __ add(scratch, Immediate(interpreter_entry_return_pc_offset->value()));
__ push(scratch);
// Initialize the dispatch table register.
- __ mov(kInterpreterDispatchTableRegister,
- Immediate(ExternalReference::interpreter_dispatch_table_address(
- masm->isolate())));
+ __ Move(kInterpreterDispatchTableRegister,
+ Immediate(ExternalReference::interpreter_dispatch_table_address(
+ masm->isolate())));
// Get the bytecode array pointer from the frame.
__ mov(kInterpreterBytecodeArrayRegister,
@@ -1258,8 +1410,6 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
}
void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
- Assembler::SupportsRootRegisterScope supports_root_register(masm);
-
// Get bytecode array and bytecode offset from the stack frame.
__ mov(kInterpreterBytecodeArrayRegister,
Operand(ebp, InterpreterFrameConstants::kBytecodeArrayFromFp));
@@ -1286,13 +1436,10 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
}
void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
- Assembler::SupportsRootRegisterScope supports_root_register(masm);
Generate_InterpreterEnterBytecode(masm);
}
void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
- Assembler::SupportsRootRegisterScope supports_root_register(masm);
-
// ----------- S t a t e -------------
// -- eax : argument count (preserved for callee)
// -- edx : new target (preserved for callee)
@@ -1361,21 +1508,14 @@ void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
// which has been reset to the compile lazy builtin.
static_assert(kJavaScriptCallCodeStartRegister == ecx, "ABI mismatch");
__ mov(ecx, FieldOperand(edi, JSFunction::kCodeOffset));
- __ add(ecx, Immediate(Code::kHeaderSize - kHeapObjectTag));
- __ jmp(ecx);
+ __ JumpCodeObject(ecx);
}
namespace {
void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
bool java_script_builtin,
bool with_result) {
-#ifdef V8_EMBEDDED_BUILTINS
- // TODO(v8:6666): Fold into Default config once root is fully supported.
- const RegisterConfiguration* config(
- RegisterConfiguration::PreserveRootIA32());
-#else
const RegisterConfiguration* config(RegisterConfiguration::Default());
-#endif
int allocatable_register_count = config->num_allocatable_general_registers();
if (with_result) {
// Overwrite the hole inserted by the deoptimizer with the return value from
@@ -1405,42 +1545,24 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
} // namespace
void Builtins::Generate_ContinueToCodeStubBuiltin(MacroAssembler* masm) {
-#ifdef V8_EMBEDDED_BUILTINS
- // TODO(v8:6666): Remove the ifdef once root is preserved by default.
- Assembler::SupportsRootRegisterScope supports_root_register(masm);
-#endif
Generate_ContinueToBuiltinHelper(masm, false, false);
}
void Builtins::Generate_ContinueToCodeStubBuiltinWithResult(
MacroAssembler* masm) {
-#ifdef V8_EMBEDDED_BUILTINS
- // TODO(v8:6666): Remove the ifdef once root is preserved by default.
- Assembler::SupportsRootRegisterScope supports_root_register(masm);
-#endif
Generate_ContinueToBuiltinHelper(masm, false, true);
}
void Builtins::Generate_ContinueToJavaScriptBuiltin(MacroAssembler* masm) {
-#ifdef V8_EMBEDDED_BUILTINS
- // TODO(v8:6666): Remove the ifdef once root is preserved by default.
- Assembler::SupportsRootRegisterScope supports_root_register(masm);
-#endif
Generate_ContinueToBuiltinHelper(masm, true, false);
}
void Builtins::Generate_ContinueToJavaScriptBuiltinWithResult(
MacroAssembler* masm) {
-#ifdef V8_EMBEDDED_BUILTINS
- // TODO(v8:6666): Remove the ifdef once root is preserved by default.
- Assembler::SupportsRootRegisterScope supports_root_register(masm);
-#endif
Generate_ContinueToBuiltinHelper(masm, true, true);
}
void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
- Assembler::SupportsRootRegisterScope supports_root_register(masm);
-
{
FrameScope scope(masm, StackFrame::INTERNAL);
__ CallRuntime(Runtime::kNotifyDeoptimized);
@@ -1454,8 +1576,6 @@ void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
// static
void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
- Assembler::SupportsRootRegisterScope supports_root_register(masm);
-
// ----------- S t a t e -------------
// -- eax : argc
// -- esp[0] : return address
@@ -1524,8 +1644,6 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
// static
void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
- Assembler::SupportsRootRegisterScope supports_root_register(masm);
-
// Stack Layout:
// esp[0] : Return address
// esp[8] : Argument n
@@ -1571,8 +1689,6 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
}
void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
- Assembler::SupportsRootRegisterScope supports_root_register(masm);
-
// ----------- S t a t e -------------
// -- eax : argc
// -- esp[0] : return address
@@ -1629,8 +1745,6 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
}
void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
- Assembler::SupportsRootRegisterScope supports_root_register(masm);
-
// ----------- S t a t e -------------
// -- eax : argc
// -- esp[0] : return address
@@ -1694,8 +1808,6 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
}
void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) {
- Assembler::SupportsRootRegisterScope supports_root_register(masm);
-
// ----------- S t a t e -------------
// -- eax : argc
// -- esp[0] : return address
@@ -1758,7 +1870,6 @@ static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
// static
void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
Handle<Code> code) {
- Assembler::SupportsRootRegisterScope supports_root_register(masm);
// ----------- S t a t e -------------
// -- edi : target
// -- esi : context for the Call / Construct builtin
@@ -1775,7 +1886,6 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
__ movd(xmm2, eax);
__ movd(xmm3, esi); // Spill the context.
- // TODO(v8:6666): Remove this usage of ebx to enable kRootRegister support.
const Register kArgumentsList = esi;
const Register kArgumentsLength = ecx;
@@ -1850,7 +1960,6 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
CallOrConstructMode mode,
Handle<Code> code) {
- Assembler::SupportsRootRegisterScope supports_root_register(masm);
// ----------- S t a t e -------------
// -- eax : the number of arguments (not including the receiver)
// -- edi : the target to call (can be any Object)
@@ -1943,8 +2052,6 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
// static
void Builtins::Generate_CallFunction(MacroAssembler* masm,
ConvertReceiverMode mode) {
- Assembler::SupportsRootRegisterScope supports_root_register(masm);
-
// ----------- S t a t e -------------
// -- eax : the number of arguments (not including the receiver)
// -- edi : the function to call (checked to be a JSFunction)
@@ -2052,7 +2159,6 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
namespace {
void Generate_PushBoundArguments(MacroAssembler* masm) {
- Assembler::SupportsRootRegisterScope supports_root_register(masm);
// ----------- S t a t e -------------
// -- eax : the number of arguments (not including the receiver)
// -- edx : new.target (only in case of [[Construct]])
@@ -2085,7 +2191,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
// Check the stack for overflow. We are not trying to catch interruptions
// (i.e. debug break and preemption) here, so check the "real stack
// limit".
- __ CompareRoot(esp, ecx, RootIndex::kRealStackLimit);
+ __ CompareRealStackLimit(esp);
__ j(above_equal, &done, Label::kNear);
// Restore the stack pointer.
__ lea(esp, Operand(esp, edx, times_pointer_size, 0));
@@ -2142,7 +2248,6 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
// static
void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
- Assembler::SupportsRootRegisterScope supports_root_register(masm);
// ----------- S t a t e -------------
// -- eax : the number of arguments (not including the receiver)
// -- edi : the function to call (checked to be a JSBoundFunction)
@@ -2164,23 +2269,28 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
// static
void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
- Assembler::SupportsRootRegisterScope supports_root_register(masm);
// ----------- S t a t e -------------
// -- eax : the number of arguments (not including the receiver)
// -- edi : the target to call (can be any Object).
// -----------------------------------
- Label non_callable, non_function, non_smi;
+ Label non_callable, non_function, non_smi, non_jsfunction,
+ non_jsboundfunction;
__ JumpIfSmi(edi, &non_callable);
__ bind(&non_smi);
__ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
- __ j(equal, masm->isolate()->builtins()->CallFunction(mode),
- RelocInfo::CODE_TARGET);
+ __ j(not_equal, &non_jsfunction);
+ __ Jump(masm->isolate()->builtins()->CallFunction(mode),
+ RelocInfo::CODE_TARGET);
+
+ __ bind(&non_jsfunction);
__ CmpInstanceType(ecx, JS_BOUND_FUNCTION_TYPE);
- __ j(equal, BUILTIN_CODE(masm->isolate(), CallBoundFunction),
- RelocInfo::CODE_TARGET);
+ __ j(not_equal, &non_jsboundfunction);
+ __ Jump(BUILTIN_CODE(masm->isolate(), CallBoundFunction),
+ RelocInfo::CODE_TARGET);
// Check if target is a proxy and call CallProxy external builtin
+ __ bind(&non_jsboundfunction);
__ test_b(FieldOperand(ecx, Map::kBitFieldOffset),
Immediate(Map::IsCallableBit::kMask));
__ j(zero, &non_callable);
@@ -2212,7 +2322,6 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
// static
void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
- Assembler::SupportsRootRegisterScope supports_root_register(masm);
// ----------- S t a t e -------------
// -- eax : the number of arguments (not including the receiver)
// -- edx : the new target (checked to be a constructor)
@@ -2245,7 +2354,6 @@ void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
// static
void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
- Assembler::SupportsRootRegisterScope supports_root_register(masm);
// ----------- S t a t e -------------
// -- eax : the number of arguments (not including the receiver)
// -- edx : the new target (checked to be a constructor)
@@ -2281,27 +2389,31 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
// -----------------------------------
// Check if target is a Smi.
- Label non_constructor, non_proxy;
- __ JumpIfSmi(edi, &non_constructor, Label::kNear);
+ Label non_constructor, non_proxy, non_jsfunction, non_jsboundfunction;
+ __ JumpIfSmi(edi, &non_constructor);
// Check if target has a [[Construct]] internal method.
__ mov(ecx, FieldOperand(edi, HeapObject::kMapOffset));
__ test_b(FieldOperand(ecx, Map::kBitFieldOffset),
Immediate(Map::IsConstructorBit::kMask));
- __ j(zero, &non_constructor, Label::kNear);
+ __ j(zero, &non_constructor);
// Dispatch based on instance type.
__ CmpInstanceType(ecx, JS_FUNCTION_TYPE);
- __ j(equal, BUILTIN_CODE(masm->isolate(), ConstructFunction),
- RelocInfo::CODE_TARGET);
+ __ j(not_equal, &non_jsfunction);
+ __ Jump(BUILTIN_CODE(masm->isolate(), ConstructFunction),
+ RelocInfo::CODE_TARGET);
// Only dispatch to bound functions after checking whether they are
// constructors.
+ __ bind(&non_jsfunction);
__ CmpInstanceType(ecx, JS_BOUND_FUNCTION_TYPE);
- __ j(equal, BUILTIN_CODE(masm->isolate(), ConstructBoundFunction),
- RelocInfo::CODE_TARGET);
+ __ j(not_equal, &non_jsboundfunction);
+ __ Jump(BUILTIN_CODE(masm->isolate(), ConstructBoundFunction),
+ RelocInfo::CODE_TARGET);
// Only dispatch to proxies after checking whether they are constructors.
+ __ bind(&non_jsboundfunction);
__ CmpInstanceType(ecx, JS_PROXY_TYPE);
__ j(not_equal, &non_proxy);
__ Jump(BUILTIN_CODE(masm->isolate(), ConstructProxy),
@@ -2333,14 +2445,9 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// -- edi : function (passed through to callee)
// -----------------------------------
- Assembler::SupportsRootRegisterScope supports_root_register(masm);
-
const Register kExpectedNumberOfArgumentsRegister = ecx;
- Label invoke, dont_adapt_arguments, stack_overflow;
- __ IncrementCounter(masm->isolate()->counters()->arguments_adaptors(), 1);
-
- Label enough, too_few;
+ Label invoke, dont_adapt_arguments, stack_overflow, enough, too_few;
__ cmp(kExpectedNumberOfArgumentsRegister,
SharedFunctionInfo::kDontAdaptArgumentsSentinel);
__ j(equal, &dont_adapt_arguments);
@@ -2403,7 +2510,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
Label fill;
__ bind(&fill);
__ inc(eax);
- __ push(Immediate(masm->isolate()->factory()->undefined_value()));
+ __ Push(Immediate(masm->isolate()->factory()->undefined_value()));
__ cmp(eax, kExpectedNumberOfArgumentsRegister);
__ j(less, &fill);
@@ -2420,8 +2527,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// edi : function (passed through to callee)
static_assert(kJavaScriptCallCodeStartRegister == ecx, "ABI mismatch");
__ mov(ecx, FieldOperand(edi, JSFunction::kCodeOffset));
- __ add(ecx, Immediate(Code::kHeaderSize - kHeapObjectTag));
- __ call(ecx);
+ __ CallCodeObject(ecx);
// Store offset of return address for deoptimizer.
masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(masm->pc_offset());
@@ -2436,8 +2542,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ bind(&dont_adapt_arguments);
static_assert(kJavaScriptCallCodeStartRegister == ecx, "ABI mismatch");
__ mov(ecx, FieldOperand(edi, JSFunction::kCodeOffset));
- __ add(ecx, Immediate(Code::kHeaderSize - kHeapObjectTag));
- __ jmp(ecx);
+ __ JumpCodeObject(ecx);
__ bind(&stack_overflow);
{
@@ -2448,8 +2553,6 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
}
void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
- Assembler::SupportsRootRegisterScope supports_root_register(masm);
-
// Lookup the function in the JavaScript frame.
__ mov(eax, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
__ mov(eax, Operand(eax, JavaScriptFrameConstants::kFunctionOffset));
@@ -2493,11 +2596,9 @@ void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
}
void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
- Assembler::SupportsRootRegisterScope supports_root_register(masm);
-
// The function index was put in edi by the jump table trampoline.
// Convert to Smi for the runtime call.
- __ SmiTag(edi);
+ __ SmiTag(kWasmCompileLazyFuncIndexRegister);
{
HardAbortScope hard_abort(masm); // Avoid calls to Abort.
FrameScope scope(masm, StackFrame::WASM_COMPILE_LAZY);
@@ -2505,7 +2606,6 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
// Save all parameter registers (see wasm-linkage.cc). They might be
// overwritten in the runtime call below. We don't have any callee-saved
// registers in wasm, so no need to store anything else.
- Assembler::AllowExplicitEbxAccessScope root_is_spilled(masm);
static_assert(WasmCompileLazyFrameConstants::kNumberOfSavedGpParamRegs ==
arraysize(wasm::kGpParamRegisters),
"frame size mismatch");
@@ -2525,17 +2625,16 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
// Push the WASM instance as an explicit argument to WasmCompileLazy.
__ Push(kWasmInstanceRegister);
// Push the function index as second argument.
- __ Push(edi);
+ __ Push(kWasmCompileLazyFuncIndexRegister);
// Load the correct CEntry builtin from the instance object.
__ mov(ecx, FieldOperand(kWasmInstanceRegister,
WasmInstanceObject::kCEntryStubOffset));
// Initialize the JavaScript context with 0. CEntry will use it to
// set the current context on the isolate.
- __ Move(kContextRegister, Smi::kZero);
+ __ Move(kContextRegister, Smi::zero());
{
// At this point, ebx has been spilled to the stack but is not yet
// overwritten with another value. We can still use it as kRootRegister.
- Assembler::SupportsRootRegisterScope root_is_unclobbered(masm);
__ CallRuntimeWithCEntry(Runtime::kWasmCompileLazy, ecx);
}
// The entrypoint address is the return value.
@@ -2569,11 +2668,6 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
// If argv_mode == kArgvInRegister:
// ecx: pointer to the first argument
-#ifdef V8_EMBEDDED_BUILTINS
- // TODO(v8:6666): Remove the ifdef once branch load poisoning is removed.
- Assembler::SupportsRootRegisterScope supports_root_register(masm);
-#endif
-
STATIC_ASSERT(eax == kRuntimeCallArgCountRegister);
STATIC_ASSERT(ecx == kRuntimeCallArgvRegister);
STATIC_ASSERT(edx == kRuntimeCallFunctionRegister);
@@ -2584,8 +2678,6 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
kRuntimeCallFunctionRegister, kContextRegister,
kJSFunctionRegister, kRootRegister));
- ProfileEntryHookStub::MaybeCallEntryHook(masm);
-
// Reserve space on the stack for the three arguments passed to the call. If
// result size is greater than can be returned in registers, also reserve
// space for the hidden argument for the result location, and space for the
@@ -2596,7 +2688,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
if (argv_mode == kArgvInRegister) {
DCHECK(save_doubles == kDontSaveFPRegs);
DCHECK(!builtin_exit_frame);
- __ EnterApiExitFrame(arg_stack_space);
+ __ EnterApiExitFrame(arg_stack_space, edi);
// Move argc and argv into the correct registers.
__ mov(esi, ecx);
@@ -2622,26 +2714,26 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
// Call C function.
__ mov(Operand(esp, 0 * kPointerSize), edi); // argc.
__ mov(Operand(esp, 1 * kPointerSize), esi); // argv.
- __ mov(Operand(esp, 2 * kPointerSize),
- Immediate(ExternalReference::isolate_address(masm->isolate())));
+ __ Move(ecx, Immediate(ExternalReference::isolate_address(masm->isolate())));
+ __ mov(Operand(esp, 2 * kPointerSize), ecx);
__ call(kRuntimeCallFunctionRegister);
// Result is in eax or edx:eax - do not destroy these registers!
// Check result for exception sentinel.
Label exception_returned;
- __ cmp(eax, masm->isolate()->factory()->exception());
+ __ CompareRoot(eax, RootIndex::kException);
__ j(equal, &exception_returned);
// Check that there is no pending exception, otherwise we
// should have returned the exception sentinel.
if (FLAG_debug_code) {
__ push(edx);
- __ mov(edx, Immediate(masm->isolate()->factory()->the_hole_value()));
+ __ LoadRoot(edx, RootIndex::kTheHoleValue);
Label okay;
ExternalReference pending_exception_address = ExternalReference::Create(
IsolateAddressId::kPendingExceptionAddress, masm->isolate());
- __ cmp(edx, __ StaticVariable(pending_exception_address));
+ __ cmp(edx, __ ExternalReferenceAsOperand(pending_exception_address, ecx));
// Cannot use check here as it attempts to generate call into runtime.
__ j(equal, &okay, Label::kNear);
__ int3();
@@ -2675,15 +2767,17 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
__ PrepareCallCFunction(3, eax);
__ mov(Operand(esp, 0 * kPointerSize), Immediate(0)); // argc.
__ mov(Operand(esp, 1 * kPointerSize), Immediate(0)); // argv.
- __ mov(Operand(esp, 2 * kPointerSize),
- Immediate(ExternalReference::isolate_address(masm->isolate())));
+ __ Move(esi,
+ Immediate(ExternalReference::isolate_address(masm->isolate())));
+ __ mov(Operand(esp, 2 * kPointerSize), esi);
__ CallCFunction(find_handler, 3);
}
// Retrieve the handler context, SP and FP.
- __ mov(esi, __ StaticVariable(pending_handler_context_address));
- __ mov(esp, __ StaticVariable(pending_handler_sp_address));
- __ mov(ebp, __ StaticVariable(pending_handler_fp_address));
+ __ mov(esp, __ ExternalReferenceAsOperand(pending_handler_sp_address, esi));
+ __ mov(ebp, __ ExternalReferenceAsOperand(pending_handler_fp_address, esi));
+ __ mov(esi,
+ __ ExternalReferenceAsOperand(pending_handler_context_address, esi));
// If the handler is a JS frame, restore the context to the frame. Note that
// the context will be set to (esi == 0) for non-JS frames.
@@ -2693,25 +2787,13 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
__ mov(Operand(ebp, StandardFrameConstants::kContextOffset), esi);
__ bind(&skip);
-#ifdef V8_EMBEDDED_BUILTINS
- STATIC_ASSERT(kRootRegister == kSpeculationPoisonRegister);
- CHECK(!FLAG_untrusted_code_mitigations);
-#else
- // Reset the masking register. This is done independent of the underlying
- // feature flag {FLAG_untrusted_code_mitigations} to make the snapshot work
- // with both configurations. It is safe to always do this, because the
- // underlying register is caller-saved and can be arbitrarily clobbered.
- __ ResetSpeculationPoisonRegister();
-#endif
-
// Compute the handler entry address and jump to it.
- __ mov(edi, __ StaticVariable(pending_handler_entrypoint_address));
+ __ mov(edi, __ ExternalReferenceAsOperand(pending_handler_entrypoint_address,
+ edi));
__ jmp(edi);
}
void Builtins::Generate_DoubleToI(MacroAssembler* masm) {
- Assembler::SupportsRootRegisterScope supports_root_register(masm);
-
Label check_negative, process_64_bits, done;
// Account for return address and saved regs.
@@ -2725,7 +2807,6 @@ void Builtins::Generate_DoubleToI(MacroAssembler* masm) {
MemOperand return_operand = mantissa_operand;
Register scratch1 = ebx;
- Assembler::AllowExplicitEbxAccessScope root_is_spilled(masm);
// Since we must use ecx for shifts below, use some other register (eax)
// to calculate the result.
@@ -2806,8 +2887,6 @@ void Builtins::Generate_DoubleToI(MacroAssembler* masm) {
}
void Builtins::Generate_MathPowInternal(MacroAssembler* masm) {
- Assembler::SupportsRootRegisterScope supports_root_register(masm);
-
const Register exponent = eax;
const Register scratch = ecx;
const XMMRegister double_result = xmm3;
@@ -2938,56 +3017,7 @@ void Builtins::Generate_MathPowInternal(MacroAssembler* masm) {
__ ret(0);
}
-namespace {
-
-void GenerateInternalArrayConstructorCase(MacroAssembler* masm,
- ElementsKind kind) {
- Label not_zero_case, not_one_case;
- Label normal_sequence;
-
- __ test(eax, eax);
- __ j(not_zero, &not_zero_case);
- __ Jump(CodeFactory::InternalArrayNoArgumentConstructor(masm->isolate(), kind)
- .code(),
- RelocInfo::CODE_TARGET);
-
- __ bind(&not_zero_case);
- __ cmp(eax, 1);
- __ j(greater, &not_one_case);
-
- if (IsFastPackedElementsKind(kind)) {
- // We might need to create a holey array
- // look at the first argument
- __ mov(ecx, Operand(esp, kPointerSize));
- __ test(ecx, ecx);
- __ j(zero, &normal_sequence);
-
- __ Jump(CodeFactory::InternalArraySingleArgumentConstructor(
- masm->isolate(), GetHoleyElementsKind(kind))
- .code(),
- RelocInfo::CODE_TARGET);
- }
-
- __ bind(&normal_sequence);
- __ Jump(
- CodeFactory::InternalArraySingleArgumentConstructor(masm->isolate(), kind)
- .code(),
- RelocInfo::CODE_TARGET);
-
- __ bind(&not_one_case);
- // Load undefined into the allocation site parameter as required by
- // ArrayNArgumentsConstructor.
- __ mov(kJavaScriptCallExtraArg1Register,
- masm->isolate()->factory()->undefined_value());
- Handle<Code> code = BUILTIN_CODE(masm->isolate(), ArrayNArgumentsConstructor);
- __ Jump(code, RelocInfo::CODE_TARGET);
-}
-
-} // namespace
-
void Builtins::Generate_InternalArrayConstructorImpl(MacroAssembler* masm) {
- Assembler::SupportsRootRegisterScope supports_root_register(masm);
-
// ----------- S t a t e -------------
// -- eax : argc
// -- edi : constructor
@@ -3006,35 +3036,805 @@ void Builtins::Generate_InternalArrayConstructorImpl(MacroAssembler* masm) {
__ Assert(not_zero, AbortReason::kUnexpectedInitialMapForArrayFunction);
__ CmpObjectType(ecx, MAP_TYPE, ecx);
__ Assert(equal, AbortReason::kUnexpectedInitialMapForArrayFunction);
- }
- // Figure out the right elements kind
- __ mov(ecx, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
+ // Figure out the right elements kind
+ __ mov(ecx, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
- // Load the map's "bit field 2" into |result|. We only need the first byte,
- // but the following masking takes care of that anyway.
- __ mov(ecx, FieldOperand(ecx, Map::kBitField2Offset));
- // Retrieve elements_kind from bit field 2.
- __ DecodeField<Map::ElementsKindBits>(ecx);
+ // Load the map's "bit field 2" into |result|. We only need the first byte,
+ // but the following masking takes care of that anyway.
+ __ mov(ecx, FieldOperand(ecx, Map::kBitField2Offset));
+ // Retrieve elements_kind from bit field 2.
+ __ DecodeField<Map::ElementsKindBits>(ecx);
- if (FLAG_debug_code) {
- Label done;
+ // Initial elements kind should be packed elements.
__ cmp(ecx, Immediate(PACKED_ELEMENTS));
- __ j(equal, &done);
- __ cmp(ecx, Immediate(HOLEY_ELEMENTS));
- __ Assert(
- equal,
- AbortReason::kInvalidElementsKindForInternalArrayOrInternalPackedArray);
- __ bind(&done);
+ __ Assert(equal, AbortReason::kInvalidElementsKindForInternalPackedArray);
+
+ // No arguments should be passed.
+ __ test(eax, eax);
+ __ Assert(zero, AbortReason::kWrongNumberOfArgumentsForInternalPackedArray);
+ }
+
+ __ Jump(
+ BUILTIN_CODE(masm->isolate(), InternalArrayNoArgumentConstructor_Packed),
+ RelocInfo::CODE_TARGET);
+}
+
+namespace {
+
+// Generates an Operand for saving parameters after PrepareCallApiFunction.
+Operand ApiParameterOperand(int index) {
+ return Operand(esp, index * kPointerSize);
+}
+
+// Prepares the stack for an API call: enters an API exit frame and reserves
+// space for the arguments, which must then be stored in
+// ApiParameterOperand(0), ApiParameterOperand(1) etc. Saves the context
+// (esi); in debug builds esi is then zapped with kZapValue to catch
+// accidental uses of the context during the call.
+void PrepareCallApiFunction(MacroAssembler* masm, int argc, Register scratch) {
+ __ EnterApiExitFrame(argc, scratch);
+ if (__ emit_debug_code()) {
+ __ mov(esi, Immediate(bit_cast<int32_t>(kZapValue)));
+ }
+}
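A hypothetical usage sketch, assuming three parameter slots and register choices like those used elsewhere in this file:

    PrepareCallApiFunction(masm, 3, eax);
    __ mov(ApiParameterOperand(0), edi);  // e.g. argc
    __ mov(ApiParameterOperand(1), esi);  // e.g. argv
    __ mov(ApiParameterOperand(2),
           Immediate(ExternalReference::isolate_address(masm->isolate())));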
+
+// Calls an API function. Allocates a HandleScope, extracts the returned
+// value from the handle and propagates exceptions. Clobbers esi, edi and
+// caller-save registers. Restores context. On return, removes
+// stack_space * kPointerSize bytes of GC-visited stack space.
+void CallApiFunctionAndReturn(MacroAssembler* masm, Register function_address,
+ ExternalReference thunk_ref,
+ Operand thunk_last_arg, int stack_space,
+ Operand* stack_space_operand,
+ Operand return_value_operand) {
+ Isolate* isolate = masm->isolate();
+
+ ExternalReference next_address =
+ ExternalReference::handle_scope_next_address(isolate);
+ ExternalReference limit_address =
+ ExternalReference::handle_scope_limit_address(isolate);
+ ExternalReference level_address =
+ ExternalReference::handle_scope_level_address(isolate);
+
+ DCHECK(edx == function_address);
+ // Allocate HandleScope in callee-save registers.
+ __ add(__ ExternalReferenceAsOperand(level_address, esi), Immediate(1));
+ __ mov(esi, __ ExternalReferenceAsOperand(next_address, esi));
+ __ mov(edi, __ ExternalReferenceAsOperand(limit_address, edi));
+
+ if (FLAG_log_timer_events) {
+ FrameScope frame(masm, StackFrame::MANUAL);
+ __ PushSafepointRegisters();
+ __ PrepareCallCFunction(1, eax);
+ __ Move(Operand(esp, 0),
+ Immediate(ExternalReference::isolate_address(isolate)));
+ __ CallCFunction(ExternalReference::log_enter_external_function(), 1);
+ __ PopSafepointRegisters();
+ }
+
+ Label profiler_disabled;
+ Label end_profiler_check;
+ __ Move(eax, Immediate(ExternalReference::is_profiling_address(isolate)));
+ __ cmpb(Operand(eax, 0), Immediate(0));
+ __ j(zero, &profiler_disabled);
+
+ // Additional parameter is the address of the actual getter function.
+ __ mov(thunk_last_arg, function_address);
+ // Call the api function.
+ __ Move(eax, Immediate(thunk_ref));
+ __ call(eax);
+ __ jmp(&end_profiler_check);
+
+ __ bind(&profiler_disabled);
+ // Call the api function.
+ __ call(function_address);
+ __ bind(&end_profiler_check);
+
+ if (FLAG_log_timer_events) {
+ FrameScope frame(masm, StackFrame::MANUAL);
+ __ PushSafepointRegisters();
+ __ PrepareCallCFunction(1, eax);
+ __ mov(eax, Immediate(ExternalReference::isolate_address(isolate)));
+ __ mov(Operand(esp, 0), eax);
+ __ CallCFunction(ExternalReference::log_leave_external_function(), 1);
+ __ PopSafepointRegisters();
+ }
+
+ Label prologue;
+ // Load the value from ReturnValue
+ __ mov(eax, return_value_operand);
+
+ Label promote_scheduled_exception;
+ Label delete_allocated_handles;
+ Label leave_exit_frame;
+
+ __ bind(&prologue);
+ // No more valid handles (the result handle was the last one). Restore
+ // previous handle scope.
+ __ mov(__ ExternalReferenceAsOperand(next_address, ecx), esi);
+ __ sub(__ ExternalReferenceAsOperand(level_address, ecx), Immediate(1));
+ __ Assert(above_equal, AbortReason::kInvalidHandleScopeLevel);
+ __ cmp(edi, __ ExternalReferenceAsOperand(limit_address, ecx));
+ __ j(not_equal, &delete_allocated_handles);
+
+ // Leave the API exit frame.
+ __ bind(&leave_exit_frame);
+ if (stack_space_operand != nullptr) {
+ DCHECK_EQ(stack_space, 0);
+ __ mov(edx, *stack_space_operand);
+ }
+ __ LeaveApiExitFrame();
+
+ // Check if the function scheduled an exception.
+ ExternalReference scheduled_exception_address =
+ ExternalReference::scheduled_exception_address(isolate);
+ __ mov(ecx, __ ExternalReferenceAsOperand(scheduled_exception_address, ecx));
+ __ CompareRoot(ecx, RootIndex::kTheHoleValue);
+ __ j(not_equal, &promote_scheduled_exception);
+
+#if DEBUG
+ // Check if the function returned a valid JavaScript value.
+ Label ok;
+ Register return_value = eax;
+ Register map = ecx;
+
+ __ JumpIfSmi(return_value, &ok, Label::kNear);
+ __ mov(map, FieldOperand(return_value, HeapObject::kMapOffset));
+
+ __ CmpInstanceType(map, LAST_NAME_TYPE);
+ __ j(below_equal, &ok, Label::kNear);
+
+ __ CmpInstanceType(map, FIRST_JS_RECEIVER_TYPE);
+ __ j(above_equal, &ok, Label::kNear);
+
+ __ CompareRoot(map, RootIndex::kHeapNumberMap);
+ __ j(equal, &ok, Label::kNear);
+
+ __ CompareRoot(return_value, RootIndex::kUndefinedValue);
+ __ j(equal, &ok, Label::kNear);
+
+ __ CompareRoot(return_value, RootIndex::kTrueValue);
+ __ j(equal, &ok, Label::kNear);
+
+ __ CompareRoot(return_value, RootIndex::kFalseValue);
+ __ j(equal, &ok, Label::kNear);
+
+ __ CompareRoot(return_value, RootIndex::kNullValue);
+ __ j(equal, &ok, Label::kNear);
+
+ __ Abort(AbortReason::kAPICallReturnedInvalidObject);
+
+ __ bind(&ok);
+#endif
+
+ if (stack_space_operand == nullptr) {
+ DCHECK_NE(stack_space, 0);
+ __ ret(stack_space * kPointerSize);
+ } else {
+ DCHECK_EQ(0, stack_space);
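+ // The number of bytes to drop was computed at runtime (loaded into edx
+ // above): pop the return address into ecx, drop the arguments, and jump
+ // back through ecx.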
+ __ pop(ecx);
+ __ add(esp, edx);
+ __ jmp(ecx);
+ }
+
+ // Re-throw by promoting a scheduled exception.
+ __ bind(&promote_scheduled_exception);
+ __ TailCallRuntime(Runtime::kPromoteScheduledException);
+
+ // HandleScope limit has changed. Delete allocated extensions.
+ ExternalReference delete_extensions =
+ ExternalReference::delete_handle_scope_extensions();
+ __ bind(&delete_allocated_handles);
+ __ mov(__ ExternalReferenceAsOperand(limit_address, ecx), edi);
+ __ mov(edi, eax);
+ __ Move(eax, Immediate(ExternalReference::isolate_address(isolate)));
+ __ mov(Operand(esp, 0), eax);
+ __ Move(eax, Immediate(delete_extensions));
+ __ call(eax);
+ __ mov(eax, edi);
+ __ jmp(&leave_exit_frame);
+}
+
+} // namespace
+
+void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- esi : kTargetContext
+ // -- edx : kApiFunctionAddress
+ // -- ecx : kArgc
+ // --
+ // -- esp[0] : return address
+ // -- esp[4] : last argument
+ // -- ...
+ // -- esp[argc * 4] : first argument
+ // -- esp[(argc + 1) * 4] : receiver
+ // -- esp[(argc + 2) * 4] : kHolder
+ // -- esp[(argc + 3) * 4] : kCallData
+ // -----------------------------------
+
+ Register api_function_address = edx;
+ Register argc = ecx;
+ Register scratch = eax;
+
+ DCHECK(!AreAliased(api_function_address, argc, scratch));
+
+ // Stack offsets (without argc).
+ static constexpr int kReceiverOffset = kPointerSize;
+ static constexpr int kHolderOffset = kReceiverOffset + kPointerSize;
+ static constexpr int kCallDataOffset = kHolderOffset + kPointerSize;
+
+ // Extra stack arguments are: the receiver, kHolder, kCallData.
+ static constexpr int kExtraStackArgumentCount = 3;
+
+ typedef FunctionCallbackArguments FCA;
+
+ STATIC_ASSERT(FCA::kArgsLength == 6);
+ STATIC_ASSERT(FCA::kNewTargetIndex == 5);
+ STATIC_ASSERT(FCA::kDataIndex == 4);
+ STATIC_ASSERT(FCA::kReturnValueOffset == 3);
+ STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
+ STATIC_ASSERT(FCA::kIsolateIndex == 1);
+ STATIC_ASSERT(FCA::kHolderIndex == 0);
+
+ // Set up FunctionCallbackInfo's implicit_args on the stack as follows:
+ //
+ // Current state:
+ // esp[0]: return address
+ //
+ // Target state:
+ // esp[0 * kPointerSize]: return address
+ // esp[1 * kPointerSize]: kHolder
+ // esp[2 * kPointerSize]: kIsolate
+ // esp[3 * kPointerSize]: undefined (kReturnValueDefaultValue)
+ // esp[4 * kPointerSize]: undefined (kReturnValue)
+ // esp[5 * kPointerSize]: kData
+ // esp[6 * kPointerSize]: undefined (kNewTarget)
+
+ // Reserve space on the stack.
+ __ sub(esp, Immediate(FCA::kArgsLength * kPointerSize));
+
+ // Return address (the old stack location is overwritten later on).
+ __ mov(scratch, Operand(esp, FCA::kArgsLength * kPointerSize));
+ __ mov(Operand(esp, 0 * kPointerSize), scratch);
+
+ // kHolder.
+ __ mov(scratch, Operand(esp, argc, times_pointer_size,
+ FCA::kArgsLength * kPointerSize + kHolderOffset));
+ __ mov(Operand(esp, 1 * kPointerSize), scratch);
+
+ // kIsolate.
+ __ Move(scratch,
+ Immediate(ExternalReference::isolate_address(masm->isolate())));
+ __ mov(Operand(esp, 2 * kPointerSize), scratch);
+
+ // kReturnValueDefaultValue, kReturnValue, and kNewTarget.
+ __ LoadRoot(scratch, RootIndex::kUndefinedValue);
+ __ mov(Operand(esp, 3 * kPointerSize), scratch);
+ __ mov(Operand(esp, 4 * kPointerSize), scratch);
+ __ mov(Operand(esp, 6 * kPointerSize), scratch);
+
+ // kData.
+ __ mov(scratch, Operand(esp, argc, times_pointer_size,
+ FCA::kArgsLength * kPointerSize + kCallDataOffset));
+ __ mov(Operand(esp, 5 * kPointerSize), scratch);
+
+ // Keep a pointer to kHolder (= implicit_args) in a scratch register.
+ // We use it below to set up the FunctionCallbackInfo object.
+ __ lea(scratch, Operand(esp, 1 * kPointerSize));
+
+ // The API function takes a reference to v8::Arguments. If the CPU profiler
+ // is enabled, a wrapper function will be called and we need to pass
+ // the address of the callback as an additional parameter. Always allocate
+ // space for it.
+ static constexpr int kApiArgc = 1 + 1;
+
+ // Allocate the v8::Arguments structure in the arguments' space since
+ // it's not controlled by GC.
+ static constexpr int kApiStackSpace = 4;
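+ // These four slots hold FunctionCallbackInfo's implicit_args_, values_ and
+ // length_ fields, plus the byte count to drop on return (see the stores
+ // into ApiParameterOperand(kApiArgc + 0..3) below).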
+
+ PrepareCallApiFunction(masm, kApiArgc + kApiStackSpace, edi);
+
+ // FunctionCallbackInfo::implicit_args_ (points at kHolder as set up above).
+ __ mov(ApiParameterOperand(kApiArgc + 0), scratch);
+
+ // FunctionCallbackInfo::values_ (points at the first varargs argument passed
+ // on the stack).
+ __ lea(scratch, Operand(scratch, argc, times_pointer_size,
+ (FCA::kArgsLength - 1) * kPointerSize));
+ __ mov(ApiParameterOperand(kApiArgc + 1), scratch);
+
+ // FunctionCallbackInfo::length_.
+ __ mov(ApiParameterOperand(kApiArgc + 2), argc);
+
+ // We also store the number of bytes to drop from the stack after returning
+ // from the API function here.
+ __ lea(scratch,
+ Operand(argc, times_pointer_size,
+ (FCA::kArgsLength + kExtraStackArgumentCount) * kPointerSize));
+ __ mov(ApiParameterOperand(kApiArgc + 3), scratch);
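+ // E.g. for argc == 2 this is (2 + 6 + 3) * kPointerSize = 44 bytes: the two
+ // varargs, the FCA::kArgsLength implicit args, and receiver/kHolder/kCallData.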
+
+ // v8::InvocationCallback's argument.
+ __ lea(scratch, ApiParameterOperand(kApiArgc + 0));
+ __ mov(ApiParameterOperand(0), scratch);
+
+ ExternalReference thunk_ref = ExternalReference::invoke_function_callback();
+
+ // There are two stack slots above the arguments we constructed on the stack:
+ // the stored ebp (pushed by EnterApiExitFrame), and the return address.
+ static constexpr int kStackSlotsAboveFCA = 2;
+ Operand return_value_operand(
+ ebp, (kStackSlotsAboveFCA + FCA::kReturnValueOffset) * kPointerSize);
+
+ static constexpr int kUseStackSpaceOperand = 0;
+ Operand stack_space_operand = ApiParameterOperand(kApiArgc + 3);
+ CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
+ ApiParameterOperand(1), kUseStackSpaceOperand,
+ &stack_space_operand, return_value_operand);
+}
+
+void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
+ // Build the v8::PropertyCallbackInfo::args_ array on the stack and push the
+ // property name below the exit frame so the GC is aware of them.
+ STATIC_ASSERT(PropertyCallbackArguments::kShouldThrowOnErrorIndex == 0);
+ STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 1);
+ STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 2);
+ STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 3);
+ STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 4);
+ STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 5);
+ STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 6);
+ STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 7);
+
+ Register receiver = ApiGetterDescriptor::ReceiverRegister();
+ Register holder = ApiGetterDescriptor::HolderRegister();
+ Register callback = ApiGetterDescriptor::CallbackRegister();
+ Register scratch = edi;
+ DCHECK(!AreAliased(receiver, holder, callback, scratch));
+
+ __ pop(scratch); // Pop return address to extend the frame.
+ __ push(receiver);
+ __ push(FieldOperand(callback, AccessorInfo::kDataOffset));
+ __ PushRoot(RootIndex::kUndefinedValue); // ReturnValue
+ // ReturnValue default value
+ __ PushRoot(RootIndex::kUndefinedValue);
+ __ Push(Immediate(ExternalReference::isolate_address(masm->isolate())));
+ __ push(holder);
+ __ push(Immediate(Smi::zero())); // should_throw_on_error -> false
+ __ push(FieldOperand(callback, AccessorInfo::kNameOffset));
+ __ push(scratch); // Restore return address.
+
+ // v8::PropertyCallbackInfo::args_ array and name handle.
+ const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
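+ // That is 8 slots: the 7-element args_ array plus the pushed name handle,
+ // so the getter returns with ret(8 * kPointerSize) == ret(32) on ia32.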
+
+ // Allocate v8::PropertyCallbackInfo object, arguments for callback and
+ // space for optional callback address parameter (in case CPU profiler is
+ // active) in non-GCed stack space.
+ const int kApiArgc = 3 + 1;
+
+ PrepareCallApiFunction(masm, kApiArgc, scratch);
+
+ // Load the address of the v8::PropertyCallbackInfo::args_ array. The value
+ // in ebp here corresponds to esp + kPointerSize before
+ // PrepareCallApiFunction.
+ __ lea(scratch, Operand(ebp, kPointerSize + 2 * kPointerSize));
+ // Create the v8::PropertyCallbackInfo object on the stack and initialize
+ // its args_ field.
+ Operand info_object = ApiParameterOperand(3);
+ __ mov(info_object, scratch);
+
+ // Name as handle.
+ __ sub(scratch, Immediate(kPointerSize));
+ __ mov(ApiParameterOperand(0), scratch);
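+ // The name was pushed directly below the args_ array, so args_ minus one
+ // slot is the address of the name slot and acts as a handle to it.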
+ // Arguments pointer.
+ __ lea(scratch, info_object);
+ __ mov(ApiParameterOperand(1), scratch);
+ // Reserve space for optional callback address parameter.
+ Operand thunk_last_arg = ApiParameterOperand(2);
+
+ ExternalReference thunk_ref =
+ ExternalReference::invoke_accessor_getter_callback();
+
+ __ mov(scratch, FieldOperand(callback, AccessorInfo::kJsGetterOffset));
+ Register function_address = edx;
+ __ mov(function_address,
+ FieldOperand(scratch, Foreign::kForeignAddressOffset));
+ // +3 is to skip prolog, return address and name handle.
+ Operand return_value_operand(
+ ebp, (PropertyCallbackArguments::kReturnValueOffset + 3) * kPointerSize);
+ Operand* const kUseStackSpaceConstant = nullptr;
+ CallApiFunctionAndReturn(masm, function_address, thunk_ref, thunk_last_arg,
+ kStackUnwindSpace, kUseStackSpaceConstant,
+ return_value_operand);
+}
+
+void Builtins::Generate_DirectCEntry(MacroAssembler* masm) {
+ __ int3(); // Unused on this architecture.
+}
+
+namespace {
+
+enum Direction { FORWARD, BACKWARD };
+enum Alignment { MOVE_ALIGNED, MOVE_UNALIGNED };
+
+// Expects registers:
+// esi - source, aligned if alignment == MOVE_ALIGNED
+// edi - destination, always aligned
+// ecx - count (copy size in bytes)
+// edx - loop count (number of 64 byte chunks)
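+// The main loop streams 64 bytes per iteration through xmm0-xmm3; the tail
+// then copies a remaining 32-byte and 16-byte block before branching to
+// move_last_15 for the final 0-15 bytes.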
+void MemMoveEmitMainLoop(MacroAssembler* masm, Label* move_last_15,
+ Direction direction, Alignment alignment) {
+ Register src = esi;
+ Register dst = edi;
+ Register count = ecx;
+ Register loop_count = edx;
+ Label loop, move_last_31, move_last_63;
+ __ cmp(loop_count, 0);
+ __ j(equal, &move_last_63);
+ __ bind(&loop);
+ // Main loop. Copy in 64 byte chunks.
+ if (direction == BACKWARD) __ sub(src, Immediate(0x40));
+ __ movdq(alignment == MOVE_ALIGNED, xmm0, Operand(src, 0x00));
+ __ movdq(alignment == MOVE_ALIGNED, xmm1, Operand(src, 0x10));
+ __ movdq(alignment == MOVE_ALIGNED, xmm2, Operand(src, 0x20));
+ __ movdq(alignment == MOVE_ALIGNED, xmm3, Operand(src, 0x30));
+ if (direction == FORWARD) __ add(src, Immediate(0x40));
+ if (direction == BACKWARD) __ sub(dst, Immediate(0x40));
+ __ movdqa(Operand(dst, 0x00), xmm0);
+ __ movdqa(Operand(dst, 0x10), xmm1);
+ __ movdqa(Operand(dst, 0x20), xmm2);
+ __ movdqa(Operand(dst, 0x30), xmm3);
+ if (direction == FORWARD) __ add(dst, Immediate(0x40));
+ __ dec(loop_count);
+ __ j(not_zero, &loop);
+ // At most 63 bytes left to copy.
+ __ bind(&move_last_63);
+ __ test(count, Immediate(0x20));
+ __ j(zero, &move_last_31);
+ if (direction == BACKWARD) __ sub(src, Immediate(0x20));
+ __ movdq(alignment == MOVE_ALIGNED, xmm0, Operand(src, 0x00));
+ __ movdq(alignment == MOVE_ALIGNED, xmm1, Operand(src, 0x10));
+ if (direction == FORWARD) __ add(src, Immediate(0x20));
+ if (direction == BACKWARD) __ sub(dst, Immediate(0x20));
+ __ movdqa(Operand(dst, 0x00), xmm0);
+ __ movdqa(Operand(dst, 0x10), xmm1);
+ if (direction == FORWARD) __ add(dst, Immediate(0x20));
+ // At most 31 bytes left to copy.
+ __ bind(&move_last_31);
+ __ test(count, Immediate(0x10));
+ __ j(zero, move_last_15);
+ if (direction == BACKWARD) __ sub(src, Immediate(0x10));
+ __ movdq(alignment == MOVE_ALIGNED, xmm0, Operand(src, 0));
+ if (direction == FORWARD) __ add(src, Immediate(0x10));
+ if (direction == BACKWARD) __ sub(dst, Immediate(0x10));
+ __ movdqa(Operand(dst, 0), xmm0);
+ if (direction == FORWARD) __ add(dst, Immediate(0x10));
+}
+
+void MemMoveEmitPopAndReturn(MacroAssembler* masm) {
+ __ pop(esi);
+ __ pop(edi);
+ __ ret(0);
+}
+
+} // namespace
+
+void Builtins::Generate_MemMove(MacroAssembler* masm) {
+ // Generated code is put into a fixed, unmovable buffer, and not into
+ // the V8 heap. We can't, and don't, refer to any relocatable addresses
+ // (e.g. the JavaScript nan-object).
+
+ // 32-bit C declaration function calls pass arguments on stack.
+
+ // Stack layout:
+ // esp[12]: Third argument, size.
+ // esp[8]: Second argument, source pointer.
+ // esp[4]: First argument, destination pointer.
+ // esp[0]: return address
+
+ const int kDestinationOffset = 1 * kPointerSize;
+ const int kSourceOffset = 2 * kPointerSize;
+ const int kSizeOffset = 3 * kPointerSize;
+
+ // When copying up to this many bytes, use special "small" handlers.
+ const size_t kSmallCopySize = 8;
+ // When copying up to this many bytes, use special "medium" handlers.
+ const size_t kMediumCopySize = 63;
+ // When non-overlapping region of src and dst is less than this,
+ // use a more careful implementation (slightly slower).
+ const size_t kMinMoveDistance = 16;
+ // Note that these values are dictated by the implementation below,
+ // do not just change them and hope things will work!
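+ // Dispatch summary: counts 0-8 take the small handlers, 9-63 the medium
+ // handlers, and 64 and up fall through to the main-loop paths below.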
+
+ int stack_offset = 0; // Update if we change the stack height.
+
+ Label backward, backward_much_overlap;
+ Label forward_much_overlap, small_size, medium_size, pop_and_return;
+ __ push(edi);
+ __ push(esi);
+ stack_offset += 2 * kPointerSize;
+ Register dst = edi;
+ Register src = esi;
+ Register count = ecx;
+ Register loop_count = edx;
+ __ mov(dst, Operand(esp, stack_offset + kDestinationOffset));
+ __ mov(src, Operand(esp, stack_offset + kSourceOffset));
+ __ mov(count, Operand(esp, stack_offset + kSizeOffset));
+
+ __ cmp(dst, src);
+ __ j(equal, &pop_and_return);
+
+ __ prefetch(Operand(src, 0), 1);
+ __ cmp(count, kSmallCopySize);
+ __ j(below_equal, &small_size);
+ __ cmp(count, kMediumCopySize);
+ __ j(below_equal, &medium_size);
+ __ cmp(dst, src);
+ __ j(above, &backward);
+
+ {
+ // |dst| is a lower address than |src|. Copy front-to-back.
+ Label unaligned_source, move_last_15, skip_last_move;
+ __ mov(eax, src);
+ __ sub(eax, dst);
+ __ cmp(eax, kMinMoveDistance);
+ __ j(below, &forward_much_overlap);
+ // Copy first 16 bytes.
+ __ movdqu(xmm0, Operand(src, 0));
+ __ movdqu(Operand(dst, 0), xmm0);
+ // Determine distance to alignment: 16 - (dst & 0xF).
+ __ mov(edx, dst);
+ __ and_(edx, 0xF);
+ __ neg(edx);
+ __ add(edx, Immediate(16));
+ __ add(dst, edx);
+ __ add(src, edx);
+ __ sub(count, edx);
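+ // E.g. if dst & 0xF == 3, edx is 16 - 3 = 13, so dst/src advance 13 bytes;
+ // this is safe because the unaligned 16-byte copy above already covered them.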
+ // dst is now aligned. Main copy loop.
+ __ mov(loop_count, count);
+ __ shr(loop_count, 6);
+ // Check if src is also aligned.
+ __ test(src, Immediate(0xF));
+ __ j(not_zero, &unaligned_source);
+ // Copy loop for aligned source and destination.
+ MemMoveEmitMainLoop(masm, &move_last_15, FORWARD, MOVE_ALIGNED);
+ // At most 15 bytes to copy. Copy 16 bytes at the end of the region.
+ __ bind(&move_last_15);
+ __ and_(count, 0xF);
+ __ j(zero, &skip_last_move, Label::kNear);
+ __ movdqu(xmm0, Operand(src, count, times_1, -0x10));
+ __ movdqu(Operand(dst, count, times_1, -0x10), xmm0);
+ __ bind(&skip_last_move);
+ MemMoveEmitPopAndReturn(masm);
+
+ // Copy loop for unaligned source and aligned destination.
+ __ bind(&unaligned_source);
+ MemMoveEmitMainLoop(masm, &move_last_15, FORWARD, MOVE_UNALIGNED);
+ __ jmp(&move_last_15);
+
+ // Less than kMinMoveDistance offset between dst and src.
+ Label loop_until_aligned, last_15_much_overlap;
+ __ bind(&loop_until_aligned);
+ __ mov_b(eax, Operand(src, 0));
+ __ inc(src);
+ __ mov_b(Operand(dst, 0), eax);
+ __ inc(dst);
+ __ dec(count);
+ __ bind(&forward_much_overlap); // Entry point into this block.
+ __ test(dst, Immediate(0xF));
+ __ j(not_zero, &loop_until_aligned);
+ // dst is now aligned, src can't be. Main copy loop.
+ __ mov(loop_count, count);
+ __ shr(loop_count, 6);
+ MemMoveEmitMainLoop(masm, &last_15_much_overlap, FORWARD, MOVE_UNALIGNED);
+ __ bind(&last_15_much_overlap);
+ __ and_(count, 0xF);
+ __ j(zero, &pop_and_return);
+ __ cmp(count, kSmallCopySize);
+ __ j(below_equal, &small_size);
+ __ jmp(&medium_size);
}
- Label fast_elements_case;
- __ cmp(ecx, Immediate(PACKED_ELEMENTS));
- __ j(equal, &fast_elements_case);
- GenerateInternalArrayConstructorCase(masm, HOLEY_ELEMENTS);
+ {
+ // |dst| is a higher address than |src|. Copy backwards.
+ Label unaligned_source, move_first_15, skip_last_move;
+ __ bind(&backward);
+ // |dst| and |src| always point to the end of what's left to copy.
+ __ add(dst, count);
+ __ add(src, count);
+ __ mov(eax, dst);
+ __ sub(eax, src);
+ __ cmp(eax, kMinMoveDistance);
+ __ j(below, &backward_much_overlap);
+ // Copy last 16 bytes.
+ __ movdqu(xmm0, Operand(src, -0x10));
+ __ movdqu(Operand(dst, -0x10), xmm0);
+ // Find distance to alignment: dst & 0xF
+ __ mov(edx, dst);
+ __ and_(edx, 0xF);
+ __ sub(dst, edx);
+ __ sub(src, edx);
+ __ sub(count, edx);
+ // dst is now aligned. Main copy loop.
+ __ mov(loop_count, count);
+ __ shr(loop_count, 6);
+ // Check if src is also aligned.
+ __ test(src, Immediate(0xF));
+ __ j(not_zero, &unaligned_source);
+ // Copy loop for aligned source and destination.
+ MemMoveEmitMainLoop(masm, &move_first_15, BACKWARD, MOVE_ALIGNED);
+ // At most 15 bytes to copy. Copy 16 bytes at the beginning of the region.
+ __ bind(&move_first_15);
+ __ and_(count, 0xF);
+ __ j(zero, &skip_last_move, Label::kNear);
+ __ sub(src, count);
+ __ sub(dst, count);
+ __ movdqu(xmm0, Operand(src, 0));
+ __ movdqu(Operand(dst, 0), xmm0);
+ __ bind(&skip_last_move);
+ MemMoveEmitPopAndReturn(masm);
+
+ // Copy loop for unaligned source and aligned destination.
+ __ bind(&unaligned_source);
+ MemMoveEmitMainLoop(masm, &move_first_15, BACKWARD, MOVE_UNALIGNED);
+ __ jmp(&move_first_15);
+
+ // Less than kMinMoveDistance offset between dst and src.
+ Label loop_until_aligned, first_15_much_overlap;
+ __ bind(&loop_until_aligned);
+ __ dec(src);
+ __ dec(dst);
+ __ mov_b(eax, Operand(src, 0));
+ __ mov_b(Operand(dst, 0), eax);
+ __ dec(count);
+ __ bind(&backward_much_overlap); // Entry point into this block.
+ __ test(dst, Immediate(0xF));
+ __ j(not_zero, &loop_until_aligned);
+ // dst is now aligned, src can't be. Main copy loop.
+ __ mov(loop_count, count);
+ __ shr(loop_count, 6);
+ MemMoveEmitMainLoop(masm, &first_15_much_overlap, BACKWARD, MOVE_UNALIGNED);
+ __ bind(&first_15_much_overlap);
+ __ and_(count, 0xF);
+ __ j(zero, &pop_and_return);
+ // Small/medium handlers expect dst/src to point to the beginning.
+ __ sub(dst, count);
+ __ sub(src, count);
+ __ cmp(count, kSmallCopySize);
+ __ j(below_equal, &small_size);
+ __ jmp(&medium_size);
+ }
+ {
+ // Special handlers for 9 <= copy_size < 64. No assumptions about
+ // alignment or move distance, so all reads use unaligned loads and
+ // must happen before any writes.
+ Label f9_16, f17_32, f33_48, f49_63;
+
+ __ bind(&f9_16);
+ __ movsd(xmm0, Operand(src, 0));
+ __ movsd(xmm1, Operand(src, count, times_1, -8));
+ __ movsd(Operand(dst, 0), xmm0);
+ __ movsd(Operand(dst, count, times_1, -8), xmm1);
+ MemMoveEmitPopAndReturn(masm);
+
+ __ bind(&f17_32);
+ __ movdqu(xmm0, Operand(src, 0));
+ __ movdqu(xmm1, Operand(src, count, times_1, -0x10));
+ __ movdqu(Operand(dst, 0x00), xmm0);
+ __ movdqu(Operand(dst, count, times_1, -0x10), xmm1);
+ MemMoveEmitPopAndReturn(masm);
+
+ __ bind(&f33_48);
+ __ movdqu(xmm0, Operand(src, 0x00));
+ __ movdqu(xmm1, Operand(src, 0x10));
+ __ movdqu(xmm2, Operand(src, count, times_1, -0x10));
+ __ movdqu(Operand(dst, 0x00), xmm0);
+ __ movdqu(Operand(dst, 0x10), xmm1);
+ __ movdqu(Operand(dst, count, times_1, -0x10), xmm2);
+ MemMoveEmitPopAndReturn(masm);
+
+ __ bind(&f49_63);
+ __ movdqu(xmm0, Operand(src, 0x00));
+ __ movdqu(xmm1, Operand(src, 0x10));
+ __ movdqu(xmm2, Operand(src, 0x20));
+ __ movdqu(xmm3, Operand(src, count, times_1, -0x10));
+ __ movdqu(Operand(dst, 0x00), xmm0);
+ __ movdqu(Operand(dst, 0x10), xmm1);
+ __ movdqu(Operand(dst, 0x20), xmm2);
+ __ movdqu(Operand(dst, count, times_1, -0x10), xmm3);
+ MemMoveEmitPopAndReturn(masm);
+
+ __ bind(&medium_size); // Entry point into this block.
+ __ mov(eax, count);
+ __ dec(eax);
+ __ shr(eax, 4);
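+ // eax = (count - 1) / 16 maps 9-16 -> 0, 17-32 -> 1, 33-48 -> 2 and
+ // 49-63 -> 3, selecting one of the four handlers above.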
+ if (FLAG_debug_code) {
+ Label ok;
+ __ cmp(eax, 3);
+ __ j(below_equal, &ok);
+ __ int3();
+ __ bind(&ok);
+ }
+
+ // Dispatch to handlers.
+ Label eax_is_2_or_3;
+
+ __ cmp(eax, 1);
+ __ j(greater, &eax_is_2_or_3);
+ __ j(less, &f9_16); // eax == 0.
+ __ jmp(&f17_32); // eax == 1.
+
+ __ bind(&eax_is_2_or_3);
+ __ cmp(eax, 3);
+ __ j(less, &f33_48); // eax == 2.
+ __ jmp(&f49_63); // eax == 3.
+ }
+ {
+ // Specialized copiers for copy_size <= 8 bytes.
+ Label f0, f1, f2, f3, f4, f5_8;
+ __ bind(&f0);
+ MemMoveEmitPopAndReturn(masm);
+
+ __ bind(&f1);
+ __ mov_b(eax, Operand(src, 0));
+ __ mov_b(Operand(dst, 0), eax);
+ MemMoveEmitPopAndReturn(masm);
+
+ __ bind(&f2);
+ __ mov_w(eax, Operand(src, 0));
+ __ mov_w(Operand(dst, 0), eax);
+ MemMoveEmitPopAndReturn(masm);
+
+ __ bind(&f3);
+ __ mov_w(eax, Operand(src, 0));
+ __ mov_b(edx, Operand(src, 2));
+ __ mov_w(Operand(dst, 0), eax);
+ __ mov_b(Operand(dst, 2), edx);
+ MemMoveEmitPopAndReturn(masm);
+
+ __ bind(&f4);
+ __ mov(eax, Operand(src, 0));
+ __ mov(Operand(dst, 0), eax);
+ MemMoveEmitPopAndReturn(masm);
+
+ __ bind(&f5_8);
+ __ mov(eax, Operand(src, 0));
+ __ mov(edx, Operand(src, count, times_1, -4));
+ __ mov(Operand(dst, 0), eax);
+ __ mov(Operand(dst, count, times_1, -4), edx);
+ MemMoveEmitPopAndReturn(masm);
+
+ __ bind(&small_size); // Entry point into this block.
+ if (FLAG_debug_code) {
+ Label ok;
+ __ cmp(count, 8);
+ __ j(below_equal, &ok);
+ __ int3();
+ __ bind(&ok);
+ }
+
+ // Dispatch to handlers.
+ Label count_is_above_3, count_is_2_or_3;
+
+ __ cmp(count, 3);
+ __ j(greater, &count_is_above_3);
+
+ __ cmp(count, 1);
+ __ j(greater, &count_is_2_or_3);
+ __ j(less, &f0); // count == 0.
+ __ jmp(&f1); // count == 1.
+
+ __ bind(&count_is_2_or_3);
+ __ cmp(count, 3);
+ __ j(less, &f2); // count == 2.
+ __ jmp(&f3); // count == 3.
+
+ __ bind(&count_is_above_3);
+ __ cmp(count, 5);
+ __ j(less, &f4); // count == 4.
+ __ jmp(&f5_8); // count in [5, 8].
+ }
- __ bind(&fast_elements_case);
- GenerateInternalArrayConstructorCase(masm, PACKED_ELEMENTS);
+ __ bind(&pop_and_return);
+ MemMoveEmitPopAndReturn(masm);
}
#undef __
diff --git a/deps/v8/src/builtins/iterator.tq b/deps/v8/src/builtins/iterator.tq
new file mode 100644
index 0000000000..5c9439dfc7
--- /dev/null
+++ b/deps/v8/src/builtins/iterator.tq
@@ -0,0 +1,44 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include 'src/builtins/builtins-iterator-gen.h'
+
+namespace iterator {
+ // Returned from IteratorBuiltinsAssembler::GetIterator().
+ struct IteratorRecord {
+ // iteratorRecord.[[Iterator]]
+ object: JSReceiver;
+
+ // iteratorRecord.[[NextMethod]]
+ next: Object;
+ }
+
+ extern macro IteratorBuiltinsAssembler::GetIteratorMethod(
+ implicit context: Context)(Object): Object;
+ extern macro IteratorBuiltinsAssembler::GetIterator(
+ implicit context: Context)(Object): IteratorRecord;
+
+ extern macro IteratorBuiltinsAssembler::IteratorStep(
+ implicit context: Context)(IteratorRecord): Object
+ labels Done;
+ extern macro IteratorBuiltinsAssembler::IteratorStep(
+ implicit context: Context)(IteratorRecord, Map): Object
+ labels Done;
+
+ extern macro IteratorBuiltinsAssembler::IteratorValue(
+ implicit context: Context)(Object): Object;
+ extern macro IteratorBuiltinsAssembler::IteratorValue(
+ implicit context: Context)(Object, Map): Object;
+
+ extern macro IteratorBuiltinsAssembler::IteratorCloseOnException(
+ implicit context: Context)(IteratorRecord, Object): never;
+
+ extern macro IteratorBuiltinsAssembler::IterableToList(
+ implicit context: Context)(Object, Object): JSArray;
+
+ extern builtin IterableToListMayPreserveHoles(implicit context:
+ Context)(Object, Object);
+ extern builtin IterableToListWithSymbolLookup(implicit context:
+ Context)(Object);
+}
diff --git a/deps/v8/src/builtins/mips/OWNERS b/deps/v8/src/builtins/mips/OWNERS
index c653ce404d..b455d9ef29 100644
--- a/deps/v8/src/builtins/mips/OWNERS
+++ b/deps/v8/src/builtins/mips/OWNERS
@@ -1,2 +1,3 @@
-ibogosavljevic@wavecomp.com
+arikalo@wavecomp.com
+prudic@wavecomp.com
skovacevic@wavecomp.com
diff --git a/deps/v8/src/builtins/mips/builtins-mips.cc b/deps/v8/src/builtins/mips/builtins-mips.cc
index a2a335c70f..558e6495f1 100644
--- a/deps/v8/src/builtins/mips/builtins-mips.cc
+++ b/deps/v8/src/builtins/mips/builtins-mips.cc
@@ -4,16 +4,22 @@
#if V8_TARGET_ARCH_MIPS
+#include "src/api-arguments.h"
#include "src/code-factory.h"
-#include "src/code-stubs.h"
#include "src/counters.h"
#include "src/debug/debug.h"
#include "src/deoptimizer.h"
#include "src/frame-constants.h"
#include "src/frames.h"
+#include "src/macro-assembler-inl.h"
#include "src/mips/constants-mips.h"
#include "src/objects-inl.h"
+#include "src/objects/cell.h"
+#include "src/objects/foreign.h"
+#include "src/objects/heap-number.h"
#include "src/objects/js-generator.h"
+#include "src/objects/smi.h"
+#include "src/register-configuration.h"
#include "src/runtime/runtime.h"
#include "src/wasm/wasm-objects.h"
@@ -154,6 +160,22 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
__ Ret();
}
+static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
+ Register scratch1, Register scratch2,
+ Label* stack_overflow) {
+ // Check the stack for overflow. We are not trying to catch
+ // interruptions (e.g. debug break and preemption) here, so the "real stack
+ // limit" is checked.
+ __ LoadRoot(scratch1, RootIndex::kRealStackLimit);
+ // Make scratch1 the space we have left. The stack might already have
+ // overflowed here, which will cause scratch1 to become negative.
+ __ subu(scratch1, sp, scratch1);
+ // Check if the arguments will overflow the stack.
+ __ sll(scratch2, num_args, kPointerSizeLog2);
+ // Signed comparison.
+ __ Branch(stack_overflow, le, scratch1, Operand(scratch2));
+}
+
} // namespace
// The construct stub for ES5 constructor functions and ES6 class constructors.
@@ -240,6 +262,19 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// Set up pointer to last argument.
__ Addu(t2, fp, Operand(StandardFrameConstants::kCallerSPOffset));
+ Label enough_stack_space, stack_overflow;
+ Generate_StackOverflowCheck(masm, a0, t0, t1, &stack_overflow);
+ __ Branch(&enough_stack_space);
+
+ __ bind(&stack_overflow);
+ // Restore the context from the frame.
+ __ lw(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
+ __ CallRuntime(Runtime::kThrowStackOverflow);
+ // Unreachable code.
+ __ break_(0xCC);
+
+ __ bind(&enough_stack_space);
+
// Copy arguments and receiver to the expression stack.
Label loop, entry;
__ mov(t3, a0);
@@ -335,20 +370,21 @@ void Builtins::Generate_ConstructedNonConstructable(MacroAssembler* masm) {
__ CallRuntime(Runtime::kThrowConstructedNonConstructable);
}
-// Clobbers a2; preserves all other registers.
-static void Generate_CheckStackOverflow(MacroAssembler* masm, Register argc) {
+// Clobbers scratch1 and scratch2; preserves all other registers.
+static void Generate_CheckStackOverflow(MacroAssembler* masm, Register argc,
+ Register scratch1, Register scratch2) {
// Check the stack for overflow. We are not trying to catch
// interruptions (e.g. debug break and preemption) here, so the "real stack
// limit" is checked.
Label okay;
- __ LoadRoot(a2, RootIndex::kRealStackLimit);
+ __ LoadRoot(scratch1, RootIndex::kRealStackLimit);
// Make a2 the space we have left. The stack might already be overflowed
// here which will cause a2 to become negative.
- __ Subu(a2, sp, a2);
+ __ Subu(scratch1, sp, scratch1);
// Check if the arguments will overflow the stack.
- __ sll(t3, argc, kPointerSizeLog2);
+ __ sll(scratch2, argc, kPointerSizeLog2);
// Signed comparison.
- __ Branch(&okay, gt, a2, Operand(t3));
+ __ Branch(&okay, gt, scratch1, Operand(scratch2));
// Out of stack space.
__ CallRuntime(Runtime::kThrowStackOverflow);
@@ -356,16 +392,196 @@ static void Generate_CheckStackOverflow(MacroAssembler* masm, Register argc) {
__ bind(&okay);
}
+namespace {
+
+// Used by JSEntryTrampoline to locate the C++ parameters passed to
+// JSEntryVariant on the stack.
+constexpr int kPushedStackSpace =
+ kCArgsSlotsSize + (kNumCalleeSaved + 1) * kPointerSize +
+ kNumCalleeSavedFPU * kDoubleSize + 4 * kPointerSize +
+ EntryFrameConstants::kCallerFPOffset;
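+// This total must match the pushed_stack_space accumulated step by step in
+// Generate_JSEntryVariant below (checked there by a DCHECK_EQ before the
+// trampoline call).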
+
+// Called with the native C calling convention. The corresponding function
+// signature is either:
+//
+// using JSEntryFunction = GeneratedCode<Address(
+// Address root_register_value, Address new_target, Address target,
+// Address receiver, intptr_t argc, Address** argv)>;
+// or
+// using JSEntryFunction = GeneratedCode<Address(
+// Address root_register_value, MicrotaskQueue* microtask_queue)>;
+//
+// Passes through a0, a1, a2, a3 and stack to JSEntryTrampoline.
+void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
+ Builtins::Name entry_trampoline) {
+ Label invoke, handler_entry, exit;
+
+ int pushed_stack_space = kCArgsSlotsSize;
+ {
+ NoRootArrayScope no_root_array(masm);
+
+ // Registers:
+ // a0: root_register_value
+
+ // Save callee saved registers on the stack.
+ __ MultiPush(kCalleeSaved | ra.bit());
+ pushed_stack_space +=
+ kNumCalleeSaved * kPointerSize + kPointerSize /* ra */;
+
+ // Save callee-saved FPU registers.
+ __ MultiPushFPU(kCalleeSavedFPU);
+ pushed_stack_space += kNumCalleeSavedFPU * kDoubleSize;
+
+ // Set up the reserved register for 0.0.
+ __ Move(kDoubleRegZero, 0.0);
+
+ // Initialize the root register.
+ // C calling convention. The first argument is passed in a0.
+ __ mov(kRootRegister, a0);
+ }
+
+ // We build an EntryFrame.
+ __ li(t3, Operand(-1)); // Push a bad frame pointer to fail if it is used.
+ __ li(t2, Operand(StackFrame::TypeToMarker(type)));
+ __ li(t1, Operand(StackFrame::TypeToMarker(type)));
+ __ li(t0, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress,
+ masm->isolate()));
+ __ lw(t0, MemOperand(t0));
+ __ Push(t3, t2, t1, t0);
+ pushed_stack_space += 4 * kPointerSize;
+
+ // Set up frame pointer for the frame to be pushed.
+ __ addiu(fp, sp, -EntryFrameConstants::kCallerFPOffset);
+ pushed_stack_space += EntryFrameConstants::kCallerFPOffset;
+
+ // Registers:
+ // a0: root_register_value
+ //
+ // Stack:
+ // caller fp |
+ // function slot | entry frame
+ // context slot |
+ // bad fp (0xFF...F) |
+ // callee saved registers + ra
+ // 4 args slots
+
+ // If this is the outermost JS call, set js_entry_sp value.
+ Label non_outermost_js;
+ ExternalReference js_entry_sp = ExternalReference::Create(
+ IsolateAddressId::kJSEntrySPAddress, masm->isolate());
+ __ li(t1, js_entry_sp);
+ __ lw(t2, MemOperand(t1));
+ __ Branch(&non_outermost_js, ne, t2, Operand(zero_reg));
+ __ sw(fp, MemOperand(t1));
+ __ li(t0, Operand(StackFrame::OUTERMOST_JSENTRY_FRAME));
+ Label cont;
+ __ b(&cont);
+ __ nop(); // Branch delay slot nop.
+ __ bind(&non_outermost_js);
+ __ li(t0, Operand(StackFrame::INNER_JSENTRY_FRAME));
+ __ bind(&cont);
+ __ push(t0);
+
+ // Jump to a faked try block that does the invoke, with a faked catch
+ // block that sets the pending exception.
+ __ jmp(&invoke);
+ __ bind(&handler_entry);
+
+ // Store the current pc as the handler offset. It's used later to create the
+ // handler table.
+ masm->isolate()->builtins()->SetJSEntryHandlerOffset(handler_entry.pos());
+
+ // Caught exception: Store result (exception) in the pending exception
+ // field in the JSEnv and return a failure sentinel. Coming in here the
+ // fp will be invalid because the PushStackHandler below sets it to 0 to
+ // signal the existence of the JSEntry frame.
+ __ li(t0, ExternalReference::Create(
+ IsolateAddressId::kPendingExceptionAddress, masm->isolate()));
+ __ sw(v0, MemOperand(t0)); // We come back from 'invoke'. result is in v0.
+ __ LoadRoot(v0, RootIndex::kException);
+ __ b(&exit); // b exposes branch delay slot.
+ __ nop(); // Branch delay slot nop.
+
+ // Invoke: Link this frame into the handler chain.
+ __ bind(&invoke);
+ __ PushStackHandler();
+ // If an exception not caught by another handler occurs, this handler
+ // returns control to the code after the bal(&invoke) above, which
+ // restores all kCalleeSaved registers (including cp and fp) to their
+ // saved values before returning a failure to C.
+ //
+ // Preserve a1, a2 and a3 passed by C++ and pass them to the trampoline.
+ //
+ // Stack:
+ // handler frame
+ // entry frame
+ // callee saved registers + ra
+ // 4 args slots
+ //
+ // Invoke the function by calling through JS entry trampoline builtin and
+ // pop the faked function when we return.
+ Handle<Code> trampoline_code =
+ masm->isolate()->builtins()->builtin_handle(entry_trampoline);
+ DCHECK_EQ(kPushedStackSpace, pushed_stack_space);
+ __ Call(trampoline_code, RelocInfo::CODE_TARGET);
+
+ // Unlink this frame from the handler chain.
+ __ PopStackHandler();
+
+ __ bind(&exit); // v0 holds result
+ // Check if the current stack frame is marked as the outermost JS frame.
+ Label non_outermost_js_2;
+ __ pop(t1);
+ __ Branch(&non_outermost_js_2, ne, t1,
+ Operand(StackFrame::OUTERMOST_JSENTRY_FRAME));
+ __ li(t1, ExternalReference(js_entry_sp));
+ __ sw(zero_reg, MemOperand(t1));
+ __ bind(&non_outermost_js_2);
+
+ // Restore the top frame descriptors from the stack.
+ __ pop(t1);
+ __ li(t0, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress,
+ masm->isolate()));
+ __ sw(t1, MemOperand(t0));
+
+ // Reset the stack to the callee saved registers.
+ __ addiu(sp, sp, -EntryFrameConstants::kCallerFPOffset);
+
+ // Restore callee-saved fpu registers.
+ __ MultiPopFPU(kCalleeSavedFPU);
+
+ // Restore callee saved registers from the stack.
+ __ MultiPop(kCalleeSaved | ra.bit());
+ // Return.
+ __ Jump(ra);
+}
+
+} // namespace
+
+void Builtins::Generate_JSEntry(MacroAssembler* masm) {
+ Generate_JSEntryVariant(masm, StackFrame::ENTRY,
+ Builtins::kJSEntryTrampoline);
+}
+
+void Builtins::Generate_JSConstructEntry(MacroAssembler* masm) {
+ Generate_JSEntryVariant(masm, StackFrame::CONSTRUCT_ENTRY,
+ Builtins::kJSConstructEntryTrampoline);
+}
+
+void Builtins::Generate_JSRunMicrotasksEntry(MacroAssembler* masm) {
+ Generate_JSEntryVariant(masm, StackFrame::ENTRY,
+ Builtins::kRunMicrotasksTrampoline);
+}
+
static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
bool is_construct) {
// ----------- S t a t e -------------
- // -- a0: new.target
- // -- a1: function
- // -- a2: receiver_pointer
- // -- a3: argc
- // -- s0: argv
+ // -- a0: root_register_value (unused)
+ // -- a1: new.target
+ // -- a2: function
+ // -- a3: receiver_pointer
+ // -- [fp + kPushedStackSpace + 0 * kPointerSize]: argc
+ // -- [fp + kPushedStackSpace + 1 * kPointerSize]: argv
// -----------------------------------
- ProfileEntryHookStub::MaybeCallEntryHook(masm);
// Enter an internal frame.
{
@@ -378,20 +594,31 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ lw(cp, MemOperand(cp));
// Push the function and the receiver onto the stack.
- __ Push(a1, a2);
+ __ Push(a2, a3);
- // Check if we have enough stack space to push all arguments.
- // Clobbers a2.
- Generate_CheckStackOverflow(masm, a3);
+ __ mov(a3, a1);
+ __ mov(a1, a2);
- // Remember new.target.
- __ mov(t1, a0);
+ __ lw(s0, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ lw(a0,
+ MemOperand(s0, kPushedStackSpace + EntryFrameConstants::kArgcOffset));
+ __ lw(s0,
+ MemOperand(s0, kPushedStackSpace + EntryFrameConstants::kArgvOffset));
+
+ // a0: argc
+ // a1: function
+ // a3: new.target
+ // s0: argv
+
+ // Check if we have enough stack space to push all arguments.
+ // Clobbers a2 and t0.
+ Generate_CheckStackOverflow(masm, a0, a2, t0);
// Copy arguments to the stack in a loop.
- // a3: argc
+ // a0: argc
// s0: argv, i.e. points to first arg
Label loop, entry;
- __ Lsa(t2, s0, a3, kPointerSizeLog2);
+ __ Lsa(t2, s0, a0, kPointerSizeLog2);
__ b(&entry);
__ nop(); // Branch delay slot nop.
// t2 points past last arg.
@@ -403,13 +630,14 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ bind(&entry);
__ Branch(&loop, ne, s0, Operand(t2));
- // Setup new.target and argc.
- __ mov(a0, a3);
- __ mov(a3, t1);
+ // a0: argc
+ // a1: function
+ // a3: new.target
// Initialize all JavaScript callee-saved registers, since they will be seen
// by the garbage collector as part of handlers.
__ LoadRoot(t0, RootIndex::kUndefinedValue);
+ __ mov(s0, t0);
__ mov(s1, t0);
__ mov(s2, t0);
__ mov(s3, t0);
@@ -438,6 +666,12 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
Generate_JSEntryTrampolineHelper(masm, true);
}
+void Builtins::Generate_RunMicrotasksTrampoline(MacroAssembler* masm) {
+ // a1: microtask_queue
+ __ mov(RunMicrotasksDescriptor::MicrotaskQueueRegister(), a1);
+ __ Jump(BUILTIN_CODE(masm->isolate(), RunMicrotasks), RelocInfo::CODE_TARGET);
+}
+
static void GetSharedFunctionInfoBytecode(MacroAssembler* masm,
Register sfi_data,
Register scratch1) {
@@ -786,11 +1020,22 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
// The function builds an interpreter frame. See InterpreterFrameConstants in
// frames.h for its layout.
void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
- ProfileEntryHookStub::MaybeCallEntryHook(masm);
-
Register closure = a1;
Register feedback_vector = a2;
+ // Get the bytecode array from the function object and load it into
+ // kInterpreterBytecodeArrayRegister.
+ __ lw(a0, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+ __ lw(kInterpreterBytecodeArrayRegister,
+ FieldMemOperand(a0, SharedFunctionInfo::kFunctionDataOffset));
+ GetSharedFunctionInfoBytecode(masm, kInterpreterBytecodeArrayRegister, t0);
+
+ // The bytecode array could have been flushed from the shared function info,
+ // if so, call into CompileLazy.
+ Label compile_lazy;
+ __ GetObjectType(kInterpreterBytecodeArrayRegister, a0, a0);
+ __ Branch(&compile_lazy, ne, a0, Operand(BYTECODE_ARRAY_TYPE));
+
// Load the feedback vector from the closure.
__ lw(feedback_vector,
FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
@@ -805,12 +1050,6 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ PushStandardFrame(closure);
- // Get the bytecode array from the function object and load it into
- // kInterpreterBytecodeArrayRegister.
- __ lw(a0, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
- __ lw(kInterpreterBytecodeArrayRegister,
- FieldMemOperand(a0, SharedFunctionInfo::kFunctionDataOffset));
- GetSharedFunctionInfoBytecode(masm, kInterpreterBytecodeArrayRegister, t0);
// Increment invocation count for the function.
__ lw(t0, FieldMemOperand(feedback_vector,
@@ -819,18 +1058,6 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ sw(t0, FieldMemOperand(feedback_vector,
FeedbackVector::kInvocationCountOffset));
- // Check function data field is actually a BytecodeArray object.
- if (FLAG_debug_code) {
- __ SmiTst(kInterpreterBytecodeArrayRegister, t0);
- __ Assert(ne,
- AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry,
- t0, Operand(zero_reg));
- __ GetObjectType(kInterpreterBytecodeArrayRegister, t0, t0);
- __ Assert(eq,
- AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry,
- t0, Operand(BYTECODE_ARRAY_TYPE));
- }
-
// Reset code age.
DCHECK_EQ(0, BytecodeArray::kNoAgeBytecodeAge);
__ sb(zero_reg, FieldMemOperand(kInterpreterBytecodeArrayRegister,
@@ -924,23 +1151,11 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// The return value is in v0.
LeaveInterpreterFrame(masm, t0);
__ Jump(ra);
-}
-
-static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
- Register scratch1, Register scratch2,
- Label* stack_overflow) {
- // Check the stack for overflow. We are not trying to catch
- // interruptions (e.g. debug break and preemption) here, so the "real stack
- // limit" is checked.
- __ LoadRoot(scratch1, RootIndex::kRealStackLimit);
- // Make scratch1 the space we have left. The stack might already be overflowed
- // here which will cause scratch1 to become negative.
- __ subu(scratch1, sp, scratch1);
- // Check if the arguments will overflow the stack.
- __ sll(scratch2, num_args, kPointerSizeLog2);
- // Signed comparison.
- __ Branch(stack_overflow, le, scratch1, Operand(scratch2));
+ __ bind(&compile_lazy);
+ GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
+ // Unreachable code.
+ __ break_(0xCC);
}
static void Generate_InterpreterPushArgs(MacroAssembler* masm,
@@ -1067,12 +1282,14 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
// Set the return address to the correct point in the interpreter entry
// trampoline.
Label builtin_trampoline, trampoline_loaded;
- Smi* interpreter_entry_return_pc_offset(
+ Smi interpreter_entry_return_pc_offset(
masm->isolate()->heap()->interpreter_entry_return_pc_offset());
- DCHECK_NE(interpreter_entry_return_pc_offset, Smi::kZero);
+ DCHECK_NE(interpreter_entry_return_pc_offset, Smi::zero());
- // If the SFI function_data is an InterpreterData, get the trampoline stored
- // in it, otherwise get the trampoline from the builtins list.
+ // If the SFI function_data is an InterpreterData, the function will have a
+ // custom copy of the interpreter entry trampoline for profiling. If so,
+ // get the custom trampoline, otherwise grab the entry address of the global
+ // trampoline.
__ lw(t0, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
__ lw(t0, FieldMemOperand(t0, JSFunction::kSharedFunctionInfoOffset));
__ lw(t0, FieldMemOperand(t0, SharedFunctionInfo::kFunctionDataOffset));
@@ -1082,14 +1299,17 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
Operand(INTERPRETER_DATA_TYPE));
__ lw(t0, FieldMemOperand(t0, InterpreterData::kInterpreterTrampolineOffset));
+ __ Addu(t0, t0, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Branch(&trampoline_loaded);
__ bind(&builtin_trampoline);
- __ li(t0, BUILTIN_CODE(masm->isolate(), InterpreterEntryTrampoline));
+ __ li(t0, ExternalReference::
+ address_of_interpreter_entry_trampoline_instruction_start(
+ masm->isolate()));
+ __ lw(t0, MemOperand(t0));
__ bind(&trampoline_loaded);
- __ Addu(ra, t0, Operand(interpreter_entry_return_pc_offset->value() +
- Code::kHeaderSize - kHeapObjectTag));
+ __ Addu(ra, t0, Operand(interpreter_entry_return_pc_offset->value()));
// Initialize the dispatch table register.
__ li(kInterpreterDispatchTableRegister,
@@ -1302,7 +1522,7 @@ void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
}
// If the code object is null, just return to the caller.
- __ Ret(eq, v0, Operand(Smi::kZero));
+ __ Ret(eq, v0, Operand(Smi::zero()));
 // Drop the handler frame that may be sitting on top of the actual
 // JavaScript frame. This is the case when OSR is triggered from bytecode.
@@ -1542,7 +1762,7 @@ static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
__ sll(a0, a0, kSmiTagSize);
__ li(t0, Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
__ MultiPush(a0.bit() | a1.bit() | t0.bit() | fp.bit() | ra.bit());
- __ Push(Smi::kZero); // Padding.
+ __ Push(Smi::zero()); // Padding.
__ Addu(fp, sp,
Operand(ArgumentsAdaptorFrameConstants::kFixedFrameSizeFromFp));
}
@@ -2245,7 +2465,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
// The function index was put in t0 by the jump table trampoline.
// Convert to Smi for the runtime call.
- __ SmiTag(t0);
+ __ SmiTag(kWasmCompileLazyFuncIndexRegister);
{
HardAbortScope hard_abort(masm); // Avoid calls to Abort.
FrameScope scope(masm, StackFrame::WASM_COMPILE_LAZY);
@@ -2261,13 +2481,13 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
// Pass instance and function index as an explicit arguments to the runtime
// function.
- __ Push(kWasmInstanceRegister, t0);
+ __ Push(kWasmInstanceRegister, kWasmCompileLazyFuncIndexRegister);
// Load the correct CEntry builtin from the instance object.
__ lw(a2, FieldMemOperand(kWasmInstanceRegister,
WasmInstanceObject::kCEntryStubOffset));
// Initialize the JavaScript context with 0. CEntry will use it to
// set the current context on the isolate.
- __ Move(kContextRegister, Smi::kZero);
+ __ Move(kContextRegister, Smi::zero());
__ CallRuntimeWithCEntry(Runtime::kWasmCompileLazy, a2);
// Restore registers.
@@ -2291,8 +2511,6 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
// If argv_mode == kArgvInRegister:
// a2: pointer to the first argument
- ProfileEntryHookStub::MaybeCallEntryHook(masm);
-
if (argv_mode == kArgvInRegister) {
// Move argv into the correct register.
__ mov(s1, a2);
@@ -2326,40 +2544,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
__ li(a2, ExternalReference::isolate_address(masm->isolate()));
__ mov(a1, s1);
- // To let the GC traverse the return address of the exit frames, we need to
- // know where the return address is. The CEntry is unmovable, so
- // we can store the address on the stack to be able to find it again and
- // we never have to restore it, because it will not change.
- {
- Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm);
- int kNumInstructionsToJump = 4;
- Label find_ra;
- // Adjust the value in ra to point to the correct return location, 2nd
- // instruction past the real call into C code (the jalr(t9)), and push it.
- // This is the return address of the exit frame.
- if (kArchVariant >= kMips32r6) {
- __ addiupc(ra, kNumInstructionsToJump + 1);
- } else {
- // This no-op-and-link sequence saves PC + 8 in ra register on pre-r6 MIPS
- __ nal(); // nal has branch delay slot.
- __ Addu(ra, ra, kNumInstructionsToJump * kInstrSize);
- }
- __ bind(&find_ra);
-
- // This spot was reserved in EnterExitFrame.
- __ sw(ra, MemOperand(sp));
- // Stack space reservation moved to the branch delay slot below.
- // Stack is still aligned.
-
- // Call the C routine.
- __ mov(t9, s2); // Function pointer to t9 to conform to ABI for PIC.
- __ jalr(t9);
- // Set up sp in the delay slot.
- __ addiu(sp, sp, -kCArgsSlotsSize);
- // Make sure the stored 'ra' points to this position.
- DCHECK_EQ(kNumInstructionsToJump,
- masm->InstructionsGeneratedSince(&find_ra));
- }
+ __ StoreReturnAddressAndCall(s2);
// Result returned in v0 or v1:v0 - do not destroy these registers!
@@ -2675,40 +2860,6 @@ void Builtins::Generate_MathPowInternal(MacroAssembler* masm) {
__ Ret();
}
-namespace {
-
-void GenerateInternalArrayConstructorCase(MacroAssembler* masm,
- ElementsKind kind) {
- // Load undefined into the allocation site parameter as required by
- // ArrayNArgumentsConstructor.
- __ LoadRoot(kJavaScriptCallExtraArg1Register, RootIndex::kUndefinedValue);
-
- __ Jump(CodeFactory::InternalArrayNoArgumentConstructor(masm->isolate(), kind)
- .code(),
- RelocInfo::CODE_TARGET, lo, a0, Operand(1));
-
- __ Jump(BUILTIN_CODE(masm->isolate(), ArrayNArgumentsConstructor),
- RelocInfo::CODE_TARGET, hi, a0, Operand(1));
-
- if (IsFastPackedElementsKind(kind)) {
- // We might need to create a holey array
- // look at the first argument.
- __ lw(kScratchReg, MemOperand(sp, 0));
-
- __ Jump(CodeFactory::InternalArraySingleArgumentConstructor(
- masm->isolate(), GetHoleyElementsKind(kind))
- .code(),
- RelocInfo::CODE_TARGET, ne, kScratchReg, Operand(zero_reg));
- }
-
- __ Jump(
- CodeFactory::InternalArraySingleArgumentConstructor(masm->isolate(), kind)
- .code(),
- RelocInfo::CODE_TARGET);
-}
-
-} // namespace
-
void Builtins::Generate_InternalArrayConstructorImpl(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : argc
@@ -2730,33 +2881,901 @@ void Builtins::Generate_InternalArrayConstructorImpl(MacroAssembler* masm) {
__ GetObjectType(a3, a3, t0);
__ Assert(eq, AbortReason::kUnexpectedInitialMapForArrayFunction, t0,
Operand(MAP_TYPE));
+
+ // Figure out the right elements kind.
+ __ lw(a3, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
+
+ // Load the map's "bit field 2" into a3. We only need the first byte,
+ // but the following bit field extraction takes care of that anyway.
+ __ lbu(a3, FieldMemOperand(a3, Map::kBitField2Offset));
+ // Retrieve elements_kind from bit field 2.
+ __ DecodeField<Map::ElementsKindBits>(a3);
+
+ // Initial elements kind should be packed elements.
+ __ Assert(eq, AbortReason::kInvalidElementsKindForInternalPackedArray, a3,
+ Operand(PACKED_ELEMENTS));
+
+ // No arguments should be passed.
+ __ Assert(eq, AbortReason::kWrongNumberOfArgumentsForInternalPackedArray,
+ a0, Operand(0));
}
- // Figure out the right elements kind.
- __ lw(a3, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
+ __ Jump(
+ BUILTIN_CODE(masm->isolate(), InternalArrayNoArgumentConstructor_Packed),
+ RelocInfo::CODE_TARGET);
+}
- // Load the map's "bit field 2" into a3. We only need the first byte,
- // but the following bit field extraction takes care of that anyway.
- __ lbu(a3, FieldMemOperand(a3, Map::kBitField2Offset));
- // Retrieve elements_kind from bit field 2.
- __ DecodeField<Map::ElementsKindBits>(a3);
+namespace {
- if (FLAG_debug_code) {
- Label done;
- __ Branch(&done, eq, a3, Operand(PACKED_ELEMENTS));
- __ Assert(
- eq,
- AbortReason::kInvalidElementsKindForInternalArrayOrInternalPackedArray,
- a3, Operand(HOLEY_ELEMENTS));
- __ bind(&done);
+int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
+ return ref0.address() - ref1.address();
+}
+
+// Calls an API function. Allocates a HandleScope, extracts the returned value
+// from the handle, and propagates exceptions. Restores the context.
+// stack_space - space to be unwound on exit (includes the JS call arguments'
+// space and the additional space allocated for the fast call).
+void CallApiFunctionAndReturn(MacroAssembler* masm, Register function_address,
+ ExternalReference thunk_ref, int stack_space,
+ MemOperand* stack_space_operand,
+ MemOperand return_value_operand) {
+ Isolate* isolate = masm->isolate();
+ ExternalReference next_address =
+ ExternalReference::handle_scope_next_address(isolate);
+ const int kNextOffset = 0;
+ const int kLimitOffset = AddressOffset(
+ ExternalReference::handle_scope_limit_address(isolate), next_address);
+ const int kLevelOffset = AddressOffset(
+ ExternalReference::handle_scope_level_address(isolate), next_address);
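+ // The limit and level fields are addressed relative to next_address via the
+ // offsets computed above, so a single base register (s5 below) reaches all
+ // three HandleScope fields.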
+
+ DCHECK(function_address == a1 || function_address == a2);
+
+ Label profiler_disabled;
+ Label end_profiler_check;
+ __ li(t9, ExternalReference::is_profiling_address(isolate));
+ __ lb(t9, MemOperand(t9, 0));
+ __ Branch(&profiler_disabled, eq, t9, Operand(zero_reg));
+
+ // Additional parameter is the address of the actual callback.
+ __ li(t9, thunk_ref);
+ __ jmp(&end_profiler_check);
+
+ __ bind(&profiler_disabled);
+ __ mov(t9, function_address);
+ __ bind(&end_profiler_check);
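+ // t9 now holds either the profiling thunk or the callback itself; the call
+ // below goes through t9 either way.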
+
+ // Allocate HandleScope in callee-save registers.
+ __ li(s5, next_address);
+ __ lw(s0, MemOperand(s5, kNextOffset));
+ __ lw(s1, MemOperand(s5, kLimitOffset));
+ __ lw(s2, MemOperand(s5, kLevelOffset));
+ __ Addu(s2, s2, Operand(1));
+ __ sw(s2, MemOperand(s5, kLevelOffset));
+
+ if (FLAG_log_timer_events) {
+ FrameScope frame(masm, StackFrame::MANUAL);
+ __ PushSafepointRegisters();
+ __ PrepareCallCFunction(1, a0);
+ __ li(a0, ExternalReference::isolate_address(isolate));
+ __ CallCFunction(ExternalReference::log_enter_external_function(), 1);
+ __ PopSafepointRegisters();
+ }
+
+ __ StoreReturnAddressAndCall(t9);
+
+ if (FLAG_log_timer_events) {
+ FrameScope frame(masm, StackFrame::MANUAL);
+ __ PushSafepointRegisters();
+ __ PrepareCallCFunction(1, a0);
+ __ li(a0, ExternalReference::isolate_address(isolate));
+ __ CallCFunction(ExternalReference::log_leave_external_function(), 1);
+ __ PopSafepointRegisters();
+ }
+
+ Label promote_scheduled_exception;
+ Label delete_allocated_handles;
+ Label leave_exit_frame;
+ Label return_value_loaded;
+
+ // Load value from ReturnValue.
+ __ lw(v0, return_value_operand);
+ __ bind(&return_value_loaded);
+
+ // No more valid handles (the result handle was the last one). Restore
+ // previous handle scope.
+ __ sw(s0, MemOperand(s5, kNextOffset));
+ if (__ emit_debug_code()) {
+ __ lw(a1, MemOperand(s5, kLevelOffset));
+ __ Check(eq, AbortReason::kUnexpectedLevelAfterReturnFromApiCall, a1,
+ Operand(s2));
+ }
+ __ Subu(s2, s2, Operand(1));
+ __ sw(s2, MemOperand(s5, kLevelOffset));
+ __ lw(kScratchReg, MemOperand(s5, kLimitOffset));
+ __ Branch(&delete_allocated_handles, ne, s1, Operand(kScratchReg));
+
+ // Leave the API exit frame.
+ __ bind(&leave_exit_frame);
+
+ if (stack_space_operand == nullptr) {
+ DCHECK_NE(stack_space, 0);
+ __ li(s0, Operand(stack_space));
+ } else {
+ DCHECK_EQ(stack_space, 0);
+ // The ExitFrame contains four MIPS argument slots after the call so this
+ // must be accounted for.
+ // TODO(jgruber): Investigate if this is needed by the direct call.
+ __ Drop(kCArgSlotCount);
+ __ lw(s0, *stack_space_operand);
}
- Label fast_elements_case;
- __ Branch(&fast_elements_case, eq, a3, Operand(PACKED_ELEMENTS));
- GenerateInternalArrayConstructorCase(masm, HOLEY_ELEMENTS);
+ static constexpr bool kDontSaveDoubles = false;
+ static constexpr bool kRegisterContainsSlotCount = false;
+ __ LeaveExitFrame(kDontSaveDoubles, s0, NO_EMIT_RETURN,
+ kRegisterContainsSlotCount);
+
+ // Check if the function scheduled an exception.
+ __ LoadRoot(t0, RootIndex::kTheHoleValue);
+ __ li(kScratchReg, ExternalReference::scheduled_exception_address(isolate));
+ __ lw(t1, MemOperand(kScratchReg));
+ __ Branch(&promote_scheduled_exception, ne, t0, Operand(t1));
+
+ __ Ret();
+
+ // Re-throw by promoting a scheduled exception.
+ __ bind(&promote_scheduled_exception);
+ __ TailCallRuntime(Runtime::kPromoteScheduledException);
+
+ // HandleScope limit has changed. Delete allocated extensions.
+ __ bind(&delete_allocated_handles);
+ __ sw(s1, MemOperand(s5, kLimitOffset));
+ __ mov(s0, v0);
+ __ mov(a0, v0);
+ __ PrepareCallCFunction(1, s1);
+ __ li(a0, ExternalReference::isolate_address(isolate));
+ __ CallCFunction(ExternalReference::delete_handle_scope_extensions(), 1);
+ __ mov(v0, s0);
+ __ jmp(&leave_exit_frame);
+}
+
+} // namespace
+
+void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- cp : kTargetContext
+ // -- a1 : kApiFunctionAddress
+ // -- a2 : kArgc
+ // --
+ // -- sp[0] : last argument
+ // -- ...
+ // -- sp[(argc - 1) * 4] : first argument
+ // -- sp[(argc + 0) * 4] : receiver
+ // -- sp[(argc + 1) * 4] : kHolder
+ // -- sp[(argc + 2) * 4] : kCallData
+ // -----------------------------------
- __ bind(&fast_elements_case);
- GenerateInternalArrayConstructorCase(masm, PACKED_ELEMENTS);
+ Register api_function_address = a1;
+ Register argc = a2;
+ Register scratch = t0;
+ Register base = t1; // For addressing MemOperands on the stack.
+
+ DCHECK(!AreAliased(api_function_address, argc, scratch, base));
+
+ // Stack offsets (without argc).
+ static constexpr int kReceiverOffset = 0 * kPointerSize;
+ static constexpr int kHolderOffset = kReceiverOffset + kPointerSize;
+ static constexpr int kCallDataOffset = kHolderOffset + kPointerSize;
+
+ // Extra stack arguments are: the receiver, kHolder, kCallData.
+ static constexpr int kExtraStackArgumentCount = 3;
+
+ typedef FunctionCallbackArguments FCA;
+
+ STATIC_ASSERT(FCA::kArgsLength == 6);
+ STATIC_ASSERT(FCA::kNewTargetIndex == 5);
+ STATIC_ASSERT(FCA::kDataIndex == 4);
+ STATIC_ASSERT(FCA::kReturnValueOffset == 3);
+ STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
+ STATIC_ASSERT(FCA::kIsolateIndex == 1);
+ STATIC_ASSERT(FCA::kHolderIndex == 0);
+
+ // Set up FunctionCallbackInfo's implicit_args on the stack as follows:
+ //
+ // Target state:
+ // sp[0 * kPointerSize]: kHolder
+ // sp[1 * kPointerSize]: kIsolate
+ // sp[2 * kPointerSize]: undefined (kReturnValueDefaultValue)
+ // sp[3 * kPointerSize]: undefined (kReturnValue)
+ // sp[4 * kPointerSize]: kData
+ // sp[5 * kPointerSize]: undefined (kNewTarget)
+
+ // Set up the base register for addressing through MemOperands. It will point
+ // at the receiver (located at sp + argc * kPointerSize).
+ __ Lsa(base, sp, argc, kPointerSizeLog2);
+
+ // Reserve space on the stack.
+ __ Subu(sp, sp, Operand(FCA::kArgsLength * kPointerSize));
+
+ // kHolder.
+ __ lw(scratch, MemOperand(base, kHolderOffset));
+ __ sw(scratch, MemOperand(sp, 0 * kPointerSize));
+
+ // kIsolate.
+ __ li(scratch, ExternalReference::isolate_address(masm->isolate()));
+ __ sw(scratch, MemOperand(sp, 1 * kPointerSize));
+
+ // kReturnValueDefaultValue, kReturnValue, and kNewTarget.
+ __ LoadRoot(scratch, RootIndex::kUndefinedValue);
+ __ sw(scratch, MemOperand(sp, 2 * kPointerSize));
+ __ sw(scratch, MemOperand(sp, 3 * kPointerSize));
+ __ sw(scratch, MemOperand(sp, 5 * kPointerSize));
+
+ // kData.
+ __ lw(scratch, MemOperand(base, kCallDataOffset));
+ __ sw(scratch, MemOperand(sp, 4 * kPointerSize));
+
+ // Keep a pointer to kHolder (= implicit_args) in a scratch register.
+ // We use it below to set up the FunctionCallbackInfo object.
+ __ mov(scratch, sp);
+
+ // Allocate the v8::FunctionCallbackInfo structure in the arguments' space,
+ // since it's not controlled by GC.
+ static constexpr int kApiStackSpace = 4;
+ static constexpr bool kDontSaveDoubles = false;
+ FrameScope frame_scope(masm, StackFrame::MANUAL);
+ __ EnterExitFrame(kDontSaveDoubles, kApiStackSpace);
+
+ // FunctionCallbackInfo::implicit_args_ (points at kHolder as set up above).
+ // Arguments are after the return address (pushed by EnterExitFrame()).
+ __ sw(scratch, MemOperand(sp, 1 * kPointerSize));
+
+ // FunctionCallbackInfo::values_ (points at the first varargs argument passed
+ // on the stack).
+ __ Subu(scratch, base, Operand(1 * kPointerSize));
+ __ sw(scratch, MemOperand(sp, 2 * kPointerSize));
+
+ // FunctionCallbackInfo::length_.
+ __ sw(argc, MemOperand(sp, 3 * kPointerSize));
+
+ // We also store the number of stack slots to drop after returning from the
+ // API function here.
+ // Note: Unlike on other architectures, this is a slot count rather than a
+ // byte count.
+ __ Addu(scratch, argc, Operand(FCA::kArgsLength + kExtraStackArgumentCount));
+ __ sw(scratch, MemOperand(sp, 4 * kPointerSize));
+
+ // v8::InvocationCallback's argument.
+ DCHECK(!AreAliased(api_function_address, scratch, a0));
+ __ Addu(a0, sp, Operand(1 * kPointerSize));
+
+ ExternalReference thunk_ref = ExternalReference::invoke_function_callback();
+
+ // There are two stack slots above the arguments we constructed on the stack.
+ // TODO(jgruber): Document what these arguments are.
+ static constexpr int kStackSlotsAboveFCA = 2;
+ MemOperand return_value_operand(
+ fp, (kStackSlotsAboveFCA + FCA::kReturnValueOffset) * kPointerSize);
+
+ static constexpr int kUseStackSpaceOperand = 0;
+ MemOperand stack_space_operand(sp, 4 * kPointerSize);
+
+ AllowExternalCallThatCantCauseGC scope(masm);
+ CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
+ kUseStackSpaceOperand, &stack_space_operand,
+ return_value_operand);
+}
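For orientation, the implicit_args block written above lays out exactly per the
FCA indices asserted earlier. An illustrative C++ view of that layout (Address
stands in for a tagged or external pointer; this is not a V8 type):

    #include <cstdint>
    using Address = uintptr_t;

    struct ImplicitArgs {            // one kPointerSize slot each
      Address holder;                // sp[0]: kHolder, copied from the caller
      Address isolate;               // sp[1]: kIsolate
      Address return_value_default;  // sp[2]: undefined
      Address return_value;          // sp[3]: undefined
      Address data;                  // sp[4]: kCallData, copied from the caller
      Address new_target;            // sp[5]: undefined
    };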
+
+void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
+ // Build the v8::PropertyCallbackInfo::args_ array on the stack and push the
+ // property name below the exit frame to make the GC aware of them.
+ STATIC_ASSERT(PropertyCallbackArguments::kShouldThrowOnErrorIndex == 0);
+ STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 1);
+ STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 2);
+ STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 3);
+ STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 4);
+ STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 5);
+ STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 6);
+ STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 7);
+
+ Register receiver = ApiGetterDescriptor::ReceiverRegister();
+ Register holder = ApiGetterDescriptor::HolderRegister();
+ Register callback = ApiGetterDescriptor::CallbackRegister();
+ Register scratch = t0;
+ DCHECK(!AreAliased(receiver, holder, callback, scratch));
+
+ Register api_function_address = a2;
+
+ // Here and below +1 is for name() pushed after the args_ array.
+ typedef PropertyCallbackArguments PCA;
+ __ Subu(sp, sp, (PCA::kArgsLength + 1) * kPointerSize);
+ __ sw(receiver, MemOperand(sp, (PCA::kThisIndex + 1) * kPointerSize));
+ __ lw(scratch, FieldMemOperand(callback, AccessorInfo::kDataOffset));
+ __ sw(scratch, MemOperand(sp, (PCA::kDataIndex + 1) * kPointerSize));
+ __ LoadRoot(scratch, RootIndex::kUndefinedValue);
+ __ sw(scratch, MemOperand(sp, (PCA::kReturnValueOffset + 1) * kPointerSize));
+ __ sw(scratch, MemOperand(sp, (PCA::kReturnValueDefaultValueIndex + 1) *
+ kPointerSize));
+ __ li(scratch, ExternalReference::isolate_address(masm->isolate()));
+ __ sw(scratch, MemOperand(sp, (PCA::kIsolateIndex + 1) * kPointerSize));
+ __ sw(holder, MemOperand(sp, (PCA::kHolderIndex + 1) * kPointerSize));
+ // should_throw_on_error -> false
+ DCHECK_EQ(0, Smi::kZero.ptr());
+ __ sw(zero_reg,
+ MemOperand(sp, (PCA::kShouldThrowOnErrorIndex + 1) * kPointerSize));
+ __ lw(scratch, FieldMemOperand(callback, AccessorInfo::kNameOffset));
+ __ sw(scratch, MemOperand(sp, 0 * kPointerSize));
+
+ // v8::PropertyCallbackInfo::args_ array and name handle.
+ const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
+
+ // Load address of v8::PropertyCallbackInfo::args_ array and name handle.
+ __ mov(a0, sp); // a0 = Handle<Name>
+ __ Addu(a1, a0, Operand(1 * kPointerSize)); // a1 = v8::PCI::args_
+
+ const int kApiStackSpace = 1;
+ FrameScope frame_scope(masm, StackFrame::MANUAL);
+ __ EnterExitFrame(false, kApiStackSpace);
+
+ // Create v8::PropertyCallbackInfo object on the stack and initialize
+ // its args_ field.
+ __ sw(a1, MemOperand(sp, 1 * kPointerSize));
+ __ Addu(a1, sp, Operand(1 * kPointerSize)); // a1 = v8::PropertyCallbackInfo&
+
+ ExternalReference thunk_ref =
+ ExternalReference::invoke_accessor_getter_callback();
+
+ __ lw(scratch, FieldMemOperand(callback, AccessorInfo::kJsGetterOffset));
+ __ lw(api_function_address,
+ FieldMemOperand(scratch, Foreign::kForeignAddressOffset));
+
+ // +3 is to skip the prologue, the return address, and the name handle.
+ MemOperand return_value_operand(
+ fp, (PropertyCallbackArguments::kReturnValueOffset + 3) * kPointerSize);
+ MemOperand* const kUseStackSpaceConstant = nullptr;
+ CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
+ kStackUnwindSpace, kUseStackSpaceConstant,
+ return_value_operand);
+}
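Likewise, the getter's stack area built above is the PCA array with the property
name stored one slot below it, which is where the recurring +1 offsets come
from. Illustrative only:

    #include <cstdint>
    using Address = uintptr_t;

    struct GetterStackArea {
      Address name;     // sp[0]: the name handle target; a0 points here
      Address args[7];  // sp[1..7]: PCA array; a1 points at args[0]
                        // args[0] = should_throw_on_error (Smi 0 = false)
                        // args[1] = holder, ..., args[6] = receiver
    };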
+
+void Builtins::Generate_DirectCEntry(MacroAssembler* masm) {
+ // The sole purpose of DirectCEntry is for movable callers (e.g. any general
+ // purpose Code object) to be able to call into C functions that may trigger
+ // GC and thus move the caller.
+ //
+ // DirectCEntry places the return address on the stack (updated by the GC),
+ // making the call GC safe. The irregexp backend relies on this.
+
+ // Make room for the arguments required by the C calling convention. Callers
+ // use EnterExitFrame/LeaveExitFrame, so they handle stack restoring and we
+ // don't have to do that here. Any caller must drop kCArgsSlotsSize stack
+ // space after the call.
+ __ Subu(sp, sp, Operand(kCArgsSlotsSize));
+
+ __ sw(ra, MemOperand(sp, kCArgsSlotsSize)); // Store the return address.
+ __ Call(t9); // Call the C++ function.
+ __ lw(t9, MemOperand(sp, kCArgsSlotsSize)); // Return to calling code.
+
+ if (FLAG_debug_code && FLAG_enable_slow_asserts) {
+ // In case of an error the return address may point to a memory area
+ // filled with kZapValue by the GC. Dereference the address and check for
+ // this.
+ __ lw(t0, MemOperand(t9));
+ __ Assert(ne, AbortReason::kReceivedInvalidReturnAddress, t0,
+ Operand(reinterpret_cast<uint32_t>(kZapValue)));
+ }
+
+ __ Jump(t9);
+}
+
+void Builtins::Generate_MemCopyUint8Uint8(MacroAssembler* masm) {
+ // This code assumes that cache lines are 32 bytes; if the cache line is
+ // larger, it will not work correctly.
+ {
+ Label lastb, unaligned, aligned, chkw, loop16w, chk1w, wordCopy_loop,
+ skip_pref, lastbloop, leave, ua_chk16w, ua_loop16w, ua_skip_pref,
+ ua_chkw, ua_chk1w, ua_wordCopy_loop, ua_smallCopy, ua_smallCopy_loop;
+
+ // The size of each prefetch.
+ uint32_t pref_chunk = 32;
+ // The maximum size of a prefetch; it must not be less than pref_chunk.
+ // If the real size of a prefetch is greater than max_pref_size and
+ // the kPrefHintPrepareForStore hint is used, the code will not work
+ // correctly.
+ uint32_t max_pref_size = 128;
+ DCHECK(pref_chunk < max_pref_size);
+
+ // pref_limit is set based on the fact that we never use an offset
+ // greater than 5 on a store pref and that a single pref can
+ // never be larger than max_pref_size.
+ uint32_t pref_limit = (5 * pref_chunk) + max_pref_size;
+ int32_t pref_hint_load = kPrefHintLoadStreamed;
+ int32_t pref_hint_store = kPrefHintPrepareForStore;
+ uint32_t loadstore_chunk = 4;
+
+ // The initial prefetches may fetch bytes that are before the buffer being
+ // copied. Start copies with an offset of 4 to avoid this situation when
+ // using kPrefHintPrepareForStore.
+ DCHECK(pref_hint_store != kPrefHintPrepareForStore ||
+ pref_chunk * 4 >= max_pref_size);
+
+ // If the size is less than 8, go to lastb. Regardless of size,
+ // copy the dst pointer to v0 for the return value.
+ __ slti(t2, a2, 2 * loadstore_chunk);
+ __ bne(t2, zero_reg, &lastb);
+ __ mov(v0, a0); // In delay slot.
+
+ // If src and dst have different alignments, go to unaligned; if they
+ // have the same alignment (but are not actually aligned), do a partial
+ // load/store to make them aligned. If they are both already aligned,
+ // we can start copying at aligned.
+ __ xor_(t8, a1, a0);
+ __ andi(t8, t8, loadstore_chunk - 1); // t8 is a0/a1 word-displacement.
+ __ bne(t8, zero_reg, &unaligned);
+ __ subu(a3, zero_reg, a0); // In delay slot.
+
+ __ andi(a3, a3, loadstore_chunk - 1); // Copy a3 bytes to align a0/a1.
+ __ beq(a3, zero_reg, &aligned); // Already aligned.
+ __ subu(a2, a2, a3); // In delay slot. a2 is the remaining bytes count.
+
+ if (kArchEndian == kLittle) {
+ __ lwr(t8, MemOperand(a1));
+ __ addu(a1, a1, a3);
+ __ swr(t8, MemOperand(a0));
+ __ addu(a0, a0, a3);
+ } else {
+ __ lwl(t8, MemOperand(a1));
+ __ addu(a1, a1, a3);
+ __ swl(t8, MemOperand(a0));
+ __ addu(a0, a0, a3);
+ }
+ // Now dst and src are both word-aligned. Set a2 to count how many bytes
+ // we have to copy after all the 64-byte chunks are copied and a3 to the
+ // dst pointer after all the 64-byte chunks have been copied. We will
+ // loop, incrementing a0 and a1 until a0 equals a3.
+ __ bind(&aligned);
+ __ andi(t8, a2, 0x3F);
+ __ beq(a2, t8, &chkw); // Less than 64?
+ __ subu(a3, a2, t8); // In delay slot.
+ __ addu(a3, a0, a3); // Now a3 is the final dst after loop.
+
+ // When in the loop we prefetch with the kPrefHintPrepareForStore hint;
+ // in this case a0+x should be past the "t0-32" address. This means:
+ // for x=128 the last "safe" a0 address is "t0-160". Alternatively, for
+ // x=64 the last "safe" a0 address is "t0-96". In the current version we
+ // will use "pref hint, 128(a0)", so "t0-160" is the limit.
+ if (pref_hint_store == kPrefHintPrepareForStore) {
+ __ addu(t0, a0, a2); // t0 is the "past the end" address.
+ __ Subu(t9, t0, pref_limit); // t9 is the "last safe pref" address.
+ }
+
+ __ Pref(pref_hint_load, MemOperand(a1, 0 * pref_chunk));
+ __ Pref(pref_hint_load, MemOperand(a1, 1 * pref_chunk));
+ __ Pref(pref_hint_load, MemOperand(a1, 2 * pref_chunk));
+ __ Pref(pref_hint_load, MemOperand(a1, 3 * pref_chunk));
+
+ if (pref_hint_store != kPrefHintPrepareForStore) {
+ __ Pref(pref_hint_store, MemOperand(a0, 1 * pref_chunk));
+ __ Pref(pref_hint_store, MemOperand(a0, 2 * pref_chunk));
+ __ Pref(pref_hint_store, MemOperand(a0, 3 * pref_chunk));
+ }
+ __ bind(&loop16w);
+ __ lw(t0, MemOperand(a1));
+
+ if (pref_hint_store == kPrefHintPrepareForStore) {
+ __ sltu(v1, t9, a0); // If a0 > t9, don't use next prefetch.
+ __ Branch(USE_DELAY_SLOT, &skip_pref, gt, v1, Operand(zero_reg));
+ }
+ __ lw(t1, MemOperand(a1, 1, loadstore_chunk)); // Maybe in delay slot.
+
+ __ Pref(pref_hint_store, MemOperand(a0, 4 * pref_chunk));
+ __ Pref(pref_hint_store, MemOperand(a0, 5 * pref_chunk));
+
+ __ bind(&skip_pref);
+ __ lw(t2, MemOperand(a1, 2, loadstore_chunk));
+ __ lw(t3, MemOperand(a1, 3, loadstore_chunk));
+ __ lw(t4, MemOperand(a1, 4, loadstore_chunk));
+ __ lw(t5, MemOperand(a1, 5, loadstore_chunk));
+ __ lw(t6, MemOperand(a1, 6, loadstore_chunk));
+ __ lw(t7, MemOperand(a1, 7, loadstore_chunk));
+ __ Pref(pref_hint_load, MemOperand(a1, 4 * pref_chunk));
+
+ __ sw(t0, MemOperand(a0));
+ __ sw(t1, MemOperand(a0, 1, loadstore_chunk));
+ __ sw(t2, MemOperand(a0, 2, loadstore_chunk));
+ __ sw(t3, MemOperand(a0, 3, loadstore_chunk));
+ __ sw(t4, MemOperand(a0, 4, loadstore_chunk));
+ __ sw(t5, MemOperand(a0, 5, loadstore_chunk));
+ __ sw(t6, MemOperand(a0, 6, loadstore_chunk));
+ __ sw(t7, MemOperand(a0, 7, loadstore_chunk));
+
+ __ lw(t0, MemOperand(a1, 8, loadstore_chunk));
+ __ lw(t1, MemOperand(a1, 9, loadstore_chunk));
+ __ lw(t2, MemOperand(a1, 10, loadstore_chunk));
+ __ lw(t3, MemOperand(a1, 11, loadstore_chunk));
+ __ lw(t4, MemOperand(a1, 12, loadstore_chunk));
+ __ lw(t5, MemOperand(a1, 13, loadstore_chunk));
+ __ lw(t6, MemOperand(a1, 14, loadstore_chunk));
+ __ lw(t7, MemOperand(a1, 15, loadstore_chunk));
+ __ Pref(pref_hint_load, MemOperand(a1, 5 * pref_chunk));
+
+ __ sw(t0, MemOperand(a0, 8, loadstore_chunk));
+ __ sw(t1, MemOperand(a0, 9, loadstore_chunk));
+ __ sw(t2, MemOperand(a0, 10, loadstore_chunk));
+ __ sw(t3, MemOperand(a0, 11, loadstore_chunk));
+ __ sw(t4, MemOperand(a0, 12, loadstore_chunk));
+ __ sw(t5, MemOperand(a0, 13, loadstore_chunk));
+ __ sw(t6, MemOperand(a0, 14, loadstore_chunk));
+ __ sw(t7, MemOperand(a0, 15, loadstore_chunk));
+ __ addiu(a0, a0, 16 * loadstore_chunk);
+ __ bne(a0, a3, &loop16w);
+ __ addiu(a1, a1, 16 * loadstore_chunk); // In delay slot.
+ __ mov(a2, t8);
+
+ // Here we have src and dest word-aligned but less than 64 bytes to go.
+ // Check for a 32-byte chunk and copy it if there is one. Otherwise jump
+ // down to chk1w to handle the tail end of the copy.
+ __ bind(&chkw);
+ __ Pref(pref_hint_load, MemOperand(a1, 0 * pref_chunk));
+ __ andi(t8, a2, 0x1F);
+ __ beq(a2, t8, &chk1w); // Less than 32?
+ __ nop(); // In delay slot.
+ __ lw(t0, MemOperand(a1));
+ __ lw(t1, MemOperand(a1, 1, loadstore_chunk));
+ __ lw(t2, MemOperand(a1, 2, loadstore_chunk));
+ __ lw(t3, MemOperand(a1, 3, loadstore_chunk));
+ __ lw(t4, MemOperand(a1, 4, loadstore_chunk));
+ __ lw(t5, MemOperand(a1, 5, loadstore_chunk));
+ __ lw(t6, MemOperand(a1, 6, loadstore_chunk));
+ __ lw(t7, MemOperand(a1, 7, loadstore_chunk));
+ __ addiu(a1, a1, 8 * loadstore_chunk);
+ __ sw(t0, MemOperand(a0));
+ __ sw(t1, MemOperand(a0, 1, loadstore_chunk));
+ __ sw(t2, MemOperand(a0, 2, loadstore_chunk));
+ __ sw(t3, MemOperand(a0, 3, loadstore_chunk));
+ __ sw(t4, MemOperand(a0, 4, loadstore_chunk));
+ __ sw(t5, MemOperand(a0, 5, loadstore_chunk));
+ __ sw(t6, MemOperand(a0, 6, loadstore_chunk));
+ __ sw(t7, MemOperand(a0, 7, loadstore_chunk));
+ __ addiu(a0, a0, 8 * loadstore_chunk);
+
+ // Here we have less than 32 bytes to copy. Set up for a loop to copy
+ // one word at a time. Set a2 to count how many bytes we have to copy
+ // after all the word chunks are copied and a3 to the dst pointer after
+ // all the word chunks have been copied. We will loop, incrementing a0
+ // and a1 until a0 equals a3.
+ __ bind(&chk1w);
+ __ andi(a2, t8, loadstore_chunk - 1);
+ __ beq(a2, t8, &lastb);
+ __ subu(a3, t8, a2); // In delay slot.
+ __ addu(a3, a0, a3);
+
+ __ bind(&wordCopy_loop);
+ __ lw(t3, MemOperand(a1));
+ __ addiu(a0, a0, loadstore_chunk);
+ __ addiu(a1, a1, loadstore_chunk);
+ __ bne(a0, a3, &wordCopy_loop);
+ __ sw(t3, MemOperand(a0, -1, loadstore_chunk)); // In delay slot.
+
+ __ bind(&lastb);
+ __ Branch(&leave, le, a2, Operand(zero_reg));
+ __ addu(a3, a0, a2);
+
+ __ bind(&lastbloop);
+ __ lb(v1, MemOperand(a1));
+ __ addiu(a0, a0, 1);
+ __ addiu(a1, a1, 1);
+ __ bne(a0, a3, &lastbloop);
+ __ sb(v1, MemOperand(a0, -1)); // In delay slot.
+
+ __ bind(&leave);
+ __ jr(ra);
+ __ nop();
+
+ // Unaligned case. Only the dst gets aligned so we need to do partial
+ // loads of the source followed by normal stores to the dst (once we
+ // have aligned the destination).
+ __ bind(&unaligned);
+ __ andi(a3, a3, loadstore_chunk - 1); // Copy a3 bytes to align a0/a1.
+ __ beq(a3, zero_reg, &ua_chk16w);
+ __ subu(a2, a2, a3); // In delay slot.
+
+ if (kArchEndian == kLittle) {
+ __ lwr(v1, MemOperand(a1));
+ __ lwl(v1,
+ MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
+ __ addu(a1, a1, a3);
+ __ swr(v1, MemOperand(a0));
+ __ addu(a0, a0, a3);
+ } else {
+ __ lwl(v1, MemOperand(a1));
+ __ lwr(v1,
+ MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
+ __ addu(a1, a1, a3);
+ __ swl(v1, MemOperand(a0));
+ __ addu(a0, a0, a3);
+ }
+
+ // Now the dst (but not the source) is aligned. Set a2 to count how many
+ // bytes we have to copy after all the 64-byte chunks are copied and a3 to
+ // the dst pointer after all the 64-byte chunks have been copied. We will
+ // loop, incrementing a0 and a1 until a0 equals a3.
+ __ bind(&ua_chk16w);
+ __ andi(t8, a2, 0x3F);
+ __ beq(a2, t8, &ua_chkw);
+ __ subu(a3, a2, t8); // In delay slot.
+ __ addu(a3, a0, a3);
+
+ if (pref_hint_store == kPrefHintPrepareForStore) {
+ __ addu(t0, a0, a2);
+ __ Subu(t9, t0, pref_limit);
+ }
+
+ __ Pref(pref_hint_load, MemOperand(a1, 0 * pref_chunk));
+ __ Pref(pref_hint_load, MemOperand(a1, 1 * pref_chunk));
+ __ Pref(pref_hint_load, MemOperand(a1, 2 * pref_chunk));
+
+ if (pref_hint_store != kPrefHintPrepareForStore) {
+ __ Pref(pref_hint_store, MemOperand(a0, 1 * pref_chunk));
+ __ Pref(pref_hint_store, MemOperand(a0, 2 * pref_chunk));
+ __ Pref(pref_hint_store, MemOperand(a0, 3 * pref_chunk));
+ }
+
+ __ bind(&ua_loop16w);
+ __ Pref(pref_hint_load, MemOperand(a1, 3 * pref_chunk));
+ if (kArchEndian == kLittle) {
+ __ lwr(t0, MemOperand(a1));
+ __ lwr(t1, MemOperand(a1, 1, loadstore_chunk));
+ __ lwr(t2, MemOperand(a1, 2, loadstore_chunk));
+
+ if (pref_hint_store == kPrefHintPrepareForStore) {
+ __ sltu(v1, t9, a0);
+ __ Branch(USE_DELAY_SLOT, &ua_skip_pref, gt, v1, Operand(zero_reg));
+ }
+ __ lwr(t3, MemOperand(a1, 3, loadstore_chunk)); // Maybe in delay slot.
+
+ __ Pref(pref_hint_store, MemOperand(a0, 4 * pref_chunk));
+ __ Pref(pref_hint_store, MemOperand(a0, 5 * pref_chunk));
+
+ __ bind(&ua_skip_pref);
+ __ lwr(t4, MemOperand(a1, 4, loadstore_chunk));
+ __ lwr(t5, MemOperand(a1, 5, loadstore_chunk));
+ __ lwr(t6, MemOperand(a1, 6, loadstore_chunk));
+ __ lwr(t7, MemOperand(a1, 7, loadstore_chunk));
+ __ lwl(t0,
+ MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwl(t1,
+ MemOperand(a1, 2, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwl(t2,
+ MemOperand(a1, 3, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwl(t3,
+ MemOperand(a1, 4, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwl(t4,
+ MemOperand(a1, 5, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwl(t5,
+ MemOperand(a1, 6, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwl(t6,
+ MemOperand(a1, 7, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwl(t7,
+ MemOperand(a1, 8, loadstore_chunk, MemOperand::offset_minus_one));
+ } else {
+ __ lwl(t0, MemOperand(a1));
+ __ lwl(t1, MemOperand(a1, 1, loadstore_chunk));
+ __ lwl(t2, MemOperand(a1, 2, loadstore_chunk));
+
+ if (pref_hint_store == kPrefHintPrepareForStore) {
+ __ sltu(v1, t9, a0);
+ __ Branch(USE_DELAY_SLOT, &ua_skip_pref, gt, v1, Operand(zero_reg));
+ }
+ __ lwl(t3, MemOperand(a1, 3, loadstore_chunk)); // Maybe in delay slot.
+
+ __ Pref(pref_hint_store, MemOperand(a0, 4 * pref_chunk));
+ __ Pref(pref_hint_store, MemOperand(a0, 5 * pref_chunk));
+
+ __ bind(&ua_skip_pref);
+ __ lwl(t4, MemOperand(a1, 4, loadstore_chunk));
+ __ lwl(t5, MemOperand(a1, 5, loadstore_chunk));
+ __ lwl(t6, MemOperand(a1, 6, loadstore_chunk));
+ __ lwl(t7, MemOperand(a1, 7, loadstore_chunk));
+ __ lwr(t0,
+ MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwr(t1,
+ MemOperand(a1, 2, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwr(t2,
+ MemOperand(a1, 3, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwr(t3,
+ MemOperand(a1, 4, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwr(t4,
+ MemOperand(a1, 5, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwr(t5,
+ MemOperand(a1, 6, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwr(t6,
+ MemOperand(a1, 7, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwr(t7,
+ MemOperand(a1, 8, loadstore_chunk, MemOperand::offset_minus_one));
+ }
+ __ Pref(pref_hint_load, MemOperand(a1, 4 * pref_chunk));
+ __ sw(t0, MemOperand(a0));
+ __ sw(t1, MemOperand(a0, 1, loadstore_chunk));
+ __ sw(t2, MemOperand(a0, 2, loadstore_chunk));
+ __ sw(t3, MemOperand(a0, 3, loadstore_chunk));
+ __ sw(t4, MemOperand(a0, 4, loadstore_chunk));
+ __ sw(t5, MemOperand(a0, 5, loadstore_chunk));
+ __ sw(t6, MemOperand(a0, 6, loadstore_chunk));
+ __ sw(t7, MemOperand(a0, 7, loadstore_chunk));
+ if (kArchEndian == kLittle) {
+ __ lwr(t0, MemOperand(a1, 8, loadstore_chunk));
+ __ lwr(t1, MemOperand(a1, 9, loadstore_chunk));
+ __ lwr(t2, MemOperand(a1, 10, loadstore_chunk));
+ __ lwr(t3, MemOperand(a1, 11, loadstore_chunk));
+ __ lwr(t4, MemOperand(a1, 12, loadstore_chunk));
+ __ lwr(t5, MemOperand(a1, 13, loadstore_chunk));
+ __ lwr(t6, MemOperand(a1, 14, loadstore_chunk));
+ __ lwr(t7, MemOperand(a1, 15, loadstore_chunk));
+ __ lwl(t0,
+ MemOperand(a1, 9, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwl(t1,
+ MemOperand(a1, 10, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwl(t2,
+ MemOperand(a1, 11, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwl(t3,
+ MemOperand(a1, 12, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwl(t4,
+ MemOperand(a1, 13, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwl(t5,
+ MemOperand(a1, 14, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwl(t6,
+ MemOperand(a1, 15, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwl(t7,
+ MemOperand(a1, 16, loadstore_chunk, MemOperand::offset_minus_one));
+ } else {
+ __ lwl(t0, MemOperand(a1, 8, loadstore_chunk));
+ __ lwl(t1, MemOperand(a1, 9, loadstore_chunk));
+ __ lwl(t2, MemOperand(a1, 10, loadstore_chunk));
+ __ lwl(t3, MemOperand(a1, 11, loadstore_chunk));
+ __ lwl(t4, MemOperand(a1, 12, loadstore_chunk));
+ __ lwl(t5, MemOperand(a1, 13, loadstore_chunk));
+ __ lwl(t6, MemOperand(a1, 14, loadstore_chunk));
+ __ lwl(t7, MemOperand(a1, 15, loadstore_chunk));
+ __ lwr(t0,
+ MemOperand(a1, 9, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwr(t1,
+ MemOperand(a1, 10, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwr(t2,
+ MemOperand(a1, 11, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwr(t3,
+ MemOperand(a1, 12, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwr(t4,
+ MemOperand(a1, 13, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwr(t5,
+ MemOperand(a1, 14, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwr(t6,
+ MemOperand(a1, 15, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwr(t7,
+ MemOperand(a1, 16, loadstore_chunk, MemOperand::offset_minus_one));
+ }
+ __ Pref(pref_hint_load, MemOperand(a1, 5 * pref_chunk));
+ __ sw(t0, MemOperand(a0, 8, loadstore_chunk));
+ __ sw(t1, MemOperand(a0, 9, loadstore_chunk));
+ __ sw(t2, MemOperand(a0, 10, loadstore_chunk));
+ __ sw(t3, MemOperand(a0, 11, loadstore_chunk));
+ __ sw(t4, MemOperand(a0, 12, loadstore_chunk));
+ __ sw(t5, MemOperand(a0, 13, loadstore_chunk));
+ __ sw(t6, MemOperand(a0, 14, loadstore_chunk));
+ __ sw(t7, MemOperand(a0, 15, loadstore_chunk));
+ __ addiu(a0, a0, 16 * loadstore_chunk);
+ __ bne(a0, a3, &ua_loop16w);
+ __ addiu(a1, a1, 16 * loadstore_chunk); // In delay slot.
+ __ mov(a2, t8);
+
+ // Here we have less than 64 bytes to copy. Check for
+ // a 32-byte chunk and copy it if there is one. Otherwise jump down to
+ // ua_chk1w to handle the tail end of the copy.
+ __ bind(&ua_chkw);
+ __ Pref(pref_hint_load, MemOperand(a1));
+ __ andi(t8, a2, 0x1F);
+
+ __ beq(a2, t8, &ua_chk1w);
+ __ nop(); // In delay slot.
+ if (kArchEndian == kLittle) {
+ __ lwr(t0, MemOperand(a1));
+ __ lwr(t1, MemOperand(a1, 1, loadstore_chunk));
+ __ lwr(t2, MemOperand(a1, 2, loadstore_chunk));
+ __ lwr(t3, MemOperand(a1, 3, loadstore_chunk));
+ __ lwr(t4, MemOperand(a1, 4, loadstore_chunk));
+ __ lwr(t5, MemOperand(a1, 5, loadstore_chunk));
+ __ lwr(t6, MemOperand(a1, 6, loadstore_chunk));
+ __ lwr(t7, MemOperand(a1, 7, loadstore_chunk));
+ __ lwl(t0,
+ MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwl(t1,
+ MemOperand(a1, 2, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwl(t2,
+ MemOperand(a1, 3, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwl(t3,
+ MemOperand(a1, 4, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwl(t4,
+ MemOperand(a1, 5, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwl(t5,
+ MemOperand(a1, 6, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwl(t6,
+ MemOperand(a1, 7, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwl(t7,
+ MemOperand(a1, 8, loadstore_chunk, MemOperand::offset_minus_one));
+ } else {
+ __ lwl(t0, MemOperand(a1));
+ __ lwl(t1, MemOperand(a1, 1, loadstore_chunk));
+ __ lwl(t2, MemOperand(a1, 2, loadstore_chunk));
+ __ lwl(t3, MemOperand(a1, 3, loadstore_chunk));
+ __ lwl(t4, MemOperand(a1, 4, loadstore_chunk));
+ __ lwl(t5, MemOperand(a1, 5, loadstore_chunk));
+ __ lwl(t6, MemOperand(a1, 6, loadstore_chunk));
+ __ lwl(t7, MemOperand(a1, 7, loadstore_chunk));
+ __ lwr(t0,
+ MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwr(t1,
+ MemOperand(a1, 2, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwr(t2,
+ MemOperand(a1, 3, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwr(t3,
+ MemOperand(a1, 4, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwr(t4,
+ MemOperand(a1, 5, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwr(t5,
+ MemOperand(a1, 6, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwr(t6,
+ MemOperand(a1, 7, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwr(t7,
+ MemOperand(a1, 8, loadstore_chunk, MemOperand::offset_minus_one));
+ }
+ __ addiu(a1, a1, 8 * loadstore_chunk);
+ __ sw(t0, MemOperand(a0));
+ __ sw(t1, MemOperand(a0, 1, loadstore_chunk));
+ __ sw(t2, MemOperand(a0, 2, loadstore_chunk));
+ __ sw(t3, MemOperand(a0, 3, loadstore_chunk));
+ __ sw(t4, MemOperand(a0, 4, loadstore_chunk));
+ __ sw(t5, MemOperand(a0, 5, loadstore_chunk));
+ __ sw(t6, MemOperand(a0, 6, loadstore_chunk));
+ __ sw(t7, MemOperand(a0, 7, loadstore_chunk));
+ __ addiu(a0, a0, 8 * loadstore_chunk);
+
+ // Less than 32 bytes to copy. Set up for a loop to
+ // copy one word at a time.
+ __ bind(&ua_chk1w);
+ __ andi(a2, t8, loadstore_chunk - 1);
+ __ beq(a2, t8, &ua_smallCopy);
+ __ subu(a3, t8, a2); // In delay slot.
+ __ addu(a3, a0, a3);
+
+ __ bind(&ua_wordCopy_loop);
+ if (kArchEndian == kLittle) {
+ __ lwr(v1, MemOperand(a1));
+ __ lwl(v1,
+ MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
+ } else {
+ __ lwl(v1, MemOperand(a1));
+ __ lwr(v1,
+ MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
+ }
+ __ addiu(a0, a0, loadstore_chunk);
+ __ addiu(a1, a1, loadstore_chunk);
+ __ bne(a0, a3, &ua_wordCopy_loop);
+ __ sw(v1, MemOperand(a0, -1, loadstore_chunk)); // In delay slot.
+
+ // Copy the remaining tail bytes.
+ __ bind(&ua_smallCopy);
+ __ beq(a2, zero_reg, &leave);
+ __ addu(a3, a0, a2); // In delay slot.
+
+ __ bind(&ua_smallCopy_loop);
+ __ lb(v1, MemOperand(a1));
+ __ addiu(a0, a0, 1);
+ __ addiu(a1, a1, 1);
+ __ bne(a0, a3, &ua_smallCopy_loop);
+ __ sb(v1, MemOperand(a0, -1)); // In delay slot.
+
+ __ jr(ra);
+ __ nop();
+ }
}
#undef __
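The control flow of Generate_MemCopyUint8Uint8 is easier to follow against a
C-level outline: byte-copy the head until the destination is word-aligned,
stream 64-byte (16-word) chunks, then single words, then the byte tail; the
unaligned-source path does the same with lwl/lwr pairs. A minimal sketch under
those assumptions (no prefetching, and the different-alignment case is reduced
to a plain byte loop):

    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    void* MemCopySketch(void* dst, const void* src, size_t n) {
      uint8_t* d = static_cast<uint8_t*>(dst);
      const uint8_t* s = static_cast<const uint8_t*>(src);
      void* ret = dst;  // v0 receives the dst pointer up front
      if ((reinterpret_cast<uintptr_t>(d) & 3) ==
          (reinterpret_cast<uintptr_t>(s) & 3)) {
        // Same misalignment: byte-copy until both are word-aligned.
        while (n > 0 && (reinterpret_cast<uintptr_t>(d) & 3) != 0) {
          *d++ = *s++; --n;
        }
        while (n >= 64) {  // loop16w: 16 word loads, then 16 word stores
          std::memcpy(d, s, 64); d += 64; s += 64; n -= 64;
        }
        while (n >= 4) {   // wordCopy_loop
          std::memcpy(d, s, 4); d += 4; s += 4; n -= 4;
        }
      }
      while (n > 0) { *d++ = *s++; --n; }  // lastb / unaligned tail
      return ret;
    }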
diff --git a/deps/v8/src/builtins/mips64/OWNERS b/deps/v8/src/builtins/mips64/OWNERS
index 8bbcab4c2d..b455d9ef29 100644
--- a/deps/v8/src/builtins/mips64/OWNERS
+++ b/deps/v8/src/builtins/mips64/OWNERS
@@ -1,2 +1,3 @@
-ibogosavljevic@wavecomp.com
-skovacevic@wavecomp.com
\ No newline at end of file
+arikalo@wavecomp.com
+prudic@wavecomp.com
+skovacevic@wavecomp.com
diff --git a/deps/v8/src/builtins/mips64/builtins-mips64.cc b/deps/v8/src/builtins/mips64/builtins-mips64.cc
index 4f1ba93a99..6826fef162 100644
--- a/deps/v8/src/builtins/mips64/builtins-mips64.cc
+++ b/deps/v8/src/builtins/mips64/builtins-mips64.cc
@@ -4,16 +4,22 @@
#if V8_TARGET_ARCH_MIPS64
+#include "src/api-arguments.h"
#include "src/code-factory.h"
-#include "src/code-stubs.h"
#include "src/counters.h"
#include "src/debug/debug.h"
#include "src/deoptimizer.h"
#include "src/frame-constants.h"
#include "src/frames.h"
+#include "src/macro-assembler-inl.h"
#include "src/mips64/constants-mips64.h"
#include "src/objects-inl.h"
+#include "src/objects/cell.h"
+#include "src/objects/foreign.h"
+#include "src/objects/heap-number.h"
#include "src/objects/js-generator.h"
+#include "src/objects/smi.h"
+#include "src/register-configuration.h"
#include "src/runtime/runtime.h"
#include "src/wasm/wasm-objects.h"
@@ -154,6 +160,22 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
__ Ret();
}
+static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
+ Register scratch1, Register scratch2,
+ Label* stack_overflow) {
+ // Check the stack for overflow. We are not trying to catch
+ // interruptions (e.g. debug break and preemption) here, so the "real stack
+ // limit" is checked.
+ __ LoadRoot(scratch1, RootIndex::kRealStackLimit);
+ // Make scratch1 the space we have left. The stack might already be overflowed
+ // here which will cause scratch1 to become negative.
+ __ dsubu(scratch1, sp, scratch1);
+ // Check if the arguments will overflow the stack.
+ __ dsll(scratch2, num_args, kPointerSizeLog2);
+ // Signed comparison.
+ __ Branch(stack_overflow, le, scratch1, Operand(scratch2));
+}
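The check above reduces to one signed comparison: the space left below sp
(negative when the stack has already overflowed) against the space the
arguments need. A sketch with plain integer types (names illustrative):

    #include <cstdint>

    bool StackWouldOverflow(uintptr_t sp, uintptr_t real_stack_limit,
                            intptr_t num_args, int pointer_size_log2) {
      // Space we have left; may already be negative.
      intptr_t remaining = static_cast<intptr_t>(sp - real_stack_limit);
      intptr_t needed = num_args << pointer_size_log2;
      return remaining <= needed;  // true: branch to stack_overflow
    }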
+
} // namespace
// The construct stub for ES5 constructor functions and ES6 class constructors.
@@ -240,6 +262,19 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// Set up pointer to last argument.
__ Daddu(t2, fp, Operand(StandardFrameConstants::kCallerSPOffset));
+ Label enough_stack_space, stack_overflow;
+ Generate_StackOverflowCheck(masm, a0, t0, t1, &stack_overflow);
+ __ Branch(&enough_stack_space);
+
+ __ bind(&stack_overflow);
+ // Restore the context from the frame.
+ __ Ld(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
+ __ CallRuntime(Runtime::kThrowStackOverflow);
+ // Unreachable code.
+ __ break_(0xCC);
+
+ __ bind(&enough_stack_space);
+
// Copy arguments and receiver to the expression stack.
Label loop, entry;
__ mov(t3, a0);
@@ -481,19 +516,20 @@ void Builtins::Generate_ConstructedNonConstructable(MacroAssembler* masm) {
__ CallRuntime(Runtime::kThrowConstructedNonConstructable);
}
-// Clobbers a2; preserves all other registers.
-static void Generate_CheckStackOverflow(MacroAssembler* masm, Register argc) {
+// Clobbers scratch1 and scratch2; preserves all other registers.
+static void Generate_CheckStackOverflow(MacroAssembler* masm, Register argc,
+ Register scratch1, Register scratch2) {
// Check the stack for overflow. We are not trying to catch
// interruptions (e.g. debug break and preemption) here, so the "real stack
// limit" is checked.
Label okay;
- __ LoadRoot(a2, RootIndex::kRealStackLimit);
+ __ LoadRoot(scratch1, RootIndex::kRealStackLimit);
// Make scratch1 the space we have left. The stack might already be overflowed
// here which will cause scratch1 to become negative.
- __ dsubu(a2, sp, a2);
+ __ dsubu(scratch1, sp, scratch1);
// Check if the arguments will overflow the stack.
- __ dsll(a7, argc, kPointerSizeLog2);
- __ Branch(&okay, gt, a2, Operand(a7)); // Signed comparison.
+ __ dsll(scratch2, argc, kPointerSizeLog2);
+ __ Branch(&okay, gt, scratch1, Operand(scratch2)); // Signed comparison.
// Out of stack space.
__ CallRuntime(Runtime::kThrowStackOverflow);
@@ -501,16 +537,218 @@ static void Generate_CheckStackOverflow(MacroAssembler* masm, Register argc) {
__ bind(&okay);
}
+namespace {
+
+// Called with the native C calling convention. The corresponding function
+// signature is either:
+//
+// using JSEntryFunction = GeneratedCode<Address(
+// Address root_register_value, Address new_target, Address target,
+// Address receiver, intptr_t argc, Address** args)>;
+// or
+// using JSEntryFunction = GeneratedCode<Address(
+// Address root_register_value, MicrotaskQueue* microtask_queue)>;
+void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
+ Builtins::Name entry_trampoline) {
+ Label invoke, handler_entry, exit;
+
+ {
+ NoRootArrayScope no_root_array(masm);
+
+ // TODO(plind): unify the ABI description here.
+ // Registers:
+ // either
+ // a0: root register value
+ // a1: entry address
+ // a2: function
+ // a3: receiver
+ // a4: argc
+ // a5: argv
+ // or
+ // a0: root register value
+ // a1: microtask_queue
+ //
+ // Stack:
+ // 0 arg slots on mips64 (4 arg slots on mips)
+
+ // Save callee saved registers on the stack.
+ __ MultiPush(kCalleeSaved | ra.bit());
+
+ // Save callee-saved FPU registers.
+ __ MultiPushFPU(kCalleeSavedFPU);
+ // Set up the reserved register for 0.0.
+ __ Move(kDoubleRegZero, 0.0);
+
+ // Initialize the root register.
+ // C calling convention. The first argument is passed in a0.
+ __ mov(kRootRegister, a0);
+ }
+
+ // a1: entry address
+ // a2: function
+ // a3: receiver
+ // a4: argc
+ // a5: argv
+
+ // We build an EntryFrame.
+ __ li(s1, Operand(-1)); // Push a bad frame pointer to fail if it is used.
+ __ li(s2, Operand(StackFrame::TypeToMarker(type)));
+ __ li(s3, Operand(StackFrame::TypeToMarker(type)));
+ ExternalReference c_entry_fp = ExternalReference::Create(
+ IsolateAddressId::kCEntryFPAddress, masm->isolate());
+ __ li(s4, c_entry_fp);
+ __ Ld(s4, MemOperand(s4));
+ __ Push(s1, s2, s3, s4);
+ // Set up frame pointer for the frame to be pushed.
+ __ daddiu(fp, sp, -EntryFrameConstants::kCallerFPOffset);
+
+ // Registers:
+ // either
+ // a1: entry address
+ // a2: function
+ // a3: receiver
+ // a4: argc
+ // a5: argv
+ // or
+ // a1: microtask_queue
+ //
+ // Stack:
+ // caller fp |
+ // function slot | entry frame
+ // context slot |
+ // bad fp (0xFF...F) |
+ // callee saved registers + ra
+ // [ O32: 4 args slots]
+ // args
+
+ // If this is the outermost JS call, set js_entry_sp value.
+ Label non_outermost_js;
+ ExternalReference js_entry_sp = ExternalReference::Create(
+ IsolateAddressId::kJSEntrySPAddress, masm->isolate());
+ __ li(s1, js_entry_sp);
+ __ Ld(s2, MemOperand(s1));
+ __ Branch(&non_outermost_js, ne, s2, Operand(zero_reg));
+ __ Sd(fp, MemOperand(s1));
+ __ li(s3, Operand(StackFrame::OUTERMOST_JSENTRY_FRAME));
+ Label cont;
+ __ b(&cont);
+ __ nop(); // Branch delay slot nop.
+ __ bind(&non_outermost_js);
+ __ li(s3, Operand(StackFrame::INNER_JSENTRY_FRAME));
+ __ bind(&cont);
+ __ push(s3);
+
+ // Jump to a faked try block that does the invoke, with a faked catch
+ // block that sets the pending exception.
+ __ jmp(&invoke);
+ __ bind(&handler_entry);
+
+ // Store the current pc as the handler offset. It's used later to create the
+ // handler table.
+ masm->isolate()->builtins()->SetJSEntryHandlerOffset(handler_entry.pos());
+
+ // Caught exception: Store result (exception) in the pending exception
+ // field in the JSEnv and return a failure sentinel. Coming in here the
+ // fp will be invalid because the PushStackHandler below sets it to 0 to
+ // signal the existence of the JSEntry frame.
+ __ li(s1, ExternalReference::Create(
+ IsolateAddressId::kPendingExceptionAddress, masm->isolate()));
+ __ Sd(v0, MemOperand(s1)); // We come back from 'invoke'. result is in v0.
+ __ LoadRoot(v0, RootIndex::kException);
+ __ b(&exit); // b exposes branch delay slot.
+ __ nop(); // Branch delay slot nop.
+
+ // Invoke: Link this frame into the handler chain.
+ __ bind(&invoke);
+ __ PushStackHandler();
+ // If an exception not caught by another handler occurs, this handler
+ // returns control to the code after the jump to &invoke above, which
+ // restores all kCalleeSaved registers (including cp and fp) to their
+ // saved values before returning a failure to C.
+ //
+ // Registers:
+ // either
+ // a0: root register value
+ // a1: entry address
+ // a2: function
+ // a3: receiver
+ // a4: argc
+ // a5: argv
+ // or
+ // a0: root register value
+ // a1: microtask_queue
+ //
+ // Stack:
+ // handler frame
+ // entry frame
+ // callee saved registers + ra
+ // [ O32: 4 args slots]
+ // args
+ //
+ // Invoke the function by calling through JS entry trampoline builtin and
+ // pop the faked function when we return.
+
+ Handle<Code> trampoline_code =
+ masm->isolate()->builtins()->builtin_handle(entry_trampoline);
+ __ Call(trampoline_code, RelocInfo::CODE_TARGET);
+
+ // Unlink this frame from the handler chain.
+ __ PopStackHandler();
+
+ __ bind(&exit); // v0 holds result
+ // Check if the current stack frame is marked as the outermost JS frame.
+ Label non_outermost_js_2;
+ __ pop(a5);
+ __ Branch(&non_outermost_js_2, ne, a5,
+ Operand(StackFrame::OUTERMOST_JSENTRY_FRAME));
+ __ li(a5, js_entry_sp);
+ __ Sd(zero_reg, MemOperand(a5));
+ __ bind(&non_outermost_js_2);
+
+ // Restore the top frame descriptors from the stack.
+ __ pop(a5);
+ __ li(a4, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress,
+ masm->isolate()));
+ __ Sd(a5, MemOperand(a4));
+
+ // Reset the stack to the callee saved registers.
+ __ daddiu(sp, sp, -EntryFrameConstants::kCallerFPOffset);
+
+ // Restore callee-saved fpu registers.
+ __ MultiPopFPU(kCalleeSavedFPU);
+
+ // Restore callee saved registers from the stack.
+ __ MultiPop(kCalleeSaved | ra.bit());
+ // Return.
+ __ Jump(ra);
+}
+
+} // namespace
+
+void Builtins::Generate_JSEntry(MacroAssembler* masm) {
+ Generate_JSEntryVariant(masm, StackFrame::ENTRY,
+ Builtins::kJSEntryTrampoline);
+}
+
+void Builtins::Generate_JSConstructEntry(MacroAssembler* masm) {
+ Generate_JSEntryVariant(masm, StackFrame::CONSTRUCT_ENTRY,
+ Builtins::kJSConstructEntryTrampoline);
+}
+
+void Builtins::Generate_JSRunMicrotasksEntry(MacroAssembler* masm) {
+ Generate_JSEntryVariant(masm, StackFrame::ENTRY,
+ Builtins::kRunMicrotasksTrampoline);
+}
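The js_entry_sp handling threaded through Generate_JSEntryVariant is set on the
outermost entry only and cleared on the matching exit, with a frame marker
deciding which case applies. A hedged sketch of that bookkeeping (js_entry_sp
is a per-isolate slot in the real code; names here are illustrative):

    #include <cstdint>

    enum JSEntryMarker { kOutermostJSEntryFrame, kInnerJSEntryFrame };
    uintptr_t js_entry_sp = 0;  // per-isolate in the real implementation

    template <typename Invoke>
    void JSEntrySketch(uintptr_t fp, Invoke invoke) {
      JSEntryMarker marker = kInnerJSEntryFrame;
      if (js_entry_sp == 0) {  // outermost call into JS
        js_entry_sp = fp;
        marker = kOutermostJSEntryFrame;
      }
      invoke();                // the trampoline call, guarded by a handler
      if (marker == kOutermostJSEntryFrame) js_entry_sp = 0;
    }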
+
static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
bool is_construct) {
// ----------- S t a t e -------------
- // -- a0: new.target
- // -- a1: function
- // -- a2: receiver_pointer
- // -- a3: argc
- // -- s0: argv
+ // -- a1: new.target
+ // -- a2: function
+ // -- a3: receiver_pointer
+ // -- a4: argc
+ // -- a5: argv
// -----------------------------------
- ProfileEntryHookStub::MaybeCallEntryHook(masm);
// Enter an internal frame.
{
@@ -523,38 +761,46 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ Ld(cp, MemOperand(cp));
// Push the function and the receiver onto the stack.
- __ Push(a1, a2);
+ __ Push(a2, a3);
// Check if we have enough stack space to push all arguments.
- // Clobbers a2.
- Generate_CheckStackOverflow(masm, a3);
+ // Clobbers a0 and a3.
+ Generate_CheckStackOverflow(masm, a4, a0, a3);
- // Remember new.target.
- __ mov(a5, a0);
+ // Setup new.target, function and argc.
+ __ mov(a3, a1);
+ __ mov(a1, a2);
+ __ mov(a0, a4);
+
+ // a0: argc
+ // a1: function
+ // a3: new.target
+ // a5: argv
// Copy arguments to the stack in a loop.
// a3: argc
- // s0: argv, i.e. points to first arg
+ // a5: argv, i.e. points to first arg
Label loop, entry;
- __ Dlsa(a6, s0, a3, kPointerSizeLog2);
+ __ Dlsa(s1, a5, a4, kPointerSizeLog2);
__ b(&entry);
__ nop(); // Branch delay slot nop.
- // a6 points past last arg.
+ // s1 points past last arg.
__ bind(&loop);
- __ Ld(a4, MemOperand(s0)); // Read next parameter.
- __ daddiu(s0, s0, kPointerSize);
- __ Ld(a4, MemOperand(a4)); // Dereference handle.
- __ push(a4); // Push parameter.
+ __ Ld(s2, MemOperand(a5)); // Read next parameter.
+ __ daddiu(a5, a5, kPointerSize);
+ __ Ld(s2, MemOperand(s2)); // Dereference handle.
+ __ push(s2); // Push parameter.
__ bind(&entry);
- __ Branch(&loop, ne, s0, Operand(a6));
+ __ Branch(&loop, ne, a5, Operand(s1));
- // Setup new.target and argc.
- __ mov(a0, a3);
- __ mov(a3, a5);
+ // a0: argc
+ // a1: function
+ // a3: new.target
// Initialize all JavaScript callee-saved registers, since they will be seen
// by the garbage collector as part of handlers.
__ LoadRoot(a4, RootIndex::kUndefinedValue);
+ __ mov(a5, a4);
__ mov(s1, a4);
__ mov(s2, a4);
__ mov(s3, a4);
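The copy loop above reads each argv entry (a handle location), dereferences it,
and pushes the result, with s1 marking one past the last entry. As a plain C++
sketch with illustrative types:

    #include <cstdint>
    using Address = uintptr_t;

    void PushArgumentsSketch(Address* const* argv, intptr_t argc,
                             void (*push)(Address)) {
      Address* const* end = argv + argc;  // s1: points past the last arg
      for (; argv != end; ++argv) {       // the bne/daddiu loop
        push(**argv);                     // Ld the handle, Ld through it
      }
    }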
@@ -582,6 +828,12 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
Generate_JSEntryTrampolineHelper(masm, true);
}
+void Builtins::Generate_RunMicrotasksTrampoline(MacroAssembler* masm) {
+ // a1: microtask_queue
+ __ mov(RunMicrotasksDescriptor::MicrotaskQueueRegister(), a1);
+ __ Jump(BUILTIN_CODE(masm->isolate(), RunMicrotasks), RelocInfo::CODE_TARGET);
+}
+
static void ReplaceClosureCodeWithOptimizedCode(
MacroAssembler* masm, Register optimized_code, Register closure,
Register scratch1, Register scratch2, Register scratch3) {
@@ -785,11 +1037,22 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
// The function builds an interpreter frame. See InterpreterFrameConstants in
// frames.h for its layout.
void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
- ProfileEntryHookStub::MaybeCallEntryHook(masm);
-
Register closure = a1;
Register feedback_vector = a2;
+ // Get the bytecode array from the function object and load it into
+ // kInterpreterBytecodeArrayRegister.
+ __ Ld(a0, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+ __ Ld(kInterpreterBytecodeArrayRegister,
+ FieldMemOperand(a0, SharedFunctionInfo::kFunctionDataOffset));
+ GetSharedFunctionInfoBytecode(masm, kInterpreterBytecodeArrayRegister, a4);
+
+ // The bytecode array could have been flushed from the shared function info,
+ // if so, call into CompileLazy.
+ Label compile_lazy;
+ __ GetObjectType(kInterpreterBytecodeArrayRegister, a0, a0);
+ __ Branch(&compile_lazy, ne, a0, Operand(BYTECODE_ARRAY_TYPE));
+
// Load the feedback vector from the closure.
__ Ld(feedback_vector,
FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
@@ -804,13 +1067,6 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ PushStandardFrame(closure);
- // Get the bytecode array from the function object and load it into
- // kInterpreterBytecodeArrayRegister.
- __ Ld(a0, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
- __ Ld(kInterpreterBytecodeArrayRegister,
- FieldMemOperand(a0, SharedFunctionInfo::kFunctionDataOffset));
- GetSharedFunctionInfoBytecode(masm, kInterpreterBytecodeArrayRegister, a4);
-
// Increment invocation count for the function.
__ Lw(a4, FieldMemOperand(feedback_vector,
FeedbackVector::kInvocationCountOffset));
@@ -818,18 +1074,6 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Sw(a4, FieldMemOperand(feedback_vector,
FeedbackVector::kInvocationCountOffset));
- // Check function data field is actually a BytecodeArray object.
- if (FLAG_debug_code) {
- __ SmiTst(kInterpreterBytecodeArrayRegister, a4);
- __ Assert(ne,
- AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry,
- a4, Operand(zero_reg));
- __ GetObjectType(kInterpreterBytecodeArrayRegister, a4, a4);
- __ Assert(eq,
- AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry,
- a4, Operand(BYTECODE_ARRAY_TYPE));
- }
-
// Reset code age.
DCHECK_EQ(0, BytecodeArray::kNoAgeBytecodeAge);
__ sb(zero_reg, FieldMemOperand(kInterpreterBytecodeArrayRegister,
@@ -924,22 +1168,11 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// The return value is in v0.
LeaveInterpreterFrame(masm, t0);
__ Jump(ra);
-}
-static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
- Register scratch1, Register scratch2,
- Label* stack_overflow) {
- // Check the stack for overflow. We are not trying to catch
- // interruptions (e.g. debug break and preemption) here, so the "real stack
- // limit" is checked.
- __ LoadRoot(scratch1, RootIndex::kRealStackLimit);
- // Make scratch1 the space we have left. The stack might already be overflowed
- // here which will cause scratch1 to become negative.
- __ dsubu(scratch1, sp, scratch1);
- // Check if the arguments will overflow the stack.
- __ dsll(scratch2, num_args, kPointerSizeLog2);
- // Signed comparison.
- __ Branch(stack_overflow, le, scratch1, Operand(scratch2));
+ __ bind(&compile_lazy);
+ GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
+ // Unreachable code.
+ __ break_(0xCC);
}
static void Generate_InterpreterPushArgs(MacroAssembler* masm,
@@ -1066,12 +1299,14 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
// Set the return address to the correct point in the interpreter entry
// trampoline.
Label builtin_trampoline, trampoline_loaded;
- Smi* interpreter_entry_return_pc_offset(
+ Smi interpreter_entry_return_pc_offset(
masm->isolate()->heap()->interpreter_entry_return_pc_offset());
- DCHECK_NE(interpreter_entry_return_pc_offset, Smi::kZero);
+ DCHECK_NE(interpreter_entry_return_pc_offset, Smi::zero());
- // If the SFI function_data is an InterpreterData, get the trampoline stored
- // in it, otherwise get the trampoline from the builtins list.
+ // If the SFI function_data is an InterpreterData, the function will have a
+ // custom copy of the interpreter entry trampoline for profiling. If so,
+ // get the custom trampoline, otherwise grab the entry address of the global
+ // trampoline.
__ Ld(t0, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
__ Ld(t0, FieldMemOperand(t0, JSFunction::kSharedFunctionInfoOffset));
__ Ld(t0, FieldMemOperand(t0, SharedFunctionInfo::kFunctionDataOffset));
@@ -1081,14 +1316,17 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
Operand(INTERPRETER_DATA_TYPE));
__ Ld(t0, FieldMemOperand(t0, InterpreterData::kInterpreterTrampolineOffset));
+ __ Daddu(t0, t0, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Branch(&trampoline_loaded);
__ bind(&builtin_trampoline);
- __ li(t0, BUILTIN_CODE(masm->isolate(), InterpreterEntryTrampoline));
+ __ li(t0, ExternalReference::
+ address_of_interpreter_entry_trampoline_instruction_start(
+ masm->isolate()));
+ __ Ld(t0, MemOperand(t0));
__ bind(&trampoline_loaded);
- __ Daddu(ra, t0, Operand(interpreter_entry_return_pc_offset->value() +
- Code::kHeaderSize - kHeapObjectTag));
+ __ Daddu(ra, t0, Operand(interpreter_entry_return_pc_offset->value()));
// Initialize the dispatch table register.
__ li(kInterpreterDispatchTableRegister,
@@ -1299,7 +1537,7 @@ void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
}
// If the code object is null, just return to the caller.
- __ Ret(eq, v0, Operand(Smi::kZero));
+ __ Ret(eq, v0, Operand(Smi::zero()));
// Drop the handler frame that is sitting on top of the actual
// JavaScript frame. This is the case when OSR is triggered from bytecode.
@@ -1555,7 +1793,7 @@ static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
__ SmiTag(a0);
__ li(a4, Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
__ MultiPush(a0.bit() | a1.bit() | a4.bit() | fp.bit() | ra.bit());
- __ Push(Smi::kZero); // Padding.
+ __ Push(Smi::zero()); // Padding.
__ Daddu(fp, sp,
Operand(ArgumentsAdaptorFrameConstants::kFixedFrameSizeFromFp));
}
@@ -2262,7 +2500,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
// The function index was put in t0 by the jump table trampoline.
// Convert to Smi for the runtime call
- __ SmiTag(t0);
+ __ SmiTag(kWasmCompileLazyFuncIndexRegister);
{
HardAbortScope hard_abort(masm); // Avoid calls to Abort.
FrameScope scope(masm, StackFrame::WASM_COMPILE_LAZY);
@@ -2279,13 +2517,13 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
// Pass instance and function index as an explicit arguments to the runtime
// function.
- __ Push(kWasmInstanceRegister, t0);
+ __ Push(kWasmInstanceRegister, kWasmCompileLazyFuncIndexRegister);
// Load the correct CEntry builtin from the instance object.
__ Ld(a2, FieldMemOperand(kWasmInstanceRegister,
WasmInstanceObject::kCEntryStubOffset));
// Initialize the JavaScript context with 0. CEntry will use it to
// set the current context on the isolate.
- __ Move(kContextRegister, Smi::kZero);
+ __ Move(kContextRegister, Smi::zero());
__ CallRuntimeWithCEntry(Runtime::kWasmCompileLazy, a2);
// Restore registers.
@@ -2309,8 +2547,6 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
// If argv_mode == kArgvInRegister:
// a2: pointer to the first argument
- ProfileEntryHookStub::MaybeCallEntryHook(masm);
-
if (argv_mode == kArgvInRegister) {
// Move argv into the correct register.
__ mov(s1, a2);
@@ -2344,40 +2580,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
__ li(a2, ExternalReference::isolate_address(masm->isolate()));
__ mov(a1, s1);
- // To let the GC traverse the return address of the exit frames, we need to
- // know where the return address is. The CEntry is unmovable, so
- // we can store the address on the stack to be able to find it again and
- // we never have to restore it, because it will not change.
- {
- Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm);
- int kNumInstructionsToJump = 4;
- Label find_ra;
- // Adjust the value in ra to point to the correct return location, 2nd
- // instruction past the real call into C code (the jalr(t9)), and push it.
- // This is the return address of the exit frame.
- if (kArchVariant >= kMips64r6) {
- __ addiupc(ra, kNumInstructionsToJump + 1);
- } else {
- // This no-op-and-link sequence saves PC + 8 in ra register on pre-r6 MIPS
- __ nal(); // nal has branch delay slot.
- __ Daddu(ra, ra, kNumInstructionsToJump * kInstrSize);
- }
- __ bind(&find_ra);
-
- // This spot was reserved in EnterExitFrame.
- __ Sd(ra, MemOperand(sp));
- // Stack space reservation moved to the branch delay slot below.
- // Stack is still aligned.
-
- // Call the C routine.
- __ mov(t9, s2); // Function pointer to t9 to conform to ABI for PIC.
- __ jalr(t9);
- // Set up sp in the delay slot.
- __ daddiu(sp, sp, -kCArgsSlotsSize);
- // Make sure the stored 'ra' points to this position.
- DCHECK_EQ(kNumInstructionsToJump,
- masm->InstructionsGeneratedSince(&find_ra));
- }
+ __ StoreReturnAddressAndCall(s2);
// Result returned in v0 or v1:v0 - do not destroy these registers!
@@ -2694,40 +2897,6 @@ void Builtins::Generate_MathPowInternal(MacroAssembler* masm) {
__ Ret();
}
-namespace {
-
-void GenerateInternalArrayConstructorCase(MacroAssembler* masm,
- ElementsKind kind) {
- // Load undefined into the allocation site parameter as required by
- // ArrayNArgumentsConstructor.
- __ LoadRoot(kJavaScriptCallExtraArg1Register, RootIndex::kUndefinedValue);
-
- __ Jump(CodeFactory::InternalArrayNoArgumentConstructor(masm->isolate(), kind)
- .code(),
- RelocInfo::CODE_TARGET, lo, a0, Operand(1));
-
- __ Jump(BUILTIN_CODE(masm->isolate(), ArrayNArgumentsConstructor),
- RelocInfo::CODE_TARGET, hi, a0, Operand(1));
-
- if (IsFastPackedElementsKind(kind)) {
- // We might need to create a holey array
- // look at the first argument.
- __ Ld(kScratchReg, MemOperand(sp, 0));
-
- __ Jump(CodeFactory::InternalArraySingleArgumentConstructor(
- masm->isolate(), GetHoleyElementsKind(kind))
- .code(),
- RelocInfo::CODE_TARGET, ne, kScratchReg, Operand(zero_reg));
- }
-
- __ Jump(
- CodeFactory::InternalArraySingleArgumentConstructor(masm->isolate(), kind)
- .code(),
- RelocInfo::CODE_TARGET);
-}
-
-} // namespace
-
void Builtins::Generate_InternalArrayConstructorImpl(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : argc
@@ -2749,33 +2918,394 @@ void Builtins::Generate_InternalArrayConstructorImpl(MacroAssembler* masm) {
__ GetObjectType(a3, a3, a4);
__ Assert(eq, AbortReason::kUnexpectedInitialMapForArrayFunction, a4,
Operand(MAP_TYPE));
+
+ // Figure out the right elements kind.
+ __ Ld(a3, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
+
+ // Load the map's "bit field 2" into a3. We only need the first byte,
+ // but the following bit field extraction takes care of that anyway.
+ __ Lbu(a3, FieldMemOperand(a3, Map::kBitField2Offset));
+ // Retrieve elements_kind from bit field 2.
+ __ DecodeField<Map::ElementsKindBits>(a3);
+
+ // Initial elements kind should be packed elements.
+ __ Assert(eq, AbortReason::kInvalidElementsKindForInternalPackedArray, a3,
+ Operand(PACKED_ELEMENTS));
+
+ // No arguments should be passed.
+ __ Assert(eq, AbortReason::kWrongNumberOfArgumentsForInternalPackedArray,
+ a0, Operand(static_cast<int64_t>(0)));
}
- // Figure out the right elements kind.
- __ Ld(a3, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
+ __ Jump(
+ BUILTIN_CODE(masm->isolate(), InternalArrayNoArgumentConstructor_Packed),
+ RelocInfo::CODE_TARGET);
+}
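The debug checks above decode the elements kind from the map's bit field 2 and
then insist on PACKED_ELEMENTS with zero arguments. The decode itself is an
ordinary shift-and-mask; a sketch with an illustrative shift and width (the
real constants come from Map::ElementsKindBits):

    #include <cstdint>

    constexpr int kElementsKindShift = 3;    // illustrative, not V8's value
    constexpr uint8_t kElementsKindMask = 0x1F;

    int DecodeElementsKindSketch(uint8_t bit_field2) {
      return (bit_field2 >> kElementsKindShift) & kElementsKindMask;
    }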
- // Load the map's "bit field 2" into a3. We only need the first byte,
- // but the following bit field extraction takes care of that anyway.
- __ Lbu(a3, FieldMemOperand(a3, Map::kBitField2Offset));
- // Retrieve elements_kind from bit field 2.
- __ DecodeField<Map::ElementsKindBits>(a3);
+namespace {
- if (FLAG_debug_code) {
- Label done;
- __ Branch(&done, eq, a3, Operand(PACKED_ELEMENTS));
- __ Assert(
- eq,
- AbortReason::kInvalidElementsKindForInternalArrayOrInternalPackedArray,
- a3, Operand(HOLEY_ELEMENTS));
- __ bind(&done);
+int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
+ int64_t offset = (ref0.address() - ref1.address());
+ DCHECK(static_cast<int>(offset) == offset);
+ return static_cast<int>(offset);
+}
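AddressOffset exists so that next, limit, and level can all be addressed off a
single base register (s5 holds next_address; kLimitOffset and kLevelOffset
become small displacements from it). A plain-pointer analogue of the same idea,
with an illustrative layout:

    #include <cassert>
    #include <cstdint>

    struct HandleScopeSlots {  // illustrative layout only
      uintptr_t next;
      uintptr_t limit;
      int32_t level;
    };

    int OffsetFromBase(const void* field, const void* base) {
      std::int64_t offset = static_cast<const char*>(field) -
                            static_cast<const char*>(base);
      assert(static_cast<int>(offset) == offset);  // must fit a MemOperand
      return static_cast<int>(offset);
    }
    // OffsetFromBase(&slots.limit, &slots.next) plays the role of
    // kLimitOffset for a HandleScopeSlots instance named slots.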
+
+// Calls an API function. Allocates a HandleScope, extracts the returned value
+// from the handle, and propagates exceptions. Restores the context.
+// stack_space - space to be unwound on exit (includes the JS call arguments'
+// space and the additional space allocated for the fast call).
+void CallApiFunctionAndReturn(MacroAssembler* masm, Register function_address,
+ ExternalReference thunk_ref, int stack_space,
+ MemOperand* stack_space_operand,
+ MemOperand return_value_operand) {
+ Isolate* isolate = masm->isolate();
+ ExternalReference next_address =
+ ExternalReference::handle_scope_next_address(isolate);
+ const int kNextOffset = 0;
+ const int kLimitOffset = AddressOffset(
+ ExternalReference::handle_scope_limit_address(isolate), next_address);
+ const int kLevelOffset = AddressOffset(
+ ExternalReference::handle_scope_level_address(isolate), next_address);
+
+ DCHECK(function_address == a1 || function_address == a2);
+
+ Label profiler_disabled;
+ Label end_profiler_check;
+ __ li(t9, ExternalReference::is_profiling_address(isolate));
+ __ Lb(t9, MemOperand(t9, 0));
+ __ Branch(&profiler_disabled, eq, t9, Operand(zero_reg));
+
+ // Additional parameter is the address of the actual callback.
+ __ li(t9, thunk_ref);
+ __ jmp(&end_profiler_check);
+
+ __ bind(&profiler_disabled);
+ __ mov(t9, function_address);
+ __ bind(&end_profiler_check);
+
+ // Allocate HandleScope in callee-save registers.
+ __ li(s5, next_address);
+ __ Ld(s0, MemOperand(s5, kNextOffset));
+ __ Ld(s1, MemOperand(s5, kLimitOffset));
+ __ Lw(s2, MemOperand(s5, kLevelOffset));
+ __ Addu(s2, s2, Operand(1));
+ __ Sw(s2, MemOperand(s5, kLevelOffset));
+
+ if (FLAG_log_timer_events) {
+ FrameScope frame(masm, StackFrame::MANUAL);
+ __ PushSafepointRegisters();
+ __ PrepareCallCFunction(1, a0);
+ __ li(a0, ExternalReference::isolate_address(isolate));
+ __ CallCFunction(ExternalReference::log_enter_external_function(), 1);
+ __ PopSafepointRegisters();
}
- Label fast_elements_case;
- __ Branch(&fast_elements_case, eq, a3, Operand(PACKED_ELEMENTS));
- GenerateInternalArrayConstructorCase(masm, HOLEY_ELEMENTS);
+ __ StoreReturnAddressAndCall(t9);
+
+ if (FLAG_log_timer_events) {
+ FrameScope frame(masm, StackFrame::MANUAL);
+ __ PushSafepointRegisters();
+ __ PrepareCallCFunction(1, a0);
+ __ li(a0, ExternalReference::isolate_address(isolate));
+ __ CallCFunction(ExternalReference::log_leave_external_function(), 1);
+ __ PopSafepointRegisters();
+ }
+
+ Label promote_scheduled_exception;
+ Label delete_allocated_handles;
+ Label leave_exit_frame;
+ Label return_value_loaded;
+
+ // Load value from ReturnValue.
+ __ Ld(v0, return_value_operand);
+ __ bind(&return_value_loaded);
+
+ // No more valid handles (the result handle was the last one). Restore
+ // previous handle scope.
+ __ Sd(s0, MemOperand(s5, kNextOffset));
+ if (__ emit_debug_code()) {
+ __ Lw(a1, MemOperand(s5, kLevelOffset));
+ __ Check(eq, AbortReason::kUnexpectedLevelAfterReturnFromApiCall, a1,
+ Operand(s2));
+ }
+ __ Subu(s2, s2, Operand(1));
+ __ Sw(s2, MemOperand(s5, kLevelOffset));
+ __ Ld(kScratchReg, MemOperand(s5, kLimitOffset));
+ __ Branch(&delete_allocated_handles, ne, s1, Operand(kScratchReg));
+
+ // Leave the API exit frame.
+ __ bind(&leave_exit_frame);
+
+ if (stack_space_operand == nullptr) {
+ DCHECK_NE(stack_space, 0);
+ __ li(s0, Operand(stack_space));
+ } else {
+ DCHECK_EQ(stack_space, 0);
+ STATIC_ASSERT(kCArgSlotCount == 0);
+ __ Ld(s0, *stack_space_operand);
+ }
+
+ static constexpr bool kDontSaveDoubles = false;
+ static constexpr bool kRegisterContainsSlotCount = false;
+ __ LeaveExitFrame(kDontSaveDoubles, s0, NO_EMIT_RETURN,
+ kRegisterContainsSlotCount);
+
+ // Check if the function scheduled an exception.
+ __ LoadRoot(a4, RootIndex::kTheHoleValue);
+ __ li(kScratchReg, ExternalReference::scheduled_exception_address(isolate));
+ __ Ld(a5, MemOperand(kScratchReg));
+ __ Branch(&promote_scheduled_exception, ne, a4, Operand(a5));
+
+ __ Ret();
+
+ // Re-throw by promoting a scheduled exception.
+ __ bind(&promote_scheduled_exception);
+ __ TailCallRuntime(Runtime::kPromoteScheduledException);
+
+ // HandleScope limit has changed. Delete allocated extensions.
+ __ bind(&delete_allocated_handles);
+ __ Sd(s1, MemOperand(s5, kLimitOffset));
+ __ mov(s0, v0);
+ __ mov(a0, v0);
+ __ PrepareCallCFunction(1, s1);
+ __ li(a0, ExternalReference::isolate_address(isolate));
+ __ CallCFunction(ExternalReference::delete_handle_scope_extensions(), 1);
+ __ mov(v0, s0);
+ __ jmp(&leave_exit_frame);
+}
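
For readers tracking the HandleScope bookkeeping above: the kNext/kLimit/kLevel
offsets work because the three fields sit at fixed distances inside the
isolate, so s5 can address all of them relative to next_address. The sketch
below restates the save/call/restore protocol in C++; HandleScopeData, its
field names, and DeleteExtensions are assumptions for illustration, not V8's
exact internals.

    #include <cassert>
    #include <cstdint>

    struct HandleScopeData {   // assumed stand-in for the isolate fields
      uintptr_t* next;         // kNextOffset
      uintptr_t* limit;        // kLimitOffset
      int level;               // kLevelOffset
    };

    uintptr_t CallWithHandleScope(HandleScopeData* data,
                                  uintptr_t (*api_function)()) {
      uintptr_t* saved_next = data->next;    // Ld s0
      uintptr_t* saved_limit = data->limit;  // Ld s1
      int saved_level = data->level;         // Lw s2
      data->level += 1;                      // Addu + Sw

      uintptr_t result = api_function();     // StoreReturnAddressAndCall(t9)

      data->next = saved_next;               // Sd s0
      assert(data->level == saved_level + 1);  // the debug-only Check above
      data->level = saved_level;             // Subu + Sw
      if (data->limit != saved_limit) {      // delete_allocated_handles path
        data->limit = saved_limit;
        // DeleteExtensions(isolate);  // hypothetical stand-in for the
        //                             // delete_handle_scope_extensions call
      }
      return result;
    }
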
+
+} // namespace
+
+void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- cp : kTargetContext
+ // -- a1 : kApiFunctionAddress
+ // -- a2 : kArgc
+ // --
+ // -- sp[0] : last argument
+ // -- ...
+ // -- sp[(argc - 1) * 8] : first argument
+ // -- sp[(argc + 0) * 8] : receiver
+ // -- sp[(argc + 1) * 8] : kHolder
+ // -- sp[(argc + 2) * 8] : kCallData
+ // -----------------------------------
+
+ Register api_function_address = a1;
+ Register argc = a2;
+ Register scratch = t0;
+ Register base = t1; // For addressing MemOperands on the stack.
- __ bind(&fast_elements_case);
- GenerateInternalArrayConstructorCase(masm, PACKED_ELEMENTS);
+ DCHECK(!AreAliased(api_function_address, argc, scratch, base));
+
+ // Stack offsets (without argc).
+ static constexpr int kReceiverOffset = 0 * kPointerSize;
+ static constexpr int kHolderOffset = kReceiverOffset + kPointerSize;
+ static constexpr int kCallDataOffset = kHolderOffset + kPointerSize;
+
+ // Extra stack arguments are: the receiver, kHolder, kCallData.
+ static constexpr int kExtraStackArgumentCount = 3;
+
+ typedef FunctionCallbackArguments FCA;
+
+ STATIC_ASSERT(FCA::kArgsLength == 6);
+ STATIC_ASSERT(FCA::kNewTargetIndex == 5);
+ STATIC_ASSERT(FCA::kDataIndex == 4);
+ STATIC_ASSERT(FCA::kReturnValueOffset == 3);
+ STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
+ STATIC_ASSERT(FCA::kIsolateIndex == 1);
+ STATIC_ASSERT(FCA::kHolderIndex == 0);
+
+ // Set up FunctionCallbackInfo's implicit_args on the stack as follows:
+ //
+ // Target state:
+ // sp[0 * kPointerSize]: kHolder
+ // sp[1 * kPointerSize]: kIsolate
+ // sp[2 * kPointerSize]: undefined (kReturnValueDefaultValue)
+ // sp[3 * kPointerSize]: undefined (kReturnValue)
+ // sp[4 * kPointerSize]: kData
+ // sp[5 * kPointerSize]: undefined (kNewTarget)
+
+ // Set up the base register for addressing through MemOperands. It will point
+ // at the receiver (located at sp + argc * kPointerSize).
+ __ Dlsa(base, sp, argc, kPointerSizeLog2);
+
+ // Reserve space on the stack.
+ __ Dsubu(sp, sp, Operand(FCA::kArgsLength * kPointerSize));
+
+ // kHolder.
+ __ Ld(scratch, MemOperand(base, kHolderOffset));
+ __ Sd(scratch, MemOperand(sp, 0 * kPointerSize));
+
+ // kIsolate.
+ __ li(scratch, ExternalReference::isolate_address(masm->isolate()));
+ __ Sd(scratch, MemOperand(sp, 1 * kPointerSize));
+
+ // kReturnValueDefaultValue, kReturnValue, and kNewTarget.
+ __ LoadRoot(scratch, RootIndex::kUndefinedValue);
+ __ Sd(scratch, MemOperand(sp, 2 * kPointerSize));
+ __ Sd(scratch, MemOperand(sp, 3 * kPointerSize));
+ __ Sd(scratch, MemOperand(sp, 5 * kPointerSize));
+
+ // kData.
+ __ Ld(scratch, MemOperand(base, kCallDataOffset));
+ __ Sd(scratch, MemOperand(sp, 4 * kPointerSize));
+
+ // Keep a pointer to kHolder (= implicit_args) in a scratch register.
+ // We use it below to set up the FunctionCallbackInfo object.
+ __ mov(scratch, sp);
+
+ // Allocate the v8::Arguments structure in the arguments' space since
+ // it's not controlled by GC.
+ static constexpr int kApiStackSpace = 4;
+ static constexpr bool kDontSaveDoubles = false;
+ FrameScope frame_scope(masm, StackFrame::MANUAL);
+ __ EnterExitFrame(kDontSaveDoubles, kApiStackSpace);
+
+ // EnterExitFrame may align the sp.
+
+ // FunctionCallbackInfo::implicit_args_ (points at kHolder as set up above).
+ // Arguments are after the return address (pushed by EnterExitFrame()).
+ __ Sd(scratch, MemOperand(sp, 1 * kPointerSize));
+
+ // FunctionCallbackInfo::values_ (points at the first varargs argument passed
+ // on the stack).
+ __ Dsubu(scratch, base, Operand(1 * kPointerSize));
+ __ Sd(scratch, MemOperand(sp, 2 * kPointerSize));
+
+ // FunctionCallbackInfo::length_.
+  // Stored as an int field; under the n64 ABI, 32-bit integers within a
+  // struct on the stack are always left-justified.
+ __ Sw(argc, MemOperand(sp, 3 * kPointerSize));
+
+  // We also store the amount of stack to drop after returning from the API
+  // function here.
+ // Note: Unlike on other architectures, this stores the number of slots to
+ // drop, not the number of bytes.
+ __ Daddu(scratch, argc, Operand(FCA::kArgsLength + kExtraStackArgumentCount));
+ __ Sd(scratch, MemOperand(sp, 4 * kPointerSize));
+
+ // v8::InvocationCallback's argument.
+ DCHECK(!AreAliased(api_function_address, scratch, a0));
+ __ Daddu(a0, sp, Operand(1 * kPointerSize));
+
+ ExternalReference thunk_ref = ExternalReference::invoke_function_callback();
+
+ // There are two stack slots above the arguments we constructed on the stack.
+ // TODO(jgruber): Document what these arguments are.
+ static constexpr int kStackSlotsAboveFCA = 2;
+ MemOperand return_value_operand(
+ fp, (kStackSlotsAboveFCA + FCA::kReturnValueOffset) * kPointerSize);
+
+ static constexpr int kUseStackSpaceOperand = 0;
+ MemOperand stack_space_operand(sp, 4 * kPointerSize);
+
+ AllowExternalCallThatCantCauseGC scope(masm);
+ CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
+ kUseStackSpaceOperand, &stack_space_operand,
+ return_value_operand);
+}
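
As a reading aid, here is roughly the structure the callback receives,
sketched with plain C++ stand-ins. The struct and its comments are
assumptions for illustration; the authoritative layout is the
FunctionCallbackInfo definition in the V8 sources.

    #include <cstdint>

    struct FunctionCallbackInfoSketch {  // illustrative, not the real type
      uintptr_t* implicit_args;  // -> the kHolder slot stored above
      uintptr_t* values;         // -> first JS argument (base - kPointerSize)
      int length;                // argc, stored as a 32-bit field (Sw above)
    };

    // implicit_args slot order, matching the STATIC_ASSERTs above:
    //   [0] kHolder  [1] kIsolate  [2] kReturnValueDefaultValue
    //   [3] kReturnValue  [4] kData  [5] kNewTarget
    // CallApiFunctionAndReturn later reads slot [3] back through
    // return_value_operand (fp-relative, kStackSlotsAboveFCA + 3 slots).
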
+
+void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
+  // Build v8::PropertyCallbackInfo::args_ array on the stack and push the
+  // property name below the exit frame, so that the GC is aware of them.
+ STATIC_ASSERT(PropertyCallbackArguments::kShouldThrowOnErrorIndex == 0);
+ STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 1);
+ STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 2);
+ STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 3);
+ STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 4);
+ STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 5);
+ STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 6);
+ STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 7);
+
+ Register receiver = ApiGetterDescriptor::ReceiverRegister();
+ Register holder = ApiGetterDescriptor::HolderRegister();
+ Register callback = ApiGetterDescriptor::CallbackRegister();
+ Register scratch = a4;
+ DCHECK(!AreAliased(receiver, holder, callback, scratch));
+
+ Register api_function_address = a2;
+
+ // Here and below +1 is for name() pushed after the args_ array.
+ typedef PropertyCallbackArguments PCA;
+ __ Dsubu(sp, sp, (PCA::kArgsLength + 1) * kPointerSize);
+ __ Sd(receiver, MemOperand(sp, (PCA::kThisIndex + 1) * kPointerSize));
+ __ Ld(scratch, FieldMemOperand(callback, AccessorInfo::kDataOffset));
+ __ Sd(scratch, MemOperand(sp, (PCA::kDataIndex + 1) * kPointerSize));
+ __ LoadRoot(scratch, RootIndex::kUndefinedValue);
+ __ Sd(scratch, MemOperand(sp, (PCA::kReturnValueOffset + 1) * kPointerSize));
+ __ Sd(scratch, MemOperand(sp, (PCA::kReturnValueDefaultValueIndex + 1) *
+ kPointerSize));
+ __ li(scratch, ExternalReference::isolate_address(masm->isolate()));
+ __ Sd(scratch, MemOperand(sp, (PCA::kIsolateIndex + 1) * kPointerSize));
+ __ Sd(holder, MemOperand(sp, (PCA::kHolderIndex + 1) * kPointerSize));
+ // should_throw_on_error -> false
+ DCHECK_EQ(0, Smi::kZero.ptr());
+ __ Sd(zero_reg,
+ MemOperand(sp, (PCA::kShouldThrowOnErrorIndex + 1) * kPointerSize));
+ __ Ld(scratch, FieldMemOperand(callback, AccessorInfo::kNameOffset));
+ __ Sd(scratch, MemOperand(sp, 0 * kPointerSize));
+
+ // v8::PropertyCallbackInfo::args_ array and name handle.
+ const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
+
+  // Load address of v8::PropertyCallbackInfo::args_ array and name handle.
+ __ mov(a0, sp); // a0 = Handle<Name>
+ __ Daddu(a1, a0, Operand(1 * kPointerSize)); // a1 = v8::PCI::args_
+
+ const int kApiStackSpace = 1;
+ FrameScope frame_scope(masm, StackFrame::MANUAL);
+ __ EnterExitFrame(false, kApiStackSpace);
+
+  // Create a v8::PropertyCallbackInfo object on the stack and initialize
+  // its args_ field.
+ __ Sd(a1, MemOperand(sp, 1 * kPointerSize));
+ __ Daddu(a1, sp, Operand(1 * kPointerSize));
+ // a1 = v8::PropertyCallbackInfo&
+
+ ExternalReference thunk_ref =
+ ExternalReference::invoke_accessor_getter_callback();
+
+ __ Ld(scratch, FieldMemOperand(callback, AccessorInfo::kJsGetterOffset));
+ __ Ld(api_function_address,
+ FieldMemOperand(scratch, Foreign::kForeignAddressOffset));
+
+  // +3 is to skip the prologue, return address and name handle.
+ MemOperand return_value_operand(
+ fp, (PropertyCallbackArguments::kReturnValueOffset + 3) * kPointerSize);
+ MemOperand* const kUseStackSpaceConstant = nullptr;
+ CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
+ kStackUnwindSpace, kUseStackSpaceConstant,
+ return_value_operand);
+}
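
The pushes above produce the following picture, shown as a hypothetical plain
struct whose field order mirrors the STATIC_ASSERTs; the names are stand-ins
rather than V8 declarations.

    #include <cstdint>

    struct PropertyCallbackArgsSketch {  // args_ as laid out above
      uintptr_t should_throw_on_error;   // index 0, Smi zero (false)
      uintptr_t holder;                  // index 1
      uintptr_t isolate;                 // index 2
      uintptr_t return_value_default;    // index 3, undefined
      uintptr_t return_value;            // index 4, undefined
      uintptr_t data;                    // index 5, AccessorInfo::data
      uintptr_t this_object;             // index 6, the receiver
    };

    // sp[0] holds the name handle; args_ starts one slot above it, which is
    // why every store above uses an (index + 1) * kPointerSize offset.
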
+
+void Builtins::Generate_DirectCEntry(MacroAssembler* masm) {
+ // The sole purpose of DirectCEntry is for movable callers (e.g. any general
+ // purpose Code object) to be able to call into C functions that may trigger
+ // GC and thus move the caller.
+ //
+ // DirectCEntry places the return address on the stack (updated by the GC),
+ // making the call GC safe. The irregexp backend relies on this.
+
+  // Make room for arguments to fit the C calling convention. Callers use
+  // EnterExitFrame/LeaveExitFrame, so they handle stack restoring and we
+  // don't have to do that here. Any caller must drop kCArgsSlotsSize stack
+  // space after the call.
+ __ daddiu(sp, sp, -kCArgsSlotsSize);
+
+ __ Sd(ra, MemOperand(sp, kCArgsSlotsSize)); // Store the return address.
+ __ Call(t9); // Call the C++ function.
+  __ Ld(t9, MemOperand(sp, kCArgsSlotsSize));  // Reload the return address.
+
+ if (FLAG_debug_code && FLAG_enable_slow_asserts) {
+ // In case of an error the return address may point to a memory area
+ // filled with kZapValue by the GC. Dereference the address and check for
+ // this.
+ __ Uld(a4, MemOperand(t9));
+ __ Assert(ne, AbortReason::kReceivedInvalidReturnAddress, a4,
+ Operand(reinterpret_cast<uint64_t>(kZapValue)));
+ }
+
+ __ Jump(t9);
}
#undef __
diff --git a/deps/v8/src/builtins/object-fromentries.tq b/deps/v8/src/builtins/object-fromentries.tq
new file mode 100644
index 0000000000..a04b034085
--- /dev/null
+++ b/deps/v8/src/builtins/object-fromentries.tq
@@ -0,0 +1,69 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+namespace object {
+
+ transitioning macro ObjectFromEntriesFastCase(implicit context: Context)(
+ iterable: Object): JSObject labels IfSlow {
+ typeswitch (iterable) {
+ case (array: FastJSArrayWithNoCustomIteration): {
+ const elements: FixedArray =
+ Cast<FixedArray>(array.elements) otherwise IfSlow;
+ const length: Smi = array.length;
+ const result: JSObject = AllocateEmptyJSObject();
+
+ for (let k: Smi = 0; k < length; ++k) {
+ const value: Object = array::LoadElementOrUndefined(elements, k);
+ const pair: KeyValuePair =
+ collections::LoadKeyValuePairNoSideEffects(value)
+ otherwise IfSlow;
+ // Bail out if ToPropertyKey will attempt to load and call
+ // Symbol.toPrimitive, toString, and valueOf, which could
+ // invalidate assumptions about the iterable.
+ if (Is<JSReceiver>(pair.key)) goto IfSlow;
+ CreateDataProperty(result, pair.key, pair.value);
+ }
+ return result;
+ }
+ case (Object): {
+ goto IfSlow;
+ }
+ }
+ }
+
+ transitioning javascript builtin
+ ObjectFromEntries(implicit context: Context)(receiver: Object, ...arguments):
+ Object {
+ const iterable: Object = arguments[0];
+ try {
+ if (IsNullOrUndefined(iterable)) goto Throw;
+ return ObjectFromEntriesFastCase(iterable) otherwise IfSlow;
+ }
+ label IfSlow {
+ const result: JSObject = AllocateEmptyJSObject();
+ const fastIteratorResultMap: Map =
+ Cast<Map>(LoadNativeContext(context)[ITERATOR_RESULT_MAP_INDEX])
+ otherwise unreachable;
+ let i: iterator::IteratorRecord = iterator::GetIterator(iterable);
+ try {
+ assert(!IsNullOrUndefined(i.object));
+ while (true) {
+ const step: Object = iterator::IteratorStep(i, fastIteratorResultMap)
+ otherwise return result;
+ const iteratorValue: Object =
+ iterator::IteratorValue(step, fastIteratorResultMap);
+ const pair: KeyValuePair =
+ collections::LoadKeyValuePair(iteratorValue);
+ CreateDataProperty(result, pair.key, pair.value);
+ }
+ return result;
+ } catch (e) deferred {
+ iterator::IteratorCloseOnException(i, e);
+ }
+ }
+ label Throw deferred {
+ ThrowTypeError(context, kNotIterable);
+ }
+ }
+} // namespace object
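
Stripped of V8 specifics, the fast path above has a simple shape: walk the
entries, bail out to the generic path whenever a key could run observable
conversion code, and otherwise define the property directly. A hedged C++
analogue, where ReceiverKey stands in for a JSReceiver-valued key:

    #include <map>
    #include <optional>
    #include <string>
    #include <utility>
    #include <variant>
    #include <vector>

    // A key is either already a property key (modeled as a string) or an
    // object that would need an observable ToPropertyKey conversion.
    struct ReceiverKey {};
    using EntryKey = std::variant<std::string, ReceiverKey>;

    // Returns nullopt where the Torque macro takes the IfSlow label.
    std::optional<std::map<std::string, int>> FromEntriesFastCase(
        const std::vector<std::pair<EntryKey, int>>& entries) {
      std::map<std::string, int> result;
      for (const auto& entry : entries) {
        if (std::holds_alternative<ReceiverKey>(entry.first)) {
          return std::nullopt;  // goto IfSlow: conversion could run JS
        }
        // CreateDataProperty equivalent.
        result[std::get<std::string>(entry.first)] = entry.second;
      }
      return result;
    }

The real macro is stricter still: it only fires for a
FastJSArrayWithNoCustomIteration receiver, so iteration itself cannot run
user code either.
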
diff --git a/deps/v8/src/builtins/object.tq b/deps/v8/src/builtins/object.tq
new file mode 100644
index 0000000000..5cdcfd83b8
--- /dev/null
+++ b/deps/v8/src/builtins/object.tq
@@ -0,0 +1,12 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+namespace object {
+ macro AllocateEmptyJSObject(implicit context: Context)(): JSObject {
+ const objectFunction: JSFunction = GetObjectFunction();
+ const map: Map = Cast<Map>(objectFunction.prototype_or_initial_map)
+ otherwise unreachable;
+ return AllocateJSObjectFromMap(map);
+ }
+}
diff --git a/deps/v8/src/builtins/ppc/OWNERS b/deps/v8/src/builtins/ppc/OWNERS
index cf60da5cc7..6d1a8fc472 100644
--- a/deps/v8/src/builtins/ppc/OWNERS
+++ b/deps/v8/src/builtins/ppc/OWNERS
@@ -1,7 +1,4 @@
jyan@ca.ibm.com
-dstence@us.ibm.com
joransiu@ca.ibm.com
-mbrandy@us.ibm.com
michael_dawson@ca.ibm.com
-jbarboza@ca.ibm.com
-mmallick@ca.ibm.com
+miladfar@ca.ibm.com
\ No newline at end of file
diff --git a/deps/v8/src/builtins/ppc/builtins-ppc.cc b/deps/v8/src/builtins/ppc/builtins-ppc.cc
index 4446f81e58..58419b1ccc 100644
--- a/deps/v8/src/builtins/ppc/builtins-ppc.cc
+++ b/deps/v8/src/builtins/ppc/builtins-ppc.cc
@@ -4,14 +4,20 @@
#if V8_TARGET_ARCH_PPC
-#include "src/assembler-inl.h"
+#include "src/api-arguments.h"
#include "src/code-factory.h"
-#include "src/code-stubs.h"
+#include "src/counters.h"
#include "src/debug/debug.h"
#include "src/deoptimizer.h"
#include "src/frame-constants.h"
#include "src/frames.h"
+#include "src/macro-assembler-inl.h"
+#include "src/objects/cell.h"
+#include "src/objects/foreign.h"
+#include "src/objects/heap-number.h"
#include "src/objects/js-generator.h"
+#include "src/objects/smi.h"
+#include "src/register-configuration.h"
#include "src/runtime/runtime.h"
#include "src/wasm/wasm-objects.h"
@@ -80,8 +86,7 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
__ SmiUntag(r3);
}
static_assert(kJavaScriptCallCodeStartRegister == r5, "ABI mismatch");
- __ addi(r5, r5, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ JumpToJSEntry(r5);
+ __ JumpCodeObject(r5);
}
namespace {
@@ -161,6 +166,21 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
__ blr();
}
+void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
+ Register scratch, Label* stack_overflow) {
+ // Check the stack for overflow. We are not trying to catch
+ // interruptions (e.g. debug break and preemption) here, so the "real stack
+ // limit" is checked.
+ __ LoadRoot(scratch, RootIndex::kRealStackLimit);
+ // Make scratch the space we have left. The stack might already be overflowed
+ // here which will cause scratch to become negative.
+ __ sub(scratch, sp, scratch);
+ // Check if the arguments will overflow the stack.
+ __ ShiftLeftImm(r0, num_args, Operand(kPointerSizeLog2));
+ __ cmp(scratch, r0);
+ __ ble(stack_overflow); // Signed comparison.
+}
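
The same check in scalar C++, assuming a downward-growing stack (sp sits
above real_stack_limit when healthy); the function and parameter names are
illustrative.

    #include <cstdint>

    bool WouldOverflowStack(uintptr_t sp, uintptr_t real_stack_limit,
                            uint64_t num_args, int pointer_size_log2 = 3) {
      // May wrap negative if the stack is already overflowed; the signed
      // comparison below still takes the overflow branch, which is why the
      // assembly uses ble (signed) rather than an unsigned compare.
      int64_t space_left = static_cast<int64_t>(sp - real_stack_limit);
      int64_t needed = static_cast<int64_t>(num_args) << pointer_size_log2;
      return space_left <= needed;
    }
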
+
} // namespace
// The construct stub for ES5 constructor functions and ES6 class constructors.
@@ -242,11 +262,24 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// Restore constructor function and argument count.
__ LoadP(r4, MemOperand(fp, ConstructFrameConstants::kConstructorOffset));
__ LoadP(r3, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
- __ SmiUntag(r3, SetRC);
+ __ SmiUntag(r3);
// Set up pointer to last argument.
__ addi(r7, fp, Operand(StandardFrameConstants::kCallerSPOffset));
+ Label enough_stack_space, stack_overflow;
+ Generate_StackOverflowCheck(masm, r3, r8, &stack_overflow);
+ __ b(&enough_stack_space);
+
+ __ bind(&stack_overflow);
+ // Restore the context from the frame.
+ __ LoadP(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
+ __ CallRuntime(Runtime::kThrowStackOverflow);
+ // Unreachable code.
+ __ bkpt(0);
+
+ __ bind(&enough_stack_space);
+
// Copy arguments and receiver to the expression stack.
Label loop, no_args;
// ----------- S t a t e -------------
@@ -261,7 +294,8 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// -- sp[4*kPointerSize]: number of arguments (tagged)
// -- sp[5*kPointerSize]: context
// -----------------------------------
- __ beq(&no_args, cr0);
+ __ cmpi(r3, Operand::Zero());
+ __ beq(&no_args);
__ ShiftLeftImm(ip, r3, Operand(kPointerSizeLog2));
__ sub(sp, sp, ip);
__ mtctr(r3);
@@ -383,7 +417,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ Move(ip, debug_hook);
__ LoadByte(ip, MemOperand(ip), r0);
__ extsb(ip, ip);
- __ CmpSmiLiteral(ip, Smi::kZero, r0);
+ __ CmpSmiLiteral(ip, Smi::zero(), r0);
__ bne(&prepare_step_in_if_stepping);
// Flood function if we need to continue stepping in the suspended generator.
@@ -457,8 +491,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ mr(r4, r7);
static_assert(kJavaScriptCallCodeStartRegister == r5, "ABI mismatch");
__ LoadP(r5, FieldMemOperand(r4, JSFunction::kCodeOffset));
- __ addi(r5, r5, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ JumpToJSEntry(r5);
+ __ JumpCodeObject(r5);
}
__ bind(&prepare_step_in_if_stepping);
@@ -497,19 +530,208 @@ void Builtins::Generate_ConstructedNonConstructable(MacroAssembler* masm) {
__ CallRuntime(Runtime::kThrowConstructedNonConstructable);
}
-// Clobbers r5; preserves all other registers.
-static void Generate_CheckStackOverflow(MacroAssembler* masm, Register argc) {
+namespace {
+
+// Called with the native C calling convention. The corresponding function
+// signature is either:
+//
+// using JSEntryFunction = GeneratedCode<Address(
+// Address root_register_value, Address new_target, Address target,
+// Address receiver, intptr_t argc, Address** args)>;
+// or
+// using JSEntryFunction = GeneratedCode<Address(
+// Address root_register_value, MicrotaskQueue* microtask_queue)>;
+void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
+ Builtins::Name entry_trampoline) {
+ // The register state is either:
+ // r3: root_register_value
+ // r4: code entry
+ // r5: function
+ // r6: receiver
+ // r7: argc
+ // r8: argv
+ // or
+ // r3: root_register_value
+ // r4: microtask_queue
+
+ Label invoke, handler_entry, exit;
+
+ {
+ NoRootArrayScope no_root_array(masm);
+
+ // PPC LINUX ABI:
+ // preserve LR in pre-reserved slot in caller's frame
+ __ mflr(r0);
+ __ StoreP(r0, MemOperand(sp, kStackFrameLRSlot * kPointerSize));
+
+ // Save callee saved registers on the stack.
+ __ MultiPush(kCalleeSaved);
+
+ // Save callee-saved double registers.
+ __ MultiPushDoubles(kCalleeSavedDoubles);
+ // Set up the reserved register for 0.0.
+ __ LoadDoubleLiteral(kDoubleRegZero, Double(0.0), r0);
+
+ // Initialize the root register.
+ // C calling convention. The first argument is passed in r3.
+ __ mr(kRootRegister, r3);
+ }
+
+ // Push a frame with special values setup to mark it as an entry frame.
+ // r4: code entry
+ // r5: function
+ // r6: receiver
+ // r7: argc
+ // r8: argv
+ __ li(r0, Operand(-1)); // Push a bad frame pointer to fail if it is used.
+ __ push(r0);
+ if (FLAG_enable_embedded_constant_pool) {
+ __ li(kConstantPoolRegister, Operand::Zero());
+ __ push(kConstantPoolRegister);
+ }
+ __ mov(r0, Operand(StackFrame::TypeToMarker(type)));
+ __ push(r0);
+ __ push(r0);
+ // Save copies of the top frame descriptor on the stack.
+ __ Move(r3, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress,
+ masm->isolate()));
+ __ LoadP(r0, MemOperand(r3));
+ __ push(r0);
+
+ // Set up frame pointer for the frame to be pushed.
+ __ addi(fp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));
+
+ // If this is the outermost JS call, set js_entry_sp value.
+ Label non_outermost_js;
+ ExternalReference js_entry_sp =
+ ExternalReference::Create(IsolateAddressId::kJSEntrySPAddress,
+ masm->isolate());
+ __ Move(r3, js_entry_sp);
+ __ LoadP(r9, MemOperand(r3));
+ __ cmpi(r9, Operand::Zero());
+ __ bne(&non_outermost_js);
+ __ StoreP(fp, MemOperand(r3));
+ __ mov(ip, Operand(StackFrame::OUTERMOST_JSENTRY_FRAME));
+ Label cont;
+ __ b(&cont);
+ __ bind(&non_outermost_js);
+ __ mov(ip, Operand(StackFrame::INNER_JSENTRY_FRAME));
+ __ bind(&cont);
+ __ push(ip); // frame-type
+
+ // Jump to a faked try block that does the invoke, with a faked catch
+ // block that sets the pending exception.
+ __ b(&invoke);
+
+ // Block literal pool emission whilst taking the position of the handler
+ // entry. This avoids making the assumption that literal pools are always
+ // emitted after an instruction is emitted, rather than before.
+ {
+ ConstantPoolUnavailableScope constant_pool_unavailable(masm);
+ __ bind(&handler_entry);
+
+ // Store the current pc as the handler offset. It's used later to create the
+ // handler table.
+ masm->isolate()->builtins()->SetJSEntryHandlerOffset(handler_entry.pos());
+
+ // Caught exception: Store result (exception) in the pending exception
+ // field in the JSEnv and return a failure sentinel. Coming in here the
+ // fp will be invalid because the PushStackHandler below sets it to 0 to
+ // signal the existence of the JSEntry frame.
+ __ Move(ip,
+ ExternalReference::Create(IsolateAddressId::kPendingExceptionAddress,
+ masm->isolate()));
+ }
+
+ __ StoreP(r3, MemOperand(ip));
+ __ LoadRoot(r3, RootIndex::kException);
+ __ b(&exit);
+
+ // Invoke: Link this frame into the handler chain.
+ __ bind(&invoke);
+ // Must preserve r4-r8.
+ __ PushStackHandler();
+  // If an exception occurs that is not caught by another handler, this
+  // handler returns control to the code after the b(&invoke) above, which
+ // restores all kCalleeSaved registers (including cp and fp) to their
+ // saved values before returning a failure to C.
+
+  // Invoke the function by calling through the JS entry trampoline builtin
+  // and pop the faked function when we return. Notice that we cannot store a
+  // reference to the trampoline code directly in this stub, because runtime
+  // stubs are not traversed when doing GC.
+ Handle<Code> trampoline_code =
+ masm->isolate()->builtins()->builtin_handle(entry_trampoline);
+ __ Call(trampoline_code, RelocInfo::CODE_TARGET);
+
+ // Unlink this frame from the handler chain.
+ __ PopStackHandler();
+
+ __ bind(&exit); // r3 holds result
+ // Check if the current stack frame is marked as the outermost JS frame.
+ Label non_outermost_js_2;
+ __ pop(r8);
+ __ cmpi(r8, Operand(StackFrame::OUTERMOST_JSENTRY_FRAME));
+ __ bne(&non_outermost_js_2);
+ __ mov(r9, Operand::Zero());
+ __ Move(r8, js_entry_sp);
+ __ StoreP(r9, MemOperand(r8));
+ __ bind(&non_outermost_js_2);
+
+ // Restore the top frame descriptors from the stack.
+ __ pop(r6);
+ __ Move(ip, ExternalReference::Create(
+ IsolateAddressId::kCEntryFPAddress, masm->isolate()));
+ __ StoreP(r6, MemOperand(ip));
+
+ // Reset the stack to the callee saved registers.
+ __ addi(sp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));
+
+ // Restore callee-saved double registers.
+ __ MultiPopDoubles(kCalleeSavedDoubles);
+
+ // Restore callee-saved registers.
+ __ MultiPop(kCalleeSaved);
+
+ // Return
+ __ LoadP(r0, MemOperand(sp, kStackFrameLRSlot * kPointerSize));
+ __ mtlr(r0);
+ __ blr();
+}
+
+} // namespace
+
+void Builtins::Generate_JSEntry(MacroAssembler* masm) {
+ Generate_JSEntryVariant(masm, StackFrame::ENTRY,
+ Builtins::kJSEntryTrampoline);
+}
+
+void Builtins::Generate_JSConstructEntry(MacroAssembler* masm) {
+ Generate_JSEntryVariant(masm, StackFrame::CONSTRUCT_ENTRY,
+ Builtins::kJSConstructEntryTrampoline);
+}
+
+void Builtins::Generate_JSRunMicrotasksEntry(MacroAssembler* masm) {
+ Generate_JSEntryVariant(masm, StackFrame::ENTRY,
+ Builtins::kRunMicrotasksTrampoline);
+}
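
For orientation, the two native signatures from the comment at the top of
Generate_JSEntryVariant, written out as plain function-pointer types. These
are illustrative; V8 itself wraps the entry stubs in GeneratedCode on the
C++ side.

    #include <cstdint>

    using Address = uintptr_t;

    // JSEntry / JSConstructEntry variant (r3..r8 above):
    using JSEntryFn = Address (*)(Address root_register_value,
                                  Address new_target, Address target,
                                  Address receiver, intptr_t argc,
                                  Address** args);

    // JSRunMicrotasksEntry variant (r3, r4 above):
    struct MicrotaskQueue;  // opaque here
    using RunMicrotasksEntryFn = Address (*)(Address root_register_value,
                                             MicrotaskQueue* microtask_queue);
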
+
+// Clobbers scratch1 and scratch2; preserves all other registers.
+static void Generate_CheckStackOverflow(MacroAssembler* masm, Register argc,
+ Register scratch1, Register scratch2) {
// Check the stack for overflow. We are not trying to catch
// interruptions (e.g. debug break and preemption) here, so the "real stack
// limit" is checked.
Label okay;
- __ LoadRoot(r5, RootIndex::kRealStackLimit);
- // Make r5 the space we have left. The stack might already be overflowed
- // here which will cause r5 to become negative.
- __ sub(r5, sp, r5);
+ __ LoadRoot(scratch1, RootIndex::kRealStackLimit);
+ // Make scratch1 the space we have left. The stack might already be overflowed
+ // here which will cause scratch1 to become negative.
+ __ sub(scratch1, sp, scratch1);
// Check if the arguments will overflow the stack.
- __ ShiftLeftImm(r0, argc, Operand(kPointerSizeLog2));
- __ cmp(r5, r0);
+ __ ShiftLeftImm(scratch2, argc, Operand(kPointerSizeLog2));
+ __ cmp(scratch1, scratch2);
__ bgt(&okay); // Signed comparison.
// Out of stack space.
@@ -521,13 +743,12 @@ static void Generate_CheckStackOverflow(MacroAssembler* masm, Register argc) {
static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
bool is_construct) {
// Called from Generate_JS_Entry
- // r3: new.target
- // r4: function
- // r5: receiver
- // r6: argc
- // r7: argv
- // r0,r8-r9, cp may be clobbered
- ProfileEntryHookStub::MaybeCallEntryHook(masm);
+ // r4: new.target
+ // r5: function
+ // r6: receiver
+ // r7: argc
+ // r8: argv
+ // r0,r3,r9, cp may be clobbered
// Enter an internal frame.
{
@@ -540,38 +761,54 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ LoadP(cp, MemOperand(cp));
// Push the function and the receiver onto the stack.
- __ Push(r4, r5);
+ __ Push(r5, r6);
// Check if we have enough stack space to push all arguments.
- // Clobbers r5.
- Generate_CheckStackOverflow(masm, r6);
+ // Clobbers r3 and r6.
+ Generate_CheckStackOverflow(masm, r7, r3, r6);
+
+ // r4: new.target
+ // r5: function
+ // r7: argc
+ // r8: argv
+ // r0,r3,r6,r9, cp may be clobbered
+
+ // Setup new.target, argc and function.
+ __ mr(r3, r7);
+ __ mr(r6, r4);
+ __ mr(r4, r5);
+
+ // r3: argc
+ // r4: function
+ // r6: new.target
+ // r8: argv
// Copy arguments to the stack in a loop.
// r4: function
- // r6: argc
- // r7: argv, i.e. points to first arg
+ // r3: argc
+ // r8: argv, i.e. points to first arg
Label loop, entry;
- __ ShiftLeftImm(r0, r6, Operand(kPointerSizeLog2));
- __ add(r5, r7, r0);
+ __ ShiftLeftImm(r0, r3, Operand(kPointerSizeLog2));
+ __ add(r5, r8, r0);
// r5 points past last arg.
__ b(&entry);
__ bind(&loop);
- __ LoadP(r8, MemOperand(r7)); // read next parameter
- __ addi(r7, r7, Operand(kPointerSize));
- __ LoadP(r0, MemOperand(r8)); // dereference handle
+ __ LoadP(r9, MemOperand(r8)); // read next parameter
+ __ addi(r8, r8, Operand(kPointerSize));
+ __ LoadP(r0, MemOperand(r9)); // dereference handle
__ push(r0); // push parameter
__ bind(&entry);
- __ cmp(r7, r5);
+ __ cmp(r8, r5);
__ bne(&loop);
- // Setup new.target and argc.
- __ mr(r7, r3);
- __ mr(r3, r6);
- __ mr(r6, r7);
+ // r3: argc
+ // r4: function
+ // r6: new.target
// Initialize all JavaScript callee-saved registers, since they will be seen
// by the garbage collector as part of handlers.
__ LoadRoot(r7, RootIndex::kUndefinedValue);
+ __ mr(r8, r7);
__ mr(r14, r7);
__ mr(r15, r7);
__ mr(r16, r7);
@@ -599,6 +836,16 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
Generate_JSEntryTrampolineHelper(masm, true);
}
+void Builtins::Generate_RunMicrotasksTrampoline(MacroAssembler* masm) {
+ // This expects two C++ function parameters passed by Invoke() in
+ // execution.cc.
+ // r3: root_register_value
+ // r4: microtask_queue
+
+ __ mr(RunMicrotasksDescriptor::MicrotaskQueueRegister(), r4);
+ __ Jump(BUILTIN_CODE(masm->isolate(), RunMicrotasks), RelocInfo::CODE_TARGET);
+}
+
static void ReplaceClosureCodeWithOptimizedCode(
MacroAssembler* masm, Register optimized_code, Register closure,
Register scratch1, Register scratch2, Register scratch3) {
@@ -721,8 +968,7 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure,
scratch2, scratch3, feedback_vector);
static_assert(kJavaScriptCallCodeStartRegister == r5, "ABI mismatch");
- __ addi(r5, optimized_code_entry,
- Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ LoadCodeObjectEntry(r5, optimized_code_entry);
__ Jump(r5);
// Optimized code slot contains deoptimized code, evict it and re-enter the
@@ -810,11 +1056,24 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
// The function builds an interpreter frame. See InterpreterFrameConstants in
// frames.h for its layout.
void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
- ProfileEntryHookStub::MaybeCallEntryHook(masm);
-
Register closure = r4;
Register feedback_vector = r5;
+ // Get the bytecode array from the function object and load it into
+ // kInterpreterBytecodeArrayRegister.
+ __ LoadP(r3, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+ // Load original bytecode array or the debug copy.
+ __ LoadP(kInterpreterBytecodeArrayRegister,
+ FieldMemOperand(r3, SharedFunctionInfo::kFunctionDataOffset));
+ GetSharedFunctionInfoBytecode(masm, kInterpreterBytecodeArrayRegister, r7);
+
+  // The bytecode array could have been flushed from the shared function
+  // info; if so, call into CompileLazy.
+ Label compile_lazy;
+ __ CompareObjectType(kInterpreterBytecodeArrayRegister, r3, no_reg,
+ BYTECODE_ARRAY_TYPE);
+ __ bne(&compile_lazy);
+
// Load the feedback vector from the closure.
__ LoadP(feedback_vector,
FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
@@ -824,20 +1083,6 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// is optimized code or an optimization marker, call that instead.
MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, r7, r9, r8);
- // Open a frame scope to indicate that there is a frame on the stack. The
- // MANUAL indicates that the scope shouldn't actually generate code to set up
- // the frame (that is done below).
- FrameScope frame_scope(masm, StackFrame::MANUAL);
- __ PushStandardFrame(closure);
-
- // Get the bytecode array from the function object and load it into
- // kInterpreterBytecodeArrayRegister.
- __ LoadP(r3, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
- // Load original bytecode array or the debug copy.
- __ LoadP(kInterpreterBytecodeArrayRegister,
- FieldMemOperand(r3, SharedFunctionInfo::kFunctionDataOffset));
- GetSharedFunctionInfoBytecode(masm, kInterpreterBytecodeArrayRegister, r7);
-
// Increment invocation count for the function.
__ LoadWord(
r8,
@@ -849,18 +1094,11 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
FieldMemOperand(feedback_vector, FeedbackVector::kInvocationCountOffset),
r0);
- // Check function data field is actually a BytecodeArray object.
-
- if (FLAG_debug_code) {
- __ TestIfSmi(kInterpreterBytecodeArrayRegister, r0);
- __ Assert(ne,
- AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry,
- cr0);
- __ CompareObjectType(kInterpreterBytecodeArrayRegister, r3, no_reg,
- BYTECODE_ARRAY_TYPE);
- __ Assert(
- eq, AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
- }
+ // Open a frame scope to indicate that there is a frame on the stack. The
+ // MANUAL indicates that the scope shouldn't actually generate code to set up
+ // the frame (that is done below).
+ FrameScope frame_scope(masm, StackFrame::MANUAL);
+ __ PushStandardFrame(closure);
// Reset code age.
__ mov(r8, Operand(BytecodeArray::kNoAgeBytecodeAge));
@@ -958,22 +1196,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// The return value is in r3.
LeaveInterpreterFrame(masm, r5);
__ blr();
-}
-static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
- Register scratch,
- Label* stack_overflow) {
- // Check the stack for overflow. We are not trying to catch
- // interruptions (e.g. debug break and preemption) here, so the "real stack
- // limit" is checked.
- __ LoadRoot(scratch, RootIndex::kRealStackLimit);
- // Make scratch the space we have left. The stack might already be overflowed
- // here which will cause scratch to become negative.
- __ sub(scratch, sp, scratch);
- // Check if the arguments will overflow the stack.
- __ ShiftLeftImm(r0, num_args, Operand(kPointerSizeLog2));
- __ cmp(scratch, r0);
- __ ble(stack_overflow); // Signed comparison.
+ __ bind(&compile_lazy);
+ GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
+ __ bkpt(0); // Should not return.
}
static void Generate_InterpreterPushArgs(MacroAssembler* masm,
@@ -1100,12 +1326,14 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
// Set the return address to the correct point in the interpreter entry
// trampoline.
Label builtin_trampoline, trampoline_loaded;
- Smi* interpreter_entry_return_pc_offset(
+ Smi interpreter_entry_return_pc_offset(
masm->isolate()->heap()->interpreter_entry_return_pc_offset());
- DCHECK_NE(interpreter_entry_return_pc_offset, Smi::kZero);
+ DCHECK_NE(interpreter_entry_return_pc_offset, Smi::zero());
- // If the SFI function_data is an InterpreterData, get the trampoline stored
- // in it, otherwise get the trampoline from the builtins list.
+ // If the SFI function_data is an InterpreterData, the function will have a
+ // custom copy of the interpreter entry trampoline for profiling. If so,
+ // get the custom trampoline, otherwise grab the entry address of the global
+ // trampoline.
__ LoadP(r5, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
__ LoadP(r5, FieldMemOperand(r5, JSFunction::kSharedFunctionInfoOffset));
__ LoadP(r5, FieldMemOperand(r5, SharedFunctionInfo::kFunctionDataOffset));
@@ -1116,14 +1344,17 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
__ LoadP(r5,
FieldMemOperand(r5, InterpreterData::kInterpreterTrampolineOffset));
+ __ addi(r5, r5, Operand(Code::kHeaderSize - kHeapObjectTag));
__ b(&trampoline_loaded);
__ bind(&builtin_trampoline);
- __ Move(r5, BUILTIN_CODE(masm->isolate(), InterpreterEntryTrampoline));
+ __ Move(r5, ExternalReference::
+ address_of_interpreter_entry_trampoline_instruction_start(
+ masm->isolate()));
+ __ LoadP(r5, MemOperand(r5));
__ bind(&trampoline_loaded);
- __ addi(r0, r5, Operand(interpreter_entry_return_pc_offset->value() +
- Code::kHeaderSize - kHeapObjectTag));
+ __ addi(r0, r5, Operand(interpreter_entry_return_pc_offset->value()));
__ mtlr(r0);
// Initialize the dispatch table register.
@@ -1257,8 +1488,7 @@ void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
// which has be reset to the compile lazy builtin.
static_assert(kJavaScriptCallCodeStartRegister == r5, "ABI mismatch");
__ LoadP(r5, FieldMemOperand(r4, JSFunction::kCodeOffset));
- __ addi(r5, r5, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ JumpToJSEntry(r5);
+ __ JumpCodeObject(r5);
}
namespace {
@@ -1339,7 +1569,7 @@ void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
// If the code object is null, just return to the caller.
Label skip;
- __ CmpSmiLiteral(r3, Smi::kZero, r0);
+ __ CmpSmiLiteral(r3, Smi::zero(), r0);
__ bne(&skip);
__ Ret();
@@ -1597,7 +1827,7 @@ static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
} else {
__ Push(fp, r7, r4, r3);
}
- __ Push(Smi::kZero); // Padding.
+ __ Push(Smi::zero()); // Padding.
__ addi(fp, sp,
Operand(ArgumentsAdaptorFrameConstants::kFixedFrameSizeFromFp));
}
@@ -2241,8 +2471,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// r6 : new target (passed through to callee)
static_assert(kJavaScriptCallCodeStartRegister == r5, "ABI mismatch");
__ LoadP(r5, FieldMemOperand(r4, JSFunction::kCodeOffset));
- __ addi(r5, r5, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ CallJSEntry(r5);
+ __ CallCodeObject(r5);
// Store offset of return address for deoptimizer.
masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(masm->pc_offset());
@@ -2257,8 +2486,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ bind(&dont_adapt_arguments);
static_assert(kJavaScriptCallCodeStartRegister == r5, "ABI mismatch");
__ LoadP(r5, FieldMemOperand(r4, JSFunction::kCodeOffset));
- __ addi(r5, r5, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ JumpToJSEntry(r5);
+ __ JumpCodeObject(r5);
__ bind(&stack_overflow);
{
@@ -2269,9 +2497,10 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
}
void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
- // The function index was put in r15 by the jump table trampoline.
+ // The function index was put in a register by the jump table trampoline.
// Convert to Smi for the runtime call.
- __ SmiTag(r15, r15);
+ __ SmiTag(kWasmCompileLazyFuncIndexRegister,
+ kWasmCompileLazyFuncIndexRegister);
{
HardAbortScope hard_abort(masm); // Avoid calls to Abort.
FrameAndConstantPoolScope scope(masm, StackFrame::WASM_COMPILE_LAZY);
@@ -2288,13 +2517,13 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
// Pass instance and function index as explicit arguments to the runtime
// function.
- __ Push(kWasmInstanceRegister, r15);
+ __ Push(kWasmInstanceRegister, kWasmCompileLazyFuncIndexRegister);
// Load the correct CEntry builtin from the instance object.
__ LoadP(r5, FieldMemOperand(kWasmInstanceRegister,
WasmInstanceObject::kCEntryStubOffset));
// Initialize the JavaScript context with 0. CEntry will use it to
// set the current context on the isolate.
- __ LoadSmiLiteral(cp, Smi::kZero);
+ __ LoadSmiLiteral(cp, Smi::zero());
__ CallRuntimeWithCEntry(Runtime::kWasmCompileLazy, r5);
// The entrypoint address is the return value.
__ mr(r11, kReturnRegister0);
@@ -2319,7 +2548,6 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
//
// If argv_mode == kArgvInRegister:
// r5: pointer to the first argument
- ProfileEntryHookStub::MaybeCallEntryHook(masm);
__ mr(r15, r4);
@@ -2374,30 +2602,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
__ Move(isolate_reg, ExternalReference::isolate_address(masm->isolate()));
Register target = r15;
- if (ABI_USES_FUNCTION_DESCRIPTORS) {
- // AIX/PPC64BE Linux use a function descriptor.
- __ LoadP(ToRegister(ABI_TOC_REGISTER), MemOperand(r15, kPointerSize));
- __ LoadP(ip, MemOperand(r15, 0)); // Instruction address
- target = ip;
- } else if (ABI_CALL_VIA_IP) {
- __ Move(ip, r15);
- target = ip;
- }
-
- // To let the GC traverse the return address of the exit frames, we need to
- // know where the return address is. The CEntryStub is unmovable, so
- // we can store the address on the stack to be able to find it again and
- // we never have to restore it, because it will not change.
- Label start_call;
- constexpr int after_call_offset = 5 * kInstrSize;
- DCHECK_NE(r7, target);
- __ LoadPC(r7);
- __ bind(&start_call);
- __ addi(r7, r7, Operand(after_call_offset));
- __ StoreP(r7, MemOperand(sp, kStackFrameExtraParamSlot * kPointerSize));
- __ Call(target);
- DCHECK_EQ(after_call_offset - kInstrSize,
- __ SizeOfCodeGeneratedSince(&start_call));
+ __ StoreReturnAddressAndCall(target);
// If return value is on the stack, pop it to registers.
if (needs_return_buffer) {
@@ -2712,43 +2917,6 @@ void Builtins::Generate_MathPowInternal(MacroAssembler* masm) {
__ Ret();
}
-namespace {
-
-void GenerateInternalArrayConstructorCase(MacroAssembler* masm,
- ElementsKind kind) {
- // Load undefined into the allocation site parameter as required by
- // ArrayNArgumentsConstructor.
- __ LoadRoot(kJavaScriptCallExtraArg1Register, RootIndex::kUndefinedValue);
-
- __ cmpli(r3, Operand(1));
-
- __ Jump(CodeFactory::InternalArrayNoArgumentConstructor(masm->isolate(), kind)
- .code(),
- RelocInfo::CODE_TARGET, lt);
-
- __ Jump(BUILTIN_CODE(masm->isolate(), ArrayNArgumentsConstructor),
- RelocInfo::CODE_TARGET, gt);
-
- if (IsFastPackedElementsKind(kind)) {
- // We might need to create a holey array
- // look at the first argument
- __ LoadP(r6, MemOperand(sp, 0));
- __ cmpi(r6, Operand::Zero());
-
- __ Jump(CodeFactory::InternalArraySingleArgumentConstructor(
- masm->isolate(), GetHoleyElementsKind(kind))
- .code(),
- RelocInfo::CODE_TARGET, ne);
- }
-
- __ Jump(
- CodeFactory::InternalArraySingleArgumentConstructor(masm->isolate(), kind)
- .code(),
- RelocInfo::CODE_TARGET);
-}
-
-} // namespace
-
void Builtins::Generate_InternalArrayConstructorImpl(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r3 : argc
@@ -2768,33 +2936,402 @@ void Builtins::Generate_InternalArrayConstructorImpl(MacroAssembler* masm) {
__ Assert(ne, AbortReason::kUnexpectedInitialMapForArrayFunction, cr0);
__ CompareObjectType(r6, r6, r7, MAP_TYPE);
__ Assert(eq, AbortReason::kUnexpectedInitialMapForArrayFunction);
- }
- // Figure out the right elements kind
- __ LoadP(r6, FieldMemOperand(r4, JSFunction::kPrototypeOrInitialMapOffset));
- // Load the map's "bit field 2" into |result|.
- __ lbz(r6, FieldMemOperand(r6, Map::kBitField2Offset));
- // Retrieve elements_kind from bit field 2.
- __ DecodeField<Map::ElementsKindBits>(r6);
+ // Figure out the right elements kind
+ __ LoadP(r6, FieldMemOperand(r4, JSFunction::kPrototypeOrInitialMapOffset));
+ // Load the map's "bit field 2" into |result|.
+ __ lbz(r6, FieldMemOperand(r6, Map::kBitField2Offset));
+ // Retrieve elements_kind from bit field 2.
+ __ DecodeField<Map::ElementsKindBits>(r6);
- if (FLAG_debug_code) {
- Label done;
+ // Initial elements kind should be packed elements.
__ cmpi(r6, Operand(PACKED_ELEMENTS));
- __ beq(&done);
- __ cmpi(r6, Operand(HOLEY_ELEMENTS));
- __ Assert(
- eq,
- AbortReason::kInvalidElementsKindForInternalArrayOrInternalPackedArray);
- __ bind(&done);
+ __ Assert(eq, AbortReason::kInvalidElementsKindForInternalPackedArray);
+
+ // No arguments should be passed.
+ __ cmpi(r3, Operand(0));
+ __ Assert(eq, AbortReason::kWrongNumberOfArgumentsForInternalPackedArray);
+ }
+
+ __ Jump(
+ BUILTIN_CODE(masm->isolate(), InternalArrayNoArgumentConstructor_Packed),
+ RelocInfo::CODE_TARGET);
+}
+
+namespace {
+
+static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
+ return ref0.address() - ref1.address();
+}
+
+// Calls an API function. Allocates HandleScope, extracts returned value
+// from handle and propagates exceptions. Restores context. stack_space is
+// the space to be unwound on exit (it includes the JS call arguments space
+// and the additional space allocated for the fast call).
+static void CallApiFunctionAndReturn(MacroAssembler* masm,
+ Register function_address,
+ ExternalReference thunk_ref,
+ int stack_space,
+ MemOperand* stack_space_operand,
+ MemOperand return_value_operand) {
+ Isolate* isolate = masm->isolate();
+ ExternalReference next_address =
+ ExternalReference::handle_scope_next_address(isolate);
+ const int kNextOffset = 0;
+ const int kLimitOffset = AddressOffset(
+ ExternalReference::handle_scope_limit_address(isolate), next_address);
+ const int kLevelOffset = AddressOffset(
+ ExternalReference::handle_scope_level_address(isolate), next_address);
+
+ // Additional parameter is the address of the actual callback.
+ DCHECK(function_address == r4 || function_address == r5);
+ Register scratch = r6;
+
+ __ Move(scratch, ExternalReference::is_profiling_address(isolate));
+ __ lbz(scratch, MemOperand(scratch, 0));
+ __ cmpi(scratch, Operand::Zero());
+
+ if (CpuFeatures::IsSupported(ISELECT)) {
+ __ Move(scratch, thunk_ref);
+ __ isel(eq, scratch, function_address, scratch);
+ } else {
+ Label profiler_disabled;
+ Label end_profiler_check;
+ __ beq(&profiler_disabled);
+ __ Move(scratch, thunk_ref);
+ __ b(&end_profiler_check);
+ __ bind(&profiler_disabled);
+ __ mr(scratch, function_address);
+ __ bind(&end_profiler_check);
+ }
+
+ // Allocate HandleScope in callee-save registers.
+ // r17 - next_address
+ // r14 - next_address->kNextOffset
+ // r15 - next_address->kLimitOffset
+ // r16 - next_address->kLevelOffset
+ __ Move(r17, next_address);
+ __ LoadP(r14, MemOperand(r17, kNextOffset));
+ __ LoadP(r15, MemOperand(r17, kLimitOffset));
+ __ lwz(r16, MemOperand(r17, kLevelOffset));
+ __ addi(r16, r16, Operand(1));
+ __ stw(r16, MemOperand(r17, kLevelOffset));
+
+ if (FLAG_log_timer_events) {
+ FrameScope frame(masm, StackFrame::MANUAL);
+ __ PushSafepointRegisters();
+ __ PrepareCallCFunction(1, r3);
+ __ Move(r3, ExternalReference::isolate_address(isolate));
+ __ CallCFunction(ExternalReference::log_enter_external_function(), 1);
+ __ PopSafepointRegisters();
}
- Label fast_elements_case;
- __ cmpi(r6, Operand(PACKED_ELEMENTS));
- __ beq(&fast_elements_case);
- GenerateInternalArrayConstructorCase(masm, HOLEY_ELEMENTS);
+ __ StoreReturnAddressAndCall(scratch);
+
+ if (FLAG_log_timer_events) {
+ FrameScope frame(masm, StackFrame::MANUAL);
+ __ PushSafepointRegisters();
+ __ PrepareCallCFunction(1, r3);
+ __ Move(r3, ExternalReference::isolate_address(isolate));
+ __ CallCFunction(ExternalReference::log_leave_external_function(), 1);
+ __ PopSafepointRegisters();
+ }
+
+ Label promote_scheduled_exception;
+ Label delete_allocated_handles;
+ Label leave_exit_frame;
+ Label return_value_loaded;
+
+  // Load value from ReturnValue.
+ __ LoadP(r3, return_value_operand);
+ __ bind(&return_value_loaded);
+ // No more valid handles (the result handle was the last one). Restore
+ // previous handle scope.
+ __ StoreP(r14, MemOperand(r17, kNextOffset));
+ if (__ emit_debug_code()) {
+ __ lwz(r4, MemOperand(r17, kLevelOffset));
+ __ cmp(r4, r16);
+ __ Check(eq, AbortReason::kUnexpectedLevelAfterReturnFromApiCall);
+ }
+ __ subi(r16, r16, Operand(1));
+ __ stw(r16, MemOperand(r17, kLevelOffset));
+ __ LoadP(r0, MemOperand(r17, kLimitOffset));
+ __ cmp(r15, r0);
+ __ bne(&delete_allocated_handles);
+
+ // Leave the API exit frame.
+ __ bind(&leave_exit_frame);
+ // LeaveExitFrame expects unwind space to be in a register.
+ if (stack_space_operand != nullptr) {
+ __ LoadP(r14, *stack_space_operand);
+ } else {
+ __ mov(r14, Operand(stack_space));
+ }
+ __ LeaveExitFrame(false, r14, stack_space_operand != nullptr);
+
+ // Check if the function scheduled an exception.
+ __ LoadRoot(r14, RootIndex::kTheHoleValue);
+ __ Move(r15, ExternalReference::scheduled_exception_address(isolate));
+ __ LoadP(r15, MemOperand(r15));
+ __ cmp(r14, r15);
+ __ bne(&promote_scheduled_exception);
+
+ __ blr();
+
+ // Re-throw by promoting a scheduled exception.
+ __ bind(&promote_scheduled_exception);
+ __ TailCallRuntime(Runtime::kPromoteScheduledException);
+
+ // HandleScope limit has changed. Delete allocated extensions.
+ __ bind(&delete_allocated_handles);
+ __ StoreP(r15, MemOperand(r17, kLimitOffset));
+ __ mr(r14, r3);
+ __ PrepareCallCFunction(1, r15);
+ __ Move(r3, ExternalReference::isolate_address(isolate));
+ __ CallCFunction(ExternalReference::delete_handle_scope_extensions(), 1);
+ __ mr(r3, r14);
+ __ b(&leave_exit_frame);
+}
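
One PPC-specific wrinkle in the function above: when ISELECT is available,
the profiler check becomes a branchless select instead of the two-label
branch sequence. In C++ terms the whole check is a ternary; the names here
are illustrative.

    #include <cstdint>

    uintptr_t SelectCallTarget(bool is_profiling, uintptr_t thunk,
                               uintptr_t function_address) {
      // isel eq, scratch, function_address, scratch
      return is_profiling ? thunk : function_address;
    }
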
+
+} // namespace
+
+void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- cp : kTargetContext
+ // -- r4 : kApiFunctionAddress
+ // -- r5 : kArgc
+ // --
+ // -- sp[0] : last argument
+ // -- ...
+  // -- sp[(argc - 1) * 4] : first argument
+ // -- sp[(argc + 0) * 4] : receiver
+ // -- sp[(argc + 1) * 4] : kHolder
+ // -- sp[(argc + 2) * 4] : kCallData
+ // -----------------------------------
+
+ Register api_function_address = r4;
+ Register argc = r5;
+ Register scratch = r7;
+ Register index = r8; // For indexing MemOperands.
+
+ DCHECK(!AreAliased(api_function_address, argc, scratch, index));
+
+ // Stack offsets (without argc).
+ static constexpr int kReceiverOffset = 0;
+ static constexpr int kHolderOffset = kReceiverOffset + 1;
+ static constexpr int kCallDataOffset = kHolderOffset + 1;
+
+ // Extra stack arguments are: the receiver, kHolder, kCallData.
+ static constexpr int kExtraStackArgumentCount = 3;
+
+ typedef FunctionCallbackArguments FCA;
+
+ STATIC_ASSERT(FCA::kArgsLength == 6);
+ STATIC_ASSERT(FCA::kNewTargetIndex == 5);
+ STATIC_ASSERT(FCA::kDataIndex == 4);
+ STATIC_ASSERT(FCA::kReturnValueOffset == 3);
+ STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
+ STATIC_ASSERT(FCA::kIsolateIndex == 1);
+ STATIC_ASSERT(FCA::kHolderIndex == 0);
+
+ // Set up FunctionCallbackInfo's implicit_args on the stack as follows:
+ //
+ // Target state:
+ // sp[0 * kPointerSize]: kHolder
+ // sp[1 * kPointerSize]: kIsolate
+ // sp[2 * kPointerSize]: undefined (kReturnValueDefaultValue)
+ // sp[3 * kPointerSize]: undefined (kReturnValue)
+ // sp[4 * kPointerSize]: kData
+ // sp[5 * kPointerSize]: undefined (kNewTarget)
+
+ // Reserve space on the stack.
+ __ subi(sp, sp, Operand(FCA::kArgsLength * kPointerSize));
+
+ // kHolder.
+ __ addi(index, argc, Operand(FCA::kArgsLength + kHolderOffset));
+ __ ShiftLeftImm(ip, index, Operand(kPointerSizeLog2));
+ __ LoadPX(scratch, MemOperand(sp, ip));
+ __ StoreP(scratch, MemOperand(sp, 0 * kPointerSize));
+
+ // kIsolate.
+ __ Move(scratch, ExternalReference::isolate_address(masm->isolate()));
+ __ StoreP(scratch, MemOperand(sp, 1 * kPointerSize));
+
+ // kReturnValueDefaultValue, kReturnValue, and kNewTarget.
+ __ LoadRoot(scratch, RootIndex::kUndefinedValue);
+ __ StoreP(scratch, MemOperand(sp, 2 * kPointerSize));
+ __ StoreP(scratch, MemOperand(sp, 3 * kPointerSize));
+ __ StoreP(scratch, MemOperand(sp, 5 * kPointerSize));
+
+ // kData.
+ __ addi(index, argc, Operand(FCA::kArgsLength + kCallDataOffset));
+ __ ShiftLeftImm(ip, index, Operand(kPointerSizeLog2));
+ __ LoadPX(scratch, MemOperand(sp, ip));
+ __ StoreP(scratch, MemOperand(sp, 4 * kPointerSize));
+
+ // Keep a pointer to kHolder (= implicit_args) in a scratch register.
+ // We use it below to set up the FunctionCallbackInfo object.
+ __ mr(scratch, sp);
+
+ // Allocate the v8::Arguments structure in the arguments' space since
+ // it's not controlled by GC.
+ // PPC LINUX ABI:
+ //
+ // Create 4 extra slots on stack:
+ // [0] space for DirectCEntryStub's LR save
+ // [1-3] FunctionCallbackInfo
+ // [4] number of bytes to drop from the stack after returning
+ static constexpr int kApiStackSpace = 5;
+ static constexpr bool kDontSaveDoubles = false;
+
+ FrameScope frame_scope(masm, StackFrame::MANUAL);
+ __ EnterExitFrame(kDontSaveDoubles, kApiStackSpace);
+
+ // FunctionCallbackInfo::implicit_args_ (points at kHolder as set up above).
+ // Arguments are after the return address (pushed by EnterExitFrame()).
+ __ StoreP(scratch,
+ MemOperand(sp, (kStackFrameExtraParamSlot + 1) * kPointerSize));
+
+ // FunctionCallbackInfo::values_ (points at the first varargs argument passed
+ // on the stack).
+ __ addi(scratch, scratch, Operand((FCA::kArgsLength - 1) * kPointerSize));
+ __ ShiftLeftImm(ip, argc, Operand(kPointerSizeLog2));
+ __ add(scratch, scratch, ip);
+ __ StoreP(scratch,
+ MemOperand(sp, (kStackFrameExtraParamSlot + 2) * kPointerSize));
+
+ // FunctionCallbackInfo::length_.
+ __ stw(argc, MemOperand(sp, (kStackFrameExtraParamSlot + 3) * kPointerSize));
+
+ // We also store the number of bytes to drop from the stack after returning
+ // from the API function here.
+ __ mov(scratch,
+ Operand((FCA::kArgsLength + kExtraStackArgumentCount) * kPointerSize));
+ __ ShiftLeftImm(ip, argc, Operand(kPointerSizeLog2));
+ __ add(scratch, scratch, ip);
+ __ StoreP(scratch,
+ MemOperand(sp, (kStackFrameExtraParamSlot + 4) * kPointerSize));
+
+ // v8::InvocationCallback's argument.
+ __ addi(r3, sp, Operand((kStackFrameExtraParamSlot + 1) * kPointerSize));
+
+ ExternalReference thunk_ref = ExternalReference::invoke_function_callback();
+
+ // There are two stack slots above the arguments we constructed on the stack.
+ // TODO(jgruber): Document what these arguments are.
+ static constexpr int kStackSlotsAboveFCA = 2;
+ MemOperand return_value_operand(
+ fp, (kStackSlotsAboveFCA + FCA::kReturnValueOffset) * kPointerSize);
+
+ static constexpr int kUseStackSpaceOperand = 0;
+ MemOperand stack_space_operand(
+ sp, (kStackFrameExtraParamSlot + 4) * kPointerSize);
+
+ AllowExternalCallThatCantCauseGC scope(masm);
+ CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
+ kUseStackSpaceOperand, &stack_space_operand,
+ return_value_operand);
+}
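
The LoadPX reads near the top of this builtin all follow one formula,
sketched here in C++: after sp is dropped by FCA::kArgsLength slots, the
receiver/kHolder/kCallData pushed by the caller sit argc + kArgsLength
(+ offset) slots above the new sp. The helper below is purely illustrative.

    #include <cstdint>

    uintptr_t LoadExtraStackArg(const uintptr_t* sp, uint64_t argc,
                                uint64_t offset /* kHolderOffset, ... */) {
      constexpr uint64_t kArgsLength = 6;  // FCA::kArgsLength above
      return sp[argc + kArgsLength + offset];
    }
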
+
+void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
+ int arg0Slot = 0;
+ int accessorInfoSlot = 0;
+ int apiStackSpace = 0;
+  // Build v8::PropertyCallbackInfo::args_ array on the stack and push the
+  // property name below the exit frame, so that the GC is aware of them.
+ STATIC_ASSERT(PropertyCallbackArguments::kShouldThrowOnErrorIndex == 0);
+ STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 1);
+ STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 2);
+ STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 3);
+ STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 4);
+ STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 5);
+ STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 6);
+ STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 7);
+
+ Register receiver = ApiGetterDescriptor::ReceiverRegister();
+ Register holder = ApiGetterDescriptor::HolderRegister();
+ Register callback = ApiGetterDescriptor::CallbackRegister();
+ Register scratch = r7;
+ DCHECK(!AreAliased(receiver, holder, callback, scratch));
+
+ Register api_function_address = r5;
+
+ __ push(receiver);
+ // Push data from AccessorInfo.
+ __ LoadP(scratch, FieldMemOperand(callback, AccessorInfo::kDataOffset));
+ __ push(scratch);
+ __ LoadRoot(scratch, RootIndex::kUndefinedValue);
+ __ Push(scratch, scratch);
+ __ Move(scratch, ExternalReference::isolate_address(masm->isolate()));
+ __ Push(scratch, holder);
+ __ Push(Smi::zero()); // should_throw_on_error -> false
+ __ LoadP(scratch, FieldMemOperand(callback, AccessorInfo::kNameOffset));
+ __ push(scratch);
+
+ // v8::PropertyCallbackInfo::args_ array and name handle.
+ const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
+
+ // Load address of v8::PropertyAccessorInfo::args_ array and name handle.
+ __ mr(r3, sp); // r3 = Handle<Name>
+ __ addi(r4, r3, Operand(1 * kPointerSize)); // r4 = v8::PCI::args_
+
+  // If ABI passes Handles (pointer-sized struct) in a register:
+  //
+  // Create 2 extra slots on stack:
+  // [0] space for DirectCEntryStub's LR save
+  // [1] AccessorInfo&
+  //
+  // Otherwise:
+  //
+  // Create 3 extra slots on stack:
+  // [0] space for DirectCEntryStub's LR save
+  // [1] copy of Handle (first arg)
+  // [2] AccessorInfo&
+ if (ABI_PASSES_HANDLES_IN_REGS) {
+ accessorInfoSlot = kStackFrameExtraParamSlot + 1;
+ apiStackSpace = 2;
+ } else {
+ arg0Slot = kStackFrameExtraParamSlot + 1;
+ accessorInfoSlot = arg0Slot + 1;
+ apiStackSpace = 3;
+ }
+
+ FrameScope frame_scope(masm, StackFrame::MANUAL);
+ __ EnterExitFrame(false, apiStackSpace);
+
+ if (!ABI_PASSES_HANDLES_IN_REGS) {
+ // pass 1st arg by reference
+ __ StoreP(r3, MemOperand(sp, arg0Slot * kPointerSize));
+ __ addi(r3, sp, Operand(arg0Slot * kPointerSize));
+ }
+
+ // Create v8::PropertyCallbackInfo object on the stack and initialize
+ // its args_ field.
+ __ StoreP(r4, MemOperand(sp, accessorInfoSlot * kPointerSize));
+ __ addi(r4, sp, Operand(accessorInfoSlot * kPointerSize));
+ // r4 = v8::PropertyCallbackInfo&
+
+ ExternalReference thunk_ref =
+ ExternalReference::invoke_accessor_getter_callback();
+
+ __ LoadP(scratch, FieldMemOperand(callback, AccessorInfo::kJsGetterOffset));
+ __ LoadP(api_function_address,
+ FieldMemOperand(scratch, Foreign::kForeignAddressOffset));
+
+ // +3 is to skip prolog, return address and name handle.
+ MemOperand return_value_operand(
+ fp, (PropertyCallbackArguments::kReturnValueOffset + 3) * kPointerSize);
+ MemOperand* const kUseStackSpaceConstant = nullptr;
+ CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
+ kStackUnwindSpace, kUseStackSpaceConstant,
+ return_value_operand);
+}
- __ bind(&fast_elements_case);
- GenerateInternalArrayConstructorCase(masm, PACKED_ELEMENTS);
+void Builtins::Generate_DirectCEntry(MacroAssembler* masm) {
+ // Unused.
+ __ stop(0);
}
#undef __
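A similarly hedged sketch of the accessor signature that Generate_CallApiGetter
reaches through invoke_accessor_getter_callback; the PropertyCallbackInfo::args_
array pushed above backs the info argument (public v8.h API, getter body
illustrative):

    #include <v8.h>

    void MyGetter(v8::Local<v8::Name> name,
                  const v8::PropertyCallbackInfo<v8::Value>& info) {
      // Writes the kReturnValue slot prepared by the stub.
      info.GetReturnValue().Set(v8::Number::New(info.GetIsolate(), 42));
    }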
diff --git a/deps/v8/src/builtins/s390/OWNERS b/deps/v8/src/builtins/s390/OWNERS
index cf60da5cc7..6d1a8fc472 100644
--- a/deps/v8/src/builtins/s390/OWNERS
+++ b/deps/v8/src/builtins/s390/OWNERS
@@ -1,7 +1,4 @@
jyan@ca.ibm.com
-dstence@us.ibm.com
joransiu@ca.ibm.com
-mbrandy@us.ibm.com
michael_dawson@ca.ibm.com
-jbarboza@ca.ibm.com
-mmallick@ca.ibm.com
+miladfar@ca.ibm.com
\ No newline at end of file
diff --git a/deps/v8/src/builtins/s390/builtins-s390.cc b/deps/v8/src/builtins/s390/builtins-s390.cc
index 198ba0971d..2179e7bcac 100644
--- a/deps/v8/src/builtins/s390/builtins-s390.cc
+++ b/deps/v8/src/builtins/s390/builtins-s390.cc
@@ -4,14 +4,20 @@
#if V8_TARGET_ARCH_S390
-#include "src/assembler-inl.h"
+#include "src/api-arguments.h"
#include "src/code-factory.h"
-#include "src/code-stubs.h"
+#include "src/counters.h"
#include "src/debug/debug.h"
#include "src/deoptimizer.h"
#include "src/frame-constants.h"
#include "src/frames.h"
+#include "src/macro-assembler-inl.h"
+#include "src/objects/cell.h"
+#include "src/objects/foreign.h"
+#include "src/objects/heap-number.h"
#include "src/objects/js-generator.h"
+#include "src/objects/smi.h"
+#include "src/register-configuration.h"
#include "src/runtime/runtime.h"
#include "src/wasm/wasm-objects.h"
@@ -80,8 +86,7 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
__ SmiUntag(r2);
}
static_assert(kJavaScriptCallCodeStartRegister == r4, "ABI mismatch");
- __ AddP(r4, r4, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ JumpToJSEntry(r4);
+ __ JumpCodeObject(r4);
}
namespace {
@@ -155,6 +160,21 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
__ Ret();
}
+void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
+ Register scratch, Label* stack_overflow) {
+ // Check the stack for overflow. We are not trying to catch
+ // interruptions (e.g. debug break and preemption) here, so the "real stack
+ // limit" is checked.
+ __ LoadRoot(scratch, RootIndex::kRealStackLimit);
+ // Make scratch the space we have left. The stack might already be overflowed
+ // here which will cause scratch to become negative.
+ __ SubP(scratch, sp, scratch);
+ // Check if the arguments will overflow the stack.
+ __ ShiftLeftP(r0, num_args, Operand(kPointerSizeLog2));
+ __ CmpP(scratch, r0);
+ __ ble(stack_overflow); // Signed comparison.
+}
+
} // namespace
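The generated check is equivalent to this C++ sketch (names illustrative, not
V8 API); the signed comparison matters because sp may already sit below the
real stack limit:

    // space_left can be negative if the stack already overflowed, hence the
    // signed comparison (ble) in the generated code.
    bool WouldOverflow(intptr_t sp, intptr_t real_stack_limit,
                       intptr_t num_args, int kPointerSizeLog2) {
      intptr_t space_left = sp - real_stack_limit;
      intptr_t needed = num_args << kPointerSizeLog2;
      return space_left <= needed;
    }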
// The construct stub for ES5 constructor functions and ES6 class constructors.
@@ -241,6 +261,19 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// Set up pointer to last argument.
__ la(r6, MemOperand(fp, StandardFrameConstants::kCallerSPOffset));
+ Label enough_stack_space, stack_overflow;
+ Generate_StackOverflowCheck(masm, r2, r7, &stack_overflow);
+ __ b(&enough_stack_space);
+
+ __ bind(&stack_overflow);
+ // Restore the context from the frame.
+ __ LoadP(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
+ __ CallRuntime(Runtime::kThrowStackOverflow);
+ // Unreachable code.
+ __ bkpt(0);
+
+ __ bind(&enough_stack_space);
+
// Copy arguments and receiver to the expression stack.
Label loop, no_args;
// ----------- S t a t e -------------
@@ -256,6 +289,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// -- sp[5*kPointerSize]: context
// -----------------------------------
+ __ ltgr(r2, r2);
__ beq(&no_args);
__ ShiftLeftP(ip, r2, Operand(kPointerSizeLog2));
__ SubP(sp, sp, ip);
@@ -374,7 +408,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
ExternalReference::debug_hook_on_function_call_address(masm->isolate());
__ Move(ip, debug_hook);
__ LoadB(ip, MemOperand(ip));
- __ CmpSmiLiteral(ip, Smi::kZero, r0);
+ __ CmpSmiLiteral(ip, Smi::zero(), r0);
__ bne(&prepare_step_in_if_stepping);
// Flood function if we need to continue stepping in the suspended generator.
@@ -457,8 +491,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ LoadRR(r3, r6);
static_assert(kJavaScriptCallCodeStartRegister == r4, "ABI mismatch");
__ LoadP(r4, FieldMemOperand(r3, JSFunction::kCodeOffset));
- __ AddP(r4, r4, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ JumpToJSEntry(r4);
+ __ JumpCodeObject(r4);
}
__ bind(&prepare_step_in_if_stepping);
@@ -497,19 +530,247 @@ void Builtins::Generate_ConstructedNonConstructable(MacroAssembler* masm) {
__ CallRuntime(Runtime::kThrowConstructedNonConstructable);
}
-// Clobbers r4; preserves all other registers.
-static void Generate_CheckStackOverflow(MacroAssembler* masm, Register argc) {
+namespace {
+
+constexpr int kPushedStackSpace =
+ (kNumCalleeSaved + 2) * kPointerSize +
+ kNumCalleeSavedDoubles * kDoubleSize + 5 * kPointerSize +
+ EntryFrameConstants::kCallerFPOffset - kPointerSize;
+
+// Called with the native C calling convention. The corresponding function
+// signature is either:
+//
+// using JSEntryFunction = GeneratedCode<Address(
+// Address root_register_value, Address new_target, Address target,
+// Address receiver, intptr_t argc, Address** args)>;
+// or
+// using JSEntryFunction = GeneratedCode<Address(
+// Address root_register_value, MicrotaskQueue* microtask_queue)>;
+void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
+ Builtins::Name entry_trampoline) {
+ // The register state is either:
+ // r2: root register value
+ // r3: code entry
+ // r4: function
+ // r5: receiver
+ // r6: argc
+ // [sp + 20 * kSystemPointerSize]: argv
+ // or
+ // r2: root_register_value
+ // r3: microtask_queue
+
+ Label invoke, handler_entry, exit;
+
+ int pushed_stack_space = 0;
+ {
+ NoRootArrayScope no_root_array(masm);
+
+ // saving floating point registers
+ // 64bit ABI requires f8 to f15 be saved
+ // http://refspecs.linuxbase.org/ELF/zSeries/lzsabi0_zSeries.html
+ __ lay(sp, MemOperand(sp, -8 * kDoubleSize));
+ __ std(d8, MemOperand(sp));
+ __ std(d9, MemOperand(sp, 1 * kDoubleSize));
+ __ std(d10, MemOperand(sp, 2 * kDoubleSize));
+ __ std(d11, MemOperand(sp, 3 * kDoubleSize));
+ __ std(d12, MemOperand(sp, 4 * kDoubleSize));
+ __ std(d13, MemOperand(sp, 5 * kDoubleSize));
+ __ std(d14, MemOperand(sp, 6 * kDoubleSize));
+ __ std(d15, MemOperand(sp, 7 * kDoubleSize));
+ pushed_stack_space += kNumCalleeSavedDoubles * kDoubleSize;
+
+ // zLinux ABI
+ // Incoming parameters:
+ // r2: root register value
+ // r3: code entry
+ // r4: function
+ // r5: receiver
+ // r6: argc
+ // [sp + 20 * kSystemPointerSize]: argv
+ // Requires us to save the callee-preserved registers r6-r13
+ // General convention is to also save r14 (return addr) and
+ // sp/r15 as well in a single STM/STMG
+ __ lay(sp, MemOperand(sp, -10 * kPointerSize));
+ __ StoreMultipleP(r6, sp, MemOperand(sp, 0));
+ pushed_stack_space += (kNumCalleeSaved + 2) * kPointerSize;
+
+ // Initialize the root register.
+ // C calling convention. The first argument is passed in r2.
+ __ LoadRR(kRootRegister, r2);
+ }
+
+ // save r6 to r1
+ __ LoadRR(r1, r6);
+
+ // Push a frame with special values setup to mark it as an entry frame.
+ // Bad FP (-1)
+ // SMI Marker
+ // SMI Marker
+ // kCEntryFPAddress
+ // Frame type
+ __ lay(sp, MemOperand(sp, -5 * kPointerSize));
+ pushed_stack_space += 5 * kPointerSize;
+
+ // Push a bad frame pointer to fail if it is used.
+ __ LoadImmP(r9, Operand(-1));
+
+ __ mov(r8, Operand(StackFrame::TypeToMarker(type)));
+ __ mov(r7, Operand(StackFrame::TypeToMarker(type)));
+ // Save copies of the top frame descriptor on the stack.
+ __ Move(r6, ExternalReference::Create(
+ IsolateAddressId::kCEntryFPAddress, masm->isolate()));
+ __ LoadP(r6, MemOperand(r6));
+ __ StoreMultipleP(r6, r9, MemOperand(sp, kPointerSize));
+ // Set up frame pointer for the frame to be pushed.
+ // Need to add kPointerSize, because sp already includes one extra
+ // slot for the frame type, which is pushed later.
+ __ lay(fp, MemOperand(
+ sp, -EntryFrameConstants::kCallerFPOffset + kPointerSize));
+ pushed_stack_space += EntryFrameConstants::kCallerFPOffset - kPointerSize;
+
+ // restore r6
+ __ LoadRR(r6, r1);
+
+ // If this is the outermost JS call, set js_entry_sp value.
+ Label non_outermost_js;
+ ExternalReference js_entry_sp =
+ ExternalReference::Create(IsolateAddressId::kJSEntrySPAddress,
+ masm->isolate());
+ __ Move(r7, js_entry_sp);
+ __ LoadAndTestP(r8, MemOperand(r7));
+ __ bne(&non_outermost_js, Label::kNear);
+ __ StoreP(fp, MemOperand(r7));
+ __ Load(ip, Operand(StackFrame::OUTERMOST_JSENTRY_FRAME));
+ Label cont;
+ __ b(&cont, Label::kNear);
+ __ bind(&non_outermost_js);
+ __ Load(ip, Operand(StackFrame::INNER_JSENTRY_FRAME));
+
+ __ bind(&cont);
+ __ StoreP(ip, MemOperand(sp)); // frame-type
+
+ // Jump to a faked try block that does the invoke, with a faked catch
+ // block that sets the pending exception.
+ __ b(&invoke, Label::kNear);
+
+ __ bind(&handler_entry);
+
+ // Store the current pc as the handler offset. It's used later to create the
+ // handler table.
+ masm->isolate()->builtins()->SetJSEntryHandlerOffset(handler_entry.pos());
+
+ // Caught exception: Store result (exception) in the pending exception
+ // field in the JSEnv and return a failure sentinel. Coming in here the
+ // fp will be invalid because the PushStackHandler below sets it to 0 to
+ // signal the existence of the JSEntry frame.
+ __ Move(ip, ExternalReference::Create(
+ IsolateAddressId::kPendingExceptionAddress, masm->isolate()));
+
+ __ StoreP(r2, MemOperand(ip));
+ __ LoadRoot(r2, RootIndex::kException);
+ __ b(&exit, Label::kNear);
+
+ // Invoke: Link this frame into the handler chain.
+ __ bind(&invoke);
+ // Must preserve r2-r6.
+ __ PushStackHandler();
+ // If an exception not caught by another handler occurs, this handler
+ // returns control to the code after the b(&invoke) above, which
+ // restores all kCalleeSaved registers (including cp and fp) to their
+ // saved values before returning a failure to C.
+
+ // Invoke the function by calling through JS entry trampoline builtin.
+ // Notice that we cannot store a reference to the trampoline code directly in
+ // this stub, because runtime stubs are not traversed when doing GC.
+
+ // Invoke the function by calling through JS entry trampoline builtin and
+ // pop the faked function when we return.
+ Handle<Code> trampoline_code =
+ masm->isolate()->builtins()->builtin_handle(entry_trampoline);
+ DCHECK_EQ(kPushedStackSpace, pushed_stack_space);
+ __ Call(trampoline_code, RelocInfo::CODE_TARGET);
+
+ // Unlink this frame from the handler chain.
+ __ PopStackHandler();
+ __ bind(&exit); // r2 holds result
+
+ // Check if the current stack frame is marked as the outermost JS frame.
+ Label non_outermost_js_2;
+ __ pop(r7);
+ __ CmpP(r7, Operand(StackFrame::OUTERMOST_JSENTRY_FRAME));
+ __ bne(&non_outermost_js_2, Label::kNear);
+ __ mov(r8, Operand::Zero());
+ __ Move(r7, js_entry_sp);
+ __ StoreP(r8, MemOperand(r7));
+ __ bind(&non_outermost_js_2);
+
+ // Restore the top frame descriptors from the stack.
+ __ pop(r5);
+ __ Move(ip, ExternalReference::Create(
+ IsolateAddressId::kCEntryFPAddress, masm->isolate()));
+ __ StoreP(r5, MemOperand(ip));
+
+ // Reset the stack to the callee saved registers.
+ __ lay(sp, MemOperand(sp, -EntryFrameConstants::kCallerFPOffset));
+
+ // Reload callee-saved preserved regs, return address reg (r14) and sp
+ __ LoadMultipleP(r6, sp, MemOperand(sp, 0));
+ __ la(sp, MemOperand(sp, 10 * kPointerSize));
+
+// Restore the saved floating point registers.
+#if V8_TARGET_ARCH_S390X
+ // 64bit ABI requires f8 to f15 be saved
+ __ ld(d8, MemOperand(sp));
+ __ ld(d9, MemOperand(sp, 1 * kDoubleSize));
+ __ ld(d10, MemOperand(sp, 2 * kDoubleSize));
+ __ ld(d11, MemOperand(sp, 3 * kDoubleSize));
+ __ ld(d12, MemOperand(sp, 4 * kDoubleSize));
+ __ ld(d13, MemOperand(sp, 5 * kDoubleSize));
+ __ ld(d14, MemOperand(sp, 6 * kDoubleSize));
+ __ ld(d15, MemOperand(sp, 7 * kDoubleSize));
+ __ la(sp, MemOperand(sp, 8 * kDoubleSize));
+#else
+ // 31bit ABI requires you to store f4 and f6:
+ // http://refspecs.linuxbase.org/ELF/zSeries/lzsabi0_s390.html#AEN417
+ __ ld(d4, MemOperand(sp));
+ __ ld(d6, MemOperand(sp, kDoubleSize));
+ __ la(sp, MemOperand(sp, 2 * kDoubleSize));
+#endif
+
+ __ b(r14);
+}
+
+} // namespace
+
+void Builtins::Generate_JSEntry(MacroAssembler* masm) {
+ Generate_JSEntryVariant(masm, StackFrame::ENTRY,
+ Builtins::kJSEntryTrampoline);
+}
+
+void Builtins::Generate_JSConstructEntry(MacroAssembler* masm) {
+ Generate_JSEntryVariant(masm, StackFrame::CONSTRUCT_ENTRY,
+ Builtins::kJSConstructEntryTrampoline);
+}
+
+void Builtins::Generate_JSRunMicrotasksEntry(MacroAssembler* masm) {
+ Generate_JSEntryVariant(masm, StackFrame::ENTRY,
+ Builtins::kRunMicrotasksTrampoline);
+}
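Roughly how the C++ side enters these variants, simplified from execution.cc
(GeneratedCode is an internal helper and the exact call site differs; argument
names follow the register comments above):

    using JSEntryFunction = GeneratedCode<Address(
        Address root_register_value, Address new_target, Address target,
        Address receiver, intptr_t argc, Address** argv)>;

    // Sketch: wrap the builtin's entry address and call it with the native
    // C calling convention.
    JSEntryFunction stub_entry =
        JSEntryFunction::FromAddress(isolate, code->InstructionStart());
    Address result = stub_entry.Call(isolate->isolate_root(), new_target,
                                     target, receiver, argc, argv);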
+
+// Clobbers scratch1 and scratch2; preserves all other registers.
+static void Generate_CheckStackOverflow(MacroAssembler* masm, Register argc,
+ Register scratch1, Register scratch2) {
// Check the stack for overflow. We are not trying to catch
// interruptions (e.g. debug break and preemption) here, so the "real stack
// limit" is checked.
Label okay;
- __ LoadRoot(r4, RootIndex::kRealStackLimit);
- // Make r4 the space we have left. The stack might already be overflowed
- // here which will cause r4 to become negative.
- __ SubP(r4, sp, r4);
+ __ LoadRoot(scratch1, RootIndex::kRealStackLimit);
+ // Make scratch1 the space we have left. The stack might already be overflowed
+ // here which will cause scratch1 to become negative.
+ __ SubP(scratch1, sp, scratch1);
// Check if the arguments will overflow the stack.
- __ ShiftLeftP(r0, argc, Operand(kPointerSizeLog2));
- __ CmpP(r4, r0);
+ __ ShiftLeftP(scratch2, argc, Operand(kPointerSizeLog2));
+ __ CmpP(scratch1, scratch2);
__ bgt(&okay); // Signed comparison.
// Out of stack space.
@@ -521,13 +782,12 @@ static void Generate_CheckStackOverflow(MacroAssembler* masm, Register argc) {
static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
bool is_construct) {
// Called from Generate_JS_Entry
- // r2: new.target
- // r3: function
- // r4: receiver
- // r5: argc
- // r6: argv
- // r0,r7-r9, cp may be clobbered
- ProfileEntryHookStub::MaybeCallEntryHook(masm);
+ // r3: new.target
+ // r4: function
+ // r5: receiver
+ // r6: argc
+ // [fp + kPushedStackSpace + 20 * kPointerSize]: argv
+ // r0,r2,r7-r9, cp may be clobbered
// Enter an internal frame.
{
@@ -541,24 +801,47 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ LoadP(cp, MemOperand(cp));
// Push the function and the receiver onto the stack.
- __ Push(r3, r4);
+ __ Push(r4, r5);
// Check if we have enough stack space to push all arguments.
- // Clobbers r4.
- Generate_CheckStackOverflow(masm, r5);
+ // Clobbers r5 and r0.
+ Generate_CheckStackOverflow(masm, r6, r5, r0);
+
+ // r3: new.target
+ // r4: function
+ // r6: argc
+ // [fp + kPushedStackSpace + 20 * kPointerSize]: argv
+ // r0,r2,r5,r7-r9, cp may be clobbered
+
+ // Setup new.target, argc and function.
+ __ LoadRR(r2, r6);
+ __ LoadRR(r5, r3);
+ __ LoadRR(r3, r4);
+
+ // Load argv from the stack.
+ __ LoadP(r6, MemOperand(fp));
+ __ LoadP(r6, MemOperand(
+ r6, kPushedStackSpace + EntryFrameConstants::kArgvOffset));
+
+ // r2: argc
+ // r3: function
+ // r5: new.target
+ // r6: argv
+ // r0,r4,r7-r9, cp may be clobbered
// Copy arguments to the stack in a loop from argv to sp.
// The arguments are actually placed in reverse order on sp
// compared to argv (i.e. arg1 is highest memory in sp).
+ // r2: argc
// r3: function
- // r5: argc
+ // r5: new.target
// r6: argv, i.e. points to first arg
// r7: scratch reg to hold scaled argc
// r8: scratch reg to hold arg handle
// r9: scratch reg to hold index into argv
Label argLoop, argExit;
intptr_t zero = 0;
- __ ShiftLeftP(r7, r5, Operand(kPointerSizeLog2));
+ __ ShiftLeftP(r7, r2, Operand(kPointerSizeLog2));
__ SubRR(sp, r7); // Buy the stack frame to fit args
__ LoadImmP(r9, Operand(zero)); // Initialize argv index
__ bind(&argLoop);
@@ -572,14 +855,14 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ b(&argLoop);
__ bind(&argExit);
- // Setup new.target and argc.
- __ LoadRR(r6, r2);
- __ LoadRR(r2, r5);
- __ LoadRR(r5, r6);
+ // r2: argc
+ // r3: function
+ // r5: new.target
// Initialize all JavaScript callee-saved registers, since they will be seen
// by the garbage collector as part of handlers.
- __ LoadRoot(r6, RootIndex::kUndefinedValue);
+ __ LoadRoot(r4, RootIndex::kUndefinedValue);
+ __ LoadRR(r6, r4);
__ LoadRR(r7, r6);
__ LoadRR(r8, r6);
__ LoadRR(r9, r6);
@@ -606,6 +889,16 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
Generate_JSEntryTrampolineHelper(masm, true);
}
+void Builtins::Generate_RunMicrotasksTrampoline(MacroAssembler* masm) {
+ // This expects two C++ function parameters passed by Invoke() in
+ // execution.cc.
+ // r2: root_register_value
+ // r3: microtask_queue
+
+ __ LoadRR(RunMicrotasksDescriptor::MicrotaskQueueRegister(), r3);
+ __ Jump(BUILTIN_CODE(masm->isolate(), RunMicrotasks), RelocInfo::CODE_TARGET);
+}
+
static void ReplaceClosureCodeWithOptimizedCode(
MacroAssembler* masm, Register optimized_code, Register closure,
Register scratch1, Register scratch2, Register scratch3) {
@@ -729,8 +1022,7 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure,
scratch2, scratch3, feedback_vector);
static_assert(kJavaScriptCallCodeStartRegister == r4, "ABI mismatch");
- __ AddP(r4, optimized_code_entry,
- Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ LoadCodeObjectEntry(r4, optimized_code_entry);
__ Jump(r4);
// Optimized code slot contains deoptimized code, evict it and re-enter the
@@ -819,11 +1111,24 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
// The function builds an interpreter frame. See InterpreterFrameConstants in
// frames.h for its layout.
void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
- ProfileEntryHookStub::MaybeCallEntryHook(masm);
-
Register closure = r3;
Register feedback_vector = r4;
+ // Get the bytecode array from the function object and load it into
+ // kInterpreterBytecodeArrayRegister.
+ __ LoadP(r2, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+ // Load original bytecode array or the debug copy.
+ __ LoadP(kInterpreterBytecodeArrayRegister,
+ FieldMemOperand(r2, SharedFunctionInfo::kFunctionDataOffset));
+ GetSharedFunctionInfoBytecode(masm, kInterpreterBytecodeArrayRegister, r6);
+
+ // The bytecode array could have been flushed from the shared function info,
+ // if so, call into CompileLazy.
+ Label compile_lazy;
+ __ CompareObjectType(kInterpreterBytecodeArrayRegister, r2, no_reg,
+ BYTECODE_ARRAY_TYPE);
+ __ bne(&compile_lazy);
+
// Load the feedback vector from the closure.
__ LoadP(feedback_vector,
FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
@@ -833,20 +1138,6 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// is optimized code or an optimization marker, call that instead.
MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, r6, r8, r7);
- // Open a frame scope to indicate that there is a frame on the stack. The
- // MANUAL indicates that the scope shouldn't actually generate code to set up
- // the frame (that is done below).
- FrameScope frame_scope(masm, StackFrame::MANUAL);
- __ PushStandardFrame(closure);
-
- // Get the bytecode array from the function object and load it into
- // kInterpreterBytecodeArrayRegister.
- __ LoadP(r2, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
- // Load original bytecode array or the debug copy.
- __ LoadP(kInterpreterBytecodeArrayRegister,
- FieldMemOperand(r2, SharedFunctionInfo::kFunctionDataOffset));
- GetSharedFunctionInfoBytecode(masm, kInterpreterBytecodeArrayRegister, r6);
-
// Increment invocation count for the function.
__ LoadW(r1, FieldMemOperand(feedback_vector,
FeedbackVector::kInvocationCountOffset));
@@ -854,16 +1145,11 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ StoreW(r1, FieldMemOperand(feedback_vector,
FeedbackVector::kInvocationCountOffset));
- // Check function data field is actually a BytecodeArray object.
- if (FLAG_debug_code) {
- __ TestIfSmi(kInterpreterBytecodeArrayRegister);
- __ Assert(
- ne, AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
- __ CompareObjectType(kInterpreterBytecodeArrayRegister, r2, no_reg,
- BYTECODE_ARRAY_TYPE);
- __ Assert(
- eq, AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
- }
+ // Open a frame scope to indicate that there is a frame on the stack. The
+ // MANUAL indicates that the scope shouldn't actually generate code to set up
+ // the frame (that is done below).
+ FrameScope frame_scope(masm, StackFrame::MANUAL);
+ __ PushStandardFrame(closure);
// Reset code age.
__ mov(r1, Operand(BytecodeArray::kNoAgeBytecodeAge));
@@ -927,9 +1213,9 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// handler at the current bytecode offset.
Label do_dispatch;
__ bind(&do_dispatch);
- __ mov(kInterpreterDispatchTableRegister,
- Operand(ExternalReference::interpreter_dispatch_table_address(
- masm->isolate())));
+ __ Move(
+ kInterpreterDispatchTableRegister,
+ ExternalReference::interpreter_dispatch_table_address(masm->isolate()));
__ LoadlB(r5, MemOperand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister));
@@ -963,22 +1249,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// The return value is in r2.
LeaveInterpreterFrame(masm, r4);
__ Ret();
-}
-static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
- Register scratch,
- Label* stack_overflow) {
- // Check the stack for overflow. We are not trying to catch
- // interruptions (e.g. debug break and preemption) here, so the "real stack
- // limit" is checked.
- __ LoadRoot(scratch, RootIndex::kRealStackLimit);
- // Make scratch the space we have left. The stack might already be overflowed
- // here which will cause scratch to become negative.
- __ SubP(scratch, sp, scratch);
- // Check if the arguments will overflow the stack.
- __ ShiftLeftP(r0, num_args, Operand(kPointerSizeLog2));
- __ CmpP(scratch, r0);
- __ ble(stack_overflow); // Signed comparison.
+ __ bind(&compile_lazy);
+ GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
+ __ bkpt(0); // Should not return.
}
static void Generate_InterpreterPushArgs(MacroAssembler* masm,
@@ -1105,12 +1379,14 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
// Set the return address to the correct point in the interpreter entry
// trampoline.
Label builtin_trampoline, trampoline_loaded;
- Smi* interpreter_entry_return_pc_offset(
+ Smi interpreter_entry_return_pc_offset(
masm->isolate()->heap()->interpreter_entry_return_pc_offset());
- DCHECK_NE(interpreter_entry_return_pc_offset, Smi::kZero);
+ DCHECK_NE(interpreter_entry_return_pc_offset, Smi::zero());
- // If the SFI function_data is an InterpreterData, get the trampoline stored
- // in it, otherwise get the trampoline from the builtins list.
+ // If the SFI function_data is an InterpreterData, the function will have a
+ // custom copy of the interpreter entry trampoline for profiling. If so,
+ // get the custom trampoline, otherwise grab the entry address of the global
+ // trampoline.
__ LoadP(r4, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
__ LoadP(r4, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
__ LoadP(r4, FieldMemOperand(r4, SharedFunctionInfo::kFunctionDataOffset));
@@ -1121,14 +1397,17 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
__ LoadP(r4,
FieldMemOperand(r4, InterpreterData::kInterpreterTrampolineOffset));
+ __ AddP(r4, r4, Operand(Code::kHeaderSize - kHeapObjectTag));
__ b(&trampoline_loaded);
__ bind(&builtin_trampoline);
- __ Move(r4, BUILTIN_CODE(masm->isolate(), InterpreterEntryTrampoline));
+ __ Move(r4, ExternalReference::
+ address_of_interpreter_entry_trampoline_instruction_start(
+ masm->isolate()));
+ __ LoadP(r4, MemOperand(r4));
__ bind(&trampoline_loaded);
- __ AddP(r14, r4, Operand(interpreter_entry_return_pc_offset->value() +
- Code::kHeaderSize - kHeapObjectTag));
+ __ AddP(r14, r4, Operand(interpreter_entry_return_pc_offset->value()));
// Initialize the dispatch table register.
__ Move(
@@ -1260,8 +1539,7 @@ void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
// which has be reset to the compile lazy builtin.
static_assert(kJavaScriptCallCodeStartRegister == r4, "ABI mismatch");
__ LoadP(r4, FieldMemOperand(r3, JSFunction::kCodeOffset));
- __ AddP(r4, r4, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ JumpToJSEntry(r4);
+ __ JumpCodeObject(r4);
}
namespace {
@@ -1341,7 +1619,7 @@ void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
// If the code object is null, just return to the caller.
Label skip;
- __ CmpSmiLiteral(r2, Smi::kZero, r0);
+ __ CmpSmiLiteral(r2, Smi::zero(), r0);
__ bne(&skip);
__ Ret();
@@ -1601,7 +1879,7 @@ static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
__ StoreP(r6, MemOperand(sp, 2 * kPointerSize));
__ StoreP(r3, MemOperand(sp, 1 * kPointerSize));
__ StoreP(r2, MemOperand(sp, 0 * kPointerSize));
- __ Push(Smi::kZero); // Padding.
+ __ Push(Smi::zero()); // Padding.
__ la(fp,
MemOperand(sp, ArgumentsAdaptorFrameConstants::kFixedFrameSizeFromFp));
}
@@ -2246,8 +2524,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// r5 : new target (passed through to callee)
static_assert(kJavaScriptCallCodeStartRegister == r4, "ABI mismatch");
__ LoadP(r4, FieldMemOperand(r3, JSFunction::kCodeOffset));
- __ AddP(r4, r4, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ CallJSEntry(r4);
+ __ CallCodeObject(r4);
// Store offset of return address for deoptimizer.
masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(masm->pc_offset());
@@ -2262,8 +2539,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ bind(&dont_adapt_arguments);
static_assert(kJavaScriptCallCodeStartRegister == r4, "ABI mismatch");
__ LoadP(r4, FieldMemOperand(r3, JSFunction::kCodeOffset));
- __ AddP(r4, r4, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ JumpToJSEntry(r4);
+ __ JumpCodeObject(r4);
__ bind(&stack_overflow);
{
@@ -2274,9 +2550,10 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
}
void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
- // The function index was put in r7 by the jump table trampoline.
+ // The function index was put in a register by the jump table trampoline.
// Convert to Smi for the runtime call.
- __ SmiTag(r7, r7);
+ __ SmiTag(kWasmCompileLazyFuncIndexRegister,
+ kWasmCompileLazyFuncIndexRegister);
{
HardAbortScope hard_abort(masm); // Avoid calls to Abort.
FrameAndConstantPoolScope scope(masm, StackFrame::WASM_COMPILE_LAZY);
@@ -2301,7 +2578,7 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
WasmInstanceObject::kCEntryStubOffset));
// Initialize the JavaScript context with 0. CEntry will use it to
// set the current context on the isolate.
- __ LoadSmiLiteral(cp, Smi::kZero);
+ __ LoadSmiLiteral(cp, Smi::zero());
__ CallRuntimeWithCEntry(Runtime::kWasmCompileLazy, r4);
// The entrypoint address is the return value.
__ LoadRR(ip, r2);
@@ -2326,7 +2603,6 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
//
// If argv_mode == kArgvInRegister:
// r4: pointer to the first argument
- ProfileEntryHookStub::MaybeCallEntryHook(masm);
__ LoadRR(r7, r3);
@@ -2387,24 +2663,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
// Call C built-in.
__ Move(isolate_reg, ExternalReference::isolate_address(masm->isolate()));
- Register target = r7;
-
- // To let the GC traverse the return address of the exit frames, we need to
- // know where the return address is. The CEntryStub is unmovable, so
- // we can store the address on the stack to be able to find it again and
- // we never have to restore it, because it will not change.
- {
- Label return_label;
- __ larl(r14, &return_label); // Generate the return addr of call later.
- __ StoreP(r14, MemOperand(sp, kStackFrameRASlot * kPointerSize));
-
- // zLinux ABI requires caller's frame to have sufficient space for callee
- // preserved regsiter save area.
- // __ lay(sp, MemOperand(sp, -kCalleeRegisterSaveAreaSize));
- __ b(target);
- __ bind(&return_label);
- // __ la(sp, MemOperand(sp, +kCalleeRegisterSaveAreaSize));
- }
+ __ StoreReturnAddressAndCall(r7);
// If return value is on the stack, pop it to registers.
if (needs_return_buffer) {
@@ -2702,43 +2961,6 @@ void Builtins::Generate_MathPowInternal(MacroAssembler* masm) {
__ Ret();
}
-namespace {
-
-void GenerateInternalArrayConstructorCase(MacroAssembler* masm,
- ElementsKind kind) {
- // Load undefined into the allocation site parameter as required by
- // ArrayNArgumentsConstructor.
- __ LoadRoot(kJavaScriptCallExtraArg1Register, RootIndex::kUndefinedValue);
-
- __ CmpLogicalP(r2, Operand(1));
-
- __ Jump(CodeFactory::InternalArrayNoArgumentConstructor(masm->isolate(), kind)
- .code(),
- RelocInfo::CODE_TARGET, lt);
-
- __ Jump(BUILTIN_CODE(masm->isolate(), ArrayNArgumentsConstructor),
- RelocInfo::CODE_TARGET, gt);
-
- if (IsFastPackedElementsKind(kind)) {
- // We might need to create a holey array
- // look at the first argument
- __ LoadP(r5, MemOperand(sp, 0));
- __ CmpP(r5, Operand::Zero());
-
- __ Jump(CodeFactory::InternalArraySingleArgumentConstructor(
- masm->isolate(), GetHoleyElementsKind(kind))
- .code(),
- RelocInfo::CODE_TARGET, ne);
- }
-
- __ Jump(
- CodeFactory::InternalArraySingleArgumentConstructor(masm->isolate(), kind)
- .code(),
- RelocInfo::CODE_TARGET);
-}
-
-} // namespace
-
void Builtins::Generate_InternalArrayConstructorImpl(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r2 : argc
@@ -2758,33 +2980,392 @@ void Builtins::Generate_InternalArrayConstructorImpl(MacroAssembler* masm) {
__ Assert(ne, AbortReason::kUnexpectedInitialMapForArrayFunction, cr0);
__ CompareObjectType(r5, r5, r6, MAP_TYPE);
__ Assert(eq, AbortReason::kUnexpectedInitialMapForArrayFunction);
- }
- // Figure out the right elements kind
- __ LoadP(r5, FieldMemOperand(r3, JSFunction::kPrototypeOrInitialMapOffset));
- // Load the map's "bit field 2" into |result|.
- __ LoadlB(r5, FieldMemOperand(r5, Map::kBitField2Offset));
- // Retrieve elements_kind from bit field 2.
- __ DecodeField<Map::ElementsKindBits>(r5);
+ // Figure out the right elements kind
+ __ LoadP(r5, FieldMemOperand(r3, JSFunction::kPrototypeOrInitialMapOffset));
+ // Load the map's "bit field 2" into |result|.
+ __ LoadlB(r5, FieldMemOperand(r5, Map::kBitField2Offset));
+ // Retrieve elements_kind from bit field 2.
+ __ DecodeField<Map::ElementsKindBits>(r5);
- if (FLAG_debug_code) {
- Label done;
__ CmpP(r5, Operand(PACKED_ELEMENTS));
- __ beq(&done);
- __ CmpP(r5, Operand(HOLEY_ELEMENTS));
- __ Assert(
- eq,
- AbortReason::kInvalidElementsKindForInternalArrayOrInternalPackedArray);
- __ bind(&done);
+ __ Assert(eq, AbortReason::kInvalidElementsKindForInternalPackedArray);
+ }
+
+ __ Jump(
+ BUILTIN_CODE(masm->isolate(), InternalArrayNoArgumentConstructor_Packed),
+ RelocInfo::CODE_TARGET);
+}
+
+namespace {
+
+static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
+ return ref0.address() - ref1.address();
+}
+
+// Calls an API function. Allocates a HandleScope, extracts the returned value
+// from the handle, and propagates exceptions. Restores the context.
+// stack_space - space to be unwound on exit (includes the call's JS argument
+// space and the additional space allocated for the fast call).
+static void CallApiFunctionAndReturn(MacroAssembler* masm,
+ Register function_address,
+ ExternalReference thunk_ref,
+ int stack_space,
+ MemOperand* stack_space_operand,
+ MemOperand return_value_operand) {
+ Isolate* isolate = masm->isolate();
+ ExternalReference next_address =
+ ExternalReference::handle_scope_next_address(isolate);
+ const int kNextOffset = 0;
+ const int kLimitOffset = AddressOffset(
+ ExternalReference::handle_scope_limit_address(isolate), next_address);
+ const int kLevelOffset = AddressOffset(
+ ExternalReference::handle_scope_level_address(isolate), next_address);
+
+ // Additional parameter is the address of the actual callback.
+ DCHECK(function_address == r3 || function_address == r4);
+ Register scratch = r5;
+
+ __ Move(scratch, ExternalReference::is_profiling_address(isolate));
+ __ LoadlB(scratch, MemOperand(scratch, 0));
+ __ CmpP(scratch, Operand::Zero());
+
+ Label profiler_disabled;
+ Label end_profiler_check;
+ __ beq(&profiler_disabled, Label::kNear);
+ __ Move(scratch, thunk_ref);
+ __ b(&end_profiler_check, Label::kNear);
+ __ bind(&profiler_disabled);
+ __ LoadRR(scratch, function_address);
+ __ bind(&end_profiler_check);
+
+ // Allocate HandleScope in callee-save registers.
+ // r9 - next_address
+ // r6 - next_address->kNextOffset
+ // r7 - next_address->kLimitOffset
+ // r8 - next_address->kLevelOffset
+ __ Move(r9, next_address);
+ __ LoadP(r6, MemOperand(r9, kNextOffset));
+ __ LoadP(r7, MemOperand(r9, kLimitOffset));
+ __ LoadlW(r8, MemOperand(r9, kLevelOffset));
+ __ AddP(r8, Operand(1));
+ __ StoreW(r8, MemOperand(r9, kLevelOffset));
+
+ if (FLAG_log_timer_events) {
+ FrameScope frame(masm, StackFrame::MANUAL);
+ __ PushSafepointRegisters();
+ __ PrepareCallCFunction(1, r2);
+ __ Move(r2, ExternalReference::isolate_address(isolate));
+ __ CallCFunction(ExternalReference::log_enter_external_function(), 1);
+ __ PopSafepointRegisters();
+ }
+
+ __ StoreReturnAddressAndCall(scratch);
+
+ if (FLAG_log_timer_events) {
+ FrameScope frame(masm, StackFrame::MANUAL);
+ __ PushSafepointRegisters();
+ __ PrepareCallCFunction(1, r2);
+ __ Move(r2, ExternalReference::isolate_address(isolate));
+ __ CallCFunction(ExternalReference::log_leave_external_function(), 1);
+ __ PopSafepointRegisters();
+ }
+
+ Label promote_scheduled_exception;
+ Label delete_allocated_handles;
+ Label leave_exit_frame;
+ Label return_value_loaded;
+
+ // load value from ReturnValue
+ __ LoadP(r2, return_value_operand);
+ __ bind(&return_value_loaded);
+ // No more valid handles (the result handle was the last one). Restore
+ // previous handle scope.
+ __ StoreP(r6, MemOperand(r9, kNextOffset));
+ if (__ emit_debug_code()) {
+ __ LoadlW(r3, MemOperand(r9, kLevelOffset));
+ __ CmpP(r3, r8);
+ __ Check(eq, AbortReason::kUnexpectedLevelAfterReturnFromApiCall);
+ }
+ __ SubP(r8, Operand(1));
+ __ StoreW(r8, MemOperand(r9, kLevelOffset));
+ __ CmpP(r7, MemOperand(r9, kLimitOffset));
+ __ bne(&delete_allocated_handles, Label::kNear);
+
+ // Leave the API exit frame.
+ __ bind(&leave_exit_frame);
+ // LeaveExitFrame expects unwind space to be in a register.
+ if (stack_space_operand == nullptr) {
+ DCHECK_NE(stack_space, 0);
+ __ mov(r6, Operand(stack_space));
+ } else {
+ DCHECK_EQ(stack_space, 0);
+ __ LoadP(r6, *stack_space_operand);
+ }
+ __ LeaveExitFrame(false, r6, stack_space_operand != nullptr);
+
+ // Check if the function scheduled an exception.
+ __ Move(r7, ExternalReference::scheduled_exception_address(isolate));
+ __ LoadP(r7, MemOperand(r7));
+ __ CompareRoot(r7, RootIndex::kTheHoleValue);
+ __ bne(&promote_scheduled_exception, Label::kNear);
+
+ __ b(r14);
+
+ // Re-throw by promoting a scheduled exception.
+ __ bind(&promote_scheduled_exception);
+ __ TailCallRuntime(Runtime::kPromoteScheduledException);
+
+ // HandleScope limit has changed. Delete allocated extensions.
+ __ bind(&delete_allocated_handles);
+ __ StoreP(r7, MemOperand(r9, kLimitOffset));
+ __ LoadRR(r6, r2);
+ __ PrepareCallCFunction(1, r7);
+ __ Move(r2, ExternalReference::isolate_address(isolate));
+ __ CallCFunction(ExternalReference::delete_handle_scope_extensions(), 1);
+ __ LoadRR(r2, r6);
+ __ b(&leave_exit_frame, Label::kNear);
+}
+
+} // namespace
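What CallApiFunctionAndReturn does around the call, as a hedged C++ sketch; the
struct mirrors the handle_scope_{next,limit,level}_address external references
used above and is illustrative, not the real HandleScopeData:

    struct HandleScopeData {
      Address* next;   // kept in r6 across the call
      Address* limit;  // kept in r7
      int level;       // kept in r8
    };

    void AroundApiCall(HandleScopeData* hs, void (*api_call)()) {
      Address* prev_next = hs->next;
      Address* prev_limit = hs->limit;
      hs->level++;                    // open the scope
      api_call();                     // StoreReturnAddressAndCall(scratch)
      hs->next = prev_next;           // restore the previous scope
      hs->level--;
      if (hs->limit != prev_limit) {  // extensions were allocated:
        hs->limit = prev_limit;       // reset, then delete_handle_scope_extensions()
      }
    }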
+
+void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- cp : kTargetContext
+ // -- r3 : kApiFunctionAddress
+ // -- r4 : kArgc
+ // --
+ // -- sp[0] : last argument
+ // -- ...
+ // -- sp[(argc - 1) * kPointerSize] : first argument
+ // -- sp[(argc + 0) * kPointerSize] : receiver
+ // -- sp[(argc + 1) * kPointerSize] : kHolder
+ // -- sp[(argc + 2) * kPointerSize] : kCallData
+ // -----------------------------------
+
+ Register api_function_address = r3;
+ Register argc = r4;
+ Register scratch = r6;
+ Register index = r7; // For indexing MemOperands.
+
+ DCHECK(!AreAliased(api_function_address, argc, scratch, index));
+
+ // Stack offsets (without argc).
+ static constexpr int kReceiverOffset = 0;
+ static constexpr int kHolderOffset = kReceiverOffset + 1;
+ static constexpr int kCallDataOffset = kHolderOffset + 1;
+
+ // Extra stack arguments are: the receiver, kHolder, kCallData.
+ static constexpr int kExtraStackArgumentCount = 3;
+
+ typedef FunctionCallbackArguments FCA;
+
+ STATIC_ASSERT(FCA::kArgsLength == 6);
+ STATIC_ASSERT(FCA::kNewTargetIndex == 5);
+ STATIC_ASSERT(FCA::kDataIndex == 4);
+ STATIC_ASSERT(FCA::kReturnValueOffset == 3);
+ STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
+ STATIC_ASSERT(FCA::kIsolateIndex == 1);
+ STATIC_ASSERT(FCA::kHolderIndex == 0);
+
+ // Set up FunctionCallbackInfo's implicit_args on the stack as follows:
+ //
+ // Target state:
+ // sp[0 * kPointerSize]: kHolder
+ // sp[1 * kPointerSize]: kIsolate
+ // sp[2 * kPointerSize]: undefined (kReturnValueDefaultValue)
+ // sp[3 * kPointerSize]: undefined (kReturnValue)
+ // sp[4 * kPointerSize]: kData
+ // sp[5 * kPointerSize]: undefined (kNewTarget)
+
+ // Reserve space on the stack.
+ __ lay(sp, MemOperand(sp, -(FCA::kArgsLength * kPointerSize)));
+
+ // kHolder.
+ __ AddP(index, argc, Operand(FCA::kArgsLength + kHolderOffset));
+ __ ShiftLeftP(r1, index, Operand(kPointerSizeLog2));
+ __ LoadP(scratch, MemOperand(sp, r1));
+ __ StoreP(scratch, MemOperand(sp, 0 * kPointerSize));
+
+ // kIsolate.
+ __ Move(scratch, ExternalReference::isolate_address(masm->isolate()));
+ __ StoreP(scratch, MemOperand(sp, 1 * kPointerSize));
+
+ // kReturnValueDefaultValue, kReturnValue, and kNewTarget.
+ __ LoadRoot(scratch, RootIndex::kUndefinedValue);
+ __ StoreP(scratch, MemOperand(sp, 2 * kPointerSize));
+ __ StoreP(scratch, MemOperand(sp, 3 * kPointerSize));
+ __ StoreP(scratch, MemOperand(sp, 5 * kPointerSize));
+
+ // kData.
+ __ AddP(index, argc, Operand(FCA::kArgsLength + kCallDataOffset));
+ __ ShiftLeftP(r1, index, Operand(kPointerSizeLog2));
+ __ LoadP(scratch, MemOperand(sp, r1));
+ __ StoreP(scratch, MemOperand(sp, 4 * kPointerSize));
+
+ // Keep a pointer to kHolder (= implicit_args) in a scratch register.
+ // We use it below to set up the FunctionCallbackInfo object.
+ __ LoadRR(scratch, sp);
+
+ // Allocate the v8::Arguments structure in the arguments' space since
+ // it's not controlled by GC.
+ // S390 LINUX ABI:
+ //
+ // Create 4 extra slots on stack:
+ // [0] space for DirectCEntryStub's LR save
+ // [1-3] FunctionCallbackInfo
+ // [4] number of bytes to drop from the stack after returning
+ static constexpr int kApiStackSpace = 5;
+ static constexpr bool kDontSaveDoubles = false;
+
+ FrameScope frame_scope(masm, StackFrame::MANUAL);
+ __ EnterExitFrame(kDontSaveDoubles, kApiStackSpace);
+
+ // FunctionCallbackInfo::implicit_args_ (points at kHolder as set up above).
+ // Arguments are after the return address (pushed by EnterExitFrame()).
+ __ StoreP(scratch,
+ MemOperand(sp, (kStackFrameExtraParamSlot + 1) * kPointerSize));
+
+ // FunctionCallbackInfo::values_ (points at the first varargs argument passed
+ // on the stack).
+ __ AddP(scratch, scratch, Operand((FCA::kArgsLength - 1) * kPointerSize));
+ __ ShiftLeftP(r1, argc, Operand(kPointerSizeLog2));
+ __ AddP(scratch, scratch, r1);
+ __ StoreP(scratch,
+ MemOperand(sp, (kStackFrameExtraParamSlot + 2) * kPointerSize));
+
+ // FunctionCallbackInfo::length_.
+ __ StoreW(argc,
+ MemOperand(sp, (kStackFrameExtraParamSlot + 3) * kPointerSize));
+
+ // We also store the number of bytes to drop from the stack after returning
+ // from the API function here.
+ __ mov(scratch,
+ Operand((FCA::kArgsLength + kExtraStackArgumentCount) * kPointerSize));
+ __ ShiftLeftP(r1, argc, Operand(kPointerSizeLog2));
+ __ AddP(scratch, r1);
+ __ StoreP(scratch,
+ MemOperand(sp, (kStackFrameExtraParamSlot + 4) * kPointerSize));
+
+ // v8::InvocationCallback's argument.
+ __ lay(r2,
+ MemOperand(sp, (kStackFrameExtraParamSlot + 1) * kPointerSize));
+
+ ExternalReference thunk_ref = ExternalReference::invoke_function_callback();
+
+ // There are two stack slots above the arguments we constructed on the stack.
+ // TODO(jgruber): Document what these arguments are.
+ static constexpr int kStackSlotsAboveFCA = 2;
+ MemOperand return_value_operand(
+ fp, (kStackSlotsAboveFCA + FCA::kReturnValueOffset) * kPointerSize);
+
+ static constexpr int kUseStackSpaceOperand = 0;
+ MemOperand stack_space_operand(
+ sp, (kStackFrameExtraParamSlot + 4) * kPointerSize);
+
+ AllowExternalCallThatCantCauseGC scope(masm);
+ CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
+ kUseStackSpaceOperand, &stack_space_operand,
+ return_value_operand);
+}
+
+void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
+ int arg0Slot = 0;
+ int accessorInfoSlot = 0;
+ int apiStackSpace = 0;
+ // Build the v8::PropertyCallbackInfo::args_ array on the stack and push the
+ // property name below the exit frame, so the GC is aware of them.
+ STATIC_ASSERT(PropertyCallbackArguments::kShouldThrowOnErrorIndex == 0);
+ STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 1);
+ STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 2);
+ STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 3);
+ STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 4);
+ STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 5);
+ STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 6);
+ STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 7);
+
+ Register receiver = ApiGetterDescriptor::ReceiverRegister();
+ Register holder = ApiGetterDescriptor::HolderRegister();
+ Register callback = ApiGetterDescriptor::CallbackRegister();
+ Register scratch = r6;
+ DCHECK(!AreAliased(receiver, holder, callback, scratch));
+
+ Register api_function_address = r4;
+
+ __ push(receiver);
+ // Push data from AccessorInfo.
+ __ LoadP(scratch, FieldMemOperand(callback, AccessorInfo::kDataOffset));
+ __ push(scratch);
+ __ LoadRoot(scratch, RootIndex::kUndefinedValue);
+ __ Push(scratch, scratch);
+ __ Move(scratch, ExternalReference::isolate_address(masm->isolate()));
+ __ Push(scratch, holder);
+ __ Push(Smi::zero()); // should_throw_on_error -> false
+ __ LoadP(scratch, FieldMemOperand(callback, AccessorInfo::kNameOffset));
+ __ push(scratch);
+
+ // v8::PropertyCallbackInfo::args_ array and name handle.
+ const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
+
+ // Load address of v8::PropertyAccessorInfo::args_ array and name handle.
+ __ LoadRR(r2, sp); // r2 = Handle<Name>
+ __ AddP(r3, r2, Operand(1 * kPointerSize)); // r3 = v8::PCI::args_
+
+ // If ABI passes Handles (pointer-sized struct) in a register:
+ //
+ // Create 2 extra slots on stack:
+ // [0] space for DirectCEntryStub's LR save
+ // [1] AccessorInfo&
+ //
+ // Otherwise:
+ //
+ // Create 3 extra slots on stack:
+ // [0] space for DirectCEntryStub's LR save
+ // [1] copy of Handle (first arg)
+ // [2] AccessorInfo&
+ if (ABI_PASSES_HANDLES_IN_REGS) {
+ accessorInfoSlot = kStackFrameExtraParamSlot + 1;
+ apiStackSpace = 2;
+ } else {
+ arg0Slot = kStackFrameExtraParamSlot + 1;
+ accessorInfoSlot = arg0Slot + 1;
+ apiStackSpace = 3;
+ }
+
+ FrameScope frame_scope(masm, StackFrame::MANUAL);
+ __ EnterExitFrame(false, apiStackSpace);
+
+ if (!ABI_PASSES_HANDLES_IN_REGS) {
+ // pass 1st arg by reference
+ __ StoreP(r2, MemOperand(sp, arg0Slot * kPointerSize));
+ __ AddP(r2, sp, Operand(arg0Slot * kPointerSize));
}
- Label fast_elements_case;
- __ CmpP(r5, Operand(PACKED_ELEMENTS));
- __ beq(&fast_elements_case);
- GenerateInternalArrayConstructorCase(masm, HOLEY_ELEMENTS);
+ // Create v8::PropertyCallbackInfo object on the stack and initialize
+ // its args_ field.
+ __ StoreP(r3, MemOperand(sp, accessorInfoSlot * kPointerSize));
+ __ AddP(r3, sp, Operand(accessorInfoSlot * kPointerSize));
+ // r3 = v8::PropertyCallbackInfo&
+
+ ExternalReference thunk_ref =
+ ExternalReference::invoke_accessor_getter_callback();
+
+ __ LoadP(scratch, FieldMemOperand(callback, AccessorInfo::kJsGetterOffset));
+ __ LoadP(api_function_address,
+ FieldMemOperand(scratch, Foreign::kForeignAddressOffset));
+
+ // +3 is to skip prolog, return address and name handle.
+ MemOperand return_value_operand(
+ fp, (PropertyCallbackArguments::kReturnValueOffset + 3) * kPointerSize);
+ MemOperand* const kUseStackSpaceConstant = nullptr;
+ CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
+ kStackUnwindSpace, kUseStackSpaceConstant,
+ return_value_operand);
+}
- __ bind(&fast_elements_case);
- GenerateInternalArrayConstructorCase(masm, PACKED_ELEMENTS);
+void Builtins::Generate_DirectCEntry(MacroAssembler* masm) {
+ // Unused.
+ __ stop(0);
}
#undef __
diff --git a/deps/v8/src/builtins/setup-builtins-internal.cc b/deps/v8/src/builtins/setup-builtins-internal.cc
index 630473f407..cf7aa704f2 100644
--- a/deps/v8/src/builtins/setup-builtins-internal.cc
+++ b/deps/v8/src/builtins/setup-builtins-internal.cc
@@ -8,35 +8,32 @@
#include "src/builtins/builtins.h"
#include "src/code-events.h"
#include "src/compiler/code-assembler.h"
+
#include "src/handles-inl.h"
#include "src/interface-descriptors.h"
#include "src/interpreter/bytecodes.h"
#include "src/interpreter/interpreter-generator.h"
#include "src/interpreter/interpreter.h"
#include "src/isolate.h"
+#include "src/macro-assembler.h"
#include "src/objects-inl.h"
#include "src/objects/shared-function-info.h"
+#include "src/objects/smi.h"
namespace v8 {
namespace internal {
// Forward declarations for C++ builtins.
#define FORWARD_DECLARE(Name) \
- Object* Builtin_##Name(int argc, Object** args, Isolate* isolate);
+ Address Builtin_##Name(int argc, Address* args, Isolate* isolate);
BUILTIN_LIST_C(FORWARD_DECLARE)
#undef FORWARD_DECLARE
namespace {
-void PostBuildProfileAndTracing(Isolate* isolate, Code* code,
- const char* name) {
+void PostBuildProfileAndTracing(Isolate* isolate, Code code, const char* name) {
PROFILE(isolate, CodeCreateEvent(CodeEventListener::BUILTIN_TAG,
AbstractCode::cast(code), name));
-#ifdef ENABLE_DISASSEMBLER
- if (FLAG_print_builtin_code) {
- code->PrintBuiltinCode(isolate, name);
- }
-#endif
}
AssemblerOptions BuiltinAssemblerOptions(Isolate* isolate,
@@ -54,7 +51,8 @@ AssemblerOptions BuiltinAssemblerOptions(Isolate* isolate,
isolate->heap()->memory_allocator()->code_range();
bool pc_relative_calls_fit_in_code_range =
!code_range.is_empty() &&
- code_range.size() <= kMaxPCRelativeCodeRangeInMB * MB;
+ std::ceil(static_cast<float>(code_range.size() / MB)) <=
+ kMaxPCRelativeCodeRangeInMB;
options.isolate_independent_code = true;
options.use_pc_relative_calls_and_jumps = pc_relative_calls_fit_in_code_range;
@@ -67,15 +65,16 @@ typedef void (*CodeAssemblerGenerator)(compiler::CodeAssemblerState*);
Handle<Code> BuildPlaceholder(Isolate* isolate, int32_t builtin_index) {
HandleScope scope(isolate);
- const size_t buffer_size = 1 * KB;
- byte buffer[buffer_size]; // NOLINT(runtime/arrays)
- MacroAssembler masm(isolate, buffer, buffer_size, CodeObjectRequired::kYes);
+ constexpr int kBufferSize = 1 * KB;
+ byte buffer[kBufferSize];
+ MacroAssembler masm(isolate, CodeObjectRequired::kYes,
+ ExternalAssemblerBuffer(buffer, kBufferSize));
DCHECK(!masm.has_frame());
{
FrameScope scope(&masm, StackFrame::NONE);
// The contents of placeholder don't matter, as long as they don't create
// embedded constants or external references.
- masm.Move(kJavaScriptCallCodeStartRegister, Smi::kZero);
+ masm.Move(kJavaScriptCallCodeStartRegister, Smi::zero());
masm.Call(kJavaScriptCallCodeStartRegister);
}
CodeDesc desc;
@@ -85,40 +84,65 @@ Handle<Code> BuildPlaceholder(Isolate* isolate, int32_t builtin_index) {
return scope.CloseAndEscape(code);
}
-Code* BuildWithMacroAssembler(Isolate* isolate, int32_t builtin_index,
- MacroAssemblerGenerator generator,
- const char* s_name) {
+Code BuildWithMacroAssembler(Isolate* isolate, int32_t builtin_index,
+ MacroAssemblerGenerator generator,
+ const char* s_name) {
HandleScope scope(isolate);
// Canonicalize handles, so that we can share constant pool entries pointing
// to code targets without dereferencing their handles.
CanonicalHandleScope canonical(isolate);
- const size_t buffer_size = 32 * KB;
- byte buffer[buffer_size]; // NOLINT(runtime/arrays)
+ constexpr int kBufferSize = 32 * KB;
+ byte buffer[kBufferSize];
MacroAssembler masm(isolate, BuiltinAssemblerOptions(isolate, builtin_index),
- buffer, buffer_size, CodeObjectRequired::kYes);
+ CodeObjectRequired::kYes,
+ ExternalAssemblerBuffer(buffer, kBufferSize));
masm.set_builtin_index(builtin_index);
DCHECK(!masm.has_frame());
generator(&masm);
+
+ int handler_table_offset = 0;
+
+ // JSEntry builtins are a special case and need to generate a handler table.
+ DCHECK_EQ(Builtins::KindOf(Builtins::kJSEntry), Builtins::ASM);
+ DCHECK_EQ(Builtins::KindOf(Builtins::kJSConstructEntry), Builtins::ASM);
+ DCHECK_EQ(Builtins::KindOf(Builtins::kJSRunMicrotasksEntry), Builtins::ASM);
+ if (Builtins::IsJSEntryVariant(builtin_index)) {
+ static constexpr int kJSEntryHandlerCount = 1;
+ handler_table_offset =
+ HandlerTable::EmitReturnTableStart(&masm, kJSEntryHandlerCount);
+ HandlerTable::EmitReturnEntry(
+ &masm, 0, isolate->builtins()->js_entry_handler_offset());
+ }
+
CodeDesc desc;
masm.GetCode(isolate, &desc);
+
+ static constexpr bool kIsNotTurbofanned = false;
+ static constexpr int kStackSlots = 0;
+ static constexpr int kSafepointTableOffset = 0;
+
Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::BUILTIN, masm.CodeObject(), builtin_index);
+ desc, Code::BUILTIN, masm.CodeObject(), builtin_index,
+ MaybeHandle<ByteArray>(), DeoptimizationData::Empty(isolate), kMovable,
+ kIsNotTurbofanned, kStackSlots, kSafepointTableOffset,
+ handler_table_offset);
PostBuildProfileAndTracing(isolate, *code, s_name);
return *code;
}
-Code* BuildAdaptor(Isolate* isolate, int32_t builtin_index,
- Address builtin_address,
- Builtins::ExitFrameType exit_frame_type, const char* name) {
+Code BuildAdaptor(Isolate* isolate, int32_t builtin_index,
+ Address builtin_address,
+ Builtins::ExitFrameType exit_frame_type, const char* name) {
HandleScope scope(isolate);
// Canonicalize handles, so that we can share constant pool entries pointing
// to code targets without dereferencing their handles.
CanonicalHandleScope canonical(isolate);
- const size_t buffer_size = 32 * KB;
- byte buffer[buffer_size]; // NOLINT(runtime/arrays)
+ constexpr int kBufferSize = 32 * KB;
+ byte buffer[kBufferSize];
MacroAssembler masm(isolate, BuiltinAssemblerOptions(isolate, builtin_index),
- buffer, buffer_size, CodeObjectRequired::kYes);
+ CodeObjectRequired::kYes,
+ ExternalAssemblerBuffer(buffer, kBufferSize));
masm.set_builtin_index(builtin_index);
DCHECK(!masm.has_frame());
Builtins::Generate_Adaptor(&masm, builtin_address, exit_frame_type);
@@ -131,9 +155,9 @@ Code* BuildAdaptor(Isolate* isolate, int32_t builtin_index,
}
// Builder for builtins implemented in TurboFan with JS linkage.
-Code* BuildWithCodeStubAssemblerJS(Isolate* isolate, int32_t builtin_index,
- CodeAssemblerGenerator generator, int argc,
- const char* name) {
+Code BuildWithCodeStubAssemblerJS(Isolate* isolate, int32_t builtin_index,
+ CodeAssemblerGenerator generator, int argc,
+ const char* name) {
HandleScope scope(isolate);
// Canonicalize handles, so that we can share constant pool entries pointing
// to code targets without dereferencing their handles.
@@ -156,10 +180,10 @@ Code* BuildWithCodeStubAssemblerJS(Isolate* isolate, int32_t builtin_index,
}
// Builder for builtins implemented in TurboFan with CallStub linkage.
-Code* BuildWithCodeStubAssemblerCS(Isolate* isolate, int32_t builtin_index,
- CodeAssemblerGenerator generator,
- CallDescriptors::Key interface_descriptor,
- const char* name, int result_size) {
+Code BuildWithCodeStubAssemblerCS(Isolate* isolate, int32_t builtin_index,
+ CodeAssemblerGenerator generator,
+ CallDescriptors::Key interface_descriptor,
+ const char* name, int result_size) {
HandleScope scope(isolate);
// Canonicalize handles, so that we can share constant pool entries pointing
// to code targets without dereferencing their handles.
@@ -176,7 +200,7 @@ Code* BuildWithCodeStubAssemblerCS(Isolate* isolate, int32_t builtin_index,
DCHECK_LE(0, descriptor.GetRegisterParameterCount());
compiler::CodeAssemblerState state(
isolate, &zone, descriptor, Code::BUILTIN, name,
- PoisoningMitigationLevel::kDontPoison, 0, builtin_index);
+ PoisoningMitigationLevel::kDontPoison, builtin_index);
generator(&state);
Handle<Code> code = compiler::CodeAssembler::GenerateCode(
&state, BuiltinAssemblerOptions(isolate, builtin_index));
@@ -188,7 +212,7 @@ Code* BuildWithCodeStubAssemblerCS(Isolate* isolate, int32_t builtin_index,
// static
void SetupIsolateDelegate::AddBuiltin(Builtins* builtins, int index,
- Code* code) {
+ Code code) {
DCHECK_EQ(index, code->builtin_index());
builtins->set_builtin(index, code);
}
@@ -217,27 +241,28 @@ void SetupIsolateDelegate::ReplacePlaceholders(Isolate* isolate) {
RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
RelocInfo::ModeMask(RelocInfo::RELATIVE_CODE_TARGET);
HeapIterator iterator(isolate->heap());
- while (HeapObject* obj = iterator.next()) {
+ for (HeapObject obj = iterator.next(); !obj.is_null();
+ obj = iterator.next()) {
if (!obj->IsCode()) continue;
- Code* code = Code::cast(obj);
+ Code code = Code::cast(obj);
bool flush_icache = false;
for (RelocIterator it(code, kRelocMask); !it.done(); it.next()) {
RelocInfo* rinfo = it.rinfo();
if (RelocInfo::IsCodeTargetMode(rinfo->rmode())) {
- Code* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
+ Code target = Code::GetCodeFromTargetAddress(rinfo->target_address());
DCHECK_IMPLIES(RelocInfo::IsRelativeCodeTarget(rinfo->rmode()),
Builtins::IsIsolateIndependent(target->builtin_index()));
if (!target->is_builtin()) continue;
- Code* new_target = builtins->builtin(target->builtin_index());
+ Code new_target = builtins->builtin(target->builtin_index());
rinfo->set_target_address(new_target->raw_instruction_start(),
UPDATE_WRITE_BARRIER, SKIP_ICACHE_FLUSH);
} else {
DCHECK(RelocInfo::IsEmbeddedObject(rinfo->rmode()));
- Object* object = rinfo->target_object();
+ Object object = rinfo->target_object();
if (!object->IsCode()) continue;
- Code* target = Code::cast(object);
+ Code target = Code::cast(object);
if (!target->is_builtin()) continue;
- Code* new_target = builtins->builtin(target->builtin_index());
+ Code new_target = builtins->builtin(target->builtin_index());
rinfo->set_target_object(isolate->heap(), new_target,
UPDATE_WRITE_BARRIER, SKIP_ICACHE_FLUSH);
}
@@ -252,10 +277,10 @@ void SetupIsolateDelegate::ReplacePlaceholders(Isolate* isolate) {
namespace {
-Code* GenerateBytecodeHandler(Isolate* isolate, int builtin_index,
- const char* name,
- interpreter::OperandScale operand_scale,
- interpreter::Bytecode bytecode) {
+Code GenerateBytecodeHandler(Isolate* isolate, int builtin_index,
+ const char* name,
+ interpreter::OperandScale operand_scale,
+ interpreter::Bytecode bytecode) {
DCHECK(interpreter::Bytecodes::BytecodeHasHandler(bytecode, operand_scale));
Handle<Code> code = interpreter::GenerateBytecodeHandler(
@@ -267,18 +292,6 @@ Code* GenerateBytecodeHandler(Isolate* isolate, int builtin_index,
return *code;
}
-Code* GenerateLazyBytecodeHandler(Isolate* isolate, int builtin_index,
- const char* name,
- interpreter::OperandScale operand_scale) {
- Handle<Code> code = interpreter::GenerateDeserializeLazyHandler(
- isolate, operand_scale, builtin_index,
- BuiltinAssemblerOptions(isolate, builtin_index));
-
- PostBuildProfileAndTracing(isolate, *code, name);
-
- return *code;
-}
-
} // namespace
// static
@@ -292,7 +305,7 @@ void SetupIsolateDelegate::SetupBuiltinsInternal(Isolate* isolate) {
HandleScope scope(isolate);
int index = 0;
- Code* code;
+ Code code;
#define BUILD_CPP(Name) \
code = BuildAdaptor(isolate, index, FUNCTION_ADDR(Builtin_##Name), \
Builtins::BUILTIN_EXIT, #Name); \
@@ -328,18 +341,13 @@ void SetupIsolateDelegate::SetupBuiltinsInternal(Isolate* isolate) {
OperandScale, Bytecode); \
AddBuiltin(builtins, index++, code);
-#define BUILD_DLH(Name, OperandScale) \
- code = GenerateLazyBytecodeHandler(isolate, index, Builtins::name(index), \
- OperandScale); \
- AddBuiltin(builtins, index++, code);
-
-#define BUILD_ASM(Name) \
+#define BUILD_ASM(Name, InterfaceDescriptor) \
code = BuildWithMacroAssembler(isolate, index, Builtins::Generate_##Name, \
#Name); \
AddBuiltin(builtins, index++, code);
BUILTIN_LIST(BUILD_CPP, BUILD_API, BUILD_TFJ, BUILD_TFC, BUILD_TFS, BUILD_TFH,
- BUILD_BCH, BUILD_DLH, BUILD_ASM);
+ BUILD_BCH, BUILD_ASM);
#undef BUILD_CPP
#undef BUILD_API
@@ -348,7 +356,6 @@ void SetupIsolateDelegate::SetupBuiltinsInternal(Isolate* isolate) {
#undef BUILD_TFS
#undef BUILD_TFH
#undef BUILD_BCH
-#undef BUILD_DLH
#undef BUILD_ASM
CHECK_EQ(Builtins::builtin_count, index);
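The Code* to Code and Object* to Object rewrites in this hunk are part of V8's migration of heap object references from raw C++ pointers to tagged value types: emptiness is now queried with is_null() instead of comparing against nullptr, while operator-> access is preserved. A minimal sketch of the resulting iteration pattern, assuming the 7.3-era HeapIterator interface used above:

    // Value-type heap walk: HeapObject and Code are copies of tagged words.
    HeapIterator iterator(isolate->heap());
    for (HeapObject obj = iterator.next(); !obj.is_null();
         obj = iterator.next()) {
      if (!obj->IsCode()) continue;   // operator-> still works on value types
      Code code = Code::cast(obj);    // cheap copy of a tagged word, not Code*
    }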
diff --git a/deps/v8/src/builtins/typed-array-createtypedarray.tq b/deps/v8/src/builtins/typed-array-createtypedarray.tq
new file mode 100644
index 0000000000..64d9930815
--- /dev/null
+++ b/deps/v8/src/builtins/typed-array-createtypedarray.tq
@@ -0,0 +1,73 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+namespace typed_array {
+ extern builtin TypedArrayInitialize(implicit context: Context)(
+ JSTypedArray, PositiveSmi, PositiveSmi, Boolean, JSReceiver): void;
+
+ extern macro TypedArrayBuiltinsAssembler::ByteLengthIsValid(Number): bool;
+ extern macro TypedArrayBuiltinsAssembler::CallCMemcpy(
+ RawPtr, RawPtr, uintptr): void;
+
+ extern runtime TypedArrayCopyElements(Context, JSTypedArray, Object, Number):
+ void;
+
+ // 22.2.4.2 TypedArray ( length )
+ // ES #sec-typedarray-length
+ macro ConstructByLength(implicit context: Context)(
+ typedArray: JSTypedArray, length: Object, elementSize: Smi): void {
+ const positiveElementSize: PositiveSmi =
+ Cast<PositiveSmi>(elementSize) otherwise unreachable;
+ const convertedLength: Number =
+ ToInteger_Inline(context, length, kTruncateMinusZero);
+ // The maximum length of a TypedArray is MaxSmi().
+ // Note: this is not per spec, but rather a constraint of our current
+ // representation (which uses Smis).
+ // TODO(7881): support larger-than-smi typed array lengths
+ const positiveLength: PositiveSmi = Cast<PositiveSmi>(convertedLength)
+ otherwise ThrowRangeError(context, kInvalidTypedArrayLength, length);
+ const defaultConstructor: JSFunction = GetArrayBufferFunction();
+ const initialize: Boolean = True;
+ TypedArrayInitialize(
+ typedArray, positiveLength, positiveElementSize, initialize,
+ defaultConstructor);
+ }
+
+ // 22.2.4.4 TypedArray ( object )
+ // ES #sec-typedarray-object
+ macro ConstructByArrayLike(implicit context: Context)(
+ typedArray: JSTypedArray, arrayLike: HeapObject, initialLength: Object,
+ elementSize: Smi, bufferConstructor: JSReceiver): void {
+ const positiveElementSize: PositiveSmi =
+ Cast<PositiveSmi>(elementSize) otherwise unreachable;
+ // The caller has looked up length on arrayLike, which is observable.
+ const length: PositiveSmi = ToSmiLength(initialLength)
+ otherwise ThrowRangeError(context, kInvalidTypedArrayLength, initialLength);
+ const initialize: Boolean = False;
+ TypedArrayInitialize(
+ typedArray, length, positiveElementSize, initialize, bufferConstructor);
+
+ try {
+ const src: JSTypedArray = Cast<JSTypedArray>(arrayLike) otherwise IfSlow;
+
+ if (IsDetachedBuffer(src.buffer)) {
+ ThrowTypeError(context, kDetachedOperation, 'Construct');
+
+ } else if (src.elements_kind != typedArray.elements_kind) {
+ goto IfSlow;
+
+ } else if (length > 0) {
+ const byteLength: Number = SmiMul(length, elementSize);
+ assert(ByteLengthIsValid(byteLength));
+ CallCMemcpy(
+ typedArray.data_ptr, src.data_ptr, Convert<uintptr>(byteLength));
+ }
+ }
+ label IfSlow deferred {
+ if (length > 0) {
+ TypedArrayCopyElements(context, typedArray, arrayLike, length);
+ }
+ }
+ }
+}
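ConstructByArrayLike above takes the raw-copy fast path only when the source is itself a JSTypedArray whose buffer is not detached and whose elements kind matches the destination; every other case defers to the TypedArrayCopyElements runtime function, whose element-wise reads are observable. A rough C++ analogue of that split, with names like SameElementsKind and RawByteCopy as illustrative stand-ins rather than real V8 API:

    // Hedged sketch of the fast/slow decision encoded in the macro above.
    if (src_is_typed_array && !src_detached && SameElementsKind(src, dst)) {
      if (length > 0)
        RawByteCopy(dst_data_ptr, src_data_ptr, length * element_size);
    } else if (length > 0) {
      CopyElementsViaRuntime(dst, array_like, length);  // may call user code
    }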
diff --git a/deps/v8/src/builtins/typed-array.tq b/deps/v8/src/builtins/typed-array.tq
index b3ff7dbca1..278e844966 100644
--- a/deps/v8/src/builtins/typed-array.tq
+++ b/deps/v8/src/builtins/typed-array.tq
@@ -2,9 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-module typed_array {
+#include 'src/builtins/builtins-typed-array-gen.h'
+
+namespace typed_array {
extern runtime TypedArraySortFast(Context, Object): JSTypedArray;
- extern macro ValidateTypedArray(
+ extern macro TypedArrayBuiltinsAssembler::ValidateTypedArray(
Context, Object, constexpr string): JSTypedArray;
extern macro LoadFixedTypedArrayElementAsTagged(
@@ -16,6 +18,18 @@ module typed_array {
type LoadFn = builtin(Context, JSTypedArray, Smi) => Object;
type StoreFn = builtin(Context, JSTypedArray, Smi, Object) => Object;
+ // These UnsafeCast specializations are necessary because there is no
+ // way to definitively test whether an Object is a Torque function
+ // with a specific signature, and the default UnsafeCast implementation
+ // would try to check this through an assert(Is<>), so the test
+ // is bypassed in these specializations.
+ UnsafeCast<LoadFn>(implicit context: Context)(o: Object): LoadFn {
+ return %RawObjectCast<LoadFn>(o);
+ }
+ UnsafeCast<StoreFn>(implicit context: Context)(o: Object): StoreFn {
+ return %RawObjectCast<StoreFn>(o);
+ }
+
macro KindForArrayType<T: type>(): constexpr ElementsKind;
KindForArrayType<FixedUint8Array>(): constexpr ElementsKind {
return UINT8_ELEMENTS;
@@ -67,7 +81,7 @@ module typed_array {
return Undefined;
}
- macro CallCompareWithDetachedCheck(
+ transitioning macro CallCompareWithDetachedCheck(
context: Context, array: JSTypedArray, comparefn: Callable, a: Object,
b: Object): Number
labels Detached {
@@ -86,10 +100,10 @@ module typed_array {
}
// InsertionSort is used for smaller arrays.
- macro TypedArrayInsertionSort(
+ transitioning macro TypedArrayInsertionSort(
context: Context, array: JSTypedArray, fromArg: Smi, toArg: Smi,
comparefn: Callable, load: LoadFn, store: StoreFn)
- labels Detached {
+ labels Detached {
let from: Smi = fromArg;
let to: Smi = toArg;
@@ -112,10 +126,10 @@ module typed_array {
}
}
- macro TypedArrayQuickSortImpl(
+ transitioning macro TypedArrayQuickSortImpl(
context: Context, array: JSTypedArray, fromArg: Smi, toArg: Smi,
comparefn: Callable, load: LoadFn, store: StoreFn)
- labels Detached {
+ labels Detached {
let from: Smi = fromArg;
let to: Smi = toArg;
@@ -132,7 +146,7 @@ module typed_array {
// TODO(szuend): Check if a more involved thirdIndex calculation is
// worth it for very large arrays.
- const thirdIndex: Smi = from + ((to - from) >>> 1);
+ const thirdIndex: Smi = from + ((to - from) >> 1);
if (IsDetachedBuffer(array.buffer)) goto Detached;
@@ -241,7 +255,7 @@ module typed_array {
}
}
- builtin TypedArrayQuickSort(
+ transitioning builtin TypedArrayQuickSort(
context: Context, array: JSTypedArray, from: Smi, to: Smi,
comparefn: Callable, load: LoadFn, store: StoreFn): JSTypedArray {
try {
@@ -256,7 +270,7 @@ module typed_array {
}
// https://tc39.github.io/ecma262/#sec-%typedarray%.prototype.sort
- javascript builtin TypedArrayPrototypeSort(
+ transitioning javascript builtin TypedArrayPrototypeSort(
context: Context, receiver: Object, ...arguments): JSTypedArray {
// 1. If comparefn is not undefined and IsCallable(comparefn) is false,
// throw a TypeError exception.
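Two behavioral details in this file are easy to miss among the annotation changes: macros that can re-enter user JavaScript are now marked transitioning, and the quicksort pivot computation switches from an unsigned shift (>>> 1) to a signed one (>> 1). Because to - from is non-negative here, both halve the distance; the from + ((to - from) >> 1) form also never materializes from + to, which could overflow. A standalone sketch of the midpoint idiom:

    // Overflow-safe midpoint: never forms the intermediate sum from + to.
    int64_t Midpoint(int64_t from, int64_t to) {
      return from + ((to - from) >> 1);  // assumes from <= to
    }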
diff --git a/deps/v8/src/builtins/x64/builtins-x64.cc b/deps/v8/src/builtins/x64/builtins-x64.cc
index 2bc7768417..51ed934869 100644
--- a/deps/v8/src/builtins/x64/builtins-x64.cc
+++ b/deps/v8/src/builtins/x64/builtins-x64.cc
@@ -4,15 +4,22 @@
#if V8_TARGET_ARCH_X64
+#include "src/api-arguments.h"
#include "src/base/adapters.h"
#include "src/code-factory.h"
#include "src/counters.h"
#include "src/deoptimizer.h"
#include "src/frame-constants.h"
#include "src/frames.h"
+#include "src/macro-assembler-inl.h"
#include "src/objects-inl.h"
+#include "src/objects/cell.h"
#include "src/objects/debug-objects.h"
+#include "src/objects/foreign.h"
+#include "src/objects/heap-number.h"
#include "src/objects/js-generator.h"
+#include "src/objects/smi.h"
+#include "src/register-configuration.h"
#include "src/wasm/wasm-linkage.h"
#include "src/wasm/wasm-objects.h"
@@ -63,8 +70,7 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
__ SmiUntag(rax, rax);
}
static_assert(kJavaScriptCallCodeStartRegister == rcx, "ABI mismatch");
- __ leap(rcx, FieldOperand(rcx, Code::kHeaderSize));
- __ jmp(rcx);
+ __ JumpCodeObject(rcx);
}
namespace {
@@ -96,14 +102,14 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
Label loop, entry;
__ movp(rcx, rax);
// ----------- S t a t e -------------
- // -- rax: number of arguments (untagged)
- // -- rdi: constructor function
- // -- rdx: new target
- // -- rbx: pointer to last argument
- // -- rcx: counter
- // -- sp[0*kPointerSize]: the hole (receiver)
- // -- sp[1*kPointerSize]: number of arguments (tagged)
- // -- sp[2*kPointerSize]: context
+ // -- rax: number of arguments (untagged)
+ // -- rdi: constructor function
+ // -- rdx: new target
+ // -- rbx: pointer to last argument
+ // -- rcx: counter
+ // -- sp[0*kSystemPointerSize]: the hole (receiver)
+ // -- sp[1*kSystemPointerSize]: number of arguments (tagged)
+ // -- sp[2*kSystemPointerSize]: context
// -----------------------------------
__ jmp(&entry);
__ bind(&loop);
@@ -129,8 +135,8 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
// Remove caller arguments from the stack and return.
__ PopReturnAddressTo(rcx);
- SmiIndex index = masm->SmiToIndex(rbx, rbx, kPointerSizeLog2);
- __ leap(rsp, Operand(rsp, index.reg, index.scale, 1 * kPointerSize));
+ SmiIndex index = masm->SmiToIndex(rbx, rbx, kSystemPointerSizeLog2);
+ __ leap(rsp, Operand(rsp, index.reg, index.scale, 1 * kSystemPointerSize));
__ PushReturnAddressFrom(rcx);
__ ret(0);
@@ -148,7 +154,7 @@ void Generate_StackOverflowCheck(
// Make scratch the space we have left. The stack might already have
// overflowed here, which will cause scratch to become negative.
__ subp(scratch, kScratchRegister);
- __ sarp(scratch, Immediate(kPointerSizeLog2));
+ __ sarp(scratch, Immediate(kSystemPointerSizeLog2));
// Check if the arguments will overflow the stack.
__ cmpp(scratch, num_args);
// Signed comparison.
@@ -181,14 +187,19 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
__ Push(rdx);
// ----------- S t a t e -------------
- // -- sp[0*kPointerSize]: new target
- // -- sp[1*kPointerSize]: padding
- // -- rdi and sp[2*kPointerSize]: constructor function
- // -- sp[3*kPointerSize]: argument count
- // -- sp[4*kPointerSize]: context
+ // -- sp[0*kSystemPointerSize]: new target
+ // -- sp[1*kSystemPointerSize]: padding
+ // -- rdi and sp[2*kSystemPointerSize]: constructor function
+ // -- sp[3*kSystemPointerSize]: argument count
+ // -- sp[4*kSystemPointerSize]: context
// -----------------------------------
- __ movp(rbx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+ Register decompr_scratch_for_debug =
+ COMPRESS_POINTERS_BOOL ? kScratchRegister : no_reg;
+
+ __ LoadTaggedPointerField(
+ rbx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset),
+ decompr_scratch_for_debug);
__ testl(FieldOperand(rbx, SharedFunctionInfo::kFlagsOffset),
Immediate(SharedFunctionInfo::IsDerivedConstructorBit::kMask));
__ j(not_zero, &not_create_implicit_receiver, Label::kNear);
@@ -205,11 +216,11 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax implicit receiver
- // -- Slot 4 / sp[0*kPointerSize] new target
- // -- Slot 3 / sp[1*kPointerSize] padding
- // -- Slot 2 / sp[2*kPointerSize] constructor function
- // -- Slot 1 / sp[3*kPointerSize] number of arguments (tagged)
- // -- Slot 0 / sp[4*kPointerSize] context
+ // -- Slot 4 / sp[0*kSystemPointerSize] new target
+ // -- Slot 3 / sp[1*kSystemPointerSize] padding
+ // -- Slot 2 / sp[2*kSystemPointerSize] constructor function
+ // -- Slot 1 / sp[3*kSystemPointerSize] number of arguments (tagged)
+ // -- Slot 0 / sp[4*kSystemPointerSize] context
// -----------------------------------
// Deoptimizer enters here.
masm->isolate()->heap()->SetConstructStubCreateDeoptPCOffset(
@@ -226,12 +237,12 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
__ Push(rax);
// ----------- S t a t e -------------
- // -- sp[0*kPointerSize] implicit receiver
- // -- sp[1*kPointerSize] implicit receiver
- // -- sp[2*kPointerSize] padding
- // -- sp[3*kPointerSize] constructor function
- // -- sp[4*kPointerSize] number of arguments (tagged)
- // -- sp[5*kPointerSize] context
+ // -- sp[0*kSystemPointerSize] implicit receiver
+ // -- sp[1*kSystemPointerSize] implicit receiver
+ // -- sp[2*kSystemPointerSize] padding
+ // -- sp[3*kSystemPointerSize] constructor function
+ // -- sp[4*kSystemPointerSize] number of arguments (tagged)
+ // -- sp[5*kSystemPointerSize] context
// -----------------------------------
// Restore constructor function and argument count.
@@ -260,16 +271,16 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
Label loop, entry;
__ movp(rcx, rax);
// ----------- S t a t e -------------
- // -- rax: number of arguments (untagged)
- // -- rdx: new target
- // -- rbx: pointer to last argument
- // -- rcx: counter (tagged)
- // -- sp[0*kPointerSize]: implicit receiver
- // -- sp[1*kPointerSize]: implicit receiver
- // -- sp[2*kPointerSize]: padding
- // -- rdi and sp[3*kPointerSize]: constructor function
- // -- sp[4*kPointerSize]: number of arguments (tagged)
- // -- sp[5*kPointerSize]: context
+ // -- rax: number of arguments (untagged)
+ // -- rdx: new target
+ // -- rbx: pointer to last argument
+ // -- rcx: counter (tagged)
+ // -- sp[0*kSystemPointerSize]: implicit receiver
+ // -- sp[1*kSystemPointerSize]: implicit receiver
+ // -- sp[2*kSystemPointerSize]: padding
+ // -- rdi and sp[3*kSystemPointerSize]: constructor function
+ // -- sp[4*kSystemPointerSize]: number of arguments (tagged)
+ // -- sp[5*kSystemPointerSize]: context
// -----------------------------------
__ jmp(&entry, Label::kNear);
__ bind(&loop);
@@ -284,11 +295,11 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax constructor result
- // -- sp[0*kPointerSize] implicit receiver
- // -- sp[1*kPointerSize] padding
- // -- sp[2*kPointerSize] constructor function
- // -- sp[3*kPointerSize] number of arguments
- // -- sp[4*kPointerSize] context
+ // -- sp[0*kSystemPointerSize] implicit receiver
+ // -- sp[1*kSystemPointerSize] padding
+ // -- sp[2*kSystemPointerSize] constructor function
+ // -- sp[3*kSystemPointerSize] number of arguments
+ // -- sp[4*kSystemPointerSize] context
// -----------------------------------
// Store offset of return address for deoptimizer.
@@ -325,7 +336,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// Throw away the result of the constructor invocation and use the
// on-stack receiver as the result.
__ bind(&use_receiver);
- __ movp(rax, Operand(rsp, 0 * kPointerSize));
+ __ movp(rax, Operand(rsp, 0 * kSystemPointerSize));
__ JumpIfRoot(rax, RootIndex::kTheHoleValue, &do_throw, Label::kNear);
__ bind(&leave_frame);
@@ -335,8 +346,8 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
}
// Remove caller arguments from the stack and return.
__ PopReturnAddressTo(rcx);
- SmiIndex index = masm->SmiToIndex(rbx, rbx, kPointerSizeLog2);
- __ leap(rsp, Operand(rsp, index.reg, index.scale, 1 * kPointerSize));
+ SmiIndex index = masm->SmiToIndex(rbx, rbx, kSystemPointerSizeLog2);
+ __ leap(rsp, Operand(rsp, index.reg, index.scale, 1 * kSystemPointerSize));
__ PushReturnAddressFrom(rcx);
__ ret(0);
}
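The kPointerSize to kSystemPointerSize renames that run through this file distinguish machine-word-sized stack slots from tagged heap slots, which under pointer compression shrink to kTaggedSize. A sketch of the invariant the rename prepares for, assuming the usual compressed-pointer configuration:

    // Stack slots remain machine-word sized on every build.
    static_assert(kSystemPointerSize == sizeof(void*), "machine word size");
    // With pointer compression enabled, tagged slots narrow instead:
    //   kTaggedSize == 4 while kSystemPointerSize == 8 (x64 assumption).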
@@ -351,67 +362,231 @@ void Builtins::Generate_ConstructedNonConstructable(MacroAssembler* masm) {
__ CallRuntime(Runtime::kThrowConstructedNonConstructable);
}
-static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
- bool is_construct) {
- ProfileEntryHookStub::MaybeCallEntryHook(masm);
-
- // Expects five C++ function parameters.
- // - Object* new_target
- // - JSFunction* function
- // - Object* receiver
- // - int argc
- // - Object*** argv
- // (see Handle::Invoke in execution.cc).
+namespace {
- // Open a C++ scope for the FrameScope.
+// Called with the native C calling convention. The corresponding function
+// signature is either:
+// using JSEntryFunction = GeneratedCode<Address(
+// Address root_register_value, Address new_target, Address target,
+// Address receiver, intptr_t argc, Address** argv)>;
+// or
+// using JSEntryFunction = GeneratedCode<Address(
+// Address root_register_value, MicrotaskQueue* microtask_queue)>;
+void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
+ Builtins::Name entry_trampoline) {
+ Label invoke, handler_entry, exit;
+ Label not_outermost_js, not_outermost_js_2;
+
+ { // NOLINT. Scope block confuses linter.
+ NoRootArrayScope uninitialized_root_register(masm);
+ // Set up frame.
+ __ pushq(rbp);
+ __ movp(rbp, rsp);
+
+ // Push the stack frame type.
+ __ Push(Immediate(StackFrame::TypeToMarker(type)));
+ // Reserve a slot for the context. It is filled after the root register has
+ // been set up.
+ __ subp(rsp, Immediate(kSystemPointerSize));
+ // Save callee-saved registers (X64/X32/Win64 calling conventions).
+ __ pushq(r12);
+ __ pushq(r13);
+ __ pushq(r14);
+ __ pushq(r15);
+#ifdef _WIN64
+ __ pushq(rdi); // Only callee-saved in Win64 ABI, an argument in AMD64 ABI.
+ __ pushq(rsi); // Only callee-saved in Win64 ABI, an argument in AMD64 ABI.
+#endif
+ __ pushq(rbx);
+
+#ifdef _WIN64
+ // On Win64 XMM6-XMM15 are callee-save.
+ __ subp(rsp, Immediate(EntryFrameConstants::kXMMRegistersBlockSize));
+ __ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 0), xmm6);
+ __ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 1), xmm7);
+ __ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 2), xmm8);
+ __ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 3), xmm9);
+ __ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 4), xmm10);
+ __ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 5), xmm11);
+ __ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 6), xmm12);
+ __ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 7), xmm13);
+ __ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 8), xmm14);
+ __ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 9), xmm15);
+ STATIC_ASSERT(EntryFrameConstants::kCalleeSaveXMMRegisters == 10);
+ STATIC_ASSERT(EntryFrameConstants::kXMMRegistersBlockSize ==
+ EntryFrameConstants::kXMMRegisterSize *
+ EntryFrameConstants::kCalleeSaveXMMRegisters);
+#endif
+
+ // Initialize the root register.
+ // C calling convention. The first argument is passed in arg_reg_1.
+ __ movp(kRootRegister, arg_reg_1);
+ }
+
+ // Save copies of the top frame descriptor on the stack.
+ ExternalReference c_entry_fp = ExternalReference::Create(
+ IsolateAddressId::kCEntryFPAddress, masm->isolate());
{
-// Platform specific argument handling. After this, the stack contains
-// an internal frame and the pushed function and receiver, and
-// register rax and rbx holds the argument count and argument array,
-// while rdi holds the function pointer, rsi the context, and rdx the
-// new.target.
+ Operand c_entry_fp_operand = masm->ExternalReferenceAsOperand(c_entry_fp);
+ __ Push(c_entry_fp_operand);
+ }
+ // Store the context address in the previously-reserved slot.
+ ExternalReference context_address = ExternalReference::Create(
+ IsolateAddressId::kContextAddress, masm->isolate());
+ __ Load(kScratchRegister, context_address);
+ static constexpr int kOffsetToContextSlot = -2 * kSystemPointerSize;
+ __ movp(Operand(rbp, kOffsetToContextSlot), kScratchRegister);
+
+ // If this is the outermost JS call, set js_entry_sp value.
+ ExternalReference js_entry_sp = ExternalReference::Create(
+ IsolateAddressId::kJSEntrySPAddress, masm->isolate());
+ __ Load(rax, js_entry_sp);
+ __ testp(rax, rax);
+ __ j(not_zero, &not_outermost_js);
+ __ Push(Immediate(StackFrame::OUTERMOST_JSENTRY_FRAME));
+ __ movp(rax, rbp);
+ __ Store(js_entry_sp, rax);
+ Label cont;
+ __ jmp(&cont);
+ __ bind(&not_outermost_js);
+ __ Push(Immediate(StackFrame::INNER_JSENTRY_FRAME));
+ __ bind(&cont);
+
+ // Jump to a faked try block that does the invoke, with a faked catch
+ // block that sets the pending exception.
+ __ jmp(&invoke);
+ __ bind(&handler_entry);
+
+ // Store the current pc as the handler offset. It's used later to create the
+ // handler table.
+ masm->isolate()->builtins()->SetJSEntryHandlerOffset(handler_entry.pos());
+
+ // Caught exception: Store result (exception) in the pending exception
+ // field in the JSEnv and return a failure sentinel.
+ ExternalReference pending_exception = ExternalReference::Create(
+ IsolateAddressId::kPendingExceptionAddress, masm->isolate());
+ __ Store(pending_exception, rax);
+ __ LoadRoot(rax, RootIndex::kException);
+ __ jmp(&exit);
+
+ // Invoke: Link this frame into the handler chain.
+ __ bind(&invoke);
+ __ PushStackHandler();
+
+ // Invoke the function by calling through JS entry trampoline builtin and
+ // pop the faked function when we return.
+ Handle<Code> trampoline_code =
+ masm->isolate()->builtins()->builtin_handle(entry_trampoline);
+ __ Call(trampoline_code, RelocInfo::CODE_TARGET);
+
+ // Unlink this frame from the handler chain.
+ __ PopStackHandler();
+
+ __ bind(&exit);
+ // Check if the current stack frame is marked as the outermost JS frame.
+ __ Pop(rbx);
+ __ cmpp(rbx, Immediate(StackFrame::OUTERMOST_JSENTRY_FRAME));
+ __ j(not_equal, &not_outermost_js_2);
+ __ Move(kScratchRegister, js_entry_sp);
+ __ movp(Operand(kScratchRegister, 0), Immediate(0));
+ __ bind(&not_outermost_js_2);
+
+ // Restore the top frame descriptor from the stack.
+ {
+ Operand c_entry_fp_operand = masm->ExternalReferenceAsOperand(c_entry_fp);
+ __ Pop(c_entry_fp_operand);
+ }
+
+ // Restore callee-saved registers (X64 conventions).
#ifdef _WIN64
- // MSVC parameters in:
- // rcx : new_target
- // rdx : function
- // r8 : receiver
- // r9 : argc
- // [rsp+0x20] : argv
+ // On Win64 XMM6-XMM15 are callee-save.
+ __ movdqu(xmm6, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 0));
+ __ movdqu(xmm7, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 1));
+ __ movdqu(xmm8, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 2));
+ __ movdqu(xmm9, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 3));
+ __ movdqu(xmm10, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 4));
+ __ movdqu(xmm11, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 5));
+ __ movdqu(xmm12, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 6));
+ __ movdqu(xmm13, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 7));
+ __ movdqu(xmm14, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 8));
+ __ movdqu(xmm15, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 9));
+ __ addp(rsp, Immediate(EntryFrameConstants::kXMMRegistersBlockSize));
+#endif
+
+ __ popq(rbx);
+#ifdef _WIN64
+ // Callee-saved in Win64 ABI, arguments/volatile in AMD64 ABI.
+ __ popq(rsi);
+ __ popq(rdi);
+#endif
+ __ popq(r15);
+ __ popq(r14);
+ __ popq(r13);
+ __ popq(r12);
+ __ addp(rsp, Immediate(2 * kSystemPointerSize)); // remove markers
+
+ // Restore frame pointer and return.
+ __ popq(rbp);
+ __ ret(0);
+}
- // Enter an internal frame.
- FrameScope scope(masm, StackFrame::INTERNAL);
+} // namespace
- // Setup the context (we need to use the caller context from the isolate).
- ExternalReference context_address = ExternalReference::Create(
- IsolateAddressId::kContextAddress, masm->isolate());
- __ movp(rsi, masm->ExternalOperand(context_address));
+void Builtins::Generate_JSEntry(MacroAssembler* masm) {
+ Generate_JSEntryVariant(masm, StackFrame::ENTRY,
+ Builtins::kJSEntryTrampoline);
+}
- // Push the function and the receiver onto the stack.
- __ Push(rdx);
- __ Push(r8);
+void Builtins::Generate_JSConstructEntry(MacroAssembler* masm) {
+ Generate_JSEntryVariant(masm, StackFrame::CONSTRUCT_ENTRY,
+ Builtins::kJSConstructEntryTrampoline);
+}
- // Load the number of arguments and setup pointer to the arguments.
- __ movp(rax, r9);
- // Load the previous frame pointer to access C argument on stack
- __ movp(kScratchRegister, Operand(rbp, 0));
- __ movp(rbx, Operand(kScratchRegister, EntryFrameConstants::kArgvOffset));
- // Load the function pointer into rdi.
- __ movp(rdi, rdx);
- // Load the new.target into rdx.
- __ movp(rdx, rcx);
-#else // _WIN64
+void Builtins::Generate_JSRunMicrotasksEntry(MacroAssembler* masm) {
+ Generate_JSEntryVariant(masm, StackFrame::ENTRY,
+ Builtins::kRunMicrotasksTrampoline);
+}
+
+static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
+ bool is_construct) {
+ // Expects six C++ function parameters.
+ // - Address root_register_value
+ // - Address new_target (tagged Object pointer)
+ // - Address function (tagged JSFunction pointer)
+ // - Address receiver (tagged Object pointer)
+ // - intptr_t argc
+ // - Address** argv (pointer to array of tagged Object pointers)
+ // (see Handle::Invoke in execution.cc).
+
+ // Open a C++ scope for the FrameScope.
+ {
+ // Platform specific argument handling. After this, the stack contains
+ // an internal frame and the pushed function and receiver, and
+ // registers rax and rbx hold the argument count and argument array,
+ // while rdi holds the function pointer, rsi the context, and rdx the
+ // new.target.
+
+ // MSVC parameters in:
+ // rcx : root_register_value
+ // rdx : new_target
+ // r8 : function
+ // r9 : receiver
+ // [rsp+0x20] : argc
+ // [rsp+0x28] : argv
+ //
// GCC parameters in:
- // rdi : new_target
- // rsi : function
- // rdx : receiver
- // rcx : argc
- // r8 : argv
-
- __ movp(r11, rdi);
- __ movp(rdi, rsi);
+ // rdi : root_register_value
+ // rsi : new_target
+ // rdx : function
+ // rcx : receiver
+ // r8 : argc
+ // r9 : argv
+
+ __ movp(rdi, arg_reg_3);
+ __ Move(rdx, arg_reg_2);
// rdi : function
- // r11 : new_target
+ // rdx : new_target
// Clear the context before we push it when entering the internal frame.
__ Set(rsi, 0);
@@ -422,23 +597,27 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// Setup the context (we need to use the caller context from the isolate).
ExternalReference context_address = ExternalReference::Create(
IsolateAddressId::kContextAddress, masm->isolate());
- __ movp(rsi, masm->ExternalOperand(context_address));
+ __ movp(rsi, masm->ExternalReferenceAsOperand(context_address));
- // Push the function and receiver onto the stack.
+ // Push the function and the receiver onto the stack.
__ Push(rdi);
- __ Push(rdx);
+ __ Push(arg_reg_4);
+#ifdef _WIN64
+ // Load the previous frame pointer to access C arguments on stack
+ __ movp(kScratchRegister, Operand(rbp, 0));
// Load the number of arguments and setup pointer to the arguments.
- __ movp(rax, rcx);
- __ movp(rbx, r8);
-
- // Load the new.target into rdx.
- __ movp(rdx, r11);
+ __ movp(rax, Operand(kScratchRegister, EntryFrameConstants::kArgcOffset));
+ __ movp(rbx, Operand(kScratchRegister, EntryFrameConstants::kArgvOffset));
+#else // _WIN64
+ // Load the number of arguments and setup pointer to the arguments.
+ __ movp(rax, r8);
+ __ movp(rbx, r9);
#endif // _WIN64
// Current stack contents:
- // [rsp + 2 * kPointerSize ... ] : Internal frame
- // [rsp + kPointerSize] : function
+ // [rsp + 2 * kSystemPointerSize ... ] : Internal frame
+ // [rsp + kSystemPointerSize] : function
// [rsp] : receiver
// Current register contents:
// rax : argc
@@ -496,6 +675,12 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
Generate_JSEntryTrampolineHelper(masm, true);
}
+void Builtins::Generate_RunMicrotasksTrampoline(MacroAssembler* masm) {
+ // arg_reg_2: microtask_queue
+ __ movp(RunMicrotasksDescriptor::MicrotaskQueueRegister(), arg_reg_2);
+ __ Jump(BUILTIN_CODE(masm->isolate(), RunMicrotasks), RelocInfo::CODE_TARGET);
+}
+
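All three entry stubs now receive the isolate's root address as an explicit first C argument and install it into kRootRegister before any frame is built (the "__ movp(kRootRegister, arg_reg_1)" in Generate_JSEntryVariant), so every caller has to thread it through. Mirroring the signature quoted in the comment block at the top of Generate_JSEntryVariant:

    // Caller-side shape; GeneratedCode<> is V8's wrapper for generated stubs.
    using JSEntryFunction = GeneratedCode<Address(
        Address root_register_value, Address new_target, Address target,
        Address receiver, intptr_t argc, Address** argv)>;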
static void GetSharedFunctionInfoBytecode(MacroAssembler* masm,
Register sfi_data,
Register scratch1) {
@@ -503,8 +688,12 @@ static void GetSharedFunctionInfoBytecode(MacroAssembler* masm,
__ CmpObjectType(sfi_data, INTERPRETER_DATA_TYPE, scratch1);
__ j(not_equal, &done, Label::kNear);
- __ movp(sfi_data,
- FieldOperand(sfi_data, InterpreterData::kBytecodeArrayOffset));
+ Register decompr_scratch_for_debug =
+ COMPRESS_POINTERS_BOOL ? scratch1 : no_reg;
+
+ __ LoadTaggedPointerField(
+ sfi_data, FieldOperand(sfi_data, InterpreterData::kBytecodeArrayOffset),
+ decompr_scratch_for_debug);
__ bind(&done);
}
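GetSharedFunctionInfoBytecode now loads the InterpreterData slot through LoadTaggedPointerField, which on COMPRESS_POINTERS_BOOL builds decompresses a 32-bit on-heap value against the isolate root (the extra decompr_scratch_for_debug register only feeds debug-mode verification). Conceptually, as a sketch rather than the real macro-assembler sequence:

    // Uncompressed build: an ordinary machine-word load.
    // Compressed build: widen the 32-bit slot and rebase on the isolate root.
    Address LoadTaggedPointer(Address isolate_root, uint32_t compressed) {
      return isolate_root + static_cast<Address>(compressed);
    }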
@@ -519,20 +708,29 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ AssertGeneratorObject(rdx);
// Store input value into generator object.
- __ movp(FieldOperand(rdx, JSGeneratorObject::kInputOrDebugPosOffset), rax);
+ __ StoreTaggedField(
+ FieldOperand(rdx, JSGeneratorObject::kInputOrDebugPosOffset), rax);
__ RecordWriteField(rdx, JSGeneratorObject::kInputOrDebugPosOffset, rax, rcx,
kDontSaveFPRegs);
+ Register decompr_scratch1 = COMPRESS_POINTERS_BOOL ? r11 : no_reg;
+ Register decompr_scratch2 = COMPRESS_POINTERS_BOOL ? r12 : no_reg;
+ Register decompr_scratch_for_debug =
+ COMPRESS_POINTERS_BOOL ? kScratchRegister : no_reg;
+
// Load suspended function and context.
- __ movp(rdi, FieldOperand(rdx, JSGeneratorObject::kFunctionOffset));
- __ movp(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
+ __ LoadTaggedPointerField(
+ rdi, FieldOperand(rdx, JSGeneratorObject::kFunctionOffset),
+ decompr_scratch_for_debug);
+ __ LoadTaggedPointerField(rsi, FieldOperand(rdi, JSFunction::kContextOffset),
+ decompr_scratch_for_debug);
// Flood function if we are stepping.
Label prepare_step_in_if_stepping, prepare_step_in_suspended_generator;
Label stepping_prepared;
ExternalReference debug_hook =
ExternalReference::debug_hook_on_function_call_address(masm->isolate());
- Operand debug_hook_operand = masm->ExternalOperand(debug_hook);
+ Operand debug_hook_operand = masm->ExternalReferenceAsOperand(debug_hook);
__ cmpb(debug_hook_operand, Immediate(0));
__ j(not_equal, &prepare_step_in_if_stepping);
@@ -540,7 +738,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
ExternalReference debug_suspended_generator =
ExternalReference::debug_suspended_generator_address(masm->isolate());
Operand debug_suspended_generator_operand =
- masm->ExternalOperand(debug_suspended_generator);
+ masm->ExternalReferenceAsOperand(debug_suspended_generator);
__ cmpp(rdx, debug_suspended_generator_operand);
__ j(equal, &prepare_step_in_suspended_generator);
__ bind(&stepping_prepared);
@@ -555,7 +753,9 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ PopReturnAddressTo(rax);
// Push receiver.
- __ Push(FieldOperand(rdx, JSGeneratorObject::kReceiverOffset));
+ __ PushTaggedPointerField(
+ FieldOperand(rdx, JSGeneratorObject::kReceiverOffset), decompr_scratch1,
+ decompr_scratch_for_debug);
// ----------- S t a t e -------------
// -- rax : return address
@@ -566,12 +766,15 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// -----------------------------------
// Copy the function arguments from the generator object's register file.
- __ movp(rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadTaggedPointerField(
+ rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset),
+ decompr_scratch_for_debug);
__ movzxwq(
rcx, FieldOperand(rcx, SharedFunctionInfo::kFormalParameterCountOffset));
- __ movp(rbx,
- FieldOperand(rdx, JSGeneratorObject::kParametersAndRegistersOffset));
+ __ LoadTaggedPointerField(
+ rbx, FieldOperand(rdx, JSGeneratorObject::kParametersAndRegistersOffset),
+ decompr_scratch_for_debug);
{
Label done_loop, loop;
@@ -580,7 +783,9 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ bind(&loop);
__ cmpl(r9, rcx);
__ j(greater_equal, &done_loop, Label::kNear);
- __ Push(FieldOperand(rbx, r9, times_pointer_size, FixedArray::kHeaderSize));
+ __ PushTaggedAnyField(
+ FieldOperand(rbx, r9, times_tagged_size, FixedArray::kHeaderSize),
+ decompr_scratch1, decompr_scratch2, decompr_scratch_for_debug);
__ addl(r9, Immediate(1));
__ jmp(&loop);
@@ -589,8 +794,12 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// Underlying function needs to have bytecode available.
if (FLAG_debug_code) {
- __ movp(rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
- __ movp(rcx, FieldOperand(rcx, SharedFunctionInfo::kFunctionDataOffset));
+ __ LoadTaggedPointerField(
+ rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset),
+ decompr_scratch_for_debug);
+ __ LoadTaggedPointerField(
+ rcx, FieldOperand(rcx, SharedFunctionInfo::kFunctionDataOffset),
+ decompr_scratch_for_debug);
GetSharedFunctionInfoBytecode(masm, rcx, kScratchRegister);
__ CmpObjectType(rcx, BYTECODE_ARRAY_TYPE, rcx);
__ Assert(equal, AbortReason::kMissingBytecodeArray);
@@ -599,16 +808,18 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// Resume (Ignition/TurboFan) generator object.
{
__ PushReturnAddressFrom(rax);
- __ movp(rax, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadTaggedPointerField(
+ rax, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset),
+ decompr_scratch_for_debug);
__ movzxwq(rax, FieldOperand(
rax, SharedFunctionInfo::kFormalParameterCountOffset));
// We abuse new.target both to indicate that this is a resume call and to
// pass in the generator object. In ordinary calls, new.target is always
// undefined because generator functions are non-constructable.
static_assert(kJavaScriptCallCodeStartRegister == rcx, "ABI mismatch");
- __ movp(rcx, FieldOperand(rdi, JSFunction::kCodeOffset));
- __ addp(rcx, Immediate(Code::kHeaderSize - kHeapObjectTag));
- __ jmp(rcx);
+ __ LoadTaggedPointerField(rcx, FieldOperand(rdi, JSFunction::kCodeOffset),
+ decompr_scratch_for_debug);
+ __ JumpCodeObject(rcx);
}
__ bind(&prepare_step_in_if_stepping);
@@ -620,7 +831,9 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ PushRoot(RootIndex::kTheHoleValue);
__ CallRuntime(Runtime::kDebugOnFunctionCall);
__ Pop(rdx);
- __ movp(rdi, FieldOperand(rdx, JSGeneratorObject::kFunctionOffset));
+ __ LoadTaggedPointerField(
+ rdi, FieldOperand(rdx, JSGeneratorObject::kFunctionOffset),
+ decompr_scratch_for_debug);
}
__ jmp(&stepping_prepared);
@@ -630,7 +843,9 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ Push(rdx);
__ CallRuntime(Runtime::kDebugPrepareStepInSuspendedGenerator);
__ Pop(rdx);
- __ movp(rdi, FieldOperand(rdx, JSGeneratorObject::kFunctionOffset));
+ __ LoadTaggedPointerField(
+ rdi, FieldOperand(rdx, JSGeneratorObject::kFunctionOffset),
+ decompr_scratch_for_debug);
}
__ jmp(&stepping_prepared);
@@ -649,7 +864,8 @@ static void ReplaceClosureCodeWithOptimizedCode(
Register scratch1, Register scratch2, Register scratch3) {
// Store the optimized code in the closure.
- __ movp(FieldOperand(closure, JSFunction::kCodeOffset), optimized_code);
+ __ StoreTaggedField(FieldOperand(closure, JSFunction::kCodeOffset),
+ optimized_code);
__ movp(scratch1, optimized_code); // Write barrier clobbers scratch1 below.
__ RecordWriteField(closure, JSFunction::kCodeOffset, scratch1, scratch2,
kDontSaveFPRegs, OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
@@ -704,9 +920,14 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
Register closure = rdi;
Register optimized_code_entry = scratch1;
+ Register decompr_scratch = COMPRESS_POINTERS_BOOL ? scratch2 : no_reg;
+ Register decompr_scratch_for_debug =
+ COMPRESS_POINTERS_BOOL ? scratch3 : no_reg;
- __ movp(optimized_code_entry,
- FieldOperand(feedback_vector, FeedbackVector::kOptimizedCodeOffset));
+ __ LoadAnyTaggedField(
+ optimized_code_entry,
+ FieldOperand(feedback_vector, FeedbackVector::kOptimizedCodeOffset),
+ decompr_scratch, decompr_scratch_for_debug);
// Check if the code entry is a Smi. If yes, we interpret it as an
// optimisation marker. Otherwise, interpret it as a weak reference to a code
@@ -721,6 +942,9 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
Smi::FromEnum(OptimizationMarker::kNone));
__ j(equal, &fallthrough);
+ // TODO(v8:8394): The logging of first execution will break if
+ // feedback vectors are not allocated. We need to find a different way of
+ // logging these events if required.
TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry,
OptimizationMarker::kLogFirstExecution,
Runtime::kFunctionFirstExecution);
@@ -753,8 +977,10 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
// Check if the optimized code is marked for deopt. If it is, call the
// runtime to clear it.
Label found_deoptimized_code;
- __ movp(scratch2,
- FieldOperand(optimized_code_entry, Code::kCodeDataContainerOffset));
+ __ LoadTaggedPointerField(
+ scratch2,
+ FieldOperand(optimized_code_entry, Code::kCodeDataContainerOffset),
+ decompr_scratch_for_debug);
__ testl(
FieldOperand(scratch2, CodeDataContainer::kKindSpecificFlagsOffset),
Immediate(1 << Code::kMarkedForDeoptimizationBit));
@@ -768,8 +994,7 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
scratch2, scratch3, feedback_vector);
static_assert(kJavaScriptCallCodeStartRegister == rcx, "ABI mismatch");
__ Move(rcx, optimized_code_entry);
- __ addp(rcx, Immediate(Code::kHeaderSize - kHeapObjectTag));
- __ jmp(rcx);
+ __ JumpCodeObject(rcx);
// Optimized code slot contains deoptimized code, evict it and re-enter the
// closure's code.
@@ -852,50 +1077,60 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
// The function builds an interpreter frame. See InterpreterFrameConstants in
// frames.h for its layout.
void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
- ProfileEntryHookStub::MaybeCallEntryHook(masm);
-
Register closure = rdi;
Register feedback_vector = rbx;
+ Register decompr_scratch_for_debug =
+ COMPRESS_POINTERS_BOOL ? kScratchRegister : no_reg;
+
+ // Get the bytecode array from the function object and load it into
+ // kInterpreterBytecodeArrayRegister.
+ __ LoadTaggedPointerField(
+ rax, FieldOperand(closure, JSFunction::kSharedFunctionInfoOffset),
+ decompr_scratch_for_debug);
+ __ LoadTaggedPointerField(
+ kInterpreterBytecodeArrayRegister,
+ FieldOperand(rax, SharedFunctionInfo::kFunctionDataOffset),
+ decompr_scratch_for_debug);
+ GetSharedFunctionInfoBytecode(masm, kInterpreterBytecodeArrayRegister,
+ kScratchRegister);
+
+ // The bytecode array could have been flushed from the shared function info;
+ // if so, call into CompileLazy.
+ Label compile_lazy;
+ __ CmpObjectType(kInterpreterBytecodeArrayRegister, BYTECODE_ARRAY_TYPE, rax);
+ __ j(not_equal, &compile_lazy);
// Load the feedback vector from the closure.
- __ movp(feedback_vector,
- FieldOperand(closure, JSFunction::kFeedbackCellOffset));
- __ movp(feedback_vector, FieldOperand(feedback_vector, Cell::kValueOffset));
+ __ LoadTaggedPointerField(
+ feedback_vector, FieldOperand(closure, JSFunction::kFeedbackCellOffset),
+ decompr_scratch_for_debug);
+ __ LoadTaggedPointerField(feedback_vector,
+ FieldOperand(feedback_vector, Cell::kValueOffset),
+ decompr_scratch_for_debug);
+
+ Label push_stack_frame;
+ // Check if feedback vector is valid. If valid, check for optimized code
+ // and update invocation count. Otherwise, setup the stack frame.
+ __ JumpIfRoot(feedback_vector, RootIndex::kUndefinedValue, &push_stack_frame);
+
// Read off the optimized code slot in the feedback vector, and if there
// is optimized code or an optimization marker, call that instead.
- MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, rcx, r14, r15);
+ MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, rcx, r11, r15);
+
+ // Increment invocation count for the function.
+ __ incl(
+ FieldOperand(feedback_vector, FeedbackVector::kInvocationCountOffset));
// Open a frame scope to indicate that there is a frame on the stack. The
// MANUAL indicates that the scope shouldn't actually generate code to set up
// the frame (that is done below).
+ __ bind(&push_stack_frame);
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ pushq(rbp); // Caller's frame pointer.
__ movp(rbp, rsp);
__ Push(rsi); // Callee's context.
__ Push(rdi); // Callee's JS function.
- // Get the bytecode array from the function object and load it into
- // kInterpreterBytecodeArrayRegister.
- __ movp(rax, FieldOperand(closure, JSFunction::kSharedFunctionInfoOffset));
- __ movp(kInterpreterBytecodeArrayRegister,
- FieldOperand(rax, SharedFunctionInfo::kFunctionDataOffset));
- GetSharedFunctionInfoBytecode(masm, kInterpreterBytecodeArrayRegister,
- kScratchRegister);
-
- // Increment invocation count for the function.
- __ incl(
- FieldOperand(feedback_vector, FeedbackVector::kInvocationCountOffset));
-
- // Check function data field is actually a BytecodeArray object.
- if (FLAG_debug_code) {
- __ AssertNotSmi(kInterpreterBytecodeArrayRegister);
- __ CmpObjectType(kInterpreterBytecodeArrayRegister, BYTECODE_ARRAY_TYPE,
- rax);
- __ Assert(
- equal,
- AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
- }
-
// Reset code age.
__ movb(FieldOperand(kInterpreterBytecodeArrayRegister,
BytecodeArray::kBytecodeAgeOffset),
@@ -935,7 +1170,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Push(rax);
// Continue loop if not done.
__ bind(&loop_check);
- __ subp(rcx, Immediate(kPointerSize));
+ __ subp(rcx, Immediate(kSystemPointerSize));
__ j(greater_equal, &loop_header, Label::kNear);
}
@@ -993,6 +1228,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// The return value is in rax.
LeaveInterpreterFrame(masm, rbx, rcx);
__ ret(0);
+
+ __ bind(&compile_lazy);
+ GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
+ __ int3(); // Should not return.
}
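Note the reordering in Generate_InterpreterEntryTrampoline: the bytecode array is fetched and type-checked before the interpreter frame is pushed, so a flushed bytecode array can tail-call into Runtime::kCompileLazy without unwinding a half-built frame, and the invocation-count bump plus optimized-code probe are now gated on the feedback vector actually being allocated. The prologue's rough control flow, with illustrative helper names:

    // Sketch of the reordered prologue (not real V8 helpers).
    if (!IsBytecodeArray(sfi_function_data))    // bytecode may be flushed
      return TailCall(Runtime::kCompileLazy);
    if (FeedbackVectorIsValid(closure)) {       // may still be undefined
      MaybeTailCallOptimizedCodeSlot();
      IncrementInvocationCount();
    }
    PushStackFrame();                           // bind(&push_stack_frame)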
static void Generate_InterpreterPushArgs(MacroAssembler* masm,
@@ -1001,7 +1240,7 @@ static void Generate_InterpreterPushArgs(MacroAssembler* masm,
Register scratch) {
// Find the address of the last argument.
__ Move(scratch, num_args);
- __ shlp(scratch, Immediate(kPointerSizeLog2));
+ __ shlp(scratch, Immediate(kSystemPointerSizeLog2));
__ negp(scratch);
__ addp(scratch, start_address);
@@ -1010,7 +1249,7 @@ static void Generate_InterpreterPushArgs(MacroAssembler* masm,
__ j(always, &loop_check, Label::kNear);
__ bind(&loop_header);
__ Push(Operand(start_address, 0));
- __ subp(start_address, Immediate(kPointerSize));
+ __ subp(start_address, Immediate(kSystemPointerSize));
__ bind(&loop_check);
__ cmpp(start_address, scratch);
__ j(greater, &loop_header, Label::kNear);
@@ -1141,28 +1380,43 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
// Set the return address to the correct point in the interpreter entry
// trampoline.
Label builtin_trampoline, trampoline_loaded;
- Smi* interpreter_entry_return_pc_offset(
+ Smi interpreter_entry_return_pc_offset(
masm->isolate()->heap()->interpreter_entry_return_pc_offset());
DCHECK_NE(interpreter_entry_return_pc_offset, Smi::kZero);
- // If the SFI function_data is an InterpreterData, get the trampoline stored
- // in it, otherwise get the trampoline from the builtins list.
+ Register decompr_scratch_for_debug =
+ COMPRESS_POINTERS_BOOL ? kScratchRegister : no_reg;
+
+ // If the SFI function_data is an InterpreterData, the function will have a
+ // custom copy of the interpreter entry trampoline for profiling. If so,
+ // get the custom trampoline, otherwise grab the entry address of the global
+ // trampoline.
__ movp(rbx, Operand(rbp, StandardFrameConstants::kFunctionOffset));
- __ movp(rbx, FieldOperand(rbx, JSFunction::kSharedFunctionInfoOffset));
- __ movp(rbx, FieldOperand(rbx, SharedFunctionInfo::kFunctionDataOffset));
+ __ LoadTaggedPointerField(
+ rbx, FieldOperand(rbx, JSFunction::kSharedFunctionInfoOffset),
+ decompr_scratch_for_debug);
+ __ LoadTaggedPointerField(
+ rbx, FieldOperand(rbx, SharedFunctionInfo::kFunctionDataOffset),
+ decompr_scratch_for_debug);
__ CmpObjectType(rbx, INTERPRETER_DATA_TYPE, kScratchRegister);
__ j(not_equal, &builtin_trampoline, Label::kNear);
__ movp(rbx,
FieldOperand(rbx, InterpreterData::kInterpreterTrampolineOffset));
+ __ addp(rbx, Immediate(Code::kHeaderSize - kHeapObjectTag));
__ jmp(&trampoline_loaded, Label::kNear);
__ bind(&builtin_trampoline);
- __ Move(rbx, BUILTIN_CODE(masm->isolate(), InterpreterEntryTrampoline));
+ // TODO(jgruber): Replace this by a lookup in the builtin entry table.
+ __ movp(rbx,
+ __ ExternalReferenceAsOperand(
+ ExternalReference::
+ address_of_interpreter_entry_trampoline_instruction_start(
+ masm->isolate()),
+ kScratchRegister));
__ bind(&trampoline_loaded);
- __ addp(rbx, Immediate(interpreter_entry_return_pc_offset->value() +
- Code::kHeaderSize - kHeapObjectTag));
+ __ addp(rbx, Immediate(interpreter_entry_return_pc_offset->value()));
__ Push(rbx);
// Initialize dispatch table register.
@@ -1262,8 +1516,8 @@ void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
__ j(not_equal, &over, Label::kNear);
}
for (int i = j - 1; i >= 0; --i) {
- __ Push(Operand(
- rbp, StandardFrameConstants::kCallerSPOffset + i * kPointerSize));
+ __ Push(Operand(rbp, StandardFrameConstants::kCallerSPOffset +
+ i * kSystemPointerSize));
}
for (int i = 0; i < 3 - j; ++i) {
__ PushRoot(RootIndex::kUndefinedValue);
@@ -1300,9 +1554,11 @@ void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
}
// On failure, tail call back to regular JS by re-calling the function,
// which has been reset to the CompileLazy builtin.
- __ movp(rcx, FieldOperand(rdi, JSFunction::kCodeOffset));
- __ addp(rcx, Immediate(Code::kHeaderSize - kHeapObjectTag));
- __ jmp(rcx);
+ Register decompr_scratch_for_debug =
+ COMPRESS_POINTERS_BOOL ? kScratchRegister : no_reg;
+ __ LoadTaggedPointerField(rcx, FieldOperand(rdi, JSFunction::kCodeOffset),
+ decompr_scratch_for_debug);
+ __ JumpCodeObject(rcx);
}
namespace {
@@ -1314,10 +1570,11 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
if (with_result) {
// Overwrite the hole inserted by the deoptimizer with the return value from
// the LAZY deopt point.
- __ movq(Operand(rsp,
- config->num_allocatable_general_registers() * kPointerSize +
- BuiltinContinuationFrameConstants::kFixedFrameSize),
- rax);
+ __ movq(
+ Operand(rsp, config->num_allocatable_general_registers() *
+ kSystemPointerSize +
+ BuiltinContinuationFrameConstants::kFixedFrameSize),
+ rax);
}
for (int i = allocatable_register_count - 1; i >= 0; --i) {
int code = config->GetAllocatableGeneralCode(i);
@@ -1330,9 +1587,10 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
rbp,
Operand(rsp, BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp));
const int offsetToPC =
- BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp - kPointerSize;
+ BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp -
+ kSystemPointerSize;
__ popq(Operand(rsp, offsetToPC));
- __ Drop(offsetToPC / kPointerSize);
+ __ Drop(offsetToPC / kSystemPointerSize);
__ addq(Operand(rsp, 0), Immediate(Code::kHeaderSize - kHeapObjectTag));
__ Ret();
}
@@ -1366,7 +1624,7 @@ void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
DCHECK_EQ(kInterpreterAccumulatorRegister.code(), rax.code());
__ movp(rax, Operand(rsp, kPCOnStackSize));
- __ ret(1 * kPointerSize); // Remove rax.
+ __ ret(1 * kSystemPointerSize); // Remove rax.
}
// static
@@ -1399,7 +1657,7 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
}
__ bind(&no_this_arg);
__ PopReturnAddressTo(rcx);
- __ leap(rsp, Operand(rsp, rax, times_pointer_size, kPointerSize));
+ __ leap(rsp, Operand(rsp, rax, times_pointer_size, kSystemPointerSize));
__ Push(rdx);
__ PushReturnAddressFrom(rcx);
}
@@ -1515,7 +1773,7 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
__ movp(rbx, args.GetArgumentOperand(3)); // argumentsList
__ bind(&done);
__ PopReturnAddressTo(rcx);
- __ leap(rsp, Operand(rsp, rax, times_pointer_size, kPointerSize));
+ __ leap(rsp, Operand(rsp, rax, times_pointer_size, kSystemPointerSize));
__ Push(rdx);
__ PushReturnAddressFrom(rcx);
}
@@ -1567,7 +1825,7 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
__ movp(rdx, args.GetArgumentOperand(3)); // new.target
__ bind(&done);
__ PopReturnAddressTo(rcx);
- __ leap(rsp, Operand(rsp, rax, times_pointer_size, kPointerSize));
+ __ leap(rsp, Operand(rsp, rax, times_pointer_size, kSystemPointerSize));
__ PushRoot(RootIndex::kUndefinedValue);
__ PushReturnAddressFrom(rcx);
}
@@ -1602,8 +1860,12 @@ void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) {
Label generic_array_code;
if (FLAG_debug_code) {
+ Register decompr_scratch_for_debug =
+ COMPRESS_POINTERS_BOOL ? kScratchRegister : no_reg;
// Initial map for the builtin InternalArray functions should be maps.
- __ movp(rbx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
+ __ LoadTaggedPointerField(
+ rbx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset),
+ decompr_scratch_for_debug);
// Will both indicate a nullptr and a Smi.
STATIC_ASSERT(kSmiTag == 0);
Condition not_smi = NegateCondition(masm->CheckSmi(rbx));
@@ -1648,8 +1910,8 @@ static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
// Remove caller arguments from the stack.
__ PopReturnAddressTo(rcx);
- SmiIndex index = masm->SmiToIndex(rbx, rbx, kPointerSizeLog2);
- __ leap(rsp, Operand(rsp, index.reg, index.scale, 1 * kPointerSize));
+ SmiIndex index = masm->SmiToIndex(rbx, rbx, kSystemPointerSizeLog2);
+ __ leap(rsp, Operand(rsp, index.reg, index.scale, 1 * kSystemPointerSize));
__ PushReturnAddressFrom(rcx);
}
@@ -1661,11 +1923,10 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// -- rdi : function (passed through to callee)
// -----------------------------------
- Label invoke, dont_adapt_arguments, stack_overflow;
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->arguments_adaptors(), 1);
+ Register decompr_scratch_for_debug =
+ COMPRESS_POINTERS_BOOL ? kScratchRegister : no_reg;
- Label enough, too_few;
+ Label invoke, dont_adapt_arguments, stack_overflow, enough, too_few;
__ cmpp(rbx, Immediate(SharedFunctionInfo::kDontAdaptArgumentsSentinel));
__ j(equal, &dont_adapt_arguments);
__ cmpp(rax, rbx);
@@ -1686,7 +1947,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ bind(&copy);
__ incp(r8);
__ Push(Operand(rax, 0));
- __ subp(rax, Immediate(kPointerSize));
+ __ subp(rax, Immediate(kSystemPointerSize));
__ cmpp(r8, rbx);
__ j(less, &copy);
__ jmp(&invoke);
@@ -1708,7 +1969,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ bind(&copy);
__ incp(r8);
__ Push(Operand(rdi, 0));
- __ subp(rdi, Immediate(kPointerSize));
+ __ subp(rdi, Immediate(kSystemPointerSize));
__ cmpp(r8, rax);
__ j(less, &copy);
@@ -1732,9 +1993,9 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// rdx : new target (passed through to callee)
// rdi : function (passed through to callee)
static_assert(kJavaScriptCallCodeStartRegister == rcx, "ABI mismatch");
- __ movp(rcx, FieldOperand(rdi, JSFunction::kCodeOffset));
- __ addp(rcx, Immediate(Code::kHeaderSize - kHeapObjectTag));
- __ call(rcx);
+ __ LoadTaggedPointerField(rcx, FieldOperand(rdi, JSFunction::kCodeOffset),
+ decompr_scratch_for_debug);
+ __ CallCodeObject(rcx);
// Store offset of return address for deoptimizer.
masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(masm->pc_offset());
@@ -1748,9 +2009,9 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// -------------------------------------------
__ bind(&dont_adapt_arguments);
static_assert(kJavaScriptCallCodeStartRegister == rcx, "ABI mismatch");
- __ movp(rcx, FieldOperand(rdi, JSFunction::kCodeOffset));
- __ addp(rcx, Immediate(Code::kHeaderSize - kHeapObjectTag));
- __ jmp(rcx);
+ __ LoadTaggedPointerField(rcx, FieldOperand(rdi, JSFunction::kCodeOffset),
+ decompr_scratch_for_debug);
+ __ JumpCodeObject(rcx);
__ bind(&stack_overflow);
{
@@ -1771,12 +2032,18 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
// -- rdx : new.target (for [[Construct]])
// -- rsp[0] : return address
// -----------------------------------
+ Register scratch = r11;
+ Register decompr_scratch = COMPRESS_POINTERS_BOOL ? r12 : no_reg;
+ Register decompr_scratch_for_debug =
+ COMPRESS_POINTERS_BOOL ? kScratchRegister : no_reg;
+
if (masm->emit_debug_code()) {
// Allow rbx to be a FixedArray, or a FixedDoubleArray if rcx == 0.
Label ok, fail;
__ AssertNotSmi(rbx);
Register map = r9;
- __ movp(map, FieldOperand(rbx, HeapObject::kMapOffset));
+ __ LoadTaggedPointerField(map, FieldOperand(rbx, HeapObject::kMapOffset),
+ decompr_scratch_for_debug);
__ CmpInstanceType(map, FIXED_ARRAY_TYPE);
__ j(equal, &ok);
__ CmpInstanceType(map, FIXED_DOUBLE_ARRAY_TYPE);
@@ -1795,6 +2062,7 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
// Push additional arguments onto the stack.
{
+ Register value = scratch;
__ PopReturnAddressTo(r8);
__ Set(r9, 0);
Label done, push, loop;
@@ -1802,13 +2070,15 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
__ cmpl(r9, rcx);
__ j(equal, &done, Label::kNear);
// Turn the hole into undefined as we go.
- __ movp(r11,
- FieldOperand(rbx, r9, times_pointer_size, FixedArray::kHeaderSize));
- __ CompareRoot(r11, RootIndex::kTheHoleValue);
+ __ LoadAnyTaggedField(
+ value,
+ FieldOperand(rbx, r9, times_tagged_size, FixedArray::kHeaderSize),
+ decompr_scratch, decompr_scratch_for_debug);
+ __ CompareRoot(value, RootIndex::kTheHoleValue);
__ j(not_equal, &push, Label::kNear);
- __ LoadRoot(r11, RootIndex::kUndefinedValue);
+ __ LoadRoot(value, RootIndex::kUndefinedValue);
__ bind(&push);
- __ Push(r11);
+ __ Push(value);
__ incl(r9);
__ jmp(&loop);
__ bind(&done);
@@ -1834,11 +2104,15 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
// -- rcx : start index (to support rest parameters)
// -----------------------------------
+ Register decompr_scratch_for_debug =
+ COMPRESS_POINTERS_BOOL ? kScratchRegister : no_reg;
+
// Check if new.target has a [[Construct]] internal method.
if (mode == CallOrConstructMode::kConstruct) {
Label new_target_constructor, new_target_not_constructor;
__ JumpIfSmi(rdx, &new_target_not_constructor, Label::kNear);
- __ movp(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
+ __ LoadTaggedPointerField(rbx, FieldOperand(rdx, HeapObject::kMapOffset),
+ decompr_scratch_for_debug);
__ testb(FieldOperand(rbx, Map::kBitFieldOffset),
Immediate(Map::IsConstructorBit::kMask));
__ j(not_zero, &new_target_constructor, Label::kNear);
@@ -1860,7 +2134,9 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
__ j(equal, &arguments_adaptor, Label::kNear);
{
__ movp(r8, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
- __ movp(r8, FieldOperand(r8, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadTaggedPointerField(
+ r8, FieldOperand(r8, JSFunction::kSharedFunctionInfoOffset),
+ decompr_scratch_for_debug);
__ movzxwq(
r8, FieldOperand(r8, SharedFunctionInfo::kFormalParameterCountOffset));
__ movp(rbx, rbp);
@@ -1911,13 +2187,18 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
// -- rax : the number of arguments (not including the receiver)
// -- rdi : the function to call (checked to be a JSFunction)
// -----------------------------------
+ Register decompr_scratch_for_debug =
+ COMPRESS_POINTERS_BOOL ? kScratchRegister : no_reg;
+
StackArgumentsAccessor args(rsp, rax);
__ AssertFunction(rdi);
// ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
// Check that the function is not a "classConstructor".
Label class_constructor;
- __ movp(rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadTaggedPointerField(
+ rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset),
+ decompr_scratch_for_debug);
__ testl(FieldOperand(rdx, SharedFunctionInfo::kFlagsOffset),
Immediate(SharedFunctionInfo::IsClassConstructorBit::kMask));
__ j(not_zero, &class_constructor);
@@ -1931,7 +2212,8 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
// Enter the context of the function; ToObject has to run in the function
// context, and we also need to take the global proxy from the function
// context in case of conversion.
- __ movp(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
+ __ LoadTaggedPointerField(rsi, FieldOperand(rdi, JSFunction::kContextOffset),
+ decompr_scratch_for_debug);
// We need to convert the receiver for non-native sloppy mode functions.
Label done_convert;
__ testl(FieldOperand(rdx, SharedFunctionInfo::kFlagsOffset),
@@ -1988,7 +2270,9 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ Pop(rax);
__ SmiUntag(rax, rax);
}
- __ movp(rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadTaggedPointerField(
+ rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset),
+ decompr_scratch_for_debug);
__ bind(&convert_receiver);
}
__ movp(args.GetReceiverOperand(), rcx);
@@ -2027,10 +2311,16 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
// -- rdi : target (checked to be a JSBoundFunction)
// -----------------------------------
+ Register decompr_scratch = COMPRESS_POINTERS_BOOL ? r11 : no_reg;
+ Register decompr_scratch_for_debug =
+ COMPRESS_POINTERS_BOOL ? kScratchRegister : no_reg;
+
// Load [[BoundArguments]] into rcx and length of that into rbx.
Label no_bound_arguments;
- __ movp(rcx, FieldOperand(rdi, JSBoundFunction::kBoundArgumentsOffset));
- __ SmiUntag(rbx, FieldOperand(rcx, FixedArray::kLengthOffset));
+ __ LoadTaggedPointerField(
+ rcx, FieldOperand(rdi, JSBoundFunction::kBoundArgumentsOffset),
+ decompr_scratch_for_debug);
+ __ SmiUntagField(rbx, FieldOperand(rcx, FixedArray::kLengthOffset));
__ testl(rbx, rbx);
__ j(zero, &no_bound_arguments);
{
@@ -2081,14 +2371,22 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
// Copy [[BoundArguments]] to the stack (below the arguments).
{
Label loop;
- __ movp(rcx, FieldOperand(rdi, JSBoundFunction::kBoundArgumentsOffset));
- __ SmiUntag(rbx, FieldOperand(rcx, FixedArray::kLengthOffset));
+ __ LoadTaggedPointerField(
+ rcx, FieldOperand(rdi, JSBoundFunction::kBoundArgumentsOffset),
+ decompr_scratch_for_debug);
+ __ SmiUntagField(rbx, FieldOperand(rcx, FixedArray::kLengthOffset));
__ bind(&loop);
- __ decl(rbx);
- __ movp(kScratchRegister, FieldOperand(rcx, rbx, times_pointer_size,
- FixedArray::kHeaderSize));
- __ movp(Operand(rsp, rax, times_pointer_size, 0), kScratchRegister);
+    // Instead of doing decl(rbx) here, subtract kTaggedSize from the header
+    // offset so that decl(rbx) can be moved right before the loop condition.
+    // This is necessary to avoid corruption of the flags by the pointer
+    // decompression code.
+ __ LoadAnyTaggedField(r12,
+ FieldOperand(rcx, rbx, times_tagged_size,
+ FixedArray::kHeaderSize - kTaggedSize),
+ decompr_scratch, decompr_scratch_for_debug);
+ __ movp(Operand(rsp, rax, times_pointer_size, 0), r12);
__ leal(rax, Operand(rax, 1));
+ __ decl(rbx);
__ j(greater, &loop);
}
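The displacement trick in the comment above can be checked with plain arithmetic: folding -kTaggedSize into the header offset lets the load use the not-yet-decremented index while still addressing the same element. A standalone check with stand-in constants (not V8's actual layout values):

    #include <cassert>
    #include <cstdint>

    int main() {
      constexpr intptr_t kTaggedSize = 8;   // stand-in
      constexpr intptr_t kHeaderSize = 16;  // stand-in
      const intptr_t base = 0x1000;
      for (intptr_t rbx = 3; rbx > 0; --rbx) {
        // Original addressing: decrement first, then use the header offset.
        intptr_t pre_decrement = base + (rbx - 1) * kTaggedSize + kHeaderSize;
        // New addressing: keep the old index, fold -kTaggedSize into the
        // displacement, and decrement right before the branch instead.
        intptr_t folded =
            base + rbx * kTaggedSize + (kHeaderSize - kTaggedSize);
        assert(pre_decrement == folded);  // same element either way
      }
      return 0;
    }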
@@ -2110,16 +2408,24 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
// -----------------------------------
__ AssertBoundFunction(rdi);
+ Register decompr_scratch = COMPRESS_POINTERS_BOOL ? r11 : no_reg;
+ Register decompr_scratch_for_debug =
+ COMPRESS_POINTERS_BOOL ? kScratchRegister : no_reg;
+
// Patch the receiver to [[BoundThis]].
StackArgumentsAccessor args(rsp, rax);
- __ movp(rbx, FieldOperand(rdi, JSBoundFunction::kBoundThisOffset));
+ __ LoadAnyTaggedField(rbx,
+ FieldOperand(rdi, JSBoundFunction::kBoundThisOffset),
+ decompr_scratch, decompr_scratch_for_debug);
__ movp(args.GetReceiverOperand(), rbx);
// Push the [[BoundArguments]] onto the stack.
Generate_PushBoundArguments(masm);
// Call the [[BoundTargetFunction]] via the Call builtin.
- __ movp(rdi, FieldOperand(rdi, JSBoundFunction::kBoundTargetFunctionOffset));
+ __ LoadTaggedPointerField(
+ rdi, FieldOperand(rdi, JSBoundFunction::kBoundTargetFunctionOffset),
+ decompr_scratch_for_debug);
__ Jump(BUILTIN_CODE(masm->isolate(), Call_ReceiverIsAny),
RelocInfo::CODE_TARGET);
}
@@ -2182,12 +2488,17 @@ void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
__ AssertConstructor(rdi);
__ AssertFunction(rdi);
+ Register decompr_scratch_for_debug =
+ COMPRESS_POINTERS_BOOL ? kScratchRegister : no_reg;
+
// Calling convention for function specific ConstructStubs require
// rbx to contain either an AllocationSite or undefined.
__ LoadRoot(rbx, RootIndex::kUndefinedValue);
// Jump to JSBuiltinsConstructStub or JSConstructStubGeneric.
- __ movp(rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadTaggedPointerField(
+ rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset),
+ decompr_scratch_for_debug);
__ testl(FieldOperand(rcx, SharedFunctionInfo::kFlagsOffset),
Immediate(SharedFunctionInfo::ConstructAsBuiltinBit::kMask));
__ Jump(BUILTIN_CODE(masm->isolate(), JSBuiltinsConstructStub),
@@ -2207,6 +2518,9 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
__ AssertConstructor(rdi);
__ AssertBoundFunction(rdi);
+ Register decompr_scratch_for_debug =
+ COMPRESS_POINTERS_BOOL ? kScratchRegister : no_reg;
+
// Push the [[BoundArguments]] onto the stack.
Generate_PushBoundArguments(masm);
@@ -2215,13 +2529,16 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
Label done;
__ cmpp(rdi, rdx);
__ j(not_equal, &done, Label::kNear);
- __ movp(rdx,
- FieldOperand(rdi, JSBoundFunction::kBoundTargetFunctionOffset));
+ __ LoadTaggedPointerField(
+ rdx, FieldOperand(rdi, JSBoundFunction::kBoundTargetFunctionOffset),
+ decompr_scratch_for_debug);
__ bind(&done);
}
// Construct the [[BoundTargetFunction]] via the Construct builtin.
- __ movp(rdi, FieldOperand(rdi, JSBoundFunction::kBoundTargetFunctionOffset));
+ __ LoadTaggedPointerField(
+ rdi, FieldOperand(rdi, JSBoundFunction::kBoundTargetFunctionOffset),
+ decompr_scratch_for_debug);
__ Jump(BUILTIN_CODE(masm->isolate(), Construct), RelocInfo::CODE_TARGET);
}
@@ -2235,12 +2552,16 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
// -----------------------------------
StackArgumentsAccessor args(rsp, rax);
+ Register decompr_scratch_for_debug =
+ COMPRESS_POINTERS_BOOL ? kScratchRegister : no_reg;
+
// Check if target is a Smi.
Label non_constructor;
__ JumpIfSmi(rdi, &non_constructor);
// Check if target has a [[Construct]] internal method.
- __ movq(rcx, FieldOperand(rdi, HeapObject::kMapOffset));
+ __ LoadTaggedPointerField(rcx, FieldOperand(rdi, HeapObject::kMapOffset),
+ decompr_scratch_for_debug);
__ testb(FieldOperand(rcx, Map::kBitFieldOffset),
Immediate(Map::IsConstructorBit::kMask));
__ j(zero, &non_constructor);
@@ -2303,15 +2624,16 @@ void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
__ leave();
// Load deoptimization data from the code object.
- __ movp(rbx, Operand(rax, Code::kDeoptimizationDataOffset - kHeapObjectTag));
+ __ LoadTaggedPointerField(rbx,
+ FieldOperand(rax, Code::kDeoptimizationDataOffset));
// Load the OSR entrypoint offset from the deoptimization data.
- __ SmiUntag(rbx, Operand(rbx, FixedArray::OffsetOfElementAt(
- DeoptimizationData::kOsrPcOffsetIndex) -
- kHeapObjectTag));
+ __ SmiUntagField(
+ rbx, FieldOperand(rbx, FixedArray::OffsetOfElementAt(
+ DeoptimizationData::kOsrPcOffsetIndex)));
// Compute the target address = code_obj + header_size + osr_offset
- __ leap(rax, Operand(rax, rbx, times_1, Code::kHeaderSize - kHeapObjectTag));
+ __ leap(rax, FieldOperand(rax, rbx, times_1, Code::kHeaderSize));
// Overwrite the return address on the stack.
__ movq(StackOperandForReturnAddress(0), rax);
@@ -2353,11 +2675,16 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
// Push the function index as second argument.
__ Push(r11);
// Load the correct CEntry builtin from the instance object.
- __ movp(rcx, FieldOperand(kWasmInstanceRegister,
- WasmInstanceObject::kCEntryStubOffset));
+ Register decompr_scratch_for_debug =
+ COMPRESS_POINTERS_BOOL ? kScratchRegister : no_reg;
+ __ LoadTaggedPointerField(
+ rcx,
+ FieldOperand(kWasmInstanceRegister,
+ WasmInstanceObject::kCEntryStubOffset),
+ decompr_scratch_for_debug);
// Initialize the JavaScript context with 0. CEntry will use it to
// set the current context on the isolate.
- __ Move(kContextRegister, Smi::kZero);
+ __ Move(kContextRegister, Smi::zero());
__ CallRuntimeWithCEntry(Runtime::kWasmCompileLazy, rcx);
// The entrypoint address is the return value.
__ movq(r11, kReturnRegister0);
@@ -2389,8 +2716,6 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
// If argv_mode == kArgvInRegister:
// r15: pointer to the first argument
- ProfileEntryHookStub::MaybeCallEntryHook(masm);
-
#ifdef _WIN64
// Windows 64-bit ABI passes arguments in rcx, rdx, r8, r9. It requires the
// stack to be aligned to 16 bytes. It only allows a single-word to be
@@ -2483,7 +2808,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
ExternalReference pending_exception_address = ExternalReference::Create(
IsolateAddressId::kPendingExceptionAddress, masm->isolate());
Operand pending_exception_operand =
- masm->ExternalOperand(pending_exception_address);
+ masm->ExternalReferenceAsOperand(pending_exception_address);
__ cmpp(r14, pending_exception_operand);
__ j(equal, &okay, Label::kNear);
__ int3();
@@ -2520,9 +2845,10 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
__ CallCFunction(find_handler, 3);
}
// Retrieve the handler context, SP and FP.
- __ movp(rsi, masm->ExternalOperand(pending_handler_context_address));
- __ movp(rsp, masm->ExternalOperand(pending_handler_sp_address));
- __ movp(rbp, masm->ExternalOperand(pending_handler_fp_address));
+ __ movp(rsi,
+ masm->ExternalReferenceAsOperand(pending_handler_context_address));
+ __ movp(rsp, masm->ExternalReferenceAsOperand(pending_handler_sp_address));
+ __ movp(rbp, masm->ExternalReferenceAsOperand(pending_handler_fp_address));
// If the handler is a JS frame, restore the context to the frame. Note that
// the context will be set to (rsi == 0) for non-JS frames.
@@ -2539,7 +2865,8 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
__ ResetSpeculationPoisonRegister();
// Compute the handler entry address and jump to it.
- __ movp(rdi, masm->ExternalOperand(pending_handler_entrypoint_address));
+ __ movp(rdi,
+ masm->ExternalReferenceAsOperand(pending_handler_entrypoint_address));
__ jmp(rdi);
}
@@ -2737,53 +3064,6 @@ void Builtins::Generate_MathPowInternal(MacroAssembler* masm) {
__ ret(0);
}
-namespace {
-
-void GenerateInternalArrayConstructorCase(MacroAssembler* masm,
- ElementsKind kind) {
- Label not_zero_case, not_one_case;
- Label normal_sequence;
-
- __ testp(rax, rax);
- __ j(not_zero, &not_zero_case);
- __ Jump(CodeFactory::InternalArrayNoArgumentConstructor(masm->isolate(), kind)
- .code(),
- RelocInfo::CODE_TARGET);
-
- __ bind(&not_zero_case);
- __ cmpl(rax, Immediate(1));
- __ j(greater, &not_one_case);
-
- if (IsFastPackedElementsKind(kind)) {
- // We might need to create a holey array
- // look at the first argument
- StackArgumentsAccessor args(rsp, 1, ARGUMENTS_DONT_CONTAIN_RECEIVER);
- __ movp(rcx, args.GetArgumentOperand(0));
- __ testp(rcx, rcx);
- __ j(zero, &normal_sequence);
-
- __ Jump(CodeFactory::InternalArraySingleArgumentConstructor(
- masm->isolate(), GetHoleyElementsKind(kind))
- .code(),
- RelocInfo::CODE_TARGET);
- }
-
- __ bind(&normal_sequence);
- __ Jump(
- CodeFactory::InternalArraySingleArgumentConstructor(masm->isolate(), kind)
- .code(),
- RelocInfo::CODE_TARGET);
-
- __ bind(&not_one_case);
- // Load undefined into the allocation site parameter as required by
- // ArrayNArgumentsConstructor.
- __ LoadRoot(kJavaScriptCallExtraArg1Register, RootIndex::kUndefinedValue);
- Handle<Code> code = BUILTIN_CODE(masm->isolate(), ArrayNArgumentsConstructor);
- __ Jump(code, RelocInfo::CODE_TARGET);
-}
-
-} // namespace
-
void Builtins::Generate_InternalArrayConstructorImpl(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : argc
@@ -2792,47 +3072,459 @@ void Builtins::Generate_InternalArrayConstructorImpl(MacroAssembler* masm) {
// -- rsp[8] : last argument
// -----------------------------------
+ Register decompr_scratch_for_debug =
+ COMPRESS_POINTERS_BOOL ? kScratchRegister : no_reg;
+
if (FLAG_debug_code) {
// The array construct code is only set for the global and natives
// builtin Array functions which always have maps.
// Initial map for the builtin Array function should be a map.
- __ movp(rcx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
+ __ LoadTaggedPointerField(
+ rcx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset),
+ decompr_scratch_for_debug);
// Will both indicate a nullptr and a Smi.
STATIC_ASSERT(kSmiTag == 0);
Condition not_smi = NegateCondition(masm->CheckSmi(rcx));
__ Check(not_smi, AbortReason::kUnexpectedInitialMapForArrayFunction);
__ CmpObjectType(rcx, MAP_TYPE, rcx);
__ Check(equal, AbortReason::kUnexpectedInitialMapForArrayFunction);
- }
- // Figure out the right elements kind
- __ movp(rcx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
+ // Figure out the right elements kind
+ __ LoadTaggedPointerField(
+ rcx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset),
+ decompr_scratch_for_debug);
- // Load the map's "bit field 2" into |result|. We only need the first byte,
- // but the following masking takes care of that anyway.
- __ movzxbp(rcx, FieldOperand(rcx, Map::kBitField2Offset));
- // Retrieve elements_kind from bit field 2.
- __ DecodeField<Map::ElementsKindBits>(rcx);
+ // Load the map's "bit field 2" into |result|. We only need the first byte,
+ // but the following masking takes care of that anyway.
+ __ movzxbp(rcx, FieldOperand(rcx, Map::kBitField2Offset));
+ // Retrieve elements_kind from bit field 2.
+ __ DecodeField<Map::ElementsKindBits>(rcx);
- if (FLAG_debug_code) {
- Label done;
+ // Initial elements kind should be packed elements.
__ cmpl(rcx, Immediate(PACKED_ELEMENTS));
- __ j(equal, &done);
- __ cmpl(rcx, Immediate(HOLEY_ELEMENTS));
- __ Assert(
- equal,
- AbortReason::kInvalidElementsKindForInternalArrayOrInternalPackedArray);
- __ bind(&done);
+ __ Assert(equal, AbortReason::kInvalidElementsKindForInternalPackedArray);
+
+ // No arguments should be passed.
+ __ testp(rax, rax);
+ __ Assert(zero, AbortReason::kWrongNumberOfArgumentsForInternalPackedArray);
+ }
+
+ __ Jump(
+ BUILTIN_CODE(masm->isolate(), InternalArrayNoArgumentConstructor_Packed),
+ RelocInfo::CODE_TARGET);
+}
+
+namespace {
+
+int Offset(ExternalReference ref0, ExternalReference ref1) {
+ int64_t offset = (ref0.address() - ref1.address());
+  // Check that the offset fits into an int.
+ DCHECK(static_cast<int>(offset) == offset);
+ return static_cast<int>(offset);
+}
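Offset() exists so that a single register loaded with next_address can reach the handle-scope limit and level fields via immediate displacements, as the kLimitOffset/kLevelOffset constants below show. A runnable sketch with made-up addresses:

    #include <cassert>
    #include <cstdint>

    // Hypothetical addresses for illustration; the real ones come from the
    // Isolate's ExternalReference table.
    int Offset(uint64_t ref0, uint64_t ref1) {
      int64_t offset = static_cast<int64_t>(ref0 - ref1);
      assert(static_cast<int>(offset) == offset);  // must fit into an int
      return static_cast<int>(offset);
    }

    int main() {
      uint64_t next = 0x5000;   // handle_scope_next_address
      uint64_t limit = 0x5008;  // handle_scope_limit_address
      uint64_t level = 0x5010;  // handle_scope_level_address
      assert(Offset(limit, next) == 8);   // kLimitOffset
      assert(Offset(level, next) == 16);  // kLevelOffset
      return 0;
    }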
+
+// Calls an API function. Allocates a HandleScope, extracts the returned value
+// from the handle, and propagates exceptions. Clobbers r14, r15, rbx and
+// caller-save registers. Restores context. On return, removes
+// stack_space * kSystemPointerSize bytes (of GCed stack space).
+void CallApiFunctionAndReturn(MacroAssembler* masm, Register function_address,
+ ExternalReference thunk_ref,
+ Register thunk_last_arg, int stack_space,
+ Operand* stack_space_operand,
+ Operand return_value_operand) {
+ Label prologue;
+ Label promote_scheduled_exception;
+ Label delete_allocated_handles;
+ Label leave_exit_frame;
+ Label write_back;
+
+ Isolate* isolate = masm->isolate();
+ Factory* factory = isolate->factory();
+ ExternalReference next_address =
+ ExternalReference::handle_scope_next_address(isolate);
+ const int kNextOffset = 0;
+ const int kLimitOffset = Offset(
+ ExternalReference::handle_scope_limit_address(isolate), next_address);
+ const int kLevelOffset = Offset(
+ ExternalReference::handle_scope_level_address(isolate), next_address);
+ ExternalReference scheduled_exception_address =
+ ExternalReference::scheduled_exception_address(isolate);
+
+ DCHECK(rdx == function_address || r8 == function_address);
+ // Allocate HandleScope in callee-save registers.
+ Register prev_next_address_reg = r14;
+ Register prev_limit_reg = rbx;
+ Register base_reg = r15;
+ __ Move(base_reg, next_address);
+ __ movp(prev_next_address_reg, Operand(base_reg, kNextOffset));
+ __ movp(prev_limit_reg, Operand(base_reg, kLimitOffset));
+ __ addl(Operand(base_reg, kLevelOffset), Immediate(1));
+
+ if (FLAG_log_timer_events) {
+ FrameScope frame(masm, StackFrame::MANUAL);
+ __ PushSafepointRegisters();
+ __ PrepareCallCFunction(1);
+ __ LoadAddress(arg_reg_1, ExternalReference::isolate_address(isolate));
+ __ CallCFunction(ExternalReference::log_enter_external_function(), 1);
+ __ PopSafepointRegisters();
}
- Label fast_elements_case;
- __ cmpl(rcx, Immediate(PACKED_ELEMENTS));
- __ j(equal, &fast_elements_case);
- GenerateInternalArrayConstructorCase(masm, HOLEY_ELEMENTS);
+ Label profiler_disabled;
+ Label end_profiler_check;
+ __ Move(rax, ExternalReference::is_profiling_address(isolate));
+ __ cmpb(Operand(rax, 0), Immediate(0));
+ __ j(zero, &profiler_disabled);
+
+ // Third parameter is the address of the actual getter function.
+ __ Move(thunk_last_arg, function_address);
+ __ Move(rax, thunk_ref);
+ __ jmp(&end_profiler_check);
+
+ __ bind(&profiler_disabled);
+  // Profiler is disabled; use the api function address directly.
+ __ Move(rax, function_address);
+
+ __ bind(&end_profiler_check);
+
+ // Call the api function!
+ __ call(rax);
+
+ if (FLAG_log_timer_events) {
+ FrameScope frame(masm, StackFrame::MANUAL);
+ __ PushSafepointRegisters();
+ __ PrepareCallCFunction(1);
+ __ LoadAddress(arg_reg_1, ExternalReference::isolate_address(isolate));
+ __ CallCFunction(ExternalReference::log_leave_external_function(), 1);
+ __ PopSafepointRegisters();
+ }
+
+ // Load the value from ReturnValue
+ __ movp(rax, return_value_operand);
+ __ bind(&prologue);
+
+ // No more valid handles (the result handle was the last one). Restore
+ // previous handle scope.
+ __ subl(Operand(base_reg, kLevelOffset), Immediate(1));
+ __ movp(Operand(base_reg, kNextOffset), prev_next_address_reg);
+ __ cmpp(prev_limit_reg, Operand(base_reg, kLimitOffset));
+ __ j(not_equal, &delete_allocated_handles);
+
+ // Leave the API exit frame.
+ __ bind(&leave_exit_frame);
+ if (stack_space_operand != nullptr) {
+ DCHECK_EQ(stack_space, 0);
+ __ movp(rbx, *stack_space_operand);
+ }
+ __ LeaveApiExitFrame();
+
+ // Check if the function scheduled an exception.
+ __ Move(rdi, scheduled_exception_address);
+ __ Cmp(Operand(rdi, 0), factory->the_hole_value());
+ __ j(not_equal, &promote_scheduled_exception);
+
+#if DEBUG
+ // Check if the function returned a valid JavaScript value.
+ Label ok;
+ Register return_value = rax;
+ Register map = rcx;
+
+ __ JumpIfSmi(return_value, &ok, Label::kNear);
+ Register decompr_scratch_for_debug =
+ COMPRESS_POINTERS_BOOL ? kScratchRegister : no_reg;
+ __ LoadTaggedPointerField(map,
+ FieldOperand(return_value, HeapObject::kMapOffset),
+ decompr_scratch_for_debug);
+
+ __ CmpInstanceType(map, LAST_NAME_TYPE);
+ __ j(below_equal, &ok, Label::kNear);
+
+ __ CmpInstanceType(map, FIRST_JS_RECEIVER_TYPE);
+ __ j(above_equal, &ok, Label::kNear);
+
+ __ CompareRoot(map, RootIndex::kHeapNumberMap);
+ __ j(equal, &ok, Label::kNear);
+
+ __ CompareRoot(return_value, RootIndex::kUndefinedValue);
+ __ j(equal, &ok, Label::kNear);
+
+ __ CompareRoot(return_value, RootIndex::kTrueValue);
+ __ j(equal, &ok, Label::kNear);
+
+ __ CompareRoot(return_value, RootIndex::kFalseValue);
+ __ j(equal, &ok, Label::kNear);
+
+ __ CompareRoot(return_value, RootIndex::kNullValue);
+ __ j(equal, &ok, Label::kNear);
+
+ __ Abort(AbortReason::kAPICallReturnedInvalidObject);
+
+ __ bind(&ok);
+#endif
+
+ if (stack_space_operand == nullptr) {
+ DCHECK_NE(stack_space, 0);
+ __ ret(stack_space * kSystemPointerSize);
+ } else {
+ DCHECK_EQ(stack_space, 0);
+ __ PopReturnAddressTo(rcx);
+ __ addq(rsp, rbx);
+ __ jmp(rcx);
+ }
+
+ // Re-throw by promoting a scheduled exception.
+ __ bind(&promote_scheduled_exception);
+ __ TailCallRuntime(Runtime::kPromoteScheduledException);
+
+ // HandleScope limit has changed. Delete allocated extensions.
+ __ bind(&delete_allocated_handles);
+ __ movp(Operand(base_reg, kLimitOffset), prev_limit_reg);
+ __ movp(prev_limit_reg, rax);
+ __ LoadAddress(arg_reg_1, ExternalReference::isolate_address(isolate));
+ __ LoadAddress(rax, ExternalReference::delete_handle_scope_extensions());
+ __ call(rax);
+ __ movp(rax, prev_limit_reg);
+ __ jmp(&leave_exit_frame);
+}
+
+} // namespace
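In outline, CallApiFunctionAndReturn brackets the C++ callback with manual handle-scope bookkeeping: save next/limit, bump the level, call out, then unwind and free any extra handle blocks. A hedged C++ paraphrase with descriptive field names (not V8's actual layout):

    struct HandleScopeData {
      void* next;
      void* limit;
      int level;
    };

    void CallWithScope(HandleScopeData* data, void (*api_fn)()) {
      void* prev_next = data->next;
      void* prev_limit = data->limit;
      data->level++;           // open the scope
      api_fn();                // may allocate handles, moving next/limit
      data->level--;
      data->next = prev_next;  // drop handles created by the callback
      if (data->limit != prev_limit) {
        // Extra handle blocks were allocated; the builtin jumps to
        // delete_allocated_handles here to free them.
      }
    }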
+
+// TODO(jgruber): Instead of explicitly setting up implicit_args_ on the stack
+// in CallApiCallback, we could use the calling convention to set up the stack
+// correctly in the first place.
+//
+// TODO(jgruber): I suspect that most of CallApiCallback could be implemented
+// as a C++ trampoline, vastly simplifying the assembly implementation.
+
+void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- rsi : kTargetContext
+ // -- rdx : kApiFunctionAddress
+ // -- rcx : kArgc
+ // --
+ // -- rsp[0] : return address
+ // -- rsp[8] : last argument
+ // -- ...
+ // -- rsp[argc * 8] : first argument
+ // -- rsp[(argc + 1) * 8] : receiver
+ // -- rsp[(argc + 2) * 8] : kHolder
+ // -- rsp[(argc + 3) * 8] : kCallData
+ // -----------------------------------
+
+ Register api_function_address = rdx;
+ Register argc = rcx;
+
+ DCHECK(!AreAliased(api_function_address, argc, kScratchRegister));
+
+ // Stack offsets (without argc).
+ static constexpr int kReceiverOffset = kSystemPointerSize;
+ static constexpr int kHolderOffset = kReceiverOffset + kSystemPointerSize;
+ static constexpr int kCallDataOffset = kHolderOffset + kSystemPointerSize;
+
+ // Extra stack arguments are: the receiver, kHolder, kCallData.
+ static constexpr int kExtraStackArgumentCount = 3;
+
+ typedef FunctionCallbackArguments FCA;
+
+ STATIC_ASSERT(FCA::kArgsLength == 6);
+ STATIC_ASSERT(FCA::kNewTargetIndex == 5);
+ STATIC_ASSERT(FCA::kDataIndex == 4);
+ STATIC_ASSERT(FCA::kReturnValueOffset == 3);
+ STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
+ STATIC_ASSERT(FCA::kIsolateIndex == 1);
+ STATIC_ASSERT(FCA::kHolderIndex == 0);
+
+ // Set up FunctionCallbackInfo's implicit_args on the stack as follows:
+ //
+ // Current state:
+ // rsp[0]: return address
+ //
+ // Target state:
+ // rsp[0 * kSystemPointerSize]: return address
+ // rsp[1 * kSystemPointerSize]: kHolder
+ // rsp[2 * kSystemPointerSize]: kIsolate
+ // rsp[3 * kSystemPointerSize]: undefined (kReturnValueDefaultValue)
+ // rsp[4 * kSystemPointerSize]: undefined (kReturnValue)
+ // rsp[5 * kSystemPointerSize]: kData
+ // rsp[6 * kSystemPointerSize]: undefined (kNewTarget)
+
+ // Reserve space on the stack.
+ __ subp(rsp, Immediate(FCA::kArgsLength * kSystemPointerSize));
+
+ // Return address (the old stack location is overwritten later on).
+ __ movp(kScratchRegister,
+ Operand(rsp, FCA::kArgsLength * kSystemPointerSize));
+ __ movp(Operand(rsp, 0 * kSystemPointerSize), kScratchRegister);
+
+ // kHolder.
+ __ movp(kScratchRegister,
+ Operand(rsp, argc, times_pointer_size,
+ FCA::kArgsLength * kSystemPointerSize + kHolderOffset));
+ __ movp(Operand(rsp, 1 * kSystemPointerSize), kScratchRegister);
+
+ // kIsolate.
+ __ Move(kScratchRegister,
+ ExternalReference::isolate_address(masm->isolate()));
+ __ movp(Operand(rsp, 2 * kSystemPointerSize), kScratchRegister);
+
+ // kReturnValueDefaultValue, kReturnValue, and kNewTarget.
+ __ LoadRoot(kScratchRegister, RootIndex::kUndefinedValue);
+ __ movp(Operand(rsp, 3 * kSystemPointerSize), kScratchRegister);
+ __ movp(Operand(rsp, 4 * kSystemPointerSize), kScratchRegister);
+ __ movp(Operand(rsp, 6 * kSystemPointerSize), kScratchRegister);
+
+ // kData.
+ __ movp(kScratchRegister,
+ Operand(rsp, argc, times_pointer_size,
+ FCA::kArgsLength * kSystemPointerSize + kCallDataOffset));
+ __ movp(Operand(rsp, 5 * kSystemPointerSize), kScratchRegister);
+
+ // Keep a pointer to kHolder (= implicit_args) in a scratch register.
+ // We use it below to set up the FunctionCallbackInfo object.
+ Register scratch = rbx;
+ __ leap(scratch, Operand(rsp, 1 * kSystemPointerSize));
+
+ // Allocate the v8::Arguments structure in the arguments' space since
+ // it's not controlled by GC.
+ static constexpr int kApiStackSpace = 4;
+ __ EnterApiExitFrame(kApiStackSpace);
+
+ // FunctionCallbackInfo::implicit_args_ (points at kHolder as set up above).
+ __ movp(StackSpaceOperand(0), scratch);
+
+ // FunctionCallbackInfo::values_ (points at the first varargs argument passed
+ // on the stack).
+ __ leap(scratch, Operand(scratch, argc, times_pointer_size,
+ (FCA::kArgsLength - 1) * kSystemPointerSize));
+ __ movp(StackSpaceOperand(1), scratch);
+
+ // FunctionCallbackInfo::length_.
+ __ movp(StackSpaceOperand(2), argc);
+
+ // We also store the number of bytes to drop from the stack after returning
+ // from the API function here.
+ __ leaq(kScratchRegister,
+ Operand(argc, times_pointer_size,
+ (FCA::kArgsLength + kExtraStackArgumentCount) *
+ kSystemPointerSize));
+ __ movp(StackSpaceOperand(3), kScratchRegister);
+
+ Register arguments_arg = arg_reg_1;
+ Register callback_arg = arg_reg_2;
+
+ // It's okay if api_function_address == callback_arg
+ // but not arguments_arg
+ DCHECK(api_function_address != arguments_arg);
+
+ // v8::InvocationCallback's argument.
+ __ leap(arguments_arg, StackSpaceOperand(0));
+
+ ExternalReference thunk_ref = ExternalReference::invoke_function_callback();
+
+ // There are two stack slots above the arguments we constructed on the stack:
+  // the stored rbp (pushed by EnterApiExitFrame), and the return address.
+ static constexpr int kStackSlotsAboveFCA = 2;
+ Operand return_value_operand(
+ rbp,
+ (kStackSlotsAboveFCA + FCA::kReturnValueOffset) * kSystemPointerSize);
+
+ static constexpr int kUseStackSpaceOperand = 0;
+ Operand stack_space_operand = StackSpaceOperand(3);
+ CallApiFunctionAndReturn(masm, api_function_address, thunk_ref, callback_arg,
+ kUseStackSpaceOperand, &stack_space_operand,
+ return_value_operand);
+}
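The number stored in StackSpaceOperand(3) is the byte count the epilogue pops after the call: argc JavaScript arguments plus the six implicit args plus the three extra stack arguments. A worked check for argc == 2 on x64 (8-byte slots):

    #include <cassert>

    int main() {
      constexpr int kSystemPointerSize = 8;
      constexpr int kArgsLength = 6;               // FCA::kArgsLength
      constexpr int kExtraStackArgumentCount = 3;  // receiver, kHolder, kCallData
      int argc = 2;
      int bytes_to_drop =
          (argc + kArgsLength + kExtraStackArgumentCount) * kSystemPointerSize;
      assert(bytes_to_drop == 88);  // matches the leaq computation above
      return 0;
    }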
+
+void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
+ Register name_arg = arg_reg_1;
+ Register accessor_info_arg = arg_reg_2;
+ Register getter_arg = arg_reg_3;
+ Register api_function_address = r8;
+ Register receiver = ApiGetterDescriptor::ReceiverRegister();
+ Register holder = ApiGetterDescriptor::HolderRegister();
+ Register callback = ApiGetterDescriptor::CallbackRegister();
+ Register scratch = rax;
+ Register decompr_scratch1 = COMPRESS_POINTERS_BOOL ? r11 : no_reg;
+ Register decompr_scratch2 = COMPRESS_POINTERS_BOOL ? r12 : no_reg;
+ Register decompr_scratch_for_debug =
+ COMPRESS_POINTERS_BOOL ? kScratchRegister : no_reg;
+
+ DCHECK(!AreAliased(receiver, holder, callback, scratch, decompr_scratch1,
+ decompr_scratch2, decompr_scratch_for_debug));
+
+ // Build v8::PropertyCallbackInfo::args_ array on the stack and push property
+ // name below the exit frame to make GC aware of them.
+ STATIC_ASSERT(PropertyCallbackArguments::kShouldThrowOnErrorIndex == 0);
+ STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 1);
+ STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 2);
+ STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 3);
+ STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 4);
+ STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 5);
+ STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 6);
+ STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 7);
+
+ // Insert additional parameters into the stack frame above return address.
+ __ PopReturnAddressTo(scratch);
+ __ Push(receiver);
+ __ PushTaggedAnyField(FieldOperand(callback, AccessorInfo::kDataOffset),
+ decompr_scratch1, decompr_scratch2,
+ decompr_scratch_for_debug);
+ __ LoadRoot(kScratchRegister, RootIndex::kUndefinedValue);
+ __ Push(kScratchRegister); // return value
+ __ Push(kScratchRegister); // return value default
+ __ PushAddress(ExternalReference::isolate_address(masm->isolate()));
+ __ Push(holder);
+ __ Push(Smi::zero()); // should_throw_on_error -> false
+ __ PushTaggedPointerField(FieldOperand(callback, AccessorInfo::kNameOffset),
+ decompr_scratch1, decompr_scratch_for_debug);
+ __ PushReturnAddressFrom(scratch);
+
+ // v8::PropertyCallbackInfo::args_ array and name handle.
+ const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
+
+ // Allocate v8::PropertyCallbackInfo in non-GCed stack space.
+ const int kArgStackSpace = 1;
+
+ // Load address of v8::PropertyAccessorInfo::args_ array.
+ __ leap(scratch, Operand(rsp, 2 * kSystemPointerSize));
+
+ __ EnterApiExitFrame(kArgStackSpace);
+
+ // Create v8::PropertyCallbackInfo object on the stack and initialize
+  // its args_ field.
+ Operand info_object = StackSpaceOperand(0);
+ __ movp(info_object, scratch);
+
+ __ leap(name_arg, Operand(scratch, -kSystemPointerSize));
+ // The context register (rsi) has been saved in EnterApiExitFrame and
+ // could be used to pass arguments.
+ __ leap(accessor_info_arg, info_object);
+
+ ExternalReference thunk_ref =
+ ExternalReference::invoke_accessor_getter_callback();
+
+ // It's okay if api_function_address == getter_arg
+ // but not accessor_info_arg or name_arg
+ DCHECK(api_function_address != accessor_info_arg);
+ DCHECK(api_function_address != name_arg);
+ __ LoadTaggedPointerField(
+ scratch, FieldOperand(callback, AccessorInfo::kJsGetterOffset),
+ decompr_scratch_for_debug);
+ __ movp(api_function_address,
+ FieldOperand(scratch, Foreign::kForeignAddressOffset));
+
+  // +3 is to skip the prologue, the return address and the name handle.
+ Operand return_value_operand(
+ rbp,
+ (PropertyCallbackArguments::kReturnValueOffset + 3) * kSystemPointerSize);
+ Operand* const kUseStackSpaceConstant = nullptr;
+ CallApiFunctionAndReturn(masm, api_function_address, thunk_ref, getter_arg,
+ kStackUnwindSpace, kUseStackSpaceConstant,
+ return_value_operand);
+}
- __ bind(&fast_elements_case);
- GenerateInternalArrayConstructorCase(masm, PACKED_ELEMENTS);
+void Builtins::Generate_DirectCEntry(MacroAssembler* masm) {
+ __ int3(); // Unused on this architecture.
}
#undef __
diff --git a/deps/v8/src/cancelable-task.cc b/deps/v8/src/cancelable-task.cc
index 1876773e7a..dc89128229 100644
--- a/deps/v8/src/cancelable-task.cc
+++ b/deps/v8/src/cancelable-task.cc
@@ -10,46 +10,55 @@
namespace v8 {
namespace internal {
-
-Cancelable::Cancelable(CancelableTaskManager* parent)
- : parent_(parent), status_(kWaiting), id_(0), cancel_counter_(0) {
- id_ = parent->Register(this);
-}
-
-
Cancelable::~Cancelable() {
// The following check is needed to avoid calling an already terminated
// manager object. This happens when the manager cancels all pending tasks
// in {CancelAndWait} only before destroying the manager object.
- if (TryRun() || IsRunning()) {
+ Status previous;
+ if (TryRun(&previous) || previous == kRunning) {
parent_->RemoveFinishedTask(id_);
}
}
CancelableTaskManager::CancelableTaskManager()
- : task_id_counter_(0), canceled_(false) {}
+ : task_id_counter_(kInvalidTaskId), canceled_(false) {}
+
+CancelableTaskManager::~CancelableTaskManager() {
+ // It is required that {CancelAndWait} is called before the manager object is
+ // destroyed. This guarantees that all tasks managed by this
+ // {CancelableTaskManager} are either canceled or finished their execution
+ // when the {CancelableTaskManager} dies.
+ CHECK(canceled_);
+}
CancelableTaskManager::Id CancelableTaskManager::Register(Cancelable* task) {
- base::LockGuard<base::Mutex> guard(&mutex_);
+ base::MutexGuard guard(&mutex_);
+ if (canceled_) {
+ // The CancelableTaskManager has already been canceled. Therefore we mark
+ // the new task immediately as canceled so that it does not get executed.
+ task->Cancel();
+ return kInvalidTaskId;
+ }
CancelableTaskManager::Id id = ++task_id_counter_;
// Id overflows are not supported.
- CHECK_NE(0, id);
+ CHECK_NE(kInvalidTaskId, id);
CHECK(!canceled_);
cancelable_tasks_[id] = task;
return id;
}
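With this change, registering against a manager that has already been canceled is no longer a contract violation: the task is canceled up front and gets kInvalidTaskId. An illustrative sketch (not a real V8 test) of the new behavior:

    #include "src/cancelable-task.h"

    using v8::internal::CancelableTask;
    using v8::internal::CancelableTaskManager;

    // Trivial task type, only here to exercise registration.
    class NoopTask final : public CancelableTask {
     public:
      explicit NoopTask(CancelableTaskManager* manager)
          : CancelableTask(manager) {}
      void RunInternal() override {}
    };

    void Demo() {
      CancelableTaskManager manager;
      manager.CancelAndWait();  // manager is now canceled
      NoopTask task(&manager);  // Register() cancels the task immediately
      // task.id() is kInvalidTaskId (0); TryRun() would fail, so
      // RunInternal() is never reached.
    }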
void CancelableTaskManager::RemoveFinishedTask(CancelableTaskManager::Id id) {
- base::LockGuard<base::Mutex> guard(&mutex_);
+ CHECK_NE(kInvalidTaskId, id);
+ base::MutexGuard guard(&mutex_);
size_t removed = cancelable_tasks_.erase(id);
USE(removed);
DCHECK_NE(0u, removed);
cancelable_tasks_barrier_.NotifyOne();
}
-CancelableTaskManager::TryAbortResult CancelableTaskManager::TryAbort(
- CancelableTaskManager::Id id) {
- base::LockGuard<base::Mutex> guard(&mutex_);
+TryAbortResult CancelableTaskManager::TryAbort(CancelableTaskManager::Id id) {
+ CHECK_NE(kInvalidTaskId, id);
+ base::MutexGuard guard(&mutex_);
auto entry = cancelable_tasks_.find(id);
if (entry != cancelable_tasks_.end()) {
Cancelable* value = entry->second;
@@ -57,12 +66,12 @@ CancelableTaskManager::TryAbortResult CancelableTaskManager::TryAbort(
// Cannot call RemoveFinishedTask here because of recursive locking.
cancelable_tasks_.erase(entry);
cancelable_tasks_barrier_.NotifyOne();
- return kTaskAborted;
+ return TryAbortResult::kTaskAborted;
} else {
- return kTaskRunning;
+ return TryAbortResult::kTaskRunning;
}
}
- return kTaskRemoved;
+ return TryAbortResult::kTaskRemoved;
}
void CancelableTaskManager::CancelAndWait() {
@@ -70,7 +79,7 @@ void CancelableTaskManager::CancelAndWait() {
// the way if possible, i.e., if they have not started yet. After each round
// of canceling we wait for the background tasks that have already been
// started.
- base::LockGuard<base::Mutex> guard(&mutex_);
+ base::MutexGuard guard(&mutex_);
canceled_ = true;
// Cancelable tasks could be running or could potentially register new
@@ -91,12 +100,12 @@ void CancelableTaskManager::CancelAndWait() {
}
}
-CancelableTaskManager::TryAbortResult CancelableTaskManager::TryAbortAll() {
+TryAbortResult CancelableTaskManager::TryAbortAll() {
// Clean up all cancelable fore- and background tasks. Tasks are canceled on
// the way if possible, i.e., if they have not started yet.
- base::LockGuard<base::Mutex> guard(&mutex_);
+ base::MutexGuard guard(&mutex_);
- if (cancelable_tasks_.empty()) return kTaskRemoved;
+ if (cancelable_tasks_.empty()) return TryAbortResult::kTaskRemoved;
for (auto it = cancelable_tasks_.begin(); it != cancelable_tasks_.end();) {
if (it->second->Cancel()) {
@@ -106,7 +115,8 @@ CancelableTaskManager::TryAbortResult CancelableTaskManager::TryAbortAll() {
}
}
- return cancelable_tasks_.empty() ? kTaskAborted : kTaskRunning;
+ return cancelable_tasks_.empty() ? TryAbortResult::kTaskAborted
+ : TryAbortResult::kTaskRunning;
}
CancelableTask::CancelableTask(Isolate* isolate)
diff --git a/deps/v8/src/cancelable-task.h b/deps/v8/src/cancelable-task.h
index 0ef3ca5a15..a82f2b605e 100644
--- a/deps/v8/src/cancelable-task.h
+++ b/deps/v8/src/cancelable-task.h
@@ -5,10 +5,10 @@
#ifndef V8_CANCELABLE_TASK_H_
#define V8_CANCELABLE_TASK_H_
+#include <atomic>
#include <unordered_map>
#include "include/v8-platform.h"
-#include "src/base/atomic-utils.h"
#include "src/base/macros.h"
#include "src/base/platform/condition-variable.h"
#include "src/globals.h"
@@ -19,6 +19,13 @@ namespace internal {
class Cancelable;
class Isolate;
+// The possible outcomes of trying to abort a job are:
+// (1) The task is already finished running or was canceled before and
+// thus has been removed from the manager.
+// (2) The task is currently running and cannot be canceled anymore.
+// (3) The task is not yet running (or finished) so it is canceled and
+// removed.
+enum class TryAbortResult { kTaskRemoved, kTaskRunning, kTaskAborted };
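Hoisting TryAbortResult to namespace scope as an enum class forces qualified names at call sites, which accounts for most of this diff's churn. A sketch of a typical caller under the new spelling:

    #include "src/cancelable-task.h"

    using v8::internal::CancelableTaskManager;
    using v8::internal::TryAbortResult;

    // Returns true if the task will definitely not run.
    bool AbortedOrGone(CancelableTaskManager* manager,
                       CancelableTaskManager::Id id) {
      switch (manager->TryAbort(id)) {
        case TryAbortResult::kTaskAborted:  // canceled before it started
        case TryAbortResult::kTaskRemoved:  // already finished or canceled
          return true;
        case TryAbortResult::kTaskRunning:  // too late to cancel
          return false;
      }
      return false;  // unreachable; keeps some compilers happy
    }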
// Keeps track of cancelable tasks. It is possible to register and remove tasks
// from any fore- and background task/thread.
@@ -28,25 +35,20 @@ class V8_EXPORT_PRIVATE CancelableTaskManager {
CancelableTaskManager();
+ ~CancelableTaskManager();
+
// Registers a new cancelable {task}. Returns the unique {id} of the task that
// can be used to try to abort a task by calling {Abort}.
- // Must not be called after CancelAndWait.
+ // If {Register} is called after {CancelAndWait}, then the task will be
+ // aborted immediately.
+ // {Register} should only be called by the thread which owns the
+ // {CancelableTaskManager}, or by a task which is managed by the
+ // {CancelableTaskManager}.
Id Register(Cancelable* task);
- // Try to abort running a task identified by {id}. The possible outcomes are:
- // (1) The task is already finished running or was canceled before and
- // thus has been removed from the manager.
- // (2) The task is currently running and cannot be canceled anymore.
- // (3) The task is not yet running (or finished) so it is canceled and
- // removed.
- //
- enum TryAbortResult { kTaskRemoved, kTaskRunning, kTaskAborted };
+ // Try to abort running a task identified by {id}.
TryAbortResult TryAbort(Id id);
- // Cancels all remaining registered tasks and waits for tasks that are
- // already running. This disallows subsequent Register calls.
- void CancelAndWait();
-
// Tries to cancel all remaining registered tasks. The return value indicates
// whether
//
@@ -58,7 +60,16 @@ class V8_EXPORT_PRIVATE CancelableTaskManager {
// 3) All registered tasks were cancelled (kTaskAborted).
TryAbortResult TryAbortAll();
+ // Cancels all remaining registered tasks and waits for tasks that are
+ // already running. This disallows subsequent Register calls.
+ void CancelAndWait();
+
+  // Returns true if the task manager has been canceled.
+ bool canceled() const { return canceled_; }
+
private:
+ static constexpr Id kInvalidTaskId = 0;
+
// Only called by {Cancelable} destructor. The task is done with executing,
// but needs to be removed.
void RemoveFinishedTask(Id id);
@@ -83,7 +94,9 @@ class V8_EXPORT_PRIVATE CancelableTaskManager {
class V8_EXPORT_PRIVATE Cancelable {
public:
- explicit Cancelable(CancelableTaskManager* parent);
+ explicit Cancelable(CancelableTaskManager* parent)
+ : parent_(parent), id_(parent->Register(this)) {}
+
virtual ~Cancelable();
// Never invoke after handing over the task to the platform! The reason is
@@ -94,42 +107,37 @@ class V8_EXPORT_PRIVATE Cancelable {
CancelableTaskManager::Id id() { return id_; }
protected:
- bool TryRun() { return status_.TrySetValue(kWaiting, kRunning); }
- bool IsRunning() { return status_.Value() == kRunning; }
- intptr_t CancelAttempts() { return cancel_counter_; }
-
- private:
// Identifies the state a cancelable task is in:
// |kWaiting|: The task is scheduled and waiting to be executed. {TryRun} will
// succeed.
// |kCanceled|: The task has been canceled. {TryRun} will fail.
// |kRunning|: The task is currently running and cannot be canceled anymore.
- enum Status {
- kWaiting,
- kCanceled,
- kRunning,
- };
+ enum Status { kWaiting, kCanceled, kRunning };
- // Use {CancelableTaskManager} to abort a task that has not yet been
- // executed.
- bool Cancel() {
- if (status_.TrySetValue(kWaiting, kCanceled)) {
- return true;
- }
- cancel_counter_++;
- return false;
+ bool TryRun(Status* previous = nullptr) {
+ return CompareExchangeStatus(kWaiting, kRunning, previous);
}
- CancelableTaskManager* parent_;
- base::AtomicValue<Status> status_;
- CancelableTaskManager::Id id_;
+ private:
+ friend class CancelableTaskManager;
- // The counter is incremented for failing tries to cancel a task. This can be
- // used by the task itself as an indication how often external entities tried
- // to abort it.
- std::atomic<intptr_t> cancel_counter_;
+ // Use {CancelableTaskManager} to abort a task that has not yet been
+ // executed.
+ bool Cancel() { return CompareExchangeStatus(kWaiting, kCanceled); }
+
+ bool CompareExchangeStatus(Status expected, Status desired,
+ Status* previous = nullptr) {
+ // {compare_exchange_strong} updates {expected}.
+ bool success = status_.compare_exchange_strong(expected, desired,
+ std::memory_order_acq_rel,
+ std::memory_order_acquire);
+ if (previous) *previous = expected;
+ return success;
+ }
- friend class CancelableTaskManager;
+ CancelableTaskManager* const parent_;
+ std::atomic<Status> status_{kWaiting};
+ const CancelableTaskManager::Id id_;
DISALLOW_COPY_AND_ASSIGN(Cancelable);
};
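TryRun()'s new previous-status reporting leans on a standard property of compare_exchange_strong: on failure it writes the observed value back into expected. A standalone demonstration of the pattern:

    #include <atomic>
    #include <cassert>

    enum Status { kWaiting, kCanceled, kRunning };

    int main() {
      std::atomic<Status> status{kWaiting};

      Status expected = kWaiting;
      bool ok = status.compare_exchange_strong(expected, kRunning,
                                               std::memory_order_acq_rel,
                                               std::memory_order_acquire);
      assert(ok && expected == kWaiting);  // success; previous was kWaiting

      expected = kWaiting;  // stale expectation: the task is running now
      ok = status.compare_exchange_strong(expected, kCanceled,
                                          std::memory_order_acq_rel,
                                          std::memory_order_acquire);
      assert(!ok && expected == kRunning);  // failure reports the seen status
      return 0;
    }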
@@ -154,33 +162,6 @@ class V8_EXPORT_PRIVATE CancelableTask : public Cancelable,
DISALLOW_COPY_AND_ASSIGN(CancelableTask);
};
-// TODO(clemensh): Use std::function and move implementation to cc file.
-template <typename Func>
-class CancelableLambdaTask final : public CancelableTask {
- public:
- CancelableLambdaTask(Isolate* isolate, Func func)
- : CancelableTask(isolate), func_(std::move(func)) {}
- CancelableLambdaTask(CancelableTaskManager* manager, Func func)
- : CancelableTask(manager), func_(std::move(func)) {}
- void RunInternal() final { func_(); }
-
- private:
- Func func_;
-};
-
-template <typename Func>
-std::unique_ptr<CancelableTask> MakeCancelableLambdaTask(Isolate* isolate,
- Func func) {
- return std::unique_ptr<CancelableTask>(
- new CancelableLambdaTask<Func>(isolate, std::move(func)));
-}
-template <typename Func>
-std::unique_ptr<CancelableTask> MakeCancelableLambdaTask(
- CancelableTaskManager* manager, Func func) {
- return std::unique_ptr<CancelableTask>(
- new CancelableLambdaTask<Func>(manager, std::move(func)));
-}
-
// Multiple inheritance can be used because IdleTask is a pure interface.
class CancelableIdleTask : public Cancelable, public IdleTask {
public:
@@ -200,34 +181,6 @@ class CancelableIdleTask : public Cancelable, public IdleTask {
DISALLOW_COPY_AND_ASSIGN(CancelableIdleTask);
};
-template <typename Func>
-class CancelableIdleLambdaTask final : public CancelableIdleTask {
- public:
- CancelableIdleLambdaTask(Isolate* isolate, Func func)
- : CancelableIdleTask(isolate), func_(std::move(func)) {}
- CancelableIdleLambdaTask(CancelableTaskManager* manager, Func func)
- : CancelableIdleTask(manager), func_(std::move(func)) {}
- void RunInternal(double deadline_in_seconds) final {
- func_(deadline_in_seconds);
- }
-
- private:
- Func func_;
-};
-
-template <typename Func>
-std::unique_ptr<CancelableIdleTask> MakeCancelableIdleLambdaTask(
- Isolate* isolate, Func func) {
- return std::unique_ptr<CancelableIdleTask>(
- new CancelableIdleLambdaTask<Func>(isolate, std::move(func)));
-}
-template <typename Func>
-std::unique_ptr<CancelableIdleTask> MakeCancelableIdleLambdaTask(
- CancelableTaskManager* manager, Func func) {
- return std::unique_ptr<CancelableIdleTask>(
- new CancelableIdleLambdaTask<Func>(manager, std::move(func)));
-}
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/char-predicates-inl.h b/deps/v8/src/char-predicates-inl.h
index 3662514bca..329b3a0fbb 100644
--- a/deps/v8/src/char-predicates-inl.h
+++ b/deps/v8/src/char-predicates-inl.h
@@ -14,65 +14,112 @@ namespace internal {
// If c is in 'A'-'Z' or 'a'-'z', return its lower-case.
// Else, return something outside of 'A'-'Z' and 'a'-'z'.
// Note: it ignores LOCALE.
-inline int AsciiAlphaToLower(uc32 c) {
- return c | 0x20;
-}
+inline constexpr int AsciiAlphaToLower(uc32 c) { return c | 0x20; }
-inline bool IsCarriageReturn(uc32 c) {
- return c == 0x000D;
-}
+inline constexpr bool IsCarriageReturn(uc32 c) { return c == 0x000D; }
-inline bool IsLineFeed(uc32 c) {
- return c == 0x000A;
-}
+inline constexpr bool IsLineFeed(uc32 c) { return c == 0x000A; }
-inline bool IsAsciiIdentifier(uc32 c) {
+inline constexpr bool IsAsciiIdentifier(uc32 c) {
return IsAlphaNumeric(c) || c == '$' || c == '_';
}
-inline bool IsAlphaNumeric(uc32 c) {
+inline constexpr bool IsAlphaNumeric(uc32 c) {
return IsInRange(AsciiAlphaToLower(c), 'a', 'z') || IsDecimalDigit(c);
}
-inline bool IsDecimalDigit(uc32 c) {
+inline constexpr bool IsDecimalDigit(uc32 c) {
// ECMA-262, 3rd, 7.8.3 (p 16)
return IsInRange(c, '0', '9');
}
-inline bool IsHexDigit(uc32 c) {
+inline constexpr bool IsHexDigit(uc32 c) {
// ECMA-262, 3rd, 7.6 (p 15)
return IsDecimalDigit(c) || IsInRange(AsciiAlphaToLower(c), 'a', 'f');
}
-inline bool IsOctalDigit(uc32 c) {
+inline constexpr bool IsOctalDigit(uc32 c) {
// ECMA-262, 6th, 7.8.3
return IsInRange(c, '0', '7');
}
-inline bool IsNonOctalDecimalDigit(uc32 c) { return IsInRange(c, '8', '9'); }
+inline constexpr bool IsNonOctalDecimalDigit(uc32 c) {
+ return IsInRange(c, '8', '9');
+}
-inline bool IsBinaryDigit(uc32 c) {
+inline constexpr bool IsBinaryDigit(uc32 c) {
// ECMA-262, 6th, 7.8.3
return c == '0' || c == '1';
}
-inline bool IsRegExpWord(uc16 c) {
+inline constexpr bool IsRegExpWord(uc16 c) {
return IsInRange(AsciiAlphaToLower(c), 'a', 'z')
|| IsDecimalDigit(c)
|| (c == '_');
}
+inline constexpr bool IsRegExpNewline(uc16 c) {
+ // CR LF LS PS
+ return c != 0x000A && c != 0x000D && c != 0x2028 && c != 0x2029;
+}
-inline bool IsRegExpNewline(uc16 c) {
- switch (c) {
- // CR LF LS PS
- case 0x000A: case 0x000D: case 0x2028: case 0x2029:
- return false;
- default:
- return true;
- }
+// Constexpr cache table for character flags.
+enum AsciiCharFlags {
+ kIsIdentifierStart = 1 << 0,
+ kIsIdentifierPart = 1 << 1,
+ kIsWhiteSpace = 1 << 2,
+ kIsWhiteSpaceOrLineTerminator = 1 << 3
+};
+constexpr uint8_t BuildAsciiCharFlags(uc32 c) {
+ // clang-format off
+ return
+ (IsAsciiIdentifier(c) || c == '\\') ? (
+ kIsIdentifierPart | (!IsDecimalDigit(c) ? kIsIdentifierStart : 0)) : 0 |
+ (c == ' ' || c == '\t' || c == '\v' || c == '\f') ?
+ kIsWhiteSpace | kIsWhiteSpaceOrLineTerminator : 0 |
+ (c == '\r' || c == '\n') ? kIsWhiteSpaceOrLineTerminator : 0;
+ // clang-format on
+}
+const constexpr uint8_t kAsciiCharFlags[128] = {
+#define BUILD_CHAR_FLAGS(N) BuildAsciiCharFlags(N),
+ INT_0_TO_127_LIST(BUILD_CHAR_FLAGS)
+#undef BUILD_CHAR_FLAGS
+};
+
+bool IsIdentifierStart(uc32 c) {
+ if (!IsInRange(c, 0, 127)) return IsIdentifierStartSlow(c);
+ DCHECK_EQ(IsIdentifierStartSlow(c),
+ static_cast<bool>(kAsciiCharFlags[c] & kIsIdentifierStart));
+ return kAsciiCharFlags[c] & kIsIdentifierStart;
}
+bool IsIdentifierPart(uc32 c) {
+ if (!IsInRange(c, 0, 127)) return IsIdentifierPartSlow(c);
+ DCHECK_EQ(IsIdentifierPartSlow(c),
+ static_cast<bool>(kAsciiCharFlags[c] & kIsIdentifierPart));
+ return kAsciiCharFlags[c] & kIsIdentifierPart;
+}
+
+bool IsWhiteSpace(uc32 c) {
+ if (!IsInRange(c, 0, 127)) return IsWhiteSpaceSlow(c);
+ DCHECK_EQ(IsWhiteSpaceSlow(c),
+ static_cast<bool>(kAsciiCharFlags[c] & kIsWhiteSpace));
+ return kAsciiCharFlags[c] & kIsWhiteSpace;
+}
+
+bool IsWhiteSpaceOrLineTerminator(uc32 c) {
+ if (!IsInRange(c, 0, 127)) return IsWhiteSpaceOrLineTerminatorSlow(c);
+ DCHECK_EQ(
+ IsWhiteSpaceOrLineTerminatorSlow(c),
+ static_cast<bool>(kAsciiCharFlags[c] & kIsWhiteSpaceOrLineTerminator));
+ return kAsciiCharFlags[c] & kIsWhiteSpaceOrLineTerminator;
+}
+
+bool IsLineTerminatorSequence(uc32 c, uc32 next) {
+ if (!unibrow::IsLineTerminator(c)) return false;
+ if (c == 0x000d && next == 0x000a) return false; // CR with following LF.
+ return true;
+}
} // namespace internal
} // namespace v8
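The payoff of the table is that every ASCII predicate becomes one byte load plus a bit test, with the slow Unicode path kept only for c > 127. A hand-built mirror of a few entries, derived from the flag definitions rather than copied from V8's table:

    #include <cassert>
    #include <cstdint>

    enum : uint8_t {
      kIsIdentifierStart = 1 << 0,
      kIsIdentifierPart = 1 << 1,
      kIsWhiteSpace = 1 << 2,
      kIsWhiteSpaceOrLineTerminator = 1 << 3,
    };

    int main() {
      uint8_t flags['z' + 1] = {};
      flags['a'] = kIsIdentifierStart | kIsIdentifierPart;
      flags['7'] = kIsIdentifierPart;  // digits continue but cannot start names
      flags[' '] = kIsWhiteSpace | kIsWhiteSpaceOrLineTerminator;
      flags['\n'] = kIsWhiteSpaceOrLineTerminator;  // terminator, not "space"

      assert(flags['a'] & kIsIdentifierStart);
      assert(!(flags['7'] & kIsIdentifierStart));
      assert(!(flags['\n'] & kIsWhiteSpace));
      return 0;
    }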
diff --git a/deps/v8/src/char-predicates.cc b/deps/v8/src/char-predicates.cc
index 747f4194f4..a1e8b68fe9 100644
--- a/deps/v8/src/char-predicates.cc
+++ b/deps/v8/src/char-predicates.cc
@@ -16,7 +16,7 @@ namespace internal {
// ES#sec-names-and-keywords Names and Keywords
// UnicodeIDStart, '$', '_' and '\'
-bool IdentifierStart::Is(uc32 c) {
+bool IsIdentifierStartSlow(uc32 c) {
// cannot use u_isIDStart because it does not work for
// Other_ID_Start characters.
return u_hasBinaryProperty(c, UCHAR_ID_START) ||
@@ -25,7 +25,7 @@ bool IdentifierStart::Is(uc32 c) {
// ES#sec-names-and-keywords Names and Keywords
// UnicodeIDContinue, '$', '_', '\', ZWJ, and ZWNJ
-bool IdentifierPart::Is(uc32 c) {
+bool IsIdentifierPartSlow(uc32 c) {
// Can't use u_isIDPart because it does not work for
// Other_ID_Continue characters.
return u_hasBinaryProperty(c, UCHAR_ID_CONTINUE) ||
@@ -35,7 +35,7 @@ bool IdentifierPart::Is(uc32 c) {
// ES#sec-white-space White Space
// gC=Zs, U+0009, U+000B, U+000C, U+FEFF
-bool WhiteSpace::Is(uc32 c) {
+bool IsWhiteSpaceSlow(uc32 c) {
return (u_charType(c) == U_SPACE_SEPARATOR) ||
(c < 0x0D && (c == 0x09 || c == 0x0B || c == 0x0C)) || c == 0xFEFF;
}
diff --git a/deps/v8/src/char-predicates.h b/deps/v8/src/char-predicates.h
index c1107e6568..4828e19a00 100644
--- a/deps/v8/src/char-predicates.h
+++ b/deps/v8/src/char-predicates.h
@@ -14,75 +14,70 @@ namespace internal {
// Unicode character predicates as defined by ECMA-262, 3rd,
// used for lexical analysis.
-inline int AsciiAlphaToLower(uc32 c);
-inline bool IsCarriageReturn(uc32 c);
-inline bool IsLineFeed(uc32 c);
-inline bool IsAsciiIdentifier(uc32 c);
-inline bool IsAlphaNumeric(uc32 c);
-inline bool IsDecimalDigit(uc32 c);
-inline bool IsHexDigit(uc32 c);
-inline bool IsOctalDigit(uc32 c);
-inline bool IsBinaryDigit(uc32 c);
-inline bool IsRegExpWord(uc32 c);
-inline bool IsRegExpNewline(uc32 c);
+inline constexpr int AsciiAlphaToLower(uc32 c);
+inline constexpr bool IsCarriageReturn(uc32 c);
+inline constexpr bool IsLineFeed(uc32 c);
+inline constexpr bool IsAsciiIdentifier(uc32 c);
+inline constexpr bool IsAlphaNumeric(uc32 c);
+inline constexpr bool IsDecimalDigit(uc32 c);
+inline constexpr bool IsHexDigit(uc32 c);
+inline constexpr bool IsOctalDigit(uc32 c);
+inline constexpr bool IsBinaryDigit(uc32 c);
+inline constexpr bool IsRegExpWord(uc32 c);
+inline constexpr bool IsRegExpNewline(uc32 c);
// ES#sec-names-and-keywords
// This includes '_', '$' and '\', and ID_Start according to
// http://www.unicode.org/reports/tr31/, which consists of categories
// 'Lu', 'Ll', 'Lt', 'Lm', 'Lo', 'Nl', but excluding properties
// 'Pattern_Syntax' or 'Pattern_White_Space'.
+inline bool IsIdentifierStart(uc32 c);
#ifdef V8_INTL_SUPPORT
-struct V8_EXPORT_PRIVATE IdentifierStart {
- static bool Is(uc32 c);
+V8_EXPORT_PRIVATE bool IsIdentifierStartSlow(uc32 c);
#else
-struct IdentifierStart {
+inline bool IsIdentifierStartSlow(uc32 c) {
// Non-BMP characters are not supported without I18N.
- static inline bool Is(uc32 c) {
- return (c <= 0xFFFF) ? unibrow::ID_Start::Is(c) : false;
- }
+ return (c <= 0xFFFF) ? unibrow::ID_Start::Is(c) : false;
+}
#endif
-};
// ES#sec-names-and-keywords
// This includes \u200c and \u200d, and ID_Continue according to
// http://www.unicode.org/reports/tr31/, which consists of ID_Start,
// the categories 'Mn', 'Mc', 'Nd', 'Pc', but excluding properties
// 'Pattern_Syntax' or 'Pattern_White_Space'.
+inline bool IsIdentifierPart(uc32 c);
#ifdef V8_INTL_SUPPORT
-struct V8_EXPORT_PRIVATE IdentifierPart {
- static bool Is(uc32 c);
+V8_EXPORT_PRIVATE bool IsIdentifierPartSlow(uc32 c);
#else
-struct IdentifierPart {
- static inline bool Is(uc32 c) {
- // Non-BMP charaacters are not supported without I18N.
- if (c <= 0xFFFF) {
- return unibrow::ID_Start::Is(c) || unibrow::ID_Continue::Is(c);
- }
- return false;
+inline bool IsIdentifierPartSlow(uc32 c) {
+  // Non-BMP characters are not supported without I18N.
+ if (c <= 0xFFFF) {
+ return unibrow::ID_Start::Is(c) || unibrow::ID_Continue::Is(c);
}
+ return false;
+}
#endif
-};
// ES6 draft section 11.2
// This includes all code points of Unicode category 'Zs'.
// Further included are \u0009, \u000b, \u000c, and \ufeff.
+inline bool IsWhiteSpace(uc32 c);
#ifdef V8_INTL_SUPPORT
-struct V8_EXPORT_PRIVATE WhiteSpace {
- static bool Is(uc32 c);
+V8_EXPORT_PRIVATE bool IsWhiteSpaceSlow(uc32 c);
#else
-struct WhiteSpace {
- static inline bool Is(uc32 c) { return unibrow::WhiteSpace::Is(c); }
+inline bool IsWhiteSpaceSlow(uc32 c) { return unibrow::WhiteSpace::Is(c); }
#endif
-};
// WhiteSpace and LineTerminator according to ES6 draft section 11.2 and 11.3
// This includes all the characters with Unicode category 'Z' (= Zs+Zl+Zp)
// as well as \u0009 - \u000d and \ufeff.
-struct WhiteSpaceOrLineTerminator {
- static inline bool Is(uc32 c) {
- return WhiteSpace::Is(c) || unibrow::IsLineTerminator(c);
- }
-};
+inline bool IsWhiteSpaceOrLineTerminator(uc32 c);
+inline bool IsWhiteSpaceOrLineTerminatorSlow(uc32 c) {
+ return IsWhiteSpaceSlow(c) || unibrow::IsLineTerminator(c);
+}
+
+inline bool IsLineTerminatorSequence(uc32 c, uc32 next);
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/code-comments.cc b/deps/v8/src/code-comments.cc
new file mode 100644
index 0000000000..6e64eb7fa1
--- /dev/null
+++ b/deps/v8/src/code-comments.cc
@@ -0,0 +1,102 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <cstring>
+#include <iomanip>
+
+#include "src/assembler-inl.h"
+#include "src/code-comments.h"
+
+namespace v8 {
+namespace internal {
+
+namespace {
+static constexpr uint8_t kOffsetToFirstCommentEntry = kUInt32Size;
+static constexpr uint8_t kOffsetToPCOffset = 0;
+static constexpr uint8_t kOffsetToCommentSize = kOffsetToPCOffset + kUInt32Size;
+static constexpr uint8_t kOffsetToCommentString =
+ kOffsetToCommentSize + kUInt32Size;
+} // namespace
+
+uint32_t CodeCommentEntry::comment_length() const {
+ return static_cast<uint32_t>(comment.size() + 1);
+}
+
+uint32_t CodeCommentEntry::size() const {
+ return kOffsetToCommentString + comment_length();
+}
+
+CodeCommentsIterator::CodeCommentsIterator(Address code_comments_start)
+ : code_comments_start_(code_comments_start),
+ current_entry_(code_comments_start + kOffsetToFirstCommentEntry) {}
+
+uint32_t CodeCommentsIterator::size() const {
+ return code_comments_start_ != kNullAddress
+ ? *reinterpret_cast<uint32_t*>(code_comments_start_)
+ : 0;
+}
+
+const char* CodeCommentsIterator::GetComment() const {
+ const char* comment_string =
+ reinterpret_cast<const char*>(current_entry_ + kOffsetToCommentString);
+ CHECK_EQ(GetCommentSize(), strlen(comment_string) + 1);
+ return comment_string;
+}
+
+uint32_t CodeCommentsIterator::GetCommentSize() const {
+ return *reinterpret_cast<uint32_t*>(current_entry_ + kOffsetToCommentSize);
+}
+
+uint32_t CodeCommentsIterator::GetPCOffset() const {
+ return *reinterpret_cast<uint32_t*>(current_entry_ + kOffsetToPCOffset);
+}
+
+void CodeCommentsIterator::Next() {
+ current_entry_ += kOffsetToCommentString + GetCommentSize();
+}
+
+bool CodeCommentsIterator::HasCurrent() const {
+ return current_entry_ < code_comments_start_ + size();
+}
+
+void CodeCommentsWriter::Emit(Assembler* assm) {
+ assm->dd(section_size());
+ for (auto i = comments_.begin(); i != comments_.end(); ++i) {
+ assm->dd(i->pc_offset);
+ assm->dd(i->comment_length());
+ for (char c : i->comment) {
+ EnsureSpace ensure_space(assm);
+ assm->db(c);
+ }
+ assm->db('\0');
+ }
+}
+
+void CodeCommentsWriter::Add(uint32_t pc_offset, std::string comment) {
+ CodeCommentEntry entry = {pc_offset, std::move(comment)};
+ byte_count_ += entry.size();
+ comments_.push_back(std::move(entry));
+}
+
+size_t CodeCommentsWriter::entry_count() const { return comments_.size(); }
+uint32_t CodeCommentsWriter::section_size() const {
+ return kOffsetToFirstCommentEntry + static_cast<uint32_t>(byte_count_);
+}
+
+void PrintCodeCommentsSection(std::ostream& out, Address code_comments_start) {
+ CodeCommentsIterator it(code_comments_start);
+ out << "CodeComments (size = " << it.size() << ")\n";
+ if (it.HasCurrent()) {
+ out << std::setw(6) << "pc" << std::setw(6) << "len"
+ << " comment\n";
+ }
+ for (; it.HasCurrent(); it.Next()) {
+ out << std::hex << std::setw(6) << it.GetPCOffset() << std::dec
+ << std::setw(6) << it.GetCommentSize() << " (" << it.GetComment()
+ << ")\n";
+ }
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/code-comments.h b/deps/v8/src/code-comments.h
new file mode 100644
index 0000000000..8d3d7637a1
--- /dev/null
+++ b/deps/v8/src/code-comments.h
@@ -0,0 +1,68 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CODE_COMMENTS_H_
+#define V8_CODE_COMMENTS_H_
+
+#include <ostream>
+#include <string>
+#include <vector>
+
+#include "include/v8-internal.h"
+
+namespace v8 {
+namespace internal {
+
+class Assembler;
+
+// Code comments section layout:
+// byte count content
+// ------------------------------------------------------------------------
+// 4 size as uint32_t
+// [Inline array of CodeCommentEntry in increasing pc_offset order]
+// ┌ 4 pc_offset of entry as uint32_t
+// ├ 4 length of the comment including terminating '\0'
+// └ <variable length> characters of the comment including terminating '\0'
+
+struct CodeCommentEntry {
+ uint32_t pc_offset;
+ std::string comment;
+ uint32_t comment_length() const;
+ uint32_t size() const;
+};
+
+class CodeCommentsWriter {
+ public:
+ void Add(uint32_t pc_offset, std::string comment);
+ void Emit(Assembler* assm);
+ size_t entry_count() const;
+ uint32_t section_size() const;
+
+ private:
+ uint32_t byte_count_ = 0;
+ std::vector<CodeCommentEntry> comments_;
+};
+
+class CodeCommentsIterator {
+ public:
+ // Address can be kNullAddress. In this case HasCurrent() will return false.
+ explicit CodeCommentsIterator(Address code_comments_start);
+ uint32_t size() const;
+ const char* GetComment() const;
+ uint32_t GetCommentSize() const;
+ uint32_t GetPCOffset() const;
+ void Next();
+ bool HasCurrent() const;
+
+ private:
+ Address code_comments_start_;
+ Address current_entry_;
+};
+
+void PrintCodeCommentsSection(std::ostream& out, Address code_comments_start);
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_CODE_COMMENTS_H_
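
Worked numbers for the size accessors declared above, assuming the 4+4-byte entry header from code-comments.cc (comment strings here are illustrative):

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    // entry.size() = 8-byte header + strlen(comment) + 1 (the '\0' counts).
    static uint32_t EntrySize(const char* comment) {
      const uint32_t kEntryHeader = 2 * sizeof(uint32_t);  // pc_offset + length
      return kEntryHeader + static_cast<uint32_t>(std::strlen(comment) + 1);
    }

    int main() {
      uint32_t section = sizeof(uint32_t);           // the size field itself
      section += EntrySize("-- B0 --");              // 8 + 9  = 17
      section += EntrySize("call runtime");          // 8 + 13 = 21
      std::printf("section_size = %u\n", section);   // 4 + 17 + 21 = 42
      return 0;
    }
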
diff --git a/deps/v8/src/code-events.h b/deps/v8/src/code-events.h
index 07a883be0d..8a818cab7e 100644
--- a/deps/v8/src/code-events.h
+++ b/deps/v8/src/code-events.h
@@ -9,6 +9,10 @@
#include "src/base/platform/mutex.h"
#include "src/globals.h"
+#include "src/objects/code.h"
+#include "src/objects/name.h"
+#include "src/objects/shared-function-info.h"
+#include "src/objects/string.h"
#include "src/vector.h"
namespace v8 {
@@ -68,27 +72,27 @@ class CodeEventListener {
virtual ~CodeEventListener() = default;
- virtual void CodeCreateEvent(LogEventsAndTags tag, AbstractCode* code,
+ virtual void CodeCreateEvent(LogEventsAndTags tag, AbstractCode code,
const char* comment) = 0;
- virtual void CodeCreateEvent(LogEventsAndTags tag, AbstractCode* code,
- Name* name) = 0;
- virtual void CodeCreateEvent(LogEventsAndTags tag, AbstractCode* code,
- SharedFunctionInfo* shared, Name* source) = 0;
- virtual void CodeCreateEvent(LogEventsAndTags tag, AbstractCode* code,
- SharedFunctionInfo* shared, Name* source,
- int line, int column) = 0;
+ virtual void CodeCreateEvent(LogEventsAndTags tag, AbstractCode code,
+ Name name) = 0;
+ virtual void CodeCreateEvent(LogEventsAndTags tag, AbstractCode code,
+ SharedFunctionInfo shared, Name source) = 0;
+ virtual void CodeCreateEvent(LogEventsAndTags tag, AbstractCode code,
+ SharedFunctionInfo shared, Name source, int line,
+ int column) = 0;
virtual void CodeCreateEvent(LogEventsAndTags tag, const wasm::WasmCode* code,
wasm::WasmName name) = 0;
- virtual void CallbackEvent(Name* name, Address entry_point) = 0;
- virtual void GetterCallbackEvent(Name* name, Address entry_point) = 0;
- virtual void SetterCallbackEvent(Name* name, Address entry_point) = 0;
- virtual void RegExpCodeCreateEvent(AbstractCode* code, String* source) = 0;
- virtual void CodeMoveEvent(AbstractCode* from, AbstractCode* to) = 0;
+ virtual void CallbackEvent(Name name, Address entry_point) = 0;
+ virtual void GetterCallbackEvent(Name name, Address entry_point) = 0;
+ virtual void SetterCallbackEvent(Name name, Address entry_point) = 0;
+ virtual void RegExpCodeCreateEvent(AbstractCode code, String source) = 0;
+ virtual void CodeMoveEvent(AbstractCode from, AbstractCode to) = 0;
virtual void SharedFunctionInfoMoveEvent(Address from, Address to) = 0;
virtual void CodeMovingGCEvent() = 0;
- virtual void CodeDisableOptEvent(AbstractCode* code,
- SharedFunctionInfo* shared) = 0;
- virtual void CodeDeoptEvent(Code* code, DeoptimizeKind kind, Address pc,
+ virtual void CodeDisableOptEvent(AbstractCode code,
+ SharedFunctionInfo shared) = 0;
+ virtual void CodeDeoptEvent(Code code, DeoptimizeKind kind, Address pc,
int fp_to_sp_delta) = 0;
virtual bool is_listening_to_code_events() { return false; }
@@ -101,11 +105,11 @@ class CodeEventDispatcher {
CodeEventDispatcher() = default;
bool AddListener(CodeEventListener* listener) {
- base::LockGuard<base::Mutex> guard(&mutex_);
+ base::MutexGuard guard(&mutex_);
return listeners_.insert(listener).second;
}
void RemoveListener(CodeEventListener* listener) {
- base::LockGuard<base::Mutex> guard(&mutex_);
+ base::MutexGuard guard(&mutex_);
listeners_.erase(listener);
}
bool IsListeningToCodeEvents() {
@@ -117,23 +121,23 @@ class CodeEventDispatcher {
return false;
}
-#define CODE_EVENT_DISPATCH(code) \
- base::LockGuard<base::Mutex> guard(&mutex_); \
+#define CODE_EVENT_DISPATCH(code) \
+ base::MutexGuard guard(&mutex_); \
for (auto it = listeners_.begin(); it != listeners_.end(); ++it) (*it)->code
- void CodeCreateEvent(LogEventsAndTags tag, AbstractCode* code,
+ void CodeCreateEvent(LogEventsAndTags tag, AbstractCode code,
const char* comment) {
CODE_EVENT_DISPATCH(CodeCreateEvent(tag, code, comment));
}
- void CodeCreateEvent(LogEventsAndTags tag, AbstractCode* code, Name* name) {
+ void CodeCreateEvent(LogEventsAndTags tag, AbstractCode code, Name name) {
CODE_EVENT_DISPATCH(CodeCreateEvent(tag, code, name));
}
- void CodeCreateEvent(LogEventsAndTags tag, AbstractCode* code,
- SharedFunctionInfo* shared, Name* name) {
+ void CodeCreateEvent(LogEventsAndTags tag, AbstractCode code,
+ SharedFunctionInfo shared, Name name) {
CODE_EVENT_DISPATCH(CodeCreateEvent(tag, code, shared, name));
}
- void CodeCreateEvent(LogEventsAndTags tag, AbstractCode* code,
- SharedFunctionInfo* shared, Name* source, int line,
+ void CodeCreateEvent(LogEventsAndTags tag, AbstractCode code,
+ SharedFunctionInfo shared, Name source, int line,
int column) {
CODE_EVENT_DISPATCH(
CodeCreateEvent(tag, code, shared, source, line, column));
@@ -142,29 +146,29 @@ class CodeEventDispatcher {
wasm::WasmName name) {
CODE_EVENT_DISPATCH(CodeCreateEvent(tag, code, name));
}
- void CallbackEvent(Name* name, Address entry_point) {
+ void CallbackEvent(Name name, Address entry_point) {
CODE_EVENT_DISPATCH(CallbackEvent(name, entry_point));
}
- void GetterCallbackEvent(Name* name, Address entry_point) {
+ void GetterCallbackEvent(Name name, Address entry_point) {
CODE_EVENT_DISPATCH(GetterCallbackEvent(name, entry_point));
}
- void SetterCallbackEvent(Name* name, Address entry_point) {
+ void SetterCallbackEvent(Name name, Address entry_point) {
CODE_EVENT_DISPATCH(SetterCallbackEvent(name, entry_point));
}
- void RegExpCodeCreateEvent(AbstractCode* code, String* source) {
+ void RegExpCodeCreateEvent(AbstractCode code, String source) {
CODE_EVENT_DISPATCH(RegExpCodeCreateEvent(code, source));
}
- void CodeMoveEvent(AbstractCode* from, AbstractCode* to) {
+ void CodeMoveEvent(AbstractCode from, AbstractCode to) {
CODE_EVENT_DISPATCH(CodeMoveEvent(from, to));
}
void SharedFunctionInfoMoveEvent(Address from, Address to) {
CODE_EVENT_DISPATCH(SharedFunctionInfoMoveEvent(from, to));
}
void CodeMovingGCEvent() { CODE_EVENT_DISPATCH(CodeMovingGCEvent()); }
- void CodeDisableOptEvent(AbstractCode* code, SharedFunctionInfo* shared) {
+ void CodeDisableOptEvent(AbstractCode code, SharedFunctionInfo shared) {
CODE_EVENT_DISPATCH(CodeDisableOptEvent(code, shared));
}
- void CodeDeoptEvent(Code* code, DeoptimizeKind kind, Address pc,
+ void CodeDeoptEvent(Code code, DeoptimizeKind kind, Address pc,
int fp_to_sp_delta) {
CODE_EVENT_DISPATCH(CodeDeoptEvent(code, kind, pc, fp_to_sp_delta));
}
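
Most of this hunk is mechanical: AbstractCode*, Name*, SharedFunctionInfo*, String* and Code* become single-word value classes passed by value, and base::LockGuard<base::Mutex> becomes its base::MutexGuard alias. A freestanding sketch of the wrapper shape that keeps `->` call sites compiling (an illustrative class, not V8's actual implementation):

    #include <cstdint>
    #include <iostream>

    using Address = uintptr_t;

    // Stands in for AbstractCode, Name, etc.: one tagged word, copied by value.
    class HeapObjectRef {
     public:
      explicit HeapObjectRef(Address ptr) : ptr_(ptr) {}
      Address ptr() const { return ptr_; }
      bool is_null() const { return ptr_ == 0; }
      // operator-> returns `this`, so old `code->Foo()` call sites still work.
      const HeapObjectRef* operator->() const { return this; }

     private:
      Address ptr_;  // the only field: passing by value copies one word
    };

    // Note the signature: by value, no '*', matching the listener hunk above.
    static void CodeCreateEvent(HeapObjectRef code) {
      std::cout << "code at 0x" << std::hex << code->ptr() << "\n";
    }

    int main() {
      CodeCreateEvent(HeapObjectRef(0x1000));
      return 0;
    }
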
diff --git a/deps/v8/src/code-factory.cc b/deps/v8/src/code-factory.cc
index cffb16b7d4..7303f44f2b 100644
--- a/deps/v8/src/code-factory.cc
+++ b/deps/v8/src/code-factory.cc
@@ -12,17 +12,6 @@
namespace v8 {
namespace internal {
-namespace {
-
-// TODO(ishell): make it (const Stub& stub) once CodeStub::GetCode() is const.
-template <typename Stub>
-Callable make_callable(Stub& stub) {
- typedef typename Stub::Descriptor Descriptor;
- return Callable(stub.GetCode(), Descriptor{});
-}
-
-} // namespace
-
// static
Handle<Code> CodeFactory::RuntimeCEntry(Isolate* isolate, int result_size) {
return CodeFactory::CEntry(isolate, result_size);
@@ -70,54 +59,160 @@ Handle<Code> CodeFactory::CEntry(Isolate* isolate, int result_size,
// static
Callable CodeFactory::ApiGetter(Isolate* isolate) {
- return Callable(BUILTIN_CODE(isolate, CallApiGetter), ApiGetterDescriptor{});
+ return Builtins::CallableFor(isolate, Builtins::kCallApiGetter);
}
// static
-Callable CodeFactory::CallApiCallback(Isolate* isolate, int argc) {
- switch (argc) {
- case 0:
- return Callable(BUILTIN_CODE(isolate, CallApiCallback_Argc0),
- ApiCallbackDescriptor{});
- case 1:
- return Callable(BUILTIN_CODE(isolate, CallApiCallback_Argc1),
- ApiCallbackDescriptor{});
- default: {
- CallApiCallbackStub stub(isolate, argc);
- return make_callable(stub);
- }
- }
- UNREACHABLE();
+Callable CodeFactory::CallApiCallback(Isolate* isolate) {
+ return Builtins::CallableFor(isolate, Builtins::kCallApiCallback);
}
// static
Callable CodeFactory::LoadGlobalIC(Isolate* isolate, TypeofMode typeof_mode) {
- return Callable(
- typeof_mode == NOT_INSIDE_TYPEOF
- ? BUILTIN_CODE(isolate, LoadGlobalICTrampoline)
- : BUILTIN_CODE(isolate, LoadGlobalICInsideTypeofTrampoline),
- LoadGlobalDescriptor{});
+ return typeof_mode == NOT_INSIDE_TYPEOF
+ ? Builtins::CallableFor(isolate, Builtins::kLoadGlobalICTrampoline)
+ : Builtins::CallableFor(
+ isolate, Builtins::kLoadGlobalICInsideTypeofTrampoline);
}
// static
Callable CodeFactory::LoadGlobalICInOptimizedCode(Isolate* isolate,
TypeofMode typeof_mode) {
- return Callable(typeof_mode == NOT_INSIDE_TYPEOF
- ? BUILTIN_CODE(isolate, LoadGlobalIC)
- : BUILTIN_CODE(isolate, LoadGlobalICInsideTypeof),
- LoadGlobalWithVectorDescriptor{});
+ return typeof_mode == NOT_INSIDE_TYPEOF
+ ? Builtins::CallableFor(isolate, Builtins::kLoadGlobalIC)
+ : Builtins::CallableFor(isolate,
+ Builtins::kLoadGlobalICInsideTypeof);
}
Callable CodeFactory::StoreOwnIC(Isolate* isolate) {
// TODO(ishell): Currently we use StoreOwnIC only for storing properties that
// already exist in the boilerplate therefore we can use StoreIC.
- return Callable(BUILTIN_CODE(isolate, StoreICTrampoline), StoreDescriptor{});
+ return Builtins::CallableFor(isolate, Builtins::kStoreICTrampoline);
}
Callable CodeFactory::StoreOwnICInOptimizedCode(Isolate* isolate) {
// TODO(ishell): Currently we use StoreOwnIC only for storing properties that
// already exist in the boilerplate therefore we can use StoreIC.
- return Callable(BUILTIN_CODE(isolate, StoreIC), StoreWithVectorDescriptor{});
+ return Builtins::CallableFor(isolate, Builtins::kStoreIC);
+}
+
+Callable CodeFactory::KeyedStoreIC_SloppyArguments(Isolate* isolate,
+ KeyedAccessStoreMode mode) {
+ Builtins::Name builtin_index;
+ switch (mode) {
+ case STANDARD_STORE:
+ builtin_index = Builtins::kKeyedStoreIC_SloppyArguments_Standard;
+ break;
+ case STORE_AND_GROW_NO_TRANSITION_HANDLE_COW:
+ builtin_index =
+ Builtins::kKeyedStoreIC_SloppyArguments_GrowNoTransitionHandleCOW;
+ break;
+ case STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS:
+ builtin_index =
+ Builtins::kKeyedStoreIC_SloppyArguments_NoTransitionIgnoreOOB;
+ break;
+ case STORE_NO_TRANSITION_HANDLE_COW:
+ builtin_index =
+ Builtins::kKeyedStoreIC_SloppyArguments_NoTransitionHandleCOW;
+ break;
+ default:
+ UNREACHABLE();
+ }
+ return isolate->builtins()->CallableFor(isolate, builtin_index);
+}
+
+Callable CodeFactory::KeyedStoreIC_Slow(Isolate* isolate,
+ KeyedAccessStoreMode mode) {
+ Builtins::Name builtin_index;
+ switch (mode) {
+ case STANDARD_STORE:
+ builtin_index = Builtins::kKeyedStoreIC_Slow_Standard;
+ break;
+ case STORE_AND_GROW_NO_TRANSITION_HANDLE_COW:
+ builtin_index = Builtins::kKeyedStoreIC_Slow_GrowNoTransitionHandleCOW;
+ break;
+ case STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS:
+ builtin_index = Builtins::kKeyedStoreIC_Slow_NoTransitionIgnoreOOB;
+ break;
+ case STORE_NO_TRANSITION_HANDLE_COW:
+ builtin_index = Builtins::kKeyedStoreIC_Slow_NoTransitionHandleCOW;
+ break;
+ default:
+ UNREACHABLE();
+ }
+ return isolate->builtins()->CallableFor(isolate, builtin_index);
+}
+
+Callable CodeFactory::StoreInArrayLiteralIC_Slow(Isolate* isolate,
+ KeyedAccessStoreMode mode) {
+ Builtins::Name builtin_index;
+ switch (mode) {
+ case STANDARD_STORE:
+ builtin_index = Builtins::kStoreInArrayLiteralIC_Slow_Standard;
+ break;
+ case STORE_AND_GROW_NO_TRANSITION_HANDLE_COW:
+ builtin_index =
+ Builtins::kStoreInArrayLiteralIC_Slow_GrowNoTransitionHandleCOW;
+ break;
+ case STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS:
+ builtin_index =
+ Builtins::kStoreInArrayLiteralIC_Slow_NoTransitionIgnoreOOB;
+ break;
+ case STORE_NO_TRANSITION_HANDLE_COW:
+ builtin_index =
+ Builtins::kStoreInArrayLiteralIC_Slow_NoTransitionHandleCOW;
+ break;
+ default:
+ UNREACHABLE();
+ }
+ return isolate->builtins()->CallableFor(isolate, builtin_index);
+}
+
+Callable CodeFactory::ElementsTransitionAndStore(Isolate* isolate,
+ KeyedAccessStoreMode mode) {
+ Builtins::Name builtin_index;
+ switch (mode) {
+ case STANDARD_STORE:
+ builtin_index = Builtins::kElementsTransitionAndStore_Standard;
+ break;
+ case STORE_AND_GROW_NO_TRANSITION_HANDLE_COW:
+ builtin_index =
+ Builtins::kElementsTransitionAndStore_GrowNoTransitionHandleCOW;
+ break;
+ case STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS:
+ builtin_index =
+ Builtins::kElementsTransitionAndStore_NoTransitionIgnoreOOB;
+ break;
+ case STORE_NO_TRANSITION_HANDLE_COW:
+ builtin_index =
+ Builtins::kElementsTransitionAndStore_NoTransitionHandleCOW;
+ break;
+ default:
+ UNREACHABLE();
+ }
+ return isolate->builtins()->CallableFor(isolate, builtin_index);
+}
+
+Callable CodeFactory::StoreFastElementIC(Isolate* isolate,
+ KeyedAccessStoreMode mode) {
+ Builtins::Name builtin_index;
+ switch (mode) {
+ case STANDARD_STORE:
+ builtin_index = Builtins::kStoreFastElementIC_Standard;
+ break;
+ case STORE_AND_GROW_NO_TRANSITION_HANDLE_COW:
+ builtin_index = Builtins::kStoreFastElementIC_GrowNoTransitionHandleCOW;
+ break;
+ case STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS:
+ builtin_index = Builtins::kStoreFastElementIC_NoTransitionIgnoreOOB;
+ break;
+ case STORE_NO_TRANSITION_HANDLE_COW:
+ builtin_index = Builtins::kStoreFastElementIC_NoTransitionHandleCOW;
+ break;
+ default:
+ UNREACHABLE();
+ }
+ return isolate->builtins()->CallableFor(isolate, builtin_index);
}
// static
@@ -180,33 +275,37 @@ Callable CodeFactory::StringAdd(Isolate* isolate, StringAddFlags flags) {
// static
Callable CodeFactory::ResumeGenerator(Isolate* isolate) {
- return Callable(BUILTIN_CODE(isolate, ResumeGeneratorTrampoline),
- ResumeGeneratorDescriptor{});
+ return Builtins::CallableFor(isolate, Builtins::kResumeGeneratorTrampoline);
}
// static
Callable CodeFactory::FrameDropperTrampoline(Isolate* isolate) {
- return Callable(BUILTIN_CODE(isolate, FrameDropperTrampoline),
- FrameDropperTrampolineDescriptor{});
+ return Builtins::CallableFor(isolate, Builtins::kFrameDropperTrampoline);
}
// static
Callable CodeFactory::HandleDebuggerStatement(Isolate* isolate) {
- return Callable(BUILTIN_CODE(isolate, HandleDebuggerStatement),
- ContextOnlyDescriptor{});
+ return Builtins::CallableFor(isolate, Builtins::kHandleDebuggerStatement);
}
// static
Callable CodeFactory::FastNewFunctionContext(Isolate* isolate,
ScopeType scope_type) {
- return Callable(isolate->builtins()->NewFunctionContext(scope_type),
- FastNewFunctionContextDescriptor{});
+ switch (scope_type) {
+ case ScopeType::EVAL_SCOPE:
+ return Builtins::CallableFor(isolate,
+ Builtins::kFastNewFunctionContextEval);
+ case ScopeType::FUNCTION_SCOPE:
+ return Builtins::CallableFor(isolate,
+ Builtins::kFastNewFunctionContextFunction);
+ default:
+ UNREACHABLE();
+ }
}
// static
Callable CodeFactory::ArgumentAdaptor(Isolate* isolate) {
- return Callable(BUILTIN_CODE(isolate, ArgumentsAdaptorTrampoline),
- ArgumentsAdaptorDescriptor{});
+ return Builtins::CallableFor(isolate, Builtins::kArgumentsAdaptorTrampoline);
}
// static
@@ -216,14 +315,12 @@ Callable CodeFactory::Call(Isolate* isolate, ConvertReceiverMode mode) {
// static
Callable CodeFactory::CallWithArrayLike(Isolate* isolate) {
- return Callable(BUILTIN_CODE(isolate, CallWithArrayLike),
- CallWithArrayLikeDescriptor{});
+ return Builtins::CallableFor(isolate, Builtins::kCallWithArrayLike);
}
// static
Callable CodeFactory::CallWithSpread(Isolate* isolate) {
- return Callable(BUILTIN_CODE(isolate, CallWithSpread),
- CallWithSpreadDescriptor{});
+ return Builtins::CallableFor(isolate, Builtins::kCallWithSpread);
}
// static
@@ -234,70 +331,91 @@ Callable CodeFactory::CallFunction(Isolate* isolate, ConvertReceiverMode mode) {
// static
Callable CodeFactory::CallVarargs(Isolate* isolate) {
- return Callable(BUILTIN_CODE(isolate, CallVarargs), CallVarargsDescriptor{});
+ return Builtins::CallableFor(isolate, Builtins::kCallVarargs);
}
// static
Callable CodeFactory::CallForwardVarargs(Isolate* isolate) {
- return Callable(BUILTIN_CODE(isolate, CallForwardVarargs),
- CallForwardVarargsDescriptor{});
+ return Builtins::CallableFor(isolate, Builtins::kCallForwardVarargs);
}
// static
Callable CodeFactory::CallFunctionForwardVarargs(Isolate* isolate) {
- return Callable(BUILTIN_CODE(isolate, CallFunctionForwardVarargs),
- CallForwardVarargsDescriptor{});
+ return Builtins::CallableFor(isolate, Builtins::kCallFunctionForwardVarargs);
}
// static
Callable CodeFactory::Construct(Isolate* isolate) {
- return Callable(BUILTIN_CODE(isolate, Construct), JSTrampolineDescriptor{});
+ return Builtins::CallableFor(isolate, Builtins::kConstruct);
}
// static
Callable CodeFactory::ConstructWithSpread(Isolate* isolate) {
- return Callable(BUILTIN_CODE(isolate, ConstructWithSpread),
- ConstructWithSpreadDescriptor{});
+ return Builtins::CallableFor(isolate, Builtins::kConstructWithSpread);
}
// static
Callable CodeFactory::ConstructFunction(Isolate* isolate) {
- return Callable(BUILTIN_CODE(isolate, ConstructFunction),
- JSTrampolineDescriptor{});
+ return Builtins::CallableFor(isolate, Builtins::kConstructFunction);
}
// static
Callable CodeFactory::ConstructVarargs(Isolate* isolate) {
- return Callable(BUILTIN_CODE(isolate, ConstructVarargs),
- ConstructVarargsDescriptor{});
+ return Builtins::CallableFor(isolate, Builtins::kConstructVarargs);
}
// static
Callable CodeFactory::ConstructForwardVarargs(Isolate* isolate) {
- return Callable(BUILTIN_CODE(isolate, ConstructForwardVarargs),
- ConstructForwardVarargsDescriptor{});
+ return Builtins::CallableFor(isolate, Builtins::kConstructForwardVarargs);
}
// static
Callable CodeFactory::ConstructFunctionForwardVarargs(Isolate* isolate) {
- return Callable(BUILTIN_CODE(isolate, ConstructFunctionForwardVarargs),
- ConstructForwardVarargsDescriptor{});
+ return Builtins::CallableFor(isolate,
+ Builtins::kConstructFunctionForwardVarargs);
}
// static
Callable CodeFactory::InterpreterPushArgsThenCall(
Isolate* isolate, ConvertReceiverMode receiver_mode,
InterpreterPushArgsMode mode) {
- return Callable(
- isolate->builtins()->InterpreterPushArgsThenCall(receiver_mode, mode),
- InterpreterPushArgsThenCallDescriptor{});
+ switch (mode) {
+ case InterpreterPushArgsMode::kArrayFunction:
+ // There is no special-case handling of calls to Array. They will all go
+ // through the kOther case below.
+ UNREACHABLE();
+ case InterpreterPushArgsMode::kWithFinalSpread:
+ return Builtins::CallableFor(
+ isolate, Builtins::kInterpreterPushArgsThenCallWithFinalSpread);
+ case InterpreterPushArgsMode::kOther:
+ switch (receiver_mode) {
+ case ConvertReceiverMode::kNullOrUndefined:
+ return Builtins::CallableFor(
+ isolate, Builtins::kInterpreterPushUndefinedAndArgsThenCall);
+ case ConvertReceiverMode::kNotNullOrUndefined:
+ case ConvertReceiverMode::kAny:
+ return Builtins::CallableFor(isolate,
+ Builtins::kInterpreterPushArgsThenCall);
+ }
+ }
+ UNREACHABLE();
}
// static
Callable CodeFactory::InterpreterPushArgsThenConstruct(
Isolate* isolate, InterpreterPushArgsMode mode) {
- return Callable(isolate->builtins()->InterpreterPushArgsThenConstruct(mode),
- InterpreterPushArgsThenConstructDescriptor{});
+ switch (mode) {
+ case InterpreterPushArgsMode::kArrayFunction:
+ return Builtins::CallableFor(
+ isolate, Builtins::kInterpreterPushArgsThenConstructArrayFunction);
+ case InterpreterPushArgsMode::kWithFinalSpread:
+ return Builtins::CallableFor(
+ isolate, Builtins::kInterpreterPushArgsThenConstructWithFinalSpread);
+ case InterpreterPushArgsMode::kOther:
+ return Builtins::CallableFor(isolate,
+ Builtins::kInterpreterPushArgsThenConstruct);
+ }
+ UNREACHABLE();
}
// static
@@ -316,20 +434,19 @@ Callable CodeFactory::InterpreterCEntry(Isolate* isolate, int result_size) {
// static
Callable CodeFactory::InterpreterOnStackReplacement(Isolate* isolate) {
- return Callable(BUILTIN_CODE(isolate, InterpreterOnStackReplacement),
- ContextOnlyDescriptor{});
+ return Builtins::CallableFor(isolate,
+ Builtins::kInterpreterOnStackReplacement);
}
// static
Callable CodeFactory::ArrayNoArgumentConstructor(
Isolate* isolate, ElementsKind kind,
AllocationSiteOverrideMode override_mode) {
-#define CASE(kind_caps, kind_camel, mode_camel) \
- case kind_caps: \
- return Callable( \
- BUILTIN_CODE(isolate, \
- ArrayNoArgumentConstructor_##kind_camel##_##mode_camel), \
- ArrayNoArgumentConstructorDescriptor{})
+#define CASE(kind_caps, kind_camel, mode_camel) \
+ case kind_caps: \
+ return Builtins::CallableFor( \
+ isolate, \
+ Builtins::kArrayNoArgumentConstructor_##kind_camel##_##mode_camel);
if (override_mode == DONT_OVERRIDE && AllocationSite::ShouldTrack(kind)) {
DCHECK(IsSmiElementsKind(kind));
switch (kind) {
@@ -359,13 +476,11 @@ Callable CodeFactory::ArrayNoArgumentConstructor(
Callable CodeFactory::ArraySingleArgumentConstructor(
Isolate* isolate, ElementsKind kind,
AllocationSiteOverrideMode override_mode) {
-#define CASE(kind_caps, kind_camel, mode_camel) \
- case kind_caps: \
- return Callable( \
- BUILTIN_CODE( \
- isolate, \
- ArraySingleArgumentConstructor_##kind_camel##_##mode_camel), \
- ArraySingleArgumentConstructorDescriptor{})
+#define CASE(kind_caps, kind_camel, mode_camel) \
+ case kind_caps: \
+ return Builtins::CallableFor( \
+ isolate, \
+ Builtins::kArraySingleArgumentConstructor_##kind_camel##_##mode_camel)
if (override_mode == DONT_OVERRIDE && AllocationSite::ShouldTrack(kind)) {
DCHECK(IsSmiElementsKind(kind));
switch (kind) {
@@ -391,39 +506,5 @@ Callable CodeFactory::ArraySingleArgumentConstructor(
#undef CASE
}
-// static
-Callable CodeFactory::InternalArrayNoArgumentConstructor(Isolate* isolate,
- ElementsKind kind) {
- switch (kind) {
- case PACKED_ELEMENTS:
- return Callable(
- BUILTIN_CODE(isolate, InternalArrayNoArgumentConstructor_Packed),
- ArrayNoArgumentConstructorDescriptor{});
- case HOLEY_ELEMENTS:
- return Callable(
- BUILTIN_CODE(isolate, InternalArrayNoArgumentConstructor_Holey),
- ArrayNoArgumentConstructorDescriptor{});
- default:
- UNREACHABLE();
- }
-}
-
-// static
-Callable CodeFactory::InternalArraySingleArgumentConstructor(
- Isolate* isolate, ElementsKind kind) {
- switch (kind) {
- case PACKED_ELEMENTS:
- return Callable(
- BUILTIN_CODE(isolate, InternalArraySingleArgumentConstructor_Packed),
- ArraySingleArgumentConstructorDescriptor{});
- case HOLEY_ELEMENTS:
- return Callable(
- BUILTIN_CODE(isolate, InternalArraySingleArgumentConstructor_Holey),
- ArraySingleArgumentConstructorDescriptor{});
- default:
- UNREACHABLE();
- }
-}
-
} // namespace internal
} // namespace v8
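
The five new selectors above (KeyedStoreIC_SloppyArguments, KeyedStoreIC_Slow, StoreInArrayLiteralIC_Slow, ElementsTransitionAndStore, StoreFastElementIC) are instances of one mapping: a KeyedAccessStoreMode selects one of four per-mode builtins. The shape in isolation, as a sketch (the id enum is a placeholder, not real Builtins::Name values):

    #include <cstdio>

    enum KeyedAccessStoreMode {
      STANDARD_STORE,
      STORE_AND_GROW_NO_TRANSITION_HANDLE_COW,
      STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS,
      STORE_NO_TRANSITION_HANDLE_COW,
    };

    enum BuiltinId {
      kStandard,
      kGrowNoTransitionHandleCOW,
      kNoTransitionIgnoreOOB,
      kNoTransitionHandleCOW,
    };

    // Each selector in code-factory.cc fixes the four ids and switches on mode.
    static BuiltinId SelectByStoreMode(KeyedAccessStoreMode mode,
                                       BuiltinId standard, BuiltinId grow,
                                       BuiltinId ignore_oob,
                                       BuiltinId handle_cow) {
      switch (mode) {
        case STANDARD_STORE:
          return standard;
        case STORE_AND_GROW_NO_TRANSITION_HANDLE_COW:
          return grow;
        case STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS:
          return ignore_oob;
        case STORE_NO_TRANSITION_HANDLE_COW:
          return handle_cow;
      }
      return standard;  // unreachable: all modes handled above
    }

    int main() {
      BuiltinId id = SelectByStoreMode(STORE_NO_TRANSITION_HANDLE_COW, kStandard,
                                       kGrowNoTransitionHandleCOW,
                                       kNoTransitionIgnoreOOB,
                                       kNoTransitionHandleCOW);
      std::printf("selected builtin id %d\n", id);
      return 0;
    }
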
diff --git a/deps/v8/src/code-factory.h b/deps/v8/src/code-factory.h
index 3e8bc3790c..8a4f13e91e 100644
--- a/deps/v8/src/code-factory.h
+++ b/deps/v8/src/code-factory.h
@@ -7,9 +7,9 @@
#include "src/allocation.h"
#include "src/callable.h"
-#include "src/code-stubs.h"
#include "src/globals.h"
#include "src/interface-descriptors.h"
+#include "src/type-hints.h"
namespace v8 {
namespace internal {
@@ -40,6 +40,17 @@ class V8_EXPORT_PRIVATE CodeFactory final {
static Callable StoreOwnIC(Isolate* isolate);
static Callable StoreOwnICInOptimizedCode(Isolate* isolate);
+ static Callable KeyedStoreIC_SloppyArguments(Isolate* isolate,
+ KeyedAccessStoreMode mode);
+ static Callable KeyedStoreIC_Slow(Isolate* isolate,
+ KeyedAccessStoreMode mode);
+ static Callable StoreInArrayLiteralIC_Slow(Isolate* isolate,
+ KeyedAccessStoreMode mode);
+ static Callable ElementsTransitionAndStore(Isolate* isolate,
+ KeyedAccessStoreMode mode);
+ static Callable StoreFastElementIC(Isolate* isolate,
+ KeyedAccessStoreMode mode);
+
static Callable ResumeGenerator(Isolate* isolate);
static Callable FrameDropperTrampoline(Isolate* isolate);
@@ -48,10 +59,7 @@ class V8_EXPORT_PRIVATE CodeFactory final {
static Callable BinaryOperation(Isolate* isolate, Operation op);
static Callable ApiGetter(Isolate* isolate);
- static Callable CallApiCallback(Isolate* isolate, int argc);
-
- // Code stubs. Add methods here as needed to reduce dependency on
- // code-stubs.h.
+ static Callable CallApiCallback(Isolate* isolate);
static Callable NonPrimitiveToPrimitive(
Isolate* isolate, ToPrimitiveHint hint = ToPrimitiveHint::kDefault);
@@ -95,11 +103,6 @@ class V8_EXPORT_PRIVATE CodeFactory final {
static Callable ArraySingleArgumentConstructor(
Isolate* isolate, ElementsKind kind,
AllocationSiteOverrideMode override_mode);
-
- static Callable InternalArrayNoArgumentConstructor(Isolate* isolate,
- ElementsKind kind);
- static Callable InternalArraySingleArgumentConstructor(Isolate* isolate,
- ElementsKind kind);
};
} // namespace internal
diff --git a/deps/v8/src/code-reference.cc b/deps/v8/src/code-reference.cc
index 3716ad04d9..941e69f36d 100644
--- a/deps/v8/src/code-reference.cc
+++ b/deps/v8/src/code-reference.cc
@@ -4,6 +4,7 @@
#include "src/code-reference.h"
+#include "src/globals.h"
#include "src/handles-inl.h"
#include "src/objects-inl.h"
#include "src/wasm/wasm-code-manager.h"
@@ -11,43 +12,91 @@
namespace v8 {
namespace internal {
-Address CodeReference::constant_pool() const {
- return kind_ == JS ? js_code_->constant_pool() : wasm_code_->constant_pool();
-}
-
-Address CodeReference::instruction_start() const {
- return kind_ == JS
- ? js_code_->InstructionStart()
- : reinterpret_cast<Address>(wasm_code_->instructions().start());
-}
-
-Address CodeReference::instruction_end() const {
- return kind_ == JS
- ? js_code_->InstructionEnd()
- : reinterpret_cast<Address>(wasm_code_->instructions().start() +
- wasm_code_->instructions().size());
-}
-
-int CodeReference::instruction_size() const {
- return kind_ == JS ? js_code_->InstructionSize()
- : wasm_code_->instructions().length();
-}
-
-const byte* CodeReference::relocation_start() const {
- return kind_ == JS ? js_code_->relocation_start()
- : wasm_code_->reloc_info().start();
-}
-
-const byte* CodeReference::relocation_end() const {
- return kind_ == JS ? js_code_->relocation_end()
- : wasm_code_->reloc_info().start() +
- wasm_code_->reloc_info().length();
-}
-
-int CodeReference::relocation_size() const {
- return kind_ == JS ? js_code_->relocation_size()
- : wasm_code_->reloc_info().length();
-}
+namespace {
+struct JSOps {
+ Handle<Code> code;
+
+ Address constant_pool() const { return code->constant_pool(); }
+ Address instruction_start() const { return code->InstructionStart(); }
+ Address instruction_end() const { return code->InstructionEnd(); }
+ int instruction_size() const { return code->InstructionSize(); }
+ const byte* relocation_start() const { return code->relocation_start(); }
+ const byte* relocation_end() const { return code->relocation_end(); }
+ int relocation_size() const { return code->relocation_size(); }
+ Address code_comments() const { return code->code_comments(); }
+};
+
+struct WasmOps {
+ const wasm::WasmCode* code;
+
+ Address constant_pool() const { return code->constant_pool(); }
+ Address instruction_start() const {
+ return reinterpret_cast<Address>(code->instructions().start());
+ }
+ Address instruction_end() const {
+ return reinterpret_cast<Address>(code->instructions().start() +
+ code->instructions().size());
+ }
+ int instruction_size() const { return code->instructions().length(); }
+ const byte* relocation_start() const { return code->reloc_info().start(); }
+ const byte* relocation_end() const {
+ return code->reloc_info().start() + code->reloc_info().length();
+ }
+ int relocation_size() const { return code->reloc_info().length(); }
+ Address code_comments() const { return code->code_comments(); }
+};
+
+struct CodeDescOps {
+ const CodeDesc* code_desc;
+
+ Address constant_pool() const {
+ return instruction_start() + code_desc->constant_pool_offset();
+ }
+ Address instruction_start() const {
+ return reinterpret_cast<Address>(code_desc->buffer);
+ }
+ Address instruction_end() const {
+ return instruction_start() + code_desc->instr_size;
+ }
+ int instruction_size() const { return code_desc->instr_size; }
+ const byte* relocation_start() const {
+ return code_desc->buffer + code_desc->buffer_size - code_desc->reloc_size;
+ }
+ const byte* relocation_end() const {
+ return code_desc->buffer + code_desc->buffer_size;
+ }
+ int relocation_size() const { return code_desc->reloc_size; }
+ Address code_comments() const {
+ return instruction_start() + code_desc->code_comments_size;
+ }
+};
+} // namespace
+
+#define DISPATCH(ret, method) \
+ ret CodeReference::method() const { \
+ DCHECK(!is_null()); \
+ switch (kind_) { \
+ case JS: \
+ return JSOps{js_code_}.method(); \
+ case WASM: \
+ return WasmOps{wasm_code_}.method(); \
+ case CODE_DESC: \
+ return CodeDescOps{code_desc_}.method(); \
+ default: \
+ UNREACHABLE(); \
+ } \
+ }
+
+DISPATCH(Address, constant_pool);
+DISPATCH(Address, instruction_start);
+DISPATCH(Address, instruction_end);
+DISPATCH(int, instruction_size);
+DISPATCH(const byte*, relocation_start);
+DISPATCH(const byte*, relocation_end);
+DISPATCH(int, relocation_size);
+DISPATCH(Address, code_comments);
+
+#undef DISPATCH
} // namespace internal
} // namespace v8
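
The rewrite above replaces seven per-method ternaries with small per-kind "ops" structs and one DISPATCH macro over a tagged union. Reduced to its skeleton (illustrative types, two kinds instead of three), the pattern keeps CodeReference copyable by value and avoids virtual dispatch:

    #include <cassert>
    #include <cstdio>

    struct JsOps   { int code; int size() const { return code * 2; } };
    struct WasmOps { int code; int size() const { return code * 3; } };

    class Ref {
     public:
      explicit Ref(JsOps js) : kind_(JS), js_(js) {}
      explicit Ref(WasmOps wasm) : kind_(WASM), wasm_(wasm) {}

    // One macro body per method: switch on the kind, delegate to the ops.
    #define DISPATCH(ret, method)          \
      ret method() const {                 \
        switch (kind_) {                   \
          case JS:   return js_.method();  \
          case WASM: return wasm_.method();\
        }                                  \
        assert(false);                     \
        return ret();                      \
      }
      DISPATCH(int, size)
    #undef DISPATCH

     private:
      enum { JS, WASM } kind_;
      union { JsOps js_; WasmOps wasm_; };  // members are trivially copyable
    };

    int main() {
      std::printf("%d\n", Ref(JsOps{21}).size());    // 42
      std::printf("%d\n", Ref(WasmOps{14}).size());  // 42
      return 0;
    }
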
diff --git a/deps/v8/src/code-reference.h b/deps/v8/src/code-reference.h
index cb4b25a621..7dce2e1857 100644
--- a/deps/v8/src/code-reference.h
+++ b/deps/v8/src/code-reference.h
@@ -12,6 +12,7 @@ namespace v8 {
namespace internal {
class Code;
+struct CodeDesc;
namespace wasm {
class WasmCode;
@@ -19,9 +20,11 @@ class WasmCode;
class CodeReference {
public:
- CodeReference() : kind_(JS), js_code_() {}
+ CodeReference() : kind_(NONE), null_(nullptr) {}
explicit CodeReference(const wasm::WasmCode* wasm_code)
: kind_(WASM), wasm_code_(wasm_code) {}
+ explicit CodeReference(const CodeDesc* code_desc)
+ : kind_(CODE_DESC), code_desc_(code_desc) {}
explicit CodeReference(Handle<Code> js_code) : kind_(JS), js_code_(js_code) {}
Address constant_pool() const;
@@ -31,9 +34,11 @@ class CodeReference {
const byte* relocation_start() const;
const byte* relocation_end() const;
int relocation_size() const;
- bool is_null() const {
- return kind_ == JS ? js_code_.is_null() : wasm_code_ == nullptr;
- }
+ Address code_comments() const;
+
+ bool is_null() const { return kind_ == NONE; }
+ bool is_js() const { return kind_ == JS; }
+ bool is_wasm_code() const { return kind_ == WASM; }
Handle<Code> as_js_code() const {
DCHECK_EQ(JS, kind_);
@@ -46,9 +51,11 @@ class CodeReference {
}
private:
- enum { JS, WASM } kind_;
+ enum { NONE, JS, WASM, CODE_DESC } kind_;
union {
+ std::nullptr_t null_;
const wasm::WasmCode* wasm_code_;
+ const CodeDesc* code_desc_;
Handle<Code> js_code_;
};
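
The default constructor above pairs the new NONE kind with a std::nullptr_t union member, so an empty reference no longer has to default-construct a Handle<Code>. The idiom in isolation (hypothetical Ref type, for illustration only):

    #include <cassert>
    #include <cstddef>

    class Ref {
     public:
      Ref() : kind_(NONE), null_(nullptr) {}          // empty reference
      explicit Ref(const int* p) : kind_(PTR), ptr_(p) {}
      bool is_null() const { return kind_ == NONE; }  // no union member inspected

     private:
      enum { NONE, PTR } kind_;
      union {
        std::nullptr_t null_;  // active member for NONE
        const int* ptr_;       // active member for PTR
      };
    };

    int main() {
      assert(Ref().is_null());
      int x = 1;
      assert(!Ref(&x).is_null());
      return 0;
    }
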
diff --git a/deps/v8/src/code-stub-assembler.cc b/deps/v8/src/code-stub-assembler.cc
index 6a70ee825e..314ad411e1 100644
--- a/deps/v8/src/code-stub-assembler.cc
+++ b/deps/v8/src/code-stub-assembler.cc
@@ -5,11 +5,16 @@
#include "src/code-stub-assembler.h"
#include "src/code-factory.h"
+#include "src/counters.h"
#include "src/frames-inl.h"
#include "src/frames.h"
#include "src/objects/api-callbacks.h"
+#include "src/objects/cell.h"
#include "src/objects/descriptor-array.h"
+#include "src/objects/heap-number.h"
+#include "src/objects/oddball.h"
#include "src/objects/ordered-hash-table-inl.h"
+#include "src/objects/property-cell.h"
#include "src/wasm/wasm-objects.h"
namespace v8 {
@@ -22,7 +27,7 @@ template <class T>
using SloppyTNode = compiler::SloppyTNode<T>;
CodeStubAssembler::CodeStubAssembler(compiler::CodeAssemblerState* state)
- : compiler::CodeAssembler(state) {
+ : compiler::CodeAssembler(state), BaseBuiltinsFromDSLAssembler(state) {
if (DEBUG_BOOL && FLAG_csa_trap_on_node != nullptr) {
HandleBreakOnNode();
}
@@ -109,34 +114,16 @@ void CodeStubAssembler::Check(const BranchGenerator& branch,
Label ok(this);
Label not_ok(this, Label::kDeferred);
if (message != nullptr && FLAG_code_comments) {
- Comment("[ Assert: %s", message);
+ Comment("[ Assert: ", message);
} else {
Comment("[ Assert");
}
branch(&ok, &not_ok);
BIND(&not_ok);
- DCHECK_NOT_NULL(message);
- char chars[1024];
- Vector<char> buffer(chars);
- if (file != nullptr) {
- SNPrintF(buffer, "CSA_ASSERT failed: %s [%s:%d]\n", message, file, line);
- } else {
- SNPrintF(buffer, "CSA_ASSERT failed: %s\n", message);
- }
- Node* message_node = StringConstant(&(buffer[0]));
-
-#ifdef DEBUG
- // Only print the extra nodes in debug builds.
- MaybePrintNodeWithName(this, extra_node1, extra_node1_name);
- MaybePrintNodeWithName(this, extra_node2, extra_node2_name);
- MaybePrintNodeWithName(this, extra_node3, extra_node3_name);
- MaybePrintNodeWithName(this, extra_node4, extra_node4_name);
- MaybePrintNodeWithName(this, extra_node5, extra_node5_name);
-#endif
-
- DebugAbort(message_node);
- Unreachable();
+ FailAssert(message, file, line, extra_node1, extra_node1_name, extra_node2,
+ extra_node2_name, extra_node3, extra_node3_name, extra_node4,
+ extra_node4_name, extra_node5, extra_node5_name);
BIND(&ok);
Comment("] Assert");
@@ -168,6 +155,36 @@ void CodeStubAssembler::FastCheck(TNode<BoolT> condition) {
BIND(&ok);
}
+void CodeStubAssembler::FailAssert(
+ const char* message, const char* file, int line, Node* extra_node1,
+ const char* extra_node1_name, Node* extra_node2,
+ const char* extra_node2_name, Node* extra_node3,
+ const char* extra_node3_name, Node* extra_node4,
+ const char* extra_node4_name, Node* extra_node5,
+ const char* extra_node5_name) {
+ DCHECK_NOT_NULL(message);
+ char chars[1024];
+ Vector<char> buffer(chars);
+ if (file != nullptr) {
+ SNPrintF(buffer, "CSA_ASSERT failed: %s [%s:%d]\n", message, file, line);
+ } else {
+ SNPrintF(buffer, "CSA_ASSERT failed: %s\n", message);
+ }
+ Node* message_node = StringConstant(&(buffer[0]));
+
+#ifdef DEBUG
+ // Only print the extra nodes in debug builds.
+ MaybePrintNodeWithName(this, extra_node1, extra_node1_name);
+ MaybePrintNodeWithName(this, extra_node2, extra_node2_name);
+ MaybePrintNodeWithName(this, extra_node3, extra_node3_name);
+ MaybePrintNodeWithName(this, extra_node4, extra_node4_name);
+ MaybePrintNodeWithName(this, extra_node5, extra_node5_name);
+#endif
+
+ DebugAbort(message_node);
+ Unreachable();
+}
+
Node* CodeStubAssembler::SelectImpl(TNode<BoolT> condition,
const NodeGenerator& true_body,
const NodeGenerator& false_body,
@@ -209,8 +226,8 @@ TNode<Oddball> CodeStubAssembler::SelectBooleanConstant(
}
TNode<Smi> CodeStubAssembler::SelectSmiConstant(SloppyTNode<BoolT> condition,
- Smi* true_value,
- Smi* false_value) {
+ Smi true_value,
+ Smi false_value) {
return SelectConstant<Smi>(condition, SmiConstant(true_value),
SmiConstant(false_value));
}
@@ -219,24 +236,24 @@ TNode<Object> CodeStubAssembler::NoContextConstant() {
return SmiConstant(Context::kNoContext);
}
-#define HEAP_CONSTANT_ACCESSOR(rootIndexName, rootAccessorName, name) \
- compiler::TNode<std::remove_reference<decltype( \
- *std::declval<Heap>().rootAccessorName())>::type> \
- CodeStubAssembler::name##Constant() { \
- return UncheckedCast<std::remove_reference<decltype( \
- *std::declval<Heap>().rootAccessorName())>::type>( \
- LoadRoot(RootIndex::k##rootIndexName)); \
+#define HEAP_CONSTANT_ACCESSOR(rootIndexName, rootAccessorName, name) \
+ compiler::TNode<std::remove_pointer<std::remove_reference<decltype( \
+ std::declval<Heap>().rootAccessorName())>::type>::type> \
+ CodeStubAssembler::name##Constant() { \
+ return UncheckedCast<std::remove_pointer<std::remove_reference<decltype( \
+ std::declval<Heap>().rootAccessorName())>::type>::type>( \
+ LoadRoot(RootIndex::k##rootIndexName)); \
}
HEAP_MUTABLE_IMMOVABLE_OBJECT_LIST(HEAP_CONSTANT_ACCESSOR);
#undef HEAP_CONSTANT_ACCESSOR
-#define HEAP_CONSTANT_ACCESSOR(rootIndexName, rootAccessorName, name) \
- compiler::TNode<std::remove_reference<decltype( \
- *std::declval<ReadOnlyRoots>().rootAccessorName())>::type> \
- CodeStubAssembler::name##Constant() { \
- return UncheckedCast<std::remove_reference<decltype( \
- *std::declval<ReadOnlyRoots>().rootAccessorName())>::type>( \
- LoadRoot(RootIndex::k##rootIndexName)); \
+#define HEAP_CONSTANT_ACCESSOR(rootIndexName, rootAccessorName, name) \
+ compiler::TNode<std::remove_pointer<std::remove_reference<decltype( \
+ std::declval<ReadOnlyRoots>().rootAccessorName())>::type>::type> \
+ CodeStubAssembler::name##Constant() { \
+ return UncheckedCast<std::remove_pointer<std::remove_reference<decltype( \
+ std::declval<ReadOnlyRoots>().rootAccessorName())>::type>::type>( \
+ LoadRoot(RootIndex::k##rootIndexName)); \
}
HEAP_IMMUTABLE_IMMOVABLE_OBJECT_LIST(HEAP_CONSTANT_ACCESSOR);
#undef HEAP_CONSTANT_ACCESSOR
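
The macro now wraps the accessor's decltype in std::remove_pointer instead of dereferencing it, which names the same type whether a root accessor still returns T* or, after the migration, T by value. A compile-time check of that claim (toy Heap types assumed for illustration):

    #include <type_traits>

    struct HeapNumber {};

    struct OldHeap { HeapNumber* nan_value(); };  // accessor returns a pointer
    struct NewHeap { HeapNumber nan_value(); };   // accessor returns a value

    // Old macro spelling: dereference, then strip the reference.
    using OldSpelling = std::remove_reference<
        decltype(*std::declval<OldHeap>().nan_value())>::type;

    // New macro spelling: strip reference, then pointer; no dereference needed.
    template <class T>
    using NewSpelling = typename std::remove_pointer<
        typename std::remove_reference<T>::type>::type;

    static_assert(std::is_same<OldSpelling, HeapNumber>::value,
                  "old spelling, pointer accessor");
    static_assert(std::is_same<
                      NewSpelling<decltype(std::declval<OldHeap>().nan_value())>,
                      HeapNumber>::value,
                  "new spelling still works on a pointer accessor");
    static_assert(std::is_same<
                      NewSpelling<decltype(std::declval<NewHeap>().nan_value())>,
                      HeapNumber>::value,
                  "and on a value accessor, where operator* would not compile");

    int main() { return 0; }
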
@@ -265,14 +282,14 @@ Node* CodeStubAssembler::IntPtrOrSmiConstant(int value, ParameterMode mode) {
bool CodeStubAssembler::IsIntPtrOrSmiConstantZero(Node* test,
ParameterMode mode) {
int32_t constant_test;
- Smi* smi_test;
+ Smi smi_test;
if (mode == INTPTR_PARAMETERS) {
if (ToInt32Constant(test, constant_test) && constant_test == 0) {
return true;
}
} else {
DCHECK_EQ(mode, SMI_PARAMETERS);
- if (ToSmiConstant(test, smi_test) && smi_test->value() == 0) {
+ if (ToSmiConstant(test, &smi_test) && smi_test->value() == 0) {
return true;
}
}
@@ -290,8 +307,8 @@ bool CodeStubAssembler::TryGetIntPtrOrSmiConstantValue(Node* maybe_constant,
}
} else {
DCHECK_EQ(mode, SMI_PARAMETERS);
- Smi* smi_constant;
- if (ToSmiConstant(maybe_constant, smi_constant)) {
+ Smi smi_constant;
+ if (ToSmiConstant(maybe_constant, &smi_constant)) {
*value = Smi::ToInt(smi_constant);
return true;
}
@@ -543,7 +560,7 @@ TNode<Float64T> CodeStubAssembler::Float64Trunc(SloppyTNode<Float64T> x) {
}
TNode<BoolT> CodeStubAssembler::IsValidSmi(TNode<Smi> smi) {
- if (SmiValuesAre31Bits() && kPointerSize == kInt64Size) {
+ if (SmiValuesAre31Bits() && kSystemPointerSize == kInt64Size) {
// Check that the Smi value is properly sign-extended.
TNode<IntPtrT> value = Signed(BitcastTaggedToWord(smi));
return WordEqual(value, ChangeInt32ToIntPtr(TruncateIntPtrToInt32(value)));
@@ -559,9 +576,6 @@ TNode<Smi> CodeStubAssembler::SmiFromInt32(SloppyTNode<Int32T> value) {
TNode<IntPtrT> value_intptr = ChangeInt32ToIntPtr(value);
TNode<Smi> smi =
BitcastWordToTaggedSigned(WordShl(value_intptr, SmiShiftBitsConstant()));
-#if V8_COMPRESS_POINTERS
- CSA_ASSERT(this, IsValidSmi(smi));
-#endif
return smi;
}
@@ -584,16 +598,10 @@ TNode<Smi> CodeStubAssembler::SmiTag(SloppyTNode<IntPtrT> value) {
}
TNode<Smi> smi =
BitcastWordToTaggedSigned(WordShl(value, SmiShiftBitsConstant()));
-#if V8_COMPRESS_POINTERS
- CSA_ASSERT(this, IsValidSmi(smi));
-#endif
return smi;
}
TNode<IntPtrT> CodeStubAssembler::SmiUntag(SloppyTNode<Smi> value) {
-#if V8_COMPRESS_POINTERS
- CSA_ASSERT(this, IsValidSmi(value));
-#endif
intptr_t constant_value;
if (ToIntPtrConstant(value, constant_value)) {
return IntPtrConstant(constant_value >> (kSmiShiftSize + kSmiTagSize));
@@ -618,15 +626,20 @@ TNode<Smi> CodeStubAssembler::SmiMin(TNode<Smi> a, TNode<Smi> b) {
return SelectConstant<Smi>(SmiLessThan(a, b), a, b);
}
+TNode<IntPtrT> CodeStubAssembler::TryIntPtrAdd(TNode<IntPtrT> a,
+ TNode<IntPtrT> b,
+ Label* if_overflow) {
+ TNode<PairT<IntPtrT, BoolT>> pair = IntPtrAddWithOverflow(a, b);
+ TNode<BoolT> overflow = Projection<1>(pair);
+ GotoIf(overflow, if_overflow);
+ return Projection<0>(pair);
+}
+
TNode<Smi> CodeStubAssembler::TrySmiAdd(TNode<Smi> lhs, TNode<Smi> rhs,
Label* if_overflow) {
if (SmiValuesAre32Bits()) {
- TNode<PairT<IntPtrT, BoolT>> pair = IntPtrAddWithOverflow(
- BitcastTaggedToWord(lhs), BitcastTaggedToWord(rhs));
- TNode<BoolT> overflow = Projection<1>(pair);
- GotoIf(overflow, if_overflow);
- TNode<IntPtrT> result = Projection<0>(pair);
- return BitcastWordToTaggedSigned(result);
+ return BitcastWordToTaggedSigned(TryIntPtrAdd(
+ BitcastTaggedToWord(lhs), BitcastTaggedToWord(rhs), if_overflow));
} else {
DCHECK(SmiValuesAre31Bits());
TNode<PairT<Int32T, BoolT>> pair =
@@ -660,42 +673,42 @@ TNode<Smi> CodeStubAssembler::TrySmiSub(TNode<Smi> lhs, TNode<Smi> rhs,
}
}
-TNode<Object> CodeStubAssembler::NumberMax(SloppyTNode<Object> a,
- SloppyTNode<Object> b) {
+TNode<Number> CodeStubAssembler::NumberMax(SloppyTNode<Number> a,
+ SloppyTNode<Number> b) {
// TODO(danno): This could be optimized by specifically handling smi cases.
- VARIABLE(result, MachineRepresentation::kTagged);
+ TVARIABLE(Number, result);
Label done(this), greater_than_equal_a(this), greater_than_equal_b(this);
GotoIfNumberGreaterThanOrEqual(a, b, &greater_than_equal_a);
GotoIfNumberGreaterThanOrEqual(b, a, &greater_than_equal_b);
- result.Bind(NanConstant());
+ result = NanConstant();
Goto(&done);
BIND(&greater_than_equal_a);
- result.Bind(a);
+ result = a;
Goto(&done);
BIND(&greater_than_equal_b);
- result.Bind(b);
+ result = b;
Goto(&done);
BIND(&done);
- return TNode<Object>::UncheckedCast(result.value());
+ return result.value();
}
-TNode<Object> CodeStubAssembler::NumberMin(SloppyTNode<Object> a,
- SloppyTNode<Object> b) {
+TNode<Number> CodeStubAssembler::NumberMin(SloppyTNode<Number> a,
+ SloppyTNode<Number> b) {
// TODO(danno): This could be optimized by specifically handling smi cases.
- VARIABLE(result, MachineRepresentation::kTagged);
+ TVARIABLE(Number, result);
Label done(this), greater_than_equal_a(this), greater_than_equal_b(this);
GotoIfNumberGreaterThanOrEqual(a, b, &greater_than_equal_a);
GotoIfNumberGreaterThanOrEqual(b, a, &greater_than_equal_b);
- result.Bind(NanConstant());
+ result = NanConstant();
Goto(&done);
BIND(&greater_than_equal_a);
- result.Bind(b);
+ result = b;
Goto(&done);
BIND(&greater_than_equal_b);
- result.Bind(a);
+ result = a;
Goto(&done);
BIND(&done);
- return TNode<Object>::UncheckedCast(result.value());
+ return result.value();
}
TNode<IntPtrT> CodeStubAssembler::ConvertToRelativeIndex(
@@ -948,25 +961,33 @@ TNode<BoolT> CodeStubAssembler::TaggedIsPositiveSmi(SloppyTNode<Object> a) {
IntPtrConstant(0));
}
-TNode<BoolT> CodeStubAssembler::WordIsWordAligned(SloppyTNode<WordT> word) {
+TNode<BoolT> CodeStubAssembler::WordIsAligned(SloppyTNode<WordT> word,
+ size_t alignment) {
+ DCHECK(base::bits::IsPowerOfTwo(alignment));
return WordEqual(IntPtrConstant(0),
- WordAnd(word, IntPtrConstant(kPointerSize - 1)));
+ WordAnd(word, IntPtrConstant(alignment - 1)));
}
#if DEBUG
void CodeStubAssembler::Bind(Label* label, AssemblerDebugInfo debug_info) {
CodeAssembler::Bind(label, debug_info);
}
-#else
-void CodeStubAssembler::Bind(Label* label) { CodeAssembler::Bind(label); }
#endif // DEBUG
+void CodeStubAssembler::Bind(Label* label) { CodeAssembler::Bind(label); }
+
TNode<Float64T> CodeStubAssembler::LoadDoubleWithHoleCheck(
TNode<FixedDoubleArray> array, TNode<Smi> index, Label* if_hole) {
return LoadFixedDoubleArrayElement(array, index, MachineType::Float64(), 0,
SMI_PARAMETERS, if_hole);
}
+TNode<Float64T> CodeStubAssembler::LoadDoubleWithHoleCheck(
+ TNode<FixedDoubleArray> array, TNode<IntPtrT> index, Label* if_hole) {
+ return LoadFixedDoubleArrayElement(array, index, MachineType::Float64(), 0,
+ INTPTR_PARAMETERS, if_hole);
+}
+
void CodeStubAssembler::BranchIfPrototypesHaveNoElements(
Node* receiver_map, Label* definitely_no_elements,
Label* possibly_elements) {
@@ -1022,83 +1043,6 @@ void CodeStubAssembler::BranchIfJSReceiver(Node* object, Label* if_true,
Branch(IsJSReceiver(object), if_true, if_false);
}
-TNode<BoolT> CodeStubAssembler::IsFastJSArray(SloppyTNode<Object> object,
- SloppyTNode<Context> context) {
- Label if_true(this), if_false(this, Label::kDeferred), exit(this);
- BranchIfFastJSArray(object, context, &if_true, &if_false);
- TVARIABLE(BoolT, var_result);
- BIND(&if_true);
- {
- var_result = Int32TrueConstant();
- Goto(&exit);
- }
- BIND(&if_false);
- {
- var_result = Int32FalseConstant();
- Goto(&exit);
- }
- BIND(&exit);
- return var_result.value();
-}
-
-TNode<BoolT> CodeStubAssembler::IsFastJSArrayWithNoCustomIteration(
- TNode<Object> object, TNode<Context> context) {
- Label if_false(this, Label::kDeferred), if_fast(this), exit(this);
- TVARIABLE(BoolT, var_result);
- BranchIfFastJSArray(object, context, &if_fast, &if_false, true);
- BIND(&if_fast);
- {
- // Check that the Array.prototype hasn't been modified in a way that would
- // affect iteration.
- Node* protector_cell = LoadRoot(RootIndex::kArrayIteratorProtector);
- DCHECK(isolate()->heap()->array_iterator_protector()->IsPropertyCell());
- var_result =
- WordEqual(LoadObjectField(protector_cell, PropertyCell::kValueOffset),
- SmiConstant(Isolate::kProtectorValid));
- Goto(&exit);
- }
- BIND(&if_false);
- {
- var_result = Int32FalseConstant();
- Goto(&exit);
- }
- BIND(&exit);
- return var_result.value();
-}
-
-void CodeStubAssembler::BranchIfFastJSArray(Node* object, Node* context,
- Label* if_true, Label* if_false,
- bool iteration_only) {
- GotoIfForceSlowPath(if_false);
-
- // Bailout if receiver is a Smi.
- GotoIf(TaggedIsSmi(object), if_false);
-
- Node* map = LoadMap(object);
- GotoIfNot(IsJSArrayMap(map), if_false);
-
- // Bailout if receiver has slow elements.
- Node* elements_kind = LoadMapElementsKind(map);
- GotoIfNot(IsFastElementsKind(elements_kind), if_false);
-
- // Verify that our prototype is the initial array prototype.
- GotoIfNot(IsPrototypeInitialArrayPrototype(context, map), if_false);
-
- if (iteration_only) {
- // If we are only iterating over the array, there is no need to check
- // the NoElements protector if the array is not holey.
- GotoIfNot(IsHoleyFastElementsKind(elements_kind), if_true);
- }
- Branch(IsNoElementsProtectorCellInvalid(), if_false, if_true);
-}
-
-void CodeStubAssembler::BranchIfFastJSArrayForCopy(Node* object, Node* context,
- Label* if_true,
- Label* if_false) {
- GotoIf(IsArraySpeciesProtectorCellInvalid(), if_false);
- BranchIfFastJSArray(object, context, if_true, if_false);
-}
-
void CodeStubAssembler::GotoIfForceSlowPath(Label* if_true) {
#ifdef V8_ENABLE_FORCE_SLOW_PATH
Node* const force_slow_path_addr =
@@ -1122,28 +1066,36 @@ void CodeStubAssembler::GotoIfDebugExecutionModeChecksSideEffects(
if_true);
}
-Node* CodeStubAssembler::AllocateRaw(Node* size_in_bytes, AllocationFlags flags,
- Node* top_address, Node* limit_address) {
- // TODO(jgruber, chromium:848672): TNodeify AllocateRaw.
- // TODO(jgruber, chromium:848672): Call FatalProcessOutOfMemory if this fails.
- {
- intptr_t constant_value;
- if (ToIntPtrConstant(size_in_bytes, constant_value)) {
- CHECK(Internals::IsValidSmi(constant_value));
- CHECK_GT(constant_value, 0);
- } else {
- CSA_CHECK(this,
- IsValidPositiveSmi(UncheckedCast<IntPtrT>(size_in_bytes)));
- }
+TNode<HeapObject> CodeStubAssembler::AllocateRaw(TNode<IntPtrT> size_in_bytes,
+ AllocationFlags flags,
+ TNode<RawPtrT> top_address,
+ TNode<RawPtrT> limit_address) {
+ Label if_out_of_memory(this, Label::kDeferred);
+
+ // TODO(jgruber,jkummerow): Extract the slow paths (= probably everything
+ // but bump pointer allocation) into a builtin to save code space. The
+ // size_in_bytes check may be moved there as well since a non-smi
+ // size_in_bytes probably doesn't fit into the bump pointer region
+ // (double-check that).
+
+ intptr_t size_in_bytes_constant;
+ bool size_in_bytes_is_constant = false;
+ if (ToIntPtrConstant(size_in_bytes, size_in_bytes_constant)) {
+ size_in_bytes_is_constant = true;
+ CHECK(Internals::IsValidSmi(size_in_bytes_constant));
+ CHECK_GT(size_in_bytes_constant, 0);
+ } else {
+ GotoIfNot(IsValidPositiveSmi(size_in_bytes), &if_out_of_memory);
}
- Node* top = Load(MachineType::Pointer(), top_address);
- Node* limit = Load(MachineType::Pointer(), limit_address);
+ TNode<RawPtrT> top =
+ UncheckedCast<RawPtrT>(Load(MachineType::Pointer(), top_address));
+ TNode<RawPtrT> limit =
+ UncheckedCast<RawPtrT>(Load(MachineType::Pointer(), limit_address));
// If there's not enough space, call the runtime.
- VARIABLE(result, MachineRepresentation::kTagged);
- Label runtime_call(this, Label::kDeferred), no_runtime_call(this);
- Label merge_runtime(this, &result);
+ TVARIABLE(Object, result);
+ Label runtime_call(this, Label::kDeferred), no_runtime_call(this), out(this);
bool needs_double_alignment = flags & kDoubleAlignment;
@@ -1151,101 +1103,96 @@ Node* CodeStubAssembler::AllocateRaw(Node* size_in_bytes, AllocationFlags flags,
Label next(this);
GotoIf(IsRegularHeapObjectSize(size_in_bytes), &next);
- Node* runtime_flags = SmiConstant(
+ TNode<Smi> runtime_flags = SmiConstant(
Smi::FromInt(AllocateDoubleAlignFlag::encode(needs_double_alignment) |
AllocateTargetSpace::encode(AllocationSpace::LO_SPACE)));
- Node* const runtime_result =
- CallRuntime(Runtime::kAllocateInTargetSpace, NoContextConstant(),
- SmiTag(size_in_bytes), runtime_flags);
- result.Bind(runtime_result);
- Goto(&merge_runtime);
+ result = CallRuntime(Runtime::kAllocateInTargetSpace, NoContextConstant(),
+ SmiTag(size_in_bytes), runtime_flags);
+ Goto(&out);
BIND(&next);
}
- VARIABLE(adjusted_size, MachineType::PointerRepresentation(), size_in_bytes);
+ TVARIABLE(IntPtrT, adjusted_size, size_in_bytes);
if (needs_double_alignment) {
- Label not_aligned(this), done_alignment(this, &adjusted_size);
-
- Branch(WordAnd(top, IntPtrConstant(kDoubleAlignmentMask)), &not_aligned,
- &done_alignment);
+ Label next(this);
+ GotoIfNot(WordAnd(top, IntPtrConstant(kDoubleAlignmentMask)), &next);
- BIND(&not_aligned);
- Node* not_aligned_size = IntPtrAdd(size_in_bytes, IntPtrConstant(4));
- adjusted_size.Bind(not_aligned_size);
- Goto(&done_alignment);
+ adjusted_size = IntPtrAdd(size_in_bytes, IntPtrConstant(4));
+ Goto(&next);
- BIND(&done_alignment);
+ BIND(&next);
}
- Node* new_top = IntPtrAdd(top, adjusted_size.value());
+ TNode<IntPtrT> new_top =
+ IntPtrAdd(UncheckedCast<IntPtrT>(top), adjusted_size.value());
Branch(UintPtrGreaterThanOrEqual(new_top, limit), &runtime_call,
&no_runtime_call);
BIND(&runtime_call);
- Node* runtime_result;
- if (flags & kPretenured) {
- Node* runtime_flags = SmiConstant(
- Smi::FromInt(AllocateDoubleAlignFlag::encode(needs_double_alignment) |
- AllocateTargetSpace::encode(AllocationSpace::OLD_SPACE)));
- runtime_result =
- CallRuntime(Runtime::kAllocateInTargetSpace, NoContextConstant(),
- SmiTag(size_in_bytes), runtime_flags);
- } else {
- runtime_result = CallRuntime(Runtime::kAllocateInNewSpace,
- NoContextConstant(), SmiTag(size_in_bytes));
+ {
+ if (flags & kPretenured) {
+ TNode<Smi> runtime_flags = SmiConstant(Smi::FromInt(
+ AllocateDoubleAlignFlag::encode(needs_double_alignment) |
+ AllocateTargetSpace::encode(AllocationSpace::OLD_SPACE)));
+ result = CallRuntime(Runtime::kAllocateInTargetSpace, NoContextConstant(),
+ SmiTag(size_in_bytes), runtime_flags);
+ } else {
+ result = CallRuntime(Runtime::kAllocateInNewSpace, NoContextConstant(),
+ SmiTag(size_in_bytes));
+ }
+ Goto(&out);
}
- result.Bind(runtime_result);
- Goto(&merge_runtime);
// When there is enough space, return `top' and bump it up.
BIND(&no_runtime_call);
- Node* no_runtime_result = top;
- StoreNoWriteBarrier(MachineType::PointerRepresentation(), top_address,
- new_top);
+ {
+ StoreNoWriteBarrier(MachineType::PointerRepresentation(), top_address,
+ new_top);
- VARIABLE(address, MachineType::PointerRepresentation(), no_runtime_result);
+ TVARIABLE(IntPtrT, address, UncheckedCast<IntPtrT>(top));
- if (needs_double_alignment) {
- Label needs_filler(this), done_filling(this, &address);
- Branch(IntPtrEqual(adjusted_size.value(), size_in_bytes), &done_filling,
- &needs_filler);
+ if (needs_double_alignment) {
+ Label next(this);
+ GotoIf(IntPtrEqual(adjusted_size.value(), size_in_bytes), &next);
- BIND(&needs_filler);
- // Store a filler and increase the address by kPointerSize.
- StoreNoWriteBarrier(MachineRepresentation::kTagged, top,
- LoadRoot(RootIndex::kOnePointerFillerMap));
- address.Bind(IntPtrAdd(no_runtime_result, IntPtrConstant(4)));
+ // Store a filler and increase the address by 4.
+ StoreNoWriteBarrier(MachineRepresentation::kTagged, top,
+ LoadRoot(RootIndex::kOnePointerFillerMap));
+ address = IntPtrAdd(UncheckedCast<IntPtrT>(top), IntPtrConstant(4));
+ Goto(&next);
- Goto(&done_filling);
+ BIND(&next);
+ }
- BIND(&done_filling);
+ result = BitcastWordToTagged(
+ IntPtrAdd(address.value(), IntPtrConstant(kHeapObjectTag)));
+ Goto(&out);
}
- no_runtime_result = BitcastWordToTagged(
- IntPtrAdd(address.value(), IntPtrConstant(kHeapObjectTag)));
-
- result.Bind(no_runtime_result);
- Goto(&merge_runtime);
+ if (!size_in_bytes_is_constant) {
+ BIND(&if_out_of_memory);
+ CallRuntime(Runtime::kFatalProcessOutOfMemoryInAllocateRaw,
+ NoContextConstant());
+ Unreachable();
+ }
- BIND(&merge_runtime);
- return result.value();
+ BIND(&out);
+ return UncheckedCast<HeapObject>(result.value());
}
-Node* CodeStubAssembler::AllocateRawUnaligned(Node* size_in_bytes,
- AllocationFlags flags,
- Node* top_address,
- Node* limit_address) {
+TNode<HeapObject> CodeStubAssembler::AllocateRawUnaligned(
+ TNode<IntPtrT> size_in_bytes, AllocationFlags flags,
+ TNode<RawPtrT> top_address, TNode<RawPtrT> limit_address) {
DCHECK_EQ(flags & kDoubleAlignment, 0);
return AllocateRaw(size_in_bytes, flags, top_address, limit_address);
}
-Node* CodeStubAssembler::AllocateRawDoubleAligned(Node* size_in_bytes,
- AllocationFlags flags,
- Node* top_address,
- Node* limit_address) {
+TNode<HeapObject> CodeStubAssembler::AllocateRawDoubleAligned(
+ TNode<IntPtrT> size_in_bytes, AllocationFlags flags,
+ TNode<RawPtrT> top_address, TNode<RawPtrT> limit_address) {
#if defined(V8_HOST_ARCH_32_BIT)
return AllocateRaw(size_in_bytes, flags | kDoubleAlignment, top_address,
limit_address);
@@ -1258,61 +1205,74 @@ Node* CodeStubAssembler::AllocateRawDoubleAligned(Node* size_in_bytes,
#endif
}
-Node* CodeStubAssembler::AllocateInNewSpace(Node* size_in_bytes,
- AllocationFlags flags) {
+TNode<HeapObject> CodeStubAssembler::AllocateInNewSpace(
+ TNode<IntPtrT> size_in_bytes, AllocationFlags flags) {
DCHECK(flags == kNone || flags == kDoubleAlignment);
CSA_ASSERT(this, IsRegularHeapObjectSize(size_in_bytes));
return Allocate(size_in_bytes, flags);
}
-Node* CodeStubAssembler::Allocate(Node* size_in_bytes, AllocationFlags flags) {
+TNode<HeapObject> CodeStubAssembler::Allocate(TNode<IntPtrT> size_in_bytes,
+ AllocationFlags flags) {
Comment("Allocate");
bool const new_space = !(flags & kPretenured);
- Node* top_address = ExternalConstant(
+ if (!(flags & kDoubleAlignment) && !(flags & kAllowLargeObjectAllocation)) {
+ return OptimizedAllocate(size_in_bytes, new_space
+ ? PretenureFlag::NOT_TENURED
+ : PretenureFlag::TENURED);
+ }
+ TNode<ExternalReference> top_address = ExternalConstant(
new_space
? ExternalReference::new_space_allocation_top_address(isolate())
: ExternalReference::old_space_allocation_top_address(isolate()));
- DCHECK_EQ(kPointerSize,
+ DCHECK_EQ(kTaggedSize,
ExternalReference::new_space_allocation_limit_address(isolate())
.address() -
ExternalReference::new_space_allocation_top_address(isolate())
.address());
- DCHECK_EQ(kPointerSize,
+ DCHECK_EQ(kTaggedSize,
ExternalReference::old_space_allocation_limit_address(isolate())
.address() -
ExternalReference::old_space_allocation_top_address(isolate())
.address());
- Node* limit_address = IntPtrAdd(top_address, IntPtrConstant(kPointerSize));
+ TNode<IntPtrT> limit_address = IntPtrAdd(
+ ReinterpretCast<IntPtrT>(top_address), IntPtrConstant(kTaggedSize));
if (flags & kDoubleAlignment) {
- return AllocateRawDoubleAligned(size_in_bytes, flags, top_address,
- limit_address);
+ return AllocateRawDoubleAligned(size_in_bytes, flags,
+ ReinterpretCast<RawPtrT>(top_address),
+ ReinterpretCast<RawPtrT>(limit_address));
} else {
- return AllocateRawUnaligned(size_in_bytes, flags, top_address,
- limit_address);
+ return AllocateRawUnaligned(size_in_bytes, flags,
+ ReinterpretCast<RawPtrT>(top_address),
+ ReinterpretCast<RawPtrT>(limit_address));
}
}
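The two DCHECK_EQs above pin down why a single IntPtrAdd suffices to derive limit_address: the isolate stores the limit slot immediately after the top slot. The fast path these helpers emit is a classic bump-pointer allocation; a plain-C++ sketch of its shape (struct and names are stand-ins, not V8 API):

#include <cstddef>
#include <cstdint>

// The top/limit pair addressed via ExternalConstant above, reduced to a
// plain struct: top is the next free address, limit ends the linear area.
struct AllocationArea {
  uintptr_t top;
  uintptr_t limit;
};

// Returns the untagged object start, or 0 when the runtime slow path
// (GC or large-object space) must take over.
uintptr_t TryBumpAllocate(AllocationArea* area, size_t size_in_bytes) {
  uintptr_t new_top = area->top + size_in_bytes;
  if (new_top > area->limit) return 0;
  uintptr_t result = area->top;
  area->top = new_top;
  // The CSA code then tags the result by adding kHeapObjectTag.
  return result;
}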
-Node* CodeStubAssembler::AllocateInNewSpace(int size_in_bytes,
- AllocationFlags flags) {
+TNode<HeapObject> CodeStubAssembler::AllocateInNewSpace(int size_in_bytes,
+ AllocationFlags flags) {
CHECK(flags == kNone || flags == kDoubleAlignment);
DCHECK_LE(size_in_bytes, kMaxRegularHeapObjectSize);
return CodeStubAssembler::Allocate(IntPtrConstant(size_in_bytes), flags);
}
-Node* CodeStubAssembler::Allocate(int size_in_bytes, AllocationFlags flags) {
+TNode<HeapObject> CodeStubAssembler::Allocate(int size_in_bytes,
+ AllocationFlags flags) {
return CodeStubAssembler::Allocate(IntPtrConstant(size_in_bytes), flags);
}
-Node* CodeStubAssembler::InnerAllocate(Node* previous, Node* offset) {
- return BitcastWordToTagged(IntPtrAdd(BitcastTaggedToWord(previous), offset));
+TNode<HeapObject> CodeStubAssembler::InnerAllocate(TNode<HeapObject> previous,
+ TNode<IntPtrT> offset) {
+ return UncheckedCast<HeapObject>(
+ BitcastWordToTagged(IntPtrAdd(BitcastTaggedToWord(previous), offset)));
}
-Node* CodeStubAssembler::InnerAllocate(Node* previous, int offset) {
+TNode<HeapObject> CodeStubAssembler::InnerAllocate(TNode<HeapObject> previous,
+ int offset) {
return InnerAllocate(previous, IntPtrConstant(offset));
}
-Node* CodeStubAssembler::IsRegularHeapObjectSize(Node* size) {
+TNode<BoolT> CodeStubAssembler::IsRegularHeapObjectSize(TNode<IntPtrT> size) {
return UintPtrLessThanOrEqual(size,
IntPtrConstant(kMaxRegularHeapObjectSize));
}
@@ -1371,22 +1331,11 @@ void CodeStubAssembler::BranchIfToBooleanIsTrue(Node* value, Label* if_true,
}
}
-Node* CodeStubAssembler::LoadFromFrame(int offset, MachineType rep) {
- Node* frame_pointer = LoadFramePointer();
- return Load(rep, frame_pointer, IntPtrConstant(offset));
-}
-
Node* CodeStubAssembler::LoadFromParentFrame(int offset, MachineType rep) {
Node* frame_pointer = LoadParentFramePointer();
return Load(rep, frame_pointer, IntPtrConstant(offset));
}
-TNode<JSFunction> CodeStubAssembler::LoadTargetFromFrame() {
- DCHECK(IsJSFunctionCall());
- return CAST(LoadFromFrame(StandardFrameConstants::kFunctionOffset,
- MachineType::TaggedPointer()));
-}
-
Node* CodeStubAssembler::LoadBufferObject(Node* buffer, int offset,
MachineType rep) {
return Load(rep, buffer, IntPtrConstant(offset));
@@ -1409,7 +1358,7 @@ TNode<IntPtrT> CodeStubAssembler::LoadAndUntagObjectField(
SloppyTNode<HeapObject> object, int offset) {
if (SmiValuesAre32Bits()) {
#if V8_TARGET_LITTLE_ENDIAN
- offset += kPointerSize / 2;
+ offset += 4;
#endif
return ChangeInt32ToIntPtr(
LoadObjectField(object, offset, MachineType::Int32()));
@@ -1423,7 +1372,7 @@ TNode<Int32T> CodeStubAssembler::LoadAndUntagToWord32ObjectField(Node* object,
int offset) {
if (SmiValuesAre32Bits()) {
#if V8_TARGET_LITTLE_ENDIAN
- offset += kPointerSize / 2;
+ offset += 4;
#endif
return UncheckedCast<Int32T>(
LoadObjectField(object, offset, MachineType::Int32()));
@@ -1436,7 +1385,7 @@ TNode<Int32T> CodeStubAssembler::LoadAndUntagToWord32ObjectField(Node* object,
TNode<IntPtrT> CodeStubAssembler::LoadAndUntagSmi(Node* base, int index) {
if (SmiValuesAre32Bits()) {
#if V8_TARGET_LITTLE_ENDIAN
- index += kPointerSize / 2;
+ index += 4;
#endif
return ChangeInt32ToIntPtr(
Load(MachineType::Int32(), base, IntPtrConstant(index)));
@@ -1448,36 +1397,36 @@ TNode<IntPtrT> CodeStubAssembler::LoadAndUntagSmi(Node* base, int index) {
TNode<Int32T> CodeStubAssembler::LoadAndUntagToWord32Root(
RootIndex root_index) {
- Node* roots_array_start =
- ExternalConstant(ExternalReference::roots_array_start(isolate()));
- int offset = static_cast<int>(root_index) * kPointerSize;
+ Node* isolate_root =
+ ExternalConstant(ExternalReference::isolate_root(isolate()));
+ int offset = IsolateData::root_slot_offset(root_index);
if (SmiValuesAre32Bits()) {
#if V8_TARGET_LITTLE_ENDIAN
- offset += kPointerSize / 2;
+ offset += 4;
#endif
return UncheckedCast<Int32T>(
- Load(MachineType::Int32(), roots_array_start, IntPtrConstant(offset)));
+ Load(MachineType::Int32(), isolate_root, IntPtrConstant(offset)));
} else {
- return SmiToInt32(Load(MachineType::AnyTagged(), roots_array_start,
- IntPtrConstant(offset)));
+ return SmiToInt32(
+ Load(MachineType::AnyTagged(), isolate_root, IntPtrConstant(offset)));
}
}
-Node* CodeStubAssembler::StoreAndTagSmi(Node* base, int offset, Node* value) {
+void CodeStubAssembler::StoreAndTagSmi(Node* base, int offset, Node* value) {
if (SmiValuesAre32Bits()) {
- int zero_offset = offset + kPointerSize / 2;
+ int zero_offset = offset + 4;
int payload_offset = offset;
#if V8_TARGET_LITTLE_ENDIAN
std::swap(zero_offset, payload_offset);
#endif
StoreNoWriteBarrier(MachineRepresentation::kWord32, base,
IntPtrConstant(zero_offset), Int32Constant(0));
- return StoreNoWriteBarrier(MachineRepresentation::kWord32, base,
- IntPtrConstant(payload_offset),
- TruncateInt64ToInt32(value));
+ StoreNoWriteBarrier(MachineRepresentation::kWord32, base,
+ IntPtrConstant(payload_offset),
+ TruncateInt64ToInt32(value));
} else {
- return StoreNoWriteBarrier(MachineRepresentation::kTaggedSigned, base,
- IntPtrConstant(offset), SmiTag(value));
+ StoreNoWriteBarrier(MachineRepresentation::kTaggedSigned, base,
+ IntPtrConstant(offset), SmiTag(value));
}
}
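The literal 4 that replaces kPointerSize / 2 throughout these helpers encodes the 64-bit Smi layout: with SmiValuesAre32Bits() the payload lives in the upper 32 bits of an 8-byte slot, so on little-endian targets the payload half sits 4 bytes in. A small worked example of that layout (plain C++, illustrative only):

#include <cstdint>
#include <cstring>

// 64-bit Smi encoding: value shifted into the upper 32 bits, lower 32 zero.
int64_t TagSmi(int32_t value) { return static_cast<int64_t>(value) << 32; }

int32_t UntagSmiViaHalfLoad(const int64_t* slot) {
  int32_t payload;
  // Little-endian: the payload half is 4 bytes into the slot, which is the
  // `offset += 4` correction applied by the loads above.
  std::memcpy(&payload, reinterpret_cast<const char*>(slot) + 4,
              sizeof(payload));
  return payload;
}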
@@ -1518,8 +1467,7 @@ TNode<BoolT> CodeStubAssembler::TaggedDoesntHaveInstanceType(
TNode<HeapObject> CodeStubAssembler::LoadFastProperties(
SloppyTNode<JSObject> object) {
CSA_SLOW_ASSERT(this, Word32BinaryNot(IsDictionaryMap(LoadMap(object))));
- TNode<Object> properties =
- LoadObjectField(object, JSObject::kPropertiesOrHashOffset);
+ TNode<Object> properties = LoadJSReceiverPropertiesOrHash(object);
return Select<HeapObject>(TaggedIsSmi(properties),
[=] { return EmptyFixedArrayConstant(); },
[=] { return CAST(properties); });
@@ -1528,18 +1476,12 @@ TNode<HeapObject> CodeStubAssembler::LoadFastProperties(
TNode<HeapObject> CodeStubAssembler::LoadSlowProperties(
SloppyTNode<JSObject> object) {
CSA_SLOW_ASSERT(this, IsDictionaryMap(LoadMap(object)));
- TNode<Object> properties =
- LoadObjectField(object, JSObject::kPropertiesOrHashOffset);
+ TNode<Object> properties = LoadJSReceiverPropertiesOrHash(object);
return Select<HeapObject>(TaggedIsSmi(properties),
[=] { return EmptyPropertyDictionaryConstant(); },
[=] { return CAST(properties); });
}
-TNode<FixedArrayBase> CodeStubAssembler::LoadElements(
- SloppyTNode<JSObject> object) {
- return CAST(LoadObjectField(object, JSObject::kElementsOffset));
-}
-
TNode<Number> CodeStubAssembler::LoadJSArrayLength(SloppyTNode<JSArray> array) {
CSA_ASSERT(this, IsJSArray(array));
return CAST(LoadObjectField(array, JSArray::kLengthOffset));
@@ -1586,6 +1528,13 @@ TNode<IntPtrT> CodeStubAssembler::LoadAndUntagWeakFixedArrayLength(
return LoadAndUntagObjectField(array, WeakFixedArray::kLengthOffset);
}
+TNode<Int32T> CodeStubAssembler::LoadNumberOfDescriptors(
+ TNode<DescriptorArray> array) {
+ return UncheckedCast<Int32T>(
+ LoadObjectField(array, DescriptorArray::kNumberOfDescriptorsOffset,
+ MachineType::Int16()));
+}
+
TNode<Int32T> CodeStubAssembler::LoadMapBitField(SloppyTNode<Map> map) {
CSA_SLOW_ASSERT(this, IsMap(map));
return UncheckedCast<Int32T>(
@@ -1834,14 +1783,12 @@ void CodeStubAssembler::DispatchMaybeObject(TNode<MaybeObject> maybe_object,
GotoIf(TaggedIsSmi(maybe_object), &inner_if_smi);
- GotoIf(WordEqual(BitcastMaybeObjectToWord(maybe_object),
- IntPtrConstant(reinterpret_cast<intptr_t>(
- HeapObjectReference::ClearedValue()))),
- if_cleared);
+ GotoIf(IsCleared(maybe_object), if_cleared);
- GotoIf(WordEqual(WordAnd(BitcastMaybeObjectToWord(maybe_object),
- IntPtrConstant(kHeapObjectTagMask)),
- IntPtrConstant(kHeapObjectTag)),
+ GotoIf(Word32Equal(Word32And(TruncateIntPtrToInt32(
+ BitcastMaybeObjectToWord(maybe_object)),
+ Int32Constant(kHeapObjectTagMask)),
+ Int32Constant(kHeapObjectTag)),
&inner_if_strong);
*extracted =
@@ -1871,19 +1818,20 @@ TNode<HeapObject> CodeStubAssembler::GetHeapObjectIfStrong(
}
TNode<BoolT> CodeStubAssembler::IsWeakOrCleared(TNode<MaybeObject> value) {
- return WordEqual(WordAnd(BitcastMaybeObjectToWord(value),
- IntPtrConstant(kHeapObjectTagMask)),
- IntPtrConstant(kWeakHeapObjectTag));
+ return Word32Equal(
+ Word32And(TruncateIntPtrToInt32(BitcastMaybeObjectToWord(value)),
+ Int32Constant(kHeapObjectTagMask)),
+ Int32Constant(kWeakHeapObjectTag));
}
TNode<BoolT> CodeStubAssembler::IsCleared(TNode<MaybeObject> value) {
- return WordEqual(BitcastMaybeObjectToWord(value),
- IntPtrConstant(kClearedWeakHeapObject));
+ return Word32Equal(TruncateIntPtrToInt32(BitcastMaybeObjectToWord(value)),
+ Int32Constant(kClearedWeakHeapObjectLower32));
}
TNode<BoolT> CodeStubAssembler::IsNotCleared(TNode<MaybeObject> value) {
- return WordNotEqual(BitcastMaybeObjectToWord(value),
- IntPtrConstant(kClearedWeakHeapObject));
+ return Word32NotEqual(TruncateIntPtrToInt32(BitcastMaybeObjectToWord(value)),
+ Int32Constant(kClearedWeakHeapObjectLower32));
}
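Narrowing these checks from word-width to 32-bit compares is sound because the weak/strong tag occupies the low bits of the pointer. A standalone sketch of the encoding these predicates test (tag values per V8's scheme at the time; the cleared encoding is assumed here):

#include <cstdint>

constexpr uint32_t kHeapObjectTagMask = 0b11;  // low two bits carry the tag
constexpr uint32_t kWeakHeapObjectTag = 0b11;  // strong pointers end in 0b01
constexpr uint32_t kClearedLower32 = 0b11;     // assumed cleared-value bits

bool IsWeakOrCleared(uintptr_t maybe_object) {
  // Truncating to 32 bits keeps the tag, mirroring TruncateIntPtrToInt32.
  return (static_cast<uint32_t>(maybe_object) & kHeapObjectTagMask) ==
         kWeakHeapObjectTag;
}

bool IsCleared(uintptr_t maybe_object) {
  return static_cast<uint32_t>(maybe_object) == kClearedLower32;
}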
TNode<HeapObject> CodeStubAssembler::GetHeapObjectAssumeWeak(
@@ -1925,59 +1873,80 @@ TNode<MaybeObject> CodeStubAssembler::MakeWeak(TNode<HeapObject> value) {
WordOr(BitcastTaggedToWord(value), IntPtrConstant(kWeakHeapObjectTag))));
}
+template <>
+TNode<IntPtrT> CodeStubAssembler::LoadArrayLength(TNode<FixedArray> array) {
+ return LoadAndUntagFixedArrayBaseLength(array);
+}
+
+template <>
+TNode<IntPtrT> CodeStubAssembler::LoadArrayLength(TNode<WeakFixedArray> array) {
+ return LoadAndUntagWeakFixedArrayLength(array);
+}
+
+template <>
+TNode<IntPtrT> CodeStubAssembler::LoadArrayLength(TNode<PropertyArray> array) {
+ return LoadPropertyArrayLength(array);
+}
+
+template <>
+TNode<IntPtrT> CodeStubAssembler::LoadArrayLength(
+ TNode<DescriptorArray> array) {
+ return IntPtrMul(ChangeInt32ToIntPtr(LoadNumberOfDescriptors(array)),
+ IntPtrConstant(DescriptorArray::kEntrySize));
+}
+
+template <>
+TNode<IntPtrT> CodeStubAssembler::LoadArrayLength(
+ TNode<TransitionArray> array) {
+ return LoadAndUntagWeakFixedArrayLength(array);
+}
+
+template <typename Array>
TNode<MaybeObject> CodeStubAssembler::LoadArrayElement(
- SloppyTNode<HeapObject> array, int array_header_size, Node* index_node,
+ TNode<Array> array, int array_header_size, Node* index_node,
int additional_offset, ParameterMode parameter_mode,
LoadSensitivity needs_poisoning) {
CSA_ASSERT(this, IntPtrGreaterThanOrEqual(
ParameterToIntPtr(index_node, parameter_mode),
IntPtrConstant(0)));
- DCHECK_EQ(additional_offset % kPointerSize, 0);
+ DCHECK(IsAligned(additional_offset, kTaggedSize));
int32_t header_size = array_header_size + additional_offset - kHeapObjectTag;
TNode<IntPtrT> offset = ElementOffsetFromIndex(index_node, HOLEY_ELEMENTS,
parameter_mode, header_size);
- STATIC_ASSERT(FixedArrayBase::kLengthOffset == WeakFixedArray::kLengthOffset);
- STATIC_ASSERT(FixedArrayBase::kLengthOffset ==
- PropertyArray::kLengthAndHashOffset);
- // Check that index_node + additional_offset <= object.length.
- // TODO(cbruni): Use proper LoadXXLength helpers
- CSA_ASSERT(
- this,
- IsOffsetInBounds(
- offset,
- Select<IntPtrT>(
- IsPropertyArray(array),
- [=] {
- TNode<IntPtrT> length_and_hash = LoadAndUntagObjectField(
- array, PropertyArray::kLengthAndHashOffset);
- return TNode<IntPtrT>::UncheckedCast(
- DecodeWord<PropertyArray::LengthField>(length_and_hash));
- },
- [=] {
- return LoadAndUntagObjectField(array,
- FixedArrayBase::kLengthOffset);
- }),
- FixedArray::kHeaderSize));
+ CSA_ASSERT(this, IsOffsetInBounds(offset, LoadArrayLength(array),
+ array_header_size));
return UncheckedCast<MaybeObject>(
Load(MachineType::AnyTagged(), array, offset, needs_poisoning));
}
+template TNode<MaybeObject>
+CodeStubAssembler::LoadArrayElement<TransitionArray>(TNode<TransitionArray>,
+ int, Node*, int,
+ ParameterMode,
+ LoadSensitivity);
+
+template TNode<MaybeObject>
+CodeStubAssembler::LoadArrayElement<DescriptorArray>(TNode<DescriptorArray>,
+ int, Node*, int,
+ ParameterMode,
+ LoadSensitivity);
+
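The pattern introduced above, a per-type LoadArrayLength specialization feeding the templated LoadArrayElement plus explicit instantiations that keep the definition in the .cc file, reduces to ordinary C++ along these lines (types and sizes are stand-ins):

struct FixedArrayLike { int length; };
struct DescriptorArrayLike { int number_of_descriptors; };

template <typename Array>
int LoadArrayLength(const Array&);

template <>
int LoadArrayLength(const FixedArrayLike& a) { return a.length; }
template <>
int LoadArrayLength(const DescriptorArrayLike& a) {
  return a.number_of_descriptors * 3;  // kEntrySize-style scaling, assumed 3
}

template <typename Array>
bool IsIndexInBounds(const Array& a, int index) {
  return 0 <= index && index < LoadArrayLength(a);
}

// Explicit instantiations keep the template body out of the header, as with
// the TransitionArray/DescriptorArray instantiations emitted above.
template bool IsIndexInBounds<FixedArrayLike>(const FixedArrayLike&, int);
template bool IsIndexInBounds<DescriptorArrayLike>(const DescriptorArrayLike&,
                                                   int);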
void CodeStubAssembler::FixedArrayBoundsCheck(TNode<FixedArrayBase> array,
Node* index,
int additional_offset,
ParameterMode parameter_mode) {
if (!FLAG_fixed_array_bounds_checks) return;
- DCHECK_EQ(0, additional_offset % kPointerSize);
+ DCHECK(IsAligned(additional_offset, kTaggedSize));
if (parameter_mode == ParameterMode::SMI_PARAMETERS) {
TNode<Smi> effective_index;
- Smi* constant_index;
- bool index_is_constant = ToSmiConstant(index, constant_index);
+ Smi constant_index;
+ bool index_is_constant = ToSmiConstant(index, &constant_index);
if (index_is_constant) {
effective_index = SmiConstant(Smi::ToInt(constant_index) +
- additional_offset / kPointerSize);
+ additional_offset / kTaggedSize);
} else if (additional_offset != 0) {
effective_index =
- SmiAdd(CAST(index), SmiConstant(additional_offset / kPointerSize));
+ SmiAdd(CAST(index), SmiConstant(additional_offset / kTaggedSize));
} else {
effective_index = CAST(index);
}
@@ -1986,7 +1955,7 @@ void CodeStubAssembler::FixedArrayBoundsCheck(TNode<FixedArrayBase> array,
// IntPtrAdd does constant-folding automatically.
TNode<IntPtrT> effective_index =
IntPtrAdd(UncheckedCast<IntPtrT>(index),
- IntPtrConstant(additional_offset / kPointerSize));
+ IntPtrConstant(additional_offset / kTaggedSize));
CSA_CHECK(this, UintPtrLessThan(effective_index,
LoadAndUntagFixedArrayBaseLength(array)));
}
@@ -2005,12 +1974,10 @@ TNode<Object> CodeStubAssembler::LoadFixedArrayElement(
}
TNode<Object> CodeStubAssembler::LoadPropertyArrayElement(
- SloppyTNode<PropertyArray> object, SloppyTNode<IntPtrT> index) {
+ TNode<PropertyArray> object, SloppyTNode<IntPtrT> index) {
int additional_offset = 0;
ParameterMode parameter_mode = INTPTR_PARAMETERS;
LoadSensitivity needs_poisoning = LoadSensitivity::kSafe;
- STATIC_ASSERT(PropertyArray::kHeaderSize == FixedArray::kHeaderSize);
-
return CAST(LoadArrayElement(object, PropertyArray::kHeaderSize, index,
additional_offset, parameter_mode,
needs_poisoning));
@@ -2048,13 +2015,13 @@ Node* CodeStubAssembler::LoadFixedBigInt64ArrayElementAsTagged(
Load(MachineType::UintPtr(), data_pointer, offset));
TNode<IntPtrT> low = UncheckedCast<IntPtrT>(
Load(MachineType::UintPtr(), data_pointer,
- Int32Add(offset, Int32Constant(kPointerSize))));
+ Int32Add(offset, Int32Constant(kSystemPointerSize))));
#else
TNode<IntPtrT> low = UncheckedCast<IntPtrT>(
Load(MachineType::UintPtr(), data_pointer, offset));
TNode<IntPtrT> high = UncheckedCast<IntPtrT>(
Load(MachineType::UintPtr(), data_pointer,
- Int32Add(offset, Int32Constant(kPointerSize))));
+ Int32Add(offset, Int32Constant(kSystemPointerSize))));
#endif
return BigIntFromInt32Pair(low, high);
}
@@ -2064,7 +2031,7 @@ TNode<BigInt> CodeStubAssembler::BigIntFromInt32Pair(TNode<IntPtrT> low,
TNode<IntPtrT> high) {
DCHECK(!Is64());
TVARIABLE(BigInt, var_result);
- TVARIABLE(WordT, var_sign, IntPtrConstant(BigInt::SignBits::encode(false)));
+ TVARIABLE(Word32T, var_sign, Int32Constant(BigInt::SignBits::encode(false)));
TVARIABLE(IntPtrT, var_high, high);
TVARIABLE(IntPtrT, var_low, low);
Label high_zero(this), negative(this), allocate_one_digit(this),
@@ -2080,7 +2047,7 @@ TNode<BigInt> CodeStubAssembler::BigIntFromInt32Pair(TNode<IntPtrT> low,
BIND(&negative);
{
- var_sign = IntPtrConstant(BigInt::SignBits::encode(true));
+ var_sign = Int32Constant(BigInt::SignBits::encode(true));
// We must negate the value by computing "0 - (high|low)", performing
// both parts of the subtraction separately and manually taking care
// of the carry bit (which is 1 iff low != 0).
@@ -2102,8 +2069,8 @@ TNode<BigInt> CodeStubAssembler::BigIntFromInt32Pair(TNode<IntPtrT> low,
{
var_result = AllocateRawBigInt(IntPtrConstant(1));
StoreBigIntBitfield(var_result.value(),
- WordOr(var_sign.value(),
- IntPtrConstant(BigInt::LengthBits::encode(1))));
+ Word32Or(var_sign.value(),
+ Int32Constant(BigInt::LengthBits::encode(1))));
StoreBigIntDigit(var_result.value(), 0, Unsigned(var_low.value()));
Goto(&done);
}
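The carry handling described in the comment of the `negative` branch can be checked with plain 32-bit arithmetic; a worked example, where unsigned wraparound does the two's-complement work:

#include <cassert>
#include <cstdint>

// Negate a 64-bit value held as two 32-bit halves: the borrow into the
// high word is 1 iff the low word is nonzero, as the comment above states.
void NegateInt64Pair(uint32_t* low, uint32_t* high) {
  uint32_t borrow = (*low != 0) ? 1u : 0u;
  *low = 0u - *low;
  *high = 0u - *high - borrow;
}

int main() {
  uint32_t low = 0, high = 1;    // represents 1 << 32
  NegateInt64Pair(&low, &high);  // expect high:low == 0xFFFFFFFF:00000000
  assert(low == 0 && high == 0xFFFFFFFFu);
  low = 1; high = 0;             // represents 1
  NegateInt64Pair(&low, &high);  // expect all ones, i.e. -1
  assert(low == 0xFFFFFFFFu && high == 0xFFFFFFFFu);
}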
@@ -2112,8 +2079,8 @@ TNode<BigInt> CodeStubAssembler::BigIntFromInt32Pair(TNode<IntPtrT> low,
{
var_result = AllocateRawBigInt(IntPtrConstant(2));
StoreBigIntBitfield(var_result.value(),
- WordOr(var_sign.value(),
- IntPtrConstant(BigInt::LengthBits::encode(2))));
+ Word32Or(var_sign.value(),
+ Int32Constant(BigInt::LengthBits::encode(2))));
StoreBigIntDigit(var_result.value(), 0, Unsigned(var_low.value()));
StoreBigIntDigit(var_result.value(), 1, Unsigned(var_high.value()));
Goto(&done);
@@ -2139,8 +2106,8 @@ TNode<BigInt> CodeStubAssembler::BigIntFromInt64(TNode<IntPtrT> value) {
BIND(&if_positive);
{
StoreBigIntBitfield(var_result.value(),
- IntPtrConstant(BigInt::SignBits::encode(false) |
- BigInt::LengthBits::encode(1)));
+ Int32Constant(BigInt::SignBits::encode(false) |
+ BigInt::LengthBits::encode(1)));
StoreBigIntDigit(var_result.value(), 0, Unsigned(value));
Goto(&done);
}
@@ -2148,8 +2115,8 @@ TNode<BigInt> CodeStubAssembler::BigIntFromInt64(TNode<IntPtrT> value) {
BIND(&if_negative);
{
StoreBigIntBitfield(var_result.value(),
- IntPtrConstant(BigInt::SignBits::encode(true) |
- BigInt::LengthBits::encode(1)));
+ Int32Constant(BigInt::SignBits::encode(true) |
+ BigInt::LengthBits::encode(1)));
StoreBigIntDigit(var_result.value(), 0,
Unsigned(IntPtrSub(IntPtrConstant(0), value)));
Goto(&done);
@@ -2179,13 +2146,13 @@ Node* CodeStubAssembler::LoadFixedBigUint64ArrayElementAsTagged(
Load(MachineType::UintPtr(), data_pointer, offset));
TNode<UintPtrT> low = UncheckedCast<UintPtrT>(
Load(MachineType::UintPtr(), data_pointer,
- Int32Add(offset, Int32Constant(kPointerSize))));
+ Int32Add(offset, Int32Constant(kSystemPointerSize))));
#else
TNode<UintPtrT> low = UncheckedCast<UintPtrT>(
Load(MachineType::UintPtr(), data_pointer, offset));
TNode<UintPtrT> high = UncheckedCast<UintPtrT>(
Load(MachineType::UintPtr(), data_pointer,
- Int32Add(offset, Int32Constant(kPointerSize))));
+ Int32Add(offset, Int32Constant(kSystemPointerSize))));
#endif
return BigIntFromUint32Pair(low, high);
}
@@ -2367,27 +2334,22 @@ TNode<MaybeObject> CodeStubAssembler::LoadFeedbackVectorSlot(
Load(MachineType::AnyTagged(), object, offset));
}
+template <typename Array>
TNode<Int32T> CodeStubAssembler::LoadAndUntagToWord32ArrayElement(
- SloppyTNode<HeapObject> object, int array_header_size, Node* index_node,
+ TNode<Array> object, int array_header_size, Node* index_node,
int additional_offset, ParameterMode parameter_mode) {
CSA_SLOW_ASSERT(this, MatchesParameterMode(index_node, parameter_mode));
- DCHECK_EQ(additional_offset % kPointerSize, 0);
+ DCHECK(IsAligned(additional_offset, kTaggedSize));
int endian_correction = 0;
#if V8_TARGET_LITTLE_ENDIAN
- if (SmiValuesAre32Bits()) endian_correction = kPointerSize / 2;
+ if (SmiValuesAre32Bits()) endian_correction = 4;
#endif
int32_t header_size = array_header_size + additional_offset - kHeapObjectTag +
endian_correction;
Node* offset = ElementOffsetFromIndex(index_node, HOLEY_ELEMENTS,
parameter_mode, header_size);
- STATIC_ASSERT(FixedArrayBase::kLengthOffset == WeakFixedArray::kLengthOffset);
- // Check that index_node + additional_offset <= object.length.
- // TODO(cbruni): Use proper LoadXXLength helpers
- CSA_ASSERT(this,
- IsOffsetInBounds(
- offset,
- LoadAndUntagObjectField(object, FixedArrayBase::kLengthOffset),
- FixedArray::kHeaderSize + endian_correction));
+ CSA_ASSERT(this, IsOffsetInBounds(offset, LoadArrayLength(object),
+ array_header_size + endian_correction));
if (SmiValuesAre32Bits()) {
return UncheckedCast<Int32T>(Load(MachineType::Int32(), object, offset));
} else {
@@ -2396,7 +2358,7 @@ TNode<Int32T> CodeStubAssembler::LoadAndUntagToWord32ArrayElement(
}
TNode<Int32T> CodeStubAssembler::LoadAndUntagToWord32FixedArrayElement(
- SloppyTNode<HeapObject> object, Node* index_node, int additional_offset,
+ TNode<FixedArray> object, Node* index_node, int additional_offset,
ParameterMode parameter_mode) {
CSA_SLOW_ASSERT(this, IsFixedArraySubclass(object));
return LoadAndUntagToWord32ArrayElement(object, FixedArray::kHeaderSize,
@@ -2416,7 +2378,7 @@ TNode<Float64T> CodeStubAssembler::LoadFixedDoubleArrayElement(
MachineType machine_type, int additional_offset,
ParameterMode parameter_mode, Label* if_hole) {
CSA_ASSERT(this, IsFixedDoubleArray(object));
- DCHECK_EQ(additional_offset % kPointerSize, 0);
+ DCHECK(IsAligned(additional_offset, kTaggedSize));
CSA_SLOW_ASSERT(this, MatchesParameterMode(index_node, parameter_mode));
int32_t header_size =
FixedDoubleArray::kHeaderSize + additional_offset - kHeapObjectTag;
@@ -2526,17 +2488,15 @@ TNode<Object> CodeStubAssembler::LoadContextElement(
TNode<Object> CodeStubAssembler::LoadContextElement(
SloppyTNode<Context> context, SloppyTNode<IntPtrT> slot_index) {
- Node* offset =
- ElementOffsetFromIndex(slot_index, PACKED_ELEMENTS, INTPTR_PARAMETERS,
- Context::kHeaderSize - kHeapObjectTag);
+ Node* offset = ElementOffsetFromIndex(
+ slot_index, PACKED_ELEMENTS, INTPTR_PARAMETERS, Context::SlotOffset(0));
return UncheckedCast<Object>(Load(MachineType::AnyTagged(), context, offset));
}
TNode<Object> CodeStubAssembler::LoadContextElement(TNode<Context> context,
TNode<Smi> slot_index) {
- Node* offset =
- ElementOffsetFromIndex(slot_index, PACKED_ELEMENTS, SMI_PARAMETERS,
- Context::kHeaderSize - kHeapObjectTag);
+ Node* offset = ElementOffsetFromIndex(slot_index, PACKED_ELEMENTS,
+ SMI_PARAMETERS, Context::SlotOffset(0));
return UncheckedCast<Object>(Load(MachineType::AnyTagged(), context, offset));
}
@@ -2550,9 +2510,8 @@ void CodeStubAssembler::StoreContextElement(SloppyTNode<Context> context,
void CodeStubAssembler::StoreContextElement(SloppyTNode<Context> context,
SloppyTNode<IntPtrT> slot_index,
SloppyTNode<Object> value) {
- Node* offset =
- IntPtrAdd(TimesPointerSize(slot_index),
- IntPtrConstant(Context::kHeaderSize - kHeapObjectTag));
+ Node* offset = IntPtrAdd(TimesTaggedSize(slot_index),
+ IntPtrConstant(Context::SlotOffset(0)));
Store(context, offset, value);
}
@@ -2677,10 +2636,8 @@ Node* CodeStubAssembler::LoadJSFunctionPrototype(Node* function,
return var_result.value();
}
-Node* CodeStubAssembler::LoadSharedFunctionInfoBytecodeArray(Node* shared) {
- CSA_ASSERT(this, TaggedIsNotSmi(shared));
- CSA_ASSERT(this, IsSharedFunctionInfo(shared));
-
+TNode<BytecodeArray> CodeStubAssembler::LoadSharedFunctionInfoBytecodeArray(
+ SloppyTNode<SharedFunctionInfo> shared) {
Node* function_data =
LoadObjectField(shared, SharedFunctionInfo::kFunctionDataOffset);
@@ -2694,7 +2651,7 @@ Node* CodeStubAssembler::LoadSharedFunctionInfoBytecodeArray(Node* shared) {
Goto(&done);
BIND(&done);
- return var_result.value();
+ return CAST(var_result.value());
}
void CodeStubAssembler::StoreObjectByteNoWriteBarrier(TNode<HeapObject> object,
@@ -2716,73 +2673,74 @@ void CodeStubAssembler::StoreMutableHeapNumberValue(
MachineRepresentation::kFloat64);
}
-Node* CodeStubAssembler::StoreObjectField(
- Node* object, int offset, Node* value) {
+void CodeStubAssembler::StoreObjectField(Node* object, int offset,
+ Node* value) {
DCHECK_NE(HeapObject::kMapOffset, offset); // Use StoreMap instead.
- return Store(object, IntPtrConstant(offset - kHeapObjectTag), value);
+
+ OptimizedStoreField(MachineRepresentation::kTagged,
+ UncheckedCast<HeapObject>(object), offset, value,
+ WriteBarrierKind::kFullWriteBarrier);
}
-Node* CodeStubAssembler::StoreObjectField(Node* object, Node* offset,
- Node* value) {
+void CodeStubAssembler::StoreObjectField(Node* object, Node* offset,
+ Node* value) {
int const_offset;
if (ToInt32Constant(offset, const_offset)) {
- return StoreObjectField(object, const_offset, value);
+ StoreObjectField(object, const_offset, value);
+ } else {
+ Store(object, IntPtrSub(offset, IntPtrConstant(kHeapObjectTag)), value);
}
- return Store(object, IntPtrSub(offset, IntPtrConstant(kHeapObjectTag)),
- value);
}
-Node* CodeStubAssembler::StoreObjectFieldNoWriteBarrier(
+void CodeStubAssembler::StoreObjectFieldNoWriteBarrier(
Node* object, int offset, Node* value, MachineRepresentation rep) {
- return StoreNoWriteBarrier(rep, object,
- IntPtrConstant(offset - kHeapObjectTag), value);
+ OptimizedStoreField(rep, UncheckedCast<HeapObject>(object), offset, value,
+ WriteBarrierKind::kNoWriteBarrier);
}
-Node* CodeStubAssembler::StoreObjectFieldNoWriteBarrier(
+void CodeStubAssembler::StoreObjectFieldNoWriteBarrier(
Node* object, Node* offset, Node* value, MachineRepresentation rep) {
int const_offset;
if (ToInt32Constant(offset, const_offset)) {
return StoreObjectFieldNoWriteBarrier(object, const_offset, value, rep);
}
- return StoreNoWriteBarrier(
- rep, object, IntPtrSub(offset, IntPtrConstant(kHeapObjectTag)), value);
+ StoreNoWriteBarrier(rep, object,
+ IntPtrSub(offset, IntPtrConstant(kHeapObjectTag)), value);
}
-Node* CodeStubAssembler::StoreMap(Node* object, Node* map) {
- CSA_SLOW_ASSERT(this, IsMap(map));
- return StoreWithMapWriteBarrier(
- object, IntPtrConstant(HeapObject::kMapOffset - kHeapObjectTag), map);
+void CodeStubAssembler::StoreMap(Node* object, Node* map) {
+ OptimizedStoreMap(UncheckedCast<HeapObject>(object), CAST(map));
}
-Node* CodeStubAssembler::StoreMapNoWriteBarrier(Node* object,
- RootIndex map_root_index) {
- return StoreMapNoWriteBarrier(object, LoadRoot(map_root_index));
+void CodeStubAssembler::StoreMapNoWriteBarrier(Node* object,
+ RootIndex map_root_index) {
+ StoreMapNoWriteBarrier(object, LoadRoot(map_root_index));
}
-Node* CodeStubAssembler::StoreMapNoWriteBarrier(Node* object, Node* map) {
+void CodeStubAssembler::StoreMapNoWriteBarrier(Node* object, Node* map) {
CSA_SLOW_ASSERT(this, IsMap(map));
- return StoreNoWriteBarrier(
- MachineRepresentation::kTagged, object,
- IntPtrConstant(HeapObject::kMapOffset - kHeapObjectTag), map);
+ OptimizedStoreField(MachineRepresentation::kTaggedPointer,
+ UncheckedCast<HeapObject>(object), HeapObject::kMapOffset,
+ map, WriteBarrierKind::kNoWriteBarrier);
}
-Node* CodeStubAssembler::StoreObjectFieldRoot(Node* object, int offset,
- RootIndex root_index) {
- if (Heap::RootIsImmortalImmovable(root_index)) {
+void CodeStubAssembler::StoreObjectFieldRoot(Node* object, int offset,
+ RootIndex root_index) {
+ if (RootsTable::IsImmortalImmovable(root_index)) {
return StoreObjectFieldNoWriteBarrier(object, offset, LoadRoot(root_index));
} else {
return StoreObjectField(object, offset, LoadRoot(root_index));
}
}
-Node* CodeStubAssembler::StoreJSArrayLength(TNode<JSArray> array,
- TNode<Smi> length) {
- return StoreObjectFieldNoWriteBarrier(array, JSArray::kLengthOffset, length);
+void CodeStubAssembler::StoreJSArrayLength(TNode<JSArray> array,
+ TNode<Smi> length) {
+ StoreObjectFieldNoWriteBarrier(array, JSArray::kLengthOffset, length);
}
-Node* CodeStubAssembler::StoreElements(TNode<Object> object,
- TNode<FixedArrayBase> elements) {
- return StoreObjectField(object, JSObject::kElementsOffset, elements);
+void CodeStubAssembler::StoreElements(TNode<Object> object,
+ TNode<FixedArrayBase> elements) {
+ StoreObjectField(object, JSObject::kElementsOffset, elements);
}
void CodeStubAssembler::StoreFixedArrayOrPropertyArrayElement(
@@ -2793,15 +2751,17 @@ void CodeStubAssembler::StoreFixedArrayOrPropertyArrayElement(
CSA_SLOW_ASSERT(this, MatchesParameterMode(index_node, parameter_mode));
DCHECK(barrier_mode == SKIP_WRITE_BARRIER ||
barrier_mode == UPDATE_WRITE_BARRIER);
- DCHECK_EQ(additional_offset % kPointerSize, 0);
- STATIC_ASSERT(FixedArray::kHeaderSize == PropertyArray::kHeaderSize);
+ DCHECK(IsAligned(additional_offset, kTaggedSize));
+ STATIC_ASSERT(static_cast<int>(FixedArray::kHeaderSize) ==
+ static_cast<int>(PropertyArray::kHeaderSize));
int header_size =
FixedArray::kHeaderSize + additional_offset - kHeapObjectTag;
Node* offset = ElementOffsetFromIndex(index_node, HOLEY_ELEMENTS,
parameter_mode, header_size);
- STATIC_ASSERT(FixedArrayBase::kLengthOffset == WeakFixedArray::kLengthOffset);
- STATIC_ASSERT(FixedArrayBase::kLengthOffset ==
- PropertyArray::kLengthAndHashOffset);
+ STATIC_ASSERT(static_cast<int>(FixedArrayBase::kLengthOffset) ==
+ static_cast<int>(WeakFixedArray::kLengthOffset));
+ STATIC_ASSERT(static_cast<int>(FixedArrayBase::kLengthOffset) ==
+ static_cast<int>(PropertyArray::kLengthAndHashOffset));
// Check that index_node + additional_offset <= object.length.
// TODO(cbruni): Use proper LoadXXLength helpers
CSA_ASSERT(
@@ -2841,15 +2801,15 @@ void CodeStubAssembler::StoreFixedDoubleArrayElement(
StoreNoWriteBarrier(rep, object, offset, value);
}
-Node* CodeStubAssembler::StoreFeedbackVectorSlot(Node* object,
- Node* slot_index_node,
- Node* value,
- WriteBarrierMode barrier_mode,
- int additional_offset,
- ParameterMode parameter_mode) {
+void CodeStubAssembler::StoreFeedbackVectorSlot(Node* object,
+ Node* slot_index_node,
+ Node* value,
+ WriteBarrierMode barrier_mode,
+ int additional_offset,
+ ParameterMode parameter_mode) {
CSA_SLOW_ASSERT(this, IsFeedbackVector(object));
CSA_SLOW_ASSERT(this, MatchesParameterMode(slot_index_node, parameter_mode));
- DCHECK_EQ(additional_offset % kPointerSize, 0);
+ DCHECK(IsAligned(additional_offset, kTaggedSize));
DCHECK(barrier_mode == SKIP_WRITE_BARRIER ||
barrier_mode == UPDATE_WRITE_BARRIER);
int header_size =
@@ -2861,10 +2821,9 @@ Node* CodeStubAssembler::StoreFeedbackVectorSlot(Node* object,
IsOffsetInBounds(offset, LoadFeedbackVectorLength(CAST(object)),
FeedbackVector::kHeaderSize));
if (barrier_mode == SKIP_WRITE_BARRIER) {
- return StoreNoWriteBarrier(MachineRepresentation::kTagged, object, offset,
- value);
+ StoreNoWriteBarrier(MachineRepresentation::kTagged, object, offset, value);
} else {
- return Store(object, offset, value);
+ Store(object, offset, value);
}
}
@@ -2880,14 +2839,14 @@ void CodeStubAssembler::EnsureArrayLengthWritable(TNode<Map> map,
int length_index = JSArray::kLengthDescriptorIndex;
#ifdef DEBUG
- TNode<Name> maybe_length = CAST(LoadWeakFixedArrayElement(
- descriptors, DescriptorArray::ToKeyIndex(length_index)));
+ TNode<Name> maybe_length =
+ LoadKeyByDescriptorEntry(descriptors, length_index);
CSA_ASSERT(this,
WordEqual(maybe_length, LoadRoot(RootIndex::klength_string)));
#endif
- TNode<Uint32T> details = LoadDetailsByKeyIndex(
- descriptors, IntPtrConstant(DescriptorArray::ToKeyIndex(length_index)));
+ TNode<Uint32T> details =
+ LoadDetailsByDescriptorEntry(descriptors, length_index);
GotoIf(IsSetWord32(details, PropertyDetails::kAttributesReadOnlyMask),
bailout);
}
@@ -2933,7 +2892,7 @@ TNode<Smi> CodeStubAssembler::BuildAppendJSArray(ElementsKind kind,
TVariable<IntPtrT>* arg_index,
Label* bailout) {
CSA_SLOW_ASSERT(this, IsJSArray(array));
- Comment("BuildAppendJSArray: %s", ElementsKindToString(kind));
+ Comment("BuildAppendJSArray: ", ElementsKindToString(kind));
Label pre_bailout(this);
Label success(this);
TVARIABLE(Smi, var_tagged_length);
@@ -3000,7 +2959,7 @@ void CodeStubAssembler::TryStoreArrayElement(ElementsKind kind,
void CodeStubAssembler::BuildAppendJSArray(ElementsKind kind, Node* array,
Node* value, Label* bailout) {
CSA_SLOW_ASSERT(this, IsJSArray(array));
- Comment("BuildAppendJSArray: %s", ElementsKindToString(kind));
+ Comment("BuildAppendJSArray: ", ElementsKindToString(kind));
ParameterMode mode = OptimalParameterMode();
VARIABLE(var_length, OptimalParameterRepresentation(),
TaggedToParameter(LoadFastJSArrayLength(array), mode));
@@ -3034,15 +2993,15 @@ Node* CodeStubAssembler::LoadCellValue(Node* cell) {
return LoadObjectField(cell, Cell::kValueOffset);
}
-Node* CodeStubAssembler::StoreCellValue(Node* cell, Node* value,
- WriteBarrierMode mode) {
+void CodeStubAssembler::StoreCellValue(Node* cell, Node* value,
+ WriteBarrierMode mode) {
CSA_SLOW_ASSERT(this, HasInstanceType(cell, CELL_TYPE));
DCHECK(mode == SKIP_WRITE_BARRIER || mode == UPDATE_WRITE_BARRIER);
if (mode == UPDATE_WRITE_BARRIER) {
- return StoreObjectField(cell, Cell::kValueOffset, value);
+ StoreObjectField(cell, Cell::kValueOffset, value);
} else {
- return StoreObjectFieldNoWriteBarrier(cell, Cell::kValueOffset, value);
+ StoreObjectFieldNoWriteBarrier(cell, Cell::kValueOffset, value);
}
}
@@ -3094,7 +3053,9 @@ TNode<MutableHeapNumber> CodeStubAssembler::AllocateMutableHeapNumberWithValue(
TNode<BigInt> CodeStubAssembler::AllocateBigInt(TNode<IntPtrT> length) {
TNode<BigInt> result = AllocateRawBigInt(length);
- StoreBigIntBitfield(result, WordShl(length, BigInt::LengthBits::kShift));
+ StoreBigIntBitfield(result,
+ Word32Shl(TruncateIntPtrToInt32(length),
+ Int32Constant(BigInt::LengthBits::kShift)));
return result;
}
@@ -3103,35 +3064,42 @@ TNode<BigInt> CodeStubAssembler::AllocateRawBigInt(TNode<IntPtrT> length) {
// applicability is required, a large-object check must be added.
CSA_ASSERT(this, UintPtrLessThan(length, IntPtrConstant(3)));
- TNode<IntPtrT> size = IntPtrAdd(IntPtrConstant(BigInt::kHeaderSize),
- Signed(WordShl(length, kPointerSizeLog2)));
+ TNode<IntPtrT> size =
+ IntPtrAdd(IntPtrConstant(BigInt::kHeaderSize),
+ Signed(WordShl(length, kSystemPointerSizeLog2)));
Node* raw_result = Allocate(size, kNone);
StoreMapNoWriteBarrier(raw_result, RootIndex::kBigIntMap);
+ if (FIELD_SIZE(BigInt::kOptionalPaddingOffset) != 0) {
+ DCHECK_EQ(4, FIELD_SIZE(BigInt::kOptionalPaddingOffset));
+ StoreObjectFieldNoWriteBarrier(raw_result, BigInt::kOptionalPaddingOffset,
+ Int32Constant(0),
+ MachineRepresentation::kWord32);
+ }
return UncheckedCast<BigInt>(raw_result);
}
void CodeStubAssembler::StoreBigIntBitfield(TNode<BigInt> bigint,
- TNode<WordT> bitfield) {
+ TNode<Word32T> bitfield) {
StoreObjectFieldNoWriteBarrier(bigint, BigInt::kBitfieldOffset, bitfield,
- MachineType::PointerRepresentation());
+ MachineRepresentation::kWord32);
}
void CodeStubAssembler::StoreBigIntDigit(TNode<BigInt> bigint, int digit_index,
TNode<UintPtrT> digit) {
StoreObjectFieldNoWriteBarrier(
- bigint, BigInt::kDigitsOffset + digit_index * kPointerSize, digit,
+ bigint, BigInt::kDigitsOffset + digit_index * kSystemPointerSize, digit,
UintPtrT::kMachineRepresentation);
}
-TNode<WordT> CodeStubAssembler::LoadBigIntBitfield(TNode<BigInt> bigint) {
- return UncheckedCast<WordT>(
- LoadObjectField(bigint, BigInt::kBitfieldOffset, MachineType::UintPtr()));
+TNode<Word32T> CodeStubAssembler::LoadBigIntBitfield(TNode<BigInt> bigint) {
+ return UncheckedCast<Word32T>(
+ LoadObjectField(bigint, BigInt::kBitfieldOffset, MachineType::Uint32()));
}
TNode<UintPtrT> CodeStubAssembler::LoadBigIntDigit(TNode<BigInt> bigint,
int digit_index) {
return UncheckedCast<UintPtrT>(LoadObjectField(
- bigint, BigInt::kDigitsOffset + digit_index * kPointerSize,
+ bigint, BigInt::kDigitsOffset + digit_index * kSystemPointerSize,
MachineType::UintPtr()));
}
@@ -3142,7 +3110,7 @@ TNode<String> CodeStubAssembler::AllocateSeqOneByteString(
return CAST(LoadRoot(RootIndex::kempty_string));
}
Node* result = Allocate(SeqOneByteString::SizeFor(length), flags);
- DCHECK(Heap::RootIsImmortalImmovable(RootIndex::kOneByteStringMap));
+ DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kOneByteStringMap));
StoreMapNoWriteBarrier(result, RootIndex::kOneByteStringMap);
StoreObjectFieldNoWriteBarrier(result, SeqOneByteString::kLengthOffset,
Uint32Constant(length),
@@ -3173,15 +3141,16 @@ TNode<String> CodeStubAssembler::AllocateSeqOneByteString(
Node* raw_size = GetArrayAllocationSize(
Signed(ChangeUint32ToWord(length)), UINT8_ELEMENTS, INTPTR_PARAMETERS,
SeqOneByteString::kHeaderSize + kObjectAlignmentMask);
- Node* size = WordAnd(raw_size, IntPtrConstant(~kObjectAlignmentMask));
+ TNode<WordT> size = WordAnd(raw_size, IntPtrConstant(~kObjectAlignmentMask));
Branch(IntPtrLessThanOrEqual(size, IntPtrConstant(kMaxRegularHeapObjectSize)),
&if_sizeissmall, &if_notsizeissmall);
BIND(&if_sizeissmall);
{
// Just allocate the SeqOneByteString in new space.
- Node* result = AllocateInNewSpace(size, flags);
- DCHECK(Heap::RootIsImmortalImmovable(RootIndex::kOneByteStringMap));
+ TNode<Object> result =
+ AllocateInNewSpace(UncheckedCast<IntPtrT>(size), flags);
+ DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kOneByteStringMap));
StoreMapNoWriteBarrier(result, RootIndex::kOneByteStringMap);
StoreObjectFieldNoWriteBarrier(result, SeqOneByteString::kLengthOffset,
length, MachineRepresentation::kWord32);
@@ -3218,7 +3187,7 @@ TNode<String> CodeStubAssembler::AllocateSeqTwoByteString(
return CAST(LoadRoot(RootIndex::kempty_string));
}
Node* result = Allocate(SeqTwoByteString::SizeFor(length), flags);
- DCHECK(Heap::RootIsImmortalImmovable(RootIndex::kStringMap));
+ DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kStringMap));
StoreMapNoWriteBarrier(result, RootIndex::kStringMap);
StoreObjectFieldNoWriteBarrier(result, SeqTwoByteString::kLengthOffset,
Uint32Constant(length),
@@ -3243,15 +3212,16 @@ TNode<String> CodeStubAssembler::AllocateSeqTwoByteString(
Node* raw_size = GetArrayAllocationSize(
Signed(ChangeUint32ToWord(length)), UINT16_ELEMENTS, INTPTR_PARAMETERS,
SeqOneByteString::kHeaderSize + kObjectAlignmentMask);
- Node* size = WordAnd(raw_size, IntPtrConstant(~kObjectAlignmentMask));
+ TNode<WordT> size = WordAnd(raw_size, IntPtrConstant(~kObjectAlignmentMask));
Branch(IntPtrLessThanOrEqual(size, IntPtrConstant(kMaxRegularHeapObjectSize)),
&if_sizeissmall, &if_notsizeissmall);
BIND(&if_sizeissmall);
{
// Just allocate the SeqTwoByteString in new space.
- Node* result = AllocateInNewSpace(size, flags);
- DCHECK(Heap::RootIsImmortalImmovable(RootIndex::kStringMap));
+ TNode<Object> result =
+ AllocateInNewSpace(UncheckedCast<IntPtrT>(size), flags);
+ DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kStringMap));
StoreMapNoWriteBarrier(result, RootIndex::kStringMap);
StoreObjectFieldNoWriteBarrier(result, SeqTwoByteString::kLengthOffset,
length, MachineRepresentation::kWord32);
@@ -3288,7 +3258,7 @@ TNode<String> CodeStubAssembler::AllocateSlicedString(RootIndex map_root_index,
DCHECK(map_root_index == RootIndex::kSlicedOneByteStringMap ||
map_root_index == RootIndex::kSlicedStringMap);
Node* result = Allocate(SlicedString::kSize);
- DCHECK(Heap::RootIsImmortalImmovable(map_root_index));
+ DCHECK(RootsTable::IsImmortalImmovable(map_root_index));
StoreMapNoWriteBarrier(result, map_root_index);
StoreObjectFieldNoWriteBarrier(result, SlicedString::kHashFieldOffset,
Int32Constant(String::kEmptyHashField),
@@ -3322,7 +3292,7 @@ TNode<String> CodeStubAssembler::AllocateConsString(RootIndex map_root_index,
DCHECK(map_root_index == RootIndex::kConsOneByteStringMap ||
map_root_index == RootIndex::kConsStringMap);
Node* result = Allocate(ConsString::kSize, flags);
- DCHECK(Heap::RootIsImmortalImmovable(map_root_index));
+ DCHECK(RootsTable::IsImmortalImmovable(map_root_index));
StoreMapNoWriteBarrier(result, map_root_index);
StoreObjectFieldNoWriteBarrier(result, ConsString::kLengthOffset, length,
MachineRepresentation::kWord32);
@@ -3429,14 +3399,14 @@ TNode<NameDictionary> CodeStubAssembler::AllocateNameDictionaryWithCapacity(
CSA_ASSERT(this, WordIsPowerOfTwo(capacity));
CSA_ASSERT(this, IntPtrGreaterThan(capacity, IntPtrConstant(0)));
TNode<IntPtrT> length = EntryToIndex<NameDictionary>(capacity);
- TNode<WordT> store_size = IntPtrAdd(
- TimesPointerSize(length), IntPtrConstant(NameDictionary::kHeaderSize));
+ TNode<IntPtrT> store_size = IntPtrAdd(
+ TimesTaggedSize(length), IntPtrConstant(NameDictionary::kHeaderSize));
TNode<NameDictionary> result =
UncheckedCast<NameDictionary>(AllocateInNewSpace(store_size));
Comment("Initialize NameDictionary");
// Initialize FixedArray fields.
- DCHECK(Heap::RootIsImmortalImmovable(RootIndex::kNameDictionaryMap));
+ DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kNameDictionaryMap));
StoreMapNoWriteBarrier(result, RootIndex::kNameDictionaryMap);
StoreObjectFieldNoWriteBarrier(result, FixedArray::kLengthOffset,
SmiFromIntPtr(length));
@@ -3491,12 +3461,12 @@ Node* CodeStubAssembler::AllocateOrderedHashTable() {
static const int kBucketCount = kCapacity / CollectionType::kLoadFactor;
static const int kDataTableLength = kCapacity * CollectionType::kEntrySize;
static const int kFixedArrayLength =
- CollectionType::kHashTableStartIndex + kBucketCount + kDataTableLength;
+ CollectionType::HashTableStartIndex() + kBucketCount + kDataTableLength;
static const int kDataTableStartIndex =
- CollectionType::kHashTableStartIndex + kBucketCount;
+ CollectionType::HashTableStartIndex() + kBucketCount;
STATIC_ASSERT(base::bits::IsPowerOfTwo(kCapacity));
- STATIC_ASSERT(kCapacity <= CollectionType::kMaxCapacity);
+ STATIC_ASSERT(kCapacity <= CollectionType::MaxCapacity());
// Allocate the table and add the proper map.
const ElementsKind elements_kind = HOLEY_ELEMENTS;
@@ -3509,21 +3479,21 @@ Node* CodeStubAssembler::AllocateOrderedHashTable() {
// Initialize the OrderedHashTable fields.
const WriteBarrierMode barrier_mode = SKIP_WRITE_BARRIER;
- StoreFixedArrayElement(table, CollectionType::kNumberOfElementsIndex,
+ StoreFixedArrayElement(table, CollectionType::NumberOfElementsIndex(),
SmiConstant(0), barrier_mode);
- StoreFixedArrayElement(table, CollectionType::kNumberOfDeletedElementsIndex,
+ StoreFixedArrayElement(table, CollectionType::NumberOfDeletedElementsIndex(),
SmiConstant(0), barrier_mode);
- StoreFixedArrayElement(table, CollectionType::kNumberOfBucketsIndex,
+ StoreFixedArrayElement(table, CollectionType::NumberOfBucketsIndex(),
SmiConstant(kBucketCount), barrier_mode);
// Fill the buckets with kNotFound.
TNode<Smi> not_found = SmiConstant(CollectionType::kNotFound);
- STATIC_ASSERT(CollectionType::kHashTableStartIndex ==
- CollectionType::kNumberOfBucketsIndex + 1);
- STATIC_ASSERT((CollectionType::kHashTableStartIndex + kBucketCount) ==
+ STATIC_ASSERT(CollectionType::HashTableStartIndex() ==
+ CollectionType::NumberOfBucketsIndex() + 1);
+ STATIC_ASSERT((CollectionType::HashTableStartIndex() + kBucketCount) ==
kDataTableStartIndex);
for (int i = 0; i < kBucketCount; i++) {
- StoreFixedArrayElement(table, CollectionType::kHashTableStartIndex + i,
+ StoreFixedArrayElement(table, CollectionType::HashTableStartIndex() + i,
not_found, barrier_mode);
}
@@ -3548,10 +3518,10 @@ TNode<CollectionType> CodeStubAssembler::AllocateSmallOrderedHashTable(
capacity, IntPtrConstant(CollectionType::kMaxCapacity)));
TNode<IntPtrT> data_table_start_offset =
- IntPtrConstant(CollectionType::kDataTableStartOffset);
+ IntPtrConstant(CollectionType::DataTableStartOffset());
TNode<IntPtrT> data_table_size = IntPtrMul(
- capacity, IntPtrConstant(CollectionType::kEntrySize * kPointerSize));
+ capacity, IntPtrConstant(CollectionType::kEntrySize * kTaggedSize));
TNode<Int32T> hash_table_size =
Int32Div(TruncateIntPtrToInt32(capacity),
@@ -3567,28 +3537,28 @@ TNode<CollectionType> CodeStubAssembler::AllocateSmallOrderedHashTable(
IntPtrAdd(hash_table_start_offset, hash_table_and_chain_table_size);
TNode<IntPtrT> total_size_word_aligned =
- IntPtrAdd(total_size, IntPtrConstant(kPointerSize - 1));
+ IntPtrAdd(total_size, IntPtrConstant(kTaggedSize - 1));
total_size_word_aligned = ChangeInt32ToIntPtr(
Int32Div(TruncateIntPtrToInt32(total_size_word_aligned),
- Int32Constant(kPointerSize)));
+ Int32Constant(kTaggedSize)));
total_size_word_aligned =
- UncheckedCast<IntPtrT>(TimesPointerSize(total_size_word_aligned));
+ UncheckedCast<IntPtrT>(TimesTaggedSize(total_size_word_aligned));
// Allocate the table and add the proper map.
TNode<Map> small_ordered_hash_map =
CAST(LoadRoot(CollectionType::GetMapRootIndex()));
- TNode<Object> table_obj = CAST(AllocateInNewSpace(total_size_word_aligned));
+ TNode<Object> table_obj = AllocateInNewSpace(total_size_word_aligned);
StoreMapNoWriteBarrier(table_obj, small_ordered_hash_map);
TNode<CollectionType> table = UncheckedCast<CollectionType>(table_obj);
// Initialize the SmallOrderedHashTable fields.
StoreObjectByteNoWriteBarrier(
- table, CollectionType::kNumberOfBucketsOffset,
+ table, CollectionType::NumberOfBucketsOffset(),
Word32And(Int32Constant(0xFF), hash_table_size));
- StoreObjectByteNoWriteBarrier(table, CollectionType::kNumberOfElementsOffset,
+ StoreObjectByteNoWriteBarrier(table, CollectionType::NumberOfElementsOffset(),
Int32Constant(0));
StoreObjectByteNoWriteBarrier(
- table, CollectionType::kNumberOfDeletedElementsOffset, Int32Constant(0));
+ table, CollectionType::NumberOfDeletedElementsOffset(), Int32Constant(0));
TNode<IntPtrT> table_address =
IntPtrSub(BitcastTaggedToWord(table), IntPtrConstant(kHeapObjectTag));
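The add/divide/multiply sequence earlier in this hunk rounds total_size up to the next kTaggedSize boundary; with a power-of-two size it is equivalent to the usual mask form, e.g.:

#include <cstddef>

constexpr size_t kTaggedSizeExample = 8;  // 8 bytes on 64-bit targets here

constexpr size_t RoundUpToTagged(size_t total_size) {
  return (total_size + kTaggedSizeExample - 1) & ~(kTaggedSizeExample - 1);
}

static_assert(RoundUpToTagged(13) == 16, "rounds up");
static_assert(RoundUpToTagged(16) == 16, "already aligned");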
@@ -3628,12 +3598,12 @@ void CodeStubAssembler::FindOrderedHashTableEntry(
Variable* entry_start_position, Label* entry_found, Label* not_found) {
// Get the index of the bucket.
Node* const number_of_buckets = SmiUntag(CAST(LoadFixedArrayElement(
- CAST(table), CollectionType::kNumberOfBucketsIndex)));
+ CAST(table), CollectionType::NumberOfBucketsIndex())));
Node* const bucket =
WordAnd(hash, IntPtrSub(number_of_buckets, IntPtrConstant(1)));
Node* const first_entry = SmiUntag(CAST(LoadFixedArrayElement(
CAST(table), bucket,
- CollectionType::kHashTableStartIndex * kPointerSize)));
+ CollectionType::HashTableStartIndex() * kTaggedSize)));
// Walk the bucket chain.
Node* entry_start;
@@ -3652,14 +3622,15 @@ void CodeStubAssembler::FindOrderedHashTableEntry(
// Make sure the entry index is within range.
CSA_ASSERT(
- this, UintPtrLessThan(
- var_entry.value(),
- SmiUntag(SmiAdd(
- CAST(LoadFixedArrayElement(
- CAST(table), CollectionType::kNumberOfElementsIndex)),
- CAST(LoadFixedArrayElement(
- CAST(table),
- CollectionType::kNumberOfDeletedElementsIndex))))));
+ this,
+ UintPtrLessThan(
+ var_entry.value(),
+ SmiUntag(SmiAdd(
+ CAST(LoadFixedArrayElement(
+ CAST(table), CollectionType::NumberOfElementsIndex())),
+ CAST(LoadFixedArrayElement(
+ CAST(table),
+ CollectionType::NumberOfDeletedElementsIndex()))))));
// Compute the index of the entry relative to kHashTableStartIndex.
entry_start =
@@ -3670,7 +3641,7 @@ void CodeStubAssembler::FindOrderedHashTableEntry(
// Load the key from the entry.
Node* const candidate_key = LoadFixedArrayElement(
CAST(table), entry_start,
- CollectionType::kHashTableStartIndex * kPointerSize);
+ CollectionType::HashTableStartIndex() * kTaggedSize);
key_compare(candidate_key, &if_key_found, &continue_next_entry);
@@ -3678,8 +3649,8 @@ void CodeStubAssembler::FindOrderedHashTableEntry(
// Load the index of the next entry in the bucket chain.
var_entry.Bind(SmiUntag(CAST(LoadFixedArrayElement(
CAST(table), entry_start,
- (CollectionType::kHashTableStartIndex + CollectionType::kChainOffset) *
- kPointerSize))));
+ (CollectionType::HashTableStartIndex() + CollectionType::kChainOffset) *
+ kTaggedSize))));
Goto(&loop);
}
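FindOrderedHashTableEntry walks a chained hash table laid out in a flat FixedArray: mask the hash into a bucket, read the bucket's first entry, then follow per-entry chain links until the key matches or kNotFound ends the chain. A self-contained sketch over plain vectors (layout accessors simplified):

#include <vector>

constexpr int kNotFound = -1;

struct OrderedHashSetLike {
  int number_of_buckets;     // a power of two
  std::vector<int> buckets;  // bucket -> first entry index, or kNotFound
  std::vector<int> keys;     // entry -> key
  std::vector<int> chains;   // entry -> next entry in the bucket's chain
};

int FindEntry(const OrderedHashSetLike& table, int key, int hash) {
  int bucket = hash & (table.number_of_buckets - 1);
  for (int entry = table.buckets[bucket]; entry != kNotFound;
       entry = table.chains[entry]) {
    if (table.keys[entry] == key) return entry;
  }
  return kNotFound;
}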
@@ -3701,8 +3672,8 @@ template void CodeStubAssembler::FindOrderedHashTableEntry<OrderedHashSet>(
Node* CodeStubAssembler::AllocateStruct(Node* map, AllocationFlags flags) {
Comment("AllocateStruct");
CSA_ASSERT(this, IsMap(map));
- Node* size = TimesPointerSize(LoadMapInstanceSizeInWords(map));
- Node* object = Allocate(size, flags);
+ TNode<IntPtrT> size = TimesTaggedSize(LoadMapInstanceSizeInWords(map));
+ TNode<Object> object = Allocate(size, flags);
StoreMapNoWriteBarrier(object, map);
InitializeStructBody(object, map, size, Struct::kHeaderSize);
return object;
@@ -3729,8 +3700,9 @@ Node* CodeStubAssembler::AllocateJSObjectFromMap(
CSA_ASSERT(this, Word32BinaryNot(IsJSFunctionMap(map)));
CSA_ASSERT(this, Word32BinaryNot(InstanceTypeEqual(LoadMapInstanceType(map),
JS_GLOBAL_OBJECT_TYPE)));
- Node* instance_size = TimesPointerSize(LoadMapInstanceSizeInWords(map));
- Node* object = AllocateInNewSpace(instance_size, flags);
+ TNode<IntPtrT> instance_size =
+ TimesTaggedSize(LoadMapInstanceSizeInWords(map));
+ TNode<Object> object = AllocateInNewSpace(instance_size, flags);
StoreMapNoWriteBarrier(object, map);
InitializeJSObjectFromMap(object, map, instance_size, properties, elements,
slack_tracking_mode);
@@ -3808,7 +3780,7 @@ void CodeStubAssembler::InitializeJSObjectBodyWithSlackTracking(
// The object still has in-object slack, therefore the |used_or_unused|
// field contains the "used" value.
- Node* used_size = TimesPointerSize(ChangeUint32ToWord(
+ Node* used_size = TimesTaggedSize(ChangeUint32ToWord(
LoadObjectField(map, Map::kUsedOrUnusedInstanceSizeInWordsOffset,
MachineType::Uint8())));
@@ -3843,85 +3815,135 @@ void CodeStubAssembler::StoreFieldsNoWriteBarrier(Node* start_address,
Node* end_address,
Node* value) {
Comment("StoreFieldsNoWriteBarrier");
- CSA_ASSERT(this, WordIsWordAligned(start_address));
- CSA_ASSERT(this, WordIsWordAligned(end_address));
- BuildFastLoop(start_address, end_address,
- [this, value](Node* current) {
- StoreNoWriteBarrier(MachineRepresentation::kTagged, current,
- value);
- },
- kPointerSize, INTPTR_PARAMETERS, IndexAdvanceMode::kPost);
+ CSA_ASSERT(this, WordIsAligned(start_address, kTaggedSize));
+ CSA_ASSERT(this, WordIsAligned(end_address, kTaggedSize));
+ BuildFastLoop(
+ start_address, end_address,
+ [this, value](Node* current) {
+ StoreNoWriteBarrier(MachineRepresentation::kTagged, current, value);
+ },
+ kTaggedSize, INTPTR_PARAMETERS, IndexAdvanceMode::kPost);
+}
+
+TNode<BoolT> CodeStubAssembler::IsValidFastJSArrayCapacity(
+ Node* capacity, ParameterMode capacity_mode) {
+ return UncheckedCast<BoolT>(
+ UintPtrLessThanOrEqual(ParameterToIntPtr(capacity, capacity_mode),
+ IntPtrConstant(JSArray::kMaxFastArrayLength)));
}
-Node* CodeStubAssembler::AllocateUninitializedJSArrayWithoutElements(
- Node* array_map, Node* length, Node* allocation_site) {
+TNode<JSArray> CodeStubAssembler::AllocateUninitializedJSArrayWithoutElements(
+ TNode<Map> array_map, TNode<Smi> length, Node* allocation_site) {
Comment("begin allocation of JSArray without elements");
CSA_SLOW_ASSERT(this, TaggedIsPositiveSmi(length));
- CSA_SLOW_ASSERT(this, IsMap(array_map));
+
int base_size = JSArray::kSize;
if (allocation_site != nullptr) {
base_size += AllocationMemento::kSize;
}
- Node* size = IntPtrConstant(base_size);
- Node* array =
- AllocateUninitializedJSArray(array_map, length, allocation_site, size);
- return array;
+ TNode<IntPtrT> size = IntPtrConstant(base_size);
+ return AllocateUninitializedJSArray(array_map, length, allocation_site, size);
}
-std::pair<Node*, Node*>
+std::pair<TNode<JSArray>, TNode<FixedArrayBase>>
CodeStubAssembler::AllocateUninitializedJSArrayWithElements(
- ElementsKind kind, Node* array_map, Node* length, Node* allocation_site,
- Node* capacity, ParameterMode capacity_mode) {
+ ElementsKind kind, TNode<Map> array_map, TNode<Smi> length,
+ Node* allocation_site, Node* capacity, ParameterMode capacity_mode,
+ AllocationFlags allocation_flags) {
Comment("begin allocation of JSArray with elements");
+ CHECK_EQ(allocation_flags & ~kAllowLargeObjectAllocation, 0);
CSA_SLOW_ASSERT(this, TaggedIsPositiveSmi(length));
- CSA_SLOW_ASSERT(this, IsMap(array_map));
- int base_size = JSArray::kSize;
- if (allocation_site != nullptr) {
- base_size += AllocationMemento::kSize;
- }
+ int base_size = JSArray::kSize;
+ if (allocation_site != nullptr) base_size += AllocationMemento::kSize;
- int elements_offset = base_size;
+ const int elements_offset = base_size;
// Compute space for elements
base_size += FixedArray::kHeaderSize;
- Node* size = ElementOffsetFromIndex(capacity, kind, capacity_mode, base_size);
+ TNode<IntPtrT> size =
+ ElementOffsetFromIndex(capacity, kind, capacity_mode, base_size);
+
+ TVARIABLE(JSArray, array);
+ TVARIABLE(FixedArrayBase, elements);
+
+ Label out(this);
+
+ // For very large arrays in which the requested allocation exceeds the
+ // maximal size of a regular heap object, we cannot use the allocation
+ // folding trick. Instead, we first allocate the elements in large object
+ // space, and then allocate the JSArray (and possibly the allocation memento)
+ // in new space.
+ if (allocation_flags & kAllowLargeObjectAllocation) {
+ Label next(this);
+ GotoIf(IsRegularHeapObjectSize(size), &next);
+
+ CSA_CHECK(this, IsValidFastJSArrayCapacity(capacity, capacity_mode));
+
+ // Allocate and initialize the elements first. Full initialization is needed
+ // because the upcoming JSArray allocation could trigger GC.
+ elements =
+ AllocateFixedArray(kind, capacity, capacity_mode, allocation_flags);
+
+ if (IsDoubleElementsKind(kind)) {
+ FillFixedDoubleArrayWithZero(CAST(elements.value()),
+ ParameterToIntPtr(capacity, capacity_mode));
+ } else {
+ FillFixedArrayWithSmiZero(CAST(elements.value()),
+ ParameterToIntPtr(capacity, capacity_mode));
+ }
+
+ // The JSArray and possibly allocation memento next. Note that
+ // allocation_flags are *not* passed on here and the resulting JSArray will
+ // always be in new space.
+ array = AllocateUninitializedJSArrayWithoutElements(array_map, length,
+ allocation_site);
+ StoreObjectFieldNoWriteBarrier(array.value(), JSObject::kElementsOffset,
+ elements.value());
+
+ Goto(&out);
- Node* array =
+ BIND(&next);
+ }
+
+ // Fold all objects into a single new space allocation.
+ array =
AllocateUninitializedJSArray(array_map, length, allocation_site, size);
+ elements = UncheckedCast<FixedArrayBase>(
+ InnerAllocate(array.value(), elements_offset));
+
+ StoreObjectFieldNoWriteBarrier(array.value(), JSObject::kElementsOffset,
+ elements.value());
- Node* elements = InnerAllocate(array, elements_offset);
- StoreObjectFieldNoWriteBarrier(array, JSObject::kElementsOffset, elements);
// Set up the elements object.
- STATIC_ASSERT(FixedArrayBase::kHeaderSize == 2 * kPointerSize);
+ STATIC_ASSERT(FixedArrayBase::kHeaderSize == 2 * kTaggedSize);
RootIndex elements_map_index = IsDoubleElementsKind(kind)
? RootIndex::kFixedDoubleArrayMap
: RootIndex::kFixedArrayMap;
- DCHECK(Heap::RootIsImmortalImmovable(elements_map_index));
- StoreMapNoWriteBarrier(elements, elements_map_index);
+ DCHECK(RootsTable::IsImmortalImmovable(elements_map_index));
+ StoreMapNoWriteBarrier(elements.value(), elements_map_index);
+
TNode<Smi> capacity_smi = ParameterToTagged(capacity, capacity_mode);
CSA_ASSERT(this, SmiGreaterThan(capacity_smi, SmiConstant(0)));
- StoreObjectFieldNoWriteBarrier(elements, FixedArray::kLengthOffset,
+ StoreObjectFieldNoWriteBarrier(elements.value(), FixedArray::kLengthOffset,
capacity_smi);
- return {array, elements};
+ Goto(&out);
+
+ BIND(&out);
+ return {array.value(), elements.value()};
}
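The control flow added above splits on object size: regular-sized requests fold the array and its elements into one new-space bump allocation, while oversized element stores go to large-object space first, fully initialized, since the follow-up JSArray allocation may trigger GC. Its decision structure as a stand-alone sketch, with the size limit passed in rather than hard-coded:

#include <cstddef>

enum class ArrayAllocationStrategy {
  kFoldedNewSpace,           // one allocation covering JSArray + elements
  kLargeObjectElementsFirst  // elements in LO space, then the JSArray
};

ArrayAllocationStrategy ChooseStrategy(size_t elements_size_in_bytes,
                                       size_t max_regular_heap_object_size,
                                       bool allow_large_object_allocation) {
  if (allow_large_object_allocation &&
      elements_size_in_bytes > max_regular_heap_object_size) {
    return ArrayAllocationStrategy::kLargeObjectElementsFirst;
  }
  return ArrayAllocationStrategy::kFoldedNewSpace;
}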
-Node* CodeStubAssembler::AllocateUninitializedJSArray(Node* array_map,
- Node* length,
- Node* allocation_site,
- Node* size_in_bytes) {
+TNode<JSArray> CodeStubAssembler::AllocateUninitializedJSArray(
+ TNode<Map> array_map, TNode<Smi> length, Node* allocation_site,
+ TNode<IntPtrT> size_in_bytes) {
CSA_SLOW_ASSERT(this, TaggedIsPositiveSmi(length));
- CSA_SLOW_ASSERT(this, IsMap(array_map));
// Allocate space for the JSArray and the elements FixedArray in one go.
- Node* array = AllocateInNewSpace(size_in_bytes);
+ TNode<Object> array = AllocateInNewSpace(size_in_bytes);
- Comment("write JSArray headers");
StoreMapNoWriteBarrier(array, array_map);
-
StoreObjectFieldNoWriteBarrier(array, JSArray::kLengthOffset, length);
-
StoreObjectFieldRoot(array, JSArray::kPropertiesOrHashOffset,
RootIndex::kEmptyFixedArray);
@@ -3929,19 +3951,21 @@ Node* CodeStubAssembler::AllocateUninitializedJSArray(Node* array_map,
InitializeAllocationMemento(array, IntPtrConstant(JSArray::kSize),
allocation_site);
}
- return array;
+
+ return CAST(array);
}
-Node* CodeStubAssembler::AllocateJSArray(ElementsKind kind, Node* array_map,
- Node* capacity, Node* length,
- Node* allocation_site,
- ParameterMode capacity_mode) {
- CSA_SLOW_ASSERT(this, IsMap(array_map));
+TNode<JSArray> CodeStubAssembler::AllocateJSArray(
+ ElementsKind kind, TNode<Map> array_map, Node* capacity, TNode<Smi> length,
+ Node* allocation_site, ParameterMode capacity_mode,
+ AllocationFlags allocation_flags) {
CSA_SLOW_ASSERT(this, TaggedIsPositiveSmi(length));
CSA_SLOW_ASSERT(this, MatchesParameterMode(capacity, capacity_mode));
+ TNode<JSArray> array;
+ TNode<FixedArrayBase> elements;
int capacity_as_constant;
- Node *array = nullptr, *elements = nullptr;
+
if (IsIntPtrOrSmiConstantZero(capacity, capacity_mode)) {
// Array is empty. Use the shared empty fixed array instead of allocating a
// new one.
@@ -3950,18 +3974,19 @@ Node* CodeStubAssembler::AllocateJSArray(ElementsKind kind, Node* array_map,
StoreObjectFieldRoot(array, JSArray::kElementsOffset,
RootIndex::kEmptyFixedArray);
} else if (TryGetIntPtrOrSmiConstantValue(capacity, &capacity_as_constant,
- capacity_mode) &&
- capacity_as_constant > 0) {
+ capacity_mode)) {
+ CHECK_GT(capacity_as_constant, 0);
// Allocate both array and elements object, and initialize the JSArray.
std::tie(array, elements) = AllocateUninitializedJSArrayWithElements(
- kind, array_map, length, allocation_site, capacity, capacity_mode);
+ kind, array_map, length, allocation_site, capacity, capacity_mode,
+ allocation_flags);
// Fill in the elements with holes.
FillFixedArrayWithValue(kind, elements,
IntPtrOrSmiConstant(0, capacity_mode), capacity,
RootIndex::kTheHoleValue, capacity_mode);
} else {
Label out(this), empty(this), nonempty(this);
- VARIABLE(var_array, MachineRepresentation::kTagged);
+ TVARIABLE(JSArray, var_array);
Branch(SmiEqual(ParameterToTagged(capacity, capacity_mode), SmiConstant(0)),
&empty, &nonempty);
@@ -3970,8 +3995,8 @@ Node* CodeStubAssembler::AllocateJSArray(ElementsKind kind, Node* array_map,
{
// Array is empty. Use the shared empty fixed array instead of allocating
// a new one.
- var_array.Bind(AllocateUninitializedJSArrayWithoutElements(
- array_map, length, allocation_site));
+ var_array = AllocateUninitializedJSArrayWithoutElements(array_map, length,
+ allocation_site);
StoreObjectFieldRoot(var_array.value(), JSArray::kElementsOffset,
RootIndex::kEmptyFixedArray);
Goto(&out);
@@ -3980,10 +4005,11 @@ Node* CodeStubAssembler::AllocateJSArray(ElementsKind kind, Node* array_map,
BIND(&nonempty);
{
// Allocate both array and elements object, and initialize the JSArray.
- Node* array;
+ TNode<JSArray> array;
std::tie(array, elements) = AllocateUninitializedJSArrayWithElements(
- kind, array_map, length, allocation_site, capacity, capacity_mode);
- var_array.Bind(array);
+ kind, array_map, length, allocation_site, capacity, capacity_mode,
+ allocation_flags);
+ var_array = array;
// Fill in the elements with holes.
FillFixedArrayWithValue(kind, elements,
IntPtrOrSmiConstant(0, capacity_mode), capacity,
@@ -4007,13 +4033,13 @@ Node* CodeStubAssembler::ExtractFastJSArray(Node* context, Node* array,
  // Use the canonical map for the Array's ElementsKind
Node* native_context = LoadNativeContext(context);
- Node* array_map = LoadJSArrayElementsMap(elements_kind, native_context);
+ TNode<Map> array_map = LoadJSArrayElementsMap(elements_kind, native_context);
- Node* new_elements =
- ExtractFixedArray(LoadElements(array), begin, count, capacity,
- ExtractFixedArrayFlag::kAllFixedArrays, mode);
+ Node* new_elements = ExtractFixedArray(
+ LoadElements(array), begin, count, capacity,
+ ExtractFixedArrayFlag::kAllFixedArrays, mode, nullptr, elements_kind);
- Node* result = AllocateUninitializedJSArrayWithoutElements(
+ TNode<Object> result = AllocateUninitializedJSArrayWithoutElements(
array_map, ParameterToTagged(count, mode), allocation_site);
StoreObjectField(result, JSObject::kElementsOffset, new_elements);
return result;
@@ -4043,10 +4069,11 @@ Node* CodeStubAssembler::CloneFastJSArray(Node* context, Node* array,
}
// Simple extraction that preserves holes.
- new_elements = ExtractFixedArray(
- LoadElements(array), IntPtrOrSmiConstant(0, mode),
- TaggedToParameter(length, mode), nullptr,
- ExtractFixedArrayFlag::kAllFixedArraysDontCopyCOW, mode);
+ new_elements =
+ ExtractFixedArray(LoadElements(array), IntPtrOrSmiConstant(0, mode),
+ TaggedToParameter(length, mode), nullptr,
+ ExtractFixedArrayFlag::kAllFixedArraysDontCopyCOW, mode,
+ nullptr, var_elements_kind.value());
var_new_elements.Bind(new_elements);
Goto(&allocate_jsarray);
@@ -4075,11 +4102,11 @@ Node* CodeStubAssembler::CloneFastJSArray(Node* context, Node* array,
BIND(&allocate_jsarray);
  // Use the canonical map for the chosen elements kind.
Node* native_context = LoadNativeContext(context);
- Node* array_map =
+ TNode<Map> array_map =
LoadJSArrayElementsMap(var_elements_kind.value(), native_context);
- Node* result = AllocateUninitializedJSArrayWithoutElements(array_map, length,
- allocation_site);
+ TNode<Object> result = AllocateUninitializedJSArrayWithoutElements(
+ array_map, CAST(length), allocation_site);
StoreObjectField(result, JSObject::kElementsOffset, var_new_elements.value());
return result;
}
@@ -4091,6 +4118,28 @@ TNode<FixedArrayBase> CodeStubAssembler::AllocateFixedArray(
CSA_SLOW_ASSERT(this, MatchesParameterMode(capacity, mode));
CSA_ASSERT(this, IntPtrOrSmiGreaterThan(capacity,
IntPtrOrSmiConstant(0, mode), mode));
+
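+  // Guard against capacities above the maximum FixedArray(Double) length:
+  // constant capacities are checked at stub-compile time, dynamic ones branch
+  // to a fatal out-of-memory error at runtime.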
+ const intptr_t kMaxLength = IsDoubleElementsKind(kind)
+ ? FixedDoubleArray::kMaxLength
+ : FixedArray::kMaxLength;
+ intptr_t capacity_constant;
+ if (ToParameterConstant(capacity, &capacity_constant, mode)) {
+ CHECK_LE(capacity_constant, kMaxLength);
+ } else {
+ Label if_out_of_memory(this, Label::kDeferred), next(this);
+ Branch(IntPtrOrSmiGreaterThan(
+ capacity,
+ IntPtrOrSmiConstant(static_cast<int>(kMaxLength), mode), mode),
+ &if_out_of_memory, &next);
+
+ BIND(&if_out_of_memory);
+ CallRuntime(Runtime::kFatalProcessOutOfMemoryInvalidArrayLength,
+ NoContextConstant());
+ Unreachable();
+
+ BIND(&next);
+ }
+
TNode<IntPtrT> total_size = GetFixedArrayAllocationSize(capacity, kind, mode);
if (IsDoubleElementsKind(kind)) flags |= kDoubleAlignment;
@@ -4112,7 +4161,7 @@ TNode<FixedArrayBase> CodeStubAssembler::AllocateFixedArray(
RootIndex map_index = IsDoubleElementsKind(kind)
? RootIndex::kFixedDoubleArrayMap
: RootIndex::kFixedArrayMap;
- DCHECK(Heap::RootIsImmortalImmovable(map_index));
+ DCHECK(RootsTable::IsImmortalImmovable(map_index));
StoreMapNoWriteBarrier(array, map_index);
}
StoreObjectFieldNoWriteBarrier(array, FixedArray::kLengthOffset,
@@ -4124,7 +4173,8 @@ TNode<FixedArray> CodeStubAssembler::ExtractToFixedArray(
Node* source, Node* first, Node* count, Node* capacity, Node* source_map,
ElementsKind from_kind, AllocationFlags allocation_flags,
ExtractFixedArrayFlags extract_flags, ParameterMode parameter_mode,
- HoleConversionMode convert_holes, TVariable<BoolT>* var_holes_converted) {
+ HoleConversionMode convert_holes, TVariable<BoolT>* var_holes_converted,
+ Node* source_elements_kind) {
DCHECK_NE(first, nullptr);
DCHECK_NE(count, nullptr);
DCHECK_NE(capacity, nullptr);
@@ -4196,30 +4246,76 @@ TNode<FixedArray> CodeStubAssembler::ExtractToFixedArray(
Comment("Copy FixedArray new space");
// We use PACKED_ELEMENTS to tell AllocateFixedArray and
// CopyFixedArrayElements that we want a FixedArray.
- ElementsKind to_kind = PACKED_ELEMENTS;
- Node* to_elements =
+ const ElementsKind to_kind = PACKED_ELEMENTS;
+ TNode<FixedArrayBase> to_elements =
AllocateFixedArray(to_kind, capacity, parameter_mode,
AllocationFlag::kNone, var_target_map.value());
var_result.Bind(to_elements);
- CopyFixedArrayElements(from_kind, source, to_kind, to_elements, first,
- count, capacity, SKIP_WRITE_BARRIER, parameter_mode,
- convert_holes, var_holes_converted);
+
+ if (convert_holes == HoleConversionMode::kDontConvert &&
+ !IsDoubleElementsKind(from_kind)) {
+ // We can use CopyElements (memcpy) because we don't need to replace or
+ // convert any values. Since {to_elements} is in new-space, CopyElements
+ // will efficiently use memcpy.
+ FillFixedArrayWithValue(to_kind, to_elements, count, capacity,
+ RootIndex::kTheHoleValue, parameter_mode);
+ CopyElements(to_kind, to_elements, IntPtrConstant(0), CAST(source),
+ ParameterToIntPtr(first, parameter_mode),
+ ParameterToIntPtr(count, parameter_mode),
+ SKIP_WRITE_BARRIER);
+ } else {
+ CopyFixedArrayElements(from_kind, source, to_kind, to_elements, first,
+ count, capacity, SKIP_WRITE_BARRIER,
+ parameter_mode, convert_holes,
+ var_holes_converted);
+ }
Goto(&done);
if (handle_old_space) {
BIND(&old_space);
{
Comment("Copy FixedArray old space");
+ Label copy_one_by_one(this);
+
+ // Try to use memcpy if we don't need to convert holes to undefined.
+ if (convert_holes == HoleConversionMode::kDontConvert &&
+ source_elements_kind != nullptr) {
+ // Only try memcpy if we're not copying object pointers.
+ GotoIfNot(IsFastSmiElementsKind(source_elements_kind),
+ &copy_one_by_one);
+
+ const ElementsKind to_smi_kind = PACKED_SMI_ELEMENTS;
+ to_elements =
+ AllocateFixedArray(to_smi_kind, capacity, parameter_mode,
+ allocation_flags, var_target_map.value());
+ var_result.Bind(to_elements);
+
+ FillFixedArrayWithValue(to_smi_kind, to_elements, count, capacity,
+ RootIndex::kTheHoleValue, parameter_mode);
+          // CopyElements will try to use memcpy if it does not conflict with
+          // the GC. Otherwise it copies element by element, but skips write
+          // barriers (since we're copying Smis to Smis).
+ CopyElements(to_smi_kind, to_elements, IntPtrConstant(0),
+ CAST(source), ParameterToIntPtr(first, parameter_mode),
+ ParameterToIntPtr(count, parameter_mode),
+ SKIP_WRITE_BARRIER);
+ Goto(&done);
+ } else {
+ Goto(&copy_one_by_one);
+ }
- to_elements =
- AllocateFixedArray(to_kind, capacity, parameter_mode,
- allocation_flags, var_target_map.value());
- var_result.Bind(to_elements);
- CopyFixedArrayElements(from_kind, source, to_kind, to_elements, first,
- count, capacity, UPDATE_WRITE_BARRIER,
- parameter_mode, convert_holes,
- var_holes_converted);
- Goto(&done);
+ BIND(&copy_one_by_one);
+ {
+ to_elements =
+ AllocateFixedArray(to_kind, capacity, parameter_mode,
+ allocation_flags, var_target_map.value());
+ var_result.Bind(to_elements);
+ CopyFixedArrayElements(from_kind, source, to_kind, to_elements, first,
+ count, capacity, UPDATE_WRITE_BARRIER,
+ parameter_mode, convert_holes,
+ var_holes_converted);
+ Goto(&done);
+ }
}
}
}
@@ -4240,7 +4336,7 @@ TNode<FixedArrayBase> CodeStubAssembler::ExtractFixedDoubleArrayFillingHoles(
CSA_ASSERT(this, IsFixedDoubleArrayMap(fixed_array_map));
VARIABLE(var_result, MachineRepresentation::kTagged);
- ElementsKind kind = PACKED_DOUBLE_ELEMENTS;
+ const ElementsKind kind = PACKED_DOUBLE_ELEMENTS;
Node* to_elements = AllocateFixedArray(kind, capacity, mode, allocation_flags,
fixed_array_map);
var_result.Bind(to_elements);
@@ -4317,7 +4413,7 @@ TNode<FixedArrayBase> CodeStubAssembler::ExtractFixedDoubleArrayFillingHoles(
TNode<FixedArrayBase> CodeStubAssembler::ExtractFixedArray(
Node* source, Node* first, Node* count, Node* capacity,
ExtractFixedArrayFlags extract_flags, ParameterMode parameter_mode,
- TVariable<BoolT>* var_holes_converted) {
+ TVariable<BoolT>* var_holes_converted, Node* source_runtime_kind) {
DCHECK(extract_flags & ExtractFixedArrayFlag::kFixedArrays ||
extract_flags & ExtractFixedArrayFlag::kFixedDoubleArrays);
// If we want to replace holes, ExtractFixedArrayFlag::kDontCopyCOW should not
@@ -4367,10 +4463,10 @@ TNode<FixedArrayBase> CodeStubAssembler::ExtractFixedArray(
if (extract_flags & ExtractFixedArrayFlag::kFixedArrays) {
// Here we can only get |source| as FixedArray, never FixedDoubleArray.
// PACKED_ELEMENTS is used to signify that the source is a FixedArray.
- Node* to_elements =
- ExtractToFixedArray(source, first, count, capacity, source_map,
- PACKED_ELEMENTS, allocation_flags, extract_flags,
- parameter_mode, convert_holes, var_holes_converted);
+ Node* to_elements = ExtractToFixedArray(
+ source, first, count, capacity, source_map, PACKED_ELEMENTS,
+ allocation_flags, extract_flags, parameter_mode, convert_holes,
+ var_holes_converted, source_runtime_kind);
var_result.Bind(to_elements);
Goto(&done);
}
@@ -4389,11 +4485,14 @@ TNode<FixedArrayBase> CodeStubAssembler::ExtractFixedArray(
// the target are FixedDoubleArray. That it is PACKED or HOLEY does not
// matter.
ElementsKind kind = PACKED_DOUBLE_ELEMENTS;
- Node* to_elements = AllocateFixedArray(kind, capacity, parameter_mode,
- allocation_flags, source_map);
+ TNode<FixedArrayBase> to_elements = AllocateFixedArray(
+ kind, capacity, parameter_mode, allocation_flags, source_map);
+ FillFixedArrayWithValue(kind, to_elements, count, capacity,
+ RootIndex::kTheHoleValue, parameter_mode);
+ CopyElements(kind, to_elements, IntPtrConstant(0), CAST(source),
+ ParameterToIntPtr(first, parameter_mode),
+ ParameterToIntPtr(count, parameter_mode));
var_result.Bind(to_elements);
- CopyFixedArrayElements(kind, source, kind, to_elements, first, count,
- capacity, SKIP_WRITE_BARRIER, parameter_mode);
}
Goto(&done);
@@ -4433,11 +4532,12 @@ Node* CodeStubAssembler::AllocatePropertyArray(Node* capacity_node,
CSA_SLOW_ASSERT(this, MatchesParameterMode(capacity_node, mode));
CSA_ASSERT(this, IntPtrOrSmiGreaterThan(capacity_node,
IntPtrOrSmiConstant(0, mode), mode));
- Node* total_size = GetPropertyArrayAllocationSize(capacity_node, mode);
+ TNode<IntPtrT> total_size =
+ GetPropertyArrayAllocationSize(capacity_node, mode);
- Node* array = Allocate(total_size, flags);
+ TNode<Object> array = Allocate(total_size, flags);
RootIndex map_index = RootIndex::kPropertyArrayMap;
- DCHECK(Heap::RootIsImmortalImmovable(map_index));
+ DCHECK(RootsTable::IsImmortalImmovable(map_index));
StoreMapNoWriteBarrier(array, map_index);
InitializePropertyArrayLength(array, capacity_node, mode);
return array;
@@ -4514,7 +4614,7 @@ void CodeStubAssembler::StoreFixedDoubleArrayHole(
StoreNoWriteBarrier(MachineRepresentation::kWord32, array, offset,
double_hole);
StoreNoWriteBarrier(MachineRepresentation::kWord32, array,
- IntPtrAdd(offset, IntPtrConstant(kPointerSize)),
+ IntPtrAdd(offset, IntPtrConstant(kInt32Size)),
double_hole);
}
}
@@ -4523,7 +4623,7 @@ void CodeStubAssembler::FillFixedArrayWithSmiZero(TNode<FixedArray> array,
TNode<IntPtrT> length) {
CSA_ASSERT(this, WordEqual(length, LoadAndUntagFixedArrayBaseLength(array)));
- TNode<IntPtrT> byte_length = TimesPointerSize(length);
+ TNode<IntPtrT> byte_length = TimesTaggedSize(length);
CSA_ASSERT(this, UintPtrLessThan(length, byte_length));
static const int32_t fa_base_data_offset =
@@ -4584,7 +4684,7 @@ void CodeStubAssembler::MoveElements(ElementsKind kind,
TNode<IntPtrT> length) {
Label finished(this);
Label needs_barrier(this);
- const bool needs_barrier_check = IsObjectElementsKind(kind);
+ const bool needs_barrier_check = !IsDoubleElementsKind(kind);
DCHECK(IsFastElementsKind(kind));
CSA_ASSERT(this, IsFixedArrayWithKind(elements, kind));
@@ -4595,8 +4695,8 @@ void CodeStubAssembler::MoveElements(ElementsKind kind,
IntPtrLessThanOrEqual(IntPtrAdd(src_index, length),
LoadAndUntagFixedArrayBaseLength(elements)));
- // The write barrier can be ignored if {elements} is in new space, or if
- // we have a SMI or double ElementsKind.
+  // The write barrier can be ignored if {elements} is in new space, or if
+  // the elements are a FixedDoubleArray.
if (needs_barrier_check) {
JumpIfPointersFromHereAreInteresting(elements, &needs_barrier);
}
@@ -4668,10 +4768,11 @@ void CodeStubAssembler::CopyElements(ElementsKind kind,
TNode<IntPtrT> dst_index,
TNode<FixedArrayBase> src_elements,
TNode<IntPtrT> src_index,
- TNode<IntPtrT> length) {
+ TNode<IntPtrT> length,
+ WriteBarrierMode write_barrier) {
Label finished(this);
Label needs_barrier(this);
- const bool needs_barrier_check = IsObjectElementsKind(kind);
+ const bool needs_barrier_check = !IsDoubleElementsKind(kind);
DCHECK(IsFastElementsKind(kind));
CSA_ASSERT(this, IsFixedArrayWithKind(dst_elements, kind));
@@ -4682,10 +4783,11 @@ void CodeStubAssembler::CopyElements(ElementsKind kind,
CSA_ASSERT(this, IntPtrLessThanOrEqual(
IntPtrAdd(src_index, length),
LoadAndUntagFixedArrayBaseLength(src_elements)));
- CSA_ASSERT(this, WordNotEqual(dst_elements, src_elements));
+ CSA_ASSERT(this, Word32Or(WordNotEqual(dst_elements, src_elements),
+ WordEqual(length, IntPtrConstant(0))));
// The write barrier can be ignored if {dst_elements} is in new space, or if
- // we have a SMI or double ElementsKind.
+  // the elements are a FixedDoubleArray.
if (needs_barrier_check) {
JumpIfPointersFromHereAreInteresting(dst_elements, &needs_barrier);
}
@@ -4725,7 +4827,12 @@ void CodeStubAssembler::CopyElements(ElementsKind kind,
[&](Node* array, Node* offset) {
Node* const element = Load(MachineType::AnyTagged(), array, offset);
Node* const delta_offset = IntPtrAdd(offset, delta);
- Store(dst_elements, delta_offset, element);
+ if (write_barrier == SKIP_WRITE_BARRIER) {
+ StoreNoWriteBarrier(MachineRepresentation::kTagged, dst_elements,
+ delta_offset, element);
+ } else {
+ Store(dst_elements, delta_offset, element);
+ }
},
INTPTR_PARAMETERS, ForEachDirection::kForward);
Goto(&finished);
@@ -4823,7 +4930,7 @@ void CodeStubAssembler::CopyFixedArrayElements(
{
Node* from_offset = IntPtrSub(
var_from_offset.value(),
- IntPtrConstant(from_double_elements ? kDoubleSize : kPointerSize));
+ IntPtrConstant(from_double_elements ? kDoubleSize : kTaggedSize));
var_from_offset.Bind(from_offset);
Node* to_offset;
@@ -4832,7 +4939,7 @@ void CodeStubAssembler::CopyFixedArrayElements(
} else {
to_offset = IntPtrSub(
var_to_offset.value(),
- IntPtrConstant(to_double_elements ? kDoubleSize : kPointerSize));
+ IntPtrConstant(to_double_elements ? kDoubleSize : kTaggedSize));
var_to_offset.Bind(to_offset);
}
@@ -4885,7 +4992,7 @@ void CodeStubAssembler::CopyFixedArrayElements(
StoreNoWriteBarrier(MachineRepresentation::kWord32, to_array_adjusted,
to_offset, double_hole);
StoreNoWriteBarrier(MachineRepresentation::kWord32, to_array_adjusted,
- IntPtrAdd(to_offset, IntPtrConstant(kPointerSize)),
+ IntPtrAdd(to_offset, IntPtrConstant(kInt32Size)),
double_hole);
}
Goto(&next_iter);
@@ -4985,8 +5092,8 @@ void CodeStubAssembler::CopyStringCharacters(Node* from_string, Node* to_string,
bool from_one_byte = from_encoding == String::ONE_BYTE_ENCODING;
bool to_one_byte = to_encoding == String::ONE_BYTE_ENCODING;
DCHECK_IMPLIES(to_one_byte, from_one_byte);
- Comment("CopyStringCharacters %s -> %s",
- from_one_byte ? "ONE_BYTE_ENCODING" : "TWO_BYTE_ENCODING",
+ Comment("CopyStringCharacters ",
+ from_one_byte ? "ONE_BYTE_ENCODING" : "TWO_BYTE_ENCODING", " -> ",
to_one_byte ? "ONE_BYTE_ENCODING" : "TWO_BYTE_ENCODING");
ElementsKind from_kind = from_one_byte ? UINT8_ELEMENTS : UINT16_ELEMENTS;
@@ -5144,7 +5251,8 @@ void CodeStubAssembler::InitializeAllocationMemento(Node* base,
Node* base_allocation_size,
Node* allocation_site) {
Comment("[Initialize AllocationMemento");
- Node* memento = InnerAllocate(base, base_allocation_size);
+ TNode<Object> memento =
+ InnerAllocate(CAST(base), UncheckedCast<IntPtrT>(base_allocation_size));
StoreMapNoWriteBarrier(memento, RootIndex::kAllocationMementoMap);
StoreObjectFieldNoWriteBarrier(
memento, AllocationMemento::kAllocationSiteOffset, allocation_site);
@@ -5584,8 +5692,13 @@ TNode<UintPtrT> CodeStubAssembler::ChangeNonnegativeNumberToUintPtr(
return result.value();
}
-TNode<WordT> CodeStubAssembler::TimesPointerSize(SloppyTNode<WordT> value) {
- return WordShl(value, kPointerSizeLog2);
+TNode<WordT> CodeStubAssembler::TimesSystemPointerSize(
+ SloppyTNode<WordT> value) {
+ return WordShl(value, kSystemPointerSizeLog2);
+}
+
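+// With pointer compression, the tagged slot size may differ from the system
+// pointer size, hence the two separate helpers.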
+TNode<WordT> CodeStubAssembler::TimesTaggedSize(SloppyTNode<WordT> value) {
+ return WordShl(value, kTaggedSizeLog2);
}
TNode<WordT> CodeStubAssembler::TimesDoubleSize(SloppyTNode<WordT> value) {
@@ -5700,9 +5813,9 @@ Node* CodeStubAssembler::ThrowIfNotInstanceType(Node* context, Node* value,
return var_value_map.value();
}
-Node* CodeStubAssembler::ThrowIfNotJSReceiver(
- Node* context, Node* value, MessageTemplate::Template msg_template,
- const char* method_name) {
+Node* CodeStubAssembler::ThrowIfNotJSReceiver(Node* context, Node* value,
+ MessageTemplate msg_template,
+ const char* method_name) {
Label out(this), throw_exception(this, Label::kDeferred);
VARIABLE(var_value_map, MachineRepresentation::kTagged);
@@ -5722,10 +5835,9 @@ Node* CodeStubAssembler::ThrowIfNotJSReceiver(
return var_value_map.value();
}
-void CodeStubAssembler::ThrowRangeError(Node* context,
- MessageTemplate::Template message,
+void CodeStubAssembler::ThrowRangeError(Node* context, MessageTemplate message,
Node* arg0, Node* arg1, Node* arg2) {
- Node* template_index = SmiConstant(message);
+ Node* template_index = SmiConstant(static_cast<int>(message));
if (arg0 == nullptr) {
CallRuntime(Runtime::kThrowRangeError, context, template_index);
} else if (arg1 == nullptr) {
@@ -5739,8 +5851,7 @@ void CodeStubAssembler::ThrowRangeError(Node* context,
Unreachable();
}
-void CodeStubAssembler::ThrowTypeError(Node* context,
- MessageTemplate::Template message,
+void CodeStubAssembler::ThrowTypeError(Node* context, MessageTemplate message,
char const* arg0, char const* arg1) {
Node* arg0_node = nullptr;
if (arg0) arg0_node = StringConstant(arg0);
@@ -5749,10 +5860,9 @@ void CodeStubAssembler::ThrowTypeError(Node* context,
ThrowTypeError(context, message, arg0_node, arg1_node);
}
-void CodeStubAssembler::ThrowTypeError(Node* context,
- MessageTemplate::Template message,
+void CodeStubAssembler::ThrowTypeError(Node* context, MessageTemplate message,
Node* arg0, Node* arg1, Node* arg2) {
- Node* template_index = SmiConstant(message);
+ Node* template_index = SmiConstant(static_cast<int>(message));
if (arg0 == nullptr) {
CallRuntime(Runtime::kThrowTypeError, context, template_index);
} else if (arg1 == nullptr) {
@@ -5811,6 +5921,13 @@ TNode<BoolT> CodeStubAssembler::IsNoElementsProtectorCellInvalid() {
return WordEqual(cell_value, invalid);
}
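+// True if the ArrayIterator protector cell has been invalidated.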
+TNode<BoolT> CodeStubAssembler::IsArrayIteratorProtectorCellInvalid() {
+ Node* invalid = SmiConstant(Isolate::kProtectorInvalid);
+ Node* cell = LoadRoot(RootIndex::kArrayIteratorProtector);
+ Node* cell_value = LoadObjectField(cell, PropertyCell::kValueOffset);
+ return WordEqual(cell_value, invalid);
+}
+
TNode<BoolT> CodeStubAssembler::IsPromiseResolveProtectorCellInvalid() {
Node* invalid = SmiConstant(Isolate::kProtectorInvalid);
Node* cell = LoadRoot(RootIndex::kPromiseResolveProtector);
@@ -5839,6 +5956,13 @@ TNode<BoolT> CodeStubAssembler::IsTypedArraySpeciesProtectorCellInvalid() {
return WordEqual(cell_value, invalid);
}
+TNode<BoolT> CodeStubAssembler::IsRegExpSpeciesProtectorCellInvalid() {
+ Node* invalid = SmiConstant(Isolate::kProtectorInvalid);
+ Node* cell = LoadRoot(RootIndex::kRegExpSpeciesProtector);
+ Node* cell_value = LoadObjectField(cell, PropertyCell::kValueOffset);
+ return WordEqual(cell_value, invalid);
+}
+
TNode<BoolT> CodeStubAssembler::IsPromiseSpeciesProtectorCellInvalid() {
Node* invalid = SmiConstant(Isolate::kProtectorInvalid);
Node* cell = LoadRoot(RootIndex::kPromiseSpeciesProtector);
@@ -5867,6 +5991,38 @@ TNode<BoolT> CodeStubAssembler::IsPrototypeTypedArrayPrototype(
return WordEqual(proto_of_proto, typed_array_prototype);
}
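+// Each of the following predicates checks {map} against the corresponding
+// arguments-object map stored on the native context.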
+TNode<BoolT> CodeStubAssembler::IsFastAliasedArgumentsMap(
+ TNode<Context> context, TNode<Map> map) {
+ TNode<Context> const native_context = LoadNativeContext(context);
+ TNode<Object> const arguments_map = LoadContextElement(
+ native_context, Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX);
+ return WordEqual(arguments_map, map);
+}
+
+TNode<BoolT> CodeStubAssembler::IsSlowAliasedArgumentsMap(
+ TNode<Context> context, TNode<Map> map) {
+ TNode<Context> const native_context = LoadNativeContext(context);
+ TNode<Object> const arguments_map = LoadContextElement(
+ native_context, Context::SLOW_ALIASED_ARGUMENTS_MAP_INDEX);
+ return WordEqual(arguments_map, map);
+}
+
+TNode<BoolT> CodeStubAssembler::IsSloppyArgumentsMap(TNode<Context> context,
+ TNode<Map> map) {
+ TNode<Context> const native_context = LoadNativeContext(context);
+ TNode<Object> const arguments_map =
+ LoadContextElement(native_context, Context::SLOPPY_ARGUMENTS_MAP_INDEX);
+ return WordEqual(arguments_map, map);
+}
+
+TNode<BoolT> CodeStubAssembler::IsStrictArgumentsMap(TNode<Context> context,
+ TNode<Map> map) {
+ TNode<Context> const native_context = LoadNativeContext(context);
+ TNode<Object> const arguments_map =
+ LoadContextElement(native_context, Context::STRICT_ARGUMENTS_MAP_INDEX);
+ return WordEqual(arguments_map, map);
+}
+
TNode<BoolT> CodeStubAssembler::TaggedIsCallable(TNode<Object> object) {
return Select<BoolT>(
TaggedIsSmi(object), [=] { return Int32FalseConstant(); },
@@ -5929,6 +6085,12 @@ TNode<BoolT> CodeStubAssembler::IsOneByteStringInstanceType(
Int32Constant(kOneByteStringTag));
}
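+// A string is known to contain only one-byte characters if its encoding is
+// one-byte, or if its one-byte data hint bit is set.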
+TNode<BoolT> CodeStubAssembler::HasOnlyOneByteChars(
+ TNode<Int32T> instance_type) {
+ CSA_ASSERT(this, IsStringInstanceType(instance_type));
+ return IsSetWord32(instance_type, kStringEncodingMask | kOneByteDataHintMask);
+}
+
TNode<BoolT> CodeStubAssembler::IsSequentialStringInstanceType(
SloppyTNode<Int32T> instance_type) {
CSA_ASSERT(this, IsStringInstanceType(instance_type));
@@ -6184,6 +6346,10 @@ TNode<BoolT> CodeStubAssembler::IsHeapNumberInstanceType(
return InstanceTypeEqual(instance_type, HEAP_NUMBER_TYPE);
}
+TNode<BoolT> CodeStubAssembler::IsOddball(SloppyTNode<HeapObject> object) {
+ return IsOddballInstanceType(LoadInstanceType(object));
+}
+
TNode<BoolT> CodeStubAssembler::IsOddballInstanceType(
SloppyTNode<Int32T> instance_type) {
return InstanceTypeEqual(instance_type, ODDBALL_TYPE);
@@ -6474,7 +6640,7 @@ Node* CodeStubAssembler::FixedArraySizeDoesntFitInNewSpace(Node* element_count,
int base_size,
ParameterMode mode) {
int max_newspace_elements =
- (kMaxRegularHeapObjectSize - base_size) / kPointerSize;
+ (kMaxRegularHeapObjectSize - base_size) / kTaggedSize;
return IntPtrOrSmiGreaterThan(
element_count, IntPtrOrSmiConstant(max_newspace_elements, mode), mode);
}
@@ -6646,12 +6812,12 @@ TNode<String> CodeStubAssembler::SubString(TNode<String> string,
&original_string_or_invalid_length);
// A real substring (substr_length < string_length).
+ Label empty(this);
+ GotoIf(IntPtrEqual(substr_length, IntPtrConstant(0)), &empty);
Label single_char(this);
GotoIf(IntPtrEqual(substr_length, IntPtrConstant(1)), &single_char);
- // TODO(jgruber): Add an additional case for substring of length == 0?
-
// Deal with different string types: update the index if necessary
// and extract the underlying string.
@@ -6726,6 +6892,12 @@ TNode<String> CodeStubAssembler::SubString(TNode<String> string,
Goto(&end);
}
+ BIND(&empty);
+ {
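+    // A zero-length substring is always the canonical empty string.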
+ var_result = EmptyStringConstant();
+ Goto(&end);
+ }
+
// Substrings of length 1 are generated through CharCodeAt and FromCharCode.
BIND(&single_char);
{
@@ -6929,7 +7101,8 @@ Node* CodeStubAssembler::DerefIndirectString(TNode<String> string,
Label deref(this);
BranchIfCanDerefIndirectString(string, instance_type, &deref, cannot_deref);
BIND(&deref);
- STATIC_ASSERT(ThinString::kActualOffset == ConsString::kFirstOffset);
+ STATIC_ASSERT(static_cast<int>(ThinString::kActualOffset) ==
+ static_cast<int>(ConsString::kFirstOffset));
return LoadObjectField(string, ThinString::kActualOffset);
}
@@ -6945,7 +7118,8 @@ void CodeStubAssembler::DerefIndirectString(Variable* var_string,
BIND(&can_deref);
#endif // DEBUG
- STATIC_ASSERT(ThinString::kActualOffset == ConsString::kFirstOffset);
+ STATIC_ASSERT(static_cast<int>(ThinString::kActualOffset) ==
+ static_cast<int>(ConsString::kFirstOffset));
var_string->Bind(
LoadObjectField(var_string->value(), ThinString::kActualOffset));
}
@@ -7234,7 +7408,7 @@ TNode<String> CodeStubAssembler::NumberToString(TNode<Number> input) {
// Heap number match, return value from cache entry.
result = CAST(
- LoadFixedArrayElement(CAST(number_string_cache), index, kPointerSize));
+ LoadFixedArrayElement(CAST(number_string_cache), index, kTaggedSize));
Goto(&done);
}
@@ -7249,7 +7423,7 @@ TNode<String> CodeStubAssembler::NumberToString(TNode<Number> input) {
// Smi match, return value from cache entry.
result = CAST(LoadFixedArrayElement(CAST(number_string_cache), smi_index,
- kPointerSize, SMI_PARAMETERS));
+ kTaggedSize, SMI_PARAMETERS));
Goto(&done);
}
@@ -7731,8 +7905,8 @@ TNode<JSReceiver> CodeStubAssembler::ToObject_Inline(TNode<Context> context,
return result.value();
}
-TNode<Smi> CodeStubAssembler::ToSmiIndex(TNode<Object> input,
- TNode<Context> context,
+TNode<Smi> CodeStubAssembler::ToSmiIndex(TNode<Context> context,
+ TNode<Object> input,
Label* range_error) {
TVARIABLE(Smi, result);
Label check_undefined(this), return_zero(this), defined(this),
@@ -7763,8 +7937,8 @@ TNode<Smi> CodeStubAssembler::ToSmiIndex(TNode<Object> input,
return result.value();
}
-TNode<Smi> CodeStubAssembler::ToSmiLength(TNode<Object> input,
- TNode<Context> context,
+TNode<Smi> CodeStubAssembler::ToSmiLength(TNode<Context> context,
+ TNode<Object> input,
Label* range_error) {
TVARIABLE(Smi, result);
Label to_integer(this), negative_check(this),
@@ -8047,30 +8221,89 @@ TNode<IntPtrT> CodeStubAssembler::EntryToIndex(TNode<IntPtrT> entry,
field_index));
}
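+// Loads a (possibly weak) element from a DescriptorArray, offset from the
+// DescriptorArray's own header rather than WeakFixedArray's.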
+TNode<MaybeObject> CodeStubAssembler::LoadDescriptorArrayElement(
+ TNode<DescriptorArray> object, Node* index, int additional_offset) {
+ return LoadArrayElement(object, DescriptorArray::kHeaderSize, index,
+ additional_offset);
+}
+
+TNode<Name> CodeStubAssembler::LoadKeyByKeyIndex(
+ TNode<DescriptorArray> container, TNode<IntPtrT> key_index) {
+ return CAST(LoadDescriptorArrayElement(container, key_index, 0));
+}
+
TNode<Uint32T> CodeStubAssembler::LoadDetailsByKeyIndex(
TNode<DescriptorArray> container, TNode<IntPtrT> key_index) {
- const int kKeyToDetailsOffset =
- (DescriptorArray::kEntryDetailsIndex - DescriptorArray::kEntryKeyIndex) *
- kPointerSize;
- return Unsigned(LoadAndUntagToWord32ArrayElement(
- container, WeakFixedArray::kHeaderSize, key_index, kKeyToDetailsOffset));
+ const int kKeyToDetails =
+ DescriptorArray::ToDetailsIndex(0) - DescriptorArray::ToKeyIndex(0);
+ return Unsigned(
+ LoadAndUntagToWord32ArrayElement(container, DescriptorArray::kHeaderSize,
+ key_index, kKeyToDetails * kTaggedSize));
}
TNode<Object> CodeStubAssembler::LoadValueByKeyIndex(
TNode<DescriptorArray> container, TNode<IntPtrT> key_index) {
- const int kKeyToValueOffset =
- (DescriptorArray::kEntryValueIndex - DescriptorArray::kEntryKeyIndex) *
- kPointerSize;
- return CAST(
- LoadWeakFixedArrayElement(container, key_index, kKeyToValueOffset));
+ const int kKeyToValue =
+ DescriptorArray::ToValueIndex(0) - DescriptorArray::ToKeyIndex(0);
+ return CAST(LoadDescriptorArrayElement(container, key_index,
+ kKeyToValue * kTaggedSize));
}
TNode<MaybeObject> CodeStubAssembler::LoadFieldTypeByKeyIndex(
TNode<DescriptorArray> container, TNode<IntPtrT> key_index) {
- const int kKeyToValueOffset =
- (DescriptorArray::kEntryValueIndex - DescriptorArray::kEntryKeyIndex) *
- kPointerSize;
- return LoadWeakFixedArrayElement(container, key_index, kKeyToValueOffset);
+ const int kKeyToValue =
+ DescriptorArray::ToValueIndex(0) - DescriptorArray::ToKeyIndex(0);
+ return LoadDescriptorArrayElement(container, key_index,
+ kKeyToValue * kTaggedSize);
+}
+
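+// Each descriptor entry occupies DescriptorArray::kEntrySize slots, so the
+// flat array index is entry * kEntrySize.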
+TNode<IntPtrT> CodeStubAssembler::DescriptorEntryToIndex(
+ TNode<IntPtrT> descriptor_entry) {
+ return IntPtrMul(descriptor_entry,
+ IntPtrConstant(DescriptorArray::kEntrySize));
+}
+
+TNode<Name> CodeStubAssembler::LoadKeyByDescriptorEntry(
+ TNode<DescriptorArray> container, TNode<IntPtrT> descriptor_entry) {
+ return CAST(LoadDescriptorArrayElement(
+ container, DescriptorEntryToIndex(descriptor_entry),
+ DescriptorArray::ToKeyIndex(0) * kTaggedSize));
+}
+
+TNode<Name> CodeStubAssembler::LoadKeyByDescriptorEntry(
+ TNode<DescriptorArray> container, int descriptor_entry) {
+ return CAST(LoadDescriptorArrayElement(
+ container, IntPtrConstant(0),
+ DescriptorArray::ToKeyIndex(descriptor_entry) * kTaggedSize));
+}
+
+TNode<Uint32T> CodeStubAssembler::LoadDetailsByDescriptorEntry(
+ TNode<DescriptorArray> container, TNode<IntPtrT> descriptor_entry) {
+ return Unsigned(LoadAndUntagToWord32ArrayElement(
+ container, DescriptorArray::kHeaderSize,
+ DescriptorEntryToIndex(descriptor_entry),
+ DescriptorArray::ToDetailsIndex(0) * kTaggedSize));
+}
+
+TNode<Uint32T> CodeStubAssembler::LoadDetailsByDescriptorEntry(
+ TNode<DescriptorArray> container, int descriptor_entry) {
+ return Unsigned(LoadAndUntagToWord32ArrayElement(
+ container, DescriptorArray::kHeaderSize, IntPtrConstant(0),
+ DescriptorArray::ToDetailsIndex(descriptor_entry) * kTaggedSize));
+}
+
+TNode<Object> CodeStubAssembler::LoadValueByDescriptorEntry(
+ TNode<DescriptorArray> container, int descriptor_entry) {
+ return CAST(LoadDescriptorArrayElement(
+ container, IntPtrConstant(0),
+ DescriptorArray::ToValueIndex(descriptor_entry) * kTaggedSize));
+}
+
+TNode<MaybeObject> CodeStubAssembler::LoadFieldTypeByDescriptorEntry(
+ TNode<DescriptorArray> container, TNode<IntPtrT> descriptor_entry) {
+ return LoadDescriptorArrayElement(
+ container, DescriptorEntryToIndex(descriptor_entry),
+ DescriptorArray::ToValueIndex(0) * kTaggedSize);
}
template TNode<IntPtrT> CodeStubAssembler::EntryToIndex<NameDictionary>(
@@ -8449,7 +8682,8 @@ void CodeStubAssembler::LookupLinear(TNode<Name> unique_name,
TVariable<IntPtrT>* var_name_index,
Label* if_not_found) {
static_assert(std::is_base_of<FixedArray, Array>::value ||
- std::is_base_of<WeakFixedArray, Array>::value,
+ std::is_base_of<WeakFixedArray, Array>::value ||
+ std::is_base_of<DescriptorArray, Array>::value,
"T must be a descendant of FixedArray or a WeakFixedArray");
Comment("LookupLinear");
TNode<IntPtrT> first_inclusive = IntPtrConstant(Array::ToKeyIndex(0));
@@ -8473,9 +8707,7 @@ void CodeStubAssembler::LookupLinear(TNode<Name> unique_name,
template <>
TNode<Uint32T> CodeStubAssembler::NumberOfEntries<DescriptorArray>(
TNode<DescriptorArray> descriptors) {
- return Unsigned(LoadAndUntagToWord32ArrayElement(
- descriptors, WeakFixedArray::kHeaderSize,
- IntPtrConstant(DescriptorArray::kDescriptorLengthIndex)));
+ return Unsigned(LoadNumberOfDescriptors(descriptors));
}
template <>
@@ -8528,10 +8760,10 @@ TNode<Uint32T> CodeStubAssembler::GetSortedKeyIndex<TransitionArray>(
template <typename Array>
TNode<Name> CodeStubAssembler::GetKey(TNode<Array> array,
TNode<Uint32T> entry_index) {
- static_assert(std::is_base_of<FixedArray, Array>::value ||
- std::is_base_of<WeakFixedArray, Array>::value,
- "T must be a descendant of FixedArray or a TransitionArray");
- const int key_offset = Array::ToKeyIndex(0) * kPointerSize;
+ static_assert(std::is_base_of<TransitionArray, Array>::value ||
+ std::is_base_of<DescriptorArray, Array>::value,
+ "T must be a descendant of DescriptorArray or TransitionArray");
+ const int key_offset = Array::ToKeyIndex(0) * kTaggedSize;
TNode<MaybeObject> element =
LoadArrayElement(array, Array::kHeaderSize,
EntryIndexToIndex<Array>(entry_index), key_offset);
@@ -8545,9 +8777,9 @@ template TNode<Name> CodeStubAssembler::GetKey<TransitionArray>(
TNode<Uint32T> CodeStubAssembler::DescriptorArrayGetDetails(
TNode<DescriptorArray> descriptors, TNode<Uint32T> descriptor_number) {
- const int details_offset = DescriptorArray::ToDetailsIndex(0) * kPointerSize;
+ const int details_offset = DescriptorArray::ToDetailsIndex(0) * kTaggedSize;
return Unsigned(LoadAndUntagToWord32ArrayElement(
- descriptors, WeakFixedArray::kHeaderSize,
+ descriptors, DescriptorArray::kHeaderSize,
EntryIndexToIndex<DescriptorArray>(descriptor_number), details_offset));
}
@@ -8626,18 +8858,13 @@ void CodeStubAssembler::LookupBinary(TNode<Name> unique_name,
void CodeStubAssembler::DescriptorArrayForEach(
VariableList& variable_list, TNode<Uint32T> start_descriptor,
TNode<Uint32T> end_descriptor, const ForEachDescriptorBodyFunction& body) {
- TNode<IntPtrT> start_index =
- IntPtrAdd(IntPtrConstant(DescriptorArray::ToKeyIndex(0)),
- EntryIndexToIndex<DescriptorArray>(start_descriptor));
-
- TNode<IntPtrT> end_index =
- IntPtrAdd(IntPtrConstant(DescriptorArray::ToKeyIndex(0)),
- EntryIndexToIndex<DescriptorArray>(end_descriptor));
+ TNode<IntPtrT> start_index = ToKeyIndex<DescriptorArray>(start_descriptor);
+ TNode<IntPtrT> end_index = ToKeyIndex<DescriptorArray>(end_descriptor);
BuildFastLoop(variable_list, start_index, end_index,
[=](Node* index) {
- TNode<UintPtrT> descriptor_key_index =
- TNode<UintPtrT>::UncheckedCast(index);
+ TNode<IntPtrT> descriptor_key_index =
+ TNode<IntPtrT>::UncheckedCast(index);
body(descriptor_key_index);
},
DescriptorArray::kEntrySize, INTPTR_PARAMETERS,
@@ -8659,9 +8886,9 @@ void CodeStubAssembler::ForEachEnumerableOwnProperty(
DescriptorArrayForEach(
list, Unsigned(Int32Constant(0)), nof_descriptors,
- [=, &var_stable](TNode<UintPtrT> descriptor_key_index) {
+ [=, &var_stable](TNode<IntPtrT> descriptor_key_index) {
TNode<Name> next_key =
- CAST(LoadWeakFixedArrayElement(descriptors, descriptor_key_index));
+ LoadKeyByKeyIndex(descriptors, descriptor_key_index);
TVARIABLE(Object, var_value, SmiConstant(0));
Label callback(this), next_iteration(this);
@@ -8963,7 +9190,7 @@ void CodeStubAssembler::LoadPropertyFromFastObject(
BIND(&if_inobject);
{
Comment("if_inobject");
- Node* field_offset = TimesPointerSize(field_index);
+ Node* field_offset = TimesTaggedSize(field_index);
Label if_double(this), if_tagged(this);
Branch(Word32NotEqual(representation,
@@ -8989,9 +9216,9 @@ void CodeStubAssembler::LoadPropertyFromFastObject(
BIND(&if_backing_store);
{
Comment("if_backing_store");
- Node* properties = LoadFastProperties(object);
+ TNode<HeapObject> properties = LoadFastProperties(object);
field_index = IntPtrSub(field_index, instance_size_in_words);
- Node* value = LoadPropertyArrayElement(properties, field_index);
+ Node* value = LoadPropertyArrayElement(CAST(properties), field_index);
Label if_double(this), if_tagged(this);
Branch(Word32NotEqual(representation,
@@ -9647,8 +9874,8 @@ TNode<IntPtrT> CodeStubAssembler::ElementOffsetFromIndex(Node* index_node,
bool constant_index = false;
if (mode == SMI_PARAMETERS) {
element_size_shift -= kSmiShiftBits;
- Smi* smi_index;
- constant_index = ToSmiConstant(index_node, smi_index);
+ Smi smi_index;
+ constant_index = ToSmiConstant(index_node, &smi_index);
if (constant_index) index = smi_index->value();
index_node = BitcastTaggedToWord(index_node);
} else {
@@ -9682,32 +9909,42 @@ TNode<BoolT> CodeStubAssembler::IsOffsetInBounds(SloppyTNode<IntPtrT> offset,
TNode<FeedbackVector> CodeStubAssembler::LoadFeedbackVector(
SloppyTNode<JSFunction> closure, Label* if_undefined) {
- TNode<FeedbackCell> feedback_cell =
- CAST(LoadObjectField(closure, JSFunction::kFeedbackCellOffset));
- TNode<Object> maybe_vector =
- LoadObjectField(feedback_cell, FeedbackCell::kValueOffset);
+ TNode<Object> maybe_vector = LoadFeedbackVectorUnchecked(closure);
if (if_undefined) {
GotoIf(IsUndefined(maybe_vector), if_undefined);
}
return CAST(maybe_vector);
}
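+// Loads the closure's feedback cell value without asserting that it is a
+// FeedbackVector; the result may be undefined.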
+TNode<Object> CodeStubAssembler::LoadFeedbackVectorUnchecked(
+ SloppyTNode<JSFunction> closure) {
+ TNode<FeedbackCell> feedback_cell =
+ CAST(LoadObjectField(closure, JSFunction::kFeedbackCellOffset));
+ TNode<Object> maybe_vector =
+ LoadObjectField(feedback_cell, FeedbackCell::kValueOffset);
+ return maybe_vector;
+}
+
TNode<FeedbackVector> CodeStubAssembler::LoadFeedbackVectorForStub() {
TNode<JSFunction> function =
CAST(LoadFromParentFrame(JavaScriptFrameConstants::kFunctionOffset));
return LoadFeedbackVector(function);
}
-void CodeStubAssembler::UpdateFeedback(Node* feedback, Node* feedback_vector,
+void CodeStubAssembler::UpdateFeedback(Node* feedback, Node* maybe_vector,
Node* slot_id) {
+ Label end(this);
+  // If {maybe_vector} is undefined, there is nothing to do.
+ GotoIf(IsUndefined(maybe_vector), &end);
+
// This method is used for binary op and compare feedback. These
// vector nodes are initialized with a smi 0, so we can simply OR
// our new feedback in place.
+ TNode<FeedbackVector> feedback_vector = CAST(maybe_vector);
TNode<MaybeObject> feedback_element =
LoadFeedbackVectorSlot(feedback_vector, slot_id);
TNode<Smi> previous_feedback = CAST(feedback_element);
TNode<Smi> combined_feedback = SmiOr(previous_feedback, CAST(feedback));
- Label end(this);
GotoIf(SmiEqual(previous_feedback, combined_feedback), &end);
{
@@ -9720,6 +9957,57 @@ void CodeStubAssembler::UpdateFeedback(Node* feedback, Node* feedback_vector,
BIND(&end);
}
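+// Computes the effective language mode: strict if either the
+// SharedFunctionInfo or the context's ScopeInfo is strict, sloppy otherwise.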
+Node* CodeStubAssembler::GetLanguageMode(
+ TNode<SharedFunctionInfo> shared_function_info, Node* context) {
+ VARIABLE(var_language_mode, MachineRepresentation::kTaggedSigned,
+ SmiConstant(LanguageMode::kStrict));
+ Label language_mode_determined(this), language_mode_sloppy(this);
+
+  // Get the language mode from the SharedFunctionInfo.
+ TNode<Uint32T> closure_is_strict =
+ DecodeWord32<SharedFunctionInfo::IsStrictBit>(LoadObjectField(
+ shared_function_info, SharedFunctionInfo::kFlagsOffset,
+ MachineType::Uint32()));
+  // It is already strict; we need not check the context's language mode.
+ GotoIf(closure_is_strict, &language_mode_determined);
+
+  // SFI::LanguageMode is sloppy; check if the context has a stricter mode.
+ TNode<ScopeInfo> scope_info =
+ CAST(LoadObjectField(context, Context::kScopeInfoOffset));
+  // If there is no flags field, assume sloppy.
+ GotoIf(SmiLessThanOrEqual(LoadFixedArrayBaseLength(scope_info),
+ SmiConstant(ScopeInfo::Fields::kFlags)),
+ &language_mode_sloppy);
+ TNode<Smi> flags = CAST(LoadFixedArrayElement(
+ scope_info, SmiConstant(ScopeInfo::Fields::kFlags)));
+ TNode<Uint32T> context_is_strict =
+ DecodeWord32<ScopeInfo::LanguageModeField>(SmiToInt32(flags));
+ GotoIf(context_is_strict, &language_mode_determined);
+ Goto(&language_mode_sloppy);
+
+ // Both Context::ScopeInfo::LanguageMode and SFI::LanguageMode are sloppy.
+ BIND(&language_mode_sloppy);
+ var_language_mode.Bind(SmiConstant(LanguageMode::kSloppy));
+ Goto(&language_mode_determined);
+
+ BIND(&language_mode_determined);
+ return var_language_mode.value();
+}
+
+Node* CodeStubAssembler::GetLanguageMode(TNode<JSFunction> closure,
+ Node* context) {
+ TNode<SharedFunctionInfo> sfi =
+ CAST(LoadObjectField(closure, JSFunction::kSharedFunctionInfoOffset));
+ return GetLanguageMode(sfi, context);
+}
+
+Node* CodeStubAssembler::GetLanguageMode(TNode<FeedbackVector> vector,
+ Node* context) {
+ TNode<SharedFunctionInfo> sfi =
+ CAST(LoadObjectField(vector, FeedbackVector::kSharedFunctionInfoOffset));
+ return GetLanguageMode(sfi, context);
+}
+
void CodeStubAssembler::ReportFeedbackUpdate(
SloppyTNode<FeedbackVector> feedback_vector, SloppyTNode<IntPtrT> slot_id,
const char* reason) {
@@ -9858,17 +10146,12 @@ Node* CodeStubAssembler::EmitKeyedSloppyArguments(Node* receiver, Node* key,
{
TNode<IntPtrT> mapped_index_intptr = SmiUntag(CAST(mapped_index));
TNode<Context> the_context = CAST(LoadFixedArrayElement(elements, 0));
- // Assert that we can use LoadFixedArrayElement/StoreFixedArrayElement
- // methods for accessing Context.
- STATIC_ASSERT(Context::kHeaderSize == FixedArray::kHeaderSize);
- DCHECK_EQ(Context::SlotOffset(0) + kHeapObjectTag,
- FixedArray::OffsetOfElementAt(0));
if (is_load) {
- Node* result = LoadFixedArrayElement(the_context, mapped_index_intptr);
+ Node* result = LoadContextElement(the_context, mapped_index_intptr);
CSA_ASSERT(this, WordNotEqual(result, TheHoleConstant()));
var_result.Bind(result);
} else {
- StoreFixedArrayElement(the_context, mapped_index_intptr, value);
+ StoreContextElement(the_context, mapped_index_intptr, value);
}
Goto(&end);
}
@@ -9908,7 +10191,7 @@ TNode<Context> CodeStubAssembler::LoadScriptContext(
TNode<Context> script_context = CAST(LoadFixedArrayElement(
script_context_table, context_index,
- ScriptContextTable::kFirstContextSlotIndex * kPointerSize));
+ ScriptContextTable::kFirstContextSlotIndex * kTaggedSize));
return script_context;
}
@@ -10083,13 +10366,13 @@ Node* CodeStubAssembler::PrepareValueForWriteToTypedArray(
void CodeStubAssembler::EmitBigTypedArrayElementStore(
TNode<JSTypedArray> object, TNode<FixedTypedArrayBase> elements,
TNode<IntPtrT> intptr_key, TNode<Object> value, TNode<Context> context,
- Label* opt_if_neutered) {
+ Label* opt_if_detached) {
TNode<BigInt> bigint_value = ToBigInt(context, value);
- if (opt_if_neutered != nullptr) {
- // Check if buffer has been neutered. Must happen after {ToBigInt}!
+ if (opt_if_detached != nullptr) {
+ // Check if buffer has been detached. Must happen after {ToBigInt}!
Node* buffer = LoadObjectField(object, JSArrayBufferView::kBufferOffset);
- GotoIf(IsDetachedBuffer(buffer), opt_if_neutered);
+ GotoIf(IsDetachedBuffer(buffer), opt_if_detached);
}
TNode<RawPtrT> backing_store = LoadFixedTypedArrayBackingStore(elements);
@@ -10104,19 +10387,19 @@ void CodeStubAssembler::BigIntToRawBytes(TNode<BigInt> bigint,
Label done(this);
*var_low = Unsigned(IntPtrConstant(0));
*var_high = Unsigned(IntPtrConstant(0));
- TNode<WordT> bitfield = LoadBigIntBitfield(bigint);
- TNode<UintPtrT> length = DecodeWord<BigIntBase::LengthBits>(bitfield);
- TNode<UintPtrT> sign = DecodeWord<BigIntBase::SignBits>(bitfield);
- GotoIf(WordEqual(length, IntPtrConstant(0)), &done);
+ TNode<Word32T> bitfield = LoadBigIntBitfield(bigint);
+ TNode<Uint32T> length = DecodeWord32<BigIntBase::LengthBits>(bitfield);
+ TNode<Uint32T> sign = DecodeWord32<BigIntBase::SignBits>(bitfield);
+ GotoIf(Word32Equal(length, Int32Constant(0)), &done);
*var_low = LoadBigIntDigit(bigint, 0);
if (!Is64()) {
Label load_done(this);
- GotoIf(WordEqual(length, IntPtrConstant(1)), &load_done);
+ GotoIf(Word32Equal(length, Int32Constant(1)), &load_done);
*var_high = LoadBigIntDigit(bigint, 1);
Goto(&load_done);
BIND(&load_done);
}
- GotoIf(WordEqual(sign, IntPtrConstant(0)), &done);
+ GotoIf(Word32Equal(sign, Int32Constant(0)), &done);
// Negative value. Simulate two's complement.
if (!Is64()) {
*var_high = Unsigned(IntPtrSub(IntPtrConstant(0), var_high->value()));
@@ -10150,7 +10433,7 @@ void CodeStubAssembler::EmitBigTypedArrayElementStore(
if (!Is64()) {
StoreNoWriteBarrier(rep, backing_store, offset, var_high.value());
StoreNoWriteBarrier(rep, backing_store,
- IntPtrAdd(offset, IntPtrConstant(kPointerSize)),
+ IntPtrAdd(offset, IntPtrConstant(kSystemPointerSize)),
var_low.value());
} else {
StoreNoWriteBarrier(rep, backing_store, offset, var_low.value());
@@ -10159,14 +10442,13 @@ void CodeStubAssembler::EmitBigTypedArrayElementStore(
StoreNoWriteBarrier(rep, backing_store, offset, var_low.value());
if (!Is64()) {
StoreNoWriteBarrier(rep, backing_store,
- IntPtrAdd(offset, IntPtrConstant(kPointerSize)),
+ IntPtrAdd(offset, IntPtrConstant(kSystemPointerSize)),
var_high.value());
}
#endif
}
void CodeStubAssembler::EmitElementStore(Node* object, Node* key, Node* value,
- bool is_jsarray,
ElementsKind elements_kind,
KeyedAccessStoreMode store_mode,
Label* bailout, Node* context) {
@@ -10196,7 +10478,7 @@ void CodeStubAssembler::EmitElementStore(Node* object, Node* key, Node* value,
// the buffer is not alive or move the elements.
// TODO(ishell): introduce DisallowHeapAllocationCode scope here.
- // Check if buffer has been neutered.
+ // Check if buffer has been detached.
Node* buffer = LoadObjectField(object, JSArrayBufferView::kBufferOffset);
GotoIf(IsDetachedBuffer(buffer), bailout);
@@ -10208,9 +10490,14 @@ void CodeStubAssembler::EmitElementStore(Node* object, Node* key, Node* value,
// Skip the store if we write beyond the length or
// to a property with a negative integer index.
GotoIfNot(UintPtrLessThan(intptr_key, length), &done);
- } else {
- DCHECK_EQ(STANDARD_STORE, store_mode);
+ } else if (store_mode == STANDARD_STORE) {
GotoIfNot(UintPtrLessThan(intptr_key, length), bailout);
+ } else {
+    // This case arises from the dispatched calls in
+    // ElementsTransitionAndStore and StoreFastElement.
+ // TODO(jgruber): Avoid generating unsupported combinations to save code
+ // size.
+ DebugBreak();
}
if (elements_kind == BIGINT64_ELEMENTS ||
@@ -10233,11 +10520,12 @@ void CodeStubAssembler::EmitElementStore(Node* object, Node* key, Node* value,
BIND(&done);
return;
}
- DCHECK(IsSmiOrObjectElementsKind(elements_kind) ||
- IsDoubleElementsKind(elements_kind));
+ DCHECK(IsFastElementsKind(elements_kind));
- Node* length = is_jsarray ? LoadJSArrayLength(object)
- : LoadFixedArrayBaseLength(elements);
+ Node* length =
+ SelectImpl(IsJSArray(object), [=]() { return LoadJSArrayLength(object); },
+ [=]() { return LoadFixedArrayBaseLength(elements); },
+ MachineRepresentation::kTagged);
length = TaggedToParameter(length, parameter_mode);
// In case value is stored into a fast smi array, assure that the value is
@@ -10250,9 +10538,8 @@ void CodeStubAssembler::EmitElementStore(Node* object, Node* key, Node* value,
}
if (IsGrowStoreMode(store_mode)) {
- elements = CheckForCapacityGrow(object, elements, elements_kind, store_mode,
- length, intptr_key, parameter_mode,
- is_jsarray, bailout);
+ elements = CheckForCapacityGrow(object, elements, elements_kind, length,
+ intptr_key, parameter_mode, bailout);
} else {
GotoIfNot(UintPtrLessThan(intptr_key, length), bailout);
}
@@ -10270,10 +10557,10 @@ void CodeStubAssembler::EmitElementStore(Node* object, Node* key, Node* value,
StoreElement(elements, elements_kind, intptr_key, value, parameter_mode);
}
-Node* CodeStubAssembler::CheckForCapacityGrow(
- Node* object, Node* elements, ElementsKind kind,
- KeyedAccessStoreMode store_mode, Node* length, Node* key,
- ParameterMode mode, bool is_js_array, Label* bailout) {
+Node* CodeStubAssembler::CheckForCapacityGrow(Node* object, Node* elements,
+ ElementsKind kind, Node* length,
+ Node* key, ParameterMode mode,
+ Label* bailout) {
DCHECK(IsFastElementsKind(kind));
VARIABLE(checked_elements, MachineRepresentation::kTagged);
Label grow_case(this), no_grow_case(this), done(this),
@@ -10318,11 +10605,11 @@ Node* CodeStubAssembler::CheckForCapacityGrow(
}
BIND(&fits_capacity);
- if (is_js_array) {
- Node* new_length = IntPtrAdd(key, IntPtrOrSmiConstant(1, mode));
- StoreObjectFieldNoWriteBarrier(object, JSArray::kLengthOffset,
- ParameterToTagged(new_length, mode));
- }
+ GotoIfNot(IsJSArray(object), &done);
+
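+  // For JSArrays, bump the length field to cover the newly stored element.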
+ Node* new_length = IntPtrAdd(key, IntPtrOrSmiConstant(1, mode));
+ StoreObjectFieldNoWriteBarrier(object, JSArray::kLengthOffset,
+ ParameterToTagged(new_length, mode));
Goto(&done);
}
@@ -10361,7 +10648,6 @@ Node* CodeStubAssembler::CopyElementsOnWrite(Node* object, Node* elements,
void CodeStubAssembler::TransitionElementsKind(Node* object, Node* map,
ElementsKind from_kind,
ElementsKind to_kind,
- bool is_jsarray,
Label* bailout) {
DCHECK(!IsHoleyElementsKind(from_kind) || IsHoleyElementsKind(to_kind));
if (AllocationSite::ShouldTrack(from_kind, to_kind)) {
@@ -10378,8 +10664,14 @@ void CodeStubAssembler::TransitionElementsKind(Node* object, Node* map,
// TODO(ishell): Use OptimalParameterMode().
ParameterMode mode = INTPTR_PARAMETERS;
Node* elements_length = SmiUntag(LoadFixedArrayBaseLength(elements));
- Node* array_length =
- is_jsarray ? SmiUntag(LoadFastJSArrayLength(object)) : elements_length;
+ Node* array_length = SelectImpl(
+ IsJSArray(object),
+ [=]() {
+ CSA_ASSERT(this, IsFastElementsKind(LoadElementsKind(object)));
+ return SmiUntag(LoadFastJSArrayLength(object));
+ },
+ [=]() { return elements_length; },
+ MachineType::PointerRepresentation());
CSA_ASSERT(this, WordNotEqual(elements_length, IntPtrConstant(0)));
@@ -10402,7 +10694,7 @@ void CodeStubAssembler::TrapAllocationMemento(Node* object,
ExternalReference::new_space_allocation_top_address(isolate()));
const int kMementoMapOffset = JSArray::kSize;
const int kMementoLastWordOffset =
- kMementoMapOffset + AllocationMemento::kSize - kPointerSize;
+ kMementoMapOffset + AllocationMemento::kSize - kTaggedSize;
// Bail out if the object is not in new space.
TNode<IntPtrT> object_word = BitcastTaggedToWord(object);
@@ -10638,7 +10930,7 @@ void CodeStubAssembler::BuildFastFixedArrayForEach(
FixedArray::kHeaderSize - kHeapObjectTag);
if (direction == ForEachDirection::kReverse) std::swap(start, limit);
- int increment = IsDoubleElementsKind(kind) ? kDoubleSize : kPointerSize;
+ int increment = IsDoubleElementsKind(kind) ? kDoubleSize : kTaggedSize;
BuildFastLoop(
vars, start, limit,
[fixed_array, &body](Node* offset) { body(fixed_array, offset); },
@@ -10662,13 +10954,14 @@ void CodeStubAssembler::InitializeFieldsWithRoot(Node* object,
start_offset = IntPtrAdd(start_offset, IntPtrConstant(-kHeapObjectTag));
end_offset = IntPtrAdd(end_offset, IntPtrConstant(-kHeapObjectTag));
Node* root_value = LoadRoot(root_index);
- BuildFastLoop(end_offset, start_offset,
- [this, object, root_value](Node* current) {
- StoreNoWriteBarrier(MachineRepresentation::kTagged, object,
- current, root_value);
- },
- -kPointerSize, INTPTR_PARAMETERS,
- CodeStubAssembler::IndexAdvanceMode::kPre);
+ BuildFastLoop(
+ end_offset, start_offset,
+ [this, object, root_value](Node* current) {
+ StoreNoWriteBarrier(MachineRepresentation::kTagged, object, current,
+ root_value);
+ },
+ -kTaggedSize, INTPTR_PARAMETERS,
+ CodeStubAssembler::IndexAdvanceMode::kPre);
}
void CodeStubAssembler::BranchIfNumberRelationalComparison(
@@ -11250,15 +11543,16 @@ void CodeStubAssembler::GenerateEqual_Same(Node* value, Label* if_equal,
if (var_type_feedback != nullptr) {
Node* instance_type = LoadMapInstanceType(value_map);
- Label if_string(this), if_receiver(this), if_symbol(this), if_bigint(this),
- if_other(this, Label::kDeferred);
+ Label if_string(this), if_receiver(this), if_oddball(this), if_symbol(this),
+ if_bigint(this);
GotoIf(IsStringInstanceType(instance_type), &if_string);
GotoIf(IsJSReceiverInstanceType(instance_type), &if_receiver);
- GotoIf(IsBigIntInstanceType(instance_type), &if_bigint);
- Branch(IsSymbolInstanceType(instance_type), &if_symbol, &if_other);
+ GotoIf(IsOddballInstanceType(instance_type), &if_oddball);
+ Branch(IsBigIntInstanceType(instance_type), &if_bigint, &if_symbol);
BIND(&if_string);
{
+ CSA_ASSERT(this, IsString(value));
CombineFeedback(var_type_feedback,
CollectFeedbackForString(instance_type));
Goto(if_equal);
@@ -11266,26 +11560,44 @@ void CodeStubAssembler::GenerateEqual_Same(Node* value, Label* if_equal,
BIND(&if_symbol);
{
+ CSA_ASSERT(this, IsSymbol(value));
CombineFeedback(var_type_feedback, CompareOperationFeedback::kSymbol);
Goto(if_equal);
}
BIND(&if_receiver);
{
+ CSA_ASSERT(this, IsJSReceiver(value));
CombineFeedback(var_type_feedback, CompareOperationFeedback::kReceiver);
Goto(if_equal);
}
BIND(&if_bigint);
{
+ CSA_ASSERT(this, IsBigInt(value));
CombineFeedback(var_type_feedback, CompareOperationFeedback::kBigInt);
Goto(if_equal);
}
- BIND(&if_other);
+ BIND(&if_oddball);
{
- CombineFeedback(var_type_feedback, CompareOperationFeedback::kAny);
- Goto(if_equal);
+ CSA_ASSERT(this, IsOddball(value));
+ Label if_boolean(this), if_not_boolean(this);
+ Branch(IsBooleanMap(value_map), &if_boolean, &if_not_boolean);
+
+ BIND(&if_boolean);
+ {
+ CombineFeedback(var_type_feedback, CompareOperationFeedback::kAny);
+ Goto(if_equal);
+ }
+
+ BIND(&if_not_boolean);
+ {
+ CSA_ASSERT(this, IsNullOrUndefined(value));
+ CombineFeedback(var_type_feedback,
+ CompareOperationFeedback::kReceiverOrNullOrUndefined);
+ Goto(if_equal);
+ }
}
} else {
Goto(if_equal);
@@ -11425,13 +11737,12 @@ Node* CodeStubAssembler::Equal(Node* left, Node* right, Node* context,
Node* left_type = LoadMapInstanceType(left_map);
Node* right_type = LoadMapInstanceType(right_map);
- GotoIf(Int32LessThan(left_type, Int32Constant(FIRST_NONSTRING_TYPE)),
- &if_left_string);
- GotoIf(InstanceTypeEqual(left_type, SYMBOL_TYPE), &if_left_symbol);
- GotoIf(InstanceTypeEqual(left_type, HEAP_NUMBER_TYPE), &if_left_number);
- GotoIf(InstanceTypeEqual(left_type, ODDBALL_TYPE), &if_left_oddball);
- GotoIf(InstanceTypeEqual(left_type, BIGINT_TYPE), &if_left_bigint);
- Goto(&if_left_receiver);
+ GotoIf(IsStringInstanceType(left_type), &if_left_string);
+ GotoIf(IsSymbolInstanceType(left_type), &if_left_symbol);
+ GotoIf(IsHeapNumberInstanceType(left_type), &if_left_number);
+ GotoIf(IsOddballInstanceType(left_type), &if_left_oddball);
+ Branch(IsBigIntInstanceType(left_type), &if_left_bigint,
+ &if_left_receiver);
BIND(&if_left_string);
{
@@ -11528,20 +11839,53 @@ Node* CodeStubAssembler::Equal(Node* left, Node* right, Node* context,
BIND(&if_left_oddball);
{
- if (var_type_feedback != nullptr) {
- var_type_feedback->Bind(SmiConstant(CompareOperationFeedback::kAny));
- }
+ Label if_left_boolean(this), if_left_not_boolean(this);
+ Branch(IsBooleanMap(left_map), &if_left_boolean, &if_left_not_boolean);
+
+ BIND(&if_left_not_boolean);
+ {
+ // {left} is either Null or Undefined. Check if {right} is
+ // undetectable (which includes Null and Undefined).
+ Label if_right_undetectable(this), if_right_not_undetectable(this);
+ Branch(IsUndetectableMap(right_map), &if_right_undetectable,
+ &if_right_not_undetectable);
- Label if_left_boolean(this);
- GotoIf(IsBooleanMap(left_map), &if_left_boolean);
- // {left} is either Null or Undefined. Check if {right} is
- // undetectable (which includes Null and Undefined).
- Branch(IsUndetectableMap(right_map), &if_equal, &if_notequal);
+ BIND(&if_right_undetectable);
+ {
+ if (var_type_feedback != nullptr) {
+            // If {right} is undetectable, it must also be either Null or
+            // Undefined, or a Receiver (aka document.all).
+ var_type_feedback->Bind(SmiConstant(
+ CompareOperationFeedback::kReceiverOrNullOrUndefined));
+ }
+ Goto(&if_equal);
+ }
+
+ BIND(&if_right_not_undetectable);
+ {
+ if (var_type_feedback != nullptr) {
+ // Track whether {right} is Null, Undefined or Receiver.
+ var_type_feedback->Bind(SmiConstant(
+ CompareOperationFeedback::kReceiverOrNullOrUndefined));
+ GotoIf(IsJSReceiverInstanceType(right_type), &if_notequal);
+ GotoIfNot(IsBooleanMap(right_map), &if_notequal);
+ var_type_feedback->Bind(
+ SmiConstant(CompareOperationFeedback::kAny));
+ }
+ Goto(&if_notequal);
+ }
+ }
BIND(&if_left_boolean);
{
+ if (var_type_feedback != nullptr) {
+ var_type_feedback->Bind(
+ SmiConstant(CompareOperationFeedback::kAny));
+ }
+
// If {right} is a Boolean too, it must be a different Boolean.
GotoIf(WordEqual(right_map, left_map), &if_notequal);
+
// Otherwise, convert {left} to number and try again.
var_left.Bind(LoadObjectField(left, Oddball::kToNumberOffset));
Goto(&loop);
@@ -11585,29 +11929,50 @@ Node* CodeStubAssembler::Equal(Node* left, Node* right, Node* context,
BIND(&if_left_receiver);
{
CSA_ASSERT(this, IsJSReceiverInstanceType(left_type));
- Label if_right_not_receiver(this);
- GotoIfNot(IsJSReceiverInstanceType(right_type), &if_right_not_receiver);
+ Label if_right_receiver(this), if_right_not_receiver(this);
+ Branch(IsJSReceiverInstanceType(right_type), &if_right_receiver,
+ &if_right_not_receiver);
- // {left} and {right} are different JSReceiver references.
- CombineFeedback(var_type_feedback, CompareOperationFeedback::kReceiver);
- Goto(&if_notequal);
+ BIND(&if_right_receiver);
+ {
+ // {left} and {right} are different JSReceiver references.
+ CombineFeedback(var_type_feedback,
+ CompareOperationFeedback::kReceiver);
+ Goto(&if_notequal);
+ }
BIND(&if_right_not_receiver);
{
- if (var_type_feedback != nullptr) {
- var_type_feedback->Bind(
- SmiConstant(CompareOperationFeedback::kAny));
+ // Check if {right} is undetectable, which means it must be Null
+ // or Undefined, since we already ruled out Receiver for {right}.
+ Label if_right_undetectable(this),
+ if_right_not_undetectable(this, Label::kDeferred);
+ Branch(IsUndetectableMap(right_map), &if_right_undetectable,
+ &if_right_not_undetectable);
+
+ BIND(&if_right_undetectable);
+ {
+ // When we get here, {right} must be either Null or Undefined.
+ CSA_ASSERT(this, IsNullOrUndefined(right));
+ if (var_type_feedback != nullptr) {
+ var_type_feedback->Bind(SmiConstant(
+ CompareOperationFeedback::kReceiverOrNullOrUndefined));
+ }
+ Branch(IsUndetectableMap(left_map), &if_equal, &if_notequal);
}
- Label if_right_null_or_undefined(this);
- GotoIf(IsUndetectableMap(right_map), &if_right_null_or_undefined);
-
- // {right} is a Primitive; convert {left} to Primitive too.
- Callable callable = CodeFactory::NonPrimitiveToPrimitive(isolate());
- var_left.Bind(CallStub(callable, context, left));
- Goto(&loop);
- BIND(&if_right_null_or_undefined);
- Branch(IsUndetectableMap(left_map), &if_equal, &if_notequal);
+ BIND(&if_right_not_undetectable);
+ {
+        // {right} is a Primitive, and neither Null nor Undefined;
+ // convert {left} to Primitive too.
+ if (var_type_feedback != nullptr) {
+ var_type_feedback->Bind(
+ SmiConstant(CompareOperationFeedback::kAny));
+ }
+ Callable callable = CodeFactory::NonPrimitiveToPrimitive(isolate());
+ var_left.Bind(CallStub(callable, context, left));
+ Goto(&loop);
+ }
}
}
}
@@ -11872,20 +12237,41 @@ Node* CodeStubAssembler::StrictEqual(Node* lhs, Node* rhs,
BIND(&if_lhsisnotbigint);
if (var_type_feedback != nullptr) {
// Load the instance type of {rhs}.
- Node* rhs_instance_type = LoadInstanceType(rhs);
+ Node* rhs_map = LoadMap(rhs);
+ Node* rhs_instance_type = LoadMapInstanceType(rhs_map);
- Label if_lhsissymbol(this), if_lhsisreceiver(this);
+ Label if_lhsissymbol(this), if_lhsisreceiver(this),
+ if_lhsisoddball(this);
GotoIf(IsJSReceiverInstanceType(lhs_instance_type),
&if_lhsisreceiver);
+ GotoIf(IsBooleanMap(lhs_map), &if_notequal);
+ GotoIf(IsOddballInstanceType(lhs_instance_type), &if_lhsisoddball);
Branch(IsSymbolInstanceType(lhs_instance_type), &if_lhsissymbol,
&if_notequal);
BIND(&if_lhsisreceiver);
{
- GotoIfNot(IsJSReceiverInstanceType(rhs_instance_type),
- &if_notequal);
+ GotoIf(IsBooleanMap(rhs_map), &if_notequal);
var_type_feedback->Bind(
SmiConstant(CompareOperationFeedback::kReceiver));
+ GotoIf(IsJSReceiverInstanceType(rhs_instance_type), &if_notequal);
+ var_type_feedback->Bind(SmiConstant(
+ CompareOperationFeedback::kReceiverOrNullOrUndefined));
+ GotoIf(IsOddballInstanceType(rhs_instance_type), &if_notequal);
+ var_type_feedback->Bind(
+ SmiConstant(CompareOperationFeedback::kAny));
+ Goto(&if_notequal);
+ }
+
+ BIND(&if_lhsisoddball);
+ {
+ STATIC_ASSERT(LAST_PRIMITIVE_TYPE == ODDBALL_TYPE);
+ GotoIf(IsBooleanMap(rhs_map), &if_notequal);
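+          // ODDBALL_TYPE is the last primitive instance type (see the
+          // STATIC_ASSERT above), so anything at or above it is an Oddball
+          // or a JSReceiver; Booleans were already excluded.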
+ GotoIf(
+ Int32LessThan(rhs_instance_type, Int32Constant(ODDBALL_TYPE)),
+ &if_notequal);
+ var_type_feedback->Bind(SmiConstant(
+ CompareOperationFeedback::kReceiverOrNullOrUndefined));
Goto(&if_notequal);
}
@@ -12269,11 +12655,11 @@ TNode<Object> CodeStubAssembler::GetSuperConstructor(
return result.value();
}
-TNode<Object> CodeStubAssembler::SpeciesConstructor(
+TNode<JSReceiver> CodeStubAssembler::SpeciesConstructor(
SloppyTNode<Context> context, SloppyTNode<Object> object,
- SloppyTNode<Object> default_constructor) {
+ SloppyTNode<JSReceiver> default_constructor) {
Isolate* isolate = this->isolate();
- TVARIABLE(Object, var_result, default_constructor);
+ TVARIABLE(JSReceiver, var_result, default_constructor);
// 2. Let C be ? Get(O, "constructor").
TNode<Object> constructor =
@@ -12298,7 +12684,7 @@ TNode<Object> CodeStubAssembler::SpeciesConstructor(
Label throw_error(this);
GotoIf(TaggedIsSmi(species), &throw_error);
GotoIfNot(IsConstructorMap(LoadMap(CAST(species))), &throw_error);
- var_result = species;
+ var_result = CAST(species);
Goto(&out);
// 8. Throw a TypeError exception.
@@ -12334,15 +12720,6 @@ Node* CodeStubAssembler::InstanceOf(Node* object, Node* callable,
GotoIfNot(WordEqual(inst_of_handler, function_has_instance),
&if_otherhandler);
{
- // TODO(6786): A direct call to a TFJ builtin breaks the lazy
- // deserialization mechanism in two ways: first, we always pass in a
- // callable containing the DeserializeLazy code object (assuming that
- // FunctionPrototypeHasInstance is lazy). Second, a direct call (without
- // going through CodeFactory::Call) to DeserializeLazy will not initialize
- // new_target properly. For now we can avoid this by marking
- // FunctionPrototypeHasInstance as eager, but this should be fixed at some
- // point.
- //
// Call to Function.prototype[@@hasInstance] directly.
Callable builtin(BUILTIN_CODE(isolate(), FunctionPrototypeHasInstance),
CallTrampolineDescriptor{});
@@ -12622,7 +12999,7 @@ Node* CodeStubAssembler::AllocateJSIteratorResultForEntry(Node* context,
StoreFixedArrayElement(elements, 1, value);
Node* array_map = LoadContextElement(
native_context, Context::JS_ARRAY_PACKED_ELEMENTS_MAP_INDEX);
- Node* array = InnerAllocate(elements, elements_size);
+ TNode<HeapObject> array = InnerAllocate(elements, elements_size);
StoreMapNoWriteBarrier(array, array_map);
StoreObjectFieldRoot(array, JSArray::kPropertiesOrHashOffset,
RootIndex::kEmptyFixedArray);
@@ -12630,7 +13007,7 @@ Node* CodeStubAssembler::AllocateJSIteratorResultForEntry(Node* context,
StoreObjectFieldNoWriteBarrier(array, JSArray::kLengthOffset, length);
Node* iterator_map =
LoadContextElement(native_context, Context::ITERATOR_RESULT_MAP_INDEX);
- Node* result = InnerAllocate(array, JSArray::kSize);
+ TNode<HeapObject> result = InnerAllocate(array, JSArray::kSize);
StoreMapNoWriteBarrier(result, iterator_map);
StoreObjectFieldRoot(result, JSIteratorResult::kPropertiesOrHashOffset,
RootIndex::kEmptyFixedArray);
@@ -12642,28 +13019,18 @@ Node* CodeStubAssembler::AllocateJSIteratorResultForEntry(Node* context,
return result;
}
-Node* CodeStubAssembler::ArraySpeciesCreate(TNode<Context> context,
- TNode<Object> o,
- TNode<Number> len) {
- Node* constructor =
- CallRuntime(Runtime::kArraySpeciesConstructor, context, o);
- return ConstructJS(CodeFactory::Construct(isolate()), context, constructor,
- len);
-}
-
-Node* CodeStubAssembler::InternalArrayCreate(TNode<Context> context,
- TNode<Number> len) {
- Node* native_context = LoadNativeContext(context);
- Node* const constructor = LoadContextElement(
- native_context, Context::INTERNAL_ARRAY_FUNCTION_INDEX);
- return ConstructJS(CodeFactory::Construct(isolate()), context, constructor,
- len);
+TNode<JSReceiver> CodeStubAssembler::ArraySpeciesCreate(TNode<Context> context,
+ TNode<Object> o,
+ TNode<Number> len) {
+ TNode<JSReceiver> constructor =
+ CAST(CallRuntime(Runtime::kArraySpeciesConstructor, context, o));
+ return Construct(context, constructor, len);
}
Node* CodeStubAssembler::IsDetachedBuffer(Node* buffer) {
CSA_ASSERT(this, HasInstanceType(buffer, JS_ARRAY_BUFFER_TYPE));
TNode<Uint32T> buffer_bit_field = LoadJSArrayBufferBitField(CAST(buffer));
- return IsSetWord32<JSArrayBuffer::WasNeuteredBit>(buffer_bit_field);
+ return IsSetWord32<JSArrayBuffer::WasDetachedBit>(buffer_bit_field);
}
void CodeStubAssembler::ThrowIfArrayBufferIsDetached(
@@ -12728,7 +13095,8 @@ CodeStubArguments::CodeStubArguments(
fp_(fp != nullptr ? fp : assembler_->LoadFramePointer()) {
Node* offset = assembler_->ElementOffsetFromIndex(
argc_, PACKED_ELEMENTS, param_mode,
- (StandardFrameConstants::kFixedSlotCountAboveFp - 1) * kPointerSize);
+ (StandardFrameConstants::kFixedSlotCountAboveFp - 1) *
+ kSystemPointerSize);
arguments_ = assembler_->UncheckedCast<RawPtr<Object>>(
assembler_->IntPtrAdd(fp_, offset));
}
@@ -12737,14 +13105,14 @@ TNode<Object> CodeStubArguments::GetReceiver() const {
DCHECK_EQ(receiver_mode_, ReceiverMode::kHasReceiver);
return assembler_->UncheckedCast<Object>(
assembler_->Load(MachineType::AnyTagged(), arguments_,
- assembler_->IntPtrConstant(kPointerSize)));
+ assembler_->IntPtrConstant(kSystemPointerSize)));
}
void CodeStubArguments::SetReceiver(TNode<Object> object) const {
DCHECK_EQ(receiver_mode_, ReceiverMode::kHasReceiver);
- assembler_->StoreNoWriteBarrier(MachineRepresentation::kTagged, arguments_,
- assembler_->IntPtrConstant(kPointerSize),
- object);
+ assembler_->StoreNoWriteBarrier(
+ MachineRepresentation::kTagged, arguments_,
+ assembler_->IntPtrConstant(kSystemPointerSize), object);
}
TNode<RawPtr<Object>> CodeStubArguments::AtIndexPtr(
@@ -12827,18 +13195,18 @@ void CodeStubArguments::ForEach(
}
Node* start = assembler_->IntPtrSub(
assembler_->UncheckedCast<IntPtrT>(arguments_),
- assembler_->ElementOffsetFromIndex(first, PACKED_ELEMENTS, mode));
+ assembler_->ElementOffsetFromIndex(first, SYSTEM_POINTER_ELEMENTS, mode));
Node* end = assembler_->IntPtrSub(
assembler_->UncheckedCast<IntPtrT>(arguments_),
- assembler_->ElementOffsetFromIndex(last, PACKED_ELEMENTS, mode));
- assembler_->BuildFastLoop(vars, start, end,
- [this, &body](Node* current) {
- Node* arg = assembler_->Load(
- MachineType::AnyTagged(), current);
- body(arg);
- },
- -kPointerSize, CodeStubAssembler::INTPTR_PARAMETERS,
- CodeStubAssembler::IndexAdvanceMode::kPost);
+ assembler_->ElementOffsetFromIndex(last, SYSTEM_POINTER_ELEMENTS, mode));
+ assembler_->BuildFastLoop(
+ vars, start, end,
+ [this, &body](Node* current) {
+ Node* arg = assembler_->Load(MachineType::AnyTagged(), current);
+ body(arg);
+ },
+ -kSystemPointerSize, CodeStubAssembler::INTPTR_PARAMETERS,
+ CodeStubAssembler::IndexAdvanceMode::kPost);
}
void CodeStubArguments::PopAndReturn(Node* value) {
@@ -12896,6 +13264,11 @@ Node* CodeStubAssembler::IsElementsKindGreaterThan(
return Int32GreaterThan(target_kind, Int32Constant(reference_kind));
}
+TNode<BoolT> CodeStubAssembler::IsElementsKindLessThanOrEqual(
+ TNode<Int32T> target_kind, ElementsKind reference_kind) {
+ return Int32LessThanOrEqual(target_kind, Int32Constant(reference_kind));
+}
+
Node* CodeStubAssembler::IsDebugActive() {
Node* is_debug_active = Load(
MachineType::Uint8(),
@@ -12934,13 +13307,25 @@ Node* CodeStubAssembler::IsPromiseHookEnabledOrHasAsyncEventDelegate() {
return Word32NotEqual(promise_hook_or_async_event_delegate, Int32Constant(0));
}
+Node* CodeStubAssembler::
+ IsPromiseHookEnabledOrDebugIsActiveOrHasAsyncEventDelegate() {
+ Node* const promise_hook_or_debug_is_active_or_async_event_delegate = Load(
+ MachineType::Uint8(),
+ ExternalConstant(
+ ExternalReference::
+ promise_hook_or_debug_is_active_or_async_event_delegate_address(
+ isolate())));
+ return Word32NotEqual(promise_hook_or_debug_is_active_or_async_event_delegate,
+ Int32Constant(0));
+}
+
TNode<Code> CodeStubAssembler::LoadBuiltin(TNode<Smi> builtin_id) {
CSA_ASSERT(this, SmiGreaterThanOrEqual(builtin_id, SmiConstant(0)));
CSA_ASSERT(this,
SmiLessThan(builtin_id, SmiConstant(Builtins::builtin_count)));
int const kSmiShiftBits = kSmiShiftSize + kSmiTagSize;
- int index_shift = kPointerSizeLog2 - kSmiShiftBits;
+ int index_shift = kSystemPointerSizeLog2 - kSmiShiftBits;
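+  // The Smi tag already scales the builtin id by 2^kSmiShiftBits; fold that
+  // into the pointer-size scaling so only one net shift remains, applied
+  // left or right depending on its sign.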
TNode<WordT> table_index =
index_shift >= 0 ? WordShl(BitcastTaggedToWord(builtin_id), index_shift)
: WordSar(BitcastTaggedToWord(builtin_id), -index_shift);
@@ -12976,22 +13361,22 @@ TNode<Code> CodeStubAssembler::GetSharedFunctionInfoCode(
int32_t case_values[] = {BYTECODE_ARRAY_TYPE,
WASM_EXPORTED_FUNCTION_DATA_TYPE,
- FIXED_ARRAY_TYPE,
- UNCOMPILED_DATA_WITHOUT_PRE_PARSED_SCOPE_TYPE,
- UNCOMPILED_DATA_WITH_PRE_PARSED_SCOPE_TYPE,
+ ASM_WASM_DATA_TYPE,
+ UNCOMPILED_DATA_WITHOUT_PREPARSE_DATA_TYPE,
+ UNCOMPILED_DATA_WITH_PREPARSE_DATA_TYPE,
FUNCTION_TEMPLATE_INFO_TYPE};
Label check_is_bytecode_array(this);
Label check_is_exported_function_data(this);
- Label check_is_fixed_array(this);
- Label check_is_uncompiled_data_without_pre_parsed_scope(this);
- Label check_is_uncompiled_data_with_pre_parsed_scope(this);
+ Label check_is_asm_wasm_data(this);
+ Label check_is_uncompiled_data_without_preparse_data(this);
+ Label check_is_uncompiled_data_with_preparse_data(this);
Label check_is_function_template_info(this);
Label check_is_interpreter_data(this);
Label* case_labels[] = {&check_is_bytecode_array,
&check_is_exported_function_data,
- &check_is_fixed_array,
- &check_is_uncompiled_data_without_pre_parsed_scope,
- &check_is_uncompiled_data_with_pre_parsed_scope,
+ &check_is_asm_wasm_data,
+ &check_is_uncompiled_data_without_preparse_data,
+ &check_is_uncompiled_data_with_preparse_data,
&check_is_function_template_info};
STATIC_ASSERT(arraysize(case_values) == arraysize(case_labels));
Switch(data_type, &check_is_interpreter_data, case_values, case_labels,
@@ -12999,7 +13384,6 @@ TNode<Code> CodeStubAssembler::GetSharedFunctionInfoCode(
// IsBytecodeArray: Interpret bytecode
BIND(&check_is_bytecode_array);
- DCHECK(!Builtins::IsLazy(Builtins::kInterpreterEntryTrampoline));
sfi_code = HeapConstant(BUILTIN_CODE(isolate(), InterpreterEntryTrampoline));
Goto(&done);
@@ -13009,24 +13393,21 @@ TNode<Code> CodeStubAssembler::GetSharedFunctionInfoCode(
CAST(sfi_data), WasmExportedFunctionData::kWrapperCodeOffset));
Goto(&done);
- // IsFixedArray: Instantiate using AsmWasmData
- BIND(&check_is_fixed_array);
- DCHECK(!Builtins::IsLazy(Builtins::kInstantiateAsmJs));
+ // IsAsmWasmData: Instantiate using AsmWasmData
+ BIND(&check_is_asm_wasm_data);
sfi_code = HeapConstant(BUILTIN_CODE(isolate(), InstantiateAsmJs));
Goto(&done);
- // IsUncompiledDataWithPreParsedScope | IsUncompiledDataWithoutPreParsedScope:
+ // IsUncompiledDataWithPreparseData | IsUncompiledDataWithoutPreparseData:
// Compile lazy
- BIND(&check_is_uncompiled_data_with_pre_parsed_scope);
- Goto(&check_is_uncompiled_data_without_pre_parsed_scope);
- BIND(&check_is_uncompiled_data_without_pre_parsed_scope);
- DCHECK(!Builtins::IsLazy(Builtins::kCompileLazy));
+ BIND(&check_is_uncompiled_data_with_preparse_data);
+ Goto(&check_is_uncompiled_data_without_preparse_data);
+ BIND(&check_is_uncompiled_data_without_preparse_data);
sfi_code = HeapConstant(BUILTIN_CODE(isolate(), CompileLazy));
Goto(if_compile_lazy ? if_compile_lazy : &done);
// IsFunctionTemplateInfo: API call
BIND(&check_is_function_template_info);
- DCHECK(!Builtins::IsLazy(Builtins::kHandleApiCall));
sfi_code = HeapConstant(BUILTIN_CODE(isolate(), HandleApiCall));
Goto(&done);
@@ -13056,7 +13437,7 @@ Node* CodeStubAssembler::AllocateFunctionWithMapAndContext(Node* map,
CSA_ASSERT(this, Word32BinaryNot(IsConstructorMap(map)));
CSA_ASSERT(this, Word32BinaryNot(IsFunctionWithPrototypeSlotMap(map)));
Node* const fun = Allocate(JSFunction::kSizeWithoutPrototype);
- STATIC_ASSERT(JSFunction::kSizeWithoutPrototype == 7 * kPointerSize);
+ STATIC_ASSERT(JSFunction::kSizeWithoutPrototype == 7 * kTaggedSize);
StoreMapNoWriteBarrier(fun, map);
StoreObjectFieldRoot(fun, JSObject::kPropertiesOrHashOffset,
RootIndex::kEmptyFixedArray);
@@ -13102,7 +13483,8 @@ void CodeStubAssembler::CheckPrototypeEnumCache(Node* receiver,
// The following relies on the elements only aliasing with JSProxy::target,
  // which is a JavaScript value and hence cannot be confused with an elements
// backing store.
- STATIC_ASSERT(JSObject::kElementsOffset == JSProxy::kTargetOffset);
+ STATIC_ASSERT(static_cast<int>(JSObject::kElementsOffset) ==
+ static_cast<int>(JSProxy::kTargetOffset));
Node* object_elements = LoadObjectField(object, JSObject::kElementsOffset);
GotoIf(IsEmptyFixedArray(object_elements), &if_no_elements);
GotoIf(IsEmptySlowElementDictionary(object_elements), &if_no_elements);
@@ -13230,5 +13612,147 @@ void CodeStubAssembler::InitializeFunctionContext(Node* native_context,
native_context);
}
+TNode<JSArray> CodeStubAssembler::ArrayCreate(TNode<Context> context,
+ TNode<Number> length) {
+ TVARIABLE(JSArray, array);
+ Label allocate_js_array(this);
+
+ Label done(this), next(this), runtime(this, Label::kDeferred);
+ TNode<Smi> limit = SmiConstant(JSArray::kInitialMaxFastElementArray);
+ CSA_ASSERT_BRANCH(this, [=](Label* ok, Label* not_ok) {
+ BranchIfNumberRelationalComparison(Operation::kGreaterThanOrEqual, length,
+ SmiConstant(0), ok, not_ok);
+ });
+ // This check also transitively covers the case where length is too big
+ // to be representable by a SMI and so is not usable with
+ // AllocateJSArray.
+ BranchIfNumberRelationalComparison(Operation::kGreaterThanOrEqual, length,
+ limit, &runtime, &next);
+
+ BIND(&runtime);
+ {
+ TNode<Context> native_context = LoadNativeContext(context);
+ TNode<JSFunction> array_function =
+ CAST(LoadContextElement(native_context, Context::ARRAY_FUNCTION_INDEX));
+ array = CAST(CallRuntime(Runtime::kNewArray, context, array_function,
+ length, array_function, UndefinedConstant()));
+ Goto(&done);
+ }
+
+ BIND(&next);
+ CSA_ASSERT(this, TaggedIsSmi(length));
+
+ TNode<Map> array_map = CAST(LoadContextElement(
+ context, Context::JS_ARRAY_PACKED_SMI_ELEMENTS_MAP_INDEX));
+
+ // TODO(delphick): Consider using
+ // AllocateUninitializedJSArrayWithElements to avoid initializing an
+ // array and then writing over it.
+ array =
+ AllocateJSArray(PACKED_SMI_ELEMENTS, array_map, length, SmiConstant(0),
+ nullptr, ParameterMode::SMI_PARAMETERS);
+ Goto(&done);
+
+ BIND(&done);
+ return array.value();
+}
+
+void CodeStubAssembler::SetPropertyLength(TNode<Context> context,
+ TNode<Object> array,
+ TNode<Number> length) {
+ Label fast(this), runtime(this), done(this);
+  // There's no need to set the length if
+  // 1) the array is a fast JS array and
+  // 2) the new length is equal to the old length,
+  // as the set is not observable. Otherwise fall back to the runtime.
+
+ // 1) Check that the array has fast elements.
+  // TODO(delphick): Consider changing this since it does an unnecessary
+ // check for SMIs.
+ // TODO(delphick): Also we could hoist this to after the array construction
+ // and copy the args into array in the same way as the Array constructor.
+ BranchIfFastJSArray(array, context, &fast, &runtime);
+
+ BIND(&fast);
+ {
+ TNode<JSArray> fast_array = CAST(array);
+
+ TNode<Smi> length_smi = CAST(length);
+ TNode<Smi> old_length = LoadFastJSArrayLength(fast_array);
+ CSA_ASSERT(this, TaggedIsPositiveSmi(old_length));
+
+ // 2) If the created array's length matches the required length, then
+ // there's nothing else to do. Otherwise use the runtime to set the
+ // property as that will insert holes into excess elements or shrink
+ // the backing store as appropriate.
+ Branch(SmiNotEqual(length_smi, old_length), &runtime, &done);
+ }
+
+ BIND(&runtime);
+ {
+ SetPropertyStrict(context, array, CodeStubAssembler::LengthStringConstant(),
+ length);
+ Goto(&done);
+ }
+
+ BIND(&done);
+}
+
+void CodeStubAssembler::GotoIfInitialPrototypePropertyModified(
+ TNode<Map> object_map, TNode<Map> initial_prototype_map, int descriptor,
+ RootIndex field_name_root_index, Label* if_modified) {
+ DescriptorIndexAndName index_name{descriptor, field_name_root_index};
+ GotoIfInitialPrototypePropertiesModified(
+ object_map, initial_prototype_map,
+ Vector<DescriptorIndexAndName>(&index_name, 1), if_modified);
+}
+
+void CodeStubAssembler::GotoIfInitialPrototypePropertiesModified(
+ TNode<Map> object_map, TNode<Map> initial_prototype_map,
+ Vector<DescriptorIndexAndName> properties, Label* if_modified) {
+ TNode<Map> prototype_map = LoadMap(LoadMapPrototype(object_map));
+ GotoIfNot(WordEqual(prototype_map, initial_prototype_map), if_modified);
+
+ if (FLAG_track_constant_fields) {
+ // With constant field tracking, we need to make sure that important
+    // properties in the prototype have not been tampered with. We do this by
+ // checking that their slots in the prototype's descriptor array are still
+ // marked as const.
+ TNode<DescriptorArray> descriptors = LoadMapDescriptors(prototype_map);
+
+ TNode<Uint32T> combined_details;
+ for (int i = 0; i < properties.length(); i++) {
+ // Assert the descriptor index is in-bounds.
+ int descriptor = properties[i].descriptor_index;
+ CSA_ASSERT(this, Int32LessThan(Int32Constant(descriptor),
+ LoadNumberOfDescriptors(descriptors)));
+ // Assert that the name is correct. This essentially checks that
+ // the descriptor index corresponds to the insertion order in
+ // the bootstrapper.
+ CSA_ASSERT(this,
+ WordEqual(LoadKeyByDescriptorEntry(descriptors, descriptor),
+ LoadRoot(properties[i].name_root_index)));
+
+ TNode<Uint32T> details =
+ DescriptorArrayGetDetails(descriptors, Uint32Constant(descriptor));
+ if (i == 0) {
+ combined_details = details;
+ } else {
+ combined_details = Unsigned(Word32And(combined_details, details));
+ }
+ }
+
+ TNode<Uint32T> constness =
+ DecodeWord32<PropertyDetails::ConstnessField>(combined_details);
+
+ GotoIfNot(
+ Word32Equal(constness,
+ Int32Constant(static_cast<int>(PropertyConstness::kConst))),
+ if_modified);
+ }
+}
+
} // namespace internal
+// TODO(petermarshall): Remove. This is a workaround for crbug.com/v8/8719
+namespace {} // namespace
} // namespace v8
diff --git a/deps/v8/src/code-stub-assembler.h b/deps/v8/src/code-stub-assembler.h
index 2be168af85..4dfd176eaa 100644
--- a/deps/v8/src/code-stub-assembler.h
+++ b/deps/v8/src/code-stub-assembler.h
@@ -7,14 +7,21 @@
#include <functional>
+#include "src/bailout-reason.h"
#include "src/base/macros.h"
#include "src/compiler/code-assembler.h"
+#include "src/frames.h"
#include "src/globals.h"
+#include "src/message-template.h"
#include "src/objects.h"
#include "src/objects/arguments.h"
#include "src/objects/bigint.h"
+#include "src/objects/shared-function-info.h"
+#include "src/objects/smi.h"
#include "src/roots.h"
+#include "torque-generated/builtins-base-from-dsl-gen.h"
+
namespace v8 {
namespace internal {
@@ -28,81 +35,71 @@ enum class PrimitiveType { kBoolean, kNumber, kString, kSymbol };
#define HEAP_MUTABLE_IMMOVABLE_OBJECT_LIST(V) \
V(ArraySpeciesProtector, array_species_protector, ArraySpeciesProtector) \
- V(EmptyPropertyDictionary, empty_property_dictionary, \
- EmptyPropertyDictionary) \
V(PromiseSpeciesProtector, promise_species_protector, \
PromiseSpeciesProtector) \
V(TypedArraySpeciesProtector, typed_array_species_protector, \
- TypedArraySpeciesProtector)
-
-#define HEAP_IMMUTABLE_IMMOVABLE_OBJECT_LIST(V) \
- V(AccessorInfoMap, accessor_info_map, AccessorInfoMap) \
- V(AccessorPairMap, accessor_pair_map, AccessorPairMap) \
- V(AllocationSiteWithWeakNextMap, allocation_site_map, AllocationSiteMap) \
- V(AllocationSiteWithoutWeakNextMap, allocation_site_without_weaknext_map, \
- AllocationSiteWithoutWeakNextMap) \
- V(BooleanMap, boolean_map, BooleanMap) \
- V(CodeMap, code_map, CodeMap) \
- V(EmptyFixedArray, empty_fixed_array, EmptyFixedArray) \
- V(EmptySlowElementDictionary, empty_slow_element_dictionary, \
- EmptySlowElementDictionary) \
- V(empty_string, empty_string, EmptyString) \
- V(FalseValue, false_value, False) \
- V(FeedbackVectorMap, feedback_vector_map, FeedbackVectorMap) \
- V(FixedArrayMap, fixed_array_map, FixedArrayMap) \
- V(FixedCOWArrayMap, fixed_cow_array_map, FixedCOWArrayMap) \
- V(FixedDoubleArrayMap, fixed_double_array_map, FixedDoubleArrayMap) \
- V(FunctionTemplateInfoMap, function_template_info_map, \
- FunctionTemplateInfoMap) \
- V(GlobalPropertyCellMap, global_property_cell_map, PropertyCellMap) \
- V(has_instance_symbol, has_instance_symbol, HasInstanceSymbol) \
- V(HeapNumberMap, heap_number_map, HeapNumberMap) \
- V(iterator_symbol, iterator_symbol, IteratorSymbol) \
- V(length_string, length_string, LengthString) \
- V(ManyClosuresCellMap, many_closures_cell_map, ManyClosuresCellMap) \
- V(MetaMap, meta_map, MetaMap) \
- V(MinusZeroValue, minus_zero_value, MinusZero) \
- V(MutableHeapNumberMap, mutable_heap_number_map, MutableHeapNumberMap) \
- V(NanValue, nan_value, Nan) \
- V(NoClosuresCellMap, no_closures_cell_map, NoClosuresCellMap) \
- V(NullValue, null_value, Null) \
- V(OneClosureCellMap, one_closure_cell_map, OneClosureCellMap) \
- V(PreParsedScopeDataMap, pre_parsed_scope_data_map, PreParsedScopeDataMap) \
- V(prototype_string, prototype_string, PrototypeString) \
- V(SharedFunctionInfoMap, shared_function_info_map, SharedFunctionInfoMap) \
- V(StoreHandler0Map, store_handler0_map, StoreHandler0Map) \
- V(SymbolMap, symbol_map, SymbolMap) \
- V(TheHoleValue, the_hole_value, TheHole) \
- V(TransitionArrayMap, transition_array_map, TransitionArrayMap) \
- V(TrueValue, true_value, True) \
- V(Tuple2Map, tuple2_map, Tuple2Map) \
- V(Tuple3Map, tuple3_map, Tuple3Map) \
- V(ArrayBoilerplateDescriptionMap, array_boilerplate_description_map, \
- ArrayBoilerplateDescriptionMap) \
- V(UncompiledDataWithoutPreParsedScopeMap, \
- uncompiled_data_without_pre_parsed_scope_map, \
- UncompiledDataWithoutPreParsedScopeMap) \
- V(UncompiledDataWithPreParsedScopeMap, \
- uncompiled_data_with_pre_parsed_scope_map, \
- UncompiledDataWithPreParsedScopeMap) \
- V(UndefinedValue, undefined_value, Undefined) \
+ TypedArraySpeciesProtector) \
+ V(RegExpSpeciesProtector, regexp_species_protector, RegExpSpeciesProtector)
+
+#define HEAP_IMMUTABLE_IMMOVABLE_OBJECT_LIST(V) \
+ V(AccessorInfoMap, accessor_info_map, AccessorInfoMap) \
+ V(AccessorPairMap, accessor_pair_map, AccessorPairMap) \
+ V(AllocationSiteWithWeakNextMap, allocation_site_map, AllocationSiteMap) \
+ V(AllocationSiteWithoutWeakNextMap, allocation_site_without_weaknext_map, \
+ AllocationSiteWithoutWeakNextMap) \
+ V(BooleanMap, boolean_map, BooleanMap) \
+ V(CodeMap, code_map, CodeMap) \
+ V(EmptyFixedArray, empty_fixed_array, EmptyFixedArray) \
+ V(EmptyPropertyDictionary, empty_property_dictionary, \
+ EmptyPropertyDictionary) \
+ V(EmptySlowElementDictionary, empty_slow_element_dictionary, \
+ EmptySlowElementDictionary) \
+ V(empty_string, empty_string, EmptyString) \
+ V(FalseValue, false_value, False) \
+ V(FeedbackVectorMap, feedback_vector_map, FeedbackVectorMap) \
+ V(FixedArrayMap, fixed_array_map, FixedArrayMap) \
+ V(FixedCOWArrayMap, fixed_cow_array_map, FixedCOWArrayMap) \
+ V(FixedDoubleArrayMap, fixed_double_array_map, FixedDoubleArrayMap) \
+ V(FunctionTemplateInfoMap, function_template_info_map, \
+ FunctionTemplateInfoMap) \
+ V(GlobalPropertyCellMap, global_property_cell_map, PropertyCellMap) \
+ V(has_instance_symbol, has_instance_symbol, HasInstanceSymbol) \
+ V(HeapNumberMap, heap_number_map, HeapNumberMap) \
+ V(iterator_symbol, iterator_symbol, IteratorSymbol) \
+ V(length_string, length_string, LengthString) \
+ V(ManyClosuresCellMap, many_closures_cell_map, ManyClosuresCellMap) \
+ V(MetaMap, meta_map, MetaMap) \
+ V(MinusZeroValue, minus_zero_value, MinusZero) \
+ V(MutableHeapNumberMap, mutable_heap_number_map, MutableHeapNumberMap) \
+ V(NanValue, nan_value, Nan) \
+ V(NoClosuresCellMap, no_closures_cell_map, NoClosuresCellMap) \
+ V(NoFeedbackCellMap, no_feedback_cell_map, NoFeedbackCellMap) \
+ V(NullValue, null_value, Null) \
+ V(OneClosureCellMap, one_closure_cell_map, OneClosureCellMap) \
+ V(PreparseDataMap, preparse_data_map, PreparseDataMap) \
+ V(prototype_string, prototype_string, PrototypeString) \
+ V(SharedFunctionInfoMap, shared_function_info_map, SharedFunctionInfoMap) \
+ V(StoreHandler0Map, store_handler0_map, StoreHandler0Map) \
+ V(SymbolMap, symbol_map, SymbolMap) \
+ V(TheHoleValue, the_hole_value, TheHole) \
+ V(TransitionArrayMap, transition_array_map, TransitionArrayMap) \
+ V(TrueValue, true_value, True) \
+ V(Tuple2Map, tuple2_map, Tuple2Map) \
+ V(Tuple3Map, tuple3_map, Tuple3Map) \
+ V(ArrayBoilerplateDescriptionMap, array_boilerplate_description_map, \
+ ArrayBoilerplateDescriptionMap) \
+ V(UncompiledDataWithoutPreparseDataMap, \
+ uncompiled_data_without_preparse_data_map, \
+ UncompiledDataWithoutPreparseDataMap) \
+ V(UncompiledDataWithPreparseDataMap, uncompiled_data_with_preparse_data_map, \
+ UncompiledDataWithPreparseDataMap) \
+ V(UndefinedValue, undefined_value, Undefined) \
V(WeakFixedArrayMap, weak_fixed_array_map, WeakFixedArrayMap)
#define HEAP_IMMOVABLE_OBJECT_LIST(V) \
HEAP_MUTABLE_IMMOVABLE_OBJECT_LIST(V) \
HEAP_IMMUTABLE_IMMOVABLE_OBJECT_LIST(V)
-// Returned from IteratorBuiltinsAssembler::GetIterator(). Struct is declared
-// here to simplify use in other generated builtins.
-struct IteratorRecord {
- public:
- // iteratorRecord.[[Iterator]]
- compiler::TNode<JSReceiver> object;
-
- // iteratorRecord.[[NextMethod]]
- compiler::TNode<Object> next;
-};
-
#ifdef DEBUG
#define CSA_CHECK(csa, x) \
(csa)->Check( \
@@ -202,30 +199,14 @@ struct IteratorRecord {
#define CSA_SLOW_ASSERT(csa, ...) ((void)0)
#endif
-class int31_t {
- public:
- int31_t() : value_(0) {}
- int31_t(int value) : value_(value) { // NOLINT(runtime/explicit)
- DCHECK_EQ((value & 0x80000000) != 0, (value & 0x40000000) != 0);
- }
- int31_t& operator=(int value) {
- DCHECK_EQ((value & 0x80000000) != 0, (value & 0x40000000) != 0);
- value_ = value;
- return *this;
- }
- int32_t value() const { return value_; }
- operator int32_t() const { return value_; }
-
- private:
- int32_t value_;
-};
-
// Provides JavaScript-specific "macro-assembler" functionality on top of the
// CodeAssembler. By factoring the JavaScript-isms out of the CodeAssembler,
 // it's possible to add useful JavaScript-specific CodeAssembler "macros"
// without modifying files in the compiler directory (and requiring a review
// from a compiler directory OWNER).
-class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
+class V8_EXPORT_PRIVATE CodeStubAssembler
+ : public compiler::CodeAssembler,
+ public BaseBuiltinsFromDSLAssembler {
public:
using Node = compiler::Node;
template <class T>
@@ -236,7 +217,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
template <typename T>
using LazyNode = std::function<TNode<T>()>;
- CodeStubAssembler(compiler::CodeAssemblerState* state);
+ explicit CodeStubAssembler(compiler::CodeAssemblerState* state);
enum AllocationFlag : uint8_t {
kNone = 0,
@@ -294,11 +275,53 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
return value;
}
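+  // Attempts to extract a compile-time constant from |node| according to
+  // the given ParameterMode; on success stores the untagged value in |out|
+  // and returns true.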
+ bool ToParameterConstant(Node* node, intptr_t* out, ParameterMode mode) {
+ if (mode == ParameterMode::SMI_PARAMETERS) {
+ Smi constant;
+ if (ToSmiConstant(node, &constant)) {
+ *out = static_cast<intptr_t>(constant->value());
+ return true;
+ }
+ } else {
+ DCHECK_EQ(mode, ParameterMode::INTPTR_PARAMETERS);
+ intptr_t constant;
+ if (ToIntPtrConstant(node, constant)) {
+ *out = constant;
+ return true;
+ }
+ }
+
+ return false;
+ }
+
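+  // BInt aliases Smi on 32-bit hosts and IntPtrT on 64-bit hosts, so on
+  // each architecture two of the four conversions below are no-ops.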
+#if defined(V8_HOST_ARCH_32_BIT)
+ TNode<Smi> BIntToSmi(TNode<BInt> source) { return source; }
+ TNode<IntPtrT> BIntToIntPtr(TNode<BInt> source) {
+ return SmiToIntPtr(source);
+ }
+ TNode<BInt> SmiToBInt(TNode<Smi> source) { return source; }
+ TNode<BInt> IntPtrToBInt(TNode<IntPtrT> source) {
+ return SmiFromIntPtr(source);
+ }
+#elif defined(V8_HOST_ARCH_64_BIT)
+ TNode<Smi> BIntToSmi(TNode<BInt> source) { return SmiFromIntPtr(source); }
+ TNode<IntPtrT> BIntToIntPtr(TNode<BInt> source) { return source; }
+ TNode<BInt> SmiToBInt(TNode<Smi> source) { return SmiToIntPtr(source); }
+ TNode<BInt> IntPtrToBInt(TNode<IntPtrT> source) { return source; }
+#else
+#error Unknown architecture.
+#endif
+
TNode<Smi> TaggedToSmi(TNode<Object> value, Label* fail) {
GotoIf(TaggedIsNotSmi(value), fail);
return UncheckedCast<Smi>(value);
}
+ TNode<Smi> TaggedToPositiveSmi(TNode<Object> value, Label* fail) {
+ GotoIfNot(TaggedIsPositiveSmi(value), fail);
+ return UncheckedCast<Smi>(value);
+ }
+
TNode<Number> TaggedToNumber(TNode<Object> value, Label* fail) {
GotoIfNot(IsNumber(value), fail);
return UncheckedCast<Number>(value);
@@ -335,82 +358,15 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
return CAST(heap_object);
}
- TNode<HeapNumber> UnsafeCastNumberToHeapNumber(TNode<Number> p_n) {
- return CAST(p_n);
- }
-
- TNode<FixedArrayBase> UnsafeCastObjectToFixedArrayBase(TNode<Object> p_o) {
- return CAST(p_o);
- }
-
- TNode<FixedArray> UnsafeCastObjectToFixedArray(TNode<Object> p_o) {
- return CAST(p_o);
- }
-
- TNode<Context> UnsafeCastObjectToContext(TNode<Object> p_o) {
- return CAST(p_o);
- }
-
- TNode<FixedDoubleArray> UnsafeCastObjectToFixedDoubleArray(
- TNode<Object> p_o) {
- return CAST(p_o);
- }
-
- TNode<HeapNumber> UnsafeCastObjectToHeapNumber(TNode<Object> p_o) {
- return CAST(p_o);
- }
-
- TNode<HeapObject> UnsafeCastObjectToCallable(TNode<Object> p_o) {
- return CAST(p_o);
- }
-
- TNode<Smi> UnsafeCastObjectToSmi(TNode<Object> p_o) { return CAST(p_o); }
-
- TNode<Number> UnsafeCastObjectToNumber(TNode<Object> p_o) {
- return CAST(p_o);
- }
-
- TNode<HeapObject> UnsafeCastObjectToHeapObject(TNode<Object> p_o) {
- return CAST(p_o);
- }
-
- TNode<JSArray> UnsafeCastObjectToJSArray(TNode<Object> p_o) {
- return CAST(p_o);
- }
-
- TNode<FixedTypedArrayBase> UnsafeCastObjectToFixedTypedArrayBase(
- TNode<Object> p_o) {
- return CAST(p_o);
- }
-
- TNode<Object> UnsafeCastObjectToCompareBuiltinFn(TNode<Object> p_o) {
- return p_o;
- }
-
- TNode<Object> UnsafeCastObjectToLoadFn(TNode<Object> p_o) { return p_o; }
- TNode<Object> UnsafeCastObjectToStoreFn(TNode<Object> p_o) { return p_o; }
- TNode<Object> UnsafeCastObjectToCanUseSameAccessorFn(TNode<Object> p_o) {
- return p_o;
- }
-
- TNode<NumberDictionary> UnsafeCastObjectToNumberDictionary(
- TNode<Object> p_o) {
- return CAST(p_o);
- }
-
- TNode<JSReceiver> UnsafeCastObjectToJSReceiver(TNode<Object> p_o) {
- return CAST(p_o);
- }
-
- TNode<JSObject> UnsafeCastObjectToJSObject(TNode<Object> p_o) {
- return CAST(p_o);
+ TNode<String> HeapObjectToString(TNode<HeapObject> heap_object, Label* fail) {
+ GotoIfNot(IsString(heap_object), fail);
+ return CAST(heap_object);
}
- TNode<Map> UnsafeCastObjectToMap(TNode<Object> p_o) { return CAST(p_o); }
-
- TNode<JSArgumentsObjectWithLength> RawCastObjectToJSArgumentsObjectWithLength(
- TNode<Object> p_o) {
- return TNode<JSArgumentsObjectWithLength>::UncheckedCast(p_o);
+ TNode<JSReceiver> HeapObjectToConstructor(TNode<HeapObject> heap_object,
+ Label* fail) {
+ GotoIfNot(IsConstructor(heap_object), fail);
+ return CAST(heap_object);
}
Node* MatchesParameterMode(Node* value, ParameterMode mode);
@@ -438,18 +394,21 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
SmiAboveOrEqual)
#undef PARAMETER_BINOP
+ uintptr_t ConstexprUintPtrShl(uintptr_t a, int32_t b) { return a << b; }
+ uintptr_t ConstexprUintPtrShr(uintptr_t a, int32_t b) { return a >> b; }
+
TNode<Object> NoContextConstant();
-#define HEAP_CONSTANT_ACCESSOR(rootIndexName, rootAccessorName, name) \
- compiler::TNode<std::remove_reference<decltype( \
- *std::declval<ReadOnlyRoots>().rootAccessorName())>::type> \
+#define HEAP_CONSTANT_ACCESSOR(rootIndexName, rootAccessorName, name) \
+ compiler::TNode<std::remove_pointer<std::remove_reference<decltype( \
+ std::declval<ReadOnlyRoots>().rootAccessorName())>::type>::type> \
name##Constant();
HEAP_IMMUTABLE_IMMOVABLE_OBJECT_LIST(HEAP_CONSTANT_ACCESSOR)
#undef HEAP_CONSTANT_ACCESSOR
#define HEAP_CONSTANT_ACCESSOR(rootIndexName, rootAccessorName, name) \
- compiler::TNode<std::remove_reference<decltype( \
- *std::declval<Heap>().rootAccessorName())>::type> \
+ compiler::TNode<std::remove_pointer<std::remove_reference<decltype( \
+ std::declval<Heap>().rootAccessorName())>::type>::type> \
name##Constant();
HEAP_MUTABLE_IMMOVABLE_OBJECT_LIST(HEAP_CONSTANT_ACCESSOR)
#undef HEAP_CONSTANT_ACCESSOR
@@ -461,9 +420,6 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
#undef HEAP_CONSTANT_TEST
Node* IntPtrOrSmiConstant(int value, ParameterMode mode);
- TNode<Smi> LanguageModeConstant(LanguageMode mode) {
- return SmiConstant(static_cast<int>(mode));
- }
bool IsIntPtrOrSmiConstantZero(Node* test, ParameterMode mode);
bool TryGetIntPtrOrSmiConstantValue(Node* maybe_constant, int* value,
@@ -485,9 +441,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
TNode<Float64T> Float64RoundToEven(SloppyTNode<Float64T> x);
TNode<Float64T> Float64Trunc(SloppyTNode<Float64T> x);
   // Select the maximum of the two provided Number values.
- TNode<Object> NumberMax(SloppyTNode<Object> left, SloppyTNode<Object> right);
+ TNode<Number> NumberMax(SloppyTNode<Number> left, SloppyTNode<Number> right);
// Select the minimum of the two provided Number values.
- TNode<Object> NumberMin(SloppyTNode<Object> left, SloppyTNode<Object> right);
+ TNode<Number> NumberMin(SloppyTNode<Number> left, SloppyTNode<Number> right);
// After converting an index to an integer, calculate a relative index: if
// index < 0, max(length + index, 0); else min(index, length)
@@ -518,7 +474,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
IntPtrOpName(BitcastTaggedToWord(a), BitcastTaggedToWord(b))); \
} else { \
DCHECK(SmiValuesAre31Bits()); \
- if (kPointerSize == kInt64Size) { \
+ if (kSystemPointerSize == kInt64Size) { \
CSA_ASSERT(this, IsValidSmi(a)); \
CSA_ASSERT(this, IsValidSmi(b)); \
} \
@@ -534,6 +490,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
#undef SMI_ARITHMETIC_BINOP
TNode<Smi> SmiInc(TNode<Smi> value) { return SmiAdd(value, SmiConstant(1)); }
+ TNode<IntPtrT> TryIntPtrAdd(TNode<IntPtrT> a, TNode<IntPtrT> b,
+ Label* if_overflow);
TNode<Smi> TrySmiAdd(TNode<Smi> a, TNode<Smi> b, Label* if_overflow);
TNode<Smi> TrySmiSub(TNode<Smi> a, TNode<Smi> b, Label* if_overflow);
@@ -547,6 +505,12 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
BitcastTaggedToWord(SmiConstant(-1))));
}
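+  // Arithmetic right shift of a Smi; the trailing mask with Smi(-1) clears
+  // any bits shifted into the tag area, keeping the result a valid Smi.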
+ TNode<Smi> SmiSar(TNode<Smi> a, int shift) {
+ return BitcastWordToTaggedSigned(
+ WordAnd(WordSar(BitcastTaggedToWord(a), shift),
+ BitcastTaggedToWord(SmiConstant(-1))));
+ }
+
Node* WordOrSmiShl(Node* a, int shift, ParameterMode mode) {
if (mode == SMI_PARAMETERS) {
return SmiShl(CAST(a), shift);
@@ -571,7 +535,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
return IntPtrOpName(BitcastTaggedToWord(a), BitcastTaggedToWord(b)); \
} else { \
DCHECK(SmiValuesAre31Bits()); \
- if (kPointerSize == kInt64Size) { \
+ if (kSystemPointerSize == kInt64Size) { \
CSA_ASSERT(this, IsValidSmi(a)); \
CSA_ASSERT(this, IsValidSmi(b)); \
} \
@@ -621,13 +585,17 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
TNode<Number> BitwiseOp(Node* left32, Node* right32, Operation bitwise_op);
// Allocate an object of the given size.
- Node* AllocateInNewSpace(Node* size, AllocationFlags flags = kNone);
- Node* AllocateInNewSpace(int size, AllocationFlags flags = kNone);
- Node* Allocate(Node* size, AllocationFlags flags = kNone);
- Node* Allocate(int size, AllocationFlags flags = kNone);
- Node* InnerAllocate(Node* previous, int offset);
- Node* InnerAllocate(Node* previous, Node* offset);
- Node* IsRegularHeapObjectSize(Node* size);
+ TNode<HeapObject> AllocateInNewSpace(TNode<IntPtrT> size,
+ AllocationFlags flags = kNone);
+ TNode<HeapObject> AllocateInNewSpace(int size, AllocationFlags flags = kNone);
+ TNode<HeapObject> Allocate(TNode<IntPtrT> size,
+ AllocationFlags flags = kNone);
+ TNode<HeapObject> Allocate(int size, AllocationFlags flags = kNone);
+ TNode<HeapObject> InnerAllocate(TNode<HeapObject> previous, int offset);
+ TNode<HeapObject> InnerAllocate(TNode<HeapObject> previous,
+ TNode<IntPtrT> offset);
+
+ TNode<BoolT> IsRegularHeapObjectSize(TNode<IntPtrT> size);
typedef std::function<void(Label*, Label*)> BranchGenerator;
typedef std::function<Node*()> NodeGenerator;
@@ -661,6 +629,14 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
Node* extra_node3 = nullptr, const char* extra_node3_name = "",
Node* extra_node4 = nullptr, const char* extra_node4_name = "",
Node* extra_node5 = nullptr, const char* extra_node5_name = "");
+ void FailAssert(
+ const char* message = nullptr, const char* file = nullptr, int line = 0,
+ Node* extra_node1 = nullptr, const char* extra_node1_name = "",
+ Node* extra_node2 = nullptr, const char* extra_node2_name = "",
+ Node* extra_node3 = nullptr, const char* extra_node3_name = "",
+ Node* extra_node4 = nullptr, const char* extra_node4_name = "",
+ Node* extra_node5 = nullptr, const char* extra_node5_name = "");
+
void FastCheck(TNode<BoolT> condition);
// The following Call wrappers call an object according to the semantics that
@@ -685,6 +661,13 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
callable, receiver, args...));
}
+ template <class... TArgs>
+ TNode<JSReceiver> Construct(TNode<Context> context,
+ TNode<JSReceiver> new_target, TArgs... args) {
+ return CAST(ConstructJS(CodeFactory::Construct(isolate()), context,
+ new_target, implicit_cast<TNode<Object>>(args)...));
+ }
+
template <class A, class F, class G>
TNode<A> Select(SloppyTNode<BoolT> condition, const F& true_body,
const G& false_body) {
@@ -707,13 +690,13 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
TNode<IntPtrT> SelectIntPtrConstant(SloppyTNode<BoolT> condition,
int true_value, int false_value);
TNode<Oddball> SelectBooleanConstant(SloppyTNode<BoolT> condition);
- TNode<Smi> SelectSmiConstant(SloppyTNode<BoolT> condition, Smi* true_value,
- Smi* false_value);
+ TNode<Smi> SelectSmiConstant(SloppyTNode<BoolT> condition, Smi true_value,
+ Smi false_value);
TNode<Smi> SelectSmiConstant(SloppyTNode<BoolT> condition, int true_value,
- Smi* false_value) {
+ Smi false_value) {
return SelectSmiConstant(condition, Smi::FromInt(true_value), false_value);
}
- TNode<Smi> SelectSmiConstant(SloppyTNode<BoolT> condition, Smi* true_value,
+ TNode<Smi> SelectSmiConstant(SloppyTNode<BoolT> condition, Smi true_value,
int false_value) {
return SelectSmiConstant(condition, true_value, Smi::FromInt(false_value));
}
@@ -732,14 +715,19 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
// Check that the value is a non-negative smi.
TNode<BoolT> TaggedIsPositiveSmi(SloppyTNode<Object> a);
   // Check that a word is aligned to the given byte alignment.
- TNode<BoolT> WordIsWordAligned(SloppyTNode<WordT> word);
+ TNode<BoolT> WordIsAligned(SloppyTNode<WordT> word, size_t alignment);
TNode<BoolT> WordIsPowerOfTwo(SloppyTNode<IntPtrT> value);
#if DEBUG
void Bind(Label* label, AssemblerDebugInfo debug_info);
-#else
- void Bind(Label* label);
#endif // DEBUG
+ void Bind(Label* label);
+
+ template <class... T>
+ void Bind(compiler::CodeAssemblerParameterizedLabel<T...>* label,
+ TNode<T>*... phis) {
+ CodeAssembler::Bind(label, phis...);
+ }
void BranchIfSmiEqual(TNode<Smi> a, TNode<Smi> b, Label* if_true,
Label* if_false) {
@@ -766,15 +754,6 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
void BranchIfJSReceiver(Node* object, Label* if_true, Label* if_false);
- void BranchIfFastJSArray(Node* object, Node* context, Label* if_true,
- Label* if_false, bool iteration_only = false);
- void BranchIfNotFastJSArray(Node* object, Node* context, Label* if_true,
- Label* if_false) {
- BranchIfFastJSArray(object, context, if_false, if_true);
- }
- void BranchIfFastJSArrayForCopy(Node* object, Node* context, Label* if_true,
- Label* if_false);
-
   // Branches to {if_true} when the --force-slow-path flag has been passed.
   // It's used for testing to ensure that slow path implementations behave
   // equivalently to the corresponding fast paths (where applicable).
@@ -785,23 +764,20 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
// Branches to {if_true} when Debug::ExecutionMode is DebugInfo::kSideEffect.
void GotoIfDebugExecutionModeChecksSideEffects(Label* if_true);
- // Load value from current frame by given offset in bytes.
- Node* LoadFromFrame(int offset, MachineType rep = MachineType::AnyTagged());
// Load value from current parent frame by given offset in bytes.
Node* LoadFromParentFrame(int offset,
MachineType rep = MachineType::AnyTagged());
- // Load target function from the current JS frame.
- // This is an alternative way of getting the target function in addition to
- // Parameter(Descriptor::kJSTarget). The latter should be used near the
- // beginning of builtin code while the target value is still in the register
- // and the former should be used in slow paths in order to reduce register
- // pressure on the fast path.
- TNode<JSFunction> LoadTargetFromFrame();
-
// Load an object pointer from a buffer that isn't in the heap.
Node* LoadBufferObject(Node* buffer, int offset,
MachineType rep = MachineType::AnyTagged());
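+  // Typed convenience wrappers over LoadBufferObject for off-heap data.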
+ TNode<RawPtrT> LoadBufferPointer(TNode<RawPtrT> buffer, int offset) {
+ return UncheckedCast<RawPtrT>(
+ LoadBufferObject(buffer, offset, MachineType::Pointer()));
+ }
+ TNode<Smi> LoadBufferSmi(TNode<RawPtrT> buffer, int offset) {
+ return CAST(LoadBufferObject(buffer, offset, MachineType::TaggedSigned()));
+ }
// Load a field from an object on the heap.
Node* LoadObjectField(SloppyTNode<HeapObject> object, int offset,
MachineType rep);
@@ -829,6 +805,13 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
return UncheckedCast<Object>(
LoadObjectField(object, offset, MachineType::AnyTagged()));
}
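+  // Typed overload for untagged fields; the enable_if restricts T to
+  // untagged machine representations.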
+ template <class T, typename std::enable_if<
+ std::is_convertible<TNode<T>, TNode<UntaggedT>>::value,
+ int>::type = 0>
+ TNode<T> LoadObjectField(TNode<HeapObject> object, TNode<IntPtrT> offset) {
+ return UncheckedCast<T>(
+ LoadObjectField(object, offset, MachineTypeOf<T>::value));
+ }
// Load a SMI field and untag it.
TNode<IntPtrT> LoadAndUntagObjectField(SloppyTNode<HeapObject> object,
int offset);
@@ -846,7 +829,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
}
// Tag a smi and store it.
- Node* StoreAndTagSmi(Node* base, int offset, Node* value);
+ void StoreAndTagSmi(Node* base, int offset, Node* value);
// Load the floating point value of a HeapNumber.
TNode<Float64T> LoadHeapNumberValue(SloppyTNode<HeapNumber> object);
@@ -865,7 +848,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
TNode<HeapObject> LoadSlowProperties(SloppyTNode<JSObject> object);
TNode<HeapObject> LoadFastProperties(SloppyTNode<JSObject> object);
// Load the elements backing store of a JSObject.
- TNode<FixedArrayBase> LoadElements(SloppyTNode<JSObject> object);
+ TNode<FixedArrayBase> LoadElements(SloppyTNode<JSObject> object) {
+ return LoadJSObjectElements(object);
+ }
   // Load the length of a JSArgumentsObjectWithLength instance.
TNode<Object> LoadJSArgumentsObjectWithLength(
SloppyTNode<JSArgumentsObjectWithLength> array);
@@ -882,6 +867,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
TNode<Smi> LoadWeakFixedArrayLength(TNode<WeakFixedArray> array);
TNode<IntPtrT> LoadAndUntagWeakFixedArrayLength(
SloppyTNode<WeakFixedArray> array);
+ // Load the number of descriptors in DescriptorArray.
+ TNode<Int32T> LoadNumberOfDescriptors(TNode<DescriptorArray> array);
// Load the bit field of a Map.
TNode<Int32T> LoadMapBitField(SloppyTNode<Map> map);
// Load bit field 2 of a map.
@@ -892,7 +879,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
TNode<Int32T> LoadMapInstanceType(SloppyTNode<Map> map);
// Load the ElementsKind of a map.
TNode<Int32T> LoadMapElementsKind(SloppyTNode<Map> map);
- TNode<Int32T> LoadElementsKind(SloppyTNode<HeapObject> map);
+ TNode<Int32T> LoadElementsKind(SloppyTNode<HeapObject> object);
// Load the instance descriptors of a map.
TNode<DescriptorArray> LoadMapDescriptors(SloppyTNode<Map> map);
// Load the prototype of a map.
@@ -991,14 +978,20 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
int additional_offset = 0,
ParameterMode parameter_mode = INTPTR_PARAMETERS);
- // Load an array element from a FixedArray / WeakFixedArray / PropertyArray.
+ // Array is any array-like type that has a fixed header followed by
+ // tagged elements.
+ template <typename Array>
+ TNode<IntPtrT> LoadArrayLength(TNode<Array> array);
+
+ // Array is any array-like type that has a fixed header followed by
+ // tagged elements.
+ template <typename Array>
TNode<MaybeObject> LoadArrayElement(
- SloppyTNode<HeapObject> object, int array_header_size, Node* index,
+ TNode<Array> array, int array_header_size, Node* index,
int additional_offset = 0,
ParameterMode parameter_mode = INTPTR_PARAMETERS,
LoadSensitivity needs_poisoning = LoadSensitivity::kSafe);
- // Load an array element from a FixedArray.
TNode<Object> LoadFixedArrayElement(
TNode<FixedArray> object, Node* index, int additional_offset = 0,
ParameterMode parameter_mode = INTPTR_PARAMETERS,
@@ -1030,24 +1023,26 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
return LoadFixedArrayElement(object, index, 0, SMI_PARAMETERS);
}
- TNode<Object> LoadPropertyArrayElement(SloppyTNode<PropertyArray> object,
+ TNode<Object> LoadPropertyArrayElement(TNode<PropertyArray> object,
SloppyTNode<IntPtrT> index);
TNode<IntPtrT> LoadPropertyArrayLength(TNode<PropertyArray> object);
- // Load an array element from a FixedArray / WeakFixedArray, untag it and
- // return it as Word32.
+  // Load an element from an array, untag it, and return it as Word32.
+ // Array is any array-like type that has a fixed header followed by
+ // tagged elements.
+ template <typename Array>
TNode<Int32T> LoadAndUntagToWord32ArrayElement(
- SloppyTNode<HeapObject> object, int array_header_size, Node* index,
+ TNode<Array> array, int array_header_size, Node* index,
int additional_offset = 0,
ParameterMode parameter_mode = INTPTR_PARAMETERS);
// Load an array element from a FixedArray, untag it and return it as Word32.
TNode<Int32T> LoadAndUntagToWord32FixedArrayElement(
- SloppyTNode<HeapObject> object, Node* index, int additional_offset = 0,
+ TNode<FixedArray> object, Node* index, int additional_offset = 0,
ParameterMode parameter_mode = INTPTR_PARAMETERS);
TNode<Int32T> LoadAndUntagToWord32FixedArrayElement(
- SloppyTNode<HeapObject> object, int index, int additional_offset = 0) {
+ TNode<FixedArray> object, int index, int additional_offset = 0) {
return LoadAndUntagToWord32FixedArrayElement(
object, IntPtrConstant(index), additional_offset, INTPTR_PARAMETERS);
}
@@ -1074,9 +1069,17 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
Label* if_hole = nullptr);
Node* LoadFixedDoubleArrayElement(TNode<FixedDoubleArray> object,
- TNode<Smi> index) {
+ TNode<Smi> index,
+ Label* if_hole = nullptr) {
+ return LoadFixedDoubleArrayElement(object, index, MachineType::Float64(), 0,
+ SMI_PARAMETERS, if_hole);
+ }
+
+ Node* LoadFixedDoubleArrayElement(TNode<FixedDoubleArray> object,
+ TNode<IntPtrT> index,
+ Label* if_hole = nullptr) {
return LoadFixedDoubleArrayElement(object, index, MachineType::Float64(), 0,
- SMI_PARAMETERS);
+ INTPTR_PARAMETERS, if_hole);
}
// Load an array element from a FixedArray, FixedDoubleArray or a
@@ -1098,6 +1101,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
TNode<Float64T> LoadDoubleWithHoleCheck(TNode<FixedDoubleArray> array,
TNode<Smi> index,
Label* if_hole = nullptr);
+ TNode<Float64T> LoadDoubleWithHoleCheck(TNode<FixedDoubleArray> array,
+ TNode<IntPtrT> index,
+ Label* if_hole = nullptr);
// Load Float64 value by |base| + |offset| address. If the value is a double
// hole then jump to |if_hole|. If |machine_type| is None then only the hole
@@ -1165,7 +1171,15 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
// Load the "prototype" property of a JSFunction.
Node* LoadJSFunctionPrototype(Node* function, Label* if_bailout);
- Node* LoadSharedFunctionInfoBytecodeArray(Node* shared);
+ TNode<BytecodeArray> LoadSharedFunctionInfoBytecodeArray(
+ SloppyTNode<SharedFunctionInfo> shared);
+
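+  // The formal parameter count is a 16-bit field on SharedFunctionInfo and
+  // is loaded zero-extended, hence the Uint16 machine type below.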
+ TNode<Int32T> LoadSharedFunctionInfoFormalParameterCount(
+ TNode<SharedFunctionInfo> function) {
+ return TNode<Int32T>::UncheckedCast(LoadObjectField(
+ function, SharedFunctionInfo::kFormalParameterCountOffset,
+ MachineType::Uint16()));
+ }
void StoreObjectByteNoWriteBarrier(TNode<HeapObject> object, int offset,
TNode<Word32T> value);
@@ -1176,19 +1190,27 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
void StoreMutableHeapNumberValue(SloppyTNode<MutableHeapNumber> object,
SloppyTNode<Float64T> value);
// Store a field to an object on the heap.
- Node* StoreObjectField(Node* object, int offset, Node* value);
- Node* StoreObjectField(Node* object, Node* offset, Node* value);
- Node* StoreObjectFieldNoWriteBarrier(
+ void StoreObjectField(Node* object, int offset, Node* value);
+ void StoreObjectField(Node* object, Node* offset, Node* value);
+ void StoreObjectFieldNoWriteBarrier(
Node* object, int offset, Node* value,
MachineRepresentation rep = MachineRepresentation::kTagged);
- Node* StoreObjectFieldNoWriteBarrier(
+ void StoreObjectFieldNoWriteBarrier(
Node* object, Node* offset, Node* value,
MachineRepresentation rep = MachineRepresentation::kTagged);
+
+ template <class T = Object>
+ void StoreObjectFieldNoWriteBarrier(TNode<HeapObject> object,
+ TNode<IntPtrT> offset, TNode<T> value) {
+ StoreObjectFieldNoWriteBarrier(object, offset, value,
+ MachineRepresentationOf<T>::value);
+ }
+
   // Store the Map of a HeapObject.
- Node* StoreMap(Node* object, Node* map);
- Node* StoreMapNoWriteBarrier(Node* object, RootIndex map_root_index);
- Node* StoreMapNoWriteBarrier(Node* object, Node* map);
- Node* StoreObjectFieldRoot(Node* object, int offset, RootIndex root);
+ void StoreMap(Node* object, Node* map);
+ void StoreMapNoWriteBarrier(Node* object, RootIndex map_root_index);
+ void StoreMapNoWriteBarrier(Node* object, Node* map);
+ void StoreObjectFieldRoot(Node* object, int offset, RootIndex root);
// Store an array element to a FixedArray.
void StoreFixedArrayElement(
TNode<FixedArray> object, int index, SloppyTNode<Object> value,
@@ -1196,9 +1218,14 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
return StoreFixedArrayElement(object, IntPtrConstant(index), value,
barrier_mode);
}
+ void StoreFixedArrayElement(TNode<FixedArray> object, int index,
+ TNode<Smi> value) {
+ return StoreFixedArrayElement(object, IntPtrConstant(index), value,
+ SKIP_WRITE_BARRIER);
+ }
- Node* StoreJSArrayLength(TNode<JSArray> array, TNode<Smi> length);
- Node* StoreElements(TNode<Object> object, TNode<FixedArrayBase> elements);
+ void StoreJSArrayLength(TNode<JSArray> array, TNode<Smi> length);
+ void StoreElements(TNode<Object> object, TNode<FixedArrayBase> elements);
void StoreFixedArrayOrPropertyArrayElement(
Node* array, Node* index, Node* value,
@@ -1231,6 +1258,10 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
StoreFixedArrayElement(array, index, value, barrier_mode, 0,
SMI_PARAMETERS);
}
+ void StoreFixedArrayElement(TNode<FixedArray> array, TNode<IntPtrT> index,
+ TNode<Smi> value) {
+ StoreFixedArrayElement(array, index, value, SKIP_WRITE_BARRIER, 0);
+ }
void StoreFixedDoubleArrayElement(
TNode<FixedDoubleArray> object, Node* index, TNode<Float64T> value,
@@ -1249,7 +1280,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
StoreFixedDoubleArrayHole(array, index, SMI_PARAMETERS);
}
- Node* StoreFeedbackVectorSlot(
+ void StoreFeedbackVectorSlot(
Node* object, Node* index, Node* value,
WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER,
int additional_offset = 0,
@@ -1286,8 +1317,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
Node* LoadCellValue(Node* cell);
- Node* StoreCellValue(Node* cell, Node* value,
- WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
+ void StoreCellValue(Node* cell, Node* value,
+ WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
// Allocate a HeapNumber without initializing its value.
TNode<HeapNumber> AllocateHeapNumber();
@@ -1306,10 +1337,10 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
TNode<BigInt> AllocateBigInt(TNode<IntPtrT> length);
// Like above, but allowing custom bitfield initialization.
TNode<BigInt> AllocateRawBigInt(TNode<IntPtrT> length);
- void StoreBigIntBitfield(TNode<BigInt> bigint, TNode<WordT> bitfield);
+ void StoreBigIntBitfield(TNode<BigInt> bigint, TNode<Word32T> bitfield);
void StoreBigIntDigit(TNode<BigInt> bigint, int digit_index,
TNode<UintPtrT> digit);
- TNode<WordT> LoadBigIntBitfield(TNode<BigInt> bigint);
+ TNode<Word32T> LoadBigIntBitfield(TNode<BigInt> bigint);
TNode<UintPtrT> LoadBigIntDigit(TNode<BigInt> bigint, int digit_index);
// Allocate a SeqOneByteString with the given length.
@@ -1405,29 +1436,39 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
Node* object, Node* map, Node* instance_size,
int start_offset = JSObject::kHeaderSize);
+ TNode<BoolT> IsValidFastJSArrayCapacity(Node* capacity,
+ ParameterMode capacity_mode);
+
// Allocate a JSArray without elements and initialize the header fields.
- Node* AllocateUninitializedJSArrayWithoutElements(
- Node* array_map, Node* length, Node* allocation_site = nullptr);
+ TNode<JSArray> AllocateUninitializedJSArrayWithoutElements(
+ TNode<Map> array_map, TNode<Smi> length, Node* allocation_site = nullptr);
// Allocate and return a JSArray with initialized header fields and its
// uninitialized elements.
// The ParameterMode argument is only used for the capacity parameter.
- std::pair<Node*, Node*> AllocateUninitializedJSArrayWithElements(
- ElementsKind kind, Node* array_map, Node* length, Node* allocation_site,
- Node* capacity, ParameterMode capacity_mode = INTPTR_PARAMETERS);
+ std::pair<TNode<JSArray>, TNode<FixedArrayBase>>
+ AllocateUninitializedJSArrayWithElements(
+ ElementsKind kind, TNode<Map> array_map, TNode<Smi> length,
+ Node* allocation_site, Node* capacity,
+ ParameterMode capacity_mode = INTPTR_PARAMETERS,
+ AllocationFlags allocation_flags = kNone);
+
// Allocate a JSArray and fill elements with the hole.
// The ParameterMode argument is only used for the capacity parameter.
- Node* AllocateJSArray(ElementsKind kind, Node* array_map, Node* capacity,
- Node* length, Node* allocation_site = nullptr,
- ParameterMode capacity_mode = INTPTR_PARAMETERS);
-
- Node* AllocateJSArray(ElementsKind kind, TNode<Map> array_map,
- TNode<Smi> capacity, TNode<Smi> length) {
+ TNode<JSArray> AllocateJSArray(
+ ElementsKind kind, TNode<Map> array_map, Node* capacity,
+ TNode<Smi> length, Node* allocation_site = nullptr,
+ ParameterMode capacity_mode = INTPTR_PARAMETERS,
+ AllocationFlags allocation_flags = kNone);
+
+ TNode<JSArray> AllocateJSArray(ElementsKind kind, TNode<Map> array_map,
+ TNode<Smi> capacity, TNode<Smi> length) {
return AllocateJSArray(kind, array_map, capacity, length, nullptr,
SMI_PARAMETERS);
}
- Node* AllocateJSArray(ElementsKind kind, TNode<Map> array_map,
- TNode<IntPtrT> capacity, TNode<Smi> length) {
+ TNode<JSArray> AllocateJSArray(ElementsKind kind, TNode<Map> array_map,
+ TNode<IntPtrT> capacity, TNode<Smi> length) {
return AllocateJSArray(kind, array_map, capacity, length, nullptr,
INTPTR_PARAMETERS);
}
@@ -1482,6 +1523,15 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
return result;
}
+ TNode<FixedArray> AllocateFixedArrayWithHoles(TNode<IntPtrT> capacity,
+ AllocationFlags flags) {
+ TNode<FixedArray> result = UncheckedCast<FixedArray>(
+ AllocateFixedArray(PACKED_ELEMENTS, capacity, flags));
+ FillFixedArrayWithValue(PACKED_ELEMENTS, result, IntPtrConstant(0),
+ capacity, RootIndex::kTheHoleValue);
+ return result;
+ }
+
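The helper above folds the common allocate-then-fill-with-holes sequence into one call. A sketch, assuming the capacity is already available as an IntPtrT:

    // Hypothetical: a 16-slot FixedArray whose slots all start out as
    // the-hole, exactly as FillFixedArrayWithValue produces above.
    TNode<FixedArray> backing =
        AllocateFixedArrayWithHoles(IntPtrConstant(16), kNone);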
Node* AllocatePropertyArray(Node* capacity,
ParameterMode mode = INTPTR_PARAMETERS,
AllocationFlags flags = kNone);
@@ -1494,9 +1544,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
Node* AllocateJSIteratorResult(Node* context, Node* value, Node* done);
Node* AllocateJSIteratorResultForEntry(Node* context, Node* key, Node* value);
- Node* ArraySpeciesCreate(TNode<Context> context, TNode<Object> originalArray,
- TNode<Number> len);
- Node* InternalArrayCreate(TNode<Context> context, TNode<Number> len);
+ TNode<JSReceiver> ArraySpeciesCreate(TNode<Context> context,
+ TNode<Object> originalArray,
+ TNode<Number> len);
void FillFixedArrayWithValue(ElementsKind kind, Node* array, Node* from_index,
Node* to_index, RootIndex value_root_index,
@@ -1590,10 +1640,15 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
// needs to be the same. Copy from src_elements at
// [src_index, src_index + length) to dst_elements at
// [dst_index, dst_index + length).
+  // The function decides whether it can use memcpy. When it cannot, the
+  // |write_barrier| argument controls whether the element-wise copy emits
+  // write barriers. SKIP_WRITE_BARRIER is only safe when copying to new
+  // space, or when copying to old space and the array does not contain
+  // object pointers.
void CopyElements(ElementsKind kind, TNode<FixedArrayBase> dst_elements,
TNode<IntPtrT> dst_index,
TNode<FixedArrayBase> src_elements,
- TNode<IntPtrT> src_index, TNode<IntPtrT> length);
+ TNode<IntPtrT> src_index, TNode<IntPtrT> length,
+ WriteBarrierMode write_barrier = UPDATE_WRITE_BARRIER);
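A sketch of the barrier-skipping case the new comment allows (|dst_elements| is assumed to be freshly allocated in new space; all names are hypothetical):

    // Hypothetical: element-wise copy into a new-space destination, so
    // SKIP_WRITE_BARRIER is safe even when memcpy cannot be used.
    CopyElements(PACKED_ELEMENTS, dst_elements, IntPtrConstant(0),
                 src_elements, IntPtrConstant(0), length, SKIP_WRITE_BARRIER);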
TNode<FixedArray> HeapObjectToFixedArray(TNode<HeapObject> base,
Label* cast_fail);
@@ -1606,6 +1661,14 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
return UncheckedCast<FixedDoubleArray>(base);
}
+ TNode<FixedArray> HeapObjectToSloppyArgumentsElements(TNode<HeapObject> base,
+ Label* cast_fail) {
+ GotoIf(WordNotEqual(LoadMap(base),
+ LoadRoot(RootIndex::kSloppyArgumentsElementsMap)),
+ cast_fail);
+ return UncheckedCast<FixedArray>(base);
+ }
+
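The new helper follows the same cast-or-bail pattern as HeapObjectToFixedArray above. A hypothetical call site:

    // Hypothetical: fall through to a generic path unless |elements|
    // carries the sloppy-arguments elements map checked above.
    Label bailout(this);
    TNode<FixedArray> sloppy_elements =
        HeapObjectToSloppyArgumentsElements(elements, &bailout);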
TNode<Int32T> ConvertElementsKindToInt(TNode<Int32T> elements_kind) {
return UncheckedCast<Int32T>(elements_kind);
}
@@ -1646,13 +1709,17 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
// * If |var_holes_converted| is given, any holes will be converted to
// undefined and the variable will be set according to whether or not there
// were any holes.
+  // * If |source_elements_kind| is given, the function will try to use the
+  // runtime elements kind of the source to make the copy faster. More
+  // specifically, it can skip write barriers.
TNode<FixedArrayBase> ExtractFixedArray(
Node* source, Node* first, Node* count = nullptr,
Node* capacity = nullptr,
ExtractFixedArrayFlags extract_flags =
ExtractFixedArrayFlag::kAllFixedArrays,
ParameterMode parameter_mode = INTPTR_PARAMETERS,
- TVariable<BoolT>* var_holes_converted = nullptr);
+ TVariable<BoolT>* var_holes_converted = nullptr,
+ Node* source_elements_kind = nullptr);
TNode<FixedArrayBase> ExtractFixedArray(
TNode<FixedArrayBase> source, TNode<Smi> first, TNode<Smi> count,
@@ -1698,7 +1765,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
ExtractFixedArrayFlag::kAllFixedArrays,
ParameterMode parameter_mode = INTPTR_PARAMETERS,
HoleConversionMode convert_holes = HoleConversionMode::kDontConvert,
- TVariable<BoolT>* var_holes_converted = nullptr);
+ TVariable<BoolT>* var_holes_converted = nullptr,
+ Node* source_runtime_kind = nullptr);
// Attempt to copy a FixedDoubleArray to another FixedDoubleArray. In the case
// where the source array has a hole, produce a FixedArray instead where holes
@@ -1835,13 +1903,22 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
Variable* var_numeric,
Variable* var_feedback);
- TNode<WordT> TimesPointerSize(SloppyTNode<WordT> value);
- TNode<IntPtrT> TimesPointerSize(TNode<IntPtrT> value) {
- return Signed(TimesPointerSize(implicit_cast<TNode<WordT>>(value)));
+ TNode<WordT> TimesSystemPointerSize(SloppyTNode<WordT> value);
+ TNode<IntPtrT> TimesSystemPointerSize(TNode<IntPtrT> value) {
+ return Signed(TimesSystemPointerSize(implicit_cast<TNode<WordT>>(value)));
+ }
+ TNode<UintPtrT> TimesSystemPointerSize(TNode<UintPtrT> value) {
+ return Unsigned(TimesSystemPointerSize(implicit_cast<TNode<WordT>>(value)));
+ }
+
+ TNode<WordT> TimesTaggedSize(SloppyTNode<WordT> value);
+ TNode<IntPtrT> TimesTaggedSize(TNode<IntPtrT> value) {
+ return Signed(TimesTaggedSize(implicit_cast<TNode<WordT>>(value)));
}
- TNode<UintPtrT> TimesPointerSize(TNode<UintPtrT> value) {
- return Unsigned(TimesPointerSize(implicit_cast<TNode<WordT>>(value)));
+ TNode<UintPtrT> TimesTaggedSize(TNode<UintPtrT> value) {
+ return Unsigned(TimesTaggedSize(implicit_cast<TNode<WordT>>(value)));
}
+
TNode<WordT> TimesDoubleSize(SloppyTNode<WordT> value);
TNode<UintPtrT> TimesDoubleSize(TNode<UintPtrT> value) {
return Unsigned(TimesDoubleSize(implicit_cast<TNode<WordT>>(value)));
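TimesPointerSize is split in two because tagged slots and raw machine words need not be the same width (for example, once tagged values are compressed), so each call site must say which scaling it means. A sketch contrasting the two (|n| is a hypothetical TNode<IntPtrT> count):

    // Hypothetical: n tagged slots vs. n machine words, in bytes. The
    // results only coincide while kTaggedSize == kSystemPointerSize.
    TNode<IntPtrT> tagged_bytes = TimesTaggedSize(n);
    TNode<IntPtrT> pointer_bytes = TimesSystemPointerSize(n);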
@@ -1869,16 +1946,16 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
// Throws a TypeError for {method_name} if {value} is not a JSReceiver.
// Returns the {value}'s map.
Node* ThrowIfNotJSReceiver(Node* context, Node* value,
- MessageTemplate::Template msg_template,
+ MessageTemplate msg_template,
const char* method_name = nullptr);
- void ThrowRangeError(Node* context, MessageTemplate::Template message,
+ void ThrowRangeError(Node* context, MessageTemplate message,
Node* arg0 = nullptr, Node* arg1 = nullptr,
Node* arg2 = nullptr);
- void ThrowTypeError(Node* context, MessageTemplate::Template message,
+ void ThrowTypeError(Node* context, MessageTemplate message,
char const* arg0 = nullptr, char const* arg1 = nullptr);
- void ThrowTypeError(Node* context, MessageTemplate::Template message,
- Node* arg0, Node* arg1 = nullptr, Node* arg2 = nullptr);
+ void ThrowTypeError(Node* context, MessageTemplate message, Node* arg0,
+ Node* arg1 = nullptr, Node* arg2 = nullptr);
// Type checks.
// Check whether the map is for an object with special properties, such as a
@@ -1889,6 +1966,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
TNode<BoolT> IsAllocationSite(SloppyTNode<HeapObject> object);
TNode<BoolT> IsAnyHeapNumber(SloppyTNode<HeapObject> object);
TNode<BoolT> IsNoElementsProtectorCellInvalid();
+ TNode<BoolT> IsArrayIteratorProtectorCellInvalid();
TNode<BoolT> IsBigIntInstanceType(SloppyTNode<Int32T> instance_type);
TNode<BoolT> IsBigInt(SloppyTNode<HeapObject> object);
TNode<BoolT> IsBoolean(SloppyTNode<HeapObject> object);
@@ -1906,10 +1984,6 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
TNode<BoolT> IsExtensibleMap(SloppyTNode<Map> map);
TNode<BoolT> IsExtensibleNonPrototypeMap(TNode<Map> map);
TNode<BoolT> IsExternalStringInstanceType(SloppyTNode<Int32T> instance_type);
- TNode<BoolT> IsFastJSArray(SloppyTNode<Object> object,
- SloppyTNode<Context> context);
- TNode<BoolT> IsFastJSArrayWithNoCustomIteration(TNode<Object> object,
- TNode<Context> context);
TNode<BoolT> IsFeedbackCell(SloppyTNode<HeapObject> object);
TNode<BoolT> IsFeedbackVector(SloppyTNode<HeapObject> object);
TNode<BoolT> IsContext(SloppyTNode<HeapObject> object);
@@ -1926,6 +2000,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
TNode<BoolT> IsEphemeronHashTable(SloppyTNode<HeapObject> object);
TNode<BoolT> IsHeapNumber(SloppyTNode<HeapObject> object);
TNode<BoolT> IsHeapNumberInstanceType(SloppyTNode<Int32T> instance_type);
+ TNode<BoolT> IsOddball(SloppyTNode<HeapObject> object);
TNode<BoolT> IsOddballInstanceType(SloppyTNode<Int32T> instance_type);
TNode<BoolT> IsIndirectStringInstanceType(SloppyTNode<Int32T> instance_type);
TNode<BoolT> IsJSArrayBuffer(SloppyTNode<HeapObject> object);
@@ -1965,6 +2040,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
TNode<BoolT> IsNullOrUndefined(SloppyTNode<Object> object);
TNode<BoolT> IsNumberDictionary(SloppyTNode<HeapObject> object);
TNode<BoolT> IsOneByteStringInstanceType(SloppyTNode<Int32T> instance_type);
+ TNode<BoolT> HasOnlyOneByteChars(TNode<Int32T> instance_type);
TNode<BoolT> IsPrimitiveInstanceType(SloppyTNode<Int32T> instance_type);
TNode<BoolT> IsPrivateSymbol(SloppyTNode<HeapObject> object);
TNode<BoolT> IsPromiseCapability(SloppyTNode<HeapObject> object);
@@ -1974,6 +2050,14 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
SloppyTNode<Map> map);
TNode<BoolT> IsPrototypeTypedArrayPrototype(SloppyTNode<Context> context,
SloppyTNode<Map> map);
+
+ TNode<BoolT> IsFastAliasedArgumentsMap(TNode<Context> context,
+ TNode<Map> map);
+ TNode<BoolT> IsSlowAliasedArgumentsMap(TNode<Context> context,
+ TNode<Map> map);
+ TNode<BoolT> IsSloppyArgumentsMap(TNode<Context> context, TNode<Map> map);
+ TNode<BoolT> IsStrictArgumentsMap(TNode<Context> context, TNode<Map> map);
+
TNode<BoolT> IsSequentialStringInstanceType(
SloppyTNode<Int32T> instance_type);
TNode<BoolT> IsUncachedExternalStringInstanceType(
@@ -2001,6 +2085,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
TNode<BoolT> IsPromiseThenProtectorCellInvalid();
TNode<BoolT> IsArraySpeciesProtectorCellInvalid();
TNode<BoolT> IsTypedArraySpeciesProtectorCellInvalid();
+ TNode<BoolT> IsRegExpSpeciesProtectorCellInvalid();
TNode<BoolT> IsPromiseSpeciesProtectorCellInvalid();
// True iff |object| is a Smi or a HeapNumber.
@@ -2057,6 +2142,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
Node* IsHoleyFastElementsKind(Node* elements_kind);
Node* IsElementsKindGreaterThan(Node* target_kind,
ElementsKind reference_kind);
+ TNode<BoolT> IsElementsKindLessThanOrEqual(TNode<Int32T> target_kind,
+ ElementsKind reference_kind);
// String helpers.
// Load a character from a String (might flatten a ConsString).
@@ -2152,11 +2239,11 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
};
// ES6 7.1.17 ToIndex, but jumps to range_error if the result is not a Smi.
- TNode<Smi> ToSmiIndex(TNode<Object> input, TNode<Context> context,
+ TNode<Smi> ToSmiIndex(TNode<Context> context, TNode<Object> input,
Label* range_error);
// ES6 7.1.15 ToLength, but jumps to range_error if the result is not a Smi.
- TNode<Smi> ToSmiLength(TNode<Object> input, TNode<Context> context,
+ TNode<Smi> ToSmiLength(TNode<Context> context, TNode<Object> input,
Label* range_error);
// ES6 7.1.15 ToLength, but with inlined fast path.
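Both conversions now take the context first, matching the prevailing CSA argument order. A hypothetical call site:

    // Hypothetical: convert a user-supplied index, bailing out when the
    // result does not fit in a Smi.
    Label range_error(this);
    TNode<Smi> index = ToSmiIndex(context, input, &range_error);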
@@ -2339,7 +2426,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
"Use the non-templatized version for DescriptorArray");
const int kKeyToDetailsOffset =
(ContainerType::kEntryDetailsIndex - ContainerType::kEntryKeyIndex) *
- kPointerSize;
+ kTaggedSize;
return Unsigned(LoadAndUntagToWord32FixedArrayElement(
CAST(container), key_index, kKeyToDetailsOffset));
}
@@ -2352,17 +2439,10 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
"Use the non-templatized version for DescriptorArray");
const int kKeyToValueOffset =
(ContainerType::kEntryValueIndex - ContainerType::kEntryKeyIndex) *
- kPointerSize;
+ kTaggedSize;
return LoadFixedArrayElement(CAST(container), key_index, kKeyToValueOffset);
}
- TNode<Uint32T> LoadDetailsByKeyIndex(TNode<DescriptorArray> container,
- TNode<IntPtrT> key_index);
- TNode<Object> LoadValueByKeyIndex(TNode<DescriptorArray> container,
- TNode<IntPtrT> key_index);
- TNode<MaybeObject> LoadFieldTypeByKeyIndex(TNode<DescriptorArray> container,
- TNode<IntPtrT> key_index);
-
// Stores the details for the entry with the given key_index.
// |details| must be a Smi.
template <class ContainerType>
@@ -2370,7 +2450,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
TNode<IntPtrT> key_index, TNode<Smi> details) {
const int kKeyToDetailsOffset =
(ContainerType::kEntryDetailsIndex - ContainerType::kEntryKeyIndex) *
- kPointerSize;
+ kTaggedSize;
StoreFixedArrayElement(container, key_index, details, SKIP_WRITE_BARRIER,
kKeyToDetailsOffset);
}
@@ -2383,7 +2463,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
WriteBarrierMode write_barrier = UPDATE_WRITE_BARRIER) {
const int kKeyToValueOffset =
(ContainerType::kEntryValueIndex - ContainerType::kEntryKeyIndex) *
- kPointerSize;
+ kTaggedSize;
StoreFixedArrayElement(container, key_index, value, write_barrier,
kKeyToValueOffset);
}
@@ -2397,6 +2477,11 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
LoadFixedArrayElement(dictionary, Dictionary::kNumberOfElementsIndex));
}
+ TNode<Smi> GetNumberDictionaryNumberOfElements(
+ TNode<NumberDictionary> dictionary) {
+ return GetNumberOfElements<NumberDictionary>(dictionary);
+ }
+
template <class Dictionary>
void SetNumberOfElements(TNode<Dictionary> dictionary,
TNode<Smi> num_elements_smi) {
@@ -2522,14 +2607,19 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
return CallBuiltin(Builtins::kSetProperty, context, receiver, key, value);
}
+ TNode<Object> SetPropertyInLiteral(TNode<Context> context,
+ TNode<JSObject> receiver,
+ TNode<Object> key, TNode<Object> value) {
+ return CallBuiltin(Builtins::kSetPropertyInLiteral, context, receiver, key,
+ value);
+ }
+
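Like SetProperty above, this is a thin typed wrapper over a builtin call; the in-literal variant is used where literal evaluation defines the property directly rather than running setters. A hypothetical call site (|boilerplate|, |key|, and |value| are placeholders):

    // Hypothetical: define |key| on an object-literal boilerplate.
    SetPropertyInLiteral(context, boilerplate, key, value);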
Node* GetMethod(Node* context, Node* object, Handle<Name> name,
Label* if_null_or_undefined);
template <class... TArgs>
TNode<Object> CallBuiltin(Builtins::Name id, SloppyTNode<Object> context,
TArgs... args) {
- DCHECK_IMPLIES(Builtins::KindOf(id) == Builtins::TFJ,
- !Builtins::IsLazy(id));
return CallStub<Object>(Builtins::CallableFor(isolate(), id), context,
args...);
}
@@ -2537,8 +2627,6 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
template <class... TArgs>
void TailCallBuiltin(Builtins::Name id, SloppyTNode<Object> context,
TArgs... args) {
- DCHECK_IMPLIES(Builtins::KindOf(id) == Builtins::TFJ,
- !Builtins::IsLazy(id));
return TailCallStub(Builtins::CallableFor(isolate(), id), context, args...);
}
@@ -2644,9 +2732,20 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
TNode<FeedbackVector> LoadFeedbackVector(SloppyTNode<JSFunction> closure,
Label* if_undefined = nullptr);
+  // Load the object from the feedback vector cell for the given closure.
+ // The returned object could be undefined if the closure does not have
+ // a feedback vector associated with it.
+ TNode<Object> LoadFeedbackVectorUnchecked(SloppyTNode<JSFunction> closure);
+
// Update the type feedback vector.
void UpdateFeedback(Node* feedback, Node* feedback_vector, Node* slot_id);
+ // Returns the stricter of the Context::ScopeInfo::LanguageMode and
+ // the language mode on the SFI.
+ Node* GetLanguageMode(TNode<SharedFunctionInfo> sfi, Node* context);
+ Node* GetLanguageMode(TNode<JSFunction> closure, Node* context);
+ Node* GetLanguageMode(TNode<FeedbackVector> vector, Node* context);
+
// Report that there was a feedback update, performing any tasks that should
// be done after a feedback update.
void ReportFeedbackUpdate(SloppyTNode<FeedbackVector> feedback_vector,
@@ -2699,7 +2798,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
TNode<IntPtrT> intptr_key,
TNode<Object> value,
TNode<Context> context,
- Label* opt_if_neutered);
+ Label* opt_if_detached);
// Part of the above, refactored out to reuse in another place.
void EmitBigTypedArrayElementStore(TNode<FixedTypedArrayBase> elements,
TNode<RawPtrT> backing_store,
@@ -2712,28 +2811,20 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
void BigIntToRawBytes(TNode<BigInt> bigint, TVariable<UintPtrT>* var_low,
TVariable<UintPtrT>* var_high);
- void EmitElementStore(Node* object, Node* key, Node* value, bool is_jsarray,
+ void EmitElementStore(Node* object, Node* key, Node* value,
ElementsKind elements_kind,
KeyedAccessStoreMode store_mode, Label* bailout,
Node* context);
Node* CheckForCapacityGrow(Node* object, Node* elements, ElementsKind kind,
- KeyedAccessStoreMode store_mode, Node* length,
- Node* key, ParameterMode mode, bool is_js_array,
+ Node* length, Node* key, ParameterMode mode,
Label* bailout);
Node* CopyElementsOnWrite(Node* object, Node* elements, ElementsKind kind,
Node* length, ParameterMode mode, Label* bailout);
void TransitionElementsKind(Node* object, Node* map, ElementsKind from_kind,
- ElementsKind to_kind, bool is_jsarray,
- Label* bailout);
-
- void TransitionElementsKind(TNode<JSReceiver> object, TNode<Map> map,
- ElementsKind from_kind, ElementsKind to_kind,
- Label* bailout) {
- TransitionElementsKind(object, map, from_kind, to_kind, true, bailout);
- }
+ ElementsKind to_kind, Label* bailout);
void TrapAllocationMemento(Node* object, Label* memento_found);
@@ -2832,6 +2923,11 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
if_false);
}
+ void BranchIfNumberNotEqual(TNode<Number> left, TNode<Number> right,
+ Label* if_true, Label* if_false) {
+ BranchIfNumberEqual(left, right, if_false, if_true);
+ }
+
void BranchIfNumberLessThan(TNode<Number> left, TNode<Number> right,
Label* if_true, Label* if_false) {
BranchIfNumberRelationalComparison(Operation::kLessThan, left, right,
@@ -2895,9 +2991,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
TNode<Object> GetSuperConstructor(SloppyTNode<Context> context,
SloppyTNode<JSFunction> active_function);
- TNode<Object> SpeciesConstructor(SloppyTNode<Context> context,
- SloppyTNode<Object> object,
- SloppyTNode<Object> default_constructor);
+ TNode<JSReceiver> SpeciesConstructor(
+ SloppyTNode<Context> context, SloppyTNode<Object> object,
+ SloppyTNode<JSReceiver> default_constructor);
Node* InstanceOf(Node* object, Node* callable, Node* context);
@@ -2954,6 +3050,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
Node* IsPromiseHookEnabled();
Node* HasAsyncEventDelegate();
Node* IsPromiseHookEnabledOrHasAsyncEventDelegate();
+ Node* IsPromiseHookEnabledOrDebugIsActiveOrHasAsyncEventDelegate();
// Helpers for StackFrame markers.
Node* MarkerIsFrameType(Node* marker_or_function,
@@ -2980,8 +3077,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
}
template <class... TArgs>
- Node* MakeTypeError(MessageTemplate::Template message, Node* context,
- TArgs... args) {
+ Node* MakeTypeError(MessageTemplate message, Node* context, TArgs... args) {
STATIC_ASSERT(sizeof...(TArgs) <= 3);
Node* const make_type_error = LoadContextElement(
LoadNativeContext(context), Context::MAKE_TYPE_ERROR_INDEX);
@@ -2997,10 +3093,41 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
bool ConstexprBoolNot(bool value) { return !value; }
bool ConstexprInt31Equal(int31_t a, int31_t b) { return a == b; }
+ uint32_t ConstexprUint32Add(uint32_t a, uint32_t b) { return a + b; }
void PerformStackCheck(TNode<Context> context);
- protected:
+ void SetPropertyLength(TNode<Context> context, TNode<Object> array,
+ TNode<Number> length);
+
+  // Checks that {object_map}'s prototype map is {initial_prototype_map} and
+  // that the field at descriptor index {descriptor} is still constant.
+  // If it is not, goes to label {if_modified}.
+  //
+  // To make the checks robust, the method also asserts that the descriptor
+  // has the right key; the caller must pass the root index of the key in
+  // {field_name_root_index}.
+  //
+  // This is useful for checking that a given function has not been patched
+  // on the prototype.
+ void GotoIfInitialPrototypePropertyModified(TNode<Map> object_map,
+ TNode<Map> initial_prototype_map,
+                                              int descriptor,
+ RootIndex field_name_root_index,
+ Label* if_modified);
+ struct DescriptorIndexAndName {
+ DescriptorIndexAndName() {}
+ DescriptorIndexAndName(int descriptor_index, RootIndex name_root_index)
+ : descriptor_index(descriptor_index),
+ name_root_index(name_root_index) {}
+
+ int descriptor_index;
+ RootIndex name_root_index;
+ };
+ void GotoIfInitialPrototypePropertiesModified(
+ TNode<Map> object_map, TNode<Map> initial_prototype_map,
+ Vector<DescriptorIndexAndName> properties, Label* if_modified);
+
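A sketch of the guard these methods enable (the descriptor index and the name root below are placeholders, and the ArrayVector helper for building a Vector from a C array is assumed; real callers pass the descriptor index and the RootIndex of each guarded property name):

    // Hypothetical fast-path guard: leave the fast path if any watched
    // property on the initial prototype is no longer the boot-time
    // constant.
    Label if_modified(this);
    DescriptorIndexAndName watched[] = {
        DescriptorIndexAndName(0, RootIndex::kTheHoleValue)};  // placeholder
    GotoIfInitialPrototypePropertiesModified(
        object_map, initial_prototype_map, ArrayVector(watched), &if_modified);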
// Implements DescriptorArray::Search().
void DescriptorLookup(SloppyTNode<Name> unique_name,
SloppyTNode<DescriptorArray> descriptors,
@@ -3050,7 +3177,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
TNode<Uint32T> DescriptorArrayGetDetails(TNode<DescriptorArray> descriptors,
TNode<Uint32T> descriptor_number);
- typedef std::function<void(TNode<UintPtrT> descriptor_key_index)>
+ typedef std::function<void(TNode<IntPtrT> descriptor_key_index)>
ForEachDescriptorBodyFunction;
void DescriptorArrayForEach(VariableList& variable_list,
@@ -3058,6 +3185,33 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
TNode<Uint32T> end_descriptor,
const ForEachDescriptorBodyFunction& body);
+ // Descriptor array accessors based on key_index, which is equal to
+ // DescriptorArray::ToKeyIndex(descriptor).
+ TNode<Name> LoadKeyByKeyIndex(TNode<DescriptorArray> container,
+ TNode<IntPtrT> key_index);
+ TNode<Uint32T> LoadDetailsByKeyIndex(TNode<DescriptorArray> container,
+ TNode<IntPtrT> key_index);
+ TNode<Object> LoadValueByKeyIndex(TNode<DescriptorArray> container,
+ TNode<IntPtrT> key_index);
+ TNode<MaybeObject> LoadFieldTypeByKeyIndex(TNode<DescriptorArray> container,
+ TNode<IntPtrT> key_index);
+
+ TNode<IntPtrT> DescriptorEntryToIndex(TNode<IntPtrT> descriptor);
+
+ // Descriptor array accessors based on descriptor.
+ TNode<Name> LoadKeyByDescriptorEntry(TNode<DescriptorArray> descriptors,
+ TNode<IntPtrT> descriptor);
+ TNode<Name> LoadKeyByDescriptorEntry(TNode<DescriptorArray> descriptors,
+ int descriptor);
+ TNode<Uint32T> LoadDetailsByDescriptorEntry(
+ TNode<DescriptorArray> descriptors, TNode<IntPtrT> descriptor);
+ TNode<Uint32T> LoadDetailsByDescriptorEntry(
+ TNode<DescriptorArray> descriptors, int descriptor);
+ TNode<Object> LoadValueByDescriptorEntry(TNode<DescriptorArray> descriptors,
+ int descriptor);
+ TNode<MaybeObject> LoadFieldTypeByDescriptorEntry(
+ TNode<DescriptorArray> descriptors, TNode<IntPtrT> descriptor);
+
typedef std::function<void(TNode<Name> key, TNode<Object> value)>
ForEachKeyValueFunction;
@@ -3082,6 +3236,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
void InitializeFunctionContext(Node* native_context, Node* context,
int slots);
+ TNode<JSArray> ArrayCreate(TNode<Context> context, TNode<Number> length);
+
// Allocate a clone of a mutable primitive, if {object} is a
// MutableHeapNumber.
TNode<Object> CloneIfMutablePrimitive(TNode<Object> object);
@@ -3091,17 +3247,25 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
void HandleBreakOnNode();
- Node* AllocateRawDoubleAligned(Node* size_in_bytes, AllocationFlags flags,
- Node* top_address, Node* limit_address);
- Node* AllocateRawUnaligned(Node* size_in_bytes, AllocationFlags flags,
- Node* top_adddress, Node* limit_address);
- Node* AllocateRaw(Node* size_in_bytes, AllocationFlags flags,
- Node* top_address, Node* limit_address);
+ TNode<HeapObject> AllocateRawDoubleAligned(TNode<IntPtrT> size_in_bytes,
+ AllocationFlags flags,
+ TNode<RawPtrT> top_address,
+ TNode<RawPtrT> limit_address);
+ TNode<HeapObject> AllocateRawUnaligned(TNode<IntPtrT> size_in_bytes,
+ AllocationFlags flags,
+ TNode<RawPtrT> top_address,
+ TNode<RawPtrT> limit_address);
+ TNode<HeapObject> AllocateRaw(TNode<IntPtrT> size_in_bytes,
+ AllocationFlags flags,
+ TNode<RawPtrT> top_address,
+ TNode<RawPtrT> limit_address);
+
// Allocate and return a JSArray of given total size in bytes with header
// fields initialized.
- Node* AllocateUninitializedJSArray(Node* array_map, Node* length,
- Node* allocation_site,
- Node* size_in_bytes);
+ TNode<JSArray> AllocateUninitializedJSArray(TNode<Map> array_map,
+ TNode<Smi> length,
+ Node* allocation_site,
+ TNode<IntPtrT> size_in_bytes);
TNode<BoolT> IsValidSmi(TNode<Smi> smi);
Node* SmiShiftBitsConstant();
@@ -3158,6 +3322,12 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
Label* if_bigint = nullptr,
Variable* var_bigint = nullptr,
Variable* var_feedback = nullptr);
+
+ private:
+ // Low-level accessors for Descriptor arrays.
+ TNode<MaybeObject> LoadDescriptorArrayElement(TNode<DescriptorArray> object,
+ Node* index,
+ int additional_offset = 0);
};
class CodeStubArguments {
@@ -3302,7 +3472,6 @@ class ToDirectStringAssembler : public CodeStubAssembler {
const Flags flags_;
};
-
DEFINE_OPERATORS_FOR_FLAGS(CodeStubAssembler::AllocationFlags);
} // namespace internal
diff --git a/deps/v8/src/code-stubs-utils.h b/deps/v8/src/code-stubs-utils.h
deleted file mode 100644
index 4b411acb9b..0000000000
--- a/deps/v8/src/code-stubs-utils.h
+++ /dev/null
@@ -1,49 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_CODE_STUBS_UTILS_H_
-#define V8_CODE_STUBS_UTILS_H_
-
-namespace v8 {
-namespace internal {
-
-namespace compiler {
-class CodeAssemblerState;
-} // namespace compiler
-
-// ----------------------------------------------------------------------------
-// Support macro for defining code stubs with Turbofan.
-// ----------------------------------------------------------------------------
-//
-// A code stub generator is defined by writing:
-//
-// TF_STUB(name, code_assember_base_class) {
-// ...
-// }
-//
-// In the body of the generator function the arguments can be accessed
-// as "Parameter(n)".
-#define TF_STUB(StubName, AssemblerBase) \
- class StubName##Assembler : public AssemblerBase { \
- public: \
- typedef StubName::Descriptor Descriptor; \
- \
- explicit StubName##Assembler(compiler::CodeAssemblerState* state) \
- : AssemblerBase(state) {} \
- void Generate##StubName##Impl(const StubName* stub); \
- \
- Node* Parameter(Descriptor::ParameterIndices index) { \
- return CodeAssembler::Parameter(static_cast<int>(index)); \
- } \
- }; \
- void StubName::GenerateAssembly(compiler::CodeAssemblerState* state) const { \
- StubName##Assembler assembler(state); \
- assembler.Generate##StubName##Impl(this); \
- } \
- void StubName##Assembler::Generate##StubName##Impl(const StubName* stub)
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_CODE_STUBS_UTILS_H_
diff --git a/deps/v8/src/code-stubs.cc b/deps/v8/src/code-stubs.cc
deleted file mode 100644
index adca79ac8f..0000000000
--- a/deps/v8/src/code-stubs.cc
+++ /dev/null
@@ -1,466 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/code-stubs.h"
-
-#include <sstream>
-
-#include "src/arguments.h"
-#include "src/assembler-inl.h"
-#include "src/ast/ast.h"
-#include "src/bootstrapper.h"
-#include "src/code-factory.h"
-#include "src/code-stub-assembler.h"
-#include "src/code-stubs-utils.h"
-#include "src/code-tracer.h"
-#include "src/counters.h"
-#include "src/gdb-jit.h"
-#include "src/heap/heap-inl.h"
-#include "src/ic/ic-stats.h"
-#include "src/ic/ic.h"
-#include "src/macro-assembler.h"
-#include "src/objects-inl.h"
-#include "src/objects/hash-table-inl.h"
-#include "src/tracing/tracing-category-observer.h"
-
-namespace v8 {
-namespace internal {
-
-using compiler::CodeAssemblerState;
-
-CodeStubDescriptor::CodeStubDescriptor(CodeStub* stub)
- : isolate_(stub->isolate()),
- call_descriptor_(stub->GetCallInterfaceDescriptor()),
- stack_parameter_count_(no_reg),
- hint_stack_parameter_count_(-1),
- function_mode_(NOT_JS_FUNCTION_STUB_MODE),
- deoptimization_handler_(kNullAddress),
- miss_handler_(),
- has_miss_handler_(false) {}
-
-CodeStubDescriptor::CodeStubDescriptor(Isolate* isolate, uint32_t stub_key)
- : isolate_(isolate),
- stack_parameter_count_(no_reg),
- hint_stack_parameter_count_(-1),
- function_mode_(NOT_JS_FUNCTION_STUB_MODE),
- deoptimization_handler_(kNullAddress),
- miss_handler_(),
- has_miss_handler_(false) {
- CodeStub::InitializeDescriptor(isolate, stub_key, this);
-}
-
-
-void CodeStubDescriptor::Initialize(Address deoptimization_handler,
- int hint_stack_parameter_count,
- StubFunctionMode function_mode) {
- deoptimization_handler_ = deoptimization_handler;
- hint_stack_parameter_count_ = hint_stack_parameter_count;
- function_mode_ = function_mode;
-}
-
-
-void CodeStubDescriptor::Initialize(Register stack_parameter_count,
- Address deoptimization_handler,
- int hint_stack_parameter_count,
- StubFunctionMode function_mode) {
- Initialize(deoptimization_handler, hint_stack_parameter_count, function_mode);
- stack_parameter_count_ = stack_parameter_count;
-}
-
-
-bool CodeStub::FindCodeInCache(Code** code_out) {
- SimpleNumberDictionary* stubs = isolate()->heap()->code_stubs();
- int index = stubs->FindEntry(isolate(), GetKey());
- if (index != SimpleNumberDictionary::kNotFound) {
- *code_out = Code::cast(stubs->ValueAt(index));
- return true;
- }
- return false;
-}
-
-
-void CodeStub::RecordCodeGeneration(Handle<Code> code) {
- std::ostringstream os;
- os << *this;
- PROFILE(isolate(),
- CodeCreateEvent(CodeEventListener::STUB_TAG,
- AbstractCode::cast(*code), os.str().c_str()));
- Counters* counters = isolate()->counters();
- counters->total_stubs_code_size()->Increment(code->raw_instruction_size());
-#ifdef DEBUG
- code->VerifyEmbeddedObjects(isolate());
-#endif
-}
-
-
-void CodeStub::DeleteStubFromCacheForTesting() {
- Heap* heap = isolate_->heap();
- Handle<SimpleNumberDictionary> dict(heap->code_stubs(), isolate());
- int entry = dict->FindEntry(isolate(), GetKey());
- DCHECK_NE(SimpleNumberDictionary::kNotFound, entry);
- dict = SimpleNumberDictionary::DeleteEntry(isolate(), dict, entry);
- heap->SetRootCodeStubs(*dict);
-}
-
-Handle<Code> PlatformCodeStub::GenerateCode() {
- Factory* factory = isolate()->factory();
-
- // Generate the new code.
- // TODO(yangguo): remove this once we can serialize IC stubs.
- AssemblerOptions options = AssemblerOptions::Default(isolate(), true);
- MacroAssembler masm(isolate(), options, nullptr, 256,
- CodeObjectRequired::kYes);
-
- {
- // Update the static counter each time a new code stub is generated.
- isolate()->counters()->code_stubs()->Increment();
-
- // Generate the code for the stub.
- NoCurrentFrameScope scope(&masm);
- Generate(&masm);
- }
-
- // Generate the handler table.
- int handler_table_offset = GenerateHandlerTable(&masm);
-
- // Create the code object.
- CodeDesc desc;
- masm.GetCode(isolate(), &desc);
- // Copy the generated code into a heap object.
- Handle<Code> new_object = factory->NewCode(
- desc, Code::STUB, masm.CodeObject(), Builtins::kNoBuiltinId,
- MaybeHandle<ByteArray>(), DeoptimizationData::Empty(isolate()),
- NeedsImmovableCode(), GetKey(), false, 0, 0, handler_table_offset);
- return new_object;
-}
-
-
-Handle<Code> CodeStub::GetCode() {
- Heap* heap = isolate()->heap();
- Code* code;
- if (FindCodeInCache(&code)) {
- DCHECK(code->is_stub());
- return handle(code, isolate_);
- }
-
- {
- HandleScope scope(isolate());
- // Canonicalize handles, so that we can share constant pool entries pointing
- // to code targets without dereferencing their handles.
- CanonicalHandleScope canonical(isolate());
-
- Handle<Code> new_object = GenerateCode();
- DCHECK_EQ(GetKey(), new_object->stub_key());
- RecordCodeGeneration(new_object);
-
-#ifdef ENABLE_DISASSEMBLER
- if (FLAG_print_code_stubs) {
- CodeTracer::Scope trace_scope(isolate()->GetCodeTracer());
- OFStream os(trace_scope.file());
- std::ostringstream name;
- name << *this;
- new_object->Disassemble(name.str().c_str(), os);
- os << "\n";
- }
-#endif
-
- // Update the dictionary and the root in Heap.
- Handle<SimpleNumberDictionary> dict = SimpleNumberDictionary::Set(
- isolate(), handle(heap->code_stubs(), isolate_), GetKey(), new_object);
- heap->SetRootCodeStubs(*dict);
- code = *new_object;
- }
-
- Activate(code);
- DCHECK(!NeedsImmovableCode() || Heap::IsImmovable(code));
- return Handle<Code>(code, isolate());
-}
-
-CodeStub::Major CodeStub::GetMajorKey(const Code* code_stub) {
- return MajorKeyFromKey(code_stub->stub_key());
-}
-
-const char* CodeStub::MajorName(CodeStub::Major major_key) {
- switch (major_key) {
-#define DEF_CASE(name) case name: return #name "Stub";
- CODE_STUB_LIST(DEF_CASE)
-#undef DEF_CASE
- case NoCache:
- return "<NoCache>Stub";
- case NUMBER_OF_IDS:
- UNREACHABLE();
- }
- return nullptr;
-}
-
-
-void CodeStub::PrintBaseName(std::ostream& os) const { // NOLINT
- os << MajorName(MajorKey());
-}
-
-
-void CodeStub::PrintName(std::ostream& os) const { // NOLINT
- PrintBaseName(os);
- PrintState(os);
-}
-
-
-void CodeStub::Dispatch(Isolate* isolate, uint32_t key, void** value_out,
- DispatchedCall call) {
- switch (MajorKeyFromKey(key)) {
-#define DEF_CASE(NAME) \
- case NAME: { \
- NAME##Stub stub(key, isolate); \
- CodeStub* pstub = &stub; \
- call(pstub, value_out); \
- break; \
- }
- CODE_STUB_LIST(DEF_CASE)
-#undef DEF_CASE
- case NUMBER_OF_IDS:
- case NoCache:
- UNREACHABLE();
- break;
- }
-}
-
-int PlatformCodeStub::GenerateHandlerTable(MacroAssembler* masm) { return 0; }
-
-static void InitializeDescriptorDispatchedCall(CodeStub* stub,
- void** value_out) {
- CodeStubDescriptor* descriptor_out =
- reinterpret_cast<CodeStubDescriptor*>(value_out);
- descriptor_out->set_call_descriptor(stub->GetCallInterfaceDescriptor());
-}
-
-
-void CodeStub::InitializeDescriptor(Isolate* isolate, uint32_t key,
- CodeStubDescriptor* desc) {
- void** value_out = reinterpret_cast<void**>(desc);
- Dispatch(isolate, key, value_out, &InitializeDescriptorDispatchedCall);
-}
-
-
-void CodeStub::GetCodeDispatchCall(CodeStub* stub, void** value_out) {
- Handle<Code>* code_out = reinterpret_cast<Handle<Code>*>(value_out);
- *code_out = stub->GetCode();
-}
-
-
-MaybeHandle<Code> CodeStub::GetCode(Isolate* isolate, uint32_t key) {
- HandleScope scope(isolate);
- Handle<Code> code;
- void** value_out = reinterpret_cast<void**>(&code);
- Dispatch(isolate, key, value_out, &GetCodeDispatchCall);
- return scope.CloseAndEscape(code);
-}
-
-Handle<Code> TurboFanCodeStub::GenerateCode() {
- const char* name = CodeStub::MajorName(MajorKey());
- Zone zone(isolate()->allocator(), ZONE_NAME);
- CallInterfaceDescriptor descriptor(GetCallInterfaceDescriptor());
- compiler::CodeAssemblerState state(
- isolate(), &zone, descriptor, Code::STUB, name,
- PoisoningMitigationLevel::kDontPoison, GetKey());
- GenerateAssembly(&state);
- return compiler::CodeAssembler::GenerateCode(
- &state, AssemblerOptions::Default(isolate()));
-}
-
-TF_STUB(ElementsTransitionAndStoreStub, CodeStubAssembler) {
- Node* receiver = Parameter(Descriptor::kReceiver);
- Node* key = Parameter(Descriptor::kName);
- Node* value = Parameter(Descriptor::kValue);
- Node* map = Parameter(Descriptor::kMap);
- Node* slot = Parameter(Descriptor::kSlot);
- Node* vector = Parameter(Descriptor::kVector);
- Node* context = Parameter(Descriptor::kContext);
-
- Comment(
- "ElementsTransitionAndStoreStub: from_kind=%s, to_kind=%s,"
- " is_jsarray=%d, store_mode=%d",
- ElementsKindToString(stub->from_kind()),
- ElementsKindToString(stub->to_kind()), stub->is_jsarray(),
- stub->store_mode());
-
- Label miss(this);
-
- if (FLAG_trace_elements_transitions) {
- // Tracing elements transitions is the job of the runtime.
- Goto(&miss);
- } else {
- TransitionElementsKind(receiver, map, stub->from_kind(), stub->to_kind(),
- stub->is_jsarray(), &miss);
- EmitElementStore(receiver, key, value, stub->is_jsarray(), stub->to_kind(),
- stub->store_mode(), &miss, context);
- Return(value);
- }
-
- BIND(&miss);
- {
- Comment("Miss");
- TailCallRuntime(Runtime::kElementsTransitionAndStoreIC_Miss, context,
- receiver, key, value, map, slot, vector);
- }
-}
-
-// TODO(ishell): move to builtins-handler-gen.
-TF_STUB(KeyedLoadSloppyArgumentsStub, CodeStubAssembler) {
- Node* receiver = Parameter(Descriptor::kReceiver);
- Node* key = Parameter(Descriptor::kName);
- Node* slot = Parameter(Descriptor::kSlot);
- Node* vector = Parameter(Descriptor::kVector);
- Node* context = Parameter(Descriptor::kContext);
-
- Label miss(this);
-
- Node* result = LoadKeyedSloppyArguments(receiver, key, &miss);
- Return(result);
-
- BIND(&miss);
- {
- Comment("Miss");
- TailCallRuntime(Runtime::kKeyedLoadIC_Miss, context, receiver, key, slot,
- vector);
- }
-}
-
-// TODO(ishell): move to builtins-handler-gen.
-TF_STUB(KeyedStoreSloppyArgumentsStub, CodeStubAssembler) {
- Node* receiver = Parameter(Descriptor::kReceiver);
- Node* key = Parameter(Descriptor::kName);
- Node* value = Parameter(Descriptor::kValue);
- Node* slot = Parameter(Descriptor::kSlot);
- Node* vector = Parameter(Descriptor::kVector);
- Node* context = Parameter(Descriptor::kContext);
-
- Label miss(this);
-
- StoreKeyedSloppyArguments(receiver, key, value, &miss);
- Return(value);
-
- BIND(&miss);
- {
- Comment("Miss");
- TailCallRuntime(Runtime::kKeyedStoreIC_Miss, context, value, slot, vector,
- receiver, key);
- }
-}
-
-// TODO(ishell): move to builtins-handler-gen.
-TF_STUB(StoreInterceptorStub, CodeStubAssembler) {
- Node* receiver = Parameter(Descriptor::kReceiver);
- Node* name = Parameter(Descriptor::kName);
- Node* value = Parameter(Descriptor::kValue);
- Node* slot = Parameter(Descriptor::kSlot);
- Node* vector = Parameter(Descriptor::kVector);
- Node* context = Parameter(Descriptor::kContext);
- TailCallRuntime(Runtime::kStorePropertyWithInterceptor, context, value, slot,
- vector, receiver, name);
-}
-
-// TODO(ishell): move to builtins-handler-gen.
-TF_STUB(LoadIndexedInterceptorStub, CodeStubAssembler) {
- Node* receiver = Parameter(Descriptor::kReceiver);
- Node* key = Parameter(Descriptor::kName);
- Node* slot = Parameter(Descriptor::kSlot);
- Node* vector = Parameter(Descriptor::kVector);
- Node* context = Parameter(Descriptor::kContext);
-
- Label if_keyispositivesmi(this), if_keyisinvalid(this);
- Branch(TaggedIsPositiveSmi(key), &if_keyispositivesmi, &if_keyisinvalid);
- BIND(&if_keyispositivesmi);
- TailCallRuntime(Runtime::kLoadElementWithInterceptor, context, receiver, key);
-
- BIND(&if_keyisinvalid);
- TailCallRuntime(Runtime::kKeyedLoadIC_Miss, context, receiver, key, slot,
- vector);
-}
-
-int JSEntryStub::GenerateHandlerTable(MacroAssembler* masm) {
- int handler_table_offset = HandlerTable::EmitReturnTableStart(masm, 1);
- HandlerTable::EmitReturnEntry(masm, 0, handler_offset_);
- return handler_table_offset;
-}
-
-// TODO(ishell): move to builtins-handler-gen.
-TF_STUB(StoreSlowElementStub, CodeStubAssembler) {
- Node* receiver = Parameter(Descriptor::kReceiver);
- Node* name = Parameter(Descriptor::kName);
- Node* value = Parameter(Descriptor::kValue);
- Node* slot = Parameter(Descriptor::kSlot);
- Node* vector = Parameter(Descriptor::kVector);
- Node* context = Parameter(Descriptor::kContext);
-
- TailCallRuntime(Runtime::kKeyedStoreIC_Slow, context, value, slot, vector,
- receiver, name);
-}
-
-TF_STUB(StoreInArrayLiteralSlowStub, CodeStubAssembler) {
- Node* array = Parameter(Descriptor::kReceiver);
- Node* index = Parameter(Descriptor::kName);
- Node* value = Parameter(Descriptor::kValue);
- Node* context = Parameter(Descriptor::kContext);
- TailCallRuntime(Runtime::kStoreInArrayLiteralIC_Slow, context, value, array,
- index);
-}
-
-TF_STUB(StoreFastElementStub, CodeStubAssembler) {
- Comment("StoreFastElementStub: js_array=%d, elements_kind=%s, store_mode=%d",
- stub->is_js_array(), ElementsKindToString(stub->elements_kind()),
- stub->store_mode());
-
- Node* receiver = Parameter(Descriptor::kReceiver);
- Node* key = Parameter(Descriptor::kName);
- Node* value = Parameter(Descriptor::kValue);
- Node* slot = Parameter(Descriptor::kSlot);
- Node* vector = Parameter(Descriptor::kVector);
- Node* context = Parameter(Descriptor::kContext);
-
- Label miss(this);
-
- EmitElementStore(receiver, key, value, stub->is_js_array(),
- stub->elements_kind(), stub->store_mode(), &miss, context);
- Return(value);
-
- BIND(&miss);
- {
- Comment("Miss");
- TailCallRuntime(Runtime::kKeyedStoreIC_Miss, context, value, slot, vector,
- receiver, key);
- }
-}
-
-// static
-void StoreFastElementStub::GenerateAheadOfTime(Isolate* isolate) {
- if (FLAG_minimal) return;
- StoreFastElementStub(isolate, false, HOLEY_ELEMENTS, STANDARD_STORE)
- .GetCode();
- StoreFastElementStub(isolate, false, HOLEY_ELEMENTS,
- STORE_AND_GROW_NO_TRANSITION_HANDLE_COW)
- .GetCode();
- for (int i = FIRST_FAST_ELEMENTS_KIND; i <= LAST_FAST_ELEMENTS_KIND; i++) {
- ElementsKind kind = static_cast<ElementsKind>(i);
- StoreFastElementStub(isolate, true, kind, STANDARD_STORE).GetCode();
- StoreFastElementStub(isolate, true, kind,
- STORE_AND_GROW_NO_TRANSITION_HANDLE_COW)
- .GetCode();
- }
-}
-
-
-void ProfileEntryHookStub::EntryHookTrampoline(intptr_t function,
- intptr_t stack_pointer,
- Isolate* isolate) {
- FunctionEntryHook entry_hook = isolate->function_entry_hook();
- DCHECK_NOT_NULL(entry_hook);
- entry_hook(function, stack_pointer);
-}
-
-void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
- StoreFastElementStub::GenerateAheadOfTime(isolate);
-}
-
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/code-stubs.h b/deps/v8/src/code-stubs.h
deleted file mode 100644
index 4630fe7639..0000000000
--- a/deps/v8/src/code-stubs.h
+++ /dev/null
@@ -1,670 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_CODE_STUBS_H_
-#define V8_CODE_STUBS_H_
-
-#include "src/interface-descriptors.h"
-#include "src/type-hints.h"
-
-namespace v8 {
-namespace internal {
-
-// Forward declarations.
-class CodeStubDescriptor;
-class Isolate;
-namespace compiler {
-class CodeAssemblerState;
-}
-
-// List of code stubs used on all platforms.
-#define CODE_STUB_LIST_ALL_PLATFORMS(V) \
- /* --- PlatformCodeStubs --- */ \
- V(CallApiCallback) \
- V(CallApiGetter) \
- V(JSEntry) \
- V(ProfileEntryHook) \
- /* --- TurboFanCodeStubs --- */ \
- V(StoreSlowElement) \
- V(StoreInArrayLiteralSlow) \
- V(ElementsTransitionAndStore) \
- V(KeyedLoadSloppyArguments) \
- V(KeyedStoreSloppyArguments) \
- V(StoreFastElement) \
- V(StoreInterceptor) \
- V(LoadIndexedInterceptor)
-
-// List of code stubs only used on ARM 32 bits platforms.
-#if V8_TARGET_ARCH_ARM
-#define CODE_STUB_LIST_ARM(V) V(DirectCEntry)
-
-#else
-#define CODE_STUB_LIST_ARM(V)
-#endif
-
-// List of code stubs only used on ARM 64 bits platforms.
-#if V8_TARGET_ARCH_ARM64
-#define CODE_STUB_LIST_ARM64(V) V(DirectCEntry)
-
-#else
-#define CODE_STUB_LIST_ARM64(V)
-#endif
-
-// List of code stubs only used on PPC platforms.
-#ifdef V8_TARGET_ARCH_PPC
-#define CODE_STUB_LIST_PPC(V) V(DirectCEntry)
-#else
-#define CODE_STUB_LIST_PPC(V)
-#endif
-
-// List of code stubs only used on MIPS platforms.
-#if V8_TARGET_ARCH_MIPS
-#define CODE_STUB_LIST_MIPS(V) V(DirectCEntry)
-#elif V8_TARGET_ARCH_MIPS64
-#define CODE_STUB_LIST_MIPS(V) V(DirectCEntry)
-#else
-#define CODE_STUB_LIST_MIPS(V)
-#endif
-
-// List of code stubs only used on S390 platforms.
-#ifdef V8_TARGET_ARCH_S390
-#define CODE_STUB_LIST_S390(V) V(DirectCEntry)
-#else
-#define CODE_STUB_LIST_S390(V)
-#endif
-
-// Combined list of code stubs.
-#define CODE_STUB_LIST(V) \
- CODE_STUB_LIST_ALL_PLATFORMS(V) \
- CODE_STUB_LIST_ARM(V) \
- CODE_STUB_LIST_ARM64(V) \
- CODE_STUB_LIST_PPC(V) \
- CODE_STUB_LIST_MIPS(V) \
- CODE_STUB_LIST_S390(V)
-
-static const int kHasReturnedMinusZeroSentinel = 1;
-
-class CodeStub : public ZoneObject {
- public:
- enum Major {
- // TODO(mvstanton): eliminate the NoCache key by getting rid
- // of the non-monomorphic-cache.
- NoCache = 0, // marker for stubs that do custom caching]
-#define DEF_ENUM(name) name,
- CODE_STUB_LIST(DEF_ENUM)
-#undef DEF_ENUM
- NUMBER_OF_IDS
- };
-
- // Retrieve the code for the stub. Generate the code if needed.
- Handle<Code> GetCode();
-
- static Major MajorKeyFromKey(uint32_t key) {
- return static_cast<Major>(MajorKeyBits::decode(key));
- }
- static uint32_t MinorKeyFromKey(uint32_t key) {
- return MinorKeyBits::decode(key);
- }
-
- // Gets the major key from a code object that is a code stub or binary op IC.
- static Major GetMajorKey(const Code* code_stub);
-
- static uint32_t NoCacheKey() { return MajorKeyBits::encode(NoCache); }
-
- static const char* MajorName(Major major_key);
-
- explicit CodeStub(Isolate* isolate) : minor_key_(0), isolate_(isolate) {}
- virtual ~CodeStub() = default;
-
- static void GenerateStubsAheadOfTime(Isolate* isolate);
-
- // Some stubs put untagged junk on the stack that cannot be scanned by the
- // GC. This means that we must be statically sure that no GC can occur while
- // they are running. If that is the case they should override this to return
- // true, which will cause an assertion if we try to call something that can
- // GC or if we try to put a stack frame on top of the junk, which would not
- // result in a traversable stack.
- virtual bool SometimesSetsUpAFrame() { return true; }
-
- // Lookup the code in the (possibly custom) cache.
- bool FindCodeInCache(Code** code_out);
-
- virtual CallInterfaceDescriptor GetCallInterfaceDescriptor() const = 0;
-
- virtual int GetStackParameterCount() const {
- return GetCallInterfaceDescriptor().GetStackParameterCount();
- }
-
- static void InitializeDescriptor(Isolate* isolate, uint32_t key,
- CodeStubDescriptor* desc);
-
- static MaybeHandle<Code> GetCode(Isolate* isolate, uint32_t key);
-
- // Returns information for computing the number key.
- virtual Major MajorKey() const = 0;
- uint32_t MinorKey() const { return minor_key_; }
-
- friend std::ostream& operator<<(std::ostream& os, const CodeStub& s) {
- s.PrintName(os);
- return os;
- }
-
- Isolate* isolate() const { return isolate_; }
- void set_isolate(Isolate* isolate) {
- DCHECK_NOT_NULL(isolate);
- DCHECK(isolate_ == nullptr || isolate_ == isolate);
- isolate_ = isolate;
- }
-
- void DeleteStubFromCacheForTesting();
-
- protected:
- CodeStub(uint32_t key, Isolate* isolate)
- : minor_key_(MinorKeyFromKey(key)), isolate_(isolate) {}
-
- // Generates the assembler code for the stub.
- virtual Handle<Code> GenerateCode() = 0;
-
- // Returns whether the code generated for this stub needs to be allocated as
- // a fixed (non-moveable) code object.
- // TODO(jgruber): Only required by DirectCEntryStub. Can be removed when/if
- // that is ported to a builtin.
- virtual Movability NeedsImmovableCode() { return kMovable; }
-
- virtual void PrintName(std::ostream& os) const; // NOLINT
- virtual void PrintBaseName(std::ostream& os) const; // NOLINT
- virtual void PrintState(std::ostream& os) const { ; } // NOLINT
-
- // Computes the key based on major and minor.
- uint32_t GetKey() {
- DCHECK(static_cast<int>(MajorKey()) < NUMBER_OF_IDS);
- return MinorKeyBits::encode(MinorKey()) | MajorKeyBits::encode(MajorKey());
- }
-
- uint32_t minor_key_;
-
- private:
- // Perform bookkeeping required after code generation when stub code is
- // initially generated.
- void RecordCodeGeneration(Handle<Code> code);
-
- // Activate newly generated stub. Is called after
- // registering stub in the stub cache.
- virtual void Activate(Code* code) { }
-
- // We use this dispatch to statically instantiate the correct code stub for
- // the given stub key and call the passed function with that code stub.
- typedef void (*DispatchedCall)(CodeStub* stub, void** value_out);
- static void Dispatch(Isolate* isolate, uint32_t key, void** value_out,
- DispatchedCall call);
-
- static void GetCodeDispatchCall(CodeStub* stub, void** value_out);
-
- STATIC_ASSERT(NUMBER_OF_IDS < (1 << kStubMajorKeyBits));
- class MajorKeyBits: public BitField<uint32_t, 0, kStubMajorKeyBits> {};
- class MinorKeyBits: public BitField<uint32_t,
- kStubMajorKeyBits, kStubMinorKeyBits> {}; // NOLINT
-
- friend class BreakPointIterator;
-
- Isolate* isolate_;
-};
-
-
-#define DEFINE_CODE_STUB_BASE(NAME, SUPER) \
- public: \
- NAME(uint32_t key, Isolate* isolate) : SUPER(key, isolate) {} \
- \
- private: \
- DISALLOW_COPY_AND_ASSIGN(NAME)
-
-
-#define DEFINE_CODE_STUB(NAME, SUPER) \
- public: \
- inline Major MajorKey() const override { return NAME; }; \
- \
- DEFINE_CODE_STUB_BASE(NAME##Stub, SUPER)
-
-
-#define DEFINE_PLATFORM_CODE_STUB(NAME, SUPER) \
- private: \
- void Generate(MacroAssembler* masm) override; \
- DEFINE_CODE_STUB(NAME, SUPER)
-
-
-#define DEFINE_TURBOFAN_CODE_STUB(NAME, SUPER) \
- public: \
- void GenerateAssembly(compiler::CodeAssemblerState* state) const override; \
- DEFINE_CODE_STUB(NAME, SUPER)
-
-#define DEFINE_CALL_INTERFACE_DESCRIPTOR(NAME) \
- public: \
- typedef NAME##Descriptor Descriptor; \
- CallInterfaceDescriptor GetCallInterfaceDescriptor() const override { \
- return Descriptor(); \
- }
-
-// There are some code stubs we just can't describe right now with a
-// CallInterfaceDescriptor. Isolate behavior for those cases with this macro.
-// An attempt to retrieve a descriptor will fail.
-#define DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR() \
- public: \
- CallInterfaceDescriptor GetCallInterfaceDescriptor() const override { \
- UNREACHABLE(); \
- return CallInterfaceDescriptor(); \
- }
-
-
-class PlatformCodeStub : public CodeStub {
- public:
- // Retrieve the code for the stub. Generate the code if needed.
- Handle<Code> GenerateCode() override;
-
- protected:
- explicit PlatformCodeStub(Isolate* isolate) : CodeStub(isolate) {}
-
- // Generates the assembler code for the stub.
- virtual void Generate(MacroAssembler* masm) = 0;
-
- // Generates the exception handler table for the stub.
- virtual int GenerateHandlerTable(MacroAssembler* masm);
-
- DEFINE_CODE_STUB_BASE(PlatformCodeStub, CodeStub);
-};
-
-
-enum StubFunctionMode { NOT_JS_FUNCTION_STUB_MODE, JS_FUNCTION_STUB_MODE };
-
-
-class CodeStubDescriptor {
- public:
- explicit CodeStubDescriptor(CodeStub* stub);
-
- CodeStubDescriptor(Isolate* isolate, uint32_t stub_key);
-
- void Initialize(Address deoptimization_handler = kNullAddress,
- int hint_stack_parameter_count = -1,
- StubFunctionMode function_mode = NOT_JS_FUNCTION_STUB_MODE);
- void Initialize(Register stack_parameter_count,
- Address deoptimization_handler = kNullAddress,
- int hint_stack_parameter_count = -1,
- StubFunctionMode function_mode = NOT_JS_FUNCTION_STUB_MODE);
-
- void SetMissHandler(Runtime::FunctionId id) {
- miss_handler_id_ = id;
- miss_handler_ = ExternalReference::Create(Runtime::FunctionForId(id));
- has_miss_handler_ = true;
- // Our miss handler infrastructure doesn't currently support
- // variable stack parameter counts.
- DCHECK(!stack_parameter_count_.is_valid());
- }
-
- void set_call_descriptor(CallInterfaceDescriptor d) {
- call_descriptor_ = std::move(d);
- }
- CallInterfaceDescriptor call_descriptor() const { return call_descriptor_; }
-
- int GetRegisterParameterCount() const {
- return call_descriptor().GetRegisterParameterCount();
- }
-
- int GetStackParameterCount() const {
- return call_descriptor().GetStackParameterCount();
- }
-
- int GetParameterCount() const {
- return call_descriptor().GetParameterCount();
- }
-
- Register GetRegisterParameter(int index) const {
- return call_descriptor().GetRegisterParameter(index);
- }
-
- MachineType GetParameterType(int index) const {
- return call_descriptor().GetParameterType(index);
- }
-
- ExternalReference miss_handler() const {
- DCHECK(has_miss_handler_);
- return miss_handler_;
- }
-
- Runtime::FunctionId miss_handler_id() const {
- DCHECK(has_miss_handler_);
- return miss_handler_id_;
- }
-
- bool has_miss_handler() const {
- return has_miss_handler_;
- }
-
- int GetHandlerParameterCount() const {
- int params = GetParameterCount();
- if (PassesArgumentsToDeoptimizationHandler()) {
- params += 1;
- }
- return params;
- }
-
- int hint_stack_parameter_count() const { return hint_stack_parameter_count_; }
- Register stack_parameter_count() const { return stack_parameter_count_; }
- StubFunctionMode function_mode() const { return function_mode_; }
- Address deoptimization_handler() const { return deoptimization_handler_; }
-
- private:
- bool PassesArgumentsToDeoptimizationHandler() const {
- return stack_parameter_count_.is_valid();
- }
-
- Isolate* isolate_;
- CallInterfaceDescriptor call_descriptor_;
- Register stack_parameter_count_;
- // If hint_stack_parameter_count_ > 0, the code stub can optimize the
- // return sequence. Default value is -1, which means it is ignored.
- int hint_stack_parameter_count_;
- StubFunctionMode function_mode_;
-
- Address deoptimization_handler_;
-
- ExternalReference miss_handler_;
- Runtime::FunctionId miss_handler_id_;
- bool has_miss_handler_;
-};
-
-
-class TurboFanCodeStub : public CodeStub {
- public:
- // Retrieve the code for the stub. Generate the code if needed.
- Handle<Code> GenerateCode() override;
-
- int GetStackParameterCount() const override {
- return GetCallInterfaceDescriptor().GetStackParameterCount();
- }
-
- protected:
- explicit TurboFanCodeStub(Isolate* isolate) : CodeStub(isolate) {}
-
- virtual void GenerateAssembly(compiler::CodeAssemblerState* state) const = 0;
-
- private:
- DEFINE_CODE_STUB_BASE(TurboFanCodeStub, CodeStub);
-};
-
-} // namespace internal
-} // namespace v8
-
-#if V8_TARGET_ARCH_IA32
-#elif V8_TARGET_ARCH_X64
-#elif V8_TARGET_ARCH_ARM64
-#include "src/arm64/code-stubs-arm64.h"
-#elif V8_TARGET_ARCH_ARM
-#include "src/arm/code-stubs-arm.h"
-#elif V8_TARGET_ARCH_PPC
-#include "src/ppc/code-stubs-ppc.h"
-#elif V8_TARGET_ARCH_MIPS
-#include "src/mips/code-stubs-mips.h"
-#elif V8_TARGET_ARCH_MIPS64
-#include "src/mips64/code-stubs-mips64.h"
-#elif V8_TARGET_ARCH_S390
-#include "src/s390/code-stubs-s390.h"
-#else
-#error Unsupported target architecture.
-#endif
-
-namespace v8 {
-namespace internal {
-
-// TODO(jgruber): Convert this stub into a builtin.
-class StoreInterceptorStub : public TurboFanCodeStub {
- public:
- explicit StoreInterceptorStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
-
- DEFINE_CALL_INTERFACE_DESCRIPTOR(StoreWithVector);
- DEFINE_TURBOFAN_CODE_STUB(StoreInterceptor, TurboFanCodeStub);
-};
-
-// TODO(jgruber): Convert this stub into a builtin.
-class LoadIndexedInterceptorStub : public TurboFanCodeStub {
- public:
- explicit LoadIndexedInterceptorStub(Isolate* isolate)
- : TurboFanCodeStub(isolate) {}
-
- DEFINE_CALL_INTERFACE_DESCRIPTOR(LoadWithVector);
- DEFINE_TURBOFAN_CODE_STUB(LoadIndexedInterceptor, TurboFanCodeStub);
-};
-
-// TODO(jgruber): Convert this stub into a builtin.
-class KeyedLoadSloppyArgumentsStub : public TurboFanCodeStub {
- public:
- explicit KeyedLoadSloppyArgumentsStub(Isolate* isolate)
- : TurboFanCodeStub(isolate) {}
-
- protected:
- DEFINE_CALL_INTERFACE_DESCRIPTOR(LoadWithVector);
- DEFINE_TURBOFAN_CODE_STUB(KeyedLoadSloppyArguments, TurboFanCodeStub);
-};
-
-
-class CommonStoreModeBits : public BitField<KeyedAccessStoreMode, 0, 3> {};
-
-class KeyedStoreSloppyArgumentsStub : public TurboFanCodeStub {
- public:
- explicit KeyedStoreSloppyArgumentsStub(Isolate* isolate,
- KeyedAccessStoreMode mode)
- : TurboFanCodeStub(isolate) {
- minor_key_ = CommonStoreModeBits::encode(mode);
- }
-
- protected:
- DEFINE_CALL_INTERFACE_DESCRIPTOR(StoreWithVector);
- DEFINE_TURBOFAN_CODE_STUB(KeyedStoreSloppyArguments, TurboFanCodeStub);
-};
-
-class CallApiCallbackStub : public PlatformCodeStub {
- public:
- static const int kArgBits = 7;
- static const int kArgMax = (1 << kArgBits) - 1;
-
- CallApiCallbackStub(Isolate* isolate, int argc)
- : PlatformCodeStub(isolate) {
-    CHECK_LE(0, argc);  // The cases with argc in {0, 1} are covered by builtins.
- CHECK_LE(argc, kArgMax);
- minor_key_ = ArgumentBits::encode(argc);
- }
-
- private:
- int argc() const { return ArgumentBits::decode(minor_key_); }
-
- class ArgumentBits : public BitField<int, 0, kArgBits> {};
-
- friend class Builtins; // For generating the related builtin.
-
- DEFINE_CALL_INTERFACE_DESCRIPTOR(ApiCallback);
- DEFINE_PLATFORM_CODE_STUB(CallApiCallback, PlatformCodeStub);
-};
-
-// TODO(jgruber): This stub only exists to avoid code duplication between
-// code-stubs-<arch>.cc and builtins-<arch>.cc. If CallApiCallbackStub is ever
-// completely removed, CallApiGetterStub can also be deleted.
-class CallApiGetterStub : public PlatformCodeStub {
- private:
- // For generating the related builtin.
- explicit CallApiGetterStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
- friend class Builtins;
-
- DEFINE_CALL_INTERFACE_DESCRIPTOR(ApiGetter);
- DEFINE_PLATFORM_CODE_STUB(CallApiGetter, PlatformCodeStub);
-};
-
-class JSEntryStub : public PlatformCodeStub {
- public:
- enum class SpecialTarget { kNone, kRunMicrotasks };
- JSEntryStub(Isolate* isolate, StackFrame::Type type)
- : PlatformCodeStub(isolate) {
- DCHECK(type == StackFrame::ENTRY || type == StackFrame::CONSTRUCT_ENTRY);
- minor_key_ = StackFrameTypeBits::encode(type) |
- SpecialTargetBits::encode(SpecialTarget::kNone);
- }
-
- JSEntryStub(Isolate* isolate, SpecialTarget target)
- : PlatformCodeStub(isolate) {
- minor_key_ = StackFrameTypeBits::encode(StackFrame::ENTRY) |
- SpecialTargetBits::encode(target);
- }
-
- private:
- int GenerateHandlerTable(MacroAssembler* masm) override;
-
- void PrintName(std::ostream& os) const override { // NOLINT
- os << (type() == StackFrame::ENTRY ? "JSEntryStub"
- : "JSConstructEntryStub");
- }
-
- StackFrame::Type type() const {
- return StackFrameTypeBits::decode(minor_key_);
- }
-
- SpecialTarget special_target() const {
- return SpecialTargetBits::decode(minor_key_);
- }
-
- Handle<Code> EntryTrampoline() {
- switch (special_target()) {
- case SpecialTarget::kNone:
- return (type() == StackFrame::CONSTRUCT_ENTRY)
- ? BUILTIN_CODE(isolate(), JSConstructEntryTrampoline)
- : BUILTIN_CODE(isolate(), JSEntryTrampoline);
- case SpecialTarget::kRunMicrotasks:
- return BUILTIN_CODE(isolate(), RunMicrotasks);
- }
- UNREACHABLE();
- return Handle<Code>();
- }
-
- class StackFrameTypeBits : public BitField<StackFrame::Type, 0, 5> {};
- class SpecialTargetBits
- : public BitField<SpecialTarget, StackFrameTypeBits::kNext, 1> {};
-
- int handler_offset_;
-
- DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
- DEFINE_PLATFORM_CODE_STUB(JSEntry, PlatformCodeStub);
-};
-
-class StoreFastElementStub : public TurboFanCodeStub {
- public:
- StoreFastElementStub(Isolate* isolate, bool is_js_array,
- ElementsKind elements_kind, KeyedAccessStoreMode mode)
- : TurboFanCodeStub(isolate) {
- minor_key_ = CommonStoreModeBits::encode(mode) |
- ElementsKindBits::encode(elements_kind) |
- IsJSArrayBits::encode(is_js_array);
- }
-
- static void GenerateAheadOfTime(Isolate* isolate);
-
- bool is_js_array() const { return IsJSArrayBits::decode(minor_key_); }
-
- ElementsKind elements_kind() const {
- return ElementsKindBits::decode(minor_key_);
- }
-
- KeyedAccessStoreMode store_mode() const {
- return CommonStoreModeBits::decode(minor_key_);
- }
-
- private:
- class ElementsKindBits
- : public BitField<ElementsKind, CommonStoreModeBits::kNext, 8> {};
- class IsJSArrayBits : public BitField<bool, ElementsKindBits::kNext, 1> {};
-
- DEFINE_CALL_INTERFACE_DESCRIPTOR(StoreWithVector);
- DEFINE_TURBOFAN_CODE_STUB(StoreFastElement, TurboFanCodeStub);
-};
-
-class StoreSlowElementStub : public TurboFanCodeStub {
- public:
- StoreSlowElementStub(Isolate* isolate, KeyedAccessStoreMode mode)
- : TurboFanCodeStub(isolate) {
- minor_key_ = CommonStoreModeBits::encode(mode);
- }
-
- private:
- DEFINE_CALL_INTERFACE_DESCRIPTOR(StoreWithVector);
- DEFINE_TURBOFAN_CODE_STUB(StoreSlowElement, TurboFanCodeStub);
-};
-
-class StoreInArrayLiteralSlowStub : public TurboFanCodeStub {
- public:
- StoreInArrayLiteralSlowStub(Isolate* isolate, KeyedAccessStoreMode mode)
- : TurboFanCodeStub(isolate) {
- minor_key_ = CommonStoreModeBits::encode(mode);
- }
-
- private:
- DEFINE_CALL_INTERFACE_DESCRIPTOR(StoreWithVector);
- DEFINE_TURBOFAN_CODE_STUB(StoreInArrayLiteralSlow, TurboFanCodeStub);
-};
-
-class ElementsTransitionAndStoreStub : public TurboFanCodeStub {
- public:
- ElementsTransitionAndStoreStub(Isolate* isolate, ElementsKind from_kind,
- ElementsKind to_kind, bool is_jsarray,
- KeyedAccessStoreMode store_mode)
- : TurboFanCodeStub(isolate) {
- minor_key_ = CommonStoreModeBits::encode(store_mode) |
- FromBits::encode(from_kind) | ToBits::encode(to_kind) |
- IsJSArrayBits::encode(is_jsarray);
- }
-
- ElementsKind from_kind() const { return FromBits::decode(minor_key_); }
- ElementsKind to_kind() const { return ToBits::decode(minor_key_); }
- bool is_jsarray() const { return IsJSArrayBits::decode(minor_key_); }
- KeyedAccessStoreMode store_mode() const {
- return CommonStoreModeBits::decode(minor_key_);
- }
-
- private:
- class FromBits
- : public BitField<ElementsKind, CommonStoreModeBits::kNext, 8> {};
- class ToBits : public BitField<ElementsKind, 11, 8> {};
- class IsJSArrayBits : public BitField<bool, 19, 1> {};
-
- DEFINE_CALL_INTERFACE_DESCRIPTOR(StoreTransition);
- DEFINE_TURBOFAN_CODE_STUB(ElementsTransitionAndStore, TurboFanCodeStub);
-};
-
-// TODO(jgruber): Convert this stub into a builtin.
-class ProfileEntryHookStub : public PlatformCodeStub {
- public:
- explicit ProfileEntryHookStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
-
- // The profile entry hook function is not allowed to cause a GC.
- bool SometimesSetsUpAFrame() override { return false; }
-
- // Generates a call to the entry hook if it's enabled.
- static void MaybeCallEntryHook(MacroAssembler* masm);
- static void MaybeCallEntryHookDelayed(TurboAssembler* tasm, Zone* zone);
-
- private:
- static void EntryHookTrampoline(intptr_t function,
- intptr_t stack_pointer,
- Isolate* isolate);
-
- // ProfileEntryHookStub is called at the start of a function, so it has the
- // same register set.
- DEFINE_CALL_INTERFACE_DESCRIPTOR(CallFunction)
- DEFINE_PLATFORM_CODE_STUB(ProfileEntryHook, PlatformCodeStub);
-};
-
-
-#undef DEFINE_CALL_INTERFACE_DESCRIPTOR
-#undef DEFINE_PLATFORM_CODE_STUB
-#undef DEFINE_CODE_STUB
-#undef DEFINE_CODE_STUB_BASE
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_CODE_STUBS_H_
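The stub classes in the deleted header above all pack their immutable parameters into minor_key_ through chained BitField specializations (CommonStoreModeBits, ElementsKindBits, IsJSArrayBits, and so on). A minimal, self-contained sketch of that encoding pattern, using hypothetical standalone names rather than the real v8::internal types:

    #include <cstdint>

    // BitField<T, shift, size> stores a T in bits [shift, shift + size) of a
    // packed integer; kNext lets the next field start where this one ends.
    template <class T, int shift, int size>
    struct BitField {
      static constexpr uint32_t kMask = ((1u << size) - 1) << shift;
      static constexpr int kNext = shift + size;
      static constexpr uint32_t encode(T value) {
        return static_cast<uint32_t>(value) << shift;
      }
      static constexpr T decode(uint32_t packed) {
        return static_cast<T>((packed & kMask) >> shift);
      }
    };

    enum KeyedAccessStoreMode : uint32_t { STANDARD_STORE };  // placeholder
    enum ElementsKind : uint32_t { PACKED_SMI_ELEMENTS };     // placeholder

    // Mirrors StoreFastElementStub: 3 mode bits, 8 kind bits, 1 JSArray bit.
    using StoreModeBits = BitField<KeyedAccessStoreMode, 0, 3>;
    using KindBits      = BitField<ElementsKind, StoreModeBits::kNext, 8>;
    using IsJSArrayBits = BitField<bool, KindBits::kNext, 1>;

    uint32_t EncodeMinorKey(KeyedAccessStoreMode mode, ElementsKind kind,
                            bool is_js_array) {
      return StoreModeBits::encode(mode) | KindBits::encode(kind) |
             IsJSArrayBits::encode(is_js_array);
    }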
diff --git a/deps/v8/src/codegen.cc b/deps/v8/src/codegen.cc
deleted file mode 100644
index 198ee8f572..0000000000
--- a/deps/v8/src/codegen.cc
+++ /dev/null
@@ -1,32 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/codegen.h"
-
-#include <cmath>
-#include <memory>
-
-#include "src/flags.h"
-
-namespace v8 {
-namespace internal {
-
-#define UNARY_MATH_FUNCTION(name, generator) \
- static UnaryMathFunction fast_##name##_function = nullptr; \
- double std_##name(double x) { return std::name(x); } \
- void init_fast_##name##_function() { \
- if (FLAG_fast_math) fast_##name##_function = generator(); \
- if (!fast_##name##_function) fast_##name##_function = std_##name; \
- } \
- void lazily_initialize_fast_##name() { \
- if (!fast_##name##_function) init_fast_##name##_function(); \
- } \
- double fast_##name(double x) { return (*fast_##name##_function)(x); }
-
-UNARY_MATH_FUNCTION(sqrt, CreateSqrtFunction)
-
-#undef UNARY_MATH_FUNCTION
-
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/codegen.h b/deps/v8/src/codegen.h
deleted file mode 100644
index 3e07c86fc2..0000000000
--- a/deps/v8/src/codegen.h
+++ /dev/null
@@ -1,25 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_CODEGEN_H_
-#define V8_CODEGEN_H_
-
-namespace v8 {
-namespace internal {
-
-// Results of the library implementation of transcendental functions may differ
-// from those of the code we generate. Therefore we use the same generated
-// code in both the runtime and compiled code.
-typedef double (*UnaryMathFunction)(double x);
-
-UnaryMathFunction CreateSqrtFunction();
-
-// Custom implementation of math functions.
-double fast_sqrt(double input);
-void lazily_initialize_fast_sqrt();
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_CODEGEN_H_
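For reference, the UNARY_MATH_FUNCTION(sqrt, CreateSqrtFunction) invocation in the deleted codegen.cc above expands to roughly the following, which is all the deleted header's fast_sqrt API ever was (a sketch of the macro expansion; FLAG_fast_math and CreateSqrtFunction come from the surrounding V8 sources):

    static UnaryMathFunction fast_sqrt_function = nullptr;

    double std_sqrt(double x) { return std::sqrt(x); }

    void init_fast_sqrt_function() {
      // Prefer the generated code when --fast-math is on and code generation
      // succeeded; otherwise fall back to the C++ library implementation.
      if (FLAG_fast_math) fast_sqrt_function = CreateSqrtFunction();
      if (!fast_sqrt_function) fast_sqrt_function = std_sqrt;
    }

    void lazily_initialize_fast_sqrt() {
      if (!fast_sqrt_function) init_fast_sqrt_function();
    }

    double fast_sqrt(double x) { return (*fast_sqrt_function)(x); }

Callers were expected to call lazily_initialize_fast_sqrt() once before the first fast_sqrt(x).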
diff --git a/deps/v8/src/compilation-cache.cc b/deps/v8/src/compilation-cache.cc
index 0068c83362..e210dd4025 100644
--- a/deps/v8/src/compilation-cache.cc
+++ b/deps/v8/src/compilation-cache.cc
@@ -7,8 +7,10 @@
#include "src/counters.h"
#include "src/globals.h"
#include "src/heap/factory.h"
+#include "src/log.h"
#include "src/objects-inl.h"
#include "src/objects/compilation-cache-inl.h"
+#include "src/objects/slots.h"
#include "src/visitors.h"
namespace v8 {
@@ -41,7 +43,7 @@ Handle<CompilationCacheTable> CompilationSubCache::GetTable(int generation) {
result = CompilationCacheTable::New(isolate(), kInitialCacheSize);
tables_[generation] = *result;
} else {
- CompilationCacheTable* table =
+ CompilationCacheTable table =
CompilationCacheTable::cast(tables_[generation]);
result = Handle<CompilationCacheTable>(table, isolate());
}
@@ -67,12 +69,14 @@ void CompilationSubCache::Age() {
}
void CompilationSubCache::Iterate(RootVisitor* v) {
- v->VisitRootPointers(Root::kCompilationCache, nullptr, &tables_[0],
- &tables_[generations_]);
+ v->VisitRootPointers(Root::kCompilationCache, nullptr,
+ FullObjectSlot(&tables_[0]),
+ FullObjectSlot(&tables_[generations_]));
}
void CompilationSubCache::Clear() {
- MemsetPointer(tables_, ReadOnlyRoots(isolate()).undefined_value(),
+ MemsetPointer(reinterpret_cast<Address*>(tables_),
+ ReadOnlyRoots(isolate()).undefined_value()->ptr(),
generations_);
}
@@ -135,8 +139,8 @@ MaybeHandle<SharedFunctionInfo> CompilationCacheScript::Lookup(
const int generation = 0;
DCHECK_EQ(generations(), 1);
Handle<CompilationCacheTable> table = GetTable(generation);
- MaybeHandle<SharedFunctionInfo> probe =
- table->LookupScript(source, native_context, language_mode);
+ MaybeHandle<SharedFunctionInfo> probe = CompilationCacheTable::LookupScript(
+ table, source, native_context, language_mode);
Handle<SharedFunctionInfo> function_info;
if (probe.ToHandle(&function_info)) {
// Break when we've found a suitable shared function info that
@@ -190,8 +194,8 @@ InfoCellPair CompilationCacheEval::Lookup(Handle<String> source,
const int generation = 0;
DCHECK_EQ(generations(), 1);
Handle<CompilationCacheTable> table = GetTable(generation);
- result = table->LookupEval(source, outer_info, native_context, language_mode,
- position);
+ result = CompilationCacheTable::LookupEval(
+ table, source, outer_info, native_context, language_mode, position);
if (result.has_shared()) {
isolate()->counters()->compilation_cache_hits()->Increment();
} else {
diff --git a/deps/v8/src/compilation-cache.h b/deps/v8/src/compilation-cache.h
index ed3f1986b6..883dd4ef37 100644
--- a/deps/v8/src/compilation-cache.h
+++ b/deps/v8/src/compilation-cache.h
@@ -26,7 +26,7 @@ class CompilationSubCache {
CompilationSubCache(Isolate* isolate, int generations)
: isolate_(isolate),
generations_(generations) {
- tables_ = NewArray<Object*>(generations);
+ tables_ = NewArray<Object>(generations);
}
~CompilationSubCache() { DeleteArray(tables_); }
@@ -68,7 +68,7 @@ class CompilationSubCache {
private:
Isolate* isolate_;
int generations_; // Number of generations.
- Object** tables_; // Compilation cache tables - one for each generation.
+ Object* tables_; // Compilation cache tables - one for each generation.
DISALLOW_IMPLICIT_CONSTRUCTORS(CompilationSubCache);
};
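The Object* to Object change above is part of V8's migration of Object from a plain pointer type to a value type that wraps the tagged Address itself; the reinterpret_cast<Address*>(tables_) and undefined_value()->ptr() calls in the compilation-cache.cc hunk above only make sense under that layout. A rough sketch of the idea, with simplified hypothetical definitions:

    #include <cstdint>
    using Address = uintptr_t;

    // After the migration, Object is the tagged word, not a pointee, so an
    // array of cache tables is an array of Object values.
    class Object {
     public:
      Object() : ptr_(0) {}
      explicit Object(Address ptr) : ptr_(ptr) {}
      Address ptr() const { return ptr_; }
     private:
      Address ptr_;  // the tagged pointer payload itself
    };

    // Clearing N slots can therefore write raw Addresses directly:
    void MemsetPointer(Address* dest, Address value, size_t count) {
      for (size_t i = 0; i < count; ++i) dest[i] = value;
    }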
diff --git a/deps/v8/src/compilation-statistics.cc b/deps/v8/src/compilation-statistics.cc
index c3a990f41e..031df63d98 100644
--- a/deps/v8/src/compilation-statistics.cc
+++ b/deps/v8/src/compilation-statistics.cc
@@ -14,7 +14,7 @@ namespace internal {
void CompilationStatistics::RecordPhaseStats(const char* phase_kind_name,
const char* phase_name,
const BasicStats& stats) {
- base::LockGuard<base::Mutex> guard(&record_mutex_);
+ base::MutexGuard guard(&record_mutex_);
std::string phase_name_str(phase_name);
auto it = phase_map_.find(phase_name_str);
@@ -28,7 +28,7 @@ void CompilationStatistics::RecordPhaseStats(const char* phase_kind_name,
void CompilationStatistics::RecordPhaseKindStats(const char* phase_kind_name,
const BasicStats& stats) {
- base::LockGuard<base::Mutex> guard(&record_mutex_);
+ base::MutexGuard guard(&record_mutex_);
std::string phase_kind_name_str(phase_kind_name);
auto it = phase_kind_map_.find(phase_kind_name_str);
@@ -43,7 +43,7 @@ void CompilationStatistics::RecordPhaseKindStats(const char* phase_kind_name,
void CompilationStatistics::RecordTotalStats(size_t source_size,
const BasicStats& stats) {
- base::LockGuard<base::Mutex> guard(&record_mutex_);
+ base::MutexGuard guard(&record_mutex_);
source_size += source_size;
total_stats_.Accumulate(stats);
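base::MutexGuard, substituted for base::LockGuard<base::Mutex> throughout this commit, is presumably just an alias rather than a new lock type; the sketch below assumes a definition along these lines in src/base/platform/mutex.h:

    // Assumed alias; the call sites above are then a pure spelling change:
    //   base::LockGuard<base::Mutex> guard(&record_mutex_);  // before
    //   base::MutexGuard guard(&record_mutex_);              // after
    using MutexGuard = LockGuard<Mutex>;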
diff --git a/deps/v8/src/compiler-dispatcher/compiler-dispatcher-job.cc b/deps/v8/src/compiler-dispatcher/compiler-dispatcher-job.cc
deleted file mode 100644
index 9e46556fae..0000000000
--- a/deps/v8/src/compiler-dispatcher/compiler-dispatcher-job.cc
+++ /dev/null
@@ -1,19 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/compiler-dispatcher/compiler-dispatcher-job.h"
-
-#include "src/compiler-dispatcher/unoptimized-compile-job.h"
-
-namespace v8 {
-namespace internal {
-
-const UnoptimizedCompileJob* CompilerDispatcherJob::AsUnoptimizedCompileJob()
- const {
- DCHECK_EQ(type(), Type::kUnoptimizedCompile);
- return static_cast<const UnoptimizedCompileJob*>(this);
-}
-
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/compiler-dispatcher/compiler-dispatcher-job.h b/deps/v8/src/compiler-dispatcher/compiler-dispatcher-job.h
deleted file mode 100644
index 827a2aa18d..0000000000
--- a/deps/v8/src/compiler-dispatcher/compiler-dispatcher-job.h
+++ /dev/null
@@ -1,80 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_COMPILER_DISPATCHER_COMPILER_DISPATCHER_JOB_H_
-#define V8_COMPILER_DISPATCHER_COMPILER_DISPATCHER_JOB_H_
-
-#include "src/contexts.h"
-#include "src/handles.h"
-
-namespace v8 {
-namespace internal {
-
-class SharedFunctionInfo;
-
-class UnoptimizedCompileJob;
-
-class V8_EXPORT_PRIVATE CompilerDispatcherJob {
- public:
- enum class Type { kUnoptimizedCompile };
-
- enum class Status {
- kInitial,
- kReadyToFinalize,
- kDone,
- kFailed,
- };
-
- CompilerDispatcherJob(Type type) : type_(type), status_(Status::kInitial) {}
-
- virtual ~CompilerDispatcherJob() = default;
-
- Type type() const { return type_; }
-
-  // Returns the current status of the compile job.
- Status status() const { return status_; }
-
- // Returns true if this CompilerDispatcherJob has finished (either with a
- // success or a failure).
- bool IsFinished() const {
- return status() == Status::kDone || status() == Status::kFailed;
- }
-
- // Returns true if this CompilerDispatcherJob has failed.
- bool IsFailed() const { return status() == Status::kFailed; }
-
-  // Returns true if the next step can be run on any thread.
- bool NextStepCanRunOnAnyThread() const {
- return status() == Status::kInitial;
- }
-
- // Casts to implementations.
- const UnoptimizedCompileJob* AsUnoptimizedCompileJob() const;
-
- // Transition from kInitial to kReadyToFinalize.
- virtual void Compile(bool on_background_thread) = 0;
-
- // Transition from kReadyToFinalize to kDone (or kFailed). Must only be
- // invoked on the main thread.
- virtual void FinalizeOnMainThread(Isolate* isolate,
- Handle<SharedFunctionInfo> shared) = 0;
-
- // Free all resources. Must only be invoked on the main thread.
- virtual void ResetOnMainThread(Isolate* isolate) = 0;
-
- // Estimate how long the next step will take using the tracer.
- virtual double EstimateRuntimeOfNextStepInMs() const = 0;
-
- protected:
- void set_status(Status status) { status_ = status; }
-
- private:
- Type type_;
- Status status_;
-};
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_COMPILER_DISPATCHER_COMPILER_DISPATCHER_JOB_H_
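The deleted CompilerDispatcherJob above is a small state machine: kInitial moves to kReadyToFinalize via Compile(), and kReadyToFinalize moves to kDone or kFailed via FinalizeOnMainThread(). A sketch of how a caller drove a job synchronously, mirroring the old FinishNow() loop removed from compiler-dispatcher.cc further down:

    // Sketch only: error handling, locking, and tracing elided.
    void RunToCompletion(CompilerDispatcherJob* job, Isolate* isolate,
                         Handle<SharedFunctionInfo> shared) {
      while (!job->IsFinished()) {
        switch (job->status()) {
          case CompilerDispatcherJob::Status::kInitial:
            job->Compile(/*on_background_thread=*/false);
            break;
          case CompilerDispatcherJob::Status::kReadyToFinalize:
            job->FinalizeOnMainThread(isolate, shared);
            break;
          case CompilerDispatcherJob::Status::kDone:
          case CompilerDispatcherJob::Status::kFailed:
            UNREACHABLE();
        }
      }
    }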
diff --git a/deps/v8/src/compiler-dispatcher/compiler-dispatcher-tracer.cc b/deps/v8/src/compiler-dispatcher/compiler-dispatcher-tracer.cc
deleted file mode 100644
index ab8bc5adec..0000000000
--- a/deps/v8/src/compiler-dispatcher/compiler-dispatcher-tracer.cc
+++ /dev/null
@@ -1,127 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/compiler-dispatcher/compiler-dispatcher-tracer.h"
-
-#include "src/isolate.h"
-#include "src/utils.h"
-
-namespace v8 {
-namespace internal {
-
-namespace {
-
-double MonotonicallyIncreasingTimeInMs() {
- return V8::GetCurrentPlatform()->MonotonicallyIncreasingTime() *
- static_cast<double>(base::Time::kMillisecondsPerSecond);
-}
-
-const double kEstimatedRuntimeWithoutData = 1.0;
-
-} // namespace
-
-CompilerDispatcherTracer::Scope::Scope(CompilerDispatcherTracer* tracer,
- ScopeID scope_id, size_t num)
- : tracer_(tracer), scope_id_(scope_id), num_(num) {
- start_time_ = MonotonicallyIncreasingTimeInMs();
-}
-
-CompilerDispatcherTracer::Scope::~Scope() {
- double elapsed = MonotonicallyIncreasingTimeInMs() - start_time_;
- switch (scope_id_) {
- case ScopeID::kPrepare:
- tracer_->RecordPrepare(elapsed);
- break;
- case ScopeID::kCompile:
- tracer_->RecordCompile(elapsed, num_);
- break;
- case ScopeID::kFinalize:
- tracer_->RecordFinalize(elapsed);
- break;
- }
-}
-
-// static
-const char* CompilerDispatcherTracer::Scope::Name(ScopeID scope_id) {
- switch (scope_id) {
- case ScopeID::kPrepare:
- return "V8.BackgroundCompile_Prepare";
- case ScopeID::kCompile:
- return "V8.BackgroundCompile_Compile";
- case ScopeID::kFinalize:
- return "V8.BackgroundCompile_Finalize";
- }
- UNREACHABLE();
-}
-
-CompilerDispatcherTracer::CompilerDispatcherTracer(Isolate* isolate)
- : runtime_call_stats_(nullptr) {
- // isolate might be nullptr during unittests.
- if (isolate) {
- runtime_call_stats_ = isolate->counters()->runtime_call_stats();
- }
-}
-
-CompilerDispatcherTracer::~CompilerDispatcherTracer() = default;
-
-void CompilerDispatcherTracer::RecordPrepare(double duration_ms) {
- base::LockGuard<base::Mutex> lock(&mutex_);
- prepare_events_.Push(duration_ms);
-}
-
-void CompilerDispatcherTracer::RecordCompile(double duration_ms,
- size_t source_length) {
- base::LockGuard<base::Mutex> lock(&mutex_);
- compile_events_.Push(std::make_pair(source_length, duration_ms));
-}
-
-void CompilerDispatcherTracer::RecordFinalize(double duration_ms) {
- base::LockGuard<base::Mutex> lock(&mutex_);
- finalize_events_.Push(duration_ms);
-}
-
-double CompilerDispatcherTracer::EstimatePrepareInMs() const {
- base::LockGuard<base::Mutex> lock(&mutex_);
- return Average(prepare_events_);
-}
-
-double CompilerDispatcherTracer::EstimateCompileInMs(
- size_t source_length) const {
- base::LockGuard<base::Mutex> lock(&mutex_);
- return Estimate(compile_events_, source_length);
-}
-
-double CompilerDispatcherTracer::EstimateFinalizeInMs() const {
- base::LockGuard<base::Mutex> lock(&mutex_);
- return Average(finalize_events_);
-}
-
-void CompilerDispatcherTracer::DumpStatistics() const {
- PrintF(
- "CompilerDispatcherTracer: "
- "prepare=%.2lfms compiling=%.2lfms/kb finalize=%.2lfms\n",
- EstimatePrepareInMs(), EstimateCompileInMs(1 * KB),
- EstimateFinalizeInMs());
-}
-
-double CompilerDispatcherTracer::Average(
- const base::RingBuffer<double>& buffer) {
- if (buffer.Count() == 0) return 0.0;
- double sum = buffer.Sum([](double a, double b) { return a + b; }, 0.0);
- return sum / buffer.Count();
-}
-
-double CompilerDispatcherTracer::Estimate(
- const base::RingBuffer<std::pair<size_t, double>>& buffer, size_t num) {
- if (buffer.Count() == 0) return kEstimatedRuntimeWithoutData;
- std::pair<size_t, double> sum = buffer.Sum(
- [](std::pair<size_t, double> a, std::pair<size_t, double> b) {
- return std::make_pair(a.first + b.first, a.second + b.second);
- },
- std::make_pair(0, 0.0));
- return num * (sum.second / sum.first);
-}
-
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/compiler-dispatcher/compiler-dispatcher-tracer.h b/deps/v8/src/compiler-dispatcher/compiler-dispatcher-tracer.h
deleted file mode 100644
index 3043e07d72..0000000000
--- a/deps/v8/src/compiler-dispatcher/compiler-dispatcher-tracer.h
+++ /dev/null
@@ -1,83 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_COMPILER_DISPATCHER_COMPILER_DISPATCHER_TRACER_H_
-#define V8_COMPILER_DISPATCHER_COMPILER_DISPATCHER_TRACER_H_
-
-#include <utility>
-
-#include "src/base/macros.h"
-#include "src/base/platform/mutex.h"
-#include "src/base/ring-buffer.h"
-#include "src/counters.h"
-#include "src/globals.h"
-
-namespace v8 {
-namespace internal {
-
-class Isolate;
-class RuntimeCallStats;
-
-#define COMPILER_DISPATCHER_TRACE_SCOPE_WITH_NUM(tracer, scope_id, num) \
- CompilerDispatcherTracer::ScopeID tracer_scope_id( \
- CompilerDispatcherTracer::ScopeID::scope_id); \
- CompilerDispatcherTracer::Scope trace_scope(tracer, tracer_scope_id, num); \
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"), \
- CompilerDispatcherTracer::Scope::Name(tracer_scope_id))
-
-#define COMPILER_DISPATCHER_TRACE_SCOPE(tracer, scope_id) \
- COMPILER_DISPATCHER_TRACE_SCOPE_WITH_NUM(tracer, scope_id, 0)
-
-class V8_EXPORT_PRIVATE CompilerDispatcherTracer {
- public:
- enum class ScopeID { kPrepare, kCompile, kFinalize };
-
- class Scope {
- public:
- Scope(CompilerDispatcherTracer* tracer, ScopeID scope_id, size_t num = 0);
- ~Scope();
-
-    static const char* Name(ScopeID scope_id);
-
- private:
- CompilerDispatcherTracer* tracer_;
- ScopeID scope_id_;
- size_t num_;
- double start_time_;
-
- DISALLOW_COPY_AND_ASSIGN(Scope);
- };
-
- explicit CompilerDispatcherTracer(Isolate* isolate);
- ~CompilerDispatcherTracer();
-
- void RecordPrepare(double duration_ms);
- void RecordCompile(double duration_ms, size_t source_length);
- void RecordFinalize(double duration_ms);
-
- double EstimatePrepareInMs() const;
- double EstimateCompileInMs(size_t source_length) const;
- double EstimateFinalizeInMs() const;
-
- void DumpStatistics() const;
-
- private:
- static double Average(const base::RingBuffer<double>& buffer);
- static double Estimate(
- const base::RingBuffer<std::pair<size_t, double>>& buffer, size_t num);
-
- mutable base::Mutex mutex_;
- base::RingBuffer<double> prepare_events_;
- base::RingBuffer<std::pair<size_t, double>> compile_events_;
- base::RingBuffer<double> finalize_events_;
-
- RuntimeCallStats* runtime_call_stats_;
-
- DISALLOW_COPY_AND_ASSIGN(CompilerDispatcherTracer);
-};
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_COMPILER_DISPATCHER_COMPILER_DISPATCHER_TRACER_H_
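The tracer's Estimate() above fits a simple linear ms-per-byte model: it sums the recorded (source_length, duration) pairs in the ring buffer and returns num * total_ms / total_bytes. For example, with recorded events (1000 bytes, 2.0 ms) and (3000 bytes, 4.0 ms), EstimateCompileInMs(2048) yields 2048 * 6.0 / 4000 ≈ 3.1 ms; with an empty buffer it returns the kEstimatedRuntimeWithoutData default of 1.0 ms.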
diff --git a/deps/v8/src/compiler-dispatcher/compiler-dispatcher.cc b/deps/v8/src/compiler-dispatcher/compiler-dispatcher.cc
index 6148770385..affb1ddd37 100644
--- a/deps/v8/src/compiler-dispatcher/compiler-dispatcher.cc
+++ b/deps/v8/src/compiler-dispatcher/compiler-dispatcher.cc
@@ -8,38 +8,21 @@
#include "src/base/platform/time.h"
#include "src/base/template-utils.h"
#include "src/cancelable-task.h"
-#include "src/compiler-dispatcher/compiler-dispatcher-job.h"
-#include "src/compiler-dispatcher/compiler-dispatcher-tracer.h"
-#include "src/compiler-dispatcher/unoptimized-compile-job.h"
+#include "src/compiler.h"
#include "src/flags.h"
#include "src/global-handles.h"
#include "src/objects-inl.h"
+#include "src/parsing/parse-info.h"
+#include "src/parsing/parser.h"
+#include "src/task-utils.h"
namespace v8 {
namespace internal {
-namespace {
+CompilerDispatcher::Job::Job(BackgroundCompileTask* task_arg)
+ : task(task_arg), has_run(false), aborted(false) {}
-enum class ExceptionHandling { kSwallow, kThrow };
-
-void FinalizeJobOnMainThread(Isolate* isolate, CompilerDispatcherJob* job,
- Handle<SharedFunctionInfo> shared,
- ExceptionHandling exception_handling) {
- DCHECK(ThreadId::Current().Equals(isolate->thread_id()));
- DCHECK_EQ(job->status(), CompilerDispatcherJob::Status::kReadyToFinalize);
-
- job->FinalizeOnMainThread(isolate, shared);
- DCHECK_EQ(job->IsFailed(), isolate->has_pending_exception());
- if (job->IsFailed() && exception_handling == ExceptionHandling::kSwallow) {
- isolate->clear_pending_exception();
- }
-}
-
-// Theoretically we get 50ms of idle time max; however, it's unlikely that
-// we'll get all of it, so try to be conservative.
-const double kMaxIdleTimeToExpectInMs = 40;
-
-} // namespace
+CompilerDispatcher::Job::~Job() = default;
CompilerDispatcher::CompilerDispatcher(Isolate* isolate, Platform* platform,
size_t max_stack_size)
@@ -54,12 +37,9 @@ CompilerDispatcher::CompilerDispatcher(Isolate* isolate, Platform* platform,
platform_(platform),
max_stack_size_(max_stack_size),
trace_compiler_dispatcher_(FLAG_trace_compiler_dispatcher),
- tracer_(new CompilerDispatcherTracer(isolate_)),
task_manager_(new CancelableTaskManager()),
next_job_id_(0),
shared_to_unoptimized_job_id_(isolate->heap()),
- memory_pressure_level_(MemoryPressureLevel::kNone),
- abort_(false),
idle_task_scheduled_(false),
num_worker_tasks_(0),
main_thread_blocking_on_job_(nullptr),
@@ -71,26 +51,8 @@ CompilerDispatcher::CompilerDispatcher(Isolate* isolate, Platform* platform,
}
CompilerDispatcher::~CompilerDispatcher() {
-  // To avoid crashing in unit tests due to unfinished jobs.
- AbortAll(BlockingBehavior::kBlock);
- task_manager_->CancelAndWait();
-}
-
-bool CompilerDispatcher::CanEnqueue() {
- if (!IsEnabled()) return false;
-
- // TODO(rmcilroy): Investigate if MemoryPressureLevel::kNone is ever sent on
- // Android, if not, remove this check.
- if (memory_pressure_level_.Value() != MemoryPressureLevel::kNone) {
- return false;
- }
-
- {
- base::LockGuard<base::Mutex> lock(&mutex_);
- if (abort_) return false;
- }
-
- return true;
+ // AbortAll must be called before CompilerDispatcher is destroyed.
+ CHECK(task_manager_->canceled());
}
base::Optional<CompilerDispatcher::JobId> CompilerDispatcher::Enqueue(
@@ -101,12 +63,12 @@ base::Optional<CompilerDispatcher::JobId> CompilerDispatcher::Enqueue(
RuntimeCallTimerScope runtimeTimer(
isolate_, RuntimeCallCounterId::kCompileEnqueueOnDispatcher);
- if (!CanEnqueue()) return base::nullopt;
+ if (!IsEnabled()) return base::nullopt;
- std::unique_ptr<CompilerDispatcherJob> job(new UnoptimizedCompileJob(
- tracer_.get(), allocator_, outer_parse_info, function_name,
- function_literal, worker_thread_runtime_call_stats_,
- background_compile_timer_, max_stack_size_));
+ std::unique_ptr<Job> job = base::make_unique<Job>(new BackgroundCompileTask(
+ allocator_, outer_parse_info, function_name, function_literal,
+ worker_thread_runtime_call_stats_, background_compile_timer_,
+ static_cast<int>(max_stack_size_)));
JobMap::const_iterator it = InsertJob(std::move(job));
JobId id = it->first;
if (trace_compiler_dispatcher_) {
@@ -114,10 +76,13 @@ base::Optional<CompilerDispatcher::JobId> CompilerDispatcher::Enqueue(
id, function_literal->function_literal_id());
}
-  // Post an idle task and a background worker task to perform the compilation
- // either on the worker thread or during idle time (whichever is first).
- ConsiderJobForBackgroundProcessing(it->second.get());
- ScheduleIdleTaskIfNeeded();
+  // Post a background worker task to perform the compilation on the worker
+ // thread.
+ {
+ base::MutexGuard lock(&mutex_);
+ pending_background_jobs_.insert(it->second.get());
+ }
+ ScheduleMoreWorkerTasksIfNeeded();
return base::make_optional(id);
}
@@ -131,10 +96,10 @@ bool CompilerDispatcher::IsEnqueued(Handle<SharedFunctionInfo> function) const {
bool CompilerDispatcher::IsEnqueued(JobId job_id) const {
return jobs_.find(job_id) != jobs_.end();
}
+
void CompilerDispatcher::RegisterSharedFunctionInfo(
- JobId job_id, SharedFunctionInfo* function) {
+ JobId job_id, SharedFunctionInfo function) {
DCHECK_NE(jobs_.find(job_id), jobs_.end());
- DCHECK_EQ(job_id_to_shared_.find(job_id), job_id_to_shared_.end());
if (trace_compiler_dispatcher_) {
PrintF("CompilerDispatcher: registering ");
@@ -143,25 +108,32 @@ void CompilerDispatcher::RegisterSharedFunctionInfo(
}
// Make a global handle to the function.
- Handle<SharedFunctionInfo> function_handle =
- isolate_->global_handles()->Create(function);
+ Handle<SharedFunctionInfo> function_handle = Handle<SharedFunctionInfo>::cast(
+ isolate_->global_handles()->Create(function));
// Register mapping.
- job_id_to_shared_.insert(std::make_pair(job_id, function_handle));
+ auto job_it = jobs_.find(job_id);
+ DCHECK_NE(job_it, jobs_.end());
+ Job* job = job_it->second.get();
shared_to_unoptimized_job_id_.Set(function_handle, job_id);
- // Schedule an idle task to finalize job if it is ready.
- ScheduleIdleTaskIfNeeded();
+ {
+ base::MutexGuard lock(&mutex_);
+ job->function = function_handle;
+ if (job->IsReadyToFinalize(lock)) {
+ // Schedule an idle task to finalize job if it is ready.
+ ScheduleIdleTaskFromAnyThread(lock);
+ }
+ }
}
-void CompilerDispatcher::WaitForJobIfRunningOnBackground(
- CompilerDispatcherJob* job) {
+void CompilerDispatcher::WaitForJobIfRunningOnBackground(Job* job) {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
"V8.CompilerDispatcherWaitForBackgroundJob");
RuntimeCallTimerScope runtimeTimer(
isolate_, RuntimeCallCounterId::kCompileWaitForDispatcher);
- base::LockGuard<base::Mutex> lock(&mutex_);
+ base::MutexGuard lock(&mutex_);
if (running_background_jobs_.find(job) == running_background_jobs_.end()) {
pending_background_jobs_.erase(job);
return;
@@ -188,123 +160,60 @@ bool CompilerDispatcher::FinishNow(Handle<SharedFunctionInfo> function) {
JobMap::const_iterator it = GetJobFor(function);
CHECK(it != jobs_.end());
- CompilerDispatcherJob* job = it->second.get();
+ Job* job = it->second.get();
WaitForJobIfRunningOnBackground(job);
- while (!job->IsFinished()) {
- switch (job->status()) {
- case CompilerDispatcherJob::Status::kInitial:
- job->Compile(false);
- break;
- case CompilerDispatcherJob::Status::kReadyToFinalize: {
- FinalizeJobOnMainThread(isolate_, job, function,
- ExceptionHandling::kThrow);
- break;
- }
- case CompilerDispatcherJob::Status::kFailed:
- case CompilerDispatcherJob::Status::kDone:
- UNREACHABLE();
- }
+
+ if (!job->has_run) {
+ job->task->Run();
+ job->has_run = true;
}
- DCHECK_EQ(job->IsFailed(), isolate_->has_pending_exception());
- DCHECK(job->IsFinished());
- bool result = !job->IsFailed();
+
+ DCHECK(job->IsReadyToFinalize(&mutex_));
+ DCHECK(!job->aborted);
+ bool success = Compiler::FinalizeBackgroundCompileTask(
+ job->task.get(), function, isolate_, Compiler::KEEP_EXCEPTION);
+
+ DCHECK_NE(success, isolate_->has_pending_exception());
RemoveJob(it);
- return result;
+ return success;
}
-void CompilerDispatcher::AbortAll(BlockingBehavior blocking) {
- bool background_tasks_running =
- task_manager_->TryAbortAll() == CancelableTaskManager::kTaskRunning;
- if (!background_tasks_running || blocking == BlockingBehavior::kBlock) {
- for (auto& it : jobs_) {
- WaitForJobIfRunningOnBackground(it.second.get());
- if (trace_compiler_dispatcher_) {
- PrintF("CompilerDispatcher: aborted job %zu\n", it.first);
- }
- it.second->ResetOnMainThread(isolate_);
- }
- jobs_.clear();
- shared_to_unoptimized_job_id_.Clear();
- {
- base::LockGuard<base::Mutex> lock(&mutex_);
- DCHECK(pending_background_jobs_.empty());
- DCHECK(running_background_jobs_.empty());
- abort_ = false;
- }
- return;
+void CompilerDispatcher::AbortJob(JobId job_id) {
+ if (trace_compiler_dispatcher_) {
+ PrintF("CompilerDispatcher: aborted job %zu\n", job_id);
}
+ JobMap::const_iterator job_it = jobs_.find(job_id);
+ Job* job = job_it->second.get();
- {
- base::LockGuard<base::Mutex> lock(&mutex_);
- abort_ = true;
- pending_background_jobs_.clear();
- idle_task_scheduled_ = false; // Idle task cancelled by TryAbortAll.
+ base::LockGuard<base::Mutex> lock(&mutex_);
+ pending_background_jobs_.erase(job);
+ if (running_background_jobs_.find(job) == running_background_jobs_.end()) {
+ RemoveJob(job_it);
+ } else {
+    // The job is currently running on the background thread; wait until it's
+    // done, then remove it.
+ job->aborted = true;
}
- AbortInactiveJobs();
-
- // All running background jobs might already have scheduled idle tasks instead
- // of abort tasks. Schedule a single abort task here to make sure they get
-  // processed as soon as possible (and not only once we have idle time).
- ScheduleAbortTask();
}
-void CompilerDispatcher::AbortInactiveJobs() {
- {
- base::LockGuard<base::Mutex> lock(&mutex_);
- // Since we schedule two abort tasks per async abort, we might end up
- // here with nothing left to do.
- if (!abort_) return;
- }
- for (auto it = jobs_.cbegin(); it != jobs_.cend();) {
- auto job = it;
- ++it;
- {
- base::LockGuard<base::Mutex> lock(&mutex_);
- if (running_background_jobs_.find(job->second.get()) !=
- running_background_jobs_.end()) {
- continue;
- }
- }
+void CompilerDispatcher::AbortAll() {
+ task_manager_->TryAbortAll();
+
+ for (auto& it : jobs_) {
+ WaitForJobIfRunningOnBackground(it.second.get());
if (trace_compiler_dispatcher_) {
- PrintF("CompilerDispatcher: aborted job %zu\n", job->first);
+ PrintF("CompilerDispatcher: aborted job %zu\n", it.first);
}
- it = RemoveJob(job);
}
- if (jobs_.empty()) {
- base::LockGuard<base::Mutex> lock(&mutex_);
- if (num_worker_tasks_ == 0) abort_ = false;
+ jobs_.clear();
+ shared_to_unoptimized_job_id_.Clear();
+ {
+ base::MutexGuard lock(&mutex_);
+ DCHECK(pending_background_jobs_.empty());
+ DCHECK(running_background_jobs_.empty());
}
-}
-void CompilerDispatcher::MemoryPressureNotification(
- v8::MemoryPressureLevel level, bool is_isolate_locked) {
- MemoryPressureLevel previous = memory_pressure_level_.Value();
- memory_pressure_level_.SetValue(level);
- // If we're already under pressure, we haven't accepted new tasks meanwhile
- // and can just return. If we're no longer under pressure, we're also done.
- if (previous != MemoryPressureLevel::kNone ||
- level == MemoryPressureLevel::kNone) {
- return;
- }
- if (trace_compiler_dispatcher_) {
- PrintF("CompilerDispatcher: received memory pressure notification\n");
- }
- if (is_isolate_locked) {
- AbortAll(BlockingBehavior::kDontBlock);
- } else {
- {
- base::LockGuard<base::Mutex> lock(&mutex_);
- if (abort_) return;
- // By going into abort mode here, and clearing the
-      // pending_background_jobs_, we at least keep existing background jobs from
- // picking up more work before the MemoryPressureTask gets executed.
- abort_ = true;
- pending_background_jobs_.clear();
- }
- taskrunner_->PostTask(MakeCancelableLambdaTask(task_manager_.get(), [this] {
- AbortAll(BlockingBehavior::kDontBlock);
- }));
- }
+ task_manager_->CancelAndWait();
}
CompilerDispatcher::JobMap::const_iterator CompilerDispatcher::GetJobFor(
@@ -317,60 +226,39 @@ CompilerDispatcher::JobMap::const_iterator CompilerDispatcher::GetJobFor(
return job;
}
-void CompilerDispatcher::ScheduleIdleTaskFromAnyThread() {
+void CompilerDispatcher::ScheduleIdleTaskFromAnyThread(
+ const base::MutexGuard&) {
if (!taskrunner_->IdleTasksEnabled()) return;
- {
- base::LockGuard<base::Mutex> lock(&mutex_);
- if (idle_task_scheduled_ || abort_) return;
- idle_task_scheduled_ = true;
- }
- taskrunner_->PostIdleTask(MakeCancelableIdleLambdaTask(
+ if (idle_task_scheduled_) return;
+
+ idle_task_scheduled_ = true;
+ taskrunner_->PostIdleTask(MakeCancelableIdleTask(
task_manager_.get(),
[this](double deadline_in_seconds) { DoIdleWork(deadline_in_seconds); }));
}
-void CompilerDispatcher::ScheduleIdleTaskIfNeeded() {
- if (jobs_.empty()) return;
- ScheduleIdleTaskFromAnyThread();
-}
-
-void CompilerDispatcher::ScheduleAbortTask() {
- taskrunner_->PostTask(MakeCancelableLambdaTask(
- task_manager_.get(), [this] { AbortInactiveJobs(); }));
-}
-
-void CompilerDispatcher::ConsiderJobForBackgroundProcessing(
- CompilerDispatcherJob* job) {
- if (!job->NextStepCanRunOnAnyThread()) return;
- {
- base::LockGuard<base::Mutex> lock(&mutex_);
- pending_background_jobs_.insert(job);
- }
- ScheduleMoreWorkerTasksIfNeeded();
-}
-
void CompilerDispatcher::ScheduleMoreWorkerTasksIfNeeded() {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
"V8.CompilerDispatcherScheduleMoreWorkerTasksIfNeeded");
{
- base::LockGuard<base::Mutex> lock(&mutex_);
+ base::MutexGuard lock(&mutex_);
if (pending_background_jobs_.empty()) return;
if (platform_->NumberOfWorkerThreads() <= num_worker_tasks_) {
return;
}
++num_worker_tasks_;
}
- platform_->CallOnWorkerThread(MakeCancelableLambdaTask(
- task_manager_.get(), [this] { DoBackgroundWork(); }));
+ platform_->CallOnWorkerThread(
+ MakeCancelableTask(task_manager_.get(), [this] { DoBackgroundWork(); }));
}
void CompilerDispatcher::DoBackgroundWork() {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
"V8.CompilerDispatcherDoBackgroundWork");
for (;;) {
- CompilerDispatcherJob* job = nullptr;
+ Job* job = nullptr;
{
- base::LockGuard<base::Mutex> lock(&mutex_);
+ base::MutexGuard lock(&mutex_);
if (!pending_background_jobs_.empty()) {
auto it = pending_background_jobs_.begin();
job = *it;
@@ -389,18 +277,19 @@ void CompilerDispatcher::DoBackgroundWork() {
PrintF("CompilerDispatcher: doing background work\n");
}
- DCHECK(job->NextStepCanRunOnAnyThread());
- DCHECK_EQ(job->status(), CompilerDispatcherJob::Status::kInitial);
- job->Compile(true);
-
- // Unconditionally schedule an idle task, as all background steps have to be
- // followed by a main thread step.
- ScheduleIdleTaskFromAnyThread();
+ job->task->Run();
{
- base::LockGuard<base::Mutex> lock(&mutex_);
+ base::MutexGuard lock(&mutex_);
running_background_jobs_.erase(job);
+ job->has_run = true;
+ if (job->IsReadyToFinalize(lock)) {
+ // Schedule an idle task to finalize the compilation on the main thread
+ // if the job has a shared function info registered.
+ ScheduleIdleTaskFromAnyThread(lock);
+ }
+
if (main_thread_blocking_on_job_ == job) {
main_thread_blocking_on_job_ = nullptr;
main_thread_blocking_signal_.NotifyOne();
@@ -409,15 +298,8 @@ void CompilerDispatcher::DoBackgroundWork() {
}
{
- base::LockGuard<base::Mutex> lock(&mutex_);
+ base::MutexGuard lock(&mutex_);
--num_worker_tasks_;
-
- if (running_background_jobs_.empty() && abort_) {
- // This is the last background job that finished. The abort task
- // scheduled by AbortAll might already have ran, so schedule another
- // one to be on the safe side.
- ScheduleAbortTask();
- }
}
// Don't touch |this| anymore after this point, as it might have been
// deleted.
@@ -426,114 +308,53 @@ void CompilerDispatcher::DoBackgroundWork() {
void CompilerDispatcher::DoIdleWork(double deadline_in_seconds) {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
"V8.CompilerDispatcherDoIdleWork");
- bool aborted = false;
{
- base::LockGuard<base::Mutex> lock(&mutex_);
+ base::MutexGuard lock(&mutex_);
idle_task_scheduled_ = false;
- aborted = abort_;
- }
-
- if (aborted) {
- AbortInactiveJobs();
- return;
}
- // Number of jobs that are unlikely to make progress during any idle callback
- // due to their estimated duration.
- size_t jobs_unlikely_to_progress = 0;
-
- // Iterate over all available jobs & remaining time. For each job, decide
- // whether to 1) skip it (if it would take too long), 2) erase it (if it's
- // finished), or 3) make progress on it if possible.
- double idle_time_in_seconds =
- deadline_in_seconds - platform_->MonotonicallyIncreasingTime();
-
if (trace_compiler_dispatcher_) {
PrintF("CompilerDispatcher: received %0.1lfms of idle time\n",
- idle_time_in_seconds *
+ (deadline_in_seconds - platform_->MonotonicallyIncreasingTime()) *
static_cast<double>(base::Time::kMillisecondsPerSecond));
}
- for (auto job = jobs_.cbegin();
- job != jobs_.cend() && idle_time_in_seconds > 0.0;
- idle_time_in_seconds =
- deadline_in_seconds - platform_->MonotonicallyIncreasingTime()) {
- // Don't work on jobs that are being worked on by background tasks.
- // Similarly, remove jobs we work on from the set of available background
- // jobs.
- std::unique_ptr<base::LockGuard<base::Mutex>> lock(
- new base::LockGuard<base::Mutex>(&mutex_));
- if (running_background_jobs_.find(job->second.get()) !=
- running_background_jobs_.end()) {
- ++job;
- continue;
- }
- DCHECK(!job->second->IsFinished());
- auto it = pending_background_jobs_.find(job->second.get());
- double estimate_in_ms = job->second->EstimateRuntimeOfNextStepInMs();
- if (idle_time_in_seconds <
- (estimate_in_ms /
- static_cast<double>(base::Time::kMillisecondsPerSecond))) {
- // If there's not enough time left, try to estimate whether we would
- // have managed to finish the job in a large idle task to assess
- // whether we should ask for another idle callback.
- // TODO(rmcilroy): Consider running the job anyway when we have a long
- // idle time since this would probably be the best time to run.
- if (estimate_in_ms > kMaxIdleTimeToExpectInMs)
- ++jobs_unlikely_to_progress;
- if (it == pending_background_jobs_.end()) {
- lock.reset();
- ConsiderJobForBackgroundProcessing(job->second.get());
- }
- ++job;
- } else if (job->second->status() ==
- CompilerDispatcherJob::Status::kInitial) {
- if (it != pending_background_jobs_.end()) {
- pending_background_jobs_.erase(it);
- }
- lock.reset();
- job->second->Compile(false);
- // Don't update job so we can immediately finalize it on the next loop.
- } else {
- DCHECK_EQ(job->second->status(),
- CompilerDispatcherJob::Status::kReadyToFinalize);
- DCHECK(it == pending_background_jobs_.end());
- lock.reset();
-
- auto shared_it = job_id_to_shared_.find(job->first);
- if (shared_it != job_id_to_shared_.end()) {
- Handle<SharedFunctionInfo> shared = shared_it->second;
- FinalizeJobOnMainThread(isolate_, job->second.get(), shared,
- ExceptionHandling::kSwallow);
- DCHECK(job->second->IsFinished());
- job = RemoveJob(job);
- } else {
- // If we can't step the job yet, go to the next job.
- ++jobs_unlikely_to_progress;
- ++job;
+ while (deadline_in_seconds > platform_->MonotonicallyIncreasingTime()) {
+    // Find a job that is pending finalization and has a shared function info.
+ CompilerDispatcher::JobMap::const_iterator it;
+ {
+ base::MutexGuard lock(&mutex_);
+ for (it = jobs_.cbegin(); it != jobs_.cend(); ++it) {
+ if (it->second->IsReadyToFinalize(lock)) break;
}
+ // Since we hold the lock here, we can be sure no jobs have become ready
+ // for finalization while we looped through the list.
+ if (it == jobs_.cend()) return;
+
+ DCHECK(it->second->IsReadyToFinalize(lock));
+ DCHECK_EQ(running_background_jobs_.find(it->second.get()),
+ running_background_jobs_.end());
+ DCHECK_EQ(pending_background_jobs_.find(it->second.get()),
+ pending_background_jobs_.end());
}
- }
- if (jobs_.size() > jobs_unlikely_to_progress) ScheduleIdleTaskIfNeeded();
-}
-CompilerDispatcher::JobMap::const_iterator CompilerDispatcher::RemoveIfFinished(
- JobMap::const_iterator job) {
- if (!job->second->IsFinished()) {
- return job;
+ Job* job = it->second.get();
+ if (!job->aborted) {
+ Compiler::FinalizeBackgroundCompileTask(
+ job->task.get(), job->function.ToHandleChecked(), isolate_,
+ Compiler::CLEAR_EXCEPTION);
+ }
+ RemoveJob(it);
}
- if (trace_compiler_dispatcher_) {
- bool result = !job->second->IsFailed();
- PrintF("CompilerDispatcher: finished working on job %zu: %s\n", job->first,
- result ? "success" : "failure");
- tracer_->DumpStatistics();
+  // We didn't return above, so there still might be jobs to finalize.
+ {
+ base::MutexGuard lock(&mutex_);
+ ScheduleIdleTaskFromAnyThread(lock);
}
-
- return RemoveJob(job);
}
CompilerDispatcher::JobMap::const_iterator CompilerDispatcher::InsertJob(
- std::unique_ptr<CompilerDispatcherJob> job) {
+ std::unique_ptr<Job> job) {
bool added;
JobMap::const_iterator it;
std::tie(it, added) =
@@ -544,30 +365,19 @@ CompilerDispatcher::JobMap::const_iterator CompilerDispatcher::InsertJob(
CompilerDispatcher::JobMap::const_iterator CompilerDispatcher::RemoveJob(
CompilerDispatcher::JobMap::const_iterator it) {
- CompilerDispatcherJob* job = it->second.get();
-
-  // Delete the SFI associated with the job if it's been registered.
- auto shared_it = job_id_to_shared_.find(it->first);
- if (shared_it != job_id_to_shared_.end()) {
- Handle<SharedFunctionInfo> shared = shared_it->second;
-
- JobId deleted_id;
- shared_to_unoptimized_job_id_.Delete(shared, &deleted_id);
- DCHECK_EQ(it->first, deleted_id);
-
- job_id_to_shared_.erase(shared_it);
- GlobalHandles::Destroy(Handle<Object>::cast(shared).location());
- }
+ Job* job = it->second.get();
- job->ResetOnMainThread(isolate_);
+ DCHECK_EQ(running_background_jobs_.find(job), running_background_jobs_.end());
+ DCHECK_EQ(pending_background_jobs_.find(job), pending_background_jobs_.end());
- it = jobs_.erase(it);
- if (jobs_.empty()) {
- base::LockGuard<base::Mutex> lock(&mutex_);
- if (num_worker_tasks_ == 0) abort_ = false;
+  // Delete the SFI associated with the job if it's been registered.
+ Handle<SharedFunctionInfo> function;
+ if (job->function.ToHandle(&function)) {
+ GlobalHandles::Destroy(function.location());
}
- return it;
+ // Delete job.
+ return jobs_.erase(it);
}
} // namespace internal
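After this rewrite a job's life cycle is: Enqueue() wraps a BackgroundCompileTask in a Job and adds it to pending_background_jobs_; a worker task picks it up, runs task->Run(), and sets has_run; RegisterSharedFunctionInfo() later supplies the SharedFunctionInfo; once both have happened (Job::IsReadyToFinalize), an idle task finalizes and removes the job. A condensed sketch of the happy path from the caller's side, using only names that appear in this file (the argument variables are placeholders):

    // Main thread: queue the background compile and remember the job id.
    base::Optional<CompilerDispatcher::JobId> maybe_id =
        dispatcher->Enqueue(outer_parse_info, function_name, function_literal);
    CompilerDispatcher::JobId id = *maybe_id;

    // Main thread, later: associate the job with its SharedFunctionInfo,
    // which arms idle-time finalization once the background run is done.
    dispatcher->RegisterSharedFunctionInfo(id, shared_function_info);

    // If the result is needed before idle time arrives, force completion:
    bool ok = dispatcher->FinishNow(shared_function_info_handle);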
diff --git a/deps/v8/src/compiler-dispatcher/compiler-dispatcher.h b/deps/v8/src/compiler-dispatcher/compiler-dispatcher.h
index dd024e297a..0bfbe9d719 100644
--- a/deps/v8/src/compiler-dispatcher/compiler-dispatcher.h
+++ b/deps/v8/src/compiler-dispatcher/compiler-dispatcher.h
@@ -19,6 +19,7 @@
#include "src/base/platform/semaphore.h"
#include "src/globals.h"
#include "src/identity-map.h"
+#include "src/maybe-handles.h"
#include "testing/gtest/include/gtest/gtest_prod.h" // nogncheck
namespace v8 {
@@ -30,8 +31,8 @@ namespace internal {
class AstRawString;
class AstValueFactory;
+class BackgroundCompileTask;
class CancelableTaskManager;
-class CompilerDispatcherJob;
class UnoptimizedCompileJob;
class CompilerDispatcherTracer;
class DeferredHandles;
@@ -88,7 +89,7 @@ class V8_EXPORT_PRIVATE CompilerDispatcher {
const FunctionLiteral* function_literal);
// Registers the given |function| with the compilation job |job_id|.
- void RegisterSharedFunctionInfo(JobId job_id, SharedFunctionInfo* function);
+ void RegisterSharedFunctionInfo(JobId job_id, SharedFunctionInfo function);
// Returns true if there is a pending job with the given id.
bool IsEnqueued(JobId job_id) const;
@@ -100,54 +101,52 @@ class V8_EXPORT_PRIVATE CompilerDispatcher {
// possible). Returns true if the compile job was successful.
bool FinishNow(Handle<SharedFunctionInfo> function);
- // Aborts a given job. Blocks if requested.
- void Abort(Handle<SharedFunctionInfo> function, BlockingBehavior blocking);
+ // Aborts compilation job |job_id|.
+ void AbortJob(JobId job_id);
- // Aborts all jobs. Blocks if requested.
- void AbortAll(BlockingBehavior blocking);
-
- // Memory pressure notifications from the embedder.
- void MemoryPressureNotification(v8::MemoryPressureLevel level,
- bool is_isolate_locked);
+ // Aborts all jobs, blocking until all jobs are aborted.
+ void AbortAll();
private:
- FRIEND_TEST(CompilerDispatcherTest, EnqueueJob);
- FRIEND_TEST(CompilerDispatcherTest, EnqueueWithoutSFI);
- FRIEND_TEST(CompilerDispatcherTest, EnqueueAndStep);
- FRIEND_TEST(CompilerDispatcherTest, EnqueueAndStepWithoutSFI);
- FRIEND_TEST(CompilerDispatcherTest, EnqueueAndStepTwice);
- FRIEND_TEST(CompilerDispatcherTest, EnqueueParsed);
- FRIEND_TEST(CompilerDispatcherTest, EnqueueAndStepParsed);
+ FRIEND_TEST(CompilerDispatcherTest, IdleTaskNoIdleTime);
FRIEND_TEST(CompilerDispatcherTest, IdleTaskSmallIdleTime);
- FRIEND_TEST(CompilerDispatcherTest, CompileOnBackgroundThread);
FRIEND_TEST(CompilerDispatcherTest, FinishNowWithWorkerTask);
+ FRIEND_TEST(CompilerDispatcherTest, AbortJobNotStarted);
+ FRIEND_TEST(CompilerDispatcherTest, AbortJobAlreadyStarted);
FRIEND_TEST(CompilerDispatcherTest, AsyncAbortAllPendingWorkerTask);
FRIEND_TEST(CompilerDispatcherTest, AsyncAbortAllRunningWorkerTask);
- FRIEND_TEST(CompilerDispatcherTest, FinishNowDuringAbortAll);
FRIEND_TEST(CompilerDispatcherTest, CompileMultipleOnBackgroundThread);
- typedef std::map<JobId, std::unique_ptr<CompilerDispatcherJob>> JobMap;
- typedef std::map<JobId, Handle<SharedFunctionInfo>> JobIdToSharedMap;
+ struct Job {
+ explicit Job(BackgroundCompileTask* task_arg);
+ ~Job();
+
+ bool IsReadyToFinalize(const base::MutexGuard&) {
+ return has_run && (!function.is_null() || aborted);
+ }
+
+ bool IsReadyToFinalize(base::Mutex* mutex) {
+ base::MutexGuard lock(mutex);
+ return IsReadyToFinalize(lock);
+ }
+
+ std::unique_ptr<BackgroundCompileTask> task;
+ MaybeHandle<SharedFunctionInfo> function;
+ bool has_run;
+ bool aborted;
+ };
+
+ typedef std::map<JobId, std::unique_ptr<Job>> JobMap;
typedef IdentityMap<JobId, FreeStoreAllocationPolicy> SharedToJobIdMap;
- class AbortTask;
- class WorkerTask;
- class IdleTask;
- bool CanEnqueue();
- void WaitForJobIfRunningOnBackground(CompilerDispatcherJob* job);
- void AbortInactiveJobs();
+ void WaitForJobIfRunningOnBackground(Job* job);
JobMap::const_iterator GetJobFor(Handle<SharedFunctionInfo> shared) const;
- void ConsiderJobForBackgroundProcessing(CompilerDispatcherJob* job);
void ScheduleMoreWorkerTasksIfNeeded();
- void ScheduleIdleTaskFromAnyThread();
- void ScheduleIdleTaskIfNeeded();
- void ScheduleAbortTask();
+ void ScheduleIdleTaskFromAnyThread(const base::MutexGuard&);
void DoBackgroundWork();
void DoIdleWork(double deadline_in_seconds);
-  // Returns the job if not removed, otherwise the iterator following the removed job.
- JobMap::const_iterator RemoveIfFinished(JobMap::const_iterator job);
// Returns iterator to the inserted job.
- JobMap::const_iterator InsertJob(std::unique_ptr<CompilerDispatcherJob> job);
+ JobMap::const_iterator InsertJob(std::unique_ptr<Job> job);
// Returns iterator following the removed job.
JobMap::const_iterator RemoveJob(JobMap::const_iterator job);
@@ -162,8 +161,6 @@ class V8_EXPORT_PRIVATE CompilerDispatcher {
// Copy of FLAG_trace_compiler_dispatcher to allow for access from any thread.
bool trace_compiler_dispatcher_;
- std::unique_ptr<CompilerDispatcherTracer> tracer_;
-
std::unique_ptr<CancelableTaskManager> task_manager_;
// Id for next job to be added
@@ -172,37 +169,29 @@ class V8_EXPORT_PRIVATE CompilerDispatcher {
// Mapping from job_id to job.
JobMap jobs_;
- // Mapping from job_id to SharedFunctionInfo.
- JobIdToSharedMap job_id_to_shared_;
-
// Mapping from SharedFunctionInfo to the corresponding unoptimized
// compilation's JobId;
SharedToJobIdMap shared_to_unoptimized_job_id_;
- base::AtomicValue<v8::MemoryPressureLevel> memory_pressure_level_;
-
// The following members can be accessed from any thread. Methods need to hold
// the mutex |mutex_| while accessing them.
base::Mutex mutex_;
- // True if the dispatcher is in the process of aborting running tasks.
- bool abort_;
-
+ // True if an idle task is scheduled to be run.
bool idle_task_scheduled_;
// Number of scheduled or running WorkerTask objects.
int num_worker_tasks_;
- // The set of CompilerDispatcherJobs that can be advanced on any thread.
- std::unordered_set<CompilerDispatcherJob*> pending_background_jobs_;
+ // The set of jobs that can be run on a background thread.
+ std::unordered_set<Job*> pending_background_jobs_;
- // The set of CompilerDispatcherJobs currently processed on background
- // threads.
- std::unordered_set<CompilerDispatcherJob*> running_background_jobs_;
+ // The set of jobs currently being run on background threads.
+ std::unordered_set<Job*> running_background_jobs_;
// If not nullptr, then the main thread waits for the task processing
// this job, and blocks on the ConditionVariable main_thread_blocking_signal_.
- CompilerDispatcherJob* main_thread_blocking_on_job_;
+ Job* main_thread_blocking_on_job_;
base::ConditionVariable main_thread_blocking_signal_;
// Test support.
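Note the idiom in Job::IsReadyToFinalize above: the overload taking const base::MutexGuard& can only be called by code that already holds a guard, turning the locking requirement into a compile-time obligation, while the base::Mutex* overload acquires the lock itself and then delegates. The same idiom in isolation (a sketch with a hypothetical class):

    class Counter {
     public:
      // Requires the caller to prove it holds the lock by passing the guard.
      int value(const base::MutexGuard&) const { return value_; }

      // Convenience overload: takes the lock, then calls the checked one.
      int value(base::Mutex* mutex) const {
        base::MutexGuard lock(mutex);
        return value(lock);
      }

     private:
      int value_ = 0;
    };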
diff --git a/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc b/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc
index 492e80abe0..7d0440f598 100644
--- a/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc
+++ b/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc
@@ -8,7 +8,9 @@
#include "src/base/template-utils.h"
#include "src/cancelable-task.h"
#include "src/compiler.h"
+#include "src/counters.h"
#include "src/isolate.h"
+#include "src/log.h"
#include "src/objects-inl.h"
#include "src/optimized-compilation-info.h"
#include "src/tracing/trace-event.h"
@@ -46,7 +48,7 @@ class OptimizingCompileDispatcher::CompileTask : public CancelableTask {
worker_thread_runtime_call_stats_(
isolate->counters()->worker_thread_runtime_call_stats()),
dispatcher_(dispatcher) {
- base::LockGuard<base::Mutex> lock_guard(&dispatcher_->ref_count_mutex_);
+ base::MutexGuard lock_guard(&dispatcher_->ref_count_mutex_);
++dispatcher_->ref_count_;
}
@@ -78,7 +80,7 @@ class OptimizingCompileDispatcher::CompileTask : public CancelableTask {
dispatcher_->CompileNext(dispatcher_->NextInput(true));
}
{
- base::LockGuard<base::Mutex> lock_guard(&dispatcher_->ref_count_mutex_);
+ base::MutexGuard lock_guard(&dispatcher_->ref_count_mutex_);
if (--dispatcher_->ref_count_ == 0) {
dispatcher_->ref_count_zero_.NotifyOne();
}
@@ -95,7 +97,7 @@ class OptimizingCompileDispatcher::CompileTask : public CancelableTask {
OptimizingCompileDispatcher::~OptimizingCompileDispatcher() {
#ifdef DEBUG
{
- base::LockGuard<base::Mutex> lock_guard(&ref_count_mutex_);
+ base::MutexGuard lock_guard(&ref_count_mutex_);
DCHECK_EQ(0, ref_count_);
}
#endif
@@ -105,14 +107,14 @@ OptimizingCompileDispatcher::~OptimizingCompileDispatcher() {
OptimizedCompilationJob* OptimizingCompileDispatcher::NextInput(
bool check_if_flushing) {
- base::LockGuard<base::Mutex> access_input_queue_(&input_queue_mutex_);
+ base::MutexGuard access_input_queue_(&input_queue_mutex_);
if (input_queue_length_ == 0) return nullptr;
OptimizedCompilationJob* job = input_queue_[InputQueueIndex(0)];
DCHECK_NOT_NULL(job);
input_queue_shift_ = InputQueueIndex(1);
input_queue_length_--;
if (check_if_flushing) {
- if (static_cast<ModeFlag>(base::Acquire_Load(&mode_)) == FLUSH) {
+ if (mode_ == FLUSH) {
AllowHandleDereference allow_handle_dereference;
DisposeCompilationJob(job, true);
return nullptr;
@@ -131,7 +133,7 @@ void OptimizingCompileDispatcher::CompileNext(OptimizedCompilationJob* job) {
// The function may have already been optimized by OSR. Simply continue.
// Use a mutex to make sure that functions marked for install
// are always also queued.
- base::LockGuard<base::Mutex> access_output_queue_(&output_queue_mutex_);
+ base::MutexGuard access_output_queue_(&output_queue_mutex_);
output_queue_.push(job);
isolate_->stack_guard()->RequestInstallCode();
}
@@ -140,7 +142,7 @@ void OptimizingCompileDispatcher::FlushOutputQueue(bool restore_function_code) {
for (;;) {
OptimizedCompilationJob* job = nullptr;
{
- base::LockGuard<base::Mutex> access_output_queue_(&output_queue_mutex_);
+ base::MutexGuard access_output_queue_(&output_queue_mutex_);
if (output_queue_.empty()) return;
job = output_queue_.front();
output_queue_.pop();
@@ -153,7 +155,7 @@ void OptimizingCompileDispatcher::FlushOutputQueue(bool restore_function_code) {
void OptimizingCompileDispatcher::Flush(BlockingBehavior blocking_behavior) {
if (blocking_behavior == BlockingBehavior::kDontBlock) {
if (FLAG_block_concurrent_recompilation) Unblock();
- base::LockGuard<base::Mutex> access_input_queue_(&input_queue_mutex_);
+ base::MutexGuard access_input_queue_(&input_queue_mutex_);
while (input_queue_length_ > 0) {
OptimizedCompilationJob* job = input_queue_[InputQueueIndex(0)];
DCHECK_NOT_NULL(job);
@@ -167,12 +169,12 @@ void OptimizingCompileDispatcher::Flush(BlockingBehavior blocking_behavior) {
}
return;
}
- base::Release_Store(&mode_, static_cast<base::AtomicWord>(FLUSH));
+ mode_ = FLUSH;
if (FLAG_block_concurrent_recompilation) Unblock();
{
- base::LockGuard<base::Mutex> lock_guard(&ref_count_mutex_);
+ base::MutexGuard lock_guard(&ref_count_mutex_);
while (ref_count_ > 0) ref_count_zero_.Wait(&ref_count_mutex_);
- base::Release_Store(&mode_, static_cast<base::AtomicWord>(COMPILE));
+ mode_ = COMPILE;
}
FlushOutputQueue(true);
if (FLAG_trace_concurrent_recompilation) {
@@ -181,12 +183,12 @@ void OptimizingCompileDispatcher::Flush(BlockingBehavior blocking_behavior) {
}
void OptimizingCompileDispatcher::Stop() {
- base::Release_Store(&mode_, static_cast<base::AtomicWord>(FLUSH));
+ mode_ = FLUSH;
if (FLAG_block_concurrent_recompilation) Unblock();
{
- base::LockGuard<base::Mutex> lock_guard(&ref_count_mutex_);
+ base::MutexGuard lock_guard(&ref_count_mutex_);
while (ref_count_ > 0) ref_count_zero_.Wait(&ref_count_mutex_);
- base::Release_Store(&mode_, static_cast<base::AtomicWord>(COMPILE));
+ mode_ = COMPILE;
}
if (recompilation_delay_ != 0) {
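
Flush() and Stop() above share one handshake: publish FLUSH, unblock the workers so they drain and discard their queues, wait on a condition variable until the task ref count drops to zero, then restore COMPILE. A reduced sketch of that handshake under those assumptions, using standard-library primitives and illustrative names:

#include <atomic>
#include <condition_variable>
#include <mutex>
#include <thread>
#include <vector>

enum ModeFlag { COMPILE, FLUSH };

std::atomic<ModeFlag> mode{COMPILE};
std::mutex ref_count_mutex;
std::condition_variable ref_count_zero;
int ref_count = 0;

void WorkerBody() {
  // A real worker compiles or discards queued jobs depending on mode;
  // here it just signs off by dropping its ref count.
  std::lock_guard<std::mutex> lock(ref_count_mutex);
  if (--ref_count == 0) ref_count_zero.notify_one();
}

void Flush() {
  mode.store(FLUSH);  // ask workers to discard rather than compile
  std::unique_lock<std::mutex> lock(ref_count_mutex);
  ref_count_zero.wait(lock, [] { return ref_count == 0; });
  mode.store(COMPILE);  // back to normal operation
}

int main() {
  {
    std::lock_guard<std::mutex> lock(ref_count_mutex);
    ref_count = 2;
  }
  std::vector<std::thread> workers;
  for (int i = 0; i < 2; ++i) workers.emplace_back(WorkerBody);
  Flush();
  for (auto& t : workers) t.join();
}
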
@@ -205,7 +207,7 @@ void OptimizingCompileDispatcher::InstallOptimizedFunctions() {
for (;;) {
OptimizedCompilationJob* job = nullptr;
{
- base::LockGuard<base::Mutex> access_output_queue_(&output_queue_mutex_);
+ base::MutexGuard access_output_queue_(&output_queue_mutex_);
if (output_queue_.empty()) return;
job = output_queue_.front();
output_queue_.pop();
@@ -230,7 +232,7 @@ void OptimizingCompileDispatcher::QueueForOptimization(
DCHECK(IsQueueAvailable());
{
// Add job to the back of the input queue.
- base::LockGuard<base::Mutex> access_input_queue(&input_queue_mutex_);
+ base::MutexGuard access_input_queue(&input_queue_mutex_);
DCHECK_LT(input_queue_length_, input_queue_capacity_);
input_queue_[InputQueueIndex(input_queue_length_)] = job;
input_queue_length_++;
diff --git a/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.h b/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.h
index deb7af99a4..1011808e99 100644
--- a/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.h
+++ b/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.h
@@ -5,10 +5,10 @@
#ifndef V8_COMPILER_DISPATCHER_OPTIMIZING_COMPILE_DISPATCHER_H_
#define V8_COMPILER_DISPATCHER_OPTIMIZING_COMPILE_DISPATCHER_H_
+#include <atomic>
#include <queue>
#include "src/allocation.h"
-#include "src/base/atomicops.h"
#include "src/base/platform/condition-variable.h"
#include "src/base/platform/mutex.h"
#include "src/base/platform/platform.h"
@@ -28,10 +28,10 @@ class V8_EXPORT_PRIVATE OptimizingCompileDispatcher {
input_queue_capacity_(FLAG_concurrent_recompilation_queue_length),
input_queue_length_(0),
input_queue_shift_(0),
+ mode_(COMPILE),
blocked_jobs_(0),
ref_count_(0),
recompilation_delay_(FLAG_concurrent_recompilation_delay) {
- base::Relaxed_Store(&mode_, static_cast<base::AtomicWord>(COMPILE));
input_queue_ = NewArray<OptimizedCompilationJob*>(input_queue_capacity_);
}
@@ -45,7 +45,7 @@ class V8_EXPORT_PRIVATE OptimizingCompileDispatcher {
void InstallOptimizedFunctions();
inline bool IsQueueAvailable() {
- base::LockGuard<base::Mutex> access_input_queue(&input_queue_mutex_);
+ base::MutexGuard access_input_queue(&input_queue_mutex_);
return input_queue_length_ < input_queue_capacity_;
}
@@ -82,7 +82,7 @@ class V8_EXPORT_PRIVATE OptimizingCompileDispatcher {
// different threads.
base::Mutex output_queue_mutex_;
- volatile base::AtomicWord mode_;
+ std::atomic<ModeFlag> mode_;
int blocked_jobs_;
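
The header change replaces a raw atomic word plus explicit Acquire_Load/Release_Store calls with a typed std::atomic<ModeFlag>, whose default sequentially consistent loads and stores are at least as strong as the acquire/release pairs they replace. A before/after sketch in plain C++ (the "before" shape is paraphrased, not V8's exact code):

#include <atomic>

enum ModeFlag { COMPILE, FLUSH };

// Before (approximate shape): volatile intptr_t mode_;
//   Release_Store(&mode_, static_cast<intptr_t>(FLUSH));
//   if (static_cast<ModeFlag>(Acquire_Load(&mode_)) == FLUSH) { ... }

// After: the type system carries the enum, and the ordering is implicit.
std::atomic<ModeFlag> mode_{COMPILE};

bool IsFlushing() { return mode_.load() == FLUSH; }
void BeginFlush() { mode_.store(FLUSH); }

int main() {
  BeginFlush();
  return IsFlushing() ? 0 : 1;
}
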
diff --git a/deps/v8/src/compiler-dispatcher/unoptimized-compile-job.cc b/deps/v8/src/compiler-dispatcher/unoptimized-compile-job.cc
deleted file mode 100644
index 59f4c3e8ff..0000000000
--- a/deps/v8/src/compiler-dispatcher/unoptimized-compile-job.cc
+++ /dev/null
@@ -1,91 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/compiler-dispatcher/unoptimized-compile-job.h"
-
-#include "src/assert-scope.h"
-#include "src/compiler-dispatcher/compiler-dispatcher-tracer.h"
-#include "src/compiler.h"
-#include "src/flags.h"
-#include "src/interpreter/interpreter.h"
-#include "src/isolate.h"
-#include "src/objects-inl.h"
-#include "src/parsing/parse-info.h"
-#include "src/parsing/parser.h"
-#include "src/parsing/scanner-character-streams.h"
-#include "src/unicode-cache.h"
-#include "src/unoptimized-compilation-info.h"
-#include "src/utils.h"
-
-namespace v8 {
-namespace internal {
-
-UnoptimizedCompileJob::UnoptimizedCompileJob(
- CompilerDispatcherTracer* tracer, AccountingAllocator* allocator,
- const ParseInfo* outer_parse_info, const AstRawString* function_name,
- const FunctionLiteral* function_literal,
- WorkerThreadRuntimeCallStats* worker_thread_runtime_stats,
- TimedHistogram* timer, size_t max_stack_size)
- : CompilerDispatcherJob(Type::kUnoptimizedCompile),
- tracer_(tracer),
- task_(new BackgroundCompileTask(allocator, outer_parse_info,
- function_name, function_literal,
- worker_thread_runtime_stats, timer,
- static_cast<int>(max_stack_size))) {}
-
-UnoptimizedCompileJob::~UnoptimizedCompileJob() {
- DCHECK(status() == Status::kInitial || status() == Status::kDone);
-}
-
-void UnoptimizedCompileJob::Compile(bool on_background_thread) {
- DCHECK_EQ(status(), Status::kInitial);
- COMPILER_DISPATCHER_TRACE_SCOPE_WITH_NUM(
- tracer_, kCompile,
- task_->info()->end_position() - task_->info()->start_position());
- task_->Run();
- set_status(Status::kReadyToFinalize);
-}
-
-void UnoptimizedCompileJob::FinalizeOnMainThread(
- Isolate* isolate, Handle<SharedFunctionInfo> shared) {
- DCHECK_EQ(ThreadId::Current().ToInteger(), isolate->thread_id().ToInteger());
- DCHECK_EQ(status(), Status::kReadyToFinalize);
- COMPILER_DISPATCHER_TRACE_SCOPE(tracer_, kFinalize);
-
- bool succeeded = Compiler::FinalizeBackgroundCompileTask(
- task_.get(), shared, isolate, Compiler::KEEP_EXCEPTION);
-
- ResetDataOnMainThread(isolate);
- set_status(succeeded ? Status::kDone : Status::kFailed);
-}
-
-void UnoptimizedCompileJob::ResetDataOnMainThread(Isolate* isolate) {
- DCHECK_EQ(ThreadId::Current().ToInteger(), isolate->thread_id().ToInteger());
- task_.reset();
-}
-
-void UnoptimizedCompileJob::ResetOnMainThread(Isolate* isolate) {
- ResetDataOnMainThread(isolate);
- set_status(Status::kInitial);
-}
-
-double UnoptimizedCompileJob::EstimateRuntimeOfNextStepInMs() const {
- switch (status()) {
- case Status::kInitial:
- return tracer_->EstimateCompileInMs(task_->info()->end_position() -
- task_->info()->start_position());
- case Status::kReadyToFinalize:
- // TODO(rmcilroy): Pass size of bytecode to tracer to get better estimate.
- return tracer_->EstimateFinalizeInMs();
-
- case Status::kFailed:
- case Status::kDone:
- return 0.0;
- }
-
- UNREACHABLE();
-}
-
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/compiler-dispatcher/unoptimized-compile-job.h b/deps/v8/src/compiler-dispatcher/unoptimized-compile-job.h
deleted file mode 100644
index 31a66e4eb4..0000000000
--- a/deps/v8/src/compiler-dispatcher/unoptimized-compile-job.h
+++ /dev/null
@@ -1,71 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_COMPILER_DISPATCHER_UNOPTIMIZED_COMPILE_JOB_H_
-#define V8_COMPILER_DISPATCHER_UNOPTIMIZED_COMPILE_JOB_H_
-
-#include <memory>
-
-#include "include/v8.h"
-#include "src/base/macros.h"
-#include "src/compiler-dispatcher/compiler-dispatcher-job.h"
-#include "src/globals.h"
-
-namespace v8 {
-namespace internal {
-
-class AccountingAllocator;
-class AstRawString;
-class AstValueFactory;
-class AstStringConstants;
-class BackgroundCompileTask;
-class CompilerDispatcherTracer;
-class DeferredHandles;
-class FunctionLiteral;
-class Isolate;
-class ParseInfo;
-class Parser;
-class SharedFunctionInfo;
-class String;
-class TimedHistogram;
-class UnicodeCache;
-class UnoptimizedCompilationJob;
-class WorkerThreadRuntimeCallStats;
-
-// TODO(rmcilroy): Remove this class entirely and just have CompilerDispatcher
-// manage BackgroundCompileTasks.
-class V8_EXPORT_PRIVATE UnoptimizedCompileJob : public CompilerDispatcherJob {
- public:
- // Creates a UnoptimizedCompileJob in the initial state.
- UnoptimizedCompileJob(
- CompilerDispatcherTracer* tracer, AccountingAllocator* allocator,
- const ParseInfo* outer_parse_info, const AstRawString* function_name,
- const FunctionLiteral* function_literal,
- WorkerThreadRuntimeCallStats* worker_thread_runtime_stats,
- TimedHistogram* timer, size_t max_stack_size);
- ~UnoptimizedCompileJob() override;
-
- // CompilerDispatcherJob implementation.
- void Compile(bool on_background_thread) override;
- void FinalizeOnMainThread(Isolate* isolate,
- Handle<SharedFunctionInfo> shared) override;
- void ResetOnMainThread(Isolate* isolate) override;
- double EstimateRuntimeOfNextStepInMs() const override;
-
- private:
- friend class CompilerDispatcherTest;
- friend class UnoptimizedCompileJobTest;
-
- void ResetDataOnMainThread(Isolate* isolate);
-
- CompilerDispatcherTracer* tracer_;
- std::unique_ptr<BackgroundCompileTask> task_;
-
- DISALLOW_COPY_AND_ASSIGN(UnoptimizedCompileJob);
-};
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_COMPILER_DISPATCHER_UNOPTIMIZED_COMPILE_JOB_H_
diff --git a/deps/v8/src/compiler.cc b/deps/v8/src/compiler.cc
index 7cb8a45696..a4a89d13ee 100644
--- a/deps/v8/src/compiler.cc
+++ b/deps/v8/src/compiler.cc
@@ -26,9 +26,11 @@
#include "src/interpreter/interpreter.h"
#include "src/isolate-inl.h"
#include "src/log-inl.h"
-#include "src/messages.h"
+#include "src/message-template.h"
+#include "src/objects/feedback-cell-inl.h"
#include "src/objects/map.h"
#include "src/optimized-compilation-info.h"
+#include "src/ostreams.h"
#include "src/parsing/parse-info.h"
#include "src/parsing/parser.h"
#include "src/parsing/parsing.h"
@@ -36,7 +38,6 @@
#include "src/parsing/scanner-character-streams.h"
#include "src/runtime-profiler.h"
#include "src/snapshot/code-serializer.h"
-#include "src/unicode-cache.h"
#include "src/unoptimized-compilation-info.h"
#include "src/vm-state-inl.h"
@@ -92,9 +93,9 @@ void LogFunctionCompilation(CodeEventListener::LogEventsAndTags tag,
int line_num = Script::GetLineNumber(script, shared->StartPosition()) + 1;
int column_num = Script::GetColumnNumber(script, shared->StartPosition()) + 1;
- String* script_name = script->name()->IsString()
- ? String::cast(script->name())
- : ReadOnlyRoots(isolate).empty_string();
+ String script_name = script->name()->IsString()
+ ? String::cast(script->name())
+ : ReadOnlyRoots(isolate).empty_string();
CodeEventListener::LogEventsAndTags log_tag =
Logger::ToNativeByScript(tag, *script);
PROFILE(isolate, CodeCreateEvent(log_tag, *abstract_code, *shared,
@@ -124,7 +125,7 @@ void LogFunctionCompilation(CodeEventListener::LogEventsAndTags tag,
shared->DebugName()));
}
-ScriptOriginOptions OriginOptionsForEval(Object* script) {
+ScriptOriginOptions OriginOptionsForEval(Object script) {
if (!script->IsScript()) return ScriptOriginOptions();
const auto outer_origin_options = Script::cast(script)->origin_options();
@@ -225,7 +226,6 @@ CompilationJob::Status OptimizedCompilationJob::ExecuteJob() {
CompilationJob::Status OptimizedCompilationJob::FinalizeJob(Isolate* isolate) {
DCHECK(ThreadId::Current().Equals(isolate->thread_id()));
- DisallowCodeDependencyChange no_dependency_change;
DisallowJavascriptExecution no_js(isolate);
// Delegate to the underlying implementation.
@@ -316,13 +316,8 @@ void InstallBytecodeArray(Handle<BytecodeArray> bytecode_array,
return;
}
- Handle<Code> code;
- {
- CodeSpaceMemoryModificationScope code_allocation(isolate->heap());
-
- code = isolate->factory()->CopyCode(
- BUILTIN_CODE(isolate, InterpreterEntryTrampoline));
- }
+ Handle<Code> code = isolate->factory()->CopyCode(Handle<Code>::cast(
+ isolate->factory()->interpreter_entry_trampoline_for_profiling()));
Handle<InterpreterData> interpreter_data = Handle<InterpreterData>::cast(
isolate->factory()->NewStruct(INTERPRETER_DATA_TYPE, TENURED));
@@ -338,9 +333,9 @@ void InstallBytecodeArray(Handle<BytecodeArray> bytecode_array,
Script::GetLineNumber(script, shared_info->StartPosition()) + 1;
int column_num =
Script::GetColumnNumber(script, shared_info->StartPosition()) + 1;
- String* script_name = script->name()->IsString()
- ? String::cast(script->name())
- : ReadOnlyRoots(isolate).empty_string();
+ String script_name = script->name()->IsString()
+ ? String::cast(script->name())
+ : ReadOnlyRoots(isolate).empty_string();
CodeEventListener::LogEventsAndTags log_tag = Logger::ToNativeByScript(
CodeEventListener::INTERPRETED_FUNCTION_TAG, *script);
PROFILE(isolate, CodeCreateEvent(log_tag, *abstract_code, *shared_info,
@@ -362,12 +357,18 @@ void InstallUnoptimizedCode(UnoptimizedCompilationInfo* compilation_info,
DCHECK(!compilation_info->has_asm_wasm_data());
DCHECK(!shared_info->HasFeedbackMetadata());
- Handle<FeedbackMetadata> feedback_metadata = FeedbackMetadata::New(
- isolate, compilation_info->feedback_vector_spec());
-
InstallBytecodeArray(compilation_info->bytecode_array(), shared_info,
parse_info, isolate);
- shared_info->set_feedback_metadata(*feedback_metadata);
+ if (FLAG_lite_mode) {
+ // Clear the feedback metadata field. In lite mode we don't need feedback
+ // metadata since we never allocate feedback vectors.
+ shared_info->set_raw_outer_scope_info_or_feedback_metadata(
+ ReadOnlyRoots(isolate).undefined_value());
+ } else {
+ Handle<FeedbackMetadata> feedback_metadata = FeedbackMetadata::New(
+ isolate, compilation_info->feedback_vector_spec());
+ shared_info->set_feedback_metadata(*feedback_metadata);
+ }
} else {
DCHECK(compilation_info->has_asm_wasm_data());
shared_info->set_asm_wasm_data(*compilation_info->asm_wasm_data());
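
The new branch skips feedback-metadata allocation entirely under lite mode, writing undefined into the overloaded shared-info slot instead, since lite mode never allocates the feedback vectors that would consume it. An illustrative sketch with invented stand-in types, where std::optional plays the role of the undefined-or-metadata slot:

#include <optional>

struct FeedbackMetadata { int slot_count; };

struct SharedInfo {
  // One overloaded slot: either outer scope info or feedback metadata.
  std::optional<FeedbackMetadata> feedback_metadata;
};

bool FLAG_lite_mode = true;  // assumption: stands in for V8's --lite-mode

void InstallFeedback(SharedInfo* shared, int spec_slot_count) {
  if (FLAG_lite_mode) {
    shared->feedback_metadata.reset();  // leave the field "undefined"
  } else {
    shared->feedback_metadata = FeedbackMetadata{spec_slot_count};
  }
}

int main() {
  SharedInfo shared;
  InstallFeedback(&shared, 4);
  return shared.feedback_metadata ? 1 : 0;
}
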
@@ -376,7 +377,8 @@ void InstallUnoptimizedCode(UnoptimizedCompilationInfo* compilation_info,
}
// Install coverage info on the shared function info.
- if (compilation_info->has_coverage_info()) {
+ if (compilation_info->has_coverage_info() &&
+ !shared_info->HasCoverageInfo()) {
DCHECK(isolate->is_block_code_coverage());
isolate->debug()->InstallCoverageInfo(shared_info,
compilation_info->coverage_info());
@@ -405,6 +407,7 @@ void SetSharedFunctionFlagsFromLiteral(FunctionLiteral* literal,
}
shared_info->set_has_duplicate_parameters(
literal->has_duplicate_parameters());
+ shared_info->set_is_oneshot_iife(literal->is_oneshot_iife());
shared_info->SetExpectedNofPropertiesFromEstimate(literal);
if (literal->dont_optimize_reason() != BailoutReason::kNoReason) {
shared_info->DisableOptimization(literal->dont_optimize_reason());
@@ -452,7 +455,7 @@ std::unique_ptr<UnoptimizedCompilationJob> ExecuteUnoptimizedCompileJobs(
// with a validation error or another error that could be solved by falling
// through to standard unoptimized compile.
}
- ZoneVector<FunctionLiteral*> eager_inner_literals(0, parse_info->zone());
+ std::vector<FunctionLiteral*> eager_inner_literals;
std::unique_ptr<UnoptimizedCompilationJob> job(
interpreter::Interpreter::NewCompilationJob(
parse_info, literal, allocator, &eager_inner_literals));
@@ -497,6 +500,70 @@ std::unique_ptr<UnoptimizedCompilationJob> GenerateUnoptimizedCode(
return outer_function_job;
}
+MaybeHandle<SharedFunctionInfo> GenerateUnoptimizedCodeForToplevel(
+ Isolate* isolate, ParseInfo* parse_info, AccountingAllocator* allocator,
+ IsCompiledScope* is_compiled_scope) {
+ EnsureSharedFunctionInfosArrayOnScript(parse_info, isolate);
+ parse_info->ast_value_factory()->Internalize(isolate);
+
+ if (!Compiler::Analyze(parse_info)) return MaybeHandle<SharedFunctionInfo>();
+ DeclarationScope::AllocateScopeInfos(parse_info, isolate);
+
+ // Prepare and execute compilation of the outer-most function.
+ // Create the SharedFunctionInfo and add it to the script's list.
+ Handle<Script> script = parse_info->script();
+ Handle<SharedFunctionInfo> top_level =
+ isolate->factory()->NewSharedFunctionInfoForLiteral(parse_info->literal(),
+ script, true);
+
+ std::vector<FunctionLiteral*> functions_to_compile;
+ functions_to_compile.push_back(parse_info->literal());
+
+ while (!functions_to_compile.empty()) {
+ FunctionLiteral* literal = functions_to_compile.back();
+ functions_to_compile.pop_back();
+ Handle<SharedFunctionInfo> shared_info =
+ Compiler::GetSharedFunctionInfo(literal, script, isolate);
+ if (shared_info->is_compiled()) continue;
+ if (UseAsmWasm(literal, parse_info->is_asm_wasm_broken())) {
+ std::unique_ptr<UnoptimizedCompilationJob> asm_job(
+ AsmJs::NewCompilationJob(parse_info, literal, allocator));
+ if (asm_job->ExecuteJob() == CompilationJob::SUCCEEDED &&
+ FinalizeUnoptimizedCompilationJob(asm_job.get(), shared_info,
+ isolate) ==
+ CompilationJob::SUCCEEDED) {
+ continue;
+ }
+ // asm.js validation failed, fall through to standard unoptimized compile.
+ // Note: we rely on the fact that AsmJs jobs have done all validation in
+ // the PrepareJob and ExecuteJob phases and can't fail in FinalizeJob
+ // with a validation error or another error that could be solved by falling
+ // through to standard unoptimized compile.
+ }
+
+ std::unique_ptr<UnoptimizedCompilationJob> job(
+ interpreter::Interpreter::NewCompilationJob(
+ parse_info, literal, allocator, &functions_to_compile));
+
+ if (job->ExecuteJob() == CompilationJob::FAILED ||
+ FinalizeUnoptimizedCompilationJob(job.get(), shared_info, isolate) ==
+ CompilationJob::FAILED) {
+ return MaybeHandle<SharedFunctionInfo>();
+ }
+
+ if (shared_info.is_identical_to(top_level)) {
+ // Ensure that the top level function is retained.
+ *is_compiled_scope = shared_info->is_compiled_scope();
+ DCHECK(is_compiled_scope->is_compiled());
+ }
+ }
+
+ // Character stream shouldn't be used again.
+ parse_info->ResetCharacterStream();
+
+ return top_level;
+}
+
bool FinalizeUnoptimizedCode(
ParseInfo* parse_info, Isolate* isolate,
Handle<SharedFunctionInfo> shared_info,
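
GenerateUnoptimizedCodeForToplevel above replaces recursive inner-function finalization with a worklist: compiling one literal may push eagerly compiled inner literals onto the same vector, which is drained until the whole function tree is done. A generic reduction of that loop, with invented names:

#include <iostream>
#include <string>
#include <vector>

struct FunctionLiteral {
  std::string name;
  std::vector<FunctionLiteral*> eager_inner;  // discovered during compilation
};

bool CompileOne(FunctionLiteral* literal,
                std::vector<FunctionLiteral*>* worklist) {
  std::cout << "compiling " << literal->name << "\n";
  // The real job appends inner literals it decides to compile eagerly.
  for (FunctionLiteral* inner : literal->eager_inner) {
    worklist->push_back(inner);
  }
  return true;  // CompilationJob::SUCCEEDED
}

bool CompileToplevelTree(FunctionLiteral* top_level) {
  std::vector<FunctionLiteral*> functions_to_compile{top_level};
  while (!functions_to_compile.empty()) {
    FunctionLiteral* literal = functions_to_compile.back();
    functions_to_compile.pop_back();
    if (!CompileOne(literal, &functions_to_compile)) return false;
  }
  return true;
}

int main() {
  FunctionLiteral inner{"inner", {}};
  FunctionLiteral top{"top", {&inner}};
  return CompileToplevelTree(&top) ? 0 : 1;
}
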
@@ -504,6 +571,9 @@ bool FinalizeUnoptimizedCode(
UnoptimizedCompilationJobList* inner_function_jobs) {
DCHECK(AllowCompilation::IsAllowed(isolate));
+ // TODO(rmcilroy): Clear native context in debug once AsmJS generation doesn't
+ // rely on accessing native context during finalization.
+
// Allocate scope infos for the literal.
DeclarationScope::AllocateScopeInfos(parse_info, isolate);
@@ -520,7 +590,6 @@ bool FinalizeUnoptimizedCode(
inner_job->compilation_info()->literal(), parse_info->script(),
isolate);
// The inner function might be compiled already if compiling for debug.
- // TODO(rmcilroy): Fix this and DCHECK !is_compiled() once Full-Codegen dies
if (inner_shared_info->is_compiled()) continue;
if (FinalizeUnoptimizedCompilationJob(inner_job.get(), inner_shared_info,
isolate) !=
@@ -546,13 +615,13 @@ V8_WARN_UNUSED_RESULT MaybeHandle<Code> GetCodeFromOptimizedCodeCache(
Handle<SharedFunctionInfo> shared(function->shared(), function->GetIsolate());
DisallowHeapAllocation no_gc;
if (osr_offset.IsNone()) {
- if (function->feedback_cell()->value()->IsFeedbackVector()) {
- FeedbackVector* feedback_vector = function->feedback_vector();
+ if (function->has_feedback_vector()) {
+ FeedbackVector feedback_vector = function->feedback_vector();
feedback_vector->EvictOptimizedCodeMarkedForDeoptimization(
function->shared(), "GetCodeFromOptimizedCodeCache");
- Code* code = feedback_vector->optimized_code();
+ Code code = feedback_vector->optimized_code();
- if (code != nullptr) {
+ if (!code.is_null()) {
// Caching of optimized code enabled and optimized code found.
DCHECK(!code->marked_for_deoptimization());
DCHECK(function->shared()->is_compiled());
@@ -676,6 +745,11 @@ MaybeHandle<Code> GetOptimizedCode(Handle<JSFunction> function,
function->ClearOptimizationMarker();
}
+ if (shared->optimization_disabled() &&
+ shared->disable_optimization_reason() == BailoutReason::kNeverOptimize) {
+ return MaybeHandle<Code>();
+ }
+
if (isolate->debug()->needs_check_on_function_call()) {
// Do not optimize when debugger needs to hook into every call.
return MaybeHandle<Code>();
@@ -718,15 +792,6 @@ MaybeHandle<Code> GetOptimizedCode(Handle<JSFunction> function,
return MaybeHandle<Code>();
}
- // Do not use TurboFan when %NeverOptimizeFunction was applied.
- if (shared->optimization_disabled() &&
- shared->disable_optimization_reason() ==
- BailoutReason::kOptimizationDisabledForTest) {
- compilation_info->AbortOptimization(
- BailoutReason::kOptimizationDisabledForTest);
- return MaybeHandle<Code>();
- }
-
// Do not use TurboFan if optimization is disabled or function doesn't pass
// turbo_filter.
if (!FLAG_opt || !shared->PassesFilter(FLAG_turbo_filter)) {
@@ -788,12 +853,32 @@ bool FailWithPendingException(Isolate* isolate, ParseInfo* parse_info,
return false;
}
+void FinalizeScriptCompilation(Isolate* isolate, ParseInfo* parse_info) {
+ Handle<Script> script = parse_info->script();
+ script->set_compilation_state(Script::COMPILATION_STATE_COMPILED);
+
+ // Register any pending parallel tasks with the associated SFI.
+ if (parse_info->parallel_tasks()) {
+ CompilerDispatcher* dispatcher = parse_info->parallel_tasks()->dispatcher();
+ for (auto& it : *parse_info->parallel_tasks()) {
+ FunctionLiteral* literal = it.first;
+ CompilerDispatcher::JobId job_id = it.second;
+ MaybeHandle<SharedFunctionInfo> maybe_shared_for_task =
+ script->FindSharedFunctionInfo(isolate, literal);
+ Handle<SharedFunctionInfo> shared_for_task;
+ if (maybe_shared_for_task.ToHandle(&shared_for_task)) {
+ dispatcher->RegisterSharedFunctionInfo(job_id, *shared_for_task);
+ } else {
+ dispatcher->AbortJob(job_id);
+ }
+ }
+ }
+}
+
MaybeHandle<SharedFunctionInfo> FinalizeTopLevel(
ParseInfo* parse_info, Isolate* isolate,
UnoptimizedCompilationJob* outer_function_job,
UnoptimizedCompilationJobList* inner_function_jobs) {
- Handle<Script> script = parse_info->script();
-
// Internalize ast values onto the heap.
parse_info->ast_value_factory()->Internalize(isolate);
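
FinalizeScriptCompilation's new loop walks the (literal, job id) pairs queued for parallel compilation and either binds each job to the SharedFunctionInfo found on the script or aborts it when the lookup fails. A sketch of that pass under simplified assumptions: a string-keyed map and an optional-returning lookup stand in for the real types.

#include <iostream>
#include <map>
#include <optional>
#include <string>

using JobId = int;

std::optional<std::string> FindSharedFunctionInfo(const std::string& literal) {
  if (literal == "lazy_fn") return "SFI(lazy_fn)";  // found on the script
  return std::nullopt;                              // e.g. never materialized
}

void FinalizeParallelTasks(const std::map<std::string, JobId>& parallel_tasks) {
  for (const auto& [literal, job_id] : parallel_tasks) {
    if (auto shared = FindSharedFunctionInfo(literal)) {
      std::cout << "register job " << job_id << " -> " << *shared << "\n";
    } else {
      std::cout << "abort job " << job_id << "\n";
    }
  }
}

int main() {
  FinalizeParallelTasks({{"lazy_fn", 1}, {"dead_fn", 2}});
}
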
@@ -814,15 +899,14 @@ MaybeHandle<SharedFunctionInfo> FinalizeTopLevel(
return MaybeHandle<SharedFunctionInfo>();
}
- if (!script.is_null()) {
- script->set_compilation_state(Script::COMPILATION_STATE_COMPILED);
- }
+ FinalizeScriptCompilation(isolate, parse_info);
return shared_info;
}
-MaybeHandle<SharedFunctionInfo> CompileToplevel(ParseInfo* parse_info,
- Isolate* isolate) {
+MaybeHandle<SharedFunctionInfo> CompileToplevel(
+ ParseInfo* parse_info, Isolate* isolate,
+ IsCompiledScope* is_compiled_scope) {
TimerEventScope<TimerEventCompileCode> top_level_timer(isolate);
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"), "V8.CompileCode");
DCHECK(ThreadId::Current().Equals(isolate->thread_id()));
@@ -848,18 +932,17 @@ MaybeHandle<SharedFunctionInfo> CompileToplevel(ParseInfo* parse_info,
parse_info->is_eval() ? "V8.CompileEval" : "V8.Compile");
// Generate the unoptimized bytecode or asm-js data.
- UnoptimizedCompilationJobList inner_function_jobs;
- std::unique_ptr<UnoptimizedCompilationJob> outer_function_job(
- GenerateUnoptimizedCode(parse_info, isolate->allocator(),
- &inner_function_jobs));
- if (!outer_function_job) {
+ MaybeHandle<SharedFunctionInfo> shared_info =
+ GenerateUnoptimizedCodeForToplevel(
+ isolate, parse_info, isolate->allocator(), is_compiled_scope);
+ if (shared_info.is_null()) {
FailWithPendingException(isolate, parse_info,
Compiler::ClearExceptionFlag::KEEP_EXCEPTION);
return MaybeHandle<SharedFunctionInfo>();
}
- return FinalizeTopLevel(parse_info, isolate, outer_function_job.get(),
- &inner_function_jobs);
+ FinalizeScriptCompilation(isolate, parse_info);
+ return shared_info;
}
std::unique_ptr<UnoptimizedCompilationJob> CompileOnBackgroundThread(
@@ -899,7 +982,6 @@ BackgroundCompileTask::BackgroundCompileTask(ScriptStreamingData* streamed_data,
LOG(isolate, ScriptEvent(Logger::ScriptEventType::kStreamingCompile,
info_->script_id()));
info_->set_toplevel();
- info_->set_unicode_cache(&unicode_cache_);
info_->set_allow_lazy_parsing();
if (V8_UNLIKELY(info_->block_coverage_enabled())) {
info_->AllocateSourceRangeMap();
@@ -927,8 +1009,6 @@ BackgroundCompileTask::BackgroundCompileTask(
DCHECK(outer_parse_info->is_toplevel());
DCHECK(!function_literal->is_toplevel());
- info_->set_unicode_cache(&unicode_cache_);
-
// Clone the character stream so both can be accessed independently.
std::unique_ptr<Utf16CharacterStream> character_stream =
outer_parse_info->character_stream()->Clone();
@@ -936,16 +1016,16 @@ BackgroundCompileTask::BackgroundCompileTask(
info_->set_character_stream(std::move(character_stream));
// Get preparsed scope data from the function literal.
- if (function_literal->produced_preparsed_scope_data()) {
- DCHECK(FLAG_preparser_scope_analysis);
- ZonePreParsedScopeData* serialized_data =
- function_literal->produced_preparsed_scope_data()->Serialize(
- info_->zone());
- info_->set_consumed_preparsed_scope_data(
- ConsumedPreParsedScopeData::For(info_->zone(), serialized_data));
+ if (function_literal->produced_preparse_data()) {
+ ZonePreparseData* serialized_data =
+ function_literal->produced_preparse_data()->Serialize(info_->zone());
+ info_->set_consumed_preparse_data(
+ ConsumedPreparseData::For(info_->zone(), serialized_data));
}
}
+BackgroundCompileTask::~BackgroundCompileTask() = default;
+
namespace {
// A scope object that ensures a parse info's runtime call stats, stack limit
@@ -1039,9 +1119,11 @@ bool Compiler::ParseAndAnalyze(ParseInfo* parse_info,
}
bool Compiler::Compile(Handle<SharedFunctionInfo> shared_info,
- ClearExceptionFlag flag) {
+ ClearExceptionFlag flag,
+ IsCompiledScope* is_compiled_scope) {
// We should never reach here if the function is already compiled.
DCHECK(!shared_info->is_compiled());
+ DCHECK(!is_compiled_scope->is_compiled());
Isolate* isolate = shared_info->GetIsolate();
DCHECK(AllowCompilation::IsAllowed(isolate));
@@ -1066,22 +1148,21 @@ bool Compiler::Compile(Handle<SharedFunctionInfo> shared_info,
if (!dispatcher->FinishNow(shared_info)) {
return FailWithPendingException(isolate, &parse_info, flag);
}
+ *is_compiled_scope = shared_info->is_compiled_scope();
+ DCHECK(is_compiled_scope->is_compiled());
return true;
}
- if (FLAG_preparser_scope_analysis) {
- if (shared_info->HasUncompiledDataWithPreParsedScope()) {
- parse_info.set_consumed_preparsed_scope_data(
- ConsumedPreParsedScopeData::For(
- isolate,
- handle(shared_info->uncompiled_data_with_pre_parsed_scope()
- ->pre_parsed_scope_data(),
- isolate)));
- }
+ if (shared_info->HasUncompiledDataWithPreparseData()) {
+ parse_info.set_consumed_preparse_data(ConsumedPreparseData::For(
+ isolate,
+ handle(
+ shared_info->uncompiled_data_with_preparse_data()->preparse_data(),
+ isolate)));
}
// Parse and update ParseInfo with the results.
- if (!parsing::ParseFunction(&parse_info, shared_info, isolate)) {
+ if (!parsing::ParseAny(&parse_info, shared_info, isolate)) {
return FailWithPendingException(isolate, &parse_info, flag);
}
@@ -1105,21 +1186,33 @@ bool Compiler::Compile(Handle<SharedFunctionInfo> shared_info,
}
DCHECK(!isolate->has_pending_exception());
+ *is_compiled_scope = shared_info->is_compiled_scope();
+ DCHECK(is_compiled_scope->is_compiled());
return true;
}
-bool Compiler::Compile(Handle<JSFunction> function, ClearExceptionFlag flag) {
+bool Compiler::Compile(Handle<JSFunction> function, ClearExceptionFlag flag,
+ IsCompiledScope* is_compiled_scope) {
// We should never reach here if the function is already compiled or optimized.
DCHECK(!function->is_compiled());
DCHECK(!function->IsOptimized());
DCHECK(!function->HasOptimizationMarker());
DCHECK(!function->HasOptimizedCode());
+ // Reset the JSFunction if we are recompiling due to the bytecode having been
+ // flushed.
+ function->ResetIfBytecodeFlushed();
+
Isolate* isolate = function->GetIsolate();
Handle<SharedFunctionInfo> shared_info = handle(function->shared(), isolate);
// Ensure shared function info is compiled.
- if (!shared_info->is_compiled() && !Compile(shared_info, flag)) return false;
+ *is_compiled_scope = shared_info->is_compiled_scope();
+ if (!is_compiled_scope->is_compiled() &&
+ !Compile(shared_info, flag, is_compiled_scope)) {
+ return false;
+ }
+ DCHECK(is_compiled_scope->is_compiled());
Handle<Code> code = handle(shared_info->GetCode(), isolate);
// Allocate FeedbackVector for the JSFunction.
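
The IsCompiledScope out-parameter threaded through Compiler::Compile lets callers keep freshly produced bytecode alive now that bytecode can be flushed: holding the scope pins the compilation result until the caller has used it. A loose model with invented types, where a weak/strong pointer pair stands in for the flushable bytecode slot:

#include <cassert>
#include <memory>

struct Bytecode { /* ... */ };

struct SharedInfo {
  std::weak_ptr<Bytecode> bytecode;  // flushable: may be dropped under pressure
};

class IsCompiledScope {
 public:
  IsCompiledScope() = default;
  explicit IsCompiledScope(const SharedInfo& shared)
      : retained_(shared.bytecode.lock()) {}
  bool is_compiled() const { return retained_ != nullptr; }

 private:
  std::shared_ptr<Bytecode> retained_;  // strong ref pins the bytecode
};

bool Compile(SharedInfo* shared, IsCompiledScope* is_compiled_scope,
             std::shared_ptr<Bytecode>* heap_slot) {
  *heap_slot = std::make_shared<Bytecode>();  // "compile"
  shared->bytecode = *heap_slot;
  *is_compiled_scope = IsCompiledScope(*shared);
  return is_compiled_scope->is_compiled();
}

int main() {
  SharedInfo shared;
  IsCompiledScope scope;
  std::shared_ptr<Bytecode> heap_slot;
  assert(Compile(&shared, &scope, &heap_slot));
  heap_slot.reset();            // simulate the flusher dropping its reference
  assert(scope.is_compiled());  // the scope still pins the bytecode
}
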
@@ -1221,7 +1314,8 @@ bool Compiler::CompileOptimized(Handle<JSFunction> function,
MaybeHandle<SharedFunctionInfo> Compiler::CompileForLiveEdit(
ParseInfo* parse_info, Isolate* isolate) {
- return CompileToplevel(parse_info, isolate);
+ IsCompiledScope is_compiled_scope;
+ return CompileToplevel(parse_info, isolate, &is_compiled_scope);
}
MaybeHandle<JSFunction> Compiler::GetFunctionFromEval(
@@ -1259,10 +1353,12 @@ MaybeHandle<JSFunction> Compiler::GetFunctionFromEval(
Handle<SharedFunctionInfo> shared_info;
Handle<Script> script;
+ IsCompiledScope is_compiled_scope;
bool allow_eval_cache;
if (eval_result.has_shared()) {
shared_info = Handle<SharedFunctionInfo>(eval_result.shared(), isolate);
script = Handle<Script>(Script::cast(shared_info->script()), isolate);
+ is_compiled_scope = shared_info->is_compiled_scope();
allow_eval_cache = true;
} else {
ParseInfo parse_info(isolate);
@@ -1296,7 +1392,8 @@ MaybeHandle<JSFunction> Compiler::GetFunctionFromEval(
}
DCHECK(!parse_info.is_module());
- if (!CompileToplevel(&parse_info, isolate).ToHandle(&shared_info)) {
+ if (!CompileToplevel(&parse_info, isolate, &is_compiled_scope)
+ .ToHandle(&shared_info)) {
return MaybeHandle<JSFunction>();
}
allow_eval_cache = parse_info.allow_eval_cache();
@@ -1316,7 +1413,7 @@ MaybeHandle<JSFunction> Compiler::GetFunctionFromEval(
JSFunction::EnsureFeedbackVector(result);
if (allow_eval_cache) {
// Make sure to cache this result.
- Handle<FeedbackCell> new_feedback_cell(result->feedback_cell(),
+ Handle<FeedbackCell> new_feedback_cell(result->raw_feedback_cell(),
isolate);
compilation_cache->PutEval(source, outer_info, context, shared_info,
new_feedback_cell, eval_scope_position);
@@ -1329,11 +1426,13 @@ MaybeHandle<JSFunction> Compiler::GetFunctionFromEval(
if (allow_eval_cache) {
// Add the SharedFunctionInfo and the LiteralsArray to the eval cache if
// we didn't retrieve from there.
- Handle<FeedbackCell> new_feedback_cell(result->feedback_cell(), isolate);
+ Handle<FeedbackCell> new_feedback_cell(result->raw_feedback_cell(),
+ isolate);
compilation_cache->PutEval(source, outer_info, context, shared_info,
new_feedback_cell, eval_scope_position);
}
}
+ DCHECK(is_compiled_scope.is_compiled());
return result;
}
@@ -1636,6 +1735,7 @@ MaybeHandle<SharedFunctionInfo> Compiler::GetSharedFunctionInfoForScript(
// Do a lookup in the compilation cache but not for extensions.
MaybeHandle<SharedFunctionInfo> maybe_result;
+ IsCompiledScope is_compiled_scope;
if (extension == nullptr) {
bool can_consume_code_cache =
compile_options == ScriptCompiler::kConsumeCodeCache;
@@ -1663,7 +1763,8 @@ MaybeHandle<SharedFunctionInfo> Compiler::GetSharedFunctionInfoForScript(
origin_options)
.ToHandle(&inner_result)) {
// Promote to per-isolate compilation cache.
- DCHECK(inner_result->is_compiled());
+ is_compiled_scope = inner_result->is_compiled_scope();
+ DCHECK(is_compiled_scope.is_compiled());
compilation_cache->PutScript(source, isolate->native_context(),
language_mode, inner_result);
Handle<Script> script(Script::cast(inner_result->script()), isolate);
@@ -1688,10 +1789,10 @@ MaybeHandle<SharedFunctionInfo> Compiler::GetSharedFunctionInfoForScript(
parse_info.set_language_mode(
stricter_language_mode(parse_info.language_mode(), language_mode));
- maybe_result = CompileToplevel(&parse_info, isolate);
+ maybe_result = CompileToplevel(&parse_info, isolate, &is_compiled_scope);
Handle<SharedFunctionInfo> result;
if (extension == nullptr && maybe_result.ToHandle(&result)) {
- DCHECK(result->is_compiled());
+ DCHECK(is_compiled_scope.is_compiled());
compilation_cache->PutScript(source, isolate->native_context(),
language_mode, result);
} else if (maybe_result.is_null() && natives != EXTENSION_CODE &&
@@ -1746,6 +1847,7 @@ MaybeHandle<JSFunction> Compiler::GetWrappedFunction(
Handle<SharedFunctionInfo> wrapped;
Handle<Script> script;
+ IsCompiledScope is_compiled_scope;
if (!maybe_result.ToHandle(&wrapped)) {
ParseInfo parse_info(isolate);
script = NewScript(isolate, &parse_info, source, script_details,
@@ -1762,12 +1864,13 @@ MaybeHandle<JSFunction> Compiler::GetWrappedFunction(
stricter_language_mode(parse_info.language_mode(), language_mode));
Handle<SharedFunctionInfo> top_level;
- maybe_result = CompileToplevel(&parse_info, isolate);
+ maybe_result = CompileToplevel(&parse_info, isolate, &is_compiled_scope);
if (maybe_result.is_null()) isolate->ReportPendingMessages();
ASSIGN_RETURN_ON_EXCEPTION(isolate, top_level, maybe_result, JSFunction);
SharedFunctionInfo::ScriptIterator infos(isolate, *script);
- while (SharedFunctionInfo* info = infos.Next()) {
+ for (SharedFunctionInfo info = infos.Next(); !info.is_null();
+ info = infos.Next()) {
if (info->is_wrapped()) {
wrapped = Handle<SharedFunctionInfo>(info, isolate);
break;
@@ -1775,11 +1878,13 @@ MaybeHandle<JSFunction> Compiler::GetWrappedFunction(
}
DCHECK(!wrapped.is_null());
} else {
+ is_compiled_scope = wrapped->is_compiled_scope();
script = Handle<Script>(Script::cast(wrapped->script()), isolate);
}
+ DCHECK(is_compiled_scope.is_compiled());
return isolate->factory()->NewFunctionFromSharedFunctionInfo(wrapped, context,
- NOT_TENURED);
+ NOT_TENURED);
}
MaybeHandle<SharedFunctionInfo>
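
The iterator rewrite just above reflects a migration visible throughout this diff: SharedFunctionInfo*, String*, Code*, and friends become small value types wrapping a tagged word, so absence is tested with is_null() rather than comparison against nullptr, and while-pointer loops become for-loops. A sketch with an invented stand-in type:

#include <cstdint>
#include <iostream>

class HeapObject {
 public:
  HeapObject() : ptr_(0) {}  // the null sentinel
  explicit HeapObject(uintptr_t ptr) : ptr_(ptr) {}
  bool is_null() const { return ptr_ == 0; }

 private:
  uintptr_t ptr_;  // tagged pointer carried by value
};

HeapObject Next(int& remaining) {  // iterator-style producer
  return remaining-- > 0 ? HeapObject(0x42) : HeapObject();
}

int main() {
  int remaining = 2;
  // Old style: while (HeapObject* o = it.Next()) { ... }
  // New style: loop until the value-type handle reads as null.
  for (HeapObject o = Next(remaining); !o.is_null(); o = Next(remaining)) {
    std::cout << "visit\n";
  }
}
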
@@ -1857,10 +1962,7 @@ Handle<SharedFunctionInfo> Compiler::GetSharedFunctionInfo(
// If we found an existing shared function info, return it.
Handle<SharedFunctionInfo> existing;
- if (maybe_existing.ToHandle(&existing)) {
- DCHECK(!existing->is_toplevel());
- return existing;
- }
+ if (maybe_existing.ToHandle(&existing)) return existing;
// Allocate a shared function info object which will be compiled lazily.
Handle<SharedFunctionInfo> result =
@@ -1938,38 +2040,36 @@ bool Compiler::FinalizeOptimizedCompilationJob(OptimizedCompilationJob* job,
void Compiler::PostInstantiation(Handle<JSFunction> function,
PretenureFlag pretenure) {
- Handle<SharedFunctionInfo> shared(function->shared(), function->GetIsolate());
-
- if (FLAG_always_opt && shared->allows_lazy_compilation() &&
- !shared->optimization_disabled() && !shared->HasAsmWasmData() &&
- shared->is_compiled()) {
- JSFunction::EnsureFeedbackVector(function);
-
- if (!function->IsOptimized()) {
- // Only mark for optimization if we don't already have optimized code.
- if (!function->HasOptimizedCode()) {
- function->MarkForOptimization(ConcurrencyMode::kNotConcurrent);
- }
- }
- }
+ Isolate* isolate = function->GetIsolate();
+ Handle<SharedFunctionInfo> shared(function->shared(), isolate);
+ IsCompiledScope is_compiled_scope(shared->is_compiled_scope());
- if (shared->is_compiled() && !shared->HasAsmWasmData()) {
+ // If code is compiled to bytecode (i.e., isn't asm.js), then allocate a
+ // feedback vector and check for optimized code.
+ if (is_compiled_scope.is_compiled() && shared->HasBytecodeArray()) {
JSFunction::EnsureFeedbackVector(function);
- Code* code = function->feedback_vector()->optimized_code();
- if (code != nullptr) {
+ Code code = function->has_feedback_vector()
+ ? function->feedback_vector()->optimized_code()
+ : Code();
+ if (!code.is_null()) {
// Caching of optimized code enabled and optimized code found.
DCHECK(!code->marked_for_deoptimization());
DCHECK(function->shared()->is_compiled());
function->set_code(code);
}
+
+ if (FLAG_always_opt && shared->allows_lazy_compilation() &&
+ !shared->optimization_disabled() && !function->IsOptimized() &&
+ !function->HasOptimizedCode()) {
+ function->MarkForOptimization(ConcurrencyMode::kNotConcurrent);
+ }
}
if (shared->is_toplevel() || shared->is_wrapped()) {
// If it's a top-level script, report compilation to the debugger.
- Handle<Script> script(
- handle(Script::cast(shared->script()), function->GetIsolate()));
- function->GetIsolate()->debug()->OnAfterCompile(script);
+ Handle<Script> script(Script::cast(shared->script()), isolate);
+ isolate->debug()->OnAfterCompile(script);
}
}
diff --git a/deps/v8/src/compiler.h b/deps/v8/src/compiler.h
index f32d771266..e33d9fdf04 100644
--- a/deps/v8/src/compiler.h
+++ b/deps/v8/src/compiler.h
@@ -13,7 +13,6 @@
#include "src/code-events.h"
#include "src/contexts.h"
#include "src/isolate.h"
-#include "src/unicode-cache.h"
#include "src/zone/zone.h"
namespace v8 {
@@ -22,6 +21,7 @@ namespace internal {
// Forward declarations.
class AstRawString;
class BackgroundCompileTask;
+class IsCompiledScope;
class JavaScriptFrame;
class OptimizedCompilationInfo;
class OptimizedCompilationJob;
@@ -58,8 +58,10 @@ class V8_EXPORT_PRIVATE Compiler : public AllStatic {
// given function holds (except for live-edit, which compiles the world).
static bool Compile(Handle<SharedFunctionInfo> shared,
- ClearExceptionFlag flag);
- static bool Compile(Handle<JSFunction> function, ClearExceptionFlag flag);
+ ClearExceptionFlag flag,
+ IsCompiledScope* is_compiled_scope);
+ static bool Compile(Handle<JSFunction> function, ClearExceptionFlag flag,
+ IsCompiledScope* is_compiled_scope);
static bool CompileOptimized(Handle<JSFunction> function, ConcurrencyMode);
V8_WARN_UNUSED_RESULT static MaybeHandle<SharedFunctionInfo>
@@ -316,13 +318,14 @@ class OptimizedCompilationJob : public CompilationJob {
const char* compiler_name_;
};
-class BackgroundCompileTask {
+class V8_EXPORT_PRIVATE BackgroundCompileTask {
public:
// Creates a new task that when run will parse and compile the streamed
// script associated with |data| and can be finalized with
// Compiler::GetSharedFunctionInfoForStreamedScript.
// Note: does not take ownership of |data|.
BackgroundCompileTask(ScriptStreamingData* data, Isolate* isolate);
+ ~BackgroundCompileTask();
// Creates a new task that when run will parse and compile the
// |function_literal| and can be finalized with
@@ -351,9 +354,6 @@ class BackgroundCompileTask {
// compilation starts.
std::unique_ptr<ParseInfo> info_;
std::unique_ptr<Parser> parser_;
- // TODO(rmcilroy): Consider having thread-local unicode-caches rather than
- // creating a new one each time.
- UnicodeCache unicode_cache_;
// Data needed for finalizing compilation after background compilation.
std::unique_ptr<UnoptimizedCompilationJob> outer_function_job_;
diff --git a/deps/v8/src/compiler/OWNERS b/deps/v8/src/compiler/OWNERS
index 40783a3511..8665788162 100644
--- a/deps/v8/src/compiler/OWNERS
+++ b/deps/v8/src/compiler/OWNERS
@@ -10,15 +10,11 @@ tebbi@chromium.org
neis@chromium.org
mvstanton@chromium.org
-# For backend
-bbudge@chromium.org
-gdeepti@chromium.org
-
per-file wasm-*=ahaas@chromium.org
+per-file wasm-*=bbudge@chromium.org
per-file wasm-*=binji@chromium.org
-per-file wasm-*=bradnelson@chromium.org
per-file wasm-*=clemensh@chromium.org
-per-file wasm-*=kschimpf@chromium.org
+per-file wasm-*=gdeepti@chromium.org
per-file int64-lowering.*=ahaas@chromium.org
diff --git a/deps/v8/src/compiler/access-builder.cc b/deps/v8/src/compiler/access-builder.cc
index a0648d0257..9d24c08dde 100644
--- a/deps/v8/src/compiler/access-builder.cc
+++ b/deps/v8/src/compiler/access-builder.cc
@@ -11,6 +11,8 @@
#include "src/heap/heap.h"
#include "src/objects-inl.h"
#include "src/objects/arguments.h"
+#include "src/objects/cell.h"
+#include "src/objects/heap-number.h"
#include "src/objects/js-collection.h"
#include "src/objects/js-generator.h"
#include "src/objects/module.h"
@@ -30,9 +32,12 @@ FieldAccess AccessBuilder::ForExternalTaggedValue() {
// static
FieldAccess AccessBuilder::ForExternalUint8Value() {
- FieldAccess access = {kUntaggedBase, 0,
- MaybeHandle<Name>(), MaybeHandle<Map>(),
- TypeCache::Get().kUint8, MachineType::Uint8(),
+ FieldAccess access = {kUntaggedBase,
+ 0,
+ MaybeHandle<Name>(),
+ MaybeHandle<Map>(),
+ TypeCache::Get()->kUint8,
+ MachineType::Uint8(),
kNoWriteBarrier};
return access;
}
@@ -50,8 +55,8 @@ FieldAccess AccessBuilder::ForMap() {
// static
FieldAccess AccessBuilder::ForHeapNumberValue() {
FieldAccess access = {
- kTaggedBase, HeapNumber::kValueOffset, MaybeHandle<Name>(),
- MaybeHandle<Map>(), TypeCache::Get().kFloat64, MachineType::Float64(),
+ kTaggedBase, HeapNumber::kValueOffset, MaybeHandle<Name>(),
+ MaybeHandle<Map>(), TypeCache::Get()->kFloat64, MachineType::Float64(),
kNoWriteBarrier};
return access;
}
@@ -59,8 +64,8 @@ FieldAccess AccessBuilder::ForHeapNumberValue() {
// static
FieldAccess AccessBuilder::ForBigIntBitfield() {
FieldAccess access = {
- kTaggedBase, BigInt::kBitfieldOffset, MaybeHandle<Name>(),
- MaybeHandle<Map>(), TypeCache::Get().kInt32, MachineType::IntPtr(),
+ kTaggedBase, BigInt::kBitfieldOffset, MaybeHandle<Name>(),
+ MaybeHandle<Map>(), TypeCache::Get()->kInt32, MachineType::Uint32(),
kNoWriteBarrier};
return access;
}
@@ -131,7 +136,7 @@ FieldAccess AccessBuilder::ForJSCollectionIteratorIndex() {
JSCollectionIterator::kIndexOffset,
MaybeHandle<Name>(),
MaybeHandle<Map>(),
- TypeCache::Get().kFixedArrayLengthType,
+ TypeCache::Get()->kFixedArrayLengthType,
MachineType::TaggedSigned(),
kNoWriteBarrier};
return access;
@@ -283,6 +288,16 @@ FieldAccess AccessBuilder::ForJSGeneratorObjectResumeMode() {
}
// static
+FieldAccess AccessBuilder::ForJSAsyncFunctionObjectPromise() {
+ FieldAccess access = {
+ kTaggedBase, JSAsyncFunctionObject::kPromiseOffset,
+ Handle<Name>(), MaybeHandle<Map>(),
+ Type::OtherObject(), MachineType::TaggedPointer(),
+ kFullWriteBarrier};
+ return access;
+}
+
+// static
FieldAccess AccessBuilder::ForJSAsyncGeneratorObjectQueue() {
FieldAccess access = {
kTaggedBase, JSAsyncGeneratorObject::kQueueOffset,
@@ -304,19 +319,19 @@ FieldAccess AccessBuilder::ForJSAsyncGeneratorObjectIsAwaiting() {
// static
FieldAccess AccessBuilder::ForJSArrayLength(ElementsKind elements_kind) {
- TypeCache const& type_cache = TypeCache::Get();
+ TypeCache const* type_cache = TypeCache::Get();
FieldAccess access = {kTaggedBase,
JSArray::kLengthOffset,
Handle<Name>(),
MaybeHandle<Map>(),
- type_cache.kJSArrayLengthType,
+ type_cache->kJSArrayLengthType,
MachineType::TaggedSigned(),
kFullWriteBarrier};
if (IsDoubleElementsKind(elements_kind)) {
- access.type = type_cache.kFixedDoubleArrayLengthType;
+ access.type = type_cache->kFixedDoubleArrayLengthType;
access.write_barrier_kind = kNoWriteBarrier;
} else if (IsFastElementsKind(elements_kind)) {
- access.type = type_cache.kFixedArrayLengthType;
+ access.type = type_cache->kFixedArrayLengthType;
access.write_barrier_kind = kNoWriteBarrier;
}
return access;
@@ -337,7 +352,7 @@ FieldAccess AccessBuilder::ForJSArrayBufferBackingStore() {
FieldAccess AccessBuilder::ForJSArrayBufferBitField() {
FieldAccess access = {
kTaggedBase, JSArrayBuffer::kBitFieldOffset, MaybeHandle<Name>(),
- MaybeHandle<Map>(), TypeCache::Get().kUint8, MachineType::Uint32(),
+ MaybeHandle<Map>(), TypeCache::Get()->kUint8, MachineType::Uint32(),
kNoWriteBarrier};
return access;
}
@@ -357,7 +372,7 @@ FieldAccess AccessBuilder::ForJSArrayBufferViewByteLength() {
JSArrayBufferView::kByteLengthOffset,
MaybeHandle<Name>(),
MaybeHandle<Map>(),
- TypeCache::Get().kJSArrayBufferViewByteLengthType,
+ TypeCache::Get()->kJSArrayBufferViewByteLengthType,
MachineType::UintPtr(),
kNoWriteBarrier};
return access;
@@ -369,7 +384,7 @@ FieldAccess AccessBuilder::ForJSArrayBufferViewByteOffset() {
JSArrayBufferView::kByteOffsetOffset,
MaybeHandle<Name>(),
MaybeHandle<Map>(),
- TypeCache::Get().kJSArrayBufferViewByteOffsetType,
+ TypeCache::Get()->kJSArrayBufferViewByteOffsetType,
MachineType::UintPtr(),
kNoWriteBarrier};
return access;
@@ -381,7 +396,7 @@ FieldAccess AccessBuilder::ForJSTypedArrayLength() {
JSTypedArray::kLengthOffset,
MaybeHandle<Name>(),
MaybeHandle<Map>(),
- TypeCache::Get().kJSTypedArrayLengthType,
+ TypeCache::Get()->kJSTypedArrayLengthType,
MachineType::TaggedSigned(),
kNoWriteBarrier};
return access;
@@ -393,7 +408,7 @@ FieldAccess AccessBuilder::ForJSDateValue() {
JSDate::kValueOffset,
MaybeHandle<Name>(),
MaybeHandle<Map>(),
- TypeCache::Get().kJSDateValueType,
+ TypeCache::Get()->kJSDateValueType,
MachineType::AnyTagged(),
kFullWriteBarrier};
return access;
@@ -402,7 +417,7 @@ FieldAccess AccessBuilder::ForJSDateValue() {
// static
FieldAccess AccessBuilder::ForJSDateField(JSDate::FieldIndex index) {
FieldAccess access = {
- kTaggedBase, JSDate::kValueOffset + index * kPointerSize,
+ kTaggedBase, JSDate::kValueOffset + index * kTaggedSize,
MaybeHandle<Name>(), MaybeHandle<Map>(),
Type::Number(), MachineType::AnyTagged(),
kFullWriteBarrier};
@@ -472,7 +487,7 @@ FieldAccess AccessBuilder::ForFixedArrayLength() {
FixedArray::kLengthOffset,
MaybeHandle<Name>(),
MaybeHandle<Map>(),
- TypeCache::Get().kFixedArrayLengthType,
+ TypeCache::Get()->kFixedArrayLengthType,
MachineType::TaggedSigned(),
kNoWriteBarrier};
return access;
@@ -524,8 +539,8 @@ FieldAccess AccessBuilder::ForDescriptorArrayEnumCache() {
// static
FieldAccess AccessBuilder::ForMapBitField() {
FieldAccess access = {
- kTaggedBase, Map::kBitFieldOffset, Handle<Name>(),
- MaybeHandle<Map>(), TypeCache::Get().kUint8, MachineType::Uint8(),
+ kTaggedBase, Map::kBitFieldOffset, Handle<Name>(),
+ MaybeHandle<Map>(), TypeCache::Get()->kUint8, MachineType::Uint8(),
kNoWriteBarrier};
return access;
}
@@ -533,8 +548,8 @@ FieldAccess AccessBuilder::ForMapBitField() {
// static
FieldAccess AccessBuilder::ForMapBitField2() {
FieldAccess access = {
- kTaggedBase, Map::kBitField2Offset, Handle<Name>(),
- MaybeHandle<Map>(), TypeCache::Get().kUint8, MachineType::Uint8(),
+ kTaggedBase, Map::kBitField2Offset, Handle<Name>(),
+ MaybeHandle<Map>(), TypeCache::Get()->kUint8, MachineType::Uint8(),
kNoWriteBarrier};
return access;
}
@@ -542,8 +557,8 @@ FieldAccess AccessBuilder::ForMapBitField2() {
// static
FieldAccess AccessBuilder::ForMapBitField3() {
FieldAccess access = {
- kTaggedBase, Map::kBitField3Offset, Handle<Name>(),
- MaybeHandle<Map>(), TypeCache::Get().kInt32, MachineType::Int32(),
+ kTaggedBase, Map::kBitField3Offset, Handle<Name>(),
+ MaybeHandle<Map>(), TypeCache::Get()->kInt32, MachineType::Int32(),
kNoWriteBarrier};
return access;
}
@@ -562,8 +577,8 @@ FieldAccess AccessBuilder::ForMapDescriptors() {
// static
FieldAccess AccessBuilder::ForMapInstanceType() {
FieldAccess access = {
- kTaggedBase, Map::kInstanceTypeOffset, Handle<Name>(),
- MaybeHandle<Map>(), TypeCache::Get().kUint16, MachineType::Uint16(),
+ kTaggedBase, Map::kInstanceTypeOffset, Handle<Name>(),
+ MaybeHandle<Map>(), TypeCache::Get()->kUint16, MachineType::Uint16(),
kNoWriteBarrier};
return access;
}
@@ -611,7 +626,7 @@ FieldAccess AccessBuilder::ForStringLength() {
String::kLengthOffset,
Handle<Name>(),
MaybeHandle<Map>(),
- TypeCache::Get().kStringLengthType,
+ TypeCache::Get()->kStringLengthType,
MachineType::Uint32(),
kNoWriteBarrier};
return access;
@@ -676,14 +691,14 @@ FieldAccess AccessBuilder::ForExternalStringResourceData() {
// static
ElementAccess AccessBuilder::ForExternalOneByteStringCharacter() {
- ElementAccess access = {kUntaggedBase, 0, TypeCache::Get().kUint8,
+ ElementAccess access = {kUntaggedBase, 0, TypeCache::Get()->kUint8,
MachineType::Uint8(), kNoWriteBarrier};
return access;
}
// static
ElementAccess AccessBuilder::ForExternalTwoByteStringCharacter() {
- ElementAccess access = {kUntaggedBase, 0, TypeCache::Get().kUint16,
+ ElementAccess access = {kUntaggedBase, 0, TypeCache::Get()->kUint16,
MachineType::Uint16(), kNoWriteBarrier};
return access;
}
@@ -691,7 +706,7 @@ ElementAccess AccessBuilder::ForExternalTwoByteStringCharacter() {
// static
ElementAccess AccessBuilder::ForSeqOneByteStringCharacter() {
ElementAccess access = {kTaggedBase, SeqOneByteString::kHeaderSize,
- TypeCache::Get().kUint8, MachineType::Uint8(),
+ TypeCache::Get()->kUint8, MachineType::Uint8(),
kNoWriteBarrier};
return access;
}
@@ -699,7 +714,7 @@ ElementAccess AccessBuilder::ForSeqOneByteStringCharacter() {
// static
ElementAccess AccessBuilder::ForSeqTwoByteStringCharacter() {
ElementAccess access = {kTaggedBase, SeqTwoByteString::kHeaderSize,
- TypeCache::Get().kUint16, MachineType::Uint16(),
+ TypeCache::Get()->kUint16, MachineType::Uint16(),
kNoWriteBarrier};
return access;
}
@@ -751,7 +766,7 @@ FieldAccess AccessBuilder::ForJSArrayIteratorNextIndex() {
JSArrayIterator::kNextIndexOffset,
Handle<Name>(),
MaybeHandle<Map>(),
- TypeCache::Get().kPositiveSafeInteger,
+ TypeCache::Get()->kPositiveSafeInteger,
MachineType::AnyTagged(),
kFullWriteBarrier};
return access;
@@ -763,7 +778,7 @@ FieldAccess AccessBuilder::ForJSArrayIteratorKind() {
JSArrayIterator::kKindOffset,
Handle<Name>(),
MaybeHandle<Map>(),
- TypeCache::Get().kJSArrayIteratorKindType,
+ TypeCache::Get()->kJSArrayIteratorKindType,
MachineType::TaggedSigned(),
kNoWriteBarrier};
return access;
@@ -784,7 +799,7 @@ FieldAccess AccessBuilder::ForJSStringIteratorIndex() {
JSStringIterator::kNextIndexOffset,
Handle<Name>(),
MaybeHandle<Map>(),
- TypeCache::Get().kStringLengthType,
+ TypeCache::Get()->kStringLengthType,
MachineType::TaggedSigned(),
kNoWriteBarrier};
return access;
@@ -845,7 +860,7 @@ FieldAccess AccessBuilder::ForCellValue() {
// static
FieldAccess AccessBuilder::ForContextSlot(size_t index) {
- int offset = Context::kHeaderSize + static_cast<int>(index) * kPointerSize;
+ int offset = Context::OffsetOfElementAt(static_cast<int>(index));
DCHECK_EQ(offset,
Context::SlotOffset(static_cast<int>(index)) + kHeapObjectTag);
FieldAccess access = {kTaggedBase, offset,
@@ -875,7 +890,7 @@ ElementAccess AccessBuilder::ForFixedArrayElement(
access.write_barrier_kind = kNoWriteBarrier;
break;
case HOLEY_SMI_ELEMENTS:
- access.type = TypeCache::Get().kHoleySmi;
+ access.type = TypeCache::Get()->kHoleySmi;
break;
case PACKED_ELEMENTS:
access.type = Type::NonInternal();
@@ -902,7 +917,7 @@ ElementAccess AccessBuilder::ForFixedArrayElement(
// static
ElementAccess AccessBuilder::ForFixedDoubleArrayElement() {
ElementAccess access = {kTaggedBase, FixedDoubleArray::kHeaderSize,
- TypeCache::Get().kFloat64, MachineType::Float64(),
+ TypeCache::Get()->kFloat64, MachineType::Float64(),
kNoWriteBarrier};
return access;
}
@@ -1026,11 +1041,13 @@ FieldAccess AccessBuilder::ForHashTableBaseCapacity() {
}
// static
-FieldAccess AccessBuilder::ForOrderedHashTableBaseNextTable() {
+FieldAccess AccessBuilder::ForOrderedHashMapOrSetNextTable() {
// TODO(turbofan): This will be redundant with the HashTableBase
// methods above once the hash table unification is done.
+ STATIC_ASSERT(OrderedHashMap::NextTableOffset() ==
+ OrderedHashSet::NextTableOffset());
FieldAccess const access = {
- kTaggedBase, OrderedHashTableBase::kNextTableOffset,
+ kTaggedBase, OrderedHashMap::NextTableOffset(),
MaybeHandle<Name>(), MaybeHandle<Map>(),
Type::Any(), MachineType::AnyTagged(),
kFullWriteBarrier};
@@ -1038,43 +1055,48 @@ FieldAccess AccessBuilder::ForOrderedHashTableBaseNextTable() {
}
// static
-FieldAccess AccessBuilder::ForOrderedHashTableBaseNumberOfBuckets() {
+FieldAccess AccessBuilder::ForOrderedHashMapOrSetNumberOfBuckets() {
// TODO(turbofan): This will be redundant with the HashTableBase
// methods above once the hash table unification is done.
+ STATIC_ASSERT(OrderedHashMap::NumberOfBucketsOffset() ==
+ OrderedHashSet::NumberOfBucketsOffset());
FieldAccess const access = {kTaggedBase,
- OrderedHashTableBase::kNumberOfBucketsOffset,
+ OrderedHashMap::NumberOfBucketsOffset(),
MaybeHandle<Name>(),
MaybeHandle<Map>(),
- TypeCache::Get().kFixedArrayLengthType,
+ TypeCache::Get()->kFixedArrayLengthType,
MachineType::TaggedSigned(),
kNoWriteBarrier};
return access;
}
// static
-FieldAccess AccessBuilder::ForOrderedHashTableBaseNumberOfDeletedElements() {
+FieldAccess AccessBuilder::ForOrderedHashMapOrSetNumberOfDeletedElements() {
// TODO(turbofan): This will be redundant with the HashTableBase
// methods above once the hash table unification is done.
- FieldAccess const access = {
- kTaggedBase,
- OrderedHashTableBase::kNumberOfDeletedElementsOffset,
- MaybeHandle<Name>(),
- MaybeHandle<Map>(),
- TypeCache::Get().kFixedArrayLengthType,
- MachineType::TaggedSigned(),
- kNoWriteBarrier};
+ STATIC_ASSERT(OrderedHashMap::NumberOfDeletedElementsOffset() ==
+ OrderedHashSet::NumberOfDeletedElementsOffset());
+ FieldAccess const access = {kTaggedBase,
+ OrderedHashMap::NumberOfDeletedElementsOffset(),
+ MaybeHandle<Name>(),
+ MaybeHandle<Map>(),
+ TypeCache::Get()->kFixedArrayLengthType,
+ MachineType::TaggedSigned(),
+ kNoWriteBarrier};
return access;
}
// static
-FieldAccess AccessBuilder::ForOrderedHashTableBaseNumberOfElements() {
+FieldAccess AccessBuilder::ForOrderedHashMapOrSetNumberOfElements() {
// TODO(turbofan): This will be redundant with the HashTableBase
// methods above once the hash table unification is done.
+ STATIC_ASSERT(OrderedHashMap::NumberOfElementsOffset() ==
+ OrderedHashSet::NumberOfElementsOffset());
FieldAccess const access = {kTaggedBase,
- OrderedHashTableBase::kNumberOfElementsOffset,
+ OrderedHashMap::NumberOfElementsOffset(),
MaybeHandle<Name>(),
MaybeHandle<Map>(),
- TypeCache::Get().kFixedArrayLengthType,
+ TypeCache::Get()->kFixedArrayLengthType,
MachineType::TaggedSigned(),
kNoWriteBarrier};
return access;
@@ -1083,8 +1105,8 @@ FieldAccess AccessBuilder::ForOrderedHashTableBaseNumberOfElements() {
// static
ElementAccess AccessBuilder::ForOrderedHashMapEntryValue() {
ElementAccess const access = {kTaggedBase,
- OrderedHashMap::kHashTableStartOffset +
- OrderedHashMap::kValueOffset * kPointerSize,
+ OrderedHashMap::HashTableStartOffset() +
+ OrderedHashMap::kValueOffset * kTaggedSize,
Type::Any(), MachineType::AnyTagged(),
kFullWriteBarrier};
return access;
diff --git a/deps/v8/src/compiler/access-builder.h b/deps/v8/src/compiler/access-builder.h
index 889a139a38..56bc5afe89 100644
--- a/deps/v8/src/compiler/access-builder.h
+++ b/deps/v8/src/compiler/access-builder.h
@@ -107,6 +107,9 @@ class V8_EXPORT_PRIVATE AccessBuilder final
// Provides access to JSGeneratorObject::resume_mode() field.
static FieldAccess ForJSGeneratorObjectResumeMode();
+ // Provides access to JSAsyncFunctionObject::promise() field.
+ static FieldAccess ForJSAsyncFunctionObjectPromise();
+
// Provides access to JSAsyncGeneratorObject::queue() field.
static FieldAccess ForJSAsyncGeneratorObjectQueue();
@@ -299,11 +302,11 @@ class V8_EXPORT_PRIVATE AccessBuilder final
static FieldAccess ForHashTableBaseNumberOfDeletedElement();
static FieldAccess ForHashTableBaseCapacity();
- // Provides access to OrderedHashTableBase fields.
- static FieldAccess ForOrderedHashTableBaseNextTable();
- static FieldAccess ForOrderedHashTableBaseNumberOfBuckets();
- static FieldAccess ForOrderedHashTableBaseNumberOfElements();
- static FieldAccess ForOrderedHashTableBaseNumberOfDeletedElements();
+ // Provides access to OrderedHashMapOrSet fields.
+ static FieldAccess ForOrderedHashMapOrSetNextTable();
+ static FieldAccess ForOrderedHashMapOrSetNumberOfBuckets();
+ static FieldAccess ForOrderedHashMapOrSetNumberOfElements();
+ static FieldAccess ForOrderedHashMapOrSetNumberOfDeletedElements();
// Provides access to OrderedHashMap elements.
static ElementAccess ForOrderedHashMapEntryValue();
diff --git a/deps/v8/src/compiler/access-info.cc b/deps/v8/src/compiler/access-info.cc
index 5bf515f654..4e67c35cdf 100644
--- a/deps/v8/src/compiler/access-info.cc
+++ b/deps/v8/src/compiler/access-info.cc
@@ -13,7 +13,9 @@
#include "src/field-type.h"
#include "src/ic/call-optimization.h"
#include "src/objects-inl.h"
+#include "src/objects/cell-inl.h"
#include "src/objects/module-inl.h"
+#include "src/objects/struct-inl.h"
#include "src/objects/templates.h"
namespace v8 {
@@ -245,10 +247,10 @@ Handle<Cell> PropertyAccessInfo::export_cell() const {
return Handle<Cell>::cast(constant_);
}
-AccessInfoFactory::AccessInfoFactory(JSHeapBroker* js_heap_broker,
+AccessInfoFactory::AccessInfoFactory(JSHeapBroker* broker,
CompilationDependencies* dependencies,
Handle<Context> native_context, Zone* zone)
- : js_heap_broker_(js_heap_broker),
+ : broker_(broker),
dependencies_(dependencies),
native_context_(native_context),
isolate_(native_context->GetIsolate()),
@@ -302,11 +304,11 @@ bool AccessInfoFactory::ComputeElementAccessInfos(
for (Handle<Map> map : maps) {
if (Map::TryUpdate(isolate(), map).ToHandle(&map)) {
// Don't generate elements kind transitions from stable maps.
- Map* transition_target =
- map->is_stable() ? nullptr
- : map->FindElementsKindTransitionedMap(
- isolate(), possible_transition_targets);
- if (transition_target == nullptr) {
+ Map transition_target = map->is_stable()
+ ? Map()
+ : map->FindElementsKindTransitionedMap(
+ isolate(), possible_transition_targets);
+ if (transition_target.is_null()) {
receiver_maps.push_back(map);
} else {
transitions.push_back(
@@ -339,15 +341,14 @@ bool AccessInfoFactory::ComputeElementAccessInfos(
bool AccessInfoFactory::ComputePropertyAccessInfo(
Handle<Map> map, Handle<Name> name, AccessMode access_mode,
PropertyAccessInfo* access_info) {
+ CHECK(name->IsUniqueName());
+
// Check if it is safe to inline property access for the {map}.
if (!CanInlinePropertyAccess(map)) return false;
// Compute the receiver type.
Handle<Map> receiver_map = map;
- // Property lookups require the name to be internalized.
- name = isolate()->factory()->InternalizeName(name);
-
// We support fast inline cases for certain JSObject getters.
if (access_mode == AccessMode::kLoad &&
LookupSpecialFieldAccessor(map, name, access_info)) {
@@ -389,7 +390,7 @@ bool AccessInfoFactory::ComputePropertyAccessInfo(
field_type = Type::SignedSmall();
field_representation = MachineRepresentation::kTaggedSigned;
} else if (details_representation.IsDouble()) {
- field_type = type_cache_.kFloat64;
+ field_type = type_cache_->kFloat64;
field_representation = MachineRepresentation::kFloat64;
} else if (details_representation.IsHeapObject()) {
// Extract the field type from the property details (make sure its
@@ -404,12 +405,12 @@ bool AccessInfoFactory::ComputePropertyAccessInfo(
// The field type was cleared by the GC, so we don't know anything
// about the contents now.
} else if (descriptors_field_type->IsClass()) {
- MapRef map_ref(js_heap_broker(), map);
+ MapRef map_ref(broker(), map);
map_ref.SerializeOwnDescriptors(); // TODO(neis): Remove later.
dependencies()->DependOnFieldType(map_ref, number);
// Remember the field map, and try to infer a useful type.
Handle<Map> map(descriptors_field_type->AsClass(), isolate());
- field_type = Type::For(MapRef(js_heap_broker(), map));
+ field_type = Type::For(MapRef(broker(), map));
field_map = MaybeHandle<Map>(map);
}
}
@@ -435,8 +436,8 @@ bool AccessInfoFactory::ComputePropertyAccessInfo(
DCHECK_EQ(kAccessor, details.kind());
if (map->instance_type() == JS_MODULE_NAMESPACE_TYPE) {
DCHECK(map->is_prototype_map());
- Handle<PrototypeInfo> proto_info =
- Map::GetOrCreatePrototypeInfo(map, isolate());
+ Handle<PrototypeInfo> proto_info(
+ PrototypeInfo::cast(map->prototype_info()), isolate());
Handle<JSModuleNamespace> module_namespace(
JSModuleNamespace::cast(proto_info->module_namespace()),
isolate());
@@ -501,7 +502,7 @@ bool AccessInfoFactory::ComputePropertyAccessInfo(
// Don't search on the prototype chain for special indices in case of
// integer indexed exotic objects (see ES6 section 9.4.5).
if (map->IsJSTypedArrayMap() && name->IsString() &&
- IsSpecialIndex(isolate()->unicode_cache(), String::cast(*name))) {
+ IsSpecialIndex(String::cast(*name))) {
return false;
}
@@ -650,13 +651,13 @@ bool AccessInfoFactory::LookupSpecialFieldAccessor(
// in case of other fast elements, and [0, kMaxUInt32] in
// case of other arrays.
if (IsDoubleElementsKind(map->elements_kind())) {
- field_type = type_cache_.kFixedDoubleArrayLengthType;
+ field_type = type_cache_->kFixedDoubleArrayLengthType;
field_representation = MachineRepresentation::kTaggedSigned;
} else if (IsFastElementsKind(map->elements_kind())) {
- field_type = type_cache_.kFixedArrayLengthType;
+ field_type = type_cache_->kFixedArrayLengthType;
field_representation = MachineRepresentation::kTaggedSigned;
} else {
- field_type = type_cache_.kJSArrayLengthType;
+ field_type = type_cache_->kJSArrayLengthType;
}
}
// Special fields are always mutable.
@@ -673,9 +674,9 @@ bool AccessInfoFactory::LookupTransition(Handle<Map> map, Handle<Name> name,
MaybeHandle<JSObject> holder,
PropertyAccessInfo* access_info) {
// Check if the {map} has a data transition with the given {name}.
- Map* transition =
+ Map transition =
TransitionsAccessor(isolate(), map).SearchTransition(*name, kData, NONE);
- if (transition == nullptr) return false;
+ if (transition.is_null()) return false;
Handle<Map> transition_map(transition, isolate());
int const number = transition_map->LastAdded();
@@ -696,7 +697,7 @@ bool AccessInfoFactory::LookupTransition(Handle<Map> map, Handle<Name> name,
field_type = Type::SignedSmall();
field_representation = MachineRepresentation::kTaggedSigned;
} else if (details_representation.IsDouble()) {
- field_type = type_cache_.kFloat64;
+ field_type = type_cache_->kFloat64;
field_representation = MachineRepresentation::kFloat64;
} else if (details_representation.IsHeapObject()) {
// Extract the field type from the property details (make sure its
@@ -709,17 +710,17 @@ bool AccessInfoFactory::LookupTransition(Handle<Map> map, Handle<Name> name,
// Store is not safe if the field type was cleared.
return false;
} else if (descriptors_field_type->IsClass()) {
- MapRef transition_map_ref(js_heap_broker(), transition_map);
+ MapRef transition_map_ref(broker(), transition_map);
transition_map_ref
.SerializeOwnDescriptors(); // TODO(neis): Remove later.
dependencies()->DependOnFieldType(transition_map_ref, number);
// Remember the field map, and try to infer a useful type.
Handle<Map> map(descriptors_field_type->AsClass(), isolate());
- field_type = Type::For(MapRef(js_heap_broker(), map));
+ field_type = Type::For(MapRef(broker(), map));
field_map = MaybeHandle<Map>(map);
}
}
- dependencies()->DependOnTransition(MapRef(js_heap_broker(), transition_map));
+ dependencies()->DependOnTransition(MapRef(broker(), transition_map));
// Transitioning stores are never stores to constant fields.
*access_info = PropertyAccessInfo::DataField(
PropertyConstness::kMutable, MapHandles{map}, field_index,
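A recurring change in access-info.cc is that Map is handled as a value type: a failed search now yields a default-constructed Map tested with is_null(), where the old code compared a Map* against nullptr. A rough sketch of the idiom, using a hypothetical word-sized wrapper rather than V8's real Map:

    #include <cassert>
    #include <cstdint>

    // Hypothetical tagged-handle wrapper standing in for the Map value type.
    class MapHandle {
     public:
      MapHandle() : ptr_(0) {}  // the "null" handle
      explicit MapHandle(uintptr_t p) : ptr_(p) {}
      bool is_null() const { return ptr_ == 0; }
     private:
      uintptr_t ptr_;  // copied by value; no raw pointer escapes
    };

    // Failed searches return a default-constructed handle, not nullptr.
    MapHandle FindTransition(bool found) {
      return found ? MapHandle(0x1000) : MapHandle();
    }

    int main() {
      MapHandle t = FindTransition(false);
      assert(t.is_null());  // the new-style check from the hunks above
      return 0;
    }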
diff --git a/deps/v8/src/compiler/access-info.h b/deps/v8/src/compiler/access-info.h
index 9d6828ee69..4673ce9306 100644
--- a/deps/v8/src/compiler/access-info.h
+++ b/deps/v8/src/compiler/access-info.h
@@ -143,8 +143,7 @@ class PropertyAccessInfo final {
// Factory class for {ElementAccessInfo}s and {PropertyAccessInfo}s.
class AccessInfoFactory final {
public:
- AccessInfoFactory(JSHeapBroker* js_heap_broker,
- CompilationDependencies* dependencies,
+ AccessInfoFactory(JSHeapBroker* broker, CompilationDependencies* dependencies,
Handle<Context> native_context, Zone* zone);
@@ -172,17 +171,17 @@ class AccessInfoFactory final {
PropertyAccessInfo* access_info);
CompilationDependencies* dependencies() const { return dependencies_; }
- JSHeapBroker* js_heap_broker() const { return js_heap_broker_; }
+ JSHeapBroker* broker() const { return broker_; }
Factory* factory() const;
Isolate* isolate() const { return isolate_; }
Handle<Context> native_context() const { return native_context_; }
Zone* zone() const { return zone_; }
- JSHeapBroker* const js_heap_broker_;
+ JSHeapBroker* const broker_;
CompilationDependencies* const dependencies_;
Handle<Context> const native_context_;
Isolate* const isolate_;
- TypeCache const& type_cache_;
+ TypeCache const* type_cache_;
Zone* const zone_;
DISALLOW_COPY_AND_ASSIGN(AccessInfoFactory);
diff --git a/deps/v8/src/compiler/allocation-builder-inl.h b/deps/v8/src/compiler/allocation-builder-inl.h
new file mode 100644
index 0000000000..5da6fd4f3b
--- /dev/null
+++ b/deps/v8/src/compiler/allocation-builder-inl.h
@@ -0,0 +1,48 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_ALLOCATION_BUILDER_INL_H_
+#define V8_COMPILER_ALLOCATION_BUILDER_INL_H_
+
+#include "src/compiler/allocation-builder.h"
+
+#include "src/compiler/access-builder.h"
+#include "src/objects/map-inl.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+void AllocationBuilder::AllocateContext(int variadic_part_length,
+ Handle<Map> map) {
+ DCHECK(
+ IsInRange(map->instance_type(), FIRST_CONTEXT_TYPE, LAST_CONTEXT_TYPE));
+ DCHECK_NE(NATIVE_CONTEXT_TYPE, map->instance_type());
+ int size = Context::SizeFor(variadic_part_length);
+ Allocate(size, NOT_TENURED, Type::OtherInternal());
+ Store(AccessBuilder::ForMap(), map);
+ STATIC_ASSERT(static_cast<int>(Context::kLengthOffset) ==
+ static_cast<int>(FixedArray::kLengthOffset));
+ Store(AccessBuilder::ForFixedArrayLength(),
+ jsgraph()->Constant(variadic_part_length));
+}
+
+// Compound allocation of a FixedArray.
+void AllocationBuilder::AllocateArray(int length, Handle<Map> map,
+ PretenureFlag pretenure) {
+ DCHECK(map->instance_type() == FIXED_ARRAY_TYPE ||
+ map->instance_type() == FIXED_DOUBLE_ARRAY_TYPE);
+ int size = (map->instance_type() == FIXED_ARRAY_TYPE)
+ ? FixedArray::SizeFor(length)
+ : FixedDoubleArray::SizeFor(length);
+ Allocate(size, pretenure, Type::OtherInternal());
+ Store(AccessBuilder::ForMap(), map);
+ Store(AccessBuilder::ForFixedArrayLength(), jsgraph()->Constant(length));
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_ALLOCATION_BUILDER_INL_H_
diff --git a/deps/v8/src/compiler/allocation-builder.h b/deps/v8/src/compiler/allocation-builder.h
index 6943e3ae78..0997e9fa26 100644
--- a/deps/v8/src/compiler/allocation-builder.h
+++ b/deps/v8/src/compiler/allocation-builder.h
@@ -49,27 +49,11 @@ class AllocationBuilder final {
}
// Compound allocation of a context.
- void AllocateContext(int length, Handle<Map> map) {
- DCHECK(map->instance_type() >= AWAIT_CONTEXT_TYPE &&
- map->instance_type() <= WITH_CONTEXT_TYPE);
- int size = FixedArray::SizeFor(length);
- Allocate(size, NOT_TENURED, Type::OtherInternal());
- Store(AccessBuilder::ForMap(), map);
- Store(AccessBuilder::ForFixedArrayLength(), jsgraph()->Constant(length));
- }
+ inline void AllocateContext(int variadic_part_length, Handle<Map> map);
// Compound allocation of a FixedArray.
- void AllocateArray(int length, Handle<Map> map,
- PretenureFlag pretenure = NOT_TENURED) {
- DCHECK(map->instance_type() == FIXED_ARRAY_TYPE ||
- map->instance_type() == FIXED_DOUBLE_ARRAY_TYPE);
- int size = (map->instance_type() == FIXED_ARRAY_TYPE)
- ? FixedArray::SizeFor(length)
- : FixedDoubleArray::SizeFor(length);
- Allocate(size, pretenure, Type::OtherInternal());
- Store(AccessBuilder::ForMap(), map);
- Store(AccessBuilder::ForFixedArrayLength(), jsgraph()->Constant(length));
- }
+ inline void AllocateArray(int length, Handle<Map> map,
+ PretenureFlag pretenure = NOT_TENURED);
// Compound store of a constant into a field.
void Store(const FieldAccess& access, Handle<Object> value) {
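The header change leaves only declarations behind; the bodies move to the new allocation-builder-inl.h above, which is what pulls in the heavy map-inl.h dependency. Translation units that never call AllocateContext or AllocateArray can keep including the cheap header. A hedged sketch of the split, with made-up file and type names:

    // widget.h: cheap to include, no heavy dependencies.
    class Widget {
     public:
      inline int Size() const;  // declared here, defined in widget-inl.h
     private:
      int raw_ = 42;
    };

    // widget-inl.h: only TUs that call Size() pay for its includes.
    // #include "widget.h"
    // #include "heavy-dependency-inl.h"
    inline int Widget::Size() const { return raw_; }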
diff --git a/deps/v8/src/compiler/backend/OWNERS b/deps/v8/src/compiler/backend/OWNERS
new file mode 100644
index 0000000000..cbae36824a
--- /dev/null
+++ b/deps/v8/src/compiler/backend/OWNERS
@@ -0,0 +1,6 @@
+bbudge@chromium.org
+gdeepti@chromium.org
+
+# Plus src/compiler owners.
+
+# COMPONENT: Blink>JavaScript>Compiler
diff --git a/deps/v8/src/compiler/arm/code-generator-arm.cc b/deps/v8/src/compiler/backend/arm/code-generator-arm.cc
index 718272b2cc..9d353050fd 100644
--- a/deps/v8/src/compiler/arm/code-generator-arm.cc
+++ b/deps/v8/src/compiler/backend/arm/code-generator-arm.cc
@@ -2,18 +2,19 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/compiler/code-generator.h"
+#include "src/compiler/backend/code-generator.h"
-#include "src/arm/macro-assembler-arm.h"
#include "src/assembler-inl.h"
#include "src/boxed-float.h"
-#include "src/compiler/code-generator-impl.h"
-#include "src/compiler/gap-resolver.h"
+#include "src/compiler/backend/code-generator-impl.h"
+#include "src/compiler/backend/gap-resolver.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/osr.h"
#include "src/double.h"
-#include "src/heap/heap-inl.h"
+#include "src/heap/heap-inl.h" // crbug.com/v8/8499
+#include "src/macro-assembler.h"
#include "src/optimized-compilation-info.h"
+#include "src/wasm/wasm-code-manager.h"
#include "src/wasm/wasm-objects.h"
namespace v8 {
@@ -169,7 +170,7 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
public:
OutOfLineRecordWrite(CodeGenerator* gen, Register object, Register index,
Register value, Register scratch0, Register scratch1,
- RecordWriteMode mode,
+ RecordWriteMode mode, StubCallMode stub_mode,
UnwindingInfoWriter* unwinding_info_writer)
: OutOfLineCode(gen),
object_(object),
@@ -179,13 +180,14 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
scratch0_(scratch0),
scratch1_(scratch1),
mode_(mode),
+ stub_mode_(stub_mode),
must_save_lr_(!gen->frame_access_state()->has_frame()),
unwinding_info_writer_(unwinding_info_writer),
zone_(gen->zone()) {}
OutOfLineRecordWrite(CodeGenerator* gen, Register object, int32_t index,
Register value, Register scratch0, Register scratch1,
- RecordWriteMode mode,
+ RecordWriteMode mode, StubCallMode stub_mode,
UnwindingInfoWriter* unwinding_info_writer)
: OutOfLineCode(gen),
object_(object),
@@ -195,6 +197,7 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
scratch0_(scratch0),
scratch1_(scratch1),
mode_(mode),
+ stub_mode_(stub_mode),
must_save_lr_(!gen->frame_access_state()->has_frame()),
unwinding_info_writer_(unwinding_info_writer),
zone_(gen->zone()) {}
@@ -222,8 +225,13 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
__ Push(lr);
unwinding_info_writer_->MarkLinkRegisterOnTopOfStack(__ pc_offset());
}
- __ CallRecordWriteStub(object_, scratch1_, remembered_set_action,
- save_fp_mode);
+ if (stub_mode_ == StubCallMode::kCallWasmRuntimeStub) {
+ __ CallRecordWriteStub(object_, scratch1_, remembered_set_action,
+ save_fp_mode, wasm::WasmCode::kWasmRecordWrite);
+ } else {
+ __ CallRecordWriteStub(object_, scratch1_, remembered_set_action,
+ save_fp_mode);
+ }
if (must_save_lr_) {
__ Pop(lr);
unwinding_info_writer_->MarkPopLinkRegisterFromTopOfStack(__ pc_offset());
@@ -238,6 +246,7 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
Register const scratch0_;
Register const scratch1_;
RecordWriteMode const mode_;
+ StubCallMode stub_mode_;
bool must_save_lr_;
UnwindingInfoWriter* const unwinding_info_writer_;
Zone* zone_;
@@ -590,13 +599,13 @@ void AdjustStackPointerForTailCall(
if (pending_pushes != nullptr) {
FlushPendingPushRegisters(tasm, state, pending_pushes);
}
- tasm->sub(sp, sp, Operand(stack_slot_delta * kPointerSize));
+ tasm->sub(sp, sp, Operand(stack_slot_delta * kSystemPointerSize));
state->IncreaseSPDelta(stack_slot_delta);
} else if (allow_shrinkage && stack_slot_delta < 0) {
if (pending_pushes != nullptr) {
FlushPendingPushRegisters(tasm, state, pending_pushes);
}
- tasm->add(sp, sp, Operand(-stack_slot_delta * kPointerSize));
+ tasm->add(sp, sp, Operand(-stack_slot_delta * kSystemPointerSize));
state->IncreaseSPDelta(stack_slot_delta);
}
}
@@ -682,12 +691,8 @@ void CodeGenerator::BailoutIfDeoptimized() {
__ ldr(scratch,
FieldMemOperand(scratch, CodeDataContainer::kKindSpecificFlagsOffset));
__ tst(scratch, Operand(1 << Code::kMarkedForDeoptimizationBit));
- // Ensure we're not serializing (otherwise we'd need to use an indirection to
- // access the builtin below).
- DCHECK(!isolate()->ShouldLoadConstantsFromRootList());
- Handle<Code> code = isolate()->builtins()->builtin_handle(
- Builtins::kCompileLazyDeoptimizedCode);
- __ Jump(code, RelocInfo::CODE_TARGET, ne);
+ __ Jump(BUILTIN_CODE(isolate(), CompileLazyDeoptimizedCode),
+ RelocInfo::CODE_TARGET, ne);
}
void CodeGenerator::GenerateSpeculationPoisonFromCodeStartRegister() {
@@ -726,14 +731,21 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
DCHECK_IMPLIES(
HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
reg == kJavaScriptCallCodeStartRegister);
- __ add(reg, reg, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Call(reg);
+ __ CallCodeObject(reg);
}
RecordCallPosition(instr);
DCHECK_EQ(LeaveCC, i.OutputSBit());
frame_access_state()->ClearSPDelta();
break;
}
+ case kArchCallBuiltinPointer: {
+ DCHECK(!instr->InputAt(0)->IsImmediate());
+ Register builtin_pointer = i.InputRegister(0);
+ __ CallBuiltinPointer(builtin_pointer);
+ RecordCallPosition(instr);
+ frame_access_state()->ClearSPDelta();
+ break;
+ }
case kArchCallWasmFunction: {
if (instr->InputAt(0)->IsImmediate()) {
Constant constant = i.ToConstant(instr->InputAt(0));
@@ -761,8 +773,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
DCHECK_IMPLIES(
HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
reg == kJavaScriptCallCodeStartRegister);
- __ add(reg, reg, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(reg);
+ __ JumpCodeObject(reg);
}
DCHECK_EQ(LeaveCC, i.OutputSBit());
unwinding_info_writer_.MarkBlockWillExit();
@@ -808,8 +819,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
static_assert(kJavaScriptCallCodeStartRegister == r2, "ABI mismatch");
__ ldr(r2, FieldMemOperand(func, JSFunction::kCodeOffset));
- __ add(r2, r2, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Call(r2);
+ __ CallCodeObject(r2);
RecordCallPosition(instr);
DCHECK_EQ(LeaveCC, i.OutputSBit());
frame_access_state()->ClearSPDelta();
@@ -828,9 +838,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
DCHECK(fp_mode_ == kDontSaveFPRegs || fp_mode_ == kSaveFPRegs);
// kReturnRegister0 should have been saved before entering the stub.
int bytes = __ PushCallerSaved(fp_mode_, kReturnRegister0);
- DCHECK_EQ(0, bytes % kPointerSize);
+ DCHECK(IsAligned(bytes, kSystemPointerSize));
DCHECK_EQ(0, frame_access_state()->sp_delta());
- frame_access_state()->IncreaseSPDelta(bytes / kPointerSize);
+ frame_access_state()->IncreaseSPDelta(bytes / kSystemPointerSize);
DCHECK(!caller_registers_saved_);
caller_registers_saved_ = true;
break;
@@ -841,7 +851,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
DCHECK(fp_mode_ == kDontSaveFPRegs || fp_mode_ == kSaveFPRegs);
// Don't overwrite the returned value.
int bytes = __ PopCallerSaved(fp_mode_, kReturnRegister0);
- frame_access_state()->IncreaseSPDelta(-(bytes / kPointerSize));
+ frame_access_state()->IncreaseSPDelta(-(bytes / kSystemPointerSize));
DCHECK_EQ(0, frame_access_state()->sp_delta());
DCHECK(caller_registers_saved_);
caller_registers_saved_ = false;
@@ -874,7 +884,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
// kArchRestoreCallerRegisters;
int bytes =
__ RequiredStackSizeForCallerSaved(fp_mode_, kReturnRegister0);
- frame_access_state()->IncreaseSPDelta(bytes / kPointerSize);
+ frame_access_state()->IncreaseSPDelta(bytes / kSystemPointerSize);
}
break;
}
@@ -968,16 +978,16 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
AddressingModeField::decode(instr->opcode());
if (addressing_mode == kMode_Offset_RI) {
int32_t index = i.InputInt32(1);
- ool = new (zone())
- OutOfLineRecordWrite(this, object, index, value, scratch0, scratch1,
- mode, &unwinding_info_writer_);
+ ool = new (zone()) OutOfLineRecordWrite(
+ this, object, index, value, scratch0, scratch1, mode,
+ DetermineStubCallMode(), &unwinding_info_writer_);
__ str(value, MemOperand(object, index));
} else {
DCHECK_EQ(kMode_Offset_RR, addressing_mode);
Register index(i.InputRegister(1));
- ool = new (zone())
- OutOfLineRecordWrite(this, object, index, value, scratch0, scratch1,
- mode, &unwinding_info_writer_);
+ ool = new (zone()) OutOfLineRecordWrite(
+ this, object, index, value, scratch0, scratch1, mode,
+ DetermineStubCallMode(), &unwinding_info_writer_);
__ str(value, MemOperand(object, index));
}
__ CheckPageFlag(object, scratch0,
@@ -1729,11 +1739,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case MachineRepresentation::kFloat64:
__ vpush(i.InputDoubleRegister(0));
- frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
+ frame_access_state()->IncreaseSPDelta(kDoubleSize /
+ kSystemPointerSize);
break;
case MachineRepresentation::kSimd128: {
__ vpush(i.InputSimd128Register(0));
- frame_access_state()->IncreaseSPDelta(kSimd128Size / kPointerSize);
+ frame_access_state()->IncreaseSPDelta(kSimd128Size /
+ kSystemPointerSize);
break;
}
default:
@@ -1748,7 +1760,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kArmPoke: {
int const slot = MiscField::decode(instr->opcode());
- __ str(i.InputRegister(0), MemOperand(sp, slot * kPointerSize));
+ __ str(i.InputRegister(0), MemOperand(sp, slot * kSystemPointerSize));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
}
@@ -2846,7 +2858,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
return kSuccess;
} // NOLINT(readability/fn_size)
-
// Assembles branches after an instruction.
void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
ArmOperandConverter i(this, instr);
@@ -2913,11 +2924,12 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
} else {
gen_->AssembleSourcePosition(instr_);
// A direct call to a wasm runtime stub defined in this module.
- // Just encode the stub index. This will be patched at relocation.
+ // Just encode the stub index. This will be patched when the code
+ // is added to the native module and copied into wasm code space.
__ Call(static_cast<Address>(trap_id), RelocInfo::WASM_STUB_CALL);
ReferenceMap* reference_map =
new (gen_->zone()) ReferenceMap(gen_->zone());
- gen_->RecordSafepoint(reference_map, Safepoint::kSimple, 0,
+ gen_->RecordSafepoint(reference_map, Safepoint::kSimple,
Safepoint::kNoLazyDeopt);
if (FLAG_debug_code) {
__ stop(GetAbortReason(AbortReason::kUnexpectedReturnFromWasmTrap));
@@ -2969,7 +2981,6 @@ void CodeGenerator::AssembleArchLookupSwitch(Instruction* instr) {
AssembleArchJump(i.InputRpo(1));
}
-
void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
ArmOperandConverter i(this, instr);
Register input = i.InputRegister(0);
@@ -3000,7 +3011,7 @@ void CodeGenerator::FinishFrame(Frame* frame) {
uint32_t first = base::bits::CountTrailingZeros32(saves_fp);
DCHECK_EQ((last - first + 1), base::bits::CountPopulation(saves_fp));
frame->AllocateSavedCalleeRegisterSlots((last - first + 1) *
- (kDoubleSize / kPointerSize));
+ (kDoubleSize / kSystemPointerSize));
}
const RegList saves = call_descriptor->CalleeSavedRegisters();
if (saves != 0) {
@@ -3024,6 +3035,16 @@ void CodeGenerator::AssembleConstructFrame() {
__ StubPrologue(info()->GetOutputStackFrameType());
if (call_descriptor->IsWasmFunctionCall()) {
__ Push(kWasmInstanceRegister);
+ } else if (call_descriptor->IsWasmImportWrapper()) {
+ // WASM import wrappers are passed a tuple in the place of the instance.
+ // Unpack the tuple into the instance and the target callable.
+ // This must be done here in the codegen because it cannot be expressed
+ // properly in the graph.
+ __ ldr(kJSFunctionRegister,
+ FieldMemOperand(kWasmInstanceRegister, Tuple2::kValue2Offset));
+ __ ldr(kWasmInstanceRegister,
+ FieldMemOperand(kWasmInstanceRegister, Tuple2::kValue1Offset));
+ __ Push(kWasmInstanceRegister);
}
}
@@ -3062,25 +3083,25 @@ void CodeGenerator::AssembleConstructFrame() {
// If the frame is bigger than the stack, we throw the stack overflow
// exception unconditionally. Thereby we can avoid the integer overflow
// check in the condition code.
- if ((shrink_slots * kPointerSize) < (FLAG_stack_size * 1024)) {
+ if ((shrink_slots * kSystemPointerSize) < (FLAG_stack_size * 1024)) {
UseScratchRegisterScope temps(tasm());
Register scratch = temps.Acquire();
__ ldr(scratch, FieldMemOperand(
kWasmInstanceRegister,
WasmInstanceObject::kRealStackLimitAddressOffset));
__ ldr(scratch, MemOperand(scratch));
- __ add(scratch, scratch, Operand(shrink_slots * kPointerSize));
+ __ add(scratch, scratch, Operand(shrink_slots * kSystemPointerSize));
__ cmp(sp, scratch);
__ b(cs, &done);
}
__ ldr(r2, FieldMemOperand(kWasmInstanceRegister,
WasmInstanceObject::kCEntryStubOffset));
- __ Move(cp, Smi::kZero);
+ __ Move(cp, Smi::zero());
__ CallRuntimeWithCEntry(Runtime::kThrowWasmStackOverflow, r2);
// We come from WebAssembly, there are no references for the GC.
ReferenceMap* reference_map = new (zone()) ReferenceMap(zone());
- RecordSafepoint(reference_map, Safepoint::kSimple, 0,
+ RecordSafepoint(reference_map, Safepoint::kSimple,
Safepoint::kNoLazyDeopt);
if (FLAG_debug_code) {
__ stop(GetAbortReason(AbortReason::kUnexpectedReturnFromThrow));
@@ -3094,7 +3115,7 @@ void CodeGenerator::AssembleConstructFrame() {
shrink_slots -= frame()->GetReturnSlotCount();
shrink_slots -= 2 * base::bits::CountPopulation(saves_fp);
if (shrink_slots > 0) {
- __ sub(sp, sp, Operand(shrink_slots * kPointerSize));
+ __ sub(sp, sp, Operand(shrink_slots * kSystemPointerSize));
}
}
@@ -3116,7 +3137,7 @@ void CodeGenerator::AssembleConstructFrame() {
const int returns = frame()->GetReturnSlotCount();
if (returns != 0) {
// Create space for returns.
- __ sub(sp, sp, Operand(returns * kPointerSize));
+ __ sub(sp, sp, Operand(returns * kSystemPointerSize));
}
}
@@ -3127,7 +3148,7 @@ void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
const int returns = frame()->GetReturnSlotCount();
if (returns != 0) {
// Free space of returns.
- __ add(sp, sp, Operand(returns * kPointerSize));
+ __ add(sp, sp, Operand(returns * kSystemPointerSize));
}
// Restore registers.
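OutOfLineRecordWrite now carries a StubCallMode: code compiled for WASM reaches the record-write stub through a relocatable stub index that is patched when the code is copied into WASM code space, while ordinary code keeps the direct builtin call. A compact sketch of the dispatch shape; the call targets and the stub index are stand-ins, only the enum mirrors the diff:

    #include <cstdio>

    enum class StubCallMode { kCallCodeObject, kCallWasmRuntimeStub };

    // Stand-ins for the two call paths the generated code can take.
    void CallRecordWriteBuiltin() { std::puts("call builtin code object"); }
    void CallWasmStub(int stub_index) {
      // Models the relocatable stub-index call patched at code-copy time.
      std::printf("call wasm stub #%d\n", stub_index);
    }

    void EmitRecordWriteCall(StubCallMode mode) {
      if (mode == StubCallMode::kCallWasmRuntimeStub) {
        CallWasmStub(/*hypothetical kWasmRecordWrite=*/7);
      } else {
        CallRecordWriteBuiltin();
      }
    }

    int main() {
      EmitRecordWriteCall(StubCallMode::kCallWasmRuntimeStub);
      EmitRecordWriteCall(StubCallMode::kCallCodeObject);
    }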
diff --git a/deps/v8/src/compiler/arm/instruction-codes-arm.h b/deps/v8/src/compiler/backend/arm/instruction-codes-arm.h
index 751530e206..722502edc7 100644
--- a/deps/v8/src/compiler/arm/instruction-codes-arm.h
+++ b/deps/v8/src/compiler/backend/arm/instruction-codes-arm.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_COMPILER_ARM_INSTRUCTION_CODES_ARM_H_
-#define V8_COMPILER_ARM_INSTRUCTION_CODES_ARM_H_
+#ifndef V8_COMPILER_BACKEND_ARM_INSTRUCTION_CODES_ARM_H_
+#define V8_COMPILER_BACKEND_ARM_INSTRUCTION_CODES_ARM_H_
namespace v8 {
namespace internal {
@@ -302,4 +302,4 @@ namespace compiler {
} // namespace internal
} // namespace v8
-#endif // V8_COMPILER_ARM_INSTRUCTION_CODES_ARM_H_
+#endif // V8_COMPILER_BACKEND_ARM_INSTRUCTION_CODES_ARM_H_
diff --git a/deps/v8/src/compiler/arm/instruction-scheduler-arm.cc b/deps/v8/src/compiler/backend/arm/instruction-scheduler-arm.cc
index 3de063b3fe..211abd85b8 100644
--- a/deps/v8/src/compiler/arm/instruction-scheduler-arm.cc
+++ b/deps/v8/src/compiler/backend/arm/instruction-scheduler-arm.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/compiler/instruction-scheduler.h"
+#include "src/compiler/backend/instruction-scheduler.h"
namespace v8 {
namespace internal {
@@ -10,7 +10,6 @@ namespace compiler {
bool InstructionScheduler::SchedulerSupported() { return true; }
-
int InstructionScheduler::GetTargetInstructionFlags(
const Instruction* instr) const {
switch (instr->arch_opcode()) {
@@ -288,7 +287,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
return kHasSideEffect;
#define CASE(Name) case k##Name:
- COMMON_ARCH_OPCODE_LIST(CASE)
+ COMMON_ARCH_OPCODE_LIST(CASE)
#undef CASE
// Already covered in architecture independent code.
UNREACHABLE();
@@ -297,7 +296,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
UNREACHABLE();
}
-
int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
// TODO(all): Add instruction cost modeling.
return 1;
diff --git a/deps/v8/src/compiler/arm/instruction-selector-arm.cc b/deps/v8/src/compiler/backend/arm/instruction-selector-arm.cc
index 28d7a7fcd0..04f46a1d6b 100644
--- a/deps/v8/src/compiler/arm/instruction-selector-arm.cc
+++ b/deps/v8/src/compiler/backend/arm/instruction-selector-arm.cc
@@ -4,7 +4,8 @@
#include "src/base/adapters.h"
#include "src/base/bits.h"
-#include "src/compiler/instruction-selector-impl.h"
+#include "src/base/enum-set.h"
+#include "src/compiler/backend/instruction-selector-impl.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"
@@ -86,7 +87,6 @@ class ArmOperandGenerator : public OperandGenerator {
}
};
-
namespace {
void VisitRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
@@ -187,7 +187,6 @@ bool TryMatchROR(InstructionSelector* selector, InstructionCode* opcode_return,
value_return, shift_return);
}
-
bool TryMatchASR(InstructionSelector* selector, InstructionCode* opcode_return,
Node* node, InstructionOperand* value_return,
InstructionOperand* shift_return) {
@@ -196,7 +195,6 @@ bool TryMatchASR(InstructionSelector* selector, InstructionCode* opcode_return,
value_return, shift_return);
}
-
bool TryMatchLSL(InstructionSelector* selector, InstructionCode* opcode_return,
Node* node, InstructionOperand* value_return,
InstructionOperand* shift_return) {
@@ -222,7 +220,6 @@ bool TryMatchLSR(InstructionSelector* selector, InstructionCode* opcode_return,
value_return, shift_return);
}
-
bool TryMatchShift(InstructionSelector* selector,
InstructionCode* opcode_return, Node* node,
InstructionOperand* value_return,
@@ -234,7 +231,6 @@ bool TryMatchShift(InstructionSelector* selector,
TryMatchROR(selector, opcode_return, node, value_return, shift_return));
}
-
bool TryMatchImmediateOrShift(InstructionSelector* selector,
InstructionCode* opcode_return, Node* node,
size_t* input_count_return,
@@ -253,7 +249,6 @@ bool TryMatchImmediateOrShift(InstructionSelector* selector,
return false;
}
-
void VisitBinop(InstructionSelector* selector, Node* node,
InstructionCode opcode, InstructionCode reverse_opcode,
FlagsContinuation* cont) {
@@ -304,14 +299,12 @@ void VisitBinop(InstructionSelector* selector, Node* node,
inputs, cont);
}
-
void VisitBinop(InstructionSelector* selector, Node* node,
InstructionCode opcode, InstructionCode reverse_opcode) {
FlagsContinuation cont;
VisitBinop(selector, node, opcode, reverse_opcode, &cont);
}
-
void EmitDiv(InstructionSelector* selector, ArchOpcode div_opcode,
ArchOpcode f64i32_opcode, ArchOpcode i32f64_opcode,
InstructionOperand result_operand, InstructionOperand left_operand,
@@ -331,7 +324,6 @@ void EmitDiv(InstructionSelector* selector, ArchOpcode div_opcode,
selector->Emit(i32f64_opcode, result_operand, result_double_operand);
}
-
void VisitDiv(InstructionSelector* selector, Node* node, ArchOpcode div_opcode,
ArchOpcode f64i32_opcode, ArchOpcode i32f64_opcode) {
ArmOperandGenerator g(selector);
@@ -341,7 +333,6 @@ void VisitDiv(InstructionSelector* selector, Node* node, ArchOpcode div_opcode,
g.UseRegister(m.right().node()));
}
-
void VisitMod(InstructionSelector* selector, Node* node, ArchOpcode div_opcode,
ArchOpcode f64i32_opcode, ArchOpcode i32f64_opcode) {
ArmOperandGenerator g(selector);
@@ -385,8 +376,7 @@ void EmitLoad(InstructionSelector* selector, InstructionCode opcode,
}
void EmitStore(InstructionSelector* selector, InstructionCode opcode,
- size_t input_count, InstructionOperand* inputs,
- Node* index) {
+ size_t input_count, InstructionOperand* inputs, Node* index) {
ArmOperandGenerator g(selector);
if (g.CanBeImmediate(index, opcode)) {
@@ -479,14 +469,14 @@ void InstructionSelector::VisitLoad(Node* node) {
break;
case MachineRepresentation::kTaggedSigned: // Fall through.
case MachineRepresentation::kTaggedPointer: // Fall through.
- case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kTagged: // Fall through.
case MachineRepresentation::kWord32:
opcode = kArmLdr;
break;
case MachineRepresentation::kSimd128:
opcode = kArmVld1S128;
break;
- case MachineRepresentation::kWord64: // Fall through.
+ case MachineRepresentation::kWord64: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
return;
@@ -572,14 +562,14 @@ void InstructionSelector::VisitStore(Node* node) {
break;
case MachineRepresentation::kTaggedSigned: // Fall through.
case MachineRepresentation::kTaggedPointer: // Fall through.
- case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kTagged: // Fall through.
case MachineRepresentation::kWord32:
opcode = kArmStr;
break;
case MachineRepresentation::kSimd128:
opcode = kArmVst1S128;
break;
- case MachineRepresentation::kWord64: // Fall through.
+ case MachineRepresentation::kWord64: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
return;
@@ -777,9 +767,9 @@ void EmitBic(InstructionSelector* selector, Node* node, Node* left,
g.UseRegister(right));
}
-
void EmitUbfx(InstructionSelector* selector, Node* node, Node* left,
uint32_t lsb, uint32_t width) {
+ DCHECK_LE(lsb, 31u);
DCHECK_LE(1u, width);
DCHECK_LE(width, 32u - lsb);
ArmOperandGenerator g(selector);
@@ -789,7 +779,6 @@ void EmitUbfx(InstructionSelector* selector, Node* node, Node* left,
} // namespace
-
void InstructionSelector::VisitWord32And(Node* node) {
ArmOperandGenerator g(this);
Int32BinopMatcher m(node);
@@ -887,12 +876,10 @@ void InstructionSelector::VisitWord32And(Node* node) {
VisitBinop(this, node, kArmAnd, kArmAnd);
}
-
void InstructionSelector::VisitWord32Or(Node* node) {
VisitBinop(this, node, kArmOrr, kArmOrr);
}
-
void InstructionSelector::VisitWord32Xor(Node* node) {
ArmOperandGenerator g(this);
Int32BinopMatcher m(node);
@@ -912,7 +899,6 @@ void InstructionSelector::VisitWord32Xor(Node* node) {
VisitBinop(this, node, kArmEor, kArmEor);
}
-
namespace {
template <typename TryMatchShift>
@@ -939,22 +925,19 @@ void VisitShift(InstructionSelector* selector, Node* node,
inputs, cont);
}
-
template <typename TryMatchShift>
void VisitShift(InstructionSelector* selector, Node* node,
- TryMatchShift try_match_shift) {
+ TryMatchShift try_match_shift) {
FlagsContinuation cont;
VisitShift(selector, node, try_match_shift, &cont);
}
} // namespace
-
void InstructionSelector::VisitWord32Shl(Node* node) {
VisitShift(this, node, TryMatchLSL);
}
-
void InstructionSelector::VisitWord32Shr(Node* node) {
ArmOperandGenerator g(this);
Int32BinopMatcher m(node);
@@ -966,7 +949,7 @@ void InstructionSelector::VisitWord32Shr(Node* node) {
uint32_t value = (mleft.right().Value() >> lsb) << lsb;
uint32_t width = base::bits::CountPopulation(value);
uint32_t msb = base::bits::CountLeadingZeros32(value);
- if (msb + width + lsb == 32) {
+ if ((width != 0) && (msb + width + lsb == 32)) {
DCHECK_EQ(lsb, base::bits::CountTrailingZeros32(value));
return EmitUbfx(this, node, mleft.left().node(), lsb, width);
}
@@ -975,7 +958,6 @@ void InstructionSelector::VisitWord32Shr(Node* node) {
VisitShift(this, node, TryMatchLSR);
}
-
void InstructionSelector::VisitWord32Sar(Node* node) {
ArmOperandGenerator g(this);
Int32BinopMatcher m(node);
@@ -1266,7 +1248,6 @@ void InstructionSelector::VisitInt32Add(Node* node) {
VisitBinop(this, node, kArmAdd, kArmAdd);
}
-
void InstructionSelector::VisitInt32Sub(Node* node) {
ArmOperandGenerator g(this);
Int32BinopMatcher m(node);
@@ -1334,22 +1315,18 @@ void InstructionSelector::VisitUint32MulHigh(Node* node) {
Emit(kArmUmull, arraysize(outputs), outputs, arraysize(inputs), inputs);
}
-
void InstructionSelector::VisitInt32Div(Node* node) {
VisitDiv(this, node, kArmSdiv, kArmVcvtF64S32, kArmVcvtS32F64);
}
-
void InstructionSelector::VisitUint32Div(Node* node) {
VisitDiv(this, node, kArmUdiv, kArmVcvtF64U32, kArmVcvtU32F64);
}
-
void InstructionSelector::VisitInt32Mod(Node* node) {
VisitMod(this, node, kArmSdiv, kArmVcvtF64S32, kArmVcvtS32F64);
}
-
void InstructionSelector::VisitUint32Mod(Node* node) {
VisitMod(this, node, kArmUdiv, kArmVcvtF64U32, kArmVcvtU32F64);
}
@@ -1448,7 +1425,6 @@ void InstructionSelector::VisitFloat32Add(Node* node) {
VisitRRR(this, kArmVaddF32, node);
}
-
void InstructionSelector::VisitFloat64Add(Node* node) {
ArmOperandGenerator g(this);
Float64BinopMatcher m(node);
@@ -1498,7 +1474,8 @@ void InstructionSelector::VisitFloat64Sub(Node* node) {
void InstructionSelector::VisitFloat64Mod(Node* node) {
ArmOperandGenerator g(this);
Emit(kArmVmodF64, g.DefineAsFixed(node, d0), g.UseFixed(node->InputAt(0), d0),
- g.UseFixed(node->InputAt(1), d1))->MarkAsCall();
+ g.UseFixed(node->InputAt(1), d1))
+ ->MarkAsCall();
}
void InstructionSelector::VisitFloat64Ieee754Binop(Node* node,
@@ -1582,7 +1559,6 @@ void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
selector->EmitWithContinuation(opcode, left, right, cont);
}
-
// Shared routine for multiple float32 compare operations.
void VisitFloat32Compare(InstructionSelector* selector, Node* node,
FlagsContinuation* cont) {
@@ -1601,7 +1577,6 @@ void VisitFloat32Compare(InstructionSelector* selector, Node* node,
}
}
-
// Shared routine for multiple float64 compare operations.
void VisitFloat64Compare(InstructionSelector* selector, Node* node,
FlagsContinuation* cont) {
@@ -1761,7 +1736,6 @@ void VisitWordCompare(InstructionSelector* selector, Node* node,
inputs, cont);
}
-
void VisitWordCompare(InstructionSelector* selector, Node* node,
FlagsContinuation* cont) {
InstructionCode opcode = kArmCmp;
@@ -1949,33 +1923,28 @@ void InstructionSelector::VisitWord32Equal(Node* const node) {
VisitWordCompare(this, node, &cont);
}
-
void InstructionSelector::VisitInt32LessThan(Node* node) {
FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
VisitWordCompare(this, node, &cont);
}
-
void InstructionSelector::VisitInt32LessThanOrEqual(Node* node) {
FlagsContinuation cont =
FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
VisitWordCompare(this, node, &cont);
}
-
void InstructionSelector::VisitUint32LessThan(Node* node) {
FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
VisitWordCompare(this, node, &cont);
}
-
void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) {
FlagsContinuation cont =
FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
VisitWordCompare(this, node, &cont);
}
-
void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
@@ -2012,32 +1981,27 @@ void InstructionSelector::VisitFloat32Equal(Node* node) {
VisitFloat32Compare(this, node, &cont);
}
-
void InstructionSelector::VisitFloat32LessThan(Node* node) {
FlagsContinuation cont = FlagsContinuation::ForSet(kFloatLessThan, node);
VisitFloat32Compare(this, node, &cont);
}
-
void InstructionSelector::VisitFloat32LessThanOrEqual(Node* node) {
FlagsContinuation cont =
FlagsContinuation::ForSet(kFloatLessThanOrEqual, node);
VisitFloat32Compare(this, node, &cont);
}
-
void InstructionSelector::VisitFloat64Equal(Node* node) {
FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
VisitFloat64Compare(this, node, &cont);
}
-
void InstructionSelector::VisitFloat64LessThan(Node* node) {
FlagsContinuation cont = FlagsContinuation::ForSet(kFloatLessThan, node);
VisitFloat64Compare(this, node, &cont);
}
-
void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
FlagsContinuation cont =
FlagsContinuation::ForSet(kFloatLessThanOrEqual, node);
@@ -2059,7 +2023,6 @@ void InstructionSelector::VisitFloat64InsertLowWord32(Node* node) {
g.UseRegister(right));
}
-
void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
ArmOperandGenerator g(this);
Node* left = node->InputAt(0);
@@ -2505,7 +2468,7 @@ void InstructionSelector::VisitWord32AtomicPairCompareExchange(Node* node) {
void InstructionSelector::VisitS128Zero(Node* node) {
ArmOperandGenerator g(this);
- Emit(kArmS128Zero, g.DefineAsRegister(node), g.DefineAsRegister(node));
+ Emit(kArmS128Zero, g.DefineAsRegister(node));
}
#define SIMD_VISIT_SPLAT(Type) \
@@ -2759,7 +2722,7 @@ InstructionSelector::SupportedMachineOperatorFlags() {
// static
MachineOperatorBuilder::AlignmentRequirements
InstructionSelector::AlignmentRequirements() {
- EnumSet<MachineRepresentation> req_aligned;
+ base::EnumSet<MachineRepresentation> req_aligned;
req_aligned.Add(MachineRepresentation::kFloat32);
req_aligned.Add(MachineRepresentation::kFloat64);
return MachineOperatorBuilder::AlignmentRequirements::
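The VisitWord32Shr change adds a width != 0 guard to the ubfx match: for a zero mask, CountPopulation returns 0 and CountLeadingZeros32 returns 32, so the old msb + width + lsb == 32 test could pass when lsb == 0 and request an invalid zero-width bitfield extract, tripping the new DCHECKs in EmitUbfx. A small self-contained model of the predicate; the helpers stand in for base::bits and assume GCC/Clang builtins:

    #include <cassert>
    #include <cstdint>

    // Stand-ins for the base::bits helpers used by the selector.
    int CountPopulation(uint32_t v) { return __builtin_popcount(v); }
    int CountLeadingZeros32(uint32_t v) { return v ? __builtin_clz(v) : 32; }

    // True when (mask >> lsb) << lsb is one contiguous run of ones that a
    // single ubfx (unsigned bitfield extract) can produce.
    bool MatchesUbfx(uint32_t mask, uint32_t lsb) {
      uint32_t value = (mask >> lsb) << lsb;
      uint32_t width = CountPopulation(value);
      uint32_t msb = CountLeadingZeros32(value);
      // width != 0 is the fix from the diff: a zero mask would otherwise
      // slip through when lsb == 0.
      return (width != 0) && (msb + width + lsb == 32);
    }

    int main() {
      assert(MatchesUbfx(0x0FF0, 4));  // contiguous bits: ubfx applies
      assert(!MatchesUbfx(0, 0));      // zero mask rejected by the fix
      assert(!MatchesUbfx(0x0A0, 4));  // non-contiguous run: no match
    }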
diff --git a/deps/v8/src/compiler/arm/unwinding-info-writer-arm.cc b/deps/v8/src/compiler/backend/arm/unwinding-info-writer-arm.cc
index 579e5c7f0a..ec59c9df78 100644
--- a/deps/v8/src/compiler/arm/unwinding-info-writer-arm.cc
+++ b/deps/v8/src/compiler/backend/arm/unwinding-info-writer-arm.cc
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/compiler/arm/unwinding-info-writer-arm.h"
-#include "src/compiler/instruction.h"
+#include "src/compiler/backend/arm/unwinding-info-writer-arm.h"
+#include "src/compiler/backend/instruction.h"
namespace v8 {
namespace internal {
@@ -19,22 +19,15 @@ void UnwindingInfoWriter::BeginInstructionBlock(int pc_offset,
static_cast<int>(block_initial_states_.size()));
const BlockInitialState* initial_state =
block_initial_states_[block->rpo_number().ToInt()];
- if (initial_state) {
- if (initial_state->saved_lr_ != saved_lr_) {
- eh_frame_writer_.AdvanceLocation(pc_offset);
- if (initial_state->saved_lr_) {
- eh_frame_writer_.RecordRegisterSavedToStack(lr, kPointerSize);
- } else {
- eh_frame_writer_.RecordRegisterFollowsInitialRule(lr);
- }
- saved_lr_ = initial_state->saved_lr_;
+ if (!initial_state) return;
+ if (initial_state->saved_lr_ != saved_lr_) {
+ eh_frame_writer_.AdvanceLocation(pc_offset);
+ if (initial_state->saved_lr_) {
+ eh_frame_writer_.RecordRegisterSavedToStack(lr, kSystemPointerSize);
+ } else {
+ eh_frame_writer_.RecordRegisterFollowsInitialRule(lr);
}
- } else {
- // The entry block always lacks an explicit initial state.
- // The exit block may lack an explicit state, if it is only reached by
- // the block ending in a bx lr.
- // All the other blocks must have an explicit initial state.
- DCHECK(block->predecessors().empty() || block->successors().empty());
+ saved_lr_ = initial_state->saved_lr_;
}
}
@@ -75,7 +68,7 @@ void UnwindingInfoWriter::MarkFrameConstructed(int at_pc) {
// The LR is pushed on the stack, and we can record this fact at the end of
// the construction, since the LR itself is not modified in the process.
eh_frame_writer_.AdvanceLocation(at_pc);
- eh_frame_writer_.RecordRegisterSavedToStack(lr, kPointerSize);
+ eh_frame_writer_.RecordRegisterSavedToStack(lr, kSystemPointerSize);
saved_lr_ = true;
}
diff --git a/deps/v8/src/compiler/arm/unwinding-info-writer-arm.h b/deps/v8/src/compiler/backend/arm/unwinding-info-writer-arm.h
index a741121e32..237abe40ab 100644
--- a/deps/v8/src/compiler/arm/unwinding-info-writer-arm.h
+++ b/deps/v8/src/compiler/backend/arm/unwinding-info-writer-arm.h
@@ -2,10 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_COMPILER_ARM_UNWINDING_INFO_WRITER_ARM_H_
-#define V8_COMPILER_ARM_UNWINDING_INFO_WRITER_ARM_H_
+#ifndef V8_COMPILER_BACKEND_ARM_UNWINDING_INFO_WRITER_ARM_H_
+#define V8_COMPILER_BACKEND_ARM_UNWINDING_INFO_WRITER_ARM_H_
#include "src/eh-frame.h"
+#include "src/flags.h"
namespace v8 {
namespace internal {
@@ -69,4 +70,4 @@ class UnwindingInfoWriter {
} // namespace internal
} // namespace v8
-#endif // V8_COMPILER_ARM_UNWINDING_INFO_WRITER_ARM_H_
+#endif // V8_COMPILER_BACKEND_ARM_UNWINDING_INFO_WRITER_ARM_H_
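These unwinding-info hunks, like the code generators above, move stack bookkeeping from kPointerSize to kSystemPointerSize. The rename separates the size of a machine pointer (stack slots, the saved LR) from the size of a tagged value, and the two stop coinciding once tagged values are compressed to 32 bits on 64-bit targets; the earlier ForOrderedHashMapEntryValue hunk already scales object field offsets by kTaggedSize for the same reason. A sketch of the distinction, assuming a hypothetical pointer-compression build flag:

    #include <cstdio>

    // Hypothetical constants mirroring the renamed pair.
    constexpr int kSystemPointerSize = sizeof(void*);  // sp math, slots
    #ifdef COMPRESS_POINTERS  // assumed flag, not V8's exact name
    constexpr int kTaggedSize = 4;                     // compressed values
    #else
    constexpr int kTaggedSize = kSystemPointerSize;
    #endif

    int main() {
      // Stack adjustments must always use the system pointer size...
      std::printf("free %d bytes for 3 return slots\n",
                  3 * kSystemPointerSize);
      // ...while in-object field strides follow the tagged size.
      std::printf("tagged field stride: %d bytes\n", kTaggedSize);
    }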
diff --git a/deps/v8/src/compiler/arm64/code-generator-arm64.cc b/deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc
index 128ed9ffee..9890b58e3d 100644
--- a/deps/v8/src/compiler/arm64/code-generator-arm64.cc
+++ b/deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc
@@ -2,17 +2,18 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/compiler/code-generator.h"
+#include "src/compiler/backend/code-generator.h"
#include "src/arm64/assembler-arm64-inl.h"
#include "src/arm64/macro-assembler-arm64-inl.h"
-#include "src/compiler/code-generator-impl.h"
-#include "src/compiler/gap-resolver.h"
+#include "src/compiler/backend/code-generator-impl.h"
+#include "src/compiler/backend/gap-resolver.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/osr.h"
#include "src/frame-constants.h"
-#include "src/heap/heap-inl.h"
+#include "src/heap/heap-inl.h" // crbug.com/v8/8499
#include "src/optimized-compilation-info.h"
+#include "src/wasm/wasm-code-manager.h"
#include "src/wasm/wasm-objects.h"
namespace v8 {
@@ -212,7 +213,7 @@ class Arm64OperandConverter final : public InstructionOperandConverter {
case Constant::kInt32:
return Operand(constant.ToInt32());
case Constant::kInt64:
- if (RelocInfo::IsWasmPtrReference(constant.rmode())) {
+ if (RelocInfo::IsWasmReference(constant.rmode())) {
return Operand(constant.ToInt64(), constant.rmode());
} else {
return Operand(constant.ToInt64());
@@ -255,14 +256,13 @@ class Arm64OperandConverter final : public InstructionOperandConverter {
}
};
-
namespace {
class OutOfLineRecordWrite final : public OutOfLineCode {
public:
OutOfLineRecordWrite(CodeGenerator* gen, Register object, Operand index,
Register value, Register scratch0, Register scratch1,
- RecordWriteMode mode,
+ RecordWriteMode mode, StubCallMode stub_mode,
UnwindingInfoWriter* unwinding_info_writer)
: OutOfLineCode(gen),
object_(object),
@@ -271,6 +271,7 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
scratch0_(scratch0),
scratch1_(scratch1),
mode_(mode),
+ stub_mode_(stub_mode),
must_save_lr_(!gen->frame_access_state()->has_frame()),
unwinding_info_writer_(unwinding_info_writer),
zone_(gen->zone()) {}
@@ -293,8 +294,16 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
__ Push(lr, padreg);
unwinding_info_writer_->MarkLinkRegisterOnTopOfStack(__ pc_offset(), sp);
}
- __ CallRecordWriteStub(object_, scratch1_, remembered_set_action,
- save_fp_mode);
+ if (stub_mode_ == StubCallMode::kCallWasmRuntimeStub) {
+ // A direct call to a wasm runtime stub defined in this module.
+ // Just encode the stub index. This will be patched when the code
+ // is added to the native module and copied into wasm code space.
+ __ CallRecordWriteStub(object_, scratch1_, remembered_set_action,
+ save_fp_mode, wasm::WasmCode::kWasmRecordWrite);
+ } else {
+ __ CallRecordWriteStub(object_, scratch1_, remembered_set_action,
+ save_fp_mode);
+ }
if (must_save_lr_) {
__ Pop(padreg, lr);
unwinding_info_writer_->MarkPopLinkRegisterFromTopOfStack(__ pc_offset());
@@ -308,12 +317,12 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
Register const scratch0_;
Register const scratch1_;
RecordWriteMode const mode_;
+ StubCallMode const stub_mode_;
bool must_save_lr_;
UnwindingInfoWriter* const unwinding_info_writer_;
Zone* zone_;
};
-
Condition FlagsConditionToCondition(FlagsCondition condition) {
switch (condition) {
case kEqual:
@@ -532,7 +541,7 @@ void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
InstructionOperandConverter g(this, instr);
int optional_padding_slot = g.InputInt32(instr->InputCount() - 2);
if (optional_padding_slot % 2) {
- __ Poke(padreg, optional_padding_slot * kPointerSize);
+ __ Poke(padreg, optional_padding_slot * kSystemPointerSize);
}
}
@@ -561,12 +570,8 @@ void CodeGenerator::BailoutIfDeoptimized() {
FieldMemOperand(scratch, CodeDataContainer::kKindSpecificFlagsOffset));
Label not_deoptimized;
__ Tbz(scratch, Code::kMarkedForDeoptimizationBit, &not_deoptimized);
- // Ensure we're not serializing (otherwise we'd need to use an indirection to
- // access the builtin below).
- DCHECK(!isolate()->ShouldLoadConstantsFromRootList());
- Handle<Code> code = isolate()->builtins()->builtin_handle(
- Builtins::kCompileLazyDeoptimizedCode);
- __ Jump(code, RelocInfo::CODE_TARGET);
+ __ Jump(BUILTIN_CODE(isolate(), CompileLazyDeoptimizedCode),
+ RelocInfo::CODE_TARGET);
__ Bind(&not_deoptimized);
}
@@ -608,13 +613,20 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
DCHECK_IMPLIES(
HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
reg == kJavaScriptCallCodeStartRegister);
- __ Add(reg, reg, Code::kHeaderSize - kHeapObjectTag);
- __ Call(reg);
+ __ CallCodeObject(reg);
}
RecordCallPosition(instr);
frame_access_state()->ClearSPDelta();
break;
}
+ case kArchCallBuiltinPointer: {
+ DCHECK(!instr->InputAt(0)->IsImmediate());
+ Register builtin_pointer = i.InputRegister(0);
+ __ CallBuiltinPointer(builtin_pointer);
+ RecordCallPosition(instr);
+ frame_access_state()->ClearSPDelta();
+ break;
+ }
case kArchCallWasmFunction: {
if (instr->InputAt(0)->IsImmediate()) {
Constant constant = i.ToConstant(instr->InputAt(0));
@@ -642,8 +654,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
DCHECK_IMPLIES(
HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
reg == kJavaScriptCallCodeStartRegister);
- __ Add(reg, reg, Code::kHeaderSize - kHeapObjectTag);
- __ Jump(reg);
+ __ JumpCodeObject(reg);
}
unwinding_info_writer_.MarkBlockWillExit();
frame_access_state()->ClearSPDelta();
@@ -688,8 +699,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
static_assert(kJavaScriptCallCodeStartRegister == x2, "ABI mismatch");
__ Ldr(x2, FieldMemOperand(func, JSFunction::kCodeOffset));
- __ Add(x2, x2, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Call(x2);
+ __ CallCodeObject(x2);
RecordCallPosition(instr);
frame_access_state()->ClearSPDelta();
break;
@@ -709,9 +719,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
DCHECK(fp_mode_ == kDontSaveFPRegs || fp_mode_ == kSaveFPRegs);
// kReturnRegister0 should have been saved before entering the stub.
int bytes = __ PushCallerSaved(fp_mode_, kReturnRegister0);
- DCHECK_EQ(0, bytes % kPointerSize);
+ DCHECK(IsAligned(bytes, kSystemPointerSize));
DCHECK_EQ(0, frame_access_state()->sp_delta());
- frame_access_state()->IncreaseSPDelta(bytes / kPointerSize);
+ frame_access_state()->IncreaseSPDelta(bytes / kSystemPointerSize);
DCHECK(!caller_registers_saved_);
caller_registers_saved_ = true;
break;
@@ -722,7 +732,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
DCHECK(fp_mode_ == kDontSaveFPRegs || fp_mode_ == kSaveFPRegs);
// Don't overwrite the returned value.
int bytes = __ PopCallerSaved(fp_mode_, kReturnRegister0);
- frame_access_state()->IncreaseSPDelta(-(bytes / kPointerSize));
+ frame_access_state()->IncreaseSPDelta(-(bytes / kSystemPointerSize));
DCHECK_EQ(0, frame_access_state()->sp_delta());
DCHECK(caller_registers_saved_);
caller_registers_saved_ = false;
@@ -755,7 +765,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
// kArchRestoreCallerRegisters;
int bytes =
__ RequiredStackSizeForCallerSaved(fp_mode_, kReturnRegister0);
- frame_access_state()->IncreaseSPDelta(bytes / kPointerSize);
+ frame_access_state()->IncreaseSPDelta(bytes / kSystemPointerSize);
}
break;
}
@@ -843,9 +853,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Register value = i.InputRegister(2);
Register scratch0 = i.TempRegister(0);
Register scratch1 = i.TempRegister(1);
- auto ool = new (zone())
- OutOfLineRecordWrite(this, object, index, value, scratch0, scratch1,
- mode, &unwinding_info_writer_);
+ auto ool = new (zone()) OutOfLineRecordWrite(
+ this, object, index, value, scratch0, scratch1, mode,
+ DetermineStubCallMode(), &unwinding_info_writer_);
__ Str(value, MemOperand(object, index));
__ CheckPageFlagSet(object, scratch0,
MemoryChunk::kPointersFromHereAreInterestingMask,
@@ -956,8 +966,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Adds(i.OutputRegister(), i.InputOrZeroRegister64(0),
i.InputOperand2_64(1));
} else {
- __ Add(i.OutputRegister(), i.InputOrZeroRegister64(0),
- i.InputOperand2_64(1));
+ __ Add(i.OutputRegister(), i.InputOrZeroRegister64(0),
+ i.InputOperand2_64(1));
}
break;
case kArm64Add32:
@@ -1126,8 +1136,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Subs(i.OutputRegister(), i.InputOrZeroRegister64(0),
i.InputOperand2_64(1));
} else {
- __ Sub(i.OutputRegister(), i.InputOrZeroRegister64(0),
- i.InputOperand2_64(1));
+ __ Sub(i.OutputRegister(), i.InputOrZeroRegister64(0),
+ i.InputOperand2_64(1));
}
break;
case kArm64Sub32:
@@ -1220,7 +1230,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArm64Poke: {
- Operand operand(i.InputInt32(1) * kPointerSize);
+ Operand operand(i.InputInt32(1) * kSystemPointerSize);
if (instr->InputAt(0)->IsSimd128Register()) {
__ Poke(i.InputSimd128Register(0), operand);
} else if (instr->InputAt(0)->IsFPRegister()) {
@@ -1234,10 +1244,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
int slot = i.InputInt32(2) - 1;
if (instr->InputAt(0)->IsFPRegister()) {
__ PokePair(i.InputFloat64Register(1), i.InputFloat64Register(0),
- slot * kPointerSize);
+ slot * kSystemPointerSize);
} else {
__ PokePair(i.InputRegister(1), i.InputRegister(0),
- slot * kPointerSize);
+ slot * kSystemPointerSize);
}
break;
}
@@ -2269,11 +2279,12 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
} else {
gen_->AssembleSourcePosition(instr_);
// A direct call to a wasm runtime stub defined in this module.
- // Just encode the stub index. This will be patched at relocation.
+ // Just encode the stub index. This will be patched when the code
+ // is added to the native module and copied into wasm code space.
__ Call(static_cast<Address>(trap_id), RelocInfo::WASM_STUB_CALL);
ReferenceMap* reference_map =
new (gen_->zone()) ReferenceMap(gen_->zone());
- gen_->RecordSafepoint(reference_map, Safepoint::kSimple, 0,
+ gen_->RecordSafepoint(reference_map, Safepoint::kSimple,
Safepoint::kNoLazyDeopt);
if (FLAG_debug_code) {
// The trap code should never return.
@@ -2324,7 +2335,6 @@ void CodeGenerator::AssembleArchLookupSwitch(Instruction* instr) {
AssembleArchJump(i.InputRpo(1));
}
-
void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
Arm64OperandConverter i(this, instr);
UseScratchRegisterScope scope(tasm());
@@ -2357,7 +2367,7 @@ void CodeGenerator::FinishFrame(Frame* frame) {
DCHECK(saves_fp.list() == CPURegList::GetCalleeSavedV().list());
DCHECK_EQ(saved_count % 2, 0);
frame->AllocateSavedCalleeRegisterSlots(saved_count *
- (kDoubleSize / kPointerSize));
+ (kDoubleSize / kSystemPointerSize));
}
CPURegList saves = CPURegList(CPURegister::kRegister, kXRegSizeInBits,
@@ -2420,14 +2430,14 @@ void CodeGenerator::AssembleConstructFrame() {
// If the frame is bigger than the stack, we throw the stack overflow
// exception unconditionally. Thereby we can avoid the integer overflow
// check in the condition code.
- if (shrink_slots * kPointerSize < FLAG_stack_size * 1024) {
+ if (shrink_slots * kSystemPointerSize < FLAG_stack_size * 1024) {
UseScratchRegisterScope scope(tasm());
Register scratch = scope.AcquireX();
__ Ldr(scratch, FieldMemOperand(
kWasmInstanceRegister,
WasmInstanceObject::kRealStackLimitAddressOffset));
__ Ldr(scratch, MemOperand(scratch));
- __ Add(scratch, scratch, shrink_slots * kPointerSize);
+ __ Add(scratch, scratch, shrink_slots * kSystemPointerSize);
__ Cmp(sp, scratch);
__ B(hs, &done);
}
@@ -2445,11 +2455,11 @@ void CodeGenerator::AssembleConstructFrame() {
}
__ Ldr(x2, FieldMemOperand(kWasmInstanceRegister,
WasmInstanceObject::kCEntryStubOffset));
- __ Mov(cp, Smi::kZero);
+ __ Mov(cp, Smi::zero());
__ CallRuntimeWithCEntry(Runtime::kThrowWasmStackOverflow, x2);
// We come from WebAssembly, there are no references for the GC.
ReferenceMap* reference_map = new (zone()) ReferenceMap(zone());
- RecordSafepoint(reference_map, Safepoint::kSimple, 0,
+ RecordSafepoint(reference_map, Safepoint::kSimple,
Safepoint::kNoLazyDeopt);
if (FLAG_debug_code) {
__ Brk(0);
@@ -2494,6 +2504,20 @@ void CodeGenerator::AssembleConstructFrame() {
__ Str(kWasmInstanceRegister,
MemOperand(fp, WasmCompiledFrameConstants::kWasmInstanceOffset));
} break;
+ case CallDescriptor::kCallWasmImportWrapper: {
+ UseScratchRegisterScope temps(tasm());
+ __ ldr(kJSFunctionRegister,
+ FieldMemOperand(kWasmInstanceRegister, Tuple2::kValue2Offset));
+ __ ldr(kWasmInstanceRegister,
+ FieldMemOperand(kWasmInstanceRegister, Tuple2::kValue1Offset));
+ __ Claim(shrink_slots + 2); // Claim extra slots for marker + instance.
+ Register scratch = temps.AcquireX();
+ __ Mov(scratch,
+ StackFrame::TypeToMarker(info()->GetOutputStackFrameType()));
+ __ Str(scratch, MemOperand(fp, TypedFrameConstants::kFrameTypeOffset));
+ __ Str(kWasmInstanceRegister,
+ MemOperand(fp, WasmCompiledFrameConstants::kWasmInstanceOffset));
+ } break;
case CallDescriptor::kCallAddress:
__ Claim(shrink_slots);
break;
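
In the new kCallWasmImportWrapper prologue, the incoming instance register actually carries a Tuple2: value 1 is the real WasmInstanceObject and value 2 the imported callable, which is moved into kJSFunctionRegister before the instance register is re-loaded. The two extra claimed slots hold the frame-type marker and the instance, mirroring the kCallWasmFunction case above. Roughly, with illustrative offsets (the real values come from TypedFrameConstants and WasmCompiledFrameConstants):

// Typed wasm frame after this prologue, assuming 8-byte slots:
constexpr int kSavedLrOffset = 8;         // fp + 8: return address
constexpr int kSavedFpOffset = 0;         // fp + 0: caller's frame pointer
constexpr int kFrameTypeOffset = -8;      // fp - 8: StackFrame marker
constexpr int kWasmInstanceOffset = -16;  // fp - 16: instance object
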
@@ -2698,7 +2722,6 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
UNREACHABLE();
}
-
void CodeGenerator::AssembleSwap(InstructionOperand* source,
InstructionOperand* destination) {
Arm64OperandConverter g(this, nullptr);
@@ -2769,7 +2792,6 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
}
}
-
void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) {
// On 64-bit ARM we emit the jump tables inline.
UNREACHABLE();
diff --git a/deps/v8/src/compiler/arm64/instruction-codes-arm64.h b/deps/v8/src/compiler/backend/arm64/instruction-codes-arm64.h
index 7b119c8fe7..6627b4e6a1 100644
--- a/deps/v8/src/compiler/arm64/instruction-codes-arm64.h
+++ b/deps/v8/src/compiler/backend/arm64/instruction-codes-arm64.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_COMPILER_ARM64_INSTRUCTION_CODES_ARM64_H_
-#define V8_COMPILER_ARM64_INSTRUCTION_CODES_ARM64_H_
+#ifndef V8_COMPILER_BACKEND_ARM64_INSTRUCTION_CODES_ARM64_H_
+#define V8_COMPILER_BACKEND_ARM64_INSTRUCTION_CODES_ARM64_H_
namespace v8 {
namespace internal {
@@ -371,4 +371,4 @@ namespace compiler {
} // namespace internal
} // namespace v8
-#endif // V8_COMPILER_ARM64_INSTRUCTION_CODES_ARM64_H_
+#endif // V8_COMPILER_BACKEND_ARM64_INSTRUCTION_CODES_ARM64_H_
diff --git a/deps/v8/src/compiler/arm64/instruction-scheduler-arm64.cc b/deps/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc
index d443bd7641..57f23b31fb 100644
--- a/deps/v8/src/compiler/arm64/instruction-scheduler-arm64.cc
+++ b/deps/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/compiler/instruction-scheduler.h"
+#include "src/compiler/backend/instruction-scheduler.h"
namespace v8 {
namespace internal {
@@ -10,7 +10,6 @@ namespace compiler {
bool InstructionScheduler::SchedulerSupported() { return true; }
-
int InstructionScheduler::GetTargetInstructionFlags(
const Instruction* instr) const {
switch (instr->arch_opcode()) {
@@ -352,7 +351,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
return kHasSideEffect;
#define CASE(Name) case k##Name:
- COMMON_ARCH_OPCODE_LIST(CASE)
+ COMMON_ARCH_OPCODE_LIST(CASE)
#undef CASE
// Already covered in architecture independent code.
UNREACHABLE();
@@ -361,7 +360,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
UNREACHABLE();
}
-
int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
// Basic latency modeling for arm64 instructions. They have been determined
// in an empirical way.
diff --git a/deps/v8/src/compiler/arm64/instruction-selector-arm64.cc b/deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc
index dd9914b8bc..bb2f5c7af2 100644
--- a/deps/v8/src/compiler/arm64/instruction-selector-arm64.cc
+++ b/deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc
@@ -3,7 +3,7 @@
// found in the LICENSE file.
#include "src/assembler-inl.h"
-#include "src/compiler/instruction-selector-impl.h"
+#include "src/compiler/backend/instruction-selector-impl.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"
@@ -17,14 +17,13 @@ enum ImmediateMode {
kShift64Imm, // 0 - 63
kLogical32Imm,
kLogical64Imm,
- kLoadStoreImm8, // signed 8 bit or 12 bit unsigned scaled by access size
+ kLoadStoreImm8,  // signed 9 bit or 12 bit unsigned scaled by access size
kLoadStoreImm16,
kLoadStoreImm32,
kLoadStoreImm64,
kNoImmediate
};
-
// Adds Arm64-specific methods for generating operands.
class Arm64OperandGenerator final : public OperandGenerator {
public:
@@ -146,7 +145,6 @@ class Arm64OperandGenerator final : public OperandGenerator {
}
};
-
namespace {
void VisitRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
@@ -155,7 +153,6 @@ void VisitRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
g.UseRegister(node->InputAt(0)));
}
-
void VisitRRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
Arm64OperandGenerator g(selector);
selector->Emit(opcode, g.DefineAsRegister(node),
@@ -294,7 +291,6 @@ bool TryMatchAnyShift(InstructionSelector* selector, Node* node,
}
}
-
bool TryMatchAnyExtend(Arm64OperandGenerator* g, InstructionSelector* selector,
Node* node, Node* left_node, Node* right_node,
InstructionOperand* left_op,
@@ -490,7 +486,6 @@ void VisitBinop(InstructionSelector* selector, Node* node,
inputs, cont);
}
-
// Shared routine for multiple binary operations.
template <typename Matcher>
void VisitBinop(InstructionSelector* selector, Node* node, ArchOpcode opcode,
@@ -499,7 +494,6 @@ void VisitBinop(InstructionSelector* selector, Node* node, ArchOpcode opcode,
VisitBinop<Matcher>(selector, node, opcode, operand_mode, &cont);
}
-
template <typename Matcher>
void VisitAddSub(InstructionSelector* selector, Node* node, ArchOpcode opcode,
ArchOpcode negate_opcode) {
@@ -515,7 +509,6 @@ void VisitAddSub(InstructionSelector* selector, Node* node, ArchOpcode opcode,
}
}
-
// For multiplications by immediate of the form x * (2^k + 1), where k > 0,
// return the value of k, otherwise return zero. This is used to reduce the
// multiplication to addition with left shift: x + (x << k).
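
A quick check of that reduction, as a sketch matching the comment's contract:

#include <cstdint>

// If imm == 2^k + 1 for some k > 0, return k; otherwise return 0.
int LeftShiftForReducedMultiply(int64_t imm) {
  int64_t m = imm - 1;
  if (m < 2 || (m & (m - 1)) != 0) return 0;  // need a power of two >= 2
  int k = 0;
  while ((int64_t{1} << k) != m) ++k;
  return k;
}

// Example: x * 9 == x + (x << 3), since 9 == 2^3 + 1.
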
@@ -629,7 +622,7 @@ void InstructionSelector::VisitLoad(Node* node) {
break;
case MachineRepresentation::kTaggedSigned: // Fall through.
case MachineRepresentation::kTaggedPointer: // Fall through.
- case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kTagged: // Fall through.
case MachineRepresentation::kWord64:
opcode = kArm64Ldr;
immediate_mode = kLoadStoreImm64;
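
kLoadStoreImm64 admits exactly the offsets AArch64 can encode directly for an 8-byte access: a signed 9-bit unscaled offset (the LDUR form) or an unsigned 12-bit offset scaled by the access size (the LDR form). A sketch of that acceptance test, parameterized by log2 of the access size:

#include <cstdint>

bool IsLoadStoreImmediate(int64_t offset, unsigned size_log2) {
  bool unscaled = offset >= -256 && offset <= 255;                  // signed imm9
  bool scaled = offset >= 0 &&
                (offset & ((int64_t{1} << size_log2) - 1)) == 0 &&  // aligned
                (offset >> size_log2) <= 4095;                      // unsigned imm12
  return unscaled || scaled;
}
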
@@ -735,7 +728,7 @@ void InstructionSelector::VisitStore(Node* node) {
break;
case MachineRepresentation::kTaggedSigned: // Fall through.
case MachineRepresentation::kTaggedPointer: // Fall through.
- case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kTagged: // Fall through.
case MachineRepresentation::kWord64:
opcode = kArm64Str;
immediate_mode = kLoadStoreImm64;
@@ -848,7 +841,6 @@ static void VisitLogical(InstructionSelector* selector, Node* node, Matcher* m,
}
}
-
void InstructionSelector::VisitWord32And(Node* node) {
Arm64OperandGenerator g(this);
Int32BinopMatcher m(node);
@@ -889,7 +881,6 @@ void InstructionSelector::VisitWord32And(Node* node) {
CanCover(node, m.right().node()), kLogical32Imm);
}
-
void InstructionSelector::VisitWord64And(Node* node) {
Arm64OperandGenerator g(this);
Int64BinopMatcher m(node);
@@ -930,7 +921,6 @@ void InstructionSelector::VisitWord64And(Node* node) {
CanCover(node, m.right().node()), kLogical64Imm);
}
-
void InstructionSelector::VisitWord32Or(Node* node) {
Int32BinopMatcher m(node);
VisitLogical<Int32BinopMatcher>(
@@ -938,7 +928,6 @@ void InstructionSelector::VisitWord32Or(Node* node) {
CanCover(node, m.right().node()), kLogical32Imm);
}
-
void InstructionSelector::VisitWord64Or(Node* node) {
Int64BinopMatcher m(node);
VisitLogical<Int64BinopMatcher>(
@@ -946,7 +935,6 @@ void InstructionSelector::VisitWord64Or(Node* node) {
CanCover(node, m.right().node()), kLogical64Imm);
}
-
void InstructionSelector::VisitWord32Xor(Node* node) {
Int32BinopMatcher m(node);
VisitLogical<Int32BinopMatcher>(
@@ -954,7 +942,6 @@ void InstructionSelector::VisitWord32Xor(Node* node) {
CanCover(node, m.right().node()), kLogical32Imm);
}
-
void InstructionSelector::VisitWord64Xor(Node* node) {
Int64BinopMatcher m(node);
VisitLogical<Int64BinopMatcher>(
@@ -962,7 +949,6 @@ void InstructionSelector::VisitWord64Xor(Node* node) {
CanCover(node, m.right().node()), kLogical64Imm);
}
-
void InstructionSelector::VisitWord32Shl(Node* node) {
Int32BinopMatcher m(node);
if (m.left().IsWord32And() && CanCover(node, m.left().node()) &&
@@ -999,7 +985,6 @@ void InstructionSelector::VisitWord32Shl(Node* node) {
VisitRRO(this, kArm64Lsl32, node, kShift32Imm);
}
-
void InstructionSelector::VisitWord64Shl(Node* node) {
Arm64OperandGenerator g(this);
Int64BinopMatcher m(node);
@@ -1015,7 +1000,6 @@ void InstructionSelector::VisitWord64Shl(Node* node) {
VisitRRO(this, kArm64Lsl, node, kShift64Imm);
}
-
namespace {
bool TryEmitBitfieldExtract32(InstructionSelector* selector, Node* node) {
@@ -1045,7 +1029,6 @@ bool TryEmitBitfieldExtract32(InstructionSelector* selector, Node* node) {
} // namespace
-
void InstructionSelector::VisitWord32Shr(Node* node) {
Int32BinopMatcher m(node);
if (m.left().IsWord32And() && m.right().HasValue()) {
@@ -1089,7 +1072,6 @@ void InstructionSelector::VisitWord32Shr(Node* node) {
VisitRRO(this, kArm64Lsr32, node, kShift32Imm);
}
-
void InstructionSelector::VisitWord64Shr(Node* node) {
Int64BinopMatcher m(node);
if (m.left().IsWord64And() && m.right().HasValue()) {
@@ -1115,7 +1097,6 @@ void InstructionSelector::VisitWord64Shr(Node* node) {
VisitRRO(this, kArm64Lsr, node, kShift64Imm);
}
-
void InstructionSelector::VisitWord32Sar(Node* node) {
if (TryEmitBitfieldExtract32(this, node)) {
return;
@@ -1168,18 +1149,15 @@ void InstructionSelector::VisitWord32Sar(Node* node) {
VisitRRO(this, kArm64Asr32, node, kShift32Imm);
}
-
void InstructionSelector::VisitWord64Sar(Node* node) {
if (TryEmitExtendingLoad(this, node)) return;
VisitRRO(this, kArm64Asr, node, kShift64Imm);
}
-
void InstructionSelector::VisitWord32Ror(Node* node) {
VisitRRO(this, kArm64Ror32, node, kShift32Imm);
}
-
void InstructionSelector::VisitWord64Ror(Node* node) {
VisitRRO(this, kArm64Ror, node, kShift64Imm);
}
@@ -1203,6 +1181,7 @@ void InstructionSelector::VisitWord64Ror(Node* node) {
V(TruncateFloat32ToUint32, kArm64Float32ToUint32) \
V(ChangeFloat64ToUint32, kArm64Float64ToUint32) \
V(ChangeFloat64ToUint64, kArm64Float64ToUint64) \
+ V(TruncateFloat64ToInt64, kArm64Float64ToInt64) \
V(TruncateFloat64ToUint32, kArm64Float64ToUint32) \
V(TruncateFloat64ToFloat32, kArm64Float64ToFloat32) \
V(TruncateFloat64ToWord32, kArchTruncateDoubleToI) \
@@ -1315,7 +1294,6 @@ void InstructionSelector::VisitInt32Add(Node* node) {
VisitAddSub<Int32BinopMatcher>(this, node, kArm64Add32, kArm64Sub32);
}
-
void InstructionSelector::VisitInt64Add(Node* node) {
Arm64OperandGenerator g(this);
Int64BinopMatcher m(node);
@@ -1346,7 +1324,6 @@ void InstructionSelector::VisitInt64Add(Node* node) {
VisitAddSub<Int64BinopMatcher>(this, node, kArm64Add, kArm64Sub);
}
-
void InstructionSelector::VisitInt32Sub(Node* node) {
Arm64OperandGenerator g(this);
Int32BinopMatcher m(node);
@@ -1367,7 +1344,6 @@ void InstructionSelector::VisitInt32Sub(Node* node) {
VisitAddSub<Int32BinopMatcher>(this, node, kArm64Sub32, kArm64Add32);
}
-
void InstructionSelector::VisitInt64Sub(Node* node) {
Arm64OperandGenerator g(this);
Int64BinopMatcher m(node);
@@ -1447,7 +1423,6 @@ void InstructionSelector::VisitInt32Mul(Node* node) {
VisitRRR(this, kArm64Mul32, node);
}
-
void InstructionSelector::VisitInt64Mul(Node* node) {
Arm64OperandGenerator g(this);
Int64BinopMatcher m(node);
@@ -1496,7 +1471,6 @@ void InstructionSelector::VisitInt32MulHigh(Node* node) {
Emit(kArm64Asr, g.DefineAsRegister(node), smull_operand, g.TempImmediate(32));
}
-
void InstructionSelector::VisitUint32MulHigh(Node* node) {
Arm64OperandGenerator g(this);
InstructionOperand const smull_operand = g.TempRegister();
@@ -1505,7 +1479,6 @@ void InstructionSelector::VisitUint32MulHigh(Node* node) {
Emit(kArm64Lsr, g.DefineAsRegister(node), smull_operand, g.TempImmediate(32));
}
-
void InstructionSelector::VisitTryTruncateFloat32ToInt64(Node* node) {
Arm64OperandGenerator g(this);
@@ -1522,7 +1495,6 @@ void InstructionSelector::VisitTryTruncateFloat32ToInt64(Node* node) {
Emit(kArm64Float32ToInt64, output_count, outputs, 1, inputs);
}
-
void InstructionSelector::VisitTryTruncateFloat64ToInt64(Node* node) {
Arm64OperandGenerator g(this);
@@ -1539,7 +1511,6 @@ void InstructionSelector::VisitTryTruncateFloat64ToInt64(Node* node) {
Emit(kArm64Float64ToInt64, output_count, outputs, 1, inputs);
}
-
void InstructionSelector::VisitTryTruncateFloat32ToUint64(Node* node) {
Arm64OperandGenerator g(this);
@@ -1556,7 +1527,6 @@ void InstructionSelector::VisitTryTruncateFloat32ToUint64(Node* node) {
Emit(kArm64Float32ToUint64, output_count, outputs, 1, inputs);
}
-
void InstructionSelector::VisitTryTruncateFloat64ToUint64(Node* node) {
Arm64OperandGenerator g(this);
@@ -1573,7 +1543,6 @@ void InstructionSelector::VisitTryTruncateFloat64ToUint64(Node* node) {
Emit(kArm64Float64ToUint64, output_count, outputs, 1, inputs);
}
-
void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
Node* value = node->InputAt(0);
if (value->opcode() == IrOpcode::kLoad && CanCover(node, value)) {
@@ -1606,7 +1575,6 @@ void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
}
}
-
void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
Arm64OperandGenerator g(this);
Node* value = node->InputAt(0);
@@ -1672,8 +1640,8 @@ void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
void InstructionSelector::VisitFloat64Mod(Node* node) {
Arm64OperandGenerator g(this);
Emit(kArm64Float64Mod, g.DefineAsFixed(node, d0),
- g.UseFixed(node->InputAt(0), d0),
- g.UseFixed(node->InputAt(1), d1))->MarkAsCall();
+ g.UseFixed(node->InputAt(0), d0), g.UseFixed(node->InputAt(1), d1))
+ ->MarkAsCall();
}
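
VisitFloat64Mod pins its operands to d0/d1 and marks the instruction as a call because AArch64 has no floating-point remainder instruction: the operation is lowered to an out-of-line call with fmod semantics, and the fixed registers match the FP argument registers of the C calling convention. The required semantics, as a one-line sketch:

#include <cmath>

// What kArm64Float64Mod must compute; emitted as a call, not an instruction.
double Float64Mod(double lhs, double rhs) { return std::fmod(lhs, rhs); }
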
void InstructionSelector::VisitFloat64Ieee754Binop(Node* node,
@@ -1697,7 +1665,7 @@ void InstructionSelector::EmitPrepareArguments(
Arm64OperandGenerator g(this);
// `arguments` includes alignment "holes". This means that slots bigger than
- // kPointerSize, e.g. Simd128, will span across multiple arguments.
+ // kSystemPointerSize, e.g. Simd128, will span across multiple arguments.
int claim_count = static_cast<int>(arguments->size());
int slot = claim_count - 1;
claim_count = RoundUp(claim_count, 2);
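
claim_count is rounded up to an even number because AArch64 requires sp to stay 16-byte aligned while each slot is 8 bytes; an odd claim therefore grows by one alignment hole. The usual power-of-two RoundUp, for reference:

// Round `value` up to a multiple of `alignment` (a power of two).
constexpr int RoundUp(int value, int alignment) {
  return (value + alignment - 1) & ~(alignment - 1);
}
static_assert(RoundUp(5, 2) == 6, "an odd claim grows by one slot");
static_assert(RoundUp(4, 2) == 4, "even claims are unchanged");
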
@@ -1769,7 +1737,6 @@ void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
selector->EmitWithContinuation(opcode, left, right, cont);
}
-
// Shared routine for multiple word compare operations.
void VisitWordCompare(InstructionSelector* selector, Node* node,
InstructionCode opcode, FlagsContinuation* cont,
@@ -2062,7 +2029,6 @@ void VisitWord32Compare(InstructionSelector* selector, Node* node,
VisitBinop<Int32BinopMatcher>(selector, node, opcode, immediate_mode, cont);
}
-
void VisitWordTest(InstructionSelector* selector, Node* node,
InstructionCode opcode, FlagsContinuation* cont) {
Arm64OperandGenerator g(selector);
@@ -2070,13 +2036,11 @@ void VisitWordTest(InstructionSelector* selector, Node* node,
cont);
}
-
void VisitWord32Test(InstructionSelector* selector, Node* node,
FlagsContinuation* cont) {
VisitWordTest(selector, node, kArm64Tst32, cont);
}
-
void VisitWord64Test(InstructionSelector* selector, Node* node,
FlagsContinuation* cont) {
VisitWordTest(selector, node, kArm64Tst, cont);
@@ -2137,7 +2101,6 @@ void VisitFloat32Compare(InstructionSelector* selector, Node* node,
}
}
-
// Shared routine for multiple float64 compare operations.
void VisitFloat64Compare(InstructionSelector* selector, Node* node,
FlagsContinuation* cont) {
@@ -2508,33 +2471,28 @@ void InstructionSelector::VisitWord32Equal(Node* const node) {
VisitWord32Compare(this, node, &cont);
}
-
void InstructionSelector::VisitInt32LessThan(Node* node) {
FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
VisitWord32Compare(this, node, &cont);
}
-
void InstructionSelector::VisitInt32LessThanOrEqual(Node* node) {
FlagsContinuation cont =
FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
VisitWord32Compare(this, node, &cont);
}
-
void InstructionSelector::VisitUint32LessThan(Node* node) {
FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
VisitWord32Compare(this, node, &cont);
}
-
void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) {
FlagsContinuation cont =
FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
VisitWord32Compare(this, node, &cont);
}
-
void InstructionSelector::VisitWord64Equal(Node* const node) {
Node* const user = node;
FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
@@ -2555,7 +2513,6 @@ void InstructionSelector::VisitWord64Equal(Node* const node) {
VisitWordCompare(this, node, kArm64Cmp, &cont, false, kArithmeticImm);
}
-
void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
@@ -2566,7 +2523,6 @@ void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
VisitBinop<Int32BinopMatcher>(this, node, kArm64Add32, kArithmeticImm, &cont);
}
-
void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
@@ -2600,7 +2556,6 @@ void InstructionSelector::VisitInt64AddWithOverflow(Node* node) {
VisitBinop<Int64BinopMatcher>(this, node, kArm64Add, kArithmeticImm, &cont);
}
-
void InstructionSelector::VisitInt64SubWithOverflow(Node* node) {
if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
@@ -2611,64 +2566,54 @@ void InstructionSelector::VisitInt64SubWithOverflow(Node* node) {
VisitBinop<Int64BinopMatcher>(this, node, kArm64Sub, kArithmeticImm, &cont);
}
-
void InstructionSelector::VisitInt64LessThan(Node* node) {
FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
VisitWordCompare(this, node, kArm64Cmp, &cont, false, kArithmeticImm);
}
-
void InstructionSelector::VisitInt64LessThanOrEqual(Node* node) {
FlagsContinuation cont =
FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
VisitWordCompare(this, node, kArm64Cmp, &cont, false, kArithmeticImm);
}
-
void InstructionSelector::VisitUint64LessThan(Node* node) {
FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
VisitWordCompare(this, node, kArm64Cmp, &cont, false, kArithmeticImm);
}
-
void InstructionSelector::VisitUint64LessThanOrEqual(Node* node) {
FlagsContinuation cont =
FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
VisitWordCompare(this, node, kArm64Cmp, &cont, false, kArithmeticImm);
}
-
void InstructionSelector::VisitFloat32Equal(Node* node) {
FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
VisitFloat32Compare(this, node, &cont);
}
-
void InstructionSelector::VisitFloat32LessThan(Node* node) {
FlagsContinuation cont = FlagsContinuation::ForSet(kFloatLessThan, node);
VisitFloat32Compare(this, node, &cont);
}
-
void InstructionSelector::VisitFloat32LessThanOrEqual(Node* node) {
FlagsContinuation cont =
FlagsContinuation::ForSet(kFloatLessThanOrEqual, node);
VisitFloat32Compare(this, node, &cont);
}
-
void InstructionSelector::VisitFloat64Equal(Node* node) {
FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
VisitFloat64Compare(this, node, &cont);
}
-
void InstructionSelector::VisitFloat64LessThan(Node* node) {
FlagsContinuation cont = FlagsContinuation::ForSet(kFloatLessThan, node);
VisitFloat64Compare(this, node, &cont);
}
-
void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
FlagsContinuation cont =
FlagsContinuation::ForSet(kFloatLessThanOrEqual, node);
@@ -2692,7 +2637,6 @@ void InstructionSelector::VisitFloat64InsertLowWord32(Node* node) {
g.UseRegister(left), g.UseRegister(right));
}
-
void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
Arm64OperandGenerator g(this);
Node* left = node->InputAt(0);
@@ -3065,7 +3009,7 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
void InstructionSelector::VisitS128Zero(Node* node) {
Arm64OperandGenerator g(this);
- Emit(kArm64S128Zero, g.DefineAsRegister(node), g.DefineAsRegister(node));
+ Emit(kArm64S128Zero, g.DefineAsRegister(node));
}
#define SIMD_VISIT_SPLAT(Type) \
diff --git a/deps/v8/src/compiler/arm64/unwinding-info-writer-arm64.cc b/deps/v8/src/compiler/backend/arm64/unwinding-info-writer-arm64.cc
index edf96026e7..3747019c7d 100644
--- a/deps/v8/src/compiler/arm64/unwinding-info-writer-arm64.cc
+++ b/deps/v8/src/compiler/backend/arm64/unwinding-info-writer-arm64.cc
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/compiler/arm64/unwinding-info-writer-arm64.h"
-#include "src/compiler/instruction.h"
+#include "src/compiler/backend/arm64/unwinding-info-writer-arm64.h"
+#include "src/compiler/backend/instruction.h"
namespace v8 {
namespace internal {
@@ -19,23 +19,16 @@ void UnwindingInfoWriter::BeginInstructionBlock(int pc_offset,
static_cast<int>(block_initial_states_.size()));
const BlockInitialState* initial_state =
block_initial_states_[block->rpo_number().ToInt()];
- if (initial_state) {
- if (initial_state->saved_lr_ != saved_lr_) {
- eh_frame_writer_.AdvanceLocation(pc_offset);
- if (initial_state->saved_lr_) {
- eh_frame_writer_.RecordRegisterSavedToStack(lr, kPointerSize);
- eh_frame_writer_.RecordRegisterSavedToStack(fp, 0);
- } else {
- eh_frame_writer_.RecordRegisterFollowsInitialRule(lr);
- }
- saved_lr_ = initial_state->saved_lr_;
+ if (!initial_state) return;
+ if (initial_state->saved_lr_ != saved_lr_) {
+ eh_frame_writer_.AdvanceLocation(pc_offset);
+ if (initial_state->saved_lr_) {
+ eh_frame_writer_.RecordRegisterSavedToStack(lr, kSystemPointerSize);
+ eh_frame_writer_.RecordRegisterSavedToStack(fp, 0);
+ } else {
+ eh_frame_writer_.RecordRegisterFollowsInitialRule(lr);
}
- } else {
- // The entry block always lacks an explicit initial state.
- // The exit block may lack an explicit state, if it is only reached by
- // the block ending in a ret.
- // All the other blocks must have an explicit initial state.
- DCHECK(block->predecessors().empty() || block->successors().empty());
+ saved_lr_ = initial_state->saved_lr_;
}
}
@@ -76,7 +69,7 @@ void UnwindingInfoWriter::MarkFrameConstructed(int at_pc) {
// The LR is pushed on the stack, and we can record this fact at the end of
// the construction, since the LR itself is not modified in the process.
eh_frame_writer_.AdvanceLocation(at_pc);
- eh_frame_writer_.RecordRegisterSavedToStack(lr, kPointerSize);
+ eh_frame_writer_.RecordRegisterSavedToStack(lr, kSystemPointerSize);
eh_frame_writer_.RecordRegisterSavedToStack(fp, 0);
saved_lr_ = true;
}
diff --git a/deps/v8/src/compiler/arm64/unwinding-info-writer-arm64.h b/deps/v8/src/compiler/backend/arm64/unwinding-info-writer-arm64.h
index 25c4fcf77f..6b67f0ff64 100644
--- a/deps/v8/src/compiler/arm64/unwinding-info-writer-arm64.h
+++ b/deps/v8/src/compiler/backend/arm64/unwinding-info-writer-arm64.h
@@ -2,10 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_COMPILER_ARM64_UNWINDING_INFO_WRITER_ARM64_H_
-#define V8_COMPILER_ARM64_UNWINDING_INFO_WRITER_ARM64_H_
+#ifndef V8_COMPILER_BACKEND_ARM64_UNWINDING_INFO_WRITER_ARM64_H_
+#define V8_COMPILER_BACKEND_ARM64_UNWINDING_INFO_WRITER_ARM64_H_
#include "src/eh-frame.h"
+#include "src/flags.h"
namespace v8 {
namespace internal {
@@ -69,4 +70,4 @@ class UnwindingInfoWriter {
} // namespace internal
} // namespace v8
-#endif // V8_COMPILER_ARM64_UNWINDING_INFO_WRITER_ARM64_H_
+#endif // V8_COMPILER_BACKEND_ARM64_UNWINDING_INFO_WRITER_ARM64_H_
diff --git a/deps/v8/src/compiler/code-generator-impl.h b/deps/v8/src/compiler/backend/code-generator-impl.h
index 1298657774..2685b109d9 100644
--- a/deps/v8/src/compiler/code-generator-impl.h
+++ b/deps/v8/src/compiler/backend/code-generator-impl.h
@@ -2,12 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_COMPILER_CODE_GENERATOR_IMPL_H_
-#define V8_COMPILER_CODE_GENERATOR_IMPL_H_
+#ifndef V8_COMPILER_BACKEND_CODE_GENERATOR_IMPL_H_
+#define V8_COMPILER_BACKEND_CODE_GENERATOR_IMPL_H_
-#include "src/code-stubs.h"
-#include "src/compiler/code-generator.h"
-#include "src/compiler/instruction.h"
+#include "src/compiler/backend/code-generator.h"
+#include "src/compiler/backend/instruction.h"
#include "src/compiler/linkage.h"
#include "src/compiler/opcodes.h"
#include "src/macro-assembler.h"
@@ -224,4 +223,4 @@ inline bool HasCallDescriptorFlag(Instruction* instr,
} // namespace internal
} // namespace v8
-#endif // V8_COMPILER_CODE_GENERATOR_IMPL_H_
+#endif // V8_COMPILER_BACKEND_CODE_GENERATOR_IMPL_H_
diff --git a/deps/v8/src/compiler/code-generator.cc b/deps/v8/src/compiler/backend/code-generator.cc
index b6d782d96a..47d416030c 100644
--- a/deps/v8/src/compiler/code-generator.cc
+++ b/deps/v8/src/compiler/backend/code-generator.cc
@@ -2,19 +2,21 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/compiler/code-generator.h"
+#include "src/compiler/backend/code-generator.h"
#include "src/address-map.h"
#include "src/assembler-inl.h"
#include "src/base/adapters.h"
-#include "src/compiler/code-generator-impl.h"
+#include "src/compiler/backend/code-generator-impl.h"
#include "src/compiler/linkage.h"
#include "src/compiler/pipeline.h"
#include "src/compiler/wasm-compiler.h"
+#include "src/counters.h"
#include "src/eh-frame.h"
#include "src/frames.h"
-#include "src/lsan.h"
+#include "src/log.h"
#include "src/macro-assembler-inl.h"
+#include "src/objects/smi.h"
#include "src/optimized-compilation-info.h"
#include "src/string-constants.h"
@@ -44,7 +46,8 @@ CodeGenerator::CodeGenerator(
InstructionSequence* code, OptimizedCompilationInfo* info, Isolate* isolate,
base::Optional<OsrHelper> osr_helper, int start_source_position,
JumpOptimizationInfo* jump_opt, PoisoningMitigationLevel poisoning_level,
- const AssemblerOptions& options, int32_t builtin_index)
+ const AssemblerOptions& options, int32_t builtin_index,
+ std::unique_ptr<AssemblerBuffer> buffer)
: zone_(codegen_zone),
isolate_(isolate),
frame_access_state_(nullptr),
@@ -56,17 +59,14 @@ CodeGenerator::CodeGenerator(
current_block_(RpoNumber::Invalid()),
start_source_position_(start_source_position),
current_source_position_(SourcePosition::Unknown()),
- tasm_(isolate, options, nullptr, 0, CodeObjectRequired::kNo),
+ tasm_(isolate, options, CodeObjectRequired::kNo, std::move(buffer)),
resolver_(this),
safepoints_(zone()),
handlers_(zone()),
deoptimization_exits_(zone()),
deoptimization_states_(zone()),
deoptimization_literals_(zone()),
- inlined_function_count_(0),
translations_(zone()),
- handler_table_offset_(0),
- last_lazy_deopt_pc_(0),
caller_registers_saved_(false),
jump_tables_(nullptr),
ools_(nullptr),
@@ -114,17 +114,19 @@ void CodeGenerator::CreateFrameAccessState(Frame* frame) {
CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
int deoptimization_id, SourcePosition pos) {
+ if (deoptimization_id > Deoptimizer::kMaxNumberOfEntries) {
+ return kTooManyDeoptimizationBailouts;
+ }
+
DeoptimizeKind deopt_kind = GetDeoptimizationKind(deoptimization_id);
DeoptimizeReason deoptimization_reason =
GetDeoptimizationReason(deoptimization_id);
- Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
- tasm()->isolate(), deoptimization_id, deopt_kind);
- if (deopt_entry == kNullAddress) return kTooManyDeoptimizationBailouts;
+ Address deopt_entry =
+ Deoptimizer::GetDeoptimizationEntry(tasm()->isolate(), deopt_kind);
if (info()->is_source_positions_enabled()) {
tasm()->RecordDeoptReason(deoptimization_reason, pos, deoptimization_id);
}
- tasm()->CallForDeoptimization(deopt_entry, deoptimization_id,
- RelocInfo::RUNTIME_ENTRY);
+ tasm()->CallForDeoptimization(deopt_entry, deoptimization_id);
return kSuccess;
}
@@ -140,11 +142,6 @@ void CodeGenerator::AssembleCode() {
AssembleSourcePosition(start_source_position());
}
- // Place function entry hook if requested to do so.
- if (linkage()->GetIncomingDescriptor()->IsJSFunctionCall()) {
- ProfileEntryHookStub::MaybeCallEntryHookDelayed(tasm(), zone());
- }
-
// Check that {kJavaScriptCallCodeStartRegister} has been set correctly.
if (FLAG_debug_code & (info->code_kind() == Code::OPTIMIZED_FUNCTION ||
info->code_kind() == Code::BYTECODE_HANDLER)) {
@@ -152,9 +149,7 @@ void CodeGenerator::AssembleCode() {
AssembleCodeStartRegisterCheck();
}
- // TODO(jupvfranco): This should be the first thing in the code, otherwise
- // MaybeCallEntryHookDelayed may happen twice (for optimized and deoptimized
- // code). We want to bailout only from JS functions, which are the only ones
+  // We want to bail out only from JS functions, which are the only ones
// that are optimized.
if (info->IsOptimizing()) {
DCHECK(linkage()->GetIncomingDescriptor()->IsJSFunctionCall());
@@ -176,6 +171,16 @@ void CodeGenerator::AssembleCode() {
}
inlined_function_count_ = deoptimization_literals_.size();
+ // Define deoptimization literals for all BytecodeArrays to which we might
+  // deopt, to ensure they are strongly held by the optimized code.
+ if (info->has_bytecode_array()) {
+ DefineDeoptimizationLiteral(DeoptimizationLiteral(info->bytecode_array()));
+ }
+ for (OptimizedCompilationInfo::InlinedFunctionHolder& inlined :
+ info->inlined_functions()) {
+ DefineDeoptimizationLiteral(DeoptimizationLiteral(inlined.bytecode_array));
+ }
+
unwinding_info_writer_.SetNumberOfInstructionBlocks(
code()->InstructionBlockCount());
@@ -183,78 +188,63 @@ void CodeGenerator::AssembleCode() {
block_starts_.assign(code()->instruction_blocks().size(), -1);
instr_starts_.assign(code()->instructions().size(), -1);
}
- // Assemble all non-deferred blocks, followed by deferred ones.
- for (int deferred = 0; deferred < 2; ++deferred) {
- for (const InstructionBlock* block : code()->instruction_blocks()) {
- if (block->IsDeferred() == (deferred == 0)) {
- continue;
- }
- // Align loop headers on 16-byte boundaries.
- if (block->IsLoopHeader() && !tasm()->jump_optimization_info()) {
- tasm()->Align(16);
+ // Assemble instructions in assembly order.
+ for (const InstructionBlock* block : code()->ao_blocks()) {
+    // Align loop headers on vendor-recommended boundaries.
+ if (block->ShouldAlign() && !tasm()->jump_optimization_info()) {
+ tasm()->CodeTargetAlign();
+ }
+ if (info->trace_turbo_json_enabled()) {
+ block_starts_[block->rpo_number().ToInt()] = tasm()->pc_offset();
+ }
+ // Bind a label for a block.
+ current_block_ = block->rpo_number();
+ unwinding_info_writer_.BeginInstructionBlock(tasm()->pc_offset(), block);
+ if (FLAG_code_comments) {
+ std::ostringstream buffer;
+ buffer << "-- B" << block->rpo_number().ToInt() << " start";
+ if (block->IsDeferred()) buffer << " (deferred)";
+ if (!block->needs_frame()) buffer << " (no frame)";
+ if (block->must_construct_frame()) buffer << " (construct frame)";
+ if (block->must_deconstruct_frame()) buffer << " (deconstruct frame)";
+
+ if (block->IsLoopHeader()) {
+ buffer << " (loop up to " << block->loop_end().ToInt() << ")";
}
- if (info->trace_turbo_json_enabled()) {
- block_starts_[block->rpo_number().ToInt()] = tasm()->pc_offset();
- }
- // Bind a label for a block.
- current_block_ = block->rpo_number();
- unwinding_info_writer_.BeginInstructionBlock(tasm()->pc_offset(), block);
- if (FLAG_code_comments) {
- Vector<char> buffer = Vector<char>::New(200);
- char* buffer_start = buffer.start();
- LSAN_IGNORE_OBJECT(buffer_start);
-
- int next = SNPrintF(
- buffer, "-- B%d start%s%s%s%s", block->rpo_number().ToInt(),
- block->IsDeferred() ? " (deferred)" : "",
- block->needs_frame() ? "" : " (no frame)",
- block->must_construct_frame() ? " (construct frame)" : "",
- block->must_deconstruct_frame() ? " (deconstruct frame)" : "");
-
- buffer = buffer.SubVector(next, buffer.length());
-
- if (block->IsLoopHeader()) {
- next =
- SNPrintF(buffer, " (loop up to %d)", block->loop_end().ToInt());
- buffer = buffer.SubVector(next, buffer.length());
- }
- if (block->loop_header().IsValid()) {
- next =
- SNPrintF(buffer, " (in loop %d)", block->loop_header().ToInt());
- buffer = buffer.SubVector(next, buffer.length());
- }
- SNPrintF(buffer, " --");
- tasm()->RecordComment(buffer_start);
+ if (block->loop_header().IsValid()) {
+ buffer << " (in loop " << block->loop_header().ToInt() << ")";
}
+ buffer << " --";
+ tasm()->RecordComment(buffer.str().c_str());
+ }
- frame_access_state()->MarkHasFrame(block->needs_frame());
+ frame_access_state()->MarkHasFrame(block->needs_frame());
- tasm()->bind(GetLabel(current_block_));
+ tasm()->bind(GetLabel(current_block_));
- TryInsertBranchPoisoning(block);
+ TryInsertBranchPoisoning(block);
- if (block->must_construct_frame()) {
- AssembleConstructFrame();
- // We need to setup the root register after we assemble the prologue, to
- // avoid clobbering callee saved registers in case of C linkage and
- // using the roots.
- // TODO(mtrofin): investigate how we can avoid doing this repeatedly.
- if (linkage()->GetIncomingDescriptor()->InitializeRootRegister()) {
- tasm()->InitializeRootRegister();
- }
+ if (block->must_construct_frame()) {
+ AssembleConstructFrame();
+      // We need to set up the root register after we assemble the prologue, to
+      // avoid clobbering callee-saved registers in case of C linkage and
+ // using the roots.
+ // TODO(mtrofin): investigate how we can avoid doing this repeatedly.
+ if (linkage()->GetIncomingDescriptor()->InitializeRootRegister()) {
+ tasm()->InitializeRootRegister();
}
+ }
- if (FLAG_enable_embedded_constant_pool && !block->needs_frame()) {
- ConstantPoolUnavailableScope constant_pool_unavailable(tasm());
- result_ = AssembleBlock(block);
- } else {
- result_ = AssembleBlock(block);
- }
- if (result_ != kSuccess) return;
- unwinding_info_writer_.EndInstructionBlock(block);
+ if (FLAG_enable_embedded_constant_pool && !block->needs_frame()) {
+ ConstantPoolUnavailableScope constant_pool_unavailable(tasm());
+ result_ = AssembleBlock(block);
+ } else {
+ result_ = AssembleBlock(block);
+ }
+ if (result_ != kSuccess) return;
+ unwinding_info_writer_.EndInstructionBlock(block);
}
- }
// Assemble all out-of-line code.
if (ools_) {
@@ -291,7 +281,7 @@ void CodeGenerator::AssembleCode() {
// Emit the jump tables.
if (jump_tables_) {
- tasm()->Align(kPointerSize);
+ tasm()->Align(kSystemPointerSize);
for (JumpTable* table = jump_tables_; table; table = table->next()) {
tasm()->bind(table->label());
AssembleJumpTable(table->targets(), table->target_count());
@@ -315,6 +305,8 @@ void CodeGenerator::AssembleCode() {
}
}
+ tasm()->FinalizeJumpOptimizationInfo();
+
result_ = kSuccess;
}
@@ -401,7 +393,7 @@ MaybeHandle<Code> CodeGenerator::FinalizeCode() {
MaybeHandle<Code> maybe_code = isolate()->factory()->TryNewCode(
desc, info()->code_kind(), Handle<Object>(), info()->builtin_index(),
- source_positions, deopt_data, kMovable, info()->stub_key(), true,
+ source_positions, deopt_data, kMovable, true,
frame()->GetTotalFrameSlotCount(), safepoints()->GetCodeOffset(),
handler_table_offset_);
@@ -410,6 +402,7 @@ MaybeHandle<Code> CodeGenerator::FinalizeCode() {
tasm()->AbortedCodeGeneration();
return MaybeHandle<Code>();
}
+
isolate()->counters()->total_compiled_code_size()->Increment(
code->raw_instruction_size());
@@ -420,7 +413,6 @@ MaybeHandle<Code> CodeGenerator::FinalizeCode() {
return code;
}
-
bool CodeGenerator::IsNextInAssemblyOrder(RpoNumber block) const {
return code()
->InstructionBlockAt(current_block_)
@@ -428,12 +420,10 @@ bool CodeGenerator::IsNextInAssemblyOrder(RpoNumber block) const {
.IsNext(code()->InstructionBlockAt(block)->ao_number());
}
-
void CodeGenerator::RecordSafepoint(ReferenceMap* references,
- Safepoint::Kind kind, int arguments,
+ Safepoint::Kind kind,
Safepoint::DeoptMode deopt_mode) {
- Safepoint safepoint =
- safepoints()->DefineSafepoint(tasm(), kind, arguments, deopt_mode);
+ Safepoint safepoint = safepoints()->DefineSafepoint(tasm(), kind, deopt_mode);
int stackSlotToSpillSlotDelta =
frame()->GetTotalFrameSlotCount() - frame()->GetSpillSlotCount();
for (const InstructionOperand& operand : references->reference_operands()) {
@@ -459,9 +449,8 @@ bool CodeGenerator::IsMaterializableFromRoot(Handle<HeapObject> object,
const CallDescriptor* incoming_descriptor =
linkage()->GetIncomingDescriptor();
if (incoming_descriptor->flags() & CallDescriptor::kCanUseRoots) {
- Heap* heap = isolate()->heap();
- return heap->IsRootHandle(object, index_return) &&
- !heap->RootCanBeWrittenAfterInitialization(*index_return);
+ return isolate()->roots_table().IsRootHandle(object, index_return) &&
+ RootsTable::IsImmortalImmovable(*index_return);
}
return false;
}
@@ -515,9 +504,8 @@ void CodeGenerator::GetPushCompatibleMoves(Instruction* instr,
// then the full gap resolver must be used since optimization with
// pushes don't participate in the parallel move and might clobber
// values needed for the gap resolve.
- if (source.IsStackSlot() &&
- LocationOperand::cast(source).index() >=
- first_push_compatible_index) {
+ if (source.IsStackSlot() && LocationOperand::cast(source).index() >=
+ first_push_compatible_index) {
pushes->clear();
return;
}
@@ -725,7 +713,7 @@ void CodeGenerator::AssembleSourcePosition(SourcePosition source_position) {
source_position, false);
if (FLAG_code_comments) {
OptimizedCompilationInfo* info = this->info();
- if (info->IsStub()) return;
+ if (info->IsNotOptimizedFunctionOrWasmFunction()) return;
std::ostringstream buffer;
buffer << "-- ";
// Turbolizer only needs the source position, as it can reconstruct
@@ -740,9 +728,7 @@ void CodeGenerator::AssembleSourcePosition(SourcePosition source_position) {
buffer << source_position.InliningStack(info);
}
buffer << " --";
- char* str = StrDup(buffer.str().c_str());
- LSAN_IGNORE_OBJECT(str);
- tasm()->RecordComment(str);
+ tasm()->RecordComment(buffer.str().c_str());
}
}
@@ -762,7 +748,7 @@ StubCallMode CodeGenerator::DetermineStubCallMode() const {
return (code_kind == Code::WASM_FUNCTION ||
code_kind == Code::WASM_TO_JS_FUNCTION)
? StubCallMode::kCallWasmRuntimeStub
- : StubCallMode::kCallOnHeapBuiltin;
+ : StubCallMode::kCallCodeObject;
}
void CodeGenerator::AssembleGaps(Instruction* instr) {
@@ -854,20 +840,18 @@ Handle<DeoptimizationData> CodeGenerator::GenerateDeoptimizationData() {
return data;
}
-
Label* CodeGenerator::AddJumpTable(Label** targets, size_t target_count) {
jump_tables_ = new (zone()) JumpTable(jump_tables_, targets, target_count);
return jump_tables_->label();
}
-
void CodeGenerator::RecordCallPosition(Instruction* instr) {
CallDescriptor::Flags flags(MiscField::decode(instr->opcode()));
bool needs_frame_state = (flags & CallDescriptor::kNeedsFrameState);
RecordSafepoint(
- instr->reference_map(), Safepoint::kSimple, 0,
+ instr->reference_map(), Safepoint::kSimple,
needs_frame_state ? Safepoint::kLazyDeopt : Safepoint::kNoLazyDeopt);
if (flags & CallDescriptor::kHasExceptionHandler) {
@@ -967,46 +951,25 @@ void CodeGenerator::TranslateStateValueDescriptor(
}
}
-
void CodeGenerator::TranslateFrameStateDescriptorOperands(
FrameStateDescriptor* desc, InstructionOperandIterator* iter,
- OutputFrameStateCombine combine, Translation* translation) {
+ Translation* translation) {
size_t index = 0;
StateValueList* values = desc->GetStateValueDescriptors();
for (StateValueList::iterator it = values->begin(); it != values->end();
++it, ++index) {
- StateValueDescriptor* value_desc = (*it).desc;
- if (!combine.IsOutputIgnored()) {
- // The result of the call should be placed at position
- // [index_from_top] in the stack (overwriting whatever was
- // previously there).
- size_t index_from_top = desc->GetSize() - 1 - combine.GetOffsetToPokeAt();
- if (index >= index_from_top &&
- index < index_from_top + iter->instruction()->OutputCount()) {
- DCHECK_NOT_NULL(translation);
- AddTranslationForOperand(
- translation, iter->instruction(),
- iter->instruction()->OutputAt(index - index_from_top),
- MachineType::AnyTagged());
- // Skip the instruction operands.
- TranslateStateValueDescriptor(value_desc, (*it).nested, nullptr, iter);
- continue;
- }
- }
- TranslateStateValueDescriptor(value_desc, (*it).nested, translation, iter);
+ TranslateStateValueDescriptor((*it).desc, (*it).nested, translation, iter);
}
DCHECK_EQ(desc->GetSize(), index);
}
-
void CodeGenerator::BuildTranslationForFrameStateDescriptor(
FrameStateDescriptor* descriptor, InstructionOperandIterator* iter,
Translation* translation, OutputFrameStateCombine state_combine) {
// Outer-most state must be added to translation first.
if (descriptor->outer_state() != nullptr) {
BuildTranslationForFrameStateDescriptor(descriptor->outer_state(), iter,
- translation,
- OutputFrameStateCombine::Ignore());
+ translation, state_combine);
}
Handle<SharedFunctionInfo> shared_info;
@@ -1020,11 +983,19 @@ void CodeGenerator::BuildTranslationForFrameStateDescriptor(
DefineDeoptimizationLiteral(DeoptimizationLiteral(shared_info));
switch (descriptor->type()) {
- case FrameStateType::kInterpretedFunction:
+ case FrameStateType::kInterpretedFunction: {
+ int return_offset = 0;
+ int return_count = 0;
+ if (!state_combine.IsOutputIgnored()) {
+ return_offset = static_cast<int>(state_combine.GetOffsetToPokeAt());
+ return_count = static_cast<int>(iter->instruction()->OutputCount());
+ }
translation->BeginInterpretedFrame(
descriptor->bailout_id(), shared_info_id,
- static_cast<unsigned int>(descriptor->locals_count() + 1));
+ static_cast<unsigned int>(descriptor->locals_count() + 1),
+ return_offset, return_count);
break;
+ }
case FrameStateType::kArgumentsAdaptor:
translation->BeginArgumentsAdaptorFrame(
shared_info_id,
@@ -1062,11 +1033,9 @@ void CodeGenerator::BuildTranslationForFrameStateDescriptor(
}
}
- TranslateFrameStateDescriptorOperands(descriptor, iter, state_combine,
- translation);
+ TranslateFrameStateDescriptorOperands(descriptor, iter, translation);
}
-
int CodeGenerator::BuildTranslation(Instruction* instr, int pc_offset,
size_t frame_state_offset,
OutputFrameStateCombine state_combine) {
@@ -1159,8 +1128,8 @@ void CodeGenerator::AddTranslationForOperand(Translation* translation,
if (type.representation() == MachineRepresentation::kTagged) {
// When pointers are 4 bytes, we can use int32 constants to represent
// Smis.
- DCHECK_EQ(4, kPointerSize);
- Smi* smi = reinterpret_cast<Smi*>(constant.ToInt32());
+ DCHECK_EQ(4, kSystemPointerSize);
+ Smi smi(static_cast<Address>(constant.ToInt32()));
DCHECK(smi->IsSmi());
literal = DeoptimizationLiteral(smi->value());
} else if (type.representation() == MachineRepresentation::kBit) {
@@ -1187,7 +1156,7 @@ void CodeGenerator::AddTranslationForOperand(Translation* translation,
}
break;
case Constant::kInt64:
- DCHECK_EQ(8, kPointerSize);
+ DCHECK_EQ(8, kSystemPointerSize);
if (type.representation() == MachineRepresentation::kWord64) {
literal =
DeoptimizationLiteral(static_cast<double>(constant.ToInt64()));
@@ -1195,7 +1164,7 @@ void CodeGenerator::AddTranslationForOperand(Translation* translation,
// When pointers are 8 bytes, we can use int64 constants to represent
// Smis.
DCHECK_EQ(MachineRepresentation::kTagged, type.representation());
- Smi* smi = reinterpret_cast<Smi*>(constant.ToInt64());
+ Smi smi(static_cast<Address>(constant.ToInt64()));
DCHECK(smi->IsSmi());
literal = DeoptimizationLiteral(smi->value());
}
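
The reinterpret_cast<Smi*>-to-Smi(Address) rewrites in this hunk track V8's migration of Smi from a dummy pointer type to a value type wrapping the raw tagged word; the encoding itself is unchanged. Assuming the default 64-bit layout without pointer compression, decoding looks like this sketch:

#include <cstdint>

using Address = uintptr_t;

// 64-bit Smi: [32-bit signed payload][31 zero bits][tag bit 0].
inline bool IsSmi(Address word) { return (word & 1) == 0; }
inline int32_t SmiValue(Address word) {
  return static_cast<int32_t>(static_cast<intptr_t>(word) >> 32);
}
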
diff --git a/deps/v8/src/compiler/code-generator.h b/deps/v8/src/compiler/backend/code-generator.h
index 1ba0e32ce6..4ab5dc11d5 100644
--- a/deps/v8/src/compiler/code-generator.h
+++ b/deps/v8/src/compiler/backend/code-generator.h
@@ -2,14 +2,14 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_COMPILER_CODE_GENERATOR_H_
-#define V8_COMPILER_CODE_GENERATOR_H_
+#ifndef V8_COMPILER_BACKEND_CODE_GENERATOR_H_
+#define V8_COMPILER_BACKEND_CODE_GENERATOR_H_
#include "src/base/optional.h"
-#include "src/compiler/gap-resolver.h"
-#include "src/compiler/instruction.h"
+#include "src/compiler/backend/gap-resolver.h"
+#include "src/compiler/backend/instruction.h"
+#include "src/compiler/backend/unwinding-info-writer.h"
#include "src/compiler/osr.h"
-#include "src/compiler/unwinding-info-writer.h"
#include "src/deoptimizer.h"
#include "src/macro-assembler.h"
#include "src/safepoint-table.h"
@@ -36,7 +36,6 @@ struct BranchInfo {
bool fallthru;
};
-
class InstructionOperandIterator {
public:
InstructionOperandIterator(Instruction* instr, size_t pos)
@@ -96,8 +95,8 @@ class CodeGenerator final : public GapResolver::Assembler {
int start_source_position,
JumpOptimizationInfo* jump_opt,
PoisoningMitigationLevel poisoning_level,
- const AssemblerOptions& options,
- int32_t builtin_index);
+ const AssemblerOptions& options, int32_t builtin_index,
+ std::unique_ptr<AssemblerBuffer> = {});
// Generate native code. After calling AssembleCode, call FinalizeCode to
// produce the actual code object. If an error occurs during either phase,
@@ -131,7 +130,7 @@ class CodeGenerator final : public GapResolver::Assembler {
// Record a safepoint with the given pointer map.
void RecordSafepoint(ReferenceMap* references, Safepoint::Kind kind,
- int arguments, Safepoint::DeoptMode deopt_mode);
+ Safepoint::DeoptMode deopt_mode);
Zone* zone() const { return zone_; }
TurboAssembler* tasm() { return &tasm_; }
@@ -355,7 +354,6 @@ class CodeGenerator final : public GapResolver::Assembler {
InstructionOperandIterator* iter);
void TranslateFrameStateDescriptorOperands(FrameStateDescriptor* desc,
InstructionOperandIterator* iter,
- OutputFrameStateCombine combine,
Translation* translation);
void AddTranslationForOperand(Translation* translation, Instruction* instr,
InstructionOperand* op, MachineType type);
@@ -417,10 +415,10 @@ class CodeGenerator final : public GapResolver::Assembler {
ZoneDeque<DeoptimizationExit*> deoptimization_exits_;
ZoneDeque<DeoptimizationState*> deoptimization_states_;
ZoneDeque<DeoptimizationLiteral> deoptimization_literals_;
- size_t inlined_function_count_;
+ size_t inlined_function_count_ = 0;
TranslationBuffer translations_;
- int handler_table_offset_;
- int last_lazy_deopt_pc_;
+ int handler_table_offset_ = 0;
+ int last_lazy_deopt_pc_ = 0;
// kArchCallCFunction could be reached either:
// kArchCallCFunction;
@@ -451,4 +449,4 @@ class CodeGenerator final : public GapResolver::Assembler {
} // namespace internal
} // namespace v8
-#endif // V8_COMPILER_CODE_GENERATOR_H_
+#endif // V8_COMPILER_BACKEND_CODE_GENERATOR_H_
diff --git a/deps/v8/src/compiler/frame-elider.cc b/deps/v8/src/compiler/backend/frame-elider.cc
index 35d292b4e3..2167d0abaa 100644
--- a/deps/v8/src/compiler/frame-elider.cc
+++ b/deps/v8/src/compiler/backend/frame-elider.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/compiler/frame-elider.h"
+#include "src/compiler/backend/frame-elider.h"
#include "src/base/adapters.h"
@@ -18,7 +18,6 @@ void FrameElider::Run() {
MarkDeConstruction();
}
-
void FrameElider::MarkBlocks() {
for (InstructionBlock* block : instruction_blocks()) {
if (block->needs_frame()) continue;
@@ -34,13 +33,11 @@ void FrameElider::MarkBlocks() {
}
}
-
void FrameElider::PropagateMarks() {
while (PropagateInOrder() || PropagateReversed()) {
}
}
-
void FrameElider::MarkDeConstruction() {
for (InstructionBlock* block : instruction_blocks()) {
if (block->needs_frame()) {
@@ -78,7 +75,6 @@ void FrameElider::MarkDeConstruction() {
}
}
-
bool FrameElider::PropagateInOrder() {
bool changed = false;
for (InstructionBlock* block : instruction_blocks()) {
@@ -87,7 +83,6 @@ bool FrameElider::PropagateInOrder() {
return changed;
}
-
bool FrameElider::PropagateReversed() {
bool changed = false;
for (InstructionBlock* block : base::Reversed(instruction_blocks())) {
@@ -96,7 +91,6 @@ bool FrameElider::PropagateReversed() {
return changed;
}
-
bool FrameElider::PropagateIntoBlock(InstructionBlock* block) {
// Already marked, nothing to do...
if (block->needs_frame()) return false;
@@ -147,17 +141,14 @@ bool FrameElider::PropagateIntoBlock(InstructionBlock* block) {
}
}
-
const InstructionBlocks& FrameElider::instruction_blocks() const {
return code_->instruction_blocks();
}
-
InstructionBlock* FrameElider::InstructionBlockAt(RpoNumber rpo_number) const {
return code_->InstructionBlockAt(rpo_number);
}
-
Instruction* FrameElider::InstructionAt(int index) const {
return code_->InstructionAt(index);
}
diff --git a/deps/v8/src/compiler/frame-elider.h b/deps/v8/src/compiler/backend/frame-elider.h
index 7d31619220..11dfce2c52 100644
--- a/deps/v8/src/compiler/frame-elider.h
+++ b/deps/v8/src/compiler/backend/frame-elider.h
@@ -2,16 +2,15 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_COMPILER_FRAME_ELIDER_H_
-#define V8_COMPILER_FRAME_ELIDER_H_
+#ifndef V8_COMPILER_BACKEND_FRAME_ELIDER_H_
+#define V8_COMPILER_BACKEND_FRAME_ELIDER_H_
-#include "src/compiler/instruction.h"
+#include "src/compiler/backend/instruction.h"
namespace v8 {
namespace internal {
namespace compiler {
-
// Determine which instruction blocks need a frame and where frames must be
// constructed/deconstructed.
class FrameElider {
@@ -19,7 +18,6 @@ class FrameElider {
explicit FrameElider(InstructionSequence* code);
void Run();
-
private:
void MarkBlocks();
void PropagateMarks();
@@ -38,4 +36,4 @@ class FrameElider {
} // namespace internal
} // namespace v8
-#endif // V8_COMPILER_FRAME_ELIDER_H_
+#endif // V8_COMPILER_BACKEND_FRAME_ELIDER_H_
diff --git a/deps/v8/src/compiler/gap-resolver.cc b/deps/v8/src/compiler/backend/gap-resolver.cc
index c102c62ad4..6cb7d7fbaf 100644
--- a/deps/v8/src/compiler/gap-resolver.cc
+++ b/deps/v8/src/compiler/backend/gap-resolver.cc
@@ -2,11 +2,14 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/compiler/gap-resolver.h"
+#include "src/compiler/backend/gap-resolver.h"
#include <algorithm>
#include <set>
+#include "src/base/enum-set.h"
+#include "src/register-configuration.h"
+
namespace v8 {
namespace internal {
namespace compiler {
@@ -21,7 +24,7 @@ MoveOperands* Split(MoveOperands* move, MachineRepresentation smaller_rep,
ParallelMove* moves) {
DCHECK(!kSimpleFPAliasing);
// Splitting is only possible when the slot size is the same as float size.
- DCHECK_EQ(kPointerSize, kFloatSize);
+ DCHECK_EQ(kSystemPointerSize, kFloatSize);
const LocationOperand& src_loc = LocationOperand::cast(move->source());
const LocationOperand& dst_loc = LocationOperand::cast(move->destination());
MachineRepresentation dst_rep = dst_loc.representation();
@@ -37,7 +40,7 @@ MoveOperands* Split(MoveOperands* move, MachineRepresentation smaller_rep,
dst_rep, 0, smaller_rep, &base));
int src_index = -1;
- int slot_size = (1 << ElementSizeLog2Of(smaller_rep)) / kPointerSize;
+ int slot_size = (1 << ElementSizeLog2Of(smaller_rep)) / kSystemPointerSize;
int src_step = 1;
if (src_kind == LocationOperand::REGISTER) {
src_index = src_loc.register_code() * aliases;
@@ -71,31 +74,54 @@ MoveOperands* Split(MoveOperands* move, MachineRepresentation smaller_rep,
return move;
}
+enum MoveOperandKind : uint8_t { kConstant, kGpReg, kFpReg, kStack };
+
+MoveOperandKind GetKind(const InstructionOperand& move) {
+ if (move.IsConstant()) return kConstant;
+ LocationOperand loc_op = LocationOperand::cast(move);
+ if (loc_op.location_kind() != LocationOperand::REGISTER) return kStack;
+ return IsFloatingPoint(loc_op.representation()) ? kFpReg : kGpReg;
+}
+
} // namespace
void GapResolver::Resolve(ParallelMove* moves) {
- // Clear redundant moves, and collect FP move representations if aliasing
- // is non-simple.
- int reps = 0;
- for (size_t i = 0; i < moves->size();) {
- MoveOperands* move = (*moves)[i];
+ base::EnumSet<MoveOperandKind, uint8_t> source_kinds;
+ base::EnumSet<MoveOperandKind, uint8_t> destination_kinds;
+
+ // Remove redundant moves, collect source kinds and destination kinds to
+ // detect simple non-overlapping moves, and collect FP move representations if
+ // aliasing is non-simple.
+ int fp_reps = 0;
+ for (auto it = moves->begin(); it != moves->end();) {
+ MoveOperands* move = *it;
if (move->IsRedundant()) {
- (*moves)[i] = moves->back();
+ *it = moves->back();
moves->pop_back();
continue;
}
- i++;
+ source_kinds.Add(GetKind(move->source()));
+ destination_kinds.Add(GetKind(move->destination()));
if (!kSimpleFPAliasing && move->destination().IsFPRegister()) {
- reps |= RepresentationBit(
+ fp_reps |= RepresentationBit(
LocationOperand::cast(move->destination()).representation());
}
+ ++it;
+ }
+
+ if ((source_kinds & destination_kinds).empty() || moves->size() < 2) {
+ // Fast path for non-conflicting parallel moves.
+ for (MoveOperands* move : *moves) {
+ assembler_->AssembleMove(&move->source(), &move->destination());
+ }
+ return;
}
if (!kSimpleFPAliasing) {
- if (reps && !base::bits::IsPowerOfTwo(reps)) {
+ if (fp_reps && !base::bits::IsPowerOfTwo(fp_reps)) {
// Start with the smallest FP moves, so we never encounter smaller moves
// in the middle of a cycle of larger moves.
- if ((reps & RepresentationBit(MachineRepresentation::kFloat32)) != 0) {
+ if ((fp_reps & RepresentationBit(MachineRepresentation::kFloat32)) != 0) {
split_rep_ = MachineRepresentation::kFloat32;
for (size_t i = 0; i < moves->size(); ++i) {
auto move = (*moves)[i];
@@ -103,7 +129,7 @@ void GapResolver::Resolve(ParallelMove* moves) {
PerformMove(moves, move);
}
}
- if ((reps & RepresentationBit(MachineRepresentation::kFloat64)) != 0) {
+ if ((fp_reps & RepresentationBit(MachineRepresentation::kFloat64)) != 0) {
split_rep_ = MachineRepresentation::kFloat64;
for (size_t i = 0; i < moves->size(); ++i) {
auto move = (*moves)[i];
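
The fast path added to GapResolver::Resolve rests on kind-level disjointness: if no operand kind (constant, GP register, FP register, stack slot) appears as both a source and a destination, then no move can overwrite a location another move still has to read, and the parallel move degenerates to a plain sequence with no cycle detection. A standalone sketch of the test, with simplified types:

#include <vector>

enum MoveOperandKind { kConstant, kGpReg, kFpReg, kStack };
struct Move { MoveOperandKind src, dst; };

// Disjoint kind sets imply the moves cannot conflict.
bool IsConflictFree(const std::vector<Move>& moves) {
  unsigned src_kinds = 0, dst_kinds = 0;
  for (const Move& m : moves) {
    src_kinds |= 1u << m.src;
    dst_kinds |= 1u << m.dst;
  }
  return (src_kinds & dst_kinds) == 0;
}

The test is deliberately conservative: register-to-register shuffles still take the slow path, since kGpReg then shows up in both sets even when the concrete registers differ.
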
diff --git a/deps/v8/src/compiler/gap-resolver.h b/deps/v8/src/compiler/backend/gap-resolver.h
index 9a4fe4e6d6..f76afcfc91 100644
--- a/deps/v8/src/compiler/gap-resolver.h
+++ b/deps/v8/src/compiler/backend/gap-resolver.h
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_COMPILER_GAP_RESOLVER_H_
-#define V8_COMPILER_GAP_RESOLVER_H_
+#ifndef V8_COMPILER_BACKEND_GAP_RESOLVER_H_
+#define V8_COMPILER_BACKEND_GAP_RESOLVER_H_
-#include "src/compiler/instruction.h"
+#include "src/compiler/backend/instruction.h"
namespace v8 {
namespace internal {
@@ -50,4 +50,4 @@ class GapResolver final {
} // namespace internal
} // namespace v8
-#endif // V8_COMPILER_GAP_RESOLVER_H_
+#endif // V8_COMPILER_BACKEND_GAP_RESOLVER_H_
diff --git a/deps/v8/src/compiler/ia32/code-generator-ia32.cc b/deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc
index c73ad99ad1..9dc6e50e4e 100644
--- a/deps/v8/src/compiler/ia32/code-generator-ia32.cc
+++ b/deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc
@@ -2,19 +2,21 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/compiler/code-generator.h"
+#include "src/compiler/backend/code-generator.h"
#include "src/assembler-inl.h"
+#include "src/base/overflowing-math.h"
#include "src/callable.h"
-#include "src/compiler/code-generator-impl.h"
-#include "src/compiler/gap-resolver.h"
+#include "src/compiler/backend/code-generator-impl.h"
+#include "src/compiler/backend/gap-resolver.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/osr.h"
#include "src/frame-constants.h"
#include "src/frames.h"
-#include "src/heap/heap-inl.h"
+#include "src/heap/heap-inl.h" // crbug.com/v8/8499
#include "src/ia32/assembler-ia32.h"
-#include "src/ia32/macro-assembler-ia32.h"
+#include "src/macro-assembler.h"
+#include "src/objects/smi.h"
#include "src/optimized-compilation-info.h"
#include "src/wasm/wasm-code-manager.h"
#include "src/wasm/wasm-objects.h"
@@ -27,7 +29,6 @@ namespace compiler {
#define kScratchDoubleReg xmm0
-
// Adds IA-32 specific methods for decoding operands.
class IA32OperandConverter : public InstructionOperandConverter {
public:
@@ -187,8 +188,18 @@ class IA32OperandConverter : public InstructionOperandConverter {
UNREACHABLE();
}
}
-};
+ void MoveInstructionOperandToRegister(Register destination,
+ InstructionOperand* op) {
+ if (op->IsImmediate() || op->IsConstant()) {
+ gen_->tasm()->mov(destination, ToImmediate(op));
+ } else if (op->IsRegister()) {
+ gen_->tasm()->Move(destination, ToRegister(op));
+ } else {
+ gen_->tasm()->mov(destination, ToOperand(op));
+ }
+ }
+};
namespace {
@@ -240,7 +251,8 @@ class OutOfLineTruncateDoubleToI final : public OutOfLineCode {
__ movsd(MemOperand(esp, 0), input_);
if (stub_mode_ == StubCallMode::kCallWasmRuntimeStub) {
// A direct call to a wasm runtime stub defined in this module.
- // Just encode the stub index. This will be patched at relocation.
+ // Just encode the stub index. This will be patched when the code
+ // is added to the native module and copied into wasm code space.
__ wasm_call(wasm::WasmCode::kDoubleToI, RelocInfo::WASM_STUB_CALL);
} else {
__ Call(BUILTIN_CODE(isolate_, DoubleToI), RelocInfo::CODE_TARGET);
@@ -257,12 +269,11 @@ class OutOfLineTruncateDoubleToI final : public OutOfLineCode {
Zone* zone_;
};
-
class OutOfLineRecordWrite final : public OutOfLineCode {
public:
OutOfLineRecordWrite(CodeGenerator* gen, Register object, Operand operand,
Register value, Register scratch0, Register scratch1,
- RecordWriteMode mode)
+ RecordWriteMode mode, StubCallMode stub_mode)
: OutOfLineCode(gen),
object_(object),
operand_(operand),
@@ -270,26 +281,9 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
scratch0_(scratch0),
scratch1_(scratch1),
mode_(mode),
+ stub_mode_(stub_mode),
zone_(gen->zone()) {}
- void SaveRegisters(RegList registers) {
- DCHECK_LT(0, NumRegs(registers));
- for (int i = 0; i < Register::kNumRegisters; ++i) {
- if ((registers >> i) & 1u) {
- __ push(Register::from_code(i));
- }
- }
- }
-
- void RestoreRegisters(RegList registers) {
- DCHECK_LT(0, NumRegs(registers));
- for (int i = Register::kNumRegisters - 1; i >= 0; --i) {
- if ((registers >> i) & 1u) {
- __ pop(Register::from_code(i));
- }
- }
- }
-
void Generate() final {
if (mode_ > RecordWriteMode::kValueIsPointer) {
__ JumpIfSmi(value_, exit());
@@ -303,8 +297,16 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
: OMIT_REMEMBERED_SET;
SaveFPRegsMode const save_fp_mode =
frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
- __ CallRecordWriteStub(object_, scratch1_, remembered_set_action,
- save_fp_mode);
+ if (stub_mode_ == StubCallMode::kCallWasmRuntimeStub) {
+ // A direct call to a wasm runtime stub defined in this module.
+ // Just encode the stub index. This will be patched when the code
+ // is added to the native module and copied into wasm code space.
+ __ CallRecordWriteStub(object_, scratch1_, remembered_set_action,
+ save_fp_mode, wasm::WasmCode::kWasmRecordWrite);
+ } else {
+ __ CallRecordWriteStub(object_, scratch1_, remembered_set_action,
+ save_fp_mode);
+ }
}
private:
@@ -314,37 +316,10 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
Register const scratch0_;
Register const scratch1_;
RecordWriteMode const mode_;
+ StubCallMode const stub_mode_;
Zone* zone_;
};
-void MoveOperandIfAliasedWithPoisonRegister(Instruction* call_instruction,
- CodeGenerator* gen) {
- IA32OperandConverter i(gen, call_instruction);
- int const poison_index = i.InputInt32(1);
- if (poison_index == -1) {
- // No aliasing -> nothing to move.
- return;
- }
-
- InstructionOperand* op = call_instruction->InputAt(poison_index);
- if (op->IsImmediate() || op->IsConstant()) {
- gen->tasm()->mov(kSpeculationPoisonRegister, i.ToImmediate(op));
- } else {
- gen->tasm()->mov(kSpeculationPoisonRegister, i.InputOperand(poison_index));
- }
-}
-
-void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
- InstructionCode opcode, Instruction* instr,
- IA32OperandConverter& i) {
- const MemoryAccessMode access_mode =
- static_cast<MemoryAccessMode>(MiscField::decode(opcode));
- if (access_mode == kMemoryAccessPoisoned) {
- Register value = i.OutputRegister();
- codegen->tasm()->and_(value, kSpeculationPoisonRegister);
- }
-}
-
} // namespace
#define ASSEMBLE_COMPARE(asm_instr) \
@@ -430,30 +405,24 @@ void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
__ j(not_equal, &binop); \
} while (false)
-#define ASSEMBLE_I64ATOMIC_BINOP(instr1, instr2) \
- do { \
- Label binop; \
- __ bind(&binop); \
- TurboAssembler::AllowExplicitEbxAccessScope spill_register(tasm()); \
- __ mov(eax, i.MemoryOperand(2)); \
- __ mov(edx, i.NextMemoryOperand(2)); \
- __ push(ebx); \
- frame_access_state()->IncreaseSPDelta(1); \
- InstructionOperand* op = instr->InputAt(0); \
- if (op->IsImmediate() || op->IsConstant()) { \
- __ mov(ebx, i.ToImmediate(op)); \
- } else { \
- __ mov(ebx, i.ToOperand(op)); \
- } \
- __ push(i.InputRegister(1)); \
- __ instr1(ebx, eax); \
- __ instr2(i.InputRegister(1), edx); \
- __ lock(); \
- __ cmpxchg8b(i.MemoryOperand(2)); \
- __ pop(i.InputRegister(1)); \
- __ pop(ebx); \
- frame_access_state()->IncreaseSPDelta(-1); \
- __ j(not_equal, &binop); \
+#define ASSEMBLE_I64ATOMIC_BINOP(instr1, instr2) \
+ do { \
+ Label binop; \
+ __ bind(&binop); \
+ __ mov(eax, i.MemoryOperand(2)); \
+ __ mov(edx, i.NextMemoryOperand(2)); \
+ __ push(ebx); \
+ frame_access_state()->IncreaseSPDelta(1); \
+ i.MoveInstructionOperandToRegister(ebx, instr->InputAt(0)); \
+ __ push(i.InputRegister(1)); \
+ __ instr1(ebx, eax); \
+ __ instr2(i.InputRegister(1), edx); \
+ __ lock(); \
+ __ cmpxchg8b(i.MemoryOperand(2)); \
+ __ pop(i.InputRegister(1)); \
+ __ pop(ebx); \
+ frame_access_state()->IncreaseSPDelta(-1); \
+ __ j(not_equal, &binop); \
} while (false);
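The rewritten macro keeps the standard ia32 pattern for a 64-bit read-modify-write: load the current pair into edx:eax, build the desired pair, and loop on lock cmpxchg8b until no other writer intervened. A minimal C++ analogue of that retry loop, assuming std::atomic<uint64_t> semantics and using add to stand in for the instr1/instr2 pair (illustration only, not part of the patch):

#include <atomic>
#include <cstdint>

// Returns the previous value, just as the emitted loop leaves it in edx:eax.
uint64_t AtomicAdd64(std::atomic<uint64_t>* cell, uint64_t operand) {
  uint64_t expected = cell->load(std::memory_order_relaxed);
  uint64_t desired;
  do {
    desired = expected + operand;  // the instr1/instr2 pair, as one scalar op
  } while (!cell->compare_exchange_strong(expected, desired));
  return expected;
}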
#define ASSEMBLE_MOVX(mov_instr) \
@@ -551,10 +520,10 @@ void AdjustStackPointerForTailCall(TurboAssembler* tasm,
StandardFrameConstants::kFixedSlotCountAboveFp;
int stack_slot_delta = new_slot_above_sp - current_sp_offset;
if (stack_slot_delta > 0) {
- tasm->sub(esp, Immediate(stack_slot_delta * kPointerSize));
+ tasm->sub(esp, Immediate(stack_slot_delta * kSystemPointerSize));
state->IncreaseSPDelta(stack_slot_delta);
} else if (allow_shrinkage && stack_slot_delta < 0) {
- tasm->add(esp, Immediate(-stack_slot_delta * kPointerSize));
+ tasm->add(esp, Immediate(-stack_slot_delta * kSystemPointerSize));
state->IncreaseSPDelta(stack_slot_delta);
}
}
@@ -596,7 +565,7 @@ void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
LocationOperand source_location(LocationOperand::cast(source));
__ push(source_location.GetRegister());
} else if (source.IsImmediate()) {
- __ push(Immediate(ImmediateOperand::cast(source).inline_value()));
+ __ Push(Immediate(ImmediateOperand::cast(source).inline_value()));
} else {
// Pushes of non-scalar data types are not supported.
UNIMPLEMENTED();
@@ -638,32 +607,22 @@ void CodeGenerator::BailoutIfDeoptimized() {
__ test(FieldOperand(eax, CodeDataContainer::kKindSpecificFlagsOffset),
Immediate(1 << Code::kMarkedForDeoptimizationBit));
__ pop(eax); // Restore eax.
- // Ensure we're not serializing (otherwise we'd need to use an indirection to
- // access the builtin below).
- DCHECK(!isolate()->ShouldLoadConstantsFromRootList());
- Handle<Code> code = isolate()->builtins()->builtin_handle(
- Builtins::kCompileLazyDeoptimizedCode);
- __ j(not_zero, code, RelocInfo::CODE_TARGET);
+
+ Label skip;
+ __ j(zero, &skip);
+ __ Jump(BUILTIN_CODE(isolate(), CompileLazyDeoptimizedCode),
+ RelocInfo::CODE_TARGET);
+ __ bind(&skip);
}
void CodeGenerator::GenerateSpeculationPoisonFromCodeStartRegister() {
- __ push(eax); // Push eax so we can use it as a scratch register.
-
- // Set a mask which has all bits set in the normal case, but has all
- // bits cleared if we are speculatively executing the wrong PC.
- __ ComputeCodeStartAddress(eax);
- __ mov(kSpeculationPoisonRegister, Immediate(0));
- __ cmp(kJavaScriptCallCodeStartRegister, eax);
- __ mov(eax, Immediate(-1));
- __ cmov(equal, kSpeculationPoisonRegister, eax);
-
- __ pop(eax); // Restore eax.
+ // TODO(860429): Remove remaining poisoning infrastructure on ia32.
+ UNREACHABLE();
}
void CodeGenerator::AssembleRegisterArgumentPoisoning() {
- __ and_(kJSFunctionRegister, kSpeculationPoisonRegister);
- __ and_(kContextRegister, kSpeculationPoisonRegister);
- __ and_(esp, kSpeculationPoisonRegister);
+ // TODO(860429): Remove remaining poisoning infrastructure on ia32.
+ UNREACHABLE();
}
// Assembles an instruction after register allocation, producing machine code.
@@ -674,28 +633,52 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ArchOpcode arch_opcode = ArchOpcodeField::decode(opcode);
switch (arch_opcode) {
case kArchCallCodeObject: {
- MoveOperandIfAliasedWithPoisonRegister(instr, this);
- if (HasImmediateInput(instr, 0)) {
+ InstructionOperand* op = instr->InputAt(0);
+ if (op->IsImmediate()) {
Handle<Code> code = i.InputCode(0);
- __ call(code, RelocInfo::CODE_TARGET);
- } else {
+ __ Call(code, RelocInfo::CODE_TARGET);
+ } else if (op->IsRegister()) {
Register reg = i.InputRegister(0);
DCHECK_IMPLIES(
HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
reg == kJavaScriptCallCodeStartRegister);
- __ add(reg, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ LoadCodeObjectEntry(reg, reg);
if (HasCallDescriptorFlag(instr, CallDescriptor::kRetpoline)) {
__ RetpolineCall(reg);
} else {
__ call(reg);
}
+ } else {
+ CHECK(tasm()->root_array_available());
+ // This is used to allow calls to the arguments adaptor trampoline from
+ // code that only has 5 gp registers available and cannot call through
+ // an immediate. This happens when the arguments adaptor trampoline is
+ // not an embedded builtin.
+ // TODO(v8:6666): Remove once only embedded builtins are supported.
+ __ push(eax);
+ frame_access_state()->IncreaseSPDelta(1);
+ Operand virtual_call_target_register(
+ kRootRegister, IsolateData::virtual_call_target_register_offset());
+ __ mov(eax, i.InputOperand(0));
+ __ LoadCodeObjectEntry(eax, eax);
+ __ mov(virtual_call_target_register, eax);
+ __ pop(eax);
+ frame_access_state()->IncreaseSPDelta(-1);
+ __ call(virtual_call_target_register);
}
RecordCallPosition(instr);
frame_access_state()->ClearSPDelta();
break;
}
+ case kArchCallBuiltinPointer: {
+ DCHECK(!HasImmediateInput(instr, 0));
+ Register builtin_pointer = i.InputRegister(0);
+ __ CallBuiltinPointer(builtin_pointer);
+ RecordCallPosition(instr);
+ frame_access_state()->ClearSPDelta();
+ break;
+ }
case kArchCallWasmFunction: {
- MoveOperandIfAliasedWithPoisonRegister(instr, this);
if (HasImmediateInput(instr, 0)) {
Constant constant = i.ToConstant(instr->InputAt(0));
Address wasm_code = static_cast<Address>(constant.ToInt32());
@@ -722,20 +705,19 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kArchTailCallCodeObjectFromJSFunction:
case kArchTailCallCodeObject: {
- MoveOperandIfAliasedWithPoisonRegister(instr, this);
if (arch_opcode == kArchTailCallCodeObjectFromJSFunction) {
AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
no_reg, no_reg, no_reg);
}
if (HasImmediateInput(instr, 0)) {
Handle<Code> code = i.InputCode(0);
- __ jmp(code, RelocInfo::CODE_TARGET);
+ __ Jump(code, RelocInfo::CODE_TARGET);
} else {
Register reg = i.InputRegister(0);
DCHECK_IMPLIES(
HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
reg == kJavaScriptCallCodeStartRegister);
- __ add(reg, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ LoadCodeObjectEntry(reg, reg);
if (HasCallDescriptorFlag(instr, CallDescriptor::kRetpoline)) {
__ RetpolineJump(reg);
} else {
@@ -747,7 +729,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArchTailCallWasm: {
- MoveOperandIfAliasedWithPoisonRegister(instr, this);
if (HasImmediateInput(instr, 0)) {
Constant constant = i.ToConstant(instr->InputAt(0));
Address wasm_code = static_cast<Address>(constant.ToInt32());
@@ -765,7 +746,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArchTailCallAddress: {
- MoveOperandIfAliasedWithPoisonRegister(instr, this);
CHECK(!HasImmediateInput(instr, 0));
Register reg = i.InputRegister(0);
DCHECK_IMPLIES(
@@ -781,7 +761,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArchCallJSFunction: {
- MoveOperandIfAliasedWithPoisonRegister(instr, this);
Register func = i.InputRegister(0);
if (FLAG_debug_code) {
// Check the function's context matches the context argument.
@@ -790,8 +769,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
static_assert(kJavaScriptCallCodeStartRegister == ecx, "ABI mismatch");
__ mov(ecx, FieldOperand(func, JSFunction::kCodeOffset));
- __ add(ecx, Immediate(Code::kHeaderSize - kHeapObjectTag));
- __ call(ecx);
+ __ CallCodeObject(ecx);
RecordCallPosition(instr);
frame_access_state()->ClearSPDelta();
break;
@@ -809,9 +787,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
DCHECK(fp_mode_ == kDontSaveFPRegs || fp_mode_ == kSaveFPRegs);
// kReturnRegister0 should have been saved before entering the stub.
int bytes = __ PushCallerSaved(fp_mode_, kReturnRegister0);
- DCHECK_EQ(0, bytes % kPointerSize);
+ DCHECK(IsAligned(bytes, kSystemPointerSize));
DCHECK_EQ(0, frame_access_state()->sp_delta());
- frame_access_state()->IncreaseSPDelta(bytes / kPointerSize);
+ frame_access_state()->IncreaseSPDelta(bytes / kSystemPointerSize);
DCHECK(!caller_registers_saved_);
caller_registers_saved_ = true;
break;
@@ -822,7 +800,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
DCHECK(fp_mode_ == kDontSaveFPRegs || fp_mode_ == kSaveFPRegs);
// Don't overwrite the returned value.
int bytes = __ PopCallerSaved(fp_mode_, kReturnRegister0);
- frame_access_state()->IncreaseSPDelta(-(bytes / kPointerSize));
+ frame_access_state()->IncreaseSPDelta(-(bytes / kSystemPointerSize));
DCHECK_EQ(0, frame_access_state()->sp_delta());
DCHECK(caller_registers_saved_);
caller_registers_saved_ = false;
@@ -832,7 +810,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
AssemblePrepareTailCall();
break;
case kArchCallCFunction: {
- MoveOperandIfAliasedWithPoisonRegister(instr, this);
int const num_parameters = MiscField::decode(instr->opcode());
if (HasImmediateInput(instr, 0)) {
ExternalReference ref = i.InputExternalReference(0);
@@ -856,7 +833,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
// kArchRestoreCallerRegisters;
int bytes =
__ RequiredStackSizeForCallerSaved(fp_mode_, kReturnRegister0);
- frame_access_state()->IncreaseSPDelta(bytes / kPointerSize);
+ frame_access_state()->IncreaseSPDelta(bytes / kSystemPointerSize);
}
break;
}
@@ -940,8 +917,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Register value = i.InputRegister(index);
Register scratch0 = i.TempRegister(0);
Register scratch1 = i.TempRegister(1);
- auto ool = new (zone()) OutOfLineRecordWrite(this, object, operand, value,
- scratch0, scratch1, mode);
+ auto ool = new (zone())
+ OutOfLineRecordWrite(this, object, operand, value, scratch0, scratch1,
+ mode, DetermineStubCallMode());
__ mov(operand, value);
__ CheckPageFlag(object, scratch0,
MemoryChunk::kPointersFromHereAreInterestingMask,
@@ -1117,7 +1095,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
// i.InputRegister(2) ... right low word.
// i.InputRegister(3) ... right high word.
bool use_temp = false;
- if (i.OutputRegister(0).code() == i.InputRegister(1).code() ||
+ if ((instr->InputAt(1)->IsRegister() &&
+ i.OutputRegister(0).code() == i.InputRegister(1).code()) ||
i.OutputRegister(0).code() == i.InputRegister(3).code()) {
// We cannot write to the output register directly, because it would
// overwrite an input for adc. We have to use the temp register.
@@ -1127,9 +1106,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
} else {
__ add(i.OutputRegister(0), i.InputRegister(2));
}
- if (i.OutputRegister(1).code() != i.InputRegister(1).code()) {
- __ Move(i.OutputRegister(1), i.InputRegister(1));
- }
+ i.MoveInstructionOperandToRegister(i.OutputRegister(1),
+ instr->InputAt(1));
__ adc(i.OutputRegister(1), Operand(i.InputRegister(3)));
if (use_temp) {
__ Move(i.OutputRegister(0), i.TempRegister(0));
@@ -1142,7 +1120,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
// i.InputRegister(2) ... right low word.
// i.InputRegister(3) ... right high word.
bool use_temp = false;
- if (i.OutputRegister(0).code() == i.InputRegister(1).code() ||
+ if ((instr->InputAt(1)->IsRegister() &&
+ i.OutputRegister(0).code() == i.InputRegister(1).code()) ||
i.OutputRegister(0).code() == i.InputRegister(3).code()) {
// We cannot write to the output register directly, because it would
// overwrite an input for adc. We have to use the temp register.
@@ -1152,9 +1131,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
} else {
__ sub(i.OutputRegister(0), i.InputRegister(2));
}
- if (i.OutputRegister(1).code() != i.InputRegister(1).code()) {
- __ Move(i.OutputRegister(1), i.InputRegister(1));
- }
+ i.MoveInstructionOperandToRegister(i.OutputRegister(1),
+ instr->InputAt(1));
__ sbb(i.OutputRegister(1), Operand(i.InputRegister(3)));
if (use_temp) {
__ Move(i.OutputRegister(0), i.TempRegister(0));
@@ -1163,7 +1141,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kIA32MulPair: {
__ imul(i.OutputRegister(1), i.InputOperand(0));
- __ mov(i.TempRegister(0), i.InputOperand(1));
+ i.MoveInstructionOperandToRegister(i.TempRegister(0), instr->InputAt(1));
__ imul(i.TempRegister(0), i.InputOperand(2));
__ add(i.OutputRegister(1), i.TempRegister(0));
__ mov(i.OutputRegister(0), i.InputOperand(0));
@@ -1217,8 +1195,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ bswap(i.OutputRegister());
break;
case kArchWordPoisonOnSpeculation:
- DCHECK_EQ(i.OutputRegister(), i.InputRegister(0));
- __ and_(i.InputRegister(0), kSpeculationPoisonRegister);
+ // TODO(860429): Remove remaining poisoning infrastructure on ia32.
+ UNREACHABLE();
break;
case kLFence:
__ lfence();
@@ -1475,7 +1453,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ cvtsi2sd(i.OutputDoubleRegister(), i.InputOperand(0));
break;
case kSSEUint32ToFloat64:
- __ Cvtui2sd(i.OutputDoubleRegister(), i.InputOperand(0));
+ __ Cvtui2sd(i.OutputDoubleRegister(), i.InputOperand(0),
+ i.TempRegister(0));
break;
case kSSEFloat64ExtractLowWord32:
if (instr->InputAt(0)->IsFPStackSlot()) {
@@ -1592,11 +1571,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kIA32Movsxbl:
ASSEMBLE_MOVX(movsx_b);
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kIA32Movzxbl:
ASSEMBLE_MOVX(movzx_b);
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kIA32Movb: {
size_t index = 0;
@@ -1606,16 +1583,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
} else {
__ mov_b(operand, i.InputRegister(index));
}
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
}
case kIA32Movsxwl:
ASSEMBLE_MOVX(movsx_w);
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kIA32Movzxwl:
ASSEMBLE_MOVX(movzx_w);
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kIA32Movw: {
size_t index = 0;
@@ -1625,13 +1599,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
} else {
__ mov_w(operand, i.InputRegister(index));
}
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
}
case kIA32Movl:
if (instr->HasOutput()) {
__ mov(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
} else {
size_t index = 0;
Operand operand = i.MemoryOperand(&index);
@@ -1696,7 +1668,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (constant_summand > 0) {
__ add(i.OutputRegister(), Immediate(constant_summand));
} else if (constant_summand < 0) {
- __ sub(i.OutputRegister(), Immediate(-constant_summand));
+ __ sub(i.OutputRegister(),
+ Immediate(base::NegateWithWraparound(constant_summand)));
}
} else if (mode == kMode_MR1) {
if (i.InputRegister(1) == i.OutputRegister()) {
@@ -1725,34 +1698,34 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (instr->InputAt(0)->IsFPRegister()) {
__ sub(esp, Immediate(kFloatSize));
__ movss(Operand(esp, 0), i.InputDoubleRegister(0));
- frame_access_state()->IncreaseSPDelta(kFloatSize / kPointerSize);
+ frame_access_state()->IncreaseSPDelta(kFloatSize / kSystemPointerSize);
} else if (HasImmediateInput(instr, 0)) {
__ Move(kScratchDoubleReg, i.InputFloat32(0));
__ sub(esp, Immediate(kFloatSize));
__ movss(Operand(esp, 0), kScratchDoubleReg);
- frame_access_state()->IncreaseSPDelta(kFloatSize / kPointerSize);
+ frame_access_state()->IncreaseSPDelta(kFloatSize / kSystemPointerSize);
} else {
__ movss(kScratchDoubleReg, i.InputOperand(0));
__ sub(esp, Immediate(kFloatSize));
__ movss(Operand(esp, 0), kScratchDoubleReg);
- frame_access_state()->IncreaseSPDelta(kFloatSize / kPointerSize);
+ frame_access_state()->IncreaseSPDelta(kFloatSize / kSystemPointerSize);
}
break;
case kIA32PushFloat64:
if (instr->InputAt(0)->IsFPRegister()) {
__ sub(esp, Immediate(kDoubleSize));
__ movsd(Operand(esp, 0), i.InputDoubleRegister(0));
- frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
+ frame_access_state()->IncreaseSPDelta(kDoubleSize / kSystemPointerSize);
} else if (HasImmediateInput(instr, 0)) {
__ Move(kScratchDoubleReg, i.InputDouble(0));
__ sub(esp, Immediate(kDoubleSize));
__ movsd(Operand(esp, 0), kScratchDoubleReg);
- frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
+ frame_access_state()->IncreaseSPDelta(kDoubleSize / kSystemPointerSize);
} else {
__ movsd(kScratchDoubleReg, i.InputOperand(0));
__ sub(esp, Immediate(kDoubleSize));
__ movsd(Operand(esp, 0), kScratchDoubleReg);
- frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
+ frame_access_state()->IncreaseSPDelta(kDoubleSize / kSystemPointerSize);
}
break;
case kIA32PushSimd128:
@@ -1764,18 +1737,18 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ sub(esp, Immediate(kSimd128Size));
__ movups(Operand(esp, 0), kScratchDoubleReg);
}
- frame_access_state()->IncreaseSPDelta(kSimd128Size / kPointerSize);
+ frame_access_state()->IncreaseSPDelta(kSimd128Size / kSystemPointerSize);
break;
case kIA32Push:
if (AddressingModeField::decode(instr->opcode()) != kMode_None) {
size_t index = 0;
Operand operand = i.MemoryOperand(&index);
__ push(operand);
- frame_access_state()->IncreaseSPDelta(kFloatSize / kPointerSize);
+ frame_access_state()->IncreaseSPDelta(kFloatSize / kSystemPointerSize);
} else if (instr->InputAt(0)->IsFPRegister()) {
__ sub(esp, Immediate(kFloatSize));
__ movsd(Operand(esp, 0), i.InputDoubleRegister(0));
- frame_access_state()->IncreaseSPDelta(kFloatSize / kPointerSize);
+ frame_access_state()->IncreaseSPDelta(kFloatSize / kSystemPointerSize);
} else if (HasImmediateInput(instr, 0)) {
__ push(i.InputImmediate(0));
frame_access_state()->IncreaseSPDelta(1);
@@ -1787,9 +1760,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kIA32Poke: {
int slot = MiscField::decode(instr->opcode());
if (HasImmediateInput(instr, 0)) {
- __ mov(Operand(esp, slot * kPointerSize), i.InputImmediate(0));
+ __ mov(Operand(esp, slot * kSystemPointerSize), i.InputImmediate(0));
} else {
- __ mov(Operand(esp, slot * kPointerSize), i.InputRegister(0));
+ __ mov(Operand(esp, slot * kSystemPointerSize), i.InputRegister(0));
}
break;
}
@@ -3264,8 +3237,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
XMMRegister dst = i.OutputSimd128Register();
__ vxorps(kScratchDoubleReg, i.InputSimd128Register(2),
i.InputOperand(1));
- __ vandps(dst, kScratchDoubleReg, i.InputOperand(0));
- __ vxorps(dst, dst, i.InputSimd128Register(2));
+ __ vandps(kScratchDoubleReg, kScratchDoubleReg, i.InputOperand(0));
+ __ vxorps(dst, kScratchDoubleReg, i.InputSimd128Register(2));
break;
}
case kIA32S8x16Shuffle: {
@@ -3631,7 +3604,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
XMMRegister src = i.InputSimd128Register(0);
Register tmp = i.TempRegister(0);
__ xor_(tmp, tmp);
- __ mov(dst, Immediate(-1));
+ __ mov(dst, Immediate(1));
__ Ptest(src, src);
__ cmov(zero, dst, tmp);
break;
@@ -3642,27 +3615,16 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Register dst = i.OutputRegister();
Operand src = i.InputOperand(0);
Register tmp = i.TempRegister(0);
- __ mov(tmp, Immediate(-1));
+ __ mov(tmp, Immediate(1));
__ xor_(dst, dst);
- // Compare all src lanes to false.
- __ Pxor(kScratchDoubleReg, kScratchDoubleReg);
- if (arch_opcode == kIA32S1x4AllTrue) {
- __ Pcmpeqd(kScratchDoubleReg, src);
- } else if (arch_opcode == kIA32S1x8AllTrue) {
- __ Pcmpeqw(kScratchDoubleReg, src);
- } else {
- __ Pcmpeqb(kScratchDoubleReg, src);
- }
- // If kScratchDoubleReg is all zero, none of src lanes are false.
+ __ Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
+ __ Pxor(kScratchDoubleReg, src);
__ Ptest(kScratchDoubleReg, kScratchDoubleReg);
__ cmov(zero, dst, tmp);
break;
}
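The new AllTrue lowering relies on boolean lanes being canonical (all-ones for true, all-zeros for false): XORing the source against an all-ones register yields zero exactly when every lane was true, so a single ptest suffices. A scalar C++ sketch of the same trick, with the lane canonicalization assumed (illustration only):

#include <cstdint>

// One 32-bit lane per element: 0xFFFFFFFF for true, 0x0 for false.
bool AllTrue(const uint32_t* lanes, int lane_count) {
  uint32_t accumulator = 0;
  for (int i = 0; i < lane_count; ++i) {
    accumulator |= ~lanes[i];  // pcmpeqd(x, x) then pxor(x, src): per-lane NOT
  }
  return accumulator == 0;  // ptest: ZF is set iff every lane was all-ones
}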
case kIA32StackCheck: {
- ExternalReference const stack_limit =
- ExternalReference::address_of_stack_limit(__ isolate());
- __ VerifyRootRegister();
- __ cmp(esp, tasm()->StaticVariable(stack_limit));
+ __ CompareStackLimit(esp);
break;
}
case kIA32Word32AtomicPairLoad: {
@@ -3678,21 +3640,18 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kIA32Word32AtomicPairStore: {
- TurboAssembler::AllowExplicitEbxAccessScope spill_register(tasm());
+ Label store;
+ __ bind(&store);
__ mov(i.TempRegister(0), i.MemoryOperand(2));
__ mov(i.TempRegister(1), i.NextMemoryOperand(2));
__ push(ebx);
frame_access_state()->IncreaseSPDelta(1);
- InstructionOperand* op = instr->InputAt(0);
- if (op->IsImmediate() || op->IsConstant()) {
- __ mov(ebx, i.ToImmediate(op));
- } else {
- __ mov(ebx, i.ToOperand(op));
- }
+ i.MoveInstructionOperandToRegister(ebx, instr->InputAt(0));
__ lock();
__ cmpxchg8b(i.MemoryOperand(2));
__ pop(ebx);
frame_access_state()->IncreaseSPDelta(-1);
+ __ j(not_equal, &store);
break;
}
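// Note the back-edge added here: in the removed version a failed cmpxchg8b
// simply fell through, so a concurrent writer could cause the pair store to
// be dropped. Retrying via j(not_equal, &store) guarantees the store
// eventually lands, matching the exchange and binop loops nearby.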
case kWord32AtomicExchangeInt8: {
@@ -3721,21 +3680,18 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kIA32Word32AtomicPairExchange: {
DCHECK(VerifyOutputOfAtomicPairInstr(&i, instr));
- TurboAssembler::AllowExplicitEbxAccessScope spill_ebx(tasm());
+ Label exchange;
+ __ bind(&exchange);
__ mov(eax, i.MemoryOperand(2));
__ mov(edx, i.NextMemoryOperand(2));
__ push(ebx);
frame_access_state()->IncreaseSPDelta(1);
- InstructionOperand* op = instr->InputAt(0);
- if (op->IsImmediate() || op->IsConstant()) {
- __ mov(ebx, i.ToImmediate(op));
- } else {
- __ mov(ebx, i.ToOperand(op));
- }
+ i.MoveInstructionOperandToRegister(ebx, instr->InputAt(0));
__ lock();
__ cmpxchg8b(i.MemoryOperand(2));
__ pop(ebx);
frame_access_state()->IncreaseSPDelta(-1);
+ __ j(not_equal, &exchange);
break;
}
case kWord32AtomicCompareExchangeInt8: {
@@ -3768,45 +3724,39 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kIA32Word32AtomicPairCompareExchange: {
- TurboAssembler::AllowExplicitEbxAccessScope spill_ebx(tasm());
__ push(ebx);
frame_access_state()->IncreaseSPDelta(1);
- InstructionOperand* op = instr->InputAt(2);
- if (op->IsImmediate() || op->IsConstant()) {
- __ mov(ebx, i.ToImmediate(op));
- } else {
- __ mov(ebx, i.ToOperand(op));
- }
+ i.MoveInstructionOperandToRegister(ebx, instr->InputAt(2));
__ lock();
__ cmpxchg8b(i.MemoryOperand(4));
__ pop(ebx);
frame_access_state()->IncreaseSPDelta(-1);
break;
}
-#define ATOMIC_BINOP_CASE(op, inst) \
- case kWord32Atomic##op##Int8: { \
- ASSEMBLE_ATOMIC_BINOP(inst, mov_b, cmpxchg_b); \
- __ movsx_b(eax, eax); \
- break; \
- } \
- case kWord32Atomic##op##Uint8: { \
- ASSEMBLE_ATOMIC_BINOP(inst, mov_b, cmpxchg_b); \
- __ movzx_b(eax, eax); \
- break; \
- } \
- case kWord32Atomic##op##Int16: { \
- ASSEMBLE_ATOMIC_BINOP(inst, mov_w, cmpxchg_w); \
- __ movsx_w(eax, eax); \
- break; \
- } \
- case kWord32Atomic##op##Uint16: { \
- ASSEMBLE_ATOMIC_BINOP(inst, mov_w, cmpxchg_w); \
- __ movzx_w(eax, eax); \
- break; \
- } \
- case kWord32Atomic##op##Word32: { \
- ASSEMBLE_ATOMIC_BINOP(inst, mov, cmpxchg); \
- break; \
+#define ATOMIC_BINOP_CASE(op, inst) \
+ case kWord32Atomic##op##Int8: { \
+ ASSEMBLE_ATOMIC_BINOP(inst, mov_b, cmpxchg_b); \
+ __ movsx_b(eax, eax); \
+ break; \
+ } \
+ case kWord32Atomic##op##Uint8: { \
+ ASSEMBLE_ATOMIC_BINOP(inst, mov_b, cmpxchg_b); \
+ __ movzx_b(eax, eax); \
+ break; \
+ } \
+ case kWord32Atomic##op##Int16: { \
+ ASSEMBLE_ATOMIC_BINOP(inst, mov_w, cmpxchg_w); \
+ __ movsx_w(eax, eax); \
+ break; \
+ } \
+ case kWord32Atomic##op##Uint16: { \
+ ASSEMBLE_ATOMIC_BINOP(inst, mov_w, cmpxchg_w); \
+ __ movzx_w(eax, eax); \
+ break; \
+ } \
+ case kWord32Atomic##op##Word32: { \
+ ASSEMBLE_ATOMIC_BINOP(inst, mov, cmpxchg); \
+ break; \
}
ATOMIC_BINOP_CASE(Add, add)
ATOMIC_BINOP_CASE(Sub, sub)
@@ -3829,19 +3779,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
DCHECK(VerifyOutputOfAtomicPairInstr(&i, instr));
Label binop;
__ bind(&binop);
- TurboAssembler::AllowExplicitEbxAccessScope spill_register(tasm());
// Move memory operand into edx:eax
__ mov(eax, i.MemoryOperand(2));
__ mov(edx, i.NextMemoryOperand(2));
// Save input registers temporarily on the stack.
__ push(ebx);
frame_access_state()->IncreaseSPDelta(1);
- InstructionOperand* op = instr->InputAt(0);
- if (op->IsImmediate() || op->IsConstant()) {
- __ mov(ebx, i.ToImmediate(op));
- } else {
- __ mov(ebx, i.ToOperand(op));
- }
+ i.MoveInstructionOperandToRegister(ebx, instr->InputAt(0));
__ push(i.InputRegister(1));
// Negate input in place
__ neg(ebx);
@@ -3938,15 +3882,8 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
void CodeGenerator::AssembleBranchPoisoning(FlagsCondition condition,
Instruction* instr) {
- // TODO(jarin) Handle float comparisons (kUnordered[Not]Equal).
- if (condition == kUnorderedEqual || condition == kUnorderedNotEqual) {
- return;
- }
-
- condition = NegateFlagsCondition(condition);
- __ setcc(FlagsConditionToCondition(condition), kSpeculationPoisonRegister);
- __ add(kSpeculationPoisonRegister, Immediate(255));
- __ sar(kSpeculationPoisonRegister, 31u);
+ // TODO(860429): Remove remaining poisoning infrastructure on ia32.
+ UNREACHABLE();
}
void CodeGenerator::AssembleArchDeoptBranch(Instruction* instr,
@@ -3982,17 +3919,19 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
ExternalReference::wasm_call_trap_callback_for_testing(), 0);
__ LeaveFrame(StackFrame::WASM_COMPILED);
auto call_descriptor = gen_->linkage()->GetIncomingDescriptor();
- size_t pop_size = call_descriptor->StackParameterCount() * kPointerSize;
+ size_t pop_size =
+ call_descriptor->StackParameterCount() * kSystemPointerSize;
// Use ecx as a scratch register; we return immediately anyway.
__ Ret(static_cast<int>(pop_size), ecx);
} else {
gen_->AssembleSourcePosition(instr_);
// A direct call to a wasm runtime stub defined in this module.
- // Just encode the stub index. This will be patched at relocation.
+ // Just encode the stub index. This will be patched when the code
+ // is added to the native module and copied into wasm code space.
__ wasm_call(static_cast<Address>(trap_id), RelocInfo::WASM_STUB_CALL);
ReferenceMap* reference_map =
new (gen_->zone()) ReferenceMap(gen_->zone());
- gen_->RecordSafepoint(reference_map, Safepoint::kSimple, 0,
+ gen_->RecordSafepoint(reference_map, Safepoint::kSimple,
Safepoint::kNoLazyDeopt);
__ AssertUnreachable(AbortReason::kUnexpectedReturnFromWasmTrap);
}
@@ -4073,7 +4012,6 @@ void CodeGenerator::AssembleArchLookupSwitch(Instruction* instr) {
AssembleArchJump(i.InputRpo(1));
}
-
void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
IA32OperandConverter i(this, instr);
Register input = i.InputRegister(0);
@@ -4088,7 +4026,6 @@ void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
__ jmp(Operand::JumpTable(input, times_4, table));
}
-
// The calling convention for JSFunctions on IA32 passes arguments on the
// stack and the JSFunction and context in EDI and ESI, respectively; thus
// the steps of the call look as follows:
@@ -4244,6 +4181,18 @@ void CodeGenerator::AssembleConstructFrame() {
__ StubPrologue(info()->GetOutputStackFrameType());
if (call_descriptor->IsWasmFunctionCall()) {
__ push(kWasmInstanceRegister);
+ } else if (call_descriptor->IsWasmImportWrapper()) {
+ // WASM import wrappers are passed a tuple in the place of the instance.
+ // Unpack the tuple into the instance and the target callable.
+ // This must be done here in the codegen because it cannot be expressed
+ // properly in the graph.
+ __ mov(kJSFunctionRegister,
+ Operand(kWasmInstanceRegister,
+ Tuple2::kValue2Offset - kHeapObjectTag));
+ __ mov(kWasmInstanceRegister,
+ Operand(kWasmInstanceRegister,
+ Tuple2::kValue1Offset - kHeapObjectTag));
+ __ push(kWasmInstanceRegister);
}
}
}
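// In field terms, the two loads above perform
//   kJSFunctionRegister   <- tuple.value2  (the target callable)
//   kWasmInstanceRegister <- tuple.value1  (the actual instance)
// before the instance is pushed exactly as in the plain wasm-call case.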
@@ -4262,7 +4211,6 @@ void CodeGenerator::AssembleConstructFrame() {
if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
osr_pc_offset_ = __ pc_offset();
shrink_slots -= osr_helper()->UnoptimizedFrameSlots();
- ResetSpeculationPoison();
}
const RegList saves = call_descriptor->CalleeSavedRegisters();
@@ -4278,24 +4226,24 @@ void CodeGenerator::AssembleConstructFrame() {
// If the frame is bigger than the stack, we throw the stack overflow
// exception unconditionally. Thereby we can avoid the integer overflow
// check in the condition code.
- if (shrink_slots * kPointerSize < FLAG_stack_size * 1024) {
+ if (shrink_slots * kSystemPointerSize < FLAG_stack_size * 1024) {
Register scratch = esi;
__ push(scratch);
__ mov(scratch,
FieldOperand(kWasmInstanceRegister,
WasmInstanceObject::kRealStackLimitAddressOffset));
__ mov(scratch, Operand(scratch, 0));
- __ add(scratch, Immediate(shrink_slots * kPointerSize));
+ __ add(scratch, Immediate(shrink_slots * kSystemPointerSize));
__ cmp(esp, scratch);
__ pop(scratch);
__ j(above_equal, &done);
}
__ mov(ecx, FieldOperand(kWasmInstanceRegister,
WasmInstanceObject::kCEntryStubOffset));
- __ Move(esi, Smi::kZero);
+ __ Move(esi, Smi::zero());
__ CallRuntimeWithCEntry(Runtime::kThrowWasmStackOverflow, ecx);
ReferenceMap* reference_map = new (zone()) ReferenceMap(zone());
- RecordSafepoint(reference_map, Safepoint::kSimple, 0,
+ RecordSafepoint(reference_map, Safepoint::kSimple,
Safepoint::kNoLazyDeopt);
__ AssertUnreachable(AbortReason::kUnexpectedReturnFromWasmTrap);
__ bind(&done);
@@ -4305,7 +4253,7 @@ void CodeGenerator::AssembleConstructFrame() {
shrink_slots -= base::bits::CountPopulation(saves);
shrink_slots -= frame()->GetReturnSlotCount();
if (shrink_slots > 0) {
- __ sub(esp, Immediate(shrink_slots * kPointerSize));
+ __ sub(esp, Immediate(shrink_slots * kSystemPointerSize));
}
}
@@ -4318,7 +4266,7 @@ void CodeGenerator::AssembleConstructFrame() {
// Allocate return slots (located after callee-saved).
if (frame()->GetReturnSlotCount() > 0) {
- __ sub(esp, Immediate(frame()->GetReturnSlotCount() * kPointerSize));
+ __ sub(esp, Immediate(frame()->GetReturnSlotCount() * kSystemPointerSize));
}
}
@@ -4330,7 +4278,7 @@ void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
if (saves != 0) {
const int returns = frame()->GetReturnSlotCount();
if (returns != 0) {
- __ add(esp, Immediate(returns * kPointerSize));
+ __ add(esp, Immediate(returns * kSystemPointerSize));
}
for (int i = 0; i < Register::kNumRegisters; i++) {
if (!((1 << i) & saves)) continue;
@@ -4341,7 +4289,7 @@ void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
// Might need ecx for scratch if pop_size is too big or if there is a variable
// pop count.
DCHECK_EQ(0u, call_descriptor->CalleeSavedRegisters() & ecx.bit());
- size_t pop_size = call_descriptor->StackParameterCount() * kPointerSize;
+ size_t pop_size = call_descriptor->StackParameterCount() * kSystemPointerSize;
IA32OperandConverter g(this, nullptr);
if (call_descriptor->IsCFunctionCall()) {
AssembleDeconstructFrame();
@@ -4364,7 +4312,7 @@ void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
DCHECK_EQ(0u, call_descriptor->CalleeSavedRegisters() & ecx.bit());
if (pop->IsImmediate()) {
DCHECK_EQ(Constant::kInt32, g.ToConstant(pop).type());
- pop_size += g.ToConstant(pop).ToInt32() * kPointerSize;
+ pop_size += g.ToConstant(pop).ToInt32() * kSystemPointerSize;
__ Ret(static_cast<int>(pop_size), ecx);
} else {
Register pop_reg = g.ToRegister(pop);
@@ -4479,11 +4427,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
Constant src = g.ToConstant(source);
Operand dst = g.ToOperand(destination);
if (destination->IsStackSlot()) {
- if (src.type() == Constant::kHeapObject) {
- __ mov(dst, src.ToHeapObject());
- } else {
- __ Move(dst, g.ToImmediate(source));
- }
+ __ Move(dst, g.ToImmediate(source));
} else {
DCHECK(destination->IsFPStackSlot());
if (src.type() == Constant::kFloat32) {
@@ -4494,7 +4438,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
uint32_t lower = static_cast<uint32_t>(constant_value);
uint32_t upper = static_cast<uint32_t>(constant_value >> 32);
Operand dst0 = dst;
- Operand dst1 = g.ToOperand(destination, kPointerSize);
+ Operand dst1 = g.ToOperand(destination, kSystemPointerSize);
__ Move(dst0, Immediate(lower));
__ Move(dst1, Immediate(upper));
}
@@ -4505,7 +4449,6 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
UNREACHABLE();
}
-
void CodeGenerator::AssembleSwap(InstructionOperand* source,
InstructionOperand* destination) {
IA32OperandConverter g(this, nullptr);
@@ -4589,20 +4532,20 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
__ movsd(kScratchDoubleReg, dst0); // Save dst in scratch register.
__ push(src0); // Then use stack to copy src to destination.
__ pop(dst0);
- __ push(g.ToOperand(source, kPointerSize));
- __ pop(g.ToOperand(destination, kPointerSize));
+ __ push(g.ToOperand(source, kSystemPointerSize));
+ __ pop(g.ToOperand(destination, kSystemPointerSize));
__ movsd(src0, kScratchDoubleReg);
} else {
DCHECK_EQ(MachineRepresentation::kSimd128, rep);
__ movups(kScratchDoubleReg, dst0); // Save dst in scratch register.
__ push(src0); // Then use stack to copy src to destination.
__ pop(dst0);
- __ push(g.ToOperand(source, kPointerSize));
- __ pop(g.ToOperand(destination, kPointerSize));
- __ push(g.ToOperand(source, 2 * kPointerSize));
- __ pop(g.ToOperand(destination, 2 * kPointerSize));
- __ push(g.ToOperand(source, 3 * kPointerSize));
- __ pop(g.ToOperand(destination, 3 * kPointerSize));
+ __ push(g.ToOperand(source, kSystemPointerSize));
+ __ pop(g.ToOperand(destination, kSystemPointerSize));
+ __ push(g.ToOperand(source, 2 * kSystemPointerSize));
+ __ pop(g.ToOperand(destination, 2 * kSystemPointerSize));
+ __ push(g.ToOperand(source, 3 * kSystemPointerSize));
+ __ pop(g.ToOperand(destination, 3 * kSystemPointerSize));
__ movups(src0, kScratchDoubleReg);
}
}
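Stack-slot-to-stack-slot swaps have no direct x86 form, so the sequence parks one operand in the scratch XMM register and streams the other across the machine stack, one pointer-sized chunk per push/pop pair. A C++ sketch of the resulting chunked swap, with memcpy standing in for the XMM and push/pop traffic (illustration only):

#include <cstddef>
#include <cstring>

// Swap two memory blocks of `size` bytes, as AssembleSwap does for FP stack
// slots (8 bytes for float64, 16 for simd128).
void SwapSlots(void* a, void* b, std::size_t size) {
  unsigned char scratch[16];       // plays the role of kScratchDoubleReg
  std::memcpy(scratch, b, size);   // movsd/movups kScratchDoubleReg, dst0
  std::memcpy(b, a, size);         // the push/pop pairs copy src -> dst
  std::memcpy(a, scratch, size);   // movsd/movups src0, kScratchDoubleReg
}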
@@ -4614,7 +4557,6 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
}
}
-
void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) {
for (size_t index = 0; index < target_count; ++index) {
__ dd(targets[index]);
diff --git a/deps/v8/src/compiler/ia32/instruction-codes-ia32.h b/deps/v8/src/compiler/backend/ia32/instruction-codes-ia32.h
index e157a29e13..60ed1cc29c 100644
--- a/deps/v8/src/compiler/ia32/instruction-codes-ia32.h
+++ b/deps/v8/src/compiler/backend/ia32/instruction-codes-ia32.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_COMPILER_IA32_INSTRUCTION_CODES_IA32_H_
-#define V8_COMPILER_IA32_INSTRUCTION_CODES_IA32_H_
+#ifndef V8_COMPILER_BACKEND_IA32_INSTRUCTION_CODES_IA32_H_
+#define V8_COMPILER_BACKEND_IA32_INSTRUCTION_CODES_IA32_H_
namespace v8 {
namespace internal {
@@ -399,4 +399,4 @@ namespace compiler {
} // namespace internal
} // namespace v8
-#endif // V8_COMPILER_IA32_INSTRUCTION_CODES_IA32_H_
+#endif // V8_COMPILER_BACKEND_IA32_INSTRUCTION_CODES_IA32_H_
diff --git a/deps/v8/src/compiler/ia32/instruction-scheduler-ia32.cc b/deps/v8/src/compiler/backend/ia32/instruction-scheduler-ia32.cc
index 54454e41cb..f2d5cc0d17 100644
--- a/deps/v8/src/compiler/ia32/instruction-scheduler-ia32.cc
+++ b/deps/v8/src/compiler/backend/ia32/instruction-scheduler-ia32.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/compiler/instruction-scheduler.h"
+#include "src/compiler/backend/instruction-scheduler.h"
namespace v8 {
namespace internal {
@@ -10,7 +10,6 @@ namespace compiler {
bool InstructionScheduler::SchedulerSupported() { return true; }
-
int InstructionScheduler::GetTargetInstructionFlags(
const Instruction* instr) const {
switch (instr->arch_opcode()) {
@@ -335,8 +334,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kIA32S1x16AnyTrue:
case kIA32S1x16AllTrue:
return (instr->addressing_mode() == kMode_None)
- ? kNoOpcodeFlags
- : kIsLoadOperation | kHasSideEffect;
+ ? kNoOpcodeFlags
+ : kIsLoadOperation | kHasSideEffect;
case kIA32Idiv:
case kIA32Udiv:
@@ -383,7 +382,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
return kHasSideEffect;
#define CASE(Name) case k##Name:
- COMMON_ARCH_OPCODE_LIST(CASE)
+ COMMON_ARCH_OPCODE_LIST(CASE)
#undef CASE
// Already covered in architecture independent code.
UNREACHABLE();
@@ -392,7 +391,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
UNREACHABLE();
}
-
int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
// Basic latency modeling for ia32 instructions. The latencies below were
// determined empirically.
diff --git a/deps/v8/src/compiler/ia32/instruction-selector-ia32.cc b/deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc
index 43b572170f..1e241a8ae9 100644
--- a/deps/v8/src/compiler/ia32/instruction-selector-ia32.cc
+++ b/deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc
@@ -3,7 +3,7 @@
// found in the LICENSE file.
#include "src/base/adapters.h"
-#include "src/compiler/instruction-selector-impl.h"
+#include "src/compiler/backend/instruction-selector-impl.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"
@@ -180,15 +180,19 @@ class IA32OperandGenerator final : public OperandGenerator {
}
};
-
namespace {
void VisitRO(InstructionSelector* selector, Node* node, ArchOpcode opcode) {
IA32OperandGenerator g(selector);
- selector->Emit(opcode, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+ InstructionOperand temps[] = {g.TempRegister()};
+ Node* input = node->InputAt(0);
+ // We have to use a byte register as input to movsxb.
+ InstructionOperand input_op =
+ opcode == kIA32Movsxbl ? g.UseFixed(input, eax) : g.Use(input);
+ selector->Emit(opcode, g.DefineAsRegister(node), input_op, arraysize(temps),
+ temps);
}
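// eax works here because only eax, ebx, ecx, and edx expose byte forms
// (al, bl, cl, dl) on ia32; fixing the input keeps that constraint local
// to the selector.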
-
void VisitRR(InstructionSelector* selector, Node* node,
InstructionCode opcode) {
IA32OperandGenerator g(selector);
@@ -196,7 +200,6 @@ void VisitRR(InstructionSelector* selector, Node* node,
g.UseRegister(node->InputAt(0)));
}
-
void VisitRROFloat(InstructionSelector* selector, Node* node,
ArchOpcode avx_opcode, ArchOpcode sse_opcode) {
IA32OperandGenerator g(selector);
@@ -209,7 +212,6 @@ void VisitRROFloat(InstructionSelector* selector, Node* node,
}
}
-
void VisitFloatUnop(InstructionSelector* selector, Node* node, Node* input,
ArchOpcode avx_opcode, ArchOpcode sse_opcode) {
IA32OperandGenerator g(selector);
@@ -237,7 +239,12 @@ void VisitRRISimd(InstructionSelector* selector, Node* node,
InstructionOperand operand0 = g.UseRegister(node->InputAt(0));
InstructionOperand operand1 =
g.UseImmediate(OpParameter<int32_t>(node->op()));
- selector->Emit(opcode, g.DefineAsRegister(node), operand0, operand1);
+ // 8x16 uses movsx_b on dest to extract a byte, which only works
+ // if dest is a byte register.
+ InstructionOperand dest = opcode == kIA32I8x16ExtractLane
+ ? g.DefineAsFixed(node, eax)
+ : g.DefineAsRegister(node);
+ selector->Emit(opcode, dest, operand0, operand1);
}
void VisitRRISimd(InstructionSelector* selector, Node* node,
@@ -301,7 +308,7 @@ void InstructionSelector::VisitLoad(Node* node) {
case MachineRepresentation::kSimd128:
opcode = kIA32Movdqu;
break;
- case MachineRepresentation::kWord64: // Fall through.
+ case MachineRepresentation::kWord64: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
return;
@@ -392,7 +399,7 @@ void InstructionSelector::VisitStore(Node* node) {
case MachineRepresentation::kSimd128:
opcode = kIA32Movdqu;
break;
- case MachineRepresentation::kWord64: // Fall through.
+ case MachineRepresentation::kWord64: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
return;
@@ -494,7 +501,6 @@ void VisitBinop(InstructionSelector* selector, Node* node,
inputs, cont);
}
-
// Shared routine for multiple binary operations.
void VisitBinop(InstructionSelector* selector, Node* node,
InstructionCode opcode) {
@@ -508,12 +514,10 @@ void InstructionSelector::VisitWord32And(Node* node) {
VisitBinop(this, node, kIA32And);
}
-
void InstructionSelector::VisitWord32Or(Node* node) {
VisitBinop(this, node, kIA32Or);
}
-
void InstructionSelector::VisitWord32Xor(Node* node) {
IA32OperandGenerator g(this);
Int32BinopMatcher m(node);
@@ -524,7 +528,6 @@ void InstructionSelector::VisitWord32Xor(Node* node) {
}
}
-
// Shared routine for multiple shift operations.
static inline void VisitShift(InstructionSelector* selector, Node* node,
ArchOpcode opcode) {
@@ -541,7 +544,6 @@ static inline void VisitShift(InstructionSelector* selector, Node* node,
}
}
-
namespace {
void VisitMulHigh(InstructionSelector* selector, Node* node,
@@ -553,7 +555,6 @@ void VisitMulHigh(InstructionSelector* selector, Node* node,
g.UseUniqueRegister(node->InputAt(1)), arraysize(temps), temps);
}
-
void VisitDiv(InstructionSelector* selector, Node* node, ArchOpcode opcode) {
IA32OperandGenerator g(selector);
InstructionOperand temps[] = {g.TempRegister(edx)};
@@ -562,7 +563,6 @@ void VisitDiv(InstructionSelector* selector, Node* node, ArchOpcode opcode) {
g.UseUnique(node->InputAt(1)), arraysize(temps), temps);
}
-
void VisitMod(InstructionSelector* selector, Node* node, ArchOpcode opcode) {
IA32OperandGenerator g(selector);
InstructionOperand temps[] = {g.TempRegister(eax)};
@@ -594,7 +594,6 @@ void EmitLea(InstructionSelector* selector, Node* result, Node* index,
} // namespace
-
void InstructionSelector::VisitWord32Shl(Node* node) {
Int32ScaleMatcher m(node, true);
if (m.matches()) {
@@ -606,12 +605,10 @@ void InstructionSelector::VisitWord32Shl(Node* node) {
VisitShift(this, node, kIA32Shl);
}
-
void InstructionSelector::VisitWord32Shr(Node* node) {
VisitShift(this, node, kIA32Shr);
}
-
void InstructionSelector::VisitWord32Sar(Node* node) {
VisitShift(this, node, kIA32Sar);
}
@@ -624,7 +621,8 @@ void InstructionSelector::VisitInt32PairAdd(Node* node) {
// We use UseUniqueRegister here to avoid register sharing with the temp
// register.
InstructionOperand inputs[] = {
- g.UseRegister(node->InputAt(0)), g.UseUniqueRegister(node->InputAt(1)),
+ g.UseRegister(node->InputAt(0)),
+ g.UseUniqueRegisterOrSlotOrConstant(node->InputAt(1)),
g.UseRegister(node->InputAt(2)), g.UseUniqueRegister(node->InputAt(3))};
InstructionOperand outputs[] = {g.DefineSameAsFirst(node),
@@ -649,7 +647,8 @@ void InstructionSelector::VisitInt32PairSub(Node* node) {
// We use UseUniqueRegister here to avoid register sharing with the temp
// register.
InstructionOperand inputs[] = {
- g.UseRegister(node->InputAt(0)), g.UseUniqueRegister(node->InputAt(1)),
+ g.UseRegister(node->InputAt(0)),
+ g.UseUniqueRegisterOrSlotOrConstant(node->InputAt(1)),
g.UseRegister(node->InputAt(2)), g.UseUniqueRegister(node->InputAt(3))};
InstructionOperand outputs[] = {g.DefineSameAsFirst(node),
@@ -673,10 +672,11 @@ void InstructionSelector::VisitInt32PairMul(Node* node) {
if (projection1) {
// InputAt(3) explicitly shares ecx with OutputRegister(1) to save one
// register and one mov instruction.
- InstructionOperand inputs[] = {g.UseUnique(node->InputAt(0)),
- g.UseUnique(node->InputAt(1)),
- g.UseUniqueRegister(node->InputAt(2)),
- g.UseFixed(node->InputAt(3), ecx)};
+ InstructionOperand inputs[] = {
+ g.UseUnique(node->InputAt(0)),
+ g.UseUniqueRegisterOrSlotOrConstant(node->InputAt(1)),
+ g.UseUniqueRegister(node->InputAt(2)),
+ g.UseFixed(node->InputAt(3), ecx)};
InstructionOperand outputs[] = {
g.DefineAsFixed(node, eax),
@@ -860,7 +860,6 @@ void InstructionSelector::VisitInt32Add(Node* node) {
VisitBinop(this, node, kIA32Add);
}
-
void InstructionSelector::VisitInt32Sub(Node* node) {
IA32OperandGenerator g(this);
Int32BinopMatcher m(node);
@@ -871,7 +870,6 @@ void InstructionSelector::VisitInt32Sub(Node* node) {
}
}
-
void InstructionSelector::VisitInt32Mul(Node* node) {
Int32ScaleMatcher m(node, true);
if (m.matches()) {
@@ -895,37 +893,30 @@ void InstructionSelector::VisitInt32Mul(Node* node) {
}
}
-
void InstructionSelector::VisitInt32MulHigh(Node* node) {
VisitMulHigh(this, node, kIA32ImulHigh);
}
-
void InstructionSelector::VisitUint32MulHigh(Node* node) {
VisitMulHigh(this, node, kIA32UmulHigh);
}
-
void InstructionSelector::VisitInt32Div(Node* node) {
VisitDiv(this, node, kIA32Idiv);
}
-
void InstructionSelector::VisitUint32Div(Node* node) {
VisitDiv(this, node, kIA32Udiv);
}
-
void InstructionSelector::VisitInt32Mod(Node* node) {
VisitMod(this, node, kIA32Idiv);
}
-
void InstructionSelector::VisitUint32Mod(Node* node) {
VisitMod(this, node, kIA32Udiv);
}
-
void InstructionSelector::VisitRoundUint32ToFloat32(Node* node) {
IA32OperandGenerator g(this);
InstructionOperand temps[] = {g.TempRegister()};
@@ -1076,7 +1067,6 @@ void InstructionSelector::EmitPrepareResults(
}
}
-
bool InstructionSelector::IsTailCallAddressImmediate() { return true; }
int InstructionSelector::GetTempsCountForTailCallFromJSFunction() { return 0; }
@@ -1106,7 +1096,6 @@ void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
selector->EmitWithContinuation(opcode, left, right, cont);
}
-
// Shared routine for multiple compare operations.
void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
Node* left, Node* right, FlagsContinuation* cont,
@@ -1207,7 +1196,6 @@ void VisitFloat32Compare(InstructionSelector* selector, Node* node,
VisitCompare(selector, kSSEFloat32Cmp, right, left, cont, false);
}
-
// Shared routine for multiple float64 compare operations (inputs commuted).
void VisitFloat64Compare(InstructionSelector* selector, Node* node,
FlagsContinuation* cont) {
@@ -1267,15 +1255,17 @@ void VisitWordCompare(InstructionSelector* selector, Node* node,
void VisitWordCompare(InstructionSelector* selector, Node* node,
FlagsContinuation* cont) {
- StackCheckMatcher<Int32BinopMatcher, IrOpcode::kUint32LessThan> m(
- selector->isolate(), node);
- if (m.Matched()) {
- // Compare(Load(js_stack_limit), LoadStackPointer)
- if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute();
- InstructionCode opcode = cont->Encode(kIA32StackCheck);
- CHECK(cont->IsBranch());
- selector->EmitWithContinuation(opcode, cont);
- return;
+ if (selector->isolate() != nullptr) {
+ StackCheckMatcher<Int32BinopMatcher, IrOpcode::kUint32LessThan> m(
+ selector->isolate(), node);
+ if (m.Matched()) {
+ // Compare(Load(js_stack_limit), LoadStackPointer)
+ if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute();
+ InstructionCode opcode = cont->Encode(kIA32StackCheck);
+ CHECK(cont->IsBranch());
+ selector->EmitWithContinuation(opcode, cont);
+ return;
+ }
}
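// The isolate() guard lets the selector run without an Isolate, as happens
// when compiling wasm; the JS stack-check pattern above only makes sense
// when isolate-bound constants such as the stack limit are reachable.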
WasmStackCheckMatcher<Int32BinopMatcher, IrOpcode::kUint32LessThan> wasm_m(
node);
@@ -1343,11 +1333,6 @@ void VisitPairAtomicBinOp(InstructionSelector* selector, Node* node,
// For Word64 operations, the value input is split into the a high node,
// and a low node in the int64-lowering phase.
Node* value_high = node->InputAt(3);
-#if defined(V8_EMBEDDED_BUILTINS)
- bool block_root_register = !selector->CanUseRootsRegister();
-#else
- bool block_root_register = true;
-#endif
// Wasm lives in 32-bit address space, so we do not need to worry about
// base/index lowering. This will need to be fixed for Wasm64.
@@ -1360,22 +1345,19 @@ void VisitPairAtomicBinOp(InstructionSelector* selector, Node* node,
Node* projection0 = NodeProperties::FindProjection(node, 0);
Node* projection1 = NodeProperties::FindProjection(node, 1);
if (projection1) {
- InstructionOperand temps[] = {g.TempRegister(ebx)};
InstructionOperand outputs[] = {g.DefineAsFixed(projection0, eax),
g.DefineAsFixed(projection1, edx)};
- const int num_temps = arraysize(temps) - (block_root_register ? 0 : 1);
selector->Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs,
- num_temps, temps);
+ 0, {});
} else if (projection0) {
InstructionOperand outputs[] = {g.DefineAsFixed(projection0, eax)};
- InstructionOperand temps[] = {g.TempRegister(edx), g.TempRegister(ebx)};
- const int num_temps = arraysize(temps) - (block_root_register ? 0 : 1);
+ InstructionOperand temps[] = {g.TempRegister(edx)};
+ const int num_temps = arraysize(temps);
selector->Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs,
num_temps, temps);
} else {
- InstructionOperand temps[] = {g.TempRegister(eax), g.TempRegister(edx),
- g.TempRegister(ebx)};
- const int num_temps = arraysize(temps) - (block_root_register ? 0 : 1);
+ InstructionOperand temps[] = {g.TempRegister(eax), g.TempRegister(edx)};
+ const int num_temps = arraysize(temps);
selector->Emit(code, 0, nullptr, arraysize(inputs), inputs, num_temps,
temps);
}
@@ -1504,7 +1486,6 @@ void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
return EmitBinarySearchSwitch(sw, value_operand);
}
-
void InstructionSelector::VisitWord32Equal(Node* const node) {
FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
Int32BinopMatcher m(node);
@@ -1514,33 +1495,28 @@ void InstructionSelector::VisitWord32Equal(Node* const node) {
VisitWordCompare(this, node, &cont);
}
-
void InstructionSelector::VisitInt32LessThan(Node* node) {
FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
VisitWordCompare(this, node, &cont);
}
-
void InstructionSelector::VisitInt32LessThanOrEqual(Node* node) {
FlagsContinuation cont =
FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
VisitWordCompare(this, node, &cont);
}
-
void InstructionSelector::VisitUint32LessThan(Node* node) {
FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
VisitWordCompare(this, node, &cont);
}
-
void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) {
FlagsContinuation cont =
FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
VisitWordCompare(this, node, &cont);
}
-
void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
@@ -1550,7 +1526,6 @@ void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
VisitBinop(this, node, kIA32Add, &cont);
}
-
void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
@@ -1574,42 +1549,35 @@ void InstructionSelector::VisitFloat32Equal(Node* node) {
VisitFloat32Compare(this, node, &cont);
}
-
void InstructionSelector::VisitFloat32LessThan(Node* node) {
FlagsContinuation cont =
FlagsContinuation::ForSet(kUnsignedGreaterThan, node);
VisitFloat32Compare(this, node, &cont);
}
-
void InstructionSelector::VisitFloat32LessThanOrEqual(Node* node) {
FlagsContinuation cont =
FlagsContinuation::ForSet(kUnsignedGreaterThanOrEqual, node);
VisitFloat32Compare(this, node, &cont);
}
-
void InstructionSelector::VisitFloat64Equal(Node* node) {
FlagsContinuation cont = FlagsContinuation::ForSet(kUnorderedEqual, node);
VisitFloat64Compare(this, node, &cont);
}
-
void InstructionSelector::VisitFloat64LessThan(Node* node) {
FlagsContinuation cont =
FlagsContinuation::ForSet(kUnsignedGreaterThan, node);
VisitFloat64Compare(this, node, &cont);
}
-
void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
FlagsContinuation cont =
FlagsContinuation::ForSet(kUnsignedGreaterThanOrEqual, node);
VisitFloat64Compare(this, node, &cont);
}
-
-
void InstructionSelector::VisitFloat64InsertLowWord32(Node* node) {
IA32OperandGenerator g(this);
Node* left = node->InputAt(0);
@@ -1623,7 +1591,6 @@ void InstructionSelector::VisitFloat64InsertLowWord32(Node* node) {
g.UseRegister(left), g.Use(right));
}
-
void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
IA32OperandGenerator g(this);
Node* left = node->InputAt(0);
@@ -1797,11 +1764,6 @@ void InstructionSelector::VisitWord32AtomicPairStore(Node* node) {
Node* index = node->InputAt(1);
Node* value = node->InputAt(2);
Node* value_high = node->InputAt(3);
-#if defined(V8_EMBEDDED_BUILTINS)
- bool block_root_register = !CanUseRootsRegister();
-#else
- bool block_root_register = true;
-#endif
AddressingMode addressing_mode;
InstructionOperand inputs[] = {
@@ -1811,9 +1773,8 @@ void InstructionSelector::VisitWord32AtomicPairStore(Node* node) {
// Allocate temp registers here: stores are performed via an atomic
// exchange whose output lands in edx:eax, which must be saved and
// restored at the end of the instruction.
- InstructionOperand temps[] = {g.TempRegister(eax), g.TempRegister(edx),
- g.TempRegister(ebx)};
- const int num_temps = arraysize(temps) - (block_root_register ? 0 : 1);
+ InstructionOperand temps[] = {g.TempRegister(eax), g.TempRegister(edx)};
+ const int num_temps = arraysize(temps);
InstructionCode code =
kIA32Word32AtomicPairStore | AddressingModeField::encode(addressing_mode);
Emit(code, 0, nullptr, arraysize(inputs), inputs, num_temps, temps);
@@ -1847,11 +1808,6 @@ void InstructionSelector::VisitWord32AtomicPairCompareExchange(Node* node) {
IA32OperandGenerator g(this);
Node* index = node->InputAt(1);
AddressingMode addressing_mode;
-#if defined(V8_EMBEDDED_BUILTINS)
- bool block_root_register = !CanUseRootsRegister();
-#else
- bool block_root_register = true;
-#endif
InstructionOperand inputs[] = {
// High, Low values of old value
@@ -1868,22 +1824,18 @@ void InstructionSelector::VisitWord32AtomicPairCompareExchange(Node* node) {
AddressingModeField::encode(addressing_mode);
if (projection1) {
- InstructionOperand temps[] = {g.TempRegister(ebx)};
InstructionOperand outputs[] = {g.DefineAsFixed(projection0, eax),
g.DefineAsFixed(projection1, edx)};
- const int num_temps = arraysize(temps) - (block_root_register ? 0 : 1);
- Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs,
- num_temps, temps);
+ Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs, 0, {});
} else if (projection0) {
InstructionOperand outputs[] = {g.DefineAsFixed(projection0, eax)};
- InstructionOperand temps[] = {g.TempRegister(edx), g.TempRegister(ebx)};
- const int num_temps = arraysize(temps) - (block_root_register ? 0 : 1);
+ InstructionOperand temps[] = {g.TempRegister(edx)};
+ const int num_temps = arraysize(temps);
Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs,
num_temps, temps);
} else {
- InstructionOperand temps[] = {g.TempRegister(eax), g.TempRegister(edx),
- g.TempRegister(ebx)};
- const int num_temps = arraysize(temps) - (block_root_register ? 0 : 1);
+ InstructionOperand temps[] = {g.TempRegister(eax), g.TempRegister(edx)};
+ const int num_temps = arraysize(temps);
Emit(code, 0, nullptr, arraysize(inputs), inputs, num_temps, temps);
}
}
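
The two atomic-pair hunks above delete the conditional ebx temp together with its block_root_register bookkeeping; the likely reason is that this release reserves ebx unconditionally as the ia32 root register for embedded builtins, so the register allocator can never hand it out and nothing needs to block it explicitly. The surviving arraysize(temps) idiom is a compile-time array length; a standalone sketch in the spirit of V8's version (not its exact source):

#include <cstddef>
#include <cstdio>

// sizeof over an unevaluated helper call yields the element count as a
// constant expression; pointer arguments fail to deduce N, so misuse is a
// compile error. Mirrors the spirit of V8's arraysize macro, not its source.
template <typename T, std::size_t N>
char (&ArraySizeHelper(T (&array)[N]))[N];
#define arraysize(array) (sizeof(ArraySizeHelper(array)))

int main() {
  int temps[2] = {0, 0};
  static_assert(arraysize(temps) == 2, "two temp registers");
  std::printf("%zu\n", arraysize(temps));  // prints 2
  return 0;
}
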
diff --git a/deps/v8/src/compiler/instruction-codes.h b/deps/v8/src/compiler/backend/instruction-codes.h
index 83e8da9e8a..65ae236f90 100644
--- a/deps/v8/src/compiler/instruction-codes.h
+++ b/deps/v8/src/compiler/backend/instruction-codes.h
@@ -2,27 +2,27 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_COMPILER_INSTRUCTION_CODES_H_
-#define V8_COMPILER_INSTRUCTION_CODES_H_
+#ifndef V8_COMPILER_BACKEND_INSTRUCTION_CODES_H_
+#define V8_COMPILER_BACKEND_INSTRUCTION_CODES_H_
#include <iosfwd>
#if V8_TARGET_ARCH_ARM
-#include "src/compiler/arm/instruction-codes-arm.h"
+#include "src/compiler/backend/arm/instruction-codes-arm.h"
#elif V8_TARGET_ARCH_ARM64
-#include "src/compiler/arm64/instruction-codes-arm64.h"
+#include "src/compiler/backend/arm64/instruction-codes-arm64.h"
#elif V8_TARGET_ARCH_IA32
-#include "src/compiler/ia32/instruction-codes-ia32.h"
+#include "src/compiler/backend/ia32/instruction-codes-ia32.h"
#elif V8_TARGET_ARCH_MIPS
-#include "src/compiler/mips/instruction-codes-mips.h"
+#include "src/compiler/backend/mips/instruction-codes-mips.h"
#elif V8_TARGET_ARCH_MIPS64
-#include "src/compiler/mips64/instruction-codes-mips64.h"
+#include "src/compiler/backend/mips64/instruction-codes-mips64.h"
#elif V8_TARGET_ARCH_X64
-#include "src/compiler/x64/instruction-codes-x64.h"
+#include "src/compiler/backend/x64/instruction-codes-x64.h"
#elif V8_TARGET_ARCH_PPC
-#include "src/compiler/ppc/instruction-codes-ppc.h"
+#include "src/compiler/backend/ppc/instruction-codes-ppc.h"
#elif V8_TARGET_ARCH_S390
-#include "src/compiler/s390/instruction-codes-s390.h"
+#include "src/compiler/backend/s390/instruction-codes-s390.h"
#else
#define TARGET_ARCH_OPCODE_LIST(V)
#define TARGET_ADDRESSING_MODE_LIST(V)
@@ -37,7 +37,6 @@ namespace compiler {
// Modes for ArchStoreWithWriteBarrier below.
enum class RecordWriteMode { kValueIsMap, kValueIsPointer, kValueIsAny };
-
// Target-specific opcodes that specify which assembly sequence to emit.
// Most opcodes specify a single instruction.
#define COMMON_ARCH_OPCODE_LIST(V) \
@@ -53,6 +52,7 @@ enum class RecordWriteMode { kValueIsMap, kValueIsPointer, kValueIsAny };
V(ArchPrepareTailCall) \
V(ArchCallWasmFunction) \
V(ArchTailCallWasm) \
+ V(ArchCallBuiltinPointer) \
V(ArchJmp) \
V(ArchBinarySearchSwitch) \
V(ArchLookupSwitch) \
@@ -145,7 +145,7 @@ enum ArchOpcode {
ARCH_OPCODE_LIST(DECLARE_ARCH_OPCODE)
#undef DECLARE_ARCH_OPCODE
#define COUNT_ARCH_OPCODE(Name) +1
- kLastArchOpcode = -1 ARCH_OPCODE_LIST(COUNT_ARCH_OPCODE)
+ kLastArchOpcode = -1 ARCH_OPCODE_LIST(COUNT_ARCH_OPCODE)
#undef COUNT_ARCH_OPCODE
};
@@ -165,7 +165,7 @@ enum AddressingMode {
ADDRESSING_MODE_LIST(DECLARE_ADDRESSING_MODE)
#undef DECLARE_ADDRESSING_MODE
#define COUNT_ADDRESSING_MODE(Name) +1
- kLastAddressingMode = -1 ADDRESSING_MODE_LIST(COUNT_ADDRESSING_MODE)
+ kLastAddressingMode = -1 ADDRESSING_MODE_LIST(COUNT_ADDRESSING_MODE)
#undef COUNT_ADDRESSING_MODE
};
@@ -249,4 +249,4 @@ typedef BitField<int, 22, 10> MiscField;
} // namespace internal
} // namespace v8
-#endif // V8_COMPILER_INSTRUCTION_CODES_H_
+#endif // V8_COMPILER_BACKEND_INSTRUCTION_CODES_H_
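
The kLastArchOpcode and kLastAddressingMode hunks above only fix indentation, but the idiom they touch is worth a note: each entry of the X-macro list expands to +1, so the enumerator's initializer evaluates to the number of entries minus one, i.e. the value of the last real enumerator. A standalone sketch of the same trick with illustrative names (not V8's lists):

#include <cstdio>

#define COLOR_LIST(V) V(Red) V(Green) V(Blue)

enum Color {
#define DECLARE_COLOR(Name) k##Name,
  COLOR_LIST(DECLARE_COLOR)
#undef DECLARE_COLOR
#define COUNT_COLOR(Name) +1
  // Expands to: kLastColor = -1 +1 +1 +1, i.e. 2 == kBlue.
  kLastColor = -1 COLOR_LIST(COUNT_COLOR)
#undef COUNT_COLOR
};

int main() {
  std::printf("%d\n", static_cast<int>(kLastColor));  // 2
  return 0;
}
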
diff --git a/deps/v8/src/compiler/instruction-scheduler.cc b/deps/v8/src/compiler/backend/instruction-scheduler.cc
index de042cb670..b3220f07fd 100644
--- a/deps/v8/src/compiler/instruction-scheduler.cc
+++ b/deps/v8/src/compiler/backend/instruction-scheduler.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/compiler/instruction-scheduler.h"
+#include "src/compiler/backend/instruction-scheduler.h"
#include "src/base/adapters.h"
#include "src/base/utils/random-number-generator.h"
@@ -24,7 +24,6 @@ void InstructionScheduler::SchedulingQueueBase::AddNode(
nodes_.insert(it, node);
}
-
InstructionScheduler::ScheduleGraphNode*
InstructionScheduler::CriticalPathFirstQueue::PopBestCandidate(int cycle) {
DCHECK(!IsEmpty());
@@ -38,7 +37,7 @@ InstructionScheduler::CriticalPathFirstQueue::PopBestCandidate(int cycle) {
}
if (candidate != nodes_.end()) {
- ScheduleGraphNode *result = *candidate;
+ ScheduleGraphNode* result = *candidate;
nodes_.erase(candidate);
return result;
}
@@ -46,31 +45,26 @@ InstructionScheduler::CriticalPathFirstQueue::PopBestCandidate(int cycle) {
return nullptr;
}
-
InstructionScheduler::ScheduleGraphNode*
InstructionScheduler::StressSchedulerQueue::PopBestCandidate(int cycle) {
DCHECK(!IsEmpty());
// Choose a random element from the ready list.
auto candidate = nodes_.begin();
std::advance(candidate, isolate()->random_number_generator()->NextInt(
- static_cast<int>(nodes_.size())));
- ScheduleGraphNode *result = *candidate;
+ static_cast<int>(nodes_.size())));
+ ScheduleGraphNode* result = *candidate;
nodes_.erase(candidate);
return result;
}
-
-InstructionScheduler::ScheduleGraphNode::ScheduleGraphNode(
- Zone* zone,
- Instruction* instr)
+InstructionScheduler::ScheduleGraphNode::ScheduleGraphNode(Zone* zone,
+ Instruction* instr)
: instr_(instr),
successors_(zone),
unscheduled_predecessors_count_(0),
latency_(GetInstructionLatency(instr)),
total_latency_(-1),
- start_cycle_(-1) {
-}
-
+ start_cycle_(-1) {}
void InstructionScheduler::ScheduleGraphNode::AddSuccessor(
ScheduleGraphNode* node) {
@@ -99,7 +93,6 @@ void InstructionScheduler::StartBlock(RpoNumber rpo) {
sequence()->StartBlock(rpo);
}
-
void InstructionScheduler::EndBlock(RpoNumber rpo) {
if (FLAG_turbo_stress_instruction_scheduling) {
ScheduleBlock<StressSchedulerQueue>();
@@ -203,7 +196,6 @@ void InstructionScheduler::AddInstruction(Instruction* instr) {
graph_.push_back(new_node);
}
-
template <typename QueueType>
void InstructionScheduler::ScheduleBlock() {
QueueType ready_list(this);
@@ -229,8 +221,7 @@ void InstructionScheduler::ScheduleBlock() {
for (ScheduleGraphNode* successor : candidate->successors()) {
successor->DropUnscheduledPredecessor();
successor->set_start_cycle(
- std::max(successor->start_cycle(),
- cycle + candidate->latency()));
+ std::max(successor->start_cycle(), cycle + candidate->latency()));
if (!successor->HasUnscheduledPredecessor()) {
ready_list.AddNode(successor);
@@ -242,7 +233,6 @@ void InstructionScheduler::ScheduleBlock() {
}
}
-
int InstructionScheduler::GetInstructionFlags(const Instruction* instr) const {
switch (instr->arch_opcode()) {
case kArchNop:
@@ -303,6 +293,7 @@ int InstructionScheduler::GetInstructionFlags(const Instruction* instr) const {
case kArchCallCodeObject:
case kArchCallJSFunction:
case kArchCallWasmFunction:
+ case kArchCallBuiltinPointer:
case kArchTailCallCodeObjectFromJSFunction:
case kArchTailCallCodeObject:
case kArchTailCallAddress:
@@ -364,7 +355,7 @@ int InstructionScheduler::GetInstructionFlags(const Instruction* instr) const {
return kHasSideEffect;
#define CASE(Name) case k##Name:
- TARGET_ARCH_OPCODE_LIST(CASE)
+ TARGET_ARCH_OPCODE_LIST(CASE)
#undef CASE
return GetTargetInstructionFlags(instr);
}
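
The reflowed std::max call above is the core of the list scheduler: once a candidate is scheduled at cycle, every successor's earliest start is pushed out to cycle plus the candidate's latency, and a successor enters the ready list when its last unscheduled predecessor is gone. A toy standalone version of that propagation, with an already-valid topological order standing in for the real ready queue:

#include <algorithm>
#include <cstdio>
#include <vector>

// Toy sketch (not V8 code) of the start-cycle update in ScheduleBlock.
struct Node {
  int latency;
  std::vector<int> successors;
  int start_cycle = 0;
};

int main() {
  std::vector<Node> graph = {
      {2, {1, 2}},  // node 0: latency 2, feeds nodes 1 and 2
      {1, {}},      // node 1
      {3, {}},      // node 2
  };
  for (std::size_t i = 0; i < graph.size(); ++i) {
    int cycle = graph[i].start_cycle;
    for (int s : graph[i].successors) {
      // Successors may start no earlier than this node's completion.
      graph[s].start_cycle =
          std::max(graph[s].start_cycle, cycle + graph[i].latency);
    }
  }
  for (const Node& n : graph) std::printf("%d ", n.start_cycle);  // 0 2 2
  std::printf("\n");
  return 0;
}
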
diff --git a/deps/v8/src/compiler/instruction-scheduler.h b/deps/v8/src/compiler/backend/instruction-scheduler.h
index 87d41d30d6..31207ce1ca 100644
--- a/deps/v8/src/compiler/instruction-scheduler.h
+++ b/deps/v8/src/compiler/backend/instruction-scheduler.h
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_COMPILER_INSTRUCTION_SCHEDULER_H_
-#define V8_COMPILER_INSTRUCTION_SCHEDULER_H_
+#ifndef V8_COMPILER_BACKEND_INSTRUCTION_SCHEDULER_H_
+#define V8_COMPILER_BACKEND_INSTRUCTION_SCHEDULER_H_
-#include "src/compiler/instruction.h"
+#include "src/compiler/backend/instruction.h"
#include "src/zone/zone-containers.h"
namespace v8 {
@@ -41,7 +41,7 @@ class InstructionScheduler final : public ZoneObject {
private:
// A scheduling graph node.
   // Represents an instruction and its dependencies.
- class ScheduleGraphNode: public ZoneObject {
+ class ScheduleGraphNode : public ZoneObject {
public:
ScheduleGraphNode(Zone* zone, Instruction* instr);
@@ -100,15 +100,11 @@ class InstructionScheduler final : public ZoneObject {
class SchedulingQueueBase {
public:
explicit SchedulingQueueBase(InstructionScheduler* scheduler)
- : scheduler_(scheduler),
- nodes_(scheduler->zone()) {
- }
+ : scheduler_(scheduler), nodes_(scheduler->zone()) {}
void AddNode(ScheduleGraphNode* node);
- bool IsEmpty() const {
- return nodes_.empty();
- }
+ bool IsEmpty() const { return nodes_.empty(); }
protected:
InstructionScheduler* scheduler_;
@@ -118,10 +114,10 @@ class InstructionScheduler final : public ZoneObject {
   // A scheduling queue which prioritizes nodes on the critical path (we look
// for the instruction with the highest latency on the path to reach the end
// of the graph).
- class CriticalPathFirstQueue : public SchedulingQueueBase {
+ class CriticalPathFirstQueue : public SchedulingQueueBase {
public:
explicit CriticalPathFirstQueue(InstructionScheduler* scheduler)
- : SchedulingQueueBase(scheduler) { }
+ : SchedulingQueueBase(scheduler) {}
// Look for the best candidate to schedule, remove it from the queue and
// return it.
@@ -130,17 +126,15 @@ class InstructionScheduler final : public ZoneObject {
   // A queue which pops a random node from the queue to perform stress tests on
// the scheduler.
- class StressSchedulerQueue : public SchedulingQueueBase {
+ class StressSchedulerQueue : public SchedulingQueueBase {
public:
explicit StressSchedulerQueue(InstructionScheduler* scheduler)
- : SchedulingQueueBase(scheduler) { }
+ : SchedulingQueueBase(scheduler) {}
ScheduleGraphNode* PopBestCandidate(int cycle);
private:
- Isolate *isolate() {
- return scheduler_->isolate();
- }
+ Isolate* isolate() { return scheduler_->isolate(); }
};
// Perform scheduling for the current block specifying the queue type to
@@ -234,4 +228,4 @@ class InstructionScheduler final : public ZoneObject {
} // namespace internal
} // namespace v8
-#endif // V8_COMPILER_INSTRUCTION_SCHEDULER_H_
+#endif // V8_COMPILER_BACKEND_INSTRUCTION_SCHEDULER_H_
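
StressSchedulerQueue above exists purely for testing: under --turbo-stress-instruction-scheduling the ready node is picked uniformly at random, which flushes out any latent dependence on one particular instruction order. A standalone sketch of the same pop, with std::mt19937 standing in for isolate()->random_number_generator():

#include <cstdio>
#include <iterator>
#include <random>
#include <vector>

int main() {
  std::vector<int> ready = {10, 20, 30};  // stand-in ready-list node ids
  std::mt19937 rng(42);                   // V8 uses the isolate's RNG instead
  std::uniform_int_distribution<int> pick(0, static_cast<int>(ready.size()) - 1);
  auto candidate = ready.begin();
  std::advance(candidate, pick(rng));     // random element, as in PopBestCandidate
  std::printf("scheduling node %d\n", *candidate);
  ready.erase(candidate);
  return 0;
}
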
diff --git a/deps/v8/src/compiler/instruction-selector-impl.h b/deps/v8/src/compiler/backend/instruction-selector-impl.h
index bff70d5edf..228abb9533 100644
--- a/deps/v8/src/compiler/instruction-selector-impl.h
+++ b/deps/v8/src/compiler/backend/instruction-selector-impl.h
@@ -2,12 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_COMPILER_INSTRUCTION_SELECTOR_IMPL_H_
-#define V8_COMPILER_INSTRUCTION_SELECTOR_IMPL_H_
+#ifndef V8_COMPILER_BACKEND_INSTRUCTION_SELECTOR_IMPL_H_
+#define V8_COMPILER_BACKEND_INSTRUCTION_SELECTOR_IMPL_H_
+#include "src/compiler/backend/instruction-selector.h"
+#include "src/compiler/backend/instruction.h"
#include "src/compiler/common-operator.h"
-#include "src/compiler/instruction-selector.h"
-#include "src/compiler/instruction.h"
#include "src/compiler/linkage.h"
#include "src/compiler/schedule.h"
#include "src/macro-assembler.h"
@@ -320,7 +320,7 @@ class OperandGenerator {
// We cannot use {intptr_t} here, since the Constant constructor would
// be ambiguous on some architectures.
using ptrsize_int_t =
- std::conditional<kPointerSize == 8, int64_t, int32_t>::type;
+ std::conditional<kSystemPointerSize == 8, int64_t, int32_t>::type;
return Constant(reinterpret_cast<ptrsize_int_t>(
OpParameter<const char*>(node->op())));
}
@@ -422,4 +422,4 @@ class OperandGenerator {
} // namespace internal
} // namespace v8
-#endif // V8_COMPILER_INSTRUCTION_SELECTOR_IMPL_H_
+#endif // V8_COMPILER_BACKEND_INSTRUCTION_SELECTOR_IMPL_H_
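
The ptrsize_int_t hunk above swaps kPointerSize for kSystemPointerSize, part of this release's split between system-pointer and tagged-value sizes. The std::conditional trick itself picks a pointer-sized integer type at compile time; a self-contained version, with sizeof(void*) standing in for the V8 constant:

#include <cstdint>
#include <type_traits>

// Pointer-sized signed integer chosen at compile time; sizeof(void*) stands
// in for V8's kSystemPointerSize so the snippet is self-contained.
using ptrsize_int_t =
    std::conditional<sizeof(void*) == 8, int64_t, int32_t>::type;

static_assert(sizeof(ptrsize_int_t) == sizeof(void*),
              "ptrsize_int_t matches the platform pointer width");
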
diff --git a/deps/v8/src/compiler/instruction-selector.cc b/deps/v8/src/compiler/backend/instruction-selector.cc
index 4bd4dc18fe..bbf13e49b7 100644
--- a/deps/v8/src/compiler/instruction-selector.cc
+++ b/deps/v8/src/compiler/backend/instruction-selector.cc
@@ -2,14 +2,14 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/compiler/instruction-selector.h"
+#include "src/compiler/backend/instruction-selector.h"
#include <limits>
#include "src/assembler-inl.h"
#include "src/base/adapters.h"
+#include "src/compiler/backend/instruction-selector-impl.h"
#include "src/compiler/compiler-source-position-table.h"
-#include "src/compiler/instruction-selector-impl.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/pipeline.h"
#include "src/compiler/schedule.h"
@@ -126,7 +126,6 @@ void InstructionSelector::StartBlock(RpoNumber rpo) {
}
}
-
void InstructionSelector::EndBlock(RpoNumber rpo) {
if (UseInstructionScheduling()) {
DCHECK_NOT_NULL(scheduler_);
@@ -162,7 +161,6 @@ Instruction* InstructionSelector::Emit(InstructionCode opcode,
return Emit(opcode, output_count, &output, 0, nullptr, temp_count, temps);
}
-
Instruction* InstructionSelector::Emit(InstructionCode opcode,
InstructionOperand output,
InstructionOperand a, size_t temp_count,
@@ -171,7 +169,6 @@ Instruction* InstructionSelector::Emit(InstructionCode opcode,
return Emit(opcode, output_count, &output, 1, &a, temp_count, temps);
}
-
Instruction* InstructionSelector::Emit(InstructionCode opcode,
InstructionOperand output,
InstructionOperand a,
@@ -184,7 +181,6 @@ Instruction* InstructionSelector::Emit(InstructionCode opcode,
temps);
}
-
Instruction* InstructionSelector::Emit(InstructionCode opcode,
InstructionOperand output,
InstructionOperand a,
@@ -198,7 +194,6 @@ Instruction* InstructionSelector::Emit(InstructionCode opcode,
temps);
}
-
Instruction* InstructionSelector::Emit(
InstructionCode opcode, InstructionOperand output, InstructionOperand a,
InstructionOperand b, InstructionOperand c, InstructionOperand d,
@@ -210,7 +205,6 @@ Instruction* InstructionSelector::Emit(
temps);
}
-
Instruction* InstructionSelector::Emit(
InstructionCode opcode, InstructionOperand output, InstructionOperand a,
InstructionOperand b, InstructionOperand c, InstructionOperand d,
@@ -222,7 +216,6 @@ Instruction* InstructionSelector::Emit(
temps);
}
-
Instruction* InstructionSelector::Emit(
InstructionCode opcode, InstructionOperand output, InstructionOperand a,
InstructionOperand b, InstructionOperand c, InstructionOperand d,
@@ -235,7 +228,6 @@ Instruction* InstructionSelector::Emit(
temps);
}
-
Instruction* InstructionSelector::Emit(
InstructionCode opcode, size_t output_count, InstructionOperand* outputs,
size_t input_count, InstructionOperand* inputs, size_t temp_count,
@@ -253,13 +245,11 @@ Instruction* InstructionSelector::Emit(
return Emit(instr);
}
-
Instruction* InstructionSelector::Emit(Instruction* instr) {
instructions_.push_back(instr);
return instr;
}
-
bool InstructionSelector::CanCover(Node* user, Node* node) const {
// 1. Both {user} and {node} must be in the same basic block.
if (schedule()->block(node) != schedule()->block(user)) {
@@ -282,6 +272,21 @@ bool InstructionSelector::CanCover(Node* user, Node* node) const {
return true;
}
+bool InstructionSelector::CanCoverTransitively(Node* user, Node* node,
+ Node* node_input) const {
+ if (CanCover(user, node) && CanCover(node, node_input)) {
+ // If {node} is pure, transitivity might not hold.
+ if (node->op()->HasProperty(Operator::kPure)) {
+ // If {node_input} is pure, the effect levels do not matter.
+ if (node_input->op()->HasProperty(Operator::kPure)) return true;
+ // Otherwise, {user} and {node_input} must have the same effect level.
+ return GetEffectLevel(user) == GetEffectLevel(node_input);
+ }
+ return true;
+ }
+ return false;
+}
+
bool InstructionSelector::IsOnlyUserOfNodeInSameBlock(Node* user,
Node* node) const {
BasicBlock* bb_user = schedule()->block(user);
@@ -356,7 +361,6 @@ int InstructionSelector::GetVirtualRegister(const Node* node) {
return virtual_register;
}
-
const std::map<NodeId, int> InstructionSelector::GetVirtualRegistersForTesting()
const {
std::map<NodeId, int> virtual_registers;
@@ -369,7 +373,6 @@ const std::map<NodeId, int> InstructionSelector::GetVirtualRegistersForTesting()
return virtual_registers;
}
-
bool InstructionSelector::IsDefined(Node* node) const {
DCHECK_NOT_NULL(node);
size_t const id = node->id();
@@ -377,7 +380,6 @@ bool InstructionSelector::IsDefined(Node* node) const {
return defined_[id];
}
-
void InstructionSelector::MarkAsDefined(Node* node) {
DCHECK_NOT_NULL(node);
size_t const id = node->id();
@@ -385,7 +387,6 @@ void InstructionSelector::MarkAsDefined(Node* node) {
defined_[id] = true;
}
-
bool InstructionSelector::IsUsed(Node* node) const {
DCHECK_NOT_NULL(node);
// TODO(bmeurer): This is a terrible monster hack, but we have to make sure
@@ -397,7 +398,6 @@ bool InstructionSelector::IsUsed(Node* node) const {
return used_[id];
}
-
void InstructionSelector::MarkAsUsed(Node* node) {
DCHECK_NOT_NULL(node);
size_t const id = node->id();
@@ -435,7 +435,6 @@ void InstructionSelector::MarkAsRepresentation(MachineRepresentation rep,
sequence()->MarkAsRepresentation(rep, unalloc.virtual_register());
}
-
void InstructionSelector::MarkAsRepresentation(MachineRepresentation rep,
Node* node) {
sequence()->MarkAsRepresentation(rep, GetVirtualRegister(node));
@@ -472,7 +471,7 @@ InstructionOperand OperandForDeopt(Isolate* isolate, OperandGenerator* g,
Handle<HeapObject> constant = HeapConstantOf(input->op());
RootIndex root_index;
- if (isolate->heap()->IsRootHandle(constant, &root_index) &&
+ if (isolate->roots_table().IsRootHandle(constant, &root_index) &&
root_index == RootIndex::kOptimizedOut) {
// For an optimized-out object we return an invalid instruction
// operand, so that we take the fast path for optimized-out values.
@@ -611,7 +610,6 @@ size_t InstructionSelector::AddOperandToStateValueDescriptor(
}
}
-
// Returns the number of instruction operands added to inputs.
size_t InstructionSelector::AddInputsToFrameStateDescriptor(
FrameStateDescriptor* descriptor, Node* state, OperandGenerator* g,
@@ -792,7 +790,6 @@ struct CallBuffer {
instruction_args.reserve(input_count() + frame_state_value_count());
}
-
const CallDescriptor* descriptor;
FrameStateDescriptor* frame_state_descriptor;
ZoneVector<PushParameter> output_nodes;
@@ -812,7 +809,6 @@ struct CallBuffer {
}
};
-
// TODO(bmeurer): Get rid of the CallBuffer business and make
// InstructionSelector::VisitCall platform independent instead.
void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
@@ -886,6 +882,7 @@ void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
bool call_code_immediate = (flags & kCallCodeImmediate) != 0;
bool call_address_immediate = (flags & kCallAddressImmediate) != 0;
bool call_use_fixed_target_reg = (flags & kCallFixedTargetRegister) != 0;
+ bool call_through_slot = (flags & kAllowCallThroughSlot) != 0;
switch (buffer->descriptor->kind()) {
case CallDescriptor::kCallCodeObject:
// TODO(jgruber, v8:7449): The below is a hack to support tail-calls from
@@ -899,7 +896,8 @@ void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
: call_use_fixed_target_reg
? g.UseFixed(callee, kJavaScriptCallCodeStartRegister)
: is_tail_call ? g.UseUniqueRegister(callee)
- : g.UseRegister(callee));
+ : call_through_slot ? g.UseUniqueSlot(callee)
+ : g.UseRegister(callee));
break;
case CallDescriptor::kCallAddress:
buffer->instruction_args.push_back(
@@ -911,6 +909,7 @@ void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
: g.UseRegister(callee));
break;
case CallDescriptor::kCallWasmFunction:
+ case CallDescriptor::kCallWasmImportWrapper:
buffer->instruction_args.push_back(
(call_address_immediate &&
(callee->opcode() == IrOpcode::kRelocatableInt64Constant ||
@@ -920,6 +919,15 @@ void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
? g.UseFixed(callee, kJavaScriptCallCodeStartRegister)
: g.UseRegister(callee));
break;
+ case CallDescriptor::kCallBuiltinPointer:
+ // The common case for builtin pointers is to have the target in a
+ // register. If we have a constant, we use a register anyway to simplify
+ // related code.
+ buffer->instruction_args.push_back(
+ call_use_fixed_target_reg
+ ? g.UseFixed(callee, kJavaScriptCallCodeStartRegister)
+ : g.UseRegister(callee));
+ break;
case CallDescriptor::kCallJSFunction:
buffer->instruction_args.push_back(
g.UseLocation(callee, buffer->descriptor->GetInputLocation(0)));
@@ -1012,7 +1020,7 @@ void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
if (poisoning_level_ != PoisoningMitigationLevel::kDontPoison &&
unallocated.HasFixedRegisterPolicy()) {
int reg = unallocated.fixed_register_index();
- if (reg == kSpeculationPoisonRegister.code()) {
+ if (Register::from_code(reg) == kSpeculationPoisonRegister) {
buffer->instruction_args[poison_alias_index] = g.TempImmediate(
static_cast<int32_t>(buffer->instruction_args.size()));
op = g.UseRegisterOrSlotOrConstant(*iter);
@@ -1126,7 +1134,6 @@ void InstructionSelector::VisitBlock(BasicBlock* block) {
current_block_ = nullptr;
}
-
void InstructionSelector::VisitControl(BasicBlock* block) {
#ifdef DEBUG
// SSA deconstruction requires targets of branches not to have phis.
@@ -1490,6 +1497,8 @@ void InstructionSelector::VisitNode(Node* node) {
} else {
return EmitIdentity(node);
}
+ case IrOpcode::kTruncateFloat64ToInt64:
+ return MarkAsWord64(node), VisitTruncateFloat64ToInt64(node);
case IrOpcode::kTruncateFloat64ToUint32:
return MarkAsWord32(node), VisitTruncateFloat64ToUint32(node);
case IrOpcode::kTruncateFloat32ToInt32:
@@ -2155,7 +2164,6 @@ void InstructionSelector::EmitTableSwitch(const SwitchInfo& sw,
Emit(kArchTableSwitch, 0, nullptr, input_count, inputs, 0, nullptr);
}
-
void InstructionSelector::EmitLookupSwitch(const SwitchInfo& sw,
InstructionOperand& value_operand) {
OperandGenerator g(this);
@@ -2206,88 +2214,64 @@ void InstructionSelector::VisitBitcastWordToTagged(Node* node) {
void InstructionSelector::VisitWord64And(Node* node) { UNIMPLEMENTED(); }
-
void InstructionSelector::VisitWord64Or(Node* node) { UNIMPLEMENTED(); }
-
void InstructionSelector::VisitWord64Xor(Node* node) { UNIMPLEMENTED(); }
-
void InstructionSelector::VisitWord64Shl(Node* node) { UNIMPLEMENTED(); }
-
void InstructionSelector::VisitWord64Shr(Node* node) { UNIMPLEMENTED(); }
-
void InstructionSelector::VisitWord64Sar(Node* node) { UNIMPLEMENTED(); }
-
void InstructionSelector::VisitWord64Ror(Node* node) { UNIMPLEMENTED(); }
-
void InstructionSelector::VisitWord64Clz(Node* node) { UNIMPLEMENTED(); }
-
void InstructionSelector::VisitWord64Ctz(Node* node) { UNIMPLEMENTED(); }
-
void InstructionSelector::VisitWord64ReverseBits(Node* node) {
UNIMPLEMENTED();
}
-
void InstructionSelector::VisitWord64Popcnt(Node* node) { UNIMPLEMENTED(); }
-
void InstructionSelector::VisitWord64Equal(Node* node) { UNIMPLEMENTED(); }
-
void InstructionSelector::VisitInt64Add(Node* node) { UNIMPLEMENTED(); }
-
void InstructionSelector::VisitInt64AddWithOverflow(Node* node) {
UNIMPLEMENTED();
}
-
void InstructionSelector::VisitInt64Sub(Node* node) { UNIMPLEMENTED(); }
-
void InstructionSelector::VisitInt64SubWithOverflow(Node* node) {
UNIMPLEMENTED();
}
void InstructionSelector::VisitInt64Mul(Node* node) { UNIMPLEMENTED(); }
-
void InstructionSelector::VisitInt64Div(Node* node) { UNIMPLEMENTED(); }
-
void InstructionSelector::VisitInt64LessThan(Node* node) { UNIMPLEMENTED(); }
-
void InstructionSelector::VisitInt64LessThanOrEqual(Node* node) {
UNIMPLEMENTED();
}
-
void InstructionSelector::VisitUint64Div(Node* node) { UNIMPLEMENTED(); }
-
void InstructionSelector::VisitInt64Mod(Node* node) { UNIMPLEMENTED(); }
-
void InstructionSelector::VisitUint64LessThan(Node* node) { UNIMPLEMENTED(); }
-
void InstructionSelector::VisitUint64LessThanOrEqual(Node* node) {
UNIMPLEMENTED();
}
-
void InstructionSelector::VisitUint64Mod(Node* node) { UNIMPLEMENTED(); }
-
void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
UNIMPLEMENTED();
}
@@ -2308,46 +2292,42 @@ void InstructionSelector::VisitChangeFloat64ToUint64(Node* node) {
UNIMPLEMENTED();
}
-void InstructionSelector::VisitTryTruncateFloat32ToInt64(Node* node) {
+void InstructionSelector::VisitTruncateFloat64ToInt64(Node* node) {
UNIMPLEMENTED();
}
+void InstructionSelector::VisitTryTruncateFloat32ToInt64(Node* node) {
+ UNIMPLEMENTED();
+}
void InstructionSelector::VisitTryTruncateFloat64ToInt64(Node* node) {
UNIMPLEMENTED();
}
-
void InstructionSelector::VisitTryTruncateFloat32ToUint64(Node* node) {
UNIMPLEMENTED();
}
-
void InstructionSelector::VisitTryTruncateFloat64ToUint64(Node* node) {
UNIMPLEMENTED();
}
-
void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
UNIMPLEMENTED();
}
-
void InstructionSelector::VisitRoundInt64ToFloat32(Node* node) {
UNIMPLEMENTED();
}
-
void InstructionSelector::VisitRoundInt64ToFloat64(Node* node) {
UNIMPLEMENTED();
}
-
void InstructionSelector::VisitRoundUint64ToFloat32(Node* node) {
UNIMPLEMENTED();
}
-
void InstructionSelector::VisitRoundUint64ToFloat64(Node* node) {
UNIMPLEMENTED();
}
@@ -2356,7 +2336,6 @@ void InstructionSelector::VisitBitcastFloat64ToInt64(Node* node) {
UNIMPLEMENTED();
}
-
void InstructionSelector::VisitBitcastInt64ToFloat64(Node* node) {
UNIMPLEMENTED();
}
@@ -2455,20 +2434,6 @@ void InstructionSelector::VisitWord64AtomicCompareExchange(Node* node) {
#endif // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_PPC
// !V8_TARGET_ARCH_MIPS64 && !V8_TARGET_ARCH_S390
-#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS && \
- !V8_TARGET_ARCH_MIPS64 && !V8_TARGET_ARCH_IA32
-void InstructionSelector::VisitI8x16Shl(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI8x16ShrS(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI8x16ShrU(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI8x16Mul(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitS8x16Shuffle(Node* node) { UNIMPLEMENTED(); }
-#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS
- // && !V8_TARGET_ARCH_MIPS64 && !V8_TARGET_ARCH_IA32
-
void InstructionSelector::VisitFinishRegion(Node* node) { EmitIdentity(node); }
void InstructionSelector::VisitParameter(Node* node) {
@@ -2489,7 +2454,7 @@ LinkageLocation ExceptionLocation() {
return LinkageLocation::ForRegister(kReturnRegister0.code(),
MachineType::IntPtr());
}
-}
+} // namespace
void InstructionSelector::VisitIfException(Node* node) {
OperandGenerator g(this);
@@ -2497,7 +2462,6 @@ void InstructionSelector::VisitIfException(Node* node) {
Emit(kArchNop, g.DefineAsLocation(node, ExceptionLocation()));
}
-
void InstructionSelector::VisitOsrValue(Node* node) {
OperandGenerator g(this);
int index = OsrValueIndexOf(node->op());
@@ -2505,7 +2469,6 @@ void InstructionSelector::VisitOsrValue(Node* node) {
g.DefineAsLocation(node, linkage()->GetOsrValueLocation(index)));
}
-
void InstructionSelector::VisitPhi(Node* node) {
const int input_count = node->op()->ValueInputCount();
DCHECK_EQ(input_count, current_block_->PredecessorCount());
@@ -2522,7 +2485,6 @@ void InstructionSelector::VisitPhi(Node* node) {
}
}
-
void InstructionSelector::VisitProjection(Node* node) {
OperandGenerator g(this);
Node* value = node->InputAt(0);
@@ -2556,7 +2518,6 @@ void InstructionSelector::VisitProjection(Node* node) {
}
}
-
void InstructionSelector::VisitConstant(Node* node) {
// We must emit a NOP here because every live range needs a defining
// instruction in the register allocator.
@@ -2564,7 +2525,6 @@ void InstructionSelector::VisitConstant(Node* node) {
Emit(kArchNop, g.DefineAsConstant(node));
}
-
void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
OperandGenerator g(this);
auto call_descriptor = CallDescriptorOf(node->op());
@@ -2576,6 +2536,7 @@ void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
}
CallBuffer buffer(zone(), call_descriptor, frame_state_descriptor);
+ CallDescriptor::Flags flags = call_descriptor->flags();
// Compute InstructionOperands for inputs and outputs.
// TODO(turbofan): on some architectures it's probably better to use
@@ -2583,12 +2544,20 @@ void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
// Improve constant pool and the heuristics in the register allocator
// for where to emit constants.
CallBufferFlags call_buffer_flags(kCallCodeImmediate | kCallAddressImmediate);
+ if (flags & CallDescriptor::kAllowCallThroughSlot) {
+ // TODO(v8:6666): Remove kAllowCallThroughSlot and use a pc-relative call
+ // instead once builtins are embedded in every build configuration.
+ call_buffer_flags |= kAllowCallThroughSlot;
+#ifndef V8_TARGET_ARCH_32_BIT
+ // kAllowCallThroughSlot is only supported on ia32.
+ UNREACHABLE();
+#endif
+ }
InitializeCallBuffer(node, &buffer, call_buffer_flags, false);
EmitPrepareArguments(&(buffer.pushed_nodes), call_descriptor, node);
// Pass label of exception handler block.
- CallDescriptor::Flags flags = call_descriptor->flags();
if (handler) {
DCHECK_EQ(IrOpcode::kIfException, handler->front()->opcode());
flags |= CallDescriptor::kHasExceptionHandler;
@@ -2609,8 +2578,12 @@ void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
opcode = kArchCallJSFunction | MiscField::encode(flags);
break;
case CallDescriptor::kCallWasmFunction:
+ case CallDescriptor::kCallWasmImportWrapper:
opcode = kArchCallWasmFunction | MiscField::encode(flags);
break;
+ case CallDescriptor::kCallBuiltinPointer:
+ opcode = kArchCallBuiltinPointer | MiscField::encode(flags);
+ break;
}
// Emit the call instruction.
@@ -2655,6 +2628,7 @@ void InstructionSelector::VisitTailCall(Node* node) {
if (callee->flags() & CallDescriptor::kFixedTargetRegister) {
flags |= kCallFixedTargetRegister;
}
+ DCHECK_EQ(callee->flags() & CallDescriptor::kAllowCallThroughSlot, 0);
InitializeCallBuffer(node, &buffer, flags, true, stack_param_delta);
// Select the appropriate opcode based on the call type.
@@ -2712,7 +2686,6 @@ void InstructionSelector::VisitTailCall(Node* node) {
temps.empty() ? nullptr : &temps.front());
}
-
void InstructionSelector::VisitGoto(BasicBlock* target) {
// jump to the next block.
OperandGenerator g(this);
diff --git a/deps/v8/src/compiler/instruction-selector.h b/deps/v8/src/compiler/backend/instruction-selector.h
index 435b7185a6..e0cdf91028 100644
--- a/deps/v8/src/compiler/instruction-selector.h
+++ b/deps/v8/src/compiler/backend/instruction-selector.h
@@ -2,17 +2,18 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_COMPILER_INSTRUCTION_SELECTOR_H_
-#define V8_COMPILER_INSTRUCTION_SELECTOR_H_
+#ifndef V8_COMPILER_BACKEND_INSTRUCTION_SELECTOR_H_
+#define V8_COMPILER_BACKEND_INSTRUCTION_SELECTOR_H_
#include <map>
+#include "src/compiler/backend/instruction-scheduler.h"
+#include "src/compiler/backend/instruction.h"
#include "src/compiler/common-operator.h"
-#include "src/compiler/instruction-scheduler.h"
-#include "src/compiler/instruction.h"
#include "src/compiler/linkage.h"
#include "src/compiler/machine-operator.h"
#include "src/compiler/node.h"
+#include "src/cpu-features.h"
#include "src/globals.h"
#include "src/zone/zone-containers.h"
@@ -393,6 +394,10 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
// instruction. A node can be covered if the {user} of the node has the only
// edge and the two are in the same basic block.
bool CanCover(Node* user, Node* node) const;
+  // CanCover is not transitive. A counterexample is nodes A, B, C such that
+  // CanCover(A, B) and CanCover(B, C) hold and B is pure: the effect levels
+  // of A and C might still differ. CanCoverTransitively does the additional
+  // checks.
+ bool CanCoverTransitively(Node* user, Node* node, Node* node_input) const;
// Used in pattern matching during code generation.
// This function checks that {node} and {user} are in the same basic block,
@@ -545,6 +550,7 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
kCallAddressImmediate = 1u << 1,
kCallTail = 1u << 2,
kCallFixedTargetRegister = 1u << 3,
+ kAllowCallThroughSlot = 1u << 4
};
typedef base::Flags<CallBufferFlag> CallBufferFlags;
@@ -768,4 +774,4 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
} // namespace internal
} // namespace v8
-#endif // V8_COMPILER_INSTRUCTION_SELECTOR_H_
+#endif // V8_COMPILER_BACKEND_INSTRUCTION_SELECTOR_H_
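
The new CanCoverTransitively declaration above guards instruction fusion across two covering steps: covering is only transitive when the middle node's purity cannot mask an effect-level mismatch between the outer user and the inner input. A boolean paraphrase of the check in instruction-selector.cc (standalone, not the V8 signature):

#include <cstdio>

// Paraphrase of InstructionSelector::CanCoverTransitively, with the covering
// and purity facts passed in as plain booleans and effect levels as ints.
bool CanCoverTransitively(bool covers_user_node, bool covers_node_input,
                          bool node_is_pure, bool input_is_pure,
                          int user_effect_level, int input_effect_level) {
  if (!covers_user_node || !covers_node_input) return false;
  if (!node_is_pure) return true;   // impure middle node: CanCover already
                                    // compared its effect level to the user's.
  if (input_is_pure) return true;   // pure input: effect levels are irrelevant.
  return user_effect_level == input_effect_level;
}

int main() {
  // Pure middle node, impure input observed at a different effect level:
  // fusing user with node_input would skip an intervening effectful op.
  std::printf("%d\n", CanCoverTransitively(true, true, true, false, 0, 1));  // 0
  return 0;
}
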
diff --git a/deps/v8/src/compiler/instruction.cc b/deps/v8/src/compiler/backend/instruction.cc
index 04a2bd9581..cfe97bb1aa 100644
--- a/deps/v8/src/compiler/instruction.cc
+++ b/deps/v8/src/compiler/backend/instruction.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/compiler/instruction.h"
+#include "src/compiler/backend/instruction.h"
#include <iomanip>
@@ -10,6 +10,7 @@
#include "src/compiler/graph.h"
#include "src/compiler/schedule.h"
#include "src/compiler/state-values-utils.h"
+#include "src/register-configuration.h"
#include "src/source-position.h"
namespace v8 {
@@ -89,10 +90,12 @@ bool InstructionOperand::InterferesWith(const InstructionOperand& other) const {
// the gap resolver may break a move into 2 or 4 equivalent smaller moves.
DCHECK_EQ(LocationOperand::STACK_SLOT, kind);
int index_hi = loc.index();
- int index_lo = index_hi - (1 << ElementSizeLog2Of(rep)) / kPointerSize + 1;
+ int index_lo =
+ index_hi - (1 << ElementSizeLog2Of(rep)) / kSystemPointerSize + 1;
int other_index_hi = other_loc.index();
int other_index_lo =
- other_index_hi - (1 << ElementSizeLog2Of(other_rep)) / kPointerSize + 1;
+ other_index_hi -
+ (1 << ElementSizeLog2Of(other_rep)) / kSystemPointerSize + 1;
return other_index_hi >= index_lo && index_hi >= other_index_lo;
}
return false;
@@ -118,19 +121,9 @@ bool LocationOperand::IsCompatible(LocationOperand* op) {
}
}
-void InstructionOperand::Print(const RegisterConfiguration* config) const {
- PrintableInstructionOperand wrapper;
- wrapper.register_configuration_ = config;
- wrapper.op_ = *this;
- StdoutStream{} << wrapper << std::endl;
-}
-
-void InstructionOperand::Print() const { Print(GetRegConfig()); }
+void InstructionOperand::Print() const { StdoutStream{} << *this << std::endl; }
-std::ostream& operator<<(std::ostream& os,
- const PrintableInstructionOperand& printable) {
- const InstructionOperand& op = printable.op_;
- const RegisterConfiguration* conf = printable.register_configuration_;
+std::ostream& operator<<(std::ostream& os, const InstructionOperand& op) {
switch (op.kind()) {
case InstructionOperand::UNALLOCATED: {
const UnallocatedOperand* unalloc = UnallocatedOperand::cast(&op);
@@ -143,12 +136,11 @@ std::ostream& operator<<(std::ostream& os,
return os;
case UnallocatedOperand::FIXED_REGISTER:
return os << "(="
- << conf->GetGeneralRegisterName(
- unalloc->fixed_register_index())
+ << Register::from_code(unalloc->fixed_register_index())
<< ")";
case UnallocatedOperand::FIXED_FP_REGISTER:
return os << "(="
- << conf->GetDoubleRegisterName(
+ << DoubleRegister::from_code(
unalloc->fixed_register_index())
<< ")";
case UnallocatedOperand::MUST_HAVE_REGISTER:
@@ -183,22 +175,20 @@ std::ostream& operator<<(std::ostream& os,
} else if (op.IsFPStackSlot()) {
os << "[fp_stack:" << allocated.index();
} else if (op.IsRegister()) {
- os << "["
- << GetRegConfig()->GetGeneralOrSpecialRegisterName(
- allocated.register_code())
- << "|R";
+ const char* name =
+ allocated.register_code() < Register::kNumRegisters
+ ? RegisterName(Register::from_code(allocated.register_code()))
+ : Register::GetSpecialRegisterName(allocated.register_code());
+ os << "[" << name << "|R";
} else if (op.IsDoubleRegister()) {
- os << "["
- << GetRegConfig()->GetDoubleRegisterName(allocated.register_code())
+ os << "[" << DoubleRegister::from_code(allocated.register_code())
<< "|R";
} else if (op.IsFloatRegister()) {
- os << "["
- << GetRegConfig()->GetFloatRegisterName(allocated.register_code())
+ os << "[" << FloatRegister::from_code(allocated.register_code())
<< "|R";
} else {
DCHECK(op.IsSimd128Register());
- os << "["
- << GetRegConfig()->GetSimd128RegisterName(allocated.register_code())
+ os << "[" << Simd128Register::from_code(allocated.register_code())
<< "|R";
}
if (allocated.IsExplicit()) {
@@ -250,32 +240,18 @@ std::ostream& operator<<(std::ostream& os,
UNREACHABLE();
}
-void MoveOperands::Print(const RegisterConfiguration* config) const {
- StdoutStream os;
- PrintableInstructionOperand wrapper;
- wrapper.register_configuration_ = config;
- wrapper.op_ = destination();
- os << wrapper << " = ";
- wrapper.op_ = source();
- os << wrapper << std::endl;
+void MoveOperands::Print() const {
+ StdoutStream{} << destination() << " = " << source() << std::endl;
}
-void MoveOperands::Print() const { Print(GetRegConfig()); }
-
-std::ostream& operator<<(std::ostream& os,
- const PrintableMoveOperands& printable) {
- const MoveOperands& mo = *printable.move_operands_;
- PrintableInstructionOperand printable_op = {printable.register_configuration_,
- mo.destination()};
- os << printable_op;
+std::ostream& operator<<(std::ostream& os, const MoveOperands& mo) {
+ os << mo.destination();
if (!mo.source().Equals(mo.destination())) {
- printable_op.op_ = mo.source();
- os << " = " << printable_op;
+ os << " = " << mo.source();
}
return os << ";";
}
-
bool ParallelMove::IsRedundant() const {
for (MoveOperands* move : *this) {
if (!move->IsRedundant()) return false;
@@ -357,7 +333,6 @@ Instruction::Instruction(InstructionCode opcode, size_t output_count,
}
}
-
bool Instruction::AreMovesRedundant() const {
for (int i = Instruction::FIRST_GAP_POSITION;
i <= Instruction::LAST_GAP_POSITION; i++) {
@@ -368,30 +343,18 @@ bool Instruction::AreMovesRedundant() const {
return true;
}
-void Instruction::Print(const RegisterConfiguration* config) const {
- PrintableInstruction wrapper;
- wrapper.instr_ = this;
- wrapper.register_configuration_ = config;
- StdoutStream{} << wrapper << std::endl;
-}
-
-void Instruction::Print() const { Print(GetRegConfig()); }
+void Instruction::Print() const { StdoutStream{} << *this << std::endl; }
-std::ostream& operator<<(std::ostream& os,
- const PrintableParallelMove& printable) {
- const ParallelMove& pm = *printable.parallel_move_;
- bool first = true;
+std::ostream& operator<<(std::ostream& os, const ParallelMove& pm) {
+ const char* space = "";
for (MoveOperands* move : pm) {
if (move->IsEliminated()) continue;
- if (!first) os << " ";
- first = false;
- PrintableMoveOperands pmo = {printable.register_configuration_, move};
- os << pmo;
+ os << space << *move;
+ space = " ";
}
return os;
}
-
void ReferenceMap::RecordReference(const AllocatedOperand& op) {
// Do not record arguments as pointers.
if (op.IsStackSlot() && LocationOperand::cast(op).index() < 0) return;
@@ -399,24 +362,16 @@ void ReferenceMap::RecordReference(const AllocatedOperand& op) {
reference_operands_.push_back(op);
}
-
std::ostream& operator<<(std::ostream& os, const ReferenceMap& pm) {
os << "{";
- bool first = true;
- PrintableInstructionOperand poi = {GetRegConfig(), InstructionOperand()};
+ const char* separator = "";
for (const InstructionOperand& op : pm.reference_operands_) {
- if (!first) {
- os << ";";
- } else {
- first = false;
- }
- poi.op_ = op;
- os << poi;
+ os << separator << op;
+ separator = ";";
}
return os << "}";
}
-
std::ostream& operator<<(std::ostream& os, const ArchOpcode& ao) {
switch (ao) {
#define CASE(Name) \
@@ -428,7 +383,6 @@ std::ostream& operator<<(std::ostream& os, const ArchOpcode& ao) {
UNREACHABLE();
}
-
std::ostream& operator<<(std::ostream& os, const AddressingMode& am) {
switch (am) {
case kMode_None:
@@ -442,7 +396,6 @@ std::ostream& operator<<(std::ostream& os, const AddressingMode& am) {
UNREACHABLE();
}
-
std::ostream& operator<<(std::ostream& os, const FlagsMode& fm) {
switch (fm) {
case kFlags_none:
@@ -463,7 +416,6 @@ std::ostream& operator<<(std::ostream& os, const FlagsMode& fm) {
UNREACHABLE();
}
-
std::ostream& operator<<(std::ostream& os, const FlagsCondition& fc) {
switch (fc) {
case kEqual:
@@ -518,35 +470,28 @@ std::ostream& operator<<(std::ostream& os, const FlagsCondition& fc) {
UNREACHABLE();
}
-
-std::ostream& operator<<(std::ostream& os,
- const PrintableInstruction& printable) {
- const Instruction& instr = *printable.instr_;
- PrintableInstructionOperand printable_op = {printable.register_configuration_,
- InstructionOperand()};
+std::ostream& operator<<(std::ostream& os, const Instruction& instr) {
os << "gap ";
for (int i = Instruction::FIRST_GAP_POSITION;
i <= Instruction::LAST_GAP_POSITION; i++) {
os << "(";
if (instr.parallel_moves()[i] != nullptr) {
- PrintableParallelMove ppm = {printable.register_configuration_,
- instr.parallel_moves()[i]};
- os << ppm;
+ os << *instr.parallel_moves()[i];
}
os << ") ";
}
os << "\n ";
- if (instr.OutputCount() > 1) os << "(";
- for (size_t i = 0; i < instr.OutputCount(); i++) {
- if (i > 0) os << ", ";
- printable_op.op_ = *instr.OutputAt(i);
- os << printable_op;
+ if (instr.OutputCount() == 1) {
+ os << *instr.OutputAt(0) << " = ";
+ } else if (instr.OutputCount() > 1) {
+ os << "(" << *instr.OutputAt(0);
+ for (size_t i = 1; i < instr.OutputCount(); i++) {
+ os << ", " << *instr.OutputAt(i);
+ }
+ os << ") = ";
}
- if (instr.OutputCount() > 1) os << ") = ";
- if (instr.OutputCount() == 1) os << " = ";
-
os << ArchOpcodeField::decode(instr.opcode());
AddressingMode am = AddressingModeField::decode(instr.opcode());
if (am != kMode_None) {
@@ -556,16 +501,12 @@ std::ostream& operator<<(std::ostream& os,
if (fm != kFlags_none) {
os << " && " << fm << " if " << FlagsConditionField::decode(instr.opcode());
}
- if (instr.InputCount() > 0) {
- for (size_t i = 0; i < instr.InputCount(); i++) {
- printable_op.op_ = *instr.InputAt(i);
- os << " " << printable_op;
- }
+ for (size_t i = 0; i < instr.InputCount(); i++) {
+ os << " " << *instr.InputAt(i);
}
return os;
}
-
Constant::Constant(int32_t v) : type_(kInt32), value_(v) {}
Constant::Constant(RelocatablePtrConstantInfo info) {
@@ -583,13 +524,13 @@ Constant::Constant(RelocatablePtrConstantInfo info) {
Handle<HeapObject> Constant::ToHeapObject() const {
DCHECK_EQ(kHeapObject, type());
Handle<HeapObject> value(
- bit_cast<HeapObject**>(static_cast<intptr_t>(value_)));
+ reinterpret_cast<Address*>(static_cast<intptr_t>(value_)));
return value;
}
Handle<Code> Constant::ToCode() const {
DCHECK_EQ(kHeapObject, type());
- Handle<Code> value(bit_cast<Code**>(static_cast<intptr_t>(value_)));
+ Handle<Code> value(reinterpret_cast<Address*>(static_cast<intptr_t>(value_)));
return value;
}
@@ -623,7 +564,6 @@ std::ostream& operator<<(std::ostream& os, const Constant& constant) {
UNREACHABLE();
}
-
PhiInstruction::PhiInstruction(Zone* zone, int virtual_register,
size_t input_count)
: virtual_register_(virtual_register),
@@ -631,7 +571,6 @@ PhiInstruction::PhiInstruction(Zone* zone, int virtual_register,
operands_(input_count, InstructionOperand::kInvalidVirtualRegister,
zone) {}
-
void PhiInstruction::SetInput(size_t offset, int virtual_register) {
DCHECK_EQ(InstructionOperand::kInvalidVirtualRegister, operands_[offset]);
operands_[offset] = virtual_register;
@@ -648,17 +587,12 @@ InstructionBlock::InstructionBlock(Zone* zone, RpoNumber rpo_number,
: successors_(zone),
predecessors_(zone),
phis_(zone),
- ao_number_(rpo_number),
+ ao_number_(RpoNumber::Invalid()),
rpo_number_(rpo_number),
loop_header_(loop_header),
loop_end_(loop_end),
- code_start_(-1),
- code_end_(-1),
deferred_(deferred),
- handler_(handler),
- needs_frame_(false),
- must_construct_frame_(false),
- must_deconstruct_frame_(false) {}
+ handler_(handler) {}
size_t InstructionBlock::PredecessorIndexOf(RpoNumber rpo_number) const {
size_t j = 0;
@@ -669,19 +603,16 @@ size_t InstructionBlock::PredecessorIndexOf(RpoNumber rpo_number) const {
return j;
}
-
static RpoNumber GetRpo(const BasicBlock* block) {
if (block == nullptr) return RpoNumber::Invalid();
return RpoNumber::FromInt(block->rpo_number());
}
-
static RpoNumber GetLoopEndRpo(const BasicBlock* block) {
if (!block->IsLoopHeader()) return RpoNumber::Invalid();
return RpoNumber::FromInt(block->loop_end()->rpo_number());
}
-
static InstructionBlock* InstructionBlockFor(Zone* zone,
const BasicBlock* block) {
bool is_handler =
@@ -698,13 +629,16 @@ static InstructionBlock* InstructionBlockFor(Zone* zone,
for (BasicBlock* predecessor : block->predecessors()) {
instr_block->predecessors().push_back(GetRpo(predecessor));
}
+ if (block->PredecessorCount() == 1 &&
+ block->predecessors()[0]->control() == BasicBlock::Control::kSwitch) {
+ instr_block->set_switch_target(true);
+ }
return instr_block;
}
std::ostream& operator<<(std::ostream& os,
const PrintableInstructionBlock& printable_block) {
const InstructionBlock* block = printable_block.block_;
- const RegisterConfiguration* config = printable_block.register_configuration_;
const InstructionSequence* code = printable_block.code_;
os << "B" << block->rpo_number();
@@ -727,20 +661,17 @@ std::ostream& operator<<(std::ostream& os,
os << std::endl;
for (const PhiInstruction* phi : block->phis()) {
- PrintableInstructionOperand printable_op = {config, phi->output()};
- os << " phi: " << printable_op << " =";
+ os << " phi: " << phi->output() << " =";
for (int input : phi->operands()) {
os << " v" << input;
}
os << std::endl;
}
- PrintableInstruction printable_instr;
- printable_instr.register_configuration_ = config;
for (int j = block->first_instruction_index();
j <= block->last_instruction_index(); j++) {
- printable_instr.instr_ = code->InstructionAt(j);
- os << " " << std::setw(5) << j << ": " << printable_instr << std::endl;
+ os << " " << std::setw(5) << j << ": " << *code->InstructionAt(j)
+ << std::endl;
}
os << " successors:";
@@ -763,7 +694,6 @@ InstructionBlocks* InstructionSequence::InstructionBlocksFor(
DCHECK(GetRpo(*it).ToSize() == rpo_number);
(*blocks)[rpo_number] = InstructionBlockFor(zone, *it);
}
- ComputeAssemblyOrder(blocks);
return blocks;
}
@@ -822,18 +752,62 @@ void InstructionSequence::ValidateSSA() const {
}
}
-void InstructionSequence::ComputeAssemblyOrder(InstructionBlocks* blocks) {
+void InstructionSequence::ComputeAssemblyOrder() {
int ao = 0;
- for (InstructionBlock* const block : *blocks) {
- if (!block->IsDeferred()) {
- block->set_ao_number(RpoNumber::FromInt(ao++));
+ RpoNumber invalid = RpoNumber::Invalid();
+
+ ao_blocks_ = zone()->NewArray<InstructionBlocks>(1);
+ new (ao_blocks_) InstructionBlocks(zone());
+ ao_blocks_->reserve(instruction_blocks_->size());
+
+ // Place non-deferred blocks.
+ for (InstructionBlock* const block : *instruction_blocks_) {
+ DCHECK_NOT_NULL(block);
+ if (block->IsDeferred()) continue; // skip deferred blocks.
+ if (block->ao_number() != invalid) continue; // loop rotated.
+ if (block->IsLoopHeader()) {
+ bool header_align = true;
+ if (FLAG_turbo_loop_rotation) {
+ // Perform loop rotation for non-deferred loops.
+ InstructionBlock* loop_end =
+ instruction_blocks_->at(block->loop_end().ToSize() - 1);
+ if (loop_end->SuccessorCount() == 1 && /* ends with goto */
+ loop_end != block /* not a degenerate infinite loop */) {
+ // If the last block has an unconditional jump back to the header,
+ // then move it to be in front of the header in the assembly order.
+ DCHECK_EQ(block->rpo_number(), loop_end->successors()[0]);
+ loop_end->set_ao_number(RpoNumber::FromInt(ao++));
+ ao_blocks_->push_back(loop_end);
+ // This block will be the new machine-level loop header, so align
+ // this block instead of the loop header block.
+ loop_end->set_alignment(true);
+ header_align = false;
+ }
+ }
+ block->set_alignment(header_align);
+ }
+ if (block->loop_header().IsValid() && block->IsSwitchTarget()) {
+ block->set_alignment(true);
}
+ block->set_ao_number(RpoNumber::FromInt(ao++));
+ ao_blocks_->push_back(block);
}
- for (InstructionBlock* const block : *blocks) {
- if (block->IsDeferred()) {
+ // Add all leftover (deferred) blocks.
+ for (InstructionBlock* const block : *instruction_blocks_) {
+ if (block->ao_number() == invalid) {
block->set_ao_number(RpoNumber::FromInt(ao++));
+ ao_blocks_->push_back(block);
}
}
+ DCHECK_EQ(instruction_blocks_->size(), ao);
+}
+
+void InstructionSequence::RecomputeAssemblyOrderForTesting() {
+ RpoNumber invalid = RpoNumber::Invalid();
+ for (InstructionBlock* block : *instruction_blocks_) {
+ block->set_ao_number(invalid);
+ }
+ ComputeAssemblyOrder();
}
InstructionSequence::InstructionSequence(Isolate* isolate,
@@ -842,6 +816,7 @@ InstructionSequence::InstructionSequence(Isolate* isolate,
: isolate_(isolate),
zone_(instruction_zone),
instruction_blocks_(instruction_blocks),
+ ao_blocks_(nullptr),
source_positions_(zone()),
constants_(ConstantMap::key_compare(),
ConstantMap::allocator_type(zone())),
@@ -852,7 +827,9 @@ InstructionSequence::InstructionSequence(Isolate* isolate,
representations_(zone()),
representation_mask_(0),
deoptimization_entries_(zone()),
- current_block_(nullptr) {}
+ current_block_(nullptr) {
+ ComputeAssemblyOrder();
+}
int InstructionSequence::NextVirtualRegister() {
int virtual_register = next_virtual_register_++;
@@ -860,13 +837,11 @@ int InstructionSequence::NextVirtualRegister() {
return virtual_register;
}
-
Instruction* InstructionSequence::GetBlockStart(RpoNumber rpo) const {
const InstructionBlock* block = InstructionBlockAt(rpo);
return InstructionAt(block->code_start());
}
-
void InstructionSequence::StartBlock(RpoNumber rpo) {
DCHECK_NULL(current_block_);
current_block_ = InstructionBlockAt(rpo);
@@ -874,7 +849,6 @@ void InstructionSequence::StartBlock(RpoNumber rpo) {
current_block_->set_code_start(code_start);
}
-
void InstructionSequence::EndBlock(RpoNumber rpo) {
int end = static_cast<int>(instructions_.size());
DCHECK_EQ(current_block_->rpo_number(), rpo);
@@ -884,7 +858,6 @@ void InstructionSequence::EndBlock(RpoNumber rpo) {
current_block_ = nullptr;
}
-
int InstructionSequence::AddInstruction(Instruction* instr) {
DCHECK_NOT_NULL(current_block_);
int index = static_cast<int>(instructions_.size());
@@ -900,13 +873,11 @@ int InstructionSequence::AddInstruction(Instruction* instr) {
return index;
}
-
InstructionBlock* InstructionSequence::GetInstructionBlock(
int instruction_index) const {
return instructions()[instruction_index]->block();
}
-
static MachineRepresentation FilterRepresentation(MachineRepresentation rep) {
switch (rep) {
case MachineRepresentation::kBit:
@@ -929,7 +900,6 @@ static MachineRepresentation FilterRepresentation(MachineRepresentation rep) {
UNREACHABLE();
}
-
MachineRepresentation InstructionSequence::GetRepresentation(
int virtual_register) const {
DCHECK_LE(0, virtual_register);
@@ -940,7 +910,6 @@ MachineRepresentation InstructionSequence::GetRepresentation(
return representations_[virtual_register];
}
-
void InstructionSequence::MarkAsRepresentation(MachineRepresentation rep,
int virtual_register) {
DCHECK_LE(0, virtual_register);
@@ -969,7 +938,6 @@ DeoptimizationEntry const& InstructionSequence::GetDeoptimizationEntry(
return deoptimization_entries_[state_id];
}
-
RpoNumber InstructionSequence::InputRpo(Instruction* instr, size_t index) {
InstructionOperand* operand = instr->InputAt(index);
Constant constant =
@@ -979,7 +947,6 @@ RpoNumber InstructionSequence::InputRpo(Instruction* instr, size_t index) {
return constant.ToRpoNumber();
}
-
bool InstructionSequence::GetSourcePosition(const Instruction* instr,
SourcePosition* result) const {
auto it = source_positions_.find(instr);
@@ -988,32 +955,20 @@ bool InstructionSequence::GetSourcePosition(const Instruction* instr,
return true;
}
-
void InstructionSequence::SetSourcePosition(const Instruction* instr,
SourcePosition value) {
source_positions_.insert(std::make_pair(instr, value));
}
-void InstructionSequence::Print(const RegisterConfiguration* config) const {
- PrintableInstructionSequence wrapper;
- wrapper.register_configuration_ = config;
- wrapper.sequence_ = this;
- StdoutStream{} << wrapper << std::endl;
+void InstructionSequence::Print() const {
+ StdoutStream{} << *this << std::endl;
}
-void InstructionSequence::Print() const { Print(GetRegConfig()); }
-
-void InstructionSequence::PrintBlock(const RegisterConfiguration* config,
- int block_id) const {
+void InstructionSequence::PrintBlock(int block_id) const {
RpoNumber rpo = RpoNumber::FromInt(block_id);
const InstructionBlock* block = InstructionBlockAt(rpo);
CHECK(block->rpo_number() == rpo);
- PrintableInstructionBlock printable_block = {config, block, this};
- StdoutStream{} << printable_block << std::endl;
-}
-
-void InstructionSequence::PrintBlock(int block_id) const {
- PrintBlock(GetRegConfig(), block_id);
+ StdoutStream{} << PrintableInstructionBlock{block, this} << std::endl;
}
const RegisterConfiguration*
@@ -1052,7 +1007,6 @@ size_t FrameStateDescriptor::GetSize() const {
(HasContext() ? 1 : 0);
}
-
size_t FrameStateDescriptor::GetTotalSize() const {
size_t total_size = 0;
for (const FrameStateDescriptor* iter = this; iter != nullptr;
@@ -1062,7 +1016,6 @@ size_t FrameStateDescriptor::GetTotalSize() const {
return total_size;
}
-
size_t FrameStateDescriptor::GetFrameCount() const {
size_t count = 0;
for (const FrameStateDescriptor* iter = this; iter != nullptr;
@@ -1072,7 +1025,6 @@ size_t FrameStateDescriptor::GetFrameCount() const {
return count;
}
-
size_t FrameStateDescriptor::GetJSFrameCount() const {
size_t count = 0;
for (const FrameStateDescriptor* iter = this; iter != nullptr;
@@ -1084,15 +1036,11 @@ size_t FrameStateDescriptor::GetJSFrameCount() const {
return count;
}
-
std::ostream& operator<<(std::ostream& os, const RpoNumber& rpo) {
return os << rpo.ToSize();
}
-
-std::ostream& operator<<(std::ostream& os,
- const PrintableInstructionSequence& printable) {
- const InstructionSequence& code = *printable.sequence_;
+std::ostream& operator<<(std::ostream& os, const InstructionSequence& code) {
for (size_t i = 0; i < code.immediates_.size(); ++i) {
Constant constant = code.immediates_[i];
os << "IMM#" << i << ": " << constant << "\n";
@@ -1102,11 +1050,9 @@ std::ostream& operator<<(std::ostream& os,
it != code.constants_.end(); ++i, ++it) {
os << "CST#" << i << ": v" << it->first << " = " << it->second << "\n";
}
- PrintableInstructionBlock printable_block = {
- printable.register_configuration_, nullptr, printable.sequence_};
for (int i = 0; i < code.InstructionBlockCount(); i++) {
- printable_block.block_ = code.InstructionBlockAt(RpoNumber::FromInt(i));
- os << printable_block;
+ auto* block = code.InstructionBlockAt(RpoNumber::FromInt(i));
+ os << PrintableInstructionBlock{block, &code};
}
return os;
}
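
The ComputeAssemblyOrder rewrite above adds loop rotation: when a non-deferred loop's back-edge block ends in an unconditional goto to the header, it is emitted immediately before the header, so the back edge becomes a fall-through and the bottom of the loop serves as the machine-level header; deferred blocks are appended afterwards. A toy reordering sketch under those assumptions (not V8 code; the real pass also handles alignment and switch targets):

#include <cstdio>
#include <vector>

// Blocks are in RPO; a loop header names its back-edge block, and that
// block's sole successor is assumed to be the jump back to the header.
struct Block {
  int rpo;
  int back_edge = -1;  // for loop headers: index of the jump-back block
};

int main() {
  // B0 -> B1 (loop header) -> B2 (body, goto B1) -> B3.
  std::vector<Block> blocks = {{0}, {1, 2}, {2}, {3}};
  std::vector<bool> placed(blocks.size(), false);
  std::vector<int> order;
  for (const Block& b : blocks) {
    if (placed[b.rpo]) continue;
    if (b.back_edge >= 0) {
      order.push_back(b.back_edge);  // rotate: back-edge block right before
      placed[b.back_edge] = true;    // the header, so the jump falls through
    }
    order.push_back(b.rpo);
    placed[b.rpo] = true;
  }
  for (int rpo : order) std::printf("B%d ", rpo);  // B0 B2 B1 B3
  std::printf("\n");
  return 0;
}
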
diff --git a/deps/v8/src/compiler/instruction.h b/deps/v8/src/compiler/backend/instruction.h
index 39d083c2de..760f9ffd88 100644
--- a/deps/v8/src/compiler/instruction.h
+++ b/deps/v8/src/compiler/backend/instruction.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_COMPILER_INSTRUCTION_H_
-#define V8_COMPILER_INSTRUCTION_H_
+#ifndef V8_COMPILER_BACKEND_INSTRUCTION_H_
+#define V8_COMPILER_BACKEND_INSTRUCTION_H_
#include <deque>
#include <iosfwd>
@@ -11,19 +11,22 @@
#include <set>
#include "src/base/compiler-specific.h"
+#include "src/compiler/backend/instruction-codes.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/frame.h"
-#include "src/compiler/instruction-codes.h"
#include "src/compiler/opcodes.h"
#include "src/double.h"
+#include "src/external-reference.h"
#include "src/globals.h"
-#include "src/macro-assembler.h"
-#include "src/register-configuration.h"
+#include "src/register-arch.h"
#include "src/source-position.h"
#include "src/zone/zone-allocator.h"
namespace v8 {
namespace internal {
+
+class RegisterConfiguration;
+
namespace compiler {
class Schedule;
@@ -119,8 +122,7 @@ class V8_EXPORT_PRIVATE InstructionOperand {
bool InterferesWith(const InstructionOperand& other) const;
- // APIs to aid debugging. For general-stream APIs, use operator<<
- void Print(const RegisterConfiguration* config) const;
+ // APIs to aid debugging. For general-stream APIs, use operator<<.
void Print() const;
protected:
@@ -133,19 +135,9 @@ class V8_EXPORT_PRIVATE InstructionOperand {
uint64_t value_;
};
-
typedef ZoneVector<InstructionOperand> InstructionOperandVector;
-
-struct PrintableInstructionOperand {
- const RegisterConfiguration* register_configuration_;
- InstructionOperand op_;
-};
-
-
-std::ostream& operator<<(std::ostream& os,
- const PrintableInstructionOperand& op);
-
+std::ostream& operator<<(std::ostream&, const InstructionOperand&);
#define INSTRUCTION_OPERAND_CASTS(OperandType, OperandKind) \
\
@@ -282,9 +274,7 @@ class UnallocatedOperand final : public InstructionOperand {
}
// [basic_policy]: Distinguish between FIXED_SLOT and all other policies.
- BasicPolicy basic_policy() const {
- return BasicPolicyField::decode(value_);
- }
+ BasicPolicy basic_policy() const { return BasicPolicyField::decode(value_); }
// [extended_policy]: Only for non-FIXED_SLOT. The finer-grained policy.
ExtendedPolicy extended_policy() const {
@@ -363,7 +353,6 @@ class UnallocatedOperand final : public InstructionOperand {
}
};
-
class ConstantOperand : public InstructionOperand {
public:
explicit ConstantOperand(int virtual_register)
@@ -386,7 +375,6 @@ class ConstantOperand : public InstructionOperand {
class VirtualRegisterField : public BitField64<uint32_t, 3, 32> {};
};
-
class ImmediateOperand : public InstructionOperand {
public:
enum ImmediateType { INLINE, INDEXED };
@@ -420,7 +408,6 @@ class ImmediateOperand : public InstructionOperand {
class ValueField : public BitField64<int32_t, 32, 32> {};
};
-
class LocationOperand : public InstructionOperand {
public:
enum LocationKind { REGISTER, STACK_SLOT };
@@ -534,7 +521,6 @@ class V8_EXPORT_PRIVATE ExplicitOperand
INSTRUCTION_OPERAND_CASTS(ExplicitOperand, EXPLICIT);
};
-
class AllocatedOperand : public LocationOperand {
public:
AllocatedOperand(LocationKind kind, MachineRepresentation rep, int index)
@@ -548,7 +534,6 @@ class AllocatedOperand : public LocationOperand {
INSTRUCTION_OPERAND_CASTS(AllocatedOperand, ALLOCATED);
};
-
#undef INSTRUCTION_OPERAND_CASTS
bool InstructionOperand::IsAnyLocationOperand() const {
@@ -571,7 +556,6 @@ bool InstructionOperand::IsAnyRegister() const {
LocationOperand::REGISTER;
}
-
bool InstructionOperand::IsRegister() const {
return IsAnyRegister() &&
!IsFloatingPoint(LocationOperand::cast(this)->representation());
@@ -583,15 +567,13 @@ bool InstructionOperand::IsFPRegister() const {
}
bool InstructionOperand::IsFloatRegister() const {
- return IsAnyRegister() &&
- LocationOperand::cast(this)->representation() ==
- MachineRepresentation::kFloat32;
+ return IsAnyRegister() && LocationOperand::cast(this)->representation() ==
+ MachineRepresentation::kFloat32;
}
bool InstructionOperand::IsDoubleRegister() const {
- return IsAnyRegister() &&
- LocationOperand::cast(this)->representation() ==
- MachineRepresentation::kFloat64;
+ return IsAnyRegister() && LocationOperand::cast(this)->representation() ==
+ MachineRepresentation::kFloat64;
}
bool InstructionOperand::IsSimd128Register() const {
@@ -707,8 +689,7 @@ class V8_EXPORT_PRIVATE MoveOperands final
return source_.IsInvalid();
}
- // APIs to aid debugging. For general-stream APIs, use operator<<
- void Print(const RegisterConfiguration* config) const;
+ // APIs to aid debugging. For general-stream APIs, use operator<<.
void Print() const;
private:
@@ -718,22 +699,13 @@ class V8_EXPORT_PRIVATE MoveOperands final
DISALLOW_COPY_AND_ASSIGN(MoveOperands);
};
-
-struct PrintableMoveOperands {
- const RegisterConfiguration* register_configuration_;
- const MoveOperands* move_operands_;
-};
-
-
-std::ostream& operator<<(std::ostream& os, const PrintableMoveOperands& mo);
+std::ostream& operator<<(std::ostream&, const MoveOperands&);
class V8_EXPORT_PRIVATE ParallelMove final
- : public NON_EXPORTED_BASE(ZoneVector<MoveOperands *>),
+ : public NON_EXPORTED_BASE(ZoneVector<MoveOperands*>),
public NON_EXPORTED_BASE(ZoneObject) {
public:
- explicit ParallelMove(Zone* zone) : ZoneVector<MoveOperands*>(zone) {
- reserve(4);
- }
+ explicit ParallelMove(Zone* zone) : ZoneVector<MoveOperands*>(zone) {}
MoveOperands* AddMove(const InstructionOperand& from,
const InstructionOperand& to) {
@@ -744,7 +716,9 @@ class V8_EXPORT_PRIVATE ParallelMove final
MoveOperands* AddMove(const InstructionOperand& from,
const InstructionOperand& to,
Zone* operand_allocation_zone) {
+ if (from.EqualsCanonicalized(to)) return nullptr;
MoveOperands* move = new (operand_allocation_zone) MoveOperands(from, to);
+ if (empty()) reserve(4);
push_back(move);
return move;
}
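Two behavioral tweaks hide in this small hunk: `AddMove` now returns `nullptr` instead of recording a move whose source and destination canonicalize to the same location, and the `reserve(4)` moved from the constructor into the first insertion, so parallel moves that stay empty never pay for storage. A hedged sketch of the same idea over a plain `std::vector`:

```cpp
#include <vector>

// Simplified stand-ins; equality here plays the role of
// InstructionOperand::EqualsCanonicalized().
struct Operand { int loc; };
struct Move { Operand from, to; };

class ParallelMoveSketch {
 public:
  // Mirrors the new AddMove: elide no-op moves, reserve lazily.
  Move* AddMove(Operand from, Operand to) {
    if (from.loc == to.loc) return nullptr;  // nothing to do
    if (moves_.empty()) moves_.reserve(4);   // first real move pays for storage
    moves_.push_back(Move{from, to});
    return &moves_.back();
  }

 private:
  std::vector<Move> moves_;
};
```

Callers that previously assumed a non-null result have to tolerate the `nullptr` case, which is the price of filtering redundant moves at the source.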
@@ -761,15 +735,7 @@ class V8_EXPORT_PRIVATE ParallelMove final
DISALLOW_COPY_AND_ASSIGN(ParallelMove);
};
-
-struct PrintableParallelMove {
- const RegisterConfiguration* register_configuration_;
- const ParallelMove* parallel_move_;
-};
-
-
-std::ostream& operator<<(std::ostream& os, const PrintableParallelMove& pm);
-
+std::ostream& operator<<(std::ostream&, const ParallelMove&);
class ReferenceMap final : public ZoneObject {
public:
@@ -789,13 +755,13 @@ class ReferenceMap final : public ZoneObject {
void RecordReference(const AllocatedOperand& op);
private:
- friend std::ostream& operator<<(std::ostream& os, const ReferenceMap& pm);
+ friend std::ostream& operator<<(std::ostream&, const ReferenceMap&);
ZoneVector<InstructionOperand> reference_operands_;
int instruction_position_;
};
-std::ostream& operator<<(std::ostream& os, const ReferenceMap& pm);
+std::ostream& operator<<(std::ostream&, const ReferenceMap&);
class InstructionBlock;
@@ -954,8 +920,7 @@ class V8_EXPORT_PRIVATE Instruction final {
block_ = block;
}
- // APIs to aid debugging. For general-stream APIs, use operator<<
- void Print(const RegisterConfiguration* config) const;
+ // APIs to aid debugging. For general-stream APIs, use operator<<.
void Print() const;
typedef BitField<size_t, 0, 8> OutputCountField;
@@ -986,13 +951,7 @@ class V8_EXPORT_PRIVATE Instruction final {
DISALLOW_COPY_AND_ASSIGN(Instruction);
};
-
-struct PrintableInstruction {
- const RegisterConfiguration* register_configuration_;
- const Instruction* instr_;
-};
-std::ostream& operator<<(std::ostream& os, const PrintableInstruction& instr);
-
+std::ostream& operator<<(std::ostream&, const Instruction&);
class RpoNumber final {
public:
@@ -1027,7 +986,6 @@ class RpoNumber final {
int32_t index_;
};
-
std::ostream& operator<<(std::ostream&, const RpoNumber&);
class V8_EXPORT_PRIVATE Constant final {
@@ -1111,9 +1069,7 @@ class V8_EXPORT_PRIVATE Constant final {
int64_t value_;
};
-
-std::ostream& operator<<(std::ostream& os, const Constant& constant);
-
+std::ostream& operator<<(std::ostream&, const Constant&);
// Forward declarations.
class FrameStateDescriptor;
@@ -1372,7 +1328,6 @@ class V8_EXPORT_PRIVATE PhiInstruction final
IntVector operands_;
};
-
// Analogue of BasicBlock for Instructions instead of Nodes.
class V8_EXPORT_PRIVATE InstructionBlock final
: public NON_EXPORTED_BASE(ZoneObject) {
@@ -1411,6 +1366,8 @@ class V8_EXPORT_PRIVATE InstructionBlock final
return loop_end_;
}
inline bool IsLoopHeader() const { return loop_end_.IsValid(); }
+ inline bool IsSwitchTarget() const { return switch_target_; }
+ inline bool ShouldAlign() const { return alignment_; }
typedef ZoneVector<RpoNumber> Predecessors;
Predecessors& predecessors() { return predecessors_; }
@@ -1430,6 +1387,10 @@ class V8_EXPORT_PRIVATE InstructionBlock final
void set_ao_number(RpoNumber ao_number) { ao_number_ = ao_number; }
+ void set_alignment(bool val) { alignment_ = val; }
+
+ void set_switch_target(bool val) { switch_target_ = val; }
+
bool needs_frame() const { return needs_frame_; }
void mark_needs_frame() { needs_frame_ = true; }
@@ -1448,24 +1409,24 @@ class V8_EXPORT_PRIVATE InstructionBlock final
const RpoNumber loop_header_;
const RpoNumber loop_end_;
int32_t code_start_; // start index of arch-specific code.
- int32_t code_end_; // end index of arch-specific code.
- const bool deferred_; // Block contains deferred code.
+ int32_t code_end_ = -1; // end index of arch-specific code.
+ const bool deferred_ = -1; // Block contains deferred code.
const bool handler_; // Block is a handler entry point.
- bool needs_frame_;
- bool must_construct_frame_;
- bool must_deconstruct_frame_;
+ bool switch_target_ = false;
+ bool alignment_ = false; // insert alignment before this block
+ bool needs_frame_ = false;
+ bool must_construct_frame_ = false;
+ bool must_deconstruct_frame_ = false;
};
class InstructionSequence;
struct PrintableInstructionBlock {
- const RegisterConfiguration* register_configuration_;
const InstructionBlock* block_;
const InstructionSequence* code_;
};
-std::ostream& operator<<(std::ostream& os,
- const PrintableInstructionBlock& printable_block);
+std::ostream& operator<<(std::ostream&, const PrintableInstructionBlock&);
typedef ZoneDeque<Constant> ConstantDeque;
typedef std::map<int, Constant, std::less<int>,
@@ -1476,11 +1437,6 @@ typedef ZoneDeque<Instruction*> InstructionDeque;
typedef ZoneDeque<ReferenceMap*> ReferenceMapDeque;
typedef ZoneVector<InstructionBlock*> InstructionBlocks;
-
-// Forward declarations.
-struct PrintableInstructionSequence;
-
-
// Represents architecture-specific generated code before, during, and after
// register allocation.
class V8_EXPORT_PRIVATE InstructionSequence final
@@ -1488,9 +1444,6 @@ class V8_EXPORT_PRIVATE InstructionSequence final
public:
static InstructionBlocks* InstructionBlocksFor(Zone* zone,
const Schedule* schedule);
- // Puts the deferred blocks last.
- static void ComputeAssemblyOrder(InstructionBlocks* blocks);
-
InstructionSequence(Isolate* isolate, Zone* zone,
InstructionBlocks* instruction_blocks);
@@ -1501,6 +1454,8 @@ class V8_EXPORT_PRIVATE InstructionSequence final
return *instruction_blocks_;
}
+ const InstructionBlocks& ao_blocks() const { return *ao_blocks_; }
+
int InstructionBlockCount() const {
return static_cast<int>(instruction_blocks_->size());
}
@@ -1629,11 +1584,9 @@ class V8_EXPORT_PRIVATE InstructionSequence final
return false;
}
- // APIs to aid debugging. For general-stream APIs, use operator<<
- void Print(const RegisterConfiguration* config) const;
+ // APIs to aid debugging. For general-stream APIs, use operator<<.
void Print() const;
- void PrintBlock(const RegisterConfiguration* config, int block_id) const;
void PrintBlock(int block_id) const;
void ValidateEdgeSplitForm() const;
@@ -1645,18 +1598,24 @@ class V8_EXPORT_PRIVATE InstructionSequence final
const RegisterConfiguration* regConfig);
static void ClearRegisterConfigurationForTesting();
+ void RecomputeAssemblyOrderForTesting();
+
private:
- friend V8_EXPORT_PRIVATE std::ostream& operator<<(
- std::ostream& os, const PrintableInstructionSequence& code);
+ friend V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&,
+ const InstructionSequence&);
typedef ZoneMap<const Instruction*, SourcePosition> SourcePositionMap;
static const RegisterConfiguration* RegisterConfigurationForTesting();
static const RegisterConfiguration* registerConfigurationForTesting_;
+ // Puts the deferred blocks last and may rotate loops.
+ void ComputeAssemblyOrder();
+
Isolate* isolate_;
Zone* const zone_;
InstructionBlocks* const instruction_blocks_;
+ InstructionBlocks* ao_blocks_;
SourcePositionMap source_positions_;
ConstantMap constants_;
Immediates immediates_;
@@ -1673,17 +1632,11 @@ class V8_EXPORT_PRIVATE InstructionSequence final
DISALLOW_COPY_AND_ASSIGN(InstructionSequence);
};
-
-struct PrintableInstructionSequence {
- const RegisterConfiguration* register_configuration_;
- const InstructionSequence* sequence_;
-};
-
-V8_EXPORT_PRIVATE std::ostream& operator<<(
- std::ostream& os, const PrintableInstructionSequence& code);
+V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&,
+ const InstructionSequence&);
} // namespace compiler
} // namespace internal
} // namespace v8
-#endif // V8_COMPILER_INSTRUCTION_H_
+#endif // V8_COMPILER_BACKEND_INSTRUCTION_H_
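The header changes make `ComputeAssemblyOrder` a private instance method ("puts the deferred blocks last and may rotate loops") and expose the result through `ao_blocks()`. Setting loop rotation aside, the deferred-last ordering can be sketched like this; a toy model, not the V8 implementation:

```cpp
#include <vector>

struct Block {
  int rpo;        // reverse-postorder number
  bool deferred;  // rarely-executed code, e.g. bailout paths
  int ao = -1;    // assembly-order number, assigned below
};

// Emit hot blocks first in RPO, then append all deferred blocks, pushing
// cold code out of the main instruction stream.
std::vector<Block*> ComputeAssemblyOrder(const std::vector<Block*>& blocks) {
  std::vector<Block*> ao_blocks;
  int ao = 0;
  for (Block* b : blocks)
    if (!b->deferred) { b->ao = ao++; ao_blocks.push_back(b); }
  for (Block* b : blocks)
    if (b->deferred) { b->ao = ao++; ao_blocks.push_back(b); }
  return ao_blocks;
}
```

Keeping the computed order in `ao_blocks_` is what lets later passes, such as jump threading below, consume it in a single loop.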
diff --git a/deps/v8/src/compiler/jump-threading.cc b/deps/v8/src/compiler/backend/jump-threading.cc
index d10f06e4f4..dfb917a58c 100644
--- a/deps/v8/src/compiler/jump-threading.cc
+++ b/deps/v8/src/compiler/backend/jump-threading.cc
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/compiler/jump-threading.h"
-#include "src/compiler/code-generator-impl.h"
+#include "src/compiler/backend/jump-threading.h"
+#include "src/compiler/backend/code-generator-impl.h"
namespace v8 {
namespace internal {
@@ -197,19 +197,12 @@ void JumpThreading::ApplyForwarding(Zone* local_zone,
}
}
- // Recompute assembly order numbers.
+ // Renumber the blocks so that IsNextInAssemblyOrder() will return true,
+ // even if there are skipped blocks in-between.
int ao = 0;
- for (auto const block : code->instruction_blocks()) {
- if (!block->IsDeferred()) {
- block->set_ao_number(RpoNumber::FromInt(ao));
- if (!skip[block->rpo_number().ToInt()]) ao++;
- }
- }
- for (auto const block : code->instruction_blocks()) {
- if (block->IsDeferred()) {
- block->set_ao_number(RpoNumber::FromInt(ao));
- if (!skip[block->rpo_number().ToInt()]) ao++;
- }
+ for (auto const block : code->ao_blocks()) {
+ block->set_ao_number(RpoNumber::FromInt(ao));
+ if (!skip[block->rpo_number().ToInt()]) ao++;
}
}
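Because `ao_blocks()` already lists blocks in assembly order with deferred blocks last, the old two-pass renumbering collapses into one loop. A small model of the renumbering, in which skipped (forwarded) blocks share their number with the next emitted block so `IsNextInAssemblyOrder()` keeps holding:

```cpp
#include <vector>

struct Block { int rpo; int ao; };

// Skipped blocks keep the current counter; only blocks that will actually
// be emitted advance it, so emitted neighbors get consecutive numbers.
void Renumber(const std::vector<Block*>& ao_blocks,
              const std::vector<bool>& skip) {
  int ao = 0;
  for (Block* block : ao_blocks) {
    block->ao = ao;
    if (!skip[block->rpo]) ao++;
  }
}
```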
diff --git a/deps/v8/src/compiler/jump-threading.h b/deps/v8/src/compiler/backend/jump-threading.h
index 4d57f281c5..45d601cc34 100644
--- a/deps/v8/src/compiler/jump-threading.h
+++ b/deps/v8/src/compiler/backend/jump-threading.h
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_COMPILER_JUMP_THREADING_H_
-#define V8_COMPILER_JUMP_THREADING_H_
+#ifndef V8_COMPILER_BACKEND_JUMP_THREADING_H_
+#define V8_COMPILER_BACKEND_JUMP_THREADING_H_
-#include "src/compiler/instruction.h"
+#include "src/compiler/backend/instruction.h"
namespace v8 {
namespace internal {
@@ -31,4 +31,4 @@ class JumpThreading {
} // namespace internal
} // namespace v8
-#endif // V8_COMPILER_JUMP_THREADING_H_
+#endif // V8_COMPILER_BACKEND_JUMP_THREADING_H_
diff --git a/deps/v8/src/compiler/live-range-separator.cc b/deps/v8/src/compiler/backend/live-range-separator.cc
index 67d1c77a83..f0173e6ed7 100644
--- a/deps/v8/src/compiler/live-range-separator.cc
+++ b/deps/v8/src/compiler/backend/live-range-separator.cc
@@ -2,24 +2,21 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/compiler/live-range-separator.h"
-#include "src/compiler/register-allocator.h"
+#include "src/compiler/backend/live-range-separator.h"
+#include "src/compiler/backend/register-allocator.h"
namespace v8 {
namespace internal {
namespace compiler {
-
#define TRACE(...) \
do { \
if (FLAG_trace_alloc) PrintF(__VA_ARGS__); \
} while (false)
-
namespace {
-
-void CreateSplinter(TopLevelLiveRange *range, RegisterAllocationData *data,
+void CreateSplinter(TopLevelLiveRange* range, RegisterAllocationData* data,
LifetimePosition first_cut, LifetimePosition last_cut) {
DCHECK(!range->IsSplinter());
// We can ignore ranges that live solely in deferred blocks.
@@ -45,22 +42,23 @@ void CreateSplinter(TopLevelLiveRange *range, RegisterAllocationData *data,
data->CreateSpillRangeForLiveRange(range);
}
if (range->splinter() == nullptr) {
- TopLevelLiveRange *splinter =
+ TopLevelLiveRange* splinter =
data->NextLiveRange(range->representation());
DCHECK_NULL(data->live_ranges()[splinter->vreg()]);
data->live_ranges()[splinter->vreg()] = splinter;
range->SetSplinter(splinter);
}
- Zone *zone = data->allocation_zone();
- TRACE("creating splinter for range %d between %d and %d\n", range->vreg(),
- start.ToInstructionIndex(), end.ToInstructionIndex());
+ Zone* zone = data->allocation_zone();
+ TRACE("creating splinter %d for range %d between %d and %d\n",
+ range->splinter()->vreg(), range->vreg(), start.ToInstructionIndex(),
+ end.ToInstructionIndex());
range->Splinter(start, end, zone);
}
}
-void SetSlotUse(TopLevelLiveRange *range) {
+void SetSlotUse(TopLevelLiveRange* range) {
range->set_has_slot_use(false);
- for (const UsePosition *pos = range->first_pos();
+ for (const UsePosition* pos = range->first_pos();
!range->has_slot_use() && pos != nullptr; pos = pos->next()) {
if (pos->type() == UsePositionType::kRequiresSlot) {
range->set_has_slot_use(true);
@@ -68,29 +66,38 @@ void SetSlotUse(TopLevelLiveRange *range) {
}
}
-void SplinterLiveRange(TopLevelLiveRange *range, RegisterAllocationData *data) {
- const InstructionSequence *code = data->code();
- UseInterval *interval = range->first_interval();
+void SplinterLiveRange(TopLevelLiveRange* range, RegisterAllocationData* data) {
+ const InstructionSequence* code = data->code();
+ UseInterval* interval = range->first_interval();
LifetimePosition first_cut = LifetimePosition::Invalid();
LifetimePosition last_cut = LifetimePosition::Invalid();
while (interval != nullptr) {
- UseInterval *next_interval = interval->next();
- const InstructionBlock *first_block =
+ // We have to cache these here, as splintering might destroy the original
+ // interval below.
+ UseInterval* next_interval = interval->next();
+ LifetimePosition interval_end = interval->end();
+ const InstructionBlock* first_block =
code->GetInstructionBlock(interval->FirstGapIndex());
- const InstructionBlock *last_block =
+ const InstructionBlock* last_block =
code->GetInstructionBlock(interval->LastGapIndex());
int first_block_nr = first_block->rpo_number().ToInt();
int last_block_nr = last_block->rpo_number().ToInt();
for (int block_id = first_block_nr; block_id <= last_block_nr; ++block_id) {
- const InstructionBlock *current_block =
+ const InstructionBlock* current_block =
code->InstructionBlockAt(RpoNumber::FromInt(block_id));
if (current_block->IsDeferred()) {
if (!first_cut.IsValid()) {
first_cut = LifetimePosition::GapFromInstructionIndex(
current_block->first_instruction_index());
}
+ // We splinter until the last gap in the block. I assume this is done to
+ // leave a little range to be allocated by normal register allocation
+ // and then use that range to connect when splinters are merged back.
+ // This might be done as control flow resolution does not insert moves
+ // if two consecutive blocks in rpo order are also consecutive in
+ // control flow.
last_cut = LifetimePosition::GapFromInstructionIndex(
current_block->last_instruction_index());
} else {
@@ -101,13 +108,20 @@ void SplinterLiveRange(TopLevelLiveRange *range, RegisterAllocationData *data) {
}
}
}
+ // If we reach the end of an interval with a first_cut and last_cut set, it
+ // means that we can splinter to the end of the interval, as the value dies
+ // in this control flow branch or is not live in the next block. In the
+ // former case, we won't need to reload the value, so we can splinter to the
+ // end of its lifetime. In the latter case, control flow resolution will
+  // have to connect blocks anyway, so we can splinter to the end of the
+  // block as well.
+ if (first_cut.IsValid()) {
+ CreateSplinter(range, data, first_cut, interval_end);
+ first_cut = LifetimePosition::Invalid();
+ last_cut = LifetimePosition::Invalid();
+ }
interval = next_interval;
}
- // When the range ends in deferred blocks, first_cut will be valid here.
- // Splinter from there to the last instruction that was in a deferred block.
- if (first_cut.IsValid()) {
- CreateSplinter(range, data, first_cut, last_cut);
- }
// Redo has_slot_use
if (range->has_slot_use() && range->splinter() != nullptr) {
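The rewritten loop splinters per interval instead of once at the very end: a cut opens at the first deferred block, closes at the last gap when a non-deferred block is reached, and if the interval finishes with a cut still open, the splinter runs to the cached `interval_end`, for the reasons spelled out in the new comments. A schematic of that walk with plain ints for lifetime positions; illustrative only, not the real data model:

```cpp
#include <vector>

struct BlockInfo { int first_gap, last_gap; bool deferred; };

// Walk one interval's blocks; create_splinter(from, to) stands in for
// CreateSplinter(range, data, from, to).
template <typename F>
void SplinterInterval(const std::vector<BlockInfo>& blocks, int interval_end,
                      F create_splinter) {
  int first_cut = -1, last_cut = -1;
  for (const BlockInfo& b : blocks) {
    if (b.deferred) {
      if (first_cut < 0) first_cut = b.first_gap;  // open a cut
      last_cut = b.last_gap;  // splinter only up to the block's last gap
    } else if (first_cut >= 0) {
      create_splinter(first_cut, last_cut);  // deferred run ended
      first_cut = last_cut = -1;
    }
  }
  // Interval ends while a cut is open: the value dies here or control-flow
  // resolution reconnects it anyway, so splinter to the interval's end.
  if (first_cut >= 0) create_splinter(first_cut, interval_end);
}
```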
@@ -118,11 +132,10 @@ void SplinterLiveRange(TopLevelLiveRange *range, RegisterAllocationData *data) {
} // namespace
-
void LiveRangeSeparator::Splinter() {
size_t virt_reg_count = data()->live_ranges().size();
for (size_t vreg = 0; vreg < virt_reg_count; ++vreg) {
- TopLevelLiveRange *range = data()->live_ranges()[vreg];
+ TopLevelLiveRange* range = data()->live_ranges()[vreg];
if (range == nullptr || range->IsEmpty() || range->IsSplinter()) {
continue;
}
@@ -133,16 +146,15 @@ void LiveRangeSeparator::Splinter() {
}
}
-
void LiveRangeMerger::MarkRangesSpilledInDeferredBlocks() {
- const InstructionSequence *code = data()->code();
- for (TopLevelLiveRange *top : data()->live_ranges()) {
+ const InstructionSequence* code = data()->code();
+ for (TopLevelLiveRange* top : data()->live_ranges()) {
if (top == nullptr || top->IsEmpty() || top->splinter() == nullptr ||
top->HasSpillOperand() || !top->splinter()->HasSpillRange()) {
continue;
}
- LiveRange *child = top;
+ LiveRange* child = top;
for (; child != nullptr; child = child->next()) {
if (child->spilled() ||
child->NextSlotPosition(child->Start()) != nullptr) {
@@ -156,17 +168,16 @@ void LiveRangeMerger::MarkRangesSpilledInDeferredBlocks() {
}
}
-
void LiveRangeMerger::Merge() {
MarkRangesSpilledInDeferredBlocks();
int live_range_count = static_cast<int>(data()->live_ranges().size());
for (int i = 0; i < live_range_count; ++i) {
- TopLevelLiveRange *range = data()->live_ranges()[i];
+ TopLevelLiveRange* range = data()->live_ranges()[i];
if (range == nullptr || range->IsEmpty() || !range->IsSplinter()) {
continue;
}
- TopLevelLiveRange *splinter_parent = range->splintered_from();
+ TopLevelLiveRange* splinter_parent = range->splintered_from();
int to_remove = range->vreg();
splinter_parent->Merge(range, data()->allocation_zone());
diff --git a/deps/v8/src/compiler/live-range-separator.h b/deps/v8/src/compiler/backend/live-range-separator.h
index 0d48f25e5d..dc7b141a9f 100644
--- a/deps/v8/src/compiler/live-range-separator.h
+++ b/deps/v8/src/compiler/backend/live-range-separator.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_COMPILER_LIVE_RANGE_SEPARATOR_H_
-#define V8_COMPILER_LIVE_RANGE_SEPARATOR_H_
+#ifndef V8_COMPILER_BACKEND_LIVE_RANGE_SEPARATOR_H_
+#define V8_COMPILER_BACKEND_LIVE_RANGE_SEPARATOR_H_
#include "src/zone/zone.h"
namespace v8 {
@@ -15,7 +15,6 @@ namespace compiler {
class RegisterAllocationData;
-
// A register allocation pair of transformations: splinter and merge live ranges
class LiveRangeSeparator final : public ZoneObject {
public:
@@ -34,7 +33,6 @@ class LiveRangeSeparator final : public ZoneObject {
DISALLOW_COPY_AND_ASSIGN(LiveRangeSeparator);
};
-
class LiveRangeMerger final : public ZoneObject {
public:
LiveRangeMerger(RegisterAllocationData* data, Zone* zone)
@@ -57,8 +55,7 @@ class LiveRangeMerger final : public ZoneObject {
DISALLOW_COPY_AND_ASSIGN(LiveRangeMerger);
};
-
} // namespace compiler
} // namespace internal
} // namespace v8
-#endif // V8_COMPILER_LIVE_RANGE_SEPARATOR_H_
+#endif // V8_COMPILER_BACKEND_LIVE_RANGE_SEPARATOR_H_
diff --git a/deps/v8/src/compiler/backend/mips/OWNERS b/deps/v8/src/compiler/backend/mips/OWNERS
new file mode 100644
index 0000000000..b455d9ef29
--- /dev/null
+++ b/deps/v8/src/compiler/backend/mips/OWNERS
@@ -0,0 +1,3 @@
+arikalo@wavecomp.com
+prudic@wavecomp.com
+skovacevic@wavecomp.com
diff --git a/deps/v8/src/compiler/mips/code-generator-mips.cc b/deps/v8/src/compiler/backend/mips/code-generator-mips.cc
index e44ffee34b..af726bd065 100644
--- a/deps/v8/src/compiler/mips/code-generator-mips.cc
+++ b/deps/v8/src/compiler/backend/mips/code-generator-mips.cc
@@ -4,14 +4,15 @@
#include "src/assembler-inl.h"
#include "src/callable.h"
-#include "src/compiler/code-generator-impl.h"
-#include "src/compiler/code-generator.h"
-#include "src/compiler/gap-resolver.h"
+#include "src/compiler/backend/code-generator-impl.h"
+#include "src/compiler/backend/code-generator.h"
+#include "src/compiler/backend/gap-resolver.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/osr.h"
-#include "src/heap/heap-inl.h"
-#include "src/mips/macro-assembler-mips.h"
+#include "src/heap/heap-inl.h" // crbug.com/v8/8499
+#include "src/macro-assembler.h"
#include "src/optimized-compilation-info.h"
+#include "src/wasm/wasm-code-manager.h"
namespace v8 {
namespace internal {
@@ -28,7 +29,6 @@ namespace compiler {
PrintF("UNIMPLEMENTED code_generator_mips: %s at line %d\n", __FUNCTION__, \
__LINE__)
-
// Adds Mips-specific methods to convert InstructionOperands.
class MipsOperandConverter final : public InstructionOperandConverter {
public:
@@ -131,20 +131,17 @@ class MipsOperandConverter final : public InstructionOperandConverter {
}
};
-
static inline bool HasRegisterInput(Instruction* instr, size_t index) {
return instr->InputAt(index)->IsRegister();
}
-
namespace {
-
class OutOfLineRecordWrite final : public OutOfLineCode {
public:
OutOfLineRecordWrite(CodeGenerator* gen, Register object, Register index,
Register value, Register scratch0, Register scratch1,
- RecordWriteMode mode)
+ RecordWriteMode mode, StubCallMode stub_mode)
: OutOfLineCode(gen),
object_(object),
index_(index),
@@ -152,31 +149,10 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
scratch0_(scratch0),
scratch1_(scratch1),
mode_(mode),
+ stub_mode_(stub_mode),
must_save_lr_(!gen->frame_access_state()->has_frame()),
zone_(gen->zone()) {}
- void SaveRegisters(RegList registers) {
- DCHECK_LT(0, NumRegs(registers));
- RegList regs = 0;
- for (int i = 0; i < Register::kNumRegisters; ++i) {
- if ((registers >> i) & 1u) {
- regs |= Register::from_code(i).bit();
- }
- }
- __ MultiPush(regs | ra.bit());
- }
-
- void RestoreRegisters(RegList registers) {
- DCHECK_LT(0, NumRegs(registers));
- RegList regs = 0;
- for (int i = 0; i < Register::kNumRegisters; ++i) {
- if ((registers >> i) & 1u) {
- regs |= Register::from_code(i).bit();
- }
- }
- __ MultiPop(regs | ra.bit());
- }
-
void Generate() final {
if (mode_ > RecordWriteMode::kValueIsPointer) {
__ JumpIfSmi(value_, exit());
@@ -194,8 +170,17 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
// We need to save and restore ra if the frame was elided.
__ Push(ra);
}
- __ CallRecordWriteStub(object_, scratch1_, remembered_set_action,
- save_fp_mode);
+
+ if (stub_mode_ == StubCallMode::kCallWasmRuntimeStub) {
+ // A direct call to a wasm runtime stub defined in this module.
+ // Just encode the stub index. This will be patched when the code
+ // is added to the native module and copied into wasm code space.
+ __ CallRecordWriteStub(object_, scratch1_, remembered_set_action,
+ save_fp_mode, wasm::WasmCode::kWasmRecordWrite);
+ } else {
+ __ CallRecordWriteStub(object_, scratch1_, remembered_set_action,
+ save_fp_mode);
+ }
if (must_save_lr_) {
__ Pop(ra);
}
@@ -208,6 +193,7 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
Register const scratch0_;
Register const scratch1_;
RecordWriteMode const mode_;
+ StubCallMode const stub_mode_;
bool must_save_lr_;
Zone* zone_;
};
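The only functional change to this class is the captured `StubCallMode`, which `Generate()` consults: inside a wasm module the record-write stub cannot be reached as an embedded code object, so only a stub index is encoded and later patched once the code is copied into wasm code space. A minimal model of that dispatch; the enum mirrors the diff, the bodies are placeholders, and none of this is V8's actual API:

```cpp
// Minimal model of the StubCallMode dispatch; not V8's actual API.
enum class StubCallMode { kCallCodeObject, kCallWasmRuntimeStub };

void EmitRecordWriteCall(StubCallMode stub_mode) {
  if (stub_mode == StubCallMode::kCallWasmRuntimeStub) {
    // Emit the stub index with WASM_STUB_CALL relocation; the concrete
    // address is patched when the code lands in the native module.
  } else {
    // Emit a regular CODE_TARGET call to the RecordWrite builtin.
  }
}
```

The deleted `SaveRegisters`/`RestoreRegisters` helpers are not lost functionality; the register push/pop bookkeeping now lives with the stub-call machinery instead of each out-of-line class.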
@@ -264,7 +250,6 @@ Condition FlagsConditionToConditionCmp(FlagsCondition condition) {
UNREACHABLE();
}
-
Condition FlagsConditionToConditionTst(FlagsCondition condition) {
switch (condition) {
case kNotEqual:
@@ -277,7 +262,6 @@ Condition FlagsConditionToConditionTst(FlagsCondition condition) {
UNREACHABLE();
}
-
FPUCondition FlagsConditionToConditionCmpFPU(bool& predicate,
FlagsCondition condition) {
switch (condition) {
@@ -328,7 +312,6 @@ void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
} // namespace
-
#define ASSEMBLE_ATOMIC_LOAD_INTEGER(asm_instr) \
do { \
__ asm_instr(i.OutputRegister(), i.MemoryOperand()); \
@@ -356,39 +339,63 @@ void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
__ sync(); \
} while (0)
-#define ASSEMBLE_ATOMIC64_LOGIC_BINOP(bin_instr) \
+#define ASSEMBLE_ATOMIC64_LOGIC_BINOP(bin_instr, external) \
do { \
if (IsMipsArchVariant(kMips32r6)) { \
Label binop; \
+ Register oldval_low = \
+ instr->OutputCount() >= 1 ? i.OutputRegister(0) : i.TempRegister(1); \
+ Register oldval_high = \
+ instr->OutputCount() >= 2 ? i.OutputRegister(1) : i.TempRegister(2); \
+ __ Addu(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
__ sync(); \
__ bind(&binop); \
- __ llwp(i.TempRegister(0), i.TempRegister(1), i.InputRegister(2)); \
- __ bin_instr(i.TempRegister(0), i.TempRegister(1), i.TempRegister(0), \
- i.TempRegister(1), i.InputRegister(0), i.InputRegister(1)); \
- __ scwp(i.TempRegister(0), i.TempRegister(1), i.InputRegister(2)); \
+ __ llx(oldval_high, MemOperand(i.TempRegister(0), 4)); \
+ __ ll(oldval_low, MemOperand(i.TempRegister(0), 0)); \
+ __ bin_instr(i.TempRegister(1), i.TempRegister(2), oldval_low, \
+ oldval_high, i.InputRegister(2), i.InputRegister(3)); \
+ __ scx(i.TempRegister(2), MemOperand(i.TempRegister(0), 4)); \
+ __ sc(i.TempRegister(1), MemOperand(i.TempRegister(0), 0)); \
__ BranchShort(&binop, eq, i.TempRegister(1), Operand(zero_reg)); \
__ sync(); \
} else { \
- UNREACHABLE(); \
+ FrameScope scope(tasm(), StackFrame::MANUAL); \
+ __ Addu(a0, i.InputRegister(0), i.InputRegister(1)); \
+ __ PushCallerSaved(kDontSaveFPRegs, v0, v1); \
+ __ PrepareCallCFunction(3, 0, kScratchReg); \
+ __ CallCFunction(ExternalReference::external(), 3, 0); \
+ __ PopCallerSaved(kDontSaveFPRegs, v0, v1); \
} \
} while (0)
-#define ASSEMBLE_ATOMIC64_ARITH_BINOP(bin_instr) \
- do { \
- if (IsMipsArchVariant(kMips32r6)) { \
- Label binop; \
- __ sync(); \
- __ bind(&binop); \
- __ llwp(i.TempRegister(0), i.TempRegister(1), i.InputRegister(2)); \
- __ bin_instr(i.TempRegister(0), i.TempRegister(1), i.TempRegister(0), \
- i.TempRegister(1), i.InputRegister(0), i.InputRegister(1), \
- i.TempRegister(2), i.TempRegister(3)); \
- __ scwp(i.TempRegister(0), i.TempRegister(1), i.InputRegister(2)); \
- __ BranchShort(&binop, eq, i.TempRegister(1), Operand(zero_reg)); \
- __ sync(); \
- } else { \
- UNREACHABLE(); \
- } \
+#define ASSEMBLE_ATOMIC64_ARITH_BINOP(bin_instr, external) \
+ do { \
+ if (IsMipsArchVariant(kMips32r6)) { \
+ Label binop; \
+ Register oldval_low = \
+ instr->OutputCount() >= 1 ? i.OutputRegister(0) : i.TempRegister(1); \
+ Register oldval_high = \
+ instr->OutputCount() >= 2 ? i.OutputRegister(1) : i.TempRegister(2); \
+ __ Addu(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
+ __ sync(); \
+ __ bind(&binop); \
+ __ llx(oldval_high, MemOperand(i.TempRegister(0), 4)); \
+ __ ll(oldval_low, MemOperand(i.TempRegister(0), 0)); \
+ __ bin_instr(i.TempRegister(1), i.TempRegister(2), oldval_low, \
+ oldval_high, i.InputRegister(2), i.InputRegister(3), \
+ kScratchReg, kScratchReg2); \
+ __ scx(i.TempRegister(2), MemOperand(i.TempRegister(0), 4)); \
+ __ sc(i.TempRegister(1), MemOperand(i.TempRegister(0), 0)); \
+ __ BranchShort(&binop, eq, i.TempRegister(1), Operand(zero_reg)); \
+ __ sync(); \
+ } else { \
+ FrameScope scope(tasm(), StackFrame::MANUAL); \
+ __ Addu(a0, i.InputRegister(0), i.InputRegister(1)); \
+ __ PushCallerSaved(kDontSaveFPRegs, v0, v1); \
+ __ PrepareCallCFunction(3, 0, kScratchReg); \
+ __ CallCFunction(ExternalReference::external(), 3, 0); \
+ __ PopCallerSaved(kDontSaveFPRegs, v0, v1); \
+ } \
} while (0)
#define ASSEMBLE_ATOMIC_BINOP_EXT(sign_extend, size, bin_instr) \
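Both `ASSEMBLE_ATOMIC64_*` macros above now compute the address once, use paired load-linked/store-conditional (`llx`/`ll`, `scx`/`sc`) on MIPS32r6, and fall back to a C function wrapped in `PushCallerSaved`/`PopCallerSaved` on older variants instead of hitting `UNREACHABLE()`. The retry loop has the classic LL/SC shape, which can be modeled portably with a CAS loop; a sketch of the concept, not the emitted code:

```cpp
#include <atomic>
#include <cstdint>

// Portable model of the LL/SC retry loop the macros emit: load the old
// 64-bit pair, apply the operation, and retry until the store succeeds
// without interference (a failed sc/scx branches back to the ll/llx).
uint64_t AtomicPairBinop(std::atomic<uint64_t>* addr,
                         uint64_t (*op)(uint64_t, uint64_t),
                         uint64_t operand) {
  uint64_t old_pair = addr->load(std::memory_order_relaxed);
  while (!addr->compare_exchange_weak(old_pair, op(old_pair, operand),
                                      std::memory_order_seq_cst)) {
    // old_pair was refreshed by compare_exchange_weak; loop and retry.
  }
  return old_pair;  // the loaded pair, like oldval_low/oldval_high above
}
```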
@@ -555,10 +562,10 @@ void AdjustStackPointerForTailCall(TurboAssembler* tasm,
StandardFrameConstants::kFixedSlotCountAboveFp;
int stack_slot_delta = new_slot_above_sp - current_sp_offset;
if (stack_slot_delta > 0) {
- tasm->Subu(sp, sp, stack_slot_delta * kPointerSize);
+ tasm->Subu(sp, sp, stack_slot_delta * kSystemPointerSize);
state->IncreaseSPDelta(stack_slot_delta);
} else if (allow_shrinkage && stack_slot_delta < 0) {
- tasm->Addu(sp, sp, -stack_slot_delta * kPointerSize);
+ tasm->Addu(sp, sp, -stack_slot_delta * kSystemPointerSize);
state->IncreaseSPDelta(stack_slot_delta);
}
}
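The mechanical `kPointerSize` → `kSystemPointerSize` renames that recur through this file (stack adjustments, SP-delta bookkeeping, frame slots) track a V8-wide split between the machine word size and the size of a tagged slot, which stop being the same thing once pointer compression enters the picture; that motivation is inferred here, not stated in this commit. A sketch of the distinction, with illustrative values:

```cpp
#include <cstdint>

// Illustrative only: the names mirror V8's constants; the values are what
// they would be on 32-bit MIPS (or a compressed-pointer 64-bit build).
constexpr int kSystemPointerSize = sizeof(void*);  // machine word / C++ pointer
constexpr int kTaggedSize = 4;                     // a tagged slot may be narrower
static_assert(kSystemPointerSize >= kTaggedSize,
              "stack slots are sized by the system pointer, not the tag");
```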
@@ -599,19 +606,15 @@ void CodeGenerator::BailoutIfDeoptimized() {
CodeDataContainer::kKindSpecificFlagsOffset));
__ And(kScratchReg, kScratchReg,
Operand(1 << Code::kMarkedForDeoptimizationBit));
- // Ensure we're not serializing (otherwise we'd need to use an indirection to
- // access the builtin below).
- DCHECK(!isolate()->ShouldLoadConstantsFromRootList());
- Handle<Code> code = isolate()->builtins()->builtin_handle(
- Builtins::kCompileLazyDeoptimizedCode);
- __ Jump(code, RelocInfo::CODE_TARGET, ne, kScratchReg, Operand(zero_reg));
+ __ Jump(BUILTIN_CODE(isolate(), CompileLazyDeoptimizedCode),
+ RelocInfo::CODE_TARGET, ne, kScratchReg, Operand(zero_reg));
}
void CodeGenerator::GenerateSpeculationPoisonFromCodeStartRegister() {
// Calculate a mask which has all bits set in the normal case, but has all
// bits cleared if we are speculatively executing the wrong PC.
// difference = (current - expected) | (expected - current)
- // poison = ~(difference >> (kBitsPerPointer - 1))
+ // poison = ~(difference >> (kBitsPerSystemPointer - 1))
__ ComputeCodeStartAddress(kScratchReg);
__ Move(kSpeculationPoisonRegister, kScratchReg);
__ subu(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
@@ -621,7 +624,7 @@ void CodeGenerator::GenerateSpeculationPoisonFromCodeStartRegister() {
__ or_(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
kJavaScriptCallCodeStartRegister);
__ sra(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
- kBitsPerPointer - 1);
+ kBitsPerSystemPointer - 1);
__ nor(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
kSpeculationPoisonRegister);
}
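The poison-mask comment deserves a worked example. When `current == expected`, both differences are zero, the sign bit is clear, and the arithmetic shift plus `nor` yields all ones; for any mismatch, one of the two subtractions is negative, the sign bit smears across the word, and the mask collapses to zero. In portable C++, assuming 32-bit pointers as on MIPS here and an arithmetic right shift (which the emitted `sra` guarantees):

```cpp
#include <cassert>
#include <cstdint>

// Branchless mask: all ones when current == expected, all zeros otherwise.
uint32_t SpeculationPoison(uint32_t current, uint32_t expected) {
  uint32_t difference = (current - expected) | (expected - current);
  // Arithmetic shift replicates the sign bit (sra); ~x models nor(x, x).
  return ~static_cast<uint32_t>(static_cast<int32_t>(difference) >> (32 - 1));
}

int main() {
  assert(SpeculationPoison(0x1234, 0x1234) == 0xFFFFFFFFu);  // right PC
  assert(SpeculationPoison(0x1234, 0x1235) == 0u);           // wrong PC
}
```

ANDing the zero mask into speculatively loaded values is what neuters them when the CPU is executing down a mispredicted-code-start path.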
@@ -653,6 +656,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
frame_access_state()->ClearSPDelta();
break;
}
+ case kArchCallBuiltinPointer: {
+ DCHECK(!instr->InputAt(0)->IsImmediate());
+ Register builtin_pointer = i.InputRegister(0);
+ __ CallBuiltinPointer(builtin_pointer);
+ RecordCallPosition(instr);
+ frame_access_state()->ClearSPDelta();
+ break;
+ }
case kArchCallWasmFunction: {
if (instr->InputAt(0)->IsImmediate()) {
Constant constant = i.ToConstant(instr->InputAt(0));
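`kArchCallBuiltinPointer` is new in this port: instead of a compile-time code target, the callee arrives at runtime as a builtin pointer in a register and is invoked indirectly via `CallBuiltinPointer`. As a rough analogy, dispatching through a runtime-selected entry of a builtins table rather than a statically relocated call; purely illustrative, not V8's mechanism in detail:

```cpp
#include <cassert>
#include <cstdio>

using Builtin = void (*)();
void HelloBuiltin() { std::puts("builtin called"); }
Builtin builtins_table[] = {HelloBuiltin};  // hypothetical entry table

void CallBuiltinPointer(int builtin_index) {
  assert(builtin_index >= 0);
  builtins_table[builtin_index]();  // indirect call through the table
}

int main() { CallBuiltinPointer(0); }
```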
@@ -739,9 +750,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
DCHECK(fp_mode_ == kDontSaveFPRegs || fp_mode_ == kSaveFPRegs);
// kReturnRegister0 should have been saved before entering the stub.
int bytes = __ PushCallerSaved(fp_mode_, kReturnRegister0);
- DCHECK_EQ(0, bytes % kPointerSize);
+ DCHECK(IsAligned(bytes, kSystemPointerSize));
DCHECK_EQ(0, frame_access_state()->sp_delta());
- frame_access_state()->IncreaseSPDelta(bytes / kPointerSize);
+ frame_access_state()->IncreaseSPDelta(bytes / kSystemPointerSize);
DCHECK(!caller_registers_saved_);
caller_registers_saved_ = true;
break;
@@ -752,7 +763,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
DCHECK(fp_mode_ == kDontSaveFPRegs || fp_mode_ == kSaveFPRegs);
// Don't overwrite the returned value.
int bytes = __ PopCallerSaved(fp_mode_, kReturnRegister0);
- frame_access_state()->IncreaseSPDelta(-(bytes / kPointerSize));
+ frame_access_state()->IncreaseSPDelta(-(bytes / kSystemPointerSize));
DCHECK_EQ(0, frame_access_state()->sp_delta());
DCHECK(caller_registers_saved_);
caller_registers_saved_ = false;
@@ -785,7 +796,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
// kArchRestoreCallerRegisters;
int bytes =
__ RequiredStackSizeForCallerSaved(fp_mode_, kReturnRegister0);
- frame_access_state()->IncreaseSPDelta(bytes / kPointerSize);
+ frame_access_state()->IncreaseSPDelta(bytes / kSystemPointerSize);
}
break;
}
@@ -861,8 +872,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Register value = i.InputRegister(2);
Register scratch0 = i.TempRegister(0);
Register scratch1 = i.TempRegister(1);
- auto ool = new (zone()) OutOfLineRecordWrite(this, object, index, value,
- scratch0, scratch1, mode);
+ auto ool = new (zone())
+ OutOfLineRecordWrite(this, object, index, value, scratch0, scratch1,
+ mode, DetermineStubCallMode());
__ Addu(kScratchReg, object, index);
__ sw(value, MemOperand(kScratchReg));
__ CheckPageFlag(object, scratch0,
@@ -881,19 +893,20 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
alignment == 16);
if (FLAG_debug_code && alignment > 0) {
// Verify that the output_register is properly aligned
- __ And(kScratchReg, i.OutputRegister(), Operand(kPointerSize - 1));
+ __ And(kScratchReg, i.OutputRegister(),
+ Operand(kSystemPointerSize - 1));
__ Assert(eq, AbortReason::kAllocationIsNotDoubleAligned, kScratchReg,
Operand(zero_reg));
}
- if (alignment == 2 * kPointerSize) {
+ if (alignment == 2 * kSystemPointerSize) {
Label done;
__ Addu(kScratchReg, base_reg, Operand(offset.offset()));
__ And(kScratchReg, kScratchReg, Operand(alignment - 1));
__ BranchShort(&done, eq, kScratchReg, Operand(zero_reg));
- __ Addu(i.OutputRegister(), i.OutputRegister(), kPointerSize);
+ __ Addu(i.OutputRegister(), i.OutputRegister(), kSystemPointerSize);
__ bind(&done);
- } else if (alignment > 2 * kPointerSize) {
+ } else if (alignment > 2 * kSystemPointerSize) {
Label done;
__ Addu(kScratchReg, base_reg, Operand(offset.offset()));
__ And(kScratchReg, kScratchReg, Operand(alignment - 1));
@@ -1602,12 +1615,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case MachineRepresentation::kFloat32:
__ swc1(i.InputFloatRegister(0), MemOperand(sp, -kFloatSize));
__ Subu(sp, sp, Operand(kFloatSize));
- frame_access_state()->IncreaseSPDelta(kFloatSize / kPointerSize);
+ frame_access_state()->IncreaseSPDelta(kFloatSize /
+ kSystemPointerSize);
break;
case MachineRepresentation::kFloat64:
__ Sdc1(i.InputDoubleRegister(0), MemOperand(sp, -kDoubleSize));
__ Subu(sp, sp, Operand(kDoubleSize));
- frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
+ frame_access_state()->IncreaseSPDelta(kDoubleSize /
+ kSystemPointerSize);
break;
default: {
UNREACHABLE();
@@ -1639,7 +1654,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kMipsStackClaim: {
__ Subu(sp, sp, Operand(i.InputInt32(0)));
- frame_access_state()->IncreaseSPDelta(i.InputInt32(0) / kPointerSize);
+ frame_access_state()->IncreaseSPDelta(i.InputInt32(0) /
+ kSystemPointerSize);
break;
}
case kMipsStoreToStackSlot: {
@@ -1741,57 +1757,120 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
#undef ATOMIC_BINOP_CASE
case kMipsWord32AtomicPairLoad: {
if (IsMipsArchVariant(kMips32r6)) {
- Register second_output =
- instr->OutputCount() == 2 ? i.OutputRegister(1) : i.TempRegister(0);
- __ llwp(i.OutputRegister(0), second_output, i.InputRegister(0));
- __ sync();
+ if (instr->OutputCount() > 0) {
+ Register second_output = instr->OutputCount() == 2
+ ? i.OutputRegister(1)
+ : i.TempRegister(1);
+ __ Addu(a0, i.InputRegister(0), i.InputRegister(1));
+ __ llx(second_output, MemOperand(a0, 4));
+ __ ll(i.OutputRegister(0), MemOperand(a0, 0));
+ __ sync();
+ }
} else {
- UNREACHABLE();
+ FrameScope scope(tasm(), StackFrame::MANUAL);
+ __ Addu(a0, i.InputRegister(0), i.InputRegister(1));
+ __ PushCallerSaved(kDontSaveFPRegs, v0, v1);
+ __ PrepareCallCFunction(1, 0, kScratchReg);
+ __ CallCFunction(ExternalReference::atomic_pair_load_function(), 1, 0);
+ __ PopCallerSaved(kDontSaveFPRegs, v0, v1);
}
break;
}
case kMipsWord32AtomicPairStore: {
if (IsMipsArchVariant(kMips32r6)) {
Label store;
+ __ Addu(a0, i.InputRegister(0), i.InputRegister(1));
__ sync();
__ bind(&store);
- __ llwp(i.TempRegister(0), i.TempRegister(1), i.InputRegister(0));
- __ Move(i.TempRegister(0), i.InputRegister(2));
- __ scwp(i.InputRegister(1), i.TempRegister(0), i.InputRegister(0));
- __ BranchShort(&store, eq, i.TempRegister(0), Operand(zero_reg));
+ __ llx(i.TempRegister(2), MemOperand(a0, 4));
+ __ ll(i.TempRegister(1), MemOperand(a0, 0));
+ __ Move(i.TempRegister(1), i.InputRegister(2));
+ __ scx(i.InputRegister(3), MemOperand(a0, 4));
+ __ sc(i.TempRegister(1), MemOperand(a0, 0));
+ __ BranchShort(&store, eq, i.TempRegister(1), Operand(zero_reg));
__ sync();
} else {
- UNREACHABLE();
+ FrameScope scope(tasm(), StackFrame::MANUAL);
+ __ Addu(a0, i.InputRegister(0), i.InputRegister(1));
+ __ PushCallerSaved(kDontSaveFPRegs);
+ __ PrepareCallCFunction(3, 0, kScratchReg);
+ __ CallCFunction(ExternalReference::atomic_pair_store_function(), 3, 0);
+ __ PopCallerSaved(kDontSaveFPRegs);
}
break;
}
-#define ATOMIC64_BINOP_ARITH_CASE(op, instr) \
- case kMipsWord32AtomicPair##op: \
- ASSEMBLE_ATOMIC64_ARITH_BINOP(instr); \
+#define ATOMIC64_BINOP_ARITH_CASE(op, instr, external) \
+ case kMipsWord32AtomicPair##op: \
+ ASSEMBLE_ATOMIC64_ARITH_BINOP(instr, external); \
break;
- ATOMIC64_BINOP_ARITH_CASE(Add, AddPair)
- ATOMIC64_BINOP_ARITH_CASE(Sub, SubPair)
+ ATOMIC64_BINOP_ARITH_CASE(Add, AddPair, atomic_pair_add_function)
+ ATOMIC64_BINOP_ARITH_CASE(Sub, SubPair, atomic_pair_sub_function)
#undef ATOMIC64_BINOP_ARITH_CASE
-#define ATOMIC64_BINOP_LOGIC_CASE(op, instr) \
- case kMipsWord32AtomicPair##op: \
- ASSEMBLE_ATOMIC64_LOGIC_BINOP(instr); \
+#define ATOMIC64_BINOP_LOGIC_CASE(op, instr, external) \
+ case kMipsWord32AtomicPair##op: \
+ ASSEMBLE_ATOMIC64_LOGIC_BINOP(instr, external); \
break;
- ATOMIC64_BINOP_LOGIC_CASE(And, AndPair)
- ATOMIC64_BINOP_LOGIC_CASE(Or, OrPair)
- ATOMIC64_BINOP_LOGIC_CASE(Xor, XorPair)
+ ATOMIC64_BINOP_LOGIC_CASE(And, AndPair, atomic_pair_and_function)
+ ATOMIC64_BINOP_LOGIC_CASE(Or, OrPair, atomic_pair_or_function)
+ ATOMIC64_BINOP_LOGIC_CASE(Xor, XorPair, atomic_pair_xor_function)
#undef ATOMIC64_BINOP_LOGIC_CASE
case kMipsWord32AtomicPairExchange:
- UNREACHABLE();
+ if (IsMipsArchVariant(kMips32r6)) {
+ Label binop;
+ Register oldval_low =
+ instr->OutputCount() >= 1 ? i.OutputRegister(0) : i.TempRegister(1);
+ Register oldval_high =
+ instr->OutputCount() >= 2 ? i.OutputRegister(1) : i.TempRegister(2);
+ __ Addu(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1));
+ __ sync();
+ __ bind(&binop);
+ __ llx(oldval_high, MemOperand(i.TempRegister(0), 4));
+ __ ll(oldval_low, MemOperand(i.TempRegister(0), 0));
+ __ Move(i.TempRegister(1), i.InputRegister(2));
+ __ scx(i.InputRegister(3), MemOperand(i.TempRegister(0), 4));
+ __ sc(i.TempRegister(1), MemOperand(i.TempRegister(0), 0));
+ __ BranchShort(&binop, eq, i.TempRegister(1), Operand(zero_reg));
+ __ sync();
+ } else {
+ FrameScope scope(tasm(), StackFrame::MANUAL);
+ __ PushCallerSaved(kDontSaveFPRegs, v0, v1);
+ __ PrepareCallCFunction(3, 0, kScratchReg);
+ __ Addu(a0, i.InputRegister(0), i.InputRegister(1));
+ __ CallCFunction(ExternalReference::atomic_pair_exchange_function(), 3,
+ 0);
+ __ PopCallerSaved(kDontSaveFPRegs, v0, v1);
+ }
break;
case kMipsWord32AtomicPairCompareExchange: {
- FrameScope scope(tasm(), StackFrame::MANUAL);
- __ PushCallerSaved(kDontSaveFPRegs, v0, v1);
- __ PrepareCallCFunction(5, 0, kScratchReg);
- __ addu(a0, i.InputRegister(0), i.InputRegister(1));
- __ sw(i.InputRegister(5), MemOperand(sp, 16));
- __ CallCFunction(
- ExternalReference::atomic_pair_compare_exchange_function(), 5, 0);
- __ PopCallerSaved(kDontSaveFPRegs, v0, v1);
+ if (IsMipsArchVariant(kMips32r6)) {
+ Label compareExchange, exit;
+ Register oldval_low =
+ instr->OutputCount() >= 1 ? i.OutputRegister(0) : kScratchReg;
+ Register oldval_high =
+ instr->OutputCount() >= 2 ? i.OutputRegister(1) : kScratchReg2;
+ __ Addu(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1));
+ __ sync();
+ __ bind(&compareExchange);
+ __ llx(oldval_high, MemOperand(i.TempRegister(0), 4));
+ __ ll(oldval_low, MemOperand(i.TempRegister(0), 0));
+ __ BranchShort(&exit, ne, i.InputRegister(2), Operand(oldval_low));
+ __ BranchShort(&exit, ne, i.InputRegister(3), Operand(oldval_high));
+ __ mov(kScratchReg, i.InputRegister(4));
+ __ scx(i.InputRegister(5), MemOperand(i.TempRegister(0), 4));
+ __ sc(kScratchReg, MemOperand(i.TempRegister(0), 0));
+ __ BranchShort(&compareExchange, eq, kScratchReg, Operand(zero_reg));
+ __ bind(&exit);
+ __ sync();
+ } else {
+ FrameScope scope(tasm(), StackFrame::MANUAL);
+ __ PushCallerSaved(kDontSaveFPRegs, v0, v1);
+ __ PrepareCallCFunction(5, 0, kScratchReg);
+ __ addu(a0, i.InputRegister(0), i.InputRegister(1));
+ __ sw(i.InputRegister(5), MemOperand(sp, 16));
+ __ CallCFunction(
+ ExternalReference::atomic_pair_compare_exchange_function(), 5, 0);
+ __ PopCallerSaved(kDontSaveFPRegs, v0, v1);
+ }
break;
}
case kMipsS128Zero: {
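On r6 the pair compare-exchange now stays inline as well: load the old pair with `llx`/`ll`, branch to `exit` if either word differs from the expected pair, otherwise attempt the paired store-conditional and retry on failure. Semantically this is a 64-bit CAS over two 32-bit halves, which the following portable sketch models (names and signature are illustrative):

```cpp
#include <atomic>
#include <cstdint>

// Model of kMipsWord32AtomicPairCompareExchange: two 32-bit halves viewed
// as one 64-bit CAS. The old pair comes back through old_lo/old_hi, like
// the instruction's optional outputs.
bool PairCompareExchange(std::atomic<uint64_t>* addr,
                         uint32_t expected_lo, uint32_t expected_hi,
                         uint32_t new_lo, uint32_t new_hi,
                         uint32_t* old_lo, uint32_t* old_hi) {
  uint64_t expected = (static_cast<uint64_t>(expected_hi) << 32) | expected_lo;
  uint64_t desired = (static_cast<uint64_t>(new_hi) << 32) | new_lo;
  bool ok = addr->compare_exchange_strong(expected, desired);
  *old_lo = static_cast<uint32_t>(expected);  // refreshed on failure
  *old_hi = static_cast<uint32_t>(expected >> 32);
  return ok;
}
```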
@@ -2872,7 +2951,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
return kSuccess;
} // NOLINT(readability/fn_size)
-
void AssembleBranchToLabels(CodeGenerator* gen, TurboAssembler* tasm,
Instruction* instr, FlagsCondition condition,
Label* tlabel, Label* flabel, bool fallthru) {
@@ -3070,11 +3148,12 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
} else {
gen_->AssembleSourcePosition(instr_);
// A direct call to a wasm runtime stub defined in this module.
- // Just encode the stub index. This will be patched at relocation.
+ // Just encode the stub index. This will be patched when the code
+ // is added to the native module and copied into wasm code space.
__ Call(static_cast<Address>(trap_id), RelocInfo::WASM_STUB_CALL);
ReferenceMap* reference_map =
new (gen_->zone()) ReferenceMap(gen_->zone());
- gen_->RecordSafepoint(reference_map, Safepoint::kSimple, 0,
+ gen_->RecordSafepoint(reference_map, Safepoint::kSimple,
Safepoint::kNoLazyDeopt);
if (FLAG_debug_code) {
__ stop(GetAbortReason(AbortReason::kUnexpectedReturnFromWasmTrap));
@@ -3261,7 +3340,6 @@ void CodeGenerator::AssembleArchLookupSwitch(Instruction* instr) {
AssembleArchJump(i.InputRpo(1));
}
-
void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
MipsOperandConverter i(this, instr);
Register input = i.InputRegister(0);
@@ -3284,7 +3362,7 @@ void CodeGenerator::FinishFrame(Frame* frame) {
int count = base::bits::CountPopulation(saves_fpu);
DCHECK_EQ(kNumCalleeSavedFPU, count);
frame->AllocateSavedCalleeRegisterSlots(count *
- (kDoubleSize / kPointerSize));
+ (kDoubleSize / kSystemPointerSize));
}
const RegList saves = call_descriptor->CalleeSavedRegisters();
@@ -3310,6 +3388,16 @@ void CodeGenerator::AssembleConstructFrame() {
__ StubPrologue(info()->GetOutputStackFrameType());
if (call_descriptor->IsWasmFunctionCall()) {
__ Push(kWasmInstanceRegister);
+ } else if (call_descriptor->IsWasmImportWrapper()) {
+ // WASM import wrappers are passed a tuple in the place of the instance.
+ // Unpack the tuple into the instance and the target callable.
+ // This must be done here in the codegen because it cannot be expressed
+ // properly in the graph.
+ __ lw(kJSFunctionRegister,
+ FieldMemOperand(kWasmInstanceRegister, Tuple2::kValue2Offset));
+ __ lw(kWasmInstanceRegister,
+ FieldMemOperand(kWasmInstanceRegister, Tuple2::kValue1Offset));
+ __ Push(kWasmInstanceRegister);
}
}
}
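The wasm import-wrapper prologue above is the codegen-level version of a simple record unpack: the caller smuggles a `Tuple2` through the instance register, and the prologue splits it into the real instance and the target callable before pushing the instance. In C-like terms, with the layout sketched rather than taken from V8's object definitions:

```cpp
// Conceptual view of the prologue emitted above; offsets are illustrative.
struct Tuple2 {
  void* value1;  // the actual WasmInstanceObject
  void* value2;  // the target callable
};

void UnpackImportWrapperInstance(const Tuple2* packed, void** instance_reg,
                                 void** js_function_reg) {
  *js_function_reg = packed->value2;  // lw kJSFunctionRegister, kValue2Offset
  *instance_reg = packed->value1;     // lw kWasmInstanceRegister, kValue1Offset
}
```

As the comment in the hunk notes, this happens in the code generator because the unpacking cannot be expressed properly in the graph.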
@@ -3340,7 +3428,7 @@ void CodeGenerator::AssembleConstructFrame() {
shrink_slots -= 2 * base::bits::CountPopulation(saves_fpu);
shrink_slots -= returns;
if (shrink_slots > 0) {
- __ Subu(sp, sp, Operand(shrink_slots * kPointerSize));
+ __ Subu(sp, sp, Operand(shrink_slots * kSystemPointerSize));
}
// Save callee-saved FPU registers.
@@ -3356,7 +3444,7 @@ void CodeGenerator::AssembleConstructFrame() {
if (returns != 0) {
// Create space for returns.
- __ Subu(sp, sp, Operand(returns * kPointerSize));
+ __ Subu(sp, sp, Operand(returns * kSystemPointerSize));
}
}
@@ -3366,7 +3454,7 @@ void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
const int returns = frame()->GetReturnSlotCount();
if (returns != 0) {
- __ Addu(sp, sp, Operand(returns * kPointerSize));
+ __ Addu(sp, sp, Operand(returns * kSystemPointerSize));
}
// Restore GP registers.
@@ -3404,7 +3492,7 @@ void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
pop_count += g.ToConstant(pop).ToInt32();
} else {
Register pop_reg = g.ToRegister(pop);
- __ sll(pop_reg, pop_reg, kPointerSizeLog2);
+ __ sll(pop_reg, pop_reg, kSystemPointerSizeLog2);
__ Addu(sp, sp, Operand(pop_reg));
}
if (pop_count != 0) {
@@ -3571,7 +3659,6 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
}
}
-
void CodeGenerator::AssembleSwap(InstructionOperand* source,
InstructionOperand* destination) {
MipsOperandConverter g(this, nullptr);
@@ -3698,7 +3785,6 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
}
}
-
void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) {
// On 32-bit MIPS we emit the jump tables inline.
UNREACHABLE();
diff --git a/deps/v8/src/compiler/mips/instruction-codes-mips.h b/deps/v8/src/compiler/backend/mips/instruction-codes-mips.h
index 4b49de36b4..ba64e59429 100644
--- a/deps/v8/src/compiler/mips/instruction-codes-mips.h
+++ b/deps/v8/src/compiler/backend/mips/instruction-codes-mips.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_COMPILER_MIPS_INSTRUCTION_CODES_MIPS_H_
-#define V8_COMPILER_MIPS_INSTRUCTION_CODES_MIPS_H_
+#ifndef V8_COMPILER_BACKEND_MIPS_INSTRUCTION_CODES_MIPS_H_
+#define V8_COMPILER_BACKEND_MIPS_INSTRUCTION_CODES_MIPS_H_
namespace v8 {
namespace internal {
@@ -305,9 +305,8 @@ namespace compiler {
V(MRI) /* [%r0 + K] */ \
V(MRR) /* [%r0 + %r1] */
-
} // namespace compiler
} // namespace internal
} // namespace v8
-#endif // V8_COMPILER_MIPS_INSTRUCTION_CODES_MIPS_H_
+#endif // V8_COMPILER_BACKEND_MIPS_INSTRUCTION_CODES_MIPS_H_
diff --git a/deps/v8/src/compiler/mips/instruction-scheduler-mips.cc b/deps/v8/src/compiler/backend/mips/instruction-scheduler-mips.cc
index 26f543d838..26a3e808cc 100644
--- a/deps/v8/src/compiler/mips/instruction-scheduler-mips.cc
+++ b/deps/v8/src/compiler/backend/mips/instruction-scheduler-mips.cc
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/compiler/code-generator.h"
-#include "src/compiler/instruction-scheduler.h"
+#include "src/compiler/backend/code-generator.h"
+#include "src/compiler/backend/instruction-scheduler.h"
namespace v8 {
namespace internal {
@@ -1137,7 +1137,7 @@ int Float64MaxLatency() {
int PrepareCallCFunctionLatency() {
int frame_alignment = TurboAssembler::ActivationFrameAlignment();
- if (frame_alignment > kPointerSize) {
+ if (frame_alignment > kSystemPointerSize) {
return 1 + SubuLatency(false) + AndLatency(false) + 1;
} else {
return SubuLatency(false);
@@ -1154,7 +1154,7 @@ int CallLatency() {
int CallCFunctionHelperLatency() {
// Estimated.
int latency = AndLatency(false) + Latency::BRANCH + 2 + CallLatency();
- if (base::OS::ActivationFrameAlignment() > kPointerSize) {
+ if (base::OS::ActivationFrameAlignment() > kSystemPointerSize) {
latency++;
} else {
latency += AdduLatency(false);
@@ -1293,6 +1293,7 @@ int TryInlineTruncateDoubleToILatency() {
int CallStubDelayedLatency() { return 1 + CallLatency(); }
int TruncateDoubleToIDelayedLatency() {
+ // TODO(mips): This no longer reflects how TruncateDoubleToI is called.
return TryInlineTruncateDoubleToILatency() + 1 + SubuLatency(false) +
Sdc1Latency() + CallStubDelayedLatency() + AdduLatency(false) + 1;
}
diff --git a/deps/v8/src/compiler/mips/instruction-selector-mips.cc b/deps/v8/src/compiler/backend/mips/instruction-selector-mips.cc
index 954942c9af..44cadd2505 100644
--- a/deps/v8/src/compiler/mips/instruction-selector-mips.cc
+++ b/deps/v8/src/compiler/backend/mips/instruction-selector-mips.cc
@@ -4,7 +4,7 @@
#include "src/base/adapters.h"
#include "src/base/bits.h"
-#include "src/compiler/instruction-selector-impl.h"
+#include "src/compiler/backend/instruction-selector-impl.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"
@@ -17,7 +17,6 @@ namespace compiler {
#define TRACE() PrintF("instr_sel: %s at line %d\n", __FUNCTION__, __LINE__)
-
// Adds Mips-specific methods for generating InstructionOperands.
class MipsOperandGenerator final : public OperandGenerator {
public:
@@ -107,7 +106,6 @@ class MipsOperandGenerator final : public OperandGenerator {
}
};
-
static void VisitRRR(InstructionSelector* selector, ArchOpcode opcode,
Node* node) {
MipsOperandGenerator g(selector);
@@ -237,29 +235,29 @@ static void VisitPairAtomicBinop(InstructionSelector* selector, Node* node,
Node* value = node->InputAt(2);
Node* value_high = node->InputAt(3);
- InstructionOperand addr_reg = g.TempRegister();
-
- selector->Emit(kMipsAdd | AddressingModeField::encode(kMode_None), addr_reg,
- g.UseRegister(index), g.UseRegister(base));
-
- InstructionOperand inputs[] = {g.UseRegister(value),
- g.UseRegister(value_high), addr_reg};
- InstructionOperand temps[] = {g.TempRegister(), g.TempRegister(),
- g.TempRegister(), g.TempRegister()};
+ InstructionOperand inputs[] = {g.UseRegister(base), g.UseRegister(index),
+ g.UseFixed(value, a1),
+ g.UseFixed(value_high, a2)};
Node* projection0 = NodeProperties::FindProjection(node, 0);
Node* projection1 = NodeProperties::FindProjection(node, 1);
if (projection1) {
- InstructionOperand outputs[] = {g.DefineAsRegister(projection0),
- g.DefineAsRegister(projection1)};
+ InstructionOperand outputs[] = {g.DefineAsFixed(projection0, v0),
+ g.DefineAsFixed(projection1, v1)};
+ InstructionOperand temps[] = {g.TempRegister(a0), g.TempRegister(),
+ g.TempRegister()};
selector->Emit(opcode | AddressingModeField::encode(kMode_None),
arraysize(outputs), outputs, arraysize(inputs), inputs,
arraysize(temps), temps);
} else if (projection0) {
- InstructionOperand outputs[] = {g.DefineAsRegister(projection0)};
+ InstructionOperand outputs[] = {g.DefineAsFixed(projection0, v0)};
+ InstructionOperand temps[] = {g.TempRegister(a0), g.TempRegister(v1),
+ g.TempRegister()};
selector->Emit(opcode | AddressingModeField::encode(kMode_None),
arraysize(outputs), outputs, arraysize(inputs), inputs,
arraysize(temps), temps);
} else {
+ InstructionOperand temps[] = {g.TempRegister(a0), g.TempRegister(v0),
+ g.TempRegister(v1)};
selector->Emit(opcode | AddressingModeField::encode(kMode_None), 0, nullptr,
arraysize(inputs), inputs, arraysize(temps), temps);
}
@@ -304,14 +302,14 @@ void InstructionSelector::VisitLoad(Node* node) {
break;
case MachineRepresentation::kTaggedSigned: // Fall through.
case MachineRepresentation::kTaggedPointer: // Fall through.
- case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kTagged: // Fall through.
case MachineRepresentation::kWord32:
opcode = kMipsLw;
break;
case MachineRepresentation::kSimd128:
opcode = kMipsMsaLd;
break;
- case MachineRepresentation::kWord64: // Fall through.
+ case MachineRepresentation::kWord64: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
return;
@@ -397,14 +395,14 @@ void InstructionSelector::VisitStore(Node* node) {
break;
case MachineRepresentation::kTaggedSigned: // Fall through.
case MachineRepresentation::kTaggedPointer: // Fall through.
- case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kTagged: // Fall through.
case MachineRepresentation::kWord32:
opcode = kMipsSw;
break;
case MachineRepresentation::kSimd128:
opcode = kMipsMsaSt;
break;
- case MachineRepresentation::kWord64: // Fall through.
+ case MachineRepresentation::kWord64: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
return;
@@ -482,12 +480,10 @@ void InstructionSelector::VisitWord32And(Node* node) {
VisitBinop(this, node, kMipsAnd, true, kMipsAnd);
}
-
void InstructionSelector::VisitWord32Or(Node* node) {
VisitBinop(this, node, kMipsOr, true, kMipsOr);
}
-
void InstructionSelector::VisitWord32Xor(Node* node) {
Int32BinopMatcher m(node);
if (m.left().IsWord32Or() && CanCover(node, m.left().node()) &&
@@ -511,7 +507,6 @@ void InstructionSelector::VisitWord32Xor(Node* node) {
VisitBinop(this, node, kMipsXor, true, kMipsXor);
}
-
void InstructionSelector::VisitWord32Shl(Node* node) {
Int32BinopMatcher m(node);
if (m.left().IsWord32And() && CanCover(node, m.left().node()) &&
@@ -542,7 +537,6 @@ void InstructionSelector::VisitWord32Shl(Node* node) {
VisitRRO(this, kMipsShl, node);
}
-
void InstructionSelector::VisitWord32Shr(Node* node) {
Int32BinopMatcher m(node);
if (m.left().IsWord32And() && m.right().HasValue()) {
@@ -567,7 +561,6 @@ void InstructionSelector::VisitWord32Shr(Node* node) {
VisitRRO(this, kMipsShr, node);
}
-
void InstructionSelector::VisitWord32Sar(Node* node) {
Int32BinopMatcher m(node);
if ((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
@@ -682,7 +675,6 @@ void InstructionSelector::VisitWord32Ror(Node* node) {
VisitRRO(this, kMipsRor, node);
}
-
void InstructionSelector::VisitWord32Clz(Node* node) {
VisitRR(this, kMipsClz, node);
}
@@ -696,24 +688,24 @@ void InstructionSelector::VisitWord32AtomicPairLoad(Node* node) {
Node* projection0 = NodeProperties::FindProjection(node, 0);
Node* projection1 = NodeProperties::FindProjection(node, 1);
- InstructionOperand addr_reg = g.TempRegister();
- Emit(kMipsAdd | AddressingModeField::encode(kMode_None), addr_reg,
- g.UseRegister(index), g.UseRegister(base));
- InstructionOperand inputs[] = {addr_reg};
+ InstructionOperand inputs[] = {g.UseRegister(base), g.UseRegister(index)};
- InstructionOperand temps[] = {g.TempRegister()};
if (projection1) {
- InstructionOperand outputs[] = {g.DefineAsRegister(projection0),
- g.DefineAsRegister(projection1)};
+ InstructionOperand outputs[] = {g.DefineAsFixed(projection0, v0),
+ g.DefineAsFixed(projection1, v1)};
+ InstructionOperand temps[] = {g.TempRegister(a0)};
Emit(opcode | AddressingModeField::encode(kMode_MRI), arraysize(outputs),
- outputs, arraysize(inputs), inputs, 1, temps);
+ outputs, arraysize(inputs), inputs, arraysize(temps), temps);
} else if (projection0) {
- InstructionOperand outputs[] = {g.DefineAsRegister(projection0)};
+ InstructionOperand outputs[] = {g.DefineAsFixed(projection0, v0)};
+ InstructionOperand temps[] = {g.TempRegister(a0), g.TempRegister(v1)};
Emit(opcode | AddressingModeField::encode(kMode_MRI), arraysize(outputs),
- outputs, arraysize(inputs), inputs, 1, temps);
+ outputs, arraysize(inputs), inputs, arraysize(temps), temps);
} else {
+ InstructionOperand temps[] = {g.TempRegister(a0), g.TempRegister(v0),
+ g.TempRegister(v1)};
Emit(opcode | AddressingModeField::encode(kMode_MRI), 0, nullptr,
- arraysize(inputs), inputs, 1, temps);
+ arraysize(inputs), inputs, arraysize(temps), temps);
}
}
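
The pair load above follows the same pattern: instead of pre-adding base + index into a temp with a separate kMipsAdd, the selector passes base and index directly and tags the instruction kMode_MRI, leaving the effective-address computation to the code generator. A sketch of the resulting semantics, assuming standard C++ atomics in place of the ll/sc pair (illustration only):

#include <atomic>
#include <cstdint>

uint64_t AtomicPairLoad(const void* base, intptr_t index) {
  auto* cell = reinterpret_cast<const std::atomic<uint64_t>*>(
      reinterpret_cast<const char*>(base) + index);  // base + index
  // The two 32-bit halves of the result land in v0 (low) and v1 (high).
  return cell->load(std::memory_order_seq_cst);
}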
@@ -724,13 +716,11 @@ void InstructionSelector::VisitWord32AtomicPairStore(Node* node) {
Node* value_low = node->InputAt(2);
Node* value_high = node->InputAt(3);
- InstructionOperand addr_reg = g.TempRegister();
- Emit(kMipsAdd | AddressingModeField::encode(kMode_None), addr_reg,
- g.UseRegister(index), g.UseRegister(base));
-
- InstructionOperand inputs[] = {addr_reg, g.UseRegister(value_low),
- g.UseRegister(value_high)};
- InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
+ InstructionOperand inputs[] = {g.UseRegister(base), g.UseRegister(index),
+ g.UseFixed(value_low, a1),
+ g.UseFixed(value_high, a2)};
+ InstructionOperand temps[] = {g.TempRegister(a0), g.TempRegister(),
+ g.TempRegister()};
Emit(kMipsWord32AtomicPairStore | AddressingModeField::encode(kMode_MRI), 0,
nullptr, arraysize(inputs), inputs, arraysize(temps), temps);
}
@@ -803,50 +793,56 @@ void InstructionSelector::VisitWord32Ctz(Node* node) {
Emit(kMipsCtz, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
}
-
void InstructionSelector::VisitWord32Popcnt(Node* node) {
MipsOperandGenerator g(this);
Emit(kMipsPopcnt, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
}
-
void InstructionSelector::VisitInt32Add(Node* node) {
MipsOperandGenerator g(this);
Int32BinopMatcher m(node);
- // Select Lsa for (left + (left_of_right << imm)).
- if (m.right().opcode() == IrOpcode::kWord32Shl &&
- CanCover(node, m.left().node()) && CanCover(node, m.right().node())) {
- Int32BinopMatcher mright(m.right().node());
- if (mright.right().HasValue() && !m.left().HasValue()) {
- int32_t shift_value = static_cast<int32_t>(mright.right().Value());
- Emit(kMipsLsa, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
- g.UseRegister(mright.left().node()), g.TempImmediate(shift_value));
- return;
+ if (IsMipsArchVariant(kMips32r6)) {
+ // Select Lsa for (left + (left_of_right << imm)).
+ if (m.right().opcode() == IrOpcode::kWord32Shl &&
+ CanCover(node, m.left().node()) && CanCover(node, m.right().node())) {
+ Int32BinopMatcher mright(m.right().node());
+ if (mright.right().HasValue() && !m.left().HasValue()) {
+ int32_t shift_value = static_cast<int32_t>(mright.right().Value());
+ if (shift_value > 0 && shift_value <= 31) {
+ Emit(kMipsLsa, g.DefineAsRegister(node),
+ g.UseRegister(m.left().node()),
+ g.UseRegister(mright.left().node()),
+ g.TempImmediate(shift_value));
+ return;
+ }
+ }
}
- }
- // Select Lsa for ((left_of_left << imm) + right).
- if (m.left().opcode() == IrOpcode::kWord32Shl &&
- CanCover(node, m.right().node()) && CanCover(node, m.left().node())) {
- Int32BinopMatcher mleft(m.left().node());
- if (mleft.right().HasValue() && !m.right().HasValue()) {
- int32_t shift_value = static_cast<int32_t>(mleft.right().Value());
- Emit(kMipsLsa, g.DefineAsRegister(node), g.UseRegister(m.right().node()),
- g.UseRegister(mleft.left().node()), g.TempImmediate(shift_value));
- return;
+ // Select Lsa for ((left_of_left << imm) + right).
+ if (m.left().opcode() == IrOpcode::kWord32Shl &&
+ CanCover(node, m.right().node()) && CanCover(node, m.left().node())) {
+ Int32BinopMatcher mleft(m.left().node());
+ if (mleft.right().HasValue() && !m.right().HasValue()) {
+ int32_t shift_value = static_cast<int32_t>(mleft.right().Value());
+ if (shift_value > 0 && shift_value <= 31) {
+ Emit(kMipsLsa, g.DefineAsRegister(node),
+ g.UseRegister(m.right().node()),
+ g.UseRegister(mleft.left().node()),
+ g.TempImmediate(shift_value));
+ return;
+ }
+ }
}
}
VisitBinop(this, node, kMipsAdd, true, kMipsAdd);
}
-
void InstructionSelector::VisitInt32Sub(Node* node) {
VisitBinop(this, node, kMipsSub);
}
-
void InstructionSelector::VisitInt32Mul(Node* node) {
MipsOperandGenerator g(this);
Int32BinopMatcher m(node);
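
The reworked VisitInt32Add above gates Lsa selection on kMips32r6 and on the shift amount lying in 1..31. The pattern being matched computes rt + (rs << sa); the added guard keeps the shift in the range V8's Lsa macro-instruction accepts (an assumption worth checking against the macro-assembler). An illustrative model in plain C++:

#include <cassert>
#include <cstdint>

// Model of the kMipsLsa pattern: left + (left_of_right << imm).
uint32_t Lsa(uint32_t rt, uint32_t rs, int sa) {
  assert(sa > 0 && sa <= 31);  // mirrors the new shift_value guard
  return rt + (rs << sa);
}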
@@ -858,7 +854,8 @@ void InstructionSelector::VisitInt32Mul(Node* node) {
g.TempImmediate(WhichPowerOf2(value)));
return;
}
- if (base::bits::IsPowerOfTwo(value - 1)) {
+ if (base::bits::IsPowerOfTwo(value - 1) && IsMipsArchVariant(kMips32r6) &&
+ value - 1 > 0 && value - 1 <= 31) {
Emit(kMipsLsa, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
g.UseRegister(m.left().node()),
g.TempImmediate(WhichPowerOf2(value - 1)));
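
The VisitInt32Mul change above applies the same guard to the multiply strength reduction: when the constant multiplier m has m - 1 a power of two, x * m folds to x + (x << log2(m - 1)), a single Lsa. A worked example in plain C++:

#include <cstdint>
#include <cstdio>

uint32_t MulViaLsa(uint32_t x, int log2_m_minus_1) {
  return x + (x << log2_m_minus_1);  // Lsa(x, x, log2(m - 1))
}

int main() {
  std::printf("%u\n", MulViaLsa(7, 2));  // 7 * 5 = 35
  return 0;
}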
@@ -877,19 +874,16 @@ void InstructionSelector::VisitInt32Mul(Node* node) {
VisitRRR(this, kMipsMul, node);
}
-
void InstructionSelector::VisitInt32MulHigh(Node* node) {
VisitRRR(this, kMipsMulHigh, node);
}
-
void InstructionSelector::VisitUint32MulHigh(Node* node) {
MipsOperandGenerator g(this);
Emit(kMipsMulHighU, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)),
g.UseRegister(node->InputAt(1)));
}
-
void InstructionSelector::VisitInt32Div(Node* node) {
MipsOperandGenerator g(this);
Int32BinopMatcher m(node);
@@ -897,7 +891,6 @@ void InstructionSelector::VisitInt32Div(Node* node) {
g.UseRegister(m.right().node()));
}
-
void InstructionSelector::VisitUint32Div(Node* node) {
MipsOperandGenerator g(this);
Int32BinopMatcher m(node);
@@ -905,7 +898,6 @@ void InstructionSelector::VisitUint32Div(Node* node) {
g.UseRegister(m.right().node()));
}
-
void InstructionSelector::VisitInt32Mod(Node* node) {
MipsOperandGenerator g(this);
Int32BinopMatcher m(node);
@@ -913,7 +905,6 @@ void InstructionSelector::VisitInt32Mod(Node* node) {
g.UseRegister(m.right().node()));
}
-
void InstructionSelector::VisitUint32Mod(Node* node) {
MipsOperandGenerator g(this);
Int32BinopMatcher m(node);
@@ -921,42 +912,34 @@ void InstructionSelector::VisitUint32Mod(Node* node) {
g.UseRegister(m.right().node()));
}
-
void InstructionSelector::VisitChangeFloat32ToFloat64(Node* node) {
VisitRR(this, kMipsCvtDS, node);
}
-
void InstructionSelector::VisitRoundInt32ToFloat32(Node* node) {
VisitRR(this, kMipsCvtSW, node);
}
-
void InstructionSelector::VisitRoundUint32ToFloat32(Node* node) {
VisitRR(this, kMipsCvtSUw, node);
}
-
void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
VisitRR(this, kMipsCvtDW, node);
}
-
void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
VisitRR(this, kMipsCvtDUw, node);
}
-
void InstructionSelector::VisitTruncateFloat32ToInt32(Node* node) {
VisitRR(this, kMipsTruncWS, node);
}
-
void InstructionSelector::VisitTruncateFloat32ToUint32(Node* node) {
VisitRR(this, kMipsTruncUwS, node);
}
-
void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
MipsOperandGenerator g(this);
Node* value = node->InputAt(0);
@@ -1020,7 +1003,6 @@ void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
VisitRR(this, kMipsTruncWD, node);
}
-
void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
VisitRR(this, kMipsTruncUwD, node);
}
@@ -1055,7 +1037,6 @@ void InstructionSelector::VisitBitcastFloat32ToInt32(Node* node) {
VisitRR(this, kMipsFloat64ExtractLowWord32, node);
}
-
void InstructionSelector::VisitBitcastInt32ToFloat32(Node* node) {
MipsOperandGenerator g(this);
Emit(kMipsFloat64InsertLowWord32, g.DefineAsRegister(node),
@@ -1063,7 +1044,6 @@ void InstructionSelector::VisitBitcastInt32ToFloat32(Node* node) {
g.UseRegister(node->InputAt(0)));
}
-
void InstructionSelector::VisitFloat32Add(Node* node) {
MipsOperandGenerator g(this);
if (IsMipsArchVariant(kMips32r2)) { // Select Madd.S(z, x, y).
@@ -1088,7 +1068,6 @@ void InstructionSelector::VisitFloat32Add(Node* node) {
VisitRRR(this, kMipsAddS, node);
}
-
void InstructionSelector::VisitFloat64Add(Node* node) {
MipsOperandGenerator g(this);
if (IsMipsArchVariant(kMips32r2)) { // Select Madd.D(z, x, y).
@@ -1113,7 +1092,6 @@ void InstructionSelector::VisitFloat64Add(Node* node) {
VisitRRR(this, kMipsAddD, node);
}
-
void InstructionSelector::VisitFloat32Sub(Node* node) {
MipsOperandGenerator g(this);
if (IsMipsArchVariant(kMips32r2)) { // Select Msub.S(z, x, y).
@@ -1150,26 +1128,23 @@ void InstructionSelector::VisitFloat32Mul(Node* node) {
VisitRRR(this, kMipsMulS, node);
}
-
void InstructionSelector::VisitFloat64Mul(Node* node) {
VisitRRR(this, kMipsMulD, node);
}
-
void InstructionSelector::VisitFloat32Div(Node* node) {
VisitRRR(this, kMipsDivS, node);
}
-
void InstructionSelector::VisitFloat64Div(Node* node) {
VisitRRR(this, kMipsDivD, node);
}
-
void InstructionSelector::VisitFloat64Mod(Node* node) {
MipsOperandGenerator g(this);
Emit(kMipsModD, g.DefineAsFixed(node, f0), g.UseFixed(node->InputAt(0), f12),
- g.UseFixed(node->InputAt(1), f14))->MarkAsCall();
+ g.UseFixed(node->InputAt(1), f14))
+ ->MarkAsCall();
}
void InstructionSelector::VisitFloat32Max(Node* node) {
@@ -1196,12 +1171,10 @@ void InstructionSelector::VisitFloat64Min(Node* node) {
g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
}
-
void InstructionSelector::VisitFloat32Abs(Node* node) {
VisitRR(this, kMipsAbsS, node);
}
-
void InstructionSelector::VisitFloat64Abs(Node* node) {
VisitRR(this, kMipsAbsD, node);
}
@@ -1210,52 +1183,42 @@ void InstructionSelector::VisitFloat32Sqrt(Node* node) {
VisitRR(this, kMipsSqrtS, node);
}
-
void InstructionSelector::VisitFloat64Sqrt(Node* node) {
VisitRR(this, kMipsSqrtD, node);
}
-
void InstructionSelector::VisitFloat32RoundDown(Node* node) {
VisitRR(this, kMipsFloat32RoundDown, node);
}
-
void InstructionSelector::VisitFloat64RoundDown(Node* node) {
VisitRR(this, kMipsFloat64RoundDown, node);
}
-
void InstructionSelector::VisitFloat32RoundUp(Node* node) {
VisitRR(this, kMipsFloat32RoundUp, node);
}
-
void InstructionSelector::VisitFloat64RoundUp(Node* node) {
VisitRR(this, kMipsFloat64RoundUp, node);
}
-
void InstructionSelector::VisitFloat32RoundTruncate(Node* node) {
VisitRR(this, kMipsFloat32RoundTruncate, node);
}
-
void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
VisitRR(this, kMipsFloat64RoundTruncate, node);
}
-
void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
UNREACHABLE();
}
-
void InstructionSelector::VisitFloat32RoundTiesEven(Node* node) {
VisitRR(this, kMipsFloat32RoundTiesEven, node);
}
-
void InstructionSelector::VisitFloat64RoundTiesEven(Node* node) {
VisitRR(this, kMipsFloat64RoundTiesEven, node);
}
@@ -1299,7 +1262,7 @@ void InstructionSelector::EmitPrepareArguments(
for (PushParameter input : (*arguments)) {
if (input.node) {
Emit(kMipsStoreToStackSlot, g.NoOutput(), g.UseRegister(input.node),
- g.TempImmediate(slot << kPointerSizeLog2));
+ g.TempImmediate(slot << kSystemPointerSizeLog2));
++slot;
}
}
@@ -1316,13 +1279,13 @@ void InstructionSelector::EmitPrepareArguments(
}
}
Emit(kMipsStackClaim, g.NoOutput(),
- g.TempImmediate(stack_size << kPointerSizeLog2));
+ g.TempImmediate(stack_size << kSystemPointerSizeLog2));
}
for (size_t n = 0; n < arguments->size(); ++n) {
PushParameter input = (*arguments)[n];
if (input.node) {
Emit(kMipsStoreToStackSlot, g.NoOutput(), g.UseRegister(input.node),
- g.TempImmediate(n << kPointerSizeLog2));
+ g.TempImmediate(n << kSystemPointerSizeLog2));
}
}
}
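
The kPointerSize to kSystemPointerSize renames in EmitPrepareArguments are mechanical, but the underlying arithmetic is worth spelling out: a stack slot index becomes a byte offset by shifting left by log2 of the pointer size. A self-contained restatement (plain C++, not V8's actual constants):

#include <cstdint>

constexpr int kSystemPointerSizeLog2 = sizeof(void*) == 8 ? 3 : 2;

constexpr intptr_t StackSlotOffset(int slot) {
  return intptr_t{slot} << kSystemPointerSizeLog2;  // slot * sizeof(void*)
}

static_assert(StackSlotOffset(3) == intptr_t{3} * sizeof(void*),
              "slot index to byte offset");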
@@ -1372,7 +1335,7 @@ void InstructionSelector::VisitUnalignedLoad(Node* node) {
break;
case MachineRepresentation::kTaggedSigned: // Fall through.
case MachineRepresentation::kTaggedPointer: // Fall through.
- case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kTagged: // Fall through.
case MachineRepresentation::kWord32:
opcode = kMipsUlw;
break;
@@ -1385,7 +1348,7 @@ void InstructionSelector::VisitUnalignedLoad(Node* node) {
case MachineRepresentation::kSimd128:
opcode = kMipsMsaLd;
break;
- case MachineRepresentation::kWord64: // Fall through.
+ case MachineRepresentation::kWord64: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
return;
@@ -1430,14 +1393,14 @@ void InstructionSelector::VisitUnalignedStore(Node* node) {
break;
case MachineRepresentation::kTaggedSigned: // Fall through.
case MachineRepresentation::kTaggedPointer: // Fall through.
- case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kTagged: // Fall through.
case MachineRepresentation::kWord32:
opcode = kMipsUsw;
break;
case MachineRepresentation::kSimd128:
opcode = kMipsMsaSt;
break;
- case MachineRepresentation::kWord64: // Fall through.
+ case MachineRepresentation::kWord64: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
return;
@@ -1465,7 +1428,6 @@ static void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
selector->EmitWithContinuation(opcode, left, right, cont);
}
-
// Shared routine for multiple float32 compare operations.
void VisitFloat32Compare(InstructionSelector* selector, Node* node,
FlagsContinuation* cont) {
@@ -1480,7 +1442,6 @@ void VisitFloat32Compare(InstructionSelector* selector, Node* node,
VisitCompare(selector, kMipsCmpS, lhs, rhs, cont);
}
-
// Shared routine for multiple float64 compare operations.
void VisitFloat64Compare(InstructionSelector* selector, Node* node,
FlagsContinuation* cont) {
@@ -1495,7 +1456,6 @@ void VisitFloat64Compare(InstructionSelector* selector, Node* node,
VisitCompare(selector, kMipsCmpD, lhs, rhs, cont);
}
-
// Shared routine for multiple word compare operations.
void VisitWordCompare(InstructionSelector* selector, Node* node,
InstructionCode opcode, FlagsContinuation* cont,
@@ -1568,7 +1528,6 @@ void VisitWordCompare(InstructionSelector* selector, Node* node,
}
}
-
void VisitWordCompare(InstructionSelector* selector, Node* node,
FlagsContinuation* cont) {
VisitWordCompare(selector, node, kMipsCmp, cont, false);
@@ -1705,33 +1664,28 @@ void InstructionSelector::VisitWord32Equal(Node* const node) {
VisitWordCompare(this, node, &cont);
}
-
void InstructionSelector::VisitInt32LessThan(Node* node) {
FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
VisitWordCompare(this, node, &cont);
}
-
void InstructionSelector::VisitInt32LessThanOrEqual(Node* node) {
FlagsContinuation cont =
FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
VisitWordCompare(this, node, &cont);
}
-
void InstructionSelector::VisitUint32LessThan(Node* node) {
FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
VisitWordCompare(this, node, &cont);
}
-
void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) {
FlagsContinuation cont =
FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
VisitWordCompare(this, node, &cont);
}
-
void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
@@ -1741,7 +1695,6 @@ void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
VisitBinop(this, node, kMipsAddOvf, &cont);
}
-
void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
@@ -1765,53 +1718,45 @@ void InstructionSelector::VisitFloat32Equal(Node* node) {
VisitFloat32Compare(this, node, &cont);
}
-
void InstructionSelector::VisitFloat32LessThan(Node* node) {
FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
VisitFloat32Compare(this, node, &cont);
}
-
void InstructionSelector::VisitFloat32LessThanOrEqual(Node* node) {
FlagsContinuation cont =
FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
VisitFloat32Compare(this, node, &cont);
}
-
void InstructionSelector::VisitFloat64Equal(Node* node) {
FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
VisitFloat64Compare(this, node, &cont);
}
-
void InstructionSelector::VisitFloat64LessThan(Node* node) {
FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
VisitFloat64Compare(this, node, &cont);
}
-
void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
FlagsContinuation cont =
FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
VisitFloat64Compare(this, node, &cont);
}
-
void InstructionSelector::VisitFloat64ExtractLowWord32(Node* node) {
MipsOperandGenerator g(this);
Emit(kMipsFloat64ExtractLowWord32, g.DefineAsRegister(node),
g.UseRegister(node->InputAt(0)));
}
-
void InstructionSelector::VisitFloat64ExtractHighWord32(Node* node) {
MipsOperandGenerator g(this);
Emit(kMipsFloat64ExtractHighWord32, g.DefineAsRegister(node),
g.UseRegister(node->InputAt(0)));
}
-
void InstructionSelector::VisitFloat64InsertLowWord32(Node* node) {
MipsOperandGenerator g(this);
Node* left = node->InputAt(0);
@@ -1820,7 +1765,6 @@ void InstructionSelector::VisitFloat64InsertLowWord32(Node* node) {
g.UseRegister(left), g.UseRegister(right));
}
-
void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
MipsOperandGenerator g(this);
Node* left = node->InputAt(0);
diff --git a/deps/v8/src/compiler/backend/mips64/OWNERS b/deps/v8/src/compiler/backend/mips64/OWNERS
new file mode 100644
index 0000000000..b455d9ef29
--- /dev/null
+++ b/deps/v8/src/compiler/backend/mips64/OWNERS
@@ -0,0 +1,3 @@
+arikalo@wavecomp.com
+prudic@wavecomp.com
+skovacevic@wavecomp.com
diff --git a/deps/v8/src/compiler/mips64/code-generator-mips64.cc b/deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc
index 0e2b508a29..8788fa7ee3 100644
--- a/deps/v8/src/compiler/mips64/code-generator-mips64.cc
+++ b/deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc
@@ -4,15 +4,16 @@
#include "src/assembler-inl.h"
#include "src/callable.h"
-#include "src/compiler/code-generator-impl.h"
-#include "src/compiler/code-generator.h"
-#include "src/compiler/gap-resolver.h"
+#include "src/compiler/backend/code-generator-impl.h"
+#include "src/compiler/backend/code-generator.h"
+#include "src/compiler/backend/gap-resolver.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/osr.h"
-#include "src/heap/heap-inl.h"
+#include "src/heap/heap-inl.h" // crbug.com/v8/8499
+#include "src/macro-assembler.h"
#include "src/mips64/constants-mips64.h"
-#include "src/mips64/macro-assembler-mips64.h"
#include "src/optimized-compilation-info.h"
+#include "src/wasm/wasm-code-manager.h"
namespace v8 {
namespace internal {
@@ -20,7 +21,6 @@ namespace compiler {
#define __ tasm()->
-
// TODO(plind): consider renaming these macros.
#define TRACE_MSG(msg) \
PrintF("code_gen: \'%s\' in function %s at line %d\n", msg, __FUNCTION__, \
@@ -30,7 +30,6 @@ namespace compiler {
PrintF("UNIMPLEMENTED code_generator_mips: %s at line %d\n", __FUNCTION__, \
__LINE__)
-
// Adds Mips-specific methods to convert InstructionOperands.
class MipsOperandConverter final : public InstructionOperandConverter {
public:
@@ -134,20 +133,17 @@ class MipsOperandConverter final : public InstructionOperandConverter {
}
};
-
static inline bool HasRegisterInput(Instruction* instr, size_t index) {
return instr->InputAt(index)->IsRegister();
}
-
namespace {
-
class OutOfLineRecordWrite final : public OutOfLineCode {
public:
OutOfLineRecordWrite(CodeGenerator* gen, Register object, Register index,
Register value, Register scratch0, Register scratch1,
- RecordWriteMode mode)
+ RecordWriteMode mode, StubCallMode stub_mode)
: OutOfLineCode(gen),
object_(object),
index_(index),
@@ -155,31 +151,10 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
scratch0_(scratch0),
scratch1_(scratch1),
mode_(mode),
+ stub_mode_(stub_mode),
must_save_lr_(!gen->frame_access_state()->has_frame()),
zone_(gen->zone()) {}
- void SaveRegisters(RegList registers) {
- DCHECK_LT(0, NumRegs(registers));
- RegList regs = 0;
- for (int i = 0; i < Register::kNumRegisters; ++i) {
- if ((registers >> i) & 1u) {
- regs |= Register::from_code(i).bit();
- }
- }
- __ MultiPush(regs | ra.bit());
- }
-
- void RestoreRegisters(RegList registers) {
- DCHECK_LT(0, NumRegs(registers));
- RegList regs = 0;
- for (int i = 0; i < Register::kNumRegisters; ++i) {
- if ((registers >> i) & 1u) {
- regs |= Register::from_code(i).bit();
- }
- }
- __ MultiPop(regs | ra.bit());
- }
-
void Generate() final {
if (mode_ > RecordWriteMode::kValueIsPointer) {
__ JumpIfSmi(value_, exit());
@@ -197,8 +172,16 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
// We need to save and restore ra if the frame was elided.
__ Push(ra);
}
- __ CallRecordWriteStub(object_, scratch1_, remembered_set_action,
- save_fp_mode);
+ if (stub_mode_ == StubCallMode::kCallWasmRuntimeStub) {
+ // A direct call to a wasm runtime stub defined in this module.
+ // Just encode the stub index. This will be patched when the code
+ // is added to the native module and copied into wasm code space.
+ __ CallRecordWriteStub(object_, scratch1_, remembered_set_action,
+ save_fp_mode, wasm::WasmCode::kWasmRecordWrite);
+ } else {
+ __ CallRecordWriteStub(object_, scratch1_, remembered_set_action,
+ save_fp_mode);
+ }
if (must_save_lr_) {
__ Pop(ra);
}
@@ -211,6 +194,7 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
Register const scratch0_;
Register const scratch1_;
RecordWriteMode const mode_;
+ StubCallMode const stub_mode_;
bool must_save_lr_;
Zone* zone_;
};
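
OutOfLineRecordWrite now carries a StubCallMode so Generate() can distinguish wasm code, which cannot embed an isolate-dependent code handle and instead encodes the wasm::WasmCode::kWasmRecordWrite stub index for later patching, from ordinary code that calls the stub directly. A simplified sketch of the dispatch (names other than kCallWasmRuntimeStub are illustrative):

enum class StubCallMode { kCallCodeObject, kCallWasmRuntimeStub };

// Sketch only: the real Generate() emits a call either way; what changes
// is how the callee is encoded.
void EmitRecordWriteCall(StubCallMode stub_mode) {
  if (stub_mode == StubCallMode::kCallWasmRuntimeStub) {
    // Encode wasm::WasmCode::kWasmRecordWrite; patched when the code is
    // copied into wasm code space.
  } else {
    // Embed a direct reference to the record-write code object.
  }
}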
@@ -267,7 +251,6 @@ Condition FlagsConditionToConditionCmp(FlagsCondition condition) {
UNREACHABLE();
}
-
Condition FlagsConditionToConditionTst(FlagsCondition condition) {
switch (condition) {
case kNotEqual:
@@ -280,7 +263,6 @@ Condition FlagsConditionToConditionTst(FlagsCondition condition) {
UNREACHABLE();
}
-
Condition FlagsConditionToConditionOvf(FlagsCondition condition) {
switch (condition) {
case kOverflow:
@@ -293,7 +275,6 @@ Condition FlagsConditionToConditionOvf(FlagsCondition condition) {
UNREACHABLE();
}
-
FPUCondition FlagsConditionToConditionCmpFPU(bool& predicate,
FlagsCondition condition) {
switch (condition) {
@@ -367,11 +348,16 @@ void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
} while (0)
#define ASSEMBLE_ATOMIC_BINOP_EXT(load_linked, store_conditional, sign_extend, \
- size, bin_instr) \
+ size, bin_instr, representation) \
do { \
Label binop; \
__ daddu(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
- __ andi(i.TempRegister(3), i.TempRegister(0), 0x3); \
+ if (representation == 32) { \
+ __ andi(i.TempRegister(3), i.TempRegister(0), 0x3); \
+ } else { \
+ DCHECK_EQ(representation, 64); \
+ __ andi(i.TempRegister(3), i.TempRegister(0), 0x7); \
+ } \
__ Dsubu(i.TempRegister(0), i.TempRegister(0), \
Operand(i.TempRegister(3))); \
__ sll(i.TempRegister(3), i.TempRegister(3), 3); \
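
The macro change above parameterizes the address split on the ll/sc window width: sub-word atomics mask the low two address bits inside a 32-bit window (andi 0x3) but the low three inside a 64-bit one (andi 0x7), then convert the byte offset to a bit shift. Restated as standalone C++ (little-endian layout assumed, illustration only):

#include <cstdint>

struct SplitAddress {
  uintptr_t aligned;  // start of the ll/sc word
  int bit_shift;      // position of the sub-word within it
};

SplitAddress Split(uintptr_t addr, int representation /* 32 or 64 */) {
  uintptr_t mask = representation == 32 ? 0x3 : 0x7;  // the new andi operand
  uintptr_t byte_offset = addr & mask;
  return {addr - byte_offset,                  // Dsubu
          static_cast<int>(byte_offset) * 8};  // sll ..., 3
}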
@@ -402,12 +388,17 @@ void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
__ sync(); \
} while (0)
-#define ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(load_linked, store_conditional, \
- sign_extend, size) \
+#define ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT( \
+ load_linked, store_conditional, sign_extend, size, representation) \
do { \
Label exchange; \
__ daddu(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
- __ andi(i.TempRegister(1), i.TempRegister(0), 0x3); \
+ if (representation == 32) { \
+ __ andi(i.TempRegister(1), i.TempRegister(0), 0x3); \
+ } else { \
+ DCHECK_EQ(representation, 64); \
+ __ andi(i.TempRegister(1), i.TempRegister(0), 0x7); \
+ } \
__ Dsubu(i.TempRegister(0), i.TempRegister(0), \
Operand(i.TempRegister(1))); \
__ sll(i.TempRegister(1), i.TempRegister(1), 3); \
@@ -443,12 +434,17 @@ void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
} while (0)
#define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT( \
- load_linked, store_conditional, sign_extend, size) \
+ load_linked, store_conditional, sign_extend, size, representation) \
do { \
Label compareExchange; \
Label exit; \
__ daddu(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
- __ andi(i.TempRegister(1), i.TempRegister(0), 0x3); \
+ if (representation == 32) { \
+ __ andi(i.TempRegister(1), i.TempRegister(0), 0x3); \
+ } else { \
+ DCHECK_EQ(representation, 64); \
+ __ andi(i.TempRegister(1), i.TempRegister(0), 0x7); \
+ } \
__ Dsubu(i.TempRegister(0), i.TempRegister(0), \
Operand(i.TempRegister(1))); \
__ sll(i.TempRegister(1), i.TempRegister(1), 3); \
@@ -537,10 +533,10 @@ void AdjustStackPointerForTailCall(TurboAssembler* tasm,
StandardFrameConstants::kFixedSlotCountAboveFp;
int stack_slot_delta = new_slot_above_sp - current_sp_offset;
if (stack_slot_delta > 0) {
- tasm->Dsubu(sp, sp, stack_slot_delta * kPointerSize);
+ tasm->Dsubu(sp, sp, stack_slot_delta * kSystemPointerSize);
state->IncreaseSPDelta(stack_slot_delta);
} else if (allow_shrinkage && stack_slot_delta < 0) {
- tasm->Daddu(sp, sp, -stack_slot_delta * kPointerSize);
+ tasm->Daddu(sp, sp, -stack_slot_delta * kSystemPointerSize);
state->IncreaseSPDelta(stack_slot_delta);
}
}
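
AdjustStackPointerForTailCall above is another mechanical rename, but its logic is easy to restate: a positive slot delta grows the frame (Dsubu on sp), a negative one shrinks it when shrinkage is allowed (Daddu), and the delta is mirrored into the frame access state either way. A plain C++ sketch:

#include <cstdint>

intptr_t AdjustSp(intptr_t sp, int stack_slot_delta, bool allow_shrinkage,
                  int* sp_delta) {
  const intptr_t kSystemPointerSize = sizeof(void*);
  if (stack_slot_delta > 0) {
    sp -= stack_slot_delta * kSystemPointerSize;   // Dsubu
    *sp_delta += stack_slot_delta;
  } else if (allow_shrinkage && stack_slot_delta < 0) {
    sp += -stack_slot_delta * kSystemPointerSize;  // Daddu
    *sp_delta += stack_slot_delta;
  }
  return sp;
}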
@@ -581,19 +577,15 @@ void CodeGenerator::BailoutIfDeoptimized() {
CodeDataContainer::kKindSpecificFlagsOffset));
__ And(kScratchReg, kScratchReg,
Operand(1 << Code::kMarkedForDeoptimizationBit));
- // Ensure we're not serializing (otherwise we'd need to use an indirection to
- // access the builtin below).
- DCHECK(!isolate()->ShouldLoadConstantsFromRootList());
- Handle<Code> code = isolate()->builtins()->builtin_handle(
- Builtins::kCompileLazyDeoptimizedCode);
- __ Jump(code, RelocInfo::CODE_TARGET, ne, kScratchReg, Operand(zero_reg));
+ __ Jump(BUILTIN_CODE(isolate(), CompileLazyDeoptimizedCode),
+ RelocInfo::CODE_TARGET, ne, kScratchReg, Operand(zero_reg));
}
void CodeGenerator::GenerateSpeculationPoisonFromCodeStartRegister() {
// Calculate a mask which has all bits set in the normal case, but has all
// bits cleared if we are speculatively executing the wrong PC.
// difference = (current - expected) | (expected - current)
- // poison = ~(difference >> (kBitsPerPointer - 1))
+ // poison = ~(difference >> (kBitsPerSystemPointer - 1))
__ ComputeCodeStartAddress(kScratchReg);
__ Move(kSpeculationPoisonRegister, kScratchReg);
__ subu(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
@@ -603,7 +595,7 @@ void CodeGenerator::GenerateSpeculationPoisonFromCodeStartRegister() {
__ or_(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
kJavaScriptCallCodeStartRegister);
__ sra(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
- kBitsPerPointer - 1);
+ kBitsPerSystemPointer - 1);
__ nor(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
kSpeculationPoisonRegister);
}
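
The speculation-poison sequence above computes, branch-free, a mask that is all ones when the code-start register matches the expected address and all zeros otherwise: any nonzero difference sets the sign bit of (current - expected) | (expected - current), the arithmetic shift by kBitsPerSystemPointer - 1 smears it across the word, and the final nor inverts. A runnable restatement for a 64-bit pointer (assumes arithmetic right shift, as sra provides):

#include <cassert>
#include <cstdint>

uint64_t PoisonMask(uint64_t current, uint64_t expected) {
  uint64_t difference = (current - expected) | (expected - current);
  // sra by kBitsPerSystemPointer - 1, then nor (bitwise not).
  int64_t smeared = static_cast<int64_t>(difference) >> 63;
  return ~static_cast<uint64_t>(smeared);
}

int main() {
  assert(PoisonMask(0x1000, 0x1000) == ~uint64_t{0});  // match: keep loads
  assert(PoisonMask(0x1000, 0x2000) == 0);             // mismatch: poison
  return 0;
}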
@@ -636,6 +628,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
frame_access_state()->ClearSPDelta();
break;
}
+ case kArchCallBuiltinPointer: {
+ DCHECK(!instr->InputAt(0)->IsImmediate());
+ Register builtin_pointer = i.InputRegister(0);
+ __ CallBuiltinPointer(builtin_pointer);
+ RecordCallPosition(instr);
+ frame_access_state()->ClearSPDelta();
+ break;
+ }
case kArchCallWasmFunction: {
if (arch_opcode == kArchTailCallCodeObjectFromJSFunction) {
AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
@@ -728,9 +728,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
DCHECK(fp_mode_ == kDontSaveFPRegs || fp_mode_ == kSaveFPRegs);
// kReturnRegister0 should have been saved before entering the stub.
int bytes = __ PushCallerSaved(fp_mode_, kReturnRegister0);
- DCHECK_EQ(0, bytes % kPointerSize);
+ DCHECK(IsAligned(bytes, kSystemPointerSize));
DCHECK_EQ(0, frame_access_state()->sp_delta());
- frame_access_state()->IncreaseSPDelta(bytes / kPointerSize);
+ frame_access_state()->IncreaseSPDelta(bytes / kSystemPointerSize);
DCHECK(!caller_registers_saved_);
caller_registers_saved_ = true;
break;
@@ -741,7 +741,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
DCHECK(fp_mode_ == kDontSaveFPRegs || fp_mode_ == kSaveFPRegs);
// Don't overwrite the returned value.
int bytes = __ PopCallerSaved(fp_mode_, kReturnRegister0);
- frame_access_state()->IncreaseSPDelta(-(bytes / kPointerSize));
+ frame_access_state()->IncreaseSPDelta(-(bytes / kSystemPointerSize));
DCHECK_EQ(0, frame_access_state()->sp_delta());
DCHECK(caller_registers_saved_);
caller_registers_saved_ = false;
@@ -774,7 +774,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
// kArchRestoreCallerRegisters;
int bytes =
__ RequiredStackSizeForCallerSaved(fp_mode_, kReturnRegister0);
- frame_access_state()->IncreaseSPDelta(bytes / kPointerSize);
+ frame_access_state()->IncreaseSPDelta(bytes / kSystemPointerSize);
}
break;
}
@@ -850,8 +850,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Register value = i.InputRegister(2);
Register scratch0 = i.TempRegister(0);
Register scratch1 = i.TempRegister(1);
- auto ool = new (zone()) OutOfLineRecordWrite(this, object, index, value,
- scratch0, scratch1, mode);
+ auto ool = new (zone())
+ OutOfLineRecordWrite(this, object, index, value, scratch0, scratch1,
+ mode, DetermineStubCallMode());
__ Daddu(kScratchReg, object, index);
__ Sd(value, MemOperand(kScratchReg));
__ CheckPageFlag(object, scratch0,
@@ -870,18 +871,19 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
alignment == 16);
if (FLAG_debug_code && alignment > 0) {
// Verify that the output_register is properly aligned
- __ And(kScratchReg, i.OutputRegister(), Operand(kPointerSize - 1));
+ __ And(kScratchReg, i.OutputRegister(),
+ Operand(kSystemPointerSize - 1));
__ Assert(eq, AbortReason::kAllocationIsNotDoubleAligned, kScratchReg,
Operand(zero_reg));
}
- if (alignment == 2 * kPointerSize) {
+ if (alignment == 2 * kSystemPointerSize) {
Label done;
__ Daddu(kScratchReg, base_reg, Operand(offset.offset()));
__ And(kScratchReg, kScratchReg, Operand(alignment - 1));
__ BranchShort(&done, eq, kScratchReg, Operand(zero_reg));
- __ Daddu(i.OutputRegister(), i.OutputRegister(), kPointerSize);
+ __ Daddu(i.OutputRegister(), i.OutputRegister(), kSystemPointerSize);
__ bind(&done);
- } else if (alignment > 2 * kPointerSize) {
+ } else if (alignment > 2 * kSystemPointerSize) {
Label done;
__ Daddu(kScratchReg, base_reg, Operand(offset.offset()));
__ And(kScratchReg, kScratchReg, Operand(alignment - 1));
@@ -1785,7 +1787,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (instr->InputAt(0)->IsFPRegister()) {
__ Sdc1(i.InputDoubleRegister(0), MemOperand(sp, -kDoubleSize));
__ Subu(sp, sp, Operand(kDoubleSize));
- frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
+ frame_access_state()->IncreaseSPDelta(kDoubleSize / kSystemPointerSize);
} else {
__ Push(i.InputRegister(0));
frame_access_state()->IncreaseSPDelta(1);
@@ -1813,7 +1815,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kMips64StackClaim: {
__ Dsubu(sp, sp, Operand(i.InputInt32(0)));
- frame_access_state()->IncreaseSPDelta(i.InputInt32(0) / kPointerSize);
+ frame_access_state()->IncreaseSPDelta(i.InputInt32(0) /
+ kSystemPointerSize);
break;
}
case kMips64StoreToStackSlot: {
@@ -1886,75 +1889,75 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ASSEMBLE_ATOMIC_STORE_INTEGER(Sd);
break;
case kWord32AtomicExchangeInt8:
- ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll, Sc, true, 8);
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll, Sc, true, 8, 32);
break;
case kWord32AtomicExchangeUint8:
- ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll, Sc, false, 8);
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll, Sc, false, 8, 32);
break;
case kWord32AtomicExchangeInt16:
- ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll, Sc, true, 16);
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll, Sc, true, 16, 32);
break;
case kWord32AtomicExchangeUint16:
- ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll, Sc, false, 16);
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll, Sc, false, 16, 32);
break;
case kWord32AtomicExchangeWord32:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(Ll, Sc);
break;
case kMips64Word64AtomicExchangeUint8:
- ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 8);
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 8, 64);
break;
case kMips64Word64AtomicExchangeUint16:
- ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 16);
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 16, 64);
break;
case kMips64Word64AtomicExchangeUint32:
- ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 32);
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 32, 64);
break;
case kMips64Word64AtomicExchangeUint64:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(Lld, Scd);
break;
case kWord32AtomicCompareExchangeInt8:
- ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll, Sc, true, 8);
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll, Sc, true, 8, 32);
break;
case kWord32AtomicCompareExchangeUint8:
- ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll, Sc, false, 8);
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll, Sc, false, 8, 32);
break;
case kWord32AtomicCompareExchangeInt16:
- ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll, Sc, true, 16);
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll, Sc, true, 16, 32);
break;
case kWord32AtomicCompareExchangeUint16:
- ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll, Sc, false, 16);
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll, Sc, false, 16, 32);
break;
case kWord32AtomicCompareExchangeWord32:
__ sll(i.InputRegister(2), i.InputRegister(2), 0);
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(Ll, Sc);
break;
case kMips64Word64AtomicCompareExchangeUint8:
- ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 8);
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 8, 64);
break;
case kMips64Word64AtomicCompareExchangeUint16:
- ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 16);
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 16, 64);
break;
case kMips64Word64AtomicCompareExchangeUint32:
- ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 32);
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 32, 64);
break;
case kMips64Word64AtomicCompareExchangeUint64:
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(Lld, Scd);
break;
-#define ATOMIC_BINOP_CASE(op, inst) \
- case kWord32Atomic##op##Int8: \
- ASSEMBLE_ATOMIC_BINOP_EXT(Ll, Sc, true, 8, inst); \
- break; \
- case kWord32Atomic##op##Uint8: \
- ASSEMBLE_ATOMIC_BINOP_EXT(Ll, Sc, false, 8, inst); \
- break; \
- case kWord32Atomic##op##Int16: \
- ASSEMBLE_ATOMIC_BINOP_EXT(Ll, Sc, true, 16, inst); \
- break; \
- case kWord32Atomic##op##Uint16: \
- ASSEMBLE_ATOMIC_BINOP_EXT(Ll, Sc, false, 16, inst); \
- break; \
- case kWord32Atomic##op##Word32: \
- ASSEMBLE_ATOMIC_BINOP(Ll, Sc, inst); \
+#define ATOMIC_BINOP_CASE(op, inst) \
+ case kWord32Atomic##op##Int8: \
+ ASSEMBLE_ATOMIC_BINOP_EXT(Ll, Sc, true, 8, inst, 32); \
+ break; \
+ case kWord32Atomic##op##Uint8: \
+ ASSEMBLE_ATOMIC_BINOP_EXT(Ll, Sc, false, 8, inst, 32); \
+ break; \
+ case kWord32Atomic##op##Int16: \
+ ASSEMBLE_ATOMIC_BINOP_EXT(Ll, Sc, true, 16, inst, 32); \
+ break; \
+ case kWord32Atomic##op##Uint16: \
+ ASSEMBLE_ATOMIC_BINOP_EXT(Ll, Sc, false, 16, inst, 32); \
+ break; \
+ case kWord32Atomic##op##Word32: \
+ ASSEMBLE_ATOMIC_BINOP(Ll, Sc, inst); \
break;
ATOMIC_BINOP_CASE(Add, Addu)
ATOMIC_BINOP_CASE(Sub, Subu)
@@ -1962,18 +1965,18 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ATOMIC_BINOP_CASE(Or, Or)
ATOMIC_BINOP_CASE(Xor, Xor)
#undef ATOMIC_BINOP_CASE
-#define ATOMIC_BINOP_CASE(op, inst) \
- case kMips64Word64Atomic##op##Uint8: \
- ASSEMBLE_ATOMIC_BINOP_EXT(Lld, Scd, false, 8, inst); \
- break; \
- case kMips64Word64Atomic##op##Uint16: \
- ASSEMBLE_ATOMIC_BINOP_EXT(Lld, Scd, false, 16, inst); \
- break; \
- case kMips64Word64Atomic##op##Uint32: \
- ASSEMBLE_ATOMIC_BINOP_EXT(Lld, Scd, false, 32, inst); \
- break; \
- case kMips64Word64Atomic##op##Uint64: \
- ASSEMBLE_ATOMIC_BINOP(Lld, Scd, inst); \
+#define ATOMIC_BINOP_CASE(op, inst) \
+ case kMips64Word64Atomic##op##Uint8: \
+ ASSEMBLE_ATOMIC_BINOP_EXT(Lld, Scd, false, 8, inst, 64); \
+ break; \
+ case kMips64Word64Atomic##op##Uint16: \
+ ASSEMBLE_ATOMIC_BINOP_EXT(Lld, Scd, false, 16, inst, 64); \
+ break; \
+ case kMips64Word64Atomic##op##Uint32: \
+ ASSEMBLE_ATOMIC_BINOP_EXT(Lld, Scd, false, 32, inst, 64); \
+ break; \
+ case kMips64Word64Atomic##op##Uint64: \
+ ASSEMBLE_ATOMIC_BINOP(Lld, Scd, inst); \
break;
ATOMIC_BINOP_CASE(Add, Daddu)
ATOMIC_BINOP_CASE(Sub, Dsubu)
@@ -3292,11 +3295,12 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
} else {
gen_->AssembleSourcePosition(instr_);
// A direct call to a wasm runtime stub defined in this module.
- // Just encode the stub index. This will be patched at relocation.
+ // Just encode the stub index. This will be patched when the code
+ // is added to the native module and copied into wasm code space.
__ Call(static_cast<Address>(trap_id), RelocInfo::WASM_STUB_CALL);
ReferenceMap* reference_map =
new (gen_->zone()) ReferenceMap(gen_->zone());
- gen_->RecordSafepoint(reference_map, Safepoint::kSimple, 0,
+ gen_->RecordSafepoint(reference_map, Safepoint::kSimple,
Safepoint::kNoLazyDeopt);
if (FLAG_debug_code) {
__ stop(GetAbortReason(AbortReason::kUnexpectedReturnFromWasmTrap));
@@ -3516,7 +3520,7 @@ void CodeGenerator::FinishFrame(Frame* frame) {
int count = base::bits::CountPopulation(saves_fpu);
DCHECK_EQ(kNumCalleeSavedFPU, count);
frame->AllocateSavedCalleeRegisterSlots(count *
- (kDoubleSize / kPointerSize));
+ (kDoubleSize / kSystemPointerSize));
}
const RegList saves = call_descriptor->CalleeSavedRegisters();
@@ -3543,6 +3547,16 @@ void CodeGenerator::AssembleConstructFrame() {
__ StubPrologue(info()->GetOutputStackFrameType());
if (call_descriptor->IsWasmFunctionCall()) {
__ Push(kWasmInstanceRegister);
+ } else if (call_descriptor->IsWasmImportWrapper()) {
+ // WASM import wrappers are passed a tuple in the place of the instance.
+ // Unpack the tuple into the instance and the target callable.
+ // This must be done here in the codegen because it cannot be expressed
+ // properly in the graph.
+ __ ld(kJSFunctionRegister,
+ FieldMemOperand(kWasmInstanceRegister, Tuple2::kValue2Offset));
+ __ ld(kWasmInstanceRegister,
+ FieldMemOperand(kWasmInstanceRegister, Tuple2::kValue1Offset));
+ __ Push(kWasmInstanceRegister);
}
}
}
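
The new IsWasmImportWrapper prologue above splits the tuple that import wrappers receive in place of the instance: value2 becomes the callable in kJSFunctionRegister and value1 the real instance in kWasmInstanceRegister, which is then pushed. A plain C++ stand-in for the unpacking (Tuple2 here is a hypothetical struct, not V8's object layout):

struct Tuple2 {
  void* value1;  // the wasm instance
  void* value2;  // the target callable
};

void UnpackImportTuple(const Tuple2* tuple, void** instance, void** callable) {
  *callable = tuple->value2;  // -> kJSFunctionRegister
  *instance = tuple->value1;  // -> kWasmInstanceRegister
}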
@@ -3573,7 +3587,7 @@ void CodeGenerator::AssembleConstructFrame() {
shrink_slots -= base::bits::CountPopulation(saves_fpu);
shrink_slots -= returns;
if (shrink_slots > 0) {
- __ Dsubu(sp, sp, Operand(shrink_slots * kPointerSize));
+ __ Dsubu(sp, sp, Operand(shrink_slots * kSystemPointerSize));
}
if (saves_fpu != 0) {
@@ -3590,7 +3604,7 @@ void CodeGenerator::AssembleConstructFrame() {
if (returns != 0) {
// Create space for returns.
- __ Dsubu(sp, sp, Operand(returns * kPointerSize));
+ __ Dsubu(sp, sp, Operand(returns * kSystemPointerSize));
}
}
@@ -3599,7 +3613,7 @@ void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
const int returns = frame()->GetReturnSlotCount();
if (returns != 0) {
- __ Daddu(sp, sp, Operand(returns * kPointerSize));
+ __ Daddu(sp, sp, Operand(returns * kSystemPointerSize));
}
// Restore GP registers.
@@ -3637,7 +3651,7 @@ void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
pop_count += g.ToConstant(pop).ToInt32();
} else {
Register pop_reg = g.ToRegister(pop);
- __ dsll(pop_reg, pop_reg, kPointerSizeLog2);
+ __ dsll(pop_reg, pop_reg, kSystemPointerSizeLog2);
__ Daddu(sp, sp, pop_reg);
}
if (pop_count != 0) {
@@ -3685,7 +3699,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
__ li(dst, Operand::EmbeddedNumber(src.ToFloat32()));
break;
case Constant::kInt64:
- if (RelocInfo::IsWasmPtrReference(src.rmode())) {
+ if (RelocInfo::IsWasmReference(src.rmode())) {
__ li(dst, Operand(src.ToInt64(), src.rmode()));
} else {
__ li(dst, Operand(src.ToInt64()));
@@ -3790,7 +3804,6 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
}
}
-
void CodeGenerator::AssembleSwap(InstructionOperand* source,
InstructionOperand* destination) {
MipsOperandConverter g(this, nullptr);
@@ -3896,7 +3909,6 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
}
}
-
void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) {
// On 64-bit MIPS we emit the jump tables inline.
UNREACHABLE();
diff --git a/deps/v8/src/compiler/mips64/instruction-codes-mips64.h b/deps/v8/src/compiler/backend/mips64/instruction-codes-mips64.h
index 7ea707db53..24f01b1af1 100644
--- a/deps/v8/src/compiler/mips64/instruction-codes-mips64.h
+++ b/deps/v8/src/compiler/backend/mips64/instruction-codes-mips64.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_COMPILER_MIPS64_INSTRUCTION_CODES_MIPS64_H_
-#define V8_COMPILER_MIPS64_INSTRUCTION_CODES_MIPS64_H_
+#ifndef V8_COMPILER_BACKEND_MIPS64_INSTRUCTION_CODES_MIPS64_H_
+#define V8_COMPILER_BACKEND_MIPS64_INSTRUCTION_CODES_MIPS64_H_
namespace v8 {
namespace internal {
@@ -362,9 +362,8 @@ namespace compiler {
V(MRI) /* [%r0 + K] */ \
V(MRR) /* [%r0 + %r1] */
-
} // namespace compiler
} // namespace internal
} // namespace v8
-#endif // V8_COMPILER_MIPS64_INSTRUCTION_CODES_MIPS64_H_
+#endif // V8_COMPILER_BACKEND_MIPS64_INSTRUCTION_CODES_MIPS64_H_
diff --git a/deps/v8/src/compiler/mips64/instruction-scheduler-mips64.cc b/deps/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc
index 8fe669fe02..a3031cf698 100644
--- a/deps/v8/src/compiler/mips64/instruction-scheduler-mips64.cc
+++ b/deps/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc
@@ -2,7 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/compiler/instruction-scheduler.h"
+#include "src/compiler/backend/instruction-scheduler.h"
+#include "src/macro-assembler.h"
namespace v8 {
namespace internal {
@@ -724,7 +725,7 @@ int AssertLatency() { return 1; }
int PrepareCallCFunctionLatency() {
int frame_alignment = TurboAssembler::ActivationFrameAlignment();
- if (frame_alignment > kPointerSize) {
+ if (frame_alignment > kSystemPointerSize) {
return 1 + DsubuLatency(false) + AndLatency(false) + 1;
} else {
return DsubuLatency(false);
@@ -887,7 +888,7 @@ int PopCallerSavedLatency(SaveFPRegsMode fp_mode) {
int CallCFunctionHelperLatency() {
// Estimated.
int latency = AndLatency(false) + Latency::BRANCH + 2 + CallLatency();
- if (base::OS::ActivationFrameAlignment() > kPointerSize) {
+ if (base::OS::ActivationFrameAlignment() > kSystemPointerSize) {
latency++;
} else {
latency += DadduLatency(false);
@@ -944,6 +945,7 @@ int TryInlineTruncateDoubleToILatency() {
int CallStubDelayedLatency() { return 1 + CallLatency(); }
int TruncateDoubleToIDelayedLatency() {
+ // TODO(mips): This no longer reflects how TruncateDoubleToI is called.
return TryInlineTruncateDoubleToILatency() + 1 + DsubuLatency(false) +
Sdc1Latency() + CallStubDelayedLatency() + DadduLatency(false) + 1;
}
diff --git a/deps/v8/src/compiler/mips64/instruction-selector-mips64.cc b/deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc
index f27ad218fd..95e52452d7 100644
--- a/deps/v8/src/compiler/mips64/instruction-selector-mips64.cc
+++ b/deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc
@@ -4,7 +4,7 @@
#include "src/base/adapters.h"
#include "src/base/bits.h"
-#include "src/compiler/instruction-selector-impl.h"
+#include "src/compiler/backend/instruction-selector-impl.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"
@@ -17,7 +17,6 @@ namespace compiler {
#define TRACE() PrintF("instr_sel: %s at line %d\n", __FUNCTION__, __LINE__)
-
// Adds Mips-specific methods for generating InstructionOperands.
class Mips64OperandGenerator final : public OperandGenerator {
public:
@@ -119,7 +118,6 @@ class Mips64OperandGenerator final : public OperandGenerator {
}
};
-
static void VisitRR(InstructionSelector* selector, ArchOpcode opcode,
Node* node) {
Mips64OperandGenerator g(selector);
@@ -383,7 +381,7 @@ void InstructionSelector::VisitLoad(Node* node) {
break;
case MachineRepresentation::kTaggedSigned: // Fall through.
case MachineRepresentation::kTaggedPointer: // Fall through.
- case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kTagged: // Fall through.
case MachineRepresentation::kWord64:
opcode = kMips64Ld;
break;
@@ -468,7 +466,7 @@ void InstructionSelector::VisitStore(Node* node) {
break;
case MachineRepresentation::kTaggedSigned: // Fall through.
case MachineRepresentation::kTaggedPointer: // Fall through.
- case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kTagged: // Fall through.
case MachineRepresentation::kWord64:
opcode = kMips64Sd;
break;
@@ -549,7 +547,6 @@ void InstructionSelector::VisitWord32And(Node* node) {
VisitBinop(this, node, kMips64And32, true, kMips64And32);
}
-
void InstructionSelector::VisitWord64And(Node* node) {
Mips64OperandGenerator g(this);
Int64BinopMatcher m(node);
@@ -604,17 +601,14 @@ void InstructionSelector::VisitWord64And(Node* node) {
VisitBinop(this, node, kMips64And, true, kMips64And);
}
-
void InstructionSelector::VisitWord32Or(Node* node) {
VisitBinop(this, node, kMips64Or32, true, kMips64Or32);
}
-
void InstructionSelector::VisitWord64Or(Node* node) {
VisitBinop(this, node, kMips64Or, true, kMips64Or);
}
-
void InstructionSelector::VisitWord32Xor(Node* node) {
Int32BinopMatcher m(node);
if (m.left().IsWord32Or() && CanCover(node, m.left().node()) &&
@@ -638,7 +632,6 @@ void InstructionSelector::VisitWord32Xor(Node* node) {
VisitBinop(this, node, kMips64Xor32, true, kMips64Xor32);
}
-
void InstructionSelector::VisitWord64Xor(Node* node) {
Int64BinopMatcher m(node);
if (m.left().IsWord64Or() && CanCover(node, m.left().node()) &&
@@ -662,7 +655,6 @@ void InstructionSelector::VisitWord64Xor(Node* node) {
VisitBinop(this, node, kMips64Xor, true, kMips64Xor);
}
-
void InstructionSelector::VisitWord32Shl(Node* node) {
Int32BinopMatcher m(node);
if (m.left().IsWord32And() && CanCover(node, m.left().node()) &&
@@ -693,7 +685,6 @@ void InstructionSelector::VisitWord32Shl(Node* node) {
VisitRRO(this, kMips64Shl, node);
}
-
void InstructionSelector::VisitWord32Shr(Node* node) {
Int32BinopMatcher m(node);
if (m.left().IsWord32And() && m.right().HasValue()) {
@@ -718,7 +709,6 @@ void InstructionSelector::VisitWord32Shr(Node* node) {
VisitRRO(this, kMips64Shr, node);
}
-
void InstructionSelector::VisitWord32Sar(Node* node) {
Int32BinopMatcher m(node);
if (m.left().IsWord32Shl() && CanCover(node, m.left().node())) {
@@ -745,7 +735,6 @@ void InstructionSelector::VisitWord32Sar(Node* node) {
VisitRRO(this, kMips64Sar, node);
}
-
void InstructionSelector::VisitWord64Shl(Node* node) {
Mips64OperandGenerator g(this);
Int64BinopMatcher m(node);
@@ -786,7 +775,6 @@ void InstructionSelector::VisitWord64Shl(Node* node) {
VisitRRO(this, kMips64Dshl, node);
}
-
void InstructionSelector::VisitWord64Shr(Node* node) {
Int64BinopMatcher m(node);
if (m.left().IsWord64And() && m.right().HasValue()) {
@@ -811,26 +799,21 @@ void InstructionSelector::VisitWord64Shr(Node* node) {
VisitRRO(this, kMips64Dshr, node);
}
-
void InstructionSelector::VisitWord64Sar(Node* node) {
if (TryEmitExtendingLoad(this, node, node)) return;
VisitRRO(this, kMips64Dsar, node);
}
-
void InstructionSelector::VisitWord32Ror(Node* node) {
VisitRRO(this, kMips64Ror, node);
}
-
void InstructionSelector::VisitWord32Clz(Node* node) {
VisitRR(this, kMips64Clz, node);
}
-
void InstructionSelector::VisitWord32ReverseBits(Node* node) { UNREACHABLE(); }
-
void InstructionSelector::VisitWord64ReverseBits(Node* node) { UNREACHABLE(); }
void InstructionSelector::VisitWord64ReverseBytes(Node* node) {
@@ -850,113 +833,121 @@ void InstructionSelector::VisitWord32Ctz(Node* node) {
Emit(kMips64Ctz, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
}
-
void InstructionSelector::VisitWord64Ctz(Node* node) {
Mips64OperandGenerator g(this);
Emit(kMips64Dctz, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
}
-
void InstructionSelector::VisitWord32Popcnt(Node* node) {
Mips64OperandGenerator g(this);
Emit(kMips64Popcnt, g.DefineAsRegister(node),
g.UseRegister(node->InputAt(0)));
}
-
void InstructionSelector::VisitWord64Popcnt(Node* node) {
Mips64OperandGenerator g(this);
Emit(kMips64Dpopcnt, g.DefineAsRegister(node),
g.UseRegister(node->InputAt(0)));
}
-
void InstructionSelector::VisitWord64Ror(Node* node) {
VisitRRO(this, kMips64Dror, node);
}
-
void InstructionSelector::VisitWord64Clz(Node* node) {
VisitRR(this, kMips64Dclz, node);
}
-
void InstructionSelector::VisitInt32Add(Node* node) {
Mips64OperandGenerator g(this);
Int32BinopMatcher m(node);
- // Select Lsa for (left + (left_of_right << imm)).
- if (m.right().opcode() == IrOpcode::kWord32Shl &&
- CanCover(node, m.left().node()) && CanCover(node, m.right().node())) {
- Int32BinopMatcher mright(m.right().node());
- if (mright.right().HasValue() && !m.left().HasValue()) {
- int32_t shift_value = static_cast<int32_t>(mright.right().Value());
- Emit(kMips64Lsa, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
- g.UseRegister(mright.left().node()), g.TempImmediate(shift_value));
- return;
+ if (kArchVariant == kMips64r6) {
+ // Select Lsa for (left + (left_of_right << imm)).
+ if (m.right().opcode() == IrOpcode::kWord32Shl &&
+ CanCover(node, m.left().node()) && CanCover(node, m.right().node())) {
+ Int32BinopMatcher mright(m.right().node());
+ if (mright.right().HasValue() && !m.left().HasValue()) {
+ int32_t shift_value = static_cast<int32_t>(mright.right().Value());
+ if (shift_value > 0 && shift_value <= 31) {
+ Emit(kMips64Lsa, g.DefineAsRegister(node),
+ g.UseRegister(m.left().node()),
+ g.UseRegister(mright.left().node()),
+ g.TempImmediate(shift_value));
+ return;
+ }
+ }
}
- }
- // Select Lsa for ((left_of_left << imm) + right).
- if (m.left().opcode() == IrOpcode::kWord32Shl &&
- CanCover(node, m.right().node()) && CanCover(node, m.left().node())) {
- Int32BinopMatcher mleft(m.left().node());
- if (mleft.right().HasValue() && !m.right().HasValue()) {
- int32_t shift_value = static_cast<int32_t>(mleft.right().Value());
- Emit(kMips64Lsa, g.DefineAsRegister(node),
- g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
- g.TempImmediate(shift_value));
- return;
+ // Select Lsa for ((left_of_left << imm) + right).
+ if (m.left().opcode() == IrOpcode::kWord32Shl &&
+ CanCover(node, m.right().node()) && CanCover(node, m.left().node())) {
+ Int32BinopMatcher mleft(m.left().node());
+ if (mleft.right().HasValue() && !m.right().HasValue()) {
+ int32_t shift_value = static_cast<int32_t>(mleft.right().Value());
+ if (shift_value > 0 && shift_value <= 31) {
+ Emit(kMips64Lsa, g.DefineAsRegister(node),
+ g.UseRegister(m.right().node()),
+ g.UseRegister(mleft.left().node()),
+ g.TempImmediate(shift_value));
+ return;
+ }
+ }
}
}
+
VisitBinop(this, node, kMips64Add, true, kMips64Add);
}
-
void InstructionSelector::VisitInt64Add(Node* node) {
Mips64OperandGenerator g(this);
Int64BinopMatcher m(node);
- // Select Dlsa for (left + (left_of_right << imm)).
- if (m.right().opcode() == IrOpcode::kWord64Shl &&
- CanCover(node, m.left().node()) && CanCover(node, m.right().node())) {
- Int64BinopMatcher mright(m.right().node());
- if (mright.right().HasValue() && !m.left().HasValue()) {
- int32_t shift_value = static_cast<int32_t>(mright.right().Value());
- Emit(kMips64Dlsa, g.DefineAsRegister(node),
- g.UseRegister(m.left().node()), g.UseRegister(mright.left().node()),
- g.TempImmediate(shift_value));
- return;
+ if (kArchVariant == kMips64r6) {
+ // Select Dlsa for (left + (left_of_right << imm)).
+ if (m.right().opcode() == IrOpcode::kWord64Shl &&
+ CanCover(node, m.left().node()) && CanCover(node, m.right().node())) {
+ Int64BinopMatcher mright(m.right().node());
+ if (mright.right().HasValue() && !m.left().HasValue()) {
+ int32_t shift_value = static_cast<int32_t>(mright.right().Value());
+ if (shift_value > 0 && shift_value <= 31) {
+ Emit(kMips64Dlsa, g.DefineAsRegister(node),
+ g.UseRegister(m.left().node()),
+ g.UseRegister(mright.left().node()),
+ g.TempImmediate(shift_value));
+ return;
+ }
+ }
}
- }
- // Select Dlsa for ((left_of_left << imm) + right).
- if (m.left().opcode() == IrOpcode::kWord64Shl &&
- CanCover(node, m.right().node()) && CanCover(node, m.left().node())) {
- Int64BinopMatcher mleft(m.left().node());
- if (mleft.right().HasValue() && !m.right().HasValue()) {
- int32_t shift_value = static_cast<int32_t>(mleft.right().Value());
- Emit(kMips64Dlsa, g.DefineAsRegister(node),
- g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
- g.TempImmediate(shift_value));
- return;
+ // Select Dlsa for ((left_of_left << imm) + right).
+ if (m.left().opcode() == IrOpcode::kWord64Shl &&
+ CanCover(node, m.right().node()) && CanCover(node, m.left().node())) {
+ Int64BinopMatcher mleft(m.left().node());
+ if (mleft.right().HasValue() && !m.right().HasValue()) {
+ int32_t shift_value = static_cast<int32_t>(mleft.right().Value());
+ if (shift_value > 0 && shift_value <= 31) {
+ Emit(kMips64Dlsa, g.DefineAsRegister(node),
+ g.UseRegister(m.right().node()),
+ g.UseRegister(mleft.left().node()),
+ g.TempImmediate(shift_value));
+ return;
+ }
+ }
}
}
VisitBinop(this, node, kMips64Dadd, true, kMips64Dadd);
}
-
void InstructionSelector::VisitInt32Sub(Node* node) {
VisitBinop(this, node, kMips64Sub);
}
-
void InstructionSelector::VisitInt64Sub(Node* node) {
VisitBinop(this, node, kMips64Dsub);
}
-
void InstructionSelector::VisitInt32Mul(Node* node) {
Mips64OperandGenerator g(this);
Int32BinopMatcher m(node);
@@ -968,7 +959,8 @@ void InstructionSelector::VisitInt32Mul(Node* node) {
g.TempImmediate(WhichPowerOf2(value)));
return;
}
- if (base::bits::IsPowerOfTwo(value - 1)) {
+ if (base::bits::IsPowerOfTwo(value - 1) && kArchVariant == kMips64r6 &&
+ value - 1 > 0 && value - 1 <= 31) {
Emit(kMips64Lsa, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
g.UseRegister(m.left().node()),
g.TempImmediate(WhichPowerOf2(value - 1)));
@@ -1002,17 +994,14 @@ void InstructionSelector::VisitInt32Mul(Node* node) {
VisitRRR(this, kMips64Mul, node);
}
-
void InstructionSelector::VisitInt32MulHigh(Node* node) {
VisitRRR(this, kMips64MulHigh, node);
}
-
void InstructionSelector::VisitUint32MulHigh(Node* node) {
VisitRRR(this, kMips64MulHighU, node);
}
-
void InstructionSelector::VisitInt64Mul(Node* node) {
Mips64OperandGenerator g(this);
Int64BinopMatcher m(node);
@@ -1025,7 +1014,8 @@ void InstructionSelector::VisitInt64Mul(Node* node) {
g.TempImmediate(WhichPowerOf2(value)));
return;
}
- if (base::bits::IsPowerOfTwo(value - 1)) {
+ if (base::bits::IsPowerOfTwo(value - 1) && kArchVariant == kMips64r6 &&
+ value - 1 > 0 && value - 1 <= 31) {
// The Dlsa macro handles shift values that fall out of range.
Emit(kMips64Dlsa, g.DefineAsRegister(node),
g.UseRegister(m.left().node()), g.UseRegister(m.left().node()),
@@ -1046,7 +1036,6 @@ void InstructionSelector::VisitInt64Mul(Node* node) {
g.UseRegister(m.right().node()));
}
-
void InstructionSelector::VisitInt32Div(Node* node) {
Mips64OperandGenerator g(this);
Int32BinopMatcher m(node);
@@ -1069,7 +1058,6 @@ void InstructionSelector::VisitInt32Div(Node* node) {
g.UseRegister(m.right().node()));
}
-
void InstructionSelector::VisitUint32Div(Node* node) {
Mips64OperandGenerator g(this);
Int32BinopMatcher m(node);
@@ -1077,7 +1065,6 @@ void InstructionSelector::VisitUint32Div(Node* node) {
g.UseRegister(m.right().node()));
}
-
void InstructionSelector::VisitInt32Mod(Node* node) {
Mips64OperandGenerator g(this);
Int32BinopMatcher m(node);
@@ -1100,7 +1087,6 @@ void InstructionSelector::VisitInt32Mod(Node* node) {
g.UseRegister(m.right().node()));
}
-
void InstructionSelector::VisitUint32Mod(Node* node) {
Mips64OperandGenerator g(this);
Int32BinopMatcher m(node);
@@ -1108,7 +1094,6 @@ void InstructionSelector::VisitUint32Mod(Node* node) {
g.UseRegister(m.right().node()));
}
-
void InstructionSelector::VisitInt64Div(Node* node) {
Mips64OperandGenerator g(this);
Int64BinopMatcher m(node);
@@ -1116,7 +1101,6 @@ void InstructionSelector::VisitInt64Div(Node* node) {
g.UseRegister(m.right().node()));
}
-
void InstructionSelector::VisitUint64Div(Node* node) {
Mips64OperandGenerator g(this);
Int64BinopMatcher m(node);
@@ -1124,7 +1108,6 @@ void InstructionSelector::VisitUint64Div(Node* node) {
g.UseRegister(m.right().node()));
}
-
void InstructionSelector::VisitInt64Mod(Node* node) {
Mips64OperandGenerator g(this);
Int64BinopMatcher m(node);
@@ -1132,7 +1115,6 @@ void InstructionSelector::VisitInt64Mod(Node* node) {
g.UseRegister(m.right().node()));
}
-
void InstructionSelector::VisitUint64Mod(Node* node) {
Mips64OperandGenerator g(this);
Int64BinopMatcher m(node);
@@ -1140,22 +1122,18 @@ void InstructionSelector::VisitUint64Mod(Node* node) {
g.UseRegister(m.right().node()));
}
-
void InstructionSelector::VisitChangeFloat32ToFloat64(Node* node) {
VisitRR(this, kMips64CvtDS, node);
}
-
void InstructionSelector::VisitRoundInt32ToFloat32(Node* node) {
VisitRR(this, kMips64CvtSW, node);
}
-
void InstructionSelector::VisitRoundUint32ToFloat32(Node* node) {
VisitRR(this, kMips64CvtSUw, node);
}
-
void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
VisitRR(this, kMips64CvtDW, node);
}
@@ -1168,17 +1146,14 @@ void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
VisitRR(this, kMips64CvtDUw, node);
}
-
void InstructionSelector::VisitTruncateFloat32ToInt32(Node* node) {
VisitRR(this, kMips64TruncWS, node);
}
-
void InstructionSelector::VisitTruncateFloat32ToUint32(Node* node) {
VisitRR(this, kMips64TruncUwS, node);
}
-
void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
Mips64OperandGenerator g(this);
Node* value = node->InputAt(0);
@@ -1258,6 +1233,10 @@ void InstructionSelector::VisitTruncateFloat64ToUint32(Node* node) {
VisitRR(this, kMips64TruncUwD, node);
}
+void InstructionSelector::VisitTruncateFloat64ToInt64(Node* node) {
+ VisitRR(this, kMips64TruncLD, node);
+}
+
void InstructionSelector::VisitTryTruncateFloat32ToInt64(Node* node) {
Mips64OperandGenerator g(this);
InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
@@ -1273,7 +1252,6 @@ void InstructionSelector::VisitTryTruncateFloat32ToInt64(Node* node) {
this->Emit(kMips64TruncLS, output_count, outputs, 1, inputs);
}
-
void InstructionSelector::VisitTryTruncateFloat64ToInt64(Node* node) {
Mips64OperandGenerator g(this);
InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
@@ -1289,7 +1267,6 @@ void InstructionSelector::VisitTryTruncateFloat64ToInt64(Node* node) {
Emit(kMips64TruncLD, output_count, outputs, 1, inputs);
}
-
void InstructionSelector::VisitTryTruncateFloat32ToUint64(Node* node) {
Mips64OperandGenerator g(this);
InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
@@ -1305,7 +1282,6 @@ void InstructionSelector::VisitTryTruncateFloat32ToUint64(Node* node) {
Emit(kMips64TruncUlS, output_count, outputs, 1, inputs);
}
-
void InstructionSelector::VisitTryTruncateFloat64ToUint64(Node* node) {
Mips64OperandGenerator g(this);
@@ -1322,7 +1298,6 @@ void InstructionSelector::VisitTryTruncateFloat64ToUint64(Node* node) {
Emit(kMips64TruncUlD, output_count, outputs, 1, inputs);
}
-
void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
Node* value = node->InputAt(0);
if (value->opcode() == IrOpcode::kLoad && CanCover(node, value)) {
@@ -1352,7 +1327,6 @@ void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
}
}
-
void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
Mips64OperandGenerator g(this);
Node* value = node->InputAt(0);
@@ -1387,7 +1361,6 @@ void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
g.TempImmediate(0), g.TempImmediate(32));
}
-
void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
Mips64OperandGenerator g(this);
Node* value = node->InputAt(0);
@@ -1416,7 +1389,6 @@ void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
g.TempImmediate(0), g.TempImmediate(32));
}
-
void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
Mips64OperandGenerator g(this);
Node* value = node->InputAt(0);
@@ -1443,32 +1415,26 @@ void InstructionSelector::VisitRoundInt64ToFloat32(Node* node) {
VisitRR(this, kMips64CvtSL, node);
}
-
void InstructionSelector::VisitRoundInt64ToFloat64(Node* node) {
VisitRR(this, kMips64CvtDL, node);
}
-
void InstructionSelector::VisitRoundUint64ToFloat32(Node* node) {
VisitRR(this, kMips64CvtSUl, node);
}
-
void InstructionSelector::VisitRoundUint64ToFloat64(Node* node) {
VisitRR(this, kMips64CvtDUl, node);
}
-
void InstructionSelector::VisitBitcastFloat32ToInt32(Node* node) {
VisitRR(this, kMips64Float64ExtractLowWord32, node);
}
-
void InstructionSelector::VisitBitcastFloat64ToInt64(Node* node) {
VisitRR(this, kMips64BitcastDL, node);
}
-
void InstructionSelector::VisitBitcastInt32ToFloat32(Node* node) {
Mips64OperandGenerator g(this);
Emit(kMips64Float64InsertLowWord32, g.DefineAsRegister(node),
@@ -1476,26 +1442,22 @@ void InstructionSelector::VisitBitcastInt32ToFloat32(Node* node) {
g.UseRegister(node->InputAt(0)));
}
-
void InstructionSelector::VisitBitcastInt64ToFloat64(Node* node) {
VisitRR(this, kMips64BitcastLD, node);
}
-
void InstructionSelector::VisitFloat32Add(Node* node) {
// Optimization with Madd.S(z, x, y) is intentionally removed.
// See explanation for madd_s in assembler-mips64.cc.
VisitRRR(this, kMips64AddS, node);
}
-
void InstructionSelector::VisitFloat64Add(Node* node) {
// Optimization with Madd.D(z, x, y) is intentionally removed.
// See explanation for madd_d in assembler-mips64.cc.
VisitRRR(this, kMips64AddD, node);
}
-
void InstructionSelector::VisitFloat32Sub(Node* node) {
// Optimization with Msub.S(z, x, y) is intentionally removed.
// See explanation for madd_s in assembler-mips64.cc.
@@ -1512,27 +1474,23 @@ void InstructionSelector::VisitFloat32Mul(Node* node) {
VisitRRR(this, kMips64MulS, node);
}
-
void InstructionSelector::VisitFloat64Mul(Node* node) {
VisitRRR(this, kMips64MulD, node);
}
-
void InstructionSelector::VisitFloat32Div(Node* node) {
VisitRRR(this, kMips64DivS, node);
}
-
void InstructionSelector::VisitFloat64Div(Node* node) {
VisitRRR(this, kMips64DivD, node);
}
-
void InstructionSelector::VisitFloat64Mod(Node* node) {
Mips64OperandGenerator g(this);
Emit(kMips64ModD, g.DefineAsFixed(node, f0),
- g.UseFixed(node->InputAt(0), f12),
- g.UseFixed(node->InputAt(1), f14))->MarkAsCall();
+ g.UseFixed(node->InputAt(0), f12), g.UseFixed(node->InputAt(1), f14))
+ ->MarkAsCall();
}
void InstructionSelector::VisitFloat32Max(Node* node) {
@@ -1559,12 +1517,10 @@ void InstructionSelector::VisitFloat64Min(Node* node) {
g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
}
-
void InstructionSelector::VisitFloat32Abs(Node* node) {
VisitRR(this, kMips64AbsS, node);
}
-
void InstructionSelector::VisitFloat64Abs(Node* node) {
VisitRR(this, kMips64AbsD, node);
}
@@ -1573,52 +1529,42 @@ void InstructionSelector::VisitFloat32Sqrt(Node* node) {
VisitRR(this, kMips64SqrtS, node);
}
-
void InstructionSelector::VisitFloat64Sqrt(Node* node) {
VisitRR(this, kMips64SqrtD, node);
}
-
void InstructionSelector::VisitFloat32RoundDown(Node* node) {
VisitRR(this, kMips64Float32RoundDown, node);
}
-
void InstructionSelector::VisitFloat64RoundDown(Node* node) {
VisitRR(this, kMips64Float64RoundDown, node);
}
-
void InstructionSelector::VisitFloat32RoundUp(Node* node) {
VisitRR(this, kMips64Float32RoundUp, node);
}
-
void InstructionSelector::VisitFloat64RoundUp(Node* node) {
VisitRR(this, kMips64Float64RoundUp, node);
}
-
void InstructionSelector::VisitFloat32RoundTruncate(Node* node) {
VisitRR(this, kMips64Float32RoundTruncate, node);
}
-
void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
VisitRR(this, kMips64Float64RoundTruncate, node);
}
-
void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
UNREACHABLE();
}
-
void InstructionSelector::VisitFloat32RoundTiesEven(Node* node) {
VisitRR(this, kMips64Float32RoundTiesEven, node);
}
-
void InstructionSelector::VisitFloat64RoundTiesEven(Node* node) {
VisitRR(this, kMips64Float64RoundTiesEven, node);
}
@@ -1661,7 +1607,7 @@ void InstructionSelector::EmitPrepareArguments(
int slot = kCArgSlotCount;
for (PushParameter input : (*arguments)) {
Emit(kMips64StoreToStackSlot, g.NoOutput(), g.UseRegister(input.node),
- g.TempImmediate(slot << kPointerSizeLog2));
+ g.TempImmediate(slot << kSystemPointerSizeLog2));
++slot;
}
} else {
@@ -1675,13 +1621,13 @@ void InstructionSelector::EmitPrepareArguments(
}
}
Emit(kMips64StackClaim, g.NoOutput(),
- g.TempImmediate(stack_size << kPointerSizeLog2));
+ g.TempImmediate(stack_size << kSystemPointerSizeLog2));
}
for (size_t n = 0; n < arguments->size(); ++n) {
PushParameter input = (*arguments)[n];
if (input.node) {
Emit(kMips64StoreToStackSlot, g.NoOutput(), g.UseRegister(input.node),
- g.TempImmediate(static_cast<int>(n << kPointerSizeLog2)));
+ g.TempImmediate(static_cast<int>(n << kSystemPointerSizeLog2)));
}
}
}
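
The shifts above convert stack-slot indices into byte offsets. A minimal sketch of the arithmetic, assuming a 64-bit target where kSystemPointerSizeLog2 is 3 (the constant name below is an illustrative stand-in):

    #include <cstdint>

    constexpr int kSystemPointerSizeLog2Sketch = 3;  // 64-bit assumption

    constexpr int64_t SlotToByteOffset(int64_t slot) {
      return slot << kSystemPointerSizeLog2Sketch;  // slot * 8
    }

    static_assert(SlotToByteOffset(5) == 40, "slot 5 is 40 bytes from sp");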
@@ -1740,7 +1686,7 @@ void InstructionSelector::VisitUnalignedLoad(Node* node) {
break;
case MachineRepresentation::kTaggedSigned: // Fall through.
case MachineRepresentation::kTaggedPointer: // Fall through.
- case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kTagged: // Fall through.
case MachineRepresentation::kWord64:
opcode = kMips64Uld;
break;
@@ -1792,7 +1738,7 @@ void InstructionSelector::VisitUnalignedStore(Node* node) {
break;
case MachineRepresentation::kTaggedSigned: // Fall through.
case MachineRepresentation::kTaggedPointer: // Fall through.
- case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kTagged: // Fall through.
case MachineRepresentation::kWord64:
opcode = kMips64Usd;
break;
@@ -1827,7 +1773,6 @@ static void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
selector->EmitWithContinuation(opcode, left, right, cont);
}
-
// Shared routine for multiple float32 compare operations.
void VisitFloat32Compare(InstructionSelector* selector, Node* node,
FlagsContinuation* cont) {
@@ -1842,7 +1787,6 @@ void VisitFloat32Compare(InstructionSelector* selector, Node* node,
VisitCompare(selector, kMips64CmpS, lhs, rhs, cont);
}
-
// Shared routine for multiple float64 compare operations.
void VisitFloat64Compare(InstructionSelector* selector, Node* node,
FlagsContinuation* cont) {
@@ -1857,7 +1801,6 @@ void VisitFloat64Compare(InstructionSelector* selector, Node* node,
VisitCompare(selector, kMips64CmpD, lhs, rhs, cont);
}
-
// Shared routine for multiple word compare operations.
void VisitWordCompare(InstructionSelector* selector, Node* node,
InstructionCode opcode, FlagsContinuation* cont,
@@ -2013,14 +1956,11 @@ void VisitWord32Compare(InstructionSelector* selector, Node* node,
}
}
-
void VisitWord64Compare(InstructionSelector* selector, Node* node,
FlagsContinuation* cont) {
VisitWordCompare(selector, node, kMips64Cmp, cont, false);
}
-
-
void EmitWordCompareZero(InstructionSelector* selector, Node* value,
FlagsContinuation* cont) {
Mips64OperandGenerator g(selector);
@@ -2292,7 +2232,6 @@ void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
return EmitBinarySearchSwitch(sw, value_operand);
}
-
void InstructionSelector::VisitWord32Equal(Node* const node) {
FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
Int32BinopMatcher m(node);
@@ -2303,33 +2242,28 @@ void InstructionSelector::VisitWord32Equal(Node* const node) {
VisitWord32Compare(this, node, &cont);
}
-
void InstructionSelector::VisitInt32LessThan(Node* node) {
FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
VisitWord32Compare(this, node, &cont);
}
-
void InstructionSelector::VisitInt32LessThanOrEqual(Node* node) {
FlagsContinuation cont =
FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
VisitWord32Compare(this, node, &cont);
}
-
void InstructionSelector::VisitUint32LessThan(Node* node) {
FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
VisitWord32Compare(this, node, &cont);
}
-
void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) {
FlagsContinuation cont =
FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
VisitWord32Compare(this, node, &cont);
}
-
void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
@@ -2339,7 +2273,6 @@ void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
VisitBinop(this, node, kMips64Dadd, &cont);
}
-
void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
@@ -2367,7 +2300,6 @@ void InstructionSelector::VisitInt64AddWithOverflow(Node* node) {
VisitBinop(this, node, kMips64DaddOvf, &cont);
}
-
void InstructionSelector::VisitInt64SubWithOverflow(Node* node) {
if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
@@ -2377,7 +2309,6 @@ void InstructionSelector::VisitInt64SubWithOverflow(Node* node) {
VisitBinop(this, node, kMips64DsubOvf, &cont);
}
-
void InstructionSelector::VisitWord64Equal(Node* const node) {
FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
Int64BinopMatcher m(node);
@@ -2388,76 +2319,64 @@ void InstructionSelector::VisitWord64Equal(Node* const node) {
VisitWord64Compare(this, node, &cont);
}
-
void InstructionSelector::VisitInt64LessThan(Node* node) {
FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
VisitWord64Compare(this, node, &cont);
}
-
void InstructionSelector::VisitInt64LessThanOrEqual(Node* node) {
FlagsContinuation cont =
FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
VisitWord64Compare(this, node, &cont);
}
-
void InstructionSelector::VisitUint64LessThan(Node* node) {
FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
VisitWord64Compare(this, node, &cont);
}
-
void InstructionSelector::VisitUint64LessThanOrEqual(Node* node) {
FlagsContinuation cont =
FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
VisitWord64Compare(this, node, &cont);
}
-
void InstructionSelector::VisitFloat32Equal(Node* node) {
FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
VisitFloat32Compare(this, node, &cont);
}
-
void InstructionSelector::VisitFloat32LessThan(Node* node) {
FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
VisitFloat32Compare(this, node, &cont);
}
-
void InstructionSelector::VisitFloat32LessThanOrEqual(Node* node) {
FlagsContinuation cont =
FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
VisitFloat32Compare(this, node, &cont);
}
-
void InstructionSelector::VisitFloat64Equal(Node* node) {
FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
VisitFloat64Compare(this, node, &cont);
}
-
void InstructionSelector::VisitFloat64LessThan(Node* node) {
FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
VisitFloat64Compare(this, node, &cont);
}
-
void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
FlagsContinuation cont =
FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
VisitFloat64Compare(this, node, &cont);
}
-
void InstructionSelector::VisitFloat64ExtractLowWord32(Node* node) {
VisitRR(this, kMips64Float64ExtractLowWord32, node);
}
-
void InstructionSelector::VisitFloat64ExtractHighWord32(Node* node) {
VisitRR(this, kMips64Float64ExtractHighWord32, node);
}
@@ -2474,7 +2393,6 @@ void InstructionSelector::VisitFloat64InsertLowWord32(Node* node) {
g.UseRegister(left), g.UseRegister(right));
}
-
void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
Mips64OperandGenerator g(this);
Node* left = node->InputAt(0);
diff --git a/deps/v8/src/compiler/move-optimizer.cc b/deps/v8/src/compiler/backend/move-optimizer.cc
index cf6edc2b67..3352271e1b 100644
--- a/deps/v8/src/compiler/move-optimizer.cc
+++ b/deps/v8/src/compiler/backend/move-optimizer.cc
@@ -2,7 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/compiler/move-optimizer.h"
+#include "src/compiler/backend/move-optimizer.h"
+
+#include "src/register-configuration.h"
namespace v8 {
namespace internal {
@@ -364,13 +366,11 @@ void MoveOptimizer::CompressBlock(InstructionBlock* block) {
}
}
-
const Instruction* MoveOptimizer::LastInstruction(
const InstructionBlock* block) const {
return code()->instructions()[block->last_instruction_index()];
}
-
void MoveOptimizer::OptimizeMerge(InstructionBlock* block) {
DCHECK_LT(1, block->PredecessorCount());
   // Ensure that the last instruction in all incoming blocks doesn't contain
@@ -495,14 +495,12 @@ void MoveOptimizer::OptimizeMerge(InstructionBlock* block) {
CompressBlock(block);
}
-
namespace {
bool IsSlot(const InstructionOperand& op) {
return op.IsStackSlot() || op.IsFPStackSlot();
}
-
bool LoadCompare(const MoveOperands* a, const MoveOperands* b) {
if (!a->source().EqualsCanonicalized(b->source())) {
return a->source().CompareCanonicalized(b->source());
@@ -514,7 +512,6 @@ bool LoadCompare(const MoveOperands* a, const MoveOperands* b) {
} // namespace
-
// Split multiple loads of the same constant or stack slot off into the second
// slot and keep remaining moves in the first slot.
void MoveOptimizer::FinalizeMoves(Instruction* instr) {
diff --git a/deps/v8/src/compiler/move-optimizer.h b/deps/v8/src/compiler/backend/move-optimizer.h
index c78da1e517..6da351f0ac 100644
--- a/deps/v8/src/compiler/move-optimizer.h
+++ b/deps/v8/src/compiler/backend/move-optimizer.h
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_COMPILER_MOVE_OPTIMIZER_H_
-#define V8_COMPILER_MOVE_OPTIMIZER_H_
+#ifndef V8_COMPILER_BACKEND_MOVE_OPTIMIZER_H_
+#define V8_COMPILER_BACKEND_MOVE_OPTIMIZER_H_
-#include "src/compiler/instruction.h"
+#include "src/compiler/backend/instruction.h"
#include "src/globals.h"
#include "src/zone/zone-containers.h"
@@ -65,4 +65,4 @@ class V8_EXPORT_PRIVATE MoveOptimizer final {
} // namespace internal
} // namespace v8
-#endif // V8_COMPILER_MOVE_OPTIMIZER_H_
+#endif // V8_COMPILER_BACKEND_MOVE_OPTIMIZER_H_
diff --git a/deps/v8/src/compiler/backend/ppc/OWNERS b/deps/v8/src/compiler/backend/ppc/OWNERS
new file mode 100644
index 0000000000..6d1a8fc472
--- /dev/null
+++ b/deps/v8/src/compiler/backend/ppc/OWNERS
@@ -0,0 +1,4 @@
+jyan@ca.ibm.com
+joransiu@ca.ibm.com
+michael_dawson@ca.ibm.com
+miladfar@ca.ibm.com
\ No newline at end of file
diff --git a/deps/v8/src/compiler/ppc/code-generator-ppc.cc b/deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc
index fd2b2eefdb..b74834df17 100644
--- a/deps/v8/src/compiler/ppc/code-generator-ppc.cc
+++ b/deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc
@@ -2,17 +2,18 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/compiler/code-generator.h"
+#include "src/compiler/backend/code-generator.h"
#include "src/assembler-inl.h"
#include "src/callable.h"
-#include "src/compiler/code-generator-impl.h"
-#include "src/compiler/gap-resolver.h"
+#include "src/compiler/backend/code-generator-impl.h"
+#include "src/compiler/backend/gap-resolver.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/osr.h"
#include "src/double.h"
+#include "src/macro-assembler.h"
#include "src/optimized-compilation-info.h"
-#include "src/ppc/macro-assembler-ppc.h"
+#include "src/wasm/wasm-code-manager.h"
#include "src/wasm/wasm-objects.h"
namespace v8 {
@@ -23,7 +24,6 @@ namespace compiler {
#define kScratchReg r11
-
// Adds PPC-specific methods to convert InstructionOperands.
class PPCOperandConverter final : public InstructionOperandConverter {
public:
@@ -117,19 +117,17 @@ class PPCOperandConverter final : public InstructionOperandConverter {
}
};
-
static inline bool HasRegisterInput(Instruction* instr, size_t index) {
return instr->InputAt(index)->IsRegister();
}
-
namespace {
class OutOfLineRecordWrite final : public OutOfLineCode {
public:
OutOfLineRecordWrite(CodeGenerator* gen, Register object, Register offset,
Register value, Register scratch0, Register scratch1,
- RecordWriteMode mode)
+ RecordWriteMode mode, StubCallMode stub_mode)
: OutOfLineCode(gen),
object_(object),
offset_(offset),
@@ -138,12 +136,13 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
scratch0_(scratch0),
scratch1_(scratch1),
mode_(mode),
+ stub_mode_(stub_mode),
must_save_lr_(!gen->frame_access_state()->has_frame()),
zone_(gen->zone()) {}
OutOfLineRecordWrite(CodeGenerator* gen, Register object, int32_t offset,
Register value, Register scratch0, Register scratch1,
- RecordWriteMode mode)
+ RecordWriteMode mode, StubCallMode stub_mode)
: OutOfLineCode(gen),
object_(object),
offset_(no_reg),
@@ -152,32 +151,10 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
scratch0_(scratch0),
scratch1_(scratch1),
mode_(mode),
+ stub_mode_(stub_mode),
must_save_lr_(!gen->frame_access_state()->has_frame()),
zone_(gen->zone()) {}
- void SaveRegisters(RegList registers) {
- DCHECK_LT(0, NumRegs(registers));
- RegList regs = 0;
- for (int i = 0; i < Register::kNumRegisters; ++i) {
- if ((registers >> i) & 1u) {
- regs |= Register::from_code(i).bit();
- }
- }
-
- __ MultiPush(regs);
- }
-
- void RestoreRegisters(RegList registers) {
- DCHECK_LT(0, NumRegs(registers));
- RegList regs = 0;
- for (int i = 0; i < Register::kNumRegisters; ++i) {
- if ((registers >> i) & 1u) {
- regs |= Register::from_code(i).bit();
- }
- }
- __ MultiPop(regs);
- }
-
void Generate() final {
ConstantPoolUnavailableScope constant_pool_unavailable(tasm());
if (mode_ > RecordWriteMode::kValueIsPointer) {
@@ -202,8 +179,13 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
__ mflr(scratch0_);
__ Push(scratch0_);
}
- __ CallRecordWriteStub(object_, scratch1_, remembered_set_action,
- save_fp_mode);
+ if (stub_mode_ == StubCallMode::kCallWasmRuntimeStub) {
+ __ CallRecordWriteStub(object_, scratch1_, remembered_set_action,
+ save_fp_mode, wasm::WasmCode::kWasmRecordWrite);
+ } else {
+ __ CallRecordWriteStub(object_, scratch1_, remembered_set_action,
+ save_fp_mode);
+ }
if (must_save_lr_) {
// We need to save and restore lr if the frame was elided.
__ Pop(scratch0_);
@@ -219,11 +201,11 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
Register const scratch0_;
Register const scratch1_;
RecordWriteMode const mode_;
+ StubCallMode stub_mode_;
bool must_save_lr_;
Zone* zone_;
};
-
Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) {
switch (condition) {
case kEqual:
@@ -318,7 +300,6 @@ void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen, Instruction* instr,
} \
} while (0)
-
#define ASSEMBLE_BINOP_RC(asm_instr_reg, asm_instr_imm) \
do { \
if (HasRegisterInput(instr, 1)) { \
@@ -330,7 +311,6 @@ void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen, Instruction* instr,
} \
} while (0)
-
#define ASSEMBLE_BINOP_INT_RC(asm_instr_reg, asm_instr_imm) \
do { \
if (HasRegisterInput(instr, 1)) { \
@@ -342,7 +322,6 @@ void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen, Instruction* instr,
} \
} while (0)
-
#define ASSEMBLE_ADD_WITH_OVERFLOW() \
do { \
if (HasRegisterInput(instr, 1)) { \
@@ -354,7 +333,6 @@ void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen, Instruction* instr,
} \
} while (0)
-
#define ASSEMBLE_SUB_WITH_OVERFLOW() \
do { \
if (HasRegisterInput(instr, 1)) { \
@@ -366,7 +344,6 @@ void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen, Instruction* instr,
} \
} while (0)
-
#if V8_TARGET_ARCH_PPC64
#define ASSEMBLE_ADD_WITH_OVERFLOW32() \
do { \
@@ -384,7 +361,6 @@ void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen, Instruction* instr,
#define ASSEMBLE_SUB_WITH_OVERFLOW32 ASSEMBLE_SUB_WITH_OVERFLOW
#endif
-
#define ASSEMBLE_COMPARE(cmp_instr, cmpl_instr) \
do { \
const CRegister cr = cr0; \
@@ -404,7 +380,6 @@ void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen, Instruction* instr,
DCHECK_EQ(SetRC, i.OutputRCBit()); \
} while (0)
-
#define ASSEMBLE_FLOAT_COMPARE(cmp_instr) \
do { \
const CRegister cr = cr0; \
@@ -412,7 +387,6 @@ void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen, Instruction* instr,
DCHECK_EQ(SetRC, i.OutputRCBit()); \
} while (0)
-
#define ASSEMBLE_MODULO(div_instr, mul_instr) \
do { \
const Register scratch = kScratchReg; \
@@ -549,72 +523,64 @@ void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen, Instruction* instr,
DoubleRegister result = i.OutputDoubleRegister(); \
AddressingMode mode = kMode_None; \
MemOperand operand = i.MemoryOperand(&mode); \
+ bool is_atomic = i.InputInt32(2); \
if (mode == kMode_MRI) { \
__ asm_instr(result, operand); \
} else { \
__ asm_instrx(result, operand); \
} \
+ if (is_atomic) __ lwsync(); \
DCHECK_EQ(LeaveRC, i.OutputRCBit()); \
} while (0)
-
#define ASSEMBLE_LOAD_INTEGER(asm_instr, asm_instrx) \
do { \
Register result = i.OutputRegister(); \
AddressingMode mode = kMode_None; \
MemOperand operand = i.MemoryOperand(&mode); \
+ bool is_atomic = i.InputInt32(2); \
if (mode == kMode_MRI) { \
__ asm_instr(result, operand); \
} else { \
__ asm_instrx(result, operand); \
} \
+ if (is_atomic) __ lwsync(); \
DCHECK_EQ(LeaveRC, i.OutputRCBit()); \
} while (0)
-
-#define ASSEMBLE_STORE_FLOAT32() \
+#define ASSEMBLE_STORE_FLOAT(asm_instr, asm_instrx) \
do { \
size_t index = 0; \
AddressingMode mode = kMode_None; \
MemOperand operand = i.MemoryOperand(&mode, &index); \
DoubleRegister value = i.InputDoubleRegister(index); \
+ bool is_atomic = i.InputInt32(3); \
+ if (is_atomic) __ lwsync(); \
     /* frsp removed: the instruction selector has already */     \
     /* checked that the value is kFloat32 */                     \
if (mode == kMode_MRI) { \
- __ stfs(value, operand); \
- } else { \
- __ stfsx(value, operand); \
- } \
- DCHECK_EQ(LeaveRC, i.OutputRCBit()); \
- } while (0)
-
-
-#define ASSEMBLE_STORE_DOUBLE() \
- do { \
- size_t index = 0; \
- AddressingMode mode = kMode_None; \
- MemOperand operand = i.MemoryOperand(&mode, &index); \
- DoubleRegister value = i.InputDoubleRegister(index); \
- if (mode == kMode_MRI) { \
- __ stfd(value, operand); \
+ __ asm_instr(value, operand); \
} else { \
- __ stfdx(value, operand); \
+ __ asm_instrx(value, operand); \
} \
+ if (is_atomic) __ sync(); \
DCHECK_EQ(LeaveRC, i.OutputRCBit()); \
} while (0)
-
#define ASSEMBLE_STORE_INTEGER(asm_instr, asm_instrx) \
do { \
size_t index = 0; \
AddressingMode mode = kMode_None; \
MemOperand operand = i.MemoryOperand(&mode, &index); \
Register value = i.InputRegister(index); \
+ bool is_atomic = i.InputInt32(3); \
+ if (is_atomic) __ lwsync(); \
if (mode == kMode_MRI) { \
__ asm_instr(value, operand); \
} else { \
__ asm_instrx(value, operand); \
} \
+ if (is_atomic) __ sync(); \
DCHECK_EQ(LeaveRC, i.OutputRCBit()); \
} while (0)
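
The is_atomic paths above implement the PPC barrier placement for atomic accesses: a plain load followed by lwsync, and a store bracketed by lwsync before and sync after. A rough C++ analogue of that mapping, with the fences standing in for lwsync/sync (an assumed correspondence, not V8 code):

    #include <atomic>

    int AtomicLoadLike(const std::atomic<int>& slot) {
      // PPC pattern: plain load, then lwsync (acquire-style fence).
      int v = slot.load(std::memory_order_relaxed);
      std::atomic_thread_fence(std::memory_order_acquire);
      return v;
    }

    void AtomicStoreLike(std::atomic<int>& slot, int v) {
      // PPC pattern: lwsync before the store, sync after it.
      std::atomic_thread_fence(std::memory_order_release);
      slot.store(v, std::memory_order_relaxed);
      std::atomic_thread_fence(std::memory_order_seq_cst);
    }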
@@ -625,97 +591,82 @@ void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen, Instruction* instr,
#define CleanUInt32(x)
#endif
-#define ASSEMBLE_ATOMIC_LOAD_INTEGER(asm_instr, asm_instrx) \
- do { \
- Label done; \
- Register result = i.OutputRegister(); \
- AddressingMode mode = kMode_None; \
- MemOperand operand = i.MemoryOperand(&mode); \
- if (mode == kMode_MRI) { \
- __ asm_instr(result, operand); \
- } else { \
- __ asm_instrx(result, operand); \
- } \
- __ lwsync(); \
- } while (0)
-#define ASSEMBLE_ATOMIC_STORE_INTEGER(asm_instr, asm_instrx) \
- do { \
- size_t index = 0; \
- AddressingMode mode = kMode_None; \
- MemOperand operand = i.MemoryOperand(&mode, &index); \
- Register value = i.InputRegister(index); \
- __ lwsync(); \
- if (mode == kMode_MRI) { \
- __ asm_instr(value, operand); \
- } else { \
- __ asm_instrx(value, operand); \
- } \
- __ sync(); \
- DCHECK_EQ(LeaveRC, i.OutputRCBit()); \
- } while (0)
#define ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(load_instr, store_instr) \
do { \
Label exchange; \
+ __ lwsync(); \
__ bind(&exchange); \
__ load_instr(i.OutputRegister(0), \
MemOperand(i.InputRegister(0), i.InputRegister(1))); \
__ store_instr(i.InputRegister(2), \
MemOperand(i.InputRegister(0), i.InputRegister(1))); \
__ bne(&exchange, cr0); \
+ __ sync(); \
} while (0)
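
The macro above is a load-reserved/store-conditional retry loop (lbarx/stbcx and friends), now bracketed by lwsync and sync. A C++ analogue of the loop's behavior, where compare_exchange_weak models the store-conditional that may spuriously fail (illustrative only):

    #include <atomic>

    int ExchangeLike(std::atomic<int>& slot, int desired) {
      int old = slot.load(std::memory_order_relaxed);
      // Retry until the store-conditional succeeds.
      while (!slot.compare_exchange_weak(old, desired,
                                         std::memory_order_seq_cst,
                                         std::memory_order_relaxed)) {
      }
      return old;  // the previous value, as in i.OutputRegister(0)
    }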
#define ASSEMBLE_ATOMIC_BINOP(bin_inst, load_inst, store_inst) \
do { \
MemOperand operand = MemOperand(i.InputRegister(0), i.InputRegister(1)); \
Label binop; \
+ __ lwsync(); \
__ bind(&binop); \
__ load_inst(i.OutputRegister(), operand); \
__ bin_inst(kScratchReg, i.OutputRegister(), i.InputRegister(2)); \
__ store_inst(kScratchReg, operand); \
__ bne(&binop, cr0); \
+ __ sync(); \
} while (false)
-#define ASSEMBLE_ATOMIC_BINOP_SIGN_EXT(bin_inst, load_inst, \
- store_inst, ext_instr) \
- do { \
- MemOperand operand = MemOperand(i.InputRegister(0), i.InputRegister(1)); \
- Label binop; \
- __ bind(&binop); \
- __ load_inst(i.OutputRegister(), operand); \
- __ ext_instr(i.OutputRegister(), i.OutputRegister()); \
- __ bin_inst(i.InputRegister(2), i.OutputRegister(), i.InputRegister(2)); \
- __ store_inst(i.InputRegister(2), operand); \
- __ bne(&binop, cr0); \
+#define ASSEMBLE_ATOMIC_BINOP_SIGN_EXT(bin_inst, load_inst, store_inst, \
+ ext_instr) \
+ do { \
+ MemOperand operand = MemOperand(i.InputRegister(0), i.InputRegister(1)); \
+ Label binop; \
+ __ lwsync(); \
+ __ bind(&binop); \
+ __ load_inst(i.OutputRegister(), operand); \
+ __ ext_instr(i.OutputRegister(), i.OutputRegister()); \
+ __ bin_inst(kScratchReg, i.OutputRegister(), i.InputRegister(2)); \
+ __ store_inst(kScratchReg, operand); \
+ __ bne(&binop, cr0); \
+ __ sync(); \
} while (false)
-#define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE(cmp_inst, load_inst, store_inst) \
- do { \
- MemOperand operand = MemOperand(i.InputRegister(0), i.InputRegister(1)); \
- Label loop; \
- Label exit; \
- __ bind(&loop); \
- __ load_inst(i.OutputRegister(), operand); \
- __ cmp_inst(i.OutputRegister(), i.InputRegister(2), cr0); \
- __ bne(&exit, cr0); \
- __ store_inst(i.InputRegister(3), operand); \
- __ bne(&loop, cr0); \
- __ bind(&exit); \
+#define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE(cmp_inst, load_inst, store_inst, \
+ input_ext) \
+ do { \
+ MemOperand operand = MemOperand(i.InputRegister(0), i.InputRegister(1)); \
+ Label loop; \
+ Label exit; \
+ __ input_ext(r0, i.InputRegister(2)); \
+ __ lwsync(); \
+ __ bind(&loop); \
+ __ load_inst(i.OutputRegister(), operand); \
+ __ cmp_inst(i.OutputRegister(), r0, cr0); \
+ __ bne(&exit, cr0); \
+ __ store_inst(i.InputRegister(3), operand); \
+ __ bne(&loop, cr0); \
+ __ bind(&exit); \
+ __ sync(); \
} while (false)
-#define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_SIGN_EXT(cmp_inst, load_inst, \
- store_inst, ext_instr) \
- do { \
- MemOperand operand = MemOperand(i.InputRegister(0), i.InputRegister(1)); \
- Label loop; \
- Label exit; \
- __ bind(&loop); \
- __ load_inst(i.OutputRegister(), operand); \
- __ ext_instr(i.OutputRegister(), i.OutputRegister()); \
- __ cmp_inst(i.OutputRegister(), i.InputRegister(2)); \
- __ bne(&exit, cr0); \
- __ store_inst(i.InputRegister(3), operand); \
- __ bne(&loop, cr0); \
- __ bind(&exit); \
+#define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_SIGN_EXT(cmp_inst, load_inst, \
+ store_inst, ext_instr) \
+ do { \
+ MemOperand operand = MemOperand(i.InputRegister(0), i.InputRegister(1)); \
+ Label loop; \
+ Label exit; \
+ __ ext_instr(r0, i.InputRegister(2)); \
+ __ lwsync(); \
+ __ bind(&loop); \
+ __ load_inst(i.OutputRegister(), operand); \
+ __ ext_instr(i.OutputRegister(), i.OutputRegister()); \
+ __ cmp_inst(i.OutputRegister(), r0, cr0); \
+ __ bne(&exit, cr0); \
+ __ store_inst(i.InputRegister(3), operand); \
+ __ bne(&loop, cr0); \
+ __ bind(&exit); \
+ __ sync(); \
} while (false)
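
The new input_ext/ext_instr argument extends the expected value into r0 before the compare, so it matches the extended value produced by the reserved load. A small sketch of why that matters for sub-word CAS (illustrative, not V8 code):

    #include <cassert>
    #include <cstdint>

    // A byte loaded zero-extended must be compared against a
    // zero-extended expected value (the ZeroExtByte step above).
    bool ByteCasCompare(uint64_t loaded_zero_ext, int64_t expected) {
      uint64_t expected_zero_ext = static_cast<uint8_t>(expected);
      return loaded_zero_ext == expected_zero_ext;
    }

    int main() {
      // 0xFF in memory loads as 255; an expected value of -1 only
      // matches after the same truncating extension.
      assert(ByteCasCompare(0xFF, -1));
      return 0;
    }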
void CodeGenerator::AssembleDeconstructFrame() {
@@ -792,13 +743,13 @@ void AdjustStackPointerForTailCall(
if (pending_pushes != nullptr) {
FlushPendingPushRegisters(tasm, state, pending_pushes);
}
- tasm->Add(sp, sp, -stack_slot_delta * kPointerSize, r0);
+ tasm->Add(sp, sp, -stack_slot_delta * kSystemPointerSize, r0);
state->IncreaseSPDelta(stack_slot_delta);
} else if (allow_shrinkage && stack_slot_delta < 0) {
if (pending_pushes != nullptr) {
FlushPendingPushRegisters(tasm, state, pending_pushes);
}
- tasm->Add(sp, sp, -stack_slot_delta * kPointerSize, r0);
+ tasm->Add(sp, sp, -stack_slot_delta * kSystemPointerSize, r0);
state->IncreaseSPDelta(stack_slot_delta);
}
}
@@ -875,12 +826,8 @@ void CodeGenerator::BailoutIfDeoptimized() {
__ LoadWordArith(
r11, FieldMemOperand(r11, CodeDataContainer::kKindSpecificFlagsOffset));
__ TestBit(r11, Code::kMarkedForDeoptimizationBit);
- // Ensure we're not serializing (otherwise we'd need to use an indirection to
- // access the builtin below).
- DCHECK(!isolate()->ShouldLoadConstantsFromRootList());
- Handle<Code> code = isolate()->builtins()->builtin_handle(
- Builtins::kCompileLazyDeoptimizedCode);
- __ Jump(code, RelocInfo::CODE_TARGET, ne, cr0);
+ __ Jump(BUILTIN_CODE(isolate(), CompileLazyDeoptimizedCode),
+ RelocInfo::CODE_TARGET, ne, cr0);
}
void CodeGenerator::GenerateSpeculationPoisonFromCodeStartRegister() {
@@ -893,8 +840,7 @@ void CodeGenerator::GenerateSpeculationPoisonFromCodeStartRegister() {
__ cmp(kJavaScriptCallCodeStartRegister, scratch);
__ li(scratch, Operand::Zero());
__ notx(kSpeculationPoisonRegister, scratch);
- __ isel(eq, kSpeculationPoisonRegister,
- kSpeculationPoisonRegister, scratch);
+ __ isel(eq, kSpeculationPoisonRegister, kSpeculationPoisonRegister, scratch);
}
void CodeGenerator::AssembleRegisterArgumentPoisoning() {
@@ -918,8 +864,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
DCHECK_IMPLIES(
HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
reg == kJavaScriptCallCodeStartRegister);
- __ addi(reg, reg, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Call(reg);
+ __ CallCodeObject(reg);
} else {
__ Call(i.InputCode(0), RelocInfo::CODE_TARGET);
}
@@ -928,6 +873,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
frame_access_state()->ClearSPDelta();
break;
}
+ case kArchCallBuiltinPointer: {
+ DCHECK(!instr->InputAt(0)->IsImmediate());
+ Register builtin_pointer = i.InputRegister(0);
+ __ CallBuiltinPointer(builtin_pointer);
+ RecordCallPosition(instr);
+ frame_access_state()->ClearSPDelta();
+ break;
+ }
case kArchCallWasmFunction: {
// We must not share code targets for calls to builtins for wasm code, as
// they might need to be patched individually.
@@ -959,8 +912,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
DCHECK_IMPLIES(
HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
reg == kJavaScriptCallCodeStartRegister);
- __ addi(reg, reg, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(reg);
+ __ JumpCodeObject(reg);
} else {
// We cannot use the constant pool to load the target since
// we've already restored the caller's frame.
@@ -1015,8 +967,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
static_assert(kJavaScriptCallCodeStartRegister == r5, "ABI mismatch");
__ LoadP(r5, FieldMemOperand(func, JSFunction::kCodeOffset));
- __ addi(r5, r5, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Call(r5);
+ __ CallCodeObject(r5);
RecordCallPosition(instr);
DCHECK_EQ(LeaveRC, i.OutputRCBit());
frame_access_state()->ClearSPDelta();
@@ -1035,9 +986,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
DCHECK(fp_mode_ == kDontSaveFPRegs || fp_mode_ == kSaveFPRegs);
// kReturnRegister0 should have been saved before entering the stub.
int bytes = __ PushCallerSaved(fp_mode_, kReturnRegister0);
- DCHECK_EQ(0, bytes % kPointerSize);
+ DCHECK(IsAligned(bytes, kSystemPointerSize));
DCHECK_EQ(0, frame_access_state()->sp_delta());
- frame_access_state()->IncreaseSPDelta(bytes / kPointerSize);
+ frame_access_state()->IncreaseSPDelta(bytes / kSystemPointerSize);
DCHECK(!caller_registers_saved_);
caller_registers_saved_ = true;
break;
@@ -1048,7 +999,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
DCHECK(fp_mode_ == kDontSaveFPRegs || fp_mode_ == kSaveFPRegs);
// Don't overwrite the returned value.
int bytes = __ PopCallerSaved(fp_mode_, kReturnRegister0);
- frame_access_state()->IncreaseSPDelta(-(bytes / kPointerSize));
+ frame_access_state()->IncreaseSPDelta(-(bytes / kSystemPointerSize));
DCHECK_EQ(0, frame_access_state()->sp_delta());
DCHECK(caller_registers_saved_);
caller_registers_saved_ = false;
@@ -1088,7 +1039,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
// kArchRestoreCallerRegisters;
int bytes =
__ RequiredStackSizeForCallerSaved(fp_mode_, kReturnRegister0);
- frame_access_state()->IncreaseSPDelta(bytes / kPointerSize);
+ frame_access_state()->IncreaseSPDelta(bytes / kSystemPointerSize);
}
break;
}
@@ -1174,14 +1125,16 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
AddressingModeField::decode(instr->opcode());
if (addressing_mode == kMode_MRI) {
int32_t offset = i.InputInt32(1);
- ool = new (zone()) OutOfLineRecordWrite(this, object, offset, value,
- scratch0, scratch1, mode);
+ ool = new (zone())
+ OutOfLineRecordWrite(this, object, offset, value, scratch0,
+ scratch1, mode, DetermineStubCallMode());
__ StoreP(value, MemOperand(object, offset));
} else {
DCHECK_EQ(kMode_MRR, addressing_mode);
Register offset(i.InputRegister(1));
- ool = new (zone()) OutOfLineRecordWrite(this, object, offset, value,
- scratch0, scratch1, mode);
+ ool = new (zone())
+ OutOfLineRecordWrite(this, object, offset, value, scratch0,
+ scratch1, mode, DetermineStubCallMode());
__ StorePX(value, MemOperand(object, offset));
}
__ CheckPageFlag(object, scratch0,
@@ -1459,16 +1412,15 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.OutputRegister(0) == i.InputRegister(1) ||
i.OutputRegister(1) == i.InputRegister(0) ||
i.OutputRegister(1) == i.InputRegister(1)) {
- __ mullw(kScratchReg,
- i.InputRegister(0), i.InputRegister(1)); // low
- __ mulhw(i.OutputRegister(1),
- i.InputRegister(0), i.InputRegister(1)); // high
+ __ mullw(kScratchReg, i.InputRegister(0), i.InputRegister(1)); // low
+ __ mulhw(i.OutputRegister(1), i.InputRegister(0),
+ i.InputRegister(1)); // high
__ mr(i.OutputRegister(0), kScratchReg);
} else {
- __ mullw(i.OutputRegister(0),
- i.InputRegister(0), i.InputRegister(1)); // low
- __ mulhw(i.OutputRegister(1),
- i.InputRegister(0), i.InputRegister(1)); // high
+ __ mullw(i.OutputRegister(0), i.InputRegister(0),
+ i.InputRegister(1)); // low
+ __ mulhw(i.OutputRegister(1), i.InputRegister(0),
+ i.InputRegister(1)); // high
}
break;
case kPPC_MulHigh32:
@@ -1701,15 +1653,16 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (op->representation() == MachineRepresentation::kFloat64) {
__ StoreDoubleU(i.InputDoubleRegister(0),
MemOperand(sp, -kDoubleSize), r0);
- frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
+ frame_access_state()->IncreaseSPDelta(kDoubleSize /
+ kSystemPointerSize);
} else {
DCHECK_EQ(MachineRepresentation::kFloat32, op->representation());
__ StoreSingleU(i.InputDoubleRegister(0),
- MemOperand(sp, -kPointerSize), r0);
+ MemOperand(sp, -kSystemPointerSize), r0);
frame_access_state()->IncreaseSPDelta(1);
}
} else {
- __ StorePU(i.InputRegister(0), MemOperand(sp, -kPointerSize), r0);
+ __ StorePU(i.InputRegister(0), MemOperand(sp, -kSystemPointerSize), r0);
frame_access_state()->IncreaseSPDelta(1);
}
DCHECK_EQ(LeaveRC, i.OutputRCBit());
@@ -1720,15 +1673,15 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
LocationOperand* op = LocationOperand::cast(instr->InputAt(0));
if (op->representation() == MachineRepresentation::kFloat64) {
__ StoreDoubleU(i.InputDoubleRegister(0),
- MemOperand(sp, -num_slots * kPointerSize), r0);
+ MemOperand(sp, -num_slots * kSystemPointerSize), r0);
} else {
DCHECK_EQ(MachineRepresentation::kFloat32, op->representation());
__ StoreSingleU(i.InputDoubleRegister(0),
- MemOperand(sp, -num_slots * kPointerSize), r0);
+ MemOperand(sp, -num_slots * kSystemPointerSize), r0);
}
} else {
__ StorePU(i.InputRegister(0),
- MemOperand(sp, -num_slots * kPointerSize), r0);
+ MemOperand(sp, -num_slots * kSystemPointerSize), r0);
}
break;
}
@@ -1738,14 +1691,15 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
LocationOperand* op = LocationOperand::cast(instr->InputAt(0));
if (op->representation() == MachineRepresentation::kFloat64) {
__ StoreDouble(i.InputDoubleRegister(0),
- MemOperand(sp, slot * kPointerSize), r0);
+ MemOperand(sp, slot * kSystemPointerSize), r0);
} else {
DCHECK_EQ(MachineRepresentation::kFloat32, op->representation());
__ StoreSingle(i.InputDoubleRegister(0),
- MemOperand(sp, slot * kPointerSize), r0);
+ MemOperand(sp, slot * kSystemPointerSize), r0);
}
} else {
- __ StoreP(i.InputRegister(0), MemOperand(sp, slot * kPointerSize), r0);
+ __ StoreP(i.InputRegister(0), MemOperand(sp, slot * kSystemPointerSize),
+ r0);
}
break;
}
@@ -1970,112 +1924,84 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
#endif
case kPPC_StoreFloat32:
- ASSEMBLE_STORE_FLOAT32();
+ ASSEMBLE_STORE_FLOAT(stfs, stfsx);
break;
case kPPC_StoreDouble:
- ASSEMBLE_STORE_DOUBLE();
+ ASSEMBLE_STORE_FLOAT(stfd, stfdx);
break;
case kWord32AtomicLoadInt8:
- ASSEMBLE_ATOMIC_LOAD_INTEGER(lbz, lbzx);
- __ extsb(i.OutputRegister(), i.OutputRegister());
- break;
- case kPPC_Word64AtomicLoadUint8:
- case kWord32AtomicLoadUint8:
- ASSEMBLE_ATOMIC_LOAD_INTEGER(lbz, lbzx);
- break;
+ case kPPC_AtomicLoadUint8:
case kWord32AtomicLoadInt16:
- ASSEMBLE_ATOMIC_LOAD_INTEGER(lha, lhax);
- break;
- case kPPC_Word64AtomicLoadUint16:
- case kWord32AtomicLoadUint16:
- ASSEMBLE_ATOMIC_LOAD_INTEGER(lhz, lhzx);
- break;
- case kPPC_Word64AtomicLoadUint32:
- case kWord32AtomicLoadWord32:
- ASSEMBLE_ATOMIC_LOAD_INTEGER(lwz, lwzx);
- break;
- case kPPC_Word64AtomicLoadUint64:
- ASSEMBLE_ATOMIC_LOAD_INTEGER(ld, ldx);
- break;
-
- case kPPC_Word64AtomicStoreUint8:
- case kWord32AtomicStoreWord8:
- ASSEMBLE_ATOMIC_STORE_INTEGER(stb, stbx);
- break;
- case kPPC_Word64AtomicStoreUint16:
- case kWord32AtomicStoreWord16:
- ASSEMBLE_ATOMIC_STORE_INTEGER(sth, sthx);
- break;
- case kPPC_Word64AtomicStoreUint32:
- case kWord32AtomicStoreWord32:
- ASSEMBLE_ATOMIC_STORE_INTEGER(stw, stwx);
- break;
- case kPPC_Word64AtomicStoreUint64:
- ASSEMBLE_ATOMIC_STORE_INTEGER(std, stdx);
+ case kPPC_AtomicLoadUint16:
+ case kPPC_AtomicLoadWord32:
+ case kPPC_AtomicLoadWord64:
+ case kPPC_AtomicStoreUint8:
+ case kPPC_AtomicStoreUint16:
+ case kPPC_AtomicStoreWord32:
+ case kPPC_AtomicStoreWord64:
+ UNREACHABLE();
break;
case kWord32AtomicExchangeInt8:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(lbarx, stbcx);
__ extsb(i.OutputRegister(0), i.OutputRegister(0));
break;
- case kPPC_Word64AtomicExchangeUint8:
- case kWord32AtomicExchangeUint8:
+ case kPPC_AtomicExchangeUint8:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(lbarx, stbcx);
break;
case kWord32AtomicExchangeInt16:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(lharx, sthcx);
__ extsh(i.OutputRegister(0), i.OutputRegister(0));
break;
- case kPPC_Word64AtomicExchangeUint16:
- case kWord32AtomicExchangeUint16:
+ case kPPC_AtomicExchangeUint16:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(lharx, sthcx);
break;
- case kPPC_Word64AtomicExchangeUint32:
- case kWord32AtomicExchangeWord32:
+ case kPPC_AtomicExchangeWord32:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(lwarx, stwcx);
break;
- case kPPC_Word64AtomicExchangeUint64:
+ case kPPC_AtomicExchangeWord64:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldarx, stdcx);
break;
case kWord32AtomicCompareExchangeInt8:
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_SIGN_EXT(cmp, lbarx, stbcx, extsb);
break;
- case kPPC_Word64AtomicCompareExchangeUint8:
- case kWord32AtomicCompareExchangeUint8:
- ASSEMBLE_ATOMIC_COMPARE_EXCHANGE(cmp, lbarx, stbcx);
+ case kPPC_AtomicCompareExchangeUint8:
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE(cmp, lbarx, stbcx, ZeroExtByte);
break;
case kWord32AtomicCompareExchangeInt16:
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_SIGN_EXT(cmp, lharx, sthcx, extsh);
break;
- case kPPC_Word64AtomicCompareExchangeUint16:
- case kWord32AtomicCompareExchangeUint16:
- ASSEMBLE_ATOMIC_COMPARE_EXCHANGE(cmp, lharx, sthcx);
+ case kPPC_AtomicCompareExchangeUint16:
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE(cmp, lharx, sthcx, ZeroExtHalfWord);
break;
- case kPPC_Word64AtomicCompareExchangeUint32:
- case kWord32AtomicCompareExchangeWord32:
- ASSEMBLE_ATOMIC_COMPARE_EXCHANGE(cmpw, lwarx, stwcx);
+ case kPPC_AtomicCompareExchangeWord32:
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE(cmpw, lwarx, stwcx, ZeroExtWord32);
break;
- case kPPC_Word64AtomicCompareExchangeUint64:
- ASSEMBLE_ATOMIC_COMPARE_EXCHANGE(cmp, ldarx, stdcx);
+ case kPPC_AtomicCompareExchangeWord64:
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE(cmp, ldarx, stdcx, mr);
break;
#define ATOMIC_BINOP_CASE(op, inst) \
- case kWord32Atomic##op##Int8: \
+ case kPPC_Atomic##op##Int8: \
ASSEMBLE_ATOMIC_BINOP_SIGN_EXT(inst, lbarx, stbcx, extsb); \
break; \
- case kPPC_Word64Atomic##op##Uint8: \
- case kWord32Atomic##op##Uint8: \
+ case kPPC_Atomic##op##Uint8: \
ASSEMBLE_ATOMIC_BINOP(inst, lbarx, stbcx); \
break; \
- case kWord32Atomic##op##Int16: \
+ case kPPC_Atomic##op##Int16: \
ASSEMBLE_ATOMIC_BINOP_SIGN_EXT(inst, lharx, sthcx, extsh); \
break; \
- case kPPC_Word64Atomic##op##Uint16: \
- case kWord32Atomic##op##Uint16: \
+ case kPPC_Atomic##op##Uint16: \
ASSEMBLE_ATOMIC_BINOP(inst, lharx, sthcx); \
break; \
- case kPPC_Word64Atomic##op##Uint32: \
- case kWord32Atomic##op##Word32: \
+ case kPPC_Atomic##op##Int32: \
+ ASSEMBLE_ATOMIC_BINOP_SIGN_EXT(inst, lwarx, stwcx, extsw); \
+ break; \
+ case kPPC_Atomic##op##Uint32: \
ASSEMBLE_ATOMIC_BINOP(inst, lwarx, stwcx); \
+ break; \
+ case kPPC_Atomic##op##Int64: \
+ case kPPC_Atomic##op##Uint64: \
+ ASSEMBLE_ATOMIC_BINOP(inst, ldarx, stdcx); \
break;
ATOMIC_BINOP_CASE(Add, add)
ATOMIC_BINOP_CASE(Sub, sub)
@@ -2084,17 +2010,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ATOMIC_BINOP_CASE(Xor, xor_)
#undef ATOMIC_BINOP_CASE
-#define ATOMIC64_BINOP_CASE(op, inst) \
- case kPPC_Word64Atomic##op##Uint64: \
- ASSEMBLE_ATOMIC_BINOP(inst, ldarx, stdcx); \
- break;
- ATOMIC64_BINOP_CASE(Add, add)
- ATOMIC64_BINOP_CASE(Sub, sub)
- ATOMIC64_BINOP_CASE(And, and_)
- ATOMIC64_BINOP_CASE(Or, orx)
- ATOMIC64_BINOP_CASE(Xor, xor_)
-#undef ATOMIC64_BINOP_CASE
-
case kPPC_ByteRev32: {
Register input = i.InputRegister(0);
Register output = i.OutputRegister();
@@ -2131,7 +2046,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
return kSuccess;
} // NOLINT(readability/fn_size)
-
// Assembles branches after an instruction.
void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
PPCOperandConverter i(this, instr);
@@ -2213,11 +2127,12 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
} else {
gen_->AssembleSourcePosition(instr_);
// A direct call to a wasm runtime stub defined in this module.
- // Just encode the stub index. This will be patched at relocation.
+ // Just encode the stub index. This will be patched when the code
+ // is added to the native module and copied into wasm code space.
__ Call(static_cast<Address>(trap_id), RelocInfo::WASM_STUB_CALL);
ReferenceMap* reference_map =
new (gen_->zone()) ReferenceMap(gen_->zone());
- gen_->RecordSafepoint(reference_map, Safepoint::kSimple, 0,
+ gen_->RecordSafepoint(reference_map, Safepoint::kSimple,
Safepoint::kNoLazyDeopt);
if (FLAG_debug_code) {
__ stop(GetAbortReason(AbortReason::kUnexpectedReturnFromWasmTrap));
@@ -2294,9 +2209,9 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
// r0 implies logical zero in this form
__ isel(NegateCondition(cond), reg, r0, reg, cr);
break;
- default:
- UNREACHABLE();
- break;
+ default:
+ UNREACHABLE();
+ break;
}
} else {
if (reg_value != 0) __ li(reg, Operand::Zero());
@@ -2339,7 +2254,7 @@ void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
__ Cmpli(input, Operand(case_count), r0);
__ bge(GetLabel(i.InputRpo(1)));
__ mov_label_addr(kScratchReg, table);
- __ ShiftLeftImm(r0, input, Operand(kPointerSizeLog2));
+ __ ShiftLeftImm(r0, input, Operand(kSystemPointerSizeLog2));
__ LoadPX(kScratchReg, MemOperand(kScratchReg, r0));
__ Jump(kScratchReg);
}
@@ -2354,7 +2269,7 @@ void CodeGenerator::FinishFrame(Frame* frame) {
DCHECK_EQ(kNumCalleeSavedDoubles,
base::bits::CountPopulation(double_saves));
frame->AllocateSavedCalleeRegisterSlots(kNumCalleeSavedDoubles *
- (kDoubleSize / kPointerSize));
+ (kDoubleSize / kSystemPointerSize));
}
// Save callee-saved registers.
const RegList saves = FLAG_enable_embedded_constant_pool
@@ -2374,7 +2289,6 @@ void CodeGenerator::AssembleConstructFrame() {
auto call_descriptor = linkage()->GetIncomingDescriptor();
if (frame_access_state()->has_frame()) {
if (call_descriptor->IsCFunctionCall()) {
- __ function_descriptor();
__ mflr(r0);
if (FLAG_enable_embedded_constant_pool) {
__ Push(r0, fp, kConstantPoolRegister);
@@ -2396,6 +2310,16 @@ void CodeGenerator::AssembleConstructFrame() {
__ StubPrologue(type);
if (call_descriptor->IsWasmFunctionCall()) {
__ Push(kWasmInstanceRegister);
+ } else if (call_descriptor->IsWasmImportWrapper()) {
+ // WASM import wrappers are passed a tuple in the place of the instance.
+ // Unpack the tuple into the instance and the target callable.
+ // This must be done here in the codegen because it cannot be expressed
+ // properly in the graph.
+ __ LoadP(kJSFunctionRegister,
+ FieldMemOperand(kWasmInstanceRegister, Tuple2::kValue2Offset));
+ __ LoadP(kWasmInstanceRegister,
+ FieldMemOperand(kWasmInstanceRegister, Tuple2::kValue1Offset));
+ __ Push(kWasmInstanceRegister);
}
}
}
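
The two LoadP instructions above unpack a pair (Tuple2) into the real instance and the target callable. A sketch of the shape of that unpacking; the struct and field names are hypothetical stand-ins for Tuple2::kValue1Offset/kValue2Offset:

    struct Tuple2Like {
      void* value1;  // -> kWasmInstanceRegister (the instance)
      void* value2;  // -> kJSFunctionRegister (the target callable)
    };

    void UnpackImportWrapperArg(Tuple2Like* tuple, void** instance,
                                void** callable) {
      *callable = tuple->value2;
      *instance = tuple->value1;
    }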
@@ -2433,13 +2357,14 @@ void CodeGenerator::AssembleConstructFrame() {
// If the frame is bigger than the stack, we throw the stack overflow
// exception unconditionally. Thereby we can avoid the integer overflow
// check in the condition code.
- if ((shrink_slots * kPointerSize) < (FLAG_stack_size * 1024)) {
+ if ((shrink_slots * kSystemPointerSize) < (FLAG_stack_size * 1024)) {
Register scratch = ip;
- __ LoadP(scratch, FieldMemOperand(
- kWasmInstanceRegister,
+ __ LoadP(
+ scratch,
+ FieldMemOperand(kWasmInstanceRegister,
WasmInstanceObject::kRealStackLimitAddressOffset));
__ LoadP(scratch, MemOperand(scratch), r0);
- __ Add(scratch, scratch, shrink_slots * kPointerSize, r0);
+ __ Add(scratch, scratch, shrink_slots * kSystemPointerSize, r0);
__ cmpl(sp, scratch);
__ bge(&done);
}
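
A sketch of the guard's reasoning, not V8 code: the precise stack-limit comparison only runs when the frame is smaller than the whole stack, so the addition of the frame size to the limit cannot wrap around:

    #include <cstdint>

    bool NeedsStackOverflowThrow(uintptr_t sp, uintptr_t real_stack_limit,
                                 uintptr_t frame_size, uintptr_t stack_size) {
      if (frame_size >= stack_size) return true;  // throw unconditionally
      // Safe: frame_size < stack_size, so limit + frame_size cannot wrap.
      return sp < real_stack_limit + frame_size;
    }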
@@ -2448,11 +2373,11 @@ void CodeGenerator::AssembleConstructFrame() {
FieldMemOperand(kWasmInstanceRegister,
WasmInstanceObject::kCEntryStubOffset),
r0);
- __ Move(cp, Smi::kZero);
+ __ Move(cp, Smi::zero());
__ CallRuntimeWithCEntry(Runtime::kThrowWasmStackOverflow, r5);
// We come from WebAssembly, there are no references for the GC.
ReferenceMap* reference_map = new (zone()) ReferenceMap(zone());
- RecordSafepoint(reference_map, Safepoint::kSimple, 0,
+ RecordSafepoint(reference_map, Safepoint::kSimple,
Safepoint::kNoLazyDeopt);
if (FLAG_debug_code) {
__ stop(GetAbortReason(AbortReason::kUnexpectedReturnFromThrow));
@@ -2464,9 +2389,9 @@ void CodeGenerator::AssembleConstructFrame() {
// Skip callee-saved and return slots, which are pushed below.
shrink_slots -= base::bits::CountPopulation(saves);
shrink_slots -= frame()->GetReturnSlotCount();
- shrink_slots -=
- (kDoubleSize / kPointerSize) * base::bits::CountPopulation(saves_fp);
- __ Add(sp, sp, -shrink_slots * kPointerSize, r0);
+ shrink_slots -= (kDoubleSize / kSystemPointerSize) *
+ base::bits::CountPopulation(saves_fp);
+ __ Add(sp, sp, -shrink_slots * kSystemPointerSize, r0);
}
// Save callee-saved Double registers.
@@ -2484,7 +2409,7 @@ void CodeGenerator::AssembleConstructFrame() {
const int returns = frame()->GetReturnSlotCount();
if (returns != 0) {
// Create space for returns.
- __ Add(sp, sp, -returns * kPointerSize, r0);
+ __ Add(sp, sp, -returns * kSystemPointerSize, r0);
}
}
@@ -2495,7 +2420,7 @@ void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
const int returns = frame()->GetReturnSlotCount();
if (returns != 0) {
// Create space for returns.
- __ Add(sp, sp, returns * kPointerSize, r0);
+ __ Add(sp, sp, returns * kSystemPointerSize, r0);
}
// Restore registers.
@@ -2588,7 +2513,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
break;
case Constant::kInt64:
#if V8_TARGET_ARCH_PPC64
- if (RelocInfo::IsWasmPtrReference(src.rmode())) {
+ if (RelocInfo::IsWasmReference(src.rmode())) {
__ mov(dst, Operand(src.ToInt64(), src.rmode()));
} else {
#endif
@@ -2756,14 +2681,12 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
return;
}
-
void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) {
for (size_t index = 0; index < target_count; ++index) {
__ emit_label_addr(targets[index]);
}
}
-
#undef __
} // namespace compiler
diff --git a/deps/v8/src/compiler/backend/ppc/instruction-codes-ppc.h b/deps/v8/src/compiler/backend/ppc/instruction-codes-ppc.h
new file mode 100644
index 0000000000..1c241711b9
--- /dev/null
+++ b/deps/v8/src/compiler/backend/ppc/instruction-codes-ppc.h
@@ -0,0 +1,208 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_BACKEND_PPC_INSTRUCTION_CODES_PPC_H_
+#define V8_COMPILER_BACKEND_PPC_INSTRUCTION_CODES_PPC_H_
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// PPC-specific opcodes that specify which assembly sequence to emit.
+// Most opcodes specify a single instruction.
+#define TARGET_ARCH_OPCODE_LIST(V) \
+ V(PPC_And) \
+ V(PPC_AndComplement) \
+ V(PPC_Or) \
+ V(PPC_OrComplement) \
+ V(PPC_Xor) \
+ V(PPC_ShiftLeft32) \
+ V(PPC_ShiftLeft64) \
+ V(PPC_ShiftLeftPair) \
+ V(PPC_ShiftRight32) \
+ V(PPC_ShiftRight64) \
+ V(PPC_ShiftRightPair) \
+ V(PPC_ShiftRightAlg32) \
+ V(PPC_ShiftRightAlg64) \
+ V(PPC_ShiftRightAlgPair) \
+ V(PPC_RotRight32) \
+ V(PPC_RotRight64) \
+ V(PPC_Not) \
+ V(PPC_RotLeftAndMask32) \
+ V(PPC_RotLeftAndClear64) \
+ V(PPC_RotLeftAndClearLeft64) \
+ V(PPC_RotLeftAndClearRight64) \
+ V(PPC_Add32) \
+ V(PPC_Add64) \
+ V(PPC_AddWithOverflow32) \
+ V(PPC_AddPair) \
+ V(PPC_AddDouble) \
+ V(PPC_Sub) \
+ V(PPC_SubWithOverflow32) \
+ V(PPC_SubPair) \
+ V(PPC_SubDouble) \
+ V(PPC_Mul32) \
+ V(PPC_Mul32WithHigh32) \
+ V(PPC_Mul64) \
+ V(PPC_MulHigh32) \
+ V(PPC_MulHighU32) \
+ V(PPC_MulPair) \
+ V(PPC_MulDouble) \
+ V(PPC_Div32) \
+ V(PPC_Div64) \
+ V(PPC_DivU32) \
+ V(PPC_DivU64) \
+ V(PPC_DivDouble) \
+ V(PPC_Mod32) \
+ V(PPC_Mod64) \
+ V(PPC_ModU32) \
+ V(PPC_ModU64) \
+ V(PPC_ModDouble) \
+ V(PPC_Neg) \
+ V(PPC_NegDouble) \
+ V(PPC_SqrtDouble) \
+ V(PPC_FloorDouble) \
+ V(PPC_CeilDouble) \
+ V(PPC_TruncateDouble) \
+ V(PPC_RoundDouble) \
+ V(PPC_MaxDouble) \
+ V(PPC_MinDouble) \
+ V(PPC_AbsDouble) \
+ V(PPC_Cntlz32) \
+ V(PPC_Cntlz64) \
+ V(PPC_Popcnt32) \
+ V(PPC_Popcnt64) \
+ V(PPC_Cmp32) \
+ V(PPC_Cmp64) \
+ V(PPC_CmpDouble) \
+ V(PPC_Tst32) \
+ V(PPC_Tst64) \
+ V(PPC_Push) \
+ V(PPC_PushFrame) \
+ V(PPC_StoreToStackSlot) \
+ V(PPC_ExtendSignWord8) \
+ V(PPC_ExtendSignWord16) \
+ V(PPC_ExtendSignWord32) \
+ V(PPC_Uint32ToUint64) \
+ V(PPC_Int64ToInt32) \
+ V(PPC_Int64ToFloat32) \
+ V(PPC_Int64ToDouble) \
+ V(PPC_Uint64ToFloat32) \
+ V(PPC_Uint64ToDouble) \
+ V(PPC_Int32ToFloat32) \
+ V(PPC_Int32ToDouble) \
+ V(PPC_Uint32ToFloat32) \
+ V(PPC_Uint32ToDouble) \
+ V(PPC_Float32ToDouble) \
+ V(PPC_Float64SilenceNaN) \
+ V(PPC_DoubleToInt32) \
+ V(PPC_DoubleToUint32) \
+ V(PPC_DoubleToInt64) \
+ V(PPC_DoubleToUint64) \
+ V(PPC_DoubleToFloat32) \
+ V(PPC_DoubleExtractLowWord32) \
+ V(PPC_DoubleExtractHighWord32) \
+ V(PPC_DoubleInsertLowWord32) \
+ V(PPC_DoubleInsertHighWord32) \
+ V(PPC_DoubleConstruct) \
+ V(PPC_BitcastInt32ToFloat32) \
+ V(PPC_BitcastFloat32ToInt32) \
+ V(PPC_BitcastInt64ToDouble) \
+ V(PPC_BitcastDoubleToInt64) \
+ V(PPC_LoadWordS8) \
+ V(PPC_LoadWordU8) \
+ V(PPC_LoadWordS16) \
+ V(PPC_LoadWordU16) \
+ V(PPC_LoadWordS32) \
+ V(PPC_LoadWordU32) \
+ V(PPC_LoadWord64) \
+ V(PPC_LoadFloat32) \
+ V(PPC_LoadDouble) \
+ V(PPC_StoreWord8) \
+ V(PPC_StoreWord16) \
+ V(PPC_StoreWord32) \
+ V(PPC_StoreWord64) \
+ V(PPC_StoreFloat32) \
+ V(PPC_StoreDouble) \
+ V(PPC_ByteRev32) \
+ V(PPC_ByteRev64) \
+ V(PPC_AtomicStoreUint8) \
+ V(PPC_AtomicStoreUint16) \
+ V(PPC_AtomicStoreWord32) \
+ V(PPC_AtomicStoreWord64) \
+ V(PPC_AtomicLoadUint8) \
+ V(PPC_AtomicLoadUint16) \
+ V(PPC_AtomicLoadWord32) \
+ V(PPC_AtomicLoadWord64) \
+ V(PPC_AtomicExchangeUint8) \
+ V(PPC_AtomicExchangeUint16) \
+ V(PPC_AtomicExchangeWord32) \
+ V(PPC_AtomicExchangeWord64) \
+ V(PPC_AtomicCompareExchangeUint8) \
+ V(PPC_AtomicCompareExchangeUint16) \
+ V(PPC_AtomicCompareExchangeWord32) \
+ V(PPC_AtomicCompareExchangeWord64) \
+ V(PPC_AtomicAddUint8) \
+ V(PPC_AtomicAddUint16) \
+ V(PPC_AtomicAddUint32) \
+ V(PPC_AtomicAddUint64) \
+ V(PPC_AtomicAddInt8) \
+ V(PPC_AtomicAddInt16) \
+ V(PPC_AtomicAddInt32) \
+ V(PPC_AtomicAddInt64) \
+ V(PPC_AtomicSubUint8) \
+ V(PPC_AtomicSubUint16) \
+ V(PPC_AtomicSubUint32) \
+ V(PPC_AtomicSubUint64) \
+ V(PPC_AtomicSubInt8) \
+ V(PPC_AtomicSubInt16) \
+ V(PPC_AtomicSubInt32) \
+ V(PPC_AtomicSubInt64) \
+ V(PPC_AtomicAndUint8) \
+ V(PPC_AtomicAndUint16) \
+ V(PPC_AtomicAndUint32) \
+ V(PPC_AtomicAndUint64) \
+ V(PPC_AtomicAndInt8) \
+ V(PPC_AtomicAndInt16) \
+ V(PPC_AtomicAndInt32) \
+ V(PPC_AtomicAndInt64) \
+ V(PPC_AtomicOrUint8) \
+ V(PPC_AtomicOrUint16) \
+ V(PPC_AtomicOrUint32) \
+ V(PPC_AtomicOrUint64) \
+ V(PPC_AtomicOrInt8) \
+ V(PPC_AtomicOrInt16) \
+ V(PPC_AtomicOrInt32) \
+ V(PPC_AtomicOrInt64) \
+ V(PPC_AtomicXorUint8) \
+ V(PPC_AtomicXorUint16) \
+ V(PPC_AtomicXorUint32) \
+ V(PPC_AtomicXorUint64) \
+ V(PPC_AtomicXorInt8) \
+ V(PPC_AtomicXorInt16) \
+ V(PPC_AtomicXorInt32) \
+ V(PPC_AtomicXorInt64)
+
+// Addressing modes represent the "shape" of inputs to an instruction.
+// Many instructions support multiple addressing modes. Addressing modes
+// are encoded into the InstructionCode of the instruction and tell the
+// code generator after register allocation which assembler method to call.
+//
+// We use the following local notation for addressing modes:
+//
+// R = register
+// O = register or stack slot
+// D = double register
+// I = immediate (handle, external, int32)
+// MRI = [register + immediate]
+// MRR = [register + register]
+#define TARGET_ADDRESSING_MODE_LIST(V) \
+ V(MRI) /* [%r0 + K] */ \
+ V(MRR) /* [%r0 + %r1] */
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_BACKEND_PPC_INSTRUCTION_CODES_PPC_H_
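[Editor's sketch] The new header is pure X-macro machinery: TARGET_ARCH_OPCODE_LIST and TARGET_ADDRESSING_MODE_LIST are only expanded by consumers elsewhere in the backend. A minimal, self-contained illustration of the pattern (demo names, not V8's actual consumers):

    // Each V(...) entry is expanded once per consumer: here into an enum
    // and a parallel name table for printing.
    #define DEMO_OPCODE_LIST(V) V(PPC_And) V(PPC_Or) V(PPC_Xor)

    enum DemoArchOpcode {
    #define DECLARE_OPCODE(Name) k##Name,
      DEMO_OPCODE_LIST(DECLARE_OPCODE)
    #undef DECLARE_OPCODE
    };

    static const char* const kDemoOpcodeNames[] = {
    #define DECLARE_NAME(Name) #Name,
      DEMO_OPCODE_LIST(DECLARE_NAME)
    #undef DECLARE_NAME
    };  // kPPC_And prints as "PPC_And", and so on.

This is also why every list entry ends in a backslash: each V(Name) must stay on the macro's single logical line.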
diff --git a/deps/v8/src/compiler/ppc/instruction-scheduler-ppc.cc b/deps/v8/src/compiler/backend/ppc/instruction-scheduler-ppc.cc
index 6e7284f30a..423dd7ac99 100644
--- a/deps/v8/src/compiler/ppc/instruction-scheduler-ppc.cc
+++ b/deps/v8/src/compiler/backend/ppc/instruction-scheduler-ppc.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/compiler/instruction-scheduler.h"
+#include "src/compiler/backend/instruction-scheduler.h"
namespace v8 {
namespace internal {
@@ -10,7 +10,6 @@ namespace compiler {
bool InstructionScheduler::SchedulerSupported() { return true; }
-
int InstructionScheduler::GetTargetInstructionFlags(
const Instruction* instr) const {
switch (instr->arch_opcode()) {
@@ -135,48 +134,68 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kPPC_StoreToStackSlot:
return kHasSideEffect;
- case kPPC_Word64AtomicLoadUint8:
- case kPPC_Word64AtomicLoadUint16:
- case kPPC_Word64AtomicLoadUint32:
- case kPPC_Word64AtomicLoadUint64:
+ case kPPC_AtomicLoadUint8:
+ case kPPC_AtomicLoadUint16:
+ case kPPC_AtomicLoadWord32:
+ case kPPC_AtomicLoadWord64:
return kIsLoadOperation;
- case kPPC_Word64AtomicStoreUint8:
- case kPPC_Word64AtomicStoreUint16:
- case kPPC_Word64AtomicStoreUint32:
- case kPPC_Word64AtomicStoreUint64:
- case kPPC_Word64AtomicExchangeUint8:
- case kPPC_Word64AtomicExchangeUint16:
- case kPPC_Word64AtomicExchangeUint32:
- case kPPC_Word64AtomicExchangeUint64:
- case kPPC_Word64AtomicCompareExchangeUint8:
- case kPPC_Word64AtomicCompareExchangeUint16:
- case kPPC_Word64AtomicCompareExchangeUint32:
- case kPPC_Word64AtomicCompareExchangeUint64:
- case kPPC_Word64AtomicAddUint8:
- case kPPC_Word64AtomicAddUint16:
- case kPPC_Word64AtomicAddUint32:
- case kPPC_Word64AtomicAddUint64:
- case kPPC_Word64AtomicSubUint8:
- case kPPC_Word64AtomicSubUint16:
- case kPPC_Word64AtomicSubUint32:
- case kPPC_Word64AtomicSubUint64:
- case kPPC_Word64AtomicAndUint8:
- case kPPC_Word64AtomicAndUint16:
- case kPPC_Word64AtomicAndUint32:
- case kPPC_Word64AtomicAndUint64:
- case kPPC_Word64AtomicOrUint8:
- case kPPC_Word64AtomicOrUint16:
- case kPPC_Word64AtomicOrUint32:
- case kPPC_Word64AtomicOrUint64:
- case kPPC_Word64AtomicXorUint8:
- case kPPC_Word64AtomicXorUint16:
- case kPPC_Word64AtomicXorUint32:
- case kPPC_Word64AtomicXorUint64:
+ case kPPC_AtomicStoreUint8:
+ case kPPC_AtomicStoreUint16:
+ case kPPC_AtomicStoreWord32:
+ case kPPC_AtomicStoreWord64:
+ case kPPC_AtomicExchangeUint8:
+ case kPPC_AtomicExchangeUint16:
+ case kPPC_AtomicExchangeWord32:
+ case kPPC_AtomicExchangeWord64:
+ case kPPC_AtomicCompareExchangeUint8:
+ case kPPC_AtomicCompareExchangeUint16:
+ case kPPC_AtomicCompareExchangeWord32:
+ case kPPC_AtomicCompareExchangeWord64:
+ case kPPC_AtomicAddUint8:
+ case kPPC_AtomicAddUint16:
+ case kPPC_AtomicAddUint32:
+ case kPPC_AtomicAddUint64:
+ case kPPC_AtomicAddInt8:
+ case kPPC_AtomicAddInt16:
+ case kPPC_AtomicAddInt32:
+ case kPPC_AtomicAddInt64:
+ case kPPC_AtomicSubUint8:
+ case kPPC_AtomicSubUint16:
+ case kPPC_AtomicSubUint32:
+ case kPPC_AtomicSubUint64:
+ case kPPC_AtomicSubInt8:
+ case kPPC_AtomicSubInt16:
+ case kPPC_AtomicSubInt32:
+ case kPPC_AtomicSubInt64:
+ case kPPC_AtomicAndUint8:
+ case kPPC_AtomicAndUint16:
+ case kPPC_AtomicAndUint32:
+ case kPPC_AtomicAndUint64:
+ case kPPC_AtomicAndInt8:
+ case kPPC_AtomicAndInt16:
+ case kPPC_AtomicAndInt32:
+ case kPPC_AtomicAndInt64:
+ case kPPC_AtomicOrUint8:
+ case kPPC_AtomicOrUint16:
+ case kPPC_AtomicOrUint32:
+ case kPPC_AtomicOrUint64:
+ case kPPC_AtomicOrInt8:
+ case kPPC_AtomicOrInt16:
+ case kPPC_AtomicOrInt32:
+ case kPPC_AtomicOrInt64:
+ case kPPC_AtomicXorUint8:
+ case kPPC_AtomicXorUint16:
+ case kPPC_AtomicXorUint32:
+ case kPPC_AtomicXorUint64:
+ case kPPC_AtomicXorInt8:
+ case kPPC_AtomicXorInt16:
+ case kPPC_AtomicXorInt32:
+ case kPPC_AtomicXorInt64:
return kHasSideEffect;
#define CASE(Name) case k##Name:
- COMMON_ARCH_OPCODE_LIST(CASE)
+ COMMON_ARCH_OPCODE_LIST(CASE)
#undef CASE
// Already covered in architecture independent code.
UNREACHABLE();
@@ -185,7 +204,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
UNREACHABLE();
}
-
int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
// TODO(all): Add instruction cost modeling.
return 1;
diff --git a/deps/v8/src/compiler/ppc/instruction-selector-ppc.cc b/deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc
index 5d336652c9..9dcae4d465 100644
--- a/deps/v8/src/compiler/ppc/instruction-selector-ppc.cc
+++ b/deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc
@@ -3,7 +3,7 @@
// found in the LICENSE file.
#include "src/base/adapters.h"
-#include "src/compiler/instruction-selector-impl.h"
+#include "src/compiler/backend/instruction-selector-impl.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"
#include "src/ppc/frame-constants-ppc.h"
@@ -22,7 +22,6 @@ enum ImmediateMode {
kNoImmediate
};
-
// Adds PPC-specific methods for generating operands.
class PPCOperandGenerator final : public OperandGenerator {
public:
@@ -79,7 +78,6 @@ class PPCOperandGenerator final : public OperandGenerator {
}
};
-
namespace {
void VisitRR(InstructionSelector* selector, InstructionCode opcode,
@@ -105,7 +103,6 @@ void VisitRRO(InstructionSelector* selector, InstructionCode opcode, Node* node,
g.UseOperand(node->InputAt(1), operand_mode));
}
-
#if V8_TARGET_ARCH_PPC64
void VisitTryTruncateDouble(InstructionSelector* selector,
InstructionCode opcode, Node* node) {
@@ -124,7 +121,6 @@ void VisitTryTruncateDouble(InstructionSelector* selector,
}
#endif
-
// Shared routine for multiple binary operations.
template <typename Matcher>
void VisitBinop(InstructionSelector* selector, Node* node,
@@ -155,10 +151,9 @@ void VisitBinop(InstructionSelector* selector, Node* node,
DCHECK_GE(arraysize(outputs), output_count);
selector->EmitWithContinuation(opcode, output_count, outputs, input_count,
- inputs, cont);
+ inputs, cont);
}
-
// Shared routine for multiple binary operations.
template <typename Matcher>
void VisitBinop(InstructionSelector* selector, Node* node,
@@ -204,25 +199,16 @@ void InstructionSelector::VisitLoad(Node* node) {
case MachineRepresentation::kWord16:
opcode = load_rep.IsSigned() ? kPPC_LoadWordS16 : kPPC_LoadWordU16;
break;
-#if !V8_TARGET_ARCH_PPC64
- case MachineRepresentation::kTaggedSigned: // Fall through.
- case MachineRepresentation::kTaggedPointer: // Fall through.
- case MachineRepresentation::kTagged: // Fall through.
-#endif
case MachineRepresentation::kWord32:
opcode = kPPC_LoadWordU32;
break;
-#if V8_TARGET_ARCH_PPC64
case MachineRepresentation::kTaggedSigned: // Fall through.
case MachineRepresentation::kTaggedPointer: // Fall through.
- case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kTagged: // Fall through.
case MachineRepresentation::kWord64:
opcode = kPPC_LoadWord64;
mode = kInt16Imm_4ByteAligned;
break;
-#else
- case MachineRepresentation::kWord64: // Fall through.
-#endif
case MachineRepresentation::kSimd128: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
@@ -234,15 +220,21 @@ void InstructionSelector::VisitLoad(Node* node) {
opcode |= MiscField::encode(kMemoryAccessPoisoned);
}
+ bool is_atomic = (node->opcode() == IrOpcode::kWord32AtomicLoad ||
+ node->opcode() == IrOpcode::kWord64AtomicLoad);
+
if (g.CanBeImmediate(offset, mode)) {
Emit(opcode | AddressingModeField::encode(kMode_MRI),
- g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(offset));
+ g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(offset),
+ g.UseImmediate(is_atomic));
} else if (g.CanBeImmediate(base, mode)) {
Emit(opcode | AddressingModeField::encode(kMode_MRI),
- g.DefineAsRegister(node), g.UseRegister(offset), g.UseImmediate(base));
+ g.DefineAsRegister(node), g.UseRegister(offset), g.UseImmediate(base),
+ g.UseImmediate(is_atomic));
} else {
Emit(opcode | AddressingModeField::encode(kMode_MRR),
- g.DefineAsRegister(node), g.UseRegister(base), g.UseRegister(offset));
+ g.DefineAsRegister(node), g.UseRegister(base), g.UseRegister(offset),
+ g.UseImmediate(is_atomic));
}
}
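[Editor's sketch] The hunk above is the heart of this refactor: plain and atomic loads now share one selector, and atomicity travels to the code generator as a trailing boolean immediate. A hypothetical consumer on the code-generation side (names assumed, not shown in this patch):

    // PPCOperandConverter i(this, instr);
    // The selector appended UseImmediate(is_atomic) as the last input:
    bool is_atomic = i.InputInt32(instr->InputCount() - 1) != 0;
    // __ lwz(result, mem);         // the load itself
    // if (is_atomic) __ lwsync();  // e.g. an acquire barrier on Power

Only atomic accesses pay for the barrier; the instruction stream for ordinary loads is unchanged.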
@@ -259,9 +251,19 @@ void InstructionSelector::VisitStore(Node* node) {
Node* offset = node->InputAt(1);
Node* value = node->InputAt(2);
- StoreRepresentation store_rep = StoreRepresentationOf(node->op());
- WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind();
- MachineRepresentation rep = store_rep.representation();
+ bool is_atomic = (node->opcode() == IrOpcode::kWord32AtomicStore ||
+ node->opcode() == IrOpcode::kWord64AtomicStore);
+
+ MachineRepresentation rep;
+ WriteBarrierKind write_barrier_kind = kNoWriteBarrier;
+
+ if (is_atomic) {
+ rep = AtomicStoreRepresentationOf(node->op());
+ } else {
+ StoreRepresentation store_rep = StoreRepresentationOf(node->op());
+ write_barrier_kind = store_rep.write_barrier_kind();
+ rep = store_rep.representation();
+ }
if (write_barrier_kind != kNoWriteBarrier) {
DCHECK(CanBeTaggedPointer(rep));
@@ -303,6 +305,7 @@ void InstructionSelector::VisitStore(Node* node) {
InstructionCode code = kArchStoreWithWriteBarrier;
code |= AddressingModeField::encode(addressing_mode);
code |= MiscField::encode(static_cast<int>(record_write_mode));
+ CHECK_EQ(is_atomic, false);
Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
} else {
ArchOpcode opcode = kArchNop;
@@ -324,7 +327,7 @@ void InstructionSelector::VisitStore(Node* node) {
#if !V8_TARGET_ARCH_PPC64
case MachineRepresentation::kTaggedSigned: // Fall through.
case MachineRepresentation::kTaggedPointer: // Fall through.
- case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kTagged: // Fall through.
#endif
case MachineRepresentation::kWord32:
opcode = kPPC_StoreWord32;
@@ -332,7 +335,7 @@ void InstructionSelector::VisitStore(Node* node) {
#if V8_TARGET_ARCH_PPC64
case MachineRepresentation::kTaggedSigned: // Fall through.
case MachineRepresentation::kTaggedPointer: // Fall through.
- case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kTagged: // Fall through.
case MachineRepresentation::kWord64:
opcode = kPPC_StoreWord64;
mode = kInt16Imm_4ByteAligned;
@@ -345,15 +348,19 @@ void InstructionSelector::VisitStore(Node* node) {
UNREACHABLE();
return;
}
+
if (g.CanBeImmediate(offset, mode)) {
Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
- g.UseRegister(base), g.UseImmediate(offset), g.UseRegister(value));
+ g.UseRegister(base), g.UseImmediate(offset), g.UseRegister(value),
+ g.UseImmediate(is_atomic));
} else if (g.CanBeImmediate(base, mode)) {
Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
- g.UseRegister(offset), g.UseImmediate(base), g.UseRegister(value));
+ g.UseRegister(offset), g.UseImmediate(base), g.UseRegister(value),
+ g.UseImmediate(is_atomic));
} else {
Emit(opcode | AddressingModeField::encode(kMode_MRR), g.NoOutput(),
- g.UseRegister(base), g.UseRegister(offset), g.UseRegister(value));
+ g.UseRegister(base), g.UseRegister(offset), g.UseRegister(value),
+ g.UseImmediate(is_atomic));
}
}
}
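[Editor's sketch] Stores take the same route. The only fork is where the representation comes from, condensed here for readability (same logic as the code above):

    MachineRepresentation rep =
        is_atomic ? AtomicStoreRepresentationOf(node->op())
                  : StoreRepresentationOf(node->op()).representation();

Atomic stores never reach the write-barrier branch (they are plain word-sized stores of untagged values), which the new CHECK_EQ(is_atomic, false) guard makes explicit.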
@@ -415,7 +422,6 @@ static void VisitLogical(InstructionSelector* selector, Node* node, Matcher* m,
VisitBinop<Matcher>(selector, node, opcode, imm_mode);
}
-
static inline bool IsContiguousMask32(uint32_t value, int* mb, int* me) {
int mask_width = base::bits::CountPopulation(value);
int mask_msb = base::bits::CountLeadingZeros32(value);
@@ -427,7 +433,6 @@ static inline bool IsContiguousMask32(uint32_t value, int* mb, int* me) {
return true;
}
-
#if V8_TARGET_ARCH_PPC64
static inline bool IsContiguousMask64(uint64_t value, int* mb, int* me) {
int mask_width = base::bits::CountPopulation(value);
@@ -441,7 +446,6 @@ static inline bool IsContiguousMask64(uint64_t value, int* mb, int* me) {
}
#endif
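[Editor's sketch] IsContiguousMask32/64 feed the rlwinm/rldic "mask absorption" mentioned in the TODOs: an AND with a single contiguous run of set bits can be folded into one rotate-and-mask instruction. A self-contained restatement of the contiguity test (C++20 <bit>, illustration only; the mb/me outputs are omitted here):

    #include <bit>
    #include <cstdint>

    bool IsContiguousMask32Demo(uint32_t value) {
      int width = std::popcount(value);   // mask_width in the code above
      int msb = std::countl_zero(value);  // mask_msb
      int lsb = std::countr_zero(value);  // mask_lsb
      // One contiguous run iff leading zeros + run + trailing zeros
      // account for the whole 32-bit word.
      return width != 0 && msb + width + lsb == 32;
    }
    // IsContiguousMask32Demo(0x00FFFF00u) == true   (8 + 16 + 8 == 32)
    // IsContiguousMask32Demo(0x00FF00FFu) == false  (8 + 16 + 0 != 32)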
-
// TODO(mbrandy): Absorb rotate-right into rlwinm?
void InstructionSelector::VisitWord32And(Node* node) {
PPCOperandGenerator g(this);
@@ -479,7 +483,6 @@ void InstructionSelector::VisitWord32And(Node* node) {
CanCover(node, m.right().node()), kInt16Imm_Unsigned);
}
-
#if V8_TARGET_ARCH_PPC64
// TODO(mbrandy): Absorb rotate-right into rldic?
void InstructionSelector::VisitWord64And(Node* node) {
@@ -537,7 +540,6 @@ void InstructionSelector::VisitWord64And(Node* node) {
}
#endif
-
void InstructionSelector::VisitWord32Or(Node* node) {
Int32BinopMatcher m(node);
VisitLogical<Int32BinopMatcher>(
@@ -545,7 +547,6 @@ void InstructionSelector::VisitWord32Or(Node* node) {
CanCover(node, m.right().node()), kInt16Imm_Unsigned);
}
-
#if V8_TARGET_ARCH_PPC64
void InstructionSelector::VisitWord64Or(Node* node) {
Int64BinopMatcher m(node);
@@ -555,7 +556,6 @@ void InstructionSelector::VisitWord64Or(Node* node) {
}
#endif
-
void InstructionSelector::VisitWord32Xor(Node* node) {
PPCOperandGenerator g(this);
Int32BinopMatcher m(node);
@@ -566,7 +566,6 @@ void InstructionSelector::VisitWord32Xor(Node* node) {
}
}
-
#if V8_TARGET_ARCH_PPC64
void InstructionSelector::VisitWord64Xor(Node* node) {
PPCOperandGenerator g(this);
@@ -579,7 +578,6 @@ void InstructionSelector::VisitWord64Xor(Node* node) {
}
#endif
-
void InstructionSelector::VisitWord32Shl(Node* node) {
PPCOperandGenerator g(this);
Int32BinopMatcher m(node);
@@ -604,7 +602,6 @@ void InstructionSelector::VisitWord32Shl(Node* node) {
VisitRRO(this, kPPC_ShiftLeft32, node, kShift32Imm);
}
-
#if V8_TARGET_ARCH_PPC64
void InstructionSelector::VisitWord64Shl(Node* node) {
PPCOperandGenerator g(this);
@@ -650,7 +647,6 @@ void InstructionSelector::VisitWord64Shl(Node* node) {
}
#endif
-
void InstructionSelector::VisitWord32Shr(Node* node) {
PPCOperandGenerator g(this);
Int32BinopMatcher m(node);
@@ -717,7 +713,6 @@ void InstructionSelector::VisitWord64Shr(Node* node) {
}
#endif
-
void InstructionSelector::VisitWord32Sar(Node* node) {
PPCOperandGenerator g(this);
Int32BinopMatcher m(node);
@@ -868,7 +863,7 @@ void InstructionSelector::VisitWord64Sar(Node* node) {
if (g.CanBeImmediate(offset, kInt16Imm_4ByteAligned)) {
Emit(kPPC_LoadWordS32 | AddressingModeField::encode(kMode_MRI),
g.DefineAsRegister(node), g.UseRegister(mleft.base()),
- g.TempImmediate(offset));
+ g.TempImmediate(offset), g.UseImmediate(0));
return;
}
}
@@ -877,13 +872,11 @@ void InstructionSelector::VisitWord64Sar(Node* node) {
}
#endif
-
// TODO(mbrandy): Absorb logical-and into rlwinm?
void InstructionSelector::VisitWord32Ror(Node* node) {
VisitRRO(this, kPPC_RotRight32, node, kShift32Imm);
}
-
#if V8_TARGET_ARCH_PPC64
// TODO(mbrandy): Absorb logical-and into rldic?
void InstructionSelector::VisitWord64Ror(Node* node) {
@@ -891,13 +884,11 @@ void InstructionSelector::VisitWord64Ror(Node* node) {
}
#endif
-
void InstructionSelector::VisitWord32Clz(Node* node) {
PPCOperandGenerator g(this);
Emit(kPPC_Cntlz32, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
}
-
#if V8_TARGET_ARCH_PPC64
void InstructionSelector::VisitWord64Clz(Node* node) {
PPCOperandGenerator g(this);
@@ -905,14 +896,12 @@ void InstructionSelector::VisitWord64Clz(Node* node) {
}
#endif
-
void InstructionSelector::VisitWord32Popcnt(Node* node) {
PPCOperandGenerator g(this);
Emit(kPPC_Popcnt32, g.DefineAsRegister(node),
g.UseRegister(node->InputAt(0)));
}
-
#if V8_TARGET_ARCH_PPC64
void InstructionSelector::VisitWord64Popcnt(Node* node) {
PPCOperandGenerator g(this);
@@ -921,18 +910,14 @@ void InstructionSelector::VisitWord64Popcnt(Node* node) {
}
#endif
-
void InstructionSelector::VisitWord32Ctz(Node* node) { UNREACHABLE(); }
-
#if V8_TARGET_ARCH_PPC64
void InstructionSelector::VisitWord64Ctz(Node* node) { UNREACHABLE(); }
#endif
-
void InstructionSelector::VisitWord32ReverseBits(Node* node) { UNREACHABLE(); }
-
#if V8_TARGET_ARCH_PPC64
void InstructionSelector::VisitWord64ReverseBits(Node* node) { UNREACHABLE(); }
#endif
@@ -956,7 +941,6 @@ void InstructionSelector::VisitInt32Add(Node* node) {
VisitBinop<Int32BinopMatcher>(this, node, kPPC_Add32, kInt16Imm);
}
-
#if V8_TARGET_ARCH_PPC64
void InstructionSelector::VisitInt64Add(Node* node) {
VisitBinop<Int64BinopMatcher>(this, node, kPPC_Add64, kInt16Imm);
@@ -973,7 +957,6 @@ void InstructionSelector::VisitInt32Sub(Node* node) {
}
}
-
#if V8_TARGET_ARCH_PPC64
void InstructionSelector::VisitInt64Sub(Node* node) {
PPCOperandGenerator g(this);
@@ -1016,111 +999,92 @@ void EmitInt32MulWithOverflow(InstructionSelector* selector, Node* node,
} // namespace
-
void InstructionSelector::VisitInt32Mul(Node* node) {
VisitRRR(this, kPPC_Mul32, node);
}
-
#if V8_TARGET_ARCH_PPC64
void InstructionSelector::VisitInt64Mul(Node* node) {
VisitRRR(this, kPPC_Mul64, node);
}
#endif
-
void InstructionSelector::VisitInt32MulHigh(Node* node) {
PPCOperandGenerator g(this);
Emit(kPPC_MulHigh32, g.DefineAsRegister(node),
g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
}
-
void InstructionSelector::VisitUint32MulHigh(Node* node) {
PPCOperandGenerator g(this);
Emit(kPPC_MulHighU32, g.DefineAsRegister(node),
g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
}
-
void InstructionSelector::VisitInt32Div(Node* node) {
VisitRRR(this, kPPC_Div32, node);
}
-
#if V8_TARGET_ARCH_PPC64
void InstructionSelector::VisitInt64Div(Node* node) {
VisitRRR(this, kPPC_Div64, node);
}
#endif
-
void InstructionSelector::VisitUint32Div(Node* node) {
VisitRRR(this, kPPC_DivU32, node);
}
-
#if V8_TARGET_ARCH_PPC64
void InstructionSelector::VisitUint64Div(Node* node) {
VisitRRR(this, kPPC_DivU64, node);
}
#endif
-
void InstructionSelector::VisitInt32Mod(Node* node) {
VisitRRR(this, kPPC_Mod32, node);
}
-
#if V8_TARGET_ARCH_PPC64
void InstructionSelector::VisitInt64Mod(Node* node) {
VisitRRR(this, kPPC_Mod64, node);
}
#endif
-
void InstructionSelector::VisitUint32Mod(Node* node) {
VisitRRR(this, kPPC_ModU32, node);
}
-
#if V8_TARGET_ARCH_PPC64
void InstructionSelector::VisitUint64Mod(Node* node) {
VisitRRR(this, kPPC_ModU64, node);
}
#endif
-
void InstructionSelector::VisitChangeFloat32ToFloat64(Node* node) {
VisitRR(this, kPPC_Float32ToDouble, node);
}
-
void InstructionSelector::VisitRoundInt32ToFloat32(Node* node) {
VisitRR(this, kPPC_Int32ToFloat32, node);
}
-
void InstructionSelector::VisitRoundUint32ToFloat32(Node* node) {
VisitRR(this, kPPC_Uint32ToFloat32, node);
}
-
void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
VisitRR(this, kPPC_Int32ToDouble, node);
}
-
void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
VisitRR(this, kPPC_Uint32ToDouble, node);
}
-
void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
VisitRR(this, kPPC_DoubleToInt32, node);
}
-
void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
VisitRR(this, kPPC_DoubleToUint32, node);
}
@@ -1144,22 +1108,22 @@ void InstructionSelector::VisitTryTruncateFloat32ToInt64(Node* node) {
VisitTryTruncateDouble(this, kPPC_DoubleToInt64, node);
}
-
void InstructionSelector::VisitTryTruncateFloat64ToInt64(Node* node) {
VisitTryTruncateDouble(this, kPPC_DoubleToInt64, node);
}
+void InstructionSelector::VisitTruncateFloat64ToInt64(Node* node) {
+ VisitRR(this, kPPC_DoubleToInt64, node);
+}
void InstructionSelector::VisitTryTruncateFloat32ToUint64(Node* node) {
VisitTryTruncateDouble(this, kPPC_DoubleToUint64, node);
}
-
void InstructionSelector::VisitTryTruncateFloat64ToUint64(Node* node) {
VisitTryTruncateDouble(this, kPPC_DoubleToUint64, node);
}
-
void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
// TODO(mbrandy): inspect input to see if nop is appropriate.
VisitRR(this, kPPC_ExtendSignWord32, node);
@@ -1194,7 +1158,6 @@ void InstructionSelector::VisitChangeFloat64ToInt64(Node* node) {
}
#endif
-
void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
VisitRR(this, kPPC_DoubleToFloat32, node);
}
@@ -1207,29 +1170,24 @@ void InstructionSelector::VisitRoundFloat64ToInt32(Node* node) {
VisitRR(this, kPPC_DoubleToInt32, node);
}
-
void InstructionSelector::VisitTruncateFloat32ToInt32(Node* node) {
VisitRR(this, kPPC_DoubleToInt32, node);
}
-
void InstructionSelector::VisitTruncateFloat32ToUint32(Node* node) {
VisitRR(this, kPPC_DoubleToUint32, node);
}
-
#if V8_TARGET_ARCH_PPC64
void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
// TODO(mbrandy): inspect input to see if nop is appropriate.
VisitRR(this, kPPC_Int64ToInt32, node);
}
-
void InstructionSelector::VisitRoundInt64ToFloat32(Node* node) {
VisitRR(this, kPPC_Int64ToFloat32, node);
}
-
void InstructionSelector::VisitRoundInt64ToFloat64(Node* node) {
VisitRR(this, kPPC_Int64ToDouble, node);
}
@@ -1242,48 +1200,40 @@ void InstructionSelector::VisitRoundUint64ToFloat32(Node* node) {
VisitRR(this, kPPC_Uint64ToFloat32, node);
}
-
void InstructionSelector::VisitRoundUint64ToFloat64(Node* node) {
VisitRR(this, kPPC_Uint64ToDouble, node);
}
#endif
-
void InstructionSelector::VisitBitcastFloat32ToInt32(Node* node) {
VisitRR(this, kPPC_BitcastFloat32ToInt32, node);
}
-
#if V8_TARGET_ARCH_PPC64
void InstructionSelector::VisitBitcastFloat64ToInt64(Node* node) {
VisitRR(this, kPPC_BitcastDoubleToInt64, node);
}
#endif
-
void InstructionSelector::VisitBitcastInt32ToFloat32(Node* node) {
VisitRR(this, kPPC_BitcastInt32ToFloat32, node);
}
-
#if V8_TARGET_ARCH_PPC64
void InstructionSelector::VisitBitcastInt64ToFloat64(Node* node) {
VisitRR(this, kPPC_BitcastInt64ToDouble, node);
}
#endif
-
void InstructionSelector::VisitFloat32Add(Node* node) {
VisitRRR(this, kPPC_AddDouble | MiscField::encode(1), node);
}
-
void InstructionSelector::VisitFloat64Add(Node* node) {
// TODO(mbrandy): detect multiply-add
VisitRRR(this, kPPC_AddDouble, node);
}
-
void InstructionSelector::VisitFloat32Sub(Node* node) {
VisitRRR(this, kPPC_SubDouble | MiscField::encode(1), node);
}
@@ -1297,28 +1247,24 @@ void InstructionSelector::VisitFloat32Mul(Node* node) {
VisitRRR(this, kPPC_MulDouble | MiscField::encode(1), node);
}
-
void InstructionSelector::VisitFloat64Mul(Node* node) {
// TODO(mbrandy): detect negate
VisitRRR(this, kPPC_MulDouble, node);
}
-
void InstructionSelector::VisitFloat32Div(Node* node) {
VisitRRR(this, kPPC_DivDouble | MiscField::encode(1), node);
}
-
void InstructionSelector::VisitFloat64Div(Node* node) {
VisitRRR(this, kPPC_DivDouble, node);
}
-
void InstructionSelector::VisitFloat64Mod(Node* node) {
PPCOperandGenerator g(this);
Emit(kPPC_ModDouble, g.DefineAsFixed(node, d1),
- g.UseFixed(node->InputAt(0), d1),
- g.UseFixed(node->InputAt(1), d2))->MarkAsCall();
+ g.UseFixed(node->InputAt(0), d1), g.UseFixed(node->InputAt(1), d2))
+ ->MarkAsCall();
}
void InstructionSelector::VisitFloat32Max(Node* node) {
@@ -1329,7 +1275,6 @@ void InstructionSelector::VisitFloat64Max(Node* node) {
VisitRRR(this, kPPC_MaxDouble, node);
}
-
void InstructionSelector::VisitFloat64SilenceNaN(Node* node) {
VisitRR(this, kPPC_Float64SilenceNaN, node);
}
@@ -1342,12 +1287,10 @@ void InstructionSelector::VisitFloat64Min(Node* node) {
VisitRRR(this, kPPC_MinDouble, node);
}
-
void InstructionSelector::VisitFloat32Abs(Node* node) {
VisitRR(this, kPPC_AbsDouble | MiscField::encode(1), node);
}
-
void InstructionSelector::VisitFloat64Abs(Node* node) {
VisitRR(this, kPPC_AbsDouble, node);
}
@@ -1360,62 +1303,53 @@ void InstructionSelector::VisitFloat64Ieee754Unop(Node* node,
InstructionCode opcode) {
PPCOperandGenerator g(this);
Emit(opcode, g.DefineAsFixed(node, d1), g.UseFixed(node->InputAt(0), d1))
- ->MarkAsCall();
+ ->MarkAsCall();
}
void InstructionSelector::VisitFloat64Ieee754Binop(Node* node,
- InstructionCode opcode) {
+ InstructionCode opcode) {
PPCOperandGenerator g(this);
- Emit(opcode, g.DefineAsFixed(node, d1),
- g.UseFixed(node->InputAt(0), d1),
- g.UseFixed(node->InputAt(1), d2))->MarkAsCall();
+ Emit(opcode, g.DefineAsFixed(node, d1), g.UseFixed(node->InputAt(0), d1),
+ g.UseFixed(node->InputAt(1), d2))
+ ->MarkAsCall();
}
void InstructionSelector::VisitFloat64Sqrt(Node* node) {
VisitRR(this, kPPC_SqrtDouble, node);
}
-
void InstructionSelector::VisitFloat32RoundDown(Node* node) {
VisitRR(this, kPPC_FloorDouble | MiscField::encode(1), node);
}
-
void InstructionSelector::VisitFloat64RoundDown(Node* node) {
VisitRR(this, kPPC_FloorDouble, node);
}
-
void InstructionSelector::VisitFloat32RoundUp(Node* node) {
VisitRR(this, kPPC_CeilDouble | MiscField::encode(1), node);
}
-
void InstructionSelector::VisitFloat64RoundUp(Node* node) {
VisitRR(this, kPPC_CeilDouble, node);
}
-
void InstructionSelector::VisitFloat32RoundTruncate(Node* node) {
VisitRR(this, kPPC_TruncateDouble | MiscField::encode(1), node);
}
-
void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
VisitRR(this, kPPC_TruncateDouble, node);
}
-
void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
VisitRR(this, kPPC_RoundDouble, node);
}
-
void InstructionSelector::VisitFloat32RoundTiesEven(Node* node) {
UNREACHABLE();
}
-
void InstructionSelector::VisitFloat64RoundTiesEven(Node* node) {
UNREACHABLE();
}
@@ -1439,7 +1373,6 @@ void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
&cont);
}
-
void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
@@ -1451,7 +1384,6 @@ void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
kInt16Imm_Negate, &cont);
}
-
#if V8_TARGET_ARCH_PPC64
void InstructionSelector::VisitInt64AddWithOverflow(Node* node) {
if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
@@ -1463,7 +1395,6 @@ void InstructionSelector::VisitInt64AddWithOverflow(Node* node) {
VisitBinop<Int64BinopMatcher>(this, node, kPPC_Add64, kInt16Imm, &cont);
}
-
void InstructionSelector::VisitInt64SubWithOverflow(Node* node) {
if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
@@ -1475,7 +1406,6 @@ void InstructionSelector::VisitInt64SubWithOverflow(Node* node) {
}
#endif
-
static bool CompareLogical(FlagsContinuation* cont) {
switch (cont->condition()) {
case kUnsignedLessThan:
@@ -1489,7 +1419,6 @@ static bool CompareLogical(FlagsContinuation* cont) {
UNREACHABLE();
}
-
namespace {
// Shared routine for multiple compare operations.
@@ -1499,7 +1428,6 @@ void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
selector->EmitWithContinuation(opcode, left, right, cont);
}
-
// Shared routine for multiple word compare operations.
void VisitWordCompare(InstructionSelector* selector, Node* node,
InstructionCode opcode, FlagsContinuation* cont,
@@ -1522,14 +1450,12 @@ void VisitWordCompare(InstructionSelector* selector, Node* node,
}
}
-
void VisitWord32Compare(InstructionSelector* selector, Node* node,
FlagsContinuation* cont) {
ImmediateMode mode = (CompareLogical(cont) ? kInt16Imm_Unsigned : kInt16Imm);
VisitWordCompare(selector, node, kPPC_Cmp32, cont, false, mode);
}
-
#if V8_TARGET_ARCH_PPC64
void VisitWord64Compare(InstructionSelector* selector, Node* node,
FlagsContinuation* cont) {
@@ -1538,7 +1464,6 @@ void VisitWord64Compare(InstructionSelector* selector, Node* node,
}
#endif
-
// Shared routine for multiple float32 compare operations.
void VisitFloat32Compare(InstructionSelector* selector, Node* node,
FlagsContinuation* cont) {
@@ -1549,7 +1474,6 @@ void VisitFloat32Compare(InstructionSelector* selector, Node* node,
g.UseRegister(right), cont);
}
-
// Shared routine for multiple float64 compare operations.
void VisitFloat64Compare(InstructionSelector* selector, Node* node,
FlagsContinuation* cont) {
@@ -1739,65 +1663,55 @@ void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
return EmitBinarySearchSwitch(sw, value_operand);
}
-
void InstructionSelector::VisitWord32Equal(Node* const node) {
FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
VisitWord32Compare(this, node, &cont);
}
-
void InstructionSelector::VisitInt32LessThan(Node* node) {
FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
VisitWord32Compare(this, node, &cont);
}
-
void InstructionSelector::VisitInt32LessThanOrEqual(Node* node) {
FlagsContinuation cont =
FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
VisitWord32Compare(this, node, &cont);
}
-
void InstructionSelector::VisitUint32LessThan(Node* node) {
FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
VisitWord32Compare(this, node, &cont);
}
-
void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) {
FlagsContinuation cont =
FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
VisitWord32Compare(this, node, &cont);
}
-
#if V8_TARGET_ARCH_PPC64
void InstructionSelector::VisitWord64Equal(Node* const node) {
FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
VisitWord64Compare(this, node, &cont);
}
-
void InstructionSelector::VisitInt64LessThan(Node* node) {
FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
VisitWord64Compare(this, node, &cont);
}
-
void InstructionSelector::VisitInt64LessThanOrEqual(Node* node) {
FlagsContinuation cont =
FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
VisitWord64Compare(this, node, &cont);
}
-
void InstructionSelector::VisitUint64LessThan(Node* node) {
FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
VisitWord64Compare(this, node, &cont);
}
-
void InstructionSelector::VisitUint64LessThanOrEqual(Node* node) {
FlagsContinuation cont =
FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
@@ -1814,38 +1728,32 @@ void InstructionSelector::VisitInt32MulWithOverflow(Node* node) {
EmitInt32MulWithOverflow(this, node, &cont);
}
-
void InstructionSelector::VisitFloat32Equal(Node* node) {
FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
VisitFloat32Compare(this, node, &cont);
}
-
void InstructionSelector::VisitFloat32LessThan(Node* node) {
FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
VisitFloat32Compare(this, node, &cont);
}
-
void InstructionSelector::VisitFloat32LessThanOrEqual(Node* node) {
FlagsContinuation cont =
FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
VisitFloat32Compare(this, node, &cont);
}
-
void InstructionSelector::VisitFloat64Equal(Node* node) {
FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
VisitFloat64Compare(this, node, &cont);
}
-
void InstructionSelector::VisitFloat64LessThan(Node* node) {
FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
VisitFloat64Compare(this, node, &cont);
}
-
void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
FlagsContinuation cont =
FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
@@ -1881,7 +1789,6 @@ void InstructionSelector::EmitPrepareArguments(
}
}
-
bool InstructionSelector::IsTailCallAddressImmediate() { return false; }
int InstructionSelector::GetTempsCountForTailCallFromJSFunction() { return 3; }
@@ -1892,14 +1799,12 @@ void InstructionSelector::VisitFloat64ExtractLowWord32(Node* node) {
g.UseRegister(node->InputAt(0)));
}
-
void InstructionSelector::VisitFloat64ExtractHighWord32(Node* node) {
PPCOperandGenerator g(this);
Emit(kPPC_DoubleExtractHighWord32, g.DefineAsRegister(node),
g.UseRegister(node->InputAt(0)));
}
-
void InstructionSelector::VisitFloat64InsertLowWord32(Node* node) {
PPCOperandGenerator g(this);
Node* left = node->InputAt(0);
@@ -1915,7 +1820,6 @@ void InstructionSelector::VisitFloat64InsertLowWord32(Node* node) {
g.UseRegister(left), g.UseRegister(right));
}
-
void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
PPCOperandGenerator g(this);
Node* left = node->InputAt(0);
@@ -1931,122 +1835,16 @@ void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
g.UseRegister(left), g.UseRegister(right));
}
-void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
- LoadRepresentation load_rep = LoadRepresentationOf(node->op());
- PPCOperandGenerator g(this);
- Node* base = node->InputAt(0);
- Node* index = node->InputAt(1);
- ArchOpcode opcode = kArchNop;
- switch (load_rep.representation()) {
- case MachineRepresentation::kWord8:
- opcode =
- load_rep.IsSigned() ? kWord32AtomicLoadInt8 : kWord32AtomicLoadUint8;
- break;
- case MachineRepresentation::kWord16:
- opcode = load_rep.IsSigned() ? kWord32AtomicLoadInt16
- : kWord32AtomicLoadUint16;
- break;
- case MachineRepresentation::kWord32:
- opcode = kWord32AtomicLoadWord32;
- break;
- default:
- UNREACHABLE();
- return;
- }
- Emit(opcode | AddressingModeField::encode(kMode_MRR),
- g.DefineAsRegister(node), g.UseRegister(base), g.UseRegister(index));
-}
+void InstructionSelector::VisitWord32AtomicLoad(Node* node) { VisitLoad(node); }
-void InstructionSelector::VisitWord64AtomicLoad(Node* node) {
- LoadRepresentation load_rep = LoadRepresentationOf(node->op());
- PPCOperandGenerator g(this);
- Node* base = node->InputAt(0);
- Node* index = node->InputAt(1);
- ArchOpcode opcode = kArchNop;
- switch (load_rep.representation()) {
- case MachineRepresentation::kWord8:
- opcode = kPPC_Word64AtomicLoadUint8;
- break;
- case MachineRepresentation::kWord16:
- opcode = kPPC_Word64AtomicLoadUint16;
- break;
- case MachineRepresentation::kWord32:
- opcode = kPPC_Word64AtomicLoadUint32;
- break;
- case MachineRepresentation::kWord64:
- opcode = kPPC_Word64AtomicLoadUint64;
- break;
- default:
- UNREACHABLE();
- return;
- }
- Emit(opcode | AddressingModeField::encode(kMode_MRR),
- g.DefineAsRegister(node), g.UseRegister(base), g.UseRegister(index));
-}
+void InstructionSelector::VisitWord64AtomicLoad(Node* node) { VisitLoad(node); }
void InstructionSelector::VisitWord32AtomicStore(Node* node) {
- MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
- PPCOperandGenerator g(this);
- Node* base = node->InputAt(0);
- Node* index = node->InputAt(1);
- Node* value = node->InputAt(2);
- ArchOpcode opcode = kArchNop;
- switch (rep) {
- case MachineRepresentation::kWord8:
- opcode = kWord32AtomicStoreWord8;
- break;
- case MachineRepresentation::kWord16:
- opcode = kWord32AtomicStoreWord16;
- break;
- case MachineRepresentation::kWord32:
- opcode = kWord32AtomicStoreWord32;
- break;
- default:
- UNREACHABLE();
- return;
- }
-
- InstructionOperand inputs[4];
- size_t input_count = 0;
- inputs[input_count++] = g.UseUniqueRegister(base);
- inputs[input_count++] = g.UseUniqueRegister(index);
- inputs[input_count++] = g.UseUniqueRegister(value);
- Emit(opcode | AddressingModeField::encode(kMode_MRR),
- 0, nullptr, input_count, inputs);
+ VisitStore(node);
}
void InstructionSelector::VisitWord64AtomicStore(Node* node) {
- MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
- PPCOperandGenerator g(this);
- Node* base = node->InputAt(0);
- Node* index = node->InputAt(1);
- Node* value = node->InputAt(2);
- ArchOpcode opcode = kArchNop;
- switch (rep) {
- case MachineRepresentation::kWord8:
- opcode = kPPC_Word64AtomicStoreUint8;
- break;
- case MachineRepresentation::kWord16:
- opcode = kPPC_Word64AtomicStoreUint16;
- break;
- case MachineRepresentation::kWord32:
- opcode = kPPC_Word64AtomicStoreUint32;
- break;
- case MachineRepresentation::kWord64:
- opcode = kPPC_Word64AtomicStoreUint64;
- break;
- default:
- UNREACHABLE();
- return;
- }
-
- InstructionOperand inputs[4];
- size_t input_count = 0;
- inputs[input_count++] = g.UseUniqueRegister(base);
- inputs[input_count++] = g.UseUniqueRegister(index);
- inputs[input_count++] = g.UseUniqueRegister(value);
- Emit(opcode | AddressingModeField::encode(kMode_MRR), 0, nullptr, input_count,
- inputs);
+ VisitStore(node);
}
void VisitAtomicExchange(InstructionSelector* selector, Node* node,
@@ -2074,13 +1872,13 @@ void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
if (type == MachineType::Int8()) {
opcode = kWord32AtomicExchangeInt8;
} else if (type == MachineType::Uint8()) {
- opcode = kWord32AtomicExchangeUint8;
+ opcode = kPPC_AtomicExchangeUint8;
} else if (type == MachineType::Int16()) {
opcode = kWord32AtomicExchangeInt16;
} else if (type == MachineType::Uint16()) {
- opcode = kWord32AtomicExchangeUint16;
+ opcode = kPPC_AtomicExchangeUint16;
} else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
- opcode = kWord32AtomicExchangeWord32;
+ opcode = kPPC_AtomicExchangeWord32;
} else {
UNREACHABLE();
return;
@@ -2092,13 +1890,13 @@ void InstructionSelector::VisitWord64AtomicExchange(Node* node) {
ArchOpcode opcode = kArchNop;
MachineType type = AtomicOpType(node->op());
if (type == MachineType::Uint8()) {
- opcode = kPPC_Word64AtomicExchangeUint8;
+ opcode = kPPC_AtomicExchangeUint8;
} else if (type == MachineType::Uint16()) {
- opcode = kPPC_Word64AtomicExchangeUint16;
+ opcode = kPPC_AtomicExchangeUint16;
} else if (type == MachineType::Uint32()) {
- opcode = kPPC_Word64AtomicExchangeUint32;
+ opcode = kPPC_AtomicExchangeWord32;
} else if (type == MachineType::Uint64()) {
- opcode = kPPC_Word64AtomicExchangeUint64;
+ opcode = kPPC_AtomicExchangeWord64;
} else {
UNREACHABLE();
return;
@@ -2137,13 +1935,13 @@ void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
if (type == MachineType::Int8()) {
opcode = kWord32AtomicCompareExchangeInt8;
} else if (type == MachineType::Uint8()) {
- opcode = kWord32AtomicCompareExchangeUint8;
+ opcode = kPPC_AtomicCompareExchangeUint8;
} else if (type == MachineType::Int16()) {
opcode = kWord32AtomicCompareExchangeInt16;
} else if (type == MachineType::Uint16()) {
- opcode = kWord32AtomicCompareExchangeUint16;
+ opcode = kPPC_AtomicCompareExchangeUint16;
} else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
- opcode = kWord32AtomicCompareExchangeWord32;
+ opcode = kPPC_AtomicCompareExchangeWord32;
} else {
UNREACHABLE();
return;
@@ -2155,13 +1953,13 @@ void InstructionSelector::VisitWord64AtomicCompareExchange(Node* node) {
MachineType type = AtomicOpType(node->op());
ArchOpcode opcode = kArchNop;
if (type == MachineType::Uint8()) {
- opcode = kPPC_Word64AtomicCompareExchangeUint8;
+ opcode = kPPC_AtomicCompareExchangeUint8;
} else if (type == MachineType::Uint16()) {
- opcode = kPPC_Word64AtomicCompareExchangeUint16;
+ opcode = kPPC_AtomicCompareExchangeUint16;
} else if (type == MachineType::Uint32()) {
- opcode = kPPC_Word64AtomicCompareExchangeUint32;
+ opcode = kPPC_AtomicCompareExchangeWord32;
} else if (type == MachineType::Uint64()) {
- opcode = kPPC_Word64AtomicCompareExchangeUint64;
+ opcode = kPPC_AtomicCompareExchangeWord64;
} else {
UNREACHABLE();
return;
@@ -2170,11 +1968,38 @@ void InstructionSelector::VisitWord64AtomicCompareExchange(Node* node) {
}
void VisitAtomicBinaryOperation(InstructionSelector* selector, Node* node,
- ArchOpcode opcode) {
+ ArchOpcode int8_op, ArchOpcode uint8_op,
+ ArchOpcode int16_op, ArchOpcode uint16_op,
+ ArchOpcode int32_op, ArchOpcode uint32_op,
+ ArchOpcode int64_op, ArchOpcode uint64_op) {
PPCOperandGenerator g(selector);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
Node* value = node->InputAt(2);
+ MachineType type = AtomicOpType(node->op());
+
+ ArchOpcode opcode = kArchNop;
+
+ if (type == MachineType::Int8()) {
+ opcode = int8_op;
+ } else if (type == MachineType::Uint8()) {
+ opcode = uint8_op;
+ } else if (type == MachineType::Int16()) {
+ opcode = int16_op;
+ } else if (type == MachineType::Uint16()) {
+ opcode = uint16_op;
+ } else if (type == MachineType::Int32()) {
+ opcode = int32_op;
+ } else if (type == MachineType::Uint32()) {
+ opcode = uint32_op;
+ } else if (type == MachineType::Int64()) {
+ opcode = int64_op;
+ } else if (type == MachineType::Uint64()) {
+ opcode = uint64_op;
+ } else {
+ UNREACHABLE();
+ return;
+ }
AddressingMode addressing_mode = kMode_MRR;
InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
@@ -2195,32 +2020,31 @@ void VisitAtomicBinaryOperation(InstructionSelector* selector, Node* node,
void InstructionSelector::VisitWord32AtomicBinaryOperation(
Node* node, ArchOpcode int8_op, ArchOpcode uint8_op, ArchOpcode int16_op,
ArchOpcode uint16_op, ArchOpcode word32_op) {
- MachineType type = AtomicOpType(node->op());
- ArchOpcode opcode = kArchNop;
+  // Unused: atomic binops are selected via VisitAtomicBinaryOperation instead.
+ UNREACHABLE();
+}
- if (type == MachineType::Int8()) {
- opcode = int8_op;
- } else if (type == MachineType::Uint8()) {
- opcode = uint8_op;
- } else if (type == MachineType::Int16()) {
- opcode = int16_op;
- } else if (type == MachineType::Uint16()) {
- opcode = uint16_op;
- } else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
- opcode = word32_op;
- } else {
- UNREACHABLE();
- return;
- }
- VisitAtomicBinaryOperation(this, node, opcode);
+void InstructionSelector::VisitWord64AtomicBinaryOperation(
+ Node* node, ArchOpcode uint8_op, ArchOpcode uint16_op, ArchOpcode uint32_op,
+ ArchOpcode uint64_op) {
+  // Unused: see VisitAtomicBinaryOperation above; both widths share it.
+ UNREACHABLE();
}
-#define VISIT_ATOMIC_BINOP(op) \
- void InstructionSelector::VisitWord32Atomic##op(Node* node) { \
- VisitWord32AtomicBinaryOperation( \
- node, kWord32Atomic##op##Int8, kWord32Atomic##op##Uint8, \
- kWord32Atomic##op##Int16, kWord32Atomic##op##Uint16, \
- kWord32Atomic##op##Word32); \
+#define VISIT_ATOMIC_BINOP(op) \
+ void InstructionSelector::VisitWord32Atomic##op(Node* node) { \
+ VisitAtomicBinaryOperation( \
+ this, node, kPPC_Atomic##op##Int8, kPPC_Atomic##op##Uint8, \
+ kPPC_Atomic##op##Int16, kPPC_Atomic##op##Uint16, \
+ kPPC_Atomic##op##Int32, kPPC_Atomic##op##Uint32, \
+ kPPC_Atomic##op##Int64, kPPC_Atomic##op##Uint64); \
+ } \
+ void InstructionSelector::VisitWord64Atomic##op(Node* node) { \
+ VisitAtomicBinaryOperation( \
+ this, node, kPPC_Atomic##op##Int8, kPPC_Atomic##op##Uint8, \
+ kPPC_Atomic##op##Int16, kPPC_Atomic##op##Uint16, \
+ kPPC_Atomic##op##Int32, kPPC_Atomic##op##Uint32, \
+ kPPC_Atomic##op##Int64, kPPC_Atomic##op##Uint64); \
}
VISIT_ATOMIC_BINOP(Add)
VISIT_ATOMIC_BINOP(Sub)
@@ -2229,40 +2053,6 @@ VISIT_ATOMIC_BINOP(Or)
VISIT_ATOMIC_BINOP(Xor)
#undef VISIT_ATOMIC_BINOP
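[Editor's note] For readability, here is what VISIT_ATOMIC_BINOP(Add) expands to (mechanical macro expansion, shown for the 32-bit visitor; the 64-bit visitor receives the identical argument list, and AtomicOpType picks the opcode at selection time):

    void InstructionSelector::VisitWord32AtomicAdd(Node* node) {
      VisitAtomicBinaryOperation(
          this, node, kPPC_AtomicAddInt8, kPPC_AtomicAddUint8,
          kPPC_AtomicAddInt16, kPPC_AtomicAddUint16, kPPC_AtomicAddInt32,
          kPPC_AtomicAddUint32, kPPC_AtomicAddInt64, kPPC_AtomicAddUint64);
    }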
-void InstructionSelector::VisitWord64AtomicBinaryOperation(
- Node* node, ArchOpcode uint8_op, ArchOpcode uint16_op, ArchOpcode uint32_op,
- ArchOpcode uint64_op) {
- MachineType type = AtomicOpType(node->op());
- ArchOpcode opcode = kArchNop;
-
- if (type == MachineType::Uint8()) {
- opcode = uint8_op;
- } else if (type == MachineType::Uint16()) {
- opcode = uint16_op;
- } else if (type == MachineType::Uint32()) {
- opcode = uint32_op;
- } else if (type == MachineType::Uint64()) {
- opcode = uint64_op;
- } else {
- UNREACHABLE();
- return;
- }
- VisitAtomicBinaryOperation(this, node, opcode);
-}
-
-#define VISIT_ATOMIC64_BINOP(op) \
- void InstructionSelector::VisitWord64Atomic##op(Node* node) { \
- VisitWord64AtomicBinaryOperation( \
- node, kPPC_Word64Atomic##op##Uint8, kPPC_Word64Atomic##op##Uint16, \
- kPPC_Word64Atomic##op##Uint32, kPPC_Word64Atomic##op##Uint64); \
- }
-VISIT_ATOMIC64_BINOP(Add)
-VISIT_ATOMIC64_BINOP(Sub)
-VISIT_ATOMIC64_BINOP(And)
-VISIT_ATOMIC64_BINOP(Or)
-VISIT_ATOMIC64_BINOP(Xor)
-#undef VISIT_ATOMIC64_BINOP
-
void InstructionSelector::VisitInt32AbsWithOverflow(Node* node) {
UNREACHABLE();
}
@@ -2534,7 +2324,6 @@ void InstructionSelector::VisitI8x16UConvertI16x8(Node* node) {
UNIMPLEMENTED();
}
-
void InstructionSelector::VisitS1x4AnyTrue(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitS1x4AllTrue(Node* node) { UNIMPLEMENTED(); }
@@ -2547,6 +2336,16 @@ void InstructionSelector::VisitS1x16AnyTrue(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitS1x16AllTrue(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI8x16Shl(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI8x16ShrS(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI8x16ShrU(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI8x16Mul(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitS8x16Shuffle(Node* node) { UNIMPLEMENTED(); }
+
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
diff --git a/deps/v8/src/compiler/register-allocator-verifier.cc b/deps/v8/src/compiler/backend/register-allocator-verifier.cc
index f294471fd3..a66c35bd99 100644
--- a/deps/v8/src/compiler/register-allocator-verifier.cc
+++ b/deps/v8/src/compiler/backend/register-allocator-verifier.cc
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/compiler/register-allocator-verifier.h"
+#include "src/compiler/backend/register-allocator-verifier.h"
#include "src/bit-vector.h"
-#include "src/compiler/instruction.h"
+#include "src/compiler/backend/instruction.h"
#include "src/ostreams.h"
namespace v8 {
@@ -317,11 +317,8 @@ void BlockAssessments::Print() const {
const InstructionOperand op = pair.first;
const Assessment* assessment = pair.second;
// Use operator<< so we can write the assessment on the same
- // line. Since we need a register configuration, just pick
- // Turbofan for now.
- PrintableInstructionOperand wrapper = {RegisterConfiguration::Default(),
- op};
- os << wrapper << " : ";
+ // line.
+ os << op << " : ";
if (assessment->kind() == AssessmentKind::Final) {
os << "v" << FinalAssessment::cast(assessment)->virtual_register();
} else {
diff --git a/deps/v8/src/compiler/register-allocator-verifier.h b/deps/v8/src/compiler/backend/register-allocator-verifier.h
index 140b3a3ef5..efe5e863b2 100644
--- a/deps/v8/src/compiler/register-allocator-verifier.h
+++ b/deps/v8/src/compiler/backend/register-allocator-verifier.h
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_COMPILER_REGISTER_ALLOCATOR_VERIFIER_H_
-#define V8_COMPILER_REGISTER_ALLOCATOR_VERIFIER_H_
+#ifndef V8_COMPILER_BACKEND_REGISTER_ALLOCATOR_VERIFIER_H_
+#define V8_COMPILER_BACKEND_REGISTER_ALLOCATOR_VERIFIER_H_
-#include "src/compiler/instruction.h"
+#include "src/compiler/backend/instruction.h"
#include "src/zone/zone-containers.h"
namespace v8 {
@@ -271,4 +271,4 @@ class RegisterAllocatorVerifier final : public ZoneObject {
} // namespace internal
} // namespace v8
-#endif // V8_COMPILER_REGISTER_ALLOCATOR_VERIFIER_H_
+#endif // V8_COMPILER_BACKEND_REGISTER_ALLOCATOR_VERIFIER_H_
diff --git a/deps/v8/src/compiler/register-allocator.cc b/deps/v8/src/compiler/backend/register-allocator.cc
index 0649748a35..883e0001a7 100644
--- a/deps/v8/src/compiler/register-allocator.cc
+++ b/deps/v8/src/compiler/backend/register-allocator.cc
@@ -2,7 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/compiler/register-allocator.h"
+#include "src/compiler/backend/register-allocator.h"
+
+#include <iomanip>
#include "src/assembler-inl.h"
#include "src/base/adapters.h"
@@ -18,7 +20,6 @@ namespace compiler {
if (FLAG_trace_alloc) PrintF(__VA_ARGS__); \
} while (false)
-
namespace {
static constexpr int kFloat32Bit =
@@ -26,32 +27,23 @@ static constexpr int kFloat32Bit =
static constexpr int kSimd128Bit =
RepresentationBit(MachineRepresentation::kSimd128);
-void RemoveElement(ZoneVector<LiveRange*>* v, LiveRange* range) {
- auto it = std::find(v->begin(), v->end(), range);
- DCHECK(it != v->end());
- v->erase(it);
-}
-
int GetRegisterCount(const RegisterConfiguration* cfg, RegisterKind kind) {
return kind == FP_REGISTERS ? cfg->num_double_registers()
: cfg->num_general_registers();
}
-
int GetAllocatableRegisterCount(const RegisterConfiguration* cfg,
RegisterKind kind) {
return kind == FP_REGISTERS ? cfg->num_allocatable_double_registers()
: cfg->num_allocatable_general_registers();
}
-
const int* GetAllocatableRegisterCodes(const RegisterConfiguration* cfg,
RegisterKind kind) {
return kind == FP_REGISTERS ? cfg->allocatable_double_codes()
: cfg->allocatable_general_codes();
}
-
const InstructionBlock* GetContainingLoop(const InstructionSequence* sequence,
const InstructionBlock* block) {
RpoNumber index = block->loop_header();
@@ -59,13 +51,11 @@ const InstructionBlock* GetContainingLoop(const InstructionSequence* sequence,
return sequence->InstructionBlockAt(index);
}
-
const InstructionBlock* GetInstructionBlock(const InstructionSequence* code,
LifetimePosition pos) {
return code->GetInstructionBlock(pos.ToInstructionIndex());
}
-
Instruction* GetLastInstruction(InstructionSequence* code,
const InstructionBlock* block) {
return code->InstructionAt(block->last_instruction_index());
@@ -78,11 +68,13 @@ int GetByteWidth(MachineRepresentation rep) {
case MachineRepresentation::kWord8:
case MachineRepresentation::kWord16:
case MachineRepresentation::kWord32:
+ case MachineRepresentation::kFloat32:
+ return kSystemPointerSize;
case MachineRepresentation::kTaggedSigned:
case MachineRepresentation::kTaggedPointer:
case MachineRepresentation::kTagged:
- case MachineRepresentation::kFloat32:
- return kPointerSize;
+ // TODO(ishell): kTaggedSize once half size locations are supported.
+ return kSystemPointerSize;
case MachineRepresentation::kWord64:
case MachineRepresentation::kFloat64:
return kDoubleSize;
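[Editor's note] On a 64-bit target (kSystemPointerSize == 8) the spill-slot sizing above works out as follows (illustration, assuming no pointer compression):

    // kBit, kWord8, kWord16, kWord32, kFloat32  -> 8 bytes (kSystemPointerSize)
    // kTaggedSigned, kTaggedPointer, kTagged    -> 8 bytes (kSystemPointerSize)
    // kWord64, kFloat64                         -> 8 bytes (kDoubleSize)
    // kSimd128                                  -> 16 bytes
    // Per the TODO, tagged slots could later shrink to kTaggedSize once
    // half-width spill slots are supported.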
@@ -116,13 +108,11 @@ class LiveRangeBound {
DISALLOW_COPY_AND_ASSIGN(LiveRangeBound);
};
-
struct FindResult {
LiveRange* cur_cover_;
LiveRange* pred_cover_;
};
-
class LiveRangeBoundArray {
public:
LiveRangeBoundArray() : length_(0), start_(nullptr) {}
@@ -204,7 +194,6 @@ class LiveRangeBoundArray {
DISALLOW_COPY_AND_ASSIGN(LiveRangeBoundArray);
};
-
class LiveRangeFinder {
public:
explicit LiveRangeFinder(const RegisterAllocationData* data, Zone* zone)
@@ -237,10 +226,8 @@ class LiveRangeFinder {
DISALLOW_COPY_AND_ASSIGN(LiveRangeFinder);
};
-
typedef std::pair<ParallelMove*, InstructionOperand> DelayedInsertionMapKey;
-
struct DelayedInsertionMapCompare {
bool operator()(const DelayedInsertionMapKey& a,
const DelayedInsertionMapKey& b) const {
@@ -251,10 +238,9 @@ struct DelayedInsertionMapCompare {
}
};
-
typedef ZoneMap<DelayedInsertionMapKey, InstructionOperand,
- DelayedInsertionMapCompare> DelayedInsertionMap;
-
+ DelayedInsertionMapCompare>
+ DelayedInsertionMap;
UsePosition::UsePosition(LifetimePosition pos, InstructionOperand* operand,
void* hint, UsePositionHintType hint_type)
@@ -282,13 +268,11 @@ UsePosition::UsePosition(LifetimePosition pos, InstructionOperand* operand,
DCHECK(pos_.IsValid());
}
-
bool UsePosition::HasHint() const {
int hint_register;
return HintRegister(&hint_register);
}
-
bool UsePosition::HintRegister(int* register_code) const {
if (hint_ == nullptr) return false;
switch (HintTypeField::decode(flags_)) {
@@ -320,7 +304,6 @@ bool UsePosition::HintRegister(int* register_code) const {
UNREACHABLE();
}
-
UsePositionHintType UsePosition::HintTypeForOperand(
const InstructionOperand& op) {
switch (op.kind()) {
@@ -356,7 +339,6 @@ void UsePosition::ResolveHint(UsePosition* use_pos) {
flags_ = HintTypeField::update(flags_, UsePositionHintType::kUsePos);
}
-
void UsePosition::set_type(UsePositionType type, bool register_beneficial) {
DCHECK_IMPLIES(type == UsePositionType::kRequiresSlot, !register_beneficial);
DCHECK_EQ(kUnassignedRegister, AssignedRegisterField::decode(flags_));
@@ -366,7 +348,6 @@ void UsePosition::set_type(UsePositionType type, bool register_beneficial) {
AssignedRegisterField::encode(kUnassignedRegister);
}
-
UseInterval* UseInterval::SplitAt(LifetimePosition pos, Zone* zone) {
DCHECK(Contains(pos) && pos != start());
UseInterval* after = new (zone) UseInterval(pos, end_);
@@ -411,7 +392,6 @@ LiveRange::LiveRange(int relative_id, MachineRepresentation rep,
RepresentationField::encode(rep);
}
-
void LiveRange::VerifyPositions() const {
// Walk the positions, verifying that each is in an interval.
UseInterval* interval = first_interval_;
@@ -426,7 +406,6 @@ void LiveRange::VerifyPositions() const {
}
}
-
void LiveRange::VerifyIntervals() const {
DCHECK(first_interval()->start() == Start());
LifetimePosition last_end = first_interval()->end();
@@ -438,19 +417,16 @@ void LiveRange::VerifyIntervals() const {
DCHECK(last_end == End());
}
-
void LiveRange::set_assigned_register(int reg) {
DCHECK(!HasRegisterAssigned() && !spilled());
bits_ = AssignedRegisterField::update(bits_, reg);
}
-
void LiveRange::UnsetAssignedRegister() {
DCHECK(HasRegisterAssigned() && !spilled());
bits_ = AssignedRegisterField::update(bits_, kUnassignedRegister);
}
-
void LiveRange::Spill() {
DCHECK(!spilled());
DCHECK(!TopLevel()->HasNoSpillType());
@@ -458,12 +434,10 @@ void LiveRange::Spill() {
bits_ = AssignedRegisterField::update(bits_, kUnassignedRegister);
}
-
RegisterKind LiveRange::kind() const {
return IsFloatingPoint(representation()) ? FP_REGISTERS : GENERAL_REGISTERS;
}
-
UsePosition* LiveRange::FirstHintPosition(int* register_index) const {
for (UsePosition* pos = first_pos_; pos != nullptr; pos = pos->next()) {
if (pos->HintRegister(register_index)) return pos;
@@ -471,7 +445,6 @@ UsePosition* LiveRange::FirstHintPosition(int* register_index) const {
return nullptr;
}
-
UsePosition* LiveRange::NextUsePosition(LifetimePosition start) const {
UsePosition* use_pos = last_processed_use_;
if (use_pos == nullptr || use_pos->pos() > start) {
@@ -484,7 +457,6 @@ UsePosition* LiveRange::NextUsePosition(LifetimePosition start) const {
return use_pos;
}
-
UsePosition* LiveRange::NextUsePositionRegisterIsBeneficial(
LifetimePosition start) const {
UsePosition* pos = NextUsePosition(start);
@@ -512,7 +484,6 @@ UsePosition* LiveRange::PreviousUsePositionRegisterIsBeneficial(
return prev;
}
-
UsePosition* LiveRange::NextRegisterPosition(LifetimePosition start) const {
UsePosition* pos = NextUsePosition(start);
while (pos != nullptr && pos->type() != UsePositionType::kRequiresRegister) {
@@ -521,7 +492,6 @@ UsePosition* LiveRange::NextRegisterPosition(LifetimePosition start) const {
return pos;
}
-
UsePosition* LiveRange::NextSlotPosition(LifetimePosition start) const {
for (UsePosition* pos = NextUsePosition(start); pos != nullptr;
pos = pos->next()) {
@@ -531,7 +501,6 @@ UsePosition* LiveRange::NextSlotPosition(LifetimePosition start) const {
return nullptr;
}
-
bool LiveRange::CanBeSpilled(LifetimePosition pos) const {
// We cannot spill a live range that has a use requiring a register
// at the current or the immediate next position.
@@ -540,10 +509,8 @@ bool LiveRange::CanBeSpilled(LifetimePosition pos) const {
return use_pos->pos() > pos.NextStart().End();
}
-
bool LiveRange::IsTopLevel() const { return top_level_ == this; }
-
InstructionOperand LiveRange::GetAssignedOperand() const {
if (HasRegisterAssigned()) {
DCHECK(!spilled());
@@ -560,7 +527,6 @@ InstructionOperand LiveRange::GetAssignedOperand() const {
return TopLevel()->GetSpillRangeOperand();
}
-
UseInterval* LiveRange::FirstSearchIntervalForPosition(
LifetimePosition position) const {
if (current_interval_ == nullptr) return first_interval_;
@@ -571,7 +537,6 @@ UseInterval* LiveRange::FirstSearchIntervalForPosition(
return current_interval_;
}
-
void LiveRange::AdvanceLastProcessedMarker(
UseInterval* to_start_of, LifetimePosition but_not_past) const {
if (to_start_of == nullptr) return;
@@ -584,10 +549,10 @@ void LiveRange::AdvanceLastProcessedMarker(
}
}
-
LiveRange* LiveRange::SplitAt(LifetimePosition position, Zone* zone) {
int new_id = TopLevel()->GetNextChildId();
LiveRange* child = new (zone) LiveRange(new_id, representation(), TopLevel());
+ child->set_bundle(bundle_);
// If we split, we do so because we're about to switch registers or move
// to/from a slot, so there's no value in connecting hints.
DetachAt(position, child, zone, DoNotConnectHints);
@@ -690,7 +655,6 @@ UsePosition* LiveRange::DetachAt(LifetimePosition position, LiveRange* result,
return use_before;
}
-
void LiveRange::UpdateParentForAllChildren(TopLevelLiveRange* new_top_level) {
LiveRange* child = this;
for (; child != nullptr; child = child->next()) {
@@ -698,7 +662,6 @@ void LiveRange::UpdateParentForAllChildren(TopLevelLiveRange* new_top_level) {
}
}
-
void LiveRange::ConvertUsesToOperand(const InstructionOperand& op,
const InstructionOperand& spill_op) {
for (UsePosition* pos = first_pos(); pos != nullptr; pos = pos->next()) {
@@ -720,7 +683,6 @@ void LiveRange::ConvertUsesToOperand(const InstructionOperand& op,
}
}
-
// This implements an ordering on live ranges so that they are ordered by their
// start positions. This is needed for the correctness of the register
// allocation algorithm. If two live ranges start at the same offset then there
@@ -739,7 +701,6 @@ bool LiveRange::ShouldBeAllocatedBefore(const LiveRange* other) const {
return start < other_start;
}
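
The ordering above drives the sorted unhandled queue. As a minimal self-contained sketch of the same idea (simplified stand-in types, not the V8 classes; the first-use tie break is an assumption, since the original comment is truncated by the hunk):

#include <algorithm>
#include <vector>

// Simplified stand-in for a live range: just what the ordering needs.
struct SimpleRange {
  int start;      // first lifetime position covered
  int first_use;  // assumed tie breaker: position of the first use
};

// Strict weak ordering: primarily by start position, ties broken by the
// earlier first use.
bool StartsBefore(const SimpleRange& a, const SimpleRange& b) {
  if (a.start != b.start) return a.start < b.start;
  return a.first_use < b.first_use;
}

int main() {
  std::vector<SimpleRange> unhandled = {{4, 9}, {1, 3}, {4, 6}};
  std::sort(unhandled.begin(), unhandled.end(), StartsBefore);
  // Now ordered: {1,3}, {4,6}, {4,9}.
}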
-
void LiveRange::SetUseHints(int register_index) {
for (UsePosition* pos = first_pos(); pos != nullptr; pos = pos->next()) {
if (!pos->HasOperand()) continue;
@@ -755,13 +716,11 @@ void LiveRange::SetUseHints(int register_index) {
}
}
-
bool LiveRange::CanCover(LifetimePosition position) const {
if (IsEmpty()) return false;
return Start() <= position && position < End();
}
-
bool LiveRange::Covers(LifetimePosition position) const {
if (!CanCover(position)) return false;
UseInterval* start_search = FirstSearchIntervalForPosition(position);
@@ -776,6 +735,21 @@ bool LiveRange::Covers(LifetimePosition position) const {
return false;
}
+LifetimePosition LiveRange::NextEndAfter(LifetimePosition position) const {
+ UseInterval* start_search = FirstSearchIntervalForPosition(position);
+ while (start_search->end() < position) {
+ start_search = start_search->next();
+ }
+ return start_search->end();
+}
+
+LifetimePosition LiveRange::NextStartAfter(LifetimePosition position) const {
+ UseInterval* start_search = FirstSearchIntervalForPosition(position);
+ while (start_search->start() < position) {
+ start_search = start_search->next();
+ }
+ return start_search->start();
+}
LifetimePosition LiveRange::FirstIntersection(LiveRange* other) const {
UseInterval* b = other->first_interval();
@@ -812,11 +786,20 @@ void LiveRange::Print(const RegisterConfiguration* config,
}
}
-
void LiveRange::Print(bool with_children) const {
Print(RegisterConfiguration::Default(), with_children);
}
+bool LiveRange::RegisterFromBundle(int* hint) const {
+ if (bundle_ == nullptr || bundle_->reg() == kUnassignedRegister) return false;
+ *hint = bundle_->reg();
+ return true;
+}
+
+void LiveRange::UpdateBundleRegister(int reg) const {
+ if (bundle_ == nullptr || bundle_->reg() != kUnassignedRegister) return;
+ bundle_->set_reg(reg);
+}
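
Together with TryAllocatePreferredReg further below, these helpers give every member of a bundle a shared register hint. A hypothetical sequence (register names invented for illustration):

// Assume ranges v1, v2 and phi output v3 ended up in one bundle:
//   1. v1 is allocated first and receives rax
//      -> SetLiveRangeAssignedRegister calls UpdateBundleRegister(rax).
//   2. v2 has no use-position hint, but RegisterFromBundle(&hint) yields rax
//      -> TryAllocatePreferredReg tries rax before anything else.
//   3. v3 likewise prefers rax, so no gap moves are needed at the phi.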
struct TopLevelLiveRange::SpillMoveInsertionList : ZoneObject {
SpillMoveInsertionList(int gap_index, InstructionOperand* operand,
@@ -827,7 +810,6 @@ struct TopLevelLiveRange::SpillMoveInsertionList : ZoneObject {
SpillMoveInsertionList* const next;
};
-
TopLevelLiveRange::TopLevelLiveRange(int vreg, MachineRepresentation rep)
: LiveRange(0, rep, this),
vreg_(vreg),
@@ -843,14 +825,12 @@ TopLevelLiveRange::TopLevelLiveRange(int vreg, MachineRepresentation rep)
bits_ |= SpillTypeField::encode(SpillType::kNoSpillType);
}
-
#if DEBUG
int TopLevelLiveRange::debug_virt_reg() const {
return IsSplinter() ? splintered_from()->vreg() : vreg();
}
#endif
-
void TopLevelLiveRange::RecordSpillLocation(Zone* zone, int gap_index,
InstructionOperand* operand) {
DCHECK(HasNoSpillType());
@@ -890,7 +870,6 @@ void TopLevelLiveRange::CommitSpillMoves(InstructionSequence* sequence,
}
}
-
void TopLevelLiveRange::SetSpillOperand(InstructionOperand* operand) {
DCHECK(HasNoSpillType());
DCHECK(!operand->IsUnallocated() && !operand->IsImmediate());
@@ -898,21 +877,18 @@ void TopLevelLiveRange::SetSpillOperand(InstructionOperand* operand) {
spill_operand_ = operand;
}
-
void TopLevelLiveRange::SetSpillRange(SpillRange* spill_range) {
DCHECK(!HasSpillOperand());
DCHECK(spill_range);
spill_range_ = spill_range;
}
-
AllocatedOperand TopLevelLiveRange::GetSpillRangeOperand() const {
SpillRange* spill_range = GetSpillRange();
int index = spill_range->assigned_slot();
return AllocatedOperand(LocationOperand::STACK_SLOT, representation(), index);
}
-
void TopLevelLiveRange::Splinter(LifetimePosition start, LifetimePosition end,
Zone* zone) {
DCHECK(start != Start() || end != End());
@@ -989,7 +965,6 @@ void TopLevelLiveRange::Splinter(LifetimePosition start, LifetimePosition end,
#endif
}
-
void TopLevelLiveRange::SetSplinteredFrom(TopLevelLiveRange* splinter_parent) {
splintered_from_ = splinter_parent;
if (!HasSpillOperand() && splinter_parent->spill_range_ != nullptr) {
@@ -997,7 +972,6 @@ void TopLevelLiveRange::SetSplinteredFrom(TopLevelLiveRange* splinter_parent) {
}
}
-
void TopLevelLiveRange::UpdateSpillRangePostMerge(TopLevelLiveRange* merged) {
DCHECK(merged->TopLevel() == this);
@@ -1010,7 +984,6 @@ void TopLevelLiveRange::UpdateSpillRangePostMerge(TopLevelLiveRange* merged) {
}
}
-
void TopLevelLiveRange::Merge(TopLevelLiveRange* other, Zone* zone) {
DCHECK(Start() < other->Start());
DCHECK(other->splintered_from() == this);
@@ -1068,7 +1041,6 @@ void TopLevelLiveRange::Merge(TopLevelLiveRange* other, Zone* zone) {
#endif
}
-
void TopLevelLiveRange::VerifyChildrenInOrder() const {
LifetimePosition last_end = End();
for (const LiveRange* child = this->next(); child != nullptr;
@@ -1078,7 +1050,6 @@ void TopLevelLiveRange::VerifyChildrenInOrder() const {
}
}
-
void TopLevelLiveRange::Verify() const {
VerifyChildrenInOrder();
for (const LiveRange* child = this; child != nullptr; child = child->next()) {
@@ -1086,7 +1057,6 @@ void TopLevelLiveRange::Verify() const {
}
}
-
void TopLevelLiveRange::ShortenTo(LifetimePosition start) {
TRACE("Shorten live range %d to [%d\n", vreg(), start.value());
DCHECK_NOT_NULL(first_interval_);
@@ -1095,7 +1065,6 @@ void TopLevelLiveRange::ShortenTo(LifetimePosition start) {
first_interval_->set_start(start);
}
-
void TopLevelLiveRange::EnsureInterval(LifetimePosition start,
LifetimePosition end, Zone* zone) {
TRACE("Ensure live range %d in interval [%d %d[\n", vreg(), start.value(),
@@ -1116,7 +1085,6 @@ void TopLevelLiveRange::EnsureInterval(LifetimePosition start,
}
}
-
void TopLevelLiveRange::AddUseInterval(LifetimePosition start,
LifetimePosition end, Zone* zone) {
TRACE("Add to live range %d interval [%d %d[\n", vreg(), start.value(),
@@ -1143,7 +1111,6 @@ void TopLevelLiveRange::AddUseInterval(LifetimePosition start,
}
}
-
void TopLevelLiveRange::AddUsePosition(UsePosition* use_pos) {
LifetimePosition pos = use_pos->pos();
TRACE("Add to live range %d use position %d\n", vreg(), pos.value());
@@ -1169,7 +1136,6 @@ void TopLevelLiveRange::AddUsePosition(UsePosition* use_pos) {
}
}
-
static bool AreUseIntervalsIntersecting(UseInterval* interval1,
UseInterval* interval2) {
while (interval1 != nullptr && interval2 != nullptr) {
@@ -1188,7 +1154,6 @@ static bool AreUseIntervalsIntersecting(UseInterval* interval1,
return false;
}
-
std::ostream& operator<<(std::ostream& os,
const PrintableLiveRange& printable_range) {
const LiveRange* range = printable_range.range_;
@@ -1200,12 +1165,9 @@ std::ostream& operator<<(std::ostream& os,
os << "{" << std::endl;
UseInterval* interval = range->first_interval();
UsePosition* use_pos = range->first_pos();
- PrintableInstructionOperand pio;
- pio.register_configuration_ = printable_range.register_configuration_;
while (use_pos != nullptr) {
if (use_pos->HasOperand()) {
- pio.op_ = *use_pos->operand();
- os << pio << use_pos->pos() << " ";
+ os << *use_pos->operand() << use_pos->pos() << " ";
}
use_pos = use_pos->next();
}
@@ -1220,6 +1182,90 @@ std::ostream& operator<<(std::ostream& os,
return os;
}
+namespace {
+void PrintBlockRow(std::ostream& os, const InstructionBlocks& blocks) {
+ os << " ";
+ for (auto block : blocks) {
+ LifetimePosition start_pos = LifetimePosition::GapFromInstructionIndex(
+ block->first_instruction_index());
+ LifetimePosition end_pos = LifetimePosition::GapFromInstructionIndex(
+ block->last_instruction_index())
+ .NextFullStart();
+ int length = end_pos.value() - start_pos.value();
+ constexpr int kMaxPrefixLength = 32;
+ char buffer[kMaxPrefixLength];
+ int rpo_number = block->rpo_number().ToInt();
+ const char* deferred_marker = block->IsDeferred() ? "(deferred)" : "";
+ int max_prefix_length = std::min(length, kMaxPrefixLength);
+ int prefix = snprintf(buffer, max_prefix_length, "[-B%d-%s", rpo_number,
+ deferred_marker);
+ os << buffer;
+ int remaining = length - std::min(prefix, max_prefix_length) - 1;
+ for (int i = 0; i < remaining; ++i) os << '-';
+ os << ']';
+ }
+ os << '\n';
+}
+} // namespace
+
+void LinearScanAllocator::PrintRangeRow(std::ostream& os,
+ const TopLevelLiveRange* toplevel) {
+ int position = 0;
+ os << std::setw(3) << toplevel->vreg()
+ << (toplevel->IsSplinter() ? "s:" : ": ");
+
+ for (const LiveRange* range = toplevel; range != nullptr;
+ range = range->next()) {
+ for (UseInterval* interval = range->first_interval(); interval != nullptr;
+ interval = interval->next()) {
+ LifetimePosition start = interval->start();
+ LifetimePosition end = interval->end();
+ CHECK_GE(start.value(), position);
+ for (; start.value() > position; position++) {
+ os << ' ';
+ }
+ int length = end.value() - start.value();
+ constexpr int kMaxPrefixLength = 32;
+ char buffer[kMaxPrefixLength];
+ int max_prefix_length = std::min(length + 1, kMaxPrefixLength);
+ int prefix;
+ if (range->spilled()) {
+ prefix = snprintf(buffer, max_prefix_length, "|ss");
+ } else {
+ const char* reg_name;
+ if (range->assigned_register() == kUnassignedRegister) {
+ reg_name = "???";
+ } else {
+ reg_name = RegisterName(range->assigned_register());
+ }
+ prefix = snprintf(buffer, max_prefix_length, "|%s", reg_name);
+ }
+ os << buffer;
+ position += std::min(prefix, max_prefix_length - 1);
+ CHECK_GE(end.value(), position);
+ const char line_style = range->spilled() ? '-' : '=';
+ for (; end.value() > position; position++) {
+ os << line_style;
+ }
+ }
+ }
+ os << '\n';
+}
+
+void LinearScanAllocator::PrintRangeOverview(std::ostream& os) {
+ PrintBlockRow(os, code()->instruction_blocks());
+ for (auto toplevel : data()->fixed_live_ranges()) {
+ if (toplevel == nullptr) continue;
+ PrintRangeRow(os, toplevel);
+ }
+ int rowcount = 0;
+ for (auto toplevel : data()->live_ranges()) {
+ if (!CanProcessRange(toplevel)) continue;
+ if (rowcount++ % 10 == 0) PrintBlockRow(os, code()->instruction_blocks());
+ PrintRangeRow(os, toplevel);
+ }
+}
+
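When FLAG_trace_alloc is set, the overview prints a block ruler followed by one row per top-level range: '=' marks stretches where the range holds a register, '|ss' and '-' mark spilled stretches, and an 's:' suffix on the vreg flags a splinter. A hypothetical rendering, shape only (real output depends on the code being allocated):

   [-B0-----------][-B1-(deferred)-]
  3: |rax============
 12:        |ss------------
  7s:             |rcx=====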
SpillRange::SpillRange(TopLevelLiveRange* parent, Zone* zone)
: live_ranges_(zone),
assigned_slot_(kUnassignedSlot),
@@ -1259,7 +1305,6 @@ bool SpillRange::IsIntersectingWith(SpillRange* other) const {
return AreUseIntervalsIntersecting(use_interval_, other->use_interval_);
}
-
bool SpillRange::TryMerge(SpillRange* other) {
if (HasSlot() || other->HasSlot()) return false;
if (byte_width() != other->byte_width() || IsIntersectingWith(other))
@@ -1286,7 +1331,6 @@ bool SpillRange::TryMerge(SpillRange* other) {
return true;
}
-
void SpillRange::MergeDisjointIntervals(UseInterval* other) {
UseInterval* tail = nullptr;
UseInterval* current = use_interval_;
@@ -1309,7 +1353,6 @@ void SpillRange::MergeDisjointIntervals(UseInterval* other) {
// Other list is empty => we are done
}
-
void SpillRange::Print() const {
StdoutStream os;
os << "{" << std::endl;
@@ -1324,7 +1367,6 @@ void SpillRange::Print() const {
os << "}" << std::endl;
}
-
RegisterAllocationData::PhiMapValue::PhiMapValue(PhiInstruction* phi,
const InstructionBlock* block,
Zone* zone)
@@ -1335,13 +1377,11 @@ RegisterAllocationData::PhiMapValue::PhiMapValue(PhiInstruction* phi,
incoming_operands_.reserve(phi->operands().size());
}
-
void RegisterAllocationData::PhiMapValue::AddOperand(
InstructionOperand* operand) {
incoming_operands_.push_back(operand);
}
-
void RegisterAllocationData::PhiMapValue::CommitAssignment(
const InstructionOperand& assigned) {
for (InstructionOperand* operand : incoming_operands_) {
@@ -1385,11 +1425,15 @@ RegisterAllocationData::RegisterAllocationData(
BitVector(this->config()->num_general_registers(), code_zone());
assigned_double_registers_ = new (code_zone())
BitVector(this->config()->num_double_registers(), code_zone());
+ fixed_register_use_ = new (code_zone())
+ BitVector(this->config()->num_general_registers(), code_zone());
+ fixed_fp_register_use_ = new (code_zone())
+ BitVector(this->config()->num_double_registers(), code_zone());
+
this->frame()->SetAllocatedRegisters(assigned_registers_);
this->frame()->SetAllocatedDoubleRegisters(assigned_double_registers_);
}
-
MoveOperands* RegisterAllocationData::AddGapMove(
int index, Instruction::GapPosition position,
const InstructionOperand& from, const InstructionOperand& to) {
@@ -1398,14 +1442,12 @@ MoveOperands* RegisterAllocationData::AddGapMove(
return moves->AddMove(from, to);
}
-
MachineRepresentation RegisterAllocationData::RepresentationFor(
int virtual_register) {
DCHECK_LT(virtual_register, code()->VirtualRegisterCount());
return code()->GetRepresentation(virtual_register);
}
-
TopLevelLiveRange* RegisterAllocationData::GetOrCreateLiveRangeFor(int index) {
if (index >= static_cast<int>(live_ranges().size())) {
live_ranges().resize(index + 1, nullptr);
@@ -1418,13 +1460,11 @@ TopLevelLiveRange* RegisterAllocationData::GetOrCreateLiveRangeFor(int index) {
return result;
}
-
TopLevelLiveRange* RegisterAllocationData::NewLiveRange(
int index, MachineRepresentation rep) {
return new (allocation_zone()) TopLevelLiveRange(index, rep);
}
-
int RegisterAllocationData::GetNextLiveRangeId() {
int vreg = virtual_register_count_++;
if (vreg >= static_cast<int>(live_ranges().size())) {
@@ -1433,7 +1473,6 @@ int RegisterAllocationData::GetNextLiveRangeId() {
return vreg;
}
-
TopLevelLiveRange* RegisterAllocationData::NextLiveRange(
MachineRepresentation rep) {
int vreg = GetNextLiveRangeId();
@@ -1441,7 +1480,6 @@ TopLevelLiveRange* RegisterAllocationData::NextLiveRange(
return ret;
}
-
RegisterAllocationData::PhiMapValue* RegisterAllocationData::InitializePhiMap(
const InstructionBlock* block, PhiInstruction* phi) {
RegisterAllocationData::PhiMapValue* map_value = new (allocation_zone())
@@ -1453,7 +1491,6 @@ RegisterAllocationData::PhiMapValue* RegisterAllocationData::InitializePhiMap(
return map_value;
}
-
RegisterAllocationData::PhiMapValue* RegisterAllocationData::GetPhiMapValueFor(
int virtual_register) {
auto it = phi_map_.find(virtual_register);
@@ -1461,13 +1498,11 @@ RegisterAllocationData::PhiMapValue* RegisterAllocationData::GetPhiMapValueFor(
return it->second;
}
-
RegisterAllocationData::PhiMapValue* RegisterAllocationData::GetPhiMapValueFor(
TopLevelLiveRange* top_range) {
return GetPhiMapValueFor(top_range->vreg());
}
-
bool RegisterAllocationData::ExistsUseWithoutDefinition() {
bool found = false;
BitVector::Iterator iterator(live_in_sets()[0]);
@@ -1488,7 +1523,6 @@ bool RegisterAllocationData::ExistsUseWithoutDefinition() {
return found;
}
-
// If a range is defined in a deferred block, we can expect the entire range
// to cover only positions in deferred blocks. Otherwise, a block on the
// hot path would be dominated by a deferred block, meaning it is unreachable
@@ -1541,7 +1575,6 @@ SpillRange* RegisterAllocationData::AssignSpillRangeToLiveRange(
return spill_range;
}
-
SpillRange* RegisterAllocationData::CreateSpillRangeForLiveRange(
TopLevelLiveRange* range) {
DCHECK(!range->HasSpillOperand());
@@ -1551,6 +1584,63 @@ SpillRange* RegisterAllocationData::CreateSpillRangeForLiveRange(
return spill_range;
}
+void RegisterAllocationData::MarkFixedUse(MachineRepresentation rep,
+ int index) {
+ switch (rep) {
+ case MachineRepresentation::kFloat32:
+ case MachineRepresentation::kSimd128:
+ if (kSimpleFPAliasing) {
+ fixed_fp_register_use_->Add(index);
+ } else {
+ int alias_base_index = -1;
+ int aliases = config()->GetAliases(
+ rep, index, MachineRepresentation::kFloat64, &alias_base_index);
+ DCHECK(aliases > 0 || (aliases == 0 && alias_base_index == -1));
+ while (aliases--) {
+ int aliased_reg = alias_base_index + aliases;
+ fixed_fp_register_use_->Add(aliased_reg);
+ }
+ }
+ break;
+ case MachineRepresentation::kFloat64:
+ fixed_fp_register_use_->Add(index);
+ break;
+ default:
+ DCHECK(!IsFloatingPoint(rep));
+ fixed_register_use_->Add(index);
+ break;
+ }
+}
+
+bool RegisterAllocationData::HasFixedUse(MachineRepresentation rep, int index) {
+ switch (rep) {
+ case MachineRepresentation::kFloat32:
+ case MachineRepresentation::kSimd128:
+ if (kSimpleFPAliasing) {
+ return fixed_fp_register_use_->Contains(index);
+ } else {
+ int alias_base_index = -1;
+ int aliases = config()->GetAliases(
+ rep, index, MachineRepresentation::kFloat64, &alias_base_index);
+ DCHECK(aliases > 0 || (aliases == 0 && alias_base_index == -1));
+ bool result = false;
+ while (aliases-- && !result) {
+ int aliased_reg = alias_base_index + aliases;
+ result |= fixed_fp_register_use_->Contains(aliased_reg);
+ }
+ return result;
+ }
+ break;
+ case MachineRepresentation::kFloat64:
+ return fixed_fp_register_use_->Contains(index);
+ break;
+ default:
+ DCHECK(!IsFloatingPoint(rep));
+ return fixed_register_use_->Contains(index);
+ break;
+ }
+}
+
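On targets without simple FP aliasing, a float32 or simd128 register overlaps one or more float64 registers, so the fixed-use bit must be set for every aliased float64 index. A minimal sketch of that expansion under an ARM-style scheme (s2k/s2k+1 alias dk; qk aliases d2k and d2k+1) — an illustration only, not the RegisterConfiguration::GetAliases API:

#include <bitset>

std::bitset<64> fixed_fp_use;  // one bit per float64 register

// float32 register s<i> lives in the low or high half of d<i/2>.
void MarkFixedFloat32Use(int s_index) { fixed_fp_use.set(s_index / 2); }

// simd128 register q<i> spans d<2i> and d<2i+1>.
void MarkFixedSimd128Use(int q_index) {
  fixed_fp_use.set(2 * q_index);
  fixed_fp_use.set(2 * q_index + 1);
}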
void RegisterAllocationData::MarkAllocated(MachineRepresentation rep,
int index) {
switch (rep) {
@@ -1585,13 +1675,11 @@ bool RegisterAllocationData::IsBlockBoundary(LifetimePosition pos) const {
pos.ToInstructionIndex();
}
-
ConstraintBuilder::ConstraintBuilder(RegisterAllocationData* data)
: data_(data) {}
-
InstructionOperand* ConstraintBuilder::AllocateFixed(
- UnallocatedOperand* operand, int pos, bool is_tagged) {
+ UnallocatedOperand* operand, int pos, bool is_tagged, bool is_input) {
TRACE("Allocating fixed reg for op %d\n", operand->virtual_register());
DCHECK(operand->HasFixedPolicy());
InstructionOperand allocated;
@@ -1617,6 +1705,9 @@ InstructionOperand* ConstraintBuilder::AllocateFixed(
} else {
UNREACHABLE();
}
+ if (is_input && allocated.IsAnyRegister()) {
+ data()->MarkFixedUse(rep, operand->fixed_register_index());
+ }
InstructionOperand::ReplaceWith(operand, &allocated);
if (is_tagged) {
TRACE("Fixed reg is tagged at %d\n", pos);
@@ -1628,14 +1719,12 @@ InstructionOperand* ConstraintBuilder::AllocateFixed(
return operand;
}
-
void ConstraintBuilder::MeetRegisterConstraints() {
for (InstructionBlock* block : code()->instruction_blocks()) {
MeetRegisterConstraints(block);
}
}
-
void ConstraintBuilder::MeetRegisterConstraints(const InstructionBlock* block) {
int start = block->first_instruction_index();
int end = block->last_instruction_index();
@@ -1648,7 +1737,6 @@ void ConstraintBuilder::MeetRegisterConstraints(const InstructionBlock* block) {
MeetRegisterConstraintsForLastInstructionInBlock(block);
}
-
void ConstraintBuilder::MeetRegisterConstraintsForLastInstructionInBlock(
const InstructionBlock* block) {
int end = block->last_instruction_index();
@@ -1661,7 +1749,7 @@ void ConstraintBuilder::MeetRegisterConstraintsForLastInstructionInBlock(
TopLevelLiveRange* range = data()->GetOrCreateLiveRangeFor(output_vreg);
bool assigned = false;
if (output->HasFixedPolicy()) {
- AllocateFixed(output, -1, false);
+ AllocateFixed(output, -1, false, false);
      // This value is produced on the stack; we never need to spill it.
if (output->IsStackSlot()) {
DCHECK(LocationOperand::cast(output)->index() <
@@ -1695,13 +1783,12 @@ void ConstraintBuilder::MeetRegisterConstraintsForLastInstructionInBlock(
}
}
-
void ConstraintBuilder::MeetConstraintsAfter(int instr_index) {
Instruction* first = code()->InstructionAt(instr_index);
// Handle fixed temporaries.
for (size_t i = 0; i < first->TempCount(); i++) {
UnallocatedOperand* temp = UnallocatedOperand::cast(first->TempAt(i));
- if (temp->HasFixedPolicy()) AllocateFixed(temp, instr_index, false);
+ if (temp->HasFixedPolicy()) AllocateFixed(temp, instr_index, false, false);
}
// Handle constant/fixed output operands.
for (size_t i = 0; i < first->OutputCount(); i++) {
@@ -1727,7 +1814,7 @@ void ConstraintBuilder::MeetConstraintsAfter(int instr_index) {
data()->preassigned_slot_ranges().push_back(
std::make_pair(range, first_output->GetSecondaryStorage()));
}
- AllocateFixed(first_output, instr_index, is_tagged);
+ AllocateFixed(first_output, instr_index, is_tagged, false);
      // This value is produced on the stack; we never need to spill it.
if (first_output->IsStackSlot()) {
@@ -1750,7 +1837,6 @@ void ConstraintBuilder::MeetConstraintsAfter(int instr_index) {
}
}
-
void ConstraintBuilder::MeetConstraintsBefore(int instr_index) {
Instruction* second = code()->InstructionAt(instr_index);
// Handle fixed input operands of second instruction.
@@ -1765,7 +1851,7 @@ void ConstraintBuilder::MeetConstraintsBefore(int instr_index) {
UnallocatedOperand input_copy(UnallocatedOperand::REGISTER_OR_SLOT,
input_vreg);
bool is_tagged = code()->IsReference(input_vreg);
- AllocateFixed(cur_input, instr_index, is_tagged);
+ AllocateFixed(cur_input, instr_index, is_tagged, true);
data()->AddGapMove(instr_index, Instruction::END, input_copy, *cur_input);
}
}
@@ -1786,6 +1872,7 @@ void ConstraintBuilder::MeetConstraintsBefore(int instr_index) {
UnallocatedOperand(*cur_input, second_output->virtual_register());
MoveOperands* gap_move = data()->AddGapMove(instr_index, Instruction::END,
input_copy, *cur_input);
+ DCHECK_NOT_NULL(gap_move);
if (code()->IsReference(input_vreg) && !code()->IsReference(output_vreg)) {
if (second->HasReferenceMap()) {
RegisterAllocationData::DelayedReference delayed_reference = {
@@ -1804,7 +1891,6 @@ void ConstraintBuilder::MeetConstraintsBefore(int instr_index) {
}
}
-
void ConstraintBuilder::ResolvePhis() {
// Process the blocks in reverse order.
for (InstructionBlock* block : base::Reversed(code()->instruction_blocks())) {
@@ -1812,7 +1898,6 @@ void ConstraintBuilder::ResolvePhis() {
}
}
-
void ConstraintBuilder::ResolvePhis(const InstructionBlock* block) {
for (PhiInstruction* phi : block->phis()) {
int phi_vreg = phi->virtual_register();
@@ -1842,12 +1927,10 @@ void ConstraintBuilder::ResolvePhis(const InstructionBlock* block) {
}
}
-
LiveRangeBuilder::LiveRangeBuilder(RegisterAllocationData* data,
Zone* local_zone)
: data_(data), phi_hints_(local_zone) {}
-
BitVector* LiveRangeBuilder::ComputeLiveOut(const InstructionBlock* block,
RegisterAllocationData* data) {
size_t block_index = block->rpo_number().ToSize();
@@ -1881,7 +1964,6 @@ BitVector* LiveRangeBuilder::ComputeLiveOut(const InstructionBlock* block,
return live_out;
}
-
void LiveRangeBuilder::AddInitialIntervals(const InstructionBlock* block,
BitVector* live_out) {
// Add an interval that includes the entire block to the live range for
@@ -1984,7 +2066,6 @@ TopLevelLiveRange* LiveRangeBuilder::LiveRangeFor(InstructionOperand* operand) {
}
}
-
UsePosition* LiveRangeBuilder::NewUsePosition(LifetimePosition pos,
InstructionOperand* operand,
void* hint,
@@ -1992,7 +2073,6 @@ UsePosition* LiveRangeBuilder::NewUsePosition(LifetimePosition pos,
return new (allocation_zone()) UsePosition(pos, operand, hint, hint_type);
}
-
UsePosition* LiveRangeBuilder::Define(LifetimePosition position,
InstructionOperand* operand, void* hint,
UsePositionHintType hint_type) {
@@ -2014,7 +2094,6 @@ UsePosition* LiveRangeBuilder::Define(LifetimePosition position,
return use_pos;
}
-
UsePosition* LiveRangeBuilder::Use(LifetimePosition block_start,
LifetimePosition position,
InstructionOperand* operand, void* hint,
@@ -2031,7 +2110,6 @@ UsePosition* LiveRangeBuilder::Use(LifetimePosition block_start,
return use_pos;
}
-
void LiveRangeBuilder::ProcessInstructions(const InstructionBlock* block,
BitVector* live) {
int block_start = block->first_instruction_index();
@@ -2358,7 +2436,6 @@ void LiveRangeBuilder::ProcessPhis(const InstructionBlock* block,
}
}
-
void LiveRangeBuilder::ProcessLoopHeader(const InstructionBlock* block,
BitVector* live) {
DCHECK(block->IsLoopHeader());
@@ -2383,7 +2460,6 @@ void LiveRangeBuilder::ProcessLoopHeader(const InstructionBlock* block,
}
}
-
void LiveRangeBuilder::BuildLiveRanges() {
// Process the blocks in reverse order.
for (int block_id = code()->InstructionBlockCount() - 1; block_id >= 0;
@@ -2447,7 +2523,6 @@ void LiveRangeBuilder::BuildLiveRanges() {
#endif
}
-
void LiveRangeBuilder::MapPhiHint(InstructionOperand* operand,
UsePosition* use_pos) {
DCHECK(!use_pos->IsResolved());
@@ -2456,7 +2531,6 @@ void LiveRangeBuilder::MapPhiHint(InstructionOperand* operand,
USE(res);
}
-
void LiveRangeBuilder::ResolvePhiHint(InstructionOperand* operand,
UsePosition* use_pos) {
auto it = phi_hints_.find(operand);
@@ -2465,7 +2539,6 @@ void LiveRangeBuilder::ResolvePhiHint(InstructionOperand* operand,
it->second->ResolveHint(use_pos);
}
-
void LiveRangeBuilder::Verify() const {
for (auto& hint : phi_hints_) {
CHECK(hint.second->IsResolved());
@@ -2544,6 +2617,99 @@ bool LiveRangeBuilder::NextIntervalStartsInDifferentBlocks(
return block->rpo_number() < next_block->rpo_number();
}
+void BundleBuilder::BuildBundles() {
+ TRACE("Build bundles\n");
+ // Process the blocks in reverse order.
+ for (int block_id = code()->InstructionBlockCount() - 1; block_id >= 0;
+ --block_id) {
+ InstructionBlock* block =
+ code()->InstructionBlockAt(RpoNumber::FromInt(block_id));
+ TRACE("Block B%d\n", block_id);
+ for (auto phi : block->phis()) {
+ LiveRange* out_range =
+ data()->GetOrCreateLiveRangeFor(phi->virtual_register());
+ LiveRangeBundle* out = out_range->get_bundle();
+ if (out == nullptr) {
+ out = new (data()->allocation_zone())
+ LiveRangeBundle(data()->allocation_zone(), next_bundle_id_++);
+ out->TryAddRange(out_range);
+ }
+ TRACE("Processing phi for v%d with %d:%d\n", phi->virtual_register(),
+ out_range->TopLevel()->vreg(), out_range->relative_id());
+ for (auto input : phi->operands()) {
+ LiveRange* input_range = data()->GetOrCreateLiveRangeFor(input);
+ TRACE("Input value v%d with range %d:%d\n", input,
+ input_range->TopLevel()->vreg(), input_range->relative_id());
+ LiveRangeBundle* input_bundle = input_range->get_bundle();
+ if (input_bundle != nullptr) {
+ TRACE("Merge\n");
+ if (out->TryMerge(input_bundle))
+ TRACE("Merged %d and %d to %d\n", phi->virtual_register(), input,
+ out->id());
+ } else {
+ TRACE("Add\n");
+ if (out->TryAddRange(input_range))
+ TRACE("Added %d and %d to %d\n", phi->virtual_register(), input,
+ out->id());
+ }
+ }
+ }
+ TRACE("Done block B%d\n", block_id);
+ }
+}
+
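Concretely, for a two-predecessor phi the pass behaves as follows (a hypothetical walk-through):

//   B1: v1 = ...                  B2: v2 = ...
//            \                   /
//             B3: v3 = phi(v1, v2)
//
// Visiting B3's phi:
//   - v3 has no bundle yet -> allocate bundle #0, TryAddRange(v3).
//   - v1: no bundle either -> TryAddRange(v1); its use intervals end where
//     v3's begin, so they do not overlap and v1 joins bundle #0.
//   - v2: same, joins bundle #0.
// All three ranges now share one register hint and one spill slot.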
+bool LiveRangeBundle::TryAddRange(LiveRange* range) {
+ DCHECK_NULL(range->get_bundle());
+ // We may only add a new live range if its use intervals do not
+ // overlap with existing intervals in the bundle.
+ if (UsesOverlap(range->first_interval())) return false;
+ ranges_.insert(range);
+ range->set_bundle(this);
+ InsertUses(range->first_interval());
+ return true;
+}
+bool LiveRangeBundle::TryMerge(LiveRangeBundle* other) {
+ if (other == this) return true;
+
+ auto iter1 = uses_.begin();
+ auto iter2 = other->uses_.begin();
+
+ while (iter1 != uses_.end() && iter2 != other->uses_.end()) {
+ if (iter1->start > iter2->end) {
+ ++iter2;
+ } else if (iter2->start > iter1->end) {
+ ++iter1;
+ } else {
+ TRACE("No merge %d:%d %d:%d\n", iter1->start, iter1->end, iter2->start,
+ iter2->end);
+ return false;
+ }
+ }
+  // Uses are disjoint; merging is possible.
+ for (auto it = other->ranges_.begin(); it != other->ranges_.end(); ++it) {
+ (*it)->set_bundle(this);
+ InsertUses((*it)->first_interval());
+ }
+ ranges_.insert(other->ranges_.begin(), other->ranges_.end());
+ other->ranges_.clear();
+
+ return true;
+}
+
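The merge test above is a two-pointer sweep over two start-ordered interval sets: whichever side's current interval lies entirely before the other's is advanced; any pair that neither precedes nor follows the other overlaps and vetoes the merge. The same test in isolation, as a sketch over plain pairs with a half-open [start, end) reading rather than the ZoneSet used here:

#include <utility>
#include <vector>

using Interval = std::pair<int, int>;  // [start, end)

// True when the two start-sorted sets are pairwise disjoint, i.e. the
// bundles could be merged.
bool Disjoint(const std::vector<Interval>& a, const std::vector<Interval>& b) {
  size_t i = 0, j = 0;
  while (i < a.size() && j < b.size()) {
    if (b[j].second <= a[i].first) {
      ++j;  // b[j] ends before a[i] starts
    } else if (a[i].second <= b[j].first) {
      ++i;  // a[i] ends before b[j] starts
    } else {
      return false;  // overlap found
    }
  }
  return true;
}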
+void LiveRangeBundle::MergeSpillRanges() {
+ SpillRange* target = nullptr;
+ for (auto range : ranges_) {
+ if (range->TopLevel()->HasSpillRange()) {
+ SpillRange* current = range->TopLevel()->GetSpillRange();
+ if (target == nullptr) {
+ target = current;
+ } else if (target != current) {
+ target->TryMerge(current);
+ }
+ }
+ }
+}
+
RegisterAllocator::RegisterAllocator(RegisterAllocationData* data,
RegisterKind kind)
: data_(data),
@@ -2617,7 +2783,6 @@ void RegisterAllocator::SplitAndSpillRangesDefinedByMemoryOperand() {
}
}
-
LiveRange* RegisterAllocator::SplitRangeAt(LiveRange* range,
LifetimePosition pos) {
DCHECK(!range->TopLevel()->IsFixed());
@@ -2636,7 +2801,6 @@ LiveRange* RegisterAllocator::SplitRangeAt(LiveRange* range,
return result;
}
-
LiveRange* RegisterAllocator::SplitBetween(LiveRange* range,
LifetimePosition start,
LifetimePosition end) {
@@ -2650,7 +2814,6 @@ LiveRange* RegisterAllocator::SplitBetween(LiveRange* range,
return SplitRangeAt(range, split_pos);
}
-
LifetimePosition RegisterAllocator::FindOptimalSplitPos(LifetimePosition start,
LifetimePosition end) {
int start_instr = start.ToInstructionIndex();
@@ -2689,7 +2852,6 @@ LifetimePosition RegisterAllocator::FindOptimalSplitPos(LifetimePosition start,
block->first_instruction_index());
}
-
LifetimePosition RegisterAllocator::FindOptimalSpillingPos(
LiveRange* range, LifetimePosition pos) {
const InstructionBlock* block = GetInstructionBlock(code(), pos.Start());
@@ -2722,7 +2884,6 @@ LifetimePosition RegisterAllocator::FindOptimalSpillingPos(
return pos;
}
-
void RegisterAllocator::Spill(LiveRange* range) {
DCHECK(!range->spilled());
TopLevelLiveRange* first = range->TopLevel();
@@ -2735,11 +2896,9 @@ void RegisterAllocator::Spill(LiveRange* range) {
}
const char* RegisterAllocator::RegisterName(int register_code) const {
- if (mode() == GENERAL_REGISTERS) {
- return data()->config()->GetGeneralRegisterName(register_code);
- } else {
- return data()->config()->GetDoubleRegisterName(register_code);
- }
+ return mode() == GENERAL_REGISTERS
+ ? i::RegisterName(Register::from_code(register_code))
+ : i::RegisterName(DoubleRegister::from_code(register_code));
}
LinearScanAllocator::LinearScanAllocator(RegisterAllocationData* data,
@@ -2747,16 +2906,13 @@ LinearScanAllocator::LinearScanAllocator(RegisterAllocationData* data,
: RegisterAllocator(data, kind),
unhandled_live_ranges_(local_zone),
active_live_ranges_(local_zone),
- inactive_live_ranges_(local_zone) {
+ inactive_live_ranges_(local_zone),
+ next_active_ranges_change_(LifetimePosition::Invalid()),
+ next_inactive_ranges_change_(LifetimePosition::Invalid()) {
active_live_ranges().reserve(8);
inactive_live_ranges().reserve(8);
- // TryAllocateFreeReg and AllocateBlockedReg assume this
- // when allocating local arrays.
- DCHECK_GE(RegisterConfiguration::kMaxFPRegisters,
- this->data()->config()->num_general_registers());
}
-
void LinearScanAllocator::AllocateRegisters() {
DCHECK(unhandled_live_ranges().empty());
DCHECK(active_live_ranges().empty());
@@ -2808,32 +2964,16 @@ void LinearScanAllocator::AllocateRegisters() {
if (current->IsTopLevel() && TryReuseSpillForPhi(current->TopLevel()))
continue;
- for (size_t i = 0; i < active_live_ranges().size(); ++i) {
- LiveRange* cur_active = active_live_ranges()[i];
- if (cur_active->End() <= position) {
- ActiveToHandled(cur_active);
- --i; // The live range was removed from the list of active live ranges.
- } else if (!cur_active->Covers(position)) {
- ActiveToInactive(cur_active);
- --i; // The live range was removed from the list of active live ranges.
- }
- }
-
- for (size_t i = 0; i < inactive_live_ranges().size(); ++i) {
- LiveRange* cur_inactive = inactive_live_ranges()[i];
- if (cur_inactive->End() <= position) {
- InactiveToHandled(cur_inactive);
- --i; // Live range was removed from the list of inactive live ranges.
- } else if (cur_inactive->Covers(position)) {
- InactiveToActive(cur_inactive);
- --i; // Live range was removed from the list of inactive live ranges.
- }
- }
+ ForwardStateTo(position);
DCHECK(!current->HasRegisterAssigned() && !current->spilled());
ProcessCurrentRange(current);
}
+
+ if (FLAG_trace_alloc) {
+ PrintRangeOverview(std::cout);
+ }
}
bool LinearScanAllocator::TrySplitAndSpillSplinter(LiveRange* range) {
@@ -2862,23 +3002,26 @@ void LinearScanAllocator::SetLiveRangeAssignedRegister(LiveRange* range,
data()->MarkAllocated(range->representation(), reg);
range->set_assigned_register(reg);
range->SetUseHints(reg);
+ range->UpdateBundleRegister(reg);
if (range->IsTopLevel() && range->TopLevel()->is_phi()) {
data()->GetPhiMapValueFor(range->TopLevel())->set_assigned_register(reg);
}
}
-
void LinearScanAllocator::AddToActive(LiveRange* range) {
- TRACE("Add live range %d:%d to active\n", range->TopLevel()->vreg(),
- range->relative_id());
+ TRACE("Add live range %d:%d in %s to active\n", range->TopLevel()->vreg(),
+ range->relative_id(), RegisterName(range->assigned_register()));
active_live_ranges().push_back(range);
+ next_active_ranges_change_ =
+ std::min(next_active_ranges_change_, range->NextEndAfter(range->Start()));
}
-
void LinearScanAllocator::AddToInactive(LiveRange* range) {
TRACE("Add live range %d:%d to inactive\n", range->TopLevel()->vreg(),
range->relative_id());
inactive_live_ranges().push_back(range);
+ next_inactive_ranges_change_ = std::min(
+ next_inactive_ranges_change_, range->NextStartAfter(range->Start()));
}
void LinearScanAllocator::AddToUnhandled(LiveRange* range) {
@@ -2891,34 +3034,77 @@ void LinearScanAllocator::AddToUnhandled(LiveRange* range) {
unhandled_live_ranges().insert(range);
}
-
-void LinearScanAllocator::ActiveToHandled(LiveRange* range) {
- RemoveElement(&active_live_ranges(), range);
+ZoneVector<LiveRange*>::iterator LinearScanAllocator::ActiveToHandled(
+ const ZoneVector<LiveRange*>::iterator it) {
TRACE("Moving live range %d:%d from active to handled\n",
- range->TopLevel()->vreg(), range->relative_id());
+ (*it)->TopLevel()->vreg(), (*it)->relative_id());
+ return active_live_ranges().erase(it);
}
-
-void LinearScanAllocator::ActiveToInactive(LiveRange* range) {
- RemoveElement(&active_live_ranges(), range);
+ZoneVector<LiveRange*>::iterator LinearScanAllocator::ActiveToInactive(
+ const ZoneVector<LiveRange*>::iterator it, LifetimePosition position) {
+ LiveRange* range = *it;
inactive_live_ranges().push_back(range);
TRACE("Moving live range %d:%d from active to inactive\n",
- range->TopLevel()->vreg(), range->relative_id());
+        range->TopLevel()->vreg(), range->relative_id());
+ next_inactive_ranges_change_ =
+ std::min(next_inactive_ranges_change_, range->NextStartAfter(position));
+ return active_live_ranges().erase(it);
}
-
-void LinearScanAllocator::InactiveToHandled(LiveRange* range) {
- RemoveElement(&inactive_live_ranges(), range);
+ZoneVector<LiveRange*>::iterator LinearScanAllocator::InactiveToHandled(
+ ZoneVector<LiveRange*>::iterator it) {
TRACE("Moving live range %d:%d from inactive to handled\n",
- range->TopLevel()->vreg(), range->relative_id());
+ (*it)->TopLevel()->vreg(), (*it)->relative_id());
+ return inactive_live_ranges().erase(it);
}
-
-void LinearScanAllocator::InactiveToActive(LiveRange* range) {
- RemoveElement(&inactive_live_ranges(), range);
+ZoneVector<LiveRange*>::iterator LinearScanAllocator::InactiveToActive(
+ ZoneVector<LiveRange*>::iterator it, LifetimePosition position) {
+ LiveRange* range = *it;
active_live_ranges().push_back(range);
TRACE("Moving live range %d:%d from inactive to active\n",
range->TopLevel()->vreg(), range->relative_id());
+ next_active_ranges_change_ =
+ std::min(next_active_ranges_change_, range->NextEndAfter(position));
+ return inactive_live_ranges().erase(it);
+}
+
+void LinearScanAllocator::ForwardStateTo(LifetimePosition position) {
+ if (position >= next_active_ranges_change_) {
+ next_active_ranges_change_ = LifetimePosition::MaxPosition();
+ for (auto it = active_live_ranges().begin();
+ it != active_live_ranges().end();) {
+ LiveRange* cur_active = *it;
+ if (cur_active->End() <= position) {
+ it = ActiveToHandled(it);
+ } else if (!cur_active->Covers(position)) {
+ it = ActiveToInactive(it, position);
+ } else {
+ next_active_ranges_change_ = std::min(
+ next_active_ranges_change_, cur_active->NextEndAfter(position));
+ ++it;
+ }
+ }
+ }
+
+ if (position >= next_inactive_ranges_change_) {
+ next_inactive_ranges_change_ = LifetimePosition::MaxPosition();
+ for (auto it = inactive_live_ranges().begin();
+ it != inactive_live_ranges().end();) {
+ LiveRange* cur_inactive = *it;
+ if (cur_inactive->End() <= position) {
+ it = InactiveToHandled(it);
+ } else if (cur_inactive->Covers(position)) {
+ it = InactiveToActive(it, position);
+ } else {
+ next_inactive_ranges_change_ =
+ std::min(next_inactive_ranges_change_,
+ cur_inactive->NextStartAfter(position));
+ ++it;
+ }
+ }
+ }
}
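
ForwardStateTo replaces the two per-iteration scans deleted above. The key is a pair of cached low-water marks: no member of the active (or inactive) set can change state before next_active_ranges_change_ (resp. next_inactive_ranges_change_), so the walk is skipped until the allocation position reaches that mark. A sketch of the idea for one set, with integer positions:

#include <climits>

// pos_of_next_change caches the earliest position at which any member of
// the set can expire or stop covering the current position.
int pos_of_next_change = INT_MAX;

void ForwardSetTo(int position) {
  if (position < pos_of_next_change) return;  // nothing changed yet: O(1)
  pos_of_next_change = INT_MAX;
  // Walk the set once, moving expired members out; every survivor folds
  // its next interesting position back into the cache, e.g.:
  //   pos_of_next_change =
  //       std::min(pos_of_next_change, range->NextEndAfter(position));
}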
void LinearScanAllocator::GetFPRegisterSet(MachineRepresentation rep,
@@ -2957,8 +3143,10 @@ void LinearScanAllocator::FindFreeRegistersForRange(
int cur_reg = cur_active->assigned_register();
if (kSimpleFPAliasing || !check_fp_aliasing()) {
positions[cur_reg] = LifetimePosition::GapFromInstructionIndex(0);
- TRACE("Register %s is free until pos %d (1)\n", RegisterName(cur_reg),
- LifetimePosition::GapFromInstructionIndex(0).value());
+ TRACE("Register %s is free until pos %d (1) due to %d\n",
+ RegisterName(cur_reg),
+ LifetimePosition::GapFromInstructionIndex(0).value(),
+ cur_active->TopLevel()->vreg());
} else {
int alias_base_index = -1;
int aliases = data()->config()->GetAliases(
@@ -3037,9 +3225,8 @@ void LinearScanAllocator::FindFreeRegistersForRange(
// - a phi. The same analysis as in the case of the input constraint applies.
//
void LinearScanAllocator::ProcessCurrentRange(LiveRange* current) {
- LifetimePosition free_until_pos_buff[RegisterConfiguration::kMaxFPRegisters];
- Vector<LifetimePosition> free_until_pos(
- free_until_pos_buff, RegisterConfiguration::kMaxFPRegisters);
+ EmbeddedVector<LifetimePosition, RegisterConfiguration::kMaxRegisters>
+ free_until_pos;
FindFreeRegistersForRange(current, free_until_pos);
if (!TryAllocatePreferredReg(current, free_until_pos)) {
if (current->TopLevel()->IsSplinter()) {
@@ -3057,7 +3244,8 @@ void LinearScanAllocator::ProcessCurrentRange(LiveRange* current) {
bool LinearScanAllocator::TryAllocatePreferredReg(
LiveRange* current, const Vector<LifetimePosition>& free_until_pos) {
int hint_register;
- if (current->FirstHintPosition(&hint_register) != nullptr) {
+ if (current->FirstHintPosition(&hint_register) != nullptr ||
+ current->RegisterFromBundle(&hint_register)) {
TRACE(
"Found reg hint %s (free until [%d) for live range %d:%d (end %d[).\n",
RegisterName(hint_register), free_until_pos[hint_register].value(),
@@ -3076,8 +3264,9 @@ bool LinearScanAllocator::TryAllocatePreferredReg(
return false;
}
-bool LinearScanAllocator::TryAllocateFreeReg(
- LiveRange* current, const Vector<LifetimePosition>& free_until_pos) {
+int LinearScanAllocator::PickRegisterThatIsAvailableLongest(
+ LiveRange* current, int hint_reg,
+ const Vector<LifetimePosition>& free_until_pos) {
int num_regs = 0; // used only for the call to GetFPRegisterSet.
int num_codes = num_allocatable_registers();
const int* codes = allocatable_register_codes();
@@ -3097,17 +3286,33 @@ bool LinearScanAllocator::TryAllocateFreeReg(
  // clobbered after the call except for the argument registers, which are
// set before the call. Hence, the argument registers always get ignored,
// as their available time is shorter.
- int reg;
- if (current->FirstHintPosition(&reg) == nullptr) {
- reg = codes[0];
- }
+ int reg = hint_reg == kUnassignedRegister ? codes[0] : hint_reg;
for (int i = 0; i < num_codes; ++i) {
int code = codes[i];
- if (free_until_pos[code].ToInstructionIndex() >
- free_until_pos[reg].ToInstructionIndex()) {
+ // Prefer registers that have no fixed uses to avoid blocking later hints.
+ // We use the first register that has no fixed uses to ensure we use
+    // byte-addressable registers on ia32 first.
+ int candidate_free = free_until_pos[code].ToInstructionIndex();
+ int current_free = free_until_pos[reg].ToInstructionIndex();
+ if (candidate_free > current_free ||
+ (candidate_free == current_free && reg != hint_reg &&
+ data()->HasFixedUse(current->representation(), reg) &&
+ !data()->HasFixedUse(current->representation(), code))) {
reg = code;
}
}
+ return reg;
+}
+
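The fixed-use tie break merits a worked example (hypothetical registers and positions):

// free_until_pos[eax] == 40 and free_until_pos[ecx] == 40, but a later call
// has a fixed input in eax, so HasFixedUse(rep, eax) is true. Both registers
// are free equally long; the reg != hint_reg guard keeps an explicit hint,
// otherwise ecx wins and eax stays clear for the call's fixed constraint.
+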
+bool LinearScanAllocator::TryAllocateFreeReg(
+ LiveRange* current, const Vector<LifetimePosition>& free_until_pos) {
+  // Compute the register hint, if one exists. The || short-circuits, so the
+  // bundle hint is consulted only when there is no use-position hint.
+ int hint_reg = kUnassignedRegister;
+ current->FirstHintPosition(&hint_reg) != nullptr ||
+ current->RegisterFromBundle(&hint_reg);
+
+ int reg =
+ PickRegisterThatIsAvailableLongest(current, hint_reg, free_until_pos);
LifetimePosition pos = free_until_pos[reg];
@@ -3145,22 +3350,15 @@ void LinearScanAllocator::AllocateBlockedReg(LiveRange* current) {
return;
}
- int num_regs = num_registers();
- int num_codes = num_allocatable_registers();
- const int* codes = allocatable_register_codes();
MachineRepresentation rep = current->representation();
- if (!kSimpleFPAliasing && (rep == MachineRepresentation::kFloat32 ||
- rep == MachineRepresentation::kSimd128))
- GetFPRegisterSet(rep, &num_regs, &num_codes, &codes);
// use_pos keeps track of positions a register/alias is used at.
// block_pos keeps track of positions where a register/alias is blocked
// from.
- LifetimePosition use_pos[RegisterConfiguration::kMaxFPRegisters];
- LifetimePosition block_pos[RegisterConfiguration::kMaxFPRegisters];
- for (int i = 0; i < num_regs; i++) {
- use_pos[i] = block_pos[i] = LifetimePosition::MaxPosition();
- }
+ EmbeddedVector<LifetimePosition, RegisterConfiguration::kMaxRegisters>
+ use_pos(LifetimePosition::MaxPosition());
+ EmbeddedVector<LifetimePosition, RegisterConfiguration::kMaxRegisters>
+ block_pos(LifetimePosition::MaxPosition());
for (LiveRange* range : active_live_ranges()) {
int cur_reg = range->assigned_register();
@@ -3241,13 +3439,11 @@ void LinearScanAllocator::AllocateBlockedReg(LiveRange* current) {
}
}
- int reg = codes[0];
- for (int i = 1; i < num_codes; ++i) {
- int code = codes[i];
- if (use_pos[code] > use_pos[reg]) {
- reg = code;
- }
- }
+  // Compute the register hint, if one exists: prefer the use position's own
+  // hint, falling back to the bundle's register.
+ int hint_reg = kUnassignedRegister;
+ register_use->HintRegister(&hint_reg) ||
+ current->RegisterFromBundle(&hint_reg);
+ int reg = PickRegisterThatIsAvailableLongest(current, hint_reg, use_pos);
if (use_pos[reg] < register_use->pos()) {
// If there is a gap position before the next register use, we can
@@ -3281,19 +3477,23 @@ void LinearScanAllocator::AllocateBlockedReg(LiveRange* current) {
SplitAndSpillIntersecting(current);
}
-
void LinearScanAllocator::SplitAndSpillIntersecting(LiveRange* current) {
DCHECK(current->HasRegisterAssigned());
int reg = current->assigned_register();
LifetimePosition split_pos = current->Start();
- for (size_t i = 0; i < active_live_ranges().size(); ++i) {
- LiveRange* range = active_live_ranges()[i];
+ for (auto it = active_live_ranges().begin();
+ it != active_live_ranges().end();) {
+ LiveRange* range = *it;
if (kSimpleFPAliasing || !check_fp_aliasing()) {
- if (range->assigned_register() != reg) continue;
+ if (range->assigned_register() != reg) {
+ ++it;
+ continue;
+ }
} else {
if (!data()->config()->AreAliases(current->representation(), reg,
range->representation(),
range->assigned_register())) {
+ ++it;
continue;
}
}
@@ -3315,21 +3515,29 @@ void LinearScanAllocator::SplitAndSpillIntersecting(LiveRange* current) {
next_pos->pos()));
SpillBetweenUntil(range, spill_pos, current->Start(), next_pos->pos());
}
- ActiveToHandled(range);
- --i;
+ it = ActiveToHandled(it);
}
- for (size_t i = 0; i < inactive_live_ranges().size(); ++i) {
- LiveRange* range = inactive_live_ranges()[i];
+ for (auto it = inactive_live_ranges().begin();
+ it != inactive_live_ranges().end();) {
+ LiveRange* range = *it;
DCHECK(range->End() > current->Start());
- if (range->TopLevel()->IsFixed()) continue;
+ if (range->TopLevel()->IsFixed()) {
+ ++it;
+ continue;
+ }
if (kSimpleFPAliasing || !check_fp_aliasing()) {
- if (range->assigned_register() != reg) continue;
+ if (range->assigned_register() != reg) {
+ ++it;
+ continue;
+ }
} else {
if (!data()->config()->AreAliases(current->representation(), reg,
range->representation(),
- range->assigned_register()))
+ range->assigned_register())) {
+ ++it;
continue;
+ }
}
LifetimePosition next_intersection = range->FirstIntersection(current);
@@ -3341,24 +3549,25 @@ void LinearScanAllocator::SplitAndSpillIntersecting(LiveRange* current) {
next_intersection = Min(next_intersection, next_pos->pos());
SpillBetween(range, split_pos, next_intersection);
}
- InactiveToHandled(range);
- --i;
+ it = InactiveToHandled(it);
+ } else {
+ ++it;
}
}
}
-
bool LinearScanAllocator::TryReuseSpillForPhi(TopLevelLiveRange* range) {
if (!range->is_phi()) return false;
DCHECK(!range->HasSpillOperand());
+ // Check how many operands belong to the same bundle as the output.
+ LiveRangeBundle* out_bundle = range->get_bundle();
RegisterAllocationData::PhiMapValue* phi_map_value =
data()->GetPhiMapValueFor(range);
const PhiInstruction* phi = phi_map_value->phi();
const InstructionBlock* block = phi_map_value->block();
// Count the number of spilled operands.
size_t spilled_count = 0;
- LiveRange* first_op = nullptr;
for (size_t i = 0; i < phi->operands().size(); i++) {
int op = phi->operands()[i];
LiveRange* op_range = data()->GetOrCreateLiveRangeFor(op);
@@ -3371,82 +3580,43 @@ bool LinearScanAllocator::TryReuseSpillForPhi(TopLevelLiveRange* range) {
while (op_range != nullptr && !op_range->CanCover(pred_end)) {
op_range = op_range->next();
}
- if (op_range != nullptr && op_range->spilled()) {
+ if (op_range != nullptr && op_range->spilled() &&
+ op_range->get_bundle() == out_bundle) {
spilled_count++;
- if (first_op == nullptr) {
- first_op = op_range->TopLevel();
- }
}
}
- // Only continue if more than half of the operands are spilled.
+ // Only continue if more than half of the operands are spilled to the same
+  // slot (because they are part of the same bundle).
if (spilled_count * 2 <= phi->operands().size()) {
return false;
}
- // Try to merge the spilled operands and count the number of merged spilled
- // operands.
- DCHECK_NOT_NULL(first_op);
- SpillRange* first_op_spill = first_op->TopLevel()->GetSpillRange();
- size_t num_merged = 1;
- for (size_t i = 1; i < phi->operands().size(); i++) {
- int op = phi->operands()[i];
- TopLevelLiveRange* op_range = data()->live_ranges()[op];
- if (!op_range->HasSpillRange()) continue;
- SpillRange* op_spill = op_range->GetSpillRange();
- if (op_spill == first_op_spill || first_op_spill->TryMerge(op_spill)) {
- num_merged++;
- }
- }
-
- // Only continue if enough operands could be merged to the
- // same spill slot.
- if (num_merged * 2 <= phi->operands().size() ||
- AreUseIntervalsIntersecting(first_op_spill->interval(),
- range->first_interval())) {
- return false;
- }
-
  // If the range does not need a register soon, spill it to the merged
// spill range.
LifetimePosition next_pos = range->Start();
if (next_pos.IsGapPosition()) next_pos = next_pos.NextStart();
UsePosition* pos = range->NextUsePositionRegisterIsBeneficial(next_pos);
if (pos == nullptr) {
- SpillRange* spill_range =
- range->TopLevel()->HasSpillRange()
- ? range->TopLevel()->GetSpillRange()
- : data()->AssignSpillRangeToLiveRange(range->TopLevel());
- bool merged = first_op_spill->TryMerge(spill_range);
- if (!merged) return false;
Spill(range);
return true;
} else if (pos->pos() > range->Start().NextStart()) {
- SpillRange* spill_range =
- range->TopLevel()->HasSpillRange()
- ? range->TopLevel()->GetSpillRange()
- : data()->AssignSpillRangeToLiveRange(range->TopLevel());
- bool merged = first_op_spill->TryMerge(spill_range);
- if (!merged) return false;
SpillBetween(range, range->Start(), pos->pos());
return true;
}
return false;
}
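
The strict majority test near the top of this function, spelled out for a three-operand phi:

// phi->operands().size() == 3:
//   spilled_count == 1 -> 1 * 2 <= 3, bail out (no slot reuse)
//   spilled_count == 2 -> 2 * 2 >  3, reuse the bundle's shared spill slot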
-
void LinearScanAllocator::SpillAfter(LiveRange* range, LifetimePosition pos) {
LiveRange* second_part = SplitRangeAt(range, pos);
Spill(second_part);
}
-
void LinearScanAllocator::SpillBetween(LiveRange* range, LifetimePosition start,
LifetimePosition end) {
SpillBetweenUntil(range, start, start, end);
}
-
void LinearScanAllocator::SpillBetweenUntil(LiveRange* range,
LifetimePosition start,
LifetimePosition until,
@@ -3476,11 +3646,9 @@ void LinearScanAllocator::SpillBetweenUntil(LiveRange* range,
}
}
-
SpillSlotLocator::SpillSlotLocator(RegisterAllocationData* data)
: data_(data) {}
-
void SpillSlotLocator::LocateSpillSlots() {
const InstructionSequence* code = data()->code();
const size_t live_ranges_size = data()->live_ranges().size();
@@ -3501,11 +3669,14 @@ void SpillSlotLocator::LocateSpillSlots() {
}
}
-
OperandAssigner::OperandAssigner(RegisterAllocationData* data) : data_(data) {}
-
void OperandAssigner::AssignSpillSlots() {
+ for (auto range : data()->live_ranges()) {
+ if (range != nullptr && range->get_bundle() != nullptr) {
+ range->get_bundle()->MergeSpillRanges();
+ }
+ }
ZoneVector<SpillRange*>& spill_ranges = data()->spill_ranges();
// Merge disjoint spill ranges
for (size_t i = 0; i < spill_ranges.size(); ++i) {
@@ -3530,7 +3701,6 @@ void OperandAssigner::AssignSpillSlots() {
}
}
-
void OperandAssigner::CommitAssignment() {
const size_t live_ranges_size = data()->live_ranges().size();
for (TopLevelLiveRange* top_range : data()->live_ranges()) {
@@ -3576,11 +3746,9 @@ void OperandAssigner::CommitAssignment() {
}
}
-
ReferenceMapPopulator::ReferenceMapPopulator(RegisterAllocationData* data)
: data_(data) {}
-
bool ReferenceMapPopulator::SafePointsAreInOrder() const {
int safe_point = 0;
for (ReferenceMap* map : *data()->code()->reference_maps()) {
@@ -3590,7 +3758,6 @@ bool ReferenceMapPopulator::SafePointsAreInOrder() const {
return true;
}
-
void ReferenceMapPopulator::PopulateReferenceMaps() {
DCHECK(SafePointsAreInOrder());
// Map all delayed references.
@@ -3718,18 +3885,15 @@ void ReferenceMapPopulator::PopulateReferenceMaps() {
}
}
-
LiveRangeConnector::LiveRangeConnector(RegisterAllocationData* data)
: data_(data) {}
-
bool LiveRangeConnector::CanEagerlyResolveControlFlow(
const InstructionBlock* block) const {
if (block->PredecessorCount() != 1) return false;
return block->predecessors()[0].IsNext(block->rpo_number());
}
-
void LiveRangeConnector::ResolveControlFlow(Zone* local_zone) {
// Lazily linearize live ranges in memory for fast lookup.
LiveRangeFinder finder(data(), local_zone);
@@ -3810,7 +3974,6 @@ void LiveRangeConnector::ResolveControlFlow(Zone* local_zone) {
}
}
-
int LiveRangeConnector::ResolveControlFlow(const InstructionBlock* block,
const InstructionOperand& cur_op,
const InstructionBlock* pred,
@@ -3881,10 +4044,9 @@ void LiveRangeConnector::ConnectRanges(Zone* local_zone) {
}
      // Reloads or spills for ranges spilled in deferred blocks must happen
      // only in deferred blocks.
- DCHECK_IMPLIES(
- connect_spilled &&
- !(prev_operand.IsAnyRegister() && cur_operand.IsAnyRegister()),
- code()->GetInstructionBlock(gap_index)->IsDeferred());
+ DCHECK_IMPLIES(connect_spilled && !(prev_operand.IsAnyRegister() &&
+ cur_operand.IsAnyRegister()),
+ code()->GetInstructionBlock(gap_index)->IsDeferred());
ParallelMove* move =
code()->InstructionAt(gap_index)->GetOrCreateParallelMove(
@@ -3928,7 +4090,6 @@ void LiveRangeConnector::ConnectRanges(Zone* local_zone) {
}
}
-
void LiveRangeConnector::CommitSpillsInDeferredBlocks(
TopLevelLiveRange* range, LiveRangeBoundArray* array, Zone* temp_zone) {
DCHECK(range->IsSpilledOnlyInDeferredBlocks());
diff --git a/deps/v8/src/compiler/register-allocator.h b/deps/v8/src/compiler/backend/register-allocator.h
index b5286e8e95..6eae9f7682 100644
--- a/deps/v8/src/compiler/register-allocator.h
+++ b/deps/v8/src/compiler/backend/register-allocator.h
@@ -2,12 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_COMPILER_REGISTER_ALLOCATOR_H_
-#define V8_COMPILER_REGISTER_ALLOCATOR_H_
+#ifndef V8_COMPILER_BACKEND_REGISTER_ALLOCATOR_H_
+#define V8_COMPILER_BACKEND_REGISTER_ALLOCATOR_H_
#include "src/base/bits.h"
#include "src/base/compiler-specific.h"
-#include "src/compiler/instruction.h"
+#include "src/compiler/backend/instruction.h"
#include "src/globals.h"
#include "src/ostreams.h"
#include "src/register-configuration.h"
@@ -168,10 +168,8 @@ class LifetimePosition final {
int value_;
};
-
std::ostream& operator<<(std::ostream& os, const LifetimePosition pos);
-
// Representation of the non-empty interval [start,end[.
class UseInterval final : public ZoneObject {
public:
@@ -244,12 +242,7 @@ enum class UsePositionHintType : uint8_t {
kUnresolved
};
-
-static const int32_t kUnassignedRegister =
- RegisterConfiguration::kMaxGeneralRegisters;
-
-static_assert(kUnassignedRegister <= RegisterConfiguration::kMaxFPRegisters,
- "kUnassignedRegister too small");
+static const int32_t kUnassignedRegister = RegisterConfiguration::kMaxRegisters;
// Representation of a use position.
class V8_EXPORT_PRIVATE UsePosition final
@@ -304,10 +297,10 @@ class V8_EXPORT_PRIVATE UsePosition final
DISALLOW_COPY_AND_ASSIGN(UsePosition);
};
-
class SpillRange;
class RegisterAllocationData;
class TopLevelLiveRange;
+class LiveRangeBundle;
// Representation of SSA values' live ranges as a collection of (continuous)
// intervals over the instruction ordering.
@@ -416,6 +409,8 @@ class V8_EXPORT_PRIVATE LiveRange : public NON_EXPORTED_BASE(ZoneObject) {
bool ShouldBeAllocatedBefore(const LiveRange* other) const;
bool CanCover(LifetimePosition position) const;
bool Covers(LifetimePosition position) const;
+ LifetimePosition NextStartAfter(LifetimePosition position) const;
+ LifetimePosition NextEndAfter(LifetimePosition position) const;
LifetimePosition FirstIntersection(LiveRange* other) const;
void VerifyChildStructure() const {
@@ -431,6 +426,11 @@ class V8_EXPORT_PRIVATE LiveRange : public NON_EXPORTED_BASE(ZoneObject) {
void Print(const RegisterConfiguration* config, bool with_children) const;
void Print(bool with_children) const;
+ void set_bundle(LiveRangeBundle* bundle) { bundle_ = bundle; }
+ LiveRangeBundle* get_bundle() const { return bundle_; }
+ bool RegisterFromBundle(int* hint) const;
+ void UpdateBundleRegister(int reg) const;
+
private:
friend class TopLevelLiveRange;
explicit LiveRange(int relative_id, MachineRepresentation rep,
@@ -467,10 +467,78 @@ class V8_EXPORT_PRIVATE LiveRange : public NON_EXPORTED_BASE(ZoneObject) {
mutable UsePosition* current_hint_position_;
// Cache the last position splintering stopped at.
mutable UsePosition* splitting_pointer_;
+ LiveRangeBundle* bundle_ = nullptr;
DISALLOW_COPY_AND_ASSIGN(LiveRange);
};
+struct LiveRangeOrdering {
+ bool operator()(const LiveRange* left, const LiveRange* right) const {
+ return left->Start() < right->Start();
+ }
+};
+class LiveRangeBundle : public ZoneObject {
+ public:
+ void MergeSpillRanges();
+
+ int id() { return id_; }
+
+ int reg() { return reg_; }
+
+ void set_reg(int reg) {
+ DCHECK_EQ(reg_, kUnassignedRegister);
+ reg_ = reg;
+ }
+
+ private:
+ friend class BundleBuilder;
+
+ class Range {
+ public:
+ Range(int s, int e) : start(s), end(e) {}
+ Range(LifetimePosition s, LifetimePosition e)
+ : start(s.value()), end(e.value()) {}
+ int start;
+ int end;
+ };
+
+ struct RangeOrdering {
+ bool operator()(const Range left, const Range right) const {
+ return left.start < right.start;
+ }
+ };
+ bool UsesOverlap(UseInterval* interval) {
+ auto use = uses_.begin();
+ while (interval != nullptr && use != uses_.end()) {
+ if (use->end <= interval->start().value()) {
+ ++use;
+ } else if (interval->end().value() <= use->start) {
+ interval = interval->next();
+ } else {
+ return true;
+ }
+ }
+ return false;
+ }
+ void InsertUses(UseInterval* interval) {
+ while (interval != nullptr) {
+ auto done = uses_.insert({interval->start(), interval->end()});
+ USE(done);
+ DCHECK_EQ(done.second, 1);
+ interval = interval->next();
+ }
+ }
+ explicit LiveRangeBundle(Zone* zone, int id)
+ : ranges_(zone), uses_(zone), id_(id) {}
+
+ bool TryAddRange(LiveRange* range);
+ bool TryMerge(LiveRangeBundle* other);
+
+ ZoneSet<LiveRange*, LiveRangeOrdering> ranges_;
+ ZoneSet<Range, RangeOrdering> uses_;
+ int id_;
+ int reg_ = kUnassignedRegister;
+};
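UsesOverlap above is a merge-style scan over two sorted sequences of disjoint intervals: advance whichever side ends first, and report an overlap as soon as neither side precedes the other. A minimal self-contained sketch of the same test, with plain vectors standing in for the zone set and the UseInterval list (names here are illustrative, not V8 API):

    #include <utility>
    #include <vector>

    // Both lists hold half-open [start, end) intervals, sorted by start
    // and pairwise disjoint, matching the invariants of uses_ above.
    bool IntervalsOverlap(const std::vector<std::pair<int, int>>& a,
                          const std::vector<std::pair<int, int>>& b) {
      size_t i = 0, j = 0;
      while (i < a.size() && j < b.size()) {
        if (a[i].second <= b[j].first) {
          ++i;  // a's interval ends before b's begins: advance a
        } else if (b[j].second <= a[i].first) {
          ++j;  // b's interval ends before a's begins: advance b
        } else {
          return true;  // neither precedes the other: they intersect
        }
      }
      return false;
    }

This is also why TryMerge can be cheap: both sides keep their use intervals sorted, so a merge that would create an overlap is rejected in a single linear pass.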
class V8_EXPORT_PRIVATE TopLevelLiveRange final : public LiveRange {
public:
@@ -613,6 +681,7 @@ class V8_EXPORT_PRIVATE TopLevelLiveRange final : public LiveRange {
splinter->relative_id_ = GetNextChildId();
splinter->set_spill_type(spill_type());
splinter->SetSplinteredFrom(this);
+ if (bundle_ != nullptr) splinter->set_bundle(bundle_);
}
void MarkHasPreassignedSlot() { has_preassigned_slot_ = true; }
@@ -661,17 +730,14 @@ class V8_EXPORT_PRIVATE TopLevelLiveRange final : public LiveRange {
DISALLOW_COPY_AND_ASSIGN(TopLevelLiveRange);
};
-
struct PrintableLiveRange {
const RegisterConfiguration* register_configuration_;
const LiveRange* range_;
};
-
std::ostream& operator<<(std::ostream& os,
const PrintableLiveRange& printable_range);
-
class SpillRange final : public ZoneObject {
public:
static const int kUnassignedSlot = -1;
@@ -714,7 +780,6 @@ class SpillRange final : public ZoneObject {
DISALLOW_COPY_AND_ASSIGN(SpillRange);
};
-
class RegisterAllocationData final : public ZoneObject {
public:
class PhiMapValue : public ZoneObject {
@@ -820,6 +885,9 @@ class RegisterAllocationData final : public ZoneObject {
bool ExistsUseWithoutDefinition();
bool RangesDefinedInDeferredStayInDeferred();
+ void MarkFixedUse(MachineRepresentation rep, int index);
+ bool HasFixedUse(MachineRepresentation rep, int index);
+
void MarkAllocated(MachineRepresentation rep, int index);
PhiMapValue* InitializePhiMap(const InstructionBlock* block,
@@ -852,13 +920,14 @@ class RegisterAllocationData final : public ZoneObject {
DelayedReferences delayed_references_;
BitVector* assigned_registers_;
BitVector* assigned_double_registers_;
+ BitVector* fixed_register_use_;
+ BitVector* fixed_fp_register_use_;
int virtual_register_count_;
RangesWithPreassignedSlots preassigned_slot_ranges_;
DISALLOW_COPY_AND_ASSIGN(RegisterAllocationData);
};
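A plausible reading of the two new BitVector fields and the MarkFixedUse / HasFixedUse pair (the corresponding .cc is not part of this hunk): one bit per register code, kept separately for general and floating-point registers, recording whether any instruction pins that register with a fixed-use constraint. A toy sketch of that bookkeeping:

    #include <bitset>

    // Illustrative stand-in for the two BitVector fields; 32 is an
    // assumed upper bound on register codes, not a V8 constant.
    struct FixedUseTracker {
      std::bitset<32> general_use;
      std::bitset<32> fp_use;

      void MarkFixedUse(bool is_fp, int index) {
        (is_fp ? fp_use : general_use).set(index);
      }
      bool HasFixedUse(bool is_fp, int index) const {
        return (is_fp ? fp_use : general_use).test(index);
      }
    };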
-
class ConstraintBuilder final : public ZoneObject {
public:
explicit ConstraintBuilder(RegisterAllocationData* data);
@@ -876,7 +945,7 @@ class ConstraintBuilder final : public ZoneObject {
Zone* allocation_zone() const { return data()->allocation_zone(); }
InstructionOperand* AllocateFixed(UnallocatedOperand* operand, int pos,
- bool is_tagged);
+ bool is_tagged, bool is_input);
void MeetRegisterConstraints(const InstructionBlock* block);
void MeetConstraintsBefore(int index);
void MeetConstraintsAfter(int index);
@@ -889,7 +958,6 @@ class ConstraintBuilder final : public ZoneObject {
DISALLOW_COPY_AND_ASSIGN(ConstraintBuilder);
};
-
class LiveRangeBuilder final : public ZoneObject {
public:
explicit LiveRangeBuilder(RegisterAllocationData* data, Zone* local_zone);
@@ -956,6 +1024,18 @@ class LiveRangeBuilder final : public ZoneObject {
DISALLOW_COPY_AND_ASSIGN(LiveRangeBuilder);
};
+class BundleBuilder final : public ZoneObject {
+ public:
+ explicit BundleBuilder(RegisterAllocationData* data) : data_(data) {}
+
+ void BuildBundles();
+
+ private:
+ RegisterAllocationData* data() const { return data_; }
+ InstructionSequence* code() const { return data_->code(); }
+ RegisterAllocationData* data_;
+ int next_bundle_id_ = 0;
+};
class RegisterAllocator : public ZoneObject {
public:
@@ -995,7 +1075,6 @@ class RegisterAllocator : public ZoneObject {
return range != nullptr && !range->IsEmpty() && range->kind() == mode();
}
-
// Split the given range in a position from the interval [start, end].
LiveRange* SplitBetween(LiveRange* range, LifetimePosition start,
LifetimePosition end);
@@ -1030,7 +1109,6 @@ class RegisterAllocator : public ZoneObject {
DISALLOW_COPY_AND_ASSIGN(RegisterAllocator);
};
-
class LinearScanAllocator final : public RegisterAllocator {
public:
LinearScanAllocator(RegisterAllocationData* data, RegisterKind kind,
@@ -1041,7 +1119,7 @@ class LinearScanAllocator final : public RegisterAllocator {
private:
struct LiveRangeOrdering {
- bool operator()(LiveRange* a, LiveRange* b) {
+ bool operator()(const LiveRange* a, const LiveRange* b) const {
return a->ShouldBeAllocatedBefore(b);
}
};
@@ -1058,13 +1136,22 @@ class LinearScanAllocator final : public RegisterAllocator {
void AddToActive(LiveRange* range);
void AddToInactive(LiveRange* range);
void AddToUnhandled(LiveRange* range);
- void ActiveToHandled(LiveRange* range);
- void ActiveToInactive(LiveRange* range);
- void InactiveToHandled(LiveRange* range);
- void InactiveToActive(LiveRange* range);
+ ZoneVector<LiveRange*>::iterator ActiveToHandled(
+ ZoneVector<LiveRange*>::iterator it);
+ ZoneVector<LiveRange*>::iterator ActiveToInactive(
+ ZoneVector<LiveRange*>::iterator it, LifetimePosition position);
+ ZoneVector<LiveRange*>::iterator InactiveToHandled(
+ ZoneVector<LiveRange*>::iterator it);
+ ZoneVector<LiveRange*>::iterator InactiveToActive(
+ ZoneVector<LiveRange*>::iterator it, LifetimePosition position);
+
+ void ForwardStateTo(LifetimePosition position);
// Helper methods for allocating registers.
bool TryReuseSpillForPhi(TopLevelLiveRange* range);
+ int PickRegisterThatIsAvailableLongest(
+ LiveRange* current, int hint_reg,
+ const Vector<LifetimePosition>& free_until_pos);
bool TryAllocateFreeReg(LiveRange* range,
const Vector<LifetimePosition>& free_until_pos);
bool TryAllocatePreferredReg(LiveRange* range,
@@ -1091,10 +1178,19 @@ class LinearScanAllocator final : public RegisterAllocator {
void SplitAndSpillIntersecting(LiveRange* range);
+ void PrintRangeRow(std::ostream& os, const TopLevelLiveRange* toplevel);
+
+ void PrintRangeOverview(std::ostream& os);
+
LiveRangeQueue unhandled_live_ranges_;
ZoneVector<LiveRange*> active_live_ranges_;
ZoneVector<LiveRange*> inactive_live_ranges_;
+  // Approximation of the position at which the set of ranges will change
+  // next. Used to avoid scanning for updates when none are present.
+ LifetimePosition next_active_ranges_change_;
+ LifetimePosition next_inactive_ranges_change_;
+
#ifdef DEBUG
LifetimePosition allocation_finger_;
#endif
@@ -1102,7 +1198,6 @@ class LinearScanAllocator final : public RegisterAllocator {
DISALLOW_COPY_AND_ASSIGN(LinearScanAllocator);
};
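The transition methods now return the iterator past the element they remove, so ForwardStateTo can sweep active_live_ranges_ and inactive_live_ranges_ in one forward pass, erasing as it goes instead of restarting the scan after each removal. The idiom in isolation:

    #include <vector>

    // Erase-while-iterating: erase() returns the next valid iterator,
    // which is exactly what the new ActiveToHandled-style signatures
    // propagate to the caller.
    void DropEndedRanges(std::vector<int>& active_ends, int position) {
      for (auto it = active_ends.begin(); it != active_ends.end();) {
        if (*it < position) {
          it = active_ends.erase(it);  // range ended: active -> handled
        } else {
          ++it;  // still live at |position|
        }
      }
    }

The cached next_active_ranges_change_ / next_inactive_ranges_change_ positions then let the allocator skip this sweep entirely while no transition can be due yet.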
-
class SpillSlotLocator final : public ZoneObject {
public:
explicit SpillSlotLocator(RegisterAllocationData* data);
@@ -1117,7 +1212,6 @@ class SpillSlotLocator final : public ZoneObject {
DISALLOW_COPY_AND_ASSIGN(SpillSlotLocator);
};
-
class OperandAssigner final : public ZoneObject {
public:
explicit OperandAssigner(RegisterAllocationData* data);
@@ -1136,7 +1230,6 @@ class OperandAssigner final : public ZoneObject {
DISALLOW_COPY_AND_ASSIGN(OperandAssigner);
};
-
class ReferenceMapPopulator final : public ZoneObject {
public:
explicit ReferenceMapPopulator(RegisterAllocationData* data);
@@ -1154,7 +1247,6 @@ class ReferenceMapPopulator final : public ZoneObject {
DISALLOW_COPY_AND_ASSIGN(ReferenceMapPopulator);
};
-
class LiveRangeBoundArray;
// Insert moves of the form
//
@@ -1201,4 +1293,4 @@ class LiveRangeConnector final : public ZoneObject {
} // namespace internal
} // namespace v8
-#endif // V8_COMPILER_REGISTER_ALLOCATOR_H_
+#endif // V8_COMPILER_BACKEND_REGISTER_ALLOCATOR_H_
diff --git a/deps/v8/src/compiler/backend/s390/OWNERS b/deps/v8/src/compiler/backend/s390/OWNERS
new file mode 100644
index 0000000000..6d1a8fc472
--- /dev/null
+++ b/deps/v8/src/compiler/backend/s390/OWNERS
@@ -0,0 +1,4 @@
+jyan@ca.ibm.com
+joransiu@ca.ibm.com
+michael_dawson@ca.ibm.com
+miladfar@ca.ibm.com
\ No newline at end of file

diff --git a/deps/v8/src/compiler/s390/code-generator-s390.cc b/deps/v8/src/compiler/backend/s390/code-generator-s390.cc
index 03a6430ef2..757576dd85 100644
--- a/deps/v8/src/compiler/s390/code-generator-s390.cc
+++ b/deps/v8/src/compiler/backend/s390/code-generator-s390.cc
@@ -2,16 +2,17 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/compiler/code-generator.h"
+#include "src/compiler/backend/code-generator.h"
#include "src/assembler-inl.h"
#include "src/callable.h"
-#include "src/compiler/code-generator-impl.h"
-#include "src/compiler/gap-resolver.h"
+#include "src/compiler/backend/code-generator-impl.h"
+#include "src/compiler/backend/gap-resolver.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/osr.h"
+#include "src/macro-assembler.h"
#include "src/optimized-compilation-info.h"
-#include "src/s390/macro-assembler-s390.h"
+#include "src/wasm/wasm-code-manager.h"
#include "src/wasm/wasm-objects.h"
namespace v8 {
@@ -165,7 +166,7 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
public:
OutOfLineRecordWrite(CodeGenerator* gen, Register object, Register offset,
Register value, Register scratch0, Register scratch1,
- RecordWriteMode mode)
+ RecordWriteMode mode, StubCallMode stub_mode)
: OutOfLineCode(gen),
object_(object),
offset_(offset),
@@ -174,12 +175,13 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
scratch0_(scratch0),
scratch1_(scratch1),
mode_(mode),
+ stub_mode_(stub_mode),
must_save_lr_(!gen->frame_access_state()->has_frame()),
zone_(gen->zone()) {}
OutOfLineRecordWrite(CodeGenerator* gen, Register object, int32_t offset,
Register value, Register scratch0, Register scratch1,
- RecordWriteMode mode)
+ RecordWriteMode mode, StubCallMode stub_mode)
: OutOfLineCode(gen),
object_(object),
offset_(no_reg),
@@ -188,31 +190,10 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
scratch0_(scratch0),
scratch1_(scratch1),
mode_(mode),
+ stub_mode_(stub_mode),
must_save_lr_(!gen->frame_access_state()->has_frame()),
zone_(gen->zone()) {}
- void SaveRegisters(RegList registers) {
- DCHECK_LT(0, NumRegs(registers));
- RegList regs = 0;
- for (int i = 0; i < Register::kNumRegisters; ++i) {
- if ((registers >> i) & 1u) {
- regs |= Register::from_code(i).bit();
- }
- }
- __ MultiPush(regs | r14.bit());
- }
-
- void RestoreRegisters(RegList registers) {
- DCHECK_LT(0, NumRegs(registers));
- RegList regs = 0;
- for (int i = 0; i < Register::kNumRegisters; ++i) {
- if ((registers >> i) & 1u) {
- regs |= Register::from_code(i).bit();
- }
- }
- __ MultiPop(regs | r14.bit());
- }
-
void Generate() final {
if (mode_ > RecordWriteMode::kValueIsPointer) {
__ JumpIfSmi(value_, exit());
@@ -235,8 +216,13 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
// We need to save and restore r14 if the frame was elided.
__ Push(r14);
}
- __ CallRecordWriteStub(object_, scratch1_, remembered_set_action,
- save_fp_mode);
+ if (stub_mode_ == StubCallMode::kCallWasmRuntimeStub) {
+ __ CallRecordWriteStub(object_, scratch1_, remembered_set_action,
+ save_fp_mode, wasm::WasmCode::kWasmRecordWrite);
+ } else {
+ __ CallRecordWriteStub(object_, scratch1_, remembered_set_action,
+ save_fp_mode);
+ }
if (must_save_lr_) {
// We need to save and restore r14 if the frame was elided.
__ Pop(r14);
@@ -251,6 +237,7 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
Register const scratch0_;
Register const scratch1_;
RecordWriteMode const mode_;
+ StubCallMode stub_mode_;
bool must_save_lr_;
Zone* zone_;
};
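The new stub_mode_ records how the out-of-line code may reach the record-write stub: regular JIT code calls it as an embedded code target, while wasm code, which must stay position-independent and cannot embed heap-object handles, goes through the relocatable wasm runtime stub (wasm::WasmCode::kWasmRecordWrite in Generate() below). The dispatch in schematic form, with the two emission strategies passed in as callables since this sketch has no assembler:

    #include <functional>

    enum class StubCallMode { kCallCodeObject, kCallWasmRuntimeStub };

    // Mirrors the branch added to Generate(): one stub, two call paths.
    void EmitRecordWriteCall(StubCallMode mode,
                             const std::function<void()>& call_code_target,
                             const std::function<void()>& call_wasm_stub) {
      if (mode == StubCallMode::kCallWasmRuntimeStub) {
        call_wasm_stub();    // relocatable, per-module runtime stub
      } else {
        call_code_target();  // ordinary embedded code-target call
      }
    }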
@@ -265,28 +252,28 @@ Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) {
// unsigned number never less than 0
if (op == kS390_LoadAndTestWord32 || op == kS390_LoadAndTestWord64)
return CC_NOP;
- V8_FALLTHROUGH;
+ V8_FALLTHROUGH;
case kSignedLessThan:
return lt;
case kUnsignedGreaterThanOrEqual:
// unsigned number always greater than or equal 0
if (op == kS390_LoadAndTestWord32 || op == kS390_LoadAndTestWord64)
return CC_ALWAYS;
- V8_FALLTHROUGH;
+ V8_FALLTHROUGH;
case kSignedGreaterThanOrEqual:
return ge;
case kUnsignedLessThanOrEqual:
// unsigned number never less than 0
if (op == kS390_LoadAndTestWord32 || op == kS390_LoadAndTestWord64)
return CC_EQ;
- V8_FALLTHROUGH;
+ V8_FALLTHROUGH;
case kSignedLessThanOrEqual:
return le;
case kUnsignedGreaterThan:
// unsigned number always greater than or equal 0
if (op == kS390_LoadAndTestWord32 || op == kS390_LoadAndTestWord64)
return ne;
- V8_FALLTHROUGH;
+ V8_FALLTHROUGH;
case kSignedGreaterThan:
return gt;
case kOverflow:
@@ -451,9 +438,7 @@ Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) {
return ret; \
}
-static int nullInstr() {
- UNREACHABLE();
-}
+static int nullInstr() { UNREACHABLE(); }
template <int numOfOperand, class RType, class MType, class IType>
static inline int AssembleOp(Instruction* instr, RType r, MType m, IType i) {
@@ -905,596 +890,272 @@ static inline int AssembleUnaryOp(Instruction* instr, _R _r, _M _m, _I _i) {
{ \
__ LoadlW(temp0, MemOperand(addr, offset)); \
__ llgfr(temp1, temp0); \
- __ RotateInsertSelectBits(temp0, old_val, Operand(start), \
- Operand(end), Operand(shift_amount), false); \
- __ RotateInsertSelectBits(temp1, new_val, Operand(start), \
- Operand(end), Operand(shift_amount), false); \
+ __ RotateInsertSelectBits(temp0, old_val, Operand(start), Operand(end), \
+ Operand(shift_amount), false); \
+ __ RotateInsertSelectBits(temp1, new_val, Operand(start), Operand(end), \
+ Operand(shift_amount), false); \
__ CmpAndSwap(temp0, temp1, MemOperand(addr, offset)); \
- __ RotateInsertSelectBits(output, temp0, Operand(start+shift_amount), \
- Operand(end+shift_amount), Operand(64-shift_amount), true); \
+ __ RotateInsertSelectBits(output, temp0, Operand(start + shift_amount), \
+ Operand(end + shift_amount), \
+ Operand(64 - shift_amount), true); \
}
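ATOMIC_COMP_EXCHANGE narrows the word-sized CmpAndSwap to a sub-word field: splice the expected and desired values into the right bit range of the containing word, CAS the whole word, then rotate the observed field back into place. The same technique in portable C++ (little-endian byte indexing assumed); because the hardware compares all four bytes, an emulation must retry when the CAS failed only due to a neighboring byte changing:

    #include <atomic>
    #include <cstdint>

    // Compare-and-swap on one byte of a 32-bit word.
    bool ByteCompareExchange(std::atomic<uint32_t>* word, int byte_index,
                             uint8_t expected, uint8_t desired) {
      const int shift = byte_index * 8;  // little-endian byte position
      const uint32_t mask = 0xFFu << shift;
      uint32_t old_word = word->load();
      for (;;) {
        if (static_cast<uint8_t>((old_word & mask) >> shift) != expected)
          return false;  // the byte itself differs: genuine failure
        const uint32_t new_word =
            (old_word & ~mask) | (static_cast<uint32_t>(desired) << shift);
        // On failure, compare_exchange_weak reloads old_word for the retry.
        if (word->compare_exchange_weak(old_word, new_word)) return true;
      }
    }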
#ifdef V8_TARGET_BIG_ENDIAN
-#define ATOMIC_COMP_EXCHANGE_BYTE(i) \
- { \
- constexpr int idx = (i); \
- static_assert(idx <= 3 && idx >= 0, "idx is out of range!"); \
- constexpr int start = 32 + 8 * idx; \
- constexpr int end = start + 7; \
- constexpr int shift_amount = (3 - idx) * 8; \
- ATOMIC_COMP_EXCHANGE(start, end, shift_amount, -idx); \
- }
-#define ATOMIC_COMP_EXCHANGE_HALFWORD(i) \
- { \
- constexpr int idx = (i); \
- static_assert(idx <= 1 && idx >= 0, "idx is out of range!"); \
- constexpr int start = 32 + 16 * idx; \
- constexpr int end = start + 15; \
- constexpr int shift_amount = (1 - idx) * 16; \
- ATOMIC_COMP_EXCHANGE(start, end, shift_amount, -idx * 2); \
+#define ATOMIC_COMP_EXCHANGE_BYTE(i) \
+ { \
+ constexpr int idx = (i); \
+ static_assert(idx <= 3 && idx >= 0, "idx is out of range!"); \
+ constexpr int start = 32 + 8 * idx; \
+ constexpr int end = start + 7; \
+ constexpr int shift_amount = (3 - idx) * 8; \
+ ATOMIC_COMP_EXCHANGE(start, end, shift_amount, -idx); \
+ }
+#define ATOMIC_COMP_EXCHANGE_HALFWORD(i) \
+ { \
+ constexpr int idx = (i); \
+ static_assert(idx <= 1 && idx >= 0, "idx is out of range!"); \
+ constexpr int start = 32 + 16 * idx; \
+ constexpr int end = start + 15; \
+ constexpr int shift_amount = (1 - idx) * 16; \
+ ATOMIC_COMP_EXCHANGE(start, end, shift_amount, -idx * 2); \
}
#else
-#define ATOMIC_COMP_EXCHANGE_BYTE(i) \
- { \
- constexpr int idx = (i); \
- static_assert(idx <= 3 && idx >= 0, "idx is out of range!"); \
- constexpr int start = 32 + 8 * (3 - idx); \
- constexpr int end = start + 7; \
- constexpr int shift_amount = idx * 8; \
- ATOMIC_COMP_EXCHANGE(start, end, shift_amount, -idx); \
- }
-#define ATOMIC_COMP_EXCHANGE_HALFWORD(i) \
- { \
- constexpr int idx = (i); \
- static_assert(idx <= 1 && idx >= 0, "idx is out of range!"); \
- constexpr int start = 32 + 16 * (1 - idx); \
- constexpr int end = start + 15; \
- constexpr int shift_amount = idx * 16; \
- ATOMIC_COMP_EXCHANGE(start, end, shift_amount, -idx * 2); \
+#define ATOMIC_COMP_EXCHANGE_BYTE(i) \
+ { \
+ constexpr int idx = (i); \
+ static_assert(idx <= 3 && idx >= 0, "idx is out of range!"); \
+ constexpr int start = 32 + 8 * (3 - idx); \
+ constexpr int end = start + 7; \
+ constexpr int shift_amount = idx * 8; \
+ ATOMIC_COMP_EXCHANGE(start, end, shift_amount, -idx); \
+ }
+#define ATOMIC_COMP_EXCHANGE_HALFWORD(i) \
+ { \
+ constexpr int idx = (i); \
+ static_assert(idx <= 1 && idx >= 0, "idx is out of range!"); \
+ constexpr int start = 32 + 16 * (1 - idx); \
+ constexpr int end = start + 15; \
+ constexpr int shift_amount = idx * 16; \
+ ATOMIC_COMP_EXCHANGE(start, end, shift_amount, -idx * 2); \
}
#endif
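The two #ifdef branches differ only in how a byte or halfword index maps to a bit window and shift amount within the rotate: on big-endian targets element 0 is the most significant, on little-endian the least. The byte-case arithmetic, mirrored as checked constexpr functions:

    // Mirrors ATOMIC_COMP_EXCHANGE_BYTE above; idx is in [0, 3].
    constexpr int StartBit(bool big_endian, int idx) {
      return big_endian ? 32 + 8 * idx : 32 + 8 * (3 - idx);
    }
    constexpr int ShiftAmount(bool big_endian, int idx) {
      return big_endian ? (3 - idx) * 8 : idx * 8;
    }
    static_assert(StartBit(true, 0) == 32 && ShiftAmount(true, 0) == 24,
                  "big-endian: byte 0 is most significant");
    static_assert(StartBit(false, 0) == 56 && ShiftAmount(false, 0) == 0,
                  "little-endian: byte 0 is least significant");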
-#define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_BYTE(load_and_ext) \
- do { \
- Register old_val = i.InputRegister(0); \
- Register new_val = i.InputRegister(1); \
- Register output = i.OutputRegister(); \
- Register addr = kScratchReg; \
- Register temp0 = r0; \
- Register temp1 = r1; \
- size_t index = 2; \
- AddressingMode mode = kMode_None; \
- MemOperand op = i.MemoryOperand(&mode, &index); \
- Label three, two, one, done; \
- __ lay(addr, op); \
- __ tmll(addr, Operand(3)); \
- __ b(Condition(1), &three); \
- __ b(Condition(2), &two); \
- __ b(Condition(4), &one); \
- /* ending with 0b00 */ \
- ATOMIC_COMP_EXCHANGE_BYTE(0); \
- __ b(&done); \
- /* ending with 0b01 */ \
- __ bind(&one); \
- ATOMIC_COMP_EXCHANGE_BYTE(1); \
- __ b(&done); \
- /* ending with 0b10 */ \
- __ bind(&two); \
- ATOMIC_COMP_EXCHANGE_BYTE(2); \
- __ b(&done); \
- /* ending with 0b11 */ \
- __ bind(&three); \
- ATOMIC_COMP_EXCHANGE_BYTE(3); \
- __ bind(&done); \
- __ load_and_ext(output, output); \
- } while (false)
-
-#define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_HALFWORD(load_and_ext) \
- do { \
- Register old_val = i.InputRegister(0); \
- Register new_val = i.InputRegister(1); \
- Register output = i.OutputRegister(); \
- Register addr = kScratchReg; \
- Register temp0 = r0; \
- Register temp1 = r1; \
- size_t index = 2; \
- AddressingMode mode = kMode_None; \
- MemOperand op = i.MemoryOperand(&mode, &index); \
- Label two, done; \
- __ lay(addr, op); \
- __ tmll(addr, Operand(3)); \
- __ b(Condition(2), &two); \
- ATOMIC_COMP_EXCHANGE_HALFWORD(0); \
- __ b(&done); \
- __ bind(&two); \
- ATOMIC_COMP_EXCHANGE_HALFWORD(1); \
- __ bind(&done); \
- __ load_and_ext(output, output); \
+#define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_BYTE(load_and_ext) \
+ do { \
+ Register old_val = i.InputRegister(0); \
+ Register new_val = i.InputRegister(1); \
+ Register output = i.OutputRegister(); \
+ Register addr = kScratchReg; \
+ Register temp0 = r0; \
+ Register temp1 = r1; \
+ size_t index = 2; \
+ AddressingMode mode = kMode_None; \
+ MemOperand op = i.MemoryOperand(&mode, &index); \
+ Label three, two, one, done; \
+ __ lay(addr, op); \
+ __ tmll(addr, Operand(3)); \
+ __ b(Condition(1), &three); \
+ __ b(Condition(2), &two); \
+ __ b(Condition(4), &one); \
+ /* ending with 0b00 */ \
+ ATOMIC_COMP_EXCHANGE_BYTE(0); \
+ __ b(&done); \
+ /* ending with 0b01 */ \
+ __ bind(&one); \
+ ATOMIC_COMP_EXCHANGE_BYTE(1); \
+ __ b(&done); \
+ /* ending with 0b10 */ \
+ __ bind(&two); \
+ ATOMIC_COMP_EXCHANGE_BYTE(2); \
+ __ b(&done); \
+ /* ending with 0b11 */ \
+ __ bind(&three); \
+ ATOMIC_COMP_EXCHANGE_BYTE(3); \
+ __ bind(&done); \
+ __ load_and_ext(output, output); \
} while (false)
-#define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_WORD() \
- do { \
- Register new_val = i.InputRegister(1); \
- Register output = i.OutputRegister(); \
- Register addr = kScratchReg; \
- size_t index = 2; \
- AddressingMode mode = kMode_None; \
- MemOperand op = i.MemoryOperand(&mode, &index); \
- __ lay(addr, op); \
- __ CmpAndSwap(output, new_val, MemOperand(addr)); \
+#define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_HALFWORD(load_and_ext) \
+ do { \
+ Register old_val = i.InputRegister(0); \
+ Register new_val = i.InputRegister(1); \
+ Register output = i.OutputRegister(); \
+ Register addr = kScratchReg; \
+ Register temp0 = r0; \
+ Register temp1 = r1; \
+ size_t index = 2; \
+ AddressingMode mode = kMode_None; \
+ MemOperand op = i.MemoryOperand(&mode, &index); \
+ Label two, done; \
+ __ lay(addr, op); \
+ __ tmll(addr, Operand(3)); \
+ __ b(Condition(2), &two); \
+ ATOMIC_COMP_EXCHANGE_HALFWORD(0); \
+ __ b(&done); \
+ __ bind(&two); \
+ ATOMIC_COMP_EXCHANGE_HALFWORD(1); \
+ __ bind(&done); \
+ __ load_and_ext(output, output); \
} while (false)
-#define ASSEMBLE_ATOMIC_BINOP_WORD(load_and_op) \
- do { \
- Register value = i.InputRegister(2); \
- Register result = i.OutputRegister(0); \
- Register addr = r1; \
- AddressingMode mode = kMode_None; \
- MemOperand op = i.MemoryOperand(&mode); \
- __ lay(addr, op); \
- __ load_and_op(result, value, MemOperand(addr)); \
- __ LoadlW(result, result); \
+#define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_WORD() \
+ do { \
+ Register new_val = i.InputRegister(1); \
+ Register output = i.OutputRegister(); \
+ Register addr = kScratchReg; \
+ size_t index = 2; \
+ AddressingMode mode = kMode_None; \
+ MemOperand op = i.MemoryOperand(&mode, &index); \
+ __ lay(addr, op); \
+ __ CmpAndSwap(output, new_val, MemOperand(addr)); \
+ __ LoadlW(output, output); \
} while (false)
-#define ASSEMBLE_ATOMIC_BINOP_WORD64(load_and_op) \
- do { \
- Register value = i.InputRegister(2); \
- Register result = i.OutputRegister(0); \
- Register addr = r1; \
- AddressingMode mode = kMode_None; \
- MemOperand op = i.MemoryOperand(&mode); \
- __ lay(addr, op); \
- __ load_and_op(result, value, MemOperand(addr)); \
+#define ASSEMBLE_ATOMIC_BINOP_WORD(load_and_op) \
+ do { \
+ Register value = i.InputRegister(2); \
+ Register result = i.OutputRegister(0); \
+ Register addr = r1; \
+ AddressingMode mode = kMode_None; \
+ MemOperand op = i.MemoryOperand(&mode); \
+ __ lay(addr, op); \
+ __ load_and_op(result, value, MemOperand(addr)); \
+ __ LoadlW(result, result); \
} while (false)
-#define ATOMIC_BIN_OP(bin_inst, offset, shift_amount, start, end) \
- do { \
- Label do_cs; \
- __ LoadlW(prev, MemOperand(addr, offset)); \
- __ bind(&do_cs); \
- __ RotateInsertSelectBits(temp, value, Operand(start), Operand(end), \
- Operand(static_cast<intptr_t>(shift_amount)), true); \
- __ bin_inst(new_val, prev, temp); \
- __ lr(temp, prev); \
- __ RotateInsertSelectBits(temp, new_val, Operand(start), \
- Operand(end), Operand::Zero(), false); \
- __ CmpAndSwap(prev, temp, MemOperand(addr, offset)); \
- __ bne(&do_cs, Label::kNear); \
+#define ASSEMBLE_ATOMIC_BINOP_WORD64(load_and_op) \
+ do { \
+ Register value = i.InputRegister(2); \
+ Register result = i.OutputRegister(0); \
+ Register addr = r1; \
+ AddressingMode mode = kMode_None; \
+ MemOperand op = i.MemoryOperand(&mode); \
+ __ lay(addr, op); \
+ __ load_and_op(result, value, MemOperand(addr)); \
} while (false)
-#define ATOMIC64_BIN_OP(bin_inst, offset, shift_amount, start, end) \
+#define ATOMIC_BIN_OP(bin_inst, offset, shift_amount, start, end) \
do { \
Label do_cs; \
- __ lg(prev, MemOperand(addr, offset)); \
+ __ LoadlW(prev, MemOperand(addr, offset)); \
__ bind(&do_cs); \
__ RotateInsertSelectBits(temp, value, Operand(start), Operand(end), \
Operand(static_cast<intptr_t>(shift_amount)), \
true); \
__ bin_inst(new_val, prev, temp); \
- __ lgr(temp, prev); \
+ __ lr(temp, prev); \
__ RotateInsertSelectBits(temp, new_val, Operand(start), Operand(end), \
Operand::Zero(), false); \
- __ CmpAndSwap64(prev, temp, MemOperand(addr, offset)); \
+ __ CmpAndSwap(prev, temp, MemOperand(addr, offset)); \
__ bne(&do_cs, Label::kNear); \
} while (false)
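ATOMIC_BIN_OP is the read-modify-write counterpart of the exchange loops: load the containing word, apply the operation, splice only the selected field of the result back over the old word (so a carry cannot leak into neighboring fields), and CmpAndSwap until no concurrent writer intervened. A portable sketch for a halfword fetch-and-add (little-endian indexing assumed):

    #include <atomic>
    #include <cstdint>

    uint16_t HalfwordFetchAdd(std::atomic<uint32_t>* word, int half_index,
                              uint16_t addend) {
      const int shift = half_index * 16;
      const uint32_t mask = 0xFFFFu << shift;
      uint32_t old_word = word->load();
      for (;;) {
        const uint16_t old_half =
            static_cast<uint16_t>((old_word & mask) >> shift);
        // Truncate the sum to 16 bits before splicing, exactly as the
        // final RotateInsertSelectBits discards bits outside the field.
        const uint16_t new_half = static_cast<uint16_t>(old_half + addend);
        const uint32_t new_word =
            (old_word & ~mask) | (static_cast<uint32_t>(new_half) << shift);
        if (word->compare_exchange_weak(old_word, new_word)) return old_half;
      }
    }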
#ifdef V8_TARGET_BIG_ENDIAN
-#define ATOMIC_BIN_OP_HALFWORD(bin_inst, index, extract_result) \
- { \
- constexpr int offset = -(2 * index); \
- constexpr int shift_amount = 16 - (index * 16); \
- constexpr int start = 48 - shift_amount; \
- constexpr int end = start + 15; \
- ATOMIC_BIN_OP(bin_inst, offset, shift_amount, start, end); \
- extract_result(); \
- }
-#define ATOMIC_BIN_OP_BYTE(bin_inst, index, extract_result) \
- { \
- constexpr int offset = -(index); \
- constexpr int shift_amount = 24 - (index * 8); \
- constexpr int start = 56 - shift_amount; \
- constexpr int end = start + 7; \
- ATOMIC_BIN_OP(bin_inst, offset, shift_amount, start, end); \
- extract_result(); \
- }
-#define ATOMIC_BIN_OP_WORD(bin_inst, index, extract_result) \
- { \
- constexpr int offset = -(4 * index); \
- constexpr int shift_amount = 32 - (index * 32); \
- constexpr int start = 32 - shift_amount; \
- constexpr int end = start + 31; \
- ATOMIC64_BIN_OP(bin_inst, offset, shift_amount, start, end); \
- extract_result(); \
+#define ATOMIC_BIN_OP_HALFWORD(bin_inst, index, extract_result) \
+ { \
+ constexpr int offset = -(2 * index); \
+ constexpr int shift_amount = 16 - (index * 16); \
+ constexpr int start = 48 - shift_amount; \
+ constexpr int end = start + 15; \
+ ATOMIC_BIN_OP(bin_inst, offset, shift_amount, start, end); \
+ extract_result(); \
+ }
+#define ATOMIC_BIN_OP_BYTE(bin_inst, index, extract_result) \
+ { \
+ constexpr int offset = -(index); \
+ constexpr int shift_amount = 24 - (index * 8); \
+ constexpr int start = 56 - shift_amount; \
+ constexpr int end = start + 7; \
+ ATOMIC_BIN_OP(bin_inst, offset, shift_amount, start, end); \
+ extract_result(); \
}
#else
-#define ATOMIC_BIN_OP_HALFWORD(bin_inst, index, extract_result) \
- { \
- constexpr int offset = -(2 * index); \
- constexpr int shift_amount = index * 16; \
- constexpr int start = 48 - shift_amount; \
- constexpr int end = start + 15; \
- ATOMIC_BIN_OP(bin_inst, offset, shift_amount, start, end); \
- extract_result(); \
- }
-#define ATOMIC_BIN_OP_BYTE(bin_inst, index, extract_result) \
- { \
- constexpr int offset = -(index); \
- constexpr int shift_amount = index * 8; \
- constexpr int start = 56 - shift_amount; \
- constexpr int end = start + 7; \
- ATOMIC_BIN_OP(bin_inst, offset, shift_amount, start, end); \
- extract_result(); \
- }
-#define ATOMIC_BIN_OP_WORD(bin_inst, index, extract_result) \
- { \
- constexpr int offset = -(4 * index); \
- constexpr int shift_amount = index * 32; \
- constexpr int start = 32 - shift_amount; \
- constexpr int end = start + 31; \
- ATOMIC64_BIN_OP(bin_inst, offset, shift_amount, start, end); \
- extract_result(); \
+#define ATOMIC_BIN_OP_HALFWORD(bin_inst, index, extract_result) \
+ { \
+ constexpr int offset = -(2 * index); \
+ constexpr int shift_amount = index * 16; \
+ constexpr int start = 48 - shift_amount; \
+ constexpr int end = start + 15; \
+ ATOMIC_BIN_OP(bin_inst, offset, shift_amount, start, end); \
+ extract_result(); \
+ }
+#define ATOMIC_BIN_OP_BYTE(bin_inst, index, extract_result) \
+ { \
+ constexpr int offset = -(index); \
+ constexpr int shift_amount = index * 8; \
+ constexpr int start = 56 - shift_amount; \
+ constexpr int end = start + 7; \
+ ATOMIC_BIN_OP(bin_inst, offset, shift_amount, start, end); \
+ extract_result(); \
}
#endif // V8_TARGET_BIG_ENDIAN
-#define ASSEMBLE_ATOMIC_BINOP_HALFWORD(bin_inst, extract_result) \
- do { \
- Register value = i.InputRegister(2); \
- Register result = i.OutputRegister(0); \
- Register prev = i.TempRegister(0); \
- Register new_val = r0; \
- Register addr = r1; \
- Register temp = kScratchReg; \
- AddressingMode mode = kMode_None; \
- MemOperand op = i.MemoryOperand(&mode); \
- Label two, done; \
- __ lay(addr, op); \
- __ tmll(addr, Operand(3)); \
- __ b(Condition(2), &two); \
- /* word boundary */ \
- ATOMIC_BIN_OP_HALFWORD(bin_inst, 0, extract_result); \
- __ b(&done); \
- __ bind(&two); \
- /* halfword boundary */ \
- ATOMIC_BIN_OP_HALFWORD(bin_inst, 1, extract_result); \
- __ bind(&done); \
- } while (false)
-
-#define ASSEMBLE_ATOMIC_BINOP_BYTE(bin_inst, extract_result) \
- do { \
- Register value = i.InputRegister(2); \
- Register result = i.OutputRegister(0); \
- Register addr = i.TempRegister(0); \
- Register prev = r0; \
- Register new_val = r1; \
- Register temp = kScratchReg; \
- AddressingMode mode = kMode_None; \
- MemOperand op = i.MemoryOperand(&mode); \
- Label done, one, two, three; \
- __ lay(addr, op); \
- __ tmll(addr, Operand(3)); \
- __ b(Condition(1), &three); \
- __ b(Condition(2), &two); \
- __ b(Condition(4), &one); \
- /* ending with 0b00 (word boundary) */ \
- ATOMIC_BIN_OP_BYTE(bin_inst, 0, extract_result); \
- __ b(&done); \
- /* ending with 0b01 */ \
- __ bind(&one); \
- ATOMIC_BIN_OP_BYTE(bin_inst, 1, extract_result); \
- __ b(&done); \
- /* ending with 0b10 (hw boundary) */ \
- __ bind(&two); \
- ATOMIC_BIN_OP_BYTE(bin_inst, 2, extract_result); \
- __ b(&done); \
- /* ending with 0b11 */ \
- __ bind(&three); \
- ATOMIC_BIN_OP_BYTE(bin_inst, 3, extract_result); \
- __ bind(&done); \
- } while (false)
-
-#define ASSEMBLE_ATOMIC64_BINOP_BYTE(bin_inst, extract_result) \
- do { \
- Register value = i.InputRegister(2); \
- Register result = i.OutputRegister(0); \
- Register addr = i.TempRegister(0); \
- Register prev = r0; \
- Register new_val = r1; \
- Register temp = kScratchReg; \
- AddressingMode mode = kMode_None; \
- MemOperand op = i.MemoryOperand(&mode); \
- Label done, leftmost0, leftmost1, two, three, four, five, seven; \
- __ lay(addr, op); \
- __ tmll(addr, Operand(7)); \
- __ b(Condition(1), &seven); \
- __ b(Condition(2), &leftmost1); \
- __ b(Condition(4), &leftmost0); \
- /* ending with 0b000 */ \
- ATOMIC_BIN_OP_BYTE(bin_inst, 0, extract_result); \
- __ b(&done); \
- /* ending in 0b001 to 0b011 */ \
- __ bind(&leftmost0); \
- __ tmll(addr, Operand(3)); \
- __ b(Condition(1), &three); \
- __ b(Condition(2), &two); \
- ATOMIC_BIN_OP_BYTE(bin_inst, 1, extract_result); \
- __ b(&done); \
- /* ending in 0b010 */ \
- __ bind(&two); \
- ATOMIC_BIN_OP_BYTE(bin_inst, 2, extract_result); \
- __ b(&done); \
- /* ending in 0b011 */ \
- __ bind(&three); \
- ATOMIC_BIN_OP_BYTE(bin_inst, 3, extract_result); \
- __ b(&done); \
- /* ending in 0b100 to 0b110 */ \
- __ bind(&leftmost1); \
- __ tmll(addr, Operand(3)); \
- __ b(Condition(8), &four); \
- __ b(Condition(4), &five); \
- ATOMIC_BIN_OP_BYTE(bin_inst, 6, extract_result); \
- __ b(&done); \
- /* ending in 0b100 */ \
- __ bind(&four); \
- ATOMIC_BIN_OP_BYTE(bin_inst, 4, extract_result); \
- __ b(&done); \
- /* ending in 0b101 */ \
- __ bind(&five); \
- ATOMIC_BIN_OP_BYTE(bin_inst, 5, extract_result); \
- __ b(&done); \
- /* ending in 0b111 */ \
- __ bind(&seven); \
- ATOMIC_BIN_OP_BYTE(bin_inst, 7, extract_result); \
- __ bind(&done); \
- } while (false)
-
-#define ASSEMBLE_ATOMIC64_BINOP_HALFWORD(bin_inst, extract_result) \
- do { \
- Register value = i.InputRegister(2); \
- Register result = i.OutputRegister(0); \
- Register prev = i.TempRegister(0); \
- Register new_val = r0; \
- Register addr = r1; \
- Register temp = kScratchReg; \
- AddressingMode mode = kMode_None; \
- MemOperand op = i.MemoryOperand(&mode); \
- Label done, one, two, three; \
- __ lay(addr, op); \
- __ tmll(addr, Operand(6)); \
- __ b(Condition(1), &three); \
- __ b(Condition(2), &two); \
- __ b(Condition(4), &one); \
- /* ending in 0b00 */ \
- ATOMIC_BIN_OP_HALFWORD(bin_inst, 0, extract_result); \
- __ b(&done); \
- /* ending in 0b01 */ \
- __ bind(&one); \
- ATOMIC_BIN_OP_HALFWORD(bin_inst, 1, extract_result); \
- __ b(&done); \
- /* ending in 0b10 */ \
- __ bind(&two); \
- ATOMIC_BIN_OP_HALFWORD(bin_inst, 2, extract_result); \
- __ b(&done); \
- /* ending in 0b11 */ \
- __ bind(&three); \
- ATOMIC_BIN_OP_HALFWORD(bin_inst, 3, extract_result); \
- __ bind(&done); \
- } while (false)
-
-#define ASSEMBLE_ATOMIC64_BINOP_WORD(bin_inst, extract_result) \
- do { \
- Register value = i.InputRegister(2); \
- Register result = i.OutputRegister(0); \
- Register prev = i.TempRegister(0); \
- Register new_val = r0; \
- Register addr = r1; \
- Register temp = kScratchReg; \
- AddressingMode mode = kMode_None; \
- MemOperand op = i.MemoryOperand(&mode); \
- Label done, one; \
- __ lay(addr, op); \
- __ tmll(addr, Operand(4)); \
- __ b(Condition(2), &one); \
- /* ending in 0b000 */ \
- ATOMIC_BIN_OP_WORD(bin_inst, 0, extract_result); \
- __ b(&done); \
- __ bind(&one); \
- /* ending in 0b100 */ \
- ATOMIC_BIN_OP_WORD(bin_inst, 1, extract_result); \
- __ bind(&done); \
- } while (false)
-
-#define ATOMIC64_COMP_EXCHANGE(start, end, shift_amount, offset) \
- { \
- __ lg(temp0, MemOperand(addr, offset)); \
- __ lgr(temp1, temp0); \
- __ RotateInsertSelectBits(temp0, old_val, Operand(start), \
- Operand(end), Operand(shift_amount), false); \
- __ RotateInsertSelectBits(temp1, new_val, Operand(start), \
- Operand(end), Operand(shift_amount), false); \
- __ CmpAndSwap64(temp0, temp1, MemOperand(addr, offset)); \
- __ RotateInsertSelectBits(output, temp0, Operand(start+shift_amount), \
- Operand(end+shift_amount), Operand(64-shift_amount), true); \
- }
-
-#ifdef V8_TARGET_BIG_ENDIAN
-#define ATOMIC64_COMP_EXCHANGE_BYTE(i) \
- { \
- constexpr int idx = (i); \
- constexpr int start = 8 * idx; \
- constexpr int end = start + 7; \
- constexpr int shift_amount = (7 - idx) * 8; \
- ATOMIC64_COMP_EXCHANGE(start, end, shift_amount, -idx); \
- }
-#define ATOMIC64_COMP_EXCHANGE_HALFWORD(i) \
- { \
- constexpr int idx = (i); \
- constexpr int start = 16 * idx; \
- constexpr int end = start + 15; \
- constexpr int shift_amount = (3 - idx) * 16; \
- ATOMIC64_COMP_EXCHANGE(start, end, shift_amount, -idx * 2); \
- }
-#define ATOMIC64_COMP_EXCHANGE_WORD(i) \
- { \
- constexpr int idx = (i); \
- constexpr int start = 32 * idx; \
- constexpr int end = start + 31; \
- constexpr int shift_amount = (1 - idx) * 32; \
- ATOMIC64_COMP_EXCHANGE(start, end, shift_amount, -idx * 4); \
- }
-#else
-#define ATOMIC64_COMP_EXCHANGE_BYTE(i) \
- { \
- constexpr int idx = (i); \
- constexpr int start = 32 + 8 * (3 - idx); \
- constexpr int end = start + 7; \
- constexpr int shift_amount = idx * 8; \
- ATOMIC64_COMP_EXCHANGE(start, end, shift_amount, -idx); \
- }
-#define ATOMIC64_COMP_EXCHANGE_HALFWORD(i) \
- { \
- constexpr int idx = (i); \
- constexpr int start = 32 + 16 * (1 - idx); \
- constexpr int end = start + 15; \
- constexpr int shift_amount = idx * 16; \
- ATOMIC64_COMP_EXCHANGE(start, end, shift_amount, -idx * 2); \
- }
-#define ATOMIC64_COMP_EXCHANGE_WORD(i) \
- { \
- constexpr int idx = (i); \
- constexpr int start = 32 * (1 - idx); \
- constexpr int end = start + 31; \
- constexpr int shift_amount = idx * 32; \
- ATOMIC64_COMP_EXCHANGE(start, end, shift_amount, -idx * 4); \
- }
-#endif
-
-#define ASSEMBLE_ATOMIC64_COMP_EXCHANGE_BYTE(load_and_ext) \
- do { \
- Register old_val = i.InputRegister(0); \
- Register new_val = i.InputRegister(1); \
- Register output = i.OutputRegister(); \
- Register addr = kScratchReg; \
- Register temp0 = r0; \
- Register temp1 = r1; \
- size_t index = 2; \
- AddressingMode mode = kMode_None; \
- MemOperand op = i.MemoryOperand(&mode, &index); \
- Label done, leftmost0, leftmost1, two, three, four, five, seven; \
- __ lay(addr, op); \
- __ tmll(addr, Operand(7)); \
- __ b(Condition(1), &seven); \
- __ b(Condition(2), &leftmost1); \
- __ b(Condition(4), &leftmost0); \
- /* ending with 0b000 */ \
- ATOMIC64_COMP_EXCHANGE_BYTE(0); \
- __ b(&done); \
- /* ending in 0b001 to 0b011 */ \
- __ bind(&leftmost0); \
- __ tmll(addr, Operand(3)); \
- __ b(Condition(1), &three); \
- __ b(Condition(2), &two); \
- ATOMIC64_COMP_EXCHANGE_BYTE(1); \
- __ b(&done); \
- /* ending in 0b010 */ \
- __ bind(&two); \
- ATOMIC64_COMP_EXCHANGE_BYTE(2); \
- __ b(&done); \
- /* ending in 0b011 */ \
- __ bind(&three); \
- ATOMIC64_COMP_EXCHANGE_BYTE(3); \
- __ b(&done); \
- /* ending in 0b100 to 0b110 */ \
- __ bind(&leftmost1); \
- __ tmll(addr, Operand(3)); \
- __ b(Condition(8), &four); \
- __ b(Condition(4), &five); \
- ATOMIC64_COMP_EXCHANGE_BYTE(6); \
- __ b(&done); \
- /* ending in 0b100 */ \
- __ bind(&four); \
- ATOMIC64_COMP_EXCHANGE_BYTE(4); \
- __ b(&done); \
- /* ending in 0b101 */ \
- __ bind(&five); \
- ATOMIC64_COMP_EXCHANGE_BYTE(5); \
- __ b(&done); \
- /* ending in 0b111 */ \
- __ bind(&seven); \
- ATOMIC64_COMP_EXCHANGE_BYTE(7); \
- __ bind(&done); \
- __ load_and_ext(output, output); \
- } while (false)
-
-#define ASSEMBLE_ATOMIC64_COMP_EXCHANGE_HALFWORD(load_and_ext) \
- do { \
- Register old_val = i.InputRegister(0); \
- Register new_val = i.InputRegister(1); \
- Register output = i.OutputRegister(); \
- Register addr = kScratchReg; \
- Register temp0 = r0; \
- Register temp1 = r1; \
- size_t index = 2; \
- AddressingMode mode = kMode_None; \
- MemOperand op = i.MemoryOperand(&mode, &index); \
- Label done, one, two, three; \
- __ lay(addr, op); \
- __ tmll(addr, Operand(6)); \
- __ b(Condition(1), &three); \
- __ b(Condition(2), &two); \
- __ b(Condition(4), &one); \
- /* ending in 0b00 */ \
- ATOMIC64_COMP_EXCHANGE_HALFWORD(0); \
- __ b(&done); \
- /* ending in 0b01 */ \
- __ bind(&one); \
- ATOMIC64_COMP_EXCHANGE_HALFWORD(1); \
- __ b(&done); \
- /* ending in 0b10 */ \
- __ bind(&two); \
- ATOMIC64_COMP_EXCHANGE_HALFWORD(2); \
- __ b(&done); \
- /* ending in 0b11 */ \
- __ bind(&three); \
- ATOMIC64_COMP_EXCHANGE_HALFWORD(3); \
- __ bind(&done); \
- __ load_and_ext(output, output); \
+#define ASSEMBLE_ATOMIC_BINOP_HALFWORD(bin_inst, extract_result) \
+ do { \
+ Register value = i.InputRegister(2); \
+ Register result = i.OutputRegister(0); \
+ Register prev = i.TempRegister(0); \
+ Register new_val = r0; \
+ Register addr = r1; \
+ Register temp = kScratchReg; \
+ AddressingMode mode = kMode_None; \
+ MemOperand op = i.MemoryOperand(&mode); \
+ Label two, done; \
+ __ lay(addr, op); \
+ __ tmll(addr, Operand(3)); \
+ __ b(Condition(2), &two); \
+ /* word boundary */ \
+ ATOMIC_BIN_OP_HALFWORD(bin_inst, 0, extract_result); \
+ __ b(&done); \
+ __ bind(&two); \
+ /* halfword boundary */ \
+ ATOMIC_BIN_OP_HALFWORD(bin_inst, 1, extract_result); \
+ __ bind(&done); \
} while (false)
-#define ASSEMBLE_ATOMIC64_COMP_EXCHANGE_WORD(load_and_ext) \
- do { \
- Register old_val = i.InputRegister(0); \
- Register new_val = i.InputRegister(1); \
- Register output = i.OutputRegister(); \
- Register addr = kScratchReg; \
- Register temp0 = r0; \
- Register temp1 = r1; \
- size_t index = 2; \
- AddressingMode mode = kMode_None; \
- MemOperand op = i.MemoryOperand(&mode, &index); \
- Label done, one; \
- __ lay(addr, op); \
- __ tmll(addr, Operand(4)); \
- __ b(Condition(2), &one); \
- /* ending in 0b000 */ \
- ATOMIC64_COMP_EXCHANGE_WORD(0); \
- __ b(&done); \
- __ bind(&one); \
- /* ending in 0b100 */ \
- ATOMIC64_COMP_EXCHANGE_WORD(1); \
- __ bind(&done); \
- __ load_and_ext(output, output); \
+#define ASSEMBLE_ATOMIC_BINOP_BYTE(bin_inst, extract_result) \
+ do { \
+ Register value = i.InputRegister(2); \
+ Register result = i.OutputRegister(0); \
+ Register addr = i.TempRegister(0); \
+ Register prev = r0; \
+ Register new_val = r1; \
+ Register temp = kScratchReg; \
+ AddressingMode mode = kMode_None; \
+ MemOperand op = i.MemoryOperand(&mode); \
+ Label done, one, two, three; \
+ __ lay(addr, op); \
+ __ tmll(addr, Operand(3)); \
+ __ b(Condition(1), &three); \
+ __ b(Condition(2), &two); \
+ __ b(Condition(4), &one); \
+ /* ending with 0b00 (word boundary) */ \
+ ATOMIC_BIN_OP_BYTE(bin_inst, 0, extract_result); \
+ __ b(&done); \
+ /* ending with 0b01 */ \
+ __ bind(&one); \
+ ATOMIC_BIN_OP_BYTE(bin_inst, 1, extract_result); \
+ __ b(&done); \
+ /* ending with 0b10 (hw boundary) */ \
+ __ bind(&two); \
+ ATOMIC_BIN_OP_BYTE(bin_inst, 2, extract_result); \
+ __ b(&done); \
+ /* ending with 0b11 */ \
+ __ bind(&three); \
+ ATOMIC_BIN_OP_BYTE(bin_inst, 3, extract_result); \
+ __ bind(&done); \
} while (false)
-#define ASSEMBLE_ATOMIC64_COMP_EXCHANGE_WORD64() \
- do { \
- Register new_val = i.InputRegister(1); \
- Register output = i.OutputRegister(); \
- Register addr = kScratchReg; \
- size_t index = 2; \
- AddressingMode mode = kMode_None; \
- MemOperand op = i.MemoryOperand(&mode, &index); \
- __ lay(addr, op); \
- __ CmpAndSwap64(output, new_val, MemOperand(addr)); \
+#define ASSEMBLE_ATOMIC64_COMP_EXCHANGE_WORD64() \
+ do { \
+ Register new_val = i.InputRegister(1); \
+ Register output = i.OutputRegister(); \
+ Register addr = kScratchReg; \
+ size_t index = 2; \
+ AddressingMode mode = kMode_None; \
+ MemOperand op = i.MemoryOperand(&mode, &index); \
+ __ lay(addr, op); \
+ __ CmpAndSwap64(output, new_val, MemOperand(addr)); \
} while (false)
void CodeGenerator::AssembleDeconstructFrame() {
@@ -1571,13 +1232,13 @@ void AdjustStackPointerForTailCall(
if (pending_pushes != nullptr) {
FlushPendingPushRegisters(tasm, state, pending_pushes);
}
- tasm->AddP(sp, sp, Operand(-stack_slot_delta * kPointerSize));
+ tasm->AddP(sp, sp, Operand(-stack_slot_delta * kSystemPointerSize));
state->IncreaseSPDelta(stack_slot_delta);
} else if (allow_shrinkage && stack_slot_delta < 0) {
if (pending_pushes != nullptr) {
FlushPendingPushRegisters(tasm, state, pending_pushes);
}
- tasm->AddP(sp, sp, Operand(-stack_slot_delta * kPointerSize));
+ tasm->AddP(sp, sp, Operand(-stack_slot_delta * kSystemPointerSize));
state->IncreaseSPDelta(stack_slot_delta);
}
}
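The kPointerSize -> kSystemPointerSize renaming running through this file separates the machine pointer size (stack slots and sp arithmetic, as here) from the size of a tagged value, which is expected to shrink under pointer compression. Roughly, and only as an illustration of the distinction (the real constants are defined elsewhere in src/):

    constexpr int kSystemPointerSize = sizeof(void*);  // stack slots, sp math
    constexpr int kTaggedSize = kSystemPointerSize;    // may become 4 on x64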
@@ -1664,22 +1325,14 @@ void CodeGenerator::BailoutIfDeoptimized() {
__ LoadW(ip,
FieldMemOperand(ip, CodeDataContainer::kKindSpecificFlagsOffset));
__ TestBit(ip, Code::kMarkedForDeoptimizationBit);
- // Ensure we're not serializing (otherwise we'd need to use an indirection to
- // access the builtin below).
- DCHECK(!isolate()->ShouldLoadConstantsFromRootList());
- Handle<Code> code = isolate()->builtins()->builtin_handle(
- Builtins::kCompileLazyDeoptimizedCode);
- __ Jump(code, RelocInfo::CODE_TARGET, ne);
+ __ Jump(BUILTIN_CODE(isolate(), CompileLazyDeoptimizedCode),
+ RelocInfo::CODE_TARGET, ne);
}
void CodeGenerator::GenerateSpeculationPoisonFromCodeStartRegister() {
Register scratch = r1;
- Label current_pc;
- __ larl(scratch, &current_pc);
-
- __ bind(&current_pc);
- __ SubP(scratch, Operand(__ pc_offset()));
+ __ ComputeCodeStartAddress(scratch);
// Calculate a mask which has all bits set in the normal case, but has all
// bits cleared if we are speculatively executing the wrong PC.
@@ -1715,8 +1368,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
DCHECK_IMPLIES(
HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
reg == kJavaScriptCallCodeStartRegister);
- __ AddP(reg, reg, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Call(reg);
+ __ CallCodeObject(reg);
} else {
__ Call(i.InputCode(0), RelocInfo::CODE_TARGET);
}
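CallCodeObject / JumpCodeObject fold away the entry-address computation that each call site previously spelled out: a Code object's first instruction sits kHeaderSize bytes past the object start, and tagged heap pointers carry a low tag bit that must be subtracted. The arithmetic being abstracted, with an illustrative header size:

    #include <cstdint>

    constexpr intptr_t kHeapObjectTag = 1;    // tagged heap pointers are odd
    constexpr intptr_t kCodeHeaderSize = 64;  // illustrative, not the real value

    // The removed sequence: AddP(reg, reg, Code::kHeaderSize - kHeapObjectTag)
    intptr_t CodeEntryAddress(intptr_t tagged_code_pointer) {
      return tagged_code_pointer + kCodeHeaderSize - kHeapObjectTag;
    }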
@@ -1724,6 +1376,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
frame_access_state()->ClearSPDelta();
break;
}
+ case kArchCallBuiltinPointer: {
+ DCHECK(!instr->InputAt(0)->IsImmediate());
+ Register builtin_pointer = i.InputRegister(0);
+ __ CallBuiltinPointer(builtin_pointer);
+ RecordCallPosition(instr);
+ frame_access_state()->ClearSPDelta();
+ break;
+ }
case kArchCallWasmFunction: {
// We must not share code targets for calls to builtins for wasm code, as
// they might need to be patched individually.
@@ -1754,8 +1414,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
DCHECK_IMPLIES(
HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
reg == kJavaScriptCallCodeStartRegister);
- __ AddP(reg, reg, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(reg);
+ __ JumpCodeObject(reg);
} else {
// We cannot use the constant pool to load the target since
// we've already restored the caller's frame.
@@ -1806,8 +1465,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
static_assert(kJavaScriptCallCodeStartRegister == r4, "ABI mismatch");
__ LoadP(r4, FieldMemOperand(func, JSFunction::kCodeOffset));
- __ AddP(r4, r4, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Call(r4);
+ __ CallCodeObject(r4);
RecordCallPosition(instr);
frame_access_state()->ClearSPDelta();
break;
@@ -1825,9 +1483,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
DCHECK(fp_mode_ == kDontSaveFPRegs || fp_mode_ == kSaveFPRegs);
// kReturnRegister0 should have been saved before entering the stub.
int bytes = __ PushCallerSaved(fp_mode_, kReturnRegister0);
- DCHECK_EQ(0, bytes % kPointerSize);
+ DCHECK(IsAligned(bytes, kSystemPointerSize));
DCHECK_EQ(0, frame_access_state()->sp_delta());
- frame_access_state()->IncreaseSPDelta(bytes / kPointerSize);
+ frame_access_state()->IncreaseSPDelta(bytes / kSystemPointerSize);
DCHECK(!caller_registers_saved_);
caller_registers_saved_ = true;
break;
@@ -1838,7 +1496,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
DCHECK(fp_mode_ == kDontSaveFPRegs || fp_mode_ == kSaveFPRegs);
// Don't overwrite the returned value.
int bytes = __ PopCallerSaved(fp_mode_, kReturnRegister0);
- frame_access_state()->IncreaseSPDelta(-(bytes / kPointerSize));
+ frame_access_state()->IncreaseSPDelta(-(bytes / kSystemPointerSize));
DCHECK_EQ(0, frame_access_state()->sp_delta());
DCHECK(caller_registers_saved_);
caller_registers_saved_ = false;
@@ -1871,7 +1529,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
// kArchRestoreCallerRegisters;
int bytes =
__ RequiredStackSizeForCallerSaved(fp_mode_, kReturnRegister0);
- frame_access_state()->IncreaseSPDelta(bytes / kPointerSize);
+ frame_access_state()->IncreaseSPDelta(bytes / kSystemPointerSize);
}
break;
}
@@ -1949,14 +1607,16 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
AddressingModeField::decode(instr->opcode());
if (addressing_mode == kMode_MRI) {
int32_t offset = i.InputInt32(1);
- ool = new (zone()) OutOfLineRecordWrite(this, object, offset, value,
- scratch0, scratch1, mode);
+ ool = new (zone())
+ OutOfLineRecordWrite(this, object, offset, value, scratch0,
+ scratch1, mode, DetermineStubCallMode());
__ StoreP(value, MemOperand(object, offset));
} else {
DCHECK_EQ(kMode_MRR, addressing_mode);
Register offset(i.InputRegister(1));
- ool = new (zone()) OutOfLineRecordWrite(this, object, offset, value,
- scratch0, scratch1, mode);
+ ool = new (zone())
+ OutOfLineRecordWrite(this, object, offset, value, scratch0,
+ scratch1, mode, DetermineStubCallMode());
__ StoreP(value, MemOperand(object, offset));
}
__ CheckPageFlag(object, scratch0,
@@ -2165,7 +1825,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
int endBit = 63 - shiftAmount;
int startBit = 63 - i.InputInt32(2);
__ RotateInsertSelectBits(i.OutputRegister(), i.InputRegister(0),
- Operand(startBit), Operand(endBit), Operand(shiftAmount), true);
+ Operand(startBit), Operand(endBit),
+ Operand(shiftAmount), true);
} else {
int shiftAmount = i.InputInt32(1);
int clearBit = 63 - i.InputInt32(2);
@@ -2182,7 +1843,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
int endBit = 63;
int startBit = 63 - i.InputInt32(2);
__ RotateInsertSelectBits(i.OutputRegister(), i.InputRegister(0),
- Operand(startBit), Operand(endBit), Operand(shiftAmount), true);
+ Operand(startBit), Operand(endBit),
+ Operand(shiftAmount), true);
} else {
int shiftAmount = i.InputInt32(1);
int clearBit = 63 - i.InputInt32(2);
@@ -2197,7 +1859,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
int endBit = 63 - i.InputInt32(2);
int startBit = 0;
__ RotateInsertSelectBits(i.OutputRegister(), i.InputRegister(0),
- Operand(startBit), Operand(endBit), Operand(shiftAmount), true);
+ Operand(startBit), Operand(endBit),
+ Operand(shiftAmount), true);
} else {
int shiftAmount = i.InputInt32(1);
int clearBit = i.InputInt32(2);
@@ -2524,7 +2187,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kS390_StackClaim: {
int num_slots = i.InputInt32(0);
- __ lay(sp, MemOperand(sp, -num_slots * kPointerSize));
+ __ lay(sp, MemOperand(sp, -num_slots * kSystemPointerSize));
frame_access_state()->IncreaseSPDelta(num_slots);
break;
}
@@ -2534,10 +2197,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (op->representation() == MachineRepresentation::kFloat64) {
__ lay(sp, MemOperand(sp, -kDoubleSize));
__ StoreDouble(i.InputDoubleRegister(0), MemOperand(sp));
- frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
+ frame_access_state()->IncreaseSPDelta(kDoubleSize /
+ kSystemPointerSize);
} else {
DCHECK_EQ(MachineRepresentation::kFloat32, op->representation());
- __ lay(sp, MemOperand(sp, -kPointerSize));
+ __ lay(sp, MemOperand(sp, -kSystemPointerSize));
__ StoreFloat32(i.InputDoubleRegister(0), MemOperand(sp));
frame_access_state()->IncreaseSPDelta(1);
}
@@ -2548,7 +2212,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kS390_PushFrame: {
int num_slots = i.InputInt32(1);
- __ lay(sp, MemOperand(sp, -num_slots * kPointerSize));
+ __ lay(sp, MemOperand(sp, -num_slots * kSystemPointerSize));
if (instr->InputAt(0)->IsFPRegister()) {
LocationOperand* op = LocationOperand::cast(instr->InputAt(0));
if (op->representation() == MachineRepresentation::kFloat64) {
@@ -2558,8 +2222,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ StoreFloat32(i.InputDoubleRegister(0), MemOperand(sp));
}
} else {
- __ StoreP(i.InputRegister(0),
- MemOperand(sp));
+ __ StoreP(i.InputRegister(0), MemOperand(sp));
}
break;
}
@@ -2569,14 +2232,15 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
LocationOperand* op = LocationOperand::cast(instr->InputAt(0));
if (op->representation() == MachineRepresentation::kFloat64) {
__ StoreDouble(i.InputDoubleRegister(0),
- MemOperand(sp, slot * kPointerSize));
+ MemOperand(sp, slot * kSystemPointerSize));
} else {
DCHECK_EQ(MachineRepresentation::kFloat32, op->representation());
__ StoreFloat32(i.InputDoubleRegister(0),
- MemOperand(sp, slot * kPointerSize));
+ MemOperand(sp, slot * kSystemPointerSize));
}
} else {
- __ StoreP(i.InputRegister(0), MemOperand(sp, slot * kPointerSize));
+ __ StoreP(i.InputRegister(0),
+ MemOperand(sp, slot * kSystemPointerSize));
}
break;
}
@@ -2875,43 +2539,19 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kS390_Lay:
__ lay(i.OutputRegister(), i.MemoryOperand());
break;
- case kWord32AtomicLoadInt8:
- __ LoadB(i.OutputRegister(), i.MemoryOperand());
- break;
- case kWord32AtomicLoadUint8:
- __ LoadlB(i.OutputRegister(), i.MemoryOperand());
- break;
- case kWord32AtomicLoadInt16:
- __ LoadHalfWordP(i.OutputRegister(), i.MemoryOperand());
- break;
- case kWord32AtomicLoadUint16:
- __ LoadLogicalHalfWordP(i.OutputRegister(), i.MemoryOperand());
- break;
- case kWord32AtomicLoadWord32:
- __ LoadlW(i.OutputRegister(), i.MemoryOperand());
- break;
- case kWord32AtomicStoreWord8:
- __ StoreByte(i.InputRegister(0), i.MemoryOperand(nullptr, 1));
- break;
- case kWord32AtomicStoreWord16:
- __ StoreHalfWord(i.InputRegister(0), i.MemoryOperand(nullptr, 1));
- break;
- case kWord32AtomicStoreWord32:
- __ StoreW(i.InputRegister(0), i.MemoryOperand(nullptr, 1));
- break;
// 0x aa bb cc dd
// index = 3..2..1..0
-#define ATOMIC_EXCHANGE(start, end, shift_amount, offset) \
- { \
- Label do_cs; \
- __ LoadlW(output, MemOperand(r1, offset)); \
- __ bind(&do_cs); \
- __ llgfr(r0, output); \
- __ RotateInsertSelectBits(r0, value, Operand(start), Operand(end), \
- Operand(shift_amount), false); \
- __ csy(output, r0, MemOperand(r1, offset)); \
- __ bne(&do_cs, Label::kNear); \
- __ srl(output, Operand(shift_amount)); \
+#define ATOMIC_EXCHANGE(start, end, shift_amount, offset) \
+ { \
+ Label do_cs; \
+ __ LoadlW(output, MemOperand(r1, offset)); \
+ __ bind(&do_cs); \
+ __ llgfr(r0, output); \
+ __ RotateInsertSelectBits(r0, value, Operand(start), Operand(end), \
+ Operand(shift_amount), false); \
+ __ csy(output, r0, MemOperand(r1, offset)); \
+ __ bne(&do_cs, Label::kNear); \
+ __ srl(output, Operand(shift_amount)); \
}
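ATOMIC_EXCHANGE is the unconditional variant of the same splice-and-CAS loop: retry csy until the selected field has been replaced, then shift the previously observed field down as the result. Portable sketch for one byte:

    #include <atomic>
    #include <cstdint>

    uint8_t ByteExchange(std::atomic<uint32_t>* word, int byte_index,
                         uint8_t value) {
      const int shift = byte_index * 8;  // little-endian byte position
      const uint32_t mask = 0xFFu << shift;
      uint32_t old_word = word->load();
      // compare_exchange_weak refreshes old_word on failure, so the loop
      // body mirrors the llgfr/RotateInsertSelectBits/csy retry above.
      while (!word->compare_exchange_weak(
          old_word, (old_word & ~mask) |
                        (static_cast<uint32_t>(value) << shift))) {
      }
      return static_cast<uint8_t>((old_word & mask) >> shift);
    }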
#ifdef V8_TARGET_BIG_ENDIAN
#define ATOMIC_EXCHANGE_BYTE(i) \
@@ -2952,6 +2592,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ATOMIC_EXCHANGE(start, end, shift_amount, -idx * 2); \
}
#endif
+ case kS390_Word64AtomicExchangeUint8:
case kWord32AtomicExchangeInt8:
case kWord32AtomicExchangeUint8: {
Register base = i.InputRegister(0);
@@ -2985,12 +2626,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ bind(&done);
if (opcode == kWord32AtomicExchangeInt8) {
- __ lbr(output, output);
+ __ lgbr(output, output);
} else {
- __ llcr(output, output);
+ __ llgcr(output, output);
}
break;
}
+ case kS390_Word64AtomicExchangeUint16:
case kWord32AtomicExchangeInt16:
case kWord32AtomicExchangeUint16: {
Register base = i.InputRegister(0);
@@ -3011,13 +2653,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ATOMIC_EXCHANGE_HALFWORD(1);
__ bind(&done);
- if (opcode == kWord32AtomicExchangeInt8) {
- __ lhr(output, output);
+ if (opcode == kWord32AtomicExchangeInt16) {
+ __ lghr(output, output);
} else {
- __ llhr(output, output);
+ __ llghr(output, output);
}
break;
}
+ case kS390_Word64AtomicExchangeUint32:
case kWord32AtomicExchangeWord32: {
Register base = i.InputRegister(0);
Register index = i.InputRegister(1);
@@ -3034,48 +2677,53 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kWord32AtomicCompareExchangeInt8:
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_BYTE(LoadB);
break;
+ case kS390_Word64AtomicCompareExchangeUint8:
case kWord32AtomicCompareExchangeUint8:
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_BYTE(LoadlB);
break;
case kWord32AtomicCompareExchangeInt16:
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_HALFWORD(LoadHalfWordP);
break;
+ case kS390_Word64AtomicCompareExchangeUint16:
case kWord32AtomicCompareExchangeUint16:
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_HALFWORD(LoadLogicalHalfWordP);
break;
+ case kS390_Word64AtomicCompareExchangeUint32:
case kWord32AtomicCompareExchangeWord32:
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_WORD();
break;
-#define ATOMIC_BINOP_CASE(op, inst) \
- case kWord32Atomic##op##Int8: \
- ASSEMBLE_ATOMIC_BINOP_BYTE(inst, [&]() { \
- intptr_t shift_right = static_cast<intptr_t>(shift_amount); \
- __ srlk(result, prev, Operand(shift_right)); \
- __ LoadB(result, result); \
- }); \
- break; \
- case kWord32Atomic##op##Uint8: \
- ASSEMBLE_ATOMIC_BINOP_BYTE(inst, [&]() { \
- int rotate_left = shift_amount == 0 ? 0 : 64 - shift_amount; \
- __ RotateInsertSelectBits(result, prev, Operand(56), \
- Operand(63), Operand(static_cast<intptr_t>(rotate_left)), \
- true); \
- }); \
- break; \
- case kWord32Atomic##op##Int16: \
- ASSEMBLE_ATOMIC_BINOP_HALFWORD(inst, [&]() { \
- intptr_t shift_right = static_cast<intptr_t>(shift_amount); \
- __ srlk(result, prev, Operand(shift_right)); \
- __ LoadHalfWordP(result, result); \
- }); \
- break; \
- case kWord32Atomic##op##Uint16: \
- ASSEMBLE_ATOMIC_BINOP_HALFWORD(inst, [&]() { \
- int rotate_left = shift_amount == 0 ? 0 : 64 - shift_amount; \
- __ RotateInsertSelectBits(result, prev, Operand(48), \
- Operand(63), Operand(static_cast<intptr_t>(rotate_left)), \
- true); \
- }); \
+#define ATOMIC_BINOP_CASE(op, inst) \
+ case kWord32Atomic##op##Int8: \
+ ASSEMBLE_ATOMIC_BINOP_BYTE(inst, [&]() { \
+ intptr_t shift_right = static_cast<intptr_t>(shift_amount); \
+ __ srlk(result, prev, Operand(shift_right)); \
+ __ LoadB(result, result); \
+ }); \
+ break; \
+ case kS390_Word64Atomic##op##Uint8: \
+ case kWord32Atomic##op##Uint8: \
+ ASSEMBLE_ATOMIC_BINOP_BYTE(inst, [&]() { \
+ int rotate_left = shift_amount == 0 ? 0 : 64 - shift_amount; \
+ __ RotateInsertSelectBits(result, prev, Operand(56), Operand(63), \
+ Operand(static_cast<intptr_t>(rotate_left)), \
+ true); \
+ }); \
+ break; \
+ case kWord32Atomic##op##Int16: \
+ ASSEMBLE_ATOMIC_BINOP_HALFWORD(inst, [&]() { \
+ intptr_t shift_right = static_cast<intptr_t>(shift_amount); \
+ __ srlk(result, prev, Operand(shift_right)); \
+ __ LoadHalfWordP(result, result); \
+ }); \
+ break; \
+ case kS390_Word64Atomic##op##Uint16: \
+ case kWord32Atomic##op##Uint16: \
+ ASSEMBLE_ATOMIC_BINOP_HALFWORD(inst, [&]() { \
+ int rotate_left = shift_amount == 0 ? 0 : 64 - shift_amount; \
+ __ RotateInsertSelectBits(result, prev, Operand(48), Operand(63), \
+ Operand(static_cast<intptr_t>(rotate_left)), \
+ true); \
+ }); \
break;
ATOMIC_BINOP_CASE(Add, Add32)
ATOMIC_BINOP_CASE(Sub, Sub32)
@@ -3083,76 +2731,26 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ATOMIC_BINOP_CASE(Or, Or)
ATOMIC_BINOP_CASE(Xor, Xor)
#undef ATOMIC_BINOP_CASE
+ case kS390_Word64AtomicAddUint32:
case kWord32AtomicAddWord32:
ASSEMBLE_ATOMIC_BINOP_WORD(laa);
break;
+ case kS390_Word64AtomicSubUint32:
case kWord32AtomicSubWord32:
ASSEMBLE_ATOMIC_BINOP_WORD(LoadAndSub32);
break;
+ case kS390_Word64AtomicAndUint32:
case kWord32AtomicAndWord32:
ASSEMBLE_ATOMIC_BINOP_WORD(lan);
break;
+ case kS390_Word64AtomicOrUint32:
case kWord32AtomicOrWord32:
ASSEMBLE_ATOMIC_BINOP_WORD(lao);
break;
+ case kS390_Word64AtomicXorUint32:
case kWord32AtomicXorWord32:
ASSEMBLE_ATOMIC_BINOP_WORD(lax);
break;
- case kS390_Word64AtomicLoadUint8:
- __ LoadlB(i.OutputRegister(), i.MemoryOperand());
- break;
- case kS390_Word64AtomicLoadUint16:
- __ LoadLogicalHalfWordP(i.OutputRegister(), i.MemoryOperand());
- break;
- case kS390_Word64AtomicLoadUint32:
- __ LoadlW(i.OutputRegister(), i.MemoryOperand());
- break;
- case kS390_Word64AtomicLoadUint64:
- __ lg(i.OutputRegister(), i.MemoryOperand());
- break;
- case kS390_Word64AtomicStoreUint8:
- __ StoreByte(i.InputRegister(0), i.MemoryOperand(nullptr, 1));
- break;
- case kS390_Word64AtomicStoreUint16:
- __ StoreHalfWord(i.InputRegister(0), i.MemoryOperand(nullptr, 1));
- break;
- case kS390_Word64AtomicStoreUint32:
- __ StoreW(i.InputRegister(0), i.MemoryOperand(nullptr, 1));
- break;
- case kS390_Word64AtomicStoreUint64:
- __ stg(i.InputRegister(0), i.MemoryOperand(nullptr, 1));
- break;
-#define ATOMIC64_BINOP_CASE(op, inst) \
- case kS390_Word64Atomic##op##Uint8: \
- ASSEMBLE_ATOMIC64_BINOP_BYTE(inst, [&]() { \
- int rotate_left = shift_amount == 0 ? 0 : 64 - shift_amount; \
- __ RotateInsertSelectBits(result, prev, Operand(56), Operand(63), \
- Operand(static_cast<intptr_t>(rotate_left)), \
- true); \
- }); \
- break; \
- case kS390_Word64Atomic##op##Uint16: \
- ASSEMBLE_ATOMIC64_BINOP_HALFWORD(inst, [&]() { \
- int rotate_left = shift_amount == 0 ? 0 : 64 - shift_amount; \
- __ RotateInsertSelectBits(result, prev, Operand(48), Operand(63), \
- Operand(static_cast<intptr_t>(rotate_left)), \
- true); \
- }); \
- break; \
- case kS390_Word64Atomic##op##Uint32: \
- ASSEMBLE_ATOMIC64_BINOP_WORD(inst, [&]() { \
- int rotate_left = shift_amount == 0 ? 0 : 64 - shift_amount; \
- __ RotateInsertSelectBits(result, prev, Operand(32), Operand(63), \
- Operand(static_cast<intptr_t>(rotate_left)), \
- true); \
- }); \
- break;
- ATOMIC64_BINOP_CASE(Add, AddP)
- ATOMIC64_BINOP_CASE(Sub, SubP)
- ATOMIC64_BINOP_CASE(And, AndP)
- ATOMIC64_BINOP_CASE(Or, OrP)
- ATOMIC64_BINOP_CASE(Xor, XorP)
-#undef ATOMIC64_BINOP_CASE
case kS390_Word64AtomicAddUint64:
ASSEMBLE_ATOMIC_BINOP_WORD64(laag);
break;
@@ -3168,165 +2766,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kS390_Word64AtomicXorUint64:
ASSEMBLE_ATOMIC_BINOP_WORD64(laxg);
break;
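// Editor's note -- the laa/laag family used above comes from the
// z/Architecture interlocked-access facility: a single instruction performs
// the whole read-modify-write atomically, so word- and doubleword-wide
// binops need none of the compare-and-swap loops required for the sub-word
// cases. A C++ equivalent of one laag (a sketch):
#include <atomic>
#include <cstdint>

int64_t LoadAndAdd64(std::atomic<int64_t>& mem, int64_t value) {
  return mem.fetch_add(value);  // returns the old value, as laag does
}

int main() {
  std::atomic<int64_t> m{5};
  return (LoadAndAdd64(m, 2) == 5 && m.load() == 7) ? 0 : 1;
}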
-#define ATOMIC64_EXCHANGE(start, end, shift_amount, offset) \
- { \
- Label do_cs; \
- __ lg(output, MemOperand(r1, offset)); \
- __ bind(&do_cs); \
- __ lgr(r0, output); \
- __ RotateInsertSelectBits(r0, value, Operand(start), Operand(end), \
- Operand(shift_amount), false); \
- __ csg(output, r0, MemOperand(r1, offset)); \
- __ bne(&do_cs, Label::kNear); \
- __ srlg(output, output, Operand(shift_amount)); \
- }
-#ifdef V8_TARGET_BIG_ENDIAN
-#define ATOMIC64_EXCHANGE_BYTE(i) \
- { \
- constexpr int idx = (i); \
- constexpr int start = 8 * idx; \
- constexpr int end = start + 7; \
- constexpr int shift_amount = (7 - idx) * 8; \
- ATOMIC64_EXCHANGE(start, end, shift_amount, -idx); \
- }
-#define ATOMIC64_EXCHANGE_HALFWORD(i) \
- { \
- constexpr int idx = (i); \
- constexpr int start = 16 * idx; \
- constexpr int end = start + 15; \
- constexpr int shift_amount = (3 - idx) * 16; \
- ATOMIC64_EXCHANGE(start, end, shift_amount, -idx * 2); \
- }
-#define ATOMIC64_EXCHANGE_WORD(i) \
- { \
- constexpr int idx = (i); \
- constexpr int start = 32 * idx; \
- constexpr int end = start + 31; \
- constexpr int shift_amount = (1 - idx) * 32; \
- ATOMIC64_EXCHANGE(start, end, shift_amount, -idx * 4); \
- }
-#else
-#define ATOMIC64_EXCHANGE_BYTE(i) \
- { \
- constexpr int idx = (i); \
- constexpr int start = 32 + 8 * (3 - idx); \
- constexpr int end = start + 7; \
- constexpr int shift_amount = idx * 8; \
- ATOMIC64_EXCHANGE(start, end, shift_amount, -idx); \
- }
-#define ATOMIC64_EXCHANGE_HALFWORD(i) \
- { \
- constexpr int idx = (i); \
- constexpr int start = 32 + 16 * (1 - idx); \
- constexpr int end = start + 15; \
- constexpr int shift_amount = idx * 16; \
- ATOMIC64_EXCHANGE(start, end, shift_amount, -idx * 2); \
- }
-#define ATOMIC64_EXCHANGE_WORD(i) \
- { \
- constexpr int idx = (i); \
- constexpr int start = 32 * (1 - idx); \
- constexpr int end = start + 31; \
- constexpr int shift_amount = idx * 32; \
- ATOMIC64_EXCHANGE(start, end, shift_amount, -idx * 4); \
- }
-#endif // V8_TARGET_BIG_ENDIAN
- case kS390_Word64AtomicExchangeUint8: {
- Register base = i.InputRegister(0);
- Register index = i.InputRegister(1);
- Register value = i.InputRegister(2);
- Register output = i.OutputRegister();
- Label done, leftmost0, leftmost1, two, three, four, five, seven;
- __ la(r1, MemOperand(base, index));
- __ tmll(r1, Operand(7));
- __ b(Condition(1), &seven);
- __ b(Condition(2), &leftmost1);
- __ b(Condition(4), &leftmost0);
- /* ending with 0b000 */
- ATOMIC64_EXCHANGE_BYTE(0);
- __ b(&done);
- /* ending in 0b001 to 0b011 */
- __ bind(&leftmost0);
- __ tmll(r1, Operand(3));
- __ b(Condition(1), &three);
- __ b(Condition(2), &two);
- ATOMIC64_EXCHANGE_BYTE(1);
- __ b(&done);
- /* ending in 0b010 */
- __ bind(&two);
- ATOMIC64_EXCHANGE_BYTE(2);
- __ b(&done);
- /* ending in 0b011 */
- __ bind(&three);
- ATOMIC64_EXCHANGE_BYTE(3);
- __ b(&done);
- /* ending in 0b100 to 0b110 */
- __ bind(&leftmost1);
- __ tmll(r1, Operand(3));
- __ b(Condition(8), &four);
- __ b(Condition(4), &five);
- ATOMIC64_EXCHANGE_BYTE(6);
- __ b(&done);
- /* ending in 0b100 */
- __ bind(&four);
- ATOMIC64_EXCHANGE_BYTE(4);
- __ b(&done);
- /* ending in 0b101 */
- __ bind(&five);
- ATOMIC64_EXCHANGE_BYTE(5);
- __ b(&done);
- /* ending in 0b111 */
- __ bind(&seven);
- ATOMIC64_EXCHANGE_BYTE(7);
- __ bind(&done);
- break;
- }
- case kS390_Word64AtomicExchangeUint16: {
- Register base = i.InputRegister(0);
- Register index = i.InputRegister(1);
- Register value = i.InputRegister(2);
- Register output = i.OutputRegister();
- Label done, one, two, three;
- __ la(r1, MemOperand(base, index));
- __ tmll(r1, Operand(6));
- __ b(Condition(1), &three);
- __ b(Condition(2), &two);
- __ b(Condition(4), &one);
- /* ending in 0b00 */
- ATOMIC64_EXCHANGE_HALFWORD(0);
- __ b(&done);
- /* ending in 0b01 */
- __ bind(&one);
- ATOMIC64_EXCHANGE_HALFWORD(1);
- __ b(&done);
- /* ending in 0b10 */
- __ bind(&two);
- ATOMIC64_EXCHANGE_HALFWORD(2);
- __ b(&done);
- /* ending in 0b11 */
- __ bind(&three);
- ATOMIC64_EXCHANGE_HALFWORD(3);
- __ bind(&done);
- break;
- }
- case kS390_Word64AtomicExchangeUint32: {
- Register base = i.InputRegister(0);
- Register index = i.InputRegister(1);
- Register value = i.InputRegister(2);
- Register output = i.OutputRegister();
- Label done, one;
- __ la(r1, MemOperand(base, index));
- __ tmll(r1, Operand(4));
- __ b(Condition(2), &one);
- /* ending in 0b0 */
- ATOMIC64_EXCHANGE_WORD(0);
- __ b(&done);
- __ bind(&one);
- /* ending in 0b1 */
- ATOMIC64_EXCHANGE_WORD(1);
- __ bind(&done);
- break;
- }
case kS390_Word64AtomicExchangeUint64: {
Register base = i.InputRegister(0);
Register index = i.InputRegister(1);
@@ -3335,20 +2774,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Label do_cs;
__ la(r1, MemOperand(base, index));
__ lg(output, MemOperand(r1));
- __ csg(output, value, MemOperand(r1));
__ bind(&do_cs);
+ __ csg(output, value, MemOperand(r1));
__ bne(&do_cs, Label::kNear);
break;
}
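// Editor's note -- a C++ model of the corrected exchange loop above (a
// sketch, not the emitted code). csg is z/Architecture compare-and-swap: on
// failure it reloads the comparand register with the value currently in
// memory, so it must sit inside the retry loop -- exactly what moving it
// after bind(&do_cs) achieves.
#include <atomic>
#include <cstdint>

uint64_t AtomicExchange64(std::atomic<uint64_t>& mem, uint64_t value) {
  uint64_t output = mem.load(std::memory_order_relaxed);  // __ lg
  while (!mem.compare_exchange_strong(output, value)) {   // __ csg; __ bne
    // On failure, compare_exchange_strong has refreshed `output`,
    // mirroring csg's update of the comparand register.
  }
  return output;  // the old memory value, the instruction's result
}

int main() {
  std::atomic<uint64_t> m{42};
  return (AtomicExchange64(m, 7) == 42 && m.load() == 7) ? 0 : 1;
}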
- case kS390_Word64AtomicCompareExchangeUint8:
- ASSEMBLE_ATOMIC64_COMP_EXCHANGE_BYTE(LoadlB);
- break;
- case kS390_Word64AtomicCompareExchangeUint16:
- ASSEMBLE_ATOMIC64_COMP_EXCHANGE_HALFWORD(LoadLogicalHalfWordP);
- break;
- case kS390_Word64AtomicCompareExchangeUint32:
- ASSEMBLE_ATOMIC64_COMP_EXCHANGE_WORD(LoadlW);
- break;
case kS390_Word64AtomicCompareExchangeUint64:
ASSEMBLE_ATOMIC64_COMP_EXCHANGE_WORD64();
break;
@@ -3437,11 +2867,12 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
} else {
gen_->AssembleSourcePosition(instr_);
// A direct call to a wasm runtime stub defined in this module.
- // Just encode the stub index. This will be patched at relocation.
+ // Just encode the stub index. This will be patched when the code
+ // is added to the native module and copied into wasm code space.
__ Call(static_cast<Address>(trap_id), RelocInfo::WASM_STUB_CALL);
ReferenceMap* reference_map =
new (gen_->zone()) ReferenceMap(gen_->zone());
- gen_->RecordSafepoint(reference_map, Safepoint::kSimple, 0,
+ gen_->RecordSafepoint(reference_map, Safepoint::kSimple,
Safepoint::kNoLazyDeopt);
if (FLAG_debug_code) {
__ stop(GetAbortReason(AbortReason::kUnexpectedReturnFromWasmTrap));
@@ -3535,7 +2966,7 @@ void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
__ CmpLogicalP(input, Operand(case_count));
__ bge(GetLabel(i.InputRpo(1)));
__ larl(kScratchReg, table);
- __ ShiftLeftP(r1, input, Operand(kPointerSizeLog2));
+ __ ShiftLeftP(r1, input, Operand(kSystemPointerSizeLog2));
__ LoadP(kScratchReg, MemOperand(kScratchReg, r1));
__ Jump(kScratchReg);
}
@@ -3550,7 +2981,7 @@ void CodeGenerator::FinishFrame(Frame* frame) {
DCHECK_EQ(kNumCalleeSavedDoubles,
base::bits::CountPopulation(double_saves));
frame->AllocateSavedCalleeRegisterSlots(kNumCalleeSavedDoubles *
- (kDoubleSize / kPointerSize));
+ (kDoubleSize / kSystemPointerSize));
}
// Save callee-saved registers.
const RegList saves = call_descriptor->CalleeSavedRegisters();
@@ -3581,6 +3012,16 @@ void CodeGenerator::AssembleConstructFrame() {
__ StubPrologue(type);
if (call_descriptor->IsWasmFunctionCall()) {
__ Push(kWasmInstanceRegister);
+ } else if (call_descriptor->IsWasmImportWrapper()) {
+      // WASM import wrappers are passed a tuple in place of the instance.
+ // Unpack the tuple into the instance and the target callable.
+ // This must be done here in the codegen because it cannot be expressed
+ // properly in the graph.
+ __ LoadP(kJSFunctionRegister,
+ FieldMemOperand(kWasmInstanceRegister, Tuple2::kValue2Offset));
+ __ LoadP(kWasmInstanceRegister,
+ FieldMemOperand(kWasmInstanceRegister, Tuple2::kValue1Offset));
+ __ Push(kWasmInstanceRegister);
}
}
}
@@ -3615,24 +3056,25 @@ void CodeGenerator::AssembleConstructFrame() {
// If the frame is bigger than the stack, we throw the stack overflow
// exception unconditionally. Thereby we can avoid the integer overflow
// check in the condition code.
- if ((shrink_slots * kPointerSize) < (FLAG_stack_size * 1024)) {
+ if ((shrink_slots * kSystemPointerSize) < (FLAG_stack_size * 1024)) {
Register scratch = r1;
- __ LoadP(scratch, FieldMemOperand(
- kWasmInstanceRegister,
+ __ LoadP(
+ scratch,
+ FieldMemOperand(kWasmInstanceRegister,
WasmInstanceObject::kRealStackLimitAddressOffset));
__ LoadP(scratch, MemOperand(scratch));
- __ AddP(scratch, scratch, Operand(shrink_slots * kPointerSize));
+ __ AddP(scratch, scratch, Operand(shrink_slots * kSystemPointerSize));
__ CmpLogicalP(sp, scratch);
__ bge(&done);
}
__ LoadP(r4, FieldMemOperand(kWasmInstanceRegister,
WasmInstanceObject::kCEntryStubOffset));
- __ Move(cp, Smi::kZero);
+ __ Move(cp, Smi::zero());
__ CallRuntimeWithCEntry(Runtime::kThrowWasmStackOverflow, r4);
      // We come from WebAssembly; there are no references for the GC.
ReferenceMap* reference_map = new (zone()) ReferenceMap(zone());
- RecordSafepoint(reference_map, Safepoint::kSimple, 0,
+ RecordSafepoint(reference_map, Safepoint::kSimple,
Safepoint::kNoLazyDeopt);
if (FLAG_debug_code) {
__ stop(GetAbortReason(AbortReason::kUnexpectedReturnFromThrow));
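// Editor's note -- a scalar model of the stack check above: the frame fits
// when sp is still at or above real_stack_limit + frame_size. Skipping the
// integer overflow check is safe only because the surrounding `if` has
// already established that frame_size is below FLAG_stack_size * 1024.
#include <cstdint>

bool FrameFits(uintptr_t sp, uintptr_t real_stack_limit, uintptr_t frame_size) {
  // __ AddP(scratch, scratch, ...); __ CmpLogicalP(sp, scratch); __ bge(&done)
  return sp >= real_stack_limit + frame_size;
}

int main() { return FrameFits(0x2000, 0x1000, 0x800) ? 0 : 1; }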
@@ -3644,9 +3086,9 @@ void CodeGenerator::AssembleConstructFrame() {
// Skip callee-saved and return slots, which are pushed below.
shrink_slots -= base::bits::CountPopulation(saves);
shrink_slots -= frame()->GetReturnSlotCount();
- shrink_slots -=
- (kDoubleSize / kPointerSize) * base::bits::CountPopulation(saves_fp);
- __ lay(sp, MemOperand(sp, -shrink_slots * kPointerSize));
+ shrink_slots -= (kDoubleSize / kSystemPointerSize) *
+ base::bits::CountPopulation(saves_fp);
+ __ lay(sp, MemOperand(sp, -shrink_slots * kSystemPointerSize));
}
// Save callee-saved Double registers.
@@ -3664,7 +3106,7 @@ void CodeGenerator::AssembleConstructFrame() {
const int returns = frame()->GetReturnSlotCount();
if (returns != 0) {
// Create space for returns.
- __ lay(sp, MemOperand(sp, -returns * kPointerSize));
+ __ lay(sp, MemOperand(sp, -returns * kSystemPointerSize));
}
}
@@ -3675,7 +3117,7 @@ void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
const int returns = frame()->GetReturnSlotCount();
if (returns != 0) {
// Create space for returns.
- __ lay(sp, MemOperand(sp, returns * kPointerSize));
+ __ lay(sp, MemOperand(sp, returns * kSystemPointerSize));
}
// Restore registers.
@@ -3761,7 +3203,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
break;
case Constant::kInt64:
#if V8_TARGET_ARCH_S390X
- if (RelocInfo::IsWasmPtrReference(src.rmode())) {
+ if (RelocInfo::IsWasmReference(src.rmode())) {
__ mov(dst, Operand(src.ToInt64(), src.rmode()));
} else {
__ Load(dst, Operand(src.ToInt64()));
@@ -3919,7 +3361,6 @@ void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) {
}
}
-
#undef __
} // namespace compiler
diff --git a/deps/v8/src/compiler/backend/s390/instruction-codes-s390.h b/deps/v8/src/compiler/backend/s390/instruction-codes-s390.h
new file mode 100644
index 0000000000..1f9408ee47
--- /dev/null
+++ b/deps/v8/src/compiler/backend/s390/instruction-codes-s390.h
@@ -0,0 +1,217 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_BACKEND_S390_INSTRUCTION_CODES_S390_H_
+#define V8_COMPILER_BACKEND_S390_INSTRUCTION_CODES_S390_H_
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// S390-specific opcodes that specify which assembly sequence to emit.
+// Most opcodes specify a single instruction.
+#define TARGET_ARCH_OPCODE_LIST(V) \
+ V(S390_Abs32) \
+ V(S390_Abs64) \
+ V(S390_And32) \
+ V(S390_And64) \
+ V(S390_Or32) \
+ V(S390_Or64) \
+ V(S390_Xor32) \
+ V(S390_Xor64) \
+ V(S390_ShiftLeft32) \
+ V(S390_ShiftLeft64) \
+ V(S390_ShiftLeftPair) \
+ V(S390_ShiftRight32) \
+ V(S390_ShiftRight64) \
+ V(S390_ShiftRightPair) \
+ V(S390_ShiftRightArith32) \
+ V(S390_ShiftRightArith64) \
+ V(S390_ShiftRightArithPair) \
+ V(S390_RotRight32) \
+ V(S390_RotRight64) \
+ V(S390_Not32) \
+ V(S390_Not64) \
+ V(S390_RotLeftAndClear64) \
+ V(S390_RotLeftAndClearLeft64) \
+ V(S390_RotLeftAndClearRight64) \
+ V(S390_Lay) \
+ V(S390_Add32) \
+ V(S390_Add64) \
+ V(S390_AddPair) \
+ V(S390_AddFloat) \
+ V(S390_AddDouble) \
+ V(S390_Sub32) \
+ V(S390_Sub64) \
+ V(S390_SubFloat) \
+ V(S390_SubDouble) \
+ V(S390_SubPair) \
+ V(S390_MulPair) \
+ V(S390_Mul32) \
+ V(S390_Mul32WithOverflow) \
+ V(S390_Mul64) \
+ V(S390_MulHigh32) \
+ V(S390_MulHighU32) \
+ V(S390_MulFloat) \
+ V(S390_MulDouble) \
+ V(S390_Div32) \
+ V(S390_Div64) \
+ V(S390_DivU32) \
+ V(S390_DivU64) \
+ V(S390_DivFloat) \
+ V(S390_DivDouble) \
+ V(S390_Mod32) \
+ V(S390_Mod64) \
+ V(S390_ModU32) \
+ V(S390_ModU64) \
+ V(S390_ModDouble) \
+ V(S390_Neg32) \
+ V(S390_Neg64) \
+ V(S390_NegDouble) \
+ V(S390_NegFloat) \
+ V(S390_SqrtFloat) \
+ V(S390_FloorFloat) \
+ V(S390_CeilFloat) \
+ V(S390_TruncateFloat) \
+ V(S390_AbsFloat) \
+ V(S390_SqrtDouble) \
+ V(S390_FloorDouble) \
+ V(S390_CeilDouble) \
+ V(S390_TruncateDouble) \
+ V(S390_RoundDouble) \
+ V(S390_MaxFloat) \
+ V(S390_MaxDouble) \
+ V(S390_MinFloat) \
+ V(S390_MinDouble) \
+ V(S390_AbsDouble) \
+ V(S390_Cntlz32) \
+ V(S390_Cntlz64) \
+ V(S390_Popcnt32) \
+ V(S390_Popcnt64) \
+ V(S390_Cmp32) \
+ V(S390_Cmp64) \
+ V(S390_CmpFloat) \
+ V(S390_CmpDouble) \
+ V(S390_Tst32) \
+ V(S390_Tst64) \
+ V(S390_Push) \
+ V(S390_PushFrame) \
+ V(S390_StackClaim) \
+ V(S390_StoreToStackSlot) \
+ V(S390_SignExtendWord8ToInt32) \
+ V(S390_SignExtendWord16ToInt32) \
+ V(S390_SignExtendWord8ToInt64) \
+ V(S390_SignExtendWord16ToInt64) \
+ V(S390_SignExtendWord32ToInt64) \
+ V(S390_Uint32ToUint64) \
+ V(S390_Int64ToInt32) \
+ V(S390_Int64ToFloat32) \
+ V(S390_Int64ToDouble) \
+ V(S390_Uint64ToFloat32) \
+ V(S390_Uint64ToDouble) \
+ V(S390_Int32ToFloat32) \
+ V(S390_Int32ToDouble) \
+ V(S390_Uint32ToFloat32) \
+ V(S390_Uint32ToDouble) \
+ V(S390_Float32ToInt64) \
+ V(S390_Float32ToUint64) \
+ V(S390_Float32ToInt32) \
+ V(S390_Float32ToUint32) \
+ V(S390_Float32ToDouble) \
+ V(S390_Float64SilenceNaN) \
+ V(S390_DoubleToInt32) \
+ V(S390_DoubleToUint32) \
+ V(S390_DoubleToInt64) \
+ V(S390_DoubleToUint64) \
+ V(S390_DoubleToFloat32) \
+ V(S390_DoubleExtractLowWord32) \
+ V(S390_DoubleExtractHighWord32) \
+ V(S390_DoubleInsertLowWord32) \
+ V(S390_DoubleInsertHighWord32) \
+ V(S390_DoubleConstruct) \
+ V(S390_BitcastInt32ToFloat32) \
+ V(S390_BitcastFloat32ToInt32) \
+ V(S390_BitcastInt64ToDouble) \
+ V(S390_BitcastDoubleToInt64) \
+ V(S390_LoadWordS8) \
+ V(S390_LoadWordU8) \
+ V(S390_LoadWordS16) \
+ V(S390_LoadWordU16) \
+ V(S390_LoadWordS32) \
+ V(S390_LoadWordU32) \
+ V(S390_LoadAndTestWord32) \
+ V(S390_LoadAndTestWord64) \
+ V(S390_LoadAndTestFloat32) \
+ V(S390_LoadAndTestFloat64) \
+ V(S390_LoadReverse16RR) \
+ V(S390_LoadReverse32RR) \
+ V(S390_LoadReverse64RR) \
+ V(S390_LoadReverse16) \
+ V(S390_LoadReverse32) \
+ V(S390_LoadReverse64) \
+ V(S390_LoadWord64) \
+ V(S390_LoadFloat32) \
+ V(S390_LoadDouble) \
+ V(S390_StoreWord8) \
+ V(S390_StoreWord16) \
+ V(S390_StoreWord32) \
+ V(S390_StoreWord64) \
+ V(S390_StoreReverse16) \
+ V(S390_StoreReverse32) \
+ V(S390_StoreReverse64) \
+ V(S390_StoreFloat32) \
+ V(S390_StoreDouble) \
+ V(S390_Word64AtomicExchangeUint8) \
+ V(S390_Word64AtomicExchangeUint16) \
+ V(S390_Word64AtomicExchangeUint32) \
+ V(S390_Word64AtomicExchangeUint64) \
+ V(S390_Word64AtomicCompareExchangeUint8) \
+ V(S390_Word64AtomicCompareExchangeUint16) \
+ V(S390_Word64AtomicCompareExchangeUint32) \
+ V(S390_Word64AtomicCompareExchangeUint64) \
+ V(S390_Word64AtomicAddUint8) \
+ V(S390_Word64AtomicAddUint16) \
+ V(S390_Word64AtomicAddUint32) \
+ V(S390_Word64AtomicAddUint64) \
+ V(S390_Word64AtomicSubUint8) \
+ V(S390_Word64AtomicSubUint16) \
+ V(S390_Word64AtomicSubUint32) \
+ V(S390_Word64AtomicSubUint64) \
+ V(S390_Word64AtomicAndUint8) \
+ V(S390_Word64AtomicAndUint16) \
+ V(S390_Word64AtomicAndUint32) \
+ V(S390_Word64AtomicAndUint64) \
+ V(S390_Word64AtomicOrUint8) \
+ V(S390_Word64AtomicOrUint16) \
+ V(S390_Word64AtomicOrUint32) \
+ V(S390_Word64AtomicOrUint64) \
+ V(S390_Word64AtomicXorUint8) \
+ V(S390_Word64AtomicXorUint16) \
+ V(S390_Word64AtomicXorUint32) \
+ V(S390_Word64AtomicXorUint64)
+
+// Addressing modes represent the "shape" of inputs to an instruction.
+// Many instructions support multiple addressing modes. Addressing modes
+// are encoded into the InstructionCode of the instruction and tell the
+// code generator after register allocation which assembler method to call.
+//
+// We use the following local notation for addressing modes:
+//
+// R = register
+// O = register or stack slot
+// D = double register
+// I = immediate (handle, external, int32)
+// MRI = [register + immediate]
+// MRR = [register + register]
+#define TARGET_ADDRESSING_MODE_LIST(V) \
+ V(MR) /* [%r0 ] */ \
+ V(MRI) /* [%r0 + K] */ \
+ V(MRR) /* [%r0 + %r1 ] */ \
+ V(MRRI) /* [%r0 + %r1 + K] */
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_BACKEND_S390_INSTRUCTION_CODES_S390_H_
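// Editor's note -- a hedged sketch of how an addressing mode travels with an
// opcode: both are packed into a single InstructionCode word through
// bitfields, and the code generator decodes the mode after register
// allocation. The field widths below are illustrative, not V8's actual
// layout.
#include <cassert>
#include <cstdint>

enum AddressingMode : uint32_t { kMode_None, kMode_MR, kMode_MRI, kMode_MRR, kMode_MRRI };

constexpr uint32_t kOpcodeBits = 9;  // assumed width for this sketch

constexpr uint32_t Encode(uint32_t opcode, AddressingMode mode) {
  return opcode | (static_cast<uint32_t>(mode) << kOpcodeBits);
}
constexpr AddressingMode DecodeMode(uint32_t code) {
  return static_cast<AddressingMode>(code >> kOpcodeBits);
}

int main() {
  uint32_t code = Encode(/*some opcode id*/ 123, kMode_MRI);
  assert(DecodeMode(code) == kMode_MRI);
  assert((code & ((1u << kOpcodeBits) - 1)) == 123);
}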
diff --git a/deps/v8/src/compiler/s390/instruction-scheduler-s390.cc b/deps/v8/src/compiler/backend/s390/instruction-scheduler-s390.cc
index fbd81e17c4..052784ca79 100644
--- a/deps/v8/src/compiler/s390/instruction-scheduler-s390.cc
+++ b/deps/v8/src/compiler/backend/s390/instruction-scheduler-s390.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/compiler/instruction-scheduler.h"
+#include "src/compiler/backend/instruction-scheduler.h"
namespace v8 {
namespace internal {
@@ -169,16 +169,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kS390_StackClaim:
return kHasSideEffect;
- case kS390_Word64AtomicLoadUint8:
- case kS390_Word64AtomicLoadUint16:
- case kS390_Word64AtomicLoadUint32:
- case kS390_Word64AtomicLoadUint64:
- return kIsLoadOperation;
-
- case kS390_Word64AtomicStoreUint8:
- case kS390_Word64AtomicStoreUint16:
- case kS390_Word64AtomicStoreUint32:
- case kS390_Word64AtomicStoreUint64:
case kS390_Word64AtomicExchangeUint8:
case kS390_Word64AtomicExchangeUint16:
case kS390_Word64AtomicExchangeUint32:
diff --git a/deps/v8/src/compiler/s390/instruction-selector-s390.cc b/deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc
index 018c288939..b0afed0bd1 100644
--- a/deps/v8/src/compiler/s390/instruction-selector-s390.cc
+++ b/deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc
@@ -3,7 +3,7 @@
// found in the LICENSE file.
#include "src/base/adapters.h"
-#include "src/compiler/instruction-selector-impl.h"
+#include "src/compiler/backend/instruction-selector-impl.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"
#include "src/frame-constants.h"
@@ -281,8 +281,6 @@ bool S390OpcodeOnlySupport12BitDisp(InstructionCode op) {
: OperandMode::kInt20Imm)
ArchOpcode SelectLoadOpcode(Node* node) {
- NodeMatcher m(node);
- DCHECK(m.IsLoad() || m.IsPoisonedLoad());
LoadRepresentation load_rep = LoadRepresentationOf(node->op());
ArchOpcode opcode = kArchNop;
switch (load_rep.representation()) {
@@ -544,8 +542,8 @@ void VisitBinOp(InstructionSelector* selector, Node* node,
V(Float64, Bin, [](ArchOpcode opcode) { return opcode == kS390_LoadDouble; })
#if V8_TARGET_ARCH_S390X
-#define VISIT_OP_LIST(V) \
- VISIT_OP_LIST_32(V) \
+#define VISIT_OP_LIST(V) \
+ VISIT_OP_LIST_32(V) \
V(Word64, Bin, [](ArchOpcode opcode) { return opcode == kS390_LoadWord64; })
#else
#define VISIT_OP_LIST VISIT_OP_LIST_32
@@ -715,16 +713,13 @@ void InstructionSelector::VisitProtectedLoad(Node* node) {
UNIMPLEMENTED();
}
-void InstructionSelector::VisitStore(Node* node) {
- S390OperandGenerator g(this);
+static void VisitGeneralStore(
+ InstructionSelector* selector, Node* node, MachineRepresentation rep,
+ WriteBarrierKind write_barrier_kind = kNoWriteBarrier) {
+ S390OperandGenerator g(selector);
Node* base = node->InputAt(0);
Node* offset = node->InputAt(1);
Node* value = node->InputAt(2);
-
- StoreRepresentation store_rep = StoreRepresentationOf(node->op());
- WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind();
- MachineRepresentation rep = store_rep.representation();
-
if (write_barrier_kind != kNoWriteBarrier) {
DCHECK(CanBeTaggedPointer(rep));
AddressingMode addressing_mode;
@@ -761,7 +756,7 @@ void InstructionSelector::VisitStore(Node* node) {
InstructionCode code = kArchStoreWithWriteBarrier;
code |= AddressingModeField::encode(addressing_mode);
code |= MiscField::encode(static_cast<int>(record_write_mode));
- Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
+ selector->Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
} else {
ArchOpcode opcode = kArchNop;
NodeMatcher m(value);
@@ -782,7 +777,7 @@ void InstructionSelector::VisitStore(Node* node) {
#if !V8_TARGET_ARCH_S390X
case MachineRepresentation::kTaggedSigned: // Fall through.
case MachineRepresentation::kTaggedPointer: // Fall through.
- case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kTagged: // Fall through.
#endif
case MachineRepresentation::kWord32:
opcode = kS390_StoreWord32;
@@ -794,7 +789,7 @@ void InstructionSelector::VisitStore(Node* node) {
#if V8_TARGET_ARCH_S390X
case MachineRepresentation::kTaggedSigned: // Fall through.
case MachineRepresentation::kTaggedPointer: // Fall through.
- case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kTagged: // Fall through.
case MachineRepresentation::kWord64:
opcode = kS390_StoreWord64;
if (m.IsWord64ReverseBytes()) {
@@ -818,11 +813,19 @@ void InstructionSelector::VisitStore(Node* node) {
opcode | AddressingModeField::encode(addressing_mode);
InstructionOperand value_operand = g.UseRegister(value);
inputs[input_count++] = value_operand;
- Emit(code, 0, static_cast<InstructionOperand*>(nullptr), input_count,
- inputs);
+ selector->Emit(code, 0, static_cast<InstructionOperand*>(nullptr),
+ input_count, inputs);
}
}
+void InstructionSelector::VisitStore(Node* node) {
+ StoreRepresentation store_rep = StoreRepresentationOf(node->op());
+ WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind();
+ MachineRepresentation rep = store_rep.representation();
+
+ VisitGeneralStore(this, node, rep, write_barrier_kind);
+}
+
void InstructionSelector::VisitProtectedStore(Node* node) {
// TODO(eholk)
UNIMPLEMENTED();
@@ -1411,6 +1414,8 @@ static inline bool TryMatchDoubleConstructFromInsert(
null) \
V(Float64, ChangeFloat64ToInt64, kS390_DoubleToInt64, OperandMode::kNone, \
null) \
+ V(Float64, TruncateFloat64ToInt64, kS390_DoubleToInt64, OperandMode::kNone, \
+ null) \
V(Float64, BitcastFloat64ToInt64, kS390_BitcastDoubleToInt64, \
OperandMode::kNone, null)
@@ -2037,13 +2042,13 @@ void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
value_operand, g.TempImmediate(-sw.min_value()));
}
#if V8_TARGET_ARCH_S390X
- InstructionOperand index_operand_zero_ext = g.TempRegister();
- Emit(kS390_Uint32ToUint64, index_operand_zero_ext, index_operand);
- index_operand = index_operand_zero_ext;
+ InstructionOperand index_operand_zero_ext = g.TempRegister();
+ Emit(kS390_Uint32ToUint64, index_operand_zero_ext, index_operand);
+ index_operand = index_operand_zero_ext;
#endif
- // Generate a table lookup.
- return EmitTableSwitch(sw, index_operand);
- }
+ // Generate a table lookup.
+ return EmitTableSwitch(sw, index_operand);
+ }
}
// Generate a tree of conditional jumps.
@@ -2176,7 +2181,7 @@ void InstructionSelector::EmitPrepareArguments(
if (input.node == nullptr) continue;
num_slots += input.location.GetType().representation() ==
MachineRepresentation::kFloat64
- ? kDoubleSize / kPointerSize
+ ? kDoubleSize / kSystemPointerSize
: 1;
}
Emit(kS390_StackClaim, g.NoOutput(), g.TempImmediate(num_slots));
@@ -2187,7 +2192,7 @@ void InstructionSelector::EmitPrepareArguments(
g.TempImmediate(slot));
slot += input.location.GetType().representation() ==
MachineRepresentation::kFloat64
- ? (kDoubleSize / kPointerSize)
+ ? (kDoubleSize / kSystemPointerSize)
: 1;
}
}
@@ -2201,59 +2206,16 @@ int InstructionSelector::GetTempsCountForTailCallFromJSFunction() { return 3; }
void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
LoadRepresentation load_rep = LoadRepresentationOf(node->op());
- S390OperandGenerator g(this);
- Node* base = node->InputAt(0);
- Node* index = node->InputAt(1);
- ArchOpcode opcode = kArchNop;
- switch (load_rep.representation()) {
- case MachineRepresentation::kWord8:
- opcode =
- load_rep.IsSigned() ? kWord32AtomicLoadInt8 : kWord32AtomicLoadUint8;
- break;
- case MachineRepresentation::kWord16:
- opcode = load_rep.IsSigned() ? kWord32AtomicLoadInt16
- : kWord32AtomicLoadUint16;
- break;
- case MachineRepresentation::kWord32:
- opcode = kWord32AtomicLoadWord32;
- break;
- default:
- UNREACHABLE();
- return;
- }
- Emit(opcode | AddressingModeField::encode(kMode_MRR),
- g.DefineAsRegister(node), g.UseRegister(base), g.UseRegister(index));
+ DCHECK(load_rep.representation() == MachineRepresentation::kWord8 ||
+ load_rep.representation() == MachineRepresentation::kWord16 ||
+ load_rep.representation() == MachineRepresentation::kWord32);
+ USE(load_rep);
+ VisitLoad(node);
}
void InstructionSelector::VisitWord32AtomicStore(Node* node) {
MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
- S390OperandGenerator g(this);
- Node* base = node->InputAt(0);
- Node* index = node->InputAt(1);
- Node* value = node->InputAt(2);
- ArchOpcode opcode = kArchNop;
- switch (rep) {
- case MachineRepresentation::kWord8:
- opcode = kWord32AtomicStoreWord8;
- break;
- case MachineRepresentation::kWord16:
- opcode = kWord32AtomicStoreWord16;
- break;
- case MachineRepresentation::kWord32:
- opcode = kWord32AtomicStoreWord32;
- break;
- default:
- UNREACHABLE();
- return;
- }
-
- InstructionOperand inputs[4];
- size_t input_count = 0;
- inputs[input_count++] = g.UseUniqueRegister(value);
- inputs[input_count++] = g.UseUniqueRegister(base);
- inputs[input_count++] = g.UseUniqueRegister(index);
- Emit(opcode | AddressingModeField::encode(kMode_MRR), 0, nullptr, input_count,
- inputs);
+ VisitGeneralStore(this, node, rep);
}
void VisitAtomicExchange(InstructionSelector* selector, Node* node,
@@ -2373,7 +2335,7 @@ void InstructionSelector::VisitWord64AtomicCompareExchange(Node* node) {
opcode = kS390_Word64AtomicCompareExchangeUint16;
} else if (type == MachineType::Uint32()) {
opcode = kS390_Word64AtomicCompareExchangeUint32;
- } else if (type == MachineType::Uint64()) {
+ } else if (type == MachineType::Uint64()) {
opcode = kS390_Word64AtomicCompareExchangeUint64;
} else {
UNREACHABLE();
@@ -2490,63 +2452,13 @@ VISIT_ATOMIC64_BINOP(Xor)
void InstructionSelector::VisitWord64AtomicLoad(Node* node) {
LoadRepresentation load_rep = LoadRepresentationOf(node->op());
- S390OperandGenerator g(this);
- Node* base = node->InputAt(0);
- Node* index = node->InputAt(1);
- ArchOpcode opcode = kArchNop;
- switch (load_rep.representation()) {
- case MachineRepresentation::kWord8:
- opcode = kS390_Word64AtomicLoadUint8;
- break;
- case MachineRepresentation::kWord16:
- opcode = kS390_Word64AtomicLoadUint16;
- break;
- case MachineRepresentation::kWord32:
- opcode = kS390_Word64AtomicLoadUint32;
- break;
- case MachineRepresentation::kWord64:
- opcode = kS390_Word64AtomicLoadUint64;
- break;
- default:
- UNREACHABLE();
- return;
- }
- Emit(opcode | AddressingModeField::encode(kMode_MRR),
- g.DefineAsRegister(node), g.UseRegister(base), g.UseRegister(index));
+ USE(load_rep);
+ VisitLoad(node);
}
void InstructionSelector::VisitWord64AtomicStore(Node* node) {
MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
- S390OperandGenerator g(this);
- Node* base = node->InputAt(0);
- Node* index = node->InputAt(1);
- Node* value = node->InputAt(2);
- ArchOpcode opcode = kArchNop;
- switch (rep) {
- case MachineRepresentation::kWord8:
- opcode = kS390_Word64AtomicStoreUint8;
- break;
- case MachineRepresentation::kWord16:
- opcode = kS390_Word64AtomicStoreUint16;
- break;
- case MachineRepresentation::kWord32:
- opcode = kS390_Word64AtomicStoreUint32;
- break;
- case MachineRepresentation::kWord64:
- opcode = kS390_Word64AtomicStoreUint64;
- break;
- default:
- UNREACHABLE();
- return;
- }
-
- InstructionOperand inputs[4];
- size_t input_count = 0;
- inputs[input_count++] = g.UseUniqueRegister(value);
- inputs[input_count++] = g.UseUniqueRegister(base);
- inputs[input_count++] = g.UseUniqueRegister(index);
- Emit(opcode | AddressingModeField::encode(kMode_MRR), 0, nullptr, input_count,
- inputs);
+ VisitGeneralStore(this, node, rep);
}
void InstructionSelector::VisitI32x4Splat(Node* node) { UNIMPLEMENTED(); }
@@ -2812,7 +2724,6 @@ void InstructionSelector::VisitI8x16UConvertI16x8(Node* node) {
UNIMPLEMENTED();
}
-
void InstructionSelector::VisitS1x4AnyTrue(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitS1x4AllTrue(Node* node) { UNIMPLEMENTED(); }
@@ -2825,6 +2736,16 @@ void InstructionSelector::VisitS1x16AnyTrue(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitS1x16AllTrue(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI8x16Shl(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI8x16ShrS(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI8x16ShrU(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI8x16Mul(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitS8x16Shuffle(Node* node) { UNIMPLEMENTED(); }
+
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
diff --git a/deps/v8/src/compiler/unwinding-info-writer.h b/deps/v8/src/compiler/backend/unwinding-info-writer.h
index 723b6f9ec2..3383da99f7 100644
--- a/deps/v8/src/compiler/unwinding-info-writer.h
+++ b/deps/v8/src/compiler/backend/unwinding-info-writer.h
@@ -2,15 +2,17 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_COMPILER_UNWINDING_INFO_WRITER_H_
-#define V8_COMPILER_UNWINDING_INFO_WRITER_H_
+#ifndef V8_COMPILER_BACKEND_UNWINDING_INFO_WRITER_H_
+#define V8_COMPILER_BACKEND_UNWINDING_INFO_WRITER_H_
+
+#include "src/flags.h"
#if V8_TARGET_ARCH_ARM
-#include "src/compiler/arm/unwinding-info-writer-arm.h"
+#include "src/compiler/backend/arm/unwinding-info-writer-arm.h"
#elif V8_TARGET_ARCH_ARM64
-#include "src/compiler/arm64/unwinding-info-writer-arm64.h"
+#include "src/compiler/backend/arm64/unwinding-info-writer-arm64.h"
#elif V8_TARGET_ARCH_X64
-#include "src/compiler/x64/unwinding-info-writer-x64.h"
+#include "src/compiler/backend/x64/unwinding-info-writer-x64.h"
#else
// Placeholder for unsupported architectures.
@@ -52,4 +54,4 @@ class UnwindingInfoWriter {
#endif
-#endif // V8_COMPILER_UNWINDING_INFO_WRITER_H_
+#endif // V8_COMPILER_BACKEND_UNWINDING_INFO_WRITER_H_
diff --git a/deps/v8/src/compiler/x64/code-generator-x64.cc b/deps/v8/src/compiler/backend/x64/code-generator-x64.cc
index 178d2b33b9..bcb37e1b46 100644
--- a/deps/v8/src/compiler/x64/code-generator-x64.cc
+++ b/deps/v8/src/compiler/backend/x64/code-generator-x64.cc
@@ -2,20 +2,22 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/compiler/code-generator.h"
+#include "src/compiler/backend/code-generator.h"
#include <limits>
-#include "src/compiler/code-generator-impl.h"
-#include "src/compiler/gap-resolver.h"
+#include "src/base/overflowing-math.h"
+#include "src/compiler/backend/code-generator-impl.h"
+#include "src/compiler/backend/gap-resolver.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/osr.h"
-#include "src/heap/heap-inl.h"
+#include "src/heap/heap-inl.h" // crbug.com/v8/8499
+#include "src/macro-assembler.h"
+#include "src/objects/smi.h"
#include "src/optimized-compilation-info.h"
#include "src/wasm/wasm-code-manager.h"
#include "src/wasm/wasm-objects.h"
#include "src/x64/assembler-x64.h"
-#include "src/x64/macro-assembler-x64.h"
namespace v8 {
namespace internal {
@@ -151,7 +153,6 @@ class X64OperandConverter : public InstructionOperandConverter {
}
};
-
namespace {
bool HasImmediateInput(Instruction* instr, size_t index) {
@@ -206,7 +207,8 @@ class OutOfLineTruncateDoubleToI final : public OutOfLineCode {
__ Movsd(MemOperand(rsp, 0), input_);
if (stub_mode_ == StubCallMode::kCallWasmRuntimeStub) {
// A direct call to a wasm runtime stub defined in this module.
- // Just encode the stub index. This will be patched at relocation.
+ // Just encode the stub index. This will be patched when the code
+ // is added to the native module and copied into wasm code space.
__ near_call(wasm::WasmCode::kDoubleToI, RelocInfo::WASM_STUB_CALL);
} else {
__ Call(BUILTIN_CODE(isolate_, DoubleToI), RelocInfo::CODE_TARGET);
@@ -226,12 +228,11 @@ class OutOfLineTruncateDoubleToI final : public OutOfLineCode {
Zone* zone_;
};
-
class OutOfLineRecordWrite final : public OutOfLineCode {
public:
OutOfLineRecordWrite(CodeGenerator* gen, Register object, Operand operand,
Register value, Register scratch0, Register scratch1,
- RecordWriteMode mode)
+ RecordWriteMode mode, StubCallMode stub_mode)
: OutOfLineCode(gen),
object_(object),
operand_(operand),
@@ -239,6 +240,7 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
scratch0_(scratch0),
scratch1_(scratch1),
mode_(mode),
+ stub_mode_(stub_mode),
zone_(gen->zone()) {}
void Generate() final {
@@ -256,8 +258,16 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
SaveFPRegsMode const save_fp_mode =
frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
- __ CallRecordWriteStub(object_, scratch1_, remembered_set_action,
- save_fp_mode);
+ if (stub_mode_ == StubCallMode::kCallWasmRuntimeStub) {
+ // A direct call to a wasm runtime stub defined in this module.
+ // Just encode the stub index. This will be patched when the code
+ // is added to the native module and copied into wasm code space.
+ __ CallRecordWriteStub(object_, scratch1_, remembered_set_action,
+ save_fp_mode, wasm::WasmCode::kWasmRecordWrite);
+ } else {
+ __ CallRecordWriteStub(object_, scratch1_, remembered_set_action,
+ save_fp_mode);
+ }
}
private:
@@ -267,6 +277,7 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
Register const scratch0_;
Register const scratch1_;
RecordWriteMode const mode_;
+ StubCallMode const stub_mode_;
Zone* zone_;
};
@@ -297,17 +308,19 @@ class WasmOutOfLineTrap : public OutOfLineCode {
0);
__ LeaveFrame(StackFrame::WASM_COMPILED);
auto call_descriptor = gen_->linkage()->GetIncomingDescriptor();
- size_t pop_size = call_descriptor->StackParameterCount() * kPointerSize;
+ size_t pop_size =
+ call_descriptor->StackParameterCount() * kSystemPointerSize;
      // Use rcx as a scratch register; we return immediately anyway.
__ Ret(static_cast<int>(pop_size), rcx);
} else {
gen_->AssembleSourcePosition(instr_);
// A direct call to a wasm runtime stub defined in this module.
- // Just encode the stub index. This will be patched at relocation.
+ // Just encode the stub index. This will be patched when the code
+ // is added to the native module and copied into wasm code space.
__ near_call(static_cast<Address>(trap_id), RelocInfo::WASM_STUB_CALL);
ReferenceMap* reference_map =
new (gen_->zone()) ReferenceMap(gen_->zone());
- gen_->RecordSafepoint(reference_map, Safepoint::kSimple, 0,
+ gen_->RecordSafepoint(reference_map, Safepoint::kSimple,
Safepoint::kNoLazyDeopt);
__ AssertUnreachable(AbortReason::kUnexpectedReturnFromWasmTrap);
}
@@ -525,6 +538,39 @@ void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
__ j(not_equal, &binop); \
} while (false)
+#define ASSEMBLE_SIMD_INSTR(opcode, dst_operand, index) \
+ do { \
+ if (instr->InputAt(index)->IsSimd128Register()) { \
+ __ opcode(dst_operand, i.InputSimd128Register(index)); \
+ } else { \
+ __ opcode(dst_operand, i.InputOperand(index)); \
+ } \
+ } while (false)
+
+#define ASSEMBLE_SIMD_IMM_INSTR(opcode, dst_operand, index, imm) \
+ do { \
+ if (instr->InputAt(index)->IsSimd128Register()) { \
+ __ opcode(dst_operand, i.InputSimd128Register(index), imm); \
+ } else { \
+ __ opcode(dst_operand, i.InputOperand(index), imm); \
+ } \
+ } while (false)
+
+#define ASSEMBLE_SIMD_PUNPCK_SHUFFLE(opcode) \
+ do { \
+ XMMRegister dst = i.OutputSimd128Register(); \
+ DCHECK_EQ(dst, i.InputSimd128Register(0)); \
+ byte input_index = instr->InputCount() == 2 ? 1 : 0; \
+ ASSEMBLE_SIMD_INSTR(opcode, dst, input_index); \
+ } while (false)
+
+#define ASSEMBLE_SIMD_IMM_SHUFFLE(opcode, SSELevel, imm) \
+ do { \
+ CpuFeatureScope sse_scope(tasm(), SSELevel); \
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0)); \
+ __ opcode(i.OutputSimd128Register(), i.InputSimd128Register(1), imm); \
+ } while (false)
+
void CodeGenerator::AssembleDeconstructFrame() {
unwinding_info_writer_.MarkFrameDeconstructed(__ pc_offset());
__ movq(rsp, rbp);
@@ -572,14 +618,23 @@ void AdjustStackPointerForTailCall(Assembler* assembler,
StandardFrameConstants::kFixedSlotCountAboveFp;
int stack_slot_delta = new_slot_above_sp - current_sp_offset;
if (stack_slot_delta > 0) {
- assembler->subq(rsp, Immediate(stack_slot_delta * kPointerSize));
+ assembler->subq(rsp, Immediate(stack_slot_delta * kSystemPointerSize));
state->IncreaseSPDelta(stack_slot_delta);
} else if (allow_shrinkage && stack_slot_delta < 0) {
- assembler->addq(rsp, Immediate(-stack_slot_delta * kPointerSize));
+ assembler->addq(rsp, Immediate(-stack_slot_delta * kSystemPointerSize));
state->IncreaseSPDelta(stack_slot_delta);
}
}
+void SetupShuffleMaskOnStack(TurboAssembler* assembler, uint32_t* mask) {
+ int64_t shuffle_mask = (mask[2]) | (static_cast<uint64_t>(mask[3]) << 32);
+ assembler->movq(kScratchRegister, shuffle_mask);
+ assembler->Push(kScratchRegister);
+ shuffle_mask = (mask[0]) | (static_cast<uint64_t>(mask[1]) << 32);
+ assembler->movq(kScratchRegister, shuffle_mask);
+ assembler->Push(kScratchRegister);
+}
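// Editor's note -- a scalar sketch of SetupShuffleMaskOnStack: on a
// little-endian host the two pushes lay down a 16-byte image of mask[0..3]
// at the new stack top, high quadword first because the stack grows
// downward.
#include <cassert>
#include <cstdint>
#include <cstring>

void BuildMaskImage(const uint32_t mask[4], uint8_t out[16]) {
  uint64_t hi = mask[2] | (static_cast<uint64_t>(mask[3]) << 32);
  uint64_t lo = mask[0] | (static_cast<uint64_t>(mask[1]) << 32);
  std::memcpy(out + 8, &hi, 8);  // pushed first -> ends at the higher address
  std::memcpy(out, &lo, 8);      // pushed second -> rsp points here
}

int main() {
  uint32_t mask[4] = {0x03020100u, 0x07060504u, 0x0B0A0908u, 0x0F0E0D0Cu};
  uint8_t img[16];
  BuildMaskImage(mask, img);
  for (int b = 0; b < 16; ++b) assert(img[b] == b);  // identity pshufb mask
}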
+
} // namespace
void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
@@ -640,15 +695,12 @@ void CodeGenerator::AssembleCodeStartRegisterCheck() {
// 3. if it is not zero then it jumps to the builtin.
void CodeGenerator::BailoutIfDeoptimized() {
int offset = Code::kCodeDataContainerOffset - Code::kHeaderSize;
- __ movp(rbx, Operand(kJavaScriptCallCodeStartRegister, offset));
+ __ LoadTaggedPointerField(rbx,
+ Operand(kJavaScriptCallCodeStartRegister, offset));
__ testl(FieldOperand(rbx, CodeDataContainer::kKindSpecificFlagsOffset),
Immediate(1 << Code::kMarkedForDeoptimizationBit));
- // Ensure we're not serializing (otherwise we'd need to use an indirection to
- // access the builtin below).
- DCHECK(!isolate()->ShouldLoadConstantsFromRootList());
- Handle<Code> code = isolate()->builtins()->builtin_handle(
- Builtins::kCompileLazyDeoptimizedCode);
- __ j(not_zero, code, RelocInfo::CODE_TARGET);
+ __ Jump(BUILTIN_CODE(isolate(), CompileLazyDeoptimizedCode),
+ RelocInfo::CODE_TARGET, not_zero);
}
void CodeGenerator::GenerateSpeculationPoisonFromCodeStartRegister() {
@@ -683,7 +735,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
DCHECK_IMPLIES(
HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
reg == kJavaScriptCallCodeStartRegister);
- __ addp(reg, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ LoadCodeObjectEntry(reg, reg);
if (HasCallDescriptorFlag(instr, CallDescriptor::kRetpoline)) {
__ RetpolineCall(reg);
} else {
@@ -694,6 +746,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
frame_access_state()->ClearSPDelta();
break;
}
+ case kArchCallBuiltinPointer: {
+ DCHECK(!HasImmediateInput(instr, 0));
+ Register builtin_pointer = i.InputRegister(0);
+ __ CallBuiltinPointer(builtin_pointer);
+ RecordCallPosition(instr);
+ frame_access_state()->ClearSPDelta();
+ break;
+ }
case kArchCallWasmFunction: {
if (HasImmediateInput(instr, 0)) {
Constant constant = i.ToConstant(instr->InputAt(0));
@@ -734,7 +794,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
DCHECK_IMPLIES(
HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
reg == kJavaScriptCallCodeStartRegister);
- __ addp(reg, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ LoadCodeObjectEntry(reg, reg);
if (HasCallDescriptorFlag(instr, CallDescriptor::kRetpoline)) {
__ RetpolineJump(reg);
} else {
@@ -789,13 +849,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Register func = i.InputRegister(0);
if (FLAG_debug_code) {
// Check the function's context matches the context argument.
- __ cmpp(rsi, FieldOperand(func, JSFunction::kContextOffset));
+ __ cmp_tagged(rsi, FieldOperand(func, JSFunction::kContextOffset));
__ Assert(equal, AbortReason::kWrongFunctionContext);
}
static_assert(kJavaScriptCallCodeStartRegister == rcx, "ABI mismatch");
- __ movp(rcx, FieldOperand(func, JSFunction::kCodeOffset));
- __ addp(rcx, Immediate(Code::kHeaderSize - kHeapObjectTag));
- __ call(rcx);
+ __ LoadTaggedPointerField(rcx,
+ FieldOperand(func, JSFunction::kCodeOffset));
+ __ CallCodeObject(rcx);
frame_access_state()->ClearSPDelta();
RecordCallPosition(instr);
break;
@@ -813,9 +873,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
DCHECK(fp_mode_ == kDontSaveFPRegs || fp_mode_ == kSaveFPRegs);
// kReturnRegister0 should have been saved before entering the stub.
int bytes = __ PushCallerSaved(fp_mode_, kReturnRegister0);
- DCHECK_EQ(0, bytes % kPointerSize);
+ DCHECK(IsAligned(bytes, kSystemPointerSize));
DCHECK_EQ(0, frame_access_state()->sp_delta());
- frame_access_state()->IncreaseSPDelta(bytes / kPointerSize);
+ frame_access_state()->IncreaseSPDelta(bytes / kSystemPointerSize);
DCHECK(!caller_registers_saved_);
caller_registers_saved_ = true;
break;
@@ -826,7 +886,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
DCHECK(fp_mode_ == kDontSaveFPRegs || fp_mode_ == kSaveFPRegs);
// Don't overwrite the returned value.
int bytes = __ PopCallerSaved(fp_mode_, kReturnRegister0);
- frame_access_state()->IncreaseSPDelta(-(bytes / kPointerSize));
+ frame_access_state()->IncreaseSPDelta(-(bytes / kSystemPointerSize));
DCHECK_EQ(0, frame_access_state()->sp_delta());
DCHECK(caller_registers_saved_);
caller_registers_saved_ = false;
@@ -859,7 +919,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
// kArchRestoreCallerRegisters;
int bytes =
__ RequiredStackSizeForCallerSaved(fp_mode_, kReturnRegister0);
- frame_access_state()->IncreaseSPDelta(bytes / kPointerSize);
+ frame_access_state()->IncreaseSPDelta(bytes / kSystemPointerSize);
}
// TODO(tebbi): Do we need an lfence here?
break;
@@ -952,8 +1012,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Register value = i.InputRegister(index);
Register scratch0 = i.TempRegister(0);
Register scratch1 = i.TempRegister(1);
- auto ool = new (zone()) OutOfLineRecordWrite(this, object, operand, value,
- scratch0, scratch1, mode);
+ auto ool = new (zone())
+ OutOfLineRecordWrite(this, object, operand, value, scratch0, scratch1,
+ mode, DetermineStubCallMode());
__ movp(operand, value);
__ CheckPageFlag(object, scratch0,
MemoryChunk::kPointersFromHereAreInterestingMask,
@@ -1319,10 +1380,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ andl(rax, Immediate(0xFF));
__ pushq(rax);
unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
- kPointerSize);
+ kSystemPointerSize);
__ popfq();
unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
- -kPointerSize);
+ -kSystemPointerSize);
}
__ j(parity_even, &mod_loop);
// Move output to stack and clean up.
@@ -1868,6 +1929,25 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ASSEMBLE_MOVX(movsxlq);
EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
+ case kX64MovqDecompressTaggedSigned: {
+ CHECK(instr->HasOutput());
+ __ DecompressTaggedSigned(i.OutputRegister(), i.MemoryOperand(),
+ DEBUG_BOOL ? i.TempRegister(0) : no_reg);
+ break;
+ }
+ case kX64MovqDecompressTaggedPointer: {
+ CHECK(instr->HasOutput());
+ __ DecompressTaggedPointer(i.OutputRegister(), i.MemoryOperand(),
+ DEBUG_BOOL ? i.TempRegister(0) : no_reg);
+ break;
+ }
+ case kX64MovqDecompressAnyTagged: {
+ CHECK(instr->HasOutput());
+ __ DecompressAnyTagged(i.OutputRegister(), i.MemoryOperand(),
+ i.TempRegister(0),
+ DEBUG_BOOL ? i.TempRegister(1) : no_reg);
+ break;
+ }
case kX64Movq:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, i, __ pc_offset());
if (instr->HasOutput()) {
@@ -1967,7 +2047,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (constant_summand > 0) {
__ addl(i.OutputRegister(), Immediate(constant_summand));
} else {
- __ subl(i.OutputRegister(), Immediate(-constant_summand));
+ __ subl(i.OutputRegister(),
+ Immediate(base::NegateWithWraparound(constant_summand)));
}
} else if (mode == kMode_MR1) {
if (i.InputRegister(1) == i.OutputRegister()) {
@@ -2042,29 +2123,30 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ pushq(operand);
frame_access_state()->IncreaseSPDelta(1);
unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
- kPointerSize);
+ kSystemPointerSize);
} else if (HasImmediateInput(instr, 0)) {
__ pushq(i.InputImmediate(0));
frame_access_state()->IncreaseSPDelta(1);
unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
- kPointerSize);
+ kSystemPointerSize);
} else if (instr->InputAt(0)->IsRegister()) {
__ pushq(i.InputRegister(0));
frame_access_state()->IncreaseSPDelta(1);
unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
- kPointerSize);
+ kSystemPointerSize);
} else if (instr->InputAt(0)->IsFloatRegister() ||
instr->InputAt(0)->IsDoubleRegister()) {
// TODO(titzer): use another machine instruction?
__ subq(rsp, Immediate(kDoubleSize));
- frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
+ frame_access_state()->IncreaseSPDelta(kDoubleSize / kSystemPointerSize);
unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
kDoubleSize);
__ Movsd(Operand(rsp, 0), i.InputDoubleRegister(0));
} else if (instr->InputAt(0)->IsSimd128Register()) {
// TODO(titzer): use another machine instruction?
__ subq(rsp, Immediate(kSimd128Size));
- frame_access_state()->IncreaseSPDelta(kSimd128Size / kPointerSize);
+ frame_access_state()->IncreaseSPDelta(kSimd128Size /
+ kSystemPointerSize);
unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
kSimd128Size);
__ Movups(Operand(rsp, 0), i.InputSimd128Register(0));
@@ -2074,13 +2156,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ pushq(i.InputOperand(0));
frame_access_state()->IncreaseSPDelta(1);
unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
- kPointerSize);
+ kSystemPointerSize);
} else {
DCHECK(instr->InputAt(0)->IsSimd128StackSlot());
__ Movups(kScratchDoubleReg, i.InputOperand(0));
// TODO(titzer): use another machine instruction?
__ subq(rsp, Immediate(kSimd128Size));
- frame_access_state()->IncreaseSPDelta(kSimd128Size / kPointerSize);
+ frame_access_state()->IncreaseSPDelta(kSimd128Size /
+ kSystemPointerSize);
unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
kSimd128Size);
__ Movups(Operand(rsp, 0), kScratchDoubleReg);
@@ -2089,9 +2172,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kX64Poke: {
int slot = MiscField::decode(instr->opcode());
if (HasImmediateInput(instr, 0)) {
- __ movq(Operand(rsp, slot * kPointerSize), i.InputImmediate(0));
+ __ movq(Operand(rsp, slot * kSystemPointerSize), i.InputImmediate(0));
} else {
- __ movq(Operand(rsp, slot * kPointerSize), i.InputRegister(0));
+ __ movq(Operand(rsp, slot * kSystemPointerSize), i.InputRegister(0));
}
break;
}
@@ -2669,6 +2752,37 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
break;
}
+ case kX64I8x16Shl: {
+ XMMRegister dst = i.OutputSimd128Register();
+ DCHECK_EQ(dst, i.InputSimd128Register(0));
+ int8_t shift = i.InputInt8(1) & 0x7;
+ if (shift < 4) {
+ // For small shifts, doubling is faster.
+ for (int i = 0; i < shift; ++i) {
+ __ paddb(dst, dst);
+ }
+ } else {
+ // Mask off the unwanted bits before word-shifting.
+ __ pcmpeqw(kScratchDoubleReg, kScratchDoubleReg);
+ __ psrlw(kScratchDoubleReg, 8 + shift);
+ __ packuswb(kScratchDoubleReg, kScratchDoubleReg);
+ __ pand(dst, kScratchDoubleReg);
+ __ psllw(dst, shift);
+ }
+ break;
+ }
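// Editor's note -- a scalar model (one byte lane) of the two I8x16Shl
// strategies above: repeated doubling for shifts below 4, versus masking off
// the bits that a 16-bit psllw would smear into the neighbouring byte and
// then shifting once.
#include <cassert>
#include <cstdint>

uint8_t ShlByDoubling(uint8_t b, int shift) {
  for (int i = 0; i < shift; ++i) b = static_cast<uint8_t>(b + b);  // paddb
  return b;
}

uint8_t ShlByMasking(uint8_t b, int shift) {
  uint8_t keep = static_cast<uint8_t>(0xFF >> shift);  // pcmpeqw/psrlw/packuswb
  return static_cast<uint8_t>((b & keep) << shift);    // pand + psllw
}

int main() {
  for (int s = 0; s < 8; ++s)
    for (int v = 0; v < 256; ++v)
      assert(ShlByDoubling(static_cast<uint8_t>(v), s) ==
             ShlByMasking(static_cast<uint8_t>(v), s));
}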
+ case kX64I8x16ShrS: {
+ XMMRegister dst = i.OutputSimd128Register();
+ XMMRegister src = i.InputSimd128Register(0);
+ int8_t shift = i.InputInt8(1) & 0x7;
+ // Unpack the bytes into words, do arithmetic shifts, and repack.
+ __ punpckhbw(kScratchDoubleReg, src);
+ __ punpcklbw(dst, src);
+ __ psraw(kScratchDoubleReg, 8 + shift);
+ __ psraw(dst, 8 + shift);
+ __ packsswb(dst, kScratchDoubleReg);
+ break;
+ }
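// Editor's note -- a scalar model of the I8x16ShrS lowering above:
// punpck*bw leaves each source byte in the high half of a 16-bit lane (over
// leftover low bits), psraw by 8 + shift both discards that junk and
// performs the 8-bit arithmetic shift, and packsswb narrows back without
// saturating because every result already fits in a byte.
#include <cassert>
#include <cstdint>

int8_t ShrS8ViaWord(int8_t b, int shift, uint8_t junk) {
  int16_t lane =
      static_cast<int16_t>((static_cast<uint8_t>(b) << 8) | junk);  // punpcklbw
  lane = static_cast<int16_t>(lane >> (8 + shift));                 // psraw
  return static_cast<int8_t>(lane);                                 // packsswb
}

int main() {
  for (int v = -128; v < 128; ++v)
    for (int s = 0; s < 8; ++s)
      assert(ShrS8ViaWord(static_cast<int8_t>(v), s, 0xAB) ==
             static_cast<int8_t>(v >> s));
}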
case kX64I8x16Add: {
__ paddb(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
@@ -2685,6 +2799,39 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ psubsb(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
}
+ case kX64I8x16Mul: {
+ XMMRegister dst = i.OutputSimd128Register();
+ DCHECK_EQ(dst, i.InputSimd128Register(0));
+ XMMRegister right = i.InputSimd128Register(1);
+ XMMRegister tmp = i.ToSimd128Register(instr->TempAt(0));
+ // I16x8 view of I8x16
+ // left = AAaa AAaa ... AAaa AAaa
+ // right= BBbb BBbb ... BBbb BBbb
+ // t = 00AA 00AA ... 00AA 00AA
+ // s = 00BB 00BB ... 00BB 00BB
+ __ movaps(tmp, dst);
+ __ movaps(kScratchDoubleReg, right);
+ __ psrlw(tmp, 8);
+ __ psrlw(kScratchDoubleReg, 8);
+ // dst = left * 256
+ __ psllw(dst, 8);
+ // t = I16x8Mul(t, s)
+ // => __PP __PP ... __PP __PP
+ __ pmullw(tmp, kScratchDoubleReg);
+ // dst = I16x8Mul(left * 256, right)
+ // => pp__ pp__ ... pp__ pp__
+ __ pmullw(dst, right);
+ // t = I16x8Shl(t, 8)
+ // => PP00 PP00 ... PP00 PP00
+ __ psllw(tmp, 8);
+ // dst = I16x8Shr(dst, 8)
+ // => 00pp 00pp ... 00pp 00pp
+ __ psrlw(dst, 8);
+ // dst = I16x8Or(dst, t)
+ // => PPpp PPpp ... PPpp PPpp
+ __ por(dst, tmp);
+ break;
+ }
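// Editor's note -- a scalar check, over one 16-bit word holding bytes
// (A, a), of the widening multiply trick documented above: the high-byte
// and low-byte products are computed in separate 16-bit lanes and their low
// bytes are stitched back together.
#include <cassert>
#include <cstdint>

uint16_t MulBytesViaWords(uint16_t left, uint16_t right) {
  uint16_t t = left >> 8;                           // 00AA (psrlw tmp, 8)
  uint16_t s = right >> 8;                          // 00BB (psrlw scratch, 8)
  uint16_t dst = static_cast<uint16_t>(left << 8);  // aa00 (psllw dst, 8)
  t = static_cast<uint16_t>(t * s);                 // __PP (pmullw)
  dst = static_cast<uint16_t>(dst * right);         // pp__ (pmullw)
  t = static_cast<uint16_t>(t << 8);                // PP00 (psllw tmp, 8)
  dst = dst >> 8;                                   // 00pp (psrlw dst, 8)
  return dst | t;                                   // PPpp (por)
}

int main() {
  for (int a = 0; a < 256; ++a)
    for (int b = 0; b < 256; ++b) {
      int A = a ^ 0x5A, B = b ^ 0xA5;
      uint16_t left = static_cast<uint16_t>((A << 8) | a);
      uint16_t right = static_cast<uint16_t>((B << 8) | b);
      uint16_t expect =
          static_cast<uint16_t>(((A * B & 0xFF) << 8) | (a * b & 0xFF));
      assert(MulBytesViaWords(left, right) == expect);
    }
}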
case kX64I8x16MinS: {
CpuFeatureScope sse_scope(tasm(), SSE4_1);
__ pminsb(i.OutputSimd128Register(), i.InputSimd128Register(1));
@@ -2729,6 +2876,18 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ packuswb(dst, kScratchDoubleReg);
break;
}
+ case kX64I8x16ShrU: {
+ XMMRegister dst = i.OutputSimd128Register();
+ XMMRegister src = i.InputSimd128Register(0);
+ int8_t shift = i.InputInt8(1) & 0x7;
+ // Unpack the bytes into words, do logical shifts, and repack.
+ __ punpckhbw(kScratchDoubleReg, src);
+ __ punpcklbw(dst, src);
+ __ psrlw(kScratchDoubleReg, 8 + shift);
+ __ psrlw(dst, 8 + shift);
+ __ packuswb(dst, kScratchDoubleReg);
+ break;
+ }
case kX64I8x16AddSaturateU: {
__ paddusb(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
@@ -2800,6 +2959,258 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ xorps(dst, i.InputSimd128Register(2));
break;
}
+ case kX64S8x16Shuffle: {
+ XMMRegister dst = i.OutputSimd128Register();
+ Register tmp = i.TempRegister(0);
+      // Prepare a 16-byte aligned buffer for the shuffle control mask
+ __ movq(tmp, rsp);
+ __ andq(rsp, Immediate(-16));
+ if (instr->InputCount() == 5) { // only one input operand
+ uint32_t mask[4] = {};
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ for (int j = 4; j > 0; j--) {
+ mask[j - 1] = i.InputUint32(j);
+ }
+
+ SetupShuffleMaskOnStack(tasm(), mask);
+ __ pshufb(dst, Operand(rsp, 0));
+ } else { // two input operands
+ DCHECK_EQ(6, instr->InputCount());
+ ASSEMBLE_SIMD_INSTR(movups, kScratchDoubleReg, 0);
+ uint32_t mask[4] = {};
+ for (int j = 5; j > 1; j--) {
+ uint32_t lanes = i.InputUint32(j);
+ for (int k = 0; k < 32; k += 8) {
+ uint8_t lane = lanes >> k;
+ mask[j - 2] |= (lane < kSimd128Size ? lane : 0x80) << k;
+ }
+ }
+ SetupShuffleMaskOnStack(tasm(), mask);
+ __ pshufb(kScratchDoubleReg, Operand(rsp, 0));
+ uint32_t mask1[4] = {};
+ if (instr->InputAt(1)->IsSimd128Register()) {
+ XMMRegister src1 = i.InputSimd128Register(1);
+ if (src1 != dst) __ movups(dst, src1);
+ } else {
+ __ movups(dst, i.InputOperand(1));
+ }
+ for (int j = 5; j > 1; j--) {
+ uint32_t lanes = i.InputUint32(j);
+ for (int k = 0; k < 32; k += 8) {
+ uint8_t lane = lanes >> k;
+ mask1[j - 2] |= (lane >= kSimd128Size ? (lane & 0x0F) : 0x80) << k;
+ }
+ }
+ SetupShuffleMaskOnStack(tasm(), mask1);
+ __ pshufb(dst, Operand(rsp, 0));
+ __ por(dst, kScratchDoubleReg);
+ }
+ __ movq(rsp, tmp);
+ break;
+ }
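// Editor's note -- a scalar model of the two-pshufb shuffle above: lane
// indices below 16 select from the first input, 16..31 from the second, and
// pshufb zeroes any lane whose control byte has bit 0x80 set, so the two
// partial results combine with a plain OR.
#include <cassert>
#include <cstdint>

void Pshufb(const uint8_t src[16], const uint8_t ctrl[16], uint8_t dst[16]) {
  for (int j = 0; j < 16; ++j)
    dst[j] = (ctrl[j] & 0x80) ? 0 : src[ctrl[j] & 0x0F];
}

void Shuffle2(const uint8_t a[16], const uint8_t b[16],
              const uint8_t lanes[16], uint8_t dst[16]) {
  uint8_t ca[16], cb[16], ra[16], rb[16];
  for (int j = 0; j < 16; ++j) {
    ca[j] = lanes[j] < 16 ? lanes[j] : 0x80;            // `mask` above
    cb[j] = lanes[j] >= 16 ? (lanes[j] & 0x0F) : 0x80;  // `mask1` above
  }
  Pshufb(a, ca, ra);
  Pshufb(b, cb, rb);
  for (int j = 0; j < 16; ++j) dst[j] = ra[j] | rb[j];  // por
}

int main() {
  uint8_t a[16], b[16], lanes[16], dst[16];
  for (int j = 0; j < 16; ++j) {
    a[j] = static_cast<uint8_t>(j);
    b[j] = static_cast<uint8_t>(0x40 + j);
    lanes[j] = static_cast<uint8_t>((j * 7 + 3) % 32);
  }
  Shuffle2(a, b, lanes, dst);
  for (int j = 0; j < 16; ++j)
    assert(dst[j] == (lanes[j] < 16 ? a[lanes[j]] : b[lanes[j] - 16]));
}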
+ case kX64S32x4Swizzle: {
+ DCHECK_EQ(2, instr->InputCount());
+ ASSEMBLE_SIMD_IMM_INSTR(pshufd, i.OutputSimd128Register(), 0,
+ i.InputInt8(1));
+ break;
+ }
+ case kX64S32x4Shuffle: {
+ CpuFeatureScope sse_scope(tasm(), SSE4_1);
+ DCHECK_EQ(4, instr->InputCount()); // Swizzles should be handled above.
+ int8_t shuffle = i.InputInt8(2);
+ DCHECK_NE(0xe4, shuffle); // A simple blend should be handled below.
+ ASSEMBLE_SIMD_IMM_INSTR(pshufd, kScratchDoubleReg, 1, shuffle);
+ ASSEMBLE_SIMD_IMM_INSTR(pshufd, i.OutputSimd128Register(), 0, shuffle);
+ __ pblendw(i.OutputSimd128Register(), kScratchDoubleReg, i.InputInt8(3));
+ break;
+ }
+ case kX64S16x8Blend: {
+ ASSEMBLE_SIMD_IMM_SHUFFLE(pblendw, SSE4_1, i.InputInt8(2));
+ break;
+ }
+ case kX64S16x8HalfShuffle1: {
+ XMMRegister dst = i.OutputSimd128Register();
+ ASSEMBLE_SIMD_IMM_INSTR(pshuflw, dst, 0, i.InputInt8(1));
+ __ pshufhw(dst, dst, i.InputInt8(2));
+ break;
+ }
+ case kX64S16x8HalfShuffle2: {
+ CpuFeatureScope sse_scope(tasm(), SSE4_1);
+ XMMRegister dst = i.OutputSimd128Register();
+ ASSEMBLE_SIMD_IMM_INSTR(pshuflw, kScratchDoubleReg, 1, i.InputInt8(2));
+ __ pshufhw(kScratchDoubleReg, kScratchDoubleReg, i.InputInt8(3));
+ ASSEMBLE_SIMD_IMM_INSTR(pshuflw, dst, 0, i.InputInt8(2));
+ __ pshufhw(dst, dst, i.InputInt8(3));
+ __ pblendw(dst, kScratchDoubleReg, i.InputInt8(4));
+ break;
+ }
+ case kX64S8x16Alignr: {
+ ASSEMBLE_SIMD_IMM_SHUFFLE(palignr, SSSE3, i.InputInt8(2));
+ break;
+ }
+ case kX64S16x8Dup: {
+ XMMRegister dst = i.OutputSimd128Register();
+ int8_t lane = i.InputInt8(1) & 0x7;
+ int8_t lane4 = lane & 0x3;
+ int8_t half_dup = lane4 | (lane4 << 2) | (lane4 << 4) | (lane4 << 6);
+ if (lane < 4) {
+ ASSEMBLE_SIMD_IMM_INSTR(pshuflw, dst, 0, half_dup);
+ __ pshufd(dst, dst, 0);
+ } else {
+ ASSEMBLE_SIMD_IMM_INSTR(pshufhw, dst, 0, half_dup);
+ __ pshufd(dst, dst, 0xaa);
+ }
+ break;
+ }
+ case kX64S8x16Dup: {
+ XMMRegister dst = i.OutputSimd128Register();
+ int8_t lane = i.InputInt8(1) & 0xf;
+ DCHECK_EQ(dst, i.InputSimd128Register(0));
+ if (lane < 8) {
+ __ punpcklbw(dst, dst);
+ } else {
+ __ punpckhbw(dst, dst);
+ }
+ lane &= 0x7;
+ int8_t lane4 = lane & 0x3;
+ int8_t half_dup = lane4 | (lane4 << 2) | (lane4 << 4) | (lane4 << 6);
+ if (lane < 4) {
+ __ pshuflw(dst, dst, half_dup);
+ __ pshufd(dst, dst, 0);
+ } else {
+ __ pshufhw(dst, dst, half_dup);
+ __ pshufd(dst, dst, 0xaa);
+ }
+ break;
+ }
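// Editor's note -- a quick check of the half_dup construction used by both
// Dup cases above: replicating a 2-bit lane index into all four fields of a
// pshuflw/pshufhw immediate makes every destination word select the same
// source word.
#include <cassert>

int main() {
  for (int lane4 = 0; lane4 < 4; ++lane4) {
    int half_dup = lane4 | (lane4 << 2) | (lane4 << 4) | (lane4 << 6);
    for (int field = 0; field < 4; ++field)
      assert(((half_dup >> (2 * field)) & 3) == lane4);
  }
}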
+ case kX64S64x2UnpackHigh:
+ ASSEMBLE_SIMD_PUNPCK_SHUFFLE(punpckhqdq);
+ break;
+ case kX64S32x4UnpackHigh:
+ ASSEMBLE_SIMD_PUNPCK_SHUFFLE(punpckhdq);
+ break;
+ case kX64S16x8UnpackHigh:
+ ASSEMBLE_SIMD_PUNPCK_SHUFFLE(punpckhwd);
+ break;
+ case kX64S8x16UnpackHigh:
+ ASSEMBLE_SIMD_PUNPCK_SHUFFLE(punpckhbw);
+ break;
+ case kX64S64x2UnpackLow:
+ ASSEMBLE_SIMD_PUNPCK_SHUFFLE(punpcklqdq);
+ break;
+ case kX64S32x4UnpackLow:
+ ASSEMBLE_SIMD_PUNPCK_SHUFFLE(punpckldq);
+ break;
+ case kX64S16x8UnpackLow:
+ ASSEMBLE_SIMD_PUNPCK_SHUFFLE(punpcklwd);
+ break;
+ case kX64S8x16UnpackLow:
+ ASSEMBLE_SIMD_PUNPCK_SHUFFLE(punpcklbw);
+ break;
+ case kX64S16x8UnzipHigh: {
+ CpuFeatureScope sse_scope(tasm(), SSE4_1);
+ XMMRegister dst = i.OutputSimd128Register();
+ XMMRegister src2 = dst;
+ DCHECK_EQ(dst, i.InputSimd128Register(0));
+ if (instr->InputCount() == 2) {
+ ASSEMBLE_SIMD_INSTR(movups, kScratchDoubleReg, 1);
+ __ psrld(kScratchDoubleReg, 16);
+ src2 = kScratchDoubleReg;
+ }
+ __ psrld(dst, 16);
+ __ packusdw(dst, src2);
+ break;
+ }
+ case kX64S16x8UnzipLow: {
+ CpuFeatureScope sse_scope(tasm(), SSE4_1);
+ XMMRegister dst = i.OutputSimd128Register();
+ XMMRegister src2 = dst;
+ DCHECK_EQ(dst, i.InputSimd128Register(0));
+ __ pxor(kScratchDoubleReg, kScratchDoubleReg);
+ if (instr->InputCount() == 2) {
+ ASSEMBLE_SIMD_IMM_INSTR(pblendw, kScratchDoubleReg, 1, 0x55);
+ src2 = kScratchDoubleReg;
+ }
+ __ pblendw(dst, kScratchDoubleReg, 0xaa);
+ __ packusdw(dst, src2);
+ break;
+ }
+ case kX64S8x16UnzipHigh: {
+ XMMRegister dst = i.OutputSimd128Register();
+ XMMRegister src2 = dst;
+ DCHECK_EQ(dst, i.InputSimd128Register(0));
+ if (instr->InputCount() == 2) {
+ ASSEMBLE_SIMD_INSTR(movups, kScratchDoubleReg, 1);
+ __ psrlw(kScratchDoubleReg, 8);
+ src2 = kScratchDoubleReg;
+ }
+ __ psrlw(dst, 8);
+ __ packuswb(dst, src2);
+ break;
+ }
+ case kX64S8x16UnzipLow: {
+ XMMRegister dst = i.OutputSimd128Register();
+ XMMRegister src2 = dst;
+ DCHECK_EQ(dst, i.InputSimd128Register(0));
+ if (instr->InputCount() == 2) {
+ ASSEMBLE_SIMD_INSTR(movups, kScratchDoubleReg, 1);
+ __ psllw(kScratchDoubleReg, 8);
+ __ psrlw(kScratchDoubleReg, 8);
+ src2 = kScratchDoubleReg;
+ }
+ __ psllw(dst, 8);
+ __ psrlw(dst, 8);
+ __ packuswb(dst, src2);
+ break;
+ }
+ case kX64S8x16TransposeLow: {
+ XMMRegister dst = i.OutputSimd128Register();
+ DCHECK_EQ(dst, i.InputSimd128Register(0));
+ __ psllw(dst, 8);
+ if (instr->InputCount() == 1) {
+ __ movups(kScratchDoubleReg, dst);
+ } else {
+ DCHECK_EQ(2, instr->InputCount());
+ ASSEMBLE_SIMD_INSTR(movups, kScratchDoubleReg, 1);
+ __ psllw(kScratchDoubleReg, 8);
+ }
+ __ psrlw(dst, 8);
+ __ por(dst, kScratchDoubleReg);
+ break;
+ }
+ case kX64S8x16TransposeHigh: {
+ XMMRegister dst = i.OutputSimd128Register();
+ DCHECK_EQ(dst, i.InputSimd128Register(0));
+ __ psrlw(dst, 8);
+ if (instr->InputCount() == 1) {
+ __ movups(kScratchDoubleReg, dst);
+ } else {
+ DCHECK_EQ(2, instr->InputCount());
+ ASSEMBLE_SIMD_INSTR(movups, kScratchDoubleReg, 1);
+ __ psrlw(kScratchDoubleReg, 8);
+ }
+ __ psllw(kScratchDoubleReg, 8);
+ __ por(dst, kScratchDoubleReg);
+ break;
+ }
+ case kX64S8x8Reverse:
+ case kX64S8x4Reverse:
+ case kX64S8x2Reverse: {
+ DCHECK_EQ(1, instr->InputCount());
+ XMMRegister dst = i.OutputSimd128Register();
+ DCHECK_EQ(dst, i.InputSimd128Register(0));
+ if (arch_opcode != kX64S8x2Reverse) {
+ // First shuffle words into position.
+ int8_t shuffle_mask = arch_opcode == kX64S8x4Reverse ? 0xB1 : 0x1B;
+ __ pshuflw(dst, dst, shuffle_mask);
+ __ pshufhw(dst, dst, shuffle_mask);
+ }
+ __ movaps(kScratchDoubleReg, dst);
+ __ psrlw(kScratchDoubleReg, 8);
+ __ psllw(dst, 8);
+ __ por(dst, kScratchDoubleReg);
+ break;
+ }
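The reverse cases rest on one observation: once pshuflw/pshufhw have put the 16-bit words in the right order (imm 0x1B reverses the four words of a half, 0xB1 swaps adjacent pairs), only a byte swap inside each word remains, done with the shift/or triple. A scalar model of that last step:

#include <cstdint>

// Models psrlw(tmp, 8) / psllw(dst, 8) / por(dst, tmp): the two bytes of
// every 16-bit word trade places.
void SwapBytesInWords(uint16_t words[8]) {
  for (int i = 0; i < 8; ++i) {
    words[i] = static_cast<uint16_t>((words[i] >> 8) | (words[i] << 8));
  }
}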
case kX64S1x4AnyTrue:
case kX64S1x8AnyTrue:
case kX64S1x16AnyTrue: {
@@ -2808,7 +3219,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
XMMRegister src = i.InputSimd128Register(0);
Register tmp = i.TempRegister(0);
__ xorq(tmp, tmp);
- __ movq(dst, Immediate(-1));
+ __ movq(dst, Immediate(1));
__ ptest(src, src);
__ cmovq(zero, dst, tmp);
break;
@@ -2820,18 +3231,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Register dst = i.OutputRegister();
XMMRegister src = i.InputSimd128Register(0);
Register tmp = i.TempRegister(0);
- __ movq(tmp, Immediate(-1));
+ __ movq(tmp, Immediate(1));
__ xorq(dst, dst);
- // Compare all src lanes to false.
- __ pxor(kScratchDoubleReg, kScratchDoubleReg);
- if (arch_opcode == kX64S1x4AllTrue) {
- __ pcmpeqd(kScratchDoubleReg, src);
- } else if (arch_opcode == kX64S1x8AllTrue) {
- __ pcmpeqw(kScratchDoubleReg, src);
- } else {
- __ pcmpeqb(kScratchDoubleReg, src);
- }
- // If kScratchDoubleReg is all zero, none of src lanes are false.
+ __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
+ __ pxor(kScratchDoubleReg, src);
__ ptest(kScratchDoubleReg, kScratchDoubleReg);
__ cmovq(zero, dst, tmp);
break;
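The rewritten AllTrue sequence folds the per-width pcmpeq against zero into a single inversion: pcmpeqd of the scratch register with itself yields all-ones, the pxor computes ~src, and ptest sets ZF only when that result is zero. A scalar model, assuming the input is a comparison mask in which every lane is all-ones or all-zeros:

#include <cstdint>

// movq(tmp, 1); xorq(dst, dst); then ZF from ptest on ~src selects via cmovq.
int64_t ScalarAllTrue(uint64_t src_lo, uint64_t src_hi) {
  uint64_t inv_lo = ~src_lo;  // pcmpeqd(scratch, scratch) + pxor(scratch, src)
  uint64_t inv_hi = ~src_hi;
  bool zf = (inv_lo | inv_hi) == 0;  // ptest(scratch, scratch)
  return zf ? 1 : 0;                 // cmovq(zero, dst, tmp)
}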
@@ -3014,6 +3417,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
#undef ASSEMBLE_IEEE754_UNOP
#undef ASSEMBLE_ATOMIC_BINOP
#undef ASSEMBLE_ATOMIC64_BINOP
+#undef ASSEMBLE_SIMD_INSTR
+#undef ASSEMBLE_SIMD_IMM_INSTR
+#undef ASSEMBLE_SIMD_PUNPCK_SHUFFLE
+#undef ASSEMBLE_SIMD_IMM_SHUFFLE
namespace {
@@ -3217,8 +3624,8 @@ void CodeGenerator::FinishFrame(Frame* frame) {
frame->AlignSavedCalleeRegisterSlots();
if (saves_fp != 0) { // Save callee-saved XMM registers.
const uint32_t saves_fp_count = base::bits::CountPopulation(saves_fp);
- frame->AllocateSavedCalleeRegisterSlots(saves_fp_count *
- (kQuadWordSize / kPointerSize));
+ frame->AllocateSavedCalleeRegisterSlots(
+ saves_fp_count * (kQuadWordSize / kSystemPointerSize));
}
}
const RegList saves = call_descriptor->CalleeSavedRegisters();
@@ -3250,6 +3657,18 @@ void CodeGenerator::AssembleConstructFrame() {
__ StubPrologue(info()->GetOutputStackFrameType());
if (call_descriptor->IsWasmFunctionCall()) {
__ pushq(kWasmInstanceRegister);
+ } else if (call_descriptor->IsWasmImportWrapper()) {
+ // WASM import wrappers are passed a tuple in place of the instance.
+ // Unpack the tuple into the instance and the target callable.
+ // This must be done here in the codegen because it cannot be expressed
+ // properly in the graph.
+ __ LoadTaggedPointerField(
+ kJSFunctionRegister,
+ FieldOperand(kWasmInstanceRegister, Tuple2::kValue2Offset));
+ __ LoadTaggedPointerField(
+ kWasmInstanceRegister,
+ FieldOperand(kWasmInstanceRegister, Tuple2::kValue1Offset));
+ __ pushq(kWasmInstanceRegister);
}
}
@@ -3287,21 +3706,22 @@ void CodeGenerator::AssembleConstructFrame() {
// If the frame is bigger than the stack, we throw the stack overflow
// exception unconditionally. Thereby we can avoid the integer overflow
// check in the condition code.
- if (shrink_slots * kPointerSize < FLAG_stack_size * 1024) {
+ if (shrink_slots * kSystemPointerSize < FLAG_stack_size * 1024) {
__ movq(kScratchRegister,
FieldOperand(kWasmInstanceRegister,
WasmInstanceObject::kRealStackLimitAddressOffset));
__ movq(kScratchRegister, Operand(kScratchRegister, 0));
- __ addq(kScratchRegister, Immediate(shrink_slots * kPointerSize));
+ __ addq(kScratchRegister, Immediate(shrink_slots * kSystemPointerSize));
__ cmpq(rsp, kScratchRegister);
__ j(above_equal, &done);
}
- __ movp(rcx, FieldOperand(kWasmInstanceRegister,
- WasmInstanceObject::kCEntryStubOffset));
- __ Move(rsi, Smi::kZero);
+ __ LoadTaggedPointerField(
+ rcx, FieldOperand(kWasmInstanceRegister,
+ WasmInstanceObject::kCEntryStubOffset));
+ __ Move(rsi, Smi::zero());
__ CallRuntimeWithCEntry(Runtime::kThrowWasmStackOverflow, rcx);
ReferenceMap* reference_map = new (zone()) ReferenceMap(zone());
- RecordSafepoint(reference_map, Safepoint::kSimple, 0,
+ RecordSafepoint(reference_map, Safepoint::kSimple,
Safepoint::kNoLazyDeopt);
__ AssertUnreachable(AbortReason::kUnexpectedReturnFromWasmTrap);
__ bind(&done);
@@ -3309,11 +3729,11 @@ void CodeGenerator::AssembleConstructFrame() {
// Skip callee-saved and return slots, which are created below.
shrink_slots -= base::bits::CountPopulation(saves);
- shrink_slots -=
- base::bits::CountPopulation(saves_fp) * (kQuadWordSize / kPointerSize);
+ shrink_slots -= base::bits::CountPopulation(saves_fp) *
+ (kQuadWordSize / kSystemPointerSize);
shrink_slots -= frame()->GetReturnSlotCount();
if (shrink_slots > 0) {
- __ subq(rsp, Immediate(shrink_slots * kPointerSize));
+ __ subq(rsp, Immediate(shrink_slots * kSystemPointerSize));
}
}
@@ -3341,7 +3761,7 @@ void CodeGenerator::AssembleConstructFrame() {
// Allocate return slots (located after callee-saved).
if (frame()->GetReturnSlotCount() > 0) {
- __ subq(rsp, Immediate(frame()->GetReturnSlotCount() * kPointerSize));
+ __ subq(rsp, Immediate(frame()->GetReturnSlotCount() * kSystemPointerSize));
}
}
@@ -3353,7 +3773,7 @@ void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
if (saves != 0) {
const int returns = frame()->GetReturnSlotCount();
if (returns != 0) {
- __ addq(rsp, Immediate(returns * kPointerSize));
+ __ addq(rsp, Immediate(returns * kSystemPointerSize));
}
for (int i = 0; i < Register::kNumRegisters; i++) {
if (!((1 << i) & saves)) continue;
@@ -3382,7 +3802,7 @@ void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
// pop count.
DCHECK_EQ(0u, call_descriptor->CalleeSavedRegisters() & rcx.bit());
DCHECK_EQ(0u, call_descriptor->CalleeSavedRegisters() & rdx.bit());
- size_t pop_size = call_descriptor->StackParameterCount() * kPointerSize;
+ size_t pop_size = call_descriptor->StackParameterCount() * kSystemPointerSize;
X64OperandConverter g(this, nullptr);
if (call_descriptor->IsCFunctionCall()) {
AssembleDeconstructFrame();
@@ -3402,7 +3822,7 @@ void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
}
if (pop->IsImmediate()) {
- pop_size += g.ToConstant(pop).ToInt32() * kPointerSize;
+ pop_size += g.ToConstant(pop).ToInt32() * kSystemPointerSize;
CHECK_LT(pop_size, static_cast<size_t>(std::numeric_limits<int>::max()));
__ Ret(static_cast<int>(pop_size), rcx);
} else {
@@ -3423,7 +3843,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
auto MoveConstantToRegister = [&](Register dst, Constant src) {
switch (src.type()) {
case Constant::kInt32: {
- if (RelocInfo::IsWasmPtrReference(src.rmode())) {
+ if (RelocInfo::IsWasmReference(src.rmode())) {
__ movq(dst, src.ToInt64(), src.rmode());
} else {
int32_t value = src.ToInt32();
@@ -3436,7 +3856,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
break;
}
case Constant::kInt64:
- if (RelocInfo::IsWasmPtrReference(src.rmode())) {
+ if (RelocInfo::IsWasmReference(src.rmode())) {
__ movq(dst, src.ToInt64(), src.rmode());
} else {
__ Set(dst, src.ToInt64());
@@ -3473,7 +3893,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
};
// Helper function to write the given constant to the stack.
auto MoveConstantToSlot = [&](Operand dst, Constant src) {
- if (!RelocInfo::IsWasmPtrReference(src.rmode())) {
+ if (!RelocInfo::IsWasmReference(src.rmode())) {
switch (src.type()) {
case Constant::kInt32:
__ movq(dst, Immediate(src.ToInt32()));
@@ -3592,7 +4012,6 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
UNREACHABLE();
}
-
void CodeGenerator::AssembleSwap(InstructionOperand* source,
InstructionOperand* destination) {
X64OperandConverter g(this, nullptr);
@@ -3622,12 +4041,12 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
__ pushq(src);
frame_access_state()->IncreaseSPDelta(1);
unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
- kPointerSize);
+ kSystemPointerSize);
__ movq(src, g.ToOperand(destination));
frame_access_state()->IncreaseSPDelta(-1);
__ popq(g.ToOperand(destination));
unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
- -kPointerSize);
+ -kSystemPointerSize);
} else {
DCHECK(source->IsFPRegister());
XMMRegister src = g.ToDoubleRegister(source);
@@ -3656,10 +4075,10 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
__ movq(tmp, dst);
__ pushq(src); // Then use stack to copy src to destination.
unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
- kPointerSize);
+ kSystemPointerSize);
__ popq(dst);
unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
- -kPointerSize);
+ -kSystemPointerSize);
__ movq(src, tmp);
} else {
// Without AVX, misaligned reads and writes will trap. Move using the
@@ -3667,16 +4086,16 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
__ movups(kScratchDoubleReg, dst); // Save dst in scratch register.
__ pushq(src); // Then use stack to copy src to destination.
unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
- kPointerSize);
+ kSystemPointerSize);
__ popq(dst);
unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
- -kPointerSize);
- __ pushq(g.ToOperand(source, kPointerSize));
+ -kSystemPointerSize);
+ __ pushq(g.ToOperand(source, kSystemPointerSize));
unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
- kPointerSize);
- __ popq(g.ToOperand(destination, kPointerSize));
+ kSystemPointerSize);
+ __ popq(g.ToOperand(destination, kSystemPointerSize));
unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
- -kPointerSize);
+ -kSystemPointerSize);
__ movups(src, kScratchDoubleReg);
}
return;
diff --git a/deps/v8/src/compiler/x64/instruction-codes-x64.h b/deps/v8/src/compiler/backend/x64/instruction-codes-x64.h
index c2a194e94a..59f9a45ecf 100644
--- a/deps/v8/src/compiler/x64/instruction-codes-x64.h
+++ b/deps/v8/src/compiler/backend/x64/instruction-codes-x64.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_COMPILER_X64_INSTRUCTION_CODES_X64_H_
-#define V8_COMPILER_X64_INSTRUCTION_CODES_X64_H_
+#ifndef V8_COMPILER_BACKEND_X64_INSTRUCTION_CODES_X64_H_
+#define V8_COMPILER_BACKEND_X64_INSTRUCTION_CODES_X64_H_
namespace v8 {
namespace internal {
@@ -132,6 +132,9 @@ namespace compiler {
V(X64Movw) \
V(X64Movl) \
V(X64Movsxlq) \
+ V(X64MovqDecompressTaggedSigned) \
+ V(X64MovqDecompressTaggedPointer) \
+ V(X64MovqDecompressAnyTagged) \
V(X64Movq) \
V(X64Movsd) \
V(X64Movss) \
@@ -230,10 +233,13 @@ namespace compiler {
V(X64I8x16ReplaceLane) \
V(X64I8x16SConvertI16x8) \
V(X64I8x16Neg) \
+ V(X64I8x16Shl) \
+ V(X64I8x16ShrS) \
V(X64I8x16Add) \
V(X64I8x16AddSaturateS) \
V(X64I8x16Sub) \
V(X64I8x16SubSaturateS) \
+ V(X64I8x16Mul) \
V(X64I8x16MinS) \
V(X64I8x16MaxS) \
V(X64I8x16Eq) \
@@ -243,16 +249,43 @@ namespace compiler {
V(X64I8x16UConvertI16x8) \
V(X64I8x16AddSaturateU) \
V(X64I8x16SubSaturateU) \
+ V(X64I8x16ShrU) \
V(X64I8x16MinU) \
V(X64I8x16MaxU) \
V(X64I8x16GtU) \
V(X64I8x16GeU) \
+ V(X64S128Zero) \
+ V(X64S128Not) \
V(X64S128And) \
V(X64S128Or) \
V(X64S128Xor) \
- V(X64S128Not) \
V(X64S128Select) \
- V(X64S128Zero) \
+ V(X64S8x16Shuffle) \
+ V(X64S32x4Swizzle) \
+ V(X64S32x4Shuffle) \
+ V(X64S16x8Blend) \
+ V(X64S16x8HalfShuffle1) \
+ V(X64S16x8HalfShuffle2) \
+ V(X64S8x16Alignr) \
+ V(X64S16x8Dup) \
+ V(X64S8x16Dup) \
+ V(X64S16x8UnzipHigh) \
+ V(X64S16x8UnzipLow) \
+ V(X64S8x16UnzipHigh) \
+ V(X64S8x16UnzipLow) \
+ V(X64S64x2UnpackHigh) \
+ V(X64S32x4UnpackHigh) \
+ V(X64S16x8UnpackHigh) \
+ V(X64S8x16UnpackHigh) \
+ V(X64S64x2UnpackLow) \
+ V(X64S32x4UnpackLow) \
+ V(X64S16x8UnpackLow) \
+ V(X64S8x16UnpackLow) \
+ V(X64S8x16TransposeLow) \
+ V(X64S8x16TransposeHigh) \
+ V(X64S8x8Reverse) \
+ V(X64S8x4Reverse) \
+ V(X64S8x2Reverse) \
V(X64S1x4AnyTrue) \
V(X64S1x4AllTrue) \
V(X64S1x8AnyTrue) \
@@ -333,4 +366,4 @@ namespace compiler {
} // namespace internal
} // namespace v8
-#endif // V8_COMPILER_X64_INSTRUCTION_CODES_X64_H_
+#endif // V8_COMPILER_BACKEND_X64_INSTRUCTION_CODES_X64_H_
diff --git a/deps/v8/src/compiler/x64/instruction-scheduler-x64.cc b/deps/v8/src/compiler/backend/x64/instruction-scheduler-x64.cc
index e5523fd49d..2764a44078 100644
--- a/deps/v8/src/compiler/x64/instruction-scheduler-x64.cc
+++ b/deps/v8/src/compiler/backend/x64/instruction-scheduler-x64.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/compiler/instruction-scheduler.h"
+#include "src/compiler/backend/instruction-scheduler.h"
namespace v8 {
namespace internal {
@@ -10,7 +10,6 @@ namespace compiler {
bool InstructionScheduler::SchedulerSupported() { return true; }
-
int InstructionScheduler::GetTargetInstructionFlags(
const Instruction* instr) const {
switch (instr->arch_opcode()) {
@@ -207,10 +206,13 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64I8x16ReplaceLane:
case kX64I8x16SConvertI16x8:
case kX64I8x16Neg:
+ case kX64I8x16Shl:
+ case kX64I8x16ShrS:
case kX64I8x16Add:
case kX64I8x16AddSaturateS:
case kX64I8x16Sub:
case kX64I8x16SubSaturateS:
+ case kX64I8x16Mul:
case kX64I8x16MinS:
case kX64I8x16MaxS:
case kX64I8x16Eq:
@@ -220,6 +222,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64I8x16UConvertI16x8:
case kX64I8x16AddSaturateU:
case kX64I8x16SubSaturateU:
+ case kX64I8x16ShrU:
case kX64I8x16MinU:
case kX64I8x16MaxU:
case kX64I8x16GtU:
@@ -234,11 +237,37 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64S1x4AllTrue:
case kX64S1x8AnyTrue:
case kX64S1x8AllTrue:
+ case kX64S8x16Shuffle:
+ case kX64S32x4Swizzle:
+ case kX64S32x4Shuffle:
+ case kX64S16x8Blend:
+ case kX64S16x8HalfShuffle1:
+ case kX64S16x8HalfShuffle2:
+ case kX64S8x16Alignr:
+ case kX64S16x8Dup:
+ case kX64S8x16Dup:
+ case kX64S16x8UnzipHigh:
+ case kX64S16x8UnzipLow:
+ case kX64S8x16UnzipHigh:
+ case kX64S8x16UnzipLow:
+ case kX64S64x2UnpackHigh:
+ case kX64S32x4UnpackHigh:
+ case kX64S16x8UnpackHigh:
+ case kX64S8x16UnpackHigh:
+ case kX64S64x2UnpackLow:
+ case kX64S32x4UnpackLow:
+ case kX64S16x8UnpackLow:
+ case kX64S8x16UnpackLow:
+ case kX64S8x16TransposeLow:
+ case kX64S8x16TransposeHigh:
+ case kX64S8x8Reverse:
+ case kX64S8x4Reverse:
+ case kX64S8x2Reverse:
case kX64S1x16AnyTrue:
case kX64S1x16AllTrue:
return (instr->addressing_mode() == kMode_None)
- ? kNoOpcodeFlags
- : kIsLoadOperation | kHasSideEffect;
+ ? kNoOpcodeFlags
+ : kIsLoadOperation | kHasSideEffect;
case kX64Idiv:
case kX64Idiv32:
@@ -274,6 +303,9 @@ int InstructionScheduler::GetTargetInstructionFlags(
return kHasSideEffect;
}
+ case kX64MovqDecompressTaggedSigned:
+ case kX64MovqDecompressTaggedPointer:
+ case kX64MovqDecompressAnyTagged:
case kX64Movq:
case kX64Movsd:
case kX64Movss:
@@ -332,7 +364,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
return kHasSideEffect;
#define CASE(Name) case k##Name:
- COMMON_ARCH_OPCODE_LIST(CASE)
+ COMMON_ARCH_OPCODE_LIST(CASE)
#undef CASE
// Already covered in architecture independent code.
UNREACHABLE();
@@ -341,7 +373,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
UNREACHABLE();
}
-
int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
// Basic latency modeling for x64 instructions. They have been determined
// in an empirical way.
diff --git a/deps/v8/src/compiler/x64/instruction-selector-x64.cc b/deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc
index 211794ace8..3f5fe12051 100644
--- a/deps/v8/src/compiler/x64/instruction-selector-x64.cc
+++ b/deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc
@@ -5,10 +5,11 @@
#include <algorithm>
#include "src/base/adapters.h"
-#include "src/compiler/instruction-selector-impl.h"
+#include "src/base/overflowing-math.h"
+#include "src/compiler/backend/instruction-selector-impl.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"
-#include "src/turbo-assembler.h"
+#include "src/roots-inl.h"
namespace v8 {
namespace internal {
@@ -233,9 +234,18 @@ ArchOpcode GetLoadOpcode(LoadRepresentation load_rep) {
case MachineRepresentation::kWord32:
opcode = kX64Movl;
break;
+#ifdef V8_COMPRESS_POINTERS
+ case MachineRepresentation::kTaggedSigned:
+ return kX64MovqDecompressTaggedSigned;
+ case MachineRepresentation::kTaggedPointer:
+ return kX64MovqDecompressTaggedPointer;
+ case MachineRepresentation::kTagged:
+ return kX64MovqDecompressAnyTagged;
+#else
case MachineRepresentation::kTaggedSigned: // Fall through.
case MachineRepresentation::kTaggedPointer: // Fall through.
- case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kTagged: // Fall through.
+#endif
case MachineRepresentation::kWord64:
opcode = kX64Movq;
break;
@@ -308,8 +318,22 @@ void InstructionSelector::VisitLoad(Node* node) {
X64OperandGenerator g(this);
ArchOpcode opcode = GetLoadOpcode(load_rep);
- InstructionOperand outputs[1];
- outputs[0] = g.DefineAsRegister(node);
+ size_t temp_count = 0;
+ InstructionOperand temps[2];
+#ifdef V8_COMPRESS_POINTERS
+ if (opcode == kX64MovqDecompressAnyTagged) {
+ temps[temp_count++] = g.TempRegister();
+ }
+#ifdef DEBUG
+ if (opcode == kX64MovqDecompressTaggedSigned ||
+ opcode == kX64MovqDecompressTaggedPointer ||
+ opcode == kX64MovqDecompressAnyTagged) {
+ temps[temp_count++] = g.TempRegister();
+ }
+#endif // DEBUG
+#endif // V8_COMPRESS_POINTERS
+ DCHECK_LE(temp_count, arraysize(temps));
+ InstructionOperand outputs[] = {g.DefineAsRegister(node)};
InstructionOperand inputs[3];
size_t input_count = 0;
AddressingMode mode =
@@ -321,7 +345,7 @@ void InstructionSelector::VisitLoad(Node* node) {
CHECK_NE(poisoning_level_, PoisoningMitigationLevel::kDontPoison);
code |= MiscField::encode(kMemoryAccessPoisoned);
}
- Emit(code, 1, outputs, input_count, inputs);
+ Emit(code, 1, outputs, input_count, inputs, temp_count, temps);
}
void InstructionSelector::VisitPoisonedLoad(Node* node) { VisitLoad(node); }
@@ -372,7 +396,8 @@ void InstructionSelector::VisitStore(Node* node) {
g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
InstructionCode code =
opcode | AddressingModeField::encode(addressing_mode);
- if ((ElementSizeLog2Of(store_rep.representation()) < kPointerSizeLog2) &&
+ if ((ElementSizeLog2Of(store_rep.representation()) <
+ kSystemPointerSizeLog2) &&
(value->opcode() == IrOpcode::kTruncateInt64ToInt32) &&
CanCover(node, value)) {
value = value->InputAt(0);
@@ -476,7 +501,6 @@ static void VisitBinop(InstructionSelector* selector, Node* node,
inputs, cont);
}
-
// Shared routine for multiple binary operations.
static void VisitBinop(InstructionSelector* selector, Node* node,
InstructionCode opcode) {
@@ -484,7 +508,6 @@ static void VisitBinop(InstructionSelector* selector, Node* node,
VisitBinop(selector, node, opcode, &cont);
}
-
void InstructionSelector::VisitWord32And(Node* node) {
X64OperandGenerator g(this);
Uint32BinopMatcher m(node);
@@ -497,7 +520,6 @@ void InstructionSelector::VisitWord32And(Node* node) {
}
}
-
void InstructionSelector::VisitWord64And(Node* node) {
VisitBinop(this, node, kX64And);
}
@@ -506,12 +528,10 @@ void InstructionSelector::VisitWord32Or(Node* node) {
VisitBinop(this, node, kX64Or32);
}
-
void InstructionSelector::VisitWord64Or(Node* node) {
VisitBinop(this, node, kX64Or);
}
-
void InstructionSelector::VisitWord32Xor(Node* node) {
X64OperandGenerator g(this);
Uint32BinopMatcher m(node);
@@ -522,7 +542,6 @@ void InstructionSelector::VisitWord32Xor(Node* node) {
}
}
-
void InstructionSelector::VisitWord64Xor(Node* node) {
X64OperandGenerator g(this);
Uint64BinopMatcher m(node);
@@ -533,9 +552,45 @@ void InstructionSelector::VisitWord64Xor(Node* node) {
}
}
-
namespace {
+bool TryMergeTruncateInt64ToInt32IntoLoad(InstructionSelector* selector,
+ Node* node, Node* load) {
+ if (load->opcode() == IrOpcode::kLoad && selector->CanCover(node, load)) {
+ LoadRepresentation load_rep = LoadRepresentationOf(load->op());
+ MachineRepresentation rep = load_rep.representation();
+ InstructionCode opcode = kArchNop;
+ switch (rep) {
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kWord8:
+ opcode = load_rep.IsSigned() ? kX64Movsxbl : kX64Movzxbl;
+ break;
+ case MachineRepresentation::kWord16:
+ opcode = load_rep.IsSigned() ? kX64Movsxwl : kX64Movzxwl;
+ break;
+ case MachineRepresentation::kWord32:
+ case MachineRepresentation::kWord64:
+ case MachineRepresentation::kTaggedSigned:
+ case MachineRepresentation::kTagged:
+ opcode = kX64Movl;
+ break;
+ default:
+ UNREACHABLE();
+ return false;
+ }
+ X64OperandGenerator g(selector);
+ InstructionOperand outputs[] = {g.DefineAsRegister(node)};
+ size_t input_count = 0;
+ InstructionOperand inputs[3];
+ AddressingMode mode = g.GetEffectiveAddressMemoryOperand(
+ node->InputAt(0), inputs, &input_count);
+ opcode |= AddressingModeField::encode(mode);
+ selector->Emit(opcode, 1, outputs, input_count, inputs);
+ return true;
+ }
+ return false;
+}
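The merge works because a truncating use of a 64-bit load can instead load only the low 32 bits: movl writes the full register with zero extension, and on little-endian x64 the low four bytes sit at the same address. A scalar illustration of the equivalence the selector exploits (illustrative only):

#include <cstdint>
#include <cstring>

// movq load followed by truncation...
uint32_t TruncatingLoad(const uint64_t* p) {
  return static_cast<uint32_t>(*p);  // load 8 bytes, keep the low 4
}

// ...equals a direct 4-byte load (movl) on little-endian x64.
uint32_t DirectLoad(const uint64_t* p) {
  uint32_t lo;
  std::memcpy(&lo, p, sizeof(lo));  // low 4 bytes at the same address
  return lo;
}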
+
// Shared routine for multiple 32-bit shift operations.
// TODO(bmeurer): Merge this with VisitWord64Shift using template magic?
void VisitWord32Shift(InstructionSelector* selector, Node* node,
@@ -545,6 +600,11 @@ void VisitWord32Shift(InstructionSelector* selector, Node* node,
Node* left = m.left().node();
Node* right = m.right().node();
+ if (left->opcode() == IrOpcode::kTruncateInt64ToInt32 &&
+ selector->CanCover(node, left)) {
+ left = left->InputAt(0);
+ }
+
if (g.CanBeImmediate(right)) {
selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
g.UseImmediate(right));
@@ -554,7 +614,6 @@ void VisitWord32Shift(InstructionSelector* selector, Node* node,
}
}
-
// Shared routine for multiple 64-bit shift operations.
// TODO(bmeurer): Merge this with VisitWord32Shift using template magic?
void VisitWord64Shift(InstructionSelector* selector, Node* node,
@@ -625,7 +684,6 @@ void EmitLea(InstructionSelector* selector, InstructionCode opcode,
} // namespace
-
void InstructionSelector::VisitWord32Shl(Node* node) {
Int32ScaleMatcher m(node, true);
if (m.matches()) {
@@ -638,7 +696,6 @@ void InstructionSelector::VisitWord32Shl(Node* node) {
VisitWord32Shift(this, node, kX64Shl32);
}
-
void InstructionSelector::VisitWord64Shl(Node* node) {
X64OperandGenerator g(this);
Int64ScaleMatcher m(node, true);
@@ -664,12 +721,57 @@ void InstructionSelector::VisitWord64Shl(Node* node) {
VisitWord64Shift(this, node, kX64Shl);
}
-
void InstructionSelector::VisitWord32Shr(Node* node) {
VisitWord32Shift(this, node, kX64Shr32);
}
namespace {
+
+inline AddressingMode AddDisplacementToAddressingMode(AddressingMode mode) {
+ switch (mode) {
+ case kMode_MR:
+ return kMode_MRI;
+ break;
+ case kMode_MR1:
+ return kMode_MR1I;
+ break;
+ case kMode_MR2:
+ return kMode_MR2I;
+ break;
+ case kMode_MR4:
+ return kMode_MR4I;
+ break;
+ case kMode_MR8:
+ return kMode_MR8I;
+ break;
+ case kMode_M1:
+ return kMode_M1I;
+ break;
+ case kMode_M2:
+ return kMode_M2I;
+ break;
+ case kMode_M4:
+ return kMode_M4I;
+ break;
+ case kMode_M8:
+ return kMode_M8I;
+ break;
+ case kMode_None:
+ case kMode_MRI:
+ case kMode_MR1I:
+ case kMode_MR2I:
+ case kMode_MR4I:
+ case kMode_MR8I:
+ case kMode_M1I:
+ case kMode_M2I:
+ case kMode_M4I:
+ case kMode_M8I:
+ case kMode_Root:
+ UNREACHABLE();
+ }
+ UNREACHABLE();
+}
+
bool TryMatchLoadWord64AndShiftRight(InstructionSelector* selector, Node* node,
InstructionCode opcode) {
DCHECK(IrOpcode::kWord64Sar == node->opcode() ||
@@ -678,6 +780,8 @@ bool TryMatchLoadWord64AndShiftRight(InstructionSelector* selector, Node* node,
Int64BinopMatcher m(node);
if (selector->CanCover(m.node(), m.left().node()) && m.left().IsLoad() &&
m.right().Is(32)) {
+ DCHECK_EQ(selector->GetEffectLevel(node),
+ selector->GetEffectLevel(m.left().node()));
// Just load and sign-extend the interesting 4 bytes instead. This happens,
// for example, when we're loading and untagging SMIs.
BaseWithIndexAndDisplacement64Matcher mleft(m.left().node(),
@@ -692,47 +796,7 @@ bool TryMatchLoadWord64AndShiftRight(InstructionSelector* selector, Node* node,
// Make sure that the addressing mode indicates the presence of an
// immediate displacement. It seems that we never use M1 and M2, but we
// handle them here anyway.
- switch (mode) {
- case kMode_MR:
- mode = kMode_MRI;
- break;
- case kMode_MR1:
- mode = kMode_MR1I;
- break;
- case kMode_MR2:
- mode = kMode_MR2I;
- break;
- case kMode_MR4:
- mode = kMode_MR4I;
- break;
- case kMode_MR8:
- mode = kMode_MR8I;
- break;
- case kMode_M1:
- mode = kMode_M1I;
- break;
- case kMode_M2:
- mode = kMode_M2I;
- break;
- case kMode_M4:
- mode = kMode_M4I;
- break;
- case kMode_M8:
- mode = kMode_M8I;
- break;
- case kMode_None:
- case kMode_MRI:
- case kMode_MR1I:
- case kMode_MR2I:
- case kMode_MR4I:
- case kMode_MR8I:
- case kMode_M1I:
- case kMode_M2I:
- case kMode_M4I:
- case kMode_M8I:
- case kMode_Root:
- UNREACHABLE();
- }
+ mode = AddDisplacementToAddressingMode(mode);
inputs[input_count++] = ImmediateOperand(ImmediateOperand::INLINE, 4);
} else {
// In the case that the base address was zero, the displacement will be
@@ -751,6 +815,7 @@ bool TryMatchLoadWord64AndShiftRight(InstructionSelector* selector, Node* node,
}
return false;
}
+
} // namespace
void InstructionSelector::VisitWord64Shr(Node* node) {
@@ -779,19 +844,16 @@ void InstructionSelector::VisitWord64Sar(Node* node) {
VisitWord64Shift(this, node, kX64Sar);
}
-
void InstructionSelector::VisitWord32Ror(Node* node) {
VisitWord32Shift(this, node, kX64Ror32);
}
-
void InstructionSelector::VisitWord64Ror(Node* node) {
VisitWord64Shift(this, node, kX64Ror);
}
void InstructionSelector::VisitWord32ReverseBits(Node* node) { UNREACHABLE(); }
-
void InstructionSelector::VisitWord64ReverseBits(Node* node) { UNREACHABLE(); }
void InstructionSelector::VisitWord64ReverseBytes(Node* node) {
@@ -820,7 +882,6 @@ void InstructionSelector::VisitInt32Add(Node* node) {
VisitBinop(this, node, kX64Add32);
}
-
void InstructionSelector::VisitInt64Add(Node* node) {
X64OperandGenerator g(this);
@@ -837,7 +898,6 @@ void InstructionSelector::VisitInt64Add(Node* node) {
VisitBinop(this, node, kX64Add);
}
-
void InstructionSelector::VisitInt64AddWithOverflow(Node* node) {
if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
@@ -880,7 +940,7 @@ void InstructionSelector::VisitInt32Sub(Node* node) {
// by negating the value.
Emit(kX64Lea32 | AddressingModeField::encode(kMode_MRI),
g.DefineAsRegister(node), g.UseRegister(m.left().node()),
- g.TempImmediate(-m.right().Value()));
+ g.TempImmediate(base::NegateWithWraparound(m.right().Value())));
} else {
VisitBinop(this, node, kX64Sub32);
}
@@ -904,7 +964,6 @@ void InstructionSelector::VisitInt64Sub(Node* node) {
}
}
-
void InstructionSelector::VisitInt64SubWithOverflow(Node* node) {
if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
@@ -914,7 +973,6 @@ void InstructionSelector::VisitInt64SubWithOverflow(Node* node) {
VisitBinop(this, node, kX64Sub, &cont);
}
-
namespace {
void VisitMul(InstructionSelector* selector, Node* node, ArchOpcode opcode) {
@@ -949,7 +1007,6 @@ void VisitMulHigh(InstructionSelector* selector, Node* node,
g.UseUniqueRegister(right), arraysize(temps), temps);
}
-
void VisitDiv(InstructionSelector* selector, Node* node, ArchOpcode opcode) {
X64OperandGenerator g(selector);
InstructionOperand temps[] = {g.TempRegister(rdx)};
@@ -958,7 +1015,6 @@ void VisitDiv(InstructionSelector* selector, Node* node, ArchOpcode opcode) {
g.UseUniqueRegister(node->InputAt(1)), arraysize(temps), temps);
}
-
void VisitMod(InstructionSelector* selector, Node* node, ArchOpcode opcode) {
X64OperandGenerator g(selector);
InstructionOperand temps[] = {g.TempRegister(rax)};
@@ -969,7 +1025,6 @@ void VisitMod(InstructionSelector* selector, Node* node, ArchOpcode opcode) {
} // namespace
-
void InstructionSelector::VisitInt32Mul(Node* node) {
Int32ScaleMatcher m(node, true);
if (m.matches()) {
@@ -1000,47 +1055,38 @@ void InstructionSelector::VisitInt32MulHigh(Node* node) {
VisitMulHigh(this, node, kX64ImulHigh32);
}
-
void InstructionSelector::VisitInt32Div(Node* node) {
VisitDiv(this, node, kX64Idiv32);
}
-
void InstructionSelector::VisitInt64Div(Node* node) {
VisitDiv(this, node, kX64Idiv);
}
-
void InstructionSelector::VisitUint32Div(Node* node) {
VisitDiv(this, node, kX64Udiv32);
}
-
void InstructionSelector::VisitUint64Div(Node* node) {
VisitDiv(this, node, kX64Udiv);
}
-
void InstructionSelector::VisitInt32Mod(Node* node) {
VisitMod(this, node, kX64Idiv32);
}
-
void InstructionSelector::VisitInt64Mod(Node* node) {
VisitMod(this, node, kX64Idiv);
}
-
void InstructionSelector::VisitUint32Mod(Node* node) {
VisitMod(this, node, kX64Udiv32);
}
-
void InstructionSelector::VisitUint64Mod(Node* node) {
VisitMod(this, node, kX64Udiv);
}
-
void InstructionSelector::VisitUint32MulHigh(Node* node) {
VisitMulHigh(this, node, kX64UmulHigh32);
}
@@ -1060,7 +1106,6 @@ void InstructionSelector::VisitTryTruncateFloat32ToInt64(Node* node) {
Emit(kSSEFloat32ToInt64, output_count, outputs, 1, inputs);
}
-
void InstructionSelector::VisitTryTruncateFloat64ToInt64(Node* node) {
X64OperandGenerator g(this);
InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
@@ -1076,7 +1121,6 @@ void InstructionSelector::VisitTryTruncateFloat64ToInt64(Node* node) {
Emit(kSSEFloat64ToInt64, output_count, outputs, 1, inputs);
}
-
void InstructionSelector::VisitTryTruncateFloat32ToUint64(Node* node) {
X64OperandGenerator g(this);
InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
@@ -1092,7 +1136,6 @@ void InstructionSelector::VisitTryTruncateFloat32ToUint64(Node* node) {
Emit(kSSEFloat32ToUint64, output_count, outputs, 1, inputs);
}
-
void InstructionSelector::VisitTryTruncateFloat64ToUint64(Node* node) {
X64OperandGenerator g(this);
InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
@@ -1108,7 +1151,6 @@ void InstructionSelector::VisitTryTruncateFloat64ToUint64(Node* node) {
Emit(kSSEFloat64ToUint64, output_count, outputs, 1, inputs);
}
-
void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
X64OperandGenerator g(this);
Node* const value = node->InputAt(0);
@@ -1168,6 +1210,7 @@ bool ZeroExtendsWord32ToWord64(Node* node) {
case IrOpcode::kUint32LessThanOrEqual:
case IrOpcode::kUint32Mod:
case IrOpcode::kUint32MulHigh:
+ case IrOpcode::kTruncateInt64ToInt32:
// These 32-bit operations implicitly zero-extend to 64-bit on x64, so the
// zero-extension is a no-op.
return true;
@@ -1183,6 +1226,7 @@ bool ZeroExtendsWord32ToWord64(Node* node) {
}
}
case IrOpcode::kLoad:
+ case IrOpcode::kProtectedLoad:
case IrOpcode::kPoisonedLoad: {
// The movzxbl/movsxbl/movzxwl/movsxwl/movl operations implicitly
// zero-extend to 64-bit on x64, so the zero-extension is a no-op.
@@ -1214,7 +1258,6 @@ void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
Emit(kX64Movl, g.DefineAsRegister(node), g.Use(value));
}
-
namespace {
void VisitRO(InstructionSelector* selector, Node* node,
@@ -1223,7 +1266,6 @@ void VisitRO(InstructionSelector* selector, Node* node,
selector->Emit(opcode, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
}
-
void VisitRR(InstructionSelector* selector, Node* node,
InstructionCode opcode) {
X64OperandGenerator g(selector);
@@ -1250,7 +1292,6 @@ void VisitFloatBinop(InstructionSelector* selector, Node* node,
}
}
-
void VisitFloatUnop(InstructionSelector* selector, Node* node, Node* input,
ArchOpcode avx_opcode, ArchOpcode sse_opcode) {
X64OperandGenerator g(selector);
@@ -1275,6 +1316,7 @@ void VisitFloatUnop(InstructionSelector* selector, Node* node, Node* input,
V(ChangeFloat64ToInt32, kSSEFloat64ToInt32) \
V(ChangeFloat64ToInt64, kSSEFloat64ToInt64) \
V(ChangeFloat64ToUint32, kSSEFloat64ToUint32 | MiscField::encode(1)) \
+ V(TruncateFloat64ToInt64, kSSEFloat64ToInt64) \
V(TruncateFloat64ToUint32, kSSEFloat64ToUint32 | MiscField::encode(0)) \
V(ChangeFloat64ToUint64, kSSEFloat64ToUint64) \
V(TruncateFloat64ToFloat32, kSSEFloat64ToFloat32) \
@@ -1335,6 +1377,9 @@ void InstructionSelector::VisitTruncateFloat64ToWord32(Node* node) {
}
void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
+ // We rely on the fact that TruncateInt64ToInt32 zero-extends the
+ // value (see ZeroExtendsWord32ToWord64), so all code paths here
+ // have to satisfy that condition.
X64OperandGenerator g(this);
Node* value = node->InputAt(0);
if (CanCover(node, value)) {
@@ -1343,7 +1388,8 @@ void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
case IrOpcode::kWord64Shr: {
Int64BinopMatcher m(value);
if (m.right().Is(32)) {
- if (TryMatchLoadWord64AndShiftRight(this, value, kX64Movl)) {
+ if (CanCoverTransitively(node, value, value->InputAt(0)) &&
+ TryMatchLoadWord64AndShiftRight(this, value, kX64Movl)) {
return EmitIdentity(node);
}
Emit(kX64Shr, g.DefineSameAsFirst(node),
@@ -1352,6 +1398,12 @@ void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
}
break;
}
+ case IrOpcode::kLoad: {
+ if (TryMergeTruncateInt64ToInt32IntoLoad(this, node, value)) {
+ return;
+ }
+ break;
+ }
default:
break;
}
@@ -1363,7 +1415,6 @@ void InstructionSelector::VisitFloat32Add(Node* node) {
VisitFloatBinop(this, node, kAVXFloat32Add, kSSEFloat32Add);
}
-
void InstructionSelector::VisitFloat32Sub(Node* node) {
VisitFloatBinop(this, node, kAVXFloat32Sub, kSSEFloat32Sub);
}
@@ -1372,17 +1423,14 @@ void InstructionSelector::VisitFloat32Mul(Node* node) {
VisitFloatBinop(this, node, kAVXFloat32Mul, kSSEFloat32Mul);
}
-
void InstructionSelector::VisitFloat32Div(Node* node) {
VisitFloatBinop(this, node, kAVXFloat32Div, kSSEFloat32Div);
}
-
void InstructionSelector::VisitFloat32Abs(Node* node) {
VisitFloatUnop(this, node, node->InputAt(0), kAVXFloat32Abs, kSSEFloat32Abs);
}
-
void InstructionSelector::VisitFloat32Max(Node* node) {
VisitRRO(this, node, kSSEFloat32Max);
}
@@ -1395,7 +1443,6 @@ void InstructionSelector::VisitFloat64Add(Node* node) {
VisitFloatBinop(this, node, kAVXFloat64Add, kSSEFloat64Add);
}
-
void InstructionSelector::VisitFloat64Sub(Node* node) {
VisitFloatBinop(this, node, kAVXFloat64Sub, kSSEFloat64Sub);
}
@@ -1404,12 +1451,10 @@ void InstructionSelector::VisitFloat64Mul(Node* node) {
VisitFloatBinop(this, node, kAVXFloat64Mul, kSSEFloat64Mul);
}
-
void InstructionSelector::VisitFloat64Div(Node* node) {
VisitFloatBinop(this, node, kAVXFloat64Div, kSSEFloat64Div);
}
-
void InstructionSelector::VisitFloat64Mod(Node* node) {
X64OperandGenerator g(this);
InstructionOperand temps[] = {g.TempRegister(rax)};
@@ -1418,27 +1463,22 @@ void InstructionSelector::VisitFloat64Mod(Node* node) {
temps);
}
-
void InstructionSelector::VisitFloat64Max(Node* node) {
VisitRRO(this, node, kSSEFloat64Max);
}
-
void InstructionSelector::VisitFloat64Min(Node* node) {
VisitRRO(this, node, kSSEFloat64Min);
}
-
void InstructionSelector::VisitFloat64Abs(Node* node) {
VisitFloatUnop(this, node, node->InputAt(0), kAVXFloat64Abs, kSSEFloat64Abs);
}
-
void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
UNREACHABLE();
}
-
void InstructionSelector::VisitFloat32Neg(Node* node) {
VisitFloatUnop(this, node, node->InputAt(0), kAVXFloat32Neg, kSSEFloat32Neg);
}
@@ -1509,7 +1549,7 @@ void InstructionSelector::EmitPrepareArguments(
opcode |= AddressingModeField::encode(mode);
Emit(opcode, 0, outputs, input_count, inputs);
} else {
- Emit(kX64Push, g.NoOutput(), g.Use(input.node));
+ Emit(kX64Push, g.NoOutput(), g.UseAny(input.node));
}
}
}
@@ -1567,7 +1607,6 @@ void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
selector->EmitWithContinuation(opcode, left, right, cont);
}
-
// Shared routine for multiple compare operations.
void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
Node* left, Node* right, FlagsContinuation* cont,
@@ -1670,10 +1709,13 @@ void VisitWordCompare(InstructionSelector* selector, Node* node,
// The 32-bit comparisons automatically truncate Word64
// values to Word32 range, no need to do that explicitly.
if (opcode == kX64Cmp32 || opcode == kX64Test32) {
- while (left->opcode() == IrOpcode::kTruncateInt64ToInt32) {
+ if (left->opcode() == IrOpcode::kTruncateInt64ToInt32 &&
+ selector->CanCover(node, left)) {
left = left->InputAt(0);
}
- while (right->opcode() == IrOpcode::kTruncateInt64ToInt32) {
+
+ if (right->opcode() == IrOpcode::kTruncateInt64ToInt32 &&
+ selector->CanCover(node, right)) {
right = right->InputAt(0);
}
}
@@ -1720,37 +1762,41 @@ void VisitWord64Compare(InstructionSelector* selector, Node* node,
FlagsContinuation* cont) {
X64OperandGenerator g(selector);
if (selector->CanUseRootsRegister()) {
- Heap* const heap = selector->isolate()->heap();
+ const RootsTable& roots_table = selector->isolate()->roots_table();
RootIndex root_index;
HeapObjectBinopMatcher m(node);
if (m.right().HasValue() &&
- heap->IsRootHandle(m.right().Value(), &root_index)) {
+ roots_table.IsRootHandle(m.right().Value(), &root_index)) {
if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute();
InstructionCode opcode =
kX64Cmp | AddressingModeField::encode(kMode_Root);
return VisitCompare(
selector, opcode,
- g.TempImmediate(TurboAssemblerBase::RootRegisterOffset(root_index)),
+ g.TempImmediate(
+ TurboAssemblerBase::RootRegisterOffsetForRootIndex(root_index)),
g.UseRegister(m.left().node()), cont);
} else if (m.left().HasValue() &&
- heap->IsRootHandle(m.left().Value(), &root_index)) {
+ roots_table.IsRootHandle(m.left().Value(), &root_index)) {
InstructionCode opcode =
kX64Cmp | AddressingModeField::encode(kMode_Root);
return VisitCompare(
selector, opcode,
- g.TempImmediate(TurboAssemblerBase::RootRegisterOffset(root_index)),
+ g.TempImmediate(
+ TurboAssemblerBase::RootRegisterOffsetForRootIndex(root_index)),
g.UseRegister(m.right().node()), cont);
}
}
- StackCheckMatcher<Int64BinopMatcher, IrOpcode::kUint64LessThan> m(
- selector->isolate(), node);
- if (m.Matched()) {
- // Compare(Load(js_stack_limit), LoadStackPointer)
- if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute();
- InstructionCode opcode = cont->Encode(kX64StackCheck);
- CHECK(cont->IsBranch());
- selector->EmitWithContinuation(opcode, cont);
- return;
+ if (selector->isolate() != nullptr) {
+ StackCheckMatcher<Int64BinopMatcher, IrOpcode::kUint64LessThan> m(
+ selector->isolate(), node);
+ if (m.Matched()) {
+ // Compare(Load(js_stack_limit), LoadStackPointer)
+ if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute();
+ InstructionCode opcode = cont->Encode(kX64StackCheck);
+ CHECK(cont->IsBranch());
+ selector->EmitWithContinuation(opcode, cont);
+ return;
+ }
}
WasmStackCheckMatcher<Int64BinopMatcher, IrOpcode::kUint64LessThan> wasm_m(
node);
@@ -1816,10 +1862,39 @@ void VisitCompareZero(InstructionSelector* selector, Node* user, Node* node,
break;
}
}
- VisitCompare(selector, opcode, g.Use(node), g.TempImmediate(0), cont);
+ int effect_level = selector->GetEffectLevel(node);
+ if (cont->IsBranch()) {
+ effect_level = selector->GetEffectLevel(
+ cont->true_block()->PredecessorAt(0)->control_input());
+ }
+ if (node->opcode() == IrOpcode::kLoad) {
+ switch (LoadRepresentationOf(node->op()).representation()) {
+ case MachineRepresentation::kWord8:
+ if (opcode == kX64Cmp32) {
+ opcode = kX64Cmp8;
+ } else if (opcode == kX64Test32) {
+ opcode = kX64Test8;
+ }
+ break;
+ case MachineRepresentation::kWord16:
+ if (opcode == kX64Cmp32) {
+ opcode = kX64Cmp16;
+ } else if (opcode == kX64Test32) {
+ opcode = kX64Test16;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+ if (g.CanBeMemoryOperand(opcode, user, node, effect_level)) {
+ VisitCompareWithMemoryOperand(selector, opcode, node, g.TempImmediate(0),
+ cont);
+ } else {
+ VisitCompare(selector, opcode, g.Use(node), g.TempImmediate(0), cont);
+ }
}
-
// Shared routine for multiple float32 compare operations (inputs commuted).
void VisitFloat32Compare(InstructionSelector* selector, Node* node,
FlagsContinuation* cont) {
@@ -1830,7 +1905,6 @@ void VisitFloat32Compare(InstructionSelector* selector, Node* node,
VisitCompare(selector, opcode, right, left, cont, false);
}
-
// Shared routine for multiple float64 compare operations (inputs commuted).
void VisitFloat64Compare(InstructionSelector* selector, Node* node,
FlagsContinuation* cont) {
@@ -2075,7 +2149,6 @@ void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
return EmitBinarySearchSwitch(sw, value_operand);
}
-
void InstructionSelector::VisitWord32Equal(Node* const node) {
Node* user = node;
FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
@@ -2086,33 +2159,28 @@ void InstructionSelector::VisitWord32Equal(Node* const node) {
VisitWordCompare(this, node, kX64Cmp32, &cont);
}
-
void InstructionSelector::VisitInt32LessThan(Node* node) {
FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
VisitWordCompare(this, node, kX64Cmp32, &cont);
}
-
void InstructionSelector::VisitInt32LessThanOrEqual(Node* node) {
FlagsContinuation cont =
FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
VisitWordCompare(this, node, kX64Cmp32, &cont);
}
-
void InstructionSelector::VisitUint32LessThan(Node* node) {
FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
VisitWordCompare(this, node, kX64Cmp32, &cont);
}
-
void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) {
FlagsContinuation cont =
FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
VisitWordCompare(this, node, kX64Cmp32, &cont);
}
-
void InstructionSelector::VisitWord64Equal(Node* const node) {
FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
Int64BinopMatcher m(node);
@@ -2134,7 +2202,6 @@ void InstructionSelector::VisitWord64Equal(Node* const node) {
VisitWord64Compare(this, node, &cont);
}
-
void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
@@ -2144,7 +2211,6 @@ void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
VisitBinop(this, node, kX64Add32, &cont);
}
-
void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
@@ -2154,53 +2220,45 @@ void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
VisitBinop(this, node, kX64Sub32, &cont);
}
-
void InstructionSelector::VisitInt64LessThan(Node* node) {
FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
VisitWord64Compare(this, node, &cont);
}
-
void InstructionSelector::VisitInt64LessThanOrEqual(Node* node) {
FlagsContinuation cont =
FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
VisitWord64Compare(this, node, &cont);
}
-
void InstructionSelector::VisitUint64LessThan(Node* node) {
FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
VisitWord64Compare(this, node, &cont);
}
-
void InstructionSelector::VisitUint64LessThanOrEqual(Node* node) {
FlagsContinuation cont =
FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
VisitWord64Compare(this, node, &cont);
}
-
void InstructionSelector::VisitFloat32Equal(Node* node) {
FlagsContinuation cont = FlagsContinuation::ForSet(kUnorderedEqual, node);
VisitFloat32Compare(this, node, &cont);
}
-
void InstructionSelector::VisitFloat32LessThan(Node* node) {
FlagsContinuation cont =
FlagsContinuation::ForSet(kUnsignedGreaterThan, node);
VisitFloat32Compare(this, node, &cont);
}
-
void InstructionSelector::VisitFloat32LessThanOrEqual(Node* node) {
FlagsContinuation cont =
FlagsContinuation::ForSet(kUnsignedGreaterThanOrEqual, node);
VisitFloat32Compare(this, node, &cont);
}
-
void InstructionSelector::VisitFloat64Equal(Node* node) {
FlagsContinuation cont = FlagsContinuation::ForSet(kUnorderedEqual, node);
VisitFloat64Compare(this, node, &cont);
@@ -2247,7 +2305,6 @@ void InstructionSelector::VisitFloat64InsertLowWord32(Node* node) {
g.UseRegister(left), g.Use(right));
}
-
void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
X64OperandGenerator g(this);
Node* left = node->InputAt(0);
@@ -2561,7 +2618,10 @@ VISIT_ATOMIC_BINOP(Xor)
V(I32x4ShrU) \
V(I16x8Shl) \
V(I16x8ShrS) \
- V(I16x8ShrU)
+ V(I16x8ShrU) \
+ V(I8x16Shl) \
+ V(I8x16ShrS) \
+ V(I8x16ShrU)
#define SIMD_ANYTRUE_LIST(V) \
V(S1x4AnyTrue) \
@@ -2575,7 +2635,7 @@ VISIT_ATOMIC_BINOP(Xor)
void InstructionSelector::VisitS128Zero(Node* node) {
X64OperandGenerator g(this);
- Emit(kX64S128Zero, g.DefineAsRegister(node), g.DefineAsRegister(node));
+ Emit(kX64S128Zero, g.DefineAsRegister(node));
}
#define VISIT_SIMD_SPLAT(Type) \
@@ -2683,8 +2743,9 @@ void InstructionSelector::VisitI32x4SConvertF32x4(Node* node) {
void InstructionSelector::VisitI32x4UConvertF32x4(Node* node) {
X64OperandGenerator g(this);
+ InstructionOperand temps[] = {g.TempSimd128Register()};
Emit(kX64I32x4UConvertF32x4, g.DefineSameAsFirst(node),
- g.UseRegister(node->InputAt(0)));
+ g.UseRegister(node->InputAt(0)), arraysize(temps), temps);
}
void InstructionSelector::VisitI16x8UConvertI32x4(Node* node) {
@@ -2699,6 +2760,14 @@ void InstructionSelector::VisitI8x16UConvertI16x8(Node* node) {
g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
}
+void InstructionSelector::VisitI8x16Mul(Node* node) {
+ X64OperandGenerator g(this);
+ InstructionOperand temps[] = {g.TempSimd128Register()};
+ Emit(kX64I8x16Mul, g.DefineSameAsFirst(node),
+ g.UseUniqueRegister(node->InputAt(0)),
+ g.UseUniqueRegister(node->InputAt(1)), arraysize(temps), temps);
+}
+
void InstructionSelector::VisitInt32AbsWithOverflow(Node* node) {
UNREACHABLE();
}
@@ -2707,6 +2776,282 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
UNREACHABLE();
}
+namespace {
+
+// Packs a 4-lane shuffle into a single imm8 suitable for use by pshufd,
+// pshuflw, and pshufhw.
+uint8_t PackShuffle4(uint8_t* shuffle) {
+ return (shuffle[0] & 3) | ((shuffle[1] & 3) << 2) | ((shuffle[2] & 3) << 4) |
+ ((shuffle[3] & 3) << 6);
+}
+
+// Gets an 8-bit lane mask suitable for 16x8 pblendw.
+uint8_t PackBlend8(const uint8_t* shuffle16x8) {
+ int8_t result = 0;
+ for (int i = 0; i < 8; ++i) {
+ result |= (shuffle16x8[i] >= 8 ? 1 : 0) << i;
+ }
+ return result;
+}
+
+// Gets an 8-bit lane mask suitable for 32x4 pblendw.
+uint8_t PackBlend4(const uint8_t* shuffle32x4) {
+ int8_t result = 0;
+ for (int i = 0; i < 4; ++i) {
+ result |= (shuffle32x4[i] >= 4 ? 0x3 : 0) << (i * 2);
+ }
+ return result;
+}
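Worked examples for the three packing helpers above, with expected values computed by hand (a sketch exercising the definitions as written):

#include <cassert>
#include <cstdint>

void PackHelperExamples() {
  // Reversing the four 32-bit lanes packs to 0b00'01'10'11 == 0x1B.
  uint8_t reverse32x4[4] = {3, 2, 1, 0};
  assert(PackShuffle4(reverse32x4) == 0x1B);

  // 16x8 blend: bit i is set when word lane i comes from the second operand.
  uint8_t blend16x8[8] = {0, 1, 2, 3, 12, 13, 14, 15};
  assert(PackBlend8(blend16x8) == 0xF0);

  // 32x4 blend: each selected dword sets two adjacent pblendw bits.
  uint8_t blend32x4[4] = {0, 5, 2, 7};
  assert(PackBlend4(blend32x4) == 0xCC);
}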
+
+// Returns true if shuffle can be decomposed into two 16x4 half shuffles
+// followed by a 16x8 blend.
+// E.g. [3 2 1 0 15 14 13 12].
+bool TryMatch16x8HalfShuffle(uint8_t* shuffle16x8, uint8_t* blend_mask) {
+ *blend_mask = 0;
+ for (int i = 0; i < 8; i++) {
+ if ((shuffle16x8[i] & 0x4) != (i & 0x4)) return false;
+ *blend_mask |= (shuffle16x8[i] > 7 ? 1 : 0) << i;
+ }
+ return true;
+}
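Tracing the matcher on the example from the comment above: every index stays in its own 64-bit half (index & 4 equals position & 4), so the pattern matches, and the lanes drawn from the second operand set the corresponding blend bits:

#include <cassert>
#include <cstdint>

void HalfShuffleExample() {
  // [3 2 1 0 15 14 13 12]: low half reversed from src0,
  // high half reversed from src1.
  uint8_t shuffle16x8[8] = {3, 2, 1, 0, 15, 14, 13, 12};
  uint8_t blend_mask = 0;
  bool matched = TryMatch16x8HalfShuffle(shuffle16x8, &blend_mask);
  assert(matched);
  assert(blend_mask == 0xF0);  // lanes 4-7 come from the second operand
}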
+
+struct ShuffleEntry {
+ uint8_t shuffle[kSimd128Size];
+ ArchOpcode opcode;
+ bool src0_needs_reg;
+ bool src1_needs_reg;
+};
+
+// Shuffles that map to architecture-specific instruction sequences. These are
+// matched very early, so we shouldn't include shuffles that match better in
+// later tests, like 32x4 and 16x8 shuffles. In general, these patterns should
+// map either to a single instruction or be finer-grained, such as zip/unzip or
+// transpose patterns.
+static const ShuffleEntry arch_shuffles[] = {
+ {{0, 1, 2, 3, 4, 5, 6, 7, 16, 17, 18, 19, 20, 21, 22, 23},
+ kX64S64x2UnpackLow,
+ true,
+ false},
+ {{8, 9, 10, 11, 12, 13, 14, 15, 24, 25, 26, 27, 28, 29, 30, 31},
+ kX64S64x2UnpackHigh,
+ true,
+ false},
+ {{0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23},
+ kX64S32x4UnpackLow,
+ true,
+ false},
+ {{8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31},
+ kX64S32x4UnpackHigh,
+ true,
+ false},
+ {{0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23},
+ kX64S16x8UnpackLow,
+ true,
+ false},
+ {{8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31},
+ kX64S16x8UnpackHigh,
+ true,
+ false},
+ {{0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23},
+ kX64S8x16UnpackLow,
+ true,
+ false},
+ {{8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31},
+ kX64S8x16UnpackHigh,
+ true,
+ false},
+
+ {{0, 1, 4, 5, 8, 9, 12, 13, 16, 17, 20, 21, 24, 25, 28, 29},
+ kX64S16x8UnzipLow,
+ true,
+ false},
+ {{2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31},
+ kX64S16x8UnzipHigh,
+ true,
+ true},
+ {{0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30},
+ kX64S8x16UnzipLow,
+ true,
+ true},
+ {{1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31},
+ kX64S8x16UnzipHigh,
+ true,
+ true},
+ {{0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30},
+ kX64S8x16TransposeLow,
+ true,
+ true},
+ {{1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31},
+ kX64S8x16TransposeHigh,
+ true,
+ true},
+ {{7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8},
+ kX64S8x8Reverse,
+ false,
+ false},
+ {{3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12},
+ kX64S8x4Reverse,
+ false,
+ false},
+ {{1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14},
+ kX64S8x2Reverse,
+ true,
+ true}};
+
+bool TryMatchArchShuffle(const uint8_t* shuffle, const ShuffleEntry* table,
+ size_t num_entries, bool is_swizzle,
+ const ShuffleEntry** arch_shuffle) {
+ uint8_t mask = is_swizzle ? kSimd128Size - 1 : 2 * kSimd128Size - 1;
+ for (size_t i = 0; i < num_entries; ++i) {
+ const ShuffleEntry& entry = table[i];
+ int j = 0;
+ for (; j < kSimd128Size; ++j) {
+ if ((entry.shuffle[j] & mask) != (shuffle[j] & mask)) {
+ break;
+ }
+ }
+ if (j == kSimd128Size) {
+ *arch_shuffle = &entry;
+ return true;
+ }
+ }
+ return false;
+}
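A short usage sketch for the table lookup, relying on the definitions above (with is_swizzle the indices are masked to 0-15, so a one-operand pattern can still hit a two-operand table entry):

#include <cassert>
#include <cstddef>

void ArchShuffleLookupExample() {
  // 8x16 unpack-low of two distinct operands.
  const uint8_t unpack_low[kSimd128Size] = {0, 16, 1, 17, 2, 18, 3, 19,
                                            4, 20, 5, 21, 6, 22, 7, 23};
  const ShuffleEntry* entry = nullptr;
  bool found = TryMatchArchShuffle(unpack_low, arch_shuffles,
                                   arraysize(arch_shuffles),
                                   /*is_swizzle=*/false, &entry);
  assert(found);
  assert(entry->opcode == kX64S8x16UnpackLow);
}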
+
+} // namespace
+
+void InstructionSelector::VisitS8x16Shuffle(Node* node) {
+ uint8_t shuffle[kSimd128Size];
+ bool is_swizzle;
+ CanonicalizeShuffle(node, shuffle, &is_swizzle);
+
+ int imm_count = 0;
+ static const int kMaxImms = 6;
+ uint32_t imms[kMaxImms];
+ int temp_count = 0;
+ static const int kMaxTemps = 2;
+ InstructionOperand temps[kMaxTemps];
+
+ X64OperandGenerator g(this);
+ // Swizzles don't generally need DefineSameAsFirst to avoid a move.
+ bool no_same_as_first = is_swizzle;
+ // We generally need UseRegister for input0, Use for input1.
+ bool src0_needs_reg = true;
+ bool src1_needs_reg = false;
+ ArchOpcode opcode = kX64S8x16Shuffle; // general shuffle is the default
+
+ uint8_t offset;
+ uint8_t shuffle32x4[4];
+ uint8_t shuffle16x8[8];
+ int index;
+ const ShuffleEntry* arch_shuffle;
+ if (TryMatchConcat(shuffle, &offset)) {
+ // Swap inputs from the normal order for (v)palignr.
+ SwapShuffleInputs(node);
+ is_swizzle = false; // It's simpler to just handle the general case.
+ no_same_as_first = false; // SSE requires same-as-first.
+ opcode = kX64S8x16Alignr;
+ // palignr takes a single imm8 offset.
+ imms[imm_count++] = offset;
+ } else if (TryMatchArchShuffle(shuffle, arch_shuffles,
+ arraysize(arch_shuffles), is_swizzle,
+ &arch_shuffle)) {
+ opcode = arch_shuffle->opcode;
+ src0_needs_reg = arch_shuffle->src0_needs_reg;
+ // SSE can't take advantage of both operands in registers and needs
+ // same-as-first.
+ src1_needs_reg = false;
+ no_same_as_first = false;
+ } else if (TryMatch32x4Shuffle(shuffle, shuffle32x4)) {
+ uint8_t shuffle_mask = PackShuffle4(shuffle32x4);
+ if (is_swizzle) {
+ if (TryMatchIdentity(shuffle)) {
+ // Bypass normal shuffle code generation in this case.
+ EmitIdentity(node);
+ return;
+ } else {
+ // pshufd takes a single imm8 shuffle mask.
+ opcode = kX64S32x4Swizzle;
+ no_same_as_first = true;
+ src0_needs_reg = false;
+ imms[imm_count++] = shuffle_mask;
+ }
+ } else {
+ // Two-operand shuffle.
+ // A blend is more efficient than a general 32x4 shuffle; try it first.
+ if (TryMatchBlend(shuffle)) {
+ opcode = kX64S16x8Blend;
+ uint8_t blend_mask = PackBlend4(shuffle32x4);
+ imms[imm_count++] = blend_mask;
+ } else {
+ opcode = kX64S32x4Shuffle;
+ no_same_as_first = true;
+ src0_needs_reg = false;
+ imms[imm_count++] = shuffle_mask;
+ int8_t blend_mask = PackBlend4(shuffle32x4);
+ imms[imm_count++] = blend_mask;
+ }
+ }
+ } else if (TryMatch16x8Shuffle(shuffle, shuffle16x8)) {
+ uint8_t blend_mask;
+ if (TryMatchBlend(shuffle)) {
+ opcode = kX64S16x8Blend;
+ blend_mask = PackBlend8(shuffle16x8);
+ imms[imm_count++] = blend_mask;
+ } else if (TryMatchDup<8>(shuffle, &index)) {
+ opcode = kX64S16x8Dup;
+ src0_needs_reg = false;
+ imms[imm_count++] = index;
+ } else if (TryMatch16x8HalfShuffle(shuffle16x8, &blend_mask)) {
+ opcode = is_swizzle ? kX64S16x8HalfShuffle1 : kX64S16x8HalfShuffle2;
+ // Half-shuffles don't need DefineSameAsFirst or UseRegister(src0).
+ no_same_as_first = true;
+ src0_needs_reg = false;
+ uint8_t mask_lo = PackShuffle4(shuffle16x8);
+ uint8_t mask_hi = PackShuffle4(shuffle16x8 + 4);
+ imms[imm_count++] = mask_lo;
+ imms[imm_count++] = mask_hi;
+ if (!is_swizzle) imms[imm_count++] = blend_mask;
+ }
+ } else if (TryMatchDup<16>(shuffle, &index)) {
+ opcode = kX64S8x16Dup;
+ no_same_as_first = false;
+ src0_needs_reg = true;
+ imms[imm_count++] = index;
+ }
+ if (opcode == kX64S8x16Shuffle) {
+ // Use same-as-first for general swizzle, but not shuffle.
+ no_same_as_first = !is_swizzle;
+ src0_needs_reg = !no_same_as_first;
+ imms[imm_count++] = Pack4Lanes(shuffle);
+ imms[imm_count++] = Pack4Lanes(shuffle + 4);
+ imms[imm_count++] = Pack4Lanes(shuffle + 8);
+ imms[imm_count++] = Pack4Lanes(shuffle + 12);
+ temps[temp_count++] = g.TempRegister();
+ }
+
+ // Use DefineAsRegister(node) and Use(src0) if we can without forcing an extra
+ // move instruction in the CodeGenerator.
+ Node* input0 = node->InputAt(0);
+ InstructionOperand dst =
+ no_same_as_first ? g.DefineAsRegister(node) : g.DefineSameAsFirst(node);
+ InstructionOperand src0 =
+ src0_needs_reg ? g.UseRegister(input0) : g.Use(input0);
+
+ int input_count = 0;
+ InstructionOperand inputs[2 + kMaxImms + kMaxTemps];
+ inputs[input_count++] = src0;
+ if (!is_swizzle) {
+ Node* input1 = node->InputAt(1);
+ inputs[input_count++] =
+ src1_needs_reg ? g.UseRegister(input1) : g.Use(input1);
+ }
+ for (int i = 0; i < imm_count; ++i) {
+ inputs[input_count++] = g.UseImmediate(imms[i]);
+ }
+ Emit(opcode, 1, &dst, input_count, inputs, temp_count, temps);
+}
+
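
A minimal standalone sketch of the immediate packing the branches above rely on. The bit layouts are assumptions based on the x86 shuffle encodings (pshufd-style 2-bit lane fields, byte lanes packed little-endian); PackShuffle4 and Pack4Lanes here are reconstructions, not the patch's definitions.

#include <cassert>
#include <cstdint>

// Assumed layout: lane i occupies bits [2*i, 2*i+1], as pshufd expects.
uint8_t PackShuffle4Sketch(const uint8_t lanes[4]) {
  uint8_t imm = 0;
  for (int i = 3; i >= 0; --i) imm = (imm << 2) | (lanes[i] & 3);
  return imm;
}

// Assumed layout: byte lane i occupies bits [8*i, 8*i+7] of the immediate.
uint32_t Pack4LanesSketch(const uint8_t* lanes) {
  uint32_t imm = 0;
  for (int i = 3; i >= 0; --i) imm = (imm << 8) | lanes[i];
  return imm;
}

int main() {
  const uint8_t swap_pairs[4] = {1, 0, 3, 2};
  assert(PackShuffle4Sketch(swap_pairs) == 0xB1);  // 0b10'11'00'01
  const uint8_t identity[4] = {0, 1, 2, 3};
  assert(Pack4LanesSketch(identity) == 0x03020100);
}
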
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
diff --git a/deps/v8/src/compiler/x64/unwinding-info-writer-x64.cc b/deps/v8/src/compiler/backend/x64/unwinding-info-writer-x64.cc
index 94b82ad8cf..0a144d5e68 100644
--- a/deps/v8/src/compiler/x64/unwinding-info-writer-x64.cc
+++ b/deps/v8/src/compiler/backend/x64/unwinding-info-writer-x64.cc
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/compiler/x64/unwinding-info-writer-x64.h"
-#include "src/compiler/instruction.h"
+#include "src/compiler/backend/x64/unwinding-info-writer-x64.h"
+#include "src/compiler/backend/instruction.h"
namespace v8 {
namespace internal {
@@ -19,28 +19,21 @@ void UnwindingInfoWriter::BeginInstructionBlock(int pc_offset,
static_cast<int>(block_initial_states_.size()));
const BlockInitialState* initial_state =
block_initial_states_[block->rpo_number().ToInt()];
- if (initial_state) {
- if (initial_state->register_ != eh_frame_writer_.base_register() &&
- initial_state->offset_ != eh_frame_writer_.base_offset()) {
- eh_frame_writer_.AdvanceLocation(pc_offset);
- eh_frame_writer_.SetBaseAddressRegisterAndOffset(initial_state->register_,
- initial_state->offset_);
- } else if (initial_state->register_ != eh_frame_writer_.base_register()) {
- eh_frame_writer_.AdvanceLocation(pc_offset);
- eh_frame_writer_.SetBaseAddressRegister(initial_state->register_);
- } else if (initial_state->offset_ != eh_frame_writer_.base_offset()) {
- eh_frame_writer_.AdvanceLocation(pc_offset);
- eh_frame_writer_.SetBaseAddressOffset(initial_state->offset_);
- }
+ if (!initial_state) return;
+ if (initial_state->register_ != eh_frame_writer_.base_register() &&
+ initial_state->offset_ != eh_frame_writer_.base_offset()) {
+ eh_frame_writer_.AdvanceLocation(pc_offset);
+ eh_frame_writer_.SetBaseAddressRegisterAndOffset(initial_state->register_,
+ initial_state->offset_);
+ } else if (initial_state->register_ != eh_frame_writer_.base_register()) {
+ eh_frame_writer_.AdvanceLocation(pc_offset);
+ eh_frame_writer_.SetBaseAddressRegister(initial_state->register_);
+ } else if (initial_state->offset_ != eh_frame_writer_.base_offset()) {
+ eh_frame_writer_.AdvanceLocation(pc_offset);
+ eh_frame_writer_.SetBaseAddressOffset(initial_state->offset_);
+ }
tracking_fp_ = initial_state->tracking_fp_;
- } else {
- // The entry block always lacks an explicit initial state.
- // The exit block may lack an explicit state, if it is only reached by
- // the block ending in a ret.
- // All the other blocks must have an explicit initial state.
- DCHECK(block->predecessors().empty() || block->successors().empty());
- }
}
void UnwindingInfoWriter::EndInstructionBlock(const InstructionBlock* block) {
diff --git a/deps/v8/src/compiler/x64/unwinding-info-writer-x64.h b/deps/v8/src/compiler/backend/x64/unwinding-info-writer-x64.h
index e1c6000d4f..f460cbca99 100644
--- a/deps/v8/src/compiler/x64/unwinding-info-writer-x64.h
+++ b/deps/v8/src/compiler/backend/x64/unwinding-info-writer-x64.h
@@ -2,10 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_COMPILER_X64_UNWINDING_INFO_WRITER_X64_H_
-#define V8_COMPILER_X64_UNWINDING_INFO_WRITER_X64_H_
+#ifndef V8_COMPILER_BACKEND_X64_UNWINDING_INFO_WRITER_X64_H_
+#define V8_COMPILER_BACKEND_X64_UNWINDING_INFO_WRITER_X64_H_
#include "src/eh-frame.h"
+#include "src/flags.h"
namespace v8 {
namespace internal {
@@ -76,4 +77,4 @@ class UnwindingInfoWriter {
} // namespace internal
} // namespace v8
-#endif // V8_COMPILER_X64_UNWINDING_INFO_WRITER_X64_H_
+#endif // V8_COMPILER_BACKEND_X64_UNWINDING_INFO_WRITER_X64_H_
diff --git a/deps/v8/src/compiler/basic-block-instrumentor.cc b/deps/v8/src/compiler/basic-block-instrumentor.cc
index 77f88502c3..803150fdfe 100644
--- a/deps/v8/src/compiler/basic-block-instrumentor.cc
+++ b/deps/v8/src/compiler/basic-block-instrumentor.cc
@@ -41,8 +41,9 @@ static NodeVector::iterator FindInsertionPoint(BasicBlock* block) {
// TODO(dcarney): need to mark code as non-serializable.
static const Operator* PointerConstant(CommonOperatorBuilder* common,
intptr_t ptr) {
- return kPointerSize == 8 ? common->Int64Constant(ptr)
- : common->Int32Constant(static_cast<int32_t>(ptr));
+ return kSystemPointerSize == 8
+ ? common->Int64Constant(ptr)
+ : common->Int32Constant(static_cast<int32_t>(ptr));
}
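
The rename from kPointerSize to kSystemPointerSize does not change the dispatch itself; as a plain-C++ reminder of what it selects, with sizeof(void*) standing in for the constant:

#include <cstdint>
#include <cstdio>

int main() {
  intptr_t ptr = 0x1234;  // value to embed as a graph constant
  if (sizeof(void*) == 8) {
    std::printf("Int64Constant(%lld)\n", static_cast<long long>(ptr));
  } else {
    std::printf("Int32Constant(%d)\n", static_cast<int>(ptr));
  }
}
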
BasicBlockProfiler::Data* BasicBlockInstrumentor::Instrument(
diff --git a/deps/v8/src/compiler/bytecode-analysis.cc b/deps/v8/src/compiler/bytecode-analysis.cc
index 255a4f3926..7da43cc375 100644
--- a/deps/v8/src/compiler/bytecode-analysis.cc
+++ b/deps/v8/src/compiler/bytecode-analysis.cc
@@ -7,6 +7,7 @@
#include "src/interpreter/bytecode-array-iterator.h"
#include "src/interpreter/bytecode-array-random-iterator.h"
#include "src/objects-inl.h"
+#include "src/ostreams.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/compiler/bytecode-graph-builder.cc b/deps/v8/src/compiler/bytecode-graph-builder.cc
index 4405aff207..53868038e7 100644
--- a/deps/v8/src/compiler/bytecode-graph-builder.cc
+++ b/deps/v8/src/compiler/bytecode-graph-builder.cc
@@ -17,6 +17,7 @@
#include "src/objects/js-array-inl.h"
#include "src/objects/js-generator.h"
#include "src/objects/literal-objects-inl.h"
+#include "src/objects/smi.h"
#include "src/vector-slot-pair.h"
namespace v8 {
@@ -514,7 +515,8 @@ Node* BytecodeGraphBuilder::Environment::Checkpoint(
}
BytecodeGraphBuilder::BytecodeGraphBuilder(
- Zone* local_zone, Handle<SharedFunctionInfo> shared_info,
+ Zone* local_zone, Handle<BytecodeArray> bytecode_array,
+ Handle<SharedFunctionInfo> shared_info,
Handle<FeedbackVector> feedback_vector, BailoutId osr_offset,
JSGraph* jsgraph, CallFrequency& invocation_frequency,
SourcePositionTable* source_positions, Handle<Context> native_context,
@@ -523,14 +525,13 @@ BytecodeGraphBuilder::BytecodeGraphBuilder(
: local_zone_(local_zone),
jsgraph_(jsgraph),
invocation_frequency_(invocation_frequency),
- bytecode_array_(
- handle(shared_info->GetBytecodeArray(), jsgraph->isolate())),
+ bytecode_array_(bytecode_array),
feedback_vector_(feedback_vector),
type_hint_lowering_(jsgraph, feedback_vector, flags),
frame_state_function_info_(common()->CreateFrameStateFunctionInfo(
FrameStateType::kInterpretedFunction,
- bytecode_array()->parameter_count(),
- bytecode_array()->register_count(), shared_info)),
+ bytecode_array->parameter_count(), bytecode_array->register_count(),
+ shared_info)),
bytecode_iterator_(nullptr),
bytecode_analysis_(nullptr),
environment_(nullptr),
@@ -746,11 +747,11 @@ class BytecodeGraphBuilder::OsrIteratorState {
private:
struct IteratorsStates {
int exception_handler_index_;
- SourcePositionTableIterator::IndexAndPosition source_iterator_state_;
+ SourcePositionTableIterator::IndexAndPositionState source_iterator_state_;
- IteratorsStates(
- int exception_handler_index,
- SourcePositionTableIterator::IndexAndPosition source_iterator_state)
+ IteratorsStates(int exception_handler_index,
+ SourcePositionTableIterator::IndexAndPositionState
+ source_iterator_state)
: exception_handler_index_(exception_handler_index),
source_iterator_state_(source_iterator_state) {}
};
@@ -869,7 +870,7 @@ void BytecodeGraphBuilder::VisitSingleBytecode(
Visit##name(); \
break;
BYTECODE_LIST(BYTECODE_CASE)
-#undef BYTECODE_CODE
+#undef BYTECODE_CASE
}
}
}
@@ -903,9 +904,20 @@ void BytecodeGraphBuilder::VisitBytecodes() {
AdvanceToOsrEntryAndPeelLoops(&iterator, &source_position_iterator);
}
+ bool has_one_shot_bytecode = false;
for (; !iterator.done(); iterator.Advance()) {
+ if (interpreter::Bytecodes::IsOneShotBytecode(
+ iterator.current_bytecode())) {
+ has_one_shot_bytecode = true;
+ }
VisitSingleBytecode(&source_position_iterator);
}
+
+ if (has_one_shot_bytecode) {
+ isolate()->CountUsage(
+ v8::Isolate::UseCounterFeature::kOptimizedFunctionWithOneShotBytecode);
+ }
+
set_bytecode_analysis(nullptr);
set_bytecode_iterator(nullptr);
DCHECK(exception_handlers_.empty());
@@ -1650,8 +1662,7 @@ void BytecodeGraphBuilder::VisitCreateObjectLiteral() {
int number_of_properties = constant_properties->size();
Node* literal = NewNode(javascript()->CreateLiteralObject(
constant_properties, pair, literal_flags, number_of_properties));
- environment()->BindRegister(bytecode_iterator().GetRegisterOperand(3),
- literal, Environment::kAttachFrameState);
+ environment()->BindAccumulator(literal, Environment::kAttachFrameState);
}
void BytecodeGraphBuilder::VisitCreateEmptyObjectLiteral() {
@@ -1681,7 +1692,7 @@ void BytecodeGraphBuilder::VisitGetTemplateObject() {
FeedbackNexus nexus(feedback_vector(), slot);
Handle<JSArray> cached_value;
- if (nexus.GetFeedback() == MaybeObject::FromSmi(Smi::kZero)) {
+ if (nexus.GetFeedback() == MaybeObject::FromSmi(Smi::zero())) {
// It's not observable when the template object is created, so we
// can just create it eagerly during graph building and bake in
// the JSArray constant here.
@@ -2279,8 +2290,13 @@ ForInMode BytecodeGraphBuilder::GetForInMode(int operand_index) {
CallFrequency BytecodeGraphBuilder::ComputeCallFrequency(int slot_id) const {
if (invocation_frequency_.IsUnknown()) return CallFrequency();
FeedbackNexus nexus(feedback_vector(), FeedbackVector::ToSlot(slot_id));
- return CallFrequency(nexus.ComputeCallFrequency() *
- invocation_frequency_.value());
+ float feedback_frequency = nexus.ComputeCallFrequency();
+ if (feedback_frequency == 0.0f) {
+ // This is to prevent multiplying zero and infinity.
+ return CallFrequency(0.0f);
+ } else {
+ return CallFrequency(feedback_frequency * invocation_frequency_.value());
+ }
}
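
The patch's own comment explains the guard: it prevents multiplying zero and infinity. In IEEE-754 float arithmetic that product is NaN, not zero, so without the early return a zero feedback frequency could poison the computed call frequency. A self-contained demonstration:

#include <cassert>
#include <cmath>
#include <limits>

int main() {
  const float zero = 0.0f;
  const float inf = std::numeric_limits<float>::infinity();
  assert(std::isnan(zero * inf));   // the case the early return avoids
  assert(zero * 1000.0f == 0.0f);   // finite frequencies are fine
}
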
SpeculationMode BytecodeGraphBuilder::GetSpeculationMode(int slot_id) const {
@@ -3465,7 +3481,7 @@ Node* BytecodeGraphBuilder::MakeNode(const Operator* op, int value_input_count,
if (has_control) ++input_count_with_deps;
if (has_effect) ++input_count_with_deps;
Node** buffer = EnsureInputBufferSize(input_count_with_deps);
- memcpy(buffer, value_inputs, kPointerSize * value_input_count);
+ memcpy(buffer, value_inputs, kSystemPointerSize * value_input_count);
Node** current_input = buffer + value_input_count;
if (has_context) {
*current_input++ = OperatorProperties::NeedsExactContext(op)
diff --git a/deps/v8/src/compiler/bytecode-graph-builder.h b/deps/v8/src/compiler/bytecode-graph-builder.h
index 016134ddbb..eaa43c6816 100644
--- a/deps/v8/src/compiler/bytecode-graph-builder.h
+++ b/deps/v8/src/compiler/bytecode-graph-builder.h
@@ -29,9 +29,10 @@ class SourcePositionTable;
class BytecodeGraphBuilder {
public:
BytecodeGraphBuilder(
- Zone* local_zone, Handle<SharedFunctionInfo> shared,
- Handle<FeedbackVector> feedback_vector, BailoutId osr_offset,
- JSGraph* jsgraph, CallFrequency& invocation_frequency,
+ Zone* local_zone, Handle<BytecodeArray> bytecode_array,
+ Handle<SharedFunctionInfo> shared, Handle<FeedbackVector> feedback_vector,
+ BailoutId osr_offset, JSGraph* jsgraph,
+ CallFrequency& invocation_frequency,
SourcePositionTable* source_positions, Handle<Context> native_context,
int inlining_id = SourcePosition::kNotInlined,
JSTypeHintLowering::Flags flags = JSTypeHintLowering::kNoFlags,
diff --git a/deps/v8/src/compiler/code-assembler.cc b/deps/v8/src/compiler/code-assembler.cc
index 93e384444e..4b1c211084 100644
--- a/deps/v8/src/compiler/code-assembler.cc
+++ b/deps/v8/src/compiler/code-assembler.cc
@@ -7,8 +7,8 @@
#include <ostream>
#include "src/code-factory.h"
+#include "src/compiler/backend/instruction-selector.h"
#include "src/compiler/graph.h"
-#include "src/compiler/instruction-selector.h"
#include "src/compiler/linkage.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/pipeline.h"
@@ -17,11 +17,11 @@
#include "src/frames.h"
#include "src/interface-descriptors.h"
#include "src/interpreter/bytecodes.h"
-#include "src/lsan.h"
#include "src/machine-type.h"
#include "src/macro-assembler.h"
+#include "src/memcopy.h"
#include "src/objects-inl.h"
-#include "src/utils.h"
+#include "src/objects/smi.h"
#include "src/zone/zone.h"
namespace v8 {
@@ -44,7 +44,7 @@ static_assert(
CodeAssemblerState::CodeAssemblerState(
Isolate* isolate, Zone* zone, const CallInterfaceDescriptor& descriptor,
Code::Kind kind, const char* name, PoisoningMitigationLevel poisoning_level,
- uint32_t stub_key, int32_t builtin_index)
+ int32_t builtin_index)
// TODO(rmcilroy): Should we use Linkage::GetBytecodeDispatchDescriptor for
// bytecode handlers?
: CodeAssemblerState(
@@ -52,7 +52,7 @@ CodeAssemblerState::CodeAssemblerState(
Linkage::GetStubCallDescriptor(
zone, descriptor, descriptor.GetStackParameterCount(),
CallDescriptor::kNoFlags, Operator::kNoProperties),
- kind, name, poisoning_level, stub_key, builtin_index) {}
+ kind, name, poisoning_level, builtin_index) {}
CodeAssemblerState::CodeAssemblerState(Isolate* isolate, Zone* zone,
int parameter_count, Code::Kind kind,
@@ -66,13 +66,13 @@ CodeAssemblerState::CodeAssemblerState(Isolate* isolate, Zone* zone,
(kind == Code::BUILTIN ? CallDescriptor::kPushArgumentCount
: CallDescriptor::kNoFlags) |
CallDescriptor::kCanUseRoots),
- kind, name, poisoning_level, 0, builtin_index) {}
+ kind, name, poisoning_level, builtin_index) {}
CodeAssemblerState::CodeAssemblerState(Isolate* isolate, Zone* zone,
CallDescriptor* call_descriptor,
Code::Kind kind, const char* name,
PoisoningMitigationLevel poisoning_level,
- uint32_t stub_key, int32_t builtin_index)
+ int32_t builtin_index)
: raw_assembler_(new RawMachineAssembler(
isolate, new (zone) Graph(zone), call_descriptor,
MachineType::PointerRepresentation(),
@@ -80,7 +80,6 @@ CodeAssemblerState::CodeAssemblerState(Isolate* isolate, Zone* zone,
InstructionSelector::AlignmentRequirements(), poisoning_level)),
kind_(kind),
name_(name),
- stub_key_(stub_key),
builtin_index_(builtin_index),
code_generated_(false),
variables_(zone) {}
@@ -97,15 +96,16 @@ CodeAssembler::~CodeAssembler() = default;
void CodeAssemblerState::PrintCurrentBlock(std::ostream& os) {
raw_assembler_->PrintCurrentBlock(os);
}
+#endif
bool CodeAssemblerState::InsideBlock() { return raw_assembler_->InsideBlock(); }
-#endif
void CodeAssemblerState::SetInitialDebugInformation(const char* msg,
const char* file,
int line) {
#if DEBUG
AssemblerDebugInfo debug_info = {msg, file, line};
+ raw_assembler_->SetSourcePosition(file, line);
raw_assembler_->SetInitialDebugInformation(debug_info);
#endif // DEBUG
}
@@ -173,31 +173,15 @@ Handle<Code> CodeAssembler::GenerateCode(CodeAssemblerState* state,
DCHECK(!state->code_generated_);
RawMachineAssembler* rasm = state->raw_assembler_.get();
- Schedule* schedule = rasm->Export();
- JumpOptimizationInfo jump_opt;
- bool should_optimize_jumps =
- rasm->isolate()->serializer_enabled() && FLAG_turbo_rewrite_far_jumps;
+ Handle<Code> code;
+ Graph* graph = rasm->ExportForOptimization();
- Handle<Code> code =
- Pipeline::GenerateCodeForCodeStub(
- rasm->isolate(), rasm->call_descriptor(), rasm->graph(), schedule,
- state->kind_, state->name_, state->stub_key_, state->builtin_index_,
- should_optimize_jumps ? &jump_opt : nullptr, rasm->poisoning_level(),
- options)
- .ToHandleChecked();
-
- if (jump_opt.is_optimizable()) {
- jump_opt.set_optimizing();
-
- // Regenerate machine code
- code =
- Pipeline::GenerateCodeForCodeStub(
- rasm->isolate(), rasm->call_descriptor(), rasm->graph(), schedule,
- state->kind_, state->name_, state->stub_key_, state->builtin_index_,
- &jump_opt, rasm->poisoning_level(), options)
- .ToHandleChecked();
- }
+ code = Pipeline::GenerateCodeForCodeStub(
+ rasm->isolate(), rasm->call_descriptor(), graph,
+ rasm->source_positions(), state->kind_, state->name_,
+ state->builtin_index_, rasm->poisoning_level(), options)
+ .ToHandleChecked();
state->code_generated_ = true;
return code;
@@ -275,9 +259,9 @@ TNode<Number> CodeAssembler::NumberConstant(double value) {
}
}
-TNode<Smi> CodeAssembler::SmiConstant(Smi* value) {
- return UncheckedCast<Smi>(
- BitcastWordToTaggedSigned(IntPtrConstant(bit_cast<intptr_t>(value))));
+TNode<Smi> CodeAssembler::SmiConstant(Smi value) {
+ return UncheckedCast<Smi>(BitcastWordToTaggedSigned(
+ IntPtrConstant(static_cast<intptr_t>(value.ptr()))));
}
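
SmiConstant now takes Smi by value and feeds the raw tagged word (value.ptr()) into the graph. A sketch of the tagging this relies on, under the assumption of a 64-bit build without pointer compression, where the payload sits in the upper 32 bits and the low tag bit stays 0 (32-bit builds shift by 1 instead):

#include <cassert>
#include <cstdint>

constexpr int kSmiShiftSketch = 32;  // assumption: 64-bit, 32-bit payload shift

uint64_t TagSmi(int32_t value) {
  return static_cast<uint64_t>(static_cast<uint32_t>(value)) << kSmiShiftSketch;
}

int32_t UntagSmi(uint64_t word) {
  return static_cast<int32_t>(word >> kSmiShiftSketch);
}

int main() {
  assert((TagSmi(42) & 1) == 0);  // Smi tag bit is 0
  assert(UntagSmi(TagSmi(42)) == 42);
  assert(UntagSmi(TagSmi(-7)) == -7);  // sign survives the round trip
}
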
TNode<Smi> CodeAssembler::SmiConstant(int value) {
@@ -296,7 +280,9 @@ TNode<String> CodeAssembler::StringConstant(const char* str) {
}
TNode<Oddball> CodeAssembler::BooleanConstant(bool value) {
- return UncheckedCast<Oddball>(raw_assembler()->BooleanConstant(value));
+ Handle<Object> object = isolate()->factory()->ToBoolean(value);
+ return UncheckedCast<Oddball>(
+ raw_assembler()->HeapConstant(Handle<HeapObject>::cast(object)));
}
TNode<ExternalReference> CodeAssembler::ExternalConstant(
@@ -340,7 +326,7 @@ bool CodeAssembler::ToInt64Constant(Node* node, int64_t& out_value) {
return m.HasValue();
}
-bool CodeAssembler::ToSmiConstant(Node* node, Smi*& out_value) {
+bool CodeAssembler::ToSmiConstant(Node* node, Smi* out_value) {
if (node->opcode() == IrOpcode::kBitcastWordToTaggedSigned) {
node = node->InputAt(0);
}
@@ -349,7 +335,7 @@ bool CodeAssembler::ToSmiConstant(Node* node, Smi*& out_value) {
intptr_t value = m.Value();
// Make sure that the value is actually a smi
CHECK_EQ(0, value & ((static_cast<intptr_t>(1) << kSmiShiftSize) - 1));
- out_value = Smi::cast(bit_cast<Object*>(value));
+ *out_value = Smi(static_cast<Address>(value));
return true;
}
return false;
@@ -434,25 +420,13 @@ void CodeAssembler::Unreachable() {
raw_assembler()->Unreachable();
}
-void CodeAssembler::Comment(const char* format, ...) {
+void CodeAssembler::Comment(std::string str) {
if (!FLAG_code_comments) return;
- char buffer[4 * KB];
- StringBuilder builder(buffer, arraysize(buffer));
- va_list arguments;
- va_start(arguments, format);
- builder.AddFormattedList(format, arguments);
- va_end(arguments);
-
- // Copy the string before recording it in the assembler to avoid
- // issues when the stack allocated buffer goes out of scope.
- const int prefix_len = 2;
- int length = builder.position() + 1;
- char* copy = reinterpret_cast<char*>(malloc(length + prefix_len));
- LSAN_IGNORE_OBJECT(copy);
- MemCopy(copy + prefix_len, builder.Finalize(), length);
- copy[0] = ';';
- copy[1] = ' ';
- raw_assembler()->Comment(copy);
+ raw_assembler()->Comment(str);
+}
+
+void CodeAssembler::SetSourcePosition(const char* file, int line) {
+ raw_assembler()->SetSourcePosition(file, line);
}
void CodeAssembler::Bind(Label* label) { return label->Bind(); }
@@ -514,6 +488,23 @@ TNode<WordT> CodeAssembler::IntPtrAdd(SloppyTNode<WordT> left,
return UncheckedCast<WordT>(raw_assembler()->IntPtrAdd(left, right));
}
+TNode<IntPtrT> CodeAssembler::IntPtrDiv(TNode<IntPtrT> left,
+ TNode<IntPtrT> right) {
+ intptr_t left_constant;
+ bool is_left_constant = ToIntPtrConstant(left, left_constant);
+ intptr_t right_constant;
+ bool is_right_constant = ToIntPtrConstant(right, right_constant);
+ if (is_right_constant) {
+ if (is_left_constant) {
+ return IntPtrConstant(left_constant / right_constant);
+ }
+ if (base::bits::IsPowerOfTwo(right_constant)) {
+ return WordSar(left, WhichPowerOf2(right_constant));
+ }
+ }
+ return UncheckedCast<IntPtrT>(raw_assembler()->IntPtrDiv(left, right));
+}
+
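
The new IntPtrDiv folds constant operands and strength-reduces division by a power-of-two constant into an arithmetic right shift (WordSar). One caveat worth keeping in mind: an arithmetic shift rounds toward negative infinity while integer division truncates toward zero, so the two agree only for non-negative dividends — presumably the intended domain here.

#include <cassert>

int main() {
  // Power-of-two division as an arithmetic shift: exact for x >= 0.
  long long x = 40;
  assert((x >> 3) == x / 8);  // 5 == 5
  // For negative dividends the two disagree: sar rounds down, '/' truncates.
  // (>> on negative values is arithmetic on mainstream compilers, and
  // guaranteed in C++20.)
  long long y = -1;
  assert((y >> 3) == -1);
  assert(y / 8 == 0);
}
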
TNode<WordT> CodeAssembler::IntPtrSub(SloppyTNode<WordT> left,
SloppyTNode<WordT> right) {
intptr_t left_constant;
@@ -965,8 +956,8 @@ Node* CodeAssembler::AtomicLoad(MachineType rep, Node* base, Node* offset) {
}
TNode<Object> CodeAssembler::LoadRoot(RootIndex root_index) {
- if (isolate()->heap()->RootCanBeTreatedAsConstant(root_index)) {
- Handle<Object> root = isolate()->heap()->root_handle(root_index);
+ if (RootsTable::IsImmortalImmovable(root_index)) {
+ Handle<Object> root = isolate()->root_handle(root_index);
if (root->IsSmi()) {
return SmiConstant(Smi::cast(*root));
} else {
@@ -977,11 +968,11 @@ TNode<Object> CodeAssembler::LoadRoot(RootIndex root_index) {
// TODO(jgruber): In theory we could generate better code for this by
// letting the macro assembler decide how to load from the roots list. In most
// cases, it would boil down to loading from a fixed kRootRegister offset.
- Node* roots_array_start =
- ExternalConstant(ExternalReference::roots_array_start(isolate()));
- size_t offset = static_cast<size_t>(root_index) * kPointerSize;
- return UncheckedCast<Object>(Load(MachineType::AnyTagged(), roots_array_start,
- IntPtrConstant(offset)));
+ Node* isolate_root =
+ ExternalConstant(ExternalReference::isolate_root(isolate()));
+ int offset = IsolateData::root_slot_offset(root_index);
+ return UncheckedCast<Object>(
+ Load(MachineType::AnyTagged(), isolate_root, IntPtrConstant(offset)));
}
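
LoadRoot (and StoreRoot below) now compute roots as fixed offsets from the isolate root instead of going through a roots_array_start external reference. The arithmetic, sketched with an assumed slot size and a hypothetical table offset:

#include <cstdint>
using Address = uintptr_t;

constexpr int kSlotSize = sizeof(Address);  // assumed tagged slot size
constexpr int kRootsTableOffset = 0;        // hypothetical offset in IsolateData

int RootSlotOffsetSketch(int root_index) {
  return kRootsTableOffset + root_index * kSlotSize;
}

// Illustrative only; not meant to be executed against a real address.
Address LoadRootSketch(Address isolate_root, int root_index) {
  return *reinterpret_cast<Address*>(isolate_root +
                                     RootSlotOffsetSketch(root_index));
}

int main() { return RootSlotOffsetSketch(3) == 3 * kSlotSize ? 0 : 1; }
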
Node* CodeAssembler::Store(Node* base, Node* value) {
@@ -989,15 +980,21 @@ Node* CodeAssembler::Store(Node* base, Node* value) {
kFullWriteBarrier);
}
-Node* CodeAssembler::Store(Node* base, Node* offset, Node* value) {
- return raw_assembler()->Store(MachineRepresentation::kTagged, base, offset,
- value, kFullWriteBarrier);
+void CodeAssembler::OptimizedStoreField(MachineRepresentation rep,
+ TNode<HeapObject> object, int offset,
+ Node* value,
+ WriteBarrierKind write_barrier) {
+ raw_assembler()->OptimizedStoreField(rep, object, offset, value,
+ write_barrier);
+}
+void CodeAssembler::OptimizedStoreMap(TNode<HeapObject> object,
+ TNode<Map> map) {
+ raw_assembler()->OptimizedStoreMap(object, map);
}
-Node* CodeAssembler::StoreWithMapWriteBarrier(Node* base, Node* offset,
- Node* value) {
+Node* CodeAssembler::Store(Node* base, Node* offset, Node* value) {
return raw_assembler()->Store(MachineRepresentation::kTagged, base, offset,
- value, kMapWriteBarrier);
+ value, kFullWriteBarrier);
}
Node* CodeAssembler::StoreNoWriteBarrier(MachineRepresentation rep, Node* base,
@@ -1040,11 +1037,11 @@ Node* CodeAssembler::AtomicCompareExchange(MachineType type, Node* base,
}
Node* CodeAssembler::StoreRoot(RootIndex root_index, Node* value) {
- DCHECK(Heap::RootCanBeWrittenAfterInitialization(root_index));
- Node* roots_array_start =
- ExternalConstant(ExternalReference::roots_array_start(isolate()));
- size_t offset = static_cast<size_t>(root_index) * kPointerSize;
- return StoreNoWriteBarrier(MachineRepresentation::kTagged, roots_array_start,
+ DCHECK(!RootsTable::IsImmortalImmovable(root_index));
+ Node* isolate_root =
+ ExternalConstant(ExternalReference::isolate_root(isolate()));
+ int offset = IsolateData::root_slot_offset(root_index);
+ return StoreNoWriteBarrier(MachineRepresentation::kTagged, isolate_root,
IntPtrConstant(offset), value);
}
@@ -1053,7 +1050,7 @@ Node* CodeAssembler::Retain(Node* value) {
}
Node* CodeAssembler::Projection(int index, Node* value) {
- DCHECK(index < value->op()->ValueOutputCount());
+ DCHECK_LT(index, value->op()->ValueOutputCount());
return raw_assembler()->Projection(index, value);
}
@@ -1064,6 +1061,8 @@ void CodeAssembler::GotoIfException(Node* node, Label* if_exception,
return;
}
+ // No catch handlers should be active if we're using catch labels.

+ DCHECK_EQ(state()->exception_handler_labels_.size(), 0);
DCHECK(!node->op()->HasProperty(Operator::kNoThrow));
Label success(this), exception(this, Label::kDeferred);
@@ -1081,6 +1080,38 @@ void CodeAssembler::GotoIfException(Node* node, Label* if_exception,
Goto(if_exception);
Bind(&success);
+ raw_assembler()->AddNode(raw_assembler()->common()->IfSuccess(), node);
+}
+
+TNode<HeapObject> CodeAssembler::OptimizedAllocate(TNode<IntPtrT> size,
+ PretenureFlag pretenure) {
+ return UncheckedCast<HeapObject>(
+ raw_assembler()->OptimizedAllocate(size, pretenure));
+}
+
+void CodeAssembler::HandleException(Node* node) {
+ if (state_->exception_handler_labels_.size() == 0) return;
+ CodeAssemblerExceptionHandlerLabel* label =
+ state_->exception_handler_labels_.back();
+
+ if (node->op()->HasProperty(Operator::kNoThrow)) {
+ return;
+ }
+
+ Label success(this), exception(this, Label::kDeferred);
+ success.MergeVariables();
+ exception.MergeVariables();
+
+ raw_assembler()->Continuations(node, success.label_, exception.label_);
+
+ Bind(&exception);
+ const Operator* op = raw_assembler()->common()->IfException();
+ Node* exception_value = raw_assembler()->AddNode(op, node, node);
+ label->AddInputs({UncheckedCast<Object>(exception_value)});
+ Goto(label->plain_label());
+
+ Bind(&success);
+ raw_assembler()->AddNode(raw_assembler()->common()->IfSuccess(), node);
}
namespace {
@@ -1133,6 +1164,7 @@ TNode<Object> CodeAssembler::CallRuntimeWithCEntryImpl(
CallPrologue();
Node* return_value =
raw_assembler()->CallN(call_descriptor, inputs.size(), inputs.data());
+ HandleException(return_value);
CallEpilogue();
return UncheckedCast<Object>(return_value);
}
@@ -1168,9 +1200,13 @@ void CodeAssembler::TailCallRuntimeWithCEntryImpl(
raw_assembler()->TailCallN(call_descriptor, inputs.size(), inputs.data());
}
-Node* CodeAssembler::CallStubN(const CallInterfaceDescriptor& descriptor,
+Node* CodeAssembler::CallStubN(StubCallMode call_mode,
+ const CallInterfaceDescriptor& descriptor,
size_t result_size, int input_count,
Node* const* inputs) {
+ DCHECK(call_mode == StubCallMode::kCallCodeObject ||
+ call_mode == StubCallMode::kCallBuiltinPointer);
+
// implicit nodes are target and optionally context.
int implicit_nodes = descriptor.HasContextParameter() ? 2 : 1;
DCHECK_LE(implicit_nodes, input_count);
@@ -1183,11 +1219,12 @@ Node* CodeAssembler::CallStubN(const CallInterfaceDescriptor& descriptor,
auto call_descriptor = Linkage::GetStubCallDescriptor(
zone(), descriptor, stack_parameter_count, CallDescriptor::kNoFlags,
- Operator::kNoProperties);
+ Operator::kNoProperties, call_mode);
CallPrologue();
Node* return_value =
raw_assembler()->CallN(call_descriptor, input_count, inputs);
+ HandleException(return_value);
CallEpilogue();
return return_value;
}
@@ -1212,10 +1249,14 @@ void CodeAssembler::TailCallStubImpl(const CallInterfaceDescriptor& descriptor,
raw_assembler()->TailCallN(call_descriptor, inputs.size(), inputs.data());
}
-Node* CodeAssembler::CallStubRImpl(const CallInterfaceDescriptor& descriptor,
- size_t result_size, SloppyTNode<Code> target,
+Node* CodeAssembler::CallStubRImpl(StubCallMode call_mode,
+ const CallInterfaceDescriptor& descriptor,
+ size_t result_size, Node* target,
SloppyTNode<Object> context,
std::initializer_list<Node*> args) {
+ DCHECK(call_mode == StubCallMode::kCallCodeObject ||
+ call_mode == StubCallMode::kCallBuiltinPointer);
+
constexpr size_t kMaxNumArgs = 10;
DCHECK_GE(kMaxNumArgs, args.size());
@@ -1226,7 +1267,8 @@ Node* CodeAssembler::CallStubRImpl(const CallInterfaceDescriptor& descriptor,
inputs.Add(context);
}
- return CallStubN(descriptor, result_size, inputs.size(), inputs.data());
+ return CallStubN(call_mode, descriptor, result_size, inputs.size(),
+ inputs.data());
}
Node* CodeAssembler::TailCallStubThenBytecodeDispatchImpl(
@@ -1484,6 +1526,10 @@ Factory* CodeAssembler::factory() const { return isolate()->factory(); }
Zone* CodeAssembler::zone() const { return raw_assembler()->zone(); }
+bool CodeAssembler::IsExceptionHandlerActive() const {
+ return state_->exception_handler_labels_.size() != 0;
+}
+
RawMachineAssembler* CodeAssembler::raw_assembler() const {
return state_->raw_assembler_.get();
}
@@ -1494,13 +1540,14 @@ RawMachineAssembler* CodeAssembler::raw_assembler() const {
// properly be verified.
class CodeAssemblerVariable::Impl : public ZoneObject {
public:
- explicit Impl(MachineRepresentation rep)
+ explicit Impl(MachineRepresentation rep, CodeAssemblerState::VariableId id)
:
#if DEBUG
debug_info_(AssemblerDebugInfo(nullptr, nullptr, -1)),
#endif
value_(nullptr),
- rep_(rep) {
+ rep_(rep),
+ var_id_(id) {
}
#if DEBUG
@@ -1511,13 +1558,25 @@ class CodeAssemblerVariable::Impl : public ZoneObject {
AssemblerDebugInfo debug_info_;
#endif // DEBUG
+ bool operator<(const CodeAssemblerVariable::Impl& other) const {
+ return var_id_ < other.var_id_;
+ }
Node* value_;
MachineRepresentation rep_;
+ CodeAssemblerState::VariableId var_id_;
};
+bool CodeAssemblerVariable::ImplComparator::operator()(
+ const CodeAssemblerVariable::Impl* a,
+ const CodeAssemblerVariable::Impl* b) const {
+ return *a < *b;
+}
+
CodeAssemblerVariable::CodeAssemblerVariable(CodeAssembler* assembler,
MachineRepresentation rep)
- : impl_(new (assembler->zone()) Impl(rep)), state_(assembler->state()) {
+ : impl_(new (assembler->zone())
+ Impl(rep, assembler->state()->NextVariableId())),
+ state_(assembler->state()) {
state_->variables_.insert(impl_);
}
@@ -1532,7 +1591,9 @@ CodeAssemblerVariable::CodeAssemblerVariable(CodeAssembler* assembler,
CodeAssemblerVariable::CodeAssemblerVariable(CodeAssembler* assembler,
AssemblerDebugInfo debug_info,
MachineRepresentation rep)
- : impl_(new (assembler->zone()) Impl(rep)), state_(assembler->state()) {
+ : impl_(new (assembler->zone())
+ Impl(rep, assembler->state()->NextVariableId())),
+ state_(assembler->state()) {
impl_->set_debug_info(debug_info);
state_->variables_.insert(impl_);
}
@@ -1677,6 +1738,7 @@ void CodeAssemblerLabel::Bind(AssemblerDebugInfo debug_info) {
<< "\n# previous: " << *label_->block();
FATAL("%s", str.str().c_str());
}
+ state_->raw_assembler_->SetSourcePosition(debug_info.file, debug_info.line);
state_->raw_assembler_->Bind(label_, debug_info);
UpdateVariablesAfterBind();
}
@@ -1791,21 +1853,79 @@ const std::vector<Node*>& CodeAssemblerParameterizedLabelBase::CreatePhis(
return phi_nodes_;
}
+void CodeAssemblerState::PushExceptionHandler(
+ CodeAssemblerExceptionHandlerLabel* label) {
+ exception_handler_labels_.push_back(label);
+}
+
+void CodeAssemblerState::PopExceptionHandler() {
+ exception_handler_labels_.pop_back();
+}
+
+CodeAssemblerScopedExceptionHandler::CodeAssemblerScopedExceptionHandler(
+ CodeAssembler* assembler, CodeAssemblerExceptionHandlerLabel* label)
+ : has_handler_(label != nullptr),
+ assembler_(assembler),
+ compatibility_label_(nullptr),
+ exception_(nullptr) {
+ if (has_handler_) {
+ assembler_->state()->PushExceptionHandler(label);
+ }
+}
+
+CodeAssemblerScopedExceptionHandler::CodeAssemblerScopedExceptionHandler(
+ CodeAssembler* assembler, CodeAssemblerLabel* label,
+ TypedCodeAssemblerVariable<Object>* exception)
+ : has_handler_(label != nullptr),
+ assembler_(assembler),
+ compatibility_label_(label),
+ exception_(exception) {
+ if (has_handler_) {
+ label_ = base::make_unique<CodeAssemblerExceptionHandlerLabel>(
+ assembler, CodeAssemblerLabel::kDeferred);
+ assembler_->state()->PushExceptionHandler(label_.get());
+ }
+}
+
+CodeAssemblerScopedExceptionHandler::~CodeAssemblerScopedExceptionHandler() {
+ if (has_handler_) {
+ assembler_->state()->PopExceptionHandler();
+ }
+ if (label_ && label_->is_used()) {
+ CodeAssembler::Label skip(assembler_);
+ bool inside_block = assembler_->state()->InsideBlock();
+ if (inside_block) {
+ assembler_->Goto(&skip);
+ }
+ TNode<Object> e;
+ assembler_->Bind(label_.get(), &e);
+ *exception_ = e;
+ assembler_->Goto(compatibility_label_);
+ if (inside_block) {
+ assembler_->Bind(&skip);
+ }
+ }
+}
+
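
The scoped handler above is a plain RAII scope: push a handler label on construction, pop it on destruction so the stack stays balanced on every exit path, with HandleException always targeting the innermost entry (back of the vector). The pattern in isolation, with an int standing in for the label:

#include <cassert>
#include <vector>

struct HandlerStack {
  std::vector<int> labels;  // stand-in for exception handler labels
};

class ScopedHandler {
 public:
  ScopedHandler(HandlerStack* stack, int label) : stack_(stack) {
    stack_->labels.push_back(label);
  }
  ~ScopedHandler() { stack_->labels.pop_back(); }  // balanced on every exit
 private:
  HandlerStack* stack_;
};

int main() {
  HandlerStack stack;
  {
    ScopedHandler outer(&stack, 1);
    ScopedHandler inner(&stack, 2);
    assert(stack.labels.back() == 2);  // innermost handler wins
  }
  assert(stack.labels.empty());
}
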
} // namespace compiler
-Smi* CheckObjectType(Object* value, Smi* type, String* location) {
+Address CheckObjectType(Address raw_value, Address raw_type,
+ Address raw_location) {
#ifdef DEBUG
+ Object value(raw_value);
+ Smi type(raw_type);
+ String location = String::cast(Object(raw_location));
const char* expected;
switch (static_cast<ObjectType>(type->value())) {
-#define TYPE_CASE(Name) \
- case ObjectType::k##Name: \
- if (value->Is##Name()) return Smi::FromInt(0); \
- expected = #Name; \
+#define TYPE_CASE(Name) \
+ case ObjectType::k##Name: \
+ if (value->Is##Name()) return Smi::FromInt(0).ptr(); \
+ expected = #Name; \
break;
-#define TYPE_STRUCT_CASE(NAME, Name, name) \
- case ObjectType::k##Name: \
- if (value->Is##Name()) return Smi::FromInt(0); \
- expected = #Name; \
+#define TYPE_STRUCT_CASE(NAME, Name, name) \
+ case ObjectType::k##Name: \
+ if (value->Is##Name()) return Smi::FromInt(0).ptr(); \
+ expected = #Name; \
break;
TYPE_CASE(Object)
diff --git a/deps/v8/src/compiler/code-assembler.h b/deps/v8/src/compiler/code-assembler.h
index 3a5f06bb95..4f63ea3198 100644
--- a/deps/v8/src/compiler/code-assembler.h
+++ b/deps/v8/src/compiler/code-assembler.h
@@ -18,20 +18,31 @@
#include "src/heap/heap.h"
#include "src/machine-type.h"
#include "src/objects.h"
+#include "src/objects/arguments.h"
#include "src/objects/data-handler.h"
+#include "src/objects/heap-number.h"
+#include "src/objects/js-array-buffer.h"
#include "src/objects/map.h"
#include "src/objects/maybe-object.h"
+#include "src/objects/oddball.h"
#include "src/runtime/runtime.h"
+#include "src/source-position.h"
#include "src/zone/zone-containers.h"
namespace v8 {
namespace internal {
+// Forward declarations.
+class AsmWasmData;
+class AsyncGeneratorRequest;
+struct AssemblerOptions;
+class BigInt;
class CallInterfaceDescriptor;
class Callable;
class Factory;
class InterpreterData;
class Isolate;
+class JSAsyncFunctionObject;
class JSAsyncGeneratorObject;
class JSCollator;
class JSCollection;
@@ -42,10 +53,15 @@ class JSNumberFormat;
class JSPluralRules;
class JSRegExpStringIterator;
class JSRelativeTimeFormat;
+class JSSegmentIterator;
class JSSegmenter;
class JSV8BreakIterator;
+class JSWeakCell;
class JSWeakCollection;
+class JSWeakFactory;
+class JSWeakFactoryCleanupIterator;
class JSWeakMap;
+class JSWeakRef;
class JSWeakSet;
class MaybeObject;
class PromiseCapability;
@@ -53,7 +69,8 @@ class PromiseFulfillReactionJobTask;
class PromiseReaction;
class PromiseReactionJobTask;
class PromiseRejectReactionJobTask;
-class TorqueAssembler;
+class WasmDebugInfo;
+class WeakFactoryCleanupJobTask;
class Zone;
template <typename T>
@@ -65,8 +82,8 @@ struct IntegralT : UntaggedT {};
struct WordT : IntegralT {
static const MachineRepresentation kMachineRepresentation =
- (kPointerSize == 4) ? MachineRepresentation::kWord32
- : MachineRepresentation::kWord64;
+ (kSystemPointerSize == 4) ? MachineRepresentation::kWord32
+ : MachineRepresentation::kWord64;
};
struct RawPtrT : WordT {
@@ -224,6 +241,27 @@ struct UnionT {
using Number = UnionT<Smi, HeapNumber>;
using Numeric = UnionT<Number, BigInt>;
+// A pointer to a builtin function, used by Torque's function pointers.
+using BuiltinPtr = Smi;
+
+class int31_t {
+ public:
+ int31_t() : value_(0) {}
+ int31_t(int value) : value_(value) { // NOLINT(runtime/explicit)
+ DCHECK_EQ((value & 0x80000000) != 0, (value & 0x40000000) != 0);
+ }
+ int31_t& operator=(int value) {
+ DCHECK_EQ((value & 0x80000000) != 0, (value & 0x40000000) != 0);
+ value_ = value;
+ return *this;
+ }
+ int32_t value() const { return value_; }
+ operator int32_t() const { return value_; }
+
+ private:
+ int32_t value_;
+};
+
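
The DCHECK in int31_t tests that bits 31 and 30 of the incoming int agree, which is exactly the condition for the value to fit in 31 bits (sign-extension from bit 30 reproduces it). Stand-alone:

#include <cassert>
#include <cstdint>

bool FitsInt31(int32_t v) {
  return ((v & 0x80000000) != 0) == ((v & 0x40000000) != 0);
}

int main() {
  assert(FitsInt31(0x3FFFFFFF));   // largest 31-bit value
  assert(FitsInt31(-0x40000000));  // smallest 31-bit value
  assert(!FitsInt31(0x40000000));  // needs a 32nd bit
  assert(!FitsInt31(INT32_MIN));
}
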
#define ENUM_ELEMENT(Name) k##Name,
#define ENUM_STRUCT_ELEMENT(NAME, Name, name) k##Name,
enum class ObjectType {
@@ -241,8 +279,10 @@ class BooleanWrapper;
class CompilationCacheTable;
class Constructor;
class Filler;
+class FunctionTemplateRareData;
class InternalizedString;
class JSArgumentsObject;
+class JSArrayBufferView;
class JSContextExtensionObject;
class JSError;
class JSSloppyArgumentsObject;
@@ -257,6 +297,7 @@ class SymbolWrapper;
class Undetectable;
class UniqueName;
class WasmExceptionObject;
+class WasmExceptionTag;
class WasmExportedFunctionData;
class WasmGlobalObject;
class WasmMemoryObject;
@@ -290,7 +331,12 @@ HEAP_OBJECT_TEMPLATE_TYPE_LIST(OBJECT_TYPE_TEMPLATE_CASE)
#undef OBJECT_TYPE_STRUCT_CASE
#undef OBJECT_TYPE_TEMPLATE_CASE
-Smi* CheckObjectType(Object* value, Smi* type, String* location);
+// {raw_value} must be a tagged Object.
+// {raw_type} must be a tagged Smi.
+// {raw_location} must be a tagged String.
+// Returns a tagged Smi.
+Address CheckObjectType(Address raw_value, Address raw_type,
+ Address raw_location);
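
This signature is part of the same migration seen at SmiConstant and CheckObjectType above: objects cross the generated-code/C++ boundary as raw tagged Address words and are re-wrapped into value types on entry. A toy version of the shape, with a stand-in class rather than the real Object:

#include <cstdint>
using Address = uintptr_t;

// Stand-in for the value-typed Object: one tagged word, no pointer identity.
class ObjectSketch {
 public:
  explicit ObjectSketch(Address ptr) : ptr_(ptr) {}
  Address ptr() const { return ptr_; }
 private:
  Address ptr_;
};

// ABI-friendly boundary function: raw words in, raw word out.
Address Identity(Address raw_value) {
  ObjectSketch value(raw_value);  // re-wrap on entry
  return value.ptr();             // unwrap on exit
}

int main() { return Identity(0x42) == 0x42 ? 0 : 1; }
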
namespace compiler {
@@ -303,6 +349,7 @@ class CodeAssemblerState;
class Node;
class RawMachineAssembler;
class RawMachineLabel;
+class SourcePositionTable;
typedef ZoneVector<CodeAssemblerVariable*> CodeAssemblerVariableList;
@@ -436,6 +483,9 @@ class SloppyTNode : public TNode<T> {
: TNode<T>(other) {}
};
+template <class... Types>
+class CodeAssemblerParameterizedLabel;
+
// This macro alias allows to use PairT<T1, T2> as a macro argument.
#define PAIR_TYPE(T1, T2) PairT<T1, T2>
@@ -697,8 +747,11 @@ class V8_EXPORT_PRIVATE CodeAssembler {
#define TO_STRING_LITERAL(x) STRINGIFY(x)
#define CAST(x) \
Cast(x, "CAST(" #x ") at " __FILE__ ":" TO_STRING_LITERAL(__LINE__))
+#define TORQUE_CAST(x) \
+ ca_.Cast(x, "CAST(" #x ") at " __FILE__ ":" TO_STRING_LITERAL(__LINE__))
#else
#define CAST(x) Cast(x)
+#define TORQUE_CAST(x) ca_.Cast(x)
#endif
#ifdef DEBUG
@@ -716,7 +769,7 @@ class V8_EXPORT_PRIVATE CodeAssembler {
return Unsigned(IntPtrConstant(bit_cast<intptr_t>(value)));
}
TNode<Number> NumberConstant(double value);
- TNode<Smi> SmiConstant(Smi* value);
+ TNode<Smi> SmiConstant(Smi value);
TNode<Smi> SmiConstant(int value);
template <typename E,
typename = typename std::enable_if<std::is_enum<E>::value>::type>
@@ -744,9 +797,11 @@ class V8_EXPORT_PRIVATE CodeAssembler {
return value ? Int32TrueConstant() : Int32FalseConstant();
}
+ // TODO(jkummerow): The style guide wants pointers for output parameters.
+ // https://google.github.io/styleguide/cppguide.html#Output_Parameters
bool ToInt32Constant(Node* node, int32_t& out_value);
bool ToInt64Constant(Node* node, int64_t& out_value);
- bool ToSmiConstant(Node* node, Smi*& out_value);
+ bool ToSmiConstant(Node* node, Smi* out_value);
bool ToIntPtrConstant(Node* node, intptr_t& out_value);
bool IsUndefinedConstant(TNode<Object> node);
@@ -779,7 +834,20 @@ class V8_EXPORT_PRIVATE CodeAssembler {
void DebugAbort(Node* message);
void DebugBreak();
void Unreachable();
- void Comment(const char* format, ...);
+ void Comment(const char* msg) {
+ if (!FLAG_code_comments) return;
+ Comment(std::string(msg));
+ }
+ void Comment(std::string msg);
+ template <class... Args>
+ void Comment(Args&&... args) {
+ if (!FLAG_code_comments) return;
+ std::ostringstream s;
+ USE((s << std::forward<Args>(args))...);
+ Comment(s.str());
+ }
+
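
The variadic Comment overload streams every argument into a single ostringstream by expanding the parameter pack under USE(...). The same trick in freestanding form, using an initializer_list to guarantee left-to-right evaluation:

#include <initializer_list>
#include <iostream>
#include <sstream>
#include <string>
#include <utility>

template <class... Args>
std::string Concat(Args&&... args) {
  std::ostringstream s;
  (void)std::initializer_list<int>{((s << std::forward<Args>(args)), 0)...};
  return s.str();
}

int main() {
  std::cout << Concat("field offset ", 8, ", rep ", "kTagged") << "\n";
}
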
+ void SetSourcePosition(const char* file, int line);
void Bind(Label* label);
#if DEBUG
@@ -791,6 +859,31 @@ class V8_EXPORT_PRIVATE CodeAssembler {
void Branch(SloppyTNode<IntegralT> condition, Label* true_label,
Label* false_label);
+ template <class T>
+ TNode<T> Uninitialized() {
+ return {};
+ }
+
+ template <class... T>
+ void Bind(CodeAssemblerParameterizedLabel<T...>* label, TNode<T>*... phis) {
+ Bind(label->plain_label());
+ label->CreatePhis(phis...);
+ }
+ template <class... T, class... Args>
+ void Branch(TNode<BoolT> condition,
+ CodeAssemblerParameterizedLabel<T...>* if_true,
+ CodeAssemblerParameterizedLabel<T...>* if_false, Args... args) {
+ if_true->AddInputs(args...);
+ if_false->AddInputs(args...);
+ Branch(condition, if_true->plain_label(), if_false->plain_label());
+ }
+
+ template <class... T, class... Args>
+ void Goto(CodeAssemblerParameterizedLabel<T...>* label, Args... args) {
+ label->AddInputs(args...);
+ Goto(label->plain_label());
+ }
+
void Branch(TNode<BoolT> condition, const std::function<void()>& true_body,
const std::function<void()>& false_body);
void Branch(TNode<BoolT> condition, Label* true_label,
@@ -831,10 +924,16 @@ class V8_EXPORT_PRIVATE CodeAssembler {
// Store value to raw memory location.
Node* Store(Node* base, Node* value);
Node* Store(Node* base, Node* offset, Node* value);
- Node* StoreWithMapWriteBarrier(Node* base, Node* offset, Node* value);
Node* StoreNoWriteBarrier(MachineRepresentation rep, Node* base, Node* value);
Node* StoreNoWriteBarrier(MachineRepresentation rep, Node* base, Node* offset,
Node* value);
+ // Optimized memory operations that map to Turbofan simplified nodes.
+ TNode<HeapObject> OptimizedAllocate(TNode<IntPtrT> size,
+ PretenureFlag pretenure);
+ void OptimizedStoreField(MachineRepresentation rep, TNode<HeapObject> object,
+ int offset, Node* value,
+ WriteBarrierKind write_barrier);
+ void OptimizedStoreMap(TNode<HeapObject> object, TNode<Map>);
// {value_high} is used for 64-bit stores on 32-bit platforms, must be
// nullptr in other cases.
Node* AtomicStore(MachineRepresentation rep, Node* base, Node* offset,
@@ -878,6 +977,10 @@ class V8_EXPORT_PRIVATE CodeAssembler {
return UncheckedCast<IntPtrT>(
WordShr(static_cast<Node*>(left), static_cast<Node*>(right)));
}
+ TNode<IntPtrT> WordSar(TNode<IntPtrT> left, TNode<IntegralT> right) {
+ return UncheckedCast<IntPtrT>(
+ WordSar(static_cast<Node*>(left), static_cast<Node*>(right)));
+ }
TNode<IntPtrT> WordAnd(TNode<IntPtrT> left, TNode<IntPtrT> right) {
return UncheckedCast<IntPtrT>(
@@ -940,6 +1043,7 @@ class V8_EXPORT_PRIVATE CodeAssembler {
}
TNode<WordT> IntPtrAdd(SloppyTNode<WordT> left, SloppyTNode<WordT> right);
+ TNode<IntPtrT> IntPtrDiv(TNode<IntPtrT> left, TNode<IntPtrT> right);
TNode<WordT> IntPtrSub(SloppyTNode<WordT> left, SloppyTNode<WordT> right);
TNode<WordT> IntPtrMul(SloppyTNode<WordT> left, SloppyTNode<WordT> right);
TNode<IntPtrT> IntPtrAdd(TNode<IntPtrT> left, TNode<IntPtrT> right) {
@@ -969,6 +1073,9 @@ class V8_EXPORT_PRIVATE CodeAssembler {
TNode<IntPtrT> WordShr(TNode<IntPtrT> value, int shift) {
return UncheckedCast<IntPtrT>(WordShr(static_cast<Node*>(value), shift));
}
+ TNode<IntPtrT> WordSar(TNode<IntPtrT> value, int shift) {
+ return UncheckedCast<IntPtrT>(WordSar(static_cast<Node*>(value), shift));
+ }
TNode<Word32T> Word32Shr(SloppyTNode<Word32T> value, int shift);
TNode<WordT> WordOr(SloppyTNode<WordT> left, SloppyTNode<WordT> right);
@@ -1096,19 +1203,31 @@ class V8_EXPORT_PRIVATE CodeAssembler {
TNode<T> CallStub(const CallInterfaceDescriptor& descriptor,
SloppyTNode<Code> target, SloppyTNode<Object> context,
TArgs... args) {
- return UncheckedCast<T>(CallStubR(descriptor, 1, target, context, args...));
+ return UncheckedCast<T>(CallStubR(StubCallMode::kCallCodeObject, descriptor,
+ 1, target, context, args...));
}
template <class... TArgs>
- Node* CallStubR(const CallInterfaceDescriptor& descriptor, size_t result_size,
- SloppyTNode<Code> target, SloppyTNode<Object> context,
+ Node* CallStubR(StubCallMode call_mode,
+ const CallInterfaceDescriptor& descriptor, size_t result_size,
+ SloppyTNode<Object> target, SloppyTNode<Object> context,
TArgs... args) {
- return CallStubRImpl(descriptor, result_size, target, context, {args...});
+ return CallStubRImpl(call_mode, descriptor, result_size, target, context,
+ {args...});
}
- Node* CallStubN(const CallInterfaceDescriptor& descriptor, size_t result_size,
+ Node* CallStubN(StubCallMode call_mode,
+ const CallInterfaceDescriptor& descriptor, size_t result_size,
int input_count, Node* const* inputs);
+ template <class T = Object, class... TArgs>
+ TNode<T> CallBuiltinPointer(const CallInterfaceDescriptor& descriptor,
+ TNode<BuiltinPtr> target, TNode<Object> context,
+ TArgs... args) {
+ return UncheckedCast<T>(CallStubR(StubCallMode::kCallBuiltinPointer,
+ descriptor, 1, target, context, args...));
+ }
+
template <class... TArgs>
void TailCallStub(Callable const& callable, SloppyTNode<Object> context,
TArgs... args) {
@@ -1245,6 +1364,8 @@ class V8_EXPORT_PRIVATE CodeAssembler {
bool UnalignedLoadSupported(MachineRepresentation rep) const;
bool UnalignedStoreSupported(MachineRepresentation rep) const;
+ bool IsExceptionHandlerActive() const;
+
protected:
void RegisterCallGenerationCallbacks(
const CodeAssemblerCallback& call_prologue,
@@ -1257,6 +1378,8 @@ class V8_EXPORT_PRIVATE CodeAssembler {
bool IsJSFunctionCall() const;
private:
+ void HandleException(Node* result);
+
TNode<Object> CallRuntimeImpl(Runtime::FunctionId function,
TNode<Object> context,
std::initializer_list<TNode<Object>> args);
@@ -1282,8 +1405,9 @@ class V8_EXPORT_PRIVATE CodeAssembler {
const CallInterfaceDescriptor& descriptor, Node* target, Node* context,
std::initializer_list<Node*> args);
- Node* CallStubRImpl(const CallInterfaceDescriptor& descriptor,
- size_t result_size, SloppyTNode<Code> target,
+ Node* CallStubRImpl(StubCallMode call_mode,
+ const CallInterfaceDescriptor& descriptor,
+ size_t result_size, Node* target,
SloppyTNode<Object> context,
std::initializer_list<Node*> args);
@@ -1328,6 +1452,10 @@ class CodeAssemblerVariable {
friend class CodeAssemblerState;
friend std::ostream& operator<<(std::ostream&, const Impl&);
friend std::ostream& operator<<(std::ostream&, const CodeAssemblerVariable&);
+ struct ImplComparator {
+ bool operator()(const CodeAssemblerVariable::Impl* a,
+ const CodeAssemblerVariable::Impl* b) const;
+ };
Impl* impl_;
CodeAssemblerState* state_;
DISALLOW_COPY_AND_ASSIGN(CodeAssemblerVariable);
@@ -1417,10 +1545,14 @@ class CodeAssemblerLabel {
RawMachineLabel* label_;
// Map of variables that need to be merged to their phi nodes (or placeholders
// for those phis).
- std::map<CodeAssemblerVariable::Impl*, Node*> variable_phis_;
+ std::map<CodeAssemblerVariable::Impl*, Node*,
+ CodeAssemblerVariable::ImplComparator>
+ variable_phis_;
// Map of variables to the list of value nodes that have been added from each
// merge path in their order of merging.
- std::map<CodeAssemblerVariable::Impl*, std::vector<Node*>> variable_merges_;
+ std::map<CodeAssemblerVariable::Impl*, std::vector<Node*>,
+ CodeAssemblerVariable::ImplComparator>
+ variable_merges_;
};
class CodeAssemblerParameterizedLabelBase {
@@ -1457,7 +1589,7 @@ class CodeAssemblerParameterizedLabel
: CodeAssemblerParameterizedLabelBase(assembler, kArity, type) {}
private:
- friend class internal::TorqueAssembler;
+ friend class CodeAssembler;
void AddInputs(TNode<Types>... inputs) {
CodeAssemblerParameterizedLabelBase::AddInputs(
@@ -1477,6 +1609,9 @@ class CodeAssemblerParameterizedLabel
}
};
+typedef CodeAssemblerParameterizedLabel<Object>
+ CodeAssemblerExceptionHandlerLabel;
+
class V8_EXPORT_PRIVATE CodeAssemblerState {
public:
// Create with CallStub linkage.
@@ -1485,7 +1620,6 @@ class V8_EXPORT_PRIVATE CodeAssemblerState {
CodeAssemblerState(Isolate* isolate, Zone* zone,
const CallInterfaceDescriptor& descriptor, Code::Kind kind,
const char* name, PoisoningMitigationLevel poisoning_level,
- uint32_t stub_key = 0,
int32_t builtin_index = Builtins::kNoBuiltinId);
// Create with JSCall linkage.
@@ -1501,8 +1635,8 @@ class V8_EXPORT_PRIVATE CodeAssemblerState {
#if DEBUG
void PrintCurrentBlock(std::ostream& os);
- bool InsideBlock();
#endif // DEBUG
+ bool InsideBlock();
void SetInitialDebugInformation(const char* msg, const char* file, int line);
private:
@@ -1511,26 +1645,65 @@ class V8_EXPORT_PRIVATE CodeAssemblerState {
friend class CodeAssemblerVariable;
friend class CodeAssemblerTester;
friend class CodeAssemblerParameterizedLabelBase;
+ friend class CodeAssemblerScopedExceptionHandler;
CodeAssemblerState(Isolate* isolate, Zone* zone,
CallDescriptor* call_descriptor, Code::Kind kind,
const char* name, PoisoningMitigationLevel poisoning_level,
- uint32_t stub_key, int32_t builtin_index);
+ int32_t builtin_index);
+
+ void PushExceptionHandler(CodeAssemblerExceptionHandlerLabel* label);
+ void PopExceptionHandler();
std::unique_ptr<RawMachineAssembler> raw_assembler_;
Code::Kind kind_;
const char* name_;
- uint32_t stub_key_;
int32_t builtin_index_;
bool code_generated_;
- ZoneSet<CodeAssemblerVariable::Impl*> variables_;
+ ZoneSet<CodeAssemblerVariable::Impl*, CodeAssemblerVariable::ImplComparator>
+ variables_;
CodeAssemblerCallback call_prologue_;
CodeAssemblerCallback call_epilogue_;
+ std::vector<CodeAssemblerExceptionHandlerLabel*> exception_handler_labels_;
+ typedef uint32_t VariableId;
+ VariableId next_variable_id_ = 0;
+
+ VariableId NextVariableId() { return next_variable_id_++; }
DISALLOW_COPY_AND_ASSIGN(CodeAssemblerState);
};
+class CodeAssemblerScopedExceptionHandler {
+ public:
+ CodeAssemblerScopedExceptionHandler(
+ CodeAssembler* assembler, CodeAssemblerExceptionHandlerLabel* label);
+
+ // Use this constructor for compatibility/ports of old CSA code only. New code
+ // should use the CodeAssemblerExceptionHandlerLabel version.
+ CodeAssemblerScopedExceptionHandler(
+ CodeAssembler* assembler, CodeAssemblerLabel* label,
+ TypedCodeAssemblerVariable<Object>* exception);
+
+ ~CodeAssemblerScopedExceptionHandler();
+
+ private:
+ bool has_handler_;
+ CodeAssembler* assembler_;
+ CodeAssemblerLabel* compatibility_label_;
+ std::unique_ptr<CodeAssemblerExceptionHandlerLabel> label_;
+ TypedCodeAssemblerVariable<Object>* exception_;
+};
+
} // namespace compiler
+
+#if defined(V8_HOST_ARCH_32_BIT)
+typedef Smi BInt;
+#elif defined(V8_HOST_ARCH_64_BIT)
+typedef IntPtrT BInt;
+#else
+#error Unknown architecture.
+#endif
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/common-node-cache.cc b/deps/v8/src/compiler/common-node-cache.cc
index fa4ca34468..d9fd5ca013 100644
--- a/deps/v8/src/compiler/common-node-cache.cc
+++ b/deps/v8/src/compiler/common-node-cache.cc
@@ -4,8 +4,8 @@
#include "src/compiler/common-node-cache.h"
-#include "src/assembler.h"
#include "src/compiler/node.h"
+#include "src/external-reference.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/compiler/common-operator-reducer.cc b/deps/v8/src/compiler/common-operator-reducer.cc
index 16a9096079..57f1866bdb 100644
--- a/deps/v8/src/compiler/common-operator-reducer.cc
+++ b/deps/v8/src/compiler/common-operator-reducer.cc
@@ -38,13 +38,13 @@ Decision DecideCondition(JSHeapBroker* broker, Node* const cond) {
} // namespace
CommonOperatorReducer::CommonOperatorReducer(Editor* editor, Graph* graph,
- JSHeapBroker* js_heap_broker,
+ JSHeapBroker* broker,
CommonOperatorBuilder* common,
MachineOperatorBuilder* machine,
Zone* temp_zone)
: AdvancedReducer(editor),
graph_(graph),
- js_heap_broker_(js_heap_broker),
+ broker_(broker),
common_(common),
machine_(machine),
dead_(graph->NewNode(common->Dead())),
@@ -89,10 +89,8 @@ Reduction CommonOperatorReducer::ReduceBranch(Node* node) {
// not (i.e. true being returned in the false case and vice versa).
if (cond->opcode() == IrOpcode::kBooleanNot ||
(cond->opcode() == IrOpcode::kSelect &&
- DecideCondition(js_heap_broker(), cond->InputAt(1)) ==
- Decision::kFalse &&
- DecideCondition(js_heap_broker(), cond->InputAt(2)) ==
- Decision::kTrue)) {
+ DecideCondition(broker(), cond->InputAt(1)) == Decision::kFalse &&
+ DecideCondition(broker(), cond->InputAt(2)) == Decision::kTrue)) {
for (Node* const use : node->uses()) {
switch (use->opcode()) {
case IrOpcode::kIfTrue:
@@ -114,7 +112,7 @@ Reduction CommonOperatorReducer::ReduceBranch(Node* node) {
node, common()->Branch(NegateBranchHint(BranchHintOf(node->op()))));
return Changed(node);
}
- Decision const decision = DecideCondition(js_heap_broker(), cond);
+ Decision const decision = DecideCondition(broker(), cond);
if (decision == Decision::kUnknown) return NoChange();
Node* const control = node->InputAt(1);
for (Node* const use : node->uses()) {
@@ -154,7 +152,7 @@ Reduction CommonOperatorReducer::ReduceDeoptimizeConditional(Node* node) {
: common()->DeoptimizeUnless(p.kind(), p.reason(), p.feedback()));
return Changed(node);
}
- Decision const decision = DecideCondition(js_heap_broker(), condition);
+ Decision const decision = DecideCondition(broker(), condition);
if (decision == Decision::kUnknown) return NoChange();
if (condition_is_true == (decision == Decision::kTrue)) {
ReplaceWithValue(node, dead(), effect, control);
@@ -387,7 +385,7 @@ Reduction CommonOperatorReducer::ReduceSelect(Node* node) {
Node* const vtrue = node->InputAt(1);
Node* const vfalse = node->InputAt(2);
if (vtrue == vfalse) return Replace(vtrue);
- switch (DecideCondition(js_heap_broker(), cond)) {
+ switch (DecideCondition(broker(), cond)) {
case Decision::kTrue:
return Replace(vtrue);
case Decision::kFalse:
diff --git a/deps/v8/src/compiler/common-operator-reducer.h b/deps/v8/src/compiler/common-operator-reducer.h
index 32b3181b7a..b1d98e0558 100644
--- a/deps/v8/src/compiler/common-operator-reducer.h
+++ b/deps/v8/src/compiler/common-operator-reducer.h
@@ -24,8 +24,7 @@ class Operator;
class V8_EXPORT_PRIVATE CommonOperatorReducer final
: public NON_EXPORTED_BASE(AdvancedReducer) {
public:
- CommonOperatorReducer(Editor* editor, Graph* graph,
- JSHeapBroker* js_heap_broker,
+ CommonOperatorReducer(Editor* editor, Graph* graph, JSHeapBroker* broker,
CommonOperatorBuilder* common,
MachineOperatorBuilder* machine, Zone* temp_zone);
~CommonOperatorReducer() final = default;
@@ -48,13 +47,13 @@ class V8_EXPORT_PRIVATE CommonOperatorReducer final
Reduction Change(Node* node, Operator const* op, Node* a, Node* b);
Graph* graph() const { return graph_; }
- JSHeapBroker* js_heap_broker() const { return js_heap_broker_; }
+ JSHeapBroker* broker() const { return broker_; }
CommonOperatorBuilder* common() const { return common_; }
MachineOperatorBuilder* machine() const { return machine_; }
Node* dead() const { return dead_; }
Graph* const graph_;
- JSHeapBroker* const js_heap_broker_;
+ JSHeapBroker* const broker_;
CommonOperatorBuilder* const common_;
MachineOperatorBuilder* const machine_;
Node* const dead_;
diff --git a/deps/v8/src/compiler/common-operator.cc b/deps/v8/src/compiler/common-operator.cc
index 9ed6943367..2421d7d43b 100644
--- a/deps/v8/src/compiler/common-operator.cc
+++ b/deps/v8/src/compiler/common-operator.cc
@@ -4,7 +4,6 @@
#include "src/compiler/common-operator.h"
-#include "src/assembler.h"
#include "src/base/lazy-instance.h"
#include "src/compiler/linkage.h"
#include "src/compiler/node.h"
@@ -70,7 +69,16 @@ const BranchOperatorInfo& BranchOperatorInfoOf(const Operator* const op) {
}
BranchHint BranchHintOf(const Operator* const op) {
- return BranchOperatorInfoOf(op).hint;
+ switch (op->opcode()) {
+ case IrOpcode::kBranch:
+ return BranchOperatorInfoOf(op).hint;
+ case IrOpcode::kIfValue:
+ return IfValueParametersOf(op).hint();
+ case IrOpcode::kIfDefault:
+ return OpParameter<BranchHint>(op);
+ default:
+ UNREACHABLE();
+ }
}
int ValueInputCountOfReturn(Operator const* const op) {
@@ -420,16 +428,18 @@ ZoneVector<MachineType> const* MachineTypesOf(Operator const* op) {
V8_EXPORT_PRIVATE bool operator==(IfValueParameters const& l,
IfValueParameters const& r) {
- return l.value() == r.value() && r.comparison_order() == r.comparison_order();
+ return l.value() == r.value() &&
+ l.comparison_order() == r.comparison_order() && l.hint() == r.hint();
}
size_t hash_value(IfValueParameters const& p) {
- return base::hash_combine(p.value(), p.comparison_order());
+ return base::hash_combine(p.value(), p.comparison_order(), p.hint());
}
V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& out,
IfValueParameters const& p) {
- out << p.value() << " (order " << p.comparison_order() << ")";
+ out << p.value() << " (order " << p.comparison_order() << ", hint "
+ << p.hint() << ")";
return out;
}
@@ -445,7 +455,6 @@ IfValueParameters const& IfValueParametersOf(const Operator* op) {
V(IfFalse, Operator::kKontrol, 0, 0, 1, 0, 0, 1) \
V(IfSuccess, Operator::kKontrol, 0, 0, 1, 0, 0, 1) \
V(IfException, Operator::kKontrol, 0, 1, 1, 1, 1, 1) \
- V(IfDefault, Operator::kKontrol, 0, 0, 1, 0, 0, 1) \
V(Throw, Operator::kKontrol, 0, 1, 1, 0, 0, 1) \
V(Terminate, Operator::kKontrol, 0, 1, 1, 0, 0, 1) \
V(OsrNormalEntry, Operator::kFoldable, 0, 1, 1, 0, 1, 1) \
@@ -872,11 +881,13 @@ struct CommonOperatorGlobalCache final {
#undef CACHED_STATE_VALUES
};
-static base::LazyInstance<CommonOperatorGlobalCache>::type
- kCommonOperatorGlobalCache = LAZY_INSTANCE_INITIALIZER;
+namespace {
+DEFINE_LAZY_LEAKY_OBJECT_GETTER(CommonOperatorGlobalCache,
+ GetCommonOperatorGlobalCache);
+}
CommonOperatorBuilder::CommonOperatorBuilder(Zone* zone)
- : cache_(kCommonOperatorGlobalCache.Get()), zone_(zone) {}
+ : cache_(*GetCommonOperatorGlobalCache()), zone_(zone) {}
#define CACHED(Name, properties, value_input_count, effect_input_count, \
control_input_count, value_output_count, effect_output_count, \
@@ -1043,15 +1054,23 @@ const Operator* CommonOperatorBuilder::Switch(size_t control_output_count) {
}
const Operator* CommonOperatorBuilder::IfValue(int32_t index,
- int32_t comparison_order) {
- return new (zone()) Operator1<IfValueParameters>( // --
- IrOpcode::kIfValue, Operator::kKontrol, // opcode
- "IfValue", // name
- 0, 0, 1, 0, 0, 1, // counts
- IfValueParameters(index, comparison_order)); // parameter
+ int32_t comparison_order,
+ BranchHint hint) {
+ return new (zone()) Operator1<IfValueParameters>( // --
+ IrOpcode::kIfValue, Operator::kKontrol, // opcode
+ "IfValue", // name
+ 0, 0, 1, 0, 0, 1, // counts
+ IfValueParameters(index, comparison_order, hint)); // parameter
+}
+
+const Operator* CommonOperatorBuilder::IfDefault(BranchHint hint) {
+ return new (zone()) Operator1<BranchHint>( // --
+ IrOpcode::kIfDefault, Operator::kKontrol, // opcode
+ "IfDefault", // name
+ 0, 0, 1, 0, 0, 1, // counts
+ hint); // parameter
}
-
const Operator* CommonOperatorBuilder::Start(int value_output_count) {
return new (zone()) Operator( // --
IrOpcode::kStart, Operator::kFoldable | Operator::kNoThrow, // opcode
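The cache hunks above swap base::LazyInstance for a lazy leaky getter. A minimal sketch of the pattern, assuming the macro expands to a construct-on-first-use getter that intentionally never runs a destructor (the exact expansion lives in V8's base library and is not shown in this patch):

    // Sketch only, not the actual macro expansion: heap-allocate once on
    // first use and deliberately leak, so no destructor runs at shutdown.
    CommonOperatorGlobalCache* GetCommonOperatorGlobalCache() {
      static CommonOperatorGlobalCache* const instance =
          new CommonOperatorGlobalCache();
      return instance;
    }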
diff --git a/deps/v8/src/compiler/common-operator.h b/deps/v8/src/compiler/common-operator.h
index 609dfc8c1b..c9251e1c42 100644
--- a/deps/v8/src/compiler/common-operator.h
+++ b/deps/v8/src/compiler/common-operator.h
@@ -410,15 +410,18 @@ MachineRepresentation DeadValueRepresentationOf(Operator const*)
class IfValueParameters final {
public:
- IfValueParameters(int32_t value, int32_t comparison_order)
- : value_(value), comparison_order_(comparison_order) {}
+ IfValueParameters(int32_t value, int32_t comparison_order,
+ BranchHint hint = BranchHint::kNone)
+ : value_(value), comparison_order_(comparison_order), hint_(hint) {}
int32_t value() const { return value_; }
int32_t comparison_order() const { return comparison_order_; }
+ BranchHint hint() const { return hint_; }
private:
int32_t value_;
int32_t comparison_order_;
+ BranchHint hint_;
};
V8_EXPORT_PRIVATE bool operator==(IfValueParameters const&,
@@ -458,8 +461,9 @@ class V8_EXPORT_PRIVATE CommonOperatorBuilder final
const Operator* IfSuccess();
const Operator* IfException();
const Operator* Switch(size_t control_output_count);
- const Operator* IfValue(int32_t value, int32_t order = 0);
- const Operator* IfDefault();
+ const Operator* IfValue(int32_t value, int32_t order = 0,
+ BranchHint hint = BranchHint::kNone);
+ const Operator* IfDefault(BranchHint hint = BranchHint::kNone);
const Operator* Throw();
const Operator* Deoptimize(DeoptimizeKind kind, DeoptimizeReason reason,
VectorSlotPair const& feedback);
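Threading BranchHint through IfValue and IfDefault only works because cached operators are deduplicated by parameter equality and hash; that is why hint() now participates in operator==, hash_value, and operator<< for IfValueParameters. A hypothetical call site using the extended builders (names and values are illustrative, not taken from this patch):

    // 'common' is a CommonOperatorBuilder*; hints would come from feedback.
    const Operator* likely_case =
        common->IfValue(42, /*comparison_order=*/0, BranchHint::kTrue);
    const Operator* fallback = common->IfDefault(BranchHint::kFalse);
    // BranchHintOf now understands all three hinted control opcodes:
    DCHECK_EQ(BranchHint::kTrue, BranchHintOf(likely_case));
    DCHECK_EQ(BranchHint::kFalse, BranchHintOf(fallback));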
diff --git a/deps/v8/src/compiler/compilation-dependencies.cc b/deps/v8/src/compiler/compilation-dependencies.cc
index d5eb8b54be..4b3e684c51 100644
--- a/deps/v8/src/compiler/compilation-dependencies.cc
+++ b/deps/v8/src/compiler/compilation-dependencies.cc
@@ -12,12 +12,17 @@ namespace internal {
namespace compiler {
CompilationDependencies::CompilationDependencies(Isolate* isolate, Zone* zone)
- : zone_(zone), dependencies_(zone) {}
+ : zone_(zone), dependencies_(zone), isolate_(isolate) {}
class CompilationDependencies::Dependency : public ZoneObject {
public:
virtual bool IsValid() const = 0;
+ virtual void PrepareInstall() {}
virtual void Install(const MaybeObjectHandle& code) = 0;
+
+#ifdef DEBUG
+ virtual bool IsPretenureModeDependency() const { return false; }
+#endif
};
class InitialMapDependency final : public CompilationDependencies::Dependency {
@@ -31,15 +36,15 @@ class InitialMapDependency final : public CompilationDependencies::Dependency {
}
bool IsValid() const override {
- Handle<JSFunction> function = function_.object<JSFunction>();
+ Handle<JSFunction> function = function_.object();
return function->has_initial_map() &&
- function->initial_map() == *initial_map_.object<Map>();
+ function->initial_map() == *initial_map_.object();
}
void Install(const MaybeObjectHandle& code) override {
SLOW_DCHECK(IsValid());
DependentCode::InstallDependency(function_.isolate(), code,
- initial_map_.object<Map>(),
+ initial_map_.object(),
DependentCode::kInitialMapChangedGroup);
}
@@ -62,16 +67,22 @@ class PrototypePropertyDependency final
}
bool IsValid() const override {
- Handle<JSFunction> function = function_.object<JSFunction>();
+ Handle<JSFunction> function = function_.object();
return function->has_prototype_slot() && function->has_prototype() &&
!function->PrototypeRequiresRuntimeLookup() &&
function->prototype() == *prototype_.object();
}
- void Install(const MaybeObjectHandle& code) override {
+ void PrepareInstall() override {
SLOW_DCHECK(IsValid());
- Handle<JSFunction> function = function_.object<JSFunction>();
+ Handle<JSFunction> function = function_.object();
if (!function->has_initial_map()) JSFunction::EnsureHasInitialMap(function);
+ }
+
+ void Install(const MaybeObjectHandle& code) override {
+ SLOW_DCHECK(IsValid());
+ Handle<JSFunction> function = function_.object();
+ DCHECK(function->has_initial_map());
Handle<Map> initial_map(function->initial_map(), function_.isolate());
DependentCode::InstallDependency(function_.isolate(), code, initial_map,
DependentCode::kInitialMapChangedGroup);
@@ -88,11 +99,11 @@ class StableMapDependency final : public CompilationDependencies::Dependency {
DCHECK(map_.is_stable());
}
- bool IsValid() const override { return map_.object<Map>()->is_stable(); }
+ bool IsValid() const override { return map_.object()->is_stable(); }
void Install(const MaybeObjectHandle& code) override {
SLOW_DCHECK(IsValid());
- DependentCode::InstallDependency(map_.isolate(), code, map_.object<Map>(),
+ DependentCode::InstallDependency(map_.isolate(), code, map_.object(),
DependentCode::kPrototypeCheckGroup);
}
@@ -106,11 +117,11 @@ class TransitionDependency final : public CompilationDependencies::Dependency {
DCHECK(!map_.is_deprecated());
}
- bool IsValid() const override { return !map_.object<Map>()->is_deprecated(); }
+ bool IsValid() const override { return !map_.object()->is_deprecated(); }
void Install(const MaybeObjectHandle& code) override {
SLOW_DCHECK(IsValid());
- DependentCode::InstallDependency(map_.isolate(), code, map_.object<Map>(),
+ DependentCode::InstallDependency(map_.isolate(), code, map_.object(),
DependentCode::kTransitionGroup);
}
@@ -129,16 +140,20 @@ class PretenureModeDependency final
}
bool IsValid() const override {
- return mode_ == site_.object<AllocationSite>()->GetPretenureMode();
+ return mode_ == site_.object()->GetPretenureMode();
}
void Install(const MaybeObjectHandle& code) override {
SLOW_DCHECK(IsValid());
DependentCode::InstallDependency(
- site_.isolate(), code, site_.object<AllocationSite>(),
+ site_.isolate(), code, site_.object(),
DependentCode::kAllocationSiteTenuringChangedGroup);
}
+#ifdef DEBUG
+ bool IsPretenureModeDependency() const override { return true; }
+#endif
+
private:
AllocationSiteRef site_;
PretenureFlag mode_;
@@ -149,23 +164,29 @@ class FieldTypeDependency final : public CompilationDependencies::Dependency {
// TODO(neis): Once the concurrent compiler frontend is always-on, we no
// longer need to explicitly store the type.
FieldTypeDependency(const MapRef& owner, int descriptor,
- const ObjectRef& type)
- : owner_(owner), descriptor_(descriptor), type_(type) {
+ const ObjectRef& type, PropertyConstness constness)
+ : owner_(owner),
+ descriptor_(descriptor),
+ type_(type),
+ constness_(constness) {
DCHECK(owner_.equals(owner_.FindFieldOwner(descriptor_)));
DCHECK(type_.equals(owner_.GetFieldType(descriptor_)));
+ DCHECK_EQ(constness_, owner_.GetPropertyDetails(descriptor_).constness());
}
bool IsValid() const override {
DisallowHeapAllocation no_heap_allocation;
- Handle<Map> owner = owner_.object<Map>();
- Handle<FieldType> type = type_.object<FieldType>();
- return *type == owner->instance_descriptors()->GetFieldType(descriptor_);
+ Handle<Map> owner = owner_.object();
+ Handle<Object> type = type_.object();
+ return *type == owner->instance_descriptors()->GetFieldType(descriptor_) &&
+ constness_ == owner->instance_descriptors()
+ ->GetDetails(descriptor_)
+ .constness();
}
void Install(const MaybeObjectHandle& code) override {
SLOW_DCHECK(IsValid());
- DependentCode::InstallDependency(owner_.isolate(), code,
- owner_.object<Map>(),
+ DependentCode::InstallDependency(owner_.isolate(), code, owner_.object(),
DependentCode::kFieldOwnerGroup);
}
@@ -173,6 +194,7 @@ class FieldTypeDependency final : public CompilationDependencies::Dependency {
MapRef owner_;
int descriptor_;
ObjectRef type_;
+ PropertyConstness constness_;
};
class GlobalPropertyDependency final
@@ -188,15 +210,23 @@ class GlobalPropertyDependency final
}
bool IsValid() const override {
- Handle<PropertyCell> cell = cell_.object<PropertyCell>();
+ Handle<PropertyCell> cell = cell_.object();
+ // The dependency is never valid if the cell is 'invalidated'. This is
+ // marked by setting the value to the hole.
+ if (cell->value() == *(cell_.isolate()->factory()->the_hole_value())) {
+ DCHECK(cell->property_details().cell_type() ==
+ PropertyCellType::kInvalidated ||
+ cell->property_details().cell_type() ==
+ PropertyCellType::kUninitialized);
+ return false;
+ }
return type_ == cell->property_details().cell_type() &&
read_only_ == cell->property_details().IsReadOnly();
}
void Install(const MaybeObjectHandle& code) override {
SLOW_DCHECK(IsValid());
- DependentCode::InstallDependency(cell_.isolate(), code,
- cell_.object<PropertyCell>(),
+ DependentCode::InstallDependency(cell_.isolate(), code, cell_.object(),
DependentCode::kPropertyCellChangedGroup);
}
@@ -213,14 +243,13 @@ class ProtectorDependency final : public CompilationDependencies::Dependency {
}
bool IsValid() const override {
- Handle<PropertyCell> cell = cell_.object<PropertyCell>();
+ Handle<PropertyCell> cell = cell_.object();
return cell->value() == Smi::FromInt(Isolate::kProtectorValid);
}
void Install(const MaybeObjectHandle& code) override {
SLOW_DCHECK(IsValid());
- DependentCode::InstallDependency(cell_.isolate(), code,
- cell_.object<PropertyCell>(),
+ DependentCode::InstallDependency(cell_.isolate(), code, cell_.object(),
DependentCode::kPropertyCellChangedGroup);
}
@@ -242,7 +271,7 @@ class ElementsKindDependency final
}
bool IsValid() const override {
- Handle<AllocationSite> site = site_.object<AllocationSite>();
+ Handle<AllocationSite> site = site_.object();
ElementsKind kind = site->PointsToLiteral()
? site->boilerplate()->GetElementsKind()
: site->GetElementsKind();
@@ -252,7 +281,7 @@ class ElementsKindDependency final
void Install(const MaybeObjectHandle& code) override {
SLOW_DCHECK(IsValid());
DependentCode::InstallDependency(
- site_.isolate(), code, site_.object<AllocationSite>(),
+ site_.isolate(), code, site_.object(),
DependentCode::kAllocationSiteTransitionChangedGroup);
}
@@ -271,17 +300,22 @@ class InitialMapInstanceSizePredictionDependency final
bool IsValid() const override {
// The dependency is valid if the prediction is the same as the current
// slack tracking result.
- if (!function_.object<JSFunction>()->has_initial_map()) return false;
- int instance_size =
- function_.object<JSFunction>()->ComputeInstanceSizeWithMinSlack(
- function_.isolate());
+ if (!function_.object()->has_initial_map()) return false;
+ int instance_size = function_.object()->ComputeInstanceSizeWithMinSlack(
+ function_.isolate());
return instance_size == instance_size_;
}
+ void PrepareInstall() override {
+ SLOW_DCHECK(IsValid());
+ function_.object()->CompleteInobjectSlackTrackingIfActive();
+ }
+
void Install(const MaybeObjectHandle& code) override {
- DCHECK(IsValid());
- // Finish the slack tracking.
- function_.object<JSFunction>()->CompleteInobjectSlackTrackingIfActive();
+ SLOW_DCHECK(IsValid());
+ DCHECK(!function_.object()
+ ->initial_map()
+ ->IsInobjectSlackTrackingInProgress());
}
private:
@@ -331,9 +365,11 @@ void CompilationDependencies::DependOnFieldType(const MapRef& map,
int descriptor) {
MapRef owner = map.FindFieldOwner(descriptor);
ObjectRef type = owner.GetFieldType(descriptor);
+ PropertyConstness constness =
+ owner.GetPropertyDetails(descriptor).constness();
DCHECK(type.equals(map.GetFieldType(descriptor)));
- dependencies_.push_front(new (zone_)
- FieldTypeDependency(owner, descriptor, type));
+ dependencies_.push_front(
+ new (zone_) FieldTypeDependency(owner, descriptor, type, constness));
}
void CompilationDependencies::DependOnGlobalProperty(
@@ -367,58 +403,77 @@ bool CompilationDependencies::AreValid() const {
}
bool CompilationDependencies::Commit(Handle<Code> code) {
- // Check validity of all dependencies first, such that we can avoid installing
- // anything when there's already an invalid dependency.
- if (!AreValid()) {
- dependencies_.clear();
- return false;
+ for (auto dep : dependencies_) {
+ if (!dep->IsValid()) {
+ dependencies_.clear();
+ return false;
+ }
+ dep->PrepareInstall();
}
+ DisallowCodeDependencyChange no_dependency_change;
for (auto dep : dependencies_) {
// Check each dependency's validity again right before installing it,
- // because a GC can trigger invalidation for some dependency kinds.
+ // because the first iteration above might have invalidated some
+ // dependencies. For example, PrototypePropertyDependency::PrepareInstall
+ // can call EnsureHasInitialMap, which can invalidate a StableMapDependency
+ // on the prototype object's map.
if (!dep->IsValid()) {
dependencies_.clear();
return false;
}
dep->Install(MaybeObjectHandle::Weak(code));
}
+
+ // It is even possible that a GC during the above installations invalidated
+ // one of the dependencies. However, this should only affect pretenure mode
+ // dependencies, which we assert below. It is safe to return successfully in
+ // these cases, because once the code gets executed it will do a stack check
+ // that triggers its deoptimization.
+ if (FLAG_stress_gc_during_compilation) {
+ isolate_->heap()->PreciseCollectAllGarbage(
+ Heap::kNoGCFlags, GarbageCollectionReason::kTesting,
+ kGCCallbackFlagForced);
+ }
+#ifdef DEBUG
+ for (auto dep : dependencies_) {
+ CHECK_IMPLIES(!dep->IsValid(), dep->IsPretenureModeDependency());
+ }
+#endif
+
dependencies_.clear();
return true;
}
namespace {
+// This function expects to never see a JSProxy.
void DependOnStablePrototypeChain(JSHeapBroker* broker,
- CompilationDependencies* deps,
- Handle<Map> map,
- MaybeHandle<JSReceiver> last_prototype) {
- for (PrototypeIterator i(broker->isolate(), map); !i.IsAtEnd(); i.Advance()) {
- Handle<JSReceiver> const current =
- PrototypeIterator::GetCurrent<JSReceiver>(i);
- deps->DependOnStableMap(
- MapRef(broker, handle(current->map(), broker->isolate())));
- Handle<JSReceiver> last;
- if (last_prototype.ToHandle(&last) && last.is_identical_to(current)) {
- break;
- }
+ CompilationDependencies* deps, MapRef map,
+ const JSObjectRef& last_prototype) {
+ while (true) {
+ map.SerializePrototype();
+ JSObjectRef proto = map.prototype().AsJSObject();
+ map = proto.map();
+ deps->DependOnStableMap(map);
+ if (proto.equals(last_prototype)) break;
}
}
} // namespace
void CompilationDependencies::DependOnStablePrototypeChains(
- JSHeapBroker* broker, Handle<Context> native_context,
- std::vector<Handle<Map>> const& receiver_maps, Handle<JSObject> holder) {
- Isolate* isolate = holder->GetIsolate();
+ JSHeapBroker* broker, std::vector<Handle<Map>> const& receiver_maps,
+ const JSObjectRef& holder) {
// Determine actual holder and perform prototype chain checks.
for (auto map : receiver_maps) {
- // Perform the implicit ToObject for primitives here.
- // Implemented according to ES6 section 7.3.2 GetV (V, P).
- Handle<JSFunction> constructor;
- if (Map::GetConstructorFunction(map, native_context)
- .ToHandle(&constructor)) {
- map = handle(constructor->initial_map(), isolate);
+ MapRef receiver_map(broker, map);
+ if (receiver_map.IsPrimitiveMap()) {
+ // Perform the implicit ToObject for primitives here.
+ // Implemented according to ES6 section 7.3.2 GetV (V, P).
+ base::Optional<JSFunctionRef> constructor =
+ broker->native_context().GetConstructorFunction(receiver_map);
+ if (constructor.has_value()) receiver_map = constructor->initial_map();
}
- DependOnStablePrototypeChain(broker, this, map, holder);
+ DependOnStablePrototypeChain(broker, this, receiver_map, holder);
}
}
@@ -437,7 +492,7 @@ SlackTrackingPrediction::SlackTrackingPrediction(MapRef initial_map,
int instance_size)
: instance_size_(instance_size),
inobject_property_count_(
- (instance_size >> kPointerSizeLog2) -
+ (instance_size >> kTaggedSizeLog2) -
initial_map.GetInObjectPropertiesStartInWords()) {}
SlackTrackingPrediction
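The reworked Commit above is a two-phase protocol: validate and run every PrepareInstall (which may mutate the heap, e.g. via EnsureHasInitialMap), then re-validate and install under DisallowCodeDependencyChange. Condensed to its skeleton, restating the code in this hunk rather than adding behavior:

    // Phase 1: validate, then run heap-mutating preparation up front.
    for (auto dep : dependencies_) {
      if (!dep->IsValid()) { dependencies_.clear(); return false; }
      dep->PrepareInstall();  // may invalidate *other* dependencies
    }
    // Phase 2: re-validate (phase 1 may have invalidated something) and
    // install weak code dependencies; no dependency changes allowed here.
    DisallowCodeDependencyChange no_dependency_change;
    for (auto dep : dependencies_) {
      if (!dep->IsValid()) { dependencies_.clear(); return false; }
      dep->Install(MaybeObjectHandle::Weak(code));
    }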
diff --git a/deps/v8/src/compiler/compilation-dependencies.h b/deps/v8/src/compiler/compilation-dependencies.h
index 5d4cd221df..1a6760f867 100644
--- a/deps/v8/src/compiler/compilation-dependencies.h
+++ b/deps/v8/src/compiler/compilation-dependencies.h
@@ -67,10 +67,9 @@ class V8_EXPORT_PRIVATE CompilationDependencies : public ZoneObject {
// Depend on the stability of (the maps of) all prototypes of every class in
// {receiver_type} up to (and including) the {holder}.
- // TODO(neis): Fully brokerize!
void DependOnStablePrototypeChains(
- JSHeapBroker* broker, Handle<Context> native_context,
- std::vector<Handle<Map>> const& receiver_maps, Handle<JSObject> holder);
+ JSHeapBroker* broker, std::vector<Handle<Map>> const& receiver_maps,
+ const JSObjectRef& holder);
// Like DependOnElementsKind but also applies to all nested allocation sites.
void DependOnElementsKinds(const AllocationSiteRef& site);
@@ -92,6 +91,7 @@ class V8_EXPORT_PRIVATE CompilationDependencies : public ZoneObject {
private:
Zone* zone_;
ZoneForwardList<Dependency*> dependencies_;
+ Isolate* isolate_;
};
} // namespace compiler
diff --git a/deps/v8/src/compiler/constant-folding-reducer.cc b/deps/v8/src/compiler/constant-folding-reducer.cc
index 4508d90b71..c9f838f814 100644
--- a/deps/v8/src/compiler/constant-folding-reducer.cc
+++ b/deps/v8/src/compiler/constant-folding-reducer.cc
@@ -12,10 +12,8 @@ namespace internal {
namespace compiler {
ConstantFoldingReducer::ConstantFoldingReducer(Editor* editor, JSGraph* jsgraph,
- JSHeapBroker* js_heap_broker)
- : AdvancedReducer(editor),
- jsgraph_(jsgraph),
- js_heap_broker_(js_heap_broker) {}
+ JSHeapBroker* broker)
+ : AdvancedReducer(editor), jsgraph_(jsgraph), broker_(broker) {}
ConstantFoldingReducer::~ConstantFoldingReducer() = default;
@@ -40,7 +38,7 @@ Reduction ConstantFoldingReducer::Reduce(Node* node) {
replacement = jsgraph()->Constant(upper.AsHeapConstant()->Ref());
} else if (upper.Is(Type::MinusZero())) {
Factory* factory = jsgraph()->isolate()->factory();
- ObjectRef minus_zero(js_heap_broker(), factory->minus_zero_value());
+ ObjectRef minus_zero(broker(), factory->minus_zero_value());
replacement = jsgraph()->Constant(minus_zero);
} else if (upper.Is(Type::NaN())) {
replacement = jsgraph()->NaNConstant();
diff --git a/deps/v8/src/compiler/constant-folding-reducer.h b/deps/v8/src/compiler/constant-folding-reducer.h
index 3fbe5c4c2e..f98ab0595e 100644
--- a/deps/v8/src/compiler/constant-folding-reducer.h
+++ b/deps/v8/src/compiler/constant-folding-reducer.h
@@ -18,7 +18,7 @@ class V8_EXPORT_PRIVATE ConstantFoldingReducer final
: public NON_EXPORTED_BASE(AdvancedReducer) {
public:
ConstantFoldingReducer(Editor* editor, JSGraph* jsgraph,
- JSHeapBroker* js_heap_broker);
+ JSHeapBroker* broker);
~ConstantFoldingReducer() final;
const char* reducer_name() const override { return "ConstantFoldingReducer"; }
@@ -27,10 +27,10 @@ class V8_EXPORT_PRIVATE ConstantFoldingReducer final
private:
JSGraph* jsgraph() const { return jsgraph_; }
- JSHeapBroker* js_heap_broker() const { return js_heap_broker_; }
+ JSHeapBroker* broker() const { return broker_; }
JSGraph* const jsgraph_;
- JSHeapBroker* const js_heap_broker_;
+ JSHeapBroker* const broker_;
DISALLOW_COPY_AND_ASSIGN(ConstantFoldingReducer);
};
diff --git a/deps/v8/src/compiler/dead-code-elimination.cc b/deps/v8/src/compiler/dead-code-elimination.cc
index 424db00fc4..2251121c7f 100644
--- a/deps/v8/src/compiler/dead-code-elimination.cc
+++ b/deps/v8/src/compiler/dead-code-elimination.cc
@@ -65,7 +65,8 @@ Reduction DeadCodeElimination::Reduce(Node* node) {
case IrOpcode::kDeoptimize:
case IrOpcode::kReturn:
case IrOpcode::kTerminate:
- return ReduceDeoptimizeOrReturnOrTerminate(node);
+ case IrOpcode::kTailCall:
+ return ReduceDeoptimizeOrReturnOrTerminateOrTailCall(node);
case IrOpcode::kThrow:
return PropagateDeadControl(node);
case IrOpcode::kBranch:
@@ -281,10 +282,12 @@ Reduction DeadCodeElimination::ReduceEffectNode(Node* node) {
return NoChange();
}
-Reduction DeadCodeElimination::ReduceDeoptimizeOrReturnOrTerminate(Node* node) {
+Reduction DeadCodeElimination::ReduceDeoptimizeOrReturnOrTerminateOrTailCall(
+ Node* node) {
DCHECK(node->opcode() == IrOpcode::kDeoptimize ||
node->opcode() == IrOpcode::kReturn ||
- node->opcode() == IrOpcode::kTerminate);
+ node->opcode() == IrOpcode::kTerminate ||
+ node->opcode() == IrOpcode::kTailCall);
Reduction reduction = PropagateDeadControl(node);
if (reduction.Changed()) return reduction;
if (FindDeadInput(node) != nullptr) {
diff --git a/deps/v8/src/compiler/dead-code-elimination.h b/deps/v8/src/compiler/dead-code-elimination.h
index a1cab2f0f0..95b9179595 100644
--- a/deps/v8/src/compiler/dead-code-elimination.h
+++ b/deps/v8/src/compiler/dead-code-elimination.h
@@ -56,7 +56,7 @@ class V8_EXPORT_PRIVATE DeadCodeElimination final
Reduction ReducePureNode(Node* node);
Reduction ReduceUnreachableOrIfException(Node* node);
Reduction ReduceEffectNode(Node* node);
- Reduction ReduceDeoptimizeOrReturnOrTerminate(Node* node);
+ Reduction ReduceDeoptimizeOrReturnOrTerminateOrTailCall(Node* node);
Reduction ReduceBranchOrSwitch(Node* node);
Reduction RemoveLoopExit(Node* node);
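With kTailCall added above, a tail call with a dead value, effect, or control input is now eliminated exactly like a dead Return or Deoptimize. The rewrite itself sits in the unchanged remainder of the function; an illustrative reconstruction of that rule (treat as a sketch, since the body is not part of this hunk):

    if (FindDeadInput(node) != nullptr) {
      // Replace the dead node with Unreachable feeding a Throw, so control
      // ends here instead of flowing onward from a dead input.
      Node* effect = NodeProperties::GetEffectInput(node, 0);
      Node* control = NodeProperties::GetControlInput(node, 0);
      if (effect->opcode() != IrOpcode::kUnreachable) {
        effect = graph()->NewNode(common()->Unreachable(), effect, control);
      }
      node->TrimInputCount(2);
      node->ReplaceInput(0, effect);
      node->ReplaceInput(1, control);
      NodeProperties::ChangeOp(node, common()->Throw());
      return Changed(node);
    }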
diff --git a/deps/v8/src/compiler/effect-control-linearizer.cc b/deps/v8/src/compiler/effect-control-linearizer.cc
index 5f56d080d9..02e5d10574 100644
--- a/deps/v8/src/compiler/effect-control-linearizer.cc
+++ b/deps/v8/src/compiler/effect-control-linearizer.cc
@@ -15,6 +15,8 @@
#include "src/compiler/node.h"
#include "src/compiler/schedule.h"
#include "src/heap/factory-inl.h"
+#include "src/objects/heap-number.h"
+#include "src/objects/oddball.h"
namespace v8 {
namespace internal {
@@ -386,13 +388,6 @@ void EffectControlLinearizer::Run() {
Node* effect = effect_phi;
if (effect == nullptr) {
// There was no effect phi.
-
- // Since a loop should have at least a StackCheck, only loops in
- // unreachable code can have no effect phi.
- DCHECK_IMPLIES(
- HasIncomingBackEdges(block),
- block_effects.For(block->PredecessorAt(0), block)
- .current_effect->opcode() == IrOpcode::kUnreachable);
if (block == schedule()->start()) {
// Start block => effect is start.
DCHECK_EQ(graph()->start(), control);
@@ -676,9 +671,6 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
case IrOpcode::kTruncateTaggedToFloat64:
result = LowerTruncateTaggedToFloat64(node);
break;
- case IrOpcode::kCheckBounds:
- result = LowerCheckBounds(node, frame_state);
- break;
case IrOpcode::kPoisonIndex:
result = LowerPoisonIndex(node);
break;
@@ -694,6 +686,9 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
case IrOpcode::kCheckReceiver:
result = LowerCheckReceiver(node, frame_state);
break;
+ case IrOpcode::kCheckReceiverOrNullOrUndefined:
+ result = LowerCheckReceiverOrNullOrUndefined(node, frame_state);
+ break;
case IrOpcode::kCheckSymbol:
result = LowerCheckSymbol(node, frame_state);
break;
@@ -736,12 +731,18 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
case IrOpcode::kCheckedInt64ToTaggedSigned:
result = LowerCheckedInt64ToTaggedSigned(node, frame_state);
break;
+ case IrOpcode::kCheckedUint32Bounds:
+ result = LowerCheckedUint32Bounds(node, frame_state);
+ break;
case IrOpcode::kCheckedUint32ToInt32:
result = LowerCheckedUint32ToInt32(node, frame_state);
break;
case IrOpcode::kCheckedUint32ToTaggedSigned:
result = LowerCheckedUint32ToTaggedSigned(node, frame_state);
break;
+ case IrOpcode::kCheckedUint64Bounds:
+ result = LowerCheckedUint64Bounds(node, frame_state);
+ break;
case IrOpcode::kCheckedUint64ToInt32:
result = LowerCheckedUint64ToInt32(node, frame_state);
break;
@@ -751,6 +752,9 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
case IrOpcode::kCheckedFloat64ToInt32:
result = LowerCheckedFloat64ToInt32(node, frame_state);
break;
+ case IrOpcode::kCheckedFloat64ToInt64:
+ result = LowerCheckedFloat64ToInt64(node, frame_state);
+ break;
case IrOpcode::kCheckedTaggedSignedToInt32:
if (frame_state == nullptr) {
FATAL("No frame state (zapped by #%d: %s)", frame_state_zapper_->id(),
@@ -761,6 +765,9 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
case IrOpcode::kCheckedTaggedToInt32:
result = LowerCheckedTaggedToInt32(node, frame_state);
break;
+ case IrOpcode::kCheckedTaggedToInt64:
+ result = LowerCheckedTaggedToInt64(node, frame_state);
+ break;
case IrOpcode::kCheckedTaggedToFloat64:
result = LowerCheckedTaggedToFloat64(node, frame_state);
break;
@@ -1285,9 +1292,9 @@ void EffectControlLinearizer::TruncateTaggedPointerToBit(
__ Bind(&if_bigint);
{
Node* bitfield = __ LoadField(AccessBuilder::ForBigIntBitfield(), value);
- Node* length_is_zero = __ WordEqual(
- __ WordAnd(bitfield, __ IntPtrConstant(BigInt::LengthBits::kMask)),
- __ IntPtrConstant(0));
+ Node* length_is_zero = __ Word32Equal(
+ __ Word32And(bitfield, __ Int32Constant(BigInt::LengthBits::kMask)),
+ __ Int32Constant(0));
__ Goto(done, __ Word32Equal(length_is_zero, zero));
}
}
@@ -1427,17 +1434,6 @@ Node* EffectControlLinearizer::LowerTruncateTaggedToFloat64(Node* node) {
return done.PhiAt(0);
}
-Node* EffectControlLinearizer::LowerCheckBounds(Node* node, Node* frame_state) {
- Node* index = node->InputAt(0);
- Node* limit = node->InputAt(1);
- const CheckParameters& params = CheckParametersOf(node->op());
-
- Node* check = __ Uint32LessThan(index, limit);
- __ DeoptimizeIfNot(DeoptimizeReason::kOutOfBounds, params.feedback(), check,
- frame_state, IsSafetyCheck::kCriticalSafetyCheck);
- return index;
-}
-
Node* EffectControlLinearizer::LowerPoisonIndex(Node* node) {
Node* index = node->InputAt(0);
if (mask_array_index_ == kMaskArrayIndex) {
@@ -1607,6 +1603,29 @@ Node* EffectControlLinearizer::LowerCheckReceiver(Node* node,
return value;
}
+Node* EffectControlLinearizer::LowerCheckReceiverOrNullOrUndefined(
+ Node* node, Node* frame_state) {
+ Node* value = node->InputAt(0);
+
+ Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
+ Node* value_instance_type =
+ __ LoadField(AccessBuilder::ForMapInstanceType(), value_map);
+
+ // Rule out all primitives except oddballs (true, false, undefined, null).
+ STATIC_ASSERT(LAST_PRIMITIVE_TYPE == ODDBALL_TYPE);
+ STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
+ Node* check0 = __ Uint32LessThanOrEqual(__ Uint32Constant(ODDBALL_TYPE),
+ value_instance_type);
+ __ DeoptimizeIfNot(DeoptimizeReason::kNotAJavaScriptObjectOrNullOrUndefined,
+ VectorSlotPair(), check0, frame_state);
+
+ // Rule out booleans.
+ Node* check1 = __ WordEqual(value_map, __ BooleanMapConstant());
+ __ DeoptimizeIf(DeoptimizeReason::kNotAJavaScriptObjectOrNullOrUndefined,
+ VectorSlotPair(), check1, frame_state);
+ return value;
+}
+
Node* EffectControlLinearizer::LowerCheckSymbol(Node* node, Node* frame_state) {
Node* value = node->InputAt(0);
@@ -2032,6 +2051,18 @@ Node* EffectControlLinearizer::LowerCheckedInt64ToTaggedSigned(
}
}
+Node* EffectControlLinearizer::LowerCheckedUint32Bounds(Node* node,
+ Node* frame_state) {
+ Node* index = node->InputAt(0);
+ Node* limit = node->InputAt(1);
+ const CheckParameters& params = CheckParametersOf(node->op());
+
+ Node* check = __ Uint32LessThan(index, limit);
+ __ DeoptimizeIfNot(DeoptimizeReason::kOutOfBounds, params.feedback(), check,
+ frame_state, IsSafetyCheck::kCriticalSafetyCheck);
+ return index;
+}
+
Node* EffectControlLinearizer::LowerCheckedUint32ToInt32(Node* node,
Node* frame_state) {
Node* value = node->InputAt(0);
@@ -2052,6 +2083,18 @@ Node* EffectControlLinearizer::LowerCheckedUint32ToTaggedSigned(
return ChangeUint32ToSmi(value);
}
+Node* EffectControlLinearizer::LowerCheckedUint64Bounds(Node* node,
+ Node* frame_state) {
+ CheckParameters const& params = CheckParametersOf(node->op());
+ Node* const index = node->InputAt(0);
+ Node* const limit = node->InputAt(1);
+
+ Node* check = __ Uint64LessThan(index, limit);
+ __ DeoptimizeIfNot(DeoptimizeReason::kOutOfBounds, params.feedback(), check,
+ frame_state, IsSafetyCheck::kCriticalSafetyCheck);
+ return index;
+}
+
Node* EffectControlLinearizer::LowerCheckedUint64ToInt32(Node* node,
Node* frame_state) {
Node* value = node->InputAt(0);
@@ -2114,6 +2157,45 @@ Node* EffectControlLinearizer::LowerCheckedFloat64ToInt32(Node* node,
frame_state);
}
+Node* EffectControlLinearizer::BuildCheckedFloat64ToInt64(
+ CheckForMinusZeroMode mode, const VectorSlotPair& feedback, Node* value,
+ Node* frame_state) {
+ Node* value64 = __ TruncateFloat64ToInt64(value);
+ Node* check_same = __ Float64Equal(value, __ ChangeInt64ToFloat64(value64));
+ __ DeoptimizeIfNot(DeoptimizeReason::kLostPrecisionOrNaN, feedback,
+ check_same, frame_state);
+
+ if (mode == CheckForMinusZeroMode::kCheckForMinusZero) {
+ // Check if {value} is -0.
+ auto if_zero = __ MakeDeferredLabel();
+ auto check_done = __ MakeLabel();
+
+ Node* check_zero = __ Word64Equal(value64, __ Int64Constant(0));
+ __ GotoIf(check_zero, &if_zero);
+ __ Goto(&check_done);
+
+ __ Bind(&if_zero);
+ // In case of 0, we need to check the high bits for the IEEE -0 pattern.
+ Node* check_negative = __ Int32LessThan(__ Float64ExtractHighWord32(value),
+ __ Int32Constant(0));
+ __ DeoptimizeIf(DeoptimizeReason::kMinusZero, feedback, check_negative,
+ frame_state);
+ __ Goto(&check_done);
+
+ __ Bind(&check_done);
+ }
+ return value64;
+}
+
+Node* EffectControlLinearizer::LowerCheckedFloat64ToInt64(Node* node,
+ Node* frame_state) {
+ const CheckMinusZeroParameters& params =
+ CheckMinusZeroParametersOf(node->op());
+ Node* value = node->InputAt(0);
+ return BuildCheckedFloat64ToInt64(params.mode(), params.feedback(), value,
+ frame_state);
+}
+
Node* EffectControlLinearizer::LowerCheckedTaggedSignedToInt32(
Node* node, Node* frame_state) {
Node* value = node->InputAt(0);
@@ -2154,6 +2236,36 @@ Node* EffectControlLinearizer::LowerCheckedTaggedToInt32(Node* node,
return done.PhiAt(0);
}
+Node* EffectControlLinearizer::LowerCheckedTaggedToInt64(Node* node,
+ Node* frame_state) {
+ const CheckMinusZeroParameters& params =
+ CheckMinusZeroParametersOf(node->op());
+ Node* value = node->InputAt(0);
+
+ auto if_not_smi = __ MakeDeferredLabel();
+ auto done = __ MakeLabel(MachineRepresentation::kWord64);
+
+ Node* check = ObjectIsSmi(value);
+ __ GotoIfNot(check, &if_not_smi);
+ // In the Smi case, just convert to int64.
+ __ Goto(&done, ChangeSmiToInt64(value));
+
+ // In the non-Smi case, check the heap numberness, load the number and convert
+ // to int64.
+ __ Bind(&if_not_smi);
+ Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
+ Node* check_map = __ WordEqual(value_map, __ HeapNumberMapConstant());
+ __ DeoptimizeIfNot(DeoptimizeReason::kNotAHeapNumber, params.feedback(),
+ check_map, frame_state);
+ Node* vfalse = __ LoadField(AccessBuilder::ForHeapNumberValue(), value);
+ vfalse = BuildCheckedFloat64ToInt64(params.mode(), params.feedback(), vfalse,
+ frame_state);
+ __ Goto(&done, vfalse);
+
+ __ Bind(&done);
+ return done.PhiAt(0);
+}
+
Node* EffectControlLinearizer::BuildCheckedHeapNumberOrOddballToFloat64(
CheckTaggedInputMode mode, const VectorSlotPair& feedback, Node* value,
Node* frame_state) {
@@ -2869,7 +2981,7 @@ Node* EffectControlLinearizer::LowerArgumentsFrame(Node* node) {
Node* frame = __ LoadFramePointer();
Node* parent_frame =
- __ Load(MachineType::AnyTagged(), frame,
+ __ Load(MachineType::Pointer(), frame,
__ IntPtrConstant(StandardFrameConstants::kCallerFPOffset));
Node* parent_frame_type = __ Load(
MachineType::AnyTagged(), parent_frame,
@@ -2946,7 +3058,7 @@ Node* EffectControlLinearizer::LowerNewSmiOrObjectElements(Node* node) {
// Compute the effective size of the backing store.
Node* size =
- __ Int32Add(__ Word32Shl(length, __ Int32Constant(kPointerSizeLog2)),
+ __ Int32Add(__ Word32Shl(length, __ Int32Constant(kTaggedSizeLog2)),
__ Int32Constant(FixedArray::kHeaderSize));
// Allocate the result and initialize the header.
@@ -2970,7 +3082,7 @@ Node* EffectControlLinearizer::LowerNewSmiOrObjectElements(Node* node) {
// Storing "the_hole" doesn't need a write barrier.
StoreRepresentation rep(MachineRepresentation::kTagged, kNoWriteBarrier);
Node* offset =
- __ IntAdd(__ WordShl(index, __ IntPtrConstant(kPointerSizeLog2)),
+ __ IntAdd(__ WordShl(index, __ IntPtrConstant(kTaggedSizeLog2)),
__ IntPtrConstant(FixedArray::kHeaderSize - kHeapObjectTag));
__ Store(rep, result, offset, the_hole);
@@ -3723,6 +3835,11 @@ Node* EffectControlLinearizer::AllocateHeapNumberWithValue(Node* value) {
}
Node* EffectControlLinearizer::ChangeIntPtrToSmi(Node* value) {
+ // Do shift on 32bit values if Smis are stored in the lower word.
+ if (machine()->Is64() && SmiValuesAre31Bits()) {
+ return __ ChangeInt32ToInt64(
+ __ Word32Shl(__ TruncateInt64ToInt32(value), SmiShiftBitsConstant()));
+ }
return __ WordShl(value, SmiShiftBitsConstant());
}
@@ -3741,6 +3858,10 @@ Node* EffectControlLinearizer::ChangeIntPtrToInt32(Node* value) {
}
Node* EffectControlLinearizer::ChangeInt32ToSmi(Node* value) {
+ // Do shift on 32bit values if Smis are stored in the lower word.
+ if (machine()->Is64() && SmiValuesAre31Bits()) {
+ return __ ChangeInt32ToInt64(__ Word32Shl(value, SmiShiftBitsConstant()));
+ }
return ChangeIntPtrToSmi(ChangeInt32ToIntPtr(value));
}
@@ -3757,20 +3878,32 @@ Node* EffectControlLinearizer::ChangeUint32ToUintPtr(Node* value) {
}
Node* EffectControlLinearizer::ChangeUint32ToSmi(Node* value) {
- value = ChangeUint32ToUintPtr(value);
- return __ WordShl(value, SmiShiftBitsConstant());
+ // Do shift on 32bit values if Smis are stored in the lower word.
+ if (machine()->Is64() && SmiValuesAre31Bits()) {
+ return __ ChangeUint32ToUint64(__ Word32Shl(value, SmiShiftBitsConstant()));
+ } else {
+ return __ WordShl(ChangeUint32ToUintPtr(value), SmiShiftBitsConstant());
+ }
}
Node* EffectControlLinearizer::ChangeSmiToIntPtr(Node* value) {
+ // Do shift on 32bit values if Smis are stored in the lower word.
+ if (machine()->Is64() && SmiValuesAre31Bits()) {
+ return __ ChangeInt32ToInt64(
+ __ Word32Sar(__ TruncateInt64ToInt32(value), SmiShiftBitsConstant()));
+ }
return __ WordSar(value, SmiShiftBitsConstant());
}
Node* EffectControlLinearizer::ChangeSmiToInt32(Node* value) {
- value = ChangeSmiToIntPtr(value);
+ // Do shift on 32bit values if Smis are stored in the lower word.
+ if (machine()->Is64() && SmiValuesAre31Bits()) {
+ return __ Word32Sar(__ TruncateInt64ToInt32(value), SmiShiftBitsConstant());
+ }
if (machine()->Is64()) {
- value = __ TruncateInt64ToInt32(value);
+ return __ TruncateInt64ToInt32(ChangeSmiToIntPtr(value));
}
- return value;
+ return ChangeSmiToIntPtr(value);
}
Node* EffectControlLinearizer::ChangeSmiToInt64(Node* value) {
@@ -3788,6 +3921,9 @@ Node* EffectControlLinearizer::SmiMaxValueConstant() {
}
Node* EffectControlLinearizer::SmiShiftBitsConstant() {
+ if (machine()->Is64() && SmiValuesAre31Bits()) {
+ return __ Int32Constant(kSmiShiftSize + kSmiTagSize);
+ }
return __ IntPtrConstant(kSmiShiftSize + kSmiTagSize);
}
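The Smi helpers above gain a fast path for 64-bit targets with 31-bit Smis, where the payload sits in the low 32 bits and the total shift (kSmiShiftSize + kSmiTagSize) amounts to a single bit. Host-side arithmetic mirroring what the generated code does under that assumption:

    #include <cstdint>

    // Tag: shift within 32 bits, then sign-extend to 64 bits.  5 -> 0xA.
    int64_t TagSmi31(int32_t value) {
      return static_cast<int32_t>(static_cast<uint32_t>(value) << 1);
    }

    // Untag: truncate to 32 bits, then arithmetic shift right.  0xA -> 5.
    int32_t UntagSmi31(int64_t tagged) {
      return static_cast<int32_t>(tagged) >> 1;
    }

With classic full-width Smis on 64-bit, the shift is 32 instead, so the same payload lands in the upper word (5 is stored as 0x0000000500000000).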
@@ -3994,7 +4130,7 @@ Node* EffectControlLinearizer::LowerLoadFieldByIndex(Node* node) {
// The field is located in the {object} itself.
{
Node* offset =
- __ IntAdd(__ WordShl(index, __ IntPtrConstant(kPointerSizeLog2 - 1)),
+ __ IntAdd(__ WordShl(index, __ IntPtrConstant(kTaggedSizeLog2 - 1)),
__ IntPtrConstant(JSObject::kHeaderSize - kHeapObjectTag));
Node* result = __ Load(MachineType::AnyTagged(), object, offset);
__ Goto(&done, result);
@@ -4008,8 +4144,8 @@ Node* EffectControlLinearizer::LowerLoadFieldByIndex(Node* node) {
__ LoadField(AccessBuilder::ForJSObjectPropertiesOrHash(), object);
Node* offset =
__ IntAdd(__ WordShl(__ IntSub(zero, index),
- __ IntPtrConstant(kPointerSizeLog2 - 1)),
- __ IntPtrConstant((FixedArray::kHeaderSize - kPointerSize) -
+ __ IntPtrConstant(kTaggedSizeLog2 - 1)),
+ __ IntPtrConstant((FixedArray::kHeaderSize - kTaggedSize) -
kHeapObjectTag));
Node* result = __ Load(MachineType::AnyTagged(), properties, offset);
__ Goto(&done, result);
@@ -4031,7 +4167,7 @@ Node* EffectControlLinearizer::LowerLoadFieldByIndex(Node* node) {
// The field is located in the {object} itself.
{
Node* offset =
- __ IntAdd(__ WordShl(index, __ IntPtrConstant(kPointerSizeLog2)),
+ __ IntAdd(__ WordShl(index, __ IntPtrConstant(kTaggedSizeLog2)),
__ IntPtrConstant(JSObject::kHeaderSize - kHeapObjectTag));
if (FLAG_unbox_double_fields) {
Node* result = __ Load(MachineType::Float64(), object, offset);
@@ -4049,8 +4185,8 @@ Node* EffectControlLinearizer::LowerLoadFieldByIndex(Node* node) {
__ LoadField(AccessBuilder::ForJSObjectPropertiesOrHash(), object);
Node* offset =
__ IntAdd(__ WordShl(__ IntSub(zero, index),
- __ IntPtrConstant(kPointerSizeLog2)),
- __ IntPtrConstant((FixedArray::kHeaderSize - kPointerSize) -
+ __ IntPtrConstant(kTaggedSizeLog2)),
+ __ IntPtrConstant((FixedArray::kHeaderSize - kTaggedSize) -
kHeapObjectTag));
Node* result = __ Load(MachineType::AnyTagged(), properties, offset);
result = __ LoadField(AccessBuilder::ForHeapNumberValue(), result);
@@ -4125,17 +4261,21 @@ Node* EffectControlLinearizer::LowerLoadDataViewElement(Node* node) {
ExternalArrayType element_type = ExternalArrayTypeOf(node->op());
Node* buffer = node->InputAt(0);
Node* storage = node->InputAt(1);
- Node* index = node->InputAt(2);
- Node* is_little_endian = node->InputAt(3);
+ Node* byte_offset = node->InputAt(2);
+ Node* index = node->InputAt(3);
+ Node* is_little_endian = node->InputAt(4);
// We need to keep the {buffer} alive so that the GC will not release the
// ArrayBuffer (if there's any) as long as we are still operating on it.
__ Retain(buffer);
+ // Compute the effective offset.
+ Node* offset = __ IntAdd(byte_offset, index);
+
MachineType const machine_type =
AccessBuilder::ForTypedArrayElement(element_type, true).machine_type;
- Node* value = __ LoadUnaligned(machine_type, storage, index);
+ Node* value = __ LoadUnaligned(machine_type, storage, offset);
auto big_endian = __ MakeLabel();
auto done = __ MakeLabel(machine_type.representation());
@@ -4166,14 +4306,18 @@ void EffectControlLinearizer::LowerStoreDataViewElement(Node* node) {
ExternalArrayType element_type = ExternalArrayTypeOf(node->op());
Node* buffer = node->InputAt(0);
Node* storage = node->InputAt(1);
- Node* index = node->InputAt(2);
- Node* value = node->InputAt(3);
- Node* is_little_endian = node->InputAt(4);
+ Node* byte_offset = node->InputAt(2);
+ Node* index = node->InputAt(3);
+ Node* value = node->InputAt(4);
+ Node* is_little_endian = node->InputAt(5);
// We need to keep the {buffer} alive so that the GC will not release the
// ArrayBuffer (if there's any) as long as we are still operating on it.
__ Retain(buffer);
+ // Compute the effective offset.
+ Node* offset = __ IntAdd(byte_offset, index);
+
MachineType const machine_type =
AccessBuilder::ForTypedArrayElement(element_type, true).machine_type;
@@ -4199,7 +4343,7 @@ void EffectControlLinearizer::LowerStoreDataViewElement(Node* node) {
}
__ Bind(&done);
- __ StoreUnaligned(machine_type.representation(), storage, index,
+ __ StoreUnaligned(machine_type.representation(), storage, offset,
done.PhiAt(0));
}
@@ -5079,12 +5223,12 @@ Node* EffectControlLinearizer::LowerFindOrderedHashMapEntryForInt32Key(
Node* hash = ChangeUint32ToUintPtr(ComputeUnseededHash(key));
Node* number_of_buckets = ChangeSmiToIntPtr(__ LoadField(
- AccessBuilder::ForOrderedHashTableBaseNumberOfBuckets(), table));
+ AccessBuilder::ForOrderedHashMapOrSetNumberOfBuckets(), table));
hash = __ WordAnd(hash, __ IntSub(number_of_buckets, __ IntPtrConstant(1)));
Node* first_entry = ChangeSmiToIntPtr(__ Load(
MachineType::TaggedSigned(), table,
- __ IntAdd(__ WordShl(hash, __ IntPtrConstant(kPointerSizeLog2)),
- __ IntPtrConstant(OrderedHashMap::kHashTableStartOffset -
+ __ IntAdd(__ WordShl(hash, __ IntPtrConstant(kTaggedSizeLog2)),
+ __ IntPtrConstant(OrderedHashMap::HashTableStartOffset() -
kHeapObjectTag))));
auto loop = __ MakeLoopLabel(MachineType::PointerRepresentation());
@@ -5102,8 +5246,8 @@ Node* EffectControlLinearizer::LowerFindOrderedHashMapEntryForInt32Key(
Node* candidate_key = __ Load(
MachineType::AnyTagged(), table,
- __ IntAdd(__ WordShl(entry, __ IntPtrConstant(kPointerSizeLog2)),
- __ IntPtrConstant(OrderedHashMap::kHashTableStartOffset -
+ __ IntAdd(__ WordShl(entry, __ IntPtrConstant(kTaggedSizeLog2)),
+ __ IntPtrConstant(OrderedHashMap::HashTableStartOffset() -
kHeapObjectTag)));
auto if_match = __ MakeLabel();
@@ -5131,9 +5275,9 @@ Node* EffectControlLinearizer::LowerFindOrderedHashMapEntryForInt32Key(
Node* next_entry = ChangeSmiToIntPtr(__ Load(
MachineType::TaggedSigned(), table,
__ IntAdd(
- __ WordShl(entry, __ IntPtrConstant(kPointerSizeLog2)),
- __ IntPtrConstant(OrderedHashMap::kHashTableStartOffset +
- OrderedHashMap::kChainOffset * kPointerSize -
+ __ WordShl(entry, __ IntPtrConstant(kTaggedSizeLog2)),
+ __ IntPtrConstant(OrderedHashMap::HashTableStartOffset() +
+ OrderedHashMap::kChainOffset * kTaggedSize -
kHeapObjectTag))));
__ Goto(&loop, next_entry);
}
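The DataView hunks above reflect a new node layout: the view's byte offset and the access index now arrive as separate inputs and are folded into one effective offset before the unaligned access. The computation, as a standalone helper (illustrative):

    #include <cstddef>

    // offset = byte_offset + index: a view created at byteOffset 8 and read
    // at index 4 touches byte 12 of the backing storage.
    size_t EffectiveDataViewOffset(size_t byte_offset, size_t index) {
      return byte_offset + index;
    }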
diff --git a/deps/v8/src/compiler/effect-control-linearizer.h b/deps/v8/src/compiler/effect-control-linearizer.h
index 20c94b3d4f..af3cba6083 100644
--- a/deps/v8/src/compiler/effect-control-linearizer.h
+++ b/deps/v8/src/compiler/effect-control-linearizer.h
@@ -61,13 +61,13 @@ class V8_EXPORT_PRIVATE EffectControlLinearizer {
Node* LowerChangeTaggedToUint32(Node* node);
Node* LowerChangeTaggedToInt64(Node* node);
Node* LowerChangeTaggedToTaggedSigned(Node* node);
- Node* LowerCheckBounds(Node* node, Node* frame_state);
Node* LowerPoisonIndex(Node* node);
Node* LowerCheckInternalizedString(Node* node, Node* frame_state);
void LowerCheckMaps(Node* node, Node* frame_state);
Node* LowerCompareMaps(Node* node);
Node* LowerCheckNumber(Node* node, Node* frame_state);
Node* LowerCheckReceiver(Node* node, Node* frame_state);
+ Node* LowerCheckReceiverOrNullOrUndefined(Node* node, Node* frame_state);
Node* LowerCheckString(Node* node, Node* frame_state);
Node* LowerCheckSymbol(Node* node, Node* frame_state);
void LowerCheckIf(Node* node, Node* frame_state);
@@ -81,13 +81,17 @@ class V8_EXPORT_PRIVATE EffectControlLinearizer {
Node* LowerCheckedInt32ToTaggedSigned(Node* node, Node* frame_state);
Node* LowerCheckedInt64ToInt32(Node* node, Node* frame_state);
Node* LowerCheckedInt64ToTaggedSigned(Node* node, Node* frame_state);
+ Node* LowerCheckedUint32Bounds(Node* node, Node* frame_state);
Node* LowerCheckedUint32ToInt32(Node* node, Node* frame_state);
Node* LowerCheckedUint32ToTaggedSigned(Node* node, Node* frame_state);
+ Node* LowerCheckedUint64Bounds(Node* node, Node* frame_state);
Node* LowerCheckedUint64ToInt32(Node* node, Node* frame_state);
Node* LowerCheckedUint64ToTaggedSigned(Node* node, Node* frame_state);
Node* LowerCheckedFloat64ToInt32(Node* node, Node* frame_state);
+ Node* LowerCheckedFloat64ToInt64(Node* node, Node* frame_state);
Node* LowerCheckedTaggedSignedToInt32(Node* node, Node* frame_state);
Node* LowerCheckedTaggedToInt32(Node* node, Node* frame_state);
+ Node* LowerCheckedTaggedToInt64(Node* node, Node* frame_state);
Node* LowerCheckedTaggedToFloat64(Node* node, Node* frame_state);
Node* LowerCheckedTaggedToTaggedSigned(Node* node, Node* frame_state);
Node* LowerCheckedTaggedToTaggedPointer(Node* node, Node* frame_state);
@@ -183,6 +187,9 @@ class V8_EXPORT_PRIVATE EffectControlLinearizer {
Node* BuildCheckedFloat64ToInt32(CheckForMinusZeroMode mode,
const VectorSlotPair& feedback, Node* value,
Node* frame_state);
+ Node* BuildCheckedFloat64ToInt64(CheckForMinusZeroMode mode,
+ const VectorSlotPair& feedback, Node* value,
+ Node* frame_state);
Node* BuildCheckedHeapNumberOrOddballToFloat64(CheckTaggedInputMode mode,
const VectorSlotPair& feedback,
Node* value,
diff --git a/deps/v8/src/compiler/escape-analysis-reducer.cc b/deps/v8/src/compiler/escape-analysis-reducer.cc
index 1434a4b98a..ffe49d1e67 100644
--- a/deps/v8/src/compiler/escape-analysis-reducer.cc
+++ b/deps/v8/src/compiler/escape-analysis-reducer.cc
@@ -192,7 +192,7 @@ Node* EscapeAnalysisReducer::ReduceDeoptState(Node* node, Node* effect,
return ObjectIdNode(vobject);
} else {
std::vector<Node*> inputs;
- for (int offset = 0; offset < vobject->size(); offset += kPointerSize) {
+ for (int offset = 0; offset < vobject->size(); offset += kTaggedSize) {
Node* field =
analysis_result().GetVirtualObjectField(vobject, offset, effect);
CHECK_NOT_NULL(field);
@@ -315,10 +315,10 @@ void EscapeAnalysisReducer::Finalize() {
ElementAccess stack_access;
stack_access.base_is_tagged = BaseTaggedness::kUntaggedBase;
- // Reduce base address by {kPointerSize} such that (length - index)
+ // Reduce base address by {kSystemPointerSize} such that (length - index)
// resolves to the right position.
stack_access.header_size =
- CommonFrameConstants::kFixedFrameSizeAboveFp - kPointerSize;
+ CommonFrameConstants::kFixedFrameSizeAboveFp - kSystemPointerSize;
stack_access.type = Type::NonInternal();
stack_access.machine_type = MachineType::AnyTagged();
stack_access.write_barrier_kind = WriteBarrierKind::kNoWriteBarrier;
@@ -335,7 +335,7 @@ void EscapeAnalysisReducer::Finalize() {
jsgraph()->simplified()->NumberSubtract(), arguments_length,
index);
NodeProperties::SetType(offset,
- TypeCache::Get().kArgumentsLengthType);
+ TypeCache::Get()->kArgumentsLengthType);
NodeProperties::ReplaceValueInput(load, arguments_frame, 0);
NodeProperties::ReplaceValueInput(load, offset, 1);
NodeProperties::ChangeOp(load, load_stack_op);
diff --git a/deps/v8/src/compiler/escape-analysis.cc b/deps/v8/src/compiler/escape-analysis.cc
index 9b1ef8d907..8b7c599891 100644
--- a/deps/v8/src/compiler/escape-analysis.cc
+++ b/deps/v8/src/compiler/escape-analysis.cc
@@ -503,7 +503,7 @@ int OffsetOfFieldAccess(const Operator* op) {
int OffsetOfElementAt(ElementAccess const& access, int index) {
DCHECK_GE(index, 0);
DCHECK_GE(ElementSizeLog2Of(access.machine_type.representation()),
- kPointerSizeLog2);
+ kTaggedSizeLog2);
return access.header_size +
(index << ElementSizeLog2Of(access.machine_type.representation()));
}
@@ -516,7 +516,7 @@ Maybe<int> OffsetOfElementsAccess(const Operator* op, Node* index_node) {
double max = index_type.Max();
double min = index_type.Min();
int index = static_cast<int>(min);
- if (!(index == min && index == max)) return Nothing<int>();
+ if (index < 0 || index != min || index != max) return Nothing<int>();
return Just(OffsetOfElementAt(ElementAccessOf(op), index));
}
@@ -846,9 +846,9 @@ const VirtualObject* EscapeAnalysisResult::GetVirtualObject(Node* node) {
VirtualObject::VirtualObject(VariableTracker* var_states, VirtualObject::Id id,
int size)
: Dependable(var_states->zone()), id_(id), fields_(var_states->zone()) {
- DCHECK_EQ(0, size % kPointerSize);
+ DCHECK(IsAligned(size, kTaggedSize));
TRACE("Creating VirtualObject id:%d size:%d\n", id, size);
- int num_fields = size / kPointerSize;
+ int num_fields = size / kTaggedSize;
fields_.reserve(num_fields);
for (int i = 0; i < num_fields; ++i) {
fields_.push_back(var_states->NewVariable());
diff --git a/deps/v8/src/compiler/escape-analysis.h b/deps/v8/src/compiler/escape-analysis.h
index c3d4e5978d..9fc992766c 100644
--- a/deps/v8/src/compiler/escape-analysis.h
+++ b/deps/v8/src/compiler/escape-analysis.h
@@ -121,7 +121,7 @@ class VirtualObject : public Dependable {
typedef ZoneVector<Variable>::const_iterator const_iterator;
VirtualObject(VariableTracker* var_states, Id id, int size);
Maybe<Variable> FieldAt(int offset) const {
- CHECK_EQ(0, offset % kPointerSize);
+ CHECK(IsAligned(offset, kTaggedSize));
CHECK(!HasEscaped());
if (offset >= size()) {
// TODO(tebbi): Reading out-of-bounds can only happen in unreachable
@@ -130,10 +130,10 @@ class VirtualObject : public Dependable {
// once we can handle dead nodes everywhere.
return Nothing<Variable>();
}
- return Just(fields_.at(offset / kPointerSize));
+ return Just(fields_.at(offset / kTaggedSize));
}
Id id() const { return id_; }
- int size() const { return static_cast<int>(kPointerSize * fields_.size()); }
+ int size() const { return static_cast<int>(kTaggedSize * fields_.size()); }
// Escaped might mean that the object escaped to untracked memory or that it
// is used in an operation that requires materialization.
void SetEscaped() { escaped_ = true; }
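The escape-analysis changes above re-express virtual-object sizes in tagged slots rather than machine words, which differ once pointer compression makes kTaggedSize (4) smaller than kSystemPointerSize (8). The indexing invariant from FieldAt and size(), restated with illustrative constants:

    // Hypothetical standalone mirror of the invariant; kTaggedSize is 4 here.
    constexpr int kTaggedSizeExample = 4;
    constexpr int FieldIndexForOffset(int offset) {
      return offset / kTaggedSizeExample;  // offset must be tagged-aligned
    }
    // A 24-byte object now has 6 tagged fields instead of 3 machine words.
    static_assert(FieldIndexForOffset(20) == 5, "last field of a 24-byte object");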
diff --git a/deps/v8/src/compiler/frame-states.cc b/deps/v8/src/compiler/frame-states.cc
index bd210d714d..267da154e1 100644
--- a/deps/v8/src/compiler/frame-states.cc
+++ b/deps/v8/src/compiler/frame-states.cc
@@ -143,8 +143,8 @@ Node* CreateStubBuiltinContinuationFrameState(
std::vector<Node*> actual_parameters;
// Stack parameters first. Depending on {mode}, final parameters are added
// by the deoptimizer and aren't explicitly passed in the frame state.
- int stack_parameter_count = descriptor.GetRegisterParameterCount() -
- DeoptimizerParameterCountFor(mode);
+ int stack_parameter_count =
+ descriptor.GetParameterCount() - DeoptimizerParameterCountFor(mode);
// Reserving space in the vector, except for the case where
// stack_parameter_count is -1.
actual_parameters.reserve(stack_parameter_count >= 0
@@ -168,7 +168,7 @@ Node* CreateStubBuiltinContinuationFrameState(
}
Node* CreateJavaScriptBuiltinContinuationFrameState(
- JSGraph* jsgraph, Handle<SharedFunctionInfo> shared, Builtins::Name name,
+ JSGraph* jsgraph, const SharedFunctionInfoRef& shared, Builtins::Name name,
Node* target, Node* context, Node* const* stack_parameters,
int stack_parameter_count, Node* outer_frame_state,
ContinuationFrameStateMode mode) {
@@ -202,7 +202,8 @@ Node* CreateJavaScriptBuiltinContinuationFrameState(
? FrameStateType::kJavaScriptBuiltinContinuationWithCatch
: FrameStateType::kJavaScriptBuiltinContinuation,
name, target, context, &actual_parameters[0],
- static_cast<int>(actual_parameters.size()), outer_frame_state, shared);
+ static_cast<int>(actual_parameters.size()), outer_frame_state,
+ shared.object());
}
} // namespace compiler
diff --git a/deps/v8/src/compiler/frame-states.h b/deps/v8/src/compiler/frame-states.h
index b12758ac3b..dbe4deeb20 100644
--- a/deps/v8/src/compiler/frame-states.h
+++ b/deps/v8/src/compiler/frame-states.h
@@ -17,6 +17,7 @@ namespace compiler {
class JSGraph;
class Node;
+class SharedFunctionInfoRef;
// Flag that describes how to combine the current environment with
// the output of a node to obtain a framestate for lazy bailout.
@@ -155,7 +156,7 @@ Node* CreateStubBuiltinContinuationFrameState(
ContinuationFrameStateMode mode);
Node* CreateJavaScriptBuiltinContinuationFrameState(
- JSGraph* graph, Handle<SharedFunctionInfo> shared, Builtins::Name name,
+ JSGraph* graph, const SharedFunctionInfoRef& shared, Builtins::Name name,
Node* target, Node* context, Node* const* stack_parameters,
int stack_parameter_count, Node* outer_frame_state,
ContinuationFrameStateMode mode);
diff --git a/deps/v8/src/compiler/frame.cc b/deps/v8/src/compiler/frame.cc
index 0b6d7ac193..5313ec3d61 100644
--- a/deps/v8/src/compiler/frame.cc
+++ b/deps/v8/src/compiler/frame.cc
@@ -5,8 +5,6 @@
#include "src/compiler/frame.h"
#include "src/compiler/linkage.h"
-#include "src/compiler/register-allocator.h"
-#include "src/macro-assembler.h"
namespace v8 {
namespace internal {
@@ -21,7 +19,7 @@ Frame::Frame(int fixed_frame_size_in_slots)
allocated_double_registers_(nullptr) {}
int Frame::AlignFrame(int alignment) {
- int alignment_slots = alignment / kPointerSize;
+ int alignment_slots = alignment / kSystemPointerSize;
// We have to align return slots separately, because they are claimed
// separately on the stack.
int return_delta =
diff --git a/deps/v8/src/compiler/frame.h b/deps/v8/src/compiler/frame.h
index 94789276bb..87083c7e33 100644
--- a/deps/v8/src/compiler/frame.h
+++ b/deps/v8/src/compiler/frame.h
@@ -110,7 +110,7 @@ class Frame : public ZoneObject {
}
void AlignSavedCalleeRegisterSlots(int alignment = kDoubleSize) {
- int alignment_slots = alignment / kPointerSize;
+ int alignment_slots = alignment / kSystemPointerSize;
int delta = alignment_slots - (frame_slot_count_ & (alignment_slots - 1));
if (delta != alignment_slots) {
frame_slot_count_ += delta;
@@ -126,10 +126,10 @@ class Frame : public ZoneObject {
DCHECK_EQ(frame_slot_count_,
fixed_slot_count_ + spill_slot_count_ + return_slot_count_);
int frame_slot_count_before = frame_slot_count_;
- if (alignment > kPointerSize) {
+ if (alignment > kSystemPointerSize) {
// Slots are pointer sized, so alignment greater than a pointer size
// requires allocating additional slots.
- width += alignment - kPointerSize;
+ width += alignment - kSystemPointerSize;
}
AllocateAlignedFrameSlots(width);
spill_slot_count_ += frame_slot_count_ - frame_slot_count_before;
@@ -156,12 +156,13 @@ class Frame : public ZoneObject {
private:
void AllocateAlignedFrameSlots(int width) {
DCHECK_LT(0, width);
- int new_frame_slots = (width + kPointerSize - 1) / kPointerSize;
+ int new_frame_slots = (width + kSystemPointerSize - 1) / kSystemPointerSize;
// Align to 8 bytes if width is a multiple of 8 bytes, and to 16 bytes if
// multiple of 16.
- int align_to = (width & 15) == 0 ? 16 : (width & 7) == 0 ? 8 : kPointerSize;
- frame_slot_count_ =
- RoundUp(frame_slot_count_ + new_frame_slots, align_to / kPointerSize);
+ int align_to =
+ (width & 15) == 0 ? 16 : (width & 7) == 0 ? 8 : kSystemPointerSize;
+ frame_slot_count_ = RoundUp(frame_slot_count_ + new_frame_slots,
+ align_to / kSystemPointerSize);
DCHECK_LT(0, frame_slot_count_);
}
@@ -236,7 +237,9 @@ class FrameAccessState : public ZoneObject {
StandardFrameConstants::kFixedSlotCountAboveFp;
return frame_slot_count + sp_delta();
}
- int GetSPToFPOffset() const { return GetSPToFPSlotCount() * kPointerSize; }
+ int GetSPToFPOffset() const {
+ return GetSPToFPSlotCount() * kSystemPointerSize;
+ }
// Get the frame offset for a given spill slot. The location depends on the
// calling convention and the specific frame layout, and may thus be
diff --git a/deps/v8/src/compiler/graph-assembler.cc b/deps/v8/src/compiler/graph-assembler.cc
index de02f941be..cbb250bf5c 100644
--- a/deps/v8/src/compiler/graph-assembler.cc
+++ b/deps/v8/src/compiler/graph-assembler.cc
@@ -206,6 +206,12 @@ Node* GraphAssembler::BitcastWordToTagged(Node* value) {
current_effect_, current_control_);
}
+Node* GraphAssembler::BitcastTaggedToWord(Node* value) {
+ return current_effect_ =
+ graph()->NewNode(machine()->BitcastTaggedToWord(), value,
+ current_effect_, current_control_);
+}
+
Node* GraphAssembler::Word32PoisonOnSpeculation(Node* value) {
return current_effect_ =
graph()->NewNode(machine()->Word32PoisonOnSpeculation(), value,
diff --git a/deps/v8/src/compiler/graph-assembler.h b/deps/v8/src/compiler/graph-assembler.h
index fa527a8bb0..45392c068b 100644
--- a/deps/v8/src/compiler/graph-assembler.h
+++ b/deps/v8/src/compiler/graph-assembler.h
@@ -29,6 +29,7 @@ namespace compiler {
V(ChangeFloat64ToUint32) \
V(TruncateInt64ToInt32) \
V(RoundFloat64ToInt32) \
+ V(TruncateFloat64ToInt64) \
V(TruncateFloat64ToWord32) \
V(Float64ExtractLowWord32) \
V(Float64ExtractHighWord32) \
@@ -90,6 +91,7 @@ namespace compiler {
V(TrueConstant) \
V(FalseConstant) \
V(NullConstant) \
+ V(BooleanMapConstant) \
V(HeapNumberMapConstant) \
V(NoContextConstant) \
V(EmptyStringConstant) \
@@ -215,6 +217,7 @@ class GraphAssembler {
Node* ToNumber(Node* value);
Node* BitcastWordToTagged(Node* value);
+ Node* BitcastTaggedToWord(Node* value);
Node* Allocate(PretenureFlag pretenure, Node* size);
Node* LoadField(FieldAccess const&, Node* object);
Node* LoadElement(ElementAccess const&, Node* object, Node* index);
diff --git a/deps/v8/src/compiler/graph-visualizer.cc b/deps/v8/src/compiler/graph-visualizer.cc
index cbb7188993..76c5313329 100644
--- a/deps/v8/src/compiler/graph-visualizer.cc
+++ b/deps/v8/src/compiler/graph-visualizer.cc
@@ -8,8 +8,8 @@
#include <sstream>
#include <string>
-#include "src/code-stubs.h"
#include "src/compiler/all-nodes.h"
+#include "src/compiler/backend/register-allocator.h"
#include "src/compiler/compiler-source-position-table.h"
#include "src/compiler/graph.h"
#include "src/compiler/node-origin-table.h"
@@ -18,7 +18,6 @@
#include "src/compiler/opcodes.h"
#include "src/compiler/operator-properties.h"
#include "src/compiler/operator.h"
-#include "src/compiler/register-allocator.h"
#include "src/compiler/schedule.h"
#include "src/compiler/scheduler.h"
#include "src/interpreter/bytecodes.h"
@@ -63,6 +62,30 @@ std::ostream& operator<<(std::ostream& out, const NodeOriginAsJSON& asJSON) {
return out;
}
+class JSONEscaped {
+ public:
+ explicit JSONEscaped(const std::ostringstream& os) : str_(os.str()) {}
+
+ friend std::ostream& operator<<(std::ostream& os, const JSONEscaped& e) {
+ for (char c : e.str_) PipeCharacter(os, c);
+ return os;
+ }
+
+ private:
+ static std::ostream& PipeCharacter(std::ostream& os, char c) {
+ if (c == '"') return os << "\\\"";
+ if (c == '\\') return os << "\\\\";
+ if (c == '\b') return os << "\\b";
+ if (c == '\f') return os << "\\f";
+ if (c == '\n') return os << "\\n";
+ if (c == '\r') return os << "\\r";
+ if (c == '\t') return os << "\\t";
+ return os << c;
+ }
+
+ const std::string str_;
+};
+
void JsonPrintFunctionSource(std::ostream& os, int source_id,
std::unique_ptr<char[]> function_name,
Handle<Script> script, Isolate* isolate,
@@ -76,10 +99,12 @@ void JsonPrintFunctionSource(std::ostream& os, int source_id,
int start = 0;
int end = 0;
if (!script.is_null() && !script->IsUndefined(isolate) && !shared.is_null()) {
- Object* source_name = script->name();
+ Object source_name = script->name();
os << ", \"sourceName\": \"";
if (source_name->IsString()) {
- os << String::cast(source_name)->ToCString().get();
+ std::ostringstream escaped_name;
+ escaped_name << String::cast(source_name)->ToCString().get();
+ os << JSONEscaped(escaped_name);
}
os << "\"";
{
@@ -88,7 +113,8 @@ void JsonPrintFunctionSource(std::ostream& os, int source_id,
end = shared->EndPosition();
os << ", \"sourceText\": \"";
int len = shared->EndPosition() - start;
- String::SubStringRange source(String::cast(script->source()), start, len);
+ SubStringRange source(String::cast(script->source()), no_allocation,
+ start, len);
for (const auto& c : source) {
os << AsEscapedUC16ForJSON(c);
}
@@ -139,7 +165,8 @@ void JsonPrintAllSourceWithPositions(std::ostream& os,
AllowDeferredHandleDereference allow_deference_for_print_code;
os << "\"sources\" : {";
Handle<Script> script =
- (info->shared_info().is_null() || !info->shared_info()->script())
+ (info->shared_info().is_null() ||
+ info->shared_info()->script() == Object())
? Handle<Script>()
: handle(Script::cast(info->shared_info()->script()), isolate);
JsonPrintFunctionSource(os, -1,
@@ -189,9 +216,9 @@ std::unique_ptr<char[]> GetVisualizerLogFileName(OptimizedCompilationInfo* info,
bool source_available = false;
if (FLAG_trace_file_names && info->has_shared_info() &&
info->shared_info()->script()->IsScript()) {
- Object* source_name = Script::cast(info->shared_info()->script())->name();
+ Object source_name = Script::cast(info->shared_info()->script())->name();
if (source_name->IsString()) {
- String* str = String::cast(source_name);
+ String str = String::cast(source_name);
if (str->length() > 0) {
SNPrintF(source_file, "%s", str->ToCString().get());
std::replace(source_file.start(),
@@ -238,30 +265,6 @@ static const char* SafeMnemonic(Node* node) {
return node == nullptr ? "null" : node->op()->mnemonic();
}
-class JSONEscaped {
- public:
- explicit JSONEscaped(const std::ostringstream& os) : str_(os.str()) {}
-
- friend std::ostream& operator<<(std::ostream& os, const JSONEscaped& e) {
- for (char c : e.str_) PipeCharacter(os, c);
- return os;
- }
-
- private:
- static std::ostream& PipeCharacter(std::ostream& os, char c) {
- if (c == '"') return os << "\\\"";
- if (c == '\\') return os << "\\\\";
- if (c == '\b') return os << "\\b";
- if (c == '\f') return os << "\\f";
- if (c == '\n') return os << "\\n";
- if (c == '\r') return os << "\\r";
- if (c == '\t') return os << "\\t";
- return os << c;
- }
-
- const std::string str_;
-};
-
class JSONGraphNodeWriter {
public:
JSONGraphNodeWriter(std::ostream& os, Zone* zone, const Graph* graph,
@@ -699,9 +702,7 @@ void GraphC1Visualizer::PrintSchedule(const char* phase,
for (int j = instruction_block->first_instruction_index();
j <= instruction_block->last_instruction_index(); j++) {
PrintIndent();
- PrintableInstruction printable = {RegisterConfiguration::Default(),
- instructions->InstructionAt(j)};
- os_ << j << " " << printable << " <|@\n";
+ os_ << j << " " << *instructions->InstructionAt(j) << " <|@\n";
}
}
}
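From here on the visualizer prints registers through each architecture's Register::from_code() and its operator<<, instead of looking names up in a RegisterConfiguration. A standalone sketch of that shape (toy register type, not V8's):

    #include <iostream>

    class Register {
     public:
      static Register from_code(int code) { return Register(code); }
      friend std::ostream& operator<<(std::ostream& os, Register reg) {
        return os << "r" << reg.code_;  // real backends print "rax", "d7", ...
      }

     private:
      explicit Register(int code) : code_(code) {}
      int code_;
    };

    int main() {
      std::cout << Register::from_code(3) << "\n";  // prints r3
    }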
@@ -743,17 +744,13 @@ void GraphC1Visualizer::PrintLiveRange(const LiveRange* range, const char* type,
os_ << vreg << ":" << range->relative_id() << " " << type;
if (range->HasRegisterAssigned()) {
AllocatedOperand op = AllocatedOperand::cast(range->GetAssignedOperand());
- const auto config = RegisterConfiguration::Default();
if (op.IsRegister()) {
- os_ << " \"" << config->GetGeneralRegisterName(op.register_code())
- << "\"";
+ os_ << " \"" << Register::from_code(op.register_code()) << "\"";
} else if (op.IsDoubleRegister()) {
- os_ << " \"" << config->GetDoubleRegisterName(op.register_code())
- << "\"";
+ os_ << " \"" << DoubleRegister::from_code(op.register_code()) << "\"";
} else {
DCHECK(op.IsFloatRegister());
- os_ << " \"" << config->GetFloatRegisterName(op.register_code())
- << "\"";
+ os_ << " \"" << FloatRegister::from_code(op.register_code()) << "\"";
}
} else if (range->spilled()) {
const TopLevelLiveRange* top = range->TopLevel();
@@ -774,11 +771,18 @@ void GraphC1Visualizer::PrintLiveRange(const LiveRange* range, const char* type,
}
}
- // The toplevel range is always suffixed with :0. Use that as parent.
- os_ << " " << vreg << ":0";
+ // The toplevel range might be a splinter. Pre-resolve those here so that
+ // they have a proper parent.
+ const TopLevelLiveRange* parent = range->TopLevel();
+ if (parent->IsSplinter()) parent = parent->splintered_from();
+ os_ << " " << parent->vreg() << ":" << parent->relative_id();
// TODO(herhut) Find something useful to print for the hint field
- os_ << " unknown";
+ if (range->get_bundle() != nullptr) {
+ os_ << " B" << range->get_bundle()->id();
+ } else {
+ os_ << " unknown";
+ }
for (const UseInterval* interval = range->first_interval();
interval != nullptr; interval = interval->next()) {
@@ -964,7 +968,6 @@ std::ostream& operator<<(std::ostream& os, const AsScheduledGraph& scheduled) {
}
std::ostream& operator<<(std::ostream& os, const InstructionOperandAsJSON& o) {
- const RegisterConfiguration* conf = o.register_configuration_;
const InstructionOperand* op = o.op_;
const InstructionSequence* code = o.code_;
os << "{";
@@ -983,13 +986,12 @@ std::ostream& operator<<(std::ostream& os, const InstructionOperandAsJSON& o) {
break;
case UnallocatedOperand::FIXED_REGISTER: {
os << ",\"tooltip\": \"FIXED_REGISTER: "
- << conf->GetGeneralRegisterName(unalloc->fixed_register_index())
- << "\"";
+ << Register::from_code(unalloc->fixed_register_index()) << "\"";
break;
}
case UnallocatedOperand::FIXED_FP_REGISTER: {
os << ",\"tooltip\": \"FIXED_FP_REGISTER: "
- << conf->GetDoubleRegisterName(unalloc->fixed_register_index())
+ << DoubleRegister::from_code(unalloc->fixed_register_index())
<< "\"";
break;
}
@@ -1067,14 +1069,18 @@ std::ostream& operator<<(std::ostream& os, const InstructionOperandAsJSON& o) {
} else if (op->IsFPStackSlot()) {
os << "fp_stack:" << allocated->index();
} else if (op->IsRegister()) {
- os << conf->GetGeneralOrSpecialRegisterName(allocated->register_code());
+ if (allocated->register_code() < Register::kNumRegisters) {
+ os << Register::from_code(allocated->register_code());
+ } else {
+ os << Register::GetSpecialRegisterName(allocated->register_code());
+ }
} else if (op->IsDoubleRegister()) {
- os << conf->GetDoubleRegisterName(allocated->register_code());
+ os << DoubleRegister::from_code(allocated->register_code());
} else if (op->IsFloatRegister()) {
- os << conf->GetFloatRegisterName(allocated->register_code());
+ os << FloatRegister::from_code(allocated->register_code());
} else {
DCHECK(op->IsSimd128Register());
- os << conf->GetSimd128RegisterName(allocated->register_code());
+ os << Simd128Register::from_code(allocated->register_code());
}
os << "\",";
os << "\"tooltip\": \""
@@ -1088,13 +1094,11 @@ std::ostream& operator<<(std::ostream& os, const InstructionOperandAsJSON& o) {
return os;
}
-std::ostream& operator<<(std::ostream& os, const InstructionAsJSON& i) {
- const Instruction* instr = i.instr_;
- InstructionOperandAsJSON json_op = {i.register_configuration_, nullptr,
- i.code_};
+std::ostream& operator<<(std::ostream& os, const InstructionAsJSON& i_json) {
+ const Instruction* instr = i_json.instr_;
os << "{";
- os << "\"id\": " << i.index_ << ",";
+ os << "\"id\": " << i_json.index_ << ",";
os << "\"opcode\": \"" << ArchOpcodeField::decode(instr->opcode()) << "\",";
os << "\"flags\": \"";
FlagsMode fm = FlagsModeField::decode(instr->opcode());
@@ -1123,10 +1127,9 @@ std::ostream& operator<<(std::ostream& os, const InstructionAsJSON& i) {
if (move->IsEliminated()) continue;
if (!first) os << ",";
first = false;
- json_op.op_ = &move->destination();
- os << "[" << json_op << ",";
- json_op.op_ = &move->source();
- os << json_op << "]";
+ os << "[" << InstructionOperandAsJSON{&move->destination(), i_json.code_}
+ << "," << InstructionOperandAsJSON{&move->source(), i_json.code_}
+ << "]";
}
os << "]";
}
@@ -1137,8 +1140,7 @@ std::ostream& operator<<(std::ostream& os, const InstructionAsJSON& i) {
for (size_t i = 0; i < instr->OutputCount(); i++) {
if (need_comma) os << ",";
need_comma = true;
- json_op.op_ = instr->OutputAt(i);
- os << json_op;
+ os << InstructionOperandAsJSON{instr->OutputAt(i), i_json.code_};
}
os << "],";
@@ -1147,8 +1149,7 @@ std::ostream& operator<<(std::ostream& os, const InstructionAsJSON& i) {
for (size_t i = 0; i < instr->InputCount(); i++) {
if (need_comma) os << ",";
need_comma = true;
- json_op.op_ = instr->InputAt(i);
- os << json_op;
+ os << InstructionOperandAsJSON{instr->InputAt(i), i_json.code_};
}
os << "],";
@@ -1157,8 +1158,7 @@ std::ostream& operator<<(std::ostream& os, const InstructionAsJSON& i) {
for (size_t i = 0; i < instr->TempCount(); i++) {
if (need_comma) os << ",";
need_comma = true;
- json_op.op_ = instr->TempAt(i);
- os << json_op;
+ os << InstructionOperandAsJSON{instr->TempAt(i), i_json.code_};
}
os << "]";
os << "}";
@@ -1194,7 +1194,7 @@ std::ostream& operator<<(std::ostream& os, const InstructionBlockAsJSON& b) {
os << "],";
os << "\"phis\": [";
bool needs_comma = false;
- InstructionOperandAsJSON json_op = {b.register_configuration_, nullptr, code};
+ InstructionOperandAsJSON json_op = {nullptr, code};
for (const PhiInstruction* phi : block->phis()) {
if (needs_comma) os << ",";
needs_comma = true;
@@ -1212,7 +1212,7 @@ std::ostream& operator<<(std::ostream& os, const InstructionBlockAsJSON& b) {
os << "],";
os << "\"instructions\": [";
- InstructionAsJSON json_instr = {b.register_configuration_, -1, nullptr, code};
+ InstructionAsJSON json_instr = {-1, nullptr, code};
need_comma = false;
for (int j = block->first_instruction_index();
j <= block->last_instruction_index(); j++) {
@@ -1232,15 +1232,13 @@ std::ostream& operator<<(std::ostream& os, const InstructionSequenceAsJSON& s) {
const InstructionSequence* code = s.sequence_;
os << "\"blocks\": [";
- InstructionBlockAsJSON json_block = {s.register_configuration_, nullptr,
- code};
bool need_comma = false;
for (int i = 0; i < code->InstructionBlockCount(); i++) {
if (need_comma) os << ",";
need_comma = true;
- json_block.block_ = code->InstructionBlockAt(RpoNumber::FromInt(i));
- os << json_block;
+ os << InstructionBlockAsJSON{
+ code->InstructionBlockAt(RpoNumber::FromInt(i)), code};
}
os << "]";
diff --git a/deps/v8/src/compiler/graph-visualizer.h b/deps/v8/src/compiler/graph-visualizer.h
index 93524d74cd..9e97f22c8a 100644
--- a/deps/v8/src/compiler/graph-visualizer.h
+++ b/deps/v8/src/compiler/graph-visualizer.h
@@ -17,7 +17,6 @@ namespace v8 {
namespace internal {
class OptimizedCompilationInfo;
-class RegisterConfiguration;
class SharedFunctionInfo;
class SourcePosition;
namespace compiler {
@@ -157,7 +156,6 @@ std::ostream& operator<<(std::ostream& os,
const AsC1VRegisterAllocationData& ac);
struct InstructionOperandAsJSON {
- const RegisterConfiguration* register_configuration_;
const InstructionOperand* op_;
const InstructionSequence* code_;
};
@@ -165,7 +163,6 @@ struct InstructionOperandAsJSON {
std::ostream& operator<<(std::ostream& os, const InstructionOperandAsJSON& o);
struct InstructionAsJSON {
- const RegisterConfiguration* register_configuration_;
int index_;
const Instruction* instr_;
const InstructionSequence* code_;
@@ -173,7 +170,6 @@ struct InstructionAsJSON {
std::ostream& operator<<(std::ostream& os, const InstructionAsJSON& i);
struct InstructionBlockAsJSON {
- const RegisterConfiguration* register_configuration_;
const InstructionBlock* block_;
const InstructionSequence* code_;
};
@@ -181,7 +177,6 @@ struct InstructionBlockAsJSON {
std::ostream& operator<<(std::ostream& os, const InstructionBlockAsJSON& b);
struct InstructionSequenceAsJSON {
- const RegisterConfiguration* register_configuration_;
const InstructionSequence* sequence_;
};
std::ostream& operator<<(std::ostream& os, const InstructionSequenceAsJSON& s);
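These structs follow a small wrapper idiom: each bundles exactly the data its operator<< needs, and the .cc hunks above now build them inline at the print site with aggregate initialization (InstructionOperandAsJSON{op, code} and friends). A minimal standalone version of the idiom:

    #include <iostream>

    struct Point {
      int x, y;
    };

    struct PointAsJSON {
      const Point* point_;
    };

    std::ostream& operator<<(std::ostream& os, const PointAsJSON& p) {
      return os << "{\"x\": " << p.point_->x << ", \"y\": " << p.point_->y
                << "}";
    }

    int main() {
      Point p{1, 2};
      std::cout << PointAsJSON{&p} << "\n";  // {"x": 1, "y": 2}
    }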
diff --git a/deps/v8/src/compiler/int64-lowering.cc b/deps/v8/src/compiler/int64-lowering.cc
index 41a5098081..4fd2454e54 100644
--- a/deps/v8/src/compiler/int64-lowering.cc
+++ b/deps/v8/src/compiler/int64-lowering.cc
@@ -127,8 +127,7 @@ void Int64Lowering::LowerWord64AtomicBinop(Node* node, const Operator* op) {
}
void Int64Lowering::LowerWord64AtomicNarrowOp(Node* node, const Operator* op) {
- Node* value = node->InputAt(2);
- node->ReplaceInput(2, GetReplacementLow(value));
+ DefaultLowering(node, true);
NodeProperties::ChangeOp(node, op);
ReplaceNode(node, node, graph()->NewNode(common()->Int32Constant(0)));
}
@@ -887,6 +886,7 @@ void Int64Lowering::LowerNode(Node* node) {
case IrOpcode::kWord64AtomicLoad: {
DCHECK_EQ(4, node->InputCount());
MachineType type = AtomicOpType(node->op());
+ DefaultLowering(node, true);
if (type == MachineType::Uint64()) {
NodeProperties::ChangeOp(node, machine()->Word32AtomicPairLoad());
ReplaceNodeWithProjections(node);
@@ -942,10 +942,7 @@ void Int64Lowering::LowerNode(Node* node) {
} else {
DCHECK(type == MachineType::Uint32() || type == MachineType::Uint16() ||
type == MachineType::Uint8());
- Node* old_value = node->InputAt(2);
- node->ReplaceInput(2, GetReplacementLow(old_value));
- Node* new_value = node->InputAt(3);
- node->ReplaceInput(3, GetReplacementLow(new_value));
+ DefaultLowering(node, true);
NodeProperties::ChangeOp(node,
machine()->Word32AtomicCompareExchange(type));
ReplaceNode(node, node, graph()->NewNode(common()->Int32Constant(0)));
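For context on this file: on 32-bit targets the lowering pass splits every i64 value into a (low, high) pair of i32 words, and the change above leans on DefaultLowering to rewrite a node's inputs to those replacement words instead of patching individual input indices by hand. A standalone sketch of the value split only (the real pass rewrites graph nodes, not integers):

    #include <cstdint>
    #include <utility>

    // Split an i64 into the (low, high) i32 words a 32-bit machine uses.
    std::pair<uint32_t, uint32_t> LowerInt64(uint64_t value) {
      return {static_cast<uint32_t>(value),         // low word
              static_cast<uint32_t>(value >> 32)};  // high word
    }

    // Recombine, much as projections of a Word32AtomicPair* node do.
    uint64_t Recombine(uint32_t low, uint32_t high) {
      return (static_cast<uint64_t>(high) << 32) | low;
    }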
diff --git a/deps/v8/src/compiler/js-call-reducer.cc b/deps/v8/src/compiler/js-call-reducer.cc
index 5b04731a64..d16e38a458 100644
--- a/deps/v8/src/compiler/js-call-reducer.cc
+++ b/deps/v8/src/compiler/js-call-reducer.cc
@@ -5,10 +5,9 @@
#include "src/compiler/js-call-reducer.h"
#include "src/api-inl.h"
-#include "src/builtins/builtins-promise-gen.h"
+#include "src/builtins/builtins-promise.h"
#include "src/builtins/builtins-utils.h"
#include "src/code-factory.h"
-#include "src/code-stubs.h"
#include "src/compiler/access-builder.h"
#include "src/compiler/access-info.h"
#include "src/compiler/allocation-builder.h"
@@ -253,8 +252,8 @@ Reduction JSCallReducer::ReduceObjectConstructor(Node* node) {
Node* effect = NodeProperties::GetEffectInput(node);
// We can fold away the Object(x) call if |x| is definitely not a primitive.
- if (NodeProperties::CanBePrimitive(isolate(), value, effect)) {
- if (!NodeProperties::CanBeNullOrUndefined(isolate(), value, effect)) {
+ if (NodeProperties::CanBePrimitive(broker(), value, effect)) {
+ if (!NodeProperties::CanBeNullOrUndefined(broker(), value, effect)) {
// Turn the {node} into a {JSToObject} call if we know that
// the {value} cannot be null or undefined.
NodeProperties::ReplaceValueInputs(node, value);
@@ -295,7 +294,7 @@ Reduction JSCallReducer::ReduceFunctionPrototypeApply(Node* node) {
// If {arguments_list} cannot be null or undefined, we don't need
// to expand this {node} to control-flow.
- if (!NodeProperties::CanBeNullOrUndefined(isolate(), arguments_list,
+ if (!NodeProperties::CanBeNullOrUndefined(broker(), arguments_list,
effect)) {
// Massage the value inputs appropriately.
node->ReplaceInput(0, target);
@@ -408,30 +407,37 @@ Reduction JSCallReducer::ReduceFunctionPrototypeBind(Node* node) {
// definitely a constructor or not a constructor.
ZoneHandleSet<Map> receiver_maps;
NodeProperties::InferReceiverMapsResult result =
- NodeProperties::InferReceiverMaps(isolate(), receiver, effect,
+ NodeProperties::InferReceiverMaps(broker(), receiver, effect,
&receiver_maps);
if (result == NodeProperties::kNoReceiverMaps) return NoChange();
DCHECK_NE(0, receiver_maps.size());
- bool const is_constructor = receiver_maps[0]->is_constructor();
- Handle<Object> const prototype(receiver_maps[0]->prototype(), isolate());
- for (Handle<Map> const receiver_map : receiver_maps) {
+ MapRef first_receiver_map(broker(), receiver_maps[0]);
+ bool const is_constructor = first_receiver_map.is_constructor();
+ first_receiver_map.SerializePrototype();
+ ObjectRef const prototype = first_receiver_map.prototype();
+ for (Handle<Map> const map : receiver_maps) {
+ MapRef receiver_map(broker(), map);
+
// Check for consistency among the {receiver_maps}.
STATIC_ASSERT(LAST_TYPE == LAST_FUNCTION_TYPE);
- if (receiver_map->prototype() != *prototype) return NoChange();
- if (receiver_map->is_constructor() != is_constructor) return NoChange();
- if (receiver_map->instance_type() < FIRST_FUNCTION_TYPE) return NoChange();
+ receiver_map.SerializePrototype();
+ if (!receiver_map.prototype().equals(prototype) ||
+ receiver_map.is_constructor() != is_constructor ||
+ receiver_map.instance_type() < FIRST_FUNCTION_TYPE) {
+ return NoChange();
+ }
// Disallow binding of slow-mode functions. We need to figure out
// whether the length and name property are in the original state.
- if (receiver_map->is_dictionary_map()) return NoChange();
+ if (receiver_map.is_dictionary_map()) return NoChange();
// Check whether the length and name properties are still present
// as AccessorInfo objects. In that case, their values can be
// recomputed even if the actual value of the object changes.
// This mirrors the checks done in builtins-function-gen.cc at
// runtime otherwise.
- Handle<DescriptorArray> descriptors(receiver_map->instance_descriptors(),
- isolate());
+ Handle<DescriptorArray> descriptors(
+ receiver_map.object()->instance_descriptors(), isolate());
if (descriptors->number_of_descriptors() < 2) return NoChange();
if (descriptors->GetKey(JSFunction::kLengthDescriptorIndex) !=
ReadOnlyRoots(isolate()).length_string()) {
@@ -451,16 +457,12 @@ Reduction JSCallReducer::ReduceFunctionPrototypeBind(Node* node) {
}
}
- // Setup the map for the resulting JSBoundFunction with the
- // correct instance {prototype}.
- Handle<Map> map(
- is_constructor
- ? native_context()->bound_function_with_constructor_map()
- : native_context()->bound_function_without_constructor_map(),
- isolate());
- if (map->prototype() != *prototype) {
- map = Map::TransitionToPrototype(isolate(), map, prototype);
- }
+ // Choose the map for the resulting JSBoundFunction (but bail out in case of a
+ // custom prototype).
+ MapRef map = is_constructor
+ ? native_context().bound_function_with_constructor_map()
+ : native_context().bound_function_without_constructor_map();
+ if (!map.prototype().equals(prototype)) return NoChange();
// Make sure we can rely on the {receiver_maps}.
if (result == NodeProperties::kUnreliableReceiverMaps) {
@@ -481,8 +483,9 @@ Reduction JSCallReducer::ReduceFunctionPrototypeBind(Node* node) {
inputs[2 + arity + 0] = context;
inputs[2 + arity + 1] = effect;
inputs[2 + arity + 2] = control;
- Node* value = effect = graph()->NewNode(
- javascript()->CreateBoundFunction(arity, map), input_count, inputs);
+ Node* value = effect =
+ graph()->NewNode(javascript()->CreateBoundFunction(arity, map.object()),
+ input_count, inputs);
ReplaceWithValue(node, value, effect, control);
return Replace(value);
}
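The pattern in this hunk recurs through the rest of the file: raw Handle<Map> reads become MapRef wrappers that go through a JSHeapBroker (with explicit SerializePrototype() calls before prototype() is read), groundwork for moving such heap reads off the main thread. A toy sketch of the wrapper shape only, not V8's actual API:

    #include <string>

    struct HeapMap {
      std::string prototype;
      bool is_constructor;
    };

    struct Broker {};  // the real broker caches/serializes heap data

    class MapRef {
     public:
      MapRef(Broker* broker, const HeapMap* map) : broker_(broker), map_(map) {}
      bool is_constructor() const { return map_->is_constructor; }
      const std::string& prototype() const { return map_->prototype; }
      bool equals(const MapRef& other) const { return map_ == other.map_; }
      Broker* broker() const { return broker_; }

     private:
      Broker* broker_;
      const HeapMap* map_;
    };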
@@ -500,8 +503,8 @@ Reduction JSCallReducer::ReduceFunctionPrototypeCall(Node* node) {
Node* context;
HeapObjectMatcher m(target);
if (m.HasValue()) {
- Handle<JSFunction> function = Handle<JSFunction>::cast(m.Value());
- context = jsgraph()->HeapConstant(handle(function->context(), isolate()));
+ JSFunctionRef function = m.Ref(broker()).AsJSFunction();
+ context = jsgraph()->Constant(function.context());
} else {
context = effect = graph()->NewNode(
simplified()->LoadField(AccessBuilder::ForJSFunctionContext()), target,
@@ -570,18 +573,19 @@ Reduction JSCallReducer::ReduceObjectGetPrototype(Node* node, Node* object) {
// Try to determine the {object} map.
ZoneHandleSet<Map> object_maps;
NodeProperties::InferReceiverMapsResult result =
- NodeProperties::InferReceiverMaps(isolate(), object, effect,
- &object_maps);
+ NodeProperties::InferReceiverMaps(broker(), object, effect, &object_maps);
if (result != NodeProperties::kNoReceiverMaps) {
- Handle<Map> candidate_map = object_maps[0];
- Handle<Object> candidate_prototype(candidate_map->prototype(), isolate());
+ MapRef candidate_map(broker(), object_maps[0]);
+ candidate_map.SerializePrototype();
+ ObjectRef candidate_prototype = candidate_map.prototype();
// Check if we can constant-fold the {candidate_prototype}.
for (size_t i = 0; i < object_maps.size(); ++i) {
- Handle<Map> object_map = object_maps[i];
- if (object_map->IsSpecialReceiverMap() ||
- object_map->has_hidden_prototype() ||
- object_map->prototype() != *candidate_prototype) {
+ MapRef object_map(broker(), object_maps[i]);
+ object_map.SerializePrototype();
+ if (IsSpecialReceiverInstanceType(object_map.instance_type()) ||
+ object_map.has_hidden_prototype() ||
+ !object_map.prototype().equals(candidate_prototype)) {
// We exclude special receivers, like JSProxy or API objects that
// might require access checks here; we also don't want to deal
// with hidden prototypes at this point.
@@ -589,16 +593,15 @@ Reduction JSCallReducer::ReduceObjectGetPrototype(Node* node, Node* object) {
}
// The above check also excludes maps for primitive values, which is
// important because we are not applying [[ToObject]] here as expected.
- DCHECK(!object_map->IsPrimitiveMap() && object_map->IsJSReceiverMap());
+ DCHECK(!object_map.IsPrimitiveMap() && object_map.IsJSReceiverMap());
if (result == NodeProperties::kUnreliableReceiverMaps &&
- !object_map->is_stable()) {
+ !object_map.is_stable()) {
return NoChange();
}
}
if (result == NodeProperties::kUnreliableReceiverMaps) {
for (size_t i = 0; i < object_maps.size(); ++i) {
- dependencies()->DependOnStableMap(
- MapRef(js_heap_broker(), object_maps[i]));
+ dependencies()->DependOnStableMap(MapRef(broker(), object_maps[i]));
}
}
Node* value = jsgraph()->Constant(candidate_prototype);
@@ -731,11 +734,12 @@ Reduction JSCallReducer::ReduceObjectPrototypeIsPrototypeOf(Node* node) {
// the ToObject step of Object.prototype.isPrototypeOf is a no-op).
ZoneHandleSet<Map> receiver_maps;
NodeProperties::InferReceiverMapsResult result =
- NodeProperties::InferReceiverMaps(isolate(), receiver, effect,
+ NodeProperties::InferReceiverMaps(broker(), receiver, effect,
&receiver_maps);
if (result == NodeProperties::kNoReceiverMaps) return NoChange();
- for (size_t i = 0; i < receiver_maps.size(); ++i) {
- if (!receiver_maps[i]->IsJSReceiverMap()) return NoChange();
+ for (Handle<Map> map : receiver_maps) {
+ MapRef receiver_map(broker(), map);
+ if (!receiver_map.IsJSReceiverMap()) return NoChange();
}
// We don't check whether {value} is a proper JSReceiver here explicitly,
@@ -852,10 +856,10 @@ Reduction JSCallReducer::ReduceReflectGet(Node* node) {
{
if_false = efalse = graph()->NewNode(
javascript()->CallRuntime(Runtime::kThrowTypeError, 2),
- jsgraph()->Constant(MessageTemplate::kCalledOnNonObject),
- jsgraph()->HeapConstant(
- factory()->NewStringFromAsciiChecked("Reflect.get")),
- context, frame_state, efalse, if_false);
+ jsgraph()->Constant(
+ static_cast<int>(MessageTemplate::kCalledOnNonObject)),
+ jsgraph()->HeapConstant(factory()->ReflectGet_string()), context,
+ frame_state, efalse, if_false);
}
// Otherwise just use the existing GetPropertyStub.
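The static_cast<int> wrappers added around MessageTemplate values here and below suggest the enum became a scoped enum (enum class), which no longer converts implicitly to the integer that jsgraph()->Constant() takes. A minimal illustration, with a made-up enumerator value:

    enum class MessageTemplate { kCalledOnNonObject = 38 };  // value invented

    int ToConstantInput(MessageTemplate message) {
      // return message;                 // would not compile: scoped enums
      return static_cast<int>(message);  // have no implicit conversion to int
    }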
@@ -929,10 +933,10 @@ Reduction JSCallReducer::ReduceReflectHas(Node* node) {
{
if_false = efalse = graph()->NewNode(
javascript()->CallRuntime(Runtime::kThrowTypeError, 2),
- jsgraph()->Constant(MessageTemplate::kCalledOnNonObject),
- jsgraph()->HeapConstant(
- factory()->NewStringFromAsciiChecked("Reflect.has")),
- context, frame_state, efalse, if_false);
+ jsgraph()->Constant(
+ static_cast<int>(MessageTemplate::kCalledOnNonObject)),
+ jsgraph()->HeapConstant(factory()->ReflectHas_string()), context,
+ frame_state, efalse, if_false);
}
// Otherwise just use the existing {JSHasProperty} logic.
@@ -973,17 +977,6 @@ Reduction JSCallReducer::ReduceReflectHas(Node* node) {
return Changed(vtrue);
}
-bool CanInlineArrayIteratingBuiltin(Isolate* isolate,
- Handle<Map> receiver_map) {
- if (!receiver_map->prototype()->IsJSArray()) return false;
- Handle<JSArray> receiver_prototype(JSArray::cast(receiver_map->prototype()),
- isolate);
- return receiver_map->instance_type() == JS_ARRAY_TYPE &&
- IsFastElementsKind(receiver_map->elements_kind()) &&
- isolate->IsNoElementsProtectorIntact() &&
- isolate->IsAnyInitialArrayPrototype(receiver_prototype);
-}
-
Node* JSCallReducer::WireInLoopStart(Node* k, Node** control, Node** effect) {
Node* loop = *control =
graph()->NewNode(common()->Loop(2), *control, *control);
@@ -1002,8 +995,50 @@ void JSCallReducer::WireInLoopEnd(Node* loop, Node* eloop, Node* vloop, Node* k,
eloop->ReplaceInput(1, effect);
}
-Reduction JSCallReducer::ReduceArrayForEach(Node* node,
- Handle<SharedFunctionInfo> shared) {
+namespace {
+bool CanInlineArrayIteratingBuiltin(JSHeapBroker* broker,
+ ZoneHandleSet<Map> receiver_maps,
+ ElementsKind* kind_return) {
+ DCHECK_NE(0, receiver_maps.size());
+ *kind_return = MapRef(broker, receiver_maps[0]).elements_kind();
+ for (auto receiver_map : receiver_maps) {
+ MapRef map(broker, receiver_map);
+ if (!map.supports_fast_array_iteration() ||
+ !UnionElementsKindUptoSize(kind_return, map.elements_kind())) {
+ return false;
+ }
+ }
+ return true;
+}
+
+bool CanInlineArrayResizingBuiltin(JSHeapBroker* broker,
+ ZoneHandleSet<Map> receiver_maps,
+ ElementsKind* kind_return,
+ bool builtin_is_push = false) {
+ DCHECK_NE(0, receiver_maps.size());
+ *kind_return = MapRef(broker, receiver_maps[0]).elements_kind();
+ for (auto receiver_map : receiver_maps) {
+ MapRef map(broker, receiver_map);
+ if (!map.supports_fast_array_resize()) return false;
+ if (builtin_is_push) {
+ if (!UnionElementsKindUptoPackedness(kind_return, map.elements_kind())) {
+ return false;
+ }
+ } else {
+ // TODO(turbofan): We should also handle fast holey double elements once
+ // we got the hole NaN mess sorted out in TurboFan/V8.
+ if (map.elements_kind() == HOLEY_DOUBLE_ELEMENTS ||
+ !UnionElementsKindUptoSize(kind_return, map.elements_kind())) {
+ return false;
+ }
+ }
+ }
+ return true;
+}
+} // namespace
+
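The new CanInlineArrayIteratingBuiltin folds all receiver maps into one elements kind: seed the result with the first map's kind, then widen it map by map, bailing out when two kinds cannot be unioned. A toy version of that fold, with an invented three-kind lattice standing in for UnionElementsKindUptoSize:

    #include <vector>

    enum ElementsKind { PACKED_SMI, HOLEY_SMI, PACKED_DOUBLE };  // toy subset

    // Stand-in union: Smi kinds widen packed -> holey; mixing in a double
    // kind fails, much as mixed element sizes do in the real helper.
    bool UnionKinds(ElementsKind* accumulated, ElementsKind next) {
      if (*accumulated == PACKED_DOUBLE || next == PACKED_DOUBLE) {
        return *accumulated == next;
      }
      if (next == HOLEY_SMI) *accumulated = HOLEY_SMI;
      return true;
    }

    bool FoldKinds(const std::vector<ElementsKind>& map_kinds,
                   ElementsKind* kind_return) {
      if (map_kinds.empty()) return false;  // the real code DCHECKs non-empty
      *kind_return = map_kinds[0];
      for (ElementsKind kind : map_kinds) {
        if (!UnionKinds(kind_return, kind)) return false;
      }
      return true;
    }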
+Reduction JSCallReducer::ReduceArrayForEach(
+ Node* node, const SharedFunctionInfoRef& shared) {
if (!FLAG_turbo_inline_array_builtins) return NoChange();
DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
CallParameters const& p = CallParametersOf(node->op());
@@ -1026,36 +1061,19 @@ Reduction JSCallReducer::ReduceArrayForEach(Node* node,
: jsgraph()->UndefinedConstant();
ZoneHandleSet<Map> receiver_maps;
NodeProperties::InferReceiverMapsResult result =
- NodeProperties::InferReceiverMaps(isolate(), receiver, effect,
+ NodeProperties::InferReceiverMaps(broker(), receiver, effect,
&receiver_maps);
if (result == NodeProperties::kNoReceiverMaps) return NoChange();
- // By ensuring that {kind} is object or double, we can be polymorphic
- // on different elements kinds.
- ElementsKind kind = receiver_maps[0]->elements_kind();
- if (IsSmiElementsKind(kind)) {
- kind = FastSmiToObjectElementsKind(kind);
- }
- for (Handle<Map> receiver_map : receiver_maps) {
- ElementsKind next_kind = receiver_map->elements_kind();
- if (!CanInlineArrayIteratingBuiltin(isolate(), receiver_map)) {
- return NoChange();
- }
- if (!IsFastElementsKind(next_kind)) {
- return NoChange();
- }
- if (IsDoubleElementsKind(kind) != IsDoubleElementsKind(next_kind)) {
- return NoChange();
- }
- if (IsHoleyElementsKind(next_kind)) {
- kind = GetHoleyElementsKind(kind);
- }
+ ElementsKind kind;
+ if (!CanInlineArrayIteratingBuiltin(broker(), receiver_maps, &kind)) {
+ return NoChange();
}
// Install code dependencies on the {receiver} prototype maps and the
// global array protector cell.
dependencies()->DependOnProtector(
- PropertyCellRef(js_heap_broker(), factory()->no_elements_protector()));
+ PropertyCellRef(broker(), factory()->no_elements_protector()));
// If we have unreliable maps, we need a map check.
if (result == NodeProperties::kUnreliableReceiverMaps) {
@@ -1180,6 +1198,11 @@ Reduction JSCallReducer::ReduceArrayForEach(Node* node,
control = if_false;
effect = eloop;
+ // Introduce proper LoopExit and LoopExitEffect nodes to mark
+ // {loop} as a candidate for loop peeling (crbug.com/v8/8273).
+ control = graph()->NewNode(common()->LoopExit(), control, loop);
+ effect = graph()->NewNode(common()->LoopExitEffect(), effect, control);
+
// Wire up the branch for the case when IsCallable fails for the callback.
// Since {check_throw} is an unconditional throw, it's impossible to
// return a successful completion. Therefore, we simply connect the successful
@@ -1192,9 +1215,9 @@ Reduction JSCallReducer::ReduceArrayForEach(Node* node,
return Replace(jsgraph()->UndefinedConstant());
}
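The LoopExit/LoopExitEffect pairs added here and in the reductions below mark each loop as a peeling candidate by giving every path out of the loop explicit exit nodes tied to the loop header. A toy model of the wiring order only (value-producing exits additionally thread a LoopExitValue, as ReduceArrayFind, ReduceArrayEvery and ReduceArraySome do further down):

    #include <vector>

    struct Node {
      const char* op;
      Node* a;
      Node* b;
    };

    static std::vector<Node*> all_nodes;  // leaky toy arena

    Node* NewNode(const char* op, Node* a, Node* b) {
      all_nodes.push_back(new Node{op, a, b});
      return all_nodes.back();
    }

    void MarkLoopExit(Node** control, Node** effect, Node* loop) {
      *control = NewNode("LoopExit", *control, loop);
      *effect = NewNode("LoopExitEffect", *effect, *control);
    }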
-Reduction JSCallReducer::ReduceArrayReduce(Node* node,
- ArrayReduceDirection direction,
- Handle<SharedFunctionInfo> shared) {
+Reduction JSCallReducer::ReduceArrayReduce(
+ Node* node, ArrayReduceDirection direction,
+ const SharedFunctionInfoRef& shared) {
if (!FLAG_turbo_inline_array_builtins) return NoChange();
DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
CallParameters const& p = CallParametersOf(node->op());
@@ -1216,16 +1239,13 @@ Reduction JSCallReducer::ReduceArrayReduce(Node* node,
ZoneHandleSet<Map> receiver_maps;
NodeProperties::InferReceiverMapsResult result =
- NodeProperties::InferReceiverMaps(isolate(), receiver, effect,
+ NodeProperties::InferReceiverMaps(broker(), receiver, effect,
&receiver_maps);
if (result == NodeProperties::kNoReceiverMaps) return NoChange();
- ElementsKind kind = receiver_maps[0]->elements_kind();
- for (Handle<Map> receiver_map : receiver_maps) {
- if (!CanInlineArrayIteratingBuiltin(isolate(), receiver_map))
- return NoChange();
- if (!UnionElementsKindUptoSize(&kind, receiver_map->elements_kind()))
- return NoChange();
+ ElementsKind kind;
+ if (!CanInlineArrayIteratingBuiltin(broker(), receiver_maps, &kind)) {
+ return NoChange();
}
std::function<Node*(Node*)> hole_check = [this, kind](Node* element) {
@@ -1240,7 +1260,7 @@ Reduction JSCallReducer::ReduceArrayReduce(Node* node,
// Install code dependencies on the {receiver} prototype maps and the
// global array protector cell.
dependencies()->DependOnProtector(
- PropertyCellRef(js_heap_broker(), factory()->no_elements_protector()));
+ PropertyCellRef(broker(), factory()->no_elements_protector()));
// If we have unreliable maps, we need a map check.
if (result == NodeProperties::kUnreliableReceiverMaps) {
@@ -1470,7 +1490,7 @@ Reduction JSCallReducer::ReduceArrayReduce(Node* node,
}
Reduction JSCallReducer::ReduceArrayMap(Node* node,
- Handle<SharedFunctionInfo> shared) {
+ const SharedFunctionInfoRef& shared) {
if (!FLAG_turbo_inline_array_builtins) return NoChange();
DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
CallParameters const& p = CallParametersOf(node->op());
@@ -1493,37 +1513,28 @@ Reduction JSCallReducer::ReduceArrayMap(Node* node,
: jsgraph()->UndefinedConstant();
ZoneHandleSet<Map> receiver_maps;
NodeProperties::InferReceiverMapsResult result =
- NodeProperties::InferReceiverMaps(isolate(), receiver, effect,
+ NodeProperties::InferReceiverMaps(broker(), receiver, effect,
&receiver_maps);
if (result == NodeProperties::kNoReceiverMaps) return NoChange();
// Ensure that any changes to the Array species constructor cause deopt.
if (!isolate()->IsArraySpeciesLookupChainIntact()) return NoChange();
- const ElementsKind kind = receiver_maps[0]->elements_kind();
-
- for (Handle<Map> receiver_map : receiver_maps) {
- if (!CanInlineArrayIteratingBuiltin(isolate(), receiver_map))
- return NoChange();
- // We can handle different maps, as long as their elements kind are the
- // same.
- if (receiver_map->elements_kind() != kind) return NoChange();
+ ElementsKind kind;
+ if (!CanInlineArrayIteratingBuiltin(broker(), receiver_maps, &kind)) {
+ return NoChange();
}
if (IsHoleyElementsKind(kind)) {
dependencies()->DependOnProtector(
- PropertyCellRef(js_heap_broker(), factory()->no_elements_protector()));
+ PropertyCellRef(broker(), factory()->no_elements_protector()));
}
dependencies()->DependOnProtector(
- PropertyCellRef(js_heap_broker(), factory()->array_species_protector()));
-
- Handle<JSFunction> handle_constructor(
- JSFunction::cast(
- native_context()->GetInitialJSArrayMap(kind)->GetConstructor()),
- isolate());
- Node* array_constructor = jsgraph()->HeapConstant(handle_constructor);
+ PropertyCellRef(broker(), factory()->array_species_protector()));
+ Node* array_constructor = jsgraph()->Constant(
+ native_context().GetInitialJSArrayMap(kind).GetConstructor());
Node* k = jsgraph()->ZeroConstant();
@@ -1643,15 +1654,12 @@ Reduction JSCallReducer::ReduceArrayMap(Node* node,
// The array {a} should be HOLEY_SMI_ELEMENTS because we'd only come into this
// loop if the input array length is non-zero, and "new Array({x > 0})" always
// produces a HOLEY array.
- Handle<Map> double_map(Map::cast(native_context()->get(
- Context::ArrayMapIndex(HOLEY_DOUBLE_ELEMENTS))),
- isolate());
- Handle<Map> fast_map(
- Map::cast(native_context()->get(Context::ArrayMapIndex(HOLEY_ELEMENTS))),
- isolate());
- effect = graph()->NewNode(
- simplified()->TransitionAndStoreElement(double_map, fast_map), a, k,
- callback_value, effect, control);
+ MapRef holey_double_map =
+ native_context().GetInitialJSArrayMap(HOLEY_DOUBLE_ELEMENTS);
+ MapRef holey_map = native_context().GetInitialJSArrayMap(HOLEY_ELEMENTS);
+ effect = graph()->NewNode(simplified()->TransitionAndStoreElement(
+ holey_double_map.object(), holey_map.object()),
+ a, k, callback_value, effect, control);
if (IsHoleyElementsKind(kind)) {
Node* after_call_and_store_control = control;
@@ -1682,8 +1690,8 @@ Reduction JSCallReducer::ReduceArrayMap(Node* node,
return Replace(a);
}
-Reduction JSCallReducer::ReduceArrayFilter(Node* node,
- Handle<SharedFunctionInfo> shared) {
+Reduction JSCallReducer::ReduceArrayFilter(
+ Node* node, const SharedFunctionInfoRef& shared) {
if (!FLAG_turbo_inline_array_builtins) return NoChange();
DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
CallParameters const& p = CallParametersOf(node->op());
@@ -1705,37 +1713,30 @@ Reduction JSCallReducer::ReduceArrayFilter(Node* node,
: jsgraph()->UndefinedConstant();
ZoneHandleSet<Map> receiver_maps;
NodeProperties::InferReceiverMapsResult result =
- NodeProperties::InferReceiverMaps(isolate(), receiver, effect,
+ NodeProperties::InferReceiverMaps(broker(), receiver, effect,
&receiver_maps);
if (result == NodeProperties::kNoReceiverMaps) return NoChange();
// And ensure that any changes to the Array species constructor cause deopt.
if (!isolate()->IsArraySpeciesLookupChainIntact()) return NoChange();
- const ElementsKind kind = receiver_maps[0]->elements_kind();
+ ElementsKind kind;
+ if (!CanInlineArrayIteratingBuiltin(broker(), receiver_maps, &kind)) {
+ return NoChange();
+ }
+
// The output array is packed (filter doesn't visit holes).
const ElementsKind packed_kind = GetPackedElementsKind(kind);
- for (Handle<Map> receiver_map : receiver_maps) {
- if (!CanInlineArrayIteratingBuiltin(isolate(), receiver_map)) {
- return NoChange();
- }
- // We can handle different maps, as long as their elements kind are the
- // same.
- if (receiver_map->elements_kind() != kind) return NoChange();
- }
-
if (IsHoleyElementsKind(kind)) {
dependencies()->DependOnProtector(
- PropertyCellRef(js_heap_broker(), factory()->no_elements_protector()));
+ PropertyCellRef(broker(), factory()->no_elements_protector()));
}
dependencies()->DependOnProtector(
- PropertyCellRef(js_heap_broker(), factory()->array_species_protector()));
+ PropertyCellRef(broker(), factory()->array_species_protector()));
- Handle<Map> initial_map(
- Map::cast(native_context()->GetInitialJSArrayMap(packed_kind)),
- isolate());
+ MapRef initial_map = native_context().GetInitialJSArrayMap(packed_kind);
Node* k = jsgraph()->ZeroConstant();
Node* to = jsgraph()->ZeroConstant();
@@ -1751,16 +1752,15 @@ Reduction JSCallReducer::ReduceArrayFilter(Node* node,
Node* a; // Construct the output array.
{
AllocationBuilder ab(jsgraph(), effect, control);
- ab.Allocate(initial_map->instance_size(), NOT_TENURED, Type::Array());
+ ab.Allocate(initial_map.instance_size(), NOT_TENURED, Type::Array());
ab.Store(AccessBuilder::ForMap(), initial_map);
Node* empty_fixed_array = jsgraph()->EmptyFixedArrayConstant();
ab.Store(AccessBuilder::ForJSObjectPropertiesOrHash(), empty_fixed_array);
ab.Store(AccessBuilder::ForJSObjectElements(), empty_fixed_array);
ab.Store(AccessBuilder::ForJSArrayLength(packed_kind),
jsgraph()->ZeroConstant());
- for (int i = 0; i < initial_map->GetInObjectProperties(); ++i) {
- ab.Store(AccessBuilder::ForJSObjectInObjectProperty(
- MapRef(js_heap_broker(), initial_map), i),
+ for (int i = 0; i < initial_map.GetInObjectProperties(); ++i) {
+ ab.Store(AccessBuilder::ForJSObjectInObjectProperty(initial_map, i),
jsgraph()->UndefinedConstant());
}
a = effect = ab.Finish();
@@ -1947,7 +1947,7 @@ Reduction JSCallReducer::ReduceArrayFilter(Node* node,
}
Reduction JSCallReducer::ReduceArrayFind(Node* node, ArrayFindVariant variant,
- Handle<SharedFunctionInfo> shared) {
+ const SharedFunctionInfoRef& shared) {
if (!FLAG_turbo_inline_array_builtins) return NoChange();
DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
CallParameters const& p = CallParametersOf(node->op());
@@ -1988,24 +1988,19 @@ Reduction JSCallReducer::ReduceArrayFind(Node* node, ArrayFindVariant variant,
: jsgraph()->UndefinedConstant();
ZoneHandleSet<Map> receiver_maps;
NodeProperties::InferReceiverMapsResult result =
- NodeProperties::InferReceiverMaps(isolate(), receiver, effect,
+ NodeProperties::InferReceiverMaps(broker(), receiver, effect,
&receiver_maps);
if (result == NodeProperties::kNoReceiverMaps) return NoChange();
- const ElementsKind kind = receiver_maps[0]->elements_kind();
-
- for (Handle<Map> receiver_map : receiver_maps) {
- if (!CanInlineArrayIteratingBuiltin(isolate(), receiver_map))
- return NoChange();
- // We can handle different maps, as long as their elements kind are the
- // same.
- if (receiver_map->elements_kind() != kind) return NoChange();
+ ElementsKind kind;
+ if (!CanInlineArrayIteratingBuiltin(broker(), receiver_maps, &kind)) {
+ return NoChange();
}
// Install code dependencies on the {receiver} prototype maps and the
// global array protector cell.
dependencies()->DependOnProtector(
- PropertyCellRef(js_heap_broker(), factory()->no_elements_protector()));
+ PropertyCellRef(broker(), factory()->no_elements_protector()));
// If we have unreliable maps, we need a map check.
if (result == NodeProperties::kUnreliableReceiverMaps) {
@@ -2140,10 +2135,16 @@ Reduction JSCallReducer::ReduceArrayFind(Node* node, ArrayFindVariant variant,
Node* if_not_found_value = (variant == ArrayFindVariant::kFind)
? jsgraph()->UndefinedConstant()
: jsgraph()->MinusOneConstant();
- Node* return_value =
+ Node* value =
graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
if_found_return_value, if_not_found_value, control);
+ // Introduce proper LoopExit/LoopExitEffect/LoopExitValue to mark
+ // {loop} as a candidate for loop peeling (crbug.com/v8/8273).
+ control = graph()->NewNode(common()->LoopExit(), control, loop);
+ effect = graph()->NewNode(common()->LoopExitEffect(), effect, control);
+ value = graph()->NewNode(common()->LoopExitValue(), value, control);
+
// Wire up the branch for the case when IsCallable fails for the callback.
// Since {check_throw} is an unconditional throw, it's impossible to
// return a successful completion. Therefore, we simply connect the successful
@@ -2152,8 +2153,8 @@ Reduction JSCallReducer::ReduceArrayFind(Node* node, ArrayFindVariant variant,
graph()->NewNode(common()->Throw(), check_throw, check_fail);
NodeProperties::MergeControlToEnd(graph(), common(), throw_node);
- ReplaceWithValue(node, return_value, effect, control);
- return Replace(return_value);
+ ReplaceWithValue(node, value, effect, control);
+ return Replace(value);
}
Node* JSCallReducer::DoFilterPostCallbackWork(ElementsKind kind, Node** control,
@@ -2174,10 +2175,10 @@ Node* JSCallReducer::DoFilterPostCallbackWork(ElementsKind kind, Node** control,
simplified()->LoadField(AccessBuilder::ForJSObjectElements()), a, etrue,
if_true);
- DCHECK(TypeCache::Get().kFixedDoubleArrayLengthType.Is(
- TypeCache::Get().kFixedArrayLengthType));
+ DCHECK(TypeCache::Get()->kFixedDoubleArrayLengthType.Is(
+ TypeCache::Get()->kFixedArrayLengthType));
Node* checked_to = etrue = graph()->NewNode(
- common()->TypeGuard(TypeCache::Get().kFixedArrayLengthType), to, etrue,
+ common()->TypeGuard(TypeCache::Get()->kFixedArrayLengthType), to, etrue,
if_true);
Node* elements_length = etrue = graph()->NewNode(
simplified()->LoadField(AccessBuilder::ForFixedArrayLength()), elements,
@@ -2226,8 +2227,9 @@ void JSCallReducer::WireInCallbackIsCallableCheck(
*check_fail = graph()->NewNode(common()->IfFalse(), check_branch);
*check_throw = *check_fail = graph()->NewNode(
javascript()->CallRuntime(Runtime::kThrowTypeError, 2),
- jsgraph()->Constant(MessageTemplate::kCalledNonCallable), fncallback,
- context, check_frame_state, effect, *check_fail);
+ jsgraph()->Constant(
+ static_cast<int>(MessageTemplate::kCalledNonCallable)),
+ fncallback, context, check_frame_state, effect, *check_fail);
*control = graph()->NewNode(common()->IfTrue(), check_branch);
}
@@ -2280,7 +2282,7 @@ Node* JSCallReducer::SafeLoadElement(ElementsKind kind, Node* receiver,
}
Reduction JSCallReducer::ReduceArrayEvery(Node* node,
- Handle<SharedFunctionInfo> shared) {
+ const SharedFunctionInfoRef& shared) {
if (!FLAG_turbo_inline_array_builtins) return NoChange();
DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
CallParameters const& p = CallParametersOf(node->op());
@@ -2302,30 +2304,25 @@ Reduction JSCallReducer::ReduceArrayEvery(Node* node,
: jsgraph()->UndefinedConstant();
ZoneHandleSet<Map> receiver_maps;
NodeProperties::InferReceiverMapsResult result =
- NodeProperties::InferReceiverMaps(isolate(), receiver, effect,
+ NodeProperties::InferReceiverMaps(broker(), receiver, effect,
&receiver_maps);
if (result == NodeProperties::kNoReceiverMaps) return NoChange();
// And ensure that any changes to the Array species constructor cause deopt.
if (!isolate()->IsArraySpeciesLookupChainIntact()) return NoChange();
- const ElementsKind kind = receiver_maps[0]->elements_kind();
-
- for (Handle<Map> receiver_map : receiver_maps) {
- if (!CanInlineArrayIteratingBuiltin(isolate(), receiver_map))
- return NoChange();
- // We can handle different maps, as long as their elements kind are the
- // same.
- if (receiver_map->elements_kind() != kind) return NoChange();
+ ElementsKind kind;
+ if (!CanInlineArrayIteratingBuiltin(broker(), receiver_maps, &kind)) {
+ return NoChange();
}
if (IsHoleyElementsKind(kind)) {
dependencies()->DependOnProtector(
- PropertyCellRef(js_heap_broker(), factory()->no_elements_protector()));
+ PropertyCellRef(broker(), factory()->no_elements_protector()));
}
dependencies()->DependOnProtector(
- PropertyCellRef(js_heap_broker(), factory()->array_species_protector()));
+ PropertyCellRef(broker(), factory()->array_species_protector()));
// If we have unreliable maps, we need a map check.
if (result == NodeProperties::kUnreliableReceiverMaps) {
@@ -2488,10 +2485,16 @@ Reduction JSCallReducer::ReduceArrayEvery(Node* node,
control = graph()->NewNode(common()->Merge(2), if_false, if_false_callback);
effect =
graph()->NewNode(common()->EffectPhi(2), eloop, efalse_callback, control);
- Node* return_value = graph()->NewNode(
+ Node* value = graph()->NewNode(
common()->Phi(MachineRepresentation::kTagged, 2),
jsgraph()->TrueConstant(), jsgraph()->FalseConstant(), control);
+ // Introduce proper LoopExit/LoopExitEffect/LoopExitValue to mark
+ // {loop} as a candidate for loop peeling (crbug.com/v8/8273).
+ control = graph()->NewNode(common()->LoopExit(), control, loop);
+ effect = graph()->NewNode(common()->LoopExitEffect(), effect, control);
+ value = graph()->NewNode(common()->LoopExitValue(), value, control);
+
// Wire up the branch for the case when IsCallable fails for the callback.
// Since {check_throw} is an unconditional throw, it's impossible to
// return a successful completion. Therefore, we simply connect the successful
@@ -2500,8 +2503,8 @@ Reduction JSCallReducer::ReduceArrayEvery(Node* node,
graph()->NewNode(common()->Throw(), check_throw, check_fail);
NodeProperties::MergeControlToEnd(graph(), common(), throw_node);
- ReplaceWithValue(node, return_value, effect, control);
- return Replace(return_value);
+ ReplaceWithValue(node, value, effect, control);
+ return Replace(value);
}
namespace {
@@ -2562,23 +2565,23 @@ Reduction JSCallReducer::ReduceArrayIndexOfIncludes(
return NoChange();
}
- Handle<Map> receiver_map;
- if (!NodeProperties::GetMapWitness(isolate(), node).ToHandle(&receiver_map))
+ Handle<Map> map;
+ if (!NodeProperties::GetMapWitness(broker(), node).ToHandle(&map))
return NoChange();
- if (!CanInlineArrayIteratingBuiltin(isolate(), receiver_map))
- return NoChange();
+ MapRef receiver_map(broker(), map);
+ if (!receiver_map.supports_fast_array_iteration()) return NoChange();
- if (IsHoleyElementsKind(receiver_map->elements_kind())) {
+ ElementsKind const elements_kind = receiver_map.elements_kind();
+ if (IsHoleyElementsKind(elements_kind)) {
dependencies()->DependOnProtector(
- PropertyCellRef(js_heap_broker(), factory()->no_elements_protector()));
+ PropertyCellRef(broker(), factory()->no_elements_protector()));
}
Callable const callable =
search_variant == SearchVariant::kIndexOf
- ? GetCallableForArrayIndexOf(receiver_map->elements_kind(), isolate())
- : GetCallableForArrayIncludes(receiver_map->elements_kind(),
- isolate());
+ ? GetCallableForArrayIndexOf(elements_kind, isolate())
+ : GetCallableForArrayIncludes(elements_kind, isolate());
CallDescriptor const* const desc = Linkage::GetStubCallDescriptor(
graph()->zone(), callable.descriptor(),
callable.descriptor().GetStackParameterCount(), CallDescriptor::kNoFlags,
@@ -2596,8 +2599,7 @@ Reduction JSCallReducer::ReduceArrayIndexOfIncludes(
? NodeProperties::GetValueInput(node, 2)
: jsgraph()->UndefinedConstant();
Node* length = effect = graph()->NewNode(
- simplified()->LoadField(
- AccessBuilder::ForJSArrayLength(receiver_map->elements_kind())),
+ simplified()->LoadField(AccessBuilder::ForJSArrayLength(elements_kind)),
receiver, effect, control);
Node* new_from_index = jsgraph()->ZeroConstant();
if (node->op()->ValueInputCount() >= 4) {
@@ -2627,7 +2629,7 @@ Reduction JSCallReducer::ReduceArrayIndexOfIncludes(
}
Reduction JSCallReducer::ReduceArraySome(Node* node,
- Handle<SharedFunctionInfo> shared) {
+ const SharedFunctionInfoRef& shared) {
if (!FLAG_turbo_inline_array_builtins) return NoChange();
DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
CallParameters const& p = CallParametersOf(node->op());
@@ -2649,32 +2651,25 @@ Reduction JSCallReducer::ReduceArraySome(Node* node,
: jsgraph()->UndefinedConstant();
ZoneHandleSet<Map> receiver_maps;
NodeProperties::InferReceiverMapsResult result =
- NodeProperties::InferReceiverMaps(isolate(), receiver, effect,
+ NodeProperties::InferReceiverMaps(broker(), receiver, effect,
&receiver_maps);
if (result == NodeProperties::kNoReceiverMaps) return NoChange();
// And ensure that any changes to the Array species constructor cause deopt.
if (!isolate()->IsArraySpeciesLookupChainIntact()) return NoChange();
- if (receiver_maps.size() == 0) return NoChange();
-
- const ElementsKind kind = receiver_maps[0]->elements_kind();
-
- for (Handle<Map> receiver_map : receiver_maps) {
- if (!CanInlineArrayIteratingBuiltin(isolate(), receiver_map))
- return NoChange();
- // We can handle different maps, as long as their elements kind are the
- // same.
- if (receiver_map->elements_kind() != kind) return NoChange();
+ ElementsKind kind;
+ if (!CanInlineArrayIteratingBuiltin(broker(), receiver_maps, &kind)) {
+ return NoChange();
}
if (IsHoleyElementsKind(kind)) {
dependencies()->DependOnProtector(
- PropertyCellRef(js_heap_broker(), factory()->no_elements_protector()));
+ PropertyCellRef(broker(), factory()->no_elements_protector()));
}
dependencies()->DependOnProtector(
- PropertyCellRef(js_heap_broker(), factory()->array_species_protector()));
+ PropertyCellRef(broker(), factory()->array_species_protector()));
Node* k = jsgraph()->ZeroConstant();
@@ -2844,10 +2839,16 @@ Reduction JSCallReducer::ReduceArraySome(Node* node,
control = graph()->NewNode(common()->Merge(2), if_false, if_true_callback);
effect =
graph()->NewNode(common()->EffectPhi(2), eloop, etrue_callback, control);
- Node* return_value = graph()->NewNode(
+ Node* value = graph()->NewNode(
common()->Phi(MachineRepresentation::kTagged, 2),
jsgraph()->FalseConstant(), jsgraph()->TrueConstant(), control);
+ // Introduce proper LoopExit/LoopExitEffect/LoopExitValue to mark
+ // {loop} as a candidate for loop peeling (crbug.com/v8/8273).
+ control = graph()->NewNode(common()->LoopExit(), control, loop);
+ effect = graph()->NewNode(common()->LoopExitEffect(), effect, control);
+ value = graph()->NewNode(common()->LoopExitValue(), value, control);
+
// Wire up the branch for the case when IsCallable fails for the callback.
// Since {check_throw} is an unconditional throw, it's impossible to
// return a successful completion. Therefore, we simply connect the successful
@@ -2856,48 +2857,44 @@ Reduction JSCallReducer::ReduceArraySome(Node* node,
graph()->NewNode(common()->Throw(), check_throw, check_fail);
NodeProperties::MergeControlToEnd(graph(), common(), throw_node);
- ReplaceWithValue(node, return_value, effect, control);
- return Replace(return_value);
+ ReplaceWithValue(node, value, effect, control);
+ return Replace(value);
}
Reduction JSCallReducer::ReduceCallApiFunction(
- Node* node, Handle<SharedFunctionInfo> shared) {
+ Node* node, const SharedFunctionInfoRef& shared) {
DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
CallParameters const& p = CallParametersOf(node->op());
int const argc = static_cast<int>(p.arity()) - 2;
Node* target = NodeProperties::GetValueInput(node, 0);
- Node* receiver = (p.convert_mode() == ConvertReceiverMode::kNullOrUndefined)
- ? jsgraph()->HeapConstant(global_proxy())
- : NodeProperties::GetValueInput(node, 1);
+ Node* receiver =
+ (p.convert_mode() == ConvertReceiverMode::kNullOrUndefined)
+ ? jsgraph()->Constant(native_context().global_proxy_object())
+ : NodeProperties::GetValueInput(node, 1);
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
Handle<FunctionTemplateInfo> function_template_info(
- FunctionTemplateInfo::cast(shared->function_data()), isolate());
-
- // CallApiCallbackStub expects the target in a register, so we count it out,
- // and counts the receiver as an implicit argument, so we count the receiver
- // out too.
- if (argc > CallApiCallbackStub::kArgMax) return NoChange();
+ FunctionTemplateInfo::cast(shared.object()->function_data()), isolate());
// Infer the {receiver} maps, and check if we can inline the API function
// callback based on those.
ZoneHandleSet<Map> receiver_maps;
NodeProperties::InferReceiverMapsResult result =
- NodeProperties::InferReceiverMaps(isolate(), receiver, effect,
+ NodeProperties::InferReceiverMaps(broker(), receiver, effect,
&receiver_maps);
if (result == NodeProperties::kNoReceiverMaps) return NoChange();
- for (size_t i = 0; i < receiver_maps.size(); ++i) {
- Handle<Map> receiver_map = receiver_maps[i];
- if (!receiver_map->IsJSObjectMap() ||
+ for (Handle<Map> map : receiver_maps) {
+ MapRef receiver_map(broker(), map);
+ if (!receiver_map.IsJSObjectMap() ||
(!function_template_info->accept_any_receiver() &&
- receiver_map->is_access_check_needed())) {
+ receiver_map.is_access_check_needed())) {
return NoChange();
}
// In case of unreliable {receiver} information, the {receiver_maps}
// must all be stable in order to consume the information.
if (result == NodeProperties::kUnreliableReceiverMaps) {
- if (!receiver_map->is_stable()) return NoChange();
+ if (!receiver_map.is_stable()) return NoChange();
}
}
@@ -2918,9 +2915,9 @@ Reduction JSCallReducer::ReduceCallApiFunction(
// Install stability dependencies for unreliable {receiver_maps}.
if (result == NodeProperties::kUnreliableReceiverMaps) {
- for (size_t i = 0; i < receiver_maps.size(); ++i) {
- dependencies()->DependOnStableMap(
- MapRef(js_heap_broker(), receiver_maps[i]));
+ for (Handle<Map> map : receiver_maps) {
+ MapRef receiver_map(broker(), map);
+ dependencies()->DependOnStableMap(receiver_map);
}
}
@@ -2937,7 +2934,7 @@ Reduction JSCallReducer::ReduceCallApiFunction(
Handle<CallHandlerInfo> call_handler_info(
CallHandlerInfo::cast(function_template_info->call_code()), isolate());
Handle<Object> data(call_handler_info->data(), isolate());
- Callable call_api_callback = CodeFactory::CallApiCallback(isolate(), argc);
+ Callable call_api_callback = CodeFactory::CallApiCallback(isolate());
CallInterfaceDescriptor cid = call_api_callback.descriptor();
auto call_descriptor = Linkage::GetStubCallDescriptor(
graph()->zone(), cid,
@@ -2952,13 +2949,14 @@ Reduction JSCallReducer::ReduceCallApiFunction(
node->InsertInput(graph()->zone(), 0,
jsgraph()->HeapConstant(call_api_callback.code()));
node->ReplaceInput(1, context);
- node->InsertInput(graph()->zone(), 2, jsgraph()->Constant(data));
- node->InsertInput(graph()->zone(), 3, holder);
- node->InsertInput(graph()->zone(), 4,
+ node->InsertInput(graph()->zone(), 2,
jsgraph()->ExternalConstant(function_reference));
- node->ReplaceInput(5, receiver);
- node->RemoveInput(6 + argc); // Remove context input.
- node->ReplaceInput(7 + argc, effect); // Update effect input.
+ node->InsertInput(graph()->zone(), 3, jsgraph()->Constant(argc));
+ node->InsertInput(graph()->zone(), 4, jsgraph()->Constant(data));
+ node->InsertInput(graph()->zone(), 5, holder);
+ node->ReplaceInput(6, receiver);
+ node->RemoveInput(7 + argc); // Remove context input.
+ node->ReplaceInput(8 + argc, effect); // Update effect input.
NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
return Changed(node);
}
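The reshaped input list above (external function reference, then argc as an explicit Constant, then data, holder and receiver) matches the switch from the argc-templated CallApiCallbackStub to a single CallApiCallback builtin that receives argc at run time. The node surgery relies on InsertInput shifting later inputs right while ReplaceInput overwrites in place; a toy model of those operations:

    #include <vector>

    struct ToyNode {
      std::vector<int> inputs;

      void InsertInput(int index, int input) {
        inputs.insert(inputs.begin() + index, input);  // shifts the rest right
      }
      void ReplaceInput(int index, int input) { inputs[index] = input; }
      void RemoveInput(int index) { inputs.erase(inputs.begin() + index); }
    };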
@@ -3018,8 +3016,9 @@ Reduction JSCallReducer::ReduceCallOrConstructWithArrayLikeOrSpread(
FieldAccess const& access = FieldAccessOf(user->op());
if (access.offset == JSArray::kLengthOffset) {
// Ignore uses for arguments#length.
- STATIC_ASSERT(JSArray::kLengthOffset ==
- JSArgumentsObjectWithLength::kLengthOffset);
+ STATIC_ASSERT(
+ static_cast<int>(JSArray::kLengthOffset) ==
+ static_cast<int>(JSArgumentsObjectWithLength::kLengthOffset));
continue;
} else if (access.offset == JSObject::kElementsOffset) {
// Ignore safe uses for arguments#elements.
@@ -3066,10 +3065,15 @@ Reduction JSCallReducer::ReduceCallOrConstructWithArrayLikeOrSpread(
Node* frame_state = NodeProperties::GetFrameStateInput(arguments_list);
FrameStateInfo state_info = FrameStateInfoOf(frame_state->op());
int start_index = 0;
- // Determine the formal parameter count;
- Handle<SharedFunctionInfo> shared;
- if (!state_info.shared_info().ToHandle(&shared)) return NoChange();
- int formal_parameter_count = shared->internal_formal_parameter_count();
+
+ int formal_parameter_count;
+ {
+ Handle<SharedFunctionInfo> shared;
+ if (!state_info.shared_info().ToHandle(&shared)) return NoChange();
+ formal_parameter_count = SharedFunctionInfoRef(broker(), shared)
+ .internal_formal_parameter_count();
+ }
+
if (type == CreateArgumentsType::kMappedArguments) {
// Mapped arguments (sloppy mode) that are aliased can only be handled
// here if there's no side-effect between the {node} and the {arg_array}.
@@ -3090,8 +3094,8 @@ Reduction JSCallReducer::ReduceCallOrConstructWithArrayLikeOrSpread(
// that no one messed with the %ArrayIteratorPrototype%.next method.
if (node->opcode() == IrOpcode::kJSCallWithSpread ||
node->opcode() == IrOpcode::kJSConstructWithSpread) {
- dependencies()->DependOnProtector(PropertyCellRef(
- js_heap_broker(), factory()->array_iterator_protector()));
+ dependencies()->DependOnProtector(
+ PropertyCellRef(broker(), factory()->array_iterator_protector()));
}
// Remove the {arguments_list} input from the {node}.
@@ -3146,10 +3150,10 @@ Reduction JSCallReducer::ReduceCallOrConstructWithArrayLikeOrSpread(
Node* check_branch =
graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
Node* check_fail = graph()->NewNode(common()->IfFalse(), check_branch);
- Node* check_throw = check_fail =
- graph()->NewNode(javascript()->CallRuntime(Runtime::kThrowTypeError, 2),
- jsgraph()->Constant(MessageTemplate::kNotConstructor),
- new_target, context, frame_state, effect, check_fail);
+ Node* check_throw = check_fail = graph()->NewNode(
+ javascript()->CallRuntime(Runtime::kThrowTypeError, 2),
+ jsgraph()->Constant(static_cast<int>(MessageTemplate::kNotConstructor)),
+ new_target, context, frame_state, effect, check_fail);
control = graph()->NewNode(common()->IfTrue(), check_branch);
NodeProperties::ReplaceControlInput(node, control);
@@ -3211,6 +3215,13 @@ bool ShouldUseCallICFeedback(Node* node) {
return true;
}
+base::Optional<HeapObjectRef> GetHeapObjectFeedback(
+ JSHeapBroker* broker, const FeedbackNexus& nexus) {
+ HeapObject object;
+ if (!nexus.GetFeedback()->GetHeapObject(&object)) return base::nullopt;
+ return HeapObjectRef(broker, handle(object, broker->isolate()));
+}
+
} // namespace
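The new GetHeapObjectFeedback helper folds the two previous call sites (one using GetHeapObjectIfWeak, one GetHeapObjectIfStrong) into a single optional-returning function. The shape of that refactoring, sketched with std::optional standing in for base::Optional and invented stand-in types:

    #include <iostream>
    #include <optional>

    struct HeapObject { int id = 0; };

    bool GetHeapObjectLegacy(HeapObject* out) {  // hypothetical legacy accessor
      *out = HeapObject{42};
      return true;
    }

    // Wrap the out-parameter-plus-bool API once, so every call site can use
    // feedback.has_value() / *feedback instead of repeating the dance.
    std::optional<HeapObject> GetHeapObjectFeedback() {
      HeapObject object;
      if (!GetHeapObjectLegacy(&object)) return std::nullopt;
      return object;
    }

    int main() {
      std::optional<HeapObject> feedback = GetHeapObjectFeedback();
      if (feedback.has_value()) std::cout << feedback->id << "\n";  // 42
    }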
Reduction JSCallReducer::ReduceJSCall(Node* node) {
@@ -3225,40 +3236,45 @@ Reduction JSCallReducer::ReduceJSCall(Node* node) {
// Try to specialize JSCall {node}s with constant {target}s.
HeapObjectMatcher m(target);
if (m.HasValue()) {
- if (m.Value()->IsJSFunction()) {
- Handle<JSFunction> function = Handle<JSFunction>::cast(m.Value());
+ ObjectRef target_ref = m.Ref(broker());
+ if (target_ref.IsJSFunction()) {
+ JSFunctionRef function = target_ref.AsJSFunction();
+ function.Serialize();
// Don't inline cross native context.
- if (function->native_context() != *native_context()) return NoChange();
-
- return ReduceJSCall(node, handle(function->shared(), isolate()));
- } else if (m.Value()->IsJSBoundFunction()) {
- Handle<JSBoundFunction> function =
- Handle<JSBoundFunction>::cast(m.Value());
- Handle<JSReceiver> bound_target_function(
- function->bound_target_function(), isolate());
- Handle<Object> bound_this(function->bound_this(), isolate());
- Handle<FixedArray> bound_arguments(function->bound_arguments(),
- isolate());
+ if (!function.native_context().equals(native_context())) {
+ return NoChange();
+ }
+
+ return ReduceJSCall(node, function.shared());
+ } else if (target_ref.IsJSBoundFunction()) {
+ JSBoundFunctionRef function = target_ref.AsJSBoundFunction();
+ function.Serialize();
+
+ ObjectRef bound_this = function.bound_this();
ConvertReceiverMode const convert_mode =
- (bound_this->IsNullOrUndefined(isolate()))
+ bound_this.IsNullOrUndefined()
? ConvertReceiverMode::kNullOrUndefined
: ConvertReceiverMode::kNotNullOrUndefined;
+
// Patch {node} to use [[BoundTargetFunction]] and [[BoundThis]].
NodeProperties::ReplaceValueInput(
- node, jsgraph()->Constant(bound_target_function), 0);
+ node, jsgraph()->Constant(function.bound_target_function()), 0);
NodeProperties::ReplaceValueInput(node, jsgraph()->Constant(bound_this),
1);
+
// Insert the [[BoundArguments]] for {node}.
- for (int i = 0; i < bound_arguments->length(); ++i) {
- node->InsertInput(
- graph()->zone(), i + 2,
- jsgraph()->Constant(handle(bound_arguments->get(i), isolate())));
+ FixedArrayRef bound_arguments = function.bound_arguments();
+ for (int i = 0; i < bound_arguments.length(); ++i) {
+ node->InsertInput(graph()->zone(), i + 2,
+ jsgraph()->Constant(bound_arguments.get(i)));
arity++;
}
+
NodeProperties::ChangeOp(
node, javascript()->Call(arity, p.frequency(), VectorSlotPair(),
convert_mode));
+
// Try to further reduce the JSCall {node}.
Reduction const reduction = ReduceJSCall(node);
return reduction.Changed() ? reduction : Changed(node);
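The InsertInput loop above encodes the [[BoundArguments]] splicing required by bound function calls: f.bind(thisArg, 1, 2)(3) must behave like a call of the bound target with arguments (1, 2, 3) on thisArg. Modeled in plain C++ with invented names:

    #include <iostream>
    #include <vector>

    int main() {
      std::vector<int> bound_arguments = {1, 2};  // from the JSBoundFunction
      std::vector<int> call_arguments = {3};      // from the JSCall node
      // Splice the bound arguments in front of the call-site arguments,
      // which is why the loop inserts at positions 2..2+n and bumps arity.
      std::vector<int> combined = bound_arguments;
      combined.insert(combined.end(), call_arguments.begin(),
                      call_arguments.end());
      for (int arg : combined) std::cout << arg << " ";  // 1 2 3
      std::cout << "\n";
    }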
@@ -3275,7 +3291,7 @@ Reduction JSCallReducer::ReduceJSCall(Node* node) {
// the {target} must have the same native context as the call site.
if (target->opcode() == IrOpcode::kJSCreateClosure) {
CreateClosureParameters const& p = CreateClosureParametersOf(target->op());
- return ReduceJSCall(node, p.shared_info());
+ return ReduceJSCall(node, SharedFunctionInfoRef(broker(), p.shared_info()));
}
// If {target} is the result of a JSCreateBoundFunction operation,
@@ -3300,7 +3316,7 @@ Reduction JSCallReducer::ReduceJSCall(Node* node) {
// Update the JSCall operator on {node}.
ConvertReceiverMode const convert_mode =
- NodeProperties::CanBeNullOrUndefined(isolate(), bound_this, effect)
+ NodeProperties::CanBeNullOrUndefined(broker(), bound_this, effect)
? ConvertReceiverMode::kAny
: ConvertReceiverMode::kNotNullOrUndefined;
NodeProperties::ChangeOp(
@@ -3324,44 +3340,41 @@ Reduction JSCallReducer::ReduceJSCall(Node* node) {
return NoChange();
}
- HeapObject* heap_object;
- if (nexus.GetFeedback()->GetHeapObjectIfWeak(&heap_object)) {
- Handle<HeapObject> feedback(heap_object, isolate());
- // Check if we want to use CallIC feedback here.
- if (!ShouldUseCallICFeedback(target)) return NoChange();
-
- if (feedback->IsCallable()) {
- Node* target_function = jsgraph()->Constant(feedback);
+ base::Optional<HeapObjectRef> feedback =
+ GetHeapObjectFeedback(broker(), nexus);
+ if (feedback.has_value() && ShouldUseCallICFeedback(target) &&
+ feedback->map().is_callable()) {
+ Node* target_function = jsgraph()->Constant(*feedback);
- // Check that the {target} is still the {target_function}.
- Node* check = graph()->NewNode(simplified()->ReferenceEqual(), target,
- target_function);
- effect = graph()->NewNode(
- simplified()->CheckIf(DeoptimizeReason::kWrongCallTarget), check,
- effect, control);
+ // Check that the {target} is still the {target_function}.
+ Node* check = graph()->NewNode(simplified()->ReferenceEqual(), target,
+ target_function);
+ effect = graph()->NewNode(
+ simplified()->CheckIf(DeoptimizeReason::kWrongCallTarget), check,
+ effect, control);
- // Specialize the JSCall node to the {target_function}.
- NodeProperties::ReplaceValueInput(node, target_function, 0);
- NodeProperties::ReplaceEffectInput(node, effect);
+ // Specialize the JSCall node to the {target_function}.
+ NodeProperties::ReplaceValueInput(node, target_function, 0);
+ NodeProperties::ReplaceEffectInput(node, effect);
- // Try to further reduce the JSCall {node}.
- Reduction const reduction = ReduceJSCall(node);
- return reduction.Changed() ? reduction : Changed(node);
- }
+ // Try to further reduce the JSCall {node}.
+ Reduction const reduction = ReduceJSCall(node);
+ return reduction.Changed() ? reduction : Changed(node);
}
+
return NoChange();
}
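The guard built here follows TurboFan's standard speculation shape: compare the live target against the CallIC feedback with ReferenceEqual, let CheckIf deoptimize on mismatch, and from then on treat the target as a constant. A rough analogue with ordinary function pointers (the real mechanism throws away the whole optimized code object instead of branching):

    #include <iostream>

    void FastPath(int x) { std::cout << "specialized call: " << x << "\n"; }
    void Deoptimize(int x) { std::cout << "deopt, generic call: " << x << "\n"; }

    void OptimizedCall(void (*target)(int), void (*feedback)(int), int x) {
      if (target != feedback) {  // ReferenceEqual + CheckIf analog
        Deoptimize(x);
        return;
      }
      FastPath(x);  // safe: {target} is pinned to the feedback value
    }

    int main() {
      OptimizedCall(&FastPath, &FastPath, 1);    // specialized call: 1
      OptimizedCall(&Deoptimize, &FastPath, 2);  // deopt, generic call: 2
    }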
Reduction JSCallReducer::ReduceJSCall(Node* node,
- Handle<SharedFunctionInfo> shared) {
+ const SharedFunctionInfoRef& shared) {
DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
Node* target = NodeProperties::GetValueInput(node, 0);
// Do not reduce calls to functions with break points.
- if (shared->HasBreakInfo()) return NoChange();
+ if (shared.HasBreakInfo()) return NoChange();
// Raise a TypeError if the {target} is a "classConstructor".
- if (IsClassConstructor(shared->kind())) {
+ if (IsClassConstructor(shared.kind())) {
NodeProperties::ReplaceValueInputs(node, target);
NodeProperties::ChangeOp(
node, javascript()->CallRuntime(
@@ -3372,7 +3385,7 @@ Reduction JSCallReducer::ReduceJSCall(Node* node,
// Check for known builtin functions.
int builtin_id =
- shared->HasBuiltinId() ? shared->builtin_id() : Builtins::kNoBuiltinId;
+ shared.HasBuiltinId() ? shared.builtin_id() : Builtins::kNoBuiltinId;
switch (builtin_id) {
case Builtins::kArrayConstructor:
return ReduceArrayConstructor(node);
@@ -3642,17 +3655,13 @@ Reduction JSCallReducer::ReduceJSCall(Node* node,
case Builtins::kStringIteratorPrototypeNext:
return ReduceStringIteratorPrototypeNext(node);
case Builtins::kStringPrototypeConcat:
- return ReduceStringPrototypeConcat(node, shared);
+ return ReduceStringPrototypeConcat(node);
case Builtins::kTypedArrayPrototypeEntries:
return ReduceArrayIterator(node, IterationKind::kEntries);
case Builtins::kTypedArrayPrototypeKeys:
return ReduceArrayIterator(node, IterationKind::kKeys);
case Builtins::kTypedArrayPrototypeValues:
return ReduceArrayIterator(node, IterationKind::kValues);
- case Builtins::kAsyncFunctionPromiseCreate:
- return ReduceAsyncFunctionPromiseCreate(node);
- case Builtins::kAsyncFunctionPromiseRelease:
- return ReduceAsyncFunctionPromiseRelease(node);
case Builtins::kPromiseInternalConstructor:
return ReducePromiseInternalConstructor(node);
case Builtins::kPromiseInternalReject:
@@ -3665,6 +3674,8 @@ Reduction JSCallReducer::ReduceJSCall(Node* node,
return ReducePromisePrototypeFinally(node);
case Builtins::kPromisePrototypeThen:
return ReducePromisePrototypeThen(node);
+ case Builtins::kPromiseResolveTrampoline:
+ return ReducePromiseResolveTrampoline(node);
case Builtins::kMapPrototypeEntries:
return ReduceCollectionIteration(node, CollectionKind::kMap,
IterationKind::kEntries);
@@ -3702,7 +3713,7 @@ Reduction JSCallReducer::ReduceJSCall(Node* node,
break;
}
- if (!FLAG_runtime_stats && shared->IsApiFunction()) {
+ if (!FLAG_runtime_stats && shared.object()->IsApiFunction()) {
return ReduceCallApiFunction(node, shared);
}
return NoChange();
@@ -3750,19 +3761,16 @@ Reduction JSCallReducer::ReduceJSConstruct(Node* node) {
return NoChange();
}
- HeapObject* feedback_object;
- if (nexus.GetFeedback()->GetHeapObjectIfStrong(&feedback_object) &&
- feedback_object->IsAllocationSite()) {
+ base::Optional<HeapObjectRef> feedback =
+ GetHeapObjectFeedback(broker(), nexus);
+ if (feedback.has_value() && feedback->IsAllocationSite()) {
// The feedback is an AllocationSite, which means we have called the
// Array function and collected transition (and pretenuring) feedback
// for the resulting arrays. This has to be kept in sync with the
// implementation in Ignition.
- Handle<AllocationSite> site(AllocationSite::cast(feedback_object),
- isolate());
- // Retrieve the Array function from the {node}.
- Node* array_function = jsgraph()->HeapConstant(
- handle(native_context()->array_function(), isolate()));
+ Node* array_function =
+ jsgraph()->Constant(native_context().array_function());
// Check that the {target} is still the {array_function}.
Node* check = graph()->NewNode(simplified()->ReferenceEqual(), target,
@@ -3778,40 +3786,42 @@ Reduction JSCallReducer::ReduceJSConstruct(Node* node) {
node, NodeProperties::GetValueInput(node, i), i + 1);
}
NodeProperties::ReplaceValueInput(node, array_function, 1);
- NodeProperties::ChangeOp(node, javascript()->CreateArray(arity, site));
+ NodeProperties::ChangeOp(
+ node, javascript()->CreateArray(
+ arity, feedback->AsAllocationSite().object()));
return Changed(node);
- } else if (nexus.GetFeedback()->GetHeapObjectIfWeak(&feedback_object) &&
- !HeapObjectMatcher(new_target).HasValue()) {
- Handle<HeapObject> object(feedback_object, isolate());
- if (object->IsConstructor()) {
- Node* new_target_feedback = jsgraph()->Constant(object);
-
- // Check that the {new_target} is still the {new_target_feedback}.
- Node* check = graph()->NewNode(simplified()->ReferenceEqual(),
- new_target, new_target_feedback);
- effect = graph()->NewNode(
- simplified()->CheckIf(DeoptimizeReason::kWrongCallTarget), check,
- effect, control);
-
- // Specialize the JSConstruct node to the {new_target_feedback}.
- NodeProperties::ReplaceValueInput(node, new_target_feedback, arity + 1);
- NodeProperties::ReplaceEffectInput(node, effect);
- if (target == new_target) {
- NodeProperties::ReplaceValueInput(node, new_target_feedback, 0);
- }
+ } else if (feedback.has_value() &&
+ !HeapObjectMatcher(new_target).HasValue() &&
+ feedback->map().is_constructor()) {
+ Node* new_target_feedback = jsgraph()->Constant(*feedback);
+
+ // Check that the {new_target} is still the {new_target_feedback}.
+ Node* check = graph()->NewNode(simplified()->ReferenceEqual(), new_target,
+ new_target_feedback);
+ effect = graph()->NewNode(
+ simplified()->CheckIf(DeoptimizeReason::kWrongCallTarget), check,
+ effect, control);
- // Try to further reduce the JSConstruct {node}.
- Reduction const reduction = ReduceJSConstruct(node);
- return reduction.Changed() ? reduction : Changed(node);
+ // Specialize the JSConstruct node to the {new_target_feedback}.
+ NodeProperties::ReplaceValueInput(node, new_target_feedback, arity + 1);
+ NodeProperties::ReplaceEffectInput(node, effect);
+ if (target == new_target) {
+ NodeProperties::ReplaceValueInput(node, new_target_feedback, 0);
}
+
+ // Try to further reduce the JSConstruct {node}.
+ Reduction const reduction = ReduceJSConstruct(node);
+ return reduction.Changed() ? reduction : Changed(node);
}
}
// Try to specialize JSConstruct {node}s with constant {target}s.
HeapObjectMatcher m(target);
if (m.HasValue()) {
+ HeapObjectRef target_ref = m.Ref(broker()).AsHeapObject();
+
// Raise a TypeError if the {target} is not a constructor.
- if (!m.Value()->IsConstructor()) {
+ if (!target_ref.map().is_constructor()) {
NodeProperties::ReplaceValueInputs(node, target);
NodeProperties::ChangeOp(node,
javascript()->CallRuntime(
@@ -3819,31 +3829,33 @@ Reduction JSCallReducer::ReduceJSConstruct(Node* node) {
return Changed(node);
}
- if (m.Value()->IsJSFunction()) {
- Handle<JSFunction> function = Handle<JSFunction>::cast(m.Value());
+ if (target_ref.IsJSFunction()) {
+ JSFunctionRef function = target_ref.AsJSFunction();
+ function.Serialize();
// Do not reduce constructors with break points.
- if (function->shared()->HasBreakInfo()) return NoChange();
+ if (function.shared().HasBreakInfo()) return NoChange();
// Don't inline cross native context.
- if (function->native_context() != *native_context()) return NoChange();
+ if (!function.native_context().equals(native_context())) {
+ return NoChange();
+ }
// Check for known builtin functions.
- int builtin_id = function->shared()->HasBuiltinId()
- ? function->shared()->builtin_id()
+ int builtin_id = function.shared().HasBuiltinId()
+ ? function.shared().builtin_id()
: Builtins::kNoBuiltinId;
switch (builtin_id) {
case Builtins::kArrayConstructor: {
// TODO(bmeurer): Deal with Array subclasses here.
- Handle<AllocationSite> site;
// Turn the {node} into a {JSCreateArray} call.
for (int i = arity; i > 0; --i) {
NodeProperties::ReplaceValueInput(
node, NodeProperties::GetValueInput(node, i), i + 1);
}
NodeProperties::ReplaceValueInput(node, new_target, 1);
- NodeProperties::ChangeOp(node,
- javascript()->CreateArray(arity, site));
+ NodeProperties::ChangeOp(
+ node, javascript()->CreateArray(arity, Handle<AllocationSite>()));
return Changed(node);
}
case Builtins::kObjectConstructor: {
@@ -3858,7 +3870,8 @@ Reduction JSCallReducer::ReduceJSConstruct(Node* node) {
// the value parameter is ignored, which is only the case if
// the {new_target} and {target} are definitely not identical.
HeapObjectMatcher mnew_target(new_target);
- if (mnew_target.HasValue() && *mnew_target.Value() != *function) {
+ if (mnew_target.HasValue() &&
+ !mnew_target.Ref(broker()).equals(function)) {
// Drop the value inputs.
for (int i = arity; i > 0; --i) node->RemoveInput(i);
NodeProperties::ChangeOp(node, javascript()->Create());
@@ -3869,18 +3882,16 @@ Reduction JSCallReducer::ReduceJSConstruct(Node* node) {
case Builtins::kPromiseConstructor:
return ReducePromiseConstructor(node);
case Builtins::kTypedArrayConstructor:
- return ReduceTypedArrayConstructor(
- node, handle(function->shared(), isolate()));
+ return ReduceTypedArrayConstructor(node, function.shared());
default:
break;
}
- } else if (m.Value()->IsJSBoundFunction()) {
- Handle<JSBoundFunction> function =
- Handle<JSBoundFunction>::cast(m.Value());
- Handle<JSReceiver> bound_target_function(
- function->bound_target_function(), isolate());
- Handle<FixedArray> bound_arguments(function->bound_arguments(),
- isolate());
+ } else if (target_ref.IsJSBoundFunction()) {
+ JSBoundFunctionRef function = target_ref.AsJSBoundFunction();
+ function.Serialize();
+
+ ObjectRef bound_target_function = function.bound_target_function();
+ FixedArrayRef bound_arguments = function.bound_arguments();
// Patch {node} to use [[BoundTargetFunction]].
NodeProperties::ReplaceValueInput(
@@ -3898,10 +3909,9 @@ Reduction JSCallReducer::ReduceJSConstruct(Node* node) {
arity + 1);
// Insert the [[BoundArguments]] for {node}.
- for (int i = 0; i < bound_arguments->length(); ++i) {
- node->InsertInput(
- graph()->zone(), i + 1,
- jsgraph()->Constant(handle(bound_arguments->get(i), isolate())));
+ for (int i = 0; i < bound_arguments.length(); ++i) {
+ node->InsertInput(graph()->zone(), i + 1,
+ jsgraph()->Constant(bound_arguments.get(i)));
arity++;
}
@@ -4308,32 +4318,6 @@ Reduction JSCallReducer::ReduceSoftDeoptimize(Node* node,
return Changed(node);
}
-namespace {
-
-// TODO(turbofan): This was copied from old compiler, might be too restrictive.
-bool IsReadOnlyLengthDescriptor(Isolate* isolate, Handle<Map> jsarray_map) {
- DCHECK(!jsarray_map->is_dictionary_map());
- Handle<Name> length_string = isolate->factory()->length_string();
- DescriptorArray* descriptors = jsarray_map->instance_descriptors();
- int number = descriptors->Search(*length_string, *jsarray_map);
- DCHECK_NE(DescriptorArray::kNotFound, number);
- return descriptors->GetDetails(number).IsReadOnly();
-}
-
-// TODO(turbofan): This was copied from old compiler, might be too restrictive.
-bool CanInlineArrayResizeOperation(Isolate* isolate, Handle<Map> receiver_map) {
- if (!receiver_map->prototype()->IsJSArray()) return false;
- Handle<JSArray> receiver_prototype(JSArray::cast(receiver_map->prototype()),
- isolate);
- return receiver_map->instance_type() == JS_ARRAY_TYPE &&
- IsFastElementsKind(receiver_map->elements_kind()) &&
- !receiver_map->is_dictionary_map() && receiver_map->is_extensible() &&
- isolate->IsAnyInitialArrayPrototype(receiver_prototype) &&
- !IsReadOnlyLengthDescriptor(isolate, receiver_map);
-}
-
-} // namespace
-
// ES6 section 22.1.3.18 Array.prototype.push ( )
Reduction JSCallReducer::ReduceArrayPrototypePush(Node* node) {
DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
@@ -4342,33 +4326,25 @@ Reduction JSCallReducer::ReduceArrayPrototypePush(Node* node) {
return NoChange();
}
- if (!isolate()->IsNoElementsProtectorIntact()) return NoChange();
-
int const num_values = node->op()->ValueInputCount() - 2;
Node* receiver = NodeProperties::GetValueInput(node, 1);
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
- // Try to determine the {receiver} map(s).
ZoneHandleSet<Map> receiver_maps;
NodeProperties::InferReceiverMapsResult result =
- NodeProperties::InferReceiverMaps(isolate(), receiver, effect,
+ NodeProperties::InferReceiverMaps(broker(), receiver, effect,
&receiver_maps);
if (result == NodeProperties::kNoReceiverMaps) return NoChange();
DCHECK_NE(0, receiver_maps.size());
- ElementsKind kind = receiver_maps[0]->elements_kind();
-
- for (Handle<Map> receiver_map : receiver_maps) {
- if (!CanInlineArrayResizeOperation(isolate(), receiver_map))
- return NoChange();
- if (!UnionElementsKindUptoPackedness(&kind, receiver_map->elements_kind()))
- return NoChange();
+ ElementsKind kind;
+ if (!CanInlineArrayResizingBuiltin(broker(), receiver_maps, &kind, true)) {
+ return NoChange();
}
- // Install code dependencies on the {receiver} global array protector cell.
dependencies()->DependOnProtector(
- PropertyCellRef(js_heap_broker(), factory()->no_elements_protector()));
+ PropertyCellRef(broker(), factory()->no_elements_protector()));
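Note that the explicit isolate()->IsNoElementsProtectorIntact() bail-out is gone from this function; the DependOnProtector call presumably subsumes it by validating the cell when the dependency is committed. A toy model (invented API) of what such a protector dependency buys:

    #include <functional>
    #include <iostream>
    #include <vector>

    // A protector is a cell that stays "intact" until some JS invariant is
    // broken, e.g. someone installs elements on Array.prototype. Optimized
    // code registers a dependency so it is discarded the moment the cell is
    // invalidated.
    struct ProtectorCell {
      bool intact = true;
      std::vector<std::function<void()>> dependents;
    };

    void DependOnProtector(ProtectorCell& cell, std::function<void()> deopt) {
      cell.dependents.push_back(std::move(deopt));
    }

    void InvalidateProtector(ProtectorCell& cell) {
      cell.intact = false;
      for (auto& deopt : cell.dependents) deopt();
    }

    int main() {
      ProtectorCell no_elements_protector;
      DependOnProtector(no_elements_protector, [] {
        std::cout << "discard Array.prototype.push fast code\n";
      });
      InvalidateProtector(no_elements_protector);  // invariant broken -> deopt
    }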
// If the {receiver_maps} information is not reliable, we need
// to check that the {receiver} still has one of these maps.
@@ -4456,34 +4432,24 @@ Reduction JSCallReducer::ReduceArrayPrototypePop(Node* node) {
return NoChange();
}
- if (!isolate()->IsNoElementsProtectorIntact()) return NoChange();
-
Node* receiver = NodeProperties::GetValueInput(node, 1);
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
ZoneHandleSet<Map> receiver_maps;
NodeProperties::InferReceiverMapsResult result =
- NodeProperties::InferReceiverMaps(isolate(), receiver, effect,
+ NodeProperties::InferReceiverMaps(broker(), receiver, effect,
&receiver_maps);
if (result == NodeProperties::kNoReceiverMaps) return NoChange();
DCHECK_NE(0, receiver_maps.size());
- ElementsKind kind = receiver_maps[0]->elements_kind();
- for (Handle<Map> receiver_map : receiver_maps) {
- if (!CanInlineArrayResizeOperation(isolate(), receiver_map))
- return NoChange();
- // TODO(turbofan): Extend this to also handle fast holey double elements
- // once we got the hole NaN mess sorted out in TurboFan/V8.
- if (receiver_map->elements_kind() == HOLEY_DOUBLE_ELEMENTS)
- return NoChange();
- if (!UnionElementsKindUptoSize(&kind, receiver_map->elements_kind()))
- return NoChange();
+ ElementsKind kind;
+ if (!CanInlineArrayResizingBuiltin(broker(), receiver_maps, &kind)) {
+ return NoChange();
}
- // Install code dependencies on the {receiver} global array protector cell.
dependencies()->DependOnProtector(
- PropertyCellRef(js_heap_broker(), factory()->no_elements_protector()));
+ PropertyCellRef(broker(), factory()->no_elements_protector()));
// If the {receiver_maps} information is not reliable, we need
// to check that the {receiver} still has one of these maps.
@@ -4573,7 +4539,6 @@ Reduction JSCallReducer::ReduceArrayPrototypeShift(Node* node) {
return NoChange();
}
- if (!isolate()->IsNoElementsProtectorIntact()) return NoChange();
Node* target = NodeProperties::GetValueInput(node, 0);
Node* receiver = NodeProperties::GetValueInput(node, 1);
Node* context = NodeProperties::GetContextInput(node);
@@ -4583,26 +4548,18 @@ Reduction JSCallReducer::ReduceArrayPrototypeShift(Node* node) {
ZoneHandleSet<Map> receiver_maps;
NodeProperties::InferReceiverMapsResult result =
- NodeProperties::InferReceiverMaps(isolate(), receiver, effect,
+ NodeProperties::InferReceiverMaps(broker(), receiver, effect,
&receiver_maps);
if (result == NodeProperties::kNoReceiverMaps) return NoChange();
DCHECK_NE(0, receiver_maps.size());
- ElementsKind kind = receiver_maps[0]->elements_kind();
- for (Handle<Map> receiver_map : receiver_maps) {
- if (!CanInlineArrayResizeOperation(isolate(), receiver_map))
- return NoChange();
- // TODO(turbofan): Extend this to also handle fast holey double elements
- // once we got the hole NaN mess sorted out in TurboFan/V8.
- if (receiver_map->elements_kind() == HOLEY_DOUBLE_ELEMENTS)
- return NoChange();
- if (!UnionElementsKindUptoSize(&kind, receiver_map->elements_kind()))
- return NoChange();
+ ElementsKind kind;
+ if (!CanInlineArrayResizingBuiltin(broker(), receiver_maps, &kind)) {
+ return NoChange();
}
- // Install code dependencies on the {receiver} global array protector cell.
dependencies()->DependOnProtector(
- PropertyCellRef(js_heap_broker(), factory()->no_elements_protector()));
+ PropertyCellRef(broker(), factory()->no_elements_protector()));
// If the {receiver_maps} information is not reliable, we need
// to check that the {receiver} still has one of these maps.
@@ -4771,51 +4728,60 @@ Reduction JSCallReducer::ReduceArrayPrototypeSlice(Node* node) {
return NoChange();
}
- int arity = static_cast<int>(p.arity() - 2);
- // Here we only optimize for cloning, that is when slice is called
- // without arguments, or with a single argument that is the constant 0.
- if (arity >= 2) return NoChange();
- if (arity == 1) {
- NumberMatcher m(NodeProperties::GetValueInput(node, 2));
- if (!m.HasValue()) return NoChange();
- if (m.Value() != 0) return NoChange();
- }
-
Node* receiver = NodeProperties::GetValueInput(node, 1);
+ Node* start = node->op()->ValueInputCount() > 2
+ ? NodeProperties::GetValueInput(node, 2)
+ : jsgraph()->ZeroConstant();
+ Node* end = node->op()->ValueInputCount() > 3
+ ? NodeProperties::GetValueInput(node, 3)
+ : jsgraph()->UndefinedConstant();
+ Node* context = NodeProperties::GetContextInput(node);
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
- // Try to determine the {receiver} map.
+ // Optimize for the case where we simply clone the {receiver},
+ // i.e. when the {start} is zero and the {end} is undefined
+ // (meaning it will be set to {receiver}s "length" property).
+ if (!NumberMatcher(start).Is(0) ||
+ !HeapObjectMatcher(end).Is(factory()->undefined_value())) {
+ return NoChange();
+ }
+
+ // Try to determine the {receiver} maps.
ZoneHandleSet<Map> receiver_maps;
NodeProperties::InferReceiverMapsResult result =
- NodeProperties::InferReceiverMaps(isolate(), receiver, effect,
+ NodeProperties::InferReceiverMaps(broker(), receiver, effect,
&receiver_maps);
if (result == NodeProperties::kNoReceiverMaps) return NoChange();
- // Ensure that any changes to the Array species constructor cause deopt.
+ // We cannot optimize unless the Array[@@species] lookup chain is intact.
if (!isolate()->IsArraySpeciesLookupChainIntact()) return NoChange();
- dependencies()->DependOnProtector(
- PropertyCellRef(js_heap_broker(), factory()->array_species_protector()));
+ // Check that the maps are of JSArray (and more).
+ // TODO(turbofan): Consider adding special case for the common pattern
+ // `slice.call(arguments)`, for example jQuery makes heavy use of that.
bool can_be_holey = false;
- // Check that the maps are of JSArray (and more)
- for (Handle<Map> receiver_map : receiver_maps) {
- if (!CanInlineArrayIteratingBuiltin(isolate(), receiver_map))
- return NoChange();
+ for (Handle<Map> map : receiver_maps) {
+ MapRef receiver_map(broker(), map);
+ if (!receiver_map.supports_fast_array_iteration()) return NoChange();
- if (IsHoleyElementsKind(receiver_map->elements_kind())) can_be_holey = true;
+ if (IsHoleyElementsKind(receiver_map.elements_kind())) {
+ can_be_holey = true;
+ }
}
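supports_fast_array_iteration() and the holey check above hinge on V8's elements-kind lattice. A much-simplified sketch (the real enum has many more members and the predicate is defined differently): "holey" kinds may contain holes that read through to the prototype chain, which is why cloning such an array is only sound while the no_elements_protector below stays intact.

    #include <iostream>

    enum ElementsKind {
      PACKED_SMI_ELEMENTS,
      HOLEY_SMI_ELEMENTS,
      PACKED_ELEMENTS,
      HOLEY_ELEMENTS,
      PACKED_DOUBLE_ELEMENTS,
      HOLEY_DOUBLE_ELEMENTS,
    };

    bool IsHoleyElementsKind(ElementsKind kind) {
      return kind == HOLEY_SMI_ELEMENTS || kind == HOLEY_ELEMENTS ||
             kind == HOLEY_DOUBLE_ELEMENTS;
    }

    int main() {
      std::cout << IsHoleyElementsKind(HOLEY_ELEMENTS) << "\n";  // 1
    }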
+ // Install code dependency on the Array[@@species] protector.
+ dependencies()->DependOnProtector(
+ PropertyCellRef(broker(), factory()->array_species_protector()));
+
// Install code dependency on the array protector for holey arrays.
if (can_be_holey) {
dependencies()->DependOnProtector(
- PropertyCellRef(js_heap_broker(), factory()->no_elements_protector()));
+ PropertyCellRef(broker(), factory()->no_elements_protector()));
}
- // If we have unreliable maps, we need a map check.
- // This is actually redundant due to how JSNativeContextSpecialization
- // reduces the load of slice, but we do it here nevertheless for consistency
- // and robustness.
+ // If we have unreliable maps, we need a map check, as there might be
+ // side-effects caused by the evaluation of the {node}s parameters.
if (result == NodeProperties::kUnreliableReceiverMaps) {
effect =
graph()->NewNode(simplified()->CheckMaps(CheckMapsFlag::kNone,
@@ -4823,8 +4789,12 @@ Reduction JSCallReducer::ReduceArrayPrototypeSlice(Node* node) {
receiver, effect, control);
}
- Node* context = NodeProperties::GetContextInput(node);
-
+ // TODO(turbofan): We can do even better here, either adding a CloneArray
+ // simplified operator, whose output type indicates that it's an Array,
+ // saving subsequent checks, or yet better, by introducing new operators
+ // CopySmiOrObjectElements / CopyDoubleElements and inlining the JSArray
+ // allocation in here. That way we'd even get escape analysis and scalar
+ // replacement to help in some cases.
Callable callable =
Builtins::CallableFor(isolate(), Builtins::kCloneFastJSArray);
auto call_descriptor = Linkage::GetStubCallDescriptor(
@@ -4876,12 +4846,13 @@ Reduction JSCallReducer::ReduceArrayIterator(Node* node, IterationKind kind) {
// Check if we know that {receiver} is a valid JSReceiver.
ZoneHandleSet<Map> receiver_maps;
NodeProperties::InferReceiverMapsResult result =
- NodeProperties::InferReceiverMaps(isolate(), receiver, effect,
+ NodeProperties::InferReceiverMaps(broker(), receiver, effect,
&receiver_maps);
if (result == NodeProperties::kNoReceiverMaps) return NoChange();
DCHECK_NE(0, receiver_maps.size());
- for (Handle<Map> receiver_map : receiver_maps) {
- if (!receiver_map->IsJSReceiverMap()) return NoChange();
+ for (Handle<Map> map : receiver_maps) {
+ MapRef receiver_map(broker(), map);
+ if (!receiver_map.IsJSReceiverMap()) return NoChange();
}
// Morph the {node} into a JSCreateArrayIterator with the given {kind}.
@@ -4897,14 +4868,14 @@ Reduction JSCallReducer::ReduceArrayIterator(Node* node, IterationKind kind) {
namespace {
-bool InferIteratedObjectMaps(Isolate* isolate, Node* iterator,
+bool InferIteratedObjectMaps(JSHeapBroker* broker, Node* iterator,
ZoneHandleSet<Map>* iterated_object_maps) {
DCHECK_EQ(IrOpcode::kJSCreateArrayIterator, iterator->opcode());
Node* iterated_object = NodeProperties::GetValueInput(iterator, 0);
Node* effect = NodeProperties::GetEffectInput(iterator);
NodeProperties::InferReceiverMapsResult result =
- NodeProperties::InferReceiverMaps(isolate, iterated_object, effect,
+ NodeProperties::InferReceiverMaps(broker, iterated_object, effect,
iterated_object_maps);
return result != NodeProperties::kNoReceiverMaps;
}
@@ -4931,40 +4902,37 @@ Reduction JSCallReducer::ReduceArrayIteratorPrototypeNext(Node* node) {
// Try to infer the [[IteratedObject]] maps from the {iterator}.
ZoneHandleSet<Map> iterated_object_maps;
- if (!InferIteratedObjectMaps(isolate(), iterator, &iterated_object_maps)) {
+ if (!InferIteratedObjectMaps(broker(), iterator, &iterated_object_maps)) {
return NoChange();
}
DCHECK_NE(0, iterated_object_maps.size());
// Check that various {iterated_object_maps} have compatible elements kinds.
- ElementsKind elements_kind = iterated_object_maps[0]->elements_kind();
+ ElementsKind elements_kind =
+ MapRef(broker(), iterated_object_maps[0]).elements_kind();
if (IsFixedTypedArrayElementsKind(elements_kind)) {
// TurboFan doesn't support loading from BigInt typed arrays yet.
if (elements_kind == BIGUINT64_ELEMENTS ||
elements_kind == BIGINT64_ELEMENTS) {
return NoChange();
}
- for (Handle<Map> iterated_object_map : iterated_object_maps) {
- if (iterated_object_map->elements_kind() != elements_kind) {
+ for (Handle<Map> map : iterated_object_maps) {
+ MapRef iterated_object_map(broker(), map);
+ if (iterated_object_map.elements_kind() != elements_kind) {
return NoChange();
}
}
} else {
- for (Handle<Map> iterated_object_map : iterated_object_maps) {
- if (!CanInlineArrayIteratingBuiltin(isolate(), iterated_object_map)) {
- return NoChange();
- }
- if (!UnionElementsKindUptoSize(&elements_kind,
- iterated_object_map->elements_kind())) {
- return NoChange();
- }
+ if (!CanInlineArrayIteratingBuiltin(broker(), iterated_object_maps,
+ &elements_kind)) {
+ return NoChange();
}
}
// Install code dependency on the array protector for holey arrays.
if (IsHoleyElementsKind(elements_kind)) {
dependencies()->DependOnProtector(
- PropertyCellRef(js_heap_broker(), factory()->no_elements_protector()));
+ PropertyCellRef(broker(), factory()->no_elements_protector()));
}
// Load the (current) {iterated_object} from the {iterator}.
@@ -4980,14 +4948,14 @@ Reduction JSCallReducer::ReduceArrayIteratorPrototypeNext(Node* node) {
iterated_object, effect, control);
if (IsFixedTypedArrayElementsKind(elements_kind)) {
- // See if we can skip the neutering check.
- if (isolate()->IsArrayBufferNeuteringIntact()) {
+ // See if we can skip the detaching check.
+ if (isolate()->IsArrayBufferDetachingIntact()) {
// Add a code dependency so we are deoptimized in case an ArrayBuffer
- // gets neutered.
+ // gets detached.
dependencies()->DependOnProtector(PropertyCellRef(
- js_heap_broker(), factory()->array_buffer_neutering_protector()));
+ broker(), factory()->array_buffer_detaching_protector()));
} else {
- // Bail out if the {iterated_object}s JSArrayBuffer was neutered.
+ // Bail out if the {iterated_object}s JSArrayBuffer was detached.
Node* buffer = effect = graph()->NewNode(
simplified()->LoadField(AccessBuilder::ForJSArrayBufferViewBuffer()),
iterated_object, effect, control);
@@ -4998,10 +4966,10 @@ Reduction JSCallReducer::ReduceArrayIteratorPrototypeNext(Node* node) {
simplified()->NumberEqual(),
graph()->NewNode(
simplified()->NumberBitwiseAnd(), buffer_bit_field,
- jsgraph()->Constant(JSArrayBuffer::WasNeuteredBit::kMask)),
+ jsgraph()->Constant(JSArrayBuffer::WasDetachedBit::kMask)),
jsgraph()->ZeroConstant());
effect = graph()->NewNode(
- simplified()->CheckIf(DeoptimizeReason::kArrayBufferWasNeutered,
+ simplified()->CheckIf(DeoptimizeReason::kArrayBufferWasDetached,
p.feedback()),
check, effect, control);
}
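This hunk is mostly the neutered-to-detached rename, but the two branches are worth spelling out: with the detaching protector intact a code dependency suffices, otherwise a per-iteration bit-field test must be emitted. A minimal model of that dynamic test (field and mask names are stand-ins for V8's bit-field accessors):

    #include <cstdint>
    #include <iostream>

    struct JSArrayBuffer {
      static constexpr std::uint32_t kWasDetachedMask = 1u << 0;  // illustrative
      std::uint32_t bit_field = 0;
    };

    // Load the bit field, AND it with the "was detached" mask, and deopt if
    // the result is non-zero; this mirrors the NumberBitwiseAnd/NumberEqual
    // node pair built above.
    bool WasDetached(const JSArrayBuffer& buffer) {
      return (buffer.bit_field & JSArrayBuffer::kWasDetachedMask) != 0;
    }

    int main() {
      JSArrayBuffer buffer;
      std::cout << WasDetached(buffer) << "\n";  // 0: still attached
      buffer.bit_field |= JSArrayBuffer::kWasDetachedMask;  // detach it
      std::cout << WasDetached(buffer) << "\n";  // 1: deopt on next access
    }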
@@ -5013,11 +4981,11 @@ Reduction JSCallReducer::ReduceArrayIteratorPrototypeNext(Node* node) {
// latter case we even know that it's a Smi in UnsignedSmall range.
FieldAccess index_access = AccessBuilder::ForJSArrayIteratorNextIndex();
if (IsFixedTypedArrayElementsKind(elements_kind)) {
- index_access.type = TypeCache::Get().kJSTypedArrayLengthType;
+ index_access.type = TypeCache::Get()->kJSTypedArrayLengthType;
index_access.machine_type = MachineType::TaggedSigned();
index_access.write_barrier_kind = kNoWriteBarrier;
} else {
- index_access.type = TypeCache::Get().kJSArrayLengthType;
+ index_access.type = TypeCache::Get()->kJSArrayLengthType;
}
Node* index = effect = graph()->NewNode(simplified()->LoadField(index_access),
iterator, effect, control);
@@ -5373,7 +5341,7 @@ Reduction JSCallReducer::ReduceStringIteratorPrototypeNext(Node* node) {
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
Node* context = NodeProperties::GetContextInput(node);
- if (NodeProperties::HasInstanceTypeWitness(isolate(), receiver, effect,
+ if (NodeProperties::HasInstanceTypeWitness(broker(), receiver, effect,
JS_STRING_ITERATOR_TYPE)) {
Node* string = effect = graph()->NewNode(
simplified()->LoadField(AccessBuilder::ForJSStringIteratorString()),
@@ -5438,8 +5406,7 @@ Reduction JSCallReducer::ReduceStringIteratorPrototypeNext(Node* node) {
}
// ES #sec-string.prototype.concat
-Reduction JSCallReducer::ReduceStringPrototypeConcat(
- Node* node, Handle<SharedFunctionInfo> shared) {
+Reduction JSCallReducer::ReduceStringPrototypeConcat(Node* node) {
if (node->op()->ValueInputCount() < 2 || node->op()->ValueInputCount() > 3) {
return NoChange();
}
@@ -5479,46 +5446,13 @@ Reduction JSCallReducer::ReduceStringPrototypeConcat(
return Replace(value);
}
-Reduction JSCallReducer::ReduceAsyncFunctionPromiseCreate(Node* node) {
- DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
- Node* context = NodeProperties::GetContextInput(node);
- Node* effect = NodeProperties::GetEffectInput(node);
- if (!isolate()->IsPromiseHookProtectorIntact()) return NoChange();
-
- // Install a code dependency on the promise hook protector cell.
- dependencies()->DependOnProtector(
- PropertyCellRef(js_heap_broker(), factory()->promise_hook_protector()));
-
- // Morph this {node} into a JSCreatePromise node.
- RelaxControls(node);
- node->ReplaceInput(0, context);
- node->ReplaceInput(1, effect);
- node->TrimInputCount(2);
- NodeProperties::ChangeOp(node, javascript()->CreatePromise());
- return Changed(node);
-}
-
-Reduction JSCallReducer::ReduceAsyncFunctionPromiseRelease(Node* node) {
- DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
- if (!isolate()->IsPromiseHookProtectorIntact()) return NoChange();
-
- dependencies()->DependOnProtector(
- PropertyCellRef(js_heap_broker(), factory()->promise_hook_protector()));
-
- // The AsyncFunctionPromiseRelease builtin is a no-op as long as neither
- // the debugger is active nor any promise hook has been installed (ever).
- Node* value = jsgraph()->UndefinedConstant();
- ReplaceWithValue(node, value);
- return Replace(value);
-}
-
Node* JSCallReducer::CreateArtificialFrameState(
Node* node, Node* outer_frame_state, int parameter_count,
BailoutId bailout_id, FrameStateType frame_state_type,
- Handle<SharedFunctionInfo> shared, Node* context) {
+ const SharedFunctionInfoRef& shared, Node* context) {
const FrameStateFunctionInfo* state_info =
- common()->CreateFrameStateFunctionInfo(frame_state_type,
- parameter_count + 1, 0, shared);
+ common()->CreateFrameStateFunctionInfo(
+ frame_state_type, parameter_count + 1, 0, shared.object());
const Operator* op = common()->FrameState(
bailout_id, OutputFrameStateCombine::Ignore(), state_info);
@@ -5562,16 +5496,16 @@ Reduction JSCallReducer::ReducePromiseConstructor(Node* node) {
if (target != new_target) return NoChange();
dependencies()->DependOnProtector(
- PropertyCellRef(js_heap_broker(), factory()->promise_hook_protector()));
+ PropertyCellRef(broker(), factory()->promise_hook_protector()));
- Handle<SharedFunctionInfo> promise_shared(
- handle(native_context()->promise_function()->shared(), isolate()));
+ SharedFunctionInfoRef promise_shared =
+ native_context().promise_function().shared();
// Insert a construct stub frame into the chain of frame states. This will
// reconstruct the proper frame when deoptimizing within the constructor.
// For the frame state, we only provide the executor parameter, even if more
// arguments were passed. This is not observable from JS.

- DCHECK_EQ(1, promise_shared->internal_formal_parameter_count());
+ DCHECK_EQ(1, promise_shared.internal_formal_parameter_count());
Node* constructor_frame_state = CreateArtificialFrameState(
node, outer_frame_state, 1, BailoutId::ConstructStubInvoke(),
FrameStateType::kConstructStub, promise_shared, context);
@@ -5604,45 +5538,42 @@ Reduction JSCallReducer::ReducePromiseConstructor(Node* node) {
// 8. CreatePromiseResolvingFunctions
// Allocate a promise context for the closures below.
- Node* promise_context = effect =
- graph()->NewNode(javascript()->CreateFunctionContext(
- handle(native_context()->scope_info(), isolate()),
- PromiseBuiltinsAssembler::kPromiseContextLength -
- Context::MIN_CONTEXT_SLOTS,
- FUNCTION_SCOPE),
- context, effect, control);
- effect =
- graph()->NewNode(simplified()->StoreField(AccessBuilder::ForContextSlot(
- PromiseBuiltinsAssembler::kPromiseSlot)),
- promise_context, promise, effect, control);
+ Node* promise_context = effect = graph()->NewNode(
+ javascript()->CreateFunctionContext(
+ handle(native_context().object()->scope_info(), isolate()),
+ PromiseBuiltins::kPromiseContextLength - Context::MIN_CONTEXT_SLOTS,
+ FUNCTION_SCOPE),
+ context, effect, control);
+ effect = graph()->NewNode(
+ simplified()->StoreField(
+ AccessBuilder::ForContextSlot(PromiseBuiltins::kPromiseSlot)),
+ promise_context, promise, effect, control);
effect = graph()->NewNode(
- simplified()->StoreField(AccessBuilder::ForContextSlot(
- PromiseBuiltinsAssembler::kAlreadyResolvedSlot)),
+ simplified()->StoreField(
+ AccessBuilder::ForContextSlot(PromiseBuiltins::kAlreadyResolvedSlot)),
promise_context, jsgraph()->FalseConstant(), effect, control);
effect = graph()->NewNode(
- simplified()->StoreField(AccessBuilder::ForContextSlot(
- PromiseBuiltinsAssembler::kDebugEventSlot)),
+ simplified()->StoreField(
+ AccessBuilder::ForContextSlot(PromiseBuiltins::kDebugEventSlot)),
promise_context, jsgraph()->TrueConstant(), effect, control);
// Allocate the closure for the resolve case.
- Handle<SharedFunctionInfo> resolve_shared(
- native_context()->promise_capability_default_resolve_shared_fun(),
- isolate());
- Node* resolve = effect =
- graph()->NewNode(javascript()->CreateClosure(
- resolve_shared, factory()->many_closures_cell(),
- handle(resolve_shared->GetCode(), isolate())),
- promise_context, effect, control);
+ SharedFunctionInfoRef resolve_shared =
+ native_context().promise_capability_default_resolve_shared_fun();
+ Node* resolve = effect = graph()->NewNode(
+ javascript()->CreateClosure(
+ resolve_shared.object(), factory()->many_closures_cell(),
+ handle(resolve_shared.object()->GetCode(), isolate())),
+ promise_context, effect, control);
// Allocate the closure for the reject case.
- Handle<SharedFunctionInfo> reject_shared(
- native_context()->promise_capability_default_reject_shared_fun(),
- isolate());
- Node* reject = effect =
- graph()->NewNode(javascript()->CreateClosure(
- reject_shared, factory()->many_closures_cell(),
- handle(reject_shared->GetCode(), isolate())),
- promise_context, effect, control);
+ SharedFunctionInfoRef reject_shared =
+ native_context().promise_capability_default_reject_shared_fun();
+ Node* reject = effect = graph()->NewNode(
+ javascript()->CreateClosure(
+ reject_shared.object(), factory()->many_closures_cell(),
+ handle(reject_shared.object()->GetCode(), isolate())),
+ promise_context, effect, control);
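The stores above populate a hand-allocated function context. Restated as a plain struct (slot names follow the PromiseBuiltins constants used above; the layout is illustrative): one context is shared by the resolve and reject closures, so either closure can flip already_resolved and make the other a no-op.

    #include <iostream>

    struct PromiseResolvingFunctionsContext {
      void* promise = nullptr;        // kPromiseSlot
      bool already_resolved = false;  // kAlreadyResolvedSlot
      bool debug_event = true;        // kDebugEventSlot
    };

    int main() {
      PromiseResolvingFunctionsContext context;
      std::cout << context.already_resolved << "\n";  // 0 until resolve/reject
    }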
const std::vector<Node*> checkpoint_parameters_continuation(
{jsgraph()->UndefinedConstant() /* receiver */, promise, reject});
@@ -5720,7 +5651,7 @@ Reduction JSCallReducer::ReducePromiseInternalConstructor(Node* node) {
if (!isolate()->IsPromiseHookProtectorIntact()) return NoChange();
dependencies()->DependOnProtector(
- PropertyCellRef(js_heap_broker(), factory()->promise_hook_protector()));
+ PropertyCellRef(broker(), factory()->promise_hook_protector()));
// Create a new pending promise.
Node* value = effect =
@@ -5798,22 +5729,25 @@ Reduction JSCallReducer::ReducePromisePrototypeCatch(Node* node) {
// Check if we know something about {receiver} already.
ZoneHandleSet<Map> receiver_maps;
NodeProperties::InferReceiverMapsResult result =
- NodeProperties::InferReceiverMaps(isolate(), receiver, effect,
+ NodeProperties::InferReceiverMaps(broker(), receiver, effect,
&receiver_maps);
if (result == NodeProperties::kNoReceiverMaps) return NoChange();
DCHECK_NE(0, receiver_maps.size());
// Check whether all {receiver_maps} are JSPromise maps and
// have the initial Promise.prototype as their [[Prototype]].
- for (Handle<Map> receiver_map : receiver_maps) {
- if (!receiver_map->IsJSPromiseMap()) return NoChange();
- if (receiver_map->prototype() != native_context()->promise_prototype()) {
+ for (Handle<Map> map : receiver_maps) {
+ MapRef receiver_map(broker(), map);
+ if (!receiver_map.IsJSPromiseMap()) return NoChange();
+ receiver_map.SerializePrototype();
+ if (!receiver_map.prototype().equals(
+ native_context().promise_prototype())) {
return NoChange();
}
}
dependencies()->DependOnProtector(
- PropertyCellRef(js_heap_broker(), factory()->promise_then_protector()));
+ PropertyCellRef(broker(), factory()->promise_then_protector()));
// If the {receiver_maps} aren't reliable, we need to repeat the
// map check here, guarded by the CALL_IC.
@@ -5827,8 +5761,7 @@ Reduction JSCallReducer::ReducePromisePrototypeCatch(Node* node) {
// Massage the {node} to call "then" instead by first removing all inputs
// following the onRejected parameter, and then filling up the parameters
// to two inputs from the left with undefined.
- Node* target =
- jsgraph()->Constant(handle(native_context()->promise_then(), isolate()));
+ Node* target = jsgraph()->Constant(native_context().promise_then());
NodeProperties::ReplaceValueInput(node, target, 0);
NodeProperties::ReplaceEffectInput(node, effect);
for (; arity > 1; --arity) node->RemoveInput(3);
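Per the comment above, Promise.prototype.catch(onRejected) is lowered to a then call. The input surgery, replayed on a plain vector with invented names:

    #include <iostream>
    #include <string>
    #include <vector>

    // promise.catch(onRejected) carries inputs [target, receiver, onRejected];
    // rewriting it to promise.then(undefined, onRejected) swaps the target and
    // left-pads the parameters with undefined, which is what the RemoveInput
    // loop plus the subsequent undefined-filling achieve on the real node.
    int main() {
      std::vector<std::string> inputs = {"catch_builtin", "promise",
                                         "onRejected"};
      inputs[0] = "promise_then";                      // ReplaceValueInput(.., 0)
      inputs.insert(inputs.begin() + 2, "undefined");  // onFulfilled := undefined
      for (const std::string& input : inputs) std::cout << input << " ";
      std::cout << "\n";  // promise_then promise undefined onRejected
    }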
@@ -5875,26 +5808,29 @@ Reduction JSCallReducer::ReducePromisePrototypeFinally(Node* node) {
// Check if we know something about {receiver} already.
ZoneHandleSet<Map> receiver_maps;
NodeProperties::InferReceiverMapsResult result =
- NodeProperties::InferReceiverMaps(isolate(), receiver, effect,
+ NodeProperties::InferReceiverMaps(broker(), receiver, effect,
&receiver_maps);
if (result == NodeProperties::kNoReceiverMaps) return NoChange();
DCHECK_NE(0, receiver_maps.size());
// Check whether all {receiver_maps} are JSPromise maps and
// have the initial Promise.prototype as their [[Prototype]].
- for (Handle<Map> receiver_map : receiver_maps) {
- if (!receiver_map->IsJSPromiseMap()) return NoChange();
- if (receiver_map->prototype() != native_context()->promise_prototype()) {
+ for (Handle<Map> map : receiver_maps) {
+ MapRef receiver_map(broker(), map);
+ if (!receiver_map.IsJSPromiseMap()) return NoChange();
+ receiver_map.SerializePrototype();
+ if (!receiver_map.prototype().equals(
+ native_context().promise_prototype())) {
return NoChange();
}
}
dependencies()->DependOnProtector(
- PropertyCellRef(js_heap_broker(), factory()->promise_hook_protector()));
+ PropertyCellRef(broker(), factory()->promise_hook_protector()));
dependencies()->DependOnProtector(
- PropertyCellRef(js_heap_broker(), factory()->promise_then_protector()));
- dependencies()->DependOnProtector(PropertyCellRef(
- js_heap_broker(), factory()->promise_species_protector()));
+ PropertyCellRef(broker(), factory()->promise_then_protector()));
+ dependencies()->DependOnProtector(
+ PropertyCellRef(broker(), factory()->promise_species_protector()));
// If the {receiver_maps} aren't reliable, we need to repeat the
// map check here, guarded by the CALL_IC.
@@ -5916,44 +5852,44 @@ Reduction JSCallReducer::ReducePromisePrototypeFinally(Node* node) {
Node* catch_true;
Node* then_true;
{
- Node* context = jsgraph()->HeapConstant(native_context());
- Node* constructor = jsgraph()->HeapConstant(
- handle(native_context()->promise_function(), isolate()));
+ Node* context = jsgraph()->Constant(native_context());
+ Node* constructor =
+ jsgraph()->Constant(native_context().promise_function());
// Allocate shared context for the closures below.
context = etrue = graph()->NewNode(
javascript()->CreateFunctionContext(
- handle(native_context()->scope_info(), isolate()),
- PromiseBuiltinsAssembler::kPromiseFinallyContextLength -
+ handle(native_context().object()->scope_info(), isolate()),
+ PromiseBuiltins::kPromiseFinallyContextLength -
Context::MIN_CONTEXT_SLOTS,
FUNCTION_SCOPE),
context, etrue, if_true);
- etrue =
- graph()->NewNode(simplified()->StoreField(AccessBuilder::ForContextSlot(
- PromiseBuiltinsAssembler::kOnFinallySlot)),
- context, on_finally, etrue, if_true);
- etrue =
- graph()->NewNode(simplified()->StoreField(AccessBuilder::ForContextSlot(
- PromiseBuiltinsAssembler::kConstructorSlot)),
- context, constructor, etrue, if_true);
+ etrue = graph()->NewNode(
+ simplified()->StoreField(
+ AccessBuilder::ForContextSlot(PromiseBuiltins::kOnFinallySlot)),
+ context, on_finally, etrue, if_true);
+ etrue = graph()->NewNode(
+ simplified()->StoreField(
+ AccessBuilder::ForContextSlot(PromiseBuiltins::kConstructorSlot)),
+ context, constructor, etrue, if_true);
// Allocate the closure for the reject case.
- Handle<SharedFunctionInfo> catch_finally(
- native_context()->promise_catch_finally_shared_fun(), isolate());
- catch_true = etrue =
- graph()->NewNode(javascript()->CreateClosure(
- catch_finally, factory()->many_closures_cell(),
- handle(catch_finally->GetCode(), isolate())),
- context, etrue, if_true);
+ SharedFunctionInfoRef catch_finally =
+ native_context().promise_catch_finally_shared_fun();
+ catch_true = etrue = graph()->NewNode(
+ javascript()->CreateClosure(
+ catch_finally.object(), factory()->many_closures_cell(),
+ handle(catch_finally.object()->GetCode(), isolate())),
+ context, etrue, if_true);
// Allocate the closure for the fulfill case.
- Handle<SharedFunctionInfo> then_finally(
- native_context()->promise_then_finally_shared_fun(), isolate());
- then_true = etrue =
- graph()->NewNode(javascript()->CreateClosure(
- then_finally, factory()->many_closures_cell(),
- handle(then_finally->GetCode(), isolate())),
- context, etrue, if_true);
+ SharedFunctionInfoRef then_finally =
+ native_context().promise_then_finally_shared_fun();
+ then_true = etrue = graph()->NewNode(
+ javascript()->CreateClosure(
+ then_finally.object(), factory()->many_closures_cell(),
+ handle(then_finally.object()->GetCode(), isolate())),
+ context, etrue, if_true);
}
Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
@@ -5979,8 +5915,7 @@ Reduction JSCallReducer::ReducePromisePrototypeFinally(Node* node) {
// Massage the {node} to call "then" instead by first removing all inputs
// following the onFinally parameter, and then replacing the only parameter
// input with the {on_finally} value.
- Node* target =
- jsgraph()->Constant(handle(native_context()->promise_then(), isolate()));
+ Node* target = jsgraph()->Constant(native_context().promise_then());
NodeProperties::ReplaceValueInput(node, target, 0);
NodeProperties::ReplaceEffectInput(node, effect);
NodeProperties::ReplaceControlInput(node, control);
@@ -6028,7 +5963,7 @@ Reduction JSCallReducer::ReducePromisePrototypeThen(Node* node) {
// Check if we know something about {receiver} already.
ZoneHandleSet<Map> receiver_maps;
NodeProperties::InferReceiverMapsResult infer_receiver_maps_result =
- NodeProperties::InferReceiverMaps(isolate(), receiver, effect,
+ NodeProperties::InferReceiverMaps(broker(), receiver, effect,
&receiver_maps);
if (infer_receiver_maps_result == NodeProperties::kNoReceiverMaps) {
return NoChange();
@@ -6037,17 +5972,20 @@ Reduction JSCallReducer::ReducePromisePrototypeThen(Node* node) {
// Check whether all {receiver_maps} are JSPromise maps and
// have the initial Promise.prototype as their [[Prototype]].
- for (Handle<Map> receiver_map : receiver_maps) {
- if (!receiver_map->IsJSPromiseMap()) return NoChange();
- if (receiver_map->prototype() != native_context()->promise_prototype()) {
+ for (Handle<Map> map : receiver_maps) {
+ MapRef receiver_map(broker(), map);
+ if (!receiver_map.IsJSPromiseMap()) return NoChange();
+ receiver_map.SerializePrototype();
+ if (!receiver_map.prototype().equals(
+ native_context().promise_prototype())) {
return NoChange();
}
}
dependencies()->DependOnProtector(
- PropertyCellRef(js_heap_broker(), factory()->promise_hook_protector()));
- dependencies()->DependOnProtector(PropertyCellRef(
- js_heap_broker(), factory()->promise_species_protector()));
+ PropertyCellRef(broker(), factory()->promise_hook_protector()));
+ dependencies()->DependOnProtector(
+ PropertyCellRef(broker(), factory()->promise_species_protector()));
// If the {receiver_maps} aren't reliable, we need to repeat the
// map check here, guarded by the CALL_IC.
@@ -6078,6 +6016,18 @@ Reduction JSCallReducer::ReducePromisePrototypeThen(Node* node) {
result = effect = graph()->NewNode(
javascript()->PerformPromiseThen(), receiver, on_fulfilled, on_rejected,
result, context, frame_state, effect, control);
+
+ // At this point we know that {result} is going to have the
+ // initial Promise map, since even if {PerformPromiseThen}
+ // above called into the host rejection tracker, the {result}
+ // doesn't escape to user JavaScript. So bake this information
+ // into the graph such that subsequent passes can use the
+ // information for further optimizations.
+ MapRef result_map = native_context().promise_function().initial_map();
+ effect = graph()->NewNode(
+ simplified()->MapGuard(ZoneHandleSet<Map>(result_map.object())), result,
+ effect, control);
+
ReplaceWithValue(node, result, effect, control);
return Replace(result);
}
@@ -6097,7 +6047,7 @@ Reduction JSCallReducer::ReducePromiseResolveTrampoline(Node* node) {
// Check if we know something about {receiver} already.
ZoneHandleSet<Map> receiver_maps;
NodeProperties::InferReceiverMapsResult infer_receiver_maps_result =
- NodeProperties::InferReceiverMaps(isolate(), receiver, effect,
+ NodeProperties::InferReceiverMaps(broker(), receiver, effect,
&receiver_maps);
if (infer_receiver_maps_result == NodeProperties::kNoReceiverMaps) {
return NoChange();
@@ -6105,8 +6055,9 @@ Reduction JSCallReducer::ReducePromiseResolveTrampoline(Node* node) {
DCHECK_NE(0, receiver_maps.size());
// Only reduce when all {receiver_maps} are JSReceiver maps.
- for (Handle<Map> receiver_map : receiver_maps) {
- if (!receiver_map->IsJSReceiverMap()) return NoChange();
+ for (Handle<Map> map : receiver_maps) {
+ MapRef receiver_map(broker(), map);
+ if (!receiver_map.IsJSReceiverMap()) return NoChange();
}
// Morph the {node} into a JSPromiseResolve operation.
@@ -6123,7 +6074,7 @@ Reduction JSCallReducer::ReducePromiseResolveTrampoline(Node* node) {
// ES #sec-typedarray-constructors
Reduction JSCallReducer::ReduceTypedArrayConstructor(
- Node* node, Handle<SharedFunctionInfo> shared) {
+ Node* node, const SharedFunctionInfoRef& shared) {
DCHECK_EQ(IrOpcode::kJSConstruct, node->opcode());
ConstructParameters const& p = ConstructParametersOf(node->op());
int arity = static_cast<int>(p.arity() - 2);
@@ -6293,7 +6244,7 @@ Reduction JSCallReducer::ReduceMapPrototypeGet(Node* node) {
Node* control = NodeProperties::GetControlInput(node);
Node* key = NodeProperties::GetValueInput(node, 2);
- if (!NodeProperties::HasInstanceTypeWitness(isolate(), receiver, effect,
+ if (!NodeProperties::HasInstanceTypeWitness(broker(), receiver, effect,
JS_MAP_TYPE))
return NoChange();
@@ -6338,7 +6289,7 @@ Reduction JSCallReducer::ReduceMapPrototypeHas(Node* node) {
Node* control = NodeProperties::GetControlInput(node);
Node* key = NodeProperties::GetValueInput(node, 2);
- if (!NodeProperties::HasInstanceTypeWitness(isolate(), receiver, effect,
+ if (!NodeProperties::HasInstanceTypeWitness(broker(), receiver, effect,
JS_MAP_TYPE))
return NoChange();
@@ -6379,7 +6330,7 @@ Reduction JSCallReducer::ReduceCollectionIteration(
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
if (NodeProperties::HasInstanceTypeWitness(
- isolate(), receiver, effect,
+ broker(), receiver, effect,
InstanceTypeForCollectionKind(collection_kind))) {
Node* js_create_iterator = effect = graph()->NewNode(
javascript()->CreateCollectionIterator(collection_kind, iteration_kind),
@@ -6397,14 +6348,14 @@ Reduction JSCallReducer::ReduceCollectionPrototypeSize(
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
if (NodeProperties::HasInstanceTypeWitness(
- isolate(), receiver, effect,
+ broker(), receiver, effect,
InstanceTypeForCollectionKind(collection_kind))) {
Node* table = effect = graph()->NewNode(
simplified()->LoadField(AccessBuilder::ForJSCollectionTable()),
receiver, effect, control);
Node* value = effect = graph()->NewNode(
simplified()->LoadField(
- AccessBuilder::ForOrderedHashTableBaseNumberOfElements()),
+ AccessBuilder::ForOrderedHashMapOrSetNumberOfElements()),
table, effect, control);
ReplaceWithValue(node, value, effect, control);
return Replace(value);
@@ -6438,7 +6389,7 @@ Reduction JSCallReducer::ReduceCollectionIteratorPrototypeNext(
InstanceType receiver_instance_type;
ZoneHandleSet<Map> receiver_maps;
NodeProperties::InferReceiverMapsResult result =
- NodeProperties::InferReceiverMaps(isolate(), receiver, effect,
+ NodeProperties::InferReceiverMaps(broker(), receiver, effect,
&receiver_maps);
if (result == NodeProperties::kNoReceiverMaps) return NoChange();
DCHECK_NE(0, receiver_maps.size());
@@ -6471,7 +6422,7 @@ Reduction JSCallReducer::ReduceCollectionIteratorPrototypeNext(
receiver, effect, control);
Node* next_table = effect =
graph()->NewNode(simplified()->LoadField(
- AccessBuilder::ForOrderedHashTableBaseNextTable()),
+ AccessBuilder::ForOrderedHashMapOrSetNextTable()),
table, effect, control);
Node* check = graph()->NewNode(simplified()->ObjectIsSmi(), next_table);
control =
@@ -6500,7 +6451,7 @@ Reduction JSCallReducer::ReduceCollectionIteratorPrototypeNext(
jsgraph()->NoContextConstant(), effect);
index = effect = graph()->NewNode(
- common()->TypeGuard(TypeCache::Get().kFixedArrayLengthType), index,
+ common()->TypeGuard(TypeCache::Get()->kFixedArrayLengthType), index,
effect, control);
// Update the {index} and {table} on the {receiver}.
@@ -6540,15 +6491,15 @@ Reduction JSCallReducer::ReduceCollectionIteratorPrototypeNext(
// Compute the currently used capacity.
Node* number_of_buckets = effect = graph()->NewNode(
simplified()->LoadField(
- AccessBuilder::ForOrderedHashTableBaseNumberOfBuckets()),
+ AccessBuilder::ForOrderedHashMapOrSetNumberOfBuckets()),
table, effect, control);
Node* number_of_elements = effect = graph()->NewNode(
simplified()->LoadField(
- AccessBuilder::ForOrderedHashTableBaseNumberOfElements()),
+ AccessBuilder::ForOrderedHashMapOrSetNumberOfElements()),
table, effect, control);
Node* number_of_deleted_elements = effect = graph()->NewNode(
simplified()->LoadField(
- AccessBuilder::ForOrderedHashTableBaseNumberOfDeletedElements()),
+ AccessBuilder::ForOrderedHashMapOrSetNumberOfDeletedElements()),
table, effect, control);
Node* used_capacity =
graph()->NewNode(simplified()->NumberAdd(), number_of_elements,
@@ -6564,7 +6515,7 @@ Reduction JSCallReducer::ReduceCollectionIteratorPrototypeNext(
common()->Phi(MachineRepresentation::kTagged, 2), index, index, loop);
Node* index = effect = graph()->NewNode(
- common()->TypeGuard(TypeCache::Get().kFixedArrayLengthType), iloop,
+ common()->TypeGuard(TypeCache::Get()->kFixedArrayLengthType), iloop,
eloop, control);
{
Node* check0 = graph()->NewNode(simplified()->NumberLessThan(), index,
@@ -6590,6 +6541,8 @@ Reduction JSCallReducer::ReduceCollectionIteratorPrototypeNext(
Node* etrue0 = effect;
{
// Load the key of the entry.
+ STATIC_ASSERT(OrderedHashMap::HashTableStartIndex() ==
+ OrderedHashSet::HashTableStartIndex());
Node* entry_start_position = graph()->NewNode(
simplified()->NumberAdd(),
graph()->NewNode(
@@ -6597,7 +6550,7 @@ Reduction JSCallReducer::ReduceCollectionIteratorPrototypeNext(
graph()->NewNode(simplified()->NumberMultiply(), index,
jsgraph()->Constant(entry_size)),
number_of_buckets),
- jsgraph()->Constant(OrderedHashTableBase::kHashTableStartIndex));
+ jsgraph()->Constant(OrderedHashMap::HashTableStartIndex()));
Node* entry_key = etrue0 = graph()->NewNode(
simplified()->LoadElement(AccessBuilder::ForFixedArrayElement()),
table, entry_start_position, etrue0, if_true0);
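The nested NumberAdd/NumberMultiply above computes a slot index into the hash table's backing FixedArray, laid out as [header | buckets | entries] (the STATIC_ASSERT guarantees maps and sets start their entries at the same slot). With plain integers, using illustrative constants (for a map, entry_size covers key, value and chain link):

    #include <iostream>

    int main() {
      const int kHashTableStartIndex = 3;  // illustrative, not V8's real value
      const int number_of_buckets = 2;
      const int entry_size = 3;
      for (int index = 0; index < 2; ++index) {
        int entry_start = kHashTableStartIndex + number_of_buckets +
                          index * entry_size;
        std::cout << "entry " << index << " starts at slot " << entry_start
                  << "\n";  // slots 5 and 8
      }
    }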
@@ -6716,20 +6669,20 @@ Reduction JSCallReducer::ReduceArrayBufferViewAccessor(
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
- if (NodeProperties::HasInstanceTypeWitness(isolate(), receiver, effect,
+ if (NodeProperties::HasInstanceTypeWitness(broker(), receiver, effect,
instance_type)) {
// Load the {receiver}s field.
Node* value = effect = graph()->NewNode(simplified()->LoadField(access),
receiver, effect, control);
- // See if we can skip the neutering check.
- if (isolate()->IsArrayBufferNeuteringIntact()) {
+ // See if we can skip the detaching check.
+ if (isolate()->IsArrayBufferDetachingIntact()) {
// Add a code dependency so we are deoptimized in case an ArrayBuffer
- // gets neutered.
+ // gets detached.
dependencies()->DependOnProtector(PropertyCellRef(
- js_heap_broker(), factory()->array_buffer_neutering_protector()));
+ broker(), factory()->array_buffer_detaching_protector()));
} else {
- // Check whether {receiver}s JSArrayBuffer was neutered.
+ // Check whether {receiver}s JSArrayBuffer was detached.
Node* buffer = effect = graph()->NewNode(
simplified()->LoadField(AccessBuilder::ForJSArrayBufferViewBuffer()),
receiver, effect, control);
@@ -6740,11 +6693,11 @@ Reduction JSCallReducer::ReduceArrayBufferViewAccessor(
simplified()->NumberEqual(),
graph()->NewNode(
simplified()->NumberBitwiseAnd(), buffer_bit_field,
- jsgraph()->Constant(JSArrayBuffer::WasNeuteredBit::kMask)),
+ jsgraph()->Constant(JSArrayBuffer::WasDetachedBit::kMask)),
jsgraph()->ZeroConstant());
// TODO(turbofan): Ideally we would bail out here if the {receiver}s
- // JSArrayBuffer was neutered, but there's no way to guard against
+ // JSArrayBuffer was detached, but there's no way to guard against
// deoptimization loops right now, since the JSCall {node} is usually
// created from a LOAD_IC inlining, and so there's no CALL_IC slot
// from which we could use the speculation bit.
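
When the detaching protector is no longer intact, the reducer materializes the check as a single bit test on the buffer's bit field. A minimal sketch of that predicate (the mask value here is an assumption for illustration):

#include <cstdint>

// Assumed stand-in for JSArrayBuffer::WasDetachedBit::kMask.
constexpr uint32_t kWasDetachedMask = uint32_t{1} << 1;

// Matches the NumberEqual(NumberBitwiseAnd(bit_field, kMask), 0) subgraph:
// true iff the buffer is still attached.
bool BufferStillAttached(uint32_t bit_field) {
  return (bit_field & kWasDetachedMask) == 0;
}
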
@@ -6802,68 +6755,57 @@ Reduction JSCallReducer::ReduceDataViewAccess(Node* node, DataViewAccess access,
}
// Only do stuff if the {receiver} is really a DataView.
- if (NodeProperties::HasInstanceTypeWitness(isolate(), receiver, effect,
+ if (NodeProperties::HasInstanceTypeWitness(broker(), receiver, effect,
JS_DATA_VIEW_TYPE)) {
+ Node* byte_offset;
+
// Check that the {offset} is within range for the {receiver}.
HeapObjectMatcher m(receiver);
if (m.HasValue()) {
// We only deal with DataViews here whose [[ByteLength]] is at least
- // {element_size} and less than 2^31-{element_size}.
- Handle<JSDataView> dataview = Handle<JSDataView>::cast(m.Value());
- if (dataview->byte_length() < element_size ||
- dataview->byte_length() - element_size > kMaxInt) {
- return NoChange();
- }
-
- // The {receiver}s [[ByteOffset]] must be within Unsigned31 range.
- if (dataview->byte_offset() > kMaxInt) {
- return NoChange();
- }
+ // {element_size}, as for all other DataViews it'll be out-of-bounds.
+ JSDataViewRef dataview = m.Ref(broker()).AsJSDataView();
+ if (dataview.byte_length() < element_size) return NoChange();
// Check that the {offset} is within range of the {byte_length}.
Node* byte_length =
- jsgraph()->Constant(dataview->byte_length() - (element_size - 1));
+ jsgraph()->Constant(dataview.byte_length() - (element_size - 1));
offset = effect =
graph()->NewNode(simplified()->CheckBounds(p.feedback()), offset,
byte_length, effect, control);
- // Add the [[ByteOffset]] to compute the effective offset.
- Node* byte_offset = jsgraph()->Constant(dataview->byte_offset());
- offset = graph()->NewNode(simplified()->NumberAdd(), offset, byte_offset);
+ // Load the [[ByteOffset]] from the {dataview}.
+ byte_offset = jsgraph()->Constant(dataview.byte_offset());
} else {
// We only deal with DataViews here that have Smi [[ByteLength]]s.
Node* byte_length = effect =
graph()->NewNode(simplified()->LoadField(
AccessBuilder::ForJSArrayBufferViewByteLength()),
receiver, effect, control);
- byte_length = effect = graph()->NewNode(
- simplified()->CheckSmi(p.feedback()), byte_length, effect, control);
+
+ if (element_size > 1) {
+ // For non-byte accesses we also need to check that the {offset}
+ // plus the {element_size}-1 fits within the given {byte_length}.
+ // So to keep this as a single check on the {offset}, we subtract
+ // the {element_size}-1 from the {byte_length} here (clamped to
+ // positive safe integer range), and perform a check against that
+ // with the {offset} below.
+ byte_length = graph()->NewNode(
+ simplified()->NumberMax(), jsgraph()->ZeroConstant(),
+ graph()->NewNode(simplified()->NumberSubtract(), byte_length,
+ jsgraph()->Constant(element_size - 1)));
+ }
// Check that the {offset} is within range of the {byte_length}.
offset = effect =
graph()->NewNode(simplified()->CheckBounds(p.feedback()), offset,
byte_length, effect, control);
- if (element_size > 0) {
- // For non-byte accesses we also need to check that the {offset}
- // plus the {element_size}-1 fits within the given {byte_length}.
- Node* end_offset =
- graph()->NewNode(simplified()->NumberAdd(), offset,
- jsgraph()->Constant(element_size - 1));
- effect = graph()->NewNode(simplified()->CheckBounds(p.feedback()),
- end_offset, byte_length, effect, control);
- }
-
- // The {receiver}s [[ByteOffset]] also needs to be a (positive) Smi.
- Node* byte_offset = effect =
+ // Also load the [[ByteOffset]] from the {receiver}.
+ byte_offset = effect =
graph()->NewNode(simplified()->LoadField(
AccessBuilder::ForJSArrayBufferViewByteOffset()),
receiver, effect, control);
- byte_offset = effect = graph()->NewNode(
- simplified()->CheckSmi(p.feedback()), byte_offset, effect, control);
-
- // Compute the buffer index at which we'll read.
- offset = graph()->NewNode(simplified()->NumberAdd(), offset, byte_offset);
}
// Coerce {is_little_endian} to boolean.
@@ -6878,18 +6820,18 @@ Reduction JSCallReducer::ReduceDataViewAccess(Node* node, DataViewAccess access,
value, effect, control);
}
- // Get the underlying buffer and check that it has not been neutered.
+ // Get the underlying buffer and check that it has not been detached.
Node* buffer = effect = graph()->NewNode(
simplified()->LoadField(AccessBuilder::ForJSArrayBufferViewBuffer()),
receiver, effect, control);
- if (isolate()->IsArrayBufferNeuteringIntact()) {
+ if (isolate()->IsArrayBufferDetachingIntact()) {
// Add a code dependency so we are deoptimized in case an ArrayBuffer
- // gets neutered.
+ // gets detached.
dependencies()->DependOnProtector(PropertyCellRef(
- js_heap_broker(), factory()->array_buffer_neutering_protector()));
+ broker(), factory()->array_buffer_detaching_protector()));
} else {
- // Bail out if the {buffer} was neutered.
+ // Bail out if the {buffer} was detached.
Node* buffer_bit_field = effect = graph()->NewNode(
simplified()->LoadField(AccessBuilder::ForJSArrayBufferBitField()),
buffer, effect, control);
@@ -6897,10 +6839,10 @@ Reduction JSCallReducer::ReduceDataViewAccess(Node* node, DataViewAccess access,
simplified()->NumberEqual(),
graph()->NewNode(
simplified()->NumberBitwiseAnd(), buffer_bit_field,
- jsgraph()->Constant(JSArrayBuffer::WasNeuteredBit::kMask)),
+ jsgraph()->Constant(JSArrayBuffer::WasDetachedBit::kMask)),
jsgraph()->ZeroConstant());
effect = graph()->NewNode(
- simplified()->CheckIf(DeoptimizeReason::kArrayBufferWasNeutered,
+ simplified()->CheckIf(DeoptimizeReason::kArrayBufferWasDetached,
p.feedback()),
check, effect, control);
}
@@ -6913,15 +6855,17 @@ Reduction JSCallReducer::ReduceDataViewAccess(Node* node, DataViewAccess access,
switch (access) {
case DataViewAccess::kGet:
// Perform the load.
- value = effect = graph()->NewNode(
- simplified()->LoadDataViewElement(element_type), buffer,
- backing_store, offset, is_little_endian, effect, control);
+ value = effect =
+ graph()->NewNode(simplified()->LoadDataViewElement(element_type),
+ buffer, backing_store, byte_offset, offset,
+ is_little_endian, effect, control);
break;
case DataViewAccess::kSet:
// Perform the store.
- effect = graph()->NewNode(
- simplified()->StoreDataViewElement(element_type), buffer,
- backing_store, offset, value, is_little_endian, effect, control);
+ effect =
+ graph()->NewNode(simplified()->StoreDataViewElement(element_type),
+ buffer, backing_store, byte_offset, offset, value,
+ is_little_endian, effect, control);
value = jsgraph()->UndefinedConstant();
break;
}
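
Note how the rewrite trades two bounds checks for one: instead of checking {offset} and {offset} + {element_size} - 1 separately, it shrinks the length by {element_size} - 1 (clamped at zero) and checks {offset} once. A self-contained sketch of that arithmetic:

#include <cstddef>

// One-shot bounds test for an element_size-byte DataView access at `offset`,
// mirroring the NumberMax/NumberSubtract/CheckBounds sequence above.
bool AccessInBounds(std::size_t offset, std::size_t element_size,
                    std::size_t byte_length) {
  // Clamp so the subtraction cannot underflow (the graph uses NumberMax(0, ...)).
  std::size_t effective_length =
      byte_length > element_size - 1 ? byte_length - (element_size - 1) : 0;
  return offset < effective_length;
}
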
@@ -6989,7 +6933,7 @@ Reduction JSCallReducer::ReduceDatePrototypeGetTime(Node* node) {
Node* receiver = NodeProperties::GetValueInput(node, 1);
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
- if (NodeProperties::HasInstanceTypeWitness(isolate(), receiver, effect,
+ if (NodeProperties::HasInstanceTypeWitness(broker(), receiver, effect,
JS_DATE_TYPE)) {
Node* value = effect = graph()->NewNode(
simplified()->LoadField(AccessBuilder::ForJSDateValue()), receiver,
@@ -7053,8 +6997,7 @@ Reduction JSCallReducer::ReduceRegExpPrototypeTest(Node* node) {
// Check if we know something about the {regexp}.
ZoneHandleSet<Map> regexp_maps;
NodeProperties::InferReceiverMapsResult result =
- NodeProperties::InferReceiverMaps(isolate(), regexp, effect,
- &regexp_maps);
+ NodeProperties::InferReceiverMaps(broker(), regexp, effect, &regexp_maps);
bool need_map_check = false;
switch (result) {
@@ -7068,31 +7011,63 @@ Reduction JSCallReducer::ReduceRegExpPrototypeTest(Node* node) {
}
for (auto map : regexp_maps) {
- if (map->instance_type() != JS_REGEXP_TYPE) return NoChange();
+ MapRef receiver_map(broker(), map);
+ if (receiver_map.instance_type() != JS_REGEXP_TYPE) return NoChange();
}
// Compute property access info for "exec" on {resolution}.
PropertyAccessInfo ai_exec;
- AccessInfoFactory access_info_factory(js_heap_broker(), dependencies(),
- native_context(), graph()->zone());
+ AccessInfoFactory access_info_factory(
+ broker(), dependencies(), native_context().object(), graph()->zone());
if (!access_info_factory.ComputePropertyAccessInfo(
MapHandles(regexp_maps.begin(), regexp_maps.end()),
factory()->exec_string(), AccessMode::kLoad, &ai_exec)) {
return NoChange();
}
// If "exec" has been modified on {regexp}, we can't do anything.
- if (!ai_exec.IsDataConstant()) return NoChange();
- Handle<Object> exec_on_proto = ai_exec.constant();
- if (*exec_on_proto != *isolate()->regexp_exec_function()) return NoChange();
+ if (ai_exec.IsDataConstant()) {
+ if (!ai_exec.constant().is_identical_to(
+ isolate()->regexp_exec_function())) {
+ return NoChange();
+ }
+ } else if (ai_exec.IsDataConstantField()) {
+ Handle<JSObject> holder;
+ // Do not reduce if the exec method is not on the prototype chain.
+ if (!ai_exec.holder().ToHandle(&holder)) return NoChange();
+
+ // Bail out if the exec method is not the original one.
+ Handle<Object> constant = JSObject::FastPropertyAt(
+ holder, Representation::Tagged(), ai_exec.field_index());
+ if (!constant.is_identical_to(isolate()->regexp_exec_function())) {
+ return NoChange();
+ }
+
+ // Protect the prototype chain from changes.
+ dependencies()->DependOnStablePrototypeChains(
+ broker(), ai_exec.receiver_maps(), JSObjectRef(broker(), holder));
+
+ // Protect the exec method change in the holder.
+ Handle<Object> exec_on_proto;
+ Handle<Map> holder_map(holder->map(), isolate());
+ Handle<DescriptorArray> descriptors(holder_map->instance_descriptors(),
+ isolate());
+ int descriptor_index =
+ descriptors->Search(*(factory()->exec_string()), *holder_map);
+ CHECK_NE(descriptor_index, DescriptorArray::kNotFound);
- PropertyAccessBuilder access_builder(jsgraph(), js_heap_broker(),
- dependencies());
+ dependencies()->DependOnFieldType(MapRef(broker(), holder_map),
+ descriptor_index);
+ } else {
+ return NoChange();
+ }
+
+ PropertyAccessBuilder access_builder(jsgraph(), broker(), dependencies());
// Add proper dependencies on the {regexp}s [[Prototype]]s.
Handle<JSObject> holder;
if (ai_exec.holder().ToHandle(&holder)) {
dependencies()->DependOnStablePrototypeChains(
- js_heap_broker(), native_context(), ai_exec.receiver_maps(), holder);
+ broker(), ai_exec.receiver_maps(), JSObjectRef(broker(), holder));
}
if (need_map_check) {
@@ -7145,8 +7120,8 @@ Reduction JSCallReducer::ReduceNumberConstructor(Node* node) {
Node* frame_state = NodeProperties::GetFrameStateInput(node);
// Create the artificial frame state in the middle of the Number constructor.
- Handle<SharedFunctionInfo> shared_info(
- handle(native_context()->number_function()->shared(), isolate()));
+ SharedFunctionInfoRef shared_info =
+ native_context().number_function().shared();
Node* stack_parameters[] = {receiver};
int stack_parameter_count = arraysize(stack_parameters);
Node* continuation_frame_state =
@@ -7169,11 +7144,6 @@ Isolate* JSCallReducer::isolate() const { return jsgraph()->isolate(); }
Factory* JSCallReducer::factory() const { return isolate()->factory(); }
-Handle<JSGlobalProxy> JSCallReducer::global_proxy() const {
- return handle(JSGlobalProxy::cast(native_context()->global_proxy()),
- isolate());
-}
-
CommonOperatorBuilder* JSCallReducer::common() const {
return jsgraph()->common();
}
diff --git a/deps/v8/src/compiler/js-call-reducer.h b/deps/v8/src/compiler/js-call-reducer.h
index 6683a0b18e..6566f4ed50 100644
--- a/deps/v8/src/compiler/js-call-reducer.h
+++ b/deps/v8/src/compiler/js-call-reducer.h
@@ -38,14 +38,12 @@ class V8_EXPORT_PRIVATE JSCallReducer final : public AdvancedReducer {
enum Flag { kNoFlags = 0u, kBailoutOnUninitialized = 1u << 0 };
typedef base::Flags<Flag> Flags;
- JSCallReducer(Editor* editor, JSGraph* jsgraph, JSHeapBroker* js_heap_broker,
- Flags flags, Handle<Context> native_context,
- CompilationDependencies* dependencies)
+ JSCallReducer(Editor* editor, JSGraph* jsgraph, JSHeapBroker* broker,
+ Flags flags, CompilationDependencies* dependencies)
: AdvancedReducer(editor),
jsgraph_(jsgraph),
- js_heap_broker_(js_heap_broker),
+ broker_(broker),
flags_(flags),
- native_context_(native_context),
dependencies_(dependencies) {}
const char* reducer_name() const override { return "JSCallReducer"; }
@@ -60,7 +58,7 @@ class V8_EXPORT_PRIVATE JSCallReducer final : public AdvancedReducer {
Reduction ReduceArrayConstructor(Node* node);
Reduction ReduceBooleanConstructor(Node* node);
Reduction ReduceCallApiFunction(Node* node,
- Handle<SharedFunctionInfo> shared);
+ const SharedFunctionInfoRef& shared);
Reduction ReduceFunctionPrototypeApply(Node* node);
Reduction ReduceFunctionPrototypeBind(Node* node);
Reduction ReduceFunctionPrototypeCall(Node* node);
@@ -78,20 +76,20 @@ class V8_EXPORT_PRIVATE JSCallReducer final : public AdvancedReducer {
Reduction ReduceReflectGet(Node* node);
Reduction ReduceReflectGetPrototypeOf(Node* node);
Reduction ReduceReflectHas(Node* node);
- Reduction ReduceArrayForEach(Node* node, Handle<SharedFunctionInfo> shared);
+ Reduction ReduceArrayForEach(Node* node, const SharedFunctionInfoRef& shared);
enum class ArrayReduceDirection { kLeft, kRight };
Reduction ReduceArrayReduce(Node* node, ArrayReduceDirection direction,
- Handle<SharedFunctionInfo> shared);
- Reduction ReduceArrayMap(Node* node, Handle<SharedFunctionInfo> shared);
- Reduction ReduceArrayFilter(Node* node, Handle<SharedFunctionInfo> shared);
+ const SharedFunctionInfoRef& shared);
+ Reduction ReduceArrayMap(Node* node, const SharedFunctionInfoRef& shared);
+ Reduction ReduceArrayFilter(Node* node, const SharedFunctionInfoRef& shared);
enum class ArrayFindVariant { kFind, kFindIndex };
Reduction ReduceArrayFind(Node* node, ArrayFindVariant variant,
- Handle<SharedFunctionInfo> shared);
- Reduction ReduceArrayEvery(Node* node, Handle<SharedFunctionInfo> shared);
+ const SharedFunctionInfoRef& shared);
+ Reduction ReduceArrayEvery(Node* node, const SharedFunctionInfoRef& shared);
enum class SearchVariant { kIncludes, kIndexOf };
Reduction ReduceArrayIndexOfIncludes(SearchVariant search_variant,
Node* node);
- Reduction ReduceArraySome(Node* node, Handle<SharedFunctionInfo> shared);
+ Reduction ReduceArraySome(Node* node, const SharedFunctionInfoRef& shared);
Reduction ReduceArrayPrototypePush(Node* node);
Reduction ReduceArrayPrototypePop(Node* node);
Reduction ReduceArrayPrototypeShift(Node* node);
@@ -110,7 +108,7 @@ class V8_EXPORT_PRIVATE JSCallReducer final : public AdvancedReducer {
Reduction ReduceJSConstructWithArrayLike(Node* node);
Reduction ReduceJSConstructWithSpread(Node* node);
Reduction ReduceJSCall(Node* node);
- Reduction ReduceJSCall(Node* node, Handle<SharedFunctionInfo> shared);
+ Reduction ReduceJSCall(Node* node, const SharedFunctionInfoRef& shared);
Reduction ReduceJSCallWithArrayLike(Node* node);
Reduction ReduceJSCallWithSpread(Node* node);
Reduction ReduceRegExpPrototypeTest(Node* node);
@@ -132,11 +130,8 @@ class V8_EXPORT_PRIVATE JSCallReducer final : public AdvancedReducer {
Reduction ReduceStringFromCodePoint(Node* node);
Reduction ReduceStringPrototypeIterator(Node* node);
Reduction ReduceStringIteratorPrototypeNext(Node* node);
- Reduction ReduceStringPrototypeConcat(Node* node,
- Handle<SharedFunctionInfo> shared);
+ Reduction ReduceStringPrototypeConcat(Node* node);
- Reduction ReduceAsyncFunctionPromiseCreate(Node* node);
- Reduction ReduceAsyncFunctionPromiseRelease(Node* node);
Reduction ReducePromiseConstructor(Node* node);
Reduction ReducePromiseInternalConstructor(Node* node);
Reduction ReducePromiseInternalReject(Node* node);
@@ -147,7 +142,7 @@ class V8_EXPORT_PRIVATE JSCallReducer final : public AdvancedReducer {
Reduction ReducePromiseResolveTrampoline(Node* node);
Reduction ReduceTypedArrayConstructor(Node* node,
- Handle<SharedFunctionInfo> shared);
+ const SharedFunctionInfoRef& shared);
Reduction ReduceTypedArrayPrototypeToStringTag(Node* node);
Reduction ReduceSoftDeoptimize(Node* node, DeoptimizeReason reason);
@@ -230,16 +225,15 @@ class V8_EXPORT_PRIVATE JSCallReducer final : public AdvancedReducer {
Node* CreateArtificialFrameState(Node* node, Node* outer_frame_state,
int parameter_count, BailoutId bailout_id,
FrameStateType frame_state_type,
- Handle<SharedFunctionInfo> shared,
+ const SharedFunctionInfoRef& shared,
Node* context = nullptr);
Graph* graph() const;
JSGraph* jsgraph() const { return jsgraph_; }
- JSHeapBroker* js_heap_broker() const { return js_heap_broker_; }
+ JSHeapBroker* broker() const { return broker_; }
Isolate* isolate() const;
Factory* factory() const;
- Handle<Context> native_context() const { return native_context_; }
- Handle<JSGlobalProxy> global_proxy() const;
+ NativeContextRef native_context() const { return broker()->native_context(); }
CommonOperatorBuilder* common() const;
JSOperatorBuilder* javascript() const;
SimplifiedOperatorBuilder* simplified() const;
@@ -247,9 +241,8 @@ class V8_EXPORT_PRIVATE JSCallReducer final : public AdvancedReducer {
CompilationDependencies* dependencies() const { return dependencies_; }
JSGraph* const jsgraph_;
- JSHeapBroker* const js_heap_broker_;
+ JSHeapBroker* const broker_;
Flags const flags_;
- Handle<Context> const native_context_;
CompilationDependencies* const dependencies_;
std::set<Node*> waitlist_;
};
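
Throughout this header the signatures change from Handle<SharedFunctionInfo> to const SharedFunctionInfoRef&: reducers now consume broker refs, which bundle the JSHeapBroker with data it may have serialized ahead of time, instead of dereferencing heap handles directly. A rough sketch of the wrapper shape, with all names assumed for illustration (the real ObjectRef hierarchy lives in js-heap-broker.h and is considerably richer):

// Illustrative shape only; names are placeholders.
struct SharedFunctionInfoData {
  int internal_formal_parameter_count = 0;  // example snapshotted field
};

class JSHeapBroker;  // opaque in this sketch

class SharedFunctionInfoRef {
 public:
  SharedFunctionInfoRef(JSHeapBroker* broker,
                        const SharedFunctionInfoData* data)
      : broker_(broker), data_(data) {}

  JSHeapBroker* broker() const { return broker_; }
  int internal_formal_parameter_count() const {
    return data_->internal_formal_parameter_count;  // snapshot read, no heap access
  }

 private:
  JSHeapBroker* broker_;
  const SharedFunctionInfoData* data_;
};
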
diff --git a/deps/v8/src/compiler/js-context-specialization.cc b/deps/v8/src/compiler/js-context-specialization.cc
index 1b2f3c3a7c..cac3b0dd65 100644
--- a/deps/v8/src/compiler/js-context-specialization.cc
+++ b/deps/v8/src/compiler/js-context-specialization.cc
@@ -135,7 +135,7 @@ Reduction JSContextSpecialization::ReduceJSLoadContext(Node* node) {
Node* context = NodeProperties::GetOuterContext(node, &depth);
base::Optional<ContextRef> maybe_concrete =
- GetSpecializationContext(js_heap_broker(), context, &depth, outer());
+ GetSpecializationContext(broker(), context, &depth, outer());
if (!maybe_concrete.has_value()) {
// We do not have a concrete context object, so we can only partially reduce
// the load by folding-in the outer context node.
@@ -197,7 +197,7 @@ Reduction JSContextSpecialization::ReduceJSStoreContext(Node* node) {
Node* context = NodeProperties::GetOuterContext(node, &depth);
base::Optional<ContextRef> maybe_concrete =
- GetSpecializationContext(js_heap_broker(), context, &depth, outer());
+ GetSpecializationContext(broker(), context, &depth, outer());
if (!maybe_concrete.has_value()) {
// We do not have a concrete context object, so we can only partially reduce
// the load by folding-in the outer context node.
diff --git a/deps/v8/src/compiler/js-context-specialization.h b/deps/v8/src/compiler/js-context-specialization.h
index 7324c5aaf0..e93fbc7dfb 100644
--- a/deps/v8/src/compiler/js-context-specialization.h
+++ b/deps/v8/src/compiler/js-context-specialization.h
@@ -18,11 +18,12 @@ class JSOperatorBuilder;
// Pair of a context and its distance from some point of reference.
struct OuterContext {
- OuterContext() : context(), distance() {}
+ OuterContext() = default;
OuterContext(Handle<Context> context_, size_t distance_)
: context(context_), distance(distance_) {}
+
Handle<Context> context;
- size_t distance;
+ size_t distance = 0;
};
// Specializes a given JSGraph to a given context, potentially constant folding
@@ -34,14 +35,13 @@ struct OuterContext {
class JSContextSpecialization final : public AdvancedReducer {
public:
JSContextSpecialization(Editor* editor, JSGraph* jsgraph,
- JSHeapBroker* js_heap_broker,
- Maybe<OuterContext> outer,
+ JSHeapBroker* broker, Maybe<OuterContext> outer,
MaybeHandle<JSFunction> closure)
: AdvancedReducer(editor),
jsgraph_(jsgraph),
outer_(outer),
closure_(closure),
- js_heap_broker_(js_heap_broker) {}
+ broker_(broker) {}
const char* reducer_name() const override {
return "JSContextSpecialization";
@@ -63,12 +63,12 @@ class JSContextSpecialization final : public AdvancedReducer {
JSGraph* jsgraph() const { return jsgraph_; }
Maybe<OuterContext> outer() const { return outer_; }
MaybeHandle<JSFunction> closure() const { return closure_; }
- JSHeapBroker* js_heap_broker() const { return js_heap_broker_; }
+ JSHeapBroker* broker() const { return broker_; }
JSGraph* const jsgraph_;
Maybe<OuterContext> outer_;
MaybeHandle<JSFunction> closure_;
- JSHeapBroker* const js_heap_broker_;
+ JSHeapBroker* const broker_;
DISALLOW_COPY_AND_ASSIGN(JSContextSpecialization);
};
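
The OuterContext tweak above is a small modernization: the hand-rolled default constructor becomes = default, and the scalar member picks up an in-class initializer, so its default value is stated exactly once. The same idiom in isolation:

#include <cstddef>

struct Span {
  Span() = default;  // relies on the member initializers below
  Span(std::size_t start_, std::size_t length_)
      : start(start_), length(length_) {}

  std::size_t start = 0;  // the default now sits next to the member
  std::size_t length = 0;
};
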
diff --git a/deps/v8/src/compiler/js-create-lowering.cc b/deps/v8/src/compiler/js-create-lowering.cc
index 3848e1f814..8388714c5d 100644
--- a/deps/v8/src/compiler/js-create-lowering.cc
+++ b/deps/v8/src/compiler/js-create-lowering.cc
@@ -6,7 +6,7 @@
#include "src/code-factory.h"
#include "src/compiler/access-builder.h"
-#include "src/compiler/allocation-builder.h"
+#include "src/compiler/allocation-builder-inl.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/compilation-dependencies.h"
#include "src/compiler/js-graph.h"
@@ -21,6 +21,7 @@
#include "src/objects-inl.h"
#include "src/objects/arguments.h"
#include "src/objects/hash-table-inl.h"
+#include "src/objects/heap-number.h"
#include "src/objects/js-generator.h"
#include "src/objects/js-promise.h"
#include "src/objects/js-regexp-inl.h"
@@ -47,7 +48,7 @@ bool IsAllocationInlineable(const JSFunctionRef& target,
CHECK_IMPLIES(new_target.has_initial_map(),
!new_target.initial_map().is_dictionary_map());
return new_target.has_initial_map() &&
- new_target.initial_map().constructor_or_backpointer().equals(target);
+ new_target.initial_map().GetConstructor().equals(target);
}
// When initializing arrays, we'll unfold the loop if the number of
@@ -71,6 +72,8 @@ Reduction JSCreateLowering::Reduce(Node* node) {
return ReduceJSCreateArray(node);
case IrOpcode::kJSCreateArrayIterator:
return ReduceJSCreateArrayIterator(node);
+ case IrOpcode::kJSCreateAsyncFunctionObject:
+ return ReduceJSCreateAsyncFunctionObject(node);
case IrOpcode::kJSCreateBoundFunction:
return ReduceJSCreateBoundFunction(node);
case IrOpcode::kJSCreateClosure:
@@ -171,7 +174,7 @@ Reduction JSCreateLowering::ReduceJSCreateArguments(Node* node) {
Node* const outer_state = frame_state->InputAt(kFrameStateOuterStateInput);
Node* const control = graph()->start();
FrameStateInfo state_info = FrameStateInfoOf(frame_state->op());
- SharedFunctionInfoRef shared(js_heap_broker(),
+ SharedFunctionInfoRef shared(broker(),
state_info.shared_info().ToHandleChecked());
// Use the ArgumentsAccessStub for materializing both mapped and unmapped
@@ -203,7 +206,7 @@ Reduction JSCreateLowering::ReduceJSCreateArguments(Node* node) {
// Actually allocate and initialize the arguments object.
AllocationBuilder a(jsgraph(), effect, control);
Node* properties = jsgraph()->EmptyFixedArrayConstant();
- STATIC_ASSERT(JSSloppyArgumentsObject::kSize == 5 * kPointerSize);
+ STATIC_ASSERT(JSSloppyArgumentsObject::kSize == 5 * kTaggedSize);
a.Allocate(JSSloppyArgumentsObject::kSize);
a.Store(AccessBuilder::ForMap(), arguments_map);
a.Store(AccessBuilder::ForJSObjectPropertiesOrHash(), properties);
@@ -232,7 +235,7 @@ Reduction JSCreateLowering::ReduceJSCreateArguments(Node* node) {
// Actually allocate and initialize the arguments object.
AllocationBuilder a(jsgraph(), effect, control);
Node* properties = jsgraph()->EmptyFixedArrayConstant();
- STATIC_ASSERT(JSStrictArgumentsObject::kSize == 4 * kPointerSize);
+ STATIC_ASSERT(JSStrictArgumentsObject::kSize == 4 * kTaggedSize);
a.Allocate(JSStrictArgumentsObject::kSize);
a.Store(AccessBuilder::ForMap(), arguments_map);
a.Store(AccessBuilder::ForJSObjectPropertiesOrHash(), properties);
@@ -262,7 +265,7 @@ Reduction JSCreateLowering::ReduceJSCreateArguments(Node* node) {
// Actually allocate and initialize the jsarray.
AllocationBuilder a(jsgraph(), effect, control);
Node* properties = jsgraph()->EmptyFixedArrayConstant();
- STATIC_ASSERT(JSArray::kSize == 4 * kPointerSize);
+ STATIC_ASSERT(JSArray::kSize == 4 * kTaggedSize);
a.Allocate(JSArray::kSize);
a.Store(AccessBuilder::ForMap(), jsarray_map);
a.Store(AccessBuilder::ForJSObjectPropertiesOrHash(), properties);
@@ -308,7 +311,7 @@ Reduction JSCreateLowering::ReduceJSCreateArguments(Node* node) {
AllocationBuilder a(jsgraph(), effect, control);
Node* properties = jsgraph()->EmptyFixedArrayConstant();
int length = args_state_info.parameter_count() - 1; // Minus receiver.
- STATIC_ASSERT(JSSloppyArgumentsObject::kSize == 5 * kPointerSize);
+ STATIC_ASSERT(JSSloppyArgumentsObject::kSize == 5 * kTaggedSize);
a.Allocate(JSSloppyArgumentsObject::kSize);
a.Store(AccessBuilder::ForMap(), arguments_map);
a.Store(AccessBuilder::ForJSObjectPropertiesOrHash(), properties);
@@ -344,7 +347,7 @@ Reduction JSCreateLowering::ReduceJSCreateArguments(Node* node) {
AllocationBuilder a(jsgraph(), effect, control);
Node* properties = jsgraph()->EmptyFixedArrayConstant();
int length = args_state_info.parameter_count() - 1; // Minus receiver.
- STATIC_ASSERT(JSStrictArgumentsObject::kSize == 4 * kPointerSize);
+ STATIC_ASSERT(JSStrictArgumentsObject::kSize == 4 * kTaggedSize);
a.Allocate(JSStrictArgumentsObject::kSize);
a.Store(AccessBuilder::ForMap(), arguments_map);
a.Store(AccessBuilder::ForJSObjectPropertiesOrHash(), properties);
@@ -384,7 +387,7 @@ Reduction JSCreateLowering::ReduceJSCreateArguments(Node* node) {
// -1 to minus receiver
int argument_count = args_state_info.parameter_count() - 1;
int length = std::max(0, argument_count - start_index);
- STATIC_ASSERT(JSArray::kSize == 4 * kPointerSize);
+ STATIC_ASSERT(JSArray::kSize == 4 * kTaggedSize);
a.Allocate(JSArray::kSize);
a.Store(AccessBuilder::ForMap(), jsarray_map);
a.Store(AccessBuilder::ForJSObjectPropertiesOrHash(), properties);
@@ -633,7 +636,7 @@ Reduction JSCreateLowering::ReduceJSCreateArray(Node* node) {
{
Handle<AllocationSite> site;
if (p.site().ToHandle(&site)) {
- site_ref = AllocationSiteRef(js_heap_broker(), site);
+ site_ref = AllocationSiteRef(broker(), site);
}
}
PretenureFlag pretenure = NOT_TENURED;
@@ -671,7 +674,10 @@ Reduction JSCreateLowering::ReduceJSCreateArray(Node* node) {
pretenure = dependencies()->DependOnPretenureMode(*site_ref);
dependencies()->DependOnElementsKind(*site_ref);
} else {
- can_inline_call = isolate()->IsArrayConstructorIntact();
+ CellRef array_constructor_protector(
+ broker(), factory()->array_constructor_protector());
+ can_inline_call = array_constructor_protector.value().AsSmi() ==
+ Isolate::kProtectorValid;
}
if (arity == 0) {
@@ -783,6 +789,49 @@ Reduction JSCreateLowering::ReduceJSCreateArrayIterator(Node* node) {
return Changed(node);
}
+Reduction JSCreateLowering::ReduceJSCreateAsyncFunctionObject(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSCreateAsyncFunctionObject, node->opcode());
+ int const register_count = RegisterCountOf(node->op());
+ Node* closure = NodeProperties::GetValueInput(node, 0);
+ Node* receiver = NodeProperties::GetValueInput(node, 1);
+ Node* promise = NodeProperties::GetValueInput(node, 2);
+ Node* context = NodeProperties::GetContextInput(node);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+
+ // Create the register file.
+ AllocationBuilder ab(jsgraph(), effect, control);
+ ab.AllocateArray(register_count, factory()->fixed_array_map());
+ for (int i = 0; i < register_count; ++i) {
+ ab.Store(AccessBuilder::ForFixedArraySlot(i),
+ jsgraph()->UndefinedConstant());
+ }
+ Node* parameters_and_registers = effect = ab.Finish();
+
+ // Create the JSAsyncFunctionObject result.
+ AllocationBuilder a(jsgraph(), effect, control);
+ a.Allocate(JSAsyncFunctionObject::kSize);
+ Node* empty_fixed_array = jsgraph()->EmptyFixedArrayConstant();
+ a.Store(AccessBuilder::ForMap(),
+ native_context().async_function_object_map());
+ a.Store(AccessBuilder::ForJSObjectPropertiesOrHash(), empty_fixed_array);
+ a.Store(AccessBuilder::ForJSObjectElements(), empty_fixed_array);
+ a.Store(AccessBuilder::ForJSGeneratorObjectContext(), context);
+ a.Store(AccessBuilder::ForJSGeneratorObjectFunction(), closure);
+ a.Store(AccessBuilder::ForJSGeneratorObjectReceiver(), receiver);
+ a.Store(AccessBuilder::ForJSGeneratorObjectInputOrDebugPos(),
+ jsgraph()->UndefinedConstant());
+ a.Store(AccessBuilder::ForJSGeneratorObjectResumeMode(),
+ jsgraph()->Constant(JSGeneratorObject::kNext));
+ a.Store(AccessBuilder::ForJSGeneratorObjectContinuation(),
+ jsgraph()->Constant(JSGeneratorObject::kGeneratorExecuting));
+ a.Store(AccessBuilder::ForJSGeneratorObjectParametersAndRegisters(),
+ parameters_and_registers);
+ a.Store(AccessBuilder::ForJSAsyncFunctionObjectPromise(), promise);
+ a.FinishAndChange(node);
+ return Changed(node);
+}
+
namespace {
MapRef MapForCollectionIterationKind(const NativeContextRef& native_context,
@@ -851,7 +900,7 @@ Reduction JSCreateLowering::ReduceJSCreateBoundFunction(Node* node) {
CreateBoundFunctionParameters const& p =
CreateBoundFunctionParametersOf(node->op());
int const arity = static_cast<int>(p.arity());
- MapRef const map(js_heap_broker(), p.map());
+ MapRef const map(broker(), p.map());
Node* bound_target_function = NodeProperties::GetValueInput(node, 0);
Node* bound_this = NodeProperties::GetValueInput(node, 1);
Node* effect = NodeProperties::GetEffectInput(node);
@@ -889,9 +938,9 @@ Reduction JSCreateLowering::ReduceJSCreateBoundFunction(Node* node) {
Reduction JSCreateLowering::ReduceJSCreateClosure(Node* node) {
DCHECK_EQ(IrOpcode::kJSCreateClosure, node->opcode());
CreateClosureParameters const& p = CreateClosureParametersOf(node->op());
- SharedFunctionInfoRef shared(js_heap_broker(), p.shared_info());
- HeapObjectRef feedback_cell(js_heap_broker(), p.feedback_cell());
- HeapObjectRef code(js_heap_broker(), p.code());
+ SharedFunctionInfoRef shared(broker(), p.shared_info());
+ HeapObjectRef feedback_cell(broker(), p.feedback_cell());
+ HeapObjectRef code(broker(), p.code());
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
Node* context = NodeProperties::GetContextInput(node);
@@ -900,7 +949,7 @@ Reduction JSCreateLowering::ReduceJSCreateClosure(Node* node) {
// seen more than one instantiation, this simplifies the generated code and
// also serves as a heuristic of which allocation sites benefit from it.
if (!feedback_cell.map().equals(
- MapRef(js_heap_broker(), factory()->many_closures_cell_map()))) {
+ MapRef(broker(), factory()->many_closures_cell_map()))) {
return NoChange();
}
@@ -921,7 +970,7 @@ Reduction JSCreateLowering::ReduceJSCreateClosure(Node* node) {
PretenureFlag pretenure = NOT_TENURED;
// Emit code to allocate the JSFunction instance.
- STATIC_ASSERT(JSFunction::kSizeWithoutPrototype == 7 * kPointerSize);
+ STATIC_ASSERT(JSFunction::kSizeWithoutPrototype == 7 * kTaggedSize);
AllocationBuilder a(jsgraph(), effect, control);
a.Allocate(function_map.instance_size(), pretenure, Type::Function());
a.Store(AccessBuilder::ForMap(), function_map);
@@ -933,11 +982,11 @@ Reduction JSCreateLowering::ReduceJSCreateClosure(Node* node) {
a.Store(AccessBuilder::ForJSFunctionContext(), context);
a.Store(AccessBuilder::ForJSFunctionFeedbackCell(), feedback_cell);
a.Store(AccessBuilder::ForJSFunctionCode(), code);
- STATIC_ASSERT(JSFunction::kSizeWithoutPrototype == 7 * kPointerSize);
+ STATIC_ASSERT(JSFunction::kSizeWithoutPrototype == 7 * kTaggedSize);
if (function_map.has_prototype_slot()) {
a.Store(AccessBuilder::ForJSFunctionPrototypeOrInitialMap(),
jsgraph()->TheHoleConstant());
- STATIC_ASSERT(JSFunction::kSizeWithPrototype == 8 * kPointerSize);
+ STATIC_ASSERT(JSFunction::kSizeWithPrototype == 8 * kTaggedSize);
}
for (int i = 0; i < function_map.GetInObjectProperties(); i++) {
a.Store(AccessBuilder::ForJSObjectInObjectProperty(function_map, i),
@@ -967,7 +1016,7 @@ Reduction JSCreateLowering::ReduceJSCreateIterResultObject(Node* node) {
jsgraph()->EmptyFixedArrayConstant());
a.Store(AccessBuilder::ForJSIteratorResultValue(), value);
a.Store(AccessBuilder::ForJSIteratorResultDone(), done);
- STATIC_ASSERT(JSIteratorResult::kSize == 5 * kPointerSize);
+ STATIC_ASSERT(JSIteratorResult::kSize == 5 * kTaggedSize);
a.FinishAndChange(node);
return Changed(node);
}
@@ -989,7 +1038,7 @@ Reduction JSCreateLowering::ReduceJSCreateStringIterator(Node* node) {
jsgraph()->EmptyFixedArrayConstant());
a.Store(AccessBuilder::ForJSStringIteratorString(), string);
a.Store(AccessBuilder::ForJSStringIteratorIndex(), jsgraph()->SmiConstant(0));
- STATIC_ASSERT(JSIteratorResult::kSize == 5 * kPointerSize);
+ STATIC_ASSERT(JSIteratorResult::kSize == 5 * kTaggedSize);
a.FinishAndChange(node);
return Changed(node);
}
@@ -1019,7 +1068,7 @@ Reduction JSCreateLowering::ReduceJSCreateKeyValueArray(Node* node) {
a.Store(AccessBuilder::ForJSObjectPropertiesOrHash(), properties);
a.Store(AccessBuilder::ForJSObjectElements(), elements);
a.Store(AccessBuilder::ForJSArrayLength(PACKED_ELEMENTS), length);
- STATIC_ASSERT(JSArray::kSize == 4 * kPointerSize);
+ STATIC_ASSERT(JSArray::kSize == 4 * kTaggedSize);
a.FinishAndChange(node);
return Changed(node);
}
@@ -1042,11 +1091,11 @@ Reduction JSCreateLowering::ReduceJSCreatePromise(Node* node) {
STATIC_ASSERT(v8::Promise::kPending == 0);
a.Store(AccessBuilder::ForJSObjectOffset(JSPromise::kFlagsOffset),
jsgraph()->ZeroConstant());
- STATIC_ASSERT(JSPromise::kSize == 5 * kPointerSize);
- for (int i = 0; i < v8::Promise::kEmbedderFieldCount; ++i) {
- a.Store(
- AccessBuilder::ForJSObjectOffset(JSPromise::kSize + i * kPointerSize),
- jsgraph()->ZeroConstant());
+ STATIC_ASSERT(JSPromise::kSize == 5 * kTaggedSize);
+ for (int offset = JSPromise::kSize;
+ offset < JSPromise::kSizeWithEmbedderFields; offset += kTaggedSize) {
+ a.Store(AccessBuilder::ForJSObjectOffset(offset),
+ jsgraph()->ZeroConstant());
}
a.FinishAndChange(node);
return Changed(node);
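
The promise-initialization loop now walks byte offsets between two layout bounds instead of multiplying an index by the slot size, which drops the separate kEmbedderFieldCount dependency and keeps every offset expressed in kTaggedSize units. A sketch of the new loop shape (constants assumed for illustration):

#include <cstdio>

constexpr int kTaggedSize = 8;                          // assumed slot size
constexpr int kSize = 5 * kTaggedSize;                  // end of named fields
constexpr int kSizeWithEmbedderFields = kSize + 2 * kTaggedSize;

int main() {
  // Old shape: index-based, needs the field count and a multiply:
  //   for (int i = 0; i < kEmbedderFieldCount; ++i) store(kSize + i * kTaggedSize);
  // New shape: offset-based, driven directly by the layout bounds.
  for (int offset = kSize; offset < kSizeWithEmbedderFields;
       offset += kTaggedSize) {
    std::printf("zero-initialize slot at offset %d\n", offset);
  }
}
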
@@ -1059,7 +1108,7 @@ Reduction JSCreateLowering::ReduceJSCreateLiteralArrayOrObject(Node* node) {
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
- FeedbackVectorRef feedback_vector(js_heap_broker(), p.feedback().vector());
+ FeedbackVectorRef feedback_vector(broker(), p.feedback().vector());
ObjectRef feedback = feedback_vector.get(p.feedback().slot());
if (feedback.IsAllocationSite()) {
AllocationSiteRef site = feedback.AsAllocationSite();
@@ -1082,7 +1131,7 @@ Reduction JSCreateLowering::ReduceJSCreateLiteralArrayOrObject(Node* node) {
Reduction JSCreateLowering::ReduceJSCreateEmptyLiteralArray(Node* node) {
DCHECK_EQ(IrOpcode::kJSCreateEmptyLiteralArray, node->opcode());
FeedbackParameter const& p = FeedbackParameterOf(node->op());
- FeedbackVectorRef fv(js_heap_broker(), p.feedback().vector());
+ FeedbackVectorRef fv(broker(), p.feedback().vector());
ObjectRef feedback = fv.get(p.feedback().slot());
if (feedback.IsAllocationSite()) {
AllocationSiteRef site = feedback.AsAllocationSite();
@@ -1139,7 +1188,7 @@ Reduction JSCreateLowering::ReduceJSCreateLiteralRegExp(Node* node) {
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
- FeedbackVectorRef feedback_vector(js_heap_broker(), p.feedback().vector());
+ FeedbackVectorRef feedback_vector(broker(), p.feedback().vector());
ObjectRef feedback = feedback_vector.get(p.feedback().slot());
if (feedback.IsJSRegExp()) {
JSRegExpRef boilerplate = feedback.AsJSRegExp();
@@ -1154,7 +1203,7 @@ Reduction JSCreateLowering::ReduceJSCreateFunctionContext(Node* node) {
DCHECK_EQ(IrOpcode::kJSCreateFunctionContext, node->opcode());
const CreateFunctionContextParameters& parameters =
CreateFunctionContextParametersOf(node->op());
- ScopeInfoRef scope_info(js_heap_broker(), parameters.scope_info());
+ ScopeInfoRef scope_info(broker(), parameters.scope_info());
int slot_count = parameters.slot_count();
ScopeType scope_type = parameters.scope_type();
@@ -1199,7 +1248,7 @@ Reduction JSCreateLowering::ReduceJSCreateFunctionContext(Node* node) {
Reduction JSCreateLowering::ReduceJSCreateWithContext(Node* node) {
DCHECK_EQ(IrOpcode::kJSCreateWithContext, node->opcode());
- ScopeInfoRef scope_info(js_heap_broker(), ScopeInfoOf(node->op()));
+ ScopeInfoRef scope_info(broker(), ScopeInfoOf(node->op()));
Node* extension = NodeProperties::GetValueInput(node, 0);
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
@@ -1220,7 +1269,7 @@ Reduction JSCreateLowering::ReduceJSCreateWithContext(Node* node) {
Reduction JSCreateLowering::ReduceJSCreateCatchContext(Node* node) {
DCHECK_EQ(IrOpcode::kJSCreateCatchContext, node->opcode());
- ScopeInfoRef scope_info(js_heap_broker(), ScopeInfoOf(node->op()));
+ ScopeInfoRef scope_info(broker(), ScopeInfoOf(node->op()));
Node* exception = NodeProperties::GetValueInput(node, 0);
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
@@ -1245,7 +1294,7 @@ Reduction JSCreateLowering::ReduceJSCreateCatchContext(Node* node) {
Reduction JSCreateLowering::ReduceJSCreateBlockContext(Node* node) {
DCHECK_EQ(IrOpcode::kJSCreateBlockContext, node->opcode());
- ScopeInfoRef scope_info(js_heap_broker(), ScopeInfoOf(node->op()));
+ ScopeInfoRef scope_info(broker(), ScopeInfoOf(node->op()));
int const context_length = scope_info.ContextLength();
// Use inline allocation for block contexts up to a size limit.
@@ -1303,8 +1352,7 @@ Reduction JSCreateLowering::ReduceJSCreateObject(Node* node) {
if (!prototype_type.IsHeapConstant()) return NoChange();
HeapObjectRef prototype_const = prototype_type.AsHeapConstant()->Ref();
- auto maybe_instance_map =
- GetObjectCreateMap(js_heap_broker(), prototype_const);
+ auto maybe_instance_map = GetObjectCreateMap(broker(), prototype_const);
if (!maybe_instance_map) return NoChange();
MapRef instance_map = maybe_instance_map.value();
@@ -1312,7 +1360,7 @@ Reduction JSCreateLowering::ReduceJSCreateObject(Node* node) {
if (instance_map.is_dictionary_map()) {
DCHECK_EQ(prototype_const.map().oddball_type(), OddballType::kNull);
// Allocate an empty NameDictionary as backing store for the properties.
- Handle<Map> map = isolate()->factory()->name_dictionary_map();
+ MapRef map(broker(), factory()->name_dictionary_map());
int capacity =
NameDictionary::ComputeCapacity(NameDictionary::kInitialCapacity);
DCHECK(base::bits::IsPowerOfTwo(capacity));
@@ -1364,7 +1412,7 @@ Reduction JSCreateLowering::ReduceJSCreateObject(Node* node) {
// Initialize Object fields.
Node* undefined = jsgraph()->UndefinedConstant();
for (int offset = JSObject::kHeaderSize; offset < instance_size;
- offset += kPointerSize) {
+ offset += kTaggedSize) {
a.Store(AccessBuilder::ForJSObjectOffset(offset, kNoWriteBarrier),
undefined);
}
@@ -1605,7 +1653,7 @@ Node* JSCreateLowering::AllocateFastLiteral(Node* effect, Node* control,
NameRef property_name = boilerplate_map.GetPropertyKey(i);
FieldIndex index = boilerplate_map.GetFieldIndexFor(i);
FieldAccess access = {
- kTaggedBase, index.offset(), property_name.object<Name>(),
+ kTaggedBase, index.offset(), property_name.object(),
MaybeHandle<Map>(), Type::Any(), MachineType::AnyTagged(),
kFullWriteBarrier};
Node* value;
@@ -1692,7 +1740,7 @@ Node* JSCreateLowering::AllocateFastLiteralElements(Node* effect, Node* control,
boilerplate.EnsureElementsTenured();
boilerplate_elements = boilerplate.elements();
}
- return jsgraph()->HeapConstant(boilerplate_elements.object<HeapObject>());
+ return jsgraph()->HeapConstant(boilerplate_elements.object());
}
// Compute the elements to store first (might have effects).
@@ -1721,7 +1769,7 @@ Node* JSCreateLowering::AllocateFastLiteralElements(Node* effect, Node* control,
// Allocate the backing store array and store the elements.
AllocationBuilder builder(jsgraph(), effect, control);
- builder.AllocateArray(elements_length, elements_map.object<Map>(), pretenure);
+ builder.AllocateArray(elements_length, elements_map.object(), pretenure);
ElementAccess const access =
(elements_map.instance_type() == FIXED_DOUBLE_ARRAY_TYPE)
? AccessBuilder::ForFixedDoubleArrayElement()
@@ -1737,18 +1785,18 @@ Node* JSCreateLowering::AllocateLiteralRegExp(Node* effect, Node* control,
MapRef boilerplate_map = boilerplate.map();
// Sanity check that JSRegExp object layout hasn't changed.
- STATIC_ASSERT(JSRegExp::kDataOffset == JSObject::kHeaderSize);
- STATIC_ASSERT(JSRegExp::kSourceOffset ==
- JSRegExp::kDataOffset + kPointerSize);
+ STATIC_ASSERT(static_cast<int>(JSRegExp::kDataOffset) ==
+ static_cast<int>(JSObject::kHeaderSize));
+ STATIC_ASSERT(JSRegExp::kSourceOffset == JSRegExp::kDataOffset + kTaggedSize);
STATIC_ASSERT(JSRegExp::kFlagsOffset ==
- JSRegExp::kSourceOffset + kPointerSize);
- STATIC_ASSERT(JSRegExp::kSize == JSRegExp::kFlagsOffset + kPointerSize);
+ JSRegExp::kSourceOffset + kTaggedSize);
+ STATIC_ASSERT(JSRegExp::kSize == JSRegExp::kFlagsOffset + kTaggedSize);
STATIC_ASSERT(JSRegExp::kLastIndexOffset == JSRegExp::kSize);
STATIC_ASSERT(JSRegExp::kInObjectFieldCount == 1); // LastIndex.
const PretenureFlag pretenure = NOT_TENURED;
const int size =
- JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
+ JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kTaggedSize;
AllocationBuilder builder(jsgraph(), effect, control);
builder.Allocate(size, pretenure, Type::For(boilerplate_map));
@@ -1766,12 +1814,12 @@ Node* JSCreateLowering::AllocateLiteralRegExp(Node* effect, Node* control,
return builder.Finish();
}
-Factory* JSCreateLowering::factory() const { return isolate()->factory(); }
+Factory* JSCreateLowering::factory() const {
+ return jsgraph()->isolate()->factory();
+}
Graph* JSCreateLowering::graph() const { return jsgraph()->graph(); }
-Isolate* JSCreateLowering::isolate() const { return jsgraph()->isolate(); }
-
CommonOperatorBuilder* JSCreateLowering::common() const {
return jsgraph()->common();
}
@@ -1781,7 +1829,7 @@ SimplifiedOperatorBuilder* JSCreateLowering::simplified() const {
}
NativeContextRef JSCreateLowering::native_context() const {
- return js_heap_broker()->native_context();
+ return broker()->native_context();
}
} // namespace compiler
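
All of this file's layout arithmetic migrates from kPointerSize to kTaggedSize. The distinction matters once tagged heap slots and machine pointers can have different widths, as under pointer compression; a minimal sketch of the relationship between the two constants (configuration macro is V8's real one, values illustrative):

#include <cstdint>

constexpr int kSystemPointerSize = static_cast<int>(sizeof(void*));
#ifdef V8_COMPRESS_POINTERS  // real V8 configuration macro
constexpr int kTaggedSize = 4;  // compressed tagged slot on a 64-bit host
#else
constexpr int kTaggedSize = kSystemPointerSize;
#endif

static_assert(kTaggedSize <= kSystemPointerSize,
              "tagged slots never exceed a machine word");
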
diff --git a/deps/v8/src/compiler/js-create-lowering.h b/deps/v8/src/compiler/js-create-lowering.h
index 4099edb7b6..7c4a51afb0 100644
--- a/deps/v8/src/compiler/js-create-lowering.h
+++ b/deps/v8/src/compiler/js-create-lowering.h
@@ -33,11 +33,11 @@ class V8_EXPORT_PRIVATE JSCreateLowering final
: public NON_EXPORTED_BASE(AdvancedReducer) {
public:
JSCreateLowering(Editor* editor, CompilationDependencies* dependencies,
- JSGraph* jsgraph, JSHeapBroker* js_heap_broker, Zone* zone)
+ JSGraph* jsgraph, JSHeapBroker* broker, Zone* zone)
: AdvancedReducer(editor),
dependencies_(dependencies),
jsgraph_(jsgraph),
- js_heap_broker_(js_heap_broker),
+ broker_(broker),
zone_(zone) {}
~JSCreateLowering() final = default;
@@ -50,6 +50,7 @@ class V8_EXPORT_PRIVATE JSCreateLowering final
Reduction ReduceJSCreateArguments(Node* node);
Reduction ReduceJSCreateArray(Node* node);
Reduction ReduceJSCreateArrayIterator(Node* node);
+ Reduction ReduceJSCreateAsyncFunctionObject(Node* node);
Reduction ReduceJSCreateCollectionIterator(Node* node);
Reduction ReduceJSCreateBoundFunction(Node* node);
Reduction ReduceJSCreateClosure(Node* node);
@@ -111,17 +112,16 @@ class V8_EXPORT_PRIVATE JSCreateLowering final
Factory* factory() const;
Graph* graph() const;
JSGraph* jsgraph() const { return jsgraph_; }
- Isolate* isolate() const;
NativeContextRef native_context() const;
CommonOperatorBuilder* common() const;
SimplifiedOperatorBuilder* simplified() const;
CompilationDependencies* dependencies() const { return dependencies_; }
- JSHeapBroker* js_heap_broker() const { return js_heap_broker_; }
+ JSHeapBroker* broker() const { return broker_; }
Zone* zone() const { return zone_; }
CompilationDependencies* const dependencies_;
JSGraph* const jsgraph_;
- JSHeapBroker* const js_heap_broker_;
+ JSHeapBroker* const broker_;
Zone* const zone_;
};
diff --git a/deps/v8/src/compiler/js-generic-lowering.cc b/deps/v8/src/compiler/js-generic-lowering.cc
index 731159f3d1..db64b984fb 100644
--- a/deps/v8/src/compiler/js-generic-lowering.cc
+++ b/deps/v8/src/compiler/js-generic-lowering.cc
@@ -7,7 +7,6 @@
#include "src/ast/ast.h"
#include "src/builtins/builtins-constructor.h"
#include "src/code-factory.h"
-#include "src/code-stubs.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/js-graph.h"
#include "src/compiler/machine-operator.h"
@@ -15,6 +14,7 @@
#include "src/compiler/node-properties.h"
#include "src/compiler/operator-properties.h"
#include "src/feedback-vector.h"
+#include "src/objects/feedback-cell.h"
#include "src/objects/scope-info.h"
namespace v8 {
@@ -87,6 +87,9 @@ REPLACE_STUB_CALL(ToName)
REPLACE_STUB_CALL(ToObject)
REPLACE_STUB_CALL(ToString)
REPLACE_STUB_CALL(ForInEnumerate)
+REPLACE_STUB_CALL(AsyncFunctionEnter)
+REPLACE_STUB_CALL(AsyncFunctionReject)
+REPLACE_STUB_CALL(AsyncFunctionResolve)
REPLACE_STUB_CALL(FulfillPromise)
REPLACE_STUB_CALL(PerformPromiseThen)
REPLACE_STUB_CALL(PromiseResolve)
@@ -401,6 +404,10 @@ void JSGenericLowering::LowerJSCreateArrayIterator(Node* node) {
UNREACHABLE(); // Eliminated in typed lowering.
}
+void JSGenericLowering::LowerJSCreateAsyncFunctionObject(Node* node) {
+ UNREACHABLE(); // Eliminated in typed lowering.
+}
+
void JSGenericLowering::LowerJSCreateCollectionIterator(Node* node) {
UNREACHABLE(); // Eliminated in typed lowering.
}
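
The three new REPLACE_STUB_CALL lines each stamp out a full lowering method that forwards the node to the same-named builtin (the real macro routes through Builtins::CallableFor and ReplaceWithStubCall). A stripped-down sketch of the expansion pattern:

#include <cstdio>

// Minimal stand-in types so the macro expansion below compiles.
struct Node {};
void ReplaceWithBuiltinCall(Node* node, const char* builtin) {
  std::printf("lowering node %p to builtin %s\n",
              static_cast<void*>(node), builtin);
}

// The pattern: one macro invocation per operator that lowers 1:1 to a builtin.
#define REPLACE_STUB_CALL(Name)            \
  void LowerJS##Name(Node* node) {         \
    ReplaceWithBuiltinCall(node, #Name);   \
  }

REPLACE_STUB_CALL(AsyncFunctionEnter)
REPLACE_STUB_CALL(AsyncFunctionReject)
REPLACE_STUB_CALL(AsyncFunctionResolve)
#undef REPLACE_STUB_CALL

int main() {
  Node n;
  LowerJSAsyncFunctionEnter(&n);  // generated by the macro above
}
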
diff --git a/deps/v8/src/compiler/js-graph.cc b/deps/v8/src/compiler/js-graph.cc
index 7b74c2a32c..eca30ad525 100644
--- a/deps/v8/src/compiler/js-graph.cc
+++ b/deps/v8/src/compiler/js-graph.cc
@@ -73,24 +73,23 @@ Node* JSGraph::Constant(const ObjectRef& ref) {
if (ref.IsHeapNumber()) {
return Constant(ref.AsHeapNumber().value());
} else if (oddball_type == OddballType::kUndefined) {
- DCHECK(
- ref.object<Object>().equals(isolate()->factory()->undefined_value()));
+ DCHECK(ref.object().equals(isolate()->factory()->undefined_value()));
return UndefinedConstant();
} else if (oddball_type == OddballType::kNull) {
- DCHECK(ref.object<Object>().equals(isolate()->factory()->null_value()));
+ DCHECK(ref.object().equals(isolate()->factory()->null_value()));
return NullConstant();
} else if (oddball_type == OddballType::kHole) {
- DCHECK(ref.object<Object>().equals(isolate()->factory()->the_hole_value()));
+ DCHECK(ref.object().equals(isolate()->factory()->the_hole_value()));
return TheHoleConstant();
} else if (oddball_type == OddballType::kBoolean) {
- if (ref.object<Object>().equals(isolate()->factory()->true_value())) {
+ if (ref.object().equals(isolate()->factory()->true_value())) {
return TrueConstant();
} else {
- DCHECK(ref.object<Object>().equals(isolate()->factory()->false_value()));
+ DCHECK(ref.object().equals(isolate()->factory()->false_value()));
return FalseConstant();
}
} else {
- return HeapConstant(ref.object<HeapObject>());
+ return HeapConstant(ref.AsHeapObject().object());
}
}
@@ -135,6 +134,8 @@ DEFINE_GETTER(AllocateInOldSpaceStubConstant,
DEFINE_GETTER(ArrayConstructorStubConstant,
HeapConstant(BUILTIN_CODE(isolate(), ArrayConstructorImpl)))
+DEFINE_GETTER(BooleanMapConstant, HeapConstant(factory()->boolean_map()))
+
DEFINE_GETTER(ToNumberBuiltinConstant,
HeapConstant(BUILTIN_CODE(isolate(), ToNumber)))
diff --git a/deps/v8/src/compiler/js-graph.h b/deps/v8/src/compiler/js-graph.h
index 774b8e7433..3ce87d37a6 100644
--- a/deps/v8/src/compiler/js-graph.h
+++ b/deps/v8/src/compiler/js-graph.h
@@ -84,6 +84,7 @@ class V8_EXPORT_PRIVATE JSGraph : public MachineGraph {
V(AllocateInNewSpaceStubConstant) \
V(AllocateInOldSpaceStubConstant) \
V(ArrayConstructorStubConstant) \
+ V(BooleanMapConstant) \
V(ToNumberBuiltinConstant) \
V(EmptyFixedArrayConstant) \
V(EmptyStringConstant) \
diff --git a/deps/v8/src/compiler/js-heap-broker.cc b/deps/v8/src/compiler/js-heap-broker.cc
index a95bfaad21..d4805d278a 100644
--- a/deps/v8/src/compiler/js-heap-broker.cc
+++ b/deps/v8/src/compiler/js-heap-broker.cc
@@ -11,6 +11,10 @@
#include "src/compiler/graph-reducer.h"
#include "src/compiler/per-isolate-compiler-cache.h"
#include "src/objects-inl.h"
+#include "src/objects/cell-inl.h"
+#include "src/objects/heap-number-inl.h"
+#include "src/objects/instance-type-inl.h"
+#include "src/objects/js-array-buffer-inl.h"
#include "src/objects/js-array-inl.h"
#include "src/objects/js-regexp-inl.h"
#include "src/objects/module-inl.h"
@@ -20,6 +24,8 @@ namespace v8 {
namespace internal {
namespace compiler {
+#define TRACE(broker, x) TRACE_BROKER(broker, x)
+
#define FORWARD_DECL(Name) class Name##Data;
HEAP_BROKER_OBJECT_LIST(FORWARD_DECL)
#undef FORWARD_DECL
@@ -50,12 +56,9 @@ class ObjectData : public ZoneObject {
// in an endless recursion.
*storage = this;
- broker->Trace("Creating data %p for handle %" V8PRIuPTR " (", this,
- object.address());
- if (FLAG_trace_heap_broker) {
- object->ShortPrint();
- PrintF(")\n");
- }
+ TRACE(broker, "Creating data " << this << " for handle " << object.address()
+ << " (" << Brief(*object) << ")");
+
CHECK_NOT_NULL(broker->isolate()->handle_scope_data()->canonical_scope);
}
@@ -107,9 +110,9 @@ class PropertyCellData : public HeapObjectData {
ObjectData* value_ = nullptr;
};
-void JSHeapBroker::IncrementTracingIndentation() { ++tracing_indentation_; }
+void JSHeapBroker::IncrementTracingIndentation() { ++trace_indentation_; }
-void JSHeapBroker::DecrementTracingIndentation() { --tracing_indentation_; }
+void JSHeapBroker::DecrementTracingIndentation() { --trace_indentation_; }
class TraceScope {
public:
@@ -126,7 +129,7 @@ class TraceScope {
TraceScope(JSHeapBroker* broker, void* self, const char* label)
: broker_(broker) {
- broker_->Trace("Running %s on %p.\n", label, self);
+ TRACE(broker_, "Running " << label << " on " << self << ".");
broker_->IncrementTracingIndentation();
}
};
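
Broker tracing switches from printf-style formatting to an ostream pipeline, so callers chain << and anything with an operator<< (pointers, Brief(*object), numbers) prints without format specifiers. A minimal sketch of such a macro; the real TRACE_BROKER also checks FLAG_trace_heap_broker and applies the indentation counter:

#include <iostream>
#include <sstream>

static bool trace_enabled = true;  // stand-in for FLAG_trace_heap_broker

#define TRACE(broker, x)                       \
  do {                                         \
    if (trace_enabled) {                       \
      std::ostringstream os;                   \
      os << x; /* x is an operator<< chain */  \
      std::cerr << os.str() << "\n";           \
    }                                          \
  } while (false)

int main() {
  int indentation = 2;
  TRACE(nullptr, "Running serializer, indent=" << indentation << ".");
}
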
@@ -227,6 +230,81 @@ void JSObjectData::SerializeObjectCreateMap(JSHeapBroker* broker) {
}
}
+class JSTypedArrayData : public JSObjectData {
+ public:
+ JSTypedArrayData(JSHeapBroker* broker, ObjectData** storage,
+ Handle<JSTypedArray> object);
+
+ bool is_on_heap() const { return is_on_heap_; }
+ size_t length_value() const { return length_value_; }
+ void* elements_external_pointer() const { return elements_external_pointer_; }
+
+ void Serialize(JSHeapBroker* broker);
+
+ HeapObjectData* buffer() const { return buffer_; }
+
+ private:
+ bool const is_on_heap_;
+ size_t const length_value_;
+ void* const elements_external_pointer_;
+
+ bool serialized_ = false;
+ HeapObjectData* buffer_ = nullptr;
+};
+
+JSTypedArrayData::JSTypedArrayData(JSHeapBroker* broker, ObjectData** storage,
+ Handle<JSTypedArray> object)
+ : JSObjectData(broker, storage, object),
+ is_on_heap_(object->is_on_heap()),
+ length_value_(object->length_value()),
+ elements_external_pointer_(
+ FixedTypedArrayBase::cast(object->elements())->external_pointer()) {}
+
+void JSTypedArrayData::Serialize(JSHeapBroker* broker) {
+ if (serialized_) return;
+ serialized_ = true;
+
+ TraceScope tracer(broker, this, "JSTypedArrayData::Serialize");
+ Handle<JSTypedArray> typed_array = Handle<JSTypedArray>::cast(object());
+
+ if (!is_on_heap()) {
+ DCHECK_NULL(buffer_);
+ buffer_ = broker->GetOrCreateData(typed_array->buffer())->AsHeapObject();
+ }
+}
+
+class JSDataViewData : public JSObjectData {
+ public:
+ JSDataViewData(JSHeapBroker* broker, ObjectData** storage,
+ Handle<JSDataView> object);
+
+ size_t byte_length() const { return byte_length_; }
+ size_t byte_offset() const { return byte_offset_; }
+
+ private:
+ size_t const byte_length_;
+ size_t const byte_offset_;
+};
+
+class JSBoundFunctionData : public JSObjectData {
+ public:
+ JSBoundFunctionData(JSHeapBroker* broker, ObjectData** storage,
+ Handle<JSBoundFunction> object);
+
+ void Serialize(JSHeapBroker* broker);
+
+ ObjectData* bound_target_function() const { return bound_target_function_; }
+ ObjectData* bound_this() const { return bound_this_; }
+ FixedArrayData* bound_arguments() const { return bound_arguments_; }
+
+ private:
+ bool serialized_ = false;
+
+ ObjectData* bound_target_function_ = nullptr;
+ ObjectData* bound_this_ = nullptr;
+ FixedArrayData* bound_arguments_ = nullptr;
+};
+
class JSFunctionData : public JSObjectData {
public:
JSFunctionData(JSHeapBroker* broker, ObjectData** storage,
@@ -240,7 +318,8 @@ class JSFunctionData : public JSObjectData {
void Serialize(JSHeapBroker* broker);
- JSGlobalProxyData* global_proxy() const { return global_proxy_; }
+ ContextData* context() const { return context_; }
+ NativeContextData* native_context() const { return native_context_; }
MapData* initial_map() const { return initial_map_; }
ObjectData* prototype() const { return prototype_; }
SharedFunctionInfoData* shared() const { return shared_; }
@@ -256,7 +335,8 @@ class JSFunctionData : public JSObjectData {
bool serialized_ = false;
- JSGlobalProxyData* global_proxy_ = nullptr;
+ ContextData* context_ = nullptr;
+ NativeContextData* native_context_ = nullptr;
MapData* initial_map_ = nullptr;
ObjectData* prototype_ = nullptr;
SharedFunctionInfoData* shared_ = nullptr;
@@ -396,6 +476,12 @@ class StringData : public NameData {
static constexpr int kMaxLengthForDoubleConversion = 23;
};
+class SymbolData : public NameData {
+ public:
+ SymbolData(JSHeapBroker* broker, ObjectData** storage, Handle<Symbol> object)
+ : NameData(broker, storage, object) {}
+};
+
StringData::StringData(JSHeapBroker* broker, ObjectData** storage,
Handle<String> object)
: NameData(broker, storage, object),
@@ -405,18 +491,30 @@ StringData::StringData(JSHeapBroker* broker, ObjectData** storage,
is_seq_string_(object->IsSeqString()) {
int flags = ALLOW_HEX | ALLOW_OCTAL | ALLOW_BINARY;
if (length_ <= kMaxLengthForDoubleConversion) {
- to_number_ = StringToDouble(
- broker->isolate(), broker->isolate()->unicode_cache(), object, flags);
+ to_number_ = StringToDouble(broker->isolate(), object, flags);
}
}
class InternalizedStringData : public StringData {
public:
InternalizedStringData(JSHeapBroker* broker, ObjectData** storage,
- Handle<InternalizedString> object)
- : StringData(broker, storage, object) {}
+ Handle<InternalizedString> object);
+
+ uint32_t array_index() const { return array_index_; }
+
+ private:
+ uint32_t array_index_;
};
+InternalizedStringData::InternalizedStringData(
+ JSHeapBroker* broker, ObjectData** storage,
+ Handle<InternalizedString> object)
+ : StringData(broker, storage, object) {
+ if (!object->AsArrayIndex(&array_index_)) {
+ array_index_ = InternalizedStringRef::kNotAnArrayIndex;
+ }
+}
+
namespace {
bool IsFastLiteralHelper(Handle<JSObject> boilerplate, int max_depth,
@@ -566,6 +664,15 @@ class MapData : public HeapObjectData {
CHECK(InstanceTypeChecker::IsJSObject(instance_type()));
return in_object_properties_;
}
+ int constructor_function_index() const { return constructor_function_index_; }
+ int NextFreePropertyIndex() const { return next_free_property_index_; }
+ int UnusedPropertyFields() const { return unused_property_fields_; }
+ bool supports_fast_array_iteration() const {
+ return supports_fast_array_iteration_;
+ }
+ bool supports_fast_array_resize() const {
+ return supports_fast_array_resize_;
+ }
// Extra information.
@@ -583,10 +690,10 @@ class MapData : public HeapObjectData {
return instance_descriptors_;
}
- void SerializeConstructorOrBackpointer(JSHeapBroker* broker);
- ObjectData* constructor_or_backpointer() const {
- CHECK(serialized_constructor_or_backpointer_);
- return constructor_or_backpointer_;
+ void SerializeConstructor(JSHeapBroker* broker);
+ ObjectData* GetConstructor() const {
+ CHECK(serialized_constructor_);
+ return constructor_;
}
void SerializePrototype(JSHeapBroker* broker);
@@ -605,6 +712,11 @@ class MapData : public HeapObjectData {
bool const can_transition_;
int const in_object_properties_start_in_words_;
int const in_object_properties_;
+ int const constructor_function_index_;
+ int const next_free_property_index_;
+ int const unused_property_fields_;
+ bool const supports_fast_array_iteration_;
+ bool const supports_fast_array_resize_;
bool serialized_elements_kind_generalizations_ = false;
ZoneVector<MapData*> elements_kind_generalizations_;
@@ -612,8 +724,8 @@ class MapData : public HeapObjectData {
bool serialized_own_descriptors_ = false;
DescriptorArrayData* instance_descriptors_ = nullptr;
- bool serialized_constructor_or_backpointer_ = false;
- ObjectData* constructor_or_backpointer_ = nullptr;
+ bool serialized_constructor_ = false;
+ ObjectData* constructor_ = nullptr;
bool serialized_prototype_ = false;
ObjectData* prototype_ = nullptr;
@@ -666,6 +778,31 @@ HeapObjectData::HeapObjectData(JSHeapBroker* broker, ObjectData** storage,
CHECK(broker->SerializingAllowed());
}
+namespace {
+bool IsReadOnlyLengthDescriptor(Isolate* isolate, Handle<Map> jsarray_map) {
+ DCHECK(!jsarray_map->is_dictionary_map());
+ Handle<Name> length_string = isolate->factory()->length_string();
+ DescriptorArray descriptors = jsarray_map->instance_descriptors();
+ int number = descriptors->Search(*length_string, *jsarray_map);
+ DCHECK_NE(DescriptorArray::kNotFound, number);
+ return descriptors->GetDetails(number).IsReadOnly();
+}
+
+bool SupportsFastArrayIteration(Isolate* isolate, Handle<Map> map) {
+ return map->instance_type() == JS_ARRAY_TYPE &&
+ IsFastElementsKind(map->elements_kind()) &&
+ map->prototype()->IsJSArray() &&
+ isolate->IsAnyInitialArrayPrototype(
+ handle(JSArray::cast(map->prototype()), isolate)) &&
+ isolate->IsNoElementsProtectorIntact();
+}
+
+bool SupportsFastArrayResize(Isolate* isolate, Handle<Map> map) {
+ return SupportsFastArrayIteration(isolate, map) && map->is_extensible() &&
+ !map->is_dictionary_map() && !IsReadOnlyLengthDescriptor(isolate, map);
+}
+} // namespace
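Taken together these two helpers form a small hierarchy: fast resize is fast iteration plus extensibility, a non-dictionary map, and a writable "length". A minimal sketch of the invariant a caller could assert, assuming V8's DCHECK_IMPLIES macro from src/base/logging.h:

    // Holds by construction of the two predicates above.
    DCHECK_IMPLIES(SupportsFastArrayResize(isolate, map),
                   SupportsFastArrayIteration(isolate, map));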
+
MapData::MapData(JSHeapBroker* broker, ObjectData** storage, Handle<Map> object)
: HeapObjectData(broker, storage, object),
instance_type_(object->instance_type()),
@@ -682,6 +819,15 @@ MapData::MapData(JSHeapBroker* broker, ObjectData** storage, Handle<Map> object)
: 0),
in_object_properties_(
object->IsJSObjectMap() ? object->GetInObjectProperties() : 0),
+ constructor_function_index_(object->IsPrimitiveMap()
+ ? object->GetConstructorFunctionIndex()
+ : Map::kNoConstructorFunctionIndex),
+ next_free_property_index_(object->NextFreePropertyIndex()),
+ unused_property_fields_(object->UnusedPropertyFields()),
+ supports_fast_array_iteration_(
+ SupportsFastArrayIteration(broker->isolate(), object)),
+ supports_fast_array_resize_(
+ SupportsFastArrayResize(broker->isolate(), object)),
elements_kind_generalizations_(broker->zone()) {}
JSFunctionData::JSFunctionData(JSHeapBroker* broker, ObjectData** storage,
@@ -700,13 +846,15 @@ void JSFunctionData::Serialize(JSHeapBroker* broker) {
TraceScope tracer(broker, this, "JSFunctionData::Serialize");
Handle<JSFunction> function = Handle<JSFunction>::cast(object());
- DCHECK_NULL(global_proxy_);
+ DCHECK_NULL(context_);
+ DCHECK_NULL(native_context_);
DCHECK_NULL(initial_map_);
DCHECK_NULL(prototype_);
DCHECK_NULL(shared_);
- global_proxy_ =
- broker->GetOrCreateData(function->global_proxy())->AsJSGlobalProxy();
+ context_ = broker->GetOrCreateData(function->context())->AsContext();
+ native_context_ =
+ broker->GetOrCreateData(function->native_context())->AsNativeContext();
shared_ = broker->GetOrCreateData(function->shared())->AsSharedFunctionInfo();
initial_map_ = has_initial_map()
? broker->GetOrCreateData(function->initial_map())->AsMap()
@@ -720,7 +868,7 @@ void JSFunctionData::Serialize(JSHeapBroker* broker) {
if (initial_map_->instance_type() == JS_ARRAY_TYPE) {
initial_map_->SerializeElementsKindGeneralizations(broker);
}
- initial_map_->SerializeConstructorOrBackpointer(broker);
+ initial_map_->SerializeConstructor(broker);
// TODO(neis): This is currently only needed for native_context's
// object_function, as used by GetObjectCreateMap. If no further use sites
// show up, we should move this into NativeContextData::Serialize.
@@ -742,7 +890,7 @@ void MapData::SerializeElementsKindGeneralizations(JSHeapBroker* broker) {
ElementsKind to_kind = static_cast<ElementsKind>(i);
if (IsMoreGeneralElementsKindTransition(from_kind, to_kind)) {
Handle<Map> target =
- Map::AsElementsKind(broker->isolate(), self.object<Map>(), to_kind);
+ Map::AsElementsKind(broker->isolate(), self.object(), to_kind);
elements_kind_generalizations_.push_back(
broker->GetOrCreateData(target)->AsMap());
}
@@ -789,7 +937,7 @@ void FeedbackVectorData::SerializeSlots(JSHeapBroker* broker) {
DCHECK(feedback_.empty());
feedback_.reserve(vector->length());
for (int i = 0; i < vector->length(); ++i) {
- MaybeObject* value = vector->get(i);
+ MaybeObject value = vector->get(i);
ObjectData* slot_value =
value->IsObject() ? broker->GetOrCreateData(value->cast<Object>())
: nullptr;
@@ -804,7 +952,7 @@ void FeedbackVectorData::SerializeSlots(JSHeapBroker* broker) {
}
}
DCHECK_EQ(vector->length(), feedback_.size());
- broker->Trace("Copied %zu slots.\n", feedback_.size());
+ TRACE(broker, "Copied " << feedback_.size() << " slots.");
}
class FixedArrayBaseData : public HeapObjectData {
@@ -819,11 +967,6 @@ class FixedArrayBaseData : public HeapObjectData {
int const length_;
};
-JSObjectData::JSObjectData(JSHeapBroker* broker, ObjectData** storage,
- Handle<JSObject> object)
- : HeapObjectData(broker, storage, object),
- inobject_fields_(broker->zone()) {}
-
class FixedArrayData : public FixedArrayBaseData {
public:
FixedArrayData(JSHeapBroker* broker, ObjectData** storage,
@@ -839,6 +982,46 @@ class FixedArrayData : public FixedArrayBaseData {
ZoneVector<ObjectData*> contents_;
};
+JSDataViewData::JSDataViewData(JSHeapBroker* broker, ObjectData** storage,
+ Handle<JSDataView> object)
+ : JSObjectData(broker, storage, object),
+ byte_length_(object->byte_length()),
+ byte_offset_(object->byte_offset()) {}
+
+JSBoundFunctionData::JSBoundFunctionData(JSHeapBroker* broker,
+ ObjectData** storage,
+ Handle<JSBoundFunction> object)
+ : JSObjectData(broker, storage, object) {}
+
+void JSBoundFunctionData::Serialize(JSHeapBroker* broker) {
+ if (serialized_) return;
+ serialized_ = true;
+
+ TraceScope tracer(broker, this, "JSBoundFunctionData::Serialize");
+ Handle<JSBoundFunction> function = Handle<JSBoundFunction>::cast(object());
+
+ DCHECK_NULL(bound_target_function_);
+ DCHECK_NULL(bound_this_);
+ DCHECK_NULL(bound_arguments_);
+
+ bound_target_function_ =
+ broker->GetOrCreateData(function->bound_target_function());
+ bound_this_ = broker->GetOrCreateData(function->bound_this());
+ bound_arguments_ =
+ broker->GetOrCreateData(function->bound_arguments())->AsFixedArray();
+
+ bound_arguments_->SerializeContents(broker);
+}
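Like the other *Data::Serialize methods in this file, this follows a fixed shape: an idempotence guard, a TraceScope entry in the broker trace, DCHECK_NULLs on the target fields, and one GetOrCreateData call per edge. A hedged skeleton of that pattern, with FooData and field_ as hypothetical stand-ins:

    void FooData::Serialize(JSHeapBroker* broker) {
      if (serialized_) return;  // at most one serialization per object
      serialized_ = true;

      TraceScope tracer(broker, this, "FooData::Serialize");
      Handle<Foo> foo = Handle<Foo>::cast(object());

      DCHECK_NULL(field_);  // edges start out null and are filled exactly once
      field_ = broker->GetOrCreateData(foo->field());
    }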
+
+JSObjectData::JSObjectData(JSHeapBroker* broker, ObjectData** storage,
+ Handle<JSObject> object)
+ : HeapObjectData(broker, storage, object),
+ inobject_fields_(broker->zone()) {}
+
+FixedArrayData::FixedArrayData(JSHeapBroker* broker, ObjectData** storage,
+ Handle<FixedArray> object)
+ : FixedArrayBaseData(broker, storage, object), contents_(broker->zone()) {}
+
void FixedArrayData::SerializeContents(JSHeapBroker* broker) {
if (serialized_contents_) return;
serialized_contents_ = true;
@@ -853,13 +1036,9 @@ void FixedArrayData::SerializeContents(JSHeapBroker* broker) {
Handle<Object> value(array->get(i), broker->isolate());
contents_.push_back(broker->GetOrCreateData(value));
}
- broker->Trace("Copied %zu elements.\n", contents_.size());
+ TRACE(broker, "Copied " << contents_.size() << " elements.");
}
-FixedArrayData::FixedArrayData(JSHeapBroker* broker, ObjectData** storage,
- Handle<FixedArray> object)
- : FixedArrayBaseData(broker, storage, object), contents_(broker->zone()) {}
-
class FixedDoubleArrayData : public FixedArrayBaseData {
public:
FixedDoubleArrayData(JSHeapBroker* broker, ObjectData** storage,
@@ -893,7 +1072,7 @@ void FixedDoubleArrayData::SerializeContents(JSHeapBroker* broker) {
for (int i = 0; i < length(); i++) {
contents_.push_back(Float64::FromBits(self->get_representation(i)));
}
- broker->Trace("Copied %zu elements.\n", contents_.size());
+ TRACE(broker, "Copied " << contents_.size() << " elements.");
}
class BytecodeArrayData : public FixedArrayBaseData {
@@ -954,39 +1133,60 @@ ScopeInfoData::ScopeInfoData(JSHeapBroker* broker, ObjectData** storage,
class SharedFunctionInfoData : public HeapObjectData {
public:
+ SharedFunctionInfoData(JSHeapBroker* broker, ObjectData** storage,
+ Handle<SharedFunctionInfo> object);
+
int builtin_id() const { return builtin_id_; }
BytecodeArrayData* GetBytecodeArray() const { return GetBytecodeArray_; }
+ void SetSerializedForCompilation(FeedbackVectorRef feedback);
+ bool IsSerializedForCompilation(FeedbackVectorRef feedback) const;
#define DECL_ACCESSOR(type, name) \
type name() const { return name##_; }
BROKER_SFI_FIELDS(DECL_ACCESSOR)
#undef DECL_ACCESSOR
- SharedFunctionInfoData(JSHeapBroker* broker, ObjectData** storage,
- Handle<SharedFunctionInfo> object)
- : HeapObjectData(broker, storage, object),
- builtin_id_(object->HasBuiltinId() ? object->builtin_id()
- : Builtins::kNoBuiltinId),
- GetBytecodeArray_(
- object->HasBytecodeArray()
- ? broker->GetOrCreateData(object->GetBytecodeArray())
- ->AsBytecodeArray()
- : nullptr)
-#define INIT_MEMBER(type, name) , name##_(object->name())
- BROKER_SFI_FIELDS(INIT_MEMBER)
-#undef INIT_MEMBER
- {
- DCHECK_EQ(HasBuiltinId_, builtin_id_ != Builtins::kNoBuiltinId);
- DCHECK_EQ(HasBytecodeArray_, GetBytecodeArray_ != nullptr);
- }
-
private:
int const builtin_id_;
BytecodeArrayData* const GetBytecodeArray_;
+ ZoneUnorderedSet<Handle<FeedbackVector>, Handle<FeedbackVector>::hash,
+ Handle<FeedbackVector>::equal_to>
+ serialized_for_compilation_;
#define DECL_MEMBER(type, name) type const name##_;
BROKER_SFI_FIELDS(DECL_MEMBER)
#undef DECL_MEMBER
};
+SharedFunctionInfoData::SharedFunctionInfoData(
+ JSHeapBroker* broker, ObjectData** storage,
+ Handle<SharedFunctionInfo> object)
+ : HeapObjectData(broker, storage, object),
+ builtin_id_(object->HasBuiltinId() ? object->builtin_id()
+ : Builtins::kNoBuiltinId),
+ GetBytecodeArray_(
+ object->HasBytecodeArray()
+ ? broker->GetOrCreateData(object->GetBytecodeArray())
+ ->AsBytecodeArray()
+ : nullptr),
+ serialized_for_compilation_(broker->zone())
+#define INIT_MEMBER(type, name) , name##_(object->name())
+ BROKER_SFI_FIELDS(INIT_MEMBER)
+#undef INIT_MEMBER
+{
+ DCHECK_EQ(HasBuiltinId_, builtin_id_ != Builtins::kNoBuiltinId);
+ DCHECK_EQ(HasBytecodeArray_, GetBytecodeArray_ != nullptr);
+}
+
+void SharedFunctionInfoData::SetSerializedForCompilation(
+ FeedbackVectorRef feedback) {
+ CHECK(serialized_for_compilation_.insert(feedback.object()).second);
+}
+
+bool SharedFunctionInfoData::IsSerializedForCompilation(
+ FeedbackVectorRef feedback) const {
+ return serialized_for_compilation_.find(feedback.object()) !=
+ serialized_for_compilation_.end();
+}
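A plausible call-site sketch (the real driver lives in the bytecode serializer, outside this hunk; the helper name and the commented call are assumptions): record the pair once, then let later phases query it before trusting bytecode-derived data. Note that SetSerializedForCompilation CHECKs that the insertion is fresh, so the guard below is mandatory.

    void EnsureSerializedForCompilation(SharedFunctionInfoRef shared,
                                        FeedbackVectorRef feedback) {
      if (shared.IsSerializedForCompilation(feedback)) return;
      shared.SetSerializedForCompilation(feedback);
      // SerializeBytecode(shared, feedback);  // walk the bytecode's objects
    }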
+
class ModuleData : public HeapObjectData {
public:
ModuleData(JSHeapBroker* broker, ObjectData** storage, Handle<Module> object);
@@ -1041,7 +1241,7 @@ void ModuleData::Serialize(JSHeapBroker* broker) {
for (int i = 0; i < imports_length; ++i) {
imports_.push_back(broker->GetOrCreateData(imports->get(i))->AsCell());
}
- broker->Trace("Copied %zu imports.\n", imports_.size());
+ TRACE(broker, "Copied " << imports_.size() << " imports.");
DCHECK(exports_.empty());
Handle<FixedArray> exports(module->regular_exports(), broker->isolate());
@@ -1050,15 +1250,35 @@ void ModuleData::Serialize(JSHeapBroker* broker) {
for (int i = 0; i < exports_length; ++i) {
exports_.push_back(broker->GetOrCreateData(exports->get(i))->AsCell());
}
- broker->Trace("Copied %zu exports.\n", exports_.size());
+ TRACE(broker, "Copied " << exports_.size() << " exports.");
}
class CellData : public HeapObjectData {
public:
- CellData(JSHeapBroker* broker, ObjectData** storage, Handle<Cell> object)
- : HeapObjectData(broker, storage, object) {}
+ CellData(JSHeapBroker* broker, ObjectData** storage, Handle<Cell> object);
+
+ void Serialize(JSHeapBroker* broker);
+ ObjectData* value() { return value_; }
+
+ private:
+ bool serialized_ = false;
+ ObjectData* value_ = nullptr;
};
+CellData::CellData(JSHeapBroker* broker, ObjectData** storage,
+ Handle<Cell> object)
+ : HeapObjectData(broker, storage, object) {}
+
+void CellData::Serialize(JSHeapBroker* broker) {
+ if (serialized_) return;
+ serialized_ = true;
+
+ TraceScope tracer(broker, this, "CellData::Serialize");
+ auto cell = Handle<Cell>::cast(object());
+ DCHECK_NULL(value_);
+ value_ = broker->GetOrCreateData(cell->value());
+}
+
class JSGlobalProxyData : public JSObjectData {
public:
JSGlobalProxyData(JSHeapBroker* broker, ObjectData** storage,
@@ -1118,15 +1338,14 @@ void JSObjectData::SerializeElements(JSHeapBroker* broker) {
elements_ = broker->GetOrCreateData(elements_object)->AsFixedArrayBase();
}
-void MapData::SerializeConstructorOrBackpointer(JSHeapBroker* broker) {
- if (serialized_constructor_or_backpointer_) return;
- serialized_constructor_or_backpointer_ = true;
+void MapData::SerializeConstructor(JSHeapBroker* broker) {
+ if (serialized_constructor_) return;
+ serialized_constructor_ = true;
- TraceScope tracer(broker, this, "MapData::SerializeConstructorOrBackpointer");
+ TraceScope tracer(broker, this, "MapData::SerializeConstructor");
Handle<Map> map = Handle<Map>::cast(object());
- DCHECK_NULL(constructor_or_backpointer_);
- constructor_or_backpointer_ =
- broker->GetOrCreateData(map->constructor_or_backpointer());
+ DCHECK_NULL(constructor_);
+ constructor_ = broker->GetOrCreateData(map->GetConstructor());
}
void MapData::SerializePrototype(JSHeapBroker* broker) {
@@ -1189,9 +1408,9 @@ void MapData::SerializeOwnDescriptors(JSHeapBroker* broker) {
}
}
- broker->Trace("Copied %zu descriptors into %p (%zu total).\n",
- number_of_own - current_size, instance_descriptors_,
- number_of_own);
+ TRACE(broker, "Copied " << number_of_own - current_size
+ << " descriptors into " << instance_descriptors_
+ << " (" << number_of_own << " total).");
}
void JSObjectData::SerializeRecursive(JSHeapBroker* broker, int depth) {
@@ -1285,7 +1504,7 @@ void JSObjectData::SerializeRecursive(JSHeapBroker* broker, int depth) {
inobject_fields_.push_back(JSObjectField{value_data});
}
}
- broker->Trace("Copied %zu in-object fields.\n", inobject_fields_.size());
+ TRACE(broker, "Copied " << inobject_fields_.size() << " in-object fields.");
map()->SerializeOwnDescriptors(broker);
@@ -1319,8 +1538,8 @@ ContextRef ContextRef::previous() const {
if (broker()->mode() == JSHeapBroker::kDisabled) {
AllowHandleAllocation handle_allocation;
AllowHandleDereference handle_dereference;
- return ContextRef(
- broker(), handle(object<Context>()->previous(), broker()->isolate()));
+ return ContextRef(broker(),
+ handle(object()->previous(), broker()->isolate()));
}
return ContextRef(broker(), data()->AsContext()->previous());
}
@@ -1329,7 +1548,7 @@ ContextRef ContextRef::previous() const {
ObjectRef ContextRef::get(int index) const {
AllowHandleAllocation handle_allocation;
AllowHandleDereference handle_dereference;
- Handle<Object> value(object<Context>()->get(index), broker()->isolate());
+ Handle<Object> value(object()->get(index), broker()->isolate());
return ObjectRef(broker(), value);
}
@@ -1338,49 +1557,41 @@ JSHeapBroker::JSHeapBroker(Isolate* isolate, Zone* broker_zone)
broker_zone_(broker_zone),
current_zone_(broker_zone),
refs_(new (zone())
- RefsMap(kMinimalRefsBucketCount, AddressMatcher(), zone())) {
+ RefsMap(kMinimalRefsBucketCount, AddressMatcher(), zone())),
+ array_and_object_prototypes_(zone()) {
// Note that this initialization of the refs_ pointer with the minimal
// initial capacity is redundant in the normal use case (concurrent
// compilation enabled, standard objects to be serialized), as the map
  // is going to be replaced immediately with a larger capacity one.
// It doesn't seem to affect the performance in a noticeable way though.
- Trace("Constructing heap broker.\n");
+ TRACE(this, "Constructing heap broker.");
}
-void JSHeapBroker::Trace(const char* format, ...) const {
- if (FLAG_trace_heap_broker) {
- PrintF("[%p] ", this);
- for (unsigned i = 0; i < tracing_indentation_; ++i) PrintF(" ");
- va_list arguments;
- va_start(arguments, format);
- base::OS::VPrint(format, arguments);
- va_end(arguments);
- }
+std::ostream& JSHeapBroker::Trace() const {
+ std::cout << "[" << this << "] " << std::string(trace_indentation_ * 2, ' ');
+ return std::cout;
}
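The printf-style Trace is gone; the TRACE macro used throughout this file (defined near the top of js-heap-broker.cc, outside this hunk) plausibly wraps the new ostream getter along these lines — a sketch, assuming the FLAG_trace_heap_broker check stays in the macro:

    #define TRACE(broker, x)                                        \
      do {                                                          \
        if (FLAG_trace_heap_broker) (broker)->Trace() << x << '\n'; \
      } while (false)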
void JSHeapBroker::StartSerializing() {
CHECK_EQ(mode_, kDisabled);
- Trace("Starting serialization.\n");
+ TRACE(this, "Starting serialization.");
mode_ = kSerializing;
refs_->Clear();
}
void JSHeapBroker::StopSerializing() {
CHECK_EQ(mode_, kSerializing);
- Trace("Stopping serialization.\n");
+ TRACE(this, "Stopping serialization.");
mode_ = kSerialized;
}
void JSHeapBroker::Retire() {
CHECK_EQ(mode_, kSerialized);
- Trace("Retiring.\n");
+ TRACE(this, "Retiring.");
mode_ = kRetired;
}
-bool JSHeapBroker::SerializingAllowed() const {
- return mode() == kSerializing ||
- (!FLAG_strict_heap_broker && mode() == kSerialized);
-}
+bool JSHeapBroker::SerializingAllowed() const { return mode() == kSerializing; }
void JSHeapBroker::SetNativeContextRef() {
native_context_ = NativeContextRef(this, isolate()->native_context());
@@ -1393,7 +1604,7 @@ bool IsShareable(Handle<Object> object, Isolate* isolate) {
RootIndex root_index;
return (object->IsHeapObject() &&
b->IsBuiltinHandle(Handle<HeapObject>::cast(object), &index)) ||
- isolate->heap()->IsRootHandle(object, &root_index);
+ isolate->roots_table().IsRootHandle(object, &root_index);
}
void JSHeapBroker::SerializeShareableObjects() {
@@ -1456,6 +1667,37 @@ void JSHeapBroker::SerializeShareableObjects() {
current_zone_ = broker_zone_;
}
+void JSHeapBroker::CollectArrayAndObjectPrototypes() {
+ DisallowHeapAllocation no_gc;
+ CHECK_EQ(mode(), kSerializing);
+ CHECK(array_and_object_prototypes_.empty());
+
+ Object maybe_context = isolate()->heap()->native_contexts_list();
+ while (!maybe_context->IsUndefined(isolate())) {
+ Context context = Context::cast(maybe_context);
+ Object array_prot = context->get(Context::INITIAL_ARRAY_PROTOTYPE_INDEX);
+ Object object_prot = context->get(Context::INITIAL_OBJECT_PROTOTYPE_INDEX);
+ array_and_object_prototypes_.emplace(JSObject::cast(array_prot), isolate());
+ array_and_object_prototypes_.emplace(JSObject::cast(object_prot),
+ isolate());
+ maybe_context = context->next_context_link();
+ }
+
+ CHECK(!array_and_object_prototypes_.empty());
+}
+
+bool JSHeapBroker::IsArrayOrObjectPrototype(const JSObjectRef& object) const {
+ if (mode() == kDisabled) {
+ return isolate()->IsInAnyContext(*object.object(),
+ Context::INITIAL_ARRAY_PROTOTYPE_INDEX) ||
+ isolate()->IsInAnyContext(*object.object(),
+ Context::INITIAL_OBJECT_PROTOTYPE_INDEX);
+ }
+ CHECK(!array_and_object_prototypes_.empty());
+ return array_and_object_prototypes_.find(object.object()) !=
+ array_and_object_prototypes_.end();
+}
+
void JSHeapBroker::SerializeStandardObjects() {
if (mode() == kDisabled) return;
CHECK_EQ(mode(), kSerializing);
@@ -1464,6 +1706,8 @@ void JSHeapBroker::SerializeStandardObjects() {
TraceScope tracer(this, "JSHeapBroker::SerializeStandardObjects");
+ CollectArrayAndObjectPrototypes();
+
SetNativeContextRef();
native_context().Serialize();
@@ -1519,10 +1763,11 @@ void JSHeapBroker::SerializeStandardObjects() {
GetOrCreateData(f->with_context_map());
GetOrCreateData(f->zero_string());
- // Property cells
- GetOrCreateData(f->array_buffer_neutering_protector())
+ // Protector cells
+ GetOrCreateData(f->array_buffer_detaching_protector())
->AsPropertyCell()
->Serialize(this);
+ GetOrCreateData(f->array_constructor_protector())->AsCell()->Serialize(this);
GetOrCreateData(f->array_iterator_protector())
->AsPropertyCell()
->Serialize(this);
@@ -1541,12 +1786,13 @@ void JSHeapBroker::SerializeStandardObjects() {
GetOrCreateData(f->promise_then_protector())
->AsPropertyCell()
->Serialize(this);
+ GetOrCreateData(f->string_length_protector())->AsCell()->Serialize(this);
// CEntry stub
GetOrCreateData(
CodeFactory::CEntry(isolate(), 1, kDontSaveFPRegs, kArgvOnStack, true));
- Trace("Finished serializing standard objects.\n");
+ TRACE(this, "Finished serializing standard objects.");
}
ObjectData* JSHeapBroker::GetData(Handle<Object> object) const {
@@ -1579,7 +1825,7 @@ ObjectData* JSHeapBroker::GetOrCreateData(Handle<Object> object) {
}
// clang-format on
-ObjectData* JSHeapBroker::GetOrCreateData(Object* object) {
+ObjectData* JSHeapBroker::GetOrCreateData(Object object) {
return GetOrCreateData(handle(object, isolate()));
}
@@ -1597,7 +1843,7 @@ bool ObjectRef::IsSmi() const { return data()->is_smi(); }
int ObjectRef::AsSmi() const {
DCHECK(IsSmi());
// Handle-dereference is always allowed for Handle<Smi>.
- return object<Smi>()->value();
+ return Handle<Smi>::cast(object())->value();
}
base::Optional<MapRef> JSObjectRef::GetObjectCreateMap() const {
@@ -1606,7 +1852,7 @@ base::Optional<MapRef> JSObjectRef::GetObjectCreateMap() const {
AllowHandleDereference allow_handle_dereference;
AllowHeapAllocation heap_allocation;
Handle<Map> instance_map;
- if (Map::TryGetObjectCreateMap(broker()->isolate(), object<HeapObject>())
+ if (Map::TryGetObjectCreateMap(broker()->isolate(), object())
.ToHandle(&instance_map)) {
return MapRef(broker(), instance_map);
} else {
@@ -1618,13 +1864,20 @@ base::Optional<MapRef> JSObjectRef::GetObjectCreateMap() const {
: base::Optional<MapRef>();
}
+#define DEF_TESTER(Type, ...) \
+ bool MapRef::Is##Type##Map() const { \
+ return InstanceTypeChecker::Is##Type(instance_type()); \
+ }
+INSTANCE_TYPE_CHECKERS(DEF_TESTER)
+#undef DEF_TESTER
+
base::Optional<MapRef> MapRef::AsElementsKind(ElementsKind kind) const {
if (broker()->mode() == JSHeapBroker::kDisabled) {
AllowHandleAllocation handle_allocation;
AllowHeapAllocation heap_allocation;
AllowHandleDereference allow_handle_dereference;
- return MapRef(broker(), Map::AsElementsKind(broker()->isolate(),
- object<Map>(), kind));
+ return MapRef(broker(),
+ Map::AsElementsKind(broker()->isolate(), object(), kind));
}
if (kind == elements_kind()) return *this;
const ZoneVector<MapData*>& elements_kind_generalizations =
@@ -1636,12 +1889,29 @@ base::Optional<MapRef> MapRef::AsElementsKind(ElementsKind kind) const {
return base::Optional<MapRef>();
}
+bool MapRef::supports_fast_array_iteration() const {
+ if (broker()->mode() == JSHeapBroker::kDisabled) {
+ AllowHandleDereference allow_handle_dereference;
+ AllowHandleAllocation handle_allocation;
+ return SupportsFastArrayIteration(broker()->isolate(), object());
+ }
+ return data()->AsMap()->supports_fast_array_iteration();
+}
+
+bool MapRef::supports_fast_array_resize() const {
+ if (broker()->mode() == JSHeapBroker::kDisabled) {
+ AllowHandleDereference allow_handle_dereference;
+ AllowHandleAllocation handle_allocation;
+ return SupportsFastArrayResize(broker()->isolate(), object());
+ }
+ return data()->AsMap()->supports_fast_array_resize();
+}
+
int JSFunctionRef::InitialMapInstanceSizeWithMinSlack() const {
if (broker()->mode() == JSHeapBroker::kDisabled) {
AllowHandleDereference allow_handle_dereference;
AllowHandleAllocation handle_allocation;
- return object<JSFunction>()->ComputeInstanceSizeWithMinSlack(
- broker()->isolate());
+ return object()->ComputeInstanceSizeWithMinSlack(broker()->isolate());
}
return data()->AsJSFunction()->initial_map_instance_size_with_min_slack();
}
@@ -1653,9 +1923,9 @@ ScriptContextTableRef::lookup(const NameRef& name) const {
AllowHandleDereference handle_dereference;
if (!name.IsString()) return {};
ScriptContextTable::LookupResult lookup_result;
- auto table = object<ScriptContextTable>();
+ auto table = object();
if (!ScriptContextTable::Lookup(broker()->isolate(), table,
- name.object<String>(), &lookup_result)) {
+ name.AsString().object(), &lookup_result)) {
return {};
}
Handle<Context> script_context = ScriptContextTable::GetContext(
@@ -1697,7 +1967,7 @@ ObjectRef FeedbackVectorRef::get(FeedbackSlot slot) const {
if (broker()->mode() == JSHeapBroker::kDisabled) {
AllowHandleAllocation handle_allocation;
AllowHandleDereference handle_dereference;
- Handle<Object> value(object<FeedbackVector>()->Get(slot)->cast<Object>(),
+ Handle<Object> value(object()->Get(slot)->cast<Object>(),
broker()->isolate());
return ObjectRef(broker(), value);
}
@@ -1708,7 +1978,7 @@ ObjectRef FeedbackVectorRef::get(FeedbackSlot slot) const {
double JSObjectRef::RawFastDoublePropertyAt(FieldIndex index) const {
if (broker()->mode() == JSHeapBroker::kDisabled) {
AllowHandleDereference handle_dereference;
- return object<JSObject>()->RawFastDoublePropertyAt(index);
+ return object()->RawFastDoublePropertyAt(index);
}
JSObjectData* object_data = data()->AsJSObject();
CHECK(index.is_inobject());
@@ -1719,9 +1989,8 @@ ObjectRef JSObjectRef::RawFastPropertyAt(FieldIndex index) const {
if (broker()->mode() == JSHeapBroker::kDisabled) {
AllowHandleAllocation handle_allocation;
AllowHandleDereference handle_dereference;
- return ObjectRef(broker(),
- handle(object<JSObject>()->RawFastPropertyAt(index),
- broker()->isolate()));
+ return ObjectRef(broker(), handle(object()->RawFastPropertyAt(index),
+ broker()->isolate()));
}
JSObjectData* object_data = data()->AsJSObject();
CHECK(index.is_inobject());
@@ -1736,7 +2005,7 @@ bool AllocationSiteRef::IsFastLiteral() const {
AllowHandleAllocation allow_handle_allocation;
AllowHandleDereference allow_handle_dereference;
return IsInlinableFastLiteral(
- handle(object<AllocationSite>()->boilerplate(), broker()->isolate()));
+ handle(object()->boilerplate(), broker()->isolate()));
}
return data()->AsAllocationSite()->IsFastLiteral();
}
@@ -1747,8 +2016,7 @@ void JSObjectRef::EnsureElementsTenured() {
AllowHandleDereference allow_handle_dereference;
AllowHeapAllocation allow_heap_allocation;
- Handle<FixedArrayBase> object_elements =
- elements().object<FixedArrayBase>();
+ Handle<FixedArrayBase> object_elements = elements().object();
if (Heap::InNewSpace(*object_elements)) {
// If we would like to pretenure a fixed cow array, we must ensure that
// the array is already in old space, otherwise we'll create too many
@@ -1756,7 +2024,7 @@ void JSObjectRef::EnsureElementsTenured() {
object_elements =
broker()->isolate()->factory()->CopyAndTenureFixedCOWArray(
Handle<FixedArray>::cast(object_elements));
- object<JSObject>()->set_elements(*object_elements);
+ object()->set_elements(*object_elements);
}
return;
}
@@ -1766,7 +2034,7 @@ void JSObjectRef::EnsureElementsTenured() {
FieldIndex MapRef::GetFieldIndexFor(int descriptor_index) const {
if (broker()->mode() == JSHeapBroker::kDisabled) {
AllowHandleDereference allow_handle_dereference;
- return FieldIndex::ForDescriptor(*object<Map>(), descriptor_index);
+ return FieldIndex::ForDescriptor(*object(), descriptor_index);
}
DescriptorArrayData* descriptors = data()->AsMap()->instance_descriptors();
return descriptors->contents().at(descriptor_index).field_index;
@@ -1775,15 +2043,15 @@ FieldIndex MapRef::GetFieldIndexFor(int descriptor_index) const {
int MapRef::GetInObjectPropertyOffset(int i) const {
if (broker()->mode() == JSHeapBroker::kDisabled) {
AllowHandleDereference allow_handle_dereference;
- return object<Map>()->GetInObjectPropertyOffset(i);
+ return object()->GetInObjectPropertyOffset(i);
}
- return (GetInObjectPropertiesStartInWords() + i) * kPointerSize;
+ return (GetInObjectPropertiesStartInWords() + i) * kTaggedSize;
}
PropertyDetails MapRef::GetPropertyDetails(int descriptor_index) const {
if (broker()->mode() == JSHeapBroker::kDisabled) {
AllowHandleDereference allow_handle_dereference;
- return object<Map>()->instance_descriptors()->GetDetails(descriptor_index);
+ return object()->instance_descriptors()->GetDetails(descriptor_index);
}
DescriptorArrayData* descriptors = data()->AsMap()->instance_descriptors();
return descriptors->contents().at(descriptor_index).details;
@@ -1795,7 +2063,7 @@ NameRef MapRef::GetPropertyKey(int descriptor_index) const {
AllowHandleDereference allow_handle_dereference;
return NameRef(
broker(),
- handle(object<Map>()->instance_descriptors()->GetKey(descriptor_index),
+ handle(object()->instance_descriptors()->GetKey(descriptor_index),
broker()->isolate()));
}
DescriptorArrayData* descriptors = data()->AsMap()->instance_descriptors();
@@ -1808,12 +2076,16 @@ bool MapRef::IsFixedCowArrayMap() const {
return equals(MapRef(broker(), fixed_cow_array_map));
}
+bool MapRef::IsPrimitiveMap() const {
+ return instance_type() <= LAST_PRIMITIVE_TYPE;
+}
+
MapRef MapRef::FindFieldOwner(int descriptor_index) const {
if (broker()->mode() == JSHeapBroker::kDisabled) {
AllowHandleAllocation handle_allocation;
AllowHandleDereference allow_handle_dereference;
Handle<Map> owner(
- object<Map>()->FindFieldOwner(broker()->isolate(), descriptor_index),
+ object()->FindFieldOwner(broker()->isolate(), descriptor_index),
broker()->isolate());
return MapRef(broker(), owner);
}
@@ -1827,7 +2099,7 @@ ObjectRef MapRef::GetFieldType(int descriptor_index) const {
AllowHandleAllocation handle_allocation;
AllowHandleDereference allow_handle_dereference;
Handle<FieldType> field_type(
- object<Map>()->instance_descriptors()->GetFieldType(descriptor_index),
+ object()->instance_descriptors()->GetFieldType(descriptor_index),
broker()->isolate());
return ObjectRef(broker(), field_type);
}
@@ -1839,8 +2111,8 @@ ObjectRef MapRef::GetFieldType(int descriptor_index) const {
bool MapRef::IsUnboxedDoubleField(int descriptor_index) const {
if (broker()->mode() == JSHeapBroker::kDisabled) {
AllowHandleDereference allow_handle_dereference;
- return object<Map>()->IsUnboxedDoubleField(
- FieldIndex::ForDescriptor(*object<Map>(), descriptor_index));
+ return object()->IsUnboxedDoubleField(
+ FieldIndex::ForDescriptor(*object(), descriptor_index));
}
DescriptorArrayData* descriptors = data()->AsMap()->instance_descriptors();
return descriptors->contents().at(descriptor_index).is_unboxed_double_field;
@@ -1849,7 +2121,7 @@ bool MapRef::IsUnboxedDoubleField(int descriptor_index) const {
uint16_t StringRef::GetFirstChar() {
if (broker()->mode() == JSHeapBroker::kDisabled) {
AllowHandleDereference allow_handle_dereference;
- return object<String>()->Get(0);
+ return object()->Get(0);
}
return data()->AsString()->first_char();
}
@@ -1860,19 +2132,29 @@ base::Optional<double> StringRef::ToNumber() {
AllowHandleAllocation allow_handle_allocation;
AllowHeapAllocation allow_heap_allocation;
int flags = ALLOW_HEX | ALLOW_OCTAL | ALLOW_BINARY;
- return StringToDouble(broker()->isolate(),
- broker()->isolate()->unicode_cache(),
- object<String>(), flags);
+ return StringToDouble(broker()->isolate(), object(), flags);
}
return data()->AsString()->to_number();
}
+uint32_t InternalizedStringRef::array_index() const {
+ if (broker()->mode() == JSHeapBroker::kDisabled) {
+ AllowHandleDereference allow_handle_dereference;
+ AllowHandleAllocation allow_handle_allocation;
+ uint32_t result;
+ if (!object()->AsArrayIndex(&result)) {
+ result = kNotAnArrayIndex;
+ }
+ return result;
+ }
+ return data()->AsInternalizedString()->array_index();
+}
+
ObjectRef FixedArrayRef::get(int i) const {
if (broker()->mode() == JSHeapBroker::kDisabled) {
AllowHandleAllocation handle_allocation;
AllowHandleDereference allow_handle_dereference;
- return ObjectRef(broker(),
- handle(object<FixedArray>()->get(i), broker()->isolate()));
+ return ObjectRef(broker(), handle(object()->get(i), broker()->isolate()));
}
return ObjectRef(broker(), data()->AsFixedArray()->Get(i));
}
@@ -1880,7 +2162,7 @@ ObjectRef FixedArrayRef::get(int i) const {
bool FixedDoubleArrayRef::is_the_hole(int i) const {
if (broker()->mode() == JSHeapBroker::kDisabled) {
AllowHandleDereference allow_handle_dereference;
- return object<FixedDoubleArray>()->is_the_hole(i);
+ return object()->is_the_hole(i);
}
return data()->AsFixedDoubleArray()->Get(i).is_hole_nan();
}
@@ -1888,7 +2170,7 @@ bool FixedDoubleArrayRef::is_the_hole(int i) const {
double FixedDoubleArrayRef::get_scalar(int i) const {
if (broker()->mode() == JSHeapBroker::kDisabled) {
AllowHandleDereference allow_handle_dereference;
- return object<FixedDoubleArray>()->get_scalar(i);
+ return object()->get_scalar(i);
}
CHECK(!data()->AsFixedDoubleArray()->Get(i).is_hole_nan());
return data()->AsFixedDoubleArray()->Get(i).get_scalar();
@@ -1898,15 +2180,15 @@ double FixedDoubleArrayRef::get_scalar(int i) const {
if (broker()->mode() == JSHeapBroker::kDisabled) { \
AllowHandleAllocation handle_allocation; \
AllowHandleDereference allow_handle_dereference; \
- return object<holder>()->name(); \
+ return object()->name(); \
}
-#define IF_BROKER_DISABLED_ACCESS_HANDLE(holder, result, name) \
- if (broker()->mode() == JSHeapBroker::kDisabled) { \
- AllowHandleAllocation handle_allocation; \
- AllowHandleDereference allow_handle_dereference; \
- return result##Ref(broker(), \
- handle(object<holder>()->name(), broker()->isolate())); \
+#define IF_BROKER_DISABLED_ACCESS_HANDLE(holder, result, name) \
+ if (broker()->mode() == JSHeapBroker::kDisabled) { \
+ AllowHandleAllocation handle_allocation; \
+ AllowHandleDereference allow_handle_dereference; \
+ return result##Ref(broker(), \
+ handle(object()->name(), broker()->isolate())); \
}
// Macros for defining a const getter that, depending on the broker mode,
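For orientation, a use such as BIMODAL_ACCESSOR(JSArray, Object, length) plausibly expands to roughly the following (a sketch; the actual macro definitions follow this comment in the source): read through the handle when the broker is disabled, otherwise answer from the serialized data.

    ObjectRef JSArrayRef::length() const {
      IF_BROKER_DISABLED_ACCESS_HANDLE(JSArray, Object, length);
      return ObjectRef(broker(), data()->AsJSArray()->length());
    }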
@@ -1939,31 +2221,52 @@ BIMODAL_ACCESSOR_C(AllocationSite, PretenureFlag, GetPretenureMode)
BIMODAL_ACCESSOR_C(BytecodeArray, int, register_count)
+BIMODAL_ACCESSOR(Cell, Object, value)
+
BIMODAL_ACCESSOR(HeapObject, Map, map)
BIMODAL_ACCESSOR(JSArray, Object, length)
+BIMODAL_ACCESSOR(JSBoundFunction, Object, bound_target_function)
+BIMODAL_ACCESSOR(JSBoundFunction, Object, bound_this)
+BIMODAL_ACCESSOR(JSBoundFunction, FixedArray, bound_arguments)
+
+BIMODAL_ACCESSOR_C(JSDataView, size_t, byte_length)
+BIMODAL_ACCESSOR_C(JSDataView, size_t, byte_offset)
+
BIMODAL_ACCESSOR_C(JSFunction, bool, has_prototype)
BIMODAL_ACCESSOR_C(JSFunction, bool, has_initial_map)
BIMODAL_ACCESSOR_C(JSFunction, bool, PrototypeRequiresRuntimeLookup)
-BIMODAL_ACCESSOR(JSFunction, JSGlobalProxy, global_proxy)
+BIMODAL_ACCESSOR(JSFunction, Context, context)
+BIMODAL_ACCESSOR(JSFunction, NativeContext, native_context)
BIMODAL_ACCESSOR(JSFunction, Map, initial_map)
BIMODAL_ACCESSOR(JSFunction, Object, prototype)
BIMODAL_ACCESSOR(JSFunction, SharedFunctionInfo, shared)
+BIMODAL_ACCESSOR_C(JSTypedArray, bool, is_on_heap)
+BIMODAL_ACCESSOR_C(JSTypedArray, size_t, length_value)
+BIMODAL_ACCESSOR(JSTypedArray, HeapObject, buffer)
+
BIMODAL_ACCESSOR_B(Map, bit_field2, elements_kind, Map::ElementsKindBits)
+BIMODAL_ACCESSOR_B(Map, bit_field2, is_extensible, Map::IsExtensibleBit)
BIMODAL_ACCESSOR_B(Map, bit_field3, is_deprecated, Map::IsDeprecatedBit)
BIMODAL_ACCESSOR_B(Map, bit_field3, is_dictionary_map, Map::IsDictionaryMapBit)
BIMODAL_ACCESSOR_B(Map, bit_field3, NumberOfOwnDescriptors,
Map::NumberOfOwnDescriptorsBits)
+BIMODAL_ACCESSOR_B(Map, bit_field3, has_hidden_prototype,
+ Map::HasHiddenPrototypeBit)
BIMODAL_ACCESSOR_B(Map, bit_field, has_prototype_slot, Map::HasPrototypeSlotBit)
+BIMODAL_ACCESSOR_B(Map, bit_field, is_access_check_needed,
+ Map::IsAccessCheckNeededBit)
BIMODAL_ACCESSOR_B(Map, bit_field, is_callable, Map::IsCallableBit)
BIMODAL_ACCESSOR_B(Map, bit_field, is_constructor, Map::IsConstructorBit)
BIMODAL_ACCESSOR_B(Map, bit_field, is_undetectable, Map::IsUndetectableBit)
BIMODAL_ACCESSOR_C(Map, int, instance_size)
+BIMODAL_ACCESSOR_C(Map, int, NextFreePropertyIndex)
+BIMODAL_ACCESSOR_C(Map, int, UnusedPropertyFields)
BIMODAL_ACCESSOR(Map, Object, prototype)
BIMODAL_ACCESSOR_C(Map, InstanceType, instance_type)
-BIMODAL_ACCESSOR(Map, Object, constructor_or_backpointer)
+BIMODAL_ACCESSOR(Map, Object, GetConstructor)
#define DEF_NATIVE_CONTEXT_ACCESSOR(type, name) \
BIMODAL_ACCESSOR(NativeContext, type, name)
@@ -1982,12 +2285,26 @@ BROKER_SFI_FIELDS(DEF_SFI_ACCESSOR)
BIMODAL_ACCESSOR_C(String, int, length)
+void* JSTypedArrayRef::elements_external_pointer() const {
+ if (broker()->mode() == JSHeapBroker::kDisabled) {
+ AllowHandleDereference allow_handle_dereference;
+ return FixedTypedArrayBase::cast(object()->elements())->external_pointer();
+ }
+ return data()->AsJSTypedArray()->elements_external_pointer();
+}
+
bool MapRef::IsInobjectSlackTrackingInProgress() const {
IF_BROKER_DISABLED_ACCESS_HANDLE_C(Map, IsInobjectSlackTrackingInProgress);
return Map::ConstructionCounterBits::decode(data()->AsMap()->bit_field3()) !=
Map::kNoSlackTracking;
}
+int MapRef::constructor_function_index() const {
+ IF_BROKER_DISABLED_ACCESS_HANDLE_C(Map, GetConstructorFunctionIndex);
+ CHECK(IsPrimitiveMap());
+ return data()->AsMap()->constructor_function_index();
+}
+
bool MapRef::is_stable() const {
IF_BROKER_DISABLED_ACCESS_HANDLE_C(Map, is_stable);
return !Map::IsUnstableBit::decode(data()->AsMap()->bit_field3());
@@ -2058,10 +2375,37 @@ MapRef NativeContextRef::GetInitialJSArrayMap(ElementsKind kind) const {
}
}
+base::Optional<JSFunctionRef> NativeContextRef::GetConstructorFunction(
+ const MapRef& map) const {
+ CHECK(map.IsPrimitiveMap());
+ switch (map.constructor_function_index()) {
+ case Map::kNoConstructorFunctionIndex:
+ return base::nullopt;
+ case Context::BIGINT_FUNCTION_INDEX:
+ return bigint_function();
+ case Context::BOOLEAN_FUNCTION_INDEX:
+ return boolean_function();
+ case Context::NUMBER_FUNCTION_INDEX:
+ return number_function();
+ case Context::STRING_FUNCTION_INDEX:
+ return string_function();
+ case Context::SYMBOL_FUNCTION_INDEX:
+ return symbol_function();
+ default:
+ UNREACHABLE();
+ }
+}
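A hedged sketch of the intended use (the wrapper function and call-site shape are assumptions): when the receiver of a property access is a primitive, the compiler can fetch the matching wrapper constructor and continue the lookup on its prototype.

    base::Optional<ObjectRef> GetPrimitivePrototype(JSHeapBroker* broker,
                                                    const MapRef& receiver_map) {
      base::Optional<JSFunctionRef> constructor =
          broker->native_context().GetConstructorFunction(receiver_map);
      if (!constructor.has_value()) return base::nullopt;
      // e.g. Number.prototype for a HeapNumber receiver; prototype() is only
      // available once the constructor has been serialized.
      return constructor->prototype();
    }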
+
+bool ObjectRef::IsNullOrUndefined() const {
+ if (IsSmi()) return false;
+ OddballType type = AsHeapObject().map().oddball_type();
+ return type == OddballType::kNull || type == OddballType::kUndefined;
+}
+
bool ObjectRef::BooleanValue() const {
if (broker()->mode() == JSHeapBroker::kDisabled) {
AllowHandleDereference allow_handle_dereference;
- return object<Object>()->BooleanValue(broker()->isolate());
+ return object()->BooleanValue(broker()->isolate());
}
return IsSmi() ? (AsSmi() != 0) : data()->AsHeapObject()->boolean_value();
}
@@ -2105,8 +2449,8 @@ CellRef ModuleRef::GetCell(int cell_index) const {
if (broker()->mode() == JSHeapBroker::kDisabled) {
AllowHandleAllocation handle_allocation;
AllowHandleDereference allow_handle_dereference;
- return CellRef(broker(), handle(object<Module>()->GetCell(cell_index),
- broker()->isolate()));
+ return CellRef(broker(),
+ handle(object()->GetCell(cell_index), broker()->isolate()));
}
return CellRef(broker(), data()->AsModule()->GetCell(cell_index));
}
@@ -2115,8 +2459,7 @@ ObjectRef::ObjectRef(JSHeapBroker* broker, Handle<Object> object)
: broker_(broker) {
switch (broker->mode()) {
case JSHeapBroker::kSerialized:
- data_ = FLAG_strict_heap_broker ? broker->GetData(object)
- : broker->GetOrCreateData(object);
+ data_ = broker->GetData(object);
break;
case JSHeapBroker::kSerializing:
data_ = broker->GetOrCreateData(object);
@@ -2141,7 +2484,7 @@ ObjectRef::ObjectRef(JSHeapBroker* broker, Handle<Object> object)
}
namespace {
-OddballType GetOddballType(Isolate* isolate, Map* map) {
+OddballType GetOddballType(Isolate* isolate, Map map) {
if (map->instance_type() != ODDBALL_TYPE) {
return OddballType::kNone;
}
@@ -2171,7 +2514,7 @@ OddballType GetOddballType(Isolate* isolate, Map* map) {
HeapObjectType HeapObjectRef::GetHeapObjectType() const {
if (broker()->mode() == JSHeapBroker::kDisabled) {
AllowHandleDereference handle_dereference;
- Map* map = Handle<HeapObject>::cast(object())->map();
+ Map map = Handle<HeapObject>::cast(object())->map();
HeapObjectType::Flags flags(0);
if (map->is_undetectable()) flags |= HeapObjectType::kUndetectable;
if (map->is_callable()) flags |= HeapObjectType::kCallable;
@@ -2187,8 +2530,8 @@ base::Optional<JSObjectRef> AllocationSiteRef::boilerplate() const {
if (broker()->mode() == JSHeapBroker::kDisabled) {
AllowHandleAllocation handle_allocation;
AllowHandleDereference allow_handle_dereference;
- return JSObjectRef(broker(), handle(object<AllocationSite>()->boilerplate(),
- broker()->isolate()));
+ return JSObjectRef(broker(),
+ handle(object()->boilerplate(), broker()->isolate()));
}
JSObjectData* boilerplate = data()->AsAllocationSite()->boilerplate();
if (boilerplate) {
@@ -2206,8 +2549,8 @@ FixedArrayBaseRef JSObjectRef::elements() const {
if (broker()->mode() == JSHeapBroker::kDisabled) {
AllowHandleAllocation handle_allocation;
AllowHandleDereference allow_handle_dereference;
- return FixedArrayBaseRef(
- broker(), handle(object<JSObject>()->elements(), broker()->isolate()));
+ return FixedArrayBaseRef(broker(),
+ handle(object()->elements(), broker()->isolate()));
}
return FixedArrayBaseRef(broker(), data()->AsJSObject()->elements());
}
@@ -2260,6 +2603,13 @@ ObjectRef JSRegExpRef::source() const {
Handle<Object> ObjectRef::object() const { return data_->object(); }
+#define DEF_OBJECT_GETTER(T) \
+ Handle<T> T##Ref::object() const { \
+ return Handle<T>(reinterpret_cast<Address*>(data_->object().address())); \
+ }
+HEAP_BROKER_OBJECT_LIST(DEF_OBJECT_GETTER)
+#undef DEF_OBJECT_GETTER
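Instantiated once per HEAP_BROKER_OBJECT_LIST entry; for example, the getter generated for MapRef reads as below (direct expansion). It rewraps the stored handle's address as a typed handle, without a checked cast.

    Handle<Map> MapRef::object() const {
      return Handle<Map>(reinterpret_cast<Address*>(data_->object().address()));
    }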
+
JSHeapBroker* ObjectRef::broker() const { return broker_; }
ObjectData* ObjectRef::data() const {
@@ -2296,16 +2646,20 @@ void NativeContextData::Serialize(JSHeapBroker* broker) {
TraceScope tracer(broker, this, "NativeContextData::Serialize");
Handle<NativeContext> context = Handle<NativeContext>::cast(object());
-#define SERIALIZE_MEMBER(type, name) \
- DCHECK_NULL(name##_); \
- name##_ = broker->GetOrCreateData(context->name())->As##type(); \
- if (name##_->IsJSFunction()) name##_->AsJSFunction()->Serialize(broker);
+#define SERIALIZE_MEMBER(type, name) \
+ DCHECK_NULL(name##_); \
+ name##_ = broker->GetOrCreateData(context->name())->As##type(); \
+ if (name##_->IsJSFunction()) name##_->AsJSFunction()->Serialize(broker); \
+ if (name##_->IsMap()) name##_->AsMap()->SerializeConstructor(broker);
BROKER_COMPULSORY_NATIVE_CONTEXT_FIELDS(SERIALIZE_MEMBER)
if (!broker->isolate()->bootstrapper()->IsActive()) {
BROKER_OPTIONAL_NATIVE_CONTEXT_FIELDS(SERIALIZE_MEMBER)
}
#undef SERIALIZE_MEMBER
+ bound_function_with_constructor_map_->SerializePrototype(broker);
+ bound_function_without_constructor_map_->SerializePrototype(broker);
+
DCHECK(function_maps_.empty());
int const first = Context::FIRST_FUNCTION_MAP_INDEX;
int const last = Context::LAST_FUNCTION_MAP_INDEX;
@@ -2321,6 +2675,18 @@ void JSFunctionRef::Serialize() {
data()->AsJSFunction()->Serialize(broker());
}
+void SharedFunctionInfoRef::SetSerializedForCompilation(
+ FeedbackVectorRef feedback) {
+ CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
+ data()->AsSharedFunctionInfo()->SetSerializedForCompilation(feedback);
+}
+
+bool SharedFunctionInfoRef::IsSerializedForCompilation(
+ FeedbackVectorRef feedback) const {
+ CHECK_NE(broker()->mode(), JSHeapBroker::kDisabled);
+ return data()->AsSharedFunctionInfo()->IsSerializedForCompilation(feedback);
+}
+
void JSObjectRef::SerializeObjectCreateMap() {
if (broker()->mode() == JSHeapBroker::kDisabled) return;
CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
@@ -2333,6 +2699,12 @@ void MapRef::SerializeOwnDescriptors() {
data()->AsMap()->SerializeOwnDescriptors(broker());
}
+void MapRef::SerializePrototype() {
+ if (broker()->mode() == JSHeapBroker::kDisabled) return;
+ CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
+ data()->AsMap()->SerializePrototype(broker());
+}
+
void ModuleRef::Serialize() {
if (broker()->mode() == JSHeapBroker::kDisabled) return;
CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
@@ -2351,11 +2723,24 @@ void NativeContextRef::Serialize() {
data()->AsNativeContext()->Serialize(broker());
}
+void JSTypedArrayRef::Serialize() {
+ if (broker()->mode() == JSHeapBroker::kDisabled) return;
+ CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
+ data()->AsJSTypedArray()->Serialize(broker());
+}
+
+void JSBoundFunctionRef::Serialize() {
+ if (broker()->mode() == JSHeapBroker::kDisabled) return;
+ CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
+ data()->AsJSBoundFunction()->Serialize(broker());
+}
+
#undef BIMODAL_ACCESSOR
#undef BIMODAL_ACCESSOR_B
#undef BIMODAL_ACCESSOR_C
#undef IF_BROKER_DISABLED_ACCESS_HANDLE
#undef IF_BROKER_DISABLED_ACCESS_HANDLE_C
+#undef TRACE
} // namespace compiler
} // namespace internal
diff --git a/deps/v8/src/compiler/js-heap-broker.h b/deps/v8/src/compiler/js-heap-broker.h
index 89f3ee871e..0108575013 100644
--- a/deps/v8/src/compiler/js-heap-broker.h
+++ b/deps/v8/src/compiler/js-heap-broker.h
@@ -9,12 +9,27 @@
#include "src/base/optional.h"
#include "src/compiler/refs-map.h"
#include "src/globals.h"
+#include "src/handles.h"
#include "src/objects.h"
#include "src/objects/builtin-function-id.h"
+#include "src/objects/instance-type.h"
#include "src/zone/zone-containers.h"
namespace v8 {
namespace internal {
+
+class BytecodeArray;
+class FixedDoubleArray;
+class HeapNumber;
+class InternalizedString;
+class JSBoundFunction;
+class JSDataView;
+class JSGlobalProxy;
+class JSRegExp;
+class JSTypedArray;
+class NativeContext;
+class ScriptContextTable;
+
namespace compiler {
enum class OddballType : uint8_t {
@@ -32,9 +47,12 @@ enum class OddballType : uint8_t {
#define HEAP_BROKER_OBJECT_LIST(V) \
/* Subtypes of JSObject */ \
V(JSArray) \
+ V(JSBoundFunction) \
+ V(JSDataView) \
V(JSFunction) \
V(JSGlobalProxy) \
V(JSRegExp) \
+ V(JSTypedArray) \
/* Subtypes of Context */ \
V(NativeContext) \
/* Subtypes of FixedArray */ \
@@ -48,6 +66,7 @@ enum class OddballType : uint8_t {
/* Subtypes of Name */ \
V(InternalizedString) \
V(String) \
+ V(Symbol) \
/* Subtypes of HeapObject */ \
V(AllocationSite) \
V(Cell) \
@@ -69,6 +88,7 @@ enum class OddballType : uint8_t {
class CompilationDependencies;
class JSHeapBroker;
class ObjectData;
+class PerIsolateCompilerCache;
#define FORWARD_DECL(Name) class Name##Ref;
HEAP_BROKER_OBJECT_LIST(FORWARD_DECL)
#undef FORWARD_DECL
@@ -77,19 +97,13 @@ class ObjectRef {
public:
ObjectRef(JSHeapBroker* broker, Handle<Object> object);
ObjectRef(JSHeapBroker* broker, ObjectData* data)
- : broker_(broker), data_(data) {
+ : data_(data), broker_(broker) {
CHECK_NOT_NULL(data_);
}
- bool equals(const ObjectRef& other) const;
-
Handle<Object> object() const;
- // TODO(neis): Remove eventually.
- template <typename T>
- Handle<T> object() const {
- AllowHandleDereference handle_dereference;
- return Handle<T>::cast(object());
- }
+
+ bool equals(const ObjectRef& other) const;
bool IsSmi() const;
int AsSmi() const;
@@ -102,6 +116,8 @@ class ObjectRef {
HEAP_BROKER_OBJECT_LIST(HEAP_AS_METHOD_DECL)
#undef HEAP_AS_METHOD_DECL
+ bool IsNullOrUndefined() const;
+
bool BooleanValue() const;
double OddballToNumber() const;
@@ -110,10 +126,10 @@ class ObjectRef {
protected:
JSHeapBroker* broker() const;
ObjectData* data() const;
+ ObjectData* data_; // Should be used only by object() getters.
private:
JSHeapBroker* broker_;
- ObjectData* data_;
};
// Temporary class that carries information from a Map. We'd like to remove
@@ -154,6 +170,7 @@ class HeapObjectType {
class HeapObjectRef : public ObjectRef {
public:
using ObjectRef::ObjectRef;
+ Handle<HeapObject> object() const;
MapRef map() const;
@@ -164,6 +181,7 @@ class HeapObjectRef : public ObjectRef {
class PropertyCellRef : public HeapObjectRef {
public:
using HeapObjectRef::HeapObjectRef;
+ Handle<PropertyCell> object() const;
PropertyDetails property_details() const;
ObjectRef value() const;
@@ -172,6 +190,7 @@ class PropertyCellRef : public HeapObjectRef {
class JSObjectRef : public HeapObjectRef {
public:
using HeapObjectRef::HeapObjectRef;
+ Handle<JSObject> object() const;
double RawFastDoublePropertyAt(FieldIndex index) const;
ObjectRef RawFastPropertyAt(FieldIndex index) const;
@@ -184,9 +203,32 @@ class JSObjectRef : public HeapObjectRef {
base::Optional<MapRef> GetObjectCreateMap() const;
};
+class JSDataViewRef : public JSObjectRef {
+ public:
+ using JSObjectRef::JSObjectRef;
+ Handle<JSDataView> object() const;
+
+ size_t byte_length() const;
+ size_t byte_offset() const;
+};
+
+class JSBoundFunctionRef : public JSObjectRef {
+ public:
+ using JSObjectRef::JSObjectRef;
+ Handle<JSBoundFunction> object() const;
+
+ void Serialize();
+
+ // The following are available only after calling Serialize().
+ ObjectRef bound_target_function() const;
+ ObjectRef bound_this() const;
+ FixedArrayRef bound_arguments() const;
+};
+
class JSFunctionRef : public JSObjectRef {
public:
using JSObjectRef::JSObjectRef;
+ Handle<JSFunction> object() const;
bool has_initial_map() const;
bool has_prototype() const;
@@ -197,7 +239,8 @@ class JSFunctionRef : public JSObjectRef {
// The following are available only after calling Serialize().
ObjectRef prototype() const;
MapRef initial_map() const;
- JSGlobalProxyRef global_proxy() const;
+ ContextRef context() const;
+ NativeContextRef native_context() const;
SharedFunctionInfoRef shared() const;
int InitialMapInstanceSizeWithMinSlack() const;
};
@@ -205,6 +248,7 @@ class JSFunctionRef : public JSObjectRef {
class JSRegExpRef : public JSObjectRef {
public:
using JSObjectRef::JSObjectRef;
+ Handle<JSRegExp> object() const;
ObjectRef raw_properties_or_hash() const;
ObjectRef data() const;
@@ -216,6 +260,7 @@ class JSRegExpRef : public JSObjectRef {
class HeapNumberRef : public HeapObjectRef {
public:
using HeapObjectRef::HeapObjectRef;
+ Handle<HeapNumber> object() const;
double value() const;
};
@@ -223,6 +268,7 @@ class HeapNumberRef : public HeapObjectRef {
class MutableHeapNumberRef : public HeapObjectRef {
public:
using HeapObjectRef::HeapObjectRef;
+ Handle<MutableHeapNumber> object() const;
double value() const;
};
@@ -230,34 +276,50 @@ class MutableHeapNumberRef : public HeapObjectRef {
class ContextRef : public HeapObjectRef {
public:
using HeapObjectRef::HeapObjectRef;
- void Serialize();
+ Handle<Context> object() const;
+ void Serialize();
ContextRef previous() const;
ObjectRef get(int index) const;
};
-#define BROKER_COMPULSORY_NATIVE_CONTEXT_FIELDS(V) \
- V(JSFunction, array_function) \
- V(JSFunction, object_function) \
- V(JSFunction, promise_function) \
- V(Map, fast_aliased_arguments_map) \
- V(Map, initial_array_iterator_map) \
- V(Map, initial_string_iterator_map) \
- V(Map, iterator_result_map) \
- V(Map, js_array_holey_double_elements_map) \
- V(Map, js_array_holey_elements_map) \
- V(Map, js_array_holey_smi_elements_map) \
- V(Map, js_array_packed_double_elements_map) \
- V(Map, js_array_packed_elements_map) \
- V(Map, js_array_packed_smi_elements_map) \
- V(Map, sloppy_arguments_map) \
- V(Map, slow_object_with_null_prototype_map) \
- V(Map, strict_arguments_map) \
- V(ScriptContextTable, script_context_table)
+#define BROKER_COMPULSORY_NATIVE_CONTEXT_FIELDS(V) \
+ V(JSFunction, array_function) \
+ V(JSFunction, boolean_function) \
+ V(JSFunction, bigint_function) \
+ V(JSFunction, number_function) \
+ V(JSFunction, object_function) \
+ V(JSFunction, promise_function) \
+ V(JSFunction, promise_then) \
+ V(JSFunction, string_function) \
+ V(JSFunction, symbol_function) \
+ V(JSGlobalProxy, global_proxy_object) \
+ V(JSObject, promise_prototype) \
+ V(Map, bound_function_with_constructor_map) \
+ V(Map, bound_function_without_constructor_map) \
+ V(Map, fast_aliased_arguments_map) \
+ V(Map, initial_array_iterator_map) \
+ V(Map, initial_string_iterator_map) \
+ V(Map, iterator_result_map) \
+ V(Map, js_array_holey_double_elements_map) \
+ V(Map, js_array_holey_elements_map) \
+ V(Map, js_array_holey_smi_elements_map) \
+ V(Map, js_array_packed_double_elements_map) \
+ V(Map, js_array_packed_elements_map) \
+ V(Map, js_array_packed_smi_elements_map) \
+ V(Map, sloppy_arguments_map) \
+ V(Map, slow_object_with_null_prototype_map) \
+ V(Map, strict_arguments_map) \
+ V(ScriptContextTable, script_context_table) \
+ V(SharedFunctionInfo, promise_capability_default_reject_shared_fun) \
+ V(SharedFunctionInfo, promise_catch_finally_shared_fun) \
+ V(SharedFunctionInfo, promise_then_finally_shared_fun) \
+ V(SharedFunctionInfo, promise_capability_default_resolve_shared_fun)
// Those are set by Bootstrapper::ExportFromRuntime, which may not yet have
// happened when Turbofan is invoked via --always-opt.
#define BROKER_OPTIONAL_NATIVE_CONTEXT_FIELDS(V) \
+ V(Map, async_function_object_map) \
V(Map, map_key_iterator_map) \
V(Map, map_key_value_iterator_map) \
V(Map, map_value_iterator_map) \
@@ -271,6 +333,8 @@ class ContextRef : public HeapObjectRef {
class NativeContextRef : public ContextRef {
public:
using ContextRef::ContextRef;
+ Handle<NativeContext> object() const;
+
void Serialize();
#define DECL_ACCESSOR(type, name) type##Ref name() const;
@@ -279,16 +343,19 @@ class NativeContextRef : public ContextRef {
MapRef GetFunctionMapFromIndex(int index) const;
MapRef GetInitialJSArrayMap(ElementsKind kind) const;
+ base::Optional<JSFunctionRef> GetConstructorFunction(const MapRef& map) const;
};
class NameRef : public HeapObjectRef {
public:
using HeapObjectRef::HeapObjectRef;
+ Handle<Name> object() const;
};
class ScriptContextTableRef : public HeapObjectRef {
public:
using HeapObjectRef::HeapObjectRef;
+ Handle<ScriptContextTable> object() const;
struct LookupResult {
ContextRef context;
@@ -302,11 +369,13 @@ class ScriptContextTableRef : public HeapObjectRef {
class DescriptorArrayRef : public HeapObjectRef {
public:
using HeapObjectRef::HeapObjectRef;
+ Handle<DescriptorArray> object() const;
};
class FeedbackVectorRef : public HeapObjectRef {
public:
using HeapObjectRef::HeapObjectRef;
+ Handle<FeedbackVector> object() const;
ObjectRef get(FeedbackSlot slot) const;
@@ -316,6 +385,7 @@ class FeedbackVectorRef : public HeapObjectRef {
class AllocationSiteRef : public HeapObjectRef {
public:
using HeapObjectRef::HeapObjectRef;
+ Handle<AllocationSite> object() const;
bool PointsToLiteral() const;
PretenureFlag GetPretenureMode() const;
@@ -337,6 +407,7 @@ class AllocationSiteRef : public HeapObjectRef {
class MapRef : public HeapObjectRef {
public:
using HeapObjectRef::HeapObjectRef;
+ Handle<Map> object() const;
int instance_size() const;
InstanceType instance_type() const;
@@ -344,20 +415,35 @@ class MapRef : public HeapObjectRef {
int GetInObjectPropertiesStartInWords() const;
int NumberOfOwnDescriptors() const;
int GetInObjectPropertyOffset(int index) const;
+ int constructor_function_index() const;
+ int NextFreePropertyIndex() const;
+ int UnusedPropertyFields() const;
ElementsKind elements_kind() const;
bool is_stable() const;
+ bool is_extensible() const;
bool is_constructor() const;
bool has_prototype_slot() const;
+ bool is_access_check_needed() const;
bool is_deprecated() const;
bool CanBeDeprecated() const;
bool CanTransition() const;
bool IsInobjectSlackTrackingInProgress() const;
bool is_dictionary_map() const;
bool IsFixedCowArrayMap() const;
+ bool IsPrimitiveMap() const;
bool is_undetectable() const;
bool is_callable() const;
+ bool has_hidden_prototype() const;
+ bool supports_fast_array_iteration() const;
+ bool supports_fast_array_resize() const;
+
+#define DEF_TESTER(Type, ...) bool Is##Type##Map() const;
+ INSTANCE_TYPE_CHECKERS(DEF_TESTER)
+#undef DEF_TESTER
+
+ ObjectRef GetConstructor() const;
- ObjectRef constructor_or_backpointer() const;
+ void SerializePrototype();
ObjectRef prototype() const;
OddballType oddball_type() const;
@@ -377,6 +463,7 @@ class MapRef : public HeapObjectRef {
class FixedArrayBaseRef : public HeapObjectRef {
public:
using HeapObjectRef::HeapObjectRef;
+ Handle<FixedArrayBase> object() const;
int length() const;
};
@@ -384,6 +471,7 @@ class FixedArrayBaseRef : public HeapObjectRef {
class FixedArrayRef : public FixedArrayBaseRef {
public:
using FixedArrayBaseRef::FixedArrayBaseRef;
+ Handle<FixedArray> object() const;
ObjectRef get(int i) const;
};
@@ -391,6 +479,7 @@ class FixedArrayRef : public FixedArrayBaseRef {
class FixedDoubleArrayRef : public FixedArrayBaseRef {
public:
using FixedArrayBaseRef::FixedArrayBaseRef;
+ Handle<FixedDoubleArray> object() const;
double get_scalar(int i) const;
bool is_the_hole(int i) const;
@@ -399,6 +488,7 @@ class FixedDoubleArrayRef : public FixedArrayBaseRef {
class BytecodeArrayRef : public FixedArrayBaseRef {
public:
using FixedArrayBaseRef::FixedArrayBaseRef;
+ Handle<BytecodeArray> object() const;
int register_count() const;
};
@@ -406,6 +496,7 @@ class BytecodeArrayRef : public FixedArrayBaseRef {
class JSArrayRef : public JSObjectRef {
public:
using JSObjectRef::JSObjectRef;
+ Handle<JSArray> object() const;
ObjectRef length() const;
};
@@ -413,6 +504,7 @@ class JSArrayRef : public JSObjectRef {
class ScopeInfoRef : public HeapObjectRef {
public:
using HeapObjectRef::HeapObjectRef;
+ Handle<ScopeInfo> object() const;
int ContextLength() const;
};
@@ -434,17 +526,22 @@ class ScopeInfoRef : public HeapObjectRef {
class SharedFunctionInfoRef : public HeapObjectRef {
public:
using HeapObjectRef::HeapObjectRef;
+ Handle<SharedFunctionInfo> object() const;
int builtin_id() const;
BytecodeArrayRef GetBytecodeArray() const;
#define DECL_ACCESSOR(type, name) type name() const;
BROKER_SFI_FIELDS(DECL_ACCESSOR)
#undef DECL_ACCESSOR
+
+ bool IsSerializedForCompilation(FeedbackVectorRef feedback) const;
+ void SetSerializedForCompilation(FeedbackVectorRef feedback);
};
class StringRef : public NameRef {
public:
using NameRef::NameRef;
+ Handle<String> object() const;
int length() const;
uint16_t GetFirstChar();
@@ -453,9 +550,30 @@ class StringRef : public NameRef {
bool IsExternalString() const;
};
+class SymbolRef : public NameRef {
+ public:
+ using NameRef::NameRef;
+ Handle<Symbol> object() const;
+};
+
+class JSTypedArrayRef : public JSObjectRef {
+ public:
+ using JSObjectRef::JSObjectRef;
+ Handle<JSTypedArray> object() const;
+
+ bool is_on_heap() const;
+ size_t length_value() const;
+ void* elements_external_pointer() const;
+
+ void Serialize();
+
+ HeapObjectRef buffer() const;
+};
+
class ModuleRef : public HeapObjectRef {
public:
using HeapObjectRef::HeapObjectRef;
+ Handle<Module> object() const;
void Serialize();
@@ -465,24 +583,31 @@ class ModuleRef : public HeapObjectRef {
class CellRef : public HeapObjectRef {
public:
using HeapObjectRef::HeapObjectRef;
+ Handle<Cell> object() const;
+
+ ObjectRef value() const;
};
class JSGlobalProxyRef : public JSObjectRef {
public:
using JSObjectRef::JSObjectRef;
+ Handle<JSGlobalProxy> object() const;
};
class CodeRef : public HeapObjectRef {
public:
using HeapObjectRef::HeapObjectRef;
+ Handle<Code> object() const;
};
class InternalizedStringRef : public StringRef {
public:
using StringRef::StringRef;
-};
+ Handle<InternalizedString> object() const;
-class PerIsolateCompilerCache;
+ uint32_t array_index() const;
+ static const uint32_t kNotAnArrayIndex = -1; // 2^32-1 is not a valid index.
+};
class V8_EXPORT_PRIVATE JSHeapBroker : public NON_EXPORTED_BASE(ZoneObject) {
public:
@@ -507,9 +632,14 @@ class V8_EXPORT_PRIVATE JSHeapBroker : public NON_EXPORTED_BASE(ZoneObject) {
// Never returns nullptr.
ObjectData* GetOrCreateData(Handle<Object>);
// Like the previous but wraps argument in handle first (for convenience).
- ObjectData* GetOrCreateData(Object*);
+ ObjectData* GetOrCreateData(Object);
+
+ // Check if {object} is any native context's %ArrayPrototype% or
+ // %ObjectPrototype%.
+ bool IsArrayOrObjectPrototype(const JSObjectRef& object) const;
+
+ std::ostream& Trace() const;
- void Trace(const char* format, ...) const;
void IncrementTracingIndentation();
void DecrementTracingIndentation();
@@ -519,33 +649,41 @@ class V8_EXPORT_PRIVATE JSHeapBroker : public NON_EXPORTED_BASE(ZoneObject) {
friend class ObjectData;
void SerializeShareableObjects();
+ void CollectArrayAndObjectPrototypes();
Isolate* const isolate_;
Zone* const broker_zone_;
Zone* current_zone_;
base::Optional<NativeContextRef> native_context_;
RefsMap* refs_;
+ ZoneUnorderedSet<Handle<JSObject>, Handle<JSObject>::hash,
+ Handle<JSObject>::equal_to>
+ array_and_object_prototypes_;
BrokerMode mode_ = kDisabled;
- unsigned tracing_indentation_ = 0;
+ unsigned trace_indentation_ = 0;
PerIsolateCompilerCache* compiler_cache_;
static const size_t kMinimalRefsBucketCount = 8; // must be power of 2
static const size_t kInitialRefsBucketCount = 1024; // must be power of 2
};
-#define ASSIGN_RETURN_NO_CHANGE_IF_DATA_MISSING(something_var, \
- optionally_something) \
- auto optionally_something_ = optionally_something; \
- if (!optionally_something_) \
- return NoChangeBecauseOfMissingData(js_heap_broker(), __FUNCTION__, \
- __LINE__); \
+#define ASSIGN_RETURN_NO_CHANGE_IF_DATA_MISSING(something_var, \
+ optionally_something) \
+ auto optionally_something_ = optionally_something; \
+ if (!optionally_something_) \
+ return NoChangeBecauseOfMissingData(broker(), __FUNCTION__, __LINE__); \
something_var = *optionally_something_;
class Reduction;
Reduction NoChangeBecauseOfMissingData(JSHeapBroker* broker,
const char* function, int line);
+#define TRACE_BROKER(broker, x) \
+ do { \
+ if (FLAG_trace_heap_broker) broker->Trace() << x << '\n'; \
+ } while (false)
+
} // namespace compiler
} // namespace internal
} // namespace v8
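Note: the broker's tracing interface above changes from a printf-style variadic Trace(const char* format, ...) to a std::ostream& Trace(), which is what lets TRACE_BROKER accept streamed expressions. A minimal sketch of a call site under that reading (the wrapper function is illustrative, not part of the patch):

  // Illustrative call site; output appears only when --trace-heap-broker
  // (FLAG_trace_heap_broker) is set. Because the macro guards the stream
  // with the flag, the streamed arguments are not evaluated otherwise.
  void TraceSerializedCount(JSHeapBroker* broker, int count) {
    TRACE_BROKER(broker, "serialized " << count << " objects");
  }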
diff --git a/deps/v8/src/compiler/js-heap-copy-reducer.cc b/deps/v8/src/compiler/js-heap-copy-reducer.cc
index ca510d5054..1e701113d2 100644
--- a/deps/v8/src/compiler/js-heap-copy-reducer.cc
+++ b/deps/v8/src/compiler/js-heap-copy-reducer.cc
@@ -8,6 +8,7 @@
#include "src/compiler/js-heap-broker.h"
#include "src/compiler/js-operator.h"
#include "src/compiler/node-properties.h"
+#include "src/compiler/simplified-operator.h"
#include "src/heap/factory-inl.h"
#include "src/objects/map.h"
#include "src/objects/scope-info.h"
@@ -99,6 +100,42 @@ Reduction JSHeapCopyReducer::Reduce(Node* node) {
NameRef(broker(), p.name());
break;
}
+ case IrOpcode::kStoreField:
+ case IrOpcode::kLoadField: {
+ FieldAccess access = FieldAccessOf(node->op());
+ Handle<Map> map_handle;
+ if (access.map.ToHandle(&map_handle)) {
+ MapRef(broker(), map_handle);
+ }
+ Handle<Name> name_handle;
+ if (access.name.ToHandle(&name_handle)) {
+ NameRef(broker(), name_handle);
+ }
+ break;
+ }
+ case IrOpcode::kMapGuard: {
+ ZoneHandleSet<Map> const maps = MapGuardMapsOf(node->op()).maps();
+ for (Handle<Map> map : maps) {
+ MapRef(broker(), map);
+ }
+ break;
+ }
+ case IrOpcode::kCheckMaps: {
+ ZoneHandleSet<Map> const maps = CheckMapsParametersOf(node->op()).maps();
+ for (Handle<Map> map : maps) {
+ MapRef(broker(), map);
+ }
+ break;
+ }
+ case IrOpcode::kCompareMaps: {
+ ZoneHandleSet<Map> const maps =
+ CompareMapsParametersOf(node->op()).maps();
+ for (Handle<Map> map : maps) {
+ MapRef(broker(), map);
+ }
+ break;
+ }
+
default:
break;
}
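Note: all four new cases use the copy reducer's standing idiom: constructing a Ref against the broker registers the underlying handle for later concurrent use, and the temporary itself is discarded. A condensed sketch of that idiom (the helper name is illustrative):

  // Illustrative helper: register every map in a ZoneHandleSet with the
  // broker, exactly as the kMapGuard/kCheckMaps/kCompareMaps cases do.
  void RegisterMaps(JSHeapBroker* broker, ZoneHandleSet<Map> const& maps) {
    for (Handle<Map> map : maps) {
      MapRef(broker, map);  // constructed for its registration side effect
    }
  }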
diff --git a/deps/v8/src/compiler/js-inlining-heuristic.cc b/deps/v8/src/compiler/js-inlining-heuristic.cc
index 588626e292..413cd2fc6b 100644
--- a/deps/v8/src/compiler/js-inlining-heuristic.cc
+++ b/deps/v8/src/compiler/js-inlining-heuristic.cc
@@ -23,11 +23,15 @@ namespace compiler {
namespace {
int CollectFunctions(Node* node, Handle<JSFunction>* functions,
- int functions_size, Handle<SharedFunctionInfo>& shared) {
+ Handle<BytecodeArray>* bytecode, int functions_size,
+ Handle<SharedFunctionInfo>& shared, Isolate* isolate) {
DCHECK_NE(0, functions_size);
HeapObjectMatcher m(node);
if (m.HasValue() && m.Value()->IsJSFunction()) {
functions[0] = Handle<JSFunction>::cast(m.Value());
+ if (functions[0]->shared()->HasBytecodeArray()) {
+ bytecode[0] = handle(functions[0]->shared()->GetBytecodeArray(), isolate);
+ }
return 1;
}
if (m.IsPhi()) {
@@ -37,6 +41,10 @@ int CollectFunctions(Node* node, Handle<JSFunction>* functions,
HeapObjectMatcher m(node->InputAt(n));
if (!m.HasValue() || !m.Value()->IsJSFunction()) return 0;
functions[n] = Handle<JSFunction>::cast(m.Value());
+ if (functions[n]->shared()->HasBytecodeArray()) {
+ bytecode[n] =
+ handle(functions[n]->shared()->GetBytecodeArray(), isolate);
+ }
}
return value_input_count;
}
@@ -44,36 +52,19 @@ int CollectFunctions(Node* node, Handle<JSFunction>* functions,
CreateClosureParameters const& p = CreateClosureParametersOf(m.op());
functions[0] = Handle<JSFunction>::null();
shared = p.shared_info();
+ if (shared->HasBytecodeArray()) {
+ bytecode[0] = handle(shared->GetBytecodeArray(), isolate);
+ }
return 1;
}
return 0;
}
-bool CanInlineFunction(Handle<SharedFunctionInfo> shared) {
- // Built-in functions are handled by the JSCallReducer.
- if (shared->HasBuiltinFunctionId()) return false;
-
- // Only choose user code for inlining.
- if (!shared->IsUserJavaScript()) return false;
-
- // If there is no bytecode array, it is either not compiled or it is compiled
- // with WebAssembly for the asm.js pipeline. In either case we don't want to
- // inline.
- if (!shared->HasBytecodeArray()) return false;
-
- // Quick check on the size of the bytecode to avoid inlining large functions.
- if (shared->GetBytecodeArray()->length() > FLAG_max_inlined_bytecode_size) {
- return false;
- }
-
- return true;
-}
-
-bool IsSmallInlineFunction(Handle<SharedFunctionInfo> shared) {
+bool IsSmallInlineFunction(Handle<BytecodeArray> bytecode) {
// Forcibly inline small functions.
// Don't forcibly inline functions that weren't compiled yet.
- if (shared->HasBytecodeArray() && shared->GetBytecodeArray()->length() <=
- FLAG_max_inlined_bytecode_size_small) {
+ if (!bytecode.is_null() &&
+ bytecode->length() <= FLAG_max_inlined_bytecode_size_small) {
return true;
}
return false;
@@ -92,8 +83,9 @@ Reduction JSInliningHeuristic::Reduce(Node* node) {
Node* callee = node->InputAt(0);
Candidate candidate;
candidate.node = node;
- candidate.num_functions = CollectFunctions(
- callee, candidate.functions, kMaxCallPolymorphism, candidate.shared_info);
+ candidate.num_functions =
+ CollectFunctions(callee, candidate.functions, candidate.bytecode,
+ kMaxCallPolymorphism, candidate.shared_info, isolate());
if (candidate.num_functions == 0) {
return NoChange();
} else if (candidate.num_functions > 1 && !FLAG_polymorphic_inlining) {
@@ -114,7 +106,7 @@ Reduction JSInliningHeuristic::Reduce(Node* node) {
candidate.functions[i].is_null()
? candidate.shared_info
: handle(candidate.functions[i]->shared(), isolate());
- candidate.can_inline_function[i] = CanInlineFunction(shared);
+ candidate.can_inline_function[i] = shared->IsInlineable();
// Do not allow direct recursion i.e. f() -> f(). We still allow indirect
// recursion like f() -> g() -> f(). The indirect recursion is helpful in
// cases where f() is a small dispatch function that calls the appropriate
@@ -130,11 +122,12 @@ Reduction JSInliningHeuristic::Reduce(Node* node) {
node->id(), node->op()->mnemonic());
candidate.can_inline_function[i] = false;
}
+ Handle<BytecodeArray> bytecode = candidate.bytecode[i];
if (candidate.can_inline_function[i]) {
can_inline = true;
- candidate.total_size += shared->GetBytecodeArray()->length();
+ candidate.total_size += bytecode->length();
}
- if (!IsSmallInlineFunction(shared)) {
+ if (!IsSmallInlineFunction(bytecode)) {
small_inline = false;
}
}
@@ -604,13 +597,9 @@ Reduction JSInliningHeuristic::InlineCandidate(Candidate const& candidate,
int const num_calls = candidate.num_functions;
Node* const node = candidate.node;
if (num_calls == 1) {
- Handle<SharedFunctionInfo> shared =
- candidate.functions[0].is_null()
- ? candidate.shared_info
- : handle(candidate.functions[0]->shared(), isolate());
Reduction const reduction = inliner_.ReduceJSCall(node);
if (reduction.Changed()) {
- cumulative_count_ += shared->GetBytecodeArray()->length();
+ cumulative_count_ += candidate.bytecode[0]->length();
}
return reduction;
}
@@ -669,7 +658,6 @@ Reduction JSInliningHeuristic::InlineCandidate(Candidate const& candidate,
// Inline the individual, cloned call sites.
for (int i = 0; i < num_calls; ++i) {
- Handle<JSFunction> function = candidate.functions[i];
Node* node = calls[i];
if (small_function ||
(candidate.can_inline_function[i] &&
@@ -679,7 +667,7 @@ Reduction JSInliningHeuristic::InlineCandidate(Candidate const& candidate,
// Killing the call node is not strictly necessary, but it is safer to
// make sure we do not resurrect the node.
node->Kill();
- cumulative_count_ += function->shared()->GetBytecodeArray()->length();
+ cumulative_count_ += candidate.bytecode[i]->length();
}
}
}
@@ -720,7 +708,7 @@ void JSInliningHeuristic::PrintCandidates() {
candidate.functions[i].is_null()
? candidate.shared_info
: handle(candidate.functions[i]->shared(), isolate());
- PrintF(" - size:%d, name: %s\n", shared->GetBytecodeArray()->length(),
+ PrintF(" - size:%d, name: %s\n", candidate.bytecode[i]->length(),
shared->DebugName()->ToCString().get());
}
}
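Note: CollectFunctions now fills a parallel array of bytecode handles while it gathers the callees, and the old CanInlineFunction predicate is folded into SharedFunctionInfo::IsInlineable. Because a callee may never have been compiled, its slot can hold a null handle, so consumers guard before dereferencing; a sketch of that guard, mirroring IsSmallInlineFunction above (the name is illustrative):

  // Illustrative null-guarded size check on a possibly-empty bytecode slot.
  bool FitsSmallInlineLimit(Handle<BytecodeArray> bytecode) {
    return !bytecode.is_null() &&
           bytecode->length() <= FLAG_max_inlined_bytecode_size_small;
  }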
diff --git a/deps/v8/src/compiler/js-inlining-heuristic.h b/deps/v8/src/compiler/js-inlining-heuristic.h
index 68919c9aec..48454316bd 100644
--- a/deps/v8/src/compiler/js-inlining-heuristic.h
+++ b/deps/v8/src/compiler/js-inlining-heuristic.h
@@ -16,10 +16,11 @@ class JSInliningHeuristic final : public AdvancedReducer {
enum Mode { kGeneralInlining, kRestrictedInlining, kStressInlining };
JSInliningHeuristic(Editor* editor, Mode mode, Zone* local_zone,
OptimizedCompilationInfo* info, JSGraph* jsgraph,
+ JSHeapBroker* broker,
SourcePositionTable* source_positions)
: AdvancedReducer(editor),
mode_(mode),
- inliner_(editor, local_zone, info, jsgraph, source_positions),
+ inliner_(editor, local_zone, info, jsgraph, broker, source_positions),
candidates_(local_zone),
seen_(local_zone),
source_positions_(source_positions),
@@ -43,6 +44,9 @@ class JSInliningHeuristic final : public AdvancedReducer {
// In the case of polymorphic inlining, this tells if each of the
// functions could be inlined.
bool can_inline_function[kMaxCallPolymorphism];
+ // Strong references to bytecode to ensure it is not flushed from SFI
+ // while choosing inlining candidates.
+ Handle<BytecodeArray> bytecode[kMaxCallPolymorphism];
// TODO(2206): For now polymorphic inlining is treated orthogonally to
// inlining based on SharedFunctionInfo. This should be unified and the
// above array should be switched to SharedFunctionInfo instead. Currently
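Note: the new bytecode array keeps strong handles in lockstep with functions, so bytecode cannot be flushed from the SharedFunctionInfo between candidate collection and the actual inlining. A sketch of a consumer walking the pair (illustrative only; it mirrors the cumulative_count_ updates in js-inlining-heuristic.cc and assumes access to the Candidate struct):

  // Illustrative: sum the bytecode sizes of a candidate's inlineable targets.
  int TotalInlineableSize(JSInliningHeuristic::Candidate const& candidate) {
    int size = 0;
    for (int i = 0; i < candidate.num_functions; ++i) {
      if (candidate.can_inline_function[i] && !candidate.bytecode[i].is_null()) {
        size += candidate.bytecode[i]->length();
      }
    }
    return size;
  }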
diff --git a/deps/v8/src/compiler/js-inlining.cc b/deps/v8/src/compiler/js-inlining.cc
index 3c5b1b8046..12ca2d42ff 100644
--- a/deps/v8/src/compiler/js-inlining.cc
+++ b/deps/v8/src/compiler/js-inlining.cc
@@ -17,6 +17,7 @@
#include "src/compiler/operator-properties.h"
#include "src/compiler/simplified-operator.h"
#include "src/isolate-inl.h"
+#include "src/objects/feedback-cell-inl.h"
#include "src/optimized-compilation-info.h"
#include "src/parsing/parse-info.h"
@@ -288,6 +289,9 @@ bool JSInliner::DetermineCallTarget(
if (match.HasValue() && match.Value()->IsJSFunction()) {
Handle<JSFunction> function = Handle<JSFunction>::cast(match.Value());
+ // Don't inline if the function has never run.
+ if (!function->has_feedback_vector()) return false;
+
// Disallow cross native-context inlining for now. This means that all parts
// of the resulting code will operate on the same global object. This also
// prevents cross context leaks, where we could inline functions from a
@@ -296,8 +300,7 @@ bool JSInliner::DetermineCallTarget(
// TODO(turbofan): We might want to revisit this restriction later when we
// have a need for this, and we know how to model different native contexts
// in the same graph in a compositional way.
- if (function->context()->native_context() !=
- info_->context()->native_context()) {
+ if (function->native_context() != info_->native_context()) {
return false;
}
@@ -340,10 +343,7 @@ void JSInliner::DetermineCallContext(
if (match.HasValue() && match.Value()->IsJSFunction()) {
Handle<JSFunction> function = Handle<JSFunction>::cast(match.Value());
-
- // If the target function was never invoked, its feedback cell array might
- // not contain a feedback vector. We ensure at this point that it's created.
- JSFunction::EnsureFeedbackVector(function);
+ CHECK(function->has_feedback_vector());
// The inlinee specializes to the context from the JSFunction object.
context_out = jsgraph()->Constant(handle(function->context(), isolate()));
@@ -376,7 +376,7 @@ Reduction JSInliner::Reduce(Node* node) {
}
Handle<Context> JSInliner::native_context() const {
- return handle(info_->context()->native_context(), isolate());
+ return handle(info_->native_context(), isolate());
}
Reduction JSInliner::ReduceJSCall(Node* node) {
@@ -387,13 +387,7 @@ Reduction JSInliner::ReduceJSCall(Node* node) {
// Determine the call target.
if (!DetermineCallTarget(node, shared_info)) return NoChange();
- // Function must be inlineable.
- if (!shared_info->IsInlineable()) {
- TRACE("Not inlining %s into %s because callee is not inlineable\n",
- shared_info->DebugName()->ToCString().get(),
- info_->shared_info()->DebugName()->ToCString().get());
- return NoChange();
- }
+ DCHECK(shared_info->IsInlineable());
// Constructor must be constructable.
if (node->opcode() == IrOpcode::kJSConstruct &&
@@ -453,8 +447,10 @@ Reduction JSInliner::ReduceJSCall(Node* node) {
return NoChange();
}
- if (!shared_info->is_compiled() &&
- !Compiler::Compile(shared_info, Compiler::CLEAR_EXCEPTION)) {
+ IsCompiledScope is_compiled_scope(shared_info->is_compiled_scope());
+ if (!is_compiled_scope.is_compiled() &&
+ !Compiler::Compile(shared_info, Compiler::CLEAR_EXCEPTION,
+ &is_compiled_scope)) {
TRACE("Not inlining %s into %s because bytecode generation failed\n",
shared_info->DebugName()->ToCString().get(),
info_->shared_info()->DebugName()->ToCString().get());
@@ -469,14 +465,29 @@ Reduction JSInliner::ReduceJSCall(Node* node) {
info_->shared_info()->DebugName()->ToCString().get(),
(exception_target != nullptr) ? " (inside try-block)" : "");
+ // Get the bytecode array.
+ Handle<BytecodeArray> bytecode_array =
+ handle(shared_info->GetBytecodeArray(), isolate());
+
// Determine the targets feedback vector and its context.
Node* context;
Handle<FeedbackVector> feedback_vector;
DetermineCallContext(node, context, feedback_vector);
+ if (FLAG_concurrent_inlining) {
+ SharedFunctionInfoRef sfi(broker(), shared_info);
+ FeedbackVectorRef feedback(broker(), feedback_vector);
+ if (!sfi.IsSerializedForCompilation(feedback)) {
+ TRACE_BROKER(broker(),
+ "Would have missed opportunity to inline a function ("
+ << Brief(*sfi.object()) << " with "
+ << Brief(*feedback.object()) << ")");
+ }
+ }
+
// Remember that we inlined this function.
int inlining_id = info_->AddInlinedFunction(
- shared_info, source_positions_->GetSourcePosition(node));
+ shared_info, bytecode_array, source_positions_->GetSourcePosition(node));
// Create the subgraph for the inlinee.
Node* start;
@@ -490,8 +501,8 @@ Reduction JSInliner::ReduceJSCall(Node* node) {
}
CallFrequency frequency = call.frequency();
BytecodeGraphBuilder graph_builder(
- zone(), shared_info, feedback_vector, BailoutId::None(), jsgraph(),
- frequency, source_positions_, native_context(), inlining_id,
+ zone(), bytecode_array, shared_info, feedback_vector, BailoutId::None(),
+ jsgraph(), frequency, source_positions_, native_context(), inlining_id,
flags, false, info_->is_analyze_environment_liveness());
graph_builder.CreateGraph();
@@ -609,7 +620,7 @@ Reduction JSInliner::ReduceJSCall(Node* node) {
if (node->opcode() == IrOpcode::kJSCall &&
is_sloppy(shared_info->language_mode()) && !shared_info->native()) {
Node* effect = NodeProperties::GetEffectInput(node);
- if (NodeProperties::CanBePrimitive(isolate(), call.receiver(), effect)) {
+ if (NodeProperties::CanBePrimitive(broker(), call.receiver(), effect)) {
CallParameters const& p = CallParametersOf(node->op());
Node* global_proxy = jsgraph()->HeapConstant(
handle(info_->native_context()->global_proxy(), isolate()));
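Note: two preconditions tighten in this file: DetermineCallTarget now refuses callees without a feedback vector (the function never ran), which lets DetermineCallContext turn the old EnsureFeedbackVector call into a CHECK. Under FLAG_concurrent_inlining the reducer also probes the broker's serialization state; a sketch of that probe as a standalone helper (the wrapper is hypothetical, the calls are the ones in the hunk above):

  // Hypothetical extraction of the concurrent-inlining probe above.
  void ProbeSerialization(JSHeapBroker* broker,
                          Handle<SharedFunctionInfo> shared_info,
                          Handle<FeedbackVector> feedback_vector) {
    SharedFunctionInfoRef sfi(broker, shared_info);
    FeedbackVectorRef feedback(broker, feedback_vector);
    if (!sfi.IsSerializedForCompilation(feedback)) {
      TRACE_BROKER(broker, "would have missed inlining opportunity for "
                               << Brief(*sfi.object()));
    }
  }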
diff --git a/deps/v8/src/compiler/js-inlining.h b/deps/v8/src/compiler/js-inlining.h
index baca345f27..083fa2892b 100644
--- a/deps/v8/src/compiler/js-inlining.h
+++ b/deps/v8/src/compiler/js-inlining.h
@@ -24,11 +24,13 @@ class SourcePositionTable;
class JSInliner final : public AdvancedReducer {
public:
JSInliner(Editor* editor, Zone* local_zone, OptimizedCompilationInfo* info,
- JSGraph* jsgraph, SourcePositionTable* source_positions)
+ JSGraph* jsgraph, JSHeapBroker* broker,
+ SourcePositionTable* source_positions)
: AdvancedReducer(editor),
local_zone_(local_zone),
info_(info),
jsgraph_(jsgraph),
+ broker_(broker),
source_positions_(source_positions) {}
const char* reducer_name() const override { return "JSInliner"; }
@@ -47,12 +49,15 @@ class JSInliner final : public AdvancedReducer {
SimplifiedOperatorBuilder* simplified() const;
Graph* graph() const;
JSGraph* jsgraph() const { return jsgraph_; }
+ // TODO(neis): Make heap broker a component of JSGraph?
+ JSHeapBroker* broker() const { return broker_; }
Isolate* isolate() const { return jsgraph_->isolate(); }
Handle<Context> native_context() const;
Zone* const local_zone_;
OptimizedCompilationInfo* info_;
JSGraph* const jsgraph_;
+ JSHeapBroker* const broker_;
SourcePositionTable* const source_positions_;
bool DetermineCallTarget(Node* node,
diff --git a/deps/v8/src/compiler/js-intrinsic-lowering.cc b/deps/v8/src/compiler/js-intrinsic-lowering.cc
index b132cfa6e9..0ba3e7dfda 100644
--- a/deps/v8/src/compiler/js-intrinsic-lowering.cc
+++ b/deps/v8/src/compiler/js-intrinsic-lowering.cc
@@ -38,6 +38,20 @@ Reduction JSIntrinsicLowering::Reduce(Node* node) {
return ReduceGeneratorClose(node);
case Runtime::kInlineCreateJSGeneratorObject:
return ReduceCreateJSGeneratorObject(node);
+ case Runtime::kInlineAsyncFunctionAwaitCaught:
+ return ReduceAsyncFunctionAwaitCaught(node);
+ case Runtime::kInlineAsyncFunctionAwaitUncaught:
+ return ReduceAsyncFunctionAwaitUncaught(node);
+ case Runtime::kInlineAsyncFunctionEnter:
+ return ReduceAsyncFunctionEnter(node);
+ case Runtime::kInlineAsyncFunctionReject:
+ return ReduceAsyncFunctionReject(node);
+ case Runtime::kInlineAsyncFunctionResolve:
+ return ReduceAsyncFunctionResolve(node);
+ case Runtime::kInlineAsyncGeneratorAwaitCaught:
+ return ReduceAsyncGeneratorAwaitCaught(node);
+ case Runtime::kInlineAsyncGeneratorAwaitUncaught:
+ return ReduceAsyncGeneratorAwaitUncaught(node);
case Runtime::kInlineAsyncGeneratorReject:
return ReduceAsyncGeneratorReject(node);
case Runtime::kInlineAsyncGeneratorResolve:
@@ -54,10 +68,6 @@ Reduction JSIntrinsicLowering::Reduce(Node* node) {
return ReduceIsJSReceiver(node);
case Runtime::kInlineIsSmi:
return ReduceIsSmi(node);
- case Runtime::kInlineRejectPromise:
- return ReduceRejectPromise(node);
- case Runtime::kInlineResolvePromise:
- return ReduceResolvePromise(node);
case Runtime::kInlineToLength:
return ReduceToLength(node);
case Runtime::kInlineToObject:
@@ -127,6 +137,50 @@ Reduction JSIntrinsicLowering::ReduceGeneratorClose(Node* node) {
return Change(node, op, generator, closed, effect, control);
}
+Reduction JSIntrinsicLowering::ReduceAsyncFunctionAwaitCaught(Node* node) {
+ return Change(
+ node,
+ Builtins::CallableFor(isolate(), Builtins::kAsyncFunctionAwaitCaught), 0);
+}
+
+Reduction JSIntrinsicLowering::ReduceAsyncFunctionAwaitUncaught(Node* node) {
+ return Change(
+ node,
+ Builtins::CallableFor(isolate(), Builtins::kAsyncFunctionAwaitUncaught),
+ 0);
+}
+
+Reduction JSIntrinsicLowering::ReduceAsyncFunctionEnter(Node* node) {
+ NodeProperties::ChangeOp(node, javascript()->AsyncFunctionEnter());
+ return Changed(node);
+}
+
+Reduction JSIntrinsicLowering::ReduceAsyncFunctionReject(Node* node) {
+ RelaxControls(node);
+ NodeProperties::ChangeOp(node, javascript()->AsyncFunctionReject());
+ return Changed(node);
+}
+
+Reduction JSIntrinsicLowering::ReduceAsyncFunctionResolve(Node* node) {
+ RelaxControls(node);
+ NodeProperties::ChangeOp(node, javascript()->AsyncFunctionResolve());
+ return Changed(node);
+}
+
+Reduction JSIntrinsicLowering::ReduceAsyncGeneratorAwaitCaught(Node* node) {
+ return Change(
+ node,
+ Builtins::CallableFor(isolate(), Builtins::kAsyncGeneratorAwaitCaught),
+ 0);
+}
+
+Reduction JSIntrinsicLowering::ReduceAsyncGeneratorAwaitUncaught(Node* node) {
+ return Change(
+ node,
+ Builtins::CallableFor(isolate(), Builtins::kAsyncGeneratorAwaitUncaught),
+ 0);
+}
+
Reduction JSIntrinsicLowering::ReduceAsyncGeneratorReject(Node* node) {
return Change(
node, Builtins::CallableFor(isolate(), Builtins::kAsyncGeneratorReject),
@@ -206,18 +260,6 @@ Reduction JSIntrinsicLowering::ReduceIsSmi(Node* node) {
return Change(node, simplified()->ObjectIsSmi());
}
-Reduction JSIntrinsicLowering::ReduceRejectPromise(Node* node) {
- RelaxControls(node);
- NodeProperties::ChangeOp(node, javascript()->RejectPromise());
- return Changed(node);
-}
-
-Reduction JSIntrinsicLowering::ReduceResolvePromise(Node* node) {
- RelaxControls(node);
- NodeProperties::ChangeOp(node, javascript()->ResolvePromise());
- return Changed(node);
-}
-
Reduction JSIntrinsicLowering::Change(Node* node, const Operator* op) {
// Replace all effect uses of {node} with the effect dependency.
RelaxEffectsAndControls(node);
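Note: the new async reductions above come in two shapes: the await variants are rewritten into calls of the matching builtin via Change(node, Callable, 0), while Enter/Reject/Resolve are re-labelled in place with NodeProperties::ChangeOp, and the old RejectPromise/ResolvePromise intrinsic cases are dropped in favour of the new AsyncFunctionReject/Resolve operators. A sketch of the ChangeOp shape as a hypothetical member following the same pattern (it is not declared in the header; it simply restates ReduceAsyncFunctionResolve):

  // Hypothetical member in the style of ReduceAsyncFunctionResolve: relax
  // control uses, then swap the operator without rebuilding the node.
  Reduction JSIntrinsicLowering::ReduceExampleResolve(Node* node) {
    RelaxControls(node);
    NodeProperties::ChangeOp(node, javascript()->AsyncFunctionResolve());
    return Changed(node);
  }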
diff --git a/deps/v8/src/compiler/js-intrinsic-lowering.h b/deps/v8/src/compiler/js-intrinsic-lowering.h
index f71af1156c..7313264c08 100644
--- a/deps/v8/src/compiler/js-intrinsic-lowering.h
+++ b/deps/v8/src/compiler/js-intrinsic-lowering.h
@@ -43,16 +43,20 @@ class V8_EXPORT_PRIVATE JSIntrinsicLowering final
Reduction ReduceDeoptimizeNow(Node* node);
Reduction ReduceCreateJSGeneratorObject(Node* node);
Reduction ReduceGeneratorClose(Node* node);
+ Reduction ReduceAsyncFunctionAwaitCaught(Node* node);
+ Reduction ReduceAsyncFunctionAwaitUncaught(Node* node);
+ Reduction ReduceAsyncFunctionEnter(Node* node);
+ Reduction ReduceAsyncFunctionReject(Node* node);
+ Reduction ReduceAsyncFunctionResolve(Node* node);
+ Reduction ReduceAsyncGeneratorAwaitCaught(Node* node);
+ Reduction ReduceAsyncGeneratorAwaitUncaught(Node* node);
Reduction ReduceAsyncGeneratorReject(Node* node);
Reduction ReduceAsyncGeneratorResolve(Node* node);
Reduction ReduceAsyncGeneratorYield(Node* node);
- Reduction ReduceGeneratorSaveInputForAwait(Node* node);
Reduction ReduceGeneratorGetResumeMode(Node* node);
Reduction ReduceIsInstanceType(Node* node, InstanceType instance_type);
Reduction ReduceIsJSReceiver(Node* node);
Reduction ReduceIsSmi(Node* node);
- Reduction ReduceRejectPromise(Node* node);
- Reduction ReduceResolvePromise(Node* node);
Reduction ReduceToLength(Node* node);
Reduction ReduceToObject(Node* node);
Reduction ReduceToString(Node* node);
diff --git a/deps/v8/src/compiler/js-native-context-specialization.cc b/deps/v8/src/compiler/js-native-context-specialization.cc
index d449e72367..c78970f0c9 100644
--- a/deps/v8/src/compiler/js-native-context-specialization.cc
+++ b/deps/v8/src/compiler/js-native-context-specialization.cc
@@ -21,6 +21,7 @@
#include "src/feedback-vector.h"
#include "src/field-index-inl.h"
#include "src/isolate-inl.h"
+#include "src/objects/heap-number.h"
#include "src/objects/js-array-buffer-inl.h"
#include "src/objects/js-array-inl.h"
#include "src/objects/templates.h"
@@ -39,40 +40,35 @@ namespace compiler {
namespace {
-bool HasNumberMaps(MapHandles const& maps) {
+bool HasNumberMaps(JSHeapBroker* broker, MapHandles const& maps) {
for (auto map : maps) {
- if (map->instance_type() == HEAP_NUMBER_TYPE) return true;
+ MapRef map_ref(broker, map);
+ if (map_ref.IsHeapNumberMap()) return true;
}
return false;
}
-bool HasOnlyJSArrayMaps(MapHandles const& maps) {
+bool HasOnlyJSArrayMaps(JSHeapBroker* broker, MapHandles const& maps) {
for (auto map : maps) {
- if (!map->IsJSArrayMap()) return false;
+ MapRef map_ref(broker, map);
+ if (!map_ref.IsJSArrayMap()) return false;
}
return true;
}
} // namespace
-struct JSNativeContextSpecialization::ScriptContextTableLookupResult {
- Handle<Context> context;
- bool immutable;
- int index;
-};
-
JSNativeContextSpecialization::JSNativeContextSpecialization(
- Editor* editor, JSGraph* jsgraph, JSHeapBroker* js_heap_broker, Flags flags,
+ Editor* editor, JSGraph* jsgraph, JSHeapBroker* broker, Flags flags,
Handle<Context> native_context, CompilationDependencies* dependencies,
Zone* zone, Zone* shared_zone)
: AdvancedReducer(editor),
jsgraph_(jsgraph),
- js_heap_broker_(js_heap_broker),
+ broker_(broker),
flags_(flags),
global_object_(native_context->global_object(), jsgraph->isolate()),
global_proxy_(JSGlobalProxy::cast(native_context->global_proxy()),
jsgraph->isolate()),
- native_context_(js_heap_broker, native_context),
dependencies_(dependencies),
zone_(zone),
shared_zone_(shared_zone),
@@ -82,6 +78,12 @@ Reduction JSNativeContextSpecialization::Reduce(Node* node) {
switch (node->opcode()) {
case IrOpcode::kJSAdd:
return ReduceJSAdd(node);
+ case IrOpcode::kJSAsyncFunctionEnter:
+ return ReduceJSAsyncFunctionEnter(node);
+ case IrOpcode::kJSAsyncFunctionReject:
+ return ReduceJSAsyncFunctionReject(node);
+ case IrOpcode::kJSAsyncFunctionResolve:
+ return ReduceJSAsyncFunctionResolve(node);
case IrOpcode::kJSGetSuperConstructor:
return ReduceJSGetSuperConstructor(node);
case IrOpcode::kJSInstanceOf:
@@ -153,7 +155,7 @@ Reduction JSNativeContextSpecialization::ReduceJSToString(Node* node) {
Reduction reduction;
HeapObjectMatcher matcher(input);
- if (matcher.HasValue() && matcher.Ref(js_heap_broker()).IsString()) {
+ if (matcher.HasValue() && matcher.Ref(broker()).IsString()) {
reduction = Changed(input); // JSToString(x:string) => x
ReplaceWithValue(node, reduction.replacement());
return reduction;
@@ -186,10 +188,10 @@ JSNativeContextSpecialization::CreateDelayedStringConstant(Node* node) {
return new (shared_zone()) NumberToStringConstant(number_matcher.Value());
} else {
HeapObjectMatcher matcher(node);
- if (matcher.HasValue() && matcher.Ref(js_heap_broker()).IsString()) {
- StringRef s = matcher.Ref(js_heap_broker()).AsString();
+ if (matcher.HasValue() && matcher.Ref(broker()).IsString()) {
+ StringRef s = matcher.Ref(broker()).AsString();
return new (shared_zone())
- StringLiteral(s.object<String>(), static_cast<size_t>(s.length()));
+ StringLiteral(s.object(), static_cast<size_t>(s.length()));
} else {
UNREACHABLE();
}
@@ -208,6 +210,114 @@ bool IsStringConstant(JSHeapBroker* broker, Node* node) {
}
}
+Reduction JSNativeContextSpecialization::ReduceJSAsyncFunctionEnter(
+ Node* node) {
+ DCHECK_EQ(IrOpcode::kJSAsyncFunctionEnter, node->opcode());
+ Node* closure = NodeProperties::GetValueInput(node, 0);
+ Node* receiver = NodeProperties::GetValueInput(node, 1);
+ Node* context = NodeProperties::GetContextInput(node);
+ Node* frame_state = NodeProperties::GetFrameStateInput(node);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ if (!isolate()->IsPromiseHookProtectorIntact()) return NoChange();
+
+ // Install a code dependency on the promise hook protector cell.
+ dependencies()->DependOnProtector(
+ PropertyCellRef(broker(), factory()->promise_hook_protector()));
+
+ // Create the promise for the async function.
+ Node* promise = effect =
+ graph()->NewNode(javascript()->CreatePromise(), context, effect);
+
+ // Create the JSAsyncFunctionObject based on the SharedFunctionInfo
+ // extracted from the top-most frame in {frame_state}.
+ Handle<SharedFunctionInfo> shared =
+ FrameStateInfoOf(frame_state->op()).shared_info().ToHandleChecked();
+ DCHECK(shared->is_compiled());
+ int register_count = shared->internal_formal_parameter_count() +
+ shared->GetBytecodeArray()->register_count();
+ Node* value = effect =
+ graph()->NewNode(javascript()->CreateAsyncFunctionObject(register_count),
+ closure, receiver, promise, context, effect, control);
+ ReplaceWithValue(node, value, effect, control);
+ return Replace(value);
+}
+
+Reduction JSNativeContextSpecialization::ReduceJSAsyncFunctionReject(
+ Node* node) {
+ DCHECK_EQ(IrOpcode::kJSAsyncFunctionReject, node->opcode());
+ Node* async_function_object = NodeProperties::GetValueInput(node, 0);
+ Node* reason = NodeProperties::GetValueInput(node, 1);
+ Node* context = NodeProperties::GetContextInput(node);
+ Node* frame_state = NodeProperties::GetFrameStateInput(node);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ if (!isolate()->IsPromiseHookProtectorIntact()) return NoChange();
+
+ // Install a code dependency on the promise hook protector cell.
+ dependencies()->DependOnProtector(
+ PropertyCellRef(broker(), factory()->promise_hook_protector()));
+
+ // Load the promise from the {async_function_object}.
+ Node* promise = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSAsyncFunctionObjectPromise()),
+ async_function_object, effect, control);
+
+ // Create a nested frame state inside the current method's most-recent
+ // {frame_state} that will ensure that lazy deoptimizations at this
+ // point will still return the {promise} instead of the result of the
+ // JSRejectPromise operation (which yields undefined).
+ Node* parameters[] = {promise};
+ frame_state = CreateStubBuiltinContinuationFrameState(
+ jsgraph(), Builtins::kAsyncFunctionLazyDeoptContinuation, context,
+ parameters, arraysize(parameters), frame_state,
+ ContinuationFrameStateMode::LAZY);
+
+ // Disable the additional debug event for the rejection since a
+ // debug event already happened for the exception that got us here.
+ Node* debug_event = jsgraph()->FalseConstant();
+ effect = graph()->NewNode(javascript()->RejectPromise(), promise, reason,
+ debug_event, context, frame_state, effect, control);
+ ReplaceWithValue(node, promise, effect, control);
+ return Replace(promise);
+}
+
+Reduction JSNativeContextSpecialization::ReduceJSAsyncFunctionResolve(
+ Node* node) {
+ DCHECK_EQ(IrOpcode::kJSAsyncFunctionResolve, node->opcode());
+ Node* async_function_object = NodeProperties::GetValueInput(node, 0);
+ Node* value = NodeProperties::GetValueInput(node, 1);
+ Node* context = NodeProperties::GetContextInput(node);
+ Node* frame_state = NodeProperties::GetFrameStateInput(node);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ if (!isolate()->IsPromiseHookProtectorIntact()) return NoChange();
+
+ // Install a code dependency on the promise hook protector cell.
+ dependencies()->DependOnProtector(
+ PropertyCellRef(broker(), factory()->promise_hook_protector()));
+
+ // Load the promise from the {async_function_object}.
+ Node* promise = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSAsyncFunctionObjectPromise()),
+ async_function_object, effect, control);
+
+ // Create a nested frame state inside the current method's most-recent
+ // {frame_state} that will ensure that lazy deoptimizations at this
+ // point will still return the {promise} instead of the result of the
+ // JSResolvePromise operation (which yields undefined).
+ Node* parameters[] = {promise};
+ frame_state = CreateStubBuiltinContinuationFrameState(
+ jsgraph(), Builtins::kAsyncFunctionLazyDeoptContinuation, context,
+ parameters, arraysize(parameters), frame_state,
+ ContinuationFrameStateMode::LAZY);
+
+ effect = graph()->NewNode(javascript()->ResolvePromise(), promise, value,
+ context, frame_state, effect, control);
+ ReplaceWithValue(node, promise, effect, control);
+ return Replace(promise);
+}
+
Reduction JSNativeContextSpecialization::ReduceJSAdd(Node* node) {
// TODO(turbofan): This has to run together with the inlining and
// native context specialization to be able to leverage the string
@@ -218,8 +328,8 @@ Reduction JSNativeContextSpecialization::ReduceJSAdd(Node* node) {
Node* const lhs = node->InputAt(0);
Node* const rhs = node->InputAt(1);
- base::Optional<size_t> lhs_len = GetMaxStringLength(js_heap_broker(), lhs);
- base::Optional<size_t> rhs_len = GetMaxStringLength(js_heap_broker(), rhs);
+ base::Optional<size_t> lhs_len = GetMaxStringLength(broker(), lhs);
+ base::Optional<size_t> rhs_len = GetMaxStringLength(broker(), rhs);
if (!lhs_len || !rhs_len) {
return NoChange();
}
@@ -227,8 +337,7 @@ Reduction JSNativeContextSpecialization::ReduceJSAdd(Node* node) {
// Fold into DelayedStringConstant if at least one of the parameters is a
// string constant and the addition won't throw due to too long result.
if (*lhs_len + *rhs_len <= String::kMaxLength &&
- (IsStringConstant(js_heap_broker(), lhs) ||
- IsStringConstant(js_heap_broker(), rhs))) {
+ (IsStringConstant(broker(), lhs) || IsStringConstant(broker(), rhs))) {
const StringConstantBase* left = CreateDelayedStringConstant(lhs);
const StringConstantBase* right = CreateDelayedStringConstant(rhs);
const StringConstantBase* cons =
@@ -250,8 +359,10 @@ Reduction JSNativeContextSpecialization::ReduceJSGetSuperConstructor(
// Check if the input is a known JSFunction.
HeapObjectMatcher m(constructor);
if (!m.HasValue()) return NoChange();
- JSFunctionRef function = m.Ref(js_heap_broker()).AsJSFunction();
+ JSFunctionRef function = m.Ref(broker()).AsJSFunction();
MapRef function_map = function.map();
+ // TODO(neis): Remove SerializePrototype call once brokerization is complete.
+ function_map.SerializePrototype();
ObjectRef function_prototype = function_map.prototype();
// We can constant-fold the super constructor access if the
@@ -292,60 +403,53 @@ Reduction JSNativeContextSpecialization::ReduceJSInstanceOf(Node* node) {
}
Handle<Map> receiver_map(receiver->map(), isolate());
- // Compute property access info for @@hasInstance on {receiver}.
+ // Compute property access info for @@hasInstance on the constructor.
PropertyAccessInfo access_info;
- AccessInfoFactory access_info_factory(js_heap_broker(), dependencies(),
- native_context().object<Context>(),
- graph()->zone());
+ AccessInfoFactory access_info_factory(
+ broker(), dependencies(), native_context().object(), graph()->zone());
if (!access_info_factory.ComputePropertyAccessInfo(
receiver_map, factory()->has_instance_symbol(), AccessMode::kLoad,
&access_info)) {
return NoChange();
}
+ DCHECK_EQ(access_info.receiver_maps().size(), 1);
+ DCHECK_EQ(access_info.receiver_maps()[0].address(), receiver_map.address());
- PropertyAccessBuilder access_builder(jsgraph(), js_heap_broker(),
- dependencies());
+ PropertyAccessBuilder access_builder(jsgraph(), broker(), dependencies());
if (access_info.IsNotFound()) {
// If there's no @@hasInstance handler, the OrdinaryHasInstance operation
- // takes over, but that requires the {receiver} to be callable.
- if (receiver->IsCallable()) {
- // Determine actual holder and perform prototype chain checks.
- Handle<JSObject> holder;
- if (access_info.holder().ToHandle(&holder)) {
- dependencies()->DependOnStablePrototypeChains(
- js_heap_broker(), native_context().object<Context>(),
- access_info.receiver_maps(), holder);
- }
-
- // Check that {constructor} is actually {receiver}.
- constructor = access_builder.BuildCheckValue(constructor, &effect,
- control, receiver);
+ // takes over, but that requires the constructor to be callable.
+ if (!receiver_map->is_callable()) return NoChange();
- // Monomorphic property access.
- access_builder.BuildCheckMaps(constructor, &effect, control,
- access_info.receiver_maps());
-
- // Lower to OrdinaryHasInstance(C, O).
- NodeProperties::ReplaceValueInput(node, constructor, 0);
- NodeProperties::ReplaceValueInput(node, object, 1);
- NodeProperties::ReplaceEffectInput(node, effect);
- NodeProperties::ChangeOp(node, javascript()->OrdinaryHasInstance());
- Reduction const reduction = ReduceJSOrdinaryHasInstance(node);
- return reduction.Changed() ? reduction : Changed(node);
- }
- } else if (access_info.IsDataConstant() ||
- access_info.IsDataConstantField()) {
// Determine actual holder and perform prototype chain checks.
Handle<JSObject> holder;
if (access_info.holder().ToHandle(&holder)) {
dependencies()->DependOnStablePrototypeChains(
- js_heap_broker(), native_context().object<Context>(),
- access_info.receiver_maps(), holder);
- } else {
- holder = receiver;
+ broker(), access_info.receiver_maps(), JSObjectRef(broker(), holder));
}
+ // Monomorphic property access.
+ constructor =
+ access_builder.BuildCheckHeapObject(constructor, &effect, control);
+ access_builder.BuildCheckMaps(constructor, &effect, control,
+ access_info.receiver_maps());
+
+ // Lower to OrdinaryHasInstance(C, O).
+ NodeProperties::ReplaceValueInput(node, constructor, 0);
+ NodeProperties::ReplaceValueInput(node, object, 1);
+ NodeProperties::ReplaceEffectInput(node, effect);
+ NodeProperties::ChangeOp(node, javascript()->OrdinaryHasInstance());
+ Reduction const reduction = ReduceJSOrdinaryHasInstance(node);
+ return reduction.Changed() ? reduction : Changed(node);
+ }
+
+ if (access_info.IsDataConstant() || access_info.IsDataConstantField()) {
+ // Determine actual holder.
+ Handle<JSObject> holder;
+ bool found_on_proto = access_info.holder().ToHandle(&holder);
+ if (!found_on_proto) holder = receiver;
+
Handle<Object> constant;
if (access_info.IsDataConstant()) {
DCHECK(!FLAG_track_constant_fields);
@@ -353,12 +457,30 @@ Reduction JSNativeContextSpecialization::ReduceJSInstanceOf(Node* node) {
} else {
DCHECK(FLAG_track_constant_fields);
DCHECK(access_info.IsDataConstantField());
- // The value must be callable therefore tagged.
- DCHECK(CanBeTaggedPointer(access_info.field_representation()));
FieldIndex field_index = access_info.field_index();
constant = JSObject::FastPropertyAt(holder, Representation::Tagged(),
field_index);
+ if (!constant->IsCallable()) {
+ return NoChange();
+ }
+
+ // Install dependency on constness. Unfortunately, access_info does not
+ // track descriptor index, so we have to search for it.
+ MapRef holder_map(broker(), handle(holder->map(), isolate()));
+ Handle<DescriptorArray> descriptors(
+ holder_map.object()->instance_descriptors(), isolate());
+ int descriptor_index = descriptors->Search(
+ *(factory()->has_instance_symbol()), *(holder_map.object()));
+ CHECK_NE(descriptor_index, DescriptorArray::kNotFound);
+ holder_map.SerializeOwnDescriptors();
+ dependencies()->DependOnFieldType(holder_map, descriptor_index);
}
+
+ if (found_on_proto) {
+ dependencies()->DependOnStablePrototypeChains(
+ broker(), access_info.receiver_maps(), JSObjectRef(broker(), holder));
+ }
+
DCHECK(constant->IsCallable());
// Check that {constructor} is actually {receiver}.
@@ -409,7 +531,7 @@ JSNativeContextSpecialization::InferHasInPrototypeChain(
Node* receiver, Node* effect, Handle<HeapObject> prototype) {
ZoneHandleSet<Map> receiver_maps;
NodeProperties::InferReceiverMapsResult result =
- NodeProperties::InferReceiverMaps(isolate(), receiver, effect,
+ NodeProperties::InferReceiverMaps(broker(), receiver, effect,
&receiver_maps);
if (result == NodeProperties::kNoReceiverMaps) return kMayBeInPrototypeChain;
@@ -507,7 +629,7 @@ Reduction JSNativeContextSpecialization::ReduceJSOrdinaryHasInstance(
// Optimize if we currently know the "prototype" property.
if (m.Value()->IsJSFunction()) {
- JSFunctionRef function = m.Ref(js_heap_broker()).AsJSFunction();
+ JSFunctionRef function = m.Ref(broker()).AsJSFunction();
// TODO(neis): This is a temporary hack needed because the copy reducer
// runs only after this pass.
function.Serialize();
@@ -541,18 +663,21 @@ Reduction JSNativeContextSpecialization::ReduceJSPromiseResolve(Node* node) {
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
+ if (!isolate()->IsPromiseHookProtectorIntact()) {
+ return NoChange();
+ }
+
// Check if the {constructor} is the %Promise% function.
HeapObjectMatcher m(constructor);
if (!m.HasValue() ||
- !m.Ref(js_heap_broker())
- .equals(js_heap_broker()->native_context().promise_function())) {
+ !m.Ref(broker()).equals(broker()->native_context().promise_function())) {
return NoChange();
}
// Check if we know something about the {value}.
ZoneHandleSet<Map> value_maps;
NodeProperties::InferReceiverMapsResult result =
- NodeProperties::InferReceiverMaps(isolate(), value, effect, &value_maps);
+ NodeProperties::InferReceiverMaps(broker(), value, effect, &value_maps);
if (result == NodeProperties::kNoReceiverMaps) return NoChange();
DCHECK_NE(0, value_maps.size());
@@ -561,6 +686,10 @@ Reduction JSNativeContextSpecialization::ReduceJSPromiseResolve(Node* node) {
if (value_map->IsJSPromiseMap()) return NoChange();
}
+ // Install a code dependency on the promise hook protector cell.
+ dependencies()->DependOnProtector(
+ PropertyCellRef(broker(), factory()->promise_hook_protector()));
+
// Create a %Promise% instance and resolve it with {value}.
Node* promise = effect =
graph()->NewNode(javascript()->CreatePromise(), context, effect);
@@ -582,16 +711,23 @@ Reduction JSNativeContextSpecialization::ReduceJSResolvePromise(Node* node) {
// Check if we know something about the {resolution}.
ZoneHandleSet<Map> resolution_maps;
NodeProperties::InferReceiverMapsResult result =
- NodeProperties::InferReceiverMaps(isolate(), resolution, effect,
+ NodeProperties::InferReceiverMaps(broker(), resolution, effect,
&resolution_maps);
- if (result != NodeProperties::kReliableReceiverMaps) return NoChange();
+ if (result == NodeProperties::kNoReceiverMaps) return NoChange();
DCHECK_NE(0, resolution_maps.size());
+ // When the {resolution_maps} information is unreliable, we can
+ // still optimize if all individual {resolution_maps} are stable.
+ if (result == NodeProperties::kUnreliableReceiverMaps) {
+ for (Handle<Map> resolution_map : resolution_maps) {
+ if (!resolution_map->is_stable()) return NoChange();
+ }
+ }
+
// Compute property access info for "then" on {resolution}.
PropertyAccessInfo access_info;
- AccessInfoFactory access_info_factory(js_heap_broker(), dependencies(),
- native_context().object<Context>(),
- graph()->zone());
+ AccessInfoFactory access_info_factory(
+ broker(), dependencies(), native_context().object(), graph()->zone());
if (!access_info_factory.ComputePropertyAccessInfo(
MapHandles(resolution_maps.begin(), resolution_maps.end()),
factory()->then_string(), AccessMode::kLoad, &access_info)) {
@@ -601,15 +737,20 @@ Reduction JSNativeContextSpecialization::ReduceJSResolvePromise(Node* node) {
// We can further optimize the case where {resolution}
// definitely doesn't have a "then" property.
if (!access_info.IsNotFound()) return NoChange();
- PropertyAccessBuilder access_builder(jsgraph(), js_heap_broker(),
- dependencies());
+ PropertyAccessBuilder access_builder(jsgraph(), broker(), dependencies());
// Add proper dependencies on the {resolution}s [[Prototype]]s.
Handle<JSObject> holder;
if (access_info.holder().ToHandle(&holder)) {
dependencies()->DependOnStablePrototypeChains(
- js_heap_broker(), native_context().object<Context>(),
- access_info.receiver_maps(), holder);
+ broker(), access_info.receiver_maps(), JSObjectRef(broker(), holder));
+ }
+
+ // Add stability dependencies on the {resolution_maps}.
+ if (result == NodeProperties::kUnreliableReceiverMaps) {
+ for (Handle<Map> resolution_map : resolution_maps) {
+ dependencies()->DependOnStableMap(MapRef(broker(), resolution_map));
+ }
}
// Simply fulfill the {promise} with the {resolution}.
@@ -716,13 +857,15 @@ Reduction JSNativeContextSpecialization::ReduceGlobalAccess(
if (property_details.cell_type() != PropertyCellType::kMutable ||
property_details.IsConfigurable()) {
dependencies()->DependOnGlobalProperty(
- PropertyCellRef(js_heap_broker(), property_cell));
+ PropertyCellRef(broker(), property_cell));
}
// Load from constant/undefined global property can be constant-folded.
if (property_details.cell_type() == PropertyCellType::kConstant ||
property_details.cell_type() == PropertyCellType::kUndefined) {
value = jsgraph()->Constant(property_cell_value);
+ CHECK(
+ !property_cell_value.is_identical_to(factory()->the_hole_value()));
} else {
// Load from constant type cell can benefit from type feedback.
MaybeHandle<Map> map;
@@ -738,9 +881,8 @@ Reduction JSNativeContextSpecialization::ReduceGlobalAccess(
representation = MachineRepresentation::kTaggedPointer;
} else {
MapRef property_cell_value_map(
- js_heap_broker(),
- handle(HeapObject::cast(*property_cell_value)->map(),
- isolate()));
+ broker(), handle(HeapObject::cast(*property_cell_value)->map(),
+ isolate()));
property_cell_value_type = Type::For(property_cell_value_map);
representation = MachineRepresentation::kTaggedPointer;
@@ -749,7 +891,7 @@ Reduction JSNativeContextSpecialization::ReduceGlobalAccess(
// mutated without the cell state being updated.
if (property_cell_value_map.is_stable()) {
dependencies()->DependOnStableMap(property_cell_value_map);
- map = property_cell_value_map.object<Map>();
+ map = property_cell_value_map.object();
}
}
}
@@ -771,7 +913,7 @@ Reduction JSNativeContextSpecialization::ReduceGlobalAccess(
// Record a code dependency on the cell, and just deoptimize if the new
// value doesn't match the previous value stored inside the cell.
dependencies()->DependOnGlobalProperty(
- PropertyCellRef(js_heap_broker(), property_cell));
+ PropertyCellRef(broker(), property_cell));
Node* check =
graph()->NewNode(simplified()->ReferenceEqual(), value,
jsgraph()->Constant(property_cell_value));
@@ -785,7 +927,7 @@ Reduction JSNativeContextSpecialization::ReduceGlobalAccess(
// values' type doesn't match the type of the previous value in the
// cell.
dependencies()->DependOnGlobalProperty(
- PropertyCellRef(js_heap_broker(), property_cell));
+ PropertyCellRef(broker(), property_cell));
Type property_cell_value_type;
MachineRepresentation representation = MachineRepresentation::kTagged;
if (property_cell_value->IsHeapObject()) {
@@ -795,7 +937,7 @@ Reduction JSNativeContextSpecialization::ReduceGlobalAccess(
Handle<HeapObject>::cast(property_cell_value)->map(), isolate());
DCHECK(property_cell_value_map->is_stable());
dependencies()->DependOnStableMap(
- MapRef(js_heap_broker(), property_cell_value_map));
+ MapRef(broker(), property_cell_value_map));
// Check that the {value} is a HeapObject.
value = effect = graph()->NewNode(simplified()->CheckHeapObject(),
@@ -827,7 +969,7 @@ Reduction JSNativeContextSpecialization::ReduceGlobalAccess(
// Record a code dependency on the cell, and just deoptimize if the
// property ever becomes read-only.
dependencies()->DependOnGlobalProperty(
- PropertyCellRef(js_heap_broker(), property_cell));
+ PropertyCellRef(broker(), property_cell));
effect = graph()->NewNode(
simplified()->StoreField(ForPropertyCellValue(
MachineRepresentation::kTagged, Type::NonInternal(),
@@ -844,7 +986,7 @@ Reduction JSNativeContextSpecialization::ReduceGlobalAccess(
Reduction JSNativeContextSpecialization::ReduceJSLoadGlobal(Node* node) {
DCHECK_EQ(IrOpcode::kJSLoadGlobal, node->opcode());
- NameRef name(js_heap_broker(), LoadGlobalParametersOf(node->op()).name());
+ NameRef name(broker(), LoadGlobalParametersOf(node->op()).name());
Node* effect = NodeProperties::GetEffectInput(node);
// Try to lookup the name on the script context table first (lexical scoping).
@@ -865,13 +1007,13 @@ Reduction JSNativeContextSpecialization::ReduceJSLoadGlobal(Node* node) {
}
// Lookup the {name} on the global object instead.
- return ReduceGlobalAccess(node, nullptr, nullptr, name.object<Name>(),
+ return ReduceGlobalAccess(node, nullptr, nullptr, name.object(),
AccessMode::kLoad);
}
Reduction JSNativeContextSpecialization::ReduceJSStoreGlobal(Node* node) {
DCHECK_EQ(IrOpcode::kJSStoreGlobal, node->opcode());
- NameRef name(js_heap_broker(), StoreGlobalParametersOf(node->op()).name());
+ NameRef name(broker(), StoreGlobalParametersOf(node->op()).name());
Node* value = NodeProperties::GetValueInput(node, 0);
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
@@ -894,7 +1036,7 @@ Reduction JSNativeContextSpecialization::ReduceJSStoreGlobal(Node* node) {
}
// Lookup the {name} on the global object instead.
- return ReduceGlobalAccess(node, nullptr, value, name.object<Name>(),
+ return ReduceGlobalAccess(node, nullptr, value, name.object(),
AccessMode::kStore);
}
@@ -918,11 +1060,11 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess(
if (receiver_maps.size() == 1) {
Handle<Map> receiver_map = receiver_maps.front();
if (receiver_map->IsJSGlobalProxyMap()) {
- Object* maybe_constructor = receiver_map->GetConstructor();
+ Object maybe_constructor = receiver_map->GetConstructor();
// Detached global proxies have |null| as their constructor.
if (maybe_constructor->IsJSFunction() &&
JSFunction::cast(maybe_constructor)->native_context() ==
- *native_context().object<Context>()) {
+ *native_context().object()) {
return ReduceGlobalAccess(node, receiver, value, name, access_mode,
index);
}
@@ -930,9 +1072,8 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess(
}
// Compute property access infos for the receiver maps.
- AccessInfoFactory access_info_factory(js_heap_broker(), dependencies(),
- native_context().object<Context>(),
- graph()->zone());
+ AccessInfoFactory access_info_factory(
+ broker(), dependencies(), native_context().object(), graph()->zone());
ZoneVector<PropertyAccessInfo> access_infos(zone());
if (!access_info_factory.ComputePropertyAccessInfos(
receiver_maps, name, access_mode, &access_infos)) {
@@ -958,8 +1099,7 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess(
if_exceptions = &if_exception_nodes;
}
- PropertyAccessBuilder access_builder(jsgraph(), js_heap_broker(),
- dependencies());
+ PropertyAccessBuilder access_builder(jsgraph(), broker(), dependencies());
// Check for the monomorphic cases.
if (access_infos.size() == 1) {
@@ -970,7 +1110,7 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess(
&receiver, &effect, control) &&
!access_builder.TryBuildNumberCheck(access_info.receiver_maps(),
&receiver, &effect, control)) {
- if (HasNumberMaps(access_info.receiver_maps())) {
+ if (HasNumberMaps(broker(), access_info.receiver_maps())) {
// We need to also let Smi {receiver}s through in this case, so
// we construct a diamond, guarded by the Sminess of the {receiver}
// and if {receiver} is not a Smi just emit a sequence of map checks.
@@ -1015,7 +1155,7 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess(
// Check if {receiver} may be a number.
bool receiverissmi_possible = false;
for (PropertyAccessInfo const& access_info : access_infos) {
- if (HasNumberMaps(access_info.receiver_maps())) {
+ if (HasNumberMaps(broker(), access_info.receiver_maps())) {
receiverissmi_possible = true;
break;
}
@@ -1079,7 +1219,7 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess(
}
// The Number case requires special treatment to also deal with Smis.
- if (HasNumberMaps(receiver_maps)) {
+ if (HasNumberMaps(broker(), receiver_maps)) {
// Join this check with the "receiver is smi" check above.
DCHECK_NOT_NULL(receiverissmi_effect);
DCHECK_NOT_NULL(receiverissmi_control);
@@ -1206,15 +1346,16 @@ Reduction JSNativeContextSpecialization::ReduceJSLoadNamed(Node* node) {
DCHECK_EQ(IrOpcode::kJSLoadNamed, node->opcode());
NamedAccess const& p = NamedAccessOf(node->op());
Node* const receiver = NodeProperties::GetValueInput(node, 0);
- Node* const value = jsgraph()->Dead();
// Check if we have a constant receiver.
HeapObjectMatcher m(receiver);
if (m.HasValue()) {
- if (m.Value()->IsJSFunction() &&
- p.name().is_identical_to(factory()->prototype_string())) {
+ ObjectRef object = m.Ref(broker());
+ NameRef name(broker(), p.name());
+ if (object.IsJSFunction() &&
+ name.equals(ObjectRef(broker(), factory()->prototype_string()))) {
// Optimize "prototype" property of functions.
- JSFunctionRef function = m.Ref(js_heap_broker()).AsJSFunction();
+ JSFunctionRef function = object.AsJSFunction();
// TODO(neis): This is a temporary hack needed because the copy reducer
// runs only after this pass.
function.Serialize();
@@ -1228,11 +1369,10 @@ Reduction JSNativeContextSpecialization::ReduceJSLoadNamed(Node* node) {
Node* value = jsgraph()->Constant(prototype);
ReplaceWithValue(node, value);
return Replace(value);
- } else if (m.Value()->IsString() &&
- p.name().is_identical_to(factory()->length_string())) {
+ } else if (object.IsString() &&
+ name.equals(ObjectRef(broker(), factory()->length_string()))) {
// Constant-fold "length" property on constant strings.
- Handle<String> string = Handle<String>::cast(m.Value());
- Node* value = jsgraph()->Constant(string->length());
+ Node* value = jsgraph()->Constant(object.AsString().length());
ReplaceWithValue(node, value);
return Replace(value);
}
@@ -1243,7 +1383,7 @@ Reduction JSNativeContextSpecialization::ReduceJSLoadNamed(Node* node) {
FeedbackNexus nexus(p.feedback().vector(), p.feedback().slot());
// Try to lower the named access based on the {receiver_maps}.
- return ReduceNamedAccessFromNexus(node, value, nexus, p.name(),
+ return ReduceNamedAccessFromNexus(node, jsgraph()->Dead(), nexus, p.name(),
AccessMode::kLoad);
}
@@ -1307,9 +1447,8 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess(
} else {
// Retrieve the native context from the given {node}.
// Compute element access infos for the receiver maps.
- AccessInfoFactory access_info_factory(js_heap_broker(), dependencies(),
- native_context().object<Context>(),
- graph()->zone());
+ AccessInfoFactory access_info_factory(
+ broker(), dependencies(), native_context().object(), graph()->zone());
ZoneVector<ElementAccessInfo> access_infos(zone());
if (!access_info_factory.ComputeElementAccessInfos(
receiver_maps, access_mode, &access_infos)) {
@@ -1333,7 +1472,7 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess(
ZoneVector<Handle<Map>> prototype_maps(zone());
for (ElementAccessInfo const& access_info : access_infos) {
for (Handle<Map> receiver_map : access_info.receiver_maps()) {
- // If the {receiver_map} has a prototype and it's elements backing
+ // If the {receiver_map} has a prototype and its elements backing
// store is either holey, or we have a potentially growing store,
// then we need to check that all prototypes have stable maps with
// fast elements (and we need to guard against changes to that below).
@@ -1356,14 +1495,12 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess(
// Install dependencies on the relevant prototype maps.
for (Handle<Map> prototype_map : prototype_maps) {
- dependencies()->DependOnStableMap(
- MapRef(js_heap_broker(), prototype_map));
+ dependencies()->DependOnStableMap(MapRef(broker(), prototype_map));
}
}
// Ensure that {receiver} is a heap object.
- PropertyAccessBuilder access_builder(jsgraph(), js_heap_broker(),
- dependencies());
+ PropertyAccessBuilder access_builder(jsgraph(), broker(), dependencies());
receiver = access_builder.BuildCheckHeapObject(receiver, &effect, control);
// Check for the monomorphic case.
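[Editor's sketch] A minimal model of the dependency mechanism used by DependOnStableMap above (hypothetical names; the real CompilationDependencies API is richer): the optimization records the invariants it relied on, and the compiled code is only valid while every recorded invariant still holds.

#include <functional>
#include <iostream>
#include <vector>

// Each dependency is a predicate that must still be true at install time.
class Dependencies {
 public:
  void DependOn(std::function<bool()> invariant) {
    invariants_.push_back(std::move(invariant));
  }
  bool StillValid() const {
    for (const auto& inv : invariants_)
      if (!inv()) return false;
    return true;
  }

 private:
  std::vector<std::function<bool()>> invariants_;
};

int main() {
  bool prototype_map_stable = true;
  Dependencies deps;
  deps.DependOn([&] { return prototype_map_stable; });
  std::cout << deps.StillValid() << "\n";  // 1: safe to install the code
  prototype_map_stable = false;            // e.g. prototype map transitioned
  std::cout << deps.StillValid() << "\n";  // 0: code must be discarded
}
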
@@ -1512,77 +1649,83 @@ Reduction JSNativeContextSpecialization::ReduceKeyedAccess(
// Optimize the case where we load from a constant {receiver}.
if (access_mode == AccessMode::kLoad) {
HeapObjectMatcher mreceiver(receiver);
- if (mreceiver.HasValue() && !mreceiver.Value()->IsTheHole(isolate()) &&
- !mreceiver.Value()->IsNullOrUndefined(isolate())) {
- // Check whether we're accessing a known element on the {receiver}
- // that is non-configurable, non-writable (i.e. the {receiver} was
- // frozen using Object.freeze).
- NumberMatcher mindex(index);
- if (mindex.IsInteger() && mindex.IsInRange(0.0, kMaxUInt32 - 1.0)) {
- LookupIterator it(isolate(), mreceiver.Value(),
- static_cast<uint32_t>(mindex.Value()),
- LookupIterator::OWN);
- if (it.state() == LookupIterator::DATA) {
- if (it.IsReadOnly() && !it.IsConfigurable()) {
- // We can safely constant-fold the {index} access to {receiver},
- // since the element is non-configurable, non-writable and thus
- // cannot change anymore.
- value = jsgraph()->Constant(it.GetDataValue());
- ReplaceWithValue(node, value, effect, control);
- return Replace(value);
- }
-
- // Check if the {receiver} is a known constant with a copy-on-write
- // backing store, and whether {index} is within the appropriate
- // bounds. In that case we can constant-fold the access and only
- // check that the {elements} didn't change. This is sufficient as
- // the backing store of a copy-on-write JSArray is defensively copied
- // whenever the length or the elements (might) change.
- //
- // What's interesting here is that we don't need to map check the
- // {receiver}, since JSArray's will always have their elements in
- // the backing store.
- if (mreceiver.Value()->IsJSArray()) {
- Handle<JSArray> array = Handle<JSArray>::cast(mreceiver.Value());
- if (array->elements()->IsCowArray()) {
- Node* elements = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForJSObjectElements()),
- receiver, effect, control);
- Handle<FixedArray> array_elements(
- FixedArray::cast(array->elements()), isolate());
- Node* check =
- graph()->NewNode(simplified()->ReferenceEqual(), elements,
- jsgraph()->HeapConstant(array_elements));
- effect = graph()->NewNode(
- simplified()->CheckIf(
- DeoptimizeReason::kCowArrayElementsChanged),
- check, effect, control);
+ if (mreceiver.HasValue()) {
+ HeapObjectRef receiver_ref = mreceiver.Ref(broker()).AsHeapObject();
+ if (receiver_ref.map().oddball_type() != OddballType::kHole &&
+ receiver_ref.map().oddball_type() != OddballType::kNull &&
+ receiver_ref.map().oddball_type() != OddballType::kUndefined) {
+ // Check whether we're accessing a known element on the {receiver}
+ // that is non-configurable, non-writable (i.e. the {receiver} was
+ // frozen using Object.freeze).
+ NumberMatcher mindex(index);
+ if (mindex.IsInteger() && mindex.IsInRange(0.0, kMaxUInt32 - 1.0)) {
+ LookupIterator it(isolate(), receiver_ref.object(),
+ static_cast<uint32_t>(mindex.Value()),
+ LookupIterator::OWN);
+ if (it.state() == LookupIterator::DATA) {
+ if (it.IsReadOnly() && !it.IsConfigurable()) {
+ // We can safely constant-fold the {index} access to {receiver},
+ // since the element is non-configurable, non-writable and thus
+ // cannot change anymore.
value = jsgraph()->Constant(it.GetDataValue());
ReplaceWithValue(node, value, effect, control);
return Replace(value);
}
+
+ // Check if the {receiver} is a known constant with a copy-on-write
+ // backing store, and whether {index} is within the appropriate
+ // bounds. In that case we can constant-fold the access and only
+ // check that the {elements} didn't change. This is sufficient as
+ // the backing store of a copy-on-write JSArray is defensively
+ // copied whenever the length or the elements (might) change.
+ //
+ // What's interesting here is that we don't need to map check the
+ // {receiver}, since JSArrays will always have their elements in
+ // the backing store.
+ if (receiver_ref.IsJSArray()) {
+ Handle<JSArray> array = receiver_ref.AsJSArray().object();
+ if (array->elements()->IsCowArray()) {
+ Node* elements = effect =
+ graph()->NewNode(simplified()->LoadField(
+ AccessBuilder::ForJSObjectElements()),
+ receiver, effect, control);
+ Handle<FixedArray> array_elements(
+ FixedArray::cast(array->elements()), isolate());
+ Node* check =
+ graph()->NewNode(simplified()->ReferenceEqual(), elements,
+ jsgraph()->HeapConstant(array_elements));
+ effect = graph()->NewNode(
+ simplified()->CheckIf(
+ DeoptimizeReason::kCowArrayElementsChanged),
+ check, effect, control);
+ value = jsgraph()->Constant(it.GetDataValue());
+ ReplaceWithValue(node, value, effect, control);
+ return Replace(value);
+ }
+ }
}
}
- }
- // For constant Strings we can eagerly strength-reduce the keyed
- // accesses using the known length, which doesn't change.
- if (mreceiver.Value()->IsString()) {
- Handle<String> string = Handle<String>::cast(mreceiver.Value());
-
- // We can only assume that the {index} is a valid array index if the IC
- // is in element access mode and not MEGAMORPHIC, otherwise there's no
- // guard for the bounds check below.
- if (nexus.ic_state() != MEGAMORPHIC && nexus.GetKeyType() == ELEMENT) {
- // Ensure that {index} is less than {receiver} length.
- Node* length = jsgraph()->Constant(string->length());
-
- // Load the single character string from {receiver} or yield undefined
- // if the {index} is out of bounds (depending on the {load_mode}).
- value = BuildIndexedStringLoad(receiver, index, length, &effect,
- &control, load_mode);
- ReplaceWithValue(node, value, effect, control);
- return Replace(value);
+ // For constant Strings we can eagerly strength-reduce the keyed
+ // accesses using the known length, which doesn't change.
+ if (receiver_ref.IsString()) {
+ // We can only assume that the {index} is a valid array index if the
+ // IC is in element access mode and not MEGAMORPHIC, otherwise there's
+ // no guard for the bounds check below.
+ if (nexus.ic_state() != MEGAMORPHIC &&
+ nexus.GetKeyType() == ELEMENT) {
+ // Ensure that {index} is less than {receiver} length.
+ Node* length =
+ jsgraph()->Constant(receiver_ref.AsString().length());
+
+ // Load the single character string from {receiver} or yield
+ // undefined if the {index} is out of bounds (depending on the
+ // {load_mode}).
+ value = BuildIndexedStringLoad(receiver, index, length, &effect,
+ &control, load_mode);
+ ReplaceWithValue(node, value, effect, control);
+ return Replace(value);
+ }
}
}
}
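[Editor's sketch] The copy-on-write trick above as a tiny standalone analogue (illustrative, not V8's representation): the element read is constant-folded, guarded only by the identity of the elements store, which is sound because any mutation installs a fresh store rather than writing in place.

#include <iostream>
#include <memory>
#include <vector>

// The elements store is immutable and shared (copy-on-write).
using Elements = std::shared_ptr<const std::vector<int>>;

struct Array {
  Elements elements;
};

// Compile time: snapshot the store's identity and fold the element read.
struct FoldedLoad {
  Elements expected;
  int folded_value;
};

// Run time: the whole guard is one pointer comparison.
bool GuardedLoad(const Array& a, const FoldedLoad& f, int* out) {
  if (a.elements != f.expected) return false;  // "deopt": elements changed
  *out = f.folded_value;
  return true;
}

int main() {
  Array a{std::make_shared<const std::vector<int>>(std::vector<int>{1, 2, 3})};
  FoldedLoad fold{a.elements, (*a.elements)[1]};
  int v = 0;
  std::cout << GuardedLoad(a, fold, &v) << " " << v << "\n";  // 1 2
  a.elements = std::make_shared<const std::vector<int>>(std::vector<int>{9});
  std::cout << GuardedLoad(a, fold, &v) << "\n";  // 0: guard fails
}
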
@@ -1604,26 +1747,26 @@ Reduction JSNativeContextSpecialization::ReduceKeyedAccess(
// Optimize access for constant {index}.
HeapObjectMatcher mindex(index);
- if (mindex.HasValue() && mindex.Value()->IsPrimitive()) {
- // Keyed access requires a ToPropertyKey on the {index} first before
- // looking up the property on the object (see ES6 section 12.3.2.1).
- // We can only do this for non-observable ToPropertyKey invocations,
- // so we limit the constant indices to primitives at this point.
- Handle<Name> name;
- if (Object::ToName(isolate(), mindex.Value()).ToHandle(&name)) {
- uint32_t array_index;
- if (name->AsArrayIndex(&array_index)) {
- // Use the constant array index.
+ if (mindex.HasValue()) {
+ ObjectRef name = mindex.Ref(broker());
+ if (name.IsSymbol()) {
+ return ReduceNamedAccess(node, value, receiver_maps,
+ name.AsName().object(), access_mode);
+ }
+ if (name.IsInternalizedString()) {
+ uint32_t array_index = name.AsInternalizedString().array_index();
+ if (array_index != InternalizedStringRef::kNotAnArrayIndex) {
index = jsgraph()->Constant(static_cast<double>(array_index));
} else {
- name = factory()->InternalizeName(name);
- return ReduceNamedAccess(node, value, receiver_maps, name, access_mode);
+ return ReduceNamedAccess(node, value, receiver_maps,
+ name.AsName().object(), access_mode);
}
}
}
// Check if we have feedback for a named access.
- if (Name* name = nexus.FindFirstName()) {
+ Name name = nexus.FindFirstName();
+ if (!name.is_null()) {
return ReduceNamedAccess(node, value, receiver_maps,
handle(name, isolate()), access_mode, index);
} else if (nexus.GetKeyType() != ELEMENT) {
@@ -1885,7 +2028,7 @@ Node* JSNativeContextSpecialization::InlineApiCall(
// Only setters have a value.
int const argc = value == nullptr ? 0 : 1;
// The stub always expects the receiver as the first param on the stack.
- Callable call_api_callback = CodeFactory::CallApiCallback(isolate(), argc);
+ Callable call_api_callback = CodeFactory::CallApiCallback(isolate());
CallInterfaceDescriptor call_interface_descriptor =
call_api_callback.descriptor();
auto call_descriptor = Linkage::GetStubCallDescriptor(
@@ -1903,16 +2046,17 @@ Node* JSNativeContextSpecialization::InlineApiCall(
// Add CallApiCallbackStub's register argument as well.
Node* context = jsgraph()->Constant(native_context());
- Node* inputs[10] = {code, context, data, holder, function_reference,
- receiver};
- int index = 6 + argc;
+ Node* inputs[11] = {
+ code, context, function_reference, jsgraph()->Constant(argc), data,
+ holder, receiver};
+ int index = 7 + argc;
inputs[index++] = frame_state;
inputs[index++] = *effect;
inputs[index++] = *control;
// This needs to stay here because of the edge case described in
// http://crbug.com/675648.
if (value != nullptr) {
- inputs[6] = value;
+ inputs[7] = value;
}
return *effect = *control =
@@ -1926,12 +2070,10 @@ JSNativeContextSpecialization::BuildPropertyLoad(
PropertyAccessInfo const& access_info) {
// Determine actual holder and perform prototype chain checks.
Handle<JSObject> holder;
- PropertyAccessBuilder access_builder(jsgraph(), js_heap_broker(),
- dependencies());
+ PropertyAccessBuilder access_builder(jsgraph(), broker(), dependencies());
if (access_info.holder().ToHandle(&holder)) {
dependencies()->DependOnStablePrototypeChains(
- js_heap_broker(), native_context().object<Context>(),
- access_info.receiver_maps(), holder);
+ broker(), access_info.receiver_maps(), JSObjectRef(broker(), holder));
}
// Generate the actual property access.
@@ -1986,13 +2128,11 @@ JSNativeContextSpecialization::BuildPropertyStore(
PropertyAccessInfo const& access_info, AccessMode access_mode) {
// Determine actual holder and perform prototype chain checks.
Handle<JSObject> holder;
- PropertyAccessBuilder access_builder(jsgraph(), js_heap_broker(),
- dependencies());
+ PropertyAccessBuilder access_builder(jsgraph(), broker(), dependencies());
if (access_info.holder().ToHandle(&holder)) {
DCHECK_NE(AccessMode::kStoreInLiteral, access_mode);
dependencies()->DependOnStablePrototypeChains(
- js_heap_broker(), native_context().object<Context>(),
- access_info.receiver_maps(), holder);
+ broker(), access_info.receiver_maps(), JSObjectRef(broker(), holder));
}
DCHECK(!access_info.IsNotFound());
@@ -2152,7 +2292,7 @@ JSNativeContextSpecialization::BuildPropertyStore(
// Reallocate the properties {storage}.
storage = effect = BuildExtendPropertiesBackingStore(
- original_map, storage, effect, control);
+ MapRef(broker(), original_map), storage, effect, control);
// Perform the actual store.
effect = graph()->NewNode(simplified()->StoreField(field_access),
@@ -2201,8 +2341,8 @@ Reduction JSNativeContextSpecialization::ReduceJSStoreDataPropertyInLiteral(
DCHECK_EQ(MONOMORPHIC, nexus.ic_state());
- Map* map = nexus.FindFirstMap();
- if (map == nullptr) {
+ Map map = nexus.FindFirstMap();
+ if (map.is_null()) {
// Maps are weakly held in the type feedback vector, we may not have one.
return NoChange();
}
@@ -2211,14 +2351,13 @@ Reduction JSNativeContextSpecialization::ReduceJSStoreDataPropertyInLiteral(
if (!Map::TryUpdate(isolate(), receiver_map).ToHandle(&receiver_map))
return NoChange();
- Handle<Name> cached_name =
- handle(Name::cast(nexus.GetFeedbackExtra()->GetHeapObjectAssumeStrong()),
- isolate());
+ Handle<Name> cached_name(
+ Name::cast(nexus.GetFeedbackExtra()->GetHeapObjectAssumeStrong()),
+ isolate());
PropertyAccessInfo access_info;
- AccessInfoFactory access_info_factory(js_heap_broker(), dependencies(),
- native_context().object<Context>(),
- graph()->zone());
+ AccessInfoFactory access_info_factory(
+ broker(), dependencies(), native_context().object(), graph()->zone());
if (!access_info_factory.ComputePropertyAccessInfo(
receiver_map, cached_name, AccessMode::kStoreInLiteral,
&access_info)) {
@@ -2230,8 +2369,7 @@ Reduction JSNativeContextSpecialization::ReduceJSStoreDataPropertyInLiteral(
Node* control = NodeProperties::GetControlInput(node);
// Monomorphic property access.
- PropertyAccessBuilder access_builder(jsgraph(), js_heap_broker(),
- dependencies());
+ PropertyAccessBuilder access_builder(jsgraph(), broker(), dependencies());
receiver = access_builder.BuildCheckHeapObject(receiver, &effect, control);
access_builder.BuildCheckMaps(receiver, &effect, control,
access_info.receiver_maps());
@@ -2305,7 +2443,7 @@ Reduction JSNativeContextSpecialization::ReduceJSToObject(Node* node) {
ZoneHandleSet<Map> receiver_maps;
NodeProperties::InferReceiverMapsResult result =
- NodeProperties::InferReceiverMaps(isolate(), receiver, effect,
+ NodeProperties::InferReceiverMaps(broker(), receiver, effect,
&receiver_maps);
if (result == NodeProperties::kNoReceiverMaps) return NoChange();
@@ -2332,12 +2470,14 @@ ExternalArrayType GetArrayTypeFromElementsKind(ElementsKind kind) {
UNREACHABLE();
}
-MaybeHandle<JSTypedArray> GetTypedArrayConstant(Node* receiver) {
+base::Optional<JSTypedArrayRef> GetTypedArrayConstant(JSHeapBroker* broker,
+ Node* receiver) {
HeapObjectMatcher m(receiver);
- if (!m.HasValue()) return MaybeHandle<JSTypedArray>();
- if (!m.Value()->IsJSTypedArray()) return MaybeHandle<JSTypedArray>();
- Handle<JSTypedArray> typed_array = Handle<JSTypedArray>::cast(m.Value());
- if (typed_array->is_on_heap()) return MaybeHandle<JSTypedArray>();
+ if (!m.HasValue()) return base::nullopt;
+ ObjectRef object = m.Ref(broker);
+ if (!object.IsJSTypedArray()) return base::nullopt;
+ JSTypedArrayRef typed_array = object.AsJSTypedArray();
+ if (typed_array.is_on_heap()) return base::nullopt;
return typed_array;
}
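[Editor's sketch] The refactoring pattern of this hunk reduced to standard C++ (toy types invented for illustration; base::Optional behaves like std::optional here): a chain of early returns over an optional replaces the MaybeHandle sentinel style, and the result exists only when every precondition holds.

#include <iostream>
#include <optional>
#include <string>
#include <variant>

struct TypedArray { bool on_heap = false; };
using Value = std::variant<int, std::string, TypedArray>;

std::optional<TypedArray> GetTypedArrayConstant(const std::optional<Value>& v) {
  if (!v) return std::nullopt;             // receiver is not a constant
  const TypedArray* ta = std::get_if<TypedArray>(&*v);
  if (ta == nullptr) return std::nullopt;  // constant, but wrong type
  if (ta->on_heap) return std::nullopt;    // on-heap arrays unsupported
  return *ta;
}

int main() {
  std::cout << GetTypedArrayConstant(Value{TypedArray{}}).has_value()    // 1
            << GetTypedArrayConstant(Value{42}).has_value()              // 0
            << GetTypedArrayConstant(std::nullopt).has_value() << "\n";  // 0
}
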
@@ -2362,19 +2502,20 @@ JSNativeContextSpecialization::BuildElementAccess(
// Check if we can constant-fold information about the {receiver} (i.e.
// for asm.js-like code patterns).
- Handle<JSTypedArray> typed_array;
- if (GetTypedArrayConstant(receiver).ToHandle(&typed_array)) {
- buffer = jsgraph()->HeapConstant(typed_array->GetBuffer());
+ base::Optional<JSTypedArrayRef> typed_array =
+ GetTypedArrayConstant(broker(), receiver);
+ if (typed_array.has_value()) {
+ typed_array->Serialize();
+ buffer = jsgraph()->Constant(typed_array->buffer());
length =
jsgraph()->Constant(static_cast<double>(typed_array->length_value()));
// Load the (known) base and external pointer for the {receiver}. The
- // {external_pointer} might be invalid if the {buffer} was neutered, so
+ // {external_pointer} might be invalid if the {buffer} was detached, so
// we need to make sure that any access is properly guarded.
base_pointer = jsgraph()->ZeroConstant();
- external_pointer = jsgraph()->PointerConstant(
- FixedTypedArrayBase::cast(typed_array->elements())
- ->external_pointer());
+ external_pointer =
+ jsgraph()->PointerConstant(typed_array->elements_external_pointer());
} else {
// Load the {receiver}s length.
length = effect = graph()->NewNode(
@@ -2412,15 +2553,15 @@ JSNativeContextSpecialization::BuildElementAccess(
elements, effect, control);
}
- // See if we can skip the neutering check.
- if (isolate()->IsArrayBufferNeuteringIntact()) {
+ // See if we can skip the detaching check.
+ if (isolate()->IsArrayBufferDetachingIntact()) {
// Add a code dependency so we are deoptimized in case an ArrayBuffer
- // gets neutered.
+ // gets detached.
dependencies()->DependOnProtector(PropertyCellRef(
- js_heap_broker(), factory()->array_buffer_neutering_protector()));
+ broker(), factory()->array_buffer_detaching_protector()));
} else {
- // Deopt if the {buffer} was neutered.
- // Note: A neutered buffer leads to megamorphic feedback.
+ // Deopt if the {buffer} was detached.
+ // Note: A detached buffer leads to megamorphic feedback.
Node* buffer_bit_field = effect = graph()->NewNode(
simplified()->LoadField(AccessBuilder::ForJSArrayBufferBitField()),
buffer, effect, control);
@@ -2428,10 +2569,10 @@ JSNativeContextSpecialization::BuildElementAccess(
simplified()->NumberEqual(),
graph()->NewNode(
simplified()->NumberBitwiseAnd(), buffer_bit_field,
- jsgraph()->Constant(JSArrayBuffer::WasNeuteredBit::kMask)),
+ jsgraph()->Constant(JSArrayBuffer::WasDetachedBit::kMask)),
jsgraph()->ZeroConstant());
effect = graph()->NewNode(
- simplified()->CheckIf(DeoptimizeReason::kArrayBufferWasNeutered),
+ simplified()->CheckIf(DeoptimizeReason::kArrayBufferWasDetached),
check, effect, control);
}
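[Editor's sketch] When the protector no longer holds, the emitted detach check is just a mask test on the buffer's bit field. The shape in isolation (the bit position below is made up; the real mask is JSArrayBuffer::WasDetachedBit::kMask):

#include <cstdint>
#include <iostream>

// Hypothetical layout: one bit of the bit field records detachment.
constexpr uint32_t kWasDetachedMask = uint32_t{1} << 1;

// Mirrors the emitted check: (bit_field & mask) == 0, otherwise deopt.
bool CheckNotDetached(uint32_t bit_field) {
  return (bit_field & kWasDetachedMask) == 0;
}

int main() {
  std::cout << CheckNotDetached(0) << "\n";                 // 1: proceed
  std::cout << CheckNotDetached(kWasDetachedMask) << "\n";  // 0: deoptimize
}
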
@@ -2572,7 +2713,7 @@ JSNativeContextSpecialization::BuildElementAccess(
}
// Check if the {receiver} is a JSArray.
- bool receiver_is_jsarray = HasOnlyJSArrayMaps(receiver_maps);
+ bool receiver_is_jsarray = HasOnlyJSArrayMaps(broker(), receiver_maps);
// Load the length of the {receiver}.
Node* length = effect =
@@ -2829,7 +2970,7 @@ Node* JSNativeContextSpecialization::BuildIndexedStringLoad(
if (load_mode == LOAD_IGNORE_OUT_OF_BOUNDS &&
isolate()->IsNoElementsProtectorIntact()) {
dependencies()->DependOnProtector(
- PropertyCellRef(js_heap_broker(), factory()->no_elements_protector()));
+ PropertyCellRef(broker(), factory()->no_elements_protector()));
// Ensure that the {index} is a valid String length.
index = *effect = graph()->NewNode(
@@ -2880,7 +3021,7 @@ Node* JSNativeContextSpecialization::BuildIndexedStringLoad(
}
Node* JSNativeContextSpecialization::BuildExtendPropertiesBackingStore(
- Handle<Map> map, Node* properties, Node* effect, Node* control) {
+ const MapRef& map, Node* properties, Node* effect, Node* control) {
// TODO(bmeurer/jkummerow): Property deletions can undo map transitions
// while keeping the backing store around, meaning that even though the
// map might believe that objects have no unused property fields, there
@@ -2890,9 +3031,9 @@ Node* JSNativeContextSpecialization::BuildExtendPropertiesBackingStore(
// difficult for escape analysis to get rid of the backing stores used
// for intermediate states of chains of property additions. That makes
// it unclear what the best approach is here.
- DCHECK_EQ(0, map->UnusedPropertyFields());
+ DCHECK_EQ(0, map.UnusedPropertyFields());
// Compute the length of the old {properties} and the new properties.
- int length = map->NextFreePropertyIndex() - map->GetInObjectProperties();
+ int length = map.NextFreePropertyIndex() - map.GetInObjectProperties();
int new_length = length + JSObject::kFieldsAdded;
// Collect the field values from the {properties}.
ZoneVector<Node*> values(zone());
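[Editor's sketch] The size arithmetic of BuildExtendPropertiesBackingStore in plain C++ (simplified to out-of-object properties only; the growth step is three slots, JSObject::kFieldsAdded, at the time of this diff): copy the old property values into a store longer by a fixed delta.

#include <cstddef>
#include <iostream>
#include <vector>

constexpr int kFieldsAdded = 3;  // fixed growth step per extension

std::vector<int> ExtendBackingStore(const std::vector<int>& old_store) {
  std::vector<int> new_store(old_store.size() + kFieldsAdded, 0);
  // Collect the field values from the old {properties}.
  for (std::size_t i = 0; i < old_store.size(); ++i) new_store[i] = old_store[i];
  return new_store;
}

int main() {
  std::vector<int> properties{10, 20};
  std::cout << ExtendBackingStore(properties).size() << "\n";  // 5
}
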
@@ -2961,16 +3102,17 @@ Node* JSNativeContextSpecialization::BuildCheckEqualsName(Handle<Name> name,
bool JSNativeContextSpecialization::CanTreatHoleAsUndefined(
MapHandles const& receiver_maps) {
- // Check if all {receiver_maps} either have one of the initial Array.prototype
+ // Check if all {receiver_maps} have one of the initial Array.prototype
// or Object.prototype objects as their prototype (in any of the current
// native contexts, as the global Array protector works isolate-wide).
- for (Handle<Map> receiver_map : receiver_maps) {
- DisallowHeapAllocation no_gc;
- Object* const receiver_prototype = receiver_map->prototype();
- if (!isolate()->IsInAnyContext(receiver_prototype,
- Context::INITIAL_ARRAY_PROTOTYPE_INDEX) &&
- !isolate()->IsInAnyContext(receiver_prototype,
- Context::INITIAL_OBJECT_PROTOTYPE_INDEX)) {
+ for (Handle<Map> map : receiver_maps) {
+ MapRef receiver_map(broker(), map);
+ // TODO(neis): Remove SerializePrototype call once brokerization is
+ // complete.
+ receiver_map.SerializePrototype();
+ ObjectRef receiver_prototype = receiver_map.prototype();
+ if (!receiver_prototype.IsJSObject() ||
+ !broker()->IsArrayOrObjectPrototype(receiver_prototype.AsJSObject())) {
return false;
}
}
@@ -2979,7 +3121,7 @@ bool JSNativeContextSpecialization::CanTreatHoleAsUndefined(
if (!isolate()->IsNoElementsProtectorIntact()) return false;
dependencies()->DependOnProtector(
- PropertyCellRef(js_heap_broker(), factory()->no_elements_protector()));
+ PropertyCellRef(broker(), factory()->no_elements_protector()));
return true;
}
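[Editor's sketch] A minimal model of the protector cell consulted above (hypothetical and greatly simplified): it starts valid and can only ever be invalidated; compiled code checks it once at compile time and records a dependency, so tripping it discards the dependent code.

#include <iostream>

struct Protector {
  bool valid = true;
  void Invalidate() { valid = false; }
};

Protector no_elements_protector;

// With the protector intact, holes may be treated as undefined without
// re-checking the prototype chain on every element access.
bool CanTreatHoleAsUndefined() { return no_elements_protector.valid; }

int main() {
  std::cout << CanTreatHoleAsUndefined() << "\n";  // 1
  no_elements_protector.Invalidate();  // e.g. Array.prototype gained elements
  std::cout << CanTreatHoleAsUndefined() << "\n";  // 0
}
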
@@ -3027,7 +3169,7 @@ bool JSNativeContextSpecialization::InferReceiverMaps(
Node* receiver, Node* effect, MapHandles* receiver_maps) {
ZoneHandleSet<Map> maps;
NodeProperties::InferReceiverMapsResult result =
- NodeProperties::InferReceiverMaps(isolate(), receiver, effect, &maps);
+ NodeProperties::InferReceiverMaps(broker(), receiver, effect, &maps);
if (result == NodeProperties::kReliableReceiverMaps) {
for (size_t i = 0; i < maps.size(); ++i) {
receiver_maps->push_back(maps[i]);
@@ -3037,7 +3179,8 @@ bool JSNativeContextSpecialization::InferReceiverMaps(
// For untrusted receiver maps, we can still use the information
// if the maps are stable.
for (size_t i = 0; i < maps.size(); ++i) {
- if (!maps[i]->is_stable()) return false;
+ MapRef map(broker(), maps[i]);
+ if (!map.is_stable()) return false;
}
for (size_t i = 0; i < maps.size(); ++i) {
receiver_maps->push_back(maps[i]);
diff --git a/deps/v8/src/compiler/js-native-context-specialization.h b/deps/v8/src/compiler/js-native-context-specialization.h
index 0bd62e07c9..fb8ee9b616 100644
--- a/deps/v8/src/compiler/js-native-context-specialization.h
+++ b/deps/v8/src/compiler/js-native-context-specialization.h
@@ -51,7 +51,7 @@ class V8_EXPORT_PRIVATE JSNativeContextSpecialization final
typedef base::Flags<Flag> Flags;
JSNativeContextSpecialization(Editor* editor, JSGraph* jsgraph,
- JSHeapBroker* js_heap_broker, Flags flags,
+ JSHeapBroker* broker, Flags flags,
Handle<Context> native_context,
CompilationDependencies* dependencies,
Zone* zone, Zone* shared_zone);
@@ -70,6 +70,9 @@ class V8_EXPORT_PRIVATE JSNativeContextSpecialization final
private:
Reduction ReduceJSAdd(Node* node);
+ Reduction ReduceJSAsyncFunctionEnter(Node* node);
+ Reduction ReduceJSAsyncFunctionReject(Node* node);
+ Reduction ReduceJSAsyncFunctionResolve(Node* node);
Reduction ReduceJSGetSuperConstructor(Node* node);
Reduction ReduceJSInstanceOf(Node* node);
Reduction ReduceJSHasInPrototypeChain(Node* node);
@@ -183,7 +186,7 @@ class V8_EXPORT_PRIVATE JSNativeContextSpecialization final
KeyedAccessLoadMode load_mode);
// Construct appropriate subgraph to extend properties backing store.
- Node* BuildExtendPropertiesBackingStore(Handle<Map> map, Node* properties,
+ Node* BuildExtendPropertiesBackingStore(const MapRef& map, Node* properties,
Node* effect, Node* control);
// Construct appropriate subgraph to check that the {value} matches
@@ -223,15 +226,10 @@ class V8_EXPORT_PRIVATE JSNativeContextSpecialization final
InferHasInPrototypeChainResult InferHasInPrototypeChain(
Node* receiver, Node* effect, Handle<HeapObject> prototype);
- // Script context lookup logic.
- struct ScriptContextTableLookupResult;
- bool LookupInScriptContextTable(Handle<Name> name,
- ScriptContextTableLookupResult* result);
-
Graph* graph() const;
JSGraph* jsgraph() const { return jsgraph_; }
- JSHeapBroker* js_heap_broker() const { return js_heap_broker_; }
+ JSHeapBroker* broker() const { return broker_; }
Isolate* isolate() const;
Factory* factory() const;
CommonOperatorBuilder* common() const;
@@ -240,21 +238,20 @@ class V8_EXPORT_PRIVATE JSNativeContextSpecialization final
Flags flags() const { return flags_; }
Handle<JSGlobalObject> global_object() const { return global_object_; }
Handle<JSGlobalProxy> global_proxy() const { return global_proxy_; }
- const NativeContextRef& native_context() const { return native_context_; }
+ NativeContextRef native_context() const { return broker()->native_context(); }
CompilationDependencies* dependencies() const { return dependencies_; }
Zone* zone() const { return zone_; }
Zone* shared_zone() const { return shared_zone_; }
JSGraph* const jsgraph_;
- JSHeapBroker* const js_heap_broker_;
+ JSHeapBroker* const broker_;
Flags const flags_;
Handle<JSGlobalObject> global_object_;
Handle<JSGlobalProxy> global_proxy_;
- NativeContextRef native_context_;
CompilationDependencies* const dependencies_;
Zone* const zone_;
Zone* const shared_zone_;
- TypeCache const& type_cache_;
+ TypeCache const* type_cache_;
DISALLOW_COPY_AND_ASSIGN(JSNativeContextSpecialization);
};
diff --git a/deps/v8/src/compiler/js-operator.cc b/deps/v8/src/compiler/js-operator.cc
index a30b4ddcdd..f2a2e7c924 100644
--- a/deps/v8/src/compiler/js-operator.cc
+++ b/deps/v8/src/compiler/js-operator.cc
@@ -593,55 +593,58 @@ CompareOperationHint CompareOperationHintOf(const Operator* op) {
return OpParameter<CompareOperationHint>(op);
}
-#define CACHED_OP_LIST(V) \
- V(BitwiseOr, Operator::kNoProperties, 2, 1) \
- V(BitwiseXor, Operator::kNoProperties, 2, 1) \
- V(BitwiseAnd, Operator::kNoProperties, 2, 1) \
- V(ShiftLeft, Operator::kNoProperties, 2, 1) \
- V(ShiftRight, Operator::kNoProperties, 2, 1) \
- V(ShiftRightLogical, Operator::kNoProperties, 2, 1) \
- V(Subtract, Operator::kNoProperties, 2, 1) \
- V(Multiply, Operator::kNoProperties, 2, 1) \
- V(Divide, Operator::kNoProperties, 2, 1) \
- V(Modulus, Operator::kNoProperties, 2, 1) \
- V(Exponentiate, Operator::kNoProperties, 2, 1) \
- V(BitwiseNot, Operator::kNoProperties, 1, 1) \
- V(Decrement, Operator::kNoProperties, 1, 1) \
- V(Increment, Operator::kNoProperties, 1, 1) \
- V(Negate, Operator::kNoProperties, 1, 1) \
- V(ToLength, Operator::kNoProperties, 1, 1) \
- V(ToName, Operator::kNoProperties, 1, 1) \
- V(ToNumber, Operator::kNoProperties, 1, 1) \
- V(ToNumberConvertBigInt, Operator::kNoProperties, 1, 1) \
- V(ToNumeric, Operator::kNoProperties, 1, 1) \
- V(ToObject, Operator::kFoldable, 1, 1) \
- V(ToString, Operator::kNoProperties, 1, 1) \
- V(Create, Operator::kNoProperties, 2, 1) \
- V(CreateIterResultObject, Operator::kEliminatable, 2, 1) \
- V(CreateStringIterator, Operator::kEliminatable, 1, 1) \
- V(CreateKeyValueArray, Operator::kEliminatable, 2, 1) \
- V(CreatePromise, Operator::kEliminatable, 0, 1) \
- V(CreateTypedArray, Operator::kNoProperties, 5, 1) \
- V(CreateObject, Operator::kNoProperties, 1, 1) \
- V(ObjectIsArray, Operator::kNoProperties, 1, 1) \
- V(HasProperty, Operator::kNoProperties, 2, 1) \
- V(HasInPrototypeChain, Operator::kNoProperties, 2, 1) \
- V(OrdinaryHasInstance, Operator::kNoProperties, 2, 1) \
- V(ForInEnumerate, Operator::kNoProperties, 1, 1) \
- V(LoadMessage, Operator::kNoThrow | Operator::kNoWrite, 0, 1) \
- V(StoreMessage, Operator::kNoRead | Operator::kNoThrow, 1, 0) \
- V(GeneratorRestoreContinuation, Operator::kNoThrow, 1, 1) \
- V(GeneratorRestoreContext, Operator::kNoThrow, 1, 1) \
- V(GeneratorRestoreInputOrDebugPos, Operator::kNoThrow, 1, 1) \
- V(StackCheck, Operator::kNoWrite, 0, 0) \
- V(Debugger, Operator::kNoProperties, 0, 0) \
- V(FulfillPromise, Operator::kNoDeopt | Operator::kNoThrow, 2, 1) \
- V(PerformPromiseThen, Operator::kNoDeopt | Operator::kNoThrow, 4, 1) \
- V(PromiseResolve, Operator::kNoProperties, 2, 1) \
- V(RejectPromise, Operator::kNoDeopt | Operator::kNoThrow, 3, 1) \
- V(ResolvePromise, Operator::kNoDeopt | Operator::kNoThrow, 2, 1) \
- V(GetSuperConstructor, Operator::kNoWrite, 1, 1) \
- V(ParseInt, Operator::kNoProperties, 2, 1) \
+#define CACHED_OP_LIST(V) \
+ V(BitwiseOr, Operator::kNoProperties, 2, 1) \
+ V(BitwiseXor, Operator::kNoProperties, 2, 1) \
+ V(BitwiseAnd, Operator::kNoProperties, 2, 1) \
+ V(ShiftLeft, Operator::kNoProperties, 2, 1) \
+ V(ShiftRight, Operator::kNoProperties, 2, 1) \
+ V(ShiftRightLogical, Operator::kNoProperties, 2, 1) \
+ V(Subtract, Operator::kNoProperties, 2, 1) \
+ V(Multiply, Operator::kNoProperties, 2, 1) \
+ V(Divide, Operator::kNoProperties, 2, 1) \
+ V(Modulus, Operator::kNoProperties, 2, 1) \
+ V(Exponentiate, Operator::kNoProperties, 2, 1) \
+ V(BitwiseNot, Operator::kNoProperties, 1, 1) \
+ V(Decrement, Operator::kNoProperties, 1, 1) \
+ V(Increment, Operator::kNoProperties, 1, 1) \
+ V(Negate, Operator::kNoProperties, 1, 1) \
+ V(ToLength, Operator::kNoProperties, 1, 1) \
+ V(ToName, Operator::kNoProperties, 1, 1) \
+ V(ToNumber, Operator::kNoProperties, 1, 1) \
+ V(ToNumberConvertBigInt, Operator::kNoProperties, 1, 1) \
+ V(ToNumeric, Operator::kNoProperties, 1, 1) \
+ V(ToObject, Operator::kFoldable, 1, 1) \
+ V(ToString, Operator::kNoProperties, 1, 1) \
+ V(Create, Operator::kNoProperties, 2, 1) \
+ V(CreateIterResultObject, Operator::kEliminatable, 2, 1) \
+ V(CreateStringIterator, Operator::kEliminatable, 1, 1) \
+ V(CreateKeyValueArray, Operator::kEliminatable, 2, 1) \
+ V(CreatePromise, Operator::kEliminatable, 0, 1) \
+ V(CreateTypedArray, Operator::kNoProperties, 5, 1) \
+ V(CreateObject, Operator::kNoProperties, 1, 1) \
+ V(ObjectIsArray, Operator::kNoProperties, 1, 1) \
+ V(HasProperty, Operator::kNoProperties, 2, 1) \
+ V(HasInPrototypeChain, Operator::kNoProperties, 2, 1) \
+ V(OrdinaryHasInstance, Operator::kNoProperties, 2, 1) \
+ V(ForInEnumerate, Operator::kNoProperties, 1, 1) \
+ V(AsyncFunctionEnter, Operator::kNoProperties, 2, 1) \
+ V(AsyncFunctionReject, Operator::kNoDeopt | Operator::kNoThrow, 3, 1) \
+ V(AsyncFunctionResolve, Operator::kNoDeopt | Operator::kNoThrow, 3, 1) \
+ V(LoadMessage, Operator::kNoThrow | Operator::kNoWrite, 0, 1) \
+ V(StoreMessage, Operator::kNoRead | Operator::kNoThrow, 1, 0) \
+ V(GeneratorRestoreContinuation, Operator::kNoThrow, 1, 1) \
+ V(GeneratorRestoreContext, Operator::kNoThrow, 1, 1) \
+ V(GeneratorRestoreInputOrDebugPos, Operator::kNoThrow, 1, 1) \
+ V(StackCheck, Operator::kNoWrite, 0, 0) \
+ V(Debugger, Operator::kNoProperties, 0, 0) \
+ V(FulfillPromise, Operator::kNoDeopt | Operator::kNoThrow, 2, 1) \
+ V(PerformPromiseThen, Operator::kNoDeopt | Operator::kNoThrow, 4, 1) \
+ V(PromiseResolve, Operator::kNoProperties, 2, 1) \
+ V(RejectPromise, Operator::kNoDeopt | Operator::kNoThrow, 3, 1) \
+ V(ResolvePromise, Operator::kNoDeopt | Operator::kNoThrow, 2, 1) \
+ V(GetSuperConstructor, Operator::kNoWrite, 1, 1) \
+ V(ParseInt, Operator::kNoProperties, 2, 1) \
V(RegExpTest, Operator::kNoProperties, 2, 1)
#define BINARY_OP_LIST(V) V(Add)
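[Editor's sketch] CACHED_OP_LIST above is a classic X-macro: the operator table is written once and expanded into several definitions that stay in sync automatically. A self-contained miniature of the pattern (toy operators):

#include <iostream>

// The operator table, written once.
#define TOY_OP_LIST(V) \
  V(Add, 2)            \
  V(Negate, 1)         \
  V(ToString, 1)

// Expansion 1: an enum of operator kinds.
enum class Op {
#define DECLARE(Name, arity) k##Name,
  TOY_OP_LIST(DECLARE)
#undef DECLARE
};

// Expansion 2: arity lookup, generated from the same table.
int ArityOf(Op op) {
  switch (op) {
#define CASE(Name, arity) \
  case Op::k##Name:       \
    return arity;
    TOY_OP_LIST(CASE)
#undef CASE
  }
  return -1;
}

int main() {
  std::cout << ArityOf(Op::kAdd) << " " << ArityOf(Op::kToString) << "\n";  // 2 1
}
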
@@ -711,16 +714,20 @@ struct JSOperatorGlobalCache final {
Name##Operator<CompareOperationHint::kSymbol> k##Name##SymbolOperator; \
Name##Operator<CompareOperationHint::kBigInt> k##Name##BigIntOperator; \
Name##Operator<CompareOperationHint::kReceiver> k##Name##ReceiverOperator; \
+ Name##Operator<CompareOperationHint::kReceiverOrNullOrUndefined> \
+ k##Name##ReceiverOrNullOrUndefinedOperator; \
Name##Operator<CompareOperationHint::kAny> k##Name##AnyOperator;
COMPARE_OP_LIST(COMPARE_OP)
#undef COMPARE_OP
};
-static base::LazyInstance<JSOperatorGlobalCache>::type kJSOperatorGlobalCache =
- LAZY_INSTANCE_INITIALIZER;
+namespace {
+DEFINE_LAZY_LEAKY_OBJECT_GETTER(JSOperatorGlobalCache,
+ GetJSOperatorGlobalCache);
+}
JSOperatorBuilder::JSOperatorBuilder(Zone* zone)
- : cache_(kJSOperatorGlobalCache.Get()), zone_(zone) {}
+ : cache_(*GetJSOperatorGlobalCache()), zone_(zone) {}
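[Editor's sketch] DEFINE_LAZY_LEAKY_OBJECT_GETTER replaces base::LazyInstance with what amounts to a function-local static that is heap-allocated once and deliberately never freed. A minimal standard-C++ equivalent, assuming that is all the macro does here (thread-safe initialization since C++11):

#include <iostream>

struct OperatorGlobalCache {
  int cached_value = 42;  // one-time expensive setup would happen here
};

// Built on first use, intentionally leaked: no static-destruction-order
// issues, and the cache lives for the whole process.
OperatorGlobalCache* GetOperatorGlobalCache() {
  static OperatorGlobalCache* cache = new OperatorGlobalCache();
  return cache;
}

int main() {
  std::cout << GetOperatorGlobalCache()->cached_value << "\n";         // 42
  std::cout << (GetOperatorGlobalCache() == GetOperatorGlobalCache())  // 1
            << "\n";
}
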
#define CACHED_OP(Name, properties, value_input_count, value_output_count) \
const Operator* JSOperatorBuilder::Name() { \
@@ -778,6 +785,8 @@ BINARY_OP_LIST(BINARY_OP)
return &cache_.k##Name##BigIntOperator; \
case CompareOperationHint::kReceiver: \
return &cache_.k##Name##ReceiverOperator; \
+ case CompareOperationHint::kReceiverOrNullOrUndefined: \
+ return &cache_.k##Name##ReceiverOrNullOrUndefinedOperator; \
case CompareOperationHint::kAny: \
return &cache_.k##Name##AnyOperator; \
} \
@@ -976,6 +985,11 @@ const Operator* JSOperatorBuilder::GeneratorStore(int register_count) {
register_count); // parameter
}
+int RegisterCountOf(Operator const* op) {
+ DCHECK_EQ(IrOpcode::kJSCreateAsyncFunctionObject, op->opcode());
+ return OpParameter<int>(op);
+}
+
int GeneratorStoreValueCountOf(const Operator* op) {
DCHECK_EQ(IrOpcode::kJSGeneratorStore, op->opcode());
return OpParameter<int>(op);
@@ -1133,6 +1147,16 @@ const Operator* JSOperatorBuilder::CreateArrayIterator(IterationKind kind) {
parameters); // parameter
}
+const Operator* JSOperatorBuilder::CreateAsyncFunctionObject(
+ int register_count) {
+ return new (zone()) Operator1<int>( // --
+ IrOpcode::kJSCreateAsyncFunctionObject, // opcode
+ Operator::kEliminatable, // flags
+ "JSCreateAsyncFunctionObject", // name
+ 3, 1, 1, 1, 1, 0, // counts
+ register_count); // parameter
+}
+
const Operator* JSOperatorBuilder::CreateCollectionIterator(
CollectionKind collection_kind, IterationKind iteration_kind) {
CreateCollectionIteratorParameters parameters(collection_kind,
diff --git a/deps/v8/src/compiler/js-operator.h b/deps/v8/src/compiler/js-operator.h
index db38941219..16eaf615d9 100644
--- a/deps/v8/src/compiler/js-operator.h
+++ b/deps/v8/src/compiler/js-operator.h
@@ -667,6 +667,8 @@ BinaryOperationHint BinaryOperationHintOf(const Operator* op);
CompareOperationHint CompareOperationHintOf(const Operator* op);
+int RegisterCountOf(Operator const* op) V8_WARN_UNUSED_RESULT;
+
int GeneratorStoreValueCountOf(const Operator* op) V8_WARN_UNUSED_RESULT;
int RestoreRegisterIndexOf(const Operator* op) V8_WARN_UNUSED_RESULT;
@@ -717,6 +719,7 @@ class V8_EXPORT_PRIVATE JSOperatorBuilder final
const Operator* CreateArguments(CreateArgumentsType type);
const Operator* CreateArray(size_t arity, MaybeHandle<AllocationSite> site);
const Operator* CreateArrayIterator(IterationKind);
+ const Operator* CreateAsyncFunctionObject(int register_count);
const Operator* CreateCollectionIterator(CollectionKind, IterationKind);
const Operator* CreateBoundFunction(size_t arity, Handle<Map> map);
const Operator* CreateClosure(Handle<SharedFunctionInfo> shared_info,
@@ -809,6 +812,10 @@ class V8_EXPORT_PRIVATE JSOperatorBuilder final
const Operator* InstanceOf(const VectorSlotPair& feedback);
const Operator* OrdinaryHasInstance();
+ const Operator* AsyncFunctionEnter();
+ const Operator* AsyncFunctionReject();
+ const Operator* AsyncFunctionResolve();
+
const Operator* ForInEnumerate();
const Operator* ForInNext(ForInMode);
const Operator* ForInPrepare(ForInMode);
diff --git a/deps/v8/src/compiler/js-type-hint-lowering.cc b/deps/v8/src/compiler/js-type-hint-lowering.cc
index fc7fab4b54..cfcfb4ce58 100644
--- a/deps/v8/src/compiler/js-type-hint-lowering.cc
+++ b/deps/v8/src/compiler/js-type-hint-lowering.cc
@@ -91,6 +91,7 @@ class JSSpeculativeBinopBuilder final {
case CompareOperationHint::kSymbol:
case CompareOperationHint::kBigInt:
case CompareOperationHint::kReceiver:
+ case CompareOperationHint::kReceiverOrNullOrUndefined:
case CompareOperationHint::kInternalizedString:
break;
}
diff --git a/deps/v8/src/compiler/js-typed-lowering.cc b/deps/v8/src/compiler/js-typed-lowering.cc
index 7b3728428b..9be71cbd27 100644
--- a/deps/v8/src/compiler/js-typed-lowering.cc
+++ b/deps/v8/src/compiler/js-typed-lowering.cc
@@ -51,6 +51,7 @@ class JSBinopReduction final {
case CompareOperationHint::kSymbol:
case CompareOperationHint::kBigInt:
case CompareOperationHint::kReceiver:
+ case CompareOperationHint::kReceiverOrNullOrUndefined:
case CompareOperationHint::kInternalizedString:
break;
}
@@ -71,6 +72,13 @@ class JSBinopReduction final {
BothInputsMaybe(Type::Receiver());
}
+ bool IsReceiverOrNullOrUndefinedCompareOperation() {
+ DCHECK_EQ(1, node_->op()->EffectOutputCount());
+ return (CompareOperationHintOf(node_->op()) ==
+ CompareOperationHint::kReceiverOrNullOrUndefined) &&
+ BothInputsMaybe(Type::ReceiverOrNullOrUndefined());
+ }
+
bool IsStringCompareOperation() {
DCHECK_EQ(1, node_->op()->EffectOutputCount());
return (CompareOperationHintOf(node_->op()) ==
@@ -94,7 +102,7 @@ class JSBinopReduction final {
if (BothInputsAre(Type::String()) ||
BinaryOperationHintOf(node_->op()) == BinaryOperationHint::kString) {
HeapObjectBinopMatcher m(node_);
- JSHeapBroker* broker = lowering_->js_heap_broker();
+ JSHeapBroker* broker = lowering_->broker();
if (m.right().HasValue() && m.right().Ref(broker).IsString()) {
StringRef right_string = m.right().Ref(broker).AsString();
if (right_string.length() >= ConsString::kMinLength) return true;
@@ -122,6 +130,15 @@ class JSBinopReduction final {
update_effect(left_input);
}
+ // Inserts a CheckReceiverOrNullOrUndefined for the left input.
+ void CheckLeftInputToReceiverOrNullOrUndefined() {
+ Node* left_input =
+ graph()->NewNode(simplified()->CheckReceiverOrNullOrUndefined(), left(),
+ effect(), control());
+ node_->ReplaceInput(0, left_input);
+ update_effect(left_input);
+ }
+
// Checks that both inputs are Receiver, and if we don't know
// statically that one side is already a Receiver, insert a
// CheckReceiver node.
@@ -137,6 +154,22 @@ class JSBinopReduction final {
}
}
+ // Checks that both inputs are Receiver, Null or Undefined and if
+ // we don't know statically that one side is already a Receiver,
+ // Null or Undefined, insert CheckReceiverOrNullOrUndefined nodes.
+ void CheckInputsToReceiverOrNullOrUndefined() {
+ if (!left_type().Is(Type::ReceiverOrNullOrUndefined())) {
+ CheckLeftInputToReceiverOrNullOrUndefined();
+ }
+ if (!right_type().Is(Type::ReceiverOrNullOrUndefined())) {
+ Node* right_input =
+ graph()->NewNode(simplified()->CheckReceiverOrNullOrUndefined(),
+ right(), effect(), control());
+ node_->ReplaceInput(1, right_input);
+ update_effect(right_input);
+ }
+ }
+
// Inserts a CheckSymbol for the left input.
void CheckLeftInputToSymbol() {
Node* left_input = graph()->NewNode(simplified()->CheckSymbol(), left(),
@@ -310,31 +343,6 @@ class JSBinopReduction final {
UNREACHABLE();
}
- const Operator* NumberOpFromSpeculativeNumberOp() {
- switch (node_->opcode()) {
- case IrOpcode::kSpeculativeNumberEqual:
- return simplified()->NumberEqual();
- case IrOpcode::kSpeculativeNumberLessThan:
- return simplified()->NumberLessThan();
- case IrOpcode::kSpeculativeNumberLessThanOrEqual:
- return simplified()->NumberLessThanOrEqual();
- case IrOpcode::kSpeculativeNumberAdd:
- // Handled by ReduceSpeculativeNumberAdd.
- UNREACHABLE();
- case IrOpcode::kSpeculativeNumberSubtract:
- return simplified()->NumberSubtract();
- case IrOpcode::kSpeculativeNumberMultiply:
- return simplified()->NumberMultiply();
- case IrOpcode::kSpeculativeNumberDivide:
- return simplified()->NumberDivide();
- case IrOpcode::kSpeculativeNumberModulus:
- return simplified()->NumberModulus();
- default:
- break;
- }
- UNREACHABLE();
- }
-
bool LeftInputIs(Type t) { return left_type().Is(t); }
bool RightInputIs(Type t) { return right_type().Is(t); }
@@ -414,12 +422,12 @@ class JSBinopReduction final {
// - relax effects from generic but not-side-effecting operations
JSTypedLowering::JSTypedLowering(Editor* editor, JSGraph* jsgraph,
- JSHeapBroker* js_heap_broker, Zone* zone)
+ JSHeapBroker* broker, Zone* zone)
: AdvancedReducer(editor),
jsgraph_(jsgraph),
- js_heap_broker_(js_heap_broker),
- empty_string_type_(Type::HeapConstant(
- js_heap_broker, factory()->empty_string(), graph()->zone())),
+ broker_(broker),
+ empty_string_type_(Type::HeapConstant(broker, factory()->empty_string(),
+ graph()->zone())),
pointer_comparable_type_(
Type::Union(Type::Oddball(),
Type::Union(Type::SymbolOrReceiver(), empty_string_type_,
@@ -427,21 +435,6 @@ JSTypedLowering::JSTypedLowering(Editor* editor, JSGraph* jsgraph,
graph()->zone())),
type_cache_(TypeCache::Get()) {}
-Reduction JSTypedLowering::ReduceSpeculativeNumberAdd(Node* node) {
- JSBinopReduction r(this, node);
- NumberOperationHint hint = NumberOperationHintOf(node->op());
- if ((hint == NumberOperationHint::kNumber ||
- hint == NumberOperationHint::kNumberOrOddball) &&
- r.BothInputsAre(Type::PlainPrimitive()) &&
- r.NeitherInputCanBe(Type::StringOrReceiver())) {
- // SpeculativeNumberAdd(x:-string, y:-string) =>
- // NumberAdd(ToNumber(x), ToNumber(y))
- r.ConvertInputsToNumber();
- return r.ChangeToPureOperator(simplified()->NumberAdd(), Type::Number());
- }
- return NoChange();
-}
-
Reduction JSTypedLowering::ReduceJSBitwiseNot(Node* node) {
Node* input = NodeProperties::GetValueInput(node, 0);
Type input_type = NodeProperties::GetType(input);
@@ -569,7 +562,9 @@ Reduction JSTypedLowering::ReduceJSAdd(Node* node) {
Node* length =
graph()->NewNode(simplified()->NumberAdd(), left_length, right_length);
- if (isolate()->IsStringLengthOverflowIntact()) {
+ CellRef string_length_protector(broker(),
+ factory()->string_length_protector());
+ if (string_length_protector.value().AsSmi() == Isolate::kProtectorValid) {
// We can just deoptimize if the {length} is out-of-bounds. Besides
// generating a shorter code sequence than the version below, this
// has the additional benefit of not holding on to the lazy {frame_state}
@@ -613,7 +608,7 @@ Reduction JSTypedLowering::ReduceJSAdd(Node* node) {
}
control = graph()->NewNode(common()->IfTrue(), branch);
length = effect =
- graph()->NewNode(common()->TypeGuard(type_cache_.kStringLengthType),
+ graph()->NewNode(common()->TypeGuard(type_cache_->kStringLengthType),
length, effect, control);
}
@@ -670,22 +665,6 @@ Reduction JSTypedLowering::ReduceNumberBinop(Node* node) {
return NoChange();
}
-Reduction JSTypedLowering::ReduceSpeculativeNumberBinop(Node* node) {
- JSBinopReduction r(this, node);
- NumberOperationHint hint = NumberOperationHintOf(node->op());
- if ((hint == NumberOperationHint::kNumber ||
- hint == NumberOperationHint::kNumberOrOddball) &&
- r.BothInputsAre(Type::NumberOrUndefinedOrNullOrBoolean())) {
- // We intentionally do this only in the Number and NumberOrOddball hint case
- // because simplified lowering of these speculative ops may do some clever
- // reductions in the other cases.
- r.ConvertInputsToNumber();
- return r.ChangeToPureOperator(r.NumberOpFromSpeculativeNumberOp(),
- Type::Number());
- }
- return NoChange();
-}
-
Reduction JSTypedLowering::ReduceInt32Binop(Node* node) {
JSBinopReduction r(this, node);
if (r.BothInputsAre(Type::PlainPrimitive())) {
@@ -708,15 +687,6 @@ Reduction JSTypedLowering::ReduceUI32Shift(Node* node, Signedness signedness) {
return NoChange();
}
-Reduction JSTypedLowering::ReduceSpeculativeNumberComparison(Node* node) {
- JSBinopReduction r(this, node);
- if (r.BothInputsAre(Type::Signed32()) ||
- r.BothInputsAre(Type::Unsigned32())) {
- return r.ChangeToPureOperator(r.NumberOpFromSpeculativeNumberOp());
- }
- return NoChange();
-}
-
Reduction JSTypedLowering::ReduceJSComparison(Node* node) {
JSBinopReduction r(this, node);
if (r.BothInputsAre(Type::String())) {
@@ -819,6 +789,46 @@ Reduction JSTypedLowering::ReduceJSEqual(Node* node) {
} else if (r.IsReceiverCompareOperation()) {
r.CheckInputsToReceiver();
return r.ChangeToPureOperator(simplified()->ReferenceEqual());
+ } else if (r.IsReceiverOrNullOrUndefinedCompareOperation()) {
+ // Check that both inputs are Receiver, Null or Undefined.
+ r.CheckInputsToReceiverOrNullOrUndefined();
+
+ // If one side is known to be a detectable receiver now, we
+ // can simply perform reference equality here, since this
+ // known detectable receiver will only ever match itself.
+ if (r.OneInputIs(Type::DetectableReceiver())) {
+ return r.ChangeToPureOperator(simplified()->ReferenceEqual());
+ }
+
+ // Knowing that both sides are Receiver, Null or Undefined, the
+ // abstract equality operation can be performed like this:
+ //
+ // if ObjectIsUndetectable(left)
+ // then ObjectIsUndetectable(right)
+ // else ReferenceEqual(left, right)
+ //
+ Node* left = r.left();
+ Node* right = r.right();
+ Node* effect = r.effect();
+ Node* control = r.control();
+
+ Node* check = graph()->NewNode(simplified()->ObjectIsUndetectable(), left);
+ Node* branch =
+ graph()->NewNode(common()->Branch(BranchHint::kFalse), check, control);
+
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* vtrue = graph()->NewNode(simplified()->ObjectIsUndetectable(), right);
+
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* vfalse =
+ graph()->NewNode(simplified()->ReferenceEqual(), left, right);
+
+ control = graph()->NewNode(common()->Merge(2), if_true, if_false);
+ Node* value =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ vtrue, vfalse, control);
+ ReplaceWithValue(node, value, effect, control);
+ return Replace(value);
} else if (r.IsStringCompareOperation()) {
r.CheckInputsToString();
return r.ChangeToPureOperator(simplified()->StringEqual());
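[Editor's sketch] The new branch encodes its comment's pseudocode directly; here it is as a runnable C++ analogue ("undetectable" covers null, undefined and document.all-style objects):

#include <iostream>

struct Obj {
  bool undetectable;
  int id;  // stands in for reference identity
};

// Once both sides are known Receiver, Null or Undefined, abstract
// equality is: if the left side is undetectable, the answer is whether
// the right side is too; otherwise plain reference equality decides.
bool AbstractEqual(const Obj& left, const Obj& right) {
  if (left.undetectable) return right.undetectable;
  return left.id == right.id;  // ReferenceEqual
}

int main() {
  Obj null_v{true, 0}, undef_v{true, 1}, window{false, 2}, other{false, 3};
  std::cout << AbstractEqual(null_v, undef_v)   // 1: null == undefined
            << AbstractEqual(window, window)    // 1: same reference
            << AbstractEqual(window, other)     // 0
            << AbstractEqual(window, null_v)    // 0
            << "\n";
}
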
@@ -879,6 +889,13 @@ Reduction JSTypedLowering::ReduceJSStrictEqual(Node* node) {
// both sides refer to the same Receiver.
r.CheckLeftInputToReceiver();
return r.ChangeToPureOperator(simplified()->ReferenceEqual());
+ } else if (r.IsReceiverOrNullOrUndefinedCompareOperation()) {
+ // For strict equality, it's enough to know that one input is a Receiver,
+ // Null or Undefined, as a strict equality comparison with a Receiver,
+ // Null or Undefined can only yield true if both sides refer to the same
+ // instance.
+ r.CheckLeftInputToReceiverOrNullOrUndefined();
+ return r.ChangeToPureOperator(simplified()->ReferenceEqual());
} else if (r.IsStringCompareOperation()) {
r.CheckInputsToString();
return r.ChangeToPureOperator(simplified()->StringEqual());
@@ -906,7 +923,7 @@ Reduction JSTypedLowering::ReduceJSToName(Node* node) {
Reduction JSTypedLowering::ReduceJSToLength(Node* node) {
Node* input = NodeProperties::GetValueInput(node, 0);
Type input_type = NodeProperties::GetType(input);
- if (input_type.Is(type_cache_.kIntegerOrMinusZero)) {
+ if (input_type.Is(type_cache_->kIntegerOrMinusZero)) {
if (input_type.IsNone() || input_type.Max() <= 0.0) {
input = jsgraph()->ZeroConstant();
} else if (input_type.Min() >= kMaxSafeInteger) {
@@ -933,8 +950,8 @@ Reduction JSTypedLowering::ReduceJSToNumberInput(Node* input) {
if (input_type.Is(Type::String())) {
HeapObjectMatcher m(input);
- if (m.HasValue() && m.Ref(js_heap_broker()).IsString()) {
- StringRef input_value = m.Ref(js_heap_broker()).AsString();
+ if (m.HasValue() && m.Ref(broker()).IsString()) {
+ StringRef input_value = m.Ref(broker()).AsString();
double number;
ASSIGN_RETURN_NO_CHANGE_IF_DATA_MISSING(number, input_value.ToNumber());
return Replace(jsgraph()->Constant(number));
@@ -1107,8 +1124,8 @@ Reduction JSTypedLowering::ReduceJSLoadNamed(Node* node) {
DCHECK_EQ(IrOpcode::kJSLoadNamed, node->opcode());
Node* receiver = NodeProperties::GetValueInput(node, 0);
Type receiver_type = NodeProperties::GetType(receiver);
- NameRef name(js_heap_broker(), NamedAccessOf(node->op()).name());
- NameRef length_str(js_heap_broker(), factory()->length_string());
+ NameRef name(broker(), NamedAccessOf(node->op()).name());
+ NameRef length_str(broker(), factory()->length_string());
// Optimize "length" property of strings.
if (name.equals(length_str) && receiver_type.Is(Type::String())) {
Node* value = graph()->NewNode(simplified()->StringLength(), receiver);
@@ -1541,7 +1558,7 @@ Reduction JSTypedLowering::ReduceJSConstruct(Node* node) {
// Patch {node} to an indirect call via the {function}s construct stub.
bool use_builtin_construct_stub = shared.construct_as_builtin();
- CodeRef code(js_heap_broker(),
+ CodeRef code(broker(),
use_builtin_construct_stub
? BUILTIN_CODE(isolate(), JSBuiltinsConstructStub)
: BUILTIN_CODE(isolate(), JSConstructStubGeneric));
@@ -1625,22 +1642,27 @@ Reduction JSTypedLowering::ReduceJSCall(Node* node) {
// See ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList ).
if (IsClassConstructor(shared.kind())) return NoChange();
- // Load the context from the {target}.
- Node* context = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForJSFunctionContext()), target,
- effect, control);
- NodeProperties::ReplaceContextInput(node, context);
-
- // Check if we need to convert the {receiver}.
+ // Check if we need to convert the {receiver}, but bail out if it would
+ // require data from a foreign native context.
if (is_sloppy(shared.language_mode()) && !shared.native() &&
!receiver_type.Is(Type::Receiver())) {
- Node* global_proxy = jsgraph()->Constant(function.global_proxy());
+ if (!function.native_context().equals(broker()->native_context())) {
+ return NoChange();
+ }
+ Node* global_proxy =
+ jsgraph()->Constant(function.native_context().global_proxy_object());
receiver = effect =
graph()->NewNode(simplified()->ConvertReceiver(convert_mode),
receiver, global_proxy, effect, control);
NodeProperties::ReplaceValueInput(node, receiver, 1);
}
+ // Load the context from the {target}.
+ Node* context = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSFunctionContext()), target,
+ effect, control);
+ NodeProperties::ReplaceContextInput(node, context);
+
// Update the effect dependency for the {node}.
NodeProperties::ReplaceEffectInput(node, effect);
@@ -2205,9 +2227,9 @@ Reduction JSTypedLowering::ReduceJSParseInt(Node* node) {
Type radix_type = NodeProperties::GetType(radix);
// We need kTenOrUndefined and kZeroOrUndefined because
// the type representing {0,10} would become the range 1-10.
- if (value_type.Is(type_cache_.kSafeInteger) &&
- (radix_type.Is(type_cache_.kTenOrUndefined) ||
- radix_type.Is(type_cache_.kZeroOrUndefined))) {
+ if (value_type.Is(type_cache_->kSafeInteger) &&
+ (radix_type.Is(type_cache_->kTenOrUndefined) ||
+ radix_type.Is(type_cache_->kZeroOrUndefined))) {
// Number.parseInt(a:safe-integer) -> a
// Number.parseInt(a:safe-integer,b:#0\/undefined) -> a
// Number.parseInt(a:safe-integer,b:#10\/undefined) -> a
@@ -2217,6 +2239,22 @@ Reduction JSTypedLowering::ReduceJSParseInt(Node* node) {
return NoChange();
}
+Reduction JSTypedLowering::ReduceJSResolvePromise(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSResolvePromise, node->opcode());
+ Node* resolution = NodeProperties::GetValueInput(node, 1);
+ Type resolution_type = NodeProperties::GetType(resolution);
+ // We can strength-reduce JSResolvePromise to JSFulfillPromise
+ // if the {resolution} is known to be a primitive, as in that
+ // case we don't perform the implicit chaining (via "then").
+ if (resolution_type.Is(Type::Primitive())) {
+ // JSResolvePromise(p,v:primitive) -> JSFulfillPromise(p,v)
+ node->RemoveInput(3); // frame state
+ NodeProperties::ChangeOp(node, javascript()->FulfillPromise());
+ return Changed(node);
+ }
+ return NoChange();
+}
+
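[Editor's sketch] Why the reduction above is safe, in a toy promise model (assumed, heavily simplified): only a resolution value that might be an object can trigger the observable "then" lookup, so a statically-primitive resolution collapses to plain fulfillment.

#include <iostream>
#include <string>
#include <variant>

struct Thenable {};  // stands in for an object with a callable "then"
using Value = std::variant<double, std::string, Thenable>;

struct Promise {
  bool fulfilled = false;
};

void FulfillPromise(Promise& p, const Value&) { p.fulfilled = true; }

// Resolve must look up "then" when the value may be an object; when the
// type system proves the value primitive, that path is dead and resolve
// is exactly fulfill, which is the rewrite performed above.
void ResolvePromise(Promise& p, const Value& v) {
  if (std::holds_alternative<Thenable>(v)) return;  // observable chaining
  FulfillPromise(p, v);
}

int main() {
  Promise p;
  ResolvePromise(p, Value{3.14});    // statically primitive resolution
  std::cout << p.fulfilled << "\n";  // 1
}
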
Reduction JSTypedLowering::Reduce(Node* node) {
DisallowHeapAccess no_heap_access;
@@ -2308,23 +2346,12 @@ Reduction JSTypedLowering::Reduce(Node* node) {
return ReduceJSGeneratorRestoreRegister(node);
case IrOpcode::kJSGeneratorRestoreInputOrDebugPos:
return ReduceJSGeneratorRestoreInputOrDebugPos(node);
- // TODO(mstarzinger): Simplified operations hiding in JS-level reducer not
- // fooling anyone. Consider moving this into a separate reducer.
- case IrOpcode::kSpeculativeNumberAdd:
- return ReduceSpeculativeNumberAdd(node);
- case IrOpcode::kSpeculativeNumberSubtract:
- case IrOpcode::kSpeculativeNumberMultiply:
- case IrOpcode::kSpeculativeNumberDivide:
- case IrOpcode::kSpeculativeNumberModulus:
- return ReduceSpeculativeNumberBinop(node);
- case IrOpcode::kSpeculativeNumberEqual:
- case IrOpcode::kSpeculativeNumberLessThan:
- case IrOpcode::kSpeculativeNumberLessThanOrEqual:
- return ReduceSpeculativeNumberComparison(node);
case IrOpcode::kJSObjectIsArray:
return ReduceObjectIsArray(node);
case IrOpcode::kJSParseInt:
return ReduceJSParseInt(node);
+ case IrOpcode::kJSResolvePromise:
+ return ReduceJSResolvePromise(node);
default:
break;
}
diff --git a/deps/v8/src/compiler/js-typed-lowering.h b/deps/v8/src/compiler/js-typed-lowering.h
index e25e092453..d8164ac97d 100644
--- a/deps/v8/src/compiler/js-typed-lowering.h
+++ b/deps/v8/src/compiler/js-typed-lowering.h
@@ -31,8 +31,8 @@ enum Signedness { kSigned, kUnsigned };
class V8_EXPORT_PRIVATE JSTypedLowering final
: public NON_EXPORTED_BASE(AdvancedReducer) {
public:
- JSTypedLowering(Editor* editor, JSGraph* jsgraph,
- JSHeapBroker* js_heap_broker, Zone* zone);
+ JSTypedLowering(Editor* editor, JSGraph* jsgraph, JSHeapBroker* broker,
+ Zone* zone);
~JSTypedLowering() final = default;
const char* reducer_name() const override { return "JSTypedLowering"; }
@@ -81,12 +81,9 @@ class V8_EXPORT_PRIVATE JSTypedLowering final
Reduction ReduceNumberBinop(Node* node);
Reduction ReduceInt32Binop(Node* node);
Reduction ReduceUI32Shift(Node* node, Signedness signedness);
- Reduction ReduceSpeculativeNumberAdd(Node* node);
- Reduction ReduceSpeculativeNumberMultiply(Node* node);
- Reduction ReduceSpeculativeNumberBinop(Node* node);
- Reduction ReduceSpeculativeNumberComparison(Node* node);
Reduction ReduceObjectIsArray(Node* node);
Reduction ReduceJSParseInt(Node* node);
+ Reduction ReduceJSResolvePromise(Node* node);
// Helper for ReduceJSLoadModule and ReduceJSStoreModule.
Node* BuildGetModuleCell(Node* node);
@@ -94,17 +91,17 @@ class V8_EXPORT_PRIVATE JSTypedLowering final
Factory* factory() const;
Graph* graph() const;
JSGraph* jsgraph() const { return jsgraph_; }
- JSHeapBroker* js_heap_broker() const { return js_heap_broker_; }
+ JSHeapBroker* broker() const { return broker_; }
Isolate* isolate() const;
JSOperatorBuilder* javascript() const;
CommonOperatorBuilder* common() const;
SimplifiedOperatorBuilder* simplified() const;
JSGraph* jsgraph_;
- JSHeapBroker* js_heap_broker_;
+ JSHeapBroker* broker_;
Type empty_string_type_;
Type pointer_comparable_type_;
- TypeCache const& type_cache_;
+ TypeCache const* type_cache_;
};
} // namespace compiler
diff --git a/deps/v8/src/compiler/linkage.cc b/deps/v8/src/compiler/linkage.cc
index 9bba09329d..0dc28e0f77 100644
--- a/deps/v8/src/compiler/linkage.cc
+++ b/deps/v8/src/compiler/linkage.cc
@@ -5,12 +5,12 @@
#include "src/compiler/linkage.h"
#include "src/assembler-inl.h"
-#include "src/code-stubs.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/frame.h"
#include "src/compiler/node.h"
#include "src/compiler/osr.h"
#include "src/compiler/pipeline.h"
+#include "src/macro-assembler.h"
#include "src/optimized-compilation-info.h"
namespace v8 {
@@ -38,7 +38,13 @@ std::ostream& operator<<(std::ostream& os, const CallDescriptor::Kind& k) {
os << "Addr";
break;
case CallDescriptor::kCallWasmFunction:
- os << "Wasm";
+ os << "WasmFunction";
+ break;
+ case CallDescriptor::kCallWasmImportWrapper:
+ os << "WasmImportWrapper";
+ break;
+ case CallDescriptor::kCallBuiltinPointer:
+ os << "BuiltinPointer";
break;
}
return os;
@@ -127,8 +133,10 @@ int CallDescriptor::CalculateFixedFrameSize() const {
return CommonFrameConstants::kFixedSlotCountAboveFp +
CommonFrameConstants::kCPSlotCount;
case kCallCodeObject:
+ case kCallBuiltinPointer:
return TypedFrameConstants::kFixedSlotCount;
case kCallWasmFunction:
+ case kCallWasmImportWrapper:
return WasmCompiledFrameConstants::kFixedSlotCount;
}
UNREACHABLE();
@@ -136,11 +144,11 @@ int CallDescriptor::CalculateFixedFrameSize() const {
CallDescriptor* Linkage::ComputeIncoming(Zone* zone,
OptimizedCompilationInfo* info) {
- DCHECK(!info->IsStub());
+ DCHECK(!info->IsNotOptimizedFunctionOrWasmFunction());
if (!info->closure().is_null()) {
// If we are compiling a JS function, use a JS call descriptor,
// plus the receiver.
- SharedFunctionInfo* shared = info->closure()->shared();
+ SharedFunctionInfo shared = info->closure()->shared();
return GetJSCallDescriptor(zone, info->is_osr(),
1 + shared->internal_formal_parameter_count(),
CallDescriptor::kCanUseRoots);
@@ -388,12 +396,23 @@ CallDescriptor* Linkage::GetStubCallDescriptor(
}
// The target for stub calls depends on the requested mode.
- CallDescriptor::Kind kind = stub_mode == StubCallMode::kCallWasmRuntimeStub
- ? CallDescriptor::kCallWasmFunction
- : CallDescriptor::kCallCodeObject;
- MachineType target_type = stub_mode == StubCallMode::kCallWasmRuntimeStub
- ? MachineType::Pointer()
- : MachineType::AnyTagged();
+ CallDescriptor::Kind kind;
+ MachineType target_type;
+ switch (stub_mode) {
+ case StubCallMode::kCallCodeObject:
+ kind = CallDescriptor::kCallCodeObject;
+ target_type = MachineType::AnyTagged();
+ break;
+ case StubCallMode::kCallWasmRuntimeStub:
+ kind = CallDescriptor::kCallWasmFunction;
+ target_type = MachineType::Pointer();
+ break;
+ case StubCallMode::kCallBuiltinPointer:
+ kind = CallDescriptor::kCallBuiltinPointer;
+ target_type = MachineType::AnyTagged();
+ break;
+ }
+
LinkageLocation target_loc = LinkageLocation::ForAnyRegister(target_type);
return new (zone) CallDescriptor( // --
kind, // kind
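
The nested ternaries are replaced above by an exhaustive switch over StubCallMode, so that adding a new mode forces this mapping to be revisited. A minimal standalone sketch of the pattern, with illustrative enums rather than the real V8 declarations:

#include <cassert>

// Illustrative stand-ins; not the real V8 types.
enum class StubCallMode { kCallCodeObject, kCallWasmRuntimeStub, kCallBuiltinPointer };
enum class Kind { kCallCodeObject, kCallWasmFunction, kCallBuiltinPointer };

Kind KindForMode(StubCallMode mode) {
  // No default case: with -Wswitch, a newly added StubCallMode enumerator
  // produces a compiler warning here instead of silently taking a fallback.
  switch (mode) {
    case StubCallMode::kCallCodeObject:
      return Kind::kCallCodeObject;
    case StubCallMode::kCallWasmRuntimeStub:
      return Kind::kCallWasmFunction;
    case StubCallMode::kCallBuiltinPointer:
      return Kind::kCallBuiltinPointer;
  }
  assert(false);  // unreachable
  return Kind::kCallCodeObject;
}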
diff --git a/deps/v8/src/compiler/linkage.h b/deps/v8/src/compiler/linkage.h
index e8c15123d4..0be3053274 100644
--- a/deps/v8/src/compiler/linkage.h
+++ b/deps/v8/src/compiler/linkage.h
@@ -12,6 +12,7 @@
#include "src/globals.h"
#include "src/interface-descriptors.h"
#include "src/machine-type.h"
+#include "src/register-arch.h"
#include "src/reglist.h"
#include "src/runtime/runtime.h"
#include "src/signature.h"
@@ -66,14 +67,14 @@ class LinkageLocation {
static LinkageLocation ForSavedCallerReturnAddress() {
return ForCalleeFrameSlot((StandardFrameConstants::kCallerPCOffset -
StandardFrameConstants::kCallerPCOffset) /
- kPointerSize,
+ kSystemPointerSize,
MachineType::Pointer());
}
static LinkageLocation ForSavedCallerFramePtr() {
return ForCalleeFrameSlot((StandardFrameConstants::kCallerPCOffset -
StandardFrameConstants::kCallerFPOffset) /
- kPointerSize,
+ kSystemPointerSize,
MachineType::Pointer());
}
@@ -81,14 +82,14 @@ class LinkageLocation {
DCHECK(V8_EMBEDDED_CONSTANT_POOL);
return ForCalleeFrameSlot((StandardFrameConstants::kCallerPCOffset -
StandardFrameConstants::kConstantPoolOffset) /
- kPointerSize,
+ kSystemPointerSize,
MachineType::AnyTagged());
}
static LinkageLocation ForSavedCallerFunction() {
return ForCalleeFrameSlot((StandardFrameConstants::kCallerPCOffset -
StandardFrameConstants::kFunctionOffset) /
- kPointerSize,
+ kSystemPointerSize,
MachineType::AnyTagged());
}
@@ -110,7 +111,7 @@ class LinkageLocation {
int GetSizeInPointers() const {
// Round up
- return (GetSize() + kPointerSize - 1) / kPointerSize;
+ return (GetSize() + kSystemPointerSize - 1) / kSystemPointerSize;
}
int32_t GetLocation() const {
@@ -169,10 +170,12 @@ class V8_EXPORT_PRIVATE CallDescriptor final
public:
// Describes the kind of this call, which determines the target.
enum Kind {
- kCallCodeObject, // target is a Code object
- kCallJSFunction, // target is a JSFunction object
- kCallAddress, // target is a machine pointer
- kCallWasmFunction // target is a wasm function
+ kCallCodeObject, // target is a Code object
+ kCallJSFunction, // target is a JSFunction object
+ kCallAddress, // target is a machine pointer
+ kCallWasmFunction, // target is a wasm function
+ kCallWasmImportWrapper, // target is a wasm import wrapper
+ kCallBuiltinPointer, // target is a builtin pointer
};
enum Flag {
@@ -190,7 +193,8 @@ class V8_EXPORT_PRIVATE CallDescriptor final
kRetpoline = 1u << 6,
// Use the kJavaScriptCallCodeStartRegister (fixed) register for the
// indirect target address when calling.
- kFixedTargetRegister = 1u << 7
+ kFixedTargetRegister = 1u << 7,
+ kAllowCallThroughSlot = 1u << 8
};
typedef base::Flags<Flag> Flags;
@@ -227,6 +231,9 @@ class V8_EXPORT_PRIVATE CallDescriptor final
// Returns {true} if this descriptor is a call to a WebAssembly function.
bool IsWasmFunctionCall() const { return kind_ == kCallWasmFunction; }
+ // Returns {true} if this descriptor is a call to a WebAssembly import wrapper.
+ bool IsWasmImportWrapper() const { return kind_ == kCallWasmImportWrapper; }
+
bool RequiresFrameAsIncoming() const {
return IsCFunctionCall() || IsJSFunctionCall() || IsWasmFunctionCall();
}
@@ -391,7 +398,7 @@ class V8_EXPORT_PRIVATE Linkage : public NON_EXPORTED_BASE(ZoneObject) {
Zone* zone, const CallInterfaceDescriptor& descriptor,
int stack_parameter_count, CallDescriptor::Flags flags,
Operator::Properties properties = Operator::kNoProperties,
- StubCallMode stub_mode = StubCallMode::kCallOnHeapBuiltin);
+ StubCallMode stub_mode = StubCallMode::kCallCodeObject);
static CallDescriptor* GetBytecodeDispatchCallDescriptor(
Zone* zone, const CallInterfaceDescriptor& descriptor,
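
The kPointerSize to kSystemPointerSize renames in this header are part of the pointer-compression work: once tagged values on the heap may be narrower than machine pointers, a single kPointerSize constant is ambiguous. A hedged sketch of the distinction (the constants here are illustrative; the real values come from the build configuration):

#include <iostream>

// Illustrative values for a 64-bit build with pointer compression enabled.
constexpr int kSystemPointerSize = sizeof(void*);  // machine word: 8 on x64
constexpr int kTaggedSize = 4;                     // compressed tagged value

int main() {
  // Frame slots hold machine words, so slot offsets divide by the system
  // pointer size, never by the tagged size.
  int caller_pc_offset = 8;  // illustrative frame offset in bytes
  std::cout << "slot index: " << caller_pc_offset / kSystemPointerSize << "\n";
  std::cout << "tagged fields per word: " << kSystemPointerSize / kTaggedSize << "\n";
}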
diff --git a/deps/v8/src/compiler/load-elimination.cc b/deps/v8/src/compiler/load-elimination.cc
index 46966b552f..4e89ab6b9c 100644
--- a/deps/v8/src/compiler/load-elimination.cc
+++ b/deps/v8/src/compiler/load-elimination.cc
@@ -22,7 +22,7 @@ bool IsRename(Node* node) {
case IrOpcode::kCheckHeapObject:
case IrOpcode::kFinishRegion:
case IrOpcode::kTypeGuard:
- return true;
+ return !node->IsDead();
default:
return false;
}
@@ -1209,8 +1209,8 @@ LoadElimination::AbstractState const* LoadElimination::ComputeLoopState(
// static
int LoadElimination::FieldIndexOf(int offset) {
- DCHECK_EQ(0, offset % kPointerSize);
- int field_index = offset / kPointerSize;
+ DCHECK(IsAligned(offset, kTaggedSize));
+ int field_index = offset / kTaggedSize;
if (field_index >= static_cast<int>(kMaxTrackedFields)) return -1;
DCHECK_LT(0, field_index);
return field_index - 1;
@@ -1226,9 +1226,13 @@ int LoadElimination::FieldIndexOf(FieldAccess const& access) {
UNREACHABLE();
break;
case MachineRepresentation::kWord32:
+ if (kInt32Size != kTaggedSize) {
+ return -1; // We currently only track tagged pointer size fields.
+ }
+ break;
case MachineRepresentation::kWord64:
- if (rep != MachineType::PointerRepresentation()) {
- return -1; // We currently only track pointer size fields.
+ if (kInt64Size != kTaggedSize) {
+ return -1; // We currently only track tagged pointer size fields.
}
break;
case MachineRepresentation::kWord8:
@@ -1236,8 +1240,8 @@ int LoadElimination::FieldIndexOf(FieldAccess const& access) {
case MachineRepresentation::kFloat32:
return -1; // Currently untracked.
case MachineRepresentation::kFloat64:
- if (kDoubleSize != kPointerSize) {
- return -1; // We currently only track pointer size fields.
+ if (kDoubleSize != kTaggedSize) {
+ return -1; // We currently only track tagged pointer size fields.
}
break;
case MachineRepresentation::kTaggedSigned:
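
Load elimination now tracks fields at tagged-size granularity: an offset maps to a field index only if it is kTaggedSize-aligned, and word32/word64/float64 fields are tracked only when their width happens to equal kTaggedSize. A small sketch of the index computation under those assumptions (kMaxTrackedFields and the offset bias mirror the code above; the values are illustrative):

#include <cassert>
#include <iostream>

constexpr int kTaggedSize = 8;        // illustrative: 64-bit, no compression
constexpr int kMaxTrackedFields = 32;

// Returns a dense index for a tagged field offset, or -1 if untracked.
int FieldIndexOf(int offset) {
  assert(offset % kTaggedSize == 0);  // only tagged-aligned offsets qualify
  int field_index = offset / kTaggedSize;
  if (field_index >= kMaxTrackedFields) return -1;
  assert(field_index > 0);            // offset 0 is the map word, not tracked
  return field_index - 1;
}

int main() { std::cout << FieldIndexOf(16) << "\n"; }  // prints 1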
diff --git a/deps/v8/src/compiler/machine-graph.cc b/deps/v8/src/compiler/machine-graph.cc
index b81ad03d83..92bf3910c2 100644
--- a/deps/v8/src/compiler/machine-graph.cc
+++ b/deps/v8/src/compiler/machine-graph.cc
@@ -54,7 +54,7 @@ Node* MachineGraph::RelocatableInt64Constant(int64_t value,
Node* MachineGraph::RelocatableIntPtrConstant(intptr_t value,
RelocInfo::Mode rmode) {
- return kPointerSize == 8
+ return kSystemPointerSize == 8
? RelocatableInt64Constant(value, rmode)
: RelocatableInt32Constant(static_cast<int>(value), rmode);
}
diff --git a/deps/v8/src/compiler/machine-operator-reducer.cc b/deps/v8/src/compiler/machine-operator-reducer.cc
index 8ef7e7ce08..751cdacca6 100644
--- a/deps/v8/src/compiler/machine-operator-reducer.cc
+++ b/deps/v8/src/compiler/machine-operator-reducer.cc
@@ -7,6 +7,7 @@
#include "src/base/bits.h"
#include "src/base/division-by-constant.h"
#include "src/base/ieee754.h"
+#include "src/base/overflowing-math.h"
#include "src/compiler/diamond.h"
#include "src/compiler/graph.h"
#include "src/compiler/machine-graph.h"
@@ -18,9 +19,12 @@ namespace v8 {
namespace internal {
namespace compiler {
-MachineOperatorReducer::MachineOperatorReducer(MachineGraph* mcgraph,
+MachineOperatorReducer::MachineOperatorReducer(Editor* editor,
+ MachineGraph* mcgraph,
bool allow_signalling_nan)
- : mcgraph_(mcgraph), allow_signalling_nan_(allow_signalling_nan) {}
+ : AdvancedReducer(editor),
+ mcgraph_(mcgraph),
+ allow_signalling_nan_(allow_signalling_nan) {}
MachineOperatorReducer::~MachineOperatorReducer() = default;
@@ -217,7 +221,8 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
if (m.right().Is(0)) return Replace(m.right().node()); // x * 0 => 0
if (m.right().Is(1)) return Replace(m.left().node()); // x * 1 => x
if (m.IsFoldable()) { // K * K => K
- return ReplaceInt32(m.left().Value() * m.right().Value());
+ return ReplaceInt32(
+ base::MulWithWraparound(m.left().Value(), m.right().Value()));
}
if (m.right().Is(-1)) { // x * -1 => 0 - x
node->ReplaceInput(0, Int32Constant(0));
@@ -432,7 +437,8 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
return ReplaceFloat64(m.left().Value() - m.left().Value());
}
if (m.IsFoldable()) { // K / K => K
- return ReplaceFloat64(m.left().Value() / m.right().Value());
+ return ReplaceFloat64(
+ base::Divide(m.left().Value(), m.right().Value()));
}
if (allow_signalling_nan_ && m.right().Is(-1)) { // x / -1.0 => -x
node->RemoveInput(1);
@@ -702,6 +708,14 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
return ReduceFloat64Compare(node);
case IrOpcode::kFloat64RoundDown:
return ReduceFloat64RoundDown(node);
+ case IrOpcode::kBitcastTaggedToWord: {
+ NodeMatcher m(node->InputAt(0));
+ if (m.IsBitcastWordToTaggedSigned()) {
+ RelaxEffectsAndControls(node);
+ return Replace(m.InputAt(0));
+ }
+ break;
+ }
default:
break;
}
@@ -713,8 +727,8 @@ Reduction MachineOperatorReducer::ReduceInt32Add(Node* node) {
Int32BinopMatcher m(node);
if (m.right().Is(0)) return Replace(m.left().node()); // x + 0 => x
if (m.IsFoldable()) { // K + K => K
- return ReplaceUint32(bit_cast<uint32_t>(m.left().Value()) +
- bit_cast<uint32_t>(m.right().Value()));
+ return ReplaceInt32(
+ base::AddWithWraparound(m.left().Value(), m.right().Value()));
}
if (m.left().IsInt32Sub()) {
Int32BinopMatcher mleft(m.left().node());
@@ -743,8 +757,8 @@ Reduction MachineOperatorReducer::ReduceInt64Add(Node* node) {
Int64BinopMatcher m(node);
if (m.right().Is(0)) return Replace(m.left().node()); // x + 0 => x
if (m.IsFoldable()) {
- return Replace(Uint64Constant(bit_cast<uint64_t>(m.left().Value()) +
- bit_cast<uint64_t>(m.right().Value())));
+ return ReplaceInt64(
+ base::AddWithWraparound(m.left().Value(), m.right().Value()));
}
return NoChange();
}
@@ -754,12 +768,13 @@ Reduction MachineOperatorReducer::ReduceInt32Sub(Node* node) {
Int32BinopMatcher m(node);
if (m.right().Is(0)) return Replace(m.left().node()); // x - 0 => x
if (m.IsFoldable()) { // K - K => K
- return ReplaceInt32(static_cast<uint32_t>(m.left().Value()) -
- static_cast<uint32_t>(m.right().Value()));
+ return ReplaceInt32(
+ base::SubWithWraparound(m.left().Value(), m.right().Value()));
}
if (m.LeftEqualsRight()) return ReplaceInt32(0); // x - x => 0
if (m.right().HasValue()) { // x - K => x + -K
- node->ReplaceInput(1, Int32Constant(-m.right().Value()));
+ node->ReplaceInput(
+ 1, Int32Constant(base::NegateWithWraparound(m.right().Value())));
NodeProperties::ChangeOp(node, machine()->Int32Add());
Reduction const reduction = ReduceInt32Add(node);
return reduction.Changed() ? reduction : Changed(node);
@@ -772,12 +787,13 @@ Reduction MachineOperatorReducer::ReduceInt64Sub(Node* node) {
Int64BinopMatcher m(node);
if (m.right().Is(0)) return Replace(m.left().node()); // x - 0 => x
if (m.IsFoldable()) { // K - K => K
- return Replace(Uint64Constant(bit_cast<uint64_t>(m.left().Value()) -
- bit_cast<uint64_t>(m.right().Value())));
+ return ReplaceInt64(
+ base::SubWithWraparound(m.left().Value(), m.right().Value()));
}
if (m.LeftEqualsRight()) return Replace(Int64Constant(0)); // x - x => 0
if (m.right().HasValue()) { // x - K => x + -K
- node->ReplaceInput(1, Int64Constant(-m.right().Value()));
+ node->ReplaceInput(
+ 1, Int64Constant(base::NegateWithWraparound(m.right().Value())));
NodeProperties::ChangeOp(node, machine()->Int64Add());
Reduction const reduction = ReduceInt64Add(node);
return reduction.Changed() ? reduction : Changed(node);
@@ -1056,7 +1072,8 @@ Reduction MachineOperatorReducer::ReduceWord32Shl(Node* node) {
Int32BinopMatcher m(node);
if (m.right().Is(0)) return Replace(m.left().node()); // x << 0 => x
if (m.IsFoldable()) { // K << K => K
- return ReplaceInt32(m.left().Value() << m.right().Value());
+ return ReplaceInt32(
+ base::ShlWithWraparound(m.left().Value(), m.right().Value()));
}
if (m.right().IsInRange(1, 31)) {
// (x >>> K) << K => x & ~(2^K - 1)
@@ -1081,7 +1098,8 @@ Reduction MachineOperatorReducer::ReduceWord64Shl(Node* node) {
Int64BinopMatcher m(node);
if (m.right().Is(0)) return Replace(m.left().node()); // x << 0 => x
if (m.IsFoldable()) { // K << K => K
- return ReplaceInt64(m.left().Value() << m.right().Value());
+ return ReplaceInt64(
+ base::ShlWithWraparound(m.left().Value(), m.right().Value()));
}
return NoChange();
}
@@ -1090,12 +1108,12 @@ Reduction MachineOperatorReducer::ReduceWord32Shr(Node* node) {
Uint32BinopMatcher m(node);
if (m.right().Is(0)) return Replace(m.left().node()); // x >>> 0 => x
if (m.IsFoldable()) { // K >>> K => K
- return ReplaceInt32(m.left().Value() >> m.right().Value());
+ return ReplaceInt32(m.left().Value() >> (m.right().Value() & 31));
}
if (m.left().IsWord32And() && m.right().HasValue()) {
Uint32BinopMatcher mleft(m.left().node());
if (mleft.right().HasValue()) {
- uint32_t shift = m.right().Value() & 0x1F;
+ uint32_t shift = m.right().Value() & 31;
uint32_t mask = mleft.right().Value();
if ((mask >> shift) == 0) {
// (m >>> s) == 0 implies ((x & m) >>> s) == 0
@@ -1111,7 +1129,7 @@ Reduction MachineOperatorReducer::ReduceWord64Shr(Node* node) {
Uint64BinopMatcher m(node);
if (m.right().Is(0)) return Replace(m.left().node()); // x >>> 0 => x
if (m.IsFoldable()) { // K >>> K => K
- return ReplaceInt64(m.left().Value() >> m.right().Value());
+ return ReplaceInt64(m.left().Value() >> (m.right().Value() & 63));
}
return NoChange();
}
@@ -1120,7 +1138,7 @@ Reduction MachineOperatorReducer::ReduceWord32Sar(Node* node) {
Int32BinopMatcher m(node);
if (m.right().Is(0)) return Replace(m.left().node()); // x >> 0 => x
if (m.IsFoldable()) { // K >> K => K
- return ReplaceInt32(m.left().Value() >> m.right().Value());
+ return ReplaceInt32(m.left().Value() >> (m.right().Value() & 31));
}
if (m.left().IsWord32Shl()) {
Int32BinopMatcher mleft(m.left().node());
@@ -1155,7 +1173,7 @@ Reduction MachineOperatorReducer::ReduceWord64Sar(Node* node) {
Int64BinopMatcher m(node);
if (m.right().Is(0)) return Replace(m.left().node()); // x >> 0 => x
if (m.IsFoldable()) {
- return ReplaceInt64(m.left().Value() >> m.right().Value());
+ return ReplaceInt64(m.left().Value() >> (m.right().Value() & 63));
}
return NoChange();
}
@@ -1184,6 +1202,7 @@ Reduction MachineOperatorReducer::ReduceWord32And(Node* node) {
}
if (m.right().IsNegativePowerOf2()) {
int32_t const mask = m.right().Value();
+ int32_t const neg_mask = base::NegateWithWraparound(mask);
if (m.left().IsWord32Shl()) {
Uint32BinopMatcher mleft(m.left().node());
if (mleft.right().HasValue() &&
@@ -1205,7 +1224,7 @@ Reduction MachineOperatorReducer::ReduceWord32And(Node* node) {
}
if (mleft.left().IsInt32Mul()) {
Int32BinopMatcher mleftleft(mleft.left().node());
- if (mleftleft.right().IsMultipleOf(-mask)) {
+ if (mleftleft.right().IsMultipleOf(neg_mask)) {
// (y * (K << L) + x) & (-1 << L) => (x & (-1 << L)) + y * (K << L)
node->ReplaceInput(0,
Word32And(mleft.right().node(), m.right().node()));
@@ -1217,7 +1236,7 @@ Reduction MachineOperatorReducer::ReduceWord32And(Node* node) {
}
if (mleft.right().IsInt32Mul()) {
Int32BinopMatcher mleftright(mleft.right().node());
- if (mleftright.right().IsMultipleOf(-mask)) {
+ if (mleftright.right().IsMultipleOf(neg_mask)) {
// (x + y * (K << L)) & (-1 << L) => (x & (-1 << L)) + y * (K << L)
node->ReplaceInput(0,
Word32And(mleft.left().node(), m.right().node()));
@@ -1253,7 +1272,7 @@ Reduction MachineOperatorReducer::ReduceWord32And(Node* node) {
}
} else if (m.left().IsInt32Mul()) {
Int32BinopMatcher mleft(m.left().node());
- if (mleft.right().IsMultipleOf(-mask)) {
+ if (mleft.right().IsMultipleOf(neg_mask)) {
// (x * (K << L)) & (-1 << L) => x * (K << L)
return Replace(mleft.node());
}
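
Two classes of undefined behavior are being scrubbed out of the constant folder in this file: signed overflow (the new base::*WithWraparound helpers) and over-wide shift counts (the new "& 31" / "& 63" masks, which match what the target instructions do anyway). A minimal sketch of how such helpers can be written portably; the names mirror base/overflowing-math.h, but this is an illustrative reimplementation, not the V8 source:

#include <cstdint>
#include <iostream>

// Signed overflow is UB in C++, but unsigned arithmetic wraps mod 2^n, so
// wraparound semantics are obtained by round-tripping through unsigned.
int32_t AddWithWraparound(int32_t a, int32_t b) {
  return static_cast<int32_t>(static_cast<uint32_t>(a) + static_cast<uint32_t>(b));
}
int32_t MulWithWraparound(int32_t a, int32_t b) {
  return static_cast<int32_t>(static_cast<uint32_t>(a) * static_cast<uint32_t>(b));
}
int32_t NegateWithWraparound(int32_t a) {  // handles INT32_MIN, where -a is UB
  return static_cast<int32_t>(0u - static_cast<uint32_t>(a));
}

int main() {
  // Shifting by >= the operand width is UB in C++, while x64/ARM mask the
  // count; masking in the folder keeps compile-time and run-time results equal.
  int32_t k = 2, s = 33;
  std::cout << (static_cast<uint32_t>(k) >> (s & 31)) << "\n";  // 2 >> 1 = 1
  std::cout << MulWithWraparound(INT32_MAX, 2) << "\n";         // wraps to -2
}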
diff --git a/deps/v8/src/compiler/machine-operator-reducer.h b/deps/v8/src/compiler/machine-operator-reducer.h
index c44ec5f551..8c0d4c810d 100644
--- a/deps/v8/src/compiler/machine-operator-reducer.h
+++ b/deps/v8/src/compiler/machine-operator-reducer.h
@@ -21,9 +21,9 @@ class MachineGraph;
// Performs constant folding and strength reduction on nodes that have
// machine operators.
class V8_EXPORT_PRIVATE MachineOperatorReducer final
- : public NON_EXPORTED_BASE(Reducer) {
+ : public NON_EXPORTED_BASE(AdvancedReducer) {
public:
- explicit MachineOperatorReducer(MachineGraph* mcgraph,
+ explicit MachineOperatorReducer(Editor* editor, MachineGraph* mcgraph,
bool allow_signalling_nan = true);
~MachineOperatorReducer() override;
diff --git a/deps/v8/src/compiler/machine-operator.cc b/deps/v8/src/compiler/machine-operator.cc
index f3fcd7758c..d740ff6f72 100644
--- a/deps/v8/src/compiler/machine-operator.cc
+++ b/deps/v8/src/compiler/machine-operator.cc
@@ -147,6 +147,7 @@ MachineType AtomicOpType(Operator const* op) {
V(ChangeFloat64ToInt64, Operator::kNoProperties, 1, 0, 1) \
V(ChangeFloat64ToUint32, Operator::kNoProperties, 1, 0, 1) \
V(ChangeFloat64ToUint64, Operator::kNoProperties, 1, 0, 1) \
+ V(TruncateFloat64ToInt64, Operator::kNoProperties, 1, 0, 1) \
V(TruncateFloat64ToUint32, Operator::kNoProperties, 1, 0, 1) \
V(TruncateFloat32ToInt32, Operator::kNoProperties, 1, 0, 1) \
V(TruncateFloat32ToUint32, Operator::kNoProperties, 1, 0, 1) \
@@ -812,17 +813,19 @@ struct MachineOperatorGlobalCache {
struct CommentOperator : public Operator1<const char*> {
explicit CommentOperator(const char* msg)
: Operator1<const char*>(IrOpcode::kComment, Operator::kNoThrow,
- "Comment", 0, 0, 0, 0, 0, 0, msg) {}
+ "Comment", 0, 1, 1, 0, 1, 0, msg) {}
};
-static base::LazyInstance<MachineOperatorGlobalCache>::type
- kMachineOperatorGlobalCache = LAZY_INSTANCE_INITIALIZER;
+namespace {
+DEFINE_LAZY_LEAKY_OBJECT_GETTER(MachineOperatorGlobalCache,
+ GetMachineOperatorGlobalCache);
+}  // namespace
MachineOperatorBuilder::MachineOperatorBuilder(
Zone* zone, MachineRepresentation word, Flags flags,
AlignmentRequirements alignmentRequirements)
: zone_(zone),
- cache_(kMachineOperatorGlobalCache.Get()),
+ cache_(*GetMachineOperatorGlobalCache()),
word_(word),
flags_(flags),
alignment_requirements_(alignmentRequirements) {
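
DEFINE_LAZY_LEAKY_OBJECT_GETTER replaces the base::LazyInstance machinery with a function-local static that is allocated once and never destroyed, sidestepping both static-initialization-order issues and destructor races at shutdown. A minimal sketch of the idiom, hand-written rather than via the V8 macro:

struct MachineOperatorGlobalCache {  // stand-in for the real cache type
  int some_expensive_state = 42;
};

// Thread-safe since C++11: function-local static initialization runs exactly
// once. The object is intentionally leaked so no destructor can run during
// process shutdown while other threads might still use the cache.
MachineOperatorGlobalCache* GetMachineOperatorGlobalCache() {
  static MachineOperatorGlobalCache* const cache =
      new MachineOperatorGlobalCache();
  return cache;
}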
diff --git a/deps/v8/src/compiler/machine-operator.h b/deps/v8/src/compiler/machine-operator.h
index a34360a375..25b053ea66 100644
--- a/deps/v8/src/compiler/machine-operator.h
+++ b/deps/v8/src/compiler/machine-operator.h
@@ -6,10 +6,10 @@
#define V8_COMPILER_MACHINE_OPERATOR_H_
#include "src/base/compiler-specific.h"
+#include "src/base/enum-set.h"
#include "src/base/flags.h"
#include "src/globals.h"
#include "src/machine-type.h"
-#include "src/utils.h"
#include "src/zone/zone.h"
namespace v8 {
@@ -172,8 +172,8 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
return AlignmentRequirements(kNoSupport);
}
static AlignmentRequirements SomeUnalignedAccessUnsupported(
- EnumSet<MachineRepresentation> unalignedLoadUnsupportedTypes,
- EnumSet<MachineRepresentation> unalignedStoreUnsupportedTypes) {
+ base::EnumSet<MachineRepresentation> unalignedLoadUnsupportedTypes,
+ base::EnumSet<MachineRepresentation> unalignedStoreUnsupportedTypes) {
return AlignmentRequirements(kSomeSupport, unalignedLoadUnsupportedTypes,
unalignedStoreUnsupportedTypes);
}
@@ -181,15 +181,15 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
private:
explicit AlignmentRequirements(
AlignmentRequirements::UnalignedAccessSupport unalignedAccessSupport,
- EnumSet<MachineRepresentation> unalignedLoadUnsupportedTypes =
- EnumSet<MachineRepresentation>(),
- EnumSet<MachineRepresentation> unalignedStoreUnsupportedTypes =
- EnumSet<MachineRepresentation>())
+ base::EnumSet<MachineRepresentation> unalignedLoadUnsupportedTypes =
+ base::EnumSet<MachineRepresentation>(),
+ base::EnumSet<MachineRepresentation> unalignedStoreUnsupportedTypes =
+ base::EnumSet<MachineRepresentation>())
: unalignedSupport_(unalignedAccessSupport),
unalignedLoadUnsupportedTypes_(unalignedLoadUnsupportedTypes),
unalignedStoreUnsupportedTypes_(unalignedStoreUnsupportedTypes) {}
- bool IsUnalignedSupported(EnumSet<MachineRepresentation> unsupported,
+ bool IsUnalignedSupported(base::EnumSet<MachineRepresentation> unsupported,
MachineRepresentation rep) const {
// All accesses of bytes in memory are aligned.
DCHECK_NE(MachineRepresentation::kWord8, rep);
@@ -199,14 +199,14 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
case kNoSupport:
return false;
case kSomeSupport:
- return !unsupported.Contains(rep);
+ return !unsupported.contains(rep);
}
UNREACHABLE();
}
const AlignmentRequirements::UnalignedAccessSupport unalignedSupport_;
- const EnumSet<MachineRepresentation> unalignedLoadUnsupportedTypes_;
- const EnumSet<MachineRepresentation> unalignedStoreUnsupportedTypes_;
+ const base::EnumSet<MachineRepresentation> unalignedLoadUnsupportedTypes_;
+ const base::EnumSet<MachineRepresentation> unalignedStoreUnsupportedTypes_;
};
explicit MachineOperatorBuilder(
@@ -322,6 +322,7 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* ChangeFloat64ToInt64();
const Operator* ChangeFloat64ToUint32(); // narrowing
const Operator* ChangeFloat64ToUint64();
+ const Operator* TruncateFloat64ToInt64();
const Operator* TruncateFloat64ToUint32();
const Operator* TruncateFloat32ToInt32();
const Operator* TruncateFloat32ToUint32();
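
base::EnumSet replaces the old EnumSet from src/utils.h; it is a typed bitset over an enum, which is why Contains becomes the lower-case contains in this header. An illustrative reimplementation of the core idea (the real class lives in src/base/enum-set.h and differs in detail):

#include <cstdint>
#include <initializer_list>

template <typename E, typename Storage = uint32_t>
class EnumSet {
 public:
  constexpr EnumSet() = default;
  constexpr EnumSet(std::initializer_list<E> values) {
    for (E v : values) bits_ |= Bit(v);
  }
  constexpr bool contains(E v) const { return (bits_ & Bit(v)) != 0; }

 private:
  static constexpr Storage Bit(E v) {
    return Storage{1} << static_cast<int>(v);
  }
  Storage bits_ = 0;
};

enum class MachineRepresentation { kWord8, kWord16, kWord32, kWord64 };

// Usage: describe which representations lack unaligned-load support.
constexpr EnumSet<MachineRepresentation> kUnalignedLoadUnsupported{
    MachineRepresentation::kWord64};
static_assert(kUnalignedLoadUnsupported.contains(MachineRepresentation::kWord64),
              "word64 loads must be aligned in this configuration");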
diff --git a/deps/v8/src/compiler/memory-optimizer.cc b/deps/v8/src/compiler/memory-optimizer.cc
index 298a503771..91a19891db 100644
--- a/deps/v8/src/compiler/memory-optimizer.cc
+++ b/deps/v8/src/compiler/memory-optimizer.cc
@@ -10,6 +10,7 @@
#include "src/compiler/node-properties.h"
#include "src/compiler/node.h"
#include "src/compiler/simplified-operator.h"
+#include "src/interface-descriptors.h"
namespace v8 {
namespace internal {
@@ -96,21 +97,55 @@ void MemoryOptimizer::VisitNode(Node* node, AllocationState const* state) {
return VisitStoreElement(node, state);
case IrOpcode::kStoreField:
return VisitStoreField(node, state);
+ case IrOpcode::kBitcastTaggedToWord:
+ case IrOpcode::kBitcastWordToTagged:
+ case IrOpcode::kComment:
+ case IrOpcode::kDebugAbort:
+ case IrOpcode::kDebugBreak:
case IrOpcode::kDeoptimizeIf:
case IrOpcode::kDeoptimizeUnless:
case IrOpcode::kIfException:
case IrOpcode::kLoad:
+ case IrOpcode::kPoisonedLoad:
case IrOpcode::kProtectedLoad:
- case IrOpcode::kUnalignedLoad:
- case IrOpcode::kStore:
case IrOpcode::kProtectedStore:
- case IrOpcode::kUnalignedStore:
case IrOpcode::kRetain:
+ case IrOpcode::kStore:
+ case IrOpcode::kTaggedPoisonOnSpeculation:
+ case IrOpcode::kUnalignedLoad:
+ case IrOpcode::kUnalignedStore:
case IrOpcode::kUnsafePointerAdd:
- case IrOpcode::kDebugBreak:
case IrOpcode::kUnreachable:
+ case IrOpcode::kWord32AtomicAdd:
+ case IrOpcode::kWord32AtomicAnd:
+ case IrOpcode::kWord32AtomicCompareExchange:
+ case IrOpcode::kWord32AtomicExchange:
+ case IrOpcode::kWord32AtomicLoad:
+ case IrOpcode::kWord32AtomicOr:
+ case IrOpcode::kWord32AtomicPairAdd:
+ case IrOpcode::kWord32AtomicPairAnd:
+ case IrOpcode::kWord32AtomicPairCompareExchange:
+ case IrOpcode::kWord32AtomicPairExchange:
+ case IrOpcode::kWord32AtomicPairLoad:
+ case IrOpcode::kWord32AtomicPairOr:
+ case IrOpcode::kWord32AtomicPairStore:
+ case IrOpcode::kWord32AtomicPairSub:
+ case IrOpcode::kWord32AtomicPairXor:
+ case IrOpcode::kWord32AtomicStore:
+ case IrOpcode::kWord32AtomicSub:
+ case IrOpcode::kWord32AtomicXor:
case IrOpcode::kWord32PoisonOnSpeculation:
+ case IrOpcode::kWord64AtomicAdd:
+ case IrOpcode::kWord64AtomicAnd:
+ case IrOpcode::kWord64AtomicCompareExchange:
+ case IrOpcode::kWord64AtomicExchange:
+ case IrOpcode::kWord64AtomicLoad:
+ case IrOpcode::kWord64AtomicOr:
+ case IrOpcode::kWord64AtomicStore:
+ case IrOpcode::kWord64AtomicSub:
+ case IrOpcode::kWord64AtomicXor:
case IrOpcode::kWord64PoisonOnSpeculation:
+ // These operations cannot trigger GC.
return VisitOtherEffect(node, state);
default:
break;
@@ -249,7 +284,8 @@ void MemoryOptimizer::VisitAllocateRaw(Node* node,
CallDescriptor::kCanUseRoots, Operator::kNoThrow);
allocate_operator_.set(common()->Call(call_descriptor));
}
- Node* vfalse = __ Call(allocate_operator_.get(), target, size);
+ Node* vfalse = __ BitcastTaggedToWord(
+ __ Call(allocate_operator_.get(), target, size));
vfalse = __ IntSub(vfalse, __ IntPtrConstant(kHeapObjectTag));
__ Goto(&done, vfalse);
}
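
The new BitcastTaggedToWord around the allocation call makes the subsequent untagging arithmetic operate on a machine word rather than on a tagged value, which matters once the machine graph distinguishes the two representations. For context, V8 tags heap pointers by setting the low bit (kHeapObjectTag is 1), so untagging is a subtraction; a one-line sketch:

#include <cstdint>

constexpr uintptr_t kHeapObjectTag = 1;  // V8 sets the low bit of heap pointers

// A tagged heap pointer is the object address plus the tag; the raw address
// used for field accesses is recovered by subtracting it.
uintptr_t Untag(uintptr_t tagged) { return tagged - kHeapObjectTag; }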
diff --git a/deps/v8/src/compiler/mips/OWNERS b/deps/v8/src/compiler/mips/OWNERS
deleted file mode 100644
index 8bbcab4c2d..0000000000
--- a/deps/v8/src/compiler/mips/OWNERS
+++ /dev/null
@@ -1,2 +0,0 @@
-ibogosavljevic@wavecomp.com
-skovacevic@wavecomp.com
\ No newline at end of file
diff --git a/deps/v8/src/compiler/mips64/OWNERS b/deps/v8/src/compiler/mips64/OWNERS
deleted file mode 100644
index 8bbcab4c2d..0000000000
--- a/deps/v8/src/compiler/mips64/OWNERS
+++ /dev/null
@@ -1,2 +0,0 @@
-ibogosavljevic@wavecomp.com
-skovacevic@wavecomp.com
\ No newline at end of file
diff --git a/deps/v8/src/compiler/node-matchers.h b/deps/v8/src/compiler/node-matchers.h
index cbefb0ac35..375a420dd4 100644
--- a/deps/v8/src/compiler/node-matchers.h
+++ b/deps/v8/src/compiler/node-matchers.h
@@ -13,6 +13,7 @@
#include "src/double.h"
#include "src/external-reference.h"
#include "src/globals.h"
+#include "src/objects/heap-object.h"
namespace v8 {
namespace internal {
@@ -130,7 +131,8 @@ struct IntMatcher final : public ValueMatcher<T, kOpcode> {
}
bool IsNegativePowerOf2() const {
return this->HasValue() && this->Value() < 0 &&
- (-this->Value() & (-this->Value() - 1)) == 0;
+ ((this->Value() == kMinInt) ||
+ (-this->Value() & (-this->Value() - 1)) == 0);
}
bool IsNegative() const { return this->HasValue() && this->Value() < 0; }
};
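
The extra clause above fixes a subtle case: when Value() is kMinInt, -Value() overflows (UB for signed integers), so the old bit trick could not be evaluated safely; and since kMinInt is -2^31, it is itself a negative power of two and can be answered directly. A compact check of the reasoning:

#include <cstdint>
#include <iostream>

// Safe version: handle INT32_MIN before negating.
bool IsNegativePowerOf2(int32_t v) {
  if (v >= 0) return false;
  if (v == INT32_MIN) return true;  // -2^31; -v would overflow
  return (-v & (-v - 1)) == 0;      // true iff -v is a power of two
}

int main() {
  std::cout << IsNegativePowerOf2(-8) << IsNegativePowerOf2(-6)
            << IsNegativePowerOf2(INT32_MIN) << "\n";  // prints 101
}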
@@ -678,11 +680,13 @@ struct BaseWithIndexAndDisplacementMatcher {
switch (from->opcode()) {
case IrOpcode::kLoad:
case IrOpcode::kPoisonedLoad:
+ case IrOpcode::kProtectedLoad:
case IrOpcode::kInt32Add:
case IrOpcode::kInt64Add:
// Skip addressing uses.
break;
case IrOpcode::kStore:
+ case IrOpcode::kProtectedStore:
// If the stored value is this node, it is not an addressing use.
if (from->InputAt(2) == node) return false;
// Otherwise it is used as an address and skipped.
@@ -780,7 +784,9 @@ struct WasmStackCheckMatcher {
template <class BinopMatcher, IrOpcode::Value expected_opcode>
struct StackCheckMatcher {
StackCheckMatcher(Isolate* isolate, Node* compare)
- : isolate_(isolate), compare_(compare) {}
+ : isolate_(isolate), compare_(compare) {
+ DCHECK_NOT_NULL(isolate);
+ }
bool Matched() {
// TODO(jgruber): Ideally, we could be more flexible here and also match the
// same pattern with switched operands (i.e.: left is LoadStackPointer and
diff --git a/deps/v8/src/compiler/node-origin-table.h b/deps/v8/src/compiler/node-origin-table.h
index 8429b2f692..c4f5bbeb11 100644
--- a/deps/v8/src/compiler/node-origin-table.h
+++ b/deps/v8/src/compiler/node-origin-table.h
@@ -33,7 +33,7 @@ class NodeOrigin {
origin_kind_(origin_kind),
created_from_(created_from) {}
- NodeOrigin(const NodeOrigin& other) = default;
+ NodeOrigin(const NodeOrigin& other) V8_NOEXCEPT = default;
static NodeOrigin Unknown() { return NodeOrigin(); }
bool IsKnown() { return created_from_ >= 0; }
diff --git a/deps/v8/src/compiler/node-properties.cc b/deps/v8/src/compiler/node-properties.cc
index d72980f9fd..8e3421c1a0 100644
--- a/deps/v8/src/compiler/node-properties.cc
+++ b/deps/v8/src/compiler/node-properties.cc
@@ -364,11 +364,11 @@ bool NodeProperties::IsSame(Node* a, Node* b) {
// static
NodeProperties::InferReceiverMapsResult NodeProperties::InferReceiverMaps(
- Isolate* isolate, Node* receiver, Node* effect,
+ JSHeapBroker* broker, Node* receiver, Node* effect,
ZoneHandleSet<Map>* maps_return) {
HeapObjectMatcher m(receiver);
if (m.HasValue()) {
- Handle<HeapObject> receiver = m.Value();
+ HeapObjectRef receiver = m.Ref(broker).AsHeapObject();
// We don't use ICs for the Array.prototype and the Object.prototype
// because the runtime has to be able to intercept them properly, so
// we better make sure that TurboFan doesn't outsmart the system here
@@ -376,15 +376,12 @@ NodeProperties::InferReceiverMapsResult NodeProperties::InferReceiverMaps(
//
// TODO(bmeurer): This can be removed once the Array.prototype and
// Object.prototype have NO_ELEMENTS elements kind.
- if (!isolate->IsInAnyContext(*receiver,
- Context::INITIAL_ARRAY_PROTOTYPE_INDEX) &&
- !isolate->IsInAnyContext(*receiver,
- Context::INITIAL_OBJECT_PROTOTYPE_INDEX)) {
- Handle<Map> receiver_map(receiver->map(), isolate);
- if (receiver_map->is_stable()) {
+ if (!receiver.IsJSObject() ||
+ !broker->IsArrayOrObjectPrototype(receiver.AsJSObject())) {
+ if (receiver.map().is_stable()) {
// The {receiver_map} is only reliable when we install a stability
// code dependency.
- *maps_return = ZoneHandleSet<Map>(receiver_map);
+ *maps_return = ZoneHandleSet<Map>(receiver.map().object());
return kUnreliableReceiverMaps;
}
}
@@ -413,15 +410,14 @@ NodeProperties::InferReceiverMapsResult NodeProperties::InferReceiverMaps(
HeapObjectMatcher mtarget(GetValueInput(effect, 0));
HeapObjectMatcher mnewtarget(GetValueInput(effect, 1));
if (mtarget.HasValue() && mnewtarget.HasValue() &&
- mnewtarget.Value()->IsJSFunction()) {
- Handle<JSFunction> original_constructor =
- Handle<JSFunction>::cast(mnewtarget.Value());
- if (original_constructor->has_initial_map()) {
- Handle<Map> initial_map(original_constructor->initial_map(),
- isolate);
- if (initial_map->constructor_or_backpointer() ==
- *mtarget.Value()) {
- *maps_return = ZoneHandleSet<Map>(initial_map);
+ mnewtarget.Ref(broker).IsJSFunction()) {
+ JSFunctionRef original_constructor =
+ mnewtarget.Ref(broker).AsJSFunction();
+ if (original_constructor.has_initial_map()) {
+ original_constructor.Serialize();
+ MapRef initial_map = original_constructor.initial_map();
+ if (initial_map.GetConstructor().equals(mtarget.Ref(broker))) {
+ *maps_return = ZoneHandleSet<Map>(initial_map.object());
return result;
}
}
@@ -431,6 +427,16 @@ NodeProperties::InferReceiverMapsResult NodeProperties::InferReceiverMaps(
}
break;
}
+ case IrOpcode::kJSCreatePromise: {
+ if (IsSame(receiver, effect)) {
+ *maps_return = ZoneHandleSet<Map>(broker->native_context()
+ .promise_function()
+ .initial_map()
+ .object());
+ return result;
+ }
+ break;
+ }
case IrOpcode::kStoreField: {
// We only care about StoreField of maps.
Node* const object = GetValueInput(effect, 0);
@@ -441,7 +447,7 @@ NodeProperties::InferReceiverMapsResult NodeProperties::InferReceiverMaps(
Node* const value = GetValueInput(effect, 1);
HeapObjectMatcher m(value);
if (m.HasValue()) {
- *maps_return = ZoneHandleSet<Map>(Handle<Map>::cast(m.Value()));
+ *maps_return = ZoneHandleSet<Map>(m.Ref(broker).AsMap().object());
return result;
}
}
@@ -505,12 +511,13 @@ NodeProperties::InferReceiverMapsResult NodeProperties::InferReceiverMaps(
}
// static
-MaybeHandle<Map> NodeProperties::GetMapWitness(Isolate* isolate, Node* node) {
+MaybeHandle<Map> NodeProperties::GetMapWitness(JSHeapBroker* broker,
+ Node* node) {
ZoneHandleSet<Map> maps;
Node* receiver = NodeProperties::GetValueInput(node, 1);
Node* effect = NodeProperties::GetEffectInput(node);
NodeProperties::InferReceiverMapsResult result =
- NodeProperties::InferReceiverMaps(isolate, receiver, effect, &maps);
+ NodeProperties::InferReceiverMaps(broker, receiver, effect, &maps);
if (result == NodeProperties::kReliableReceiverMaps && maps.size() == 1) {
return maps[0];
}
@@ -518,19 +525,20 @@ MaybeHandle<Map> NodeProperties::GetMapWitness(Isolate* isolate, Node* node) {
}
// static
-bool NodeProperties::HasInstanceTypeWitness(Isolate* isolate, Node* receiver,
- Node* effect,
+bool NodeProperties::HasInstanceTypeWitness(JSHeapBroker* broker,
+ Node* receiver, Node* effect,
InstanceType instance_type) {
ZoneHandleSet<Map> receiver_maps;
NodeProperties::InferReceiverMapsResult result =
- NodeProperties::InferReceiverMaps(isolate, receiver, effect,
+ NodeProperties::InferReceiverMaps(broker, receiver, effect,
&receiver_maps);
switch (result) {
case NodeProperties::kUnreliableReceiverMaps:
case NodeProperties::kReliableReceiverMaps:
DCHECK_NE(0, receiver_maps.size());
for (size_t i = 0; i < receiver_maps.size(); ++i) {
- if (receiver_maps[i]->instance_type() != instance_type) return false;
+ MapRef map(broker, receiver_maps[i]);
+ if (map.instance_type() != instance_type) return false;
}
return true;
@@ -555,7 +563,7 @@ bool NodeProperties::NoObservableSideEffectBetween(Node* effect,
}
// static
-bool NodeProperties::CanBePrimitive(Isolate* isolate, Node* receiver,
+bool NodeProperties::CanBePrimitive(JSHeapBroker* broker, Node* receiver,
Node* effect) {
switch (receiver->opcode()) {
#define CASE(Opcode) case IrOpcode::k##Opcode:
@@ -568,19 +576,21 @@ bool NodeProperties::CanBePrimitive(Isolate* isolate, Node* receiver,
case IrOpcode::kJSToObject:
return false;
case IrOpcode::kHeapConstant: {
- Handle<HeapObject> value = HeapObjectMatcher(receiver).Value();
- return value->IsPrimitive();
+ HeapObjectRef value =
+ HeapObjectMatcher(receiver).Ref(broker).AsHeapObject();
+ return value.map().IsPrimitiveMap();
}
default: {
// We don't really care about the exact maps here,
// just the instance types, which don't change
// across potential side-effecting operations.
ZoneHandleSet<Map> maps;
- if (InferReceiverMaps(isolate, receiver, effect, &maps) !=
+ if (InferReceiverMaps(broker, receiver, effect, &maps) !=
kNoReceiverMaps) {
- // Check if all {maps} are actually JSReceiver maps.
+ // Check if one of the {maps} is not a JSReceiver map.
for (size_t i = 0; i < maps.size(); ++i) {
- if (!maps[i]->IsJSReceiverMap()) return true;
+ MapRef map(broker, maps[i]);
+ if (!map.IsJSReceiverMap()) return true;
}
return false;
}
@@ -590,9 +600,9 @@ bool NodeProperties::CanBePrimitive(Isolate* isolate, Node* receiver,
}
// static
-bool NodeProperties::CanBeNullOrUndefined(Isolate* isolate, Node* receiver,
+bool NodeProperties::CanBeNullOrUndefined(JSHeapBroker* broker, Node* receiver,
Node* effect) {
- if (CanBePrimitive(isolate, receiver, effect)) {
+ if (CanBePrimitive(broker, receiver, effect)) {
switch (receiver->opcode()) {
case IrOpcode::kCheckInternalizedString:
case IrOpcode::kCheckNumber:
@@ -608,8 +618,10 @@ bool NodeProperties::CanBeNullOrUndefined(Isolate* isolate, Node* receiver,
case IrOpcode::kToBoolean:
return false;
case IrOpcode::kHeapConstant: {
- Handle<HeapObject> value = HeapObjectMatcher(receiver).Value();
- return value->IsNullOrUndefined(isolate);
+ HeapObjectRef value =
+ HeapObjectMatcher(receiver).Ref(broker).AsHeapObject();
+ OddballType type = value.map().oddball_type();
+ return type == OddballType::kNull || type == OddballType::kUndefined;
}
default:
return true;
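
Throughout this file the signatures migrate from Isolate* to JSHeapBroker*: instead of dereferencing raw heap Handles, the compiler reads heap state through broker-managed Ref objects, which snapshot the data the compiler asked about so it can (eventually) be consumed off the main thread. A toy illustration of the handle-vs-ref distinction, entirely schematic and not V8's actual types:

// Schematic only. "Handle" stands for a live pointer into a mutable heap;
// "MapRef" stands for a broker-owned, immutable snapshot of the few bits
// the compiler actually queried.
struct Map { bool is_stable; };
struct Handle { Map* location; };  // observes later heap mutation

class MapRef {
 public:
  explicit MapRef(const Map& m) : is_stable_(m.is_stable) {}  // snapshot
  bool is_stable() const { return is_stable_; }
 private:
  bool is_stable_;  // fixed at snapshot time; safe to read off-thread
};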
diff --git a/deps/v8/src/compiler/node-properties.h b/deps/v8/src/compiler/node-properties.h
index abcee4eaf9..df50a1d90a 100644
--- a/deps/v8/src/compiler/node-properties.h
+++ b/deps/v8/src/compiler/node-properties.h
@@ -152,11 +152,11 @@ class V8_EXPORT_PRIVATE NodeProperties final {
// but instance type is reliable.
};
static InferReceiverMapsResult InferReceiverMaps(
- Isolate* isolate, Node* receiver, Node* effect,
+ JSHeapBroker* broker, Node* receiver, Node* effect,
ZoneHandleSet<Map>* maps_return);
- static MaybeHandle<Map> GetMapWitness(Isolate* isolate, Node* node);
- static bool HasInstanceTypeWitness(Isolate* isolate, Node* receiver,
+ static MaybeHandle<Map> GetMapWitness(JSHeapBroker* broker, Node* node);
+ static bool HasInstanceTypeWitness(JSHeapBroker* broker, Node* receiver,
Node* effect, InstanceType instance_type);
// Walks up the {effect} chain to check that there's no observable side-effect
@@ -167,11 +167,12 @@ class V8_EXPORT_PRIVATE NodeProperties final {
// Returns true if the {receiver} can be a primitive value (i.e. is not
// definitely a JavaScript object); might walk up the {effect} chain to
// find map checks on {receiver}.
- static bool CanBePrimitive(Isolate* isolate, Node* receiver, Node* effect);
+ static bool CanBePrimitive(JSHeapBroker* broker, Node* receiver,
+ Node* effect);
// Returns true if the {receiver} can be null or undefined. Might walk
// up the {effect} chain to find map checks for {receiver}.
- static bool CanBeNullOrUndefined(Isolate* isolate, Node* receiver,
+ static bool CanBeNullOrUndefined(JSHeapBroker* broker, Node* receiver,
Node* effect);
// ---------------------------------------------------------------------------
diff --git a/deps/v8/src/compiler/node.cc b/deps/v8/src/compiler/node.cc
index 0e1ebce9bc..3576c9b589 100644
--- a/deps/v8/src/compiler/node.cc
+++ b/deps/v8/src/compiler/node.cc
@@ -298,6 +298,10 @@ bool Node::OwnedBy(Node const* owner1, Node const* owner2) const {
void Node::Print() const {
StdoutStream os;
+ Print(os);
+}
+
+void Node::Print(std::ostream& os) const {
os << *this << std::endl;
for (Node* input : this->inputs()) {
os << " " << *input << std::endl;
diff --git a/deps/v8/src/compiler/node.h b/deps/v8/src/compiler/node.h
index 01dca47cbe..2ddd33ff31 100644
--- a/deps/v8/src/compiler/node.h
+++ b/deps/v8/src/compiler/node.h
@@ -160,6 +160,7 @@ class V8_EXPORT_PRIVATE Node final {
bool OwnedBy(Node const* owner1, Node const* owner2) const;
void Print() const;
+ void Print(std::ostream&) const;
private:
struct Use;
diff --git a/deps/v8/src/compiler/opcodes.h b/deps/v8/src/compiler/opcodes.h
index ab854e6eb6..706248bd04 100644
--- a/deps/v8/src/compiler/opcodes.h
+++ b/deps/v8/src/compiler/opcodes.h
@@ -133,28 +133,29 @@
V(JSIncrement) \
V(JSNegate)
-#define JS_CREATE_OP_LIST(V) \
- V(JSCreate) \
- V(JSCreateArguments) \
- V(JSCreateArray) \
- V(JSCreateArrayIterator) \
- V(JSCreateBoundFunction) \
- V(JSCreateClosure) \
- V(JSCreateCollectionIterator) \
- V(JSCreateGeneratorObject) \
- V(JSCreateIterResultObject) \
- V(JSCreateStringIterator) \
- V(JSCreateKeyValueArray) \
- V(JSCreateObject) \
- V(JSCreatePromise) \
- V(JSCreateTypedArray) \
- V(JSCreateLiteralArray) \
- V(JSCreateEmptyLiteralArray) \
- V(JSCreateArrayFromIterable) \
- V(JSCreateLiteralObject) \
- V(JSCreateEmptyLiteralObject) \
- V(JSCloneObject) \
- V(JSCreateLiteralRegExp)
+#define JS_CREATE_OP_LIST(V) \
+ V(JSCloneObject) \
+ V(JSCreate) \
+ V(JSCreateArguments) \
+ V(JSCreateArray) \
+ V(JSCreateArrayFromIterable) \
+ V(JSCreateArrayIterator) \
+ V(JSCreateAsyncFunctionObject) \
+ V(JSCreateBoundFunction) \
+ V(JSCreateClosure) \
+ V(JSCreateCollectionIterator) \
+ V(JSCreateEmptyLiteralArray) \
+ V(JSCreateEmptyLiteralObject) \
+ V(JSCreateGeneratorObject) \
+ V(JSCreateIterResultObject) \
+ V(JSCreateKeyValueArray) \
+ V(JSCreateLiteralArray) \
+ V(JSCreateLiteralObject) \
+ V(JSCreateLiteralRegExp) \
+ V(JSCreateObject) \
+ V(JSCreatePromise) \
+ V(JSCreateStringIterator) \
+ V(JSCreateTypedArray)
#define JS_OBJECT_OP_LIST(V) \
JS_CREATE_OP_LIST(V) \
@@ -194,6 +195,9 @@
#define JS_OTHER_OP_LIST(V) \
JS_CALL_OP_LIST(V) \
JS_CONSTRUCT_OP_LIST(V) \
+ V(JSAsyncFunctionEnter) \
+ V(JSAsyncFunctionReject) \
+ V(JSAsyncFunctionResolve) \
V(JSCallRuntime) \
V(JSForInEnumerate) \
V(JSForInNext) \
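
These V(...) lists are X-macros: each opcode is spelled once here, and consumers instantiate the list with their own V to generate enums, switch cases, or name tables that stay in sync by construction. A self-contained sketch of the pattern:

#include <iostream>

#define COLOR_LIST(V) \
  V(Red)              \
  V(Green)            \
  V(Blue)

// Expand once into an enum...
#define DECLARE_ENUM(Name) k##Name,
enum Color { COLOR_LIST(DECLARE_ENUM) kCount };
#undef DECLARE_ENUM

// ...and once into a parallel name table, guaranteed to match the enum.
#define DECLARE_NAME(Name) #Name,
const char* const kColorNames[] = {COLOR_LIST(DECLARE_NAME)};
#undef DECLARE_NAME

int main() {
  for (int i = 0; i < kCount; ++i) std::cout << kColorNames[i] << "\n";
}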
@@ -258,15 +262,19 @@
V(CheckedInt32ToTaggedSigned) \
V(CheckedInt64ToInt32) \
V(CheckedInt64ToTaggedSigned) \
+ V(CheckedUint32Bounds) \
V(CheckedUint32ToInt32) \
V(CheckedUint32ToTaggedSigned) \
+ V(CheckedUint64Bounds) \
V(CheckedUint64ToInt32) \
V(CheckedUint64ToTaggedSigned) \
V(CheckedFloat64ToInt32) \
+ V(CheckedFloat64ToInt64) \
V(CheckedTaggedSignedToInt32) \
V(CheckedTaggedToInt32) \
V(CheckedTruncateTaggedToWord32) \
V(CheckedTaggedToFloat64) \
+ V(CheckedTaggedToInt64) \
V(CheckedTaggedToTaggedSigned) \
V(CheckedTaggedToTaggedPointer)
@@ -376,6 +384,7 @@
V(CheckNumber) \
V(CheckInternalizedString) \
V(CheckReceiver) \
+ V(CheckReceiverOrNullOrUndefined) \
V(CheckString) \
V(CheckSymbol) \
V(CheckSmi) \
@@ -620,6 +629,7 @@
V(ChangeFloat64ToUint32) \
V(ChangeFloat64ToUint64) \
V(Float64SilenceNaN) \
+ V(TruncateFloat64ToInt64) \
V(TruncateFloat64ToUint32) \
V(TruncateFloat32ToInt32) \
V(TruncateFloat32ToUint32) \
diff --git a/deps/v8/src/compiler/operation-typer.cc b/deps/v8/src/compiler/operation-typer.cc
index 313a263ebb..070c17c8e9 100644
--- a/deps/v8/src/compiler/operation-typer.cc
+++ b/deps/v8/src/compiler/operation-typer.cc
@@ -16,32 +16,30 @@ namespace v8 {
namespace internal {
namespace compiler {
-OperationTyper::OperationTyper(JSHeapBroker* js_heap_broker, Zone* zone)
+OperationTyper::OperationTyper(JSHeapBroker* broker, Zone* zone)
: zone_(zone), cache_(TypeCache::Get()) {
- Factory* factory = js_heap_broker->isolate()->factory();
+ Factory* factory = broker->isolate()->factory();
infinity_ = Type::NewConstant(V8_INFINITY, zone);
minus_infinity_ = Type::NewConstant(-V8_INFINITY, zone);
Type truncating_to_zero = Type::MinusZeroOrNaN();
DCHECK(!truncating_to_zero.Maybe(Type::Integral32()));
singleton_empty_string_ =
- Type::HeapConstant(js_heap_broker, factory->empty_string(), zone);
+ Type::HeapConstant(broker, factory->empty_string(), zone);
singleton_NaN_string_ =
- Type::HeapConstant(js_heap_broker, factory->NaN_string(), zone);
+ Type::HeapConstant(broker, factory->NaN_string(), zone);
singleton_zero_string_ =
- Type::HeapConstant(js_heap_broker, factory->zero_string(), zone);
- singleton_false_ =
- Type::HeapConstant(js_heap_broker, factory->false_value(), zone);
- singleton_true_ =
- Type::HeapConstant(js_heap_broker, factory->true_value(), zone);
+ Type::HeapConstant(broker, factory->zero_string(), zone);
+ singleton_false_ = Type::HeapConstant(broker, factory->false_value(), zone);
+ singleton_true_ = Type::HeapConstant(broker, factory->true_value(), zone);
singleton_the_hole_ =
- Type::HeapConstant(js_heap_broker, factory->the_hole_value(), zone);
+ Type::HeapConstant(broker, factory->the_hole_value(), zone);
signed32ish_ = Type::Union(Type::Signed32(), truncating_to_zero, zone);
unsigned32ish_ = Type::Union(Type::Unsigned32(), truncating_to_zero, zone);
falsish_ = Type::Union(
Type::Undetectable(),
- Type::Union(Type::Union(singleton_false_, cache_.kZeroish, zone),
+ Type::Union(Type::Union(singleton_false_, cache_->kZeroish, zone),
Type::Union(singleton_empty_string_, Type::Hole(), zone),
zone),
zone);
@@ -132,7 +130,7 @@ Type OperationTyper::WeakenRange(Type previous_range, Type current_range) {
Type OperationTyper::Rangify(Type type) {
if (type.IsRange()) return type; // Shortcut.
- if (!type.Is(cache_.kInteger)) {
+ if (!type.Is(cache_->kInteger)) {
return type; // Give up on non-integer types.
}
return Type::Range(type.Min(), type.Max(), zone());
@@ -223,31 +221,36 @@ Type OperationTyper::SubtractRanger(double lhs_min, double lhs_max,
// [m, +inf] - [-inf, n] = [-inf, +inf] \/ NaN
}
-Type OperationTyper::MultiplyRanger(Type lhs, Type rhs) {
+Type OperationTyper::MultiplyRanger(double lhs_min, double lhs_max,
+ double rhs_min, double rhs_max) {
double results[4];
- double lmin = lhs.AsRange()->Min();
- double lmax = lhs.AsRange()->Max();
- double rmin = rhs.AsRange()->Min();
- double rmax = rhs.AsRange()->Max();
- results[0] = lmin * rmin;
- results[1] = lmin * rmax;
- results[2] = lmax * rmin;
- results[3] = lmax * rmax;
- // If the result may be nan, we give up on calculating a precise type, because
- // the discontinuity makes it too complicated. Note that even if none of the
- // "results" above is nan, the actual result may still be, so we have to do a
- // different check:
- bool maybe_nan = (lhs.Maybe(cache_.kSingletonZero) &&
- (rmin == -V8_INFINITY || rmax == +V8_INFINITY)) ||
- (rhs.Maybe(cache_.kSingletonZero) &&
- (lmin == -V8_INFINITY || lmax == +V8_INFINITY));
- if (maybe_nan) return cache_.kIntegerOrMinusZeroOrNaN; // Giving up.
- bool maybe_minuszero = (lhs.Maybe(cache_.kSingletonZero) && rmin < 0) ||
- (rhs.Maybe(cache_.kSingletonZero) && lmin < 0);
- Type range =
- Type::Range(array_min(results, 4), array_max(results, 4), zone());
- return maybe_minuszero ? Type::Union(range, Type::MinusZero(), zone())
- : range;
+ results[0] = lhs_min * rhs_min;
+ results[1] = lhs_min * rhs_max;
+ results[2] = lhs_max * rhs_min;
+ results[3] = lhs_max * rhs_max;
+ // If the result may be nan, we give up on calculating a precise type,
+ // because the discontinuity makes it too complicated. Note that even if
+ // none of the "results" above is nan, the actual result may still be, so we
+ // have to do a different check:
+ for (int i = 0; i < 4; ++i) {
+ if (std::isnan(results[i])) {
+ return cache_->kIntegerOrMinusZeroOrNaN;
+ }
+ }
+ double min = array_min(results, 4);
+ double max = array_max(results, 4);
+ Type type = Type::Range(min, max, zone());
+ if (min <= 0.0 && 0.0 <= max && (lhs_min < 0.0 || rhs_min < 0.0)) {
+ type = Type::Union(type, Type::MinusZero(), zone());
+ }
+ // 0 * V8_INFINITY is NaN, regardless of sign
+ if (((lhs_min == -V8_INFINITY || lhs_max == V8_INFINITY) &&
+ (rhs_min <= 0.0 && 0.0 <= rhs_max)) ||
+ ((rhs_min == -V8_INFINITY || rhs_max == V8_INFINITY) &&
+ (lhs_min <= 0.0 && 0.0 <= lhs_max))) {
+ type = Type::Union(type, Type::NaN(), zone());
+ }
+ return type;
}
Type OperationTyper::ConvertReceiver(Type type) {
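
MultiplyRanger now works purely on the four interval endpoints, with the NaN and -0 cases screened out by the caller; the range of x*y over intervals is attained at a corner product because the function is monotone in each argument separately. A worked sketch under that assumption (ignores -0, NaN, and infinities):

#include <algorithm>
#include <iostream>

// Range of {x*y : x in [lmin,lmax], y in [rmin,rmax]} is spanned by the
// four corner products.
void MultiplyRange(double lmin, double lmax, double rmin, double rmax) {
  double c[4] = {lmin * rmin, lmin * rmax, lmax * rmin, lmax * rmax};
  std::cout << "[" << *std::min_element(c, c + 4) << ", "
            << *std::max_element(c, c + 4) << "]\n";
}

int main() {
  MultiplyRange(-1, 2, 3, 5);  // corners {-3,-5,6,10} => [-5, 10]
}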
@@ -281,7 +284,7 @@ Type OperationTyper::ToNumber(Type type) {
DCHECK(type.Is(Type::NumberOrOddball()));
if (type.Maybe(Type::Null())) {
// ToNumber(null) => +0
- type = Type::Union(type, cache_.kSingletonZero, zone());
+ type = Type::Union(type, cache_->kSingletonZero, zone());
}
if (type.Maybe(Type::Undefined())) {
// ToNumber(undefined) => NaN
@@ -289,11 +292,11 @@ Type OperationTyper::ToNumber(Type type) {
}
if (type.Maybe(singleton_false_)) {
// ToNumber(false) => +0
- type = Type::Union(type, cache_.kSingletonZero, zone());
+ type = Type::Union(type, cache_->kSingletonZero, zone());
}
if (type.Maybe(singleton_true_)) {
// ToNumber(true) => +1
- type = Type::Union(type, cache_.kSingletonOne, zone());
+ type = Type::Union(type, cache_->kSingletonOne, zone());
}
return Type::Intersect(type, Type::Number(), zone());
}
@@ -306,7 +309,7 @@ Type OperationTyper::ToNumberConvertBigInt(Type type) {
type = ToNumber(Type::Intersect(type, Type::NonBigInt(), zone()));
// Any BigInt is rounded to an integer Number in the range [-inf, inf].
- return maybe_bigint ? Type::Union(type, cache_.kInteger, zone()) : type;
+ return maybe_bigint ? Type::Union(type, cache_->kInteger, zone()) : type;
}
Type OperationTyper::ToNumeric(Type type) {
@@ -331,7 +334,7 @@ Type OperationTyper::NumberAbs(Type type) {
double const max = type.Max();
double const min = type.Min();
if (min < 0) {
- if (type.Is(cache_.kInteger)) {
+ if (type.Is(cache_->kInteger)) {
type =
Type::Range(0.0, std::max(std::fabs(min), std::fabs(max)), zone());
} else {
@@ -341,7 +344,7 @@ Type OperationTyper::NumberAbs(Type type) {
}
if (maybe_minuszero) {
- type = Type::Union(type, cache_.kSingletonZero, zone());
+ type = Type::Union(type, cache_->kSingletonZero, zone());
}
if (maybe_nan) {
type = Type::Union(type, Type::NaN(), zone());
@@ -386,15 +389,15 @@ Type OperationTyper::NumberCbrt(Type type) {
Type OperationTyper::NumberCeil(Type type) {
DCHECK(type.Is(Type::Number()));
- if (type.Is(cache_.kIntegerOrMinusZeroOrNaN)) return type;
+ if (type.Is(cache_->kIntegerOrMinusZeroOrNaN)) return type;
type = Type::Intersect(type, Type::NaN(), zone());
- type = Type::Union(type, cache_.kIntegerOrMinusZero, zone());
+ type = Type::Union(type, cache_->kIntegerOrMinusZero, zone());
return type;
}
Type OperationTyper::NumberClz32(Type type) {
DCHECK(type.Is(Type::Number()));
- return cache_.kZeroToThirtyTwo;
+ return cache_->kZeroToThirtyTwo;
}
Type OperationTyper::NumberCos(Type type) {
@@ -419,9 +422,9 @@ Type OperationTyper::NumberExpm1(Type type) {
Type OperationTyper::NumberFloor(Type type) {
DCHECK(type.Is(Type::Number()));
- if (type.Is(cache_.kIntegerOrMinusZeroOrNaN)) return type;
+ if (type.Is(cache_->kIntegerOrMinusZeroOrNaN)) return type;
type = Type::Intersect(type, Type::MinusZeroOrNaN(), zone());
- type = Type::Union(type, cache_.kInteger, zone());
+ type = Type::Union(type, cache_->kInteger, zone());
return type;
}
@@ -452,28 +455,28 @@ Type OperationTyper::NumberLog10(Type type) {
Type OperationTyper::NumberRound(Type type) {
DCHECK(type.Is(Type::Number()));
- if (type.Is(cache_.kIntegerOrMinusZeroOrNaN)) return type;
+ if (type.Is(cache_->kIntegerOrMinusZeroOrNaN)) return type;
type = Type::Intersect(type, Type::NaN(), zone());
- type = Type::Union(type, cache_.kIntegerOrMinusZero, zone());
+ type = Type::Union(type, cache_->kIntegerOrMinusZero, zone());
return type;
}
Type OperationTyper::NumberSign(Type type) {
DCHECK(type.Is(Type::Number()));
- if (type.Is(cache_.kZeroish)) return type;
+ if (type.Is(cache_->kZeroish)) return type;
bool maybe_minuszero = type.Maybe(Type::MinusZero());
bool maybe_nan = type.Maybe(Type::NaN());
type = Type::Intersect(type, Type::PlainNumber(), zone());
if (type.IsNone()) {
// Do nothing.
} else if (type.Max() < 0.0) {
- type = cache_.kSingletonMinusOne;
+ type = cache_->kSingletonMinusOne;
} else if (type.Max() <= 0.0) {
- type = cache_.kMinusOneOrZero;
+ type = cache_->kMinusOneOrZero;
} else if (type.Min() > 0.0) {
- type = cache_.kSingletonOne;
+ type = cache_->kSingletonOne;
} else if (type.Min() >= 0.0) {
- type = cache_.kZeroOrOne;
+ type = cache_->kZeroOrOne;
} else {
type = Type::Range(-1.0, 1.0, zone());
}
@@ -510,16 +513,16 @@ Type OperationTyper::NumberTanh(Type type) {
Type OperationTyper::NumberTrunc(Type type) {
DCHECK(type.Is(Type::Number()));
- if (type.Is(cache_.kIntegerOrMinusZeroOrNaN)) return type;
+ if (type.Is(cache_->kIntegerOrMinusZeroOrNaN)) return type;
type = Type::Intersect(type, Type::NaN(), zone());
- type = Type::Union(type, cache_.kIntegerOrMinusZero, zone());
+ type = Type::Union(type, cache_->kIntegerOrMinusZero, zone());
return type;
}
Type OperationTyper::NumberToBoolean(Type type) {
DCHECK(type.Is(Type::Number()));
if (type.IsNone()) return type;
- if (type.Is(cache_.kZeroish)) return singleton_false_;
+ if (type.Is(cache_->kZeroish)) return singleton_false_;
if (type.Is(Type::PlainNumber()) && (type.Max() < 0 || 0 < type.Min())) {
return singleton_true_; // Ruled out nan, -0 and +0.
}
@@ -530,9 +533,9 @@ Type OperationTyper::NumberToInt32(Type type) {
DCHECK(type.Is(Type::Number()));
if (type.Is(Type::Signed32())) return type;
- if (type.Is(cache_.kZeroish)) return cache_.kSingletonZero;
+ if (type.Is(cache_->kZeroish)) return cache_->kSingletonZero;
if (type.Is(signed32ish_)) {
- return Type::Intersect(Type::Union(type, cache_.kSingletonZero, zone()),
+ return Type::Intersect(Type::Union(type, cache_->kSingletonZero, zone()),
Type::Signed32(), zone());
}
return Type::Signed32();
@@ -542,7 +545,7 @@ Type OperationTyper::NumberToString(Type type) {
DCHECK(type.Is(Type::Number()));
if (type.IsNone()) return type;
if (type.Is(Type::NaN())) return singleton_NaN_string_;
- if (type.Is(cache_.kZeroOrMinusZero)) return singleton_zero_string_;
+ if (type.Is(cache_->kZeroOrMinusZero)) return singleton_zero_string_;
return Type::String();
}
@@ -550,9 +553,9 @@ Type OperationTyper::NumberToUint32(Type type) {
DCHECK(type.Is(Type::Number()));
if (type.Is(Type::Unsigned32())) return type;
- if (type.Is(cache_.kZeroish)) return cache_.kSingletonZero;
+ if (type.Is(cache_->kZeroish)) return cache_->kSingletonZero;
if (type.Is(unsigned32ish_)) {
- return Type::Intersect(Type::Union(type, cache_.kSingletonZero, zone()),
+ return Type::Intersect(Type::Union(type, cache_->kSingletonZero, zone()),
Type::Unsigned32(), zone());
}
return Type::Unsigned32();
@@ -561,8 +564,8 @@ Type OperationTyper::NumberToUint32(Type type) {
Type OperationTyper::NumberToUint8Clamped(Type type) {
DCHECK(type.Is(Type::Number()));
- if (type.Is(cache_.kUint8)) return type;
- return cache_.kUint8;
+ if (type.Is(cache_->kUint8)) return type;
+ return cache_->kUint8;
}
Type OperationTyper::NumberSilenceNaN(Type type) {
@@ -587,12 +590,12 @@ Type OperationTyper::NumberAdd(Type lhs, Type rhs) {
// Addition can yield minus zero only if both inputs can be minus zero.
bool maybe_minuszero = true;
if (lhs.Maybe(Type::MinusZero())) {
- lhs = Type::Union(lhs, cache_.kSingletonZero, zone());
+ lhs = Type::Union(lhs, cache_->kSingletonZero, zone());
} else {
maybe_minuszero = false;
}
if (rhs.Maybe(Type::MinusZero())) {
- rhs = Type::Union(rhs, cache_.kSingletonZero, zone());
+ rhs = Type::Union(rhs, cache_->kSingletonZero, zone());
} else {
maybe_minuszero = false;
}
@@ -602,7 +605,7 @@ Type OperationTyper::NumberAdd(Type lhs, Type rhs) {
lhs = Type::Intersect(lhs, Type::PlainNumber(), zone());
rhs = Type::Intersect(rhs, Type::PlainNumber(), zone());
if (!lhs.IsNone() && !rhs.IsNone()) {
- if (lhs.Is(cache_.kInteger) && rhs.Is(cache_.kInteger)) {
+ if (lhs.Is(cache_->kInteger) && rhs.Is(cache_->kInteger)) {
type = AddRanger(lhs.Min(), lhs.Max(), rhs.Min(), rhs.Max());
} else {
if ((lhs.Maybe(minus_infinity_) && rhs.Maybe(infinity_)) ||
@@ -633,11 +636,11 @@ Type OperationTyper::NumberSubtract(Type lhs, Type rhs) {
// can be zero.
bool maybe_minuszero = false;
if (lhs.Maybe(Type::MinusZero())) {
- lhs = Type::Union(lhs, cache_.kSingletonZero, zone());
- maybe_minuszero = rhs.Maybe(cache_.kSingletonZero);
+ lhs = Type::Union(lhs, cache_->kSingletonZero, zone());
+ maybe_minuszero = rhs.Maybe(cache_->kSingletonZero);
}
if (rhs.Maybe(Type::MinusZero())) {
- rhs = Type::Union(rhs, cache_.kSingletonZero, zone());
+ rhs = Type::Union(rhs, cache_->kSingletonZero, zone());
}
// We can give more precise types for integers.
@@ -645,7 +648,7 @@ Type OperationTyper::NumberSubtract(Type lhs, Type rhs) {
lhs = Type::Intersect(lhs, Type::PlainNumber(), zone());
rhs = Type::Intersect(rhs, Type::PlainNumber(), zone());
if (!lhs.IsNone() && !rhs.IsNone()) {
- if (lhs.Is(cache_.kInteger) && rhs.Is(cache_.kInteger)) {
+ if (lhs.Is(cache_->kInteger) && rhs.Is(cache_->kInteger)) {
type = SubtractRanger(lhs.Min(), lhs.Max(), rhs.Min(), rhs.Max());
} else {
if ((lhs.Maybe(infinity_) && rhs.Maybe(infinity_)) ||
@@ -669,7 +672,7 @@ Type OperationTyper::SpeculativeSafeIntegerAdd(Type lhs, Type rhs) {
// In either case the result will be in the safe integer range, so we
// can bake in the type here. This needs to be in sync with
// SimplifiedLowering::VisitSpeculativeAdditiveOp.
- return Type::Intersect(result, cache_.kSafeIntegerOrMinusZero, zone());
+ return Type::Intersect(result, cache_->kSafeIntegerOrMinusZero, zone());
}
Type OperationTyper::SpeculativeSafeIntegerSubtract(Type lhs, Type rhs) {
@@ -679,7 +682,7 @@ Type OperationTyper::SpeculativeSafeIntegerSubtract(Type lhs, Type rhs) {
// In either case the result will be in the safe integer range, so we
// can bake in the type here. This needs to be in sync with
// SimplifiedLowering::VisitSpeculativeAdditiveOp.
- return result = Type::Intersect(result, cache_.kSafeInteger, zone());
+ return Type::Intersect(result, cache_->kSafeIntegerOrMinusZero, zone());
}
Type OperationTyper::NumberMultiply(Type lhs, Type rhs) {
@@ -687,14 +690,44 @@ Type OperationTyper::NumberMultiply(Type lhs, Type rhs) {
DCHECK(rhs.Is(Type::Number()));
if (lhs.IsNone() || rhs.IsNone()) return Type::None();
-
- lhs = Rangify(lhs);
- rhs = Rangify(rhs);
if (lhs.Is(Type::NaN()) || rhs.Is(Type::NaN())) return Type::NaN();
- if (lhs.IsRange() && rhs.IsRange()) {
- return MultiplyRanger(lhs, rhs);
+
+ // Multiplication propagates NaN:
+ // NaN * x = NaN (regardless of sign of x)
+ // 0 * Infinity = NaN (regardless of signs)
+ bool maybe_nan = lhs.Maybe(Type::NaN()) || rhs.Maybe(Type::NaN()) ||
+ (lhs.Maybe(cache_->kZeroish) &&
+ (rhs.Min() == -V8_INFINITY || rhs.Max() == V8_INFINITY)) ||
+ (rhs.Maybe(cache_->kZeroish) &&
+ (lhs.Min() == -V8_INFINITY || lhs.Max() == V8_INFINITY));
+ lhs = Type::Intersect(lhs, Type::OrderedNumber(), zone());
+ DCHECK(!lhs.IsNone());
+ rhs = Type::Intersect(rhs, Type::OrderedNumber(), zone());
+ DCHECK(!rhs.IsNone());
+
+ // Try to rule out -0.
+ bool maybe_minuszero = lhs.Maybe(Type::MinusZero()) ||
+ rhs.Maybe(Type::MinusZero()) ||
+ (lhs.Maybe(cache_->kZeroish) && rhs.Min() < 0.0) ||
+ (rhs.Maybe(cache_->kZeroish) && lhs.Min() < 0.0);
+ if (lhs.Maybe(Type::MinusZero())) {
+ lhs = Type::Union(lhs, cache_->kSingletonZero, zone());
+ lhs = Type::Intersect(lhs, Type::PlainNumber(), zone());
}
- return Type::Number();
+ if (rhs.Maybe(Type::MinusZero())) {
+ rhs = Type::Union(rhs, cache_->kSingletonZero, zone());
+ rhs = Type::Intersect(rhs, Type::PlainNumber(), zone());
+ }
+
+ // Compute the effective type, utilizing range information if possible.
+ Type type = (lhs.Is(cache_->kInteger) && rhs.Is(cache_->kInteger))
+ ? MultiplyRanger(lhs.Min(), lhs.Max(), rhs.Min(), rhs.Max())
+ : Type::OrderedNumber();
+
+ // Take into account the -0 and NaN information computed earlier.
+ if (maybe_minuszero) type = Type::Union(type, Type::MinusZero(), zone());
+ if (maybe_nan) type = Type::Union(type, Type::NaN(), zone());
+ return type;
}
Type OperationTyper::NumberDivide(Type lhs, Type rhs) {
@@ -705,7 +738,7 @@ Type OperationTyper::NumberDivide(Type lhs, Type rhs) {
if (lhs.Is(Type::NaN()) || rhs.Is(Type::NaN())) return Type::NaN();
// Division is tricky, so all we do is try ruling out -0 and NaN.
- bool maybe_nan = lhs.Maybe(Type::NaN()) || rhs.Maybe(cache_.kZeroish) ||
+ bool maybe_nan = lhs.Maybe(Type::NaN()) || rhs.Maybe(cache_->kZeroish) ||
((lhs.Min() == -V8_INFINITY || lhs.Max() == +V8_INFINITY) &&
(rhs.Min() == -V8_INFINITY || rhs.Max() == +V8_INFINITY));
lhs = Type::Intersect(lhs, Type::OrderedNumber(), zone());
@@ -715,8 +748,8 @@ Type OperationTyper::NumberDivide(Type lhs, Type rhs) {
// Try to rule out -0.
bool maybe_minuszero =
- !lhs.Is(cache_.kInteger) ||
- (lhs.Maybe(cache_.kZeroish) && rhs.Min() < 0.0) ||
+ !lhs.Is(cache_->kInteger) ||
+ (lhs.Maybe(cache_->kZeroish) && rhs.Min() < 0.0) ||
(rhs.Min() == -V8_INFINITY || rhs.Max() == +V8_INFINITY);
// Take into account the -0 and NaN information computed earlier.
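
// --- Illustrative sketch, not part of the patch: the division cases that
// force NumberDivide above to keep NaN and -0 in the result type. Standalone
// C++ covering each condition in maybe_nan and maybe_minuszero:
//
//   #include <cmath>
//   #include <cstdio>
//   #include <limits>
//
//   int main() {
//     const double inf = std::numeric_limits<double>::infinity();
//     std::printf("0/0     -> NaN: %d\n", std::isnan(0.0 / 0.0));     // 1
//     std::printf("inf/inf -> NaN: %d\n", std::isnan(inf / inf));     // 1
//     std::printf("1/inf   -> +0 : %d\n", !std::signbit(1.0 / inf));  // 1
//     std::printf("-1/inf  -> -0 : %d\n", std::signbit(-1.0 / inf));  // 1
//     std::printf("0/-5    -> -0 : %d\n", std::signbit(0.0 / -5.0));  // 1
//     return 0;
//   }
// --- end sketch
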
@@ -734,17 +767,17 @@ Type OperationTyper::NumberModulus(Type lhs, Type rhs) {
// Modulus can yield NaN if either {lhs} or {rhs} is NaN, or
// {lhs} is not finite, or the {rhs} is a zero value.
- bool maybe_nan = lhs.Maybe(Type::NaN()) || rhs.Maybe(cache_.kZeroish) ||
+ bool maybe_nan = lhs.Maybe(Type::NaN()) || rhs.Maybe(cache_->kZeroish) ||
lhs.Min() == -V8_INFINITY || lhs.Max() == +V8_INFINITY;
// Deal with -0 inputs; only the signbit of {lhs} matters for the result.
bool maybe_minuszero = false;
if (lhs.Maybe(Type::MinusZero())) {
maybe_minuszero = true;
- lhs = Type::Union(lhs, cache_.kSingletonZero, zone());
+ lhs = Type::Union(lhs, cache_->kSingletonZero, zone());
}
if (rhs.Maybe(Type::MinusZero())) {
- rhs = Type::Union(rhs, cache_.kSingletonZero, zone());
+ rhs = Type::Union(rhs, cache_->kSingletonZero, zone());
}
// Rule out NaN and -0, and check what we can do with the remaining type info.
@@ -754,7 +787,7 @@ Type OperationTyper::NumberModulus(Type lhs, Type rhs) {
// We can only derive a meaningful type if both {lhs} and {rhs} are inhabited,
// and the {rhs} is not 0; otherwise the result is NaN independent of {lhs}.
- if (!lhs.IsNone() && !rhs.Is(cache_.kSingletonZero)) {
+ if (!lhs.IsNone() && !rhs.Is(cache_->kSingletonZero)) {
// Determine the bounds of {lhs} and {rhs}.
double const lmin = lhs.Min();
double const lmax = lhs.Max();
@@ -765,7 +798,7 @@ Type OperationTyper::NumberModulus(Type lhs, Type rhs) {
if (lmin < 0.0) maybe_minuszero = true;
// For integer inputs {lhs} and {rhs} we can infer a precise type.
- if (lhs.Is(cache_.kInteger) && rhs.Is(cache_.kInteger)) {
+ if (lhs.Is(cache_->kInteger) && rhs.Is(cache_->kInteger)) {
double labs = std::max(std::abs(lmin), std::abs(lmax));
double rabs = std::max(std::abs(rmin), std::abs(rmax)) - 1;
double abs = std::min(labs, rabs);
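
// --- Illustrative sketch, not part of the patch: JS % behaves like C++
// std::fmod, whose result takes the sign of the dividend, is NaN for an
// infinite lhs or a zero rhs, and for integer operands is bounded in
// magnitude by min(|lhs|, |rhs| - 1) -- the bound std::min(labs, rabs)
// computed above. Standalone C++:
//
//   #include <cmath>
//   #include <cstdio>
//   #include <limits>
//
//   int main() {
//     const double inf = std::numeric_limits<double>::infinity();
//     std::printf("fmod(-5, 3) = %g\n", std::fmod(-5.0, 3.0));  // -2: sign of lhs
//     std::printf("fmod(5, -3) = %g\n", std::fmod(5.0, -3.0));  //  2: rhs sign ignored
//     std::printf("fmod(-0, 3) signbit: %d\n",
//                 std::signbit(std::fmod(-0.0, 3.0)));          // 1: -0 preserved
//     std::printf("fmod(5, 0)   NaN: %d\n", std::isnan(std::fmod(5.0, 0.0)));  // 1
//     std::printf("fmod(inf, 3) NaN: %d\n", std::isnan(std::fmod(inf, 3.0)));  // 1
//     return 0;
//   }
// --- end sketch
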
@@ -1005,8 +1038,8 @@ Type OperationTyper::NumberMax(Type lhs, Type rhs) {
DCHECK(!lhs.IsNone());
rhs = Type::Intersect(rhs, Type::OrderedNumber(), zone());
DCHECK(!rhs.IsNone());
- if (lhs.Is(cache_.kIntegerOrMinusZero) &&
- rhs.Is(cache_.kIntegerOrMinusZero)) {
+ if (lhs.Is(cache_->kIntegerOrMinusZero) &&
+ rhs.Is(cache_->kIntegerOrMinusZero)) {
// TODO(turbofan): This could still be improved by ruling out -0 when
// one of the inputs' min is 0.
double max = std::max(lhs.Max(), rhs.Max());
@@ -1037,8 +1070,8 @@ Type OperationTyper::NumberMin(Type lhs, Type rhs) {
DCHECK(!lhs.IsNone());
rhs = Type::Intersect(rhs, Type::OrderedNumber(), zone());
DCHECK(!rhs.IsNone());
- if (lhs.Is(cache_.kIntegerOrMinusZero) &&
- rhs.Is(cache_.kIntegerOrMinusZero)) {
+ if (lhs.Is(cache_->kIntegerOrMinusZero) &&
+ rhs.Is(cache_->kIntegerOrMinusZero)) {
double max = std::min(lhs.Max(), rhs.Max());
double min = std::min(lhs.Min(), rhs.Min());
type = Type::Union(type, Type::Range(min, max, zone()), zone());
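
// --- Illustrative sketch, not part of the patch: the interval arithmetic
// used for integer inputs in NumberMax/NumberMin above. For ranges [la, lb]
// and [ra, rb]: min -> [min(la, ra), min(lb, rb)], max -> [max(la, ra),
// max(lb, rb)]. Standalone C++ with a hypothetical Range struct standing in
// for Type::Range:
//
//   #include <algorithm>
//   #include <cassert>
//
//   struct Range { double min, max; };
//
//   Range RangeMin(Range l, Range r) {
//     return {std::min(l.min, r.min), std::min(l.max, r.max)};
//   }
//   Range RangeMax(Range l, Range r) {
//     return {std::max(l.min, r.min), std::max(l.max, r.max)};
//   }
//
//   int main() {
//     Range m = RangeMin({1, 5}, {3, 4});
//     assert(m.min == 1 && m.max == 4);  // min over [1,5] x [3,4] lies in [1,4]
//     Range M = RangeMax({1, 5}, {3, 4});
//     assert(M.min == 3 && M.max == 5);  // max over [1,5] x [3,4] lies in [3,5]
//     return 0;
//   }
// --- end sketch
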
@@ -1173,16 +1206,13 @@ Type OperationTyper::StrictEqual(Type lhs, Type rhs) {
}
Type OperationTyper::CheckBounds(Type index, Type length) {
- DCHECK(length.Is(Type::Unsigned31()));
+ DCHECK(length.Is(cache_->kPositiveSafeInteger));
+ if (length.Is(cache_->kSingletonZero)) return Type::None();
+ Type mask = Type::Range(0.0, length.Max() - 1, zone());
if (index.Maybe(Type::MinusZero())) {
- index = Type::Union(index, cache_.kSingletonZero, zone());
+ index = Type::Union(index, cache_->kSingletonZero, zone());
}
- index = Type::Intersect(index, Type::Integral32(), zone());
- if (index.IsNone() || length.IsNone()) return Type::None();
- double min = std::max(index.Min(), 0.0);
- double max = std::min(index.Max(), length.Max() - 1);
- if (max < min) return Type::None();
- return Type::Range(min, max, zone());
+ return Type::Intersect(index, mask, zone());
}
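
// --- Illustrative sketch, not part of the patch: the rewritten CheckBounds
// above collapses the old explicit min/max clamping into a single interval
// intersection with [0, length.Max() - 1]. A hypothetical standalone model
// of that intersection, where an inverted interval plays the role of
// Type::None():
//
//   #include <algorithm>
//   #include <cassert>
//
//   struct Interval {
//     double min, max;
//     bool empty() const { return max < min; }  // models Type::None()
//   };
//
//   Interval Intersect(Interval a, Interval b) {
//     return {std::max(a.min, b.min), std::min(a.max, b.max)};
//   }
//
//   int main() {
//     Interval index{-2, 7}, mask{0, 4};  // length.Max() == 5
//     Interval checked = Intersect(index, mask);
//     assert(checked.min == 0 && checked.max == 4);
//     assert(Intersect({9, 12}, mask).empty());  // fully out of bounds -> None
//     return 0;
//   }
// --- end sketch
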
Type OperationTyper::CheckFloat64Hole(Type type) {
diff --git a/deps/v8/src/compiler/operation-typer.h b/deps/v8/src/compiler/operation-typer.h
index e84e97d2a1..25c3c9d1e4 100644
--- a/deps/v8/src/compiler/operation-typer.h
+++ b/deps/v8/src/compiler/operation-typer.h
@@ -27,7 +27,7 @@ class TypeCache;
class V8_EXPORT_PRIVATE OperationTyper {
public:
- OperationTyper(JSHeapBroker* js_heap_broker, Zone* zone);
+ OperationTyper(JSHeapBroker* broker, Zone* zone);
// Typing Phi.
Type Merge(Type left, Type right);
@@ -87,12 +87,13 @@ class V8_EXPORT_PRIVATE OperationTyper {
double rhs_max);
Type SubtractRanger(double lhs_min, double lhs_max, double rhs_min,
double rhs_max);
- Type MultiplyRanger(Type lhs, Type rhs);
+ Type MultiplyRanger(double lhs_min, double lhs_max, double rhs_min,
+ double rhs_max);
Zone* zone() const { return zone_; }
Zone* const zone_;
- TypeCache const& cache_;
+ TypeCache const* cache_;
Type infinity_;
Type minus_infinity_;
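
// --- Illustrative sketch, not part of the patch: one mechanical consequence
// of changing cache_ from TypeCache const& to TypeCache const*, as in the
// header hunk above. A class with a reference member is not copy-assignable
// and the reference can never be reseated; a pointer member restores both,
// and every use site changes from `.` to `->` (the churn visible throughout
// operation-typer.cc). Standalone C++ with hypothetical names:
//
//   #include <cassert>
//
//   struct Cache { int kInteger = 42; };
//
//   struct ByRef {
//     explicit ByRef(const Cache& c) : cache_(c) {}
//     const Cache& cache_;  // implicitly deletes copy assignment
//   };
//
//   struct ByPtr {
//     explicit ByPtr(const Cache* c) : cache_(c) {}
//     const Cache* cache_;  // copy-assignable, can be reseated
//   };
//
//   int main() {
//     Cache c1, c2;
//     ByPtr a(&c1), b(&c2);
//     a = b;  // fine with a pointer member
//     assert(a.cache_->kInteger == 42);
//     // ByRef r1(c1), r2(c2); r1 = r2;  // would not compile
//     return 0;
//   }
// --- end sketch
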
diff --git a/deps/v8/src/compiler/operator-properties.cc b/deps/v8/src/compiler/operator-properties.cc
index 8da3ccfd81..959e743369 100644
--- a/deps/v8/src/compiler/operator-properties.cc
+++ b/deps/v8/src/compiler/operator-properties.cc
@@ -87,7 +87,11 @@ bool OperatorProperties::NeedsExactContext(const Operator* op) {
case IrOpcode::kJSStoreProperty:
return true;
+ case IrOpcode::kJSAsyncFunctionEnter:
+ case IrOpcode::kJSAsyncFunctionReject:
+ case IrOpcode::kJSAsyncFunctionResolve:
case IrOpcode::kJSCreateArrayIterator:
+ case IrOpcode::kJSCreateAsyncFunctionObject:
case IrOpcode::kJSCreateBoundFunction:
case IrOpcode::kJSCreateCollectionIterator:
case IrOpcode::kJSCreateIterResultObject:
@@ -215,6 +219,9 @@ bool OperatorProperties::HasFrameStateInput(const Operator* op) {
case IrOpcode::kJSCallWithSpread:
// Misc operations
+ case IrOpcode::kJSAsyncFunctionEnter:
+ case IrOpcode::kJSAsyncFunctionReject:
+ case IrOpcode::kJSAsyncFunctionResolve:
case IrOpcode::kJSForInEnumerate:
case IrOpcode::kJSForInNext:
case IrOpcode::kJSStackCheck:
diff --git a/deps/v8/src/compiler/osr.cc b/deps/v8/src/compiler/osr.cc
index b2211b3b07..659b3f4c8f 100644
--- a/deps/v8/src/compiler/osr.cc
+++ b/deps/v8/src/compiler/osr.cc
@@ -15,12 +15,10 @@ namespace internal {
namespace compiler {
OsrHelper::OsrHelper(OptimizedCompilationInfo* info)
- : parameter_count_(
- info->shared_info()->GetBytecodeArray()->parameter_count()),
- stack_slot_count_(
- InterpreterFrameConstants::RegisterStackSlotCount(
- info->shared_info()->GetBytecodeArray()->register_count()) +
- InterpreterFrameConstants::kExtraSlotCount) {}
+ : parameter_count_(info->bytecode_array()->parameter_count()),
+ stack_slot_count_(InterpreterFrameConstants::RegisterStackSlotCount(
+ info->bytecode_array()->register_count()) +
+ InterpreterFrameConstants::kExtraSlotCount) {}
void OsrHelper::SetupFrame(Frame* frame) {
// The optimized frame will subsume the unoptimized frame. Do so by reserving
diff --git a/deps/v8/src/compiler/pipeline.cc b/deps/v8/src/compiler/pipeline.cc
index c4169266e7..3a4212e3d5 100644
--- a/deps/v8/src/compiler/pipeline.cc
+++ b/deps/v8/src/compiler/pipeline.cc
@@ -16,11 +16,19 @@
#include "src/bootstrapper.h"
#include "src/code-tracer.h"
#include "src/compiler.h"
+#include "src/compiler/backend/code-generator.h"
+#include "src/compiler/backend/frame-elider.h"
+#include "src/compiler/backend/instruction-selector.h"
+#include "src/compiler/backend/instruction.h"
+#include "src/compiler/backend/jump-threading.h"
+#include "src/compiler/backend/live-range-separator.h"
+#include "src/compiler/backend/move-optimizer.h"
+#include "src/compiler/backend/register-allocator-verifier.h"
+#include "src/compiler/backend/register-allocator.h"
#include "src/compiler/basic-block-instrumentor.h"
#include "src/compiler/branch-elimination.h"
#include "src/compiler/bytecode-graph-builder.h"
#include "src/compiler/checkpoint-elimination.h"
-#include "src/compiler/code-generator.h"
#include "src/compiler/common-operator-reducer.h"
#include "src/compiler/compilation-dependencies.h"
#include "src/compiler/compiler-source-position-table.h"
@@ -30,11 +38,8 @@
#include "src/compiler/effect-control-linearizer.h"
#include "src/compiler/escape-analysis-reducer.h"
#include "src/compiler/escape-analysis.h"
-#include "src/compiler/frame-elider.h"
#include "src/compiler/graph-trimmer.h"
#include "src/compiler/graph-visualizer.h"
-#include "src/compiler/instruction-selector.h"
-#include "src/compiler/instruction.h"
#include "src/compiler/js-call-reducer.h"
#include "src/compiler/js-context-specialization.h"
#include "src/compiler/js-create-lowering.h"
@@ -45,8 +50,6 @@
#include "src/compiler/js-intrinsic-lowering.h"
#include "src/compiler/js-native-context-specialization.h"
#include "src/compiler/js-typed-lowering.h"
-#include "src/compiler/jump-threading.h"
-#include "src/compiler/live-range-separator.h"
#include "src/compiler/load-elimination.h"
#include "src/compiler/loop-analysis.h"
#include "src/compiler/loop-peeling.h"
@@ -54,16 +57,14 @@
#include "src/compiler/machine-graph-verifier.h"
#include "src/compiler/machine-operator-reducer.h"
#include "src/compiler/memory-optimizer.h"
-#include "src/compiler/move-optimizer.h"
#include "src/compiler/node-origin-table.h"
#include "src/compiler/osr.h"
#include "src/compiler/pipeline-statistics.h"
#include "src/compiler/redundancy-elimination.h"
-#include "src/compiler/register-allocator-verifier.h"
-#include "src/compiler/register-allocator.h"
#include "src/compiler/schedule.h"
#include "src/compiler/scheduler.h"
#include "src/compiler/select-lowering.h"
+#include "src/compiler/serializer-for-background-compilation.h"
#include "src/compiler/simplified-lowering.h"
#include "src/compiler/simplified-operator-reducer.h"
#include "src/compiler/simplified-operator.h"
@@ -84,15 +85,11 @@
#include "src/register-configuration.h"
#include "src/utils.h"
#include "src/wasm/function-body-decoder.h"
+#include "src/wasm/function-compiler.h"
#include "src/wasm/wasm-engine.h"
namespace v8 {
namespace internal {
-
-namespace trap_handler {
-struct ProtectedInstructionData;
-} // namespace trap_handler
-
namespace compiler {
// Turbofan can only handle 2^16 control inputs. Since each control flow split
@@ -137,7 +134,7 @@ class PipelineData {
javascript_ = new (graph_zone_) JSOperatorBuilder(graph_zone_);
jsgraph_ = new (graph_zone_)
JSGraph(isolate_, graph_, common_, javascript_, simplified_, machine_);
- js_heap_broker_ = new (info_->zone()) JSHeapBroker(isolate_, info_->zone());
+ broker_ = new (info_->zone()) JSHeapBroker(isolate_, info_->zone());
dependencies_ =
new (info_->zone()) CompilationDependencies(isolate_, info_->zone());
}
@@ -147,14 +144,13 @@ class PipelineData {
OptimizedCompilationInfo* info, MachineGraph* mcgraph,
PipelineStatistics* pipeline_statistics,
SourcePositionTable* source_positions,
- NodeOriginTable* node_origins, int wasm_function_index,
+ NodeOriginTable* node_origins,
const AssemblerOptions& assembler_options)
: isolate_(nullptr),
wasm_engine_(wasm_engine),
allocator_(wasm_engine->allocator()),
info_(info),
debug_name_(info_->GetDebugName()),
- wasm_function_index_(wasm_function_index),
may_have_unverifiable_graph_(false),
zone_stats_(zone_stats),
pipeline_statistics_(pipeline_statistics),
@@ -174,7 +170,7 @@ class PipelineData {
register_allocation_zone_(register_allocation_zone_scope_.zone()),
assembler_options_(assembler_options) {}
- // For machine graph testing entry point.
+ // For CodeStubAssembler and machine graph testing entry point.
PipelineData(ZoneStats* zone_stats, OptimizedCompilationInfo* info,
Isolate* isolate, Graph* graph, Schedule* schedule,
SourcePositionTable* source_positions,
@@ -186,6 +182,7 @@ class PipelineData {
debug_name_(info_->GetDebugName()),
zone_stats_(zone_stats),
graph_zone_scope_(zone_stats_, ZONE_NAME),
+ graph_zone_(graph_zone_scope_.zone()),
graph_(graph),
source_positions_(source_positions),
node_origins_(node_origins),
@@ -197,7 +194,17 @@ class PipelineData {
register_allocation_zone_scope_(zone_stats_, ZONE_NAME),
register_allocation_zone_(register_allocation_zone_scope_.zone()),
jump_optimization_info_(jump_opt),
- assembler_options_(assembler_options) {}
+ assembler_options_(assembler_options) {
+ simplified_ = new (graph_zone_) SimplifiedOperatorBuilder(graph_zone_);
+ machine_ = new (graph_zone_) MachineOperatorBuilder(
+ graph_zone_, MachineType::PointerRepresentation(),
+ InstructionSelector::SupportedMachineOperatorFlags(),
+ InstructionSelector::AlignmentRequirements());
+ common_ = new (graph_zone_) CommonOperatorBuilder(graph_zone_);
+ javascript_ = new (graph_zone_) JSOperatorBuilder(graph_zone_);
+ jsgraph_ = new (graph_zone_)
+ JSGraph(isolate_, graph_, common_, javascript_, simplified_, machine_);
+ }
// For register allocation testing entry point.
PipelineData(ZoneStats* zone_stats, OptimizedCompilationInfo* info,
@@ -269,7 +276,7 @@ class PipelineData {
return handle(info()->global_object(), isolate());
}
- JSHeapBroker* js_heap_broker() const { return js_heap_broker_; }
+ JSHeapBroker* broker() const { return broker_; }
Schedule* schedule() const { return schedule_; }
void set_schedule(Schedule* schedule) {
@@ -315,7 +322,7 @@ class PipelineData {
Typer* CreateTyper() {
DCHECK_NULL(typer_);
- typer_ = new Typer(js_heap_broker(), typer_flags_, graph());
+ typer_ = new Typer(broker(), typer_flags_, graph());
return typer_;
}
@@ -357,7 +364,7 @@ class PipelineData {
codegen_zone_scope_.Destroy();
codegen_zone_ = nullptr;
dependencies_ = nullptr;
- js_heap_broker_ = nullptr;
+ broker_ = nullptr;
frame_ = nullptr;
}
@@ -410,14 +417,15 @@ class PipelineData {
start_source_position_ = position;
}
- void InitializeCodeGenerator(Linkage* linkage) {
+ void InitializeCodeGenerator(Linkage* linkage,
+ std::unique_ptr<AssemblerBuffer> buffer) {
DCHECK_NULL(code_generator_);
code_generator_ = new CodeGenerator(
codegen_zone(), frame(), linkage, sequence(), info(), isolate(),
osr_helper_, start_source_position_, jump_optimization_info_,
info()->GetPoisoningMitigationLevel(), assembler_options_,
- info_->builtin_index());
+ info_->builtin_index(), std::move(buffer));
}
void BeginPhaseKind(const char* phase_kind_name) {
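
// --- Illustrative sketch, not part of the patch: the ownership-passing style
// of the new InitializeCodeGenerator(linkage, buffer) signature above. Taking
// std::unique_ptr by value and std::move-ing it onward makes the transfer
// explicit at every call site. Standalone C++ with hypothetical Buffer and
// Consumer names:
//
//   #include <cassert>
//   #include <memory>
//   #include <utility>
//
//   struct Buffer { int size = 256; };
//
//   struct Consumer {
//     explicit Consumer(std::unique_ptr<Buffer> buf) : buf_(std::move(buf)) {}
//     std::unique_ptr<Buffer> buf_;
//   };
//
//   void Initialize(std::unique_ptr<Buffer> buffer) {
//     Consumer consumer(std::move(buffer));  // consumer now owns the buffer
//     assert(consumer.buf_->size == 256);
//   }
//
//   int main() {
//     auto buffer = std::make_unique<Buffer>();
//     Initialize(std::move(buffer));  // caller explicitly gives up ownership
//     assert(buffer == nullptr);      // moved-from unique_ptr is empty
//     return 0;
//   }
// --- end sketch
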
@@ -434,15 +442,12 @@ class PipelineData {
const char* debug_name() const { return debug_name_.get(); }
- int wasm_function_index() const { return wasm_function_index_; }
-
private:
Isolate* const isolate_;
wasm::WasmEngine* const wasm_engine_ = nullptr;
AccountingAllocator* const allocator_;
OptimizedCompilationInfo* const info_;
std::unique_ptr<char[]> debug_name_;
- int wasm_function_index_ = -1;
bool may_have_unverifiable_graph_ = true;
ZoneStats* const zone_stats_;
PipelineStatistics* pipeline_statistics_ = nullptr;
@@ -483,7 +488,7 @@ class PipelineData {
ZoneStats::Scope codegen_zone_scope_;
Zone* codegen_zone_;
CompilationDependencies* dependencies_ = nullptr;
- JSHeapBroker* js_heap_broker_ = nullptr;
+ JSHeapBroker* broker_ = nullptr;
Frame* frame_ = nullptr;
// All objects in the following group of fields are allocated in
@@ -510,12 +515,8 @@ class PipelineImpl final {
explicit PipelineImpl(PipelineData* data) : data_(data) {}
// Helpers for executing pipeline phases.
- template <typename Phase>
- void Run();
- template <typename Phase, typename Arg0>
- void Run(Arg0 arg_0);
- template <typename Phase, typename Arg0, typename Arg1>
- void Run(Arg0 arg_0, Arg1 arg_1);
+ template <typename Phase, typename... Args>
+ void Run(Args&&... args);
// Step A. Run the graph creation and initial optimization passes.
bool CreateGraph();
@@ -530,7 +531,8 @@ class PipelineImpl final {
bool SelectInstructions(Linkage* linkage);
// Step C. Run the code assembly pass.
- void AssembleCode(Linkage* linkage);
+ void AssembleCode(Linkage* linkage,
+ std::unique_ptr<AssemblerBuffer> buffer = {});
// Step D. Run the code finalization pass.
MaybeHandle<Code> FinalizeCode();
@@ -540,6 +542,7 @@ class PipelineImpl final {
void VerifyGeneratedCodeIsIdempotent();
void RunPrintAndVerify(const char* phase, bool untyped = false);
+ bool SelectInstructionsAndAssemble(CallDescriptor* call_descriptor);
MaybeHandle<Code> GenerateCode(CallDescriptor* call_descriptor);
void AllocateRegisters(const RegisterConfiguration* config,
CallDescriptor* call_descriptor, bool run_verifier);
@@ -561,7 +564,7 @@ void PrintFunctionSource(OptimizedCompilationInfo* info, Isolate* isolate,
if (!script->source()->IsUndefined(isolate)) {
CodeTracer::Scope tracing_scope(isolate->GetCodeTracer());
- Object* source_name = script->name();
+ Object source_name = script->name();
OFStream os(tracing_scope.file());
os << "--- FUNCTION SOURCE (";
if (source_name->IsString()) {
@@ -574,8 +577,8 @@ void PrintFunctionSource(OptimizedCompilationInfo* info, Isolate* isolate,
DisallowHeapAllocation no_allocation;
int start = shared->StartPosition();
int len = shared->EndPosition() - start;
- String::SubStringRange source(String::cast(script->source()), start,
- len);
+ SubStringRange source(String::cast(script->source()), no_allocation,
+ start, len);
for (const auto& c : source) {
os << AsReversiblyEscapedUC16(c);
}
@@ -631,12 +634,9 @@ void PrintCode(Isolate* isolate, Handle<Code> code,
#ifdef ENABLE_DISASSEMBLER
AllowDeferredHandleDereference allow_deference_for_print_code;
bool print_code =
- isolate->bootstrapper()->IsActive()
- ? FLAG_print_builtin_code && info->shared_info()->PassesFilter(
- FLAG_print_builtin_code_filter)
- : (FLAG_print_code || (info->IsStub() && FLAG_print_code_stubs) ||
- (info->IsOptimizing() && FLAG_print_opt_code &&
- info->shared_info()->PassesFilter(FLAG_print_opt_code_filter)));
+ FLAG_print_code ||
+ (info->IsOptimizing() && FLAG_print_opt_code &&
+ info->shared_info()->PassesFilter(FLAG_print_opt_code_filter));
if (print_code) {
std::unique_ptr<char[]> debug_name = info->GetDebugName();
CodeTracer::Scope tracing_scope(isolate->GetCodeTracer());
@@ -806,11 +806,10 @@ PipelineStatistics* CreatePipelineStatistics(Handle<Script> script,
return pipeline_statistics;
}
-PipelineStatistics* CreatePipelineStatistics(wasm::WasmEngine* wasm_engine,
- wasm::FunctionBody function_body,
- wasm::WasmModule* wasm_module,
- OptimizedCompilationInfo* info,
- ZoneStats* zone_stats) {
+PipelineStatistics* CreatePipelineStatistics(
+ wasm::WasmEngine* wasm_engine, wasm::FunctionBody function_body,
+ const wasm::WasmModule* wasm_module, OptimizedCompilationInfo* info,
+ ZoneStats* zone_stats) {
PipelineStatistics* pipeline_statistics = nullptr;
if (FLAG_turbo_stats_wasm) {
@@ -893,7 +892,7 @@ class PipelineCompilationJob final : public OptimizedCompilationJob {
PipelineCompilationJob::Status PipelineCompilationJob::PrepareJobImpl(
Isolate* isolate) {
- if (compilation_info()->shared_info()->GetBytecodeArray()->length() >
+ if (compilation_info()->bytecode_array()->length() >
kMaxBytecodeSizeForTurbofan) {
return AbortOptimization(BailoutReason::kFunctionTooBig);
}
@@ -926,7 +925,7 @@ PipelineCompilationJob::Status PipelineCompilationJob::PrepareJobImpl(
compilation_info()->MarkAsAllocationFoldingEnabled();
}
- if (compilation_info()->closure()->feedback_cell()->map() ==
+ if (compilation_info()->closure()->raw_feedback_cell()->map() ==
ReadOnlyRoots(isolate).one_closure_cell_map()) {
compilation_info()->MarkAsFunctionContextSpecializing();
}
@@ -944,10 +943,10 @@ PipelineCompilationJob::Status PipelineCompilationJob::PrepareJobImpl(
if (compilation_info()->is_osr()) data_.InitializeOsrHelper();
- // Make sure that we have generated the maximal number of deopt entries.
- // This is in order to avoid triggering the generation of deopt entries later
- // during code assembly.
- Deoptimizer::EnsureCodeForMaxDeoptimizationEntries(isolate);
+ // Make sure that we have generated the deopt entries code. This is in order
+ // to avoid triggering the generation of deopt entries later during code
+ // assembly.
+ Deoptimizer::EnsureCodeForDeoptimizationEntries(isolate);
return SUCCEEDED;
}
@@ -973,7 +972,7 @@ PipelineCompilationJob::Status PipelineCompilationJob::FinalizeJobImpl(
}
compilation_info()->SetCode(code);
- compilation_info()->context()->native_context()->AddOptimizedCode(*code);
+ compilation_info()->native_context()->AddOptimizedCode(*code);
RegisterWeakObjectsInOptimizedCode(code, isolate);
return SUCCEEDED;
}
@@ -1003,149 +1002,11 @@ void PipelineCompilationJob::RegisterWeakObjectsInOptimizedCode(
code->set_can_have_weak_objects(true);
}
-// The stack limit used during compilation is used to limit the recursion
-// depth in, e.g. AST walking. No such recursion happens in WASM compilations.
-constexpr uintptr_t kNoStackLimit = 0;
-
-class PipelineWasmCompilationJob final : public OptimizedCompilationJob {
- public:
- explicit PipelineWasmCompilationJob(
- OptimizedCompilationInfo* info, wasm::WasmEngine* wasm_engine,
- MachineGraph* mcgraph, CallDescriptor* call_descriptor,
- SourcePositionTable* source_positions, NodeOriginTable* node_origins,
- wasm::FunctionBody function_body, wasm::WasmModule* wasm_module,
- wasm::NativeModule* native_module, int function_index, bool asmjs_origin)
- : OptimizedCompilationJob(kNoStackLimit, info, "TurboFan",
- State::kReadyToExecute),
- zone_stats_(wasm_engine->allocator()),
- pipeline_statistics_(CreatePipelineStatistics(
- wasm_engine, function_body, wasm_module, info, &zone_stats_)),
- data_(&zone_stats_, wasm_engine, info, mcgraph,
- pipeline_statistics_.get(), source_positions, node_origins,
- function_index, WasmAssemblerOptions()),
- pipeline_(&data_),
- linkage_(call_descriptor),
- native_module_(native_module),
- asmjs_origin_(asmjs_origin) {}
-
- protected:
- Status PrepareJobImpl(Isolate* isolate) final;
- Status ExecuteJobImpl() final;
- Status FinalizeJobImpl(Isolate* isolate) final;
-
- private:
- ZoneStats zone_stats_;
- std::unique_ptr<PipelineStatistics> pipeline_statistics_;
- PipelineData data_;
- PipelineImpl pipeline_;
- Linkage linkage_;
- wasm::NativeModule* native_module_;
- bool asmjs_origin_;
-};
-
-PipelineWasmCompilationJob::Status PipelineWasmCompilationJob::PrepareJobImpl(
- Isolate* isolate) {
- UNREACHABLE(); // Prepare should always be skipped for WasmCompilationJob.
-}
-
-PipelineWasmCompilationJob::Status
-PipelineWasmCompilationJob::ExecuteJobImpl() {
- pipeline_.RunPrintAndVerify("Machine", true);
-
- PipelineData* data = &data_;
- data->BeginPhaseKind("wasm optimization");
- if (FLAG_wasm_opt || asmjs_origin_) {
- PipelineRunScope scope(data, "wasm full optimization");
- GraphReducer graph_reducer(scope.zone(), data->graph(),
- data->mcgraph()->Dead());
- DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
- data->common(), scope.zone());
- ValueNumberingReducer value_numbering(scope.zone(), data->graph()->zone());
- MachineOperatorReducer machine_reducer(data->mcgraph(), asmjs_origin_);
- CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
- data->js_heap_broker(), data->common(),
- data->machine(), scope.zone());
- AddReducer(data, &graph_reducer, &dead_code_elimination);
- AddReducer(data, &graph_reducer, &machine_reducer);
- AddReducer(data, &graph_reducer, &common_reducer);
- AddReducer(data, &graph_reducer, &value_numbering);
- graph_reducer.ReduceGraph();
- } else {
- PipelineRunScope scope(data, "wasm base optimization");
- GraphReducer graph_reducer(scope.zone(), data->graph(),
- data->mcgraph()->Dead());
- ValueNumberingReducer value_numbering(scope.zone(), data->graph()->zone());
- AddReducer(data, &graph_reducer, &value_numbering);
- graph_reducer.ReduceGraph();
- }
- pipeline_.RunPrintAndVerify("wasm optimization", true);
-
- if (data_.node_origins()) {
- data_.node_origins()->RemoveDecorator();
- }
-
- pipeline_.ComputeScheduledGraph();
- if (!pipeline_.SelectInstructions(&linkage_)) return FAILED;
- pipeline_.AssembleCode(&linkage_);
-
- CodeGenerator* code_generator = pipeline_.code_generator();
- CodeDesc code_desc;
- code_generator->tasm()->GetCode(nullptr, &code_desc);
-
- wasm::WasmCode* code = native_module_->AddCode(
- data_.wasm_function_index(), code_desc,
- code_generator->frame()->GetTotalFrameSlotCount(),
- code_generator->GetSafepointTableOffset(),
- code_generator->GetHandlerTableOffset(),
- code_generator->GetProtectedInstructions(),
- code_generator->GetSourcePositionTable(), wasm::WasmCode::kTurbofan);
-
- if (data_.info()->trace_turbo_json_enabled()) {
- TurboJsonFile json_of(data_.info(), std::ios_base::app);
- json_of << "{\"name\":\"disassembly\",\"type\":\"disassembly\",\"data\":\"";
-#ifdef ENABLE_DISASSEMBLER
- std::stringstream disassembler_stream;
- Disassembler::Decode(
- nullptr, &disassembler_stream, code->instructions().start(),
- code->instructions().start() + code->safepoint_table_offset(),
- CodeReference(code));
- for (auto const c : disassembler_stream.str()) {
- json_of << AsEscapedUC16ForJSON(c);
- }
-#endif // ENABLE_DISASSEMBLER
- json_of << "\"}\n]";
- json_of << "\n}";
- }
-
- compilation_info()->SetCode(code);
-
- return SUCCEEDED;
-}
-
-PipelineWasmCompilationJob::Status PipelineWasmCompilationJob::FinalizeJobImpl(
- Isolate* isolate) {
- UNREACHABLE(); // Finalize should always be skipped for WasmCompilationJob.
-}
-
-template <typename Phase>
-void PipelineImpl::Run() {
+template <typename Phase, typename... Args>
+void PipelineImpl::Run(Args&&... args) {
PipelineRunScope scope(this->data_, Phase::phase_name());
Phase phase;
- phase.Run(this->data_, scope.zone());
-}
-
-template <typename Phase, typename Arg0>
-void PipelineImpl::Run(Arg0 arg_0) {
- PipelineRunScope scope(this->data_, Phase::phase_name());
- Phase phase;
- phase.Run(this->data_, scope.zone(), arg_0);
-}
-
-template <typename Phase, typename Arg0, typename Arg1>
-void PipelineImpl::Run(Arg0 arg_0, Arg1 arg_1) {
- PipelineRunScope scope(this->data_, Phase::phase_name());
- Phase phase;
- phase.Run(this->data_, scope.zone(), arg_0, arg_1);
+ phase.Run(this->data_, scope.zone(), std::forward<Args>(args)...);
}
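
// --- Illustrative sketch, not part of the patch: the hunk above replaces
// three fixed-arity Run overloads with a single variadic template that
// perfectly forwards its arguments to Phase::Run. A minimal standalone model
// of the same pattern:
//
//   #include <cstdio>
//   #include <utility>
//
//   struct PhaseA {
//     static const char* phase_name() { return "phase A"; }
//     void Run(int data) { std::printf("%s: %d\n", phase_name(), data); }
//   };
//
//   struct PhaseB {
//     static const char* phase_name() { return "phase B"; }
//     void Run(int data, const char* extra) {
//       std::printf("%s: %d %s\n", phase_name(), data, extra);
//     }
//   };
//
//   template <typename Phase, typename... Args>
//   void Run(int data, Args&&... args) {
//     Phase phase;
//     phase.Run(data, std::forward<Args>(args)...);  // one body, any arity
//   }
//
//   int main() {
//     Run<PhaseA>(1);
//     Run<PhaseB>(2, "arg");
//     return 0;
//   }
// --- end sketch
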
struct GraphBuilderPhase {
@@ -1158,7 +1019,7 @@ struct GraphBuilderPhase {
}
CallFrequency frequency = CallFrequency(1.0f);
BytecodeGraphBuilder graph_builder(
- temp_zone, data->info()->shared_info(),
+ temp_zone, data->info()->bytecode_array(), data->info()->shared_info(),
handle(data->info()->closure()->feedback_vector(), data->isolate()),
data->info()->osr_offset(), data->jsgraph(), frequency,
data->source_positions(), data->native_context(),
@@ -1171,7 +1032,7 @@ struct GraphBuilderPhase {
namespace {
Maybe<OuterContext> GetModuleContext(Handle<JSFunction> closure) {
- Context* current = closure->context();
+ Context current = closure->context();
size_t distance = 0;
while (!current->IsNativeContext()) {
if (current->IsModuleContext()) {
@@ -1207,16 +1068,15 @@ struct InliningPhase {
data->common(), temp_zone);
CheckpointElimination checkpoint_elimination(&graph_reducer);
CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
- data->js_heap_broker(), data->common(),
+ data->broker(), data->common(),
data->machine(), temp_zone);
- JSCallReducer call_reducer(&graph_reducer, data->jsgraph(),
- data->js_heap_broker(),
+ JSCallReducer call_reducer(&graph_reducer, data->jsgraph(), data->broker(),
data->info()->is_bailout_on_uninitialized()
? JSCallReducer::kBailoutOnUninitialized
: JSCallReducer::kNoFlags,
- data->native_context(), data->dependencies());
+ data->dependencies());
JSContextSpecialization context_specialization(
- &graph_reducer, data->jsgraph(), data->js_heap_broker(),
+ &graph_reducer, data->jsgraph(), data->broker(),
ChooseSpecializationContext(isolate, data->info()),
data->info()->is_function_context_specializing()
? data->info()->closure()
@@ -1233,13 +1093,14 @@ struct InliningPhase {
// JSNativeContextSpecialization allocates out-of-heap objects
// that need to live until code generation.
JSNativeContextSpecialization native_context_specialization(
- &graph_reducer, data->jsgraph(), data->js_heap_broker(), flags,
+ &graph_reducer, data->jsgraph(), data->broker(), flags,
data->native_context(), data->dependencies(), temp_zone, info->zone());
- JSInliningHeuristic inlining(
- &graph_reducer, data->info()->is_inlining_enabled()
- ? JSInliningHeuristic::kGeneralInlining
- : JSInliningHeuristic::kRestrictedInlining,
- temp_zone, data->info(), data->jsgraph(), data->source_positions());
+ JSInliningHeuristic inlining(&graph_reducer,
+ data->info()->is_inlining_enabled()
+ ? JSInliningHeuristic::kGeneralInlining
+ : JSInliningHeuristic::kRestrictedInlining,
+ temp_zone, data->info(), data->jsgraph(),
+ data->broker(), data->source_positions());
JSIntrinsicLowering intrinsic_lowering(&graph_reducer, data->jsgraph());
AddReducer(data, &graph_reducer, &dead_code_elimination);
AddReducer(data, &graph_reducer, &checkpoint_elimination);
@@ -1306,7 +1167,7 @@ struct SerializeStandardObjectsPhase {
static const char* phase_name() { return "serialize standard objects"; }
void Run(PipelineData* data, Zone* temp_zone) {
- data->js_heap_broker()->SerializeStandardObjects();
+ data->broker()->SerializeStandardObjects();
}
};
@@ -1316,7 +1177,7 @@ struct CopyMetadataForConcurrentCompilePhase {
void Run(PipelineData* data, Zone* temp_zone) {
GraphReducer graph_reducer(temp_zone, data->graph(),
data->jsgraph()->Dead());
- JSHeapCopyReducer heap_copy_reducer(data->js_heap_broker());
+ JSHeapCopyReducer heap_copy_reducer(data->broker());
AddReducer(data, &graph_reducer, &heap_copy_reducer);
graph_reducer.ReduceGraph();
@@ -1327,6 +1188,19 @@ struct CopyMetadataForConcurrentCompilePhase {
}
};
+// TODO(turbofan): Move all calls from CopyMetadataForConcurrentCompilePhase
+// here. Also all the calls to Serialize* methods that are currently sprinkled
+// over inlining will move here as well.
+struct SerializationPhase {
+ static const char* phase_name() { return "serialize bytecode"; }
+
+ void Run(PipelineData* data, Zone* temp_zone) {
+ SerializerForBackgroundCompilation serializer(data->broker(), temp_zone,
+ data->info()->closure());
+ serializer.Run();
+ }
+};
+
struct TypedLoweringPhase {
static const char* phase_name() { return "typed lowering"; }
@@ -1336,26 +1210,25 @@ struct TypedLoweringPhase {
DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
data->common(), temp_zone);
JSCreateLowering create_lowering(&graph_reducer, data->dependencies(),
- data->jsgraph(), data->js_heap_broker(),
+ data->jsgraph(), data->broker(),
temp_zone);
JSTypedLowering typed_lowering(&graph_reducer, data->jsgraph(),
- data->js_heap_broker(), temp_zone);
+ data->broker(), temp_zone);
ConstantFoldingReducer constant_folding_reducer(
- &graph_reducer, data->jsgraph(), data->js_heap_broker());
+ &graph_reducer, data->jsgraph(), data->broker());
TypedOptimization typed_optimization(&graph_reducer, data->dependencies(),
- data->jsgraph(),
- data->js_heap_broker());
+ data->jsgraph(), data->broker());
SimplifiedOperatorReducer simple_reducer(&graph_reducer, data->jsgraph(),
- data->js_heap_broker());
+ data->broker());
CheckpointElimination checkpoint_elimination(&graph_reducer);
CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
- data->js_heap_broker(), data->common(),
+ data->broker(), data->common(),
data->machine(), temp_zone);
AddReducer(data, &graph_reducer, &dead_code_elimination);
AddReducer(data, &graph_reducer, &create_lowering);
AddReducer(data, &graph_reducer, &constant_folding_reducer);
- AddReducer(data, &graph_reducer, &typed_optimization);
AddReducer(data, &graph_reducer, &typed_lowering);
+ AddReducer(data, &graph_reducer, &typed_optimization);
AddReducer(data, &graph_reducer, &simple_reducer);
AddReducer(data, &graph_reducer, &checkpoint_elimination);
AddReducer(data, &graph_reducer, &common_reducer);
@@ -1385,9 +1258,8 @@ struct SimplifiedLoweringPhase {
static const char* phase_name() { return "simplified lowering"; }
void Run(PipelineData* data, Zone* temp_zone) {
- SimplifiedLowering lowering(data->jsgraph(), data->js_heap_broker(),
- temp_zone, data->source_positions(),
- data->node_origins(),
+ SimplifiedLowering lowering(data->jsgraph(), data->broker(), temp_zone,
+ data->source_positions(), data->node_origins(),
data->info()->GetPoisoningMitigationLevel());
lowering.LowerAllNodes();
}
@@ -1439,12 +1311,12 @@ struct EarlyOptimizationPhase {
DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
data->common(), temp_zone);
SimplifiedOperatorReducer simple_reducer(&graph_reducer, data->jsgraph(),
- data->js_heap_broker());
+ data->broker());
RedundancyElimination redundancy_elimination(&graph_reducer, temp_zone);
ValueNumberingReducer value_numbering(temp_zone, data->graph()->zone());
- MachineOperatorReducer machine_reducer(data->jsgraph());
+ MachineOperatorReducer machine_reducer(&graph_reducer, data->jsgraph());
CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
- data->js_heap_broker(), data->common(),
+ data->broker(), data->common(),
data->machine(), temp_zone);
AddReducer(data, &graph_reducer, &dead_code_elimination);
AddReducer(data, &graph_reducer, &simple_reducer);
@@ -1514,9 +1386,9 @@ struct EffectControlLinearizationPhase {
data->jsgraph()->Dead());
DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
data->common(), temp_zone);
- CommonOperatorReducer common_reducer(
- &graph_reducer, data->graph(), data->js_heap_broker(), data->common(),
- data->machine(), temp_zone);
+ CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
+ data->broker(), data->common(),
+ data->machine(), temp_zone);
AddReducer(data, &graph_reducer, &dead_code_elimination);
AddReducer(data, &graph_reducer, &common_reducer);
graph_reducer.ReduceGraph();
@@ -1553,18 +1425,21 @@ struct LoadEliminationPhase {
CheckpointElimination checkpoint_elimination(&graph_reducer);
ValueNumberingReducer value_numbering(temp_zone, data->graph()->zone());
CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
- data->js_heap_broker(), data->common(),
+ data->broker(), data->common(),
data->machine(), temp_zone);
+ TypedOptimization typed_optimization(&graph_reducer, data->dependencies(),
+ data->jsgraph(), data->broker());
ConstantFoldingReducer constant_folding_reducer(
- &graph_reducer, data->jsgraph(), data->js_heap_broker());
+ &graph_reducer, data->jsgraph(), data->broker());
TypeNarrowingReducer type_narrowing_reducer(&graph_reducer, data->jsgraph(),
- data->js_heap_broker());
+ data->broker());
AddReducer(data, &graph_reducer, &branch_condition_elimination);
AddReducer(data, &graph_reducer, &dead_code_elimination);
AddReducer(data, &graph_reducer, &redundancy_elimination);
AddReducer(data, &graph_reducer, &load_elimination);
AddReducer(data, &graph_reducer, &type_narrowing_reducer);
AddReducer(data, &graph_reducer, &constant_folding_reducer);
+ AddReducer(data, &graph_reducer, &typed_optimization);
AddReducer(data, &graph_reducer, &checkpoint_elimination);
AddReducer(data, &graph_reducer, &common_reducer);
AddReducer(data, &graph_reducer, &value_numbering);
@@ -1603,9 +1478,9 @@ struct LateOptimizationPhase {
DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
data->common(), temp_zone);
ValueNumberingReducer value_numbering(temp_zone, data->graph()->zone());
- MachineOperatorReducer machine_reducer(data->jsgraph());
+ MachineOperatorReducer machine_reducer(&graph_reducer, data->jsgraph());
CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
- data->js_heap_broker(), data->common(),
+ data->broker(), data->common(),
data->machine(), temp_zone);
SelectLowering select_lowering(data->jsgraph()->graph(),
data->jsgraph()->common());
@@ -1619,6 +1494,28 @@ struct LateOptimizationPhase {
}
};
+struct CsaOptimizationPhase {
+ static const char* phase_name() { return "csa optimization"; }
+
+ void Run(PipelineData* data, Zone* temp_zone) {
+ GraphReducer graph_reducer(temp_zone, data->graph(),
+ data->jsgraph()->Dead());
+ BranchElimination branch_condition_elimination(&graph_reducer,
+ data->jsgraph(), temp_zone);
+ DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
+ data->common(), temp_zone);
+ MachineOperatorReducer machine_reducer(&graph_reducer, data->jsgraph());
+ CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
+ data->broker(), data->common(),
+ data->machine(), temp_zone);
+ AddReducer(data, &graph_reducer, &branch_condition_elimination);
+ AddReducer(data, &graph_reducer, &dead_code_elimination);
+ AddReducer(data, &graph_reducer, &machine_reducer);
+ AddReducer(data, &graph_reducer, &common_reducer);
+ graph_reducer.ReduceGraph();
+ }
+};
+
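
// --- Illustrative sketch, not part of the patch: the AddReducer/ReduceGraph
// composition used by the new CsaOptimizationPhase above, modeled as a
// fixpoint over a list of rewrite functions on an int. Each "reducer" either
// changes the value or leaves it alone; reduction repeats until no reducer
// fires. Standalone C++:
//
//   #include <cassert>
//   #include <functional>
//   #include <vector>
//
//   int ReduceToFixpoint(int value,
//                        const std::vector<std::function<int(int)>>& reducers) {
//     bool changed = true;
//     while (changed) {
//       changed = false;
//       for (const auto& reduce : reducers) {
//         int next = reduce(value);
//         if (next != value) {
//           value = next;
//           changed = true;  // rerun all reducers on the new value
//         }
//       }
//     }
//     return value;
//   }
//
//   int main() {
//     std::vector<std::function<int(int)>> reducers = {
//         [](int v) { return v % 2 ? v : v / 2; },  // halve while even
//         [](int v) { return v > 9 ? v - 3 : v; },  // shrink while > 9
//     };
//     assert(ReduceToFixpoint(40, reducers) == 7);  // 40 -> 20 -> 17 -> 14 -> 7
//     return 0;
//   }
// --- end sketch
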
struct EarlyGraphTrimmingPhase {
static const char* phase_name() { return "early trimming"; }
void Run(PipelineData* data, Zone* temp_zone) {
@@ -1705,7 +1602,8 @@ struct InstructionSelectionPhase {
FLAG_turbo_instruction_scheduling
? InstructionSelector::kEnableScheduling
: InstructionSelector::kDisableScheduling,
- !data->isolate() || data->isolate()->serializer_enabled()
+ !data->isolate() || data->isolate()->serializer_enabled() ||
+ data->isolate()->ShouldLoadConstantsFromRootList()
? InstructionSelector::kDisableRootsRelativeAddressing
: InstructionSelector::kEnableRootsRelativeAddressing,
data->info()->GetPoisoningMitigationLevel(),
@@ -1756,6 +1654,14 @@ struct BuildLiveRangesPhase {
}
};
+struct BuildBundlesPhase {
+ static const char* phase_name() { return "build live range bundles"; }
+
+ void Run(PipelineData* data, Zone* temp_zone) {
+ BundleBuilder builder(data->register_allocation_data());
+ builder.BuildBundles();
+ }
+};
struct SplinterLiveRangesPhase {
static const char* phase_name() { return "splinter live ranges"; }
@@ -2005,9 +1911,21 @@ bool PipelineImpl::CreateGraph() {
data->node_origins()->AddDecorator();
}
+ if (FLAG_concurrent_inlining) {
+ data->broker()->StartSerializing();
+ Run<SerializeStandardObjectsPhase>();
+ Run<SerializationPhase>();
+ } else {
+ data->broker()->SetNativeContextRef();
+ }
+
Run<GraphBuilderPhase>();
RunPrintAndVerify(GraphBuilderPhase::phase_name(), true);
+ if (FLAG_concurrent_inlining) {
+ Run<CopyMetadataForConcurrentCompilePhase>();
+ }
+
// Perform function context specialization and inlining (if enabled).
Run<InliningPhase>();
RunPrintAndVerify(InliningPhase::phase_name(), true);
@@ -2031,20 +1949,16 @@ bool PipelineImpl::CreateGraph() {
// Run the type-sensitive lowerings and optimizations on the graph.
{
- if (FLAG_concurrent_compiler_frontend) {
- data->js_heap_broker()->StartSerializing();
- Run<SerializeStandardObjectsPhase>();
+ if (FLAG_concurrent_inlining) {
+ // TODO(neis): Remove CopyMetadataForConcurrentCompilePhase call once
+ // brokerization of JSNativeContextSpecialization is complete.
Run<CopyMetadataForConcurrentCompilePhase>();
- data->js_heap_broker()->StopSerializing();
+ data->broker()->StopSerializing();
} else {
- data->js_heap_broker()->SetNativeContextRef();
- // Type the graph and keep the Typer running such that new nodes get
- // automatically typed when they are created.
- Run<TyperPhase>(data->CreateTyper());
- RunPrintAndVerify(TyperPhase::phase_name());
- Run<TypedLoweringPhase>();
- RunPrintAndVerify(TypedLoweringPhase::phase_name());
- data->DeleteTyper();
+ data->broker()->StartSerializing();
+ Run<SerializeStandardObjectsPhase>();
+ Run<CopyMetadataForConcurrentCompilePhase>();
+ data->broker()->StopSerializing();
}
}
@@ -2058,15 +1972,12 @@ bool PipelineImpl::OptimizeGraph(Linkage* linkage) {
data->BeginPhaseKind("lowering");
- if (FLAG_concurrent_compiler_frontend) {
- // Type the graph and keep the Typer running such that new nodes get
- // automatically typed when they are created.
- Run<TyperPhase>(data->CreateTyper());
- RunPrintAndVerify(TyperPhase::phase_name());
- Run<TypedLoweringPhase>();
- RunPrintAndVerify(TypedLoweringPhase::phase_name());
- data->DeleteTyper();
- }
+ // Type the graph and keep the Typer running such that new nodes get
+ // automatically typed when they are created.
+ Run<TyperPhase>(data->CreateTyper());
+ RunPrintAndVerify(TyperPhase::phase_name());
+ Run<TypedLoweringPhase>();
+ RunPrintAndVerify(TypedLoweringPhase::phase_name());
if (data->info()->is_loop_peeling_enabled()) {
Run<LoopPeelingPhase>();
@@ -2080,6 +1991,7 @@ bool PipelineImpl::OptimizeGraph(Linkage* linkage) {
Run<LoadEliminationPhase>();
RunPrintAndVerify(LoadEliminationPhase::phase_name());
}
+ data->DeleteTyper();
if (FLAG_turbo_escape) {
Run<EscapeAnalysisPhase>();
@@ -2156,12 +2068,11 @@ bool PipelineImpl::OptimizeGraph(Linkage* linkage) {
MaybeHandle<Code> Pipeline::GenerateCodeForCodeStub(
Isolate* isolate, CallDescriptor* call_descriptor, Graph* graph,
- Schedule* schedule, Code::Kind kind, const char* debug_name,
- uint32_t stub_key, int32_t builtin_index, JumpOptimizationInfo* jump_opt,
+ SourcePositionTable* source_positions, Code::Kind kind,
+ const char* debug_name, int32_t builtin_index,
PoisoningMitigationLevel poisoning_level, const AssemblerOptions& options) {
OptimizedCompilationInfo info(CStrVector(debug_name), graph->zone(), kind);
info.set_builtin_index(builtin_index);
- info.set_stub_key(stub_key);
if (poisoning_level != PoisoningMitigationLevel::kDontPoison) {
info.SetPoisoningMitigationLevel(poisoning_level);
@@ -2170,8 +2081,12 @@ MaybeHandle<Code> Pipeline::GenerateCodeForCodeStub(
// Construct a pipeline for scheduling and code generation.
ZoneStats zone_stats(isolate->allocator());
NodeOriginTable node_origins(graph);
- PipelineData data(&zone_stats, &info, isolate, graph, schedule, nullptr,
- &node_origins, jump_opt, options);
+ JumpOptimizationInfo jump_opt;
+ bool should_optimize_jumps =
+ isolate->serializer_enabled() && FLAG_turbo_rewrite_far_jumps;
+ PipelineData data(&zone_stats, &info, isolate, graph, nullptr,
+ source_positions, &node_origins,
+ should_optimize_jumps ? &jump_opt : nullptr, options);
data.set_verify_graph(FLAG_verify_csa);
std::unique_ptr<PipelineStatistics> pipeline_statistics;
if (FLAG_turbo_stats || FLAG_turbo_stats_nvp) {
@@ -2181,7 +2096,6 @@ MaybeHandle<Code> Pipeline::GenerateCodeForCodeStub(
}
PipelineImpl pipeline(&data);
- DCHECK_NOT_NULL(data.schedule());
if (info.trace_turbo_json_enabled() || info.trace_turbo_graph_enabled()) {
CodeTracer::Scope tracing_scope(data.GetCodeTracer());
@@ -2199,14 +2113,131 @@ MaybeHandle<Code> Pipeline::GenerateCodeForCodeStub(
pipeline.Run<PrintGraphPhase>("Machine");
}
- TraceSchedule(data.info(), &data, data.schedule(), "schedule");
+ // Optimize memory access and allocation operations.
+ pipeline.Run<MemoryOptimizationPhase>();
+ pipeline.RunPrintAndVerify(MemoryOptimizationPhase::phase_name(), true);
+
+ pipeline.Run<CsaOptimizationPhase>();
+ pipeline.RunPrintAndVerify(CsaOptimizationPhase::phase_name(), true);
+
+ pipeline.Run<VerifyGraphPhase>(true);
+ pipeline.ComputeScheduledGraph();
+ DCHECK_NOT_NULL(data.schedule());
+
+ // First run code generation on a copy of the pipeline, in order to be able to
+ // repeat it for jump optimization. The first run has to happen on a temporary
+ // pipeline to avoid deletion of zones on the main pipeline.
+ PipelineData second_data(&zone_stats, &info, isolate, data.graph(),
+ data.schedule(), data.source_positions(),
+ data.node_origins(), data.jump_optimization_info(),
+ options);
+ second_data.set_verify_graph(FLAG_verify_csa);
+ PipelineImpl second_pipeline(&second_data);
+ second_pipeline.SelectInstructionsAndAssemble(call_descriptor);
+
+ Handle<Code> code;
+ if (jump_opt.is_optimizable()) {
+ jump_opt.set_optimizing();
+ code = pipeline.GenerateCode(call_descriptor).ToHandleChecked();
+ } else {
+ code = second_pipeline.FinalizeCode().ToHandleChecked();
+ }
+
+ return code;
+}
+
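
// --- Illustrative sketch, not part of the patch: the measure-then-reassemble
// structure of the two-pipeline jump optimization above. A first pass
// assembles with conservative long jumps while recording whether every jump
// would also fit a short encoding; if so, a second pass re-runs assembly with
// short jumps. Byte sizes and names here are hypothetical. Standalone C++:
//
//   #include <cassert>
//   #include <cstdlib>
//   #include <vector>
//
//   struct JumpOptimizationInfo {
//     bool collecting = true;
//     bool optimizable = false;
//   };
//
//   // Returns the total code size for a list of jump distances.
//   int Assemble(const std::vector<int>& jumps, JumpOptimizationInfo* info) {
//     int size = 0;
//     bool all_short = true;
//     for (int distance : jumps) {
//       if (std::abs(distance) > 127) all_short = false;
//       // The measuring pass always emits 5-byte jumps; the optimizing
//       // rerun may emit 2-byte ones.
//       size += (!info->collecting && info->optimizable) ? 2 : 5;
//     }
//     if (info->collecting) info->optimizable = all_short;
//     return size;
//   }
//
//   int main() {
//     std::vector<int> jumps = {12, -30, 100};
//     JumpOptimizationInfo info;
//     assert(Assemble(jumps, &info) == 15 && info.optimizable);  // measuring run
//     info.collecting = false;
//     assert(Assemble(jumps, &info) == 6);                       // optimizing rerun
//     return 0;
//   }
// --- end sketch
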
+// static
+wasm::WasmCode* Pipeline::GenerateCodeForWasmNativeStub(
+ wasm::WasmEngine* wasm_engine, CallDescriptor* call_descriptor,
+ MachineGraph* mcgraph, Code::Kind kind, int wasm_kind,
+ const char* debug_name, const AssemblerOptions& options,
+ wasm::NativeModule* native_module, SourcePositionTable* source_positions) {
+ Graph* graph = mcgraph->graph();
+ OptimizedCompilationInfo info(CStrVector(debug_name), graph->zone(), kind);
+ // Construct a pipeline for scheduling and code generation.
+ ZoneStats zone_stats(wasm_engine->allocator());
+ NodeOriginTable* node_positions = new (graph->zone()) NodeOriginTable(graph);
+ PipelineData data(&zone_stats, wasm_engine, &info, mcgraph, nullptr,
+ source_positions, node_positions, options);
+ std::unique_ptr<PipelineStatistics> pipeline_statistics;
+ if (FLAG_turbo_stats || FLAG_turbo_stats_nvp) {
+ pipeline_statistics.reset(new PipelineStatistics(
+ &info, wasm_engine->GetOrCreateTurboStatistics(), &zone_stats));
+ pipeline_statistics->BeginPhaseKind("wasm stub codegen");
+ }
+
+ PipelineImpl pipeline(&data);
+
+ if (info.trace_turbo_json_enabled() || info.trace_turbo_graph_enabled()) {
+ CodeTracer::Scope tracing_scope(data.GetCodeTracer());
+ OFStream os(tracing_scope.file());
+ os << "---------------------------------------------------\n"
+ << "Begin compiling method " << info.GetDebugName().get()
+ << " using Turbofan" << std::endl;
+ }
+
+ if (info.trace_turbo_graph_enabled()) { // Simple textual RPO.
+ StdoutStream{} << "-- wasm stub " << Code::Kind2String(kind) << " graph -- "
+ << std::endl
+ << AsRPO(*graph);
+ }
+
+ if (info.trace_turbo_json_enabled()) {
+ TurboJsonFile json_of(&info, std::ios_base::trunc);
+ json_of << "{\"function\":\"" << info.GetDebugName().get()
+ << "\", \"source\":\"\",\n\"phases\":[";
+ }
+
+ pipeline.RunPrintAndVerify("machine", true);
+ pipeline.ComputeScheduledGraph();
+
+ Linkage linkage(call_descriptor);
+ if (!pipeline.SelectInstructions(&linkage)) return nullptr;
+ pipeline.AssembleCode(&linkage);
+
+ CodeGenerator* code_generator = pipeline.code_generator();
+ CodeDesc code_desc;
+ code_generator->tasm()->GetCode(nullptr, &code_desc);
+
+ wasm::WasmCode* code = native_module->AddCode(
+ wasm::WasmCode::kAnonymousFuncIndex, code_desc,
+ code_generator->frame()->GetTotalFrameSlotCount(),
+ code_generator->GetSafepointTableOffset(),
+ code_generator->GetHandlerTableOffset(),
+ code_generator->GetProtectedInstructions(),
+ code_generator->GetSourcePositionTable(),
+ static_cast<wasm::WasmCode::Kind>(wasm_kind), wasm::WasmCode::kOther);
+
+ if (info.trace_turbo_json_enabled()) {
+ TurboJsonFile json_of(&info, std::ios_base::app);
+ json_of << "{\"name\":\"disassembly\",\"type\":\"disassembly\",\"data\":\"";
+#ifdef ENABLE_DISASSEMBLER
+ std::stringstream disassembler_stream;
+ Disassembler::Decode(
+ nullptr, &disassembler_stream, code->instructions().start(),
+ code->instructions().start() + code->safepoint_table_offset(),
+ CodeReference(code));
+ for (auto const c : disassembler_stream.str()) {
+ json_of << AsEscapedUC16ForJSON(c);
+ }
+#endif // ENABLE_DISASSEMBLER
+ json_of << "\"}\n]";
+ json_of << "\n}";
+ }
+
+ if (info.trace_turbo_json_enabled() || info.trace_turbo_graph_enabled()) {
+ CodeTracer::Scope tracing_scope(data.GetCodeTracer());
+ OFStream os(tracing_scope.file());
+ os << "---------------------------------------------------\n"
+ << "Finished compiling method " << info.GetDebugName().get()
+ << " using Turbofan" << std::endl;
+ }
- pipeline.Run<VerifyGraphPhase>(false, true);
- return pipeline.GenerateCode(call_descriptor);
+ return code;
}
// static
-MaybeHandle<Code> Pipeline::GenerateCodeForWasmStub(
+MaybeHandle<Code> Pipeline::GenerateCodeForWasmHeapStub(
Isolate* isolate, CallDescriptor* call_descriptor, Graph* graph,
Code::Kind kind, const char* debug_name, const AssemblerOptions& options,
SourcePositionTable* source_positions) {
@@ -2225,6 +2256,15 @@ MaybeHandle<Code> Pipeline::GenerateCodeForWasmStub(
PipelineImpl pipeline(&data);
+ if (info.trace_turbo_json_enabled() ||
+ info.trace_turbo_graph_enabled()) {
+ CodeTracer::Scope tracing_scope(data.GetCodeTracer());
+ OFStream os(tracing_scope.file());
+ os << "---------------------------------------------------\n"
+ << "Begin compiling method " << info.GetDebugName().get()
+ << " using Turbofan" << std::endl;
+ }
+
if (info.trace_turbo_graph_enabled()) { // Simple textual RPO.
StdoutStream{} << "-- wasm stub " << Code::Kind2String(kind) << " graph -- "
<< std::endl
@@ -2236,7 +2276,7 @@ MaybeHandle<Code> Pipeline::GenerateCodeForWasmStub(
json_of << "{\"function\":\"" << info.GetDebugName().get()
<< "\", \"source\":\"\",\n\"phases\":[";
}
- // TODO(rossberg): Should this really be untyped?
+
pipeline.RunPrintAndVerify("machine", true);
pipeline.ComputeScheduledGraph();
@@ -2259,7 +2299,7 @@ MaybeHandle<Code> Pipeline::GenerateCodeForTesting(
PipelineImpl pipeline(&data);
Linkage linkage(Linkage::ComputeIncoming(data.instruction_zone(), info));
- Deoptimizer::EnsureCodeForMaxDeoptimizationEntries(isolate);
+ Deoptimizer::EnsureCodeForDeoptimizationEntries(isolate);
if (!pipeline.CreateGraph()) return MaybeHandle<Code>();
if (!pipeline.OptimizeGraph(&linkage)) return MaybeHandle<Code>();
@@ -2321,17 +2361,120 @@ OptimizedCompilationJob* Pipeline::NewCompilationJob(
}
// static
-OptimizedCompilationJob* Pipeline::NewWasmCompilationJob(
+void Pipeline::GenerateCodeForWasmFunction(
OptimizedCompilationInfo* info, wasm::WasmEngine* wasm_engine,
MachineGraph* mcgraph, CallDescriptor* call_descriptor,
SourcePositionTable* source_positions, NodeOriginTable* node_origins,
- wasm::FunctionBody function_body, wasm::WasmModule* wasm_module,
- wasm::NativeModule* native_module, int function_index,
- wasm::ModuleOrigin asmjs_origin) {
- return new PipelineWasmCompilationJob(
- info, wasm_engine, mcgraph, call_descriptor, source_positions,
- node_origins, function_body, wasm_module, native_module, function_index,
- asmjs_origin);
+ wasm::FunctionBody function_body, const wasm::WasmModule* module,
+ int function_index) {
+ ZoneStats zone_stats(wasm_engine->allocator());
+ std::unique_ptr<PipelineStatistics> pipeline_statistics(
+ CreatePipelineStatistics(wasm_engine, function_body, module, info,
+ &zone_stats));
+ // {instruction_buffer} must live longer than {PipelineData}, since
+ // {PipelineData} will reference the {instruction_buffer} via the
+ // {AssemblerBuffer} of the {Assembler} contained in the {CodeGenerator}.
+ std::unique_ptr<wasm::WasmInstructionBuffer> instruction_buffer =
+ wasm::WasmInstructionBuffer::New();
+ PipelineData data(&zone_stats, wasm_engine, info, mcgraph,
+ pipeline_statistics.get(), source_positions, node_origins,
+ WasmAssemblerOptions());
+
+ PipelineImpl pipeline(&data);
+
+ if (data.info()->trace_turbo_json_enabled() ||
+ data.info()->trace_turbo_graph_enabled()) {
+ CodeTracer::Scope tracing_scope(data.GetCodeTracer());
+ OFStream os(tracing_scope.file());
+ os << "---------------------------------------------------\n"
+ << "Begin compiling method " << data.info()->GetDebugName().get()
+ << " using Turbofan" << std::endl;
+ }
+
+ pipeline.RunPrintAndVerify("Machine", true);
+
+ data.BeginPhaseKind("wasm optimization");
+ const bool is_asm_js = module->origin == wasm::kAsmJsOrigin;
+ if (FLAG_turbo_splitting && !is_asm_js) {
+ data.info()->MarkAsSplittingEnabled();
+ }
+ if (FLAG_wasm_opt || is_asm_js) {
+ PipelineRunScope scope(&data, "wasm full optimization");
+ GraphReducer graph_reducer(scope.zone(), data.graph(),
+ data.mcgraph()->Dead());
+ DeadCodeElimination dead_code_elimination(&graph_reducer, data.graph(),
+ data.common(), scope.zone());
+ ValueNumberingReducer value_numbering(scope.zone(), data.graph()->zone());
+ const bool allow_signalling_nan = is_asm_js;
+ MachineOperatorReducer machine_reducer(&graph_reducer, data.mcgraph(),
+ allow_signalling_nan);
+ CommonOperatorReducer common_reducer(&graph_reducer, data.graph(),
+ data.broker(), data.common(),
+ data.machine(), scope.zone());
+ AddReducer(&data, &graph_reducer, &dead_code_elimination);
+ AddReducer(&data, &graph_reducer, &machine_reducer);
+ AddReducer(&data, &graph_reducer, &common_reducer);
+ AddReducer(&data, &graph_reducer, &value_numbering);
+ graph_reducer.ReduceGraph();
+ } else {
+ PipelineRunScope scope(&data, "wasm base optimization");
+ GraphReducer graph_reducer(scope.zone(), data.graph(),
+ data.mcgraph()->Dead());
+ ValueNumberingReducer value_numbering(scope.zone(), data.graph()->zone());
+ AddReducer(&data, &graph_reducer, &value_numbering);
+ graph_reducer.ReduceGraph();
+ }
+ pipeline.RunPrintAndVerify("wasm optimization", true);
+
+ if (data.node_origins()) {
+ data.node_origins()->RemoveDecorator();
+ }
+
+ pipeline.ComputeScheduledGraph();
+
+ Linkage linkage(call_descriptor);
+ if (!pipeline.SelectInstructions(&linkage)) return;
+ pipeline.AssembleCode(&linkage, instruction_buffer->CreateView());
+
+ auto result = base::make_unique<wasm::WasmCompilationResult>();
+ CodeGenerator* code_generator = pipeline.code_generator();
+ code_generator->tasm()->GetCode(nullptr, &result->code_desc);
+
+ result->instr_buffer = instruction_buffer->ReleaseBuffer();
+ result->frame_slot_count = code_generator->frame()->GetTotalFrameSlotCount();
+ result->safepoint_table_offset = code_generator->GetSafepointTableOffset();
+ result->handler_table_offset = code_generator->GetHandlerTableOffset();
+ result->source_positions = code_generator->GetSourcePositionTable();
+ result->protected_instructions = code_generator->GetProtectedInstructions();
+
+ if (data.info()->trace_turbo_json_enabled()) {
+ TurboJsonFile json_of(data.info(), std::ios_base::app);
+ json_of << "{\"name\":\"disassembly\",\"type\":\"disassembly\",\"data\":\"";
+#ifdef ENABLE_DISASSEMBLER
+ std::stringstream disassembler_stream;
+ Disassembler::Decode(
+ nullptr, &disassembler_stream, result->code_desc.buffer,
+ result->code_desc.buffer + result->safepoint_table_offset,
+ CodeReference(&result->code_desc));
+ for (auto const c : disassembler_stream.str()) {
+ json_of << AsEscapedUC16ForJSON(c);
+ }
+#endif // ENABLE_DISASSEMBLER
+ json_of << "\"}\n]";
+ json_of << "\n}";
+ }
+
+ if (data.info()->trace_turbo_json_enabled() ||
+ data.info()->trace_turbo_graph_enabled()) {
+ CodeTracer::Scope tracing_scope(data.GetCodeTracer());
+ OFStream os(tracing_scope.file());
+ os << "---------------------------------------------------\n"
+ << "Finished compiling method " << data.info()->GetDebugName().get()
+ << " using Turbofan" << std::endl;
+ }
+
+ DCHECK(result->succeeded());
+ info->SetWasmCompilationResult(std::move(result));
}
bool Pipeline::AllocateRegistersForTesting(const RegisterConfiguration* config,
@@ -2399,9 +2542,10 @@ bool PipelineImpl::SelectInstructions(Linkage* linkage) {
<< "--------------------------------------------------\n";
}
Zone temp_zone(data->allocator(), ZONE_NAME);
- MachineGraphVerifier::Run(data->graph(), data->schedule(), linkage,
- data->info()->IsStub(), data->debug_name(),
- &temp_zone);
+ MachineGraphVerifier::Run(
+ data->graph(), data->schedule(), linkage,
+ data->info()->IsNotOptimizedFunctionOrWasmFunction(),
+ data->debug_name(), &temp_zone);
}
data->InitializeInstructionSequence(call_descriptor);
@@ -2450,19 +2594,12 @@ bool PipelineImpl::SelectInstructions(Linkage* linkage) {
AllocateRegisters(config.get(), call_descriptor, run_verifier);
} else if (data->info()->GetPoisoningMitigationLevel() !=
PoisoningMitigationLevel::kDontPoison) {
+#ifdef V8_TARGET_ARCH_IA32
+ FATAL("Poisoning is not supported on ia32.");
+#else
AllocateRegisters(RegisterConfiguration::Poisoning(), call_descriptor,
run_verifier);
-#if defined(V8_TARGET_ARCH_IA32) && defined(V8_EMBEDDED_BUILTINS)
- } else if (Builtins::IsBuiltinId(data->info()->builtin_index())) {
- // TODO(v8:6666): Extend support to user code. Ensure that
- // it is mutually exclusive with the Poisoning configuration above; and that
- // it cooperates with restricted allocatable registers above.
- static_assert(kRootRegister == kSpeculationPoisonRegister,
- "The following checks assume root equals poison register");
- CHECK_IMPLIES(FLAG_embedded_builtins, !FLAG_untrusted_code_mitigations);
- AllocateRegisters(RegisterConfiguration::PreserveRootIA32(),
- call_descriptor, run_verifier);
-#endif // defined(V8_TARGET_ARCH_IA32) && defined(V8_EMBEDDED_BUILTINS)
+#endif // V8_TARGET_ARCH_IA32
} else {
AllocateRegisters(RegisterConfiguration::Default(), call_descriptor,
run_verifier);
@@ -2532,10 +2669,12 @@ std::ostream& operator<<(std::ostream& out, const InstructionStartsAsJSON& s) {
return out;
}
-void PipelineImpl::AssembleCode(Linkage* linkage) {
+void PipelineImpl::AssembleCode(Linkage* linkage,
+ std::unique_ptr<AssemblerBuffer> buffer) {
PipelineData* data = this->data_;
data->BeginPhaseKind("code generation");
- data->InitializeCodeGenerator(linkage);
+ data->InitializeCodeGenerator(linkage, std::move(buffer));
+
Run<AssembleCodePhase>();
if (data->info()->trace_turbo_json_enabled()) {
TurboJsonFile json_of(data->info(), std::ios_base::app);
@@ -2566,8 +2705,8 @@ std::ostream& operator<<(std::ostream& out, const BlockStartsAsJSON& s) {
MaybeHandle<Code> PipelineImpl::FinalizeCode() {
PipelineData* data = this->data_;
- if (data->js_heap_broker() && FLAG_concurrent_compiler_frontend) {
- data->js_heap_broker()->Retire();
+ if (data->broker()) {
+ data->broker()->Retire();
}
Run<FinalizeCodePhase>();
@@ -2619,14 +2758,21 @@ MaybeHandle<Code> PipelineImpl::FinalizeCode() {
return code;
}
-MaybeHandle<Code> PipelineImpl::GenerateCode(CallDescriptor* call_descriptor) {
+bool PipelineImpl::SelectInstructionsAndAssemble(
+ CallDescriptor* call_descriptor) {
Linkage linkage(call_descriptor);
// Perform instruction selection and register allocation.
- if (!SelectInstructions(&linkage)) return MaybeHandle<Code>();
+ if (!SelectInstructions(&linkage)) return false;
// Generate the final machine code.
AssembleCode(&linkage);
+ return true;
+}
+
+MaybeHandle<Code> PipelineImpl::GenerateCode(CallDescriptor* call_descriptor) {
+ if (!SelectInstructionsAndAssemble(call_descriptor))
+ return MaybeHandle<Code>();
return FinalizeCode();
}
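
The refactoring above splits the old GenerateCode into SelectInstructionsAndAssemble plus FinalizeCode, so a caller can run selection and assembly (which may fail) separately from materializing the final Code object. A toy sketch of that two-phase shape, with illustrative names rather than V8's:

    #include <optional>
    #include <string>

    class ToyPipeline {
     public:
      // Phase 1: may fail; no final artifact is produced yet.
      bool SelectAndAssemble() {
        assembled_ = "raw machine code";
        return true;
      }
      // Phase 2: turn the assembled buffer into the final artifact.
      std::optional<std::string> Finalize() {
        if (assembled_.empty()) return std::nullopt;
        return assembled_;
      }

     private:
      std::string assembled_;
    };

    int main() {
      ToyPipeline p;
      if (!p.SelectAndAssemble()) return 1;  // mirrors returning MaybeHandle<Code>()
      return p.Finalize().has_value() ? 0 : 1;
    }
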
@@ -2638,13 +2784,12 @@ bool PipelineImpl::CommitDependencies(Handle<Code> code) {
namespace {
void TraceSequence(OptimizedCompilationInfo* info, PipelineData* data,
- const RegisterConfiguration* config,
const char* phase_name) {
if (info->trace_turbo_json_enabled()) {
AllowHandleDereference allow_deref;
TurboJsonFile json_of(info, std::ios_base::app);
json_of << "{\"name\":\"" << phase_name << "\",\"type\":\"sequence\",";
- json_of << InstructionSequenceAsJSON{config, data->sequence()};
+ json_of << InstructionSequenceAsJSON{data->sequence()};
json_of << "},\n";
}
if (info->trace_turbo_graph_enabled()) {
@@ -2652,7 +2797,7 @@ void TraceSequence(OptimizedCompilationInfo* info, PipelineData* data,
CodeTracer::Scope tracing_scope(data->GetCodeTracer());
OFStream os(tracing_scope.file());
os << "----- Instruction sequence " << phase_name << " -----\n"
- << PrintableInstructionSequence({config, data->sequence()});
+ << *data->sequence();
}
}
@@ -2683,15 +2828,29 @@ void PipelineImpl::AllocateRegisters(const RegisterConfiguration* config,
Run<MeetRegisterConstraintsPhase>();
Run<ResolvePhisPhase>();
Run<BuildLiveRangesPhase>();
- TraceSequence(info(), data, config, "before register allocation");
+ Run<BuildBundlesPhase>();
+
+ TraceSequence(info(), data, "before register allocation");
if (verifier != nullptr) {
CHECK(!data->register_allocation_data()->ExistsUseWithoutDefinition());
CHECK(data->register_allocation_data()
->RangesDefinedInDeferredStayInDeferred());
}
+ if (info()->trace_turbo_json_enabled() && !data->MayHaveUnverifiableGraph()) {
+ TurboCfgFile tcf(isolate());
+ tcf << AsC1VRegisterAllocationData("PreAllocation",
+ data->register_allocation_data());
+ }
+
if (FLAG_turbo_preprocess_ranges) {
Run<SplinterLiveRangesPhase>();
+ if (info()->trace_turbo_json_enabled() &&
+ !data->MayHaveUnverifiableGraph()) {
+ TurboCfgFile tcf(isolate());
+ tcf << AsC1VRegisterAllocationData("PostSplinter",
+ data->register_allocation_data());
+ }
}
Run<AllocateGeneralRegistersPhase<LinearScanAllocator>>();
@@ -2724,7 +2883,7 @@ void PipelineImpl::AllocateRegisters(const RegisterConfiguration* config,
Run<LocateSpillSlotsPhase>();
- TraceSequence(info(), data, config, "after register allocation");
+ TraceSequence(info(), data, "after register allocation");
if (verifier != nullptr) {
verifier->VerifyAssignment("End of regalloc pipeline.");
diff --git a/deps/v8/src/compiler/pipeline.h b/deps/v8/src/compiler/pipeline.h
index a86efe840b..8c56911023 100644
--- a/deps/v8/src/compiler/pipeline.h
+++ b/deps/v8/src/compiler/pipeline.h
@@ -18,12 +18,11 @@ struct AssemblerOptions;
class OptimizedCompilationInfo;
class OptimizedCompilationJob;
class RegisterConfiguration;
-class JumpOptimizationInfo;
namespace wasm {
-enum ModuleOrigin : uint8_t;
struct FunctionBody;
class NativeModule;
+class WasmCode;
class WasmEngine;
struct WasmModule;
} // namespace wasm
@@ -45,28 +44,34 @@ class Pipeline : public AllStatic {
Handle<JSFunction> function,
bool has_script);
- // Returns a new compilation job for the WebAssembly compilation info.
- static OptimizedCompilationJob* NewWasmCompilationJob(
+ // Run the pipeline for the WebAssembly compilation info.
+ static void GenerateCodeForWasmFunction(
OptimizedCompilationInfo* info, wasm::WasmEngine* wasm_engine,
MachineGraph* mcgraph, CallDescriptor* call_descriptor,
SourcePositionTable* source_positions, NodeOriginTable* node_origins,
- wasm::FunctionBody function_body, wasm::WasmModule* wasm_module,
- wasm::NativeModule* native_module, int function_index,
- wasm::ModuleOrigin wasm_origin);
+ wasm::FunctionBody function_body, const wasm::WasmModule* module,
+ int function_index);
// Run the pipeline on a machine graph and generate code.
- static MaybeHandle<Code> GenerateCodeForWasmStub(
+ static wasm::WasmCode* GenerateCodeForWasmNativeStub(
+ wasm::WasmEngine* wasm_engine, CallDescriptor* call_descriptor,
+ MachineGraph* mcgraph, Code::Kind kind, int wasm_kind,
+ const char* debug_name, const AssemblerOptions& assembler_options,
+ wasm::NativeModule* native_module,
+ SourcePositionTable* source_positions = nullptr);
+
+ // Run the pipeline on a machine graph and generate code.
+ static MaybeHandle<Code> GenerateCodeForWasmHeapStub(
Isolate* isolate, CallDescriptor* call_descriptor, Graph* graph,
Code::Kind kind, const char* debug_name,
const AssemblerOptions& assembler_options,
SourcePositionTable* source_positions = nullptr);
- // Run the pipeline on a machine graph and generate code. The {schedule} must
- // be valid, hence the given {graph} does not need to be schedulable.
+ // Run the pipeline on a machine graph and generate code.
static MaybeHandle<Code> GenerateCodeForCodeStub(
Isolate* isolate, CallDescriptor* call_descriptor, Graph* graph,
- Schedule* schedule, Code::Kind kind, const char* debug_name,
- uint32_t stub_key, int32_t builtin_index, JumpOptimizationInfo* jump_opt,
+ SourcePositionTable* source_positions, Code::Kind kind,
+ const char* debug_name, int32_t builtin_index,
PoisoningMitigationLevel poisoning_level,
const AssemblerOptions& options);
diff --git a/deps/v8/src/compiler/ppc/OWNERS b/deps/v8/src/compiler/ppc/OWNERS
deleted file mode 100644
index cf60da5cc7..0000000000
--- a/deps/v8/src/compiler/ppc/OWNERS
+++ /dev/null
@@ -1,7 +0,0 @@
-jyan@ca.ibm.com
-dstence@us.ibm.com
-joransiu@ca.ibm.com
-mbrandy@us.ibm.com
-michael_dawson@ca.ibm.com
-jbarboza@ca.ibm.com
-mmallick@ca.ibm.com
diff --git a/deps/v8/src/compiler/ppc/instruction-codes-ppc.h b/deps/v8/src/compiler/ppc/instruction-codes-ppc.h
deleted file mode 100644
index e189a18543..0000000000
--- a/deps/v8/src/compiler/ppc/instruction-codes-ppc.h
+++ /dev/null
@@ -1,188 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_COMPILER_PPC_INSTRUCTION_CODES_PPC_H_
-#define V8_COMPILER_PPC_INSTRUCTION_CODES_PPC_H_
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-// PPC-specific opcodes that specify which assembly sequence to emit.
-// Most opcodes specify a single instruction.
-#define TARGET_ARCH_OPCODE_LIST(V) \
- V(PPC_And) \
- V(PPC_AndComplement) \
- V(PPC_Or) \
- V(PPC_OrComplement) \
- V(PPC_Xor) \
- V(PPC_ShiftLeft32) \
- V(PPC_ShiftLeft64) \
- V(PPC_ShiftLeftPair) \
- V(PPC_ShiftRight32) \
- V(PPC_ShiftRight64) \
- V(PPC_ShiftRightPair) \
- V(PPC_ShiftRightAlg32) \
- V(PPC_ShiftRightAlg64) \
- V(PPC_ShiftRightAlgPair) \
- V(PPC_RotRight32) \
- V(PPC_RotRight64) \
- V(PPC_Not) \
- V(PPC_RotLeftAndMask32) \
- V(PPC_RotLeftAndClear64) \
- V(PPC_RotLeftAndClearLeft64) \
- V(PPC_RotLeftAndClearRight64) \
- V(PPC_Add32) \
- V(PPC_Add64) \
- V(PPC_AddWithOverflow32) \
- V(PPC_AddPair) \
- V(PPC_AddDouble) \
- V(PPC_Sub) \
- V(PPC_SubWithOverflow32) \
- V(PPC_SubPair) \
- V(PPC_SubDouble) \
- V(PPC_Mul32) \
- V(PPC_Mul32WithHigh32) \
- V(PPC_Mul64) \
- V(PPC_MulHigh32) \
- V(PPC_MulHighU32) \
- V(PPC_MulPair) \
- V(PPC_MulDouble) \
- V(PPC_Div32) \
- V(PPC_Div64) \
- V(PPC_DivU32) \
- V(PPC_DivU64) \
- V(PPC_DivDouble) \
- V(PPC_Mod32) \
- V(PPC_Mod64) \
- V(PPC_ModU32) \
- V(PPC_ModU64) \
- V(PPC_ModDouble) \
- V(PPC_Neg) \
- V(PPC_NegDouble) \
- V(PPC_SqrtDouble) \
- V(PPC_FloorDouble) \
- V(PPC_CeilDouble) \
- V(PPC_TruncateDouble) \
- V(PPC_RoundDouble) \
- V(PPC_MaxDouble) \
- V(PPC_MinDouble) \
- V(PPC_AbsDouble) \
- V(PPC_Cntlz32) \
- V(PPC_Cntlz64) \
- V(PPC_Popcnt32) \
- V(PPC_Popcnt64) \
- V(PPC_Cmp32) \
- V(PPC_Cmp64) \
- V(PPC_CmpDouble) \
- V(PPC_Tst32) \
- V(PPC_Tst64) \
- V(PPC_Push) \
- V(PPC_PushFrame) \
- V(PPC_StoreToStackSlot) \
- V(PPC_ExtendSignWord8) \
- V(PPC_ExtendSignWord16) \
- V(PPC_ExtendSignWord32) \
- V(PPC_Uint32ToUint64) \
- V(PPC_Int64ToInt32) \
- V(PPC_Int64ToFloat32) \
- V(PPC_Int64ToDouble) \
- V(PPC_Uint64ToFloat32) \
- V(PPC_Uint64ToDouble) \
- V(PPC_Int32ToFloat32) \
- V(PPC_Int32ToDouble) \
- V(PPC_Uint32ToFloat32) \
- V(PPC_Uint32ToDouble) \
- V(PPC_Float32ToDouble) \
- V(PPC_Float64SilenceNaN) \
- V(PPC_DoubleToInt32) \
- V(PPC_DoubleToUint32) \
- V(PPC_DoubleToInt64) \
- V(PPC_DoubleToUint64) \
- V(PPC_DoubleToFloat32) \
- V(PPC_DoubleExtractLowWord32) \
- V(PPC_DoubleExtractHighWord32) \
- V(PPC_DoubleInsertLowWord32) \
- V(PPC_DoubleInsertHighWord32) \
- V(PPC_DoubleConstruct) \
- V(PPC_BitcastInt32ToFloat32) \
- V(PPC_BitcastFloat32ToInt32) \
- V(PPC_BitcastInt64ToDouble) \
- V(PPC_BitcastDoubleToInt64) \
- V(PPC_LoadWordS8) \
- V(PPC_LoadWordU8) \
- V(PPC_LoadWordS16) \
- V(PPC_LoadWordU16) \
- V(PPC_LoadWordS32) \
- V(PPC_LoadWordU32) \
- V(PPC_LoadWord64) \
- V(PPC_LoadFloat32) \
- V(PPC_LoadDouble) \
- V(PPC_StoreWord8) \
- V(PPC_StoreWord16) \
- V(PPC_StoreWord32) \
- V(PPC_StoreWord64) \
- V(PPC_StoreFloat32) \
- V(PPC_StoreDouble) \
- V(PPC_ByteRev32) \
- V(PPC_ByteRev64) \
- V(PPC_Word64AtomicStoreUint8) \
- V(PPC_Word64AtomicStoreUint16) \
- V(PPC_Word64AtomicStoreUint32) \
- V(PPC_Word64AtomicStoreUint64) \
- V(PPC_Word64AtomicLoadUint8) \
- V(PPC_Word64AtomicLoadUint16) \
- V(PPC_Word64AtomicLoadUint32) \
- V(PPC_Word64AtomicLoadUint64) \
- V(PPC_Word64AtomicExchangeUint8) \
- V(PPC_Word64AtomicExchangeUint16) \
- V(PPC_Word64AtomicExchangeUint32) \
- V(PPC_Word64AtomicExchangeUint64) \
- V(PPC_Word64AtomicCompareExchangeUint8) \
- V(PPC_Word64AtomicCompareExchangeUint16) \
- V(PPC_Word64AtomicCompareExchangeUint32) \
- V(PPC_Word64AtomicCompareExchangeUint64) \
- V(PPC_Word64AtomicAddUint8) \
- V(PPC_Word64AtomicAddUint16) \
- V(PPC_Word64AtomicAddUint32) \
- V(PPC_Word64AtomicAddUint64) \
- V(PPC_Word64AtomicSubUint8) \
- V(PPC_Word64AtomicSubUint16) \
- V(PPC_Word64AtomicSubUint32) \
- V(PPC_Word64AtomicSubUint64) \
- V(PPC_Word64AtomicAndUint8) \
- V(PPC_Word64AtomicAndUint16) \
- V(PPC_Word64AtomicAndUint32) \
- V(PPC_Word64AtomicAndUint64) \
- V(PPC_Word64AtomicOrUint8) \
- V(PPC_Word64AtomicOrUint16) \
- V(PPC_Word64AtomicOrUint32) \
- V(PPC_Word64AtomicOrUint64) \
- V(PPC_Word64AtomicXorUint8) \
- V(PPC_Word64AtomicXorUint16) \
- V(PPC_Word64AtomicXorUint32) \
- V(PPC_Word64AtomicXorUint64)
-
-// Addressing modes represent the "shape" of inputs to an instruction.
-// Many instructions support multiple addressing modes. Addressing modes
-// are encoded into the InstructionCode of the instruction and tell the
-// code generator after register allocation which assembler method to call.
-//
-// We use the following local notation for addressing modes:
-//
-// R = register
-// O = register or stack slot
-// D = double register
-// I = immediate (handle, external, int32)
-// MRI = [register + immediate]
-// MRR = [register + register]
-#define TARGET_ADDRESSING_MODE_LIST(V) \
- V(MRI) /* [%r0 + K] */ \
- V(MRR) /* [%r0 + %r1] */
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8
-
-#endif // V8_COMPILER_PPC_INSTRUCTION_CODES_PPC_H_
diff --git a/deps/v8/src/compiler/property-access-builder.cc b/deps/v8/src/compiler/property-access-builder.cc
index 31950f32f2..ee06867cd1 100644
--- a/deps/v8/src/compiler/property-access-builder.cc
+++ b/deps/v8/src/compiler/property-access-builder.cc
@@ -11,6 +11,7 @@
#include "src/compiler/node-matchers.h"
#include "src/compiler/simplified-operator.h"
#include "src/lookup.h"
+#include "src/objects/heap-number.h"
#include "src/field-index-inl.h"
#include "src/isolate-inl.h"
@@ -82,27 +83,37 @@ bool NeedsCheckHeapObject(Node* receiver) {
switch (receiver->opcode()) {
case IrOpcode::kConvertReceiver:
case IrOpcode::kHeapConstant:
+ case IrOpcode::kJSCloneObject:
+ case IrOpcode::kJSConstruct:
+ case IrOpcode::kJSConstructForwardVarargs:
+ case IrOpcode::kJSConstructWithArrayLike:
+ case IrOpcode::kJSConstructWithSpread:
case IrOpcode::kJSCreate:
case IrOpcode::kJSCreateArguments:
case IrOpcode::kJSCreateArray:
+ case IrOpcode::kJSCreateArrayFromIterable:
+ case IrOpcode::kJSCreateArrayIterator:
+ case IrOpcode::kJSCreateAsyncFunctionObject:
+ case IrOpcode::kJSCreateBoundFunction:
case IrOpcode::kJSCreateClosure:
+ case IrOpcode::kJSCreateCollectionIterator:
+ case IrOpcode::kJSCreateEmptyLiteralArray:
+ case IrOpcode::kJSCreateEmptyLiteralObject:
+ case IrOpcode::kJSCreateGeneratorObject:
case IrOpcode::kJSCreateIterResultObject:
+ case IrOpcode::kJSCreateKeyValueArray:
case IrOpcode::kJSCreateLiteralArray:
- case IrOpcode::kJSCreateEmptyLiteralArray:
- case IrOpcode::kJSCreateArrayFromIterable:
case IrOpcode::kJSCreateLiteralObject:
- case IrOpcode::kJSCreateEmptyLiteralObject:
case IrOpcode::kJSCreateLiteralRegExp:
- case IrOpcode::kJSCreateGeneratorObject:
- case IrOpcode::kJSConstructForwardVarargs:
- case IrOpcode::kJSConstruct:
- case IrOpcode::kJSConstructWithArrayLike:
- case IrOpcode::kJSConstructWithSpread:
+ case IrOpcode::kJSCreateObject:
+ case IrOpcode::kJSCreatePromise:
+ case IrOpcode::kJSCreateStringIterator:
+ case IrOpcode::kJSCreateTypedArray:
+ case IrOpcode::kJSGetSuperConstructor:
case IrOpcode::kJSToName:
- case IrOpcode::kJSToString:
case IrOpcode::kJSToObject:
+ case IrOpcode::kJSToString:
case IrOpcode::kTypeOf:
- case IrOpcode::kJSGetSuperConstructor:
return false;
case IrOpcode::kPhi: {
Node* control = NodeProperties::GetControlInput(receiver);
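
The expanded switch above enumerates operators whose result is statically known to be a heap object, so the receiver needs no Smi/heap-object check. A toy classification with stand-in opcodes, not V8's IrOpcode set:

    #include <cassert>

    enum class Op { kHeapConstant, kJSCreateObject, kParameter, kLoadField };

    // True when the value might be a Smi and therefore still needs the check.
    bool NeedsHeapObjectCheck(Op op) {
      switch (op) {
        case Op::kHeapConstant:
        case Op::kJSCreateObject:
          return false;  // constants and fresh allocations are heap objects
        default:
          return true;
      }
    }

    int main() {
      assert(!NeedsHeapObjectCheck(Op::kJSCreateObject));
      assert(NeedsHeapObjectCheck(Op::kParameter));
    }
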
@@ -137,8 +148,7 @@ void PropertyAccessBuilder::BuildCheckMaps(
if (receiver_map->is_stable()) {
for (Handle<Map> map : receiver_maps) {
if (map.is_identical_to(receiver_map)) {
- dependencies()->DependOnStableMap(
- MapRef(js_heap_broker(), receiver_map));
+ dependencies()->DependOnStableMap(MapRef(broker(), receiver_map));
return;
}
}
@@ -207,7 +217,7 @@ Node* PropertyAccessBuilder::TryBuildLoadConstantDataField(
// the field.
DCHECK(access_info.IsDataConstantField());
DCHECK(!it.is_dictionary_holder());
- MapRef map(js_heap_broker(),
+ MapRef map(broker(),
handle(it.GetHolder<HeapObject>()->map(), isolate()));
map.SerializeOwnDescriptors(); // TODO(neis): Remove later.
dependencies()->DependOnFieldType(map, it.GetFieldDescriptorIndex());
@@ -270,7 +280,7 @@ Node* PropertyAccessBuilder::BuildLoadDataField(
Handle<Map> field_map;
if (access_info.field_map().ToHandle(&field_map)) {
if (field_map->is_stable()) {
- dependencies()->DependOnStableMap(MapRef(js_heap_broker(), field_map));
+ dependencies()->DependOnStableMap(MapRef(broker(), field_map));
field_access.map = field_map;
}
}
diff --git a/deps/v8/src/compiler/property-access-builder.h b/deps/v8/src/compiler/property-access-builder.h
index 6a073be65d..20345ed7d3 100644
--- a/deps/v8/src/compiler/property-access-builder.h
+++ b/deps/v8/src/compiler/property-access-builder.h
@@ -26,11 +26,9 @@ class SimplifiedOperatorBuilder;
class PropertyAccessBuilder {
public:
- PropertyAccessBuilder(JSGraph* jsgraph, JSHeapBroker* js_heap_broker,
+ PropertyAccessBuilder(JSGraph* jsgraph, JSHeapBroker* broker,
CompilationDependencies* dependencies)
- : jsgraph_(jsgraph),
- js_heap_broker_(js_heap_broker),
- dependencies_(dependencies) {}
+ : jsgraph_(jsgraph), broker_(broker), dependencies_(dependencies) {}
// Builds the appropriate string check if the maps are only string
// maps.
@@ -54,7 +52,7 @@ class PropertyAccessBuilder {
private:
JSGraph* jsgraph() const { return jsgraph_; }
- JSHeapBroker* js_heap_broker() const { return js_heap_broker_; }
+ JSHeapBroker* broker() const { return broker_; }
CompilationDependencies* dependencies() const { return dependencies_; }
Graph* graph() const;
Isolate* isolate() const;
@@ -69,7 +67,7 @@ class PropertyAccessBuilder {
Node* ResolveHolder(PropertyAccessInfo const& access_info, Node* receiver);
JSGraph* jsgraph_;
- JSHeapBroker* js_heap_broker_;
+ JSHeapBroker* broker_;
CompilationDependencies* dependencies_;
};
diff --git a/deps/v8/src/compiler/raw-machine-assembler.cc b/deps/v8/src/compiler/raw-machine-assembler.cc
index 9e7bc9a611..73a7701926 100644
--- a/deps/v8/src/compiler/raw-machine-assembler.cc
+++ b/deps/v8/src/compiler/raw-machine-assembler.cc
@@ -4,6 +4,7 @@
#include "src/compiler/raw-machine-assembler.h"
+#include "src/compiler/compiler-source-position-table.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/pipeline.h"
#include "src/compiler/scheduler.h"
@@ -21,8 +22,10 @@ RawMachineAssembler::RawMachineAssembler(
: isolate_(isolate),
graph_(graph),
schedule_(new (zone()) Schedule(zone())),
+ source_positions_(new (zone()) SourcePositionTable(graph)),
machine_(zone(), word, flags, alignment_requirements),
common_(zone()),
+ simplified_(zone()),
call_descriptor_(call_descriptor),
target_parameter_(nullptr),
parameters_(parameter_count(), zone()),
@@ -40,6 +43,14 @@ RawMachineAssembler::RawMachineAssembler(
AddNode(common()->Parameter(static_cast<int>(i)), graph->start());
}
graph->SetEnd(graph->NewNode(common_.End(0)));
+ source_positions_->AddDecorator();
+}
+
+void RawMachineAssembler::SetSourcePosition(const char* file, int line) {
+ int file_id = isolate()->LookupOrAddExternallyCompiledFilename(file);
+ SourcePosition p = SourcePosition::External(line, file_id);
+ DCHECK(p.ExternalLine() == line);
+ source_positions()->SetCurrentPosition(p);
}
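
SetSourcePosition above records an "external" position, a (file id, line) pair, against the graph. Below is a toy model of such a packed position value; the field layout is an assumption for illustration, not V8's SourcePosition encoding:

    #include <cassert>
    #include <cstdint>

    struct ToySourcePosition {
      static ToySourcePosition External(int line, int file_id) {
        ToySourcePosition p;
        p.value = (static_cast<uint64_t>(file_id) << 32) |
                  static_cast<uint32_t>(line);
        return p;
      }
      int ExternalLine() const { return static_cast<int>(value & 0xffffffffu); }
      uint64_t value = 0;
    };

    int main() {
      ToySourcePosition p = ToySourcePosition::External(42, /*file_id=*/1);
      assert(p.ExternalLine() == 42);  // matches the DCHECK in the patch
    }
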
Node* RawMachineAssembler::NullConstant() {
@@ -52,11 +63,16 @@ Node* RawMachineAssembler::UndefinedConstant() {
Node* RawMachineAssembler::RelocatableIntPtrConstant(intptr_t value,
RelocInfo::Mode rmode) {
- return kPointerSize == 8
+ return kSystemPointerSize == 8
? RelocatableInt64Constant(value, rmode)
: RelocatableInt32Constant(static_cast<int>(value), rmode);
}
+Node* RawMachineAssembler::OptimizedAllocate(Node* size,
+ PretenureFlag pretenure) {
+ return AddNode(simplified()->AllocateRaw(Type::Any(), pretenure), size);
+}
+
Schedule* RawMachineAssembler::Export() {
// Compute the correct codegen order.
DCHECK(schedule_->rpo_order()->empty());
@@ -72,18 +88,337 @@ Schedule* RawMachineAssembler::Export() {
StdoutStream{} << *schedule_;
}
// Invalidate RawMachineAssembler.
+ source_positions_->RemoveDecorator();
Schedule* schedule = schedule_;
schedule_ = nullptr;
return schedule;
}
+Graph* RawMachineAssembler::ExportForOptimization() {
+ // Compute the correct codegen order.
+ DCHECK(schedule_->rpo_order()->empty());
+ if (FLAG_trace_turbo_scheduler) {
+ PrintF("--- RAW SCHEDULE -------------------------------------------\n");
+ StdoutStream{} << *schedule_;
+ }
+ schedule_->EnsureCFGWellFormedness();
+ Scheduler::ComputeSpecialRPO(zone(), schedule_);
+ if (FLAG_trace_turbo_scheduler) {
+ PrintF("--- SCHEDULE BEFORE GRAPH CREATION -------------------------\n");
+ StdoutStream{} << *schedule_;
+ }
+ MakeReschedulable();
+ // Invalidate RawMachineAssembler.
+ schedule_ = nullptr;
+ return graph();
+}
+
+void RawMachineAssembler::MakeReschedulable() {
+ std::vector<Node*> block_final_control(schedule_->all_blocks_.size());
+ std::vector<Node*> block_final_effect(schedule_->all_blocks_.size());
+
+ struct LoopHeader {
+ BasicBlock* block;
+ Node* loop_node;
+ Node* effect_phi;
+ };
+ std::vector<LoopHeader> loop_headers;
+
+ // These are hoisted outside of the loop to avoid re-allocation.
+ std::vector<Node*> merge_inputs;
+ std::vector<Node*> effect_phi_inputs;
+
+ for (BasicBlock* block : *schedule_->rpo_order()) {
+ Node* current_control;
+ Node* current_effect;
+ if (block == schedule_->start()) {
+ current_control = current_effect = graph()->start();
+ } else if (block == schedule_->end()) {
+ for (size_t i = 0; i < block->PredecessorCount(); ++i) {
+ NodeProperties::MergeControlToEnd(
+ graph(), common(), block->PredecessorAt(i)->control_input());
+ }
+ } else if (block->IsLoopHeader()) {
+      // The graph()->start() inputs are just placeholders until we have
+      // computed the real back-edges and restructured the control flow so
+      // that the loop has exactly two predecessors.
+ current_control = graph()->NewNode(common()->Loop(2), graph()->start(),
+ graph()->start());
+ current_effect =
+ graph()->NewNode(common()->EffectPhi(2), graph()->start(),
+ graph()->start(), current_control);
+
+ Node* terminate = graph()->NewNode(common()->Terminate(), current_effect,
+ current_control);
+ NodeProperties::MergeControlToEnd(graph(), common(), terminate);
+ loop_headers.push_back(
+ LoopHeader{block, current_control, current_effect});
+ } else if (block->PredecessorCount() == 1) {
+ BasicBlock* predecessor = block->PredecessorAt(0);
+ DCHECK_LT(predecessor->rpo_number(), block->rpo_number());
+ current_effect = block_final_effect[predecessor->id().ToSize()];
+ current_control = block_final_control[predecessor->id().ToSize()];
+ } else {
+ // Create control merge nodes and effect phis for all predecessor blocks.
+ merge_inputs.clear();
+ effect_phi_inputs.clear();
+ int predecessor_count = static_cast<int>(block->PredecessorCount());
+ for (int i = 0; i < predecessor_count; ++i) {
+ BasicBlock* predecessor = block->PredecessorAt(i);
+ DCHECK_LT(predecessor->rpo_number(), block->rpo_number());
+ merge_inputs.push_back(block_final_control[predecessor->id().ToSize()]);
+ effect_phi_inputs.push_back(
+ block_final_effect[predecessor->id().ToSize()]);
+ }
+ current_control = graph()->NewNode(common()->Merge(predecessor_count),
+ static_cast<int>(merge_inputs.size()),
+ merge_inputs.data());
+ effect_phi_inputs.push_back(current_control);
+ current_effect = graph()->NewNode(
+ common()->EffectPhi(predecessor_count),
+ static_cast<int>(effect_phi_inputs.size()), effect_phi_inputs.data());
+ }
+
+ auto update_current_control_and_effect = [&](Node* node) {
+ bool existing_effect_and_control =
+ IrOpcode::IsIfProjectionOpcode(node->opcode()) ||
+ IrOpcode::IsPhiOpcode(node->opcode());
+ if (node->op()->EffectInputCount() > 0) {
+ DCHECK_EQ(1, node->op()->EffectInputCount());
+ if (existing_effect_and_control) {
+ NodeProperties::ReplaceEffectInput(node, current_effect);
+ } else {
+ node->AppendInput(graph()->zone(), current_effect);
+ }
+ }
+ if (node->op()->ControlInputCount() > 0) {
+ DCHECK_EQ(1, node->op()->ControlInputCount());
+ if (existing_effect_and_control) {
+ NodeProperties::ReplaceControlInput(node, current_control);
+ } else {
+ node->AppendInput(graph()->zone(), current_control);
+ }
+ }
+ if (node->op()->EffectOutputCount() > 0) {
+ DCHECK_EQ(1, node->op()->EffectOutputCount());
+ current_effect = node;
+ }
+ if (node->op()->ControlOutputCount() > 0) {
+ current_control = node;
+ }
+ };
+
+ for (Node* node : *block) {
+ update_current_control_and_effect(node);
+ }
+ if (block->deferred()) MarkControlDeferred(current_control);
+
+ if (Node* block_terminator = block->control_input()) {
+ update_current_control_and_effect(block_terminator);
+ }
+
+ block_final_effect[block->id().ToSize()] = current_effect;
+ block_final_control[block->id().ToSize()] = current_control;
+ }
+
+  // Fix up loop back-edges and restructure the control flow so that loop
+  // nodes have exactly two control predecessors.
+ for (const LoopHeader& loop_header : loop_headers) {
+ BasicBlock* block = loop_header.block;
+ std::vector<BasicBlock*> loop_entries;
+ std::vector<BasicBlock*> loop_backedges;
+ for (size_t i = 0; i < block->PredecessorCount(); ++i) {
+ BasicBlock* predecessor = block->PredecessorAt(i);
+ if (block->LoopContains(predecessor)) {
+ loop_backedges.push_back(predecessor);
+ } else {
+ DCHECK(loop_backedges.empty());
+ loop_entries.push_back(predecessor);
+ }
+ }
+ DCHECK(!loop_entries.empty());
+ DCHECK(!loop_backedges.empty());
+
+ int entrance_count = static_cast<int>(loop_entries.size());
+ int backedge_count = static_cast<int>(loop_backedges.size());
+ Node* control_loop_entry = CreateNodeFromPredecessors(
+ loop_entries, block_final_control, common()->Merge(entrance_count), {});
+ Node* control_backedge =
+ CreateNodeFromPredecessors(loop_backedges, block_final_control,
+ common()->Merge(backedge_count), {});
+ Node* effect_loop_entry = CreateNodeFromPredecessors(
+ loop_entries, block_final_effect, common()->EffectPhi(entrance_count),
+ {control_loop_entry});
+ Node* effect_backedge = CreateNodeFromPredecessors(
+ loop_backedges, block_final_effect, common()->EffectPhi(backedge_count),
+ {control_backedge});
+
+ loop_header.loop_node->ReplaceInput(0, control_loop_entry);
+ loop_header.loop_node->ReplaceInput(1, control_backedge);
+ loop_header.effect_phi->ReplaceInput(0, effect_loop_entry);
+ loop_header.effect_phi->ReplaceInput(1, effect_backedge);
+
+ for (Node* node : *block) {
+ if (node->opcode() == IrOpcode::kPhi) {
+ MakePhiBinary(node, static_cast<int>(loop_entries.size()),
+ control_loop_entry, control_backedge);
+ }
+ }
+ }
+}
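
MakeReschedulable above walks each block in RPO and threads a "current effect" and "current control" through its nodes, so a later scheduler can rediscover a valid order. A simplified model of the effect-threading step, using plain structs rather than V8's Node API:

    #include <cstdio>
    #include <vector>

    struct ToyNode {
      const char* name;
      bool has_effect;  // consumes and produces the effect chain
      ToyNode* effect_input = nullptr;
    };

    int main() {
      ToyNode start{"Start", true}, load{"Load", true}, add{"Add", false},
          store{"Store", true};
      std::vector<ToyNode*> block{&load, &add, &store};
      ToyNode* current_effect = &start;
      for (ToyNode* n : block) {
        if (n->has_effect) {
          n->effect_input = current_effect;  // chain after the last effect
          current_effect = n;
        }
      }
      // Prints "Load -> Store": the store is ordered after the load.
      std::printf("%s -> %s\n", store.effect_input->name, store.name);
    }
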
+
+Node* RawMachineAssembler::CreateNodeFromPredecessors(
+ const std::vector<BasicBlock*>& predecessors,
+ const std::vector<Node*>& sidetable, const Operator* op,
+ const std::vector<Node*>& additional_inputs) {
+ if (predecessors.size() == 1) {
+ return sidetable[predecessors.front()->id().ToSize()];
+ }
+ std::vector<Node*> inputs;
+ for (BasicBlock* predecessor : predecessors) {
+ inputs.push_back(sidetable[predecessor->id().ToSize()]);
+ }
+ for (Node* additional_input : additional_inputs) {
+ inputs.push_back(additional_input);
+ }
+ return graph()->NewNode(op, static_cast<int>(inputs.size()), inputs.data());
+}
+
+void RawMachineAssembler::MakePhiBinary(Node* phi, int split_point,
+ Node* left_control,
+ Node* right_control) {
+ int value_count = phi->op()->ValueInputCount();
+ if (value_count == 2) return;
+ DCHECK_LT(split_point, value_count);
+ DCHECK_GT(split_point, 0);
+
+ MachineRepresentation rep = PhiRepresentationOf(phi->op());
+ int left_input_count = split_point;
+ int right_input_count = value_count - split_point;
+
+ Node* left_input;
+ if (left_input_count == 1) {
+ left_input = NodeProperties::GetValueInput(phi, 0);
+ } else {
+ std::vector<Node*> inputs;
+ for (int i = 0; i < left_input_count; ++i) {
+ inputs.push_back(NodeProperties::GetValueInput(phi, i));
+ }
+ inputs.push_back(left_control);
+ left_input =
+ graph()->NewNode(common()->Phi(rep, static_cast<int>(left_input_count)),
+ static_cast<int>(inputs.size()), inputs.data());
+ }
+
+ Node* right_input;
+ if (right_input_count == 1) {
+ right_input = NodeProperties::GetValueInput(phi, split_point);
+ } else {
+ std::vector<Node*> inputs;
+ for (int i = split_point; i < value_count; ++i) {
+ inputs.push_back(NodeProperties::GetValueInput(phi, i));
+ }
+ inputs.push_back(right_control);
+ right_input = graph()->NewNode(
+ common()->Phi(rep, static_cast<int>(right_input_count)),
+ static_cast<int>(inputs.size()), inputs.data());
+ }
+
+ Node* control = NodeProperties::GetControlInput(phi);
+ phi->TrimInputCount(3);
+ phi->ReplaceInput(0, left_input);
+ phi->ReplaceInput(1, right_input);
+ phi->ReplaceInput(2, control);
+ NodeProperties::ChangeOp(phi, common()->Phi(rep, 2));
+}
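
A worked toy example of the MakePhiBinary rewrite: a phi with four value inputs and split_point == 2 becomes a binary phi whose two inputs summarize the loop-entry and backedge groups, i.e. phi(v0, v1, v2, v3) turns into phi(phi(v0, v1), phi(v2, v3)). Plain vectors stand in for graph nodes:

    #include <cassert>
    #include <vector>

    int main() {
      std::vector<int> inputs{10, 11, 20, 21};  // phi(v0, v1, v2, v3)
      int split_point = 2;
      std::vector<int> left(inputs.begin(), inputs.begin() + split_point);
      std::vector<int> right(inputs.begin() + split_point, inputs.end());
      // After the rewrite the phi has exactly two value inputs, one per group.
      assert(left.size() == 2 && right.size() == 2);
    }
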
+
+void RawMachineAssembler::MarkControlDeferred(Node* control_node) {
+ BranchHint new_branch_hint;
+ Node* responsible_branch = nullptr;
+ while (responsible_branch == nullptr) {
+ switch (control_node->opcode()) {
+ case IrOpcode::kIfException:
+ // IfException projections are deferred by default.
+ return;
+ case IrOpcode::kIfSuccess:
+ control_node = NodeProperties::GetControlInput(control_node);
+ continue;
+ case IrOpcode::kIfValue: {
+ IfValueParameters parameters = IfValueParametersOf(control_node->op());
+ if (parameters.hint() != BranchHint::kFalse) {
+ NodeProperties::ChangeOp(
+ control_node, common()->IfValue(parameters.value(),
+ parameters.comparison_order(),
+ BranchHint::kFalse));
+ }
+ return;
+ }
+ case IrOpcode::kIfDefault:
+ if (BranchHintOf(control_node->op()) != BranchHint::kFalse) {
+ NodeProperties::ChangeOp(control_node,
+ common()->IfDefault(BranchHint::kFalse));
+ }
+ return;
+ case IrOpcode::kIfTrue: {
+ Node* branch = NodeProperties::GetControlInput(control_node);
+ BranchHint hint = BranchOperatorInfoOf(branch->op()).hint;
+ if (hint == BranchHint::kTrue) {
+          // The other projection is deferred as well, so the responsible
+          // branch has to come earlier in the control chain.
+ control_node = NodeProperties::GetControlInput(branch);
+ continue;
+ }
+ new_branch_hint = BranchHint::kFalse;
+ responsible_branch = branch;
+ break;
+ }
+ case IrOpcode::kIfFalse: {
+ Node* branch = NodeProperties::GetControlInput(control_node);
+ BranchHint hint = BranchOperatorInfoOf(branch->op()).hint;
+ if (hint == BranchHint::kFalse) {
+          // The other projection is deferred as well, so the responsible
+          // branch has to come earlier in the control chain.
+ control_node = NodeProperties::GetControlInput(branch);
+ continue;
+ }
+ new_branch_hint = BranchHint::kTrue;
+ responsible_branch = branch;
+ break;
+ }
+ case IrOpcode::kMerge:
+ for (int i = 0; i < control_node->op()->ControlInputCount(); ++i) {
+ MarkControlDeferred(NodeProperties::GetControlInput(control_node, i));
+ }
+ return;
+ case IrOpcode::kLoop:
+ control_node = NodeProperties::GetControlInput(control_node, 0);
+ continue;
+ case IrOpcode::kBranch:
+ case IrOpcode::kSwitch:
+ UNREACHABLE();
+ case IrOpcode::kStart:
+ return;
+ default:
+ DCHECK_EQ(1, control_node->op()->ControlInputCount());
+ control_node = NodeProperties::GetControlInput(control_node);
+ continue;
+ }
+ }
+
+ BranchOperatorInfo info = BranchOperatorInfoOf(responsible_branch->op());
+ if (info.hint == new_branch_hint) return;
+ NodeProperties::ChangeOp(
+ responsible_branch,
+ common()->Branch(new_branch_hint, info.is_safety_check));
+}
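
The hint flip at the end of MarkControlDeferred follows one rule: if the deferred path was reached through the branch's IfTrue projection, the branch should be hinted false, and vice versa. A compact sketch of just that rule:

    #include <cassert>

    enum class BranchHint { kNone, kTrue, kFalse };

    BranchHint HintForDeferredSuccessor(bool reached_via_if_true) {
      // Predict away from the deferred (cold) side.
      return reached_via_if_true ? BranchHint::kFalse : BranchHint::kTrue;
    }

    int main() {
      assert(HintForDeferredSuccessor(true) == BranchHint::kFalse);
      assert(HintForDeferredSuccessor(false) == BranchHint::kTrue);
    }
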
+
Node* RawMachineAssembler::TargetParameter() {
DCHECK_NOT_NULL(target_parameter_);
return target_parameter_;
}
Node* RawMachineAssembler::Parameter(size_t index) {
- DCHECK(index < parameter_count());
+ DCHECK_LT(index, parameter_count());
return parameters_[index];
}
@@ -101,7 +436,16 @@ void RawMachineAssembler::Branch(Node* condition, RawMachineLabel* true_val,
Node* branch = MakeNode(
common()->Branch(BranchHint::kNone, IsSafetyCheck::kNoSafetyCheck), 1,
&condition);
- schedule()->AddBranch(CurrentBlock(), branch, Use(true_val), Use(false_val));
+ BasicBlock* true_block = schedule()->NewBasicBlock();
+ BasicBlock* false_block = schedule()->NewBasicBlock();
+ schedule()->AddBranch(CurrentBlock(), branch, true_block, false_block);
+
+ true_block->AddNode(MakeNode(common()->IfTrue(), 1, &branch));
+ schedule()->AddGoto(true_block, Use(true_val));
+
+ false_block->AddNode(MakeNode(common()->IfFalse(), 1, &branch));
+ schedule()->AddGoto(false_block, Use(false_val));
+
current_block_ = nullptr;
}
@@ -119,7 +463,7 @@ void RawMachineAssembler::Switch(Node* index, RawMachineLabel* default_label,
size_t case_count) {
DCHECK_NE(schedule()->end(), current_block_);
size_t succ_count = case_count + 1;
- Node* switch_node = AddNode(common()->Switch(succ_count), index);
+ Node* switch_node = MakeNode(common()->Switch(succ_count), 1, &index);
BasicBlock** succ_blocks = zone()->NewArray<BasicBlock*>(succ_count);
for (size_t index = 0; index < case_count; ++index) {
int32_t case_value = case_values[index];
@@ -220,8 +564,11 @@ void RawMachineAssembler::Unreachable() {
current_block_ = nullptr;
}
-void RawMachineAssembler::Comment(const char* msg) {
- AddNode(machine()->Comment(msg));
+void RawMachineAssembler::Comment(std::string msg) {
+ size_t length = msg.length() + 1;
+ char* zone_buffer = zone()->NewArray<char>(length);
+ MemCopy(zone_buffer, msg.c_str(), length);
+ AddNode(machine()->Comment(zone_buffer));
}
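
Comment now takes a std::string whose storage dies with the call, so the bytes are copied into zone-owned memory that lives as long as the graph. A sketch of that copy with a toy bump allocator standing in for V8's Zone (no bounds checking, illustration only):

    #include <cstddef>
    #include <cstdio>
    #include <cstring>
    #include <string>

    struct ToyZone {
      char buffer[1024];
      std::size_t used = 0;
      char* NewArray(std::size_t n) {  // toy: assumes it never runs out
        char* p = buffer + used;
        used += n;
        return p;
      }
    };

    const char* CopyToZone(ToyZone* zone, const std::string& msg) {
      std::size_t length = msg.length() + 1;  // include the terminating NUL
      char* zone_buffer = zone->NewArray(length);
      std::memcpy(zone_buffer, msg.c_str(), length);
      return zone_buffer;
    }

    int main() {
      ToyZone zone;
      const char* comment = CopyToZone(&zone, std::string("spill slot setup"));
      std::printf("%s\n", comment);
    }
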
Node* RawMachineAssembler::CallN(CallDescriptor* call_descriptor,
@@ -478,14 +825,14 @@ void RawMachineAssembler::PrintCurrentBlock(std::ostream& os) {
os << CurrentBlock();
}
-bool RawMachineAssembler::InsideBlock() { return current_block_ != nullptr; }
-
void RawMachineAssembler::SetInitialDebugInformation(
AssemblerDebugInfo debug_info) {
CurrentBlock()->set_debug_info(debug_info);
}
#endif // DEBUG
+bool RawMachineAssembler::InsideBlock() { return current_block_ != nullptr; }
+
BasicBlock* RawMachineAssembler::CurrentBlock() {
DCHECK(current_block_);
return current_block_;
diff --git a/deps/v8/src/compiler/raw-machine-assembler.h b/deps/v8/src/compiler/raw-machine-assembler.h
index 9b4806a3a6..11aefe570c 100644
--- a/deps/v8/src/compiler/raw-machine-assembler.h
+++ b/deps/v8/src/compiler/raw-machine-assembler.h
@@ -6,14 +6,17 @@
#define V8_COMPILER_RAW_MACHINE_ASSEMBLER_H_
#include "src/assembler.h"
+#include "src/compiler/access-builder.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/graph.h"
#include "src/compiler/linkage.h"
#include "src/compiler/machine-operator.h"
#include "src/compiler/node.h"
#include "src/compiler/operator.h"
+#include "src/compiler/simplified-operator.h"
#include "src/globals.h"
#include "src/heap/factory.h"
+#include "src/isolate.h"
namespace v8 {
namespace internal {
@@ -22,7 +25,7 @@ namespace compiler {
class BasicBlock;
class RawMachineLabel;
class Schedule;
-
+class SourcePositionTable;
// The RawMachineAssembler produces a low-level IR graph. All nodes are wired
// into a graph and also placed into a schedule immediately, hence subsequent
@@ -54,12 +57,17 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
Zone* zone() const { return graph()->zone(); }
MachineOperatorBuilder* machine() { return &machine_; }
CommonOperatorBuilder* common() { return &common_; }
+ SimplifiedOperatorBuilder* simplified() { return &simplified_; }
CallDescriptor* call_descriptor() const { return call_descriptor_; }
PoisoningMitigationLevel poisoning_level() const { return poisoning_level_; }
// Finalizes the schedule and exports it to be used for code generation. Note
// that this RawMachineAssembler becomes invalid after export.
Schedule* Export();
+  // Finalizes the schedule and transforms it into a graph suitable for
+  // TurboFan optimization and re-scheduling. Note that this
+ // RawMachineAssembler becomes invalid after export.
+ Graph* ExportForOptimization();
// ===========================================================================
// The following utility methods create new nodes with specific operators and
@@ -75,8 +83,8 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
}
Node* IntPtrConstant(intptr_t value) {
// TODO(dcarney): mark generated code as unserializable if value != 0.
- return kPointerSize == 8 ? Int64Constant(value)
- : Int32Constant(static_cast<int>(value));
+ return kSystemPointerSize == 8 ? Int64Constant(value)
+ : Int32Constant(static_cast<int>(value));
}
Node* RelocatableIntPtrConstant(intptr_t value, RelocInfo::Mode rmode);
Node* Int32Constant(int32_t value) {
@@ -100,10 +108,6 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
Node* HeapConstant(Handle<HeapObject> object) {
return AddNode(common()->HeapConstant(object));
}
- Node* BooleanConstant(bool value) {
- Handle<Object> object = isolate()->factory()->ToBoolean(value);
- return HeapConstant(Handle<HeapObject>::cast(object));
- }
Node* ExternalConstant(ExternalReference address) {
return AddNode(common()->ExternalConstant(address));
}
@@ -142,8 +146,21 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
return AddNode(machine()->Store(StoreRepresentation(rep, write_barrier)),
base, index, value);
}
+ void OptimizedStoreField(MachineRepresentation rep, Node* object, int offset,
+ Node* value, WriteBarrierKind write_barrier) {
+ AddNode(simplified()->StoreField(FieldAccess(
+ BaseTaggedness::kTaggedBase, offset, MaybeHandle<Name>(),
+ MaybeHandle<Map>(), Type::Any(),
+ MachineType::TypeForRepresentation(rep), write_barrier)),
+ object, value);
+ }
+ void OptimizedStoreMap(Node* object, Node* value) {
+ AddNode(simplified()->StoreField(AccessBuilder::ForMap()), object, value);
+ }
Node* Retain(Node* value) { return AddNode(common()->Retain(), value); }
+ Node* OptimizedAllocate(Node* size, PretenureFlag pretenure);
+
// Unaligned memory operations
Node* UnalignedLoad(MachineType type, Node* base) {
return UnalignedLoad(type, base, IntPtrConstant(0));
@@ -473,10 +490,10 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
return AddNode(machine()->Word32PairSar(), low_word, high_word, shift);
}
-#define INTPTR_BINOP(prefix, name) \
- Node* IntPtr##name(Node* a, Node* b) { \
- return kPointerSize == 8 ? prefix##64##name(a, b) \
- : prefix##32##name(a, b); \
+#define INTPTR_BINOP(prefix, name) \
+ Node* IntPtr##name(Node* a, Node* b) { \
+ return kSystemPointerSize == 8 ? prefix##64##name(a, b) \
+ : prefix##32##name(a, b); \
}
INTPTR_BINOP(Int, Add);
@@ -494,10 +511,10 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
#undef INTPTR_BINOP
-#define UINTPTR_BINOP(prefix, name) \
- Node* UintPtr##name(Node* a, Node* b) { \
- return kPointerSize == 8 ? prefix##64##name(a, b) \
- : prefix##32##name(a, b); \
+#define UINTPTR_BINOP(prefix, name) \
+ Node* UintPtr##name(Node* a, Node* b) { \
+ return kSystemPointerSize == 8 ? prefix##64##name(a, b) \
+ : prefix##32##name(a, b); \
}
UINTPTR_BINOP(Uint, LessThan);
@@ -516,8 +533,8 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
}
Node* IntPtrAbsWithOverflow(Node* a) {
- return kPointerSize == 8 ? Int64AbsWithOverflow(a)
- : Int32AbsWithOverflow(a);
+ return kSystemPointerSize == 8 ? Int64AbsWithOverflow(a)
+ : Int32AbsWithOverflow(a);
}
Node* Float32Add(Node* a, Node* b) {
@@ -625,28 +642,16 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
// Conversions.
Node* BitcastTaggedToWord(Node* a) {
-#ifdef ENABLE_VERIFY_CSA
- return AddNode(machine()->BitcastTaggedToWord(), a);
-#else
- return a;
-#endif
+ return AddNode(machine()->BitcastTaggedToWord(), a);
}
Node* BitcastMaybeObjectToWord(Node* a) {
-#ifdef ENABLE_VERIFY_CSA
- return AddNode(machine()->BitcastMaybeObjectToWord(), a);
-#else
- return a;
-#endif
+ return AddNode(machine()->BitcastMaybeObjectToWord(), a);
}
Node* BitcastWordToTagged(Node* a) {
return AddNode(machine()->BitcastWordToTagged(), a);
}
Node* BitcastWordToTaggedSigned(Node* a) {
-#ifdef ENABLE_VERIFY_CSA
- return AddNode(machine()->BitcastWordToTaggedSigned(), a);
-#else
- return a;
-#endif
+ return AddNode(machine()->BitcastWordToTaggedSigned(), a);
}
Node* TruncateFloat64ToWord32(Node* a) {
return AddNode(machine()->TruncateFloat64ToWord32(), a);
@@ -936,14 +941,14 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
void DebugAbort(Node* message);
void DebugBreak();
void Unreachable();
- void Comment(const char* msg);
+ void Comment(std::string msg);
#if DEBUG
void Bind(RawMachineLabel* label, AssemblerDebugInfo info);
void SetInitialDebugInformation(AssemblerDebugInfo info);
void PrintCurrentBlock(std::ostream& os);
- bool InsideBlock();
#endif // DEBUG
+ bool InsideBlock();
// Add success / exception successor blocks and ends the current block ending
// in a potentially throwing call node.
@@ -980,20 +985,37 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
return AddNode(op, sizeof...(args) + 1, buffer);
}
+ void SetSourcePosition(const char* file, int line);
+ SourcePositionTable* source_positions() { return source_positions_; }
+
private:
Node* MakeNode(const Operator* op, int input_count, Node* const* inputs);
BasicBlock* Use(RawMachineLabel* label);
BasicBlock* EnsureBlock(RawMachineLabel* label);
BasicBlock* CurrentBlock();
+ // A post-processing pass to add effect and control edges so that the graph
+ // can be optimized and re-scheduled.
+ // TODO(tebbi): Move this to a separate class.
+ void MakeReschedulable();
+ Node* CreateNodeFromPredecessors(const std::vector<BasicBlock*>& predecessors,
+ const std::vector<Node*>& sidetable,
+ const Operator* op,
+ const std::vector<Node*>& additional_inputs);
+ void MakePhiBinary(Node* phi, int split_point, Node* left_control,
+ Node* right_control);
+ void MarkControlDeferred(Node* control_input);
+
Schedule* schedule() { return schedule_; }
size_t parameter_count() const { return call_descriptor_->ParameterCount(); }
Isolate* isolate_;
Graph* graph_;
Schedule* schedule_;
+ SourcePositionTable* source_positions_;
MachineOperatorBuilder machine_;
CommonOperatorBuilder common_;
+ SimplifiedOperatorBuilder simplified_;
CallDescriptor* call_descriptor_;
Node* target_parameter_;
NodeVector parameters_;
diff --git a/deps/v8/src/compiler/redundancy-elimination.cc b/deps/v8/src/compiler/redundancy-elimination.cc
index fdab39fb43..5fe10848e0 100644
--- a/deps/v8/src/compiler/redundancy-elimination.cc
+++ b/deps/v8/src/compiler/redundancy-elimination.cc
@@ -29,6 +29,7 @@ Reduction RedundancyElimination::Reduce(Node* node) {
case IrOpcode::kCheckNotTaggedHole:
case IrOpcode::kCheckNumber:
case IrOpcode::kCheckReceiver:
+ case IrOpcode::kCheckReceiverOrNullOrUndefined:
case IrOpcode::kCheckSmi:
case IrOpcode::kCheckString:
case IrOpcode::kCheckSymbol:
@@ -135,6 +136,9 @@ bool CheckSubsumes(Node const* a, Node const* b) {
} else if (a->opcode() == IrOpcode::kCheckedTaggedSignedToInt32 &&
b->opcode() == IrOpcode::kCheckedTaggedToInt32) {
// CheckedTaggedSignedToInt32(node) implies CheckedTaggedToInt32(node)
+ } else if (a->opcode() == IrOpcode::kCheckReceiver &&
+ b->opcode() == IrOpcode::kCheckReceiverOrNullOrUndefined) {
+ // CheckReceiver(node) implies CheckReceiverOrNullOrUndefined(node)
} else if (a->opcode() != b->opcode()) {
return false;
} else {
@@ -150,13 +154,17 @@ bool CheckSubsumes(Node const* a, Node const* b) {
case IrOpcode::kCheckedTaggedSignedToInt32:
case IrOpcode::kCheckedTaggedToTaggedPointer:
case IrOpcode::kCheckedTaggedToTaggedSigned:
+ case IrOpcode::kCheckedUint32Bounds:
case IrOpcode::kCheckedUint32ToInt32:
case IrOpcode::kCheckedUint32ToTaggedSigned:
+ case IrOpcode::kCheckedUint64Bounds:
case IrOpcode::kCheckedUint64ToInt32:
case IrOpcode::kCheckedUint64ToTaggedSigned:
break;
case IrOpcode::kCheckedFloat64ToInt32:
- case IrOpcode::kCheckedTaggedToInt32: {
+ case IrOpcode::kCheckedFloat64ToInt64:
+ case IrOpcode::kCheckedTaggedToInt32:
+ case IrOpcode::kCheckedTaggedToInt64: {
const CheckMinusZeroParameters& ap =
CheckMinusZeroParametersOf(a->op());
const CheckMinusZeroParameters& bp =
@@ -192,11 +200,22 @@ bool CheckSubsumes(Node const* a, Node const* b) {
return true;
}
+bool TypeSubsumes(Node* node, Node* replacement) {
+ if (!NodeProperties::IsTyped(node) || !NodeProperties::IsTyped(replacement)) {
+ // If either node is untyped, we are running during an untyped optimization
+ // phase, and replacement is OK.
+ return true;
+ }
+ Type node_type = NodeProperties::GetType(node);
+ Type replacement_type = NodeProperties::GetType(replacement);
+ return replacement_type.Is(node_type);
+}
+
} // namespace
Node* RedundancyElimination::EffectPathChecks::LookupCheck(Node* node) const {
for (Check const* check = head_; check != nullptr; check = check->next) {
- if (CheckSubsumes(check->node, node)) {
+ if (CheckSubsumes(check->node, node) && TypeSubsumes(node, check->node)) {
DCHECK(!check->node->IsDead());
return check->node;
}
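
The new TypeSubsumes guard makes check reuse sound: an earlier check node may only replace a later one if its type is no wider, otherwise typing information would be lost. A toy model where sets of allowed values stand in for compiler types:

    #include <algorithm>
    #include <cassert>
    #include <vector>

    // replacement_type must be a subset of node_type for the swap to be safe.
    bool TypeSubsumes(const std::vector<int>& node_type,
                      const std::vector<int>& replacement_type) {
      return std::all_of(
          replacement_type.begin(), replacement_type.end(), [&](int v) {
            return std::find(node_type.begin(), node_type.end(), v) !=
                   node_type.end();
          });
    }

    int main() {
      std::vector<int> node_type{-1, 0, 1};
      assert(TypeSubsumes(node_type, {0, 1}));      // narrower: reuse is OK
      assert(!TypeSubsumes(node_type, {0, 1, 2}));  // wider: would lose precision
    }
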
diff --git a/deps/v8/src/compiler/representation-change.cc b/deps/v8/src/compiler/representation-change.cc
index b141cad773..dfb0ff3f7b 100644
--- a/deps/v8/src/compiler/representation-change.cc
+++ b/deps/v8/src/compiler/representation-change.cc
@@ -192,8 +192,10 @@ Node* RepresentationChanger::GetRepresentationFor(
return GetWord32RepresentationFor(node, output_rep, output_type, use_node,
use_info);
case MachineRepresentation::kWord64:
- DCHECK_EQ(TypeCheckKind::kNone, use_info.type_check());
- return GetWord64RepresentationFor(node, output_rep, output_type);
+ DCHECK(use_info.type_check() == TypeCheckKind::kNone ||
+ use_info.type_check() == TypeCheckKind::kSigned64);
+ return GetWord64RepresentationFor(node, output_rep, output_type, use_node,
+ use_info);
case MachineRepresentation::kSimd128:
case MachineRepresentation::kNone:
return node;
@@ -250,9 +252,9 @@ Node* RepresentationChanger::GetTaggedSignedRepresentationFor(
node = InsertTruncateInt64ToInt32(node);
op = simplified()->ChangeInt32ToTagged();
} else if (use_info.type_check() == TypeCheckKind::kSignedSmall) {
- if (output_type.Is(cache_.kPositiveSafeInteger)) {
+ if (output_type.Is(cache_->kPositiveSafeInteger)) {
op = simplified()->CheckedUint64ToTaggedSigned(use_info.feedback());
- } else if (output_type.Is(cache_.kSafeInteger)) {
+ } else if (output_type.Is(cache_->kSafeInteger)) {
op = simplified()->CheckedInt64ToTaggedSigned(use_info.feedback());
} else {
return TypeError(node, output_rep, output_type,
@@ -386,7 +388,7 @@ Node* RepresentationChanger::GetTaggedPointerRepresentationFor(
}
op = simplified()->ChangeFloat64ToTaggedPointer();
} else if (output_rep == MachineRepresentation::kWord64) {
- if (output_type.Is(cache_.kSafeInteger)) {
+ if (output_type.Is(cache_->kSafeInteger)) {
// int64 -> float64 -> tagged pointer
op = machine()->ChangeInt64ToFloat64();
node = jsgraph()->graph()->NewNode(op, node);
@@ -489,10 +491,10 @@ Node* RepresentationChanger::GetTaggedRepresentationFor(
// int64 -> uint32 -> tagged
node = InsertTruncateInt64ToInt32(node);
op = simplified()->ChangeUint32ToTagged();
- } else if (output_type.Is(cache_.kPositiveSafeInteger)) {
+ } else if (output_type.Is(cache_->kPositiveSafeInteger)) {
// uint64 -> tagged
op = simplified()->ChangeUint64ToTagged();
- } else if (output_type.Is(cache_.kSafeInteger)) {
+ } else if (output_type.Is(cache_->kSafeInteger)) {
// int64 -> tagged
op = simplified()->ChangeInt64ToTagged();
} else {
@@ -586,7 +588,7 @@ Node* RepresentationChanger::GetFloat32RepresentationFor(
} else if (output_rep == MachineRepresentation::kFloat64) {
op = machine()->TruncateFloat64ToFloat32();
} else if (output_rep == MachineRepresentation::kWord64) {
- if (output_type.Is(cache_.kSafeInteger)) {
+ if (output_type.Is(cache_->kSafeInteger)) {
// int64 -> float64 -> float32
op = machine()->ChangeInt64ToFloat64();
node = jsgraph()->graph()->NewNode(op, node);
@@ -603,18 +605,17 @@ Node* RepresentationChanger::GetFloat32RepresentationFor(
Node* RepresentationChanger::GetFloat64RepresentationFor(
Node* node, MachineRepresentation output_rep, Type output_type,
Node* use_node, UseInfo use_info) {
- // Eagerly fold representation changes for constants.
- if ((use_info.type_check() == TypeCheckKind::kNone)) {
- // TODO(jarin) Handle checked constant conversions.
- switch (node->opcode()) {
- case IrOpcode::kNumberConstant:
- return jsgraph()->Float64Constant(OpParameter<double>(node->op()));
- case IrOpcode::kInt32Constant:
- case IrOpcode::kFloat64Constant:
- case IrOpcode::kFloat32Constant:
- UNREACHABLE();
- break;
- default:
+ NumberMatcher m(node);
+ if (m.HasValue()) {
+ switch (use_info.type_check()) {
+ case TypeCheckKind::kNone:
+ case TypeCheckKind::kNumber:
+ case TypeCheckKind::kNumberOrOddball:
+ return jsgraph()->Float64Constant(m.Value());
+ case TypeCheckKind::kHeapObject:
+ case TypeCheckKind::kSigned32:
+ case TypeCheckKind::kSigned64:
+ case TypeCheckKind::kSignedSmall:
break;
}
}
@@ -664,7 +665,7 @@ Node* RepresentationChanger::GetFloat64RepresentationFor(
} else if (output_rep == MachineRepresentation::kFloat32) {
op = machine()->ChangeFloat32ToFloat64();
} else if (output_rep == MachineRepresentation::kWord64) {
- if (output_type.Is(cache_.kSafeInteger)) {
+ if (output_type.Is(cache_->kSafeInteger)) {
op = machine()->ChangeInt64ToFloat64();
}
}
@@ -707,7 +708,9 @@ Node* RepresentationChanger::GetWord32RepresentationFor(
double const fv = OpParameter<double>(node->op());
if (use_info.type_check() == TypeCheckKind::kNone ||
((use_info.type_check() == TypeCheckKind::kSignedSmall ||
- use_info.type_check() == TypeCheckKind::kSigned32) &&
+ use_info.type_check() == TypeCheckKind::kSigned32 ||
+ use_info.type_check() == TypeCheckKind::kNumber ||
+ use_info.type_check() == TypeCheckKind::kNumberOrOddball) &&
IsInt32Double(fv))) {
return MakeTruncatedInt32Constant(fv);
}
@@ -808,19 +811,17 @@ Node* RepresentationChanger::GetWord32RepresentationFor(
MachineRepresentation::kWord32);
}
} else if (output_rep == MachineRepresentation::kWord32) {
- if (use_info.truncation().IdentifiesZeroAndMinusZero()) {
- if (output_type.Is(Type::Signed32OrMinusZero()) ||
- output_type.Is(Type::Unsigned32OrMinusZero())) {
- return node;
- }
- }
// Only the checked case should get here, the non-checked case is
// handled in GetRepresentationFor.
if (use_info.type_check() == TypeCheckKind::kSignedSmall ||
use_info.type_check() == TypeCheckKind::kSigned32) {
- if (output_type.Is(Type::Signed32())) {
+      bool identify_zeros = use_info.truncation().IdentifiesZeroAndMinusZero();
+ if (output_type.Is(Type::Signed32()) ||
+          (identify_zeros && output_type.Is(Type::Signed32OrMinusZero()))) {
return node;
- } else if (output_type.Is(Type::Unsigned32())) {
+ } else if (output_type.Is(Type::Unsigned32()) ||
+                 (identify_zeros &&
+ output_type.Is(Type::Unsigned32OrMinusZero()))) {
op = simplified()->CheckedUint32ToInt32(use_info.feedback());
} else {
return TypeError(node, output_rep, output_type,
@@ -840,14 +841,14 @@ Node* RepresentationChanger::GetWord32RepresentationFor(
if (output_type.Is(Type::Signed32()) ||
output_type.Is(Type::Unsigned32())) {
op = machine()->TruncateInt64ToInt32();
- } else if (output_type.Is(cache_.kSafeInteger) &&
+ } else if (output_type.Is(cache_->kSafeInteger) &&
use_info.truncation().IsUsedAsWord32()) {
op = machine()->TruncateInt64ToInt32();
} else if (use_info.type_check() == TypeCheckKind::kSignedSmall ||
use_info.type_check() == TypeCheckKind::kSigned32) {
- if (output_type.Is(cache_.kPositiveSafeInteger)) {
+ if (output_type.Is(cache_->kPositiveSafeInteger)) {
op = simplified()->CheckedUint64ToInt32(use_info.feedback());
- } else if (output_type.Is(cache_.kSafeInteger)) {
+ } else if (output_type.Is(cache_->kSafeInteger)) {
op = simplified()->CheckedInt64ToInt32(use_info.feedback());
} else {
return TypeError(node, output_rep, output_type,
@@ -948,7 +949,8 @@ Node* RepresentationChanger::GetBitRepresentationFor(
}
Node* RepresentationChanger::GetWord64RepresentationFor(
- Node* node, MachineRepresentation output_rep, Type output_type) {
+ Node* node, MachineRepresentation output_rep, Type output_type,
+ Node* use_node, UseInfo use_info) {
// Eagerly fold representation changes for constants.
switch (node->opcode()) {
case IrOpcode::kInt32Constant:
@@ -987,23 +989,37 @@ Node* RepresentationChanger::GetWord64RepresentationFor(
MachineRepresentation::kWord64);
}
} else if (output_rep == MachineRepresentation::kFloat32) {
- if (output_type.Is(cache_.kInt64)) {
+ if (output_type.Is(cache_->kInt64)) {
// float32 -> float64 -> int64
node = InsertChangeFloat32ToFloat64(node);
op = machine()->ChangeFloat64ToInt64();
- } else if (output_type.Is(cache_.kUint64)) {
+ } else if (output_type.Is(cache_->kUint64)) {
// float32 -> float64 -> uint64
node = InsertChangeFloat32ToFloat64(node);
op = machine()->ChangeFloat64ToUint64();
+ } else if (use_info.type_check() == TypeCheckKind::kSigned64) {
+ // float32 -> float64 -> int64
+ node = InsertChangeFloat32ToFloat64(node);
+ op = simplified()->CheckedFloat64ToInt64(
+ output_type.Maybe(Type::MinusZero())
+ ? use_info.minus_zero_check()
+ : CheckForMinusZeroMode::kDontCheckForMinusZero,
+ use_info.feedback());
} else {
return TypeError(node, output_rep, output_type,
MachineRepresentation::kWord64);
}
} else if (output_rep == MachineRepresentation::kFloat64) {
- if (output_type.Is(cache_.kInt64)) {
+ if (output_type.Is(cache_->kInt64)) {
op = machine()->ChangeFloat64ToInt64();
- } else if (output_type.Is(cache_.kUint64)) {
+ } else if (output_type.Is(cache_->kUint64)) {
op = machine()->ChangeFloat64ToUint64();
+ } else if (use_info.type_check() == TypeCheckKind::kSigned64) {
+ op = simplified()->CheckedFloat64ToInt64(
+ output_type.Maybe(Type::MinusZero())
+ ? use_info.minus_zero_check()
+ : CheckForMinusZeroMode::kDontCheckForMinusZero,
+ use_info.feedback());
} else {
return TypeError(node, output_rep, output_type,
MachineRepresentation::kWord64);
@@ -1016,8 +1032,14 @@ Node* RepresentationChanger::GetWord64RepresentationFor(
MachineRepresentation::kWord64);
}
} else if (CanBeTaggedPointer(output_rep)) {
- if (output_type.Is(cache_.kInt64)) {
+ if (output_type.Is(cache_->kInt64)) {
op = simplified()->ChangeTaggedToInt64();
+ } else if (use_info.type_check() == TypeCheckKind::kSigned64) {
+ op = simplified()->CheckedTaggedToInt64(
+ output_type.Maybe(Type::MinusZero())
+ ? use_info.minus_zero_check()
+ : CheckForMinusZeroMode::kDontCheckForMinusZero,
+ use_info.feedback());
} else {
return TypeError(node, output_rep, output_type,
MachineRepresentation::kWord64);
@@ -1026,7 +1048,7 @@ Node* RepresentationChanger::GetWord64RepresentationFor(
return TypeError(node, output_rep, output_type,
MachineRepresentation::kWord64);
}
- return jsgraph()->graph()->NewNode(op, node);
+ return InsertConversion(node, op, use_node);
}
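
The kSigned64 paths above emit CheckedFloat64ToInt64-style conversions that must bail out on non-integers, out-of-range values, and (when the minus-zero mode asks for it) -0.0. A minimal runtime sketch of those checks, with deoptimization modeled as an empty optional:

    #include <cassert>
    #include <cmath>
    #include <cstdint>
    #include <optional>

    std::optional<int64_t> CheckedFloat64ToInt64(double v, bool check_minus_zero) {
      if (std::nearbyint(v) != v) return std::nullopt;  // NaN or not an integer
      if (v < -9223372036854775808.0 || v >= 9223372036854775808.0) {
        return std::nullopt;  // outside int64 range
      }
      if (check_minus_zero && v == 0.0 && std::signbit(v)) {
        return std::nullopt;  // -0.0 would silently become 0
      }
      return static_cast<int64_t>(v);
    }

    int main() {
      assert(CheckedFloat64ToInt64(42.0, true).value() == 42);
      assert(!CheckedFloat64ToInt64(-0.0, true).has_value());
      assert(CheckedFloat64ToInt64(-0.0, false).value() == 0);
      assert(!CheckedFloat64ToInt64(1.5, true).has_value());
    }
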
const Operator* RepresentationChanger::Int32OperatorFor(
diff --git a/deps/v8/src/compiler/representation-change.h b/deps/v8/src/compiler/representation-change.h
index 673c062d94..b6e0d279de 100644
--- a/deps/v8/src/compiler/representation-change.h
+++ b/deps/v8/src/compiler/representation-change.h
@@ -106,6 +106,7 @@ enum class TypeCheckKind : uint8_t {
kNone,
kSignedSmall,
kSigned32,
+ kSigned64,
kNumber,
kNumberOrOddball,
kHeapObject
@@ -119,6 +120,8 @@ inline std::ostream& operator<<(std::ostream& os, TypeCheckKind type_check) {
return os << "SignedSmall";
case TypeCheckKind::kSigned32:
return os << "Signed32";
+ case TypeCheckKind::kSigned64:
+ return os << "Signed64";
case TypeCheckKind::kNumber:
return os << "Number";
case TypeCheckKind::kNumberOrOddball:
@@ -208,6 +211,12 @@ class UseInfo {
Truncation::Any(identify_zeros), TypeCheckKind::kSigned32,
feedback);
}
+ static UseInfo CheckedSigned64AsWord64(IdentifyZeros identify_zeros,
+ const VectorSlotPair& feedback) {
+ return UseInfo(MachineRepresentation::kWord64,
+ Truncation::Any(identify_zeros), TypeCheckKind::kSigned64,
+ feedback);
+ }
static UseInfo CheckedNumberAsFloat64(IdentifyZeros identify_zeros,
const VectorSlotPair& feedback) {
return UseInfo(MachineRepresentation::kFloat64,
@@ -293,7 +302,7 @@ class RepresentationChanger final {
}
private:
- TypeCache const& cache_;
+ TypeCache const* cache_;
JSGraph* jsgraph_;
Isolate* isolate_;
@@ -325,7 +334,8 @@ class RepresentationChanger final {
Node* GetBitRepresentationFor(Node* node, MachineRepresentation output_rep,
Type output_type);
Node* GetWord64RepresentationFor(Node* node, MachineRepresentation output_rep,
- Type output_type);
+ Type output_type, Node* use_node,
+ UseInfo use_info);
Node* TypeError(Node* node, MachineRepresentation output_rep,
Type output_type, MachineRepresentation use);
Node* MakeTruncatedInt32Constant(double value);
diff --git a/deps/v8/src/compiler/s390/OWNERS b/deps/v8/src/compiler/s390/OWNERS
deleted file mode 100644
index cf60da5cc7..0000000000
--- a/deps/v8/src/compiler/s390/OWNERS
+++ /dev/null
@@ -1,7 +0,0 @@
-jyan@ca.ibm.com
-dstence@us.ibm.com
-joransiu@ca.ibm.com
-mbrandy@us.ibm.com
-michael_dawson@ca.ibm.com
-jbarboza@ca.ibm.com
-mmallick@ca.ibm.com
diff --git a/deps/v8/src/compiler/s390/instruction-codes-s390.h b/deps/v8/src/compiler/s390/instruction-codes-s390.h
deleted file mode 100644
index 9a704f9bef..0000000000
--- a/deps/v8/src/compiler/s390/instruction-codes-s390.h
+++ /dev/null
@@ -1,225 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_COMPILER_S390_INSTRUCTION_CODES_S390_H_
-#define V8_COMPILER_S390_INSTRUCTION_CODES_S390_H_
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-// S390-specific opcodes that specify which assembly sequence to emit.
-// Most opcodes specify a single instruction.
-#define TARGET_ARCH_OPCODE_LIST(V) \
- V(S390_Abs32) \
- V(S390_Abs64) \
- V(S390_And32) \
- V(S390_And64) \
- V(S390_Or32) \
- V(S390_Or64) \
- V(S390_Xor32) \
- V(S390_Xor64) \
- V(S390_ShiftLeft32) \
- V(S390_ShiftLeft64) \
- V(S390_ShiftLeftPair) \
- V(S390_ShiftRight32) \
- V(S390_ShiftRight64) \
- V(S390_ShiftRightPair) \
- V(S390_ShiftRightArith32) \
- V(S390_ShiftRightArith64) \
- V(S390_ShiftRightArithPair) \
- V(S390_RotRight32) \
- V(S390_RotRight64) \
- V(S390_Not32) \
- V(S390_Not64) \
- V(S390_RotLeftAndClear64) \
- V(S390_RotLeftAndClearLeft64) \
- V(S390_RotLeftAndClearRight64) \
- V(S390_Lay) \
- V(S390_Add32) \
- V(S390_Add64) \
- V(S390_AddPair) \
- V(S390_AddFloat) \
- V(S390_AddDouble) \
- V(S390_Sub32) \
- V(S390_Sub64) \
- V(S390_SubFloat) \
- V(S390_SubDouble) \
- V(S390_SubPair) \
- V(S390_MulPair) \
- V(S390_Mul32) \
- V(S390_Mul32WithOverflow) \
- V(S390_Mul64) \
- V(S390_MulHigh32) \
- V(S390_MulHighU32) \
- V(S390_MulFloat) \
- V(S390_MulDouble) \
- V(S390_Div32) \
- V(S390_Div64) \
- V(S390_DivU32) \
- V(S390_DivU64) \
- V(S390_DivFloat) \
- V(S390_DivDouble) \
- V(S390_Mod32) \
- V(S390_Mod64) \
- V(S390_ModU32) \
- V(S390_ModU64) \
- V(S390_ModDouble) \
- V(S390_Neg32) \
- V(S390_Neg64) \
- V(S390_NegDouble) \
- V(S390_NegFloat) \
- V(S390_SqrtFloat) \
- V(S390_FloorFloat) \
- V(S390_CeilFloat) \
- V(S390_TruncateFloat) \
- V(S390_AbsFloat) \
- V(S390_SqrtDouble) \
- V(S390_FloorDouble) \
- V(S390_CeilDouble) \
- V(S390_TruncateDouble) \
- V(S390_RoundDouble) \
- V(S390_MaxFloat) \
- V(S390_MaxDouble) \
- V(S390_MinFloat) \
- V(S390_MinDouble) \
- V(S390_AbsDouble) \
- V(S390_Cntlz32) \
- V(S390_Cntlz64) \
- V(S390_Popcnt32) \
- V(S390_Popcnt64) \
- V(S390_Cmp32) \
- V(S390_Cmp64) \
- V(S390_CmpFloat) \
- V(S390_CmpDouble) \
- V(S390_Tst32) \
- V(S390_Tst64) \
- V(S390_Push) \
- V(S390_PushFrame) \
- V(S390_StackClaim) \
- V(S390_StoreToStackSlot) \
- V(S390_SignExtendWord8ToInt32) \
- V(S390_SignExtendWord16ToInt32) \
- V(S390_SignExtendWord8ToInt64) \
- V(S390_SignExtendWord16ToInt64) \
- V(S390_SignExtendWord32ToInt64) \
- V(S390_Uint32ToUint64) \
- V(S390_Int64ToInt32) \
- V(S390_Int64ToFloat32) \
- V(S390_Int64ToDouble) \
- V(S390_Uint64ToFloat32) \
- V(S390_Uint64ToDouble) \
- V(S390_Int32ToFloat32) \
- V(S390_Int32ToDouble) \
- V(S390_Uint32ToFloat32) \
- V(S390_Uint32ToDouble) \
- V(S390_Float32ToInt64) \
- V(S390_Float32ToUint64) \
- V(S390_Float32ToInt32) \
- V(S390_Float32ToUint32) \
- V(S390_Float32ToDouble) \
- V(S390_Float64SilenceNaN) \
- V(S390_DoubleToInt32) \
- V(S390_DoubleToUint32) \
- V(S390_DoubleToInt64) \
- V(S390_DoubleToUint64) \
- V(S390_DoubleToFloat32) \
- V(S390_DoubleExtractLowWord32) \
- V(S390_DoubleExtractHighWord32) \
- V(S390_DoubleInsertLowWord32) \
- V(S390_DoubleInsertHighWord32) \
- V(S390_DoubleConstruct) \
- V(S390_BitcastInt32ToFloat32) \
- V(S390_BitcastFloat32ToInt32) \
- V(S390_BitcastInt64ToDouble) \
- V(S390_BitcastDoubleToInt64) \
- V(S390_LoadWordS8) \
- V(S390_LoadWordU8) \
- V(S390_LoadWordS16) \
- V(S390_LoadWordU16) \
- V(S390_LoadWordS32) \
- V(S390_LoadWordU32) \
- V(S390_LoadAndTestWord32) \
- V(S390_LoadAndTestWord64) \
- V(S390_LoadAndTestFloat32) \
- V(S390_LoadAndTestFloat64) \
- V(S390_LoadReverse16RR) \
- V(S390_LoadReverse32RR) \
- V(S390_LoadReverse64RR) \
- V(S390_LoadReverse16) \
- V(S390_LoadReverse32) \
- V(S390_LoadReverse64) \
- V(S390_LoadWord64) \
- V(S390_LoadFloat32) \
- V(S390_LoadDouble) \
- V(S390_StoreWord8) \
- V(S390_StoreWord16) \
- V(S390_StoreWord32) \
- V(S390_StoreWord64) \
- V(S390_StoreReverse16) \
- V(S390_StoreReverse32) \
- V(S390_StoreReverse64) \
- V(S390_StoreFloat32) \
- V(S390_StoreDouble) \
- V(S390_Word64AtomicLoadUint8) \
- V(S390_Word64AtomicLoadUint16) \
- V(S390_Word64AtomicLoadUint32) \
- V(S390_Word64AtomicLoadUint64) \
- V(S390_Word64AtomicStoreUint8) \
- V(S390_Word64AtomicStoreUint16) \
- V(S390_Word64AtomicStoreUint32) \
- V(S390_Word64AtomicStoreUint64) \
- V(S390_Word64AtomicExchangeUint8) \
- V(S390_Word64AtomicExchangeUint16) \
- V(S390_Word64AtomicExchangeUint32) \
- V(S390_Word64AtomicExchangeUint64) \
- V(S390_Word64AtomicCompareExchangeUint8) \
- V(S390_Word64AtomicCompareExchangeUint16) \
- V(S390_Word64AtomicCompareExchangeUint32) \
- V(S390_Word64AtomicCompareExchangeUint64) \
- V(S390_Word64AtomicAddUint8) \
- V(S390_Word64AtomicAddUint16) \
- V(S390_Word64AtomicAddUint32) \
- V(S390_Word64AtomicAddUint64) \
- V(S390_Word64AtomicSubUint8) \
- V(S390_Word64AtomicSubUint16) \
- V(S390_Word64AtomicSubUint32) \
- V(S390_Word64AtomicSubUint64) \
- V(S390_Word64AtomicAndUint8) \
- V(S390_Word64AtomicAndUint16) \
- V(S390_Word64AtomicAndUint32) \
- V(S390_Word64AtomicAndUint64) \
- V(S390_Word64AtomicOrUint8) \
- V(S390_Word64AtomicOrUint16) \
- V(S390_Word64AtomicOrUint32) \
- V(S390_Word64AtomicOrUint64) \
- V(S390_Word64AtomicXorUint8) \
- V(S390_Word64AtomicXorUint16) \
- V(S390_Word64AtomicXorUint32) \
- V(S390_Word64AtomicXorUint64)
-
-// Addressing modes represent the "shape" of inputs to an instruction.
-// Many instructions support multiple addressing modes. Addressing modes
-// are encoded into the InstructionCode of the instruction and tell the
-// code generator after register allocation which assembler method to call.
-//
-// We use the following local notation for addressing modes:
-//
-// R = register
-// O = register or stack slot
-// D = double register
-// I = immediate (handle, external, int32)
-// MRI = [register + immediate]
-// MRR = [register + register]
-#define TARGET_ADDRESSING_MODE_LIST(V) \
- V(MR) /* [%r0 ] */ \
- V(MRI) /* [%r0 + K] */ \
- V(MRR) /* [%r0 + %r1 ] */ \
- V(MRRI) /* [%r0 + %r1 + K] */
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8
-
-#endif // V8_COMPILER_S390_INSTRUCTION_CODES_S390_H_
diff --git a/deps/v8/src/compiler/schedule.cc b/deps/v8/src/compiler/schedule.cc
index 7632d3cc8c..f547f584ae 100644
--- a/deps/v8/src/compiler/schedule.cc
+++ b/deps/v8/src/compiler/schedule.cc
@@ -55,6 +55,9 @@ void BasicBlock::AddNode(Node* node) { nodes_.push_back(node); }
void BasicBlock::set_control(Control control) { control_ = control; }
void BasicBlock::set_control_input(Node* control_input) {
+ if (!nodes_.empty() && control_input == nodes_.back()) {
+ nodes_.pop_back();
+ }
control_input_ = control_input;
}
@@ -363,30 +366,14 @@ void Schedule::EliminateRedundantPhiNodes() {
}
void Schedule::EnsureSplitEdgeForm(BasicBlock* block) {
+#ifdef DEBUG
DCHECK(block->PredecessorCount() > 1 && block != end_);
for (auto current_pred = block->predecessors().begin();
current_pred != block->predecessors().end(); ++current_pred) {
BasicBlock* pred = *current_pred;
- if (pred->SuccessorCount() > 1) {
- // Found a predecessor block with multiple successors.
- BasicBlock* split_edge_block = NewBasicBlock();
- split_edge_block->set_control(BasicBlock::kGoto);
- split_edge_block->successors().push_back(block);
- split_edge_block->predecessors().push_back(pred);
- split_edge_block->set_deferred(block->deferred());
- *current_pred = split_edge_block;
- // Find a corresponding successor in the previous block, replace it
- // with the split edge block... but only do it once, since we only
- // replace the previous blocks in the current block one at a time.
- for (auto successor = pred->successors().begin();
- successor != pred->successors().end(); ++successor) {
- if (*successor == block) {
- *successor = split_edge_block;
- break;
- }
- }
- }
+ DCHECK_LE(pred->SuccessorCount(), 1);
}
+#endif
}
void Schedule::EnsureDeferredCodeSingleEntryPoint(BasicBlock* block) {
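The hunk above removes the on-the-fly splitting of critical edges and instead asserts split-edge form, so every predecessor of a merge block must already have a single successor. A minimal sketch of the invariant being asserted, using the types from the surrounding code (the helper name is illustrative):

// Split-edge form: no edge may run from a block with multiple successors
// into a block with multiple predecessors (a "critical" edge).
bool IsInSplitEdgeForm(BasicBlock* block) {
  if (block->PredecessorCount() <= 1) return true;
  for (BasicBlock* pred : block->predecessors()) {
    if (pred->SuccessorCount() > 1) return false;  // critical edge found
  }
  return true;
}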
diff --git a/deps/v8/src/compiler/scheduler.cc b/deps/v8/src/compiler/scheduler.cc
index 13712a3561..568fc012e8 100644
--- a/deps/v8/src/compiler/scheduler.cc
+++ b/deps/v8/src/compiler/scheduler.cc
@@ -517,6 +517,12 @@ class CFGBuilder : public ZoneObject {
}
schedule_->AddSwitch(switch_block, sw, successor_blocks, successor_count);
}
+ for (size_t index = 0; index < successor_count; ++index) {
+ if (BranchHintOf(successor_blocks[index]->front()->op()) ==
+ BranchHint::kFalse) {
+ successor_blocks[index]->set_deferred(true);
+ }
+ }
}
void ConnectMerge(Node* merge) {
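The loop added in the hunk above marks any switch successor whose entry node carries a BranchHint::kFalse as deferred, so code for unlikely cases is laid out out-of-line. A compact restatement of the predicate, with names taken from the diff (the standalone helper is illustrative):

// A successor is deferred when its first node's operator carries a
// kFalse branch hint, i.e. the edge is expected to be rarely taken.
bool ShouldDefer(BasicBlock* successor) {
  return BranchHintOf(successor->front()->op()) == BranchHint::kFalse;
}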
@@ -1443,13 +1449,19 @@ class ScheduleLateNodeVisitor {
}
}
+ bool IsMarked(BasicBlock* block) const {
+ DCHECK_LT(block->id().ToSize(), marked_.size());
+ return marked_[block->id().ToSize()];
+ }
+
+ void Mark(BasicBlock* block) { marked_[block->id().ToSize()] = true; }
+
// Mark {block} and push its non-marked predecessor on the marking queue.
void MarkBlock(BasicBlock* block) {
DCHECK_LT(block->id().ToSize(), marked_.size());
- marked_[block->id().ToSize()] = true;
+ Mark(block);
for (BasicBlock* pred_block : block->predecessors()) {
- DCHECK_LT(pred_block->id().ToSize(), marked_.size());
- if (marked_[pred_block->id().ToSize()]) continue;
+ if (IsMarked(pred_block)) continue;
marking_queue_.push_back(pred_block);
}
}
@@ -1474,7 +1486,7 @@ class ScheduleLateNodeVisitor {
for (Edge edge : node->use_edges()) {
if (!scheduler_->IsLive(edge.from())) continue;
BasicBlock* use_block = GetBlockForUse(edge);
- if (use_block == nullptr || marked_[use_block->id().ToSize()]) continue;
+ if (use_block == nullptr || IsMarked(use_block)) continue;
if (use_block == block) {
TRACE(" not splitting #%d:%s, it is used in id:%d\n", node->id(),
node->op()->mnemonic(), block->id().ToInt());
@@ -1489,10 +1501,10 @@ class ScheduleLateNodeVisitor {
do {
BasicBlock* top_block = marking_queue_.front();
marking_queue_.pop_front();
- if (marked_[top_block->id().ToSize()]) continue;
+ if (IsMarked(top_block)) continue;
bool marked = true;
for (BasicBlock* successor : top_block->successors()) {
- if (!marked_[successor->id().ToSize()]) {
+ if (!IsMarked(successor)) {
marked = false;
break;
}
@@ -1503,7 +1515,7 @@ class ScheduleLateNodeVisitor {
// If the (common dominator) {block} is marked, we know that all paths from
// {block} to the end contain at least one use of {node}, and hence there's
// no point in splitting the {node} in this case.
- if (marked_[block->id().ToSize()]) {
+ if (IsMarked(block)) {
TRACE(" not splitting #%d:%s, its common dominator id:%d is perfect\n",
node->id(), node->op()->mnemonic(), block->id().ToInt());
return block;
@@ -1518,7 +1530,7 @@ class ScheduleLateNodeVisitor {
if (!scheduler_->IsLive(edge.from())) continue;
BasicBlock* use_block = GetBlockForUse(edge);
if (use_block == nullptr) continue;
- while (marked_[use_block->dominator()->id().ToSize()]) {
+ while (IsMarked(use_block->dominator())) {
use_block = use_block->dominator();
}
auto& use_node = dominators[use_block];
diff --git a/deps/v8/src/compiler/serializer-for-background-compilation.cc b/deps/v8/src/compiler/serializer-for-background-compilation.cc
new file mode 100644
index 0000000000..0d761f82b3
--- /dev/null
+++ b/deps/v8/src/compiler/serializer-for-background-compilation.cc
@@ -0,0 +1,521 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/serializer-for-background-compilation.h"
+
+#include "src/compiler/js-heap-broker.h"
+#include "src/handles-inl.h"
+#include "src/interpreter/bytecode-array-iterator.h"
+#include "src/objects/code.h"
+#include "src/objects/shared-function-info-inl.h"
+#include "src/zone/zone.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+Hints::Hints(Zone* zone)
+ : constants_(zone), maps_(zone), function_blueprints_(zone) {}
+
+const ZoneVector<Handle<Object>>& Hints::constants() const {
+ return constants_;
+}
+
+const ZoneVector<Handle<Map>>& Hints::maps() const { return maps_; }
+
+const ZoneVector<FunctionBlueprint>& Hints::function_blueprints() const {
+ return function_blueprints_;
+}
+
+void Hints::AddConstant(Handle<Object> constant) {
+ constants_.push_back(constant);
+}
+
+void Hints::AddMap(Handle<Map> map) { maps_.push_back(map); }
+
+void Hints::AddFunctionBlueprint(FunctionBlueprint function_blueprint) {
+ function_blueprints_.push_back(function_blueprint);
+}
+
+void Hints::Add(const Hints& other) {
+ for (auto x : other.constants()) AddConstant(x);
+ for (auto x : other.maps()) AddMap(x);
+ for (auto x : other.function_blueprints()) AddFunctionBlueprint(x);
+}
+
+void Hints::Clear() {
+ constants_.clear();
+ maps_.clear();
+ function_blueprints_.clear();
+}
+
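Hints is a plain union of three append-only vectors, so combining abstract states reduces to concatenation; Add performs no deduplication. A minimal usage sketch under the API above (the merge helper is hypothetical):

// Hypothetical illustration: merging the hints from two control-flow
// paths simply concatenates constants, maps and function blueprints.
void MergePathHints(Hints* merged, const Hints& left, const Hints& right) {
  merged->Clear();
  merged->Add(left);
  merged->Add(right);  // duplicates are kept; Hints is not a set
}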
+class SerializerForBackgroundCompilation::Environment : public ZoneObject {
+ public:
+ explicit Environment(Zone* zone, Isolate* isolate, int register_count,
+ int parameter_count);
+
+ Environment(SerializerForBackgroundCompilation* serializer, Isolate* isolate,
+ int register_count, int parameter_count,
+ const HintsVector& arguments);
+
+ int parameter_count() const { return parameter_count_; }
+ int register_count() const { return register_count_; }
+
+ Hints& accumulator_hints() { return environment_hints_[accumulator_index()]; }
+ Hints& register_hints(interpreter::Register reg) {
+ int local_index = RegisterToLocalIndex(reg);
+ DCHECK_LT(local_index, environment_hints_.size());
+ return environment_hints_[local_index];
+ }
+ Hints& return_value_hints() { return return_value_hints_; }
+
+ void ClearAccumulatorAndRegisterHints() {
+ for (auto& hints : environment_hints_) hints.Clear();
+ }
+
+ private:
+ explicit Environment(Zone* zone)
+ : register_count_(0),
+ parameter_count_(0),
+ environment_hints_(zone),
+ return_value_hints_(zone) {}
+ Zone* zone() const { return zone_; }
+
+ int RegisterToLocalIndex(interpreter::Register reg) const;
+
+ Zone* zone_;
+
+  // environment_hints_ contains hints for the contents of the parameters,
+  // the registers, the accumulator, the current context and the closure.
+  // The layout is as follows:
+  // [ receiver | parameters | registers | accumulator | context | closure ]
+ const int register_count_;
+ const int parameter_count_;
+ HintsVector environment_hints_;
+ int register_base() const { return parameter_count_; }
+ int accumulator_index() const { return register_base() + register_count_; }
+ int current_context_index() const { return accumulator_index() + 1; }
+ int function_closure_index() const { return current_context_index() + 1; }
+ int environment_hints_size() const { return function_closure_index() + 1; }
+
+ Hints return_value_hints_;
+};
+
+SerializerForBackgroundCompilation::Environment::Environment(
+ Zone* zone, Isolate* isolate, int register_count, int parameter_count)
+ : zone_(zone),
+ register_count_(register_count),
+ parameter_count_(parameter_count),
+ environment_hints_(environment_hints_size(), Hints(zone), zone),
+ return_value_hints_(zone) {}
+
+SerializerForBackgroundCompilation::Environment::Environment(
+ SerializerForBackgroundCompilation* serializer, Isolate* isolate,
+ int register_count, int parameter_count, const HintsVector& arguments)
+ : Environment(serializer->zone(), isolate, register_count,
+ parameter_count) {
+ size_t param_count = static_cast<size_t>(parameter_count);
+
+  // Copy the hints for the arguments that were actually passed, but at
+  // most up to parameter_count.
+ for (size_t i = 0; i < std::min(arguments.size(), param_count); ++i) {
+ environment_hints_[i] = arguments[i];
+ }
+
+ Hints undefined_hint(serializer->zone());
+ undefined_hint.AddConstant(
+ serializer->broker()->isolate()->factory()->undefined_value());
+ // Pad the rest with "undefined".
+ for (size_t i = arguments.size(); i < param_count; ++i) {
+ environment_hints_[i] = undefined_hint;
+ }
+}
+
+int SerializerForBackgroundCompilation::Environment::RegisterToLocalIndex(
+ interpreter::Register reg) const {
+  // TODO(mslekova): We also want to gather hints for the context, and we
+  // already have data about the closure that we should record.
+ if (reg.is_current_context()) return current_context_index();
+ if (reg.is_function_closure()) return function_closure_index();
+ if (reg.is_parameter()) {
+ return reg.ToParameterIndex(parameter_count());
+ } else {
+ return register_base() + reg.index();
+ }
+}
+
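Given the slot layout documented above, the index arithmetic can be sanity-checked with a small worked example (the counts are chosen purely for illustration):

// Assuming parameter_count = 3 (receiver plus two arguments) and
// register_count = 4, the environment_hints_ layout works out to:
constexpr int kParams = 3, kRegs = 4;
constexpr int kRegisterBase = kParams;               // registers start at 3
constexpr int kAccumulator = kRegisterBase + kRegs;  // index 7
constexpr int kContext = kAccumulator + 1;           // index 8
constexpr int kClosure = kContext + 1;               // index 9
static_assert(kClosure + 1 == 10, "ten hint slots in total");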
+SerializerForBackgroundCompilation::SerializerForBackgroundCompilation(
+ JSHeapBroker* broker, Zone* zone, Handle<JSFunction> function)
+ : broker_(broker),
+ zone_(zone),
+ shared_(function->shared(), broker->isolate()),
+ feedback_(function->feedback_vector(), broker->isolate()),
+ environment_(new (zone) Environment(
+ zone, broker_->isolate(),
+ shared_->GetBytecodeArray()->register_count(),
+ shared_->GetBytecodeArray()->parameter_count())) {
+ JSFunctionRef(broker, function).Serialize();
+}
+
+SerializerForBackgroundCompilation::SerializerForBackgroundCompilation(
+ JSHeapBroker* broker, Zone* zone, FunctionBlueprint function,
+ const HintsVector& arguments)
+ : broker_(broker),
+ zone_(zone),
+ shared_(function.shared),
+ feedback_(function.feedback),
+ environment_(new (zone) Environment(
+ this, broker->isolate(),
+ shared_->GetBytecodeArray()->register_count(),
+ shared_->GetBytecodeArray()->parameter_count(), arguments)) {}
+
+Hints SerializerForBackgroundCompilation::Run() {
+ SharedFunctionInfoRef shared(broker(), shared_);
+ FeedbackVectorRef feedback(broker(), feedback_);
+ if (shared.IsSerializedForCompilation(feedback)) {
+ return Hints(zone());
+ }
+ shared.SetSerializedForCompilation(feedback);
+ feedback.SerializeSlots();
+ TraverseBytecode();
+ return environment()->return_value_hints();
+}
+
+void SerializerForBackgroundCompilation::TraverseBytecode() {
+ BytecodeArrayRef bytecode_array(
+ broker(), handle(shared_->GetBytecodeArray(), broker()->isolate()));
+ interpreter::BytecodeArrayIterator iterator(bytecode_array.object());
+
+ for (; !iterator.done(); iterator.Advance()) {
+ switch (iterator.current_bytecode()) {
+#define DEFINE_BYTECODE_CASE(name) \
+ case interpreter::Bytecode::k##name: \
+ Visit##name(&iterator); \
+ break;
+ SUPPORTED_BYTECODE_LIST(DEFINE_BYTECODE_CASE)
+#undef DEFINE_BYTECODE_CASE
+ default: {
+ environment()->ClearAccumulatorAndRegisterHints();
+ break;
+ }
+ }
+ }
+}
+
+void SerializerForBackgroundCompilation::VisitIllegal(
+ interpreter::BytecodeArrayIterator* iterator) {
+ UNREACHABLE();
+}
+
+void SerializerForBackgroundCompilation::VisitWide(
+ interpreter::BytecodeArrayIterator* iterator) {
+ UNREACHABLE();
+}
+
+void SerializerForBackgroundCompilation::VisitExtraWide(
+ interpreter::BytecodeArrayIterator* iterator) {
+ UNREACHABLE();
+}
+
+void SerializerForBackgroundCompilation::VisitLdaUndefined(
+ interpreter::BytecodeArrayIterator* iterator) {
+ environment()->accumulator_hints().Clear();
+ environment()->accumulator_hints().AddConstant(
+ broker()->isolate()->factory()->undefined_value());
+}
+
+void SerializerForBackgroundCompilation::VisitLdaNull(
+ interpreter::BytecodeArrayIterator* iterator) {
+ environment()->accumulator_hints().Clear();
+ environment()->accumulator_hints().AddConstant(
+ broker()->isolate()->factory()->null_value());
+}
+
+void SerializerForBackgroundCompilation::VisitLdaZero(
+ interpreter::BytecodeArrayIterator* iterator) {
+ environment()->accumulator_hints().Clear();
+ environment()->accumulator_hints().AddConstant(
+ handle(Smi::FromInt(0), broker()->isolate()));
+}
+
+void SerializerForBackgroundCompilation::VisitLdaSmi(
+ interpreter::BytecodeArrayIterator* iterator) {
+ environment()->accumulator_hints().Clear();
+ environment()->accumulator_hints().AddConstant(handle(
+ Smi::FromInt(iterator->GetImmediateOperand(0)), broker()->isolate()));
+}
+
+void SerializerForBackgroundCompilation::VisitLdaConstant(
+ interpreter::BytecodeArrayIterator* iterator) {
+ environment()->accumulator_hints().Clear();
+ environment()->accumulator_hints().AddConstant(
+ handle(iterator->GetConstantForIndexOperand(0), broker()->isolate()));
+}
+
+void SerializerForBackgroundCompilation::VisitLdar(
+ interpreter::BytecodeArrayIterator* iterator) {
+ environment()->accumulator_hints().Clear();
+ environment()->accumulator_hints().Add(
+ environment()->register_hints(iterator->GetRegisterOperand(0)));
+}
+
+void SerializerForBackgroundCompilation::VisitStar(
+ interpreter::BytecodeArrayIterator* iterator) {
+ interpreter::Register reg = iterator->GetRegisterOperand(0);
+ environment()->register_hints(reg).Clear();
+ environment()->register_hints(reg).Add(environment()->accumulator_hints());
+}
+
+void SerializerForBackgroundCompilation::VisitMov(
+ interpreter::BytecodeArrayIterator* iterator) {
+ interpreter::Register src = iterator->GetRegisterOperand(0);
+ interpreter::Register dst = iterator->GetRegisterOperand(1);
+ environment()->register_hints(dst).Clear();
+ environment()->register_hints(dst).Add(environment()->register_hints(src));
+}
+
+void SerializerForBackgroundCompilation::VisitCreateClosure(
+ interpreter::BytecodeArrayIterator* iterator) {
+ Handle<SharedFunctionInfo> shared(
+ SharedFunctionInfo::cast(iterator->GetConstantForIndexOperand(0)),
+ broker()->isolate());
+
+ FeedbackNexus nexus(feedback_, iterator->GetSlotOperand(1));
+ Handle<Object> cell_value(nexus.GetFeedbackCell()->value(),
+ broker()->isolate());
+
+ environment()->accumulator_hints().Clear();
+ if (cell_value->IsFeedbackVector()) {
+ environment()->accumulator_hints().AddFunctionBlueprint(
+ {shared, Handle<FeedbackVector>::cast(cell_value)});
+ }
+}
+
+void SerializerForBackgroundCompilation::VisitCallUndefinedReceiver(
+ interpreter::BytecodeArrayIterator* iterator) {
+ ProcessCallVarArgs(iterator, ConvertReceiverMode::kNullOrUndefined);
+}
+
+void SerializerForBackgroundCompilation::VisitCallUndefinedReceiver0(
+ interpreter::BytecodeArrayIterator* iterator) {
+ Hints receiver(zone());
+ receiver.AddConstant(broker()->isolate()->factory()->undefined_value());
+
+ const Hints& callee =
+ environment()->register_hints(iterator->GetRegisterOperand(0));
+
+ HintsVector parameters(zone());
+ parameters.push_back(receiver);
+ ProcessCallOrConstruct(callee, parameters);
+}
+
+void SerializerForBackgroundCompilation::VisitCallUndefinedReceiver1(
+ interpreter::BytecodeArrayIterator* iterator) {
+ Hints receiver(zone());
+ receiver.AddConstant(broker()->isolate()->factory()->undefined_value());
+
+ const Hints& callee =
+ environment()->register_hints(iterator->GetRegisterOperand(0));
+ const Hints& arg0 =
+ environment()->register_hints(iterator->GetRegisterOperand(1));
+
+ HintsVector parameters(zone());
+ parameters.push_back(receiver);
+ parameters.push_back(arg0);
+
+ ProcessCallOrConstruct(callee, parameters);
+}
+
+void SerializerForBackgroundCompilation::VisitCallUndefinedReceiver2(
+ interpreter::BytecodeArrayIterator* iterator) {
+ Hints receiver(zone());
+ receiver.AddConstant(broker()->isolate()->factory()->undefined_value());
+
+ const Hints& callee =
+ environment()->register_hints(iterator->GetRegisterOperand(0));
+ const Hints& arg0 =
+ environment()->register_hints(iterator->GetRegisterOperand(1));
+ const Hints& arg1 =
+ environment()->register_hints(iterator->GetRegisterOperand(2));
+
+ HintsVector parameters(zone());
+ parameters.push_back(receiver);
+ parameters.push_back(arg0);
+ parameters.push_back(arg1);
+
+ ProcessCallOrConstruct(callee, parameters);
+}
+
+void SerializerForBackgroundCompilation::VisitCallAnyReceiver(
+ interpreter::BytecodeArrayIterator* iterator) {
+ ProcessCallVarArgs(iterator, ConvertReceiverMode::kAny);
+}
+
+void SerializerForBackgroundCompilation::VisitCallNoFeedback(
+ interpreter::BytecodeArrayIterator* iterator) {
+ ProcessCallVarArgs(iterator, ConvertReceiverMode::kNullOrUndefined);
+}
+
+void SerializerForBackgroundCompilation::VisitCallProperty(
+ interpreter::BytecodeArrayIterator* iterator) {
+ ProcessCallVarArgs(iterator, ConvertReceiverMode::kNullOrUndefined);
+}
+
+void SerializerForBackgroundCompilation::VisitCallProperty0(
+ interpreter::BytecodeArrayIterator* iterator) {
+ const Hints& callee =
+ environment()->register_hints(iterator->GetRegisterOperand(0));
+ const Hints& receiver =
+ environment()->register_hints(iterator->GetRegisterOperand(1));
+
+ HintsVector parameters(zone());
+ parameters.push_back(receiver);
+
+ ProcessCallOrConstruct(callee, parameters);
+}
+
+void SerializerForBackgroundCompilation::VisitCallProperty1(
+ interpreter::BytecodeArrayIterator* iterator) {
+ const Hints& callee =
+ environment()->register_hints(iterator->GetRegisterOperand(0));
+ const Hints& receiver =
+ environment()->register_hints(iterator->GetRegisterOperand(1));
+ const Hints& arg0 =
+ environment()->register_hints(iterator->GetRegisterOperand(2));
+
+ HintsVector parameters(zone());
+ parameters.push_back(receiver);
+ parameters.push_back(arg0);
+
+ ProcessCallOrConstruct(callee, parameters);
+}
+
+void SerializerForBackgroundCompilation::VisitCallProperty2(
+ interpreter::BytecodeArrayIterator* iterator) {
+ const Hints& callee =
+ environment()->register_hints(iterator->GetRegisterOperand(0));
+ const Hints& receiver =
+ environment()->register_hints(iterator->GetRegisterOperand(1));
+ const Hints& arg0 =
+ environment()->register_hints(iterator->GetRegisterOperand(2));
+ const Hints& arg1 =
+ environment()->register_hints(iterator->GetRegisterOperand(3));
+
+ HintsVector parameters(zone());
+ parameters.push_back(receiver);
+ parameters.push_back(arg0);
+ parameters.push_back(arg1);
+
+ ProcessCallOrConstruct(callee, parameters);
+}
+
+void SerializerForBackgroundCompilation::ProcessCallOrConstruct(
+ const Hints& callee, const HintsVector& arguments) {
+ environment()->accumulator_hints().Clear();
+
+ for (auto hint : callee.constants()) {
+ if (!hint->IsJSFunction()) continue;
+
+ Handle<JSFunction> function = Handle<JSFunction>::cast(hint);
+ if (!function->shared()->IsInlineable()) continue;
+
+ JSFunctionRef(broker(), function).Serialize();
+
+ Handle<SharedFunctionInfo> shared(function->shared(), broker()->isolate());
+ Handle<FeedbackVector> feedback(function->feedback_vector(),
+ broker()->isolate());
+ SerializerForBackgroundCompilation child_serializer(
+ broker(), zone(), {shared, feedback}, arguments);
+ environment()->accumulator_hints().Add(child_serializer.Run());
+ }
+
+ for (auto hint : callee.function_blueprints()) {
+ if (!hint.shared->IsInlineable()) continue;
+ SerializerForBackgroundCompilation child_serializer(broker(), zone(), hint,
+ arguments);
+ environment()->accumulator_hints().Add(child_serializer.Run());
+ }
+}
+
+void SerializerForBackgroundCompilation::ProcessCallVarArgs(
+ interpreter::BytecodeArrayIterator* iterator,
+ ConvertReceiverMode receiver_mode) {
+ const Hints& callee =
+ environment()->register_hints(iterator->GetRegisterOperand(0));
+ interpreter::Register first_reg = iterator->GetRegisterOperand(1);
+ int reg_count = static_cast<int>(iterator->GetRegisterCountOperand(2));
+
+ bool first_reg_is_receiver =
+ receiver_mode != ConvertReceiverMode::kNullOrUndefined;
+
+ Hints receiver(zone());
+ if (first_reg_is_receiver) {
+    // The receiver is in the first register, followed by the arguments in
+    // consecutive registers.
+ receiver.Add(environment()->register_hints(first_reg));
+ } else {
+    // The receiver is implicit (and undefined); the arguments are in
+    // consecutive registers.
+ receiver.AddConstant(broker()->isolate()->factory()->undefined_value());
+ }
+
+ HintsVector arguments(zone());
+ arguments.push_back(receiver);
+ int arg_base = BoolToInt(first_reg_is_receiver);
+ for (int i = arg_base; i < reg_count; ++i) {
+ arguments.push_back(environment()->register_hints(
+ interpreter::Register(first_reg.index() + i)));
+ }
+
+ ProcessCallOrConstruct(callee, arguments);
+}
+
+void SerializerForBackgroundCompilation::VisitReturn(
+ interpreter::BytecodeArrayIterator* iterator) {
+ environment()->return_value_hints().Add(environment()->accumulator_hints());
+ environment()->ClearAccumulatorAndRegisterHints();
+}
+
+void SerializerForBackgroundCompilation::VisitConstruct(
+ interpreter::BytecodeArrayIterator* iterator) {
+ const Hints& callee =
+ environment()->register_hints(iterator->GetRegisterOperand(0));
+
+ interpreter::Register first_reg = iterator->GetRegisterOperand(1);
+ size_t reg_count = iterator->GetRegisterCountOperand(2);
+
+ HintsVector arguments(zone());
+ // Push the target (callee) of the construct.
+ arguments.push_back(callee);
+
+ // The function arguments are in consecutive registers.
+ int arg_base = first_reg.index();
+ for (int i = 0; i < static_cast<int>(reg_count); ++i) {
+ arguments.push_back(
+ environment()->register_hints(interpreter::Register(arg_base + i)));
+ }
+ // Push the new_target of the construct.
+ arguments.push_back(environment()->accumulator_hints());
+
+ ProcessCallOrConstruct(callee, arguments);
+}
+
+#define DEFINE_SKIPPED_JUMP(name, ...) \
+ void SerializerForBackgroundCompilation::Visit##name( \
+ interpreter::BytecodeArrayIterator* iterator) { \
+ environment()->ClearAccumulatorAndRegisterHints(); \
+ }
+CLEAR_ENVIRONMENT_LIST(DEFINE_SKIPPED_JUMP)
+#undef DEFINE_SKIPPED_JUMP
+
+#define DEFINE_CLEAR_ACCUMULATOR(name, ...) \
+ void SerializerForBackgroundCompilation::Visit##name( \
+ interpreter::BytecodeArrayIterator* iterator) { \
+ environment()->accumulator_hints().Clear(); \
+ }
+CLEAR_ACCUMULATOR_LIST(DEFINE_CLEAR_ACCUMULATOR)
+#undef DEFINE_CLEAR_ACCUMULATOR
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/serializer-for-background-compilation.h b/deps/v8/src/compiler/serializer-for-background-compilation.h
new file mode 100644
index 0000000000..76bc675e66
--- /dev/null
+++ b/deps/v8/src/compiler/serializer-for-background-compilation.h
@@ -0,0 +1,183 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_SERIALIZER_FOR_BACKGROUND_COMPILATION_H_
+#define V8_COMPILER_SERIALIZER_FOR_BACKGROUND_COMPILATION_H_
+
+#include "src/handles.h"
+#include "src/zone/zone-containers.h"
+
+namespace v8 {
+namespace internal {
+
+namespace interpreter {
+class BytecodeArrayIterator;
+} // namespace interpreter
+
+class BytecodeArray;
+class FeedbackVector;
+class LookupIterator;
+class NativeContext;
+class ScriptContextTable;
+class SharedFunctionInfo;
+class SourcePositionTableIterator;
+class Zone;
+
+namespace compiler {
+
+#define CLEAR_ENVIRONMENT_LIST(V) \
+ V(Abort) \
+ V(CallRuntime) \
+ V(CallRuntimeForPair) \
+ V(CreateBlockContext) \
+ V(CreateFunctionContext) \
+ V(CreateEvalContext) \
+ V(Jump) \
+ V(JumpConstant) \
+ V(JumpIfFalse) \
+ V(JumpIfFalseConstant) \
+ V(JumpIfJSReceiver) \
+ V(JumpIfJSReceiverConstant) \
+ V(JumpIfNotNull) \
+ V(JumpIfNotNullConstant) \
+ V(JumpIfNotUndefined) \
+ V(JumpIfNotUndefinedConstant) \
+ V(JumpIfNull) \
+ V(JumpIfNullConstant) \
+ V(JumpIfToBooleanTrueConstant) \
+ V(JumpIfToBooleanFalseConstant) \
+ V(JumpIfToBooleanTrue) \
+ V(JumpIfToBooleanFalse) \
+ V(JumpIfTrue) \
+ V(JumpIfTrueConstant) \
+ V(JumpIfUndefined) \
+ V(JumpIfUndefinedConstant) \
+ V(JumpLoop) \
+ V(PushContext) \
+ V(PopContext) \
+ V(ReThrow) \
+ V(StaContextSlot) \
+ V(StaCurrentContextSlot) \
+ V(Throw)
+
+#define CLEAR_ACCUMULATOR_LIST(V) \
+ V(CallWithSpread) \
+ V(ConstructWithSpread) \
+ V(CreateEmptyObjectLiteral) \
+ V(CreateMappedArguments) \
+ V(CreateRestParameter) \
+ V(CreateUnmappedArguments) \
+ V(LdaContextSlot) \
+ V(LdaCurrentContextSlot) \
+ V(LdaGlobal) \
+ V(LdaGlobalInsideTypeof) \
+ V(LdaImmutableContextSlot) \
+ V(LdaImmutableCurrentContextSlot) \
+ V(LdaKeyedProperty) \
+ V(LdaNamedProperty) \
+ V(LdaNamedPropertyNoFeedback)
+
+#define SUPPORTED_BYTECODE_LIST(V) \
+ V(CallAnyReceiver) \
+ V(CallNoFeedback) \
+ V(CallProperty) \
+ V(CallProperty0) \
+ V(CallProperty1) \
+ V(CallProperty2) \
+ V(CallUndefinedReceiver) \
+ V(CallUndefinedReceiver0) \
+ V(CallUndefinedReceiver1) \
+ V(CallUndefinedReceiver2) \
+ V(Construct) \
+ V(CreateClosure) \
+ V(ExtraWide) \
+ V(Illegal) \
+ V(LdaConstant) \
+ V(LdaNull) \
+ V(Ldar) \
+ V(LdaSmi) \
+ V(LdaUndefined) \
+ V(LdaZero) \
+ V(Mov) \
+ V(Return) \
+ V(Star) \
+ V(Wide) \
+ CLEAR_ENVIRONMENT_LIST(V) \
+ CLEAR_ACCUMULATOR_LIST(V)
+
+class JSHeapBroker;
+
+struct FunctionBlueprint {
+ Handle<SharedFunctionInfo> shared;
+ Handle<FeedbackVector> feedback;
+};
+
+class Hints {
+ public:
+ explicit Hints(Zone* zone);
+
+ const ZoneVector<Handle<Object>>& constants() const;
+ const ZoneVector<Handle<Map>>& maps() const;
+ const ZoneVector<FunctionBlueprint>& function_blueprints() const;
+
+ void AddConstant(Handle<Object> constant);
+ void AddMap(Handle<Map> map);
+ void AddFunctionBlueprint(FunctionBlueprint function_blueprint);
+
+ void Add(const Hints& other);
+
+ void Clear();
+
+ private:
+ ZoneVector<Handle<Object>> constants_;
+ ZoneVector<Handle<Map>> maps_;
+ ZoneVector<FunctionBlueprint> function_blueprints_;
+};
+
+typedef ZoneVector<Hints> HintsVector;
+
+// The SerializerForBackgroundCompilation makes sure that the relevant
+// function data, such as the bytecode, SharedFunctionInfo and FeedbackVector
+// used by later optimizations in the compiler, is copied to the heap broker.
+class SerializerForBackgroundCompilation {
+ public:
+ SerializerForBackgroundCompilation(JSHeapBroker* broker, Zone* zone,
+ Handle<JSFunction> function);
+ Hints Run(); // NOTE: Returns empty for an already-serialized function.
+
+ private:
+ SerializerForBackgroundCompilation(JSHeapBroker* broker, Zone* zone,
+ FunctionBlueprint function,
+ const HintsVector& arguments);
+
+ void TraverseBytecode();
+
+#define DECLARE_VISIT_BYTECODE(name, ...) \
+ void Visit##name(interpreter::BytecodeArrayIterator* iterator);
+ SUPPORTED_BYTECODE_LIST(DECLARE_VISIT_BYTECODE)
+#undef DECLARE_VISIT_BYTECODE
+
+ class Environment;
+
+ Zone* zone() const { return zone_; }
+ JSHeapBroker* broker() const { return broker_; }
+ Environment* environment() const { return environment_; }
+
+ void ProcessCallOrConstruct(const Hints& callee,
+ const HintsVector& arguments);
+ void ProcessCallVarArgs(interpreter::BytecodeArrayIterator* iterator,
+ ConvertReceiverMode receiver_mode);
+
+ JSHeapBroker* broker_;
+ Zone* zone_;
+ Handle<SharedFunctionInfo> shared_;
+ Handle<FeedbackVector> feedback_;
+ Environment* environment_;
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_SERIALIZER_FOR_BACKGROUND_COMPILATION_H_
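A minimal sketch of how the class above is intended to be driven; the wrapper function is hypothetical, and only the constructor and Run() come from this header:

// Serialize the data needed to compile {function} before compilation
// moves off the main thread.
void SerializeForBackground(JSHeapBroker* broker, Zone* zone,
                            Handle<JSFunction> function) {
  SerializerForBackgroundCompilation serializer(broker, zone, function);
  serializer.Run();  // walks the bytecode and copies data to the broker
}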
diff --git a/deps/v8/src/compiler/simd-scalar-lowering.cc b/deps/v8/src/compiler/simd-scalar-lowering.cc
index 32de83061d..cab398c160 100644
--- a/deps/v8/src/compiler/simd-scalar-lowering.cc
+++ b/deps/v8/src/compiler/simd-scalar-lowering.cc
@@ -1080,6 +1080,15 @@ void SimdScalarLowering::LowerNode(Node* node) {
ReplaceNode(node, rep_node, num_lanes);
break;
}
+ case IrOpcode::kS128Zero: {
+ DCHECK_EQ(0, node->InputCount());
+ Node* rep_node[kNumLanes32];
+ for (int i = 0; i < kNumLanes32; ++i) {
+ rep_node[i] = mcgraph_->Int32Constant(0);
+ }
+ ReplaceNode(node, rep_node, kNumLanes32);
+ break;
+ }
case IrOpcode::kS128Not: {
DCHECK_EQ(1, node->InputCount());
Node** rep = GetReplacementsWithType(node->InputAt(0), rep_type);
diff --git a/deps/v8/src/compiler/simplified-lowering.cc b/deps/v8/src/compiler/simplified-lowering.cc
index a06cb74237..4a8935b855 100644
--- a/deps/v8/src/compiler/simplified-lowering.cc
+++ b/deps/v8/src/compiler/simplified-lowering.cc
@@ -172,21 +172,6 @@ void ReplaceEffectControlUses(Node* node, Node* effect, Node* control) {
}
}
-void ChangeToPureOp(Node* node, const Operator* new_op) {
- DCHECK(new_op->HasProperty(Operator::kPure));
- if (node->op()->EffectInputCount() > 0) {
- DCHECK_LT(0, node->op()->ControlInputCount());
- // Disconnect the node from effect and control chains.
- Node* control = NodeProperties::GetControlInput(node);
- Node* effect = NodeProperties::GetEffectInput(node);
- ReplaceEffectControlUses(node, effect, control);
- node->TrimInputCount(new_op->ValueInputCount());
- } else {
- DCHECK_EQ(0, node->op()->ControlInputCount());
- }
- NodeProperties::ChangeOp(node, new_op);
-}
-
bool CanOverflowSigned32(const Operator* op, Type left, Type right,
Zone* type_zone) {
// We assume the inputs are checked Signed32 (or known statically
@@ -287,8 +272,8 @@ class RepresentationSelector {
bool weakened_ = false;
};
- RepresentationSelector(JSGraph* jsgraph, JSHeapBroker* js_heap_broker,
- Zone* zone, RepresentationChanger* changer,
+ RepresentationSelector(JSGraph* jsgraph, JSHeapBroker* broker, Zone* zone,
+ RepresentationChanger* changer,
SourcePositionTable* source_positions,
NodeOriginTable* node_origins)
: jsgraph_(jsgraph),
@@ -307,7 +292,7 @@ class RepresentationSelector {
source_positions_(source_positions),
node_origins_(node_origins),
type_cache_(TypeCache::Get()),
- op_typer_(js_heap_broker, graph_zone()) {
+ op_typer_(broker, graph_zone()) {
}
// Forward propagation of types from type feedback.
@@ -567,7 +552,7 @@ class RepresentationSelector {
Type Weaken(Node* node, Type previous_type, Type current_type) {
// If the types have nothing to do with integers, return the types.
- Type const integer = type_cache_.kInteger;
+ Type const integer = type_cache_->kInteger;
if (!previous_type.Maybe(integer)) {
return current_type;
}
@@ -758,6 +743,31 @@ class RepresentationSelector {
!GetUpperBound(node->InputAt(1)).Maybe(type);
}
+ void ChangeToPureOp(Node* node, const Operator* new_op) {
+ DCHECK(new_op->HasProperty(Operator::kPure));
+ if (node->op()->EffectInputCount() > 0) {
+ DCHECK_LT(0, node->op()->ControlInputCount());
+ Node* control = NodeProperties::GetControlInput(node);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ if (TypeOf(node).IsNone()) {
+ // If the node is unreachable, insert an Unreachable node and mark the
+ // value dead.
+ // TODO(jarin,tebbi) Find a way to unify/merge this insertion with
+ // InsertUnreachableIfNecessary.
+ Node* unreachable = effect = graph()->NewNode(
+ jsgraph_->common()->Unreachable(), effect, control);
+ new_op = jsgraph_->common()->DeadValue(GetInfo(node)->representation());
+ node->ReplaceInput(0, unreachable);
+ }
+ // Rewire the effect and control chains.
+ node->TrimInputCount(new_op->ValueInputCount());
+ ReplaceEffectControlUses(node, effect, control);
+ } else {
+ DCHECK_EQ(0, node->op()->ControlInputCount());
+ }
+ NodeProperties::ChangeOp(node, new_op);
+ }
+
// Converts input {index} of {node} according to given UseInfo {use},
// assuming the type of the input is {input_type}. If {input_type} is null,
// it takes the input from the input node {TypeOf(node->InputAt(index))}.
@@ -1034,7 +1044,6 @@ class RepresentationSelector {
VisitUnop(node, UseInfo::CheckedHeapObjectAsTaggedPointer(),
MachineRepresentation::kTaggedPointer);
}
- return;
}
void VisitCall(Node* node, SimplifiedLowering* lowering) {
@@ -1063,6 +1072,15 @@ class RepresentationSelector {
}
}
+ void MaskShiftOperand(Node* node, Type rhs_type) {
+ if (!rhs_type.Is(type_cache_->kZeroToThirtyOne)) {
+ Node* const rhs = NodeProperties::GetValueInput(node, 1);
+ node->ReplaceInput(1,
+ graph()->NewNode(jsgraph_->machine()->Word32And(), rhs,
+ jsgraph_->Int32Constant(0x1F)));
+ }
+ }
+
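MaskShiftOperand implements the JavaScript rule that shift counts are taken modulo 32; the Word32And is only inserted when the static type cannot already prove the count lies in [0, 31]. A one-line arithmetic illustration:

// Equivalent of the inserted mask: in JavaScript, (x << 33) === (x << 1),
// because only the low five bits of the shift count are used.
uint32_t JsShiftCount(uint32_t rhs) { return rhs & 0x1F; }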
static MachineSemantic DeoptValueSemanticOf(Type type) {
// We only need signedness to do deopt correctly.
if (type.Is(Type::Signed32())) {
@@ -1084,7 +1102,7 @@ class RepresentationSelector {
}
// Word64 representation is only valid for safe integer values.
if (rep == MachineRepresentation::kWord64) {
- DCHECK(type.Is(TypeCache::Get().kSafeInteger));
+ DCHECK(type.Is(TypeCache::Get()->kSafeInteger));
return MachineType(rep, MachineSemantic::kInt64);
}
MachineType machine_type(rep, DeoptValueSemanticOf(type));
@@ -1220,10 +1238,10 @@ class RepresentationSelector {
}
if (value_type.IsHeapConstant()) {
RootIndex root_index;
- Heap* heap = jsgraph_->isolate()->heap();
- if (heap->IsRootHandle(value_type.AsHeapConstant()->Value(),
- &root_index)) {
- if (heap->RootIsImmortalImmovable(root_index)) {
+ const RootsTable& roots_table = jsgraph_->isolate()->roots_table();
+ if (roots_table.IsRootHandle(value_type.AsHeapConstant()->Value(),
+ &root_index)) {
+ if (RootsTable::IsImmortalImmovable(root_index)) {
// Write barriers are unnecessary for immortal immovable roots.
return kNoWriteBarrier;
}
@@ -1298,8 +1316,8 @@ class RepresentationSelector {
Type left_upper = GetUpperBound(node->InputAt(0));
Type right_upper = GetUpperBound(node->InputAt(1));
- if (left_upper.Is(type_cache_.kAdditiveSafeIntegerOrMinusZero) &&
- right_upper.Is(type_cache_.kAdditiveSafeIntegerOrMinusZero)) {
+ if (left_upper.Is(type_cache_->kAdditiveSafeIntegerOrMinusZero) &&
+ right_upper.Is(type_cache_->kAdditiveSafeIntegerOrMinusZero)) {
// Only eliminate the node if its typing rule can be satisfied, namely
// that a safe integer is produced.
if (truncation.IsUnused()) return VisitUnused(node);
@@ -1319,7 +1337,6 @@ class RepresentationSelector {
// Try to use type feedback.
NumberOperationHint hint = NumberOperationHintOf(node->op());
-
DCHECK(hint == NumberOperationHint::kSignedSmall ||
hint == NumberOperationHint::kSigned32);
@@ -1327,8 +1344,14 @@ class RepresentationSelector {
Type right_feedback_type = TypeOf(node->InputAt(1));
// Handle the case when no int32 checks on inputs are necessary (but
// an overflow check is needed on the output). Note that we do not
- // have to do any check if at most one side can be minus zero.
- if (left_upper.Is(Type::Signed32OrMinusZero()) &&
+ // have to do any check if at most one side can be minus zero. For
+ // subtraction we need to handle the case of -0 - 0 properly, since
+ // that can produce -0.
+ Type left_constraint_type =
+ node->opcode() == IrOpcode::kSpeculativeSafeIntegerAdd
+ ? Type::Signed32OrMinusZero()
+ : Type::Signed32();
+ if (left_upper.Is(left_constraint_type) &&
right_upper.Is(Type::Signed32OrMinusZero()) &&
(left_upper.Is(Type::Signed32()) || right_upper.Is(Type::Signed32()))) {
VisitBinop(node, UseInfo::TruncatingWord32(),
@@ -1369,7 +1392,7 @@ class RepresentationSelector {
void VisitSpeculativeAdditiveOp(Node* node, Truncation truncation,
SimplifiedLowering* lowering) {
- if (BothInputsAre(node, type_cache_.kAdditiveSafeIntegerOrMinusZero) &&
+ if (BothInputsAre(node, type_cache_->kAdditiveSafeIntegerOrMinusZero) &&
(GetUpperBound(node).Is(Type::Signed32()) ||
GetUpperBound(node).Is(Type::Unsigned32()) ||
truncation.IsUsedAsWord32())) {
@@ -1521,6 +1544,53 @@ class RepresentationSelector {
}
}
+ void VisitCheckBounds(Node* node, SimplifiedLowering* lowering) {
+ CheckParameters const& p = CheckParametersOf(node->op());
+ Type const index_type = TypeOf(node->InputAt(0));
+ Type const length_type = TypeOf(node->InputAt(1));
+ if (length_type.Is(Type::Unsigned31())) {
+ if (index_type.Is(Type::Integral32OrMinusZero())) {
+ // Map -0 to 0, and the values in the [-2^31,-1] range to the
+ // [2^31,2^32-1] range, which will be considered out-of-bounds
+ // as well, because the {length_type} is limited to Unsigned31.
+ VisitBinop(node, UseInfo::TruncatingWord32(),
+ MachineRepresentation::kWord32);
+ if (lower()) {
+ if (lowering->poisoning_level_ ==
+ PoisoningMitigationLevel::kDontPoison &&
+ (index_type.IsNone() || length_type.IsNone() ||
+ (index_type.Min() >= 0.0 &&
+ index_type.Max() < length_type.Min()))) {
+ // The bounds check is redundant if we already know that
+ // the index is within the bounds of [0.0, length[.
+ DeferReplacement(node, node->InputAt(0));
+ } else {
+ NodeProperties::ChangeOp(
+ node, simplified()->CheckedUint32Bounds(p.feedback()));
+ }
+ }
+ } else {
+ VisitBinop(
+ node,
+ UseInfo::CheckedSigned32AsWord32(kIdentifyZeros, p.feedback()),
+ UseInfo::TruncatingWord32(), MachineRepresentation::kWord32);
+ if (lower()) {
+ NodeProperties::ChangeOp(
+ node, simplified()->CheckedUint32Bounds(p.feedback()));
+ }
+ }
+ } else {
+ DCHECK(length_type.Is(type_cache_->kPositiveSafeInteger));
+ VisitBinop(node,
+ UseInfo::CheckedSigned64AsWord64(kIdentifyZeros, p.feedback()),
+ UseInfo::Word64(), MachineRepresentation::kWord64);
+ if (lower()) {
+ NodeProperties::ChangeOp(
+ node, simplified()->CheckedUint64Bounds(p.feedback()));
+ }
+ }
+ }
+
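The Word32 path above relies on the unsigned reinterpretation trick described in its comment: mapping -0 to 0 and the negative range into [2^31, 2^32-1] lets a single unsigned comparison against an Unsigned31 length reject them all. A self-contained worked example of that mapping:

#include <cassert>
#include <cstdint>

int main() {
  int32_t index = -5;
  uint32_t reinterpreted = static_cast<uint32_t>(index);  // 4294967291
  uint32_t length = 100;  // lengths are Unsigned31, i.e. < 2^31
  assert(!(reinterpreted < length));  // negative index reads out of bounds
}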
// Dispatching routine for visiting the node {node} with the usage {use}.
// Depending on the operator, propagate new usage info to the inputs.
void VisitNode(Node* node, Truncation truncation,
@@ -1672,7 +1742,7 @@ class RepresentationSelector {
rhs_type.Is(Type::Unsigned32OrMinusZero())) ||
(lhs_type.Is(Type::Unsigned32OrMinusZeroOrNaN()) &&
rhs_type.Is(Type::Unsigned32OrMinusZeroOrNaN()) &&
- OneInputCannotBe(node, type_cache_.kZeroish))) {
+ OneInputCannotBe(node, type_cache_->kZeroish))) {
// => unsigned Int32Cmp
VisitBinop(node, UseInfo::TruncatingWord32(),
MachineRepresentation::kBit);
@@ -1683,7 +1753,7 @@ class RepresentationSelector {
rhs_type.Is(Type::Signed32OrMinusZero())) ||
(lhs_type.Is(Type::Signed32OrMinusZeroOrNaN()) &&
rhs_type.Is(Type::Signed32OrMinusZeroOrNaN()) &&
- OneInputCannotBe(node, type_cache_.kZeroish))) {
+ OneInputCannotBe(node, type_cache_->kZeroish))) {
// => signed Int32Cmp
VisitBinop(node, UseInfo::TruncatingWord32(),
MachineRepresentation::kBit);
@@ -1813,9 +1883,9 @@ class RepresentationSelector {
case IrOpcode::kNumberAdd:
case IrOpcode::kNumberSubtract: {
if (TypeOf(node->InputAt(0))
- .Is(type_cache_.kAdditiveSafeIntegerOrMinusZero) &&
+ .Is(type_cache_->kAdditiveSafeIntegerOrMinusZero) &&
TypeOf(node->InputAt(1))
- .Is(type_cache_.kAdditiveSafeIntegerOrMinusZero) &&
+ .Is(type_cache_->kAdditiveSafeIntegerOrMinusZero) &&
(TypeOf(node).Is(Type::Signed32()) ||
TypeOf(node).Is(Type::Unsigned32()) ||
truncation.IsUsedAsWord32())) {
@@ -1823,8 +1893,8 @@ class RepresentationSelector {
VisitWord32TruncatingBinop(node);
if (lower()) ChangeToPureOp(node, Int32Op(node));
} else if (jsgraph_->machine()->Is64() &&
- BothInputsAre(node, type_cache_.kSafeInteger) &&
- GetUpperBound(node).Is(type_cache_.kSafeInteger)) {
+ BothInputsAre(node, type_cache_->kSafeInteger) &&
+ GetUpperBound(node).Is(type_cache_->kSafeInteger)) {
// => Int64Add/Sub
VisitInt64Binop(node);
if (lower()) ChangeToPureOp(node, Int64Op(node));
@@ -1841,7 +1911,7 @@ class RepresentationSelector {
NodeProperties::GetType(node).Is(Type::Unsigned32()) ||
(truncation.IsUsedAsWord32() &&
NodeProperties::GetType(node).Is(
- type_cache_.kSafeIntegerOrMinusZero)))) {
+ type_cache_->kSafeIntegerOrMinusZero)))) {
// Multiply reduces to Int32Mul if the inputs are integers, and
// (a) the output is either known to be Signed32, or
// (b) the output is known to be Unsigned32, or
@@ -1896,7 +1966,7 @@ class RepresentationSelector {
(TypeOf(node).Is(Type::Signed32()) ||
TypeOf(node).Is(Type::Unsigned32()) ||
(truncation.IsUsedAsWord32() &&
- TypeOf(node).Is(type_cache_.kSafeIntegerOrMinusZero)))) {
+ TypeOf(node).Is(type_cache_->kSafeIntegerOrMinusZero)))) {
// Multiply reduces to Int32Mul if the inputs are integers, and
// (a) the output is either known to be Signed32, or
// (b) the output is known to be Unsigned32, or
@@ -2066,7 +2136,8 @@ class RepresentationSelector {
VisitBinop(node, UseInfo::TruncatingWord32(),
UseInfo::TruncatingWord32(), MachineRepresentation::kWord32);
if (lower()) {
- lowering->DoShift(node, lowering->machine()->Word32Shl(), rhs_type);
+ MaskShiftOperand(node, rhs_type);
+ ChangeToPureOp(node, lowering->machine()->Word32Shl());
}
return;
}
@@ -2077,7 +2148,8 @@ class RepresentationSelector {
UseInfo::TruncatingWord32(),
MachineRepresentation::kWord32);
if (lower()) {
- lowering->DoShift(node, lowering->machine()->Word32Shl(), rhs_type);
+ MaskShiftOperand(node, rhs_type);
+ ChangeToPureOp(node, lowering->machine()->Word32Shl());
}
return;
}
@@ -2086,7 +2158,8 @@ class RepresentationSelector {
VisitBinop(node, CheckedUseInfoAsWord32FromHint(hint),
MachineRepresentation::kWord32, Type::Signed32());
if (lower()) {
- lowering->DoShift(node, lowering->machine()->Word32Shl(), rhs_type);
+ MaskShiftOperand(node, rhs_type);
+ ChangeToPureOp(node, lowering->machine()->Word32Shl());
}
return;
}
@@ -2095,7 +2168,8 @@ class RepresentationSelector {
VisitBinop(node, UseInfo::TruncatingWord32(),
UseInfo::TruncatingWord32(), MachineRepresentation::kWord32);
if (lower()) {
- lowering->DoShift(node, lowering->machine()->Word32Sar(), rhs_type);
+ MaskShiftOperand(node, rhs_type);
+ ChangeToPureOp(node, lowering->machine()->Word32Sar());
}
return;
}
@@ -2106,7 +2180,8 @@ class RepresentationSelector {
UseInfo::TruncatingWord32(),
MachineRepresentation::kWord32);
if (lower()) {
- lowering->DoShift(node, lowering->machine()->Word32Sar(), rhs_type);
+ MaskShiftOperand(node, rhs_type);
+ ChangeToPureOp(node, lowering->machine()->Word32Sar());
}
return;
}
@@ -2115,7 +2190,8 @@ class RepresentationSelector {
VisitBinop(node, CheckedUseInfoAsWord32FromHint(hint),
MachineRepresentation::kWord32, Type::Signed32());
if (lower()) {
- lowering->DoShift(node, lowering->machine()->Word32Sar(), rhs_type);
+ MaskShiftOperand(node, rhs_type);
+ ChangeToPureOp(node, lowering->machine()->Word32Sar());
}
return;
}
@@ -2124,14 +2200,15 @@ class RepresentationSelector {
VisitBinop(node, UseInfo::TruncatingWord32(),
UseInfo::TruncatingWord32(), MachineRepresentation::kWord32);
if (lower()) {
- lowering->DoShift(node, lowering->machine()->Word32Shr(), rhs_type);
+ MaskShiftOperand(node, rhs_type);
+ ChangeToPureOp(node, lowering->machine()->Word32Shr());
}
return;
}
case IrOpcode::kSpeculativeNumberShiftRightLogical: {
NumberOperationHint hint = NumberOperationHintOf(node->op());
Type rhs_type = GetUpperBound(node->InputAt(1));
- if (rhs_type.Is(type_cache_.kZeroish) &&
+ if (rhs_type.Is(type_cache_->kZeroish) &&
(hint == NumberOperationHint::kSignedSmall ||
hint == NumberOperationHint::kSigned32) &&
!truncation.IsUsedAsWord32()) {
@@ -2153,14 +2230,16 @@ class RepresentationSelector {
UseInfo::TruncatingWord32(),
MachineRepresentation::kWord32);
if (lower()) {
- lowering->DoShift(node, lowering->machine()->Word32Shr(), rhs_type);
+ MaskShiftOperand(node, rhs_type);
+ ChangeToPureOp(node, lowering->machine()->Word32Shr());
}
return;
}
VisitBinop(node, CheckedUseInfoAsWord32FromHint(hint),
MachineRepresentation::kWord32, Type::Unsigned32());
if (lower()) {
- lowering->DoShift(node, lowering->machine()->Word32Shr(), rhs_type);
+ MaskShiftOperand(node, rhs_type);
+ ChangeToPureOp(node, lowering->machine()->Word32Shr());
}
return;
}
@@ -2177,7 +2256,7 @@ class RepresentationSelector {
VisitUnop(node, UseInfo::TruncatingWord32(),
MachineRepresentation::kWord32);
if (lower()) DeferReplacement(node, lowering->Int32Abs(node));
- } else if (input_type.Is(type_cache_.kPositiveIntegerOrNaN)) {
+ } else if (input_type.Is(type_cache_->kPositiveIntegerOrNaN)) {
VisitUnop(node, UseInfo::TruncatingFloat64(kIdentifyZeros),
MachineRepresentation::kFloat64);
if (lower()) DeferReplacement(node, node->InputAt(0));
@@ -2235,14 +2314,26 @@ class RepresentationSelector {
lowering->DoMax(node, lowering->machine()->Int32LessThan(),
MachineRepresentation::kWord32);
}
+ } else if (jsgraph_->machine()->Is64() &&
+ lhs_type.Is(type_cache_->kSafeInteger) &&
+ rhs_type.Is(type_cache_->kSafeInteger)) {
+ VisitInt64Binop(node);
+ if (lower()) {
+ lowering->DoMax(node, lowering->machine()->Int64LessThan(),
+ MachineRepresentation::kWord64);
+ }
} else {
VisitBinop(node,
UseInfo::TruncatingFloat64(truncation.identify_zeros()),
MachineRepresentation::kFloat64);
if (lower()) {
- if (truncation.IdentifiesZeroAndMinusZero() ||
- (lhs_type.Is(Type::PlainNumber()) &&
- rhs_type.Is(Type::PlainNumber()))) {
+ // If the right hand side is not NaN, and the left hand side
+ // is not NaN (or -0 if the difference between the zeros is
+ // observed), we can do a simple floating point comparison here.
+ if (lhs_type.Is(truncation.IdentifiesZeroAndMinusZero()
+ ? Type::OrderedNumber()
+ : Type::PlainNumber()) &&
+ rhs_type.Is(Type::OrderedNumber())) {
lowering->DoMax(node, lowering->machine()->Float64LessThan(),
MachineRepresentation::kFloat64);
} else {
@@ -2281,15 +2372,28 @@ class RepresentationSelector {
lowering->DoMin(node, lowering->machine()->Int32LessThan(),
MachineRepresentation::kWord32);
}
+ } else if (jsgraph_->machine()->Is64() &&
+ lhs_type.Is(type_cache_->kSafeInteger) &&
+ rhs_type.Is(type_cache_->kSafeInteger)) {
+ VisitInt64Binop(node);
+ if (lower()) {
+ lowering->DoMin(node, lowering->machine()->Int64LessThan(),
+ MachineRepresentation::kWord64);
+ }
} else {
VisitBinop(node,
UseInfo::TruncatingFloat64(truncation.identify_zeros()),
MachineRepresentation::kFloat64);
if (lower()) {
- if (truncation.IdentifiesZeroAndMinusZero() ||
- (lhs_type.Is(Type::PlainNumber()) &&
- rhs_type.Is(Type::PlainNumber()))) {
- lowering->DoMin(node, lowering->machine()->Float64LessThan(),
+ // If the left hand side is not NaN, and the right hand side
+ // is not NaN (or -0 if the difference between the zeros is
+ // observed), we can do a simple floating point comparison here.
+ if (lhs_type.Is(Type::OrderedNumber()) &&
+ rhs_type.Is(truncation.IdentifiesZeroAndMinusZero()
+ ? Type::OrderedNumber()
+ : Type::PlainNumber())) {
+ lowering->DoMin(node,
+ lowering->machine()->Float64LessThanOrEqual(),
MachineRepresentation::kFloat64);
} else {
NodeProperties::ChangeOp(node, Float64Op(node));
@@ -2317,7 +2421,7 @@ class RepresentationSelector {
VisitUnop(node, UseInfo::TruncatingFloat64(truncation.identify_zeros()),
MachineRepresentation::kFloat64);
if (lower()) {
- if (input_type.Is(type_cache_.kIntegerOrMinusZeroOrNaN)) {
+ if (input_type.Is(type_cache_->kIntegerOrMinusZeroOrNaN)) {
DeferReplacement(node, node->InputAt(0));
} else if (node->opcode() == IrOpcode::kNumberRound) {
DeferReplacement(node, lowering->Float64Round(node));
@@ -2426,7 +2530,7 @@ class RepresentationSelector {
}
case IrOpcode::kNumberToUint8Clamped: {
Type const input_type = TypeOf(node->InputAt(0));
- if (input_type.Is(type_cache_.kUint8OrMinusZeroOrNaN)) {
+ if (input_type.Is(type_cache_->kUint8OrMinusZeroOrNaN)) {
VisitUnop(node, UseInfo::TruncatingWord32(),
MachineRepresentation::kWord32);
if (lower()) DeferReplacement(node, node->InputAt(0));
@@ -2438,7 +2542,7 @@ class RepresentationSelector {
VisitUnop(node, UseInfo::TruncatingWord32(),
MachineRepresentation::kWord32);
if (lower()) lowering->DoSigned32ToUint8Clamped(node);
- } else if (input_type.Is(type_cache_.kIntegerOrMinusZeroOrNaN)) {
+ } else if (input_type.Is(type_cache_->kIntegerOrMinusZeroOrNaN)) {
VisitUnop(node, UseInfo::TruncatingFloat64(),
MachineRepresentation::kFloat64);
if (lower()) lowering->DoIntegerToUint8Clamped(node);
@@ -2537,34 +2641,8 @@ class RepresentationSelector {
MachineRepresentation::kTaggedPointer);
return;
}
- case IrOpcode::kCheckBounds: {
- const CheckParameters& p = CheckParametersOf(node->op());
- Type index_type = TypeOf(node->InputAt(0));
- Type length_type = TypeOf(node->InputAt(1));
- if (index_type.Is(Type::Integral32OrMinusZero())) {
- // Map -0 to 0, and the values in the [-2^31,-1] range to the
- // [2^31,2^32-1] range, which will be considered out-of-bounds
- // as well, because the {length_type} is limited to Unsigned31.
- VisitBinop(node, UseInfo::TruncatingWord32(),
- MachineRepresentation::kWord32);
- if (lower() && lowering->poisoning_level_ ==
- PoisoningMitigationLevel::kDontPoison) {
- if (index_type.IsNone() || length_type.IsNone() ||
- (index_type.Min() >= 0.0 &&
- index_type.Max() < length_type.Min())) {
- // The bounds check is redundant if we already know that
- // the index is within the bounds of [0.0, length[.
- DeferReplacement(node, node->InputAt(0));
- }
- }
- } else {
- VisitBinop(
- node,
- UseInfo::CheckedSigned32AsWord32(kIdentifyZeros, p.feedback()),
- UseInfo::TruncatingWord32(), MachineRepresentation::kWord32);
- }
- return;
- }
+ case IrOpcode::kCheckBounds:
+ return VisitCheckBounds(node, lowering);
case IrOpcode::kPoisonIndex: {
VisitUnop(node, UseInfo::TruncatingWord32(),
MachineRepresentation::kWord32);
@@ -2604,6 +2682,10 @@ class RepresentationSelector {
VisitCheck(node, Type::Receiver(), lowering);
return;
}
+ case IrOpcode::kCheckReceiverOrNullOrUndefined: {
+ VisitCheck(node, Type::ReceiverOrNullOrUndefined(), lowering);
+ return;
+ }
case IrOpcode::kCheckSmi: {
const CheckParameters& params = CheckParametersOf(node->op());
if (SmiValuesAre32Bits() && truncation.IsUsedAsWord32()) {
@@ -2774,9 +2856,10 @@ class RepresentationSelector {
MachineRepresentationFromArrayType(ExternalArrayTypeOf(node->op()));
ProcessInput(node, 0, UseInfo::AnyTagged()); // buffer
ProcessInput(node, 1, UseInfo::Word()); // external pointer
- ProcessInput(node, 2, UseInfo::Word()); // index
- ProcessInput(node, 3, UseInfo::Bool()); // little-endian
- ProcessRemainingInputs(node, 4);
+ ProcessInput(node, 2, UseInfo::Word()); // byte offset
+ ProcessInput(node, 3, UseInfo::Word()); // index
+ ProcessInput(node, 4, UseInfo::Bool()); // little-endian
+ ProcessRemainingInputs(node, 5);
SetOutput(node, rep);
return;
}
@@ -2798,11 +2881,12 @@ class RepresentationSelector {
MachineRepresentationFromArrayType(ExternalArrayTypeOf(node->op()));
ProcessInput(node, 0, UseInfo::AnyTagged()); // buffer
ProcessInput(node, 1, UseInfo::Word()); // external pointer
- ProcessInput(node, 2, UseInfo::Word()); // index
- ProcessInput(node, 3,
+ ProcessInput(node, 2, UseInfo::Word()); // byte offset
+ ProcessInput(node, 3, UseInfo::Word()); // index
+ ProcessInput(node, 4,
TruncatingUseInfoFromRepresentation(rep)); // value
- ProcessInput(node, 4, UseInfo::Bool()); // little-endian
- ProcessRemainingInputs(node, 5);
+ ProcessInput(node, 5, UseInfo::Bool()); // little-endian
+ ProcessRemainingInputs(node, 6);
SetOutput(node, MachineRepresentation::kNone);
return;
}
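// Editorial sketch (assumed node construction, not part of this patch): the
// DataView access nodes now carry an explicit byte offset, so a load takes
// [buffer, external_pointer, byte_offset, index, is_little_endian] as value
// inputs plus effect/control, e.g.:
//   Node* value = graph()->NewNode(
//       simplified()->LoadDataViewElement(kExternalInt32Array), buffer,
//       external_pointer, byte_offset, index, is_little_endian, effect,
//       control);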
@@ -2909,7 +2993,7 @@ class RepresentationSelector {
}
case IrOpcode::kObjectIsFiniteNumber: {
Type const input_type = GetUpperBound(node->InputAt(0));
- if (input_type.Is(type_cache_.kSafeInteger)) {
+ if (input_type.Is(type_cache_->kSafeInteger)) {
VisitUnop(node, UseInfo::None(), MachineRepresentation::kBit);
if (lower()) {
DeferReplacement(node, lowering->jsgraph()->Int32Constant(1));
@@ -2938,7 +3022,7 @@ class RepresentationSelector {
}
case IrOpcode::kObjectIsSafeInteger: {
Type const input_type = GetUpperBound(node->InputAt(0));
- if (input_type.Is(type_cache_.kSafeInteger)) {
+ if (input_type.Is(type_cache_->kSafeInteger)) {
VisitUnop(node, UseInfo::None(), MachineRepresentation::kBit);
if (lower()) {
DeferReplacement(node, lowering->jsgraph()->Int32Constant(1));
@@ -2965,7 +3049,7 @@ class RepresentationSelector {
}
case IrOpcode::kObjectIsInteger: {
Type const input_type = GetUpperBound(node->InputAt(0));
- if (input_type.Is(type_cache_.kSafeInteger)) {
+ if (input_type.Is(type_cache_->kSafeInteger)) {
VisitUnop(node, UseInfo::None(), MachineRepresentation::kBit);
if (lower()) {
DeferReplacement(node, lowering->jsgraph()->Int32Constant(1));
@@ -3093,32 +3177,25 @@ class RepresentationSelector {
}
case IrOpcode::kCheckFloat64Hole: {
Type const input_type = TypeOf(node->InputAt(0));
- if (input_type.Is(Type::Number())) {
- VisitNoop(node, truncation);
- } else {
- CheckFloat64HoleMode mode =
- CheckFloat64HoleParametersOf(node->op()).mode();
- switch (mode) {
- case CheckFloat64HoleMode::kAllowReturnHole:
- if (truncation.IsUnused()) return VisitUnused(node);
- if (truncation.IsUsedAsFloat64()) {
- VisitUnop(node, UseInfo::TruncatingFloat64(),
- MachineRepresentation::kFloat64);
- if (lower()) DeferReplacement(node, node->InputAt(0));
- } else {
- VisitUnop(
- node,
- UseInfo(MachineRepresentation::kFloat64, Truncation::Any()),
- MachineRepresentation::kFloat64, Type::Number());
- }
- break;
- case CheckFloat64HoleMode::kNeverReturnHole:
- VisitUnop(
- node,
+ CheckFloat64HoleMode mode =
+ CheckFloat64HoleParametersOf(node->op()).mode();
+ if (mode == CheckFloat64HoleMode::kAllowReturnHole) {
+ // If {mode} is allow-return-hole _and_ the {truncation}
+ // identifies NaN and undefined, we can just pass along
+ // the {truncation} and completely wipe the {node}.
+ if (truncation.IsUnused()) return VisitUnused(node);
+ if (truncation.IsUsedAsFloat64()) {
+ VisitUnop(node, UseInfo::TruncatingFloat64(),
+ MachineRepresentation::kFloat64);
+ if (lower()) DeferReplacement(node, node->InputAt(0));
+ return;
+ }
+ }
+ VisitUnop(node,
UseInfo(MachineRepresentation::kFloat64, Truncation::Any()),
MachineRepresentation::kFloat64, Type::Number());
- break;
- }
+ if (lower() && input_type.Is(Type::Number())) {
+ DeferReplacement(node, node->InputAt(0));
}
return;
}
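// Editorial note (sketch): the rewrite folds the two hole-check modes into
// one path. kAllowReturnHole still forwards unused or Float64-truncated uses
// untouched; every other use is visited as a Float64 unop typed Number, and
// the node is dropped at lowering time when the input is already a Number.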
@@ -3394,7 +3471,7 @@ class RepresentationSelector {
// position information via the SourcePositionWrapper like all other reducers.
SourcePositionTable* source_positions_;
NodeOriginTable* node_origins_;
- TypeCache const& type_cache_;
+ TypeCache const* type_cache_;
OperationTyper op_typer_; // helper for the feedback typer
NodeInfo* GetInfo(Node* node) {
@@ -3405,13 +3482,13 @@ class RepresentationSelector {
Zone* graph_zone() { return jsgraph_->zone(); }
};
-SimplifiedLowering::SimplifiedLowering(JSGraph* jsgraph,
- JSHeapBroker* js_heap_broker, Zone* zone,
+SimplifiedLowering::SimplifiedLowering(JSGraph* jsgraph, JSHeapBroker* broker,
+ Zone* zone,
SourcePositionTable* source_positions,
NodeOriginTable* node_origins,
PoisoningMitigationLevel poisoning_level)
: jsgraph_(jsgraph),
- js_heap_broker_(js_heap_broker),
+ broker_(broker),
zone_(zone),
type_cache_(TypeCache::Get()),
source_positions_(source_positions),
@@ -3420,7 +3497,7 @@ SimplifiedLowering::SimplifiedLowering(JSGraph* jsgraph,
void SimplifiedLowering::LowerAllNodes() {
RepresentationChanger changer(jsgraph(), jsgraph()->isolate());
- RepresentationSelector selector(jsgraph(), js_heap_broker_, zone_, &changer,
+ RepresentationSelector selector(jsgraph(), broker_, zone_, &changer,
source_positions_, node_origins_);
selector.Run(this);
}
@@ -3956,16 +4033,6 @@ void SimplifiedLowering::DoMin(Node* node, Operator const* op,
NodeProperties::ChangeOp(node, common()->Select(rep));
}
-void SimplifiedLowering::DoShift(Node* node, Operator const* op,
- Type rhs_type) {
- if (!rhs_type.Is(type_cache_.kZeroToThirtyOne)) {
- Node* const rhs = NodeProperties::GetValueInput(node, 1);
- node->ReplaceInput(1, graph()->NewNode(machine()->Word32And(), rhs,
- jsgraph()->Int32Constant(0x1F)));
- }
- ChangeToPureOp(node, op);
-}
-
void SimplifiedLowering::DoIntegral32ToBit(Node* node) {
Node* const input = node->InputAt(0);
Node* const zero = jsgraph()->Int32Constant(0);
diff --git a/deps/v8/src/compiler/simplified-lowering.h b/deps/v8/src/compiler/simplified-lowering.h
index ba7c9b68b7..e434af9d4f 100644
--- a/deps/v8/src/compiler/simplified-lowering.h
+++ b/deps/v8/src/compiler/simplified-lowering.h
@@ -23,7 +23,7 @@ class TypeCache;
class V8_EXPORT_PRIVATE SimplifiedLowering final {
public:
- SimplifiedLowering(JSGraph* jsgraph, JSHeapBroker* js_heap_broker, Zone* zone,
+ SimplifiedLowering(JSGraph* jsgraph, JSHeapBroker* broker, Zone* zone,
SourcePositionTable* source_position,
NodeOriginTable* node_origins,
PoisoningMitigationLevel poisoning_level);
@@ -37,7 +37,6 @@ class V8_EXPORT_PRIVATE SimplifiedLowering final {
Node* node, RepresentationSelector* selector);
void DoJSToNumberOrNumericTruncatesToWord32(Node* node,
RepresentationSelector* selector);
- void DoShift(Node* node, Operator const* op, Type rhs_type);
void DoIntegral32ToBit(Node* node);
void DoOrderedNumberToBit(Node* node);
void DoNumberToBit(Node* node);
@@ -48,9 +47,9 @@ class V8_EXPORT_PRIVATE SimplifiedLowering final {
private:
JSGraph* const jsgraph_;
- JSHeapBroker* js_heap_broker_;
+ JSHeapBroker* broker_;
Zone* const zone_;
- TypeCache const& type_cache_;
+ TypeCache const* type_cache_;
SetOncePointer<Node> to_number_code_;
SetOncePointer<Node> to_number_convert_big_int_code_;
SetOncePointer<Node> to_numeric_code_;
diff --git a/deps/v8/src/compiler/simplified-operator-reducer.cc b/deps/v8/src/compiler/simplified-operator-reducer.cc
index 851d7927bd..96c434a595 100644
--- a/deps/v8/src/compiler/simplified-operator-reducer.cc
+++ b/deps/v8/src/compiler/simplified-operator-reducer.cc
@@ -32,11 +32,10 @@ Decision DecideObjectIsSmi(Node* const input) {
} // namespace
-SimplifiedOperatorReducer::SimplifiedOperatorReducer(
- Editor* editor, JSGraph* jsgraph, JSHeapBroker* js_heap_broker)
- : AdvancedReducer(editor),
- jsgraph_(jsgraph),
- js_heap_broker_(js_heap_broker) {}
+SimplifiedOperatorReducer::SimplifiedOperatorReducer(Editor* editor,
+ JSGraph* jsgraph,
+ JSHeapBroker* broker)
+ : AdvancedReducer(editor), jsgraph_(jsgraph), broker_(broker) {}
SimplifiedOperatorReducer::~SimplifiedOperatorReducer() = default;
@@ -62,7 +61,7 @@ Reduction SimplifiedOperatorReducer::Reduce(Node* node) {
case IrOpcode::kChangeTaggedToBit: {
HeapObjectMatcher m(node->InputAt(0));
if (m.HasValue()) {
- return ReplaceInt32(m.Ref(js_heap_broker()).BooleanValue());
+ return ReplaceInt32(m.Ref(broker()).BooleanValue());
}
if (m.IsChangeBitToTagged()) return Replace(m.InputAt(0));
break;
@@ -259,15 +258,11 @@ Reduction SimplifiedOperatorReducer::ReplaceNumber(int32_t value) {
}
Factory* SimplifiedOperatorReducer::factory() const {
- return isolate()->factory();
+ return jsgraph()->isolate()->factory();
}
Graph* SimplifiedOperatorReducer::graph() const { return jsgraph()->graph(); }
-Isolate* SimplifiedOperatorReducer::isolate() const {
- return jsgraph()->isolate();
-}
-
MachineOperatorBuilder* SimplifiedOperatorReducer::machine() const {
return jsgraph()->machine();
}
diff --git a/deps/v8/src/compiler/simplified-operator-reducer.h b/deps/v8/src/compiler/simplified-operator-reducer.h
index 93104e31b0..4024a3e439 100644
--- a/deps/v8/src/compiler/simplified-operator-reducer.h
+++ b/deps/v8/src/compiler/simplified-operator-reducer.h
@@ -27,7 +27,7 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorReducer final
: public NON_EXPORTED_BASE(AdvancedReducer) {
public:
SimplifiedOperatorReducer(Editor* editor, JSGraph* jsgraph,
- JSHeapBroker* js_heap_broker);
+ JSHeapBroker* broker);
~SimplifiedOperatorReducer() final;
const char* reducer_name() const override {
@@ -51,15 +51,14 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorReducer final
Factory* factory() const;
Graph* graph() const;
- Isolate* isolate() const;
MachineOperatorBuilder* machine() const;
SimplifiedOperatorBuilder* simplified() const;
JSGraph* jsgraph() const { return jsgraph_; }
- JSHeapBroker* js_heap_broker() const { return js_heap_broker_; }
+ JSHeapBroker* broker() const { return broker_; }
JSGraph* const jsgraph_;
- JSHeapBroker* const js_heap_broker_;
+ JSHeapBroker* const broker_;
DISALLOW_COPY_AND_ASSIGN(SimplifiedOperatorReducer);
};
diff --git a/deps/v8/src/compiler/simplified-operator.cc b/deps/v8/src/compiler/simplified-operator.cc
index a898b715a5..c2831bf293 100644
--- a/deps/v8/src/compiler/simplified-operator.cc
+++ b/deps/v8/src/compiler/simplified-operator.cc
@@ -631,8 +631,10 @@ bool operator==(CheckTaggedInputParameters const& lhs,
}
const CheckMinusZeroParameters& CheckMinusZeroParametersOf(const Operator* op) {
- DCHECK(IrOpcode::kCheckedTaggedToInt32 == op->opcode() ||
- IrOpcode::kCheckedFloat64ToInt32 == op->opcode());
+ DCHECK(op->opcode() == IrOpcode::kCheckedTaggedToInt32 ||
+ op->opcode() == IrOpcode::kCheckedTaggedToInt64 ||
+ op->opcode() == IrOpcode::kCheckedFloat64ToInt32 ||
+ op->opcode() == IrOpcode::kCheckedFloat64ToInt64);
return OpParameter<CheckMinusZeroParameters>(op);
}
@@ -783,19 +785,20 @@ bool operator==(CheckMinusZeroParameters const& lhs,
V(SpeculativeNumberLessThan) \
V(SpeculativeNumberLessThanOrEqual)
-#define CHECKED_OP_LIST(V) \
- V(CheckEqualsInternalizedString, 2, 0) \
- V(CheckEqualsSymbol, 2, 0) \
- V(CheckHeapObject, 1, 1) \
- V(CheckInternalizedString, 1, 1) \
- V(CheckNotTaggedHole, 1, 1) \
- V(CheckReceiver, 1, 1) \
- V(CheckSymbol, 1, 1) \
- V(CheckedInt32Add, 2, 1) \
- V(CheckedInt32Div, 2, 1) \
- V(CheckedInt32Mod, 2, 1) \
- V(CheckedInt32Sub, 2, 1) \
- V(CheckedUint32Div, 2, 1) \
+#define CHECKED_OP_LIST(V) \
+ V(CheckEqualsInternalizedString, 2, 0) \
+ V(CheckEqualsSymbol, 2, 0) \
+ V(CheckHeapObject, 1, 1) \
+ V(CheckInternalizedString, 1, 1) \
+ V(CheckNotTaggedHole, 1, 1) \
+ V(CheckReceiver, 1, 1) \
+ V(CheckReceiverOrNullOrUndefined, 1, 1) \
+ V(CheckSymbol, 1, 1) \
+ V(CheckedInt32Add, 2, 1) \
+ V(CheckedInt32Div, 2, 1) \
+ V(CheckedInt32Mod, 2, 1) \
+ V(CheckedInt32Sub, 2, 1) \
+ V(CheckedUint32Div, 2, 1) \
V(CheckedUint32Mod, 2, 1)
#define CHECKED_WITH_FEEDBACK_OP_LIST(V) \
@@ -809,8 +812,10 @@ bool operator==(CheckMinusZeroParameters const& lhs,
V(CheckedTaggedSignedToInt32, 1, 1) \
V(CheckedTaggedToTaggedPointer, 1, 1) \
V(CheckedTaggedToTaggedSigned, 1, 1) \
+ V(CheckedUint32Bounds, 2, 1) \
V(CheckedUint32ToInt32, 1, 1) \
V(CheckedUint32ToTaggedSigned, 1, 1) \
+ V(CheckedUint64Bounds, 2, 1) \
V(CheckedUint64ToInt32, 1, 1) \
V(CheckedUint64ToTaggedSigned, 1, 1)
@@ -967,6 +972,21 @@ struct SimplifiedOperatorGlobalCache final {
kCheckedFloat64ToInt32DontCheckForMinusZeroOperator;
template <CheckForMinusZeroMode kMode>
+ struct CheckedFloat64ToInt64Operator final
+ : public Operator1<CheckMinusZeroParameters> {
+ CheckedFloat64ToInt64Operator()
+ : Operator1<CheckMinusZeroParameters>(
+ IrOpcode::kCheckedFloat64ToInt64,
+ Operator::kFoldable | Operator::kNoThrow, "CheckedFloat64ToInt64",
+ 1, 1, 1, 1, 1, 0,
+ CheckMinusZeroParameters(kMode, VectorSlotPair())) {}
+ };
+ CheckedFloat64ToInt64Operator<CheckForMinusZeroMode::kCheckForMinusZero>
+ kCheckedFloat64ToInt64CheckForMinusZeroOperator;
+ CheckedFloat64ToInt64Operator<CheckForMinusZeroMode::kDontCheckForMinusZero>
+ kCheckedFloat64ToInt64DontCheckForMinusZeroOperator;
+
+ template <CheckForMinusZeroMode kMode>
struct CheckedTaggedToInt32Operator final
: public Operator1<CheckMinusZeroParameters> {
CheckedTaggedToInt32Operator()
@@ -981,6 +1001,21 @@ struct SimplifiedOperatorGlobalCache final {
CheckedTaggedToInt32Operator<CheckForMinusZeroMode::kDontCheckForMinusZero>
kCheckedTaggedToInt32DontCheckForMinusZeroOperator;
+ template <CheckForMinusZeroMode kMode>
+ struct CheckedTaggedToInt64Operator final
+ : public Operator1<CheckMinusZeroParameters> {
+ CheckedTaggedToInt64Operator()
+ : Operator1<CheckMinusZeroParameters>(
+ IrOpcode::kCheckedTaggedToInt64,
+ Operator::kFoldable | Operator::kNoThrow, "CheckedTaggedToInt64",
+ 1, 1, 1, 1, 1, 0,
+ CheckMinusZeroParameters(kMode, VectorSlotPair())) {}
+ };
+ CheckedTaggedToInt64Operator<CheckForMinusZeroMode::kCheckForMinusZero>
+ kCheckedTaggedToInt64CheckForMinusZeroOperator;
+ CheckedTaggedToInt64Operator<CheckForMinusZeroMode::kDontCheckForMinusZero>
+ kCheckedTaggedToInt64DontCheckForMinusZeroOperator;
+
template <CheckTaggedInputMode kMode>
struct CheckedTaggedToFloat64Operator final
: public Operator1<CheckTaggedInputParameters> {
@@ -1116,11 +1151,13 @@ struct SimplifiedOperatorGlobalCache final {
kSpeculativeToNumberNumberOrOddballOperator;
};
-static base::LazyInstance<SimplifiedOperatorGlobalCache>::type
- kSimplifiedOperatorGlobalCache = LAZY_INSTANCE_INITIALIZER;
+namespace {
+DEFINE_LAZY_LEAKY_OBJECT_GETTER(SimplifiedOperatorGlobalCache,
+ GetSimplifiedOperatorGlobalCache);
+}
SimplifiedOperatorBuilder::SimplifiedOperatorBuilder(Zone* zone)
- : cache_(kSimplifiedOperatorGlobalCache.Get()), zone_(zone) {}
+ : cache_(*GetSimplifiedOperatorGlobalCache()), zone_(zone) {}
#define GET_FROM_CACHE(Name, ...) \
const Operator* SimplifiedOperatorBuilder::Name() { return &cache_.k##Name; }
@@ -1221,6 +1258,22 @@ const Operator* SimplifiedOperatorBuilder::CheckedFloat64ToInt32(
1, 1, 1, 0, CheckMinusZeroParameters(mode, feedback));
}
+const Operator* SimplifiedOperatorBuilder::CheckedFloat64ToInt64(
+ CheckForMinusZeroMode mode, const VectorSlotPair& feedback) {
+ if (!feedback.IsValid()) {
+ switch (mode) {
+ case CheckForMinusZeroMode::kCheckForMinusZero:
+ return &cache_.kCheckedFloat64ToInt64CheckForMinusZeroOperator;
+ case CheckForMinusZeroMode::kDontCheckForMinusZero:
+ return &cache_.kCheckedFloat64ToInt64DontCheckForMinusZeroOperator;
+ }
+ }
+ return new (zone()) Operator1<CheckMinusZeroParameters>(
+ IrOpcode::kCheckedFloat64ToInt64,
+ Operator::kFoldable | Operator::kNoThrow, "CheckedFloat64ToInt64", 1, 1,
+ 1, 1, 1, 0, CheckMinusZeroParameters(mode, feedback));
+}
+
const Operator* SimplifiedOperatorBuilder::CheckedTaggedToInt32(
CheckForMinusZeroMode mode, const VectorSlotPair& feedback) {
if (!feedback.IsValid()) {
@@ -1237,6 +1290,22 @@ const Operator* SimplifiedOperatorBuilder::CheckedTaggedToInt32(
CheckMinusZeroParameters(mode, feedback));
}
+const Operator* SimplifiedOperatorBuilder::CheckedTaggedToInt64(
+ CheckForMinusZeroMode mode, const VectorSlotPair& feedback) {
+ if (!feedback.IsValid()) {
+ switch (mode) {
+ case CheckForMinusZeroMode::kCheckForMinusZero:
+ return &cache_.kCheckedTaggedToInt64CheckForMinusZeroOperator;
+ case CheckForMinusZeroMode::kDontCheckForMinusZero:
+ return &cache_.kCheckedTaggedToInt64DontCheckForMinusZeroOperator;
+ }
+ }
+ return new (zone()) Operator1<CheckMinusZeroParameters>(
+ IrOpcode::kCheckedTaggedToInt64, Operator::kFoldable | Operator::kNoThrow,
+ "CheckedTaggedToInt64", 1, 1, 1, 1, 1, 0,
+ CheckMinusZeroParameters(mode, feedback));
+}
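// Editorial usage sketch (assumed caller, not in this patch): when no
// feedback slot is attached, the builder hands out the statically cached
// operator; otherwise it zone-allocates one carrying the feedback:
//   const Operator* op = simplified()->CheckedTaggedToInt64(
//       CheckForMinusZeroMode::kCheckForMinusZero, VectorSlotPair());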
+
const Operator* SimplifiedOperatorBuilder::CheckedTaggedToFloat64(
CheckTaggedInputMode mode, const VectorSlotPair& feedback) {
if (!feedback.IsValid()) {
@@ -1563,8 +1632,8 @@ SPECULATIVE_NUMBER_BINOP_LIST(SPECULATIVE_NUMBER_BINOP)
V(StoreElement, ElementAccess, Operator::kNoRead, 3, 1, 0) \
V(LoadTypedElement, ExternalArrayType, Operator::kNoWrite, 4, 1, 1) \
V(StoreTypedElement, ExternalArrayType, Operator::kNoRead, 5, 1, 0) \
- V(LoadDataViewElement, ExternalArrayType, Operator::kNoWrite, 4, 1, 1) \
- V(StoreDataViewElement, ExternalArrayType, Operator::kNoRead, 5, 1, 0)
+ V(LoadDataViewElement, ExternalArrayType, Operator::kNoWrite, 5, 1, 1) \
+ V(StoreDataViewElement, ExternalArrayType, Operator::kNoRead, 6, 1, 0)
#define ACCESS(Name, Type, properties, value_input_count, control_input_count, \
output_count) \
diff --git a/deps/v8/src/compiler/simplified-operator.h b/deps/v8/src/compiler/simplified-operator.h
index 4cea393a15..9dddab4861 100644
--- a/deps/v8/src/compiler/simplified-operator.h
+++ b/deps/v8/src/compiler/simplified-operator.h
@@ -679,12 +679,15 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final
const Operator* CheckNotTaggedHole();
const Operator* CheckNumber(const VectorSlotPair& feedback);
const Operator* CheckReceiver();
+ const Operator* CheckReceiverOrNullOrUndefined();
const Operator* CheckSmi(const VectorSlotPair& feedback);
const Operator* CheckString(const VectorSlotPair& feedback);
const Operator* CheckSymbol();
const Operator* CheckedFloat64ToInt32(CheckForMinusZeroMode,
const VectorSlotPair& feedback);
+ const Operator* CheckedFloat64ToInt64(CheckForMinusZeroMode,
+ const VectorSlotPair& feedback);
const Operator* CheckedInt32Add();
const Operator* CheckedInt32Div();
const Operator* CheckedInt32Mod();
@@ -698,14 +701,18 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final
const VectorSlotPair& feedback);
const Operator* CheckedTaggedToInt32(CheckForMinusZeroMode,
const VectorSlotPair& feedback);
+ const Operator* CheckedTaggedToInt64(CheckForMinusZeroMode,
+ const VectorSlotPair& feedback);
const Operator* CheckedTaggedToTaggedPointer(const VectorSlotPair& feedback);
const Operator* CheckedTaggedToTaggedSigned(const VectorSlotPair& feedback);
const Operator* CheckedTruncateTaggedToWord32(CheckTaggedInputMode,
const VectorSlotPair& feedback);
const Operator* CheckedUint32Div();
const Operator* CheckedUint32Mod();
+ const Operator* CheckedUint32Bounds(const VectorSlotPair& feedback);
const Operator* CheckedUint32ToInt32(const VectorSlotPair& feedback);
const Operator* CheckedUint32ToTaggedSigned(const VectorSlotPair& feedback);
+ const Operator* CheckedUint64Bounds(const VectorSlotPair& feedback);
const Operator* CheckedUint64ToInt32(const VectorSlotPair& feedback);
const Operator* CheckedUint64ToTaggedSigned(const VectorSlotPair& feedback);
@@ -790,13 +797,13 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final
// load-typed-element buffer, [base + external + index]
const Operator* LoadTypedElement(ExternalArrayType const&);
- // load-data-view-element buffer, [base + index]
+ // load-data-view-element buffer, [base + byte_offset + index]
const Operator* LoadDataViewElement(ExternalArrayType const&);
// store-typed-element buffer, [base + external + index], value
const Operator* StoreTypedElement(ExternalArrayType const&);
- // store-data-view-element buffer, [base + index], value
+ // store-data-view-element buffer, [base + byte_offset + index], value
const Operator* StoreDataViewElement(ExternalArrayType const&);
// Abort (for terminating execution on internal error).
diff --git a/deps/v8/src/compiler/store-store-elimination.cc b/deps/v8/src/compiler/store-store-elimination.cc
index 589e3948b9..e56af8e47b 100644
--- a/deps/v8/src/compiler/store-store-elimination.cc
+++ b/deps/v8/src/compiler/store-store-elimination.cc
@@ -100,7 +100,7 @@ class UnobservablesSet final {
static UnobservablesSet Unvisited();
static UnobservablesSet VisitedEmpty(Zone* zone);
UnobservablesSet(); // unvisited
- UnobservablesSet(const UnobservablesSet& other) = default;
+ UnobservablesSet(const UnobservablesSet& other) V8_NOEXCEPT = default;
UnobservablesSet Intersect(const UnobservablesSet& other, Zone* zone) const;
UnobservablesSet Add(UnobservableStore obs, Zone* zone) const;
diff --git a/deps/v8/src/compiler/type-cache.cc b/deps/v8/src/compiler/type-cache.cc
index 4c6252e5fc..ecb6c1c6a8 100644
--- a/deps/v8/src/compiler/type-cache.cc
+++ b/deps/v8/src/compiler/type-cache.cc
@@ -10,14 +10,7 @@ namespace v8 {
namespace internal {
namespace compiler {
-namespace {
-
-base::LazyInstance<TypeCache>::type kTypeCache = LAZY_INSTANCE_INITIALIZER;
-
-} // namespace
-
-// static
-TypeCache const& TypeCache::Get() { return kTypeCache.Get(); }
+DEFINE_LAZY_LEAKY_OBJECT_GETTER(const TypeCache, TypeCache::Get);
} // namespace compiler
} // namespace internal
diff --git a/deps/v8/src/compiler/type-cache.h b/deps/v8/src/compiler/type-cache.h
index 251ea08751..6b1cbb7318 100644
--- a/deps/v8/src/compiler/type-cache.h
+++ b/deps/v8/src/compiler/type-cache.h
@@ -22,7 +22,7 @@ class TypeCache final {
Zone zone_;
public:
- static TypeCache const& Get();
+ static TypeCache const* Get();
TypeCache() : zone_(&allocator, ZONE_NAME) {}
@@ -165,9 +165,10 @@ class TypeCache final {
Type const kJSDateYearType =
Type::Union(Type::SignedSmall(), Type::NaN(), zone());
- // The valid number of arguments for JavaScript functions.
- Type const kArgumentsLengthType =
- Type::Range(0.0, Code::kMaxArguments, zone());
+ // The valid number of arguments for JavaScript functions. We can never
+ // materialize more than the max size of a fixed array, because we require a
+ // fixed array in spread/apply calls.
+ Type const kArgumentsLengthType = CreateRange(0.0, FixedArray::kMaxLength);
// The JSArrayIterator::kind property always contains an integer in the
// range [0, 2], representing the possible IterationKinds.
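// Editorial note (sketch): TypeCache::Get() now returns `TypeCache const*`
// rather than a reference, matching the new leaky-singleton getter, so every
// cached-type lookup in this patch switches from `cache_.kFoo` to
// `cache_->kFoo`, e.g.:
//   TypeCache const* cache = TypeCache::Get();
//   if (input_type.Is(cache->kSafeInteger)) { /* constant-fold */ }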
diff --git a/deps/v8/src/compiler/type-narrowing-reducer.cc b/deps/v8/src/compiler/type-narrowing-reducer.cc
index c0343f70e7..79687fe2a7 100644
--- a/deps/v8/src/compiler/type-narrowing-reducer.cc
+++ b/deps/v8/src/compiler/type-narrowing-reducer.cc
@@ -12,10 +12,8 @@ namespace internal {
namespace compiler {
TypeNarrowingReducer::TypeNarrowingReducer(Editor* editor, JSGraph* jsgraph,
- JSHeapBroker* js_heap_broker)
- : AdvancedReducer(editor),
- jsgraph_(jsgraph),
- op_typer_(js_heap_broker, zone()) {}
+ JSHeapBroker* broker)
+ : AdvancedReducer(editor), jsgraph_(jsgraph), op_typer_(broker, zone()) {}
TypeNarrowingReducer::~TypeNarrowingReducer() = default;
diff --git a/deps/v8/src/compiler/type-narrowing-reducer.h b/deps/v8/src/compiler/type-narrowing-reducer.h
index 62237ccce3..136f11626e 100644
--- a/deps/v8/src/compiler/type-narrowing-reducer.h
+++ b/deps/v8/src/compiler/type-narrowing-reducer.h
@@ -19,8 +19,7 @@ class JSGraph;
class V8_EXPORT_PRIVATE TypeNarrowingReducer final
: public NON_EXPORTED_BASE(AdvancedReducer) {
public:
- TypeNarrowingReducer(Editor* editor, JSGraph* jsgraph,
- JSHeapBroker* js_heap_broker);
+ TypeNarrowingReducer(Editor* editor, JSGraph* jsgraph, JSHeapBroker* broker);
~TypeNarrowingReducer() final;
const char* reducer_name() const override { return "TypeNarrowingReducer"; }
diff --git a/deps/v8/src/compiler/typed-optimization.cc b/deps/v8/src/compiler/typed-optimization.cc
index 6a9430bd29..3145e1bbff 100644
--- a/deps/v8/src/compiler/typed-optimization.cc
+++ b/deps/v8/src/compiler/typed-optimization.cc
@@ -20,15 +20,14 @@ namespace compiler {
TypedOptimization::TypedOptimization(Editor* editor,
CompilationDependencies* dependencies,
- JSGraph* jsgraph,
- JSHeapBroker* js_heap_broker)
+ JSGraph* jsgraph, JSHeapBroker* broker)
: AdvancedReducer(editor),
dependencies_(dependencies),
jsgraph_(jsgraph),
- js_heap_broker_(js_heap_broker),
- true_type_(Type::HeapConstant(js_heap_broker, factory()->true_value(),
- graph()->zone())),
- false_type_(Type::HeapConstant(js_heap_broker, factory()->false_value(),
+ broker_(broker),
+ true_type_(
+ Type::HeapConstant(broker, factory()->true_value(), graph()->zone())),
+ false_type_(Type::HeapConstant(broker, factory()->false_value(),
graph()->zone())),
type_cache_(TypeCache::Get()) {}
@@ -85,6 +84,17 @@ Reduction TypedOptimization::Reduce(Node* node) {
return ReduceToBoolean(node);
case IrOpcode::kSpeculativeToNumber:
return ReduceSpeculativeToNumber(node);
+ case IrOpcode::kSpeculativeNumberAdd:
+ return ReduceSpeculativeNumberAdd(node);
+ case IrOpcode::kSpeculativeNumberSubtract:
+ case IrOpcode::kSpeculativeNumberMultiply:
+ case IrOpcode::kSpeculativeNumberDivide:
+ case IrOpcode::kSpeculativeNumberModulus:
+ return ReduceSpeculativeNumberBinop(node);
+ case IrOpcode::kSpeculativeNumberEqual:
+ case IrOpcode::kSpeculativeNumberLessThan:
+ case IrOpcode::kSpeculativeNumberLessThanOrEqual:
+ return ReduceSpeculativeNumberComparison(node);
default:
break;
}
@@ -93,7 +103,7 @@ Reduction TypedOptimization::Reduce(Node* node) {
namespace {
-base::Optional<MapRef> GetStableMapFromObjectType(JSHeapBroker* js_heap_broker,
+base::Optional<MapRef> GetStableMapFromObjectType(JSHeapBroker* broker,
Type object_type) {
if (object_type.IsHeapConstant()) {
HeapObjectRef object = object_type.AsHeapConstant()->Ref();
@@ -149,7 +159,7 @@ Reduction TypedOptimization::ReduceCheckMaps(Node* node) {
Type const object_type = NodeProperties::GetType(object);
Node* const effect = NodeProperties::GetEffectInput(node);
base::Optional<MapRef> object_map =
- GetStableMapFromObjectType(js_heap_broker(), object_type);
+ GetStableMapFromObjectType(broker(), object_type);
if (object_map.has_value()) {
for (int i = 1; i < node->op()->ValueInputCount(); ++i) {
Node* const map = NodeProperties::GetValueInput(node, i);
@@ -220,7 +230,7 @@ Reduction TypedOptimization::ReduceLoadField(Node* node) {
// (2) deoptimization is enabled and we can add a code dependency on the
// stability of map (to guard the Constant type information).
base::Optional<MapRef> object_map =
- GetStableMapFromObjectType(js_heap_broker(), object_type);
+ GetStableMapFromObjectType(broker(), object_type);
if (object_map.has_value()) {
dependencies()->DependOnStableMap(*object_map);
Node* const value = jsgraph()->Constant(*object_map);
@@ -234,7 +244,7 @@ Reduction TypedOptimization::ReduceLoadField(Node* node) {
Reduction TypedOptimization::ReduceNumberFloor(Node* node) {
Node* const input = NodeProperties::GetValueInput(node, 0);
Type const input_type = NodeProperties::GetType(input);
- if (input_type.Is(type_cache_.kIntegerOrMinusZeroOrNaN)) {
+ if (input_type.Is(type_cache_->kIntegerOrMinusZeroOrNaN)) {
return Replace(input);
}
if (input_type.Is(Type::PlainNumber()) &&
@@ -270,7 +280,7 @@ Reduction TypedOptimization::ReduceNumberFloor(Node* node) {
Reduction TypedOptimization::ReduceNumberRoundop(Node* node) {
Node* const input = NodeProperties::GetValueInput(node, 0);
Type const input_type = NodeProperties::GetType(input);
- if (input_type.Is(type_cache_.kIntegerOrMinusZeroOrNaN)) {
+ if (input_type.Is(type_cache_->kIntegerOrMinusZeroOrNaN)) {
return Replace(input);
}
return NoChange();
@@ -288,7 +298,7 @@ Reduction TypedOptimization::ReduceNumberSilenceNaN(Node* node) {
Reduction TypedOptimization::ReduceNumberToUint8Clamped(Node* node) {
Node* const input = NodeProperties::GetValueInput(node, 0);
Type const input_type = NodeProperties::GetType(input);
- if (input_type.Is(type_cache_.kUint8)) {
+ if (input_type.Is(type_cache_->kUint8)) {
return Replace(input);
}
return NoChange();
@@ -392,7 +402,7 @@ TypedOptimization::TryReduceStringComparisonOfStringFromSingleCharCode(
const Operator* comparison_op = NumberComparisonFor(comparison->op());
Node* from_char_code_repl = NodeProperties::GetValueInput(from_char_code, 0);
Type from_char_code_repl_type = NodeProperties::GetType(from_char_code_repl);
- if (!from_char_code_repl_type.Is(type_cache_.kUint16)) {
+ if (!from_char_code_repl_type.Is(type_cache_->kUint16)) {
// Convert to signed int32 to satisfy type of {NumberBitwiseAnd}.
from_char_code_repl =
graph()->NewNode(simplified()->NumberToInt32(), from_char_code_repl);
@@ -438,14 +448,14 @@ Reduction TypedOptimization::ReduceStringComparison(Node* node) {
Node* right = NodeProperties::GetValueInput(rhs, 0);
Type left_type = NodeProperties::GetType(left);
Type right_type = NodeProperties::GetType(right);
- if (!left_type.Is(type_cache_.kUint16)) {
+ if (!left_type.Is(type_cache_->kUint16)) {
// Convert to signed int32 to satisfy type of {NumberBitwiseAnd}.
left = graph()->NewNode(simplified()->NumberToInt32(), left);
left = graph()->NewNode(
simplified()->NumberBitwiseAnd(), left,
jsgraph()->Constant(std::numeric_limits<uint16_t>::max()));
}
- if (!right_type.Is(type_cache_.kUint16)) {
+ if (!right_type.Is(type_cache_->kUint16)) {
// Convert to signed int32 to satisfy type of {NumberBitwiseAnd}.
right = graph()->NewNode(simplified()->NumberToInt32(), right);
right = graph()->NewNode(
@@ -474,8 +484,8 @@ Reduction TypedOptimization::ReduceStringLength(Node* node) {
case IrOpcode::kHeapConstant: {
// Constant-fold the String::length of the {input}.
HeapObjectMatcher m(input);
- if (m.Ref(js_heap_broker()).IsString()) {
- uint32_t const length = m.Ref(js_heap_broker()).AsString().length();
+ if (m.Ref(broker()).IsString()) {
+ uint32_t const length = m.Ref(broker()).AsString().length();
Node* value = jsgraph()->Constant(length);
return Replace(value);
}
@@ -593,28 +603,28 @@ Reduction TypedOptimization::ReduceTypeOf(Node* node) {
Factory* const f = factory();
if (type.Is(Type::Boolean())) {
return Replace(
- jsgraph()->Constant(ObjectRef(js_heap_broker(), f->boolean_string())));
+ jsgraph()->Constant(ObjectRef(broker(), f->boolean_string())));
} else if (type.Is(Type::Number())) {
return Replace(
- jsgraph()->Constant(ObjectRef(js_heap_broker(), f->number_string())));
+ jsgraph()->Constant(ObjectRef(broker(), f->number_string())));
} else if (type.Is(Type::String())) {
return Replace(
- jsgraph()->Constant(ObjectRef(js_heap_broker(), f->string_string())));
+ jsgraph()->Constant(ObjectRef(broker(), f->string_string())));
} else if (type.Is(Type::BigInt())) {
return Replace(
- jsgraph()->Constant(ObjectRef(js_heap_broker(), f->bigint_string())));
+ jsgraph()->Constant(ObjectRef(broker(), f->bigint_string())));
} else if (type.Is(Type::Symbol())) {
return Replace(
- jsgraph()->Constant(ObjectRef(js_heap_broker(), f->symbol_string())));
+ jsgraph()->Constant(ObjectRef(broker(), f->symbol_string())));
} else if (type.Is(Type::OtherUndetectableOrUndefined())) {
- return Replace(jsgraph()->Constant(
- ObjectRef(js_heap_broker(), f->undefined_string())));
+ return Replace(
+ jsgraph()->Constant(ObjectRef(broker(), f->undefined_string())));
} else if (type.Is(Type::NonCallableOrNull())) {
return Replace(
- jsgraph()->Constant(ObjectRef(js_heap_broker(), f->object_string())));
+ jsgraph()->Constant(ObjectRef(broker(), f->object_string())));
} else if (type.Is(Type::Function())) {
return Replace(
- jsgraph()->Constant(ObjectRef(js_heap_broker(), f->function_string())));
+ jsgraph()->Constant(ObjectRef(broker(), f->function_string())));
}
return NoChange();
}
@@ -665,11 +675,151 @@ Reduction TypedOptimization::ReduceToBoolean(Node* node) {
return NoChange();
}
-Factory* TypedOptimization::factory() const { return isolate()->factory(); }
+namespace {
+bool BothAre(Type t1, Type t2, Type t3) { return t1.Is(t3) && t2.Is(t3); }
-Graph* TypedOptimization::graph() const { return jsgraph()->graph(); }
+bool NeitherCanBe(Type t1, Type t2, Type t3) {
+ return !t1.Maybe(t3) && !t2.Maybe(t3);
+}
+
+const Operator* NumberOpFromSpeculativeNumberOp(
+ SimplifiedOperatorBuilder* simplified, const Operator* op) {
+ switch (op->opcode()) {
+ case IrOpcode::kSpeculativeNumberEqual:
+ return simplified->NumberEqual();
+ case IrOpcode::kSpeculativeNumberLessThan:
+ return simplified->NumberLessThan();
+ case IrOpcode::kSpeculativeNumberLessThanOrEqual:
+ return simplified->NumberLessThanOrEqual();
+ case IrOpcode::kSpeculativeNumberAdd:
+ // Handled by ReduceSpeculativeNumberAdd.
+ UNREACHABLE();
+ case IrOpcode::kSpeculativeNumberSubtract:
+ return simplified->NumberSubtract();
+ case IrOpcode::kSpeculativeNumberMultiply:
+ return simplified->NumberMultiply();
+ case IrOpcode::kSpeculativeNumberDivide:
+ return simplified->NumberDivide();
+ case IrOpcode::kSpeculativeNumberModulus:
+ return simplified->NumberModulus();
+ default:
+ break;
+ }
+ UNREACHABLE();
+}
+
+} // namespace
+
+Reduction TypedOptimization::ReduceSpeculativeNumberAdd(Node* node) {
+ Node* const lhs = NodeProperties::GetValueInput(node, 0);
+ Node* const rhs = NodeProperties::GetValueInput(node, 1);
+ Type const lhs_type = NodeProperties::GetType(lhs);
+ Type const rhs_type = NodeProperties::GetType(rhs);
+ NumberOperationHint hint = NumberOperationHintOf(node->op());
+ if ((hint == NumberOperationHint::kNumber ||
+ hint == NumberOperationHint::kNumberOrOddball) &&
+ BothAre(lhs_type, rhs_type, Type::PlainPrimitive()) &&
+ NeitherCanBe(lhs_type, rhs_type, Type::StringOrReceiver())) {
+ // SpeculativeNumberAdd(x:-string, y:-string) =>
+ // NumberAdd(ToNumber(x), ToNumber(y))
+ Node* const toNum_lhs = ConvertPlainPrimitiveToNumber(lhs);
+ Node* const toNum_rhs = ConvertPlainPrimitiveToNumber(rhs);
+ Node* const value =
+ graph()->NewNode(simplified()->NumberAdd(), toNum_lhs, toNum_rhs);
+ ReplaceWithValue(node, value);
+ return Replace(node);
+ }
+ return NoChange();
+}
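// Editorial example (assumed): for `x + y` where both operands are typed
// PlainPrimitive but provably neither String nor Receiver, the speculative
// op no longer needs its checks, e.g.:
//   SpeculativeNumberAdd(x:boolean, y:undefined)
//     => NumberAdd(PlainPrimitiveToNumber(x), PlainPrimitiveToNumber(y))
// (constant inputs are folded directly via ReduceJSToNumberInput below).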
+
+Reduction TypedOptimization::ReduceJSToNumberInput(Node* input) {
+ // Try constant-folding of JSToNumber with constant inputs.
+ Type input_type = NodeProperties::GetType(input);
+
+ if (input_type.Is(Type::String())) {
+ HeapObjectMatcher m(input);
+ if (m.HasValue() && m.Ref(broker()).IsString()) {
+ StringRef input_value = m.Ref(broker()).AsString();
+ double number;
+ ASSIGN_RETURN_NO_CHANGE_IF_DATA_MISSING(number, input_value.ToNumber());
+ return Replace(jsgraph()->Constant(number));
+ }
+ }
+ if (input_type.IsHeapConstant()) {
+ HeapObjectRef input_value = input_type.AsHeapConstant()->Ref();
+ if (input_value.map().oddball_type() != OddballType::kNone) {
+ return Replace(jsgraph()->Constant(input_value.OddballToNumber()));
+ }
+ }
+ if (input_type.Is(Type::Number())) {
+ // JSToNumber(x:number) => x
+ return Changed(input);
+ }
+ if (input_type.Is(Type::Undefined())) {
+ // JSToNumber(undefined) => #NaN
+ return Replace(jsgraph()->NaNConstant());
+ }
+ if (input_type.Is(Type::Null())) {
+ // JSToNumber(null) => #0
+ return Replace(jsgraph()->ZeroConstant());
+ }
+ return NoChange();
+}
+
+Node* TypedOptimization::ConvertPlainPrimitiveToNumber(Node* node) {
+ DCHECK(NodeProperties::GetType(node).Is(Type::PlainPrimitive()));
+ // Avoid inserting too many eager ToNumber() operations.
+ Reduction const reduction = ReduceJSToNumberInput(node);
+ if (reduction.Changed()) return reduction.replacement();
+ if (NodeProperties::GetType(node).Is(Type::Number())) {
+ return node;
+ }
+ return graph()->NewNode(simplified()->PlainPrimitiveToNumber(), node);
+}
+
+Reduction TypedOptimization::ReduceSpeculativeNumberBinop(Node* node) {
+ Node* const lhs = NodeProperties::GetValueInput(node, 0);
+ Node* const rhs = NodeProperties::GetValueInput(node, 1);
+ Type const lhs_type = NodeProperties::GetType(lhs);
+ Type const rhs_type = NodeProperties::GetType(rhs);
+ NumberOperationHint hint = NumberOperationHintOf(node->op());
+ if ((hint == NumberOperationHint::kNumber ||
+ hint == NumberOperationHint::kNumberOrOddball) &&
+ BothAre(lhs_type, rhs_type, Type::NumberOrUndefinedOrNullOrBoolean())) {
+ // We intentionally do this only in the Number and NumberOrOddball hint case
+ // because simplified lowering of these speculative ops may do some clever
+ // reductions in the other cases.
+ Node* const toNum_lhs = ConvertPlainPrimitiveToNumber(lhs);
+ Node* const toNum_rhs = ConvertPlainPrimitiveToNumber(rhs);
+ Node* const value = graph()->NewNode(
+ NumberOpFromSpeculativeNumberOp(simplified(), node->op()), toNum_lhs,
+ toNum_rhs);
+ ReplaceWithValue(node, value);
+ return Replace(node);
+ }
+ return NoChange();
+}
-Isolate* TypedOptimization::isolate() const { return jsgraph()->isolate(); }
+Reduction TypedOptimization::ReduceSpeculativeNumberComparison(Node* node) {
+ Node* const lhs = NodeProperties::GetValueInput(node, 0);
+ Node* const rhs = NodeProperties::GetValueInput(node, 1);
+ Type const lhs_type = NodeProperties::GetType(lhs);
+ Type const rhs_type = NodeProperties::GetType(rhs);
+ if (BothAre(lhs_type, rhs_type, Type::Signed32()) ||
+ BothAre(lhs_type, rhs_type, Type::Unsigned32())) {
+ Node* const value = graph()->NewNode(
+ NumberOpFromSpeculativeNumberOp(simplified(), node->op()), lhs, rhs);
+ ReplaceWithValue(node, value);
+ return Replace(node);
+ }
+ return NoChange();
+}
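// Editorial example (assumed): when both inputs are already Signed32 (or
// both Unsigned32), the comparison cannot deoptimize, so e.g.
//   SpeculativeNumberLessThan(i:Signed32, j:Signed32)
//     => NumberLessThan(i, j)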
+
+Factory* TypedOptimization::factory() const {
+ return jsgraph()->isolate()->factory();
+}
+
+Graph* TypedOptimization::graph() const { return jsgraph()->graph(); }
SimplifiedOperatorBuilder* TypedOptimization::simplified() const {
return jsgraph()->simplified();
diff --git a/deps/v8/src/compiler/typed-optimization.h b/deps/v8/src/compiler/typed-optimization.h
index b49982b4e6..ed9c56e59f 100644
--- a/deps/v8/src/compiler/typed-optimization.h
+++ b/deps/v8/src/compiler/typed-optimization.h
@@ -28,7 +28,7 @@ class V8_EXPORT_PRIVATE TypedOptimization final
: public NON_EXPORTED_BASE(AdvancedReducer) {
public:
TypedOptimization(Editor* editor, CompilationDependencies* dependencies,
- JSGraph* jsgraph, JSHeapBroker* js_heap_broker);
+ JSGraph* jsgraph, JSHeapBroker* broker);
~TypedOptimization() override;
const char* reducer_name() const override { return "TypedOptimization"; }
@@ -58,6 +58,10 @@ class V8_EXPORT_PRIVATE TypedOptimization final
Reduction ReduceCheckNotTaggedHole(Node* node);
Reduction ReduceTypeOf(Node* node);
Reduction ReduceToBoolean(Node* node);
+ Reduction ReduceSpeculativeNumberAdd(Node* node);
+ Reduction ReduceSpeculativeNumberMultiply(Node* node);
+ Reduction ReduceSpeculativeNumberBinop(Node* node);
+ Reduction ReduceSpeculativeNumberComparison(Node* node);
Reduction TryReduceStringComparisonOfStringFromSingleCharCode(
Node* comparison, Node* from_char_code, Type constant_type,
@@ -66,21 +70,23 @@ class V8_EXPORT_PRIVATE TypedOptimization final
Node* comparison, const StringRef& string, bool inverted);
const Operator* NumberComparisonFor(const Operator* op);
+ Node* ConvertPlainPrimitiveToNumber(Node* node);
+ Reduction ReduceJSToNumberInput(Node* input);
+
SimplifiedOperatorBuilder* simplified() const;
Factory* factory() const;
Graph* graph() const;
- Isolate* isolate() const;
CompilationDependencies* dependencies() const { return dependencies_; }
JSGraph* jsgraph() const { return jsgraph_; }
- JSHeapBroker* js_heap_broker() const { return js_heap_broker_; }
+ JSHeapBroker* broker() const { return broker_; }
CompilationDependencies* const dependencies_;
JSGraph* const jsgraph_;
- JSHeapBroker* js_heap_broker_;
+ JSHeapBroker* broker_;
Type const true_type_;
Type const false_type_;
- TypeCache const& type_cache_;
+ TypeCache const* type_cache_;
DISALLOW_COPY_AND_ASSIGN(TypedOptimization);
};
diff --git a/deps/v8/src/compiler/typer.cc b/deps/v8/src/compiler/typer.cc
index 7ef20e7fae..248de5a0d5 100644
--- a/deps/v8/src/compiler/typer.cc
+++ b/deps/v8/src/compiler/typer.cc
@@ -34,13 +34,13 @@ class Typer::Decorator final : public GraphDecorator {
Typer* const typer_;
};
-Typer::Typer(JSHeapBroker* js_heap_broker, Flags flags, Graph* graph)
+Typer::Typer(JSHeapBroker* broker, Flags flags, Graph* graph)
: flags_(flags),
graph_(graph),
decorator_(nullptr),
cache_(TypeCache::Get()),
- js_heap_broker_(js_heap_broker),
- operation_typer_(js_heap_broker, zone()) {
+ broker_(broker),
+ operation_typer_(broker, zone()) {
singleton_false_ = operation_typer_.singleton_false();
singleton_true_ = operation_typer_.singleton_true();
@@ -59,7 +59,8 @@ class Typer::Visitor : public Reducer {
explicit Visitor(Typer* typer, LoopVariableOptimizer* induction_vars)
: typer_(typer),
induction_vars_(induction_vars),
- weakened_nodes_(typer->zone()) {}
+ weakened_nodes_(typer->zone()),
+ remembered_types_(typer->zone()) {}
const char* reducer_name() const override { return "Typer"; }
@@ -205,6 +206,8 @@ class Typer::Visitor : public Reducer {
Typer* typer_;
LoopVariableOptimizer* induction_vars_;
ZoneSet<NodeId> weakened_nodes_;
+ // TODO(tebbi): remove once chromium:906567 is resolved.
+ ZoneUnorderedMap<std::pair<Node*, int>, Type> remembered_types_;
#define DECLARE_METHOD(x) inline Type Type##x(Node* node);
DECLARE_METHOD(Start)
@@ -328,7 +331,52 @@ class Typer::Visitor : public Reducer {
current = Weaken(node, current, previous);
}
- CHECK(previous.Is(current));
+ if (V8_UNLIKELY(!previous.Is(current))) {
+ AllowHandleDereference allow;
+ std::ostringstream ostream;
+ node->Print(ostream);
+
+ if (V8_UNLIKELY(node->opcode() == IrOpcode::kNumberAdd)) {
+ ostream << "Previous UpdateType run (inputs first):";
+ for (int i = 0; i < 3; ++i) {
+ ostream << " ";
+ if (remembered_types_[{node, i}].IsInvalid()) {
+ ostream << "untyped";
+ } else {
+ remembered_types_[{node, i}].PrintTo(ostream);
+ }
+ }
+
+ ostream << "\nCurrent (output) type: ";
+ previous.PrintTo(ostream);
+
+ ostream << "\nThis UpdateType run (inputs first):";
+ for (int i = 0; i < 2; ++i) {
+ ostream << " ";
+ Node* input = NodeProperties::GetValueInput(node, i);
+ if (NodeProperties::IsTyped(input)) {
+ NodeProperties::GetType(input).PrintTo(ostream);
+ } else {
+ ostream << "untyped";
+ }
+ }
+ ostream << " ";
+ current.PrintTo(ostream);
+ ostream << "\n";
+ }
+
+ FATAL("UpdateType error for node %s", ostream.str().c_str());
+ }
+
+ if (V8_UNLIKELY(node->opcode() == IrOpcode::kNumberAdd)) {
+ for (int i = 0; i < 2; ++i) {
+ Node* input = NodeProperties::GetValueInput(node, i);
+ remembered_types_[{node, i}] = NodeProperties::IsTyped(input)
+ ? NodeProperties::GetType(input)
+ : Type::Invalid();
+ }
+ remembered_types_[{node, 2}] = current;
+ }
NodeProperties::SetType(node, current);
if (!current.Is(previous)) {
@@ -337,6 +385,16 @@ class Typer::Visitor : public Reducer {
}
return NoChange();
} else {
+ if (V8_UNLIKELY(node->opcode() == IrOpcode::kNumberAdd)) {
+ for (int i = 0; i < 2; ++i) {
+ Node* input = NodeProperties::GetValueInput(node, i);
+ remembered_types_[{node, i}] = NodeProperties::IsTyped(input)
+ ? NodeProperties::GetType(input)
+ : Type::Invalid();
+ }
+ remembered_types_[{node, 2}] = current;
+ }
+
// No previous type, simply update the type.
NodeProperties::SetType(node, current);
return Changed(node);
@@ -407,10 +465,12 @@ Type Typer::Visitor::BinaryNumberOpTyper(Type lhs, Type rhs, Typer* t,
if (lhs_is_number && rhs_is_number) {
return f(lhs, rhs, t);
}
- if (lhs_is_number || rhs_is_number) {
+ // In order to maintain monotonicity, the following two conditions are
+ // intentionally asymmetric.
+ if (lhs_is_number) {
return Type::Number();
}
- if (lhs.Is(Type::BigInt()) || rhs.Is(Type::BigInt())) {
+ if (lhs.Is(Type::BigInt())) {
return Type::BigInt();
}
return Type::Numeric();
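// Editorial example (assumed): the asymmetry keeps retyping monotone. With
// lhs: Numeric, rhs: Number the result is Type::Numeric(); if lhs later
// narrows to BigInt, the result narrows to Type::BigInt(), a subtype of
// Numeric. Testing rhs as well would first yield Number, which BigInt does
// not refine, so the output could widen as its inputs narrow.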
@@ -439,7 +499,7 @@ Type Typer::Visitor::FalsifyUndefined(ComparisonOutcome outcome, Typer* t) {
Type Typer::Visitor::BitwiseNot(Type type, Typer* t) {
type = ToNumeric(type, t);
if (type.Is(Type::Number())) {
- return NumberBitwiseXor(type, t->cache_.kSingletonMinusOne, t);
+ return NumberBitwiseXor(type, t->cache_->kSingletonMinusOne, t);
}
return Type::Numeric();
}
@@ -447,7 +507,7 @@ Type Typer::Visitor::BitwiseNot(Type type, Typer* t) {
Type Typer::Visitor::Decrement(Type type, Typer* t) {
type = ToNumeric(type, t);
if (type.Is(Type::Number())) {
- return NumberSubtract(type, t->cache_.kSingletonOne, t);
+ return NumberSubtract(type, t->cache_->kSingletonOne, t);
}
return Type::Numeric();
}
@@ -455,7 +515,7 @@ Type Typer::Visitor::Decrement(Type type, Typer* t) {
Type Typer::Visitor::Increment(Type type, Typer* t) {
type = ToNumeric(type, t);
if (type.Is(Type::Number())) {
- return NumberAdd(type, t->cache_.kSingletonOne, t);
+ return NumberAdd(type, t->cache_->kSingletonOne, t);
}
return Type::Numeric();
}
@@ -463,7 +523,7 @@ Type Typer::Visitor::Increment(Type type, Typer* t) {
Type Typer::Visitor::Negate(Type type, Typer* t) {
type = ToNumeric(type, t);
if (type.Is(Type::Number())) {
- return NumberMultiply(type, t->cache_.kSingletonMinusOne, t);
+ return NumberMultiply(type, t->cache_->kSingletonMinusOne, t);
}
return Type::Numeric();
}
@@ -486,13 +546,13 @@ Type Typer::Visitor::ToBoolean(Type type, Typer* t) {
Type Typer::Visitor::ToInteger(Type type, Typer* t) {
// ES6 section 7.1.4 ToInteger ( argument )
type = ToNumber(type, t);
- if (type.Is(t->cache_.kIntegerOrMinusZero)) return type;
- if (type.Is(t->cache_.kIntegerOrMinusZeroOrNaN)) {
+ if (type.Is(t->cache_->kIntegerOrMinusZero)) return type;
+ if (type.Is(t->cache_->kIntegerOrMinusZeroOrNaN)) {
return Type::Union(
- Type::Intersect(type, t->cache_.kIntegerOrMinusZero, t->zone()),
- t->cache_.kSingletonZero, t->zone());
+ Type::Intersect(type, t->cache_->kIntegerOrMinusZero, t->zone()),
+ t->cache_->kSingletonZero, t->zone());
}
- return t->cache_.kIntegerOrMinusZero;
+ return t->cache_->kIntegerOrMinusZero;
}
@@ -691,7 +751,7 @@ Type Typer::Visitor::TypeParameter(Node* node) {
return Type::Union(Type::Receiver(), Type::Undefined(), typer_->zone());
}
} else if (index == Linkage::GetJSCallArgCountParamIndex(parameter_count)) {
- return Type::Range(0.0, Code::kMaxArguments, typer_->zone());
+ return Type::Range(0.0, FixedArray::kMaxLength, typer_->zone());
} else if (index == Linkage::GetJSCallContextParamIndex(parameter_count)) {
return Type::OtherInternal();
}
@@ -754,8 +814,8 @@ Type Typer::Visitor::TypeInductionVariablePhi(Node* node) {
// We only handle integer induction variables (otherwise ranges
// do not apply and we cannot do anything).
- if (!initial_type.Is(typer_->cache_.kInteger) ||
- !increment_type.Is(typer_->cache_.kInteger)) {
+ if (!initial_type.Is(typer_->cache_->kInteger) ||
+ !increment_type.Is(typer_->cache_->kInteger)) {
// Fallback to normal phi typing, but ensure monotonicity.
// (Unfortunately, without baking in the previous type, monotonicity might
// be violated because we might not yet have retyped the incrementing
@@ -771,7 +831,7 @@ Type Typer::Visitor::TypeInductionVariablePhi(Node* node) {
// If we do not have enough type information for the initial value or
// the increment, just return the initial value's type.
if (initial_type.IsNone() ||
- increment_type.Is(typer_->cache_.kSingletonZero)) {
+ increment_type.Is(typer_->cache_->kSingletonZero)) {
return initial_type;
}
@@ -802,7 +862,7 @@ Type Typer::Visitor::TypeInductionVariablePhi(Node* node) {
for (auto bound : induction_var->upper_bounds()) {
Type bound_type = TypeOrNone(bound.bound);
// If the type is not an integer, just skip the bound.
- if (!bound_type.Is(typer_->cache_.kInteger)) continue;
+ if (!bound_type.Is(typer_->cache_->kInteger)) continue;
// If the type is not inhabited, then we can take the initial value.
if (bound_type.IsNone()) {
max = initial_type.Max();
@@ -822,7 +882,7 @@ Type Typer::Visitor::TypeInductionVariablePhi(Node* node) {
for (auto bound : induction_var->lower_bounds()) {
Type bound_type = TypeOrNone(bound.bound);
// If the type is not an integer, just skip the bound.
- if (!bound_type.Is(typer_->cache_.kInteger)) continue;
+ if (!bound_type.Is(typer_->cache_->kInteger)) continue;
// If the type is not inhabited, then we can take the initial value.
if (bound_type.IsNone()) {
min = initial_type.Min();
@@ -839,7 +899,7 @@ Type Typer::Visitor::TypeInductionVariablePhi(Node* node) {
} else {
// Shortcut: If the increment can be both positive and negative,
// the variable can go arbitrarily far, so just return integer.
- return typer_->cache_.kInteger;
+ return typer_->cache_->kInteger;
}
if (FLAG_trace_turbo_loop) {
StdoutStream{} << std::setprecision(10) << "Loop ("
@@ -1170,6 +1230,10 @@ Type Typer::Visitor::TypeJSCreateArrayIterator(Node* node) {
return Type::OtherObject();
}
+Type Typer::Visitor::TypeJSCreateAsyncFunctionObject(Node* node) {
+ return Type::OtherObject();
+}
+
Type Typer::Visitor::TypeJSCreateCollectionIterator(Node* node) {
return Type::OtherObject();
}
@@ -1275,7 +1339,7 @@ Type Typer::Visitor::Weaken(Node* node, Type current_type, Type previous_type) {
STATIC_ASSERT(arraysize(kWeakenMinLimits) == arraysize(kWeakenMaxLimits));
// If the types have nothing to do with integers, return the types.
- Type const integer = typer_->cache_.kInteger;
+ Type const integer = typer_->cache_->kInteger;
if (!previous_type.Maybe(integer)) {
return current_type;
}
@@ -1436,7 +1500,7 @@ Type Typer::Visitor::JSCallTyper(Type fun, Typer* t) {
case BuiltinFunctionId::kMathCeil:
case BuiltinFunctionId::kMathRound:
case BuiltinFunctionId::kMathTrunc:
- return t->cache_.kIntegerOrMinusZeroOrNaN;
+ return t->cache_->kIntegerOrMinusZeroOrNaN;
// Unary math functions.
case BuiltinFunctionId::kMathAbs:
case BuiltinFunctionId::kMathExp:
@@ -1460,7 +1524,7 @@ Type Typer::Visitor::JSCallTyper(Type fun, Typer* t) {
case BuiltinFunctionId::kMathTan:
return Type::Number();
case BuiltinFunctionId::kMathSign:
- return t->cache_.kMinusOneToOneOrMinusZeroOrNaN;
+ return t->cache_->kMinusOneToOneOrMinusZeroOrNaN;
// Binary math functions.
case BuiltinFunctionId::kMathAtan2:
case BuiltinFunctionId::kMathPow:
@@ -1470,29 +1534,29 @@ Type Typer::Visitor::JSCallTyper(Type fun, Typer* t) {
case BuiltinFunctionId::kMathImul:
return Type::Signed32();
case BuiltinFunctionId::kMathClz32:
- return t->cache_.kZeroToThirtyTwo;
+ return t->cache_->kZeroToThirtyTwo;
// Date functions.
case BuiltinFunctionId::kDateNow:
- return t->cache_.kTimeValueType;
+ return t->cache_->kTimeValueType;
case BuiltinFunctionId::kDateGetDate:
- return t->cache_.kJSDateDayType;
+ return t->cache_->kJSDateDayType;
case BuiltinFunctionId::kDateGetDay:
- return t->cache_.kJSDateWeekdayType;
+ return t->cache_->kJSDateWeekdayType;
case BuiltinFunctionId::kDateGetFullYear:
- return t->cache_.kJSDateYearType;
+ return t->cache_->kJSDateYearType;
case BuiltinFunctionId::kDateGetHours:
- return t->cache_.kJSDateHourType;
+ return t->cache_->kJSDateHourType;
case BuiltinFunctionId::kDateGetMilliseconds:
return Type::Union(Type::Range(0.0, 999.0, t->zone()), Type::NaN(),
t->zone());
case BuiltinFunctionId::kDateGetMinutes:
- return t->cache_.kJSDateMinuteType;
+ return t->cache_->kJSDateMinuteType;
case BuiltinFunctionId::kDateGetMonth:
- return t->cache_.kJSDateMonthType;
+ return t->cache_->kJSDateMonthType;
case BuiltinFunctionId::kDateGetSeconds:
- return t->cache_.kJSDateSecondType;
+ return t->cache_->kJSDateSecondType;
case BuiltinFunctionId::kDateGetTime:
- return t->cache_.kJSDateValueType;
+ return t->cache_->kJSDateValueType;
// Symbol functions.
case BuiltinFunctionId::kSymbolConstructor:
@@ -1517,7 +1581,7 @@ Type Typer::Visitor::JSCallTyper(Type fun, Typer* t) {
case BuiltinFunctionId::kNumberParseFloat:
return Type::Number();
case BuiltinFunctionId::kNumberParseInt:
- return t->cache_.kIntegerOrMinusZeroOrNaN;
+ return t->cache_->kIntegerOrMinusZeroOrNaN;
case BuiltinFunctionId::kNumberToString:
return Type::String();
@@ -1602,7 +1666,7 @@ Type Typer::Visitor::JSCallTyper(Type fun, Typer* t) {
case BuiltinFunctionId::kArrayMap:
return Type::Receiver();
case BuiltinFunctionId::kArrayPush:
- return t->cache_.kPositiveSafeInteger;
+ return t->cache_->kPositiveSafeInteger;
case BuiltinFunctionId::kArrayReverse:
case BuiltinFunctionId::kArraySlice:
return Type::Receiver();
@@ -1611,7 +1675,7 @@ Type Typer::Visitor::JSCallTyper(Type fun, Typer* t) {
case BuiltinFunctionId::kArraySplice:
return Type::Receiver();
case BuiltinFunctionId::kArrayUnshift:
- return t->cache_.kPositiveSafeInteger;
+ return t->cache_->kPositiveSafeInteger;
// ArrayBuffer functions.
case BuiltinFunctionId::kArrayBufferIsView:
@@ -1629,6 +1693,17 @@ Type Typer::Visitor::JSCallTyper(Type fun, Typer* t) {
case BuiltinFunctionId::kObjectToString:
return Type::String();
+ case BuiltinFunctionId::kPromiseAll:
+ return Type::Receiver();
+ case BuiltinFunctionId::kPromisePrototypeThen:
+ return Type::Receiver();
+ case BuiltinFunctionId::kPromiseRace:
+ return Type::Receiver();
+ case BuiltinFunctionId::kPromiseReject:
+ return Type::Receiver();
+ case BuiltinFunctionId::kPromiseResolve:
+ return Type::Receiver();
+
// RegExp functions.
case BuiltinFunctionId::kRegExpCompile:
return Type::OtherObject();
@@ -1762,7 +1837,7 @@ Type Typer::Visitor::TypeJSForInPrepare(Node* node) {
Type const cache_type =
Type::Union(Type::SignedSmall(), Type::OtherInternal(), zone());
Type const cache_array = Type::OtherInternal();
- Type const cache_length = typer_->cache_.kFixedArrayLengthType;
+ Type const cache_length = typer_->cache_->kFixedArrayLengthType;
return Type::Tuple(cache_type, cache_array, cache_length, zone());
}
@@ -1796,6 +1871,18 @@ Type Typer::Visitor::TypeJSStackCheck(Node* node) { return Type::Any(); }
Type Typer::Visitor::TypeJSDebugger(Node* node) { return Type::Any(); }
+Type Typer::Visitor::TypeJSAsyncFunctionEnter(Node* node) {
+ return Type::OtherObject();
+}
+
+Type Typer::Visitor::TypeJSAsyncFunctionReject(Node* node) {
+ return Type::OtherObject();
+}
+
+Type Typer::Visitor::TypeJSAsyncFunctionResolve(Node* node) {
+ return Type::OtherObject();
+}
+
Type Typer::Visitor::TypeJSFulfillPromise(Node* node) {
return Type::Undefined();
}
@@ -1925,7 +2012,7 @@ Type Typer::Visitor::TypeStringToUpperCaseIntl(Node* node) {
}
Type Typer::Visitor::TypeStringCharCodeAt(Node* node) {
- return typer_->cache_.kUint16;
+ return typer_->cache_->kUint16;
}
Type Typer::Visitor::TypeStringCodePointAt(Node* node) {
@@ -1945,13 +2032,13 @@ Type Typer::Visitor::TypeStringIndexOf(Node* node) {
}
Type Typer::Visitor::TypeStringLength(Node* node) {
- return typer_->cache_.kStringLengthType;
+ return typer_->cache_->kStringLengthType;
}
Type Typer::Visitor::TypeStringSubstring(Node* node) { return Type::String(); }
Type Typer::Visitor::TypePoisonIndex(Node* node) {
- return Type::Union(Operand(node, 0), typer_->cache_.kSingletonZero, zone());
+ return Type::Union(Operand(node, 0), typer_->cache_->kSingletonZero, zone());
}
Type Typer::Visitor::TypeCheckBounds(Node* node) {
@@ -1984,6 +2071,11 @@ Type Typer::Visitor::TypeCheckReceiver(Node* node) {
return Type::Intersect(arg, Type::Receiver(), zone());
}
+Type Typer::Visitor::TypeCheckReceiverOrNullOrUndefined(Node* node) {
+ Type arg = Operand(node, 0);
+ return Type::Intersect(arg, Type::ReceiverOrNullOrUndefined(), zone());
+}
+
Type Typer::Visitor::TypeCheckSmi(Node* node) {
Type arg = Operand(node, 0);
return Type::Intersect(arg, Type::SignedSmall(), zone());
@@ -2047,7 +2139,7 @@ Type Typer::Visitor::TypeLoadTypedElement(Node* node) {
switch (ExternalArrayTypeOf(node->op())) {
#define TYPED_ARRAY_CASE(ElemType, type, TYPE, ctype) \
case kExternal##ElemType##Array: \
- return typer_->cache_.k##ElemType;
+ return typer_->cache_->k##ElemType;
TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE
}
@@ -2058,7 +2150,7 @@ Type Typer::Visitor::TypeLoadDataViewElement(Node* node) {
switch (ExternalArrayTypeOf(node->op())) {
#define TYPED_ARRAY_CASE(ElemType, type, TYPE, ctype) \
case kExternal##ElemType##Array: \
- return typer_->cache_.k##ElemType;
+ return typer_->cache_->k##ElemType;
TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE
}
@@ -2172,7 +2264,7 @@ Type Typer::Visitor::TypeObjectIsUndetectable(Node* node) {
}
Type Typer::Visitor::TypeArgumentsLength(Node* node) {
- return TypeCache::Get().kArgumentsLengthType;
+ return TypeCache::Get()->kArgumentsLengthType;
}
Type Typer::Visitor::TypeArgumentsFrame(Node* node) {
@@ -2210,7 +2302,7 @@ Type Typer::Visitor::TypeRuntimeAbort(Node* node) { UNREACHABLE(); }
// Heap constants.
Type Typer::Visitor::TypeConstant(Handle<Object> value) {
- return Type::NewConstant(typer_->js_heap_broker(), value, zone());
+ return Type::NewConstant(typer_->broker(), value, zone());
}
} // namespace compiler
diff --git a/deps/v8/src/compiler/typer.h b/deps/v8/src/compiler/typer.h
index f6703fe366..ff361727cd 100644
--- a/deps/v8/src/compiler/typer.h
+++ b/deps/v8/src/compiler/typer.h
@@ -25,7 +25,7 @@ class V8_EXPORT_PRIVATE Typer {
};
typedef base::Flags<Flag> Flags;
- Typer(JSHeapBroker* js_heap_broker, Flags flags, Graph* graph);
+ Typer(JSHeapBroker* broker, Flags flags, Graph* graph);
~Typer();
void Run();
@@ -41,13 +41,13 @@ class V8_EXPORT_PRIVATE Typer {
Graph* graph() const { return graph_; }
Zone* zone() const { return graph()->zone(); }
OperationTyper* operation_typer() { return &operation_typer_; }
- JSHeapBroker* js_heap_broker() const { return js_heap_broker_; }
+ JSHeapBroker* broker() const { return broker_; }
Flags const flags_;
Graph* const graph_;
Decorator* decorator_;
- TypeCache const& cache_;
- JSHeapBroker* js_heap_broker_;
+ TypeCache const* cache_;
+ JSHeapBroker* broker_;
OperationTyper operation_typer_;
Type singleton_false_;
diff --git a/deps/v8/src/compiler/types.cc b/deps/v8/src/compiler/types.cc
index e8954f9202..e2ff1e6c72 100644
--- a/deps/v8/src/compiler/types.cc
+++ b/deps/v8/src/compiler/types.cc
@@ -218,10 +218,12 @@ Type::bitset BitsetType::Lub(const MapRefLike& map) {
case JS_INTL_NUMBER_FORMAT_TYPE:
case JS_INTL_PLURAL_RULES_TYPE:
case JS_INTL_RELATIVE_TIME_FORMAT_TYPE:
+ case JS_INTL_SEGMENT_ITERATOR_TYPE:
case JS_INTL_SEGMENTER_TYPE:
#endif // V8_INTL_SUPPORT
case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
case JS_GENERATOR_OBJECT_TYPE:
+ case JS_ASYNC_FUNCTION_OBJECT_TYPE:
case JS_ASYNC_GENERATOR_OBJECT_TYPE:
case JS_MODULE_NAMESPACE_TYPE:
case JS_ARRAY_BUFFER_TYPE:
@@ -239,7 +241,11 @@ Type::bitset BitsetType::Lub(const MapRefLike& map) {
case JS_MAP_VALUE_ITERATOR_TYPE:
case JS_STRING_ITERATOR_TYPE:
case JS_ASYNC_FROM_SYNC_ITERATOR_TYPE:
+ case JS_WEAK_CELL_TYPE:
+ case JS_WEAK_FACTORY_TYPE:
+ case JS_WEAK_FACTORY_CLEANUP_ITERATOR_TYPE:
case JS_WEAK_MAP_TYPE:
+ case JS_WEAK_REF_TYPE:
case JS_WEAK_SET_TYPE:
case JS_PROMISE_TYPE:
case WASM_EXCEPTION_TYPE:
@@ -266,11 +272,14 @@ Type::bitset BitsetType::Lub(const MapRefLike& map) {
case ACCESSOR_INFO_TYPE:
case SHARED_FUNCTION_INFO_TYPE:
case FUNCTION_TEMPLATE_INFO_TYPE:
+ case FUNCTION_TEMPLATE_RARE_DATA_TYPE:
case ACCESSOR_PAIR_TYPE:
+ case EMBEDDER_DATA_ARRAY_TYPE:
case FIXED_ARRAY_TYPE:
case HASH_TABLE_TYPE:
case ORDERED_HASH_MAP_TYPE:
case ORDERED_HASH_SET_TYPE:
+ case ORDERED_NAME_DICTIONARY_TYPE:
case NAME_DICTIONARY_TYPE:
case GLOBAL_DICTIONARY_TYPE:
case NUMBER_DICTIONARY_TYPE:
@@ -308,11 +317,10 @@ Type::bitset BitsetType::Lub(const MapRefLike& map) {
case PROPERTY_CELL_TYPE:
case MODULE_TYPE:
case MODULE_INFO_ENTRY_TYPE:
- case MICROTASK_QUEUE_TYPE:
case CELL_TYPE:
- case PRE_PARSED_SCOPE_DATA_TYPE:
- case UNCOMPILED_DATA_WITHOUT_PRE_PARSED_SCOPE_TYPE:
- case UNCOMPILED_DATA_WITH_PRE_PARSED_SCOPE_TYPE:
+ case PREPARSE_DATA_TYPE:
+ case UNCOMPILED_DATA_WITHOUT_PREPARSE_DATA_TYPE:
+ case UNCOMPILED_DATA_WITH_PREPARSE_DATA_TYPE:
return kOtherInternal;
// Remaining instance types are unsupported for now. If any of them do
@@ -326,6 +334,7 @@ Type::bitset BitsetType::Lub(const MapRefLike& map) {
#undef FIXED_TYPED_ARRAY_CASE
case FILLER_TYPE:
case ACCESS_CHECK_INFO_TYPE:
+ case ASM_WASM_DATA_TYPE:
case CALL_HANDLER_INFO_TYPE:
case INTERCEPTOR_INFO_TYPE:
case OBJECT_TEMPLATE_INFO_TYPE:
@@ -337,11 +346,13 @@ Type::bitset BitsetType::Lub(const MapRefLike& map) {
case STACK_FRAME_INFO_TYPE:
case SMALL_ORDERED_HASH_MAP_TYPE:
case SMALL_ORDERED_HASH_SET_TYPE:
+ case SMALL_ORDERED_NAME_DICTIONARY_TYPE:
case PROTOTYPE_INFO_TYPE:
case INTERPRETER_DATA_TYPE:
case TUPLE2_TYPE:
case TUPLE3_TYPE:
case WASM_DEBUG_INFO_TYPE:
+ case WASM_EXCEPTION_TAG_TYPE:
case WASM_EXPORTED_FUNCTION_DATA_TYPE:
case LOAD_HANDLER_TYPE:
case STORE_HANDLER_TYPE:
@@ -352,6 +363,7 @@ Type::bitset BitsetType::Lub(const MapRefLike& map) {
case PROMISE_FULFILL_REACTION_JOB_TASK_TYPE:
case PROMISE_REJECT_REACTION_JOB_TASK_TYPE:
case PROMISE_RESOLVE_THENABLE_JOB_TASK_TYPE:
+ case WEAK_FACTORY_CLEANUP_JOB_TASK_TYPE:
UNREACHABLE();
}
UNREACHABLE();
@@ -478,7 +490,7 @@ HeapConstantType::HeapConstantType(BitsetType::bitset bitset,
: TypeBase(kHeapConstant), bitset_(bitset), heap_ref_(heap_ref) {}
Handle<HeapObject> HeapConstantType::Value() const {
- return heap_ref_.object<HeapObject>();
+ return heap_ref_.object();
}
// -----------------------------------------------------------------------------
@@ -835,9 +847,9 @@ Type Type::NewConstant(double value, Zone* zone) {
return OtherNumberConstant(value, zone);
}
-Type Type::NewConstant(JSHeapBroker* js_heap_broker, Handle<i::Object> value,
+Type Type::NewConstant(JSHeapBroker* broker, Handle<i::Object> value,
Zone* zone) {
- ObjectRef ref(js_heap_broker, value);
+ ObjectRef ref(broker, value);
if (ref.IsSmi()) {
return NewConstant(static_cast<double>(ref.AsSmi()), zone);
}
@@ -1075,10 +1087,10 @@ Type Type::OtherNumberConstant(double value, Zone* zone) {
}
// static
-Type Type::HeapConstant(JSHeapBroker* js_heap_broker, Handle<i::Object> value,
+Type Type::HeapConstant(JSHeapBroker* broker, Handle<i::Object> value,
Zone* zone) {
return FromTypeBase(
- HeapConstantType::New(HeapObjectRef(js_heap_broker, value), zone));
+ HeapConstantType::New(HeapObjectRef(broker, value), zone));
}
// static
diff --git a/deps/v8/src/compiler/types.h b/deps/v8/src/compiler/types.h
index 27f38edae7..74a548b236 100644
--- a/deps/v8/src/compiler/types.h
+++ b/deps/v8/src/compiler/types.h
@@ -369,8 +369,8 @@ class V8_EXPORT_PRIVATE Type {
static Type UnsignedSmall() { return NewBitset(BitsetType::UnsignedSmall()); }
static Type OtherNumberConstant(double value, Zone* zone);
- static Type HeapConstant(JSHeapBroker* js_heap_broker,
- Handle<i::Object> value, Zone* zone);
+ static Type HeapConstant(JSHeapBroker* broker, Handle<i::Object> value,
+ Zone* zone);
static Type HeapConstant(const HeapObjectRef& value, Zone* zone);
static Type Range(double min, double max, Zone* zone);
static Type Range(RangeType::Limits lims, Zone* zone);
@@ -378,7 +378,7 @@ class V8_EXPORT_PRIVATE Type {
static Type Union(int length, Zone* zone);
// NewConstant is a factory that returns Constant, Range or Number.
- static Type NewConstant(JSHeapBroker* js_heap_broker, Handle<i::Object> value,
+ static Type NewConstant(JSHeapBroker* broker, Handle<i::Object> value,
Zone* zone);
static Type NewConstant(double value, Zone* zone);
@@ -457,8 +457,8 @@ class V8_EXPORT_PRIVATE Type {
friend UnionType;
friend size_t hash_value(Type type);
- Type(bitset bits) : payload_(bits | 1u) {}
- Type(TypeBase* type_base)
+ explicit Type(bitset bits) : payload_(bits | 1u) {}
+ Type(TypeBase* type_base) // NOLINT(runtime/explicit)
: payload_(reinterpret_cast<uintptr_t>(type_base)) {}
// Internal inspection.
diff --git a/deps/v8/src/compiler/verifier.cc b/deps/v8/src/compiler/verifier.cc
index 7eedd2b37b..38ffbe63fc 100644
--- a/deps/v8/src/compiler/verifier.cc
+++ b/deps/v8/src/compiler/verifier.cc
@@ -667,6 +667,10 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
// Type is OtherObject.
CheckTypeIs(node, Type::OtherObject());
break;
+ case IrOpcode::kJSCreateAsyncFunctionObject:
+ // Type is OtherObject.
+ CheckTypeIs(node, Type::OtherObject());
+ break;
case IrOpcode::kJSCreateCollectionIterator:
// Type is OtherObject.
CheckTypeIs(node, Type::OtherObject());
@@ -872,6 +876,23 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
CheckNotTyped(node);
break;
+ case IrOpcode::kJSAsyncFunctionEnter:
+ CheckValueInputIs(node, 0, Type::Any());
+ CheckValueInputIs(node, 1, Type::Any());
+ CheckTypeIs(node, Type::OtherObject());
+ break;
+ case IrOpcode::kJSAsyncFunctionReject:
+ CheckValueInputIs(node, 0, Type::Any());
+ CheckValueInputIs(node, 1, Type::Any());
+ CheckValueInputIs(node, 2, Type::Boolean());
+ CheckTypeIs(node, Type::OtherObject());
+ break;
+ case IrOpcode::kJSAsyncFunctionResolve:
+ CheckValueInputIs(node, 0, Type::Any());
+ CheckValueInputIs(node, 1, Type::Any());
+ CheckValueInputIs(node, 2, Type::Boolean());
+ CheckTypeIs(node, Type::OtherObject());
+ break;
case IrOpcode::kJSFulfillPromise:
CheckValueInputIs(node, 0, Type::Any());
CheckValueInputIs(node, 1, Type::Any());
@@ -1089,7 +1110,7 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
CheckTypeIs(node, Type::Number());
break;
case IrOpcode::kStringConcat:
- CheckValueInputIs(node, 0, TypeCache::Get().kStringLengthType);
+ CheckValueInputIs(node, 0, TypeCache::Get()->kStringLengthType);
CheckValueInputIs(node, 1, Type::String());
CheckValueInputIs(node, 2, Type::String());
CheckTypeIs(node, Type::String());
@@ -1138,7 +1159,7 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
break;
case IrOpcode::kStringLength:
CheckValueInputIs(node, 0, Type::String());
- CheckTypeIs(node, TypeCache::Get().kStringLengthType);
+ CheckTypeIs(node, TypeCache::Get()->kStringLengthType);
break;
case IrOpcode::kStringToLowerCaseIntl:
case IrOpcode::kStringToUpperCaseIntl:
@@ -1224,7 +1245,7 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
break;
case IrOpcode::kArgumentsLength:
CheckValueInputIs(node, 0, Type::ExternalPointer());
- CheckTypeIs(node, TypeCache::Get().kArgumentsLengthType);
+ CheckTypeIs(node, TypeCache::Get()->kArgumentsLengthType);
break;
case IrOpcode::kArgumentsFrame:
CheckTypeIs(node, Type::ExternalPointer());
@@ -1237,12 +1258,12 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
break;
case IrOpcode::kNewArgumentsElements:
CheckValueInputIs(node, 0, Type::ExternalPointer());
- CheckValueInputIs(node, 1, Type::Range(-Code::kMaxArguments,
- Code::kMaxArguments, zone));
+ CheckValueInputIs(node, 1,
+ Type::Range(0.0, FixedArray::kMaxLength, zone));
CheckTypeIs(node, Type::OtherInternal());
break;
case IrOpcode::kNewConsString:
- CheckValueInputIs(node, 0, TypeCache::Get().kStringLengthType);
+ CheckValueInputIs(node, 0, TypeCache::Get()->kStringLengthType);
CheckValueInputIs(node, 1, Type::String());
CheckValueInputIs(node, 2, Type::String());
CheckTypeIs(node, Type::String());
@@ -1400,8 +1421,8 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
case IrOpcode::kCheckBounds:
CheckValueInputIs(node, 0, Type::Any());
- CheckValueInputIs(node, 1, Type::Unsigned31());
- CheckTypeIs(node, Type::Unsigned31());
+ CheckValueInputIs(node, 1, TypeCache::Get()->kPositiveSafeInteger);
+ CheckTypeIs(node, TypeCache::Get()->kPositiveSafeInteger);
break;
case IrOpcode::kPoisonIndex:
CheckValueInputIs(node, 0, Type::Unsigned32());
@@ -1434,6 +1455,10 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
CheckValueInputIs(node, 0, Type::Any());
CheckTypeIs(node, Type::Receiver());
break;
+ case IrOpcode::kCheckReceiverOrNullOrUndefined:
+ CheckValueInputIs(node, 0, Type::Any());
+ CheckTypeIs(node, Type::ReceiverOrNullOrUndefined());
+ break;
case IrOpcode::kCheckSmi:
CheckValueInputIs(node, 0, Type::Any());
break;
@@ -1462,13 +1487,17 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
case IrOpcode::kCheckedInt32ToTaggedSigned:
case IrOpcode::kCheckedInt64ToInt32:
case IrOpcode::kCheckedInt64ToTaggedSigned:
+ case IrOpcode::kCheckedUint32Bounds:
case IrOpcode::kCheckedUint32ToInt32:
case IrOpcode::kCheckedUint32ToTaggedSigned:
+ case IrOpcode::kCheckedUint64Bounds:
case IrOpcode::kCheckedUint64ToInt32:
case IrOpcode::kCheckedUint64ToTaggedSigned:
case IrOpcode::kCheckedFloat64ToInt32:
+ case IrOpcode::kCheckedFloat64ToInt64:
case IrOpcode::kCheckedTaggedSignedToInt32:
case IrOpcode::kCheckedTaggedToInt32:
+ case IrOpcode::kCheckedTaggedToInt64:
case IrOpcode::kCheckedTaggedToFloat64:
case IrOpcode::kCheckedTaggedToTaggedSigned:
case IrOpcode::kCheckedTaggedToTaggedPointer:
@@ -1715,6 +1744,7 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
case IrOpcode::kChangeFloat64ToUint32:
case IrOpcode::kChangeFloat64ToUint64:
case IrOpcode::kFloat64SilenceNaN:
+ case IrOpcode::kTruncateFloat64ToInt64:
case IrOpcode::kTruncateFloat64ToUint32:
case IrOpcode::kTruncateFloat32ToInt32:
case IrOpcode::kTruncateFloat32ToUint32:
diff --git a/deps/v8/src/compiler/wasm-compiler.cc b/deps/v8/src/compiler/wasm-compiler.cc
index 4f35476dfb..b52f6f0640 100644
--- a/deps/v8/src/compiler/wasm-compiler.cc
+++ b/deps/v8/src/compiler/wasm-compiler.cc
@@ -14,14 +14,13 @@
#include "src/base/v8-fallthrough.h"
#include "src/builtins/builtins.h"
#include "src/code-factory.h"
-#include "src/compiler.h"
-#include "src/compiler/code-generator.h"
+#include "src/compiler/backend/code-generator.h"
+#include "src/compiler/backend/instruction-selector.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/compiler-source-position-table.h"
#include "src/compiler/diamond.h"
#include "src/compiler/graph-visualizer.h"
#include "src/compiler/graph.h"
-#include "src/compiler/instruction-selector.h"
#include "src/compiler/int64-lowering.h"
#include "src/compiler/js-graph.h"
#include "src/compiler/js-operator.h"
@@ -32,14 +31,18 @@
#include "src/compiler/pipeline.h"
#include "src/compiler/simd-scalar-lowering.h"
#include "src/compiler/zone-stats.h"
+#include "src/counters.h"
#include "src/heap/factory.h"
+#include "src/interface-descriptors.h"
#include "src/isolate-inl.h"
-#include "src/log-inl.h"
+#include "src/log.h"
+#include "src/objects/heap-number.h"
#include "src/optimized-compilation-info.h"
#include "src/tracing/trace-event.h"
#include "src/trap-handler/trap-handler.h"
#include "src/wasm/function-body-decoder.h"
#include "src/wasm/function-compiler.h"
+#include "src/wasm/graph-builder-interface.h"
#include "src/wasm/jump-table-assembler.h"
#include "src/wasm/memory-tracing.h"
#include "src/wasm/object-access.h"
@@ -80,24 +83,53 @@ MachineType assert_size(int expected_size, MachineType type) {
#define WASM_INSTANCE_OBJECT_OFFSET(name) \
wasm::ObjectAccess::ToTagged(WasmInstanceObject::k##name##Offset)
-#define LOAD_INSTANCE_FIELD(name, type) \
- SetEffect(graph()->NewNode( \
- mcgraph()->machine()->Load( \
- assert_size(WASM_INSTANCE_OBJECT_SIZE(name), type)), \
- instance_node_.get(), \
- mcgraph()->Int32Constant(WASM_INSTANCE_OBJECT_OFFSET(name)), Effect(), \
- Control()))
-
-#define LOAD_TAGGED_POINTER(base_pointer, byte_offset) \
- SetEffect(graph()->NewNode( \
- mcgraph()->machine()->Load(MachineType::TaggedPointer()), base_pointer, \
- mcgraph()->Int32Constant(byte_offset), Effect(), Control()))
-
-#define LOAD_FIXED_ARRAY_SLOT(array_node, index) \
- LOAD_TAGGED_POINTER( \
- array_node, wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(index))
-
-constexpr uint32_t kBytesPerExceptionValuesArrayElement = 2;
+#define LOAD_RAW(base_pointer, byte_offset, type) \
+ SetEffect(graph()->NewNode(mcgraph()->machine()->Load(type), base_pointer, \
+ mcgraph()->Int32Constant(byte_offset), Effect(), \
+ Control()))
+
+#define LOAD_INSTANCE_FIELD(name, type) \
+ LOAD_RAW(instance_node_.get(), WASM_INSTANCE_OBJECT_OFFSET(name), \
+ assert_size(WASM_INSTANCE_OBJECT_SIZE(name), type))
+
+#define LOAD_TAGGED_POINTER(base_pointer, byte_offset) \
+ LOAD_RAW(base_pointer, byte_offset, MachineType::TaggedPointer())
+
+#define LOAD_TAGGED_ANY(base_pointer, byte_offset) \
+ LOAD_RAW(base_pointer, byte_offset, MachineType::AnyTagged())
+
+#define LOAD_FIXED_ARRAY_SLOT(array_node, index, type) \
+ LOAD_RAW(array_node, \
+ wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(index), type)
+
+#define LOAD_FIXED_ARRAY_SLOT_SMI(array_node, index) \
+ LOAD_FIXED_ARRAY_SLOT(array_node, index, MachineType::TaggedSigned())
+
+#define LOAD_FIXED_ARRAY_SLOT_PTR(array_node, index) \
+ LOAD_FIXED_ARRAY_SLOT(array_node, index, MachineType::TaggedPointer())
+
+#define LOAD_FIXED_ARRAY_SLOT_ANY(array_node, index) \
+ LOAD_FIXED_ARRAY_SLOT(array_node, index, MachineType::AnyTagged())
+
+// This can be used to store tagged Smi values only.
+#define STORE_FIXED_ARRAY_SLOT_SMI(array_node, index, value) \
+ SetEffect(graph()->NewNode( \
+ mcgraph()->machine()->Store(StoreRepresentation( \
+ MachineRepresentation::kTaggedSigned, kNoWriteBarrier)), \
+ array_node, \
+ mcgraph()->Int32Constant( \
+ wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(index)), \
+ value, Effect(), Control()))
+
+// This can be used to store any tagged (Smi and HeapObject) value.
+#define STORE_FIXED_ARRAY_SLOT_ANY(array_node, index, value) \
+ SetEffect(graph()->NewNode( \
+ mcgraph()->machine()->Store(StoreRepresentation( \
+ MachineRepresentation::kTagged, kFullWriteBarrier)), \
+ array_node, \
+ mcgraph()->Int32Constant( \
+ wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(index)), \
+ value, Effect(), Control()))
void MergeControlToEnd(MachineGraph* mcgraph, Node* node) {
Graph* g = mcgraph->graph();
@@ -124,7 +156,7 @@ bool ContainsInt64(wasm::FunctionSig* sig) {
} // namespace
WasmGraphBuilder::WasmGraphBuilder(
- wasm::ModuleEnv* env, Zone* zone, MachineGraph* mcgraph,
+ wasm::CompilationEnv* env, Zone* zone, MachineGraph* mcgraph,
wasm::FunctionSig* sig,
compiler::SourcePositionTable* source_position_table)
: zone_(zone),
@@ -157,13 +189,20 @@ Node* WasmGraphBuilder::Loop(Node* entry) {
return graph()->NewNode(mcgraph()->common()->Loop(1), entry);
}
-Node* WasmGraphBuilder::Terminate(Node* effect, Node* control) {
+Node* WasmGraphBuilder::TerminateLoop(Node* effect, Node* control) {
Node* terminate =
graph()->NewNode(mcgraph()->common()->Terminate(), effect, control);
MergeControlToEnd(mcgraph(), terminate);
return terminate;
}
+Node* WasmGraphBuilder::TerminateThrow(Node* effect, Node* control) {
+ Node* terminate =
+ graph()->NewNode(mcgraph()->common()->Throw(), effect, control);
+ MergeControlToEnd(mcgraph(), terminate);
+ return terminate;
+}
+
bool WasmGraphBuilder::IsPhiWithMerge(Node* phi, Node* merge) {
return phi && IrOpcode::IsPhiOpcode(phi->opcode()) &&
NodeProperties::GetControlInput(phi) == merge;
@@ -2009,10 +2048,10 @@ Node* WasmGraphBuilder::BuildCcallConvertFloat(Node* input,
return tl_d.Phi(int_ty.representation(), nan_val, load);
}
-Node* WasmGraphBuilder::GrowMemory(Node* input) {
+Node* WasmGraphBuilder::MemoryGrow(Node* input) {
needs_stack_check_ = true;
- WasmGrowMemoryDescriptor interface_descriptor;
+ WasmMemoryGrowDescriptor interface_descriptor;
auto call_descriptor = Linkage::GetStubCallDescriptor(
mcgraph()->zone(), // zone
interface_descriptor, // descriptor
@@ -2023,22 +2062,56 @@ Node* WasmGraphBuilder::GrowMemory(Node* input) {
// A direct call to a wasm runtime stub defined in this module.
// Just encode the stub index. This will be patched at relocation.
Node* call_target = mcgraph()->RelocatableIntPtrConstant(
- wasm::WasmCode::kWasmGrowMemory, RelocInfo::WASM_STUB_CALL);
+ wasm::WasmCode::kWasmMemoryGrow, RelocInfo::WASM_STUB_CALL);
return SetEffect(
SetControl(graph()->NewNode(mcgraph()->common()->Call(call_descriptor),
call_target, input, Effect(), Control())));
}
+#ifdef DEBUG
+
+namespace {
+
+constexpr uint32_t kBytesPerExceptionValuesArrayElement = 2;
+
+size_t ComputeEncodedElementSize(wasm::ValueType type) {
+ size_t byte_size =
+ static_cast<size_t>(wasm::ValueTypes::ElementSizeInBytes(type));
+ DCHECK_EQ(byte_size % kBytesPerExceptionValuesArrayElement, 0);
+ DCHECK_LE(1, byte_size / kBytesPerExceptionValuesArrayElement);
+ return byte_size / kBytesPerExceptionValuesArrayElement;
+}
+
+} // namespace
+
+#endif // DEBUG
+
uint32_t WasmGraphBuilder::GetExceptionEncodedSize(
const wasm::WasmException* exception) const {
const wasm::WasmExceptionSig* sig = exception->sig;
uint32_t encoded_size = 0;
for (size_t i = 0; i < sig->parameter_count(); ++i) {
- size_t byte_size = static_cast<size_t>(
- wasm::ValueTypes::ElementSizeInBytes(sig->GetParam(i)));
- DCHECK_EQ(byte_size % kBytesPerExceptionValuesArrayElement, 0);
- DCHECK_LE(1, byte_size / kBytesPerExceptionValuesArrayElement);
- encoded_size += byte_size / kBytesPerExceptionValuesArrayElement;
+ switch (sig->GetParam(i)) {
+ case wasm::kWasmI32:
+ case wasm::kWasmF32:
+ DCHECK_EQ(2, ComputeEncodedElementSize(sig->GetParam(i)));
+ encoded_size += 2;
+ break;
+ case wasm::kWasmI64:
+ case wasm::kWasmF64:
+ DCHECK_EQ(4, ComputeEncodedElementSize(sig->GetParam(i)));
+ encoded_size += 4;
+ break;
+ case wasm::kWasmS128:
+ DCHECK_EQ(8, ComputeEncodedElementSize(sig->GetParam(i)));
+ encoded_size += 8;
+ break;
+ case wasm::kWasmAnyRef:
+ encoded_size += 1;
+ break;
+ default:
+ UNREACHABLE();
+ }
}
return encoded_size;
}
@@ -2054,6 +2127,8 @@ Node* WasmGraphBuilder::Throw(uint32_t exception_index,
Node* except_obj =
BuildCallToRuntime(Runtime::kWasmThrowCreate, create_parameters,
arraysize(create_parameters));
+ Node* values_array =
+ BuildCallToRuntime(Runtime::kWasmExceptionGetValues, &except_obj, 1);
uint32_t index = 0;
const wasm::WasmExceptionSig* sig = exception->sig;
MachineOperatorBuilder* m = mcgraph()->machine();
@@ -2064,7 +2139,7 @@ Node* WasmGraphBuilder::Throw(uint32_t exception_index,
value = graph()->NewNode(m->BitcastFloat32ToInt32(), value);
V8_FALLTHROUGH;
case wasm::kWasmI32:
- BuildEncodeException32BitValue(except_obj, &index, value);
+ BuildEncodeException32BitValue(values_array, &index, value);
break;
case wasm::kWasmF64:
value = graph()->NewNode(m->BitcastFloat64ToInt64(), value);
@@ -2073,11 +2148,29 @@ Node* WasmGraphBuilder::Throw(uint32_t exception_index,
Node* upper32 = graph()->NewNode(
m->TruncateInt64ToInt32(),
Binop(wasm::kExprI64ShrU, value, Int64Constant(32)));
- BuildEncodeException32BitValue(except_obj, &index, upper32);
+ BuildEncodeException32BitValue(values_array, &index, upper32);
Node* lower32 = graph()->NewNode(m->TruncateInt64ToInt32(), value);
- BuildEncodeException32BitValue(except_obj, &index, lower32);
+ BuildEncodeException32BitValue(values_array, &index, lower32);
break;
}
+ case wasm::kWasmS128:
+ BuildEncodeException32BitValue(
+ values_array, &index,
+ graph()->NewNode(m->I32x4ExtractLane(0), value));
+ BuildEncodeException32BitValue(
+ values_array, &index,
+ graph()->NewNode(m->I32x4ExtractLane(1), value));
+ BuildEncodeException32BitValue(
+ values_array, &index,
+ graph()->NewNode(m->I32x4ExtractLane(2), value));
+ BuildEncodeException32BitValue(
+ values_array, &index,
+ graph()->NewNode(m->I32x4ExtractLane(3), value));
+ break;
+ case wasm::kWasmAnyRef:
+ STORE_FIXED_ARRAY_SLOT_ANY(values_array, index, value);
+ ++index;
+ break;
default:
UNREACHABLE();
}
@@ -2095,40 +2188,45 @@ Node* WasmGraphBuilder::Throw(uint32_t exception_index,
except_obj, Effect(), Control())));
}
-void WasmGraphBuilder::BuildEncodeException32BitValue(Node* except_obj,
+void WasmGraphBuilder::BuildEncodeException32BitValue(Node* values_array,
uint32_t* index,
Node* value) {
MachineOperatorBuilder* machine = mcgraph()->machine();
- Node* upper_parameters[] = {
- except_obj, BuildChangeUint31ToSmi(Int32Constant(*index)),
- BuildChangeUint31ToSmi(
- graph()->NewNode(machine->Word32Shr(), value, Int32Constant(16))),
- };
- BuildCallToRuntime(Runtime::kWasmExceptionSetElement, upper_parameters,
- arraysize(upper_parameters));
+ Node* upper_halfword_as_smi = BuildChangeUint31ToSmi(
+ graph()->NewNode(machine->Word32Shr(), value, Int32Constant(16)));
+ STORE_FIXED_ARRAY_SLOT_SMI(values_array, *index, upper_halfword_as_smi);
++(*index);
- Node* lower_parameters[] = {
- except_obj, BuildChangeUint31ToSmi(Int32Constant(*index)),
- BuildChangeUint31ToSmi(graph()->NewNode(machine->Word32And(), value,
- Int32Constant(0xFFFFu))),
- };
- BuildCallToRuntime(Runtime::kWasmExceptionSetElement, lower_parameters,
- arraysize(lower_parameters));
+ Node* lower_halfword_as_smi = BuildChangeUint31ToSmi(
+ graph()->NewNode(machine->Word32And(), value, Int32Constant(0xFFFFu)));
+ STORE_FIXED_ARRAY_SLOT_SMI(values_array, *index, lower_halfword_as_smi);
++(*index);
}
-Node* WasmGraphBuilder::BuildDecodeException32BitValue(Node* const* values,
+Node* WasmGraphBuilder::BuildDecodeException32BitValue(Node* values_array,
uint32_t* index) {
MachineOperatorBuilder* machine = mcgraph()->machine();
- Node* upper = BuildChangeSmiToInt32(values[*index]);
+ Node* upper =
+ BuildChangeSmiToInt32(LOAD_FIXED_ARRAY_SLOT_SMI(values_array, *index));
(*index)++;
upper = graph()->NewNode(machine->Word32Shl(), upper, Int32Constant(16));
- Node* lower = BuildChangeSmiToInt32(values[*index]);
+ Node* lower =
+ BuildChangeSmiToInt32(LOAD_FIXED_ARRAY_SLOT_SMI(values_array, *index));
(*index)++;
Node* value = graph()->NewNode(machine->Word32Or(), upper, lower);
return value;
}
+Node* WasmGraphBuilder::BuildDecodeException64BitValue(Node* values_array,
+ uint32_t* index) {
+ Node* upper = Binop(wasm::kExprI64Shl,
+ Unop(wasm::kExprI64UConvertI32,
+ BuildDecodeException32BitValue(values_array, index)),
+ Int64Constant(32));
+ Node* lower = Unop(wasm::kExprI64UConvertI32,
+ BuildDecodeException32BitValue(values_array, index));
+ return Binop(wasm::kExprI64Ior, upper, lower);
+}
+
Node* WasmGraphBuilder::Rethrow(Node* except_obj) {
needs_stack_check_ = true;
WasmThrowDescriptor interface_descriptor;
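
The encode/decode pair above packs each wasm value into 16-bit halfwords stored as Smis in the values FixedArray, so a 32-bit value takes two slots and a 64-bit value four. A minimal C++ sketch of the round trip, modeling the Smi-tagged array as plain uint16_t storage (the helper names are illustrative, not V8 API):

    #include <cstdint>
    #include <vector>

    void Encode32(std::vector<uint16_t>& values, uint32_t* index, uint32_t value) {
      // Upper halfword first, then lower, matching BuildEncodeException32BitValue.
      values[(*index)++] = static_cast<uint16_t>(value >> 16);
      values[(*index)++] = static_cast<uint16_t>(value & 0xFFFFu);
    }

    uint32_t Decode32(const std::vector<uint16_t>& values, uint32_t* index) {
      uint32_t upper = values[(*index)++];
      uint32_t lower = values[(*index)++];
      return (upper << 16) | lower;  // mirrors BuildDecodeException32BitValue
    }

    uint64_t Decode64(const std::vector<uint16_t>& values, uint32_t* index) {
      // The upper 32 bits are stored before the lower 32, as in Throw and
      // BuildDecodeException64BitValue.
      uint64_t upper = Decode32(values, index);
      uint64_t lower = Decode32(values, index);
      return (upper << 32) | lower;
    }
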
@@ -2152,7 +2250,7 @@ Node* WasmGraphBuilder::ExceptionTagEqual(Node* caught_tag,
Node* WasmGraphBuilder::LoadExceptionTagFromTable(uint32_t exception_index) {
Node* exceptions_table =
LOAD_INSTANCE_FIELD(ExceptionsTable, MachineType::TaggedPointer());
- Node* tag = LOAD_FIXED_ARRAY_SLOT(exceptions_table, exception_index);
+ Node* tag = LOAD_FIXED_ARRAY_SLOT_PTR(exceptions_table, exception_index);
return tag;
}
@@ -2162,52 +2260,55 @@ Node* WasmGraphBuilder::GetExceptionTag(Node* except_obj) {
}
Node** WasmGraphBuilder::GetExceptionValues(
- Node* except_obj, const wasm::WasmException* except_decl) {
- // TODO(kschimpf): We need to move this code to the function-body-decoder.cc
- // in order to build landing-pad (exception) edges in case the runtime
- // call causes an exception.
-
- // Start by getting the encoded values from the exception.
- uint32_t encoded_size = GetExceptionEncodedSize(except_decl);
- Node** values = Buffer(encoded_size);
- for (uint32_t i = 0; i < encoded_size; ++i) {
- Node* parameters[] = {except_obj,
- BuildChangeUint31ToSmi(Uint32Constant(i))};
- values[i] = BuildCallToRuntime(Runtime::kWasmExceptionGetElement,
- parameters, arraysize(parameters));
- }
-
- // Now convert the leading entries to the corresponding parameter values.
+ Node* except_obj, const wasm::WasmException* exception) {
+ Node* values_array =
+ BuildCallToRuntime(Runtime::kWasmExceptionGetValues, &except_obj, 1);
uint32_t index = 0;
- const wasm::WasmExceptionSig* sig = except_decl->sig;
+ const wasm::WasmExceptionSig* sig = exception->sig;
+ Node** values = Buffer(sig->parameter_count());
for (size_t i = 0; i < sig->parameter_count(); ++i) {
- Node* value = BuildDecodeException32BitValue(values, &index);
- switch (wasm::ValueType type = sig->GetParam(i)) {
+ Node* value;
+ switch (sig->GetParam(i)) {
+ case wasm::kWasmI32:
+ value = BuildDecodeException32BitValue(values_array, &index);
+ break;
+ case wasm::kWasmI64:
+ value = BuildDecodeException64BitValue(values_array, &index);
+ break;
case wasm::kWasmF32: {
- value = Unop(wasm::kExprF32ReinterpretI32, value);
+ value = Unop(wasm::kExprF32ReinterpretI32,
+ BuildDecodeException32BitValue(values_array, &index));
break;
}
- case wasm::kWasmI32:
- break;
- case wasm::kWasmF64:
- case wasm::kWasmI64: {
- Node* upper =
- Binop(wasm::kExprI64Shl, Unop(wasm::kExprI64UConvertI32, value),
- Int64Constant(32));
- Node* lower = Unop(wasm::kExprI64UConvertI32,
- BuildDecodeException32BitValue(values, &index));
- value = Binop(wasm::kExprI64Ior, upper, lower);
- if (type == wasm::kWasmF64) {
- value = Unop(wasm::kExprF64ReinterpretI64, value);
- }
+ case wasm::kWasmF64: {
+ value = Unop(wasm::kExprF64ReinterpretI64,
+ BuildDecodeException64BitValue(values_array, &index));
break;
}
+ case wasm::kWasmS128:
+ value = graph()->NewNode(
+ mcgraph()->machine()->I32x4Splat(),
+ BuildDecodeException32BitValue(values_array, &index));
+ value = graph()->NewNode(
+ mcgraph()->machine()->I32x4ReplaceLane(1), value,
+ BuildDecodeException32BitValue(values_array, &index));
+ value = graph()->NewNode(
+ mcgraph()->machine()->I32x4ReplaceLane(2), value,
+ BuildDecodeException32BitValue(values_array, &index));
+ value = graph()->NewNode(
+ mcgraph()->machine()->I32x4ReplaceLane(3), value,
+ BuildDecodeException32BitValue(values_array, &index));
+ break;
+ case wasm::kWasmAnyRef:
+ value = LOAD_FIXED_ARRAY_SLOT_ANY(values_array, index);
+ ++index;
+ break;
default:
UNREACHABLE();
}
values[i] = value;
}
- DCHECK_EQ(index, encoded_size);
+ DCHECK_EQ(index, GetExceptionEncodedSize(exception));
return values;
}
@@ -2596,20 +2697,21 @@ Node* WasmGraphBuilder::BuildImportCall(wasm::FunctionSig* sig, Node** args,
Node*** rets,
wasm::WasmCodePosition position,
int func_index) {
- // Load the instance from the imported_instances array at a known offset.
- Node* imported_instances = LOAD_INSTANCE_FIELD(ImportedFunctionInstances,
- MachineType::TaggedPointer());
- Node* instance_node = LOAD_FIXED_ARRAY_SLOT(imported_instances, func_index);
+ // Load the imported function refs array from the instance.
+ Node* imported_function_refs =
+ LOAD_INSTANCE_FIELD(ImportedFunctionRefs, MachineType::TaggedPointer());
+ Node* ref_node =
+ LOAD_FIXED_ARRAY_SLOT_PTR(imported_function_refs, func_index);
// Load the target from the imported_targets array at a known offset.
Node* imported_targets =
LOAD_INSTANCE_FIELD(ImportedFunctionTargets, MachineType::Pointer());
Node* target_node = SetEffect(graph()->NewNode(
mcgraph()->machine()->Load(MachineType::Pointer()), imported_targets,
- mcgraph()->Int32Constant(func_index * kPointerSize), Effect(),
+ mcgraph()->Int32Constant(func_index * kSystemPointerSize), Effect(),
Control()));
args[0] = target_node;
- return BuildWasmCall(sig, args, rets, position, instance_node,
+ return BuildWasmCall(sig, args, rets, position, ref_node,
untrusted_code_mitigations_ ? kRetpoline : kNoRetpoline);
}
@@ -2617,31 +2719,33 @@ Node* WasmGraphBuilder::BuildImportCall(wasm::FunctionSig* sig, Node** args,
Node*** rets,
wasm::WasmCodePosition position,
Node* func_index) {
- // Load the instance from the imported_instances array.
- Node* imported_instances = LOAD_INSTANCE_FIELD(ImportedFunctionInstances,
- MachineType::TaggedPointer());
- // Access fixed array at {header_size - tag + func_index * kPointerSize}.
+ // Load the imported function refs array from the instance.
+ Node* imported_function_refs =
+ LOAD_INSTANCE_FIELD(ImportedFunctionRefs, MachineType::TaggedPointer());
+ // Access fixed array at {header_size - tag + func_index * kTaggedSize}.
Node* imported_instances_data = graph()->NewNode(
- mcgraph()->machine()->IntAdd(), imported_instances,
+ mcgraph()->machine()->IntAdd(), imported_function_refs,
mcgraph()->IntPtrConstant(
wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(0)));
- Node* func_index_times_pointersize = graph()->NewNode(
+ Node* func_index_times_tagged_size = graph()->NewNode(
mcgraph()->machine()->IntMul(), Uint32ToUintptr(func_index),
- mcgraph()->Int32Constant(kPointerSize));
- Node* instance_node = SetEffect(
+ mcgraph()->Int32Constant(kTaggedSize));
+ Node* ref_node = SetEffect(
graph()->NewNode(mcgraph()->machine()->Load(MachineType::TaggedPointer()),
- imported_instances_data, func_index_times_pointersize,
+ imported_instances_data, func_index_times_tagged_size,
Effect(), Control()));
// Load the target from the imported_targets array at the offset of
// {func_index}.
+ STATIC_ASSERT(kTaggedSize == kSystemPointerSize);
+ Node* func_index_times_pointersize = func_index_times_tagged_size;
Node* imported_targets =
LOAD_INSTANCE_FIELD(ImportedFunctionTargets, MachineType::Pointer());
Node* target_node = SetEffect(graph()->NewNode(
mcgraph()->machine()->Load(MachineType::Pointer()), imported_targets,
func_index_times_pointersize, Effect(), Control()));
args[0] = target_node;
- return BuildWasmCall(sig, args, rets, position, instance_node,
+ return BuildWasmCall(sig, args, rets, position, ref_node,
untrusted_code_mitigations_ ? kRetpoline : kNoRetpoline);
}
@@ -2700,12 +2804,12 @@ Node* WasmGraphBuilder::CallIndirect(uint32_t sig_index, Node** args,
LOAD_INSTANCE_FIELD(IndirectFunctionTableSigIds, MachineType::Pointer());
int32_t expected_sig_id = env_->module->signature_ids[sig_index];
- Node* scaled_key = Uint32ToUintptr(
+ Node* int32_scaled_key = Uint32ToUintptr(
graph()->NewNode(machine->Word32Shl(), key, Int32Constant(2)));
- Node* loaded_sig =
- SetEffect(graph()->NewNode(machine->Load(MachineType::Int32()),
- ift_sig_ids, scaled_key, Effect(), Control()));
+ Node* loaded_sig = SetEffect(
+ graph()->NewNode(machine->Load(MachineType::Int32()), ift_sig_ids,
+ int32_scaled_key, Effect(), Control()));
Node* sig_match = graph()->NewNode(machine->WordEqual(), loaded_sig,
Int32Constant(expected_sig_id));
@@ -2713,19 +2817,22 @@ Node* WasmGraphBuilder::CallIndirect(uint32_t sig_index, Node** args,
Node* ift_targets =
LOAD_INSTANCE_FIELD(IndirectFunctionTableTargets, MachineType::Pointer());
- Node* ift_instances = LOAD_INSTANCE_FIELD(IndirectFunctionTableInstances,
+ Node* ift_instances = LOAD_INSTANCE_FIELD(IndirectFunctionTableRefs,
MachineType::TaggedPointer());
- scaled_key = graph()->NewNode(machine->Word32Shl(), key,
- Int32Constant(kPointerSizeLog2));
+ Node* intptr_scaled_key = graph()->NewNode(
+ machine->Word32Shl(), key, Int32Constant(kSystemPointerSizeLog2));
- Node* target =
- SetEffect(graph()->NewNode(machine->Load(MachineType::Pointer()),
- ift_targets, scaled_key, Effect(), Control()));
+ Node* target = SetEffect(
+ graph()->NewNode(machine->Load(MachineType::Pointer()), ift_targets,
+ intptr_scaled_key, Effect(), Control()));
+
+ STATIC_ASSERT(kTaggedSize == kSystemPointerSize);
+ Node* tagged_scaled_key = intptr_scaled_key;
Node* target_instance = SetEffect(graph()->NewNode(
machine->Load(MachineType::TaggedPointer()),
- graph()->NewNode(machine->IntAdd(), ift_instances, scaled_key),
+ graph()->NewNode(machine->IntAdd(), ift_instances, tagged_scaled_key),
Int32Constant(wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(0)),
Effect(), Control()));
@@ -2741,7 +2848,7 @@ Node* WasmGraphBuilder::BuildI32Rol(Node* left, Node* right) {
Int32Matcher m(right);
if (m.HasValue()) {
return Binop(wasm::kExprI32Ror, left,
- mcgraph()->Int32Constant(32 - m.Value()));
+ mcgraph()->Int32Constant(32 - (m.Value() & 0x1F)));
} else {
return Binop(wasm::kExprI32Ror, left,
Binop(wasm::kExprI32Sub, mcgraph()->Int32Constant(32), right));
@@ -2754,7 +2861,7 @@ Node* WasmGraphBuilder::BuildI64Rol(Node* left, Node* right) {
Int64Matcher m(right);
if (m.HasValue()) {
return Binop(wasm::kExprI64Ror, left,
- mcgraph()->Int64Constant(64 - m.Value()));
+ mcgraph()->Int64Constant(64 - (m.Value() & 0x3F)));
} else {
return Binop(wasm::kExprI64Ror, left,
Binop(wasm::kExprI64Sub, mcgraph()->Int64Constant(64), right));
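
The new & 0x1F / & 0x3F masks matter when the constant rotation count falls outside [0, 31] (resp. [0, 63]): the rewrite rol(x, k) == ror(x, 32 - k) would otherwise feed a negative or over-wide count into the ror. A self-contained sketch of the 32-bit identity, independent of the graph builder:

    #include <cstdint>

    uint32_t Ror32(uint32_t x, uint32_t n) {
      n &= 31;  // hardware-style wraparound
      return (x >> n) | (x << ((32 - n) & 31));
    }

    // The BuildI32Rol rewrite: rotate-left by k is rotate-right by 32 - k,
    // with k masked to [0, 31] first so the derived count stays in range.
    uint32_t Rol32(uint32_t x, uint32_t k) {
      return Ror32(x, 32 - (k & 0x1F));
    }
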
@@ -2810,6 +2917,19 @@ Node* WasmGraphBuilder::BuildChangeSmiToInt32(Node* value) {
return value;
}
+Node* WasmGraphBuilder::BuildConvertUint32ToSmiWithSaturation(Node* value,
+ uint32_t maxval) {
+ DCHECK(Smi::IsValid(maxval));
+ Node* max = Uint32Constant(maxval);
+ Node* check = graph()->NewNode(mcgraph()->machine()->Uint32LessThanOrEqual(),
+ value, max);
+ Node* valsmi = BuildChangeUint31ToSmi(value);
+ Node* maxsmi = graph()->NewNode(mcgraph()->common()->NumberConstant(maxval));
+ Diamond d(graph(), mcgraph()->common(), check, BranchHint::kTrue);
+ d.Chain(Control());
+ return d.Phi(MachineRepresentation::kTagged, valsmi, maxsmi);
+}
+
void WasmGraphBuilder::InitInstanceCache(
WasmInstanceCacheNodes* instance_cache) {
DCHECK_NOT_NULL(instance_node_);
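
BuildConvertUint32ToSmiWithSaturation above emits a branch diamond on Uint32LessThanOrEqual and picks either the converted value or the maximum. A scalar model of the same behavior (the Smi tagging shown is the classic low-bit scheme and only illustrative; actual layouts vary with pointer compression):

    #include <algorithm>
    #include <cstdint>

    int64_t Uint32ToSmiSaturated(uint32_t value, uint32_t maxval) {
      uint32_t clamped = std::min(value, maxval);  // the diamond's Phi
      return static_cast<int64_t>(clamped) << 1;   // Smi: tag bit 0 == 0
    }
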
@@ -2916,24 +3036,28 @@ Node* WasmGraphBuilder::CreateOrMergeIntoEffectPhi(Node* merge, Node* tnode,
return tnode;
}
+Node* WasmGraphBuilder::GetImportedMutableGlobals() {
+ if (imported_mutable_globals_ == nullptr) {
+ // Load imported_mutable_globals_ from the instance object at runtime.
+ imported_mutable_globals_ = graph()->NewNode(
+ mcgraph()->machine()->Load(MachineType::UintPtr()),
+ instance_node_.get(),
+ mcgraph()->Int32Constant(
+ WASM_INSTANCE_OBJECT_OFFSET(ImportedMutableGlobals)),
+ graph()->start(), graph()->start());
+ }
+ return imported_mutable_globals_.get();
+}
+
void WasmGraphBuilder::GetGlobalBaseAndOffset(MachineType mem_type,
const wasm::WasmGlobal& global,
Node** base_node,
Node** offset_node) {
DCHECK_NOT_NULL(instance_node_);
if (global.mutability && global.imported) {
- if (imported_mutable_globals_ == nullptr) {
- // Load imported_mutable_globals_ from the instance object at runtime.
- imported_mutable_globals_ = graph()->NewNode(
- mcgraph()->machine()->Load(MachineType::UintPtr()),
- instance_node_.get(),
- mcgraph()->Int32Constant(
- WASM_INSTANCE_OBJECT_OFFSET(ImportedMutableGlobals)),
- graph()->start(), graph()->start());
- }
*base_node = SetEffect(graph()->NewNode(
mcgraph()->machine()->Load(MachineType::UintPtr()),
- imported_mutable_globals_.get(),
+ GetImportedMutableGlobals(),
mcgraph()->Int32Constant(global.index * sizeof(Address)), Effect(),
Control()));
*offset_node = mcgraph()->Int32Constant(0);
@@ -2966,6 +3090,34 @@ void WasmGraphBuilder::GetGlobalBaseAndOffset(MachineType mem_type,
}
}
+void WasmGraphBuilder::GetBaseAndOffsetForImportedMutableAnyRefGlobal(
+ const wasm::WasmGlobal& global, Node** base, Node** offset) {
+ // Load the base from the ImportedMutableGlobalsBuffer of the instance.
+ Node* buffers = LOAD_INSTANCE_FIELD(ImportedMutableGlobalsBuffers,
+ MachineType::TaggedPointer());
+ *base = LOAD_FIXED_ARRAY_SLOT_ANY(buffers, global.index);
+
+ // For the offset we need the index of the global in the buffer, and then
+ // calculate the actual offset from the index. Load the index from the
+ // ImportedMutableGlobals array of the instance.
+ Node* index = SetEffect(
+ graph()->NewNode(mcgraph()->machine()->Load(MachineType::UintPtr()),
+ GetImportedMutableGlobals(),
+ mcgraph()->Int32Constant(global.index * sizeof(Address)),
+ Effect(), Control()));
+
+  // From the index, calculate the actual offset in the FixedArray. This

+ // is kHeaderSize + (index * kTaggedSize). kHeaderSize can be acquired with
+ // wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(0).
+ Node* index_times_tagged_size =
+ graph()->NewNode(mcgraph()->machine()->IntMul(), Uint32ToUintptr(index),
+ mcgraph()->Int32Constant(kTaggedSize));
+ *offset = graph()->NewNode(
+ mcgraph()->machine()->IntAdd(), index_times_tagged_size,
+ mcgraph()->IntPtrConstant(
+ wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(0)));
+}
+
Node* WasmGraphBuilder::MemBuffer(uint32_t offset) {
DCHECK_NOT_NULL(instance_cache_);
Node* mem_start = instance_cache_->mem_start;
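
The offset computed in GetBaseAndOffsetForImportedMutableAnyRefGlobal is the standard FixedArray slot address, kHeaderSize + index * kTaggedSize. With illustrative constants (the real values come from ElementOffsetInTaggedFixedArray(0) and kTaggedSize on the target):

    #include <cstddef>

    constexpr size_t kTaggedSizeSample = 8;   // 64-bit, no pointer compression
    constexpr size_t kHeaderSizeSample = 16;  // ElementOffsetInTaggedFixedArray(0)

    size_t TaggedSlotOffset(size_t index) {
      return kHeaderSizeSample + index * kTaggedSizeSample;
    }
    // TaggedSlotOffset(3) == 40 with these sample constants.
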
@@ -2993,18 +3145,16 @@ Node* WasmGraphBuilder::CurrentMemoryPages() {
Node* WasmGraphBuilder::BuildLoadBuiltinFromInstance(int builtin_index) {
DCHECK(Builtins::IsBuiltinId(builtin_index));
- Node* roots =
- LOAD_INSTANCE_FIELD(RootsArrayAddress, MachineType::TaggedPointer());
- return LOAD_TAGGED_POINTER(
- roots, Heap::roots_to_builtins_offset() + builtin_index * kPointerSize);
+ Node* isolate_root = LOAD_INSTANCE_FIELD(IsolateRoot, MachineType::Pointer());
+ return LOAD_TAGGED_POINTER(isolate_root,
+ IsolateData::builtin_slot_offset(builtin_index));
}
// Only call this function for code which is not reused across instantiations,
// as we do not patch the embedded js_context.
-Node* WasmGraphBuilder::BuildCallToRuntimeWithContext(Runtime::FunctionId f,
- Node* js_context,
- Node** parameters,
- int parameter_count) {
+Node* WasmGraphBuilder::BuildCallToRuntimeWithContext(
+ Runtime::FunctionId f, Node* js_context, Node** parameters,
+ int parameter_count, Node** effect, Node* control) {
const Runtime::Function* fun = Runtime::FunctionForId(f);
auto call_descriptor = Linkage::GetRuntimeCallDescriptor(
mcgraph()->zone(), f, fun->nargs, Operator::kNoProperties,
@@ -3014,9 +3164,10 @@ Node* WasmGraphBuilder::BuildCallToRuntimeWithContext(Runtime::FunctionId f,
DCHECK_EQ(1, fun->result_size);
Node* centry_stub =
LOAD_INSTANCE_FIELD(CEntryStub, MachineType::TaggedPointer());
- // At the moment we only allow 4 parameters. If more parameters are needed,
+  // TODO(titzer): allow an arbitrary number of runtime arguments
+ // At the moment we only allow 5 parameters. If more parameters are needed,
// increase this constant accordingly.
- static const int kMaxParams = 4;
+ static const int kMaxParams = 5;
DCHECK_GE(kMaxParams, parameter_count);
Node* inputs[kMaxParams + 6];
int count = 0;
@@ -3028,37 +3179,72 @@ Node* WasmGraphBuilder::BuildCallToRuntimeWithContext(Runtime::FunctionId f,
mcgraph()->ExternalConstant(ExternalReference::Create(f)); // ref
inputs[count++] = mcgraph()->Int32Constant(fun->nargs); // arity
inputs[count++] = js_context; // js_context
- inputs[count++] = Effect();
- inputs[count++] = Control();
+ inputs[count++] = *effect;
+ inputs[count++] = control;
- return SetEffect(mcgraph()->graph()->NewNode(
- mcgraph()->common()->Call(call_descriptor), count, inputs));
+ Node* call = mcgraph()->graph()->NewNode(
+ mcgraph()->common()->Call(call_descriptor), count, inputs);
+ *effect = call;
+ return call;
}
Node* WasmGraphBuilder::BuildCallToRuntime(Runtime::FunctionId f,
Node** parameters,
int parameter_count) {
return BuildCallToRuntimeWithContext(f, NoContextConstant(), parameters,
- parameter_count);
+ parameter_count, effect_, Control());
}
Node* WasmGraphBuilder::GetGlobal(uint32_t index) {
+ const wasm::WasmGlobal& global = env_->module->globals[index];
+ if (global.type == wasm::ValueType::kWasmAnyRef) {
+ if (global.mutability && global.imported) {
+ Node* base = nullptr;
+ Node* offset = nullptr;
+ GetBaseAndOffsetForImportedMutableAnyRefGlobal(global, &base, &offset);
+ return SetEffect(
+ graph()->NewNode(mcgraph()->machine()->Load(MachineType::AnyTagged()),
+ base, offset, Effect(), Control()));
+ }
+ Node* globals_buffer =
+ LOAD_INSTANCE_FIELD(TaggedGlobalsBuffer, MachineType::TaggedPointer());
+ return LOAD_FIXED_ARRAY_SLOT_ANY(globals_buffer, global.offset);
+ }
+
MachineType mem_type =
wasm::ValueTypes::MachineTypeFor(env_->module->globals[index].type);
Node* base = nullptr;
Node* offset = nullptr;
GetGlobalBaseAndOffset(mem_type, env_->module->globals[index], &base,
&offset);
- Node* load = SetEffect(graph()->NewNode(mcgraph()->machine()->Load(mem_type),
- base, offset, Effect(), Control()));
+ Node* result = SetEffect(graph()->NewNode(
+ mcgraph()->machine()->Load(mem_type), base, offset, Effect(), Control()));
#if defined(V8_TARGET_BIG_ENDIAN)
- load = BuildChangeEndiannessLoad(load, mem_type,
- env_->module->globals[index].type);
+ result = BuildChangeEndiannessLoad(result, mem_type,
+ env_->module->globals[index].type);
#endif
- return load;
+ return result;
}
Node* WasmGraphBuilder::SetGlobal(uint32_t index, Node* val) {
+ const wasm::WasmGlobal& global = env_->module->globals[index];
+ if (global.type == wasm::ValueType::kWasmAnyRef) {
+ if (global.mutability && global.imported) {
+ Node* base = nullptr;
+ Node* offset = nullptr;
+ GetBaseAndOffsetForImportedMutableAnyRefGlobal(global, &base, &offset);
+
+ return SetEffect(graph()->NewNode(
+ mcgraph()->machine()->Store(StoreRepresentation(
+ MachineRepresentation::kTagged, kFullWriteBarrier)),
+ base, offset, val, Effect(), Control()));
+ }
+ Node* globals_buffer =
+ LOAD_INSTANCE_FIELD(TaggedGlobalsBuffer, MachineType::TaggedPointer());
+ return STORE_FIXED_ARRAY_SLOT_ANY(globals_buffer,
+ env_->module->globals[index].offset, val);
+ }
+
MachineType mem_type =
wasm::ValueTypes::MachineTypeFor(env_->module->globals[index].type);
Node* base = nullptr;
@@ -3078,19 +3264,33 @@ Node* WasmGraphBuilder::SetGlobal(uint32_t index, Node* val) {
Node* WasmGraphBuilder::CheckBoundsAndAlignment(
uint8_t access_size, Node* index, uint32_t offset,
wasm::WasmCodePosition position) {
- // Atomic operations access the memory, need to be bound checked till
- // TrapHandlers are enabled on atomic operations
+ // Atomic operations need bounds checks until the backend can emit protected
+ // loads.
index =
BoundsCheckMem(access_size, index, offset, position, kNeedsBoundsCheck);
- Node* effective_address =
- graph()->NewNode(mcgraph()->machine()->IntAdd(), MemBuffer(offset),
- Uint32ToUintptr(index));
- // Unlike regular memory accesses, unaligned memory accesses for atomic
- // operations should trap
- // Access sizes are in powers of two, calculate mod without using division
- Node* cond =
- graph()->NewNode(mcgraph()->machine()->WordAnd(), effective_address,
- IntPtrConstant(access_size - 1));
+
+ const uintptr_t align_mask = access_size - 1;
+
+ // Don't emit an alignment check if the index is a constant.
+ // TODO(wasm): a constant match is also done above in {BoundsCheckMem}.
+ UintPtrMatcher match(index);
+ if (match.HasValue()) {
+ uintptr_t effective_offset = match.Value() + offset;
+ if ((effective_offset & align_mask) != 0) {
+      // Statically known to be unaligned; trap.
+ TrapIfEq32(wasm::kTrapUnalignedAccess, Int32Constant(0), 0, position);
+ }
+ return index;
+ }
+
+ // Unlike regular memory accesses, atomic memory accesses should trap if
+ // the effective offset is misaligned.
+ // TODO(wasm): this addition is redundant with one inserted by {MemBuffer}.
+ Node* effective_offset = graph()->NewNode(mcgraph()->machine()->IntAdd(),
+ MemBuffer(offset), index);
+
+ Node* cond = graph()->NewNode(mcgraph()->machine()->WordAnd(),
+ effective_offset, IntPtrConstant(align_mask));
TrapIfFalse(wasm::kTrapUnalignedAccess,
graph()->NewNode(mcgraph()->machine()->Word32Equal(), cond,
mcgraph()->Int32Constant(0)),
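
Both alignment paths above, the static one and the emitted WordAnd, rely on access sizes being powers of two, so an address is aligned exactly when its bits under access_size - 1 are all zero:

    #include <cstdint>

    // (addr & (size - 1)) == 0  <=>  addr % size == 0, when size is a power of 2.
    bool IsAlignedAccess(uintptr_t effective_offset, uintptr_t access_size) {
      uintptr_t align_mask = access_size - 1;
      return (effective_offset & align_mask) == 0;
    }
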
@@ -3098,6 +3298,9 @@ Node* WasmGraphBuilder::CheckBoundsAndAlignment(
return index;
}
+// Insert code to bounds check a memory access if necessary. Return the
+// bounds-checked index, which is guaranteed to have (the equivalent of)
+// {uintptr_t} representation.
Node* WasmGraphBuilder::BoundsCheckMem(uint8_t access_size, Node* index,
uint32_t offset,
wasm::WasmCodePosition position,
@@ -3110,9 +3313,7 @@ Node* WasmGraphBuilder::BoundsCheckMem(uint8_t access_size, Node* index,
return index;
}
- const bool statically_oob = access_size > env_->max_memory_size ||
- offset > env_->max_memory_size - access_size;
- if (statically_oob) {
+ if (!IsInBounds(offset, access_size, env_->max_memory_size)) {
// The access will be out of bounds, even for the largest memory.
TrapIfEq32(wasm::kTrapMemOutOfBounds, Int32Constant(0), 0, position);
return mcgraph()->IntPtrConstant(0);
@@ -3167,6 +3368,39 @@ Node* WasmGraphBuilder::BoundsCheckMem(uint8_t access_size, Node* index,
return index;
}
+// Check that the range [start, start + size) is in the range [0, max).
+void WasmGraphBuilder::BoundsCheckRange(Node* start, Node* size, Node* max,
+ wasm::WasmCodePosition position) {
+ // The accessed memory is [start, end), where {end} is {start + size}. We
+ // want to check that {start + size <= max}, making sure that {start + size}
+ // doesn't overflow. This can be expressed as {start <= max - size} as long
+ // as {max - size} isn't negative, which is true if {size <= max}.
+ auto m = mcgraph()->machine();
+ Node* cond = graph()->NewNode(m->Uint32LessThanOrEqual(), size, max);
+ TrapIfFalse(wasm::kTrapMemOutOfBounds, cond, position);
+
+ // This produces a positive number, since {size <= max}.
+ Node* effective_size = graph()->NewNode(m->Int32Sub(), max, size);
+
+ // Introduce the actual bounds check.
+ Node* check =
+ graph()->NewNode(m->Uint32LessThanOrEqual(), start, effective_size);
+ TrapIfFalse(wasm::kTrapMemOutOfBounds, check, position);
+
+  // TODO(binji): Does this need an additional untrusted_code_mitigations_ mask
+ // like BoundsCheckMem above?
+}
+
+Node* WasmGraphBuilder::BoundsCheckMemRange(Node* start, Node* size,
+ wasm::WasmCodePosition position) {
+ // TODO(binji): Support trap handler.
+ if (!FLAG_wasm_no_bounds_checks) {
+ BoundsCheckRange(start, size, instance_cache_->mem_size, position);
+ }
+ return graph()->NewNode(mcgraph()->machine()->IntAdd(), MemBuffer(0),
+ Uint32ToUintptr(start));
+}
+
const Operator* WasmGraphBuilder::GetSafeLoadOperator(int offset,
wasm::ValueType type) {
int alignment = offset % (wasm::ValueTypes::ElementSizeInBytes(type));
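
The comment in BoundsCheckRange spells out the overflow-free rewrite of start + size <= max; as scalar code, the two TrapIfFalse checks are:

    #include <cstdint>

    bool RangeInBounds(uint32_t start, uint32_t size, uint32_t max) {
      if (size > max) return false;  // first check: max - size cannot underflow
      return start <= max - size;    // second check: start + size <= max, no overflow
    }
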
@@ -3773,8 +4007,8 @@ Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode, Node* const* inputs) {
case wasm::kExprS128Not:
return graph()->NewNode(mcgraph()->machine()->S128Not(), inputs[0]);
case wasm::kExprS128Select:
- return graph()->NewNode(mcgraph()->machine()->S128Select(), inputs[0],
- inputs[1], inputs[2]);
+ return graph()->NewNode(mcgraph()->machine()->S128Select(), inputs[2],
+ inputs[0], inputs[1]);
case wasm::kExprS1x4AnyTrue:
return graph()->NewNode(mcgraph()->machine()->S1x4AnyTrue(), inputs[0]);
case wasm::kExprS1x4AllTrue:
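
The S128Select reorder passes the mask as the operator's first input; the operation itself is wasm's v128.bitselect, a per-bit blend. A one-lane scalar model (64 bits shown; v128 applies the same formula across 128 bits):

    #include <cstdint>

    // Where a mask bit is 1, take the bit from if_true, otherwise from if_false.
    uint64_t BitSelect(uint64_t mask, uint64_t if_true, uint64_t if_false) {
      return (if_true & mask) | (if_false & ~mask);
    }
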
@@ -3993,6 +4227,89 @@ Node* WasmGraphBuilder::AtomicOp(wasm::WasmOpcode opcode, Node* const* inputs,
}
ATOMIC_STORE_LIST(BUILD_ATOMIC_STORE_OP)
#undef BUILD_ATOMIC_STORE_OP
+ case wasm::kExprAtomicWake: {
+ Node* index = CheckBoundsAndAlignment(
+ wasm::ValueTypes::MemSize(MachineType::Uint32()), inputs[0], offset,
+ position);
+ // Now that we've bounds-checked, compute the effective address.
+ Node* address = graph()->NewNode(mcgraph()->machine()->Int32Add(),
+ Uint32Constant(offset), index);
+ WasmAtomicWakeDescriptor interface_descriptor;
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
+ mcgraph()->zone(), interface_descriptor,
+ interface_descriptor.GetStackParameterCount(),
+ CallDescriptor::kNoFlags, Operator::kNoProperties,
+ StubCallMode::kCallWasmRuntimeStub);
+ Node* call_target = mcgraph()->RelocatableIntPtrConstant(
+ wasm::WasmCode::kWasmAtomicWake, RelocInfo::WASM_STUB_CALL);
+ node = graph()->NewNode(mcgraph()->common()->Call(call_descriptor),
+ call_target, address, inputs[1], Effect(),
+ Control());
+ break;
+ }
+
+ case wasm::kExprI32AtomicWait: {
+ Node* index = CheckBoundsAndAlignment(
+ wasm::ValueTypes::MemSize(MachineType::Uint32()), inputs[0], offset,
+ position);
+ // Now that we've bounds-checked, compute the effective address.
+ Node* address = graph()->NewNode(mcgraph()->machine()->Int32Add(),
+ Uint32Constant(offset), index);
+ Node* timeout;
+ if (mcgraph()->machine()->Is32()) {
+ timeout = BuildF64SConvertI64(inputs[2]);
+ } else {
+ timeout = graph()->NewNode(mcgraph()->machine()->RoundInt64ToFloat64(),
+ inputs[2]);
+ }
+ WasmI32AtomicWaitDescriptor interface_descriptor;
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
+ mcgraph()->zone(), interface_descriptor,
+ interface_descriptor.GetStackParameterCount(),
+ CallDescriptor::kNoFlags, Operator::kNoProperties,
+ StubCallMode::kCallWasmRuntimeStub);
+ Node* call_target = mcgraph()->RelocatableIntPtrConstant(
+ wasm::WasmCode::kWasmI32AtomicWait, RelocInfo::WASM_STUB_CALL);
+ node = graph()->NewNode(mcgraph()->common()->Call(call_descriptor),
+ call_target, address, inputs[1], timeout,
+ Effect(), Control());
+ break;
+ }
+
+ case wasm::kExprI64AtomicWait: {
+ Node* index = CheckBoundsAndAlignment(
+ wasm::ValueTypes::MemSize(MachineType::Uint32()), inputs[0], offset,
+ position);
+ // Now that we've bounds-checked, compute the effective address.
+ Node* address = graph()->NewNode(mcgraph()->machine()->Int32Add(),
+ Uint32Constant(offset), index);
+ Node* timeout;
+ if (mcgraph()->machine()->Is32()) {
+ timeout = BuildF64SConvertI64(inputs[2]);
+ } else {
+ timeout = graph()->NewNode(mcgraph()->machine()->RoundInt64ToFloat64(),
+ inputs[2]);
+ }
+ Node* expected_value_low = graph()->NewNode(
+ mcgraph()->machine()->TruncateInt64ToInt32(), inputs[1]);
+ Node* tmp = graph()->NewNode(mcgraph()->machine()->Word64Shr(), inputs[1],
+ Int64Constant(32));
+ Node* expected_value_high =
+ graph()->NewNode(mcgraph()->machine()->TruncateInt64ToInt32(), tmp);
+ WasmI64AtomicWaitDescriptor interface_descriptor;
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
+ mcgraph()->zone(), interface_descriptor,
+ interface_descriptor.GetStackParameterCount(),
+ CallDescriptor::kNoFlags, Operator::kNoProperties,
+ StubCallMode::kCallWasmRuntimeStub);
+ Node* call_target = mcgraph()->RelocatableIntPtrConstant(
+ wasm::WasmCode::kWasmI64AtomicWait, RelocInfo::WASM_STUB_CALL);
+ node = graph()->NewNode(mcgraph()->common()->Call(call_descriptor),
+ call_target, address, expected_value_high,
+ expected_value_low, timeout, Effect(), Control());
+ break;
+ }
+
default:
FATAL_UNSUPPORTED_OPCODE(opcode);
}
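
The I64AtomicWait lowering above splits the 64-bit expected value into two 32-bit stub arguments so the call shape works uniformly on 32-bit targets. The split is the usual high/low word decomposition:

    #include <cstdint>

    void SplitI64(uint64_t v, uint32_t* hi, uint32_t* lo) {
      *lo = static_cast<uint32_t>(v);        // TruncateInt64ToInt32(v)
      *hi = static_cast<uint32_t>(v >> 32);  // TruncateInt64ToInt32(v >> 32)
    }
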
@@ -4004,6 +4321,160 @@ Node* WasmGraphBuilder::AtomicOp(wasm::WasmOpcode opcode, Node* const* inputs,
#undef ATOMIC_LOAD_LIST
#undef ATOMIC_STORE_LIST
+Node* WasmGraphBuilder::CheckDataSegmentIsPassiveAndNotDropped(
+ uint32_t data_segment_index, wasm::WasmCodePosition position) {
+ // The data segment index must be in bounds since it is required by
+ // validation.
+ DCHECK_LT(data_segment_index, env_->module->num_declared_data_segments);
+
+ Node* dropped_data_segments =
+ LOAD_INSTANCE_FIELD(DroppedDataSegments, MachineType::Pointer());
+ Node* is_segment_dropped = SetEffect(graph()->NewNode(
+ mcgraph()->machine()->Load(MachineType::Uint8()), dropped_data_segments,
+ mcgraph()->IntPtrConstant(data_segment_index), Effect(), Control()));
+ TrapIfTrue(wasm::kTrapDataSegmentDropped, is_segment_dropped, position);
+ return dropped_data_segments;
+}
+
+Node* WasmGraphBuilder::MemoryInit(uint32_t data_segment_index, Node* dst,
+ Node* src, Node* size,
+ wasm::WasmCodePosition position) {
+ CheckDataSegmentIsPassiveAndNotDropped(data_segment_index, position);
+ dst = BoundsCheckMemRange(dst, size, position);
+ MachineOperatorBuilder* m = mcgraph()->machine();
+
+ Node* seg_index = Uint32Constant(data_segment_index);
+
+ {
+ // Load segment size from WasmInstanceObject::data_segment_sizes.
+ Node* seg_size_array =
+ LOAD_INSTANCE_FIELD(DataSegmentSizes, MachineType::Pointer());
+ STATIC_ASSERT(wasm::kV8MaxWasmDataSegments <= kMaxUInt32 >> 2);
+ Node* scaled_index = Uint32ToUintptr(
+ graph()->NewNode(m->Word32Shl(), seg_index, Int32Constant(2)));
+ Node* seg_size = SetEffect(graph()->NewNode(m->Load(MachineType::Uint32()),
+ seg_size_array, scaled_index,
+ Effect(), Control()));
+
+ // Bounds check the src index against the segment size.
+ BoundsCheckRange(src, size, seg_size, position);
+ }
+
+ {
+ // Load segment's base pointer from WasmInstanceObject::data_segment_starts.
+ Node* seg_start_array =
+ LOAD_INSTANCE_FIELD(DataSegmentStarts, MachineType::Pointer());
+ STATIC_ASSERT(wasm::kV8MaxWasmDataSegments <= kMaxUInt32 >>
+ kPointerSizeLog2);
+ Node* scaled_index = Uint32ToUintptr(graph()->NewNode(
+ m->Word32Shl(), seg_index, Int32Constant(kPointerSizeLog2)));
+ Node* seg_start = SetEffect(
+ graph()->NewNode(m->Load(MachineType::Pointer()), seg_start_array,
+ scaled_index, Effect(), Control()));
+
+ // Convert src index to pointer.
+ src = graph()->NewNode(m->IntAdd(), seg_start, Uint32ToUintptr(src));
+ }
+
+ Node* function = graph()->NewNode(mcgraph()->common()->ExternalConstant(
+ ExternalReference::wasm_memory_copy()));
+ MachineType sig_types[] = {MachineType::Pointer(), MachineType::Pointer(),
+ MachineType::Uint32()};
+ MachineSignature sig(0, 3, sig_types);
+ return BuildCCall(&sig, function, dst, src, size);
+}
+
+Node* WasmGraphBuilder::MemoryDrop(uint32_t data_segment_index,
+ wasm::WasmCodePosition position) {
+ Node* dropped_data_segments =
+ CheckDataSegmentIsPassiveAndNotDropped(data_segment_index, position);
+ const Operator* store_op = mcgraph()->machine()->Store(
+ StoreRepresentation(MachineRepresentation::kWord8, kNoWriteBarrier));
+ return SetEffect(
+ graph()->NewNode(store_op, dropped_data_segments,
+ mcgraph()->IntPtrConstant(data_segment_index),
+ mcgraph()->Int32Constant(1), Effect(), Control()));
+}
+
+Node* WasmGraphBuilder::MemoryCopy(Node* dst, Node* src, Node* size,
+ wasm::WasmCodePosition position) {
+ dst = BoundsCheckMemRange(dst, size, position);
+ src = BoundsCheckMemRange(src, size, position);
+ Node* function = graph()->NewNode(mcgraph()->common()->ExternalConstant(
+ ExternalReference::wasm_memory_copy()));
+ MachineType sig_types[] = {MachineType::Pointer(), MachineType::Pointer(),
+ MachineType::Uint32()};
+ MachineSignature sig(0, 3, sig_types);
+ return BuildCCall(&sig, function, dst, src, size);
+}
+
+Node* WasmGraphBuilder::MemoryFill(Node* dst, Node* value, Node* size,
+ wasm::WasmCodePosition position) {
+ dst = BoundsCheckMemRange(dst, size, position);
+ Node* function = graph()->NewNode(mcgraph()->common()->ExternalConstant(
+ ExternalReference::wasm_memory_fill()));
+ MachineType sig_types[] = {MachineType::Pointer(), MachineType::Uint32(),
+ MachineType::Uint32()};
+ MachineSignature sig(0, 3, sig_types);
+ return BuildCCall(&sig, function, dst, value, size);
+}
+
+Node* WasmGraphBuilder::CheckElemSegmentIsPassiveAndNotDropped(
+ uint32_t elem_segment_index, wasm::WasmCodePosition position) {
+ // The elem segment index must be in bounds since it is required by
+ // validation.
+ DCHECK_LT(elem_segment_index, env_->module->elem_segments.size());
+
+ Node* dropped_elem_segments =
+ LOAD_INSTANCE_FIELD(DroppedElemSegments, MachineType::Pointer());
+ Node* is_segment_dropped = SetEffect(graph()->NewNode(
+ mcgraph()->machine()->Load(MachineType::Uint8()), dropped_elem_segments,
+ mcgraph()->IntPtrConstant(elem_segment_index), Effect(), Control()));
+ TrapIfTrue(wasm::kTrapElemSegmentDropped, is_segment_dropped, position);
+ return dropped_elem_segments;
+}
+
+Node* WasmGraphBuilder::TableInit(uint32_t table_index,
+ uint32_t elem_segment_index, Node* dst,
+ Node* src, Node* size,
+ wasm::WasmCodePosition position) {
+ Node* args[] = {
+ graph()->NewNode(mcgraph()->common()->NumberConstant(table_index)),
+ graph()->NewNode(mcgraph()->common()->NumberConstant(elem_segment_index)),
+ BuildConvertUint32ToSmiWithSaturation(dst, wasm::kV8MaxWasmTableSize),
+ BuildConvertUint32ToSmiWithSaturation(src, wasm::kV8MaxWasmTableSize),
+ BuildConvertUint32ToSmiWithSaturation(size, wasm::kV8MaxWasmTableSize)};
+ Node* result =
+ BuildCallToRuntime(Runtime::kWasmTableInit, args, arraysize(args));
+
+ return result;
+}
+
+Node* WasmGraphBuilder::TableDrop(uint32_t elem_segment_index,
+ wasm::WasmCodePosition position) {
+ Node* dropped_elem_segments =
+ CheckElemSegmentIsPassiveAndNotDropped(elem_segment_index, position);
+ const Operator* store_op = mcgraph()->machine()->Store(
+ StoreRepresentation(MachineRepresentation::kWord8, kNoWriteBarrier));
+ return SetEffect(
+ graph()->NewNode(store_op, dropped_elem_segments,
+ mcgraph()->IntPtrConstant(elem_segment_index),
+ mcgraph()->Int32Constant(1), Effect(), Control()));
+}
+
+Node* WasmGraphBuilder::TableCopy(uint32_t table_index, Node* dst, Node* src,
+ Node* size, wasm::WasmCodePosition position) {
+ Node* args[] = {
+ graph()->NewNode(mcgraph()->common()->NumberConstant(table_index)),
+ BuildConvertUint32ToSmiWithSaturation(dst, wasm::kV8MaxWasmTableSize),
+ BuildConvertUint32ToSmiWithSaturation(src, wasm::kV8MaxWasmTableSize),
+ BuildConvertUint32ToSmiWithSaturation(size, wasm::kV8MaxWasmTableSize)};
+ Node* result =
+ BuildCallToRuntime(Runtime::kWasmTableCopy, args, arraysize(args));
+
+ return result;
+}
+
class WasmDecorator final : public GraphDecorator {
public:
explicit WasmDecorator(NodeOriginTable* origins, wasm::Decoder* decoder)
@@ -4058,14 +4529,14 @@ void RecordFunctionCompilation(CodeEventListener::LogEventsAndTags tag,
class WasmWrapperGraphBuilder : public WasmGraphBuilder {
public:
- WasmWrapperGraphBuilder(Zone* zone, wasm::ModuleEnv* env, JSGraph* jsgraph,
- wasm::FunctionSig* sig,
+ WasmWrapperGraphBuilder(Zone* zone, JSGraph* jsgraph, wasm::FunctionSig* sig,
compiler::SourcePositionTable* spt,
- StubCallMode stub_mode)
- : WasmGraphBuilder(env, zone, jsgraph, sig, spt),
+ StubCallMode stub_mode, wasm::WasmFeatures features)
+ : WasmGraphBuilder(nullptr, zone, jsgraph, sig, spt),
isolate_(jsgraph->isolate()),
jsgraph_(jsgraph),
- stub_mode_(stub_mode) {}
+ stub_mode_(stub_mode),
+ enabled_features_(features) {}
Node* BuildAllocateHeapNumberWithValue(Node* value, Node* control) {
MachineOperatorBuilder* machine = mcgraph()->machine();
@@ -4317,8 +4788,11 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
case wasm::kWasmI32:
return BuildChangeInt32ToTagged(node);
case wasm::kWasmS128:
- case wasm::kWasmI64:
UNREACHABLE();
+ case wasm::kWasmI64: {
+ DCHECK(enabled_features_.bigint);
+ return BuildChangeInt64ToBigInt(node);
+ }
case wasm::kWasmF32:
node = graph()->NewNode(mcgraph()->machine()->ChangeFloat32ToFloat64(),
node);
@@ -4326,12 +4800,57 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
case wasm::kWasmF64:
return BuildChangeFloat64ToTagged(node);
case wasm::kWasmAnyRef:
+ case wasm::kWasmAnyFunc:
return node;
default:
UNREACHABLE();
}
}
+ Node* BuildChangeInt64ToBigInt(Node* input) {
+ BigIntToWasmI64Descriptor interface_descriptor;
+
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
+ mcgraph()->zone(), // zone
+ interface_descriptor, // descriptor
+ interface_descriptor.GetStackParameterCount(), // stack parameter count
+ CallDescriptor::kNoFlags, // flags
+ Operator::kNoProperties, // properties
+ stub_mode_); // stub call mode
+
+ Node* target =
+ (stub_mode_ == StubCallMode::kCallWasmRuntimeStub)
+ ? mcgraph()->RelocatableIntPtrConstant(
+ wasm::WasmCode::kBigIntToWasmI64, RelocInfo::WASM_STUB_CALL)
+ : jsgraph()->HeapConstant(BUILTIN_CODE(isolate_, I64ToBigInt));
+
+ return SetEffect(
+ SetControl(graph()->NewNode(mcgraph()->common()->Call(call_descriptor),
+ target, input, Effect(), Control())));
+ }
+
+ Node* BuildChangeBigIntToInt64(Node* input, Node* context) {
+ BigIntToI64Descriptor interface_descriptor;
+
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
+ mcgraph()->zone(), // zone
+ interface_descriptor, // descriptor
+ interface_descriptor.GetStackParameterCount(), // stack parameter count
+ CallDescriptor::kNoFlags, // flags
+ Operator::kNoProperties, // properties
+ stub_mode_); // stub call mode
+
+ Node* target =
+ (stub_mode_ == StubCallMode::kCallWasmRuntimeStub)
+ ? mcgraph()->RelocatableIntPtrConstant(
+ wasm::WasmCode::kWasmBigIntToI64, RelocInfo::WASM_STUB_CALL)
+ : jsgraph()->HeapConstant(BUILTIN_CODE(isolate_, BigIntToI64));
+
+ return SetEffect(SetControl(
+ graph()->NewNode(mcgraph()->common()->Call(call_descriptor), target,
+ input, context, Effect(), Control())));
+ }
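
Both conversion helpers share one pattern: build a stub-call descriptor, then pick the call target by where the calling wrapper lives. Wasm-resident code cannot embed heap pointers, so it calls through a relocatable stub address, while heap-resident wrappers call the builtin Code object directly. A sketch of that selection with illustrative names:

    enum class Mode { kCallWasmRuntimeStub, kCallCodeObject };

    template <typename Target>
    Target SelectStubTarget(Mode mode, Target relocatable_stub_address,
                            Target builtin_code_object) {
      return mode == Mode::kCallWasmRuntimeStub ? relocatable_stub_address
                                                : builtin_code_object;
    }
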
+
Node* FromJS(Node* node, Node* js_context, wasm::ValueType type) {
DCHECK_NE(wasm::kWasmStmt, type);
@@ -4340,11 +4859,35 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
return node;
}
- // Do a JavaScript ToNumber.
- Node* num = BuildJavaScriptToNumber(node, js_context);
+ if (type == wasm::kWasmAnyFunc) {
+ Node* check =
+ BuildChangeSmiToInt32(SetEffect(BuildCallToRuntimeWithContext(
+ Runtime::kWasmIsValidAnyFuncValue, js_context, &node, 1, effect_,
+ Control())));
+
+ Diamond type_check(graph(), mcgraph()->common(), check,
+ BranchHint::kTrue);
+ type_check.Chain(Control());
+
+ Node* effect = Effect();
+ BuildCallToRuntimeWithContext(Runtime::kWasmThrowTypeError, js_context,
+ nullptr, 0, &effect, type_check.if_false);
+
+ SetEffect(type_check.EffectPhi(Effect(), effect));
+
+ SetControl(type_check.merge);
+
+ return node;
+ }
+ Node* num = nullptr;
- // Change representation.
- num = BuildChangeTaggedToFloat64(num);
+ if (type != wasm::kWasmI64) {
+ // Do a JavaScript ToNumber.
+ num = BuildJavaScriptToNumber(node, js_context);
+
+ // Change representation.
+ num = BuildChangeTaggedToFloat64(num);
+ }
switch (type) {
case wasm::kWasmI32: {
@@ -4352,31 +4895,62 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
num);
break;
}
- case wasm::kWasmS128:
- case wasm::kWasmI64:
- UNREACHABLE();
+ case wasm::kWasmI64: {
+ DCHECK(enabled_features_.bigint);
+ num = BuildChangeBigIntToInt64(node, js_context);
+ break;
+ }
case wasm::kWasmF32:
num = graph()->NewNode(mcgraph()->machine()->TruncateFloat64ToFloat32(),
num);
break;
case wasm::kWasmF64:
break;
+ case wasm::kWasmS128:
+ UNREACHABLE();
default:
UNREACHABLE();
}
+ DCHECK_NOT_NULL(num);
+
return num;
}
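
The anyfunc path above leans on the Diamond helper, which bundles the branch, if_true, if_false, and merge plumbing. As a sketch, the control flow it encodes corresponds to this ordinary C++ (the validity check is really a runtime call, not a ready-made boolean):

    #include <stdexcept>

    template <typename Value>
    Value CoerceAnyFunc(Value node, bool is_valid_anyfunc) {
      if (!is_valid_anyfunc) {                      // Diamond::if_false
        throw std::invalid_argument("type error");  // Runtime::kWasmThrowTypeError
      }
      return node;  // if_true path falls through to the merge
    }
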
void BuildModifyThreadInWasmFlag(bool new_value) {
if (!trap_handler::IsTrapHandlerEnabled()) return;
- Node* thread_in_wasm_flag_address_address =
- graph()->NewNode(mcgraph()->common()->ExternalConstant(
- ExternalReference::wasm_thread_in_wasm_flag_address_address(
- isolate_)));
- Node* thread_in_wasm_flag_address = SetEffect(graph()->NewNode(
- mcgraph()->machine()->Load(LoadRepresentation(MachineType::Pointer())),
- thread_in_wasm_flag_address_address, mcgraph()->Int32Constant(0),
- Effect(), Control()));
+ Node* isolate_root =
+ LOAD_INSTANCE_FIELD(IsolateRoot, MachineType::Pointer());
+
+ Node* thread_in_wasm_flag_address =
+ LOAD_RAW(isolate_root, Isolate::thread_in_wasm_flag_address_offset(),
+ MachineType::Pointer());
+
+ if (FLAG_debug_code) {
+ Node* flag_value = SetEffect(
+ graph()->NewNode(mcgraph()->machine()->Load(MachineType::Pointer()),
+ thread_in_wasm_flag_address,
+ mcgraph()->Int32Constant(0), Effect(), Control()));
+ Node* check =
+ graph()->NewNode(mcgraph()->machine()->Word32Equal(), flag_value,
+ mcgraph()->Int32Constant(new_value ? 0 : 1));
+
+ Diamond flag_check(graph(), mcgraph()->common(), check,
+ BranchHint::kTrue);
+ flag_check.Chain(Control());
+ Node* message_id = jsgraph()->SmiConstant(static_cast<int32_t>(
+ new_value ? AbortReason::kUnexpectedThreadInWasmSet
+ : AbortReason::kUnexpectedThreadInWasmUnset));
+
+ Node* effect = Effect();
+ BuildCallToRuntimeWithContext(Runtime::kAbort, NoContextConstant(),
+ &message_id, 1, &effect,
+ flag_check.if_false);
+
+ SetEffect(flag_check.EffectPhi(Effect(), effect));
+
+ SetControl(flag_check.merge);
+ }
+
SetEffect(graph()->NewNode(
mcgraph()->machine()->Store(StoreRepresentation(
MachineRepresentation::kWord32, kNoWriteBarrier)),
@@ -4449,12 +5023,12 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
instance_node_.set(
BuildLoadInstanceFromExportedFunctionData(function_data));
- if (!wasm::IsJSCompatibleSignature(sig_)) {
+ if (!wasm::IsJSCompatibleSignature(sig_, enabled_features_.bigint)) {
// Throw a TypeError. Use the js_context of the calling javascript
// function (passed as a parameter), such that the generated code is
// js_context independent.
BuildCallToRuntimeWithContext(Runtime::kWasmThrowTypeError, js_context,
- nullptr, 0);
+ nullptr, 0, effect_, Control());
Return(jsgraph()->SmiConstant(0));
return;
}
@@ -4502,13 +5076,12 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
Return(jsval);
}
- bool BuildWasmImportCallWrapper(WasmImportCallKind kind, int func_index) {
+ bool BuildWasmImportCallWrapper(WasmImportCallKind kind) {
int wasm_count = static_cast<int>(sig_->parameter_count());
// Build the start and the parameter nodes.
- SetEffect(SetControl(Start(wasm_count + 3)));
+ SetEffect(SetControl(Start(wasm_count + 4)));
- // Create the instance_node from the passed parameter.
instance_node_.set(Param(wasm::kWasmInstanceParameterIndex));
Node* native_context =
@@ -4519,23 +5092,25 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
// === Runtime TypeError =================================================
// =======================================================================
BuildCallToRuntimeWithContext(Runtime::kWasmThrowTypeError,
- native_context, nullptr, 0);
+ native_context, nullptr, 0, effect_,
+ Control());
// We don't need to return a value here, as the runtime call will not
// return anyway (the c entry stub will trigger stack unwinding).
ReturnVoid();
return false;
}
- Node* callables_node = LOAD_INSTANCE_FIELD(ImportedFunctionCallables,
- MachineType::TaggedPointer());
- Node* callable_node = LOAD_FIXED_ARRAY_SLOT(callables_node, func_index);
+ // The callable is passed as the last parameter, after WASM arguments.
+ Node* callable_node = Param(wasm_count + 1);
+
Node* undefined_node =
LOAD_INSTANCE_FIELD(UndefinedValue, MachineType::TaggedPointer());
Node* call = nullptr;
bool sloppy_receiver = true;
- BuildModifyThreadInWasmFlag(false); // exiting WASM via call.
+ // Clear the ThreadInWasm flag.
+ BuildModifyThreadInWasmFlag(false);
switch (kind) {
// =======================================================================
@@ -4556,7 +5131,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
args[pos++] = callable_node; // target callable.
// Receiver.
if (sloppy_receiver) {
- Node* global_proxy = LOAD_FIXED_ARRAY_SLOT(
+ Node* global_proxy = LOAD_FIXED_ARRAY_SLOT_PTR(
native_context, Context::GLOBAL_PROXY_INDEX);
args[pos++] = global_proxy;
} else {
@@ -4619,16 +5194,23 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
// Receiver.
if (sloppy_receiver) {
- Node* global_proxy = LOAD_FIXED_ARRAY_SLOT(
+ Node* global_proxy = LOAD_FIXED_ARRAY_SLOT_PTR(
native_context, Context::GLOBAL_PROXY_INDEX);
args[pos++] = global_proxy;
} else {
args[pos++] = undefined_node;
}
+#ifdef V8_TARGET_ARCH_IA32
+ // TODO(v8:6666): Remove kAllowCallThroughSlot and use a pc-relative
+ // call instead once builtins are embedded in every build configuration.
+ CallDescriptor::Flags flags = CallDescriptor::kAllowCallThroughSlot;
+#else
+ CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
+#endif
auto call_descriptor = Linkage::GetStubCallDescriptor(
mcgraph()->zone(), ArgumentsAdaptorDescriptor{}, 1 + wasm_count,
- CallDescriptor::kNoFlags, Operator::kNoProperties);
+ flags, Operator::kNoProperties);
// Convert wasm numbers to JS values.
pos = AddArgumentNodes(args, pos, wasm_count, sig_);
@@ -4685,7 +5267,8 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
? mcgraph()->Int32Constant(0)
: FromJS(call, native_context, sig_->GetReturn());
- BuildModifyThreadInWasmFlag(true); // reentering WASM upon return.
+ // Set the ThreadInWasm flag again.
+ BuildModifyThreadInWasmFlag(true);
Return(val);
return true;
@@ -4765,16 +5348,17 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
// Create parameter nodes (offset by 1 for the receiver parameter).
Node* code_entry = Param(CWasmEntryParameters::kCodeEntry + 1);
- Node* instance_node = Param(CWasmEntryParameters::kWasmInstance + 1);
+ Node* object_ref_node = Param(CWasmEntryParameters::kObjectRef + 1);
Node* arg_buffer = Param(CWasmEntryParameters::kArgumentsBuffer + 1);
int wasm_arg_count = static_cast<int>(sig_->parameter_count());
- int arg_count = wasm_arg_count + 4; // code, instance_node, control, effect
+ int arg_count =
+ wasm_arg_count + 4; // code, object_ref_node, control, effect
Node** args = Buffer(arg_count);
int pos = 0;
args[pos++] = code_entry;
- args[pos++] = instance_node;
+ args[pos++] = object_ref_node;
int offset = 0;
for (wasm::ValueType type : sig_->parameters()) {
@@ -4828,6 +5412,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
JSGraph* jsgraph_;
StubCallMode stub_mode_;
SetOncePointer<const Operator> allocate_heap_number_operator_;
+ wasm::WasmFeatures enabled_features_;
};
void AppendSignature(char* buffer, size_t max_name_len,
@@ -4868,10 +5453,9 @@ MaybeHandle<Code> CompileJSToWasmWrapper(Isolate* isolate,
Node* control = nullptr;
Node* effect = nullptr;
- wasm::ModuleEnv env(nullptr, wasm::kNoTrapHandler,
- wasm::kRuntimeExceptionSupport);
- WasmWrapperGraphBuilder builder(&zone, &env, &jsgraph, sig, nullptr,
- StubCallMode::kCallOnHeapBuiltin);
+ WasmWrapperGraphBuilder builder(&zone, &jsgraph, sig, nullptr,
+ StubCallMode::kCallCodeObject,
+ wasm::WasmFeaturesFromIsolate(isolate));
builder.set_control_ptr(&control);
builder.set_effect_ptr(&effect);
builder.BuildJSToWasmWrapper(is_import);
@@ -4888,7 +5472,7 @@ MaybeHandle<Code> CompileJSToWasmWrapper(Isolate* isolate,
CallDescriptor* incoming = Linkage::GetJSCallDescriptor(
&zone, false, params + 1, CallDescriptor::kNoFlags);
- MaybeHandle<Code> maybe_code = Pipeline::GenerateCodeForWasmStub(
+ MaybeHandle<Code> maybe_code = Pipeline::GenerateCodeForWasmHeapStub(
isolate, incoming, &graph, Code::JS_TO_WASM_FUNCTION, debug_name,
WasmAssemblerOptions());
Handle<Code> code;
@@ -4912,7 +5496,8 @@ MaybeHandle<Code> CompileJSToWasmWrapper(Isolate* isolate,
}
WasmImportCallKind GetWasmImportCallKind(Handle<JSReceiver> target,
- wasm::FunctionSig* expected_sig) {
+ wasm::FunctionSig* expected_sig,
+ bool has_bigint_feature) {
if (WasmExportedFunction::IsWasmExportedFunction(*target)) {
auto imported_function = WasmExportedFunction::cast(*target);
wasm::FunctionSig* imported_sig =
@@ -4926,20 +5511,72 @@ WasmImportCallKind GetWasmImportCallKind(Handle<JSReceiver> target,
return WasmImportCallKind::kWasmToWasm;
}
 // Assuming we are calling into JS, check whether this would be a runtime error.
- if (!wasm::IsJSCompatibleSignature(expected_sig)) {
+ if (!wasm::IsJSCompatibleSignature(expected_sig, has_bigint_feature)) {
return WasmImportCallKind::kRuntimeTypeError;
}
// For JavaScript calls, determine whether the target has an arity match
// and whether it has a sloppy receiver.
if (target->IsJSFunction()) {
Handle<JSFunction> function = Handle<JSFunction>::cast(target);
- if (IsClassConstructor(function->shared()->kind())) {
+ SharedFunctionInfo shared = function->shared();
+
+// Check for math intrinsics.
+#define COMPARE_SIG_FOR_BUILTIN(name) \
+ { \
+ wasm::FunctionSig* sig = wasm::WasmOpcodes::Signature(wasm::kExpr##name); \
+ if (!sig) sig = wasm::WasmOpcodes::AsmjsSignature(wasm::kExpr##name); \
+ DCHECK_NOT_NULL(sig); \
+ if (*expected_sig == *sig) return WasmImportCallKind::k##name; \
+ }
+#define COMPARE_SIG_FOR_BUILTIN_F64(name) \
+ case Builtins::kMath##name: \
+ COMPARE_SIG_FOR_BUILTIN(F64##name); \
+ break;
+#define COMPARE_SIG_FOR_BUILTIN_F32_F64(name) \
+ case Builtins::kMath##name: \
+ COMPARE_SIG_FOR_BUILTIN(F64##name); \
+ COMPARE_SIG_FOR_BUILTIN(F32##name); \
+ break;
+
+ if (FLAG_wasm_math_intrinsics && shared->HasBuiltinId()) {
+ switch (shared->builtin_id()) {
+ COMPARE_SIG_FOR_BUILTIN_F64(Acos);
+ COMPARE_SIG_FOR_BUILTIN_F64(Asin);
+ COMPARE_SIG_FOR_BUILTIN_F64(Atan);
+ COMPARE_SIG_FOR_BUILTIN_F64(Cos);
+ COMPARE_SIG_FOR_BUILTIN_F64(Sin);
+ COMPARE_SIG_FOR_BUILTIN_F64(Tan);
+ COMPARE_SIG_FOR_BUILTIN_F64(Exp);
+ COMPARE_SIG_FOR_BUILTIN_F64(Log);
+ COMPARE_SIG_FOR_BUILTIN_F64(Atan2);
+ //===========================================================
+ // TODO(8505): Math.pow for wasm does not match JS.
+ // COMPARE_SIG_FOR_BUILTIN_F64(Pow);
+ //===========================================================
+ COMPARE_SIG_FOR_BUILTIN_F32_F64(Min);
+ COMPARE_SIG_FOR_BUILTIN_F32_F64(Max);
+ COMPARE_SIG_FOR_BUILTIN_F32_F64(Abs);
+ COMPARE_SIG_FOR_BUILTIN_F32_F64(Ceil);
+ COMPARE_SIG_FOR_BUILTIN_F32_F64(Floor);
+ COMPARE_SIG_FOR_BUILTIN_F32_F64(Sqrt);
+ case Builtins::kMathFround:
+ COMPARE_SIG_FOR_BUILTIN(F32ConvertF64);
+ break;
+ default:
+ break;
+ }
+ }
+
+#undef COMPARE_SIG_FOR_BUILTIN
+#undef COMPARE_SIG_FOR_BUILTIN_F64
+#undef COMPARE_SIG_FOR_BUILTIN_F32_F64
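
For reference, expanding COMPARE_SIG_FOR_BUILTIN_F64(Sin) by hand yields roughly the case below (shown in context, not as a standalone program): an imported Math.sin is only intrinsified to kF64Sin when the module declares the import with the matching (f64) -> f64 signature; otherwise control falls through to the generic JS-call classification.

    case Builtins::kMathSin: {
      wasm::FunctionSig* sig = wasm::WasmOpcodes::Signature(wasm::kExprF64Sin);
      if (!sig) sig = wasm::WasmOpcodes::AsmjsSignature(wasm::kExprF64Sin);
      DCHECK_NOT_NULL(sig);
      if (*expected_sig == *sig) return WasmImportCallKind::kF64Sin;
      break;
    }
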
+
+ if (IsClassConstructor(shared->kind())) {
// Class constructor will throw anyway.
return WasmImportCallKind::kUseCallBuiltin;
}
- bool sloppy = is_sloppy(function->shared()->language_mode()) &&
- !function->shared()->native();
- if (function->shared()->internal_formal_parameter_count() ==
+ bool sloppy = is_sloppy(shared->language_mode()) && !shared->native();
+ if (shared->internal_formal_parameter_count() ==
expected_sig->parameter_count()) {
return sloppy ? WasmImportCallKind::kJSFunctionArityMatchSloppy
: WasmImportCallKind::kJSFunctionArityMatch;
@@ -4951,95 +5588,196 @@ WasmImportCallKind GetWasmImportCallKind(Handle<JSReceiver> target,
return WasmImportCallKind::kUseCallBuiltin;
}
-MaybeHandle<Code> CompileWasmImportCallWrapper(
- Isolate* isolate, WasmImportCallKind kind, wasm::FunctionSig* sig,
- uint32_t index, wasm::ModuleOrigin origin,
- wasm::UseTrapHandler use_trap_handler) {
+wasm::WasmOpcode GetMathIntrinsicOpcode(WasmImportCallKind kind,
+ const char** name_ptr) {
+#define CASE(name) \
+ case WasmImportCallKind::k##name: \
+ *name_ptr = "WasmMathIntrinsic:" #name; \
+ return wasm::kExpr##name
+ switch (kind) {
+ CASE(F64Acos);
+ CASE(F64Asin);
+ CASE(F64Atan);
+ CASE(F64Cos);
+ CASE(F64Sin);
+ CASE(F64Tan);
+ CASE(F64Exp);
+ CASE(F64Log);
+ CASE(F64Atan2);
+ CASE(F64Pow);
+ CASE(F64Ceil);
+ CASE(F64Floor);
+ CASE(F64Sqrt);
+ CASE(F64Min);
+ CASE(F64Max);
+ CASE(F64Abs);
+ CASE(F32Min);
+ CASE(F32Max);
+ CASE(F32Abs);
+ CASE(F32Ceil);
+ CASE(F32Floor);
+ CASE(F32Sqrt);
+ CASE(F32ConvertF64);
+ default:
+ UNREACHABLE();
+ return wasm::kExprUnreachable;
+ }
+#undef CASE
+}
+
+wasm::WasmCode* CompileWasmMathIntrinsic(wasm::WasmEngine* wasm_engine,
+ wasm::NativeModule* native_module,
+ WasmImportCallKind kind,
+ wasm::FunctionSig* sig) {
+ DCHECK_EQ(1, sig->return_count());
+
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"),
+ "CompileWasmMathIntrinsic");
+
+ Zone zone(wasm_engine->allocator(), ZONE_NAME);
+
+ // Compile a WASM function consisting of a single opcode and let TurboFan
+ // generate either inlined machine code or a call to a helper.
+ SourcePositionTable* source_positions = nullptr;
+ MachineGraph* mcgraph = new (&zone) MachineGraph(
+ new (&zone) Graph(&zone), new (&zone) CommonOperatorBuilder(&zone),
+ new (&zone) MachineOperatorBuilder(
+ &zone, MachineType::PointerRepresentation(),
+ InstructionSelector::SupportedMachineOperatorFlags(),
+ InstructionSelector::AlignmentRequirements()));
+
+ wasm::CompilationEnv env(
+ native_module->module(), wasm::UseTrapHandler::kNoTrapHandler,
+ wasm::RuntimeExceptionSupport::kNoRuntimeExceptionSupport,
+ wasm::kAllWasmFeatures, wasm::LowerSimd::kNoLowerSimd);
+
+ WasmGraphBuilder builder(&env, mcgraph->zone(), mcgraph, sig,
+ source_positions);
+
+ // Set up the graph start.
+ Node* start = builder.Start(static_cast<int>(sig->parameter_count() + 1 + 1));
+ Node* effect = start;
+ Node* control = start;
+ builder.set_effect_ptr(&effect);
+ builder.set_control_ptr(&control);
+ builder.set_instance_node(builder.Param(wasm::kWasmInstanceParameterIndex));
+
+ // Generate either a unop or a binop.
+ Node* result = nullptr;
+ const char* debug_name = "WasmMathIntrinsic";
+ auto opcode = GetMathIntrinsicOpcode(kind, &debug_name);
+ switch (sig->parameter_count()) {
+ case 1:
+ result = builder.Unop(opcode, builder.Param(1));
+ break;
+ case 2:
+ result = builder.Binop(opcode, builder.Param(1), builder.Param(2));
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+
+ builder.Return(result);
+
+ // Run the compiler pipeline to generate machine code.
+ auto call_descriptor = GetWasmCallDescriptor(&zone, sig);
+ if (mcgraph->machine()->Is32()) {
+ call_descriptor = GetI32WasmCallDescriptor(&zone, call_descriptor);
+ }
+
+ wasm::WasmCode* wasm_code = Pipeline::GenerateCodeForWasmNativeStub(
+ wasm_engine, call_descriptor, mcgraph, Code::WASM_FUNCTION,
+ wasm::WasmCode::kFunction, debug_name, WasmStubAssemblerOptions(),
+ native_module, source_positions);
+ CHECK_NOT_NULL(wasm_code);
+ // TODO(titzer): add counters for math intrinsic code size / allocation
+
+ return wasm_code;
+}
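
The resulting graph is just the instance parameter plus a single operation. For a binop kind such as kF64Atan2 the compiled function behaves roughly like the sketch below; Param(0) is the implicit instance, which is why the operands are Param(1) and Param(2):

    #include <cmath>

    // Rough semantics of the intrinsic body for kF64Atan2; TurboFan either
    // inlines a machine instruction or emits a call to a helper like this.
    double F64Atan2(double y, double x) { return std::atan2(y, x); }
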
+
+wasm::WasmCode* CompileWasmImportCallWrapper(wasm::WasmEngine* wasm_engine,
+ wasm::NativeModule* native_module,
+ WasmImportCallKind kind,
+ wasm::FunctionSig* sig,
+ bool source_positions) {
DCHECK_NE(WasmImportCallKind::kLinkError, kind);
DCHECK_NE(WasmImportCallKind::kWasmToWasm, kind);
+ // Check for math intrinsics first.
+ if (FLAG_wasm_math_intrinsics &&
+ kind >= WasmImportCallKind::kFirstMathIntrinsic &&
+ kind <= WasmImportCallKind::kLastMathIntrinsic) {
+ return CompileWasmMathIntrinsic(wasm_engine, native_module, kind, sig);
+ }
+
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"),
"CompileWasmImportCallWrapper");
//----------------------------------------------------------------------------
// Create the Graph
//----------------------------------------------------------------------------
- Zone zone(isolate->allocator(), ZONE_NAME);
+ Zone zone(wasm_engine->allocator(), ZONE_NAME);
Graph graph(&zone);
CommonOperatorBuilder common(&zone);
MachineOperatorBuilder machine(
&zone, MachineType::PointerRepresentation(),
InstructionSelector::SupportedMachineOperatorFlags(),
InstructionSelector::AlignmentRequirements());
- JSGraph jsgraph(isolate, &graph, &common, nullptr, nullptr, &machine);
+ JSGraph jsgraph(nullptr, &graph, &common, nullptr, nullptr, &machine);
Node* control = nullptr;
Node* effect = nullptr;
SourcePositionTable* source_position_table =
- origin == wasm::kAsmJsOrigin ? new (&zone) SourcePositionTable(&graph)
- : nullptr;
+ source_positions ? new (&zone) SourcePositionTable(&graph) : nullptr;
- wasm::ModuleEnv env(nullptr, use_trap_handler,
- wasm::kRuntimeExceptionSupport);
-
- WasmWrapperGraphBuilder builder(&zone, &env, &jsgraph, sig,
- source_position_table,
- StubCallMode::kCallWasmRuntimeStub);
+ WasmWrapperGraphBuilder builder(&zone, &jsgraph, sig, source_position_table,
+ StubCallMode::kCallWasmRuntimeStub,
+ native_module->enabled_features());
builder.set_control_ptr(&control);
builder.set_effect_ptr(&effect);
- builder.BuildWasmImportCallWrapper(kind, index);
+ builder.BuildWasmImportCallWrapper(kind);
- EmbeddedVector<char, 32> func_name;
- func_name.Truncate(SNPrintF(func_name, "wasm-to-js#%d", index));
+ const char* func_name = "wasm-to-js";
// Schedule and compile to machine code.
- CallDescriptor* incoming = GetWasmCallDescriptor(&zone, sig);
+ CallDescriptor* incoming =
+ GetWasmCallDescriptor(&zone, sig, WasmGraphBuilder::kNoRetpoline,
+ WasmGraphBuilder::kExtraCallableParam);
if (machine.Is32()) {
incoming = GetI32WasmCallDescriptor(&zone, incoming);
}
- MaybeHandle<Code> maybe_code = Pipeline::GenerateCodeForWasmStub(
- isolate, incoming, &graph, Code::WASM_TO_JS_FUNCTION, func_name.start(),
- AssemblerOptions::Default(isolate), source_position_table);
- Handle<Code> code;
- if (!maybe_code.ToHandle(&code)) {
- return maybe_code;
- }
-#ifdef ENABLE_DISASSEMBLER
- if (FLAG_print_opt_code) {
- CodeTracer::Scope tracing_scope(isolate->GetCodeTracer());
- OFStream os(tracing_scope.file());
- code->Disassemble(func_name.start(), os);
- }
-#endif
-
- if (must_record_function_compilation(isolate)) {
- RecordFunctionCompilation(CodeEventListener::STUB_TAG, isolate, code,
- "%.*s", func_name.length(), func_name.start());
- }
+ wasm::WasmCode* wasm_code = Pipeline::GenerateCodeForWasmNativeStub(
+ wasm_engine, incoming, &jsgraph, Code::WASM_TO_JS_FUNCTION,
+ wasm::WasmCode::kWasmToJsWrapper, func_name, WasmStubAssemblerOptions(),
+ native_module, source_position_table);
+ CHECK_NOT_NULL(wasm_code);
- return code;
+ return wasm_code;
}
-MaybeHandle<Code> CompileWasmInterpreterEntry(Isolate* isolate,
- uint32_t func_index,
- wasm::FunctionSig* sig) {
+wasm::WasmCode* CompileWasmInterpreterEntry(wasm::WasmEngine* wasm_engine,
+ wasm::NativeModule* native_module,
+ uint32_t func_index,
+ wasm::FunctionSig* sig) {
//----------------------------------------------------------------------------
// Create the Graph
//----------------------------------------------------------------------------
- Zone zone(isolate->allocator(), ZONE_NAME);
+ Zone zone(wasm_engine->allocator(), ZONE_NAME);
Graph graph(&zone);
CommonOperatorBuilder common(&zone);
MachineOperatorBuilder machine(
&zone, MachineType::PointerRepresentation(),
InstructionSelector::SupportedMachineOperatorFlags(),
InstructionSelector::AlignmentRequirements());
- JSGraph jsgraph(isolate, &graph, &common, nullptr, nullptr, &machine);
+ JSGraph jsgraph(nullptr, &graph, &common, nullptr, nullptr, &machine);
Node* control = nullptr;
Node* effect = nullptr;
- WasmWrapperGraphBuilder builder(&zone, nullptr, &jsgraph, sig, nullptr,
- StubCallMode::kCallWasmRuntimeStub);
+ WasmWrapperGraphBuilder builder(&zone, &jsgraph, sig, nullptr,
+ StubCallMode::kCallWasmRuntimeStub,
+ native_module->enabled_features());
builder.set_control_ptr(&control);
builder.set_effect_ptr(&effect);
builder.BuildWasmInterpreterEntry(func_index);
@@ -5054,27 +5792,13 @@ MaybeHandle<Code> CompileWasmInterpreterEntry(Isolate* isolate,
func_name.Truncate(
SNPrintF(func_name, "wasm-interpreter-entry#%d", func_index));
- MaybeHandle<Code> maybe_code = Pipeline::GenerateCodeForWasmStub(
- isolate, incoming, &graph, Code::WASM_INTERPRETER_ENTRY,
- func_name.start(), AssemblerOptions::Default(isolate));
- Handle<Code> code;
- if (!maybe_code.ToHandle(&code)) {
- return maybe_code;
- }
-#ifdef ENABLE_DISASSEMBLER
- if (FLAG_print_opt_code) {
- CodeTracer::Scope tracing_scope(isolate->GetCodeTracer());
- OFStream os(tracing_scope.file());
- code->Disassemble(func_name.start(), os);
- }
-#endif
+ wasm::WasmCode* wasm_code = Pipeline::GenerateCodeForWasmNativeStub(
+ wasm_engine, incoming, &jsgraph, Code::WASM_INTERPRETER_ENTRY,
+ wasm::WasmCode::kInterpreterEntry, func_name.start(),
+ WasmStubAssemblerOptions(), native_module);
+ CHECK_NOT_NULL(wasm_code);
- if (must_record_function_compilation(isolate)) {
- RecordFunctionCompilation(CodeEventListener::STUB_TAG, isolate, code,
- "%.*s", func_name.length(), func_name.start());
- }
-
- return maybe_code;
+ return wasm_code;
}
MaybeHandle<Code> CompileCWasmEntry(Isolate* isolate, wasm::FunctionSig* sig) {
@@ -5090,8 +5814,9 @@ MaybeHandle<Code> CompileCWasmEntry(Isolate* isolate, wasm::FunctionSig* sig) {
Node* control = nullptr;
Node* effect = nullptr;
- WasmWrapperGraphBuilder builder(&zone, nullptr, &jsgraph, sig, nullptr,
- StubCallMode::kCallOnHeapBuiltin);
+ WasmWrapperGraphBuilder builder(&zone, &jsgraph, sig, nullptr,
+ StubCallMode::kCallCodeObject,
+ wasm::WasmFeaturesFromIsolate(isolate));
builder.set_control_ptr(&control);
builder.set_effect_ptr(&effect);
builder.BuildCWasmEntry();
@@ -5106,7 +5831,7 @@ MaybeHandle<Code> CompileCWasmEntry(Isolate* isolate, wasm::FunctionSig* sig) {
char debug_name[kMaxNameLen] = "c-wasm-entry:";
AppendSignature(debug_name, kMaxNameLen, sig);
- MaybeHandle<Code> maybe_code = Pipeline::GenerateCodeForWasmStub(
+ MaybeHandle<Code> maybe_code = Pipeline::GenerateCodeForWasmHeapStub(
isolate, incoming, &graph, Code::C_WASM_ENTRY, debug_name,
AssemblerOptions::Default(isolate));
Handle<Code> code;
@@ -5131,51 +5856,50 @@ TurbofanWasmCompilationUnit::TurbofanWasmCompilationUnit(
// Clears unique_ptrs, but (part of) the type is forward declared in the header.
TurbofanWasmCompilationUnit::~TurbofanWasmCompilationUnit() = default;
-SourcePositionTable* TurbofanWasmCompilationUnit::BuildGraphForWasmFunction(
+bool TurbofanWasmCompilationUnit::BuildGraphForWasmFunction(
+ wasm::CompilationEnv* env, const wasm::FunctionBody& func_body,
wasm::WasmFeatures* detected, double* decode_ms, MachineGraph* mcgraph,
- NodeOriginTable* node_origins) {
+ NodeOriginTable* node_origins, SourcePositionTable* source_positions,
+ wasm::WasmError* error_out) {
base::ElapsedTimer decode_timer;
if (FLAG_trace_wasm_decode_time) {
decode_timer.Start();
}
// Create a TF graph during decoding.
- SourcePositionTable* source_position_table =
- new (mcgraph->zone()) SourcePositionTable(mcgraph->graph());
- WasmGraphBuilder builder(wasm_unit_->env_, mcgraph->zone(), mcgraph,
- wasm_unit_->func_body_.sig, source_position_table);
- graph_construction_result_ = wasm::BuildTFGraph(
- wasm_unit_->wasm_engine_->allocator(),
- wasm_unit_->native_module_->enabled_features(), wasm_unit_->env_->module,
- &builder, detected, wasm_unit_->func_body_, node_origins);
- if (graph_construction_result_.failed()) {
+ WasmGraphBuilder builder(env, mcgraph->zone(), mcgraph, func_body.sig,
+ source_positions);
+ wasm::VoidResult graph_construction_result = wasm::BuildTFGraph(
+ wasm_unit_->wasm_engine_->allocator(), env->enabled_features, env->module,
+ &builder, detected, func_body, node_origins);
+ if (graph_construction_result.failed()) {
if (FLAG_trace_wasm_compiler) {
StdoutStream{} << "Compilation failed: "
- << graph_construction_result_.error_msg() << std::endl;
+ << graph_construction_result.error().message()
+ << std::endl;
}
- return nullptr;
+ *error_out = graph_construction_result.error();
+ return false;
}
builder.LowerInt64();
if (builder.has_simd() &&
- (!CpuFeatures::SupportsWasmSimd128() || wasm_unit_->env_->lower_simd)) {
- SimdScalarLowering(
- mcgraph,
- CreateMachineSignature(mcgraph->zone(), wasm_unit_->func_body_.sig))
+ (!CpuFeatures::SupportsWasmSimd128() || env->lower_simd)) {
+ SimdScalarLowering(mcgraph,
+ CreateMachineSignature(mcgraph->zone(), func_body.sig))
.LowerGraph();
}
if (wasm_unit_->func_index_ >= FLAG_trace_wasm_ast_start &&
wasm_unit_->func_index_ < FLAG_trace_wasm_ast_end) {
- PrintRawWasmCode(wasm_unit_->wasm_engine_->allocator(),
- wasm_unit_->func_body_, wasm_unit_->env_->module,
- wasm::kPrintLocals);
+ PrintRawWasmCode(wasm_unit_->wasm_engine_->allocator(), func_body,
+ env->module, wasm::kPrintLocals);
}
if (FLAG_trace_wasm_decode_time) {
*decode_ms = decode_timer.Elapsed().InMillisecondsF();
}
- return source_position_table;
+ return true;
}
namespace {
@@ -5191,124 +5915,81 @@ Vector<const char> GetDebugName(Zone* zone, int index) {
memcpy(index_name, name_vector.start(), name_len);
return Vector<const char>(index_name, name_len);
}
-
} // namespace
-void TurbofanWasmCompilationUnit::ExecuteCompilation(
- wasm::WasmFeatures* detected) {
+wasm::WasmCompilationResult TurbofanWasmCompilationUnit::ExecuteCompilation(
+ wasm::CompilationEnv* env, const wasm::FunctionBody& func_body,
+ Counters* counters, wasm::WasmFeatures* detected) {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"),
"ExecuteTurbofanCompilation");
double decode_ms = 0;
size_t node_count = 0;
- // Scope for the {graph_zone}.
- {
- Zone graph_zone(wasm_unit_->wasm_engine_->allocator(), ZONE_NAME);
- MachineGraph* mcgraph = new (&graph_zone)
- MachineGraph(new (&graph_zone) Graph(&graph_zone),
- new (&graph_zone) CommonOperatorBuilder(&graph_zone),
- new (&graph_zone) MachineOperatorBuilder(
- &graph_zone, MachineType::PointerRepresentation(),
- InstructionSelector::SupportedMachineOperatorFlags(),
- InstructionSelector::AlignmentRequirements()));
-
- Zone compilation_zone(wasm_unit_->wasm_engine_->allocator(), ZONE_NAME);
-
- OptimizedCompilationInfo info(
- GetDebugName(&compilation_zone, wasm_unit_->func_index_),
- &compilation_zone, Code::WASM_FUNCTION);
- if (wasm_unit_->env_->runtime_exception_support) {
- info.SetWasmRuntimeExceptionSupport();
- }
+ Zone zone(wasm_unit_->wasm_engine_->allocator(), ZONE_NAME);
+ MachineGraph* mcgraph = new (&zone) MachineGraph(
+ new (&zone) Graph(&zone), new (&zone) CommonOperatorBuilder(&zone),
+ new (&zone) MachineOperatorBuilder(
+ &zone, MachineType::PointerRepresentation(),
+ InstructionSelector::SupportedMachineOperatorFlags(),
+ InstructionSelector::AlignmentRequirements()));
- if (info.trace_turbo_json_enabled()) {
- TurboCfgFile tcf;
- tcf << AsC1VCompilation(&info);
- }
+ OptimizedCompilationInfo info(GetDebugName(&zone, wasm_unit_->func_index_),
+ &zone, Code::WASM_FUNCTION);
+ if (env->runtime_exception_support) {
+ info.SetWasmRuntimeExceptionSupport();
+ }
- NodeOriginTable* node_origins = info.trace_turbo_json_enabled()
- ? new (&graph_zone)
- NodeOriginTable(mcgraph->graph())
- : nullptr;
- SourcePositionTable* source_positions =
- BuildGraphForWasmFunction(detected, &decode_ms, mcgraph, node_origins);
+ if (info.trace_turbo_json_enabled()) {
+ TurboCfgFile tcf;
+ tcf << AsC1VCompilation(&info);
+ }
- if (graph_construction_result_.failed()) {
- ok_ = false;
- return;
- }
+ NodeOriginTable* node_origins = info.trace_turbo_json_enabled()
+ ? new (&zone)
+ NodeOriginTable(mcgraph->graph())
+ : nullptr;
+ SourcePositionTable* source_positions =
+ new (mcgraph->zone()) SourcePositionTable(mcgraph->graph());
+ wasm::WasmError error;
+ if (!BuildGraphForWasmFunction(env, func_body, detected, &decode_ms, mcgraph,
+ node_origins, source_positions, &error)) {
+ DCHECK(!error.empty());
+ return wasm::WasmCompilationResult{std::move(error)};
+ }
- if (node_origins) {
- node_origins->AddDecorator();
- }
+ if (node_origins) {
+ node_origins->AddDecorator();
+ }
- base::ElapsedTimer pipeline_timer;
- if (FLAG_trace_wasm_decode_time) {
- node_count = mcgraph->graph()->NodeCount();
- pipeline_timer.Start();
- }
+ base::ElapsedTimer pipeline_timer;
+ if (FLAG_trace_wasm_decode_time) {
+ node_count = mcgraph->graph()->NodeCount();
+ pipeline_timer.Start();
+ }
- // Run the compiler pipeline to generate machine code.
- auto call_descriptor =
- GetWasmCallDescriptor(&compilation_zone, wasm_unit_->func_body_.sig);
- if (mcgraph->machine()->Is32()) {
- call_descriptor =
- GetI32WasmCallDescriptor(&compilation_zone, call_descriptor);
- }
+ // Run the compiler pipeline to generate machine code.
+ auto call_descriptor = GetWasmCallDescriptor(&zone, func_body.sig);
+ if (mcgraph->machine()->Is32()) {
+ call_descriptor = GetI32WasmCallDescriptor(&zone, call_descriptor);
+ }
- std::unique_ptr<OptimizedCompilationJob> job(
- Pipeline::NewWasmCompilationJob(
- &info, wasm_unit_->wasm_engine_, mcgraph, call_descriptor,
- source_positions, node_origins, wasm_unit_->func_body_,
- const_cast<wasm::WasmModule*>(wasm_unit_->env_->module),
- wasm_unit_->native_module_, wasm_unit_->func_index_,
- wasm_unit_->env_->module->origin));
- ok_ = job->ExecuteJob() == CompilationJob::SUCCEEDED;
- // TODO(bradnelson): Improve histogram handling of size_t.
- wasm_unit_->counters_->wasm_compile_function_peak_memory_bytes()->AddSample(
- static_cast<int>(mcgraph->graph()->zone()->allocation_size()));
-
- if (FLAG_trace_wasm_decode_time) {
- double pipeline_ms = pipeline_timer.Elapsed().InMillisecondsF();
- PrintF(
- "wasm-compilation phase 1 ok: %u bytes, %0.3f ms decode, %zu nodes, "
- "%0.3f ms pipeline\n",
- static_cast<unsigned>(wasm_unit_->func_body_.end -
- wasm_unit_->func_body_.start),
- decode_ms, node_count, pipeline_ms);
- }
- if (ok_) wasm_code_ = info.wasm_code();
- }
- if (ok_) wasm_unit_->native_module()->PublishCode(wasm_code_);
-}
-
-wasm::WasmCode* TurbofanWasmCompilationUnit::FinishCompilation(
- wasm::ErrorThrower* thrower) {
- if (!ok_) {
- if (graph_construction_result_.failed()) {
- // Add the function as another context for the exception. This is
- // user-visible, so use official format.
- EmbeddedVector<char, 128> message;
- wasm::ModuleWireBytes wire_bytes(
- wasm_unit_->native_module()->wire_bytes());
- wasm::WireBytesRef name_ref =
- wasm_unit_->native_module()->module()->LookupFunctionName(
- wire_bytes, wasm_unit_->func_index_);
- if (name_ref.is_set()) {
- wasm::WasmName name = wire_bytes.GetNameOrNull(name_ref);
- SNPrintF(message, "Compiling wasm function \"%.*s\" failed",
- name.length(), name.start());
- } else {
- SNPrintF(message,
- "Compiling wasm function \"wasm-function[%d]\" failed",
- wasm_unit_->func_index_);
- }
- thrower->CompileFailed(message.start(), graph_construction_result_);
- }
+ Pipeline::GenerateCodeForWasmFunction(
+ &info, wasm_unit_->wasm_engine_, mcgraph, call_descriptor,
+ source_positions, node_origins, func_body, env->module,
+ wasm_unit_->func_index_);
- return nullptr;
+ if (FLAG_trace_wasm_decode_time) {
+ double pipeline_ms = pipeline_timer.Elapsed().InMillisecondsF();
+ PrintF(
+ "wasm-compilation phase 1 ok: %u bytes, %0.3f ms decode, %zu nodes, "
+ "%0.3f ms pipeline\n",
+ static_cast<unsigned>(func_body.end - func_body.start), decode_ms,
+ node_count, pipeline_ms);
}
- return wasm_code_;
+ // TODO(bradnelson): Improve histogram handling of size_t.
+ counters->wasm_compile_function_peak_memory_bytes()->AddSample(
+ static_cast<int>(mcgraph->graph()->zone()->allocation_size()));
+ return std::move(*info.ReleaseWasmCompilationResult());
}
namespace {
@@ -5347,10 +6028,13 @@ class LinkageLocationAllocator {
// General code uses the above configuration data.
CallDescriptor* GetWasmCallDescriptor(
Zone* zone, wasm::FunctionSig* fsig,
- WasmGraphBuilder::UseRetpoline use_retpoline) {
- // The '+ 1' here is to accomodate the instance object as first parameter.
+ WasmGraphBuilder::UseRetpoline use_retpoline,
+ WasmGraphBuilder::ExtraCallableParam extra_callable_param) {
+ // The extra slots here accommodate the instance object as the first
+ // parameter and, in the case of an import wrapper, the additional callable.
+ int extra_params = extra_callable_param ? 2 : 1;
LocationSignature::Builder locations(zone, fsig->return_count(),
- fsig->parameter_count() + 1);
+ fsig->parameter_count() + extra_params);
// Add register and/or stack parameter(s).
LinkageLocationAllocator params(wasm::kGpParamRegisters,
@@ -5358,13 +6042,34 @@ CallDescriptor* GetWasmCallDescriptor(
// The instance object.
locations.AddParam(params.Next(MachineRepresentation::kTaggedPointer));
+ const size_t param_offset = 1; // Actual params start here.
- const int parameter_count = static_cast<int>(fsig->parameter_count());
- for (int i = 0; i < parameter_count; i++) {
+ // Parameters are separated into two groups (first all untagged, then all
+ // tagged parameters). This makes it easy to enumerate the tagged
+ // parameters during frame iteration.
+ const size_t parameter_count = fsig->parameter_count();
+ for (size_t i = 0; i < parameter_count; i++) {
+ MachineRepresentation param =
+ wasm::ValueTypes::MachineRepresentationFor(fsig->GetParam(i));
+ // Skip tagged parameters (e.g. any-ref).
+ if (IsAnyTagged(param)) continue;
+ auto l = params.Next(param);
+ locations.AddParamAt(i + param_offset, l);
+ }
+ for (size_t i = 0; i < parameter_count; i++) {
MachineRepresentation param =
wasm::ValueTypes::MachineRepresentationFor(fsig->GetParam(i));
+ // Skip untagged parameters.
+ if (!IsAnyTagged(param)) continue;
auto l = params.Next(param);
- locations.AddParam(l);
+ locations.AddParamAt(i + param_offset, l);
+ }
+
+ // Import call wrappers have an additional (implicit) parameter, the callable.
+ // For consistency with JS, we use the JSFunction register.
+ if (extra_callable_param) {
+ locations.AddParam(LinkageLocation::ForRegister(
+ kJSFunctionRegister.code(), MachineType::TaggedPointer()));
}
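
The net effect of the two passes: each location is recorded at the parameter's original signature index, but slots are allocated untagged-first, so the tagged parameters occupy one contiguous run. A standalone sketch of the slot assignment for a made-up signature (i32, ref, f64, ref):

    #include <cstdio>
    #include <vector>

    int main() {
      std::vector<bool> is_tagged = {false, true, false, true};
      std::vector<int> slot_of(is_tagged.size());
      int next_slot = 0;
      for (size_t i = 0; i < is_tagged.size(); ++i)  // pass 1: untagged
        if (!is_tagged[i]) slot_of[i] = next_slot++;
      for (size_t i = 0; i < is_tagged.size(); ++i)  // pass 2: tagged
        if (is_tagged[i]) slot_of[i] = next_slot++;
      for (size_t i = 0; i < slot_of.size(); ++i)
        std::printf("param %zu -> slot %d\n", i, slot_of[i]);
      // param 0 -> slot 0, param 1 -> slot 2, param 2 -> slot 1, param 3 -> slot 3
      return 0;
    }
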
// Add return location(s).
@@ -5391,7 +6096,9 @@ CallDescriptor* GetWasmCallDescriptor(
MachineType target_type = MachineType::Pointer();
LinkageLocation target_loc = LinkageLocation::ForAnyRegister(target_type);
- CallDescriptor::Kind kind = CallDescriptor::kCallWasmFunction;
+ CallDescriptor::Kind kind = extra_callable_param
+ ? CallDescriptor::kCallWasmImportWrapper
+ : CallDescriptor::kCallWasmFunction;
CallDescriptor::Flags flags =
use_retpoline ? CallDescriptor::kRetpoline : CallDescriptor::kNoFlags;
@@ -5492,18 +6199,34 @@ CallDescriptor* GetI32WasmCallDescriptorForSimd(
AssemblerOptions WasmAssemblerOptions() {
AssemblerOptions options;
+ // Relocation info is required to serialize {WasmCode} for actual wasm
+ // functions (stubs are not serialized).
options.record_reloc_info_for_serialization = true;
options.enable_root_array_delta_access = false;
return options;
}
+AssemblerOptions WasmStubAssemblerOptions() {
+ AssemblerOptions options;
+ // Relocation info not necessary because stubs are not serialized.
+ options.record_reloc_info_for_serialization = false;
+ options.enable_root_array_delta_access = false;
+ return options;
+}
+
#undef WASM_64
#undef FATAL_UNSUPPORTED_OPCODE
#undef WASM_INSTANCE_OBJECT_SIZE
#undef WASM_INSTANCE_OBJECT_OFFSET
+#undef LOAD_RAW
#undef LOAD_INSTANCE_FIELD
#undef LOAD_TAGGED_POINTER
+#undef LOAD_TAGGED_ANY
#undef LOAD_FIXED_ARRAY_SLOT
+#undef LOAD_FIXED_ARRAY_SLOT_SMI
+#undef LOAD_FIXED_ARRAY_SLOT_PTR
+#undef LOAD_FIXED_ARRAY_SLOT_ANY
+#undef STORE_FIXED_ARRAY_SLOT_SMI
+#undef STORE_FIXED_ARRAY_SLOT_ANY
} // namespace compiler
} // namespace internal
diff --git a/deps/v8/src/compiler/wasm-compiler.h b/deps/v8/src/compiler/wasm-compiler.h
index f1f341c9af..5cb24afa81 100644
--- a/deps/v8/src/compiler/wasm-compiler.h
+++ b/deps/v8/src/compiler/wasm-compiler.h
@@ -50,20 +50,21 @@ class TurbofanWasmCompilationUnit {
explicit TurbofanWasmCompilationUnit(wasm::WasmCompilationUnit* wasm_unit);
~TurbofanWasmCompilationUnit();
- SourcePositionTable* BuildGraphForWasmFunction(wasm::WasmFeatures* detected,
- double* decode_ms,
- MachineGraph* mcgraph,
- NodeOriginTable* node_origins);
-
- void ExecuteCompilation(wasm::WasmFeatures* detected);
-
- wasm::WasmCode* FinishCompilation(wasm::ErrorThrower*);
+ bool BuildGraphForWasmFunction(wasm::CompilationEnv* env,
+ const wasm::FunctionBody& func_body,
+ wasm::WasmFeatures* detected,
+ double* decode_ms, MachineGraph* mcgraph,
+ NodeOriginTable* node_origins,
+ SourcePositionTable* source_positions,
+ wasm::WasmError* error_out);
+
+ wasm::WasmCompilationResult ExecuteCompilation(wasm::CompilationEnv*,
+ const wasm::FunctionBody&,
+ Counters*,
+ wasm::WasmFeatures* detected);
private:
wasm::WasmCompilationUnit* const wasm_unit_;
- bool ok_ = true;
- wasm::WasmCode* wasm_code_ = nullptr;
- wasm::Result<wasm::DecodeStruct*> graph_construction_result_;
DISALLOW_COPY_AND_ASSIGN(TurbofanWasmCompilationUnit);
};
@@ -71,7 +72,7 @@ class TurbofanWasmCompilationUnit {
// Calls to WASM imports are handled in several different ways, depending
// on the type of the target function/callable and whether the signature
// matches the argument arity.
-enum class WasmImportCallKind {
+enum class WasmImportCallKind : uint8_t {
kLinkError, // static WASM->WASM type error
kRuntimeTypeError, // runtime WASM->JS type error
kWasmToWasm, // fast WASM->WASM call
@@ -79,18 +80,46 @@ enum class WasmImportCallKind {
kJSFunctionArityMatchSloppy, // fast WASM->JS call, sloppy receiver
kJSFunctionArityMismatch, // WASM->JS, needs adapter frame
kJSFunctionArityMismatchSloppy, // WASM->JS, needs adapter frame, sloppy
- kUseCallBuiltin // everything else
+ // Math functions imported from JavaScript that are intrinsified
+ kFirstMathIntrinsic,
+ kF64Acos = kFirstMathIntrinsic,
+ kF64Asin,
+ kF64Atan,
+ kF64Cos,
+ kF64Sin,
+ kF64Tan,
+ kF64Exp,
+ kF64Log,
+ kF64Atan2,
+ kF64Pow,
+ kF64Ceil,
+ kF64Floor,
+ kF64Sqrt,
+ kF64Min,
+ kF64Max,
+ kF64Abs,
+ kF32Min,
+ kF32Max,
+ kF32Abs,
+ kF32Ceil,
+ kF32Floor,
+ kF32Sqrt,
+ kF32ConvertF64,
+ kLastMathIntrinsic = kF32ConvertF64,
+ // For everything else, there's the call builtin.
+ kUseCallBuiltin
};
WasmImportCallKind GetWasmImportCallKind(Handle<JSReceiver> callable,
- wasm::FunctionSig* sig);
+ wasm::FunctionSig* sig,
+ bool has_bigint_feature);
// Compiles an import call wrapper, which allows WASM to call imports.
-MaybeHandle<Code> CompileWasmImportCallWrapper(Isolate*, WasmImportCallKind,
- wasm::FunctionSig*,
- uint32_t index,
- wasm::ModuleOrigin,
- wasm::UseTrapHandler);
+wasm::WasmCode* CompileWasmImportCallWrapper(wasm::WasmEngine*,
+ wasm::NativeModule*,
+ WasmImportCallKind,
+ wasm::FunctionSig*,
+ bool source_positions);
// Creates a code object calling a wasm function with the given signature,
// callable from JS.
@@ -100,12 +129,14 @@ V8_EXPORT_PRIVATE MaybeHandle<Code> CompileJSToWasmWrapper(Isolate*,
// Compiles a stub that redirects a call to a wasm function to the wasm
// interpreter. It's ABI compatible with the compiled wasm function.
-MaybeHandle<Code> CompileWasmInterpreterEntry(Isolate*, uint32_t func_index,
- wasm::FunctionSig*);
+wasm::WasmCode* CompileWasmInterpreterEntry(wasm::WasmEngine*,
+ wasm::NativeModule*,
+ uint32_t func_index,
+ wasm::FunctionSig*);
enum CWasmEntryParameters {
kCodeEntry,
- kWasmInstance,
+ kObjectRef,
kArgumentsBuffer,
// marker:
kNumParameters
@@ -130,13 +161,20 @@ struct WasmInstanceCacheNodes {
// the wasm decoder from the internal details of TurboFan.
class WasmGraphBuilder {
public:
- enum EnforceBoundsCheck : bool {
+ enum EnforceBoundsCheck : bool { // --
kNeedsBoundsCheck = true,
kCanOmitBoundsCheck = false
};
- enum UseRetpoline : bool { kRetpoline = true, kNoRetpoline = false };
+ enum UseRetpoline : bool { // --
+ kRetpoline = true,
+ kNoRetpoline = false
+ };
+ enum ExtraCallableParam : bool { // --
+ kExtraCallableParam = true,
+ kNoExtraCallableParam = false
+ };
- WasmGraphBuilder(wasm::ModuleEnv* env, Zone* zone, MachineGraph* mcgraph,
+ WasmGraphBuilder(wasm::CompilationEnv* env, Zone* zone, MachineGraph* mcgraph,
wasm::FunctionSig* sig,
compiler::SourcePositionTable* spt = nullptr);
@@ -157,7 +195,8 @@ class WasmGraphBuilder {
Node* Start(unsigned params);
Node* Param(unsigned index);
Node* Loop(Node* entry);
- Node* Terminate(Node* effect, Node* control);
+ Node* TerminateLoop(Node* effect, Node* control);
+ Node* TerminateThrow(Node* effect, Node* control);
Node* Merge(unsigned count, Node** controls);
Node* Phi(wasm::ValueType type, unsigned count, Node** vals, Node* control);
Node* CreateOrMergeIntoPhi(MachineRepresentation rep, Node* merge,
@@ -175,7 +214,7 @@ class WasmGraphBuilder {
wasm::WasmCodePosition position = wasm::kNoCodePosition);
Node* Unop(wasm::WasmOpcode opcode, Node* input,
wasm::WasmCodePosition position = wasm::kNoCodePosition);
- Node* GrowMemory(Node* input);
+ Node* MemoryGrow(Node* input);
Node* Throw(uint32_t exception_index, const wasm::WasmException* exception,
const Vector<Node*> values);
Node* Rethrow(Node* except_obj);
@@ -183,7 +222,7 @@ class WasmGraphBuilder {
Node* LoadExceptionTagFromTable(uint32_t exception_index);
Node* GetExceptionTag(Node* except_obj);
Node** GetExceptionValues(Node* except_obj,
- const wasm::WasmException* except_decl);
+ const wasm::WasmException* exception);
bool IsPhiWithMerge(Node* phi, Node* merge);
bool ThrowsException(Node* node, Node** if_success, Node** if_exception);
void AppendToMerge(Node* merge, Node* from);
@@ -274,9 +313,14 @@ class WasmGraphBuilder {
void set_effect_ptr(Node** effect) { this->effect_ = effect; }
+ Node* GetImportedMutableGlobals();
+
void GetGlobalBaseAndOffset(MachineType mem_type, const wasm::WasmGlobal&,
Node** base_node, Node** offset_node);
+ void GetBaseAndOffsetForImportedMutableAnyRefGlobal(
+ const wasm::WasmGlobal& global, Node** base, Node** offset);
+
// Utilities to manipulate sets of instance cache nodes.
void InitInstanceCache(WasmInstanceCacheNodes* instance_cache);
void PrepareInstanceCacheForLoop(WasmInstanceCacheNodes* instance_cache,
@@ -316,11 +360,34 @@ class WasmGraphBuilder {
uint32_t alignment, uint32_t offset,
wasm::WasmCodePosition position);
+ // Returns a pointer to the dropped_data_segments (resp. dropped_elem_segments)
+ // array. Traps if the segment is active or has already been dropped.
+ Node* CheckDataSegmentIsPassiveAndNotDropped(uint32_t data_segment_index,
+ wasm::WasmCodePosition position);
+ Node* CheckElemSegmentIsPassiveAndNotDropped(uint32_t elem_segment_index,
+ wasm::WasmCodePosition position);
+ Node* MemoryInit(uint32_t data_segment_index, Node* dst, Node* src,
+ Node* size, wasm::WasmCodePosition position);
+ Node* MemoryCopy(Node* dst, Node* src, Node* size,
+ wasm::WasmCodePosition position);
+ Node* MemoryDrop(uint32_t data_segment_index,
+ wasm::WasmCodePosition position);
+ Node* MemoryFill(Node* dst, Node* fill, Node* size,
+ wasm::WasmCodePosition position);
+
+ Node* TableInit(uint32_t table_index, uint32_t elem_segment_index, Node* dst,
+ Node* src, Node* size, wasm::WasmCodePosition position);
+ Node* TableDrop(uint32_t elem_segment_index, wasm::WasmCodePosition position);
+ Node* TableCopy(uint32_t table_index, Node* dst, Node* src, Node* size,
+ wasm::WasmCodePosition position);
+
bool has_simd() const { return has_simd_; }
const wasm::WasmModule* module() { return env_ ? env_->module : nullptr; }
- bool use_trap_handler() const { return env_ && env_->use_trap_handler; }
+ wasm::UseTrapHandler use_trap_handler() const {
+ return env_ ? env_->use_trap_handler : wasm::kNoTrapHandler;
+ }
MachineGraph* mcgraph() { return mcgraph_; }
Graph* graph();
@@ -335,7 +402,7 @@ class WasmGraphBuilder {
Zone* const zone_;
MachineGraph* const mcgraph_;
- wasm::ModuleEnv* const env_;
+ wasm::CompilationEnv* const env_;
Node** control_ = nullptr;
Node** effect_ = nullptr;
@@ -366,8 +433,15 @@ class WasmGraphBuilder {
// BoundsCheckMem receives a uint32 {index} node and returns a ptrsize index.
Node* BoundsCheckMem(uint8_t access_size, Node* index, uint32_t offset,
wasm::WasmCodePosition, EnforceBoundsCheck);
+ // Check that the range [start, start + size) is in the range [0, max).
+ void BoundsCheckRange(Node* start, Node* size, Node* max,
+ wasm::WasmCodePosition);
+ // BoundsCheckMemRange receives a uint32 {start} and {size} and returns
+ // a pointer into memory at that index, if it is in bounds.
+ Node* BoundsCheckMemRange(Node* start, Node* size, wasm::WasmCodePosition);
Node* CheckBoundsAndAlignment(uint8_t access_size, Node* index,
uint32_t offset, wasm::WasmCodePosition);
+
Node* Uint32ToUintptr(Node*);
const Operator* GetSafeLoadOperator(int offset, wasm::ValueType type);
const Operator* GetSafeStoreOperator(int offset, wasm::ValueType type);
@@ -449,6 +523,8 @@ class WasmGraphBuilder {
Node* BuildChangeUint31ToSmi(Node* value);
Node* BuildSmiShiftBitsConstant();
Node* BuildChangeSmiToInt32(Node* value);
+ // Generates {index > max ? Smi(max) : Smi(index)}.
+ Node* BuildConvertUint32ToSmiWithSaturation(Node* index, uint32_t maxval);
// Asm.js specific functionality.
Node* BuildI32AsmjsSConvertF32(Node* input);
@@ -463,9 +539,10 @@ class WasmGraphBuilder {
Node* BuildAsmjsStoreMem(MachineType type, Node* index, Node* val);
uint32_t GetExceptionEncodedSize(const wasm::WasmException* exception) const;
- void BuildEncodeException32BitValue(Node* except_obj, uint32_t* index,
+ void BuildEncodeException32BitValue(Node* values_array, uint32_t* index,
Node* value);
- Node* BuildDecodeException32BitValue(Node* const* values, uint32_t* index);
+ Node* BuildDecodeException32BitValue(Node* values_array, uint32_t* index);
+ Node* BuildDecodeException64BitValue(Node* values_array, uint32_t* index);
Node** Realloc(Node* const* buffer, size_t old_count, size_t new_count) {
Node** buf = Buffer(new_count);
@@ -483,14 +560,17 @@ class WasmGraphBuilder {
int parameter_count);
Node* BuildCallToRuntimeWithContext(Runtime::FunctionId f, Node* js_context,
- Node** parameters, int parameter_count);
+ Node** parameters, int parameter_count,
+ Node** effect, Node* control);
TrapId GetTrapIdForTrap(wasm::TrapReason reason);
};
V8_EXPORT_PRIVATE CallDescriptor* GetWasmCallDescriptor(
Zone* zone, wasm::FunctionSig* signature,
WasmGraphBuilder::UseRetpoline use_retpoline =
- WasmGraphBuilder::kNoRetpoline);
+ WasmGraphBuilder::kNoRetpoline,
+ WasmGraphBuilder::ExtraCallableParam callable_param =
+ WasmGraphBuilder::kNoExtraCallableParam);
V8_EXPORT_PRIVATE CallDescriptor* GetI32WasmCallDescriptor(
Zone* zone, CallDescriptor* call_descriptor);
@@ -499,6 +579,7 @@ V8_EXPORT_PRIVATE CallDescriptor* GetI32WasmCallDescriptorForSimd(
Zone* zone, CallDescriptor* call_descriptor);
AssemblerOptions WasmAssemblerOptions();
+AssemblerOptions WasmStubAssemblerOptions();
} // namespace compiler
} // namespace internal
diff --git a/deps/v8/src/constant-pool.cc b/deps/v8/src/constant-pool.cc
new file mode 100644
index 0000000000..ebb7099120
--- /dev/null
+++ b/deps/v8/src/constant-pool.cc
@@ -0,0 +1,214 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/constant-pool.h"
+#include "src/assembler-inl.h"
+
+namespace v8 {
+namespace internal {
+
+#if defined(V8_TARGET_ARCH_PPC)
+
+ConstantPoolBuilder::ConstantPoolBuilder(int ptr_reach_bits,
+ int double_reach_bits) {
+ info_[ConstantPoolEntry::INTPTR].entries.reserve(64);
+ info_[ConstantPoolEntry::INTPTR].regular_reach_bits = ptr_reach_bits;
+ info_[ConstantPoolEntry::DOUBLE].regular_reach_bits = double_reach_bits;
+}
+
+ConstantPoolEntry::Access ConstantPoolBuilder::NextAccess(
+ ConstantPoolEntry::Type type) const {
+ const PerTypeEntryInfo& info = info_[type];
+
+ if (info.overflow()) return ConstantPoolEntry::OVERFLOWED;
+
+ int dbl_count = info_[ConstantPoolEntry::DOUBLE].regular_count;
+ int dbl_offset = dbl_count * kDoubleSize;
+ int ptr_count = info_[ConstantPoolEntry::INTPTR].regular_count;
+ int ptr_offset = ptr_count * kSystemPointerSize + dbl_offset;
+
+ if (type == ConstantPoolEntry::DOUBLE) {
+ // Double overflow detection must take into account the reach for both
+ // types.
+ int ptr_reach_bits = info_[ConstantPoolEntry::INTPTR].regular_reach_bits;
+ if (!is_uintn(dbl_offset, info.regular_reach_bits) ||
+ (ptr_count > 0 &&
+ !is_uintn(ptr_offset + kDoubleSize - kSystemPointerSize,
+ ptr_reach_bits))) {
+ return ConstantPoolEntry::OVERFLOWED;
+ }
+ } else {
+ DCHECK(type == ConstantPoolEntry::INTPTR);
+ if (!is_uintn(ptr_offset, info.regular_reach_bits)) {
+ return ConstantPoolEntry::OVERFLOWED;
+ }
+ }
+
+ return ConstantPoolEntry::REGULAR;
+}
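
A worked instance of the overflow test, under assumed values (64-bit target, 16-bit reach for both entry types; the real reach values come from the PPC assembler, and is_uintn here is a stand-in for V8's utility):

    #include <cstdint>

    // Stand-in for V8's is_uintn: does x fit in n unsigned bits?
    bool is_uintn(int64_t x, unsigned n) {
      return 0 <= x && x < (int64_t{1} << n);
    }

    bool DoubleEntryStaysRegular(int dbl_count, int ptr_count) {
      const int kDoubleSize = 8, kSystemPointerSize = 8;
      const unsigned kReachBits = 16;  // illustrative
      int dbl_offset = dbl_count * kDoubleSize;
      int ptr_offset = ptr_count * kSystemPointerSize + dbl_offset;
      // A new double also shifts every pointer entry; check that the last
      // pointer still reaches.
      return is_uintn(dbl_offset, kReachBits) &&
             (ptr_count == 0 ||
              is_uintn(ptr_offset + kDoubleSize - kSystemPointerSize,
                       kReachBits));
    }
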
+
+ConstantPoolEntry::Access ConstantPoolBuilder::AddEntry(
+ ConstantPoolEntry& entry, ConstantPoolEntry::Type type) {
+ DCHECK(!emitted_label_.is_bound());
+ PerTypeEntryInfo& info = info_[type];
+ const int entry_size = ConstantPoolEntry::size(type);
+ bool merged = false;
+
+ if (entry.sharing_ok()) {
+ // Try to merge with an existing shared entry of equal value.
+ std::vector<ConstantPoolEntry>::iterator it = info.shared_entries.begin();
+ int end = static_cast<int>(info.shared_entries.size());
+ for (int i = 0; i < end; i++, it++) {
+ if ((entry_size == kSystemPointerSize)
+ ? entry.value() == it->value()
+ : entry.value64() == it->value64()) {
+ // Merge with found entry.
+ entry.set_merged_index(i);
+ merged = true;
+ break;
+ }
+ }
+ }
+
+ // By definition, merged entries have regular access.
+ DCHECK(!merged || entry.merged_index() < info.regular_count);
+ ConstantPoolEntry::Access access =
+ (merged ? ConstantPoolEntry::REGULAR : NextAccess(type));
+
+ // Enforce an upper bound on search time by limiting the search to
+ // unique sharable entries which fit in the regular section.
+ if (entry.sharing_ok() && !merged && access == ConstantPoolEntry::REGULAR) {
+ info.shared_entries.push_back(entry);
+ } else {
+ info.entries.push_back(entry);
+ }
+
+ // We're done if we found a match or have already triggered the
+ // overflow state.
+ if (merged || info.overflow()) return access;
+
+ if (access == ConstantPoolEntry::REGULAR) {
+ info.regular_count++;
+ } else {
+ info.overflow_start = static_cast<int>(info.entries.size()) - 1;
+ }
+
+ return access;
+}
+
+void ConstantPoolBuilder::EmitSharedEntries(Assembler* assm,
+ ConstantPoolEntry::Type type) {
+ PerTypeEntryInfo& info = info_[type];
+ std::vector<ConstantPoolEntry>& shared_entries = info.shared_entries;
+ const int entry_size = ConstantPoolEntry::size(type);
+ int base = emitted_label_.pos();
+ DCHECK_GT(base, 0);
+ int shared_end = static_cast<int>(shared_entries.size());
+ std::vector<ConstantPoolEntry>::iterator shared_it = shared_entries.begin();
+ for (int i = 0; i < shared_end; i++, shared_it++) {
+ int offset = assm->pc_offset() - base;
+ shared_it->set_offset(offset); // Save offset for merged entries.
+ if (entry_size == kSystemPointerSize) {
+ assm->dp(shared_it->value());
+ } else {
+ assm->dq(shared_it->value64());
+ }
+ DCHECK(is_uintn(offset, info.regular_reach_bits));
+
+ // Patch load sequence with correct offset.
+ assm->PatchConstantPoolAccessInstruction(shared_it->position(), offset,
+ ConstantPoolEntry::REGULAR, type);
+ }
+}
+
+void ConstantPoolBuilder::EmitGroup(Assembler* assm,
+ ConstantPoolEntry::Access access,
+ ConstantPoolEntry::Type type) {
+ PerTypeEntryInfo& info = info_[type];
+ const bool overflow = info.overflow();
+ std::vector<ConstantPoolEntry>& entries = info.entries;
+ std::vector<ConstantPoolEntry>& shared_entries = info.shared_entries;
+ const int entry_size = ConstantPoolEntry::size(type);
+ int base = emitted_label_.pos();
+ DCHECK_GT(base, 0);
+ int begin;
+ int end;
+
+ if (access == ConstantPoolEntry::REGULAR) {
+ // Emit any shared entries first
+ EmitSharedEntries(assm, type);
+ }
+
+ if (access == ConstantPoolEntry::REGULAR) {
+ begin = 0;
+ end = overflow ? info.overflow_start : static_cast<int>(entries.size());
+ } else {
+ DCHECK(access == ConstantPoolEntry::OVERFLOWED);
+ if (!overflow) return;
+ begin = info.overflow_start;
+ end = static_cast<int>(entries.size());
+ }
+
+ std::vector<ConstantPoolEntry>::iterator it = entries.begin();
+ if (begin > 0) std::advance(it, begin);
+ for (int i = begin; i < end; i++, it++) {
+ // Update constant pool if necessary and get the entry's offset.
+ int offset;
+ ConstantPoolEntry::Access entry_access;
+ if (!it->is_merged()) {
+ // Emit new entry
+ offset = assm->pc_offset() - base;
+ entry_access = access;
+ if (entry_size == kSystemPointerSize) {
+ assm->dp(it->value());
+ } else {
+ assm->dq(it->value64());
+ }
+ } else {
+ // Retrieve offset from shared entry.
+ offset = shared_entries[it->merged_index()].offset();
+ entry_access = ConstantPoolEntry::REGULAR;
+ }
+
+ DCHECK(entry_access == ConstantPoolEntry::OVERFLOWED ||
+ is_uintn(offset, info.regular_reach_bits));
+
+ // Patch load sequence with correct offset.
+ assm->PatchConstantPoolAccessInstruction(it->position(), offset,
+ entry_access, type);
+ }
+}
+
+// Emit and return size of pool.
+int ConstantPoolBuilder::Emit(Assembler* assm) {
+ bool emitted = emitted_label_.is_bound();
+ bool empty = IsEmpty();
+
+ if (!emitted) {
+ // Mark start of constant pool. Align if necessary.
+ if (!empty) assm->DataAlign(kDoubleSize);
+ assm->bind(&emitted_label_);
+ if (!empty) {
+ // Emit in groups based on access and type.
+ // Emit doubles first for alignment purposes.
+ EmitGroup(assm, ConstantPoolEntry::REGULAR, ConstantPoolEntry::DOUBLE);
+ EmitGroup(assm, ConstantPoolEntry::REGULAR, ConstantPoolEntry::INTPTR);
+ if (info_[ConstantPoolEntry::DOUBLE].overflow()) {
+ assm->DataAlign(kDoubleSize);
+ EmitGroup(assm, ConstantPoolEntry::OVERFLOWED,
+ ConstantPoolEntry::DOUBLE);
+ }
+ if (info_[ConstantPoolEntry::INTPTR].overflow()) {
+ EmitGroup(assm, ConstantPoolEntry::OVERFLOWED,
+ ConstantPoolEntry::INTPTR);
+ }
+ }
+ }
+
+ return !empty ? (assm->pc_offset() - emitted_label_.pos()) : 0;
+}
+
+#endif // defined(V8_TARGET_ARCH_PPC)
+
+} // namespace internal
+} // namespace v8
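
For reference, the reach-bit overflow test driving NextAccess above can be exercised in isolation. The following is a minimal standalone sketch, not code from this patch; kSystemPointerSize, the 5-bit reach, and the helper names are illustrative stand-ins for the real V8 definitions.

#include <cstdint>
#include <iostream>

constexpr int kSystemPointerSize = 8;  // assumed 64-bit target
constexpr int kDoubleSize = 8;

// Mirrors the shape of V8's is_uintn: true when |value| fits in |bits|
// unsigned bits.
bool is_uintn(int64_t value, unsigned bits) {
  return 0 <= value && value < (int64_t{1} << bits);
}

enum Access { REGULAR, OVERFLOWED };

// Doubles are emitted first, so a new pointer-sized entry would land at
// dbl_count * kDoubleSize + ptr_count * kSystemPointerSize from the base.
Access NextPointerAccess(int dbl_count, int ptr_count, unsigned reach_bits) {
  int64_t offset = int64_t{dbl_count} * kDoubleSize +
                   int64_t{ptr_count} * kSystemPointerSize;
  return is_uintn(offset, reach_bits) ? REGULAR : OVERFLOWED;
}

int main() {
  // With a hypothetical 5-bit reach (offsets 0..31), the fifth 8-byte entry
  // (offset 32) no longer fits and is diverted to the overflow section.
  for (int n = 0; n < 6; ++n) {
    std::cout << "ptr entry " << n << ": "
              << (NextPointerAccess(0, n, 5) == REGULAR ? "REGULAR"
                                                        : "OVERFLOWED")
              << "\n";
  }
  return 0;
}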
diff --git a/deps/v8/src/constant-pool.h b/deps/v8/src/constant-pool.h
new file mode 100644
index 0000000000..15faeeaaa2
--- /dev/null
+++ b/deps/v8/src/constant-pool.h
@@ -0,0 +1,159 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CONSTANT_POOL_H_
+#define V8_CONSTANT_POOL_H_
+
+#include <map>
+
+#include "src/double.h"
+#include "src/globals.h"
+#include "src/label.h"
+#include "src/reloc-info.h"
+
+namespace v8 {
+namespace internal {
+
+// -----------------------------------------------------------------------------
+// Constant pool support
+
+class ConstantPoolEntry {
+ public:
+ ConstantPoolEntry() = default;
+ ConstantPoolEntry(int position, intptr_t value, bool sharing_ok,
+ RelocInfo::Mode rmode = RelocInfo::NONE)
+ : position_(position),
+ merged_index_(sharing_ok ? SHARING_ALLOWED : SHARING_PROHIBITED),
+ value_(value),
+ rmode_(rmode) {}
+ ConstantPoolEntry(int position, Double value,
+ RelocInfo::Mode rmode = RelocInfo::NONE)
+ : position_(position),
+ merged_index_(SHARING_ALLOWED),
+ value64_(value.AsUint64()),
+ rmode_(rmode) {}
+
+ int position() const { return position_; }
+ bool sharing_ok() const { return merged_index_ != SHARING_PROHIBITED; }
+ bool is_merged() const { return merged_index_ >= 0; }
+ int merged_index() const {
+ DCHECK(is_merged());
+ return merged_index_;
+ }
+ void set_merged_index(int index) {
+ DCHECK(sharing_ok());
+ merged_index_ = index;
+ DCHECK(is_merged());
+ }
+ int offset() const {
+ DCHECK_GE(merged_index_, 0);
+ return merged_index_;
+ }
+ void set_offset(int offset) {
+ DCHECK_GE(offset, 0);
+ merged_index_ = offset;
+ }
+ intptr_t value() const { return value_; }
+ uint64_t value64() const { return value64_; }
+ RelocInfo::Mode rmode() const { return rmode_; }
+
+ enum Type { INTPTR, DOUBLE, NUMBER_OF_TYPES };
+
+ static int size(Type type) {
+ return (type == INTPTR) ? kSystemPointerSize : kDoubleSize;
+ }
+
+ enum Access { REGULAR, OVERFLOWED };
+
+ private:
+ int position_;
+ int merged_index_;
+ union {
+ intptr_t value_;
+ uint64_t value64_;
+ };
+ // TODO(leszeks): The way we use this, it could probably be packed into
+ // merged_index_ if size is a concern.
+ RelocInfo::Mode rmode_;
+ enum { SHARING_PROHIBITED = -2, SHARING_ALLOWED = -1 };
+};
+
+#if defined(V8_TARGET_ARCH_PPC)
+
+// -----------------------------------------------------------------------------
+// Embedded constant pool support
+
+class ConstantPoolBuilder {
+ public:
+ ConstantPoolBuilder(int ptr_reach_bits, int double_reach_bits);
+
+ // Add pointer-sized constant to the embedded constant pool
+ ConstantPoolEntry::Access AddEntry(int position, intptr_t value,
+ bool sharing_ok) {
+ ConstantPoolEntry entry(position, value, sharing_ok);
+ return AddEntry(entry, ConstantPoolEntry::INTPTR);
+ }
+
+ // Add double constant to the embedded constant pool
+ ConstantPoolEntry::Access AddEntry(int position, Double value) {
+ ConstantPoolEntry entry(position, value);
+ return AddEntry(entry, ConstantPoolEntry::DOUBLE);
+ }
+
+ // Add double constant to the embedded constant pool
+ ConstantPoolEntry::Access AddEntry(int position, double value) {
+ return AddEntry(position, Double(value));
+ }
+
+ // Previews the access type required for the next new entry to be added.
+ ConstantPoolEntry::Access NextAccess(ConstantPoolEntry::Type type) const;
+
+ bool IsEmpty() {
+ return info_[ConstantPoolEntry::INTPTR].entries.empty() &&
+ info_[ConstantPoolEntry::INTPTR].shared_entries.empty() &&
+ info_[ConstantPoolEntry::DOUBLE].entries.empty() &&
+ info_[ConstantPoolEntry::DOUBLE].shared_entries.empty();
+ }
+
+ // Emit the constant pool. Invoke only after all entries have been
+ // added and all instructions have been emitted.
+ // Returns position of the emitted pool (zero implies no constant pool).
+ int Emit(Assembler* assm);
+
+ // Returns the label associated with the start of the constant pool.
+ // Linking to this label in the function prologue may provide an
+ // efficient means of constant pool pointer register initialization
+ // on some architectures.
+ inline Label* EmittedPosition() { return &emitted_label_; }
+
+ private:
+ ConstantPoolEntry::Access AddEntry(ConstantPoolEntry& entry,
+ ConstantPoolEntry::Type type);
+ void EmitSharedEntries(Assembler* assm, ConstantPoolEntry::Type type);
+ void EmitGroup(Assembler* assm, ConstantPoolEntry::Access access,
+ ConstantPoolEntry::Type type);
+
+ struct PerTypeEntryInfo {
+ PerTypeEntryInfo() : regular_count(0), overflow_start(-1) {}
+ bool overflow() const {
+ return (overflow_start >= 0 &&
+ overflow_start < static_cast<int>(entries.size()));
+ }
+ int regular_reach_bits;
+ int regular_count;
+ int overflow_start;
+ std::vector<ConstantPoolEntry> entries;
+ std::vector<ConstantPoolEntry> shared_entries;
+ };
+
+ Label emitted_label_; // Records pc_offset of emitted pool
+ PerTypeEntryInfo info_[ConstantPoolEntry::NUMBER_OF_TYPES];
+};
+
+#endif // defined(V8_TARGET_ARCH_PPC)
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_CONSTANT_POOL_H_
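
The tri-state merged_index_ field above is the subtle part of ConstantPoolEntry. Below is a compilable sketch of just that tagging scheme; the class and names are editorial stand-ins mirroring the header, not code from the patch.

#include <cassert>

// Sketch of ConstantPoolEntry's merged_index_ overloading: -2 means sharing
// is prohibited, -1 means sharable but not merged, and >= 0 is a merge index
// while building, recycled as the emitted byte offset afterwards.
class EntrySketch {
 public:
  explicit EntrySketch(bool sharing_ok)
      : merged_index_(sharing_ok ? kSharingAllowed : kSharingProhibited) {}

  bool sharing_ok() const { return merged_index_ != kSharingProhibited; }
  bool is_merged() const { return merged_index_ >= 0; }

  void set_merged_index(int index) {
    assert(sharing_ok());
    merged_index_ = index;  // now refers to shared_entries[index]
  }
  // After emission the same slot is reused to hold a pool-relative offset.
  void set_offset(int offset) {
    assert(offset >= 0);
    merged_index_ = offset;
  }

 private:
  enum { kSharingProhibited = -2, kSharingAllowed = -1 };
  int merged_index_;
};

int main() {
  EntrySketch shared(true), unique(false);
  assert(shared.sharing_ok() && !shared.is_merged());
  assert(!unique.sharing_ok());
  shared.set_merged_index(3);
  assert(shared.is_merged() && shared.sharing_ok());
  return 0;
}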
diff --git a/deps/v8/src/constants-arch.h b/deps/v8/src/constants-arch.h
new file mode 100644
index 0000000000..546d316cf4
--- /dev/null
+++ b/deps/v8/src/constants-arch.h
@@ -0,0 +1,28 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CONSTANTS_ARCH_H_
+#define V8_CONSTANTS_ARCH_H_
+
+#if V8_TARGET_ARCH_ARM
+#include "src/arm/constants-arm.h" // NOLINT
+#elif V8_TARGET_ARCH_ARM64
+#include "src/arm64/constants-arm64.h" // NOLINT
+#elif V8_TARGET_ARCH_IA32
+#include "src/ia32/constants-ia32.h" // NOLINT
+#elif V8_TARGET_ARCH_MIPS
+#include "src/mips/constants-mips.h" // NOLINT
+#elif V8_TARGET_ARCH_MIPS64
+#include "src/mips64/constants-mips64.h" // NOLINT
+#elif V8_TARGET_ARCH_PPC
+#include "src/ppc/constants-ppc.h" // NOLINT
+#elif V8_TARGET_ARCH_S390
+#include "src/s390/constants-s390.h" // NOLINT
+#elif V8_TARGET_ARCH_X64
+#include "src/x64/constants-x64.h" // NOLINT
+#else
+#error Unsupported target architecture.
+#endif
+
+#endif // V8_CONSTANTS_ARCH_H_
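
constants-arch.h is a plain umbrella header: one include site, compile-time dispatch on the target macro, and a hard error for unknown targets. A self-contained illustration of the same pattern follows; the DEMO_* names are invented for this sketch.

#include <iostream>

// Dispatch on predefined compiler macros, as the header above does on
// V8_TARGET_ARCH_*; the final arm turns an unknown target into a
// compile-time failure instead of a silent misbuild.
#if defined(__x86_64__) || defined(_M_X64)
#define DEMO_TARGET_ARCH "x64"
#elif defined(__aarch64__) || defined(_M_ARM64)
#define DEMO_TARGET_ARCH "arm64"
#elif defined(__i386__) || defined(_M_IX86)
#define DEMO_TARGET_ARCH "ia32"
#else
#define DEMO_TARGET_ARCH "unknown"  // a real build would use #error here
#endif

int main() { std::cout << "building for " << DEMO_TARGET_ARCH << "\n"; }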
diff --git a/deps/v8/src/contexts-inl.h b/deps/v8/src/contexts-inl.h
index 2194c6bf9b..b132a3e793 100644
--- a/deps/v8/src/contexts-inl.h
+++ b/deps/v8/src/contexts-inl.h
@@ -6,24 +6,24 @@
#define V8_CONTEXTS_INL_H_
#include "src/contexts.h"
-#include "src/heap/heap.h"
+
+#include "src/heap/heap-write-barrier.h"
#include "src/objects-inl.h"
-#include "src/objects/dictionary.h"
+#include "src/objects/dictionary-inl.h"
+#include "src/objects/fixed-array-inl.h"
#include "src/objects/map-inl.h"
#include "src/objects/regexp-match-info.h"
#include "src/objects/scope-info.h"
-#include "src/objects/shared-function-info-inl.h"
-#include "src/objects/template-objects.h"
+#include "src/objects/shared-function-info.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
namespace v8 {
namespace internal {
-
-// static
-ScriptContextTable* ScriptContextTable::cast(Object* context) {
- DCHECK(context->IsScriptContextTable());
- return reinterpret_cast<ScriptContextTable*>(context);
-}
+OBJECT_CONSTRUCTORS_IMPL(ScriptContextTable, FixedArray)
+CAST_ACCESSOR(ScriptContextTable)
int ScriptContextTable::used() const { return Smi::ToInt(get(kUsedSlotIndex)); }
@@ -31,7 +31,6 @@ void ScriptContextTable::set_used(int used) {
set(kUsedSlotIndex, Smi::FromInt(used));
}
-
// static
Handle<Context> ScriptContextTable::GetContext(Isolate* isolate,
Handle<ScriptContextTable> table,
@@ -41,45 +40,61 @@ Handle<Context> ScriptContextTable::GetContext(Isolate* isolate,
FixedArray::get(*table, i + kFirstContextSlotIndex, isolate));
}
-// static
-Context* Context::cast(Object* context) {
- DCHECK(context->IsContext());
- return reinterpret_cast<Context*>(context);
+OBJECT_CONSTRUCTORS_IMPL(Context, HeapObject)
+NEVER_READ_ONLY_SPACE_IMPL(Context)
+CAST_ACCESSOR(Context)
+SMI_ACCESSORS(Context, length, kLengthOffset)
+
+CAST_ACCESSOR(NativeContext)
+
+Object Context::get(int index) const {
+ DCHECK_LT(static_cast<unsigned>(index),
+ static_cast<unsigned>(this->length()));
+ return RELAXED_READ_FIELD(*this, OffsetOfElementAt(index));
}
-NativeContext* NativeContext::cast(Object* context) {
- DCHECK(context->IsNativeContext());
- return reinterpret_cast<NativeContext*>(context);
+void Context::set(int index, Object value) {
+ DCHECK_LT(static_cast<unsigned>(index),
+ static_cast<unsigned>(this->length()));
+ int offset = OffsetOfElementAt(index);
+ RELAXED_WRITE_FIELD(*this, offset, value);
+ WRITE_BARRIER(*this, offset, value);
}
-void Context::set_scope_info(ScopeInfo* scope_info) {
+void Context::set(int index, Object value, WriteBarrierMode mode) {
+ DCHECK_LT(static_cast<unsigned>(index),
+ static_cast<unsigned>(this->length()));
+ int offset = OffsetOfElementAt(index);
+ RELAXED_WRITE_FIELD(*this, offset, value);
+ CONDITIONAL_WRITE_BARRIER(*this, offset, value, mode);
+}
+
+void Context::set_scope_info(ScopeInfo scope_info) {
set(SCOPE_INFO_INDEX, scope_info);
}
-Context* Context::previous() {
- Object* result = get(PREVIOUS_INDEX);
- DCHECK(IsBootstrappingOrValidParentContext(result, this));
- return reinterpret_cast<Context*>(result);
+Context Context::previous() {
+ Object result = get(PREVIOUS_INDEX);
+ DCHECK(IsBootstrappingOrValidParentContext(result, *this));
+ return Context::unchecked_cast(result);
}
-void Context::set_previous(Context* context) { set(PREVIOUS_INDEX, context); }
+void Context::set_previous(Context context) { set(PREVIOUS_INDEX, context); }
-Object* Context::next_context_link() { return get(Context::NEXT_CONTEXT_LINK); }
+Object Context::next_context_link() { return get(Context::NEXT_CONTEXT_LINK); }
bool Context::has_extension() { return !extension()->IsTheHole(); }
-HeapObject* Context::extension() {
+HeapObject Context::extension() {
return HeapObject::cast(get(EXTENSION_INDEX));
}
-void Context::set_extension(HeapObject* object) {
- set(EXTENSION_INDEX, object);
-}
+void Context::set_extension(HeapObject object) { set(EXTENSION_INDEX, object); }
-NativeContext* Context::native_context() const {
- Object* result = get(NATIVE_CONTEXT_INDEX);
+NativeContext Context::native_context() const {
+ Object result = get(NATIVE_CONTEXT_INDEX);
DCHECK(IsBootstrappingOrNativeContext(this->GetIsolate(), result));
- return reinterpret_cast<NativeContext*>(result);
+ return NativeContext::unchecked_cast(result);
}
-void Context::set_native_context(NativeContext* context) {
+void Context::set_native_context(NativeContext context) {
set(NATIVE_CONTEXT_INDEX, context);
}
@@ -119,21 +134,21 @@ bool Context::IsScriptContext() const {
return map()->instance_type() == SCRIPT_CONTEXT_TYPE;
}
-bool Context::HasSameSecurityTokenAs(Context* that) const {
+bool Context::HasSameSecurityTokenAs(Context that) const {
return this->native_context()->security_token() ==
that->native_context()->security_token();
}
#define NATIVE_CONTEXT_FIELD_ACCESSORS(index, type, name) \
- void Context::set_##name(type* value) { \
+ void Context::set_##name(type value) { \
DCHECK(IsNativeContext()); \
set(index, value); \
} \
- bool Context::is_##name(type* value) const { \
+ bool Context::is_##name(type value) const { \
DCHECK(IsNativeContext()); \
return type::cast(get(index)) == value; \
} \
- type* Context::name() const { \
+ type Context::name() const { \
DCHECK(IsNativeContext()); \
return type::cast(get(index)); \
}
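
NATIVE_CONTEXT_FIELD_ACCESSORS above is a classic X-macro: the NATIVE_CONTEXT_FIELDS list expands once per field into a setter, a predicate, and a getter. A minimal standalone version of the pattern, with invented names rather than V8 code:

#include <cassert>
#include <string>

struct Bag {
  std::string title_, owner_;

// One macro expansion yields the same set_/is_/plain accessor triple that
// the diff generates for every native-context field.
#define FIELD_ACCESSORS(name)                                         \
  void set_##name(const std::string& v) { name##_ = v; }              \
  bool is_##name(const std::string& v) const { return name##_ == v; } \
  const std::string& name() const { return name##_; }

  FIELD_ACCESSORS(title)
  FIELD_ACCESSORS(owner)
#undef FIELD_ACCESSORS
};

int main() {
  Bag b;
  b.set_title("contexts");
  assert(b.is_title("contexts"));
  assert(b.owner().empty());
  return 0;
}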
@@ -208,16 +223,30 @@ int Context::FunctionMapIndex(LanguageMode language_mode, FunctionKind kind,
#undef CHECK_FOLLOWS2
#undef CHECK_FOLLOWS4
-Map* Context::GetInitialJSArrayMap(ElementsKind kind) const {
+Map Context::GetInitialJSArrayMap(ElementsKind kind) const {
DCHECK(IsNativeContext());
- if (!IsFastElementsKind(kind)) return nullptr;
+ if (!IsFastElementsKind(kind)) return Map();
DisallowHeapAllocation no_gc;
- Object* const initial_js_array_map = get(Context::ArrayMapIndex(kind));
+ Object const initial_js_array_map = get(Context::ArrayMapIndex(kind));
DCHECK(!initial_js_array_map->IsUndefined());
return Map::cast(initial_js_array_map);
}
+MicrotaskQueue* NativeContext::microtask_queue() const {
+ return reinterpret_cast<MicrotaskQueue*>(
+ READ_INTPTR_FIELD(this, kMicrotaskQueueOffset));
+}
+
+void NativeContext::set_microtask_queue(MicrotaskQueue* microtask_queue) {
+ WRITE_INTPTR_FIELD(this, kMicrotaskQueueOffset,
+ reinterpret_cast<intptr_t>(microtask_queue));
+}
+
+OBJECT_CONSTRUCTORS_IMPL(NativeContext, Context)
+
} // namespace internal
} // namespace v8
+#include "src/objects/object-macros-undef.h"
+
#endif // V8_CONTEXTS_INL_H_
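
The new Context::get/set above address slots by byte offset rather than going through FixedArray; the arithmetic is just OffsetOfElementAt(i) = kHeaderSize + i * kTaggedSize. A standalone sketch with assumed 64-bit constants (editorial, not the real V8 definitions):

#include <iostream>

constexpr int kTaggedSize = 8;     // assumed: 64-bit, no pointer compression
constexpr int kHeapObjectTag = 1;  // V8 heap pointers carry tag bit 1
constexpr int kHeaderSize = 2 * kTaggedSize;  // map + length, as in the diff

// Byte offset of slot |index| from the start of the object...
constexpr int OffsetOfElementAt(int index) {
  return kHeaderSize + index * kTaggedSize;
}
// ...and from the tagged pointer, which is what generated code uses.
constexpr int SlotOffset(int index) {
  return OffsetOfElementAt(index) - kHeapObjectTag;
}

int main() {
  for (int i = 0; i < 4; ++i) {
    std::cout << "slot " << i << ": +" << OffsetOfElementAt(i)
              << " (tagged-relative +" << SlotOffset(i) << ")\n";
  }
  return 0;
}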
diff --git a/deps/v8/src/contexts.cc b/deps/v8/src/contexts.cc
index 2117179219..24bd9bc8b4 100644
--- a/deps/v8/src/contexts.cc
+++ b/deps/v8/src/contexts.cc
@@ -70,17 +70,16 @@ bool Context::is_declaration_context() {
return scope_info()->is_declaration_scope();
}
-
-Context* Context::declaration_context() {
- Context* current = this;
+Context Context::declaration_context() {
+ Context current = *this;
while (!current->is_declaration_context()) {
current = current->previous();
}
return current;
}
-Context* Context::closure_context() {
- Context* current = this;
+Context Context::closure_context() {
+ Context current = *this;
while (!current->IsFunctionContext() && !current->IsScriptContext() &&
!current->IsModuleContext() && !current->IsNativeContext() &&
!current->IsEvalContext()) {
@@ -89,56 +88,54 @@ Context* Context::closure_context() {
return current;
}
-JSObject* Context::extension_object() {
+JSObject Context::extension_object() {
DCHECK(IsNativeContext() || IsFunctionContext() || IsBlockContext() ||
IsEvalContext() || IsCatchContext());
- HeapObject* object = extension();
- if (object->IsTheHole()) return nullptr;
+ HeapObject object = extension();
+ if (object->IsTheHole()) return JSObject();
DCHECK(object->IsJSContextExtensionObject() ||
(IsNativeContext() && object->IsJSGlobalObject()));
return JSObject::cast(object);
}
-JSReceiver* Context::extension_receiver() {
+JSReceiver Context::extension_receiver() {
DCHECK(IsNativeContext() || IsWithContext() || IsEvalContext() ||
IsFunctionContext() || IsBlockContext());
return IsWithContext() ? JSReceiver::cast(extension()) : extension_object();
}
-ScopeInfo* Context::scope_info() {
+ScopeInfo Context::scope_info() {
return ScopeInfo::cast(get(SCOPE_INFO_INDEX));
}
-Module* Context::module() {
- Context* current = this;
+Module Context::module() {
+ Context current = *this;
while (!current->IsModuleContext()) {
current = current->previous();
}
return Module::cast(current->extension());
}
-JSGlobalObject* Context::global_object() {
+JSGlobalObject Context::global_object() {
return JSGlobalObject::cast(native_context()->extension());
}
-
-Context* Context::script_context() {
- Context* current = this;
+Context Context::script_context() {
+ Context current = *this;
while (!current->IsScriptContext()) {
current = current->previous();
}
return current;
}
-JSGlobalProxy* Context::global_proxy() {
+JSGlobalProxy Context::global_proxy() {
return native_context()->global_proxy_object();
}
-void Context::set_global_proxy(JSGlobalProxy* object) {
+void Context::set_global_proxy(JSGlobalProxy object) {
native_context()->set_global_proxy_object(object);
}
-
/**
* Lookups a property in an object environment, taking the unscopables into
* account. This is used For HasBinding spec algorithms for ObjectEnvironment.
@@ -171,13 +168,14 @@ static PropertyAttributes GetAttributesForMode(VariableMode mode) {
return mode == VariableMode::kConst ? READ_ONLY : NONE;
}
-Handle<Object> Context::Lookup(Handle<String> name, ContextLookupFlags flags,
- int* index, PropertyAttributes* attributes,
+// static
+Handle<Object> Context::Lookup(Handle<Context> context, Handle<String> name,
+ ContextLookupFlags flags, int* index,
+ PropertyAttributes* attributes,
InitializationFlag* init_flag,
VariableMode* variable_mode,
bool* is_sloppy_function_name) {
- Isolate* isolate = GetIsolate();
- Handle<Context> context(this, isolate);
+ Isolate* isolate = context->GetIsolate();
bool follow_context_chain = (flags & FOLLOW_CONTEXT_CHAIN) != 0;
bool failed_whitelist = false;
@@ -197,7 +195,8 @@ Handle<Object> Context::Lookup(Handle<String> name, ContextLookupFlags flags,
do {
if (FLAG_trace_contexts) {
- PrintF(" - looking in context %p", reinterpret_cast<void*>(*context));
+ PrintF(" - looking in context %p",
+ reinterpret_cast<void*>(context->ptr()));
if (context->IsScriptContext()) PrintF(" (script context)");
if (context->IsNativeContext()) PrintF(" (native context)");
PrintF("\n");
@@ -206,10 +205,9 @@ Handle<Object> Context::Lookup(Handle<String> name, ContextLookupFlags flags,
// 1. Check global objects, subjects of with, and extension objects.
DCHECK_IMPLIES(context->IsEvalContext(),
context->extension()->IsTheHole(isolate));
- if ((context->IsNativeContext() ||
- (context->IsWithContext() && ((flags & SKIP_WITH_CONTEXT) == 0)) ||
+ if ((context->IsNativeContext() || context->IsWithContext() ||
context->IsFunctionContext() || context->IsBlockContext()) &&
- context->extension_receiver() != nullptr) {
+ !context->extension_receiver().is_null()) {
Handle<JSReceiver> object(context->extension_receiver(), isolate);
if (context->IsNativeContext()) {
@@ -226,7 +224,7 @@ Handle<Object> Context::Lookup(Handle<String> name, ContextLookupFlags flags,
Handle<Context> c = ScriptContextTable::GetContext(
isolate, script_contexts, r.context_index);
PrintF("=> found property in script context %d: %p\n",
- r.context_index, reinterpret_cast<void*>(*c));
+ r.context_index, reinterpret_cast<void*>(c->ptr()));
}
*index = r.slot_index;
*variable_mode = r.mode;
@@ -277,7 +275,7 @@ Handle<Object> Context::Lookup(Handle<String> name, ContextLookupFlags flags,
if (maybe.FromJust() != ABSENT) {
if (FLAG_trace_contexts) {
PrintF("=> found property in context object %p\n",
- reinterpret_cast<void*>(*object));
+ reinterpret_cast<void*>(object->ptr()));
}
return object;
}
@@ -311,8 +309,7 @@ Handle<Object> Context::Lookup(Handle<String> name, ContextLookupFlags flags,
// Check the slot corresponding to the intermediate context holding
// only the function name variable. It's conceptually (and spec-wise)
// in an outer scope of the function's declaration scope.
- if (follow_context_chain && (flags & STOP_AT_DECLARATION_SCOPE) == 0 &&
- context->IsFunctionContext()) {
+ if (follow_context_chain && context->IsFunctionContext()) {
int function_index = scope_info->FunctionContextSlotIndex(*name);
if (function_index >= 0) {
if (FLAG_trace_contexts) {
@@ -354,7 +351,7 @@ Handle<Object> Context::Lookup(Handle<String> name, ContextLookupFlags flags,
}
} else if (context->IsDebugEvaluateContext()) {
// Check materialized locals.
- Object* ext = context->get(EXTENSION_INDEX);
+ Object ext = context->get(EXTENSION_INDEX);
if (ext->IsJSReceiver()) {
Handle<JSReceiver> extension(JSReceiver::cast(ext), isolate);
LookupIterator it(extension, name, extension);
@@ -365,11 +362,12 @@ Handle<Object> Context::Lookup(Handle<String> name, ContextLookupFlags flags,
}
}
// Check the original context, but do not follow its context chain.
- Object* obj = context->get(WRAPPED_CONTEXT_INDEX);
+ Object obj = context->get(WRAPPED_CONTEXT_INDEX);
if (obj->IsContext()) {
+ Handle<Context> context(Context::cast(obj), isolate);
Handle<Object> result =
- Context::cast(obj)->Lookup(name, DONT_FOLLOW_CHAINS, index,
- attributes, init_flag, variable_mode);
+ Context::Lookup(context, name, DONT_FOLLOW_CHAINS, index,
+ attributes, init_flag, variable_mode);
if (!result.is_null()) return result;
}
// Check whitelist. Names that do not pass whitelist shall only resolve
@@ -382,20 +380,16 @@ Handle<Object> Context::Lookup(Handle<String> name, ContextLookupFlags flags,
}
// 3. Prepare to continue with the previous (next outermost) context.
- if (context->IsNativeContext() ||
- ((flags & STOP_AT_DECLARATION_SCOPE) != 0 &&
- context->is_declaration_context())) {
- follow_context_chain = false;
- } else {
- do {
- context = Handle<Context>(context->previous(), isolate);
- // If we come across a whitelist context, and the name is not
- // whitelisted, then only consider with, script, module or native
- // contexts.
- } while (failed_whitelist && !context->IsScriptContext() &&
- !context->IsNativeContext() && !context->IsWithContext() &&
- !context->IsModuleContext());
- }
+ if (context->IsNativeContext()) break;
+
+ do {
+ context = Handle<Context>(context->previous(), isolate);
+ // If we come across a whitelist context, and the name is not
+ // whitelisted, then only consider with, script, module or native
+ // contexts.
+ } while (failed_whitelist && !context->IsScriptContext() &&
+ !context->IsNativeContext() && !context->IsWithContext() &&
+ !context->IsModuleContext());
} while (follow_context_chain);
if (FLAG_trace_contexts) {
@@ -404,8 +398,7 @@ Handle<Object> Context::Lookup(Handle<String> name, ContextLookupFlags flags,
return Handle<Object>::null();
}
-
-void Context::AddOptimizedCode(Code* code) {
+void Context::AddOptimizedCode(Code code) {
DCHECK(IsNativeContext());
DCHECK(code->kind() == Code::OPTIMIZED_FUNCTION);
DCHECK(code->next_code_link()->IsUndefined());
@@ -413,31 +406,26 @@ void Context::AddOptimizedCode(Code* code) {
set(OPTIMIZED_CODE_LIST, code, UPDATE_WEAK_WRITE_BARRIER);
}
-
-void Context::SetOptimizedCodeListHead(Object* head) {
+void Context::SetOptimizedCodeListHead(Object head) {
DCHECK(IsNativeContext());
set(OPTIMIZED_CODE_LIST, head, UPDATE_WEAK_WRITE_BARRIER);
}
-
-Object* Context::OptimizedCodeListHead() {
+Object Context::OptimizedCodeListHead() {
DCHECK(IsNativeContext());
return get(OPTIMIZED_CODE_LIST);
}
-
-void Context::SetDeoptimizedCodeListHead(Object* head) {
+void Context::SetDeoptimizedCodeListHead(Object head) {
DCHECK(IsNativeContext());
set(DEOPTIMIZED_CODE_LIST, head, UPDATE_WEAK_WRITE_BARRIER);
}
-
-Object* Context::DeoptimizedCodeListHead() {
+Object Context::DeoptimizedCodeListHead() {
DCHECK(IsNativeContext());
return get(DEOPTIMIZED_CODE_LIST);
}
-
Handle<Object> Context::ErrorMessageForCodeGenerationFromStrings() {
Isolate* isolate = GetIsolate();
Handle<Object> result(error_message_for_code_gen_from_strings(), isolate);
@@ -446,15 +434,8 @@ Handle<Object> Context::ErrorMessageForCodeGenerationFromStrings() {
"Code generation from strings disallowed for this context");
}
-
#define COMPARE_NAME(index, type, name) \
- if (string->IsOneByteEqualTo(STATIC_CHAR_VECTOR(#name))) return index;
-
-int Context::ImportedFieldIndexForName(Handle<String> string) {
- NATIVE_CONTEXT_IMPORTED_FIELDS(COMPARE_NAME)
- return kNotFound;
-}
-
+ if (string->IsOneByteEqualTo(StaticCharVector(#name))) return index;
int Context::IntrinsicIndexForName(Handle<String> string) {
NATIVE_CONTEXT_INTRINSIC_FUNCTIONS(COMPARE_NAME);
@@ -477,21 +458,20 @@ int Context::IntrinsicIndexForName(const unsigned char* unsigned_string,
#ifdef DEBUG
-bool Context::IsBootstrappingOrNativeContext(Isolate* isolate, Object* object) {
+bool Context::IsBootstrappingOrNativeContext(Isolate* isolate, Object object) {
// During bootstrapping we allow all objects to pass as global
// objects. This is necessary to fix circular dependencies.
return isolate->heap()->gc_state() != Heap::NOT_IN_GC ||
isolate->bootstrapper()->IsActive() || object->IsNativeContext();
}
-
-bool Context::IsBootstrappingOrValidParentContext(
- Object* object, Context* child) {
+bool Context::IsBootstrappingOrValidParentContext(Object object,
+ Context child) {
// During bootstrapping we allow all objects to pass as
// contexts. This is necessary to fix circular dependencies.
if (child->GetIsolate()->bootstrapper()->IsActive()) return true;
if (!object->IsContext()) return false;
- Context* context = Context::cast(object);
+ Context context = Context::cast(object);
return context->IsNativeContext() || context->IsScriptContext() ||
context->IsModuleContext() || !child->IsModuleContext();
}
@@ -510,8 +490,27 @@ void Context::IncrementErrorsThrown() {
set_errors_thrown(Smi::FromInt(previous_value + 1));
}
-
int Context::GetErrorsThrown() { return errors_thrown()->value(); }
+STATIC_ASSERT(Context::MIN_CONTEXT_SLOTS == 4);
+STATIC_ASSERT(NativeContext::kScopeInfoOffset ==
+ Context::OffsetOfElementAt(NativeContext::SCOPE_INFO_INDEX));
+STATIC_ASSERT(NativeContext::kPreviousOffset ==
+ Context::OffsetOfElementAt(NativeContext::PREVIOUS_INDEX));
+STATIC_ASSERT(NativeContext::kExtensionOffset ==
+ Context::OffsetOfElementAt(NativeContext::EXTENSION_INDEX));
+STATIC_ASSERT(NativeContext::kNativeContextOffset ==
+ Context::OffsetOfElementAt(NativeContext::NATIVE_CONTEXT_INDEX));
+
+STATIC_ASSERT(NativeContext::kStartOfStrongFieldsOffset ==
+ Context::OffsetOfElementAt(0));
+STATIC_ASSERT(NativeContext::kStartOfWeakFieldsOffset ==
+ Context::OffsetOfElementAt(NativeContext::FIRST_WEAK_SLOT));
+STATIC_ASSERT(NativeContext::kMicrotaskQueueOffset ==
+ Context::SizeFor(NativeContext::NATIVE_CONTEXT_SLOTS));
+STATIC_ASSERT(NativeContext::kSize ==
+ (Context::SizeFor(NativeContext::NATIVE_CONTEXT_SLOTS) +
+ kSystemPointerSize));
+
} // namespace internal
} // namespace v8
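
declaration_context, closure_context, module, and script_context above all share one shape: walk previous() links until a predicate holds. A toy version of that walk, with an invented CtxSketch type but the same chain invariant:

#include <cassert>

struct CtxSketch {
  bool is_declaration;
  CtxSketch* previous;  // null only past the native context in this sketch
};

// Walk outwards until a declaration context is found; the native context is
// a declaration context, so the loop always terminates on a valid chain.
CtxSketch* DeclarationContext(CtxSketch* current) {
  while (!current->is_declaration) {
    assert(current->previous != nullptr);
    current = current->previous;
  }
  return current;
}

int main() {
  CtxSketch native{/*is_declaration=*/true, nullptr};
  CtxSketch function{true, &native};
  CtxSketch block{false, &function};
  CtxSketch with{false, &block};
  assert(DeclarationContext(&with) == &function);
  assert(DeclarationContext(&native) == &native);
  return 0;
}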
diff --git a/deps/v8/src/contexts.h b/deps/v8/src/contexts.h
index f33c8b16bc..dec66691d5 100644
--- a/deps/v8/src/contexts.h
+++ b/deps/v8/src/contexts.h
@@ -7,19 +7,21 @@
#include "src/objects/fixed-array.h"
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
namespace v8 {
namespace internal {
class JSGlobalObject;
class JSGlobalProxy;
+class MicrotaskQueue;
class NativeContext;
class RegExpMatchInfo;
enum ContextLookupFlags {
FOLLOW_CONTEXT_CHAIN = 1 << 0,
FOLLOW_PROTOTYPE_CHAIN = 1 << 1,
- STOP_AT_DECLARATION_SCOPE = 1 << 2,
- SKIP_WITH_CONTEXT = 1 << 3,
DONT_FOLLOW_CHAINS = 0,
FOLLOW_CHAINS = FOLLOW_CONTEXT_CHAIN | FOLLOW_PROTOTYPE_CHAIN,
@@ -27,96 +29,50 @@ enum ContextLookupFlags {
// Heap-allocated activation contexts.
//
-// Contexts are implemented as FixedArray objects; the Context
-// class is a convenience interface casted on a FixedArray object.
+// Contexts are implemented as FixedArray-like objects having a fixed
+// header with a set of common fields.
//
// Note: Context must have no virtual functions and Context objects
// must always be allocated via Heap::AllocateContext() or
// Factory::NewContext.
-#define NATIVE_CONTEXT_INTRINSIC_FUNCTIONS(V) \
- V(ASYNC_FUNCTION_AWAIT_CAUGHT_INDEX, JSFunction, \
- async_function_await_caught) \
- V(ASYNC_FUNCTION_AWAIT_UNCAUGHT_INDEX, JSFunction, \
- async_function_await_uncaught) \
- V(ASYNC_FUNCTION_PROMISE_CREATE_INDEX, JSFunction, \
- async_function_promise_create) \
- V(ASYNC_FUNCTION_PROMISE_RELEASE_INDEX, JSFunction, \
- async_function_promise_release) \
- V(IS_ARRAYLIKE, JSFunction, is_arraylike) \
- V(GENERATOR_NEXT_INTERNAL, JSFunction, generator_next_internal) \
- V(MAKE_ERROR_INDEX, JSFunction, make_error) \
- V(MAKE_RANGE_ERROR_INDEX, JSFunction, make_range_error) \
- V(MAKE_SYNTAX_ERROR_INDEX, JSFunction, make_syntax_error) \
- V(MAKE_TYPE_ERROR_INDEX, JSFunction, make_type_error) \
- V(MAKE_URI_ERROR_INDEX, JSFunction, make_uri_error) \
- V(OBJECT_CREATE, JSFunction, object_create) \
- V(OBJECT_DEFINE_PROPERTIES, JSFunction, object_define_properties) \
- V(OBJECT_DEFINE_PROPERTY, JSFunction, object_define_property) \
- V(OBJECT_GET_PROTOTYPE_OF, JSFunction, object_get_prototype_of) \
- V(OBJECT_IS_EXTENSIBLE, JSFunction, object_is_extensible) \
- V(OBJECT_IS_FROZEN, JSFunction, object_is_frozen) \
- V(OBJECT_IS_SEALED, JSFunction, object_is_sealed) \
- V(OBJECT_KEYS, JSFunction, object_keys) \
- V(REGEXP_INTERNAL_MATCH, JSFunction, regexp_internal_match) \
- V(REFLECT_APPLY_INDEX, JSFunction, reflect_apply) \
- V(REFLECT_CONSTRUCT_INDEX, JSFunction, reflect_construct) \
- V(REFLECT_DEFINE_PROPERTY_INDEX, JSFunction, reflect_define_property) \
- V(REFLECT_DELETE_PROPERTY_INDEX, JSFunction, reflect_delete_property) \
- V(MATH_FLOOR_INDEX, JSFunction, math_floor) \
- V(MATH_POW_INDEX, JSFunction, math_pow) \
- V(NEW_PROMISE_CAPABILITY_INDEX, JSFunction, new_promise_capability) \
- V(PROMISE_INTERNAL_CONSTRUCTOR_INDEX, JSFunction, \
- promise_internal_constructor) \
- V(IS_PROMISE_INDEX, JSFunction, is_promise) \
- V(PROMISE_THEN_INDEX, JSFunction, promise_then) \
- V(ASYNC_GENERATOR_AWAIT_CAUGHT, JSFunction, async_generator_await_caught) \
- V(ASYNC_GENERATOR_AWAIT_UNCAUGHT, JSFunction, async_generator_await_uncaught)
-
-#define NATIVE_CONTEXT_IMPORTED_FIELDS(V) \
- V(ARRAY_ENTRIES_ITERATOR_INDEX, JSFunction, array_entries_iterator) \
- V(ARRAY_FOR_EACH_ITERATOR_INDEX, JSFunction, array_for_each_iterator) \
- V(ARRAY_KEYS_ITERATOR_INDEX, JSFunction, array_keys_iterator) \
- V(ARRAY_VALUES_ITERATOR_INDEX, JSFunction, array_values_iterator) \
- V(CANONICALIZE_LOCALE_LIST_FUNCTION_INDEX, JSFunction, \
- canonicalize_locale_list) \
- V(ERROR_FUNCTION_INDEX, JSFunction, error_function) \
- V(ERROR_TO_STRING, JSFunction, error_to_string) \
- V(EVAL_ERROR_FUNCTION_INDEX, JSFunction, eval_error_function) \
- V(GLOBAL_EVAL_FUN_INDEX, JSFunction, global_eval_fun) \
- V(GLOBAL_PROXY_FUNCTION_INDEX, JSFunction, global_proxy_function) \
- V(MAP_DELETE_INDEX, JSFunction, map_delete) \
- V(MAP_GET_INDEX, JSFunction, map_get) \
- V(MAP_HAS_INDEX, JSFunction, map_has) \
- V(MAP_SET_INDEX, JSFunction, map_set) \
- V(FUNCTION_HAS_INSTANCE_INDEX, JSFunction, function_has_instance) \
- V(OBJECT_VALUE_OF, JSFunction, object_value_of) \
- V(OBJECT_TO_STRING, JSFunction, object_to_string) \
- V(PROMISE_CATCH_INDEX, JSFunction, promise_catch) \
- V(PROMISE_FUNCTION_INDEX, JSFunction, promise_function) \
- V(RANGE_ERROR_FUNCTION_INDEX, JSFunction, range_error_function) \
- V(REFERENCE_ERROR_FUNCTION_INDEX, JSFunction, reference_error_function) \
- V(CACHED_OR_NEW_SERVICE_LOCALE_FUNCTION_INDEX, JSFunction, \
- cached_or_new_service) \
- V(RESOLVE_LOCALE_FUNCTION_INDEX, JSFunction, resolve_locale) \
- V(SET_ADD_INDEX, JSFunction, set_add) \
- V(SET_DELETE_INDEX, JSFunction, set_delete) \
- V(SET_HAS_INDEX, JSFunction, set_has) \
- V(SYNTAX_ERROR_FUNCTION_INDEX, JSFunction, syntax_error_function) \
- V(TYPE_ERROR_FUNCTION_INDEX, JSFunction, type_error_function) \
- V(URI_ERROR_FUNCTION_INDEX, JSFunction, uri_error_function) \
- V(WASM_COMPILE_ERROR_FUNCTION_INDEX, JSFunction, \
- wasm_compile_error_function) \
- V(WASM_LINK_ERROR_FUNCTION_INDEX, JSFunction, wasm_link_error_function) \
- V(WASM_RUNTIME_ERROR_FUNCTION_INDEX, JSFunction, \
- wasm_runtime_error_function) \
- V(WEAKMAP_SET_INDEX, JSFunction, weakmap_set) \
- V(WEAKMAP_GET_INDEX, JSFunction, weakmap_get) \
- V(WEAKSET_ADD_INDEX, JSFunction, weakset_add)
+#define NATIVE_CONTEXT_INTRINSIC_FUNCTIONS(V) \
+ V(ASYNC_FUNCTION_PROMISE_CREATE_INDEX, JSFunction, \
+ async_function_promise_create) \
+ V(IS_ARRAYLIKE, JSFunction, is_arraylike) \
+ V(GENERATOR_NEXT_INTERNAL, JSFunction, generator_next_internal) \
+ V(MAKE_ERROR_INDEX, JSFunction, make_error) \
+ V(MAKE_RANGE_ERROR_INDEX, JSFunction, make_range_error) \
+ V(MAKE_SYNTAX_ERROR_INDEX, JSFunction, make_syntax_error) \
+ V(MAKE_TYPE_ERROR_INDEX, JSFunction, make_type_error) \
+ V(MAKE_URI_ERROR_INDEX, JSFunction, make_uri_error) \
+ V(OBJECT_CREATE, JSFunction, object_create) \
+ V(OBJECT_DEFINE_PROPERTIES, JSFunction, object_define_properties) \
+ V(OBJECT_DEFINE_PROPERTY, JSFunction, object_define_property) \
+ V(OBJECT_GET_PROTOTYPE_OF, JSFunction, object_get_prototype_of) \
+ V(OBJECT_IS_EXTENSIBLE, JSFunction, object_is_extensible) \
+ V(OBJECT_IS_FROZEN, JSFunction, object_is_frozen) \
+ V(OBJECT_IS_SEALED, JSFunction, object_is_sealed) \
+ V(OBJECT_KEYS, JSFunction, object_keys) \
+ V(REGEXP_INTERNAL_MATCH, JSFunction, regexp_internal_match) \
+ V(REFLECT_APPLY_INDEX, JSFunction, reflect_apply) \
+ V(REFLECT_CONSTRUCT_INDEX, JSFunction, reflect_construct) \
+ V(REFLECT_DEFINE_PROPERTY_INDEX, JSFunction, reflect_define_property) \
+ V(REFLECT_DELETE_PROPERTY_INDEX, JSFunction, reflect_delete_property) \
+ V(MATH_FLOOR_INDEX, JSFunction, math_floor) \
+ V(MATH_POW_INDEX, JSFunction, math_pow) \
+ V(NEW_PROMISE_CAPABILITY_INDEX, JSFunction, new_promise_capability) \
+ V(PROMISE_INTERNAL_CONSTRUCTOR_INDEX, JSFunction, \
+ promise_internal_constructor) \
+ V(IS_PROMISE_INDEX, JSFunction, is_promise) \
+ V(PROMISE_THEN_INDEX, JSFunction, promise_then)
#define NATIVE_CONTEXT_FIELDS(V) \
V(GLOBAL_PROXY_INDEX, JSGlobalProxy, global_proxy_object) \
- V(EMBEDDER_DATA_INDEX, FixedArray, embedder_data) \
+ /* TODO(ishell): Actually we store exactly EmbedderDataArray here but */ \
+ /* it's already UBSan-friendly and doesn't require a star... So declare */ \
+ /* it as a HeapObject for now. */ \
+ V(EMBEDDER_DATA_INDEX, HeapObject, embedder_data) \
/* Below is alpha-sorted */ \
V(ACCESSOR_PROPERTY_DESCRIPTOR_MAP_INDEX, Map, \
accessor_property_descriptor_map) \
@@ -125,12 +81,14 @@ enum ContextLookupFlags {
V(ARRAY_BUFFER_MAP_INDEX, Map, array_buffer_map) \
V(ARRAY_BUFFER_NOINIT_FUN_INDEX, JSFunction, array_buffer_noinit_fun) \
V(ARRAY_FUNCTION_INDEX, JSFunction, array_function) \
+ V(ARRAY_JOIN_STACK_INDEX, HeapObject, array_join_stack) \
V(ASYNC_FROM_SYNC_ITERATOR_MAP_INDEX, Map, async_from_sync_iterator_map) \
V(ASYNC_FUNCTION_AWAIT_REJECT_SHARED_FUN, SharedFunctionInfo, \
async_function_await_reject_shared_fun) \
V(ASYNC_FUNCTION_AWAIT_RESOLVE_SHARED_FUN, SharedFunctionInfo, \
async_function_await_resolve_shared_fun) \
V(ASYNC_FUNCTION_FUNCTION_INDEX, JSFunction, async_function_constructor) \
+ V(ASYNC_FUNCTION_OBJECT_MAP_INDEX, Map, async_function_object_map) \
V(ASYNC_GENERATOR_FUNCTION_FUNCTION_INDEX, JSFunction, \
async_generator_function_function) \
V(ASYNC_ITERATOR_VALUE_UNWRAP_SHARED_FUN, SharedFunctionInfo, \
@@ -191,8 +149,13 @@ enum ContextLookupFlags {
V(INITIAL_ASYNC_GENERATOR_PROTOTYPE_INDEX, JSObject, \
initial_async_generator_prototype) \
V(INITIAL_ITERATOR_PROTOTYPE_INDEX, JSObject, initial_iterator_prototype) \
+ V(INITIAL_MAP_ITERATOR_PROTOTYPE_INDEX, JSObject, \
+ initial_map_iterator_prototype) \
V(INITIAL_MAP_PROTOTYPE_MAP_INDEX, Map, initial_map_prototype_map) \
V(INITIAL_OBJECT_PROTOTYPE_INDEX, JSObject, initial_object_prototype) \
+ V(INITIAL_SET_ITERATOR_PROTOTYPE_INDEX, JSObject, \
+ initial_set_iterator_prototype) \
+ V(INITIAL_SET_PROTOTYPE_INDEX, JSObject, initial_set_prototype) \
V(INITIAL_SET_PROTOTYPE_MAP_INDEX, Map, initial_set_prototype_map) \
V(INITIAL_STRING_ITERATOR_MAP_INDEX, Map, initial_string_iterator_map) \
V(INITIAL_STRING_ITERATOR_PROTOTYPE_INDEX, JSObject, \
@@ -203,17 +166,14 @@ enum ContextLookupFlags {
V(INT16_ARRAY_FUN_INDEX, JSFunction, int16_array_fun) \
V(INT32_ARRAY_FUN_INDEX, JSFunction, int32_array_fun) \
V(INT8_ARRAY_FUN_INDEX, JSFunction, int8_array_fun) \
- V(INTERNAL_ARRAY_FUNCTION_INDEX, JSFunction, internal_array_function) \
- V(ITERATOR_RESULT_MAP_INDEX, Map, iterator_result_map) \
+ V(INTL_COLLATOR_FUNCTION_INDEX, JSFunction, intl_collator_function) \
V(INTL_DATE_TIME_FORMAT_FUNCTION_INDEX, JSFunction, \
intl_date_time_format_function) \
V(INTL_NUMBER_FORMAT_FUNCTION_INDEX, JSFunction, \
intl_number_format_function) \
V(INTL_LOCALE_FUNCTION_INDEX, JSFunction, intl_locale_function) \
- V(INTL_COLLATOR_FUNCTION_INDEX, JSFunction, intl_collator_function) \
- V(INTL_PLURAL_RULES_FUNCTION_INDEX, JSFunction, intl_plural_rules_function) \
- V(INTL_V8_BREAK_ITERATOR_FUNCTION_INDEX, JSFunction, \
- intl_v8_break_iterator_function) \
+ V(INTL_SEGMENT_ITERATOR_MAP_INDEX, Map, intl_segment_iterator_map) \
+ V(ITERATOR_RESULT_MAP_INDEX, Map, iterator_result_map) \
V(JS_ARRAY_PACKED_SMI_ELEMENTS_MAP_INDEX, Map, \
js_array_packed_smi_elements_map) \
V(JS_ARRAY_HOLEY_SMI_ELEMENTS_MAP_INDEX, Map, \
@@ -229,7 +189,11 @@ enum ContextLookupFlags {
V(JS_MODULE_NAMESPACE_MAP, Map, js_module_namespace_map) \
V(JS_SET_FUN_INDEX, JSFunction, js_set_fun) \
V(JS_SET_MAP_INDEX, Map, js_set_map) \
+ V(JS_WEAK_CELL_MAP_INDEX, Map, js_weak_cell_map) \
+ V(JS_WEAK_FACTORY_CLEANUP_ITERATOR_MAP_INDEX, Map, \
+ js_weak_factory_cleanup_iterator_map) \
V(JS_WEAK_MAP_FUN_INDEX, JSFunction, js_weak_map_fun) \
+ V(JS_WEAK_REF_MAP_INDEX, Map, js_weak_ref_map) \
V(JS_WEAK_SET_FUN_INDEX, JSFunction, js_weak_set_fun) \
V(MAP_CACHE_INDEX, Object, map_cache) \
V(MAP_KEY_ITERATOR_MAP_INDEX, Map, map_key_iterator_map) \
@@ -239,7 +203,6 @@ enum ContextLookupFlags {
V(MATH_RANDOM_STATE_INDEX, ByteArray, math_random_state) \
V(MATH_RANDOM_CACHE_INDEX, FixedDoubleArray, math_random_cache) \
V(MESSAGE_LISTENERS_INDEX, TemplateList, message_listeners) \
- V(NATIVES_UTILS_OBJECT_INDEX, Object, natives_utils_object) \
V(NORMALIZED_MAP_CACHE_INDEX, Object, normalized_map_cache) \
V(NUMBER_FUNCTION_INDEX, JSFunction, number_function) \
V(OBJECT_FUNCTION_INDEX, JSFunction, object_function) \
@@ -277,6 +240,7 @@ enum ContextLookupFlags {
V(INITIAL_REGEXP_STRING_ITERATOR_PROTOTYPE_MAP_INDEX, Map, \
initial_regexp_string_iterator_prototype_map) \
V(REGEXP_RESULT_MAP_INDEX, Map, regexp_result_map) \
+ V(REGEXP_PROTOTYPE_INDEX, JSObject, regexp_prototype) \
V(SCRIPT_CONTEXT_TABLE_INDEX, ScriptContextTable, script_context_table) \
V(SECURITY_TOKEN_INDEX, Object, security_token) \
V(SERIALIZED_OBJECTS, FixedArray, serialized_objects) \
@@ -347,8 +311,41 @@ enum ContextLookupFlags {
V(UINT32_ARRAY_FUN_INDEX, JSFunction, uint32_array_fun) \
V(UINT8_ARRAY_FUN_INDEX, JSFunction, uint8_array_fun) \
V(UINT8_CLAMPED_ARRAY_FUN_INDEX, JSFunction, uint8_clamped_array_fun) \
- NATIVE_CONTEXT_INTRINSIC_FUNCTIONS(V) \
- NATIVE_CONTEXT_IMPORTED_FIELDS(V)
+ V(ARRAY_ENTRIES_ITERATOR_INDEX, JSFunction, array_entries_iterator) \
+ V(ARRAY_FOR_EACH_ITERATOR_INDEX, JSFunction, array_for_each_iterator) \
+ V(ARRAY_KEYS_ITERATOR_INDEX, JSFunction, array_keys_iterator) \
+ V(ARRAY_VALUES_ITERATOR_INDEX, JSFunction, array_values_iterator) \
+ V(ERROR_FUNCTION_INDEX, JSFunction, error_function) \
+ V(ERROR_TO_STRING, JSFunction, error_to_string) \
+ V(EVAL_ERROR_FUNCTION_INDEX, JSFunction, eval_error_function) \
+ V(GLOBAL_EVAL_FUN_INDEX, JSFunction, global_eval_fun) \
+ V(GLOBAL_PROXY_FUNCTION_INDEX, JSFunction, global_proxy_function) \
+ V(MAP_DELETE_INDEX, JSFunction, map_delete) \
+ V(MAP_GET_INDEX, JSFunction, map_get) \
+ V(MAP_HAS_INDEX, JSFunction, map_has) \
+ V(MAP_SET_INDEX, JSFunction, map_set) \
+ V(FUNCTION_HAS_INSTANCE_INDEX, JSFunction, function_has_instance) \
+ V(OBJECT_TO_STRING, JSFunction, object_to_string) \
+ V(PROMISE_ALL_INDEX, JSFunction, promise_all) \
+ V(PROMISE_CATCH_INDEX, JSFunction, promise_catch) \
+ V(PROMISE_FUNCTION_INDEX, JSFunction, promise_function) \
+ V(RANGE_ERROR_FUNCTION_INDEX, JSFunction, range_error_function) \
+ V(REFERENCE_ERROR_FUNCTION_INDEX, JSFunction, reference_error_function) \
+ V(SET_ADD_INDEX, JSFunction, set_add) \
+ V(SET_DELETE_INDEX, JSFunction, set_delete) \
+ V(SET_HAS_INDEX, JSFunction, set_has) \
+ V(SYNTAX_ERROR_FUNCTION_INDEX, JSFunction, syntax_error_function) \
+ V(TYPE_ERROR_FUNCTION_INDEX, JSFunction, type_error_function) \
+ V(URI_ERROR_FUNCTION_INDEX, JSFunction, uri_error_function) \
+ V(WASM_COMPILE_ERROR_FUNCTION_INDEX, JSFunction, \
+ wasm_compile_error_function) \
+ V(WASM_LINK_ERROR_FUNCTION_INDEX, JSFunction, wasm_link_error_function) \
+ V(WASM_RUNTIME_ERROR_FUNCTION_INDEX, JSFunction, \
+ wasm_runtime_error_function) \
+ V(WEAKMAP_SET_INDEX, JSFunction, weakmap_set) \
+ V(WEAKMAP_GET_INDEX, JSFunction, weakmap_get) \
+ V(WEAKSET_ADD_INDEX, JSFunction, weakset_add) \
+ NATIVE_CONTEXT_INTRINSIC_FUNCTIONS(V)
// A table of all script contexts. Every loaded top-level script with top-level
// lexical declarations contributes its ScriptContext into this table.
@@ -357,8 +354,7 @@ enum ContextLookupFlags {
// the subsequent slots 1..used contain ScriptContexts.
class ScriptContextTable : public FixedArray {
public:
- // Conversions.
- static inline ScriptContextTable* cast(Object* context);
+ DECL_CAST(ScriptContextTable)
struct LookupResult {
int context_index;
@@ -391,7 +387,7 @@ class ScriptContextTable : public FixedArray {
static const int kFirstContextSlotIndex = 1;
static const int kMinLength = kFirstContextSlotIndex;
- DISALLOW_IMPLICIT_CONSTRUCTORS(ScriptContextTable);
+ OBJECT_CONSTRUCTORS(ScriptContextTable, FixedArray);
};
// JSFunctions are pairs (context, function code), sometimes also called
@@ -412,11 +408,10 @@ class ScriptContextTable : public FixedArray {
//
// [ extension ] Additional data.
//
+// For native contexts, it contains the global object.
// For module contexts, it contains the module object.
-//
-// For block contexts, it may contain an "extension object"
-// (see below).
-//
+// For await contexts, it contains the generator object.
+// For block contexts, it may contain an "extension object".
// For with contexts, it contains an "extension object".
//
// An "extension object" is used to dynamically extend a
@@ -440,21 +435,67 @@ class ScriptContextTable : public FixedArray {
// Script contexts from all top-level scripts are gathered in
// ScriptContextTable.
-class Context : public FixedArray, public NeverReadOnlySpaceObject {
+class Context : public HeapObject {
public:
- // Conversions.
- static inline Context* cast(Object* context);
+ NEVER_READ_ONLY_SPACE
+
+ DECL_CAST(Context)
+
+ // [length]: length of the context.
+ V8_INLINE int length() const;
+ V8_INLINE void set_length(int value);
+
+ // Setter and getter for elements.
+ V8_INLINE Object get(int index) const;
+ V8_INLINE void set(int index, Object value);
+ // Setter with explicit barrier mode.
+ V8_INLINE void set(int index, Object value, WriteBarrierMode mode);
+
+ // Layout description.
+#define CONTEXT_FIELDS(V) \
+ V(kLengthOffset, kTaggedSize) \
+ /* TODO(ishell): remove this FixedArray-like header size. */ \
+ V(kHeaderSize, 0) \
+ V(kStartOfTaggedFieldsOffset, 0) \
+ V(kStartOfStrongFieldsOffset, 0) \
+ /* Tagged fields. */ \
+ V(kScopeInfoOffset, kTaggedSize) \
+ V(kPreviousOffset, kTaggedSize) \
+ V(kExtensionOffset, kTaggedSize) \
+ V(kNativeContextOffset, kTaggedSize) \
+ /* Header size. */ \
+ /* TODO(ishell): use this as header size once MIN_CONTEXT_SLOTS */ \
+ /* is removed in favour of offset-based access to common fields. */ \
+ V(kTodoHeaderSize, 0)
+
+ DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize, CONTEXT_FIELDS)
+#undef CONTEXT_FIELDS
+
+ // Garbage collection support.
+ V8_INLINE static constexpr int SizeFor(int length) {
+ // TODO(ishell): switch to a kTodoHeaderSize-based approach once we no longer
+ // reference common Context fields via index
+ return kHeaderSize + length * kTaggedSize;
+ }
+
+ // Code Generation support.
+ // Offset of the element from the beginning of object.
+ V8_INLINE static constexpr int OffsetOfElementAt(int index) {
+ return SizeFor(index);
+ }
+ // Offset of the element from the heap object pointer.
+ V8_INLINE static constexpr int SlotOffset(int index) {
+ return SizeFor(index) - kHeapObjectTag;
+ }
+ // TODO(ishell): eventually migrate to offset-based access instead of
+ // index-based.
// The default context slot layout; indices are FixedArray slot indices.
enum Field {
+ // TODO(ishell): use offset-based approach for accessing common values.
// These slots are in all contexts.
SCOPE_INFO_INDEX,
PREVIOUS_INDEX,
- // The extension slot is used for either the global object (in native
- // contexts), eval extension object (function contexts), subject of with
- // (with contexts), or the variable name (catch contexts), the serialized
- // scope info (block contexts), the module instance (module contexts), or
- // the generator object (await contexts).
EXTENSION_INDEX,
NATIVE_CONTEXT_INDEX,
@@ -474,7 +515,9 @@ class Context : public FixedArray, public NeverReadOnlySpaceObject {
FIRST_WEAK_SLOT = OPTIMIZED_CODE_LIST,
FIRST_JS_ARRAY_MAP_SLOT = JS_ARRAY_PACKED_SMI_ELEMENTS_MAP_INDEX,
+ // TODO(ishell): Remove once it becomes zero.
MIN_CONTEXT_SLOTS = GLOBAL_PROXY_INDEX,
+
// This slot holds the thrown value in catch contexts.
THROWN_OBJECT_INDEX = MIN_CONTEXT_SLOTS,
@@ -496,44 +539,44 @@ class Context : public FixedArray, public NeverReadOnlySpaceObject {
int GetErrorsThrown();
// Direct slot access.
- inline void set_scope_info(ScopeInfo* scope_info);
- inline Context* previous();
- inline void set_previous(Context* context);
+ inline void set_scope_info(ScopeInfo scope_info);
+ inline Context previous();
+ inline void set_previous(Context context);
- inline Object* next_context_link();
+ inline Object next_context_link();
inline bool has_extension();
- inline HeapObject* extension();
- inline void set_extension(HeapObject* object);
- JSObject* extension_object();
- JSReceiver* extension_receiver();
- ScopeInfo* scope_info();
+ inline HeapObject extension();
+ inline void set_extension(HeapObject object);
+ JSObject extension_object();
+ JSReceiver extension_receiver();
+ ScopeInfo scope_info();
// Find the module context (assuming there is one) and return the associated
// module object.
- Module* module();
+ Module module();
// Get the context where var declarations will be hoisted to, which
// may be the context itself.
- Context* declaration_context();
+ Context declaration_context();
bool is_declaration_context();
// Get the next closure's context on the context chain.
- Context* closure_context();
+ Context closure_context();
// Returns a JSGlobalProxy object or null.
- JSGlobalProxy* global_proxy();
- void set_global_proxy(JSGlobalProxy* global);
+ JSGlobalProxy global_proxy();
+ void set_global_proxy(JSGlobalProxy global);
// Get the JSGlobalObject object.
- V8_EXPORT_PRIVATE JSGlobalObject* global_object();
+ V8_EXPORT_PRIVATE JSGlobalObject global_object();
// Get the script context by traversing the context chain.
- Context* script_context();
+ Context script_context();
// Compute the native context.
- inline NativeContext* native_context() const;
- inline void set_native_context(NativeContext* context);
+ inline NativeContext native_context() const;
+ inline void set_native_context(NativeContext context);
// Predicates for context types. IsNativeContext is already defined on
// Object.
@@ -547,26 +590,25 @@ class Context : public FixedArray, public NeverReadOnlySpaceObject {
inline bool IsEvalContext() const;
inline bool IsScriptContext() const;
- inline bool HasSameSecurityTokenAs(Context* that) const;
+ inline bool HasSameSecurityTokenAs(Context that) const;
// The native context also stores a list of all optimized code and a
// list of all deoptimized code, which are needed by the deoptimizer.
- void AddOptimizedCode(Code* code);
- void SetOptimizedCodeListHead(Object* head);
- Object* OptimizedCodeListHead();
- void SetDeoptimizedCodeListHead(Object* head);
- Object* DeoptimizedCodeListHead();
+ void AddOptimizedCode(Code code);
+ void SetOptimizedCodeListHead(Object head);
+ Object OptimizedCodeListHead();
+ void SetDeoptimizedCodeListHead(Object head);
+ Object DeoptimizedCodeListHead();
Handle<Object> ErrorMessageForCodeGenerationFromStrings();
- static int ImportedFieldIndexForName(Handle<String> name);
static int IntrinsicIndexForName(Handle<String> name);
static int IntrinsicIndexForName(const unsigned char* name, int length);
#define NATIVE_CONTEXT_FIELD_ACCESSORS(index, type, name) \
- inline void set_##name(type* value); \
- inline bool is_##name(type* value) const; \
- inline type* name() const;
+ inline void set_##name(type value); \
+ inline bool is_##name(type value) const; \
+ inline type name() const;
NATIVE_CONTEXT_FIELDS(NATIVE_CONTEXT_FIELD_ACCESSORS)
#undef NATIVE_CONTEXT_FIELD_ACCESSORS
@@ -591,16 +633,12 @@ class Context : public FixedArray, public NeverReadOnlySpaceObject {
// 4) result.is_null():
// There was no binding found, *index is always -1 and *attributes is
// always ABSENT.
- Handle<Object> Lookup(Handle<String> name, ContextLookupFlags flags,
- int* index, PropertyAttributes* attributes,
- InitializationFlag* init_flag,
- VariableMode* variable_mode,
- bool* is_sloppy_function_name = nullptr);
-
- // Code generation support.
- static int SlotOffset(int index) {
- return kHeaderSize + index * kPointerSize - kHeapObjectTag;
- }
+ static Handle<Object> Lookup(Handle<Context> context, Handle<String> name,
+ ContextLookupFlags flags, int* index,
+ PropertyAttributes* attributes,
+ InitializationFlag* init_flag,
+ VariableMode* variable_mode,
+ bool* is_sloppy_function_name = nullptr);
static inline int FunctionMapIndex(LanguageMode language_mode,
FunctionKind kind, bool has_prototype_slot,
@@ -612,32 +650,66 @@ class Context : public FixedArray, public NeverReadOnlySpaceObject {
return elements_kind + FIRST_JS_ARRAY_MAP_SLOT;
}
- inline Map* GetInitialJSArrayMap(ElementsKind kind) const;
+ inline Map GetInitialJSArrayMap(ElementsKind kind) const;
- static const int kSize = kHeaderSize + NATIVE_CONTEXT_SLOTS * kPointerSize;
static const int kNotFound = -1;
- class BodyDescriptor;
+ // Dispatched behavior.
+ DECL_PRINTER(Context)
+ DECL_VERIFIER(Context)
+
+ typedef FlexibleBodyDescriptor<kStartOfTaggedFieldsOffset> BodyDescriptor;
private:
#ifdef DEBUG
// Bootstrapping-aware type checks.
V8_EXPORT_PRIVATE static bool IsBootstrappingOrNativeContext(Isolate* isolate,
- Object* object);
- static bool IsBootstrappingOrValidParentContext(Object* object, Context* kid);
+ Object object);
+ static bool IsBootstrappingOrValidParentContext(Object object, Context kid);
#endif
- STATIC_ASSERT(kHeaderSize == Internals::kContextHeaderSize);
- STATIC_ASSERT(EMBEDDER_DATA_INDEX == Internals::kContextEmbedderDataIndex);
+ OBJECT_CONSTRUCTORS(Context, HeapObject)
};
class NativeContext : public Context {
public:
- static inline NativeContext* cast(Object* context);
+ DECL_CAST(NativeContext)
// TODO(neis): Move some stuff from Context here.
+ // [microtask_queue]: pointer to the MicrotaskQueue object.
+ DECL_PRIMITIVE_ACCESSORS(microtask_queue, MicrotaskQueue*)
+
+ // Dispatched behavior.
+ DECL_PRINTER(NativeContext)
+ DECL_VERIFIER(NativeContext)
+
+ // Layout description.
+#define NATIVE_CONTEXT_FIELDS_DEF(V) \
+ /* TODO(ishell): move definition of common context offsets to Context. */ \
+ V(kStartOfNativeContextFieldsOffset, \
+ (FIRST_WEAK_SLOT - MIN_CONTEXT_SLOTS) * kTaggedSize) \
+ V(kEndOfStrongFieldsOffset, 0) \
+ V(kStartOfWeakFieldsOffset, \
+ (NATIVE_CONTEXT_SLOTS - FIRST_WEAK_SLOT) * kTaggedSize) \
+ V(kEndOfWeakFieldsOffset, 0) \
+ V(kEndOfNativeContextFieldsOffset, 0) \
+ V(kEndOfTaggedFieldsOffset, 0) \
+ /* Raw data. */ \
+ V(kMicrotaskQueueOffset, kSystemPointerSize) \
+ /* Total size. */ \
+ V(kSize, 0)
+
+ DEFINE_FIELD_OFFSET_CONSTANTS(Context::kTodoHeaderSize,
+ NATIVE_CONTEXT_FIELDS_DEF)
+#undef NATIVE_CONTEXT_FIELDS_DEF
+
+ class BodyDescriptor;
+
private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(NativeContext);
+ STATIC_ASSERT(OffsetOfElementAt(EMBEDDER_DATA_INDEX) ==
+ Internals::kNativeContextEmbedderDataOffset);
+
+ OBJECT_CONSTRUCTORS(NativeContext, Context);
};
typedef Context::Field ContextField;
@@ -645,4 +717,6 @@ typedef Context::Field ContextField;
} // namespace internal
} // namespace v8
+#include "src/objects/object-macros-undef.h"
+
#endif // V8_CONTEXTS_H_
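
CONTEXT_FIELDS and NATIVE_CONTEXT_FIELDS_DEF above rely on DEFINE_FIELD_OFFSET_CONSTANTS turning a V(name, size) list into running byte offsets, with zero-sized entries acting as section markers. A reduced, compilable imitation of that expansion (simplified field list; the enum trick matches the macro's shape, not its exact V8 spelling):

#include <iostream>

constexpr int kTaggedSize = 8;  // assumed 64-bit slots

#define FIELDS(V)                      \
  V(kLengthOffset, kTaggedSize)        \
  V(kHeaderSize, 0)                    \
  V(kScopeInfoOffset, kTaggedSize)     \
  V(kPreviousOffset, kTaggedSize)      \
  V(kExtensionOffset, kTaggedSize)     \
  V(kNativeContextOffset, kTaggedSize) \
  V(kTodoHeaderSize, 0)

// Each entry becomes Name = <running total> and NameEnd = Name + Size - 1,
// so the next enumerator resumes at Name + Size; zero-sized entries are
// pure boundary markers.
enum {
  kFieldsStartMinusOne = kTaggedSize - 1,  // fields start after the map word
#define FIELD_OFFSET(Name, Size) Name, Name##End = Name + (Size)-1,
  FIELDS(FIELD_OFFSET)
#undef FIELD_OFFSET
};

int main() {
  std::cout << "kHeaderSize=" << kHeaderSize
            << " kScopeInfoOffset=" << kScopeInfoOffset
            << " kTodoHeaderSize=" << kTodoHeaderSize << "\n";
  // Prints kHeaderSize=16 kScopeInfoOffset=16 kTodoHeaderSize=48.
  return 0;
}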
diff --git a/deps/v8/src/conversions-inl.h b/deps/v8/src/conversions-inl.h
index f2a6be4227..3bdb2efcb4 100644
--- a/deps/v8/src/conversions-inl.h
+++ b/deps/v8/src/conversions-inl.h
@@ -19,6 +19,7 @@
#include "src/conversions.h"
#include "src/double.h"
#include "src/objects-inl.h"
+#include "src/objects/heap-number-inl.h"
namespace v8 {
namespace internal {
@@ -71,7 +72,7 @@ inline double DoubleToInteger(double x) {
return (x >= 0) ? std::floor(x) : std::ceil(x);
}
-
+// Implements most of https://tc39.github.io/ecma262/#sec-toint32.
int32_t DoubleToInt32(double x) {
if ((std::isfinite(x)) && (x <= INT_MAX) && (x >= INT_MIN)) {
int32_t i = static_cast<int32_t>(x);
@@ -79,13 +80,18 @@ int32_t DoubleToInt32(double x) {
}
Double d(x);
int exponent = d.Exponent();
+ uint64_t bits;
if (exponent < 0) {
if (exponent <= -Double::kSignificandSize) return 0;
- return d.Sign() * static_cast<int32_t>(d.Significand() >> -exponent);
+ bits = d.Significand() >> -exponent;
} else {
if (exponent > 31) return 0;
- return d.Sign() * static_cast<int32_t>(d.Significand() << exponent);
+ // Masking to a 32-bit value ensures that the result of the
+ // static_cast<int64_t> below is not the minimal int64_t value,
+ // which would overflow on multiplication with d.Sign().
+ bits = (d.Significand() << exponent) & 0xFFFFFFFFul;
}
+ return static_cast<int32_t>(d.Sign() * static_cast<int64_t>(bits));
}
bool DoubleToSmiInteger(double value, int* smi_int_value) {
@@ -142,17 +148,17 @@ bool DoubleToUint32IfEqualToSelf(double value, uint32_t* uint32_value) {
return false;
}
-int32_t NumberToInt32(Object* number) {
+int32_t NumberToInt32(Object number) {
if (number->IsSmi()) return Smi::ToInt(number);
return DoubleToInt32(number->Number());
}
-uint32_t NumberToUint32(Object* number) {
+uint32_t NumberToUint32(Object number) {
if (number->IsSmi()) return Smi::ToInt(number);
return DoubleToUint32(number->Number());
}
-uint32_t PositiveNumberToUint32(Object* number) {
+uint32_t PositiveNumberToUint32(Object number) {
if (number->IsSmi()) {
int value = Smi::ToInt(number);
if (value <= 0) return 0;
@@ -167,7 +173,7 @@ uint32_t PositiveNumberToUint32(Object* number) {
return max;
}
-int64_t NumberToInt64(Object* number) {
+int64_t NumberToInt64(Object number) {
if (number->IsSmi()) return Smi::ToInt(number);
double d = number->Number();
if (std::isnan(d)) return 0;
@@ -180,7 +186,7 @@ int64_t NumberToInt64(Object* number) {
return static_cast<int64_t>(d);
}
-uint64_t PositiveNumberToUint64(Object* number) {
+uint64_t PositiveNumberToUint64(Object number) {
if (number->IsSmi()) {
int value = Smi::ToInt(number);
if (value <= 0) return 0;
@@ -195,7 +201,7 @@ uint64_t PositiveNumberToUint64(Object* number) {
return max;
}
-bool TryNumberToSize(Object* number, size_t* result) {
+bool TryNumberToSize(Object number, size_t* result) {
// Do not create handles in this function! Don't use SealHandleScope because
// the function can be used concurrently.
if (number->IsSmi()) {
@@ -224,14 +230,13 @@ bool TryNumberToSize(Object* number, size_t* result) {
}
}
-size_t NumberToSize(Object* number) {
+size_t NumberToSize(Object number) {
size_t result = 0;
bool is_valid = TryNumberToSize(number, &result);
CHECK(is_valid);
return result;
}
-
uint32_t DoubleToUint32(double x) {
return static_cast<uint32_t>(DoubleToInt32(x));
}
diff --git a/deps/v8/src/conversions.cc b/deps/v8/src/conversions.cc
index ee40201544..2a1ec46ef3 100644
--- a/deps/v8/src/conversions.cc
+++ b/deps/v8/src/conversions.cc
@@ -17,7 +17,6 @@
#include "src/objects-inl.h"
#include "src/objects/bigint.h"
#include "src/strtod.h"
-#include "src/unicode-cache-inl.h"
#include "src/utils.h"
#if defined(_STLP_VENDOR_CSTD)
@@ -60,10 +59,9 @@ bool SubStringEquals(Iterator* current, EndMark end, const char* substring) {
// Returns true if a nonspace character has been found and false if the
// end was reached before finding a nonspace character.
template <class Iterator, class EndMark>
-inline bool AdvanceToNonspace(UnicodeCache* unicode_cache, Iterator* current,
- EndMark end) {
+inline bool AdvanceToNonspace(Iterator* current, EndMark end) {
while (*current != end) {
- if (!unicode_cache->IsWhiteSpaceOrLineTerminator(**current)) return true;
+ if (!IsWhiteSpaceOrLineTerminator(**current)) return true;
++*current;
}
return false;
@@ -71,8 +69,7 @@ inline bool AdvanceToNonspace(UnicodeCache* unicode_cache, Iterator* current,
// Parsing integers with radix 2, 4, 8, 16, 32. Assumes current != end.
template <int radix_log_2, class Iterator, class EndMark>
-double InternalStringToIntDouble(UnicodeCache* unicode_cache, Iterator current,
- EndMark end, bool negative,
+double InternalStringToIntDouble(Iterator current, EndMark end, bool negative,
bool allow_trailing_junk) {
DCHECK(current != end);
@@ -99,8 +96,7 @@ double InternalStringToIntDouble(UnicodeCache* unicode_cache, Iterator current,
} else if (*current >= 'A' && *current < lim_A) {
digit = static_cast<char>(*current) - 'A' + 10;
} else {
- if (allow_trailing_junk ||
- !AdvanceToNonspace(unicode_cache, &current, end)) {
+ if (allow_trailing_junk || !AdvanceToNonspace(&current, end)) {
break;
} else {
return JunkStringValue();
@@ -131,8 +127,7 @@ double InternalStringToIntDouble(UnicodeCache* unicode_cache, Iterator current,
exponent += radix_log_2;
}
- if (!allow_trailing_junk &&
- AdvanceToNonspace(unicode_cache, &current, end)) {
+ if (!allow_trailing_junk && AdvanceToNonspace(&current, end)) {
return JunkStringValue();
}
@@ -214,18 +209,20 @@ class StringToIntHelper {
bool IsOneByte() const {
return raw_one_byte_subject_ != nullptr ||
- subject_->IsOneByteRepresentationUnderneath();
+ String::IsOneByteRepresentationUnderneath(*subject_);
}
Vector<const uint8_t> GetOneByteVector() {
if (raw_one_byte_subject_ != nullptr) {
return Vector<const uint8_t>(raw_one_byte_subject_, length_);
}
- return subject_->GetFlatContent().ToOneByteVector();
+ DisallowHeapAllocation no_gc;
+ return subject_->GetFlatContent(no_gc).ToOneByteVector();
}
Vector<const uc16> GetTwoByteVector() {
- return subject_->GetFlatContent().ToUC16Vector();
+ DisallowHeapAllocation no_gc;
+ return subject_->GetFlatContent(no_gc).ToUC16Vector();
}
// Subclasses get access to internal state:
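Passing the DisallowHeapAllocation scope into GetFlatContent makes the no-GC requirement part of the signature: the returned vectors point directly into the heap, so they are only valid while no allocation (and hence no GC) can occur. A hedged sketch of the calling pattern, using the V8-internal types named in this hunk:

    DisallowHeapAllocation no_gc;                       // the witness object
    String::FlatContent flat = subject_->GetFlatContent(no_gc);
    if (flat.IsOneByte()) {
      Vector<const uint8_t> chars = flat.ToOneByteVector();
      // Read chars here; allocating on the V8 heap inside this scope would be
      // caught in debug builds and could invalidate the raw pointers.
    }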
@@ -296,9 +293,8 @@ void StringToIntHelper::DetectRadixInternal(Char current, int length) {
Char start = current;
length_ = length;
Char end = start + length;
- UnicodeCache* unicode_cache = isolate_->unicode_cache();
- if (!AdvanceToNonspace(unicode_cache, &current, end)) {
+ if (!AdvanceToNonspace(&current, end)) {
return set_state(kEmpty);
}
@@ -429,8 +425,7 @@ void StringToIntHelper::ParseInternal(Char start) {
ResultMultiplyAdd(multiplier, part);
} while (!done);
- if (!allow_trailing_junk_ &&
- AdvanceToNonspace(isolate_->unicode_cache(), &current, end)) {
+ if (!allow_trailing_junk_ && AdvanceToNonspace(&current, end)) {
return set_state(kJunk);
}
@@ -488,28 +483,27 @@ class NumberParseIntHelper : public StringToIntHelper {
double HandlePowerOfTwoCase(Char start) {
Char current = start + cursor();
Char end = start + length();
- UnicodeCache* unicode_cache = isolate()->unicode_cache();
const bool allow_trailing_junk = true;
// GetResult() will take care of the sign bit, so ignore it for now.
const bool negative = false;
switch (radix()) {
case 2:
- return InternalStringToIntDouble<1>(unicode_cache, current, end,
- negative, allow_trailing_junk);
+ return InternalStringToIntDouble<1>(current, end, negative,
+ allow_trailing_junk);
case 4:
- return InternalStringToIntDouble<2>(unicode_cache, current, end,
- negative, allow_trailing_junk);
+ return InternalStringToIntDouble<2>(current, end, negative,
+ allow_trailing_junk);
case 8:
- return InternalStringToIntDouble<3>(unicode_cache, current, end,
- negative, allow_trailing_junk);
+ return InternalStringToIntDouble<3>(current, end, negative,
+ allow_trailing_junk);
case 16:
- return InternalStringToIntDouble<4>(unicode_cache, current, end,
- negative, allow_trailing_junk);
+ return InternalStringToIntDouble<4>(current, end, negative,
+ allow_trailing_junk);
case 32:
- return InternalStringToIntDouble<5>(unicode_cache, current, end,
- negative, allow_trailing_junk);
+ return InternalStringToIntDouble<5>(current, end, negative,
+ allow_trailing_junk);
default:
UNREACHABLE();
}
@@ -552,8 +546,8 @@ class NumberParseIntHelper : public StringToIntHelper {
// 2. *current - gets the current character in the sequence.
// 3. ++current (advances the position).
template <class Iterator, class EndMark>
-double InternalStringToDouble(UnicodeCache* unicode_cache, Iterator current,
- EndMark end, int flags, double empty_string_val) {
+double InternalStringToDouble(Iterator current, EndMark end, int flags,
+ double empty_string_val) {
// To make sure that iterator dereferencing is valid the following
// convention is used:
// 1. Each '++current' statement is followed by check for equality to 'end'.
@@ -562,7 +556,7 @@ double InternalStringToDouble(UnicodeCache* unicode_cache, Iterator current,
// 'parsing_done'.
// 4. 'current' is not dereferenced after the 'parsing_done' label.
// 5. Code before 'parsing_done' may rely on 'current != end'.
- if (!AdvanceToNonspace(unicode_cache, &current, end)) {
+ if (!AdvanceToNonspace(&current, end)) {
return empty_string_val;
}
@@ -610,8 +604,7 @@ double InternalStringToDouble(UnicodeCache* unicode_cache, Iterator current,
return JunkStringValue();
}
- if (!allow_trailing_junk &&
- AdvanceToNonspace(unicode_cache, &current, end)) {
+ if (!allow_trailing_junk && AdvanceToNonspace(&current, end)) {
return JunkStringValue();
}
@@ -633,7 +626,7 @@ double InternalStringToDouble(UnicodeCache* unicode_cache, Iterator current,
return JunkStringValue(); // "0x".
}
- return InternalStringToIntDouble<4>(unicode_cache, current, end, false,
+ return InternalStringToIntDouble<4>(current, end, false,
allow_trailing_junk);
// It could be an explicit octal value.
@@ -643,7 +636,7 @@ double InternalStringToDouble(UnicodeCache* unicode_cache, Iterator current,
return JunkStringValue(); // "0o".
}
- return InternalStringToIntDouble<3>(unicode_cache, current, end, false,
+ return InternalStringToIntDouble<3>(current, end, false,
allow_trailing_junk);
// It could be a binary value.
@@ -653,7 +646,7 @@ double InternalStringToDouble(UnicodeCache* unicode_cache, Iterator current,
return JunkStringValue(); // "0b".
}
- return InternalStringToIntDouble<1>(unicode_cache, current, end, false,
+ return InternalStringToIntDouble<1>(current, end, false,
allow_trailing_junk);
}
@@ -785,7 +778,7 @@ double InternalStringToDouble(UnicodeCache* unicode_cache, Iterator current,
exponent += (sign == '-' ? -num : num);
}
- if (!allow_trailing_junk && AdvanceToNonspace(unicode_cache, &current, end)) {
+ if (!allow_trailing_junk && AdvanceToNonspace(&current, end)) {
return JunkStringValue();
}
@@ -793,9 +786,8 @@ parsing_done:
exponent += insignificant_digits;
if (octal) {
- return InternalStringToIntDouble<3>(unicode_cache, buffer,
- buffer + buffer_pos, sign == NEGATIVE,
- allow_trailing_junk);
+ return InternalStringToIntDouble<3>(buffer, buffer + buffer_pos,
+ sign == NEGATIVE, allow_trailing_junk);
}
if (nonzero_digit_dropped) {
@@ -810,37 +802,27 @@ parsing_done:
return (sign == NEGATIVE) ? -converted : converted;
}
-double StringToDouble(UnicodeCache* unicode_cache,
- const char* str, int flags, double empty_string_val) {
+double StringToDouble(const char* str, int flags, double empty_string_val) {
// We cast to const uint8_t* here to avoid instantiating the
// InternalStringToDouble() template for const char* as well.
const uint8_t* start = reinterpret_cast<const uint8_t*>(str);
const uint8_t* end = start + StrLength(str);
- return InternalStringToDouble(unicode_cache, start, end, flags,
- empty_string_val);
+ return InternalStringToDouble(start, end, flags, empty_string_val);
}
-
-double StringToDouble(UnicodeCache* unicode_cache,
- Vector<const uint8_t> str,
- int flags,
+double StringToDouble(Vector<const uint8_t> str, int flags,
double empty_string_val) {
// We cast to const uint8_t* here to avoid instantiating the
// InternalStringToDouble() template for const char* as well.
const uint8_t* start = reinterpret_cast<const uint8_t*>(str.start());
const uint8_t* end = start + str.length();
- return InternalStringToDouble(unicode_cache, start, end, flags,
- empty_string_val);
+ return InternalStringToDouble(start, end, flags, empty_string_val);
}
-
-double StringToDouble(UnicodeCache* unicode_cache,
- Vector<const uc16> str,
- int flags,
+double StringToDouble(Vector<const uc16> str, int flags,
double empty_string_val) {
const uc16* end = str.start() + str.length();
- return InternalStringToDouble(unicode_cache, str.start(), end, flags,
- empty_string_val);
+ return InternalStringToDouble(str.start(), end, flags, empty_string_val);
}
double StringToInt(Isolate* isolate, Handle<String> string, int radix) {
@@ -954,6 +936,11 @@ const char* DoubleToCString(double v, Vector<char> buffer) {
case FP_INFINITE: return (v < 0.0 ? "-Infinity" : "Infinity");
case FP_ZERO: return "0";
default: {
+ if (IsInt32Double(v)) {
+ // Note: -0 is caught by the FP_ZERO case above and stringified to "0"
+ // per ES section 7.1.12.1 #sec-tostring-applied-to-the-number-type,
+ // so the Int32 fast path here never sees it.
+ return IntToCString(FastD2I(v), buffer);
+ }
SimpleStringBuilder builder(buffer.start(), buffer.length());
int decimal_point;
int sign;
@@ -1004,18 +991,17 @@ const char* DoubleToCString(double v, Vector<char> buffer) {
const char* IntToCString(int n, Vector<char> buffer) {
- bool negative = false;
- if (n < 0) {
- // We must not negate the most negative int.
- if (n == kMinInt) return DoubleToCString(n, buffer);
- negative = true;
+ bool negative = true;
+ if (n >= 0) {
n = -n;
+ negative = false;
}
// Build the string backwards from the least significant digit.
int i = buffer.length();
buffer[--i] = '\0';
do {
- buffer[--i] = '0' + (n % 10);
+ // We ensured n <= 0, so the subtraction does the right addition.
+ buffer[--i] = '0' - (n % 10);
n /= 10;
} while (n);
if (negative) buffer[--i] = '-';
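Inverting the sign handling fixes the kMinInt edge case without the old fallback to DoubleToCString: -n overflows when n == INT_MIN, but the negation of a non-negative int is always representable, so the rewrite folds all values into the non-positive range instead. With n <= 0, n % 10 lies in [-9, 0], so '0' - (n % 10) yields the digit character. A self-contained sketch of the same technique (not V8's code; buffer sized for a 32-bit int):

    #include <climits>
    #include <cstdio>

    void PrintInt(int n) {
      bool negative = n < 0;
      if (!negative) n = -n;        // negating a non-negative int never overflows
      char buf[16];
      int i = sizeof(buf);
      buf[--i] = '\0';
      do {
        buf[--i] = '0' - (n % 10);  // n % 10 is in [-9, 0]
        n /= 10;
      } while (n != 0);
      if (negative) buf[--i] = '-';
      std::puts(buf + i);           // PrintInt(INT_MIN) prints -2147483648
    }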
@@ -1326,26 +1312,22 @@ char* DoubleToRadixCString(double value, int radix) {
// ES6 18.2.4 parseFloat(string)
-double StringToDouble(Isolate* isolate, UnicodeCache* unicode_cache,
- Handle<String> string, int flags,
+double StringToDouble(Isolate* isolate, Handle<String> string, int flags,
double empty_string_val) {
Handle<String> flattened = String::Flatten(isolate, string);
{
DisallowHeapAllocation no_gc;
- String::FlatContent flat = flattened->GetFlatContent();
+ String::FlatContent flat = flattened->GetFlatContent(no_gc);
DCHECK(flat.IsFlat());
if (flat.IsOneByte()) {
- return StringToDouble(unicode_cache, flat.ToOneByteVector(), flags,
- empty_string_val);
+ return StringToDouble(flat.ToOneByteVector(), flags, empty_string_val);
} else {
- return StringToDouble(unicode_cache, flat.ToUC16Vector(), flags,
- empty_string_val);
+ return StringToDouble(flat.ToUC16Vector(), flags, empty_string_val);
}
}
}
-
-bool IsSpecialIndex(UnicodeCache* unicode_cache, String* string) {
+bool IsSpecialIndex(String string) {
// Max length of canonical double: -X.XXXXXXXXXXXXXXXXX-eXXX
const int kBufferSize = 24;
const int length = string->length();
@@ -1391,7 +1373,7 @@ bool IsSpecialIndex(UnicodeCache* unicode_cache, String* string) {
}
// Slow path: test DoubleToString(StringToDouble(string)) == string.
Vector<const uint16_t> vector(buffer, length);
- double d = StringToDouble(unicode_cache, vector, NO_FLAGS);
+ double d = StringToDouble(vector, NO_FLAGS);
if (std::isnan(d)) return false;
// Compute reverse string.
char reverse_buffer[kBufferSize + 1]; // Result will be \0 terminated.
diff --git a/deps/v8/src/conversions.h b/deps/v8/src/conversions.h
index 3077ae4204..dd25b74aed 100644
--- a/deps/v8/src/conversions.h
+++ b/deps/v8/src/conversions.h
@@ -16,7 +16,6 @@ namespace internal {
class BigInt;
template <typename T>
class Handle;
-class UnicodeCache;
// The limit for the fractionDigits/precision for toFixed, toPrecision
// and toExponential.
@@ -90,19 +89,12 @@ enum ConversionFlags {
// Converts a string into a double value according to ECMA-262 9.3.1
-double StringToDouble(UnicodeCache* unicode_cache,
- Vector<const uint8_t> str,
- int flags,
+double StringToDouble(Vector<const uint8_t> str, int flags,
double empty_string_val = 0);
-double StringToDouble(UnicodeCache* unicode_cache,
- Vector<const uc16> str,
- int flags,
+double StringToDouble(Vector<const uc16> str, int flags,
double empty_string_val = 0);
// This version expects a zero-terminated character array.
-double StringToDouble(UnicodeCache* unicode_cache,
- const char* str,
- int flags,
- double empty_string_val = 0);
+double StringToDouble(const char* str, int flags, double empty_string_val = 0);
double StringToInt(Isolate* isolate, Handle<String> string, int radix);
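With the UnicodeCache parameter gone, the whitespace test is a free function and callers pass only the input, the conversion flags, and an optional empty-string value. A hedged usage sketch (the flag names come from the ConversionFlags enum declared just above; the expected results assume the documented parseFloat-style semantics):

    double strict = StringToDouble("  3.75 ", NO_FLAGS);          // 3.75
    double hex    = StringToDouble("0x1f", ALLOW_HEX);            // 31.0
    double junk   = StringToDouble("12abc", ALLOW_TRAILING_JUNK); // 12.0
    double empty  = StringToDouble("", NO_FLAGS, /*empty_string_val=*/0.0);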
@@ -123,11 +115,12 @@ const int kDoubleToCStringMinBufferSize = 100;
// Converts a double to a string value according to ECMA-262 9.8.1.
// The buffer should be large enough for any floating point number.
// 100 characters is enough.
-const char* DoubleToCString(double value, Vector<char> buffer);
+V8_EXPORT_PRIVATE const char* DoubleToCString(double value,
+ Vector<char> buffer);
// Convert an int to a null-terminated string. The returned string is
// located inside the buffer, but not necessarily at the start.
-const char* IntToCString(int n, Vector<char> buffer);
+V8_EXPORT_PRIVATE const char* IntToCString(int n, Vector<char> buffer);
// Additional number to string conversions for the number type.
// The caller is responsible for calling free on the returned pointer.
@@ -164,23 +157,22 @@ inline bool IsUint32Double(double value);
inline bool DoubleToUint32IfEqualToSelf(double value, uint32_t* uint32_value);
// Convert from Number object to C integer.
-inline uint32_t PositiveNumberToUint32(Object* number);
-inline int32_t NumberToInt32(Object* number);
-inline uint32_t NumberToUint32(Object* number);
-inline int64_t NumberToInt64(Object* number);
-inline uint64_t PositiveNumberToUint64(Object* number);
-
-double StringToDouble(Isolate* isolate, UnicodeCache* unicode_cache,
- Handle<String> string, int flags,
+inline uint32_t PositiveNumberToUint32(Object number);
+inline int32_t NumberToInt32(Object number);
+inline uint32_t NumberToUint32(Object number);
+inline int64_t NumberToInt64(Object number);
+inline uint64_t PositiveNumberToUint64(Object number);
+
+double StringToDouble(Isolate* isolate, Handle<String> string, int flags,
double empty_string_val = 0.0);
-inline bool TryNumberToSize(Object* number, size_t* result);
+inline bool TryNumberToSize(Object number, size_t* result);
// Converts a number into size_t.
-inline size_t NumberToSize(Object* number);
+inline size_t NumberToSize(Object number);
// returns DoubleToString(StringToDouble(string)) == string
-bool IsSpecialIndex(UnicodeCache* unicode_cache, String* string);
+bool IsSpecialIndex(String string);
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/counters-inl.h b/deps/v8/src/counters-inl.h
index 0f60a76806..fc55569e39 100644
--- a/deps/v8/src/counters-inl.h
+++ b/deps/v8/src/counters-inl.h
@@ -58,7 +58,7 @@ void RuntimeCallTimer::CommitTimeToCounter() {
bool RuntimeCallTimer::IsStarted() { return start_ticks_ != base::TimeTicks(); }
RuntimeCallTimerScope::RuntimeCallTimerScope(Isolate* isolate,
- HeapObject* heap_object,
+ HeapObject heap_object,
RuntimeCallCounterId counter_id)
: RuntimeCallTimerScope(isolate, counter_id) {}
diff --git a/deps/v8/src/counters.cc b/deps/v8/src/counters.cc
index a4b08127cd..6dff372f90 100644
--- a/deps/v8/src/counters.cc
+++ b/deps/v8/src/counters.cc
@@ -8,9 +8,11 @@
#include "src/base/platform/platform.h"
#include "src/builtins/builtins-definitions.h"
+#include "src/counters-inl.h"
#include "src/isolate.h"
#include "src/log-inl.h"
#include "src/log.h"
+#include "src/ostreams.h"
namespace v8 {
namespace internal {
@@ -34,35 +36,35 @@ StatsCounterThreadSafe::StatsCounterThreadSafe(Counters* counters,
void StatsCounterThreadSafe::Set(int Value) {
if (ptr_) {
- base::LockGuard<base::Mutex> Guard(&mutex_);
+ base::MutexGuard Guard(&mutex_);
SetLoc(ptr_, Value);
}
}
void StatsCounterThreadSafe::Increment() {
if (ptr_) {
- base::LockGuard<base::Mutex> Guard(&mutex_);
+ base::MutexGuard Guard(&mutex_);
IncrementLoc(ptr_);
}
}
void StatsCounterThreadSafe::Increment(int value) {
if (ptr_) {
- base::LockGuard<base::Mutex> Guard(&mutex_);
+ base::MutexGuard Guard(&mutex_);
IncrementLoc(ptr_, value);
}
}
void StatsCounterThreadSafe::Decrement() {
if (ptr_) {
- base::LockGuard<base::Mutex> Guard(&mutex_);
+ base::MutexGuard Guard(&mutex_);
DecrementLoc(ptr_);
}
}
void StatsCounterThreadSafe::Decrement(int value) {
if (ptr_) {
- base::LockGuard<base::Mutex> Guard(&mutex_);
+ base::MutexGuard Guard(&mutex_);
DecrementLoc(ptr_, value);
}
}
@@ -530,11 +532,16 @@ void RuntimeCallStats::Dump(v8::tracing::TracedValue* value) {
in_use_ = false;
}
-WorkerThreadRuntimeCallStats::WorkerThreadRuntimeCallStats()
- : tls_key_(base::Thread::CreateThreadLocalKey()) {}
+WorkerThreadRuntimeCallStats::WorkerThreadRuntimeCallStats() {}
WorkerThreadRuntimeCallStats::~WorkerThreadRuntimeCallStats() {
- base::Thread::DeleteThreadLocalKey(tls_key_);
+ if (tls_key_) base::Thread::DeleteThreadLocalKey(*tls_key_);
+}
+
+base::Thread::LocalStorageKey WorkerThreadRuntimeCallStats::GetKey() {
+ DCHECK(FLAG_runtime_stats);
+ if (!tls_key_) tls_key_ = base::Thread::CreateThreadLocalKey();
+ return *tls_key_;
}
RuntimeCallStats* WorkerThreadRuntimeCallStats::NewTable() {
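Making tls_key_ a base::Optional means the TLS slot is only allocated on the first GetKey() call, i.e. only when FLAG_runtime_stats is actually set, and the destructor tears down only what was created. A hedged sketch of the same lazy-resource pattern using the standard library in place of V8's base:: wrappers (the first call is assumed serialized, as the DCHECK-guarded original implies):

    #include <optional>
    #include <pthread.h>

    class LazyTlsKey {
     public:
      ~LazyTlsKey() {
        if (key_) pthread_key_delete(*key_);  // only if it was ever created
      }
      pthread_key_t Get() {
        if (!key_) {                          // allocate the slot on first use
          pthread_key_t k;
          pthread_key_create(&k, nullptr);
          key_ = k;
        }
        return *key_;
      }
     private:
      std::optional<pthread_key_t> key_;
    };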
@@ -543,14 +550,14 @@ RuntimeCallStats* WorkerThreadRuntimeCallStats::NewTable() {
base::make_unique<RuntimeCallStats>();
RuntimeCallStats* result = new_table.get();
- base::LockGuard<base::Mutex> lock(&mutex_);
+ base::MutexGuard lock(&mutex_);
tables_.push_back(std::move(new_table));
return result;
}
void WorkerThreadRuntimeCallStats::AddToMainTable(
RuntimeCallStats* main_call_stats) {
- base::LockGuard<base::Mutex> lock(&mutex_);
+ base::MutexGuard lock(&mutex_);
for (auto& worker_stats : tables_) {
DCHECK_NE(main_call_stats, worker_stats.get());
main_call_stats->Add(worker_stats.get());
diff --git a/deps/v8/src/counters.h b/deps/v8/src/counters.h
index 719bcc55e0..4b05a6693e 100644
--- a/deps/v8/src/counters.h
+++ b/deps/v8/src/counters.h
@@ -8,6 +8,7 @@
#include "include/v8.h"
#include "src/allocation.h"
#include "src/base/atomic-utils.h"
+#include "src/base/optional.h"
#include "src/base/platform/elapsed-timer.h"
#include "src/base/platform/time.h"
#include "src/globals.h"
@@ -305,6 +306,34 @@ class TimedHistogramScope {
DISALLOW_IMPLICIT_CONSTRUCTORS(TimedHistogramScope);
};
+enum class OptionalTimedHistogramScopeMode { TAKE_TIME, DONT_TAKE_TIME };
+
+// Helper class for scoping a TimedHistogram.
+// It does not measure time when mode == DONT_TAKE_TIME.
+class OptionalTimedHistogramScope {
+ public:
+ OptionalTimedHistogramScope(TimedHistogram* histogram, Isolate* isolate,
+ OptionalTimedHistogramScopeMode mode)
+ : histogram_(histogram), isolate_(isolate), mode_(mode) {
+ if (mode == OptionalTimedHistogramScopeMode::TAKE_TIME) {
+ histogram_->Start(&timer_, isolate);
+ }
+ }
+
+ ~OptionalTimedHistogramScope() {
+ if (mode_ == OptionalTimedHistogramScopeMode::TAKE_TIME) {
+ histogram_->Stop(&timer_, isolate_);
+ }
+ }
+
+ private:
+ base::ElapsedTimer timer_;
+ TimedHistogram* const histogram_;
+ Isolate* const isolate_;
+ const OptionalTimedHistogramScopeMode mode_;
+ DISALLOW_IMPLICIT_CONSTRUCTORS(OptionalTimedHistogramScope);
+};
+
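Unlike the removed OptionalHistogramTimerScope further down, this variant wraps a TimedHistogram and therefore also carries the isolate. A hedged usage sketch (the function and variable names are hypothetical):

    void DoTrackedWork(TimedHistogram* histogram, Isolate* isolate,
                       bool on_foreground) {
      OptionalTimedHistogramScope scope(
          histogram, isolate,
          on_foreground ? OptionalTimedHistogramScopeMode::TAKE_TIME
                        : OptionalTimedHistogramScopeMode::DONT_TAKE_TIME);
      // ... do the work; the destructor stops the timer only in TAKE_TIME mode.
    }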
// Helper class for recording a TimedHistogram asynchronously with manual
// controls (it will not generate a report if destroyed without explicitly
// triggering a report). |async_counters| should be a shared_ptr to
@@ -322,13 +351,6 @@ class AsyncTimedHistogram {
histogram_->Start(&timer_, nullptr);
}
- ~AsyncTimedHistogram() = default;
-
- AsyncTimedHistogram(const AsyncTimedHistogram& other) = default;
- AsyncTimedHistogram& operator=(const AsyncTimedHistogram& other) = default;
- AsyncTimedHistogram(AsyncTimedHistogram&& other) = default;
- AsyncTimedHistogram& operator=(AsyncTimedHistogram&& other) = default;
-
// Records the time elapsed to |histogram_| and stops |timer_|.
void RecordDone() { histogram_->Stop(&timer_, nullptr); }
@@ -435,27 +457,6 @@ class HistogramTimerScope {
#endif
};
-enum class OptionalHistogramTimerScopeMode { TAKE_TIME, DONT_TAKE_TIME };
-
-// Helper class for scoping a HistogramTimer.
-// It will not take time if take_time is set to false.
-class OptionalHistogramTimerScope {
- public:
- OptionalHistogramTimerScope(HistogramTimer* timer,
- OptionalHistogramTimerScopeMode mode)
- : timer_(timer), mode_(mode) {
- if (mode == OptionalHistogramTimerScopeMode::TAKE_TIME) timer_->Start();
- }
-
- ~OptionalHistogramTimerScope() {
- if (mode_ == OptionalHistogramTimerScopeMode::TAKE_TIME) timer_->Stop();
- }
-
- private:
- HistogramTimer* timer_;
- OptionalHistogramTimerScopeMode mode_;
-};
-
// A histogram timer that can aggregate events within a larger scope.
//
// Intended use of this timer is to have an outer (aggregating) and an inner
@@ -708,15 +709,15 @@ class RuntimeCallTimer final {
#define FOR_EACH_API_COUNTER(V) \
V(ArrayBuffer_Cast) \
- V(ArrayBuffer_Neuter) \
+ V(ArrayBuffer_Detach) \
V(ArrayBuffer_New) \
V(Array_CloneElementAt) \
V(Array_New) \
- V(BigInt_NewFromWords) \
V(BigInt64Array_New) \
- V(BigUint64Array_New) \
- V(BigIntObject_New) \
+ V(BigInt_NewFromWords) \
V(BigIntObject_BigIntValue) \
+ V(BigIntObject_New) \
+ V(BigUint64Array_New) \
V(BooleanObject_BooleanValue) \
V(BooleanObject_New) \
V(Context_New) \
@@ -750,9 +751,6 @@ class RuntimeCallTimer final {
V(Map_Has) \
V(Map_New) \
V(Map_Set) \
- V(WeakMap_Get) \
- V(WeakMap_Set) \
- V(WeakMap_New) \
V(Message_GetEndColumn) \
V(Message_GetLineNumber) \
V(Message_GetSourceLine) \
@@ -807,8 +805,8 @@ class RuntimeCallTimer final {
V(Promise_Chain) \
V(Promise_HasRejectHandler) \
V(Promise_Resolver_New) \
- V(Promise_Resolver_Resolve) \
V(Promise_Resolver_Reject) \
+ V(Promise_Resolver_Resolve) \
V(Promise_Result) \
V(Promise_Status) \
V(Promise_Then) \
@@ -852,35 +850,38 @@ class RuntimeCallTimer final {
V(UnboundScript_GetName) \
V(UnboundScript_GetSourceMappingURL) \
V(UnboundScript_GetSourceURL) \
+ V(ValueDeserializer_ReadHeader) \
+ V(ValueDeserializer_ReadValue) \
+ V(ValueSerializer_WriteValue) \
V(Value_InstanceOf) \
- V(Value_IntegerValue) \
V(Value_Int32Value) \
+ V(Value_IntegerValue) \
V(Value_NumberValue) \
V(Value_TypeOf) \
V(Value_Uint32Value) \
- V(ValueDeserializer_ReadHeader) \
- V(ValueDeserializer_ReadValue) \
- V(ValueSerializer_WriteValue)
+ V(WeakMap_Get) \
+ V(WeakMap_New) \
+ V(WeakMap_Set)
#define FOR_EACH_MANUAL_COUNTER(V) \
V(AccessorGetterCallback) \
V(AccessorSetterCallback) \
V(ArrayLengthGetter) \
V(ArrayLengthSetter) \
- V(BoundFunctionNameGetter) \
V(BoundFunctionLengthGetter) \
+ V(BoundFunctionNameGetter) \
+ V(CompileAnalyse) \
V(CompileBackgroundAnalyse) \
V(CompileBackgroundCompileTask) \
V(CompileBackgroundEval) \
V(CompileBackgroundFunction) \
V(CompileBackgroundIgnition) \
- V(CompileBackgroundScript) \
V(CompileBackgroundRewriteReturnResult) \
V(CompileBackgroundScopeAnalysis) \
+ V(CompileBackgroundScript) \
V(CompileDeserialize) \
- V(CompileEval) \
- V(CompileAnalyse) \
V(CompileEnqueueOnDispatcher) \
+ V(CompileEval) \
V(CompileFinalizeBackgroundCompileTask) \
V(CompileFinishNowOnDispatcher) \
V(CompileFunction) \
@@ -893,37 +894,44 @@ class RuntimeCallTimer final {
V(CompileSerialize) \
V(CompileWaitForDispatcher) \
V(DeoptimizeCode) \
+ V(DeserializeContext) \
+ V(DeserializeIsolate) \
V(FunctionCallback) \
+ V(FunctionLengthGetter) \
V(FunctionPrototypeGetter) \
V(FunctionPrototypeSetter) \
- V(FunctionLengthGetter) \
V(GC_Custom_AllAvailableGarbage) \
V(GC_Custom_IncrementalMarkingObserver) \
V(GC_Custom_SlowAllocateRaw) \
V(GCEpilogueCallback) \
V(GCPrologueCallback) \
+ V(Genesis) \
V(GetMoreDataCallback) \
- V(NamedDefinerCallback) \
- V(NamedDeleterCallback) \
- V(NamedDescriptorCallback) \
- V(NamedQueryCallback) \
- V(NamedSetterCallback) \
- V(NamedGetterCallback) \
- V(NamedEnumeratorCallback) \
V(IndexedDefinerCallback) \
V(IndexedDeleterCallback) \
V(IndexedDescriptorCallback) \
+ V(IndexedEnumeratorCallback) \
V(IndexedGetterCallback) \
V(IndexedQueryCallback) \
V(IndexedSetterCallback) \
- V(IndexedEnumeratorCallback) \
+ V(Invoke) \
+ V(InvokeApiFunction) \
V(InvokeApiInterruptCallbacks) \
V(InvokeFunctionCallback) \
V(JS_Execution) \
V(Map_SetPrototype) \
V(Map_TransitionToAccessorProperty) \
V(Map_TransitionToDataProperty) \
+ V(MessageListenerCallback) \
+ V(NamedDefinerCallback) \
+ V(NamedDeleterCallback) \
+ V(NamedDescriptorCallback) \
+ V(NamedEnumeratorCallback) \
+ V(NamedGetterCallback) \
+ V(NamedQueryCallback) \
+ V(NamedSetterCallback) \
V(Object_DeleteProperty) \
+ V(ObjectVerify) \
V(OptimizeCode) \
V(ParseArrowFunctionLiteral) \
V(ParseBackgroundArrowFunctionLiteral) \
@@ -935,9 +943,7 @@ class RuntimeCallTimer final {
V(ParseProgram) \
V(PreParseArrowFunctionLiteral) \
V(PreParseBackgroundArrowFunctionLiteral) \
- V(PreParseBackgroundNoVariableResolution) \
V(PreParseBackgroundWithVariableResolution) \
- V(PreParseNoVariableResolution) \
V(PreParseWithVariableResolution) \
V(PropertyCallback) \
V(PrototypeMap_TransitionToAccessorProperty) \
@@ -952,17 +958,16 @@ class RuntimeCallTimer final {
V(TestCounter3)
#define FOR_EACH_HANDLER_COUNTER(V) \
- V(KeyedLoadIC_LoadIndexedInterceptorStub) \
V(KeyedLoadIC_KeyedLoadSloppyArgumentsStub) \
V(KeyedLoadIC_LoadElementDH) \
+ V(KeyedLoadIC_LoadIndexedInterceptorStub) \
V(KeyedLoadIC_LoadIndexedStringDH) \
V(KeyedLoadIC_SlowStub) \
V(KeyedStoreIC_ElementsTransitionAndStoreStub) \
V(KeyedStoreIC_KeyedStoreSloppyArgumentsStub) \
V(KeyedStoreIC_SlowStub) \
- V(KeyedStoreIC_StoreFastElementStub) \
V(KeyedStoreIC_StoreElementStub) \
- V(StoreInArrayLiteralIC_SlowStub) \
+ V(KeyedStoreIC_StoreFastElementStub) \
V(LoadGlobalIC_LoadScriptContextField) \
V(LoadGlobalIC_SlowStub) \
V(LoadIC_FunctionPrototypeStub) \
@@ -979,11 +984,11 @@ class RuntimeCallTimer final {
V(LoadIC_LoadGlobalFromPrototypeDH) \
V(LoadIC_LoadIntegerIndexedExoticDH) \
V(LoadIC_LoadInterceptorDH) \
- V(LoadIC_LoadNonMaskingInterceptorDH) \
V(LoadIC_LoadInterceptorFromPrototypeDH) \
V(LoadIC_LoadNativeDataPropertyDH) \
V(LoadIC_LoadNativeDataPropertyFromPrototypeDH) \
V(LoadIC_LoadNonexistentDH) \
+ V(LoadIC_LoadNonMaskingInterceptorDH) \
V(LoadIC_LoadNormalDH) \
V(LoadIC_LoadNormalFromPrototypeDH) \
V(LoadIC_NonReceiver) \
@@ -991,8 +996,8 @@ class RuntimeCallTimer final {
V(LoadIC_SlowStub) \
V(LoadIC_StringLength) \
V(LoadIC_StringWrapperLength) \
- V(StoreGlobalIC_StoreScriptContextField) \
V(StoreGlobalIC_SlowStub) \
+ V(StoreGlobalIC_StoreScriptContextField) \
V(StoreIC_HandlerCacheHit_Accessor) \
V(StoreIC_NonReceiver) \
V(StoreIC_Premonomorphic) \
@@ -1007,7 +1012,8 @@ class RuntimeCallTimer final {
V(StoreIC_StoreNativeDataPropertyDH) \
V(StoreIC_StoreNativeDataPropertyOnPrototypeDH) \
V(StoreIC_StoreNormalDH) \
- V(StoreIC_StoreTransitionDH)
+ V(StoreIC_StoreTransitionDH) \
+ V(StoreInArrayLiteralIC_SlowStub)
enum RuntimeCallCounterId {
#define CALL_RUNTIME_COUNTER(name) kGC_##name,
@@ -1089,7 +1095,7 @@ class WorkerThreadRuntimeCallStats final {
~WorkerThreadRuntimeCallStats();
// Returns the TLS key associated with this WorkerThreadRuntimeCallStats.
- base::Thread::LocalStorageKey GetKey() const { return tls_key_; }
+ base::Thread::LocalStorageKey GetKey();
// Returns a new worker thread runtime call stats table managed by this
// WorkerThreadRuntimeCallStats.
@@ -1101,7 +1107,7 @@ class WorkerThreadRuntimeCallStats final {
private:
base::Mutex mutex_;
std::vector<std::unique_ptr<RuntimeCallStats>> tables_;
- base::Thread::LocalStorageKey tls_key_;
+ base::Optional<base::Thread::LocalStorageKey> tls_key_;
};
// Creating a WorkerThreadRuntimeCallStatsScope will provide a thread-local
@@ -1139,7 +1145,7 @@ class RuntimeCallTimerScope {
RuntimeCallCounterId counter_id);
// This constructor is here just to avoid calling GetIsolate() when the
// stats are disabled and the isolate is not directly available.
- inline RuntimeCallTimerScope(Isolate* isolate, HeapObject* heap_object,
+ inline RuntimeCallTimerScope(Isolate* isolate, HeapObject heap_object,
RuntimeCallCounterId counter_id);
inline RuntimeCallTimerScope(RuntimeCallStats* stats,
RuntimeCallCounterId counter_id) {
@@ -1182,6 +1188,7 @@ class RuntimeCallTimerScope {
HR(gc_finalize_sweep, V8.GCFinalizeMC.Sweep, 0, 10000, 101) \
HR(gc_scavenger_scavenge_main, V8.GCScavenger.ScavengeMain, 0, 10000, 101) \
HR(gc_scavenger_scavenge_roots, V8.GCScavenger.ScavengeRoots, 0, 10000, 101) \
+ HR(gc_mark_compactor, V8.GCMarkCompactor, 0, 10000, 101) \
HR(scavenge_reason, V8.GCScavengeReason, 0, 21, 22) \
HR(young_generation_handling, V8.GCYoungGenerationHandling, 0, 2, 3) \
/* Asm/Wasm. */ \
@@ -1226,21 +1233,6 @@ class RuntimeCallTimerScope {
#define HISTOGRAM_TIMER_LIST(HT) \
/* Garbage collection timers. */ \
- HT(gc_compactor, V8.GCCompactor, 10000, MILLISECOND) \
- HT(gc_compactor_background, V8.GCCompactorBackground, 10000, MILLISECOND) \
- HT(gc_compactor_foreground, V8.GCCompactorForeground, 10000, MILLISECOND) \
- HT(gc_finalize, V8.GCFinalizeMC, 10000, MILLISECOND) \
- HT(gc_finalize_background, V8.GCFinalizeMCBackground, 10000, MILLISECOND) \
- HT(gc_finalize_foreground, V8.GCFinalizeMCForeground, 10000, MILLISECOND) \
- HT(gc_finalize_reduce_memory, V8.GCFinalizeMCReduceMemory, 10000, \
- MILLISECOND) \
- HT(gc_finalize_reduce_memory_background, \
- V8.GCFinalizeMCReduceMemoryBackground, 10000, MILLISECOND) \
- HT(gc_finalize_reduce_memory_foreground, \
- V8.GCFinalizeMCReduceMemoryForeground, 10000, MILLISECOND) \
- HT(gc_scavenger, V8.GCScavenger, 10000, MILLISECOND) \
- HT(gc_scavenger_background, V8.GCScavengerBackground, 10000, MILLISECOND) \
- HT(gc_scavenger_foreground, V8.GCScavengerForeground, 10000, MILLISECOND) \
HT(gc_context, V8.GCContext, 10000, \
MILLISECOND) /* GC context cleanup time */ \
HT(gc_idle_notification, V8.GCIdleNotification, 10000, MILLISECOND) \
@@ -1271,6 +1263,23 @@ class RuntimeCallTimerScope {
MICROSECOND)
#define TIMED_HISTOGRAM_LIST(HT) \
+ /* Garbage collection timers. */ \
+ HT(gc_compactor, V8.GCCompactor, 10000, MILLISECOND) \
+ HT(gc_compactor_background, V8.GCCompactorBackground, 10000, MILLISECOND) \
+ HT(gc_compactor_foreground, V8.GCCompactorForeground, 10000, MILLISECOND) \
+ HT(gc_finalize, V8.GCFinalizeMC, 10000, MILLISECOND) \
+ HT(gc_finalize_background, V8.GCFinalizeMCBackground, 10000, MILLISECOND) \
+ HT(gc_finalize_foreground, V8.GCFinalizeMCForeground, 10000, MILLISECOND) \
+ HT(gc_finalize_reduce_memory, V8.GCFinalizeMCReduceMemory, 10000, \
+ MILLISECOND) \
+ HT(gc_finalize_reduce_memory_background, \
+ V8.GCFinalizeMCReduceMemoryBackground, 10000, MILLISECOND) \
+ HT(gc_finalize_reduce_memory_foreground, \
+ V8.GCFinalizeMCReduceMemoryForeground, 10000, MILLISECOND) \
+ HT(gc_scavenger, V8.GCScavenger, 10000, MILLISECOND) \
+ HT(gc_scavenger_background, V8.GCScavengerBackground, 10000, MILLISECOND) \
+ HT(gc_scavenger_foreground, V8.GCScavengerForeground, 10000, MILLISECOND) \
+ /* Wasm timers. */ \
HT(wasm_decode_asm_module_time, V8.WasmDecodeModuleMicroSeconds.asm, \
1000000, MICROSECOND) \
HT(wasm_decode_wasm_module_time, V8.WasmDecodeModuleMicroSeconds.wasm, \
@@ -1359,7 +1368,6 @@ class RuntimeCallTimerScope {
SC(string_table_capacity, V8.StringTableCapacity) \
SC(number_of_symbols, V8.NumberOfSymbols) \
SC(inlined_copied_elements, V8.InlinedCopiedElements) \
- SC(arguments_adaptors, V8.ArgumentsAdaptors) \
SC(compilation_cache_hits, V8.CompilationCacheHits) \
SC(compilation_cache_misses, V8.CompilationCacheMisses) \
/* Amount of evaled source code. */ \
@@ -1385,10 +1393,6 @@ class RuntimeCallTimerScope {
SC(store_buffer_overflows, V8.StoreBufferOverflows)
#define STATS_COUNTER_LIST_2(SC) \
- /* Number of code stubs. */ \
- SC(code_stubs, V8.CodeStubs) \
- /* Amount of stub code. */ \
- SC(total_stubs_code_size, V8.TotalStubsCodeSize) \
/* Amount of (JS) compiled code. */ \
SC(total_compiled_code_size, V8.TotalCompiledCodeSize) \
SC(gc_compactor_caused_by_request, V8.GCCompactorCausedByRequest) \
diff --git a/deps/v8/src/cpu-features.h b/deps/v8/src/cpu-features.h
new file mode 100644
index 0000000000..310fafe272
--- /dev/null
+++ b/deps/v8/src/cpu-features.h
@@ -0,0 +1,124 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CPU_FEATURES_H_
+#define V8_CPU_FEATURES_H_
+
+#include "src/globals.h"
+
+namespace v8 {
+
+namespace internal {
+
+// CPU feature flags.
+enum CpuFeature {
+ // x86
+ SSE4_1,
+ SSSE3,
+ SSE3,
+ SAHF,
+ AVX,
+ FMA3,
+ BMI1,
+ BMI2,
+ LZCNT,
+ POPCNT,
+ ATOM,
+ // ARM
+ // - Standard configurations. The baseline is ARMv6+VFPv2.
+ ARMv7, // ARMv7-A + VFPv3-D32 + NEON
+ ARMv7_SUDIV, // ARMv7-A + VFPv4-D32 + NEON + SUDIV
+ ARMv8, // ARMv8-A (+ all of the above)
+ // MIPS, MIPS64
+ FPU,
+ FP64FPU,
+ MIPSr1,
+ MIPSr2,
+ MIPSr6,
+ MIPS_SIMD, // MSA instructions
+ // PPC
+ FPR_GPR_MOV,
+ LWSYNC,
+ ISELECT,
+ VSX,
+ MODULO,
+ // S390
+ DISTINCT_OPS,
+ GENERAL_INSTR_EXT,
+ FLOATING_POINT_EXT,
+ VECTOR_FACILITY,
+ MISC_INSTR_EXT2,
+
+ NUMBER_OF_CPU_FEATURES,
+
+ // ARM feature aliases (based on the standard configurations above).
+ VFPv3 = ARMv7,
+ NEON = ARMv7,
+ VFP32DREGS = ARMv7,
+ SUDIV = ARMv7_SUDIV
+};
+
+// CpuFeatures keeps track of which features are supported by the target CPU.
+// Supported features must be enabled by a CpuFeatureScope before use.
+// Example:
+// if (assembler->IsSupported(SSE3)) {
+// CpuFeatureScope fscope(assembler, SSE3);
+// // Generate code containing SSE3 instructions.
+// } else {
+// // Generate alternative code.
+// }
+class CpuFeatures : public AllStatic {
+ public:
+ static void Probe(bool cross_compile) {
+ STATIC_ASSERT(NUMBER_OF_CPU_FEATURES <= kBitsPerInt);
+ if (initialized_) return;
+ initialized_ = true;
+ ProbeImpl(cross_compile);
+ }
+
+ static unsigned SupportedFeatures() {
+ Probe(false);
+ return supported_;
+ }
+
+ static bool IsSupported(CpuFeature f) {
+ return (supported_ & (1u << f)) != 0;
+ }
+
+ static inline bool SupportsOptimizer();
+
+ static inline bool SupportsWasmSimd128();
+
+ static inline unsigned icache_line_size() {
+ DCHECK_NE(icache_line_size_, 0);
+ return icache_line_size_;
+ }
+
+ static inline unsigned dcache_line_size() {
+ DCHECK_NE(dcache_line_size_, 0);
+ return dcache_line_size_;
+ }
+
+ static void PrintTarget();
+ static void PrintFeatures();
+
+ private:
+ friend class ExternalReference;
+ friend class AssemblerBase;
+ // Flush instruction cache.
+ static void FlushICache(void* start, size_t size);
+
+ // Platform-dependent implementation.
+ static void ProbeImpl(bool cross_compile);
+
+ static unsigned supported_;
+ static unsigned icache_line_size_;
+ static unsigned dcache_line_size_;
+ static bool initialized_;
+ DISALLOW_COPY_AND_ASSIGN(CpuFeatures);
+};
+
+} // namespace internal
+} // namespace v8
+#endif // V8_CPU_FEATURES_H_
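The STATIC_ASSERT in Probe holds because each CpuFeature doubles as a bit index into the single unsigned supported_ word. A hedged sketch of what the accessors reduce to:

    unsigned supported = CpuFeatures::SupportedFeatures();  // probes on first use
    bool has_avx = (supported & (1u << AVX)) != 0;          // == IsSupported(AVX)
    if (CpuFeatures::IsSupported(SSE4_1)) {
      // Emit an SSE4.1 path here, inside a CpuFeatureScope per the class comment.
    }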
diff --git a/deps/v8/src/d8.js b/deps/v8/src/d8-js.cc
index 9dfb966902..c1dac77075 100644
--- a/deps/v8/src/d8.js
+++ b/deps/v8/src/d8-js.cc
@@ -2,6 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/d8.h"
+
+const char* v8::Shell::stringify_source_ = R"D8(
(function() {
"use strict";
@@ -44,7 +47,7 @@ function Stringify(x, depth) {
case "bigint":
return x.toString() + "n";
case "object":
- if (IS_NULL(x)) return "null";
+ if (x === null) return "null";
if (x.constructor && x.constructor.name === "Array") {
var elems = [];
for (var i = 0; i < x.length; ++i) {
@@ -63,8 +66,8 @@ function Stringify(x, depth) {
for (var i in names) {
var name = names[i];
var desc = Object.getOwnPropertyDescriptor(x, name);
- if (IS_UNDEFINED(desc)) continue;
- if (IS_SYMBOL(name)) name = "[" + Stringify(name) + "]";
+ if (desc === (void 0)) continue;
+ if (typeof name === 'symbol') name = "[" + Stringify(name) + "]";
if ("value" in desc) {
props.push(name + ": " + Stringify(desc.value, depth - 1));
}
@@ -94,3 +97,5 @@ function StringifyProxy(proxy, depth) {
return Stringify;
})();
+
+)D8";
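Embedding the former d8.js natives file as a C++11 raw string literal removes the natives packaging step for this script. The D8( ... )D8 delimiter pair means quotes, backslashes, and bare ) characters in the JavaScript body need no escaping; a minimal illustration of the same technique:

    const char* script = R"D8(
      function greet(name) { return "hello (" + name + ")"; }
    )D8";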
diff --git a/deps/v8/src/d8-platforms.cc b/deps/v8/src/d8-platforms.cc
new file mode 100644
index 0000000000..0c179bbdd2
--- /dev/null
+++ b/deps/v8/src/d8-platforms.cc
@@ -0,0 +1,309 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <memory>
+#include <unordered_map>
+
+#include "include/v8-platform.h"
+#include "src/base/logging.h"
+#include "src/base/macros.h"
+#include "src/base/platform/mutex.h"
+#include "src/base/platform/platform.h"
+#include "src/base/platform/time.h"
+#include "src/base/template-utils.h"
+#include "src/base/utils/random-number-generator.h"
+#include "src/d8-platforms.h"
+
+namespace v8 {
+
+class PredictablePlatform : public Platform {
+ public:
+ explicit PredictablePlatform(std::unique_ptr<Platform> platform)
+ : platform_(std::move(platform)) {
+ DCHECK_NOT_NULL(platform_);
+ }
+
+ PageAllocator* GetPageAllocator() override {
+ return platform_->GetPageAllocator();
+ }
+
+ void OnCriticalMemoryPressure() override {
+ platform_->OnCriticalMemoryPressure();
+ }
+
+ bool OnCriticalMemoryPressure(size_t length) override {
+ return platform_->OnCriticalMemoryPressure(length);
+ }
+
+ std::shared_ptr<TaskRunner> GetForegroundTaskRunner(
+ v8::Isolate* isolate) override {
+ return platform_->GetForegroundTaskRunner(isolate);
+ }
+
+ int NumberOfWorkerThreads() override { return 0; }
+
+ void CallOnWorkerThread(std::unique_ptr<Task> task) override {
+ // There is no defined time at which background tasks must run, so we can
+ // just execute them right away.
+ task->Run();
+ }
+
+ void CallDelayedOnWorkerThread(std::unique_ptr<Task> task,
+ double delay_in_seconds) override {
+ // Never run delayed tasks.
+ }
+
+ void CallOnForegroundThread(v8::Isolate* isolate, Task* task) override {
+ // This is a deprecated function and should not be called anymore.
+ UNREACHABLE();
+ }
+
+ void CallDelayedOnForegroundThread(v8::Isolate* isolate, Task* task,
+ double delay_in_seconds) override {
+ // This is a deprecated function and should not be called anymore.
+ UNREACHABLE();
+ }
+
+ void CallIdleOnForegroundThread(Isolate* isolate, IdleTask* task) override {
+ UNREACHABLE();
+ }
+
+ bool IdleTasksEnabled(Isolate* isolate) override { return false; }
+
+ double MonotonicallyIncreasingTime() override {
+ return synthetic_time_in_sec_ += 0.00001;
+ }
+
+ double CurrentClockTimeMillis() override {
+ return MonotonicallyIncreasingTime() * base::Time::kMillisecondsPerSecond;
+ }
+
+ v8::TracingController* GetTracingController() override {
+ return platform_->GetTracingController();
+ }
+
+ Platform* platform() const { return platform_.get(); }
+
+ private:
+ double synthetic_time_in_sec_ = 0.0;
+ std::unique_ptr<Platform> platform_;
+
+ DISALLOW_COPY_AND_ASSIGN(PredictablePlatform);
+};
+
+std::unique_ptr<Platform> MakePredictablePlatform(
+ std::unique_ptr<Platform> platform) {
+ return base::make_unique<PredictablePlatform>(std::move(platform));
+}
+
+class DelayedTasksPlatform : public Platform {
+ public:
+ explicit DelayedTasksPlatform(std::unique_ptr<Platform> platform)
+ : platform_(std::move(platform)) {
+ DCHECK_NOT_NULL(platform_);
+ }
+
+ explicit DelayedTasksPlatform(std::unique_ptr<Platform> platform,
+ int64_t random_seed)
+ : platform_(std::move(platform)), rng_(random_seed) {
+ DCHECK_NOT_NULL(platform_);
+ }
+
+ ~DelayedTasksPlatform() {
+ // When the platform shuts down, all task runners must be freed.
+ DCHECK_EQ(0, delayed_task_runners_.size());
+ }
+
+ PageAllocator* GetPageAllocator() override {
+ return platform_->GetPageAllocator();
+ }
+
+ void OnCriticalMemoryPressure() override {
+ platform_->OnCriticalMemoryPressure();
+ }
+
+ bool OnCriticalMemoryPressure(size_t length) override {
+ return platform_->OnCriticalMemoryPressure(length);
+ }
+
+ std::shared_ptr<TaskRunner> GetForegroundTaskRunner(
+ v8::Isolate* isolate) override {
+ std::shared_ptr<TaskRunner> runner =
+ platform_->GetForegroundTaskRunner(isolate);
+
+ base::MutexGuard lock_guard(&mutex_);
+ // Check if we can re-materialize the weak ptr in our map.
+ std::weak_ptr<DelayedTaskRunner>& weak_delayed_runner =
+ delayed_task_runners_[runner.get()];
+ std::shared_ptr<DelayedTaskRunner> delayed_runner =
+ weak_delayed_runner.lock();
+
+ if (!delayed_runner) {
+ // Create a new {DelayedTaskRunner} and keep a weak reference in our map.
+ delayed_runner.reset(new DelayedTaskRunner(runner, this),
+ DelayedTaskRunnerDeleter{});
+ weak_delayed_runner = delayed_runner;
+ }
+
+ return std::move(delayed_runner);
+ }
+
+ int NumberOfWorkerThreads() override {
+ return platform_->NumberOfWorkerThreads();
+ }
+
+ void CallOnWorkerThread(std::unique_ptr<Task> task) override {
+ platform_->CallOnWorkerThread(MakeDelayedTask(std::move(task)));
+ }
+
+ void CallDelayedOnWorkerThread(std::unique_ptr<Task> task,
+ double delay_in_seconds) override {
+ platform_->CallDelayedOnWorkerThread(MakeDelayedTask(std::move(task)),
+ delay_in_seconds);
+ }
+
+ void CallOnForegroundThread(v8::Isolate* isolate, Task* task) override {
+ // This is a deprecated function and should not be called anymore.
+ UNREACHABLE();
+ }
+
+ void CallDelayedOnForegroundThread(v8::Isolate* isolate, Task* task,
+ double delay_in_seconds) override {
+ // This is a deprecated function and should not be called anymore.
+ UNREACHABLE();
+ }
+
+ void CallIdleOnForegroundThread(Isolate* isolate, IdleTask* task) override {
+ // This is a deprecated function and should not be called anymore.
+ UNREACHABLE();
+ }
+
+ bool IdleTasksEnabled(Isolate* isolate) override {
+ return platform_->IdleTasksEnabled(isolate);
+ }
+
+ double MonotonicallyIncreasingTime() override {
+ return platform_->MonotonicallyIncreasingTime();
+ }
+
+ double CurrentClockTimeMillis() override {
+ return platform_->CurrentClockTimeMillis();
+ }
+
+ v8::TracingController* GetTracingController() override {
+ return platform_->GetTracingController();
+ }
+
+ private:
+ class DelayedTaskRunnerDeleter;
+ class DelayedTaskRunner final : public TaskRunner {
+ public:
+ DelayedTaskRunner(std::shared_ptr<TaskRunner> task_runner,
+ DelayedTasksPlatform* platform)
+ : task_runner_(task_runner), platform_(platform) {}
+
+ void PostTask(std::unique_ptr<Task> task) final {
+ task_runner_->PostTask(platform_->MakeDelayedTask(std::move(task)));
+ }
+
+ void PostDelayedTask(std::unique_ptr<Task> task,
+ double delay_in_seconds) final {
+ task_runner_->PostDelayedTask(platform_->MakeDelayedTask(std::move(task)),
+ delay_in_seconds);
+ }
+
+ void PostIdleTask(std::unique_ptr<IdleTask> task) final {
+ task_runner_->PostIdleTask(
+ platform_->MakeDelayedIdleTask(std::move(task)));
+ }
+
+ bool IdleTasksEnabled() final { return task_runner_->IdleTasksEnabled(); }
+
+ private:
+ friend class DelayedTaskRunnerDeleter;
+ std::shared_ptr<TaskRunner> task_runner_;
+ DelayedTasksPlatform* platform_;
+ };
+
+ class DelayedTaskRunnerDeleter {
+ public:
+ void operator()(DelayedTaskRunner* runner) const {
+ TaskRunner* original_runner = runner->task_runner_.get();
+ base::MutexGuard lock_guard(&runner->platform_->mutex_);
+ auto& delayed_task_runners = runner->platform_->delayed_task_runners_;
+ DCHECK_EQ(1, delayed_task_runners.count(original_runner));
+ delayed_task_runners.erase(original_runner);
+ }
+ };
+
+ class DelayedTask : public Task {
+ public:
+ DelayedTask(std::unique_ptr<Task> task, int32_t delay_ms)
+ : task_(std::move(task)), delay_ms_(delay_ms) {}
+ void Run() final {
+ base::OS::Sleep(base::TimeDelta::FromMicroseconds(delay_ms_));
+ task_->Run();
+ }
+
+ private:
+ std::unique_ptr<Task> task_;
+ int32_t delay_ms_;
+ };
+
+ class DelayedIdleTask : public IdleTask {
+ public:
+ DelayedIdleTask(std::unique_ptr<IdleTask> task, int32_t delay_ms)
+ : task_(std::move(task)), delay_ms_(delay_ms) {}
+ void Run(double deadline_in_seconds) final {
+ base::OS::Sleep(base::TimeDelta::FromMicroseconds(delay_ms_));
+ task_->Run(deadline_in_seconds);
+ }
+
+ private:
+ std::unique_ptr<IdleTask> task_;
+ int32_t delay_ms_;
+ };
+
+ std::unique_ptr<Platform> platform_;
+
+ // The Mutex protects the RNG, which is used by foreground and background
+ // threads, and the {delayed_task_runners_} map, which might be accessed
+ // concurrently by the shared_ptr destructor.
+ base::Mutex mutex_;
+ base::RandomNumberGenerator rng_;
+ std::unordered_map<TaskRunner*, std::weak_ptr<DelayedTaskRunner>>
+ delayed_task_runners_;
+
+ int32_t GetRandomDelayInMilliseconds() {
+ base::MutexGuard lock_guard(&mutex_);
+ double delay_fraction = rng_.NextDouble();
+ // Sleep up to 100ms (100000us). Square {delay_fraction} to shift
+ // distribution towards shorter sleeps.
+ return 1e5 * (delay_fraction * delay_fraction);
+ }
+
+ std::unique_ptr<Task> MakeDelayedTask(std::unique_ptr<Task> task) {
+ return base::make_unique<DelayedTask>(std::move(task),
+ GetRandomDelayInMilliseconds());
+ }
+
+ std::unique_ptr<IdleTask> MakeDelayedIdleTask(
+ std::unique_ptr<IdleTask> task) {
+ return base::make_unique<DelayedIdleTask>(std::move(task),
+ GetRandomDelayInMilliseconds());
+ }
+
+ DISALLOW_COPY_AND_ASSIGN(DelayedTasksPlatform);
+};
+
+std::unique_ptr<Platform> MakeDelayedTasksPlatform(
+ std::unique_ptr<Platform> platform, int64_t random_seed) {
+ if (random_seed) {
+ return base::make_unique<DelayedTasksPlatform>(std::move(platform),
+ random_seed);
+ }
+ return base::make_unique<DelayedTasksPlatform>(std::move(platform));
+}
+
+} // namespace v8
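Squaring the uniform draw in GetRandomDelayInMilliseconds biases the injected sleeps toward the short end while keeping the full [0, 100ms] range reachable: for U uniform on [0, 1), the delay is 100000 * U^2 microseconds, so the mean is 100000 / 3 ≈ 33ms and the median is 100000 * 0.25 = 25ms, versus a 50ms mean and median for the unsquared draw (assuming NextDouble is uniform).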
diff --git a/deps/v8/src/d8-platforms.h b/deps/v8/src/d8-platforms.h
new file mode 100644
index 0000000000..d78207a5e1
--- /dev/null
+++ b/deps/v8/src/d8-platforms.h
@@ -0,0 +1,29 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_D8_PLATFORMS_H_
+#define V8_D8_PLATFORMS_H_
+
+#include <cstdint>
+#include <memory>
+
+namespace v8 {
+
+class Platform;
+
+// Returns a predictable v8::Platform implementation.
+// Worker threads are disabled, idle tasks are disallowed, and the time reported
+// by {MonotonicallyIncreasingTime} is deterministic.
+std::unique_ptr<Platform> MakePredictablePlatform(
+ std::unique_ptr<Platform> platform);
+
+// Returns a v8::Platform implementation which randomly delays tasks (both
+// foreground and background) for stress-testing different interleavings.
+// If {random_seed} is 0, a random seed is chosen.
+std::unique_ptr<Platform> MakeDelayedTasksPlatform(
+ std::unique_ptr<Platform> platform, int64_t random_seed);
+
+} // namespace v8
+
+#endif // V8_D8_PLATFORMS_H_
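Both factory functions decorate an existing platform rather than replace it, so d8 can layer them over the default implementation before initializing V8. A hedged wiring sketch (the predictable/stress_delay_tasks/random_seed flags are placeholders for whatever options d8.cc actually consults):

    std::unique_ptr<v8::Platform> platform = v8::platform::NewDefaultPlatform();
    if (predictable) {
      platform = v8::MakePredictablePlatform(std::move(platform));
    } else if (stress_delay_tasks) {
      platform = v8::MakeDelayedTasksPlatform(std::move(platform), random_seed);
    }
    v8::V8::InitializePlatform(platform.get());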
diff --git a/deps/v8/src/d8.cc b/deps/v8/src/d8.cc
index 5295f3957c..cd54285d3a 100644
--- a/deps/v8/src/d8.cc
+++ b/deps/v8/src/d8.cc
@@ -1,4 +1,4 @@
-/// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
@@ -17,10 +17,6 @@
#include "src/third_party/vtune/v8-vtune.h"
#endif
-#include "src/d8-console.h"
-#include "src/d8.h"
-#include "src/ostreams.h"
-
#include "include/libplatform/libplatform.h"
#include "include/libplatform/v8-tracing.h"
#include "include/v8-inspector.h"
@@ -31,11 +27,15 @@
#include "src/base/platform/time.h"
#include "src/base/sys-info.h"
#include "src/basic-block-profiler.h"
+#include "src/d8-console.h"
+#include "src/d8-platforms.h"
+#include "src/d8.h"
#include "src/debug/debug-interface.h"
#include "src/interpreter/interpreter.h"
#include "src/msan.h"
#include "src/objects-inl.h"
#include "src/objects.h"
+#include "src/ostreams.h"
#include "src/snapshot/natives.h"
#include "src/trap-handler/trap-handler.h"
#include "src/utils.h"
@@ -190,92 +190,9 @@ class MockArrayBufferAllocatiorWithLimit : public MockArrayBufferAllocator {
std::atomic<size_t> space_left_;
};
-// Predictable v8::Platform implementation. Worker threads are disabled, idle
-// tasks are disallowed, and the time reported by {MonotonicallyIncreasingTime}
-// is deterministic.
-class PredictablePlatform : public Platform {
- public:
- explicit PredictablePlatform(std::unique_ptr<Platform> platform)
- : platform_(std::move(platform)) {
- DCHECK_NOT_NULL(platform_);
- }
-
- PageAllocator* GetPageAllocator() override {
- return platform_->GetPageAllocator();
- }
-
- void OnCriticalMemoryPressure() override {
- platform_->OnCriticalMemoryPressure();
- }
-
- bool OnCriticalMemoryPressure(size_t length) override {
- return platform_->OnCriticalMemoryPressure(length);
- }
-
- std::shared_ptr<TaskRunner> GetForegroundTaskRunner(
- v8::Isolate* isolate) override {
- return platform_->GetForegroundTaskRunner(isolate);
- }
-
- int NumberOfWorkerThreads() override { return 0; }
-
- void CallOnWorkerThread(std::unique_ptr<Task> task) override {
- // It's not defined when background tasks are being executed, so we can just
- // execute them right away.
- task->Run();
- }
-
- void CallDelayedOnWorkerThread(std::unique_ptr<Task> task,
- double delay_in_seconds) override {
- // Never run delayed tasks.
- }
-
- void CallOnForegroundThread(v8::Isolate* isolate, Task* task) override {
- // This is a deprecated function and should not be called anymore.
- UNREACHABLE();
- }
-
- void CallDelayedOnForegroundThread(v8::Isolate* isolate, Task* task,
- double delay_in_seconds) override {
- // This is a deprecated function and should not be called anymore.
- UNREACHABLE();
- }
-
- void CallIdleOnForegroundThread(Isolate* isolate, IdleTask* task) override {
- UNREACHABLE();
- }
-
- bool IdleTasksEnabled(Isolate* isolate) override { return false; }
-
- double MonotonicallyIncreasingTime() override {
- return synthetic_time_in_sec_ += 0.00001;
- }
-
- double CurrentClockTimeMillis() override {
- return MonotonicallyIncreasingTime() * base::Time::kMillisecondsPerSecond;
- }
-
- v8::TracingController* GetTracingController() override {
- return platform_->GetTracingController();
- }
-
- Platform* platform() const { return platform_.get(); }
-
- private:
- double synthetic_time_in_sec_ = 0.0;
- std::unique_ptr<Platform> platform_;
-
- DISALLOW_COPY_AND_ASSIGN(PredictablePlatform);
-};
-
+v8::Platform* g_default_platform;
std::unique_ptr<v8::Platform> g_platform;
-v8::Platform* GetDefaultPlatform() {
- return i::FLAG_verify_predictable
- ? static_cast<PredictablePlatform*>(g_platform.get())->platform()
- : g_platform.get();
-}
-
static Local<Value> Throw(Isolate* isolate, const char* message) {
return isolate->ThrowException(
String::NewFromUtf8(isolate, message, NewStringType::kNormal)
@@ -424,16 +341,17 @@ static platform::tracing::TraceConfig* CreateTraceConfigFromJSON(
class ExternalOwningOneByteStringResource
: public String::ExternalOneByteStringResource {
public:
- ExternalOwningOneByteStringResource() : length_(0) {}
- ExternalOwningOneByteStringResource(std::unique_ptr<const char[]> data,
- size_t length)
- : data_(std::move(data)), length_(length) {}
- const char* data() const override { return data_.get(); }
- size_t length() const override { return length_; }
+ ExternalOwningOneByteStringResource() {}
+ ExternalOwningOneByteStringResource(
+ std::unique_ptr<base::OS::MemoryMappedFile> file)
+ : file_(std::move(file)) {}
+ const char* data() const override {
+ return static_cast<char*>(file_->memory());
+ }
+ size_t length() const override { return file_->size(); }
private:
- std::unique_ptr<const char[]> data_;
- size_t length_;
+ std::unique_ptr<base::OS::MemoryMappedFile> file_;
};
CounterMap* Shell::counter_map_;
@@ -510,7 +428,7 @@ class BackgroundCompileThread : public base::Thread {
ScriptCompiler::CachedData* Shell::LookupCodeCache(Isolate* isolate,
Local<Value> source) {
- base::LockGuard<base::Mutex> lock_guard(cached_code_mutex_.Pointer());
+ base::MutexGuard lock_guard(cached_code_mutex_.Pointer());
CHECK(source->IsString());
v8::String::Utf8Value key(isolate, source);
DCHECK(*key);
@@ -528,7 +446,7 @@ ScriptCompiler::CachedData* Shell::LookupCodeCache(Isolate* isolate,
void Shell::StoreInCodeCache(Isolate* isolate, Local<Value> source,
const ScriptCompiler::CachedData* cache_data) {
- base::LockGuard<base::Mutex> lock_guard(cached_code_mutex_.Pointer());
+ base::MutexGuard lock_guard(cached_code_mutex_.Pointer());
CHECK(source->IsString());
if (cache_data == nullptr) return;
v8::String::Utf8Value key(isolate, source);
@@ -561,8 +479,6 @@ bool Shell::ExecuteString(Isolate* isolate, Local<String> source,
Local<Context> context(isolate->GetCurrentContext());
ScriptOrigin origin(name);
- DCHECK(options.compile_options != ScriptCompiler::kProduceParserCache);
- DCHECK(options.compile_options != ScriptCompiler::kConsumeParserCache);
if (options.compile_options == ScriptCompiler::kConsumeCodeCache) {
ScriptCompiler::CachedData* cached_code =
LookupCodeCache(isolate, source);
@@ -1011,7 +927,7 @@ PerIsolateData::RealmScope::RealmScope(PerIsolateData* data) : data_(data) {
data_->realm_switch_ = 0;
data_->realms_ = new Global<Context>[1];
data_->realms_[0].Reset(data_->isolate_,
- data_->isolate_->GetEnteredContext());
+ data_->isolate_->GetEnteredOrMicrotaskContext());
}
@@ -1023,14 +939,9 @@ PerIsolateData::RealmScope::~RealmScope() {
Global<Context>& realm = data_->realms_[i];
if (realm.IsEmpty()) continue;
DisposeModuleEmbedderData(realm.Get(data_->isolate_));
- // TODO(adamk): No need to reset manually, Globals reset when destructed.
- realm.Reset();
}
data_->realm_count_ = 0;
delete[] data_->realms_;
- // TODO(adamk): No need to reset manually, Globals reset when destructed.
- if (!data_->realm_shared_.IsEmpty())
- data_->realm_shared_.Reset();
}
@@ -1078,7 +989,7 @@ void Shell::PerformanceNow(const v8::FunctionCallbackInfo<v8::Value>& args) {
void Shell::RealmCurrent(const v8::FunctionCallbackInfo<v8::Value>& args) {
Isolate* isolate = args.GetIsolate();
PerIsolateData* data = PerIsolateData::Get(isolate);
- int index = data->RealmFind(isolate->GetEnteredContext());
+ int index = data->RealmFind(isolate->GetEnteredOrMicrotaskContext());
if (index == -1) return;
args.GetReturnValue().Set(index);
}
@@ -1161,7 +1072,7 @@ void Shell::RealmCreateAllowCrossRealmAccess(
Local<Context> context;
if (CreateRealm(args, -1, v8::MaybeLocal<Value>()).ToLocal(&context)) {
context->SetSecurityToken(
- args.GetIsolate()->GetEnteredContext()->GetSecurityToken());
+ args.GetIsolate()->GetEnteredOrMicrotaskContext()->GetSecurityToken());
}
}
@@ -1481,7 +1392,7 @@ void Shell::WorkerNew(const v8::FunctionCallbackInfo<v8::Value>& args) {
}
{
- base::LockGuard<base::Mutex> lock_guard(workers_mutex_.Pointer());
+ base::MutexGuard lock_guard(workers_mutex_.Pointer());
if (workers_.size() >= kMaxWorkers) {
Throw(args.GetIsolate(), "Too many workers, I won't let you create more");
return;
@@ -1749,18 +1660,11 @@ Local<String> Shell::Stringify(Isolate* isolate, Local<Value> value) {
v8::Local<v8::Context> context =
v8::Local<v8::Context>::New(isolate, evaluation_context_);
if (stringify_function_.IsEmpty()) {
- int source_index = i::NativesCollection<i::D8>::GetIndex("d8");
- i::Vector<const char> source_string =
- i::NativesCollection<i::D8>::GetScriptSource(source_index);
- i::Vector<const char> source_name =
- i::NativesCollection<i::D8>::GetScriptName(source_index);
Local<String> source =
- String::NewFromUtf8(isolate, source_string.start(),
- NewStringType::kNormal, source_string.length())
+ String::NewFromUtf8(isolate, stringify_source_, NewStringType::kNormal)
.ToLocalChecked();
Local<String> name =
- String::NewFromUtf8(isolate, source_name.start(),
- NewStringType::kNormal, source_name.length())
+ String::NewFromUtf8(isolate, "d8-stringify", NewStringType::kNormal)
.ToLocalChecked();
ScriptOrigin origin(name);
Local<Script> script =
@@ -2014,32 +1918,30 @@ void Shell::Initialize(Isolate* isolate) {
Local<Context> Shell::CreateEvaluationContext(Isolate* isolate) {
// This needs to be a critical section, since this function is not thread-safe.
- base::LockGuard<base::Mutex> lock_guard(context_mutex_.Pointer());
+ base::MutexGuard lock_guard(context_mutex_.Pointer());
// Initialize the global objects
Local<ObjectTemplate> global_template = CreateGlobalTemplate(isolate);
EscapableHandleScope handle_scope(isolate);
Local<Context> context = Context::New(isolate, nullptr, global_template);
DCHECK(!context.IsEmpty());
InitializeModuleEmbedderData(context);
- Context::Scope scope(context);
-
- i::Factory* factory = reinterpret_cast<i::Isolate*>(isolate)->factory();
- i::JSArguments js_args = i::FLAG_js_arguments;
- i::Handle<i::FixedArray> arguments_array =
- factory->NewFixedArray(js_args.argc);
- for (int j = 0; j < js_args.argc; j++) {
- i::Handle<i::String> arg =
- factory->NewStringFromUtf8(i::CStrVector(js_args[j])).ToHandleChecked();
- arguments_array->set(j, *arg);
- }
- i::Handle<i::JSArray> arguments_jsarray =
- factory->NewJSArrayWithElements(arguments_array);
- context->Global()
- ->Set(context,
- String::NewFromUtf8(isolate, "arguments", NewStringType::kNormal)
- .ToLocalChecked(),
- Utils::ToLocal(arguments_jsarray))
- .FromJust();
+ if (options.include_arguments) {
+ Context::Scope scope(context);
+ const std::vector<const char*>& args = options.arguments;
+ int size = static_cast<int>(args.size());
+ Local<Array> array = Array::New(isolate, size);
+ for (int i = 0; i < size; i++) {
+ Local<String> arg =
+ v8::String::NewFromUtf8(isolate, args[i], v8::NewStringType::kNormal)
+ .ToLocalChecked();
+ Local<Number> index = v8::Number::New(isolate, i);
+ array->Set(context, index, arg).FromJust();
+ }
+ Local<String> name =
+ String::NewFromUtf8(isolate, "arguments", NewStringType::kInternalized)
+ .ToLocalChecked();
+ context->Global()->Set(context, name, array).FromJust();
+ }
return handle_scope.Escape(context);
}
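
Together with the new "--" handling in Shell::SetOptions further down, this exposes everything after "--" on the d8 command line to scripts as an "arguments" array, unless --no-arguments is given. A hedged usage sketch, assuming d8's usual -e flag:

    $ d8 -e 'print(arguments.join(" "))' -- one two
    one two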
@@ -2318,19 +2220,20 @@ void Shell::ReadBuffer(const v8::FunctionCallbackInfo<v8::Value>& args) {
// Reads a file into a v8 string.
Local<String> Shell::ReadFile(Isolate* isolate, const char* name) {
- int size = 0;
- char* chars = ReadChars(name, &size);
- if (chars == nullptr) return Local<String>();
+ std::unique_ptr<base::OS::MemoryMappedFile> file(
+ base::OS::MemoryMappedFile::open(name));
+ if (!file) return Local<String>();
+
+ int size = static_cast<int>(file->size());
+ char* chars = static_cast<char*>(file->memory());
Local<String> result;
if (i::FLAG_use_external_strings && i::String::IsAscii(chars, size)) {
String::ExternalOneByteStringResource* resource =
- new ExternalOwningOneByteStringResource(
- std::unique_ptr<const char[]>(chars), size);
+ new ExternalOwningOneByteStringResource(std::move(file));
result = String::NewExternalOneByte(isolate, resource).ToLocalChecked();
} else {
result = String::NewFromUtf8(isolate, chars, NewStringType::kNormal, size)
.ToLocalChecked();
- delete[] chars;
}
return result;
}
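
ReadFile now memory-maps the file instead of copying it into a heap buffer; for ASCII content the mapping is handed straight to an external string, so V8 reads the bytes in place. A sketch of the resource type this relies on (the real definition lives in d8.h; the shape below is an assumption based on the constructor call above):

    class ExternalOwningOneByteStringResource
        : public v8::String::ExternalOneByteStringResource {
     public:
      explicit ExternalOwningOneByteStringResource(
          std::unique_ptr<v8::base::OS::MemoryMappedFile> file)
          : file_(std::move(file)) {}
      // The string's characters are the mapped bytes themselves; the mapping
      // stays alive as long as the external string does.
      const char* data() const override {
        return static_cast<const char*>(file_->memory());
      }
      size_t length() const override { return file_->size(); }

     private:
      std::unique_ptr<v8::base::OS::MemoryMappedFile> file_;
    };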
@@ -2644,14 +2547,14 @@ ExternalizedContents::~ExternalizedContents() {
}
void SerializationDataQueue::Enqueue(std::unique_ptr<SerializationData> data) {
- base::LockGuard<base::Mutex> lock_guard(&mutex_);
+ base::MutexGuard lock_guard(&mutex_);
data_.push_back(std::move(data));
}
bool SerializationDataQueue::Dequeue(
std::unique_ptr<SerializationData>* out_data) {
out_data->reset();
- base::LockGuard<base::Mutex> lock_guard(&mutex_);
+ base::MutexGuard lock_guard(&mutex_);
if (data_.empty()) return false;
*out_data = std::move(data_[0]);
data_.erase(data_.begin());
@@ -2660,13 +2563,13 @@ bool SerializationDataQueue::Dequeue(
bool SerializationDataQueue::IsEmpty() {
- base::LockGuard<base::Mutex> lock_guard(&mutex_);
+ base::MutexGuard lock_guard(&mutex_);
return data_.empty();
}
void SerializationDataQueue::Clear() {
- base::LockGuard<base::Mutex> lock_guard(&mutex_);
+ base::MutexGuard lock_guard(&mutex_);
data_.clear();
}
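
The LockGuard<base::Mutex> to MutexGuard renames here and throughout the file are mechanical; MutexGuard is presumably just the convenience alias V8's base library provides, along the lines of:

    namespace v8 {
    namespace base {
    // Same RAII locking behavior, shorter spelling for the common case.
    using MutexGuard = LockGuard<Mutex>;
    }  // namespace base
    }  // namespace v8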
@@ -2843,7 +2746,17 @@ void SetFlagsFromString(const char* flags) {
bool Shell::SetOptions(int argc, char* argv[]) {
bool logfile_per_isolate = false;
for (int i = 0; i < argc; i++) {
- if (strcmp(argv[i], "--stress-opt") == 0) {
+ if (strcmp(argv[i], "--") == 0) {
+ argv[i] = nullptr;
+ for (int j = i + 1; j < argc; j++) {
+ options.arguments.push_back(argv[j]);
+ argv[j] = nullptr;
+ }
+ break;
+ } else if (strcmp(argv[i], "--no-arguments") == 0) {
+ options.include_arguments = false;
+ argv[i] = nullptr;
+ } else if (strcmp(argv[i], "--stress-opt") == 0) {
options.stress_opt = true;
argv[i] = nullptr;
} else if (strcmp(argv[i], "--nostress-opt") == 0 ||
@@ -2970,6 +2883,10 @@ bool Shell::SetOptions(int argc, char* argv[]) {
} else if (strncmp(argv[i], "--thread-pool-size=", 19) == 0) {
options.thread_pool_size = atoi(argv[i] + 19);
argv[i] = nullptr;
+ } else if (strcmp(argv[i], "--stress-delay-tasks") == 0) {
+ // Delay execution of tasks by 0-100ms randomly (based on --random-seed).
+ options.stress_delay_tasks = true;
+ argv[i] = nullptr;
}
}
@@ -3065,7 +2982,7 @@ void Shell::CollectGarbage(Isolate* isolate) {
}
void Shell::SetWaitUntilDone(Isolate* isolate, bool value) {
- base::LockGuard<base::Mutex> guard(isolate_status_lock_.Pointer());
+ base::MutexGuard guard(isolate_status_lock_.Pointer());
if (isolate_status_.count(isolate) == 0) {
isolate_status_.insert(std::make_pair(isolate, value));
} else {
@@ -3077,17 +2994,17 @@ namespace {
bool ProcessMessages(
Isolate* isolate,
const std::function<platform::MessageLoopBehavior()>& behavior) {
- Platform* platform = GetDefaultPlatform();
while (true) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
i::SaveContext saved_context(i_isolate);
- i_isolate->set_context(nullptr);
+ i_isolate->set_context(i::Context());
SealHandleScope shs(isolate);
- while (v8::platform::PumpMessageLoop(platform, isolate, behavior())) {
- isolate->RunMicrotasks();
+ while (v8::platform::PumpMessageLoop(g_default_platform, isolate,
+ behavior())) {
+ MicrotasksScope::PerformCheckpoint(isolate);
}
- if (platform->IdleTasksEnabled(isolate)) {
- v8::platform::RunIdleTasks(platform, isolate,
+ if (g_default_platform->IdleTasksEnabled(isolate)) {
+ v8::platform::RunIdleTasks(g_default_platform, isolate,
50.0 / base::Time::kMillisecondsPerSecond);
}
HandleScope handle_scope(isolate);
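
Two behavioral notes on this pump loop: the saved context is now cleared with a default-constructed i::Context value instead of nullptr (part of the Object-as-value migration), and microtasks run through MicrotasksScope::PerformCheckpoint, which respects embedder scope nesting rather than unconditionally flushing the queue. A minimal embedder-side sketch of the new shape, assuming the embedder's v8::Platform* platform and v8::Isolate* isolate:

    while (v8::platform::PumpMessageLoop(platform, isolate)) {
      // Unlike isolate->RunMicrotasks(), PerformCheckpoint is a no-op while
      // any MicrotasksScope is still open on this isolate.
      v8::MicrotasksScope::PerformCheckpoint(isolate);
    }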
@@ -3110,7 +3027,7 @@ bool ProcessMessages(
void Shell::CompleteMessageLoop(Isolate* isolate) {
auto get_waiting_behaviour = [isolate]() {
- base::LockGuard<base::Mutex> guard(isolate_status_lock_.Pointer());
+ base::MutexGuard guard(isolate_status_lock_.Pointer());
DCHECK_GT(isolate_status_.count(isolate), 0);
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
i::wasm::WasmEngine* wasm_engine = i_isolate->wasm_engine();
@@ -3192,7 +3109,7 @@ class Serializer : public ValueSerializer::Delegate {
}
Maybe<uint32_t> GetWasmModuleTransferId(
- Isolate* isolate, Local<WasmCompiledModule> module) override {
+ Isolate* isolate, Local<WasmModuleObject> module) override {
DCHECK_NOT_NULL(data_);
for (size_t index = 0; index < wasm_modules_.size(); ++index) {
if (wasm_modules_[index] == module) {
@@ -3273,13 +3190,13 @@ class Serializer : public ValueSerializer::Delegate {
for (const auto& global_array_buffer : array_buffers_) {
Local<ArrayBuffer> array_buffer =
Local<ArrayBuffer>::New(isolate_, global_array_buffer);
- if (!array_buffer->IsNeuterable()) {
+ if (!array_buffer->IsDetachable()) {
Throw(isolate_, "ArrayBuffer could not be transferred");
return Nothing<bool>();
}
ArrayBuffer::Contents contents = MaybeExternalize(array_buffer);
- array_buffer->Neuter();
+ array_buffer->Detach();
data_->array_buffer_contents_.push_back(contents);
}
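
Neuter/IsNeuterable become Detach/IsDetachable, matching the spec's ArrayBuffer detachment terminology; the transfer logic is otherwise unchanged. A hedged sketch of the per-buffer sequence (d8's MaybeExternalize helper additionally copes with buffers that are already externalized):

    // Assumes buffer is a v8::Local<v8::ArrayBuffer>.
    if (buffer->IsDetachable()) {
      // Take ownership of the backing store, then detach: the JS object's
      // byte length drops to zero and its contents become inaccessible.
      v8::ArrayBuffer::Contents contents = buffer->Externalize();
      buffer->Detach();
      // ... hand contents to the receiving isolate ...
    }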
@@ -3291,7 +3208,7 @@ class Serializer : public ValueSerializer::Delegate {
std::unique_ptr<SerializationData> data_;
std::vector<Global<ArrayBuffer>> array_buffers_;
std::vector<Global<SharedArrayBuffer>> shared_array_buffers_;
- std::vector<Global<WasmCompiledModule>> wasm_modules_;
+ std::vector<Global<WasmModuleObject>> wasm_modules_;
std::vector<ExternalizedContents> externalized_contents_;
size_t current_memory_usage_;
@@ -3335,14 +3252,14 @@ class Deserializer : public ValueDeserializer::Delegate {
return MaybeLocal<SharedArrayBuffer>();
}
- MaybeLocal<WasmCompiledModule> GetWasmModuleFromId(
+ MaybeLocal<WasmModuleObject> GetWasmModuleFromId(
Isolate* isolate, uint32_t transfer_id) override {
DCHECK_NOT_NULL(data_);
if (transfer_id < data_->transferrable_modules().size()) {
- return WasmCompiledModule::FromTransferrableModule(
+ return WasmModuleObject::FromTransferrableModule(
isolate_, data_->transferrable_modules().at(transfer_id));
}
- return MaybeLocal<WasmCompiledModule>();
+ return MaybeLocal<WasmModuleObject>();
}
private:
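
WasmCompiledModule is renamed to WasmModuleObject throughout; TransferrableModule keeps its role as the isolate-independent handle for passing modules between workers. A hedged sketch of the round trip, assuming module and receiving_isolate are in scope:

    // In the sending isolate:
    v8::WasmModuleObject::TransferrableModule transferred =
        module->GetTransferrableModule();
    // In the receiving isolate:
    v8::MaybeLocal<v8::WasmModuleObject> copy =
        v8::WasmModuleObject::FromTransferrableModule(receiving_isolate,
                                                      transferred);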
@@ -3363,7 +3280,7 @@ std::unique_ptr<SerializationData> Shell::SerializeValue(
data = serializer.Release();
}
// Append externalized contents even when WriteValue fails.
- base::LockGuard<base::Mutex> lock_guard(workers_mutex_.Pointer());
+ base::MutexGuard lock_guard(workers_mutex_.Pointer());
serializer.AppendExternalizedContentsTo(&externalized_contents_);
return data;
}
@@ -3383,7 +3300,7 @@ void Shell::CleanupWorkers() {
// create a new Worker, it would deadlock.
std::vector<Worker*> workers_copy;
{
- base::LockGuard<base::Mutex> lock_guard(workers_mutex_.Pointer());
+ base::MutexGuard lock_guard(workers_mutex_.Pointer());
allow_new_workers_ = false;
workers_copy.swap(workers_);
}
@@ -3394,7 +3311,7 @@ void Shell::CleanupWorkers() {
}
// Now that all workers are terminated, we can re-enable Worker creation.
- base::LockGuard<base::Mutex> lock_guard(workers_mutex_.Pointer());
+ base::MutexGuard lock_guard(workers_mutex_.Pointer());
allow_new_workers_ = true;
externalized_contents_.clear();
}
@@ -3426,8 +3343,16 @@ int Shell::Main(int argc, char* argv[]) {
g_platform = v8::platform::NewDefaultPlatform(
options.thread_pool_size, v8::platform::IdleTaskSupport::kEnabled,
in_process_stack_dumping, std::move(tracing));
+ g_default_platform = g_platform.get();
if (i::FLAG_verify_predictable) {
- g_platform.reset(new PredictablePlatform(std::move(g_platform)));
+ g_platform = MakePredictablePlatform(std::move(g_platform));
+ }
+ if (options.stress_delay_tasks) {
+ int64_t random_seed = i::FLAG_fuzzer_random_seed;
+ if (!random_seed) random_seed = i::FLAG_random_seed;
+ // If random_seed is still 0 here, the {DelayedTasksPlatform} will choose a
+ // random seed.
+ g_platform = MakeDelayedTasksPlatform(std::move(g_platform), random_seed);
}
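
Platform wrappers are now composed through factory functions, and the unwrapped default platform is remembered in g_default_platform so the message pump above can bypass the wrappers. The layering, sketched:

    std::unique_ptr<v8::Platform> platform =
        v8::platform::NewDefaultPlatform();
    v8::Platform* default_platform = platform.get();  // kept for direct pumping
    platform = MakePredictablePlatform(std::move(platform));  // if requested
    // A seed of 0 lets the delayed-tasks wrapper pick its own random seed.
    platform = MakeDelayedTasksPlatform(std::move(platform), /*random_seed=*/0);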
if (i::FLAG_trace_turbo_cfg_file == nullptr) {
@@ -3563,6 +3488,8 @@ int Shell::Main(int argc, char* argv[]) {
DCHECK(options.compile_options == v8::ScriptCompiler::kEagerCompile ||
options.compile_options == v8::ScriptCompiler::kNoCompileOptions);
options.compile_options = v8::ScriptCompiler::kConsumeCodeCache;
+ options.code_cache_options =
+ ShellOptions::CodeCacheOptions::kNoProduceCache;
printf("============ Run: Consume code cache ============\n");
// Second run to consume the cache in current isolate
diff --git a/deps/v8/src/d8.h b/deps/v8/src/d8.h
index 29f693bcb0..568a54188f 100644
--- a/deps/v8/src/d8.h
+++ b/deps/v8/src/d8.h
@@ -186,7 +186,7 @@ class SerializationData {
shared_array_buffer_contents() {
return shared_array_buffer_contents_;
}
- const std::vector<WasmCompiledModule::TransferrableModule>&
+ const std::vector<WasmModuleObject::TransferrableModule>&
transferrable_modules() {
return transferrable_modules_;
}
@@ -200,7 +200,7 @@ class SerializationData {
size_t size_;
std::vector<ArrayBuffer::Contents> array_buffer_contents_;
std::vector<SharedArrayBuffer::Contents> shared_array_buffer_contents_;
- std::vector<WasmCompiledModule::TransferrableModule> transferrable_modules_;
+ std::vector<WasmModuleObject::TransferrableModule> transferrable_modules_;
private:
friend class Serializer;
@@ -386,6 +386,9 @@ class ShellOptions {
bool enable_os_system = false;
bool quiet_load = false;
int thread_pool_size = 0;
+ bool stress_delay_tasks = false;
+ std::vector<const char*> arguments;
+ bool include_arguments = true;
};
class Shell : public i::AllStatic {
@@ -540,6 +543,7 @@ class Shell : public i::AllStatic {
static Global<Context> evaluation_context_;
static base::OnceType quit_once_;
static Global<Function> stringify_function_;
+ static const char* stringify_source_;
static CounterMap* counter_map_;
// We statically allocate a set of local counters to be used if we
// don't want to store the stats in a memory-mapped file
diff --git a/deps/v8/src/date.cc b/deps/v8/src/date.cc
index 88a056b367..8562eb279a 100644
--- a/deps/v8/src/date.cc
+++ b/deps/v8/src/date.cc
@@ -4,12 +4,11 @@
#include "src/date.h"
+#include "src/base/overflowing-math.h"
#include "src/conversions.h"
#include "src/objects-inl.h"
-#include "src/objects.h"
-
#ifdef V8_INTL_SUPPORT
-#include "src/intl.h"
+#include "src/objects/intl-objects.h"
#endif
namespace v8 {
@@ -27,11 +26,10 @@ static const char kDaysInMonths[] =
{31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31};
DateCache::DateCache()
- : stamp_(nullptr),
+ : stamp_(kNullAddress),
tz_cache_(
#ifdef V8_INTL_SUPPORT
- FLAG_icu_timezone_data ? new ICUTimezoneCache()
- : base::OS::CreateTimezoneCache()
+ Intl::CreateTimeZoneCache()
#else
base::OS::CreateTimezoneCache()
#endif
@@ -40,9 +38,8 @@ DateCache::DateCache()
}
void DateCache::ResetDateCache() {
- static const int kMaxStamp = Smi::kMaxValue;
- if (stamp_->value() >= kMaxStamp) {
- stamp_ = Smi::kZero;
+ if (stamp_->value() >= Smi::kMaxValue) {
+ stamp_ = Smi::zero();
} else {
stamp_ = Smi::FromInt(stamp_->value() + 1);
}
@@ -287,7 +284,8 @@ int DateCache::GetLocalOffsetFromOS(int64_t time_ms, bool is_utc) {
void DateCache::ExtendTheAfterSegment(int time_sec, int offset_ms) {
if (after_->offset_ms == offset_ms &&
- after_->start_sec <= time_sec + kDefaultDSTDeltaInSec &&
+ after_->start_sec <=
+ base::AddWithWraparound(time_sec, kDefaultDSTDeltaInSec) &&
time_sec <= after_->end_sec) {
// Extend the after_ segment.
after_->start_sec = time_sec;
diff --git a/deps/v8/src/date.h b/deps/v8/src/date.h
index 28947ebcbe..066eb8edaa 100644
--- a/deps/v8/src/date.h
+++ b/deps/v8/src/date.h
@@ -7,6 +7,7 @@
#include "src/base/timezone-cache.h"
#include "src/globals.h"
+#include "src/objects/smi.h"
namespace v8 {
namespace internal {
@@ -160,7 +161,7 @@ class DateCache {
// We increment the stamp each time the timezone information changes.
// JSDate objects perform stamp check and invalidate their caches if
// their saved stamp is not equal to the current stamp.
- Smi* stamp() { return stamp_; }
+ Smi stamp() { return stamp_; }
void* stamp_address() { return &stamp_; }
// These functions are virtual so that we can override them when testing.
@@ -217,7 +218,7 @@ class DateCache {
return segment->start_sec > segment->end_sec;
}
- Smi* stamp_;
+ Smi stamp_;
// Daylight Saving Time cache.
DST dst_[kDSTSize];
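
The Smi* to Smi change is part of the wider migration of V8's Object hierarchy from C++ pointer types to tagged-value classes: fields hold a Smi by value, null pointers become explicit sentinels (kNullAddress, Smi::zero()), and operator-> is overloaded so call sites keep their shape. Sketched against the stamp field above:

    Smi stamp = Smi::zero();                     // was: Smi* stamp_ = nullptr;
    if (stamp->value() >= Smi::kMaxValue) {
      stamp = Smi::zero();                       // wrap, as in ResetDateCache
    } else {
      stamp = Smi::FromInt(stamp->value() + 1);  // bump on timezone change
    }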
diff --git a/deps/v8/src/dateparser-inl.h b/deps/v8/src/dateparser-inl.h
index a20e8393ce..b4376b4789 100644
--- a/deps/v8/src/dateparser-inl.h
+++ b/deps/v8/src/dateparser-inl.h
@@ -7,16 +7,15 @@
#include "src/char-predicates-inl.h"
#include "src/dateparser.h"
-#include "src/unicode-cache-inl.h"
+#include "src/isolate.h"
namespace v8 {
namespace internal {
template <typename Char>
-bool DateParser::Parse(Isolate* isolate, Vector<Char> str, FixedArray* out) {
- UnicodeCache* unicode_cache = isolate->unicode_cache();
+bool DateParser::Parse(Isolate* isolate, Vector<Char> str, FixedArray out) {
DCHECK(out->length() >= OUTPUT_SIZE);
- InputReader<Char> in(unicode_cache, str);
+ InputReader<Char> in(str);
DateStringTokenizer<Char> scanner(&in);
TimeZoneComposer tz;
TimeComposer time;
@@ -182,7 +181,6 @@ bool DateParser::Parse(Isolate* isolate, Vector<Char> str, FixedArray* out) {
return success;
}
-
template<typename CharType>
DateParser::DateToken DateParser::DateStringTokenizer<CharType>::Scan() {
int pre_pos = in_->position();
@@ -219,7 +217,7 @@ DateParser::DateToken DateParser::DateStringTokenizer<CharType>::Scan() {
template <typename Char>
bool DateParser::InputReader<Char>::SkipWhiteSpace() {
- if (unicode_cache_->IsWhiteSpaceOrLineTerminator(ch_)) {
+ if (IsWhiteSpaceOrLineTerminator(ch_)) {
Next();
return true;
}
diff --git a/deps/v8/src/dateparser.cc b/deps/v8/src/dateparser.cc
index b1807f27b6..cf99a8c0c1 100644
--- a/deps/v8/src/dateparser.cc
+++ b/deps/v8/src/dateparser.cc
@@ -10,7 +10,7 @@
namespace v8 {
namespace internal {
-bool DateParser::DayComposer::Write(FixedArray* output) {
+bool DateParser::DayComposer::Write(FixedArray output) {
if (index_ < 1) return false;
// Day and month default to 1.
while (index_ < kSize) {
@@ -62,8 +62,7 @@ bool DateParser::DayComposer::Write(FixedArray* output) {
return true;
}
-
-bool DateParser::TimeComposer::Write(FixedArray* output) {
+bool DateParser::TimeComposer::Write(FixedArray output) {
// All time slots default to 0
while (index_ < kSize) {
comp_[index_++] = 0;
@@ -95,8 +94,7 @@ bool DateParser::TimeComposer::Write(FixedArray* output) {
return true;
}
-
-bool DateParser::TimeZoneComposer::Write(FixedArray* output) {
+bool DateParser::TimeZoneComposer::Write(FixedArray output) {
if (sign_ != kNone) {
if (hour_ == kNone) hour_ = 0;
if (minute_ == kNone) minute_ = 0;
diff --git a/deps/v8/src/dateparser.h b/deps/v8/src/dateparser.h
index e26b8688f1..557750283d 100644
--- a/deps/v8/src/dateparser.h
+++ b/deps/v8/src/dateparser.h
@@ -7,7 +7,6 @@
#include "src/allocation.h"
#include "src/char-predicates.h"
-#include "src/unicode-cache.h"
namespace v8 {
namespace internal {
@@ -26,7 +25,7 @@ class DateParser : public AllStatic {
// [7]: UTC offset in seconds, or null value if no timezone specified
// If parsing fails, return false (content of output array is not defined).
template <typename Char>
- static bool Parse(Isolate* isolate, Vector<Char> str, FixedArray* output);
+ static bool Parse(Isolate* isolate, Vector<Char> str, FixedArray output);
enum {
YEAR, MONTH, DAY, HOUR, MINUTE, SECOND, MILLISECOND, UTC_OFFSET, OUTPUT_SIZE
@@ -49,12 +48,7 @@ class DateParser : public AllStatic {
template <typename Char>
class InputReader {
public:
- InputReader(UnicodeCache* unicode_cache, Vector<Char> s)
- : index_(0),
- buffer_(s),
- unicode_cache_(unicode_cache) {
- Next();
- }
+ explicit InputReader(Vector<Char> s) : index_(0), buffer_(s) { Next(); }
int position() { return index_; }
@@ -116,7 +110,6 @@ class DateParser : public AllStatic {
int index_;
Vector<Char> buffer_;
uint32_t ch_;
- UnicodeCache* unicode_cache_;
};
enum KeywordType {
@@ -283,7 +276,7 @@ class DateParser : public AllStatic {
return hour_ != kNone && minute_ == kNone && TimeComposer::IsMinute(n);
}
bool IsUTC() const { return hour_ == 0 && minute_ == 0; }
- bool Write(FixedArray* output);
+ bool Write(FixedArray output);
bool IsEmpty() { return hour_ == kNone; }
private:
int sign_;
@@ -309,7 +302,7 @@ class DateParser : public AllStatic {
return true;
}
void SetHourOffset(int n) { hour_offset_ = n; }
- bool Write(FixedArray* output);
+ bool Write(FixedArray output);
static bool IsMinute(int x) { return Between(x, 0, 59); }
static bool IsHour(int x) { return Between(x, 0, 23); }
@@ -338,7 +331,7 @@ class DateParser : public AllStatic {
return false;
}
void SetNamedMonth(int n) { named_month_ = n; }
- bool Write(FixedArray* output);
+ bool Write(FixedArray output);
void set_iso_date() { is_iso_date_ = true; }
static bool IsMonth(int x) { return Between(x, 1, 12); }
static bool IsDay(int x) { return Between(x, 1, 31); }
diff --git a/deps/v8/src/debug/debug-coverage.cc b/deps/v8/src/debug/debug-coverage.cc
index 9e0791babc..33223652cf 100644
--- a/deps/v8/src/debug/debug-coverage.cc
+++ b/deps/v8/src/debug/debug-coverage.cc
@@ -17,12 +17,12 @@ namespace v8 {
namespace internal {
class SharedToCounterMap
- : public base::TemplateHashMapImpl<SharedFunctionInfo*, uint32_t,
- base::KeyEqualityMatcher<void*>,
+ : public base::TemplateHashMapImpl<SharedFunctionInfo, uint32_t,
+ base::KeyEqualityMatcher<Object>,
base::DefaultAllocationPolicy> {
public:
- typedef base::TemplateHashMapEntry<SharedFunctionInfo*, uint32_t> Entry;
- inline void Add(SharedFunctionInfo* key, uint32_t count) {
+ typedef base::TemplateHashMapEntry<SharedFunctionInfo, uint32_t> Entry;
+ inline void Add(SharedFunctionInfo key, uint32_t count) {
Entry* entry = LookupOrInsert(key, Hash(key), []() { return 0; });
uint32_t old_count = entry->value;
if (UINT32_MAX - count < old_count) {
@@ -32,28 +32,28 @@ class SharedToCounterMap
}
}
- inline uint32_t Get(SharedFunctionInfo* key) {
+ inline uint32_t Get(SharedFunctionInfo key) {
Entry* entry = Lookup(key, Hash(key));
if (entry == nullptr) return 0;
return entry->value;
}
private:
- static uint32_t Hash(SharedFunctionInfo* key) {
- return static_cast<uint32_t>(reinterpret_cast<intptr_t>(key));
+ static uint32_t Hash(SharedFunctionInfo key) {
+ return static_cast<uint32_t>(key.ptr());
}
DisallowHeapAllocation no_gc;
};
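
The hunk boundary hides the body of Add(), but the guard reads as overflow protection: when old_count + count would exceed UINT32_MAX, the stored count presumably clamps instead of wrapping. As a self-contained sketch:

    uint32_t SaturatingAdd(uint32_t old_count, uint32_t count) {
      // UINT32_MAX - count < old_count  <=>  old_count + count > UINT32_MAX,
      // phrased so the comparison itself cannot overflow.
      if (UINT32_MAX - count < old_count) return UINT32_MAX;
      return old_count + count;
    }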
namespace {
-int StartPosition(SharedFunctionInfo* info) {
+int StartPosition(SharedFunctionInfo info) {
int start = info->function_token_position();
if (start == kNoSourcePosition) start = info->StartPosition();
return start;
}
-bool CompareSharedFunctionInfo(SharedFunctionInfo* a, SharedFunctionInfo* b) {
+bool CompareSharedFunctionInfo(SharedFunctionInfo a, SharedFunctionInfo b) {
int a_start = StartPosition(a);
int b_start = StartPosition(b);
if (a_start == b_start) return a->EndPosition() > b->EndPosition();
@@ -72,10 +72,10 @@ void SortBlockData(std::vector<CoverageBlock>& v) {
std::sort(v.begin(), v.end(), CompareCoverageBlock);
}
-std::vector<CoverageBlock> GetSortedBlockData(SharedFunctionInfo* shared) {
+std::vector<CoverageBlock> GetSortedBlockData(SharedFunctionInfo shared) {
DCHECK(shared->HasCoverageInfo());
- CoverageInfo* coverage_info =
+ CoverageInfo coverage_info =
CoverageInfo::cast(shared->GetDebugInfo()->coverage_info());
std::vector<CoverageBlock> result;
@@ -364,10 +364,10 @@ void ClampToBinary(CoverageFunction* function) {
}
}
-void ResetAllBlockCounts(SharedFunctionInfo* shared) {
+void ResetAllBlockCounts(SharedFunctionInfo shared) {
DCHECK(shared->HasCoverageInfo());
- CoverageInfo* coverage_info =
+ CoverageInfo coverage_info =
CoverageInfo::cast(shared->GetDebugInfo()->coverage_info());
for (int i = 0; i < coverage_info->SlotCount(); i++) {
@@ -395,7 +395,7 @@ bool IsBinaryMode(debug::Coverage::Mode mode) {
}
}
-void CollectBlockCoverage(CoverageFunction* function, SharedFunctionInfo* info,
+void CollectBlockCoverage(CoverageFunction* function, SharedFunctionInfo info,
debug::Coverage::Mode mode) {
DCHECK(IsBlockMode(mode));
@@ -477,8 +477,8 @@ std::unique_ptr<Coverage> Coverage::Collect(
Handle<ArrayList> list = Handle<ArrayList>::cast(
isolate->factory()->feedback_vectors_for_profiling_tools());
for (int i = 0; i < list->Length(); i++) {
- FeedbackVector* vector = FeedbackVector::cast(list->Get(i));
- SharedFunctionInfo* shared = vector->shared_function_info();
+ FeedbackVector vector = FeedbackVector::cast(list->Get(i));
+ SharedFunctionInfo shared = vector->shared_function_info();
DCHECK(shared->IsSubjectToDebugging());
uint32_t count = static_cast<uint32_t>(vector->invocation_count());
if (reset_count) vector->clear_invocation_count();
@@ -492,10 +492,11 @@ std::unique_ptr<Coverage> Coverage::Collect(
->IsArrayList());
DCHECK_EQ(v8::debug::Coverage::kBestEffort, collectionMode);
HeapIterator heap_iterator(isolate->heap());
- while (HeapObject* current_obj = heap_iterator.next()) {
+ for (HeapObject current_obj = heap_iterator.next();
+ !current_obj.is_null(); current_obj = heap_iterator.next()) {
if (!current_obj->IsFeedbackVector()) continue;
- FeedbackVector* vector = FeedbackVector::cast(current_obj);
- SharedFunctionInfo* shared = vector->shared_function_info();
+ FeedbackVector vector = FeedbackVector::cast(current_obj);
+ SharedFunctionInfo shared = vector->shared_function_info();
if (!shared->IsSubjectToDebugging()) continue;
uint32_t count = static_cast<uint32_t>(vector->invocation_count());
counter_map.Add(shared, count);
@@ -508,7 +509,8 @@ std::unique_ptr<Coverage> Coverage::Collect(
// between source ranges and invocation counts.
std::unique_ptr<Coverage> result(new Coverage());
Script::Iterator scripts(isolate);
- while (Script* script = scripts.Next()) {
+ for (Script script = scripts.Next(); !script.is_null();
+ script = scripts.Next()) {
if (!script->IsUserJavaScript()) continue;
// Create and add new script data.
@@ -516,12 +518,13 @@ std::unique_ptr<Coverage> Coverage::Collect(
result->emplace_back(script_handle);
std::vector<CoverageFunction>* functions = &result->back().functions;
- std::vector<SharedFunctionInfo*> sorted;
+ std::vector<SharedFunctionInfo> sorted;
{
// Sort functions by start position, from outer to inner functions.
SharedFunctionInfo::ScriptIterator infos(isolate, *script_handle);
- while (SharedFunctionInfo* info = infos.Next()) {
+ for (SharedFunctionInfo info = infos.Next(); !info.is_null();
+ info = infos.Next()) {
sorted.push_back(info);
}
std::sort(sorted.begin(), sorted.end(), CompareSharedFunctionInfo);
@@ -531,7 +534,7 @@ std::unique_ptr<Coverage> Coverage::Collect(
std::vector<size_t> nesting;
// Use sorted list to reconstruct function nesting.
- for (SharedFunctionInfo* info : sorted) {
+ for (SharedFunctionInfo info : sorted) {
int start = StartPosition(info);
int end = info->EndPosition();
uint32_t count = counter_map.Get(info);
@@ -607,17 +610,17 @@ void Coverage::SelectMode(Isolate* isolate, debug::Coverage::Mode mode) {
isolate->MaybeInitializeVectorListFromHeap();
HeapIterator heap_iterator(isolate->heap());
- while (HeapObject* o = heap_iterator.next()) {
+ for (HeapObject o = heap_iterator.next(); !o.is_null();
+ o = heap_iterator.next()) {
if (IsBinaryMode(mode) && o->IsSharedFunctionInfo()) {
// If collecting binary coverage, reset
// SFI::has_reported_binary_coverage to avoid optimizing / inlining
// functions before they have reported coverage.
- SharedFunctionInfo* shared = SharedFunctionInfo::cast(o);
+ SharedFunctionInfo shared = SharedFunctionInfo::cast(o);
shared->set_has_reported_binary_coverage(false);
} else if (o->IsFeedbackVector()) {
// In any case, clear any collected invocation counts.
- FeedbackVector* vector = FeedbackVector::cast(o);
- vector->clear_invocation_count();
+ FeedbackVector::cast(o)->clear_invocation_count();
}
}
diff --git a/deps/v8/src/debug/debug-coverage.h b/deps/v8/src/debug/debug-coverage.h
index 13816670f7..fc46ebc66e 100644
--- a/deps/v8/src/debug/debug-coverage.h
+++ b/deps/v8/src/debug/debug-coverage.h
@@ -8,6 +8,7 @@
#include <vector>
#include "src/debug/debug-interface.h"
+#include "src/handles.h"
#include "src/objects.h"
namespace v8 {
diff --git a/deps/v8/src/debug/debug-evaluate.cc b/deps/v8/src/debug/debug-evaluate.cc
index 98e4c58fb9..a0427647d8 100644
--- a/deps/v8/src/debug/debug-evaluate.cc
+++ b/deps/v8/src/debug/debug-evaluate.cc
@@ -279,6 +279,7 @@ bool IntrinsicHasNoSideEffect(Runtime::FunctionId id) {
V(ThrowInvalidStringLength) \
V(ThrowIteratorError) \
V(ThrowIteratorResultNotAnObject) \
+ V(ThrowPatternAssignmentNonCoercible) \
V(ThrowReferenceError) \
V(ThrowSymbolIteratorInvalid) \
/* Strings */ \
@@ -337,6 +338,7 @@ bool IntrinsicHasNoSideEffect(Runtime::FunctionId id) {
V(Call) \
V(CompleteInobjectSlackTrackingForMap) \
V(HasInPrototypeChain) \
+ V(IncrementUseCounter) \
V(MaxSmi) \
V(NewObject) \
V(SmiLexicographicCompare) \
@@ -373,43 +375,6 @@ bool IntrinsicHasNoSideEffect(Runtime::FunctionId id) {
#undef INLINE_INTRINSIC_WHITELIST
}
-#ifdef DEBUG
-bool BuiltinToIntrinsicHasNoSideEffect(Builtins::Name builtin_id,
- Runtime::FunctionId intrinsic_id) {
- // First check the intrinsic whitelist.
- if (IntrinsicHasNoSideEffect(intrinsic_id)) return true;
-
-// Whitelist intrinsics called from specific builtins.
-#define BUILTIN_INTRINSIC_WHITELIST(V, W) \
- /* Arrays */ \
- V(Builtins::kArrayFilter, W(CreateDataProperty)) \
- V(Builtins::kArrayMap, W(CreateDataProperty)) \
- V(Builtins::kArrayPrototypeSlice, \
- W(CreateDataProperty) W(SetKeyedProperty) W(SetNamedProperty)) \
- /* TypedArrays */ \
- V(Builtins::kTypedArrayConstructor, \
- W(TypedArrayCopyElements) W(ThrowInvalidTypedArrayAlignment)) \
- V(Builtins::kTypedArrayPrototypeFilter, W(TypedArrayCopyElements)) \
- V(Builtins::kTypedArrayPrototypeMap, W(SetKeyedProperty) W(SetNamedProperty))
-
-#define CASE(Builtin, ...) \
- case Builtin: \
- return (__VA_ARGS__ false);
-
-#define MATCH(Intrinsic) intrinsic_id == Runtime::k##Intrinsic ||
-
- switch (builtin_id) {
- BUILTIN_INTRINSIC_WHITELIST(CASE, MATCH)
- default:
- return false;
- }
-
-#undef MATCH
-#undef CASE
-#undef BUILTIN_INTRINSIC_WHITELIST
-}
-#endif // DEBUG
-
bool BytecodeHasNoSideEffect(interpreter::Bytecode bytecode) {
typedef interpreter::Bytecode Bytecode;
typedef interpreter::Bytecodes Bytecodes;
@@ -552,10 +517,13 @@ DebugInfo::SideEffectState BuiltinGetSideEffectState(Builtins::Name id) {
case Builtins::kArrayPrototypeFindIndex:
case Builtins::kArrayPrototypeFlat:
case Builtins::kArrayPrototypeFlatMap:
+ case Builtins::kArrayPrototypeJoin:
case Builtins::kArrayPrototypeKeys:
case Builtins::kArrayPrototypeLastIndexOf:
case Builtins::kArrayPrototypeSlice:
case Builtins::kArrayPrototypeSort:
+ case Builtins::kArrayPrototypeToLocaleString:
+ case Builtins::kArrayPrototypeToString:
case Builtins::kArrayForEach:
case Builtins::kArrayEvery:
case Builtins::kArraySome:
@@ -745,6 +713,7 @@ DebugInfo::SideEffectState BuiltinGetSideEffectState(Builtins::Name id) {
case Builtins::kStringPrototypeItalics:
case Builtins::kStringPrototypeLastIndexOf:
case Builtins::kStringPrototypeLink:
+ case Builtins::kStringPrototypeMatchAll:
case Builtins::kStringPrototypePadEnd:
case Builtins::kStringPrototypePadStart:
case Builtins::kStringPrototypeRepeat:
@@ -802,6 +771,8 @@ DebugInfo::SideEffectState BuiltinGetSideEffectState(Builtins::Name id) {
case Builtins::kRegExpConstructor:
// Internal.
case Builtins::kStrictPoisonPillThrower:
+ case Builtins::kAllocateInNewSpace:
+ case Builtins::kAllocateInOldSpace:
return DebugInfo::kHasNoSideEffect;
// Set builtins.
@@ -816,7 +787,7 @@ DebugInfo::SideEffectState BuiltinGetSideEffectState(Builtins::Name id) {
case Builtins::kArrayPrototypeReverse:
case Builtins::kArrayPrototypeShift:
case Builtins::kArrayPrototypeUnshift:
- case Builtins::kArraySplice:
+ case Builtins::kArrayPrototypeSplice:
case Builtins::kArrayUnshift:
// Map builtins.
case Builtins::kMapIteratorPrototypeNext:
@@ -830,6 +801,7 @@ DebugInfo::SideEffectState BuiltinGetSideEffectState(Builtins::Name id) {
case Builtins::kRegExpPrototypeFlagsGetter:
case Builtins::kRegExpPrototypeGlobalGetter:
case Builtins::kRegExpPrototypeIgnoreCaseGetter:
+ case Builtins::kRegExpPrototypeMatchAll:
case Builtins::kRegExpPrototypeMultilineGetter:
case Builtins::kRegExpPrototypeDotAllGetter:
case Builtins::kRegExpPrototypeUnicodeGetter:
@@ -917,47 +889,158 @@ DebugInfo::SideEffectState DebugEvaluate::FunctionGetSideEffectState(
// Check built-ins against whitelist.
int builtin_index =
info->HasBuiltinId() ? info->builtin_id() : Builtins::kNoBuiltinId;
- DCHECK_NE(Builtins::kDeserializeLazy, builtin_index);
if (!Builtins::IsBuiltinId(builtin_index))
return DebugInfo::kHasSideEffects;
DebugInfo::SideEffectState state =
BuiltinGetSideEffectState(static_cast<Builtins::Name>(builtin_index));
+ return state;
+ }
+
+ return DebugInfo::kHasSideEffects;
+}
+
#ifdef DEBUG
- if (state == DebugInfo::kHasNoSideEffect) {
- Code* code = isolate->builtins()->builtin(builtin_index);
- if (code->builtin_index() == Builtins::kDeserializeLazy) {
- // Target builtin is not yet deserialized. Deserialize it now.
+static bool TransitivelyCalledBuiltinHasNoSideEffect(Builtins::Name caller,
+ Builtins::Name callee) {
+ switch (callee) {
+ // Transitively called Builtins:
+ case Builtins::kAbort:
+ case Builtins::kAbortJS:
+ case Builtins::kAdaptorWithBuiltinExitFrame:
+ case Builtins::kArrayConstructorImpl:
+ case Builtins::kArrayEveryLoopContinuation:
+ case Builtins::kArrayFilterLoopContinuation:
+ case Builtins::kArrayFindIndexLoopContinuation:
+ case Builtins::kArrayFindLoopContinuation:
+ case Builtins::kArrayForEachLoopContinuation:
+ case Builtins::kArrayIncludesHoleyDoubles:
+ case Builtins::kArrayIncludesPackedDoubles:
+ case Builtins::kArrayIncludesSmiOrObject:
+ case Builtins::kArrayIndexOfHoleyDoubles:
+ case Builtins::kArrayIndexOfPackedDoubles:
+ case Builtins::kArrayIndexOfSmiOrObject:
+ case Builtins::kArrayMapLoopContinuation:
+ case Builtins::kArrayReduceLoopContinuation:
+ case Builtins::kArrayReduceRightLoopContinuation:
+ case Builtins::kArraySomeLoopContinuation:
+ case Builtins::kArrayTimSort:
+ case Builtins::kCall_ReceiverIsAny:
+ case Builtins::kCallWithArrayLike:
+ case Builtins::kCEntry_Return1_DontSaveFPRegs_ArgvOnStack_NoBuiltinExit:
+ case Builtins::kCEntry_Return1_DontSaveFPRegs_ArgvOnStack_BuiltinExit:
+ case Builtins::kCEntry_Return1_DontSaveFPRegs_ArgvInRegister_NoBuiltinExit:
+ case Builtins::kCEntry_Return1_SaveFPRegs_ArgvOnStack_NoBuiltinExit:
+ case Builtins::kCEntry_Return1_SaveFPRegs_ArgvOnStack_BuiltinExit:
+ case Builtins::kCEntry_Return2_DontSaveFPRegs_ArgvOnStack_NoBuiltinExit:
+ case Builtins::kCEntry_Return2_DontSaveFPRegs_ArgvOnStack_BuiltinExit:
+ case Builtins::kCEntry_Return2_DontSaveFPRegs_ArgvInRegister_NoBuiltinExit:
+ case Builtins::kCEntry_Return2_SaveFPRegs_ArgvOnStack_NoBuiltinExit:
+ case Builtins::kCEntry_Return2_SaveFPRegs_ArgvOnStack_BuiltinExit:
+ case Builtins::kCloneFastJSArray:
+ case Builtins::kConstruct:
+ case Builtins::kConvertToLocaleString:
+ case Builtins::kCreateTypedArray:
+ case Builtins::kDirectCEntry:
+ case Builtins::kDoubleToI:
+ case Builtins::kExtractFastJSArray:
+ case Builtins::kFastNewObject:
+ case Builtins::kFindOrderedHashMapEntry:
+ case Builtins::kFlatMapIntoArray:
+ case Builtins::kFlattenIntoArray:
+ case Builtins::kGetProperty:
+ case Builtins::kHasProperty:
+ case Builtins::kMathPowInternal:
+ case Builtins::kNonNumberToNumber:
+ case Builtins::kNonPrimitiveToPrimitive_Number:
+ case Builtins::kNumberToString:
+ case Builtins::kObjectToString:
+ case Builtins::kOrderedHashTableHealIndex:
+ case Builtins::kOrdinaryToPrimitive_Number:
+ case Builtins::kOrdinaryToPrimitive_String:
+ case Builtins::kParseInt:
+ case Builtins::kProxyHasProperty:
+ case Builtins::kRecordWrite:
+ case Builtins::kStringAdd_CheckNone:
+ case Builtins::kStringEqual:
+ case Builtins::kStringIndexOf:
+ case Builtins::kStringRepeat:
+ case Builtins::kToInteger:
+ case Builtins::kToInteger_TruncateMinusZero:
+ case Builtins::kToLength:
+ case Builtins::kToName:
+ case Builtins::kToObject:
+ case Builtins::kToString:
+ case Builtins::kWeakMapLookupHashIndex:
+ return true;
+ case Builtins::kJoinStackPop:
+ case Builtins::kJoinStackPush:
+ switch (caller) {
+ case Builtins::kArrayPrototypeJoin:
+ case Builtins::kArrayPrototypeToLocaleString:
+ return true;
+ default:
+ return false;
+ }
+ case Builtins::kSetProperty:
+ switch (caller) {
+ case Builtins::kArrayPrototypeSlice:
+ case Builtins::kTypedArrayPrototypeMap:
+ case Builtins::kStringPrototypeMatchAll:
+ return true;
+ default:
+ return false;
+ }
+ default:
+ return false;
+ }
+}
- DCHECK(Builtins::IsLazy(builtin_index));
- DCHECK_EQ(Builtins::TFJ, Builtins::KindOf(builtin_index));
+// static
+void DebugEvaluate::VerifyTransitiveBuiltins(Isolate* isolate) {
+ // TODO(yangguo): also check runtime calls.
+ bool failed = false;
+ bool sanity_check = false;
+ for (int i = 0; i < Builtins::builtin_count; i++) {
+ Builtins::Name caller = static_cast<Builtins::Name>(i);
+ DebugInfo::SideEffectState state = BuiltinGetSideEffectState(caller);
+ if (state != DebugInfo::kHasNoSideEffect) continue;
+ Code code = isolate->builtins()->builtin(caller);
+ int mode = RelocInfo::ModeMask(RelocInfo::CODE_TARGET) |
+ RelocInfo::ModeMask(RelocInfo::RELATIVE_CODE_TARGET);
- code = Snapshot::DeserializeBuiltin(isolate, builtin_index);
- DCHECK_NE(Builtins::kDeserializeLazy, code->builtin_index());
+ for (RelocIterator it(code, mode); !it.done(); it.next()) {
+ RelocInfo* rinfo = it.rinfo();
+ DCHECK(RelocInfo::IsCodeTargetMode(rinfo->rmode()));
+ Code callee_code = isolate->heap()->GcSafeFindCodeForInnerPointer(
+ rinfo->target_address());
+ if (!callee_code->is_builtin()) continue;
+ Builtins::Name callee =
+ static_cast<Builtins::Name>(callee_code->builtin_index());
+ if (BuiltinGetSideEffectState(callee) == DebugInfo::kHasNoSideEffect) {
+ continue;
}
- // TODO(yangguo): Check builtin-to-builtin calls too.
- int mode = RelocInfo::ModeMask(RelocInfo::EXTERNAL_REFERENCE);
- bool failed = false;
- for (RelocIterator it(code, mode); !it.done(); it.next()) {
- RelocInfo* rinfo = it.rinfo();
- Address address = rinfo->target_external_reference();
- const Runtime::Function* function = Runtime::FunctionForEntry(address);
- if (function == nullptr) continue;
- if (!BuiltinToIntrinsicHasNoSideEffect(
- static_cast<Builtins::Name>(builtin_index),
- function->function_id)) {
- PrintF("Whitelisted builtin %s calls non-whitelisted intrinsic %s\n",
- Builtins::name(builtin_index), function->name);
- failed = true;
- }
- DCHECK(!failed);
+ if (TransitivelyCalledBuiltinHasNoSideEffect(caller, callee)) {
+ sanity_check = true;
+ continue;
}
+ PrintF("Whitelisted builtin %s calls non-whitelisted builtin %s\n",
+ Builtins::name(caller), Builtins::name(callee));
+ failed = true;
}
-#endif // DEBUG
- return state;
}
-
- return DebugInfo::kHasSideEffects;
+ CHECK(!failed);
+#if defined(V8_TARGET_ARCH_PPC) || defined(V8_TARGET_ARCH_MIPS64)
+ // Isolate-independent builtin calls and jumps do not emit reloc infos
+ // on PPC. We try to avoid using PC-relative code due to performance
+ // issues, especially on older hardware.
+ // MIPS64 doesn't have PC-relative code currently.
+ // TODO(mips): Add PC-relative code to MIPS64.
+ USE(sanity_check);
+#else
+ CHECK(sanity_check);
+#endif
}
+#endif // DEBUG
// static
void DebugEvaluate::ApplySideEffectChecks(
diff --git a/deps/v8/src/debug/debug-evaluate.h b/deps/v8/src/debug/debug-evaluate.h
index 470a4900a7..9aaa959bc2 100644
--- a/deps/v8/src/debug/debug-evaluate.h
+++ b/deps/v8/src/debug/debug-evaluate.h
@@ -43,6 +43,10 @@ class DebugEvaluate : public AllStatic {
Isolate* isolate, Handle<SharedFunctionInfo> info);
static void ApplySideEffectChecks(Handle<BytecodeArray> bytecode_array);
+#ifdef DEBUG
+ static void VerifyTransitiveBuiltins(Isolate* isolate);
+#endif // DEBUG
+
private:
// This class builds a context chain for evaluation of expressions
// in debugger.
diff --git a/deps/v8/src/debug/debug-interface.h b/deps/v8/src/debug/debug-interface.h
index 14ccf2c20a..402130bb63 100644
--- a/deps/v8/src/debug/debug-interface.h
+++ b/deps/v8/src/debug/debug-interface.h
@@ -193,13 +193,7 @@ void ResetBlackboxedStateCache(Isolate* isolate,
int EstimatedValueSize(Isolate* isolate, v8::Local<v8::Value> value);
-enum Builtin {
- kObjectKeys,
- kObjectGetPrototypeOf,
- kObjectGetOwnPropertyDescriptor,
- kObjectGetOwnPropertyNames,
- kObjectGetOwnPropertySymbols,
-};
+enum Builtin { kStringToLowerCase };
Local<Function> GetBuiltin(Isolate* isolate, Builtin builtin);
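
The debug::Builtin enum shrinks to the single entry the inspector still needs; usage is unchanged, e.g.:

    // Assumes a live v8::Isolate* isolate.
    v8::Local<v8::Function> to_lower_case =
        v8::debug::GetBuiltin(isolate, v8::debug::kStringToLowerCase);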
@@ -474,14 +468,9 @@ void SetReturnValue(v8::Isolate* isolate, v8::Local<v8::Value> value);
enum class NativeAccessorType {
None = 0,
HasGetter = 1 << 0,
- HasSetter = 1 << 1,
- IsBuiltin = 1 << 2
+ HasSetter = 1 << 1
};
-int GetNativeAccessorDescriptor(v8::Local<v8::Context> context,
- v8::Local<v8::Object> object,
- v8::Local<v8::Name> name);
-
int64_t GetNextRandomInt64(v8::Isolate* isolate);
v8::MaybeLocal<v8::Value> EvaluateGlobal(v8::Isolate* isolate,
@@ -518,6 +507,39 @@ class WeakMap : public v8::Object {
private:
WeakMap();
};
+
+struct PropertyDescriptor {
+ bool enumerable : 1;
+ bool has_enumerable : 1;
+ bool configurable : 1;
+ bool has_configurable : 1;
+ bool writable : 1;
+ bool has_writable : 1;
+ v8::Local<v8::Value> value;
+ v8::Local<v8::Value> get;
+ v8::Local<v8::Value> set;
+};
+
+class PropertyIterator {
+ public:
+ static std::unique_ptr<PropertyIterator> Create(v8::Local<v8::Object> object);
+
+ virtual ~PropertyIterator() = default;
+
+ virtual bool Done() const = 0;
+ virtual void Advance() = 0;
+
+ virtual v8::Local<v8::Name> name() const = 0;
+
+ virtual bool is_native_accessor() = 0;
+ virtual bool has_native_getter() = 0;
+ virtual bool has_native_setter() = 0;
+ virtual Maybe<PropertyAttribute> attributes() = 0;
+ virtual Maybe<PropertyDescriptor> descriptor() = 0;
+
+ virtual bool is_own() = 0;
+ virtual bool is_array_index() = 0;
+};
} // namespace debug
} // namespace v8
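
The new debug::PropertyIterator gives inspector code a single pull-based walk over an object's properties, native accessors and prototype chain included. A hedged usage sketch against the interface above:

    // Assumes a v8::Local<v8::Object> named object.
    std::unique_ptr<v8::debug::PropertyIterator> it =
        v8::debug::PropertyIterator::Create(object);
    for (; !it->Done(); it->Advance()) {
      v8::Local<v8::Name> name = it->name();
      bool own = it->is_own();  // false once the walk leaves the receiver
      // descriptor() is Nothing on lookup failure, e.g. a throwing proxy trap.
      v8::Maybe<v8::debug::PropertyDescriptor> desc = it->descriptor();
      (void)name; (void)own; (void)desc;
    }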
diff --git a/deps/v8/src/debug/debug-property-iterator.cc b/deps/v8/src/debug/debug-property-iterator.cc
new file mode 100644
index 0000000000..1bef58192c
--- /dev/null
+++ b/deps/v8/src/debug/debug-property-iterator.cc
@@ -0,0 +1,213 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/debug/debug-property-iterator.h"
+
+#include "src/api-inl.h"
+#include "src/base/flags.h"
+#include "src/keys.h"
+#include "src/objects/js-array-buffer-inl.h"
+#include "src/property-descriptor.h"
+#include "src/property-details.h"
+
+namespace v8 {
+
+std::unique_ptr<debug::PropertyIterator> debug::PropertyIterator::Create(
+ v8::Local<v8::Object> v8_object) {
+ internal::Isolate* isolate =
+ reinterpret_cast<internal::Isolate*>(v8_object->GetIsolate());
+ return std::unique_ptr<debug::PropertyIterator>(
+ new internal::DebugPropertyIterator(isolate,
+ Utils::OpenHandle(*v8_object)));
+}
+
+namespace internal {
+
+DebugPropertyIterator::DebugPropertyIterator(Isolate* isolate,
+ Handle<JSReceiver> receiver)
+ : isolate_(isolate),
+ prototype_iterator_(isolate, receiver, kStartAtReceiver,
+ PrototypeIterator::END_AT_NULL) {
+ if (receiver->IsJSProxy()) {
+ is_own_ = false;
+ prototype_iterator_.AdvanceIgnoringProxies();
+ }
+ if (prototype_iterator_.IsAtEnd()) return;
+ FillKeysForCurrentPrototypeAndStage();
+ if (should_move_to_next_stage()) Advance();
+}
+
+bool DebugPropertyIterator::Done() const {
+ return prototype_iterator_.IsAtEnd();
+}
+
+void DebugPropertyIterator::Advance() {
+ ++current_key_index_;
+ calculated_native_accessor_flags_ = false;
+ while (should_move_to_next_stage()) {
+ switch (stage_) {
+ case Stage::kExoticIndices:
+ stage_ = Stage::kEnumerableStrings;
+ break;
+ case Stage::kEnumerableStrings:
+ stage_ = Stage::kAllProperties;
+ break;
+ case Stage::kAllProperties:
+ stage_ = kExoticIndices;
+ is_own_ = false;
+ prototype_iterator_.AdvanceIgnoringProxies();
+ break;
+ }
+ FillKeysForCurrentPrototypeAndStage();
+ }
+}
+
+bool DebugPropertyIterator::is_native_accessor() {
+ if (stage_ == kExoticIndices) return false;
+ CalculateNativeAccessorFlags();
+ return native_accessor_flags_;
+}
+
+bool DebugPropertyIterator::has_native_getter() {
+ if (stage_ == kExoticIndices) return false;
+ CalculateNativeAccessorFlags();
+ return native_accessor_flags_ &
+ static_cast<int>(debug::NativeAccessorType::HasGetter);
+}
+
+bool DebugPropertyIterator::has_native_setter() {
+ if (stage_ == kExoticIndices) return false;
+ CalculateNativeAccessorFlags();
+ return native_accessor_flags_ &
+ static_cast<int>(debug::NativeAccessorType::HasSetter);
+}
+
+Handle<Name> DebugPropertyIterator::raw_name() const {
+ DCHECK(!Done());
+ if (stage_ == kExoticIndices) {
+ return isolate_->factory()->Uint32ToString(current_key_index_);
+ } else {
+ return Handle<Name>::cast(
+ FixedArray::get(*keys_, current_key_index_, isolate_));
+ }
+}
+
+v8::Local<v8::Name> DebugPropertyIterator::name() const {
+ return Utils::ToLocal(raw_name());
+}
+
+v8::Maybe<v8::PropertyAttribute> DebugPropertyIterator::attributes() {
+ Handle<JSReceiver> receiver =
+ PrototypeIterator::GetCurrent<JSReceiver>(prototype_iterator_);
+ auto result = JSReceiver::GetPropertyAttributes(receiver, raw_name());
+ if (result.IsNothing()) return Nothing<v8::PropertyAttribute>();
+ DCHECK(result.FromJust() != ABSENT);
+ return Just(static_cast<v8::PropertyAttribute>(result.FromJust()));
+}
+
+v8::Maybe<v8::debug::PropertyDescriptor> DebugPropertyIterator::descriptor() {
+ Handle<JSReceiver> receiver =
+ PrototypeIterator::GetCurrent<JSReceiver>(prototype_iterator_);
+
+ PropertyDescriptor descriptor;
+ Maybe<bool> did_get_descriptor = JSReceiver::GetOwnPropertyDescriptor(
+ isolate_, receiver, raw_name(), &descriptor);
+ if (did_get_descriptor.IsNothing()) {
+ return Nothing<v8::debug::PropertyDescriptor>();
+ }
+ DCHECK(did_get_descriptor.FromJust());
+ return Just(v8::debug::PropertyDescriptor{
+ descriptor.enumerable(), descriptor.has_enumerable(),
+ descriptor.configurable(), descriptor.has_configurable(),
+ descriptor.writable(), descriptor.has_writable(),
+ descriptor.has_value() ? Utils::ToLocal(descriptor.value())
+ : v8::Local<v8::Value>(),
+ descriptor.has_get() ? Utils::ToLocal(descriptor.get())
+ : v8::Local<v8::Value>(),
+ descriptor.has_set() ? Utils::ToLocal(descriptor.set())
+ : v8::Local<v8::Value>(),
+ });
+}
+
+bool DebugPropertyIterator::is_own() { return is_own_; }
+
+bool DebugPropertyIterator::is_array_index() {
+ if (stage_ == kExoticIndices) return true;
+ uint32_t index = 0;
+ return raw_name()->AsArrayIndex(&index);
+}
+
+void DebugPropertyIterator::FillKeysForCurrentPrototypeAndStage() {
+ current_key_index_ = 0;
+ exotic_length_ = 0;
+ keys_ = Handle<FixedArray>::null();
+ if (prototype_iterator_.IsAtEnd()) return;
+ Handle<JSReceiver> receiver =
+ PrototypeIterator::GetCurrent<JSReceiver>(prototype_iterator_);
+ bool has_exotic_indices = receiver->IsJSTypedArray();
+ if (stage_ == kExoticIndices) {
+ if (!has_exotic_indices) return;
+ exotic_length_ = static_cast<uint32_t>(
+ Handle<JSTypedArray>::cast(receiver)->length_value());
+ return;
+ }
+ bool skip_indices = has_exotic_indices;
+ PropertyFilter filter =
+ stage_ == kEnumerableStrings ? ENUMERABLE_STRINGS : ALL_PROPERTIES;
+ if (!KeyAccumulator::GetKeys(receiver, KeyCollectionMode::kOwnOnly, filter,
+ GetKeysConversion::kConvertToString, false,
+ skip_indices)
+ .ToHandle(&keys_)) {
+ keys_ = Handle<FixedArray>::null();
+ }
+}
+
+bool DebugPropertyIterator::should_move_to_next_stage() const {
+ if (prototype_iterator_.IsAtEnd()) return false;
+ if (stage_ == kExoticIndices) return current_key_index_ >= exotic_length_;
+ return keys_.is_null() ||
+ current_key_index_ >= static_cast<uint32_t>(keys_->length());
+}
+
+namespace {
+base::Flags<debug::NativeAccessorType, int> GetNativeAccessorDescriptorInternal(
+ Handle<JSReceiver> object, Handle<Name> name) {
+ uint32_t index;
+ if (name->AsArrayIndex(&index)) return debug::NativeAccessorType::None;
+ LookupIterator it =
+ LookupIterator(object->GetIsolate(), object, name, LookupIterator::OWN);
+ if (!it.IsFound()) return debug::NativeAccessorType::None;
+ if (it.state() != LookupIterator::ACCESSOR) {
+ return debug::NativeAccessorType::None;
+ }
+ Handle<Object> structure = it.GetAccessors();
+ if (!structure->IsAccessorInfo()) return debug::NativeAccessorType::None;
+ auto isolate = object->GetIsolate();
+ base::Flags<debug::NativeAccessorType, int> result;
+#define IS_BUILTIN_ACESSOR(_, name, ...) \
+ if (*structure == *isolate->factory()->name##_accessor()) \
+ return debug::NativeAccessorType::None;
+ ACCESSOR_INFO_LIST_GENERATOR(IS_BUILTIN_ACESSOR, /* not used */)
+#undef IS_BUILTIN_ACESSOR
+ Handle<AccessorInfo> accessor_info = Handle<AccessorInfo>::cast(structure);
+ if (accessor_info->getter() != Object()) {
+ result |= debug::NativeAccessorType::HasGetter;
+ }
+ if (accessor_info->setter() != Object()) {
+ result |= debug::NativeAccessorType::HasSetter;
+ }
+ return result;
+}
+} // anonymous namespace
+
+void DebugPropertyIterator::CalculateNativeAccessorFlags() {
+ if (calculated_native_accessor_flags_) return;
+ Handle<JSReceiver> receiver =
+ PrototypeIterator::GetCurrent<JSReceiver>(prototype_iterator_);
+ native_accessor_flags_ =
+ GetNativeAccessorDescriptorInternal(receiver, raw_name());
+ calculated_native_accessor_flags_ = true;
+}
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/debug/debug-property-iterator.h b/deps/v8/src/debug/debug-property-iterator.h
new file mode 100644
index 0000000000..6a527f5dc7
--- /dev/null
+++ b/deps/v8/src/debug/debug-property-iterator.h
@@ -0,0 +1,62 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_DEBUG_DEBUG_PROPERTY_ITERATOR_H_
+#define V8_DEBUG_DEBUG_PROPERTY_ITERATOR_H_
+
+#include "src/debug/debug-interface.h"
+#include "src/handles.h"
+#include "src/isolate.h"
+#include "src/prototype.h"
+
+#include "include/v8.h"
+
+namespace v8 {
+namespace internal {
+
+class JSReceiver;
+
+class DebugPropertyIterator final : public debug::PropertyIterator {
+ public:
+ DebugPropertyIterator(Isolate* isolate, Handle<JSReceiver> receiver);
+ ~DebugPropertyIterator() override = default;
+
+ bool Done() const override;
+ void Advance() override;
+
+ v8::Local<v8::Name> name() const override;
+ bool is_native_accessor() override;
+ bool has_native_getter() override;
+ bool has_native_setter() override;
+ v8::Maybe<v8::PropertyAttribute> attributes() override;
+ v8::Maybe<v8::debug::PropertyDescriptor> descriptor() override;
+
+ bool is_own() override;
+ bool is_array_index() override;
+
+ private:
+ void FillKeysForCurrentPrototypeAndStage();
+ bool should_move_to_next_stage() const;
+ void CalculateNativeAccessorFlags();
+ Handle<Name> raw_name() const;
+
+ Isolate* isolate_;
+ PrototypeIterator prototype_iterator_;
+ enum Stage { kExoticIndices = 0, kEnumerableStrings = 1, kAllProperties = 2 };
+ Stage stage_ = kExoticIndices;
+
+ uint32_t current_key_index_ = 0;
+ Handle<FixedArray> keys_;
+ uint32_t exotic_length_ = 0;
+
+ bool calculated_native_accessor_flags_ = false;
+ int native_accessor_flags_ = 0;
+ bool is_own_ = true;
+
+ DISALLOW_COPY_AND_ASSIGN(DebugPropertyIterator);
+};
+} // namespace internal
+} // namespace v8
+
+#endif // V8_DEBUG_DEBUG_PROPERTY_ITERATOR_H_
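
DebugPropertyIterator runs each object on the prototype chain through the three stages declared above before moving to the next prototype. For a fresh typed array the visit order would be roughly the following (a hedged sketch, not normative):

    // new Int32Array(2), per object on the chain:
    //   kExoticIndices      -> "0", "1" (typed-array elements, is_own() true)
    //   kEnumerableStrings  -> own enumerable string keys, indices skipped
    //   kAllProperties      -> remaining own keys, non-enumerable included
    //   then prototype_iterator_ advances, is_own_ flips to false, repeat.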
diff --git a/deps/v8/src/debug/debug-scopes.cc b/deps/v8/src/debug/debug-scopes.cc
index 01cd017eb2..65794e85fb 100644
--- a/deps/v8/src/debug/debug-scopes.cc
+++ b/deps/v8/src/debug/debug-scopes.cc
@@ -14,6 +14,7 @@
#include "src/isolate-inl.h"
#include "src/objects/js-generator-inl.h"
#include "src/objects/module.h"
+#include "src/ostreams.h"
#include "src/parsing/parse-info.h"
#include "src/parsing/parsing.h"
#include "src/parsing/rewriter.h"
@@ -46,7 +47,7 @@ Handle<Object> ScopeIterator::GetFunctionDebugName() const {
if (!context_->IsNativeContext()) {
DisallowHeapAllocation no_gc;
- ScopeInfo* closure_info = context_->closure_context()->scope_info();
+ ScopeInfo closure_info = context_->closure_context()->scope_info();
Handle<String> debug_name(closure_info->FunctionDebugName(), isolate_);
if (debug_name->length() > 0) return debug_name;
}
@@ -54,13 +55,12 @@ Handle<Object> ScopeIterator::GetFunctionDebugName() const {
}
ScopeIterator::ScopeIterator(Isolate* isolate, Handle<JSFunction> function)
- : isolate_(isolate),
- context_(function->context(), isolate),
- script_(Script::cast(function->shared()->script()), isolate) {
+ : isolate_(isolate), context_(function->context(), isolate) {
if (!function->shared()->IsSubjectToDebugging()) {
context_ = Handle<Context>();
return;
}
+ script_ = handle(Script::cast(function->shared()->script()), isolate);
UnwrapEvaluationContext();
}
@@ -71,10 +71,7 @@ ScopeIterator::ScopeIterator(Isolate* isolate,
function_(generator->function(), isolate),
context_(generator->context(), isolate),
script_(Script::cast(function_->shared()->script()), isolate) {
- if (!function_->shared()->IsSubjectToDebugging()) {
- context_ = Handle<Context>();
- return;
- }
+ CHECK(function_->shared()->IsSubjectToDebugging());
TryParseAndRetrieveScopes(DEFAULT);
}
@@ -98,6 +95,16 @@ void ScopeIterator::TryParseAndRetrieveScopes(ScopeIterator::Option option) {
return;
}
+ // Class-field initializer functions don't have any scope
+ // information. We short-circuit the parsing of the class literal
+ // and return an empty context here.
+ if (IsClassMembersInitializerFunction(shared_info->kind())) {
+ current_scope_ = closure_scope_ = nullptr;
+ context_ = Handle<Context>();
+ function_ = Handle<JSFunction>();
+ return;
+ }
+
DCHECK_NE(IGNORE_NESTED_SCOPES, option);
bool ignore_nested_scopes = false;
if (shared_info->HasBreakInfo() && frame_inspector_ != nullptr) {
@@ -177,13 +184,13 @@ void ScopeIterator::TryParseAndRetrieveScopes(ScopeIterator::Option option) {
void ScopeIterator::UnwrapEvaluationContext() {
if (!context_->IsDebugEvaluateContext()) return;
- Context* current = *context_;
+ Context current = *context_;
do {
- Object* wrapped = current->get(Context::WRAPPED_CONTEXT_INDEX);
+ Object wrapped = current->get(Context::WRAPPED_CONTEXT_INDEX);
if (wrapped->IsContext()) {
current = Context::cast(wrapped);
} else {
- DCHECK_NOT_NULL(current->previous());
+ DCHECK(!current->previous().is_null());
current = current->previous();
}
} while (current->IsDebugEvaluateContext());
@@ -277,7 +284,7 @@ void ScopeIterator::Next() {
DCHECK_NOT_NULL(current_scope_);
do {
if (current_scope_->NeedsContext()) {
- DCHECK_NOT_NULL(context_->previous());
+ DCHECK(!context_->previous().is_null());
context_ = handle(context_->previous(), isolate_);
}
DCHECK_IMPLIES(InInnerScope(), current_scope_->outer_scope() != nullptr);
@@ -568,7 +575,7 @@ void ScopeIterator::VisitModuleScope(const Visitor& visitor) const {
int index;
Handle<String> name;
{
- String* raw_name;
+ String raw_name;
scope_info->ModuleVariable(i, &raw_name, &index);
CHECK(!ScopeInfo::VariableIsSynthetic(raw_name));
name = handle(raw_name, isolate_);
@@ -599,7 +606,12 @@ bool ScopeIterator::VisitContextLocals(const Visitor& visitor,
bool ScopeIterator::VisitLocals(const Visitor& visitor, Mode mode) const {
for (Variable* var : *current_scope_->locals()) {
- if (!var->is_this() && ScopeInfo::VariableIsSynthetic(*var->name())) {
+ if (var->is_this()) {
+ // Only collect "this" for DebugEvaluate. The debugger will manually add
+ // "this" in a different way, and if we'd add it here as well, it shows up
+ // twice.
+ if (mode == Mode::ALL) continue;
+ } else if (ScopeInfo::VariableIsSynthetic(*var->name())) {
continue;
}
@@ -612,8 +624,6 @@ bool ScopeIterator::VisitLocals(const Visitor& visitor, Mode mode) const {
case VariableLocation::UNALLOCATED:
if (!var->is_this()) continue;
- // No idea why we only add it sometimes.
- if (mode == Mode::ALL) continue;
// No idea why this diverges...
value = frame_inspector_->GetReceiver();
break;
@@ -625,7 +635,7 @@ bool ScopeIterator::VisitLocals(const Visitor& visitor, Mode mode) const {
if (var->is_this()) {
value = handle(generator_->receiver(), isolate_);
} else {
- FixedArray* parameters_and_registers =
+ FixedArray parameters_and_registers =
generator_->parameters_and_registers();
DCHECK_LT(index, parameters_and_registers->length());
value = handle(parameters_and_registers->get(index), isolate_);
@@ -647,7 +657,7 @@ bool ScopeIterator::VisitLocals(const Visitor& visitor, Mode mode) const {
if (frame_inspector_ == nullptr) {
// Get the variable from the suspended generator.
DCHECK(!generator_.is_null());
- FixedArray* parameters_and_registers =
+ FixedArray parameters_and_registers =
generator_->parameters_and_registers();
int parameter_count =
function_->shared()->scope_info()->ParameterCount();
@@ -675,8 +685,6 @@ bool ScopeIterator::VisitLocals(const Visitor& visitor, Mode mode) const {
case VariableLocation::CONTEXT:
if (mode == Mode::STACK) continue;
- // TODO(verwaest): Why don't we want to show it if it's there?...
- if (var->is_this()) continue;
DCHECK(var->IsContextSlot());
value = handle(context_->get(index), isolate_);
// Reflect variables under TDZ as undeclared in scope object.
@@ -750,7 +758,7 @@ void ScopeIterator::VisitLocalScope(const Visitor& visitor, Mode mode) const {
DCHECK(!context_->IsNativeContext());
DCHECK(!context_->IsWithContext());
if (!context_->scope_info()->CallsSloppyEval()) return;
- if (context_->extension_object() == nullptr) return;
+ if (context_->extension_object().is_null()) return;
Handle<JSObject> extension(context_->extension_object(), isolate_);
Handle<FixedArray> keys =
KeyAccumulator::GetKeys(extension, KeyCollectionMode::kOwnOnly,
diff --git a/deps/v8/src/debug/debug-stack-trace-iterator.cc b/deps/v8/src/debug/debug-stack-trace-iterator.cc
index 5f2d657194..99d9be380d 100644
--- a/deps/v8/src/debug/debug-stack-trace-iterator.cc
+++ b/deps/v8/src/debug/debug-stack-trace-iterator.cc
@@ -69,7 +69,7 @@ int DebugStackTraceIterator::GetContextId() const {
DCHECK(!Done());
Handle<Object> context = frame_inspector_->GetContext();
if (context->IsContext()) {
- Object* value =
+ Object value =
Context::cast(*context)->native_context()->debug_context_id();
if (value->IsSmi()) return Smi::ToInt(value);
}
diff --git a/deps/v8/src/debug/debug-type-profile.cc b/deps/v8/src/debug/debug-type-profile.cc
index 794087115f..1f2eb7b44e 100644
--- a/deps/v8/src/debug/debug-type-profile.cc
+++ b/deps/v8/src/debug/debug-type-profile.cc
@@ -24,7 +24,8 @@ std::unique_ptr<TypeProfile> TypeProfile::Collect(Isolate* isolate) {
Script::Iterator scripts(isolate);
- while (Script* script = scripts.Next()) {
+ for (Script script = scripts.Next(); !script.is_null();
+ script = scripts.Next()) {
if (!script->IsUserJavaScript()) {
continue;
}
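
This loop rewrite is an instance of a pattern applied throughout the patch: heap references migrate from raw C++ pointers (Object* and friends) to small value types (Object, Script, ...) wrapping a tagged address. Emptiness is therefore tested with is_null() instead of a nullptr comparison, and while-loops of the form "while (T* x = it.Next())", which relied on a null pointer being falsy, become explicit for loops. A minimal sketch of the idiom (Use() is a placeholder for the loop body):

    // Before: a null pointer terminated the walk.
    //   while (Script* script = scripts.Next()) { Use(script); }
    // After: Next() returns a value object; a null value signals the end.
    for (Script script = scripts.Next(); !script.is_null();
         script = scripts.Next()) {
      Use(script);
    }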
@@ -37,15 +38,16 @@ std::unique_ptr<TypeProfile> TypeProfile::Collect(Isolate* isolate) {
// TODO(franzih): Sort the vectors by script first instead of iterating
// the list multiple times.
for (int i = 0; i < list->Length(); i++) {
- FeedbackVector* vector = FeedbackVector::cast(list->Get(i));
- SharedFunctionInfo* info = vector->shared_function_info();
+ FeedbackVector vector = FeedbackVector::cast(list->Get(i));
+ SharedFunctionInfo info = vector->shared_function_info();
DCHECK(info->IsSubjectToDebugging());
// Match vectors with script.
if (script != info->script()) {
continue;
}
- if (info->feedback_metadata()->is_empty() ||
+ if (!info->HasFeedbackMetadata() ||
+ info->feedback_metadata()->is_empty() ||
!info->feedback_metadata()->HasTypeProfileSlot()) {
continue;
}
@@ -86,8 +88,8 @@ void TypeProfile::SelectMode(Isolate* isolate, debug::TypeProfile::Mode mode) {
isolate->factory()->feedback_vectors_for_profiling_tools());
for (int i = 0; i < list->Length(); i++) {
- FeedbackVector* vector = FeedbackVector::cast(list->Get(i));
- SharedFunctionInfo* info = vector->shared_function_info();
+ FeedbackVector vector = FeedbackVector::cast(list->Get(i));
+ SharedFunctionInfo info = vector->shared_function_info();
DCHECK(info->IsSubjectToDebugging());
if (info->feedback_metadata()->HasTypeProfileSlot()) {
FeedbackSlot slot = vector->GetTypeProfileSlot();
diff --git a/deps/v8/src/debug/debug-type-profile.h b/deps/v8/src/debug/debug-type-profile.h
index 0d2e88a7d5..3bdcfc62ba 100644
--- a/deps/v8/src/debug/debug-type-profile.h
+++ b/deps/v8/src/debug/debug-type-profile.h
@@ -8,6 +8,7 @@
#include <vector>
#include "src/debug/debug-interface.h"
+#include "src/handles.h"
#include "src/objects.h"
namespace v8 {
diff --git a/deps/v8/src/debug/debug.cc b/deps/v8/src/debug/debug.cc
index 8a5a9b6eb0..2789513514 100644
--- a/deps/v8/src/debug/debug.cc
+++ b/deps/v8/src/debug/debug.cc
@@ -13,9 +13,9 @@
#include "src/base/platform/mutex.h"
#include "src/bootstrapper.h"
#include "src/builtins/builtins.h"
-#include "src/code-stubs.h"
#include "src/compilation-cache.h"
#include "src/compiler.h"
+#include "src/counters.h"
#include "src/debug/debug-evaluate.h"
#include "src/debug/liveedit.h"
#include "src/deoptimizer.h"
@@ -27,12 +27,12 @@
#include "src/interpreter/bytecode-array-iterator.h"
#include "src/interpreter/interpreter.h"
#include "src/isolate-inl.h"
-#include "src/log.h"
-#include "src/messages.h"
+#include "src/message-template.h"
#include "src/objects/api-callbacks-inl.h"
#include "src/objects/debug-objects-inl.h"
#include "src/objects/js-generator-inl.h"
#include "src/objects/js-promise-inl.h"
+#include "src/objects/slots.h"
#include "src/snapshot/natives.h"
#include "src/snapshot/snapshot.h"
#include "src/wasm/wasm-objects-inl.h"
@@ -49,7 +49,7 @@ class Debug::TemporaryObjectsTracker : public HeapObjectAllocationTracker {
void MoveEvent(Address from, Address to, int) override {
if (from == to) return;
- base::LockGuard<base::Mutex> guard(&mutex_);
+ base::MutexGuard guard(&mutex_);
auto it = objects_.find(from);
if (it == objects_.end()) {
// If a temporary object was collected, we can get a MoveEvent which moves
@@ -131,14 +131,13 @@ void BreakLocation::AllAtCurrentStatement(
}
}
-JSGeneratorObject* BreakLocation::GetGeneratorObjectForSuspendedFrame(
+JSGeneratorObject BreakLocation::GetGeneratorObjectForSuspendedFrame(
JavaScriptFrame* frame) const {
DCHECK(IsSuspend());
DCHECK_GE(generator_obj_reg_index_, 0);
- Object* generator_obj =
- InterpretedFrame::cast(frame)->ReadInterpreterRegister(
- generator_obj_reg_index_);
+ Object generator_obj = InterpretedFrame::cast(frame)->ReadInterpreterRegister(
+ generator_obj_reg_index_);
return JSGeneratorObject::cast(generator_obj);
}
@@ -247,7 +246,7 @@ void BreakIterator::Next() {
}
DebugBreakType BreakIterator::GetDebugBreakType() {
- BytecodeArray* bytecode_array = debug_info_->OriginalBytecodeArray();
+ BytecodeArray bytecode_array = debug_info_->OriginalBytecodeArray();
interpreter::Bytecode bytecode =
interpreter::Bytecodes::FromByte(bytecode_array->get(code_offset()));
@@ -292,8 +291,8 @@ void BreakIterator::ClearDebugBreak() {
DebugBreakType debug_break_type = GetDebugBreakType();
if (debug_break_type == DEBUGGER_STATEMENT) return;
DCHECK(debug_break_type >= DEBUG_BREAK_SLOT);
- BytecodeArray* bytecode_array = debug_info_->DebugBytecodeArray();
- BytecodeArray* original = debug_info_->OriginalBytecodeArray();
+ BytecodeArray bytecode_array = debug_info_->DebugBytecodeArray();
+ BytecodeArray original = debug_info_->OriginalBytecodeArray();
bytecode_array->set(code_offset(), original->get(code_offset()));
}
@@ -308,7 +307,7 @@ BreakLocation BreakIterator::GetBreakLocation() {
// index that holds the generator object by reading it directly off the
// bytecode array, and we'll read the actual generator object off the
// interpreter stack frame in GetGeneratorObjectForSuspendedFrame.
- BytecodeArray* bytecode_array = debug_info_->OriginalBytecodeArray();
+ BytecodeArray bytecode_array = debug_info_->OriginalBytecodeArray();
interpreter::BytecodeArrayAccessor accessor(
handle(bytecode_array, isolate()), code_offset());
@@ -380,24 +379,25 @@ char* Debug::RestoreDebug(char* storage) {
int Debug::ArchiveSpacePerThread() { return sizeof(ThreadLocal); }
void Debug::Iterate(RootVisitor* v) {
- v->VisitRootPointer(Root::kDebug, nullptr, &thread_local_.return_value_);
v->VisitRootPointer(Root::kDebug, nullptr,
- &thread_local_.suspended_generator_);
+ FullObjectSlot(&thread_local_.return_value_));
v->VisitRootPointer(Root::kDebug, nullptr,
- &thread_local_.ignore_step_into_function_);
+ FullObjectSlot(&thread_local_.suspended_generator_));
+ v->VisitRootPointer(
+ Root::kDebug, nullptr,
+ FullObjectSlot(&thread_local_.ignore_step_into_function_));
}
-DebugInfoListNode::DebugInfoListNode(Isolate* isolate, DebugInfo* debug_info)
+DebugInfoListNode::DebugInfoListNode(Isolate* isolate, DebugInfo debug_info)
: next_(nullptr) {
// Globalize the requested debug info object and make it weak.
GlobalHandles* global_handles = isolate->global_handles();
debug_info_ = global_handles->Create(debug_info).location();
}
-
DebugInfoListNode::~DebugInfoListNode() {
if (debug_info_ == nullptr) return;
- GlobalHandles::Destroy(reinterpret_cast<Object**>(debug_info_));
+ GlobalHandles::Destroy(debug_info_);
debug_info_ = nullptr;
}
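
The Iterate() change above shows the same migration at the GC boundary: RootVisitor::VisitRootPointer no longer takes an Object** but a FullObjectSlot wrapping the address of the field. A minimal sketch of the idiom, with an invented holder class for illustration:

    class ExampleRootHolder {
     public:
      void Iterate(RootVisitor* v) {
        // Expose the raw Object field to the GC as a full (uncompressed)
        // object slot so it is traced as a strong root.
        v->VisitRootPointer(Root::kDebug, nullptr, FullObjectSlot(&cached_));
      }

     private:
      Object cached_;  // Kept alive via the slot above.
    };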
@@ -682,10 +682,10 @@ void Debug::ApplyBreakPoints(Handle<DebugInfo> debug_info) {
debug_info->SetBreakAtEntry();
} else {
if (!debug_info->HasInstrumentedBytecodeArray()) return;
- FixedArray* break_points = debug_info->break_points();
+ FixedArray break_points = debug_info->break_points();
for (int i = 0; i < break_points->length(); i++) {
if (break_points->get(i)->IsUndefined(isolate_)) continue;
- BreakPointInfo* info = BreakPointInfo::cast(break_points->get(i));
+ BreakPointInfo info = BreakPointInfo::cast(break_points->get(i));
if (info->GetBreakPointCount(isolate_) == 0) continue;
DCHECK(debug_info->HasInstrumentedBytecodeArray());
BreakIterator it(debug_info);
@@ -892,7 +892,7 @@ void Debug::PrepareStepOnThrow() {
while (!it.done()) {
JavaScriptFrame* frame = it.frame();
if (frame->LookupExceptionHandlerInTable(nullptr, nullptr) > 0) break;
- std::vector<SharedFunctionInfo*> infos;
+ std::vector<SharedFunctionInfo> infos;
frame->GetFunctions(&infos);
current_frame_count -= infos.size();
it.Advance();
@@ -1090,7 +1090,7 @@ Handle<Object> Debug::GetSourceBreakLocations(
int count = 0;
for (int i = 0; i < debug_info->break_points()->length(); ++i) {
if (!debug_info->break_points()->get(i)->IsUndefined(isolate)) {
- BreakPointInfo* break_point_info =
+ BreakPointInfo break_point_info =
BreakPointInfo::cast(debug_info->break_points()->get(i));
int break_points = break_point_info->GetBreakPointCount(isolate);
if (break_points == 0) continue;
@@ -1135,7 +1135,7 @@ void Debug::ClearOneShot() {
class RedirectActiveFunctions : public ThreadVisitor {
public:
- explicit RedirectActiveFunctions(SharedFunctionInfo* shared)
+ explicit RedirectActiveFunctions(SharedFunctionInfo shared)
: shared_(shared) {
DCHECK(shared->HasBytecodeArray());
}
@@ -1143,18 +1143,18 @@ class RedirectActiveFunctions : public ThreadVisitor {
void VisitThread(Isolate* isolate, ThreadLocalTop* top) override {
for (JavaScriptFrameIterator it(isolate, top); !it.done(); it.Advance()) {
JavaScriptFrame* frame = it.frame();
- JSFunction* function = frame->function();
+ JSFunction function = frame->function();
if (!frame->is_interpreted()) continue;
if (function->shared() != shared_) continue;
InterpretedFrame* interpreted_frame =
reinterpret_cast<InterpretedFrame*>(frame);
- BytecodeArray* debug_copy = shared_->GetDebugInfo()->DebugBytecodeArray();
+ BytecodeArray debug_copy = shared_->GetDebugInfo()->DebugBytecodeArray();
interpreted_frame->PatchBytecodeArray(debug_copy);
}
}
private:
- SharedFunctionInfo* shared_;
+ SharedFunctionInfo shared_;
DisallowHeapAllocation no_gc_;
};
@@ -1169,12 +1169,14 @@ void Debug::DeoptimizeFunction(Handle<SharedFunctionInfo> shared) {
bool found_something = false;
Code::OptimizedCodeIterator iterator(isolate_);
- while (Code* code = iterator.Next()) {
+ do {
+ Code code = iterator.Next();
+ if (code.is_null()) break;
if (code->Inlines(*shared)) {
code->set_marked_for_deoptimization(true);
found_something = true;
}
- }
+ } while (true);
if (found_something) {
// Only go through with the deoptimization if something was found.
@@ -1200,6 +1202,7 @@ void Debug::PrepareFunctionForDebugExecution(
handle(shared->GetBytecodeArray(), isolate_);
Handle<BytecodeArray> debug_bytecode_array =
isolate_->factory()->CopyBytecodeArray(original_bytecode_array);
+ debug_info->set_debug_bytecode_array(*debug_bytecode_array);
shared->SetDebugBytecodeArray(*debug_bytecode_array);
maybe_original_bytecode_array = original_bytecode_array;
}
@@ -1246,13 +1249,14 @@ void Debug::InstallDebugBreakTrampoline() {
std::vector<Handle<JSFunction>> needs_compile;
{
HeapIterator iterator(isolate_->heap());
- while (HeapObject* obj = iterator.next()) {
+ for (HeapObject obj = iterator.next(); !obj.is_null();
+ obj = iterator.next()) {
if (needs_to_clear_ic && obj->IsFeedbackVector()) {
FeedbackVector::cast(obj)->ClearSlots(isolate_);
continue;
} else if (obj->IsJSFunction()) {
- JSFunction* fun = JSFunction::cast(obj);
- SharedFunctionInfo* shared = fun->shared();
+ JSFunction fun = JSFunction::cast(obj);
+ SharedFunctionInfo shared = fun->shared();
if (!shared->HasDebugInfo()) continue;
if (!shared->GetDebugInfo()->CanBreakAtEntry()) continue;
if (!fun->is_compiled()) {
@@ -1266,7 +1270,9 @@ void Debug::InstallDebugBreakTrampoline() {
// By overwriting the function code with DebugBreakTrampoline, which tailcalls
// to shared code, we bypass CompileLazy. Perform CompileLazy here instead.
for (Handle<JSFunction> fun : needs_compile) {
- Compiler::Compile(fun, Compiler::CLEAR_EXCEPTION);
+ IsCompiledScope is_compiled_scope;
+ Compiler::Compile(fun, Compiler::CLEAR_EXCEPTION, &is_compiled_scope);
+ DCHECK(is_compiled_scope.is_compiled());
fun->set_code(*trampoline);
}
}
@@ -1314,8 +1320,9 @@ bool Debug::GetPossibleBreakpoints(Handle<Script> script, int start_position,
while (true) {
HandleScope scope(isolate_);
std::vector<Handle<SharedFunctionInfo>> candidates;
+ std::vector<IsCompiledScope> compiled_scopes;
SharedFunctionInfo::ScriptIterator iterator(isolate_, *script);
- for (SharedFunctionInfo* info = iterator.Next(); info != nullptr;
+ for (SharedFunctionInfo info = iterator.Next(); !info.is_null();
info = iterator.Next()) {
if (info->EndPosition() < start_position ||
info->StartPosition() >= end_position) {
@@ -1330,13 +1337,17 @@ bool Debug::GetPossibleBreakpoints(Handle<Script> script, int start_position,
for (const auto& candidate : candidates) {
// Code that cannot be compiled lazily is internal and not debuggable.
DCHECK(candidate->allows_lazy_compilation());
- if (!candidate->is_compiled()) {
- if (!Compiler::Compile(candidate, Compiler::CLEAR_EXCEPTION)) {
+ IsCompiledScope is_compiled_scope(candidate->is_compiled_scope());
+ if (!is_compiled_scope.is_compiled()) {
+ if (!Compiler::Compile(candidate, Compiler::CLEAR_EXCEPTION,
+ &is_compiled_scope)) {
return false;
} else {
was_compiled = true;
}
}
+ DCHECK(is_compiled_scope.is_compiled());
+ compiled_scopes.push_back(is_compiled_scope);
if (!EnsureBreakInfo(candidate)) return false;
PrepareFunctionForDebugExecution(candidate);
}
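
The compile sites in this hunk follow the new Compiler::Compile contract: the caller passes an IsCompiledScope out-parameter, and holding that scope is what keeps the freshly compiled bytecode from being flushed while it is still needed (hence the compiled_scopes vector retaining one scope per candidate). A caller-side sketch, with a hypothetical helper name:

    // Hypothetical helper: compile |shared| if necessary and keep its
    // bytecode alive for as long as |scope| is held by the caller.
    bool EnsureCompiledAndRetained(Handle<SharedFunctionInfo> shared,
                                   IsCompiledScope* scope) {
      *scope = shared->is_compiled_scope();
      if (scope->is_compiled()) return true;
      return Compiler::Compile(shared, Compiler::CLEAR_EXCEPTION, scope);
    }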
@@ -1356,12 +1367,11 @@ bool Debug::GetPossibleBreakpoints(Handle<Script> script, int start_position,
class SharedFunctionInfoFinder {
public:
explicit SharedFunctionInfoFinder(int target_position)
- : current_candidate_(nullptr),
- current_candidate_closure_(nullptr),
- current_start_position_(kNoSourcePosition),
+ : current_start_position_(kNoSourcePosition),
target_position_(target_position) {}
- void NewCandidate(SharedFunctionInfo* shared, JSFunction* closure = nullptr) {
+ void NewCandidate(SharedFunctionInfo shared,
+ JSFunction closure = JSFunction()) {
if (!shared->IsSubjectToDebugging()) return;
int start_position = shared->function_token_position();
if (start_position == kNoSourcePosition) {
@@ -1371,11 +1381,11 @@ class SharedFunctionInfoFinder {
if (start_position > target_position_) return;
if (target_position_ > shared->EndPosition()) return;
- if (current_candidate_ != nullptr) {
+ if (!current_candidate_.is_null()) {
if (current_start_position_ == start_position &&
shared->EndPosition() == current_candidate_->EndPosition()) {
// If we already have a matching closure, do not throw it away.
- if (current_candidate_closure_ != nullptr && closure == nullptr) return;
+ if (!current_candidate_closure_.is_null() && closure.is_null()) return;
// If a top-level function contains only one function
// declaration, the source for the top-level and the function
// is the same. In that case, prefer the non-top-level function.
@@ -1391,13 +1401,13 @@ class SharedFunctionInfoFinder {
current_candidate_closure_ = closure;
}
- SharedFunctionInfo* Result() { return current_candidate_; }
+ SharedFunctionInfo Result() { return current_candidate_; }
- JSFunction* ResultClosure() { return current_candidate_closure_; }
+ JSFunction ResultClosure() { return current_candidate_closure_; }
private:
- SharedFunctionInfo* current_candidate_;
- JSFunction* current_candidate_closure_;
+ SharedFunctionInfo current_candidate_;
+ JSFunction current_candidate_closure_;
int current_start_position_;
int target_position_;
DisallowHeapAllocation no_gc_;
@@ -1418,18 +1428,20 @@ Handle<Object> Debug::FindSharedFunctionInfoInScript(Handle<Script> script,
// If there is no shared function info for this script at all, there is
// no point in looking for it by walking the heap.
- SharedFunctionInfo* shared;
+ SharedFunctionInfo shared;
+ IsCompiledScope is_compiled_scope;
{
SharedFunctionInfoFinder finder(position);
SharedFunctionInfo::ScriptIterator iterator(isolate_, *script);
- for (SharedFunctionInfo* info = iterator.Next(); info != nullptr;
+ for (SharedFunctionInfo info = iterator.Next(); !info.is_null();
info = iterator.Next()) {
finder.NewCandidate(info);
}
shared = finder.Result();
- if (shared == nullptr) break;
+ if (shared.is_null()) break;
// We found it if it's already compiled.
- if (shared->is_compiled()) {
+ is_compiled_scope = shared->is_compiled_scope();
+ if (is_compiled_scope.is_compiled()) {
Handle<SharedFunctionInfo> shared_handle(shared, isolate_);
// If the iteration count is larger than 1, we had to compile the outer
// function in order to create this shared function info. So there can
@@ -1446,8 +1458,10 @@ Handle<Object> Debug::FindSharedFunctionInfoInScript(Handle<Script> script,
HandleScope scope(isolate_);
// Code that cannot be compiled lazily is internal and not debuggable.
DCHECK(shared->allows_lazy_compilation());
- if (!Compiler::Compile(handle(shared, isolate_), Compiler::CLEAR_EXCEPTION))
+ if (!Compiler::Compile(handle(shared, isolate_), Compiler::CLEAR_EXCEPTION,
+ &is_compiled_scope)) {
break;
+ }
}
return isolate_->factory()->undefined_value();
}
@@ -1460,14 +1474,12 @@ bool Debug::EnsureBreakInfo(Handle<SharedFunctionInfo> shared) {
if (!shared->IsSubjectToDebugging() && !CanBreakAtEntry(shared)) {
return false;
}
- if (!shared->is_compiled() &&
- !Compiler::Compile(shared, Compiler::CLEAR_EXCEPTION)) {
+ IsCompiledScope is_compiled_scope = shared->is_compiled_scope();
+ if (!is_compiled_scope.is_compiled() &&
+ !Compiler::Compile(shared, Compiler::CLEAR_EXCEPTION,
+ &is_compiled_scope)) {
return false;
}
- if (shared->GetCode() ==
- isolate_->builtins()->builtin(Builtins::kDeserializeLazy)) {
- Snapshot::EnsureBuiltinIsDeserialized(isolate_, shared);
- }
CreateBreakInfo(shared);
return true;
}
@@ -1635,8 +1647,8 @@ Handle<FixedArray> Debug::GetLoadedScripts() {
int length = 0;
{
Script::Iterator iterator(isolate_);
- Script* script;
- while ((script = iterator.Next()) != nullptr) {
+ for (Script script = iterator.Next(); !script.is_null();
+ script = iterator.Next()) {
if (script->HasValidSource()) results->set(length++, script);
}
}
@@ -1711,7 +1723,7 @@ void Debug::OnException(Handle<Object> exception, Handle<Object> promise,
Handle<JSObject> jspromise = Handle<JSObject>::cast(promise);
// Mark the promise as already having triggered a message.
Handle<Symbol> key = isolate_->factory()->promise_debug_marker_symbol();
- JSObject::SetProperty(isolate_, jspromise, key, key, LanguageMode::kStrict)
+ Object::SetProperty(isolate_, jspromise, key, key, LanguageMode::kStrict)
.Assert();
// Check whether the promise reject is considered an uncaught exception.
uncaught = !isolate_->PromiseHasUserDefinedRejectHandler(jspromise);
@@ -1768,7 +1780,7 @@ void Debug::OnDebugBreak(Handle<FixedArray> break_points_hit) {
int inspector_break_points_count = 0;
// This array contains breakpoints installed using JS debug API.
for (int i = 0; i < break_points_hit->length(); ++i) {
- BreakPoint* break_point = BreakPoint::cast(break_points_hit->get(i));
+ BreakPoint break_point = BreakPoint::cast(break_points_hit->get(i));
inspector_break_points_hit.push_back(break_point->id());
++inspector_break_points_count;
}
@@ -1820,6 +1832,7 @@ bool Debug::IsBlackboxed(Handle<SharedFunctionInfo> shared) {
bool Debug::AllFramesOnStackAreBlackboxed() {
HandleScope scope(isolate_);
for (StackTraceFrameIterator it(isolate_); !it.done(); it.Advance()) {
+ if (!it.is_javascript()) continue;
if (!IsFrameBlackboxed(it.javascript_frame())) return false;
}
return true;
@@ -1883,7 +1896,7 @@ int Debug::CurrentFrameCount() {
int counter = 0;
while (!it.done()) {
if (it.frame()->is_optimized()) {
- std::vector<SharedFunctionInfo*> infos;
+ std::vector<SharedFunctionInfo> infos;
OptimizedFrame::cast(it.frame())->GetFunctions(&infos);
counter += infos.size();
} else {
@@ -1913,9 +1926,7 @@ void Debug::UpdateState() {
Unload();
}
is_active_ = is_active;
- if (is_active && isolate_->IsPromiseHookProtectorIntact()) {
- isolate_->InvalidatePromiseHookProtector();
- }
+ isolate_->PromiseHookStateUpdated();
}
void Debug::UpdateHookOnFunctionCall() {
@@ -1941,8 +1952,8 @@ void Debug::HandleDebugBreak(IgnoreBreakMode ignore_break_mode) {
{ JavaScriptFrameIterator it(isolate_);
DCHECK(!it.done());
- Object* fun = it.frame()->function();
- if (fun && fun->IsJSFunction()) {
+ Object fun = it.frame()->function();
+ if (fun->IsJSFunction()) {
HandleScope scope(isolate_);
Handle<JSFunction> function(JSFunction::cast(fun), isolate_);
// Don't stop in builtin and blackboxed functions.
@@ -1991,7 +2002,7 @@ void Debug::PrintBreakLocation() {
int line_start = line == 0 ? 0 : Smi::ToInt(line_ends->get(line - 1)) + 1;
int line_end = Smi::ToInt(line_ends->get(line));
DisallowHeapAllocation no_gc;
- String::FlatContent content = source->GetFlatContent();
+ String::FlatContent content = source->GetFlatContent(no_gc);
if (content.IsOneByte()) {
PrintF("[debug] %.*s\n", line_end - line_start,
content.ToOneByteVector().start() + line_start);
@@ -2137,10 +2148,13 @@ bool Debug::PerformSideEffectCheck(Handle<JSFunction> function,
Handle<Object> receiver) {
DCHECK_EQ(isolate_->debug_execution_mode(), DebugInfo::kSideEffects);
DisallowJavascriptExecution no_js(isolate_);
+ IsCompiledScope is_compiled_scope(function->shared()->is_compiled_scope());
if (!function->is_compiled() &&
- !Compiler::Compile(function, Compiler::KEEP_EXCEPTION)) {
+ !Compiler::Compile(function, Compiler::KEEP_EXCEPTION,
+ &is_compiled_scope)) {
return false;
}
+ DCHECK(is_compiled_scope.is_compiled());
Handle<SharedFunctionInfo> shared(function->shared(), isolate_);
Handle<DebugInfo> debug_info = GetOrCreateDebugInfo(shared);
DebugInfo::SideEffectState side_effect_state =
@@ -2162,10 +2176,6 @@ bool Debug::PerformSideEffectCheck(Handle<JSFunction> function,
// If function has bytecode array then prepare function for debug
// execution to perform runtime side effect checks.
DCHECK(shared->is_compiled());
- if (shared->GetCode() ==
- isolate_->builtins()->builtin(Builtins::kDeserializeLazy)) {
- Snapshot::EnsureBuiltinIsDeserialized(isolate_, shared);
- }
PrepareFunctionForDebugExecution(shared);
ApplySideEffectChecks(debug_info);
return true;
@@ -2197,7 +2207,7 @@ bool Debug::PerformSideEffectCheckForCallback(
if (!callback_info.is_null()) {
if (callback_info->IsAccessorInfo()) {
// List of whitelisted internal accessors can be found in accessors.h.
- AccessorInfo* info = AccessorInfo::cast(*callback_info);
+ AccessorInfo info = AccessorInfo::cast(*callback_info);
DCHECK_NE(kNotAccessor, accessor_kind);
switch (accessor_kind == kSetter ? info->setter_side_effect_type()
: info->getter_side_effect_type()) {
@@ -2221,13 +2231,13 @@ bool Debug::PerformSideEffectCheckForCallback(
PrintF("' may cause side effect.\n");
}
} else if (callback_info->IsInterceptorInfo()) {
- InterceptorInfo* info = InterceptorInfo::cast(*callback_info);
+ InterceptorInfo info = InterceptorInfo::cast(*callback_info);
if (info->has_no_side_effect()) return true;
if (FLAG_trace_side_effect_free_debug_evaluate) {
PrintF("[debug-evaluate] API Interceptor may cause side effect.\n");
}
} else if (callback_info->IsCallHandlerInfo()) {
- CallHandlerInfo* info = CallHandlerInfo::cast(*callback_info);
+ CallHandlerInfo info = CallHandlerInfo::cast(*callback_info);
if (info->IsSideEffectFreeCallHandlerInfo()) return true;
if (FLAG_trace_side_effect_free_debug_evaluate) {
PrintF("[debug-evaluate] API CallHandlerInfo may cause side effect.\n");
@@ -2245,8 +2255,8 @@ bool Debug::PerformSideEffectCheckAtBytecode(InterpretedFrame* frame) {
using interpreter::Bytecode;
DCHECK_EQ(isolate_->debug_execution_mode(), DebugInfo::kSideEffects);
- SharedFunctionInfo* shared = frame->function()->shared();
- BytecodeArray* bytecode_array = shared->GetBytecodeArray();
+ SharedFunctionInfo shared = frame->function()->shared();
+ BytecodeArray bytecode_array = shared->GetBytecodeArray();
int offset = frame->GetBytecodeOffset();
interpreter::BytecodeArrayAccessor bytecode_accessor(
handle(bytecode_array, isolate_), offset);
diff --git a/deps/v8/src/debug/debug.h b/deps/v8/src/debug/debug.h
index 3b6748851b..215ec1d8ac 100644
--- a/deps/v8/src/debug/debug.h
+++ b/deps/v8/src/debug/debug.h
@@ -92,7 +92,7 @@ class BreakLocation {
debug::BreakLocationType type() const;
- JSGeneratorObject* GetGeneratorObjectForSuspendedFrame(
+ JSGeneratorObject GetGeneratorObjectForSuspendedFrame(
JavaScriptFrame* frame) const;
private:
@@ -170,7 +170,7 @@ class BreakIterator {
// weak handles to avoid a debug info object keeping a function alive.
class DebugInfoListNode {
public:
- DebugInfoListNode(Isolate* isolate, DebugInfo* debug_info);
+ DebugInfoListNode(Isolate* isolate, DebugInfo debug_info);
~DebugInfoListNode();
DebugInfoListNode* next() { return next_; }
@@ -179,7 +179,7 @@ class DebugInfoListNode {
private:
// Global (weak) handle to the debug info object.
- DebugInfo** debug_info_;
+ Address* debug_info_;
// Next pointer for linked list.
DebugInfoListNode* next_;
@@ -349,8 +349,8 @@ class Debug {
StackFrame::Id break_frame_id() { return thread_local_.break_frame_id_; }
Handle<Object> return_value_handle();
- Object* return_value() { return thread_local_.return_value_; }
- void set_return_value(Object* value) { thread_local_.return_value_ = value; }
+ Object return_value() { return thread_local_.return_value_; }
+ void set_return_value(Object value) { thread_local_.return_value_ = value; }
// Support for embedding into generated code.
Address is_active_address() {
@@ -368,6 +368,9 @@ class Debug {
Address restart_fp_address() {
return reinterpret_cast<Address>(&thread_local_.restart_fp_);
}
+ bool will_restart() const {
+ return thread_local_.restart_fp_ != kNullAddress;
+ }
StepAction last_step_action() { return thread_local_.last_step_action_; }
bool break_on_next_function_call() const {
@@ -506,7 +509,7 @@ class Debug {
// If set, the next PrepareStepIn will ignore this function until stepped into
// another function, at which point this will be cleared.
- Object* ignore_step_into_function_;
+ Object ignore_step_into_function_;
// If set, we need to repeat the StepOut action at return.
bool fast_forward_to_return_;
@@ -521,10 +524,10 @@ class Debug {
int target_frame_count_;
// Value of the accumulator at the point of entering the debugger.
- Object* return_value_;
+ Object return_value_;
// The suspended generator object to track when stepping.
- Object* suspended_generator_;
+ Object suspended_generator_;
// The new frame pointer to drop to when restarting a frame.
Address restart_fp_;
diff --git a/deps/v8/src/debug/ia32/debug-ia32.cc b/deps/v8/src/debug/ia32/debug-ia32.cc
index 1e3ab38966..a4466ee9eb 100644
--- a/deps/v8/src/debug/ia32/debug-ia32.cc
+++ b/deps/v8/src/debug/ia32/debug-ia32.cc
@@ -16,8 +16,6 @@ namespace internal {
#define __ ACCESS_MASM(masm)
void DebugCodegen::GenerateHandleDebuggerStatement(MacroAssembler* masm) {
- Assembler::SupportsRootRegisterScope supports_root_register(masm);
-
{
FrameScope scope(masm, StackFrame::INTERNAL);
__ CallRuntime(Runtime::kHandleDebuggerStatement, 0);
@@ -29,8 +27,6 @@ void DebugCodegen::GenerateHandleDebuggerStatement(MacroAssembler* masm) {
}
void DebugCodegen::GenerateFrameDropperTrampoline(MacroAssembler* masm) {
- Assembler::SupportsRootRegisterScope supports_root_register(masm);
-
// Frame is being dropped:
// - Drop to the target frame specified by eax.
// - Look up current function on the frame.
diff --git a/deps/v8/src/debug/liveedit.cc b/deps/v8/src/debug/liveedit.cc
index 371d1d5575..e76202e96b 100644
--- a/deps/v8/src/debug/liveedit.cc
+++ b/deps/v8/src/debug/liveedit.cc
@@ -14,7 +14,7 @@
#include "src/debug/debug.h"
#include "src/frames-inl.h"
#include "src/isolate-inl.h"
-#include "src/messages.h"
+#include "src/log.h"
#include "src/objects-inl.h"
#include "src/objects/hash-table-inl.h"
#include "src/objects/js-generator-inl.h"
@@ -809,12 +809,12 @@ class FunctionDataMap : public ThreadVisitor {
FunctionData{literal, should_restart});
}
- bool Lookup(SharedFunctionInfo* sfi, FunctionData** data) {
+ bool Lookup(SharedFunctionInfo sfi, FunctionData** data) {
int start_position = sfi->StartPosition();
if (!sfi->script()->IsScript() || start_position == -1) {
return false;
}
- Script* script = Script::cast(sfi->script());
+ Script script = Script::cast(sfi->script());
return Lookup(GetFuncId(script->id(), sfi), data);
}
@@ -826,22 +826,23 @@ class FunctionDataMap : public ThreadVisitor {
void Fill(Isolate* isolate, Address* restart_frame_fp) {
{
HeapIterator iterator(isolate->heap(), HeapIterator::kFilterUnreachable);
- while (HeapObject* obj = iterator.next()) {
+ for (HeapObject obj = iterator.next(); !obj.is_null();
+ obj = iterator.next()) {
if (obj->IsSharedFunctionInfo()) {
- SharedFunctionInfo* sfi = SharedFunctionInfo::cast(obj);
+ SharedFunctionInfo sfi = SharedFunctionInfo::cast(obj);
FunctionData* data = nullptr;
if (!Lookup(sfi, &data)) continue;
data->shared = handle(sfi, isolate);
} else if (obj->IsJSFunction()) {
- JSFunction* js_function = JSFunction::cast(obj);
- SharedFunctionInfo* sfi = js_function->shared();
+ JSFunction js_function = JSFunction::cast(obj);
+ SharedFunctionInfo sfi = js_function->shared();
FunctionData* data = nullptr;
if (!Lookup(sfi, &data)) continue;
data->js_functions.emplace_back(js_function, isolate);
} else if (obj->IsJSGeneratorObject()) {
- JSGeneratorObject* gen = JSGeneratorObject::cast(obj);
+ JSGeneratorObject gen = JSGeneratorObject::cast(obj);
if (gen->is_closed()) continue;
- SharedFunctionInfo* sfi = gen->function()->shared();
+ SharedFunctionInfo sfi = gen->function()->shared();
FunctionData* data = nullptr;
if (!Lookup(sfi, &data)) continue;
data->running_generators.emplace_back(gen, isolate);
@@ -900,7 +901,7 @@ class FunctionDataMap : public ThreadVisitor {
return FuncId(script_id, start_position);
}
- FuncId GetFuncId(int script_id, SharedFunctionInfo* sfi) {
+ FuncId GetFuncId(int script_id, SharedFunctionInfo sfi) {
DCHECK_EQ(script_id, Script::cast(sfi->script())->id());
int start_position = sfi->StartPosition();
DCHECK_NE(start_position, -1);
@@ -1129,18 +1130,19 @@ void LiveEdit::PatchScript(Isolate* isolate, Handle<Script> script,
start_position_to_unchanged_id[mapping.second->start_position()] =
mapping.second->function_literal_id();
- if (sfi->HasUncompiledDataWithPreParsedScope()) {
- sfi->ClearPreParsedScopeData();
+ if (sfi->HasUncompiledDataWithPreparseData()) {
+ sfi->ClearPreparseData();
}
for (auto& js_function : data->js_functions) {
- js_function->set_feedback_cell(*isolate->factory()->many_closures_cell());
+ js_function->set_raw_feedback_cell(
+ *isolate->factory()->many_closures_cell());
if (!js_function->is_compiled()) continue;
JSFunction::EnsureFeedbackVector(js_function);
}
if (!sfi->HasBytecodeArray()) continue;
- FixedArray* constants = sfi->GetBytecodeArray()->constant_pool();
+ FixedArray constants = sfi->GetBytecodeArray()->constant_pool();
for (int i = 0; i < constants->length(); ++i) {
if (!constants->get(i)->IsSharedFunctionInfo()) continue;
FunctionData* data = nullptr;
@@ -1174,18 +1176,20 @@ void LiveEdit::PatchScript(Isolate* isolate, Handle<Script> script,
js_function->set_shared(*new_sfi);
js_function->set_code(js_function->shared()->GetCode());
- js_function->set_feedback_cell(*isolate->factory()->many_closures_cell());
+ js_function->set_raw_feedback_cell(
+ *isolate->factory()->many_closures_cell());
if (!js_function->is_compiled()) continue;
JSFunction::EnsureFeedbackVector(js_function);
}
-
- if (!new_sfi->HasBytecodeArray()) continue;
- FixedArray* constants = new_sfi->GetBytecodeArray()->constant_pool();
+ }
+ SharedFunctionInfo::ScriptIterator it(isolate, *new_script);
+ for (SharedFunctionInfo sfi = it.Next(); !sfi.is_null(); sfi = it.Next()) {
+ if (!sfi->HasBytecodeArray()) continue;
+ FixedArray constants = sfi->GetBytecodeArray()->constant_pool();
for (int i = 0; i < constants->length(); ++i) {
if (!constants->get(i)->IsSharedFunctionInfo()) continue;
- SharedFunctionInfo* inner_sfi =
+ SharedFunctionInfo inner_sfi =
SharedFunctionInfo::cast(constants->get(i));
-
// See if there is a mapping from this function's start position to an
// unchanged function's id.
auto unchanged_it =
@@ -1194,17 +1198,15 @@ void LiveEdit::PatchScript(Isolate* isolate, Handle<Script> script,
// Grab that function id from the new script's SFI list, which should have
// already been updated in the unchanged pass.
- SharedFunctionInfo* old_unchanged_inner_sfi =
+ SharedFunctionInfo old_unchanged_inner_sfi =
SharedFunctionInfo::cast(new_script->shared_function_infos()
->Get(unchanged_it->second)
->GetHeapObject());
- // Now some sanity checks. Make sure that this inner_sfi is not the
- // unchanged SFI yet...
+ if (old_unchanged_inner_sfi == inner_sfi) continue;
DCHECK_NE(old_unchanged_inner_sfi, inner_sfi);
- // ... and that the unchanged SFI has already been processed and patched
- // to be on the new script ...
+ // Now some sanity checks. Make sure that the unchanged SFI has already
+ // been processed and patched to be on the new script ...
DCHECK_EQ(old_unchanged_inner_sfi->script(), *new_script);
-
constants->set(i, old_unchanged_inner_sfi);
}
}
@@ -1217,7 +1219,7 @@ void LiveEdit::PatchScript(Isolate* isolate, Handle<Script> script,
SharedFunctionInfo::ScriptIterator it(isolate, *new_script);
std::set<int> start_positions;
- while (SharedFunctionInfo* sfi = it.Next()) {
+ for (SharedFunctionInfo sfi = it.Next(); !sfi.is_null(); sfi = it.Next()) {
DCHECK_EQ(sfi->script(), *new_script);
DCHECK_EQ(sfi->FunctionLiteralId(isolate), it.CurrentIndex());
// Don't check the start position of the top-level function, as it can
@@ -1232,10 +1234,10 @@ void LiveEdit::PatchScript(Isolate* isolate, Handle<Script> script,
// Check that all the functions in this function's constant pool are also
// on the new script, and that their id matches their index in the new
// script's function list.
- FixedArray* constants = sfi->GetBytecodeArray()->constant_pool();
+ FixedArray constants = sfi->GetBytecodeArray()->constant_pool();
for (int i = 0; i < constants->length(); ++i) {
if (!constants->get(i)->IsSharedFunctionInfo()) continue;
- SharedFunctionInfo* inner_sfi =
+ SharedFunctionInfo inner_sfi =
SharedFunctionInfo::cast(constants->get(i));
DCHECK_EQ(inner_sfi->script(), *new_script);
DCHECK_EQ(inner_sfi, new_script->shared_function_infos()
diff --git a/deps/v8/src/debug/mips/OWNERS b/deps/v8/src/debug/mips/OWNERS
index 8bbcab4c2d..b455d9ef29 100644
--- a/deps/v8/src/debug/mips/OWNERS
+++ b/deps/v8/src/debug/mips/OWNERS
@@ -1,2 +1,3 @@
-ibogosavljevic@wavecomp.com
-skovacevic@wavecomp.com
\ No newline at end of file
+arikalo@wavecomp.com
+prudic@wavecomp.com
+skovacevic@wavecomp.com
diff --git a/deps/v8/src/debug/mips64/OWNERS b/deps/v8/src/debug/mips64/OWNERS
index 8bbcab4c2d..b455d9ef29 100644
--- a/deps/v8/src/debug/mips64/OWNERS
+++ b/deps/v8/src/debug/mips64/OWNERS
@@ -1,2 +1,3 @@
-ibogosavljevic@wavecomp.com
-skovacevic@wavecomp.com
\ No newline at end of file
+arikalo@wavecomp.com
+prudic@wavecomp.com
+skovacevic@wavecomp.com
diff --git a/deps/v8/src/debug/ppc/OWNERS b/deps/v8/src/debug/ppc/OWNERS
index cf60da5cc7..6d1a8fc472 100644
--- a/deps/v8/src/debug/ppc/OWNERS
+++ b/deps/v8/src/debug/ppc/OWNERS
@@ -1,7 +1,4 @@
jyan@ca.ibm.com
-dstence@us.ibm.com
joransiu@ca.ibm.com
-mbrandy@us.ibm.com
michael_dawson@ca.ibm.com
-jbarboza@ca.ibm.com
-mmallick@ca.ibm.com
+miladfar@ca.ibm.com
\ No newline at end of file
diff --git a/deps/v8/src/debug/s390/OWNERS b/deps/v8/src/debug/s390/OWNERS
index cf60da5cc7..6d1a8fc472 100644
--- a/deps/v8/src/debug/s390/OWNERS
+++ b/deps/v8/src/debug/s390/OWNERS
@@ -1,7 +1,4 @@
jyan@ca.ibm.com
-dstence@us.ibm.com
joransiu@ca.ibm.com
-mbrandy@us.ibm.com
michael_dawson@ca.ibm.com
-jbarboza@ca.ibm.com
-mmallick@ca.ibm.com
+miladfar@ca.ibm.com
\ No newline at end of file
diff --git a/deps/v8/src/debug/x64/debug-x64.cc b/deps/v8/src/debug/x64/debug-x64.cc
index 6667a5f3a7..0000445e90 100644
--- a/deps/v8/src/debug/x64/debug-x64.cc
+++ b/deps/v8/src/debug/x64/debug-x64.cc
@@ -34,11 +34,17 @@ void DebugCodegen::GenerateFrameDropperTrampoline(MacroAssembler* masm) {
// - Look up current function on the frame.
// - Leave the frame.
// - Restart the frame by calling the function.
+
+ Register decompr_scratch_for_debug =
+ COMPRESS_POINTERS_BOOL ? kScratchRegister : no_reg;
+
__ movp(rbp, rbx);
__ movp(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
__ leave();
- __ movp(rbx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadTaggedPointerField(
+ rbx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset),
+ decompr_scratch_for_debug);
__ movzxwq(
rbx, FieldOperand(rbx, SharedFunctionInfo::kFormalParameterCountOffset));
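
The extra scratch register reflects pointer compression: when COMPRESS_POINTERS_BOOL is set, tagged fields are stored compressed and LoadTaggedPointerField decompresses them through the scratch register; otherwise it degenerates to a plain load and no_reg is passed. A sketch of the pattern (register choices are illustrative):

    Register scratch = COMPRESS_POINTERS_BOOL ? kScratchRegister : no_reg;
    // Load a tagged pointer field, decompressing it first if pointers
    // are compressed; |scratch| is only used in the compressed case.
    __ LoadTaggedPointerField(
        rbx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset),
        scratch);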
diff --git a/deps/v8/src/deoptimize-reason.h b/deps/v8/src/deoptimize-reason.h
index 6881e114b3..b5c3447c7d 100644
--- a/deps/v8/src/deoptimize-reason.h
+++ b/deps/v8/src/deoptimize-reason.h
@@ -11,7 +11,7 @@ namespace v8 {
namespace internal {
#define DEOPTIMIZE_REASON_LIST(V) \
- V(ArrayBufferWasNeutered, "array buffer was neutered") \
+ V(ArrayBufferWasDetached, "array buffer was detached") \
V(CowArrayElementsChanged, "copy-on-write array's elements changed") \
V(CouldNotGrowElements, "failed to grow elements store") \
V(DeoptimizeNow, "%_DeoptimizeNow") \
@@ -39,6 +39,8 @@ namespace internal {
V(NoCache, "no cache") \
V(NotAHeapNumber, "not a heap number") \
V(NotAJavaScriptObject, "not a JavaScript object") \
+ V(NotAJavaScriptObjectOrNullOrUndefined, \
+ "not a JavaScript object, Null or Undefined") \
V(NotANumberOrOddball, "not a Number or Oddball") \
V(NotASmi, "not a Smi") \
V(NotAString, "not a String") \
diff --git a/deps/v8/src/deoptimizer.cc b/deps/v8/src/deoptimizer.cc
index abda40bf51..2f34733b61 100644
--- a/deps/v8/src/deoptimizer.cc
+++ b/deps/v8/src/deoptimizer.cc
@@ -10,12 +10,17 @@
#include "src/assembler-inl.h"
#include "src/ast/prettyprinter.h"
#include "src/callable.h"
+#include "src/counters.h"
#include "src/disasm.h"
#include "src/frames-inl.h"
#include "src/global-handles.h"
#include "src/interpreter/interpreter.h"
+#include "src/log.h"
#include "src/macro-assembler.h"
#include "src/objects/debug-objects-inl.h"
+#include "src/objects/heap-number-inl.h"
+#include "src/objects/smi.h"
+#include "src/register-configuration.h"
#include "src/tracing/trace-event.h"
#include "src/v8.h"
@@ -46,8 +51,8 @@ class FrameWriter {
}
}
- void PushRawObject(Object* obj, const char* debug_hint) {
- intptr_t value = reinterpret_cast<intptr_t>(obj);
+ void PushRawObject(Object obj, const char* debug_hint) {
+ intptr_t value = obj->ptr();
PushValue(value);
if (trace_scope_ != nullptr) {
DebugPrintOutputObject(obj, top_offset_, debug_hint);
@@ -67,14 +72,14 @@ class FrameWriter {
}
void PushCallerConstantPool(intptr_t cp) {
- top_offset_ -= kPointerSize;
+ top_offset_ -= kSystemPointerSize;
frame_->SetCallerConstantPool(top_offset_, cp);
DebugPrintOutputValue(cp, "caller's constant_pool\n");
}
void PushTranslatedValue(const TranslatedFrame::iterator& iterator,
const char* debug_hint = "") {
- Object* obj = iterator->GetRawValue();
+ Object obj = iterator->GetRawValue();
PushRawObject(obj, debug_hint);
@@ -91,7 +96,7 @@ class FrameWriter {
private:
void PushValue(intptr_t value) {
CHECK_GE(top_offset_, 0);
- top_offset_ -= kPointerSize;
+ top_offset_ -= kSystemPointerSize;
frame_->SetFrameSlot(top_offset_, value);
}
@@ -109,14 +114,13 @@ class FrameWriter {
}
}
- void DebugPrintOutputObject(Object* obj, unsigned output_offset,
+ void DebugPrintOutputObject(Object obj, unsigned output_offset,
const char* debug_hint = "") {
if (trace_scope_ != nullptr) {
PrintF(trace_scope_->file(), " " V8PRIxPTR_FMT ": [top + %3d] <- ",
output_address(output_offset), output_offset);
if (obj->IsSmi()) {
- PrintF(V8PRIxPTR_FMT " <Smi %d>", reinterpret_cast<Address>(obj),
- Smi::cast(obj)->value());
+ PrintF(V8PRIxPTR_FMT " <Smi %d>", obj->ptr(), Smi::cast(obj)->value());
} else {
obj->ShortPrint(trace_scope_->file());
}
@@ -131,54 +135,48 @@ class FrameWriter {
};
DeoptimizerData::DeoptimizerData(Heap* heap) : heap_(heap), current_(nullptr) {
- for (int i = 0; i <= DeoptimizerData::kLastDeoptimizeKind; ++i) {
- deopt_entry_code_[i] = nullptr;
- }
- Code** start = &deopt_entry_code_[0];
- Code** end = &deopt_entry_code_[DeoptimizerData::kLastDeoptimizeKind + 1];
- heap_->RegisterStrongRoots(reinterpret_cast<Object**>(start),
- reinterpret_cast<Object**>(end));
+ Code* start = &deopt_entry_code_[0];
+ Code* end = &deopt_entry_code_[DeoptimizerData::kLastDeoptimizeKind + 1];
+ heap_->RegisterStrongRoots(FullObjectSlot(start), FullObjectSlot(end));
}
DeoptimizerData::~DeoptimizerData() {
- for (int i = 0; i <= DeoptimizerData::kLastDeoptimizeKind; ++i) {
- deopt_entry_code_[i] = nullptr;
- }
- Code** start = &deopt_entry_code_[0];
- heap_->UnregisterStrongRoots(reinterpret_cast<Object**>(start));
+ Code* start = &deopt_entry_code_[0];
+ heap_->UnregisterStrongRoots(FullObjectSlot(start));
}
-Code* DeoptimizerData::deopt_entry_code(DeoptimizeKind kind) {
+Code DeoptimizerData::deopt_entry_code(DeoptimizeKind kind) {
return deopt_entry_code_[static_cast<int>(kind)];
}
-void DeoptimizerData::set_deopt_entry_code(DeoptimizeKind kind, Code* code) {
+void DeoptimizerData::set_deopt_entry_code(DeoptimizeKind kind, Code code) {
deopt_entry_code_[static_cast<int>(kind)] = code;
}
-Code* Deoptimizer::FindDeoptimizingCode(Address addr) {
+Code Deoptimizer::FindDeoptimizingCode(Address addr) {
if (function_->IsHeapObject()) {
// Search all deoptimizing code in the native context of the function.
Isolate* isolate = isolate_;
- Context* native_context = function_->context()->native_context();
- Object* element = native_context->DeoptimizedCodeListHead();
+ Context native_context = function_->context()->native_context();
+ Object element = native_context->DeoptimizedCodeListHead();
while (!element->IsUndefined(isolate)) {
- Code* code = Code::cast(element);
+ Code code = Code::cast(element);
CHECK(code->kind() == Code::OPTIMIZED_FUNCTION);
if (code->contains(addr)) return code;
element = code->next_code_link();
}
}
- return nullptr;
+ return Code();
}
// We rely on this function not causing a GC. It is called from generated code
// without having a real stack frame in place.
-Deoptimizer* Deoptimizer::New(JSFunction* function, DeoptimizeKind kind,
+Deoptimizer* Deoptimizer::New(Address raw_function, DeoptimizeKind kind,
unsigned bailout_id, Address from,
int fp_to_sp_delta, Isolate* isolate) {
+ JSFunction function = JSFunction::cast(Object(raw_function));
Deoptimizer* deoptimizer = new Deoptimizer(isolate, function, kind,
bailout_id, from, fp_to_sp_delta);
CHECK_NULL(isolate->deoptimizer_data()->current_);
@@ -186,7 +184,6 @@ Deoptimizer* Deoptimizer::New(JSFunction* function, DeoptimizeKind kind,
return deoptimizer;
}
-
Deoptimizer* Deoptimizer::Grab(Isolate* isolate) {
Deoptimizer* result = isolate->deoptimizer_data()->current_;
CHECK_NOT_NULL(result);
@@ -230,18 +227,10 @@ DeoptimizedFrameInfo* Deoptimizer::DebuggerInspectableFrame(
return info;
}
-void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm, int count,
- DeoptimizeKind kind) {
- NoRootArrayScope no_root_array(masm);
- TableEntryGenerator generator(masm, kind, count);
- generator.Generate();
-}
-
namespace {
class ActivationsFinder : public ThreadVisitor {
public:
- explicit ActivationsFinder(std::set<Code*>* codes,
- Code* topmost_optimized_code,
+ explicit ActivationsFinder(std::set<Code>* codes, Code topmost_optimized_code,
bool safe_to_deopt_topmost_optimized_code)
: codes_(codes) {
#ifdef DEBUG
@@ -256,7 +245,7 @@ class ActivationsFinder : public ThreadVisitor {
void VisitThread(Isolate* isolate, ThreadLocalTop* top) override {
for (StackFrameIterator it(isolate, top); !it.done(); it.Advance()) {
if (it.frame()->type() == StackFrame::OPTIMIZED) {
- Code* code = it.frame()->LookupCode();
+ Code code = it.frame()->LookupCode();
if (code->kind() == Code::OPTIMIZED_FUNCTION &&
code->marked_for_deoptimization()) {
codes_->erase(code);
@@ -272,10 +261,10 @@ class ActivationsFinder : public ThreadVisitor {
}
private:
- std::set<Code*>* codes_;
+ std::set<Code>* codes_;
#ifdef DEBUG
- Code* topmost_;
+ Code topmost_;
bool safe_to_deopt_;
#endif
};
@@ -283,11 +272,11 @@ class ActivationsFinder : public ThreadVisitor {
// Move marked code from the optimized code list to the deoptimized code list,
// and replace pc on the stack for codes marked for deoptimization.
-void Deoptimizer::DeoptimizeMarkedCodeForContext(Context* context) {
+void Deoptimizer::DeoptimizeMarkedCodeForContext(Context context) {
DisallowHeapAllocation no_allocation;
Isolate* isolate = context->GetHeap()->isolate();
- Code* topmost_optimized_code = nullptr;
+ Code topmost_optimized_code;
bool safe_to_deopt_topmost_optimized_code = false;
#ifdef DEBUG
// Make sure all activations of optimized code can deopt at their current PC.
@@ -297,26 +286,23 @@ void Deoptimizer::DeoptimizeMarkedCodeForContext(Context* context) {
!it.done(); it.Advance()) {
StackFrame::Type type = it.frame()->type();
if (type == StackFrame::OPTIMIZED) {
- Code* code = it.frame()->LookupCode();
- JSFunction* function =
+ Code code = it.frame()->LookupCode();
+ JSFunction function =
static_cast<OptimizedFrame*>(it.frame())->function();
if (FLAG_trace_deopt) {
CodeTracer::Scope scope(isolate->GetCodeTracer());
PrintF(scope.file(), "[deoptimizer found activation of function: ");
function->PrintName(scope.file());
- PrintF(scope.file(),
- " / %" V8PRIxPTR "]\n", reinterpret_cast<intptr_t>(function));
+ PrintF(scope.file(), " / %" V8PRIxPTR "]\n", function.ptr());
}
SafepointEntry safepoint = code->GetSafepointEntry(it.frame()->pc());
- int deopt_index = safepoint.deoptimization_index();
// Turbofan deopt is checked when we are patching addresses on stack.
- bool safe_if_deopt_triggered =
- deopt_index != Safepoint::kNoDeoptimizationIndex;
+ bool safe_if_deopt_triggered = safepoint.has_deoptimization_index();
bool is_builtin_code = code->kind() == Code::BUILTIN;
- DCHECK(topmost_optimized_code == nullptr || safe_if_deopt_triggered ||
+ DCHECK(topmost_optimized_code.is_null() || safe_if_deopt_triggered ||
is_builtin_code);
- if (topmost_optimized_code == nullptr) {
+ if (topmost_optimized_code.is_null()) {
topmost_optimized_code = code;
safe_to_deopt_topmost_optimized_code = safe_if_deopt_triggered;
}
@@ -326,23 +312,21 @@ void Deoptimizer::DeoptimizeMarkedCodeForContext(Context* context) {
// We will use this set to mark those Code objects that are marked for
// deoptimization and have not been found in stack frames.
- std::set<Code*> codes;
+ std::set<Code> codes;
// Move marked code from the optimized code list to the deoptimized code list.
// Walk over all optimized code objects in this native context.
- Code* prev = nullptr;
- Object* element = context->OptimizedCodeListHead();
+ Code prev;
+ Object element = context->OptimizedCodeListHead();
while (!element->IsUndefined(isolate)) {
- Code* code = Code::cast(element);
+ Code code = Code::cast(element);
CHECK_EQ(code->kind(), Code::OPTIMIZED_FUNCTION);
- Object* next = code->next_code_link();
+ Object next = code->next_code_link();
if (code->marked_for_deoptimization()) {
- // Make sure that this object does not point to any garbage.
- isolate->heap()->InvalidateCodeEmbeddedObjects(code);
codes.insert(code);
- if (prev != nullptr) {
+ if (!prev.is_null()) {
// Skip this code in the optimized code list.
prev->set_next_code_link(next);
} else {
@@ -372,7 +356,7 @@ void Deoptimizer::DeoptimizeMarkedCodeForContext(Context* context) {
// If there's no activation of a code object on any stack, we can remove its
// deoptimization data. We do this to ensure that code objects that are
// unlinked don't transitively keep objects alive unnecessarily.
- for (Code* code : codes) {
+ for (Code code : codes) {
isolate->heap()->InvalidateCodeDeoptimizationData(code);
}
}
@@ -390,9 +374,9 @@ void Deoptimizer::DeoptimizeAll(Isolate* isolate) {
isolate->AbortConcurrentOptimization(BlockingBehavior::kBlock);
DisallowHeapAllocation no_allocation;
// For all contexts, mark all code, then deoptimize.
- Object* context = isolate->heap()->native_contexts_list();
+ Object context = isolate->heap()->native_contexts_list();
while (!context->IsUndefined(isolate)) {
- Context* native_context = Context::cast(context);
+ Context native_context = Context::cast(context);
MarkAllCodeForContext(native_context);
DeoptimizeMarkedCodeForContext(native_context);
context = native_context->next_context_link();
@@ -411,32 +395,33 @@ void Deoptimizer::DeoptimizeMarkedCode(Isolate* isolate) {
}
DisallowHeapAllocation no_allocation;
// For all contexts, deoptimize code already marked.
- Object* context = isolate->heap()->native_contexts_list();
+ Object context = isolate->heap()->native_contexts_list();
while (!context->IsUndefined(isolate)) {
- Context* native_context = Context::cast(context);
+ Context native_context = Context::cast(context);
DeoptimizeMarkedCodeForContext(native_context);
context = native_context->next_context_link();
}
}
-void Deoptimizer::MarkAllCodeForContext(Context* context) {
- Object* element = context->OptimizedCodeListHead();
+void Deoptimizer::MarkAllCodeForContext(Context context) {
+ Object element = context->OptimizedCodeListHead();
Isolate* isolate = context->GetIsolate();
while (!element->IsUndefined(isolate)) {
- Code* code = Code::cast(element);
+ Code code = Code::cast(element);
CHECK_EQ(code->kind(), Code::OPTIMIZED_FUNCTION);
code->set_marked_for_deoptimization(true);
element = code->next_code_link();
}
}
-void Deoptimizer::DeoptimizeFunction(JSFunction* function, Code* code) {
+void Deoptimizer::DeoptimizeFunction(JSFunction function, Code code) {
Isolate* isolate = function->GetIsolate();
RuntimeCallTimerScope runtimeTimer(isolate,
RuntimeCallCounterId::kDeoptimizeCode);
TimerEventScope<TimerEventDeoptimizeCode> timer(isolate);
TRACE_EVENT0("v8", "V8.DeoptimizeCode");
- if (code == nullptr) code = function->code();
+ function->ResetIfBytecodeFlushed();
+ if (code.is_null()) code = function->code();
if (code->kind() == Code::OPTIMIZED_FUNCTION) {
// Mark the code for deoptimization and unlink any functions that also
@@ -455,7 +440,6 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function, Code* code) {
}
}
-
void Deoptimizer::ComputeOutputFrames(Deoptimizer* deoptimizer) {
deoptimizer->DoComputeOutputFrames();
}
@@ -473,7 +457,7 @@ const char* Deoptimizer::MessageFor(DeoptimizeKind kind) {
return nullptr;
}
-Deoptimizer::Deoptimizer(Isolate* isolate, JSFunction* function,
+Deoptimizer::Deoptimizer(Isolate* isolate, JSFunction function,
DeoptimizeKind kind, unsigned bailout_id, Address from,
int fp_to_sp_delta)
: isolate_(isolate),
@@ -503,7 +487,7 @@ Deoptimizer::Deoptimizer(Isolate* isolate, JSFunction* function,
DCHECK_NE(from, kNullAddress);
compiled_code_ = FindOptimizedCode();
- DCHECK_NOT_NULL(compiled_code_);
+ DCHECK(!compiled_code_.is_null());
DCHECK(function->IsJSFunction());
trace_scope_ = FLAG_trace_deopt
@@ -523,7 +507,7 @@ Deoptimizer::Deoptimizer(Isolate* isolate, JSFunction* function,
// Soft deopts shouldn't count against the overall deoptimization count
// that can eventually lead to disabling optimization for a function.
isolate->counters()->soft_deopts_executed()->Increment();
- } else if (function != nullptr) {
+ } else if (!function.is_null()) {
function->feedback_vector()->increment_deopt_count();
}
}
@@ -538,11 +522,10 @@ Deoptimizer::Deoptimizer(Isolate* isolate, JSFunction* function,
input_ = new (size) FrameDescription(size, parameter_count);
}
-Code* Deoptimizer::FindOptimizedCode() {
- Code* compiled_code = FindDeoptimizingCode(from_);
- return (compiled_code == nullptr)
- ? static_cast<Code*>(isolate_->FindCodeObject(from_))
- : compiled_code;
+Code Deoptimizer::FindOptimizedCode() {
+ Code compiled_code = FindDeoptimizingCode(from_);
+ return !compiled_code.is_null() ? compiled_code
+ : isolate_->FindCodeObject(from_);
}
@@ -585,52 +568,34 @@ void Deoptimizer::DeleteFrameDescriptions() {
#endif // DEBUG
}
-Address Deoptimizer::GetDeoptimizationEntry(Isolate* isolate, int id,
+Address Deoptimizer::GetDeoptimizationEntry(Isolate* isolate,
DeoptimizeKind kind) {
- CHECK_GE(id, 0);
- if (id >= kMaxNumberOfEntries) return kNullAddress;
DeoptimizerData* data = isolate->deoptimizer_data();
CHECK_LE(kind, DeoptimizerData::kLastDeoptimizeKind);
- CHECK_NOT_NULL(data->deopt_entry_code(kind));
- Code* code = data->deopt_entry_code(kind);
- return code->raw_instruction_start() + (id * table_entry_size_);
+ CHECK(!data->deopt_entry_code(kind).is_null());
+ return data->deopt_entry_code(kind)->raw_instruction_start();
}
-int Deoptimizer::GetDeoptimizationId(Isolate* isolate, Address addr,
- DeoptimizeKind kind) {
- DeoptimizerData* data = isolate->deoptimizer_data();
- CHECK_LE(kind, DeoptimizerData::kLastDeoptimizeKind);
- DCHECK(IsInDeoptimizationTable(isolate, addr, kind));
- Code* code = data->deopt_entry_code(kind);
- Address start = code->raw_instruction_start();
- DCHECK_EQ(0,
- static_cast<int>(addr - start) % table_entry_size_);
- return static_cast<int>(addr - start) / table_entry_size_;
-}
-
-bool Deoptimizer::IsInDeoptimizationTable(Isolate* isolate, Address addr,
- DeoptimizeKind type) {
+bool Deoptimizer::IsDeoptimizationEntry(Isolate* isolate, Address addr,
+ DeoptimizeKind type) {
DeoptimizerData* data = isolate->deoptimizer_data();
CHECK_LE(type, DeoptimizerData::kLastDeoptimizeKind);
- Code* code = data->deopt_entry_code(type);
- if (code == nullptr) return false;
- Address start = code->raw_instruction_start();
- return ((table_entry_size_ == 0 && addr == start) ||
- (addr >= start &&
- addr < start + (kMaxNumberOfEntries * table_entry_size_)));
+ Code code = data->deopt_entry_code(type);
+ if (code.is_null()) return false;
+ return addr == code->raw_instruction_start();
}
bool Deoptimizer::IsDeoptimizationEntry(Isolate* isolate, Address addr,
DeoptimizeKind* type) {
- if (IsInDeoptimizationTable(isolate, addr, DeoptimizeKind::kEager)) {
+ if (IsDeoptimizationEntry(isolate, addr, DeoptimizeKind::kEager)) {
*type = DeoptimizeKind::kEager;
return true;
}
- if (IsInDeoptimizationTable(isolate, addr, DeoptimizeKind::kSoft)) {
+ if (IsDeoptimizationEntry(isolate, addr, DeoptimizeKind::kSoft)) {
*type = DeoptimizeKind::kSoft;
return true;
}
- if (IsInDeoptimizationTable(isolate, addr, DeoptimizeKind::kLazy)) {
+ if (IsDeoptimizationEntry(isolate, addr, DeoptimizeKind::kLazy)) {
*type = DeoptimizeKind::kLazy;
return true;
}
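
With the per-id entry table gone, each DeoptimizeKind now has a single entry point, so GetDeoptimizationEntry returns that entry's instruction start and IsDeoptimizationEntry is an exact address comparison rather than a range test over table_entry_size_. A caller-side sketch (illustrative only):

    Address entry =
        Deoptimizer::GetDeoptimizationEntry(isolate, DeoptimizeKind::kLazy);
    DeoptimizeKind kind;
    CHECK(Deoptimizer::IsDeoptimizationEntry(isolate, entry, &kind));
    CHECK_EQ(DeoptimizeKind::kLazy, kind);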
@@ -640,12 +605,12 @@ bool Deoptimizer::IsDeoptimizationEntry(Isolate* isolate, Address addr,
int Deoptimizer::GetDeoptimizedCodeCount(Isolate* isolate) {
int length = 0;
// Count all entries in the deoptimizing code list of every context.
- Object* context = isolate->heap()->native_contexts_list();
+ Object context = isolate->heap()->native_contexts_list();
while (!context->IsUndefined(isolate)) {
- Context* native_context = Context::cast(context);
- Object* element = native_context->DeoptimizedCodeListHead();
+ Context native_context = Context::cast(context);
+ Object element = native_context->DeoptimizedCodeListHead();
while (!element->IsUndefined(isolate)) {
- Code* code = Code::cast(element);
+ Code code = Code::cast(element);
DCHECK(code->kind() == Code::OPTIMIZED_FUNCTION);
if (!code->marked_for_deoptimization()) {
length++;
@@ -689,7 +654,7 @@ void Deoptimizer::DoComputeOutputFrames() {
// Determine basic deoptimization information. The optimized frame is
// described by the input data.
- DeoptimizationData* input_data =
+ DeoptimizationData input_data =
DeoptimizationData::cast(compiled_code_->deoptimization_data());
{
@@ -732,7 +697,7 @@ void Deoptimizer::DoComputeOutputFrames() {
}
BailoutId node_id = input_data->BytecodeOffset(bailout_id_);
- ByteArray* translations = input_data->TranslationByteArray();
+ ByteArray translations = input_data->TranslationByteArray();
unsigned translation_index =
input_data->TranslationIndex(bailout_id_)->value();
@@ -809,6 +774,10 @@ void Deoptimizer::DoComputeOutputFrames() {
}
}
+ FrameDescription* topmost = output_[count - 1];
+ topmost->GetRegisterValues()->SetRegister(kRootRegister.code(),
+ isolate()->isolate_root());
+
// Print some helpful diagnostic information.
if (trace_scope_ != nullptr) {
double ms = timer.Elapsed().InMillisecondsF();
@@ -827,7 +796,7 @@ void Deoptimizer::DoComputeOutputFrames() {
void Deoptimizer::DoComputeInterpretedFrame(TranslatedFrame* translated_frame,
int frame_index,
bool goto_catch_handler) {
- SharedFunctionInfo* shared = translated_frame->raw_shared_info();
+ SharedFunctionInfo shared = translated_frame->raw_shared_info();
TranslatedFrame::iterator value_iterator = translated_frame->begin();
bool is_bottommost = (0 == frame_index);
@@ -838,12 +807,12 @@ void Deoptimizer::DoComputeInterpretedFrame(TranslatedFrame* translated_frame,
int register_count = height - 1; // Exclude accumulator.
int register_stack_slot_count =
InterpreterFrameConstants::RegisterStackSlotCount(register_count);
- int height_in_bytes = register_stack_slot_count * kPointerSize;
+ int height_in_bytes = register_stack_slot_count * kSystemPointerSize;
// The topmost frame will contain the accumulator.
if (is_topmost) {
- height_in_bytes += kPointerSize;
- if (PadTopOfStackRegister()) height_in_bytes += kPointerSize;
+ height_in_bytes += kSystemPointerSize;
+ if (PadTopOfStackRegister()) height_in_bytes += kSystemPointerSize;
}
TranslatedFrame::iterator function_iterator = value_iterator++;
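The kPointerSize uses in this function become kSystemPointerSize: the patch separates machine-word-sized stack slots from tagged object fields (kTaggedSize, used further down for object storage). A sketch of the relationship, assuming a plain 64-bit build; pointer compression is the motivation, not something this patch turns on:

    // Both are 8 on a vanilla 64-bit build; with compressed pointers
    // kTaggedSize would shrink to 4 while kSystemPointerSize stays 8.
    constexpr int kSystemPointerSize = sizeof(void*);  // stack slots, saved
                                                       // registers, frames
    constexpr int kTaggedSize = kSystemPointerSize;    // tagged object fields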
@@ -955,23 +924,23 @@ void Deoptimizer::DoComputeInterpretedFrame(TranslatedFrame* translated_frame,
}
}
// Read the context from the translations.
- Object* context = context_pos->GetRawValue();
- output_frame->SetContext(reinterpret_cast<intptr_t>(context));
- frame_writer.PushTranslatedValue(context_pos, "context\n");
+ Object context = context_pos->GetRawValue();
+ output_frame->SetContext(static_cast<intptr_t>(context->ptr()));
+ frame_writer.PushTranslatedValue(context_pos, "context");
// The function was mentioned explicitly in the BEGIN_FRAME.
- frame_writer.PushTranslatedValue(function_iterator, "function\n");
+ frame_writer.PushTranslatedValue(function_iterator, "function");
// Set the bytecode array pointer.
- Object* bytecode_array = shared->HasBreakInfo()
- ? shared->GetDebugInfo()->DebugBytecodeArray()
- : shared->GetBytecodeArray();
+ Object bytecode_array = shared->HasBreakInfo()
+ ? shared->GetDebugInfo()->DebugBytecodeArray()
+ : shared->GetBytecodeArray();
frame_writer.PushRawObject(bytecode_array, "bytecode array\n");
// The bytecode offset was mentioned explicitly in the BEGIN_FRAME.
int raw_bytecode_offset =
BytecodeArray::kHeaderSize - kHeapObjectTag + bytecode_offset;
- Smi* smi_bytecode_offset = Smi::FromInt(raw_bytecode_offset);
+ Smi smi_bytecode_offset = Smi::FromInt(raw_bytecode_offset);
frame_writer.PushRawObject(smi_bytecode_offset, "bytecode offset\n");
if (trace_scope_ != nullptr) {
@@ -979,8 +948,35 @@ void Deoptimizer::DoComputeInterpretedFrame(TranslatedFrame* translated_frame,
}
// Translate the rest of the interpreter registers in the frame.
+ // The return_value_offset is counted from the top. Here, we compute the
+ // register index (counted from the start).
+ int return_value_first_reg =
+ register_count - translated_frame->return_value_offset();
+ int return_value_count = translated_frame->return_value_count();
for (int i = 0; i < register_count; ++i, ++value_iterator) {
- frame_writer.PushTranslatedValue(value_iterator, "stack parameter");
+ // Ensure we write the return value if we have one and we are returning
+ // normally to a lazy deopt point.
+ if (is_topmost && !goto_catch_handler &&
+ deopt_kind_ == DeoptimizeKind::kLazy && i >= return_value_first_reg &&
+ i < return_value_first_reg + return_value_count) {
+ int return_index = i - return_value_first_reg;
+ if (return_index == 0) {
+ frame_writer.PushRawValue(input_->GetRegister(kReturnRegister0.code()),
+ "return value 0\n");
+ // We do not handle the situation when one return value should go into
+ // the accumulator and another one into an ordinary register. Since
+          // the interpreter should never create such a situation, just assert
+ // this does not happen.
+ CHECK_LE(return_value_first_reg + return_value_count, register_count);
+ } else {
+ CHECK_EQ(return_index, 1);
+ frame_writer.PushRawValue(input_->GetRegister(kReturnRegister1.code()),
+ "return value 1\n");
+ }
+ } else {
+        // This is not a return value; just write the value from the translations.
+ frame_writer.PushTranslatedValue(value_iterator, "stack parameter");
+ }
}
int register_slots_written = register_count;
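A worked example of the index arithmetic above, with made-up numbers:

    // Hypothetical frame: 10 interpreter registers; the call left 2 return
    // values ending 2 slots from the top of the register file.
    int register_count = 10, return_value_offset = 2, return_value_count = 2;
    int return_value_first_reg =
        register_count - return_value_offset;  // == 8
    // The loop then feeds r8 from kReturnRegister0 and r9 from
    // kReturnRegister1; every other register comes from the translation.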
@@ -1005,12 +1001,21 @@ void Deoptimizer::DoComputeInterpretedFrame(TranslatedFrame* translated_frame,
// the exception (which lives in the result register).
intptr_t accumulator_value =
input_->GetRegister(kInterpreterAccumulatorRegister.code());
- frame_writer.PushRawObject(reinterpret_cast<Object*>(accumulator_value),
- "accumulator\n");
- ++value_iterator; // Skip the accumulator.
+ frame_writer.PushRawObject(Object(accumulator_value), "accumulator\n");
} else {
- frame_writer.PushTranslatedValue(value_iterator++, "accumulator");
+    // If we are lazily deoptimizing, make sure we store the deopt
+ // return value into the appropriate slot.
+ if (deopt_kind_ == DeoptimizeKind::kLazy &&
+ translated_frame->return_value_offset() == 0 &&
+ translated_frame->return_value_count() > 0) {
+ CHECK_EQ(translated_frame->return_value_count(), 1);
+ frame_writer.PushRawValue(input_->GetRegister(kReturnRegister0.code()),
+ "return value 0\n");
+ } else {
+ frame_writer.PushTranslatedValue(value_iterator, "accumulator");
+ }
}
+ ++value_iterator; // Move over the accumulator.
} else {
// For non-topmost frames, skip the accumulator translation. For those
// frames, the return value from the callee will become the accumulator.
@@ -1024,7 +1029,7 @@ void Deoptimizer::DoComputeInterpretedFrame(TranslatedFrame* translated_frame,
// bailout handlers also advance the bytecode offset before dispatch, hence
// simulating what normal handlers do upon completion of the operation.
Builtins* builtins = isolate_->builtins();
- Code* dispatch_builtin =
+ Code dispatch_builtin =
(!is_topmost || (deopt_kind_ == DeoptimizeKind::kLazy)) &&
!goto_catch_handler
? builtins->builtin(Builtins::kInterpreterEnterBytecodeAdvance)
@@ -1048,11 +1053,11 @@ void Deoptimizer::DoComputeInterpretedFrame(TranslatedFrame* translated_frame,
// and will be materialized by {Runtime_NotifyDeoptimized}. For additional
// safety we use Smi(0) instead of the potential {arguments_marker} here.
if (is_topmost) {
- intptr_t context_value = reinterpret_cast<intptr_t>(Smi::kZero);
+ intptr_t context_value = static_cast<intptr_t>(Smi::zero().ptr());
Register context_reg = JavaScriptFrame::context_register();
output_frame->SetRegister(context_reg.code(), context_value);
// Set the continuation for the topmost frame.
- Code* continuation = builtins->builtin(Builtins::kNotifyDeoptimized);
+ Code continuation = builtins->builtin(Builtins::kNotifyDeoptimized);
output_frame->SetContinuation(
static_cast<intptr_t>(continuation->InstructionStart()));
}
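Casts like reinterpret_cast<intptr_t>(Smi::kZero) give way to static_cast<intptr_t>(Smi::zero().ptr()) because Object, Smi, Code and friends are no longer pointer types in this V8 release but small value classes wrapping an Address. A simplified sketch of the idiom (the real definitions live in V8's object headers):

    #include <cstdint>
    using Address = uintptr_t;            // as in include/v8-internal.h
    constexpr Address kNullAddress = 0;

    class Object {
     public:
      constexpr Object() : ptr_(kNullAddress) {}       // the "null object"
      explicit constexpr Object(Address ptr) : ptr_(ptr) {}
      constexpr Address ptr() const { return ptr_; }   // raw tagged bits
      bool is_null() const { return ptr_ == kNullAddress; }
     private:
      Address ptr_;
    };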
@@ -1064,9 +1069,10 @@ void Deoptimizer::DoComputeArgumentsAdaptorFrame(
bool is_bottommost = (0 == frame_index);
unsigned height = translated_frame->height();
- unsigned height_in_bytes = height * kPointerSize;
+ unsigned height_in_bytes = height * kSystemPointerSize;
int parameter_count = height;
- if (ShouldPadArguments(parameter_count)) height_in_bytes += kPointerSize;
+ if (ShouldPadArguments(parameter_count))
+ height_in_bytes += kSystemPointerSize;
TranslatedFrame::iterator function_iterator = value_iterator++;
if (trace_scope_ != nullptr) {
@@ -1147,7 +1153,7 @@ void Deoptimizer::DoComputeArgumentsAdaptorFrame(
DCHECK_EQ(0, frame_writer.top_offset());
Builtins* builtins = isolate_->builtins();
- Code* adaptor_trampoline =
+ Code adaptor_trampoline =
builtins->builtin(Builtins::kArgumentsAdaptorTrampoline);
intptr_t pc_value = static_cast<intptr_t>(
adaptor_trampoline->InstructionStart() +
@@ -1170,11 +1176,11 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslatedFrame* translated_frame,
CHECK(!is_topmost || deopt_kind_ == DeoptimizeKind::kLazy);
Builtins* builtins = isolate_->builtins();
- Code* construct_stub = builtins->builtin(Builtins::kJSConstructStubGeneric);
+ Code construct_stub = builtins->builtin(Builtins::kJSConstructStubGeneric);
BailoutId bailout_id = translated_frame->node_id();
unsigned height = translated_frame->height();
unsigned parameter_count = height - 1; // Exclude the context.
- unsigned height_in_bytes = parameter_count * kPointerSize;
+ unsigned height_in_bytes = parameter_count * kSystemPointerSize;
// If the construct frame appears to be topmost we should ensure that the
// value of result register is preserved during continuation execution.
@@ -1182,11 +1188,12 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslatedFrame* translated_frame,
// top of the reconstructed stack and popping it in
// {Builtins::kNotifyDeoptimized}.
if (is_topmost) {
- height_in_bytes += kPointerSize;
- if (PadTopOfStackRegister()) height_in_bytes += kPointerSize;
+ height_in_bytes += kSystemPointerSize;
+ if (PadTopOfStackRegister()) height_in_bytes += kSystemPointerSize;
}
- if (ShouldPadArguments(parameter_count)) height_in_bytes += kPointerSize;
+ if (ShouldPadArguments(parameter_count))
+ height_in_bytes += kSystemPointerSize;
TranslatedFrame::iterator function_iterator = value_iterator++;
if (trace_scope_ != nullptr) {
@@ -1259,14 +1266,14 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslatedFrame* translated_frame,
intptr_t marker = StackFrame::TypeToMarker(StackFrame::CONSTRUCT);
frame_writer.PushRawValue(marker, "context (construct stub sentinel)\n");
- frame_writer.PushTranslatedValue(value_iterator++, "context\n");
+ frame_writer.PushTranslatedValue(value_iterator++, "context");
// Number of incoming arguments.
frame_writer.PushRawObject(Smi::FromInt(parameter_count - 1), "argc\n");
// The constructor function was mentioned explicitly in the
// CONSTRUCT_STUB_FRAME.
- frame_writer.PushTranslatedValue(function_iterator, "constuctor function\n");
+ frame_writer.PushTranslatedValue(function_iterator, "constructor function\n");
// The deopt info contains the implicit receiver or the new target at the
// position of the receiver. Copy it to the top of stack, with the hole value
@@ -1320,7 +1327,7 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslatedFrame* translated_frame,
// and will be materialized by {Runtime_NotifyDeoptimized}. For additional
// safety we use Smi(0) instead of the potential {arguments_marker} here.
if (is_topmost) {
- intptr_t context_value = reinterpret_cast<intptr_t>(Smi::kZero);
+ intptr_t context_value = static_cast<intptr_t>(Smi::zero().ptr());
Register context_reg = JavaScriptFrame::context_register();
output_frame->SetRegister(context_reg.code(), context_value);
}
@@ -1329,7 +1336,7 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslatedFrame* translated_frame,
if (is_topmost) {
Builtins* builtins = isolate_->builtins();
DCHECK_EQ(DeoptimizeKind::kLazy, deopt_kind_);
- Code* continuation = builtins->builtin(Builtins::kNotifyDeoptimized);
+ Code continuation = builtins->builtin(Builtins::kNotifyDeoptimized);
output_frame->SetContinuation(
static_cast<intptr_t>(continuation->InstructionStart()));
}
@@ -1413,7 +1420,7 @@ Builtins::Name Deoptimizer::TrampolineForBuiltinContinuation(
// TO
// | .... |
// +-------------------------+
-// | arg padding (arch dept) |<- at most 1*kPointerSize
+// | arg padding (arch dept) |<- at most 1*kSystemPointerSize
// +-------------------------+
// | builtin param 0 |<- FrameState input value n becomes
// +-------------------------+
@@ -1461,8 +1468,7 @@ void Deoptimizer::DoComputeBuiltinContinuation(
BailoutId bailout_id = translated_frame->node_id();
Builtins::Name builtin_name = Builtins::GetBuiltinFromBailoutId(bailout_id);
- CHECK(!Builtins::IsLazy(builtin_name));
- Code* builtin = isolate()->builtins()->builtin(builtin_name);
+ Code builtin = isolate()->builtins()->builtin(builtin_name);
Callable continuation_callable =
Builtins::CallableFor(isolate(), builtin_name);
CallInterfaceDescriptor continuation_descriptor =
@@ -1473,13 +1479,7 @@ void Deoptimizer::DoComputeBuiltinContinuation(
const bool must_handle_result =
!is_topmost || deopt_kind_ == DeoptimizeKind::kLazy;
-#if defined(V8_TARGET_ARCH_IA32) && defined(V8_EMBEDDED_BUILTINS)
- // TODO(v8:6666): Fold into Default config once root is fully supported.
- const RegisterConfiguration* config(
- RegisterConfiguration::PreserveRootIA32());
-#else
const RegisterConfiguration* config(RegisterConfiguration::Default());
-#endif
const int allocatable_register_count =
config->num_allocatable_general_registers();
const int padding_slot_count =
@@ -1507,14 +1507,14 @@ void Deoptimizer::DoComputeBuiltinContinuation(
is_topmost ? (PadTopOfStackRegister() ? 2 : 1) : 0;
const unsigned output_frame_size =
- kPointerSize * (stack_param_count + stack_param_pad_count +
- allocatable_register_count + padding_slot_count +
- push_result_count) +
+ kSystemPointerSize * (stack_param_count + stack_param_pad_count +
+ allocatable_register_count + padding_slot_count +
+ push_result_count) +
BuiltinContinuationFrameConstants::kFixedFrameSize;
const unsigned output_frame_size_above_fp =
- kPointerSize * (allocatable_register_count + padding_slot_count +
- push_result_count) +
+ kSystemPointerSize * (allocatable_register_count + padding_slot_count +
+ push_result_count) +
(BuiltinContinuationFrameConstants::kFixedFrameSize -
BuiltinContinuationFrameConstants::kFixedFrameSizeAboveFp);
@@ -1563,8 +1563,7 @@ void Deoptimizer::DoComputeBuiltinContinuation(
// Get the possible JSFunction for the case that this is a
// JavaScriptBuiltinContinuationFrame, which needs the JSFunction pointer
// like a normal JavaScriptFrame.
- const intptr_t maybe_function =
- reinterpret_cast<intptr_t>(value_iterator->GetRawValue());
+ const intptr_t maybe_function = value_iterator->GetRawValue()->ptr();
++value_iterator;
ReadOnlyRoots roots(isolate());
@@ -1588,7 +1587,7 @@ void Deoptimizer::DoComputeBuiltinContinuation(
case BuiltinContinuationMode::JAVASCRIPT_HANDLE_EXCEPTION: {
intptr_t accumulator_value =
input_->GetRegister(kInterpreterAccumulatorRegister.code());
- frame_writer.PushRawObject(reinterpret_cast<Object*>(accumulator_value),
+ frame_writer.PushRawObject(Object(accumulator_value),
"exception (from accumulator)\n");
} break;
}
@@ -1615,8 +1614,8 @@ void Deoptimizer::DoComputeBuiltinContinuation(
// sure that it's harvested from the translation and copied into the register
// set (it was automatically added at the end of the FrameState by the
// instruction selector).
- Object* context = value_iterator->GetRawValue();
- const intptr_t value = reinterpret_cast<intptr_t>(context);
+ Object context = value_iterator->GetRawValue();
+ const intptr_t value = context->ptr();
TranslatedFrame::iterator context_register_value = value_iterator++;
register_values[kContextRegister.code()] = context_register_value;
output_frame->SetContext(value);
@@ -1679,10 +1678,10 @@ void Deoptimizer::DoComputeBuiltinContinuation(
SNPrintF(
str,
"tagged argument count %s (will be untagged by continuation)\n",
- config->GetGeneralRegisterName(code));
+ RegisterName(Register::from_code(code)));
} else {
SNPrintF(str, "builtin register argument %s\n",
- config->GetGeneralRegisterName(code));
+ RegisterName(Register::from_code(code)));
}
}
frame_writer.PushTranslatedValue(
@@ -1717,7 +1716,7 @@ void Deoptimizer::DoComputeBuiltinContinuation(
// and will be materialized by {Runtime_NotifyDeoptimized}. For additional
// safety we use Smi(0) instead of the potential {arguments_marker} here.
if (is_topmost) {
- intptr_t context_value = reinterpret_cast<intptr_t>(Smi::kZero);
+ intptr_t context_value = static_cast<intptr_t>(Smi::zero().ptr());
Register context_reg = JavaScriptFrame::context_register();
output_frame->SetRegister(context_reg.code(), context_value);
}
@@ -1727,12 +1726,12 @@ void Deoptimizer::DoComputeBuiltinContinuation(
Register fp_reg = JavaScriptFrame::fp_register();
output_frame->SetRegister(fp_reg.code(), fp_value);
- Code* continue_to_builtin = isolate()->builtins()->builtin(
+ Code continue_to_builtin = isolate()->builtins()->builtin(
TrampolineForBuiltinContinuation(mode, must_handle_result));
output_frame->SetPc(
static_cast<intptr_t>(continue_to_builtin->InstructionStart()));
- Code* continuation =
+ Code continuation =
isolate()->builtins()->builtin(Builtins::kNotifyDeoptimized);
output_frame->SetContinuation(
static_cast<intptr_t>(continuation->InstructionStart()));
@@ -1752,13 +1751,13 @@ void Deoptimizer::MaterializeHeapObjects() {
if (trace_scope_ != nullptr) {
PrintF("Materialization [" V8PRIxPTR_FMT "] <- " V8PRIxPTR_FMT " ; ",
static_cast<intptr_t>(materialization.output_slot_address_),
- reinterpret_cast<intptr_t>(*value));
+ value->ptr());
value->ShortPrint(trace_scope_->file());
PrintF(trace_scope_->file(), "\n");
}
- *(reinterpret_cast<intptr_t*>(materialization.output_slot_address_)) =
- reinterpret_cast<intptr_t>(*value);
+ *(reinterpret_cast<Address*>(materialization.output_slot_address_)) =
+ value->ptr();
}
translated_state_.VerifyMaterializedObjects();
@@ -1775,7 +1774,7 @@ void Deoptimizer::MaterializeHeapObjects() {
}
void Deoptimizer::QueueValueForMaterialization(
- Address output_address, Object* obj,
+ Address output_address, Object obj,
const TranslatedFrame::iterator& iterator) {
if (obj == ReadOnlyRoots(isolate_).arguments_marker()) {
values_to_materialize_.push_back({output_address, iterator});
@@ -1784,6 +1783,8 @@ void Deoptimizer::QueueValueForMaterialization(
unsigned Deoptimizer::ComputeInputFrameAboveFpFixedSize() const {
unsigned fixed_size = CommonFrameConstants::kFixedFrameSizeAboveFp;
+ // TODO(jkummerow): If {function_->IsSmi()} can indeed be true, then
+ // {function_} should not have type {JSFunction}.
if (!function_->IsSmi()) {
fixed_size += ComputeIncomingArgumentSize(function_->shared());
}
@@ -1799,7 +1800,7 @@ unsigned Deoptimizer::ComputeInputFrameSize() const {
unsigned stack_slots = compiled_code_->stack_slots();
unsigned outgoing_size = 0;
// ComputeOutgoingArgumentSize(compiled_code_, bailout_id_);
- CHECK_EQ(fixed_size_above_fp + (stack_slots * kPointerSize) -
+ CHECK_EQ(fixed_size_above_fp + (stack_slots * kSystemPointerSize) -
CommonFrameConstants::kFixedFrameSizeAboveFp + outgoing_size,
result);
}
@@ -1807,7 +1808,7 @@ unsigned Deoptimizer::ComputeInputFrameSize() const {
}
// static
-unsigned Deoptimizer::ComputeInterpretedFixedSize(SharedFunctionInfo* shared) {
+unsigned Deoptimizer::ComputeInterpretedFixedSize(SharedFunctionInfo shared) {
// The fixed part of the frame consists of the return address, frame
// pointer, function, context, bytecode offset and all the incoming arguments.
return ComputeIncomingArgumentSize(shared) +
@@ -1815,10 +1816,10 @@ unsigned Deoptimizer::ComputeInterpretedFixedSize(SharedFunctionInfo* shared) {
}
// static
-unsigned Deoptimizer::ComputeIncomingArgumentSize(SharedFunctionInfo* shared) {
+unsigned Deoptimizer::ComputeIncomingArgumentSize(SharedFunctionInfo shared) {
int parameter_slots = shared->internal_formal_parameter_count() + 1;
if (kPadArguments) parameter_slots = RoundUp(parameter_slots, 2);
- return parameter_slots * kPointerSize;
+ return parameter_slots * kSystemPointerSize;
}
void Deoptimizer::EnsureCodeForDeoptimizationEntry(Isolate* isolate,
@@ -1826,11 +1827,12 @@ void Deoptimizer::EnsureCodeForDeoptimizationEntry(Isolate* isolate,
CHECK(kind == DeoptimizeKind::kEager || kind == DeoptimizeKind::kSoft ||
kind == DeoptimizeKind::kLazy);
DeoptimizerData* data = isolate->deoptimizer_data();
- if (data->deopt_entry_code(kind) != nullptr) return;
+ if (!data->deopt_entry_code(kind).is_null()) return;
- MacroAssembler masm(isolate, nullptr, 16 * KB, CodeObjectRequired::kYes);
+ MacroAssembler masm(isolate, CodeObjectRequired::kYes,
+ NewAssemblerBuffer(16 * KB));
masm.set_emit_debug_code(false);
- GenerateDeoptimizationEntries(&masm, kMaxNumberOfEntries, kind);
+ GenerateDeoptimizationEntries(&masm, masm.isolate(), kind);
CodeDesc desc;
masm.GetCode(isolate, &desc);
DCHECK(!RelocInfo::RequiresRelocationAfterCodegen(desc));
@@ -1840,13 +1842,13 @@ void Deoptimizer::EnsureCodeForDeoptimizationEntry(Isolate* isolate,
Handle<Code> code = isolate->factory()->NewCode(
desc, Code::STUB, Handle<Object>(), Builtins::kNoBuiltinId,
MaybeHandle<ByteArray>(), MaybeHandle<DeoptimizationData>(), kImmovable);
- CHECK(Heap::IsImmovable(*code));
+ CHECK(isolate->heap()->IsImmovable(*code));
- CHECK_NULL(data->deopt_entry_code(kind));
+ CHECK(data->deopt_entry_code(kind).is_null());
data->set_deopt_entry_code(kind, *code);
}
-void Deoptimizer::EnsureCodeForMaxDeoptimizationEntries(Isolate* isolate) {
+void Deoptimizer::EnsureCodeForDeoptimizationEntries(Isolate* isolate) {
EnsureCodeForDeoptimizationEntry(isolate, DeoptimizeKind::kEager);
EnsureCodeForDeoptimizationEntry(isolate, DeoptimizeKind::kLazy);
EnsureCodeForDeoptimizationEntry(isolate, DeoptimizeKind::kSoft);
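A hedged usage sketch tying the renamed entry points together (both signatures appear in the deoptimizer.h hunks below):

    // Ensure the lazy-deopt stub exists, then resolve its entry address.
    Deoptimizer::EnsureCodeForDeoptimizationEntry(isolate,
                                                  DeoptimizeKind::kLazy);
    Address entry =
        Deoptimizer::GetDeoptimizationEntry(isolate, DeoptimizeKind::kLazy);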
@@ -1865,11 +1867,19 @@ FrameDescription::FrameDescription(uint32_t frame_size, int parameter_count)
// TODO(jbramley): It isn't safe to use kZapUint32 here. If the register
// isn't used before the next safepoint, the GC will try to scan it as a
// tagged value. kZapUint32 looks like a valid tagged pointer, but it isn't.
+#if defined(V8_OS_WIN) && defined(V8_TARGET_ARCH_ARM64)
+ // x18 is reserved as platform register on Windows arm64 platform
+ const int kPlatformRegister = 18;
+ if (r != kPlatformRegister) {
+ SetRegister(r, kZapUint32);
+ }
+#else
SetRegister(r, kZapUint32);
+#endif
}
// Zap all the slots.
- for (unsigned o = 0; o < frame_size; o += kPointerSize) {
+ for (unsigned o = 0; o < frame_size; o += kSystemPointerSize) {
SetFrameSlot(o, kZapUint32);
}
}
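The TODO's kZapUint32 caveat follows from V8's tagging scheme (kSmiTag == 0, kHeapObjectTag == 1); a one-liner makes the hazard concrete:

    #include <cstdint>
    // Any odd bit pattern scans as a (bogus) heap-object pointer, which is
    // exactly why a zap constant with its low bit set can mislead the GC.
    bool LooksLikeHeapObject(uint32_t bits) { return (bits & 1) == 1; }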
@@ -1890,7 +1900,7 @@ void TranslationBuffer::Add(int32_t value) {
} while (bits != 0);
}
-TranslationIterator::TranslationIterator(ByteArray* buffer, int index)
+TranslationIterator::TranslationIterator(ByteArray buffer, int index)
: buffer_(buffer), index_(index) {
DCHECK(index >= 0 && index < buffer->length());
}
@@ -1961,11 +1971,15 @@ void Translation::BeginArgumentsAdaptorFrame(int literal_id, unsigned height) {
}
void Translation::BeginInterpretedFrame(BailoutId bytecode_offset,
- int literal_id, unsigned height) {
+ int literal_id, unsigned height,
+ int return_value_offset,
+ int return_value_count) {
buffer_->Add(INTERPRETED_FRAME);
buffer_->Add(bytecode_offset.ToInt());
buffer_->Add(literal_id);
buffer_->Add(height);
+ buffer_->Add(return_value_offset);
+ buffer_->Add(return_value_count);
}
void Translation::ArgumentsElements(CreateArgumentsType type) {
@@ -2080,7 +2094,7 @@ void Translation::AddUpdateFeedback(int vector_literal, int slot) {
void Translation::StoreJSFrameFunction() {
StoreStackSlot((StandardFrameConstants::kCallerPCOffset -
StandardFrameConstants::kFunctionOffset) /
- kPointerSize);
+ kSystemPointerSize);
}
int Translation::NumberOfOperandsFor(Opcode opcode) {
@@ -2109,12 +2123,13 @@ int Translation::NumberOfOperandsFor(Opcode opcode) {
case UPDATE_FEEDBACK:
return 2;
case BEGIN:
- case INTERPRETED_FRAME:
case CONSTRUCT_STUB_FRAME:
case BUILTIN_CONTINUATION_FRAME:
case JAVA_SCRIPT_BUILTIN_CONTINUATION_FRAME:
case JAVA_SCRIPT_BUILTIN_CONTINUATION_WITH_CATCH_FRAME:
return 3;
+ case INTERPRETED_FRAME:
+ return 5;
}
FATAL("Unexpected translation type");
return -1;
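The writer (BeginInterpretedFrame, widened above to five operands) and this reader-side count must stay in lockstep. A sketch of emitting such a record, with made-up values:

    translation.BeginInterpretedFrame(BailoutId(42), /*literal_id=*/0,
                                      /*height=*/7,
                                      /*return_value_offset=*/0,
                                      /*return_value_count=*/1);
    // NumberOfOperandsFor(INTERPRETED_FRAME) == 5 tells the reader in
    // CreateNextTranslatedFrame to consume exactly these five ints.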
@@ -2165,7 +2180,7 @@ bool MaterializedObjectStore::Remove(Address fp) {
int index = static_cast<int>(std::distance(frame_fps_.begin(), it));
frame_fps_.erase(it);
- FixedArray* array = isolate()->heap()->materialized_objects();
+ FixedArray array = isolate()->heap()->materialized_objects();
CHECK_LT(index, array->length());
int fps_size = static_cast<int>(frame_fps_.size());
@@ -2207,7 +2222,7 @@ Handle<FixedArray> MaterializedObjectStore::EnsureStackEntries(int length) {
for (int i = 0; i < array->length(); i++) {
new_array->set(i, array->get(i));
}
- HeapObject* undefined_value = ReadOnlyRoots(isolate()).undefined_value();
+ HeapObject undefined_value = ReadOnlyRoots(isolate()).undefined_value();
for (int i = array->length(); i < length; i++) {
new_array->set(i, undefined_value);
}
@@ -2282,8 +2297,7 @@ DeoptimizedFrameInfo::DeoptimizedFrameInfo(TranslatedState* state,
CHECK(stack_it == frame_it->end());
}
-
-Deoptimizer::DeoptInfo Deoptimizer::GetDeoptInfo(Code* code, Address pc) {
+Deoptimizer::DeoptInfo Deoptimizer::GetDeoptInfo(Code code, Address pc) {
CHECK(code->InstructionStart() <= pc && pc <= code->InstructionEnd());
SourcePosition last_position = SourcePosition::Unknown();
DeoptimizeReason last_reason = DeoptimizeReason::kUnknown;
@@ -2313,7 +2327,7 @@ Deoptimizer::DeoptInfo Deoptimizer::GetDeoptInfo(Code* code, Address pc) {
// static
int Deoptimizer::ComputeSourcePositionFromBytecodeArray(
- SharedFunctionInfo* shared, BailoutId node_id) {
+ SharedFunctionInfo shared, BailoutId node_id) {
DCHECK(shared->HasBytecodeArray());
return AbstractCode::cast(shared->GetBytecodeArray())
->SourcePosition(node_id.ToInt());
@@ -2391,13 +2405,12 @@ TranslatedValue TranslatedValue::NewBool(TranslatedState* container,
// static
TranslatedValue TranslatedValue::NewTagged(TranslatedState* container,
- Object* literal) {
+ Object literal) {
TranslatedValue slot(container, kTagged);
slot.raw_literal_ = literal;
return slot;
}
-
// static
TranslatedValue TranslatedValue::NewInvalid(TranslatedState* container) {
return TranslatedValue(container, kInvalid);
@@ -2406,13 +2419,11 @@ TranslatedValue TranslatedValue::NewInvalid(TranslatedState* container) {
Isolate* TranslatedValue::isolate() const { return container_->isolate(); }
-
-Object* TranslatedValue::raw_literal() const {
+Object TranslatedValue::raw_literal() const {
DCHECK_EQ(kTagged, kind());
return raw_literal_;
}
-
int32_t TranslatedValue::int32_value() const {
DCHECK_EQ(kInt32, kind());
return int32_value_;
@@ -2450,8 +2461,7 @@ int TranslatedValue::object_index() const {
return materialization_info_.id_;
}
-
-Object* TranslatedValue::GetRawValue() const {
+Object TranslatedValue::GetRawValue() const {
// If we have a value, return it.
if (materialization_state() == kFinished) {
return *storage_;
@@ -2560,7 +2570,7 @@ void TranslatedValue::MaterializeSimple() {
// If we already have materialized, return.
if (materialization_state() == kFinished) return;
- Object* raw_value = GetRawValue();
+ Object raw_value = GetRawValue();
if (raw_value != ReadOnlyRoots(isolate()).arguments_marker()) {
// We can get the value without allocation, just return it here.
set_initialized_storage(Handle<Object>(raw_value, isolate()));
@@ -2659,47 +2669,47 @@ Float64 TranslatedState::GetDoubleSlot(Address fp, int slot_offset) {
void TranslatedValue::Handlify() {
if (kind() == kTagged) {
set_initialized_storage(Handle<Object>(raw_literal(), isolate()));
- raw_literal_ = nullptr;
+ raw_literal_ = Object();
}
}
-
TranslatedFrame TranslatedFrame::InterpretedFrame(
- BailoutId bytecode_offset, SharedFunctionInfo* shared_info, int height) {
- TranslatedFrame frame(kInterpretedFunction, shared_info, height);
+ BailoutId bytecode_offset, SharedFunctionInfo shared_info, int height,
+ int return_value_offset, int return_value_count) {
+ TranslatedFrame frame(kInterpretedFunction, shared_info, height,
+ return_value_offset, return_value_count);
frame.node_id_ = bytecode_offset;
return frame;
}
-
TranslatedFrame TranslatedFrame::ArgumentsAdaptorFrame(
- SharedFunctionInfo* shared_info, int height) {
+ SharedFunctionInfo shared_info, int height) {
return TranslatedFrame(kArgumentsAdaptor, shared_info, height);
}
TranslatedFrame TranslatedFrame::ConstructStubFrame(
- BailoutId bailout_id, SharedFunctionInfo* shared_info, int height) {
+ BailoutId bailout_id, SharedFunctionInfo shared_info, int height) {
TranslatedFrame frame(kConstructStub, shared_info, height);
frame.node_id_ = bailout_id;
return frame;
}
TranslatedFrame TranslatedFrame::BuiltinContinuationFrame(
- BailoutId bailout_id, SharedFunctionInfo* shared_info, int height) {
+ BailoutId bailout_id, SharedFunctionInfo shared_info, int height) {
TranslatedFrame frame(kBuiltinContinuation, shared_info, height);
frame.node_id_ = bailout_id;
return frame;
}
TranslatedFrame TranslatedFrame::JavaScriptBuiltinContinuationFrame(
- BailoutId bailout_id, SharedFunctionInfo* shared_info, int height) {
+ BailoutId bailout_id, SharedFunctionInfo shared_info, int height) {
TranslatedFrame frame(kJavaScriptBuiltinContinuation, shared_info, height);
frame.node_id_ = bailout_id;
return frame;
}
TranslatedFrame TranslatedFrame::JavaScriptBuiltinContinuationWithCatchFrame(
- BailoutId bailout_id, SharedFunctionInfo* shared_info, int height) {
+ BailoutId bailout_id, SharedFunctionInfo shared_info, int height) {
TranslatedFrame frame(kJavaScriptBuiltinContinuationWithCatch, shared_info,
height);
frame.node_id_ = bailout_id;
@@ -2731,42 +2741,46 @@ int TranslatedFrame::GetValueCount() {
void TranslatedFrame::Handlify() {
- if (raw_shared_info_ != nullptr) {
+ if (!raw_shared_info_.is_null()) {
shared_info_ = Handle<SharedFunctionInfo>(raw_shared_info_,
raw_shared_info_->GetIsolate());
- raw_shared_info_ = nullptr;
+ raw_shared_info_ = SharedFunctionInfo();
}
for (auto& value : values_) {
value.Handlify();
}
}
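Handlification now uses the null-object idiom in place of nullptr: "absent" is a default-constructed value whose is_null() returns true. In sketch form:

    SharedFunctionInfo raw;                  // default-constructed: is_null()
    if (!raw.is_null()) { /* handlify */ }   // was: raw != nullptr
    raw = SharedFunctionInfo();              // was: raw = nullptr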
-
TranslatedFrame TranslatedState::CreateNextTranslatedFrame(
- TranslationIterator* iterator, FixedArray* literal_array, Address fp,
+ TranslationIterator* iterator, FixedArray literal_array, Address fp,
FILE* trace_file) {
Translation::Opcode opcode =
static_cast<Translation::Opcode>(iterator->Next());
switch (opcode) {
case Translation::INTERPRETED_FRAME: {
BailoutId bytecode_offset = BailoutId(iterator->Next());
- SharedFunctionInfo* shared_info =
+ SharedFunctionInfo shared_info =
SharedFunctionInfo::cast(literal_array->get(iterator->Next()));
int height = iterator->Next();
+ int return_value_offset = iterator->Next();
+ int return_value_count = iterator->Next();
if (trace_file != nullptr) {
std::unique_ptr<char[]> name = shared_info->DebugName()->ToCString();
PrintF(trace_file, " reading input frame %s", name.get());
int arg_count = shared_info->internal_formal_parameter_count() + 1;
PrintF(trace_file,
- " => bytecode_offset=%d, args=%d, height=%d; inputs:\n",
- bytecode_offset.ToInt(), arg_count, height);
+ " => bytecode_offset=%d, args=%d, height=%d, retval=%i(#%i); "
+ "inputs:\n",
+ bytecode_offset.ToInt(), arg_count, height, return_value_offset,
+ return_value_count);
}
return TranslatedFrame::InterpretedFrame(bytecode_offset, shared_info,
- height);
+ height, return_value_offset,
+ return_value_count);
}
case Translation::ARGUMENTS_ADAPTOR_FRAME: {
- SharedFunctionInfo* shared_info =
+ SharedFunctionInfo shared_info =
SharedFunctionInfo::cast(literal_array->get(iterator->Next()));
int height = iterator->Next();
if (trace_file != nullptr) {
@@ -2779,7 +2793,7 @@ TranslatedFrame TranslatedState::CreateNextTranslatedFrame(
case Translation::CONSTRUCT_STUB_FRAME: {
BailoutId bailout_id = BailoutId(iterator->Next());
- SharedFunctionInfo* shared_info =
+ SharedFunctionInfo shared_info =
SharedFunctionInfo::cast(literal_array->get(iterator->Next()));
int height = iterator->Next();
if (trace_file != nullptr) {
@@ -2794,7 +2808,7 @@ TranslatedFrame TranslatedState::CreateNextTranslatedFrame(
case Translation::BUILTIN_CONTINUATION_FRAME: {
BailoutId bailout_id = BailoutId(iterator->Next());
- SharedFunctionInfo* shared_info =
+ SharedFunctionInfo shared_info =
SharedFunctionInfo::cast(literal_array->get(iterator->Next()));
int height = iterator->Next();
if (trace_file != nullptr) {
@@ -2813,7 +2827,7 @@ TranslatedFrame TranslatedState::CreateNextTranslatedFrame(
case Translation::JAVA_SCRIPT_BUILTIN_CONTINUATION_FRAME: {
BailoutId bailout_id = BailoutId(iterator->Next());
- SharedFunctionInfo* shared_info =
+ SharedFunctionInfo shared_info =
SharedFunctionInfo::cast(literal_array->get(iterator->Next()));
int height = iterator->Next();
if (trace_file != nullptr) {
@@ -2831,7 +2845,7 @@ TranslatedFrame TranslatedState::CreateNextTranslatedFrame(
}
case Translation::JAVA_SCRIPT_BUILTIN_CONTINUATION_WITH_CATCH_FRAME: {
BailoutId bailout_id = BailoutId(iterator->Next());
- SharedFunctionInfo* shared_info =
+ SharedFunctionInfo shared_info =
SharedFunctionInfo::cast(literal_array->get(iterator->Next()));
int height = iterator->Next();
if (trace_file != nullptr) {
@@ -2875,7 +2889,6 @@ TranslatedFrame TranslatedState::CreateNextTranslatedFrame(
return TranslatedFrame::InvalidFrame();
}
-
// static
void TranslatedFrame::AdvanceIterator(
std::deque<TranslatedValue>::iterator* iter) {
@@ -2902,7 +2915,7 @@ Address TranslatedState::ComputeArgumentsPosition(Address input_frame_pointer,
if (parent_frame_type ==
StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)) {
if (length)
- *length = Smi::cast(*reinterpret_cast<Object**>(
+ *length = Smi::cast(*FullObjectSlot(
parent_frame_pointer +
ArgumentsAdaptorFrameConstants::kLengthOffset))
->value();
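FullObjectSlot replaces the reinterpret_cast<Object**> pattern: it wraps an Address and dereferences to a full, system-pointer-sized tagged value. Under that reading (the offset name here is illustrative; the real code uses ArgumentsAdaptorFrameConstants::kLengthOffset as above):

    FullObjectSlot slot(parent_frame_pointer + kSomeSlotOffset);
    Object length = *slot;               // full tagged load from the frame
    int n = Smi::cast(length)->value();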
@@ -2943,7 +2956,7 @@ void TranslatedState::CreateArgumentsElementsTranslatedValues(
object_positions_.push_back({frame_index, value_index});
frame.Add(TranslatedValue::NewDeferredObject(
- this, length + FixedArray::kHeaderSize / kPointerSize, object_index));
+ this, length + FixedArray::kHeaderSize / kTaggedSize, object_index));
ReadOnlyRoots roots(isolate_);
frame.Add(TranslatedValue::NewTagged(this, roots.fixed_array_map()));
@@ -2961,9 +2974,8 @@ void TranslatedState::CreateArgumentsElementsTranslatedValues(
for (int i = length - number_of_holes - 1; i >= 0; --i) {
Address argument_slot = arguments_frame +
CommonFrameConstants::kFixedFrameSizeAboveFp +
- i * kPointerSize;
- frame.Add(TranslatedValue::NewTagged(
- this, *reinterpret_cast<Object**>(argument_slot)));
+ i * kSystemPointerSize;
+ frame.Add(TranslatedValue::NewTagged(this, *FullObjectSlot(argument_slot)));
}
}
@@ -2977,7 +2989,7 @@ void TranslatedState::CreateArgumentsElementsTranslatedValues(
// Returns the number of expected nested translations from the
// TranslationIterator.
int TranslatedState::CreateNextTranslatedValue(
- int frame_index, TranslationIterator* iterator, FixedArray* literal_array,
+ int frame_index, TranslationIterator* iterator, FixedArray literal_array,
Address fp, RegisterValues* registers, FILE* trace_file) {
disasm::NameConverter converter;
@@ -3056,10 +3068,10 @@ int TranslatedState::CreateNextTranslatedValue(
if (trace_file != nullptr) {
PrintF(trace_file, V8PRIxPTR_FMT " ; %s ", value,
converter.NameOfCPURegister(input_reg));
- reinterpret_cast<Object*>(value)->ShortPrint(trace_file);
+ Object(value)->ShortPrint(trace_file);
}
TranslatedValue translated_value =
- TranslatedValue::NewTagged(this, reinterpret_cast<Object*>(value));
+ TranslatedValue::NewTagged(this, Object(value));
frame.Add(translated_value);
return translated_value.GetChildrenCount();
}
@@ -3145,9 +3157,8 @@ int TranslatedState::CreateNextTranslatedValue(
}
Float32 value = registers->GetFloatRegister(input_reg);
if (trace_file != nullptr) {
- PrintF(
- trace_file, "%e ; %s (float)", value.get_scalar(),
- RegisterConfiguration::Default()->GetFloatRegisterName(input_reg));
+ PrintF(trace_file, "%e ; %s (float)", value.get_scalar(),
+ RegisterName(FloatRegister::from_code(input_reg)));
}
TranslatedValue translated_value = TranslatedValue::NewFloat(this, value);
frame.Add(translated_value);
@@ -3163,9 +3174,8 @@ int TranslatedState::CreateNextTranslatedValue(
}
Float64 value = registers->GetDoubleRegister(input_reg);
if (trace_file != nullptr) {
- PrintF(
- trace_file, "%e ; %s (double)", value.get_scalar(),
- RegisterConfiguration::Default()->GetDoubleRegisterName(input_reg));
+ PrintF(trace_file, "%e ; %s (double)", value.get_scalar(),
+ RegisterName(DoubleRegister::from_code(input_reg)));
}
TranslatedValue translated_value =
TranslatedValue::NewDouble(this, value);
@@ -3180,10 +3190,10 @@ int TranslatedState::CreateNextTranslatedValue(
if (trace_file != nullptr) {
PrintF(trace_file, V8PRIxPTR_FMT " ; [fp %c %3d] ", value,
slot_offset < 0 ? '-' : '+', std::abs(slot_offset));
- reinterpret_cast<Object*>(value)->ShortPrint(trace_file);
+ Object(value)->ShortPrint(trace_file);
}
TranslatedValue translated_value =
- TranslatedValue::NewTagged(this, reinterpret_cast<Object*>(value));
+ TranslatedValue::NewTagged(this, Object(value));
frame.Add(translated_value);
return translated_value.GetChildrenCount();
}
@@ -3272,11 +3282,11 @@ int TranslatedState::CreateNextTranslatedValue(
case Translation::LITERAL: {
int literal_index = iterator->Next();
- Object* value = literal_array->get(literal_index);
+ Object value = literal_array->get(literal_index);
if (trace_file != nullptr) {
- PrintF(trace_file, V8PRIxPTR_FMT " ; (literal %2d) ",
- reinterpret_cast<intptr_t>(value), literal_index);
- reinterpret_cast<Object*>(value)->ShortPrint(trace_file);
+ PrintF(trace_file, V8PRIxPTR_FMT " ; (literal %2d) ", value->ptr(),
+ literal_index);
+ value->ShortPrint(trace_file);
}
TranslatedValue translated_value =
@@ -3291,10 +3301,10 @@ int TranslatedState::CreateNextTranslatedValue(
TranslatedState::TranslatedState(const JavaScriptFrame* frame) {
int deopt_index = Safepoint::kNoDeoptimizationIndex;
- DeoptimizationData* data =
+ DeoptimizationData data =
static_cast<const OptimizedFrame*>(frame)->GetDeoptimizationData(
&deopt_index);
- DCHECK(data != nullptr && deopt_index != Safepoint::kNoDeoptimizationIndex);
+ DCHECK(!data.is_null() && deopt_index != Safepoint::kNoDeoptimizationIndex);
TranslationIterator it(data->TranslationByteArray(),
data->TranslationIndex(deopt_index)->value());
Init(frame->isolate(), frame->fp(), &it, data->LiteralArray(),
@@ -3304,7 +3314,7 @@ TranslatedState::TranslatedState(const JavaScriptFrame* frame) {
void TranslatedState::Init(Isolate* isolate, Address input_frame_pointer,
TranslationIterator* iterator,
- FixedArray* literal_array, RegisterValues* registers,
+ FixedArray literal_array, RegisterValues* registers,
FILE* trace_file, int formal_parameter_count) {
DCHECK(frames_.empty());
@@ -3383,10 +3393,10 @@ void TranslatedState::Init(Isolate* isolate, Address input_frame_pointer,
void TranslatedState::Prepare(Address stack_frame_pointer) {
for (auto& frame : frames_) frame.Handlify();
- if (feedback_vector_ != nullptr) {
+ if (!feedback_vector_.is_null()) {
feedback_vector_handle_ =
Handle<FeedbackVector>(feedback_vector_, isolate());
- feedback_vector_ = nullptr;
+ feedback_vector_ = FeedbackVector();
}
stack_frame_pointer_ = stack_frame_pointer;
@@ -3632,7 +3642,7 @@ void TranslatedState::EnsureCapturedObjectAllocatedAt(
Smi::cast(frame->values_[value_index].GetRawValue())->value();
int instance_size = FixedArray::SizeFor(array_length);
- CHECK_EQ(instance_size, slot->GetChildrenCount() * kPointerSize);
+ CHECK_EQ(instance_size, slot->GetChildrenCount() * kTaggedSize);
// Canonicalize empty fixed array.
if (*map == ReadOnlyRoots(isolate()).empty_fixed_array()->map() &&
@@ -3653,7 +3663,7 @@ void TranslatedState::EnsureCapturedObjectAllocatedAt(
Smi::cast(frame->values_[value_index].GetRawValue())->value();
int array_length = PropertyArray::LengthField::decode(length_or_hash);
int instance_size = PropertyArray::SizeFor(array_length);
- CHECK_EQ(instance_size, slot->GetChildrenCount() * kPointerSize);
+ CHECK_EQ(instance_size, slot->GetChildrenCount() * kTaggedSize);
slot->set_storage(AllocateStorageFor(slot));
// Make sure all the remaining children (after the map) are allocated.
@@ -3724,7 +3734,7 @@ void TranslatedState::EnsurePropertiesAllocatedAndMarked(
!index.is_inobject()) {
CHECK(!map->IsUnboxedDoubleField(index));
int outobject_index = index.outobject_array_index();
- int array_index = outobject_index * kPointerSize;
+ int array_index = outobject_index * kTaggedSize;
object_storage->set(array_index, kStoreMutableHeapNumber);
}
}
@@ -3732,7 +3742,7 @@ void TranslatedState::EnsurePropertiesAllocatedAndMarked(
Handle<ByteArray> TranslatedState::AllocateStorageFor(TranslatedValue* slot) {
int allocate_size =
- ByteArray::LengthFor(slot->GetChildrenCount() * kPointerSize);
+ ByteArray::LengthFor(slot->GetChildrenCount() * kTaggedSize);
// It is important to allocate all the objects tenured so that the marker
// does not visit them.
Handle<ByteArray> object_storage =
@@ -3745,7 +3755,7 @@ Handle<ByteArray> TranslatedState::AllocateStorageFor(TranslatedValue* slot) {
void TranslatedState::EnsureJSObjectAllocated(TranslatedValue* slot,
Handle<Map> map) {
- CHECK_EQ(map->instance_size(), slot->GetChildrenCount() * kPointerSize);
+ CHECK_EQ(map->instance_size(), slot->GetChildrenCount() * kTaggedSize);
Handle<ByteArray> object_storage = AllocateStorageFor(slot);
// Now we handle the interesting (JSObject) case.
@@ -3757,8 +3767,8 @@ void TranslatedState::EnsureJSObjectAllocated(TranslatedValue* slot,
FieldIndex index = FieldIndex::ForDescriptor(*map, i);
if (descriptors->GetDetails(i).representation().IsDouble() &&
index.is_inobject()) {
- CHECK_GE(index.index(), FixedArray::kHeaderSize / kPointerSize);
- int array_index = index.index() * kPointerSize - FixedArray::kHeaderSize;
+ CHECK_GE(index.index(), FixedArray::kHeaderSize / kTaggedSize);
+ int array_index = index.index() * kTaggedSize - FixedArray::kHeaderSize;
uint8_t marker = map->IsUnboxedDoubleField(index)
? kStoreUnboxedDouble
: kStoreMutableHeapNumber;
@@ -3790,7 +3800,7 @@ void TranslatedState::InitializeJSObjectAt(
// Notify the concurrent marker about the layout change.
isolate()->heap()->NotifyObjectLayoutChange(
- *object_storage, slot->GetChildrenCount() * kPointerSize, no_allocation);
+ *object_storage, slot->GetChildrenCount() * kTaggedSize, no_allocation);
// Fill the property array field.
{
@@ -3803,7 +3813,7 @@ void TranslatedState::InitializeJSObjectAt(
// For all the other fields we first look at the fixed array and check the
// marker to see if we store an unboxed double.
- DCHECK_EQ(kPointerSize, JSObject::kPropertiesOrHashOffset);
+ DCHECK_EQ(kTaggedSize, JSObject::kPropertiesOrHashOffset);
for (int i = 2; i < slot->GetChildrenCount(); i++) {
// Initialize and extract the value from its slot.
Handle<Object> field_value = GetValueAndAdvance(frame, value_index);
@@ -3811,7 +3821,7 @@ void TranslatedState::InitializeJSObjectAt(
// Read out the marker and ensure the field is consistent with
// what the markers in the storage say (note that all heap numbers
// should be fully initialized by now).
- int offset = i * kPointerSize;
+ int offset = i * kTaggedSize;
uint8_t marker = READ_UINT8_FIELD(*object_storage, offset);
if (marker == kStoreUnboxedDouble) {
double double_field_value;
@@ -3850,12 +3860,12 @@ void TranslatedState::InitializeObjectWithTaggedFieldsAt(
// Notify the concurrent marker about the layout change.
isolate()->heap()->NotifyObjectLayoutChange(
- *object_storage, slot->GetChildrenCount() * kPointerSize, no_allocation);
+ *object_storage, slot->GetChildrenCount() * kTaggedSize, no_allocation);
// Write the fields to the object.
for (int i = 1; i < slot->GetChildrenCount(); i++) {
Handle<Object> field_value = GetValueAndAdvance(frame, value_index);
- int offset = i * kPointerSize;
+ int offset = i * kTaggedSize;
uint8_t marker = READ_UINT8_FIELD(*object_storage, offset);
if (i > 1 && marker == kStoreMutableHeapNumber) {
CHECK(field_value->IsMutableHeapNumber());
@@ -4035,7 +4045,7 @@ bool TranslatedState::DoUpdateFeedback() {
}
void TranslatedState::ReadUpdateFeedback(TranslationIterator* iterator,
- FixedArray* literal_array,
+ FixedArray literal_array,
FILE* trace_file) {
CHECK_EQ(Translation::UPDATE_FEEDBACK, iterator->Next());
feedback_vector_ = FeedbackVector::cast(literal_array->get(iterator->Next()));
diff --git a/deps/v8/src/deoptimizer.h b/deps/v8/src/deoptimizer.h
index 0c5254e773..31268c7d4a 100644
--- a/deps/v8/src/deoptimizer.h
+++ b/deps/v8/src/deoptimizer.h
@@ -15,10 +15,12 @@
#include "src/deoptimize-reason.h"
#include "src/feedback-vector.h"
#include "src/frame-constants.h"
+#include "src/frames.h"
#include "src/globals.h"
#include "src/isolate.h"
-#include "src/macro-assembler.h"
+#include "src/label.h"
#include "src/objects/shared-function-info.h"
+#include "src/register-arch.h"
#include "src/source-position.h"
#include "src/zone/zone-chunk-list.h"
@@ -30,13 +32,14 @@ class TranslationIterator;
class DeoptimizedFrameInfo;
class TranslatedState;
class RegisterValues;
+class MacroAssembler;
class TranslatedValue {
public:
// Allocation-less getter of the value.
// Returns ReadOnlyRoots::arguments_marker() if allocation would be necessary
// to get the value.
- Object* GetRawValue() const;
+ Object GetRawValue() const;
// Getter for the value, takes care of materializing the subgraph
// reachable from this value.
@@ -92,7 +95,7 @@ class TranslatedValue {
static TranslatedValue NewInt64(TranslatedState* container, int64_t value);
static TranslatedValue NewUInt32(TranslatedState* container, uint32_t value);
static TranslatedValue NewBool(TranslatedState* container, uint32_t value);
- static TranslatedValue NewTagged(TranslatedState* container, Object* literal);
+ static TranslatedValue NewTagged(TranslatedState* container, Object literal);
static TranslatedValue NewInvalid(TranslatedState* container);
Isolate* isolate() const;
@@ -125,7 +128,7 @@ class TranslatedValue {
union {
// kind kTagged. After handlification it is always nullptr.
- Object* raw_literal_;
+ Object raw_literal_;
// kind is kUInt32 or kBoolBit.
uint32_t uint32_value_;
// kind is kInt32.
@@ -141,7 +144,7 @@ class TranslatedValue {
};
// Checked accessors for the union members.
- Object* raw_literal() const;
+ Object raw_literal() const;
int32_t int32_value() const;
int64_t int64_value() const;
uint32_t uint32_value() const;
@@ -170,9 +173,11 @@ class TranslatedFrame {
BailoutId node_id() const { return node_id_; }
Handle<SharedFunctionInfo> shared_info() const { return shared_info_; }
int height() const { return height_; }
+ int return_value_offset() const { return return_value_offset_; }
+ int return_value_count() const { return return_value_count_; }
- SharedFunctionInfo* raw_shared_info() const {
- CHECK_NOT_NULL(raw_shared_info_);
+ SharedFunctionInfo raw_shared_info() const {
+ CHECK(!raw_shared_info_.is_null());
return raw_shared_info_;
}
@@ -185,8 +190,8 @@ class TranslatedFrame {
}
iterator operator++(int) {
+ iterator original(position_, input_index_);
++input_index_;
- iterator original(position_);
AdvanceIterator(&position_);
return original;
}
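The two-line reorder above fixes a real bug: the old code bumped input_index_ first and then built the snapshot with the single-argument constructor, which silently reset the copy's input_index_ to 0. The canonical postfix-increment shape it restores:

    // Snapshot all state first, mutate second, return the snapshot.
    iterator operator++(int) {
      iterator original(position_, input_index_);  // both members captured
      ++input_index_;
      AdvanceIterator(&position_);
      return original;
    }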
@@ -207,8 +212,9 @@ class TranslatedFrame {
private:
friend TranslatedFrame;
- explicit iterator(std::deque<TranslatedValue>::iterator position)
- : position_(position), input_index_(0) {}
+ explicit iterator(std::deque<TranslatedValue>::iterator position,
+ int input_index = 0)
+ : position_(position), input_index_(input_index) {}
std::deque<TranslatedValue>::iterator position_;
int input_index_;
@@ -228,33 +234,38 @@ class TranslatedFrame {
// Constructor static methods.
static TranslatedFrame InterpretedFrame(BailoutId bytecode_offset,
- SharedFunctionInfo* shared_info,
- int height);
+ SharedFunctionInfo shared_info,
+ int height, int return_value_offset,
+ int return_value_count);
static TranslatedFrame AccessorFrame(Kind kind,
- SharedFunctionInfo* shared_info);
- static TranslatedFrame ArgumentsAdaptorFrame(SharedFunctionInfo* shared_info,
+ SharedFunctionInfo shared_info);
+ static TranslatedFrame ArgumentsAdaptorFrame(SharedFunctionInfo shared_info,
int height);
static TranslatedFrame ConstructStubFrame(BailoutId bailout_id,
- SharedFunctionInfo* shared_info,
+ SharedFunctionInfo shared_info,
int height);
static TranslatedFrame BuiltinContinuationFrame(
- BailoutId bailout_id, SharedFunctionInfo* shared_info, int height);
+ BailoutId bailout_id, SharedFunctionInfo shared_info, int height);
static TranslatedFrame JavaScriptBuiltinContinuationFrame(
- BailoutId bailout_id, SharedFunctionInfo* shared_info, int height);
+ BailoutId bailout_id, SharedFunctionInfo shared_info, int height);
static TranslatedFrame JavaScriptBuiltinContinuationWithCatchFrame(
- BailoutId bailout_id, SharedFunctionInfo* shared_info, int height);
+ BailoutId bailout_id, SharedFunctionInfo shared_info, int height);
static TranslatedFrame InvalidFrame() {
- return TranslatedFrame(kInvalid, nullptr);
+ return TranslatedFrame(kInvalid, SharedFunctionInfo());
}
static void AdvanceIterator(std::deque<TranslatedValue>::iterator* iter);
- TranslatedFrame(Kind kind, SharedFunctionInfo* shared_info = nullptr,
- int height = 0)
+ TranslatedFrame(Kind kind,
+ SharedFunctionInfo shared_info = SharedFunctionInfo(),
+ int height = 0, int return_value_offset = 0,
+ int return_value_count = 0)
: kind_(kind),
node_id_(BailoutId::None()),
raw_shared_info_(shared_info),
- height_(height) {}
+ height_(height),
+ return_value_offset_(return_value_offset),
+ return_value_count_(return_value_count) {}
void Add(const TranslatedValue& value) { values_.push_back(value); }
TranslatedValue* ValueAt(int index) { return &(values_[index]); }
@@ -262,9 +273,11 @@ class TranslatedFrame {
Kind kind_;
BailoutId node_id_;
- SharedFunctionInfo* raw_shared_info_;
+ SharedFunctionInfo raw_shared_info_;
Handle<SharedFunctionInfo> shared_info_;
int height_;
+ int return_value_offset_;
+ int return_value_count_;
typedef std::deque<TranslatedValue> ValuesContainer;
@@ -314,7 +327,7 @@ class TranslatedState {
Isolate* isolate() { return isolate_; }
void Init(Isolate* isolate, Address input_frame_pointer,
- TranslationIterator* iterator, FixedArray* literal_array,
+ TranslationIterator* iterator, FixedArray literal_array,
RegisterValues* registers, FILE* trace_file, int parameter_count);
void VerifyMaterializedObjects();
@@ -324,11 +337,10 @@ class TranslatedState {
friend TranslatedValue;
TranslatedFrame CreateNextTranslatedFrame(TranslationIterator* iterator,
- FixedArray* literal_array,
- Address fp,
- FILE* trace_file);
+ FixedArray literal_array,
+ Address fp, FILE* trace_file);
int CreateNextTranslatedValue(int frame_index, TranslationIterator* iterator,
- FixedArray* literal_array, Address fp,
+ FixedArray literal_array, Address fp,
RegisterValues* registers, FILE* trace_file);
Address ComputeArgumentsPosition(Address input_frame_pointer,
CreateArgumentsType type, int* length);
@@ -366,7 +378,7 @@ class TranslatedState {
Handle<Map> map, const DisallowHeapAllocation& no_allocation);
void ReadUpdateFeedback(TranslationIterator* iterator,
- FixedArray* literal_array, FILE* trace_file);
+ FixedArray literal_array, FILE* trace_file);
TranslatedValue* ResolveCapturedObject(TranslatedValue* slot);
TranslatedValue* GetValueByObjectIndex(int object_index);
@@ -388,14 +400,14 @@ class TranslatedState {
};
std::deque<ObjectPosition> object_positions_;
Handle<FeedbackVector> feedback_vector_handle_;
- FeedbackVector* feedback_vector_ = nullptr;
+ FeedbackVector feedback_vector_;
FeedbackSlot feedback_slot_;
};
class OptimizedFunctionVisitor {
public:
virtual ~OptimizedFunctionVisitor() = default;
- virtual void VisitFunction(JSFunction* function) = 0;
+ virtual void VisitFunction(JSFunction function) = 0;
};
class Deoptimizer : public Malloced {
@@ -412,9 +424,9 @@ class Deoptimizer : public Malloced {
static const int kNoDeoptId = -1;
};
- static DeoptInfo GetDeoptInfo(Code* code, Address from);
+ static DeoptInfo GetDeoptInfo(Code code, Address from);
- static int ComputeSourcePositionFromBytecodeArray(SharedFunctionInfo* shared,
+ static int ComputeSourcePositionFromBytecodeArray(SharedFunctionInfo shared,
BailoutId node_id);
struct JumpTableEntry : public ZoneObject {
@@ -449,7 +461,7 @@ class Deoptimizer : public Malloced {
// Number of created JS frames. Not all created frames are necessarily JS.
int jsframe_count() const { return jsframe_count_; }
- static Deoptimizer* New(JSFunction* function, DeoptimizeKind kind,
+ static Deoptimizer* New(Address raw_function, DeoptimizeKind kind,
unsigned bailout_id, Address from, int fp_to_sp_delta,
Isolate* isolate);
static Deoptimizer* Grab(Isolate* isolate);
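New() now takes the function as a raw Address because it is reached from the deopt entry stubs, and generated code traffics in tagged words rather than C++ object types. Presumably the implementation re-wraps the bits on arrival; a hedged sketch (assumed detail, not shown in this diff):

    JSFunction function = JSFunction::cast(Object(raw_function));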
@@ -464,7 +476,7 @@ class Deoptimizer : public Malloced {
// again and any activations of the optimized code will get deoptimized when
// execution returns. If {code} is specified then the given code is targeted
// instead of the function code (e.g. OSR code not installed on function).
- static void DeoptimizeFunction(JSFunction* function, Code* code = nullptr);
+ static void DeoptimizeFunction(JSFunction function, Code code = Code());
// Deoptimize all code in the given isolate.
static void DeoptimizeAll(Isolate* isolate);
@@ -480,10 +492,7 @@ class Deoptimizer : public Malloced {
static void ComputeOutputFrames(Deoptimizer* deoptimizer);
- static Address GetDeoptimizationEntry(Isolate* isolate, int id,
- DeoptimizeKind kind);
- static int GetDeoptimizationId(Isolate* isolate, Address addr,
- DeoptimizeKind kind);
+ static Address GetDeoptimizationEntry(Isolate* isolate, DeoptimizeKind kind);
// Returns true if {addr} is a deoptimization entry and stores its type in
// {type}. Returns false if {addr} is not a deoptimization entry.
@@ -505,51 +514,28 @@ class Deoptimizer : public Malloced {
static const int kNotDeoptimizationEntry = -1;
- // Generators for the deoptimization entry code.
- class TableEntryGenerator {
- public:
- TableEntryGenerator(MacroAssembler* masm, DeoptimizeKind kind, int count)
- : masm_(masm), deopt_kind_(kind), count_(count) {}
-
- void Generate();
-
- protected:
- MacroAssembler* masm() const { return masm_; }
- DeoptimizeKind deopt_kind() const { return deopt_kind_; }
- Isolate* isolate() const { return masm_->isolate(); }
-
- void GeneratePrologue();
-
- private:
- int count() const { return count_; }
-
- MacroAssembler* masm_;
- DeoptimizeKind deopt_kind_;
- int count_;
- };
-
static void EnsureCodeForDeoptimizationEntry(Isolate* isolate,
DeoptimizeKind kind);
- static void EnsureCodeForMaxDeoptimizationEntries(Isolate* isolate);
+ static void EnsureCodeForDeoptimizationEntries(Isolate* isolate);
Isolate* isolate() const { return isolate_; }
+ static const int kMaxNumberOfEntries = 16384;
+
private:
friend class FrameWriter;
- void QueueValueForMaterialization(Address output_address, Object* obj,
+ void QueueValueForMaterialization(Address output_address, Object obj,
const TranslatedFrame::iterator& iterator);
- static const int kMinNumberOfEntries = 64;
- static const int kMaxNumberOfEntries = 16384;
- Deoptimizer(Isolate* isolate, JSFunction* function, DeoptimizeKind kind,
+ Deoptimizer(Isolate* isolate, JSFunction function, DeoptimizeKind kind,
unsigned bailout_id, Address from, int fp_to_sp_delta);
- Code* FindOptimizedCode();
+ Code FindOptimizedCode();
void PrintFunctionName();
void DeleteFrameDescriptions();
- static bool IsInDeoptimizationTable(Isolate* isolate, Address addr,
- DeoptimizeKind type);
+ static bool IsDeoptimizationEntry(Isolate* isolate, Address addr,
+ DeoptimizeKind type);
void DoComputeOutputFrames();
void DoComputeInterpretedFrame(TranslatedFrame* translated_frame,
@@ -578,19 +564,20 @@ class Deoptimizer : public Malloced {
unsigned ComputeInputFrameAboveFpFixedSize() const;
unsigned ComputeInputFrameSize() const;
- static unsigned ComputeInterpretedFixedSize(SharedFunctionInfo* shared);
+ static unsigned ComputeInterpretedFixedSize(SharedFunctionInfo shared);
- static unsigned ComputeIncomingArgumentSize(SharedFunctionInfo* shared);
- static unsigned ComputeOutgoingArgumentSize(Code* code, unsigned bailout_id);
+ static unsigned ComputeIncomingArgumentSize(SharedFunctionInfo shared);
+ static unsigned ComputeOutgoingArgumentSize(Code code, unsigned bailout_id);
- static void GenerateDeoptimizationEntries(MacroAssembler* masm, int count,
+ static void GenerateDeoptimizationEntries(MacroAssembler* masm,
+ Isolate* isolate,
DeoptimizeKind kind);
// Marks all the code in the given context for deoptimization.
- static void MarkAllCodeForContext(Context* native_context);
+ static void MarkAllCodeForContext(Context native_context);
// Deoptimizes all code marked in the given context.
- static void DeoptimizeMarkedCodeForContext(Context* native_context);
+ static void DeoptimizeMarkedCodeForContext(Context native_context);
// Some architectures need to push padding together with the TOS register
// in order to maintain stack alignment.
@@ -599,11 +586,11 @@ class Deoptimizer : public Malloced {
// Searches the list of known deoptimizing code for a Code object
// containing the given address (which is supposedly faster than
// searching all code objects).
- Code* FindDeoptimizingCode(Address addr);
+ Code FindDeoptimizingCode(Address addr);
Isolate* isolate_;
- JSFunction* function_;
- Code* compiled_code_;
+ JSFunction function_;
+ Code compiled_code_;
unsigned bailout_id_;
DeoptimizeKind deopt_kind_;
Address from_;
@@ -707,9 +694,9 @@ class FrameDescription {
explicit FrameDescription(uint32_t frame_size, int parameter_count = 0);
void* operator new(size_t size, uint32_t frame_size) {
- // Subtracts kPointerSize, as the member frame_content_ already supplies
- // the first element of the area to store the frame.
- return malloc(size + frame_size - kPointerSize);
+ // Subtracts kSystemPointerSize, as the member frame_content_ already
+ // supplies the first element of the area to store the frame.
+ return malloc(size + frame_size - kSystemPointerSize);
}
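The operator new arithmetic works because frame_content_ is the trailing one-element array of FrameDescription, so the first frame slot is already counted in sizeof(FrameDescription). The same trick in isolation (names are illustrative):

    #include <cstdint>
    #include <cstdlib>
    struct Block {
      size_t size_;
      intptr_t content_[1];  // first slot lives inside sizeof(Block)
    };
    Block* AllocateBlock(size_t n_slots) {
      // One slot is in sizeof(Block); pay for the remaining n_slots - 1.
      return static_cast<Block*>(
          malloc(sizeof(Block) + (n_slots - 1) * sizeof(intptr_t)));
    }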
void operator delete(void* pointer, uint32_t frame_size) {
@@ -733,7 +720,7 @@ class FrameDescription {
unsigned GetLastArgumentSlotOffset() {
int parameter_slots = parameter_count();
if (kPadArguments) parameter_slots = RoundUp(parameter_slots, 2);
- return GetFrameSize() - parameter_slots * kPointerSize;
+ return GetFrameSize() - parameter_slots * kSystemPointerSize;
}
Address GetFramePointerAddress() {
@@ -858,9 +845,9 @@ class DeoptimizerData {
Heap* heap_;
static const int kLastDeoptimizeKind =
static_cast<int>(DeoptimizeKind::kLastDeoptimizeKind);
- Code* deopt_entry_code_[kLastDeoptimizeKind + 1];
- Code* deopt_entry_code(DeoptimizeKind kind);
- void set_deopt_entry_code(DeoptimizeKind kind, Code* code);
+ Code deopt_entry_code_[kLastDeoptimizeKind + 1];
+ Code deopt_entry_code(DeoptimizeKind kind);
+ void set_deopt_entry_code(DeoptimizeKind kind, Code code);
Deoptimizer* current_;
@@ -884,7 +871,7 @@ class TranslationBuffer {
class TranslationIterator {
public:
- TranslationIterator(ByteArray* buffer, int index);
+ TranslationIterator(ByteArray buffer, int index);
int32_t Next();
@@ -895,7 +882,7 @@ class TranslationIterator {
}
private:
- ByteArray* buffer_;
+ ByteArray buffer_;
int index_;
};
@@ -950,7 +937,8 @@ class Translation {
// Commands.
void BeginInterpretedFrame(BailoutId bytecode_offset, int literal_id,
- unsigned height);
+ unsigned height, int return_value_offset,
+ int return_value_count);
void BeginArgumentsAdaptorFrame(int literal_id, unsigned height);
void BeginConstructStubFrame(BailoutId bailout_id, int literal_id,
unsigned height);
diff --git a/deps/v8/src/detachable-vector.cc b/deps/v8/src/detachable-vector.cc
new file mode 100644
index 0000000000..68e1ec8f17
--- /dev/null
+++ b/deps/v8/src/detachable-vector.cc
@@ -0,0 +1,19 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/detachable-vector.h"
+
+namespace v8 {
+namespace internal {
+
+const size_t DetachableVectorBase::kMinimumCapacity = 8;
+const size_t DetachableVectorBase::kDataOffset =
+ offsetof(DetachableVectorBase, data_);
+const size_t DetachableVectorBase::kCapacityOffset =
+ offsetof(DetachableVectorBase, capacity_);
+const size_t DetachableVectorBase::kSizeOffset =
+ offsetof(DetachableVectorBase, size_);
+
+} // namespace internal
+} // namespace v8
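These out-of-line definitions exist because the constants are ODR-used (their values are needed at link time, e.g. by code that reaches into the vector's fields at a raw offset), and pre-C++17 an in-class static const declaration alone provides no storage. A minimal sketch of the pattern, with hypothetical names:

    // some_header.h (hypothetical)
    #include <cstddef>

    struct Fields {
      static const std::size_t kValueOffset;  // declaration only: no storage
      int tag;
      int value;
    };

    // some_header.cc -- the single definition that gives kValueOffset storage,
    // so stubs or generated code can read the field at base + kValueOffset.
    const std::size_t Fields::kValueOffset = offsetof(Fields, value);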
diff --git a/deps/v8/src/detachable-vector.h b/deps/v8/src/detachable-vector.h
index 4609ebf0e1..1e9ac98df2 100644
--- a/deps/v8/src/detachable-vector.h
+++ b/deps/v8/src/detachable-vector.h
@@ -5,65 +5,96 @@
#ifndef V8_DETACHABLE_VECTOR_H_
#define V8_DETACHABLE_VECTOR_H_
-#include <vector>
+#include <stddef.h>
+
+#include <algorithm>
+
+#include "src/base/logging.h"
+#include "src/base/macros.h"
namespace v8 {
namespace internal {
-// This class wraps a std::vector and provides a few of the common member
-// functions for accessing the data. It acts as a lazy wrapper of the vector,
-// not initializing the backing store until push_back() is first called. Two
-// extra methods are also provided: free() and detach(), which allow for manual
-// control of the backing store. This is currently required for use in the
-// HandleScopeImplementer. Any other class should just use a std::vector
-// directly.
-template <typename T>
-class DetachableVector {
+class V8_EXPORT_PRIVATE DetachableVectorBase {
public:
- DetachableVector() : vector_(nullptr) {}
+ // Clear our reference to the backing store. Does not delete it!
+ void detach() {
+ data_ = nullptr;
+ capacity_ = 0;
+ size_ = 0;
+ }
+
+ void pop_back() { --size_; }
+ size_t capacity() const { return capacity_; }
+ size_t size() const { return size_; }
+ bool empty() const { return size_ == 0; }
- ~DetachableVector() { delete vector_; }
+ static const size_t kMinimumCapacity;
+ static const size_t kDataOffset;
+ static const size_t kCapacityOffset;
+ static const size_t kSizeOffset;
+
+ protected:
+ void* data_ = nullptr;
+ size_t capacity_ = 0;
+ size_t size_ = 0;
+};
+
+// This class wraps an array and provides a few of the common member
+// functions for accessing the data. Two extra methods are also provided: free()
+// and detach(), which allow for manual control of the backing store. This is
+// currently required for use in the HandleScopeImplementer. Any other class
+// should just use a std::vector.
+template <typename T>
+class DetachableVector : public DetachableVectorBase {
+ public:
+ DetachableVector() = default;
+ ~DetachableVector() { delete[] data(); }
void push_back(const T& value) {
- ensureAttached();
- vector_->push_back(value);
+ if (size_ == capacity_) {
+ size_t new_capacity = std::max(kMinimumCapacity, 2 * capacity_);
+ Resize(new_capacity);
+ }
+
+ data()[size_] = value;
+ ++size_;
}
// Free the backing store and clear our reference to it.
void free() {
- delete vector_;
- vector_ = nullptr;
+ delete[] data();
+ data_ = nullptr;
+ capacity_ = 0;
+ size_ = 0;
}
- // Clear our reference to the backing store. Does not delete it!
- void detach() { vector_ = nullptr; }
-
- T& at(typename std::vector<T>::size_type i) const { return vector_->at(i); }
-
- T& back() const { return vector_->back(); }
-
- T& front() const { return vector_->front(); }
-
- void pop_back() { vector_->pop_back(); }
-
- typename std::vector<T>::size_type size() const {
- if (vector_) return vector_->size();
- return 0;
+ T& at(size_t i) const {
+ DCHECK_LT(i, size_);
+ return data()[i];
}
+ T& back() const { return at(size_ - 1); }
+ T& front() const { return at(0); }
- bool empty() const {
- if (vector_) return vector_->empty();
- return true;
+ void shrink_to_fit() {
+ size_t new_capacity = std::max(size_, kMinimumCapacity);
+ if (new_capacity < capacity_ / 2) {
+ Resize(new_capacity);
+ }
}
private:
- std::vector<T>* vector_;
+ T* data() const { return static_cast<T*>(data_); }
- // Attach a vector backing store if not present.
- void ensureAttached() {
- if (vector_ == nullptr) {
- vector_ = new std::vector<T>();
- }
+ void Resize(size_t new_capacity) {
+ DCHECK_LE(size_, new_capacity);
+ T* new_data_ = new T[new_capacity];
+
+ std::copy(data(), data() + size_, new_data_);
+ delete[] data();
+
+ data_ = new_data_;
+ capacity_ = new_capacity;
}
};
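A short usage sketch for the rewritten container, assuming the header above. The growth policy doubles capacity from a floor of kMinimumCapacity (8); free() releases the backing store, while detach() merely forgets it:

    v8::internal::DetachableVector<int> v;
    for (int i = 0; i < 20; ++i) v.push_back(i);  // capacity grows 8 -> 16 -> 32
    DCHECK_EQ(20u, v.size());
    DCHECK_EQ(7, v.at(7));
    v.free();          // delete[]s the array, resetting size and capacity
    DCHECK(v.empty());
    // v.detach() would instead only null out the internal pointer; that is
    // safe solely when ownership of the allocation was handed off elsewhere,
    // as HandleScopeImplementer does.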
diff --git a/deps/v8/src/disasm.h b/deps/v8/src/disasm.h
index 81a0055cc5..adba1897de 100644
--- a/deps/v8/src/disasm.h
+++ b/deps/v8/src/disasm.h
@@ -24,8 +24,8 @@ class NameConverter {
virtual const char* NameOfConstant(byte* addr) const;
virtual const char* NameInCode(byte* addr) const;
- // Given a root-relative offset, returns either a name or nullptr if none is
- // found.
+ // Given a root-register-relative offset, returns either a name or nullptr if
+ // none is found.
// TODO(jgruber,v8:7989): This is a temporary solution until we can preserve
// code comments through snapshotting.
virtual const char* RootRelativeName(int offset) const { UNREACHABLE(); }
diff --git a/deps/v8/src/disassembler.cc b/deps/v8/src/disassembler.cc
index 0bb59ec0fd..71e2b58530 100644
--- a/deps/v8/src/disassembler.cc
+++ b/deps/v8/src/disassembler.cc
@@ -9,15 +9,16 @@
#include <vector>
#include "src/assembler-inl.h"
+#include "src/code-comments.h"
#include "src/code-reference.h"
-#include "src/code-stubs.h"
#include "src/debug/debug.h"
#include "src/deoptimizer.h"
#include "src/disasm.h"
#include "src/ic/ic.h"
-#include "src/instruction-stream.h"
+#include "src/isolate-data.h"
#include "src/macro-assembler.h"
#include "src/objects-inl.h"
+#include "src/snapshot/embedded-data.h"
#include "src/snapshot/serializer-common.h"
#include "src/string-stream.h"
#include "src/wasm/wasm-code-manager.h"
@@ -55,18 +56,17 @@ class V8NameConverter: public disasm::NameConverter {
void V8NameConverter::InitExternalRefsCache() const {
ExternalReferenceTable* external_reference_table =
- isolate_->heap()->external_reference_table();
+ isolate_->external_reference_table();
if (!external_reference_table->is_initialized()) return;
base::AddressRegion addressable_region =
isolate_->root_register_addressable_region();
- Address roots_start =
- reinterpret_cast<Address>(isolate_->heap()->roots_array_start());
+ Address isolate_root = isolate_->isolate_root();
- for (uint32_t i = 0; i < external_reference_table->size(); i++) {
+ for (uint32_t i = 0; i < ExternalReferenceTable::kSize; i++) {
Address address = external_reference_table->address(i);
if (addressable_region.contains(address)) {
- int offset = static_cast<int>(address - roots_start);
+ int offset = static_cast<int>(address - isolate_root);
const char* name = external_reference_table->name(i);
directly_accessed_external_refs_.insert({offset, name});
}
@@ -116,54 +116,51 @@ const char* V8NameConverter::NameInCode(byte* addr) const {
const char* V8NameConverter::RootRelativeName(int offset) const {
if (isolate_ == nullptr) return nullptr;
- const int kRootsStart = 0;
- const int kRootsEnd = Heap::roots_to_external_reference_table_offset();
- const int kExtRefsStart = kRootsEnd;
- const int kExtRefsEnd = Heap::roots_to_builtins_offset();
- const int kBuiltinsStart = kExtRefsEnd;
- const int kBuiltinsEnd =
- kBuiltinsStart + Builtins::builtin_count * kPointerSize;
+ const int kRootsTableStart = IsolateData::roots_table_offset();
+ const unsigned kRootsTableSize = sizeof(RootsTable);
+ const int kExtRefsTableStart = IsolateData::external_reference_table_offset();
+ const unsigned kExtRefsTableSize = ExternalReferenceTable::kSizeInBytes;
+ const int kBuiltinsTableStart = IsolateData::builtins_table_offset();
+ const unsigned kBuiltinsTableSize =
+ Builtins::builtin_count * kSystemPointerSize;
- if (kRootsStart <= offset && offset < kRootsEnd) {
- uint32_t offset_in_roots_table = offset - kRootsStart;
+ if (static_cast<unsigned>(offset - kRootsTableStart) < kRootsTableSize) {
+ uint32_t offset_in_roots_table = offset - kRootsTableStart;
// Fail safe in the unlikely case of an arbitrary root-relative offset.
- if (offset_in_roots_table % kPointerSize != 0) return nullptr;
+ if (offset_in_roots_table % kSystemPointerSize != 0) return nullptr;
RootIndex root_index =
- static_cast<RootIndex>(offset_in_roots_table / kPointerSize);
+ static_cast<RootIndex>(offset_in_roots_table / kSystemPointerSize);
- HeapStringAllocator allocator;
- StringStream accumulator(&allocator);
- isolate_->heap()->root(root_index)->ShortPrint(&accumulator);
- std::unique_ptr<char[]> obj_name = accumulator.ToCString();
-
- SNPrintF(v8_buffer_, "root (%s)", obj_name.get());
+ SNPrintF(v8_buffer_, "root (%s)", RootsTable::name(root_index));
return v8_buffer_.start();
- } else if (kExtRefsStart <= offset && offset < kExtRefsEnd) {
- uint32_t offset_in_extref_table = offset - kExtRefsStart;
+ } else if (static_cast<unsigned>(offset - kExtRefsTableStart) <
+ kExtRefsTableSize) {
+ uint32_t offset_in_extref_table = offset - kExtRefsTableStart;
// Fail safe in the unlikely case of an arbitrary root-relative offset.
- if (offset_in_extref_table % ExternalReferenceTable::EntrySize() != 0) {
+ if (offset_in_extref_table % ExternalReferenceTable::kEntrySize != 0) {
return nullptr;
}
// Likewise if the external reference table is uninitialized.
- if (!isolate_->heap()->external_reference_table()->is_initialized()) {
+ if (!isolate_->external_reference_table()->is_initialized()) {
return nullptr;
}
SNPrintF(v8_buffer_, "external reference (%s)",
- isolate_->heap()->external_reference_table()->NameFromOffset(
+ isolate_->external_reference_table()->NameFromOffset(
offset_in_extref_table));
return v8_buffer_.start();
- } else if (kBuiltinsStart <= offset && offset < kBuiltinsEnd) {
- uint32_t offset_in_builtins_table = (offset - kBuiltinsStart);
+ } else if (static_cast<unsigned>(offset - kBuiltinsTableStart) <
+ kBuiltinsTableSize) {
+ uint32_t offset_in_builtins_table = (offset - kBuiltinsTableStart);
- Builtins::Name builtin_id =
- static_cast<Builtins::Name>(offset_in_builtins_table / kPointerSize);
+ Builtins::Name builtin_id = static_cast<Builtins::Name>(
+ offset_in_builtins_table / kSystemPointerSize);
const char* name = Builtins::name(builtin_id);
SNPrintF(v8_buffer_, "builtin (%s)", name);
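The three rewritten guards use a standard trick: with unsigned arithmetic, the single comparison static_cast<unsigned>(offset - start) < size covers both bounds, because an offset below start wraps around to a huge unsigned value. A hedged, self-contained illustration:

    constexpr bool InRange(int offset, int start, unsigned size) {
      return static_cast<unsigned>(offset - start) < size;
    }
    static_assert(InRange(12, 8, 8), "12 lies in [8, 16)");
    static_assert(!InRange(4, 8, 8), "below start wraps to a huge value");
    static_assert(!InRange(16, 8, 8), "one past the end is excluded");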
@@ -235,24 +232,15 @@ static void PrintRelocInfo(StringBuilder* out, Isolate* isolate,
out->AddFormatted(" ;; external reference (%s)", reference_name);
} else if (RelocInfo::IsCodeTargetMode(rmode)) {
out->AddFormatted(" ;; code:");
- Code* code = isolate->heap()->GcSafeFindCodeForInnerPointer(
+ Code code = isolate->heap()->GcSafeFindCodeForInnerPointer(
relocinfo->target_address());
Code::Kind kind = code->kind();
- if (kind == Code::STUB) {
- // Get the STUB key and extract major and minor key.
- uint32_t key = code->stub_key();
- uint32_t minor_key = CodeStub::MinorKeyFromKey(key);
- CodeStub::Major major_key = CodeStub::GetMajorKey(code);
- DCHECK(major_key == CodeStub::MajorKeyFromKey(key));
- out->AddFormatted(" %s, %s, ", Code::Kind2String(kind),
- CodeStub::MajorName(major_key));
- out->AddFormatted("minor: %d", minor_key);
- } else if (code->is_builtin()) {
+ if (code->is_builtin()) {
out->AddFormatted(" Builtin::%s", Builtins::name(code->builtin_index()));
} else {
out->AddFormatted(" %s", Code::Kind2String(kind));
}
- } else if (RelocInfo::IsWasmStubCall(rmode) && !isolate) {
+ } else if (RelocInfo::IsWasmStubCall(rmode) && host.is_wasm_code()) {
// Host is isolate-independent, try wasm native module instead.
wasm::WasmCode* code = host.as_wasm_code()->native_module()->Lookup(
relocinfo->wasm_stub_call_address());
@@ -263,9 +251,8 @@ static void PrintRelocInfo(StringBuilder* out, Isolate* isolate,
Address addr = relocinfo->target_address();
DeoptimizeKind type;
if (Deoptimizer::IsDeoptimizationEntry(isolate, addr, &type)) {
- int id = relocinfo->GetDeoptimizationId(isolate, type);
- out->AddFormatted(" ;; %s deoptimization bailout %d",
- Deoptimizer::MessageFor(type), id);
+ out->AddFormatted(" ;; %s deoptimization bailout",
+ Deoptimizer::MessageFor(type));
} else {
out->AddFormatted(" ;; %s", RelocInfo::RelocModeName(rmode));
}
@@ -278,6 +265,7 @@ static int DecodeIt(Isolate* isolate, ExternalReferenceEncoder* ref_encoder,
std::ostream* os, CodeReference code,
const V8NameConverter& converter, byte* begin, byte* end,
Address current_pc) {
+ CHECK(!code.is_null());
v8::internal::EmbeddedVector<char, 128> decode_buffer;
v8::internal::EmbeddedVector<char, kOutBufferSize> out_buffer;
StringBuilder out(out_buffer.start(), out_buffer.length());
@@ -285,7 +273,11 @@ static int DecodeIt(Isolate* isolate, ExternalReferenceEncoder* ref_encoder,
disasm::Disassembler d(converter,
disasm::Disassembler::kContinueOnUnimplementedOpcode);
RelocIterator* it = nullptr;
- if (!code.is_null()) {
+ CodeCommentsIterator cit(code.code_comments());
+ // Relocation exists if we either have no isolate (wasm code),
+ // or we have an isolate and it is not an off-heap instruction stream.
+ if (!isolate ||
+ !InstructionStream::PcIsOffHeap(isolate, bit_cast<Address>(begin))) {
it = new RelocIterator(code);
} else {
// No relocation information when printing code stubs.
@@ -331,19 +323,18 @@ static int DecodeIt(Isolate* isolate, ExternalReferenceEncoder* ref_encoder,
std::vector<intptr_t> datas;
if (it != nullptr) {
while (!it->done() && it->rinfo()->pc() < reinterpret_cast<Address>(pc)) {
- if (RelocInfo::IsComment(it->rinfo()->rmode())) {
- // For comments just collect the text.
- comments.push_back(
- reinterpret_cast<const char*>(it->rinfo()->data()));
- } else {
- // For other reloc info collect all data.
- pcs.push_back(it->rinfo()->pc());
- rmodes.push_back(it->rinfo()->rmode());
- datas.push_back(it->rinfo()->data());
- }
+ // Collect all data.
+ pcs.push_back(it->rinfo()->pc());
+ rmodes.push_back(it->rinfo()->rmode());
+ datas.push_back(it->rinfo()->data());
it->next();
}
}
+ while (cit.HasCurrent() &&
+ cit.GetPCOffset() < static_cast<Address>(pc - begin)) {
+ comments.push_back(cit.GetComment());
+ cit.Next();
+ }
// Comments.
for (size_t i = 0; i < comments.size(); i++) {
@@ -368,7 +359,7 @@ static int DecodeIt(Isolate* isolate, ExternalReferenceEncoder* ref_encoder,
const CodeReference& host = code;
Address constant_pool =
host.is_null() ? kNullAddress : host.constant_pool();
- RelocInfo relocinfo(pcs[i], rmodes[i], datas[i], nullptr, constant_pool);
+ RelocInfo relocinfo(pcs[i], rmodes[i], datas[i], Code(), constant_pool);
bool first_reloc_info = (i == 0);
PrintRelocInfo(&out, isolate, ref_encoder, os, code, &relocinfo,
@@ -379,8 +370,9 @@ static int DecodeIt(Isolate* isolate, ExternalReferenceEncoder* ref_encoder,
// already, check if we can find some RelocInfo for the target address in
// the constant pool.
if (pcs.empty() && !code.is_null()) {
- RelocInfo dummy_rinfo(reinterpret_cast<Address>(prev_pc), RelocInfo::NONE,
- 0, nullptr);
+ RelocInfo dummy_rinfo(reinterpret_cast<Address>(prev_pc),
+ RelocInfo::NONE,
+ 0, Code());
if (dummy_rinfo.IsInConstantPool()) {
Address constant_pool_entry_address =
dummy_rinfo.constant_pool_entry_address();
@@ -406,14 +398,11 @@ static int DecodeIt(Isolate* isolate, ExternalReferenceEncoder* ref_encoder,
}
// Emit comments following the last instruction (if any).
- if (it != nullptr) {
- for ( ; !it->done(); it->next()) {
- if (RelocInfo::IsComment(it->rinfo()->rmode())) {
- out.AddFormatted(" %s",
- reinterpret_cast<const char*>(it->rinfo()->data()));
- DumpBuffer(os, &out);
- }
- }
+ while (cit.HasCurrent() &&
+ cit.GetPCOffset() < static_cast<Address>(pc - begin)) {
+ out.AddFormatted(" %s", cit.GetComment());
+ DumpBuffer(os, &out);
+ cit.Next();
}
delete it;
@@ -423,19 +412,16 @@ static int DecodeIt(Isolate* isolate, ExternalReferenceEncoder* ref_encoder,
int Disassembler::Decode(Isolate* isolate, std::ostream* os, byte* begin,
byte* end, CodeReference code, Address current_pc) {
V8NameConverter v8NameConverter(isolate, code);
- bool decode_off_heap = isolate && InstructionStream::PcIsOffHeap(
- isolate, bit_cast<Address>(begin));
- CodeReference code_ref = decode_off_heap ? CodeReference() : code;
if (isolate) {
// We have an isolate, so support external reference names.
SealHandleScope shs(isolate);
DisallowHeapAllocation no_alloc;
ExternalReferenceEncoder ref_encoder(isolate);
- return DecodeIt(isolate, &ref_encoder, os, code_ref, v8NameConverter, begin,
+ return DecodeIt(isolate, &ref_encoder, os, code, v8NameConverter, begin,
end, current_pc);
} else {
// No isolate => isolate-independent code. No external reference names.
- return DecodeIt(nullptr, nullptr, os, code_ref, v8NameConverter, begin, end,
+ return DecodeIt(nullptr, nullptr, os, code, v8NameConverter, begin, end,
current_pc);
}
}
diff --git a/deps/v8/src/eh-frame.cc b/deps/v8/src/eh-frame.cc
index 7b1addb4f1..9e98a15550 100644
--- a/deps/v8/src/eh-frame.cc
+++ b/deps/v8/src/eh-frame.cc
@@ -239,7 +239,7 @@ void EhFrameWriter::WritePaddingToAlignedSize(int unpadded_size) {
DCHECK_EQ(writer_state_, InternalState::kInitialized);
DCHECK_GE(unpadded_size, 0);
- int padding_size = RoundUp(unpadded_size, kPointerSize) - unpadded_size;
+ int padding_size = RoundUp(unpadded_size, kSystemPointerSize) - unpadded_size;
byte nop = static_cast<byte>(EhFrameConstants::DwarfOpcodes::kNop);
static const byte kPadding[] = {nop, nop, nop, nop, nop, nop, nop, nop};
diff --git a/deps/v8/src/eh-frame.h b/deps/v8/src/eh-frame.h
index 5aa61520bb..1b4e647058 100644
--- a/deps/v8/src/eh-frame.h
+++ b/deps/v8/src/eh-frame.h
@@ -7,7 +7,8 @@
#include "src/base/compiler-specific.h"
#include "src/globals.h"
-#include "src/macro-assembler.h"
+#include "src/register-arch.h"
+#include "src/v8memory.h"
#include "src/zone/zone-containers.h"
namespace v8 {
diff --git a/deps/v8/src/elements-inl.h b/deps/v8/src/elements-inl.h
index 68099d2f2c..a52ccf66ba 100644
--- a/deps/v8/src/elements-inl.h
+++ b/deps/v8/src/elements-inl.h
@@ -27,7 +27,7 @@ inline MaybeHandle<FixedArray> ElementsAccessor::PrependElementIndices(
keys, convert, filter);
}
-inline bool ElementsAccessor::HasElement(JSObject* holder, uint32_t index,
+inline bool ElementsAccessor::HasElement(JSObject holder, uint32_t index,
PropertyFilter filter) {
return HasElement(holder, index, holder->elements(), filter);
}
diff --git a/deps/v8/src/elements-kind.cc b/deps/v8/src/elements-kind.cc
index eedf74e49c..d75852de70 100644
--- a/deps/v8/src/elements-kind.cc
+++ b/deps/v8/src/elements-kind.cc
@@ -41,7 +41,7 @@ int ElementsKindToShiftSize(ElementsKind elements_kind) {
case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
case FAST_STRING_WRAPPER_ELEMENTS:
case SLOW_STRING_WRAPPER_ELEMENTS:
- return kPointerSizeLog2;
+ return kTaggedSizeLog2;
case NO_ELEMENTS:
UNREACHABLE();
}
@@ -68,48 +68,32 @@ const char* ElementsKindToString(ElementsKind kind) {
return accessor->name();
}
-
-struct InitializeFastElementsKindSequence {
- static void Construct(void* fast_elements_kind_sequence_ptr_arg) {
- auto fast_elements_kind_sequence_ptr =
- reinterpret_cast<ElementsKind**>(fast_elements_kind_sequence_ptr_arg);
- ElementsKind* fast_elements_kind_sequence =
- new ElementsKind[kFastElementsKindCount];
- *fast_elements_kind_sequence_ptr = fast_elements_kind_sequence;
- STATIC_ASSERT(PACKED_SMI_ELEMENTS == FIRST_FAST_ELEMENTS_KIND);
- fast_elements_kind_sequence[0] = PACKED_SMI_ELEMENTS;
- fast_elements_kind_sequence[1] = HOLEY_SMI_ELEMENTS;
- fast_elements_kind_sequence[2] = PACKED_DOUBLE_ELEMENTS;
- fast_elements_kind_sequence[3] = HOLEY_DOUBLE_ELEMENTS;
- fast_elements_kind_sequence[4] = PACKED_ELEMENTS;
- fast_elements_kind_sequence[5] = HOLEY_ELEMENTS;
-
- // Verify that kFastElementsKindPackedToHoley is correct.
- STATIC_ASSERT(PACKED_SMI_ELEMENTS + kFastElementsKindPackedToHoley ==
- HOLEY_SMI_ELEMENTS);
- STATIC_ASSERT(PACKED_DOUBLE_ELEMENTS + kFastElementsKindPackedToHoley ==
- HOLEY_DOUBLE_ELEMENTS);
- STATIC_ASSERT(PACKED_ELEMENTS + kFastElementsKindPackedToHoley ==
- HOLEY_ELEMENTS);
- }
+ElementsKind kFastElementsKindSequence[kFastElementsKindCount] = {
+ PACKED_SMI_ELEMENTS, // 0
+ HOLEY_SMI_ELEMENTS, // 1
+ PACKED_DOUBLE_ELEMENTS, // 2
+ HOLEY_DOUBLE_ELEMENTS, // 3
+ PACKED_ELEMENTS, // 4
+ HOLEY_ELEMENTS // 5
};
-
-
-static base::LazyInstance<ElementsKind*,
- InitializeFastElementsKindSequence>::type
- fast_elements_kind_sequence = LAZY_INSTANCE_INITIALIZER;
-
+STATIC_ASSERT(PACKED_SMI_ELEMENTS == FIRST_FAST_ELEMENTS_KIND);
+// Verify that kFastElementsKindPackedToHoley is correct.
+STATIC_ASSERT(PACKED_SMI_ELEMENTS + kFastElementsKindPackedToHoley ==
+ HOLEY_SMI_ELEMENTS);
+STATIC_ASSERT(PACKED_DOUBLE_ELEMENTS + kFastElementsKindPackedToHoley ==
+ HOLEY_DOUBLE_ELEMENTS);
+STATIC_ASSERT(PACKED_ELEMENTS + kFastElementsKindPackedToHoley ==
+ HOLEY_ELEMENTS);
ElementsKind GetFastElementsKindFromSequenceIndex(int sequence_number) {
DCHECK(sequence_number >= 0 &&
sequence_number < kFastElementsKindCount);
- return fast_elements_kind_sequence.Get()[sequence_number];
+ return kFastElementsKindSequence[sequence_number];
}
-
int GetSequenceIndexFromFastElementsKind(ElementsKind elements_kind) {
for (int i = 0; i < kFastElementsKindCount; ++i) {
- if (fast_elements_kind_sequence.Get()[i] == elements_kind) {
+ if (kFastElementsKindSequence[i] == elements_kind) {
return i;
}
}
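GetFastElementsKindFromSequenceIndex and GetSequenceIndexFromFastElementsKind are inverse lookups over the kFastElementsKindSequence table above; the latter is a linear scan, which is fine for six entries. A hedged sanity-check sketch of the round trip, assuming the two functions above:

    for (int i = 0; i < kFastElementsKindCount; ++i) {
      DCHECK_EQ(i, GetSequenceIndexFromFastElementsKind(
                       GetFastElementsKindFromSequenceIndex(i)));
    }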
diff --git a/deps/v8/src/elements-kind.h b/deps/v8/src/elements-kind.h
index 473c4ebd85..5c14b0c48d 100644
--- a/deps/v8/src/elements-kind.h
+++ b/deps/v8/src/elements-kind.h
@@ -68,15 +68,22 @@ enum ElementsKind : uint8_t {
LAST_FAST_ELEMENTS_KIND = HOLEY_DOUBLE_ELEMENTS,
FIRST_FIXED_TYPED_ARRAY_ELEMENTS_KIND = UINT8_ELEMENTS,
LAST_FIXED_TYPED_ARRAY_ELEMENTS_KIND = BIGINT64_ELEMENTS,
- TERMINAL_FAST_ELEMENTS_KIND = HOLEY_ELEMENTS
+ TERMINAL_FAST_ELEMENTS_KIND = HOLEY_ELEMENTS,
+
+// Alias for kSystemPointerSize-sized elements
+#ifdef V8_COMPRESS_POINTERS
+ SYSTEM_POINTER_ELEMENTS = PACKED_DOUBLE_ELEMENTS,
+#else
+ SYSTEM_POINTER_ELEMENTS = PACKED_ELEMENTS,
+#endif
};
-const int kElementsKindCount = LAST_ELEMENTS_KIND - FIRST_ELEMENTS_KIND + 1;
-const int kFastElementsKindCount =
+constexpr int kElementsKindCount = LAST_ELEMENTS_KIND - FIRST_ELEMENTS_KIND + 1;
+constexpr int kFastElementsKindCount =
LAST_FAST_ELEMENTS_KIND - FIRST_FAST_ELEMENTS_KIND + 1;
// The number to add to a packed elements kind to reach a holey elements kind
-const int kFastElementsKindPackedToHoley =
+constexpr int kFastElementsKindPackedToHoley =
HOLEY_SMI_ELEMENTS - PACKED_SMI_ELEMENTS;
int ElementsKindToShiftSize(ElementsKind elements_kind);
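The SYSTEM_POINTER_ELEMENTS alias picks whichever elements kind stores kSystemPointerSize-wide slots: with pointer compression, tagged slots shrink to half a pointer, so the 8-byte double layout becomes the pointer-sized one. A hedged illustration of the invariant (assuming V8's kTaggedSize and kSystemPointerSize constants):

    #ifdef V8_COMPRESS_POINTERS
    static_assert(kTaggedSize == kSystemPointerSize / 2,
                  "compressed tagged slots are half a pointer wide");
    #else
    static_assert(kTaggedSize == kSystemPointerSize,
                  "uncompressed tagged slots are pointer-sized");
    #endif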
diff --git a/deps/v8/src/elements.cc b/deps/v8/src/elements.cc
index 5fad30711d..5f97a2f24d 100644
--- a/deps/v8/src/elements.cc
+++ b/deps/v8/src/elements.cc
@@ -11,12 +11,14 @@
#include "src/heap/heap-write-barrier-inl.h"
#include "src/isolate-inl.h"
#include "src/keys.h"
-#include "src/messages.h"
+#include "src/message-template.h"
#include "src/objects-inl.h"
#include "src/objects/arguments-inl.h"
#include "src/objects/hash-table-inl.h"
#include "src/objects/js-array-buffer-inl.h"
#include "src/objects/js-array-inl.h"
+#include "src/objects/slots-atomic-inl.h"
+#include "src/objects/slots.h"
#include "src/utils.h"
// Each concrete ElementsAccessor can handle exactly one ElementsKind,
@@ -129,9 +131,9 @@ WriteBarrierMode GetWriteBarrierMode(ElementsKind kind) {
return UPDATE_WRITE_BARRIER;
}
-void CopyObjectToObjectElements(Isolate* isolate, FixedArrayBase* from_base,
+void CopyObjectToObjectElements(Isolate* isolate, FixedArrayBase from_base,
ElementsKind from_kind, uint32_t from_start,
- FixedArrayBase* to_base, ElementsKind to_kind,
+ FixedArrayBase to_base, ElementsKind to_kind,
uint32_t to_start, int raw_copy_size) {
ReadOnlyRoots roots(isolate);
DCHECK(to_base->map() != roots.fixed_cow_array_map());
@@ -146,16 +148,16 @@ void CopyObjectToObjectElements(Isolate* isolate, FixedArrayBase* from_base,
int start = to_start + copy_size;
int length = to_base->length() - start;
if (length > 0) {
- MemsetPointer(FixedArray::cast(to_base)->data_start() + start,
- roots.the_hole_value(), length);
+ MemsetTagged(FixedArray::cast(to_base)->RawFieldOfElementAt(start),
+ roots.the_hole_value(), length);
}
}
}
DCHECK((copy_size + static_cast<int>(to_start)) <= to_base->length() &&
(copy_size + static_cast<int>(from_start)) <= from_base->length());
if (copy_size == 0) return;
- FixedArray* from = FixedArray::cast(from_base);
- FixedArray* to = FixedArray::cast(to_base);
+ FixedArray from = FixedArray::cast(from_base);
+ FixedArray to = FixedArray::cast(to_base);
DCHECK(IsSmiOrObjectElementsKind(from_kind));
DCHECK(IsSmiOrObjectElementsKind(to_kind));
@@ -164,17 +166,17 @@ void CopyObjectToObjectElements(Isolate* isolate, FixedArrayBase* from_base,
? UPDATE_WRITE_BARRIER
: SKIP_WRITE_BARRIER;
for (int i = 0; i < copy_size; i++) {
- Object* value = from->get(from_start + i);
+ Object value = from->get(from_start + i);
to->set(to_start + i, value, write_barrier_mode);
}
}
static void CopyDictionaryToObjectElements(
- Isolate* isolate, FixedArrayBase* from_base, uint32_t from_start,
- FixedArrayBase* to_base, ElementsKind to_kind, uint32_t to_start,
+ Isolate* isolate, FixedArrayBase from_base, uint32_t from_start,
+ FixedArrayBase to_base, ElementsKind to_kind, uint32_t to_start,
int raw_copy_size) {
DisallowHeapAllocation no_allocation;
- NumberDictionary* from = NumberDictionary::cast(from_base);
+ NumberDictionary from = NumberDictionary::cast(from_base);
int copy_size = raw_copy_size;
if (raw_copy_size < 0) {
DCHECK(raw_copy_size == ElementsAccessor::kCopyToEnd ||
@@ -184,15 +186,15 @@ static void CopyDictionaryToObjectElements(
int start = to_start + copy_size;
int length = to_base->length() - start;
if (length > 0) {
- MemsetPointer(FixedArray::cast(to_base)->data_start() + start,
- ReadOnlyRoots(isolate).the_hole_value(), length);
+ MemsetTagged(FixedArray::cast(to_base)->RawFieldOfElementAt(start),
+ ReadOnlyRoots(isolate).the_hole_value(), length);
}
}
}
DCHECK(to_base != from_base);
DCHECK(IsSmiOrObjectElementsKind(to_kind));
if (copy_size == 0) return;
- FixedArray* to = FixedArray::cast(to_base);
+ FixedArray to = FixedArray::cast(to_base);
uint32_t to_length = to->length();
if (to_start + copy_size > to_length) {
copy_size = to_length - to_start;
@@ -201,7 +203,7 @@ static void CopyDictionaryToObjectElements(
for (int i = 0; i < copy_size; i++) {
int entry = from->FindEntry(isolate, i + from_start);
if (entry != NumberDictionary::kNotFound) {
- Object* value = from->ValueAt(entry);
+ Object value = from->ValueAt(entry);
DCHECK(!value->IsTheHole(isolate));
to->set(i + to_start, value, write_barrier_mode);
} else {
@@ -210,14 +212,13 @@ static void CopyDictionaryToObjectElements(
}
}
-
// NOTE: this method violates the handlified function signature convention:
// raw pointer parameters in the function that allocates.
// See ElementsAccessorBase::CopyElements() for details.
static void CopyDoubleToObjectElements(Isolate* isolate,
- FixedArrayBase* from_base,
+ FixedArrayBase from_base,
uint32_t from_start,
- FixedArrayBase* to_base,
+ FixedArrayBase to_base,
uint32_t to_start, int raw_copy_size) {
int copy_size = raw_copy_size;
if (raw_copy_size < 0) {
@@ -233,8 +234,8 @@ static void CopyDoubleToObjectElements(Isolate* isolate,
int start = to_start;
int length = to_base->length() - start;
if (length > 0) {
- MemsetPointer(FixedArray::cast(to_base)->data_start() + start,
- ReadOnlyRoots(isolate).the_hole_value(), length);
+ MemsetTagged(FixedArray::cast(to_base)->RawFieldOfElementAt(start),
+ ReadOnlyRoots(isolate).the_hole_value(), length);
}
}
}
@@ -263,10 +264,9 @@ static void CopyDoubleToObjectElements(Isolate* isolate,
}
}
-
-static void CopyDoubleToDoubleElements(FixedArrayBase* from_base,
+static void CopyDoubleToDoubleElements(FixedArrayBase from_base,
uint32_t from_start,
- FixedArrayBase* to_base,
+ FixedArrayBase to_base,
uint32_t to_start, int raw_copy_size) {
DisallowHeapAllocation no_allocation;
int copy_size = raw_copy_size;
@@ -284,23 +284,20 @@ static void CopyDoubleToDoubleElements(FixedArrayBase* from_base,
DCHECK((copy_size + static_cast<int>(to_start)) <= to_base->length() &&
(copy_size + static_cast<int>(from_start)) <= from_base->length());
if (copy_size == 0) return;
- FixedDoubleArray* from = FixedDoubleArray::cast(from_base);
- FixedDoubleArray* to = FixedDoubleArray::cast(to_base);
+ FixedDoubleArray from = FixedDoubleArray::cast(from_base);
+ FixedDoubleArray to = FixedDoubleArray::cast(to_base);
Address to_address = to->address() + FixedDoubleArray::kHeaderSize;
Address from_address = from->address() + FixedDoubleArray::kHeaderSize;
to_address += kDoubleSize * to_start;
from_address += kDoubleSize * from_start;
- int words_per_double = (kDoubleSize / kPointerSize);
- CopyWords(reinterpret_cast<Object**>(to_address),
- reinterpret_cast<Object**>(from_address),
+ int words_per_double = (kDoubleSize / kSystemPointerSize);
+ CopyWords(to_address, from_address,
static_cast<size_t>(words_per_double * copy_size));
}
-
-static void CopySmiToDoubleElements(FixedArrayBase* from_base,
- uint32_t from_start,
- FixedArrayBase* to_base, uint32_t to_start,
- int raw_copy_size) {
+static void CopySmiToDoubleElements(FixedArrayBase from_base,
+ uint32_t from_start, FixedArrayBase to_base,
+ uint32_t to_start, int raw_copy_size) {
DisallowHeapAllocation no_allocation;
int copy_size = raw_copy_size;
if (raw_copy_size < 0) {
@@ -316,12 +313,12 @@ static void CopySmiToDoubleElements(FixedArrayBase* from_base,
DCHECK((copy_size + static_cast<int>(to_start)) <= to_base->length() &&
(copy_size + static_cast<int>(from_start)) <= from_base->length());
if (copy_size == 0) return;
- FixedArray* from = FixedArray::cast(from_base);
- FixedDoubleArray* to = FixedDoubleArray::cast(to_base);
- Object* the_hole = from->GetReadOnlyRoots().the_hole_value();
+ FixedArray from = FixedArray::cast(from_base);
+ FixedDoubleArray to = FixedDoubleArray::cast(to_base);
+ Object the_hole = from->GetReadOnlyRoots().the_hole_value();
for (uint32_t from_end = from_start + static_cast<uint32_t>(copy_size);
from_start < from_end; from_start++, to_start++) {
- Object* hole_or_smi = from->get(from_start);
+ Object hole_or_smi = from->get(from_start);
if (hole_or_smi == the_hole) {
to->set_the_hole(to_start);
} else {
@@ -330,10 +327,9 @@ static void CopySmiToDoubleElements(FixedArrayBase* from_base,
}
}
-
-static void CopyPackedSmiToDoubleElements(FixedArrayBase* from_base,
+static void CopyPackedSmiToDoubleElements(FixedArrayBase from_base,
uint32_t from_start,
- FixedArrayBase* to_base,
+ FixedArrayBase to_base,
uint32_t to_start, int packed_size,
int raw_copy_size) {
DisallowHeapAllocation no_allocation;
@@ -359,20 +355,19 @@ static void CopyPackedSmiToDoubleElements(FixedArrayBase* from_base,
DCHECK((copy_size + static_cast<int>(to_start)) <= to_base->length() &&
(copy_size + static_cast<int>(from_start)) <= from_base->length());
if (copy_size == 0) return;
- FixedArray* from = FixedArray::cast(from_base);
- FixedDoubleArray* to = FixedDoubleArray::cast(to_base);
+ FixedArray from = FixedArray::cast(from_base);
+ FixedDoubleArray to = FixedDoubleArray::cast(to_base);
for (uint32_t from_end = from_start + static_cast<uint32_t>(packed_size);
from_start < from_end; from_start++, to_start++) {
- Object* smi = from->get(from_start);
+ Object smi = from->get(from_start);
DCHECK(!smi->IsTheHole());
to->set(to_start, Smi::ToInt(smi));
}
}
-
-static void CopyObjectToDoubleElements(FixedArrayBase* from_base,
+static void CopyObjectToDoubleElements(FixedArrayBase from_base,
uint32_t from_start,
- FixedArrayBase* to_base,
+ FixedArrayBase to_base,
uint32_t to_start, int raw_copy_size) {
DisallowHeapAllocation no_allocation;
int copy_size = raw_copy_size;
@@ -389,12 +384,12 @@ static void CopyObjectToDoubleElements(FixedArrayBase* from_base,
DCHECK((copy_size + static_cast<int>(to_start)) <= to_base->length() &&
(copy_size + static_cast<int>(from_start)) <= from_base->length());
if (copy_size == 0) return;
- FixedArray* from = FixedArray::cast(from_base);
- FixedDoubleArray* to = FixedDoubleArray::cast(to_base);
- Object* the_hole = from->GetReadOnlyRoots().the_hole_value();
+ FixedArray from = FixedArray::cast(from_base);
+ FixedDoubleArray to = FixedDoubleArray::cast(to_base);
+ Object the_hole = from->GetReadOnlyRoots().the_hole_value();
for (uint32_t from_end = from_start + copy_size;
from_start < from_end; from_start++, to_start++) {
- Object* hole_or_object = from->get(from_start);
+ Object hole_or_object = from->get(from_start);
if (hole_or_object == the_hole) {
to->set_the_hole(to_start);
} else {
@@ -404,10 +399,10 @@ static void CopyObjectToDoubleElements(FixedArrayBase* from_base,
}
static void CopyDictionaryToDoubleElements(
- Isolate* isolate, FixedArrayBase* from_base, uint32_t from_start,
- FixedArrayBase* to_base, uint32_t to_start, int raw_copy_size) {
+ Isolate* isolate, FixedArrayBase from_base, uint32_t from_start,
+ FixedArrayBase to_base, uint32_t to_start, int raw_copy_size) {
DisallowHeapAllocation no_allocation;
- NumberDictionary* from = NumberDictionary::cast(from_base);
+ NumberDictionary from = NumberDictionary::cast(from_base);
int copy_size = raw_copy_size;
if (copy_size < 0) {
DCHECK(copy_size == ElementsAccessor::kCopyToEnd ||
@@ -420,7 +415,7 @@ static void CopyDictionaryToDoubleElements(
}
}
if (copy_size == 0) return;
- FixedDoubleArray* to = FixedDoubleArray::cast(to_base);
+ FixedDoubleArray to = FixedDoubleArray::cast(to_base);
uint32_t to_length = to->length();
if (to_start + copy_size > to_length) {
copy_size = to_length - to_start;
@@ -443,7 +438,7 @@ static void TraceTopFrame(Isolate* isolate) {
}
StackFrame* raw_frame = it.frame();
if (raw_frame->is_internal()) {
- Code* current_code_object =
+ Code current_code_object =
isolate->heap()->GcSafeFindCodeForInnerPointer(raw_frame->pc());
if (current_code_object->builtin_index() ==
Builtins::kFunctionPrototypeApply) {
@@ -458,16 +453,15 @@ static void TraceTopFrame(Isolate* isolate) {
static void SortIndices(
Isolate* isolate, Handle<FixedArray> indices, uint32_t sort_size,
WriteBarrierMode write_barrier_mode = UPDATE_WRITE_BARRIER) {
- // Use AtomicElement wrapper to ensure that std::sort uses atomic load and
+ // Use AtomicSlot wrapper to ensure that std::sort uses atomic load and
// store operations that are safe for concurrent marking.
- base::AtomicElement<Object*>* start =
- reinterpret_cast<base::AtomicElement<Object*>*>(
- indices->GetFirstElementAddress());
+ AtomicSlot start(indices->GetFirstElementAddress());
std::sort(start, start + sort_size,
- [isolate](const base::AtomicElement<Object*>& elementA,
- const base::AtomicElement<Object*>& elementB) {
- const Object* a = elementA.value();
- const Object* b = elementB.value();
+ [isolate](Tagged_t elementA, Tagged_t elementB) {
+ // TODO(ishell): revisit the code below
+ STATIC_ASSERT(kTaggedSize == kSystemPointerSize);
+ Object a(elementA);
+ Object b(elementB);
if (a->IsSmi() || !a->IsUndefined(isolate)) {
if (!b->IsSmi() && b->IsUndefined(isolate)) {
return true;
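AtomicSlot replaces the old AtomicElement wrapper: std::sort copies and swaps elements through the iterator, so the slot's proxy reference routes every read and write through relaxed atomics, keeping each word-sized access indivisible for the concurrent marker. A reduced sketch of such a proxy (hypothetical; the real AtomicSlot is a full random-access iterator):

    #include <atomic>
    #include <cstdint>

    class SlotProxy {
     public:
      explicit SlotProxy(std::atomic<intptr_t>* cell) : cell_(cell) {}
      // Relaxed-atomic reads and writes: no ordering is promised, but a
      // concurrent reader can never observe a torn word.
      operator intptr_t() const {
        return cell_->load(std::memory_order_relaxed);
      }
      SlotProxy& operator=(intptr_t value) {
        cell_->store(value, std::memory_order_relaxed);
        return *this;
      }

     private:
      std::atomic<intptr_t>* cell_;
    };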
@@ -530,11 +524,11 @@ class InternalElementsAccessor : public ElementsAccessor {
explicit InternalElementsAccessor(const char* name)
: ElementsAccessor(name) {}
- uint32_t GetEntryForIndex(Isolate* isolate, JSObject* holder,
- FixedArrayBase* backing_store,
+ uint32_t GetEntryForIndex(Isolate* isolate, JSObject holder,
+ FixedArrayBase backing_store,
uint32_t index) override = 0;
- PropertyDetails GetDetails(JSObject* holder, uint32_t entry) override = 0;
+ PropertyDetails GetDetails(JSObject holder, uint32_t entry) override = 0;
};
// Base class for element handler implementations. Contains the
@@ -565,16 +559,16 @@ class ElementsAccessorBase : public InternalElementsAccessor {
static ElementsKind kind() { return ElementsTraits::Kind; }
- static void ValidateContents(JSObject* holder, int length) {}
+ static void ValidateContents(JSObject holder, int length) {}
- static void ValidateImpl(JSObject* holder) {
- FixedArrayBase* fixed_array_base = holder->elements();
+ static void ValidateImpl(JSObject holder) {
+ FixedArrayBase fixed_array_base = holder->elements();
if (!fixed_array_base->IsHeapObject()) return;
// Arrays that have been shifted in place can't be verified.
if (fixed_array_base->IsFiller()) return;
int length = 0;
if (holder->IsJSArray()) {
- Object* length_obj = JSArray::cast(holder)->length();
+ Object length_obj = JSArray::cast(holder)->length();
if (length_obj->IsSmi()) {
length = Smi::ToInt(length_obj);
}
@@ -584,12 +578,12 @@ class ElementsAccessorBase : public InternalElementsAccessor {
Subclass::ValidateContents(holder, length);
}
- void Validate(JSObject* holder) final {
+ void Validate(JSObject holder) final {
DisallowHeapAllocation no_gc;
Subclass::ValidateImpl(holder);
}
- static bool IsPackedImpl(JSObject* holder, FixedArrayBase* backing_store,
+ static bool IsPackedImpl(JSObject holder, FixedArrayBase backing_store,
uint32_t start, uint32_t end) {
DisallowHeapAllocation no_gc;
if (IsFastPackedElementsKind(kind())) return true;
@@ -620,35 +614,34 @@ class ElementsAccessorBase : public InternalElementsAccessor {
}
}
- bool HasElement(JSObject* holder, uint32_t index,
- FixedArrayBase* backing_store, PropertyFilter filter) final {
+ bool HasElement(JSObject holder, uint32_t index, FixedArrayBase backing_store,
+ PropertyFilter filter) final {
return Subclass::HasElementImpl(holder->GetIsolate(), holder, index,
backing_store, filter);
}
- static bool HasElementImpl(Isolate* isolate, JSObject* holder, uint32_t index,
- FixedArrayBase* backing_store,
+ static bool HasElementImpl(Isolate* isolate, JSObject holder, uint32_t index,
+ FixedArrayBase backing_store,
PropertyFilter filter = ALL_PROPERTIES) {
return Subclass::GetEntryForIndexImpl(isolate, holder, backing_store, index,
filter) != kMaxUInt32;
}
- bool HasEntry(JSObject* holder, uint32_t entry) final {
+ bool HasEntry(JSObject holder, uint32_t entry) final {
return Subclass::HasEntryImpl(holder->GetIsolate(), holder->elements(),
entry);
}
- static bool HasEntryImpl(Isolate* isolate, FixedArrayBase* backing_store,
+ static bool HasEntryImpl(Isolate* isolate, FixedArrayBase backing_store,
uint32_t entry) {
UNIMPLEMENTED();
}
- bool HasAccessors(JSObject* holder) final {
+ bool HasAccessors(JSObject holder) final {
return Subclass::HasAccessorsImpl(holder, holder->elements());
}
- static bool HasAccessorsImpl(JSObject* holder,
- FixedArrayBase* backing_store) {
+ static bool HasAccessorsImpl(JSObject holder, FixedArrayBase backing_store) {
return false;
}
@@ -661,13 +654,13 @@ class ElementsAccessorBase : public InternalElementsAccessor {
return Subclass::GetImpl(holder->GetIsolate(), holder->elements(), entry);
}
- static Handle<Object> GetImpl(Isolate* isolate, FixedArrayBase* backing_store,
+ static Handle<Object> GetImpl(Isolate* isolate, FixedArrayBase backing_store,
uint32_t entry) {
uint32_t index = GetIndexForEntryImpl(backing_store, entry);
return handle(BackingStore::cast(backing_store)->get(index), isolate);
}
- void Set(Handle<JSObject> holder, uint32_t entry, Object* value) final {
+ void Set(Handle<JSObject> holder, uint32_t entry, Object value) final {
Subclass::SetImpl(holder, entry, value);
}
@@ -801,16 +794,16 @@ class ElementsAccessorBase : public InternalElementsAccessor {
JSObject::ValidateElements(*array);
}
- uint32_t NumberOfElements(JSObject* receiver) final {
+ uint32_t NumberOfElements(JSObject receiver) final {
return Subclass::NumberOfElementsImpl(receiver, receiver->elements());
}
- static uint32_t NumberOfElementsImpl(JSObject* receiver,
- FixedArrayBase* backing_store) {
+ static uint32_t NumberOfElementsImpl(JSObject receiver,
+ FixedArrayBase backing_store) {
UNREACHABLE();
}
- static uint32_t GetMaxIndex(JSObject* receiver, FixedArrayBase* elements) {
+ static uint32_t GetMaxIndex(JSObject receiver, FixedArrayBase elements) {
if (receiver->IsJSArray()) {
DCHECK(JSArray::cast(receiver)->length()->IsSmi());
return static_cast<uint32_t>(
@@ -819,8 +812,8 @@ class ElementsAccessorBase : public InternalElementsAccessor {
return Subclass::GetCapacityImpl(receiver, elements);
}
- static uint32_t GetMaxNumberOfEntries(JSObject* receiver,
- FixedArrayBase* elements) {
+ static uint32_t GetMaxNumberOfEntries(JSObject receiver,
+ FixedArrayBase elements) {
return Subclass::GetMaxIndex(receiver, elements);
}
@@ -980,14 +973,14 @@ class ElementsAccessorBase : public InternalElementsAccessor {
Subclass::DeleteImpl(obj, entry);
}
- static void CopyElementsImpl(Isolate* isolate, FixedArrayBase* from,
- uint32_t from_start, FixedArrayBase* to,
+ static void CopyElementsImpl(Isolate* isolate, FixedArrayBase from,
+ uint32_t from_start, FixedArrayBase to,
ElementsKind from_kind, uint32_t to_start,
int packed_size, int copy_size) {
UNREACHABLE();
}
- void CopyElements(JSObject* from_holder, uint32_t from_start,
+ void CopyElements(JSObject from_holder, uint32_t from_start,
ElementsKind from_kind, Handle<FixedArrayBase> to,
uint32_t to_start, int copy_size) final {
int packed_size = kPackedSizeNotKnown;
@@ -999,7 +992,7 @@ class ElementsAccessorBase : public InternalElementsAccessor {
packed_size = copy_size;
}
}
- FixedArrayBase* from = from_holder->elements();
+ FixedArrayBase from = from_holder->elements();
// NOTE: the Subclass::CopyElementsImpl() methods
// violate the handlified function signature convention:
// raw pointer parameters in the function that allocates. This is done
@@ -1020,27 +1013,27 @@ class ElementsAccessorBase : public InternalElementsAccessor {
0, kPackedSizeNotKnown, size);
}
- void CopyTypedArrayElementsSlice(JSTypedArray* source,
- JSTypedArray* destination, size_t start,
+ void CopyTypedArrayElementsSlice(JSTypedArray source,
+ JSTypedArray destination, size_t start,
size_t end) override {
Subclass::CopyTypedArrayElementsSliceImpl(source, destination, start, end);
}
- static void CopyTypedArrayElementsSliceImpl(JSTypedArray* source,
- JSTypedArray* destination,
+ static void CopyTypedArrayElementsSliceImpl(JSTypedArray source,
+ JSTypedArray destination,
size_t start, size_t end) {
UNREACHABLE();
}
- Object* CopyElements(Handle<Object> source, Handle<JSObject> destination,
- size_t length, uint32_t offset) final {
+ Object CopyElements(Handle<Object> source, Handle<JSObject> destination,
+ size_t length, uint32_t offset) final {
return Subclass::CopyElementsHandleImpl(source, destination, length,
offset);
}
- static Object* CopyElementsHandleImpl(Handle<Object> source,
- Handle<JSObject> destination,
- size_t length, uint32_t offset) {
+ static Object CopyElementsHandleImpl(Handle<Object> source,
+ Handle<JSObject> destination,
+ size_t length, uint32_t offset) {
UNREACHABLE();
}
@@ -1272,22 +1265,22 @@ class ElementsAccessorBase : public InternalElementsAccessor {
Subclass::AddElementsToKeyAccumulatorImpl(receiver, accumulator, convert);
}
- static uint32_t GetCapacityImpl(JSObject* holder,
- FixedArrayBase* backing_store) {
+ static uint32_t GetCapacityImpl(JSObject holder,
+ FixedArrayBase backing_store) {
return backing_store->length();
}
- uint32_t GetCapacity(JSObject* holder, FixedArrayBase* backing_store) final {
+ uint32_t GetCapacity(JSObject holder, FixedArrayBase backing_store) final {
return Subclass::GetCapacityImpl(holder, backing_store);
}
- static Object* FillImpl(Handle<JSObject> receiver, Handle<Object> obj_value,
- uint32_t start, uint32_t end) {
+ static Object FillImpl(Handle<JSObject> receiver, Handle<Object> obj_value,
+ uint32_t start, uint32_t end) {
UNREACHABLE();
}
- Object* Fill(Handle<JSObject> receiver, Handle<Object> obj_value,
- uint32_t start, uint32_t end) override {
+ Object Fill(Handle<JSObject> receiver, Handle<Object> obj_value,
+ uint32_t start, uint32_t end) override {
return Subclass::FillImpl(receiver, obj_value, start, end);
}
@@ -1331,17 +1324,17 @@ class ElementsAccessorBase : public InternalElementsAccessor {
return Subclass::LastIndexOfValueImpl(receiver, value, start_from);
}
- static void ReverseImpl(JSObject* receiver) { UNREACHABLE(); }
+ static void ReverseImpl(JSObject receiver) { UNREACHABLE(); }
- void Reverse(JSObject* receiver) final { Subclass::ReverseImpl(receiver); }
+ void Reverse(JSObject receiver) final { Subclass::ReverseImpl(receiver); }
- static uint32_t GetIndexForEntryImpl(FixedArrayBase* backing_store,
+ static uint32_t GetIndexForEntryImpl(FixedArrayBase backing_store,
uint32_t entry) {
return entry;
}
- static uint32_t GetEntryForIndexImpl(Isolate* isolate, JSObject* holder,
- FixedArrayBase* backing_store,
+ static uint32_t GetEntryForIndexImpl(Isolate* isolate, JSObject holder,
+ FixedArrayBase backing_store,
uint32_t index, PropertyFilter filter) {
DCHECK(IsFastElementsKind(kind()));
uint32_t length = Subclass::GetMaxIndex(holder, backing_store);
@@ -1356,23 +1349,23 @@ class ElementsAccessorBase : public InternalElementsAccessor {
}
}
- uint32_t GetEntryForIndex(Isolate* isolate, JSObject* holder,
- FixedArrayBase* backing_store,
+ uint32_t GetEntryForIndex(Isolate* isolate, JSObject holder,
+ FixedArrayBase backing_store,
uint32_t index) final {
return Subclass::GetEntryForIndexImpl(isolate, holder, backing_store, index,
ALL_PROPERTIES);
}
- static PropertyDetails GetDetailsImpl(FixedArrayBase* backing_store,
+ static PropertyDetails GetDetailsImpl(FixedArrayBase backing_store,
uint32_t entry) {
return PropertyDetails(kData, NONE, PropertyCellType::kNoCell);
}
- static PropertyDetails GetDetailsImpl(JSObject* holder, uint32_t entry) {
+ static PropertyDetails GetDetailsImpl(JSObject holder, uint32_t entry) {
return PropertyDetails(kData, NONE, PropertyCellType::kNoCell);
}
- PropertyDetails GetDetails(JSObject* holder, uint32_t entry) final {
+ PropertyDetails GetDetails(JSObject holder, uint32_t entry) final {
return Subclass::GetDetailsImpl(holder, entry);
}
@@ -1401,19 +1394,19 @@ class DictionaryElementsAccessor
: ElementsAccessorBase<DictionaryElementsAccessor,
ElementsKindTraits<DICTIONARY_ELEMENTS> >(name) {}
- static uint32_t GetMaxIndex(JSObject* receiver, FixedArrayBase* elements) {
+ static uint32_t GetMaxIndex(JSObject receiver, FixedArrayBase elements) {
// We cannot properly estimate this for dictionaries.
UNREACHABLE();
}
- static uint32_t GetMaxNumberOfEntries(JSObject* receiver,
- FixedArrayBase* backing_store) {
+ static uint32_t GetMaxNumberOfEntries(JSObject receiver,
+ FixedArrayBase backing_store) {
return NumberOfElementsImpl(receiver, backing_store);
}
- static uint32_t NumberOfElementsImpl(JSObject* receiver,
- FixedArrayBase* backing_store) {
- NumberDictionary* dict = NumberDictionary::cast(backing_store);
+ static uint32_t NumberOfElementsImpl(JSObject receiver,
+ FixedArrayBase backing_store) {
+ NumberDictionary dict = NumberDictionary::cast(backing_store);
return dict->NumberOfElements();
}
@@ -1433,7 +1426,7 @@ class DictionaryElementsAccessor
// Find last non-deletable element in range of elements to be
// deleted and adjust range accordingly.
for (int entry = 0; entry < capacity; entry++) {
- Object* index = dict->KeyAt(entry);
+ Object index = dict->KeyAt(entry);
if (dict->IsKey(roots, index)) {
uint32_t number = static_cast<uint32_t>(index->Number());
if (length <= number && number < old_length) {
@@ -1451,7 +1444,7 @@ class DictionaryElementsAccessor
// Remove elements that should be deleted.
int removed_entries = 0;
for (int entry = 0; entry < capacity; entry++) {
- Object* index = dict->KeyAt(entry);
+ Object index = dict->KeyAt(entry);
if (dict->IsKey(roots, index)) {
uint32_t number = static_cast<uint32_t>(index->Number());
if (length <= number && number < old_length) {
@@ -1473,8 +1466,8 @@ class DictionaryElementsAccessor
array->set_length(*length_obj);
}
- static void CopyElementsImpl(Isolate* isolate, FixedArrayBase* from,
- uint32_t from_start, FixedArrayBase* to,
+ static void CopyElementsImpl(Isolate* isolate, FixedArrayBase from,
+ uint32_t from_start, FixedArrayBase to,
ElementsKind from_kind, uint32_t to_start,
int packed_size, int copy_size) {
UNREACHABLE();
@@ -1495,7 +1488,7 @@ class DictionaryElementsAccessor
int entry_count = source_dict->Capacity();
ReadOnlyRoots roots(isolate);
for (int i = 0; i < entry_count; i++) {
- Object* key = source_dict->KeyAt(i);
+ Object key = source_dict->KeyAt(i);
if (!source_dict->ToKey(roots, i, &key)) continue;
uint64_t key_value = NumberToInt64(key);
if (key_value >= start && key_value < end) {
@@ -1519,15 +1512,14 @@ class DictionaryElementsAccessor
obj->set_elements(*dict);
}
- static bool HasAccessorsImpl(JSObject* holder,
- FixedArrayBase* backing_store) {
+ static bool HasAccessorsImpl(JSObject holder, FixedArrayBase backing_store) {
DisallowHeapAllocation no_gc;
- NumberDictionary* dict = NumberDictionary::cast(backing_store);
+ NumberDictionary dict = NumberDictionary::cast(backing_store);
if (!dict->requires_slow_elements()) return false;
int capacity = dict->Capacity();
ReadOnlyRoots roots = holder->GetReadOnlyRoots();
for (int i = 0; i < capacity; i++) {
- Object* key = dict->KeyAt(i);
+ Object key = dict->KeyAt(i);
if (!dict->IsKey(roots, key)) continue;
PropertyDetails details = dict->DetailsAt(i);
if (details.kind() == kAccessor) return true;
@@ -1535,23 +1527,23 @@ class DictionaryElementsAccessor
return false;
}
- static Object* GetRaw(FixedArrayBase* store, uint32_t entry) {
- NumberDictionary* backing_store = NumberDictionary::cast(store);
+ static Object GetRaw(FixedArrayBase store, uint32_t entry) {
+ NumberDictionary backing_store = NumberDictionary::cast(store);
return backing_store->ValueAt(entry);
}
- static Handle<Object> GetImpl(Isolate* isolate, FixedArrayBase* backing_store,
+ static Handle<Object> GetImpl(Isolate* isolate, FixedArrayBase backing_store,
uint32_t entry) {
return handle(GetRaw(backing_store, entry), isolate);
}
static inline void SetImpl(Handle<JSObject> holder, uint32_t entry,
- Object* value) {
+ Object value) {
SetImpl(holder->elements(), entry, value);
}
- static inline void SetImpl(FixedArrayBase* backing_store, uint32_t entry,
- Object* value) {
+ static inline void SetImpl(FixedArrayBase backing_store, uint32_t entry,
+ Object value) {
NumberDictionary::cast(backing_store)->ValueAtPut(entry, value);
}
@@ -1559,7 +1551,7 @@ class DictionaryElementsAccessor
Handle<FixedArrayBase> store, uint32_t entry,
Handle<Object> value,
PropertyAttributes attributes) {
- NumberDictionary* dictionary = NumberDictionary::cast(*store);
+ NumberDictionary dictionary = NumberDictionary::cast(*store);
if (attributes != NONE) object->RequireSlowElements(dictionary);
dictionary->ValueAtPut(entry, *value);
PropertyDetails details = dictionary->DetailsAt(entry);
@@ -1586,27 +1578,27 @@ class DictionaryElementsAccessor
object->set_elements(*new_dictionary);
}
- static bool HasEntryImpl(Isolate* isolate, FixedArrayBase* store,
+ static bool HasEntryImpl(Isolate* isolate, FixedArrayBase store,
uint32_t entry) {
DisallowHeapAllocation no_gc;
- NumberDictionary* dict = NumberDictionary::cast(store);
- Object* index = dict->KeyAt(entry);
+ NumberDictionary dict = NumberDictionary::cast(store);
+ Object index = dict->KeyAt(entry);
return !index->IsTheHole(isolate);
}
- static uint32_t GetIndexForEntryImpl(FixedArrayBase* store, uint32_t entry) {
+ static uint32_t GetIndexForEntryImpl(FixedArrayBase store, uint32_t entry) {
DisallowHeapAllocation no_gc;
- NumberDictionary* dict = NumberDictionary::cast(store);
+ NumberDictionary dict = NumberDictionary::cast(store);
uint32_t result = 0;
CHECK(dict->KeyAt(entry)->ToArrayIndex(&result));
return result;
}
- static uint32_t GetEntryForIndexImpl(Isolate* isolate, JSObject* holder,
- FixedArrayBase* store, uint32_t index,
+ static uint32_t GetEntryForIndexImpl(Isolate* isolate, JSObject holder,
+ FixedArrayBase store, uint32_t index,
PropertyFilter filter) {
DisallowHeapAllocation no_gc;
- NumberDictionary* dictionary = NumberDictionary::cast(store);
+ NumberDictionary dictionary = NumberDictionary::cast(store);
int entry = dictionary->FindEntry(isolate, index);
if (entry == NumberDictionary::kNotFound) return kMaxUInt32;
if (filter != ALL_PROPERTIES) {
@@ -1617,17 +1609,17 @@ class DictionaryElementsAccessor
return static_cast<uint32_t>(entry);
}
- static PropertyDetails GetDetailsImpl(JSObject* holder, uint32_t entry) {
+ static PropertyDetails GetDetailsImpl(JSObject holder, uint32_t entry) {
return GetDetailsImpl(holder->elements(), entry);
}
- static PropertyDetails GetDetailsImpl(FixedArrayBase* backing_store,
+ static PropertyDetails GetDetailsImpl(FixedArrayBase backing_store,
uint32_t entry) {
return NumberDictionary::cast(backing_store)->DetailsAt(entry);
}
static uint32_t FilterKey(Handle<NumberDictionary> dictionary, int entry,
- Object* raw_key, PropertyFilter filter) {
+ Object raw_key, PropertyFilter filter) {
DCHECK(raw_key->IsNumber());
DCHECK_LE(raw_key->Number(), kMaxUInt32);
PropertyDetails details = dictionary->DetailsAt(entry);
@@ -1640,7 +1632,7 @@ class DictionaryElementsAccessor
Handle<NumberDictionary> dictionary,
int entry, PropertyFilter filter) {
DisallowHeapAllocation no_gc;
- Object* raw_key = dictionary->KeyAt(entry);
+ Object raw_key = dictionary->KeyAt(entry);
if (!dictionary->IsKey(ReadOnlyRoots(isolate), raw_key)) return kMaxUInt32;
return FilterKey(dictionary, entry, raw_key, filter);
}
@@ -1659,7 +1651,7 @@ class DictionaryElementsAccessor
PropertyFilter filter = keys->filter();
ReadOnlyRoots roots(isolate);
for (int i = 0; i < capacity; i++) {
- Object* raw_key = dictionary->KeyAt(i);
+ Object raw_key = dictionary->KeyAt(i);
if (!dictionary->IsKey(roots, raw_key)) continue;
uint32_t key = FilterKey(dictionary, i, raw_key, filter);
if (key == kMaxUInt32) {
@@ -1706,9 +1698,9 @@ class DictionaryElementsAccessor
int capacity = dictionary->Capacity();
ReadOnlyRoots roots(isolate);
for (int i = 0; i < capacity; i++) {
- Object* k = dictionary->KeyAt(i);
+ Object k = dictionary->KeyAt(i);
if (!dictionary->IsKey(roots, k)) continue;
- Object* value = dictionary->ValueAt(i);
+ Object value = dictionary->ValueAt(i);
DCHECK(!value->IsTheHole(isolate));
DCHECK(!value->IsAccessorPair());
DCHECK(!value->IsAccessorInfo());
@@ -1720,16 +1712,16 @@ class DictionaryElementsAccessor
Handle<Object> value, uint32_t start_from,
uint32_t length, Maybe<bool>* result) {
DisallowHeapAllocation no_gc;
- NumberDictionary* dictionary = NumberDictionary::cast(receiver->elements());
+ NumberDictionary dictionary = NumberDictionary::cast(receiver->elements());
int capacity = dictionary->Capacity();
- Object* the_hole = ReadOnlyRoots(isolate).the_hole_value();
- Object* undefined = ReadOnlyRoots(isolate).undefined_value();
+ Object the_hole = ReadOnlyRoots(isolate).the_hole_value();
+ Object undefined = ReadOnlyRoots(isolate).undefined_value();
// Scan for accessor properties. If accessors are present, then elements
// must be accessed in order via the slow path.
bool found = false;
for (int i = 0; i < capacity; ++i) {
- Object* k = dictionary->KeyAt(i);
+ Object k = dictionary->KeyAt(i);
if (k == the_hole) continue;
if (k == undefined) continue;
@@ -1743,7 +1735,7 @@ class DictionaryElementsAccessor
// access getters out of order
return false;
} else if (!found) {
- Object* element_k = dictionary->ValueAt(i);
+ Object element_k = dictionary->ValueAt(i);
if (value->SameValueZero(element_k)) found = true;
}
}
@@ -1783,7 +1775,7 @@ class DictionaryElementsAccessor
PropertyDetails details = GetDetailsImpl(*dictionary, entry);
switch (details.kind()) {
case kData: {
- Object* element_k = dictionary->ValueAt(entry);
+ Object element_k = dictionary->ValueAt(entry);
if (value->SameValueZero(element_k)) return Just(true);
break;
}
@@ -1794,9 +1786,9 @@ class DictionaryElementsAccessor
DCHECK_EQ(it.state(), LookupIterator::ACCESSOR);
Handle<Object> element_k;
- ASSIGN_RETURN_ON_EXCEPTION_VALUE(
- isolate, element_k, JSObject::GetPropertyWithAccessor(&it),
- Nothing<bool>());
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, element_k,
+ Object::GetPropertyWithAccessor(&it),
+ Nothing<bool>());
if (value->SameValueZero(*element_k)) return Just(true);
@@ -1852,7 +1844,7 @@ class DictionaryElementsAccessor
PropertyDetails details = GetDetailsImpl(*dictionary, entry);
switch (details.kind()) {
case kData: {
- Object* element_k = dictionary->ValueAt(entry);
+ Object element_k = dictionary->ValueAt(entry);
if (value->StrictEquals(element_k)) {
return Just<int64_t>(k);
}
@@ -1865,9 +1857,9 @@ class DictionaryElementsAccessor
DCHECK_EQ(it.state(), LookupIterator::ACCESSOR);
Handle<Object> element_k;
- ASSIGN_RETURN_ON_EXCEPTION_VALUE(
- isolate, element_k, JSObject::GetPropertyWithAccessor(&it),
- Nothing<int64_t>());
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, element_k,
+ Object::GetPropertyWithAccessor(&it),
+ Nothing<int64_t>());
if (value->StrictEquals(*element_k)) return Just<int64_t>(k);
@@ -1895,19 +1887,19 @@ class DictionaryElementsAccessor
return Just<int64_t>(-1);
}
- static void ValidateContents(JSObject* holder, int length) {
+ static void ValidateContents(JSObject holder, int length) {
DisallowHeapAllocation no_gc;
#if DEBUG
DCHECK_EQ(holder->map()->elements_kind(), DICTIONARY_ELEMENTS);
if (!FLAG_enable_slow_asserts) return;
ReadOnlyRoots roots = holder->GetReadOnlyRoots();
- NumberDictionary* dictionary = NumberDictionary::cast(holder->elements());
+ NumberDictionary dictionary = NumberDictionary::cast(holder->elements());
// Validate the requires_slow_elements and max_number_key values.
int capacity = dictionary->Capacity();
bool requires_slow_elements = false;
int max_key = 0;
for (int i = 0; i < capacity; ++i) {
- Object* k;
+ Object k;
if (!dictionary->ToKey(roots, i, &k)) continue;
DCHECK_LE(0.0, k->Number());
if (k->Number() > NumberDictionary::kRequiresSlowElementsLimit) {
@@ -1980,7 +1972,7 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
if (!backing_store->is_the_hole(isolate, entry - 1)) break;
}
if (entry == 0) {
- FixedArray* empty = ReadOnlyRoots(isolate).empty_fixed_array();
+ FixedArray empty = ReadOnlyRoots(isolate).empty_fixed_array();
// Dynamically ask for the elements kind here since we manually redirect
// the operations for argument backing stores.
if (obj->GetElementsKind() == FAST_SLOPPY_ARGUMENTS_ELEMENTS) {
@@ -2109,13 +2101,13 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
DeleteCommon(obj, entry, handle(obj->elements(), obj->GetIsolate()));
}
- static bool HasEntryImpl(Isolate* isolate, FixedArrayBase* backing_store,
+ static bool HasEntryImpl(Isolate* isolate, FixedArrayBase backing_store,
uint32_t entry) {
return !BackingStore::cast(backing_store)->is_the_hole(isolate, entry);
}
- static uint32_t NumberOfElementsImpl(JSObject* receiver,
- FixedArrayBase* backing_store) {
+ static uint32_t NumberOfElementsImpl(JSObject receiver,
+ FixedArrayBase backing_store) {
uint32_t max_index = Subclass::GetMaxIndex(receiver, backing_store);
if (IsFastPackedElementsKind(Subclass::kind())) return max_index;
Isolate* isolate = receiver->GetIsolate();
@@ -2140,12 +2132,12 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
}
}
- static void ValidateContents(JSObject* holder, int length) {
+ static void ValidateContents(JSObject holder, int length) {
#if DEBUG
Isolate* isolate = holder->GetIsolate();
Heap* heap = isolate->heap();
- FixedArrayBase* elements = holder->elements();
- Map* map = elements->map();
+ FixedArrayBase elements = holder->elements();
+ Map map = elements->map();
if (IsSmiOrObjectElementsKind(KindTraits::Kind)) {
DCHECK_NE(map, ReadOnlyRoots(heap).fixed_double_array_map());
} else if (IsDoubleElementsKind(KindTraits::Kind)) {
@@ -2157,7 +2149,7 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
if (length == 0) return; // nothing to do!
#if ENABLE_SLOW_DCHECKS
DisallowHeapAllocation no_gc;
- BackingStore* backing_store = BackingStore::cast(elements);
+ BackingStore backing_store = BackingStore::cast(elements);
if (IsSmiElementsKind(KindTraits::Kind)) {
HandleScope scope(isolate);
for (int i = 0; i < length; i++) {
@@ -2226,30 +2218,24 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
heap->CanMoveObjectStart(*dst_elms)) {
// Update all the copies of this backing_store handle.
*dst_elms.location() =
- BackingStore::cast(heap->LeftTrimFixedArray(*dst_elms, src_index));
+ BackingStore::cast(heap->LeftTrimFixedArray(*dst_elms, src_index))
+ ->ptr();
receiver->set_elements(*dst_elms);
// Adjust the hole offset as the array has been shrunk.
hole_end -= src_index;
DCHECK_LE(hole_start, backing_store->length());
DCHECK_LE(hole_end, backing_store->length());
} else if (len != 0) {
- if (IsDoubleElementsKind(KindTraits::Kind)) {
- MemMove(dst_elms->data_start() + dst_index,
- dst_elms->data_start() + src_index, len * kDoubleSize);
- } else {
- DisallowHeapAllocation no_gc;
- WriteBarrierMode mode = GetWriteBarrierMode(KindTraits::Kind);
- heap->MoveElements(FixedArray::cast(*dst_elms), dst_index, src_index,
- len, mode);
- }
+ WriteBarrierMode mode = GetWriteBarrierMode(KindTraits::Kind);
+ dst_elms->MoveElements(heap, dst_index, src_index, len, mode);
}
if (hole_start != hole_end) {
dst_elms->FillWithHoles(hole_start, hole_end);
}
}
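The hunk above collapses the old MemMove-vs-heap->MoveElements branch into a single dst_elms->MoveElements call. The distinction it hides does not go away; a toy sketch of what such a move still has to do per element kind (assumed shape, not V8's exact code):

#include <cstring>

// Raw double storage can be moved as plain bytes.
void MoveDoubles(double* data, int dst, int src, int len) {
  std::memmove(data + dst, data + src, len * sizeof(double));
}

// Tagged object slots are moved slot by slot so that a GC write barrier
// could be applied per store (the barrier itself is omitted here).
void MoveTagged(void** data, int dst, int src, int len) {
  if (dst < src) {
    for (int i = 0; i < len; ++i) data[dst + i] = data[src + i];
  } else {
    for (int i = len - 1; i >= 0; --i) data[dst + i] = data[src + i];
  }
}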
- static Object* FillImpl(Handle<JSObject> receiver, Handle<Object> obj_value,
- uint32_t start, uint32_t end) {
+ static Object FillImpl(Handle<JSObject> receiver, Handle<Object> obj_value,
+ uint32_t start, uint32_t end) {
// Ensure indexes are within array bounds
DCHECK_LE(0, start);
DCHECK_LE(start, end);
@@ -2280,56 +2266,48 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
uint32_t start_from, uint32_t length) {
DCHECK(JSObject::PrototypeHasNoElements(isolate, *receiver));
DisallowHeapAllocation no_gc;
- FixedArrayBase* elements_base = receiver->elements();
- Object* the_hole = ReadOnlyRoots(isolate).the_hole_value();
- Object* undefined = ReadOnlyRoots(isolate).undefined_value();
- Object* value = *search_value;
+ FixedArrayBase elements_base = receiver->elements();
+ Object the_hole = ReadOnlyRoots(isolate).the_hole_value();
+ Object undefined = ReadOnlyRoots(isolate).undefined_value();
+ Object value = *search_value;
+
+ if (start_from >= length) return Just(false);
// Elements beyond the capacity of the backing store are treated as undefined.
- if (value == undefined &&
- static_cast<uint32_t>(elements_base->length()) < length) {
- return Just(true);
+ uint32_t elements_length = static_cast<uint32_t>(elements_base->length());
+ if (value == undefined && elements_length < length) return Just(true);
+ if (elements_length == 0) {
+ DCHECK_NE(value, undefined);
+ return Just(false);
}
- if (start_from >= length) return Just(false);
-
- length = std::min(static_cast<uint32_t>(elements_base->length()), length);
+ length = std::min(elements_length, length);
if (!value->IsNumber()) {
if (value == undefined) {
- // Only PACKED_ELEMENTS, HOLEY_ELEMENTS, HOLEY_SMI_ELEMENTS, and
- // HOLEY_DOUBLE_ELEMENTS can have `undefined` as a value.
- if (!IsObjectElementsKind(Subclass::kind()) &&
- !IsHoleyElementsKind(Subclass::kind())) {
- return Just(false);
- }
-
- // Search for `undefined` or The Hole in PACKED_ELEMENTS,
- // HOLEY_ELEMENTS or HOLEY_SMI_ELEMENTS
+ // Search for `undefined` or The Hole. Even in the case of
+ // PACKED_DOUBLE_ELEMENTS or PACKED_SMI_ELEMENTS, we might encounter The
+ // Hole here, since the {length} used here can be larger than
+ // JSArray::length.
if (IsSmiOrObjectElementsKind(Subclass::kind())) {
auto elements = FixedArray::cast(receiver->elements());
for (uint32_t k = start_from; k < length; ++k) {
- Object* element_k = elements->get(k);
+ Object element_k = elements->get(k);
- if (IsHoleyElementsKind(Subclass::kind()) &&
- element_k == the_hole) {
- return Just(true);
- }
- if (IsObjectElementsKind(Subclass::kind()) &&
- element_k == undefined) {
+ if (element_k == the_hole || element_k == undefined) {
return Just(true);
}
}
return Just(false);
} else {
- // Search for The Hole in HOLEY_DOUBLE_ELEMENTS
- DCHECK_EQ(Subclass::kind(), HOLEY_DOUBLE_ELEMENTS);
+ // Search for The Hole in HOLEY_DOUBLE_ELEMENTS or
+ // PACKED_DOUBLE_ELEMENTS.
+ DCHECK(IsDoubleElementsKind(Subclass::kind()));
auto elements = FixedDoubleArray::cast(receiver->elements());
for (uint32_t k = start_from; k < length; ++k) {
- if (IsHoleyElementsKind(Subclass::kind()) &&
- elements->is_the_hole(k)) {
+ if (elements->is_the_hole(k)) {
return Just(true);
}
}
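The widened comment above matters for observable behavior: holes read back as undefined, so [1, , 3].includes(undefined) is true even though nothing was ever written at index 1. A toy version of the scan, modeling a hole as an empty optional (illustrative only):

#include <cstddef>
#include <optional>
#include <vector>

// Searching for undefined succeeds on a hole too, since holes read back as
// undefined through the JS element protocol.
bool IncludesUndefined(const std::vector<std::optional<double>>& elements,
                       size_t start) {
  for (size_t k = start; k < elements.size(); ++k) {
    if (!elements[k]) return true;  // hole (or undefined)
  }
  return false;
}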
@@ -2348,8 +2326,8 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
auto elements = FixedArray::cast(receiver->elements());
for (uint32_t k = start_from; k < length; ++k) {
- Object* element_k = elements->get(k);
- if (IsHoleyElementsKind(Subclass::kind()) && element_k == the_hole) {
+ Object element_k = elements->get(k);
+ if (element_k == the_hole) {
continue;
}
@@ -2367,8 +2345,7 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
auto elements = FixedDoubleArray::cast(receiver->elements());
for (uint32_t k = start_from; k < length; ++k) {
- if (IsHoleyElementsKind(Subclass::kind()) &&
- elements->is_the_hole(k)) {
+ if (elements->is_the_hole(k)) {
continue;
}
if (elements->get_scalar(k) == search_value) return Just(true);
@@ -2381,7 +2358,7 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
auto elements = FixedArray::cast(receiver->elements());
for (uint32_t k = start_from; k < length; ++k) {
- Object* element_k = elements->get(k);
+ Object element_k = elements->get(k);
if (element_k->IsNumber() && element_k->Number() == search_value) {
return Just(true);
}
@@ -2400,8 +2377,7 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
auto elements = FixedDoubleArray::cast(receiver->elements());
for (uint32_t k = start_from; k < length; ++k) {
- if (IsHoleyElementsKind(Subclass::kind()) &&
- elements->is_the_hole(k)) {
+ if (elements->is_the_hole(k)) {
continue;
}
if (std::isnan(elements->get_scalar(k))) return Just(true);
@@ -2509,10 +2485,10 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
uint32_t dst_index) {
// Add the provided values.
DisallowHeapAllocation no_gc;
- FixedArrayBase* raw_backing_store = *dst_store;
+ FixedArrayBase raw_backing_store = *dst_store;
WriteBarrierMode mode = raw_backing_store->GetWriteBarrierMode(no_gc);
for (uint32_t i = 0; i < copy_size; i++) {
- Object* argument = (*args)[src_index + i];
+ Object argument = (*args)[src_index + i];
DCHECK(!argument->IsTheHole());
Subclass::SetImpl(raw_backing_store, dst_index + i, argument, mode);
}
@@ -2527,21 +2503,21 @@ class FastSmiOrObjectElementsAccessor
: FastElementsAccessor<Subclass, KindTraits>(name) {}
static inline void SetImpl(Handle<JSObject> holder, uint32_t entry,
- Object* value) {
+ Object value) {
SetImpl(holder->elements(), entry, value);
}
- static inline void SetImpl(FixedArrayBase* backing_store, uint32_t entry,
- Object* value) {
+ static inline void SetImpl(FixedArrayBase backing_store, uint32_t entry,
+ Object value) {
FixedArray::cast(backing_store)->set(entry, value);
}
- static inline void SetImpl(FixedArrayBase* backing_store, uint32_t entry,
- Object* value, WriteBarrierMode mode) {
+ static inline void SetImpl(FixedArrayBase backing_store, uint32_t entry,
+ Object value, WriteBarrierMode mode) {
FixedArray::cast(backing_store)->set(entry, value, mode);
}
- static Object* GetRaw(FixedArray* backing_store, uint32_t entry) {
+ static Object GetRaw(FixedArray backing_store, uint32_t entry) {
uint32_t index = Subclass::GetIndexForEntryImpl(backing_store, entry);
return backing_store->get(index);
}
@@ -2551,8 +2527,8 @@ class FastSmiOrObjectElementsAccessor
// See ElementsAccessor::CopyElements() for details.
// This method could actually allocate if copying from double elements to
// object elements.
- static void CopyElementsImpl(Isolate* isolate, FixedArrayBase* from,
- uint32_t from_start, FixedArrayBase* to,
+ static void CopyElementsImpl(Isolate* isolate, FixedArrayBase from,
+ uint32_t from_start, FixedArrayBase to,
ElementsKind from_kind, uint32_t to_start,
int packed_size, int copy_size) {
DisallowHeapAllocation no_gc;
@@ -2612,11 +2588,11 @@ class FastSmiOrObjectElementsAccessor
} else {
// No allocations here, so we can avoid handlification overhead.
DisallowHeapAllocation no_gc;
- FixedArray* elements = FixedArray::cast(object->elements());
+ FixedArray elements = FixedArray::cast(object->elements());
uint32_t length = elements->length();
for (uint32_t index = 0; index < length; ++index) {
if (!Subclass::HasEntryImpl(isolate, elements, index)) continue;
- Object* value = GetRaw(elements, index);
+ Object value = GetRaw(elements, index);
values_or_entries->set(count++, value);
}
}
@@ -2630,8 +2606,8 @@ class FastSmiOrObjectElementsAccessor
uint32_t start_from, uint32_t length) {
DCHECK(JSObject::PrototypeHasNoElements(isolate, *receiver));
DisallowHeapAllocation no_gc;
- FixedArrayBase* elements_base = receiver->elements();
- Object* value = *search_value;
+ FixedArrayBase elements_base = receiver->elements();
+ Object value = *search_value;
if (start_from >= length) return Just<int64_t>(-1);
@@ -2644,7 +2620,11 @@ class FastSmiOrObjectElementsAccessor
// NaN can never be found by strict equality.
if (value->IsNaN()) return Just<int64_t>(-1);
- FixedArray* elements = FixedArray::cast(receiver->elements());
+ // k can be greater than receiver->length() below, but it is bounded by
+ // elements_base->length() so we never read out of bounds. This means that
+ // elements->get(k) can return the hole, for which the StrictEquals will
+ // always fail.
+ FixedArray elements = FixedArray::cast(receiver->elements());
for (uint32_t k = start_from; k < length; ++k) {
if (value->StrictEquals(elements->get(k))) return Just<int64_t>(k);
}
@@ -2702,29 +2682,29 @@ class FastDoubleElementsAccessor
explicit FastDoubleElementsAccessor(const char* name)
: FastElementsAccessor<Subclass, KindTraits>(name) {}
- static Handle<Object> GetImpl(Isolate* isolate, FixedArrayBase* backing_store,
+ static Handle<Object> GetImpl(Isolate* isolate, FixedArrayBase backing_store,
uint32_t entry) {
return FixedDoubleArray::get(FixedDoubleArray::cast(backing_store), entry,
isolate);
}
static inline void SetImpl(Handle<JSObject> holder, uint32_t entry,
- Object* value) {
+ Object value) {
SetImpl(holder->elements(), entry, value);
}
- static inline void SetImpl(FixedArrayBase* backing_store, uint32_t entry,
- Object* value) {
+ static inline void SetImpl(FixedArrayBase backing_store, uint32_t entry,
+ Object value) {
FixedDoubleArray::cast(backing_store)->set(entry, value->Number());
}
- static inline void SetImpl(FixedArrayBase* backing_store, uint32_t entry,
- Object* value, WriteBarrierMode mode) {
+ static inline void SetImpl(FixedArrayBase backing_store, uint32_t entry,
+ Object value, WriteBarrierMode mode) {
FixedDoubleArray::cast(backing_store)->set(entry, value->Number());
}
- static void CopyElementsImpl(Isolate* isolate, FixedArrayBase* from,
- uint32_t from_start, FixedArrayBase* to,
+ static void CopyElementsImpl(Isolate* isolate, FixedArrayBase from,
+ uint32_t from_start, FixedArrayBase to,
ElementsKind from_kind, uint32_t to_start,
int packed_size, int copy_size) {
DisallowHeapAllocation no_allocation;
@@ -2789,8 +2769,8 @@ class FastDoubleElementsAccessor
uint32_t start_from, uint32_t length) {
DCHECK(JSObject::PrototypeHasNoElements(isolate, *receiver));
DisallowHeapAllocation no_gc;
- FixedArrayBase* elements_base = receiver->elements();
- Object* value = *search_value;
+ FixedArrayBase elements_base = receiver->elements();
+ Object value = *search_value;
length = std::min(static_cast<uint32_t>(elements_base->length()), length);
@@ -2803,7 +2783,7 @@ class FastDoubleElementsAccessor
return Just<int64_t>(-1);
}
double numeric_search_value = value->Number();
- FixedDoubleArray* elements = FixedDoubleArray::cast(receiver->elements());
+ FixedDoubleArray elements = FixedDoubleArray::cast(receiver->elements());
for (uint32_t k = start_from; k < length; ++k) {
if (elements->is_the_hole(k)) {
@@ -2854,42 +2834,41 @@ class TypedElementsAccessor
typedef TypedElementsAccessor<Kind, ctype> AccessorClass;
static inline void SetImpl(Handle<JSObject> holder, uint32_t entry,
- Object* value) {
+ Object value) {
SetImpl(holder->elements(), entry, value);
}
- static inline void SetImpl(FixedArrayBase* backing_store, uint32_t entry,
- Object* value) {
+ static inline void SetImpl(FixedArrayBase backing_store, uint32_t entry,
+ Object value) {
BackingStore::cast(backing_store)->SetValue(entry, value);
}
- static inline void SetImpl(FixedArrayBase* backing_store, uint32_t entry,
- Object* value, WriteBarrierMode mode) {
+ static inline void SetImpl(FixedArrayBase backing_store, uint32_t entry,
+ Object value, WriteBarrierMode mode) {
BackingStore::cast(backing_store)->SetValue(entry, value);
}
- static Handle<Object> GetImpl(Isolate* isolate, FixedArrayBase* backing_store,
+ static Handle<Object> GetImpl(Isolate* isolate, FixedArrayBase backing_store,
uint32_t entry) {
return BackingStore::get(isolate, BackingStore::cast(backing_store), entry);
}
- static PropertyDetails GetDetailsImpl(JSObject* holder, uint32_t entry) {
+ static PropertyDetails GetDetailsImpl(JSObject holder, uint32_t entry) {
return PropertyDetails(kData, DONT_DELETE, PropertyCellType::kNoCell);
}
- static PropertyDetails GetDetailsImpl(FixedArrayBase* backing_store,
+ static PropertyDetails GetDetailsImpl(FixedArrayBase backing_store,
uint32_t entry) {
return PropertyDetails(kData, DONT_DELETE, PropertyCellType::kNoCell);
}
- static bool HasElementImpl(Isolate* isolate, JSObject* holder, uint32_t index,
- FixedArrayBase* backing_store,
+ static bool HasElementImpl(Isolate* isolate, JSObject holder, uint32_t index,
+ FixedArrayBase backing_store,
PropertyFilter filter) {
return index < AccessorClass::GetCapacityImpl(holder, backing_store);
}
- static bool HasAccessorsImpl(JSObject* holder,
- FixedArrayBase* backing_store) {
+ static bool HasAccessorsImpl(JSObject holder, FixedArrayBase backing_store) {
return false;
}
@@ -2904,32 +2883,32 @@ class TypedElementsAccessor
UNREACHABLE();
}
- static uint32_t GetIndexForEntryImpl(FixedArrayBase* backing_store,
+ static uint32_t GetIndexForEntryImpl(FixedArrayBase backing_store,
uint32_t entry) {
return entry;
}
- static uint32_t GetEntryForIndexImpl(Isolate* isolate, JSObject* holder,
- FixedArrayBase* backing_store,
+ static uint32_t GetEntryForIndexImpl(Isolate* isolate, JSObject holder,
+ FixedArrayBase backing_store,
uint32_t index, PropertyFilter filter) {
return index < AccessorClass::GetCapacityImpl(holder, backing_store)
? index
: kMaxUInt32;
}
- static bool WasNeutered(JSObject* holder) {
- JSArrayBufferView* view = JSArrayBufferView::cast(holder);
- return view->WasNeutered();
+ static bool WasDetached(JSObject holder) {
+ JSArrayBufferView view = JSArrayBufferView::cast(holder);
+ return view->WasDetached();
}
- static uint32_t GetCapacityImpl(JSObject* holder,
- FixedArrayBase* backing_store) {
- if (WasNeutered(holder)) return 0;
+ static uint32_t GetCapacityImpl(JSObject holder,
+ FixedArrayBase backing_store) {
+ if (WasDetached(holder)) return 0;
return backing_store->length();
}
- static uint32_t NumberOfElementsImpl(JSObject* receiver,
- FixedArrayBase* backing_store) {
+ static uint32_t NumberOfElementsImpl(JSObject receiver,
+ FixedArrayBase backing_store) {
return AccessorClass::GetCapacityImpl(receiver, backing_store);
}
@@ -2966,10 +2945,10 @@ class TypedElementsAccessor
return Just(true);
}
- static Object* FillImpl(Handle<JSObject> receiver, Handle<Object> obj_value,
- uint32_t start, uint32_t end) {
+ static Object FillImpl(Handle<JSObject> receiver, Handle<Object> obj_value,
+ uint32_t start, uint32_t end) {
Handle<JSTypedArray> array = Handle<JSTypedArray>::cast(receiver);
- DCHECK(!array->WasNeutered());
+ DCHECK(!array->WasDetached());
DCHECK(obj_value->IsNumeric());
ctype value = BackingStore::FromHandle(obj_value);
@@ -2980,7 +2959,7 @@ class TypedElementsAccessor
CHECK_LE(end, array->length_value());
DisallowHeapAllocation no_gc;
- BackingStore* elements = BackingStore::cast(receiver->elements());
+ BackingStore elements = BackingStore::cast(receiver->elements());
ctype* data = static_cast<ctype*>(elements->DataPtr());
std::fill(data + start, data + end, value);
return *array;
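Once the fill value has been converted to the element type, FillImpl is a single std::fill over the raw backing memory. A hedged parallel with ordinary containers:

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <vector>

// Same idea as FillImpl above, minus the typed-array plumbing: convert the
// value once, then fill the [start, end) range of raw storage.
void FillRange(std::vector<uint8_t>& data, size_t start, size_t end,
               uint8_t value) {
  std::fill(data.begin() + start, data.begin() + end, value);
}

This is what keeps TypedArray.prototype.fill on a bulk memory path instead of per-element property stores.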
@@ -2993,12 +2972,12 @@ class TypedElementsAccessor
DisallowHeapAllocation no_gc;
// TODO(caitp): return Just(false) here when implementing strict throwing on
- // neutered views.
- if (WasNeutered(*receiver)) {
+ // detached views.
+ if (WasDetached(*receiver)) {
return Just(value->IsUndefined(isolate) && length > start_from);
}
- BackingStore* elements = BackingStore::cast(receiver->elements());
+ BackingStore elements = BackingStore::cast(receiver->elements());
if (value->IsUndefined(isolate) &&
length > static_cast<uint32_t>(elements->length())) {
return Just(true);
@@ -3054,9 +3033,9 @@ class TypedElementsAccessor
uint32_t start_from, uint32_t length) {
DisallowHeapAllocation no_gc;
- if (WasNeutered(*receiver)) return Just<int64_t>(-1);
+ if (WasDetached(*receiver)) return Just<int64_t>(-1);
- BackingStore* elements = BackingStore::cast(receiver->elements());
+ BackingStore elements = BackingStore::cast(receiver->elements());
ctype typed_search_value;
if (Kind == BIGINT64_ELEMENTS || Kind == BIGUINT64_ELEMENTS) {
@@ -3103,9 +3082,9 @@ class TypedElementsAccessor
Handle<Object> value,
uint32_t start_from) {
DisallowHeapAllocation no_gc;
- DCHECK(!WasNeutered(*receiver));
+ DCHECK(!WasDetached(*receiver));
- BackingStore* elements = BackingStore::cast(receiver->elements());
+ BackingStore elements = BackingStore::cast(receiver->elements());
ctype typed_search_value;
if (Kind == BIGINT64_ELEMENTS || Kind == BIGUINT64_ELEMENTS) {
@@ -3145,11 +3124,11 @@ class TypedElementsAccessor
return Just<int64_t>(-1);
}
- static void ReverseImpl(JSObject* receiver) {
+ static void ReverseImpl(JSObject receiver) {
DisallowHeapAllocation no_gc;
- DCHECK(!WasNeutered(receiver));
+ DCHECK(!WasDetached(receiver));
- BackingStore* elements = BackingStore::cast(receiver->elements());
+ BackingStore elements = BackingStore::cast(receiver->elements());
uint32_t len = elements->length();
if (len == 0) return;
@@ -3161,7 +3140,7 @@ class TypedElementsAccessor
static Handle<FixedArray> CreateListFromArrayLikeImpl(Isolate* isolate,
Handle<JSObject> object,
uint32_t length) {
- DCHECK(!WasNeutered(*object));
+ DCHECK(!WasDetached(*object));
DCHECK(object->IsJSTypedArray());
Handle<FixedArray> result = isolate->factory()->NewFixedArray(length);
Handle<BackingStore> elements(BackingStore::cast(object->elements()),
@@ -3173,22 +3152,22 @@ class TypedElementsAccessor
return result;
}
- static void CopyTypedArrayElementsSliceImpl(JSTypedArray* source,
- JSTypedArray* destination,
+ static void CopyTypedArrayElementsSliceImpl(JSTypedArray source,
+ JSTypedArray destination,
size_t start, size_t end) {
DisallowHeapAllocation no_gc;
DCHECK_EQ(destination->GetElementsKind(), AccessorClass::kind());
- CHECK(!source->WasNeutered());
- CHECK(!destination->WasNeutered());
+ CHECK(!source->WasDetached());
+ CHECK(!destination->WasDetached());
DCHECK_LE(start, end);
DCHECK_LE(end, source->length_value());
size_t count = end - start;
DCHECK_LE(count, destination->length_value());
- FixedTypedArrayBase* src_elements =
+ FixedTypedArrayBase src_elements =
FixedTypedArrayBase::cast(source->elements());
- BackingStore* dest_elements = BackingStore::cast(destination->elements());
+ BackingStore dest_elements = BackingStore::cast(destination->elements());
size_t element_size = source->element_size();
uint8_t* source_data =
@@ -3228,9 +3207,8 @@ class TypedElementsAccessor
}
template <typename SourceTraits>
- static void CopyBetweenBackingStores(void* source_data_ptr,
- BackingStore* dest, size_t length,
- uint32_t offset) {
+ static void CopyBetweenBackingStores(void* source_data_ptr, BackingStore dest,
+ size_t length, uint32_t offset) {
DisallowHeapAllocation no_gc;
for (uint32_t i = 0; i < length; i++) {
// We use scalar accessors to avoid boxing/unboxing, so there are no
@@ -3242,19 +3220,19 @@ class TypedElementsAccessor
}
}
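CopyBetweenBackingStores converts element by element through scalar reads and writes, avoiding intermediate heap numbers. A toy parallel (illustrative, not V8's code):

#include <cstddef>

// Element-wise conversion between two element types; static_cast stands in
// for the real per-kind numeric conversion.
template <typename From, typename To>
void ConvertCopy(const From* src, To* dst, size_t length, size_t offset) {
  for (size_t i = 0; i < length; ++i) {
    dst[offset + i] = static_cast<To>(src[i]);
  }
}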
- static void CopyElementsFromTypedArray(JSTypedArray* source,
- JSTypedArray* destination,
+ static void CopyElementsFromTypedArray(JSTypedArray source,
+ JSTypedArray destination,
size_t length, uint32_t offset) {
// The source is a typed array, so we know we don't need to do ToNumber
// side-effects, as the source elements will always be a number.
DisallowHeapAllocation no_gc;
- CHECK(!source->WasNeutered());
- CHECK(!destination->WasNeutered());
+ CHECK(!source->WasDetached());
+ CHECK(!destination->WasDetached());
- FixedTypedArrayBase* source_elements =
+ FixedTypedArrayBase source_elements =
FixedTypedArrayBase::cast(source->elements());
- BackingStore* destination_elements =
+ BackingStore destination_elements =
BackingStore::cast(destination->elements());
DCHECK_LE(offset, destination->length_value());
@@ -3311,8 +3289,8 @@ class TypedElementsAccessor
}
}
- static bool HoleyPrototypeLookupRequired(Isolate* isolate, Context* context,
- JSArray* source) {
+ static bool HoleyPrototypeLookupRequired(Isolate* isolate, Context context,
+ JSArray source) {
DisallowHeapAllocation no_gc;
DisallowJavascriptExecution no_js(isolate);
@@ -3320,7 +3298,7 @@ class TypedElementsAccessor
if (isolate->force_slow_path()) return true;
#endif
- Object* source_proto = source->map()->prototype();
+ Object source_proto = source->map()->prototype();
// Null prototypes are OK - we don't need to do prototype chain lookups on
// them.
@@ -3334,15 +3312,15 @@ class TypedElementsAccessor
return !isolate->IsNoElementsProtectorIntact(context);
}
- static bool TryCopyElementsFastNumber(Context* context, JSArray* source,
- JSTypedArray* destination,
- size_t length, uint32_t offset) {
+ static bool TryCopyElementsFastNumber(Context context, JSArray source,
+ JSTypedArray destination, size_t length,
+ uint32_t offset) {
if (Kind == BIGINT64_ELEMENTS || Kind == BIGUINT64_ELEMENTS) return false;
Isolate* isolate = source->GetIsolate();
DisallowHeapAllocation no_gc;
DisallowJavascriptExecution no_js(isolate);
- CHECK(!destination->WasNeutered());
+ CHECK(!destination->WasDetached());
size_t current_length;
DCHECK(source->length()->IsNumber() &&
@@ -3355,7 +3333,7 @@ class TypedElementsAccessor
USE(dest_length);
ElementsKind kind = source->GetElementsKind();
- BackingStore* dest = BackingStore::cast(destination->elements());
+ BackingStore dest = BackingStore::cast(destination->elements());
// When we find the hole, we normally have to look up the element on the
// prototype chain, which is not handled here and we return false instead.
@@ -3364,26 +3342,26 @@ class TypedElementsAccessor
// the hole into undefined.
if (HoleyPrototypeLookupRequired(isolate, context, source)) return false;
- Object* undefined = ReadOnlyRoots(isolate).undefined_value();
+ Object undefined = ReadOnlyRoots(isolate).undefined_value();
// Fastpath for packed Smi kind.
if (kind == PACKED_SMI_ELEMENTS) {
- FixedArray* source_store = FixedArray::cast(source->elements());
+ FixedArray source_store = FixedArray::cast(source->elements());
for (uint32_t i = 0; i < length; i++) {
- Object* elem = source_store->get(i);
+ Object elem = source_store->get(i);
DCHECK(elem->IsSmi());
int int_value = Smi::ToInt(elem);
dest->set(offset + i, dest->from(int_value));
}
return true;
} else if (kind == HOLEY_SMI_ELEMENTS) {
- FixedArray* source_store = FixedArray::cast(source->elements());
+ FixedArray source_store = FixedArray::cast(source->elements());
for (uint32_t i = 0; i < length; i++) {
if (source_store->is_the_hole(isolate, i)) {
dest->SetValue(offset + i, undefined);
} else {
- Object* elem = source_store->get(i);
+ Object elem = source_store->get(i);
DCHECK(elem->IsSmi());
int int_value = Smi::ToInt(elem);
dest->set(offset + i, dest->from(int_value));
@@ -3393,7 +3371,7 @@ class TypedElementsAccessor
} else if (kind == PACKED_DOUBLE_ELEMENTS) {
// Fastpath for packed double kind. We avoid boxing and then immediately
// unboxing the double here by using get_scalar.
- FixedDoubleArray* source_store =
+ FixedDoubleArray source_store =
FixedDoubleArray::cast(source->elements());
for (uint32_t i = 0; i < length; i++) {
@@ -3404,7 +3382,7 @@ class TypedElementsAccessor
}
return true;
} else if (kind == HOLEY_DOUBLE_ELEMENTS) {
- FixedDoubleArray* source_store =
+ FixedDoubleArray source_store =
FixedDoubleArray::cast(source->elements());
for (uint32_t i = 0; i < length; i++) {
if (source_store->is_the_hole(i)) {
@@ -3419,9 +3397,9 @@ class TypedElementsAccessor
return false;
}
- static Object* CopyElementsHandleSlow(Handle<Object> source,
- Handle<JSTypedArray> destination,
- size_t length, uint32_t offset) {
+ static Object CopyElementsHandleSlow(Handle<Object> source,
+ Handle<JSTypedArray> destination,
+ size_t length, uint32_t offset) {
Isolate* isolate = destination->GetIsolate();
Handle<BackingStore> destination_elements(
BackingStore::cast(destination->elements()), isolate);
@@ -3438,10 +3416,9 @@ class TypedElementsAccessor
Object::ToNumber(isolate, elem));
}
- if (V8_UNLIKELY(destination->WasNeutered())) {
+ if (V8_UNLIKELY(destination->WasDetached())) {
const char* op = "set";
- const MessageTemplate::Template message =
- MessageTemplate::kDetachedOperation;
+ const MessageTemplate message = MessageTemplate::kDetachedOperation;
Handle<String> operation =
isolate->factory()->NewStringFromAsciiChecked(op);
THROW_NEW_ERROR_RETURN_FAILURE(isolate,
@@ -3457,14 +3434,14 @@ class TypedElementsAccessor
// This doesn't guarantee that the destination array will be completely
// filled. The caller must do this by passing a source with equal length, if
// that is required.
- static Object* CopyElementsHandleImpl(Handle<Object> source,
- Handle<JSObject> destination,
- size_t length, uint32_t offset) {
+ static Object CopyElementsHandleImpl(Handle<Object> source,
+ Handle<JSObject> destination,
+ size_t length, uint32_t offset) {
Isolate* isolate = destination->GetIsolate();
Handle<JSTypedArray> destination_ta =
Handle<JSTypedArray>::cast(destination);
DCHECK_LE(offset + length, destination_ta->length_value());
- CHECK(!destination_ta->WasNeutered());
+ CHECK(!destination_ta->WasDetached());
if (length == 0) return *isolate->factory()->undefined_value();
@@ -3491,7 +3468,8 @@ class TypedElementsAccessor
}
// If we have to copy more elements than we have in the source, we need to
// do special handling and conversion; that happens in the slow case.
- if (length + offset <= source_ta->length_value()) {
+ if (!source_ta->WasDetached() &&
+ length + offset <= source_ta->length_value()) {
CopyElementsFromTypedArray(*source_ta, *destination_ta, length, offset);
return *isolate->factory()->undefined_value();
}
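The new !source_ta->WasDetached() condition is the functional change in this hunk: a detached source no longer takes the fast copy path on a stale length. The guard in isolation (illustrative):

#include <cstddef>

// Fast path only when the source is still attached and covers the requested
// range; otherwise fall back to the slow element-by-element copy.
bool CanFastCopy(bool source_detached, size_t source_length, size_t length,
                 size_t offset) {
  return !source_detached && length + offset <= source_length;
}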
@@ -3539,7 +3517,7 @@ class SloppyArgumentsElementsAccessor
UNREACHABLE();
}
- static Handle<Object> GetImpl(Isolate* isolate, FixedArrayBase* parameters,
+ static Handle<Object> GetImpl(Isolate* isolate, FixedArrayBase parameters,
uint32_t entry) {
Handle<SloppyArgumentsElements> elements(
SloppyArgumentsElements::cast(parameters), isolate);
@@ -3547,9 +3525,9 @@ class SloppyArgumentsElementsAccessor
if (entry < length) {
// Read context mapped entry.
DisallowHeapAllocation no_gc;
- Object* probe = elements->get_mapped_entry(entry);
+ Object probe = elements->get_mapped_entry(entry);
DCHECK(!probe->IsTheHole(isolate));
- Context* context = elements->context();
+ Context context = elements->context();
int context_entry = Smi::ToInt(probe);
DCHECK(!context->get(context_entry)->IsTheHole(isolate));
return handle(context->get(context_entry), isolate);
@@ -3572,30 +3550,30 @@ class SloppyArgumentsElementsAccessor
}
static inline void SetImpl(Handle<JSObject> holder, uint32_t entry,
- Object* value) {
+ Object value) {
SetImpl(holder->elements(), entry, value);
}
- static inline void SetImpl(FixedArrayBase* store, uint32_t entry,
- Object* value) {
- SloppyArgumentsElements* elements = SloppyArgumentsElements::cast(store);
+ static inline void SetImpl(FixedArrayBase store, uint32_t entry,
+ Object value) {
+ SloppyArgumentsElements elements = SloppyArgumentsElements::cast(store);
uint32_t length = elements->parameter_map_length();
if (entry < length) {
// Store context mapped entry.
DisallowHeapAllocation no_gc;
- Object* probe = elements->get_mapped_entry(entry);
+ Object probe = elements->get_mapped_entry(entry);
DCHECK(!probe->IsTheHole());
- Context* context = elements->context();
+ Context context = elements->context();
int context_entry = Smi::ToInt(probe);
DCHECK(!context->get(context_entry)->IsTheHole());
context->set(context_entry, value);
} else {
// Entry is not context mapped; defer to arguments.
- FixedArray* arguments = elements->arguments();
- Object* current = ArgumentsAccessor::GetRaw(arguments, entry - length);
+ FixedArray arguments = elements->arguments();
+ Object current = ArgumentsAccessor::GetRaw(arguments, entry - length);
if (current->IsAliasedArgumentsEntry()) {
- AliasedArgumentsEntry* alias = AliasedArgumentsEntry::cast(current);
- Context* context = elements->context();
+ AliasedArgumentsEntry alias = AliasedArgumentsEntry::cast(current);
+ Context context = elements->context();
int context_entry = alias->aliased_context_slot();
DCHECK(!context->get(context_entry)->IsTheHole());
context->set(context_entry, value);
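This is the sloppy-arguments aliasing that makes function f(a) { arguments[0] = 42; return a; } return 42: entries below parameter_map_length write through to a context slot shared with the named parameter. A toy model with plain containers (illustrative types, not V8's):

#include <cstddef>
#include <vector>

struct SloppyArgs {
  std::vector<size_t> mapped;   // entry -> context slot index
  std::vector<double> context;  // function context slots
  std::vector<double> args;     // unmapped overflow storage
};

// Mirrors SetImpl above: mapped entries store into the context, the rest
// into the plain arguments backing store.
void Set(SloppyArgs& a, size_t entry, double value) {
  if (entry < a.mapped.size()) {
    a.context[a.mapped[entry]] = value;
  } else {
    a.args[entry - a.mapped.size()] = value;
  }
}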
@@ -3612,28 +3590,28 @@ class SloppyArgumentsElementsAccessor
UNREACHABLE();
}
- static uint32_t GetCapacityImpl(JSObject* holder, FixedArrayBase* store) {
- SloppyArgumentsElements* elements = SloppyArgumentsElements::cast(store);
- FixedArray* arguments = elements->arguments();
+ static uint32_t GetCapacityImpl(JSObject holder, FixedArrayBase store) {
+ SloppyArgumentsElements elements = SloppyArgumentsElements::cast(store);
+ FixedArray arguments = elements->arguments();
return elements->parameter_map_length() +
ArgumentsAccessor::GetCapacityImpl(holder, arguments);
}
- static uint32_t GetMaxNumberOfEntries(JSObject* holder,
- FixedArrayBase* backing_store) {
- SloppyArgumentsElements* elements =
+ static uint32_t GetMaxNumberOfEntries(JSObject holder,
+ FixedArrayBase backing_store) {
+ SloppyArgumentsElements elements =
SloppyArgumentsElements::cast(backing_store);
- FixedArrayBase* arguments = elements->arguments();
+ FixedArrayBase arguments = elements->arguments();
return elements->parameter_map_length() +
ArgumentsAccessor::GetMaxNumberOfEntries(holder, arguments);
}
- static uint32_t NumberOfElementsImpl(JSObject* receiver,
- FixedArrayBase* backing_store) {
+ static uint32_t NumberOfElementsImpl(JSObject receiver,
+ FixedArrayBase backing_store) {
Isolate* isolate = receiver->GetIsolate();
- SloppyArgumentsElements* elements =
+ SloppyArgumentsElements elements =
SloppyArgumentsElements::cast(backing_store);
- FixedArrayBase* arguments = elements->arguments();
+ FixedArrayBase arguments = elements->arguments();
uint32_t nof_elements = 0;
uint32_t length = elements->parameter_map_length();
for (uint32_t entry = 0; entry < length; entry++) {
@@ -3656,43 +3634,42 @@ class SloppyArgumentsElementsAccessor
}
}
- static bool HasEntryImpl(Isolate* isolate, FixedArrayBase* parameters,
+ static bool HasEntryImpl(Isolate* isolate, FixedArrayBase parameters,
uint32_t entry) {
- SloppyArgumentsElements* elements =
+ SloppyArgumentsElements elements =
SloppyArgumentsElements::cast(parameters);
uint32_t length = elements->parameter_map_length();
if (entry < length) {
return HasParameterMapArg(isolate, elements, entry);
}
- FixedArrayBase* arguments = elements->arguments();
+ FixedArrayBase arguments = elements->arguments();
return ArgumentsAccessor::HasEntryImpl(isolate, arguments, entry - length);
}
- static bool HasAccessorsImpl(JSObject* holder,
- FixedArrayBase* backing_store) {
- SloppyArgumentsElements* elements =
+ static bool HasAccessorsImpl(JSObject holder, FixedArrayBase backing_store) {
+ SloppyArgumentsElements elements =
SloppyArgumentsElements::cast(backing_store);
- FixedArray* arguments = elements->arguments();
+ FixedArray arguments = elements->arguments();
return ArgumentsAccessor::HasAccessorsImpl(holder, arguments);
}
- static uint32_t GetIndexForEntryImpl(FixedArrayBase* parameters,
+ static uint32_t GetIndexForEntryImpl(FixedArrayBase parameters,
uint32_t entry) {
- SloppyArgumentsElements* elements =
+ SloppyArgumentsElements elements =
SloppyArgumentsElements::cast(parameters);
uint32_t length = elements->parameter_map_length();
if (entry < length) return entry;
- FixedArray* arguments = elements->arguments();
+ FixedArray arguments = elements->arguments();
return ArgumentsAccessor::GetIndexForEntryImpl(arguments, entry - length);
}
- static uint32_t GetEntryForIndexImpl(Isolate* isolate, JSObject* holder,
- FixedArrayBase* parameters,
+ static uint32_t GetEntryForIndexImpl(Isolate* isolate, JSObject holder,
+ FixedArrayBase parameters,
uint32_t index, PropertyFilter filter) {
- SloppyArgumentsElements* elements =
+ SloppyArgumentsElements elements =
SloppyArgumentsElements::cast(parameters);
if (HasParameterMapArg(isolate, elements, index)) return index;
- FixedArray* arguments = elements->arguments();
+ FixedArray arguments = elements->arguments();
uint32_t entry = ArgumentsAccessor::GetEntryForIndexImpl(
isolate, holder, arguments, index, filter);
if (entry == kMaxUInt32) return kMaxUInt32;
@@ -3701,19 +3678,19 @@ class SloppyArgumentsElementsAccessor
return elements->parameter_map_length() + entry;
}
- static PropertyDetails GetDetailsImpl(JSObject* holder, uint32_t entry) {
- SloppyArgumentsElements* elements =
+ static PropertyDetails GetDetailsImpl(JSObject holder, uint32_t entry) {
+ SloppyArgumentsElements elements =
SloppyArgumentsElements::cast(holder->elements());
uint32_t length = elements->parameter_map_length();
if (entry < length) {
return PropertyDetails(kData, NONE, PropertyCellType::kNoCell);
}
- FixedArray* arguments = elements->arguments();
+ FixedArray arguments = elements->arguments();
return ArgumentsAccessor::GetDetailsImpl(arguments, entry - length);
}
static bool HasParameterMapArg(Isolate* isolate,
- SloppyArgumentsElements* elements,
+ SloppyArgumentsElements elements,
uint32_t index) {
uint32_t length = elements->parameter_map_length();
if (index >= length) return false;
@@ -3877,8 +3854,8 @@ class SloppyArgumentsElementsAccessor
Handle<JSArray> result_array =
isolate->factory()->NewJSArray(HOLEY_ELEMENTS, result_len, result_len);
DisallowHeapAllocation no_gc;
- FixedArray* elements = FixedArray::cast(result_array->elements());
- FixedArray* parameters = FixedArray::cast(receiver->elements());
+ FixedArray elements = FixedArray::cast(result_array->elements());
+ FixedArray parameters = FixedArray::cast(receiver->elements());
uint32_t insertion_index = 0;
for (uint32_t i = start; i < end; i++) {
uint32_t entry = GetEntryForIndexImpl(isolate, *receiver, parameters, i,
@@ -3911,8 +3888,8 @@ class SlowSloppyArgumentsElementsAccessor
// Elements of the arguments object in slow mode might be slow aliases.
if (result->IsAliasedArgumentsEntry()) {
DisallowHeapAllocation no_gc;
- AliasedArgumentsEntry* alias = AliasedArgumentsEntry::cast(*result);
- Context* context = elements->context();
+ AliasedArgumentsEntry alias = AliasedArgumentsEntry::cast(*result);
+ Context context = elements->context();
int context_entry = alias->aliased_context_slot();
DCHECK(!context->get(context_entry)->IsTheHole(isolate));
return handle(context->get(context_entry), isolate);
@@ -3961,9 +3938,9 @@ class SlowSloppyArgumentsElementsAccessor
Handle<SloppyArgumentsElements>::cast(store);
uint32_t length = elements->parameter_map_length();
if (entry < length) {
- Object* probe = elements->get_mapped_entry(entry);
+ Object probe = elements->get_mapped_entry(entry);
DCHECK(!probe->IsTheHole(isolate));
- Context* context = elements->context();
+ Context context = elements->context();
int context_entry = Smi::ToInt(probe);
DCHECK(!context->get(context_entry)->IsTheHole(isolate));
context->set(context_entry, *value);
@@ -4014,8 +3991,8 @@ class FastSloppyArgumentsElementsAccessor
}
static Handle<FixedArray> GetArguments(Isolate* isolate,
- FixedArrayBase* store) {
- SloppyArgumentsElements* elements = SloppyArgumentsElements::cast(store);
+ FixedArrayBase store) {
+ SloppyArgumentsElements elements = SloppyArgumentsElements::cast(store);
return Handle<FixedArray>(elements->arguments(), isolate);
}
@@ -4062,7 +4039,7 @@ class FastSloppyArgumentsElementsAccessor
static_cast<uint32_t>(old_arguments->length()) < new_capacity) {
GrowCapacityAndConvertImpl(object, new_capacity);
}
- FixedArray* arguments = elements->arguments();
+ FixedArray arguments = elements->arguments();
// For fast holey objects, the entry equals the index. The code above made
// sure that there's enough space to store the value. We cannot convert
// index to entry explicitly since the slot still contains the hole, so the
@@ -4083,8 +4060,8 @@ class FastSloppyArgumentsElementsAccessor
value, attributes);
}
- static void CopyElementsImpl(Isolate* isolate, FixedArrayBase* from,
- uint32_t from_start, FixedArrayBase* to,
+ static void CopyElementsImpl(Isolate* isolate, FixedArrayBase from,
+ uint32_t from_start, FixedArrayBase to,
ElementsKind from_kind, uint32_t to_start,
int packed_size, int copy_size) {
DCHECK(!to->IsDictionary());
@@ -4146,12 +4123,12 @@ class StringWrapperElementsAccessor
entry - length);
}
- static Handle<Object> GetImpl(Isolate* isolate, FixedArrayBase* elements,
+ static Handle<Object> GetImpl(Isolate* isolate, FixedArrayBase elements,
uint32_t entry) {
UNREACHABLE();
}
- static PropertyDetails GetDetailsImpl(JSObject* holder, uint32_t entry) {
+ static PropertyDetails GetDetailsImpl(JSObject holder, uint32_t entry) {
uint32_t length = static_cast<uint32_t>(GetString(holder)->length());
if (entry < length) {
PropertyAttributes attributes =
@@ -4161,8 +4138,8 @@ class StringWrapperElementsAccessor
return BackingStoreAccessor::GetDetailsImpl(holder, entry - length);
}
- static uint32_t GetEntryForIndexImpl(Isolate* isolate, JSObject* holder,
- FixedArrayBase* backing_store,
+ static uint32_t GetEntryForIndexImpl(Isolate* isolate, JSObject holder,
+ FixedArrayBase backing_store,
uint32_t index, PropertyFilter filter) {
uint32_t length = static_cast<uint32_t>(GetString(holder)->length());
if (index < length) return index;
@@ -4181,7 +4158,7 @@ class StringWrapperElementsAccessor
BackingStoreAccessor::DeleteImpl(holder, entry - length);
}
- static void SetImpl(Handle<JSObject> holder, uint32_t entry, Object* value) {
+ static void SetImpl(Handle<JSObject> holder, uint32_t entry, Object value) {
uint32_t length = static_cast<uint32_t>(GetString(*holder)->length());
if (entry < length) {
return; // String contents are read-only.
@@ -4267,8 +4244,8 @@ class StringWrapperElementsAccessor
capacity);
}
- static void CopyElementsImpl(Isolate* isolate, FixedArrayBase* from,
- uint32_t from_start, FixedArrayBase* to,
+ static void CopyElementsImpl(Isolate* isolate, FixedArrayBase from,
+ uint32_t from_start, FixedArrayBase to,
ElementsKind from_kind, uint32_t to_start,
int packed_size, int copy_size) {
DCHECK(!to->IsDictionary());
@@ -4282,17 +4259,17 @@ class StringWrapperElementsAccessor
}
}
- static uint32_t NumberOfElementsImpl(JSObject* object,
- FixedArrayBase* backing_store) {
+ static uint32_t NumberOfElementsImpl(JSObject object,
+ FixedArrayBase backing_store) {
uint32_t length = GetString(object)->length();
return length +
BackingStoreAccessor::NumberOfElementsImpl(object, backing_store);
}
private:
- static String* GetString(JSObject* holder) {
+ static String GetString(JSObject holder) {
DCHECK(holder->IsJSValue());
- JSValue* js_value = JSValue::cast(holder);
+ JSValue js_value = JSValue::cast(holder);
DCHECK(js_value->value()->IsString());
return String::cast(js_value->value());
}
@@ -4324,8 +4301,7 @@ class SlowStringWrapperElementsAccessor
SlowStringWrapperElementsAccessor, DictionaryElementsAccessor,
ElementsKindTraits<SLOW_STRING_WRAPPER_ELEMENTS>>(name) {}
- static bool HasAccessorsImpl(JSObject* holder,
- FixedArrayBase* backing_store) {
+ static bool HasAccessorsImpl(JSObject holder, FixedArrayBase backing_store) {
return DictionaryElementsAccessor::HasAccessorsImpl(holder, backing_store);
}
};
@@ -4336,10 +4312,10 @@ class SlowStringWrapperElementsAccessor
void CheckArrayAbuse(Handle<JSObject> obj, const char* op, uint32_t index,
bool allow_appending) {
DisallowHeapAllocation no_allocation;
- Object* raw_length = nullptr;
+ Object raw_length;
const char* elements_type = "array";
if (obj->IsJSArray()) {
- JSArray* array = JSArray::cast(*obj);
+ JSArray array = JSArray::cast(*obj);
raw_length = array->length();
} else {
raw_length = Smi::FromInt(obj->elements()->length());
@@ -4462,14 +4438,14 @@ MaybeHandle<Object> ArrayConstructInitializeElements(Handle<JSArray> array,
return array;
}
-void CopyFastNumberJSArrayElementsToTypedArray(Context* context,
- JSArray* source,
- JSTypedArray* destination,
+void CopyFastNumberJSArrayElementsToTypedArray(Address raw_context,
+ Address raw_source,
+ Address raw_destination,
uintptr_t length,
uintptr_t offset) {
- DCHECK(context->IsContext());
- DCHECK(source->IsJSArray());
- DCHECK(destination->IsJSTypedArray());
+ Context context = Context::cast(Object(raw_context));
+ JSArray source = JSArray::cast(Object(raw_source));
+ JSTypedArray destination = JSTypedArray::cast(Object(raw_destination));
switch (destination->GetElementsKind()) {
#define TYPED_ARRAYS_CASE(Type, type, TYPE, ctype) \
@@ -4484,9 +4460,12 @@ void CopyFastNumberJSArrayElementsToTypedArray(Context* context,
}
}
-void CopyTypedArrayElementsToTypedArray(JSTypedArray* source,
- JSTypedArray* destination,
+void CopyTypedArrayElementsToTypedArray(Address raw_source,
+ Address raw_destination,
uintptr_t length, uintptr_t offset) {
+ JSTypedArray source = JSTypedArray::cast(Object(raw_source));
+ JSTypedArray destination = JSTypedArray::cast(Object(raw_destination));
+
switch (destination->GetElementsKind()) {
#define TYPED_ARRAYS_CASE(Type, type, TYPE, ctype) \
case TYPE##_ELEMENTS: \
@@ -4500,9 +4479,11 @@ void CopyTypedArrayElementsToTypedArray(JSTypedArray* source,
}
}
-void CopyTypedArrayElementsSlice(JSTypedArray* source,
- JSTypedArray* destination, uintptr_t start,
- uintptr_t end) {
+void CopyTypedArrayElementsSlice(Address raw_source, Address raw_destination,
+ uintptr_t start, uintptr_t end) {
+ JSTypedArray source = JSTypedArray::cast(Object(raw_source));
+ JSTypedArray destination = JSTypedArray::cast(Object(raw_destination));
+
destination->GetElementsAccessor()->CopyTypedArrayElementsSlice(
source, destination, start, end);
}
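These three exported functions are called directly from CSA-generated code, so after the by-value migration they take raw Address arguments and re-wrap them before use. The pattern in miniature (toy types, assumed rather than V8's):

#include <cstdint>

using Address = uintptr_t;

class Object {
 public:
  explicit Object(Address ptr) : ptr_(ptr) {}
  Address ptr() const { return ptr_; }

 private:
  Address ptr_;
};

// A CSA-facing entry point: no Handle<> parameters, only raw tagged
// addresses that are re-materialized as value types on the C++ side.
void ExampleCsaEntry(Address raw_receiver) {
  Object receiver(raw_receiver);
  static_cast<void>(receiver);  // a real entry point would cast and dispatch
}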
@@ -4538,7 +4519,7 @@ Handle<JSArray> ElementsAccessor::Concat(Isolate* isolate, Arguments* args,
DisallowHeapAllocation no_gc;
bool is_holey = false;
for (uint32_t i = 0; i < concat_size; i++) {
- Object* arg = (*args)[i];
+ Object arg = (*args)[i];
ElementsKind arg_kind = JSArray::cast(arg)->GetElementsKind();
has_raw_doubles = has_raw_doubles || IsDoubleElementsKind(arg_kind);
is_holey = is_holey || IsHoleyElementsKind(arg_kind);
@@ -4568,7 +4549,7 @@ Handle<JSArray> ElementsAccessor::Concat(Isolate* isolate, Arguments* args,
for (uint32_t i = 0; i < concat_size; i++) {
// It is crucial to keep |array| in a raw pointer form to avoid
// performance degradation.
- JSArray* array = JSArray::cast((*args)[i]);
+ JSArray array = JSArray::cast((*args)[i]);
uint32_t len = 0;
array->length()->ToArrayLength(&len);
if (len == 0) continue;
diff --git a/deps/v8/src/elements.h b/deps/v8/src/elements.h
index 8cdbf331ef..12f8ddc4b5 100644
--- a/deps/v8/src/elements.h
+++ b/deps/v8/src/elements.h
@@ -31,7 +31,7 @@ class ElementsAccessor {
// Checks the elements of an object for consistency, asserting when a problem
// is found.
- virtual void Validate(JSObject* obj) = 0;
+ virtual void Validate(JSObject obj) = 0;
// Returns true if a holder contains an element with the specified index
// without iterating up the prototype chain. The caller can optionally pass
@@ -43,23 +43,23 @@ class ElementsAccessor {
// index is ignored. Note that only Dictionary elements have custom
// PropertyAttributes associated, hence the |filter| argument is ignored for
// all but DICTIONARY_ELEMENTS and SLOW_SLOPPY_ARGUMENTS_ELEMENTS.
- virtual bool HasElement(JSObject* holder, uint32_t index,
- FixedArrayBase* backing_store,
+ virtual bool HasElement(JSObject holder, uint32_t index,
+ FixedArrayBase backing_store,
PropertyFilter filter = ALL_PROPERTIES) = 0;
- inline bool HasElement(JSObject* holder, uint32_t index,
+ inline bool HasElement(JSObject holder, uint32_t index,
PropertyFilter filter = ALL_PROPERTIES);
// Note: this is currently not implemented for string wrapper and
// typed array elements.
- virtual bool HasEntry(JSObject* holder, uint32_t entry) = 0;
+ virtual bool HasEntry(JSObject holder, uint32_t entry) = 0;
// TODO(cbruni): HasEntry and Get should not be exposed publicly with the
// entry parameter.
virtual Handle<Object> Get(Handle<JSObject> holder, uint32_t entry) = 0;
- virtual bool HasAccessors(JSObject* holder) = 0;
- virtual uint32_t NumberOfElements(JSObject* holder) = 0;
+ virtual bool HasAccessors(JSObject holder) = 0;
+ virtual uint32_t NumberOfElements(JSObject holder) = 0;
// Modifies the length data property as specified for JSArrays and resizes the
// underlying backing store accordingly. The method honors the semantics of
@@ -117,7 +117,7 @@ class ElementsAccessor {
static void InitializeOncePerProcess();
static void TearDown();
- virtual void Set(Handle<JSObject> holder, uint32_t entry, Object* value) = 0;
+ virtual void Set(Handle<JSObject> holder, uint32_t entry, Object value) = 0;
virtual void Add(Handle<JSObject> object, uint32_t index,
Handle<Object> value, PropertyAttributes attributes,
@@ -141,11 +141,11 @@ class ElementsAccessor {
virtual Handle<NumberDictionary> Normalize(Handle<JSObject> object) = 0;
- virtual uint32_t GetCapacity(JSObject* holder,
- FixedArrayBase* backing_store) = 0;
+ virtual uint32_t GetCapacity(JSObject holder,
+ FixedArrayBase backing_store) = 0;
- virtual Object* Fill(Handle<JSObject> receiver, Handle<Object> obj_value,
- uint32_t start, uint32_t end) = 0;
+ virtual Object Fill(Handle<JSObject> receiver, Handle<Object> obj_value,
+ uint32_t start, uint32_t end) = 0;
// Check an Object's own elements for an element (using SameValueZero
// semantics)
@@ -164,22 +164,22 @@ class ElementsAccessor {
Handle<Object> value,
uint32_t start) = 0;
- virtual void Reverse(JSObject* receiver) = 0;
+ virtual void Reverse(JSObject receiver) = 0;
virtual void CopyElements(Isolate* isolate, Handle<FixedArrayBase> source,
ElementsKind source_kind,
Handle<FixedArrayBase> destination, int size) = 0;
- virtual Object* CopyElements(Handle<Object> source,
- Handle<JSObject> destination, size_t length,
- uint32_t offset = 0) = 0;
+ virtual Object CopyElements(Handle<Object> source,
+ Handle<JSObject> destination, size_t length,
+ uint32_t offset = 0) = 0;
virtual Handle<FixedArray> CreateListFromArrayLike(Isolate* isolate,
Handle<JSObject> object,
uint32_t length) = 0;
- virtual void CopyTypedArrayElementsSlice(JSTypedArray* source,
- JSTypedArray* destination,
+ virtual void CopyTypedArrayElementsSlice(JSTypedArray source,
+ JSTypedArray destination,
size_t start, size_t end) = 0;
protected:
@@ -193,11 +193,11 @@ class ElementsAccessor {
// indices are equivalent to entries. In the NumberDictionary
// ElementsAccessor, entries are mapped to an index using the KeyAt method on
// the NumberDictionary.
- virtual uint32_t GetEntryForIndex(Isolate* isolate, JSObject* holder,
- FixedArrayBase* backing_store,
+ virtual uint32_t GetEntryForIndex(Isolate* isolate, JSObject holder,
+ FixedArrayBase backing_store,
uint32_t index) = 0;
- virtual PropertyDetails GetDetails(JSObject* holder, uint32_t entry) = 0;
+ virtual PropertyDetails GetDetails(JSObject holder, uint32_t entry) = 0;
virtual void Reconfigure(Handle<JSObject> object,
Handle<FixedArrayBase> backing_store, uint32_t entry,
Handle<Object> value,
@@ -210,7 +210,7 @@ class ElementsAccessor {
// raw pointer parameter |source_holder| in the function that allocates.
// This is done intentionally to avoid ArrayConcat() builtin performance
// degradation.
- virtual void CopyElements(JSObject* source_holder, uint32_t source_start,
+ virtual void CopyElements(JSObject source_holder, uint32_t source_start,
ElementsKind source_kind,
Handle<FixedArrayBase> destination,
uint32_t destination_start, int copy_size) = 0;
@@ -229,17 +229,21 @@ V8_WARN_UNUSED_RESULT MaybeHandle<Object> ArrayConstructInitializeElements(
Handle<JSArray> array, Arguments* args);
// Called directly from CSA.
-void CopyFastNumberJSArrayElementsToTypedArray(Context* context,
- JSArray* source,
- JSTypedArray* destination,
+// {raw_context}: Context pointer.
+// {raw_source}: JSArray pointer.
+// {raw_destination}: JSTypedArray pointer.
+void CopyFastNumberJSArrayElementsToTypedArray(Address raw_context,
+ Address raw_source,
+ Address raw_destination,
uintptr_t length,
uintptr_t offset);
-void CopyTypedArrayElementsToTypedArray(JSTypedArray* source,
- JSTypedArray* destination,
+// {raw_source}, {raw_destination}: JSTypedArray pointers.
+void CopyTypedArrayElementsToTypedArray(Address raw_source,
+ Address raw_destination,
uintptr_t length, uintptr_t offset);
-void CopyTypedArrayElementsSlice(JSTypedArray* source,
- JSTypedArray* destination, uintptr_t start,
- uintptr_t end);
+// {raw_source}, {raw_destination}: JSTypedArray pointers.
+void CopyTypedArrayElementsSlice(Address raw_source, Address raw_destination,
+ uintptr_t start, uintptr_t end);
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/execution.cc b/deps/v8/src/execution.cc
index 792d20ee58..c244121d7b 100644
--- a/deps/v8/src/execution.cc
+++ b/deps/v8/src/execution.cc
@@ -9,15 +9,12 @@
#include "src/compiler-dispatcher/optimizing-compile-dispatcher.h"
#include "src/debug/debug.h"
#include "src/isolate-inl.h"
-#include "src/messages.h"
#include "src/runtime-profiler.h"
#include "src/vm-state-inl.h"
namespace v8 {
namespace internal {
-StackGuard::StackGuard() : isolate_(nullptr) {}
-
void StackGuard::set_interrupt_limits(const ExecutionAccess& lock) {
DCHECK_NOT_NULL(isolate_);
thread_local_.set_jslimit(kInterruptLimit);
@@ -25,7 +22,6 @@ void StackGuard::set_interrupt_limits(const ExecutionAccess& lock) {
isolate_->heap()->SetStackLimits();
}
-
void StackGuard::reset_limits(const ExecutionAccess& lock) {
DCHECK_NOT_NULL(isolate_);
thread_local_.set_jslimit(thread_local_.real_jslimit_);
@@ -33,31 +29,149 @@ void StackGuard::reset_limits(const ExecutionAccess& lock) {
isolate_->heap()->SetStackLimits();
}
+namespace {
-static void PrintDeserializedCodeInfo(Handle<JSFunction> function) {
- if (function->code() == function->shared()->GetCode() &&
- function->shared()->deserialized()) {
- PrintF("[Running deserialized script");
- Object* script = function->shared()->script();
- if (script->IsScript()) {
- Object* name = Script::cast(script)->name();
- if (name->IsString()) {
- PrintF(": %s", String::cast(name)->ToCString().get());
- }
- }
- PrintF("]\n");
+Handle<Object> NormalizeReceiver(Isolate* isolate, Handle<Object> receiver) {
+ // Convert calls on global objects to be calls on the global
+ // receiver instead to avoid having a 'this' pointer which refers
+ // directly to a global object.
+ if (receiver->IsJSGlobalObject()) {
+ return handle(Handle<JSGlobalObject>::cast(receiver)->global_proxy(),
+ isolate);
}
+ return receiver;
}
+struct InvokeParams {
+ static InvokeParams SetUpForNew(Isolate* isolate, Handle<Object> constructor,
+ Handle<Object> new_target, int argc,
+ Handle<Object>* argv);
-namespace {
+ static InvokeParams SetUpForCall(Isolate* isolate, Handle<Object> callable,
+ Handle<Object> receiver, int argc,
+ Handle<Object>* argv);
+
+ static InvokeParams SetUpForTryCall(
+ Isolate* isolate, Handle<Object> callable, Handle<Object> receiver,
+ int argc, Handle<Object>* argv,
+ Execution::MessageHandling message_handling,
+ MaybeHandle<Object>* exception_out);
+
+ static InvokeParams SetUpForRunMicrotasks(Isolate* isolate,
+ MicrotaskQueue* microtask_queue,
+ MaybeHandle<Object>* exception_out);
+
+ Handle<Object> target;
+ Handle<Object> receiver;
+ int argc;
+ Handle<Object>* argv;
+ Handle<Object> new_target;
+
+ MicrotaskQueue* microtask_queue;
+
+ Execution::MessageHandling message_handling;
+ MaybeHandle<Object>* exception_out;
+
+ bool is_construct;
+ Execution::Target execution_target;
+};
+
+// static
+InvokeParams InvokeParams::SetUpForNew(Isolate* isolate,
+ Handle<Object> constructor,
+ Handle<Object> new_target, int argc,
+ Handle<Object>* argv) {
+ InvokeParams params;
+ params.target = constructor;
+ params.receiver = isolate->factory()->undefined_value();
+ params.argc = argc;
+ params.argv = argv;
+ params.new_target = new_target;
+ params.microtask_queue = nullptr;
+ params.message_handling = Execution::MessageHandling::kReport;
+ params.exception_out = nullptr;
+ params.is_construct = true;
+ params.execution_target = Execution::Target::kCallable;
+ return params;
+}
+
+// static
+InvokeParams InvokeParams::SetUpForCall(Isolate* isolate,
+ Handle<Object> callable,
+ Handle<Object> receiver, int argc,
+ Handle<Object>* argv) {
+ InvokeParams params;
+ params.target = callable;
+ params.receiver = NormalizeReceiver(isolate, receiver);
+ params.argc = argc;
+ params.argv = argv;
+ params.new_target = isolate->factory()->undefined_value();
+ params.microtask_queue = nullptr;
+ params.message_handling = Execution::MessageHandling::kReport;
+ params.exception_out = nullptr;
+ params.is_construct = false;
+ params.execution_target = Execution::Target::kCallable;
+ return params;
+}
+
+// static
+InvokeParams InvokeParams::SetUpForTryCall(
+ Isolate* isolate, Handle<Object> callable, Handle<Object> receiver,
+ int argc, Handle<Object>* argv, Execution::MessageHandling message_handling,
+ MaybeHandle<Object>* exception_out) {
+ InvokeParams params;
+ params.target = callable;
+ params.receiver = NormalizeReceiver(isolate, receiver);
+ params.argc = argc;
+ params.argv = argv;
+ params.new_target = isolate->factory()->undefined_value();
+ params.microtask_queue = nullptr;
+ params.message_handling = message_handling;
+ params.exception_out = exception_out;
+ params.is_construct = false;
+ params.execution_target = Execution::Target::kCallable;
+ return params;
+}
+
+// static
+InvokeParams InvokeParams::SetUpForRunMicrotasks(
+ Isolate* isolate, MicrotaskQueue* microtask_queue,
+ MaybeHandle<Object>* exception_out) {
+ auto undefined = isolate->factory()->undefined_value();
+ InvokeParams params;
+ params.target = undefined;
+ params.receiver = undefined;
+ params.argc = 0;
+ params.argv = nullptr;
+ params.new_target = undefined;
+ params.microtask_queue = microtask_queue;
+ params.message_handling = Execution::MessageHandling::kReport;
+ params.exception_out = exception_out;
+ params.is_construct = false;
+ params.execution_target = Execution::Target::kRunMicrotasks;
+ return params;
+}
+
+Handle<Code> JSEntry(Isolate* isolate, Execution::Target execution_target,
+ bool is_construct) {
+ if (is_construct) {
+ DCHECK_EQ(Execution::Target::kCallable, execution_target);
+ return BUILTIN_CODE(isolate, JSConstructEntry);
+ } else if (execution_target == Execution::Target::kCallable) {
+ DCHECK(!is_construct);
+ return BUILTIN_CODE(isolate, JSEntry);
+ } else if (execution_target == Execution::Target::kRunMicrotasks) {
+ DCHECK(!is_construct);
+ return BUILTIN_CODE(isolate, JSRunMicrotasksEntry);
+ }
+ UNREACHABLE();
+}
-V8_WARN_UNUSED_RESULT MaybeHandle<Object> Invoke(
- Isolate* isolate, bool is_construct, Handle<Object> target,
- Handle<Object> receiver, int argc, Handle<Object> args[],
- Handle<Object> new_target, Execution::MessageHandling message_handling,
- Execution::Target execution_target) {
- DCHECK(!receiver->IsJSGlobalObject());
+V8_WARN_UNUSED_RESULT MaybeHandle<Object> Invoke(Isolate* isolate,
+ const InvokeParams& params) {
+ RuntimeCallTimerScope timer(isolate, RuntimeCallCounterId::kInvoke);
+ DCHECK(!params.receiver->IsJSGlobalObject());
+ DCHECK_LE(params.argc, FixedArray::kMaxLength);
#ifdef USE_SIMULATOR
// Simulators use separate stacks for C++ and JS. JS stack overflow checks
@@ -67,7 +181,7 @@ V8_WARN_UNUSED_RESULT MaybeHandle<Object> Invoke(
StackLimitCheck check(isolate);
if (check.HasOverflowed()) {
isolate->StackOverflow();
- if (message_handling == Execution::MessageHandling::kReport) {
+ if (params.message_handling == Execution::MessageHandling::kReport) {
isolate->ReportPendingMessages();
}
return MaybeHandle<Object>();
@@ -76,22 +190,25 @@ V8_WARN_UNUSED_RESULT MaybeHandle<Object> Invoke(
// api callbacks can be called directly, unless we want to take the detour
// through JS to set up a frame for break-at-entry.
- if (target->IsJSFunction()) {
- Handle<JSFunction> function = Handle<JSFunction>::cast(target);
- if ((!is_construct || function->IsConstructor()) &&
+ if (params.target->IsJSFunction()) {
+ Handle<JSFunction> function = Handle<JSFunction>::cast(params.target);
+ if ((!params.is_construct || function->IsConstructor()) &&
function->shared()->IsApiFunction() &&
!function->shared()->BreakAtEntry()) {
SaveContext save(isolate);
isolate->set_context(function->context());
DCHECK(function->context()->global_object()->IsJSGlobalObject());
- if (is_construct) receiver = isolate->factory()->the_hole_value();
+
+ Handle<Object> receiver = params.is_construct
+ ? isolate->factory()->the_hole_value()
+ : params.receiver;
auto value = Builtins::InvokeApiFunction(
- isolate, is_construct, function, receiver, argc, args,
- Handle<HeapObject>::cast(new_target));
+ isolate, params.is_construct, function, receiver, params.argc,
+ params.argv, Handle<HeapObject>::cast(params.new_target));
bool has_exception = value.is_null();
DCHECK(has_exception == isolate->has_pending_exception());
if (has_exception) {
- if (message_handling == Execution::MessageHandling::kReport) {
+ if (params.message_handling == Execution::MessageHandling::kReport) {
isolate->ReportPendingMessages();
}
return MaybeHandle<Object>();
@@ -107,52 +224,63 @@ V8_WARN_UNUSED_RESULT MaybeHandle<Object> Invoke(
CHECK(AllowJavascriptExecution::IsAllowed(isolate));
if (!ThrowOnJavascriptExecution::IsAllowed(isolate)) {
isolate->ThrowIllegalOperation();
- if (message_handling == Execution::MessageHandling::kReport) {
+ if (params.message_handling == Execution::MessageHandling::kReport) {
isolate->ReportPendingMessages();
}
return MaybeHandle<Object>();
}
+ if (!DumpOnJavascriptExecution::IsAllowed(isolate)) {
+ V8::GetCurrentPlatform()->DumpWithoutCrashing();
+ return isolate->factory()->undefined_value();
+ }
// Placeholder for return value.
- Object* value = nullptr;
-
- using JSEntryFunction =
- GeneratedCode<Object*(Object * new_target, Object * target,
- Object * receiver, int argc, Object*** args)>;
-
- Handle<Code> code;
- switch (execution_target) {
- case Execution::Target::kCallable:
- code = is_construct ? isolate->factory()->js_construct_entry_code()
- : isolate->factory()->js_entry_code();
- break;
- case Execution::Target::kRunMicrotasks:
- code = isolate->factory()->js_run_microtasks_entry_code();
- break;
- default:
- UNREACHABLE();
- }
+ Object value;
+ Handle<Code> code =
+ JSEntry(isolate, params.execution_target, params.is_construct);
{
// Save and restore context around invocation and block the
// allocation of handles without explicit handle scopes.
SaveContext save(isolate);
SealHandleScope shs(isolate);
- JSEntryFunction stub_entry =
- JSEntryFunction::FromAddress(isolate, code->entry());
if (FLAG_clear_exceptions_on_js_entry) isolate->clear_pending_exception();
- // Call the function through the right JS entry stub.
- Object* orig_func = *new_target;
- Object* func = *target;
- Object* recv = *receiver;
- Object*** argv = reinterpret_cast<Object***>(args);
- if (FLAG_profile_deserialization && target->IsJSFunction()) {
- PrintDeserializedCodeInfo(Handle<JSFunction>::cast(target));
+ if (params.execution_target == Execution::Target::kCallable) {
+ // clang-format off
+ // {new_target}, {target}, {receiver}, return value: tagged pointers
+ // {argv}: pointer to array of tagged pointers
+ using JSEntryFunction = GeneratedCode<Address(
+ Address root_register_value, Address new_target, Address target,
+ Address receiver, intptr_t argc, Address** argv)>;
+ // clang-format on
+ JSEntryFunction stub_entry =
+ JSEntryFunction::FromAddress(isolate, code->InstructionStart());
+
+ Address orig_func = params.new_target->ptr();
+ Address func = params.target->ptr();
+ Address recv = params.receiver->ptr();
+ Address** argv = reinterpret_cast<Address**>(params.argv);
+ RuntimeCallTimerScope timer(isolate, RuntimeCallCounterId::kJS_Execution);
+ value = Object(stub_entry.Call(isolate->isolate_data()->isolate_root(),
+ orig_func, func, recv, params.argc, argv));
+ } else {
+ DCHECK_EQ(Execution::Target::kRunMicrotasks, params.execution_target);
+
+ // clang-format off
+ // return value: tagged pointers
+ // {microtask_queue}: pointer to a C++ object
+ using JSEntryFunction = GeneratedCode<Address(
+ Address root_register_value, MicrotaskQueue* microtask_queue)>;
+ // clang-format on
+ JSEntryFunction stub_entry =
+ JSEntryFunction::FromAddress(isolate, code->InstructionStart());
+
+ RuntimeCallTimerScope timer(isolate, RuntimeCallCounterId::kJS_Execution);
+ value = Object(stub_entry.Call(isolate->isolate_data()->isolate_root(),
+ params.microtask_queue));
}
- RuntimeCallTimerScope timer(isolate, RuntimeCallCounterId::kJS_Execution);
- value = stub_entry.Call(orig_func, func, recv, argc, argv);
}
#ifdef VERIFY_HEAP
@@ -165,7 +293,7 @@ V8_WARN_UNUSED_RESULT MaybeHandle<Object> Invoke(
bool has_exception = value->IsException(isolate);
DCHECK(has_exception == isolate->has_pending_exception());
if (has_exception) {
- if (message_handling == Execution::MessageHandling::kReport) {
+ if (params.message_handling == Execution::MessageHandling::kReport) {
isolate->ReportPendingMessages();
}
return MaybeHandle<Object>();
@@ -176,59 +304,16 @@ V8_WARN_UNUSED_RESULT MaybeHandle<Object> Invoke(
return Handle<Object>(value, isolate);
}
-MaybeHandle<Object> CallInternal(Isolate* isolate, Handle<Object> callable,
- Handle<Object> receiver, int argc,
- Handle<Object> argv[],
- Execution::MessageHandling message_handling,
- Execution::Target target) {
- // Convert calls on global objects to be calls on the global
- // receiver instead to avoid having a 'this' pointer which refers
- // directly to a global object.
- if (receiver->IsJSGlobalObject()) {
- receiver =
- handle(Handle<JSGlobalObject>::cast(receiver)->global_proxy(), isolate);
- }
- return Invoke(isolate, false, callable, receiver, argc, argv,
- isolate->factory()->undefined_value(), message_handling,
- target);
-}
-
-} // namespace
-
-// static
-MaybeHandle<Object> Execution::Call(Isolate* isolate, Handle<Object> callable,
- Handle<Object> receiver, int argc,
- Handle<Object> argv[]) {
- return CallInternal(isolate, callable, receiver, argc, argv,
- MessageHandling::kReport, Execution::Target::kCallable);
-}
-
-
-// static
-MaybeHandle<Object> Execution::New(Isolate* isolate, Handle<Object> constructor,
- int argc, Handle<Object> argv[]) {
- return New(isolate, constructor, constructor, argc, argv);
-}
-
-
-// static
-MaybeHandle<Object> Execution::New(Isolate* isolate, Handle<Object> constructor,
- Handle<Object> new_target, int argc,
- Handle<Object> argv[]) {
- return Invoke(isolate, true, constructor,
- isolate->factory()->undefined_value(), argc, argv, new_target,
- MessageHandling::kReport, Execution::Target::kCallable);
-}
-
-MaybeHandle<Object> Execution::TryCall(
- Isolate* isolate, Handle<Object> callable, Handle<Object> receiver,
- int argc, Handle<Object> args[], MessageHandling message_handling,
- MaybeHandle<Object>* exception_out, Target target) {
+MaybeHandle<Object> InvokeWithTryCatch(Isolate* isolate,
+ const InvokeParams& params) {
bool is_termination = false;
MaybeHandle<Object> maybe_result;
- if (exception_out != nullptr) *exception_out = MaybeHandle<Object>();
- DCHECK_IMPLIES(message_handling == MessageHandling::kKeepPending,
- exception_out == nullptr);
+ if (params.exception_out != nullptr) {
+ *params.exception_out = MaybeHandle<Object>();
+ }
+ DCHECK_IMPLIES(
+ params.message_handling == Execution::MessageHandling::kKeepPending,
+ params.exception_out == nullptr);
// Enter a try-block while executing the JavaScript code. To avoid
// duplicate error printing it must be non-verbose. Also, to avoid
// creating message objects during stack overflow we shouldn't
@@ -238,8 +323,7 @@ MaybeHandle<Object> Execution::TryCall(
catcher.SetVerbose(false);
catcher.SetCaptureMessage(false);
- maybe_result = CallInternal(isolate, callable, receiver, argc, args,
- message_handling, target);
+ maybe_result = Invoke(isolate, params);
if (maybe_result.is_null()) {
DCHECK(isolate->has_pending_exception());
@@ -247,13 +331,13 @@ MaybeHandle<Object> Execution::TryCall(
ReadOnlyRoots(isolate).termination_exception()) {
is_termination = true;
} else {
- if (exception_out != nullptr) {
+ if (params.exception_out != nullptr) {
DCHECK(catcher.HasCaught());
DCHECK(isolate->external_caught_exception());
- *exception_out = v8::Utils::OpenHandle(*catcher.Exception());
+ *params.exception_out = v8::Utils::OpenHandle(*catcher.Exception());
}
}
- if (message_handling == MessageHandling::kReport) {
+ if (params.message_handling == Execution::MessageHandling::kReport) {
isolate->OptionalRescheduleException(true);
}
}
@@ -265,12 +349,50 @@ MaybeHandle<Object> Execution::TryCall(
return maybe_result;
}
-MaybeHandle<Object> Execution::RunMicrotasks(
- Isolate* isolate, MessageHandling message_handling,
+} // namespace
+
+// static
+MaybeHandle<Object> Execution::Call(Isolate* isolate, Handle<Object> callable,
+ Handle<Object> receiver, int argc,
+ Handle<Object> argv[]) {
+ return Invoke(isolate, InvokeParams::SetUpForCall(isolate, callable, receiver,
+ argc, argv));
+}
+
+// static
+MaybeHandle<Object> Execution::New(Isolate* isolate, Handle<Object> constructor,
+ int argc, Handle<Object> argv[]) {
+ return New(isolate, constructor, constructor, argc, argv);
+}
+
+// static
+MaybeHandle<Object> Execution::New(Isolate* isolate, Handle<Object> constructor,
+ Handle<Object> new_target, int argc,
+ Handle<Object> argv[]) {
+ return Invoke(isolate, InvokeParams::SetUpForNew(isolate, constructor,
+ new_target, argc, argv));
+}
+
+// static
+MaybeHandle<Object> Execution::TryCall(Isolate* isolate,
+ Handle<Object> callable,
+ Handle<Object> receiver, int argc,
+ Handle<Object> argv[],
+ MessageHandling message_handling,
+ MaybeHandle<Object>* exception_out) {
+ return InvokeWithTryCatch(
+ isolate,
+ InvokeParams::SetUpForTryCall(isolate, callable, receiver, argc, argv,
+ message_handling, exception_out));
+}
+
+// static
+MaybeHandle<Object> Execution::TryRunMicrotasks(
+ Isolate* isolate, MicrotaskQueue* microtask_queue,
MaybeHandle<Object>* exception_out) {
- auto undefined = isolate->factory()->undefined_value();
- return TryCall(isolate, undefined, undefined, 0, {}, message_handling,
- exception_out, Target::kRunMicrotasks);
+ return InvokeWithTryCatch(
+ isolate, InvokeParams::SetUpForRunMicrotasks(isolate, microtask_queue,
+ exception_out));
}
void StackGuard::SetStackLimit(uintptr_t limit) {
@@ -494,8 +616,7 @@ void StackGuard::InitThread(const ExecutionAccess& lock) {
// --- C a l l s t o n a t i v e s ---
-
-Object* StackGuard::HandleInterrupts() {
+Object StackGuard::HandleInterrupts() {
if (FLAG_verify_predictable) {
// Advance synthetic time by making a time request.
isolate_->heap()->MonotonicallyIncreasingTimeInMs();
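
The execution.cc rewrite above funnels every entry path through a single InvokeParams struct plus one Invoke() worker, instead of threading half a dozen positional arguments through CallInternal. The caller-visible contract is unchanged; as a usage sketch, lifted from the new Execution::Call itself:

  MaybeHandle<Object> result =
      Execution::Call(isolate, callable, receiver, argc, argv);
  // internally this is now:
  //   Invoke(isolate, InvokeParams::SetUpForCall(isolate, callable,
  //                                              receiver, argc, argv));

so the behavioral knobs (message handling, exception capture, construct vs. call, microtask target) live in named struct fields set up by the SetUpFor* factories rather than in trailing parameters.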
diff --git a/deps/v8/src/execution.h b/deps/v8/src/execution.h
index 44491c6e67..03123f94f2 100644
--- a/deps/v8/src/execution.h
+++ b/deps/v8/src/execution.h
@@ -11,6 +11,8 @@
namespace v8 {
namespace internal {
+class MicrotaskQueue;
+
template <typename T>
class Handle;
@@ -50,12 +52,11 @@ class Execution final : public AllStatic {
Handle<Object> receiver, int argc,
Handle<Object> argv[],
MessageHandling message_handling,
- MaybeHandle<Object>* exception_out,
- Target target = Target::kCallable);
+ MaybeHandle<Object>* exception_out);
// Convenience method for performing RunMicrotasks
- static MaybeHandle<Object> RunMicrotasks(Isolate* isolate,
- MessageHandling message_handling,
- MaybeHandle<Object>* exception_out);
+ static MaybeHandle<Object> TryRunMicrotasks(
+ Isolate* isolate, MicrotaskQueue* microtask_queue,
+ MaybeHandle<Object>* exception_out);
};
@@ -67,6 +68,8 @@ class InterruptsScope;
// invocation.
class V8_EXPORT_PRIVATE StackGuard final {
public:
+ explicit StackGuard(Isolate* isolate) : isolate_(isolate) {}
+
// Pass the address beyond which the stack should not grow. The stack
// is assumed to grow downwards.
void SetStackLimit(uintptr_t limit);
@@ -133,11 +136,9 @@ class V8_EXPORT_PRIVATE StackGuard final {
// If the stack guard is triggered, but it is not an actual
// stack overflow, then handle the interruption accordingly.
- Object* HandleInterrupts();
+ Object HandleInterrupts();
private:
- StackGuard();
-
bool CheckInterrupt(InterruptFlag flag);
void RequestInterrupt(InterruptFlag flag);
void ClearInterrupt(InterruptFlag flag);
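
Two smaller changes ride along in execution.h: StackGuard now requires an Isolate at construction (the default constructor is gone), and HandleInterrupts returns Object by value rather than Object*. The latter reflects the patch-wide migration of Object from a pointer type to a value type wrapping a tagged Address; callers keep the same arrow syntax. A sketch, assuming an isolate in scope:

  Object result = isolate->stack_guard()->HandleInterrupts();
  if (result->IsException(isolate)) {
    // unwind as before; only the static type of 'result' changed
  }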
diff --git a/deps/v8/src/extensions/statistics-extension.cc b/deps/v8/src/extensions/statistics-extension.cc
index 25081b69e0..96a805f328 100644
--- a/deps/v8/src/extensions/statistics-extension.cc
+++ b/deps/v8/src/extensions/statistics-extension.cc
@@ -5,7 +5,7 @@
#include "src/extensions/statistics-extension.h"
#include "src/counters.h"
-#include "src/heap/heap-inl.h"
+#include "src/heap/heap-inl.h" // crbug.com/v8/8499
#include "src/isolate.h"
namespace v8 {
@@ -109,6 +109,10 @@ void StatisticsExtension::GetCounters(
{heap->lo_space()->Size(), "lo_space_live_bytes"},
{heap->lo_space()->Available(), "lo_space_available_bytes"},
{heap->lo_space()->CommittedMemory(), "lo_space_commited_bytes"},
+ {heap->code_lo_space()->Size(), "code_lo_space_live_bytes"},
+ {heap->code_lo_space()->Available(), "code_lo_space_available_bytes"},
+ {heap->code_lo_space()->CommittedMemory(),
+ "code_lo_space_commited_bytes"},
};
for (size_t i = 0; i < arraysize(numbers); i++) {
@@ -120,14 +124,14 @@ void StatisticsExtension::GetCounters(
args.GetReturnValue().Set(result);
HeapIterator iterator(reinterpret_cast<Isolate*>(args.GetIsolate())->heap());
- HeapObject* obj;
int reloc_info_total = 0;
int source_position_table_total = 0;
- while ((obj = iterator.next()) != nullptr) {
+ for (HeapObject obj = iterator.next(); !obj.is_null();
+ obj = iterator.next()) {
if (obj->IsCode()) {
- Code* code = Code::cast(obj);
+ Code code = Code::cast(obj);
reloc_info_total += code->relocation_info()->Size();
- ByteArray* source_position_table = code->SourcePositionTable();
+ ByteArray source_position_table = code->SourcePositionTable();
if (source_position_table->length() > 0) {
source_position_table_total += code->SourcePositionTable()->Size();
}
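
The statistics-extension hunk shows the same value-type migration at iteration sites: HeapIterator::next() now yields a HeapObject value whose "no more objects" sentinel is a null tagged value, so the nullptr-compare while-loop becomes an is_null() check:

  // The post-patch idiom, as used above:
  for (HeapObject obj = iterator.next(); !obj.is_null();
       obj = iterator.next()) {
    // inspect obj
  }

The new code_lo_space counters are a separate addition that surfaces the code large-object space alongside lo_space in the statistics output.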
diff --git a/deps/v8/src/external-reference-table.cc b/deps/v8/src/external-reference-table.cc
index 47bc1b9ee4..9c4d0d1d89 100644
--- a/deps/v8/src/external-reference-table.cc
+++ b/deps/v8/src/external-reference-table.cc
@@ -18,9 +18,54 @@
namespace v8 {
namespace internal {
+#define ADD_EXT_REF_NAME(name, desc) desc,
+#define ADD_BUILTIN_NAME(Name, ...) "Builtin_" #Name,
+#define ADD_RUNTIME_FUNCTION(name, ...) "Runtime::" #name,
+#define ADD_ISOLATE_ADDR(Name, name) "Isolate::" #name "_address",
+#define ADD_ACCESSOR_INFO_NAME(_, __, AccessorName, ...) \
+ "Accessors::" #AccessorName "Getter",
+#define ADD_ACCESSOR_SETTER_NAME(name) "Accessors::" #name,
+// static
+const char* const
+ ExternalReferenceTable::ref_name_[ExternalReferenceTable::kSize] = {
+ // Special references:
+ "nullptr",
+ // External references:
+ EXTERNAL_REFERENCE_LIST(ADD_EXT_REF_NAME)
+ EXTERNAL_REFERENCE_LIST_WITH_ISOLATE(ADD_EXT_REF_NAME)
+ // Builtins:
+ BUILTIN_LIST_C(ADD_BUILTIN_NAME)
+ // Runtime functions:
+ FOR_EACH_INTRINSIC(ADD_RUNTIME_FUNCTION)
+ // Isolate addresses:
+ FOR_EACH_ISOLATE_ADDRESS_NAME(ADD_ISOLATE_ADDR)
+ // Accessors:
+ ACCESSOR_INFO_LIST_GENERATOR(ADD_ACCESSOR_INFO_NAME, /* not used */)
+ ACCESSOR_SETTER_LIST(ADD_ACCESSOR_SETTER_NAME)
+ // Stub cache:
+ "Load StubCache::primary_->key",
+ "Load StubCache::primary_->value",
+ "Load StubCache::primary_->map",
+ "Load StubCache::secondary_->key",
+ "Load StubCache::secondary_->value",
+ "Load StubCache::secondary_->map",
+ "Store StubCache::primary_->key",
+ "Store StubCache::primary_->value",
+ "Store StubCache::primary_->map",
+ "Store StubCache::secondary_->key",
+ "Store StubCache::secondary_->value",
+ "Store StubCache::secondary_->map",
+};
+#undef ADD_EXT_REF_NAME
+#undef ADD_BUILTIN_NAME
+#undef ADD_RUNTIME_FUNCTION
+#undef ADD_ISOLATE_ADDR
+#undef ADD_ACCESSOR_INFO_NAME
+#undef ADD_ACCESSOR_SETTER_NAME
+
// Forward declarations for C++ builtins.
#define FORWARD_DECLARE(Name) \
- Object* Builtin_##Name(int argc, Object** args, Isolate* isolate);
+ Address Builtin_##Name(int argc, Address* args, Isolate* isolate);
BUILTIN_LIST_C(FORWARD_DECLARE)
#undef FORWARD_DECLARE
@@ -28,7 +73,7 @@ void ExternalReferenceTable::Init(Isolate* isolate) {
int index = 0;
// kNullAddress is preserved through serialization/deserialization.
- Add(kNullAddress, "nullptr", &index);
+ Add(kNullAddress, &index);
AddReferences(isolate, &index);
AddBuiltins(&index);
AddRuntimeFunctions(&index);
@@ -54,21 +99,20 @@ const char* ExternalReferenceTable::ResolveSymbol(void* address) {
#endif // SYMBOLIZE_FUNCTION
}
-void ExternalReferenceTable::Add(Address address, const char* name,
- int* index) {
- refs_[(*index)++] = {address, name};
+void ExternalReferenceTable::Add(Address address, int* index) {
+ ref_addr_[(*index)++] = address;
}
void ExternalReferenceTable::AddReferences(Isolate* isolate, int* index) {
CHECK_EQ(kSpecialReferenceCount, *index);
#define ADD_EXTERNAL_REFERENCE(name, desc) \
- Add(ExternalReference::name().address(), desc, index);
+ Add(ExternalReference::name().address(), index);
EXTERNAL_REFERENCE_LIST(ADD_EXTERNAL_REFERENCE)
#undef ADD_EXTERNAL_REFERENCE
#define ADD_EXTERNAL_REFERENCE(name, desc) \
- Add(ExternalReference::name(isolate).address(), desc, index);
+ Add(ExternalReference::name(isolate).address(), index);
EXTERNAL_REFERENCE_LIST_WITH_ISOLATE(ADD_EXTERNAL_REFERENCE)
#undef ADD_EXTERNAL_REFERENCE
@@ -78,18 +122,13 @@ void ExternalReferenceTable::AddReferences(Isolate* isolate, int* index) {
void ExternalReferenceTable::AddBuiltins(int* index) {
CHECK_EQ(kSpecialReferenceCount + kExternalReferenceCount, *index);
- struct CBuiltinEntry {
- Address address;
- const char* name;
- };
- static const CBuiltinEntry c_builtins[] = {
-#define DEF_ENTRY(Name, ...) {FUNCTION_ADDR(&Builtin_##Name), "Builtin_" #Name},
+ static const Address c_builtins[] = {
+#define DEF_ENTRY(Name, ...) FUNCTION_ADDR(&Builtin_##Name),
BUILTIN_LIST_C(DEF_ENTRY)
#undef DEF_ENTRY
};
- for (unsigned i = 0; i < arraysize(c_builtins); ++i) {
- Add(ExternalReference::Create(c_builtins[i].address).address(),
- c_builtins[i].name, index);
+ for (Address addr : c_builtins) {
+ Add(ExternalReference::Create(addr).address(), index);
}
CHECK_EQ(kSpecialReferenceCount + kExternalReferenceCount +
@@ -102,20 +141,14 @@ void ExternalReferenceTable::AddRuntimeFunctions(int* index) {
kBuiltinsReferenceCount,
*index);
- struct RuntimeEntry {
- Runtime::FunctionId id;
- const char* name;
- };
-
- static const RuntimeEntry runtime_functions[] = {
-#define RUNTIME_ENTRY(name, i1, i2) {Runtime::k##name, "Runtime::" #name},
+ static constexpr Runtime::FunctionId runtime_functions[] = {
+#define RUNTIME_ENTRY(name, ...) Runtime::k##name,
FOR_EACH_INTRINSIC(RUNTIME_ENTRY)
#undef RUNTIME_ENTRY
};
- for (unsigned i = 0; i < arraysize(runtime_functions); ++i) {
- ExternalReference ref = ExternalReference::Create(runtime_functions[i].id);
- Add(ref.address(), runtime_functions[i].name, index);
+ for (Runtime::FunctionId fId : runtime_functions) {
+ Add(ExternalReference::Create(fId).address(), index);
}
CHECK_EQ(kSpecialReferenceCount + kExternalReferenceCount +
@@ -128,16 +161,8 @@ void ExternalReferenceTable::AddIsolateAddresses(Isolate* isolate, int* index) {
kBuiltinsReferenceCount + kRuntimeReferenceCount,
*index);
- // Top addresses
- static const char* address_names[] = {
-#define BUILD_NAME_LITERAL(Name, name) "Isolate::" #name "_address",
- FOR_EACH_ISOLATE_ADDRESS_NAME(BUILD_NAME_LITERAL) nullptr
-#undef BUILD_NAME_LITERAL
- };
-
for (int i = 0; i < IsolateAddressId::kIsolateAddressCount; ++i) {
- Add(isolate->get_address_from_id(static_cast<IsolateAddressId>(i)),
- address_names[i], index);
+ Add(isolate->get_address_from_id(static_cast<IsolateAddressId>(i)), index);
}
CHECK_EQ(kSpecialReferenceCount + kExternalReferenceCount +
@@ -152,32 +177,20 @@ void ExternalReferenceTable::AddAccessors(int* index) {
kIsolateAddressReferenceCount,
*index);
- // Accessors
- struct AccessorRefTable {
- Address address;
- const char* name;
- };
-
- static const AccessorRefTable getters[] = {
-#define ACCESSOR_INFO_DECLARATION(_, accessor_name, AccessorName, ...) \
- {FUNCTION_ADDR(&Accessors::AccessorName##Getter), \
- "Accessors::" #AccessorName "Getter"}, /* NOLINT(whitespace/indent) */
+ static const Address accessors[] = {
+ // Getters:
+#define ACCESSOR_INFO_DECLARATION(_, __, AccessorName, ...) \
+ FUNCTION_ADDR(&Accessors::AccessorName##Getter),
ACCESSOR_INFO_LIST_GENERATOR(ACCESSOR_INFO_DECLARATION, /* not used */)
#undef ACCESSOR_INFO_DECLARATION
- };
- static const AccessorRefTable setters[] = {
-#define ACCESSOR_SETTER_DECLARATION(name) \
- { FUNCTION_ADDR(&Accessors::name), "Accessors::" #name},
- ACCESSOR_SETTER_LIST(ACCESSOR_SETTER_DECLARATION)
+ // Setters:
+#define ACCESSOR_SETTER_DECLARATION(name) FUNCTION_ADDR(&Accessors::name),
+ ACCESSOR_SETTER_LIST(ACCESSOR_SETTER_DECLARATION)
#undef ACCESSOR_SETTER_DECLARATION
};
- for (unsigned i = 0; i < arraysize(getters); ++i) {
- Add(getters[i].address, getters[i].name, index);
- }
-
- for (unsigned i = 0; i < arraysize(setters); ++i) {
- Add(setters[i].address, setters[i].name, index);
+ for (Address addr : accessors) {
+ Add(addr, index);
}
CHECK_EQ(kSpecialReferenceCount + kExternalReferenceCount +
@@ -195,34 +208,23 @@ void ExternalReferenceTable::AddStubCache(Isolate* isolate, int* index) {
StubCache* load_stub_cache = isolate->load_stub_cache();
// Stub cache tables
- Add(load_stub_cache->key_reference(StubCache::kPrimary).address(),
- "Load StubCache::primary_->key", index);
- Add(load_stub_cache->value_reference(StubCache::kPrimary).address(),
- "Load StubCache::primary_->value", index);
- Add(load_stub_cache->map_reference(StubCache::kPrimary).address(),
- "Load StubCache::primary_->map", index);
- Add(load_stub_cache->key_reference(StubCache::kSecondary).address(),
- "Load StubCache::secondary_->key", index);
- Add(load_stub_cache->value_reference(StubCache::kSecondary).address(),
- "Load StubCache::secondary_->value", index);
- Add(load_stub_cache->map_reference(StubCache::kSecondary).address(),
- "Load StubCache::secondary_->map", index);
+ Add(load_stub_cache->key_reference(StubCache::kPrimary).address(), index);
+ Add(load_stub_cache->value_reference(StubCache::kPrimary).address(), index);
+ Add(load_stub_cache->map_reference(StubCache::kPrimary).address(), index);
+ Add(load_stub_cache->key_reference(StubCache::kSecondary).address(), index);
+ Add(load_stub_cache->value_reference(StubCache::kSecondary).address(), index);
+ Add(load_stub_cache->map_reference(StubCache::kSecondary).address(), index);
StubCache* store_stub_cache = isolate->store_stub_cache();
// Stub cache tables
- Add(store_stub_cache->key_reference(StubCache::kPrimary).address(),
- "Store StubCache::primary_->key", index);
- Add(store_stub_cache->value_reference(StubCache::kPrimary).address(),
- "Store StubCache::primary_->value", index);
- Add(store_stub_cache->map_reference(StubCache::kPrimary).address(),
- "Store StubCache::primary_->map", index);
- Add(store_stub_cache->key_reference(StubCache::kSecondary).address(),
- "Store StubCache::secondary_->key", index);
+ Add(store_stub_cache->key_reference(StubCache::kPrimary).address(), index);
+ Add(store_stub_cache->value_reference(StubCache::kPrimary).address(), index);
+ Add(store_stub_cache->map_reference(StubCache::kPrimary).address(), index);
+ Add(store_stub_cache->key_reference(StubCache::kSecondary).address(), index);
Add(store_stub_cache->value_reference(StubCache::kSecondary).address(),
- "Store StubCache::secondary_->value", index);
- Add(store_stub_cache->map_reference(StubCache::kSecondary).address(),
- "Store StubCache::secondary_->map", index);
+ index);
+ Add(store_stub_cache->map_reference(StubCache::kSecondary).address(), index);
CHECK_EQ(kSpecialReferenceCount + kExternalReferenceCount +
kBuiltinsReferenceCount + kRuntimeReferenceCount +
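
The external-reference-table.cc change splits the old {address, name} entry pairs into two parallel arrays: ref_addr_ is still populated at Init() time, while ref_name_ becomes a single static table generated up front. Both are expanded from the same X-macro lists (EXTERNAL_REFERENCE_LIST, BUILTIN_LIST_C, FOR_EACH_INTRINSIC, and so on), which is what keeps index i meaning the same entry in both arrays. A toy illustration of the technique, with hypothetical names:

  #define MY_LIST(V) V(alpha, "first") V(beta, "second")

  #define ADD_NAME(id, desc) desc,
  static const char* const kNames[] = {MY_LIST(ADD_NAME)};
  #undef ADD_NAME

  #define ADD_ID(id, desc) k_##id,
  enum Ids { MY_LIST(ADD_ID) kCount };
  #undef ADD_ID

  // kNames[k_alpha] == "first": both expansions walk MY_LIST in the same
  // order, which is exactly how ref_addr_[i] and ref_name_[i] stay in sync.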
diff --git a/deps/v8/src/external-reference-table.h b/deps/v8/src/external-reference-table.h
index 4fa1088ab7..cee764311d 100644
--- a/deps/v8/src/external-reference-table.h
+++ b/deps/v8/src/external-reference-table.h
@@ -42,52 +42,34 @@ class ExternalReferenceTable {
kBuiltinsReferenceCount + kRuntimeReferenceCount +
kIsolateAddressReferenceCount + kAccessorReferenceCount +
kStubCacheReferenceCount;
+ static constexpr uint32_t kEntrySize =
+ static_cast<uint32_t>(kSystemPointerSize);
+ static constexpr uint32_t kSizeInBytes = kSize * kEntrySize + 2 * kUInt32Size;
- static constexpr uint32_t size() { return static_cast<uint32_t>(kSize); }
- Address address(uint32_t i) { return refs_[i].address; }
- const char* name(uint32_t i) { return refs_[i].name; }
+ Address address(uint32_t i) const { return ref_addr_[i]; }
+ const char* name(uint32_t i) const { return ref_name_[i]; }
bool is_initialized() const { return is_initialized_ != 0; }
static const char* ResolveSymbol(void* address);
- static constexpr uint32_t EntrySize() {
- return sizeof(ExternalReferenceEntry);
- }
-
static constexpr uint32_t OffsetOfEntry(uint32_t i) {
// Used in CodeAssembler::LookupExternalReference.
- STATIC_ASSERT(offsetof(ExternalReferenceEntry, address) == 0);
- return i * EntrySize();
+ return i * kEntrySize;
}
const char* NameFromOffset(uint32_t offset) {
- DCHECK_EQ(offset % EntrySize(), 0);
- DCHECK_LT(offset, SizeInBytes());
- int index = offset / EntrySize();
+ DCHECK_EQ(offset % kEntrySize, 0);
+ DCHECK_LT(offset, kSizeInBytes);
+ int index = offset / kEntrySize;
return name(index);
}
- static constexpr uint32_t SizeInBytes() {
- STATIC_ASSERT(OffsetOfEntry(size()) + 2 * kUInt32Size ==
- sizeof(ExternalReferenceTable));
- return OffsetOfEntry(size()) + 2 * kUInt32Size;
- }
-
ExternalReferenceTable() = default;
void Init(Isolate* isolate);
private:
- struct ExternalReferenceEntry {
- Address address;
- const char* name;
-
- ExternalReferenceEntry() : address(kNullAddress), name(nullptr) {}
- ExternalReferenceEntry(Address address, const char* name)
- : address(address), name(name) {}
- };
-
- void Add(Address address, const char* name, int* index);
+ void Add(Address address, int* index);
void AddReferences(Isolate* isolate, int* index);
void AddBuiltins(int* index);
@@ -96,13 +78,19 @@ class ExternalReferenceTable {
void AddAccessors(int* index);
void AddStubCache(Isolate* isolate, int* index);
- ExternalReferenceEntry refs_[kSize];
+ STATIC_ASSERT(sizeof(Address) == kEntrySize);
+ Address ref_addr_[kSize];
+ static const char* const ref_name_[kSize];
+
uint32_t is_initialized_ = 0; // Not bool to guarantee deterministic size.
uint32_t unused_padding_ = 0; // For alignment.
DISALLOW_COPY_AND_ASSIGN(ExternalReferenceTable);
};
+STATIC_ASSERT(ExternalReferenceTable::kSizeInBytes ==
+ sizeof(ExternalReferenceTable));
+
} // namespace internal
} // namespace v8
#endif // V8_EXTERNAL_REFERENCE_TABLE_H_
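
The header-side counterpart replaces the computed EntrySize()/SizeInBytes() helpers with compile-time constants and pins the object layout with a STATIC_ASSERT. The arithmetic is straightforward once ref_name_ is static (and therefore not part of the instance). On a 64-bit target, assuming kSystemPointerSize == 8:

  // ref_addr_[kSize]  : kSize * 8 bytes  (kEntrySize == 8)
  // is_initialized_   : 4 bytes          (uint32_t, not bool, to keep the
  //                                       size deterministic)
  // unused_padding_   : 4 bytes          (alignment)
  // total             : kSize * 8 + 2 * 4 == kSizeInBytes

which is what STATIC_ASSERT(ExternalReferenceTable::kSizeInBytes == sizeof(ExternalReferenceTable)) checks at the end of the header.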
diff --git a/deps/v8/src/external-reference.cc b/deps/v8/src/external-reference.cc
index 806fbb8af5..78d4127758 100644
--- a/deps/v8/src/external-reference.cc
+++ b/deps/v8/src/external-reference.cc
@@ -6,9 +6,10 @@
#include "src/api.h"
#include "src/base/ieee754.h"
-#include "src/codegen.h"
#include "src/compiler/code-assembler.h"
#include "src/counters.h"
+#include "src/cpu-features.h"
+#include "src/date.h"
#include "src/debug/debug.h"
#include "src/deoptimizer.h"
#include "src/elements.h"
@@ -16,7 +17,9 @@
#include "src/ic/stub-cache.h"
#include "src/interpreter/interpreter.h"
#include "src/isolate.h"
+#include "src/log.h"
#include "src/math-random.h"
+#include "src/microtask-queue.h"
#include "src/objects-inl.h"
#include "src/regexp/regexp-stack.h"
#include "src/simulator-base.h"
@@ -47,7 +50,7 @@
#endif // V8_INTERPRETED_REGEXP
#ifdef V8_INTL_SUPPORT
-#include "src/intl.h"
+#include "src/objects/intl-objects.h"
#endif // V8_INTL_SUPPORT
namespace v8 {
@@ -62,27 +65,27 @@ constexpr uint64_t double_the_hole_nan_constant = kHoleNanInt64;
constexpr double double_uint32_bias_constant =
static_cast<double>(kMaxUInt32) + 1;
-constexpr struct V8_ALIGNED(16) {
+constexpr struct alignas(16) {
uint32_t a;
uint32_t b;
uint32_t c;
uint32_t d;
} float_absolute_constant = {0x7FFFFFFF, 0x7FFFFFFF, 0x7FFFFFFF, 0x7FFFFFFF};
-constexpr struct V8_ALIGNED(16) {
+constexpr struct alignas(16) {
uint32_t a;
uint32_t b;
uint32_t c;
uint32_t d;
} float_negate_constant = {0x80000000, 0x80000000, 0x80000000, 0x80000000};
-constexpr struct V8_ALIGNED(16) {
+constexpr struct alignas(16) {
uint64_t a;
uint64_t b;
} double_absolute_constant = {uint64_t{0x7FFFFFFFFFFFFFFF},
uint64_t{0x7FFFFFFFFFFFFFFF}};
-constexpr struct V8_ALIGNED(16) {
+constexpr struct alignas(16) {
uint64_t a;
uint64_t b;
} double_negate_constant = {uint64_t{0x8000000000000000},
@@ -146,6 +149,14 @@ ExternalReference ExternalReference::interpreter_dispatch_counters(
isolate->interpreter()->bytecode_dispatch_counters_table());
}
+ExternalReference
+ExternalReference::address_of_interpreter_entry_trampoline_instruction_start(
+ Isolate* isolate) {
+ return ExternalReference(
+ isolate->interpreter()
+ ->address_of_interpreter_entry_trampoline_instruction_start());
+}
+
ExternalReference ExternalReference::bytecode_size_table_address() {
return ExternalReference(
interpreter::Bytecodes::bytecode_size_table_address());
@@ -168,25 +179,70 @@ ExternalReference ExternalReference::Create(const SCTableReference& table_ref) {
return ExternalReference(table_ref.address());
}
-ExternalReference
-ExternalReference::incremental_marking_record_write_function() {
- return ExternalReference(
- Redirect(FUNCTION_ADDR(IncrementalMarking::RecordWriteFromCode)));
+namespace {
+
+// Helper function to verify that all types in a list of types are scalar.
+// This includes primitive types (int, Address) and pointer types. We also
+// allow void.
+template <typename T>
+constexpr bool AllScalar() {
+ return std::is_scalar<T>::value || std::is_void<T>::value;
}
+template <typename T1, typename T2, typename... Rest>
+constexpr bool AllScalar() {
+ return AllScalar<T1>() && AllScalar<T2, Rest...>();
+}
+
+// Checks a function pointer's type for compatibility with the
+// ExternalReference calling mechanism. Specifically, all arguments
+// as well as the result type must pass the AllScalar check above,
+// because we expect each item to fit into one register or stack slot.
+template <typename T>
+struct IsValidExternalReferenceType;
+
+template <typename Result, typename... Args>
+struct IsValidExternalReferenceType<Result (*)(Args...)> {
+ static const bool value = AllScalar<Result, Args...>();
+};
+
+template <typename Result, typename Class, typename... Args>
+struct IsValidExternalReferenceType<Result (Class::*)(Args...)> {
+ static const bool value = AllScalar<Result, Args...>();
+};
+
+} // namespace
+
+#define FUNCTION_REFERENCE(Name, Target) \
+ ExternalReference ExternalReference::Name() { \
+ STATIC_ASSERT(IsValidExternalReferenceType<decltype(&Target)>::value); \
+ return ExternalReference(Redirect(FUNCTION_ADDR(Target))); \
+ }
+
+#define FUNCTION_REFERENCE_WITH_ISOLATE(Name, Target) \
+ ExternalReference ExternalReference::Name(Isolate* isolate) { \
+ STATIC_ASSERT(IsValidExternalReferenceType<decltype(&Target)>::value); \
+ return ExternalReference(Redirect(FUNCTION_ADDR(Target))); \
+ }
+
+#define FUNCTION_REFERENCE_WITH_TYPE(Name, Target, Type) \
+ ExternalReference ExternalReference::Name() { \
+ STATIC_ASSERT(IsValidExternalReferenceType<decltype(&Target)>::value); \
+ return ExternalReference(Redirect(FUNCTION_ADDR(Target), Type)); \
+ }
+
+FUNCTION_REFERENCE(incremental_marking_record_write_function,
+ IncrementalMarking::RecordWriteFromCode);
+
ExternalReference ExternalReference::store_buffer_overflow_function() {
return ExternalReference(
Redirect(Heap::store_buffer_overflow_function_address()));
}
-ExternalReference ExternalReference::delete_handle_scope_extensions() {
- return ExternalReference(
- Redirect(FUNCTION_ADDR(HandleScope::DeleteExtensions)));
-}
+FUNCTION_REFERENCE(delete_handle_scope_extensions,
+ HandleScope::DeleteExtensions)
-ExternalReference ExternalReference::get_date_field_function() {
- return ExternalReference(Redirect(FUNCTION_ADDR(JSDate::GetField)));
-}
+FUNCTION_REFERENCE(get_date_field_function, JSDate::GetField)
ExternalReference ExternalReference::date_cache_stamp(Isolate* isolate) {
return ExternalReference(isolate->date_cache()->stamp_address());
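
The FUNCTION_REFERENCE family above is the workhorse of this file's cleanup: each one-line use replaces a hand-written accessor, and the embedded STATIC_ASSERT applies the IsValidExternalReferenceType check (every parameter and the result must be scalar, i.e. fit in one register or stack slot) to the target's actual signature. Expanding one instance by hand shows the shape:

  // FUNCTION_REFERENCE(get_date_field_function, JSDate::GetField)
  // expands to:
  ExternalReference ExternalReference::get_date_field_function() {
    STATIC_ASSERT(
        IsValidExternalReferenceType<decltype(&JSDate::GetField)>::value);
    return ExternalReference(Redirect(FUNCTION_ADDR(JSDate::GetField)));
  }

FUNCTION_REFERENCE_WITH_ISOLATE and FUNCTION_REFERENCE_WITH_TYPE are the same pattern with an Isolate* parameter or an explicit call-type tag (e.g. BUILTIN_FP_CALL) threaded through to Redirect.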
@@ -216,149 +272,55 @@ ExternalReference ExternalReference::force_slow_path(Isolate* isolate) {
return ExternalReference(isolate->force_slow_path_address());
}
-ExternalReference ExternalReference::new_deoptimizer_function() {
- return ExternalReference(Redirect(FUNCTION_ADDR(Deoptimizer::New)));
-}
-
-ExternalReference ExternalReference::compute_output_frames_function() {
- return ExternalReference(
- Redirect(FUNCTION_ADDR(Deoptimizer::ComputeOutputFrames)));
-}
-
-ExternalReference ExternalReference::wasm_f32_trunc() {
- return ExternalReference(Redirect(FUNCTION_ADDR(wasm::f32_trunc_wrapper)));
-}
-ExternalReference ExternalReference::wasm_f32_floor() {
- return ExternalReference(Redirect(FUNCTION_ADDR(wasm::f32_floor_wrapper)));
-}
-ExternalReference ExternalReference::wasm_f32_ceil() {
- return ExternalReference(Redirect(FUNCTION_ADDR(wasm::f32_ceil_wrapper)));
-}
-ExternalReference ExternalReference::wasm_f32_nearest_int() {
- return ExternalReference(
- Redirect(FUNCTION_ADDR(wasm::f32_nearest_int_wrapper)));
-}
-
-ExternalReference ExternalReference::wasm_f64_trunc() {
- return ExternalReference(Redirect(FUNCTION_ADDR(wasm::f64_trunc_wrapper)));
-}
-
-ExternalReference ExternalReference::wasm_f64_floor() {
- return ExternalReference(Redirect(FUNCTION_ADDR(wasm::f64_floor_wrapper)));
-}
-
-ExternalReference ExternalReference::wasm_f64_ceil() {
- return ExternalReference(Redirect(FUNCTION_ADDR(wasm::f64_ceil_wrapper)));
-}
-
-ExternalReference ExternalReference::wasm_f64_nearest_int() {
- return ExternalReference(
- Redirect(FUNCTION_ADDR(wasm::f64_nearest_int_wrapper)));
-}
-
-ExternalReference ExternalReference::wasm_int64_to_float32() {
- return ExternalReference(
- Redirect(FUNCTION_ADDR(wasm::int64_to_float32_wrapper)));
-}
-
-ExternalReference ExternalReference::wasm_uint64_to_float32() {
- return ExternalReference(
- Redirect(FUNCTION_ADDR(wasm::uint64_to_float32_wrapper)));
-}
-
-ExternalReference ExternalReference::wasm_int64_to_float64() {
- return ExternalReference(
- Redirect(FUNCTION_ADDR(wasm::int64_to_float64_wrapper)));
-}
-
-ExternalReference ExternalReference::wasm_uint64_to_float64() {
- return ExternalReference(
- Redirect(FUNCTION_ADDR(wasm::uint64_to_float64_wrapper)));
-}
-
-ExternalReference ExternalReference::wasm_float32_to_int64() {
- return ExternalReference(
- Redirect(FUNCTION_ADDR(wasm::float32_to_int64_wrapper)));
-}
-
-ExternalReference ExternalReference::wasm_float32_to_uint64() {
- return ExternalReference(
- Redirect(FUNCTION_ADDR(wasm::float32_to_uint64_wrapper)));
-}
-
-ExternalReference ExternalReference::wasm_float64_to_int64() {
- return ExternalReference(
- Redirect(FUNCTION_ADDR(wasm::float64_to_int64_wrapper)));
-}
-
-ExternalReference ExternalReference::wasm_float64_to_uint64() {
- return ExternalReference(
- Redirect(FUNCTION_ADDR(wasm::float64_to_uint64_wrapper)));
-}
-
-ExternalReference ExternalReference::wasm_int64_div() {
- return ExternalReference(Redirect(FUNCTION_ADDR(wasm::int64_div_wrapper)));
-}
-
-ExternalReference ExternalReference::wasm_int64_mod() {
- return ExternalReference(Redirect(FUNCTION_ADDR(wasm::int64_mod_wrapper)));
-}
-
-ExternalReference ExternalReference::wasm_uint64_div() {
- return ExternalReference(Redirect(FUNCTION_ADDR(wasm::uint64_div_wrapper)));
-}
-
-ExternalReference ExternalReference::wasm_uint64_mod() {
- return ExternalReference(Redirect(FUNCTION_ADDR(wasm::uint64_mod_wrapper)));
-}
-
-ExternalReference ExternalReference::wasm_word32_ctz() {
- return ExternalReference(Redirect(FUNCTION_ADDR(wasm::word32_ctz_wrapper)));
-}
-
-ExternalReference ExternalReference::wasm_word64_ctz() {
- return ExternalReference(Redirect(FUNCTION_ADDR(wasm::word64_ctz_wrapper)));
-}
-
-ExternalReference ExternalReference::wasm_word32_popcnt() {
- return ExternalReference(
- Redirect(FUNCTION_ADDR(wasm::word32_popcnt_wrapper)));
-}
-
-ExternalReference ExternalReference::wasm_word64_popcnt() {
- return ExternalReference(
- Redirect(FUNCTION_ADDR(wasm::word64_popcnt_wrapper)));
-}
-
-ExternalReference ExternalReference::wasm_word32_rol() {
- return ExternalReference(Redirect(FUNCTION_ADDR(wasm::word32_rol_wrapper)));
-}
-
-ExternalReference ExternalReference::wasm_word32_ror() {
- return ExternalReference(Redirect(FUNCTION_ADDR(wasm::word32_ror_wrapper)));
-}
+FUNCTION_REFERENCE(new_deoptimizer_function, Deoptimizer::New)
+
+FUNCTION_REFERENCE(compute_output_frames_function,
+ Deoptimizer::ComputeOutputFrames)
+
+FUNCTION_REFERENCE(wasm_f32_trunc, wasm::f32_trunc_wrapper)
+FUNCTION_REFERENCE(wasm_f32_floor, wasm::f32_floor_wrapper)
+FUNCTION_REFERENCE(wasm_f32_ceil, wasm::f32_ceil_wrapper)
+FUNCTION_REFERENCE(wasm_f32_nearest_int, wasm::f32_nearest_int_wrapper)
+FUNCTION_REFERENCE(wasm_f64_trunc, wasm::f64_trunc_wrapper)
+FUNCTION_REFERENCE(wasm_f64_floor, wasm::f64_floor_wrapper)
+FUNCTION_REFERENCE(wasm_f64_ceil, wasm::f64_ceil_wrapper)
+FUNCTION_REFERENCE(wasm_f64_nearest_int, wasm::f64_nearest_int_wrapper)
+FUNCTION_REFERENCE(wasm_int64_to_float32, wasm::int64_to_float32_wrapper)
+FUNCTION_REFERENCE(wasm_uint64_to_float32, wasm::uint64_to_float32_wrapper)
+FUNCTION_REFERENCE(wasm_int64_to_float64, wasm::int64_to_float64_wrapper)
+FUNCTION_REFERENCE(wasm_uint64_to_float64, wasm::uint64_to_float64_wrapper)
+FUNCTION_REFERENCE(wasm_float32_to_int64, wasm::float32_to_int64_wrapper)
+FUNCTION_REFERENCE(wasm_float32_to_uint64, wasm::float32_to_uint64_wrapper)
+FUNCTION_REFERENCE(wasm_float64_to_int64, wasm::float64_to_int64_wrapper)
+FUNCTION_REFERENCE(wasm_float64_to_uint64, wasm::float64_to_uint64_wrapper)
+FUNCTION_REFERENCE(wasm_int64_div, wasm::int64_div_wrapper)
+FUNCTION_REFERENCE(wasm_int64_mod, wasm::int64_mod_wrapper)
+FUNCTION_REFERENCE(wasm_uint64_div, wasm::uint64_div_wrapper)
+FUNCTION_REFERENCE(wasm_uint64_mod, wasm::uint64_mod_wrapper)
+FUNCTION_REFERENCE(wasm_word32_ctz, wasm::word32_ctz_wrapper)
+FUNCTION_REFERENCE(wasm_word64_ctz, wasm::word64_ctz_wrapper)
+FUNCTION_REFERENCE(wasm_word32_popcnt, wasm::word32_popcnt_wrapper)
+FUNCTION_REFERENCE(wasm_word64_popcnt, wasm::word64_popcnt_wrapper)
+FUNCTION_REFERENCE(wasm_word32_rol, wasm::word32_rol_wrapper)
+FUNCTION_REFERENCE(wasm_word32_ror, wasm::word32_ror_wrapper)
+FUNCTION_REFERENCE(wasm_memory_copy, wasm::memory_copy_wrapper)
+FUNCTION_REFERENCE(wasm_memory_fill, wasm::memory_fill_wrapper)
static void f64_acos_wrapper(Address data) {
double input = ReadUnalignedValue<double>(data);
WriteUnalignedValue(data, base::ieee754::acos(input));
}
-ExternalReference ExternalReference::f64_acos_wrapper_function() {
- return ExternalReference(Redirect(FUNCTION_ADDR(f64_acos_wrapper)));
-}
+FUNCTION_REFERENCE(f64_acos_wrapper_function, f64_acos_wrapper)
static void f64_asin_wrapper(Address data) {
double input = ReadUnalignedValue<double>(data);
WriteUnalignedValue<double>(data, base::ieee754::asin(input));
}
-ExternalReference ExternalReference::f64_asin_wrapper_function() {
- return ExternalReference(Redirect(FUNCTION_ADDR(f64_asin_wrapper)));
-}
+FUNCTION_REFERENCE(f64_asin_wrapper_function, f64_asin_wrapper)
-ExternalReference ExternalReference::wasm_float64_pow() {
- return ExternalReference(Redirect(FUNCTION_ADDR(wasm::float64_pow_wrapper)));
-}
+FUNCTION_REFERENCE(wasm_float64_pow, wasm::float64_pow_wrapper)
static void f64_mod_wrapper(Address data) {
double dividend = ReadUnalignedValue<double>(data);
@@ -366,25 +328,16 @@ static void f64_mod_wrapper(Address data) {
WriteUnalignedValue<double>(data, Modulo(dividend, divisor));
}
-ExternalReference ExternalReference::f64_mod_wrapper_function() {
- return ExternalReference(Redirect(FUNCTION_ADDR(f64_mod_wrapper)));
-}
-
-ExternalReference ExternalReference::wasm_call_trap_callback_for_testing() {
- return ExternalReference(
- Redirect(FUNCTION_ADDR(wasm::call_trap_callback_for_testing)));
-}
+FUNCTION_REFERENCE(f64_mod_wrapper_function, f64_mod_wrapper)
-ExternalReference ExternalReference::log_enter_external_function() {
- return ExternalReference(Redirect(FUNCTION_ADDR(Logger::EnterExternal)));
-}
+FUNCTION_REFERENCE(wasm_call_trap_callback_for_testing,
+ wasm::call_trap_callback_for_testing)
-ExternalReference ExternalReference::log_leave_external_function() {
- return ExternalReference(Redirect(FUNCTION_ADDR(Logger::LeaveExternal)));
-}
+FUNCTION_REFERENCE(log_enter_external_function, Logger::EnterExternal)
+FUNCTION_REFERENCE(log_leave_external_function, Logger::LeaveExternal)
-ExternalReference ExternalReference::roots_array_start(Isolate* isolate) {
- return ExternalReference(isolate->heap()->roots_array_start());
+ExternalReference ExternalReference::isolate_root(Isolate* isolate) {
+ return ExternalReference(isolate->isolate_root());
}
ExternalReference ExternalReference::allocation_sites_list_address(
@@ -455,9 +408,7 @@ ExternalReference ExternalReference::address_of_pending_message_obj(
return ExternalReference(isolate->pending_message_obj_address());
}
-ExternalReference ExternalReference::abort_with_reason() {
- return ExternalReference(Redirect(FUNCTION_ADDR(i::abort_with_reason)));
-}
+FUNCTION_REFERENCE(abort_with_reason, i::abort_with_reason)
ExternalReference
ExternalReference::address_of_harmony_await_optimization_flag() {
@@ -524,41 +475,35 @@ ExternalReference ExternalReference::invoke_accessor_getter_callback() {
#ifndef V8_INTERPRETED_REGEXP
-ExternalReference ExternalReference::re_check_stack_guard_state(
- Isolate* isolate) {
- Address function;
#if V8_TARGET_ARCH_X64
- function = FUNCTION_ADDR(RegExpMacroAssemblerX64::CheckStackGuardState);
+#define re_stack_check_func RegExpMacroAssemblerX64::CheckStackGuardState
#elif V8_TARGET_ARCH_IA32
- function = FUNCTION_ADDR(RegExpMacroAssemblerIA32::CheckStackGuardState);
+#define re_stack_check_func RegExpMacroAssemblerIA32::CheckStackGuardState
#elif V8_TARGET_ARCH_ARM64
- function = FUNCTION_ADDR(RegExpMacroAssemblerARM64::CheckStackGuardState);
+#define re_stack_check_func RegExpMacroAssemblerARM64::CheckStackGuardState
#elif V8_TARGET_ARCH_ARM
- function = FUNCTION_ADDR(RegExpMacroAssemblerARM::CheckStackGuardState);
+#define re_stack_check_func RegExpMacroAssemblerARM::CheckStackGuardState
#elif V8_TARGET_ARCH_PPC
- function = FUNCTION_ADDR(RegExpMacroAssemblerPPC::CheckStackGuardState);
+#define re_stack_check_func RegExpMacroAssemblerPPC::CheckStackGuardState
#elif V8_TARGET_ARCH_MIPS
- function = FUNCTION_ADDR(RegExpMacroAssemblerMIPS::CheckStackGuardState);
+#define re_stack_check_func RegExpMacroAssemblerMIPS::CheckStackGuardState
#elif V8_TARGET_ARCH_MIPS64
- function = FUNCTION_ADDR(RegExpMacroAssemblerMIPS::CheckStackGuardState);
+#define re_stack_check_func RegExpMacroAssemblerMIPS::CheckStackGuardState
#elif V8_TARGET_ARCH_S390
- function = FUNCTION_ADDR(RegExpMacroAssemblerS390::CheckStackGuardState);
+#define re_stack_check_func RegExpMacroAssemblerS390::CheckStackGuardState
#else
UNREACHABLE();
#endif
- return ExternalReference(Redirect(function));
-}
-ExternalReference ExternalReference::re_grow_stack(Isolate* isolate) {
- return ExternalReference(
- Redirect(FUNCTION_ADDR(NativeRegExpMacroAssembler::GrowStack)));
-}
+FUNCTION_REFERENCE_WITH_ISOLATE(re_check_stack_guard_state, re_stack_check_func)
+#undef re_stack_check_func
-ExternalReference ExternalReference::re_case_insensitive_compare_uc16(
- Isolate* isolate) {
- return ExternalReference(Redirect(
- FUNCTION_ADDR(NativeRegExpMacroAssembler::CaseInsensitiveCompareUC16)));
-}
+FUNCTION_REFERENCE_WITH_ISOLATE(re_grow_stack,
+ NativeRegExpMacroAssembler::GrowStack)
+
+FUNCTION_REFERENCE_WITH_ISOLATE(
+ re_case_insensitive_compare_uc16,
+ NativeRegExpMacroAssembler::CaseInsensitiveCompareUC16)
ExternalReference ExternalReference::re_word_character_map(Isolate* isolate) {
return ExternalReference(
@@ -588,146 +533,77 @@ ExternalReference ExternalReference::address_of_regexp_stack_memory_size(
#endif // V8_INTERPRETED_REGEXP
-ExternalReference ExternalReference::ieee754_acos_function() {
- return ExternalReference(
- Redirect(FUNCTION_ADDR(base::ieee754::acos), BUILTIN_FP_CALL));
-}
-
-ExternalReference ExternalReference::ieee754_acosh_function() {
- return ExternalReference(
- Redirect(FUNCTION_ADDR(base::ieee754::acosh), BUILTIN_FP_FP_CALL));
-}
-
-ExternalReference ExternalReference::ieee754_asin_function() {
- return ExternalReference(
- Redirect(FUNCTION_ADDR(base::ieee754::asin), BUILTIN_FP_CALL));
-}
-
-ExternalReference ExternalReference::ieee754_asinh_function() {
- return ExternalReference(
- Redirect(FUNCTION_ADDR(base::ieee754::asinh), BUILTIN_FP_FP_CALL));
-}
-
-ExternalReference ExternalReference::ieee754_atan_function() {
- return ExternalReference(
- Redirect(FUNCTION_ADDR(base::ieee754::atan), BUILTIN_FP_CALL));
-}
-
-ExternalReference ExternalReference::ieee754_atanh_function() {
- return ExternalReference(
- Redirect(FUNCTION_ADDR(base::ieee754::atanh), BUILTIN_FP_FP_CALL));
-}
-
-ExternalReference ExternalReference::ieee754_atan2_function() {
- return ExternalReference(
- Redirect(FUNCTION_ADDR(base::ieee754::atan2), BUILTIN_FP_FP_CALL));
-}
-
-ExternalReference ExternalReference::ieee754_cbrt_function() {
- return ExternalReference(
- Redirect(FUNCTION_ADDR(base::ieee754::cbrt), BUILTIN_FP_FP_CALL));
-}
-
-ExternalReference ExternalReference::ieee754_cos_function() {
- return ExternalReference(
- Redirect(FUNCTION_ADDR(base::ieee754::cos), BUILTIN_FP_CALL));
-}
-
-ExternalReference ExternalReference::ieee754_cosh_function() {
- return ExternalReference(
- Redirect(FUNCTION_ADDR(base::ieee754::cosh), BUILTIN_FP_CALL));
-}
-
-ExternalReference ExternalReference::ieee754_exp_function() {
- return ExternalReference(
- Redirect(FUNCTION_ADDR(base::ieee754::exp), BUILTIN_FP_CALL));
-}
-
-ExternalReference ExternalReference::ieee754_expm1_function() {
- return ExternalReference(
- Redirect(FUNCTION_ADDR(base::ieee754::expm1), BUILTIN_FP_FP_CALL));
-}
-
-ExternalReference ExternalReference::ieee754_log_function() {
- return ExternalReference(
- Redirect(FUNCTION_ADDR(base::ieee754::log), BUILTIN_FP_CALL));
-}
-
-ExternalReference ExternalReference::ieee754_log1p_function() {
- return ExternalReference(
- Redirect(FUNCTION_ADDR(base::ieee754::log1p), BUILTIN_FP_CALL));
-}
-
-ExternalReference ExternalReference::ieee754_log10_function() {
- return ExternalReference(
- Redirect(FUNCTION_ADDR(base::ieee754::log10), BUILTIN_FP_CALL));
-}
-
-ExternalReference ExternalReference::ieee754_log2_function() {
- return ExternalReference(
- Redirect(FUNCTION_ADDR(base::ieee754::log2), BUILTIN_FP_CALL));
-}
-
-ExternalReference ExternalReference::ieee754_sin_function() {
- return ExternalReference(
- Redirect(FUNCTION_ADDR(base::ieee754::sin), BUILTIN_FP_CALL));
-}
-
-ExternalReference ExternalReference::ieee754_sinh_function() {
- return ExternalReference(
- Redirect(FUNCTION_ADDR(base::ieee754::sinh), BUILTIN_FP_CALL));
-}
-
-ExternalReference ExternalReference::ieee754_tan_function() {
- return ExternalReference(
- Redirect(FUNCTION_ADDR(base::ieee754::tan), BUILTIN_FP_CALL));
-}
-
-ExternalReference ExternalReference::ieee754_tanh_function() {
- return ExternalReference(
- Redirect(FUNCTION_ADDR(base::ieee754::tanh), BUILTIN_FP_CALL));
-}
+FUNCTION_REFERENCE_WITH_TYPE(ieee754_acos_function, base::ieee754::acos,
+ BUILTIN_FP_CALL)
+FUNCTION_REFERENCE_WITH_TYPE(ieee754_acosh_function, base::ieee754::acosh,
+ BUILTIN_FP_FP_CALL)
+FUNCTION_REFERENCE_WITH_TYPE(ieee754_asin_function, base::ieee754::asin,
+ BUILTIN_FP_CALL)
+FUNCTION_REFERENCE_WITH_TYPE(ieee754_asinh_function, base::ieee754::asinh,
+ BUILTIN_FP_FP_CALL)
+FUNCTION_REFERENCE_WITH_TYPE(ieee754_atan_function, base::ieee754::atan,
+ BUILTIN_FP_CALL)
+FUNCTION_REFERENCE_WITH_TYPE(ieee754_atanh_function, base::ieee754::atanh,
+ BUILTIN_FP_FP_CALL)
+FUNCTION_REFERENCE_WITH_TYPE(ieee754_atan2_function, base::ieee754::atan2,
+ BUILTIN_FP_FP_CALL)
+FUNCTION_REFERENCE_WITH_TYPE(ieee754_cbrt_function, base::ieee754::cbrt,
+ BUILTIN_FP_FP_CALL)
+FUNCTION_REFERENCE_WITH_TYPE(ieee754_cos_function, base::ieee754::cos,
+ BUILTIN_FP_CALL)
+FUNCTION_REFERENCE_WITH_TYPE(ieee754_cosh_function, base::ieee754::cosh,
+ BUILTIN_FP_CALL)
+FUNCTION_REFERENCE_WITH_TYPE(ieee754_exp_function, base::ieee754::exp,
+ BUILTIN_FP_CALL)
+FUNCTION_REFERENCE_WITH_TYPE(ieee754_expm1_function, base::ieee754::expm1,
+ BUILTIN_FP_FP_CALL)
+FUNCTION_REFERENCE_WITH_TYPE(ieee754_log_function, base::ieee754::log,
+ BUILTIN_FP_CALL)
+FUNCTION_REFERENCE_WITH_TYPE(ieee754_log1p_function, base::ieee754::log1p,
+ BUILTIN_FP_CALL)
+FUNCTION_REFERENCE_WITH_TYPE(ieee754_log10_function, base::ieee754::log10,
+ BUILTIN_FP_CALL)
+FUNCTION_REFERENCE_WITH_TYPE(ieee754_log2_function, base::ieee754::log2,
+ BUILTIN_FP_CALL)
+FUNCTION_REFERENCE_WITH_TYPE(ieee754_sin_function, base::ieee754::sin,
+ BUILTIN_FP_CALL)
+FUNCTION_REFERENCE_WITH_TYPE(ieee754_sinh_function, base::ieee754::sinh,
+ BUILTIN_FP_CALL)
+FUNCTION_REFERENCE_WITH_TYPE(ieee754_tan_function, base::ieee754::tan,
+ BUILTIN_FP_CALL)
+FUNCTION_REFERENCE_WITH_TYPE(ieee754_tanh_function, base::ieee754::tanh,
+ BUILTIN_FP_CALL)
void* libc_memchr(void* string, int character, size_t search_length) {
return memchr(string, character, search_length);
}
-ExternalReference ExternalReference::libc_memchr_function() {
- return ExternalReference(Redirect(FUNCTION_ADDR(libc_memchr)));
-}
+FUNCTION_REFERENCE(libc_memchr_function, libc_memchr)
void* libc_memcpy(void* dest, const void* src, size_t n) {
return memcpy(dest, src, n);
}
-ExternalReference ExternalReference::libc_memcpy_function() {
- return ExternalReference(Redirect(FUNCTION_ADDR(libc_memcpy)));
-}
+FUNCTION_REFERENCE(libc_memcpy_function, libc_memcpy)
void* libc_memmove(void* dest, const void* src, size_t n) {
return memmove(dest, src, n);
}
-ExternalReference ExternalReference::libc_memmove_function() {
- return ExternalReference(Redirect(FUNCTION_ADDR(libc_memmove)));
-}
+FUNCTION_REFERENCE(libc_memmove_function, libc_memmove)
void* libc_memset(void* dest, int value, size_t n) {
DCHECK_EQ(static_cast<byte>(value), value);
return memset(dest, value, n);
}
-ExternalReference ExternalReference::libc_memset_function() {
- return ExternalReference(Redirect(FUNCTION_ADDR(libc_memset)));
-}
+FUNCTION_REFERENCE(libc_memset_function, libc_memset)
ExternalReference ExternalReference::printf_function() {
return ExternalReference(Redirect(FUNCTION_ADDR(std::printf)));
}
-ExternalReference ExternalReference::refill_math_random() {
- return ExternalReference(Redirect(FUNCTION_ADDR(MathRandom::RefillCache)));
-}
+FUNCTION_REFERENCE(refill_math_random, MathRandom::RefillCache)
template <typename SubjectChar, typename PatternChar>
ExternalReference ExternalReference::search_string_raw() {
@@ -735,6 +611,9 @@ ExternalReference ExternalReference::search_string_raw() {
return ExternalReference(Redirect(FUNCTION_ADDR(f)));
}
+FUNCTION_REFERENCE(jsarray_array_join_concat_to_sequential_string,
+ JSArray::ArrayJoinConcatToSequentialString)
+
ExternalReference ExternalReference::search_string_raw_one_one() {
return search_string_raw<const uint8_t, const uint8_t>();
}
@@ -751,69 +630,60 @@ ExternalReference ExternalReference::search_string_raw_two_two() {
return search_string_raw<const uc16, const uc16>();
}
-ExternalReference ExternalReference::orderedhashmap_gethash_raw() {
- auto f = OrderedHashMap::GetHash;
- return ExternalReference(Redirect(FUNCTION_ADDR(f)));
-}
+FUNCTION_REFERENCE(orderedhashmap_gethash_raw, OrderedHashMap::GetHash)
-ExternalReference ExternalReference::get_or_create_hash_raw() {
- typedef Smi* (*GetOrCreateHash)(Isolate * isolate, Object * key);
- GetOrCreateHash f = Object::GetOrCreateHash;
- return ExternalReference(Redirect(FUNCTION_ADDR(f)));
+Address GetOrCreateHash(Isolate* isolate, Address raw_key) {
+ DisallowHeapAllocation no_gc;
+ return Object(raw_key)->GetOrCreateHash(isolate).ptr();
}
-ExternalReference ExternalReference::jsreceiver_create_identity_hash() {
- typedef Smi* (*CreateIdentityHash)(Isolate * isolate, JSReceiver * key);
- CreateIdentityHash f = JSReceiver::CreateIdentityHash;
- return ExternalReference(Redirect(FUNCTION_ADDR(f)));
+FUNCTION_REFERENCE(get_or_create_hash_raw, GetOrCreateHash)
+
+static Address JSReceiverCreateIdentityHash(Isolate* isolate, Address raw_key) {
+ JSReceiver key = JSReceiver::cast(Object(raw_key));
+ return JSReceiver::CreateIdentityHash(isolate, key).ptr();
}
+FUNCTION_REFERENCE(jsreceiver_create_identity_hash,
+ JSReceiverCreateIdentityHash)
+
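Note the calling convention the new wrappers share: only raw Address values
cross the C call boundary, tagged value types are rebuilt inside, and results
are flattened back with ptr(). The general shape is (sketch; SomeType and
SomeStaticHelper are placeholders, not names from this patch):

    static Address SomeHelperWrapper(Isolate* isolate, Address raw_arg) {
      SomeType arg = SomeType::cast(Object(raw_arg));
      return SomeType::SomeStaticHelper(isolate, arg).ptr();
    }
    FUNCTION_REFERENCE(some_helper_reference, SomeHelperWrapper)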
static uint32_t ComputeSeededIntegerHash(Isolate* isolate, uint32_t key) {
DisallowHeapAllocation no_gc;
return ComputeSeededHash(key, isolate->heap()->HashSeed());
}
-ExternalReference ExternalReference::compute_integer_hash() {
- return ExternalReference(Redirect(FUNCTION_ADDR(ComputeSeededIntegerHash)));
-}
+FUNCTION_REFERENCE(compute_integer_hash, ComputeSeededIntegerHash)
+FUNCTION_REFERENCE(copy_fast_number_jsarray_elements_to_typed_array,
+ CopyFastNumberJSArrayElementsToTypedArray)
+FUNCTION_REFERENCE(copy_typed_array_elements_to_typed_array,
+ CopyTypedArrayElementsToTypedArray)
+FUNCTION_REFERENCE(copy_typed_array_elements_slice, CopyTypedArrayElementsSlice)
+FUNCTION_REFERENCE(try_internalize_string_function,
+ StringTable::LookupStringIfExists_NoAllocate)
-ExternalReference
-ExternalReference::copy_fast_number_jsarray_elements_to_typed_array() {
- return ExternalReference(
- Redirect(FUNCTION_ADDR(CopyFastNumberJSArrayElementsToTypedArray)));
+static Address LexicographicCompareWrapper(Isolate* isolate, Address smi_x,
+ Address smi_y) {
+ Smi x(smi_x);
+ Smi y(smi_y);
+ return Smi::LexicographicCompare(isolate, x, y);
}
-ExternalReference
-ExternalReference::copy_typed_array_elements_to_typed_array() {
- return ExternalReference(
- Redirect(FUNCTION_ADDR(CopyTypedArrayElementsToTypedArray)));
-}
+FUNCTION_REFERENCE(smi_lexicographic_compare_function,
+ LexicographicCompareWrapper)
-ExternalReference ExternalReference::copy_typed_array_elements_slice() {
- return ExternalReference(
- Redirect(FUNCTION_ADDR(CopyTypedArrayElementsSlice)));
-}
-
-ExternalReference ExternalReference::try_internalize_string_function() {
- return ExternalReference(
- Redirect(FUNCTION_ADDR(StringTable::LookupStringIfExists_NoAllocate)));
-}
-
-ExternalReference ExternalReference::smi_lexicographic_compare_function() {
- return ExternalReference(Redirect(FUNCTION_ADDR(Smi::LexicographicCompare)));
-}
-
-ExternalReference ExternalReference::check_object_type() {
- return ExternalReference(Redirect(FUNCTION_ADDR(CheckObjectType)));
-}
+FUNCTION_REFERENCE(check_object_type, CheckObjectType)
#ifdef V8_INTL_SUPPORT
-ExternalReference ExternalReference::intl_convert_one_byte_to_lower() {
- return ExternalReference(Redirect(FUNCTION_ADDR(ConvertOneByteToLower)));
+
+static Address ConvertOneByteToLower(Address raw_src, Address raw_dst) {
+ String src = String::cast(Object(raw_src));
+ String dst = String::cast(Object(raw_dst));
+ return Intl::ConvertOneByteToLower(src, dst).ptr();
}
+FUNCTION_REFERENCE(intl_convert_one_byte_to_lower, ConvertOneByteToLower)
ExternalReference ExternalReference::intl_to_latin1_lower_table() {
- uint8_t* ptr = const_cast<uint8_t*>(ToLatin1LowerTable());
+ uint8_t* ptr = const_cast<uint8_t*>(Intl::ToLatin1LowerTable());
return ExternalReference(reinterpret_cast<Address>(ptr));
}
#endif // V8_INTL_SUPPORT
@@ -858,6 +728,14 @@ ExternalReference::promise_hook_or_async_event_delegate_address(
isolate->promise_hook_or_async_event_delegate_address());
}
+ExternalReference ExternalReference::
+ promise_hook_or_debug_is_active_or_async_event_delegate_address(
+ Isolate* isolate) {
+ return ExternalReference(
+ isolate
+ ->promise_hook_or_debug_is_active_or_async_event_delegate_address());
+}
+
ExternalReference ExternalReference::debug_execution_mode_address(
Isolate* isolate) {
return ExternalReference(isolate->debug_execution_mode_address());
@@ -878,45 +756,13 @@ ExternalReference ExternalReference::runtime_function_table_address(
const_cast<Runtime::Function*>(Runtime::RuntimeFunctionTable(isolate)));
}
-ExternalReference ExternalReference::invalidate_prototype_chains_function() {
- return ExternalReference(
- Redirect(FUNCTION_ADDR(JSObject::InvalidatePrototypeChains)));
+static Address InvalidatePrototypeChainsWrapper(Address raw_map) {
+ Map map = Map::cast(Object(raw_map));
+ return JSObject::InvalidatePrototypeChains(map).ptr();
}
-double power_helper(double x, double y) {
- int y_int = static_cast<int>(y);
- if (y == y_int) {
- return power_double_int(x, y_int); // Returns 1 if exponent is 0.
- }
- if (y == 0.5) {
- lazily_initialize_fast_sqrt();
- return (std::isinf(x)) ? V8_INFINITY
- : fast_sqrt(x + 0.0); // Convert -0 to +0.
- }
- if (y == -0.5) {
- lazily_initialize_fast_sqrt();
- return (std::isinf(x)) ? 0 : 1.0 / fast_sqrt(x + 0.0); // Convert -0 to +0.
- }
- return power_double_double(x, y);
-}
-
-// Helper function to compute x^y, where y is known to be an
-// integer. Uses binary decomposition to limit the number of
-// multiplications; see the discussion in "Hacker's Delight" by Henry
-// S. Warren, Jr., figure 11-6, page 213.
-double power_double_int(double x, int y) {
- double m = (y < 0) ? 1 / x : x;
- unsigned n = (y < 0) ? -y : y;
- double p = 1;
- while (n != 0) {
- if ((n & 1) != 0) p *= m;
- m *= m;
- if ((n & 2) != 0) p *= m;
- m *= m;
- n >>= 2;
- }
- return p;
-}
+FUNCTION_REFERENCE(invalidate_prototype_chains_function,
+ InvalidatePrototypeChainsWrapper)
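The deleted power_double_int computed x^y for integral y by binary
decomposition of the exponent, unusually consuming two exponent bits per loop
iteration. For reference, the textbook one-bit-per-step equivalent looks like
this (sketch only; the patch simply deletes the helper along with
power_helper rather than replacing it here):

    double pow_int(double x, int y) {
      double m = (y < 0) ? 1 / x : x;
      unsigned n = (y < 0) ? -y : y;
      double p = 1;  // covers the y == 0 case
      for (; n != 0; n >>= 1) {
        if (n & 1) p *= m;  // multiply in the current power of x
        m *= m;             // square for the next bit
      }
      return p;
    }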
double power_double_double(double x, double y) {
// The checks for special cases can be dropped in ia32 because it has already
@@ -929,15 +775,10 @@ double power_double_double(double x, double y) {
double modulo_double_double(double x, double y) { return Modulo(x, y); }
-ExternalReference ExternalReference::power_double_double_function() {
- return ExternalReference(
- Redirect(FUNCTION_ADDR(power_double_double), BUILTIN_FP_FP_CALL));
-}
-
-ExternalReference ExternalReference::mod_two_doubles_operation() {
- return ExternalReference(
- Redirect(FUNCTION_ADDR(modulo_double_double), BUILTIN_FP_FP_CALL));
-}
+FUNCTION_REFERENCE_WITH_TYPE(power_double_double_function, power_double_double,
+ BUILTIN_FP_FP_CALL)
+FUNCTION_REFERENCE_WITH_TYPE(mod_two_doubles_operation, modulo_double_double,
+ BUILTIN_FP_FP_CALL)
ExternalReference ExternalReference::debug_suspended_generator_address(
Isolate* isolate) {
@@ -949,10 +790,16 @@ ExternalReference ExternalReference::debug_restart_fp_address(
return ExternalReference(isolate->debug()->restart_fp_address());
}
-ExternalReference ExternalReference::wasm_thread_in_wasm_flag_address_address(
+ExternalReference ExternalReference::fast_c_call_caller_fp_address(
Isolate* isolate) {
- return ExternalReference(reinterpret_cast<Address>(
- &isolate->thread_local_top()->thread_in_wasm_flag_address_));
+ return ExternalReference(
+ isolate->isolate_data()->fast_c_call_caller_fp_address());
+}
+
+ExternalReference ExternalReference::fast_c_call_caller_pc_address(
+ Isolate* isolate) {
+ return ExternalReference(
+ isolate->isolate_data()->fast_c_call_caller_pc_address());
}
ExternalReference ExternalReference::fixed_typed_array_base_data_offset() {
@@ -960,6 +807,98 @@ ExternalReference ExternalReference::fixed_typed_array_base_data_offset() {
FixedTypedArrayBase::kDataOffset - kHeapObjectTag));
}
+FUNCTION_REFERENCE(call_enqueue_microtask_function,
+ MicrotaskQueue::CallEnqueueMicrotask)
+
+static int64_t atomic_pair_load(intptr_t address) {
+ return std::atomic_load(reinterpret_cast<std::atomic<int64_t>*>(address));
+}
+
+ExternalReference ExternalReference::atomic_pair_load_function() {
+ return ExternalReference(Redirect(FUNCTION_ADDR(atomic_pair_load)));
+}
+
+static void atomic_pair_store(intptr_t address, int value_low, int value_high) {
+ int64_t value =
+ static_cast<int64_t>(value_high) << 32 | (value_low & 0xFFFFFFFF);
+ std::atomic_store(reinterpret_cast<std::atomic<int64_t>*>(address), value);
+}
+
+ExternalReference ExternalReference::atomic_pair_store_function() {
+ return ExternalReference(Redirect(FUNCTION_ADDR(atomic_pair_store)));
+}
+
+static int64_t atomic_pair_add(intptr_t address, int value_low,
+ int value_high) {
+ int64_t value =
+ static_cast<int64_t>(value_high) << 32 | (value_low & 0xFFFFFFFF);
+ return std::atomic_fetch_add(reinterpret_cast<std::atomic<int64_t>*>(address),
+ value);
+}
+
+ExternalReference ExternalReference::atomic_pair_add_function() {
+ return ExternalReference(Redirect(FUNCTION_ADDR(atomic_pair_add)));
+}
+
+static int64_t atomic_pair_sub(intptr_t address, int value_low,
+ int value_high) {
+ int64_t value =
+ static_cast<int64_t>(value_high) << 32 | (value_low & 0xFFFFFFFF);
+ return std::atomic_fetch_sub(reinterpret_cast<std::atomic<int64_t>*>(address),
+ value);
+}
+
+ExternalReference ExternalReference::atomic_pair_sub_function() {
+ return ExternalReference(Redirect(FUNCTION_ADDR(atomic_pair_sub)));
+}
+
+static int64_t atomic_pair_and(intptr_t address, int value_low,
+ int value_high) {
+ int64_t value =
+ static_cast<int64_t>(value_high) << 32 | (value_low & 0xFFFFFFFF);
+ return std::atomic_fetch_and(reinterpret_cast<std::atomic<int64_t>*>(address),
+ value);
+}
+
+ExternalReference ExternalReference::atomic_pair_and_function() {
+ return ExternalReference(Redirect(FUNCTION_ADDR(atomic_pair_and)));
+}
+
+static int64_t atomic_pair_or(intptr_t address, int value_low, int value_high) {
+ int64_t value =
+ static_cast<int64_t>(value_high) << 32 | (value_low & 0xFFFFFFFF);
+ return std::atomic_fetch_or(reinterpret_cast<std::atomic<int64_t>*>(address),
+ value);
+}
+
+ExternalReference ExternalReference::atomic_pair_or_function() {
+ return ExternalReference(Redirect(FUNCTION_ADDR(atomic_pair_or)));
+}
+
+static int64_t atomic_pair_xor(intptr_t address, int value_low,
+ int value_high) {
+ int64_t value =
+ static_cast<int64_t>(value_high) << 32 | (value_low & 0xFFFFFFFF);
+ return std::atomic_fetch_xor(reinterpret_cast<std::atomic<int64_t>*>(address),
+ value);
+}
+
+ExternalReference ExternalReference::atomic_pair_xor_function() {
+ return ExternalReference(Redirect(FUNCTION_ADDR(atomic_pair_xor)));
+}
+
+static int64_t atomic_pair_exchange(intptr_t address, int value_low,
+ int value_high) {
+ int64_t value =
+ static_cast<int64_t>(value_high) << 32 | (value_low & 0xFFFFFFFF);
+ return std::atomic_exchange(reinterpret_cast<std::atomic<int64_t>*>(address),
+ value);
+}
+
+ExternalReference ExternalReference::atomic_pair_exchange_function() {
+ return ExternalReference(Redirect(FUNCTION_ADDR(atomic_pair_exchange)));
+}
+
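Each helper above that takes a (value_low, value_high) pair recombines it with
the same idiom:

    int64_t value =
        static_cast<int64_t>(value_high) << 32 | (value_low & 0xFFFFFFFF);

The mask on value_low matters: without it, the int operand would be
sign-extended to int64_t, so a negative low half would smear ones across the
upper 32 bits and corrupt value_high's contribution via the OR.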
static uint64_t atomic_pair_compare_exchange(intptr_t address,
int old_value_low,
int old_value_high,
@@ -974,11 +913,18 @@ static uint64_t atomic_pair_compare_exchange(intptr_t address,
return old_value;
}
-ExternalReference ExternalReference::atomic_pair_compare_exchange_function() {
- return ExternalReference(
- Redirect(FUNCTION_ADDR(atomic_pair_compare_exchange)));
+FUNCTION_REFERENCE(atomic_pair_compare_exchange_function,
+ atomic_pair_compare_exchange)
+
+static int EnterMicrotaskContextWrapper(HandleScopeImplementer* hsi,
+ Address raw_context) {
+ Context context = Context::cast(Object(raw_context));
+ hsi->EnterMicrotaskContext(context);
+ return 0;
}
+FUNCTION_REFERENCE(call_enter_context_function, EnterMicrotaskContextWrapper)
+
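EnterMicrotaskContextWrapper returns a dummy int even though it is called only
for its side effect. A plausible reading (an inference, not stated in the
patch) is that redirected references default to the value-returning
BUILTIN_CALL type, documented later in this diff as
"Address f(v8::internal::Arguments)", so the wrapper conforms by ending with:

    return 0;  // dummy value; BUILTIN_CALL assumes a result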
bool operator==(ExternalReference lhs, ExternalReference rhs) {
return lhs.address() == rhs.address();
}
@@ -1009,5 +955,9 @@ void abort_with_reason(int reason) {
UNREACHABLE();
}
+#undef FUNCTION_REFERENCE
+#undef FUNCTION_REFERENCE_WITH_ISOLATE
+#undef FUNCTION_REFERENCE_WITH_TYPE
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/external-reference.h b/deps/v8/src/external-reference.h
index eb4b235cb0..5f8d045cf3 100644
--- a/deps/v8/src/external-reference.h
+++ b/deps/v8/src/external-reference.h
@@ -27,12 +27,14 @@ class StatsCounter;
V(builtins_address, "builtins") \
V(handle_scope_implementer_address, \
"Isolate::handle_scope_implementer_address") \
+ V(address_of_interpreter_entry_trampoline_instruction_start, \
+ "Address of the InterpreterEntryTrampoline instruction start") \
V(interpreter_dispatch_counters, "Interpreter::dispatch_counters") \
V(interpreter_dispatch_table_address, "Interpreter::dispatch_table_address") \
V(date_cache_stamp, "date_cache_stamp") \
V(stress_deopt_count, "Isolate::stress_deopt_count_address()") \
V(force_slow_path, "Isolate::force_slow_path_address()") \
- V(roots_array_start, "Heap::roots_array_start()") \
+ V(isolate_root, "Isolate::isolate_root()") \
V(allocation_sites_list_address, "Heap::allocation_sites_list_address()") \
V(address_of_stack_limit, "StackGuard::address_of_jslimit()") \
V(address_of_real_stack_limit, "StackGuard::address_of_real_jslimit()") \
@@ -53,6 +55,9 @@ class StatsCounter;
V(async_event_delegate_address, "Isolate::async_event_delegate_address()") \
V(promise_hook_or_async_event_delegate_address, \
"Isolate::promise_hook_or_async_event_delegate_address()") \
+ V(promise_hook_or_debug_is_active_or_async_event_delegate_address, \
+ "Isolate::promise_hook_or_debug_is_active_or_async_event_delegate_" \
+ "address()") \
V(debug_execution_mode_address, "Isolate::debug_execution_mode_address()") \
V(debug_is_active_address, "Debug::is_active_address()") \
V(debug_hook_on_function_call_address, \
@@ -63,8 +68,10 @@ class StatsCounter;
V(debug_suspended_generator_address, \
"Debug::step_suspended_generator_address()") \
V(debug_restart_fp_address, "Debug::restart_fp_address()") \
- V(wasm_thread_in_wasm_flag_address_address, \
- "&Isolate::thread_in_wasm_flag_address") \
+ V(fast_c_call_caller_fp_address, \
+ "IsolateData::fast_c_call_caller_fp_address") \
+ V(fast_c_call_caller_pc_address, \
+ "IsolateData::fast_c_call_caller_pc_address") \
EXTERNAL_REFERENCE_LIST_NON_INTERPRETED_REGEXP(V)
#define EXTERNAL_REFERENCE_LIST(V) \
@@ -101,18 +108,18 @@ class StatsCounter;
V(ieee754_acosh_function, "base::ieee754::acosh") \
V(ieee754_asin_function, "base::ieee754::asin") \
V(ieee754_asinh_function, "base::ieee754::asinh") \
- V(ieee754_atan2_function, "base::ieee754::atan2") \
V(ieee754_atan_function, "base::ieee754::atan") \
+ V(ieee754_atan2_function, "base::ieee754::atan2") \
V(ieee754_atanh_function, "base::ieee754::atanh") \
V(ieee754_cbrt_function, "base::ieee754::cbrt") \
V(ieee754_cos_function, "base::ieee754::cos") \
V(ieee754_cosh_function, "base::ieee754::cosh") \
V(ieee754_exp_function, "base::ieee754::exp") \
V(ieee754_expm1_function, "base::ieee754::expm1") \
+ V(ieee754_log_function, "base::ieee754::log") \
V(ieee754_log10_function, "base::ieee754::log10") \
V(ieee754_log1p_function, "base::ieee754::log1p") \
V(ieee754_log2_function, "base::ieee754::log2") \
- V(ieee754_log_function, "base::ieee754::log") \
V(ieee754_sin_function, "base::ieee754::sin") \
V(ieee754_sinh_function, "base::ieee754::sinh") \
V(ieee754_tan_function, "base::ieee754::tan") \
@@ -123,6 +130,8 @@ class StatsCounter;
"JSObject::InvalidatePrototypeChains()") \
V(invoke_accessor_getter_callback, "InvokeAccessorGetterCallback") \
V(invoke_function_callback, "InvokeFunctionCallback") \
+ V(jsarray_array_join_concat_to_sequential_string, \
+ "jsarray_array_join_concat_to_sequential_string") \
V(jsreceiver_create_identity_hash, "jsreceiver_create_identity_hash") \
V(libc_memchr_function, "libc_memchr") \
V(libc_memcpy_function, "libc_memcpy") \
@@ -136,13 +145,13 @@ class StatsCounter;
V(power_double_double_function, "power_double_double_function") \
V(printf_function, "printf") \
V(refill_math_random, "MathRandom::RefillCache") \
- V(store_buffer_overflow_function, "StoreBuffer::StoreBufferOverflow") \
V(search_string_raw_one_one, "search_string_raw_one_one") \
V(search_string_raw_one_two, "search_string_raw_one_two") \
V(search_string_raw_two_one, "search_string_raw_two_one") \
V(search_string_raw_two_two, "search_string_raw_two_two") \
- V(try_internalize_string_function, "try_internalize_string_function") \
V(smi_lexicographic_compare_function, "smi_lexicographic_compare_function") \
+ V(store_buffer_overflow_function, "StoreBuffer::StoreBufferOverflow") \
+ V(try_internalize_string_function, "try_internalize_string_function") \
V(wasm_call_trap_callback_for_testing, \
"wasm::call_trap_callback_for_testing") \
V(wasm_f32_ceil, "wasm::f32_ceil_wrapper") \
@@ -172,6 +181,18 @@ class StatsCounter;
V(wasm_word32_ror, "wasm::word32_ror") \
V(wasm_word64_ctz, "wasm::word64_ctz") \
V(wasm_word64_popcnt, "wasm::word64_popcnt") \
+ V(wasm_memory_copy, "wasm::memory_copy") \
+ V(wasm_memory_fill, "wasm::memory_fill") \
+ V(call_enqueue_microtask_function, "MicrotaskQueue::CallEnqueueMicrotask") \
+ V(call_enter_context_function, "call_enter_context_function") \
+ V(atomic_pair_load_function, "atomic_pair_load_function") \
+ V(atomic_pair_store_function, "atomic_pair_store_function") \
+ V(atomic_pair_add_function, "atomic_pair_add_function") \
+ V(atomic_pair_sub_function, "atomic_pair_sub_function") \
+ V(atomic_pair_and_function, "atomic_pair_and_function") \
+ V(atomic_pair_or_function, "atomic_pair_or_function") \
+ V(atomic_pair_xor_function, "atomic_pair_xor_function") \
+ V(atomic_pair_exchange_function, "atomic_pair_exchange_function") \
V(atomic_pair_compare_exchange_function, \
"atomic_pair_compare_exchange_function") \
EXTERNAL_REFERENCE_LIST_INTL(V)
@@ -210,7 +231,7 @@ class ExternalReference {
// Used in the simulator to support different native api calls.
enum Type {
// Builtin call.
- // Object* f(v8::internal::Arguments).
+ // Address f(v8::internal::Arguments).
BUILTIN_CALL, // default
// Builtin call returning object pair.
@@ -280,7 +301,7 @@ class ExternalReference {
#undef DECL_EXTERNAL_REFERENCE
#define DECL_EXTERNAL_REFERENCE(name, desc) \
- static ExternalReference name(Isolate* isolate);
+ static V8_EXPORT_PRIVATE ExternalReference name(Isolate* isolate);
EXTERNAL_REFERENCE_LIST_WITH_ISOLATE(DECL_EXTERNAL_REFERENCE)
#undef DECL_EXTERNAL_REFERENCE
@@ -311,6 +332,9 @@ V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, ExternalReference);
void abort_with_reason(int reason);
+// Computes pow(x, y) with the special cases in the spec for Math.pow.
+double power_double_double(double x, double y);
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/feedback-vector-inl.h b/deps/v8/src/feedback-vector-inl.h
index c9cdb0a157..bdfc8ee013 100644
--- a/deps/v8/src/feedback-vector-inl.h
+++ b/deps/v8/src/feedback-vector-inl.h
@@ -9,8 +9,10 @@
#include "src/globals.h"
#include "src/heap/factory-inl.h"
#include "src/heap/heap-inl.h"
+#include "src/heap/heap-write-barrier.h"
#include "src/objects/maybe-object-inl.h"
#include "src/objects/shared-function-info.h"
+#include "src/objects/smi.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -18,6 +20,14 @@
namespace v8 {
namespace internal {
+OBJECT_CONSTRUCTORS_IMPL(FeedbackVector, HeapObject)
+OBJECT_CONSTRUCTORS_IMPL(FeedbackMetadata, HeapObject)
+
+NEVER_READ_ONLY_SPACE_IMPL(FeedbackVector)
+
+CAST_ACCESSOR(FeedbackVector)
+CAST_ACCESSOR(FeedbackMetadata)
+
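The hand-written cast helpers removed below are subsumed by DECL_CAST in the
header plus CAST_ACCESSOR here. Judging by the deleted bodies, the generated
code is equivalent to the following (sketch; the macro's real output differs
in details such as const overloads):

    FeedbackMetadata FeedbackMetadata::cast(Object obj) {
      DCHECK(obj->IsFeedbackMetadata());
      return FeedbackMetadata(obj.ptr());
    }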
INT32_ACCESSORS(FeedbackMetadata, slot_count, kSlotCountOffset)
int32_t FeedbackMetadata::synchronized_slot_count() const {
@@ -25,12 +35,6 @@ int32_t FeedbackMetadata::synchronized_slot_count() const {
FIELD_ADDR(this, kSlotCountOffset)));
}
-// static
-FeedbackMetadata* FeedbackMetadata::cast(Object* obj) {
- DCHECK(obj->IsFeedbackMetadata());
- return reinterpret_cast<FeedbackMetadata*>(obj);
-}
-
int32_t FeedbackMetadata::get(int index) const {
DCHECK(index >= 0 && index < length());
int offset = kHeaderSize + index * kInt32Size;
@@ -49,12 +53,6 @@ int FeedbackMetadata::length() const {
return FeedbackMetadata::length(slot_count());
}
-// static
-FeedbackVector* FeedbackVector::cast(Object* obj) {
- DCHECK(obj->IsFeedbackVector());
- return reinterpret_cast<FeedbackVector*>(obj);
-}
-
int FeedbackMetadata::GetSlotSize(FeedbackSlotKind kind) {
switch (kind) {
case FeedbackSlotKind::kForIn:
@@ -101,7 +99,7 @@ INT32_ACCESSORS(FeedbackVector, deopt_count, kDeoptCountOffset)
bool FeedbackVector::is_empty() const { return length() == 0; }
-FeedbackMetadata* FeedbackVector::metadata() const {
+FeedbackMetadata FeedbackVector::metadata() const {
return shared_function_info()->feedback_metadata();
}
@@ -114,22 +112,22 @@ void FeedbackVector::increment_deopt_count() {
}
}
-Code* FeedbackVector::optimized_code() const {
- MaybeObject* slot = optimized_code_weak_or_smi();
+Code FeedbackVector::optimized_code() const {
+ MaybeObject slot = optimized_code_weak_or_smi();
DCHECK(slot->IsSmi() || slot->IsWeakOrCleared());
- HeapObject* heap_object;
- return slot->GetHeapObject(&heap_object) ? Code::cast(heap_object) : nullptr;
+ HeapObject heap_object;
+ return slot->GetHeapObject(&heap_object) ? Code::cast(heap_object) : Code();
}
OptimizationMarker FeedbackVector::optimization_marker() const {
- MaybeObject* slot = optimized_code_weak_or_smi();
- Smi* value;
+ MaybeObject slot = optimized_code_weak_or_smi();
+ Smi value;
if (!slot->ToSmi(&value)) return OptimizationMarker::kNone;
return static_cast<OptimizationMarker>(value->value());
}
bool FeedbackVector::has_optimized_code() const {
- return optimized_code() != nullptr;
+ return !optimized_code().is_null();
}
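This hunk shows the object-pointer migration in miniature: HeapObject
subclasses are now tagged value types, so the null state is the
default-constructed object rather than nullptr. The calling pattern changes
accordingly (both forms taken from the lines above):

    // before: Code* code = optimized_code();  if (code != nullptr) ...
    // after:  Code  code = optimized_code();  if (!code.is_null()) ...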
bool FeedbackVector::has_optimization_marker() const {
@@ -144,41 +142,41 @@ FeedbackSlot FeedbackVector::ToSlot(int index) {
return FeedbackSlot(index);
}
-MaybeObject* FeedbackVector::Get(FeedbackSlot slot) const {
+MaybeObject FeedbackVector::Get(FeedbackSlot slot) const {
return get(GetIndex(slot));
}
-MaybeObject* FeedbackVector::get(int index) const {
+MaybeObject FeedbackVector::get(int index) const {
DCHECK_GE(index, 0);
DCHECK_LT(index, this->length());
- int offset = kFeedbackSlotsOffset + index * kPointerSize;
- return RELAXED_READ_WEAK_FIELD(this, offset);
+ int offset = kFeedbackSlotsOffset + index * kTaggedSize;
+ return RELAXED_READ_WEAK_FIELD(*this, offset);
}
-void FeedbackVector::Set(FeedbackSlot slot, MaybeObject* value,
+void FeedbackVector::Set(FeedbackSlot slot, MaybeObject value,
WriteBarrierMode mode) {
set(GetIndex(slot), value, mode);
}
-void FeedbackVector::set(int index, MaybeObject* value, WriteBarrierMode mode) {
+void FeedbackVector::set(int index, MaybeObject value, WriteBarrierMode mode) {
DCHECK_GE(index, 0);
DCHECK_LT(index, this->length());
- int offset = kFeedbackSlotsOffset + index * kPointerSize;
- RELAXED_WRITE_FIELD(this, offset, value);
- CONDITIONAL_WEAK_WRITE_BARRIER(this, offset, value, mode);
+ int offset = kFeedbackSlotsOffset + index * kTaggedSize;
+ RELAXED_WRITE_WEAK_FIELD(*this, offset, value);
+ CONDITIONAL_WEAK_WRITE_BARRIER(*this, offset, value, mode);
}
-void FeedbackVector::Set(FeedbackSlot slot, Object* value,
+void FeedbackVector::Set(FeedbackSlot slot, Object value,
WriteBarrierMode mode) {
set(GetIndex(slot), MaybeObject::FromObject(value), mode);
}
-void FeedbackVector::set(int index, Object* value, WriteBarrierMode mode) {
+void FeedbackVector::set(int index, Object value, WriteBarrierMode mode) {
set(index, MaybeObject::FromObject(value), mode);
}
-inline MaybeObject** FeedbackVector::slots_start() {
- return HeapObject::RawMaybeWeakField(this, kFeedbackSlotsOffset);
+inline MaybeObjectSlot FeedbackVector::slots_start() {
+ return RawMaybeWeakField(kFeedbackSlotsOffset);
}
// Helper function to transform the feedback to BinaryOperationHint.
@@ -225,6 +223,8 @@ CompareOperationHint CompareOperationHintFromFeedback(int type_feedback) {
return CompareOperationHint::kBigInt;
case CompareOperationFeedback::kReceiver:
return CompareOperationHint::kReceiver;
+ case CompareOperationFeedback::kReceiverOrNullOrUndefined:
+ return CompareOperationHint::kReceiverOrNullOrUndefined;
default:
return CompareOperationHint::kAny;
}
@@ -248,7 +248,7 @@ ForInHint ForInHintFromFeedback(int type_feedback) {
void FeedbackVector::ComputeCounts(int* with_type_info, int* generic,
int* vector_ic_count) {
- MaybeObject* megamorphic_sentinel = MaybeObject::FromObject(
+ MaybeObject megamorphic_sentinel = MaybeObject::FromObject(
*FeedbackVector::MegamorphicSentinel(GetIsolate()));
int with = 0;
int gen = 0;
@@ -258,7 +258,7 @@ void FeedbackVector::ComputeCounts(int* with_type_info, int* generic,
FeedbackSlot slot = iter.Next();
FeedbackSlotKind kind = iter.kind();
- MaybeObject* const obj = Get(slot);
+ MaybeObject const obj = Get(slot);
AssertNoLegacyTypes(obj);
switch (kind) {
case FeedbackSlotKind::kCall:
@@ -276,7 +276,7 @@ void FeedbackVector::ComputeCounts(int* with_type_info, int* generic,
case FeedbackSlotKind::kStoreInArrayLiteral:
case FeedbackSlotKind::kStoreDataPropertyInLiteral:
case FeedbackSlotKind::kTypeProfile: {
- HeapObject* heap_object;
+ HeapObject heap_object;
if (obj->IsWeakOrCleared() ||
(obj->GetHeapObjectIfStrong(&heap_object) &&
(heap_object->IsWeakFixedArray() || heap_object->IsString()))) {
@@ -289,7 +289,7 @@ void FeedbackVector::ComputeCounts(int* with_type_info, int* generic,
break;
}
case FeedbackSlotKind::kBinaryOp: {
- int const feedback = Smi::ToInt(obj->cast<Smi>());
+ int const feedback = obj.ToSmi().value();
BinaryOperationHint hint = BinaryOperationHintFromFeedback(feedback);
if (hint == BinaryOperationHint::kAny) {
gen++;
@@ -301,7 +301,7 @@ void FeedbackVector::ComputeCounts(int* with_type_info, int* generic,
break;
}
case FeedbackSlotKind::kCompareOp: {
- int const feedback = Smi::ToInt(obj->cast<Smi>());
+ int const feedback = obj.ToSmi().value();
CompareOperationHint hint = CompareOperationHintFromFeedback(feedback);
if (hint == CompareOperationHint::kAny) {
gen++;
@@ -313,7 +313,7 @@ void FeedbackVector::ComputeCounts(int* with_type_info, int* generic,
break;
}
case FeedbackSlotKind::kForIn: {
- int const feedback = Smi::ToInt(obj->cast<Smi>());
+ int const feedback = obj.ToSmi().value();
ForInHint hint = ForInHintFromFeedback(feedback);
if (hint == ForInHint::kAny) {
gen++;
@@ -366,7 +366,7 @@ Handle<Symbol> FeedbackVector::PremonomorphicSentinel(Isolate* isolate) {
return isolate->factory()->premonomorphic_symbol();
}
-Symbol* FeedbackVector::RawUninitializedSentinel(Isolate* isolate) {
+Symbol FeedbackVector::RawUninitializedSentinel(Isolate* isolate) {
return ReadOnlyRoots(isolate).uninitialized_symbol();
}
@@ -386,13 +386,13 @@ int FeedbackMetadataIterator::entry_size() const {
return FeedbackMetadata::GetSlotSize(kind());
}
-MaybeObject* FeedbackNexus::GetFeedback() const {
- MaybeObject* feedback = vector()->Get(slot());
+MaybeObject FeedbackNexus::GetFeedback() const {
+ MaybeObject feedback = vector()->Get(slot());
FeedbackVector::AssertNoLegacyTypes(feedback);
return feedback;
}
-MaybeObject* FeedbackNexus::GetFeedbackExtra() const {
+MaybeObject FeedbackNexus::GetFeedbackExtra() const {
#ifdef DEBUG
FeedbackSlotKind kind = vector()->GetKind(slot());
DCHECK_LT(1, FeedbackMetadata::GetSlotSize(kind));
@@ -401,16 +401,16 @@ MaybeObject* FeedbackNexus::GetFeedbackExtra() const {
return vector()->get(extra_index);
}
-void FeedbackNexus::SetFeedback(Object* feedback, WriteBarrierMode mode) {
+void FeedbackNexus::SetFeedback(Object feedback, WriteBarrierMode mode) {
SetFeedback(MaybeObject::FromObject(feedback));
}
-void FeedbackNexus::SetFeedback(MaybeObject* feedback, WriteBarrierMode mode) {
+void FeedbackNexus::SetFeedback(MaybeObject feedback, WriteBarrierMode mode) {
FeedbackVector::AssertNoLegacyTypes(feedback);
vector()->Set(slot(), feedback, mode);
}
-void FeedbackNexus::SetFeedbackExtra(Object* feedback_extra,
+void FeedbackNexus::SetFeedbackExtra(Object feedback_extra,
WriteBarrierMode mode) {
#ifdef DEBUG
FeedbackSlotKind kind = vector()->GetKind(slot());
@@ -421,7 +421,7 @@ void FeedbackNexus::SetFeedbackExtra(Object* feedback_extra,
vector()->set(index, MaybeObject::FromObject(feedback_extra), mode);
}
-void FeedbackNexus::SetFeedbackExtra(MaybeObject* feedback_extra,
+void FeedbackNexus::SetFeedbackExtra(MaybeObject feedback_extra,
WriteBarrierMode mode) {
#ifdef DEBUG
FeedbackVector::AssertNoLegacyTypes(feedback_extra);
diff --git a/deps/v8/src/feedback-vector.cc b/deps/v8/src/feedback-vector.cc
index eaea7a978c..2ac34a5e4d 100644
--- a/deps/v8/src/feedback-vector.cc
+++ b/deps/v8/src/feedback-vector.cc
@@ -3,12 +3,12 @@
// found in the LICENSE file.
#include "src/feedback-vector.h"
-#include "src/code-stubs.h"
#include "src/feedback-vector-inl.h"
#include "src/ic/ic-inl.h"
#include "src/objects.h"
#include "src/objects/data-handler-inl.h"
#include "src/objects/hash-table-inl.h"
+#include "src/objects/map-inl.h"
#include "src/objects/object-macros.h"
namespace v8 {
@@ -40,12 +40,15 @@ bool FeedbackVectorSpec::HasTypeProfileSlot() const {
return GetKind(slot) == FeedbackSlotKind::kTypeProfile;
}
-static bool IsPropertyNameFeedback(MaybeObject* feedback) {
- HeapObject* heap_object;
+static bool IsPropertyNameFeedback(MaybeObject feedback) {
+ HeapObject heap_object;
if (!feedback->GetHeapObjectIfStrong(&heap_object)) return false;
- if (heap_object->IsString()) return true;
+ if (heap_object->IsString()) {
+ DCHECK(heap_object->IsInternalizedString());
+ return true;
+ }
if (!heap_object->IsSymbol()) return false;
- Symbol* symbol = Symbol::cast(heap_object);
+ Symbol symbol = Symbol::cast(heap_object);
ReadOnlyRoots roots = symbol->GetReadOnlyRoots();
return symbol != roots.uninitialized_symbol() &&
symbol != roots.premonomorphic_symbol() &&
@@ -231,13 +234,13 @@ Handle<FeedbackVector> FeedbackVector::New(Isolate* isolate,
int index = FeedbackVector::GetIndex(slot);
int entry_size = FeedbackMetadata::GetSlotSize(kind);
- Object* extra_value = *uninitialized_sentinel;
+ Object extra_value = *uninitialized_sentinel;
switch (kind) {
case FeedbackSlotKind::kLoadGlobalInsideTypeof:
case FeedbackSlotKind::kLoadGlobalNotInsideTypeof:
case FeedbackSlotKind::kStoreGlobalSloppy:
case FeedbackSlotKind::kStoreGlobalStrict:
- vector->set(index, HeapObjectReference::ClearedValue(),
+ vector->set(index, HeapObjectReference::ClearedValue(isolate),
SKIP_WRITE_BARRIER);
break;
case FeedbackSlotKind::kForIn:
@@ -325,8 +328,8 @@ void FeedbackVector::SetOptimizationMarker(OptimizationMarker marker) {
}
void FeedbackVector::EvictOptimizedCodeMarkedForDeoptimization(
- SharedFunctionInfo* shared, const char* reason) {
- MaybeObject* slot = optimized_code_weak_or_smi();
+ SharedFunctionInfo shared, const char* reason) {
+ MaybeObject slot = optimized_code_weak_or_smi();
if (slot->IsSmi()) {
return;
}
@@ -336,7 +339,7 @@ void FeedbackVector::EvictOptimizedCodeMarkedForDeoptimization(
return;
}
- Code* code = Code::cast(slot->GetHeapObject());
+ Code code = Code::cast(slot->GetHeapObject());
if (code->marked_for_deoptimization()) {
if (FLAG_trace_deopt) {
PrintF("[evicting optimizing code marked for deoptimization (%s) for ",
@@ -353,7 +356,7 @@ void FeedbackVector::EvictOptimizedCodeMarkedForDeoptimization(
}
bool FeedbackVector::ClearSlots(Isolate* isolate) {
- MaybeObject* uninitialized_sentinel = MaybeObject::FromObject(
+ MaybeObject uninitialized_sentinel = MaybeObject::FromObject(
FeedbackVector::RawUninitializedSentinel(isolate));
bool feedback_updated = false;
@@ -361,18 +364,18 @@ bool FeedbackVector::ClearSlots(Isolate* isolate) {
while (iter.HasNext()) {
FeedbackSlot slot = iter.Next();
- MaybeObject* obj = Get(slot);
+ MaybeObject obj = Get(slot);
if (obj != uninitialized_sentinel) {
- FeedbackNexus nexus(this, slot);
+ FeedbackNexus nexus(*this, slot);
feedback_updated |= nexus.Clear();
}
}
return feedback_updated;
}
-void FeedbackVector::AssertNoLegacyTypes(MaybeObject* object) {
+void FeedbackVector::AssertNoLegacyTypes(MaybeObject object) {
#ifdef DEBUG
- HeapObject* heap_object;
+ HeapObject heap_object;
if (object->GetHeapObject(&heap_object)) {
// Instead of FixedArray, the Feedback and the Extra should contain
// WeakFixedArrays. The only allowed FixedArray subtype is HashTable.
@@ -383,7 +386,7 @@ void FeedbackVector::AssertNoLegacyTypes(MaybeObject* object) {
Handle<WeakFixedArray> FeedbackNexus::EnsureArrayOfSize(int length) {
Isolate* isolate = GetIsolate();
- HeapObject* heap_object;
+ HeapObject heap_object;
if (GetFeedback()->GetHeapObjectIfStrong(&heap_object) &&
heap_object->IsWeakFixedArray() &&
WeakFixedArray::cast(heap_object)->length() == length) {
@@ -396,7 +399,7 @@ Handle<WeakFixedArray> FeedbackNexus::EnsureArrayOfSize(int length) {
Handle<WeakFixedArray> FeedbackNexus::EnsureExtraArrayOfSize(int length) {
Isolate* isolate = GetIsolate();
- HeapObject* heap_object;
+ HeapObject heap_object;
if (GetFeedbackExtra()->GetHeapObjectIfStrong(&heap_object) &&
heap_object->IsWeakFixedArray() &&
WeakFixedArray::cast(heap_object)->length() == length) {
@@ -414,7 +417,8 @@ void FeedbackNexus::ConfigureUninitialized() {
case FeedbackSlotKind::kStoreGlobalStrict:
case FeedbackSlotKind::kLoadGlobalNotInsideTypeof:
case FeedbackSlotKind::kLoadGlobalInsideTypeof: {
- SetFeedback(HeapObjectReference::ClearedValue(), SKIP_WRITE_BARRIER);
+ SetFeedback(HeapObjectReference::ClearedValue(isolate),
+ SKIP_WRITE_BARRIER);
SetFeedbackExtra(*FeedbackVector::UninitializedSentinel(isolate),
SKIP_WRITE_BARRIER);
break;
@@ -510,11 +514,11 @@ void FeedbackNexus::ConfigurePremonomorphic(Handle<Map> receiver_map) {
bool FeedbackNexus::ConfigureMegamorphic() {
DisallowHeapAllocation no_gc;
Isolate* isolate = GetIsolate();
- MaybeObject* sentinel =
+ MaybeObject sentinel =
MaybeObject::FromObject(*FeedbackVector::MegamorphicSentinel(isolate));
if (GetFeedback() != sentinel) {
SetFeedback(sentinel, SKIP_WRITE_BARRIER);
- SetFeedbackExtra(HeapObjectReference::ClearedValue());
+ SetFeedbackExtra(HeapObjectReference::ClearedValue(isolate));
return true;
}
@@ -525,14 +529,14 @@ bool FeedbackNexus::ConfigureMegamorphic(IcCheckType property_type) {
DisallowHeapAllocation no_gc;
Isolate* isolate = GetIsolate();
bool changed = false;
- MaybeObject* sentinel =
+ MaybeObject sentinel =
MaybeObject::FromObject(*FeedbackVector::MegamorphicSentinel(isolate));
if (GetFeedback() != sentinel) {
SetFeedback(sentinel, SKIP_WRITE_BARRIER);
changed = true;
}
- Smi* extra = Smi::FromInt(static_cast<int>(property_type));
+ Smi extra = Smi::FromInt(static_cast<int>(property_type));
if (changed || GetFeedbackExtra() != MaybeObject::FromSmi(extra)) {
SetFeedbackExtra(extra, SKIP_WRITE_BARRIER);
changed = true;
@@ -540,9 +544,16 @@ bool FeedbackNexus::ConfigureMegamorphic(IcCheckType property_type) {
return changed;
}
+Map FeedbackNexus::FindFirstMap() const {
+ MapHandles maps;
+ ExtractMaps(&maps);
+ if (maps.size() > 0) return *maps.at(0);
+ return Map();
+}
+
InlineCacheState FeedbackNexus::StateFromFeedback() const {
Isolate* isolate = GetIsolate();
- MaybeObject* feedback = GetFeedback();
+ MaybeObject feedback = GetFeedback();
switch (kind()) {
case FeedbackSlotKind::kCreateClosure:
@@ -559,7 +570,7 @@ InlineCacheState FeedbackNexus::StateFromFeedback() const {
if (feedback->IsSmi()) return MONOMORPHIC;
DCHECK(feedback->IsWeakOrCleared());
- MaybeObject* extra = GetFeedbackExtra();
+ MaybeObject extra = GetFeedbackExtra();
if (!feedback->IsCleared() ||
extra != MaybeObject::FromObject(
*FeedbackVector::UninitializedSentinel(isolate))) {
@@ -592,7 +603,7 @@ InlineCacheState FeedbackNexus::StateFromFeedback() const {
// Don't check if the map is cleared.
return MONOMORPHIC;
}
- HeapObject* heap_object;
+ HeapObject heap_object;
if (feedback->GetHeapObjectIfStrong(&heap_object)) {
if (heap_object->IsWeakFixedArray()) {
// Determine state purely by our structure, don't check if the maps
@@ -601,15 +612,15 @@ InlineCacheState FeedbackNexus::StateFromFeedback() const {
}
if (heap_object->IsName()) {
DCHECK(IsKeyedLoadICKind(kind()) || IsKeyedStoreICKind(kind()));
- Object* extra = GetFeedbackExtra()->GetHeapObjectAssumeStrong();
- WeakFixedArray* extra_array = WeakFixedArray::cast(extra);
+ Object extra = GetFeedbackExtra()->GetHeapObjectAssumeStrong();
+ WeakFixedArray extra_array = WeakFixedArray::cast(extra);
return extra_array->length() > 2 ? POLYMORPHIC : MONOMORPHIC;
}
}
UNREACHABLE();
}
case FeedbackSlotKind::kCall: {
- HeapObject* heap_object;
+ HeapObject heap_object;
if (feedback == MaybeObject::FromObject(
*FeedbackVector::MegamorphicSentinel(isolate))) {
return GENERIC;
@@ -737,17 +748,17 @@ bool FeedbackNexus::ConfigureLexicalVarMode(int script_context_index,
void FeedbackNexus::ConfigureHandlerMode(const MaybeObjectHandle& handler) {
DCHECK(IsGlobalICKind(kind()));
DCHECK(IC::IsHandler(*handler));
- SetFeedback(HeapObjectReference::ClearedValue());
+ SetFeedback(HeapObjectReference::ClearedValue(GetIsolate()));
SetFeedbackExtra(*handler);
}
void FeedbackNexus::ConfigureCloneObject(Handle<Map> source_map,
Handle<Map> result_map) {
Isolate* isolate = GetIsolate();
- MaybeObject* maybe_feedback = GetFeedback();
+ MaybeObject maybe_feedback = GetFeedback();
Handle<HeapObject> feedback(maybe_feedback->IsStrongOrWeak()
? maybe_feedback->GetHeapObject()
- : nullptr,
+ : HeapObject(),
isolate);
switch (ic_state()) {
case UNINITIALIZED:
@@ -769,7 +780,7 @@ void FeedbackNexus::ConfigureCloneObject(Handle<Map> source_map,
array->Set(1, GetFeedbackExtra());
array->Set(2, HeapObjectReference::Weak(*source_map));
array->Set(3, MaybeObject::FromObject(*result_map));
- SetFeedbackExtra(HeapObjectReference::ClearedValue());
+ SetFeedbackExtra(HeapObjectReference::ClearedValue(isolate));
}
break;
case POLYMORPHIC: {
@@ -778,7 +789,7 @@ void FeedbackNexus::ConfigureCloneObject(Handle<Map> source_map,
Handle<WeakFixedArray> array = Handle<WeakFixedArray>::cast(feedback);
int i = 0;
for (; i < array->length(); i += kCloneObjectPolymorphicEntrySize) {
- MaybeObject* feedback = array->Get(i);
+ MaybeObject feedback = array->Get(i);
if (feedback->IsCleared()) break;
Handle<Map> cached_map(Map::cast(feedback->GetHeapObject()), isolate);
if (cached_map.is_identical_to(source_map) ||
@@ -789,10 +800,10 @@ void FeedbackNexus::ConfigureCloneObject(Handle<Map> source_map,
if (i >= array->length()) {
if (i == kMaxElements) {
// Transition to MEGAMORPHIC.
- MaybeObject* sentinel = MaybeObject::FromObject(
+ MaybeObject sentinel = MaybeObject::FromObject(
*FeedbackVector::MegamorphicSentinel(isolate));
SetFeedback(sentinel, SKIP_WRITE_BARRIER);
- SetFeedbackExtra(HeapObjectReference::ClearedValue());
+ SetFeedbackExtra(HeapObjectReference::ClearedValue(isolate));
break;
}
@@ -818,7 +829,7 @@ void FeedbackNexus::ConfigureCloneObject(Handle<Map> source_map,
int FeedbackNexus::GetCallCount() {
DCHECK(IsCallICKind(kind()));
- Object* call_count = GetFeedbackExtra()->cast<Object>();
+ Object call_count = GetFeedbackExtra()->cast<Object>();
CHECK(call_count->IsSmi());
uint32_t value = static_cast<uint32_t>(Smi::ToInt(call_count));
return CallCountField::decode(value);
@@ -827,7 +838,7 @@ int FeedbackNexus::GetCallCount() {
void FeedbackNexus::SetSpeculationMode(SpeculationMode mode) {
DCHECK(IsCallICKind(kind()));
- Object* call_count = GetFeedbackExtra()->cast<Object>();
+ Object call_count = GetFeedbackExtra()->cast<Object>();
CHECK(call_count->IsSmi());
uint32_t count = static_cast<uint32_t>(Smi::ToInt(call_count));
uint32_t value = CallCountField::encode(CallCountField::decode(count));
@@ -838,7 +849,7 @@ void FeedbackNexus::SetSpeculationMode(SpeculationMode mode) {
SpeculationMode FeedbackNexus::GetSpeculationMode() {
DCHECK(IsCallICKind(kind()));
- Object* call_count = GetFeedbackExtra()->cast<Object>();
+ Object call_count = GetFeedbackExtra()->cast<Object>();
CHECK(call_count->IsSmi());
uint32_t value = static_cast<uint32_t>(Smi::ToInt(call_count));
return SpeculationModeField::decode(value);
@@ -907,14 +918,14 @@ int FeedbackNexus::ExtractMaps(MapHandles* maps) const {
IsStoreInArrayLiteralICKind(kind()));
Isolate* isolate = GetIsolate();
- MaybeObject* feedback = GetFeedback();
+ MaybeObject feedback = GetFeedback();
bool is_named_feedback = IsPropertyNameFeedback(feedback);
- HeapObject* heap_object;
+ HeapObject heap_object;
if ((feedback->GetHeapObjectIfStrong(&heap_object) &&
heap_object->IsWeakFixedArray()) ||
is_named_feedback) {
int found = 0;
- WeakFixedArray* array;
+ WeakFixedArray array;
if (is_named_feedback) {
array =
WeakFixedArray::cast(GetFeedbackExtra()->GetHeapObjectAssumeStrong());
@@ -922,25 +933,25 @@ int FeedbackNexus::ExtractMaps(MapHandles* maps) const {
array = WeakFixedArray::cast(heap_object);
}
const int increment = 2;
- HeapObject* heap_object;
+ HeapObject heap_object;
for (int i = 0; i < array->length(); i += increment) {
DCHECK(array->Get(i)->IsWeakOrCleared());
if (array->Get(i)->GetHeapObjectIfWeak(&heap_object)) {
- Map* map = Map::cast(heap_object);
+ Map map = Map::cast(heap_object);
maps->push_back(handle(map, isolate));
found++;
}
}
return found;
} else if (feedback->GetHeapObjectIfWeak(&heap_object)) {
- Map* map = Map::cast(heap_object);
+ Map map = Map::cast(heap_object);
maps->push_back(handle(map, isolate));
return 1;
} else if (feedback->GetHeapObjectIfStrong(&heap_object) &&
heap_object ==
heap_object->GetReadOnlyRoots().premonomorphic_symbol()) {
if (GetFeedbackExtra()->GetHeapObjectIfWeak(&heap_object)) {
- Map* map = Map::cast(heap_object);
+ Map map = Map::cast(heap_object);
maps->push_back(handle(map, isolate));
return 1;
}
@@ -954,14 +965,14 @@ MaybeObjectHandle FeedbackNexus::FindHandlerForMap(Handle<Map> map) const {
IsKeyedLoadICKind(kind()) || IsKeyedStoreICKind(kind()) ||
IsStoreOwnICKind(kind()) || IsStoreDataPropertyInLiteralKind(kind()));
- MaybeObject* feedback = GetFeedback();
+ MaybeObject feedback = GetFeedback();
Isolate* isolate = GetIsolate();
bool is_named_feedback = IsPropertyNameFeedback(feedback);
- HeapObject* heap_object;
+ HeapObject heap_object;
if ((feedback->GetHeapObjectIfStrong(&heap_object) &&
heap_object->IsWeakFixedArray()) ||
is_named_feedback) {
- WeakFixedArray* array;
+ WeakFixedArray array;
if (is_named_feedback) {
array =
WeakFixedArray::cast(GetFeedbackExtra()->GetHeapObjectAssumeStrong());
@@ -969,22 +980,22 @@ MaybeObjectHandle FeedbackNexus::FindHandlerForMap(Handle<Map> map) const {
array = WeakFixedArray::cast(heap_object);
}
const int increment = 2;
- HeapObject* heap_object;
+ HeapObject heap_object;
for (int i = 0; i < array->length(); i += increment) {
DCHECK(array->Get(i)->IsWeakOrCleared());
if (array->Get(i)->GetHeapObjectIfWeak(&heap_object)) {
- Map* array_map = Map::cast(heap_object);
+ Map array_map = Map::cast(heap_object);
if (array_map == *map && !array->Get(i + increment - 1)->IsCleared()) {
- MaybeObject* handler = array->Get(i + increment - 1);
+ MaybeObject handler = array->Get(i + increment - 1);
DCHECK(IC::IsHandler(handler));
return handle(handler, isolate);
}
}
}
} else if (feedback->GetHeapObjectIfWeak(&heap_object)) {
- Map* cell_map = Map::cast(heap_object);
+ Map cell_map = Map::cast(heap_object);
if (cell_map == *map && !GetFeedbackExtra()->IsCleared()) {
- MaybeObject* handler = GetFeedbackExtra();
+ MaybeObject handler = GetFeedbackExtra();
DCHECK(IC::IsHandler(handler));
return handle(handler, isolate);
}
@@ -1000,15 +1011,15 @@ bool FeedbackNexus::FindHandlers(MaybeObjectHandles* code_list,
IsStoreOwnICKind(kind()) || IsStoreDataPropertyInLiteralKind(kind()) ||
IsStoreInArrayLiteralICKind(kind()));
- MaybeObject* feedback = GetFeedback();
+ MaybeObject feedback = GetFeedback();
Isolate* isolate = GetIsolate();
int count = 0;
bool is_named_feedback = IsPropertyNameFeedback(feedback);
- HeapObject* heap_object;
+ HeapObject heap_object;
if ((feedback->GetHeapObjectIfStrong(&heap_object) &&
heap_object->IsWeakFixedArray()) ||
is_named_feedback) {
- WeakFixedArray* array;
+ WeakFixedArray array;
if (is_named_feedback) {
array =
WeakFixedArray::cast(GetFeedbackExtra()->GetHeapObjectAssumeStrong());
@@ -1016,20 +1027,20 @@ bool FeedbackNexus::FindHandlers(MaybeObjectHandles* code_list,
array = WeakFixedArray::cast(heap_object);
}
const int increment = 2;
- HeapObject* heap_object;
+ HeapObject heap_object;
for (int i = 0; i < array->length(); i += increment) {
// Be sure to skip handlers whose maps have been cleared.
DCHECK(array->Get(i)->IsWeakOrCleared());
if (array->Get(i)->GetHeapObjectIfWeak(&heap_object) &&
!array->Get(i + increment - 1)->IsCleared()) {
- MaybeObject* handler = array->Get(i + increment - 1);
+ MaybeObject handler = array->Get(i + increment - 1);
DCHECK(IC::IsHandler(handler));
code_list->push_back(handle(handler, isolate));
count++;
}
}
} else if (feedback->GetHeapObjectIfWeak(&heap_object)) {
- MaybeObject* extra = GetFeedbackExtra();
+ MaybeObject extra = GetFeedbackExtra();
if (!extra->IsCleared()) {
DCHECK(IC::IsHandler(extra));
code_list->push_back(handle(extra, isolate));
@@ -1039,14 +1050,14 @@ bool FeedbackNexus::FindHandlers(MaybeObjectHandles* code_list,
return count == length;
}
-Name* FeedbackNexus::FindFirstName() const {
+Name FeedbackNexus::FindFirstName() const {
if (IsKeyedStoreICKind(kind()) || IsKeyedLoadICKind(kind())) {
- MaybeObject* feedback = GetFeedback();
+ MaybeObject feedback = GetFeedback();
if (IsPropertyNameFeedback(feedback)) {
return Name::cast(feedback->GetHeapObjectAssumeStrong());
}
}
- return nullptr;
+ return Name();
}
KeyedAccessLoadMode FeedbackNexus::GetKeyedAccessLoadMode() const {
@@ -1066,6 +1077,72 @@ KeyedAccessLoadMode FeedbackNexus::GetKeyedAccessLoadMode() const {
return STANDARD_LOAD;
}
+namespace {
+
+bool BuiltinHasKeyedAccessStoreMode(int builtin_index) {
+ DCHECK(Builtins::IsBuiltinId(builtin_index));
+ switch (builtin_index) {
+ case Builtins::kKeyedStoreIC_SloppyArguments_Standard:
+ case Builtins::kKeyedStoreIC_SloppyArguments_GrowNoTransitionHandleCOW:
+ case Builtins::kKeyedStoreIC_SloppyArguments_NoTransitionIgnoreOOB:
+ case Builtins::kKeyedStoreIC_SloppyArguments_NoTransitionHandleCOW:
+ case Builtins::kStoreFastElementIC_Standard:
+ case Builtins::kStoreFastElementIC_GrowNoTransitionHandleCOW:
+ case Builtins::kStoreFastElementIC_NoTransitionIgnoreOOB:
+ case Builtins::kStoreFastElementIC_NoTransitionHandleCOW:
+ case Builtins::kStoreInArrayLiteralIC_Slow_Standard:
+ case Builtins::kStoreInArrayLiteralIC_Slow_GrowNoTransitionHandleCOW:
+ case Builtins::kStoreInArrayLiteralIC_Slow_NoTransitionIgnoreOOB:
+ case Builtins::kStoreInArrayLiteralIC_Slow_NoTransitionHandleCOW:
+ case Builtins::kKeyedStoreIC_Slow_Standard:
+ case Builtins::kKeyedStoreIC_Slow_GrowNoTransitionHandleCOW:
+ case Builtins::kKeyedStoreIC_Slow_NoTransitionIgnoreOOB:
+ case Builtins::kKeyedStoreIC_Slow_NoTransitionHandleCOW:
+ case Builtins::kElementsTransitionAndStore_Standard:
+ case Builtins::kElementsTransitionAndStore_GrowNoTransitionHandleCOW:
+ case Builtins::kElementsTransitionAndStore_NoTransitionIgnoreOOB:
+ case Builtins::kElementsTransitionAndStore_NoTransitionHandleCOW:
+ return true;
+ default:
+ return false;
+ }
+ UNREACHABLE();
+}
+
+KeyedAccessStoreMode KeyedAccessStoreModeForBuiltin(int builtin_index) {
+ DCHECK(BuiltinHasKeyedAccessStoreMode(builtin_index));
+ switch (builtin_index) {
+ case Builtins::kKeyedStoreIC_SloppyArguments_Standard:
+ case Builtins::kStoreInArrayLiteralIC_Slow_Standard:
+ case Builtins::kKeyedStoreIC_Slow_Standard:
+ case Builtins::kStoreFastElementIC_Standard:
+ case Builtins::kElementsTransitionAndStore_Standard:
+ return STANDARD_STORE;
+ case Builtins::kKeyedStoreIC_SloppyArguments_GrowNoTransitionHandleCOW:
+ case Builtins::kStoreInArrayLiteralIC_Slow_GrowNoTransitionHandleCOW:
+ case Builtins::kKeyedStoreIC_Slow_GrowNoTransitionHandleCOW:
+ case Builtins::kStoreFastElementIC_GrowNoTransitionHandleCOW:
+ case Builtins::kElementsTransitionAndStore_GrowNoTransitionHandleCOW:
+ return STORE_AND_GROW_NO_TRANSITION_HANDLE_COW;
+ case Builtins::kKeyedStoreIC_SloppyArguments_NoTransitionIgnoreOOB:
+ case Builtins::kStoreInArrayLiteralIC_Slow_NoTransitionIgnoreOOB:
+ case Builtins::kKeyedStoreIC_Slow_NoTransitionIgnoreOOB:
+ case Builtins::kStoreFastElementIC_NoTransitionIgnoreOOB:
+ case Builtins::kElementsTransitionAndStore_NoTransitionIgnoreOOB:
+ return STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS;
+ case Builtins::kKeyedStoreIC_SloppyArguments_NoTransitionHandleCOW:
+ case Builtins::kStoreInArrayLiteralIC_Slow_NoTransitionHandleCOW:
+ case Builtins::kKeyedStoreIC_Slow_NoTransitionHandleCOW:
+ case Builtins::kStoreFastElementIC_NoTransitionHandleCOW:
+ case Builtins::kElementsTransitionAndStore_NoTransitionHandleCOW:
+ return STORE_NO_TRANSITION_HANDLE_COW;
+ default:
+ UNREACHABLE();
+ }
+}
+
+} // namespace
+
KeyedAccessStoreMode FeedbackNexus::GetKeyedAccessStoreMode() const {
DCHECK(IsKeyedStoreICKind(kind()) || IsStoreInArrayLiteralICKind(kind()));
KeyedAccessStoreMode mode = STANDARD_STORE;
@@ -1092,18 +1169,13 @@ KeyedAccessStoreMode FeedbackNexus::GetKeyedAccessStoreMode() const {
} else {
// Element store without prototype chain check.
handler = Handle<Code>::cast(maybe_code_handler.object());
- if (handler->is_builtin()) continue;
}
- CodeStub::Major major_key = CodeStub::MajorKeyFromKey(handler->stub_key());
- uint32_t minor_key = CodeStub::MinorKeyFromKey(handler->stub_key());
- CHECK(major_key == CodeStub::KeyedStoreSloppyArguments ||
- major_key == CodeStub::StoreFastElement ||
- major_key == CodeStub::StoreSlowElement ||
- major_key == CodeStub::StoreInArrayLiteralSlow ||
- major_key == CodeStub::ElementsTransitionAndStore ||
- major_key == CodeStub::NoCache);
- if (major_key != CodeStub::NoCache) {
- mode = CommonStoreModeBits::decode(minor_key);
+
+ if (handler->is_builtin()) {
+ const int builtin_index = handler->builtin_index();
+ if (!BuiltinHasKeyedAccessStoreMode(builtin_index)) continue;
+
+ mode = KeyedAccessStoreModeForBuiltin(builtin_index);
break;
}
}
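The replacement logic recovers the store mode from the builtin's identity
instead of decoding CodeStub minor keys. As the two switches above show, the
mapping follows the builtin-name suffix:

    _Standard                   -> STANDARD_STORE
    _GrowNoTransitionHandleCOW  -> STORE_AND_GROW_NO_TRANSITION_HANDLE_COW
    _NoTransitionIgnoreOOB      -> STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS
    _NoTransitionHandleCOW      -> STORE_NO_TRANSITION_HANDLE_COW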
@@ -1114,7 +1186,7 @@ KeyedAccessStoreMode FeedbackNexus::GetKeyedAccessStoreMode() const {
IcCheckType FeedbackNexus::GetKeyType() const {
DCHECK(IsKeyedStoreICKind(kind()) || IsKeyedLoadICKind(kind()) ||
IsStoreInArrayLiteralICKind(kind()));
- MaybeObject* feedback = GetFeedback();
+ MaybeObject feedback = GetFeedback();
if (feedback == MaybeObject::FromObject(
*FeedbackVector::MegamorphicSentinel(GetIsolate()))) {
return static_cast<IcCheckType>(
@@ -1125,19 +1197,19 @@ IcCheckType FeedbackNexus::GetKeyType() const {
BinaryOperationHint FeedbackNexus::GetBinaryOperationFeedback() const {
DCHECK_EQ(kind(), FeedbackSlotKind::kBinaryOp);
- int feedback = Smi::ToInt(GetFeedback()->cast<Smi>());
+ int feedback = GetFeedback().ToSmi().value();
return BinaryOperationHintFromFeedback(feedback);
}
CompareOperationHint FeedbackNexus::GetCompareOperationFeedback() const {
DCHECK_EQ(kind(), FeedbackSlotKind::kCompareOp);
- int feedback = Smi::ToInt(GetFeedback()->cast<Smi>());
+ int feedback = GetFeedback().ToSmi().value();
return CompareOperationHintFromFeedback(feedback);
}
ForInHint FeedbackNexus::GetForInFeedback() const {
DCHECK_EQ(kind(), FeedbackSlotKind::kForIn);
- int feedback = Smi::ToInt(GetFeedback()->cast<Smi>());
+ int feedback = GetFeedback().ToSmi().value();
return ForInHintFromFeedback(feedback);
}
@@ -1150,8 +1222,8 @@ Handle<FeedbackCell> FeedbackNexus::GetFeedbackCell() const {
MaybeHandle<JSObject> FeedbackNexus::GetConstructorFeedback() const {
DCHECK_EQ(kind(), FeedbackSlotKind::kInstanceOf);
Isolate* isolate = GetIsolate();
- MaybeObject* feedback = GetFeedback();
- HeapObject* heap_object;
+ MaybeObject feedback = GetFeedback();
+ HeapObject heap_object;
if (feedback->GetHeapObjectIfWeak(&heap_object)) {
return handle(JSObject::cast(heap_object), isolate);
}
@@ -1162,7 +1234,7 @@ namespace {
bool InList(Handle<ArrayList> types, Handle<String> type) {
for (int i = 0; i < types->Length(); i++) {
- Object* obj = types->Get(i);
+ Object obj = types->Get(i);
if (String::cast(obj)->Equals(*type)) {
return true;
}
@@ -1176,7 +1248,7 @@ void FeedbackNexus::Collect(Handle<String> type, int position) {
DCHECK_GE(position, 0);
Isolate* isolate = GetIsolate();
- MaybeObject* const feedback = GetFeedback();
+ MaybeObject const feedback = GetFeedback();
// Map source position to collection of types
Handle<SimpleNumberDictionary> types;
@@ -1216,7 +1288,7 @@ std::vector<int> FeedbackNexus::GetSourcePositions() const {
std::vector<int> source_positions;
Isolate* isolate = GetIsolate();
- MaybeObject* const feedback = GetFeedback();
+ MaybeObject const feedback = GetFeedback();
if (feedback == MaybeObject::FromObject(
*FeedbackVector::UninitializedSentinel(isolate))) {
@@ -1230,7 +1302,7 @@ std::vector<int> FeedbackNexus::GetSourcePositions() const {
for (int index = SimpleNumberDictionary::kElementsStartIndex;
index < types->length(); index += SimpleNumberDictionary::kEntrySize) {
int key_index = index + SimpleNumberDictionary::kEntryKeyIndex;
- Object* key = types->get(key_index);
+ Object key = types->get(key_index);
if (key->IsSmi()) {
int position = Smi::cast(key)->value();
source_positions.push_back(position);
@@ -1244,7 +1316,7 @@ std::vector<Handle<String>> FeedbackNexus::GetTypesForSourcePositions(
DCHECK(IsTypeProfileKind(kind()));
Isolate* isolate = GetIsolate();
- MaybeObject* const feedback = GetFeedback();
+ MaybeObject const feedback = GetFeedback();
std::vector<Handle<String>> types_for_position;
if (feedback == MaybeObject::FromObject(
*FeedbackVector::UninitializedSentinel(isolate))) {
@@ -1263,7 +1335,7 @@ std::vector<Handle<String>> FeedbackNexus::GetTypesForSourcePositions(
Handle<ArrayList> position_specific_types =
Handle<ArrayList>(ArrayList::cast(types->ValueAt(entry)), isolate);
for (int i = 0; i < position_specific_types->Length(); i++) {
- Object* t = position_specific_types->Get(i);
+ Object t = position_specific_types->Get(i);
types_for_position.push_back(Handle<String>(String::cast(t), isolate));
}
@@ -1281,7 +1353,7 @@ Handle<JSObject> ConvertToJSObject(Isolate* isolate,
index < feedback->length();
index += SimpleNumberDictionary::kEntrySize) {
int key_index = index + SimpleNumberDictionary::kEntryKeyIndex;
- Object* key = feedback->get(key_index);
+ Object key = feedback->get(key_index);
if (key->IsSmi()) {
int value_index = index + SimpleNumberDictionary::kEntryValueIndex;
@@ -1300,11 +1372,11 @@ Handle<JSObject> ConvertToJSObject(Isolate* isolate,
}
} // namespace
-JSObject* FeedbackNexus::GetTypeProfile() const {
+JSObject FeedbackNexus::GetTypeProfile() const {
DCHECK(IsTypeProfileKind(kind()));
Isolate* isolate = GetIsolate();
- MaybeObject* const feedback = GetFeedback();
+ MaybeObject const feedback = GetFeedback();
if (feedback == MaybeObject::FromObject(
*FeedbackVector::UninitializedSentinel(isolate))) {
diff --git a/deps/v8/src/feedback-vector.h b/deps/v8/src/feedback-vector.h
index 71d84534b6..69fd86436f 100644
--- a/deps/v8/src/feedback-vector.h
+++ b/deps/v8/src/feedback-vector.h
@@ -13,10 +13,12 @@
#include "src/globals.h"
#include "src/objects/map.h"
#include "src/objects/name.h"
-#include "src/objects/object-macros.h"
#include "src/type-hints.h"
#include "src/zone/zone-containers.h"
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
namespace v8 {
namespace internal {
@@ -145,17 +147,18 @@ class FeedbackMetadata;
// - optimized code cell (weak cell or Smi marker)
// followed by an array of feedback slots, of length determined by the feedback
// metadata.
-class FeedbackVector : public HeapObject, public NeverReadOnlySpaceObject {
+class FeedbackVector : public HeapObject {
public:
- // Casting.
- static inline FeedbackVector* cast(Object* obj);
+ NEVER_READ_ONLY_SPACE
+
+ DECL_CAST(FeedbackVector)
inline void ComputeCounts(int* with_type_info, int* generic,
int* vector_ic_count);
inline bool is_empty() const;
- inline FeedbackMetadata* metadata() const;
+ inline FeedbackMetadata metadata() const;
// [shared_function_info]: The shared function info for the function with this
// feedback vector.
@@ -182,12 +185,12 @@ class FeedbackVector : public HeapObject, public NeverReadOnlySpaceObject {
inline void clear_invocation_count();
inline void increment_deopt_count();
- inline Code* optimized_code() const;
+ inline Code optimized_code() const;
inline OptimizationMarker optimization_marker() const;
inline bool has_optimized_code() const;
inline bool has_optimization_marker() const;
void ClearOptimizedCode();
- void EvictOptimizedCodeMarkedForDeoptimization(SharedFunctionInfo* shared,
+ void EvictOptimizedCodeMarkedForDeoptimization(SharedFunctionInfo shared,
const char* reason);
static void SetOptimizedCode(Handle<FeedbackVector> vector,
Handle<Code> code);
@@ -201,19 +204,19 @@ class FeedbackVector : public HeapObject, public NeverReadOnlySpaceObject {
// Conversion from an integer index to the underlying array to a slot.
static inline FeedbackSlot ToSlot(int index);
- inline MaybeObject* Get(FeedbackSlot slot) const;
- inline MaybeObject* get(int index) const;
- inline void Set(FeedbackSlot slot, MaybeObject* value,
+ inline MaybeObject Get(FeedbackSlot slot) const;
+ inline MaybeObject get(int index) const;
+ inline void Set(FeedbackSlot slot, MaybeObject value,
WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
- inline void set(int index, MaybeObject* value,
+ inline void set(int index, MaybeObject value,
WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
- inline void Set(FeedbackSlot slot, Object* value,
+ inline void Set(FeedbackSlot slot, Object value,
WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
- inline void set(int index, Object* value,
+ inline void set(int index, Object value,
WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
// Gives access to raw memory which stores the array's data.
- inline MaybeObject** slots_start();
+ inline MaybeObjectSlot slots_start();
// Returns slot kind for given slot.
FeedbackSlotKind GetKind(FeedbackSlot slot) const;
@@ -248,7 +251,7 @@ class FeedbackVector : public HeapObject, public NeverReadOnlySpaceObject {
return GetLanguageModeFromSlotKind(GetKind(slot));
}
- static void AssertNoLegacyTypes(MaybeObject* object);
+ static void AssertNoLegacyTypes(MaybeObject object);
DECL_PRINTER(FeedbackVector)
DECL_VERIFIER(FeedbackVector)
@@ -272,38 +275,38 @@ class FeedbackVector : public HeapObject, public NeverReadOnlySpaceObject {
// A raw version of the uninitialized sentinel that's safe to read during
// garbage collection (e.g., for patching the cache).
- static inline Symbol* RawUninitializedSentinel(Isolate* isolate);
+ static inline Symbol RawUninitializedSentinel(Isolate* isolate);
// Layout description.
-#define FEEDBACK_VECTOR_FIELDS(V) \
- /* Header fields. */ \
- V(kSharedFunctionInfoOffset, kPointerSize) \
- V(kOptimizedCodeOffset, kPointerSize) \
- V(kLengthOffset, kInt32Size) \
- V(kInvocationCountOffset, kInt32Size) \
- V(kProfilerTicksOffset, kInt32Size) \
- V(kDeoptCountOffset, kInt32Size) \
+#define FEEDBACK_VECTOR_FIELDS(V) \
+ /* Header fields. */ \
+ V(kSharedFunctionInfoOffset, kTaggedSize) \
+ V(kOptimizedCodeOffset, kTaggedSize) \
+ V(kLengthOffset, kInt32Size) \
+ V(kInvocationCountOffset, kInt32Size) \
+ V(kProfilerTicksOffset, kInt32Size) \
+ V(kDeoptCountOffset, kInt32Size) \
V(kUnalignedHeaderSize, 0)
DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize, FEEDBACK_VECTOR_FIELDS)
#undef FEEDBACK_VECTOR_FIELDS
static const int kHeaderSize =
- RoundUp<kPointerAlignment>(int{kUnalignedHeaderSize});
+ RoundUp<kObjectAlignment>(int{kUnalignedHeaderSize});
static const int kFeedbackSlotsOffset = kHeaderSize;
class BodyDescriptor;
// Garbage collection support.
static constexpr int SizeFor(int length) {
- return kFeedbackSlotsOffset + length * kPointerSize;
+ return kFeedbackSlotsOffset + length * kTaggedSize;
}
private:
static void AddToVectorsForProfilingTools(Isolate* isolate,
Handle<FeedbackVector> vector);
- DISALLOW_IMPLICIT_CONSTRUCTORS(FeedbackVector);
+ OBJECT_CONSTRUCTORS(FeedbackVector, HeapObject);
};
class V8_EXPORT_PRIVATE FeedbackVectorSpec {
@@ -450,8 +453,7 @@ class SharedFeedbackSlot {
// the number of slots is static once an instance is created.
class FeedbackMetadata : public HeapObject {
public:
- // Casting.
- static inline FeedbackMetadata* cast(Object* obj);
+ DECL_CAST(FeedbackMetadata)
// The number of slots that this metadata contains. Stored as an int32.
DECL_INT32_ACCESSORS(slot_count)
@@ -515,7 +517,7 @@ class FeedbackMetadata : public HeapObject {
kInt32Size * kBitsPerByte, uint32_t>
VectorICComputer;
- DISALLOW_IMPLICIT_CONSTRUCTORS(FeedbackMetadata);
+ OBJECT_CONSTRUCTORS(FeedbackMetadata, HeapObject);
};
// Verify that an empty hash field looks like a tagged object, but can't
@@ -532,7 +534,7 @@ class FeedbackMetadataIterator {
next_slot_(FeedbackSlot(0)),
slot_kind_(FeedbackSlotKind::kInvalid) {}
- explicit FeedbackMetadataIterator(FeedbackMetadata* metadata)
+ explicit FeedbackMetadataIterator(FeedbackMetadata metadata)
: metadata_(metadata),
next_slot_(FeedbackSlot(0)),
slot_kind_(FeedbackSlotKind::kInvalid) {}
@@ -552,7 +554,7 @@ class FeedbackMetadataIterator {
inline int entry_size() const;
private:
- FeedbackMetadata* metadata() const {
+ FeedbackMetadata metadata() const {
return !metadata_handle_.is_null() ? *metadata_handle_ : metadata_;
}
@@ -560,7 +562,7 @@ class FeedbackMetadataIterator {
// to have a single iterator implementation for both "handlified" and raw
// pointer use cases.
Handle<FeedbackMetadata> metadata_handle_;
- FeedbackMetadata* metadata_;
+ FeedbackMetadata metadata_;
FeedbackSlot cur_slot_;
FeedbackSlot next_slot_;
FeedbackSlotKind slot_kind_;
@@ -570,18 +572,21 @@ class FeedbackMetadataIterator {
class FeedbackNexus final {
public:
FeedbackNexus(Handle<FeedbackVector> vector, FeedbackSlot slot)
- : vector_handle_(vector),
- vector_(nullptr),
- slot_(slot),
- kind_(vector->GetKind(slot)) {}
- FeedbackNexus(FeedbackVector* vector, FeedbackSlot slot)
- : vector_(vector), slot_(slot), kind_(vector->GetKind(slot)) {}
+ : vector_handle_(vector), slot_(slot) {
+ kind_ =
+ (vector.is_null()) ? FeedbackSlotKind::kInvalid : vector->GetKind(slot);
+ }
+ FeedbackNexus(FeedbackVector vector, FeedbackSlot slot)
+ : vector_(vector), slot_(slot) {
+ kind_ =
+ (vector.is_null()) ? FeedbackSlotKind::kInvalid : vector->GetKind(slot);
+ }
Handle<FeedbackVector> vector_handle() const {
- DCHECK_NULL(vector_);
+ DCHECK(vector_.is_null());
return vector_handle_;
}
- FeedbackVector* vector() const {
+ FeedbackVector vector() const {
return vector_handle_.is_null() ? vector_ : *vector_handle_;
}
FeedbackSlot slot() const { return slot_; }
@@ -599,12 +604,7 @@ class FeedbackNexus final {
void Print(std::ostream& os); // NOLINT
// For map-based ICs (load, keyed-load, store, keyed-store).
- Map* FindFirstMap() const {
- MapHandles maps;
- ExtractMaps(&maps);
- if (maps.size() > 0) return *maps.at(0);
- return nullptr;
- }
+ Map FindFirstMap() const;
InlineCacheState StateFromFeedback() const;
int ExtractMaps(MapHandles* maps) const;
@@ -625,8 +625,8 @@ class FeedbackNexus final {
bool ConfigureMegamorphic();
bool ConfigureMegamorphic(IcCheckType property_type);
- inline MaybeObject* GetFeedback() const;
- inline MaybeObject* GetFeedbackExtra() const;
+ inline MaybeObject GetFeedback() const;
+ inline MaybeObject GetFeedbackExtra() const;
inline Isolate* GetIsolate() const;
@@ -648,7 +648,7 @@ class FeedbackNexus final {
// For KeyedLoad and KeyedStore ICs.
IcCheckType GetKeyType() const;
- Name* FindFirstName() const;
+ Name FindFirstName() const;
// For Call ICs.
int GetCallCount();
@@ -696,18 +696,18 @@ class FeedbackNexus final {
// Add a type to the list of types for source position <position>.
void Collect(Handle<String> type, int position);
- JSObject* GetTypeProfile() const;
+ JSObject GetTypeProfile() const;
std::vector<int> GetSourcePositions() const;
std::vector<Handle<String>> GetTypesForSourcePositions(uint32_t pos) const;
- inline void SetFeedback(Object* feedback,
+ inline void SetFeedback(Object feedback,
WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
- inline void SetFeedback(MaybeObject* feedback,
+ inline void SetFeedback(MaybeObject feedback,
WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
- inline void SetFeedbackExtra(Object* feedback_extra,
+ inline void SetFeedbackExtra(Object feedback_extra,
WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
- inline void SetFeedbackExtra(MaybeObject* feedback_extra,
+ inline void SetFeedbackExtra(MaybeObject feedback_extra,
WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
Handle<WeakFixedArray> EnsureArrayOfSize(int length);
@@ -719,7 +719,7 @@ class FeedbackNexus final {
// you have a handle to the vector that is better because more operations can
// be done, like allocation.
Handle<FeedbackVector> vector_handle_;
- FeedbackVector* vector_;
+ FeedbackVector vector_;
FeedbackSlot slot_;
FeedbackSlotKind kind_;
};
@@ -731,4 +731,6 @@ inline ForInHint ForInHintFromFeedback(int type_feedback);
} // namespace internal
} // namespace v8
+#include "src/objects/object-macros-undef.h"
+
#endif // V8_FEEDBACK_VECTOR_H_
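Editor's note: the hunks above follow one pattern that recurs throughout this patch: raw Object*/MaybeObject* pointers become value types wrapping a tagged Address (the new FieldType constructor further below shows the shape directly). A minimal compilable sketch of that shape, with a simplified name that is ours, not V8's:

// Sketch only: a tagged value type of the kind this patch migrates to.
// The real V8 classes carry many more invariants; Address here is just
// an integer wide enough to hold a tagged pointer.
#include <cstdint>

using Address = uintptr_t;

class TaggedValue {
 public:
  explicit constexpr TaggedValue(Address ptr) : ptr_(ptr) {}
  constexpr Address ptr() const { return ptr_; }

  // Value semantics: equality compares tagged words, which is why the
  // patch rewrites `this == other` into `*this == other`.
  bool operator==(TaggedValue other) const { return ptr_ == other.ptr_; }

  // Returning `this` from operator-> lets existing obj->Method() call
  // sites compile unchanged after the pointer-to-value switch.
  TaggedValue* operator->() { return this; }
  const TaggedValue* operator->() const { return this; }

 private:
  Address ptr_;
};

This also explains the new null handling: a vanished nullptr state becomes an explicit is_null() query, which is why the FeedbackNexus constructors above map a null vector to FeedbackSlotKind::kInvalid instead of dereferencing it.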
diff --git a/deps/v8/src/field-index-inl.h b/deps/v8/src/field-index-inl.h
index abc3166b56..d54e43121e 100644
--- a/deps/v8/src/field-index-inl.h
+++ b/deps/v8/src/field-index-inl.h
@@ -12,15 +12,14 @@
namespace v8 {
namespace internal {
-inline FieldIndex FieldIndex::ForInObjectOffset(int offset, Encoding encoding,
- const Map* map) {
- DCHECK(map == nullptr || offset < map->instance_size());
- DCHECK(encoding == kWord32 ? (offset % kInt32Size) == 0
- : (offset % kPointerSize) == 0);
+inline FieldIndex FieldIndex::ForInObjectOffset(int offset, Encoding encoding) {
+ DCHECK_IMPLIES(encoding == kWord32, IsAligned(offset, kInt32Size));
+ DCHECK_IMPLIES(encoding == kTagged, IsAligned(offset, kTaggedSize));
+ DCHECK_IMPLIES(encoding == kDouble, IsAligned(offset, kDoubleSize));
return FieldIndex(true, offset, encoding, 0, 0);
}
-inline FieldIndex FieldIndex::ForPropertyIndex(const Map* map,
+inline FieldIndex FieldIndex::ForPropertyIndex(const Map map,
int property_index,
Representation representation) {
DCHECK(map->instance_type() >= FIRST_NONSTRING_TYPE);
@@ -34,7 +33,7 @@ inline FieldIndex FieldIndex::ForPropertyIndex(const Map* map,
} else {
first_inobject_offset = FixedArray::kHeaderSize;
property_index -= inobject_properties;
- offset = FixedArray::kHeaderSize + property_index * kPointerSize;
+ offset = PropertyArray::OffsetOfElementAt(property_index);
}
Encoding encoding = FieldEncoding(representation);
return FieldIndex(is_inobject, offset, encoding, inobject_properties,
@@ -53,16 +52,16 @@ inline int FieldIndex::GetLoadByFieldIndex() const {
// signifying if the field is a mutable double box (1) or not (0).
int result = index();
if (is_inobject()) {
- result -= JSObject::kHeaderSize / kPointerSize;
+ result -= JSObject::kHeaderSize / kTaggedSize;
} else {
- result -= FixedArray::kHeaderSize / kPointerSize;
+ result -= FixedArray::kHeaderSize / kTaggedSize;
result = -result - 1;
}
result <<= 1;
return is_double() ? (result | 1) : result;
}
-inline FieldIndex FieldIndex::ForDescriptor(const Map* map,
+inline FieldIndex FieldIndex::ForDescriptor(const Map map,
int descriptor_index) {
PropertyDetails details =
map->instance_descriptors()->GetDetails(descriptor_index);
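Editor's note: the GetLoadByFieldIndex hunk above packs three facts into one integer: the field's slot, whether it lives in-object or in the backing store (the sign), and whether it is a mutable double box (the low bit). The same arithmetic restated as a self-contained sketch; the two header sizes are illustrative constants, not taken from this patch:

// Sketch of the encoding above; header sizes are illustrative.
constexpr int kTaggedSize = 8;
constexpr int kJSObjectHeaderSize = 3 * kTaggedSize;    // map, properties, elements
constexpr int kFixedArrayHeaderSize = 2 * kTaggedSize;  // map, length

int LoadByFieldIndex(int index, bool is_inobject, bool is_double) {
  int result = index;
  if (is_inobject) {
    result -= kJSObjectHeaderSize / kTaggedSize;    // zero-based in-object slot
  } else {
    result -= kFixedArrayHeaderSize / kTaggedSize;  // zero-based store slot
    result = -result - 1;                           // negative marks out-of-object
  }
  result <<= 1;                                     // make room for the double bit
  return is_double ? (result | 1) : result;
}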
diff --git a/deps/v8/src/field-index.h b/deps/v8/src/field-index.h
index 2135c5ef25..f2a117e94b 100644
--- a/deps/v8/src/field-index.h
+++ b/deps/v8/src/field-index.h
@@ -24,11 +24,10 @@ class FieldIndex final {
FieldIndex() : bit_field_(0) {}
static FieldIndex ForPropertyIndex(
- const Map* map, int index,
+ const Map map, int index,
Representation representation = Representation::Tagged());
- static FieldIndex ForInObjectOffset(int offset, Encoding encoding,
- const Map* map = nullptr);
- static FieldIndex ForDescriptor(const Map* map, int descriptor_index);
+ static FieldIndex ForInObjectOffset(int offset, Encoding encoding);
+ static FieldIndex ForDescriptor(const Map map, int descriptor_index);
int GetLoadByFieldIndex() const;
@@ -44,20 +43,20 @@ class FieldIndex final {
// Zero-indexed from beginning of the object.
int index() const {
- DCHECK_EQ(0, offset() % kPointerSize);
- return offset() / kPointerSize;
+ DCHECK(IsAligned(offset(), kTaggedSize));
+ return offset() / kTaggedSize;
}
int outobject_array_index() const {
DCHECK(!is_inobject());
- return index() - first_inobject_property_offset() / kPointerSize;
+ return index() - first_inobject_property_offset() / kTaggedSize;
}
// Zero-based from the first inobject property. Overflows to out-of-object
// properties.
int property_index() const {
DCHECK(!is_hidden_field());
- int result = index() - first_inobject_property_offset() / kPointerSize;
+ int result = index() - first_inobject_property_offset() / kTaggedSize;
if (!is_inobject()) {
result += InObjectPropertyBits::decode(bit_field_);
}
@@ -78,7 +77,7 @@ class FieldIndex final {
FieldIndex(bool is_inobject, int offset, Encoding encoding,
int inobject_properties, int first_inobject_property_offset,
bool is_hidden = false) {
- DCHECK_EQ(first_inobject_property_offset & (kPointerSize - 1), 0);
+ DCHECK(IsAligned(first_inobject_property_offset, kTaggedSize));
bit_field_ = IsInObjectBits::encode(is_inobject) |
EncodingBits::encode(encoding) |
FirstInobjectPropertyOffsetBits::encode(
@@ -110,7 +109,7 @@ class FieldIndex final {
}
static const int kOffsetBitsSize =
- (kDescriptorIndexBitCount + 1 + kPointerSizeLog2);
+ (kDescriptorIndexBitCount + 1 + kTaggedSizeLog2);
// Index from beginning of object.
class OffsetBits : public BitField64<int, 0, kOffsetBitsSize> {};
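Editor's note: FieldIndex keeps all of this state in a single 64-bit bit_field_, and the hunk above widens the offset portion by switching from kPointerSizeLog2 to kTaggedSizeLog2. A stripped-down sketch of the BitField64 packing pattern; the shifts and widths here are assumptions for illustration, not the real FieldIndex layout:

// Sketch of bit-field packing; shift/width values are illustrative.
#include <cstdint>

template <int kShift, int kSize>
struct BitField64 {
  static constexpr uint64_t kMask = ((uint64_t{1} << kSize) - 1) << kShift;
  static constexpr uint64_t encode(uint64_t value) {
    return (value << kShift) & kMask;
  }
  static constexpr int decode(uint64_t word) {
    return static_cast<int>((word & kMask) >> kShift);
  }
};

using OffsetBits = BitField64<0, 14>;      // byte offset into the object
using IsInObjectBits = BitField64<14, 1>;  // next field starts where it ends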
diff --git a/deps/v8/src/field-type.cc b/deps/v8/src/field-type.cc
index c4d2080f78..bb0869b262 100644
--- a/deps/v8/src/field-type.cc
+++ b/deps/v8/src/field-type.cc
@@ -6,22 +6,17 @@
#include "src/handles-inl.h"
#include "src/objects-inl.h"
+#include "src/objects/smi.h"
#include "src/ostreams.h"
namespace v8 {
namespace internal {
// static
-FieldType* FieldType::None() {
- // Do not Smi::kZero here or for Any(), as that may translate
- // as `nullptr` which is not a valid value for `this`.
- return reinterpret_cast<FieldType*>(Smi::FromInt(2));
-}
+FieldType FieldType::None() { return FieldType(Smi::FromInt(2).ptr()); }
// static
-FieldType* FieldType::Any() {
- return reinterpret_cast<FieldType*>(Smi::FromInt(1));
-}
+FieldType FieldType::Any() { return FieldType(Smi::FromInt(1).ptr()); }
// static
Handle<FieldType> FieldType::None(Isolate* isolate) {
@@ -34,58 +29,58 @@ Handle<FieldType> FieldType::Any(Isolate* isolate) {
}
// static
-FieldType* FieldType::Class(i::Map* map) { return FieldType::cast(map); }
+FieldType FieldType::Class(Map map) { return FieldType::cast(map); }
// static
-Handle<FieldType> FieldType::Class(i::Handle<i::Map> map, Isolate* isolate) {
+Handle<FieldType> FieldType::Class(Handle<Map> map, Isolate* isolate) {
return handle(Class(*map), isolate);
}
// static
-FieldType* FieldType::cast(Object* object) {
+FieldType FieldType::cast(Object object) {
DCHECK(object == None() || object == Any() || object->IsMap());
- return reinterpret_cast<FieldType*>(object);
+ return FieldType(object->ptr());
}
-bool FieldType::IsClass() { return this->IsMap(); }
+bool FieldType::IsClass() const { return this->IsMap(); }
-Map* FieldType::AsClass() {
+Map FieldType::AsClass() const {
DCHECK(IsClass());
- return Map::cast(this);
+ return Map::cast(*this);
}
-bool FieldType::NowStable() {
+bool FieldType::NowStable() const {
return !this->IsClass() || AsClass()->is_stable();
}
-bool FieldType::NowIs(FieldType* other) {
+bool FieldType::NowIs(FieldType other) const {
if (other->IsAny()) return true;
if (IsNone()) return true;
if (other->IsNone()) return false;
if (IsAny()) return false;
DCHECK(IsClass());
DCHECK(other->IsClass());
- return this == other;
+ return *this == other;
}
-bool FieldType::NowIs(Handle<FieldType> other) { return NowIs(*other); }
+bool FieldType::NowIs(Handle<FieldType> other) const { return NowIs(*other); }
-void FieldType::PrintTo(std::ostream& os) {
+void FieldType::PrintTo(std::ostream& os) const {
if (IsAny()) {
os << "Any";
} else if (IsNone()) {
os << "None";
} else {
DCHECK(IsClass());
- os << "Class(" << static_cast<void*>(AsClass()) << ")";
+ os << "Class(" << reinterpret_cast<void*>(AsClass()->ptr()) << ")";
}
}
-bool FieldType::NowContains(Object* value) {
- if (this == Any()) return true;
- if (this == None()) return false;
+bool FieldType::NowContains(Object value) const {
+ if (*this == Any()) return true;
+ if (*this == None()) return false;
if (!value->IsHeapObject()) return false;
- return HeapObject::cast(value)->map() == Map::cast(this);
+ return HeapObject::cast(value)->map() == Map::cast(*this);
}
} // namespace internal
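Editor's note: FieldType is not a real heap object at all. None and Any are the Smi payloads 2 and 1 (avoiding Smi::kZero, which would read as nullptr in the old pointer representation), and anything else is a Map, so NowIs is a check on a three-point lattice: None below everything, Any above everything, and two Class types related only when they are the same map. The logic above restated as a self-contained sketch, where Kind and map_id stand in for the tagged representations:

// Sketch of the None/Any/Class lattice that NowIs() implements.
enum class Kind { kNone, kAny, kClass };
struct Type { Kind kind; int map_id; };  // map_id stands in for a Map

bool NowIs(Type self, Type other) {
  if (other.kind == Kind::kAny) return true;    // everything <= Any
  if (self.kind == Kind::kNone) return true;    // None <= everything
  if (other.kind == Kind::kNone) return false;
  if (self.kind == Kind::kAny) return false;
  return self.map_id == other.map_id;           // Class vs Class: identity
}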
diff --git a/deps/v8/src/field-type.h b/deps/v8/src/field-type.h
index d66d12c095..5e280339e5 100644
--- a/deps/v8/src/field-type.h
+++ b/deps/v8/src/field-type.h
@@ -6,6 +6,7 @@
#define V8_FIELD_TYPE_H_
#include "src/objects.h"
+#include "src/objects/heap-object.h"
#include "src/objects/map.h"
namespace v8 {
@@ -16,27 +17,36 @@ class Handle;
class FieldType : public Object {
public:
- static FieldType* None();
- static FieldType* Any();
+ static FieldType None();
+ static FieldType Any();
static Handle<FieldType> None(Isolate* isolate);
static Handle<FieldType> Any(Isolate* isolate);
- static FieldType* Class(i::Map* map);
- static Handle<FieldType> Class(i::Handle<i::Map> map, Isolate* isolate);
- static FieldType* cast(Object* object);
+ static FieldType Class(Map map);
+ static Handle<FieldType> Class(Handle<Map> map, Isolate* isolate);
+ static FieldType cast(Object object);
+ static FieldType unchecked_cast(Object object) {
+ return FieldType(object.ptr());
+ }
- bool NowContains(Object* value);
+ bool NowContains(Object value) const;
- bool NowContains(Handle<Object> value) { return NowContains(*value); }
+ bool NowContains(Handle<Object> value) const { return NowContains(*value); }
- bool IsClass();
- Map* AsClass();
- bool IsNone() { return this == None(); }
- bool IsAny() { return this == Any(); }
- bool NowStable();
- bool NowIs(FieldType* other);
- bool NowIs(Handle<FieldType> other);
+ bool IsClass() const;
+ Map AsClass() const;
+ bool IsNone() const { return *this == None(); }
+ bool IsAny() const { return *this == Any(); }
+ bool NowStable() const;
+ bool NowIs(FieldType other) const;
+ bool NowIs(Handle<FieldType> other) const;
- void PrintTo(std::ostream& os);
+ void PrintTo(std::ostream& os) const;
+
+ FieldType* operator->() { return this; }
+ const FieldType* operator->() const { return this; }
+
+ private:
+ explicit constexpr FieldType(Address ptr) : Object(ptr) {}
};
} // namespace internal
diff --git a/deps/v8/src/fixed-dtoa.cc b/deps/v8/src/fixed-dtoa.cc
index 95b7cbead7..b360ffb85c 100644
--- a/deps/v8/src/fixed-dtoa.cc
+++ b/deps/v8/src/fixed-dtoa.cc
@@ -240,7 +240,7 @@ static void FillFractionals(uint64_t fractionals, int exponent,
fractionals -= static_cast<uint64_t>(digit) << point;
}
// If the first bit after the point is set we have to round up.
- if (((fractionals >> (point - 1)) & 1) == 1) {
+ if (point > 0 && ((fractionals >> (point - 1)) & 1) == 1) {
DtoaRoundUp(buffer, length, decimal_point);
}
} else { // We need 128 bits.
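Editor's note: the new point > 0 guard matters because the old code evaluated fractionals >> (point - 1) even when point was zero, and shifting by a negative count is undefined behavior in C++. When there is no bit after the decimal point, there is nothing to round on. A sketch of the guarded probe; the function name is ours, not the source's:

// Sketch of the fixed round-up probe from FillFractionals.
#include <cstdint>

bool FirstDroppedBitSet(uint64_t fractionals, int point) {
  if (point <= 0) return false;  // nothing after the decimal point
  return ((fractionals >> (point - 1)) & 1) == 1;
}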
diff --git a/deps/v8/src/flag-definitions.h b/deps/v8/src/flag-definitions.h
index 170a777c72..d262fb7012 100644
--- a/deps/v8/src/flag-definitions.h
+++ b/deps/v8/src/flag-definitions.h
@@ -97,19 +97,6 @@
#define COMMA ,
#ifdef FLAG_MODE_DECLARE
-// Structure used to hold a collection of arguments to the JavaScript code.
-struct JSArguments {
- public:
- inline const char*& operator[](int idx) const { return argv[idx]; }
- static JSArguments Create(int argc, const char** argv) {
- JSArguments args;
- args.argc = argc;
- args.argv = argv;
- return args;
- }
- int argc;
- const char** argv;
-};
struct MaybeBoolFlag {
static MaybeBoolFlag Create(bool has_value, bool value) {
@@ -129,6 +116,12 @@ struct MaybeBoolFlag {
#define DEBUG_BOOL false
#endif
+#ifdef V8_COMPRESS_POINTERS
+#define COMPRESS_POINTERS_BOOL true
+#else
+#define COMPRESS_POINTERS_BOOL false
+#endif
+
// Supported ARM configurations are:
// "armv6": ARMv6 + VFPv2
// "armv7": ARMv7 + VFPv3-D32 + NEON
@@ -166,16 +159,12 @@ struct MaybeBoolFlag {
#define DEFINE_FLOAT(nam, def, cmt) FLAG(FLOAT, double, nam, def, cmt)
#define DEFINE_SIZE_T(nam, def, cmt) FLAG(SIZE_T, size_t, nam, def, cmt)
#define DEFINE_STRING(nam, def, cmt) FLAG(STRING, const char*, nam, def, cmt)
-#define DEFINE_ARGS(nam, cmt) \
- FLAG(ARGS, JSArguments, nam, {0 COMMA nullptr}, cmt)
-
#define DEFINE_ALIAS_BOOL(alias, nam) FLAG_ALIAS(BOOL, bool, alias, nam)
#define DEFINE_ALIAS_INT(alias, nam) FLAG_ALIAS(INT, int, alias, nam)
#define DEFINE_ALIAS_FLOAT(alias, nam) FLAG_ALIAS(FLOAT, double, alias, nam)
#define DEFINE_ALIAS_SIZE_T(alias, nam) FLAG_ALIAS(SIZE_T, size_t, alias, nam)
#define DEFINE_ALIAS_STRING(alias, nam) \
FLAG_ALIAS(STRING, const char*, alias, nam)
-#define DEFINE_ALIAS_ARGS(alias, nam) FLAG_ALIAS(ARGS, JSArguments, alias, nam)
#ifdef DEBUG
#define DEFINE_DEBUG_BOOL DEFINE_BOOL
@@ -188,9 +177,6 @@ struct MaybeBoolFlag {
//
#define FLAG FLAG_FULL
-DEFINE_BOOL(experimental_extras, false,
- "enable code compiled in via v8_experimental_extra_library_files")
-
// Flags for language modes and experimental language features.
DEFINE_BOOL(use_strict, false, "enforce strict mode")
@@ -206,47 +192,60 @@ DEFINE_IMPLICATION(harmony_class_fields, harmony_public_fields)
DEFINE_IMPLICATION(harmony_class_fields, harmony_static_fields)
DEFINE_IMPLICATION(harmony_class_fields, harmony_private_fields)
+DEFINE_IMPLICATION(harmony_private_methods, harmony_private_fields)
+
// Update bootstrapper.cc whenever adding a new feature flag.
// Features that are still work in progress (behind individual flags).
-#define HARMONY_INPROGRESS_BASE(V) \
- V(harmony_do_expressions, "harmony do-expressions") \
- V(harmony_class_fields, "harmony fields in class literals") \
- V(harmony_await_optimization, "harmony await taking 1 tick") \
- V(harmony_regexp_sequence, "RegExp Unicode sequence properties")
+#define HARMONY_INPROGRESS_BASE(V) \
+ V(harmony_class_fields, "harmony fields in class literals") \
+ V(harmony_private_methods, "harmony private methods in class literals") \
+ V(harmony_regexp_sequence, "RegExp Unicode sequence properties") \
+ V(harmony_weak_refs, "harmony weak references") \
#ifdef V8_INTL_SUPPORT
-#define HARMONY_INPROGRESS(V) \
- HARMONY_INPROGRESS_BASE(V) \
- V(harmony_locale, "Intl.Locale") \
- V(harmony_intl_list_format, "Intl.ListFormat") \
- V(harmony_intl_segmenter, "Intl.Segmenter")
+#define HARMONY_INPROGRESS(V) \
+ HARMONY_INPROGRESS_BASE(V) \
+ V(harmony_locale, "Intl.Locale")
#else
#define HARMONY_INPROGRESS(V) HARMONY_INPROGRESS_BASE(V)
#endif
// Features that are complete (but still behind --harmony/es-staging flag).
-#define HARMONY_STAGED(V) \
- V(harmony_public_fields, "harmony public fields in class literals") \
+#define HARMONY_STAGED_BASE(V) \
V(harmony_private_fields, "harmony private fields in class literals") \
V(harmony_numeric_separator, "harmony numeric separator between digits") \
- V(harmony_string_matchall, "harmony String.prototype.matchAll") \
- V(harmony_static_fields, "harmony static fields in class literals") \
- V(harmony_json_stringify, "Well-formed JSON.stringify")
+ V(harmony_hashbang, "harmony hashbang syntax")
+
+#ifdef V8_INTL_SUPPORT
+#define HARMONY_STAGED(V) \
+ HARMONY_STAGED_BASE(V) \
+ V(harmony_intl_segmenter, "Intl.Segmenter")
+#else
+#define HARMONY_STAGED(V) HARMONY_STAGED_BASE(V)
+#endif
// Features that are shipping (turned on by default, but internal flag remains).
-#define HARMONY_SHIPPING_BASE(V) \
- V(harmony_sharedarraybuffer, "harmony sharedarraybuffer") \
- V(harmony_import_meta, "harmony import.meta property") \
- V(harmony_dynamic_import, "harmony dynamic import") \
- V(harmony_array_prototype_values, "harmony Array.prototype.values") \
- V(harmony_array_flat, "harmony Array.prototype.{flat,flatMap}") \
- V(harmony_symbol_description, "harmony Symbol.prototype.description") \
- V(harmony_global, "harmony global")
+#define HARMONY_SHIPPING_BASE(V) \
+ V(harmony_namespace_exports, \
+ "harmony namespace exports (export * as foo from 'bar')") \
+ V(harmony_sharedarraybuffer, "harmony sharedarraybuffer") \
+ V(harmony_import_meta, "harmony import.meta property") \
+ V(harmony_dynamic_import, "harmony dynamic import") \
+ V(harmony_array_flat, "harmony Array.prototype.{flat,flatMap}") \
+ V(harmony_symbol_description, "harmony Symbol.prototype.description") \
+ V(harmony_global, "harmony global") \
+ V(harmony_json_stringify, "well-formed JSON.stringify") \
+ V(harmony_public_fields, "harmony public instance fields in class literals") \
+ V(harmony_static_fields, "harmony static fields in class literals") \
+ V(harmony_string_matchall, "harmony String.prototype.matchAll") \
+ V(harmony_object_from_entries, "harmony Object.fromEntries()") \
+ V(harmony_await_optimization, "harmony await taking 1 tick")
#ifdef V8_INTL_SUPPORT
-#define HARMONY_SHIPPING(V) \
- HARMONY_SHIPPING_BASE(V) \
+#define HARMONY_SHIPPING(V) \
+ HARMONY_SHIPPING_BASE(V) \
+ V(harmony_intl_list_format, "Intl.ListFormat") \
V(harmony_intl_relative_time_format, "Intl.RelativeTimeFormat")
#else
#define HARMONY_SHIPPING(V) HARMONY_SHIPPING_BASE(V)
@@ -279,6 +278,12 @@ HARMONY_SHIPPING(FLAG_SHIPPING_FEATURES)
DEFINE_BOOL(icu_timezone_data, true, "get information about timezones from ICU")
#endif
+#ifdef V8_LITE_MODE
+#define V8_LITE_BOOL true
+#else
+#define V8_LITE_BOOL false
+#endif
+
#ifdef V8_ENABLE_FUTURE
#define FUTURE_BOOL true
#else
@@ -289,6 +294,7 @@ DEFINE_BOOL(future, FUTURE_BOOL,
"not-too-far future")
DEFINE_IMPLICATION(future, write_protect_code_memory)
+DEFINE_IMPLICATION(future, flush_bytecode)
// Flags for experimental implementation features.
DEFINE_BOOL(allocation_site_pretenuring, true,
@@ -318,17 +324,11 @@ DEFINE_BOOL(feedback_normalization, false,
DEFINE_BOOL_READONLY(internalize_on_the_fly, true,
"internalize string keys for generic keyed ICs on the fly")
-// Flags for optimization types.
-DEFINE_BOOL(optimize_for_size, false,
- "Enables optimizations which favor memory size over execution "
- "speed")
-
// Flag for one-shot optimizations.
DEFINE_BOOL(enable_one_shot_optimization, true,
"Enable size optimizations for the code that will "
"only be executed once")
-DEFINE_VALUE_IMPLICATION(optimize_for_size, max_semi_space_size, 1)
// Flags for data representation optimizations
DEFINE_BOOL(unbox_double_arrays, true, "automatically unbox arrays of doubles")
@@ -394,10 +394,9 @@ DEFINE_INT(concurrent_recompilation_delay, 0,
"artificial compilation delay in ms")
DEFINE_BOOL(block_concurrent_recompilation, false,
"block queued jobs until released")
-DEFINE_BOOL(concurrent_compiler_frontend, false,
- "run optimizing compiler's frontend phases on a separate thread")
-DEFINE_IMPLICATION(future, concurrent_compiler_frontend)
-DEFINE_BOOL(strict_heap_broker, false, "fail on incomplete serialization")
+DEFINE_BOOL(concurrent_inlining, false,
+ "run optimizing compiler's inlining phase on a separate thread")
+DEFINE_IMPLICATION(future, concurrent_inlining)
DEFINE_BOOL(trace_heap_broker, false, "trace the heap broker")
// Flags for stress-testing the compiler.
@@ -501,6 +500,7 @@ DEFINE_BOOL(turbo_move_optimization, true, "optimize gap moves in TurboFan")
DEFINE_BOOL(turbo_jt, true, "enable jump threading in TurboFan")
DEFINE_BOOL(turbo_loop_peeling, true, "Turbofan loop peeling")
DEFINE_BOOL(turbo_loop_variable, true, "Turbofan loop variable optimization")
+DEFINE_BOOL(turbo_loop_rotation, true, "Turbofan loop rotation")
DEFINE_BOOL(turbo_cf_optimization, true, "optimize control flow in TurboFan")
DEFINE_BOOL(turbo_escape, true, "enable escape analysis")
DEFINE_BOOL(turbo_allocation_folding, true, "Turbofan allocation folding")
@@ -515,6 +515,9 @@ DEFINE_BOOL(turbo_rewrite_far_jumps, true,
"rewrite far to near jumps (ia32,x64)")
DEFINE_BOOL(experimental_inline_promise_constructor, true,
"inline the Promise constructor in TurboFan")
+DEFINE_BOOL(
+ stress_gc_during_compilation, false,
+ "simulate GC/compiler thread race related to https://crbug.com/v8/8520")
#ifdef DISABLE_UNTRUSTED_CODE_MITIGATIONS
#define V8_DEFAULT_UNTRUSTED_CODE_MITIGATIONS false
@@ -525,13 +528,6 @@ DEFINE_BOOL(untrusted_code_mitigations, V8_DEFAULT_UNTRUSTED_CODE_MITIGATIONS,
"Enable mitigations for executing untrusted code")
#undef V8_DEFAULT_UNTRUSTED_CODE_MITIGATIONS
-// Flags to help platform porters
-DEFINE_BOOL(minimal, false,
- "simplifies execution model to make porting "
- "easier (e.g. always use Ignition, never optimize)")
-DEFINE_NEG_IMPLICATION(minimal, opt)
-DEFINE_NEG_IMPLICATION(minimal, use_ic)
-
// Flags for native WebAssembly.
DEFINE_BOOL(expose_wasm, true, "expose wasm interface to JavaScript")
DEFINE_BOOL(assume_asmjs_origin, false,
@@ -554,6 +550,8 @@ DEFINE_UINT(wasm_max_mem_pages, v8::internal::wasm::kV8MaxWasmMemoryPages,
"maximum number of 64KiB memory pages of a wasm instance")
DEFINE_UINT(wasm_max_table_size, v8::internal::wasm::kV8MaxWasmTableSize,
"maximum table size of a wasm instance")
+DEFINE_UINT(wasm_max_code_space, v8::internal::kMaxWasmCodeMB,
+ "maximum committed code space for wasm (in MB)")
// Enable Liftoff by default on ia32 and x64. More architectures will follow
// once they are implemented and sufficiently tested.
#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64
@@ -622,6 +620,8 @@ DEFINE_BOOL(wasm_no_bounds_checks, false,
"disable bounds checks (performance testing only)")
DEFINE_BOOL(wasm_no_stack_checks, false,
"disable stack checks (performance testing only)")
+DEFINE_BOOL(wasm_math_intrinsics, true,
+ "intrinsify some Math imports into wasm")
DEFINE_BOOL(wasm_shared_engine, true,
"shares one wasm engine between all isolates within a process")
@@ -638,6 +638,7 @@ DEFINE_BOOL(wasm_fuzzer_gen_test, false,
"Generate a test case when running a wasm fuzzer")
DEFINE_IMPLICATION(wasm_fuzzer_gen_test, single_threaded)
DEFINE_BOOL(print_wasm_code, false, "Print WebAssembly code")
+DEFINE_BOOL(print_wasm_stub_code, false, "Print WebAssembly stub code")
DEFINE_BOOL(wasm_interpret_all, false,
"Execute all wasm code in the wasm interpreter")
DEFINE_BOOL(asm_wasm_lazy_compilation, false,
@@ -692,6 +693,8 @@ DEFINE_BOOL(trace_idle_notification_verbose, false,
"prints the heap state used by the idle notification")
DEFINE_BOOL(trace_gc_verbose, false,
"print more details following each garbage collection")
+DEFINE_IMPLICATION(trace_gc_verbose, trace_gc)
+
DEFINE_INT(trace_allocation_stack_interval, -1,
"print stack trace after <n> free-list allocations")
DEFINE_INT(trace_duplicate_threshold_kb, 0,
@@ -709,13 +712,7 @@ DEFINE_BOOL(incremental_marking_wrappers, true,
DEFINE_BOOL(trace_unmapper, false, "Trace the unmapping")
DEFINE_BOOL(parallel_scavenge, true, "parallel scavenge")
DEFINE_BOOL(trace_parallel_scavenge, false, "trace parallel scavenge")
-#if defined(V8_TARGET_ARCH_ARM) || defined(V8_TARGET_ARCH_ARM64)
-#define V8_WRITE_PROTECT_CODE_MEMORY_BOOL false
-#else
-#define V8_WRITE_PROTECT_CODE_MEMORY_BOOL true
-#endif
-DEFINE_BOOL(write_protect_code_memory, V8_WRITE_PROTECT_CODE_MEMORY_BOOL,
- "write protect code memory")
+DEFINE_BOOL(write_protect_code_memory, true, "write protect code memory")
#ifdef V8_CONCURRENT_MARKING
#define V8_CONCURRENT_MARKING_BOOL true
#else
@@ -724,12 +721,10 @@ DEFINE_BOOL(write_protect_code_memory, V8_WRITE_PROTECT_CODE_MEMORY_BOOL,
DEFINE_BOOL(concurrent_marking, V8_CONCURRENT_MARKING_BOOL,
"use concurrent marking")
DEFINE_BOOL(parallel_marking, true, "use parallel marking in atomic pause")
-DEFINE_IMPLICATION(parallel_marking, concurrent_marking)
DEFINE_INT(ephemeron_fixpoint_iterations, 10,
"number of fixpoint iterations it takes to switch to linear "
"ephemeron algorithm")
DEFINE_BOOL(trace_concurrent_marking, false, "trace concurrent marking")
-DEFINE_BOOL(black_allocation, true, "use black allocation")
DEFINE_BOOL(concurrent_store_buffer, true,
"use concurrent store buffer processing")
DEFINE_BOOL(concurrent_sweeping, true, "use concurrent sweeping")
@@ -778,6 +773,10 @@ DEFINE_BOOL(always_compact, false, "Perform compaction on every full GC")
DEFINE_BOOL(never_compact, false,
"Never perform compaction on full GC - testing only")
DEFINE_BOOL(compact_code_space, true, "Compact code space on full collections")
+DEFINE_BOOL(flush_bytecode, V8_LITE_BOOL,
+ "flush of bytecode when it has not been executed recently")
+DEFINE_BOOL(stress_flush_bytecode, false, "stress bytecode flushing")
+DEFINE_IMPLICATION(stress_flush_bytecode, flush_bytecode)
DEFINE_BOOL(use_marking_progress_bar, true,
"Use a progress bar to scan large objects in increments when "
"incremental marking is active.")
@@ -819,6 +818,8 @@ DEFINE_BOOL(young_generation_large_objects, false,
"allocates large objects by default in the young generation large "
"object space")
+DEFINE_BOOL(idle_time_scavenge, true, "Perform scavenges in idle time.")
+
// assembler-ia32.cc / assembler-arm.cc / assembler-x64.cc
DEFINE_BOOL(debug_code, DEBUG_BOOL,
"generate extra code (assertions) for debugging")
@@ -866,7 +867,6 @@ DEFINE_BOOL(disable_old_api_accessors, false,
"prototype chain")
// bootstrapper.cc
-DEFINE_STRING(expose_natives_as, nullptr, "expose natives in global object")
DEFINE_BOOL(expose_free_buffer, false, "expose freeBuffer extension")
DEFINE_BOOL(expose_gc, false, "expose gc extension")
DEFINE_STRING(expose_gc_as, nullptr,
@@ -886,6 +886,9 @@ DEFINE_BOOL(expose_async_hooks, false, "expose async_hooks object")
DEFINE_BOOL(allow_unsafe_function_constructor, false,
"allow invoking the function constructor without security checks")
DEFINE_BOOL(force_slow_path, false, "always take the slow path for builtins")
+DEFINE_BOOL(test_small_max_function_context_stub_size, false,
+ "enable testing the function context size overflow path "
+ "by making the maximum size smaller")
// builtins-ia32.cc
DEFINE_BOOL(inline_new, true, "use fast inline allocation")
@@ -903,7 +906,6 @@ DEFINE_BOOL(trace_deopt, false, "trace optimize function deoptimization")
DEFINE_BOOL(trace_file_names, false,
"include file names in trace-opt/trace-deopt output")
DEFINE_BOOL(trace_interrupts, false, "trace interrupts when they are handled")
-DEFINE_BOOL(opt, true, "use adaptive optimizations")
DEFINE_BOOL(always_opt, false, "always try to optimize functions")
DEFINE_BOOL(always_osr, false, "always try to OSR functions")
DEFINE_BOOL(prepare_always_opt, false, "prepare for turning on always opt")
@@ -920,7 +922,9 @@ DEFINE_BOOL(compilation_cache, true, "enable compilation cache")
DEFINE_BOOL(cache_prototype_transitions, true, "cache prototype transitions")
// compiler-dispatcher.cc
+DEFINE_BOOL(parallel_compile_tasks, false, "enable parallel compile tasks")
DEFINE_BOOL(compiler_dispatcher, false, "enable compiler dispatcher")
+DEFINE_IMPLICATION(parallel_compile_tasks, compiler_dispatcher)
DEFINE_BOOL(trace_compiler_dispatcher, false,
"trace compiler dispatcher activity")
@@ -980,7 +984,6 @@ DEFINE_BOOL(sampling_heap_profiler_suppress_randomness, false,
DEFINE_BOOL(use_idle_notification, true,
"Use idle notification to reduce memory footprint.")
// ic.cc
-DEFINE_BOOL(use_ic, true, "use inline caching")
DEFINE_BOOL(trace_ic, false,
"trace inline cache state transitions for tools/ic-processor")
DEFINE_IMPLICATION(trace_ic, log_code)
@@ -989,6 +992,8 @@ DEFINE_VALUE_IMPLICATION(trace_ic, ic_stats, 1)
DEFINE_BOOL_READONLY(track_constant_fields, false,
"enable constant field tracking")
DEFINE_BOOL_READONLY(modify_map_inplace, false, "enable in-place map updates")
+DEFINE_BOOL_READONLY(fast_map_update, false,
+ "enable fast map update by caching the migration target")
// macro-assembler-ia32.cc
DEFINE_BOOL(native_code_counters, false,
@@ -1006,13 +1011,6 @@ DEFINE_IMPLICATION(trace_maps, log_code)
// parser.cc
DEFINE_BOOL(allow_natives_syntax, false, "allow natives syntax")
-DEFINE_BOOL(lazy_inner_functions, true, "enable lazy parsing inner functions")
-DEFINE_BOOL(aggressive_lazy_inner_functions, false,
- "even lazier inner function parsing")
-DEFINE_IMPLICATION(aggressive_lazy_inner_functions, lazy_inner_functions)
-DEFINE_BOOL(preparser_scope_analysis, true,
- "perform scope analysis for preparsed inner functions")
-DEFINE_IMPLICATION(preparser_scope_analysis, aggressive_lazy_inner_functions)
// simulator-arm.cc, simulator-arm64.cc and simulator-mips.cc
DEFINE_BOOL(trace_sim, false, "Trace simulator execution")
@@ -1040,7 +1038,7 @@ DEFINE_BOOL(trace_sim_messages, false,
"Trace simulator debug messages. Implied by --trace-sim.")
// isolate.cc
-DEFINE_BOOL(async_stack_traces, false,
+DEFINE_BOOL(async_stack_traces, true,
"include async stack traces in Error.stack")
DEFINE_IMPLICATION(async_stack_traces, harmony_await_optimization)
DEFINE_BOOL(stack_trace_on_illegal, false,
@@ -1084,16 +1082,6 @@ DEFINE_VALUE_IMPLICATION(runtime_call_stats, runtime_stats, 1)
#endif
DEFINE_BOOL_READONLY(embedded_builtins, V8_EMBEDDED_BUILTINS_BOOL,
"Embed builtin code into the binary.")
-// TODO(jgruber,v8:6666): Remove once ia32 has full embedded builtin support.
-DEFINE_BOOL_READONLY(
- ia32_verify_root_register, false,
- "Check that the value of the root register was not clobbered.")
-// TODO(jgruber,v8:6666): Remove once ia32 has full embedded builtin support.
-DEFINE_BOOL(print_embedded_builtin_candidates, false,
- "Prints builtins that are not yet embedded but could be.")
-DEFINE_BOOL(lazy_deserialization, true,
- "Deserialize code lazily from the snapshot.")
-DEFINE_BOOL(trace_lazy_deserialization, false, "Trace lazy deserialization.")
DEFINE_BOOL(profile_deserialization, false,
"Print the time it takes to deserialize the snapshot.")
DEFINE_BOOL(serialization_statistics, false,
@@ -1146,8 +1134,6 @@ DEFINE_BOOL(dump_counters_nvp, false,
DEFINE_BOOL(use_external_strings, false, "Use external strings for source code")
DEFINE_STRING(map_counters, "", "Map counters to a file")
-DEFINE_ARGS(js_arguments,
- "Pass all remaining arguments to the script. Alias for \"--\".")
DEFINE_BOOL(mock_arraybuffer_allocator, false,
"Use a mock ArrayBuffer allocator for testing.")
DEFINE_SIZE_T(mock_arraybuffer_allocator_limit, 0,
@@ -1155,6 +1141,44 @@ DEFINE_SIZE_T(mock_arraybuffer_allocator_limit, 0,
"OOM for testing.")
//
+// Flags only available in non-Lite modes.
+//
+#undef FLAG
+#ifdef V8_LITE_MODE
+#define FLAG FLAG_READONLY
+#else
+#define FLAG FLAG_FULL
+#endif
+
+DEFINE_BOOL(jitless, V8_LITE_BOOL,
+ "Disable runtime allocation of executable memory.")
+
+// Jitless V8 has a few implications:
+#ifndef V8_LITE_MODE
+// Optimizations (i.e. jitting) are disabled.
+DEFINE_NEG_IMPLICATION(jitless, opt)
+#endif
+// asm.js validation is disabled since it triggers wasm code generation.
+DEFINE_NEG_IMPLICATION(jitless, validate_asm)
+// Wasm is put into interpreter-only mode. We repeat flag implications down
+// here to ensure they're applied correctly by setting the --jitless flag.
+DEFINE_IMPLICATION(jitless, wasm_interpret_all)
+DEFINE_NEG_IMPLICATION(jitless, asm_wasm_lazy_compilation)
+DEFINE_NEG_IMPLICATION(jitless, wasm_lazy_compilation)
+
+// Enable recompilation of function with optimized code.
+DEFINE_BOOL(opt, !V8_LITE_BOOL, "use adaptive optimizations")
+
+// Enable use of inline caches to optimize object access operations.
+DEFINE_BOOL(use_ic, !V8_LITE_BOOL, "use inline caching")
+
+// Favor memory over execution speed.
+DEFINE_BOOL(optimize_for_size, V8_LITE_BOOL,
+ "Enables optimizations which favor memory size over execution "
+ "speed")
+DEFINE_VALUE_IMPLICATION(optimize_for_size, max_semi_space_size, 1)
+
+//
// GDB JIT integration flags.
//
#undef FLAG
@@ -1287,9 +1311,6 @@ DEFINE_BOOL(prof_browser_mode, true,
DEFINE_STRING(logfile, "v8.log", "Specify the name of the log file.")
DEFINE_BOOL(logfile_per_isolate, true, "Separate log files for each isolate.")
DEFINE_BOOL(ll_prof, false, "Enable low-level linux profiler.")
-DEFINE_BOOL(interpreted_frames_native_stack, false,
- "Show interpreted frames on the native stack (useful for external "
- "profilers).")
DEFINE_BOOL(perf_basic_prof, false,
"Enable perf linux profiler (basic support).")
DEFINE_NEG_IMPLICATION(perf_basic_prof, compact_code_space)
@@ -1299,6 +1320,9 @@ DEFINE_IMPLICATION(perf_basic_prof_only_functions, perf_basic_prof)
DEFINE_BOOL(perf_prof, false,
"Enable perf linux profiler (experimental annotate support).")
DEFINE_NEG_IMPLICATION(perf_prof, compact_code_space)
+// TODO(v8:8462) Remove implication once perf supports remapping.
+DEFINE_NEG_IMPLICATION(perf_prof, write_protect_code_memory)
+DEFINE_NEG_IMPLICATION(perf_prof, wasm_write_protect_code_memory)
DEFINE_BOOL(perf_prof_unwinding_info, false,
"Enable unwinding info for perf linux profiler (experimental).")
DEFINE_IMPLICATION(perf_prof, perf_prof_unwinding_info)
@@ -1324,6 +1348,18 @@ DEFINE_STRING(redirect_code_traces_to, nullptr,
DEFINE_BOOL(print_opt_source, false,
"print source code of optimized and inlined functions")
+#ifdef V8_TARGET_ARCH_ARM
+// Unsupported on arm. See https://crbug.com/v8/8713.
+DEFINE_BOOL_READONLY(
+ interpreted_frames_native_stack, false,
+ "Show interpreted frames on the native stack (useful for external "
+ "profilers).")
+#else
+DEFINE_BOOL(interpreted_frames_native_stack, false,
+ "Show interpreted frames on the native stack (useful for external "
+ "profilers).")
+#endif
+
//
// Disassembler only flags
//
@@ -1340,18 +1376,6 @@ DEFINE_BOOL(trace_elements_transitions, false, "trace elements transitions")
DEFINE_BOOL(trace_creation_allocation_sites, false,
"trace the creation of allocation sites")
-// code-stubs.cc
-DEFINE_BOOL(print_code_stubs, false, "print code stubs")
-DEFINE_BOOL(test_secondary_stub_cache, false,
- "test secondary stub cache by disabling the primary one")
-
-DEFINE_BOOL(test_primary_stub_cache, false,
- "test primary stub cache by disabling the secondary one")
-
-DEFINE_BOOL(test_small_max_function_context_stub_size, false,
- "enable testing the function context size overflow path "
- "by making the maximum size smaller")
-
// codegen-ia32.cc / codegen-arm.cc
DEFINE_BOOL(print_code, false, "print generated code")
DEFINE_BOOL(print_opt_code, false, "print optimized code")
@@ -1367,7 +1391,6 @@ DEFINE_BOOL(sodium, false,
"print generated code output suitable for use with "
"the Sodium code viewer")
-DEFINE_IMPLICATION(sodium, print_code_stubs)
DEFINE_IMPLICATION(sodium, print_code)
DEFINE_IMPLICATION(sodium, print_opt_code)
DEFINE_IMPLICATION(sodium, code_comments)
@@ -1377,7 +1400,6 @@ DEFINE_IMPLICATION(print_all_code, print_code)
DEFINE_IMPLICATION(print_all_code, print_opt_code)
DEFINE_IMPLICATION(print_all_code, print_code_verbose)
DEFINE_IMPLICATION(print_all_code, print_builtin_code)
-DEFINE_IMPLICATION(print_all_code, print_code_stubs)
DEFINE_IMPLICATION(print_all_code, code_comments)
#endif
@@ -1394,6 +1416,15 @@ DEFINE_NEG_IMPLICATION(predictable, memory_reducer)
DEFINE_VALUE_IMPLICATION(single_threaded, wasm_num_compilation_tasks, 0)
DEFINE_NEG_IMPLICATION(single_threaded, wasm_async_compilation)
+DEFINE_BOOL(predictable_gc_schedule, false,
+ "Predictable garbage collection schedule. Fixes heap growing, "
+ "idle, and memory reducing behavior.")
+DEFINE_VALUE_IMPLICATION(predictable_gc_schedule, min_semi_space_size, 4)
+DEFINE_VALUE_IMPLICATION(predictable_gc_schedule, max_semi_space_size, 4)
+DEFINE_VALUE_IMPLICATION(predictable_gc_schedule, heap_growing_percent, 30)
+DEFINE_NEG_IMPLICATION(predictable_gc_schedule, idle_time_scavenge)
+DEFINE_NEG_IMPLICATION(predictable_gc_schedule, memory_reducer)
+
//
// Threading related flags.
//
@@ -1446,6 +1477,10 @@ DEFINE_BOOL(unbox_double_fields, V8_DOUBLE_FIELDS_UNBOXING,
"enable in-object double fields unboxing (64-bit only)")
DEFINE_IMPLICATION(unbox_double_fields, track_double_fields)
+DEFINE_BOOL(lite_mode, V8_LITE_BOOL,
+ "enables trade-off of performance for memory savings "
+ "(Lite mode only)")
+
// Cleanup...
#undef FLAG_FULL
#undef FLAG_READONLY
@@ -1458,7 +1493,6 @@ DEFINE_IMPLICATION(unbox_double_fields, track_double_fields)
#undef DEFINE_INT
#undef DEFINE_STRING
#undef DEFINE_FLOAT
-#undef DEFINE_ARGS
#undef DEFINE_IMPLICATION
#undef DEFINE_NEG_IMPLICATION
#undef DEFINE_NEG_VALUE_IMPLICATION
@@ -1467,7 +1501,6 @@ DEFINE_IMPLICATION(unbox_double_fields, track_double_fields)
#undef DEFINE_ALIAS_INT
#undef DEFINE_ALIAS_STRING
#undef DEFINE_ALIAS_FLOAT
-#undef DEFINE_ALIAS_ARGS
#undef FLAG_MODE_DECLARE
#undef FLAG_MODE_DEFINE
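Editor's note: the most consequential addition in this file is the --jitless flag and its cascade of implications. Conceptually, each DEFINE_IMPLICATION / DEFINE_NEG_IMPLICATION line in that hunk forces a dependent flag whenever jitless is set. A sketch of the resulting flag states, assuming a non-Lite build; the struct and function names are ours:

// Sketch of what the jitless implications amount to after flag parsing.
struct Flags {
  bool jitless = false;
  bool opt = true;
  bool validate_asm = true;
  bool wasm_interpret_all = false;
};

void EnforceJitlessImplications(Flags* f) {
  if (!f->jitless) return;
  f->opt = false;                // no optimizing compiler
  f->validate_asm = false;       // asm.js validation would generate wasm code
  f->wasm_interpret_all = true;  // wasm runs in the interpreter
}

In a shell built from this tree, passing --jitless should therefore behave like also passing --no-opt, --no-validate-asm, and --wasm-interpret-all.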
diff --git a/deps/v8/src/flags.cc b/deps/v8/src/flags.cc
index 13046097a6..33938a6347 100644
--- a/deps/v8/src/flags.cc
+++ b/deps/v8/src/flags.cc
@@ -10,9 +10,10 @@
#include <sstream>
#include "src/allocation.h"
-#include "src/assembler.h"
#include "src/base/functional.h"
#include "src/base/platform/platform.h"
+#include "src/cpu-features.h"
+#include "src/memcopy.h"
#include "src/ostreams.h"
#include "src/utils.h"
#include "src/wasm/wasm-limits.h"
@@ -43,7 +44,6 @@ struct Flag {
TYPE_FLOAT,
TYPE_SIZE_T,
TYPE_STRING,
- TYPE_ARGS
};
FlagType type_; // What type of flag, bool, int, or string.
@@ -107,11 +107,6 @@ struct Flag {
owns_ptr_ = owns_ptr;
}
- JSArguments* args_variable() const {
- DCHECK(type_ == TYPE_ARGS);
- return reinterpret_cast<JSArguments*>(valptr_);
- }
-
bool bool_default() const {
DCHECK(type_ == TYPE_BOOL);
return *reinterpret_cast<const bool*>(defptr_);
@@ -147,11 +142,6 @@ struct Flag {
return *reinterpret_cast<const char* const *>(defptr_);
}
- JSArguments args_default() const {
- DCHECK(type_ == TYPE_ARGS);
- return *reinterpret_cast<const JSArguments*>(defptr_);
- }
-
// Compare this flag's current value against the default.
bool IsDefault() const {
switch (type_) {
@@ -176,8 +166,6 @@ struct Flag {
if (str1 == nullptr) return str2 == nullptr;
return strcmp(str1, str2) == 0;
}
- case TYPE_ARGS:
- return args_variable()->argc == 0;
}
UNREACHABLE();
}
@@ -209,9 +197,6 @@ struct Flag {
case TYPE_STRING:
set_string_value(string_default(), false);
break;
- case TYPE_ARGS:
- *args_variable() = args_default();
- break;
}
}
};
@@ -239,7 +224,6 @@ static const char* Type2String(Flag::FlagType type) {
case Flag::TYPE_SIZE_T:
return "size_t";
case Flag::TYPE_STRING: return "string";
- case Flag::TYPE_ARGS: return "arguments";
}
UNREACHABLE();
}
@@ -275,16 +259,6 @@ std::ostream& operator<<(std::ostream& os, const Flag& flag) { // NOLINT
os << (str ? str : "nullptr");
break;
}
- case Flag::TYPE_ARGS: {
- JSArguments args = *flag.args_variable();
- if (args.argc > 0) {
- os << args[0];
- for (int i = 1; i < args.argc; i++) {
- os << args[i];
- }
- }
- break;
- }
}
return os;
}
@@ -293,15 +267,9 @@ std::ostream& operator<<(std::ostream& os, const Flag& flag) { // NOLINT
// static
std::vector<const char*>* FlagList::argv() {
std::vector<const char*>* args = new std::vector<const char*>(8);
- Flag* args_flag = nullptr;
for (size_t i = 0; i < num_flags; ++i) {
Flag* f = &flags[i];
if (!f->IsDefault()) {
- if (f->type() == Flag::TYPE_ARGS) {
- DCHECK_NULL(args_flag);
- args_flag = f; // Must be last in arguments.
- continue;
- }
{
bool disabled = f->type() == Flag::TYPE_BOOL && !*f->bool_variable();
std::ostringstream os;
@@ -315,15 +283,6 @@ std::vector<const char*>* FlagList::argv() {
}
}
}
- if (args_flag != nullptr) {
- std::ostringstream os;
- os << "--" << args_flag->name();
- args->push_back(StrDup(os.str().c_str()));
- JSArguments jsargs = *args_flag->args_variable();
- for (int j = 0; j < jsargs.argc; j++) {
- args->push_back(StrDup(jsargs[j]));
- }
- }
return args;
}
@@ -454,8 +413,7 @@ int FlagList::SetFlagsFromCommandLine(int* argc,
// if we still need a flag value, use the next argument if available
if (flag->type() != Flag::TYPE_BOOL &&
- flag->type() != Flag::TYPE_MAYBE_BOOL &&
- flag->type() != Flag::TYPE_ARGS && value == nullptr) {
+ flag->type() != Flag::TYPE_MAYBE_BOOL && value == nullptr) {
if (i < *argc) {
value = argv[i++];
}
@@ -503,20 +461,6 @@ int FlagList::SetFlagsFromCommandLine(int* argc,
case Flag::TYPE_STRING:
flag->set_string_value(value ? StrDup(value) : nullptr, true);
break;
- case Flag::TYPE_ARGS: {
- int start_pos = (value == nullptr) ? i : i - 1;
- int js_argc = *argc - start_pos;
- const char** js_argv = NewArray<const char*>(js_argc);
- if (value != nullptr) {
- js_argv[0] = StrDup(value);
- }
- for (int k = i; k < *argc; k++) {
- js_argv[k - start_pos] = StrDup(argv[k]);
- }
- *flag->args_variable() = JSArguments::Create(js_argc, js_argv);
- i = *argc; // Consume all arguments
- break;
- }
}
// handle errors
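Editor's note: with TYPE_ARGS removed, the parser's only remaining value rule is the one visible above: boolean flags never consume the next argv entry, while every other flag type may pull its value from there when none was given inline. That rule in isolation, as a sketch with names of our choosing:

// Sketch of the simplified value rule; not the real parser.
const char* TakeFlagValue(bool is_bool_like, const char* inline_value,
                          int* i, int argc, const char* argv[]) {
  // --flag=value carries its value inline; booleans take none at all.
  if (is_bool_like || inline_value != nullptr) return inline_value;
  return (*i < argc) ? argv[(*i)++] : nullptr;  // consume the next argument
}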
diff --git a/deps/v8/src/flags.h b/deps/v8/src/flags.h
index deb050980a..7f4e842f69 100644
--- a/deps/v8/src/flags.h
+++ b/deps/v8/src/flags.h
@@ -8,6 +8,7 @@
#include <vector>
#include "src/globals.h"
+#include "src/wasm/wasm-limits.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/frame-constants.h b/deps/v8/src/frame-constants.h
index 1426c72bd7..e91339d6be 100644
--- a/deps/v8/src/frame-constants.h
+++ b/deps/v8/src/frame-constants.h
@@ -50,7 +50,7 @@ namespace internal {
//
class CommonFrameConstants : public AllStatic {
public:
- static constexpr int kCallerFPOffset = 0 * kPointerSize;
+ static constexpr int kCallerFPOffset = 0 * kSystemPointerSize;
static constexpr int kCallerPCOffset = kCallerFPOffset + 1 * kFPOnStackSize;
static constexpr int kCallerSPOffset = kCallerPCOffset + 1 * kPCOnStackSize;
@@ -60,19 +60,19 @@ class CommonFrameConstants : public AllStatic {
// is the last object pointer.
static constexpr int kFixedFrameSizeAboveFp = kPCOnStackSize + kFPOnStackSize;
static constexpr int kFixedSlotCountAboveFp =
- kFixedFrameSizeAboveFp / kPointerSize;
+ kFixedFrameSizeAboveFp / kSystemPointerSize;
static constexpr int kCPSlotSize =
- FLAG_enable_embedded_constant_pool ? kPointerSize : 0;
- static constexpr int kCPSlotCount = kCPSlotSize / kPointerSize;
+ FLAG_enable_embedded_constant_pool ? kSystemPointerSize : 0;
+ static constexpr int kCPSlotCount = kCPSlotSize / kSystemPointerSize;
static constexpr int kConstantPoolOffset =
- kCPSlotSize ? -1 * kPointerSize : 0;
- static constexpr int kContextOrFrameTypeSize = kPointerSize;
+ kCPSlotSize ? -1 * kSystemPointerSize : 0;
+ static constexpr int kContextOrFrameTypeSize = kSystemPointerSize;
static constexpr int kContextOrFrameTypeOffset =
-(kCPSlotSize + kContextOrFrameTypeSize);
};
-// StandardFrames are used for interpreted, full-codegen and optimized
-// JavaScript frames. They always have a context below the saved fp/constant
+// StandardFrames are used for interpreted and optimized JavaScript
+// frames. They always have a context below the saved fp/constant
// pool and below that the JSFunction of the executing function.
//
// slot JS frame
@@ -104,15 +104,17 @@ class CommonFrameConstants : public AllStatic {
//
class StandardFrameConstants : public CommonFrameConstants {
public:
- static constexpr int kFixedFrameSizeFromFp = 2 * kPointerSize + kCPSlotSize;
+ static constexpr int kFixedFrameSizeFromFp =
+ 2 * kSystemPointerSize + kCPSlotSize;
static constexpr int kFixedFrameSize =
kFixedFrameSizeAboveFp + kFixedFrameSizeFromFp;
static constexpr int kFixedSlotCountFromFp =
- kFixedFrameSizeFromFp / kPointerSize;
- static constexpr int kFixedSlotCount = kFixedFrameSize / kPointerSize;
+ kFixedFrameSizeFromFp / kSystemPointerSize;
+ static constexpr int kFixedSlotCount = kFixedFrameSize / kSystemPointerSize;
static constexpr int kContextOffset = kContextOrFrameTypeOffset;
- static constexpr int kFunctionOffset = -2 * kPointerSize - kCPSlotSize;
- static constexpr int kExpressionsOffset = -3 * kPointerSize - kCPSlotSize;
+ static constexpr int kFunctionOffset = -2 * kSystemPointerSize - kCPSlotSize;
+ static constexpr int kExpressionsOffset =
+ -3 * kSystemPointerSize - kCPSlotSize;
static constexpr int kLastObjectOffset = kContextOffset;
};
@@ -152,10 +154,10 @@ class StandardFrameConstants : public CommonFrameConstants {
//
class OptimizedBuiltinFrameConstants : public StandardFrameConstants {
public:
- static constexpr int kArgCSize = kPointerSize;
- static constexpr int kArgCOffset = -3 * kPointerSize - kCPSlotSize;
+ static constexpr int kArgCSize = kSystemPointerSize;
+ static constexpr int kArgCOffset = -3 * kSystemPointerSize - kCPSlotSize;
static constexpr int kFixedFrameSize = kFixedFrameSizeAboveFp - kArgCOffset;
- static constexpr int kFixedSlotCount = kFixedFrameSize / kPointerSize;
+ static constexpr int kFixedSlotCount = kFixedFrameSize / kSystemPointerSize;
};
// TypedFrames have a SMI type marker value below the saved FP/constant pool to
@@ -195,27 +197,28 @@ class TypedFrameConstants : public CommonFrameConstants {
static constexpr int kFrameTypeOffset = kContextOrFrameTypeOffset;
static constexpr int kFixedFrameSizeFromFp = kCPSlotSize + kFrameTypeSize;
static constexpr int kFixedSlotCountFromFp =
- kFixedFrameSizeFromFp / kPointerSize;
+ kFixedFrameSizeFromFp / kSystemPointerSize;
static constexpr int kFixedFrameSize =
StandardFrameConstants::kFixedFrameSizeAboveFp + kFixedFrameSizeFromFp;
- static constexpr int kFixedSlotCount = kFixedFrameSize / kPointerSize;
+ static constexpr int kFixedSlotCount = kFixedFrameSize / kSystemPointerSize;
static constexpr int kFirstPushedFrameValueOffset =
- -StandardFrameConstants::kCPSlotSize - kFrameTypeSize - kPointerSize;
+ -StandardFrameConstants::kCPSlotSize - kFrameTypeSize -
+ kSystemPointerSize;
};
#define TYPED_FRAME_PUSHED_VALUE_OFFSET(x) \
- (TypedFrameConstants::kFirstPushedFrameValueOffset - (x)*kPointerSize)
+ (TypedFrameConstants::kFirstPushedFrameValueOffset - (x)*kSystemPointerSize)
#define TYPED_FRAME_SIZE(count) \
- (TypedFrameConstants::kFixedFrameSize + (count)*kPointerSize)
+ (TypedFrameConstants::kFixedFrameSize + (count)*kSystemPointerSize)
#define TYPED_FRAME_SIZE_FROM_SP(count) \
- (TypedFrameConstants::kFixedFrameSizeFromFp + (count)*kPointerSize)
-#define DEFINE_TYPED_FRAME_SIZES(count) \
- static constexpr int kFixedFrameSize = TYPED_FRAME_SIZE(count); \
- static constexpr int kFixedSlotCount = kFixedFrameSize / kPointerSize; \
- static constexpr int kFixedFrameSizeFromFp = \
- TYPED_FRAME_SIZE_FROM_SP(count); \
- static constexpr int kFixedSlotCountFromFp = \
- kFixedFrameSizeFromFp / kPointerSize
+ (TypedFrameConstants::kFixedFrameSizeFromFp + (count)*kSystemPointerSize)
+#define DEFINE_TYPED_FRAME_SIZES(count) \
+ static constexpr int kFixedFrameSize = TYPED_FRAME_SIZE(count); \
+ static constexpr int kFixedSlotCount = kFixedFrameSize / kSystemPointerSize; \
+ static constexpr int kFixedFrameSizeFromFp = \
+ TYPED_FRAME_SIZE_FROM_SP(count); \
+ static constexpr int kFixedSlotCountFromFp = \
+ kFixedFrameSizeFromFp / kSystemPointerSize
class ArgumentsAdaptorFrameConstants : public TypedFrameConstants {
public:
@@ -278,11 +281,14 @@ class BuiltinContinuationFrameConstants : public TypedFrameConstants {
// Behaves like an exit frame but with target and new target args.
class BuiltinExitFrameConstants : public CommonFrameConstants {
public:
- static constexpr int kNewTargetOffset = kCallerPCOffset + 1 * kPointerSize;
- static constexpr int kTargetOffset = kNewTargetOffset + 1 * kPointerSize;
- static constexpr int kArgcOffset = kTargetOffset + 1 * kPointerSize;
- static constexpr int kPaddingOffset = kArgcOffset + 1 * kPointerSize;
- static constexpr int kFirstArgumentOffset = kPaddingOffset + 1 * kPointerSize;
+ static constexpr int kNewTargetOffset =
+ kCallerPCOffset + 1 * kSystemPointerSize;
+ static constexpr int kTargetOffset =
+ kNewTargetOffset + 1 * kSystemPointerSize;
+ static constexpr int kArgcOffset = kTargetOffset + 1 * kSystemPointerSize;
+ static constexpr int kPaddingOffset = kArgcOffset + 1 * kSystemPointerSize;
+ static constexpr int kFirstArgumentOffset =
+ kPaddingOffset + 1 * kSystemPointerSize;
static constexpr int kNumExtraArgsWithReceiver = 5;
};
@@ -290,9 +296,9 @@ class InterpreterFrameConstants : public AllStatic {
public:
// Fixed frame includes bytecode array and bytecode offset.
static constexpr int kFixedFrameSize =
- StandardFrameConstants::kFixedFrameSize + 2 * kPointerSize;
+ StandardFrameConstants::kFixedFrameSize + 2 * kSystemPointerSize;
static constexpr int kFixedFrameSizeFromFp =
- StandardFrameConstants::kFixedFrameSizeFromFp + 2 * kPointerSize;
+ StandardFrameConstants::kFixedFrameSizeFromFp + 2 * kSystemPointerSize;
// FP-relative.
static constexpr int kLastParamFromFp =
@@ -300,18 +306,18 @@ class InterpreterFrameConstants : public AllStatic {
static constexpr int kCallerPCOffsetFromFp =
StandardFrameConstants::kCallerPCOffset;
static constexpr int kBytecodeArrayFromFp =
- -StandardFrameConstants::kFixedFrameSizeFromFp - 1 * kPointerSize;
+ -StandardFrameConstants::kFixedFrameSizeFromFp - 1 * kSystemPointerSize;
static constexpr int kBytecodeOffsetFromFp =
- -StandardFrameConstants::kFixedFrameSizeFromFp - 2 * kPointerSize;
+ -StandardFrameConstants::kFixedFrameSizeFromFp - 2 * kSystemPointerSize;
static constexpr int kRegisterFileFromFp =
- -StandardFrameConstants::kFixedFrameSizeFromFp - 3 * kPointerSize;
+ -StandardFrameConstants::kFixedFrameSizeFromFp - 3 * kSystemPointerSize;
static constexpr int kExpressionsOffset = kRegisterFileFromFp;
// Number of fixed slots in addition to a {StandardFrame}.
static constexpr int kExtraSlotCount =
- InterpreterFrameConstants::kFixedFrameSize / kPointerSize -
- StandardFrameConstants::kFixedFrameSize / kPointerSize;
+ InterpreterFrameConstants::kFixedFrameSize / kSystemPointerSize -
+ StandardFrameConstants::kFixedFrameSize / kSystemPointerSize;
// Expression index for {StandardFrame::GetExpressionAddress}.
static constexpr int kBytecodeArrayExpressionIndex = -2;
@@ -326,12 +332,12 @@ class InterpreterFrameConstants : public AllStatic {
inline static int FPOffsetToFrameSlot(int frame_offset) {
return StandardFrameConstants::kFixedSlotCountAboveFp - 1 -
- frame_offset / kPointerSize;
+ frame_offset / kSystemPointerSize;
}
inline static int FrameSlotToFPOffset(int slot) {
return (StandardFrameConstants::kFixedSlotCountAboveFp - 1 - slot) *
- kPointerSize;
+ kSystemPointerSize;
}
} // namespace internal
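Editor's note: the kPointerSize to kSystemPointerSize rename in this file is deliberate. Stack slots are machine words even when pointer compression (see COMPRESS_POINTERS_BOOL above) shrinks tagged fields to kTaggedSize, so frame arithmetic must not pick up the tagged size. The slot/offset round trip from the last hunk, runnable as-is on a 64-bit target:

// Sketch of the slot <-> FP-offset round trip, assuming a 64-bit target
// and the standard two slots (caller PC and caller FP) above fp.
#include <cassert>

constexpr int kSystemPointerSize = 8;
constexpr int kFixedSlotCountAboveFp = 2;  // caller PC + caller FP

int FPOffsetToFrameSlot(int frame_offset) {
  return kFixedSlotCountAboveFp - 1 - frame_offset / kSystemPointerSize;
}

int FrameSlotToFPOffset(int slot) {
  return (kFixedSlotCountAboveFp - 1 - slot) * kSystemPointerSize;
}

int main() {
  for (int slot = -3; slot <= 3; ++slot) {
    assert(FPOffsetToFrameSlot(FrameSlotToFPOffset(slot)) == slot);
  }
}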
diff --git a/deps/v8/src/frames-inl.h b/deps/v8/src/frames-inl.h
index 419adbf73e..f3f6805aa7 100644
--- a/deps/v8/src/frames-inl.h
+++ b/deps/v8/src/frames-inl.h
@@ -14,6 +14,33 @@
namespace v8 {
namespace internal {
+class InnerPointerToCodeCache {
+ public:
+ struct InnerPointerToCodeCacheEntry {
+ Address inner_pointer;
+ Code code;
+ SafepointEntry safepoint_entry;
+ };
+
+ explicit InnerPointerToCodeCache(Isolate* isolate) : isolate_(isolate) {
+ Flush();
+ }
+
+ void Flush() { memset(static_cast<void*>(&cache_[0]), 0, sizeof(cache_)); }
+
+ InnerPointerToCodeCacheEntry* GetCacheEntry(Address inner_pointer);
+
+ private:
+ InnerPointerToCodeCacheEntry* cache(int index) { return &cache_[index]; }
+
+ Isolate* isolate_;
+
+ static const int kInnerPointerToCodeCacheSize = 1024;
+ InnerPointerToCodeCacheEntry cache_[kInnerPointerToCodeCacheSize];
+
+ DISALLOW_COPY_AND_ASSIGN(InnerPointerToCodeCache);
+};
+
inline Address StackHandler::address() const {
return reinterpret_cast<Address>(const_cast<StackHandler*>(this));
}
@@ -24,6 +51,9 @@ inline StackHandler* StackHandler::next() const {
return FromAddress(Memory<Address>(address() + offset));
}
+inline Address StackHandler::next_address() const {
+ return Memory<Address>(address() + StackHandlerConstants::kNextOffset);
+}
inline StackHandler* StackHandler::FromAddress(Address address) {
return reinterpret_cast<StackHandler*>(address);
@@ -70,48 +100,47 @@ inline ExitFrame::ExitFrame(StackFrameIteratorBase* iterator)
inline BuiltinExitFrame::BuiltinExitFrame(StackFrameIteratorBase* iterator)
: ExitFrame(iterator) {}
-inline Object* BuiltinExitFrame::receiver_slot_object() const {
+inline Object BuiltinExitFrame::receiver_slot_object() const {
// The receiver is the first argument on the frame.
// fp[1]: return address.
// fp[2]: the last argument (new target).
// fp[4]: argc.
// fp[2 + argc - 1]: receiver.
- Object* argc_slot = argc_slot_object();
+ Object argc_slot = argc_slot_object();
DCHECK(argc_slot->IsSmi());
int argc = Smi::ToInt(argc_slot);
- const int receiverOffset =
- BuiltinExitFrameConstants::kNewTargetOffset + (argc - 1) * kPointerSize;
- return Memory<Object*>(fp() + receiverOffset);
+ const int receiverOffset = BuiltinExitFrameConstants::kNewTargetOffset +
+ (argc - 1) * kSystemPointerSize;
+ return Object(Memory<Address>(fp() + receiverOffset));
}
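
The receiver lookup above steps argc - 1 slots past the new-target slot. A worked instance under the same illustrative layout as before (8-byte pointers, kNewTargetOffset == 16):

// Assumed values, for illustration only.
constexpr int kSystemPointerSize = 8;
constexpr int kNewTargetOffset = 16;
constexpr int argc = 3;  // receiver plus two ordinary arguments

constexpr int receiverOffset =
    kNewTargetOffset + (argc - 1) * kSystemPointerSize;
// With argc == 3 the receiver sits two slots past the new-target slot.
static_assert(receiverOffset == 32, "");
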
-inline Object* BuiltinExitFrame::argc_slot_object() const {
- return Memory<Object*>(fp() + BuiltinExitFrameConstants::kArgcOffset);
+inline Object BuiltinExitFrame::argc_slot_object() const {
+ return Object(Memory<Address>(fp() + BuiltinExitFrameConstants::kArgcOffset));
}
-inline Object* BuiltinExitFrame::target_slot_object() const {
- return Memory<Object*>(fp() + BuiltinExitFrameConstants::kTargetOffset);
+inline Object BuiltinExitFrame::target_slot_object() const {
+ return Object(
+ Memory<Address>(fp() + BuiltinExitFrameConstants::kTargetOffset));
}
-inline Object* BuiltinExitFrame::new_target_slot_object() const {
- return Memory<Object*>(fp() + BuiltinExitFrameConstants::kNewTargetOffset);
+inline Object BuiltinExitFrame::new_target_slot_object() const {
+ return Object(
+ Memory<Address>(fp() + BuiltinExitFrameConstants::kNewTargetOffset));
}
inline StandardFrame::StandardFrame(StackFrameIteratorBase* iterator)
: StackFrame(iterator) {
}
-
-inline Object* StandardFrame::GetExpression(int index) const {
- return Memory<Object*>(GetExpressionAddress(index));
+inline Object StandardFrame::GetExpression(int index) const {
+ return Object(Memory<Address>(GetExpressionAddress(index)));
}
-
-inline void StandardFrame::SetExpression(int index, Object* value) {
- Memory<Object*>(GetExpressionAddress(index)) = value;
+inline void StandardFrame::SetExpression(int index, Object value) {
+ Memory<Address>(GetExpressionAddress(index)) = value->ptr();
}
-
inline Address StandardFrame::caller_fp() const {
return Memory<Address>(fp() + StandardFrameConstants::kCallerFPOffset);
}
@@ -153,23 +182,21 @@ Address JavaScriptFrame::GetParameterSlot(int index) const {
DCHECK(-1 <= index &&
(index < param_count ||
param_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel));
- int parameter_offset = (param_count - index - 1) * kPointerSize;
+ int parameter_offset = (param_count - index - 1) * kSystemPointerSize;
return caller_sp() + parameter_offset;
}
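
GetParameterSlot above addresses parameters downward from the caller's SP, so the highest index is closest to it and index -1 (the receiver) lies one slot beyond the parameter area. A minimal sketch, assuming three parameters and 8-byte pointers:

constexpr int kSystemPointerSize = 8;  // assumed
constexpr int param_count = 3;         // hypothetical

constexpr int ParameterOffset(int index) {
  return (param_count - index - 1) * kSystemPointerSize;
}

static_assert(ParameterOffset(param_count - 1) == 0, "last parameter at caller SP");
static_assert(ParameterOffset(-1) == param_count * kSystemPointerSize,
              "receiver one slot beyond the parameters");
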
-inline void JavaScriptFrame::set_receiver(Object* value) {
- Memory<Object*>(GetParameterSlot(-1)) = value;
+inline void JavaScriptFrame::set_receiver(Object value) {
+ Memory<Address>(GetParameterSlot(-1)) = value->ptr();
}
-
inline bool JavaScriptFrame::has_adapted_arguments() const {
return IsArgumentsAdaptorFrame(caller_fp());
}
-
-inline Object* JavaScriptFrame::function_slot_object() const {
+inline Object JavaScriptFrame::function_slot_object() const {
const int offset = JavaScriptFrameConstants::kFunctionOffset;
- return Memory<Object*>(fp() + offset);
+ return Object(Memory<Address>(fp() + offset));
}
inline StubFrame::StubFrame(StackFrameIteratorBase* iterator)
diff --git a/deps/v8/src/frames.cc b/deps/v8/src/frames.cc
index a4d2561cd3..7af1ef1e98 100644
--- a/deps/v8/src/frames.cc
+++ b/deps/v8/src/frames.cc
@@ -11,8 +11,13 @@
#include "src/deoptimizer.h"
#include "src/frames-inl.h"
#include "src/ic/ic-stats.h"
+#include "src/macro-assembler.h"
+#include "src/objects/code.h"
+#include "src/objects/slots.h"
+#include "src/objects/smi.h"
#include "src/register-configuration.h"
#include "src/safepoint-table.h"
+#include "src/snapshot/snapshot.h"
#include "src/string-stream.h"
#include "src/visitors.h"
#include "src/vm-state-inl.h"
@@ -171,11 +176,11 @@ namespace {
bool IsInterpreterFramePc(Isolate* isolate, Address pc,
StackFrame::State* state) {
- Code* interpreter_entry_trampoline =
+ Code interpreter_entry_trampoline =
isolate->builtins()->builtin(Builtins::kInterpreterEntryTrampoline);
- Code* interpreter_bytecode_advance =
+ Code interpreter_bytecode_advance =
isolate->builtins()->builtin(Builtins::kInterpreterEnterBytecodeAdvance);
- Code* interpreter_bytecode_dispatch =
+ Code interpreter_bytecode_dispatch =
isolate->builtins()->builtin(Builtins::kInterpreterEnterBytecodeDispatch);
if (interpreter_entry_trampoline->contains(pc) ||
@@ -186,9 +191,10 @@ bool IsInterpreterFramePc(Isolate* isolate, Address pc,
intptr_t marker = Memory<intptr_t>(
state->fp + CommonFrameConstants::kContextOrFrameTypeOffset);
MSAN_MEMORY_IS_INITIALIZED(
- state->fp + StandardFrameConstants::kFunctionOffset, kPointerSize);
- Object* maybe_function =
- Memory<Object*>(state->fp + StandardFrameConstants::kFunctionOffset);
+ state->fp + StandardFrameConstants::kFunctionOffset,
+ kSystemPointerSize);
+ Object maybe_function = Object(
+ Memory<Address>(state->fp + StandardFrameConstants::kFunctionOffset));
// There's no need to run a full ContainsSlow if we know the frame can't be
// an InterpretedFrame, so we do these fast checks first.
if (StackFrame::IsTypeMarker(marker) || maybe_function->IsSmi()) {
@@ -222,7 +228,24 @@ SafeStackFrameIterator::SafeStackFrameIterator(
StackFrame::Type type;
ThreadLocalTop* top = isolate->thread_local_top();
bool advance_frame = true;
- if (IsValidTop(top)) {
+
+ Address fast_c_fp = isolate->isolate_data()->fast_c_call_caller_fp();
+ // 'Fast C calls' are a special type of C call where we call directly from JS
+  // to C without an exit frame in between. The CEntryStub is responsible for
+ // setting Isolate::c_entry_fp, meaning that it won't be set for fast C calls.
+ // To keep the stack iterable, we store the FP and PC of the caller of the
+ // fast C call on the isolate. This is guaranteed to be the topmost JS frame,
+ // because fast C calls cannot call back into JS. We start iterating the stack
+ // from this topmost JS frame.
+ if (fast_c_fp) {
+ DCHECK_NE(kNullAddress, isolate->isolate_data()->fast_c_call_caller_pc());
+ type = StackFrame::Type::OPTIMIZED;
+ top_frame_type_ = type;
+ state.fp = fast_c_fp;
+ state.sp = sp;
+ state.pc_address = isolate->isolate_data()->fast_c_call_caller_pc_address();
+ advance_frame = false;
+ } else if (IsValidTop(top)) {
type = ExitFrame::GetStateForFramePointer(Isolate::c_entry_fp(top), &state);
top_frame_type_ = type;
} else if (IsValidStackAddress(fp)) {
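
For context, the fast-C-call branch above consumes a pair of per-isolate scratch fields. A hedged sketch of the save/clear protocol those fields imply; the names and structure here are illustrative, not V8's API:

#include <cstdint>

using Address = uintptr_t;

struct FastCCallScratch {  // stand-in for the IsolateData fields
  Address caller_fp = 0;   // 0 == kNullAddress: no fast call in progress
  Address caller_pc = 0;
};

// Before jumping from JS straight into C with no exit frame, record the
// caller's FP and PC so a profiler interrupting the C code can still find
// the topmost JS frame and walk the stack from it.
inline void EnterFastCCall(FastCCallScratch* s, Address fp, Address pc) {
  s->caller_fp = fp;
  s->caller_pc = pc;
}

// On return, clear the fields so iteration falls back to c_entry_fp.
inline void ExitFastCCall(FastCCallScratch* s) {
  s->caller_fp = 0;
  s->caller_pc = 0;
}
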
@@ -236,7 +259,7 @@ SafeStackFrameIterator::SafeStackFrameIterator(
// then we are likely in a bytecode handler with elided frame. In that
// case, set the PC properly and make sure we do not drop the frame.
if (IsValidStackAddress(sp)) {
- MSAN_MEMORY_IS_INITIALIZED(sp, kPointerSize);
+ MSAN_MEMORY_IS_INITIALIZED(sp, kSystemPointerSize);
Address tos = ReadMemoryAt(sp);
if (IsInterpreterFramePc(isolate, tos, &state)) {
state.pc_address = reinterpret_cast<Address*>(sp);
@@ -330,8 +353,8 @@ bool SafeStackFrameIterator::IsValidCaller(StackFrame* frame) {
// See ArgumentsAdaptorFrame::GetCallerStackPointer. It assumes that
// the number of arguments is stored on the stack as a Smi. We need to check
// that it really is a Smi.
- Object* number_of_args = reinterpret_cast<ArgumentsAdaptorFrame*>(frame)->
- GetExpression(0);
+ Object number_of_args =
+ reinterpret_cast<ArgumentsAdaptorFrame*>(frame)->GetExpression(0);
if (!number_of_args->IsSmi()) {
return false;
}
@@ -388,27 +411,27 @@ void SafeStackFrameIterator::Advance() {
// -------------------------------------------------------------------------
namespace {
-Code* GetContainingCode(Isolate* isolate, Address pc) {
+Code GetContainingCode(Isolate* isolate, Address pc) {
return isolate->inner_pointer_to_code_cache()->GetCacheEntry(pc)->code;
}
} // namespace
-Code* StackFrame::LookupCode() const {
- Code* result = GetContainingCode(isolate(), pc());
+Code StackFrame::LookupCode() const {
+ Code result = GetContainingCode(isolate(), pc());
DCHECK_GE(pc(), result->InstructionStart());
DCHECK_LT(pc(), result->InstructionEnd());
return result;
}
void StackFrame::IteratePc(RootVisitor* v, Address* pc_address,
- Address* constant_pool_address, Code* holder) {
+ Address* constant_pool_address, Code holder) {
Address pc = *pc_address;
DCHECK(holder->GetHeap()->GcSafeCodeContains(holder, pc));
unsigned pc_offset = static_cast<unsigned>(pc - holder->InstructionStart());
- Object* code = holder;
- v->VisitRootPointer(Root::kTop, nullptr, &code);
+ Object code = holder;
+ v->VisitRootPointer(Root::kTop, nullptr, FullObjectSlot(&code));
if (code == holder) return;
- holder = reinterpret_cast<Code*>(code);
+ holder = Code::unchecked_cast(code);
pc = holder->InstructionStart() + pc_offset;
*pc_address = pc;
if (FLAG_enable_embedded_constant_pool && constant_pool_address) {
@@ -429,7 +452,7 @@ StackFrame::Type StackFrame::ComputeType(const StackFrameIteratorBase* iterator,
MSAN_MEMORY_IS_INITIALIZED(
state->fp + CommonFrameConstants::kContextOrFrameTypeOffset,
- kPointerSize);
+ kSystemPointerSize);
intptr_t marker = Memory<intptr_t>(
state->fp + CommonFrameConstants::kContextOrFrameTypeOffset);
if (!iterator->can_access_heap_objects_) {
@@ -439,9 +462,10 @@ StackFrame::Type StackFrame::ComputeType(const StackFrameIteratorBase* iterator,
// anything on the stack. So basically none of these checks are 100%
// reliable.
MSAN_MEMORY_IS_INITIALIZED(
- state->fp + StandardFrameConstants::kFunctionOffset, kPointerSize);
- Object* maybe_function =
- Memory<Object*>(state->fp + StandardFrameConstants::kFunctionOffset);
+ state->fp + StandardFrameConstants::kFunctionOffset,
+ kSystemPointerSize);
+ Object maybe_function = Object(
+ Memory<Address>(state->fp + StandardFrameConstants::kFunctionOffset));
if (!StackFrame::IsTypeMarker(marker)) {
if (maybe_function->IsSmi()) {
return NATIVE;
@@ -475,8 +499,8 @@ StackFrame::Type StackFrame::ComputeType(const StackFrameIteratorBase* iterator,
}
} else {
// Look up the code object to figure out the type of the stack frame.
- Code* code_obj = GetContainingCode(iterator->isolate(), pc);
- if (code_obj != nullptr) {
+ Code code_obj = GetContainingCode(iterator->isolate(), pc);
+ if (!code_obj.is_null()) {
switch (code_obj->kind()) {
case Code::BUILTIN:
if (StackFrame::IsTypeMarker(marker)) break;
@@ -559,6 +583,8 @@ Address StackFrame::UnpaddedFP() const {
return fp();
}
+Code NativeFrame::unchecked_code() const { return Code(); }
+
void NativeFrame::ComputeCallerState(State* state) const {
state->sp = caller_sp();
state->fp = Memory<Address>(fp() + CommonFrameConstants::kCallerFPOffset);
@@ -568,8 +594,8 @@ void NativeFrame::ComputeCallerState(State* state) const {
state->constant_pool_address = nullptr;
}
-Code* EntryFrame::unchecked_code() const {
- return isolate()->heap()->js_entry_code();
+Code EntryFrame::unchecked_code() const {
+ return isolate()->heap()->builtin(Builtins::kJSEntry);
}
@@ -584,21 +610,19 @@ StackFrame::Type EntryFrame::GetCallerState(State* state) const {
return ExitFrame::GetStateForFramePointer(fp, state);
}
-Code* ConstructEntryFrame::unchecked_code() const {
- return isolate()->heap()->js_construct_entry_code();
+Code ConstructEntryFrame::unchecked_code() const {
+ return isolate()->heap()->builtin(Builtins::kJSConstructEntry);
}
-
-Object*& ExitFrame::code_slot() const {
+Address& ExitFrame::code_slot() const {
const int offset = ExitFrameConstants::kCodeOffset;
- return Memory<Object*>(fp() + offset);
+ return Memory<Address>(fp() + offset);
}
-Code* ExitFrame::unchecked_code() const {
- return reinterpret_cast<Code*>(code_slot());
+Code ExitFrame::unchecked_code() const {
+ return Code::unchecked_cast(Object(code_slot()));
}
-
void ExitFrame::ComputeCallerState(State* state) const {
// Set up the caller state.
state->sp = caller_sp();
@@ -617,7 +641,7 @@ void ExitFrame::Iterate(RootVisitor* v) const {
// The arguments are traversed as part of the expression stack of
// the calling frame.
IteratePc(v, pc_address(), constant_pool_address(), LookupCode());
- v->VisitRootPointer(Root::kTop, nullptr, &code_slot());
+ v->VisitRootPointer(Root::kTop, nullptr, FullObjectSlot(&code_slot()));
}
@@ -639,7 +663,7 @@ StackFrame::Type ExitFrame::ComputeFrameType(Address fp) {
// Distinguish between regular and builtin exit frames.
// Default to EXIT in all hairy cases (e.g., when called from profiler).
const int offset = ExitFrameConstants::kFrameTypeOffset;
- Object* marker = Memory<Object*>(fp + offset);
+ Object marker(Memory<Address>(fp + offset));
if (!marker->IsSmi()) {
return EXIT;
@@ -656,7 +680,8 @@ StackFrame::Type ExitFrame::ComputeFrameType(Address fp) {
}
Address ExitFrame::ComputeStackPointer(Address fp) {
- MSAN_MEMORY_IS_INITIALIZED(fp + ExitFrameConstants::kSPOffset, kPointerSize);
+ MSAN_MEMORY_IS_INITIALIZED(fp + ExitFrameConstants::kSPOffset,
+ kSystemPointerSize);
return Memory<Address>(fp + ExitFrameConstants::kSPOffset);
}
@@ -673,25 +698,25 @@ void ExitFrame::FillState(Address fp, Address sp, State* state) {
state->constant_pool_address = nullptr;
}
-JSFunction* BuiltinExitFrame::function() const {
+JSFunction BuiltinExitFrame::function() const {
return JSFunction::cast(target_slot_object());
}
-Object* BuiltinExitFrame::receiver() const { return receiver_slot_object(); }
+Object BuiltinExitFrame::receiver() const { return receiver_slot_object(); }
bool BuiltinExitFrame::IsConstructor() const {
return !new_target_slot_object()->IsUndefined(isolate());
}
-Object* BuiltinExitFrame::GetParameter(int i) const {
+Object BuiltinExitFrame::GetParameter(int i) const {
DCHECK(i >= 0 && i < ComputeParametersCount());
int offset =
- BuiltinExitFrameConstants::kFirstArgumentOffset + i * kPointerSize;
- return Memory<Object*>(fp() + offset);
+ BuiltinExitFrameConstants::kFirstArgumentOffset + i * kSystemPointerSize;
+ return Object(Memory<Address>(fp() + offset));
}
int BuiltinExitFrame::ComputeParametersCount() const {
- Object* argc_slot = argc_slot_object();
+ Object argc_slot = argc_slot_object();
DCHECK(argc_slot->IsSmi());
// Argc also counts the receiver, target, new target, and argc itself as args,
// therefore the real argument count is argc - 4.
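
A quick worked instance of the argc accounting described in the comment above (values hypothetical):

// A builtin call f(a, b): argc counts receiver, target, new target, and
// argc itself in addition to the two real arguments.
constexpr int argc_slot_value = 6;  // hypothetical stack value
static_assert(argc_slot_value - 4 == 2, "two JS-level arguments remain");
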
@@ -730,13 +755,13 @@ void StackFrame::Print(StringStream* accumulator, PrintMode mode,
void BuiltinExitFrame::Print(StringStream* accumulator, PrintMode mode,
int index) const {
DisallowHeapAllocation no_gc;
- Object* receiver = this->receiver();
- JSFunction* function = this->function();
+ Object receiver = this->receiver();
+ JSFunction function = this->function();
accumulator->PrintSecurityTokenIfChanged(function);
PrintIndex(accumulator, mode, index);
accumulator->Add("builtin exit frame: ");
- Code* code = nullptr;
+ Code code;
if (IsConstructor()) accumulator->Add("new ");
accumulator->PrintFunction(function, receiver, &code);
@@ -753,43 +778,43 @@ void BuiltinExitFrame::Print(StringStream* accumulator, PrintMode mode,
Address StandardFrame::GetExpressionAddress(int n) const {
const int offset = StandardFrameConstants::kExpressionsOffset;
- return fp() + offset - n * kPointerSize;
+ return fp() + offset - n * kSystemPointerSize;
}
Address InterpretedFrame::GetExpressionAddress(int n) const {
const int offset = InterpreterFrameConstants::kExpressionsOffset;
- return fp() + offset - n * kPointerSize;
+ return fp() + offset - n * kSystemPointerSize;
}
-Script* StandardFrame::script() const {
+Script StandardFrame::script() const {
// This should only be called on frames which override this method.
- DCHECK(false);
- return nullptr;
+ UNREACHABLE();
+ return Script();
}
-Object* StandardFrame::receiver() const {
+Object StandardFrame::receiver() const {
return ReadOnlyRoots(isolate()).undefined_value();
}
-Object* StandardFrame::context() const {
+Object StandardFrame::context() const {
return ReadOnlyRoots(isolate()).undefined_value();
}
int StandardFrame::position() const {
- AbstractCode* code = AbstractCode::cast(LookupCode());
+ AbstractCode code = AbstractCode::cast(LookupCode());
int code_offset = static_cast<int>(pc() - code->InstructionStart());
return code->SourcePosition(code_offset);
}
int StandardFrame::ComputeExpressionsCount() const {
Address base = GetExpressionAddress(0);
- Address limit = sp() - kPointerSize;
+ Address limit = sp() - kSystemPointerSize;
DCHECK(base >= limit); // stack grows downwards
// Include register-allocated locals in number of expressions.
- return static_cast<int>((base - limit) / kPointerSize);
+ return static_cast<int>((base - limit) / kSystemPointerSize);
}
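
The count above is just the distance from expression 0 down to one slot below sp(), in pointer-sized units. A worked instance with assumed addresses:

constexpr unsigned long kSystemPointerSize = 8;  // assumed
constexpr unsigned long base = 0x1000;  // address of expression 0 (hypothetical)
constexpr unsigned long sp = 0x0FF0;    // last expression; stack grows down
constexpr unsigned long limit = sp - kSystemPointerSize;
// Expressions live at 0x1000, 0x0FF8 and 0x0FF0.
static_assert((base - limit) / kSystemPointerSize == 3, "");
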
-Object* StandardFrame::GetParameter(int index) const {
+Object StandardFrame::GetParameter(int index) const {
// StandardFrame does not define any parameters.
UNREACHABLE();
}
@@ -825,7 +850,7 @@ void StandardFrame::IterateCompiledFrame(RootVisitor* v) const {
isolate()->wasm_engine()->code_manager()->LookupCode(inner_pointer);
SafepointEntry safepoint_entry;
uint32_t stack_slots;
- Code* code = nullptr;
+ Code code;
bool has_tagged_params = false;
if (wasm_code != nullptr) {
SafepointTable table(wasm_code->instruction_start(),
@@ -850,7 +875,7 @@ void StandardFrame::IterateCompiledFrame(RootVisitor* v) const {
stack_slots = code->stack_slots();
has_tagged_params = code->has_tagged_params();
}
- uint32_t slot_space = stack_slots * kPointerSize;
+ uint32_t slot_space = stack_slots * kSystemPointerSize;
// Determine the fixed header and spill slot area size.
int frame_header_size = StandardFrameConstants::kFixedFrameSizeFromFp;
@@ -898,18 +923,11 @@ void StandardFrame::IterateCompiledFrame(RootVisitor* v) const {
slot_space -=
(frame_header_size + StandardFrameConstants::kFixedFrameSizeAboveFp);
- Object** frame_header_base = &Memory<Object*>(fp() - frame_header_size);
- Object** frame_header_limit =
- &Memory<Object*>(fp() - StandardFrameConstants::kCPSlotSize);
- Object** parameters_base = &Memory<Object*>(sp());
- Object** parameters_limit = frame_header_base - slot_space / kPointerSize;
-
- // Visit the parameters that may be on top of the saved registers.
- if (safepoint_entry.argument_count() > 0) {
- v->VisitRootPointers(Root::kTop, nullptr, parameters_base,
- parameters_base + safepoint_entry.argument_count());
- parameters_base += safepoint_entry.argument_count();
- }
+ FullObjectSlot frame_header_base(&Memory<Address>(fp() - frame_header_size));
+ FullObjectSlot frame_header_limit(
+ &Memory<Address>(fp() - StandardFrameConstants::kCPSlotSize));
+ FullObjectSlot parameters_base(&Memory<Address>(sp()));
+ FullObjectSlot parameters_limit(frame_header_base.address() - slot_space);
// Skip saved double registers.
if (safepoint_entry.has_doubles()) {
@@ -917,7 +935,7 @@ void StandardFrame::IterateCompiledFrame(RootVisitor* v) const {
DCHECK(!isolate()->serializer_enabled());
parameters_base +=
RegisterConfiguration::Default()->num_allocatable_double_registers() *
- kDoubleSize / kPointerSize;
+ kDoubleSize / kSystemPointerSize;
}
// Visit the registers that contain pointers if any.
@@ -953,7 +971,7 @@ void StandardFrame::IterateCompiledFrame(RootVisitor* v) const {
}
// For the off-heap code cases, we can skip this.
- if (code != nullptr) {
+ if (!code.is_null()) {
// Visit the return address in the callee and incoming arguments.
IteratePc(v, pc_address(), constant_pool_address(), code);
}
@@ -967,7 +985,7 @@ void StandardFrame::IterateCompiledFrame(RootVisitor* v) const {
void StubFrame::Iterate(RootVisitor* v) const { IterateCompiledFrame(v); }
-Code* StubFrame::unchecked_code() const {
+Code StubFrame::unchecked_code() const {
return isolate()->FindCodeObject(pc());
}
@@ -982,7 +1000,7 @@ int StubFrame::GetNumberOfIncomingArguments() const {
}
int StubFrame::LookupExceptionHandlerInTable(int* stack_slots) {
- Code* code = LookupCode();
+ Code code = LookupCode();
DCHECK(code->is_turbofanned());
DCHECK_EQ(code->kind(), Code::BUILTIN);
HandlerTable table(code);
@@ -993,11 +1011,10 @@ int StubFrame::LookupExceptionHandlerInTable(int* stack_slots) {
void OptimizedFrame::Iterate(RootVisitor* v) const { IterateCompiledFrame(v); }
-void JavaScriptFrame::SetParameterValue(int index, Object* value) const {
- Memory<Object*>(GetParameterSlot(index)) = value;
+void JavaScriptFrame::SetParameterValue(int index, Object value) const {
+ Memory<Address>(GetParameterSlot(index)) = value->ptr();
}
-
bool JavaScriptFrame::IsConstructor() const {
Address fp = caller_fp();
if (has_adapted_arguments()) {
@@ -1009,16 +1026,12 @@ bool JavaScriptFrame::IsConstructor() const {
bool JavaScriptFrame::HasInlinedFrames() const {
- std::vector<SharedFunctionInfo*> functions;
+ std::vector<SharedFunctionInfo> functions;
GetFunctions(&functions);
return functions.size() > 1;
}
-
-Code* JavaScriptFrame::unchecked_code() const {
- return function()->code();
-}
-
+Code JavaScriptFrame::unchecked_code() const { return function()->code(); }
int JavaScriptFrame::GetNumberOfIncomingArguments() const {
DCHECK(can_access_heap_objects() &&
@@ -1027,7 +1040,7 @@ int JavaScriptFrame::GetNumberOfIncomingArguments() const {
}
int OptimizedFrame::GetNumberOfIncomingArguments() const {
- Code* code = LookupCode();
+ Code code = LookupCode();
if (code->kind() == Code::BUILTIN) {
return static_cast<int>(
Memory<intptr_t>(fp() + OptimizedBuiltinFrameConstants::kArgCOffset));
@@ -1041,7 +1054,7 @@ Address JavaScriptFrame::GetCallerStackPointer() const {
}
void JavaScriptFrame::GetFunctions(
- std::vector<SharedFunctionInfo*>* functions) const {
+ std::vector<SharedFunctionInfo>* functions) const {
DCHECK(functions->empty());
functions->push_back(function()->shared());
}
@@ -1049,7 +1062,7 @@ void JavaScriptFrame::GetFunctions(
void JavaScriptFrame::GetFunctions(
std::vector<Handle<SharedFunctionInfo>>* functions) const {
DCHECK(functions->empty());
- std::vector<SharedFunctionInfo*> raw_functions;
+ std::vector<SharedFunctionInfo> raw_functions;
GetFunctions(&raw_functions);
for (const auto& raw_function : raw_functions) {
functions->push_back(
@@ -1059,20 +1072,20 @@ void JavaScriptFrame::GetFunctions(
void JavaScriptFrame::Summarize(std::vector<FrameSummary>* functions) const {
DCHECK(functions->empty());
- Code* code = LookupCode();
+ Code code = LookupCode();
int offset = static_cast<int>(pc() - code->InstructionStart());
- AbstractCode* abstract_code = AbstractCode::cast(code);
+ AbstractCode abstract_code = AbstractCode::cast(code);
FrameSummary::JavaScriptFrameSummary summary(isolate(), receiver(),
function(), abstract_code,
offset, IsConstructor());
functions->push_back(summary);
}
-JSFunction* JavaScriptFrame::function() const {
+JSFunction JavaScriptFrame::function() const {
return JSFunction::cast(function_slot_object());
}
-Object* JavaScriptFrame::unchecked_function() const {
+Object JavaScriptFrame::unchecked_function() const {
// During deoptimization of an optimized function, we may have yet to
// materialize some closures on the stack. The arguments marker object
// marks this case.
@@ -1081,16 +1094,16 @@ Object* JavaScriptFrame::unchecked_function() const {
return function_slot_object();
}
-Object* JavaScriptFrame::receiver() const { return GetParameter(-1); }
+Object JavaScriptFrame::receiver() const { return GetParameter(-1); }
-Object* JavaScriptFrame::context() const {
+Object JavaScriptFrame::context() const {
const int offset = StandardFrameConstants::kContextOffset;
- Object* maybe_result = Memory<Object*>(fp() + offset);
+ Object maybe_result(Memory<Address>(fp() + offset));
DCHECK(!maybe_result->IsSmi());
return maybe_result;
}
-Script* JavaScriptFrame::script() const {
+Script JavaScriptFrame::script() const {
return Script::cast(function()->shared()->script());
}
@@ -1101,23 +1114,23 @@ int JavaScriptFrame::LookupExceptionHandlerInTable(
return -1;
}
-void JavaScriptFrame::PrintFunctionAndOffset(JSFunction* function,
- AbstractCode* code,
- int code_offset, FILE* file,
+void JavaScriptFrame::PrintFunctionAndOffset(JSFunction function,
+ AbstractCode code, int code_offset,
+ FILE* file,
bool print_line_number) {
PrintF(file, "%s", function->IsOptimized() ? "*" : "~");
function->PrintName(file);
PrintF(file, "+%d", code_offset);
if (print_line_number) {
- SharedFunctionInfo* shared = function->shared();
+ SharedFunctionInfo shared = function->shared();
int source_pos = code->SourcePosition(code_offset);
- Object* maybe_script = shared->script();
+ Object maybe_script = shared->script();
if (maybe_script->IsScript()) {
- Script* script = Script::cast(maybe_script);
+ Script script = Script::cast(maybe_script);
int line = script->GetLineNumber(source_pos) + 1;
- Object* script_name_raw = script->name();
+ Object script_name_raw = script->name();
if (script_name_raw->IsString()) {
- String* script_name = String::cast(script->name());
+ String script_name = String::cast(script->name());
std::unique_ptr<char[]> c_script_name =
script_name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
PrintF(file, " at %s:%d", c_script_name.get(), line);
@@ -1139,13 +1152,13 @@ void JavaScriptFrame::PrintTop(Isolate* isolate, FILE* file, bool print_args,
if (it.frame()->is_java_script()) {
JavaScriptFrame* frame = it.frame();
if (frame->IsConstructor()) PrintF(file, "new ");
- JSFunction* function = frame->function();
+ JSFunction function = frame->function();
int code_offset = 0;
if (frame->is_interpreted()) {
InterpretedFrame* iframe = reinterpret_cast<InterpretedFrame*>(frame);
code_offset = iframe->GetBytecodeOffset();
} else {
- Code* code = frame->unchecked_code();
+ Code code = frame->unchecked_code();
code_offset = static_cast<int>(frame->pc() - code->InstructionStart());
}
PrintFunctionAndOffset(function, function->abstract_code(), code_offset,
@@ -1169,20 +1182,20 @@ void JavaScriptFrame::PrintTop(Isolate* isolate, FILE* file, bool print_args,
}
}
-void JavaScriptFrame::CollectFunctionAndOffsetForICStats(JSFunction* function,
- AbstractCode* code,
+void JavaScriptFrame::CollectFunctionAndOffsetForICStats(JSFunction function,
+ AbstractCode code,
int code_offset) {
auto ic_stats = ICStats::instance();
ICInfo& ic_info = ic_stats->Current();
- SharedFunctionInfo* shared = function->shared();
+ SharedFunctionInfo shared = function->shared();
ic_info.function_name = ic_stats->GetOrCacheFunctionName(function);
ic_info.script_offset = code_offset;
int source_pos = code->SourcePosition(code_offset);
- Object* maybe_script = shared->script();
+ Object maybe_script = shared->script();
if (maybe_script->IsScript()) {
- Script* script = Script::cast(maybe_script);
+ Script script = Script::cast(maybe_script);
ic_info.line_num = script->GetLineNumber(source_pos) + 1;
ic_info.script_name = ic_stats->GetOrCacheScriptName(script);
}
@@ -1197,13 +1210,13 @@ void JavaScriptFrame::CollectTopFrameForICStats(Isolate* isolate) {
if (it.frame()->is_java_script()) {
JavaScriptFrame* frame = it.frame();
if (frame->IsConstructor()) ic_info.is_constructor = true;
- JSFunction* function = frame->function();
+ JSFunction function = frame->function();
int code_offset = 0;
if (frame->is_interpreted()) {
InterpretedFrame* iframe = reinterpret_cast<InterpretedFrame*>(frame);
code_offset = iframe->GetBytecodeOffset();
} else {
- Code* code = frame->unchecked_code();
+ Code code = frame->unchecked_code();
code_offset = static_cast<int>(frame->pc() - code->InstructionStart());
}
CollectFunctionAndOffsetForICStats(function, function->abstract_code(),
@@ -1214,8 +1227,8 @@ void JavaScriptFrame::CollectTopFrameForICStats(Isolate* isolate) {
}
}
-Object* JavaScriptFrame::GetParameter(int index) const {
- return Memory<Object*>(GetParameterSlot(index));
+Object JavaScriptFrame::GetParameter(int index) const {
+ return Object(Memory<Address>(GetParameterSlot(index)));
}
int JavaScriptFrame::ComputeParametersCount() const {
@@ -1227,38 +1240,38 @@ int JavaScriptBuiltinContinuationFrame::ComputeParametersCount() const {
// register.
DCHECK_EQ(RegisterConfiguration::Default()->GetAllocatableGeneralCode(0),
kJavaScriptCallArgCountRegister.code());
- Object* argc_object =
- Memory<Object*>(fp() + BuiltinContinuationFrameConstants::kArgCOffset);
+ Object argc_object(
+ Memory<Address>(fp() + BuiltinContinuationFrameConstants::kArgCOffset));
return Smi::ToInt(argc_object);
}
intptr_t JavaScriptBuiltinContinuationFrame::GetSPToFPDelta() const {
Address height_slot =
fp() + BuiltinContinuationFrameConstants::kFrameSPtoFPDeltaAtDeoptimize;
- intptr_t height = Smi::ToInt(*reinterpret_cast<Smi**>(height_slot));
+ intptr_t height = Smi::ToInt(Smi(Memory<Address>(height_slot)));
return height;
}
-Object* JavaScriptBuiltinContinuationFrame::context() const {
- return Memory<Object*>(
- fp() + BuiltinContinuationFrameConstants::kBuiltinContextOffset);
+Object JavaScriptBuiltinContinuationFrame::context() const {
+ return Object(Memory<Address>(
+ fp() + BuiltinContinuationFrameConstants::kBuiltinContextOffset));
}
void JavaScriptBuiltinContinuationWithCatchFrame::SetException(
- Object* exception) {
+ Object exception) {
Address exception_argument_slot =
fp() + JavaScriptFrameConstants::kLastParameterOffset +
- kPointerSize; // Skip over return value slot.
+ kSystemPointerSize; // Skip over return value slot.
// Only allow setting exception if previous value was the hole.
CHECK_EQ(ReadOnlyRoots(isolate()).the_hole_value(),
- Memory<Object*>(exception_argument_slot));
- Memory<Object*>(exception_argument_slot) = exception;
+ Object(Memory<Address>(exception_argument_slot)));
+ Memory<Address>(exception_argument_slot) = exception->ptr();
}
FrameSummary::JavaScriptFrameSummary::JavaScriptFrameSummary(
- Isolate* isolate, Object* receiver, JSFunction* function,
- AbstractCode* abstract_code, int code_offset, bool is_constructor)
+ Isolate* isolate, Object receiver, JSFunction function,
+ AbstractCode abstract_code, int code_offset, bool is_constructor)
: FrameSummaryBase(isolate, FrameSummary::JAVA_SCRIPT),
receiver_(receiver, isolate),
function_(function, isolate),
@@ -1450,15 +1463,15 @@ void OptimizedFrame::Summarize(std::vector<FrameSummary>* frames) const {
// Delegate to JS frame in absence of turbofan deoptimization.
// TODO(turbofan): Revisit once we support deoptimization across the board.
- Code* code = LookupCode();
+ Code code = LookupCode();
if (code->kind() == Code::BUILTIN) {
return JavaScriptFrame::Summarize(frames);
}
int deopt_index = Safepoint::kNoDeoptimizationIndex;
- DeoptimizationData* const data = GetDeoptimizationData(&deopt_index);
+ DeoptimizationData const data = GetDeoptimizationData(&deopt_index);
if (deopt_index == Safepoint::kNoDeoptimizationIndex) {
- CHECK_NULL(data);
+ CHECK(data.is_null());
FATAL("Missing deoptimization information for OptimizedFrame::Summarize.");
}
@@ -1530,7 +1543,7 @@ int OptimizedFrame::LookupExceptionHandlerInTable(
// to use FrameSummary to find the corresponding code offset in unoptimized
// code to perform prediction there.
DCHECK_NULL(prediction);
- Code* code = LookupCode();
+ Code code = LookupCode();
HandlerTable table(code);
int pc_offset = static_cast<int>(pc() - code->InstructionStart());
if (stack_slots) *stack_slots = code->stack_slots();
@@ -1546,12 +1559,12 @@ int OptimizedFrame::LookupExceptionHandlerInTable(
return table.LookupReturn(pc_offset);
}
-DeoptimizationData* OptimizedFrame::GetDeoptimizationData(
+DeoptimizationData OptimizedFrame::GetDeoptimizationData(
int* deopt_index) const {
DCHECK(is_optimized());
- JSFunction* opt_function = function();
- Code* code = opt_function->code();
+ JSFunction opt_function = function();
+ Code code = opt_function->code();
// The code object may have been replaced by lazy deoptimization. Fall
// back to a slow search in this case to find the original optimized
@@ -1559,49 +1572,51 @@ DeoptimizationData* OptimizedFrame::GetDeoptimizationData(
if (!code->contains(pc())) {
code = isolate()->heap()->GcSafeFindCodeForInnerPointer(pc());
}
- DCHECK_NOT_NULL(code);
+ DCHECK(!code.is_null());
DCHECK(code->kind() == Code::OPTIMIZED_FUNCTION);
SafepointEntry safepoint_entry = code->GetSafepointEntry(pc());
- *deopt_index = safepoint_entry.deoptimization_index();
- if (*deopt_index != Safepoint::kNoDeoptimizationIndex) {
+ if (safepoint_entry.has_deoptimization_index()) {
+ *deopt_index = safepoint_entry.deoptimization_index();
return DeoptimizationData::cast(code->deoptimization_data());
}
- return nullptr;
+ *deopt_index = Safepoint::kNoDeoptimizationIndex;
+ return DeoptimizationData();
}
-Object* OptimizedFrame::receiver() const {
- Code* code = LookupCode();
+Object OptimizedFrame::receiver() const {
+ Code code = LookupCode();
if (code->kind() == Code::BUILTIN) {
Address argc_ptr = fp() + OptimizedBuiltinFrameConstants::kArgCOffset;
intptr_t argc = *reinterpret_cast<intptr_t*>(argc_ptr);
intptr_t args_size =
- (StandardFrameConstants::kFixedSlotCountAboveFp + argc) * kPointerSize;
+ (StandardFrameConstants::kFixedSlotCountAboveFp + argc) *
+ kSystemPointerSize;
Address receiver_ptr = fp() + args_size;
- return *reinterpret_cast<Object**>(receiver_ptr);
+ return *FullObjectSlot(receiver_ptr);
} else {
return JavaScriptFrame::receiver();
}
}
void OptimizedFrame::GetFunctions(
- std::vector<SharedFunctionInfo*>* functions) const {
+ std::vector<SharedFunctionInfo>* functions) const {
DCHECK(functions->empty());
DCHECK(is_optimized());
// Delegate to JS frame in absence of turbofan deoptimization.
// TODO(turbofan): Revisit once we support deoptimization across the board.
- Code* code = LookupCode();
+ Code code = LookupCode();
if (code->kind() == Code::BUILTIN) {
return JavaScriptFrame::GetFunctions(functions);
}
DisallowHeapAllocation no_gc;
int deopt_index = Safepoint::kNoDeoptimizationIndex;
- DeoptimizationData* const data = GetDeoptimizationData(&deopt_index);
- DCHECK_NOT_NULL(data);
+ DeoptimizationData const data = GetDeoptimizationData(&deopt_index);
+ DCHECK(!data.is_null());
DCHECK_NE(Safepoint::kNoDeoptimizationIndex, deopt_index);
- FixedArray* const literal_array = data->LiteralArray();
+ FixedArray const literal_array = data->LiteralArray();
TranslationIterator it(data->TranslationByteArray(),
data->TranslationIndex(deopt_index)->value());
@@ -1623,7 +1638,7 @@ void OptimizedFrame::GetFunctions(
jsframe_count--;
// The second operand of the frame points to the function.
- Object* shared = literal_array->get(it.Next());
+ Object shared = literal_array->get(it.Next());
functions->push_back(SharedFunctionInfo::cast(shared));
// Skip over remaining operands to advance to the next opcode.
@@ -1635,34 +1650,32 @@ void OptimizedFrame::GetFunctions(
}
}
-
int OptimizedFrame::StackSlotOffsetRelativeToFp(int slot_index) {
return StandardFrameConstants::kCallerSPOffset -
- ((slot_index + 1) * kPointerSize);
+ ((slot_index + 1) * kSystemPointerSize);
}
-
-Object* OptimizedFrame::StackSlotAt(int index) const {
- return Memory<Object*>(fp() + StackSlotOffsetRelativeToFp(index));
+Object OptimizedFrame::StackSlotAt(int index) const {
+ return Object(Memory<Address>(fp() + StackSlotOffsetRelativeToFp(index)));
}
int InterpretedFrame::position() const {
- AbstractCode* code = AbstractCode::cast(GetBytecodeArray());
+ AbstractCode code = AbstractCode::cast(GetBytecodeArray());
int code_offset = GetBytecodeOffset();
return code->SourcePosition(code_offset);
}
int InterpretedFrame::LookupExceptionHandlerInTable(
int* context_register, HandlerTable::CatchPrediction* prediction) {
- HandlerTable table(function()->shared()->GetBytecodeArray());
+ HandlerTable table(GetBytecodeArray());
return table.LookupRange(GetBytecodeOffset(), context_register, prediction);
}
int InterpretedFrame::GetBytecodeOffset() const {
const int index = InterpreterFrameConstants::kBytecodeOffsetExpressionIndex;
- DCHECK_EQ(
- InterpreterFrameConstants::kBytecodeOffsetFromFp,
- InterpreterFrameConstants::kExpressionsOffset - index * kPointerSize);
+ DCHECK_EQ(InterpreterFrameConstants::kBytecodeOffsetFromFp,
+ InterpreterFrameConstants::kExpressionsOffset -
+ index * kSystemPointerSize);
int raw_offset = Smi::ToInt(GetExpression(index));
return raw_offset - BytecodeArray::kHeaderSize + kHeapObjectTag;
}
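
The value stored in the frame is an offset into the BytecodeArray object itself, so GetBytecodeOffset strips the header (and re-adds the heap-object tag) while PatchBytecodeOffset does the inverse. A round-trip sketch with assumed constants:

constexpr int kHeapObjectTag = 1;             // V8's tag for heap objects
constexpr int kBytecodeArrayHeaderSize = 48;  // assumed, not the real value

constexpr int ToRawOffset(int bytecode_offset) {  // as in PatchBytecodeOffset
  return bytecode_offset + kBytecodeArrayHeaderSize - kHeapObjectTag;
}
constexpr int FromRawOffset(int raw_offset) {  // as in GetBytecodeOffset
  return raw_offset - kBytecodeArrayHeaderSize + kHeapObjectTag;
}

static_assert(FromRawOffset(ToRawOffset(10)) == 10, "exact inverses");
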
@@ -1670,60 +1683,59 @@ int InterpretedFrame::GetBytecodeOffset() const {
int InterpretedFrame::GetBytecodeOffset(Address fp) {
const int offset = InterpreterFrameConstants::kExpressionsOffset;
const int index = InterpreterFrameConstants::kBytecodeOffsetExpressionIndex;
- DCHECK_EQ(
- InterpreterFrameConstants::kBytecodeOffsetFromFp,
- InterpreterFrameConstants::kExpressionsOffset - index * kPointerSize);
- Address expression_offset = fp + offset - index * kPointerSize;
- int raw_offset = Smi::ToInt(Memory<Object*>(expression_offset));
+ DCHECK_EQ(InterpreterFrameConstants::kBytecodeOffsetFromFp,
+ InterpreterFrameConstants::kExpressionsOffset -
+ index * kSystemPointerSize);
+ Address expression_offset = fp + offset - index * kSystemPointerSize;
+ int raw_offset = Smi::ToInt(Object(Memory<Address>(expression_offset)));
return raw_offset - BytecodeArray::kHeaderSize + kHeapObjectTag;
}
void InterpretedFrame::PatchBytecodeOffset(int new_offset) {
const int index = InterpreterFrameConstants::kBytecodeOffsetExpressionIndex;
- DCHECK_EQ(
- InterpreterFrameConstants::kBytecodeOffsetFromFp,
- InterpreterFrameConstants::kExpressionsOffset - index * kPointerSize);
+ DCHECK_EQ(InterpreterFrameConstants::kBytecodeOffsetFromFp,
+ InterpreterFrameConstants::kExpressionsOffset -
+ index * kSystemPointerSize);
int raw_offset = new_offset + BytecodeArray::kHeaderSize - kHeapObjectTag;
SetExpression(index, Smi::FromInt(raw_offset));
}
-BytecodeArray* InterpretedFrame::GetBytecodeArray() const {
+BytecodeArray InterpretedFrame::GetBytecodeArray() const {
const int index = InterpreterFrameConstants::kBytecodeArrayExpressionIndex;
- DCHECK_EQ(
- InterpreterFrameConstants::kBytecodeArrayFromFp,
- InterpreterFrameConstants::kExpressionsOffset - index * kPointerSize);
+ DCHECK_EQ(InterpreterFrameConstants::kBytecodeArrayFromFp,
+ InterpreterFrameConstants::kExpressionsOffset -
+ index * kSystemPointerSize);
return BytecodeArray::cast(GetExpression(index));
}
-void InterpretedFrame::PatchBytecodeArray(BytecodeArray* bytecode_array) {
+void InterpretedFrame::PatchBytecodeArray(BytecodeArray bytecode_array) {
const int index = InterpreterFrameConstants::kBytecodeArrayExpressionIndex;
- DCHECK_EQ(
- InterpreterFrameConstants::kBytecodeArrayFromFp,
- InterpreterFrameConstants::kExpressionsOffset - index * kPointerSize);
+ DCHECK_EQ(InterpreterFrameConstants::kBytecodeArrayFromFp,
+ InterpreterFrameConstants::kExpressionsOffset -
+ index * kSystemPointerSize);
SetExpression(index, bytecode_array);
}
-Object* InterpretedFrame::ReadInterpreterRegister(int register_index) const {
+Object InterpretedFrame::ReadInterpreterRegister(int register_index) const {
const int index = InterpreterFrameConstants::kRegisterFileExpressionIndex;
- DCHECK_EQ(
- InterpreterFrameConstants::kRegisterFileFromFp,
- InterpreterFrameConstants::kExpressionsOffset - index * kPointerSize);
+ DCHECK_EQ(InterpreterFrameConstants::kRegisterFileFromFp,
+ InterpreterFrameConstants::kExpressionsOffset -
+ index * kSystemPointerSize);
return GetExpression(index + register_index);
}
void InterpretedFrame::WriteInterpreterRegister(int register_index,
- Object* value) {
+ Object value) {
const int index = InterpreterFrameConstants::kRegisterFileExpressionIndex;
- DCHECK_EQ(
- InterpreterFrameConstants::kRegisterFileFromFp,
- InterpreterFrameConstants::kExpressionsOffset - index * kPointerSize);
+ DCHECK_EQ(InterpreterFrameConstants::kRegisterFileFromFp,
+ InterpreterFrameConstants::kExpressionsOffset -
+ index * kSystemPointerSize);
return SetExpression(index + register_index, value);
}
void InterpretedFrame::Summarize(std::vector<FrameSummary>* functions) const {
DCHECK(functions->empty());
- AbstractCode* abstract_code =
- AbstractCode::cast(function()->shared()->GetBytecodeArray());
+ AbstractCode abstract_code = AbstractCode::cast(GetBytecodeArray());
FrameSummary::JavaScriptFrameSummary summary(
isolate(), receiver(), function(), abstract_code, GetBytecodeOffset(),
IsConstructor());
@@ -1734,7 +1746,7 @@ int ArgumentsAdaptorFrame::GetNumberOfIncomingArguments() const {
return Smi::ToInt(GetExpression(0));
}
-Code* ArgumentsAdaptorFrame::unchecked_code() const {
+Code ArgumentsAdaptorFrame::unchecked_code() const {
return isolate()->builtins()->builtin(
Builtins::kArgumentsAdaptorTrampoline);
}
@@ -1753,7 +1765,7 @@ Address InternalFrame::GetCallerStackPointer() const {
return fp() + StandardFrameConstants::kCallerSPOffset;
}
-Code* InternalFrame::unchecked_code() const { UNREACHABLE(); }
+Code InternalFrame::unchecked_code() const { UNREACHABLE(); }
void WasmCompiledFrame::Print(StringStream* accumulator, PrintMode mode,
int index) const {
@@ -1783,7 +1795,7 @@ void WasmCompiledFrame::Print(StringStream* accumulator, PrintMode mode,
if (mode != OVERVIEW) accumulator->Add("\n");
}
-Code* WasmCompiledFrame::unchecked_code() const {
+Code WasmCompiledFrame::unchecked_code() const {
return isolate()->FindCodeObject(pc());
}
@@ -1799,13 +1811,13 @@ wasm::WasmCode* WasmCompiledFrame::wasm_code() const {
return isolate()->wasm_engine()->code_manager()->LookupCode(pc());
}
-WasmInstanceObject* WasmCompiledFrame::wasm_instance() const {
+WasmInstanceObject WasmCompiledFrame::wasm_instance() const {
const int offset = WasmCompiledFrameConstants::kWasmInstanceOffset;
- Object* instance = Memory<Object*>(fp() + offset);
+ Object instance(Memory<Address>(fp() + offset));
return WasmInstanceObject::cast(instance);
}
-WasmModuleObject* WasmCompiledFrame::module_object() const {
+WasmModuleObject WasmCompiledFrame::module_object() const {
return wasm_instance()->module_object();
}
@@ -1813,7 +1825,7 @@ uint32_t WasmCompiledFrame::function_index() const {
return FrameSummary::GetSingle(this).AsWasmCompiled().function_index();
}
-Script* WasmCompiledFrame::script() const { return module_object()->script(); }
+Script WasmCompiledFrame::script() const { return module_object()->script(); }
int WasmCompiledFrame::position() const {
return FrameSummary::GetSingle(this).SourcePosition();
@@ -1866,7 +1878,7 @@ void WasmInterpreterEntryFrame::Print(StringStream* accumulator, PrintMode mode,
int index) const {
PrintIndex(accumulator, mode, index);
accumulator->Add("WASM INTERPRETER ENTRY [");
- Script* script = this->script();
+ Script script = this->script();
accumulator->PrintName(script->name());
accumulator->Add("]");
if (mode != OVERVIEW) accumulator->Add("\n");
@@ -1885,23 +1897,23 @@ void WasmInterpreterEntryFrame::Summarize(
}
}
-Code* WasmInterpreterEntryFrame::unchecked_code() const { UNREACHABLE(); }
+Code WasmInterpreterEntryFrame::unchecked_code() const { UNREACHABLE(); }
-WasmInstanceObject* WasmInterpreterEntryFrame::wasm_instance() const {
+WasmInstanceObject WasmInterpreterEntryFrame::wasm_instance() const {
const int offset = WasmCompiledFrameConstants::kWasmInstanceOffset;
- Object* instance = Memory<Object*>(fp() + offset);
+ Object instance(Memory<Address>(fp() + offset));
return WasmInstanceObject::cast(instance);
}
-WasmDebugInfo* WasmInterpreterEntryFrame::debug_info() const {
+WasmDebugInfo WasmInterpreterEntryFrame::debug_info() const {
return wasm_instance()->debug_info();
}
-WasmModuleObject* WasmInterpreterEntryFrame::module_object() const {
+WasmModuleObject WasmInterpreterEntryFrame::module_object() const {
return wasm_instance()->module_object();
}
-Script* WasmInterpreterEntryFrame::script() const {
+Script WasmInterpreterEntryFrame::script() const {
return module_object()->script();
}
@@ -1909,7 +1921,7 @@ int WasmInterpreterEntryFrame::position() const {
return FrameSummary::GetBottom(this).AsWasmInterpreted().SourcePosition();
}
-Object* WasmInterpreterEntryFrame::context() const {
+Object WasmInterpreterEntryFrame::context() const {
return wasm_instance()->native_context();
}
@@ -1917,19 +1929,21 @@ Address WasmInterpreterEntryFrame::GetCallerStackPointer() const {
return fp() + ExitFrameConstants::kCallerSPOffset;
}
-WasmInstanceObject* WasmCompileLazyFrame::wasm_instance() const {
+Code WasmCompileLazyFrame::unchecked_code() const { return Code(); }
+
+WasmInstanceObject WasmCompileLazyFrame::wasm_instance() const {
return WasmInstanceObject::cast(*wasm_instance_slot());
}
-Object** WasmCompileLazyFrame::wasm_instance_slot() const {
+FullObjectSlot WasmCompileLazyFrame::wasm_instance_slot() const {
const int offset = WasmCompileLazyFrameConstants::kWasmInstanceOffset;
- return &Memory<Object*>(fp() + offset);
+ return FullObjectSlot(&Memory<Address>(fp() + offset));
}
void WasmCompileLazyFrame::Iterate(RootVisitor* v) const {
const int header_size = WasmCompileLazyFrameConstants::kFixedFrameSizeFromFp;
- Object** base = &Memory<Object*>(sp());
- Object** limit = &Memory<Object*>(fp() - header_size);
+ FullObjectSlot base(&Memory<Address>(sp()));
+ FullObjectSlot limit(&Memory<Address>(fp() - header_size));
v->VisitRootPointers(Root::kTop, nullptr, base, limit);
v->VisitRootPointer(Root::kTop, nullptr, wasm_instance_slot());
}
@@ -1940,10 +1954,9 @@ Address WasmCompileLazyFrame::GetCallerStackPointer() const {
namespace {
-
-void PrintFunctionSource(StringStream* accumulator, SharedFunctionInfo* shared,
- Code* code) {
- if (FLAG_max_stack_trace_source_length != 0 && code != nullptr) {
+void PrintFunctionSource(StringStream* accumulator, SharedFunctionInfo shared,
+ Code code) {
+ if (FLAG_max_stack_trace_source_length != 0 && !code.is_null()) {
std::ostringstream os;
os << "--------- s o u r c e c o d e ---------\n"
<< SourceCodeOf(shared, FLAG_max_stack_trace_source_length)
@@ -1952,7 +1965,6 @@ void PrintFunctionSource(StringStream* accumulator, SharedFunctionInfo* shared,
}
}
-
} // namespace
@@ -1960,13 +1972,13 @@ void JavaScriptFrame::Print(StringStream* accumulator,
PrintMode mode,
int index) const {
DisallowHeapAllocation no_gc;
- Object* receiver = this->receiver();
- JSFunction* function = this->function();
+ Object receiver = this->receiver();
+ JSFunction function = this->function();
accumulator->PrintSecurityTokenIfChanged(function);
PrintIndex(accumulator, mode, index);
PrintFrameKind(accumulator);
- Code* code = nullptr;
+ Code code;
if (IsConstructor()) accumulator->Add("new ");
accumulator->PrintFunction(function, receiver, &code);
accumulator->Add(" [%p]", function);
@@ -1975,22 +1987,23 @@ void JavaScriptFrame::Print(StringStream* accumulator,
// doesn't contain scope info, scope_info will return 0 for the number of
// parameters, stack local variables, context local variables, stack slots,
// or context slots.
- SharedFunctionInfo* shared = function->shared();
- ScopeInfo* scope_info = shared->scope_info();
- Object* script_obj = shared->script();
+ SharedFunctionInfo shared = function->shared();
+ ScopeInfo scope_info = shared->scope_info();
+ Object script_obj = shared->script();
if (script_obj->IsScript()) {
- Script* script = Script::cast(script_obj);
+ Script script = Script::cast(script_obj);
accumulator->Add(" [");
accumulator->PrintName(script->name());
if (is_interpreted()) {
const InterpretedFrame* iframe =
reinterpret_cast<const InterpretedFrame*>(this);
- BytecodeArray* bytecodes = iframe->GetBytecodeArray();
+ BytecodeArray bytecodes = iframe->GetBytecodeArray();
int offset = iframe->GetBytecodeOffset();
int source_pos = AbstractCode::cast(bytecodes)->SourcePosition(offset);
int line = script->GetLineNumber(source_pos) + 1;
- accumulator->Add(":%d] [bytecode=%p offset=%d]", line, bytecodes, offset);
+ accumulator->Add(":%d] [bytecode=%p offset=%d]", line,
+ reinterpret_cast<void*>(bytecodes.ptr()), offset);
} else {
int function_start_pos = shared->StartPosition();
int line = script->GetLineNumber(function_start_pos) + 1;
@@ -2025,12 +2038,12 @@ void JavaScriptFrame::Print(StringStream* accumulator,
int expressions_count = ComputeExpressionsCount();
// Try to get hold of the context of this frame.
- Context* context = nullptr;
- if (this->context() != nullptr && this->context()->IsContext()) {
+ Context context;
+ if (this->context()->IsContext()) {
context = Context::cast(this->context());
while (context->IsWithContext()) {
context = context->previous();
- DCHECK_NOT_NULL(context);
+ DCHECK(!context.is_null());
}
}
@@ -2042,7 +2055,7 @@ void JavaScriptFrame::Print(StringStream* accumulator,
accumulator->Add(" var ");
accumulator->PrintName(scope_info->ContextLocalName(i));
accumulator->Add(" = ");
- if (context != nullptr) {
+ if (!context.is_null()) {
int index = Context::MIN_CONTEXT_SLOTS + i;
if (index < context->length()) {
accumulator->Add("%o", context->get(index));
@@ -2075,7 +2088,7 @@ void ArgumentsAdaptorFrame::Print(StringStream* accumulator,
int index) const {
int actual = ComputeParametersCount();
int expected = -1;
- JSFunction* function = this->function();
+ JSFunction function = this->function();
expected = function->shared()->internal_formal_parameter_count();
PrintIndex(accumulator, mode, index);
@@ -2105,8 +2118,8 @@ void EntryFrame::Iterate(RootVisitor* v) const {
void StandardFrame::IterateExpressions(RootVisitor* v) const {
const int offset = StandardFrameConstants::kLastObjectOffset;
- Object** base = &Memory<Object*>(sp());
- Object** limit = &Memory<Object*>(fp() + offset) + 1;
+ FullObjectSlot base(&Memory<Address>(sp()));
+ FullObjectSlot limit(&Memory<Address>(fp() + offset) + 1);
v->VisitRootPointers(Root::kTop, nullptr, base, limit);
}
@@ -2116,7 +2129,7 @@ void JavaScriptFrame::Iterate(RootVisitor* v) const {
}
void InternalFrame::Iterate(RootVisitor* v) const {
- Code* code = LookupCode();
+ Code code = LookupCode();
IteratePc(v, pc_address(), constant_pool_address(), code);
// Internal frames typically do not receive any arguments, hence their stack
// only contains tagged pointers.
@@ -2129,12 +2142,24 @@ void InternalFrame::Iterate(RootVisitor* v) const {
// -------------------------------------------------------------------------
+namespace {
+
+uint32_t PcAddressForHashing(Isolate* isolate, Address address) {
+ if (InstructionStream::PcIsOffHeap(isolate, address)) {
+ // Ensure that we get predictable hashes for addresses in embedded code.
+ return EmbeddedData::FromBlob(isolate).AddressForHashing(address);
+ }
+ return ObjectAddressForHashing(address);
+}
+
+} // namespace
+
InnerPointerToCodeCache::InnerPointerToCodeCacheEntry*
InnerPointerToCodeCache::GetCacheEntry(Address inner_pointer) {
isolate_->counters()->pc_to_code()->Increment();
DCHECK(base::bits::IsPowerOfTwo(kInnerPointerToCodeCacheSize));
- uint32_t hash = ComputeUnseededHash(
- ObjectAddressForHashing(reinterpret_cast<void*>(inner_pointer)));
+ uint32_t hash =
+ ComputeUnseededHash(PcAddressForHashing(isolate_, inner_pointer));
uint32_t index = hash & (kInnerPointerToCodeCacheSize - 1);
InnerPointerToCodeCacheEntry* entry = cache(index);
if (entry->inner_pointer == inner_pointer) {
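
GetCacheEntry above is a direct-mapped lookup: the power-of-two table size (checked by the DCHECK) lets the index be computed by masking rather than a modulo. A standalone sketch of the same pattern, with stand-in types:

#include <array>
#include <cstdint>

using Address = uintptr_t;

struct Entry {
  Address inner_pointer = 0;
  const void* code = nullptr;  // stand-in for the cached Code/safepoint data
};

constexpr uint32_t kCacheSize = 1024;
static_assert((kCacheSize & (kCacheSize - 1)) == 0, "must be a power of two");

std::array<Entry, kCacheSize> cache;

Entry* GetCacheEntry(Address inner_pointer, uint32_t hash) {
  uint32_t index = hash & (kCacheSize - 1);  // cheap modulo via masking
  Entry* entry = &cache[index];
  if (entry->inner_pointer != inner_pointer) {
    // Miss: the real code recomputes the Code object and safepoint entry
    // here before refilling the slot.
    entry->inner_pointer = inner_pointer;
    entry->code = nullptr;
  }
  return entry;
}
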
diff --git a/deps/v8/src/frames.h b/deps/v8/src/frames.h
index a8c0989036..6672d7b3bc 100644
--- a/deps/v8/src/frames.h
+++ b/deps/v8/src/frames.h
@@ -20,6 +20,7 @@ class WasmCode;
class AbstractCode;
class Debug;
class ExternalCallbackScope;
+class InnerPointerToCodeCache;
class Isolate;
class ObjectVisitor;
class RootVisitor;
@@ -30,41 +31,13 @@ class WasmDebugInfo;
class WasmInstanceObject;
class WasmModuleObject;
-class InnerPointerToCodeCache {
- public:
- struct InnerPointerToCodeCacheEntry {
- Address inner_pointer;
- Code* code;
- SafepointEntry safepoint_entry;
- };
-
- explicit InnerPointerToCodeCache(Isolate* isolate) : isolate_(isolate) {
- Flush();
- }
-
- void Flush() { memset(static_cast<void*>(&cache_[0]), 0, sizeof(cache_)); }
-
- InnerPointerToCodeCacheEntry* GetCacheEntry(Address inner_pointer);
-
- private:
- InnerPointerToCodeCacheEntry* cache(int index) { return &cache_[index]; }
-
- Isolate* isolate_;
-
- static const int kInnerPointerToCodeCacheSize = 1024;
- InnerPointerToCodeCacheEntry cache_[kInnerPointerToCodeCacheSize];
-
- DISALLOW_COPY_AND_ASSIGN(InnerPointerToCodeCache);
-};
-
-
class StackHandlerConstants : public AllStatic {
public:
- static const int kNextOffset = 0 * kPointerSize;
- static const int kPaddingOffset = 1 * kPointerSize;
+ static const int kNextOffset = 0 * kSystemPointerSize;
+ static const int kPaddingOffset = 1 * kSystemPointerSize;
- static const int kSize = kPaddingOffset + kPointerSize;
- static const int kSlotCount = kSize >> kPointerSizeLog2;
+ static const int kSize = kPaddingOffset + kSystemPointerSize;
+ static const int kSlotCount = kSize >> kSystemPointerSizeLog2;
};
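
The slot count above is kSize expressed in pointer-sized units; with the 8-byte pointers assumed throughout these sketches:

constexpr int kSystemPointerSize = 8;      // assumed
constexpr int kSystemPointerSizeLog2 = 3;  // log2 of the above
constexpr int kNextOffset = 0 * kSystemPointerSize;
constexpr int kPaddingOffset = 1 * kSystemPointerSize;
constexpr int kSize = kPaddingOffset + kSystemPointerSize;  // 16 bytes
static_assert((kSize >> kSystemPointerSizeLog2) == 2,
              "a stack handler occupies two pointer-sized slots");
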
class StackHandler {
@@ -75,6 +48,10 @@ class StackHandler {
// Get the next stack handler in the chain.
inline StackHandler* next() const;
+ // Get the next stack handler, as an Address. This is safe to use even
+ // when the next handler is null.
+ inline Address next_address() const;
+
// Conversion support.
static inline StackHandler* FromAddress(Address address);
@@ -185,7 +162,7 @@ class StackFrame {
// Copy constructor; it breaks the connection to host iterator
// (as an iterator usually lives on stack).
- StackFrame(const StackFrame& original) {
+ StackFrame(const StackFrame& original) V8_NOEXCEPT {
this->state_ = original.state_;
this->iterator_ = nullptr;
this->isolate_ = original.isolate_;
@@ -269,14 +246,14 @@ class StackFrame {
// Get the code associated with this frame.
// This method could be called during marking phase of GC.
- virtual Code* unchecked_code() const = 0;
+ virtual Code unchecked_code() const = 0;
// Search for the code associated with this frame.
- Code* LookupCode() const;
+ Code LookupCode() const;
virtual void Iterate(RootVisitor* v) const = 0;
static void IteratePc(RootVisitor* v, Address* pc_address,
- Address* constant_pool_address, Code* holder);
+ Address* constant_pool_address, Code holder);
// Sets a callback function for return-address rewriting profilers
// to resolve the location of a return address to the location of the
@@ -335,7 +312,7 @@ class NativeFrame : public StackFrame {
public:
Type type() const override { return NATIVE; }
- Code* unchecked_code() const override { return nullptr; }
+ Code unchecked_code() const override;
// Garbage collection support.
void Iterate(RootVisitor* v) const override {}
@@ -356,7 +333,7 @@ class EntryFrame: public StackFrame {
public:
Type type() const override { return ENTRY; }
- Code* unchecked_code() const override;
+ Code unchecked_code() const override;
// Garbage collection support.
void Iterate(RootVisitor* v) const override;
@@ -385,7 +362,7 @@ class ConstructEntryFrame : public EntryFrame {
public:
Type type() const override { return CONSTRUCT_ENTRY; }
- Code* unchecked_code() const override;
+ Code unchecked_code() const override;
static ConstructEntryFrame* cast(StackFrame* frame) {
DCHECK(frame->is_construct_entry());
@@ -405,9 +382,9 @@ class ExitFrame: public StackFrame {
public:
Type type() const override { return EXIT; }
- Code* unchecked_code() const override;
+ Code unchecked_code() const override;
- Object*& code_slot() const;
+ Address& code_slot() const;
// Garbage collection support.
void Iterate(RootVisitor* v) const override;
@@ -448,8 +425,8 @@ class BuiltinExitFrame : public ExitFrame {
return static_cast<BuiltinExitFrame*>(frame);
}
- JSFunction* function() const;
- Object* receiver() const;
+ JSFunction function() const;
+ Object receiver() const;
bool IsConstructor() const;
@@ -460,13 +437,13 @@ class BuiltinExitFrame : public ExitFrame {
inline explicit BuiltinExitFrame(StackFrameIteratorBase* iterator);
private:
- Object* GetParameter(int i) const;
+ Object GetParameter(int i) const;
int ComputeParametersCount() const;
- inline Object* receiver_slot_object() const;
- inline Object* argc_slot_object() const;
- inline Object* target_slot_object() const;
- inline Object* new_target_slot_object() const;
+ inline Object receiver_slot_object() const;
+ inline Object argc_slot_object() const;
+ inline Object target_slot_object() const;
+ inline Object new_target_slot_object() const;
friend class StackFrameIteratorBase;
};
@@ -501,8 +478,8 @@ class FrameSummary {
class JavaScriptFrameSummary : public FrameSummaryBase {
public:
- JavaScriptFrameSummary(Isolate* isolate, Object* receiver,
- JSFunction* function, AbstractCode* abstract_code,
+ JavaScriptFrameSummary(Isolate* isolate, Object receiver,
+ JSFunction function, AbstractCode abstract_code,
int code_offset, bool is_constructor);
Handle<Object> receiver() const { return receiver_; }
@@ -631,18 +608,18 @@ class StandardFrame : public StackFrame {
bool is_standard() const override { return true; }
// Accessors.
- virtual Object* receiver() const;
- virtual Script* script() const;
- virtual Object* context() const;
+ virtual Object receiver() const;
+ virtual Script script() const;
+ virtual Object context() const;
virtual int position() const;
// Access the expressions in the stack frame including locals.
- inline Object* GetExpression(int index) const;
- inline void SetExpression(int index, Object* value);
+ inline Object GetExpression(int index) const;
+ inline void SetExpression(int index, Object value);
int ComputeExpressionsCount() const;
// Access the parameters.
- virtual Object* GetParameter(int index) const;
+ virtual Object GetParameter(int index) const;
virtual int ComputeParametersCount() const;
// Check if this frame is a constructor frame invoked through 'new'.
@@ -705,21 +682,21 @@ class JavaScriptFrame : public StandardFrame {
void Summarize(std::vector<FrameSummary>* frames) const override;
// Accessors.
- virtual JSFunction* function() const;
- Object* unchecked_function() const;
- Object* receiver() const override;
- Object* context() const override;
- Script* script() const override;
+ virtual JSFunction function() const;
+ Object unchecked_function() const;
+ Object receiver() const override;
+ Object context() const override;
+ Script script() const override;
- inline void set_receiver(Object* value);
+ inline void set_receiver(Object value);
// Access the parameters.
inline Address GetParameterSlot(int index) const;
- Object* GetParameter(int index) const override;
+ Object GetParameter(int index) const override;
int ComputeParametersCount() const override;
// Debugger access.
- void SetParameterValue(int index, Object* value) const;
+ void SetParameterValue(int index, Object value) const;
// Check if this frame is a constructor frame invoked through 'new'.
bool IsConstructor() const override;
@@ -741,10 +718,10 @@ class JavaScriptFrame : public StandardFrame {
int index) const override;
// Determine the code for the frame.
- Code* unchecked_code() const override;
+ Code unchecked_code() const override;
// Return a list with {SharedFunctionInfo} objects of this frame.
- virtual void GetFunctions(std::vector<SharedFunctionInfo*>* functions) const;
+ virtual void GetFunctions(std::vector<SharedFunctionInfo>* functions) const;
void GetFunctions(std::vector<Handle<SharedFunctionInfo>>* functions) const;
@@ -765,15 +742,15 @@ class JavaScriptFrame : public StandardFrame {
return static_cast<JavaScriptFrame*>(frame);
}
- static void PrintFunctionAndOffset(JSFunction* function, AbstractCode* code,
+ static void PrintFunctionAndOffset(JSFunction function, AbstractCode code,
int code_offset, FILE* file,
bool print_line_number);
static void PrintTop(Isolate* isolate, FILE* file, bool print_args,
bool print_line_number);
- static void CollectFunctionAndOffsetForICStats(JSFunction* function,
- AbstractCode* code,
+ static void CollectFunctionAndOffsetForICStats(JSFunction function,
+ AbstractCode code,
int code_offset);
static void CollectTopFrameForICStats(Isolate* isolate);
@@ -787,7 +764,7 @@ class JavaScriptFrame : public StandardFrame {
virtual void PrintFrameKind(StringStream* accumulator) const {}
private:
- inline Object* function_slot_object() const;
+ inline Object function_slot_object() const;
friend class StackFrameIteratorBase;
};
@@ -801,7 +778,7 @@ class StubFrame : public StandardFrame {
void Iterate(RootVisitor* v) const override;
// Determine the code for the frame.
- Code* unchecked_code() const override;
+ Code unchecked_code() const override;
// Lookup exception handler for current {pc}, returns -1 if none found. Only
// TurboFan stub frames are supported. Also returns data associated with the
@@ -830,7 +807,7 @@ class OptimizedFrame : public JavaScriptFrame {
// Return a list with {SharedFunctionInfo} objects of this frame.
// The functions are ordered bottom-to-top (i.e. functions.last()
// is the top-most activation)
- void GetFunctions(std::vector<SharedFunctionInfo*>* functions) const override;
+ void GetFunctions(std::vector<SharedFunctionInfo>* functions) const override;
void Summarize(std::vector<FrameSummary>* frames) const override;
@@ -838,9 +815,9 @@ class OptimizedFrame : public JavaScriptFrame {
int LookupExceptionHandlerInTable(
int* data, HandlerTable::CatchPrediction* prediction) override;
- DeoptimizationData* GetDeoptimizationData(int* deopt_index) const;
+ DeoptimizationData GetDeoptimizationData(int* deopt_index) const;
- Object* receiver() const override;
+ Object receiver() const override;
static int StackSlotOffsetRelativeToFp(int slot_index);
@@ -852,7 +829,7 @@ class OptimizedFrame : public JavaScriptFrame {
private:
friend class StackFrameIteratorBase;
- Object* StackSlotAt(int index) const;
+ Object StackSlotAt(int index) const;
};
@@ -875,15 +852,15 @@ class InterpretedFrame : public JavaScriptFrame {
void PatchBytecodeOffset(int new_offset);
// Returns the frame's current bytecode array.
- BytecodeArray* GetBytecodeArray() const;
+ BytecodeArray GetBytecodeArray() const;
// Updates the frame's BytecodeArray with |bytecode_array|. Used by the
// debugger to swap execution onto a BytecodeArray patched with breakpoints.
- void PatchBytecodeArray(BytecodeArray* bytecode_array);
+ void PatchBytecodeArray(BytecodeArray bytecode_array);
// Access to the interpreter register file for this frame.
- Object* ReadInterpreterRegister(int register_index) const;
- void WriteInterpreterRegister(int register_index, Object* value);
+ Object ReadInterpreterRegister(int register_index) const;
+ void WriteInterpreterRegister(int register_index, Object value);
// Build a list with summaries for this frame including all inlined frames.
void Summarize(std::vector<FrameSummary>* frames) const override;
@@ -913,7 +890,7 @@ class ArgumentsAdaptorFrame: public JavaScriptFrame {
Type type() const override { return ARGUMENTS_ADAPTOR; }
// Determine the code for the frame.
- Code* unchecked_code() const override;
+ Code unchecked_code() const override;
static ArgumentsAdaptorFrame* cast(StackFrame* frame) {
DCHECK(frame->is_arguments_adaptor());
@@ -970,13 +947,13 @@ class WasmCompiledFrame final : public StandardFrame {
int LookupExceptionHandlerInTable(int* data);
// Determine the code for the frame.
- Code* unchecked_code() const override;
+ Code unchecked_code() const override;
// Accessors.
- WasmInstanceObject* wasm_instance() const;
+ WasmInstanceObject wasm_instance() const;
wasm::WasmCode* wasm_code() const;
uint32_t function_index() const;
- Script* script() const override;
+ Script script() const override;
int position() const override;
bool at_to_number_conversion() const;
@@ -994,7 +971,7 @@ class WasmCompiledFrame final : public StandardFrame {
private:
friend class StackFrameIteratorBase;
- WasmModuleObject* module_object() const;
+ WasmModuleObject module_object() const;
};
class WasmInterpreterEntryFrame final : public StandardFrame {
@@ -1011,15 +988,15 @@ class WasmInterpreterEntryFrame final : public StandardFrame {
void Summarize(std::vector<FrameSummary>* frames) const override;
// Determine the code for the frame.
- Code* unchecked_code() const override;
+ Code unchecked_code() const override;
// Accessors.
- WasmDebugInfo* debug_info() const;
- WasmInstanceObject* wasm_instance() const;
+ WasmDebugInfo debug_info() const;
+ WasmInstanceObject wasm_instance() const;
- Script* script() const override;
+ Script script() const override;
int position() const override;
- Object* context() const override;
+ Object context() const override;
static WasmInterpreterEntryFrame* cast(StackFrame* frame) {
DCHECK(frame->is_wasm_interpreter_entry());
@@ -1033,7 +1010,7 @@ class WasmInterpreterEntryFrame final : public StandardFrame {
private:
friend class StackFrameIteratorBase;
- WasmModuleObject* module_object() const;
+ WasmModuleObject module_object() const;
};
class WasmToJsFrame : public StubFrame {
@@ -1073,9 +1050,9 @@ class WasmCompileLazyFrame : public StandardFrame {
public:
Type type() const override { return WASM_COMPILE_LAZY; }
- Code* unchecked_code() const override { return nullptr; }
- WasmInstanceObject* wasm_instance() const;
- Object** wasm_instance_slot() const;
+ Code unchecked_code() const override;
+ WasmInstanceObject wasm_instance() const;
+ FullObjectSlot wasm_instance_slot() const;
// Garbage collection support.
void Iterate(RootVisitor* v) const override;
@@ -1102,7 +1079,7 @@ class InternalFrame: public StandardFrame {
void Iterate(RootVisitor* v) const override;
// Determine the code for the frame.
- Code* unchecked_code() const override;
+ Code unchecked_code() const override;
static InternalFrame* cast(StackFrame* frame) {
DCHECK(frame->is_internal());
@@ -1165,7 +1142,7 @@ class JavaScriptBuiltinContinuationFrame : public JavaScriptFrame {
int ComputeParametersCount() const override;
intptr_t GetSPToFPDelta() const;
- Object* context() const override;
+ Object context() const override;
protected:
inline explicit JavaScriptBuiltinContinuationFrame(
@@ -1189,7 +1166,7 @@ class JavaScriptBuiltinContinuationWithCatchFrame
// Patch in the exception object at the appropriate location into the stack
// frame.
- void SetException(Object* exception);
+ void SetException(Object exception);
protected:
inline explicit JavaScriptBuiltinContinuationWithCatchFrame(
diff --git a/deps/v8/src/futex-emulation.cc b/deps/v8/src/futex-emulation.cc
index c1dd523984..1cd856541b 100644
--- a/deps/v8/src/futex-emulation.cc
+++ b/deps/v8/src/futex-emulation.cc
@@ -27,17 +27,13 @@ base::LazyInstance<FutexWaitList>::type FutexEmulation::wait_list_ =
void FutexWaitListNode::NotifyWake() {
// Lock the FutexEmulation mutex before notifying. We know that the mutex
// will have been unlocked if we are currently waiting on the condition
- // variable.
- //
- // The mutex may also not be locked if the other thread is currently handling
- // interrupts, or if FutexEmulation::Wait was just called and the mutex
- // hasn't been locked yet. In either of those cases, we set the interrupted
- // flag to true, which will be tested after the mutex is re-locked.
- base::LockGuard<base::Mutex> lock_guard(FutexEmulation::mutex_.Pointer());
- if (waiting_) {
- cond_.NotifyOne();
- interrupted_ = true;
- }
+ // variable. The mutex will not be locked if FutexEmulation::Wait hasn't
+ // locked it yet. In that case, we set the interrupted_ flag to true, which
+ // will be tested after the mutex is locked by a future wait.
+ base::MutexGuard lock_guard(FutexEmulation::mutex_.Pointer());
+ // If the node is not waiting, this notification has no effect.
+ cond_.NotifyOne();
+ interrupted_ = true;
}
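// Editorial sketch of the notification protocol described in the comment
// above, using std::mutex/std::condition_variable as stand-ins for V8's
// base::Mutex/base::ConditionVariable (all names invented):
#include <condition_variable>
#include <mutex>

std::mutex g_wait_mutex;  // shared by all waiters, like FutexEmulation::mutex_

struct WaiterSketch {
  std::condition_variable cond_;
  bool interrupted_ = false;
};

void NotifyWakeSketch(WaiterSketch* w) {
  // Taking the shared mutex first guarantees the notification cannot race
  // with a waiter that is about to block on cond_.
  std::lock_guard<std::mutex> guard(g_wait_mutex);
  w->cond_.notify_one();   // no effect if nobody is waiting yet
  w->interrupted_ = true;  // tested once a future Wait re-locks the mutex
}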
@@ -80,25 +76,51 @@ void AtomicsWaitWakeHandle::Wake() {
// The split lock by itself isn’t an issue, as long as the caller properly
// synchronizes this with the closing `AtomicsWaitCallback`.
{
- base::LockGuard<base::Mutex> lock_guard(FutexEmulation::mutex_.Pointer());
+ base::MutexGuard lock_guard(FutexEmulation::mutex_.Pointer());
stopped_ = true;
}
isolate_->futex_wait_list_node()->NotifyWake();
}
-Object* FutexEmulation::Wait(Isolate* isolate,
- Handle<JSArrayBuffer> array_buffer, size_t addr,
- int32_t value, double rel_timeout_ms) {
- DCHECK_LT(addr, array_buffer->byte_length());
+enum WaitReturnValue : int { kOk = 0, kNotEqual = 1, kTimedOut = 2 };
+
+Object FutexEmulation::WaitJs(Isolate* isolate,
+ Handle<JSArrayBuffer> array_buffer, size_t addr,
+ int32_t value, double rel_timeout_ms) {
+ Object res = Wait32(isolate, array_buffer, addr, value, rel_timeout_ms);
+ if (res->IsSmi()) {
+ int val = Smi::ToInt(res);
+ switch (val) {
+ case WaitReturnValue::kOk:
+ return ReadOnlyRoots(isolate).ok();
+ case WaitReturnValue::kNotEqual:
+ return ReadOnlyRoots(isolate).not_equal();
+ case WaitReturnValue::kTimedOut:
+ return ReadOnlyRoots(isolate).timed_out();
+ default:
+ UNREACHABLE();
+ }
+ }
+ return res;
+}
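// Editorial sketch of the translation WaitJs performs: the shared Wait<T>
// core reports a small integer, and only the JS entry point maps it onto the
// spec-mandated "ok" / "not-equal" / "timed-out" values (rendered here as
// plain strings; all names invented):
#include <cassert>
#include <string>

enum WaitResultSketch : int { kResOk = 0, kResNotEqual = 1, kResTimedOut = 2 };

std::string ToJsResultSketch(int raw) {
  switch (raw) {
    case kResOk:       return "ok";
    case kResNotEqual: return "not-equal";
    case kResTimedOut: return "timed-out";
    default:           assert(false); return "";
  }
}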
- void* backing_store = array_buffer->backing_store();
- int32_t* p =
- reinterpret_cast<int32_t*>(static_cast<int8_t*>(backing_store) + addr);
+Object FutexEmulation::Wait32(Isolate* isolate,
+ Handle<JSArrayBuffer> array_buffer, size_t addr,
+ int32_t value, double rel_timeout_ms) {
+ return Wait<int32_t>(isolate, array_buffer, addr, value, rel_timeout_ms);
+}
- FutexWaitListNode* node = isolate->futex_wait_list_node();
- node->backing_store_ = backing_store;
- node->wait_addr_ = addr;
- node->waiting_ = true;
+Object FutexEmulation::Wait64(Isolate* isolate,
+ Handle<JSArrayBuffer> array_buffer, size_t addr,
+ int64_t value, double rel_timeout_ms) {
+ return Wait<int64_t>(isolate, array_buffer, addr, value, rel_timeout_ms);
+}
+
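// Editorial sketch of the dispatch pattern introduced here: both public
// entry points funnel into one template, so the 32- and 64-bit paths share
// the timeout/interrupt machinery and differ only in the width of the
// initial memory compare (names invented):
#include <cstdint>

template <typename T>
int WaitOnSketch(const T* addr, T expected) {
  // All of the shared slow-path logic would live here; the only
  // type-dependent step is this compare against the expected value.
  return (*addr != expected) ? 1 /* not-equal */ : 0 /* ok */;
}

int Wait32Sketch(const int32_t* a, int32_t v) { return WaitOnSketch(a, v); }
int Wait64Sketch(const int64_t* a, int64_t v) { return WaitOnSketch(a, v); }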
+template <typename T>
+Object FutexEmulation::Wait(Isolate* isolate,
+ Handle<JSArrayBuffer> array_buffer, size_t addr,
+ T value, double rel_timeout_ms) {
+ DCHECK_LT(addr, array_buffer->byte_length());
bool use_timeout = rel_timeout_ms != V8_INFINITY;
@@ -125,21 +147,28 @@ Object* FutexEmulation::Wait(Isolate* isolate,
addr, value, rel_timeout_ms, &stop_handle);
if (isolate->has_scheduled_exception()) {
- node->waiting_ = false;
return isolate->PromoteScheduledException();
}
- Object* result;
+ Object result;
AtomicsWaitEvent callback_result = AtomicsWaitEvent::kWokenUp;
do { // Not really a loop, just makes it easier to break out early.
- base::LockGuard<base::Mutex> lock_guard(mutex_.Pointer());
+ base::MutexGuard lock_guard(mutex_.Pointer());
+ void* backing_store = array_buffer->backing_store();
+
+ FutexWaitListNode* node = isolate->futex_wait_list_node();
+ node->backing_store_ = backing_store;
+ node->wait_addr_ = addr;
+ node->waiting_ = true;
+
// Reset node->waiting_ = false when leaving this scope (but while
// still holding the lock).
ResetWaitingOnScopeExit reset_waiting(node);
+ T* p = reinterpret_cast<T*>(static_cast<int8_t*>(backing_store) + addr);
if (*p != value) {
- result = ReadOnlyRoots(isolate).not_equal();
+ result = Smi::FromInt(WaitReturnValue::kNotEqual);
callback_result = AtomicsWaitEvent::kNotEqual;
break;
}
@@ -175,7 +204,7 @@ Object* FutexEmulation::Wait(Isolate* isolate,
// notification will wake up the condition variable. node->waiting() will
// be false, so we'll loop and then check interrupts.
if (interrupted) {
- Object* interrupt_object = isolate->stack_guard()->HandleInterrupts();
+ Object interrupt_object = isolate->stack_guard()->HandleInterrupts();
if (interrupt_object->IsException(isolate)) {
result = interrupt_object;
callback_result = AtomicsWaitEvent::kTerminatedExecution;
@@ -197,7 +226,7 @@ Object* FutexEmulation::Wait(Isolate* isolate,
}
if (!node->waiting_) {
- result = ReadOnlyRoots(isolate).ok();
+ result = Smi::FromInt(WaitReturnValue::kOk);
break;
}
@@ -205,7 +234,7 @@ Object* FutexEmulation::Wait(Isolate* isolate,
if (use_timeout) {
current_time = base::TimeTicks::Now();
if (current_time >= timeout_time) {
- result = ReadOnlyRoots(isolate).timed_out();
+ result = Smi::FromInt(WaitReturnValue::kTimedOut);
callback_result = AtomicsWaitEvent::kTimedOut;
break;
}
@@ -236,17 +265,18 @@ Object* FutexEmulation::Wait(Isolate* isolate,
return result;
}
-Object* FutexEmulation::Wake(Handle<JSArrayBuffer> array_buffer, size_t addr,
- uint32_t num_waiters_to_wake) {
+Object FutexEmulation::Wake(Handle<JSArrayBuffer> array_buffer, size_t addr,
+ uint32_t num_waiters_to_wake) {
DCHECK_LT(addr, array_buffer->byte_length());
int waiters_woken = 0;
void* backing_store = array_buffer->backing_store();
- base::LockGuard<base::Mutex> lock_guard(mutex_.Pointer());
+ base::MutexGuard lock_guard(mutex_.Pointer());
FutexWaitListNode* node = wait_list_.Pointer()->head_;
while (node && num_waiters_to_wake > 0) {
- if (backing_store == node->backing_store_ && addr == node->wait_addr_) {
+ if (backing_store == node->backing_store_ && addr == node->wait_addr_ &&
+ node->waiting_) {
node->waiting_ = false;
node->cond_.NotifyOne();
if (num_waiters_to_wake != kWakeAll) {
@@ -261,12 +291,12 @@ Object* FutexEmulation::Wake(Handle<JSArrayBuffer> array_buffer, size_t addr,
return Smi::FromInt(waiters_woken);
}
-Object* FutexEmulation::NumWaitersForTesting(Handle<JSArrayBuffer> array_buffer,
- size_t addr) {
+Object FutexEmulation::NumWaitersForTesting(Handle<JSArrayBuffer> array_buffer,
+ size_t addr) {
DCHECK_LT(addr, array_buffer->byte_length());
void* backing_store = array_buffer->backing_store();
- base::LockGuard<base::Mutex> lock_guard(mutex_.Pointer());
+ base::MutexGuard lock_guard(mutex_.Pointer());
int waiters = 0;
FutexWaitListNode* node = wait_list_.Pointer()->head_;
diff --git a/deps/v8/src/futex-emulation.h b/deps/v8/src/futex-emulation.h
index 80c1a6322b..1cdcac7248 100644
--- a/deps/v8/src/futex-emulation.h
+++ b/deps/v8/src/futex-emulation.h
@@ -118,25 +118,39 @@ class FutexEmulation : public AllStatic {
// |rel_timeout_ms| can be Infinity.
// If woken, return "ok", otherwise return "timed-out". The initial check and
// the decision to wait happen atomically.
- static Object* Wait(Isolate* isolate, Handle<JSArrayBuffer> array_buffer,
- size_t addr, int32_t value, double rel_timeout_ms);
+ static Object WaitJs(Isolate* isolate, Handle<JSArrayBuffer> array_buffer,
+ size_t addr, int32_t value, double rel_timeout_ms);
+
+ // Same as WaitJs above, except that it returns 0 (ok), 1 (not equal), or
+ // 2 (timed out), as expected by Wasm.
+ static Object Wait32(Isolate* isolate, Handle<JSArrayBuffer> array_buffer,
+ size_t addr, int32_t value, double rel_timeout_ms);
+
+ // Same as Wait32 above, except that it checks for an int64_t value in
+ // |array_buffer|.
+ static Object Wait64(Isolate* isolate, Handle<JSArrayBuffer> array_buffer,
+ size_t addr, int64_t value, double rel_timeout_ms);
// Wake |num_waiters_to_wake| threads that are waiting on the given |addr|.
// |num_waiters_to_wake| can be kWakeAll, in which case all waiters are
// woken. The rest of the waiters will continue to wait. The return value is
// the number of woken waiters.
- static Object* Wake(Handle<JSArrayBuffer> array_buffer, size_t addr,
- uint32_t num_waiters_to_wake);
+ static Object Wake(Handle<JSArrayBuffer> array_buffer, size_t addr,
+ uint32_t num_waiters_to_wake);
// Return the number of threads waiting on |addr|. Should only be used for
// testing.
- static Object* NumWaitersForTesting(Handle<JSArrayBuffer> array_buffer,
- size_t addr);
+ static Object NumWaitersForTesting(Handle<JSArrayBuffer> array_buffer,
+ size_t addr);
private:
friend class FutexWaitListNode;
friend class AtomicsWaitWakeHandle;
+ template <typename T>
+ static Object Wait(Isolate* isolate, Handle<JSArrayBuffer> array_buffer,
+ size_t addr, T value, double rel_timeout_ms);
+
// `mutex_` protects the composition of `wait_list_` (i.e. no elements may be
// added or removed without holding this mutex), as well as the `waiting_`
// and `interrupted_` fields for each individual list node that is currently
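// Editorial sketch of the emulation strategy as a whole: a mutex plus a
// condition variable stand in for a real futex, with the initial value check
// done under the lock so that check-and-wait is atomic. Return codes mirror
// Wait32/Wait64 above; everything else is invented for illustration:
#include <chrono>
#include <condition_variable>
#include <cstdint>
#include <mutex>

struct EmulatedFutex {
  std::mutex m;
  std::condition_variable cv;
  bool woken = false;
};

// Returns 0 (ok), 1 (not equal), or 2 (timed out).
int EmulatedWait(EmulatedFutex& f, const int32_t* addr, int32_t expected,
                 std::chrono::milliseconds timeout) {
  std::unique_lock<std::mutex> lock(f.m);
  if (*addr != expected) return 1;  // checked while holding the lock
  return f.cv.wait_for(lock, timeout, [&] { return f.woken; }) ? 0 : 2;
}

void EmulatedWake(EmulatedFutex& f) {
  { std::lock_guard<std::mutex> lock(f.m); f.woken = true; }
  f.cv.notify_all();
}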
diff --git a/deps/v8/src/gdb-jit.cc b/deps/v8/src/gdb-jit.cc
index 712408ab7e..5ed9347e96 100644
--- a/deps/v8/src/gdb-jit.cc
+++ b/deps/v8/src/gdb-jit.cc
@@ -14,7 +14,6 @@
#include "src/frames-inl.h"
#include "src/frames.h"
#include "src/global-handles.h"
-#include "src/messages.h"
#include "src/objects.h"
#include "src/ostreams.h"
#include "src/snapshot/natives.h"
@@ -954,7 +953,7 @@ class CodeDescription {
};
#endif
- CodeDescription(const char* name, Code* code, SharedFunctionInfo* shared,
+ CodeDescription(const char* name, Code code, SharedFunctionInfo shared,
LineInfo* lineinfo)
: name_(name), code_(code), shared_info_(shared), lineinfo_(lineinfo) {}
@@ -969,9 +968,9 @@ class CodeDescription {
return kind == Code::OPTIMIZED_FUNCTION;
}
- bool has_scope_info() const { return shared_info_ != nullptr; }
+ bool has_scope_info() const { return !shared_info_.is_null(); }
- ScopeInfo* scope_info() const {
+ ScopeInfo scope_info() const {
DCHECK(has_scope_info());
return shared_info_->scope_info();
}
@@ -989,10 +988,10 @@ class CodeDescription {
}
bool has_script() {
- return shared_info_ != nullptr && shared_info_->script()->IsScript();
+ return !shared_info_.is_null() && shared_info_->script()->IsScript();
}
- Script* script() { return Script::cast(shared_info_->script()); }
+ Script script() { return Script::cast(shared_info_->script()); }
bool IsLineInfoAvailable() { return lineinfo_ != nullptr; }
@@ -1009,7 +1008,7 @@ class CodeDescription {
#endif
std::unique_ptr<char[]> GetFilename() {
- if (shared_info_ != nullptr) {
+ if (!shared_info_.is_null()) {
return String::cast(script()->name())->ToCString();
} else {
std::unique_ptr<char[]> result(new char[1]);
@@ -1019,7 +1018,7 @@ class CodeDescription {
}
int GetScriptLineNumber(int pos) {
- if (shared_info_ != nullptr) {
+ if (!shared_info_.is_null()) {
return script()->GetLineNumber(pos) + 1;
} else {
return 0;
@@ -1028,8 +1027,8 @@ class CodeDescription {
private:
const char* name_;
- Code* code_;
- SharedFunctionInfo* shared_info_;
+ Code code_;
+ SharedFunctionInfo shared_info_;
LineInfo* lineinfo_;
#if V8_TARGET_ARCH_X64
uintptr_t stack_state_start_addresses_[STACK_STATE_MAX];
@@ -1131,7 +1130,7 @@ class DebugInfoSection : public DebugSection {
w->WriteString("v8value");
if (desc_->has_scope_info()) {
- ScopeInfo* scope = desc_->scope_info();
+ ScopeInfo scope = desc_->scope_info();
w->WriteULEB128(2);
w->WriteString(desc_->name());
w->Write<intptr_t>(desc_->CodeStart());
@@ -1333,7 +1332,7 @@ class DebugAbbrevSection : public DebugSection {
w->WriteULEB128(0);
if (extra_info) {
- ScopeInfo* scope = desc_->scope_info();
+ ScopeInfo scope = desc_->scope_info();
int params = scope->ParameterCount();
int context_slots = scope->ContextLocalCount();
// The real slot ID is internal_slots + context_slot_id.
@@ -1839,7 +1838,7 @@ extern "C" {
JITDescriptor __jit_debug_descriptor = {1, 0, nullptr, nullptr};
#ifdef OBJECT_PRINT
- void __gdb_print_v8_object(Object* object) {
+ void __gdb_print_v8_object(Object object) {
StdoutStream os;
object->Print(os);
os << std::flush;
@@ -2083,8 +2082,7 @@ static void AddJITCodeEntry(CodeMap* map, const AddressRange& range,
RegisterCodeEntry(entry);
}
-
-static void AddCode(const char* name, Code* code, SharedFunctionInfo* shared,
+static void AddCode(const char* name, Code code, SharedFunctionInfo shared,
LineInfo* lineinfo) {
DisallowHeapAllocation no_gc;
@@ -2121,23 +2119,23 @@ static void AddCode(const char* name, Code* code, SharedFunctionInfo* shared,
AddJITCodeEntry(code_map, range, entry, should_dump, name_hint);
}
-
void EventHandler(const v8::JitCodeEvent* event) {
if (!FLAG_gdbjit) return;
if (event->code_type != v8::JitCodeEvent::JIT_CODE) return;
- base::LockGuard<base::Mutex> lock_guard(mutex.Pointer());
+ base::MutexGuard lock_guard(mutex.Pointer());
switch (event->type) {
case v8::JitCodeEvent::CODE_ADDED: {
Address addr = reinterpret_cast<Address>(event->code_start);
- Code* code = Code::GetCodeFromTargetAddress(addr);
+ Isolate* isolate = reinterpret_cast<Isolate*>(event->isolate);
+ Code code = isolate->heap()->GcSafeFindCodeForInnerPointer(addr);
LineInfo* lineinfo = GetLineInfo(addr);
EmbeddedVector<char, 256> buffer;
StringBuilder builder(buffer.start(), buffer.length());
builder.AddSubstring(event->name.str, static_cast<int>(event->name.len));
// It's called UnboundScript in the API but it's a SharedFunctionInfo.
- SharedFunctionInfo* shared = event->script.IsEmpty()
- ? nullptr
- : *Utils::OpenHandle(*event->script);
+ SharedFunctionInfo shared = event->script.IsEmpty()
+ ? SharedFunctionInfo()
+ : *Utils::OpenHandle(*event->script);
AddCode(builder.Finalize(), code, shared, lineinfo);
break;
}
diff --git a/deps/v8/src/global-handles.cc b/deps/v8/src/global-handles.cc
index e8f72c7177..8e1da8f4ca 100644
--- a/deps/v8/src/global-handles.cc
+++ b/deps/v8/src/global-handles.cc
@@ -5,8 +5,11 @@
#include "src/global-handles.h"
#include "src/api-inl.h"
+#include "src/base/compiler-specific.h"
#include "src/cancelable-task.h"
#include "src/objects-inl.h"
+#include "src/objects/slots.h"
+#include "src/task-utils.h"
#include "src/v8.h"
#include "src/visitors.h"
#include "src/vm-state-inl.h"
@@ -14,7 +17,223 @@
namespace v8 {
namespace internal {
-class GlobalHandles::Node {
+namespace {
+
+constexpr size_t kBlockSize = 256;
+
+} // namespace
+
+template <class _NodeType>
+class GlobalHandles::NodeBlock final {
+ public:
+ using BlockType = NodeBlock<_NodeType>;
+ using NodeType = _NodeType;
+
+ V8_INLINE static NodeBlock* From(NodeType* node);
+
+ NodeBlock(GlobalHandles* global_handles,
+ GlobalHandles::NodeSpace<NodeType>* space,
+ NodeBlock* next) V8_NOEXCEPT : next_(next),
+ global_handles_(global_handles),
+ space_(space) {}
+
+ NodeType* at(size_t index) { return &nodes_[index]; }
+ const NodeType* at(size_t index) const { return &nodes_[index]; }
+ GlobalHandles::NodeSpace<NodeType>* space() const { return space_; }
+ GlobalHandles* global_handles() const { return global_handles_; }
+
+ V8_INLINE bool IncreaseUsage();
+ V8_INLINE bool DecreaseUsage();
+
+ V8_INLINE void ListAdd(NodeBlock** top);
+ V8_INLINE void ListRemove(NodeBlock** top);
+
+ NodeBlock* next() const { return next_; }
+ NodeBlock* next_used() const { return next_used_; }
+
+ private:
+ NodeType nodes_[kBlockSize];
+ NodeBlock* const next_;
+ GlobalHandles* const global_handles_;
+ GlobalHandles::NodeSpace<NodeType>* const space_;
+ NodeBlock* next_used_ = nullptr;
+ NodeBlock* prev_used_ = nullptr;
+ uint32_t used_nodes_ = 0;
+
+ DISALLOW_COPY_AND_ASSIGN(NodeBlock);
+};
+
+template <class NodeType>
+GlobalHandles::NodeBlock<NodeType>* GlobalHandles::NodeBlock<NodeType>::From(
+ NodeType* node) {
+ uintptr_t ptr =
+ reinterpret_cast<uintptr_t>(node) - sizeof(NodeType) * node->index();
+ BlockType* block = reinterpret_cast<BlockType*>(ptr);
+ DCHECK_EQ(node, block->at(node->index()));
+ return block;
+}
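// Editorial note: From() above is the classic "container_of" computation.
// Each node records its index within the block, and because nodes_[] is the
// first member of NodeBlock, subtracting index * sizeof(Node) from a node's
// address recovers the enclosing block. Generic sketch (names invented):
#include <cstddef>
#include <cstdint>

template <typename Block, typename Node>
Block* ContainerFromSketch(Node* node, size_t index) {
  // Assumes the node array is the first member of Block, so no extra
  // offsetof adjustment is needed.
  uintptr_t ptr = reinterpret_cast<uintptr_t>(node) - sizeof(Node) * index;
  return reinterpret_cast<Block*>(ptr);
}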
+
+template <class NodeType>
+bool GlobalHandles::NodeBlock<NodeType>::IncreaseUsage() {
+ DCHECK_LT(used_nodes_, kBlockSize);
+ return used_nodes_++ == 0;
+}
+
+template <class NodeType>
+void GlobalHandles::NodeBlock<NodeType>::ListAdd(BlockType** top) {
+ BlockType* old_top = *top;
+ *top = this;
+ next_used_ = old_top;
+ prev_used_ = nullptr;
+ if (old_top != nullptr) {
+ old_top->prev_used_ = this;
+ }
+}
+
+template <class NodeType>
+bool GlobalHandles::NodeBlock<NodeType>::DecreaseUsage() {
+ DCHECK_GT(used_nodes_, 0);
+ return --used_nodes_ == 0;
+}
+
+template <class NodeType>
+void GlobalHandles::NodeBlock<NodeType>::ListRemove(BlockType** top) {
+ if (next_used_ != nullptr) next_used_->prev_used_ = prev_used_;
+ if (prev_used_ != nullptr) prev_used_->next_used_ = next_used_;
+ if (this == *top) {
+ *top = next_used_;
+ }
+}
+
+template <class BlockType>
+class GlobalHandles::NodeIterator final {
+ public:
+ using NodeType = typename BlockType::NodeType;
+
+ // Iterator traits.
+ using iterator_category = std::forward_iterator_tag;
+ using difference_type = std::ptrdiff_t;
+ using value_type = NodeType*;
+ using reference = value_type;
+ using pointer = value_type*;
+
+ explicit NodeIterator(BlockType* block) V8_NOEXCEPT : block_(block) {}
+ NodeIterator(NodeIterator&& other) V8_NOEXCEPT : block_(other.block_),
+ index_(other.index_) {}
+
+ bool operator==(const NodeIterator& other) const {
+ return block_ == other.block_;
+ }
+ bool operator!=(const NodeIterator& other) const {
+ return block_ != other.block_;
+ }
+
+ NodeIterator& operator++() {
+ if (++index_ < kBlockSize) return *this;
+ index_ = 0;
+ block_ = block_->next_used();
+ return *this;
+ }
+
+ NodeType* operator*() { return block_->at(index_); }
+ NodeType* operator->() { return block_->at(index_); }
+
+ private:
+ BlockType* block_ = nullptr;
+ size_t index_ = 0;
+
+ DISALLOW_COPY_AND_ASSIGN(NodeIterator);
+};
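// Editorial note: with NodeSpace exposing begin()/end() built on this
// iterator, the manual it.done()/it.Advance() loops removed later in this
// patch collapse into range-based for, e.g.:
//
//   for (Node* node : *regular_nodes_) {
//     if (node->IsStrongRetainer()) Visit(node);  // Visit is illustrative
//   }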
+
+template <class NodeType>
+class GlobalHandles::NodeSpace final {
+ public:
+ using BlockType = NodeBlock<NodeType>;
+ using iterator = NodeIterator<BlockType>;
+
+ static NodeSpace* From(NodeType* node);
+ static void Release(NodeType* node);
+
+ explicit NodeSpace(GlobalHandles* global_handles) V8_NOEXCEPT
+ : global_handles_(global_handles) {}
+ ~NodeSpace();
+
+ V8_INLINE NodeType* Acquire(Object object);
+
+ iterator begin() { return iterator(first_used_block_); }
+ iterator end() { return iterator(nullptr); }
+
+ private:
+ void PutNodesOnFreeList(BlockType* block);
+ V8_INLINE void Free(NodeType* node);
+
+ GlobalHandles* const global_handles_;
+ BlockType* first_block_ = nullptr;
+ BlockType* first_used_block_ = nullptr;
+ NodeType* first_free_ = nullptr;
+};
+
+template <class NodeType>
+GlobalHandles::NodeSpace<NodeType>::~NodeSpace() {
+ auto* block = first_block_;
+ while (block != nullptr) {
+ auto* tmp = block->next();
+ delete block;
+ block = tmp;
+ }
+}
+
+template <class NodeType>
+NodeType* GlobalHandles::NodeSpace<NodeType>::Acquire(Object object) {
+ if (first_free_ == nullptr) {
+ first_block_ = new BlockType(global_handles_, this, first_block_);
+ PutNodesOnFreeList(first_block_);
+ }
+ DCHECK_NOT_NULL(first_free_);
+ NodeType* node = first_free_;
+ first_free_ = first_free_->next_free();
+ node->Acquire(object);
+ BlockType* block = BlockType::From(node);
+ if (block->IncreaseUsage()) {
+ block->ListAdd(&first_used_block_);
+ }
+ global_handles_->isolate()->counters()->global_handles()->Increment();
+ global_handles_->handles_count_++;
+ DCHECK(node->IsInUse());
+ return node;
+}
+
+template <class NodeType>
+void GlobalHandles::NodeSpace<NodeType>::PutNodesOnFreeList(BlockType* block) {
+ for (int32_t i = kBlockSize - 1; i >= 0; --i) {
+ NodeType* node = block->at(i);
+ const uint8_t index = static_cast<uint8_t>(i);
+ DCHECK_EQ(i, index);
+ node->set_index(index);
+ node->Free(first_free_);
+ first_free_ = node;
+ }
+}
+
+template <class NodeType>
+void GlobalHandles::NodeSpace<NodeType>::Release(NodeType* node) {
+ BlockType* block = BlockType::From(node);
+ block->space()->Free(node);
+}
+
+template <class NodeType>
+void GlobalHandles::NodeSpace<NodeType>::Free(NodeType* node) {
+ node->Release(first_free_);
+ first_free_ = node;
+ BlockType* block = BlockType::From(node);
+ if (block->DecreaseUsage()) {
+ block->ListRemove(&first_used_block_);
+ }
+ global_handles_->isolate()->counters()->global_handles()->Decrement();
+ global_handles_->handles_count_--;
+}
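// Editorial sketch of the intrusive free list NodeSpace maintains: freed
// nodes are threaded through the storage they already occupy, making both
// Acquire (pop) and Free (push) O(1). Names invented for illustration:
struct FreeNodeSketch {
  FreeNodeSketch* next_free = nullptr;
};

class FreeListSketch {
 public:
  FreeNodeSketch* Pop() {
    FreeNodeSketch* n = head_;
    if (n != nullptr) head_ = n->next_free;
    return n;  // caller allocates a fresh block when this is null
  }
  void Push(FreeNodeSketch* n) {
    n->next_free = head_;
    head_ = n;
  }

 private:
  FreeNodeSketch* head_ = nullptr;
};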
+
+class GlobalHandles::Node final {
public:
// State transition diagram:
// FREE -> NORMAL <-> WEAK -> PENDING -> NEAR_DEATH -> { NORMAL, WEAK, FREE }
@@ -28,7 +247,7 @@ class GlobalHandles::Node {
};
// Maps handle location (slot) to the containing node.
- static Node* FromLocation(Object** location) {
+ static Node* FromLocation(Address* location) {
DCHECK_EQ(offsetof(Node, object_), 0);
return reinterpret_cast<Node*>(location);
}
@@ -45,74 +264,54 @@ class GlobalHandles::Node {
Internals::kNodeIsIndependentShift);
STATIC_ASSERT(static_cast<int>(IsActive::kShift) ==
Internals::kNodeIsActiveShift);
+ set_in_new_space_list(false);
}
#ifdef ENABLE_HANDLE_ZAPPING
~Node() {
- // TODO(1428): if it's a weak handle we should have invoked its callback.
- // Zap the values for eager trapping.
- object_ = reinterpret_cast<Object*>(kGlobalHandleZapValue);
- class_id_ = v8::HeapProfiler::kPersistentHandleNoClassId;
- index_ = 0;
- set_independent(false);
- set_active(false);
- set_in_new_space_list(false);
+ ClearFields();
data_.next_free = nullptr;
- weak_callback_ = nullptr;
+ index_ = 0;
}
#endif
- void Initialize(int index, Node** first_free) {
- object_ = reinterpret_cast<Object*>(kGlobalHandleZapValue);
- index_ = static_cast<uint8_t>(index);
- DCHECK(static_cast<int>(index_) == index);
+ void Free(Node* free_list) {
+ ClearFields();
set_state(FREE);
- set_in_new_space_list(false);
- data_.next_free = *first_free;
- *first_free = this;
+ data_.next_free = free_list;
}
- void Acquire(Object* object) {
- DCHECK(state() == FREE);
- object_ = object;
- class_id_ = v8::HeapProfiler::kPersistentHandleNoClassId;
- set_independent(false);
- set_active(false);
+ void Acquire(Object object) {
+ DCHECK(!IsInUse());
+ CheckFieldsAreCleared();
+ object_ = object.ptr();
set_state(NORMAL);
data_.parameter = nullptr;
- weak_callback_ = nullptr;
- IncreaseBlockUses();
+ DCHECK(IsInUse());
}
- void Zap() {
+ void Release(Node* free_list) {
DCHECK(IsInUse());
- // Zap the values for eager trapping.
- object_ = reinterpret_cast<Object*>(kGlobalHandleZapValue);
+ Free(free_list);
+ DCHECK(!IsInUse());
}
- void Release() {
+ void Zap() {
DCHECK(IsInUse());
- set_state(FREE);
// Zap the values for eager trapping.
- object_ = reinterpret_cast<Object*>(kGlobalHandleZapValue);
- class_id_ = v8::HeapProfiler::kPersistentHandleNoClassId;
- set_independent(false);
- set_active(false);
- weak_callback_ = nullptr;
- DecreaseBlockUses();
+ object_ = kGlobalHandleZapValue;
}
// Object slot accessors.
- Object* object() const { return object_; }
- Object** location() { return &object_; }
+ Object object() const { return Object(object_); }
+ FullObjectSlot location() { return FullObjectSlot(&object_); }
const char* label() { return state() == NORMAL ? data_.label : nullptr; }
- Handle<Object> handle() { return Handle<Object>(location()); }
+ Handle<Object> handle() { return Handle<Object>(&object_); }
// Wrapper class ID accessors.
bool has_wrapper_class_id() const {
return class_id_ != v8::HeapProfiler::kPersistentHandleNoClassId;
}
-
uint16_t wrapper_class_id() const { return class_id_; }
// State and flag accessors.
@@ -174,6 +373,12 @@ class GlobalHandles::Node {
return state() == PENDING && IsPhantomResetHandle();
}
+ bool IsPendingFinalizer() const {
+ return state() == PENDING && weakness_type() == FINALIZER_WEAK;
+ }
+
+ bool IsPending() const { return state() == PENDING; }
+
bool IsRetainer() const {
return state() != FREE &&
!(state() == NEAR_DEATH && weakness_type() != FINALIZER_WEAK);
@@ -201,22 +406,20 @@ class GlobalHandles::Node {
return data_.parameter;
}
+ bool has_callback() const { return weak_callback_ != nullptr; }
+
// Accessors for next free node in the free list.
Node* next_free() {
- DCHECK(state() == FREE);
+ DCHECK_EQ(FREE, state());
return data_.next_free;
}
- void set_next_free(Node* value) {
- DCHECK(state() == FREE);
- data_.next_free = value;
- }
void MakeWeak(void* parameter,
WeakCallbackInfo<void>::Callback phantom_callback,
v8::WeakCallbackType type) {
DCHECK_NOT_NULL(phantom_callback);
DCHECK(IsInUse());
- CHECK_NE(object_, reinterpret_cast<Object*>(kGlobalHandleZapValue));
+ CHECK_NE(object_, kGlobalHandleZapValue);
set_state(WEAK);
switch (type) {
case v8::WeakCallbackType::kParameter:
@@ -233,9 +436,9 @@ class GlobalHandles::Node {
weak_callback_ = phantom_callback;
}
- void MakeWeak(Object*** location_addr) {
+ void MakeWeak(Address** location_addr) {
DCHECK(IsInUse());
- CHECK_NE(object_, reinterpret_cast<Object*>(kGlobalHandleZapValue));
+ CHECK_NE(object_, kGlobalHandleZapValue);
set_state(WEAK);
set_weakness_type(PHANTOM_WEAK_RESET_HANDLE);
set_parameter(location_addr);
@@ -256,7 +459,6 @@ class GlobalHandles::Node {
}
void CollectPhantomCallbackData(
-
std::vector<PendingPhantomCallback>* pending_phantom_callbacks) {
DCHECK(weakness_type() == PHANTOM_WEAK ||
weakness_type() == PHANTOM_WEAK_2_EMBEDDER_FIELDS);
@@ -266,17 +468,19 @@ class GlobalHandles::Node {
void* embedder_fields[v8::kEmbedderFieldsInWeakCallback] = {nullptr,
nullptr};
if (weakness_type() != PHANTOM_WEAK && object()->IsJSObject()) {
- auto jsobject = JSObject::cast(object());
+ JSObject jsobject = JSObject::cast(object());
int field_count = jsobject->GetEmbedderFieldCount();
for (int i = 0; i < v8::kEmbedderFieldsInWeakCallback; ++i) {
if (field_count == i) break;
- auto field = jsobject->GetEmbedderField(i);
- if (field->IsSmi()) embedder_fields[i] = field;
+ void* pointer;
+ if (EmbedderDataSlot(jsobject, i).ToAlignedPointer(&pointer)) {
+ embedder_fields[i] = pointer;
+ }
}
}
// Zap with something dangerous.
- *location() = reinterpret_cast<Object*>(0x6057CA11);
+ location().store(Object(0x6057CA11));
pending_phantom_callbacks->push_back(PendingPhantomCallback(
this, weak_callback_, parameter(), embedder_fields));
@@ -288,30 +492,23 @@ class GlobalHandles::Node {
DCHECK(weakness_type() == PHANTOM_WEAK_RESET_HANDLE);
DCHECK(state() == PENDING);
DCHECK_NULL(weak_callback_);
- Object*** handle = reinterpret_cast<Object***>(parameter());
+ Address** handle = reinterpret_cast<Address**>(parameter());
*handle = nullptr;
- Release();
+ NodeSpace<Node>::Release(this);
}
- bool PostGarbageCollectionProcessing(Isolate* isolate) {
- // Handles only weak handles (not phantom) that are dying.
- if (state() != Node::PENDING) return false;
- if (weak_callback_ == nullptr) {
- Release();
- return false;
- }
+ void PostGarbageCollectionProcessing(Isolate* isolate) {
+ // This method invokes a finalizer. Updating the method name would require
+ // adjusting the CFI blacklist, as weak_callback_ is invoked on the wrong
+ // type.
+ CHECK(IsPendingFinalizer());
+ CHECK(!is_active());
set_state(NEAR_DEATH);
-
// Check that we are not passing a finalized external string to
// the callback.
- DCHECK(!object_->IsExternalOneByteString() ||
- ExternalOneByteString::cast(object_)->resource() != nullptr);
- DCHECK(!object_->IsExternalTwoByteString() ||
- ExternalTwoByteString::cast(object_)->resource() != nullptr);
- if (weakness_type() != FINALIZER_WEAK) {
- return false;
- }
-
+ DCHECK(!object()->IsExternalOneByteString() ||
+ ExternalOneByteString::cast(object())->resource() != nullptr);
+ DCHECK(!object()->IsExternalTwoByteString() ||
+ ExternalTwoByteString::cast(object())->resource() != nullptr);
// Leaving V8.
VMState<EXTERNAL> vmstate(isolate);
HandleScope handle_scope(isolate);
@@ -320,23 +517,42 @@ class GlobalHandles::Node {
v8::WeakCallbackInfo<void> data(reinterpret_cast<v8::Isolate*>(isolate),
parameter(), embedder_fields, nullptr);
weak_callback_(data);
-
- // Absence of explicit cleanup or revival of weak handle
- // in most of the cases would lead to memory leak.
- CHECK(state() != NEAR_DEATH);
- return true;
+ // For finalizers the handle must have either been reset or made strong.
+ // Both cases reset the state.
+ CHECK_NE(NEAR_DEATH, state());
}
inline GlobalHandles* GetGlobalHandles();
+ uint8_t index() const { return index_; }
+ void set_index(uint8_t value) { index_ = value; }
+
private:
- inline NodeBlock* FindBlock();
- inline void IncreaseBlockUses();
- inline void DecreaseBlockUses();
+ // Clears the fields that are not used for managing node memory.
+ void ClearFields() {
+ // Zap the values for eager trapping.
+ object_ = kGlobalHandleZapValue;
+ class_id_ = v8::HeapProfiler::kPersistentHandleNoClassId;
+ set_independent(false);
+ set_active(false);
+ weak_callback_ = nullptr;
+ }
+
+ void CheckFieldsAreCleared() {
+ DCHECK_EQ(kGlobalHandleZapValue, object_);
+ DCHECK_EQ(v8::HeapProfiler::kPersistentHandleNoClassId, class_id_);
+ DCHECK(!is_independent());
+ DCHECK(!is_active());
+ DCHECK_EQ(nullptr, weak_callback_);
+ }
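// Editorial sketch: ClearFields()/CheckFieldsAreCleared() form a
// poison-and-verify pair. Released slots are overwritten with the zap
// constant, and Acquire asserts the poison is still intact, so a stale use
// of a freed handle traps deterministically. Generic form of the idiom
// (constant and names invented):
#include <cassert>
#include <cstdint>

constexpr uintptr_t kZapSketch = 0xdeadbeefu;

struct SlotSketch {
  uintptr_t payload = kZapSketch;
  void Clear() { payload = kZapSketch; }
  void Acquire(uintptr_t value) {
    assert(payload == kZapSketch);  // the slot must still carry the poison
    payload = value;
  }
};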
// Storage for object pointer.
- // Placed first to avoid offset computation.
- Object* object_;
+ //
+ // Placed first to avoid offset computation. The stored data is equivalent to
+ // an Object. It is stored as a plain Address for convenience (smallest number
+ // of casts), and because it is a private implementation detail: the public
+ // interface provides type safety.
+ Address object_;
// Next word stores class_id, index, state, and independent.
// Note: the most aligned fields should go first.
@@ -374,162 +590,18 @@ class GlobalHandles::Node {
DISALLOW_COPY_AND_ASSIGN(Node);
};
-
-class GlobalHandles::NodeBlock {
- public:
- static const int kSize = 256;
-
- explicit NodeBlock(GlobalHandles* global_handles, NodeBlock* next)
- : next_(next),
- used_nodes_(0),
- next_used_(nullptr),
- prev_used_(nullptr),
- global_handles_(global_handles) {}
-
- void PutNodesOnFreeList(Node** first_free) {
- for (int i = kSize - 1; i >= 0; --i) {
- nodes_[i].Initialize(i, first_free);
- }
- }
-
- Node* node_at(int index) {
- DCHECK(0 <= index && index < kSize);
- return &nodes_[index];
- }
-
- void IncreaseUses() {
- DCHECK_LT(used_nodes_, kSize);
- if (used_nodes_++ == 0) {
- NodeBlock* old_first = global_handles_->first_used_block_;
- global_handles_->first_used_block_ = this;
- next_used_ = old_first;
- prev_used_ = nullptr;
- if (old_first == nullptr) return;
- old_first->prev_used_ = this;
- }
- }
-
- void DecreaseUses() {
- DCHECK_GT(used_nodes_, 0);
- if (--used_nodes_ == 0) {
- if (next_used_ != nullptr) next_used_->prev_used_ = prev_used_;
- if (prev_used_ != nullptr) prev_used_->next_used_ = next_used_;
- if (this == global_handles_->first_used_block_) {
- global_handles_->first_used_block_ = next_used_;
- }
- }
- }
-
- GlobalHandles* global_handles() { return global_handles_; }
-
- // Next block in the list of all blocks.
- NodeBlock* next() const { return next_; }
-
- // Next/previous block in the list of blocks with used nodes.
- NodeBlock* next_used() const { return next_used_; }
- NodeBlock* prev_used() const { return prev_used_; }
-
- private:
- Node nodes_[kSize];
- NodeBlock* const next_;
- int used_nodes_;
- NodeBlock* next_used_;
- NodeBlock* prev_used_;
- GlobalHandles* global_handles_;
-};
-
-
GlobalHandles* GlobalHandles::Node::GetGlobalHandles() {
- return FindBlock()->global_handles();
-}
-
-
-GlobalHandles::NodeBlock* GlobalHandles::Node::FindBlock() {
- intptr_t ptr = reinterpret_cast<intptr_t>(this);
- ptr = ptr - index_ * sizeof(Node);
- NodeBlock* block = reinterpret_cast<NodeBlock*>(ptr);
- DCHECK(block->node_at(index_) == this);
- return block;
-}
-
-
-void GlobalHandles::Node::IncreaseBlockUses() {
- NodeBlock* node_block = FindBlock();
- node_block->IncreaseUses();
- GlobalHandles* global_handles = node_block->global_handles();
- global_handles->isolate()->counters()->global_handles()->Increment();
- global_handles->number_of_global_handles_++;
-}
-
-
-void GlobalHandles::Node::DecreaseBlockUses() {
- NodeBlock* node_block = FindBlock();
- GlobalHandles* global_handles = node_block->global_handles();
- data_.next_free = global_handles->first_free_;
- global_handles->first_free_ = this;
- node_block->DecreaseUses();
- global_handles->isolate()->counters()->global_handles()->Decrement();
- global_handles->number_of_global_handles_--;
+ return NodeBlock<Node>::From(this)->global_handles();
}
-
-class GlobalHandles::NodeIterator {
- public:
- explicit NodeIterator(GlobalHandles* global_handles)
- : block_(global_handles->first_used_block_),
- index_(0) {}
-
- bool done() const { return block_ == nullptr; }
-
- Node* node() const {
- DCHECK(!done());
- return block_->node_at(index_);
- }
-
- void Advance() {
- DCHECK(!done());
- if (++index_ < NodeBlock::kSize) return;
- index_ = 0;
- block_ = block_->next_used();
- }
-
- private:
- NodeBlock* block_;
- int index_;
-
- DISALLOW_COPY_AND_ASSIGN(NodeIterator);
-};
-
GlobalHandles::GlobalHandles(Isolate* isolate)
: isolate_(isolate),
- number_of_global_handles_(0),
- first_block_(nullptr),
- first_used_block_(nullptr),
- first_free_(nullptr),
- post_gc_processing_count_(0),
- number_of_phantom_handle_resets_(0) {}
-
-GlobalHandles::~GlobalHandles() {
- NodeBlock* block = first_block_;
- while (block != nullptr) {
- NodeBlock* tmp = block->next();
- delete block;
- block = tmp;
- }
- first_block_ = nullptr;
-}
+ regular_nodes_(new NodeSpace<GlobalHandles::Node>(this)) {}
+GlobalHandles::~GlobalHandles() { regular_nodes_.reset(nullptr); }
-Handle<Object> GlobalHandles::Create(Object* value) {
- if (first_free_ == nullptr) {
- first_block_ = new NodeBlock(this, first_block_);
- first_block_->PutNodesOnFreeList(&first_free_);
- }
- DCHECK_NOT_NULL(first_free_);
- // Take the first node in the free list.
- Node* result = first_free_;
- first_free_ = result->next_free();
- result->Acquire(value);
+Handle<Object> GlobalHandles::Create(Object value) {
+ GlobalHandles::Node* result = regular_nodes_->Acquire(value);
if (Heap::InNewSpace(value) && !result->is_in_new_space_list()) {
new_space_nodes_.push_back(result);
result->set_in_new_space_list(true);
@@ -537,60 +609,61 @@ Handle<Object> GlobalHandles::Create(Object* value) {
return result->handle();
}
+Handle<Object> GlobalHandles::Create(Address value) {
+ return Create(Object(value));
+}
-Handle<Object> GlobalHandles::CopyGlobal(Object** location) {
+Handle<Object> GlobalHandles::CopyGlobal(Address* location) {
DCHECK_NOT_NULL(location);
GlobalHandles* global_handles =
Node::FromLocation(location)->GetGlobalHandles();
#ifdef VERIFY_HEAP
if (i::FLAG_verify_heap) {
- (*location)->ObjectVerify(global_handles->isolate());
+ Object(*location)->ObjectVerify(global_handles->isolate());
}
#endif // VERIFY_HEAP
return global_handles->Create(*location);
}
-
-void GlobalHandles::Destroy(Object** location) {
- if (location != nullptr) Node::FromLocation(location)->Release();
+void GlobalHandles::Destroy(Address* location) {
+ if (location != nullptr) {
+ NodeSpace<Node>::Release(Node::FromLocation(location));
+ }
}
-
typedef v8::WeakCallbackInfo<void>::Callback GenericCallback;
-void GlobalHandles::MakeWeak(Object** location, void* parameter,
+void GlobalHandles::MakeWeak(Address* location, void* parameter,
GenericCallback phantom_callback,
v8::WeakCallbackType type) {
Node::FromLocation(location)->MakeWeak(parameter, phantom_callback, type);
}
-void GlobalHandles::MakeWeak(Object*** location_addr) {
+void GlobalHandles::MakeWeak(Address** location_addr) {
Node::FromLocation(*location_addr)->MakeWeak(location_addr);
}
-void* GlobalHandles::ClearWeakness(Object** location) {
+void* GlobalHandles::ClearWeakness(Address* location) {
return Node::FromLocation(location)->ClearWeakness();
}
-void GlobalHandles::AnnotateStrongRetainer(Object** location,
+void GlobalHandles::AnnotateStrongRetainer(Address* location,
const char* label) {
Node::FromLocation(location)->AnnotateStrongRetainer(label);
}
-bool GlobalHandles::IsNearDeath(Object** location) {
+bool GlobalHandles::IsNearDeath(Address* location) {
return Node::FromLocation(location)->IsNearDeath();
}
-
-bool GlobalHandles::IsWeak(Object** location) {
+bool GlobalHandles::IsWeak(Address* location) {
return Node::FromLocation(location)->IsWeak();
}
DISABLE_CFI_PERF
void GlobalHandles::IterateWeakRootsForFinalizers(RootVisitor* v) {
- for (NodeIterator it(this); !it.done(); it.Advance()) {
- Node* node = it.node();
+ for (Node* node : *regular_nodes_) {
if (node->IsWeakRetainer() && node->state() == Node::PENDING) {
DCHECK(!node->IsPhantomCallback());
DCHECK(!node->IsPhantomResetHandle());
@@ -604,8 +677,7 @@ void GlobalHandles::IterateWeakRootsForFinalizers(RootVisitor* v) {
DISABLE_CFI_PERF
void GlobalHandles::IterateWeakRootsForPhantomHandles(
WeakSlotCallbackWithHeap should_reset_handle) {
- for (NodeIterator it(this); !it.done(); it.Advance()) {
- Node* node = it.node();
+ for (Node* node : *regular_nodes_) {
if (node->IsWeakRetainer() &&
should_reset_handle(isolate()->heap(), node->location())) {
if (node->IsPhantomResetHandle()) {
@@ -622,8 +694,7 @@ void GlobalHandles::IterateWeakRootsForPhantomHandles(
void GlobalHandles::IdentifyWeakHandles(
WeakSlotCallbackWithHeap should_reset_handle) {
- for (NodeIterator it(this); !it.done(); it.Advance()) {
- Node* node = it.node();
+ for (Node* node : *regular_nodes_) {
if (node->IsWeak() &&
should_reset_handle(isolate()->heap(), node->location())) {
if (!node->IsPhantomCallback() && !node->IsPhantomResetHandle()) {
@@ -644,22 +715,6 @@ void GlobalHandles::IterateNewSpaceStrongAndDependentRoots(RootVisitor* v) {
}
}
-void GlobalHandles::IterateNewSpaceStrongAndDependentRootsAndIdentifyUnmodified(
- RootVisitor* v, size_t start, size_t end) {
- for (size_t i = start; i < end; ++i) {
- Node* node = new_space_nodes_[i];
- if (node->IsWeak() && !JSObject::IsUnmodifiedApiObject(node->location())) {
- node->set_active(true);
- }
- if (node->IsStrongRetainer() ||
- (node->IsWeakRetainer() && !node->is_independent() &&
- node->is_active())) {
- v->VisitRootPointer(Root::kGlobalHandles, node->label(),
- node->location());
- }
- }
-}
-
void GlobalHandles::IdentifyWeakUnmodifiedObjects(
WeakSlotCallback is_unmodified) {
for (Node* node : new_space_nodes_) {
@@ -703,8 +758,8 @@ void GlobalHandles::IterateNewSpaceWeakUnmodifiedRootsForPhantomHandles(
DCHECK(node->is_in_new_space_list());
if ((node->is_independent() || !node->is_active()) &&
node->IsWeakRetainer() && (node->state() != Node::PENDING)) {
- DCHECK(node->IsPhantomResetHandle() || node->IsPhantomCallback());
if (should_reset_handle(isolate_->heap(), node->location())) {
+ DCHECK(node->IsPhantomResetHandle() || node->IsPhantomCallback());
if (node->IsPhantomResetHandle()) {
node->MarkPending();
node->ResetPhantomHandle();
@@ -746,67 +801,48 @@ void GlobalHandles::InvokeSecondPassPhantomCallbacks() {
}
}
-int GlobalHandles::PostScavengeProcessing(
- const int initial_post_gc_processing_count) {
- int freed_nodes = 0;
+size_t GlobalHandles::PostScavengeProcessing(unsigned post_processing_count) {
+ size_t freed_nodes = 0;
for (Node* node : new_space_nodes_) {
- DCHECK(node->is_in_new_space_list());
- if (!node->IsRetainer()) {
- // Free nodes do not have weak callbacks. Do not use them to compute
- // the freed_nodes.
- continue;
- }
- // Skip dependent or unmodified handles. Their weak callbacks might expect
- // to be
- // called between two global garbage collection callbacks which
- // are not called for minor collections.
- if (!node->is_independent() && (node->is_active())) {
- node->set_active(false);
- continue;
- }
+ // Filter free nodes.
+ if (!node->IsRetainer()) continue;
+
+ // Reset active state for all affected nodes.
node->set_active(false);
- if (node->PostGarbageCollectionProcessing(isolate_)) {
- if (initial_post_gc_processing_count != post_gc_processing_count_) {
- // Weak callback triggered another GC and another round of
- // PostGarbageCollection processing. The current node might
- // have been deleted in that round, so we need to bail out (or
- // restart the processing).
- return freed_nodes;
- }
- }
- if (!node->IsRetainer()) {
- freed_nodes++;
+ if (node->IsPending()) {
+ DCHECK(node->has_callback());
+ DCHECK(node->IsPendingFinalizer());
+ node->PostGarbageCollectionProcessing(isolate_);
}
+ if (InRecursiveGC(post_processing_count)) return freed_nodes;
+
+ if (!node->IsRetainer()) freed_nodes++;
}
return freed_nodes;
}
+size_t GlobalHandles::PostMarkSweepProcessing(unsigned post_processing_count) {
+ size_t freed_nodes = 0;
+ for (Node* node : *regular_nodes_) {
+ // Filter free nodes.
+ if (!node->IsRetainer()) continue;
-int GlobalHandles::PostMarkSweepProcessing(
- const int initial_post_gc_processing_count) {
- int freed_nodes = 0;
- for (NodeIterator it(this); !it.done(); it.Advance()) {
- if (!it.node()->IsRetainer()) {
- // Free nodes do not have weak callbacks. Do not use them to compute
- // the freed_nodes.
- continue;
- }
- it.node()->set_active(false);
- if (it.node()->PostGarbageCollectionProcessing(isolate_)) {
- if (initial_post_gc_processing_count != post_gc_processing_count_) {
- // See the comment above.
- return freed_nodes;
- }
- }
- if (!it.node()->IsRetainer()) {
- freed_nodes++;
+ // Reset active state for all affected nodes.
+ node->set_active(false);
+
+ if (node->IsPending()) {
+ DCHECK(node->has_callback());
+ DCHECK(node->IsPendingFinalizer());
+ node->PostGarbageCollectionProcessing(isolate_);
}
+ if (InRecursiveGC(post_processing_count)) return freed_nodes;
+
+ if (!node->IsRetainer()) freed_nodes++;
}
return freed_nodes;
}
-
void GlobalHandles::UpdateListOfNewSpaceNodes() {
size_t last = 0;
for (Node* node : new_space_nodes_) {
@@ -829,11 +865,8 @@ void GlobalHandles::UpdateListOfNewSpaceNodes() {
new_space_nodes_.shrink_to_fit();
}
-
-int GlobalHandles::DispatchPendingPhantomCallbacks(
- bool synchronous_second_pass) {
- int freed_nodes = 0;
- // Protect against callback modifying pending_phantom_callbacks_.
+size_t GlobalHandles::InvokeFirstPassWeakCallbacks() {
+ size_t freed_nodes = 0;
std::vector<PendingPhantomCallback> pending_phantom_callbacks;
pending_phantom_callbacks.swap(pending_phantom_callbacks_);
{
@@ -846,6 +879,11 @@ int GlobalHandles::DispatchPendingPhantomCallbacks(
freed_nodes++;
}
}
+ return freed_nodes;
+}
+
+void GlobalHandles::InvokeOrScheduleSecondPassPhantomCallbacks(
+ bool synchronous_second_pass) {
if (!second_pass_callbacks_.empty()) {
if (FLAG_optimize_for_size || FLAG_predictable || synchronous_second_pass) {
isolate()->heap()->CallGCPrologueCallbacks(
@@ -857,14 +895,12 @@ int GlobalHandles::DispatchPendingPhantomCallbacks(
second_pass_callbacks_task_posted_ = true;
auto taskrunner = V8::GetCurrentPlatform()->GetForegroundTaskRunner(
reinterpret_cast<v8::Isolate*>(isolate()));
- taskrunner->PostTask(MakeCancelableLambdaTask(
+ taskrunner->PostTask(MakeCancelableTask(
isolate(), [this] { InvokeSecondPassPhantomCallbacksFromTask(); }));
}
}
- return freed_nodes;
}
-
void GlobalHandles::PendingPhantomCallback::Invoke(Isolate* isolate) {
Data::Callback* callback_addr = nullptr;
if (node_ != nullptr) {
@@ -888,73 +924,56 @@ void GlobalHandles::PendingPhantomCallback::Invoke(Isolate* isolate) {
}
}
+bool GlobalHandles::InRecursiveGC(unsigned gc_processing_counter) {
+ return gc_processing_counter != post_gc_processing_count_;
+}
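// Editorial sketch of the recursion guard used throughout this file: each
// top-level round of post-GC processing bumps post_gc_processing_count_, so
// a weak callback that triggers a nested GC changes the counter out from
// under the outer loop, which then bails out early. Generic form (names
// invented):
class GenerationGuardSketch {
 public:
  unsigned Enter() { return ++generation_; }
  bool Invalidated(unsigned token) const { return token != generation_; }

 private:
  unsigned generation_ = 0;
};
// Usage: unsigned token = guard.Enter();
//        for (...) { RunCallback(); if (guard.Invalidated(token)) return; }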
-int GlobalHandles::PostGarbageCollectionProcessing(
+size_t GlobalHandles::PostGarbageCollectionProcessing(
GarbageCollector collector, const v8::GCCallbackFlags gc_callback_flags) {
// Process weak global handle callbacks. This must be done after the
// GC is completely done, because the callbacks may invoke arbitrary
// API functions.
- DCHECK(isolate_->heap()->gc_state() == Heap::NOT_IN_GC);
- const int initial_post_gc_processing_count = ++post_gc_processing_count_;
- int freed_nodes = 0;
+ DCHECK_EQ(Heap::NOT_IN_GC, isolate_->heap()->gc_state());
+ const unsigned post_processing_count = ++post_gc_processing_count_;
+ size_t freed_nodes = 0;
bool synchronous_second_pass =
isolate_->heap()->IsTearingDown() ||
(gc_callback_flags &
(kGCCallbackFlagForced | kGCCallbackFlagCollectAllAvailableGarbage |
kGCCallbackFlagSynchronousPhantomCallbackProcessing)) != 0;
- freed_nodes += DispatchPendingPhantomCallbacks(synchronous_second_pass);
- if (initial_post_gc_processing_count != post_gc_processing_count_) {
- // If the callbacks caused a nested GC, then return. See comment in
- // PostScavengeProcessing.
- return freed_nodes;
- }
- if (Heap::IsYoungGenerationCollector(collector)) {
- freed_nodes += PostScavengeProcessing(initial_post_gc_processing_count);
- } else {
- freed_nodes += PostMarkSweepProcessing(initial_post_gc_processing_count);
- }
- if (initial_post_gc_processing_count != post_gc_processing_count_) {
- // If the callbacks caused a nested GC, then return. See comment in
- // PostScavengeProcessing.
- return freed_nodes;
- }
- if (initial_post_gc_processing_count == post_gc_processing_count_) {
- UpdateListOfNewSpaceNodes();
- }
+ InvokeOrScheduleSecondPassPhantomCallbacks(synchronous_second_pass);
+ if (InRecursiveGC(post_processing_count)) return freed_nodes;
+
+ freed_nodes += Heap::IsYoungGenerationCollector(collector)
+ ? PostScavengeProcessing(post_processing_count)
+ : PostMarkSweepProcessing(post_processing_count);
+ if (InRecursiveGC(post_processing_count)) return freed_nodes;
+
+ UpdateListOfNewSpaceNodes();
return freed_nodes;
}
void GlobalHandles::IterateStrongRoots(RootVisitor* v) {
- for (NodeIterator it(this); !it.done(); it.Advance()) {
- if (it.node()->IsStrongRetainer()) {
- v->VisitRootPointer(Root::kGlobalHandles, it.node()->label(),
- it.node()->location());
+ for (Node* node : *regular_nodes_) {
+ if (node->IsStrongRetainer()) {
+ v->VisitRootPointer(Root::kGlobalHandles, node->label(),
+ node->location());
}
}
}
void GlobalHandles::IterateWeakRoots(RootVisitor* v) {
- for (NodeIterator it(this); !it.done(); it.Advance()) {
- if (it.node()->IsWeak()) {
- v->VisitRootPointer(Root::kGlobalHandles, it.node()->label(),
- it.node()->location());
+ for (Node* node : *regular_nodes_) {
+ if (node->IsWeak()) {
+ v->VisitRootPointer(Root::kGlobalHandles, node->label(),
+ node->location());
}
}
}
DISABLE_CFI_PERF
void GlobalHandles::IterateAllRoots(RootVisitor* v) {
- for (NodeIterator it(this); !it.done(); it.Advance()) {
- if (it.node()->IsRetainer()) {
- v->VisitRootPointer(Root::kGlobalHandles, it.node()->label(),
- it.node()->location());
- }
- }
-}
-
-DISABLE_CFI_PERF
-void GlobalHandles::IterateAllNewSpaceRoots(RootVisitor* v) {
- for (Node* node : new_space_nodes_) {
+ for (Node* node : *regular_nodes_) {
if (node->IsRetainer()) {
v->VisitRootPointer(Root::kGlobalHandles, node->label(),
node->location());
@@ -963,10 +982,8 @@ void GlobalHandles::IterateAllNewSpaceRoots(RootVisitor* v) {
}
DISABLE_CFI_PERF
-void GlobalHandles::IterateNewSpaceRoots(RootVisitor* v, size_t start,
- size_t end) {
- for (size_t i = start; i < end; ++i) {
- Node* node = new_space_nodes_[i];
+void GlobalHandles::IterateAllNewSpaceRoots(RootVisitor* v) {
+ for (Node* node : new_space_nodes_) {
if (node->IsRetainer()) {
v->VisitRootPointer(Root::kGlobalHandles, node->label(),
node->location());
@@ -977,7 +994,7 @@ void GlobalHandles::IterateNewSpaceRoots(RootVisitor* v, size_t start,
DISABLE_CFI_PERF
void GlobalHandles::ApplyPersistentHandleVisitor(
v8::PersistentHandleVisitor* visitor, GlobalHandles::Node* node) {
- v8::Value* value = ToApi<v8::Value>(Handle<Object>(node->location()));
+ v8::Value* value = ToApi<v8::Value>(node->handle());
visitor->VisitPersistentHandle(
reinterpret_cast<v8::Persistent<v8::Value>*>(&value),
node->wrapper_class_id());
@@ -986,9 +1003,9 @@ void GlobalHandles::ApplyPersistentHandleVisitor(
DISABLE_CFI_PERF
void GlobalHandles::IterateAllRootsWithClassIds(
v8::PersistentHandleVisitor* visitor) {
- for (NodeIterator it(this); !it.done(); it.Advance()) {
- if (it.node()->IsRetainer() && it.node()->has_wrapper_class_id()) {
- ApplyPersistentHandleVisitor(visitor, it.node());
+ for (Node* node : *regular_nodes_) {
+ if (node->IsRetainer() && node->has_wrapper_class_id()) {
+ ApplyPersistentHandleVisitor(visitor, node);
}
}
}
@@ -1021,15 +1038,15 @@ void GlobalHandles::RecordStats(HeapStats* stats) {
*stats->pending_global_handle_count = 0;
*stats->near_death_global_handle_count = 0;
*stats->free_global_handle_count = 0;
- for (NodeIterator it(this); !it.done(); it.Advance()) {
+ for (Node* node : *regular_nodes_) {
*stats->global_handle_count += 1;
- if (it.node()->state() == Node::WEAK) {
+ if (node->state() == Node::WEAK) {
*stats->weak_global_handle_count += 1;
- } else if (it.node()->state() == Node::PENDING) {
+ } else if (node->state() == Node::PENDING) {
*stats->pending_global_handle_count += 1;
- } else if (it.node()->state() == Node::NEAR_DEATH) {
+ } else if (node->state() == Node::NEAR_DEATH) {
*stats->near_death_global_handle_count += 1;
- } else if (it.node()->state() == Node::FREE) {
+ } else if (node->state() == Node::FREE) {
*stats->free_global_handle_count += 1;
}
}
@@ -1044,12 +1061,12 @@ void GlobalHandles::PrintStats() {
int near_death = 0;
int destroyed = 0;
- for (NodeIterator it(this); !it.done(); it.Advance()) {
+ for (Node* node : *regular_nodes_) {
total++;
- if (it.node()->state() == Node::WEAK) weak++;
- if (it.node()->state() == Node::PENDING) pending++;
- if (it.node()->state() == Node::NEAR_DEATH) near_death++;
- if (it.node()->state() == Node::FREE) destroyed++;
+ if (node->state() == Node::WEAK) weak++;
+ if (node->state() == Node::PENDING) pending++;
+ if (node->state() == Node::NEAR_DEATH) near_death++;
+ if (node->state() == Node::FREE) destroyed++;
}
PrintF("Global Handle Statistics:\n");
@@ -1064,35 +1081,26 @@ void GlobalHandles::PrintStats() {
void GlobalHandles::Print() {
PrintF("Global handles:\n");
- for (NodeIterator it(this); !it.done(); it.Advance()) {
- PrintF(" handle %p to %p%s\n",
- reinterpret_cast<void*>(it.node()->location()),
- reinterpret_cast<void*>(it.node()->object()),
- it.node()->IsWeak() ? " (weak)" : "");
+ for (Node* node : *regular_nodes_) {
+ PrintF(" handle %p to %p%s\n", node->location().ToVoidPtr(),
+ reinterpret_cast<void*>(node->object()->ptr()),
+ node->IsWeak() ? " (weak)" : "");
}
}
#endif
-void GlobalHandles::TearDown() {}
-
-EternalHandles::EternalHandles() : size_(0) {
- for (unsigned i = 0; i < arraysize(singleton_handles_); i++) {
- singleton_handles_[i] = kInvalidIndex;
- }
-}
-
-
EternalHandles::~EternalHandles() {
- for (Object** block : blocks_) delete[] block;
+ for (Address* block : blocks_) delete[] block;
}
void EternalHandles::IterateAllRoots(RootVisitor* visitor) {
int limit = size_;
- for (Object** block : blocks_) {
+ for (Address* block : blocks_) {
DCHECK_GT(limit, 0);
- visitor->VisitRootPointers(Root::kEternalHandles, nullptr, block,
- block + Min(limit, kSize));
+ visitor->VisitRootPointers(Root::kEternalHandles, nullptr,
+ FullObjectSlot(block),
+ FullObjectSlot(block + Min(limit, kSize)));
limit -= kSize;
}
}
@@ -1100,14 +1108,14 @@ void EternalHandles::IterateAllRoots(RootVisitor* visitor) {
void EternalHandles::IterateNewSpaceRoots(RootVisitor* visitor) {
for (int index : new_space_indices_) {
visitor->VisitRootPointer(Root::kEternalHandles, nullptr,
- GetLocation(index));
+ FullObjectSlot(GetLocation(index)));
}
}
void EternalHandles::PostGarbageCollectionProcessing() {
size_t last = 0;
for (int index : new_space_indices_) {
- if (Heap::InNewSpace(*GetLocation(index))) {
+ if (Heap::InNewSpace(Object(*GetLocation(index)))) {
new_space_indices_[last++] = index;
}
}
@@ -1115,28 +1123,26 @@ void EternalHandles::PostGarbageCollectionProcessing() {
new_space_indices_.resize(last);
}
-
-void EternalHandles::Create(Isolate* isolate, Object* object, int* index) {
+void EternalHandles::Create(Isolate* isolate, Object object, int* index) {
DCHECK_EQ(kInvalidIndex, *index);
- if (object == nullptr) return;
- Object* the_hole = ReadOnlyRoots(isolate).the_hole_value();
+ if (object == Object()) return;
+ Object the_hole = ReadOnlyRoots(isolate).the_hole_value();
DCHECK_NE(the_hole, object);
int block = size_ >> kShift;
int offset = size_ & kMask;
- // need to resize
+ // Need to resize.
if (offset == 0) {
- Object** next_block = new Object*[kSize];
- MemsetPointer(next_block, the_hole, kSize);
+ Address* next_block = new Address[kSize];
+ MemsetPointer(FullObjectSlot(next_block), the_hole, kSize);
blocks_.push_back(next_block);
}
- DCHECK_EQ(the_hole, blocks_[block][offset]);
- blocks_[block][offset] = object;
+ DCHECK_EQ(the_hole->ptr(), blocks_[block][offset]);
+ blocks_[block][offset] = object->ptr();
if (Heap::InNewSpace(object)) {
new_space_indices_.push_back(size_);
}
*index = size_++;
}
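
The block/offset arithmetic in Create() is worth a worked example. With kShift = 8 the table is a vector of 256-slot blocks, and a flat index splits into a block number and an offset (standalone sketch, not V8 code):

    #include <cassert>

    constexpr int kShift = 8;
    constexpr int kSize = 1 << kShift;  // 256 slots per block.
    constexpr int kMask = kSize - 1;    // 0xff

    int main() {
      int index = 300;
      assert(index >> kShift == 1);    // Lives in the second block...
      assert((index & kMask) == 44);   // ...at slot 44 (300 - 256).
      // A fresh block is needed exactly when the offset wraps to zero, i.e.
      // when size_ is a multiple of kSize -- the "if (offset == 0)" test.
      assert((kSize & kMask) == 0);
      return 0;
    }
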
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/global-handles.h b/deps/v8/src/global-handles.h
index d5e5628c3d..d12e0c10fd 100644
--- a/deps/v8/src/global-handles.h
+++ b/deps/v8/src/global-handles.h
@@ -12,6 +12,7 @@
#include "include/v8-profiler.h"
#include "src/handles.h"
+#include "src/objects.h"
#include "src/utils.h"
namespace v8 {
@@ -20,12 +21,6 @@ namespace internal {
class HeapStats;
class RootVisitor;
-// Structure for tracking global handles.
-// A single list keeps all the allocated global handles.
-// Destroyed handles stay in the list but is added to the free list.
-// At GC the destroyed global handles are removed from the free list
-// and deallocated.
-
enum WeaknessType {
// Embedder gets a handle to the dying object.
FINALIZER_WEAK,
@@ -41,26 +36,15 @@ enum WeaknessType {
PHANTOM_WEAK_RESET_HANDLE
};
-class GlobalHandles {
+// Global handles hold handles that are independent of stack-state and can have
+// callbacks and finalizers attached to them.
+class GlobalHandles final {
public:
- ~GlobalHandles();
-
- // Creates a new global handle that is alive until Destroy is called.
- Handle<Object> Create(Object* value);
-
- template <typename T>
- Handle<T> Create(T* value) {
- static_assert(std::is_base_of<Object, T>::value, "static type violation");
- // The compiler should only pick this method if T is not Object.
- static_assert(!std::is_same<Object, T>::value, "compiler error");
- return Handle<T>::cast(Create(static_cast<Object*>(value)));
- }
-
// Copy a global handle
- static Handle<Object> CopyGlobal(Object** location);
+ static Handle<Object> CopyGlobal(Address* location);
// Destroy a global handle.
- static void Destroy(Object** location);
+ static void Destroy(Address* location);
// Make the global handle weak and set the callback parameter for the
// handle. When the garbage collector recognizes that only weak global
@@ -71,53 +55,53 @@ class GlobalHandles {
// GC. For a phantom weak handle the handle is cleared (set to a Smi)
// before the callback is invoked, but the handle can still be identified
// in the callback by using the location() of the handle.
- static void MakeWeak(Object** location, void* parameter,
+ static void MakeWeak(Address* location, void* parameter,
WeakCallbackInfo<void>::Callback weak_callback,
v8::WeakCallbackType type);
- static void MakeWeak(Object*** location_addr);
+ static void MakeWeak(Address** location_addr);
- static void AnnotateStrongRetainer(Object** location, const char* label);
+ static void AnnotateStrongRetainer(Address* location, const char* label);
- void RecordStats(HeapStats* stats);
+ // Clear the weakness of a global handle.
+ static void* ClearWeakness(Address* location);
- // Returns the current number of handles to global objects.
- int global_handles_count() const {
- return number_of_global_handles_;
- }
+ // Tells whether the global handle is near death.
+ static bool IsNearDeath(Address* location);
- size_t NumberOfPhantomHandleResets() {
- return number_of_phantom_handle_resets_;
- }
+ // Tells whether the global handle is weak.
+ static bool IsWeak(Address* location);
- void ResetNumberOfPhantomHandleResets() {
- number_of_phantom_handle_resets_ = 0;
- }
+ explicit GlobalHandles(Isolate* isolate);
+ ~GlobalHandles();
- size_t NumberOfNewSpaceNodes() { return new_space_nodes_.size(); }
+ // Creates a new global handle that is alive until Destroy is called.
+ Handle<Object> Create(Object value);
+ Handle<Object> Create(Address value);
- // Clear the weakness of a global handle.
- static void* ClearWeakness(Object** location);
+ template <typename T>
+ Handle<T> Create(T value) {
+ static_assert(std::is_base_of<Object, T>::value, "static type violation");
+ // The compiler should only pick this method if T is not Object.
+ static_assert(!std::is_same<Object, T>::value, "compiler error");
+ return Handle<T>::cast(Create(Object(value)));
+ }
- // Tells whether global handle is near death.
- static bool IsNearDeath(Object** location);
+ void RecordStats(HeapStats* stats);
- // Tells whether global handle is weak.
- static bool IsWeak(Object** location);
+ size_t InvokeFirstPassWeakCallbacks();
+ void InvokeSecondPassPhantomCallbacks();
// Process pending weak handles.
// Returns the number of freed nodes.
- int PostGarbageCollectionProcessing(
+ size_t PostGarbageCollectionProcessing(
GarbageCollector collector, const v8::GCCallbackFlags gc_callback_flags);
void IterateStrongRoots(RootVisitor* v);
-
void IterateWeakRoots(RootVisitor* v);
-
void IterateAllRoots(RootVisitor* v);
void IterateAllNewSpaceRoots(RootVisitor* v);
- void IterateNewSpaceRoots(RootVisitor* v, size_t start, size_t end);
// Iterates over all handles that have embedder-assigned class ID.
void IterateAllRootsWithClassIds(v8::PersistentHandleVisitor* v);
@@ -139,19 +123,14 @@ class GlobalHandles {
// |should_reset_handle| as pending.
void IdentifyWeakHandles(WeakSlotCallbackWithHeap should_reset_handle);
- // NOTE: Five ...NewSpace... functions below are used during
- // scavenge collections and iterate over sets of handles that are
- // guaranteed to contain all handles holding new space objects (but
- // may also include old space objects).
+ // Note: The following *NewSpace* methods are used by the Scavenger to
+ // identify and process handles in new space. The set of new space handles is
+ // complete, but the methods may encounter handles that are already in old
+ // space.
// Iterates over strong and dependent handles. See the note above.
void IterateNewSpaceStrongAndDependentRoots(RootVisitor* v);
- // Iterates over strong and dependent handles. See the note above.
- // Also marks unmodified nodes in the same iteration.
- void IterateNewSpaceStrongAndDependentRootsAndIdentifyUnmodified(
- RootVisitor* v, size_t start, size_t end);
-
// Marks weak unmodified handles satisfying |is_dead| as pending.
void MarkNewSpaceWeakUnmodifiedObjectsPending(
WeakSlotCallbackWithHeap is_dead);
@@ -166,68 +145,67 @@ class GlobalHandles {
// unmodified
void IdentifyWeakUnmodifiedObjects(WeakSlotCallback is_unmodified);
- // Tear down the global handle structure.
- void TearDown();
+ Isolate* isolate() const { return isolate_; }
- Isolate* isolate() { return isolate_; }
+ // Number of global handles.
+ size_t handles_count() const { return handles_count_; }
+
+ size_t GetAndResetGlobalHandleResetCount() {
+ size_t old = number_of_phantom_handle_resets_;
+ number_of_phantom_handle_resets_ = 0;
+ return old;
+ }
#ifdef DEBUG
void PrintStats();
void Print();
#endif // DEBUG
- void InvokeSecondPassPhantomCallbacks();
-
private:
// Internal node structures.
class Node;
+ template <class NodeType>
class NodeBlock;
+ template <class BlockType>
class NodeIterator;
+ template <class NodeType>
+ class NodeSpace;
class PendingPhantomCallback;
- explicit GlobalHandles(Isolate* isolate);
+ bool InRecursiveGC(unsigned gc_processing_counter);
void InvokeSecondPassPhantomCallbacksFromTask();
- int PostScavengeProcessing(int initial_post_gc_processing_count);
- int PostMarkSweepProcessing(int initial_post_gc_processing_count);
- int DispatchPendingPhantomCallbacks(bool synchronous_second_pass);
+ void InvokeOrScheduleSecondPassPhantomCallbacks(bool synchronous_second_pass);
+ size_t PostScavengeProcessing(unsigned post_processing_count);
+ size_t PostMarkSweepProcessing(unsigned post_processing_count);
+
void UpdateListOfNewSpaceNodes();
+
void ApplyPersistentHandleVisitor(v8::PersistentHandleVisitor* visitor,
Node* node);
- Isolate* isolate_;
-
- // Field always containing the number of handles to global objects.
- int number_of_global_handles_;
-
- // List of all allocated node blocks.
- NodeBlock* first_block_;
-
- // List of node blocks with used nodes.
- NodeBlock* first_used_block_;
-
- // Free list of nodes.
- Node* first_free_;
+ Isolate* const isolate_;
+ std::unique_ptr<NodeSpace<Node>> regular_nodes_;
// Contains all nodes holding new space objects. Note: when the list
// is accessed, some of the objects may have been promoted already.
std::vector<Node*> new_space_nodes_;
- int post_gc_processing_count_;
-
- size_t number_of_phantom_handle_resets_;
+ // Field always containing the number of handles to global objects.
+ size_t handles_count_ = 0;
+ size_t number_of_phantom_handle_resets_ = 0;
std::vector<PendingPhantomCallback> pending_phantom_callbacks_;
std::vector<PendingPhantomCallback> second_pass_callbacks_;
bool second_pass_callbacks_task_posted_ = false;
- friend class Isolate;
+ // Counter for recursive garbage collections during callback processing.
+ unsigned post_gc_processing_count_ = 0;
DISALLOW_COPY_AND_ASSIGN(GlobalHandles);
};
-
-class GlobalHandles::PendingPhantomCallback {
+class GlobalHandles::PendingPhantomCallback final {
public:
typedef v8::WeakCallbackInfo<void> Data;
PendingPhantomCallback(
@@ -241,8 +219,8 @@ class GlobalHandles::PendingPhantomCallback {
void Invoke(Isolate* isolate);
- Node* node() { return node_; }
- Data::Callback callback() { return callback_; }
+ Node* node() const { return node_; }
+ Data::Callback callback() const { return callback_; }
private:
Node* node_;
@@ -251,47 +229,19 @@ class GlobalHandles::PendingPhantomCallback {
void* embedder_fields_[v8::kEmbedderFieldsInWeakCallback];
};
-
-class EternalHandles {
+class EternalHandles final {
public:
- enum SingletonHandle {
- DATE_CACHE_VERSION,
-
- NUMBER_OF_SINGLETON_HANDLES
- };
-
- EternalHandles();
+ EternalHandles() = default;
~EternalHandles();
- int NumberOfHandles() { return size_; }
-
// Create an EternalHandle, overwriting the index.
- void Create(Isolate* isolate, Object* object, int* index);
+ void Create(Isolate* isolate, Object object, int* index);
// Grab the handle for an existing EternalHandle.
inline Handle<Object> Get(int index) {
return Handle<Object>(GetLocation(index));
}
- // Grab the handle for an existing SingletonHandle.
- inline Handle<Object> GetSingleton(SingletonHandle singleton) {
- DCHECK(Exists(singleton));
- return Get(singleton_handles_[singleton]);
- }
-
- // Checks whether a SingletonHandle has been assigned.
- inline bool Exists(SingletonHandle singleton) {
- return singleton_handles_[singleton] != kInvalidIndex;
- }
-
- // Assign a SingletonHandle to an empty slot and returns the handle.
- Handle<Object> CreateSingleton(Isolate* isolate,
- Object* object,
- SingletonHandle singleton) {
- Create(isolate, object, &singleton_handles_[singleton]);
- return Get(singleton_handles_[singleton]);
- }
-
// Iterates over all handles.
void IterateAllRoots(RootVisitor* visitor);
// Iterates over all handles which might be in new space.
@@ -299,27 +249,28 @@ class EternalHandles {
// Rebuilds new space list.
void PostGarbageCollectionProcessing();
+ size_t handles_count() const { return size_; }
+
private:
static const int kInvalidIndex = -1;
static const int kShift = 8;
static const int kSize = 1 << kShift;
static const int kMask = 0xff;
- // Gets the slot for an index
- inline Object** GetLocation(int index) {
+ // Gets the slot for an index. This returns an Address* rather than an
+ // ObjectSlot in order to avoid #including slots.h in this header file.
+ inline Address* GetLocation(int index) {
DCHECK(index >= 0 && index < size_);
return &blocks_[index >> kShift][index & kMask];
}
- int size_;
- std::vector<Object**> blocks_;
+ int size_ = 0;
+ std::vector<Address*> blocks_;
std::vector<int> new_space_indices_;
- int singleton_handles_[NUMBER_OF_SINGLETON_HANDLES];
DISALLOW_COPY_AND_ASSIGN(EternalHandles);
};
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/globals.h b/deps/v8/src/globals.h
index c20e6086ee..d83de13005 100644
--- a/deps/v8/src/globals.h
+++ b/deps/v8/src/globals.h
@@ -12,6 +12,7 @@
#include <ostream>
#include "include/v8-internal.h"
+#include "src/base/atomic-utils.h"
#include "src/base/build_config.h"
#include "src/base/flags.h"
#include "src/base/logging.h"
@@ -96,8 +97,6 @@ class AllStatic {
};
typedef uint8_t byte;
-typedef uintptr_t Address;
-static const Address kNullAddress = 0;
// -----------------------------------------------------------------------------
// Constants
@@ -120,6 +119,7 @@ constexpr uint32_t kMaxUInt32 = 0xFFFFFFFFu;
constexpr int kMinUInt32 = 0;
constexpr int kUInt8Size = sizeof(uint8_t);
+constexpr int kByteSize = sizeof(byte);
constexpr int kCharSize = sizeof(char);
constexpr int kShortSize = sizeof(short); // NOLINT
constexpr int kUInt16Size = sizeof(uint16_t);
@@ -132,18 +132,18 @@ constexpr int kFloatSize = sizeof(float);
constexpr int kDoubleSize = sizeof(double);
constexpr int kIntptrSize = sizeof(intptr_t);
constexpr int kUIntptrSize = sizeof(uintptr_t);
-constexpr int kPointerSize = sizeof(void*);
-constexpr int kPointerHexDigits = kPointerSize == 4 ? 8 : 12;
+constexpr int kSystemPointerSize = sizeof(void*);
+constexpr int kSystemPointerHexDigits = kSystemPointerSize == 4 ? 8 : 12;
#if V8_TARGET_ARCH_X64 && V8_TARGET_ARCH_32_BIT
-constexpr int kRegisterSize = kPointerSize + kPointerSize;
+constexpr int kRegisterSize = kSystemPointerSize + kSystemPointerSize;
#else
-constexpr int kRegisterSize = kPointerSize;
+constexpr int kRegisterSize = kSystemPointerSize;
#endif
constexpr int kPCOnStackSize = kRegisterSize;
constexpr int kFPOnStackSize = kRegisterSize;
#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_IA32
-constexpr int kElidedFrameSlots = kPCOnStackSize / kPointerSize;
+constexpr int kElidedFrameSlots = kPCOnStackSize / kSystemPointerSize;
#else
constexpr int kElidedFrameSlots = 0;
#endif
@@ -151,26 +151,27 @@ constexpr int kElidedFrameSlots = 0;
constexpr int kDoubleSizeLog2 = 3;
#if V8_TARGET_ARCH_ARM64
// ARM64 only supports direct calls within a 128 MB range.
-constexpr size_t kMaxWasmCodeMemory = 128 * MB;
+constexpr size_t kMaxWasmCodeMB = 128;
#else
-constexpr size_t kMaxWasmCodeMemory = 1024 * MB;
+constexpr size_t kMaxWasmCodeMB = 1024;
#endif
+constexpr size_t kMaxWasmCodeMemory = kMaxWasmCodeMB * MB;
#if V8_HOST_ARCH_64_BIT
-constexpr int kPointerSizeLog2 = 3;
+constexpr int kSystemPointerSizeLog2 = 3;
constexpr intptr_t kIntptrSignBit =
static_cast<intptr_t>(uintptr_t{0x8000000000000000});
constexpr uintptr_t kUintptrAllBitsSet = uintptr_t{0xFFFFFFFFFFFFFFFF};
constexpr bool kRequiresCodeRange = true;
#if V8_HOST_ARCH_PPC && V8_TARGET_ARCH_PPC && V8_OS_LINUX
constexpr size_t kMaximalCodeRangeSize = 512 * MB;
-constexpr size_t kCodeRangeAreaAlignment = 64 * KB; // OS page on PPC Linux
+constexpr size_t kMinExpectedOSPageSize = 64 * KB; // OS page on PPC Linux
#elif V8_TARGET_ARCH_ARM64
constexpr size_t kMaximalCodeRangeSize = 128 * MB;
-constexpr size_t kCodeRangeAreaAlignment = 4 * KB; // OS page.
+constexpr size_t kMinExpectedOSPageSize = 4 * KB; // OS page.
#else
constexpr size_t kMaximalCodeRangeSize = 128 * MB;
-constexpr size_t kCodeRangeAreaAlignment = 4 * KB; // OS page.
+constexpr size_t kMinExpectedOSPageSize = 4 * KB; // OS page.
#endif
#if V8_OS_WIN
constexpr size_t kMinimumCodeRangeSize = 4 * MB;
@@ -180,7 +181,7 @@ constexpr size_t kMinimumCodeRangeSize = 3 * MB;
constexpr size_t kReservedCodeRangePages = 0;
#endif
#else
-constexpr int kPointerSizeLog2 = 2;
+constexpr int kSystemPointerSizeLog2 = 2;
constexpr intptr_t kIntptrSignBit = 0x80000000;
constexpr uintptr_t kUintptrAllBitsSet = 0xFFFFFFFFu;
#if V8_TARGET_ARCH_X64 && V8_TARGET_ARCH_32_BIT
@@ -188,21 +189,55 @@ constexpr uintptr_t kUintptrAllBitsSet = 0xFFFFFFFFu;
constexpr bool kRequiresCodeRange = true;
constexpr size_t kMaximalCodeRangeSize = 256 * MB;
constexpr size_t kMinimumCodeRangeSize = 3 * MB;
-constexpr size_t kCodeRangeAreaAlignment = 4 * KB; // OS page.
+constexpr size_t kMinExpectedOSPageSize = 4 * KB; // OS page.
#elif V8_HOST_ARCH_PPC && V8_TARGET_ARCH_PPC && V8_OS_LINUX
constexpr bool kRequiresCodeRange = false;
constexpr size_t kMaximalCodeRangeSize = 0 * MB;
constexpr size_t kMinimumCodeRangeSize = 0 * MB;
-constexpr size_t kCodeRangeAreaAlignment = 64 * KB; // OS page on PPC Linux
+constexpr size_t kMinExpectedOSPageSize = 64 * KB; // OS page on PPC Linux
+#elif V8_TARGET_ARCH_MIPS
+constexpr bool kRequiresCodeRange = false;
+constexpr size_t kMaximalCodeRangeSize = 2048LL * MB;
+constexpr size_t kMinimumCodeRangeSize = 0 * MB;
+constexpr size_t kMinExpectedOSPageSize = 4 * KB; // OS page.
#else
constexpr bool kRequiresCodeRange = false;
constexpr size_t kMaximalCodeRangeSize = 0 * MB;
constexpr size_t kMinimumCodeRangeSize = 0 * MB;
-constexpr size_t kCodeRangeAreaAlignment = 4 * KB; // OS page.
+constexpr size_t kMinExpectedOSPageSize = 4 * KB; // OS page.
#endif
constexpr size_t kReservedCodeRangePages = 0;
#endif
+STATIC_ASSERT(kSystemPointerSize == (1 << kSystemPointerSizeLog2));
+
+constexpr int kTaggedSize = kSystemPointerSize;
+constexpr int kTaggedSizeLog2 = kSystemPointerSizeLog2;
+STATIC_ASSERT(kTaggedSize == (1 << kTaggedSizeLog2));
+
+// These types define raw and atomic storage types for tagged values stored
+// on V8 heap.
+using Tagged_t = Address;
+using AtomicTagged_t = base::AtomicWord;
+using AsAtomicTagged = base::AsAtomicPointerImpl<AtomicTagged_t>;
+STATIC_ASSERT(sizeof(Tagged_t) == kTaggedSize);
+STATIC_ASSERT(sizeof(AtomicTagged_t) == kTaggedSize);
+
+// TODO(ishell): use kTaggedSize or kSystemPointerSize instead.
+constexpr int kPointerSize = kSystemPointerSize;
+constexpr int kPointerSizeLog2 = kSystemPointerSizeLog2;
+STATIC_ASSERT(kPointerSize == (1 << kPointerSizeLog2));
+
+constexpr int kEmbedderDataSlotSize =
+#ifdef V8_COMPRESS_POINTERS
+ kTaggedSize +
+#endif
+ kTaggedSize;
+
+constexpr int kEmbedderDataSlotSizeInTaggedSlots =
+ kEmbedderDataSlotSize / kTaggedSize;
+STATIC_ASSERT(kEmbedderDataSlotSize >= kSystemPointerSize);
+
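
As a quick sanity check on the arithmetic above (a standalone sketch with hypothetical names, using the values implied by this patch where kTaggedSize equals kSystemPointerSize): the slot always spans a whole number of tagged words, and the compression branch doubles it.

    constexpr int kTaggedDemo = 8;                 // kTaggedSize on 64-bit here.
    constexpr int kSlotPlain = kTaggedDemo;                     // #else branch.
    constexpr int kSlotCompressed = kTaggedDemo + kTaggedDemo;  // #ifdef branch.
    static_assert(kSlotPlain / kTaggedDemo == 1, "one tagged slot");
    static_assert(kSlotCompressed / kTaggedDemo == 2, "two tagged slots");
    static_assert(kSlotPlain >= static_cast<int>(sizeof(void*)),
                  "a slot can hold a raw system pointer");
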
constexpr int kExternalAllocationSoftLimit =
internal::Internals::kExternalAllocationSoftLimit;
@@ -213,17 +248,16 @@ constexpr int kExternalAllocationSoftLimit =
// account.
//
// Current value: Page::kAllocatableMemory (on 32-bit arch) - 512 (slack).
+#ifdef V8_HOST_ARCH_PPC
+// Reduced kMaxRegularHeapObjectSize due to the larger page size (64 KB) on ppc64le.
+constexpr int kMaxRegularHeapObjectSize = 327680;
+#else
constexpr int kMaxRegularHeapObjectSize = 507136;
-
-// Objects smaller or equal kMaxNewSpaceHeapObjectSize are allocated in the
-// new large object space.
-constexpr int kMaxNewSpaceHeapObjectSize = 32 * KB;
-
-STATIC_ASSERT(kPointerSize == (1 << kPointerSizeLog2));
+#endif
constexpr int kBitsPerByte = 8;
constexpr int kBitsPerByteLog2 = 3;
-constexpr int kBitsPerPointer = kPointerSize * kBitsPerByte;
+constexpr int kBitsPerSystemPointer = kSystemPointerSize * kBitsPerByte;
constexpr int kBitsPerInt = kIntSize * kBitsPerByte;
// IEEE 754 single precision floating point number bit layout.
@@ -374,6 +408,20 @@ inline std::ostream& operator<<(std::ostream& os, DeoptimizeKind kind) {
UNREACHABLE();
}
+enum class IsolateAllocationMode {
+ // Allocate Isolate in C++ heap using default new/delete operators.
+ kInCppHeap,
+
+ // Allocate Isolate in a committed region inside V8 heap reservation.
+ kInV8Heap,
+
+#ifdef V8_COMPRESS_POINTERS
+ kDefault = kInV8Heap,
+#else
+ kDefault = kInCppHeap,
+#endif
+};
+
// Indicates whether the lookup is related to sloppy-mode block-scoped
// function hoisting, and is a synthetic assignment for that.
enum class LookupHoistingMode { kNormal, kLegacySloppy };
@@ -410,12 +458,13 @@ static_assert(SmiValuesAre31Bits() == kIsSmiValueInLower32Bits,
constexpr intptr_t kSmiSignMask = static_cast<intptr_t>(
uintptr_t{1} << (kSmiValueSize + kSmiShiftSize + kSmiTagSize - 1));
-constexpr int kObjectAlignmentBits = kPointerSizeLog2;
+// Desired alignment for tagged pointers.
+constexpr int kObjectAlignmentBits = kTaggedSizeLog2;
constexpr intptr_t kObjectAlignment = 1 << kObjectAlignmentBits;
constexpr intptr_t kObjectAlignmentMask = kObjectAlignment - 1;
-// Desired alignment for pointers.
-constexpr intptr_t kPointerAlignment = (1 << kPointerSizeLog2);
+// Desired alignment for system pointers.
+constexpr intptr_t kPointerAlignment = (1 << kSystemPointerSizeLog2);
constexpr intptr_t kPointerAlignmentMask = kPointerAlignment - 1;
// Desired alignment for double values.
@@ -428,8 +477,21 @@ constexpr int kCodeAlignmentBits = 5;
constexpr intptr_t kCodeAlignment = 1 << kCodeAlignmentBits;
constexpr intptr_t kCodeAlignmentMask = kCodeAlignment - 1;
-const intptr_t kWeakHeapObjectMask = 1 << 1;
-const intptr_t kClearedWeakHeapObject = 3;
+const Address kWeakHeapObjectMask = 1 << 1;
+
+// The lower 32 bits of a cleared weak reference value are always equal to
+// the |kClearedWeakHeapObjectLower32| constant, but on 64-bit architectures
+// the upper 32 bits may be
+// 1) zero when pointer compression is disabled, or
+// 2) the upper 32 bits of the isolate root value when pointer compression is
+//    enabled.
+// This makes the pointer decompression computation suitable for cleared weak
+// references as well.
+// Note that real heap objects can't have their lower 32 bits equal to 3,
+// because that offset belongs to the page header. So in either case it's
+// enough to compare only the lower 32 bits of a MaybeObject value to figure
+// out whether it's a cleared reference or not.
+const uint32_t kClearedWeakHeapObjectLower32 = 3;
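
A standalone sketch of the lower-32-bit test this comment describes (plain C++; the upper-half value is a hypothetical isolate root): whatever the upper half carries, comparing the low 32 bits against kClearedWeakHeapObjectLower32 suffices.

    #include <cassert>
    #include <cstdint>

    constexpr uint32_t kClearedLower32 = 3;

    bool IsClearedWeakRef(uint64_t maybe_object) {
      return static_cast<uint32_t>(maybe_object) == kClearedLower32;
    }

    int main() {
      uint64_t root_hi = 0x12345678ULL << 32;  // Hypothetical isolate root bits.
      assert(IsClearedWeakRef(root_hi | 3));   // Cleared, pointer compression.
      assert(IsClearedWeakRef(3));             // Cleared, no compression.
      assert(!IsClearedWeakRef(root_hi | 0x1000));  // A real object address.
      return 0;
    }
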
// Zap-value: The value used for zapping dead objects.
// Should be a recognizable hex value tagged as a failure.
@@ -476,23 +538,20 @@ class Arguments;
class Assembler;
class Code;
class CodeSpace;
-class CodeStub;
class Context;
+class DeclarationScope;
class Debug;
class DebugInfo;
class Descriptor;
class DescriptorArray;
class TransitionArray;
class ExternalReference;
+class FeedbackVector;
class FixedArray;
+class Foreign;
class FreeStoreAllocationPolicy;
class FunctionTemplateInfo;
-class MemoryChunk;
-class NumberDictionary;
-class SimpleNumberDictionary;
-class NameDictionary;
class GlobalDictionary;
-template <typename T> class MaybeHandle;
template <typename T> class Handle;
class Heap;
class HeapObject;
@@ -509,34 +568,92 @@ class MacroAssembler;
class Map;
class MapSpace;
class MarkCompactCollector;
+template <typename T>
+class MaybeHandle;
class MaybeObject;
+class MemoryChunk;
+class MessageLocation;
+class ModuleScope;
+class Name;
+class NameDictionary;
class NewSpace;
class NewLargeObjectSpace;
+class NumberDictionary;
class Object;
+class CompressedObjectSlot;
+class CompressedMaybeObjectSlot;
+class CompressedMapWordSlot;
+class CompressedHeapObjectSlot;
+class FullObjectSlot;
+class FullMaybeObjectSlot;
+class FullHeapObjectSlot;
class OldSpace;
class ParameterCount;
class ReadOnlySpace;
-class Foreign;
+class RelocInfo;
class Scope;
-class DeclarationScope;
-class ModuleScope;
class ScopeInfo;
class Script;
+class SimpleNumberDictionary;
class Smi;
template <typename Config, class Allocator = FreeStoreAllocationPolicy>
class SplayTree;
class String;
-class Symbol;
-class Name;
class Struct;
-class FeedbackVector;
+class Symbol;
class Variable;
-class RelocInfo;
-class MessageLocation;
-typedef bool (*WeakSlotCallback)(Object** pointer);
+enum class SlotLocation { kOnHeap, kOffHeap };
+
+template <SlotLocation slot_location>
+struct SlotTraits;
+
+// Off-heap slots are always full-pointer slots.
+template <>
+struct SlotTraits<SlotLocation::kOffHeap> {
+ using TObjectSlot = FullObjectSlot;
+ using TMapWordSlot = FullObjectSlot;
+ using TMaybeObjectSlot = FullMaybeObjectSlot;
+ using THeapObjectSlot = FullHeapObjectSlot;
+};
+
+// On-heap slots are either full-pointer slots or compressed slots depending
+// on whether the pointer compression is enabled or not.
+template <>
+struct SlotTraits<SlotLocation::kOnHeap> {
+#ifdef V8_COMPRESS_POINTERS
+ using TObjectSlot = CompressedObjectSlot;
+ using TMapWordSlot = CompressedMapWordSlot;
+ using TMaybeObjectSlot = CompressedMaybeObjectSlot;
+ using THeapObjectSlot = CompressedHeapObjectSlot;
+#else
+ using TObjectSlot = FullObjectSlot;
+ using TMapWordSlot = FullObjectSlot;
+ using TMaybeObjectSlot = FullMaybeObjectSlot;
+ using THeapObjectSlot = FullHeapObjectSlot;
+#endif
+};
+
+// An ObjectSlot instance describes a kTaggedSize-sized on-heap field ("slot")
+// holding an Object value (a Smi or a strong heap object).
+using ObjectSlot = SlotTraits<SlotLocation::kOnHeap>::TObjectSlot;
-typedef bool (*WeakSlotCallbackWithHeap)(Heap* heap, Object** pointer);
+// A MapWordSlot instance describes a kTaggedSize-sized on-heap field ("slot")
+// holding a HeapObject (strong heap object) value or a forwarding pointer.
+using MapWordSlot = SlotTraits<SlotLocation::kOnHeap>::TMapWordSlot;
+
+// A MaybeObjectSlot instance describes a kTaggedSize-sized on-heap field
+// ("slot") holding MaybeObject (smi or weak heap object or strong heap object).
+using MaybeObjectSlot = SlotTraits<SlotLocation::kOnHeap>::TMaybeObjectSlot;
+
+// A HeapObjectSlot instance describes a kTaggedSize-sized field ("slot")
+// holding a weak or strong pointer to a heap object (think:
+// HeapObjectReference).
+using HeapObjectSlot = SlotTraits<SlotLocation::kOnHeap>::THeapObjectSlot;
+
+typedef bool (*WeakSlotCallback)(FullObjectSlot pointer);
+
+typedef bool (*WeakSlotCallbackWithHeap)(Heap* heap, FullObjectSlot pointer);
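
An illustrative model of the SlotTraits mechanism above (stand-in types, not V8's slot classes; DEMO_COMPRESS_POINTERS is a hypothetical stand-in for V8_COMPRESS_POINTERS): the location is a template parameter, and only the on-heap specialization consults the compression macro.

    #include <type_traits>

    struct FullSlot {};        // Stand-in for FullObjectSlot etc.
    struct CompressedSlot {};  // Stand-in for CompressedObjectSlot etc.

    enum class Location { kOnHeap, kOffHeap };

    template <Location>
    struct Traits;

    template <>
    struct Traits<Location::kOffHeap> {  // Off-heap slots are always full width.
      using TObjectSlot = FullSlot;
    };

    template <>
    struct Traits<Location::kOnHeap> {   // On-heap slots depend on compression.
    #ifdef DEMO_COMPRESS_POINTERS
      using TObjectSlot = CompressedSlot;
    #else
      using TObjectSlot = FullSlot;
    #endif
    };

    static_assert(std::is_same<Traits<Location::kOffHeap>::TObjectSlot,
                               FullSlot>::value,
                  "off-heap slots never compress");
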
// -----------------------------------------------------------------------------
// Miscellaneous
@@ -552,16 +669,18 @@ enum AllocationSpace {
CODE_SPACE, // Old generation code object space, marked executable.
MAP_SPACE, // Old generation map object space, non-movable.
LO_SPACE, // Old generation large object space.
- NEW_LO_SPACE, // Young generation large object space.
+ CODE_LO_SPACE, // Old generation large code object space.
+ NEW_LO_SPACE, // Young generation large object space.
FIRST_SPACE = RO_SPACE,
LAST_SPACE = NEW_LO_SPACE,
FIRST_GROWABLE_PAGED_SPACE = OLD_SPACE,
LAST_GROWABLE_PAGED_SPACE = MAP_SPACE
};
-constexpr int kSpaceTagSize = 3;
+constexpr int kSpaceTagSize = 4;
STATIC_ASSERT(FIRST_SPACE == 0);
+// TODO(ishell): review and rename kWordAligned to kTaggedAligned.
enum AllocationAlignment { kWordAligned, kDoubleAligned, kDoubleUnaligned };
enum class AccessMode { ATOMIC, NON_ATOMIC };
@@ -623,13 +742,11 @@ enum Movability { kMovable, kImmovable };
enum VisitMode {
VISIT_ALL,
- VISIT_ALL_BUT_READ_ONLY,
VISIT_ALL_IN_MINOR_MC_MARK,
VISIT_ALL_IN_MINOR_MC_UPDATE,
VISIT_ALL_IN_SCAVENGE,
VISIT_ALL_IN_SWEEP_NEWSPACE,
VISIT_ONLY_STRONG,
- VISIT_ONLY_STRONG_FOR_SERIALIZATION,
VISIT_FOR_SERIALIZATION,
};
@@ -651,44 +768,38 @@ enum ParseRestriction {
// A CodeDesc describes a buffer holding instructions and relocation
// information. The instructions start at the beginning of the buffer
// and grow forward, the relocation information starts at the end of
-// the buffer and grows backward. A constant pool may exist at the
-// end of the instructions.
+// the buffer and grows backward. A constant pool and a code comments
+// section may exist, in this order, at the end of the instructions.
//
-// |<--------------- buffer_size ----------------------------------->|
-// |<------------- instr_size ---------->| |<-- reloc_size -->|
-// | |<- const_pool_size ->| |
-// +=====================================+========+==================+
-// | instructions | data | free | reloc info |
-// +=====================================+========+==================+
-// ^
-// |
+// │<--------------- buffer_size ----------------------------------->│
+// │<---------------- instr_size ------------->│ │<-reloc_size->│
+// │ │<-const pool->│ │ │ │
+// │ │<- comments->│ │ │
+// ├───────────────────────────────────────────┼──────┼──────────────┤
+// │ instructions │ data │ free │ reloc info │
+// └───────────────────────────────────────────┴──────┴──────────────┘
// buffer
struct CodeDesc {
- byte* buffer;
- int buffer_size;
- int instr_size;
- int reloc_size;
- int constant_pool_size;
- byte* unwinding_info;
- int unwinding_info_size;
- Assembler* origin;
+ byte* buffer = nullptr;
+ int buffer_size = 0;
+ int instr_size = 0;
+ int reloc_size = 0;
+ int constant_pool_size = 0;
+ int code_comments_size = 0;
+ byte* unwinding_info = 0;
+ int unwinding_info_size = 0;
+ Assembler* origin = nullptr;
+ int constant_pool_offset() const {
+ return code_comments_offset() - constant_pool_size;
+ }
+ int code_comments_offset() const { return instr_size - code_comments_size; }
};
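
A worked example of the two offset accessors (hypothetical sizes): the code comments occupy the tail of the instruction area, with the constant pool immediately before them.

    #include <cassert>

    int main() {
      int instr_size = 100, constant_pool_size = 16, code_comments_size = 8;
      int code_comments_offset = instr_size - code_comments_size;
      int constant_pool_offset = code_comments_offset - constant_pool_size;
      assert(code_comments_offset == 92);
      assert(constant_pool_offset == 76);
      // Layout: [0,76) machine code, [76,92) constant pool, [92,100) comments.
      return 0;
    }
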
-
-// Callback function used for checking constraints when copying/relocating
-// objects. Returns true if an object can be copied/relocated from its
-// old_addr to a new_addr.
-typedef bool (*ConstraintCallback)(Address new_addr, Address old_addr);
-
-
-// Callback function on inline caches, used for iterating over inline caches
-// in compiled code.
-typedef void (*InlineCacheCallback)(Code* code, Address ic);
-
-
// State for inline cache call sites. Aliased as IC::State.
enum InlineCacheState {
+ // No feedback will be collected.
+ NO_FEEDBACK,
// Has never been executed.
UNINITIALIZED,
// Has been executed but monomorphic state has been delayed.
@@ -708,6 +819,8 @@ enum InlineCacheState {
// Printing support.
inline const char* InlineCacheState2String(InlineCacheState state) {
switch (state) {
+ case NO_FEEDBACK:
+ return "NOFEEDBACK";
case UNINITIALIZED:
return "UNINITIALIZED";
case PREMONOMORPHIC:
@@ -784,77 +897,41 @@ constexpr int kIeeeDoubleExponentWordOffset = 0;
// Testers for test.
#define HAS_SMI_TAG(value) \
- ((reinterpret_cast<intptr_t>(value) & ::i::kSmiTagMask) == ::i::kSmiTag)
+ ((static_cast<intptr_t>(value) & ::i::kSmiTagMask) == ::i::kSmiTag)
-#define HAS_HEAP_OBJECT_TAG(value) \
- (((reinterpret_cast<intptr_t>(value) & ::i::kHeapObjectTagMask) == \
+#define HAS_HEAP_OBJECT_TAG(value) \
+ (((static_cast<intptr_t>(value) & ::i::kHeapObjectTagMask) == \
::i::kHeapObjectTag))
// OBJECT_POINTER_ALIGN returns the value aligned as a HeapObject pointer
#define OBJECT_POINTER_ALIGN(value) \
(((value) + kObjectAlignmentMask) & ~kObjectAlignmentMask)
-// POINTER_SIZE_ALIGN returns the value aligned as a pointer.
+// OBJECT_POINTER_PADDING returns the padding size required to align value
+// as a HeapObject pointer.
+#define OBJECT_POINTER_PADDING(value) (OBJECT_POINTER_ALIGN(value) - (value))
+
+// POINTER_SIZE_ALIGN returns the value aligned as a system pointer.
#define POINTER_SIZE_ALIGN(value) \
(((value) + kPointerAlignmentMask) & ~kPointerAlignmentMask)
+// POINTER_SIZE_PADDING returns the padding size required to align value
+// as a system pointer.
+#define POINTER_SIZE_PADDING(value) (POINTER_SIZE_ALIGN(value) - (value))
+
// CODE_POINTER_ALIGN returns the value aligned as a generated code segment.
#define CODE_POINTER_ALIGN(value) \
(((value) + kCodeAlignmentMask) & ~kCodeAlignmentMask)
+// CODE_POINTER_PADDING returns the padding size required to align value
+// as a generated code segment.
+#define CODE_POINTER_PADDING(value) (CODE_POINTER_ALIGN(value) - (value))
+
// DOUBLE_POINTER_ALIGN returns the value aligned for double pointers.
#define DOUBLE_POINTER_ALIGN(value) \
(((value) + kDoubleAlignmentMask) & ~kDoubleAlignmentMask)
-// CPU feature flags.
-enum CpuFeature {
- // x86
- SSE4_1,
- SSSE3,
- SSE3,
- SAHF,
- AVX,
- FMA3,
- BMI1,
- BMI2,
- LZCNT,
- POPCNT,
- ATOM,
- // ARM
- // - Standard configurations. The baseline is ARMv6+VFPv2.
- ARMv7, // ARMv7-A + VFPv3-D32 + NEON
- ARMv7_SUDIV, // ARMv7-A + VFPv4-D32 + NEON + SUDIV
- ARMv8, // ARMv8-A (+ all of the above)
- // MIPS, MIPS64
- FPU,
- FP64FPU,
- MIPSr1,
- MIPSr2,
- MIPSr6,
- MIPS_SIMD, // MSA instructions
- // PPC
- FPR_GPR_MOV,
- LWSYNC,
- ISELECT,
- VSX,
- MODULO,
- // S390
- DISTINCT_OPS,
- GENERAL_INSTR_EXT,
- FLOATING_POINT_EXT,
- VECTOR_FACILITY,
- MISC_INSTR_EXT2,
-
- NUMBER_OF_CPU_FEATURES,
-
- // ARM feature aliases (based on the standard configurations above).
- VFPv3 = ARMv7,
- NEON = ARMv7,
- VFP32DREGS = ARMv7,
- SUDIV = ARMv7_SUDIV
-};
-
// Defines hints about receiver values based on structural knowledge.
enum class ConvertReceiverMode : unsigned {
kNullOrUndefined, // Guaranteed to be null or undefined.
@@ -987,10 +1064,12 @@ enum class VariableMode : uint8_t {
// variable is global unless it has been shadowed
// by an eval-introduced variable
- kDynamicLocal // requires dynamic lookup, but we know that the
- // variable is local and where it is unless it
- // has been shadowed by an eval-introduced
- // variable
+ kDynamicLocal, // requires dynamic lookup, but we know that the
+ // variable is local and where it is unless it
+ // has been shadowed by an eval-introduced
+ // variable
+
+ kLastLexicalVariableMode = kConst,
};
// Printing support
@@ -1018,7 +1097,7 @@ inline const char* VariableMode2String(VariableMode mode) {
enum VariableKind : uint8_t {
NORMAL_VARIABLE,
- FUNCTION_VARIABLE,
+ PARAMETER_VARIABLE,
THIS_VARIABLE,
SLOPPY_FUNCTION_NAME_VARIABLE
};
@@ -1036,7 +1115,7 @@ inline bool IsDeclaredVariableMode(VariableMode mode) {
inline bool IsLexicalVariableMode(VariableMode mode) {
STATIC_ASSERT(static_cast<uint8_t>(VariableMode::kLet) ==
0); // Implies that mode >= VariableMode::kLet.
- return mode <= VariableMode::kConst;
+ return mode <= VariableMode::kLastLexicalVariableMode;
}
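
The kLastLexicalVariableMode sentinel introduced above makes the range check self-maintaining. A sketch with an illustrative enum (not V8's):

    #include <cassert>
    #include <cstdint>

    enum class Mode : uint8_t { kLet, kConst, kVar, kLastLexical = kConst };

    // Stays correct if new lexical modes are added before kLastLexical.
    bool IsLexical(Mode m) { return m <= Mode::kLastLexical; }

    int main() {
      assert(IsLexical(Mode::kLet) && IsLexical(Mode::kConst));
      assert(!IsLexical(Mode::kVar));
      return 0;
    }
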
enum VariableLocation : uint8_t {
@@ -1110,7 +1189,7 @@ enum FunctionKind : uint8_t {
kSetterFunction,
kAsyncFunction,
kModule,
- kClassFieldsInitializerFunction,
+ kClassMembersInitializerFunction,
kDefaultBaseConstructor,
kDefaultDerivedConstructor,
@@ -1159,7 +1238,7 @@ inline bool IsConciseMethod(FunctionKind kind) {
kind == FunctionKind::kConciseGeneratorMethod ||
kind == FunctionKind::kAsyncConciseMethod ||
kind == FunctionKind::kAsyncConciseGeneratorMethod ||
- kind == FunctionKind::kClassFieldsInitializerFunction;
+ kind == FunctionKind::kClassMembersInitializerFunction;
}
inline bool IsGetterFunction(FunctionKind kind) {
@@ -1195,8 +1274,8 @@ inline bool IsClassConstructor(FunctionKind kind) {
return IsBaseConstructor(kind) || IsDerivedConstructor(kind);
}
-inline bool IsClassFieldsInitializerFunction(FunctionKind kind) {
- return kind == FunctionKind::kClassFieldsInitializerFunction;
+inline bool IsClassMembersInitializerFunction(FunctionKind kind) {
+ return kind == FunctionKind::kClassMembersInitializerFunction;
}
inline bool IsConstructable(FunctionKind kind) {
@@ -1230,8 +1309,8 @@ inline std::ostream& operator<<(std::ostream& os, FunctionKind kind) {
return os << "AsyncFunction";
case FunctionKind::kModule:
return os << "Module";
- case FunctionKind::kClassFieldsInitializerFunction:
- return os << "ClassFieldsInitializerFunction";
+ case FunctionKind::kClassMembersInitializerFunction:
+ return os << "ClassMembersInitializerFunction";
case FunctionKind::kDefaultBaseConstructor:
return os << "DefaultBaseConstructor";
case FunctionKind::kDefaultDerivedConstructor:
@@ -1276,7 +1355,7 @@ inline std::ostream& operator<<(std::ostream& os,
inline uint32_t ObjectHash(Address address) {
// All objects are at least pointer aligned, so we can remove the trailing
// zeros.
- return static_cast<uint32_t>(address >> kPointerSizeLog2);
+ return static_cast<uint32_t>(address >> kTaggedSizeLog2);
}
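
Why shift by kTaggedSizeLog2? Tagged object addresses are at least kTaggedSize-aligned, so the low bits are always zero and carry no entropy. A standalone sketch:

    #include <cassert>
    #include <cstdint>

    uint32_t ObjectHashDemo(uintptr_t address, int tagged_size_log2) {
      return static_cast<uint32_t>(address >> tagged_size_log2);
    }

    int main() {
      // Two adjacent 8-byte-aligned objects hash to consecutive values rather
      // than values 8 apart, spreading entries over small hash tables.
      assert(ObjectHashDemo(0x1000, 3) + 1 == ObjectHashDemo(0x1008, 3));
      return 0;
    }
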
// Type feedback is encoded in such a way that, we can combine the feedback
@@ -1311,27 +1390,28 @@ class BinaryOperationFeedback {
// at different points by performing an 'OR' operation. Type feedback moves
// to a more generic type when we combine feedback.
//
-// kSignedSmall -> kNumber -> kNumberOrOddball -> kAny
-// kInternalizedString -> kString -> kAny
-// kSymbol -> kAny
-// kBigInt -> kAny
-// kReceiver -> kAny
+// kSignedSmall -> kNumber -> kNumberOrOddball -> kAny
+// kReceiver -> kReceiverOrNullOrUndefined -> kAny
+// kInternalizedString -> kString -> kAny
+// kSymbol -> kAny
+// kBigInt -> kAny
//
// This is distinct from BinaryOperationFeedback on purpose, because the
// feedback that matters differs greatly as well as the way it is consumed.
class CompareOperationFeedback {
public:
enum {
- kNone = 0x00,
- kSignedSmall = 0x01,
- kNumber = 0x3,
- kNumberOrOddball = 0x7,
- kInternalizedString = 0x8,
- kString = 0x18,
- kSymbol = 0x20,
- kBigInt = 0x30,
- kReceiver = 0x40,
- kAny = 0xff
+ kNone = 0x000,
+ kSignedSmall = 0x001,
+ kNumber = 0x003,
+ kNumberOrOddball = 0x007,
+ kInternalizedString = 0x008,
+ kString = 0x018,
+ kSymbol = 0x020,
+ kBigInt = 0x040,
+ kReceiver = 0x080,
+ kReceiverOrNullOrUndefined = 0x180,
+ kAny = 0x1ff
};
};
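
The bit values above encode the lattice from the comment: each state's bits are a superset of the states it generalizes, so combining feedback from different execution points is a plain bitwise OR that only ever moves toward kAny. A quick check (values copied from the enum):

    #include <cassert>

    enum {
      kSignedSmall = 0x001,
      kNumber = 0x003,
      kReceiver = 0x080,
      kReceiverOrNullOrUndefined = 0x180,
    };

    int main() {
      assert((kSignedSmall | kNumber) == kNumber);  // Smi folds into Number.
      assert((kReceiver | kReceiverOrNullOrUndefined) ==
             kReceiverOrNullOrUndefined);
      assert((kNumber | kReceiver) == 0x083);  // Unnamed mix: generalizes to kAny.
      return 0;
    }
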
@@ -1534,38 +1614,12 @@ enum IsolateAddressId {
kIsolateAddressCount
};
-V8_INLINE static bool HasWeakHeapObjectTag(const internal::MaybeObject* value) {
- return ((reinterpret_cast<intptr_t>(value) & kHeapObjectTagMask) ==
+V8_INLINE static bool HasWeakHeapObjectTag(Address value) {
+ // TODO(jkummerow): Consolidate integer types here.
+ return ((static_cast<intptr_t>(value) & kHeapObjectTagMask) ==
kWeakHeapObjectTag);
}
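
For context, a standalone sketch of the low-bit tagging this checks (tag values per V8's scheme, where bit 0 marks a heap object and bit 1 additionally marks a weak reference; the names here are demo stand-ins):

    #include <cassert>
    #include <cstdint>

    constexpr uintptr_t kTagMaskDemo = 3;  // Low two bits hold the tag.
    constexpr uintptr_t kStrongTag = 1;    // ...01: strong heap object pointer.
    constexpr uintptr_t kWeakTag = 3;      // ...11: weak heap object reference.

    bool HasWeakTagDemo(uintptr_t value) {
      return (value & kTagMaskDemo) == kWeakTag;
    }

    int main() {
      uintptr_t object = 0x5008;  // An aligned object address (low bits zero).
      assert(!HasWeakTagDemo(object | kStrongTag));
      assert(HasWeakTagDemo(object | kWeakTag));
      return 0;
    }
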
-// Object* should never have the weak tag; this variant is for overzealous
-// checking.
-V8_INLINE static bool HasWeakHeapObjectTag(const Object* value) {
- return ((reinterpret_cast<intptr_t>(value) & kHeapObjectTagMask) ==
- kWeakHeapObjectTag);
-}
-
-V8_INLINE static bool IsClearedWeakHeapObject(const MaybeObject* value) {
- return reinterpret_cast<intptr_t>(value) == kClearedWeakHeapObject;
-}
-
-V8_INLINE static HeapObject* RemoveWeakHeapObjectMask(
- HeapObjectReference* value) {
- return reinterpret_cast<HeapObject*>(reinterpret_cast<intptr_t>(value) &
- ~kWeakHeapObjectMask);
-}
-
-V8_INLINE static HeapObjectReference* AddWeakHeapObjectMask(Object* value) {
- return reinterpret_cast<HeapObjectReference*>(
- reinterpret_cast<intptr_t>(value) | kWeakHeapObjectMask);
-}
-
-V8_INLINE static MaybeObject* AddWeakHeapObjectMask(MaybeObject* value) {
- return reinterpret_cast<MaybeObject*>(reinterpret_cast<intptr_t>(value) |
- kWeakHeapObjectMask);
-}
-
enum class HeapObjectReferenceType {
WEAK,
STRONG,
@@ -1595,7 +1649,10 @@ enum class LoadSensitivity {
V(TrapRemByZero) \
V(TrapFloatUnrepresentable) \
V(TrapFuncInvalid) \
- V(TrapFuncSigMismatch)
+ V(TrapFuncSigMismatch) \
+ V(TrapDataSegmentDropped) \
+ V(TrapElemSegmentDropped) \
+ V(TrapTableOutOfBounds)
enum KeyedAccessLoadMode {
STANDARD_LOAD,
@@ -1656,6 +1713,21 @@ static inline bool IsGrowStoreMode(KeyedAccessStoreMode store_mode) {
}
enum IcCheckType { ELEMENT, PROPERTY };
+
+// Helper stubs can be called in different ways depending on where the target
+// code is located and what the call sequence is expected to look like:
+// - CodeObject: Call on-heap {Code} object via {RelocInfo::CODE_TARGET}.
+// - WasmRuntimeStub: Call native {WasmCode} stub via
+// {RelocInfo::WASM_STUB_CALL}.
+// - BuiltinPointer: Call a builtin based on a builtin pointer with dynamic
+// contents. If builtins are embedded, we call directly into off-heap code
+// without going through the on-heap Code trampoline.
+enum class StubCallMode {
+ kCallCodeObject,
+ kCallWasmRuntimeStub,
+ kCallBuiltinPointer,
+};
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/handler-table.cc b/deps/v8/src/handler-table.cc
index dd246575ab..cdf95ce57b 100644
--- a/deps/v8/src/handler-table.cc
+++ b/deps/v8/src/handler-table.cc
@@ -13,13 +13,13 @@
namespace v8 {
namespace internal {
-HandlerTable::HandlerTable(Code* code)
+HandlerTable::HandlerTable(Code code)
: HandlerTable(code->InstructionStart(), code->handler_table_offset()) {}
-HandlerTable::HandlerTable(BytecodeArray* bytecode_array)
+HandlerTable::HandlerTable(BytecodeArray bytecode_array)
: HandlerTable(bytecode_array->handler_table()) {}
-HandlerTable::HandlerTable(ByteArray* byte_array)
+HandlerTable::HandlerTable(ByteArray byte_array)
: number_of_entries_(byte_array->length() / kRangeEntrySize /
sizeof(int32_t)),
#ifdef DEBUG
diff --git a/deps/v8/src/handler-table.h b/deps/v8/src/handler-table.h
index c2e282001c..97f91dd6b0 100644
--- a/deps/v8/src/handler-table.h
+++ b/deps/v8/src/handler-table.h
@@ -46,9 +46,9 @@ class V8_EXPORT_PRIVATE HandlerTable {
};
// Constructors for the various encodings.
- explicit HandlerTable(Code* code);
- explicit HandlerTable(ByteArray* byte_array);
- explicit HandlerTable(BytecodeArray* bytecode_array);
+ explicit HandlerTable(Code code);
+ explicit HandlerTable(ByteArray byte_array);
+ explicit HandlerTable(BytecodeArray bytecode_array);
explicit HandlerTable(Address instruction_start, size_t handler_table_offset);
// Getters for handler table based on ranges.
@@ -110,7 +110,7 @@ class V8_EXPORT_PRIVATE HandlerTable {
// the GC heap (either {ByteArray} or {Code}) and hence would become stale
// during a collection. Hence we disallow any allocation.
Address raw_encoded_data_;
- DisallowHeapAllocation no_gc_;
+ DISALLOW_HEAP_ALLOCATION(no_gc_);
// Layout description for handler table based on ranges.
static const int kRangeStartIndex = 0;
diff --git a/deps/v8/src/handles-inl.h b/deps/v8/src/handles-inl.h
index d8e195c6f9..941c839d9c 100644
--- a/deps/v8/src/handles-inl.h
+++ b/deps/v8/src/handles-inl.h
@@ -12,22 +12,21 @@
namespace v8 {
namespace internal {
-HandleBase::HandleBase(Object* object, Isolate* isolate)
+HandleBase::HandleBase(Address object, Isolate* isolate)
: location_(HandleScope::GetHandle(isolate, object)) {}
+// Allocate a new handle for the object, do not canonicalize.
template <typename T>
-// Allocate a new handle for the object, do not canonicalize.
-Handle<T> Handle<T>::New(T* object, Isolate* isolate) {
- return Handle(
- reinterpret_cast<T**>(HandleScope::CreateHandle(isolate, object)));
+Handle<T> Handle<T>::New(T object, Isolate* isolate) {
+ return Handle(HandleScope::CreateHandle(isolate, object.ptr()));
}
template <typename T>
template <typename S>
const Handle<T> Handle<T>::cast(Handle<S> that) {
- T::cast(*reinterpret_cast<T**>(that.location()));
- return Handle<T>(reinterpret_cast<T**>(that.location_));
+ T::cast(*FullObjectSlot(that.location()));
+ return Handle<T>(that.location_);
}
HandleScope::HandleScope(Isolate* isolate) {
@@ -39,10 +38,11 @@ HandleScope::HandleScope(Isolate* isolate) {
}
template <typename T>
-Handle<T>::Handle(T* object, Isolate* isolate) : HandleBase(object, isolate) {}
+Handle<T>::Handle(T object, Isolate* isolate)
+ : HandleBase(object.ptr(), isolate) {}
template <typename T>
-V8_INLINE Handle<T> handle(T* object, Isolate* isolate) {
+V8_INLINE Handle<T> handle(T object, Isolate* isolate) {
return Handle<T>(object, isolate);
}
@@ -67,15 +67,13 @@ HandleScope::~HandleScope() {
#endif // DEBUG
}
-
-void HandleScope::CloseScope(Isolate* isolate,
- Object** prev_next,
- Object** prev_limit) {
+void HandleScope::CloseScope(Isolate* isolate, Address* prev_next,
+ Address* prev_limit) {
HandleScopeData* current = isolate->handle_scope_data();
std::swap(current->next, prev_next);
current->level--;
- Object** limit = prev_next;
+ Address* limit = prev_next;
if (current->limit != prev_limit) {
current->limit = prev_limit;
limit = prev_limit;
@@ -85,15 +83,15 @@ void HandleScope::CloseScope(Isolate* isolate,
ZapRange(current->next, limit);
#endif
MSAN_ALLOCATED_UNINITIALIZED_MEMORY(
- current->next, static_cast<size_t>(limit - current->next));
+ current->next,
+ static_cast<size_t>(reinterpret_cast<Address>(limit) -
+ reinterpret_cast<Address>(current->next)));
}
-
template <typename T>
Handle<T> HandleScope::CloseAndEscape(Handle<T> handle_value) {
HandleScopeData* current = isolate_->handle_scope_data();
-
- T* value = *handle_value;
+ T value = *handle_value;
// Throw away all handles in the current scope.
CloseScope(isolate_, prev_next_, prev_limit_);
// Allocate one handle in the parent scope.
@@ -107,23 +105,24 @@ Handle<T> HandleScope::CloseAndEscape(Handle<T> handle_value) {
return result;
}
-Object** HandleScope::CreateHandle(Isolate* isolate, Object* value) {
+Address* HandleScope::CreateHandle(Isolate* isolate, Address value) {
DCHECK(AllowHandleAllocation::IsAllowed());
HandleScopeData* data = isolate->handle_scope_data();
-
- Object** result = data->next;
- if (result == data->limit) result = Extend(isolate);
- // Update the current next field, set the value in the created
- // handle, and return the result.
- DCHECK(result < data->limit);
- data->next = result + 1;
-
+ Address* result = data->next;
+ if (result == data->limit) {
+ result = Extend(isolate);
+ }
+ // Update the current next field, set the value in the created handle,
+ // and return the result.
+ DCHECK_LT(reinterpret_cast<Address>(result),
+ reinterpret_cast<Address>(data->limit));
+ data->next = reinterpret_cast<Address*>(reinterpret_cast<Address>(result) +
+ sizeof(Address));
*result = value;
return result;
}
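
A minimal model of the bump-pointer allocation above (plain C++, hypothetical sizes): a handle is just a slot in the current block, creation writes the value and advances next, and Extend() is only needed once next reaches limit.

    #include <cassert>
    #include <cstdint>

    struct ScopeData {
      uintptr_t* next;
      uintptr_t* limit;
    };

    uintptr_t* CreateHandleDemo(ScopeData* data, uintptr_t value) {
      assert(data->next < data->limit);  // The real code calls Extend() here.
      uintptr_t* result = data->next++;  // Bump the allocation pointer.
      *result = value;
      return result;
    }

    int main() {
      uintptr_t block[4];
      ScopeData data{block, block + 4};
      uintptr_t* h = CreateHandleDemo(&data, 0x1234);
      assert(*h == 0x1234 && data.next == block + 1);
      return 0;
    }
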
-
-Object** HandleScope::GetHandle(Isolate* isolate, Object* value) {
+Address* HandleScope::GetHandle(Isolate* isolate, Address value) {
DCHECK(AllowHandleAllocation::IsAllowed());
HandleScopeData* data = isolate->handle_scope_data();
CanonicalHandleScope* canonical = data->canonical_scope;
diff --git a/deps/v8/src/handles.cc b/deps/v8/src/handles.cc
index a9f4a0f0b4..7a6c06f571 100644
--- a/deps/v8/src/handles.cc
+++ b/deps/v8/src/handles.cc
@@ -10,6 +10,7 @@
#include "src/identity-map.h"
#include "src/maybe-handles.h"
#include "src/objects-inl.h"
+#include "src/roots-inl.h"
namespace v8 {
namespace internal {
@@ -24,15 +25,14 @@ ASSERT_TRIVIALLY_COPYABLE(MaybeHandle<Object>);
#ifdef DEBUG
bool HandleBase::IsDereferenceAllowed(DereferenceCheckMode mode) const {
DCHECK_NOT_NULL(location_);
- Object* object = *location_;
+ Object object(*location_);
if (object->IsSmi()) return true;
- HeapObject* heap_object = HeapObject::cast(object);
+ HeapObject heap_object = HeapObject::cast(object);
Isolate* isolate;
if (!Isolate::FromWritableHeapObject(heap_object, &isolate)) return true;
- Heap* heap = isolate->heap();
RootIndex root_index;
- if (heap->IsRootHandleLocation(location_, &root_index) &&
- heap->RootCanBeTreatedAsConstant(root_index)) {
+ if (isolate->roots_table().IsRootHandleLocation(location_, &root_index) &&
+ RootsTable::IsImmortalImmovable(root_index)) {
return true;
}
if (!AllowHandleDereference::IsAllowed()) return false;
@@ -58,11 +58,10 @@ int HandleScope::NumberOfHandles(Isolate* isolate) {
(isolate->handle_scope_data()->next - impl->blocks()->back()));
}
-
-Object** HandleScope::Extend(Isolate* isolate) {
+Address* HandleScope::Extend(Isolate* isolate) {
HandleScopeData* current = isolate->handle_scope_data();
- Object** result = current->next;
+ Address* result = current->next;
DCHECK(result == current->limit);
// Make sure there's at least one scope on the stack and that the
@@ -76,7 +75,7 @@ Object** HandleScope::Extend(Isolate* isolate) {
// If there's more room in the last block, we use that. This is used
// for fast creation of scopes after scope barriers.
if (!impl->blocks()->empty()) {
- Object** limit = &impl->blocks()->back()[kHandleBlockSize];
+ Address* limit = &impl->blocks()->back()[kHandleBlockSize];
if (current->limit != limit) {
current->limit = limit;
DCHECK_LT(limit - current->next, kHandleBlockSize);
@@ -105,10 +104,10 @@ void HandleScope::DeleteExtensions(Isolate* isolate) {
#ifdef ENABLE_HANDLE_ZAPPING
-void HandleScope::ZapRange(Object** start, Object** end) {
+void HandleScope::ZapRange(Address* start, Address* end) {
DCHECK_LE(end - start, kHandleBlockSize);
- for (Object** p = start; p != end; p++) {
- *reinterpret_cast<Address*>(p) = static_cast<Address>(kHandleZapValue);
+ for (Address* p = start; p != end; p++) {
+ *p = static_cast<Address>(kHandleZapValue);
}
}
#endif
@@ -134,7 +133,7 @@ CanonicalHandleScope::CanonicalHandleScope(Isolate* isolate)
prev_canonical_scope_ = handle_scope_data->canonical_scope;
handle_scope_data->canonical_scope = this;
root_index_map_ = new RootIndexMap(isolate);
- identity_map_ = new IdentityMap<Object**, ZoneAllocationPolicy>(
+ identity_map_ = new IdentityMap<Address*, ZoneAllocationPolicy>(
isolate->heap(), ZoneAllocationPolicy(&zone_));
canonical_level_ = handle_scope_data->level;
}
@@ -146,26 +145,25 @@ CanonicalHandleScope::~CanonicalHandleScope() {
isolate_->handle_scope_data()->canonical_scope = prev_canonical_scope_;
}
-
-Object** CanonicalHandleScope::Lookup(Object* object) {
+Address* CanonicalHandleScope::Lookup(Address object) {
DCHECK_LE(canonical_level_, isolate_->handle_scope_data()->level);
if (isolate_->handle_scope_data()->level != canonical_level_) {
// We are in an inner handle scope. Do not canonicalize since we will leave
// this handle scope while still being in the canonical scope.
return HandleScope::CreateHandle(isolate_, object);
}
- if (object->IsHeapObject()) {
+ if (Internals::HasHeapObjectTag(object)) {
RootIndex root_index;
- if (root_index_map_->Lookup(HeapObject::cast(object), &root_index)) {
- return isolate_->heap()->root_handle(root_index).location();
+ if (root_index_map_->Lookup(object, &root_index)) {
+ return isolate_->root_handle(root_index).location();
}
}
- Object*** entry = identity_map_->Get(object);
+ Address** entry = identity_map_->Get(Object(object));
if (*entry == nullptr) {
// Allocate new handle location.
*entry = HandleScope::CreateHandle(isolate_, object);
}
- return reinterpret_cast<Object**>(*entry);
+ return *entry;
}
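
The effect of canonicalization is that repeated lookups of the same object within one canonical scope share a single handle slot. A standalone sketch (std::map standing in for V8's IdentityMap):

    #include <cassert>
    #include <cstdint>
    #include <map>

    std::map<uintptr_t, uintptr_t*> canonical;  // object -> handle slot
    uintptr_t storage[16];                      // Stand-in handle block.
    int used = 0;

    uintptr_t* LookupDemo(uintptr_t object) {
      uintptr_t*& entry = canonical[object];
      if (entry == nullptr) {    // First sighting: allocate a new handle slot.
        entry = &storage[used++];
        *entry = object;
      }
      return entry;              // Later sightings reuse the same slot.
    }

    int main() {
      assert(LookupDemo(0x5008) == LookupDemo(0x5008));  // Canonicalized.
      assert(LookupDemo(0x5008) != LookupDemo(0x6008));
      assert(used == 2);
      return 0;
    }
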
@@ -173,8 +171,8 @@ DeferredHandleScope::DeferredHandleScope(Isolate* isolate)
: impl_(isolate->handle_scope_implementer()) {
impl_->BeginDeferredScope();
HandleScopeData* data = impl_->isolate()->handle_scope_data();
- Object** new_next = impl_->GetSpareOrNewBlock();
- Object** new_limit = &new_next[kHandleBlockSize];
+ Address* new_next = impl_->GetSpareOrNewBlock();
+ Address* new_limit = &new_next[kHandleBlockSize];
// Check that at least one HandleScope with at least one Handle in it exists,
// see the class description.
DCHECK(!impl_->blocks()->empty());
diff --git a/deps/v8/src/handles.h b/deps/v8/src/handles.h
index f37162f70e..2115f4a878 100644
--- a/deps/v8/src/handles.h
+++ b/deps/v8/src/handles.h
@@ -24,13 +24,20 @@ class Isolate;
template <typename T>
class MaybeHandle;
class Object;
+class OrderedHashMap;
+class OrderedHashSet;
+class OrderedNameDictionary;
+class SmallOrderedHashMap;
+class SmallOrderedHashSet;
+class SmallOrderedNameDictionary;
+class WasmExportedFunctionData;
// ----------------------------------------------------------------------------
// Base class for Handle instantiations. Don't use directly.
class HandleBase {
public:
- V8_INLINE explicit HandleBase(Object** location) : location_(location) {}
- V8_INLINE explicit HandleBase(Object* object, Isolate* isolate);
+ V8_INLINE explicit HandleBase(Address* location) : location_(location) {}
+ V8_INLINE explicit HandleBase(Address object, Isolate* isolate);
// Check if this handle refers to the exact same object as the other handle.
V8_INLINE bool is_identical_to(const HandleBase that) const {
@@ -52,13 +59,13 @@ class HandleBase {
protected:
// Provides the C++ dereference operator.
- V8_INLINE Object* operator*() const {
+ V8_INLINE Address operator*() const {
SLOW_DCHECK(IsDereferenceAllowed(INCLUDE_DEFERRED_CHECK));
return *location_;
}
// Returns the address to where the raw pointer is stored.
- V8_INLINE Object** location() const {
+ V8_INLINE Address* location() const {
SLOW_DCHECK(location_ == nullptr ||
IsDereferenceAllowed(INCLUDE_DEFERRED_CHECK));
return location_;
@@ -74,7 +81,10 @@ class HandleBase {
}
#endif // DEBUG
- Object** location_;
+ // This uses type Address* as opposed to a pointer type to a typed
+ // wrapper class, because it doesn't point to instances of such a
+ // wrapper class. Design overview: https://goo.gl/Ph4CGz
+ Address* location_;
};
@@ -92,17 +102,18 @@ class HandleBase {
template <typename T>
class Handle final : public HandleBase {
public:
- V8_INLINE explicit Handle(T** location = nullptr)
- : HandleBase(reinterpret_cast<Object**>(location)) {
+ V8_INLINE explicit Handle(Address* location = nullptr)
+ : HandleBase(location) {
// Type check:
static_assert(std::is_convertible<T*, Object*>::value,
"static type violation");
+ // TODO(jkummerow): Runtime type check here as a SLOW_DCHECK?
}
- V8_INLINE Handle(T* object, Isolate* isolate);
+ V8_INLINE Handle(T object, Isolate* isolate);
// Allocate a new handle for the object, do not canonicalize.
- V8_INLINE static Handle<T> New(T* object, Isolate* isolate);
+ V8_INLINE static Handle<T> New(T object, Isolate* isolate);
// Constructor for handling automatic up casting.
// Ex. Handle<JSFunction> can be passed when Handle<Object> is expected.
@@ -110,17 +121,19 @@ class Handle final : public HandleBase {
std::is_convertible<S*, T*>::value>::type>
V8_INLINE Handle(Handle<S> handle) : HandleBase(handle) {}
- V8_INLINE T* operator->() const { return operator*(); }
+ V8_INLINE T operator->() const {
+ return operator*();
+ }
// Provides the C++ dereference operator.
- V8_INLINE T* operator*() const {
- return reinterpret_cast<T*>(HandleBase::operator*());
+ V8_INLINE T operator*() const {
+ // unchecked_cast because we'd rather trust Handle<T> to contain a T than
+ // include all the respective -inl.h headers for SLOW_DCHECKs.
+ return T::unchecked_cast(Object(HandleBase::operator*()));
}
// Returns the address to where the raw pointer is stored.
- V8_INLINE T** location() const {
- return reinterpret_cast<T**>(HandleBase::location());
- }
+ V8_INLINE Address* location() const { return HandleBase::location(); }
template <typename S>
inline static const Handle<T> cast(Handle<S> that);
@@ -133,14 +146,14 @@ class Handle final : public HandleBase {
bool equals(Handle<T> other) const { return address() == other.address(); }
// Provide function object for location equality comparison.
- struct equal_to : public std::binary_function<Handle<T>, Handle<T>, bool> {
+ struct equal_to {
V8_INLINE bool operator()(Handle<T> lhs, Handle<T> rhs) const {
return lhs.equals(rhs);
}
};
// Provide function object for location hashing.
- struct hash : public std::unary_function<Handle<T>, size_t> {
+ struct hash {
V8_INLINE size_t operator()(Handle<T> const& handle) const {
return base::hash<Address>()(handle.address());
}
@@ -181,10 +194,10 @@ class HandleScope {
V8_EXPORT_PRIVATE static int NumberOfHandles(Isolate* isolate);
// Create a new handle or lookup a canonical handle.
- V8_INLINE static Object** GetHandle(Isolate* isolate, Object* value);
+ V8_INLINE static Address* GetHandle(Isolate* isolate, Address value);
// Creates a new handle with the given value.
- V8_INLINE static Object** CreateHandle(Isolate* isolate, Object* value);
+ V8_INLINE static Address* CreateHandle(Isolate* isolate, Address value);
// Deallocates any extensions used by the current scope.
V8_EXPORT_PRIVATE static void DeleteExtensions(Isolate* isolate);
@@ -213,20 +226,19 @@ class HandleScope {
void operator delete(void* size_t);
Isolate* isolate_;
- Object** prev_next_;
- Object** prev_limit_;
+ Address* prev_next_;
+ Address* prev_limit_;
// Close the handle scope resetting limits to a previous state.
- static inline void CloseScope(Isolate* isolate,
- Object** prev_next,
- Object** prev_limit);
+ static inline void CloseScope(Isolate* isolate, Address* prev_next,
+ Address* prev_limit);
// Extend the handle scope making room for more handles.
- V8_EXPORT_PRIVATE static Object** Extend(Isolate* isolate);
+ V8_EXPORT_PRIVATE static Address* Extend(Isolate* isolate);
#ifdef ENABLE_HANDLE_ZAPPING
// Zaps the handles in the half-open interval [start, end).
- V8_EXPORT_PRIVATE static void ZapRange(Object** start, Object** end);
+ V8_EXPORT_PRIVATE static void ZapRange(Address* start, Address* end);
#endif
friend class v8::HandleScope;
@@ -256,12 +268,12 @@ class V8_EXPORT_PRIVATE CanonicalHandleScope final {
~CanonicalHandleScope();
private:
- Object** Lookup(Object* object);
+ Address* Lookup(Address object);
Isolate* isolate_;
Zone zone_;
RootIndexMap* root_index_map_;
- IdentityMap<Object**, ZoneAllocationPolicy>* identity_map_;
+ IdentityMap<Address*, ZoneAllocationPolicy>* identity_map_;
// Ordinary nested handle scopes within the current one are not canonical.
int canonical_level_;
// We may have nested canonical scopes. Handles are canonical within each one.
@@ -300,8 +312,8 @@ class V8_EXPORT_PRIVATE DeferredHandleScope final {
~DeferredHandleScope();
private:
- Object** prev_limit_;
- Object** prev_next_;
+ Address* prev_limit_;
+ Address* prev_next_;
HandleScopeImplementer* impl_;
#ifdef DEBUG
@@ -325,15 +337,15 @@ class SealHandleScope final {
inline ~SealHandleScope();
private:
Isolate* isolate_;
- Object** prev_limit_;
+ Address* prev_limit_;
int prev_sealed_level_;
#endif
};
struct HandleScopeData final {
- Object** next;
- Object** limit;
+ Address* next;
+ Address* limit;
int level;
int sealed_level;
CanonicalHandleScope* canonical_scope;
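The handles.h hunks above carry the core of this refactor: Handle<T> no longer stores a T**, it stores an Address* and rebuilds a value-typed wrapper on every dereference. A self-contained sketch of that shape, with Object as a stand-in for V8's real class:

    #include <cstdint>

    using Address = uintptr_t;

    // Stand-in for v8::internal::Object: a value type holding one tagged word.
    class Object {
     public:
      explicit Object(Address ptr) : ptr_(ptr) {}
      Address ptr() const { return ptr_; }
      static Object unchecked_cast(Object o) { return o; }
      // Allows arrow chaining through the by-value wrapper.
      const Object* operator->() const { return this; }

     private:
      Address ptr_;
    };

    template <typename T>
    class Handle {
     public:
      explicit Handle(Address* location) : location_(location) {}
      // Rebuild the value-typed wrapper from the raw slot on each dereference,
      // mirroring T::unchecked_cast(Object(HandleBase::operator*())) above.
      T operator*() const { return T::unchecked_cast(Object(*location_)); }
      T operator->() const { return operator*(); }

     private:
      Address* location_;  // points into a HandleScope block
    };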
diff --git a/deps/v8/src/heap-symbols.h b/deps/v8/src/heap-symbols.h
index 152c894796..f22f8cbb61 100644
--- a/deps/v8/src/heap-symbols.h
+++ b/deps/v8/src/heap-symbols.h
@@ -7,25 +7,38 @@
#ifdef V8_INTL_SUPPORT
#define INTERNALIZED_STRING_LIST_GENERATOR_INTL(V, _) \
+ V(_, breakType_string, "breakType") \
+ V(_, calendar_string, "calendar") \
+ V(_, cardinal_string, "cardinal") \
V(_, caseFirst_string, "caseFirst") \
V(_, day_string, "day") \
V(_, dayPeriod_string, "dayPeriod") \
V(_, decimal_string, "decimal") \
V(_, era_string, "era") \
V(_, fraction_string, "fraction") \
+ V(_, granularity_string, "granularity") \
+ V(_, grapheme_string, "grapheme") \
V(_, group_string, "group") \
V(_, h11_string, "h11") \
V(_, h12_string, "h12") \
V(_, h23_string, "h23") \
V(_, h24_string, "h24") \
V(_, hour_string, "hour") \
+ V(_, hour12_string, "hour12") \
+ V(_, hourCycle_string, "hourCycle") \
V(_, collation_string, "collation") \
V(_, currency_string, "currency") \
V(_, currencyDisplay_string, "currencyDisplay") \
+ V(_, ideo_string, "ideo") \
V(_, ignorePunctuation_string, "ignorePunctuation") \
+ V(_, Invalid_Date_string, "Invalid Date") \
V(_, integer_string, "integer") \
+ V(_, kana_string, "kana") \
+ V(_, letter_string, "letter") \
+ V(_, list_string, "list") \
V(_, literal_string, "literal") \
V(_, locale_string, "locale") \
+ V(_, loose_string, "loose") \
V(_, lower_string, "lower") \
V(_, maximumFractionDigits_string, "maximumFractionDigits") \
V(_, maximumSignificantDigits_string, "maximumSignificantDigits") \
@@ -36,19 +49,30 @@
V(_, minusSign_string, "minusSign") \
V(_, minute_string, "minute") \
V(_, month_string, "month") \
+ V(_, none_string, "none") \
+ V(_, normal_string, "normal") \
V(_, numberingSystem_string, "numberingSystem") \
V(_, numeric_string, "numeric") \
+ V(_, ordinal_string, "ordinal") \
V(_, percentSign_string, "percentSign") \
V(_, plusSign_string, "plusSign") \
V(_, quarter_string, "quarter") \
V(_, second_string, "second") \
+ V(_, segment_string, "segment") \
+ V(_, SegmentIterator_string, "Segment Iterator") \
V(_, sensitivity_string, "sensitivity") \
+ V(_, sep_string, "sep") \
+ V(_, strict_string, "strict") \
V(_, style_string, "style") \
+ V(_, term_string, "term") \
+ V(_, timeZone_string, "timeZone") \
V(_, timeZoneName_string, "timeZoneName") \
V(_, type_string, "type") \
+ V(_, unknown_string, "unknown") \
V(_, upper_string, "upper") \
V(_, usage_string, "usage") \
V(_, useGrouping_string, "useGrouping") \
+ V(_, UTC_string, "UTC") \
V(_, unit_string, "unit") \
V(_, weekday_string, "weekday") \
V(_, year_string, "year")
@@ -70,6 +94,7 @@
V(_, array_to_string, "[object Array]") \
V(_, ArrayBuffer_string, "ArrayBuffer") \
V(_, ArrayIterator_string, "Array Iterator") \
+ V(_, as_string, "as") \
V(_, assign_string, "assign") \
V(_, async_string, "async") \
V(_, auto_string, "auto") \
@@ -89,8 +114,6 @@
V(_, call_string, "call") \
V(_, callee_string, "callee") \
V(_, caller_string, "caller") \
- V(_, cell_value_string, "%cell_value") \
- V(_, char_at_string, "CharAt") \
V(_, character_string, "character") \
V(_, closure_string, "(closure)") \
V(_, code_string, "code") \
@@ -106,7 +129,6 @@
V(_, default_string, "default") \
V(_, defineProperty_string, "defineProperty") \
V(_, deleteProperty_string, "deleteProperty") \
- V(_, did_handle_string, "didHandle") \
V(_, disjunction_string, "disjunction") \
V(_, display_name_string, "displayName") \
V(_, done_string, "done") \
@@ -119,7 +141,6 @@
V(_, dot_string, ".") \
V(_, dot_switch_tag_string, ".switch_tag") \
V(_, dotAll_string, "dotAll") \
- V(_, enqueue_string, "enqueue") \
V(_, entries_string, "entries") \
V(_, enumerable_string, "enumerable") \
V(_, element_string, "element") \
@@ -132,6 +153,7 @@
V(_, flags_string, "flags") \
V(_, Float32Array_string, "Float32Array") \
V(_, Float64Array_string, "Float64Array") \
+ V(_, from_string, "from") \
V(_, Function_string, "Function") \
V(_, function_native_code_string, "function () { [native code] }") \
V(_, function_string, "function") \
@@ -144,8 +166,6 @@
V(_, getPrototypeOf_string, "getPrototypeOf") \
V(_, global_string, "global") \
V(_, globalThis_string, "globalThis") \
- V(_, granularity_string, "granularity") \
- V(_, grapheme_string, "grapheme") \
V(_, groups_string, "groups") \
V(_, has_string, "has") \
V(_, ignoreCase_string, "ignoreCase") \
@@ -160,22 +180,18 @@
V(_, Int8Array_string, "Int8Array") \
V(_, isExtensible_string, "isExtensible") \
V(_, isView_string, "isView") \
- V(_, KeyedLoadMonomorphic_string, "KeyedLoadMonomorphic") \
- V(_, KeyedStoreMonomorphic_string, "KeyedStoreMonomorphic") \
V(_, keys_string, "keys") \
V(_, lastIndex_string, "lastIndex") \
V(_, length_string, "length") \
V(_, let_string, "let") \
- V(_, lineBreakStyle_string, "lineBreakStyle") \
V(_, line_string, "line") \
V(_, LinkError_string, "LinkError") \
V(_, long_string, "long") \
- V(_, loose_string, "loose") \
V(_, Map_string, "Map") \
V(_, MapIterator_string, "Map Iterator") \
V(_, message_string, "message") \
+ V(_, meta_string, "meta") \
V(_, minus_Infinity_string, "-Infinity") \
- V(_, minus_zero_string, "-0") \
V(_, Module_string, "Module") \
V(_, multiline_string, "multiline") \
V(_, name_string, "name") \
@@ -189,7 +205,6 @@
V(_, NFKC_string, "NFKC") \
V(_, NFKD_string, "NFKD") \
V(_, not_equal, "not-equal") \
- V(_, normal_string, "normal") \
V(_, null_string, "null") \
V(_, null_to_string, "[object Null]") \
V(_, Number_string, "Number") \
@@ -198,15 +213,16 @@
V(_, Object_string, "Object") \
V(_, object_string, "object") \
V(_, object_to_string, "[object Object]") \
+ V(_, of_string, "of") \
V(_, ok, "ok") \
V(_, one_string, "1") \
V(_, ownKeys_string, "ownKeys") \
V(_, percent_string, "percent") \
V(_, position_string, "position") \
V(_, preventExtensions_string, "preventExtensions") \
+ V(_, private_constructor_string, "#constructor") \
V(_, Promise_string, "Promise") \
V(_, promise_string, "promise") \
- V(_, PromiseResolveThenableJob_string, "PromiseResolveThenableJob") \
V(_, proto_string, "__proto__") \
V(_, prototype_string, "prototype") \
V(_, proxy_string, "proxy") \
@@ -214,11 +230,11 @@
V(_, query_colon_string, "(?:)") \
V(_, RangeError_string, "RangeError") \
V(_, raw_string, "raw") \
- V(_, ReconfigureToDataProperty_string, "ReconfigureToDataProperty") \
V(_, ReferenceError_string, "ReferenceError") \
+ V(_, ReflectGet_string, "Reflect.get") \
+ V(_, ReflectHas_string, "Reflect.has") \
V(_, RegExp_string, "RegExp") \
V(_, regexp_to_string, "[object RegExp]") \
- V(_, reject_string, "reject") \
V(_, resolve_string, "resolve") \
V(_, return_string, "return") \
V(_, revoke_string, "revoke") \
@@ -239,7 +255,6 @@
V(_, stackTraceLimit_string, "stackTraceLimit") \
V(_, star_default_star_string, "*default*") \
V(_, sticky_string, "sticky") \
- V(_, strict_string, "strict") \
V(_, String_string, "String") \
V(_, string_string, "string") \
V(_, string_to_string, "[object String]") \
@@ -247,6 +262,7 @@
V(_, Symbol_string, "Symbol") \
V(_, symbol_string, "symbol") \
V(_, SyntaxError_string, "SyntaxError") \
+ V(_, target_string, "target") \
V(_, then_string, "then") \
V(_, this_function_string, ".this_function") \
V(_, this_string, "this") \
@@ -264,15 +280,15 @@
V(_, undefined_to_string, "[object Undefined]") \
V(_, unicode_string, "unicode") \
V(_, URIError_string, "URIError") \
- V(_, use_asm_string, "use asm") \
- V(_, use_strict_string, "use strict") \
V(_, value_string, "value") \
V(_, valueOf_string, "valueOf") \
V(_, values_string, "values") \
+ V(_, WeakCell_string, "WeakCell") \
+ V(_, WeakFactory_string, "WeakFactory") \
V(_, WeakMap_string, "WeakMap") \
+ V(_, WeakRef_string, "WeakRef") \
V(_, WeakSet_string, "WeakSet") \
V(_, week_string, "week") \
- V(_, will_handle_string, "willHandle") \
V(_, word_string, "word") \
V(_, writable_string, "writable") \
V(_, zero_string, "0")
@@ -292,19 +308,15 @@
V(_, frozen_symbol) \
V(_, generic_symbol) \
V(_, home_object_symbol) \
- V(_, intl_initialized_marker_symbol) \
- V(_, intl_resolved_symbol) \
V(_, interpreter_trampoline_symbol) \
V(_, megamorphic_symbol) \
V(_, native_context_index_symbol) \
V(_, nonextensible_symbol) \
V(_, not_mapped_symbol) \
V(_, premonomorphic_symbol) \
- V(_, promise_async_stack_id_symbol) \
V(_, promise_debug_marker_symbol) \
V(_, promise_forwarding_handler_symbol) \
V(_, promise_handled_by_symbol) \
- V(_, promise_async_id_symbol) \
V(_, sealed_symbol) \
V(_, stack_trace_symbol) \
V(_, strict_function_transition_symbol) \
@@ -337,14 +349,15 @@
#define INCREMENTAL_SCOPES(F) \
/* MC_INCREMENTAL is the top-level incremental marking scope. */ \
F(MC_INCREMENTAL) \
- F(MC_INCREMENTAL_START) \
- F(MC_INCREMENTAL_SWEEPING) \
- F(MC_INCREMENTAL_WRAPPER_PROLOGUE) \
- F(MC_INCREMENTAL_WRAPPER_TRACING) \
+ F(MC_INCREMENTAL_EMBEDDER_PROLOGUE) \
+ F(MC_INCREMENTAL_EMBEDDER_TRACING) \
+ F(MC_INCREMENTAL_EXTERNAL_EPILOGUE) \
+ F(MC_INCREMENTAL_EXTERNAL_PROLOGUE) \
F(MC_INCREMENTAL_FINALIZE) \
F(MC_INCREMENTAL_FINALIZE_BODY) \
- F(MC_INCREMENTAL_EXTERNAL_EPILOGUE) \
- F(MC_INCREMENTAL_EXTERNAL_PROLOGUE)
+ F(MC_INCREMENTAL_LAYOUT_CHANGE) \
+ F(MC_INCREMENTAL_START) \
+ F(MC_INCREMENTAL_SWEEPING)
#define TOP_MC_SCOPES(F) \
F(MC_CLEAR) \
@@ -357,6 +370,7 @@
#define TRACER_SCOPES(F) \
INCREMENTAL_SCOPES(F) \
+ F(HEAP_EMBEDDER_TRACING_EPILOGUE) \
F(HEAP_EPILOGUE) \
F(HEAP_EPILOGUE_REDUCE_NEW_SPACE) \
F(HEAP_EXTERNAL_EPILOGUE) \
@@ -365,6 +379,8 @@
F(HEAP_PROLOGUE) \
TOP_MC_SCOPES(F) \
F(MC_CLEAR_DEPENDENT_CODE) \
+ F(MC_CLEAR_FLUSHABLE_BYTECODE) \
+ F(MC_CLEAR_FLUSHED_JS_FUNCTIONS) \
F(MC_CLEAR_MAPS) \
F(MC_CLEAR_SLOTS_BUFFER) \
F(MC_CLEAR_STORE_BUFFER) \
@@ -383,6 +399,9 @@
F(MC_EVACUATE_UPDATE_POINTERS_SLOTS_MAP_SPACE) \
F(MC_EVACUATE_UPDATE_POINTERS_TO_NEW_ROOTS) \
F(MC_EVACUATE_UPDATE_POINTERS_WEAK) \
+ F(MC_MARK_EMBEDDER_PROLOGUE) \
+ F(MC_MARK_EMBEDDER_TRACING) \
+ F(MC_MARK_EMBEDDER_TRACING_CLOSURE) \
F(MC_MARK_FINISH_INCREMENTAL) \
F(MC_MARK_MAIN) \
F(MC_MARK_ROOTS) \
@@ -393,10 +412,6 @@
F(MC_MARK_WEAK_CLOSURE_WEAK_HANDLES) \
F(MC_MARK_WEAK_CLOSURE_WEAK_ROOTS) \
F(MC_MARK_WEAK_CLOSURE_HARMONY) \
- F(MC_MARK_WRAPPERS) \
- F(MC_MARK_WRAPPER_EPILOGUE) \
- F(MC_MARK_WRAPPER_PROLOGUE) \
- F(MC_MARK_WRAPPER_TRACING) \
F(MC_SWEEP_CODE) \
F(MC_SWEEP_MAP) \
F(MC_SWEEP_OLD) \
diff --git a/deps/v8/src/heap/array-buffer-collector.cc b/deps/v8/src/heap/array-buffer-collector.cc
index 0cf4ae945d..6d4e1bb3c3 100644
--- a/deps/v8/src/heap/array-buffer-collector.cc
+++ b/deps/v8/src/heap/array-buffer-collector.cc
@@ -5,9 +5,11 @@
#include "src/heap/array-buffer-collector.h"
#include "src/base/template-utils.h"
+#include "src/cancelable-task.h"
#include "src/heap/array-buffer-tracker.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/heap-inl.h"
+#include "src/task-utils.h"
namespace v8 {
namespace internal {
@@ -28,13 +30,13 @@ void ArrayBufferCollector::QueueOrFreeGarbageAllocations(
if (heap_->ShouldReduceMemory()) {
FreeAllocationsHelper(heap_, allocations);
} else {
- base::LockGuard<base::Mutex> guard(&allocations_mutex_);
+ base::MutexGuard guard(&allocations_mutex_);
allocations_.push_back(std::move(allocations));
}
}
void ArrayBufferCollector::PerformFreeAllocations() {
- base::LockGuard<base::Mutex> guard(&allocations_mutex_);
+ base::MutexGuard guard(&allocations_mutex_);
for (const std::vector<JSArrayBuffer::Allocation>& allocations :
allocations_) {
FreeAllocationsHelper(heap_, allocations);
@@ -48,7 +50,7 @@ void ArrayBufferCollector::FreeAllocations() {
if (!heap_->IsTearingDown() && !heap_->ShouldReduceMemory() &&
FLAG_concurrent_array_buffer_freeing) {
V8::GetCurrentPlatform()->CallOnWorkerThread(
- MakeCancelableLambdaTask(heap_->isolate(), [this] {
+ MakeCancelableTask(heap_->isolate(), [this] {
TRACE_BACKGROUND_GC(
heap_->tracer(),
GCTracer::BackgroundScope::BACKGROUND_ARRAY_BUFFER_FREE);
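The base::LockGuard<base::Mutex> to base::MutexGuard renames that recur throughout this diff read like a convenience alias over the same RAII locking. A guess at the shape, with stubbed types, not verified against src/base/platform:

    namespace base {

    class Mutex {
     public:
      void Lock() {}    // stubbed; the real class wraps a platform mutex
      void Unlock() {}
    };

    template <typename M>
    class LockGuard {
     public:
      explicit LockGuard(M* m) : m_(m) { m_->Lock(); }
      ~LockGuard() { m_->Unlock(); }

     private:
      M* m_;
    };

    // The shorter spelling used by the new code.
    using MutexGuard = LockGuard<Mutex>;

    }  // namespace base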
diff --git a/deps/v8/src/heap/array-buffer-tracker-inl.h b/deps/v8/src/heap/array-buffer-tracker-inl.h
index 814cfce63a..08d4cc7d9e 100644
--- a/deps/v8/src/heap/array-buffer-tracker-inl.h
+++ b/deps/v8/src/heap/array-buffer-tracker-inl.h
@@ -15,13 +15,13 @@
namespace v8 {
namespace internal {
-void ArrayBufferTracker::RegisterNew(Heap* heap, JSArrayBuffer* buffer) {
+void ArrayBufferTracker::RegisterNew(Heap* heap, JSArrayBuffer buffer) {
if (buffer->backing_store() == nullptr) return;
const size_t length = buffer->byte_length();
- Page* page = Page::FromAddress(buffer->address());
+ Page* page = Page::FromHeapObject(buffer);
{
- base::LockGuard<base::Mutex> guard(page->mutex());
+ base::MutexGuard guard(page->mutex());
LocalArrayBufferTracker* tracker = page->local_tracker();
if (tracker == nullptr) {
page->AllocateLocalTracker();
@@ -38,13 +38,13 @@ void ArrayBufferTracker::RegisterNew(Heap* heap, JSArrayBuffer* buffer) {
->AdjustAmountOfExternalAllocatedMemory(length);
}
-void ArrayBufferTracker::Unregister(Heap* heap, JSArrayBuffer* buffer) {
+void ArrayBufferTracker::Unregister(Heap* heap, JSArrayBuffer buffer) {
if (buffer->backing_store() == nullptr) return;
- Page* page = Page::FromAddress(buffer->address());
+ Page* page = Page::FromHeapObject(buffer);
const size_t length = buffer->byte_length();
{
- base::LockGuard<base::Mutex> guard(page->mutex());
+ base::MutexGuard guard(page->mutex());
LocalArrayBufferTracker* tracker = page->local_tracker();
DCHECK_NOT_NULL(tracker);
tracker->Remove(buffer, length);
@@ -62,7 +62,8 @@ void LocalArrayBufferTracker::Free(Callback should_free) {
Isolate* isolate = page_->heap()->isolate();
for (TrackingData::iterator it = array_buffers_.begin();
it != array_buffers_.end();) {
- JSArrayBuffer* buffer = reinterpret_cast<JSArrayBuffer*>(it->first);
+ // Unchecked cast because the map might already be dead at this point.
+ JSArrayBuffer buffer = JSArrayBuffer::unchecked_cast(it->first);
const size_t length = it->second.length;
if (should_free(buffer)) {
@@ -88,7 +89,7 @@ void ArrayBufferTracker::FreeDead(Page* page, MarkingState* marking_state) {
// Callers need to ensure having the page lock.
LocalArrayBufferTracker* tracker = page->local_tracker();
if (tracker == nullptr) return;
- tracker->Free([marking_state](JSArrayBuffer* buffer) {
+ tracker->Free([marking_state](JSArrayBuffer buffer) {
return marking_state->IsWhite(buffer);
});
if (tracker->IsEmpty()) {
@@ -96,15 +97,14 @@ void ArrayBufferTracker::FreeDead(Page* page, MarkingState* marking_state) {
}
}
-void LocalArrayBufferTracker::Add(JSArrayBuffer* buffer, size_t length) {
+void LocalArrayBufferTracker::Add(JSArrayBuffer buffer, size_t length) {
page_->IncrementExternalBackingStoreBytes(
ExternalBackingStoreType::kArrayBuffer, length);
AddInternal(buffer, length);
}
-void LocalArrayBufferTracker::AddInternal(JSArrayBuffer* buffer,
- size_t length) {
+void LocalArrayBufferTracker::AddInternal(JSArrayBuffer buffer, size_t length) {
auto ret = array_buffers_.insert(
{buffer,
{buffer->backing_store(), length, buffer->backing_store(),
@@ -115,7 +115,7 @@ void LocalArrayBufferTracker::AddInternal(JSArrayBuffer* buffer,
DCHECK(ret.second);
}
-void LocalArrayBufferTracker::Remove(JSArrayBuffer* buffer, size_t length) {
+void LocalArrayBufferTracker::Remove(JSArrayBuffer buffer, size_t length) {
page_->DecrementExternalBackingStoreBytes(
ExternalBackingStoreType::kArrayBuffer, length);
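Page::FromAddress(buffer->address()) becoming Page::FromHeapObject(buffer) fits the by-value wrapper: the page is recovered by masking the object's tagged word down to the page boundary. A sketch under an assumed 512 KiB page alignment; the real constants live in V8's heap headers:

    #include <cstdint>

    // Assumed: pages are 2^19-byte aligned, and heap objects carry low tag
    // bits that the mask also clears.
    constexpr uintptr_t kPageSizeBits = 19;
    constexpr uintptr_t kPageAlignmentMask = (uintptr_t{1} << kPageSizeBits) - 1;

    uintptr_t PageFromHeapObject(uintptr_t tagged_ptr) {
      // Rounding down to the page boundary also drops the tag bits.
      return tagged_ptr & ~kPageAlignmentMask;
    }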
diff --git a/deps/v8/src/heap/array-buffer-tracker.cc b/deps/v8/src/heap/array-buffer-tracker.cc
index f35f2b3754..58cd4f9e43 100644
--- a/deps/v8/src/heap/array-buffer-tracker.cc
+++ b/deps/v8/src/heap/array-buffer-tracker.cc
@@ -23,21 +23,21 @@ void LocalArrayBufferTracker::Process(Callback callback) {
std::vector<JSArrayBuffer::Allocation> backing_stores_to_free;
TrackingData kept_array_buffers;
- JSArrayBuffer* new_buffer = nullptr;
- JSArrayBuffer* old_buffer = nullptr;
+ JSArrayBuffer new_buffer;
+ JSArrayBuffer old_buffer;
size_t freed_memory = 0;
for (TrackingData::iterator it = array_buffers_.begin();
it != array_buffers_.end(); ++it) {
old_buffer = it->first;
- DCHECK_EQ(page_, Page::FromAddress(old_buffer->address()));
+ DCHECK_EQ(page_, Page::FromHeapObject(old_buffer));
const CallbackResult result = callback(old_buffer, &new_buffer);
if (result == kKeepEntry) {
kept_array_buffers.insert(*it);
} else if (result == kUpdateEntry) {
- DCHECK_NOT_NULL(new_buffer);
- Page* target_page = Page::FromAddress(new_buffer->address());
+ DCHECK(!new_buffer.is_null());
+ Page* target_page = Page::FromHeapObject(new_buffer);
{
- base::LockGuard<base::Mutex> guard(target_page->mutex());
+ base::MutexGuard guard(target_page->mutex());
LocalArrayBufferTracker* tracker = target_page->local_tracker();
if (tracker == nullptr) {
target_page->AllocateLocalTracker();
@@ -92,7 +92,7 @@ void ArrayBufferTracker::PrepareToFreeDeadInNewSpace(Heap* heap) {
void ArrayBufferTracker::FreeAll(Page* page) {
LocalArrayBufferTracker* tracker = page->local_tracker();
if (tracker == nullptr) return;
- tracker->Free([](JSArrayBuffer* buffer) { return true; });
+ tracker->Free([](JSArrayBuffer buffer) { return true; });
if (tracker->IsEmpty()) {
page->ReleaseLocalTracker();
}
@@ -103,24 +103,23 @@ bool ArrayBufferTracker::ProcessBuffers(Page* page, ProcessingMode mode) {
if (tracker == nullptr) return true;
DCHECK(page->SweepingDone());
- tracker->Process(
- [mode](JSArrayBuffer* old_buffer, JSArrayBuffer** new_buffer) {
- MapWord map_word = old_buffer->map_word();
- if (map_word.IsForwardingAddress()) {
- *new_buffer = JSArrayBuffer::cast(map_word.ToForwardingAddress());
- return LocalArrayBufferTracker::kUpdateEntry;
- }
- return mode == kUpdateForwardedKeepOthers
- ? LocalArrayBufferTracker::kKeepEntry
- : LocalArrayBufferTracker::kRemoveEntry;
- });
+ tracker->Process([mode](JSArrayBuffer old_buffer, JSArrayBuffer* new_buffer) {
+ MapWord map_word = old_buffer->map_word();
+ if (map_word.IsForwardingAddress()) {
+ *new_buffer = JSArrayBuffer::cast(map_word.ToForwardingAddress());
+ return LocalArrayBufferTracker::kUpdateEntry;
+ }
+ return mode == kUpdateForwardedKeepOthers
+ ? LocalArrayBufferTracker::kKeepEntry
+ : LocalArrayBufferTracker::kRemoveEntry;
+ });
return tracker->IsEmpty();
}
-bool ArrayBufferTracker::IsTracked(JSArrayBuffer* buffer) {
- Page* page = Page::FromAddress(buffer->address());
+bool ArrayBufferTracker::IsTracked(JSArrayBuffer buffer) {
+ Page* page = Page::FromHeapObject(buffer);
{
- base::LockGuard<base::Mutex> guard(page->mutex());
+ base::MutexGuard guard(page->mutex());
LocalArrayBufferTracker* tracker = page->local_tracker();
if (tracker == nullptr) return false;
return tracker->IsTracked(buffer);
diff --git a/deps/v8/src/heap/array-buffer-tracker.h b/deps/v8/src/heap/array-buffer-tracker.h
index 3c00c2c486..3d1c2cab76 100644
--- a/deps/v8/src/heap/array-buffer-tracker.h
+++ b/deps/v8/src/heap/array-buffer-tracker.h
@@ -31,8 +31,8 @@ class ArrayBufferTracker : public AllStatic {
// Register/unregister a new JSArrayBuffer |buffer| for tracking. Guards all
// access to the tracker by taking the page lock for the corresponding page.
- inline static void RegisterNew(Heap* heap, JSArrayBuffer* buffer);
- inline static void Unregister(Heap* heap, JSArrayBuffer* buffer);
+ inline static void RegisterNew(Heap* heap, JSArrayBuffer buffer);
+ inline static void Unregister(Heap* heap, JSArrayBuffer buffer);
// Identifies all backing store pointers for dead JSArrayBuffers in new space.
// Does not take any locks and can only be called during Scavenge.
@@ -53,7 +53,7 @@ class ArrayBufferTracker : public AllStatic {
static bool ProcessBuffers(Page* page, ProcessingMode mode);
// Returns whether a buffer is currently tracked.
- static bool IsTracked(JSArrayBuffer* buffer);
+ static bool IsTracked(JSArrayBuffer buffer);
// Tears down the tracker and frees up all registered array buffers.
static void TearDown(Heap* heap);
@@ -70,13 +70,13 @@ class LocalArrayBufferTracker {
explicit LocalArrayBufferTracker(Page* page) : page_(page) {}
~LocalArrayBufferTracker();
- inline void Add(JSArrayBuffer* buffer, size_t length);
- inline void Remove(JSArrayBuffer* buffer, size_t length);
+ inline void Add(JSArrayBuffer buffer, size_t length);
+ inline void Remove(JSArrayBuffer buffer, size_t length);
// Frees up array buffers.
//
// Sample usage:
- // Free([](HeapObject* array_buffer) {
+ // Free([](HeapObject array_buffer) {
// if (should_free_internal(array_buffer)) return true;
// return false;
// });
@@ -87,21 +87,21 @@ class LocalArrayBufferTracker {
// what action to take on the buffer.
//
// Callback should be of type:
- // CallbackResult fn(JSArrayBuffer* buffer, JSArrayBuffer** new_buffer);
+ // CallbackResult fn(JSArrayBuffer buffer, JSArrayBuffer* new_buffer);
template <typename Callback>
void Process(Callback callback);
bool IsEmpty() const { return array_buffers_.empty(); }
- bool IsTracked(JSArrayBuffer* buffer) const {
+ bool IsTracked(JSArrayBuffer buffer) const {
return array_buffers_.find(buffer) != array_buffers_.end();
}
private:
class Hasher {
public:
- size_t operator()(JSArrayBuffer* buffer) const {
- return reinterpret_cast<size_t>(buffer) >> 3;
+ size_t operator()(JSArrayBuffer buffer) const {
+ return static_cast<size_t>(buffer.ptr() >> 3);
}
};
@@ -110,12 +110,12 @@ class LocalArrayBufferTracker {
// HeapNumber. The reason for tracking the length is that in the case of
// length being a HeapNumber, the buffer and its length may be stored on
// different memory pages, making it impossible to guarantee order of freeing.
- typedef std::unordered_map<JSArrayBuffer*, JSArrayBuffer::Allocation, Hasher>
+ typedef std::unordered_map<JSArrayBuffer, JSArrayBuffer::Allocation, Hasher>
TrackingData;
// Internal version of add that does not update counters. Requires separate
// logic for updating external memory counters.
- inline void AddInternal(JSArrayBuffer* buffer, size_t length);
+ inline void AddInternal(JSArrayBuffer buffer, size_t length);
inline Space* space();
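With JSArrayBuffer held by value, the tracker's std::unordered_map keys on the wrapper itself, so the Hasher hashes the tagged word directly instead of the C++ pointer. A standalone sketch with a stub in place of the real class:

    #include <cstddef>
    #include <cstdint>

    // Stub standing in for the by-value JSArrayBuffer wrapper.
    struct BufferStub {
      uintptr_t ptr() const { return tagged_; }
      uintptr_t tagged_;
    };

    struct Hasher {
      size_t operator()(BufferStub buffer) const {
        // Shift off the low bits: objects are at least 8-byte aligned,
        // so those bits carry no hash entropy.
        return static_cast<size_t>(buffer.ptr() >> 3);
      }
    };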
diff --git a/deps/v8/src/heap/barrier.h b/deps/v8/src/heap/barrier.h
index d945a83d90..a5a4b51263 100644
--- a/deps/v8/src/heap/barrier.h
+++ b/deps/v8/src/heap/barrier.h
@@ -7,6 +7,7 @@
#include "src/base/platform/condition-variable.h"
#include "src/base/platform/mutex.h"
+#include "src/base/platform/time.h"
namespace v8 {
namespace internal {
@@ -14,6 +15,10 @@ namespace internal {
// Barrier that can be used once to synchronize a dynamic number of tasks
// working concurrently.
//
+// The barrier takes a timeout which is used to avoid waiting for too long. If
+// any of the users ever reach the timeout they will disable the barrier and
+// signal others to fall through.
+//
// Usage:
// void RunConcurrently(OneshotBarrier* shared_barrier) {
// shared_barrier->Start();
@@ -31,20 +36,20 @@ namespace internal {
// immediately.
class OneshotBarrier {
public:
- OneshotBarrier() : tasks_(0), waiting_(0), done_(false) {}
+ explicit OneshotBarrier(base::TimeDelta timeout) : timeout_(timeout) {}
void Start() {
- base::LockGuard<base::Mutex> guard(&mutex_);
+ base::MutexGuard guard(&mutex_);
tasks_++;
}
void NotifyAll() {
- base::LockGuard<base::Mutex> guard(&mutex_);
+ base::MutexGuard guard(&mutex_);
if (waiting_ > 0) condition_.NotifyAll();
}
bool Wait() {
- base::LockGuard<base::Mutex> guard(&mutex_);
+ base::MutexGuard guard(&mutex_);
if (done_) return true;
DCHECK_LE(waiting_, tasks_);
@@ -54,7 +59,11 @@ class OneshotBarrier {
condition_.NotifyAll();
} else {
// Spurious wakeup is ok here.
- condition_.Wait(&mutex_);
+ if (!condition_.WaitFor(&mutex_, timeout_)) {
+ // If the predefined timeout was reached, stop waiting and signal to
+ // the other tasks that we are done.
+ done_ = true;
+ }
}
waiting_--;
return done_;
@@ -66,9 +75,10 @@ class OneshotBarrier {
private:
base::ConditionVariable condition_;
base::Mutex mutex_;
- int tasks_;
- int waiting_;
- bool done_;
+ base::TimeDelta timeout_;
+ int tasks_ = 0;
+ int waiting_ = 0;
+ bool done_ = false;
};
} // namespace internal
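Putting the new constructor together with the usage comment in the header, a worker loop would look roughly like this (hypothetical call site, assuming only OneshotBarrier and base::TimeDelta as they appear in this diff):

    // Each task announces itself, then alternates work with Wait() until the
    // barrier reports completion (or its timeout disables it).
    void RunConcurrently(OneshotBarrier* shared_barrier) {
      shared_barrier->Start();
      do {
        // ...process items; NotifyAll() after publishing work for others...
      } while (!shared_barrier->Wait());
    }

    // Construction now requires a timeout so a stalled task cannot block
    // the others forever:
    //   OneshotBarrier barrier(base::TimeDelta::FromMilliseconds(100));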
diff --git a/deps/v8/src/heap/code-stats.cc b/deps/v8/src/heap/code-stats.cc
index 5d8c2ab527..d8e1001106 100644
--- a/deps/v8/src/heap/code-stats.cc
+++ b/deps/v8/src/heap/code-stats.cc
@@ -4,6 +4,7 @@
#include "src/heap/code-stats.h"
+#include "src/code-comments.h"
#include "src/objects-inl.h"
#include "src/reloc-info.h"
@@ -11,21 +12,21 @@ namespace v8 {
namespace internal {
// Record code statistics.
-void CodeStatistics::RecordCodeAndMetadataStatistics(HeapObject* object,
+void CodeStatistics::RecordCodeAndMetadataStatistics(HeapObject object,
Isolate* isolate) {
if (object->IsScript()) {
- Script* script = Script::cast(object);
+ Script script = Script::cast(object);
// Log the size of external source code.
- Object* source = script->source();
+ Object source = script->source();
if (source->IsExternalString()) {
- ExternalString* external_source_string = ExternalString::cast(source);
+ ExternalString external_source_string = ExternalString::cast(source);
int size = isolate->external_script_source_size();
size += external_source_string->ExternalPayloadSize();
isolate->set_external_script_source_size(size);
}
} else if (object->IsAbstractCode()) {
// Record code+metadata statistics.
- AbstractCode* abstract_code = AbstractCode::cast(object);
+ AbstractCode abstract_code = AbstractCode::cast(object);
int size = abstract_code->SizeIncludingMetadata();
if (abstract_code->IsCode()) {
size += isolate->code_and_metadata_size();
@@ -60,7 +61,7 @@ void CodeStatistics::ResetCodeAndMetadataStatistics(Isolate* isolate) {
void CodeStatistics::CollectCodeStatistics(PagedSpace* space,
Isolate* isolate) {
HeapObjectIterator obj_it(space);
- for (HeapObject* obj = obj_it.Next(); obj != nullptr; obj = obj_it.Next()) {
+ for (HeapObject obj = obj_it.Next(); !obj.is_null(); obj = obj_it.Next()) {
RecordCodeAndMetadataStatistics(obj, isolate);
}
}
@@ -72,7 +73,7 @@ void CodeStatistics::CollectCodeStatistics(PagedSpace* space,
void CodeStatistics::CollectCodeStatistics(LargeObjectSpace* space,
Isolate* isolate) {
LargeObjectIterator obj_it(space);
- for (HeapObject* obj = obj_it.Next(); obj != nullptr; obj = obj_it.Next()) {
+ for (HeapObject obj = obj_it.Next(); !obj.is_null(); obj = obj_it.Next()) {
RecordCodeAndMetadataStatistics(obj, isolate);
}
}
@@ -164,42 +165,34 @@ void CodeStatistics::EnterComment(Isolate* isolate, const char* comment,
// Call for each nested comment start (start marked with '[ xxx', end marked
// with ']'). The CodeCommentsIterator 'cit' must point to a comment.
void CodeStatistics::CollectCommentStatistics(Isolate* isolate,
- RelocIterator* it) {
- DCHECK(!it->done());
- DCHECK(it->rinfo()->rmode() == RelocInfo::COMMENT);
- const char* tmp = reinterpret_cast<const char*>(it->rinfo()->data());
- if (tmp[0] != '[') {
+ CodeCommentsIterator* cit) {
+ DCHECK(cit->HasCurrent());
+ const char* comment_txt = cit->GetComment();
+ if (comment_txt[0] != '[') {
// Not a nested comment; skip
return;
}
// Search for end of nested comment or a new nested comment
- const char* const comment_txt =
- reinterpret_cast<const char*>(it->rinfo()->data());
- Address prev_pc = it->rinfo()->pc();
+ int prev_pc_offset = cit->GetPCOffset();
int flat_delta = 0;
- it->next();
- while (true) {
+ cit->Next();
+ for (; cit->HasCurrent(); cit->Next()) {
// All nested comments must be terminated properly, and therefore exit
// from the loop.
- DCHECK(!it->done());
- if (it->rinfo()->rmode() == RelocInfo::COMMENT) {
- const char* const txt =
- reinterpret_cast<const char*>(it->rinfo()->data());
- flat_delta += static_cast<int>(it->rinfo()->pc() - prev_pc);
- if (txt[0] == ']') break; // End of nested comment
- // A new comment
- CollectCommentStatistics(isolate, it);
- // Skip code that was covered with previous comment
- prev_pc = it->rinfo()->pc();
- }
- it->next();
+ const char* const txt = cit->GetComment();
+ flat_delta += cit->GetPCOffset() - prev_pc_offset;
+ if (txt[0] == ']') break; // End of nested comment
+ // A new comment
+ CollectCommentStatistics(isolate, cit);
+ // Skip code that was covered with previous comment
+ prev_pc_offset = cit->GetPCOffset();
}
EnterComment(isolate, comment_txt, flat_delta);
}
// Collects code comment statistics
-void CodeStatistics::CollectCodeCommentStatistics(HeapObject* obj,
+void CodeStatistics::CollectCodeCommentStatistics(HeapObject obj,
Isolate* isolate) {
// Bytecode objects do not contain RelocInfo. Only process code objects
// for code comment statistics.
@@ -207,22 +200,19 @@ void CodeStatistics::CollectCodeCommentStatistics(HeapObject* obj,
return;
}
- Code* code = Code::cast(obj);
- RelocIterator it(code);
+ Code code = Code::cast(obj);
+ CodeCommentsIterator cit(code->code_comments());
int delta = 0;
- Address prev_pc = code->raw_instruction_start();
- while (!it.done()) {
- if (it.rinfo()->rmode() == RelocInfo::COMMENT) {
- delta += static_cast<int>(it.rinfo()->pc() - prev_pc);
- CollectCommentStatistics(isolate, &it);
- prev_pc = it.rinfo()->pc();
- }
- it.next();
+ int prev_pc_offset = 0;
+ while (cit.HasCurrent()) {
+ delta += static_cast<int>(cit.GetPCOffset() - prev_pc_offset);
+ CollectCommentStatistics(isolate, &cit);
+ prev_pc_offset = cit.GetPCOffset();
+ cit.Next();
}
- DCHECK(code->raw_instruction_start() <= prev_pc &&
- prev_pc <= code->raw_instruction_end());
- delta += static_cast<int>(code->raw_instruction_end() - prev_pc);
+ DCHECK(0 <= prev_pc_offset && prev_pc_offset <= code->raw_instruction_size());
+ delta += static_cast<int>(code->raw_instruction_size() - prev_pc_offset);
EnterComment(isolate, "NoComment", delta);
}
#endif
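Stripped of the nesting logic, the new iteration protocol above reduces to a simple cursor over the code-comments table (hypothetical call site, using only the accessors that appear in this diff):

    // Walk every comment attached to a Code object.
    CodeCommentsIterator cit(code->code_comments());
    for (; cit.HasCurrent(); cit.Next()) {
      const char* text = cit.GetComment();  // NUL-terminated comment string
      int pc_offset = cit.GetPCOffset();    // offset into the instructions
      // '[' opens a nested region, ']' closes it; see
      // CollectCommentStatistics above for the recursive accounting.
    }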
diff --git a/deps/v8/src/heap/code-stats.h b/deps/v8/src/heap/code-stats.h
index ef964caa85..cc28640f2a 100644
--- a/deps/v8/src/heap/code-stats.h
+++ b/deps/v8/src/heap/code-stats.h
@@ -8,11 +8,11 @@
namespace v8 {
namespace internal {
-class Isolate;
+class CodeCommentsIterator;
class HeapObject;
+class Isolate;
class LargeObjectSpace;
class PagedSpace;
-class RelocIterator;
class CodeStatistics {
public:
@@ -31,12 +31,13 @@ class CodeStatistics {
#endif
private:
- static void RecordCodeAndMetadataStatistics(HeapObject* object,
+ static void RecordCodeAndMetadataStatistics(HeapObject object,
Isolate* isolate);
#ifdef DEBUG
- static void CollectCommentStatistics(Isolate* isolate, RelocIterator* it);
- static void CollectCodeCommentStatistics(HeapObject* obj, Isolate* isolate);
+ static void CollectCommentStatistics(Isolate* isolate,
+ CodeCommentsIterator* it);
+ static void CollectCodeCommentStatistics(HeapObject obj, Isolate* isolate);
static void EnterComment(Isolate* isolate, const char* comment, int delta);
static void ResetCodeStatistics(Isolate* isolate);
#endif
diff --git a/deps/v8/src/heap/concurrent-marking.cc b/deps/v8/src/heap/concurrent-marking.cc
index 5e147ca9a5..f98c4c400d 100644
--- a/deps/v8/src/heap/concurrent-marking.cc
+++ b/deps/v8/src/heap/concurrent-marking.cc
@@ -20,6 +20,7 @@
#include "src/heap/worklist.h"
#include "src/isolate.h"
#include "src/objects/hash-table-inl.h"
+#include "src/objects/slots-inl.h"
#include "src/utils-inl.h"
#include "src/utils.h"
#include "src/v8.h"
@@ -30,22 +31,25 @@ namespace internal {
class ConcurrentMarkingState final
: public MarkingStateBase<ConcurrentMarkingState, AccessMode::ATOMIC> {
public:
- explicit ConcurrentMarkingState(LiveBytesMap* live_bytes)
- : live_bytes_(live_bytes) {}
+ explicit ConcurrentMarkingState(MemoryChunkDataMap* memory_chunk_data)
+ : memory_chunk_data_(memory_chunk_data) {}
Bitmap* bitmap(const MemoryChunk* chunk) {
- return Bitmap::FromAddress(chunk->address() + MemoryChunk::kHeaderSize);
+ DCHECK_EQ(reinterpret_cast<intptr_t>(&chunk->marking_bitmap_) -
+ reinterpret_cast<intptr_t>(chunk),
+ MemoryChunk::kMarkBitmapOffset);
+ return chunk->marking_bitmap_;
}
void IncrementLiveBytes(MemoryChunk* chunk, intptr_t by) {
- (*live_bytes_)[chunk] += by;
+ (*memory_chunk_data_)[chunk].live_bytes += by;
}
// The live_bytes and SetLiveBytes methods of the marking state are
// not used by the concurrent marker.
private:
- LiveBytesMap* live_bytes_;
+ MemoryChunkDataMap* memory_chunk_data_;
};
// Helper class for storing in-object slot addresses and values.
@@ -53,19 +57,17 @@ class SlotSnapshot {
public:
SlotSnapshot() : number_of_slots_(0) {}
int number_of_slots() const { return number_of_slots_; }
- Object** slot(int i) const { return snapshot_[i].first; }
- Object* value(int i) const { return snapshot_[i].second; }
+ ObjectSlot slot(int i) const { return snapshot_[i].first; }
+ Object value(int i) const { return snapshot_[i].second; }
void clear() { number_of_slots_ = 0; }
- void add(Object** slot, Object* value) {
- snapshot_[number_of_slots_].first = slot;
- snapshot_[number_of_slots_].second = value;
- ++number_of_slots_;
+ void add(ObjectSlot slot, Object value) {
+ snapshot_[number_of_slots_++] = {slot, value};
}
private:
- static const int kMaxSnapshotSize = JSObject::kMaxInstanceSize / kPointerSize;
+ static const int kMaxSnapshotSize = JSObject::kMaxInstanceSize / kTaggedSize;
int number_of_slots_;
- std::pair<Object**, Object*> snapshot_[kMaxSnapshotSize];
+ std::pair<ObjectSlot, Object> snapshot_[kMaxSnapshotSize];
DISALLOW_COPY_AND_ASSIGN(SlotSnapshot);
};
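SlotSnapshot's job is to decouple reading an object's slots from marking through them: one pass records (slot, value) pairs, and a later pass replays them without touching the object again. A generic standalone sketch of the same shape:

    #include <utility>

    template <typename Slot, typename Value, int kMaxSlots>
    class SlotSnapshotSketch {
     public:
      int number_of_slots() const { return number_of_slots_; }
      Slot slot(int i) const { return snapshot_[i].first; }
      Value value(int i) const { return snapshot_[i].second; }
      void clear() { number_of_slots_ = 0; }
      void add(Slot slot, Value value) {
        // Record one (slot, value) pair; staying within kMaxSlots is the
        // caller's contract, as with kMaxSnapshotSize above.
        snapshot_[number_of_slots_++] = {slot, value};
      }

     private:
      int number_of_slots_ = 0;
      std::pair<Slot, Value> snapshot_[kMaxSlots];
    };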
@@ -76,37 +78,41 @@ class ConcurrentMarkingVisitor final
explicit ConcurrentMarkingVisitor(
ConcurrentMarking::MarkingWorklist* shared,
- ConcurrentMarking::MarkingWorklist* bailout, LiveBytesMap* live_bytes,
- WeakObjects* weak_objects,
+ MemoryChunkDataMap* memory_chunk_data, WeakObjects* weak_objects,
ConcurrentMarking::EmbedderTracingWorklist* embedder_objects, int task_id,
- bool embedder_tracing_enabled)
+ bool embedder_tracing_enabled, unsigned mark_compact_epoch,
+ bool is_forced_gc)
: shared_(shared, task_id),
- bailout_(bailout, task_id),
weak_objects_(weak_objects),
embedder_objects_(embedder_objects, task_id),
- marking_state_(live_bytes),
+ marking_state_(memory_chunk_data),
+ memory_chunk_data_(memory_chunk_data),
task_id_(task_id),
- embedder_tracing_enabled_(embedder_tracing_enabled) {}
+ embedder_tracing_enabled_(embedder_tracing_enabled),
+ mark_compact_epoch_(mark_compact_epoch),
+ is_forced_gc_(is_forced_gc) {}
template <typename T>
- static V8_INLINE T* Cast(HeapObject* object) {
+ static V8_INLINE T Cast(HeapObject object) {
return T::cast(object);
}
- bool ShouldVisit(HeapObject* object) {
+ bool ShouldVisit(HeapObject object) {
return marking_state_.GreyToBlack(object);
}
bool AllowDefaultJSObjectVisit() { return false; }
- void ProcessStrongHeapObject(HeapObject* host, Object** slot,
- HeapObject* heap_object) {
+ template <typename THeapObjectSlot>
+ void ProcessStrongHeapObject(HeapObject host, THeapObjectSlot slot,
+ HeapObject heap_object) {
MarkObject(heap_object);
MarkCompactCollector::RecordSlot(host, slot, heap_object);
}
- void ProcessWeakHeapObject(HeapObject* host, HeapObjectReference** slot,
- HeapObject* heap_object) {
+ template <typename THeapObjectSlot>
+ void ProcessWeakHeapObject(HeapObject host, THeapObjectSlot slot,
+ HeapObject heap_object) {
#ifdef THREAD_SANITIZER
// Perform a dummy acquire load to tell TSAN that there is no data race
// in mark-bit initialization. See MemoryChunk::Initialize for the
@@ -127,46 +133,67 @@ class ConcurrentMarkingVisitor final
}
}
- void VisitPointers(HeapObject* host, Object** start, Object** end) override {
- for (Object** slot = start; slot < end; slot++) {
- Object* object = base::AsAtomicPointer::Relaxed_Load(slot);
- DCHECK(!HasWeakHeapObjectTag(object));
- if (object->IsHeapObject()) {
- ProcessStrongHeapObject(host, slot, HeapObject::cast(object));
- }
- }
+ void VisitPointers(HeapObject host, ObjectSlot start,
+ ObjectSlot end) override {
+ VisitPointersImpl(host, start, end);
}
- void VisitPointers(HeapObject* host, MaybeObject** start,
- MaybeObject** end) override {
- for (MaybeObject** slot = start; slot < end; slot++) {
- MaybeObject* object = base::AsAtomicPointer::Relaxed_Load(slot);
- HeapObject* heap_object;
- if (object->GetHeapObjectIfStrong(&heap_object)) {
+ void VisitPointers(HeapObject host, MaybeObjectSlot start,
+ MaybeObjectSlot end) override {
+ VisitPointersImpl(host, start, end);
+ }
+
+ template <typename TSlot>
+ V8_INLINE void VisitPointersImpl(HeapObject host, TSlot start, TSlot end) {
+ using THeapObjectSlot = typename TSlot::THeapObjectSlot;
+ for (TSlot slot = start; slot < end; ++slot) {
+ typename TSlot::TObject object = slot.Relaxed_Load();
+ HeapObject heap_object;
+ if (object.GetHeapObjectIfStrong(&heap_object)) {
// If the reference changes concurrently from strong to weak, the write
// barrier will treat the weak reference as strong, so we won't miss the
// weak reference.
- ProcessStrongHeapObject(host, reinterpret_cast<Object**>(slot),
- heap_object);
- } else if (object->GetHeapObjectIfWeak(&heap_object)) {
- ProcessWeakHeapObject(
- host, reinterpret_cast<HeapObjectReference**>(slot), heap_object);
+ ProcessStrongHeapObject(host, THeapObjectSlot(slot), heap_object);
+ } else if (TSlot::kCanBeWeak &&
+ object.GetHeapObjectIfWeak(&heap_object)) {
+ ProcessWeakHeapObject(host, THeapObjectSlot(slot), heap_object);
}
}
}
// Weak list pointers should be ignored during marking. The lists are
// reconstructed after GC.
- void VisitCustomWeakPointers(HeapObject* host, Object** start,
- Object** end) override {}
+ void VisitCustomWeakPointers(HeapObject host, ObjectSlot start,
+ ObjectSlot end) final {}
+
+ void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) final {
+ DCHECK(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
+ HeapObject object = rinfo->target_object();
+ RecordRelocSlot(host, rinfo, object);
+ if (!marking_state_.IsBlackOrGrey(object)) {
+ if (host->IsWeakObject(object)) {
+ weak_objects_->weak_objects_in_code.Push(task_id_,
+ std::make_pair(object, host));
+ } else {
+ MarkObject(object);
+ }
+ }
+ }
+
+ void VisitCodeTarget(Code host, RelocInfo* rinfo) final {
+ DCHECK(RelocInfo::IsCodeTargetMode(rinfo->rmode()));
+ Code target = Code::GetCodeFromTargetAddress(rinfo->target_address());
+ RecordRelocSlot(host, rinfo, target);
+ MarkObject(target);
+ }
- void VisitPointersInSnapshot(HeapObject* host, const SlotSnapshot& snapshot) {
+ void VisitPointersInSnapshot(HeapObject host, const SlotSnapshot& snapshot) {
for (int i = 0; i < snapshot.number_of_slots(); i++) {
- Object** slot = snapshot.slot(i);
- Object* object = snapshot.value(i);
+ ObjectSlot slot = snapshot.slot(i);
+ Object object = snapshot.value(i);
DCHECK(!HasWeakHeapObjectTag(object));
if (!object->IsHeapObject()) continue;
- HeapObject* heap_object = HeapObject::cast(object);
+ HeapObject heap_object = HeapObject::cast(object);
MarkObject(heap_object);
MarkCompactCollector::RecordSlot(host, slot, heap_object);
}
@@ -176,34 +203,79 @@ class ConcurrentMarkingVisitor final
// JS object =================================================================
// ===========================================================================
- int VisitJSObject(Map* map, JSObject* object) {
+ int VisitJSObject(Map map, JSObject object) {
return VisitJSObjectSubclass(map, object);
}
- int VisitJSObjectFast(Map* map, JSObject* object) {
- return VisitJSObjectSubclass(map, object);
+ int VisitJSObjectFast(Map map, JSObject object) {
+ return VisitJSObjectSubclassFast(map, object);
}
- int VisitWasmInstanceObject(Map* map, WasmInstanceObject* object) {
+ int VisitWasmInstanceObject(Map map, WasmInstanceObject object) {
return VisitJSObjectSubclass(map, object);
}
+ int VisitJSWeakRef(Map map, JSWeakRef weak_ref) {
+ int size = VisitJSObjectSubclass(map, weak_ref);
+ if (size == 0) {
+ return 0;
+ }
+ if (weak_ref->target()->IsHeapObject()) {
+ HeapObject target = HeapObject::cast(weak_ref->target());
+ if (marking_state_.IsBlackOrGrey(target)) {
+ // Record the slot inside the JSWeakRef, since the
+ // VisitJSObjectSubclass above didn't visit it.
+ ObjectSlot slot =
+ HeapObject::RawField(weak_ref, JSWeakRef::kTargetOffset);
+ MarkCompactCollector::RecordSlot(weak_ref, slot, target);
+ } else {
+ // The JSWeakRef points to a potentially dead object. We have to
+ // process it once we know the liveness of the whole transitive closure.
+ weak_objects_->js_weak_refs.Push(task_id_, weak_ref);
+ }
+ }
+ return size;
+ }
+
+ int VisitJSWeakCell(Map map, JSWeakCell weak_cell) {
+ int size = VisitJSObjectSubclass(map, weak_cell);
+ if (size == 0) {
+ return 0;
+ }
+
+ if (weak_cell->target()->IsHeapObject()) {
+ HeapObject target = HeapObject::cast(weak_cell->target());
+ if (marking_state_.IsBlackOrGrey(target)) {
+ // Record the slot inside the JSWeakCell, since the
+ // VisitJSObjectSubclass above didn't visit it.
+ ObjectSlot slot =
+ HeapObject::RawField(weak_cell, JSWeakCell::kTargetOffset);
+ MarkCompactCollector::RecordSlot(weak_cell, slot, target);
+ } else {
+ // The JSWeakCell points to a potentially dead object. We have to
+ // process it once we know the liveness of the whole transitive closure.
+ weak_objects_->js_weak_cells.Push(task_id_, weak_cell);
+ }
+ }
+ return size;
+ }
+
// Some JS objects can carry back links to embedders that contain information
// relevant to the garbage collectors.
- int VisitJSApiObject(Map* map, JSObject* object) {
+ int VisitJSApiObject(Map map, JSObject object) {
return VisitEmbedderTracingSubclass(map, object);
}
- int VisitJSArrayBuffer(Map* map, JSArrayBuffer* object) {
+ int VisitJSArrayBuffer(Map map, JSArrayBuffer object) {
return VisitEmbedderTracingSubclass(map, object);
}
- int VisitJSDataView(Map* map, JSDataView* object) {
+ int VisitJSDataView(Map map, JSDataView object) {
return VisitEmbedderTracingSubclass(map, object);
}
- int VisitJSTypedArray(Map* map, JSTypedArray* object) {
+ int VisitJSTypedArray(Map map, JSTypedArray object) {
return VisitEmbedderTracingSubclass(map, object);
}
@@ -211,100 +283,178 @@ class ConcurrentMarkingVisitor final
// Strings with pointers =====================================================
// ===========================================================================
- int VisitConsString(Map* map, ConsString* object) {
- int size = ConsString::BodyDescriptor::SizeOf(map, object);
- const SlotSnapshot& snapshot = MakeSlotSnapshot(map, object, size);
- if (!ShouldVisit(object)) return 0;
- VisitPointersInSnapshot(object, snapshot);
- return size;
+ int VisitConsString(Map map, ConsString object) {
+ return VisitFullyWithSnapshot(map, object);
}
- int VisitSlicedString(Map* map, SlicedString* object) {
- int size = SlicedString::BodyDescriptor::SizeOf(map, object);
- const SlotSnapshot& snapshot = MakeSlotSnapshot(map, object, size);
- if (!ShouldVisit(object)) return 0;
- VisitPointersInSnapshot(object, snapshot);
- return size;
+ int VisitSlicedString(Map map, SlicedString object) {
+ return VisitFullyWithSnapshot(map, object);
}
- int VisitThinString(Map* map, ThinString* object) {
- int size = ThinString::BodyDescriptor::SizeOf(map, object);
- const SlotSnapshot& snapshot = MakeSlotSnapshot(map, object, size);
- if (!ShouldVisit(object)) return 0;
- VisitPointersInSnapshot(object, snapshot);
- return size;
+ int VisitThinString(Map map, ThinString object) {
+ return VisitFullyWithSnapshot(map, object);
}
// ===========================================================================
// Strings without pointers ==================================================
// ===========================================================================
- int VisitSeqOneByteString(Map* map, SeqOneByteString* object) {
- int size = SeqOneByteString::SizeFor(object->synchronized_length());
+ int VisitSeqOneByteString(Map map, SeqOneByteString object) {
if (!ShouldVisit(object)) return 0;
VisitMapPointer(object, object->map_slot());
- return size;
+ return SeqOneByteString::SizeFor(object->synchronized_length());
}
- int VisitSeqTwoByteString(Map* map, SeqTwoByteString* object) {
- int size = SeqTwoByteString::SizeFor(object->synchronized_length());
+ int VisitSeqTwoByteString(Map map, SeqTwoByteString object) {
if (!ShouldVisit(object)) return 0;
VisitMapPointer(object, object->map_slot());
- return size;
+ return SeqTwoByteString::SizeFor(object->synchronized_length());
}
// ===========================================================================
// Fixed array object ========================================================
// ===========================================================================
- int VisitFixedArray(Map* map, FixedArray* object) {
- return VisitLeftTrimmableArray(map, object);
+ int VisitFixedArrayWithProgressBar(Map map, FixedArray object,
+ MemoryChunk* chunk) {
+ // The concurrent marker can process larger chunks than the main thread
+ // marker.
+ const int kProgressBarScanningChunk =
+ RoundUp(kMaxRegularHeapObjectSize, kTaggedSize);
+ DCHECK(marking_state_.IsBlackOrGrey(object));
+ marking_state_.GreyToBlack(object);
+ int size = FixedArray::BodyDescriptor::SizeOf(map, object);
+ int start =
+ Max(FixedArray::BodyDescriptor::kStartOffset, chunk->progress_bar());
+ int end = Min(size, start + kProgressBarScanningChunk);
+ if (start < end) {
+ VisitPointers(object, HeapObject::RawField(object, start),
+ HeapObject::RawField(object, end));
+ chunk->set_progress_bar(end);
+ if (end < size) {
+ // The object can be pushed back onto the marking worklist only after
+ // the progress bar was updated.
+ shared_.Push(object);
+ }
+ }
+ return end - start;
}
- int VisitFixedDoubleArray(Map* map, FixedDoubleArray* object) {
- return VisitLeftTrimmableArray(map, object);
+ int VisitFixedArray(Map map, FixedArray object) {
+ // Arrays with the progress bar are not left-trimmable because they reside
+ // in the large object space.
+ MemoryChunk* chunk = MemoryChunk::FromHeapObject(object);
+ return chunk->IsFlagSet<AccessMode::ATOMIC>(MemoryChunk::HAS_PROGRESS_BAR)
+ ? VisitFixedArrayWithProgressBar(map, object, chunk)
+ : VisitLeftTrimmableArray(map, object);
}
- // ===========================================================================
- // Code object ===============================================================
- // ===========================================================================
-
- int VisitCode(Map* map, Code* object) {
- bailout_.Push(object);
- return 0;
+ int VisitFixedDoubleArray(Map map, FixedDoubleArray object) {
+ return VisitLeftTrimmableArray(map, object);
}
// ===========================================================================
// Side-effectful visitation.
// ===========================================================================
- int VisitBytecodeArray(Map* map, BytecodeArray* object) {
+ int VisitSharedFunctionInfo(Map map, SharedFunctionInfo shared_info) {
+ if (!ShouldVisit(shared_info)) return 0;
+
+ int size = SharedFunctionInfo::BodyDescriptor::SizeOf(map, shared_info);
+ VisitMapPointer(shared_info, shared_info->map_slot());
+ SharedFunctionInfo::BodyDescriptor::IterateBody(map, shared_info, size,
+ this);
+
+ // If the SharedFunctionInfo has old bytecode, mark it as flushable,
+ // otherwise visit the function data field strongly.
+ if (shared_info->ShouldFlushBytecode()) {
+ weak_objects_->bytecode_flushing_candidates.Push(task_id_, shared_info);
+ } else {
+ VisitPointer(shared_info, shared_info->RawField(
+ SharedFunctionInfo::kFunctionDataOffset));
+ }
+ return size;
+ }
+
+ int VisitBytecodeArray(Map map, BytecodeArray object) {
if (!ShouldVisit(object)) return 0;
int size = BytecodeArray::BodyDescriptor::SizeOf(map, object);
VisitMapPointer(object, object->map_slot());
BytecodeArray::BodyDescriptor::IterateBody(map, object, size, this);
- object->MakeOlder();
+ if (!is_forced_gc_) {
+ object->MakeOlder();
+ }
return size;
}
- int VisitMap(Map* meta_map, Map* map) {
- if (marking_state_.IsGrey(map)) {
- // Maps have ad-hoc weakness for descriptor arrays. They also clear the
- // code-cache. Conservatively visit strong fields skipping the
- // descriptor array field and the code cache field.
- VisitMapPointer(map, map->map_slot());
- VisitPointer(map, HeapObject::RawField(map, Map::kPrototypeOffset));
- VisitPointer(
- map, HeapObject::RawField(map, Map::kConstructorOrBackPointerOffset));
- VisitPointer(map, HeapObject::RawMaybeWeakField(
- map, Map::kTransitionsOrPrototypeInfoOffset));
- VisitPointer(map, HeapObject::RawField(map, Map::kDependentCodeOffset));
- bailout_.Push(map);
+ int VisitJSFunction(Map map, JSFunction object) {
+ int size = VisitJSObjectSubclass(map, object);
+
+ // Check if the JSFunction needs reset due to bytecode being flushed.
+ if (object->NeedsResetDueToFlushedBytecode()) {
+ weak_objects_->flushed_js_functions.Push(task_id_, object);
}
- return 0;
+
+ return size;
+ }
+
+ int VisitMap(Map meta_map, Map map) {
+ if (!ShouldVisit(map)) return 0;
+ int size = Map::BodyDescriptor::SizeOf(meta_map, map);
+ if (map->CanTransition()) {
+ // Maps that can transition share their descriptor arrays and require
+ // special visiting logic to avoid memory leaks.
+ // Since descriptor arrays are potentially shared, ensure that only the
+ // descriptors that belong to this map are marked. The first time a
+ // non-empty descriptor array is marked, its header is also visited. The
+ // slot holding the descriptor array will be implicitly recorded when the
+ // pointer fields of this map are visited.
+ DescriptorArray descriptors = map->synchronized_instance_descriptors();
+ MarkDescriptorArrayBlack(descriptors);
+ int number_of_own_descriptors = map->NumberOfOwnDescriptors();
+ if (number_of_own_descriptors) {
+ // It is possible that the concurrent marker observes the
+ // number_of_own_descriptors out of sync with the descriptors. In that
+ // case the marking write barrier for the descriptor array will ensure
+ // that all required descriptors are marked. The concurrent marker
+ // should just avoid crashing in that case. That's why we need the
+ // std::min<int>() below.
+ VisitDescriptors(descriptors,
+ std::min<int>(number_of_own_descriptors,
+ descriptors->number_of_descriptors()));
+ }
+ // Mark the pointer fields of the Map. Since the transitions array has
+ // been marked already, it is fine that one of these fields contains a
+ // pointer to it.
+ }
+ Map::BodyDescriptor::IterateBody(meta_map, map, size, this);
+ return size;
}
- int VisitTransitionArray(Map* map, TransitionArray* array) {
+ void VisitDescriptors(DescriptorArray descriptor_array,
+ int number_of_own_descriptors) {
+ int16_t new_marked = static_cast<int16_t>(number_of_own_descriptors);
+ int16_t old_marked = descriptor_array->UpdateNumberOfMarkedDescriptors(
+ mark_compact_epoch_, new_marked);
+ if (old_marked < new_marked) {
+ VisitPointers(
+ descriptor_array,
+ MaybeObjectSlot(descriptor_array->GetDescriptorSlot(old_marked)),
+ MaybeObjectSlot(descriptor_array->GetDescriptorSlot(new_marked)));
+ }
+ }
+
+ int VisitDescriptorArray(Map map, DescriptorArray array) {
+ if (!ShouldVisit(array)) return 0;
+ VisitMapPointer(array, array->map_slot());
+ int size = DescriptorArray::BodyDescriptor::SizeOf(map, array);
+ VisitPointers(array, array->GetFirstPointerSlot(),
+ array->GetDescriptorSlot(0));
+ VisitDescriptors(array, array->number_of_descriptors());
+ return size;
+ }
+
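Because descriptor arrays are shared between maps, VisitDescriptors only scans
the slice [old_marked, new_marked) that no task has visited yet in the current
mark-compact epoch: UpdateNumberOfMarkedDescriptors atomically advances a
per-array high-water mark, so concurrent tasks never scan the same descriptors
twice. A simplified sketch of that high-water mark, ignoring the epoch tag the
real implementation packs into the same field:

    #include <atomic>

    // Returns the previous mark; the caller visits [result, new_marked).
    int16_t UpdateMarkedCount(std::atomic<int16_t>& marked, int16_t new_marked) {
      int16_t old = marked.load(std::memory_order_relaxed);
      while (old < new_marked &&
             !marked.compare_exchange_weak(old, new_marked,
                                           std::memory_order_relaxed)) {
        // old is refreshed on CAS failure; retry until we win or another
        // task has already advanced the mark past new_marked.
      }
      return old;
    }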
+ int VisitTransitionArray(Map map, TransitionArray array) {
if (!ShouldVisit(array)) return 0;
VisitMapPointer(array, array->map_slot());
int size = TransitionArray::BodyDescriptor::SizeOf(map, array);
@@ -313,31 +463,31 @@ class ConcurrentMarkingVisitor final
return size;
}
- int VisitJSWeakCollection(Map* map, JSWeakCollection* object) {
+ int VisitJSWeakCollection(Map map, JSWeakCollection object) {
return VisitJSObjectSubclass(map, object);
}
- int VisitEphemeronHashTable(Map* map, EphemeronHashTable* table) {
+ int VisitEphemeronHashTable(Map map, EphemeronHashTable table) {
if (!ShouldVisit(table)) return 0;
weak_objects_->ephemeron_hash_tables.Push(task_id_, table);
for (int i = 0; i < table->Capacity(); i++) {
- Object** key_slot =
+ ObjectSlot key_slot =
table->RawFieldOfElementAt(EphemeronHashTable::EntryToIndex(i));
- HeapObject* key = HeapObject::cast(table->KeyAt(i));
+ HeapObject key = HeapObject::cast(table->KeyAt(i));
MarkCompactCollector::RecordSlot(table, key_slot, key);
- Object** value_slot =
+ ObjectSlot value_slot =
table->RawFieldOfElementAt(EphemeronHashTable::EntryToValueIndex(i));
if (marking_state_.IsBlackOrGrey(key)) {
VisitPointer(table, value_slot);
} else {
- Object* value_obj = table->ValueAt(i);
+ Object value_obj = table->ValueAt(i);
if (value_obj->IsHeapObject()) {
- HeapObject* value = HeapObject::cast(value_obj);
+ HeapObject value = HeapObject::cast(value_obj);
MarkCompactCollector::RecordSlot(table, value_slot, value);
// Revisit ephemerons with both key and value unreachable at end
@@ -355,7 +505,7 @@ class ConcurrentMarkingVisitor final
// Implements ephemeron semantics: Marks value if key is already reachable.
// Returns true if value was actually marked.
- bool VisitEphemeron(HeapObject* key, HeapObject* value) {
+ bool VisitEphemeron(HeapObject key, HeapObject value) {
if (marking_state_.IsBlackOrGrey(key)) {
if (marking_state_.WhiteToGrey(value)) {
shared_.Push(value);
@@ -369,7 +519,7 @@ class ConcurrentMarkingVisitor final
return false;
}
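
This is the classic ephemeron rule: a value is live only if its key is live,
so values are traced lazily and unresolved pairs are parked on
discovered_ephemerons for later rounds. A self-contained sketch of the fixed
point this converges to; Obj, IsMarked and Mark are illustrative stand-ins:

    #include <utility>
    #include <vector>

    struct Obj { bool marked = false; };
    bool IsMarked(Obj* o) { return o->marked; }
    void Mark(Obj* o) { o->marked = true; }

    // Iterate until no ephemeron makes progress; any pair still unresolved
    // has an unreachable key, so its value dies with it.
    void ProcessEphemerons(std::vector<std::pair<Obj*, Obj*>>& pairs) {
      bool progress = true;
      while (progress) {
        progress = false;
        for (auto& [key, value] : pairs) {
          if (IsMarked(key) && !IsMarked(value)) {
            Mark(value);      // key reachable => value reachable
            progress = true;  // marking value may unlock other pairs
          }
        }
      }
    }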
- void MarkObject(HeapObject* object) {
+ void MarkObject(HeapObject object) {
#ifdef THREAD_SANITIZER
// Perform a dummy acquire load to tell TSAN that there is no data race
// in mark-bit initialization. See MemoryChunk::Initialize for the
@@ -382,6 +532,14 @@ class ConcurrentMarkingVisitor final
}
}
+ void MarkDescriptorArrayBlack(DescriptorArray descriptors) {
+ marking_state_.WhiteToGrey(descriptors);
+ if (marking_state_.GreyToBlack(descriptors)) {
+ VisitPointers(descriptors, descriptors->GetFirstPointerSlot(),
+ descriptors->GetDescriptorSlot(0));
+ }
+ }
+
private:
// Helper class for collecting in-object slot addresses and values.
class SlotSnapshottingVisitor final : public ObjectVisitor {
@@ -391,40 +549,61 @@ class ConcurrentMarkingVisitor final
slot_snapshot_->clear();
}
- void VisitPointers(HeapObject* host, Object** start,
- Object** end) override {
- for (Object** p = start; p < end; p++) {
- Object* object = reinterpret_cast<Object*>(
- base::Relaxed_Load(reinterpret_cast<const base::AtomicWord*>(p)));
+ void VisitPointers(HeapObject host, ObjectSlot start,
+ ObjectSlot end) override {
+ for (ObjectSlot p = start; p < end; ++p) {
+ Object object = p.Relaxed_Load();
slot_snapshot_->add(p, object);
}
}
- void VisitPointers(HeapObject* host, MaybeObject** start,
- MaybeObject** end) override {
+ void VisitPointers(HeapObject host, MaybeObjectSlot start,
+ MaybeObjectSlot end) override {
// This should never happen, because we don't use snapshotting for objects
// which contain weak references.
UNREACHABLE();
}
+ void VisitCodeTarget(Code host, RelocInfo* rinfo) final {
+ // This should never happen, because snapshotting is performed only on
+ // JSObjects (and derived classes).
+ UNREACHABLE();
+ }
+
+ void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) final {
+ // This should never happen, because snapshotting is performed only on
+ // JSObjects (and derived classes).
+ UNREACHABLE();
+ }
+
+ void VisitCustomWeakPointers(HeapObject host, ObjectSlot start,
+ ObjectSlot end) override {
+ DCHECK(host->IsJSWeakCell() || host->IsJSWeakRef());
+ }
+
private:
SlotSnapshot* slot_snapshot_;
};
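
The snapshotting detour exists because the main thread may mutate a JSObject
(or migrate its map) while a concurrent task scans it: every slot is first
copied with a relaxed atomic load, the object is then re-checked via
ShouldVisit(), and marking proceeds from the consistent copy only. The copy
step, condensed, assuming word-sized tagged slots:

    #include <atomic>
    #include <cstdint>
    #include <utility>
    #include <vector>

    using SlotSnapshot = std::vector<std::pair<uintptr_t*, uintptr_t>>;

    // Copy [start, end) with relaxed loads (mirrors base::Relaxed_Load);
    // aligned word-sized loads cannot tear.
    void SnapshotSlots(uintptr_t* start, uintptr_t* end, SlotSnapshot* out) {
      for (uintptr_t* p = start; p < end; ++p) {
        uintptr_t value = reinterpret_cast<std::atomic<uintptr_t>*>(p)->load(
            std::memory_order_relaxed);
        out->emplace_back(p, value);
      }
    }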
template <typename T>
- int VisitJSObjectSubclass(Map* map, T* object) {
- int size = T::BodyDescriptor::SizeOf(map, object);
+ int VisitJSObjectSubclassFast(Map map, T object) {
+ DCHECK_IMPLIES(FLAG_unbox_double_fields, map->HasFastPointerLayout());
+ using TBodyDescriptor = typename T::FastBodyDescriptor;
+ return VisitJSObjectSubclass<T, TBodyDescriptor>(map, object);
+ }
+
+ template <typename T, typename TBodyDescriptor = typename T::BodyDescriptor>
+ int VisitJSObjectSubclass(Map map, T object) {
+ int size = TBodyDescriptor::SizeOf(map, object);
int used_size = map->UsedInstanceSize();
DCHECK_LE(used_size, size);
DCHECK_GE(used_size, T::kHeaderSize);
- const SlotSnapshot& snapshot = MakeSlotSnapshot(map, object, used_size);
- if (!ShouldVisit(object)) return 0;
- VisitPointersInSnapshot(object, snapshot);
- return size;
+ return VisitPartiallyWithSnapshot<T, TBodyDescriptor>(map, object,
+ used_size, size);
}
template <typename T>
- int VisitEmbedderTracingSubclass(Map* map, T* object) {
+ int VisitEmbedderTracingSubclass(Map map, T object) {
DCHECK(object->IsApiWrapper());
int size = VisitJSObjectSubclass(map, object);
if (size && embedder_tracing_enabled_) {
@@ -436,10 +615,10 @@ class ConcurrentMarkingVisitor final
}
template <typename T>
- int VisitLeftTrimmableArray(Map* map, T* object) {
+ int VisitLeftTrimmableArray(Map map, T object) {
// The synchronized_length() function checks that the length is a Smi.
// This is not necessarily the case if the array is being left-trimmed.
- Object* length = object->unchecked_synchronized_length();
+ Object length = object->unchecked_synchronized_length();
if (!ShouldVisit(object)) return 0;
// The cached length must be the actual length as the array is not black.
// Left trimming marks the array black before overwriting the length.
@@ -451,55 +630,85 @@ class ConcurrentMarkingVisitor final
}
template <typename T>
- const SlotSnapshot& MakeSlotSnapshot(Map* map, T* object, int size) {
+ int VisitFullyWithSnapshot(Map map, T object) {
+ using TBodyDescriptor = typename T::BodyDescriptor;
+ int size = TBodyDescriptor::SizeOf(map, object);
+ return VisitPartiallyWithSnapshot<T, TBodyDescriptor>(map, object, size,
+ size);
+ }
+
+ template <typename T, typename TBodyDescriptor = typename T::BodyDescriptor>
+ int VisitPartiallyWithSnapshot(Map map, T object, int used_size, int size) {
+ const SlotSnapshot& snapshot =
+ MakeSlotSnapshot<T, TBodyDescriptor>(map, object, used_size);
+ if (!ShouldVisit(object)) return 0;
+ VisitPointersInSnapshot(object, snapshot);
+ return size;
+ }
+
+ template <typename T, typename TBodyDescriptor>
+ const SlotSnapshot& MakeSlotSnapshot(Map map, T object, int size) {
SlotSnapshottingVisitor visitor(&slot_snapshot_);
- visitor.VisitPointer(object,
- reinterpret_cast<Object**>(object->map_slot()));
- T::BodyDescriptor::IterateBody(map, object, size, &visitor);
+ visitor.VisitPointer(object, ObjectSlot(object->map_slot().address()));
+ TBodyDescriptor::IterateBody(map, object, size, &visitor);
return slot_snapshot_;
}
+ void RecordRelocSlot(Code host, RelocInfo* rinfo, HeapObject target) {
+ MarkCompactCollector::RecordRelocSlotInfo info =
+ MarkCompactCollector::PrepareRecordRelocSlot(host, rinfo, target);
+ if (info.should_record) {
+ MemoryChunkData& data = (*memory_chunk_data_)[info.memory_chunk];
+ if (!data.typed_slots) {
+ data.typed_slots.reset(new TypedSlots());
+ }
+ data.typed_slots->Insert(info.slot_type, info.offset);
+ }
+ }
+
ConcurrentMarking::MarkingWorklist::View shared_;
- ConcurrentMarking::MarkingWorklist::View bailout_;
WeakObjects* weak_objects_;
ConcurrentMarking::EmbedderTracingWorklist::View embedder_objects_;
ConcurrentMarkingState marking_state_;
+ MemoryChunkDataMap* memory_chunk_data_;
int task_id_;
SlotSnapshot slot_snapshot_;
bool embedder_tracing_enabled_;
+ const unsigned mark_compact_epoch_;
+ bool is_forced_gc_;
};
// Strings can change maps due to conversion to thin string or external strings.
-// Use reinterpret cast to avoid data race in slow dchecks.
+// Use unchecked cast to avoid data race in slow dchecks.
template <>
-ConsString* ConcurrentMarkingVisitor::Cast(HeapObject* object) {
- return reinterpret_cast<ConsString*>(object);
+ConsString ConcurrentMarkingVisitor::Cast(HeapObject object) {
+ return ConsString::unchecked_cast(object);
}
template <>
-SlicedString* ConcurrentMarkingVisitor::Cast(HeapObject* object) {
- return reinterpret_cast<SlicedString*>(object);
+SlicedString ConcurrentMarkingVisitor::Cast(HeapObject object) {
+ return SlicedString::unchecked_cast(object);
}
template <>
-ThinString* ConcurrentMarkingVisitor::Cast(HeapObject* object) {
- return reinterpret_cast<ThinString*>(object);
+ThinString ConcurrentMarkingVisitor::Cast(HeapObject object) {
+ return ThinString::unchecked_cast(object);
}
template <>
-SeqOneByteString* ConcurrentMarkingVisitor::Cast(HeapObject* object) {
- return reinterpret_cast<SeqOneByteString*>(object);
+SeqOneByteString ConcurrentMarkingVisitor::Cast(HeapObject object) {
+ return SeqOneByteString::unchecked_cast(object);
}
template <>
-SeqTwoByteString* ConcurrentMarkingVisitor::Cast(HeapObject* object) {
- return reinterpret_cast<SeqTwoByteString*>(object);
+SeqTwoByteString ConcurrentMarkingVisitor::Cast(HeapObject object) {
+ return SeqTwoByteString::unchecked_cast(object);
}
// Fixed array can become a free space during left trimming.
template <>
-FixedArray* ConcurrentMarkingVisitor::Cast(HeapObject* object) {
- return reinterpret_cast<FixedArray*>(object);
+FixedArray ConcurrentMarkingVisitor::Cast(HeapObject object) {
+ return FixedArray::unchecked_cast(object);
}
class ConcurrentMarking::Task : public CancelableTask {
@@ -526,19 +735,17 @@ class ConcurrentMarking::Task : public CancelableTask {
};
ConcurrentMarking::ConcurrentMarking(Heap* heap, MarkingWorklist* shared,
- MarkingWorklist* bailout,
MarkingWorklist* on_hold,
WeakObjects* weak_objects,
EmbedderTracingWorklist* embedder_objects)
: heap_(heap),
shared_(shared),
- bailout_(bailout),
on_hold_(on_hold),
weak_objects_(weak_objects),
embedder_objects_(embedder_objects) {
// The runtime flag should be set only if the compile time flag was set.
#ifndef V8_CONCURRENT_MARKING
- CHECK(!FLAG_concurrent_marking);
+ CHECK(!FLAG_concurrent_marking && !FLAG_parallel_marking);
#endif
}
@@ -548,8 +755,9 @@ void ConcurrentMarking::Run(int task_id, TaskState* task_state) {
size_t kBytesUntilInterruptCheck = 64 * KB;
int kObjectsUntilInterruptCheck = 1000;
ConcurrentMarkingVisitor visitor(
- shared_, bailout_, &task_state->live_bytes, weak_objects_,
- embedder_objects_, task_id, heap_->local_embedder_heap_tracer()->InUse());
+ shared_, &task_state->memory_chunk_data, weak_objects_, embedder_objects_,
+ task_id, heap_->local_embedder_heap_tracer()->InUse(),
+ task_state->mark_compact_epoch, task_state->is_forced_gc);
double time_ms;
size_t marked_bytes = 0;
if (FLAG_trace_concurrent_marking) {
@@ -577,19 +785,20 @@ void ConcurrentMarking::Run(int task_id, TaskState* task_state) {
int objects_processed = 0;
while (current_marked_bytes < kBytesUntilInterruptCheck &&
objects_processed < kObjectsUntilInterruptCheck) {
- HeapObject* object;
+ HeapObject object;
if (!shared_->Pop(task_id, &object)) {
done = true;
break;
}
objects_processed++;
- Address new_space_top = heap_->new_space()->original_top();
- Address new_space_limit = heap_->new_space()->original_limit();
+ // The order of the two loads is important.
+ Address new_space_top = heap_->new_space()->original_top_acquire();
+ Address new_space_limit = heap_->new_space()->original_limit_relaxed();
Address addr = object->address();
if (new_space_top <= addr && addr < new_space_limit) {
on_hold_->Push(task_id, object);
} else {
- Map* map = object->synchronized_map();
+ Map map = object->synchronized_map();
current_marked_bytes += visitor.Visit(map, object);
}
}
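
The acquire load of original_top pairs with the release store the allocating
thread performs when it publishes a new allocation window, so a marking task
can never observe a fresh top together with a stale limit (the reverse, a
stale top with a fresh limit, only widens the window and is conservatively
safe). Objects inside [top, limit) may still be under construction and are
parked on on_hold_ instead of visited. The pattern in isolation, assuming the
allocator writes limit before release-storing top:

    #include <atomic>
    #include <cstdint>
    using Address = uintptr_t;

    std::atomic<Address> original_top;    // release-published by the allocator
    std::atomic<Address> original_limit;  // written before top is released

    bool InUnsafeNewSpaceWindow(Address addr) {
      Address top = original_top.load(std::memory_order_acquire);
      Address limit = original_limit.load(std::memory_order_relaxed);
      // The acquire keeps the limit load ordered after the top load and makes
      // everything written before the matching release store visible.
      return top <= addr && addr < limit;
    }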
@@ -614,7 +823,6 @@ void ConcurrentMarking::Run(int task_id, TaskState* task_state) {
}
shared_->FlushToGlobal(task_id);
- bailout_->FlushToGlobal(task_id);
on_hold_->FlushToGlobal(task_id);
embedder_objects_->FlushToGlobal(task_id);
@@ -624,6 +832,11 @@ void ConcurrentMarking::Run(int task_id, TaskState* task_state) {
weak_objects_->next_ephemerons.FlushToGlobal(task_id);
weak_objects_->discovered_ephemerons.FlushToGlobal(task_id);
weak_objects_->weak_references.FlushToGlobal(task_id);
+ weak_objects_->js_weak_refs.FlushToGlobal(task_id);
+ weak_objects_->js_weak_cells.FlushToGlobal(task_id);
+ weak_objects_->weak_objects_in_code.FlushToGlobal(task_id);
+ weak_objects_->bytecode_flushing_candidates.FlushToGlobal(task_id);
+ weak_objects_->flushed_js_functions.FlushToGlobal(task_id);
base::AsAtomicWord::Relaxed_Store<size_t>(&task_state->marked_bytes, 0);
total_marked_bytes_ += marked_bytes;
@@ -632,7 +845,7 @@ void ConcurrentMarking::Run(int task_id, TaskState* task_state) {
}
{
- base::LockGuard<base::Mutex> guard(&pending_lock_);
+ base::MutexGuard guard(&pending_lock_);
is_pending_[task_id] = false;
--pending_task_count_;
pending_condition_.NotifyAll();
@@ -646,9 +859,9 @@ void ConcurrentMarking::Run(int task_id, TaskState* task_state) {
}
void ConcurrentMarking::ScheduleTasks() {
+ DCHECK(FLAG_parallel_marking || FLAG_concurrent_marking);
DCHECK(!heap_->IsTearingDown());
- if (!FLAG_concurrent_marking) return;
- base::LockGuard<base::Mutex> guard(&pending_lock_);
+ base::MutexGuard guard(&pending_lock_);
DCHECK_EQ(0, pending_task_count_);
if (task_count_ == 0) {
static const int num_cores =
@@ -673,6 +886,9 @@ void ConcurrentMarking::ScheduleTasks() {
"Scheduling concurrent marking task %d\n", i);
}
task_state_[i].preemption_request = false;
+ task_state_[i].mark_compact_epoch =
+ heap_->mark_compact_collector()->epoch();
+ task_state_[i].is_forced_gc = heap_->is_current_gc_forced();
is_pending_[i] = true;
++pending_task_count_;
auto task =
@@ -685,9 +901,10 @@ void ConcurrentMarking::ScheduleTasks() {
}
void ConcurrentMarking::RescheduleTasksIfNeeded() {
- if (!FLAG_concurrent_marking || heap_->IsTearingDown()) return;
+ DCHECK(FLAG_parallel_marking || FLAG_concurrent_marking);
+ if (heap_->IsTearingDown()) return;
{
- base::LockGuard<base::Mutex> guard(&pending_lock_);
+ base::MutexGuard guard(&pending_lock_);
if (pending_task_count_ > 0) return;
}
if (!shared_->IsGlobalPoolEmpty() ||
@@ -698,8 +915,8 @@ void ConcurrentMarking::RescheduleTasksIfNeeded() {
}
bool ConcurrentMarking::Stop(StopRequest stop_request) {
- if (!FLAG_concurrent_marking) return false;
- base::LockGuard<base::Mutex> guard(&pending_lock_);
+ DCHECK(FLAG_parallel_marking || FLAG_concurrent_marking);
+ base::MutexGuard guard(&pending_lock_);
if (pending_task_count_ == 0) return false;
@@ -709,7 +926,7 @@ bool ConcurrentMarking::Stop(StopRequest stop_request) {
for (int i = 1; i <= task_count_; i++) {
if (is_pending_[i]) {
if (task_manager->TryAbort(cancelable_id_[i]) ==
- CancelableTaskManager::kTaskAborted) {
+ TryAbortResult::kTaskAborted) {
is_pending_[i] = false;
--pending_task_count_;
} else if (stop_request == StopRequest::PREEMPT_TASKS) {
@@ -730,32 +947,40 @@ bool ConcurrentMarking::Stop(StopRequest stop_request) {
bool ConcurrentMarking::IsStopped() {
if (!FLAG_concurrent_marking) return true;
- base::LockGuard<base::Mutex> guard(&pending_lock_);
+ base::MutexGuard guard(&pending_lock_);
return pending_task_count_ == 0;
}
-void ConcurrentMarking::FlushLiveBytes(
+void ConcurrentMarking::FlushMemoryChunkData(
MajorNonAtomicMarkingState* marking_state) {
DCHECK_EQ(pending_task_count_, 0);
for (int i = 1; i <= task_count_; i++) {
- LiveBytesMap& live_bytes = task_state_[i].live_bytes;
- for (auto pair : live_bytes) {
+ MemoryChunkDataMap& memory_chunk_data = task_state_[i].memory_chunk_data;
+ for (auto& pair : memory_chunk_data) {
// ClearLiveness sets the live bytes to zero.
// Pages with zero live bytes might already be unmapped.
- if (pair.second != 0) {
- marking_state->IncrementLiveBytes(pair.first, pair.second);
+ MemoryChunk* memory_chunk = pair.first;
+ MemoryChunkData& data = pair.second;
+ if (data.live_bytes) {
+ marking_state->IncrementLiveBytes(memory_chunk, data.live_bytes);
+ }
+ if (data.typed_slots) {
+ RememberedSet<OLD_TO_OLD>::MergeTyped(memory_chunk,
+ std::move(data.typed_slots));
}
}
- live_bytes.clear();
+ memory_chunk_data.clear();
task_state_[i].marked_bytes = 0;
}
total_marked_bytes_ = 0;
}
-void ConcurrentMarking::ClearLiveness(MemoryChunk* chunk) {
+void ConcurrentMarking::ClearMemoryChunkData(MemoryChunk* chunk) {
for (int i = 1; i <= task_count_; i++) {
- if (task_state_[i].live_bytes.count(chunk)) {
- task_state_[i].live_bytes[chunk] = 0;
+ auto it = task_state_[i].memory_chunk_data.find(chunk);
+ if (it != task_state_[i].memory_chunk_data.end()) {
+ it->second.live_bytes = 0;
+ it->second.typed_slots.reset();
}
}
}
@@ -772,8 +997,9 @@ size_t ConcurrentMarking::TotalMarkedBytes() {
ConcurrentMarking::PauseScope::PauseScope(ConcurrentMarking* concurrent_marking)
: concurrent_marking_(concurrent_marking),
- resume_on_exit_(concurrent_marking_->Stop(
- ConcurrentMarking::StopRequest::PREEMPT_TASKS)) {
+ resume_on_exit_(FLAG_concurrent_marking &&
+ concurrent_marking_->Stop(
+ ConcurrentMarking::StopRequest::PREEMPT_TASKS)) {
DCHECK_IMPLIES(resume_on_exit_, FLAG_concurrent_marking);
}
diff --git a/deps/v8/src/heap/concurrent-marking.h b/deps/v8/src/heap/concurrent-marking.h
index 34de02fea1..9b6b5f4a75 100644
--- a/deps/v8/src/heap/concurrent-marking.h
+++ b/deps/v8/src/heap/concurrent-marking.h
@@ -11,6 +11,7 @@
#include "src/base/platform/condition-variable.h"
#include "src/base/platform/mutex.h"
#include "src/cancelable-task.h"
+#include "src/heap/slot-set.h"
#include "src/heap/spaces.h"
#include "src/heap/worklist.h"
#include "src/utils.h"
@@ -24,8 +25,13 @@ class Isolate;
class MajorNonAtomicMarkingState;
struct WeakObjects;
-using LiveBytesMap =
- std::unordered_map<MemoryChunk*, intptr_t, MemoryChunk::Hasher>;
+struct MemoryChunkData {
+ intptr_t live_bytes;
+ std::unique_ptr<TypedSlots> typed_slots;
+};
+
+using MemoryChunkDataMap =
+ std::unordered_map<MemoryChunk*, MemoryChunkData, MemoryChunk::Hasher>;
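
Each worker task owns one MemoryChunkDataMap, so bumping live bytes or
recording a typed slot during marking never takes a lock; the main thread
folds the per-task maps into the global marking state in FlushMemoryChunkData
above, once all tasks have stopped. The shape of that fold, reduced to live
bytes with illustrative types:

    #include <cstdint>
    #include <unordered_map>

    struct ChunkData { intptr_t live_bytes = 0; };  // typed slots omitted
    using PerTaskMap = std::unordered_map<void*, ChunkData>;

    // Called once per task after all tasks have stopped; no races remain.
    void MergeInto(PerTaskMap& task_local, PerTaskMap* global) {
      for (auto& [chunk, data] : task_local) {
        (*global)[chunk].live_bytes += data.live_bytes;
      }
      task_local.clear();
    }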
class ConcurrentMarking {
public:
@@ -57,12 +63,11 @@ class ConcurrentMarking {
// Worklist::kMaxNumTasks being maxed at 8 (concurrent marking doesn't use
// task 0, reserved for the main thread).
static constexpr int kMaxTasks = 7;
- using MarkingWorklist = Worklist<HeapObject*, 64 /* segment size */>;
- using EmbedderTracingWorklist = Worklist<HeapObject*, 16 /* segment size */>;
+ using MarkingWorklist = Worklist<HeapObject, 64 /* segment size */>;
+ using EmbedderTracingWorklist = Worklist<HeapObject, 16 /* segment size */>;
ConcurrentMarking(Heap* heap, MarkingWorklist* shared,
- MarkingWorklist* bailout, MarkingWorklist* on_hold,
- WeakObjects* weak_objects,
+ MarkingWorklist* on_hold, WeakObjects* weak_objects,
EmbedderTracingWorklist* embedder_objects);
// Schedules asynchronous tasks to perform concurrent marking. Objects in the
@@ -75,11 +80,11 @@ class ConcurrentMarking {
bool Stop(StopRequest stop_request);
void RescheduleTasksIfNeeded();
- // Flushes the local live bytes into the given marking state.
- void FlushLiveBytes(MajorNonAtomicMarkingState* marking_state);
+ // Flushes memory chunk data using the given marking state.
+ void FlushMemoryChunkData(MajorNonAtomicMarkingState* marking_state);
// This function is called for a new space page that was cleared after
// scavenge and is going to be re-used.
- void ClearLiveness(MemoryChunk* chunk);
+ void ClearMemoryChunkData(MemoryChunk* chunk);
int TaskCount() { return task_count_; }
@@ -98,16 +103,16 @@ class ConcurrentMarking {
// The main thread sets this flag to true when it wants the concurrent
// marker to give up the worker thread.
std::atomic<bool> preemption_request;
-
- LiveBytesMap live_bytes;
+ MemoryChunkDataMap memory_chunk_data;
size_t marked_bytes = 0;
+ unsigned mark_compact_epoch;
+ bool is_forced_gc;
char cache_line_padding[64];
};
class Task;
void Run(int task_id, TaskState* task_state);
Heap* const heap_;
MarkingWorklist* const shared_;
- MarkingWorklist* const bailout_;
MarkingWorklist* const on_hold_;
WeakObjects* const weak_objects_;
EmbedderTracingWorklist* const embedder_objects_;
diff --git a/deps/v8/src/heap/embedder-tracing.cc b/deps/v8/src/heap/embedder-tracing.cc
index 198cdd4b1a..0ba84d8798 100644
--- a/deps/v8/src/heap/embedder-tracing.cc
+++ b/deps/v8/src/heap/embedder-tracing.cc
@@ -5,22 +5,31 @@
#include "src/heap/embedder-tracing.h"
#include "src/base/logging.h"
+#include "src/objects/embedder-data-slot.h"
+#include "src/objects/js-objects-inl.h"
namespace v8 {
namespace internal {
+void LocalEmbedderHeapTracer::SetRemoteTracer(EmbedderHeapTracer* tracer) {
+ if (remote_tracer_) remote_tracer_->isolate_ = nullptr;
+
+ remote_tracer_ = tracer;
+ if (remote_tracer_)
+ remote_tracer_->isolate_ = reinterpret_cast<v8::Isolate*>(isolate_);
+}
+
void LocalEmbedderHeapTracer::TracePrologue() {
if (!InUse()) return;
- CHECK(cached_wrappers_to_trace_.empty());
num_v8_marking_worklist_was_empty_ = 0;
+ embedder_worklist_empty_ = false;
remote_tracer_->TracePrologue();
}
void LocalEmbedderHeapTracer::TraceEpilogue() {
if (!InUse()) return;
- CHECK(cached_wrappers_to_trace_.empty());
remote_tracer_->TraceEpilogue();
}
@@ -36,37 +45,58 @@ void LocalEmbedderHeapTracer::EnterFinalPause() {
bool LocalEmbedderHeapTracer::Trace(double deadline) {
if (!InUse()) return true;
- DCHECK_EQ(0, NumberOfCachedWrappersToTrace());
return remote_tracer_->AdvanceTracing(deadline);
}
bool LocalEmbedderHeapTracer::IsRemoteTracingDone() {
- return (InUse()) ? cached_wrappers_to_trace_.empty() &&
- remote_tracer_->IsTracingDone()
- : true;
+ return !InUse() || remote_tracer_->IsTracingDone();
}
-void LocalEmbedderHeapTracer::RegisterWrappersWithRemoteTracer() {
+void LocalEmbedderHeapTracer::SetEmbedderStackStateForNextFinalization(
+ EmbedderHeapTracer::EmbedderStackState stack_state) {
if (!InUse()) return;
- if (cached_wrappers_to_trace_.empty()) {
- return;
- }
+ embedder_stack_state_ = stack_state;
+}
- remote_tracer_->RegisterV8References(cached_wrappers_to_trace_);
- cached_wrappers_to_trace_.clear();
+LocalEmbedderHeapTracer::ProcessingScope::ProcessingScope(
+ LocalEmbedderHeapTracer* tracer)
+ : tracer_(tracer) {
+ wrapper_cache_.reserve(kWrapperCacheSize);
}
-bool LocalEmbedderHeapTracer::RequiresImmediateWrapperProcessing() {
- const size_t kTooManyWrappers = 16000;
- return cached_wrappers_to_trace_.size() > kTooManyWrappers;
+LocalEmbedderHeapTracer::ProcessingScope::~ProcessingScope() {
+ if (!wrapper_cache_.empty()) {
+ tracer_->remote_tracer()->RegisterV8References(std::move(wrapper_cache_));
+ }
}
-void LocalEmbedderHeapTracer::SetEmbedderStackStateForNextFinalization(
- EmbedderHeapTracer::EmbedderStackState stack_state) {
- if (!InUse()) return;
+void LocalEmbedderHeapTracer::ProcessingScope::TracePossibleWrapper(
+ JSObject js_object) {
+ DCHECK(js_object->IsApiWrapper());
+ if (js_object->GetEmbedderFieldCount() < 2) return;
- embedder_stack_state_ = stack_state;
+ void* pointer0;
+ void* pointer1;
+ if (EmbedderDataSlot(js_object, 0).ToAlignedPointer(&pointer0) && pointer0 &&
+ EmbedderDataSlot(js_object, 1).ToAlignedPointer(&pointer1)) {
+ wrapper_cache_.push_back({pointer0, pointer1});
+ }
+ FlushWrapperCacheIfFull();
+}
+
+void LocalEmbedderHeapTracer::ProcessingScope::FlushWrapperCacheIfFull() {
+ if (wrapper_cache_.size() == wrapper_cache_.capacity()) {
+ tracer_->remote_tracer()->RegisterV8References(std::move(wrapper_cache_));
+ wrapper_cache_.clear();
+ wrapper_cache_.reserve(kWrapperCacheSize);
+ }
+}
+
+void LocalEmbedderHeapTracer::ProcessingScope::AddWrapperInfoForTesting(
+ WrapperInfo info) {
+ wrapper_cache_.push_back(info);
+ FlushWrapperCacheIfFull();
}
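
ProcessingScope replaces the old unbounded cached_wrappers_to_trace_ vector
with bounded batches: wrapper candidates reach the embedder in groups of
kWrapperCacheSize (1000), eagerly whenever the cache fills and once more from
the destructor. A hypothetical call site (the worklist and local_tracer names
are illustrative, not from this diff):

    // Sketch: batch wrapper candidates while draining a marking worklist.
    {
      LocalEmbedderHeapTracer::ProcessingScope scope(local_tracer);
      JSObject js_object;
      while (worklist.Pop(&js_object)) {
        scope.TracePossibleWrapper(js_object);  // flushes at 1000 entries
      }
    }  // ~ProcessingScope() hands the remainder to RegisterV8References()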
} // namespace internal
diff --git a/deps/v8/src/heap/embedder-tracing.h b/deps/v8/src/heap/embedder-tracing.h
index 2588200db9..09242042dd 100644
--- a/deps/v8/src/heap/embedder-tracing.h
+++ b/deps/v8/src/heap/embedder-tracing.h
@@ -13,74 +13,98 @@ namespace v8 {
namespace internal {
class Heap;
+class JSObject;
class V8_EXPORT_PRIVATE LocalEmbedderHeapTracer final {
public:
typedef std::pair<void*, void*> WrapperInfo;
+ typedef std::vector<WrapperInfo> WrapperCache;
- explicit LocalEmbedderHeapTracer(Isolate* isolate) : isolate_(isolate) {}
+ class V8_EXPORT_PRIVATE ProcessingScope {
+ public:
+ explicit ProcessingScope(LocalEmbedderHeapTracer* tracer);
+ ~ProcessingScope();
- ~LocalEmbedderHeapTracer() {
- if (remote_tracer_) remote_tracer_->isolate_ = nullptr;
- }
+ void TracePossibleWrapper(JSObject js_object);
- EmbedderHeapTracer* remote_tracer() const { return remote_tracer_; }
+ void AddWrapperInfoForTesting(WrapperInfo info);
- void SetRemoteTracer(EmbedderHeapTracer* tracer) {
- if (remote_tracer_) remote_tracer_->isolate_ = nullptr;
+ private:
+ static constexpr size_t kWrapperCacheSize = 1000;
+
+ void FlushWrapperCacheIfFull();
- remote_tracer_ = tracer;
- if (remote_tracer_)
- remote_tracer_->isolate_ = reinterpret_cast<v8::Isolate*>(isolate_);
+ LocalEmbedderHeapTracer* const tracer_;
+ WrapperCache wrapper_cache_;
+ };
+
+ explicit LocalEmbedderHeapTracer(Isolate* isolate) : isolate_(isolate) {}
+
+ ~LocalEmbedderHeapTracer() {
+ if (remote_tracer_) remote_tracer_->isolate_ = nullptr;
}
bool InUse() const { return remote_tracer_ != nullptr; }
+ EmbedderHeapTracer* remote_tracer() const { return remote_tracer_; }
+ void SetRemoteTracer(EmbedderHeapTracer* tracer);
void TracePrologue();
void TraceEpilogue();
void EnterFinalPause();
bool Trace(double deadline);
bool IsRemoteTracingDone();
- size_t NumberOfCachedWrappersToTrace() {
- return cached_wrappers_to_trace_.size();
- }
- void AddWrapperToTrace(WrapperInfo entry) {
- cached_wrappers_to_trace_.push_back(entry);
- }
- void ClearCachedWrappersToTrace() { cached_wrappers_to_trace_.clear(); }
- void RegisterWrappersWithRemoteTracer();
-
- // In order to avoid running out of memory we force tracing wrappers if there
- // are too many of them.
- bool RequiresImmediateWrapperProcessing();
-
void NotifyV8MarkingWorklistWasEmpty() {
num_v8_marking_worklist_was_empty_++;
}
+
bool ShouldFinalizeIncrementalMarking() {
static const size_t kMaxIncrementalFixpointRounds = 3;
return !FLAG_incremental_marking_wrappers || !InUse() ||
- IsRemoteTracingDone() ||
+ (IsRemoteTracingDone() && embedder_worklist_empty_) ||
num_v8_marking_worklist_was_empty_ > kMaxIncrementalFixpointRounds;
}
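
ShouldFinalizeIncrementalMarking now additionally requires the embedder
worklist to have been observed empty, but still caps the V8/embedder
ping-pong at kMaxIncrementalFixpointRounds empty observations so finalization
cannot be deferred indefinitely by late-arriving embedder work. The fixpoint
it bounds, sketched with illustrative helpers (DrainV8Worklist, deadline):

    // Sketch: marking alternates between V8 and the embedder until both are
    // done, or V8's worklist has been seen empty more than three times.
    int v8_worklist_was_empty = 0;
    for (;;) {
      bool v8_done = DrainV8Worklist();              // may enqueue embedder work
      bool embedder_done = tracer->Trace(deadline);  // may enqueue V8 work
      if (v8_done) ++v8_worklist_was_empty;
      if ((v8_done && embedder_done) ||
          v8_worklist_was_empty > kMaxIncrementalFixpointRounds) {
        break;  // finalize incremental marking
      }
    }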
void SetEmbedderStackStateForNextFinalization(
EmbedderHeapTracer::EmbedderStackState stack_state);
- private:
- typedef std::vector<WrapperInfo> WrapperCache;
+ void SetEmbedderWorklistEmpty(bool is_empty) {
+ embedder_worklist_empty_ = is_empty;
+ }
+ private:
Isolate* const isolate_;
- WrapperCache cached_wrappers_to_trace_;
EmbedderHeapTracer* remote_tracer_ = nullptr;
+
size_t num_v8_marking_worklist_was_empty_ = 0;
EmbedderHeapTracer::EmbedderStackState embedder_stack_state_ =
EmbedderHeapTracer::kUnknown;
+ // Indicates whether the embedder worklist was observed empty on the main
+ // thread. This is opportunistic as concurrent marking tasks may hold local
+ // segments of potential embedder fields to move to the main thread.
+ bool embedder_worklist_empty_ = false;
friend class EmbedderStackStateScope;
};
+class V8_EXPORT_PRIVATE EmbedderStackStateScope final {
+ public:
+ EmbedderStackStateScope(LocalEmbedderHeapTracer* local_tracer,
+ EmbedderHeapTracer::EmbedderStackState stack_state)
+ : local_tracer_(local_tracer),
+ old_stack_state_(local_tracer_->embedder_stack_state_) {
+ local_tracer_->embedder_stack_state_ = stack_state;
+ }
+
+ ~EmbedderStackStateScope() {
+ local_tracer_->embedder_stack_state_ = old_stack_state_;
+ }
+
+ private:
+ LocalEmbedderHeapTracer* const local_tracer_;
+ const EmbedderHeapTracer::EmbedderStackState old_stack_state_;
+};
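
EmbedderStackStateScope is plain RAII: it saves the tracer's current stack
state, overrides it for the extent of the scope, and restores the old value
on destruction. A hypothetical use, assuming an enumerator such as kEmpty
exists alongside the kUnknown default shown above:

    // Sketch: declare that no V8 references live on the native stack while
    // a synchronous GC runs, then restore the previous state.
    {
      EmbedderStackStateScope scope(local_tracer,
                                    EmbedderHeapTracer::kEmpty /* assumed */);
      TriggerFullGC();  // illustrative; finalization may skip stack scanning
    }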
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/heap/factory-inl.h b/deps/v8/src/heap/factory-inl.h
index eb1661aaee..f707cd242d 100644
--- a/deps/v8/src/heap/factory-inl.h
+++ b/deps/v8/src/heap/factory-inl.h
@@ -11,15 +11,16 @@
// Do not include anything from src/heap here!
#include "src/handles-inl.h"
#include "src/objects-inl.h"
+#include "src/objects/heap-number-inl.h"
+#include "src/objects/oddball.h"
#include "src/string-hasher.h"
namespace v8 {
namespace internal {
-#define ROOT_ACCESSOR(type, name, CamelName) \
- Handle<type> Factory::name() { \
- return Handle<type>(bit_cast<type**>( \
- &isolate()->heap()->roots_[RootIndex::k##CamelName])); \
+#define ROOT_ACCESSOR(Type, name, CamelName) \
+ Handle<Type> Factory::name() { \
+ return Handle<Type>(&isolate()->roots_table()[RootIndex::k##CamelName]); \
}
ROOT_LIST(ROOT_ACCESSOR)
#undef ROOT_ACCESSOR
diff --git a/deps/v8/src/heap/factory.cc b/deps/v8/src/heap/factory.cc
index 9535eb4b88..f82d8937c3 100644
--- a/deps/v8/src/heap/factory.cc
+++ b/deps/v8/src/heap/factory.cc
@@ -13,25 +13,34 @@
#include "src/builtins/constants-table-builder.h"
#include "src/compiler.h"
#include "src/conversions.h"
+#include "src/counters.h"
+#include "src/heap/mark-compact-inl.h"
#include "src/interpreter/interpreter.h"
#include "src/isolate-inl.h"
-#include "src/macro-assembler.h"
+#include "src/log.h"
#include "src/objects/api-callbacks.h"
#include "src/objects/arguments-inl.h"
#include "src/objects/bigint.h"
+#include "src/objects/cell-inl.h"
#include "src/objects/debug-objects-inl.h"
+#include "src/objects/embedder-data-array-inl.h"
+#include "src/objects/feedback-cell-inl.h"
+#include "src/objects/fixed-array-inl.h"
+#include "src/objects/foreign-inl.h"
#include "src/objects/frame-array-inl.h"
+#include "src/objects/instance-type-inl.h"
#include "src/objects/js-array-inl.h"
#include "src/objects/js-collection-inl.h"
#include "src/objects/js-generator-inl.h"
#include "src/objects/js-regexp-inl.h"
+#include "src/objects/js-weak-refs-inl.h"
#include "src/objects/literal-objects-inl.h"
#include "src/objects/microtask-inl.h"
-#include "src/objects/microtask-queue-inl.h"
#include "src/objects/module-inl.h"
#include "src/objects/promise-inl.h"
#include "src/objects/scope-info.h"
#include "src/objects/stack-frame-info-inl.h"
+#include "src/objects/struct-inl.h"
#include "src/unicode-cache.h"
#include "src/unicode-decoder.h"
@@ -61,7 +70,7 @@ void InitializeCode(Heap* heap, Handle<Code> code, int object_size,
Handle<ByteArray> source_position_table,
Handle<DeoptimizationData> deopt_data,
Handle<ByteArray> reloc_info,
- Handle<CodeDataContainer> data_container, uint32_t stub_key,
+ Handle<CodeDataContainer> data_container,
bool is_turbofanned, int stack_slots,
int safepoint_table_offset, int handler_table_offset) {
DCHECK(IsAligned(code->address(), kCodeAlignment));
@@ -80,9 +89,9 @@ void InitializeCode(Heap* heap, Handle<Code> code, int object_size,
code->set_handler_table_offset(handler_table_offset);
code->set_code_data_container(*data_container);
code->set_deoptimization_data(*deopt_data);
- code->set_stub_key(stub_key);
code->set_source_position_table(*source_position_table);
- code->set_constant_pool_offset(desc.instr_size - desc.constant_pool_size);
+ code->set_constant_pool_offset(desc.constant_pool_offset());
+ code->set_code_comments_offset(desc.code_comments_offset());
code->set_builtin_index(builtin_index);
// Allow self references to created code object by patching the handle to
@@ -94,11 +103,12 @@ void InitializeCode(Heap* heap, Handle<Code> code, int object_size,
auto builder = heap->isolate()->builtins_constants_table_builder();
if (builder != nullptr) builder->PatchSelfReference(self_ref, code);
}
- *(self_ref.location()) = *code;
+ *(self_ref.location()) = code->ptr();
}
// Migrate generated code.
- // The generated code can contain Object** values (typically from handles)
+ // The generated code can contain embedded objects (typically from handles)
+ // in a pointer-to-tagged-value format (i.e. with indirection like a handle)
// that are dereferenced during the copy to point directly to the actual heap
// objects. These pointers can include references to the code object itself,
// through the self_reference parameter.
@@ -113,38 +123,37 @@ void InitializeCode(Heap* heap, Handle<Code> code, int object_size,
} // namespace
-HeapObject* Factory::AllocateRawWithImmortalMap(int size,
- PretenureFlag pretenure,
- Map* map,
- AllocationAlignment alignment) {
- HeapObject* result = isolate()->heap()->AllocateRawWithRetryOrFail(
+HeapObject Factory::AllocateRawWithImmortalMap(int size,
+ PretenureFlag pretenure, Map map,
+ AllocationAlignment alignment) {
+ HeapObject result = isolate()->heap()->AllocateRawWithRetryOrFail(
size, Heap::SelectSpace(pretenure), alignment);
result->set_map_after_allocation(map, SKIP_WRITE_BARRIER);
return result;
}
-HeapObject* Factory::AllocateRawWithAllocationSite(
+HeapObject Factory::AllocateRawWithAllocationSite(
Handle<Map> map, PretenureFlag pretenure,
Handle<AllocationSite> allocation_site) {
DCHECK(map->instance_type() != MAP_TYPE);
int size = map->instance_size();
if (!allocation_site.is_null()) size += AllocationMemento::kSize;
AllocationSpace space = Heap::SelectSpace(pretenure);
- HeapObject* result =
+ HeapObject result =
isolate()->heap()->AllocateRawWithRetryOrFail(size, space);
WriteBarrierMode write_barrier_mode =
space == NEW_SPACE ? SKIP_WRITE_BARRIER : UPDATE_WRITE_BARRIER;
result->set_map_after_allocation(*map, write_barrier_mode);
if (!allocation_site.is_null()) {
- AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
- reinterpret_cast<Address>(result) + map->instance_size());
+ AllocationMemento alloc_memento = AllocationMemento::unchecked_cast(
+ Object(result->ptr() + map->instance_size()));
InitializeAllocationMemento(alloc_memento, *allocation_site);
}
return result;
}
-void Factory::InitializeAllocationMemento(AllocationMemento* memento,
- AllocationSite* allocation_site) {
+void Factory::InitializeAllocationMemento(AllocationMemento memento,
+ AllocationSite allocation_site) {
memento->set_map_after_allocation(*allocation_memento_map(),
SKIP_WRITE_BARRIER);
memento->set_allocation_site(allocation_site, SKIP_WRITE_BARRIER);
@@ -153,38 +162,37 @@ void Factory::InitializeAllocationMemento(AllocationMemento* memento,
}
}
-HeapObject* Factory::AllocateRawArray(int size, PretenureFlag pretenure) {
+HeapObject Factory::AllocateRawArray(int size, PretenureFlag pretenure) {
AllocationSpace space = Heap::SelectSpace(pretenure);
- HeapObject* result =
+ HeapObject result =
isolate()->heap()->AllocateRawWithRetryOrFail(size, space);
if (size > kMaxRegularHeapObjectSize && FLAG_use_marking_progress_bar) {
- MemoryChunk* chunk = MemoryChunk::FromAddress(result->address());
+ MemoryChunk* chunk = MemoryChunk::FromHeapObject(result);
chunk->SetFlag<AccessMode::ATOMIC>(MemoryChunk::HAS_PROGRESS_BAR);
}
return result;
}
-HeapObject* Factory::AllocateRawFixedArray(int length,
- PretenureFlag pretenure) {
+HeapObject Factory::AllocateRawFixedArray(int length, PretenureFlag pretenure) {
if (length < 0 || length > FixedArray::kMaxLength) {
isolate()->heap()->FatalProcessOutOfMemory("invalid array length");
}
return AllocateRawArray(FixedArray::SizeFor(length), pretenure);
}
-HeapObject* Factory::AllocateRawWeakArrayList(int capacity,
- PretenureFlag pretenure) {
+HeapObject Factory::AllocateRawWeakArrayList(int capacity,
+ PretenureFlag pretenure) {
if (capacity < 0 || capacity > WeakArrayList::kMaxCapacity) {
isolate()->heap()->FatalProcessOutOfMemory("invalid array length");
}
return AllocateRawArray(WeakArrayList::SizeForCapacity(capacity), pretenure);
}
-HeapObject* Factory::New(Handle<Map> map, PretenureFlag pretenure) {
+HeapObject Factory::New(Handle<Map> map, PretenureFlag pretenure) {
DCHECK(map->instance_type() != MAP_TYPE);
int size = map->instance_size();
AllocationSpace space = Heap::SelectSpace(pretenure);
- HeapObject* result =
+ HeapObject result =
isolate()->heap()->AllocateRawWithRetryOrFail(size, space);
// New space objects are allocated white.
WriteBarrierMode write_barrier_mode =
@@ -197,9 +205,9 @@ Handle<HeapObject> Factory::NewFillerObject(int size, bool double_align,
AllocationSpace space) {
AllocationAlignment alignment = double_align ? kDoubleAligned : kWordAligned;
Heap* heap = isolate()->heap();
- HeapObject* result = heap->AllocateRawWithRetryOrFail(size, space, alignment);
+ HeapObject result = heap->AllocateRawWithRetryOrFail(size, space, alignment);
#ifdef DEBUG
- MemoryChunk* chunk = MemoryChunk::FromAddress(result->address());
+ MemoryChunk* chunk = MemoryChunk::FromHeapObject(result);
DCHECK(chunk->owner()->identity() == space);
#endif
heap->CreateFillerObjectAt(result->address(), size, ClearRecordedSlots::kNo);
@@ -209,7 +217,7 @@ Handle<HeapObject> Factory::NewFillerObject(int size, bool double_align,
Handle<PrototypeInfo> Factory::NewPrototypeInfo() {
Handle<PrototypeInfo> result =
Handle<PrototypeInfo>::cast(NewStruct(PROTOTYPE_INFO_TYPE, TENURED));
- result->set_prototype_users(*empty_weak_array_list());
+ result->set_prototype_users(Smi::kZero);
result->set_registry_slot(PrototypeInfo::UNREGISTERED);
result->set_bit_field(0);
result->set_module_namespace(*undefined_value());
@@ -281,24 +289,24 @@ Handle<PropertyArray> Factory::NewPropertyArray(int length,
PretenureFlag pretenure) {
DCHECK_LE(0, length);
if (length == 0) return empty_property_array();
- HeapObject* result = AllocateRawFixedArray(length, pretenure);
+ HeapObject result = AllocateRawFixedArray(length, pretenure);
result->set_map_after_allocation(*property_array_map(), SKIP_WRITE_BARRIER);
Handle<PropertyArray> array(PropertyArray::cast(result), isolate());
array->initialize_length(length);
- MemsetPointer(array->data_start(), *undefined_value(), length);
+ MemsetTagged(array->data_start(), *undefined_value(), length);
return array;
}
Handle<FixedArray> Factory::NewFixedArrayWithFiller(RootIndex map_root_index,
- int length, Object* filler,
+ int length, Object filler,
PretenureFlag pretenure) {
- HeapObject* result = AllocateRawFixedArray(length, pretenure);
- DCHECK(Heap::RootIsImmortalImmovable(map_root_index));
- Map* map = Map::cast(isolate()->heap()->root(map_root_index));
+ HeapObject result = AllocateRawFixedArray(length, pretenure);
+ DCHECK(RootsTable::IsImmortalImmovable(map_root_index));
+ Map map = Map::cast(isolate()->root(map_root_index));
result->set_map_after_allocation(map, SKIP_WRITE_BARRIER);
Handle<FixedArray> array(FixedArray::cast(result), isolate());
array->set_length(length);
- MemsetPointer(array->data_start(), filler, length);
+ MemsetTagged(array->data_start(), filler, length);
return array;
}
@@ -324,15 +332,14 @@ Handle<T> Factory::NewWeakFixedArrayWithMap(RootIndex map_root_index,
// Zero-length case must be handled outside.
DCHECK_LT(0, length);
- HeapObject* result =
+ HeapObject result =
AllocateRawArray(WeakFixedArray::SizeFor(length), pretenure);
- Map* map = Map::cast(isolate()->heap()->root(map_root_index));
+ Map map = Map::cast(isolate()->root(map_root_index));
result->set_map_after_allocation(map, SKIP_WRITE_BARRIER);
Handle<WeakFixedArray> array(WeakFixedArray::cast(result), isolate());
array->set_length(length);
- MemsetPointer(array->data_start(),
- HeapObjectReference::Strong(*undefined_value()), length);
+ MemsetTagged(ObjectSlot(array->data_start()), *undefined_value(), length);
return Handle<T>::cast(array);
}
@@ -340,10 +347,6 @@ Handle<T> Factory::NewWeakFixedArrayWithMap(RootIndex map_root_index,
template Handle<FixedArray> Factory::NewFixedArrayWithMap<FixedArray>(
RootIndex, int, PretenureFlag);
-template Handle<DescriptorArray>
-Factory::NewWeakFixedArrayWithMap<DescriptorArray>(RootIndex, int,
- PretenureFlag);
-
Handle<FixedArray> Factory::NewFixedArray(int length, PretenureFlag pretenure) {
DCHECK_LE(0, length);
if (length == 0) return empty_fixed_array();
@@ -355,14 +358,13 @@ Handle<WeakFixedArray> Factory::NewWeakFixedArray(int length,
PretenureFlag pretenure) {
DCHECK_LE(0, length);
if (length == 0) return empty_weak_fixed_array();
- HeapObject* result =
+ HeapObject result =
AllocateRawArray(WeakFixedArray::SizeFor(length), pretenure);
- DCHECK(Heap::RootIsImmortalImmovable(RootIndex::kWeakFixedArrayMap));
+ DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kWeakFixedArrayMap));
result->set_map_after_allocation(*weak_fixed_array_map(), SKIP_WRITE_BARRIER);
Handle<WeakFixedArray> array(WeakFixedArray::cast(result), isolate());
array->set_length(length);
- MemsetPointer(array->data_start(),
- HeapObjectReference::Strong(*undefined_value()), length);
+ MemsetTagged(ObjectSlot(array->data_start()), *undefined_value(), length);
return array;
}
@@ -375,17 +377,17 @@ MaybeHandle<FixedArray> Factory::TryNewFixedArray(int length,
AllocationSpace space = Heap::SelectSpace(pretenure);
Heap* heap = isolate()->heap();
AllocationResult allocation = heap->AllocateRaw(size, space);
- HeapObject* result = nullptr;
+ HeapObject result;
if (!allocation.To(&result)) return MaybeHandle<FixedArray>();
if (size > kMaxRegularHeapObjectSize && FLAG_use_marking_progress_bar) {
- MemoryChunk* chunk = MemoryChunk::FromAddress(result->address());
+ MemoryChunk* chunk = MemoryChunk::FromHeapObject(result);
chunk->SetFlag<AccessMode::ATOMIC>(MemoryChunk::HAS_PROGRESS_BAR);
}
result->set_map_after_allocation(*fixed_array_map(), SKIP_WRITE_BARRIER);
Handle<FixedArray> array(FixedArray::cast(result), isolate());
array->set_length(length);
- MemsetPointer(array->data_start(), ReadOnlyRoots(heap).undefined_value(),
- length);
+ MemsetTagged(array->data_start(), ReadOnlyRoots(heap).undefined_value(),
+ length);
return array;
}
@@ -415,7 +417,7 @@ Handle<FeedbackVector> Factory::NewFeedbackVector(
DCHECK_LE(0, length);
int size = FeedbackVector::SizeFor(length);
- HeapObject* result =
+ HeapObject result =
AllocateRawWithImmortalMap(size, pretenure, *feedback_vector_map());
Handle<FeedbackVector> vector(FeedbackVector::cast(result), isolate());
vector->set_shared_function_info(*shared);
@@ -427,11 +429,29 @@ Handle<FeedbackVector> Factory::NewFeedbackVector(
vector->set_profiler_ticks(0);
vector->set_deopt_count(0);
// TODO(leszeks): Initialize based on the feedback metadata.
- MemsetPointer(vector->slots_start(),
- MaybeObject::FromObject(*undefined_value()), length);
+ MemsetTagged(ObjectSlot(vector->slots_start()), *undefined_value(), length);
return vector;
}
+Handle<EmbedderDataArray> Factory::NewEmbedderDataArray(
+ int length, PretenureFlag pretenure) {
+ DCHECK_LE(0, length);
+ int size = EmbedderDataArray::SizeFor(length);
+
+ HeapObject result =
+ AllocateRawWithImmortalMap(size, pretenure, *embedder_data_array_map());
+ Handle<EmbedderDataArray> array(EmbedderDataArray::cast(result), isolate());
+ array->set_length(length);
+
+ if (length > 0) {
+ ObjectSlot start(array->slots_start());
+ ObjectSlot end(array->slots_end());
+ size_t slot_count = end - start;
+ MemsetTagged(start, *undefined_value(), slot_count);
+ }
+ return array;
+}
+
Handle<ObjectBoilerplateDescription> Factory::NewObjectBoilerplateDescription(
int boilerplate, int all_properties, int index_keys, bool has_seen_proto) {
DCHECK_GE(boilerplate, 0);
@@ -469,14 +489,13 @@ Handle<ObjectBoilerplateDescription> Factory::NewObjectBoilerplateDescription(
Handle<FixedArrayBase> Factory::NewFixedDoubleArray(int length,
PretenureFlag pretenure) {
- DCHECK_LE(0, length);
if (length == 0) return empty_fixed_array();
- if (length > FixedDoubleArray::kMaxLength) {
+ if (length < 0 || length > FixedDoubleArray::kMaxLength) {
isolate()->heap()->FatalProcessOutOfMemory("invalid array length");
}
int size = FixedDoubleArray::SizeFor(length);
- Map* map = *fixed_double_array_map();
- HeapObject* result =
+ Map map = *fixed_double_array_map();
+ HeapObject result =
AllocateRawWithImmortalMap(size, pretenure, map, kDoubleAligned);
Handle<FixedDoubleArray> array(FixedDoubleArray::cast(result), isolate());
array->set_length(length);
@@ -497,7 +516,7 @@ Handle<FeedbackMetadata> Factory::NewFeedbackMetadata(int slot_count,
PretenureFlag tenure) {
DCHECK_LE(0, slot_count);
int size = FeedbackMetadata::SizeFor(slot_count);
- HeapObject* result =
+ HeapObject result =
AllocateRawWithImmortalMap(size, tenure, *feedback_metadata_map());
Handle<FeedbackMetadata> data(FeedbackMetadata::cast(result), isolate());
data->set_slot_count(slot_count);
@@ -520,34 +539,47 @@ Handle<FrameArray> Factory::NewFrameArray(int number_of_frames,
return Handle<FrameArray>::cast(result);
}
-Handle<SmallOrderedHashSet> Factory::NewSmallOrderedHashSet(
- int capacity, PretenureFlag pretenure) {
- DCHECK_LE(0, capacity);
- CHECK_LE(capacity, SmallOrderedHashSet::kMaxCapacity);
- DCHECK_EQ(0, capacity % SmallOrderedHashSet::kLoadFactor);
-
- int size = SmallOrderedHashSet::SizeFor(capacity);
- Map* map = *small_ordered_hash_set_map();
- HeapObject* result = AllocateRawWithImmortalMap(size, pretenure, map);
- Handle<SmallOrderedHashSet> table(SmallOrderedHashSet::cast(result),
- isolate());
+template <typename T>
+Handle<T> Factory::AllocateSmallOrderedHashTable(Handle<Map> map, int capacity,
+ PretenureFlag pretenure) {
+ // Capacity must be a power of two, since we depend on being able
+ // to divide and multiply by 2 (kLoadFactor) to derive capacity
+ // from the number of buckets. If we decide to change kLoadFactor
+ // to something other than 2, capacity should be stored as another
+ // field of this object.
+ DCHECK_EQ(T::kLoadFactor, 2);
+ capacity = base::bits::RoundUpToPowerOfTwo32(Max(T::kMinCapacity, capacity));
+ capacity = Min(capacity, T::kMaxCapacity);
+
+ DCHECK_LT(0, capacity);
+ DCHECK_EQ(0, capacity % T::kLoadFactor);
+
+ int size = T::SizeFor(capacity);
+ HeapObject result = AllocateRawWithImmortalMap(size, pretenure, *map);
+ Handle<T> table(T::cast(result), isolate());
table->Initialize(isolate(), capacity);
return table;
}
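
The normalization relies on kLoadFactor == 2, so the bucket count is
capacity / 2 by a shift, and rounding up to a power of two makes that exact.
Worked through with illustrative bounds (assume kMinCapacity = 4): a request
of 5 becomes Max(4, 5) = 5, rounds up to 8, and survives the Min clamp,
giving 4 buckets. The round-up is the usual bit-smearing trick:

    #include <cstdint>

    // Same contract as base::bits::RoundUpToPowerOfTwo32 for v >= 1.
    uint32_t RoundUpToPowerOfTwo32(uint32_t v) {
      v--;  // so exact powers of two map to themselves
      v |= v >> 1;  v |= v >> 2;  v |= v >> 4;
      v |= v >> 8;  v |= v >> 16;
      return v + 1;  // e.g. 5 -> 8, 8 -> 8, 9 -> 16
    }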
+Handle<SmallOrderedHashSet> Factory::NewSmallOrderedHashSet(
+ int capacity, PretenureFlag pretenure) {
+ return AllocateSmallOrderedHashTable<SmallOrderedHashSet>(
+ small_ordered_hash_set_map(), capacity, pretenure);
+}
+
Handle<SmallOrderedHashMap> Factory::NewSmallOrderedHashMap(
int capacity, PretenureFlag pretenure) {
- DCHECK_LE(0, capacity);
- CHECK_LE(capacity, SmallOrderedHashMap::kMaxCapacity);
- DCHECK_EQ(0, capacity % SmallOrderedHashMap::kLoadFactor);
-
- int size = SmallOrderedHashMap::SizeFor(capacity);
- Map* map = *small_ordered_hash_map_map();
- HeapObject* result = AllocateRawWithImmortalMap(size, pretenure, map);
- Handle<SmallOrderedHashMap> table(SmallOrderedHashMap::cast(result),
- isolate());
- table->Initialize(isolate(), capacity);
- return table;
+ return AllocateSmallOrderedHashTable<SmallOrderedHashMap>(
+ small_ordered_hash_map_map(), capacity, pretenure);
+}
+
+Handle<SmallOrderedNameDictionary> Factory::NewSmallOrderedNameDictionary(
+ int capacity, PretenureFlag pretenure) {
+ Handle<SmallOrderedNameDictionary> dict =
+ AllocateSmallOrderedHashTable<SmallOrderedNameDictionary>(
+ small_ordered_name_dictionary_map(), capacity, pretenure);
+ dict->SetHash(PropertyArray::kNoHashSentinel);
+ return dict;
}
Handle<OrderedHashSet> Factory::NewOrderedHashSet() {
@@ -558,6 +590,11 @@ Handle<OrderedHashMap> Factory::NewOrderedHashMap() {
return OrderedHashMap::Allocate(isolate(), OrderedHashMap::kMinCapacity);
}
+Handle<OrderedNameDictionary> Factory::NewOrderedNameDictionary() {
+ return OrderedNameDictionary::Allocate(isolate(),
+ OrderedNameDictionary::kMinCapacity);
+}
+
Handle<AccessorPair> Factory::NewAccessorPair() {
Handle<AccessorPair> accessors =
Handle<AccessorPair>::cast(NewStruct(ACCESSOR_PAIR_TYPE, TENURED));
@@ -595,6 +632,7 @@ Handle<String> Factory::InternalizeStringWithKey(StringTableKey* key) {
MaybeHandle<String> Factory::NewStringFromOneByte(Vector<const uint8_t> string,
PretenureFlag pretenure) {
+ DCHECK_NE(pretenure, TENURED_READ_ONLY);
int length = string.length();
if (length == 0) return empty_string();
if (length == 1) return LookupSingleCharacterStringFromCode(string[0]);
@@ -605,13 +643,14 @@ MaybeHandle<String> Factory::NewStringFromOneByte(Vector<const uint8_t> string,
DisallowHeapAllocation no_gc;
// Copy the characters into the new object.
- CopyChars(SeqOneByteString::cast(*result)->GetChars(), string.start(),
+ CopyChars(SeqOneByteString::cast(*result)->GetChars(no_gc), string.start(),
length);
return result;
}
MaybeHandle<String> Factory::NewStringFromUtf8(Vector<const char> string,
PretenureFlag pretenure) {
+ DCHECK_NE(pretenure, TENURED_READ_ONLY);
// Check for ASCII first since this is the common case.
const char* ascii_data = string.start();
int length = string.length();
@@ -638,7 +677,8 @@ MaybeHandle<String> Factory::NewStringFromUtf8(Vector<const char> string,
NewRawTwoByteString(non_ascii_start + utf16_length, pretenure), String);
// Copy ASCII portion.
- uint16_t* data = result->GetChars();
+ DisallowHeapAllocation no_gc;
+ uint16_t* data = result->GetChars(no_gc);
for (int i = 0; i < non_ascii_start; i++) {
*data++ = *ascii_data++;
}
@@ -651,23 +691,31 @@ MaybeHandle<String> Factory::NewStringFromUtf8(Vector<const char> string,
MaybeHandle<String> Factory::NewStringFromUtf8SubString(
Handle<SeqOneByteString> str, int begin, int length,
PretenureFlag pretenure) {
- const char* ascii_data =
- reinterpret_cast<const char*>(str->GetChars() + begin);
- int non_ascii_start = String::NonAsciiStart(ascii_data, length);
+ Access<UnicodeCache::Utf8Decoder> decoder(
+ isolate()->unicode_cache()->utf8_decoder());
+ int non_ascii_start;
+ int utf16_length = 0;
+ {
+ DisallowHeapAllocation no_gc;
+ const char* ascii_data =
+ reinterpret_cast<const char*>(str->GetChars(no_gc) + begin);
+ non_ascii_start = String::NonAsciiStart(ascii_data, length);
+ if (non_ascii_start < length) {
+ // Non-ASCII and we need to decode.
+ auto non_ascii = Vector<const char>(ascii_data + non_ascii_start,
+ length - non_ascii_start);
+ decoder->Reset(non_ascii);
+
+ utf16_length = static_cast<int>(decoder->Utf16Length());
+ }
+ }
+
if (non_ascii_start >= length) {
// If the string is ASCII, we can just make a substring.
// TODO(v8): the pretenure flag is ignored in this case.
return NewSubString(str, begin, begin + length);
}
- // Non-ASCII and we need to decode.
- auto non_ascii = Vector<const char>(ascii_data + non_ascii_start,
- length - non_ascii_start);
- Access<UnicodeCache::Utf8Decoder> decoder(
- isolate()->unicode_cache()->utf8_decoder());
- decoder->Reset(non_ascii);
-
- int utf16_length = static_cast<int>(decoder->Utf16Length());
DCHECK_GT(utf16_length, 0);
// Allocate string.
@@ -678,12 +726,14 @@ MaybeHandle<String> Factory::NewStringFromUtf8SubString(
// Update pointer references, since the original string may have moved after
// allocation.
- ascii_data = reinterpret_cast<const char*>(str->GetChars() + begin);
- non_ascii = Vector<const char>(ascii_data + non_ascii_start,
- length - non_ascii_start);
+ DisallowHeapAllocation no_gc;
+ const char* ascii_data =
+ reinterpret_cast<const char*>(str->GetChars(no_gc) + begin);
+ auto non_ascii = Vector<const char>(ascii_data + non_ascii_start,
+ length - non_ascii_start);
// Copy ASCII portion.
- uint16_t* data = result->GetChars();
+ uint16_t* data = result->GetChars(no_gc);
for (int i = 0; i < non_ascii_start; i++) {
*data++ = *ascii_data++;
}
@@ -696,19 +746,22 @@ MaybeHandle<String> Factory::NewStringFromUtf8SubString(
MaybeHandle<String> Factory::NewStringFromTwoByte(const uc16* string,
int length,
PretenureFlag pretenure) {
+ DCHECK_NE(pretenure, TENURED_READ_ONLY);
if (length == 0) return empty_string();
if (String::IsOneByte(string, length)) {
if (length == 1) return LookupSingleCharacterStringFromCode(string[0]);
Handle<SeqOneByteString> result;
ASSIGN_RETURN_ON_EXCEPTION(isolate(), result,
NewRawOneByteString(length, pretenure), String);
- CopyChars(result->GetChars(), string, length);
+ DisallowHeapAllocation no_gc;
+ CopyChars(result->GetChars(no_gc), string, length);
return result;
} else {
Handle<SeqTwoByteString> result;
ASSIGN_RETURN_ON_EXCEPTION(isolate(), result,
NewRawTwoByteString(length, pretenure), String);
- CopyChars(result->GetChars(), string, length);
+ DisallowHeapAllocation no_gc;
+ CopyChars(result->GetChars(no_gc), string, length);
return result;
}
}
@@ -775,11 +828,11 @@ Handle<SeqOneByteString> Factory::AllocateRawOneByteInternalizedString(
// The canonical empty_string is the only zero-length string we allow.
DCHECK_IMPLIES(
length == 0,
- isolate()->heap()->roots_[RootIndex::kempty_string] == nullptr);
+ isolate()->roots_table()[RootIndex::kempty_string] == kNullAddress);
- Map* map = *one_byte_internalized_string_map();
+ Map map = *one_byte_internalized_string_map();
int size = SeqOneByteString::SizeFor(length);
- HeapObject* result = AllocateRawWithImmortalMap(
+ HeapObject result = AllocateRawWithImmortalMap(
size,
isolate()->heap()->CanAllocateInReadOnlySpace() ? TENURED_READ_ONLY
: TENURED,
@@ -796,16 +849,17 @@ Handle<String> Factory::AllocateTwoByteInternalizedString(
CHECK_GE(String::kMaxLength, str.length());
DCHECK_NE(0, str.length()); // Use Heap::empty_string() instead.
- Map* map = *internalized_string_map();
+ Map map = *internalized_string_map();
int size = SeqTwoByteString::SizeFor(str.length());
- HeapObject* result = AllocateRawWithImmortalMap(size, TENURED, map);
+ HeapObject result = AllocateRawWithImmortalMap(size, TENURED, map);
Handle<SeqTwoByteString> answer(SeqTwoByteString::cast(result), isolate());
answer->set_length(str.length());
answer->set_hash_field(hash_field);
DCHECK_EQ(size, answer->Size());
+ DisallowHeapAllocation no_gc;
// Fill in the characters.
- MemCopy(answer->GetChars(), str.start(), str.length() * kUC16Size);
+ MemCopy(answer->GetChars(no_gc), str.start(), str.length() * kUC16Size);
return answer;
}
@@ -818,7 +872,7 @@ Handle<String> Factory::AllocateInternalizedStringImpl(T t, int chars,
// Compute map and object size.
int size;
- Map* map;
+ Map map;
if (is_one_byte) {
map = *one_byte_internalized_string_map();
size = SeqOneByteString::SizeFor(chars);
@@ -827,7 +881,7 @@ Handle<String> Factory::AllocateInternalizedStringImpl(T t, int chars,
size = SeqTwoByteString::SizeFor(chars);
}
- HeapObject* result = AllocateRawWithImmortalMap(
+ HeapObject result = AllocateRawWithImmortalMap(
size,
isolate()->heap()->CanAllocateInReadOnlySpace() ? TENURED_READ_ONLY
: TENURED,
@@ -836,11 +890,14 @@ Handle<String> Factory::AllocateInternalizedStringImpl(T t, int chars,
answer->set_length(chars);
answer->set_hash_field(hash_field);
DCHECK_EQ(size, answer->Size());
+ DisallowHeapAllocation no_gc;
if (is_one_byte) {
- WriteOneByteData(t, SeqOneByteString::cast(*answer)->GetChars(), chars);
+ WriteOneByteData(t, SeqOneByteString::cast(*answer)->GetChars(no_gc),
+ chars);
} else {
- WriteTwoByteData(t, SeqTwoByteString::cast(*answer)->GetChars(), chars);
+ WriteTwoByteData(t, SeqTwoByteString::cast(*answer)->GetChars(no_gc),
+ chars);
}
return answer;
}
@@ -851,7 +908,8 @@ Handle<String> Factory::NewInternalizedStringFromUtf8(Vector<const char> str,
if (IsOneByte(str, chars)) {
Handle<SeqOneByteString> result =
AllocateRawOneByteInternalizedString(str.length(), hash_field);
- MemCopy(result->GetChars(), str.start(), str.length());
+ DisallowHeapAllocation no_allocation;
+ MemCopy(result->GetChars(no_allocation), str.start(), str.length());
return result;
}
return AllocateInternalizedStringImpl<false>(str, chars, hash_field);
@@ -861,7 +919,8 @@ Handle<String> Factory::NewOneByteInternalizedString(Vector<const uint8_t> str,
uint32_t hash_field) {
Handle<SeqOneByteString> result =
AllocateRawOneByteInternalizedString(str.length(), hash_field);
- MemCopy(result->GetChars(), str.start(), str.length());
+ DisallowHeapAllocation no_allocation;
+ MemCopy(result->GetChars(no_allocation), str.start(), str.length());
return result;
}
@@ -870,7 +929,9 @@ Handle<String> Factory::NewOneByteInternalizedSubString(
uint32_t hash_field) {
Handle<SeqOneByteString> result =
AllocateRawOneByteInternalizedString(length, hash_field);
- MemCopy(result->GetChars(), string->GetChars() + offset, length);
+ DisallowHeapAllocation no_allocation;
+ MemCopy(result->GetChars(no_allocation),
+ string->GetChars(no_allocation) + offset, length);
return result;
}
@@ -950,7 +1011,7 @@ MaybeHandle<SeqOneByteString> Factory::NewRawOneByteString(
int size = SeqOneByteString::SizeFor(length);
DCHECK_GE(SeqOneByteString::kMaxSize, size);
- HeapObject* result =
+ HeapObject result =
AllocateRawWithImmortalMap(size, pretenure, *one_byte_string_map());
Handle<SeqOneByteString> string(SeqOneByteString::cast(result), isolate());
string->set_length(length);
@@ -968,7 +1029,7 @@ MaybeHandle<SeqTwoByteString> Factory::NewRawTwoByteString(
int size = SeqTwoByteString::SizeFor(length);
DCHECK_GE(SeqTwoByteString::kMaxSize, size);
- HeapObject* result =
+ HeapObject result =
AllocateRawWithImmortalMap(size, pretenure, *string_map());
Handle<SeqTwoByteString> string(SeqTwoByteString::cast(result), isolate());
string->set_length(length);
@@ -981,7 +1042,7 @@ Handle<String> Factory::LookupSingleCharacterStringFromCode(uint32_t code) {
if (code <= String::kMaxOneByteCharCodeU) {
{
DisallowHeapAllocation no_allocation;
- Object* value = single_character_string_cache()->get(code);
+ Object value = single_character_string_cache()->get(code);
if (value != *undefined_value()) {
return handle(String::cast(value), isolate());
}
@@ -1027,14 +1088,16 @@ static inline Handle<String> MakeOrFindTwoCharacterString(Isolate* isolate,
1)); // because of this.
Handle<SeqOneByteString> str =
isolate->factory()->NewRawOneByteString(2).ToHandleChecked();
- uint8_t* dest = str->GetChars();
+ DisallowHeapAllocation no_allocation;
+ uint8_t* dest = str->GetChars(no_allocation);
dest[0] = static_cast<uint8_t>(c1);
dest[1] = static_cast<uint8_t>(c2);
return str;
} else {
Handle<SeqTwoByteString> str =
isolate->factory()->NewRawTwoByteString(2).ToHandleChecked();
- uc16* dest = str->GetChars();
+ DisallowHeapAllocation no_allocation;
+ uc16* dest = str->GetChars(no_allocation);
dest[0] = c1;
dest[1] = c2;
return str;
@@ -1046,7 +1109,7 @@ Handle<String> ConcatStringContent(Handle<StringType> result,
Handle<String> first,
Handle<String> second) {
DisallowHeapAllocation pointer_stays_valid;
- SinkChar* sink = result->GetChars();
+ SinkChar* sink = result->GetChars(pointer_stays_valid);
String::WriteToFlat(*first, sink, 0, first->length());
String::WriteToFlat(*second, sink + first->length(), 0, second->length());
return result;
@@ -1106,17 +1169,17 @@ MaybeHandle<String> Factory::NewConsString(Handle<String> left,
Handle<SeqOneByteString> result =
NewRawOneByteString(length).ToHandleChecked();
DisallowHeapAllocation no_gc;
- uint8_t* dest = result->GetChars();
+ uint8_t* dest = result->GetChars(no_gc);
// Copy left part.
const uint8_t* src =
left->IsExternalString()
? Handle<ExternalOneByteString>::cast(left)->GetChars()
- : Handle<SeqOneByteString>::cast(left)->GetChars();
+ : Handle<SeqOneByteString>::cast(left)->GetChars(no_gc);
for (int i = 0; i < left_length; i++) *dest++ = src[i];
// Copy right part.
src = right->IsExternalString()
? Handle<ExternalOneByteString>::cast(right)->GetChars()
- : Handle<SeqOneByteString>::cast(right)->GetChars();
+ : Handle<SeqOneByteString>::cast(right)->GetChars(no_gc);
for (int i = 0; i < right_length; i++) *dest++ = src[i];
return result;
}
@@ -1163,7 +1226,8 @@ Handle<String> Factory::NewSurrogatePairString(uint16_t lead, uint16_t trail) {
Handle<SeqTwoByteString> str =
isolate()->factory()->NewRawTwoByteString(2).ToHandleChecked();
- uc16* dest = str->GetChars();
+ DisallowHeapAllocation no_allocation;
+ uc16* dest = str->GetChars(no_allocation);
dest[0] = lead;
dest[1] = trail;
return str;
@@ -1196,15 +1260,15 @@ Handle<String> Factory::NewProperSubString(Handle<String> str, int begin,
if (str->IsOneByteRepresentation()) {
Handle<SeqOneByteString> result =
NewRawOneByteString(length).ToHandleChecked();
- uint8_t* dest = result->GetChars();
DisallowHeapAllocation no_gc;
+ uint8_t* dest = result->GetChars(no_gc);
String::WriteToFlat(*str, dest, begin, end);
return result;
} else {
Handle<SeqTwoByteString> result =
NewRawTwoByteString(length).ToHandleChecked();
- uc16* dest = result->GetChars();
DisallowHeapAllocation no_gc;
+ uc16* dest = result->GetChars(no_gc);
String::WriteToFlat(*str, dest, begin, end);
return result;
}
@@ -1325,7 +1389,7 @@ Handle<Symbol> Factory::NewSymbol(PretenureFlag flag) {
// Statically ensure that it is safe to allocate symbols in paged spaces.
STATIC_ASSERT(Symbol::kSize <= kMaxRegularHeapObjectSize);
- HeapObject* result =
+ HeapObject result =
AllocateRawWithImmortalMap(Symbol::kSize, flag, *symbol_map());
// Generate a random hash value.
@@ -1347,27 +1411,58 @@ Handle<Symbol> Factory::NewPrivateSymbol(PretenureFlag flag) {
return symbol;
}
-Handle<Symbol> Factory::NewPrivateFieldSymbol() {
+Handle<Symbol> Factory::NewPrivateNameSymbol(Handle<String> name) {
Handle<Symbol> symbol = NewSymbol();
- symbol->set_is_private_field();
+ symbol->set_is_private_name();
+ symbol->set_name(*name);
return symbol;
}
+Handle<Context> Factory::NewContext(RootIndex map_root_index, int size,
+ int variadic_part_length,
+ PretenureFlag pretenure) {
+ DCHECK(RootsTable::IsImmortalImmovable(map_root_index));
+ DCHECK_LE(Context::kTodoHeaderSize, size);
+ DCHECK(IsAligned(size, kTaggedSize));
+ DCHECK_LE(Context::MIN_CONTEXT_SLOTS, variadic_part_length);
+ DCHECK_LE(Context::SizeFor(variadic_part_length), size);
+
+ Map map = Map::cast(isolate()->root(map_root_index));
+ HeapObject result = AllocateRawWithImmortalMap(size, pretenure, map);
+ Handle<Context> context(Context::cast(result), isolate());
+ context->set_length(variadic_part_length);
+ DCHECK_EQ(context->SizeFromMap(map), size);
+ if (size > Context::kTodoHeaderSize) {
+ ObjectSlot start = context->RawField(Context::kTodoHeaderSize);
+ ObjectSlot end = context->RawField(size);
+ size_t slot_count = end - start;
+ MemsetTagged(start, *undefined_value(), slot_count);
+ }
+ return context;
+}
+
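// [Sketch, not part of this patch] NewContext() above becomes the single
// funnel for the context allocators below; each caller derives the byte size
// from the slot count, so the two cannot drift apart. A hypothetical call,
// using only names visible in this hunk:
//
//   int n = scope_info->ContextLength();
//   Handle<Context> ctx = NewContext(RootIndex::kBlockContextMap,
//                                    Context::SizeFor(n), n, NOT_TENURED);
//
// Slots past kTodoHeaderSize are pre-filled with undefined via MemsetTagged().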
Handle<NativeContext> Factory::NewNativeContext() {
- Handle<NativeContext> context = NewFixedArrayWithMap<NativeContext>(
- RootIndex::kNativeContextMap, Context::NATIVE_CONTEXT_SLOTS, TENURED);
+ Handle<NativeContext> context = Handle<NativeContext>::cast(
+ NewContext(RootIndex::kNativeContextMap, NativeContext::kSize,
+ NativeContext::NATIVE_CONTEXT_SLOTS, TENURED));
+ context->set_scope_info(ReadOnlyRoots(isolate()).empty_scope_info());
+ context->set_previous(Context::unchecked_cast(Smi::zero()));
+ context->set_extension(*the_hole_value());
context->set_native_context(*context);
- context->set_errors_thrown(Smi::kZero);
- context->set_math_random_index(Smi::kZero);
+ context->set_errors_thrown(Smi::zero());
+ context->set_math_random_index(Smi::zero());
context->set_serialized_objects(*empty_fixed_array());
+ context->set_microtask_queue(nullptr);
return context;
}
Handle<Context> Factory::NewScriptContext(Handle<NativeContext> outer,
Handle<ScopeInfo> scope_info) {
DCHECK_EQ(scope_info->scope_type(), SCRIPT_SCOPE);
- Handle<Context> context = NewFixedArrayWithMap<Context>(
- RootIndex::kScriptContextMap, scope_info->ContextLength(), TENURED);
+ int variadic_part_length = scope_info->ContextLength();
+ Handle<Context> context = NewContext(RootIndex::kScriptContextMap,
+ Context::SizeFor(variadic_part_length),
+ variadic_part_length, TENURED);
context->set_scope_info(*scope_info);
context->set_previous(*outer);
context->set_extension(*the_hole_value());
@@ -1388,8 +1483,10 @@ Handle<Context> Factory::NewModuleContext(Handle<Module> module,
Handle<NativeContext> outer,
Handle<ScopeInfo> scope_info) {
DCHECK_EQ(scope_info->scope_type(), MODULE_SCOPE);
- Handle<Context> context = NewFixedArrayWithMap<Context>(
- RootIndex::kModuleContextMap, scope_info->ContextLength(), TENURED);
+ int variadic_part_length = scope_info->ContextLength();
+ Handle<Context> context = NewContext(RootIndex::kModuleContextMap,
+ Context::SizeFor(variadic_part_length),
+ variadic_part_length, TENURED);
context->set_scope_info(*scope_info);
context->set_previous(*outer);
context->set_extension(*module);
@@ -1400,8 +1497,6 @@ Handle<Context> Factory::NewModuleContext(Handle<Module> module,
Handle<Context> Factory::NewFunctionContext(Handle<Context> outer,
Handle<ScopeInfo> scope_info) {
- int length = scope_info->ContextLength();
- DCHECK_LE(Context::MIN_CONTEXT_SLOTS, length);
RootIndex mapRootIndex;
switch (scope_info->scope_type()) {
case EVAL_SCOPE:
@@ -1413,7 +1508,10 @@ Handle<Context> Factory::NewFunctionContext(Handle<Context> outer,
default:
UNREACHABLE();
}
- Handle<Context> context = NewFixedArrayWithMap<Context>(mapRootIndex, length);
+ int variadic_part_length = scope_info->ContextLength();
+ Handle<Context> context =
+ NewContext(mapRootIndex, Context::SizeFor(variadic_part_length),
+ variadic_part_length, NOT_TENURED);
context->set_scope_info(*scope_info);
context->set_previous(*outer);
context->set_extension(*the_hole_value());
@@ -1424,9 +1522,13 @@ Handle<Context> Factory::NewFunctionContext(Handle<Context> outer,
Handle<Context> Factory::NewCatchContext(Handle<Context> previous,
Handle<ScopeInfo> scope_info,
Handle<Object> thrown_object) {
+ DCHECK_EQ(scope_info->scope_type(), CATCH_SCOPE);
STATIC_ASSERT(Context::MIN_CONTEXT_SLOTS == Context::THROWN_OBJECT_INDEX);
- Handle<Context> context = NewFixedArrayWithMap<Context>(
- RootIndex::kCatchContextMap, Context::MIN_CONTEXT_SLOTS + 1);
+ // TODO(ishell): Take the details from CatchContext class.
+ int variadic_part_length = Context::MIN_CONTEXT_SLOTS + 1;
+ Handle<Context> context = NewContext(RootIndex::kCatchContextMap,
+ Context::SizeFor(variadic_part_length),
+ variadic_part_length, NOT_TENURED);
context->set_scope_info(*scope_info);
context->set_previous(*previous);
context->set_extension(*the_hole_value());
@@ -1445,8 +1547,11 @@ Handle<Context> Factory::NewDebugEvaluateContext(Handle<Context> previous,
Handle<HeapObject> ext = extension.is_null()
? Handle<HeapObject>::cast(the_hole_value())
: Handle<HeapObject>::cast(extension);
- Handle<Context> c = NewFixedArrayWithMap<Context>(
- RootIndex::kDebugEvaluateContextMap, Context::MIN_CONTEXT_SLOTS + 2);
+ // TODO(ishell): Take the details from the DebugEvaluateContext class.
+ int variadic_part_length = Context::MIN_CONTEXT_SLOTS + 2;
+ Handle<Context> c = NewContext(RootIndex::kDebugEvaluateContextMap,
+ Context::SizeFor(variadic_part_length),
+ variadic_part_length, NOT_TENURED);
c->set_scope_info(*scope_info);
c->set_previous(*previous);
c->set_native_context(previous->native_context());
@@ -1459,8 +1564,12 @@ Handle<Context> Factory::NewDebugEvaluateContext(Handle<Context> previous,
Handle<Context> Factory::NewWithContext(Handle<Context> previous,
Handle<ScopeInfo> scope_info,
Handle<JSReceiver> extension) {
- Handle<Context> context = NewFixedArrayWithMap<Context>(
- RootIndex::kWithContextMap, Context::MIN_CONTEXT_SLOTS);
+ DCHECK_EQ(scope_info->scope_type(), WITH_SCOPE);
+ // TODO(ishell): Take the details from WithContext class.
+ int variadic_part_length = Context::MIN_CONTEXT_SLOTS;
+ Handle<Context> context = NewContext(RootIndex::kWithContextMap,
+ Context::SizeFor(variadic_part_length),
+ variadic_part_length, NOT_TENURED);
context->set_scope_info(*scope_info);
context->set_previous(*previous);
context->set_extension(*extension);
@@ -1471,8 +1580,10 @@ Handle<Context> Factory::NewWithContext(Handle<Context> previous,
Handle<Context> Factory::NewBlockContext(Handle<Context> previous,
Handle<ScopeInfo> scope_info) {
DCHECK_EQ(scope_info->scope_type(), BLOCK_SCOPE);
- Handle<Context> context = NewFixedArrayWithMap<Context>(
- RootIndex::kBlockContextMap, scope_info->ContextLength());
+ int variadic_part_length = scope_info->ContextLength();
+ Handle<Context> context = NewContext(RootIndex::kBlockContextMap,
+ Context::SizeFor(variadic_part_length),
+ variadic_part_length, NOT_TENURED);
context->set_scope_info(*scope_info);
context->set_previous(*previous);
context->set_extension(*the_hole_value());
@@ -1481,18 +1592,20 @@ Handle<Context> Factory::NewBlockContext(Handle<Context> previous,
}
Handle<Context> Factory::NewBuiltinContext(Handle<NativeContext> native_context,
- int length) {
- DCHECK_GE(length, Context::MIN_CONTEXT_SLOTS);
- Handle<Context> context =
- NewFixedArrayWithMap<Context>(RootIndex::kFunctionContextMap, length);
+ int variadic_part_length) {
+ DCHECK_LE(Context::MIN_CONTEXT_SLOTS, variadic_part_length);
+ Handle<Context> context = NewContext(RootIndex::kFunctionContextMap,
+ Context::SizeFor(variadic_part_length),
+ variadic_part_length, NOT_TENURED);
context->set_scope_info(ReadOnlyRoots(isolate()).empty_scope_info());
+ context->set_previous(*native_context);
context->set_extension(*the_hole_value());
context->set_native_context(*native_context);
return context;
}
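// [Note, not part of this patch] Builtin contexts now get an explicit
// previous link to the native context; under the old NewFixedArrayWithMap()
// path that slot was only covered by the generic undefined fill.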
Handle<Struct> Factory::NewStruct(InstanceType type, PretenureFlag pretenure) {
- Map* map;
+ Map map;
switch (type) {
#define MAKE_CASE(TYPE, Name, name) \
case TYPE: \
@@ -1504,7 +1617,7 @@ Handle<Struct> Factory::NewStruct(InstanceType type, PretenureFlag pretenure) {
UNREACHABLE();
}
int size = map->instance_size();
- HeapObject* result = AllocateRawWithImmortalMap(size, pretenure, map);
+ HeapObject result = AllocateRawWithImmortalMap(size, pretenure, map);
Handle<Struct> str(Struct::cast(result), isolate());
str->InitializeBody(size);
return str;
@@ -1622,21 +1735,20 @@ Handle<PromiseResolveThenableJobTask> Factory::NewPromiseResolveThenableJobTask(
return microtask;
}
-Handle<MicrotaskQueue> Factory::NewMicrotaskQueue() {
- // MicrotaskQueue should be TENURED, as it outlives Context, and is mostly
- // as long-living as Context is.
- Handle<MicrotaskQueue> microtask_queue =
- Handle<MicrotaskQueue>::cast(NewStruct(MICROTASK_QUEUE_TYPE, TENURED));
- microtask_queue->set_queue(*empty_fixed_array());
- microtask_queue->set_pending_microtask_count(0);
- return microtask_queue;
+Handle<WeakFactoryCleanupJobTask> Factory::NewWeakFactoryCleanupJobTask(
+ Handle<JSWeakFactory> weak_factory) {
+ Handle<WeakFactoryCleanupJobTask> microtask =
+ Handle<WeakFactoryCleanupJobTask>::cast(
+ NewStruct(WEAK_FACTORY_CLEANUP_JOB_TASK_TYPE));
+ microtask->set_factory(*weak_factory);
+ return microtask;
}
Handle<Foreign> Factory::NewForeign(Address addr, PretenureFlag pretenure) {
// Statically ensure that it is safe to allocate foreigns in paged spaces.
STATIC_ASSERT(Foreign::kSize <= kMaxRegularHeapObjectSize);
- Map* map = *foreign_map();
- HeapObject* result =
+ Map map = *foreign_map();
+ HeapObject result =
AllocateRawWithImmortalMap(map->instance_size(), pretenure, map);
Handle<Foreign> foreign(Foreign::cast(result), isolate());
foreign->set_foreign_address(addr);
@@ -1649,7 +1761,7 @@ Handle<ByteArray> Factory::NewByteArray(int length, PretenureFlag pretenure) {
isolate()->heap()->FatalProcessOutOfMemory("invalid array length");
}
int size = ByteArray::SizeFor(length);
- HeapObject* result =
+ HeapObject result =
AllocateRawWithImmortalMap(size, pretenure, *byte_array_map());
Handle<ByteArray> array(ByteArray::cast(result), isolate());
array->set_length(length);
@@ -1668,7 +1780,7 @@ Handle<BytecodeArray> Factory::NewBytecodeArray(
DCHECK(!Heap::InNewSpace(*constant_pool));
int size = BytecodeArray::SizeFor(length);
- HeapObject* result =
+ HeapObject result =
AllocateRawWithImmortalMap(size, TENURED, *bytecode_array_map());
Handle<BytecodeArray> instance(BytecodeArray::cast(result), isolate());
instance->set_length(length);
@@ -1695,7 +1807,7 @@ Handle<FixedTypedArrayBase> Factory::NewFixedTypedArrayWithExternalPointer(
// TODO(7881): Smi length check
DCHECK(0 <= length && length <= Smi::kMaxValue);
int size = FixedTypedArrayBase::kHeaderSize;
- HeapObject* result = AllocateRawWithImmortalMap(
+ HeapObject result = AllocateRawWithImmortalMap(
size, pretenure,
ReadOnlyRoots(isolate()).MapForFixedTypedArray(array_type));
Handle<FixedTypedArrayBase> elements(FixedTypedArrayBase::cast(result),
@@ -1714,11 +1826,11 @@ Handle<FixedTypedArrayBase> Factory::NewFixedTypedArray(
CHECK(byte_length <= kMaxInt - FixedTypedArrayBase::kDataOffset);
size_t size =
OBJECT_POINTER_ALIGN(byte_length + FixedTypedArrayBase::kDataOffset);
- Map* map = ReadOnlyRoots(isolate()).MapForFixedTypedArray(array_type);
+ Map map = ReadOnlyRoots(isolate()).MapForFixedTypedArray(array_type);
AllocationAlignment alignment =
array_type == kExternalFloat64Array ? kDoubleAligned : kWordAligned;
- HeapObject* object = AllocateRawWithImmortalMap(static_cast<int>(size),
- pretenure, map, alignment);
+ HeapObject object = AllocateRawWithImmortalMap(static_cast<int>(size),
+ pretenure, map, alignment);
Handle<FixedTypedArrayBase> elements(FixedTypedArrayBase::cast(object),
isolate());
@@ -1735,7 +1847,7 @@ Handle<FixedTypedArrayBase> Factory::NewFixedTypedArray(
Handle<Cell> Factory::NewCell(Handle<Object> value) {
AllowDeferredHandleDereference convert_to_cell;
STATIC_ASSERT(Cell::kSize <= kMaxRegularHeapObjectSize);
- HeapObject* result =
+ HeapObject result =
AllocateRawWithImmortalMap(Cell::kSize, TENURED, *cell_map());
Handle<Cell> cell(Cell::cast(result), isolate());
cell->set_value(*value);
@@ -1744,8 +1856,8 @@ Handle<Cell> Factory::NewCell(Handle<Object> value) {
Handle<FeedbackCell> Factory::NewNoClosuresCell(Handle<HeapObject> value) {
AllowDeferredHandleDereference convert_to_cell;
- HeapObject* result = AllocateRawWithImmortalMap(FeedbackCell::kSize, TENURED,
- *no_closures_cell_map());
+ HeapObject result = AllocateRawWithImmortalMap(FeedbackCell::kSize, TENURED,
+ *no_closures_cell_map());
Handle<FeedbackCell> cell(FeedbackCell::cast(result), isolate());
cell->set_value(*value);
return cell;
@@ -1753,8 +1865,8 @@ Handle<FeedbackCell> Factory::NewNoClosuresCell(Handle<HeapObject> value) {
Handle<FeedbackCell> Factory::NewOneClosureCell(Handle<HeapObject> value) {
AllowDeferredHandleDereference convert_to_cell;
- HeapObject* result = AllocateRawWithImmortalMap(FeedbackCell::kSize, TENURED,
- *one_closure_cell_map());
+ HeapObject result = AllocateRawWithImmortalMap(FeedbackCell::kSize, TENURED,
+ *one_closure_cell_map());
Handle<FeedbackCell> cell(FeedbackCell::cast(result), isolate());
cell->set_value(*value);
return cell;
@@ -1762,28 +1874,56 @@ Handle<FeedbackCell> Factory::NewOneClosureCell(Handle<HeapObject> value) {
Handle<FeedbackCell> Factory::NewManyClosuresCell(Handle<HeapObject> value) {
AllowDeferredHandleDereference convert_to_cell;
- HeapObject* result = AllocateRawWithImmortalMap(FeedbackCell::kSize, TENURED,
- *many_closures_cell_map());
+ HeapObject result = AllocateRawWithImmortalMap(FeedbackCell::kSize, TENURED,
+ *many_closures_cell_map());
Handle<FeedbackCell> cell(FeedbackCell::cast(result), isolate());
cell->set_value(*value);
return cell;
}
+Handle<FeedbackCell> Factory::NewNoFeedbackCell() {
+ AllowDeferredHandleDereference convert_to_cell;
+ HeapObject result = AllocateRawWithImmortalMap(FeedbackCell::kSize, TENURED,
+ *no_feedback_cell_map());
+ Handle<FeedbackCell> cell(FeedbackCell::cast(result), isolate());
+ // Set the value to undefined. Feedback vectors are never allocated for
+ // cells with the no-feedback-cell map.
+ cell->set_value(*undefined_value());
+ return cell;
+}
+
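// [Note, not part of this patch] Presumably the dedicated no-feedback-cell
// map lets closures that never collect type feedback (see the FLAG_lite_mode
// handling later in this diff) carry a cell of the expected shape without
// paying for a feedback vector.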
Handle<PropertyCell> Factory::NewPropertyCell(Handle<Name> name,
PretenureFlag pretenure) {
DCHECK(name->IsUniqueName());
STATIC_ASSERT(PropertyCell::kSize <= kMaxRegularHeapObjectSize);
- HeapObject* result = AllocateRawWithImmortalMap(
- PropertyCell::kSize, pretenure, *global_property_cell_map());
+ HeapObject result = AllocateRawWithImmortalMap(PropertyCell::kSize, pretenure,
+ *global_property_cell_map());
Handle<PropertyCell> cell(PropertyCell::cast(result), isolate());
cell->set_dependent_code(DependentCode::cast(*empty_weak_fixed_array()),
SKIP_WRITE_BARRIER);
- cell->set_property_details(PropertyDetails(Smi::kZero));
+ cell->set_property_details(PropertyDetails(Smi::zero()));
cell->set_name(*name);
cell->set_value(*the_hole_value());
return cell;
}
+Handle<DescriptorArray> Factory::NewDescriptorArray(int number_of_descriptors,
+ int slack,
+ PretenureFlag pretenure) {
+ int number_of_all_descriptors = number_of_descriptors + slack;
+ // Zero-length case must be handled outside.
+ DCHECK_LT(0, number_of_all_descriptors);
+ int size = DescriptorArray::SizeFor(number_of_all_descriptors);
+ DCHECK_LT(size, kMaxRegularHeapObjectSize);
+ AllocationSpace space = Heap::SelectSpace(pretenure);
+ HeapObject obj = isolate()->heap()->AllocateRawWithRetryOrFail(size, space);
+ obj->set_map_after_allocation(*descriptor_array_map(), SKIP_WRITE_BARRIER);
+ DescriptorArray array = DescriptorArray::cast(obj);
+ array->Initialize(*empty_enum_cache(), *undefined_value(),
+ number_of_descriptors, slack);
+ return Handle<DescriptorArray>(array, isolate());
+}
+
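// [Sketch, not part of this patch] Unlike the immortal-map allocations
// elsewhere in this file, NewDescriptorArray() sets the map explicitly after
// a raw allocation and then lets Initialize() write the enum cache, counts
// and slack. A hypothetical call (pretenure omitted, assuming a default):
//
//   Handle<DescriptorArray> descs =
//       factory->NewDescriptorArray(nof_descriptors, /*slack=*/2);
//
// The slack pre-reserves entries so later descriptor appends need not
// reallocate the array.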
Handle<TransitionArray> Factory::NewTransitionArray(int number_of_transitions,
int slack) {
int capacity = TransitionArray::LengthFor(number_of_transitions + slack);
@@ -1826,7 +1966,7 @@ Handle<Map> Factory::NewMap(InstanceType type, int instance_size,
!Map::CanHaveFastTransitionableElementsKind(type),
IsDictionaryElementsKind(elements_kind) ||
IsTerminalElementsKind(elements_kind));
- HeapObject* result =
+ HeapObject result =
isolate()->heap()->AllocateRawWithRetryOrFail(Map::kSize, MAP_SPACE);
result->set_map_after_allocation(*meta_map(), SKIP_WRITE_BARRIER);
return handle(InitializeMap(Map::cast(result), type, instance_size,
@@ -1834,16 +1974,16 @@ Handle<Map> Factory::NewMap(InstanceType type, int instance_size,
isolate());
}
-Map* Factory::InitializeMap(Map* map, InstanceType type, int instance_size,
- ElementsKind elements_kind,
- int inobject_properties) {
+Map Factory::InitializeMap(Map map, InstanceType type, int instance_size,
+ ElementsKind elements_kind,
+ int inobject_properties) {
map->set_instance_type(type);
map->set_prototype(*null_value(), SKIP_WRITE_BARRIER);
map->set_constructor_or_backpointer(*null_value(), SKIP_WRITE_BARRIER);
map->set_instance_size(instance_size);
if (map->IsJSObjectMap()) {
DCHECK(!isolate()->heap()->InReadOnlySpace(map));
- map->SetInObjectPropertiesStartInWords(instance_size / kPointerSize -
+ map->SetInObjectPropertiesStartInWords(instance_size / kTaggedSize -
inobject_properties);
DCHECK_EQ(map->GetInObjectProperties(), inobject_properties);
map->set_prototype_validity_cell(*invalid_prototype_validity_cell());
@@ -1854,9 +1994,9 @@ Map* Factory::InitializeMap(Map* map, InstanceType type, int instance_size,
}
map->set_dependent_code(DependentCode::cast(*empty_weak_fixed_array()),
SKIP_WRITE_BARRIER);
- map->set_raw_transitions(MaybeObject::FromSmi(Smi::kZero));
+ map->set_raw_transitions(MaybeObject::FromSmi(Smi::zero()));
map->SetInObjectUnusedPropertyFields(inobject_properties);
- map->set_instance_descriptors(*empty_descriptor_array());
+ map->SetInstanceDescriptors(isolate(), *empty_descriptor_array(), 0);
if (FLAG_unbox_double_fields) {
map->set_layout_descriptor(LayoutDescriptor::FastPointerLayout());
}
@@ -1903,7 +2043,7 @@ Handle<JSObject> Factory::CopyJSObjectWithAllocationSite(
int object_size = map->instance_size();
int adjusted_object_size =
site.is_null() ? object_size : object_size + AllocationMemento::kSize;
- HeapObject* raw_clone = isolate()->heap()->AllocateRawWithRetryOrFail(
+ HeapObject raw_clone = isolate()->heap()->AllocateRawWithRetryOrFail(
adjusted_object_size, NEW_SPACE);
SLOW_DCHECK(Heap::InNewSpace(raw_clone));
@@ -1913,16 +2053,16 @@ Handle<JSObject> Factory::CopyJSObjectWithAllocationSite(
Handle<JSObject> clone(JSObject::cast(raw_clone), isolate());
if (!site.is_null()) {
- AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
- reinterpret_cast<Address>(raw_clone) + object_size);
+ AllocationMemento alloc_memento = AllocationMemento::unchecked_cast(
+ Object(raw_clone->ptr() + object_size));
InitializeAllocationMemento(alloc_memento, *site);
}
SLOW_DCHECK(clone->GetElementsKind() == source->GetElementsKind());
- FixedArrayBase* elements = FixedArrayBase::cast(source->elements());
+ FixedArrayBase elements = source->elements();
// Update elements if necessary.
if (elements->length() > 0) {
- FixedArrayBase* elem = nullptr;
+ FixedArrayBase elem;
if (elements->map() == *fixed_cow_array_map()) {
elem = elements;
} else if (source->HasDoubleElements()) {
@@ -1936,7 +2076,7 @@ Handle<JSObject> Factory::CopyJSObjectWithAllocationSite(
// Update properties if necessary.
if (source->HasFastProperties()) {
- PropertyArray* properties = source->property_array();
+ PropertyArray properties = source->property_array();
if (properties->length() > 0) {
// TODO(gsathya): Do not copy hash code.
Handle<PropertyArray> prop = CopyArrayWithMap(
@@ -1954,12 +2094,12 @@ Handle<JSObject> Factory::CopyJSObjectWithAllocationSite(
namespace {
template <typename T>
-void initialize_length(T* array, int length) {
+void initialize_length(Handle<T> array, int length) {
array->set_length(length);
}
template <>
-void initialize_length<PropertyArray>(PropertyArray* array, int length) {
+void initialize_length<PropertyArray>(Handle<PropertyArray> array, int length) {
array->initialize_length(length);
}
@@ -1968,24 +2108,23 @@ void initialize_length<PropertyArray>(PropertyArray* array, int length) {
template <typename T>
Handle<T> Factory::CopyArrayWithMap(Handle<T> src, Handle<Map> map) {
int len = src->length();
- HeapObject* obj = AllocateRawFixedArray(len, NOT_TENURED);
+ HeapObject obj = AllocateRawFixedArray(len, NOT_TENURED);
obj->set_map_after_allocation(*map, SKIP_WRITE_BARRIER);
- T* result = T::cast(obj);
+ Handle<T> result(T::cast(obj), isolate());
DisallowHeapAllocation no_gc;
WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
if (mode == SKIP_WRITE_BARRIER) {
// Eliminate the write barrier if possible.
- Heap::CopyBlock(obj->address() + kPointerSize,
- src->address() + kPointerSize,
- T::SizeFor(len) - kPointerSize);
+ Heap::CopyBlock(obj->address() + kTaggedSize, src->address() + kTaggedSize,
+ T::SizeFor(len) - kTaggedSize);
} else {
// Slow case: Just copy the content one-by-one.
initialize_length(result, len);
for (int i = 0; i < len; i++) result->set(i, src->get(i), mode);
}
- return Handle<T>(result, isolate());
+ return result;
}
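// [Note, not part of this patch] On the fast path above, the kTaggedSize
// offset skips the map word that set_map_after_allocation() already wrote,
// so CopyBlock covers exactly the payload:
//
//   |<- map word ->|<------- T::SizeFor(len) - kTaggedSize ------->|
//     already set     copied verbatim, write barrier safely skipped
//
// The slow path stores element by element so the write barrier can run for
// each copied value.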
template <typename T>
@@ -1995,18 +2134,18 @@ Handle<T> Factory::CopyArrayAndGrow(Handle<T> src, int grow_by,
DCHECK_LE(grow_by, kMaxInt - src->length());
int old_len = src->length();
int new_len = old_len + grow_by;
- HeapObject* obj = AllocateRawFixedArray(new_len, pretenure);
+ HeapObject obj = AllocateRawFixedArray(new_len, pretenure);
obj->set_map_after_allocation(src->map(), SKIP_WRITE_BARRIER);
- T* result = T::cast(obj);
+ Handle<T> result(T::cast(obj), isolate());
initialize_length(result, new_len);
// Copy the content.
DisallowHeapAllocation no_gc;
WriteBarrierMode mode = obj->GetWriteBarrierMode(no_gc);
for (int i = 0; i < old_len; i++) result->set(i, src->get(i), mode);
- MemsetPointer(result->data_start() + old_len, *undefined_value(), grow_by);
- return Handle<T>(result, isolate());
+ MemsetTagged(result->data_start() + old_len, *undefined_value(), grow_by);
+ return result;
}
Handle<FixedArray> Factory::CopyFixedArrayWithMap(Handle<FixedArray> array,
@@ -2027,20 +2166,19 @@ Handle<WeakFixedArray> Factory::CopyWeakFixedArrayAndGrow(
int old_len = src->length();
int new_len = old_len + grow_by;
DCHECK_GE(new_len, old_len);
- HeapObject* obj = AllocateRawFixedArray(new_len, pretenure);
+ HeapObject obj = AllocateRawFixedArray(new_len, pretenure);
DCHECK_EQ(old_len, src->length());
obj->set_map_after_allocation(src->map(), SKIP_WRITE_BARRIER);
- WeakFixedArray* result = WeakFixedArray::cast(obj);
+ WeakFixedArray result = WeakFixedArray::cast(obj);
result->set_length(new_len);
// Copy the content.
DisallowHeapAllocation no_gc;
WriteBarrierMode mode = obj->GetWriteBarrierMode(no_gc);
for (int i = 0; i < old_len; i++) result->Set(i, src->Get(i), mode);
- HeapObjectReference* undefined_reference =
- HeapObjectReference::Strong(ReadOnlyRoots(isolate()).undefined_value());
- MemsetPointer(result->data_start() + old_len, undefined_reference, grow_by);
+ MemsetTagged(ObjectSlot(result->RawFieldOfElementAt(old_len)),
+ ReadOnlyRoots(isolate()).undefined_value(), grow_by);
return Handle<WeakFixedArray>(result, isolate());
}
@@ -2049,10 +2187,10 @@ Handle<WeakArrayList> Factory::CopyWeakArrayListAndGrow(
int old_capacity = src->capacity();
int new_capacity = old_capacity + grow_by;
DCHECK_GE(new_capacity, old_capacity);
- HeapObject* obj = AllocateRawWeakArrayList(new_capacity, pretenure);
+ HeapObject obj = AllocateRawWeakArrayList(new_capacity, pretenure);
obj->set_map_after_allocation(src->map(), SKIP_WRITE_BARRIER);
- WeakArrayList* result = WeakArrayList::cast(obj);
+ WeakArrayList result = WeakArrayList::cast(obj);
result->set_length(src->length());
result->set_capacity(new_capacity);
@@ -2060,10 +2198,8 @@ Handle<WeakArrayList> Factory::CopyWeakArrayListAndGrow(
DisallowHeapAllocation no_gc;
WriteBarrierMode mode = obj->GetWriteBarrierMode(no_gc);
for (int i = 0; i < old_capacity; i++) result->Set(i, src->Get(i), mode);
- HeapObjectReference* undefined_reference =
- HeapObjectReference::Strong(ReadOnlyRoots(isolate()).undefined_value());
- MemsetPointer(result->data_start() + old_capacity, undefined_reference,
- grow_by);
+ MemsetTagged(ObjectSlot(result->data_start() + old_capacity),
+ ReadOnlyRoots(isolate()).undefined_value(), grow_by);
return Handle<WeakArrayList>(result, isolate());
}
@@ -2079,7 +2215,7 @@ Handle<FixedArray> Factory::CopyFixedArrayUpTo(Handle<FixedArray> array,
DCHECK_LE(new_len, array->length());
if (new_len == 0) return empty_fixed_array();
- HeapObject* obj = AllocateRawFixedArray(new_len, pretenure);
+ HeapObject obj = AllocateRawFixedArray(new_len, pretenure);
obj->set_map_after_allocation(*fixed_array_map(), SKIP_WRITE_BARRIER);
Handle<FixedArray> result(FixedArray::cast(obj), isolate());
result->set_length(new_len);
@@ -2125,7 +2261,7 @@ Handle<FixedDoubleArray> Factory::CopyFixedDoubleArray(
Handle<FeedbackVector> Factory::CopyFeedbackVector(
Handle<FeedbackVector> array) {
int len = array->length();
- HeapObject* obj = AllocateRawWithImmortalMap(
+ HeapObject obj = AllocateRawWithImmortalMap(
FeedbackVector::SizeFor(len), NOT_TENURED, *feedback_vector_map());
Handle<FeedbackVector> result(FeedbackVector::cast(obj), isolate());
@@ -2134,9 +2270,9 @@ Handle<FeedbackVector> Factory::CopyFeedbackVector(
// Eliminate the write barrier if possible.
if (mode == SKIP_WRITE_BARRIER) {
- Heap::CopyBlock(result->address() + kPointerSize,
- result->address() + kPointerSize,
- FeedbackVector::SizeFor(len) - kPointerSize);
+ Heap::CopyBlock(result->address() + kTaggedSize,
+ result->address() + kTaggedSize,
+ FeedbackVector::SizeFor(len) - kTaggedSize);
} else {
// Slow case: Just copy the content one-by-one.
result->set_shared_function_info(array->shared_function_info());
@@ -2176,17 +2312,17 @@ Handle<Object> Factory::NewNumberFromUint(uint32_t value,
Handle<HeapNumber> Factory::NewHeapNumber(PretenureFlag pretenure) {
STATIC_ASSERT(HeapNumber::kSize <= kMaxRegularHeapObjectSize);
- Map* map = *heap_number_map();
- HeapObject* result = AllocateRawWithImmortalMap(HeapNumber::kSize, pretenure,
- map, kDoubleUnaligned);
+ Map map = *heap_number_map();
+ HeapObject result = AllocateRawWithImmortalMap(HeapNumber::kSize, pretenure,
+ map, kDoubleUnaligned);
return handle(HeapNumber::cast(result), isolate());
}
Handle<MutableHeapNumber> Factory::NewMutableHeapNumber(
PretenureFlag pretenure) {
STATIC_ASSERT(HeapNumber::kSize <= kMaxRegularHeapObjectSize);
- Map* map = *mutable_heap_number_map();
- HeapObject* result = AllocateRawWithImmortalMap(
+ Map map = *mutable_heap_number_map();
+ HeapObject result = AllocateRawWithImmortalMap(
MutableHeapNumber::kSize, pretenure, map, kDoubleUnaligned);
return handle(MutableHeapNumber::cast(result), isolate());
}
@@ -2196,20 +2332,22 @@ Handle<FreshlyAllocatedBigInt> Factory::NewBigInt(int length,
if (length < 0 || length > BigInt::kMaxLength) {
isolate()->heap()->FatalProcessOutOfMemory("invalid BigInt length");
}
- HeapObject* result = AllocateRawWithImmortalMap(BigInt::SizeFor(length),
- pretenure, *bigint_map());
- return handle(FreshlyAllocatedBigInt::cast(result), isolate());
+ HeapObject result = AllocateRawWithImmortalMap(BigInt::SizeFor(length),
+ pretenure, *bigint_map());
+ FreshlyAllocatedBigInt bigint = FreshlyAllocatedBigInt::cast(result);
+ bigint->clear_padding();
+ return handle(bigint, isolate());
}
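// [Note, not part of this patch] clear_padding() zeroes the unused tail of
// the fresh BigInt allocation; plausibly this keeps uninitialized heap bytes
// out of snapshots and heap verification rather than affecting semantics.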
Handle<Object> Factory::NewError(Handle<JSFunction> constructor,
- MessageTemplate::Template template_index,
+ MessageTemplate template_index,
Handle<Object> arg0, Handle<Object> arg1,
Handle<Object> arg2) {
HandleScope scope(isolate());
if (isolate()->bootstrapper()->IsActive()) {
// During bootstrapping we cannot construct error objects.
return scope.CloseAndEscape(NewStringFromAsciiChecked(
- MessageTemplate::TemplateString(template_index)));
+ MessageFormatter::TemplateString(template_index)));
}
if (arg0.is_null()) arg0 = undefined_value();
@@ -2260,7 +2398,7 @@ Handle<Object> Factory::NewInvalidStringLengthError() {
}
#define DEFINE_ERROR(NAME, name) \
- Handle<Object> Factory::New##NAME(MessageTemplate::Template template_index, \
+ Handle<Object> Factory::New##NAME(MessageTemplate template_index, \
Handle<Object> arg0, Handle<Object> arg1, \
Handle<Object> arg2) { \
return NewError(isolate()->name##_function(), template_index, arg0, arg1, \
@@ -2288,7 +2426,7 @@ Handle<JSFunction> Factory::NewFunction(Handle<Map> map,
function->set_shared(*info);
function->set_code(info->GetCode());
function->set_context(*context);
- function->set_feedback_cell(*many_closures_cell());
+ function->set_raw_feedback_cell(*many_closures_cell());
int header_size;
if (map->has_prototype_slot()) {
header_size = JSFunction::kSizeWithPrototype;
@@ -2471,7 +2609,8 @@ Handle<JSFunction> Factory::NewFunctionFromSharedFunctionInfo(
} else if (feedback_cell->map() == *one_closure_cell_map()) {
feedback_cell->set_map(*many_closures_cell_map());
} else {
- DCHECK_EQ(feedback_cell->map(), *many_closures_cell_map());
+ DCHECK(feedback_cell->map() == *no_feedback_cell_map() ||
+ feedback_cell->map() == *many_closures_cell_map());
}
// Check that the optimized code in the feedback cell wasn't marked for
@@ -2481,7 +2620,7 @@ Handle<JSFunction> Factory::NewFunctionFromSharedFunctionInfo(
->EvictOptimizedCodeMarkedForDeoptimization(
*info, "new function from shared function info");
}
- result->set_feedback_cell(*feedback_cell);
+ result->set_raw_feedback_cell(*feedback_cell);
// Give compiler a chance to pre-initialize.
Compiler::PostInstantiation(result, pretenure);
@@ -2499,54 +2638,49 @@ Handle<ModuleInfo> Factory::NewModuleInfo() {
ModuleInfo::kLength, TENURED);
}
-Handle<PreParsedScopeData> Factory::NewPreParsedScopeData(int length) {
- int size = PreParsedScopeData::SizeFor(length);
- Handle<PreParsedScopeData> result(
- PreParsedScopeData::cast(AllocateRawWithImmortalMap(
- size, TENURED, *pre_parsed_scope_data_map())),
- isolate());
- result->set_scope_data(PodArray<uint8_t>::cast(*empty_byte_array()));
- result->set_length(length);
- MemsetPointer(result->child_data_start(), *null_value(), length);
-
+Handle<PreparseData> Factory::NewPreparseData(int data_length,
+ int children_length) {
+ int size = PreparseData::SizeFor(data_length, children_length);
+ Handle<PreparseData> result(PreparseData::cast(AllocateRawWithImmortalMap(
+ size, TENURED, *preparse_data_map())),
+ isolate());
+ result->set_data_length(data_length);
+ result->set_children_length(children_length);
+ MemsetTagged(result->inner_data_start(), *null_value(), children_length);
result->clear_padding();
return result;
}
-Handle<UncompiledDataWithoutPreParsedScope>
-Factory::NewUncompiledDataWithoutPreParsedScope(Handle<String> inferred_name,
- int32_t start_position,
- int32_t end_position,
- int32_t function_literal_id) {
- Handle<UncompiledDataWithoutPreParsedScope> result(
- UncompiledDataWithoutPreParsedScope::cast(
- New(uncompiled_data_without_pre_parsed_scope_map(), TENURED)),
+Handle<UncompiledDataWithoutPreparseData>
+Factory::NewUncompiledDataWithoutPreparseData(Handle<String> inferred_name,
+ int32_t start_position,
+ int32_t end_position,
+ int32_t function_literal_id) {
+ Handle<UncompiledDataWithoutPreparseData> result(
+ UncompiledDataWithoutPreparseData::cast(
+ New(uncompiled_data_without_preparse_data_map(), TENURED)),
isolate());
- result->set_inferred_name(*inferred_name);
- result->set_start_position(start_position);
- result->set_end_position(end_position);
- result->set_function_literal_id(function_literal_id);
- result->clear_padding();
+ UncompiledData::Initialize(*result, *inferred_name, start_position,
+ end_position, function_literal_id);
return result;
}
-Handle<UncompiledDataWithPreParsedScope>
-Factory::NewUncompiledDataWithPreParsedScope(
- Handle<String> inferred_name, int32_t start_position, int32_t end_position,
- int32_t function_literal_id,
- Handle<PreParsedScopeData> pre_parsed_scope_data) {
- Handle<UncompiledDataWithPreParsedScope> result(
- UncompiledDataWithPreParsedScope::cast(
- New(uncompiled_data_with_pre_parsed_scope_map(), TENURED)),
+Handle<UncompiledDataWithPreparseData>
+Factory::NewUncompiledDataWithPreparseData(Handle<String> inferred_name,
+ int32_t start_position,
+ int32_t end_position,
+ int32_t function_literal_id,
+ Handle<PreparseData> preparse_data) {
+ Handle<UncompiledDataWithPreparseData> result(
+ UncompiledDataWithPreparseData::cast(
+ New(uncompiled_data_with_preparse_data_map(), TENURED)),
isolate());
- result->set_inferred_name(*inferred_name);
- result->set_start_position(start_position);
- result->set_end_position(end_position);
- result->set_function_literal_id(function_literal_id);
- result->set_pre_parsed_scope_data(*pre_parsed_scope_data);
- result->clear_padding();
+ UncompiledDataWithPreparseData::Initialize(
+ *result, *inferred_name, start_position, end_position,
+ function_literal_id, *preparse_data);
+
return result;
}
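// [Note, not part of this patch] Both UncompiledData constructors above now
// delegate to shared Initialize() helpers, so field setup is defined in one
// place instead of being repeated, and risking divergence, per variant.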
@@ -2571,10 +2705,12 @@ MaybeHandle<Code> Factory::TryNewCode(
const CodeDesc& desc, Code::Kind kind, Handle<Object> self_ref,
int32_t builtin_index, MaybeHandle<ByteArray> maybe_source_position_table,
MaybeHandle<DeoptimizationData> maybe_deopt_data, Movability movability,
- uint32_t stub_key, bool is_turbofanned, int stack_slots,
- int safepoint_table_offset, int handler_table_offset) {
+ bool is_turbofanned, int stack_slots, int safepoint_table_offset,
+ int handler_table_offset) {
// Allocate objects needed for code initialization.
- Handle<ByteArray> reloc_info = NewByteArray(desc.reloc_size, TENURED);
+ Handle<ByteArray> reloc_info = NewByteArray(
+ desc.reloc_size,
+ Builtins::IsBuiltinId(builtin_index) ? TENURED_READ_ONLY : TENURED);
Handle<CodeDataContainer> data_container = NewCodeDataContainer(0);
Handle<ByteArray> source_position_table =
maybe_source_position_table.is_null()
@@ -2589,11 +2725,11 @@ MaybeHandle<Code> Factory::TryNewCode(
Heap* heap = isolate()->heap();
CodePageCollectionMemoryModificationScope code_allocation(heap);
- HeapObject* result =
+ HeapObject result =
heap->AllocateRawWithLightRetry(object_size, CODE_SPACE);
// Return an empty handle if we cannot allocate the code object.
- if (!result) return MaybeHandle<Code>();
+ if (result.is_null()) return MaybeHandle<Code>();
if (movability == kImmovable) {
result = heap->EnsureImmovableCode(result, object_size);
@@ -2608,11 +2744,16 @@ MaybeHandle<Code> Factory::TryNewCode(
InitializeCode(heap, code, object_size, desc, kind, self_ref, builtin_index,
source_position_table, deopt_data, reloc_info,
- data_container, stub_key, is_turbofanned, stack_slots,
+ data_container, is_turbofanned, stack_slots,
safepoint_table_offset, handler_table_offset);
+
+ // Flush the instruction cache before changing the permissions.
+ // Note: we do this before setting permissions to ReadExecute because some
+ // older ARM kernels have a bug where cache flush instructions trigger an
+ // access error on non-writable memory.
+ // See https://bugs.chromium.org/p/v8/issues/detail?id=8157
+ code->FlushICache();
}
- // Flush the instruction cache after changing the permissions.
- code->FlushICache();
return code;
}
@@ -2621,10 +2762,12 @@ Handle<Code> Factory::NewCode(
const CodeDesc& desc, Code::Kind kind, Handle<Object> self_ref,
int32_t builtin_index, MaybeHandle<ByteArray> maybe_source_position_table,
MaybeHandle<DeoptimizationData> maybe_deopt_data, Movability movability,
- uint32_t stub_key, bool is_turbofanned, int stack_slots,
- int safepoint_table_offset, int handler_table_offset) {
+ bool is_turbofanned, int stack_slots, int safepoint_table_offset,
+ int handler_table_offset) {
// Allocate objects needed for code initialization.
- Handle<ByteArray> reloc_info = NewByteArray(desc.reloc_size, TENURED);
+ Handle<ByteArray> reloc_info = NewByteArray(
+ desc.reloc_size,
+ Builtins::IsBuiltinId(builtin_index) ? TENURED_READ_ONLY : TENURED);
Handle<CodeDataContainer> data_container = NewCodeDataContainer(0);
Handle<ByteArray> source_position_table =
maybe_source_position_table.is_null()
@@ -2640,9 +2783,8 @@ Handle<Code> Factory::NewCode(
Heap* heap = isolate()->heap();
CodePageCollectionMemoryModificationScope code_allocation(heap);
- HeapObject* result =
+ HeapObject result =
heap->AllocateRawWithRetryOrFail(object_size, CODE_SPACE);
-
if (movability == kImmovable) {
result = heap->EnsureImmovableCode(result, object_size);
}
@@ -2656,34 +2798,22 @@ Handle<Code> Factory::NewCode(
InitializeCode(heap, code, object_size, desc, kind, self_ref, builtin_index,
source_position_table, deopt_data, reloc_info,
- data_container, stub_key, is_turbofanned, stack_slots,
+ data_container, is_turbofanned, stack_slots,
safepoint_table_offset, handler_table_offset);
+
+ // Flush the instruction cache before changing the permissions.
+ // Note: we do this before setting permissions to ReadExecute because some
+ // older ARM kernels have a bug where cache flush instructions trigger an
+ // access error on non-writable memory.
+ // See https://bugs.chromium.org/p/v8/issues/detail?id=8157
+ code->FlushICache();
}
- // Flush the instruction cache after changing the permissions.
- code->FlushICache();
return code;
}
-Handle<Code> Factory::NewCodeForDeserialization(uint32_t size) {
- DCHECK(IsAligned(static_cast<intptr_t>(size), kCodeAlignment));
- Heap* heap = isolate()->heap();
- HeapObject* result = heap->AllocateRawWithRetryOrFail(size, CODE_SPACE);
- // Unprotect the memory chunk of the object if it was not unprotected
- // already.
- heap->UnprotectAndRegisterMemoryChunk(result);
- heap->ZapCodeObject(result->address(), size);
- result->set_map_after_allocation(*code_map(), SKIP_WRITE_BARRIER);
- DCHECK(IsAligned(result->address(), kCodeAlignment));
- DCHECK_IMPLIES(
- !heap->memory_allocator()->code_range().is_empty(),
- heap->memory_allocator()->code_range().contains(result->address()));
- return handle(Code::cast(result), isolate());
-}
-
Handle<Code> Factory::NewOffHeapTrampolineFor(Handle<Code> code,
Address off_heap_entry) {
- CHECK(isolate()->serializer_enabled());
CHECK_NOT_NULL(isolate()->embedded_blob());
CHECK_NE(0, isolate()->embedded_blob_size());
CHECK(Builtins::IsIsolateIndependentBuiltin(*code));
@@ -2694,18 +2824,40 @@ Handle<Code> Factory::NewOffHeapTrampolineFor(Handle<Code> code,
// The trampoline code object must inherit specific flags from the original
// builtin (e.g. the safepoint-table offset). We set them manually here.
- const bool set_is_off_heap_trampoline = true;
- const int stack_slots = code->has_safepoint_info() ? code->stack_slots() : 0;
- result->initialize_flags(code->kind(), code->has_unwinding_info(),
- code->is_turbofanned(), stack_slots,
- set_is_off_heap_trampoline);
- result->set_builtin_index(code->builtin_index());
- result->set_handler_table_offset(code->handler_table_offset());
- result->code_data_container()->set_kind_specific_flags(
- code->code_data_container()->kind_specific_flags());
- result->set_constant_pool_offset(code->constant_pool_offset());
- if (code->has_safepoint_info()) {
- result->set_safepoint_table_offset(code->safepoint_table_offset());
+ {
+ MemoryChunk* chunk = MemoryChunk::FromHeapObject(*result);
+ CodePageMemoryModificationScope code_allocation(chunk);
+
+ const bool set_is_off_heap_trampoline = true;
+ const int stack_slots =
+ code->has_safepoint_info() ? code->stack_slots() : 0;
+ result->initialize_flags(code->kind(), code->has_unwinding_info(),
+ code->is_turbofanned(), stack_slots,
+ set_is_off_heap_trampoline);
+ result->set_builtin_index(code->builtin_index());
+ result->set_handler_table_offset(code->handler_table_offset());
+ result->code_data_container()->set_kind_specific_flags(
+ code->code_data_container()->kind_specific_flags());
+ result->set_constant_pool_offset(code->constant_pool_offset());
+ if (code->has_safepoint_info()) {
+ result->set_safepoint_table_offset(code->safepoint_table_offset());
+ }
+ result->set_code_comments_offset(code->code_comments_offset());
+
+ // Replace the newly generated trampoline's RelocInfo ByteArray with the
+ // canonical one stored in the roots to avoid duplicating it for every
+ // single builtin.
+ ByteArray canonical_reloc_info =
+ ReadOnlyRoots(isolate()).off_heap_trampoline_relocation_info();
+#ifdef DEBUG
+ // Verify that the contents are the same.
+ ByteArray reloc_info = result->relocation_info();
+ DCHECK_EQ(reloc_info->length(), canonical_reloc_info->length());
+ for (int i = 0; i < reloc_info->length(); ++i) {
+ DCHECK_EQ(reloc_info->get(i), canonical_reloc_info->get(i));
+ }
+#endif
+ result->set_relocation_info(canonical_reloc_info);
}
return result;
@@ -2716,24 +2868,28 @@ Handle<Code> Factory::CopyCode(Handle<Code> code) {
NewCodeDataContainer(code->code_data_container()->kind_specific_flags());
Heap* heap = isolate()->heap();
- int obj_size = code->Size();
- HeapObject* result = heap->AllocateRawWithRetryOrFail(obj_size, CODE_SPACE);
-
- // Copy code object.
- Address old_addr = code->address();
- Address new_addr = result->address();
- Heap::CopyBlock(new_addr, old_addr, obj_size);
- Handle<Code> new_code(Code::cast(result), isolate());
-
- // Set the {CodeDataContainer}, it cannot be shared.
- new_code->set_code_data_container(*data_container);
-
- new_code->Relocate(new_addr - old_addr);
- // We have to iterate over the object and process its pointers when black
- // allocation is on.
- heap->incremental_marking()->ProcessBlackAllocatedObject(*new_code);
- // Record all references to embedded objects in the new code object.
- WriteBarrierForCode(*new_code);
+ Handle<Code> new_code;
+ {
+ int obj_size = code->Size();
+ CodePageCollectionMemoryModificationScope code_allocation(heap);
+ HeapObject result = heap->AllocateRawWithRetryOrFail(obj_size, CODE_SPACE);
+
+ // Copy code object.
+ Address old_addr = code->address();
+ Address new_addr = result->address();
+ Heap::CopyBlock(new_addr, old_addr, obj_size);
+ new_code = handle(Code::cast(result), isolate());
+
+ // Set the {CodeDataContainer}, it cannot be shared.
+ new_code->set_code_data_container(*data_container);
+
+ new_code->Relocate(new_addr - old_addr);
+ // We have to iterate over the object and process its pointers when black
+ // allocation is on.
+ heap->incremental_marking()->ProcessBlackAllocatedObject(*new_code);
+ // Record all references to embedded objects in the new code object.
+ WriteBarrierForCode(*new_code);
+ }
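// [Note, not part of this patch] The copy, relocation and write-barrier work
// now all sit inside a CodePageCollectionMemoryModificationScope, so the
// code page stays writable for exactly the duration of the mutation.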
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) new_code->ObjectVerify(isolate());
@@ -2748,7 +2904,7 @@ Handle<Code> Factory::CopyCode(Handle<Code> code) {
Handle<BytecodeArray> Factory::CopyBytecodeArray(
Handle<BytecodeArray> bytecode_array) {
int size = BytecodeArray::SizeFor(bytecode_array->length());
- HeapObject* result =
+ HeapObject result =
AllocateRawWithImmortalMap(size, TENURED, *bytecode_array_map());
Handle<BytecodeArray> copy(BytecodeArray::cast(result), isolate());
@@ -2835,6 +2991,7 @@ Handle<JSGlobalObject> Factory::NewJSGlobalObject(
Handle<Map> new_map = Map::CopyDropDescriptors(isolate(), map);
new_map->set_may_have_interesting_symbols(true);
new_map->set_is_dictionary_map(true);
+ LOG(isolate(), MapDetails(*new_map));
// Set up the global object as a normalized object.
global->set_global_dictionary(*dictionary);
@@ -2875,7 +3032,7 @@ void Factory::InitializeJSObjectBody(Handle<JSObject> obj, Handle<Map> map,
// In case of Array subclassing the |map| could already be transitioned
// to different elements kind from the initial map on which we track slack.
bool in_progress = map->IsInobjectSlackTrackingInProgress();
- Object* filler;
+ Object filler;
if (in_progress) {
filler = *one_pointer_filler_map();
} else {
@@ -2898,7 +3055,7 @@ Handle<JSObject> Factory::NewJSObjectFromMap(
// AllocateGlobalObject to be properly initialized.
DCHECK(map->instance_type() != JS_GLOBAL_OBJECT_TYPE);
- HeapObject* obj =
+ HeapObject obj =
AllocateRawWithAllocationSite(map, pretenure, allocation_site);
Handle<JSObject> js_obj(JSObject::cast(obj), isolate());
@@ -2920,12 +3077,32 @@ Handle<JSObject> Factory::NewSlowJSObjectFromMap(Handle<Map> map, int capacity,
return js_object;
}
+Handle<JSObject> Factory::NewSlowJSObjectWithPropertiesAndElements(
+ Handle<Object> prototype, Handle<NameDictionary> properties,
+ Handle<FixedArrayBase> elements, PretenureFlag pretenure) {
+ Handle<Map> object_map = isolate()->slow_object_with_object_prototype_map();
+ if (object_map->prototype() != *prototype) {
+ object_map = Map::TransitionToPrototype(isolate(), object_map, prototype);
+ }
+ DCHECK(object_map->is_dictionary_map());
+ Handle<JSObject> object = NewJSObjectFromMap(object_map, pretenure);
+ object->set_raw_properties_or_hash(*properties);
+ if (*elements != ReadOnlyRoots(isolate()).empty_fixed_array()) {
+ DCHECK(elements->IsNumberDictionary());
+ object_map =
+ JSObject::GetElementsTransitionMap(object, DICTIONARY_ELEMENTS);
+ JSObject::MigrateToMap(object, object_map);
+ object->set_elements(*elements);
+ }
+ return object;
+}
+
Handle<JSArray> Factory::NewJSArray(ElementsKind elements_kind,
PretenureFlag pretenure) {
- NativeContext* native_context = isolate()->raw_native_context();
- Map* map = native_context->GetInitialJSArrayMap(elements_kind);
- if (map == nullptr) {
- JSFunction* array_function = native_context->array_function();
+ NativeContext native_context = isolate()->raw_native_context();
+ Map map = native_context->GetInitialJSArrayMap(elements_kind);
+ if (map.is_null()) {
+ JSFunction array_function = native_context->array_function();
map = array_function->initial_map();
}
return Handle<JSArray>::cast(
@@ -2989,7 +3166,7 @@ void Factory::NewJSArrayStorage(Handle<JSArray> array, int length, int capacity,
}
Handle<JSWeakMap> Factory::NewJSWeakMap() {
- NativeContext* native_context = isolate()->raw_native_context();
+ NativeContext native_context = isolate()->raw_native_context();
Handle<Map> map(native_context->js_weak_map_fun()->initial_map(), isolate());
Handle<JSWeakMap> weakmap(JSWeakMap::cast(*NewJSObjectFromMap(map)),
isolate());
@@ -3139,8 +3316,8 @@ static void ForFixedTypedArray(ExternalArrayType array_type,
UNREACHABLE();
}
-JSFunction* GetTypedArrayFun(ExternalArrayType type, Isolate* isolate) {
- NativeContext* native_context = isolate->context()->native_context();
+JSFunction GetTypedArrayFun(ExternalArrayType type, Isolate* isolate) {
+ NativeContext native_context = isolate->context()->native_context();
switch (type) {
#define TYPED_ARRAY_FUN(Type, type, TYPE, ctype) \
case kExternal##Type##Array: \
@@ -3152,8 +3329,8 @@ JSFunction* GetTypedArrayFun(ExternalArrayType type, Isolate* isolate) {
UNREACHABLE();
}
-JSFunction* GetTypedArrayFun(ElementsKind elements_kind, Isolate* isolate) {
- NativeContext* native_context = isolate->context()->native_context();
+JSFunction GetTypedArrayFun(ElementsKind elements_kind, Isolate* isolate) {
+ NativeContext native_context = isolate->context()->native_context();
switch (elements_kind) {
#define TYPED_ARRAY_FUN(Type, type, TYPE, ctype) \
case TYPE##_ELEMENTS: \
@@ -3352,6 +3529,7 @@ Handle<JSGlobalProxy> Factory::NewUninitializedJSGlobalProxy(int size) {
// Maintain invariant expected from any JSGlobalProxy.
map->set_is_access_check_needed(true);
map->set_may_have_interesting_symbols(true);
+ LOG(isolate(), MapDetails(*map));
return Handle<JSGlobalProxy>::cast(NewJSObjectFromMap(map, NOT_TENURED));
}
@@ -3400,9 +3578,8 @@ Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfoForLiteral(
}
Handle<JSMessageObject> Factory::NewJSMessageObject(
- MessageTemplate::Template message, Handle<Object> argument,
- int start_position, int end_position, Handle<Script> script,
- Handle<Object> stack_frames) {
+ MessageTemplate message, Handle<Object> argument, int start_position,
+ int end_position, Handle<Script> script, Handle<Object> stack_frames) {
Handle<Map> map = message_object_map();
Handle<JSMessageObject> message_obj(
JSMessageObject::cast(New(map, NOT_TENURED)), isolate());
@@ -3454,7 +3631,7 @@ Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo(
// Set pointer fields.
share->set_name_or_scope_info(
- has_shared_name ? *shared_name
+ has_shared_name ? Object::cast(*shared_name)
: SharedFunctionInfo::kNoSharedNameSentinel);
Handle<HeapObject> function_data;
if (maybe_function_data.ToHandle(&function_data)) {
@@ -3465,7 +3642,6 @@ Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo(
!Code::cast(*function_data)->is_builtin());
share->set_function_data(*function_data);
} else if (Builtins::IsBuiltinId(maybe_builtin_index)) {
- DCHECK_NE(maybe_builtin_index, Builtins::kDeserializeLazy);
share->set_builtin_id(maybe_builtin_index);
} else {
share->set_builtin_id(Builtins::kIllegal);
@@ -3494,6 +3670,12 @@ Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo(
share->set_raw_function_token_offset(0);
// All flags default to false or 0.
share->set_flags(0);
+ // In lite mode, disable optimization unconditionally.
+ if (FLAG_lite_mode) {
+ share->set_flags(
+ SharedFunctionInfo::DisabledOptimizationReasonBits::encode(
+ BailoutReason::kNeverOptimize));
+ }
share->CalculateConstructAsBuiltin();
share->set_kind(kind);
@@ -3512,7 +3694,7 @@ Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo(
}
namespace {
-inline int NumberToStringCacheHash(Handle<FixedArray> cache, Smi* number) {
+inline int NumberToStringCacheHash(Handle<FixedArray> cache, Smi number) {
int mask = (cache->length() >> 1) - 1;
return number->value() & mask;
}
@@ -3545,9 +3727,9 @@ Handle<String> Factory::NumberToStringCacheSet(Handle<Object> number, int hash,
return js_string;
}
-Handle<Object> Factory::NumberToStringCacheGet(Object* number, int hash) {
+Handle<Object> Factory::NumberToStringCacheGet(Object number, int hash) {
DisallowHeapAllocation no_gc;
- Object* key = number_string_cache()->get(hash * 2);
+ Object key = number_string_cache()->get(hash * 2);
if (key == number || (key->IsHeapNumber() && number->IsHeapNumber() &&
key->Number() == number->Number())) {
return Handle<String>(
@@ -3581,7 +3763,7 @@ Handle<String> Factory::NumberToString(Handle<Object> number,
return NumberToStringCacheSet(number, hash, string, check_cache);
}
-Handle<String> Factory::NumberToString(Smi* number, bool check_cache) {
+Handle<String> Factory::NumberToString(Smi number, bool check_cache) {
int hash = 0;
if (check_cache) {
hash = NumberToStringCacheHash(number_string_cache(), number);
@@ -3611,6 +3793,7 @@ Handle<DebugInfo> Factory::NewDebugInfo(Handle<SharedFunctionInfo> shared) {
debug_info->set_script(shared->script_or_debug_info());
debug_info->set_original_bytecode_array(
ReadOnlyRoots(heap).undefined_value());
+ debug_info->set_debug_bytecode_array(ReadOnlyRoots(heap).undefined_value());
debug_info->set_break_points(ReadOnlyRoots(heap).empty_fixed_array());
// Link debug info to function.
@@ -3729,10 +3912,10 @@ Handle<Map> Factory::ObjectLiteralMapFromCache(Handle<NativeContext> context,
} else {
// Check to see whether there is a matching element in the cache.
Handle<WeakFixedArray> cache = Handle<WeakFixedArray>::cast(maybe_cache);
- MaybeObject* result = cache->Get(cache_index);
- HeapObject* heap_object;
+ MaybeObject result = cache->Get(cache_index);
+ HeapObject heap_object;
if (result->GetHeapObjectIfWeak(&heap_object)) {
- Map* map = Map::cast(heap_object);
+ Map map = Map::cast(heap_object);
DCHECK(!map->is_dictionary_map());
return handle(map, isolate());
}
@@ -3803,7 +3986,7 @@ void Factory::SetRegExpIrregexpData(Handle<JSRegExp> regexp,
JSRegExp::Type type, Handle<String> source,
JSRegExp::Flags flags, int capture_count) {
Handle<FixedArray> store = NewFixedArray(JSRegExp::kIrregexpDataSize);
- Smi* uninitialized = Smi::FromInt(JSRegExp::kUninitializedValue);
+ Smi uninitialized = Smi::FromInt(JSRegExp::kUninitializedValue);
store->set(JSRegExp::kTagIndex, Smi::FromInt(type));
store->set(JSRegExp::kSourceIndex, *source);
store->set(JSRegExp::kFlagsIndex, Smi::FromInt(flags));
@@ -3868,7 +4051,7 @@ Handle<Map> Factory::CreateSloppyFunctionMap(
if (IsFunctionModeWithName(function_mode)) ++inobject_properties_count;
Handle<Map> map = NewMap(
- JS_FUNCTION_TYPE, header_size + inobject_properties_count * kPointerSize,
+ JS_FUNCTION_TYPE, header_size + inobject_properties_count * kTaggedSize,
TERMINAL_FAST_ELEMENTS_KIND, inobject_properties_count);
map->set_has_prototype_slot(has_prototype);
map->set_is_constructor(has_prototype);
@@ -3895,7 +4078,7 @@ Handle<Map> Factory::CreateSloppyFunctionMap(
{ // Add length accessor.
Descriptor d = Descriptor::AccessorConstant(
length_string(), function_length_accessor(), roc_attribs);
- map->AppendDescriptor(&d);
+ map->AppendDescriptor(isolate(), &d);
}
STATIC_ASSERT(JSFunction::kNameDescriptorIndex == 1);
@@ -3904,23 +4087,23 @@ Handle<Map> Factory::CreateSloppyFunctionMap(
Handle<Name> name = isolate()->factory()->name_string();
Descriptor d = Descriptor::DataField(isolate(), name, field_index++,
roc_attribs, Representation::Tagged());
- map->AppendDescriptor(&d);
+ map->AppendDescriptor(isolate(), &d);
} else {
// Add name accessor.
Descriptor d = Descriptor::AccessorConstant(
name_string(), function_name_accessor(), roc_attribs);
- map->AppendDescriptor(&d);
+ map->AppendDescriptor(isolate(), &d);
}
{ // Add arguments accessor.
Descriptor d = Descriptor::AccessorConstant(
arguments_string(), function_arguments_accessor(), ro_attribs);
- map->AppendDescriptor(&d);
+ map->AppendDescriptor(isolate(), &d);
}
{ // Add caller accessor.
Descriptor d = Descriptor::AccessorConstant(
caller_string(), function_caller_accessor(), ro_attribs);
- map->AppendDescriptor(&d);
+ map->AppendDescriptor(isolate(), &d);
}
if (IsFunctionModeWithPrototype(function_mode)) {
// Add prototype accessor.
@@ -3929,9 +4112,10 @@ Handle<Map> Factory::CreateSloppyFunctionMap(
: ro_attribs;
Descriptor d = Descriptor::AccessorConstant(
prototype_string(), function_prototype_accessor(), attribs);
- map->AppendDescriptor(&d);
+ map->AppendDescriptor(isolate(), &d);
}
DCHECK_EQ(inobject_properties_count, field_index);
+ LOG(isolate(), MapDetails(*map));
return map;
}
@@ -3947,7 +4131,7 @@ Handle<Map> Factory::CreateStrictFunctionMap(
inobject_properties_count;
Handle<Map> map = NewMap(
- JS_FUNCTION_TYPE, header_size + inobject_properties_count * kPointerSize,
+ JS_FUNCTION_TYPE, header_size + inobject_properties_count * kTaggedSize,
TERMINAL_FAST_ELEMENTS_KIND, inobject_properties_count);
map->set_has_prototype_slot(has_prototype);
map->set_is_constructor(has_prototype);
@@ -3971,7 +4155,7 @@ Handle<Map> Factory::CreateStrictFunctionMap(
{ // Add length accessor.
Descriptor d = Descriptor::AccessorConstant(
length_string(), function_length_accessor(), roc_attribs);
- map->AppendDescriptor(&d);
+ map->AppendDescriptor(isolate(), &d);
}
STATIC_ASSERT(JSFunction::kNameDescriptorIndex == 1);
@@ -3980,13 +4164,13 @@ Handle<Map> Factory::CreateStrictFunctionMap(
Handle<Name> name = isolate()->factory()->name_string();
Descriptor d = Descriptor::DataField(isolate(), name, field_index++,
roc_attribs, Representation::Tagged());
- map->AppendDescriptor(&d);
+ map->AppendDescriptor(isolate(), &d);
} else {
// Add name accessor.
Descriptor d = Descriptor::AccessorConstant(
name_string(), function_name_accessor(), roc_attribs);
- map->AppendDescriptor(&d);
+ map->AppendDescriptor(isolate(), &d);
}
STATIC_ASSERT(JSFunction::kMaybeHomeObjectDescriptorIndex == 2);
@@ -3995,7 +4179,7 @@ Handle<Map> Factory::CreateStrictFunctionMap(
Handle<Name> name = isolate()->factory()->home_object_symbol();
Descriptor d = Descriptor::DataField(isolate(), name, field_index++,
DONT_ENUM, Representation::Tagged());
- map->AppendDescriptor(&d);
+ map->AppendDescriptor(isolate(), &d);
}
if (IsFunctionModeWithPrototype(function_mode)) {
@@ -4005,9 +4189,10 @@ Handle<Map> Factory::CreateStrictFunctionMap(
: ro_attribs;
Descriptor d = Descriptor::AccessorConstant(
prototype_string(), function_prototype_accessor(), attribs);
- map->AppendDescriptor(&d);
+ map->AppendDescriptor(isolate(), &d);
}
DCHECK_EQ(inobject_properties_count, field_index);
+ LOG(isolate(), MapDetails(*map));
return map;
}
@@ -4033,15 +4218,16 @@ Handle<Map> Factory::CreateClassFunctionMap(Handle<JSFunction> empty_function) {
{ // Add length accessor.
Descriptor d = Descriptor::AccessorConstant(
length_string(), function_length_accessor(), roc_attribs);
- map->AppendDescriptor(&d);
+ map->AppendDescriptor(isolate(), &d);
}
{
// Add prototype accessor.
Descriptor d = Descriptor::AccessorConstant(
prototype_string(), function_prototype_accessor(), ro_attribs);
- map->AppendDescriptor(&d);
+ map->AppendDescriptor(isolate(), &d);
}
+ LOG(isolate(), MapDetails(*map));
return map;
}
@@ -4068,7 +4254,7 @@ Handle<CallHandlerInfo> Factory::NewCallHandlerInfo(bool has_no_side_effect) {
: side_effect_call_handler_info_map();
Handle<CallHandlerInfo> info(CallHandlerInfo::cast(New(map, TENURED)),
isolate());
- Object* undefined_value = ReadOnlyRoots(isolate()).undefined_value();
+ Object undefined_value = ReadOnlyRoots(isolate()).undefined_value();
info->set_callback(undefined_value);
info->set_js_callback(undefined_value);
info->set_data(undefined_value);
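The recurring Object*/Smi*/HeapObject* to Object/Smi/HeapObject rewrites throughout this file belong to V8's migration from raw C++ pointers to small tagged-value classes that wrap an Address and are passed by value. A minimal sketch of the idiom, simplified from the real classes (which add casts, verification, and operator->):

    #include <cstdint>

    using Address = uintptr_t;
    constexpr Address kNullAddress = 0;

    // Same size as a pointer, but no longer a C++ object pointer: the payload
    // is a tagged word (Smi payload or heap pointer plus tag bits).
    class Object {
     public:
      constexpr Object() : ptr_(kNullAddress) {}
      explicit constexpr Object(Address ptr) : ptr_(ptr) {}
      constexpr Address ptr() const { return ptr_; }
      constexpr bool is_null() const { return ptr_ == kNullAddress; }
      constexpr bool operator==(Object other) const {
        return ptr_ == other.ptr_;
      }
     private:
      Address ptr_;
    };

This is also why, later in this patch, Heap::FindAllocationMemento returns a default-constructed AllocationMemento() instead of nullptr and callers test is_null(): the empty state is a value, not a pointer.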
diff --git a/deps/v8/src/heap/factory.h b/deps/v8/src/heap/factory.h
index 8c6d32090e..abdca3807a 100644
--- a/deps/v8/src/heap/factory.h
+++ b/deps/v8/src/heap/factory.h
@@ -16,7 +16,6 @@
#include "src/objects/code.h"
#include "src/objects/dictionary.h"
#include "src/objects/hash-table.h"
-#include "src/objects/js-array-buffer.h"
#include "src/objects/js-array.h"
#include "src/objects/js-regexp.h"
#include "src/objects/ordered-hash-table.h"
@@ -34,12 +33,14 @@ class CallableTask;
class CallbackTask;
class CallHandlerInfo;
class Expression;
+class EmbedderDataArray;
class ArrayBoilerplateDescription;
class CoverageInfo;
class DebugInfo;
class EnumCache;
class FreshlyAllocatedBigInt;
class Isolate;
+class JSDataView;
class JSGeneratorObject;
class JSMap;
class JSMapIterator;
@@ -48,24 +49,27 @@ class JSPromise;
class JSProxy;
class JSSet;
class JSSetIterator;
+class JSTypedArray;
class JSWeakMap;
class LoadHandler;
class ModuleInfo;
class NativeContext;
class NewFunctionArgs;
-class PreParsedScopeData;
+class PreparseData;
class PromiseResolveThenableJobTask;
class RegExpMatchInfo;
class ScriptContextTable;
class StackFrameInfo;
class StoreHandler;
class TemplateObjectDescription;
-class UncompiledDataWithoutPreParsedScope;
-class UncompiledDataWithPreParsedScope;
+class UncompiledDataWithoutPreparseData;
+class UncompiledDataWithPreparseData;
class WasmExportedFunctionData;
+class WeakFactoryCleanupJobTask;
struct SourceRange;
template <typename T>
class ZoneVector;
+enum class SharedFlag : uint32_t;
enum FunctionMode {
kWithNameBit = 1 << 0,
@@ -152,6 +156,10 @@ class V8_EXPORT_PRIVATE Factory {
Handle<FeedbackVector> NewFeedbackVector(
Handle<SharedFunctionInfo> shared, PretenureFlag pretenure = NOT_TENURED);
+ // Allocates a clean embedder data array with given capacity.
+ Handle<EmbedderDataArray> NewEmbedderDataArray(
+ int length, PretenureFlag pretenure = NOT_TENURED);
+
// Allocates a fixed array for name-value pairs of boilerplate properties and
// calculates the number of properties we need to store in the backing store.
Handle<ObjectBoilerplateDescription> NewObjectBoilerplateDescription(
@@ -176,6 +184,7 @@ class V8_EXPORT_PRIVATE Factory {
Handle<OrderedHashSet> NewOrderedHashSet();
Handle<OrderedHashMap> NewOrderedHashMap();
+ Handle<OrderedNameDictionary> NewOrderedNameDictionary();
Handle<SmallOrderedHashSet> NewSmallOrderedHashSet(
int capacity = SmallOrderedHashSet::kMinCapacity,
@@ -183,6 +192,9 @@ class V8_EXPORT_PRIVATE Factory {
Handle<SmallOrderedHashMap> NewSmallOrderedHashMap(
int capacity = SmallOrderedHashMap::kMinCapacity,
PretenureFlag pretenure = NOT_TENURED);
+ Handle<SmallOrderedNameDictionary> NewSmallOrderedNameDictionary(
+ int capacity = SmallOrderedHashMap::kMinCapacity,
+ PretenureFlag pretenure = NOT_TENURED);
// Create a new PrototypeInfo struct.
Handle<PrototypeInfo> NewPrototypeInfo();
@@ -258,7 +270,7 @@ class V8_EXPORT_PRIVATE Factory {
inline Handle<String> NewStringFromStaticChars(
const char (&str)[N], PretenureFlag pretenure = NOT_TENURED) {
DCHECK(N == StrLength(str) + 1);
- return NewStringFromOneByte(STATIC_CHAR_VECTOR(str), pretenure)
+ return NewStringFromOneByte(StaticCharVector(str), pretenure)
.ToHandleChecked();
}
@@ -360,7 +372,7 @@ class V8_EXPORT_PRIVATE Factory {
// Create a symbol in old or read-only space.
Handle<Symbol> NewSymbol(PretenureFlag pretenure = TENURED);
Handle<Symbol> NewPrivateSymbol(PretenureFlag pretenure = TENURED);
- Handle<Symbol> NewPrivateFieldSymbol();
+ Handle<Symbol> NewPrivateNameSymbol(Handle<String> name);
// Create a global (but otherwise uninitialized) context.
Handle<NativeContext> NewNativeContext();
@@ -439,8 +451,8 @@ class V8_EXPORT_PRIVATE Factory {
Handle<PromiseResolveThenableJobTask> NewPromiseResolveThenableJobTask(
Handle<JSPromise> promise_to_resolve, Handle<JSReceiver> then,
Handle<JSReceiver> thenable, Handle<Context> context);
-
- Handle<MicrotaskQueue> NewMicrotaskQueue();
+ Handle<WeakFactoryCleanupJobTask> NewWeakFactoryCleanupJobTask(
+ Handle<JSWeakFactory> weak_factory);
// Foreign objects are pretenured when allocated by the bootstrapper.
Handle<Foreign> NewForeign(Address addr,
@@ -469,7 +481,11 @@ class V8_EXPORT_PRIVATE Factory {
Handle<FeedbackCell> NewNoClosuresCell(Handle<HeapObject> value);
Handle<FeedbackCell> NewOneClosureCell(Handle<HeapObject> value);
Handle<FeedbackCell> NewManyClosuresCell(Handle<HeapObject> value);
+ Handle<FeedbackCell> NewNoFeedbackCell();
+ Handle<DescriptorArray> NewDescriptorArray(
+ int number_of_entries, int slack = 0,
+ PretenureFlag pretenure = NOT_TENURED);
Handle<TransitionArray> NewTransitionArray(int number_of_transitions,
int slack = 0);
@@ -482,8 +498,8 @@ class V8_EXPORT_PRIVATE Factory {
int inobject_properties = 0);
// Initializes the fields of a newly created Map. Exposed for tests and
// heap setup; other code should just call NewMap which takes care of it.
- Map* InitializeMap(Map* map, InstanceType type, int instance_size,
- ElementsKind elements_kind, int inobject_properties);
+ Map InitializeMap(Map map, InstanceType type, int instance_size,
+ ElementsKind elements_kind, int inobject_properties);
// Allocate a block of memory in the given space (filled with a filler).
// Used as a fall-back for generated code when the space is full.
@@ -593,6 +609,15 @@ class V8_EXPORT_PRIVATE Factory {
Handle<Map> map,
int number_of_slow_properties = NameDictionary::kInitialCapacity,
PretenureFlag pretenure = NOT_TENURED);
+ // Allocates and initializes a new JavaScript object with the given
+ // {prototype} and {properties}. The newly created object will be
+ // in dictionary properties mode. The {elements} can either be the
+ // empty fixed array, in which case the resulting object will have
+ // fast elements, or a NumberDictionary, in which case the resulting
+ // object will have dictionary elements.
+ Handle<JSObject> NewSlowJSObjectWithPropertiesAndElements(
+ Handle<Object> prototype, Handle<NameDictionary> properties,
+ Handle<FixedArrayBase> elements, PretenureFlag pretenure = NOT_TENURED);
// JS arrays are pretenured when allocated by the parser.
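A hypothetical call site for the declaration above, not taken from the patch (the prototype and properties handles are assumed to be prepared elsewhere):

    // Build a dictionary-properties object with fast (empty) elements.
    Handle<JSObject> obj = factory->NewSlowJSObjectWithPropertiesAndElements(
        prototype,                       // Handle<Object>
        properties,                      // Handle<NameDictionary>
        factory->empty_fixed_array());   // empty array => fast elements

Passing a NumberDictionary as the elements argument would instead produce dictionary elements, per the comment above.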
@@ -635,9 +660,8 @@ class V8_EXPORT_PRIVATE Factory {
Handle<Module> NewModule(Handle<SharedFunctionInfo> code);
- Handle<JSArrayBuffer> NewJSArrayBuffer(
- SharedFlag shared = SharedFlag::kNotShared,
- PretenureFlag pretenure = NOT_TENURED);
+ Handle<JSArrayBuffer> NewJSArrayBuffer(SharedFlag shared,
+ PretenureFlag pretenure = NOT_TENURED);
static void TypeAndSizeForElementsKind(ElementsKind kind,
ExternalArrayType* array_type,
@@ -726,18 +750,17 @@ class V8_EXPORT_PRIVATE Factory {
Handle<ModuleInfo> NewModuleInfo();
- Handle<PreParsedScopeData> NewPreParsedScopeData(int length);
+ Handle<PreparseData> NewPreparseData(int data_length, int children_length);
- Handle<UncompiledDataWithoutPreParsedScope>
- NewUncompiledDataWithoutPreParsedScope(Handle<String> inferred_name,
- int32_t start_position,
- int32_t end_position,
- int32_t function_literal_id);
+ Handle<UncompiledDataWithoutPreparseData>
+ NewUncompiledDataWithoutPreparseData(Handle<String> inferred_name,
+ int32_t start_position,
+ int32_t end_position,
+ int32_t function_literal_id);
- Handle<UncompiledDataWithPreParsedScope> NewUncompiledDataWithPreParsedScope(
+ Handle<UncompiledDataWithPreparseData> NewUncompiledDataWithPreparseData(
Handle<String> inferred_name, int32_t start_position,
- int32_t end_position, int32_t function_literal_id,
- Handle<PreParsedScopeData>);
+ int32_t end_position, int32_t function_literal_id, Handle<PreparseData>);
// Create an External object for V8's external API.
Handle<JSObject> NewExternal(void* value);
@@ -757,7 +780,7 @@ class V8_EXPORT_PRIVATE Factory {
MaybeHandle<ByteArray>(),
MaybeHandle<DeoptimizationData> maybe_deopt_data =
MaybeHandle<DeoptimizationData>(),
- Movability movability = kMovable, uint32_t stub_key = 0,
+ Movability movability = kMovable,
bool is_turbofanned = false, int stack_slots = 0,
int safepoint_table_offset = 0,
int handler_table_offset = 0);
@@ -772,15 +795,9 @@ class V8_EXPORT_PRIVATE Factory {
MaybeHandle<ByteArray>(),
MaybeHandle<DeoptimizationData> maybe_deopt_data =
MaybeHandle<DeoptimizationData>(),
- Movability movability = kMovable, uint32_t stub_key = 0,
- bool is_turbofanned = false, int stack_slots = 0,
- int safepoint_table_offset = 0, int handler_table_offset = 0);
-
- // Allocates a new, empty code object for use by builtin deserialization. The
- // given {size} argument specifies the size of the entire code object.
- // Can only be used when code space is unprotected and requires manual
- // initialization by the caller.
- Handle<Code> NewCodeForDeserialization(uint32_t size);
+ Movability movability = kMovable, bool is_turbofanned = false,
+ int stack_slots = 0, int safepoint_table_offset = 0,
+ int handler_table_offset = 0);
// Allocates a new code object and initializes it as the trampoline to the
// given off-heap entry point.
@@ -800,15 +817,15 @@ class V8_EXPORT_PRIVATE Factory {
inline Handle<Object> NewURIError();
Handle<Object> NewError(Handle<JSFunction> constructor,
- MessageTemplate::Template template_index,
+ MessageTemplate template_index,
Handle<Object> arg0 = Handle<Object>(),
Handle<Object> arg1 = Handle<Object>(),
Handle<Object> arg2 = Handle<Object>());
-#define DECLARE_ERROR(NAME) \
- Handle<Object> New##NAME(MessageTemplate::Template template_index, \
- Handle<Object> arg0 = Handle<Object>(), \
- Handle<Object> arg1 = Handle<Object>(), \
+#define DECLARE_ERROR(NAME) \
+ Handle<Object> New##NAME(MessageTemplate template_index, \
+ Handle<Object> arg0 = Handle<Object>(), \
+ Handle<Object> arg1 = Handle<Object>(), \
Handle<Object> arg2 = Handle<Object>());
DECLARE_ERROR(Error)
DECLARE_ERROR(EvalError)
@@ -822,11 +839,11 @@ class V8_EXPORT_PRIVATE Factory {
#undef DECLARE_ERROR
Handle<String> NumberToString(Handle<Object> number, bool check_cache = true);
- Handle<String> NumberToString(Smi* number, bool check_cache = true);
+ Handle<String> NumberToString(Smi number, bool check_cache = true);
inline Handle<String> Uint32ToString(uint32_t value, bool check_cache = true);
-#define ROOT_ACCESSOR(type, name, CamelName) inline Handle<type> name();
+#define ROOT_ACCESSOR(Type, name, CamelName) inline Handle<Type> name();
ROOT_LIST(ROOT_ACCESSOR)
#undef ROOT_ACCESSOR
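The hunk above only renames the macro parameter (type to Type), but the X-macro pattern behind ROOT_LIST/ROOT_ACCESSOR is worth spelling out. A self-contained sketch with an illustrative two-entry list (the real ROOT_LIST is far larger):

    template <typename T> class Handle;  // stand-ins for the real types
    class String;
    class Map;

    // Each V(Type, name, CamelName) entry expands once per application site.
    #define DEMO_ROOT_LIST(V)              \
      V(String, empty_string, EmptyString) \
      V(Map, meta_map, MetaMap)

    #define ROOT_ACCESSOR(Type, name, CamelName) inline Handle<Type> name();
    DEMO_ROOT_LIST(ROOT_ACCESSOR)  // declares empty_string() and meta_map()
    #undef ROOT_ACCESSOR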
@@ -867,12 +884,9 @@ class V8_EXPORT_PRIVATE Factory {
Handle<Map> CreateClassFunctionMap(Handle<JSFunction> empty_function);
// Allocates a new JSMessageObject object.
- Handle<JSMessageObject> NewJSMessageObject(MessageTemplate::Template message,
- Handle<Object> argument,
- int start_position,
- int end_position,
- Handle<Script> script,
- Handle<Object> stack_frames);
+ Handle<JSMessageObject> NewJSMessageObject(
+ MessageTemplate message, Handle<Object> argument, int start_position,
+ int end_position, Handle<Script> script, Handle<Object> stack_frames);
Handle<DebugInfo> NewDebugInfo(Handle<SharedFunctionInfo> shared);
@@ -911,11 +925,13 @@ class V8_EXPORT_PRIVATE Factory {
// Converts the given ToPrimitive hint to its string representation.
Handle<String> ToPrimitiveHintString(ToPrimitiveHint hint);
+ Handle<JSPromise> NewJSPromiseWithoutHook(
+ PretenureFlag pretenure = NOT_TENURED);
Handle<JSPromise> NewJSPromise(PretenureFlag pretenure = NOT_TENURED);
Handle<CallHandlerInfo> NewCallHandlerInfo(bool has_no_side_effect = false);
- HeapObject* NewForTest(Handle<Map> map, PretenureFlag pretenure) {
+ HeapObject NewForTest(Handle<Map> map, PretenureFlag pretenure) {
return New(map, pretenure);
}
@@ -928,24 +944,34 @@ class V8_EXPORT_PRIVATE Factory {
return (Isolate*)this; // NOLINT(readability/casting)
}
- HeapObject* AllocateRawWithImmortalMap(
- int size, PretenureFlag pretenure, Map* map,
+ HeapObject AllocateRawWithImmortalMap(
+ int size, PretenureFlag pretenure, Map map,
AllocationAlignment alignment = kWordAligned);
- HeapObject* AllocateRawWithAllocationSite(
+ HeapObject AllocateRawWithAllocationSite(
Handle<Map> map, PretenureFlag pretenure,
Handle<AllocationSite> allocation_site);
// Allocate memory for an uninitialized array (e.g., a FixedArray or similar).
- HeapObject* AllocateRawArray(int size, PretenureFlag pretenure);
- HeapObject* AllocateRawFixedArray(int length, PretenureFlag pretenure);
- HeapObject* AllocateRawWeakArrayList(int length, PretenureFlag pretenure);
+ HeapObject AllocateRawArray(int size, PretenureFlag pretenure);
+ HeapObject AllocateRawFixedArray(int length, PretenureFlag pretenure);
+ HeapObject AllocateRawWeakArrayList(int length, PretenureFlag pretenure);
Handle<FixedArray> NewFixedArrayWithFiller(RootIndex map_root_index,
- int length, Object* filler,
+ int length, Object filler,
PretenureFlag pretenure);
+ // Allocates a new context with the given map, sets its length, initializes
+ // the after-header (variadic) part with uninitialized values, and leaves
+ // the context header itself uninitialized.
+ Handle<Context> NewContext(RootIndex map_root_index, int size,
+ int variadic_part_length, PretenureFlag pretenure);
+
+ template <typename T>
+ Handle<T> AllocateSmallOrderedHashTable(Handle<Map> map, int capacity,
+ PretenureFlag pretenure);
+
// Creates a heap object based on the map. The fields of the heap object are
// not initialized; it is the responsibility of the caller to do that.
- HeapObject* New(Handle<Map> map, PretenureFlag pretenure);
+ HeapObject New(Handle<Map> map, PretenureFlag pretenure);
template <typename T>
Handle<T> CopyArrayWithMap(Handle<T> src, Handle<Map> map);
@@ -968,7 +994,7 @@ class V8_EXPORT_PRIVATE Factory {
// Attempt to find the number in a small cache. If we find it, return
// the string representation of the number. Otherwise return undefined.
- Handle<Object> NumberToStringCacheGet(Object* number, int hash);
+ Handle<Object> NumberToStringCacheGet(Object number, int hash);
// Update the cache with a new number-string pair.
Handle<String> NumberToStringCacheSet(Handle<Object> number, int hash,
@@ -978,15 +1004,12 @@ class V8_EXPORT_PRIVATE Factory {
Handle<JSArray> NewJSArray(ElementsKind elements_kind,
PretenureFlag pretenure = NOT_TENURED);
- Handle<JSPromise> NewJSPromiseWithoutHook(
- PretenureFlag pretenure = NOT_TENURED);
-
Handle<SharedFunctionInfo> NewSharedFunctionInfo(
MaybeHandle<String> name, MaybeHandle<HeapObject> maybe_function_data,
int maybe_builtin_index, FunctionKind kind = kNormalFunction);
- void InitializeAllocationMemento(AllocationMemento* memento,
- AllocationSite* allocation_site);
+ void InitializeAllocationMemento(AllocationMemento memento,
+ AllocationSite allocation_site);
// Initializes a JSObject based on its map.
void InitializeJSObjectFromMap(Handle<JSObject> obj,
diff --git a/deps/v8/src/heap/gc-tracer.cc b/deps/v8/src/heap/gc-tracer.cc
index 7d33c68ad1..c3f7ff3029 100644
--- a/deps/v8/src/heap/gc-tracer.cc
+++ b/deps/v8/src/heap/gc-tracer.cc
@@ -7,8 +7,10 @@
#include <cstdarg>
#include "src/base/atomic-utils.h"
-#include "src/counters.h"
+#include "src/counters-inl.h"
#include "src/heap/heap-inl.h"
+#include "src/heap/incremental-marking.h"
+#include "src/heap/spaces.h"
#include "src/isolate.h"
namespace v8 {
@@ -196,7 +198,7 @@ void GCTracer::ResetForTesting() {
average_mark_compact_duration_ = 0;
current_mark_compact_mutator_utilization_ = 1.0;
previous_mark_compact_end_time_ = 0;
- base::LockGuard<base::Mutex> guard(&background_counter_mutex_);
+ base::MutexGuard guard(&background_counter_mutex_);
for (int i = 0; i < BackgroundScope::NUMBER_OF_SCOPES; i++) {
background_counter_[i].total_duration_ms = 0;
background_counter_[i].runtime_call_counter.Reset();
@@ -326,6 +328,7 @@ void GCTracer::Stop(GarbageCollector collector) {
current_.incremental_marking_duration);
recorded_incremental_mark_compacts_.Push(
MakeBytesAndDuration(current_.start_object_size, duration));
+ RecordGCSumCounters(duration);
ResetIncrementalMarkingCounters();
combined_mark_compact_speed_cache_ = 0.0;
FetchBackgroundMarkCompactCounters();
@@ -337,6 +340,7 @@ void GCTracer::Stop(GarbageCollector collector) {
current_.end_time, duration + current_.incremental_marking_duration);
recorded_mark_compacts_.Push(
MakeBytesAndDuration(current_.start_object_size, duration));
+ RecordGCSumCounters(duration);
ResetIncrementalMarkingCounters();
combined_mark_compact_speed_cache_ = 0.0;
FetchBackgroundMarkCompactCounters();
@@ -654,6 +658,7 @@ void GCTracer::PrintNVP() const {
"gc=%s "
"reduce_memory=%d "
"heap.prologue=%.2f "
+ "heap.embedder_tracing_epilogue=%.2f "
"heap.epilogue=%.2f "
"heap.epilogue.reduce_new_space=%.2f "
"heap.external.prologue=%.1f "
@@ -693,9 +698,8 @@ void GCTracer::PrintNVP() const {
"mark.weak_closure.weak_handles=%.1f "
"mark.weak_closure.weak_roots=%.1f "
"mark.weak_closure.harmony=%.1f "
- "mark.wrapper_prologue=%.1f "
- "mark.wrapper_epilogue=%.1f "
- "mark.wrapper_tracing=%.1f "
+ "mark.embedder_prologue=%.1f "
+ "mark.embedder_tracing=%.1f "
"prologue=%.1f "
"sweep=%.1f "
"sweep.code=%.1f "
@@ -706,9 +710,11 @@ void GCTracer::PrintNVP() const {
"incremental.finalize.body=%.1f "
"incremental.finalize.external.prologue=%.1f "
"incremental.finalize.external.epilogue=%.1f "
+ "incremental.layout_change=%.1f "
+ "incremental.start=%.1f "
"incremental.sweeping=%.1f "
- "incremental.wrapper_prologue=%.1f "
- "incremental.wrapper_tracing=%.1f "
+ "incremental.embedder_prologue=%.1f "
+ "incremental.embedder_tracing=%.1f "
"incremental_wrapper_tracing_longest_step=%.1f "
"incremental_finalize_longest_step=%.1f "
"incremental_finalize_steps_count=%d "
@@ -750,6 +756,7 @@ void GCTracer::PrintNVP() const {
"compaction_speed=%.f\n",
duration, spent_in_mutator, current_.TypeName(true),
current_.reduce_memory, current_.scopes[Scope::HEAP_PROLOGUE],
+ current_.scopes[Scope::HEAP_EMBEDDER_TRACING_EPILOGUE],
current_.scopes[Scope::HEAP_EPILOGUE],
current_.scopes[Scope::HEAP_EPILOGUE_REDUCE_NEW_SPACE],
current_.scopes[Scope::HEAP_EXTERNAL_PROLOGUE],
@@ -788,9 +795,8 @@ void GCTracer::PrintNVP() const {
current_.scopes[Scope::MC_MARK_WEAK_CLOSURE_WEAK_HANDLES],
current_.scopes[Scope::MC_MARK_WEAK_CLOSURE_WEAK_ROOTS],
current_.scopes[Scope::MC_MARK_WEAK_CLOSURE_HARMONY],
- current_.scopes[Scope::MC_MARK_WRAPPER_PROLOGUE],
- current_.scopes[Scope::MC_MARK_WRAPPER_EPILOGUE],
- current_.scopes[Scope::MC_MARK_WRAPPER_TRACING],
+ current_.scopes[Scope::MC_MARK_EMBEDDER_PROLOGUE],
+ current_.scopes[Scope::MC_MARK_EMBEDDER_TRACING],
current_.scopes[Scope::MC_PROLOGUE], current_.scopes[Scope::MC_SWEEP],
current_.scopes[Scope::MC_SWEEP_CODE],
current_.scopes[Scope::MC_SWEEP_MAP],
@@ -800,11 +806,14 @@ void GCTracer::PrintNVP() const {
current_.scopes[Scope::MC_INCREMENTAL_FINALIZE_BODY],
current_.scopes[Scope::MC_INCREMENTAL_EXTERNAL_PROLOGUE],
current_.scopes[Scope::MC_INCREMENTAL_EXTERNAL_EPILOGUE],
+ current_.scopes[Scope::MC_INCREMENTAL_LAYOUT_CHANGE],
+ current_.scopes[Scope::MC_INCREMENTAL_START],
current_.scopes[Scope::MC_INCREMENTAL_SWEEPING],
- current_.scopes[Scope::MC_INCREMENTAL_WRAPPER_PROLOGUE],
- current_.scopes[Scope::MC_INCREMENTAL_WRAPPER_TRACING],
+ current_.scopes[Scope::MC_INCREMENTAL_EMBEDDER_PROLOGUE],
+ current_.scopes[Scope::MC_INCREMENTAL_EMBEDDER_TRACING],
current_
- .incremental_marking_scopes[Scope::MC_INCREMENTAL_WRAPPER_TRACING]
+ .incremental_marking_scopes
+ [Scope::MC_INCREMENTAL_EMBEDDER_TRACING]
.longest_step,
current_
.incremental_marking_scopes[Scope::MC_INCREMENTAL_FINALIZE_BODY]
@@ -1060,7 +1069,7 @@ void GCTracer::FetchBackgroundCounters(int first_global_scope,
int last_background_scope) {
DCHECK_EQ(last_global_scope - first_global_scope,
last_background_scope - first_background_scope);
- base::LockGuard<base::Mutex> guard(&background_counter_mutex_);
+ base::MutexGuard guard(&background_counter_mutex_);
int background_mc_scopes = last_background_scope - first_background_scope + 1;
for (int i = 0; i < background_mc_scopes; i++) {
current_.scopes[first_global_scope + i] +=
@@ -1085,7 +1094,7 @@ void GCTracer::FetchBackgroundCounters(int first_global_scope,
void GCTracer::AddBackgroundScopeSample(
BackgroundScope::ScopeId scope, double duration,
RuntimeCallCounter* runtime_call_counter) {
- base::LockGuard<base::Mutex> guard(&background_counter_mutex_);
+ base::MutexGuard guard(&background_counter_mutex_);
BackgroundCounter& counter = background_counter_[scope];
counter.total_duration_ms += duration;
if (runtime_call_counter) {
@@ -1093,7 +1102,7 @@ void GCTracer::AddBackgroundScopeSample(
}
}
-void GCTracer::RecordGCPhasesHistograms(HistogramTimer* gc_timer) {
+void GCTracer::RecordGCPhasesHistograms(TimedHistogram* gc_timer) {
Counters* counters = heap_->isolate()->counters();
if (gc_timer == counters->gc_finalize()) {
DCHECK_EQ(Scope::FIRST_TOP_MC_SCOPE, Scope::MC_CLEAR);
@@ -1120,5 +1129,58 @@ void GCTracer::RecordGCPhasesHistograms(HistogramTimer* gc_timer) {
}
}
+void GCTracer::RecordGCSumCounters(double atomic_pause_duration) {
+ base::MutexGuard guard(&background_counter_mutex_);
+
+ const double overall_duration =
+ current_.incremental_marking_scopes[Scope::MC_INCREMENTAL_LAYOUT_CHANGE]
+ .duration +
+ current_.incremental_marking_scopes[Scope::MC_INCREMENTAL_START]
+ .duration +
+ current_.incremental_marking_scopes[Scope::MC_INCREMENTAL_SWEEPING]
+ .duration +
+ incremental_marking_duration_ +
+ current_.incremental_marking_scopes[Scope::MC_INCREMENTAL_FINALIZE]
+ .duration +
+ atomic_pause_duration;
+ const double background_duration =
+ background_counter_[BackgroundScope::MC_BACKGROUND_EVACUATE_COPY]
+ .total_duration_ms +
+ background_counter_
+ [BackgroundScope::MC_BACKGROUND_EVACUATE_UPDATE_POINTERS]
+ .total_duration_ms +
+ background_counter_[BackgroundScope::MC_BACKGROUND_MARKING]
+ .total_duration_ms +
+ background_counter_[BackgroundScope::MC_BACKGROUND_SWEEPING]
+ .total_duration_ms;
+
+ const double marking_duration =
+ current_.incremental_marking_scopes[Scope::MC_INCREMENTAL_LAYOUT_CHANGE]
+ .duration +
+ current_.incremental_marking_scopes[Scope::MC_INCREMENTAL_START]
+ .duration +
+ incremental_marking_duration_ +
+ current_.incremental_marking_scopes[Scope::MC_INCREMENTAL_FINALIZE]
+ .duration +
+ current_.scopes[Scope::MC_MARK];
+ const double marking_background_duration =
+ background_counter_[BackgroundScope::MC_BACKGROUND_MARKING]
+ .total_duration_ms;
+
+ // UMA.
+ heap_->isolate()->counters()->gc_mark_compactor()->AddSample(
+ static_cast<int>(overall_duration));
+
+ // Emit trace event counters.
+ TRACE_EVENT_INSTANT2(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
+ "V8.GCMarkCompactorSummary", TRACE_EVENT_SCOPE_THREAD,
+ "duration", overall_duration, "background_duration",
+ background_duration);
+ TRACE_EVENT_INSTANT2(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
+ "V8.GCMarkCompactorMarkingSummary",
+ TRACE_EVENT_SCOPE_THREAD, "duration", marking_duration,
+ "background_duration", marking_background_duration);
+}
+
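To make the new bookkeeping concrete with purely hypothetical numbers: if the incremental layout-change scope accumulated 1 ms, the start scope 0.5 ms, incremental sweeping 3 ms, the incremental marking steps 12 ms in total, finalization 2 ms, and the atomic pause lasted 8 ms, then overall_duration = 1 + 0.5 + 3 + 12 + 2 + 8 = 26.5 ms. That value feeds the gc_mark_compactor UMA sample and the "duration" argument of the V8.GCMarkCompactorSummary trace event; marking_duration is computed the same way but drops the sweeping term and replaces the atomic pause with the MC_MARK scope.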
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/heap/gc-tracer.h b/deps/v8/src/heap/gc-tracer.h
index bf49586d57..4b4736048c 100644
--- a/deps/v8/src/heap/gc-tracer.h
+++ b/deps/v8/src/heap/gc-tracer.h
@@ -71,7 +71,7 @@ class V8_EXPORT_PRIVATE GCTracer {
NUMBER_OF_SCOPES,
FIRST_INCREMENTAL_SCOPE = MC_INCREMENTAL,
- LAST_INCREMENTAL_SCOPE = MC_INCREMENTAL_EXTERNAL_PROLOGUE,
+ LAST_INCREMENTAL_SCOPE = MC_INCREMENTAL_SWEEPING,
FIRST_SCOPE = MC_INCREMENTAL,
NUMBER_OF_INCREMENTAL_SCOPES =
LAST_INCREMENTAL_SCOPE - FIRST_INCREMENTAL_SCOPE + 1,
@@ -321,7 +321,7 @@ class V8_EXPORT_PRIVATE GCTracer {
void AddBackgroundScopeSample(BackgroundScope::ScopeId scope, double duration,
RuntimeCallCounter* runtime_call_counter);
- void RecordGCPhasesHistograms(HistogramTimer* gc_timer);
+ void RecordGCPhasesHistograms(TimedHistogram* gc_timer);
private:
FRIEND_TEST(GCTracer, AverageSpeed);
@@ -338,6 +338,7 @@ class V8_EXPORT_PRIVATE GCTracer {
FRIEND_TEST(GCTracerTest, IncrementalScope);
FRIEND_TEST(GCTracerTest, IncrementalMarkingSpeed);
FRIEND_TEST(GCTracerTest, MutatorUtilization);
+ FRIEND_TEST(GCTracerTest, RecordGCSumHistograms);
FRIEND_TEST(GCTracerTest, RecordMarkCompactHistograms);
FRIEND_TEST(GCTracerTest, RecordScavengerHistograms);
@@ -359,6 +360,11 @@ class V8_EXPORT_PRIVATE GCTracer {
void RecordMutatorUtilization(double mark_compactor_end_time,
double mark_compactor_duration);
+ // Overall time spent in mark compact within a given GC cycle. Exact
+ // accounting of events within a GC is not necessary, which is why the
+ // recording takes place at the end of the atomic pause.
+ void RecordGCSumCounters(double atomic_pause_duration);
+
// Print one detailed trace line in name=value format.
// TODO(ernstm): Move to Heap.
void PrintNVP() const;
diff --git a/deps/v8/src/heap/heap-inl.h b/deps/v8/src/heap/heap-inl.h
index 65b791a42f..d617d9f9ac 100644
--- a/deps/v8/src/heap/heap-inl.h
+++ b/deps/v8/src/heap/heap-inl.h
@@ -15,23 +15,26 @@
#include "src/base/atomic-utils.h"
#include "src/base/platform/platform.h"
-#include "src/counters-inl.h"
#include "src/feedback-vector.h"
// TODO(mstarzinger): There is one more include to remove in order to no longer
// leak heap internals to users of this interface!
#include "src/heap/spaces-inl.h"
+#include "src/isolate-data.h"
#include "src/isolate.h"
-#include "src/log.h"
#include "src/msan.h"
#include "src/objects-inl.h"
#include "src/objects/allocation-site-inl.h"
#include "src/objects/api-callbacks-inl.h"
+#include "src/objects/cell-inl.h"
#include "src/objects/descriptor-array.h"
-#include "src/objects/literal-objects.h"
-#include "src/objects/microtask-queue-inl.h"
+#include "src/objects/feedback-cell-inl.h"
+#include "src/objects/literal-objects-inl.h"
+#include "src/objects/oddball.h"
+#include "src/objects/property-cell.h"
#include "src/objects/scope-info.h"
#include "src/objects/script-inl.h"
+#include "src/objects/struct-inl.h"
#include "src/profiler/heap-profiler.h"
#include "src/string-hasher.h"
#include "src/zone/zone-list-inl.h"
@@ -50,32 +53,82 @@ AllocationSpace AllocationResult::RetrySpace() {
return static_cast<AllocationSpace>(Smi::ToInt(object_));
}
-HeapObject* AllocationResult::ToObjectChecked() {
+HeapObject AllocationResult::ToObjectChecked() {
CHECK(!IsRetry());
return HeapObject::cast(object_);
}
-#define ROOT_ACCESSOR(type, name, CamelName) \
- type* Heap::name() { return type::cast(roots_[RootIndex::k##CamelName]); }
+Isolate* Heap::isolate() {
+ return reinterpret_cast<Isolate*>(
+ reinterpret_cast<intptr_t>(this) -
+ reinterpret_cast<size_t>(reinterpret_cast<Isolate*>(16)->heap()) + 16);
+}
+
+int64_t Heap::external_memory() {
+ return isolate()->isolate_data()->external_memory_;
+}
+
+void Heap::update_external_memory(int64_t delta) {
+ isolate()->isolate_data()->external_memory_ += delta;
+}
+
+void Heap::update_external_memory_concurrently_freed(intptr_t freed) {
+ external_memory_concurrently_freed_ += freed;
+}
+
+void Heap::account_external_memory_concurrently_freed() {
+ isolate()->isolate_data()->external_memory_ -=
+ external_memory_concurrently_freed_;
+ external_memory_concurrently_freed_ = 0;
+}
+
+RootsTable& Heap::roots_table() { return isolate()->roots_table(); }
+
+#define ROOT_ACCESSOR(Type, name, CamelName) \
+ Type Heap::name() { \
+ return Type::cast(Object(roots_table()[RootIndex::k##CamelName])); \
+ }
MUTABLE_ROOT_LIST(ROOT_ACCESSOR)
#undef ROOT_ACCESSOR
#define ROOT_ACCESSOR(type, name, CamelName) \
- void Heap::set_##name(type* value) { \
+ void Heap::set_##name(type value) { \
/* The deserializer makes use of the fact that these common roots are */ \
/* never in new space and never on a page that is being compacted. */ \
- DCHECK(!deserialization_complete() || \
- RootCanBeWrittenAfterInitialization(RootIndex::k##CamelName)); \
- DCHECK_IMPLIES(static_cast<int>(RootIndex::k##CamelName) < kOldSpaceRoots, \
- !InNewSpace(value)); \
- roots_[RootIndex::k##CamelName] = value; \
+ DCHECK_IMPLIES(deserialization_complete(), \
+ !RootsTable::IsImmortalImmovable(RootIndex::k##CamelName)); \
+ DCHECK_IMPLIES(RootsTable::IsImmortalImmovable(RootIndex::k##CamelName), \
+ IsImmovable(HeapObject::cast(value))); \
+ roots_table()[RootIndex::k##CamelName] = value->ptr(); \
}
ROOT_LIST(ROOT_ACCESSOR)
#undef ROOT_ACCESSOR
+void Heap::SetRootMaterializedObjects(FixedArray objects) {
+ roots_table()[RootIndex::kMaterializedObjects] = objects->ptr();
+}
+
+void Heap::SetRootScriptList(Object value) {
+ roots_table()[RootIndex::kScriptList] = value->ptr();
+}
+
+void Heap::SetRootStringTable(StringTable value) {
+ roots_table()[RootIndex::kStringTable] = value->ptr();
+}
+
+void Heap::SetRootNoScriptSharedFunctionInfos(Object value) {
+ roots_table()[RootIndex::kNoScriptSharedFunctionInfos] = value->ptr();
+}
+
+void Heap::SetMessageListeners(TemplateList value) {
+ roots_table()[RootIndex::kMessageListeners] = value->ptr();
+}
+
PagedSpace* Heap::paged_space(int idx) {
DCHECK_NE(idx, LO_SPACE);
DCHECK_NE(idx, NEW_SPACE);
+ DCHECK_NE(idx, CODE_LO_SPACE);
+ DCHECK_NE(idx, NEW_LO_SPACE);
return static_cast<PagedSpace*>(space_[idx]);
}
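Heap::isolate() above is a hand-rolled container_of: Heap is embedded inside Isolate, so subtracting the heap member's byte offset from `this` recovers the owning Isolate. The offset itself is probed by calling heap() on a dummy Isolate* at address 16, a non-null placeholder, because Isolate is not a standard-layout type and offsetof would be undefined. A standalone sketch of the arithmetic (names hypothetical):

    #include <cstdint>
    #include <cstddef>

    struct Owner;

    // container_of: an interior member pointer minus the member's offset
    // yields the address of the embedding object.
    inline Owner* OwnerFromMember(void* member, size_t member_offset) {
      return reinterpret_cast<Owner*>(
          reinterpret_cast<uintptr_t>(member) - member_offset);
    }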
@@ -118,24 +171,19 @@ AllocationResult Heap::AllocateRaw(int size_in_bytes, AllocationSpace space,
}
#endif
#ifdef DEBUG
- isolate_->counters()->objs_since_last_full()->Increment();
- isolate_->counters()->objs_since_last_young()->Increment();
+ IncrementObjectCounters();
#endif
bool large_object = size_in_bytes > kMaxRegularHeapObjectSize;
- bool new_large_object = FLAG_young_generation_large_objects &&
- size_in_bytes > kMaxNewSpaceHeapObjectSize;
- HeapObject* object = nullptr;
+
+ HeapObject object;
AllocationResult allocation;
if (NEW_SPACE == space) {
if (large_object) {
- space = LO_SPACE;
+ // TODO(hpayer): Implement a LO tenuring strategy.
+ space = FLAG_young_generation_large_objects ? NEW_LO_SPACE : LO_SPACE;
} else {
- if (new_large_object) {
- allocation = new_lo_space_->AllocateRaw(size_in_bytes);
- } else {
- allocation = new_space_->AllocateRaw(size_in_bytes, alignment);
- }
+ allocation = new_space_->AllocateRaw(size_in_bytes, alignment);
if (allocation.To(&object)) {
OnAllocationEvent(object, size_in_bytes);
}
@@ -146,19 +194,25 @@ AllocationResult Heap::AllocateRaw(int size_in_bytes, AllocationSpace space,
// Here we only allocate in the old generation.
if (OLD_SPACE == space) {
if (large_object) {
- allocation = lo_space_->AllocateRaw(size_in_bytes, NOT_EXECUTABLE);
+ allocation = lo_space_->AllocateRaw(size_in_bytes);
} else {
allocation = old_space_->AllocateRaw(size_in_bytes, alignment);
}
} else if (CODE_SPACE == space) {
- if (size_in_bytes <= code_space()->AreaSize()) {
+ if (size_in_bytes <= code_space()->AreaSize() && !large_object) {
allocation = code_space_->AllocateRawUnaligned(size_in_bytes);
} else {
- allocation = lo_space_->AllocateRaw(size_in_bytes, EXECUTABLE);
+ allocation = code_lo_space_->AllocateRaw(size_in_bytes);
}
} else if (LO_SPACE == space) {
DCHECK(large_object);
- allocation = lo_space_->AllocateRaw(size_in_bytes, NOT_EXECUTABLE);
+ allocation = lo_space_->AllocateRaw(size_in_bytes);
+ } else if (NEW_LO_SPACE == space) {
+ DCHECK(FLAG_young_generation_large_objects);
+ allocation = new_lo_space_->AllocateRaw(size_in_bytes);
+ } else if (CODE_LO_SPACE == space) {
+ DCHECK(large_object);
+ allocation = code_lo_space_->AllocateRaw(size_in_bytes);
} else if (MAP_SPACE == space) {
allocation = map_space_->AllocateRawUnaligned(size_in_bytes);
} else if (RO_SPACE == space) {
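The dispatch chain above routes each request to a per-space allocator, first diverting oversized objects to the matching large-object space. The NEW_SPACE policy, extracted as a sketch (the size threshold is illustrative; the space and flag names come from the diff):

    enum AllocationSpace { NEW_SPACE, NEW_LO_SPACE, LO_SPACE };  // subset
    constexpr int kMaxRegularHeapObjectSize = 1 << 18;  // assumed threshold

    // Regular-sized young allocations stay in new space; large ones go to
    // NEW_LO_SPACE when young large objects are enabled, else to LO_SPACE.
    AllocationSpace RouteYoungAllocation(int size_in_bytes,
                                         bool young_lo_enabled) {
      const bool large_object = size_in_bytes > kMaxRegularHeapObjectSize;
      if (!large_object) return NEW_SPACE;
      return young_lo_enabled ? NEW_LO_SPACE : LO_SPACE;
    }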
@@ -186,7 +240,7 @@ AllocationResult Heap::AllocateRaw(int size_in_bytes, AllocationSpace space,
return allocation;
}
-void Heap::OnAllocationEvent(HeapObject* object, int size_in_bytes) {
+void Heap::OnAllocationEvent(HeapObject object, int size_in_bytes) {
for (auto& tracker : allocation_trackers_) {
tracker->AllocationEvent(object->address(), size_in_bytes);
}
@@ -212,46 +266,13 @@ void Heap::OnAllocationEvent(HeapObject* object, int size_in_bytes) {
}
}
-
-void Heap::OnMoveEvent(HeapObject* target, HeapObject* source,
- int size_in_bytes) {
- HeapProfiler* heap_profiler = isolate_->heap_profiler();
- if (heap_profiler->is_tracking_object_moves()) {
- heap_profiler->ObjectMoveEvent(source->address(), target->address(),
- size_in_bytes);
- }
- for (auto& tracker : allocation_trackers_) {
- tracker->MoveEvent(source->address(), target->address(), size_in_bytes);
- }
- if (target->IsSharedFunctionInfo()) {
- LOG_CODE_EVENT(isolate_, SharedFunctionInfoMoveEvent(source->address(),
- target->address()));
- }
-
- if (FLAG_verify_predictable) {
- ++allocations_count_;
- // Advance synthetic time by making a time request.
- MonotonicallyIncreasingTimeInMs();
-
- UpdateAllocationsHash(source);
- UpdateAllocationsHash(target);
- UpdateAllocationsHash(size_in_bytes);
-
- if (allocations_count_ % FLAG_dump_allocations_digest_at_alloc == 0) {
- PrintAllocationsHash();
- }
- } else if (FLAG_fuzzer_gc_analysis) {
- ++allocations_count_;
- }
-}
-
bool Heap::CanAllocateInReadOnlySpace() {
return !deserialization_complete_ &&
(isolate()->serializer_enabled() ||
!isolate()->initialized_from_snapshot());
}
-void Heap::UpdateAllocationsHash(HeapObject* object) {
+void Heap::UpdateAllocationsHash(HeapObject object) {
Address object_address = object->address();
MemoryChunk* memory_chunk = MemoryChunk::FromAddress(object_address);
AllocationSpace allocation_space = memory_chunk->owner()->identity();
@@ -264,7 +285,6 @@ void Heap::UpdateAllocationsHash(HeapObject* object) {
UpdateAllocationsHash(value);
}
-
void Heap::UpdateAllocationsHash(uint32_t value) {
uint16_t c1 = static_cast<uint16_t>(value);
uint16_t c2 = static_cast<uint16_t>(value >> 16);
@@ -274,14 +294,13 @@ void Heap::UpdateAllocationsHash(uint32_t value) {
StringHasher::AddCharacterCore(raw_allocations_hash_, c2);
}
-
-void Heap::RegisterExternalString(String* string) {
+void Heap::RegisterExternalString(String string) {
DCHECK(string->IsExternalString());
DCHECK(!string->IsThinString());
external_string_table_.AddString(string);
}
-void Heap::UpdateExternalString(String* string, size_t old_payload,
+void Heap::UpdateExternalString(String string, size_t old_payload,
size_t new_payload) {
DCHECK(string->IsExternalString());
Page* page = Page::FromHeapObject(string);
@@ -294,10 +313,10 @@ void Heap::UpdateExternalString(String* string, size_t old_payload,
ExternalBackingStoreType::kExternalString, new_payload - old_payload);
}
-void Heap::FinalizeExternalString(String* string) {
+void Heap::FinalizeExternalString(String string) {
DCHECK(string->IsExternalString());
Page* page = Page::FromHeapObject(string);
- ExternalString* ext_string = ExternalString::cast(string);
+ ExternalString ext_string = ExternalString::cast(string);
page->DecrementExternalBackingStoreBytes(
ExternalBackingStoreType::kExternalString,
@@ -305,8 +324,7 @@ void Heap::FinalizeExternalString(String* string) {
v8::String::ExternalStringResourceBase** resource_addr =
reinterpret_cast<v8::String::ExternalStringResourceBase**>(
- reinterpret_cast<byte*>(string) + ExternalString::kResourceOffset -
- kHeapObjectTag);
+ string->address() + ExternalString::kResourceOffset);
// Dispose of the C++ object if it has not already been disposed.
if (*resource_addr != nullptr) {
@@ -318,19 +336,19 @@ void Heap::FinalizeExternalString(String* string) {
Address Heap::NewSpaceTop() { return new_space_->top(); }
// static
-bool Heap::InNewSpace(Object* object) {
+bool Heap::InNewSpace(Object object) {
DCHECK(!HasWeakHeapObjectTag(object));
return object->IsHeapObject() && InNewSpace(HeapObject::cast(object));
}
// static
-bool Heap::InNewSpace(MaybeObject* object) {
- HeapObject* heap_object;
+bool Heap::InNewSpace(MaybeObject object) {
+ HeapObject heap_object;
return object->GetHeapObject(&heap_object) && InNewSpace(heap_object);
}
// static
-bool Heap::InNewSpace(HeapObject* heap_object) {
+bool Heap::InNewSpace(HeapObject heap_object) {
// Inlined check from NewSpace::Contains.
bool result = MemoryChunk::FromHeapObject(heap_object)->InNewSpace();
#ifdef DEBUG
@@ -346,56 +364,48 @@ bool Heap::InNewSpace(HeapObject* heap_object) {
}
// static
-bool Heap::InFromSpace(Object* object) {
+bool Heap::InFromSpace(Object object) {
DCHECK(!HasWeakHeapObjectTag(object));
return object->IsHeapObject() && InFromSpace(HeapObject::cast(object));
}
// static
-bool Heap::InFromSpace(MaybeObject* object) {
- HeapObject* heap_object;
+bool Heap::InFromSpace(MaybeObject object) {
+ HeapObject heap_object;
return object->GetHeapObject(&heap_object) && InFromSpace(heap_object);
}
// static
-bool Heap::InFromSpace(HeapObject* heap_object) {
+bool Heap::InFromSpace(HeapObject heap_object) {
return MemoryChunk::FromHeapObject(heap_object)
->IsFlagSet(Page::IN_FROM_SPACE);
}
// static
-bool Heap::InToSpace(Object* object) {
+bool Heap::InToSpace(Object object) {
DCHECK(!HasWeakHeapObjectTag(object));
return object->IsHeapObject() && InToSpace(HeapObject::cast(object));
}
// static
-bool Heap::InToSpace(MaybeObject* object) {
- HeapObject* heap_object;
+bool Heap::InToSpace(MaybeObject object) {
+ HeapObject heap_object;
return object->GetHeapObject(&heap_object) && InToSpace(heap_object);
}
// static
-bool Heap::InToSpace(HeapObject* heap_object) {
+bool Heap::InToSpace(HeapObject heap_object) {
return MemoryChunk::FromHeapObject(heap_object)->IsFlagSet(Page::IN_TO_SPACE);
}
-bool Heap::InOldSpace(Object* object) { return old_space_->Contains(object); }
+bool Heap::InOldSpace(Object object) { return old_space_->Contains(object); }
-bool Heap::InReadOnlySpace(Object* object) {
+bool Heap::InReadOnlySpace(Object object) {
return read_only_space_->Contains(object);
}
-bool Heap::InNewSpaceSlow(Address address) {
- return new_space_->ContainsSlow(address);
-}
-
-bool Heap::InOldSpaceSlow(Address address) {
- return old_space_->ContainsSlow(address);
-}
-
// static
-Heap* Heap::FromWritableHeapObject(const HeapObject* obj) {
+Heap* Heap::FromWritableHeapObject(const HeapObject obj) {
MemoryChunk* chunk = MemoryChunk::FromHeapObject(obj);
// RO_SPACE can be shared between heaps, so we can't use RO_SPACE objects to
// find a heap. The exception is when the ReadOnlySpace is writeable, during
@@ -415,27 +425,29 @@ bool Heap::ShouldBePromoted(Address old_address) {
}
void Heap::CopyBlock(Address dst, Address src, int byte_size) {
- CopyWords(reinterpret_cast<Object**>(dst), reinterpret_cast<Object**>(src),
- static_cast<size_t>(byte_size / kPointerSize));
+ DCHECK(IsAligned(byte_size, kTaggedSize));
+ STATIC_ASSERT(kTaggedSize == kSystemPointerSize);
+ CopyWords(dst, src, static_cast<size_t>(byte_size / kTaggedSize));
}
template <Heap::FindMementoMode mode>
-AllocationMemento* Heap::FindAllocationMemento(Map* map, HeapObject* object) {
+AllocationMemento Heap::FindAllocationMemento(Map map, HeapObject object) {
Address object_address = object->address();
Address memento_address = object_address + object->SizeFromMap(map);
- Address last_memento_word_address = memento_address + kPointerSize;
+ Address last_memento_word_address = memento_address + kTaggedSize;
// If the memento would be on another page, bail out immediately.
if (!Page::OnSamePage(object_address, last_memento_word_address)) {
- return nullptr;
+ return AllocationMemento();
}
- HeapObject* candidate = HeapObject::FromAddress(memento_address);
- Map* candidate_map = candidate->map();
+ HeapObject candidate = HeapObject::FromAddress(memento_address);
+ MapWordSlot candidate_map_slot = candidate->map_slot();
// This fast check may peek at an uninitialized word. However, the slow check
// below (memento_address == top) ensures that this is safe. Mark the word as
// initialized to silence MemorySanitizer warnings.
- MSAN_MEMORY_IS_INITIALIZED(&candidate_map, sizeof(candidate_map));
- if (candidate_map != ReadOnlyRoots(this).allocation_memento_map()) {
- return nullptr;
+ MSAN_MEMORY_IS_INITIALIZED(candidate_map_slot.address(), kTaggedSize);
+ if (!candidate_map_slot.contains_value(
+ ReadOnlyRoots(this).allocation_memento_map().ptr())) {
+ return AllocationMemento();
}
// Bail out if the memento is below the age mark, which can happen when
@@ -445,15 +457,15 @@ AllocationMemento* Heap::FindAllocationMemento(Map* map, HeapObject* object) {
Address age_mark =
reinterpret_cast<SemiSpace*>(object_page->owner())->age_mark();
if (!object_page->Contains(age_mark)) {
- return nullptr;
+ return AllocationMemento();
}
// Do an exact check in the case where the age mark is on the same page.
if (object_address < age_mark) {
- return nullptr;
+ return AllocationMemento();
}
}
- AllocationMemento* memento_candidate = AllocationMemento::cast(candidate);
+ AllocationMemento memento_candidate = AllocationMemento::cast(candidate);
// Depending on what the memento is used for, we might need to perform
// additional checks.
@@ -462,7 +474,7 @@ AllocationMemento* Heap::FindAllocationMemento(Map* map, HeapObject* object) {
case Heap::kForGC:
return memento_candidate;
case Heap::kForRuntime:
- if (memento_candidate == nullptr) return nullptr;
+ if (memento_candidate.is_null()) return AllocationMemento();
// Either the object is the last object in the new space, or there is
// another object of at least word size (the header map word) following
// it, so it suffices to compare ptr and top here.
@@ -473,43 +485,37 @@ AllocationMemento* Heap::FindAllocationMemento(Map* map, HeapObject* object) {
if ((memento_address != top) && memento_candidate->IsValid()) {
return memento_candidate;
}
- return nullptr;
+ return AllocationMemento();
default:
UNREACHABLE();
}
UNREACHABLE();
}
-void Heap::UpdateAllocationSite(Map* map, HeapObject* object,
+void Heap::UpdateAllocationSite(Map map, HeapObject object,
PretenuringFeedbackMap* pretenuring_feedback) {
DCHECK_NE(pretenuring_feedback, &global_pretenuring_feedback_);
- DCHECK(
- InFromSpace(object) ||
- (InToSpace(object) && Page::FromAddress(object->address())
- ->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION)) ||
- (!InNewSpace(object) && Page::FromAddress(object->address())
- ->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION)));
+ DCHECK(InFromSpace(object) ||
+ (InToSpace(object) && Page::FromHeapObject(object)->IsFlagSet(
+ Page::PAGE_NEW_NEW_PROMOTION)) ||
+ (!InNewSpace(object) && Page::FromHeapObject(object)->IsFlagSet(
+ Page::PAGE_NEW_OLD_PROMOTION)));
if (!FLAG_allocation_site_pretenuring ||
- !AllocationSite::CanTrack(map->instance_type()))
+ !AllocationSite::CanTrack(map->instance_type())) {
return;
- AllocationMemento* memento_candidate =
+ }
+ AllocationMemento memento_candidate =
FindAllocationMemento<kForGC>(map, object);
- if (memento_candidate == nullptr) return;
+ if (memento_candidate.is_null()) return;
// Entering cached feedback is used in the parallel case. We are not allowed
// to dereference the allocation site and instead have to postpone all checks
// until the data is actually merged.
Address key = memento_candidate->GetAllocationSiteUnchecked();
- (*pretenuring_feedback)[reinterpret_cast<AllocationSite*>(key)]++;
+ (*pretenuring_feedback)[AllocationSite::unchecked_cast(Object(key))]++;
}
-Isolate* Heap::isolate() {
- return reinterpret_cast<Isolate*>(
- reinterpret_cast<intptr_t>(this) -
- reinterpret_cast<size_t>(reinterpret_cast<Isolate*>(16)->heap()) + 16);
-}
-
-void Heap::ExternalStringTable::AddString(String* string) {
+void Heap::ExternalStringTable::AddString(String string) {
DCHECK(string->IsExternalString());
DCHECK(!Contains(string));
@@ -520,14 +526,15 @@ void Heap::ExternalStringTable::AddString(String* string) {
}
}
-Oddball* Heap::ToBoolean(bool condition) {
+Oddball Heap::ToBoolean(bool condition) {
ReadOnlyRoots roots(this);
return condition ? roots.true_value() : roots.false_value();
}
uint64_t Heap::HashSeed() {
uint64_t seed;
- hash_seed()->copy_out(0, reinterpret_cast<byte*>(&seed), kInt64Size);
+ ReadOnlyRoots(this).hash_seed()->copy_out(0, reinterpret_cast<byte*>(&seed),
+ kInt64Size);
DCHECK(FLAG_randomize_hashes || seed == 0);
return seed;
}
@@ -595,12 +602,11 @@ CodeSpaceMemoryModificationScope::CodeSpaceMemoryModificationScope(Heap* heap)
if (heap_->write_protect_code_memory()) {
heap_->increment_code_space_memory_modification_scope_depth();
heap_->code_space()->SetReadAndWritable();
- LargePage* page = heap_->lo_space()->first_page();
+ LargePage* page = heap_->code_lo_space()->first_page();
while (page != nullptr) {
- if (page->IsFlagSet(MemoryChunk::IS_EXECUTABLE)) {
- CHECK(heap_->memory_allocator()->IsMemoryChunkExecutable(page));
- page->SetReadAndWritable();
- }
+ DCHECK(page->IsFlagSet(MemoryChunk::IS_EXECUTABLE));
+ CHECK(heap_->memory_allocator()->IsMemoryChunkExecutable(page));
+ page->SetReadAndWritable();
page = page->next_page();
}
}
@@ -609,13 +615,12 @@ CodeSpaceMemoryModificationScope::CodeSpaceMemoryModificationScope(Heap* heap)
CodeSpaceMemoryModificationScope::~CodeSpaceMemoryModificationScope() {
if (heap_->write_protect_code_memory()) {
heap_->decrement_code_space_memory_modification_scope_depth();
- heap_->code_space()->SetReadAndExecutable();
- LargePage* page = heap_->lo_space()->first_page();
+ heap_->code_space()->SetDefaultCodePermissions();
+ LargePage* page = heap_->code_lo_space()->first_page();
while (page != nullptr) {
- if (page->IsFlagSet(MemoryChunk::IS_EXECUTABLE)) {
- CHECK(heap_->memory_allocator()->IsMemoryChunkExecutable(page));
- page->SetReadAndExecutable();
- }
+ DCHECK(page->IsFlagSet(MemoryChunk::IS_EXECUTABLE));
+ CHECK(heap_->memory_allocator()->IsMemoryChunkExecutable(page));
+ page->SetDefaultCodePermissions();
page = page->next_page();
}
}
@@ -646,15 +651,14 @@ CodePageMemoryModificationScope::CodePageMemoryModificationScope(
chunk_->IsFlagSet(MemoryChunk::IS_EXECUTABLE)) {
if (scope_active_) {
DCHECK(chunk_->owner()->identity() == CODE_SPACE ||
- (chunk_->owner()->identity() == LO_SPACE &&
- chunk_->IsFlagSet(MemoryChunk::IS_EXECUTABLE)));
+ (chunk_->owner()->identity() == CODE_LO_SPACE));
chunk_->SetReadAndWritable();
}
}
CodePageMemoryModificationScope::~CodePageMemoryModificationScope() {
if (scope_active_) {
- chunk_->SetReadAndExecutable();
+ chunk_->SetDefaultCodePermissions();
}
}
diff --git a/deps/v8/src/heap/heap-write-barrier-inl.h b/deps/v8/src/heap/heap-write-barrier-inl.h
index b20e65d1f1..a8137ddee4 100644
--- a/deps/v8/src/heap/heap-write-barrier-inl.h
+++ b/deps/v8/src/heap/heap-write-barrier-inl.h
@@ -11,8 +11,11 @@
#include "src/heap/heap-write-barrier.h"
#include "src/globals.h"
-#include "src/objects-inl.h"
+#include "src/heap/heap.h"
+#include "src/objects/code.h"
+#include "src/objects/heap-object.h"
#include "src/objects/maybe-object-inl.h"
+#include "src/objects/slots.h"
namespace v8 {
namespace internal {
@@ -23,14 +26,15 @@ namespace heap_internals {
struct MemoryChunk {
static constexpr uintptr_t kFlagsOffset = sizeof(size_t);
+ static constexpr uintptr_t kHeapOffset =
+ kFlagsOffset + kUIntptrSize + 4 * kSystemPointerSize;
static constexpr uintptr_t kMarkingBit = uintptr_t{1} << 18;
static constexpr uintptr_t kFromSpaceBit = uintptr_t{1} << 3;
static constexpr uintptr_t kToSpaceBit = uintptr_t{1} << 4;
V8_INLINE static heap_internals::MemoryChunk* FromHeapObject(
- HeapObject* object) {
- return reinterpret_cast<MemoryChunk*>(reinterpret_cast<Address>(object) &
- ~kPageAlignmentMask);
+ HeapObject object) {
+ return reinterpret_cast<MemoryChunk*>(object->ptr() & ~kPageAlignmentMask);
}
V8_INLINE bool IsMarking() const { return GetFlags() & kMarkingBit; }
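FromHeapObject above recovers the chunk header from any interior object pointer by masking off the low address bits, which is valid because chunks are allocated on page-aligned boundaries. A self-contained sketch (the page size is an assumption, not V8's actual constant):

    #include <cstdint>

    constexpr uintptr_t kDemoPageSize = uintptr_t{1} << 19;  // assumed 512 KiB
    constexpr uintptr_t kDemoPageAlignmentMask = kDemoPageSize - 1;

    // Round a tagged object address down to the start of its page, where the
    // chunk header (flags word, heap pointer, ...) lives.
    inline uintptr_t ChunkStart(uintptr_t object_ptr) {
      return object_ptr & ~kDemoPageAlignmentMask;
    }

The kHeapOffset constant added above then lets the barrier read the owning Heap* straight out of that header, which is what the new GetHeap()/GetHeapFromWritableObject path relies on.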
@@ -41,13 +45,20 @@ struct MemoryChunk {
}
V8_INLINE uintptr_t GetFlags() const {
- return *reinterpret_cast<const uintptr_t*>(
- reinterpret_cast<const uint8_t*>(this) + kFlagsOffset);
+ return *reinterpret_cast<const uintptr_t*>(reinterpret_cast<Address>(this) +
+ kFlagsOffset);
+ }
+
+ V8_INLINE Heap* GetHeap() {
+ Heap* heap = *reinterpret_cast<Heap**>(reinterpret_cast<Address>(this) +
+ kHeapOffset);
+ SLOW_DCHECK(heap != nullptr);
+ return heap;
}
};
-inline void GenerationalBarrierInternal(HeapObject* object, Address slot,
- HeapObject* value) {
+inline void GenerationalBarrierInternal(HeapObject object, Address slot,
+ HeapObject value) {
DCHECK(Heap::PageFlagsAreConsistent(object));
heap_internals::MemoryChunk* value_chunk =
heap_internals::MemoryChunk::FromHeapObject(value);
@@ -59,8 +70,8 @@ inline void GenerationalBarrierInternal(HeapObject* object, Address slot,
Heap::GenerationalBarrierSlow(object, slot, value);
}
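GenerationalBarrierInternal implements the classic old-to-new filter: a slot is recorded only when an old-space object ends up pointing at a new-space value. A sketch of just the filtering logic (Chunk and the recording helper are stand-ins, not V8 API):

    struct Chunk { bool InNewSpace() const; };      // stand-in for MemoryChunk
    void RecordSlotInRememberedSet(Chunk*, void*);  // hypothetical slow path

    void GenerationalBarrierSketch(Chunk* object_chunk, Chunk* value_chunk,
                                   void* slot) {
      if (!value_chunk->InNewSpace()) return;  // old value: nothing to record
      if (object_chunk->InNewSpace()) return;  // young holder: scavenger scans it
      RecordSlotInRememberedSet(object_chunk, slot);
    }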
-inline void MarkingBarrierInternal(HeapObject* object, Address slot,
- HeapObject* value) {
+inline void MarkingBarrierInternal(HeapObject object, Address slot,
+ HeapObject value) {
DCHECK(Heap::PageFlagsAreConsistent(object));
heap_internals::MemoryChunk* value_chunk =
heap_internals::MemoryChunk::FromHeapObject(value);
@@ -72,36 +83,36 @@ inline void MarkingBarrierInternal(HeapObject* object, Address slot,
} // namespace heap_internals
-inline void WriteBarrierForCode(Code* host, RelocInfo* rinfo, Object* value) {
+inline void WriteBarrierForCode(Code host, RelocInfo* rinfo, Object value) {
DCHECK(!HasWeakHeapObjectTag(value));
if (!value->IsHeapObject()) return;
- HeapObject* object = HeapObject::cast(value);
+ HeapObject object = HeapObject::cast(value);
GenerationalBarrierForCode(host, rinfo, object);
MarkingBarrierForCode(host, rinfo, object);
}
-inline void WriteBarrierForCode(Code* host) {
+inline void WriteBarrierForCode(Code host) {
Heap::WriteBarrierForCodeSlow(host);
}
-inline void GenerationalBarrier(HeapObject* object, Object** slot,
- Object* value) {
+inline void GenerationalBarrier(HeapObject object, ObjectSlot slot,
+ Object value) {
DCHECK(!HasWeakHeapObjectTag(*slot));
DCHECK(!HasWeakHeapObjectTag(value));
if (!value->IsHeapObject()) return;
- heap_internals::GenerationalBarrierInternal(
- object, reinterpret_cast<Address>(slot), HeapObject::cast(value));
+ heap_internals::GenerationalBarrierInternal(object, slot.address(),
+ HeapObject::cast(value));
}
-inline void GenerationalBarrier(HeapObject* object, MaybeObject** slot,
- MaybeObject* value) {
- HeapObject* value_heap_object;
+inline void GenerationalBarrier(HeapObject object, MaybeObjectSlot slot,
+ MaybeObject value) {
+ HeapObject value_heap_object;
if (!value->GetHeapObject(&value_heap_object)) return;
- heap_internals::GenerationalBarrierInternal(
- object, reinterpret_cast<Address>(slot), value_heap_object);
+ heap_internals::GenerationalBarrierInternal(object, slot.address(),
+ value_heap_object);
}
-inline void GenerationalBarrierForElements(Heap* heap, FixedArray* array,
+inline void GenerationalBarrierForElements(Heap* heap, FixedArray array,
int offset, int length) {
heap_internals::MemoryChunk* array_chunk =
heap_internals::MemoryChunk::FromHeapObject(array);
@@ -110,31 +121,31 @@ inline void GenerationalBarrierForElements(Heap* heap, FixedArray* array,
Heap::GenerationalBarrierForElementsSlow(heap, array, offset, length);
}
-inline void GenerationalBarrierForCode(Code* host, RelocInfo* rinfo,
- HeapObject* object) {
+inline void GenerationalBarrierForCode(Code host, RelocInfo* rinfo,
+ HeapObject object) {
heap_internals::MemoryChunk* object_chunk =
heap_internals::MemoryChunk::FromHeapObject(object);
if (!object_chunk->InNewSpace()) return;
Heap::GenerationalBarrierForCodeSlow(host, rinfo, object);
}
-inline void MarkingBarrier(HeapObject* object, Object** slot, Object* value) {
- DCHECK_IMPLIES(slot != nullptr, !HasWeakHeapObjectTag(*slot));
+inline void MarkingBarrier(HeapObject object, ObjectSlot slot, Object value) {
+ DCHECK_IMPLIES(slot.address() != kNullAddress, !HasWeakHeapObjectTag(*slot));
DCHECK(!HasWeakHeapObjectTag(value));
if (!value->IsHeapObject()) return;
- heap_internals::MarkingBarrierInternal(
- object, reinterpret_cast<Address>(slot), HeapObject::cast(value));
+ heap_internals::MarkingBarrierInternal(object, slot.address(),
+ HeapObject::cast(value));
}
-inline void MarkingBarrier(HeapObject* object, MaybeObject** slot,
- MaybeObject* value) {
- HeapObject* value_heap_object;
+inline void MarkingBarrier(HeapObject object, MaybeObjectSlot slot,
+ MaybeObject value) {
+ HeapObject value_heap_object;
if (!value->GetHeapObject(&value_heap_object)) return;
- heap_internals::MarkingBarrierInternal(
- object, reinterpret_cast<Address>(slot), value_heap_object);
+ heap_internals::MarkingBarrierInternal(object, slot.address(),
+ value_heap_object);
}
-inline void MarkingBarrierForElements(Heap* heap, HeapObject* object) {
+inline void MarkingBarrierForElements(Heap* heap, HeapObject object) {
heap_internals::MemoryChunk* object_chunk =
heap_internals::MemoryChunk::FromHeapObject(object);
if (!object_chunk->IsMarking()) return;
@@ -142,15 +153,32 @@ inline void MarkingBarrierForElements(Heap* heap, HeapObject* object) {
Heap::MarkingBarrierForElementsSlow(heap, object);
}
-inline void MarkingBarrierForCode(Code* host, RelocInfo* rinfo,
- HeapObject* object) {
- DCHECK(!HasWeakHeapObjectTag(object));
+inline void MarkingBarrierForCode(Code host, RelocInfo* rinfo,
+ HeapObject object) {
+ DCHECK(!HasWeakHeapObjectTag(object.ptr()));
heap_internals::MemoryChunk* object_chunk =
heap_internals::MemoryChunk::FromHeapObject(object);
if (!object_chunk->IsMarking()) return;
Heap::MarkingBarrierForCodeSlow(host, rinfo, object);
}
+inline void MarkingBarrierForDescriptorArray(Heap* heap, HeapObject host,
+ HeapObject descriptor_array,
+ int number_of_own_descriptors) {
+ heap_internals::MemoryChunk* chunk =
+ heap_internals::MemoryChunk::FromHeapObject(descriptor_array);
+ if (!chunk->IsMarking()) return;
+
+ Heap::MarkingBarrierForDescriptorArraySlow(heap, host, descriptor_array,
+ number_of_own_descriptors);
+}
+
+inline Heap* GetHeapFromWritableObject(const HeapObject object) {
+ heap_internals::MemoryChunk* chunk =
+ heap_internals::MemoryChunk::FromHeapObject(object);
+ return chunk->GetHeap();
+}
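+
MemoryChunk::FromHeapObject and GetHeapFromWritableObject work because chunks are allocated aligned to their own size, so masking an object's address recovers the chunk header. A standalone sketch, assuming 512 KB chunks (the constant is illustrative):

    #include <cstdint>

    constexpr uintptr_t kChunkSize = 512 * 1024;  // assumed chunk granularity

    inline uintptr_t ChunkFromAddress(uintptr_t object_address) {
      return object_address & ~(kChunkSize - 1);  // round down to chunk start
    }
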
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/heap/heap-write-barrier.h b/deps/v8/src/heap/heap-write-barrier.h
index 4eaeaae8a4..9fcb64b94b 100644
--- a/deps/v8/src/heap/heap-write-barrier.h
+++ b/deps/v8/src/heap/heap-write-barrier.h
@@ -5,6 +5,9 @@
#ifndef V8_HEAP_HEAP_WRITE_BARRIER_H_
#define V8_HEAP_HEAP_WRITE_BARRIER_H_
+#include "include/v8-internal.h"
+#include "src/globals.h"
+
namespace v8 {
namespace internal {
@@ -27,23 +30,28 @@ class RelocInfo;
} while (false)
// Combined write barriers.
-void WriteBarrierForCode(Code* host, RelocInfo* rinfo, Object* value);
-void WriteBarrierForCode(Code* host);
+void WriteBarrierForCode(Code host, RelocInfo* rinfo, Object value);
+void WriteBarrierForCode(Code host);
// Generational write barrier.
-void GenerationalBarrier(HeapObject* object, Object** slot, Object* value);
-void GenerationalBarrier(HeapObject* object, MaybeObject** slot,
- MaybeObject* value);
-void GenerationalBarrierForElements(Heap* heap, FixedArray* array, int offset,
+void GenerationalBarrier(HeapObject object, ObjectSlot slot, Object value);
+void GenerationalBarrier(HeapObject object, MaybeObjectSlot slot,
+ MaybeObject value);
+void GenerationalBarrierForElements(Heap* heap, FixedArray array, int offset,
int length);
-void GenerationalBarrierForCode(Code* host, RelocInfo* rinfo,
- HeapObject* object);
+void GenerationalBarrierForCode(Code host, RelocInfo* rinfo, HeapObject object);
// Marking write barrier.
-void MarkingBarrier(HeapObject* object, Object** slot, Object* value);
-void MarkingBarrier(HeapObject* object, MaybeObject** slot, MaybeObject* value);
-void MarkingBarrierForElements(Heap* heap, HeapObject* object);
-void MarkingBarrierForCode(Code* host, RelocInfo* rinfo, HeapObject* object);
+void MarkingBarrier(HeapObject object, ObjectSlot slot, Object value);
+void MarkingBarrier(HeapObject object, MaybeObjectSlot slot, MaybeObject value);
+void MarkingBarrierForElements(Heap* heap, HeapObject object);
+void MarkingBarrierForCode(Code host, RelocInfo* rinfo, HeapObject object);
+
+void MarkingBarrierForDescriptorArray(Heap* heap, HeapObject host,
+ HeapObject descriptor_array,
+ int number_of_own_descriptors);
+
+Heap* GetHeapFromWritableObject(const HeapObject object);
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/heap/heap.cc b/deps/v8/src/heap/heap.cc
index b509d21142..d399d070b8 100644
--- a/deps/v8/src/heap/heap.cc
+++ b/deps/v8/src/heap/heap.cc
@@ -10,12 +10,10 @@
#include "src/accessors.h"
#include "src/api-inl.h"
#include "src/assembler-inl.h"
-#include "src/ast/context-slot-cache.h"
#include "src/base/bits.h"
#include "src/base/once.h"
#include "src/base/utils/random-number-generator.h"
#include "src/bootstrapper.h"
-#include "src/code-stubs.h"
#include "src/compilation-cache.h"
#include "src/conversions.h"
#include "src/debug/debug.h"
@@ -46,14 +44,18 @@
#include "src/heap/stress-marking-observer.h"
#include "src/heap/stress-scavenge-observer.h"
#include "src/heap/sweeper.h"
-#include "src/instruction-stream.h"
#include "src/interpreter/interpreter.h"
+#include "src/log.h"
+#include "src/microtask-queue.h"
#include "src/objects/data-handler.h"
+#include "src/objects/free-space-inl.h"
#include "src/objects/hash-table-inl.h"
#include "src/objects/maybe-object.h"
#include "src/objects/shared-function-info.h"
+#include "src/objects/slots-inl.h"
#include "src/regexp/jsregexp.h"
#include "src/runtime-profiler.h"
+#include "src/snapshot/embedded-data.h"
#include "src/snapshot/natives.h"
#include "src/snapshot/serializer-common.h"
#include "src/snapshot/snapshot.h"
@@ -91,12 +93,12 @@ void Heap::SetInterpreterEntryReturnPCOffset(int pc_offset) {
set_interpreter_entry_return_pc_offset(Smi::FromInt(pc_offset));
}
-void Heap::SetSerializedObjects(FixedArray* objects) {
+void Heap::SetSerializedObjects(FixedArray objects) {
DCHECK(isolate()->serializer_enabled());
set_serialized_objects(objects);
}
-void Heap::SetSerializedGlobalProxySizes(FixedArray* sizes) {
+void Heap::SetSerializedGlobalProxySizes(FixedArray sizes) {
DCHECK(isolate()->serializer_enabled());
set_serialized_global_proxy_sizes(sizes);
}
@@ -107,11 +109,11 @@ bool Heap::GCCallbackTuple::operator==(
}
Heap::GCCallbackTuple& Heap::GCCallbackTuple::operator=(
- const Heap::GCCallbackTuple& other) = default;
+ const Heap::GCCallbackTuple& other) V8_NOEXCEPT = default;
struct Heap::StrongRootsList {
- Object** start;
- Object** end;
+ FullObjectSlot start;
+ FullObjectSlot end;
StrongRootsList* next;
};
@@ -129,18 +131,21 @@ class IdleScavengeObserver : public AllocationObserver {
};
Heap::Heap()
- : initial_max_old_generation_size_(max_old_generation_size_),
+ : isolate_(isolate()),
+ initial_max_old_generation_size_(max_old_generation_size_),
+ initial_max_old_generation_size_threshold_(0),
initial_old_generation_size_(max_old_generation_size_ /
kInitalOldGenerationLimitFactor),
memory_pressure_level_(MemoryPressureLevel::kNone),
old_generation_allocation_limit_(initial_old_generation_size_),
global_pretenuring_feedback_(kInitialFeedbackCapacity),
current_gc_callback_flags_(GCCallbackFlags::kNoGCCallbackFlags),
+ is_current_gc_forced_(false),
external_string_table_(this) {
// Ensure old_generation_size_ is a multiple of kPageSize.
DCHECK_EQ(0, max_old_generation_size_ & (Page::kPageSize - 1));
- set_native_contexts_list(nullptr);
+ set_native_contexts_list(Smi::kZero);
set_allocation_sites_list(Smi::kZero);
   // Put a dummy entry in the remembered pages so we can find the list in
   // the minidump even if there are no real unmapped pages.

@@ -148,9 +153,8 @@ Heap::Heap()
}
size_t Heap::MaxReserved() {
- const double kFactor = Page::kPageSize * 1.0 / Page::kAllocatableMemory;
- return static_cast<size_t>(
- (2 * max_semi_space_size_ + max_old_generation_size_) * kFactor);
+ return static_cast<size_t>(2 * max_semi_space_size_ +
+ max_old_generation_size_);
}
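
For scale (illustrative values, not V8 defaults): with a 16 MB max semi-space and a 1 GB max old generation, the new bound is 2 * 16 MB + 1024 MB = 1056 MB; the old Page::kAllocatableMemory correction factor is gone from the computation.
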
size_t Heap::ComputeMaxOldGenerationSize(uint64_t physical_memory) {
@@ -176,7 +180,7 @@ size_t Heap::OldGenerationCapacity() {
space = spaces.next()) {
total += space->Capacity();
}
- return total + lo_space_->SizeOfObjects();
+ return total + lo_space_->SizeOfObjects() + code_lo_space_->SizeOfObjects();
}
size_t Heap::CommittedOldGenerationMemory() {
@@ -188,14 +192,13 @@ size_t Heap::CommittedOldGenerationMemory() {
space = spaces.next()) {
total += space->CommittedMemory();
}
- return total + lo_space_->Size();
+ return total + lo_space_->Size() + code_lo_space_->Size();
}
-size_t Heap::CommittedMemoryOfHeapAndUnmapper() {
+size_t Heap::CommittedMemoryOfUnmapper() {
if (!HasBeenSetUp()) return 0;
- return CommittedMemory() +
- memory_allocator()->unmapper()->CommittedBufferedMemory();
+ return memory_allocator()->unmapper()->CommittedBufferedMemory();
}
size_t Heap::CommittedMemory() {
@@ -240,6 +243,8 @@ size_t Heap::Available() {
for (SpaceIterator it(this); it.has_next();) {
total += it.next()->Available();
}
+
+ total += memory_allocator()->Available();
return total;
}
@@ -363,6 +368,15 @@ void Heap::PrintShortHeapStatistics() {
lo_space_->SizeOfObjects() / KB, lo_space_->Available() / KB,
lo_space_->CommittedMemory() / KB);
PrintIsolate(isolate_,
+ "Code large object space, used: %6" PRIuS
+ " KB"
+ ", available: %6" PRIuS
+ " KB"
+ ", committed: %6" PRIuS " KB\n",
+               code_lo_space_->SizeOfObjects() / KB,
+ code_lo_space_->Available() / KB,
+ code_lo_space_->CommittedMemory() / KB);
+ PrintIsolate(isolate_,
"All spaces, used: %6" PRIuS
" KB"
", available: %6" PRIuS
@@ -371,11 +385,11 @@ void Heap::PrintShortHeapStatistics() {
this->SizeOfObjects() / KB, this->Available() / KB,
this->CommittedMemory() / KB);
PrintIsolate(isolate_,
- "Unmapper buffering %d chunks of committed: %6" PRIuS " KB\n",
- memory_allocator()->unmapper()->NumberOfChunks(),
- CommittedMemoryOfHeapAndUnmapper() / KB);
+ "Unmapper buffering %zu chunks of committed: %6" PRIuS " KB\n",
+ memory_allocator()->unmapper()->NumberOfCommittedChunks(),
+ CommittedMemoryOfUnmapper() / KB);
PrintIsolate(isolate_, "External memory reported: %6" PRId64 " KB\n",
- external_memory_ / KB);
+ isolate()->isolate_data()->external_memory_ / KB);
PrintIsolate(isolate_, "Backing store memory: %6" PRIuS " KB\n",
backing_store_bytes_ / KB);
PrintIsolate(isolate_, "External memory global %zu KB\n",
@@ -425,13 +439,13 @@ void Heap::AddRetainingPathTarget(Handle<HeapObject> object,
}
}
-bool Heap::IsRetainingPathTarget(HeapObject* object,
+bool Heap::IsRetainingPathTarget(HeapObject object,
RetainingPathOption* option) {
- WeakArrayList* targets = retaining_path_targets();
+ WeakArrayList targets = retaining_path_targets();
int length = targets->length();
- MaybeObject* object_to_check = HeapObjectReference::Weak(object);
+ MaybeObject object_to_check = HeapObjectReference::Weak(object);
for (int i = 0; i < length; i++) {
- MaybeObject* target = targets->Get(i);
+ MaybeObject target = targets->Get(i);
DCHECK(target->IsWeakOrCleared());
if (target == object_to_check) {
DCHECK(retaining_path_target_option_.count(i));
@@ -442,12 +456,12 @@ bool Heap::IsRetainingPathTarget(HeapObject* object,
return false;
}
-void Heap::PrintRetainingPath(HeapObject* target, RetainingPathOption option) {
+void Heap::PrintRetainingPath(HeapObject target, RetainingPathOption option) {
PrintF("\n\n\n");
PrintF("#################################################\n");
- PrintF("Retaining path for %p:\n", static_cast<void*>(target));
- HeapObject* object = target;
- std::vector<std::pair<HeapObject*, bool>> retaining_path;
+ PrintF("Retaining path for %p:\n", reinterpret_cast<void*>(target->ptr()));
+ HeapObject object = target;
+ std::vector<std::pair<HeapObject, bool>> retaining_path;
Root root = Root::kUnknown;
bool ephemeron = false;
while (true) {
@@ -468,7 +482,7 @@ void Heap::PrintRetainingPath(HeapObject* target, RetainingPathOption option) {
}
int distance = static_cast<int>(retaining_path.size());
for (auto node : retaining_path) {
- HeapObject* object = node.first;
+ HeapObject object = node.first;
bool ephemeron = node.second;
PrintF("\n");
PrintF("^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n");
@@ -488,7 +502,7 @@ void Heap::PrintRetainingPath(HeapObject* target, RetainingPathOption option) {
PrintF("-------------------------------------------------\n");
}
-void Heap::AddRetainer(HeapObject* retainer, HeapObject* object) {
+void Heap::AddRetainer(HeapObject retainer, HeapObject object) {
if (retainer_.count(object)) return;
retainer_[object] = retainer;
RetainingPathOption option = RetainingPathOption::kDefault;
@@ -502,7 +516,7 @@ void Heap::AddRetainer(HeapObject* retainer, HeapObject* object) {
}
}
-void Heap::AddEphemeronRetainer(HeapObject* retainer, HeapObject* object) {
+void Heap::AddEphemeronRetainer(HeapObject retainer, HeapObject object) {
if (ephemeron_retainer_.count(object)) return;
ephemeron_retainer_[object] = retainer;
RetainingPathOption option = RetainingPathOption::kDefault;
@@ -515,7 +529,7 @@ void Heap::AddEphemeronRetainer(HeapObject* retainer, HeapObject* object) {
}
}
-void Heap::AddRetainingRoot(Root root, HeapObject* object) {
+void Heap::AddRetainingRoot(Root root, HeapObject object) {
if (retaining_root_.count(object)) return;
retaining_root_[object] = root;
RetainingPathOption option = RetainingPathOption::kDefault;
@@ -597,6 +611,8 @@ const char* Heap::GetSpaceName(int idx) {
return "large_object_space";
case NEW_LO_SPACE:
return "new_large_object_space";
+ case CODE_LO_SPACE:
+ return "code_large_object_space";
case RO_SPACE:
return "read_only_space";
default:
@@ -605,21 +621,9 @@ const char* Heap::GetSpaceName(int idx) {
return nullptr;
}
-void Heap::SetRootCodeStubs(SimpleNumberDictionary* value) {
- roots_[RootIndex::kCodeStubs] = value;
-}
-
-void Heap::RepairFreeListsAfterDeserialization() {
- PagedSpaces spaces(this);
- for (PagedSpace* space = spaces.next(); space != nullptr;
- space = spaces.next()) {
- space->RepairFreeListsAfterDeserialization();
- }
-}
-
void Heap::MergeAllocationSitePretenuringFeedback(
const PretenuringFeedbackMap& local_pretenuring_feedback) {
- AllocationSite* site = nullptr;
+ AllocationSite site;
for (auto& site_and_count : local_pretenuring_feedback) {
site = site_and_count.first;
MapWord map_word = site_and_count.first->map_word();
@@ -688,7 +692,7 @@ class Heap::SkipStoreBufferScope {
namespace {
inline bool MakePretenureDecision(
- AllocationSite* site, AllocationSite::PretenureDecision current_decision,
+ AllocationSite site, AllocationSite::PretenureDecision current_decision,
double ratio, bool maximum_size_scavenge) {
// Here we just allow state transitions from undecided or maybe tenure
// to don't tenure, maybe tenure, or tenure.
@@ -712,7 +716,7 @@ inline bool MakePretenureDecision(
return false;
}
-inline bool DigestPretenuringFeedback(Isolate* isolate, AllocationSite* site,
+inline bool DigestPretenuringFeedback(Isolate* isolate, AllocationSite site,
bool maximum_size_scavenge) {
bool deopt = false;
int create_count = site->memento_create_count();
@@ -734,8 +738,8 @@ inline bool DigestPretenuringFeedback(Isolate* isolate, AllocationSite* site,
PrintIsolate(isolate,
"pretenuring: AllocationSite(%p): (created, found, ratio) "
"(%d, %d, %f) %s => %s\n",
- static_cast<void*>(site), create_count, found_count, ratio,
- site->PretenureDecisionName(current_decision),
+ reinterpret_cast<void*>(site.ptr()), create_count, found_count,
+ ratio, site->PretenureDecisionName(current_decision),
site->PretenureDecisionName(site->pretenure_decision()));
}
@@ -746,7 +750,7 @@ inline bool DigestPretenuringFeedback(Isolate* isolate, AllocationSite* site,
}
} // namespace
-void Heap::RemoveAllocationSitePretenuringFeedback(AllocationSite* site) {
+void Heap::RemoveAllocationSitePretenuringFeedback(AllocationSite site) {
global_pretenuring_feedback_.erase(site);
}
@@ -763,7 +767,7 @@ void Heap::ProcessPretenuringFeedback() {
int allocation_sites = 0;
int active_allocation_sites = 0;
- AllocationSite* site = nullptr;
+ AllocationSite site;
// Step 1: Digest feedback for recorded allocation sites.
bool maximum_size_scavenge = MaximumSizeScavenge();
@@ -796,7 +800,7 @@ void Heap::ProcessPretenuringFeedback() {
if (deopt_maybe_tenured) {
ForeachAllocationSite(
allocation_sites_list(),
- [&allocation_sites, &trigger_deoptimization](AllocationSite* site) {
+ [&allocation_sites, &trigger_deoptimization](AllocationSite site) {
DCHECK(site->IsAllocationSite());
allocation_sites++;
if (site->IsMaybeTenure()) {
@@ -827,14 +831,8 @@ void Heap::ProcessPretenuringFeedback() {
}
}
-void Heap::InvalidateCodeEmbeddedObjects(Code* code) {
- MemoryChunk* chunk = MemoryChunk::FromAddress(code->address());
- CodePageMemoryModificationScope modification_scope(chunk);
- code->InvalidateEmbeddedObjects(this);
-}
-
-void Heap::InvalidateCodeDeoptimizationData(Code* code) {
- MemoryChunk* chunk = MemoryChunk::FromAddress(code->address());
+void Heap::InvalidateCodeDeoptimizationData(Code code) {
+ MemoryChunk* chunk = MemoryChunk::FromHeapObject(code);
CodePageMemoryModificationScope modification_scope(chunk);
code->set_deoptimization_data(ReadOnlyRoots(this).empty_fixed_array());
}
@@ -843,7 +841,7 @@ void Heap::DeoptMarkedAllocationSites() {
// TODO(hpayer): If iterating over the allocation sites list becomes a
// performance issue, use a cache data structure in heap instead.
- ForeachAllocationSite(allocation_sites_list(), [this](AllocationSite* site) {
+ ForeachAllocationSite(allocation_sites_list(), [this](AllocationSite site) {
if (site->deopt_dependent_code()) {
site->dependent_code()->MarkCodeForDeoptimization(
isolate_, DependentCode::kAllocationSiteTenuringChangedGroup);
@@ -941,6 +939,38 @@ void Heap::GarbageCollectionEpilogue() {
TRACE_GC(tracer(), GCTracer::Scope::HEAP_EPILOGUE_REDUCE_NEW_SPACE);
ReduceNewSpaceSize();
}
+
+ if (FLAG_harmony_weak_refs) {
+ // TODO(marja): (spec): The exact condition on when to schedule the cleanup
+ // task is unclear. This version schedules the cleanup task for a factory
+ // whenever the GC has discovered new dirty WeakCells for it (at that point
+ // it might have leftover dirty WeakCells since an earlier invocation of the
+ // cleanup function didn't iterate through them). See
+ // https://github.com/tc39/proposal-weakrefs/issues/34
+ HandleScope handle_scope(isolate());
+ while (
+ !isolate()->heap()->dirty_js_weak_factories()->IsUndefined(isolate())) {
+ // Enqueue one microtask per JSWeakFactory.
+ Handle<JSWeakFactory> weak_factory(
+ JSWeakFactory::cast(isolate()->heap()->dirty_js_weak_factories()),
+ isolate());
+ isolate()->heap()->set_dirty_js_weak_factories(weak_factory->next());
+ weak_factory->set_next(ReadOnlyRoots(isolate()).undefined_value());
+ Handle<NativeContext> context(weak_factory->native_context(), isolate());
+ // GC has no native context, but we use the creation context of the
+      // JSWeakFactory for the EnqueueTask operation. This is consistent with
+      // the Promise implementation, assuming the JSWeakFactory creation
+      // context is the
+ // "caller's context" in promise functions. An alternative would be to use
+ // the native context of the cleanup function. This difference shouldn't
+ // be observable from JavaScript, since we enter the native context of the
+ // cleanup function before calling it. TODO(marja): Revisit when the spec
+ // clarifies this. See also
+ // https://github.com/tc39/proposal-weakrefs/issues/38 .
+ Handle<WeakFactoryCleanupJobTask> task =
+ isolate()->factory()->NewWeakFactoryCleanupJobTask(weak_factory);
+ context->microtask_queue()->EnqueueMicrotask(*task);
+ }
+ }
}
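
The loop above pops one JSWeakFactory at a time off an intrusive list, unlinking it before enqueueing its cleanup task so a later GC can re-dirty it. The same drain pattern in miniature (plain pointers standing in for tagged heap values):

    #include <vector>

    struct Factory {
      Factory* next = nullptr;  // intrusive dirty-list link
    };

    std::vector<Factory*> DrainDirtyList(Factory*& head) {
      std::vector<Factory*> to_schedule;
      while (head != nullptr) {
        Factory* f = head;
        head = f->next;            // pop the front
        f->next = nullptr;         // unlink so a later GC can re-enqueue it
        to_schedule.push_back(f);  // stand-in for EnqueueMicrotask
      }
      return to_schedule;
    }
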
class GCCallbacksScope {
@@ -982,10 +1012,12 @@ void Heap::HandleGCRequest() {
void Heap::ScheduleIdleScavengeIfNeeded(int bytes_allocated) {
+ DCHECK(FLAG_idle_time_scavenge);
+ DCHECK_NOT_NULL(scavenge_job_);
scavenge_job_->ScheduleIdleTaskIfNeeded(this, bytes_allocated);
}
-HistogramTimer* Heap::GCTypePriorityTimer(GarbageCollector collector) {
+TimedHistogram* Heap::GCTypePriorityTimer(GarbageCollector collector) {
if (IsYoungGenerationCollector(collector)) {
if (isolate_->IsIsolateInBackground()) {
return isolate_->counters()->gc_scavenger_background();
@@ -1013,7 +1045,7 @@ HistogramTimer* Heap::GCTypePriorityTimer(GarbageCollector collector) {
}
}
-HistogramTimer* Heap::GCTypeTimer(GarbageCollector collector) {
+TimedHistogram* Heap::GCTypeTimer(GarbageCollector collector) {
if (IsYoungGenerationCollector(collector)) {
return isolate_->counters()->gc_scavenger();
} else {
@@ -1041,13 +1073,13 @@ void Heap::CollectAllGarbage(int flags, GarbageCollectionReason gc_reason,
namespace {
-intptr_t CompareWords(int size, HeapObject* a, HeapObject* b) {
- int words = size / kPointerSize;
+intptr_t CompareWords(int size, HeapObject a, HeapObject b) {
+ int slots = size / kTaggedSize;
DCHECK_EQ(a->Size(), size);
DCHECK_EQ(b->Size(), size);
- intptr_t* slot_a = reinterpret_cast<intptr_t*>(a->address());
- intptr_t* slot_b = reinterpret_cast<intptr_t*>(b->address());
- for (int i = 0; i < words; i++) {
+ Tagged_t* slot_a = reinterpret_cast<Tagged_t*>(a->address());
+ Tagged_t* slot_b = reinterpret_cast<Tagged_t*>(b->address());
+ for (int i = 0; i < slots; i++) {
if (*slot_a != *slot_b) {
return *slot_a - *slot_b;
}
@@ -1057,17 +1089,17 @@ intptr_t CompareWords(int size, HeapObject* a, HeapObject* b) {
return 0;
}
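
CompareWords feeds ReportDuplicates below: sort byte-identical objects next to each other, then count runs. The same sort-then-scan scheme over raw memory blocks, as a self-contained illustration:

    #include <algorithm>
    #include <cstring>
    #include <utility>
    #include <vector>

    std::vector<std::pair<int, const char*>> CountDuplicates(
        std::vector<const char*>& blocks, size_t size) {
      std::sort(blocks.begin(), blocks.end(),
                [size](const char* a, const char* b) {
                  return std::memcmp(a, b, size) < 0;  // order by content
                });
      std::vector<std::pair<int, const char*>> runs;
      for (size_t i = 0; i < blocks.size();) {
        size_t j = i + 1;
        while (j < blocks.size() && std::memcmp(blocks[i], blocks[j], size) == 0)
          ++j;
        runs.emplace_back(static_cast<int>(j - i), blocks[i]);  // run length
        i = j;
      }
      return runs;
    }
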
-void ReportDuplicates(int size, std::vector<HeapObject*>& objects) {
+void ReportDuplicates(int size, std::vector<HeapObject>& objects) {
if (objects.size() == 0) return;
- sort(objects.begin(), objects.end(), [size](HeapObject* a, HeapObject* b) {
+ sort(objects.begin(), objects.end(), [size](HeapObject a, HeapObject b) {
intptr_t c = CompareWords(size, a, b);
if (c != 0) return c < 0;
return a < b;
});
- std::vector<std::pair<int, HeapObject*>> duplicates;
- HeapObject* current = objects[0];
+ std::vector<std::pair<int, HeapObject>> duplicates;
+ HeapObject current = objects[0];
int count = 1;
for (size_t i = 1; i < objects.size(); i++) {
if (CompareWords(size, current, objects[i]) == 0) {
@@ -1141,18 +1173,18 @@ void Heap::CollectAllAvailableGarbage(GarbageCollectionReason gc_reason) {
EagerlyFreeExternalMemory();
if (FLAG_trace_duplicate_threshold_kb) {
- std::map<int, std::vector<HeapObject*>> objects_by_size;
+ std::map<int, std::vector<HeapObject>> objects_by_size;
PagedSpaces spaces(this);
for (PagedSpace* space = spaces.next(); space != nullptr;
space = spaces.next()) {
HeapObjectIterator it(space);
- for (HeapObject* obj = it.Next(); obj != nullptr; obj = it.Next()) {
+ for (HeapObject obj = it.Next(); !obj.is_null(); obj = it.Next()) {
objects_by_size[obj->Size()].push_back(obj);
}
}
{
LargeObjectIterator it(lo_space());
- for (HeapObject* obj = it.Next(); obj != nullptr; obj = it.Next()) {
+ for (HeapObject obj = it.Next(); !obj.is_null(); obj = it.Next()) {
objects_by_size[obj->Size()].push_back(obj);
}
}
@@ -1177,8 +1209,9 @@ void Heap::ReportExternalMemoryPressure() {
static_cast<GCCallbackFlags>(
kGCCallbackFlagSynchronousPhantomCallbackProcessing |
kGCCallbackFlagCollectAllExternalMemory);
- if (external_memory_ >
- (external_memory_at_last_mark_compact_ + external_memory_hard_limit())) {
+ if (isolate()->isolate_data()->external_memory_ >
+ (isolate()->isolate_data()->external_memory_at_last_mark_compact_ +
+ external_memory_hard_limit())) {
CollectAllGarbage(
kReduceMemoryFootprintMask,
GarbageCollectionReason::kExternalMemoryPressure,
@@ -1200,10 +1233,12 @@ void Heap::ReportExternalMemoryPressure() {
   // Incremental marking is turned on and has already been started.
const double kMinStepSize = 5;
const double kMaxStepSize = 10;
- const double ms_step =
- Min(kMaxStepSize,
- Max(kMinStepSize, static_cast<double>(external_memory_) /
- external_memory_limit_ * kMinStepSize));
+ const double ms_step = Min(
+ kMaxStepSize,
+ Max(kMinStepSize,
+ static_cast<double>(isolate()->isolate_data()->external_memory_) /
+ isolate()->isolate_data()->external_memory_limit_ *
+ kMinStepSize));
const double deadline = MonotonicallyIncreasingTimeInMs() + ms_step;
// Extend the gc callback flags with external memory flags.
current_gc_callback_flags_ = static_cast<GCCallbackFlags>(
@@ -1219,7 +1254,7 @@ void Heap::EnsureFillerObjectAtTop() {
// may be uninitialized memory behind top. We fill the remainder of the page
// with a filler.
Address to_top = new_space_->top();
- Page* page = Page::FromAddress(to_top - kPointerSize);
+ Page* page = Page::FromAddress(to_top - kTaggedSize);
if (page->Contains(to_top)) {
int remaining_in_page = static_cast<int>(page->area_end() - to_top);
CreateFillerObjectAt(to_top, remaining_in_page, ClearRecordedSlots::kNo);
@@ -1231,6 +1266,7 @@ bool Heap::CollectGarbage(AllocationSpace space,
const v8::GCCallbackFlags gc_callback_flags) {
const char* collector_reason = nullptr;
GarbageCollector collector = SelectGarbageCollector(space, &collector_reason);
+ is_current_gc_forced_ = gc_callback_flags & v8::kGCCallbackFlagForced;
if (!CanExpandOldGeneration(new_space()->Capacity())) {
InvokeNearHeapLimitCallback();
@@ -1276,17 +1312,17 @@ bool Heap::CollectGarbage(AllocationSpace space,
GarbageCollectionPrologue();
{
- HistogramTimer* gc_type_timer = GCTypeTimer(collector);
- HistogramTimerScope histogram_timer_scope(gc_type_timer);
+ TimedHistogram* gc_type_timer = GCTypeTimer(collector);
+ TimedHistogramScope histogram_timer_scope(gc_type_timer, isolate_);
TRACE_EVENT0("v8", gc_type_timer->name());
- HistogramTimer* gc_type_priority_timer = GCTypePriorityTimer(collector);
- OptionalHistogramTimerScopeMode mode =
+ TimedHistogram* gc_type_priority_timer = GCTypePriorityTimer(collector);
+ OptionalTimedHistogramScopeMode mode =
isolate_->IsMemorySavingsModeActive()
- ? OptionalHistogramTimerScopeMode::DONT_TAKE_TIME
- : OptionalHistogramTimerScopeMode::TAKE_TIME;
- OptionalHistogramTimerScope histogram_timer_priority_scope(
- gc_type_priority_timer, mode);
+ ? OptionalTimedHistogramScopeMode::DONT_TAKE_TIME
+ : OptionalTimedHistogramScopeMode::TAKE_TIME;
+ OptionalTimedHistogramScope histogram_timer_priority_scope(
+ gc_type_priority_timer, isolate_, mode);
next_gc_likely_to_collect_more =
PerformGarbageCollection(collector, gc_callback_flags);
@@ -1295,6 +1331,11 @@ bool Heap::CollectGarbage(AllocationSpace space,
}
}
+ // Clear is_current_gc_forced now that the current GC is complete. Do this
+ // before GarbageCollectionEpilogue() since that could trigger another
+ // unforced GC.
+ is_current_gc_forced_ = false;
+
GarbageCollectionEpilogue();
if (collector == MARK_COMPACTOR && FLAG_track_detached_contexts) {
isolate()->CheckDetachedContextsAfterGC();
@@ -1318,6 +1359,10 @@ bool Heap::CollectGarbage(AllocationSpace space,
if (deserialization_complete_) {
memory_reducer_->NotifyMarkCompact(event);
}
+ if (initial_max_old_generation_size_ < max_old_generation_size_ &&
+ used_memory_after < initial_max_old_generation_size_threshold_) {
+ max_old_generation_size_ = initial_max_old_generation_size_;
+ }
}
tracer()->Stop(collector);
@@ -1345,6 +1390,7 @@ int Heap::NotifyContextDisposed(bool dependant_context) {
if (!dependant_context) {
tracer()->ResetSurvivalEvents();
old_generation_size_configured_ = false;
+ old_generation_allocation_limit_ = initial_old_generation_size_;
MemoryReducer::Event event;
event.type = MemoryReducer::kPossibleGarbage;
event.time_ms = MonotonicallyIncreasingTimeInMs();
@@ -1388,53 +1434,64 @@ void Heap::StartIdleIncrementalMarking(
gc_callback_flags);
}
-void Heap::MoveElements(FixedArray* array, int dst_index, int src_index,
- int len, WriteBarrierMode mode) {
+void Heap::MoveElements(FixedArray array, int dst_index, int src_index, int len,
+ WriteBarrierMode mode) {
if (len == 0) return;
DCHECK(array->map() != ReadOnlyRoots(this).fixed_cow_array_map());
- Object** dst = array->data_start() + dst_index;
- Object** src = array->data_start() + src_index;
+ ObjectSlot dst = array->RawFieldOfElementAt(dst_index);
+ ObjectSlot src = array->RawFieldOfElementAt(src_index);
if (FLAG_concurrent_marking && incremental_marking()->IsMarking()) {
if (dst < src) {
for (int i = 0; i < len; i++) {
- base::AsAtomicPointer::Relaxed_Store(
- dst + i, base::AsAtomicPointer::Relaxed_Load(src + i));
+ dst.Relaxed_Store(src.Relaxed_Load());
+ ++dst;
+ ++src;
}
} else {
- for (int i = len - 1; i >= 0; i--) {
- base::AsAtomicPointer::Relaxed_Store(
- dst + i, base::AsAtomicPointer::Relaxed_Load(src + i));
+ // Copy backwards.
+ dst += len - 1;
+ src += len - 1;
+ for (int i = 0; i < len; i++) {
+ dst.Relaxed_Store(src.Relaxed_Load());
+ --dst;
+ --src;
}
}
} else {
- MemMove(dst, src, len * kPointerSize);
+ MemMove(dst.ToVoidPtr(), src.ToVoidPtr(), len * kTaggedSize);
}
if (mode == SKIP_WRITE_BARRIER) return;
FIXED_ARRAY_ELEMENTS_WRITE_BARRIER(this, array, dst_index, len);
}
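
The direction split above is the classic memmove discipline for overlapping ranges: copy forward when the destination is lower than the source, backward otherwise. In miniature:

    #include <cstddef>
    #include <cstdint>

    inline void MoveSlots(uintptr_t* dst, const uintptr_t* src, size_t len) {
      if (dst < src) {
        for (size_t i = 0; i < len; ++i) dst[i] = src[i];  // forward copy
      } else {
        for (size_t i = len; i-- > 0;) dst[i] = src[i];    // backward copy
      }
    }
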
-
#ifdef VERIFY_HEAP
// Helper class for verifying the string table.
class StringTableVerifier : public ObjectVisitor {
public:
explicit StringTableVerifier(Isolate* isolate) : isolate_(isolate) {}
- void VisitPointers(HeapObject* host, Object** start, Object** end) override {
+ void VisitPointers(HeapObject host, ObjectSlot start,
+ ObjectSlot end) override {
// Visit all HeapObject pointers in [start, end).
- for (Object** p = start; p < end; p++) {
+ for (ObjectSlot p = start; p < end; ++p) {
DCHECK(!HasWeakHeapObjectTag(*p));
if ((*p)->IsHeapObject()) {
- HeapObject* object = HeapObject::cast(*p);
+ HeapObject object = HeapObject::cast(*p);
// Check that the string is actually internalized.
CHECK(object->IsTheHole(isolate_) || object->IsUndefined(isolate_) ||
object->IsInternalizedString());
}
}
}
- void VisitPointers(HeapObject* host, MaybeObject** start,
- MaybeObject** end) override {
+ void VisitPointers(HeapObject host, MaybeObjectSlot start,
+ MaybeObjectSlot end) override {
+ UNREACHABLE();
+ }
+
+ void VisitCodeTarget(Code host, RelocInfo* rinfo) override { UNREACHABLE(); }
+
+ void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
UNREACHABLE();
}
@@ -1475,7 +1532,7 @@ bool Heap::ReserveSpace(Reservation* reservations, std::vector<Address>* maps) {
// The deserializer will update the skip list.
AllocationResult allocation = map_space()->AllocateRawUnaligned(
Map::kSize, PagedSpace::IGNORE_SKIP_LIST);
- HeapObject* free_space = nullptr;
+ HeapObject free_space;
if (allocation.To(&free_space)) {
// Mark with a free list node, in case we have a GC before
// deserializing.
@@ -1499,7 +1556,7 @@ bool Heap::ReserveSpace(Reservation* reservations, std::vector<Address>* maps) {
AllocationResult allocation;
int size = chunk.size;
DCHECK_LE(static_cast<size_t>(size),
- MemoryAllocator::PageAreaSize(
+ MemoryChunkLayout::AllocatableMemoryInMemoryChunk(
static_cast<AllocationSpace>(space)));
if (space == NEW_SPACE) {
allocation = new_space()->AllocateRawUnaligned(size);
@@ -1508,7 +1565,7 @@ bool Heap::ReserveSpace(Reservation* reservations, std::vector<Address>* maps) {
allocation = paged_space(space)->AllocateRawUnaligned(
size, PagedSpace::IGNORE_SKIP_LIST);
}
- HeapObject* free_space = nullptr;
+ HeapObject free_space;
if (allocation.To(&free_space)) {
// Mark with a free list node, in case we have a GC before
// deserializing.
@@ -1587,7 +1644,7 @@ void Heap::UpdateSurvivalStatistics(int start_new_space_size) {
bool Heap::PerformGarbageCollection(
GarbageCollector collector, const v8::GCCallbackFlags gc_callback_flags) {
- int freed_global_handles = 0;
+ size_t freed_global_handles = 0;
if (!IsYoungGenerationCollector(collector)) {
PROFILE(isolate_, CodeMovingGCEvent());
@@ -1604,6 +1661,11 @@ bool Heap::PerformGarbageCollection(
{
GCCallbacksScope scope(this);
+    // Temporarily override any embedder stack state as callbacks may create their
+ // own state on the stack and recursively trigger GC.
+ EmbedderStackStateScope embedder_scope(
+ local_embedder_heap_tracer(),
+ EmbedderHeapTracer::EmbedderStackState::kUnknown);
if (scope.CheckReenter()) {
AllowHeapAllocation allow_allocation;
TRACE_GC(tracer(), GCTracer::Scope::HEAP_EXTERNAL_PROLOGUE);
@@ -1670,15 +1732,35 @@ bool Heap::PerformGarbageCollection(
isolate_->counters()->objs_since_last_young()->Set(0);
- gc_post_processing_depth_++;
{
- AllowHeapAllocation allow_allocation;
TRACE_GC(tracer(), GCTracer::Scope::HEAP_EXTERNAL_WEAK_GLOBAL_HANDLES);
+ // First round weak callbacks are not supposed to allocate and trigger
+ // nested GCs.
freed_global_handles =
- isolate_->global_handles()->PostGarbageCollectionProcessing(
- collector, gc_callback_flags);
+ isolate_->global_handles()->InvokeFirstPassWeakCallbacks();
+ }
+
+ if (collector == MARK_COMPACTOR) {
+ TRACE_GC(tracer(), GCTracer::Scope::HEAP_EMBEDDER_TRACING_EPILOGUE);
+ // TraceEpilogue may trigger operations that invalidate global handles. It
+ // has to be called *after* all other operations that potentially touch and
+ // reset global handles. It is also still part of the main garbage
+ // collection pause and thus needs to be called *before* any operation that
+    // can potentially trigger recursive garbage collections.
+ local_embedder_heap_tracer()->TraceEpilogue();
+ }
+
+ {
+ TRACE_GC(tracer(), GCTracer::Scope::HEAP_EXTERNAL_WEAK_GLOBAL_HANDLES);
+ gc_post_processing_depth_++;
+ {
+ AllowHeapAllocation allow_allocation;
+ freed_global_handles +=
+ isolate_->global_handles()->PostGarbageCollectionProcessing(
+ collector, gc_callback_flags);
+ }
+ gc_post_processing_depth_--;
}
- gc_post_processing_depth_--;
isolate_->eternal_handles()->PostGarbageCollectionProcessing();
@@ -1691,8 +1773,11 @@ bool Heap::PerformGarbageCollection(
size_t old_gen_size = OldGenerationSizeOfObjects();
if (collector == MARK_COMPACTOR) {
// Register the amount of external allocated memory.
- external_memory_at_last_mark_compact_ = external_memory_;
- external_memory_limit_ = external_memory_ + kExternalAllocationSoftLimit;
+ isolate()->isolate_data()->external_memory_at_last_mark_compact_ =
+ isolate()->isolate_data()->external_memory_;
+ isolate()->isolate_data()->external_memory_limit_ =
+ isolate()->isolate_data()->external_memory_ +
+ kExternalAllocationSoftLimit;
double max_factor =
heap_controller()->MaxGrowingFactor(max_old_generation_size_);
@@ -1824,7 +1909,6 @@ void Heap::MarkCompactEpilogue() {
void Heap::MarkCompactPrologue() {
TRACE_GC(tracer(), GCTracer::Scope::MC_PROLOGUE);
- isolate_->context_slot_cache()->Clear();
isolate_->descriptor_lookup_cache()->Clear();
RegExpResultsCache::Clear(string_split_cache());
RegExpResultsCache::Clear(regexp_multiple_cache());
@@ -1855,7 +1939,7 @@ void Heap::CheckNewSpaceExpansionCriteria() {
void Heap::EvacuateYoungGeneration() {
TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_FAST_PROMOTE);
- base::LockGuard<base::Mutex> guard(relocation_mutex());
+ base::MutexGuard guard(relocation_mutex());
ConcurrentMarking::PauseScope pause_scope(concurrent_marking());
if (!FLAG_concurrent_marking) {
DCHECK(fast_promotion_mode_);
@@ -1898,7 +1982,7 @@ void Heap::EvacuateYoungGeneration() {
void Heap::Scavenge() {
TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE);
- base::LockGuard<base::Mutex> guard(relocation_mutex());
+ base::MutexGuard guard(relocation_mutex());
ConcurrentMarking::PauseScope pause_scope(concurrent_marking());
// There are soft limits in the allocation code, designed to trigger a mark
// sweep collection by failing allocations. There is no sense in trying to
@@ -1951,15 +2035,15 @@ void Heap::ComputeFastPromotionMode() {
void Heap::UnprotectAndRegisterMemoryChunk(MemoryChunk* chunk) {
if (unprotected_memory_chunks_registry_enabled_) {
- base::LockGuard<base::Mutex> guard(&unprotected_memory_chunks_mutex_);
+ base::MutexGuard guard(&unprotected_memory_chunks_mutex_);
if (unprotected_memory_chunks_.insert(chunk).second) {
chunk->SetReadAndWritable();
}
}
}
-void Heap::UnprotectAndRegisterMemoryChunk(HeapObject* object) {
- UnprotectAndRegisterMemoryChunk(MemoryChunk::FromAddress(object->address()));
+void Heap::UnprotectAndRegisterMemoryChunk(HeapObject object) {
+ UnprotectAndRegisterMemoryChunk(MemoryChunk::FromHeapObject(object));
}
void Heap::UnregisterUnprotectedMemoryChunk(MemoryChunk* chunk) {
@@ -1971,62 +2055,61 @@ void Heap::ProtectUnprotectedMemoryChunks() {
for (auto chunk = unprotected_memory_chunks_.begin();
chunk != unprotected_memory_chunks_.end(); chunk++) {
CHECK(memory_allocator()->IsMemoryChunkExecutable(*chunk));
- (*chunk)->SetReadAndExecutable();
+ (*chunk)->SetDefaultCodePermissions();
}
unprotected_memory_chunks_.clear();
}
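
base::MutexGuard here is the scope-bound locking idiom: the registry is only touched while the guard is alive. The equivalent shape with the standard library:

    #include <mutex>
    #include <set>

    std::mutex registry_mutex;
    std::set<void*> registry;

    void RegisterChunk(void* chunk) {
      std::lock_guard<std::mutex> guard(registry_mutex);  // unlocks on scope exit
      registry.insert(chunk);
    }
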
-bool Heap::ExternalStringTable::Contains(HeapObject* obj) {
+bool Heap::ExternalStringTable::Contains(String string) {
for (size_t i = 0; i < new_space_strings_.size(); ++i) {
- if (new_space_strings_[i] == obj) return true;
+ if (new_space_strings_[i] == string) return true;
}
for (size_t i = 0; i < old_space_strings_.size(); ++i) {
- if (old_space_strings_[i] == obj) return true;
+ if (old_space_strings_[i] == string) return true;
}
return false;
}
-String* Heap::UpdateNewSpaceReferenceInExternalStringTableEntry(Heap* heap,
- Object** p) {
+String Heap::UpdateNewSpaceReferenceInExternalStringTableEntry(
+ Heap* heap, FullObjectSlot p) {
MapWord first_word = HeapObject::cast(*p)->map_word();
if (!first_word.IsForwardingAddress()) {
// Unreachable external string can be finalized.
- String* string = String::cast(*p);
+ String string = String::cast(*p);
if (!string->IsExternalString()) {
// Original external string has been internalized.
DCHECK(string->IsThinString());
- return nullptr;
+ return String();
}
heap->FinalizeExternalString(string);
- return nullptr;
+ return String();
}
// String is still reachable.
- String* new_string = String::cast(first_word.ToForwardingAddress());
+ String new_string = String::cast(first_word.ToForwardingAddress());
if (new_string->IsThinString()) {
// Filtering Thin strings out of the external string table.
- return nullptr;
+ return String();
} else if (new_string->IsExternalString()) {
MemoryChunk::MoveExternalBackingStoreBytes(
ExternalBackingStoreType::kExternalString,
- Page::FromAddress(reinterpret_cast<Address>(*p)),
- Page::FromHeapObject(new_string),
+ Page::FromAddress((*p).ptr()), Page::FromHeapObject(new_string),
ExternalString::cast(new_string)->ExternalPayloadSize());
return new_string;
}
// Internalization can replace external strings with non-external strings.
- return new_string->IsExternalString() ? new_string : nullptr;
+ return new_string->IsExternalString() ? new_string : String();
}
void Heap::ExternalStringTable::VerifyNewSpace() {
#ifdef DEBUG
- std::set<String*> visited_map;
+ std::set<String> visited_map;
std::map<MemoryChunk*, size_t> size_map;
ExternalBackingStoreType type = ExternalBackingStoreType::kExternalString;
for (size_t i = 0; i < new_space_strings_.size(); ++i) {
- String* obj = String::cast(new_space_strings_[i]);
+ String obj = String::cast(new_space_strings_[i]);
MemoryChunk* mc = MemoryChunk::FromHeapObject(obj);
DCHECK(mc->InNewSpace());
DCHECK(heap_->InNewSpace(obj));
@@ -2045,12 +2128,12 @@ void Heap::ExternalStringTable::VerifyNewSpace() {
void Heap::ExternalStringTable::Verify() {
#ifdef DEBUG
- std::set<String*> visited_map;
+ std::set<String> visited_map;
std::map<MemoryChunk*, size_t> size_map;
ExternalBackingStoreType type = ExternalBackingStoreType::kExternalString;
VerifyNewSpace();
for (size_t i = 0; i < old_space_strings_.size(); ++i) {
- String* obj = String::cast(old_space_strings_[i]);
+ String obj = String::cast(old_space_strings_[i]);
MemoryChunk* mc = MemoryChunk::FromHeapObject(obj);
DCHECK(!mc->InNewSpace());
DCHECK(!heap_->InNewSpace(obj));
@@ -2071,20 +2154,20 @@ void Heap::ExternalStringTable::UpdateNewSpaceReferences(
Heap::ExternalStringTableUpdaterCallback updater_func) {
if (new_space_strings_.empty()) return;
- Object** start = new_space_strings_.data();
- Object** end = start + new_space_strings_.size();
- Object** last = start;
+ FullObjectSlot start(&new_space_strings_[0]);
+ FullObjectSlot end(&new_space_strings_[new_space_strings_.size()]);
+ FullObjectSlot last = start;
- for (Object** p = start; p < end; ++p) {
- String* target = updater_func(heap_, p);
+ for (FullObjectSlot p = start; p < end; ++p) {
+ String target = updater_func(heap_, p);
- if (target == nullptr) continue;
+ if (target.is_null()) continue;
DCHECK(target->IsExternalString());
if (InNewSpace(target)) {
// String is still in new space. Update the table entry.
- *last = target;
+ last.store(target);
++last;
} else {
// String got promoted. Move it to the old string list.
@@ -2092,8 +2175,8 @@ void Heap::ExternalStringTable::UpdateNewSpaceReferences(
}
}
- DCHECK_LE(last, end);
- new_space_strings_.resize(static_cast<size_t>(last - start));
+ DCHECK(last <= end);
+ new_space_strings_.resize(last - start);
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
VerifyNewSpace();
@@ -2111,18 +2194,20 @@ void Heap::ExternalStringTable::PromoteAllNewSpaceStrings() {
void Heap::ExternalStringTable::IterateNewSpaceStrings(RootVisitor* v) {
if (!new_space_strings_.empty()) {
- v->VisitRootPointers(Root::kExternalStringsTable, nullptr,
- new_space_strings_.data(),
- new_space_strings_.data() + new_space_strings_.size());
+ v->VisitRootPointers(
+ Root::kExternalStringsTable, nullptr,
+ FullObjectSlot(&new_space_strings_[0]),
+ FullObjectSlot(&new_space_strings_[new_space_strings_.size()]));
}
}
void Heap::ExternalStringTable::IterateAll(RootVisitor* v) {
IterateNewSpaceStrings(v);
if (!old_space_strings_.empty()) {
- v->VisitRootPointers(Root::kExternalStringsTable, nullptr,
- old_space_strings_.data(),
- old_space_strings_.data() + old_space_strings_.size());
+ v->VisitRootPointers(
+ Root::kExternalStringsTable, nullptr,
+ FullObjectSlot(old_space_strings_.data()),
+ FullObjectSlot(old_space_strings_.data() + old_space_strings_.size()));
}
}
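
UpdateNewSpaceReferences above is an in-place filter: a write cursor (last) slides survivors left and the vector is resized once at the end. The generic pattern it instantiates:

    #include <vector>

    template <typename T, typename Keep>
    void CompactInPlace(std::vector<T>& v, Keep keep) {
      size_t last = 0;
      for (size_t i = 0; i < v.size(); ++i) {
        if (keep(v[i])) v[last++] = v[i];  // survivor slides left
      }
      v.resize(last);  // drop the tail in one step
    }
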
@@ -2134,9 +2219,10 @@ void Heap::UpdateNewSpaceReferencesInExternalStringTable(
void Heap::ExternalStringTable::UpdateReferences(
Heap::ExternalStringTableUpdaterCallback updater_func) {
if (old_space_strings_.size() > 0) {
- Object** start = old_space_strings_.data();
- Object** end = start + old_space_strings_.size();
- for (Object** p = start; p < end; ++p) *p = updater_func(heap_, p);
+ FullObjectSlot start(old_space_strings_.data());
+ FullObjectSlot end(old_space_strings_.data() + old_space_strings_.size());
+ for (FullObjectSlot p = start; p < end; ++p)
+ p.store(updater_func(heap_, p));
}
UpdateNewSpaceReferences(updater_func);
@@ -2160,14 +2246,14 @@ void Heap::ProcessYoungWeakReferences(WeakObjectRetainer* retainer) {
void Heap::ProcessNativeContexts(WeakObjectRetainer* retainer) {
- Object* head = VisitWeakList<Context>(this, native_contexts_list(), retainer);
+ Object head = VisitWeakList<Context>(this, native_contexts_list(), retainer);
// Update the head of the list of contexts.
set_native_contexts_list(head);
}
void Heap::ProcessAllocationSites(WeakObjectRetainer* retainer) {
- Object* allocation_site_obj =
+ Object allocation_site_obj =
VisitWeakList<AllocationSite>(this, allocation_sites_list(), retainer);
set_allocation_sites_list(allocation_site_obj);
}
@@ -2178,15 +2264,15 @@ void Heap::ProcessWeakListRoots(WeakObjectRetainer* retainer) {
}
void Heap::ForeachAllocationSite(
- Object* list, const std::function<void(AllocationSite*)>& visitor) {
+ Object list, const std::function<void(AllocationSite)>& visitor) {
DisallowHeapAllocation disallow_heap_allocation;
- Object* current = list;
+ Object current = list;
while (current->IsAllocationSite()) {
- AllocationSite* site = AllocationSite::cast(current);
+ AllocationSite site = AllocationSite::cast(current);
visitor(site);
- Object* current_nested = site->nested_site();
+ Object current_nested = site->nested_site();
while (current_nested->IsAllocationSite()) {
- AllocationSite* nested_site = AllocationSite::cast(current_nested);
+ AllocationSite nested_site = AllocationSite::cast(current_nested);
visitor(nested_site);
current_nested = nested_site->nested_site();
}
@@ -2199,7 +2285,7 @@ void Heap::ResetAllAllocationSitesDependentCode(PretenureFlag flag) {
bool marked = false;
ForeachAllocationSite(allocation_sites_list(),
- [&marked, flag, this](AllocationSite* site) {
+ [&marked, flag, this](AllocationSite site) {
if (site->GetPretenureMode() == flag) {
site->ResetPretenureDecision();
site->set_deopt_dependent_code(true);
@@ -2245,8 +2331,8 @@ void Heap::VisitExternalResources(v8::ExternalResourceVisitor* visitor) {
Isolate* isolate, v8::ExternalResourceVisitor* visitor)
: isolate_(isolate), visitor_(visitor) {}
void VisitRootPointers(Root root, const char* description,
- Object** start, Object** end) override {
- for (Object** p = start; p < end; p++) {
+ FullObjectSlot start, FullObjectSlot end) override {
+ for (FullObjectSlot p = start; p < end; ++p) {
DCHECK((*p)->IsExternalString());
visitor_->VisitExternalString(
Utils::ToLocal(Handle<String>(String::cast(*p), isolate_)));
@@ -2277,7 +2363,7 @@ int Heap::GetMaximumFillToAlign(AllocationAlignment alignment) {
return 0;
case kDoubleAligned:
case kDoubleUnaligned:
- return kDoubleSize - kPointerSize;
+ return kDoubleSize - kTaggedSize;
default:
UNREACHABLE();
}
@@ -2287,22 +2373,20 @@ int Heap::GetMaximumFillToAlign(AllocationAlignment alignment) {
int Heap::GetFillToAlign(Address address, AllocationAlignment alignment) {
if (alignment == kDoubleAligned && (address & kDoubleAlignmentMask) != 0)
- return kPointerSize;
+ return kTaggedSize;
if (alignment == kDoubleUnaligned && (address & kDoubleAlignmentMask) == 0)
- return kDoubleSize - kPointerSize; // No fill if double is always aligned.
+ return kDoubleSize - kTaggedSize; // No fill if double is always aligned.
return 0;
}
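
With 4-byte tagged slots and 8-byte doubles, a misaligned address needs exactly one tagged-size filler before it. A worked standalone version of that computation (the constants encode the 32-bit-tagged assumption, not a universal truth):

    #include <cstdint>

    constexpr int kTaggedSize = 4;  // assumption: 32-bit tagged slots
    constexpr int kDoubleSize = 8;

    inline int FillToAlignDouble(uintptr_t address) {
      return (address % kDoubleSize != 0) ? kTaggedSize : 0;
    }
    // FillToAlignDouble(0x1004) == 4; FillToAlignDouble(0x1008) == 0.
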
-
-HeapObject* Heap::PrecedeWithFiller(HeapObject* object, int filler_size) {
+HeapObject Heap::PrecedeWithFiller(HeapObject object, int filler_size) {
CreateFillerObjectAt(object->address(), filler_size, ClearRecordedSlots::kNo);
return HeapObject::FromAddress(object->address() + filler_size);
}
-
-HeapObject* Heap::AlignWithFiller(HeapObject* object, int object_size,
- int allocation_size,
- AllocationAlignment alignment) {
+HeapObject Heap::AlignWithFiller(HeapObject object, int object_size,
+ int allocation_size,
+ AllocationAlignment alignment) {
int filler_size = allocation_size - object_size;
DCHECK_LT(0, filler_size);
int pre_filler = GetFillToAlign(object->address(), alignment);
@@ -2310,115 +2394,38 @@ HeapObject* Heap::AlignWithFiller(HeapObject* object, int object_size,
object = PrecedeWithFiller(object, pre_filler);
filler_size -= pre_filler;
}
- if (filler_size)
+ if (filler_size) {
CreateFillerObjectAt(object->address() + object_size, filler_size,
ClearRecordedSlots::kNo);
+ }
return object;
}
-void Heap::RegisterNewArrayBuffer(JSArrayBuffer* buffer) {
+void Heap::RegisterNewArrayBuffer(JSArrayBuffer buffer) {
ArrayBufferTracker::RegisterNew(this, buffer);
}
-
-void Heap::UnregisterArrayBuffer(JSArrayBuffer* buffer) {
+void Heap::UnregisterArrayBuffer(JSArrayBuffer buffer) {
ArrayBufferTracker::Unregister(this, buffer);
}
void Heap::ConfigureInitialOldGenerationSize() {
if (!old_generation_size_configured_ && tracer()->SurvivalEventsRecorded()) {
- old_generation_allocation_limit_ =
- Max(heap_controller()->MinimumAllocationLimitGrowingStep(
- CurrentHeapGrowingMode()),
+ const size_t new_limit =
+ Max(OldGenerationSizeOfObjects() +
+ heap_controller()->MinimumAllocationLimitGrowingStep(
+ CurrentHeapGrowingMode()),
static_cast<size_t>(
static_cast<double>(old_generation_allocation_limit_) *
(tracer()->AverageSurvivalRatio() / 100)));
+ if (new_limit < old_generation_allocation_limit_) {
+ old_generation_allocation_limit_ = new_limit;
+ } else {
+ old_generation_size_configured_ = true;
+ }
}
}
-void Heap::CreateJSEntryStub() {
- JSEntryStub stub(isolate(), StackFrame::ENTRY);
- set_js_entry_code(*stub.GetCode());
-}
-
-
-void Heap::CreateJSConstructEntryStub() {
- JSEntryStub stub(isolate(), StackFrame::CONSTRUCT_ENTRY);
- set_js_construct_entry_code(*stub.GetCode());
-}
-
-void Heap::CreateJSRunMicrotasksEntryStub() {
- JSEntryStub stub(isolate(), JSEntryStub::SpecialTarget::kRunMicrotasks);
- set_js_run_microtasks_entry_code(*stub.GetCode());
-}
-
-void Heap::CreateFixedStubs() {
- // Here we create roots for fixed stubs. They are needed at GC
- // for cooking and uncooking (check out frames.cc).
- // The eliminates the need for doing dictionary lookup in the
- // stub cache for these stubs.
- HandleScope scope(isolate());
- // Canonicalize handles, so that we can share constant pool entries pointing
- // to code targets without dereferencing their handles.
- CanonicalHandleScope canonical(isolate());
-
- // Create stubs that should be there, so we don't unexpectedly have to
- // create them if we need them during the creation of another stub.
- // Stub creation mixes raw pointers and handles in an unsafe manner so
- // we cannot create stubs while we are creating stubs.
- CodeStub::GenerateStubsAheadOfTime(isolate());
-
- // gcc-4.4 has problem generating correct code of following snippet:
- // { JSEntryStub stub;
- // js_entry_code_ = *stub.GetCode();
- // }
- // { JSConstructEntryStub stub;
- // js_construct_entry_code_ = *stub.GetCode();
- // }
- // To workaround the problem, make separate functions without inlining.
- Heap::CreateJSEntryStub();
- Heap::CreateJSConstructEntryStub();
- Heap::CreateJSRunMicrotasksEntryStub();
-}
-
-bool Heap::RootCanBeWrittenAfterInitialization(RootIndex root_index) {
- switch (root_index) {
- case RootIndex::kNumberStringCache:
- case RootIndex::kCodeStubs:
- case RootIndex::kScriptList:
- case RootIndex::kMaterializedObjects:
- case RootIndex::kDetachedContexts:
- case RootIndex::kRetainedMaps:
- case RootIndex::kRetainingPathTargets:
- case RootIndex::kFeedbackVectorsForProfilingTools:
- case RootIndex::kNoScriptSharedFunctionInfos:
- case RootIndex::kSerializedObjects:
- case RootIndex::kSerializedGlobalProxySizes:
- case RootIndex::kPublicSymbolTable:
- case RootIndex::kApiSymbolTable:
- case RootIndex::kApiPrivateSymbolTable:
- case RootIndex::kMessageListeners:
-// Smi values
-#define SMI_ENTRY(type, name, Name) case RootIndex::k##Name:
- SMI_ROOT_LIST(SMI_ENTRY)
-#undef SMI_ENTRY
- // String table
- case RootIndex::kStringTable:
- return true;
-
- default:
- return false;
- }
-}
-
-bool Heap::RootCanBeTreatedAsConstant(RootIndex root_index) {
- bool can_be = !RootCanBeWrittenAfterInitialization(root_index) &&
- !InNewSpace(root(root_index));
- DCHECK_IMPLIES(can_be, IsImmovable(HeapObject::cast(root(root_index))));
- return can_be;
-}
-
-
void Heap::FlushNumberStringCache() {
// Flush the number to string cache.
int len = number_string_cache()->length();
@@ -2427,32 +2434,32 @@ void Heap::FlushNumberStringCache() {
}
}
-HeapObject* Heap::CreateFillerObjectAt(Address addr, int size,
- ClearRecordedSlots clear_slots_mode,
- ClearFreedMemoryMode clear_memory_mode) {
- if (size == 0) return nullptr;
- HeapObject* filler = HeapObject::FromAddress(addr);
- if (size == kPointerSize) {
+HeapObject Heap::CreateFillerObjectAt(Address addr, int size,
+ ClearRecordedSlots clear_slots_mode,
+ ClearFreedMemoryMode clear_memory_mode) {
+ if (size == 0) return HeapObject();
+ HeapObject filler = HeapObject::FromAddress(addr);
+ if (size == kTaggedSize) {
filler->set_map_after_allocation(
- reinterpret_cast<Map*>(root(RootIndex::kOnePointerFillerMap)),
+ Map::unchecked_cast(isolate()->root(RootIndex::kOnePointerFillerMap)),
SKIP_WRITE_BARRIER);
- } else if (size == 2 * kPointerSize) {
+ } else if (size == 2 * kTaggedSize) {
filler->set_map_after_allocation(
- reinterpret_cast<Map*>(root(RootIndex::kTwoPointerFillerMap)),
+ Map::unchecked_cast(isolate()->root(RootIndex::kTwoPointerFillerMap)),
SKIP_WRITE_BARRIER);
if (clear_memory_mode == ClearFreedMemoryMode::kClearFreedMemory) {
- Memory<Address>(addr + kPointerSize) =
- static_cast<Address>(kClearedFreeMemoryValue);
+ Memory<Tagged_t>(addr + kTaggedSize) =
+ static_cast<Tagged_t>(kClearedFreeMemoryValue);
}
} else {
- DCHECK_GT(size, 2 * kPointerSize);
+ DCHECK_GT(size, 2 * kTaggedSize);
filler->set_map_after_allocation(
- reinterpret_cast<Map*>(root(RootIndex::kFreeSpaceMap)),
+ Map::unchecked_cast(isolate()->root(RootIndex::kFreeSpaceMap)),
SKIP_WRITE_BARRIER);
FreeSpace::cast(filler)->relaxed_write_size(size);
if (clear_memory_mode == ClearFreedMemoryMode::kClearFreedMemory) {
- memset(reinterpret_cast<void*>(addr + 2 * kPointerSize),
- kClearedFreeMemoryValue, size - 2 * kPointerSize);
+ MemsetTagged(ObjectSlot(addr) + 2, Object(kClearedFreeMemoryValue),
+ (size / kTaggedSize) - 2);
}
}
if (clear_slots_mode == ClearRecordedSlots::kYes) {
@@ -2461,29 +2468,44 @@ HeapObject* Heap::CreateFillerObjectAt(Address addr, int size,
// At this point, we may be deserializing the heap from a snapshot, and
// none of the maps have been created yet and are nullptr.
- DCHECK((filler->map() == nullptr && !deserialization_complete_) ||
+ DCHECK((filler->map_slot().contains_value(kNullAddress) &&
+ !deserialization_complete_) ||
filler->map()->IsMap());
return filler;
}
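
Three filler shapes cover every gap size: one-slot and two-slot fillers are just a map word (plus an optionally cleared payload), while anything larger becomes a FreeSpace that records its own length. The selection logic in isolation:

    enum class FillerKind { kOneSlot, kTwoSlot, kFreeSpace };

    inline FillerKind SelectFiller(int size_in_slots) {
      if (size_in_slots == 1) return FillerKind::kOneSlot;  // map only
      if (size_in_slots == 2) return FillerKind::kTwoSlot;  // map + one slot
      return FillerKind::kFreeSpace;                        // map + size field
    }
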
-
-bool Heap::CanMoveObjectStart(HeapObject* object) {
+bool Heap::CanMoveObjectStart(HeapObject object) {
if (!FLAG_move_object_start) return false;
// Sampling heap profiler may have a reference to the object.
if (isolate()->heap_profiler()->is_sampling_allocations()) return false;
- Address address = object->address();
-
- if (lo_space()->Contains(object)) return false;
+ if (IsLargeObject(object)) return false;
// We can move the object start if the page was already swept.
- return Page::FromAddress(address)->SweepingDone();
+ return Page::FromHeapObject(object)->SweepingDone();
+}
+
+bool Heap::IsImmovable(HeapObject object) {
+ MemoryChunk* chunk = MemoryChunk::FromHeapObject(object);
+ return chunk->NeverEvacuate() || IsLargeObject(object);
+}
+
+bool Heap::IsLargeObject(HeapObject object) {
+ return IsLargeMemoryChunk(MemoryChunk::FromHeapObject(object));
}
-bool Heap::IsImmovable(HeapObject* object) {
- MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
- return chunk->NeverEvacuate() || chunk->owner()->identity() == LO_SPACE;
+bool Heap::IsLargeMemoryChunk(MemoryChunk* chunk) {
+ return chunk->owner()->identity() == NEW_LO_SPACE ||
+ chunk->owner()->identity() == LO_SPACE ||
+ chunk->owner()->identity() == CODE_LO_SPACE;
+}
+
+bool Heap::IsInYoungGeneration(HeapObject object) {
+ if (MemoryChunk::FromHeapObject(object)->IsInNewLargeObjectSpace()) {
+ return !object->map_word().IsForwardingAddress();
+ }
+ return Heap::InNewSpace(object);
}
#ifdef ENABLE_SLOW_DCHECKS
@@ -2491,18 +2513,18 @@ namespace {
class LeftTrimmerVerifierRootVisitor : public RootVisitor {
public:
- explicit LeftTrimmerVerifierRootVisitor(FixedArrayBase* to_check)
+ explicit LeftTrimmerVerifierRootVisitor(FixedArrayBase to_check)
: to_check_(to_check) {}
void VisitRootPointers(Root root, const char* description,
- Object** start, Object** end) override {
- for (Object** p = start; p < end; ++p) {
+ FullObjectSlot start, FullObjectSlot end) override {
+ for (FullObjectSlot p = start; p < end; ++p) {
DCHECK_NE(*p, to_check_);
}
}
private:
- FixedArrayBase* to_check_;
+ FixedArrayBase to_check_;
DISALLOW_COPY_AND_ASSIGN(LeftTrimmerVerifierRootVisitor);
};
@@ -2510,7 +2532,7 @@ class LeftTrimmerVerifierRootVisitor : public RootVisitor {
#endif // ENABLE_SLOW_DCHECKS
namespace {
-bool MayContainRecordedSlots(HeapObject* object) {
+bool MayContainRecordedSlots(HeapObject object) {
   // New space objects do not have recorded slots.
if (MemoryChunk::FromHeapObject(object)->InNewSpace()) return false;
// Whitelist objects that definitely do not have pointers.
@@ -2520,30 +2542,62 @@ bool MayContainRecordedSlots(HeapObject* object) {
}
} // namespace
-FixedArrayBase* Heap::LeftTrimFixedArray(FixedArrayBase* object,
- int elements_to_trim) {
+void Heap::OnMoveEvent(HeapObject target, HeapObject source,
+ int size_in_bytes) {
+ HeapProfiler* heap_profiler = isolate_->heap_profiler();
+ if (heap_profiler->is_tracking_object_moves()) {
+ heap_profiler->ObjectMoveEvent(source->address(), target->address(),
+ size_in_bytes);
+ }
+ for (auto& tracker : allocation_trackers_) {
+ tracker->MoveEvent(source->address(), target->address(), size_in_bytes);
+ }
+ if (target->IsSharedFunctionInfo()) {
+ LOG_CODE_EVENT(isolate_, SharedFunctionInfoMoveEvent(source->address(),
+ target->address()));
+ }
+
+ if (FLAG_verify_predictable) {
+ ++allocations_count_;
+ // Advance synthetic time by making a time request.
+ MonotonicallyIncreasingTimeInMs();
+
+ UpdateAllocationsHash(source);
+ UpdateAllocationsHash(target);
+ UpdateAllocationsHash(size_in_bytes);
+
+ if (allocations_count_ % FLAG_dump_allocations_digest_at_alloc == 0) {
+ PrintAllocationsHash();
+ }
+ } else if (FLAG_fuzzer_gc_analysis) {
+ ++allocations_count_;
+ }
+}
+
+FixedArrayBase Heap::LeftTrimFixedArray(FixedArrayBase object,
+ int elements_to_trim) {
if (elements_to_trim == 0) {
// This simplifies reasoning in the rest of the function.
return object;
}
- CHECK_NOT_NULL(object);
+ CHECK(!object.is_null());
DCHECK(CanMoveObjectStart(object));
// Add custom visitor to concurrent marker if new left-trimmable type
// is added.
DCHECK(object->IsFixedArray() || object->IsFixedDoubleArray());
- const int element_size = object->IsFixedArray() ? kPointerSize : kDoubleSize;
+ const int element_size = object->IsFixedArray() ? kTaggedSize : kDoubleSize;
const int bytes_to_trim = elements_to_trim * element_size;
- Map* map = object->map();
+ Map map = object->map();
- // For now this trick is only applied to objects in new and paged space.
- // In large object space the object's start must coincide with chunk
- // and thus the trick is just not applicable.
- DCHECK(!lo_space()->Contains(object));
+ // For now this trick is only applied to fixed arrays which may be in new
+ // space or old space. In a large object space the object's start must
+ // coincide with the chunk start and thus the trick is not applicable.
+ DCHECK(!IsLargeObject(object));
DCHECK(object->map() != ReadOnlyRoots(this).fixed_cow_array_map());
STATIC_ASSERT(FixedArrayBase::kMapOffset == 0);
- STATIC_ASSERT(FixedArrayBase::kLengthOffset == kPointerSize);
- STATIC_ASSERT(FixedArrayBase::kHeaderSize == 2 * kPointerSize);
+ STATIC_ASSERT(FixedArrayBase::kLengthOffset == kTaggedSize);
+ STATIC_ASSERT(FixedArrayBase::kHeaderSize == 2 * kTaggedSize);
const int len = object->length();
DCHECK(elements_to_trim <= len);
@@ -2560,17 +2614,17 @@ FixedArrayBase* Heap::LeftTrimFixedArray(FixedArrayBase* object,
// Technically in new space this write might be omitted (except for
  // debug mode, which iterates through the heap), but to play it safe
// we still do it.
- HeapObject* filler =
+ HeapObject filler =
CreateFillerObjectAt(old_start, bytes_to_trim, ClearRecordedSlots::kYes);
// Initialize header of the trimmed array. Since left trimming is only
  // performed on pages which are not concurrently swept, creating a filler
// object does not require synchronization.
RELAXED_WRITE_FIELD(object, bytes_to_trim, map);
- RELAXED_WRITE_FIELD(object, bytes_to_trim + kPointerSize,
+ RELAXED_WRITE_FIELD(object, bytes_to_trim + kTaggedSize,
Smi::FromInt(len - elements_to_trim));
- FixedArrayBase* new_object =
+ FixedArrayBase new_object =
FixedArrayBase::cast(HeapObject::FromAddress(new_start));
// Remove recorded slots for the new map and length offset.
@@ -2590,9 +2644,9 @@ FixedArrayBase* Heap::LeftTrimFixedArray(FixedArrayBase* object,
  // we need pointer granularity writes to avoid a race with concurrent
  // marking.
if (filler->Size() > FreeSpace::kSize) {
- MemsetPointer(HeapObject::RawField(filler, FreeSpace::kSize),
- ReadOnlyRoots(this).undefined_value(),
- (filler->Size() - FreeSpace::kSize) / kPointerSize);
+ MemsetTagged(HeapObject::RawField(filler, FreeSpace::kSize),
+ ReadOnlyRoots(this).undefined_value(),
+ (filler->Size() - FreeSpace::kSize) / kTaggedSize);
}
}
// Notify the heap profiler of change in object layout.
@@ -2603,6 +2657,7 @@ FixedArrayBase* Heap::LeftTrimFixedArray(FixedArrayBase* object,
// Make sure the stack or other roots (e.g., Handles) don't contain pointers
// to the original FixedArray (which is now the filler object).
LeftTrimmerVerifierRootVisitor root_visitor(object);
+ ReadOnlyRoots(this).Iterate(&root_visitor);
IterateRoots(&root_visitor, VISIT_ALL);
}
#endif // ENABLE_SLOW_DCHECKS
@@ -2610,7 +2665,7 @@ FixedArrayBase* Heap::LeftTrimFixedArray(FixedArrayBase* object,
return new_object;
}
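
The left trim above is easiest to read as a layout diagram: the map and length words are rewritten bytes_to_trim further into the object, the vacated prefix becomes a filler, and the returned FixedArrayBase aliases the tail of the original allocation. A sketch for elements_to_trim == k, tagged-size elements assumed:

    // before:  | map | length=n  | e0 | e1 | ... | e(k-1) | e(k) | ... |
    //            ^ object
    // after:   | filler (k slots)     | map | length=n-k | e(k) | ... |
    //            ^ old_start            ^ new_object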
-void Heap::RightTrimFixedArray(FixedArrayBase* object, int elements_to_trim) {
+void Heap::RightTrimFixedArray(FixedArrayBase object, int elements_to_trim) {
const int len = object->length();
DCHECK_LE(elements_to_trim, len);
DCHECK_GE(elements_to_trim, 0);
@@ -2623,7 +2678,7 @@ void Heap::RightTrimFixedArray(FixedArrayBase* object, int elements_to_trim) {
DCHECK_GE(bytes_to_trim, 0);
} else if (object->IsFixedArray()) {
CHECK_NE(elements_to_trim, len);
- bytes_to_trim = elements_to_trim * kPointerSize;
+ bytes_to_trim = elements_to_trim * kTaggedSize;
} else {
DCHECK(object->IsFixedDoubleArray());
CHECK_NE(elements_to_trim, len);
@@ -2633,18 +2688,18 @@ void Heap::RightTrimFixedArray(FixedArrayBase* object, int elements_to_trim) {
CreateFillerForArray<FixedArrayBase>(object, elements_to_trim, bytes_to_trim);
}
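
The kPointerSize → kTaggedSize substitutions in these trimming hunks are groundwork for pointer compression: element and header slots are sized by the width of a tagged value rather than of a machine pointer. A sketch of the assumed relationship (the real constants live in globals.h, not in this diff):

    // Uncompressed builds: a tagged value fills a machine word.
    //   constexpr int kTaggedSize = kSystemPointerSize;
    // Compressed builds (planned): a tagged value is a 32-bit offset.
    //   constexpr int kTaggedSize = kInt32Size;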
-void Heap::RightTrimWeakFixedArray(WeakFixedArray* object,
+void Heap::RightTrimWeakFixedArray(WeakFixedArray object,
int elements_to_trim) {
  // This function is safe to use only at the end of the mark-compact
  // collection: when marking, we record the weak slots, and shrinking
  // invalidates them.
DCHECK_EQ(gc_state(), MARK_COMPACT);
CreateFillerForArray<WeakFixedArray>(object, elements_to_trim,
- elements_to_trim * kPointerSize);
+ elements_to_trim * kTaggedSize);
}
template <typename T>
-void Heap::CreateFillerForArray(T* object, int elements_to_trim,
+void Heap::CreateFillerForArray(T object, int elements_to_trim,
int bytes_to_trim) {
DCHECK(object->IsFixedArrayBase() || object->IsByteArray() ||
object->IsWeakFixedArray());
@@ -2679,13 +2734,11 @@ void Heap::CreateFillerForArray(T* object, int elements_to_trim,
// Technically in new space this write might be omitted (except for
  // debug mode, which iterates through the heap), but to play it safe
// we still do it.
- // We do not create a filler for objects in large object space.
- // TODO(hpayer): We should shrink the large object page if the size
- // of the object changed significantly.
- if (!lo_space()->Contains(object)) {
- HeapObject* filler =
+ // We do not create a filler for objects in a large object space.
+ if (!IsLargeObject(object)) {
+ HeapObject filler =
CreateFillerObjectAt(new_end, bytes_to_trim, ClearRecordedSlots::kYes);
- DCHECK_NOT_NULL(filler);
+ DCHECK(!filler.is_null());
  // Clear the mark bits of the black area that now belongs to the filler.
// This is an optimization. The sweeper will release black fillers anyway.
if (incremental_marking()->black_allocation() &&
@@ -2920,7 +2973,7 @@ void Heap::FinalizeIncrementalMarkingIncrementally(
}
void Heap::RegisterDeserializedObjectsForBlackAllocation(
- Reservation* reservations, const std::vector<HeapObject*>& large_objects,
+ Reservation* reservations, const std::vector<HeapObject>& large_objects,
const std::vector<Address>& maps) {
// TODO(ulan): pause black allocation during deserialization to avoid
// iterating all these objects in one go.
@@ -2931,12 +2984,12 @@ void Heap::RegisterDeserializedObjectsForBlackAllocation(
// object space for side effects.
IncrementalMarking::MarkingState* marking_state =
incremental_marking()->marking_state();
- for (int i = OLD_SPACE; i < Serializer<>::kNumberOfSpaces; i++) {
+ for (int i = OLD_SPACE; i < Serializer::kNumberOfSpaces; i++) {
const Heap::Reservation& res = reservations[i];
for (auto& chunk : res) {
Address addr = chunk.start;
while (addr < chunk.end) {
- HeapObject* obj = HeapObject::FromAddress(addr);
+ HeapObject obj = HeapObject::FromAddress(addr);
// Objects can have any color because incremental marking can
// start in the middle of Heap::ReserveSpace().
if (marking_state->IsBlack(obj)) {
@@ -2946,12 +2999,9 @@ void Heap::RegisterDeserializedObjectsForBlackAllocation(
}
}
}
- // We potentially deserialized wrappers which require registering with the
- // embedder as the marker will not find them.
- local_embedder_heap_tracer()->RegisterWrappersWithRemoteTracer();
// Large object space doesn't use reservations, so it needs custom handling.
- for (HeapObject* object : large_objects) {
+ for (HeapObject object : large_objects) {
incremental_marking()->ProcessBlackAllocatedObject(object);
}
@@ -2962,10 +3012,10 @@ void Heap::RegisterDeserializedObjectsForBlackAllocation(
}
}
-void Heap::NotifyObjectLayoutChange(HeapObject* object, int size,
+void Heap::NotifyObjectLayoutChange(HeapObject object, int size,
const DisallowHeapAllocation&) {
if (incremental_marking()->IsMarking()) {
- incremental_marking()->MarkBlackAndPush(object);
+ incremental_marking()->MarkBlackAndVisitObjectDueToLayoutChange(object);
if (incremental_marking()->IsCompacting() &&
MayContainRecordedSlots(object)) {
MemoryChunk::FromHeapObject(object)->RegisterObjectWithInvalidatedSlots(
@@ -2974,7 +3024,7 @@ void Heap::NotifyObjectLayoutChange(HeapObject* object, int size,
}
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
- DCHECK_NULL(pending_layout_change_object_);
+ DCHECK(pending_layout_change_object_.is_null());
pending_layout_change_object_ = object;
}
#endif
@@ -2984,33 +3034,39 @@ void Heap::NotifyObjectLayoutChange(HeapObject* object, int size,
// Helper class for collecting slot addresses.
class SlotCollectingVisitor final : public ObjectVisitor {
public:
- void VisitPointers(HeapObject* host, Object** start, Object** end) override {
- VisitPointers(host, reinterpret_cast<MaybeObject**>(start),
- reinterpret_cast<MaybeObject**>(end));
+ void VisitPointers(HeapObject host, ObjectSlot start,
+ ObjectSlot end) override {
+ VisitPointers(host, MaybeObjectSlot(start), MaybeObjectSlot(end));
}
- void VisitPointers(HeapObject* host, MaybeObject** start,
- MaybeObject** end) final {
- for (MaybeObject** p = start; p < end; p++) {
+ void VisitPointers(HeapObject host, MaybeObjectSlot start,
+ MaybeObjectSlot end) final {
+ for (MaybeObjectSlot p = start; p < end; ++p) {
slots_.push_back(p);
}
}
+ void VisitCodeTarget(Code host, RelocInfo* rinfo) final { UNREACHABLE(); }
+
+ void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
+ UNREACHABLE();
+ }
+
int number_of_slots() { return static_cast<int>(slots_.size()); }
- MaybeObject** slot(int i) { return slots_[i]; }
+ MaybeObjectSlot slot(int i) { return slots_[i]; }
private:
- std::vector<MaybeObject**> slots_;
+ std::vector<MaybeObjectSlot> slots_;
};
-void Heap::VerifyObjectLayoutChange(HeapObject* object, Map* new_map) {
+void Heap::VerifyObjectLayoutChange(HeapObject object, Map new_map) {
if (!FLAG_verify_heap) return;
  // Check that Heap::NotifyObjectLayoutChange was called for object transitions
// that are not safe for concurrent marking.
// If you see this check triggering for a freshly allocated object,
// use object->set_map_after_allocation() to initialize its map.
- if (pending_layout_change_object_ == nullptr) {
+ if (pending_layout_change_object_.is_null()) {
if (object->IsJSObject()) {
DCHECK(!object->map()->TransitionRequiresSynchronizationWithGC(new_map));
} else {
@@ -3026,12 +3082,12 @@ void Heap::VerifyObjectLayoutChange(HeapObject* object, Map* new_map) {
object->set_map_word(old_map_word);
DCHECK_EQ(new_visitor.number_of_slots(), old_visitor.number_of_slots());
for (int i = 0; i < new_visitor.number_of_slots(); i++) {
- DCHECK_EQ(new_visitor.slot(i), old_visitor.slot(i));
+ DCHECK(new_visitor.slot(i) == old_visitor.slot(i));
}
}
} else {
DCHECK_EQ(pending_layout_change_object_, object);
- pending_layout_change_object_ = nullptr;
+ pending_layout_change_object_ = HeapObject();
}
}
#endif
@@ -3211,8 +3267,8 @@ void Heap::CollectGarbageOnMemoryPressure() {
double end = MonotonicallyIncreasingTimeInMs();
// Estimate how much memory we can free.
- int64_t potential_garbage =
- (CommittedMemory() - SizeOfObjects()) + external_memory_;
+ int64_t potential_garbage = (CommittedMemory() - SizeOfObjects()) +
+ isolate()->isolate_data()->external_memory_;
// If we can potentially free large amount of memory, then start GC right
// away instead of waiting for memory reducer.
if (potential_garbage >= kGarbageThresholdInBytes &&
@@ -3257,7 +3313,7 @@ void Heap::MemoryPressureNotification(MemoryPressureLevel level,
void Heap::EagerlyFreeExternalMemory() {
for (Page* page : *old_space()) {
if (!page->SweepingDone()) {
- base::LockGuard<base::Mutex> guard(page->mutex());
+ base::MutexGuard guard(page->mutex());
if (!page->SweepingDone()) {
ArrayBufferTracker::FreeDead(
page, mark_compact_collector()->non_atomic_marking_state());
@@ -3291,6 +3347,11 @@ void Heap::RemoveNearHeapLimitCallback(v8::NearHeapLimitCallback callback,
UNREACHABLE();
}
+void Heap::AutomaticallyRestoreInitialHeapLimit(double threshold_percent) {
+ initial_max_old_generation_size_threshold_ =
+ initial_max_old_generation_size_ * threshold_percent;
+}
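
A worked example of the new threshold, with assumed figures: if the initial old-generation limit was 512 MB and the embedder passes threshold_percent = 0.5, a limit that was raised by a NearHeapLimitCallback becomes eligible for restoration once usage falls below 256 MB:

    // initial_max_old_generation_size_           = 512 MB   (assumed)
    // threshold_percent                          = 0.5      (assumed)
    // initial_max_old_generation_size_threshold_ = 256 MB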
+
bool Heap::InvokeNearHeapLimitCallback() {
if (near_heap_limit_callbacks_.size() > 0) {
HandleScope scope(isolate());
@@ -3314,7 +3375,7 @@ void Heap::CollectCodeStatistics() {
// somehow ends up in those spaces, we would miss it here.
CodeStatistics::CollectCodeStatistics(code_space_, isolate());
CodeStatistics::CollectCodeStatistics(old_space_, isolate());
- CodeStatistics::CollectCodeStatistics(lo_space_, isolate());
+ CodeStatistics::CollectCodeStatistics(code_lo_space_, isolate());
}
#ifdef DEBUG
@@ -3390,28 +3451,18 @@ const char* Heap::GarbageCollectionReasonToString(
UNREACHABLE();
}
-bool Heap::Contains(HeapObject* value) {
+bool Heap::Contains(HeapObject value) {
if (memory_allocator()->IsOutsideAllocatedSpace(value->address())) {
return false;
}
return HasBeenSetUp() &&
(new_space_->ToSpaceContains(value) || old_space_->Contains(value) ||
code_space_->Contains(value) || map_space_->Contains(value) ||
- lo_space_->Contains(value) || read_only_space_->Contains(value));
-}
-
-bool Heap::ContainsSlow(Address addr) {
- if (memory_allocator()->IsOutsideAllocatedSpace(addr)) {
- return false;
- }
- return HasBeenSetUp() &&
- (new_space_->ToSpaceContainsSlow(addr) ||
- old_space_->ContainsSlow(addr) || code_space_->ContainsSlow(addr) ||
- map_space_->ContainsSlow(addr) || lo_space_->ContainsSlow(addr) ||
- read_only_space_->Contains(addr));
+ lo_space_->Contains(value) || read_only_space_->Contains(value) ||
+ code_lo_space_->Contains(value) || new_lo_space_->Contains(value));
}
-bool Heap::InSpace(HeapObject* value, AllocationSpace space) {
+bool Heap::InSpace(HeapObject value, AllocationSpace space) {
if (memory_allocator()->IsOutsideAllocatedSpace(value->address())) {
return false;
}
@@ -3428,6 +3479,8 @@ bool Heap::InSpace(HeapObject* value, AllocationSpace space) {
return map_space_->Contains(value);
case LO_SPACE:
return lo_space_->Contains(value);
+ case CODE_LO_SPACE:
+ return code_lo_space_->Contains(value);
case NEW_LO_SPACE:
return new_lo_space_->Contains(value);
case RO_SPACE:
@@ -3453,6 +3506,8 @@ bool Heap::InSpaceSlow(Address addr, AllocationSpace space) {
return map_space_->ContainsSlow(addr);
case LO_SPACE:
return lo_space_->ContainsSlow(addr);
+ case CODE_LO_SPACE:
+ return code_lo_space_->ContainsSlow(addr);
case NEW_LO_SPACE:
return new_lo_space_->ContainsSlow(addr);
case RO_SPACE:
@@ -3469,6 +3524,7 @@ bool Heap::IsValidAllocationSpace(AllocationSpace space) {
case MAP_SPACE:
case LO_SPACE:
case NEW_LO_SPACE:
+ case CODE_LO_SPACE:
case RO_SPACE:
return true;
default:
@@ -3476,23 +3532,6 @@ bool Heap::IsValidAllocationSpace(AllocationSpace space) {
}
}
-bool Heap::RootIsImmortalImmovable(RootIndex root_index) {
- switch (root_index) {
-#define IMMORTAL_IMMOVABLE_ROOT(name) case RootIndex::k##name:
- IMMORTAL_IMMOVABLE_ROOT_LIST(IMMORTAL_IMMOVABLE_ROOT)
-#undef IMMORTAL_IMMOVABLE_ROOT
-#define INTERNALIZED_STRING(_, name, value) case RootIndex::k##name:
- INTERNALIZED_STRING_LIST_GENERATOR(INTERNALIZED_STRING, /* not used */)
-#undef INTERNALIZED_STRING
-#define STRING_TYPE(NAME, size, name, Name) case RootIndex::k##Name##Map:
- STRING_TYPE_LIST(STRING_TYPE)
-#undef STRING_TYPE
- return true;
- default:
- return false;
- }
-}
-
#ifdef VERIFY_HEAP
class VerifyReadOnlyPointersVisitor : public VerifyPointersVisitor {
public:
@@ -3500,17 +3539,17 @@ class VerifyReadOnlyPointersVisitor : public VerifyPointersVisitor {
: VerifyPointersVisitor(heap) {}
protected:
- void VerifyPointers(HeapObject* host, MaybeObject** start,
- MaybeObject** end) override {
- if (host != nullptr) {
+ void VerifyPointers(HeapObject host, MaybeObjectSlot start,
+ MaybeObjectSlot end) override {
+ if (!host.is_null()) {
CHECK(heap_->InReadOnlySpace(host->map()));
}
VerifyPointersVisitor::VerifyPointers(host, start, end);
- for (MaybeObject** current = start; current < end; current++) {
- HeapObject* object;
- if ((*current)->GetHeapObject(&object)) {
- CHECK(heap_->InReadOnlySpace(object));
+ for (MaybeObjectSlot current = start; current < end; ++current) {
+ HeapObject heap_object;
+ if ((*current)->GetHeapObject(&heap_object)) {
+ CHECK(heap_->InReadOnlySpace(heap_object));
}
}
}
@@ -3538,6 +3577,8 @@ void Heap::Verify() {
code_space_->Verify(isolate(), &no_dirty_regions_visitor);
lo_space_->Verify(isolate());
+ code_lo_space_->Verify(isolate());
+ new_lo_space_->Verify(isolate());
VerifyReadOnlyPointersVisitor read_only_visitor(this);
read_only_space_->Verify(isolate(), &read_only_visitor);
@@ -3549,30 +3590,29 @@ class SlotVerifyingVisitor : public ObjectVisitor {
std::set<std::pair<SlotType, Address> >* typed)
: untyped_(untyped), typed_(typed) {}
- virtual bool ShouldHaveBeenRecorded(HeapObject* host,
- MaybeObject* target) = 0;
+ virtual bool ShouldHaveBeenRecorded(HeapObject host, MaybeObject target) = 0;
- void VisitPointers(HeapObject* host, Object** start, Object** end) override {
+ void VisitPointers(HeapObject host, ObjectSlot start,
+ ObjectSlot end) override {
#ifdef DEBUG
- for (Object** slot = start; slot < end; slot++) {
+ for (ObjectSlot slot = start; slot < end; ++slot) {
DCHECK(!HasWeakHeapObjectTag(*slot));
}
#endif // DEBUG
- VisitPointers(host, reinterpret_cast<MaybeObject**>(start),
- reinterpret_cast<MaybeObject**>(end));
+ VisitPointers(host, MaybeObjectSlot(start), MaybeObjectSlot(end));
}
- void VisitPointers(HeapObject* host, MaybeObject** start,
- MaybeObject** end) final {
- for (MaybeObject** slot = start; slot < end; slot++) {
+ void VisitPointers(HeapObject host, MaybeObjectSlot start,
+ MaybeObjectSlot end) final {
+ for (MaybeObjectSlot slot = start; slot < end; ++slot) {
if (ShouldHaveBeenRecorded(host, *slot)) {
- CHECK_GT(untyped_->count(reinterpret_cast<Address>(slot)), 0);
+ CHECK_GT(untyped_->count(slot.address()), 0);
}
}
}
- void VisitCodeTarget(Code* host, RelocInfo* rinfo) override {
- Object* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
+ void VisitCodeTarget(Code host, RelocInfo* rinfo) override {
+ Object target = Code::GetCodeFromTargetAddress(rinfo->target_address());
if (ShouldHaveBeenRecorded(host, MaybeObject::FromObject(target))) {
CHECK(
InTypedSet(CODE_TARGET_SLOT, rinfo->pc()) ||
@@ -3581,8 +3621,8 @@ class SlotVerifyingVisitor : public ObjectVisitor {
}
}
- void VisitEmbeddedPointer(Code* host, RelocInfo* rinfo) override {
- Object* target = rinfo->target_object();
+ void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
+ Object target = rinfo->target_object();
if (ShouldHaveBeenRecorded(host, MaybeObject::FromObject(target))) {
CHECK(InTypedSet(EMBEDDED_OBJECT_SLOT, rinfo->pc()) ||
(rinfo->IsInConstantPool() &&
@@ -3604,7 +3644,7 @@ class OldToNewSlotVerifyingVisitor : public SlotVerifyingVisitor {
std::set<std::pair<SlotType, Address>>* typed)
: SlotVerifyingVisitor(untyped, typed) {}
- bool ShouldHaveBeenRecorded(HeapObject* host, MaybeObject* target) override {
+ bool ShouldHaveBeenRecorded(HeapObject host, MaybeObject target) override {
DCHECK_IMPLIES(target->IsStrongOrWeak() && Heap::InNewSpace(target),
Heap::InToSpace(target));
return target->IsStrongOrWeak() && Heap::InNewSpace(target) &&
@@ -3616,16 +3656,17 @@ template <RememberedSetType direction>
void CollectSlots(MemoryChunk* chunk, Address start, Address end,
std::set<Address>* untyped,
std::set<std::pair<SlotType, Address> >* typed) {
- RememberedSet<direction>::Iterate(chunk,
- [start, end, untyped](Address slot) {
- if (start <= slot && slot < end) {
- untyped->insert(slot);
- }
- return KEEP_SLOT;
- },
- SlotSet::PREFREE_EMPTY_BUCKETS);
+ RememberedSet<direction>::Iterate(
+ chunk,
+ [start, end, untyped](MaybeObjectSlot slot) {
+ if (start <= slot.address() && slot.address() < end) {
+ untyped->insert(slot.address());
+ }
+ return KEEP_SLOT;
+ },
+ SlotSet::PREFREE_EMPTY_BUCKETS);
RememberedSet<direction>::IterateTyped(
- chunk, [start, end, typed](SlotType type, Address host, Address slot) {
+ chunk, [=](SlotType type, Address slot) {
if (start <= slot && slot < end) {
typed->insert(std::make_pair(type, slot));
}
@@ -3633,8 +3674,8 @@ void CollectSlots(MemoryChunk* chunk, Address start, Address end,
});
}
-void Heap::VerifyRememberedSetFor(HeapObject* object) {
- MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
+void Heap::VerifyRememberedSetFor(HeapObject object) {
+ MemoryChunk* chunk = MemoryChunk::FromHeapObject(object);
DCHECK_IMPLIES(chunk->mutex() == nullptr, InReadOnlySpace(object));
// In RO_SPACE chunk->mutex() may be nullptr, so just ignore it.
base::LockGuard<base::Mutex, base::NullBehavior::kIgnoreIfNull> lock_guard(
@@ -3683,30 +3724,30 @@ void Heap::ZapFromSpace() {
void Heap::ZapCodeObject(Address start_address, int size_in_bytes) {
#ifdef DEBUG
- for (int i = 0; i < size_in_bytes / kPointerSize; i++) {
- reinterpret_cast<Object**>(start_address)[i] = Smi::FromInt(kCodeZapValue);
+ DCHECK(IsAligned(start_address, kIntSize));
+ for (int i = 0; i < size_in_bytes / kIntSize; i++) {
+ Memory<int>(start_address + i * kIntSize) = kCodeZapValue;
}
#endif
}
-Code* Heap::builtin(int index) {
+// TODO(ishell): move builtin accessors out from Heap.
+Code Heap::builtin(int index) {
DCHECK(Builtins::IsBuiltinId(index));
- // Code::cast cannot be used here since we access builtins
- // during the marking phase of mark sweep. See IC::Clear.
- return reinterpret_cast<Code*>(builtins_[index]);
+ return Code::cast(Object(isolate()->builtins_table()[index]));
}
Address Heap::builtin_address(int index) {
DCHECK(Builtins::IsBuiltinId(index) || index == Builtins::builtin_count);
- return reinterpret_cast<Address>(&builtins_[index]);
+ return reinterpret_cast<Address>(&isolate()->builtins_table()[index]);
}
-void Heap::set_builtin(int index, HeapObject* builtin) {
+void Heap::set_builtin(int index, Code builtin) {
DCHECK(Builtins::IsBuiltinId(index));
- DCHECK(Internals::HasHeapObjectTag(builtin));
+ DCHECK(Internals::HasHeapObjectTag(builtin.ptr()));
  // The given builtin may be completely uninitialized, so we cannot check
  // its type here.
- builtins_[index] = builtin;
+ isolate()->builtins_table()[index] = builtin.ptr();
}
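
With the storage moved out of Heap and into the isolate, all three builtin accessors above reduce to flat-array arithmetic over a table of raw Address entries. A sketch, assuming builtins_table() returns an Address* with Builtins::builtin_count slots:

    // builtin(i):          Code::cast(Object(table[i]))
    // builtin_address(i):  reinterpret_cast<Address>(&table[i])
    //                      i.e. table base + i * kSystemPointerSize
    // set_builtin(i, c):   table[i] = c.ptr()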
void Heap::IterateRoots(RootVisitor* v, VisitMode mode) {
@@ -3719,7 +3760,7 @@ void Heap::IterateWeakRoots(RootVisitor* v, VisitMode mode) {
mode == VISIT_ALL_IN_MINOR_MC_MARK ||
mode == VISIT_ALL_IN_MINOR_MC_UPDATE;
v->VisitRootPointer(Root::kStringTable, nullptr,
- &roots_[RootIndex::kStringTable]);
+ FullObjectSlot(&roots_table()[RootIndex::kStringTable]));
v->Synchronize(VisitorSynchronization::kStringTable);
if (!isMinorGC && mode != VISIT_ALL_IN_SWEEP_NEWSPACE &&
mode != VISIT_FOR_SERIALIZATION) {
@@ -3734,8 +3775,9 @@ void Heap::IterateWeakRoots(RootVisitor* v, VisitMode mode) {
void Heap::IterateSmiRoots(RootVisitor* v) {
// Acquire execution access since we are going to read stack limit values.
ExecutionAccess access(isolate());
- v->VisitRootPointers(Root::kSmiRootList, nullptr, roots_.smi_roots_begin(),
- roots_.smi_roots_end());
+ v->VisitRootPointers(Root::kSmiRootList, nullptr,
+ roots_table().smi_roots_begin(),
+ roots_table().smi_roots_end());
v->Synchronize(VisitorSynchronization::kSmiRootList);
}
@@ -3750,38 +3792,38 @@ class FixStaleLeftTrimmedHandlesVisitor : public RootVisitor {
}
void VisitRootPointer(Root root, const char* description,
- Object** p) override {
+ FullObjectSlot p) override {
FixHandle(p);
}
- void VisitRootPointers(Root root, const char* description, Object** start,
- Object** end) override {
- for (Object** p = start; p < end; p++) FixHandle(p);
+ void VisitRootPointers(Root root, const char* description,
+ FullObjectSlot start, FullObjectSlot end) override {
+ for (FullObjectSlot p = start; p < end; ++p) FixHandle(p);
}
private:
- inline void FixHandle(Object** p) {
+ inline void FixHandle(FullObjectSlot p) {
if (!(*p)->IsHeapObject()) return;
- HeapObject* current = reinterpret_cast<HeapObject*>(*p);
+ HeapObject current = HeapObject::cast(*p);
const MapWord map_word = current->map_word();
if (!map_word.IsForwardingAddress() && current->IsFiller()) {
#ifdef DEBUG
// We need to find a FixedArrayBase map after walking the fillers.
while (current->IsFiller()) {
- Address next = reinterpret_cast<Address>(current);
+ Address next = current->ptr();
if (current->map() == ReadOnlyRoots(heap_).one_pointer_filler_map()) {
- next += kPointerSize;
+ next += kTaggedSize;
} else if (current->map() ==
ReadOnlyRoots(heap_).two_pointer_filler_map()) {
- next += 2 * kPointerSize;
+ next += 2 * kTaggedSize;
} else {
next += current->Size();
}
- current = reinterpret_cast<HeapObject*>(next);
+ current = HeapObject::cast(Object(next));
}
DCHECK(current->IsFixedArrayBase());
#endif // DEBUG
- *p = nullptr;
+ p.store(Smi::kZero);
}
}
@@ -3792,13 +3834,9 @@ void Heap::IterateStrongRoots(RootVisitor* v, VisitMode mode) {
const bool isMinorGC = mode == VISIT_ALL_IN_SCAVENGE ||
mode == VISIT_ALL_IN_MINOR_MC_MARK ||
mode == VISIT_ALL_IN_MINOR_MC_UPDATE;
- // Garbage collection can skip over the read-only roots.
- const bool isGC = mode != VISIT_ALL && mode != VISIT_FOR_SERIALIZATION &&
- mode != VISIT_ONLY_STRONG_FOR_SERIALIZATION;
- Object** start =
- isGC ? roots_.read_only_roots_end() : roots_.strong_roots_begin();
- v->VisitRootPointers(Root::kStrongRootList, nullptr, start,
- roots_.strong_roots_end());
+ v->VisitRootPointers(Root::kStrongRootList, nullptr,
+ roots_table().strong_roots_begin(),
+ roots_table().strong_roots_end());
v->Synchronize(VisitorSynchronization::kStrongRootList);
isolate_->bootstrapper()->Iterate(v);
@@ -3826,8 +3864,17 @@ void Heap::IterateStrongRoots(RootVisitor* v, VisitMode mode) {
if (!isMinorGC) {
IterateBuiltins(v);
v->Synchronize(VisitorSynchronization::kBuiltins);
- isolate_->interpreter()->IterateDispatchTable(v);
- v->Synchronize(VisitorSynchronization::kDispatchTable);
+
+ // The dispatch table is set up directly from the builtins using
+ // InitializeDispatchTable, so there is no need to iterate to create it.
+ if (mode != VISIT_FOR_SERIALIZATION) {
+ // Currently we iterate the dispatch table to update pointers to possibly
+ // moved Code objects for bytecode handlers.
+ // TODO(v8:6666): Remove iteration once builtins are embedded (and thus
+ // immovable) in every build configuration.
+ isolate_->interpreter()->IterateDispatchTable(v);
+ v->Synchronize(VisitorSynchronization::kDispatchTable);
+ }
}
// Iterate over global handles.
@@ -3837,19 +3884,15 @@ void Heap::IterateStrongRoots(RootVisitor* v, VisitMode mode) {
// global handles need to be added manually.
break;
case VISIT_ONLY_STRONG:
- case VISIT_ONLY_STRONG_FOR_SERIALIZATION:
isolate_->global_handles()->IterateStrongRoots(v);
break;
case VISIT_ALL_IN_SCAVENGE:
- isolate_->global_handles()->IterateNewSpaceStrongAndDependentRoots(v);
- break;
case VISIT_ALL_IN_MINOR_MC_MARK:
- // Global handles are processed manually by the minor MC.
+ isolate_->global_handles()->IterateNewSpaceStrongAndDependentRoots(v);
break;
case VISIT_ALL_IN_MINOR_MC_UPDATE:
- // Global handles are processed manually by the minor MC.
+ isolate_->global_handles()->IterateAllNewSpaceRoots(v);
break;
- case VISIT_ALL_BUT_READ_ONLY:
case VISIT_ALL_IN_SWEEP_NEWSPACE:
case VISIT_ALL:
isolate_->global_handles()->IterateAllRoots(v);
@@ -3878,11 +3921,21 @@ void Heap::IterateStrongRoots(RootVisitor* v, VisitMode mode) {
}
v->Synchronize(VisitorSynchronization::kStrongRoots);
- // Iterate over the partial snapshot cache unless serializing.
+ // Iterate over pending Microtasks stored in MicrotaskQueues.
+ MicrotaskQueue* default_microtask_queue = isolate_->default_microtask_queue();
+ if (default_microtask_queue) {
+ MicrotaskQueue* microtask_queue = default_microtask_queue;
+ do {
+ microtask_queue->IterateMicrotasks(v);
+ microtask_queue = microtask_queue->next();
+ } while (microtask_queue != default_microtask_queue);
+ }
+
+ // Iterate over the partial snapshot cache unless serializing or
+ // deserializing.
if (mode != VISIT_FOR_SERIALIZATION) {
SerializerDeserializer::Iterate(isolate_, v);
- // We don't do a v->Synchronize call here because the serializer and the
- // deserializer are deliberately out of sync here.
+ v->Synchronize(VisitorSynchronization::kPartialSnapshotCache);
}
}
@@ -3892,8 +3945,18 @@ void Heap::IterateWeakGlobalHandles(RootVisitor* v) {
void Heap::IterateBuiltins(RootVisitor* v) {
for (int i = 0; i < Builtins::builtin_count; i++) {
- v->VisitRootPointer(Root::kBuiltins, Builtins::name(i), &builtins_[i]);
+ v->VisitRootPointer(Root::kBuiltins, Builtins::name(i),
+ FullObjectSlot(builtin_address(i)));
}
+#ifdef V8_EMBEDDED_BUILTINS
+ // The entry table does not need to be updated if all builtins are embedded.
+ STATIC_ASSERT(Builtins::AllBuiltinsAreIsolateIndependent());
+#else
+ // If builtins are not embedded, they may move and thus the entry table must
+ // be updated.
+ // TODO(v8:6666): Remove once builtins are embedded unconditionally.
+ Builtins::UpdateBuiltinEntryTable(isolate());
+#endif // V8_EMBEDDED_BUILTINS
}
// TODO(1236194): Since the heap size is configurable on the command line
@@ -4031,6 +4094,7 @@ void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
*stats->map_space_size = map_space_->SizeOfObjects();
*stats->map_space_capacity = map_space_->Capacity();
*stats->lo_space_size = lo_space_->Size();
+ *stats->code_lo_space_size = code_lo_space_->Size();
isolate_->global_handles()->RecordStats(stats);
*stats->memory_allocator_size = memory_allocator()->Size();
*stats->memory_allocator_capacity =
@@ -4040,7 +4104,7 @@ void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
*stats->malloced_peak_memory = isolate_->allocator()->GetMaxMemoryUsage();
if (take_snapshot) {
HeapIterator iterator(this);
- for (HeapObject* obj = iterator.next(); obj != nullptr;
+ for (HeapObject obj = iterator.next(); !obj.is_null();
obj = iterator.next()) {
InstanceType type = obj->map()->instance_type();
DCHECK(0 <= type && type <= LAST_TYPE);
@@ -4072,9 +4136,14 @@ size_t Heap::OldGenerationSizeOfObjects() {
}
uint64_t Heap::PromotedExternalMemorySize() {
- if (external_memory_ <= external_memory_at_last_mark_compact_) return 0;
- return static_cast<uint64_t>(external_memory_ -
- external_memory_at_last_mark_compact_);
+ IsolateData* isolate_data = isolate()->isolate_data();
+ if (isolate_data->external_memory_ <=
+ isolate_data->external_memory_at_last_mark_compact_) {
+ return 0;
+ }
+ return static_cast<uint64_t>(
+ isolate_data->external_memory_ -
+ isolate_data->external_memory_at_last_mark_compact_);
}
bool Heap::ShouldOptimizeForLoadTime() {
@@ -4155,7 +4224,8 @@ Heap::IncrementalMarkingLimit Heap::IncrementalMarkingLimitReached() {
if (FLAG_stress_marking > 0) {
double gained_since_last_gc =
PromotedSinceLastGC() +
- (external_memory_ - external_memory_at_last_mark_compact_);
+ (isolate()->isolate_data()->external_memory_ -
+ isolate()->isolate_data()->external_memory_at_last_mark_compact_);
double size_before_gc =
OldGenerationObjectsAndPromotedExternalMemorySize() -
gained_since_last_gc;
@@ -4225,18 +4295,17 @@ void Heap::DisableInlineAllocation() {
}
}
-HeapObject* Heap::EnsureImmovableCode(HeapObject* heap_object,
- int object_size) {
+HeapObject Heap::EnsureImmovableCode(HeapObject heap_object, int object_size) {
// Code objects which should stay at a fixed address are allocated either
// in the first page of code space, in large object space, or (during
// snapshot creation) the containing page is marked as immovable.
- DCHECK(heap_object);
+ DCHECK(!heap_object.is_null());
DCHECK(code_space_->Contains(heap_object));
DCHECK_GE(object_size, 0);
if (!Heap::IsImmovable(heap_object)) {
if (isolate()->serializer_enabled() ||
code_space_->first_page()->Contains(heap_object->address())) {
- MemoryChunk::FromAddress(heap_object->address())->MarkNeverEvacuate();
+ MemoryChunk::FromHeapObject(heap_object)->MarkNeverEvacuate();
} else {
// Discard the first code allocation, which was on a page where it could
// be moved.
@@ -4251,9 +4320,9 @@ HeapObject* Heap::EnsureImmovableCode(HeapObject* heap_object,
return heap_object;
}
-HeapObject* Heap::AllocateRawWithLightRetry(int size, AllocationSpace space,
- AllocationAlignment alignment) {
- HeapObject* result;
+HeapObject Heap::AllocateRawWithLightRetry(int size, AllocationSpace space,
+ AllocationAlignment alignment) {
+ HeapObject result;
AllocationResult alloc = AllocateRaw(size, space, alignment);
if (alloc.To(&result)) {
DCHECK(result != ReadOnlyRoots(this).exception());
@@ -4269,14 +4338,14 @@ HeapObject* Heap::AllocateRawWithLightRetry(int size, AllocationSpace space,
return result;
}
}
- return nullptr;
+ return HeapObject();
}
-HeapObject* Heap::AllocateRawWithRetryOrFail(int size, AllocationSpace space,
- AllocationAlignment alignment) {
+HeapObject Heap::AllocateRawWithRetryOrFail(int size, AllocationSpace space,
+ AllocationAlignment alignment) {
AllocationResult alloc;
- HeapObject* result = AllocateRawWithLightRetry(size, space, alignment);
- if (result) return result;
+ HeapObject result = AllocateRawWithLightRetry(size, space, alignment);
+ if (!result.is_null()) return result;
isolate()->counters()->gc_last_resort_from_handles()->Increment();
CollectAllAvailableGarbage(GarbageCollectionReason::kLastResort);
@@ -4290,14 +4359,14 @@ HeapObject* Heap::AllocateRawWithRetryOrFail(int size, AllocationSpace space,
}
// TODO(1181417): Fix this.
FatalProcessOutOfMemory("CALL_AND_RETRY_LAST");
- return nullptr;
+ return HeapObject();
}
// TODO(jkummerow): Refactor this. AllocateRaw should take an "immovability"
// parameter and just do what's necessary.
-HeapObject* Heap::AllocateRawCodeInLargeObjectSpace(int size) {
- AllocationResult alloc = lo_space()->AllocateRaw(size, EXECUTABLE);
- HeapObject* result;
+HeapObject Heap::AllocateRawCodeInLargeObjectSpace(int size) {
+ AllocationResult alloc = code_lo_space()->AllocateRaw(size);
+ HeapObject result;
if (alloc.To(&result)) {
DCHECK(result != ReadOnlyRoots(this).exception());
return result;
@@ -4306,7 +4375,7 @@ HeapObject* Heap::AllocateRawCodeInLargeObjectSpace(int size) {
for (int i = 0; i < 2; i++) {
CollectGarbage(alloc.RetrySpace(),
GarbageCollectionReason::kAllocationFailure);
- alloc = lo_space()->AllocateRaw(size, EXECUTABLE);
+ alloc = code_lo_space()->AllocateRaw(size);
if (alloc.To(&result)) {
DCHECK(result != ReadOnlyRoots(this).exception());
return result;
@@ -4316,7 +4385,7 @@ HeapObject* Heap::AllocateRawCodeInLargeObjectSpace(int size) {
CollectAllAvailableGarbage(GarbageCollectionReason::kLastResort);
{
AlwaysAllocateScope scope(isolate());
- alloc = lo_space()->AllocateRaw(size, EXECUTABLE);
+ alloc = code_lo_space()->AllocateRaw(size);
}
if (alloc.To(&result)) {
DCHECK(result != ReadOnlyRoots(this).exception());
@@ -4324,7 +4393,7 @@ HeapObject* Heap::AllocateRawCodeInLargeObjectSpace(int size) {
}
// TODO(1181417): Fix this.
FatalProcessOutOfMemory("CALL_AND_RETRY_LAST");
- return nullptr;
+ return HeapObject();
}
void Heap::SetUp() {
@@ -4360,16 +4429,15 @@ void Heap::SetUp() {
new IncrementalMarking(this, mark_compact_collector_->marking_worklist(),
mark_compact_collector_->weak_objects());
- if (FLAG_concurrent_marking) {
+ if (FLAG_concurrent_marking || FLAG_parallel_marking) {
MarkCompactCollector::MarkingWorklist* marking_worklist =
mark_compact_collector_->marking_worklist();
concurrent_marking_ = new ConcurrentMarking(
- this, marking_worklist->shared(), marking_worklist->bailout(),
- marking_worklist->on_hold(), mark_compact_collector_->weak_objects(),
- marking_worklist->embedder());
+ this, marking_worklist->shared(), marking_worklist->on_hold(),
+ mark_compact_collector_->weak_objects(), marking_worklist->embedder());
} else {
- concurrent_marking_ = new ConcurrentMarking(this, nullptr, nullptr, nullptr,
- nullptr, nullptr);
+ concurrent_marking_ =
+ new ConcurrentMarking(this, nullptr, nullptr, nullptr, nullptr);
}
for (int i = FIRST_SPACE; i <= LAST_SPACE; i++) {
@@ -4385,6 +4453,7 @@ void Heap::SetUp() {
space_[MAP_SPACE] = map_space_ = new MapSpace(this);
space_[LO_SPACE] = lo_space_ = new LargeObjectSpace(this);
space_[NEW_LO_SPACE] = new_lo_space_ = new NewLargeObjectSpace(this);
+ space_[CODE_LO_SPACE] = code_lo_space_ = new CodeLargeObjectSpace(this);
for (int i = 0; i < static_cast<int>(v8::Isolate::kUseCounterFeatureCount);
i++) {
@@ -4404,7 +4473,6 @@ void Heap::SetUp() {
live_object_stats_ = new ObjectStats(this);
dead_object_stats_ = new ObjectStats(this);
}
- scavenge_job_ = new ScavengeJob();
local_embedder_heap_tracer_ = new LocalEmbedderHeapTracer(isolate());
LOG(isolate_, IntPtrTEvent("heap-capacity", Capacity()));
@@ -4419,9 +4487,12 @@ void Heap::SetUp() {
}
#endif // ENABLE_MINOR_MC
- idle_scavenge_observer_ = new IdleScavengeObserver(
- *this, ScavengeJob::kBytesAllocatedBeforeNextIdleTask);
- new_space()->AddAllocationObserver(idle_scavenge_observer_);
+ if (FLAG_idle_time_scavenge) {
+ scavenge_job_ = new ScavengeJob();
+ idle_scavenge_observer_ = new IdleScavengeObserver(
+ *this, ScavengeJob::kBytesAllocatedBeforeNextIdleTask);
+ new_space()->AddAllocationObserver(idle_scavenge_observer_);
+ }
SetGetExternallyAllocatedMemoryInBytesCallback(
DefaultGetExternallyAllocatedMemoryInBytesCallback);
@@ -4438,11 +4509,10 @@ void Heap::SetUp() {
}
write_protect_code_memory_ = FLAG_write_protect_code_memory;
-
- external_reference_table_.Init(isolate_);
}
void Heap::InitializeHashSeed() {
+ DCHECK(!deserialization_complete_);
uint64_t new_hash_seed;
if (FLAG_hash_seed == 0) {
int64_t rnd = isolate()->random_number_generator()->NextInt64();
@@ -4450,7 +4520,8 @@ void Heap::InitializeHashSeed() {
} else {
new_hash_seed = static_cast<uint64_t>(FLAG_hash_seed);
}
- hash_seed()->copy_in(0, reinterpret_cast<byte*>(&new_hash_seed), kInt64Size);
+ ReadOnlyRoots(this).hash_seed()->copy_in(
+ 0, reinterpret_cast<byte*>(&new_hash_seed), kInt64Size);
}
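
The seed is now fetched through ReadOnlyRoots, but the policy is unchanged: by default eight random bytes from the per-isolate RNG are copied into the hash_seed ByteArray, while a non-zero --hash-seed flag pins the seed so that string hashing, and hence hash-table iteration order, is reproducible between runs. A sketch of the two paths:

    // FLAG_hash_seed == 0 (default): seed = random NextInt64().
    // FLAG_hash_seed == N:           seed = N; e.g. running d8 with
    //                                --hash-seed=42 (assumed spelling)
    //                                gives deterministic hashing.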
void Heap::SetStackLimits() {
@@ -4461,15 +4532,15 @@ void Heap::SetStackLimits() {
// Set up the special root array entries containing the stack limits.
  // These are actually addresses, but the tag makes the GC ignore them.
- roots_[RootIndex::kStackLimit] = reinterpret_cast<Object*>(
- (isolate_->stack_guard()->jslimit() & ~kSmiTagMask) | kSmiTag);
- roots_[RootIndex::kRealStackLimit] = reinterpret_cast<Object*>(
- (isolate_->stack_guard()->real_jslimit() & ~kSmiTagMask) | kSmiTag);
+ roots_table()[RootIndex::kStackLimit] =
+ (isolate_->stack_guard()->jslimit() & ~kSmiTagMask) | kSmiTag;
+ roots_table()[RootIndex::kRealStackLimit] =
+ (isolate_->stack_guard()->real_jslimit() & ~kSmiTagMask) | kSmiTag;
}
void Heap::ClearStackLimits() {
- roots_[RootIndex::kStackLimit] = Smi::kZero;
- roots_[RootIndex::kRealStackLimit] = Smi::kZero;
+ roots_table()[RootIndex::kStackLimit] = kNullAddress;
+ roots_table()[RootIndex::kRealStackLimit] = kNullAddress;
}
int Heap::NextAllocationTimeout(int current_timeout) {
@@ -4520,6 +4591,26 @@ void Heap::NotifyDeserializationComplete() {
deserialization_complete_ = true;
}
+void Heap::NotifyBootstrapComplete() {
+ // This function is invoked for each native context creation. We are
+ // interested only in the first native context.
+ if (old_generation_capacity_after_bootstrap_ == 0) {
+ old_generation_capacity_after_bootstrap_ = OldGenerationCapacity();
+ }
+}
+
+void Heap::NotifyOldGenerationExpansion() {
+ const size_t kMemoryReducerActivationThreshold = 1 * MB;
+ if (old_generation_capacity_after_bootstrap_ && ms_count_ == 0 &&
+ OldGenerationCapacity() >= old_generation_capacity_after_bootstrap_ +
+ kMemoryReducerActivationThreshold) {
+ MemoryReducer::Event event;
+ event.type = MemoryReducer::kPossibleGarbage;
+ event.time_ms = MonotonicallyIncreasingTimeInMs();
+ memory_reducer()->NotifyPossibleGarbage(event);
+ }
+}
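
A worked example of the new activation rule, with assumed figures: if the first native context finished bootstrapping at an old-generation capacity of 16 MB, then the first expansion to 17 MB or beyond before any mark-compact has run (ms_count_ == 0) nudges the memory reducer:

    // old_generation_capacity_after_bootstrap_ = 16 MB  (assumed)
    // kMemoryReducerActivationThreshold        =  1 MB
    // OldGenerationCapacity() >= 17 MB  &&  ms_count_ == 0
    //   => memory_reducer()->NotifyPossibleGarbage(event)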
+
void Heap::SetEmbedderHeapTracer(EmbedderHeapTracer* tracer) {
DCHECK_EQ(gc_state_, HeapState::NOT_IN_GC);
local_embedder_heap_tracer()->SetRemoteTracer(tracer);
@@ -4529,25 +4620,12 @@ EmbedderHeapTracer* Heap::GetEmbedderHeapTracer() const {
return local_embedder_heap_tracer()->remote_tracer();
}
-void Heap::TracePossibleWrapper(JSObject* js_object) {
- DCHECK(js_object->IsApiWrapper());
- if (js_object->GetEmbedderFieldCount() >= 2 &&
- js_object->GetEmbedderField(0) &&
- js_object->GetEmbedderField(0) != ReadOnlyRoots(this).undefined_value() &&
- js_object->GetEmbedderField(1) != ReadOnlyRoots(this).undefined_value()) {
- DCHECK_EQ(0,
- reinterpret_cast<intptr_t>(js_object->GetEmbedderField(0)) % 2);
- local_embedder_heap_tracer()->AddWrapperToTrace(std::pair<void*, void*>(
- reinterpret_cast<void*>(js_object->GetEmbedderField(0)),
- reinterpret_cast<void*>(js_object->GetEmbedderField(1))));
- }
-}
-
-void Heap::RegisterExternallyReferencedObject(Object** object) {
+void Heap::RegisterExternallyReferencedObject(Address* location) {
  // The embedder is not aware of whether numbers are materialized as heap
  // objects or are just passed around as Smis.
- if (!(*object)->IsHeapObject()) return;
- HeapObject* heap_object = HeapObject::cast(*object);
+ Object object(*location);
+ if (!object->IsHeapObject()) return;
+ HeapObject heap_object = HeapObject::cast(object);
DCHECK(Contains(heap_object));
if (FLAG_incremental_marking_wrappers && incremental_marking()->IsMarking()) {
incremental_marking()->WhiteToGreyAndPush(heap_object);
@@ -4582,9 +4660,13 @@ void Heap::TearDown() {
}
}
- new_space()->RemoveAllocationObserver(idle_scavenge_observer_);
- delete idle_scavenge_observer_;
- idle_scavenge_observer_ = nullptr;
+ if (FLAG_idle_time_scavenge) {
+ new_space()->RemoveAllocationObserver(idle_scavenge_observer_);
+ delete idle_scavenge_observer_;
+ idle_scavenge_observer_ = nullptr;
+ delete scavenge_job_;
+ scavenge_job_ = nullptr;
+ }
if (FLAG_stress_marking > 0) {
RemoveAllocationObserversFromAllSpaces(stress_marking_observer_,
@@ -4655,11 +4737,6 @@ void Heap::TearDown() {
delete local_embedder_heap_tracer_;
local_embedder_heap_tracer_ = nullptr;
- delete scavenge_job_;
- scavenge_job_ = nullptr;
-
- isolate_->global_handles()->TearDown();
-
external_string_table_.TearDown();
// Tear down all ArrayBuffers before tearing down the heap since their
@@ -4760,7 +4837,7 @@ Handle<WeakArrayList> CompactWeakArrayList(Heap* heap,
// fill in the new array.
int copy_to = 0;
for (int i = 0; i < array->length(); i++) {
- MaybeObject* element = array->Get(i);
+ MaybeObject element = array->Get(i);
if (element->IsCleared()) continue;
new_array->Set(copy_to++, element);
}
@@ -4775,9 +4852,9 @@ void Heap::CompactWeakArrayLists(PretenureFlag pretenure) {
std::vector<Handle<PrototypeInfo>> prototype_infos;
{
HeapIterator iterator(this);
- for (HeapObject* o = iterator.next(); o != nullptr; o = iterator.next()) {
+ for (HeapObject o = iterator.next(); !o.is_null(); o = iterator.next()) {
if (o->IsPrototypeInfo()) {
- PrototypeInfo* prototype_info = PrototypeInfo::cast(o);
+ PrototypeInfo prototype_info = PrototypeInfo::cast(o);
if (prototype_info->prototype_users()->IsWeakArrayList()) {
prototype_infos.emplace_back(handle(prototype_info, isolate()));
}
@@ -4790,7 +4867,7 @@ void Heap::CompactWeakArrayLists(PretenureFlag pretenure) {
DCHECK_IMPLIES(pretenure == TENURED,
InOldSpace(*array) ||
*array == ReadOnlyRoots(this).empty_weak_array_list());
- WeakArrayList* new_array = PrototypeUsers::Compact(
+ WeakArrayList new_array = PrototypeUsers::Compact(
array, this, JSObject::PrototypeRegistryCompactionCallback, pretenure);
prototype_info->set_prototype_users(new_array);
}
@@ -4827,21 +4904,21 @@ void Heap::AddRetainedMap(Handle<Map> map) {
map->set_is_in_retained_map_list(true);
}
-void Heap::CompactRetainedMaps(WeakArrayList* retained_maps) {
+void Heap::CompactRetainedMaps(WeakArrayList retained_maps) {
DCHECK_EQ(retained_maps, this->retained_maps());
int length = retained_maps->length();
int new_length = 0;
int new_number_of_disposed_maps = 0;
// This loop compacts the array by removing cleared weak cells.
for (int i = 0; i < length; i += 2) {
- MaybeObject* maybe_object = retained_maps->Get(i);
+ MaybeObject maybe_object = retained_maps->Get(i);
if (maybe_object->IsCleared()) {
continue;
}
DCHECK(maybe_object->IsWeak());
- MaybeObject* age = retained_maps->Get(i + 1);
+ MaybeObject age = retained_maps->Get(i + 1);
DCHECK(age->IsSmi());
if (i != new_length) {
retained_maps->Set(new_length, maybe_object);
@@ -4853,7 +4930,7 @@ void Heap::CompactRetainedMaps(WeakArrayList* retained_maps) {
new_length += 2;
}
number_of_disposed_maps_ = new_number_of_disposed_maps;
- HeapObject* undefined = ReadOnlyRoots(this).undefined_value();
+ HeapObject undefined = ReadOnlyRoots(this).undefined_value();
for (int i = new_length; i < length; i++) {
retained_maps->Set(i, HeapObjectReference::Strong(undefined));
}
@@ -4868,11 +4945,11 @@ void Heap::FatalProcessOutOfMemory(const char* location) {
class PrintHandleVisitor : public RootVisitor {
public:
- void VisitRootPointers(Root root, const char* description, Object** start,
- Object** end) override {
- for (Object** p = start; p < end; p++)
- PrintF(" handle %p to %p\n", reinterpret_cast<void*>(p),
- reinterpret_cast<void*>(*p));
+ void VisitRootPointers(Root root, const char* description,
+ FullObjectSlot start, FullObjectSlot end) override {
+ for (FullObjectSlot p = start; p < end; ++p)
+ PrintF(" handle %p to %p\n", p.ToVoidPtr(),
+ reinterpret_cast<void*>((*p).ptr()));
}
};
@@ -4891,8 +4968,8 @@ class CheckHandleCountVisitor : public RootVisitor {
~CheckHandleCountVisitor() override {
CHECK_GT(HandleScope::kCheckHandleThreshold, handle_count_);
}
- void VisitRootPointers(Root root, const char* description, Object** start,
- Object** end) override {
+ void VisitRootPointers(Root root, const char* description,
+ FullObjectSlot start, FullObjectSlot end) override {
handle_count_ += end - start;
}
@@ -4920,25 +4997,23 @@ Address Heap::store_buffer_overflow_function_address() {
return FUNCTION_ADDR(StoreBuffer::StoreBufferOverflow);
}
-void Heap::ClearRecordedSlot(HeapObject* object, Object** slot) {
- Address slot_addr = reinterpret_cast<Address>(slot);
- Page* page = Page::FromAddress(slot_addr);
+void Heap::ClearRecordedSlot(HeapObject object, ObjectSlot slot) {
+ Page* page = Page::FromAddress(slot.address());
if (!page->InNewSpace()) {
DCHECK_EQ(page->owner()->identity(), OLD_SPACE);
- store_buffer()->DeleteEntry(slot_addr);
+ store_buffer()->DeleteEntry(slot.address());
}
}
#ifdef DEBUG
-void Heap::VerifyClearedSlot(HeapObject* object, Object** slot) {
+void Heap::VerifyClearedSlot(HeapObject object, ObjectSlot slot) {
if (InNewSpace(object)) return;
- Address slot_addr = reinterpret_cast<Address>(slot);
- Page* page = Page::FromAddress(slot_addr);
+ Page* page = Page::FromAddress(slot.address());
DCHECK_EQ(page->owner()->identity(), OLD_SPACE);
store_buffer()->MoveAllEntriesToRememberedSet();
- CHECK(!RememberedSet<OLD_TO_NEW>::Contains(page, slot_addr));
+ CHECK(!RememberedSet<OLD_TO_NEW>::Contains(page, slot.address()));
// Old to old slots are filtered with invalidated slots.
- CHECK_IMPLIES(RememberedSet<OLD_TO_OLD>::Contains(page, slot_addr),
+ CHECK_IMPLIES(RememberedSet<OLD_TO_OLD>::Contains(page, slot.address()),
page->RegisteredObjectWithInvalidatedSlots(object));
}
#endif
@@ -4987,7 +5062,7 @@ Space* SpaceIterator::next() {
class HeapObjectsFilter {
public:
virtual ~HeapObjectsFilter() = default;
- virtual bool SkipObject(HeapObject* object) = 0;
+ virtual bool SkipObject(HeapObject object) = 0;
};
@@ -5004,18 +5079,18 @@ class UnreachableObjectsFilter : public HeapObjectsFilter {
}
}
- bool SkipObject(HeapObject* object) override {
+ bool SkipObject(HeapObject object) override {
if (object->IsFiller()) return true;
- MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
+ MemoryChunk* chunk = MemoryChunk::FromHeapObject(object);
if (reachable_.count(chunk) == 0) return true;
return reachable_[chunk]->count(object) == 0;
}
private:
- bool MarkAsReachable(HeapObject* object) {
- MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
+ bool MarkAsReachable(HeapObject object) {
+ MemoryChunk* chunk = MemoryChunk::FromHeapObject(object);
if (reachable_.count(chunk) == 0) {
- reachable_[chunk] = new std::unordered_set<HeapObject*>();
+ reachable_[chunk] = new std::unordered_set<HeapObject, Object::Hasher>();
}
if (reachable_[chunk]->count(object)) return false;
reachable_[chunk]->insert(object);
@@ -5027,64 +5102,82 @@ class UnreachableObjectsFilter : public HeapObjectsFilter {
explicit MarkingVisitor(UnreachableObjectsFilter* filter)
: filter_(filter) {}
- void VisitPointers(HeapObject* host, Object** start,
- Object** end) override {
- MarkPointers(reinterpret_cast<MaybeObject**>(start),
- reinterpret_cast<MaybeObject**>(end));
+ void VisitPointers(HeapObject host, ObjectSlot start,
+ ObjectSlot end) override {
+ MarkPointers(MaybeObjectSlot(start), MaybeObjectSlot(end));
}
- void VisitPointers(HeapObject* host, MaybeObject** start,
- MaybeObject** end) final {
+ void VisitPointers(HeapObject host, MaybeObjectSlot start,
+ MaybeObjectSlot end) final {
MarkPointers(start, end);
}
- void VisitRootPointers(Root root, const char* description, Object** start,
- Object** end) override {
- MarkPointers(reinterpret_cast<MaybeObject**>(start),
- reinterpret_cast<MaybeObject**>(end));
+ void VisitCodeTarget(Code host, RelocInfo* rinfo) final {
+ Code target = Code::GetCodeFromTargetAddress(rinfo->target_address());
+ MarkHeapObject(target);
+ }
+ void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) final {
+ MarkHeapObject(rinfo->target_object());
+ }
+
+ void VisitRootPointers(Root root, const char* description,
+ FullObjectSlot start, FullObjectSlot end) override {
+ MarkPointersImpl(start, end);
}
void TransitiveClosure() {
while (!marking_stack_.empty()) {
- HeapObject* obj = marking_stack_.back();
+ HeapObject obj = marking_stack_.back();
marking_stack_.pop_back();
obj->Iterate(this);
}
}
private:
- void MarkPointers(MaybeObject** start, MaybeObject** end) {
+ void MarkPointers(MaybeObjectSlot start, MaybeObjectSlot end) {
+ MarkPointersImpl(start, end);
+ }
+
+ template <typename TSlot>
+ V8_INLINE void MarkPointersImpl(TSlot start, TSlot end) {
// Treat weak references as strong.
- for (MaybeObject** p = start; p < end; p++) {
- HeapObject* heap_object;
- if ((*p)->GetHeapObject(&heap_object)) {
- if (filter_->MarkAsReachable(heap_object)) {
- marking_stack_.push_back(heap_object);
- }
+ for (TSlot p = start; p < end; ++p) {
+ typename TSlot::TObject object = *p;
+ HeapObject heap_object;
+ if (object.GetHeapObject(&heap_object)) {
+ MarkHeapObject(heap_object);
}
}
}
+
+ V8_INLINE void MarkHeapObject(HeapObject heap_object) {
+ if (filter_->MarkAsReachable(heap_object)) {
+ marking_stack_.push_back(heap_object);
+ }
+ }
+
UnreachableObjectsFilter* filter_;
- std::vector<HeapObject*> marking_stack_;
+ std::vector<HeapObject> marking_stack_;
};
friend class MarkingVisitor;
void MarkReachableObjects() {
MarkingVisitor visitor(this);
- heap_->IterateRoots(&visitor, VISIT_ALL_BUT_READ_ONLY);
+ heap_->IterateRoots(&visitor, VISIT_ALL);
visitor.TransitiveClosure();
}
Heap* heap_;
DisallowHeapAllocation no_allocation_;
- std::unordered_map<MemoryChunk*, std::unordered_set<HeapObject*>*> reachable_;
+ std::unordered_map<MemoryChunk*,
+ std::unordered_set<HeapObject, Object::Hasher>*>
+ reachable_;
};
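
Because HeapObject is now a value type rather than a pointer, the reachability sets above need an explicit hash functor where std::unordered_set<HeapObject*> previously hashed the pointer for free. A hypothetical shape of Object::Hasher (the real definition lives with the Object class, not in this diff):

    struct Hasher {
      size_t operator()(HeapObject o) const {
        // Hash the tagged address that the value-type handle wraps.
        return std::hash<Address>()(o.ptr());
      }
    };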
HeapIterator::HeapIterator(Heap* heap,
HeapIterator::HeapObjectsFiltering filtering)
- : no_heap_allocation_(),
- heap_(heap),
+ : heap_(heap),
filtering_(filtering),
filter_(nullptr),
space_iterator_(nullptr),
@@ -5117,38 +5210,37 @@ HeapIterator::~HeapIterator() {
delete filter_;
}
-
-HeapObject* HeapIterator::next() {
+HeapObject HeapIterator::next() {
if (filter_ == nullptr) return NextObject();
- HeapObject* obj = NextObject();
- while ((obj != nullptr) && (filter_->SkipObject(obj))) obj = NextObject();
+ HeapObject obj = NextObject();
+ while (!obj.is_null() && (filter_->SkipObject(obj))) obj = NextObject();
return obj;
}
-
-HeapObject* HeapIterator::NextObject() {
+HeapObject HeapIterator::NextObject() {
// No iterator means we are done.
- if (object_iterator_.get() == nullptr) return nullptr;
+ if (object_iterator_.get() == nullptr) return HeapObject();
- if (HeapObject* obj = object_iterator_.get()->Next()) {
+ HeapObject obj = object_iterator_.get()->Next();
+ if (!obj.is_null()) {
// If the current iterator has more objects we are fine.
return obj;
} else {
  // Go through the spaces looking for one that has objects.
while (space_iterator_->has_next()) {
object_iterator_ = space_iterator_->next()->GetObjectIterator();
- if (HeapObject* obj = object_iterator_.get()->Next()) {
+ obj = object_iterator_.get()->Next();
+ if (!obj.is_null()) {
return obj;
}
}
}
// Done with the last space.
object_iterator_.reset(nullptr);
- return nullptr;
+ return HeapObject();
}
-
void Heap::UpdateTotalGCTime(double duration) {
if (FLAG_trace_gc_verbose) {
total_gc_time_ms_ += duration;
@@ -5159,7 +5251,7 @@ void Heap::ExternalStringTable::CleanUpNewSpaceStrings() {
int last = 0;
Isolate* isolate = heap_->isolate();
for (size_t i = 0; i < new_space_strings_.size(); ++i) {
- Object* o = new_space_strings_[i];
+ Object o = new_space_strings_[i];
if (o->IsTheHole(isolate)) {
continue;
}
@@ -5181,7 +5273,7 @@ void Heap::ExternalStringTable::CleanUpAll() {
int last = 0;
Isolate* isolate = heap_->isolate();
for (size_t i = 0; i < old_space_strings_.size(); ++i) {
- Object* o = old_space_strings_[i];
+ Object o = old_space_strings_[i];
if (o->IsTheHole(isolate)) {
continue;
}
@@ -5202,14 +5294,14 @@ void Heap::ExternalStringTable::CleanUpAll() {
void Heap::ExternalStringTable::TearDown() {
for (size_t i = 0; i < new_space_strings_.size(); ++i) {
- Object* o = new_space_strings_[i];
+ Object o = new_space_strings_[i];
    // Don't finalize thin strings.
if (o->IsThinString()) continue;
heap_->FinalizeExternalString(ExternalString::cast(o));
}
new_space_strings_.clear();
for (size_t i = 0; i < old_space_strings_.size(); ++i) {
- Object* o = old_space_strings_[i];
+ Object o = old_space_strings_[i];
    // Don't finalize thin strings.
if (o->IsThinString()) continue;
heap_->FinalizeExternalString(ExternalString::cast(o));
@@ -5230,7 +5322,7 @@ void Heap::RememberUnmappedPage(Address page, bool compacted) {
remembered_unmapped_pages_index_ %= kRememberedUnmappedPages;
}
-void Heap::RegisterStrongRoots(Object** start, Object** end) {
+void Heap::RegisterStrongRoots(FullObjectSlot start, FullObjectSlot end) {
StrongRootsList* list = new StrongRootsList();
list->next = strong_roots_list_;
list->start = start;
@@ -5238,8 +5330,7 @@ void Heap::RegisterStrongRoots(Object** start, Object** end) {
strong_roots_list_ = list;
}
-
-void Heap::UnregisterStrongRoots(Object** start) {
+void Heap::UnregisterStrongRoots(FullObjectSlot start) {
StrongRootsList* prev = nullptr;
StrongRootsList* list = strong_roots_list_;
while (list != nullptr) {
@@ -5258,10 +5349,52 @@ void Heap::UnregisterStrongRoots(Object** start) {
}
}
-void Heap::SetBuiltinsConstantsTable(FixedArray* cache) {
+void Heap::SetBuiltinsConstantsTable(FixedArray cache) {
set_builtins_constants_table(cache);
}
+void Heap::SetInterpreterEntryTrampolineForProfiling(Code code) {
+ DCHECK_EQ(Builtins::kInterpreterEntryTrampoline, code->builtin_index());
+ set_interpreter_entry_trampoline_for_profiling(code);
+}
+
+void Heap::AddDirtyJSWeakFactory(
+ JSWeakFactory weak_factory,
+ std::function<void(HeapObject object, ObjectSlot slot, Object target)>
+ gc_notify_updated_slot) {
+ DCHECK(dirty_js_weak_factories()->IsUndefined(isolate()) ||
+ dirty_js_weak_factories()->IsJSWeakFactory());
+ DCHECK(weak_factory->next()->IsUndefined(isolate()));
+ DCHECK(!weak_factory->scheduled_for_cleanup());
+ weak_factory->set_scheduled_for_cleanup(true);
+ weak_factory->set_next(dirty_js_weak_factories());
+ gc_notify_updated_slot(weak_factory,
+ weak_factory.RawField(JSWeakFactory::kNextOffset),
+ dirty_js_weak_factories());
+ set_dirty_js_weak_factories(weak_factory);
+ // Roots are rescanned after objects are moved, so no need to record a slot
+ // for the root pointing to the first JSWeakFactory.
+}
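
AddDirtyJSWeakFactory is an intrusive push-front: the list is threaded through each factory's next field, the head lives in a root, and the caller-supplied gc_notify_updated_slot callback gives the GC a chance to record a write-barrier slot for the pointer just written. The pattern, sketched with hypothetical names:

    // node->set_next(head);                            // next = old head
    // notify(node, node.RawField(kNextOffset), head);  // barrier for the slot
    // head = node;                                     // root now points here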
+
+void Heap::AddKeepDuringJobTarget(Handle<JSReceiver> target) {
+ DCHECK(FLAG_harmony_weak_refs);
+ DCHECK(weak_refs_keep_during_job()->IsUndefined() ||
+ weak_refs_keep_during_job()->IsOrderedHashSet());
+ Handle<OrderedHashSet> table;
+ if (weak_refs_keep_during_job()->IsUndefined(isolate())) {
+ table = isolate()->factory()->NewOrderedHashSet();
+ } else {
+ table =
+ handle(OrderedHashSet::cast(weak_refs_keep_during_job()), isolate());
+ }
+ table = OrderedHashSet::Add(isolate(), table, target);
+ set_weak_refs_keep_during_job(*table);
+}
+
+void Heap::ClearKeepDuringJobSet() {
+ set_weak_refs_keep_during_job(ReadOnlyRoots(isolate()).undefined_value());
+}
+
size_t Heap::NumberOfTrackedHeapObjectTypes() {
return ObjectStats::OBJECT_STATS_COUNT;
}
@@ -5307,10 +5440,10 @@ bool Heap::GetObjectTypeName(size_t index, const char** object_type,
size_t Heap::NumberOfNativeContexts() {
int result = 0;
- Object* context = native_contexts_list();
+ Object context = native_contexts_list();
while (!context->IsUndefined(isolate())) {
++result;
- Context* native_context = Context::cast(context);
+ Context native_context = Context::cast(context);
context = native_context->next_context_link();
}
return result;
@@ -5343,46 +5476,66 @@ const char* AllocationSpaceName(AllocationSpace space) {
return nullptr;
}
-void VerifyPointersVisitor::VisitPointers(HeapObject* host, Object** start,
- Object** end) {
- VerifyPointers(host, reinterpret_cast<MaybeObject**>(start),
- reinterpret_cast<MaybeObject**>(end));
+void VerifyPointersVisitor::VisitPointers(HeapObject host, ObjectSlot start,
+ ObjectSlot end) {
+ VerifyPointers(host, MaybeObjectSlot(start), MaybeObjectSlot(end));
}
-void VerifyPointersVisitor::VisitPointers(HeapObject* host, MaybeObject** start,
- MaybeObject** end) {
+void VerifyPointersVisitor::VisitPointers(HeapObject host,
+ MaybeObjectSlot start,
+ MaybeObjectSlot end) {
VerifyPointers(host, start, end);
}
void VerifyPointersVisitor::VisitRootPointers(Root root,
const char* description,
- Object** start, Object** end) {
- VerifyPointers(nullptr, reinterpret_cast<MaybeObject**>(start),
- reinterpret_cast<MaybeObject**>(end));
-}
-
-void VerifyPointersVisitor::VerifyPointers(HeapObject* host,
- MaybeObject** start,
- MaybeObject** end) {
- for (MaybeObject** current = start; current < end; current++) {
- HeapObject* object;
- if ((*current)->GetHeapObject(&object)) {
- CHECK(heap_->Contains(object));
- CHECK(object->map()->IsMap());
+ FullObjectSlot start,
+ FullObjectSlot end) {
+ VerifyPointersImpl(start, end);
+}
+
+void VerifyPointersVisitor::VerifyHeapObjectImpl(HeapObject heap_object) {
+ CHECK(heap_->Contains(heap_object));
+ CHECK(heap_object->map()->IsMap());
+}
+
+template <typename TSlot>
+void VerifyPointersVisitor::VerifyPointersImpl(TSlot start, TSlot end) {
+ for (TSlot slot = start; slot < end; ++slot) {
+ typename TSlot::TObject object = *slot;
+ HeapObject heap_object;
+ if (object.GetHeapObject(&heap_object)) {
+ VerifyHeapObjectImpl(heap_object);
} else {
- CHECK((*current)->IsSmi() || (*current)->IsCleared());
+ CHECK(object->IsSmi() || object->IsCleared());
}
}
}
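
VerifyPointersImpl is templated over the slot kind so one loop serves both strong ObjectSlots and weak MaybeObjectSlots; each slot type exports the object type it yields as TSlot::TObject. A toy version of that pattern (the types are illustrative, not the real V8 slot classes):

```cpp
#include <cassert>
#include <cstdint>

// A toy tagged value: low bit clear means "Smi" in this sketch.
struct Tagged {
  uintptr_t value;
  bool IsSmi() const { return (value & 1) == 0; }
};

template <typename TObjectType>
struct Slot {
  using TObject = TObjectType;  // the object type this slot kind yields
  TObject* location;
  TObject operator*() const { return *location; }
  Slot& operator++() { ++location; return *this; }
  bool operator<(const Slot& other) const { return location < other.location; }
};

// One templated loop serves every slot kind, as in VerifyPointersImpl.
template <typename TSlot>
void VerifyAllSmis(TSlot start, TSlot end) {
  for (TSlot slot = start; slot < end; ++slot) {
    typename TSlot::TObject object = *slot;
    assert(object.IsSmi());
  }
}

int main() {
  Tagged values[2] = {{2}, {4}};  // both "Smis" under the toy encoding
  VerifyAllSmis(Slot<Tagged>{values}, Slot<Tagged>{values + 2});
  return 0;
}
```
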
+void VerifyPointersVisitor::VerifyPointers(HeapObject host,
+ MaybeObjectSlot start,
+ MaybeObjectSlot end) {
+ VerifyPointersImpl(start, end);
+}
+
+void VerifyPointersVisitor::VisitCodeTarget(Code host, RelocInfo* rinfo) {
+ Code target = Code::GetCodeFromTargetAddress(rinfo->target_address());
+ VerifyHeapObjectImpl(target);
+}
+
+void VerifyPointersVisitor::VisitEmbeddedPointer(Code host, RelocInfo* rinfo) {
+ VerifyHeapObjectImpl(rinfo->target_object());
+}
+
void VerifySmisVisitor::VisitRootPointers(Root root, const char* description,
- Object** start, Object** end) {
- for (Object** current = start; current < end; current++) {
+ FullObjectSlot start,
+ FullObjectSlot end) {
+ for (FullObjectSlot current = start; current < end; ++current) {
CHECK((*current)->IsSmi());
}
}
-bool Heap::AllowedToBeMigrated(HeapObject* obj, AllocationSpace dst) {
+bool Heap::AllowedToBeMigrated(HeapObject obj, AllocationSpace dst) {
// Object migration is governed by the following rules:
//
// 1) Objects in new-space can be migrated to the old space
@@ -5397,7 +5550,7 @@ bool Heap::AllowedToBeMigrated(HeapObject* obj, AllocationSpace dst) {
// asserts here, but check everything explicitly.
if (obj->map() == ReadOnlyRoots(this).one_pointer_filler_map()) return false;
InstanceType type = obj->map()->instance_type();
- MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
+ MemoryChunk* chunk = MemoryChunk::FromHeapObject(obj);
AllocationSpace src = chunk->owner()->identity();
switch (src) {
case NEW_SPACE:
@@ -5408,6 +5561,7 @@ bool Heap::AllowedToBeMigrated(HeapObject* obj, AllocationSpace dst) {
return dst == CODE_SPACE && type == CODE_TYPE;
case MAP_SPACE:
case LO_SPACE:
+ case CODE_LO_SPACE:
case NEW_LO_SPACE:
case RO_SPACE:
return false;
@@ -5439,27 +5593,27 @@ void AllocationObserver::AllocationStep(int bytes_allocated,
namespace {
-Map* GcSafeMapOfCodeSpaceObject(HeapObject* object) {
+Map GcSafeMapOfCodeSpaceObject(HeapObject object) {
MapWord map_word = object->map_word();
return map_word.IsForwardingAddress() ? map_word.ToForwardingAddress()->map()
: map_word.ToMap();
}
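
GcSafeMapOfCodeSpaceObject has to tolerate being called mid-GC, when an object's map word may hold a forwarding address rather than a map. A simplified sketch of that lookup, using a toy low-bit "forwarded" tag rather than V8's real MapWord encoding:

```cpp
#include <cassert>
#include <cstdint>

struct Map { int instance_size; };

// Toy map word: low bit set means "forwarding address".
struct MapWord {
  uintptr_t bits;
  bool IsForwardingAddress() const { return (bits & 1) != 0; }
  Map* ToMap() const { return reinterpret_cast<Map*>(bits); }
  uintptr_t ToForwardingAddress() const { return bits & ~uintptr_t{1}; }
};

struct Object { MapWord map_word; };

Map* GcSafeMap(const Object* object) {
  MapWord w = object->map_word;
  if (!w.IsForwardingAddress()) return w.ToMap();
  // Forwarded: read the map from the object's new copy.
  return reinterpret_cast<const Object*>(w.ToForwardingAddress())
      ->map_word.ToMap();
}

int main() {
  Map map{32};
  Object moved{MapWord{reinterpret_cast<uintptr_t>(&map)}};
  Object stale{MapWord{reinterpret_cast<uintptr_t>(&moved) | 1}};
  assert(GcSafeMap(&moved) == &map);  // plain map word
  assert(GcSafeMap(&stale) == &map);  // follows the forwarding pointer
  return 0;
}
```
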
-int GcSafeSizeOfCodeSpaceObject(HeapObject* object) {
+int GcSafeSizeOfCodeSpaceObject(HeapObject object) {
return object->SizeFromMap(GcSafeMapOfCodeSpaceObject(object));
}
-Code* GcSafeCastToCode(Heap* heap, HeapObject* object, Address inner_pointer) {
- Code* code = reinterpret_cast<Code*>(object);
- DCHECK_NOT_NULL(code);
+Code GcSafeCastToCode(Heap* heap, HeapObject object, Address inner_pointer) {
+ Code code = Code::unchecked_cast(object);
+ DCHECK(!code.is_null());
DCHECK(heap->GcSafeCodeContains(code, inner_pointer));
return code;
}
} // namespace
-bool Heap::GcSafeCodeContains(HeapObject* code, Address addr) {
- Map* map = GcSafeMapOfCodeSpaceObject(code);
+bool Heap::GcSafeCodeContains(Code code, Address addr) {
+ Map map = GcSafeMapOfCodeSpaceObject(code);
DCHECK(map == ReadOnlyRoots(this).code_map());
if (InstructionStream::TryLookupCode(isolate(), addr) == code) return true;
Address start = code->address();
@@ -5467,12 +5621,12 @@ bool Heap::GcSafeCodeContains(HeapObject* code, Address addr) {
return start <= addr && addr < end;
}
-Code* Heap::GcSafeFindCodeForInnerPointer(Address inner_pointer) {
- Code* code = InstructionStream::TryLookupCode(isolate(), inner_pointer);
- if (code != nullptr) return code;
+Code Heap::GcSafeFindCodeForInnerPointer(Address inner_pointer) {
+ Code code = InstructionStream::TryLookupCode(isolate(), inner_pointer);
+ if (!code.is_null()) return code;
// Check if the inner pointer points into a large object chunk.
- LargePage* large_page = lo_space()->FindPage(inner_pointer);
+ LargePage* large_page = code_lo_space()->FindPage(inner_pointer);
if (large_page != nullptr) {
return GcSafeCastToCode(this, large_page->GetObject(), inner_pointer);
}
@@ -5495,16 +5649,17 @@ Code* Heap::GcSafeFindCodeForInnerPointer(Address inner_pointer) {
continue;
}
- HeapObject* obj = HeapObject::FromAddress(addr);
+ HeapObject obj = HeapObject::FromAddress(addr);
int obj_size = GcSafeSizeOfCodeSpaceObject(obj);
Address next_addr = addr + obj_size;
- if (next_addr > inner_pointer)
+ if (next_addr > inner_pointer) {
return GcSafeCastToCode(this, obj, inner_pointer);
+ }
addr = next_addr;
}
}
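
When the lookup tables miss, GcSafeFindCodeForInnerPointer falls back to walking the page object by object, using GC-safe sizes, until an object's extent covers the queried address. The same scan over plain integer offsets (nothing here is real V8 API):

```cpp
#include <cstdio>

struct FakeObject { int size; };  // object sizes in address units

// Walk objects from the page start until one covers inner_offset;
// mirrors the addr/next_addr loop above. Returns the index, or -1.
int FindObjectForInnerPointer(const FakeObject* objects, int count,
                              int inner_offset) {
  int addr = 0;
  for (int i = 0; i < count; i++) {
    int next_addr = addr + objects[i].size;
    if (next_addr > inner_offset) return i;
    addr = next_addr;
  }
  return -1;  // inner_offset lies past the last object
}

int main() {
  FakeObject page[] = {{16}, {32}, {8}};
  std::printf("%d\n", FindObjectForInnerPointer(page, 3, 20));  // prints 1
  return 0;
}
```
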
-void Heap::WriteBarrierForCodeSlow(Code* code) {
+void Heap::WriteBarrierForCodeSlow(Code code) {
for (RelocIterator it(code, RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT));
!it.done(); it.next()) {
GenerationalBarrierForCode(code, it.rinfo(), it.rinfo()->target_object());
@@ -5512,25 +5667,25 @@ void Heap::WriteBarrierForCodeSlow(Code* code) {
}
}
-void Heap::GenerationalBarrierSlow(HeapObject* object, Address slot,
- HeapObject* value) {
+void Heap::GenerationalBarrierSlow(HeapObject object, Address slot,
+ HeapObject value) {
Heap* heap = Heap::FromWritableHeapObject(object);
heap->store_buffer()->InsertEntry(slot);
}
-void Heap::GenerationalBarrierForElementsSlow(Heap* heap, FixedArray* array,
+void Heap::GenerationalBarrierForElementsSlow(Heap* heap, FixedArray array,
int offset, int length) {
for (int i = 0; i < length; i++) {
if (!InNewSpace(array->get(offset + i))) continue;
heap->store_buffer()->InsertEntry(
- reinterpret_cast<Address>(array->RawFieldOfElementAt(offset + i)));
+ array->RawFieldOfElementAt(offset + i).address());
}
}
-void Heap::GenerationalBarrierForCodeSlow(Code* host, RelocInfo* rinfo,
- HeapObject* object) {
+void Heap::GenerationalBarrierForCodeSlow(Code host, RelocInfo* rinfo,
+ HeapObject object) {
DCHECK(InNewSpace(object));
- Page* source_page = Page::FromAddress(reinterpret_cast<Address>(host));
+ Page* source_page = Page::FromHeapObject(host);
RelocInfo::Mode rmode = rinfo->rmode();
Address addr = rinfo->pc();
SlotType slot_type = SlotTypeForRelocInfoMode(rmode);
@@ -5543,32 +5698,52 @@ void Heap::GenerationalBarrierForCodeSlow(Code* host, RelocInfo* rinfo,
slot_type = OBJECT_SLOT;
}
}
- RememberedSet<OLD_TO_NEW>::InsertTyped(
- source_page, reinterpret_cast<Address>(host), slot_type, addr);
+ uintptr_t offset = addr - source_page->address();
+ DCHECK_LT(offset, static_cast<uintptr_t>(TypedSlotSet::kMaxOffset));
+ RememberedSet<OLD_TO_NEW>::InsertTyped(source_page, slot_type,
+ static_cast<uint32_t>(offset));
}
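
The remembered-set change above stores typed slots as 32-bit offsets from the page base instead of absolute addresses, which is why the new DCHECK bounds the offset by TypedSlotSet::kMaxOffset. A sketch of that encoding (the limit value here is an assumption for illustration):

```cpp
#include <cassert>
#include <cstdint>

// Stand-in for TypedSlotSet::kMaxOffset; the real limit differs.
constexpr uintptr_t kMaxOffset = uintptr_t{1} << 19;

uint32_t EncodeSlot(uintptr_t page_base, uintptr_t slot_addr) {
  uintptr_t offset = slot_addr - page_base;
  assert(offset < kMaxOffset);           // mirrors the DCHECK_LT above
  return static_cast<uint32_t>(offset);  // 32 bits suffice per page
}

uintptr_t DecodeSlot(uintptr_t page_base, uint32_t offset) {
  return page_base + offset;
}

int main() {
  uintptr_t base = 0x100000;
  uint32_t encoded = EncodeSlot(base, base + 0x48);
  assert(DecodeSlot(base, encoded) == base + 0x48);
  return 0;
}
```
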
-void Heap::MarkingBarrierSlow(HeapObject* object, Address slot,
- HeapObject* value) {
+void Heap::MarkingBarrierSlow(HeapObject object, Address slot,
+ HeapObject value) {
Heap* heap = Heap::FromWritableHeapObject(object);
- heap->incremental_marking()->RecordWriteSlow(
- object, reinterpret_cast<HeapObjectReference**>(slot), value);
+ heap->incremental_marking()->RecordWriteSlow(object, HeapObjectSlot(slot),
+ value);
}
-void Heap::MarkingBarrierForElementsSlow(Heap* heap, HeapObject* object) {
- if (FLAG_concurrent_marking ||
- heap->incremental_marking()->marking_state()->IsBlack(object)) {
- heap->incremental_marking()->RevisitObject(object);
+void Heap::MarkingBarrierForElementsSlow(Heap* heap, HeapObject object) {
+ IncrementalMarking::MarkingState* marking_state =
+ heap->incremental_marking()->marking_state();
+ if (!marking_state->IsBlack(object)) {
+ marking_state->WhiteToGrey(object);
+ marking_state->GreyToBlack(object);
}
+ heap->incremental_marking()->RevisitObject(object);
}
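
The rewritten element barrier force-marks the object black (White→Grey→Black) and then queues it for a revisit, so newly written elements are rescanned regardless of the object's previous color. A toy tri-color model of that sequence:

```cpp
#include <cstdio>
#include <queue>

enum class Color { kWhite, kGrey, kBlack };

struct Obj { Color color = Color::kWhite; };

std::queue<Obj*> revisit_queue;  // stand-in for the marker's worklist

void MarkingBarrierForElements(Obj* object) {
  if (object->color != Color::kBlack) {
    object->color = Color::kGrey;   // WhiteToGrey
    object->color = Color::kBlack;  // GreyToBlack
  }
  revisit_queue.push(object);       // RevisitObject: rescan all elements
}

int main() {
  Obj o;
  MarkingBarrierForElements(&o);
  std::printf("black=%d queued=%zu\n",
              static_cast<int>(o.color == Color::kBlack),
              revisit_queue.size());
  return 0;
}
```
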
-void Heap::MarkingBarrierForCodeSlow(Code* host, RelocInfo* rinfo,
- HeapObject* object) {
+void Heap::MarkingBarrierForCodeSlow(Code host, RelocInfo* rinfo,
+ HeapObject object) {
Heap* heap = Heap::FromWritableHeapObject(host);
DCHECK(heap->incremental_marking()->IsMarking());
heap->incremental_marking()->RecordWriteIntoCode(host, rinfo, object);
}
-bool Heap::PageFlagsAreConsistent(HeapObject* object) {
+void Heap::MarkingBarrierForDescriptorArraySlow(Heap* heap, HeapObject host,
+ HeapObject raw_descriptor_array,
+ int number_of_own_descriptors) {
+ DCHECK(heap->incremental_marking()->IsMarking());
+ DescriptorArray descriptor_array =
+ DescriptorArray::cast(raw_descriptor_array);
+ int16_t raw_marked = descriptor_array->raw_number_of_marked_descriptors();
+ if (NumberOfMarkedDescriptors::decode(heap->mark_compact_collector()->epoch(),
+ raw_marked) <
+ number_of_own_descriptors) {
+ heap->incremental_marking()->VisitDescriptors(host, descriptor_array,
+ number_of_own_descriptors);
+ }
+}
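
MarkingBarrierForDescriptorArraySlow only revisits descriptors beyond the count already marked in the current GC cycle; the raw counter is stamped with the collector's epoch so counts from a previous cycle read as stale. A sketch of such an epoch-tagged counter (the bit widths are assumptions, not V8's NumberOfMarkedDescriptors layout):

```cpp
#include <cassert>
#include <cstdint>

// Assumed layout: low bits hold the GC epoch, the rest the count.
constexpr int kEpochBits = 2;
constexpr uint16_t kEpochMask = (1 << kEpochBits) - 1;

uint16_t Encode(unsigned epoch, uint16_t marked) {
  return static_cast<uint16_t>((marked << kEpochBits) | (epoch & kEpochMask));
}

uint16_t Decode(unsigned current_epoch, uint16_t raw) {
  // A count stamped in an earlier cycle is stale and reads as zero.
  if ((raw & kEpochMask) != (current_epoch & kEpochMask)) return 0;
  return static_cast<uint16_t>(raw >> kEpochBits);
}

int main() {
  uint16_t raw = Encode(/*epoch=*/1, /*marked=*/5);
  assert(Decode(1, raw) == 5);  // same cycle: 5 descriptors already done
  assert(Decode(2, raw) == 0);  // next cycle: everything must be revisited
  return 0;
}
```
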
+
+bool Heap::PageFlagsAreConsistent(HeapObject object) {
Heap* heap = Heap::FromWritableHeapObject(object);
MemoryChunk* chunk = MemoryChunk::FromHeapObject(object);
heap_internals::MemoryChunk* slim_chunk =
@@ -5597,6 +5772,9 @@ static_assert(MemoryChunk::Flag::IN_TO_SPACE ==
static_assert(MemoryChunk::kFlagsOffset ==
heap_internals::MemoryChunk::kFlagsOffset,
"Flag offset inconsistent");
+static_assert(MemoryChunk::kHeapOffset ==
+ heap_internals::MemoryChunk::kHeapOffset,
+ "Heap offset inconsistent");
void Heap::SetEmbedderStackStateForNextFinalizaton(
EmbedderHeapTracer::EmbedderStackState stack_state) {
@@ -5604,5 +5782,12 @@ void Heap::SetEmbedderStackStateForNextFinalizaton(
stack_state);
}
+#ifdef DEBUG
+void Heap::IncrementObjectCounters() {
+ isolate_->counters()->objs_since_last_full()->Increment();
+ isolate_->counters()->objs_since_last_young()->Increment();
+}
+#endif // DEBUG
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/heap/heap.h b/deps/v8/src/heap/heap.h
index c99f0d424e..d75d450c23 100644
--- a/deps/v8/src/heap/heap.h
+++ b/deps/v8/src/heap/heap.h
@@ -19,11 +19,13 @@
#include "src/allocation.h"
#include "src/assert-scope.h"
#include "src/base/atomic-utils.h"
-#include "src/external-reference-table.h"
#include "src/globals.h"
#include "src/heap-symbols.h"
#include "src/objects.h"
+#include "src/objects/allocation-site.h"
#include "src/objects/fixed-array.h"
+#include "src/objects/heap-object.h"
+#include "src/objects/smi.h"
#include "src/objects/string-table.h"
#include "src/roots.h"
#include "src/visitors.h"
@@ -41,7 +43,6 @@ class HeapTester;
class TestMemoryAllocatorScope;
} // namespace heap
-class AllocationMemento;
class ObjectBoilerplateDescription;
class BytecodeArray;
class CodeDataContainer;
@@ -52,130 +53,10 @@ class JSArrayBuffer;
class ExternalString;
using v8::MemoryPressureLevel;
-// Adapts PRIVATE_SYMBOL_LIST_GENERATOR entry to IMMORTAL_IMMOVABLE_ROOT_LIST
-// entry
-#define PRIVATE_SYMBOL_LIST_TO_IMMORTAL_IMMOVABLE_LIST_ADAPTER(V, name) V(name)
-
-// Heap roots that are known to be immortal immovable, for which we can safely
-// skip write barriers. This list is not complete and has omissions.
-#define IMMORTAL_IMMOVABLE_ROOT_LIST(V) \
- V(ArgumentsMarker) \
- V(ArgumentsMarkerMap) \
- V(ArrayBufferNeuteringProtector) \
- V(ArrayIteratorProtector) \
- V(AwaitContextMap) \
- V(BigIntMap) \
- V(BlockContextMap) \
- V(ObjectBoilerplateDescriptionMap) \
- V(BooleanMap) \
- V(ByteArrayMap) \
- V(BytecodeArrayMap) \
- V(CatchContextMap) \
- V(CellMap) \
- V(CodeMap) \
- V(DebugEvaluateContextMap) \
- V(DescriptorArrayMap) \
- V(EphemeronHashTableMap) \
- V(EmptyByteArray) \
- V(EmptyDescriptorArray) \
- V(EmptyFixedArray) \
- V(EmptyFixedFloat32Array) \
- V(EmptyFixedFloat64Array) \
- V(EmptyFixedInt16Array) \
- V(EmptyFixedInt32Array) \
- V(EmptyFixedInt8Array) \
- V(EmptyFixedUint16Array) \
- V(EmptyFixedUint32Array) \
- V(EmptyFixedUint8Array) \
- V(EmptyFixedUint8ClampedArray) \
- V(EmptyOrderedHashMap) \
- V(EmptyOrderedHashSet) \
- V(EmptyPropertyCell) \
- V(EmptyScopeInfo) \
- V(EmptyScript) \
- V(EmptySloppyArgumentsElements) \
- V(EmptySlowElementDictionary) \
- V(EvalContextMap) \
- V(Exception) \
- V(FalseValue) \
- V(FixedArrayMap) \
- V(FixedCOWArrayMap) \
- V(FixedDoubleArrayMap) \
- V(ForeignMap) \
- V(FreeSpaceMap) \
- V(FunctionContextMap) \
- V(GlobalDictionaryMap) \
- V(GlobalPropertyCellMap) \
- V(HashTableMap) \
- V(HeapNumberMap) \
- V(HoleNanValue) \
- V(InfinityValue) \
- V(IsConcatSpreadableProtector) \
- V(JSMessageObjectMap) \
- V(JsConstructEntryCode) \
- V(JsEntryCode) \
- V(ManyClosuresCell) \
- V(ManyClosuresCellMap) \
- V(MetaMap) \
- V(MinusInfinityValue) \
- V(MinusZeroValue) \
- V(ModuleContextMap) \
- V(ModuleInfoMap) \
- V(MutableHeapNumberMap) \
- V(NameDictionaryMap) \
- V(NanValue) \
- V(NativeContextMap) \
- V(NoClosuresCellMap) \
- V(NoElementsProtector) \
- V(NullMap) \
- V(NullValue) \
- V(NumberDictionaryMap) \
- V(OneClosureCellMap) \
- V(OnePointerFillerMap) \
- V(OptimizedOut) \
- V(OrderedHashMapMap) \
- V(OrderedHashSetMap) \
- V(PreParsedScopeDataMap) \
- V(PropertyArrayMap) \
- V(ScopeInfoMap) \
- V(ScriptContextMap) \
- V(ScriptContextTableMap) \
- V(SelfReferenceMarker) \
- V(SharedFunctionInfoMap) \
- V(SimpleNumberDictionaryMap) \
- V(SloppyArgumentsElementsMap) \
- V(SmallOrderedHashMapMap) \
- V(SmallOrderedHashSetMap) \
- V(ArraySpeciesProtector) \
- V(TypedArraySpeciesProtector) \
- V(PromiseSpeciesProtector) \
- V(StaleRegister) \
- V(StringIteratorProtector) \
- V(StringLengthProtector) \
- V(StringTableMap) \
- V(SymbolMap) \
- V(TerminationException) \
- V(TheHoleMap) \
- V(TheHoleValue) \
- V(TransitionArrayMap) \
- V(TrueValue) \
- V(TwoPointerFillerMap) \
- V(UndefinedMap) \
- V(UndefinedValue) \
- V(UninitializedMap) \
- V(UninitializedValue) \
- V(UncompiledDataWithoutPreParsedScopeMap) \
- V(UncompiledDataWithPreParsedScopeMap) \
- V(WeakFixedArrayMap) \
- V(WeakArrayListMap) \
- V(WithContextMap) \
- V(empty_string) \
- PRIVATE_SYMBOL_LIST_GENERATOR( \
- PRIVATE_SYMBOL_LIST_TO_IMMORTAL_IMMOVABLE_LIST_ADAPTER, V)
-
class AllocationObserver;
class ArrayBufferCollector;
class ArrayBufferTracker;
+class CodeLargeObjectSpace;
class ConcurrentMarking;
class GCIdleTimeAction;
class GCIdleTimeHandler;
@@ -187,6 +68,7 @@ class HeapObjectsFilter;
class HeapStats;
class HistogramTimer;
class Isolate;
+class JSWeakFactory;
class LocalEmbedderHeapTracer;
class MemoryAllocator;
class MemoryReducer;
@@ -202,11 +84,10 @@ class ScavengerCollector;
class Space;
class StoreBuffer;
class StressScavengeObserver;
+class TimedHistogram;
class TracePossibleWrapperReporter;
class WeakObjectRetainer;
-typedef void (*ObjectSlotCallback)(HeapObject** from, HeapObject* to);
-
enum ArrayStorageAllocationMode {
DONT_INITIALIZE_ARRAY_ELEMENTS,
INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE
@@ -270,8 +151,8 @@ class AllocationResult {
return AllocationResult(space);
}
- // Implicit constructor from Object*.
- AllocationResult(Object* object) // NOLINT
+ // Implicit constructor from Object.
+ AllocationResult(Object object) // NOLINT
: object_(object) {
// AllocationResults can't return Smis, which are used to represent
// failure and the space to retry in.
@@ -281,11 +162,11 @@ class AllocationResult {
AllocationResult() : object_(Smi::FromInt(NEW_SPACE)) {}
inline bool IsRetry() { return object_->IsSmi(); }
- inline HeapObject* ToObjectChecked();
+ inline HeapObject ToObjectChecked();
inline AllocationSpace RetrySpace();
template <typename T>
- bool To(T** obj) {
+ bool To(T* obj) {
if (IsRetry()) return false;
*obj = T::cast(object_);
return true;
@@ -295,10 +176,10 @@ class AllocationResult {
explicit AllocationResult(AllocationSpace space)
: object_(Smi::FromInt(static_cast<int>(space))) {}
- Object* object_;
+ Object object_;
};
-STATIC_ASSERT(sizeof(AllocationResult) == kPointerSize);
+STATIC_ASSERT(sizeof(AllocationResult) == kSystemPointerSize);
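
AllocationResult packs its two outcomes into one tagged word: a heap object on success, or a Smi naming the space to retry in on failure, which is what the updated STATIC_ASSERT pins down. A self-contained sketch of that pattern and the To() usage idiom (toy tagging, not V8's real Smi scheme):

```cpp
#include <cassert>
#include <cstdint>

enum Space { NEW_SPACE = 0, OLD_SPACE = 1 };

class AllocResult {
 public:
  static AllocResult Retry(Space space) { return AllocResult(space); }
  static AllocResult Success(void* object) {
    // Tag bit 1 marks "heap object"; an untagged word is the retry Smi.
    return AllocResult(reinterpret_cast<uintptr_t>(object) | 1);
  }
  bool IsRetry() const { return (bits_ & 1) == 0; }
  bool To(void** obj) const {  // the To(T*) idiom from the class above
    if (IsRetry()) return false;
    *obj = reinterpret_cast<void*>(bits_ & ~uintptr_t{1});
    return true;
  }
  Space RetrySpace() const { return static_cast<Space>(bits_ >> 1); }

 private:
  explicit AllocResult(uintptr_t bits) : bits_(bits) {}
  explicit AllocResult(Space space)
      : bits_(static_cast<uintptr_t>(space) << 1) {}
  uintptr_t bits_;
};

static_assert(sizeof(AllocResult) == sizeof(void*), "one word wide");

int main() {
  int dummy = 0;
  void* obj = nullptr;
  assert(AllocResult::Success(&dummy).To(&obj) && obj == &dummy);
  assert(AllocResult::Retry(OLD_SPACE).IsRetry());
  assert(AllocResult::Retry(OLD_SPACE).RetrySpace() == OLD_SPACE);
  return 0;
}
```
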
#ifdef DEBUG
struct CommentStatistic {
@@ -327,7 +208,8 @@ class Heap {
TEAR_DOWN
};
- using PretenuringFeedbackMap = std::unordered_map<AllocationSite*, size_t>;
+ using PretenuringFeedbackMap =
+ std::unordered_map<AllocationSite, size_t, Object::Hasher>;
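
With AllocationSite now a value type wrapping a tagged address rather than a pointer, standard containers need an explicit hash, hence the Object::Hasher template argument above. A toy illustration of hashing such a value type:

```cpp
#include <cstddef>
#include <cstdint>
#include <functional>
#include <unordered_map>

struct Site {
  uintptr_t ptr;  // the tagged address is the object's identity
  bool operator==(const Site& other) const { return ptr == other.ptr; }
};

struct SiteHasher {
  std::size_t operator()(const Site& s) const {
    return std::hash<uintptr_t>()(s.ptr);  // hash the word, not the wrapper
  }
};

using PretenuringFeedback = std::unordered_map<Site, std::size_t, SiteHasher>;

int main() {
  PretenuringFeedback feedback;
  feedback[Site{0x1000}]++;  // one memento found for this site
  feedback[Site{0x1000}]++;  // value types with equal words collapse
  return feedback[Site{0x1000}] == 2 ? 0 : 1;
}
```
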
// Taking this mutex prevents the GC from entering a phase that relocates
// object references.
@@ -349,7 +231,8 @@ class Heap {
// should instead adapt its heap size based on available physical memory.
static const int kPointerMultiplier = 1;
#else
- static const int kPointerMultiplier = i::kPointerSize / 4;
+ // TODO(ishell): kSystemPointerMultiplier?
+ static const int kPointerMultiplier = i::kSystemPointerSize / 4;
#endif
// Semi-space size needs to be a multiple of page size.
@@ -364,11 +247,8 @@ class Heap {
static const int kNoGCFlags = 0;
static const int kReduceMemoryFootprintMask = 1;
- // The roots that have an index less than this are always in old space.
- static const int kOldSpaceRoots = 0x20;
-
// The minimum size of a HeapObject on the heap.
- static const int kMinObjectSizeInWords = 2;
+ static const int kMinObjectSizeInTaggedWords = 2;
static const int kMinPromotedPercentForFastPromotionMode = 90;
@@ -394,15 +274,9 @@ class Heap {
void FatalProcessOutOfMemory(const char* location);
- V8_EXPORT_PRIVATE static bool RootIsImmortalImmovable(RootIndex root_index);
-
// Checks whether the space is valid.
static bool IsValidAllocationSpace(AllocationSpace space);
- // Generated code can embed direct references to non-writable roots if
- // they are in new space.
- static bool RootCanBeWrittenAfterInitialization(RootIndex root_index);
-
// Zapping is needed for verify heap, and always done in debug builds.
static inline bool ShouldZapGarbage() {
#ifdef DEBUG
@@ -448,39 +322,43 @@ class Heap {
// by pointer size.
static inline void CopyBlock(Address dst, Address src, int byte_size);
- V8_EXPORT_PRIVATE static void WriteBarrierForCodeSlow(Code* host);
- V8_EXPORT_PRIVATE static void GenerationalBarrierSlow(HeapObject* object,
+ V8_EXPORT_PRIVATE static void WriteBarrierForCodeSlow(Code host);
+ V8_EXPORT_PRIVATE static void GenerationalBarrierSlow(HeapObject object,
Address slot,
- HeapObject* value);
+ HeapObject value);
V8_EXPORT_PRIVATE static void GenerationalBarrierForElementsSlow(
- Heap* heap, FixedArray* array, int offset, int length);
+ Heap* heap, FixedArray array, int offset, int length);
V8_EXPORT_PRIVATE static void GenerationalBarrierForCodeSlow(
- Code* host, RelocInfo* rinfo, HeapObject* value);
- V8_EXPORT_PRIVATE static void MarkingBarrierSlow(HeapObject* object,
+ Code host, RelocInfo* rinfo, HeapObject value);
+ V8_EXPORT_PRIVATE static void MarkingBarrierSlow(HeapObject object,
Address slot,
- HeapObject* value);
+ HeapObject value);
V8_EXPORT_PRIVATE static void MarkingBarrierForElementsSlow(
- Heap* heap, HeapObject* object);
- V8_EXPORT_PRIVATE static void MarkingBarrierForCodeSlow(Code* host,
+ Heap* heap, HeapObject object);
+ V8_EXPORT_PRIVATE static void MarkingBarrierForCodeSlow(Code host,
RelocInfo* rinfo,
- HeapObject* value);
- V8_EXPORT_PRIVATE static bool PageFlagsAreConsistent(HeapObject* object);
+ HeapObject value);
+ V8_EXPORT_PRIVATE static void MarkingBarrierForDescriptorArraySlow(
+ Heap* heap, HeapObject host, HeapObject descriptor_array,
+ int number_of_own_descriptors);
+ V8_EXPORT_PRIVATE static bool PageFlagsAreConsistent(HeapObject object);
// Notifies the heap that it is ok to start marking or other activities that
// should not happen during deserialization.
void NotifyDeserializationComplete();
+ void NotifyBootstrapComplete();
+
+ void NotifyOldGenerationExpansion();
+
inline Address* NewSpaceAllocationTopAddress();
inline Address* NewSpaceAllocationLimitAddress();
inline Address* OldSpaceAllocationTopAddress();
inline Address* OldSpaceAllocationLimitAddress();
- // FreeSpace objects have a null map after deserialization. Update the map.
- void RepairFreeListsAfterDeserialization();
-
// Move len elements within a given array from src_index index to dst_index
// index.
- void MoveElements(FixedArray* array, int dst_index, int src_index, int len,
+ void MoveElements(FixedArray array, int dst_index, int src_index, int len,
WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
// Initialize a filler object to keep the ability to iterate over the heap
@@ -489,56 +367,63 @@ class Heap {
// pass ClearRecordedSlots::kNo. If the memory after the object header of
// the filler should be cleared, pass in kClearFreedMemory. The default is
// kDontClearFreedMemory.
- V8_EXPORT_PRIVATE HeapObject* CreateFillerObjectAt(
+ V8_EXPORT_PRIVATE HeapObject CreateFillerObjectAt(
Address addr, int size, ClearRecordedSlots clear_slots_mode,
ClearFreedMemoryMode clear_memory_mode =
ClearFreedMemoryMode::kDontClearFreedMemory);
template <typename T>
- void CreateFillerForArray(T* object, int elements_to_trim, int bytes_to_trim);
+ void CreateFillerForArray(T object, int elements_to_trim, int bytes_to_trim);
- bool CanMoveObjectStart(HeapObject* object);
+ bool CanMoveObjectStart(HeapObject object);
- static bool IsImmovable(HeapObject* object);
+ bool IsImmovable(HeapObject object);
+
+ bool IsLargeObject(HeapObject object);
+ bool IsLargeMemoryChunk(MemoryChunk* chunk);
+
+ bool IsInYoungGeneration(HeapObject object);
// Trim the given array from the left. Note that this relocates the object
// start and hence is only valid if there is only a single reference to it.
- FixedArrayBase* LeftTrimFixedArray(FixedArrayBase* obj, int elements_to_trim);
+ FixedArrayBase LeftTrimFixedArray(FixedArrayBase obj, int elements_to_trim);
// Trim the given array from the right.
- void RightTrimFixedArray(FixedArrayBase* obj, int elements_to_trim);
- void RightTrimWeakFixedArray(WeakFixedArray* obj, int elements_to_trim);
+ void RightTrimFixedArray(FixedArrayBase obj, int elements_to_trim);
+ void RightTrimWeakFixedArray(WeakFixedArray obj, int elements_to_trim);
// Converts the given boolean condition to JavaScript boolean value.
- inline Oddball* ToBoolean(bool condition);
+ inline Oddball ToBoolean(bool condition);
// Notify the heap that a context has been disposed.
int NotifyContextDisposed(bool dependant_context);
- void set_native_contexts_list(Object* object) {
+ void set_native_contexts_list(Object object) {
native_contexts_list_ = object;
}
- Object* native_contexts_list() const { return native_contexts_list_; }
+ Object native_contexts_list() const { return native_contexts_list_; }
- void set_allocation_sites_list(Object* object) {
+ void set_allocation_sites_list(Object object) {
allocation_sites_list_ = object;
}
- Object* allocation_sites_list() { return allocation_sites_list_; }
+ Object allocation_sites_list() { return allocation_sites_list_; }
// Used in CreateAllocationSiteStub and the (de)serializer.
- Object** allocation_sites_list_address() { return &allocation_sites_list_; }
+ Address allocation_sites_list_address() {
+ return reinterpret_cast<Address>(&allocation_sites_list_);
+ }
// Traverse all the allocation_sites [nested_site and weak_next] in the list
// and for each, call the visitor.
void ForeachAllocationSite(
- Object* list, const std::function<void(AllocationSite*)>& visitor);
+ Object list, const std::function<void(AllocationSite)>& visitor);
// Number of mark-sweeps.
int ms_count() const { return ms_count_; }
// Checks whether the given object is allowed to be migrated from its
// current space into the given destination space. Used for debugging.
- bool AllowedToBeMigrated(HeapObject* object, AllocationSpace dest);
+ bool AllowedToBeMigrated(HeapObject object, AllocationSpace dest);
void CheckHandleCount();
@@ -563,7 +448,7 @@ class Heap {
}
void UnprotectAndRegisterMemoryChunk(MemoryChunk* chunk);
- void UnprotectAndRegisterMemoryChunk(HeapObject* object);
+ void UnprotectAndRegisterMemoryChunk(HeapObject object);
void UnregisterUnprotectedMemoryChunk(MemoryChunk* chunk);
V8_EXPORT_PRIVATE void ProtectUnprotectedMemoryChunks();
@@ -586,9 +471,9 @@ class Heap {
inline bool IsInGCPostProcessing() { return gc_post_processing_depth_ > 0; }
// If an object has an AllocationMemento trailing it, return it, otherwise
- // return nullptr;
+ // return a null AllocationMemento.
template <FindMementoMode mode>
- inline AllocationMemento* FindAllocationMemento(Map* map, HeapObject* object);
+ inline AllocationMemento FindAllocationMemento(Map map, HeapObject object);
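
The comment change above reflects the pointer-to-value migration: a value type cannot be nullptr, so "absent" becomes a null object tested with is_null(). A minimal sketch of that null-object convention (simplified stand-in types):

```cpp
#include <cassert>
#include <cstdint>

class Memento {
 public:
  Memento() : ptr_(kNullAddress) {}             // the "null" memento
  explicit Memento(uintptr_t ptr) : ptr_(ptr) {}
  bool is_null() const { return ptr_ == kNullAddress; }

 private:
  static constexpr uintptr_t kNullAddress = 0;
  uintptr_t ptr_;
};

// Callers test is_null() where they previously compared against nullptr.
Memento FindMemento(bool found) {
  return found ? Memento(0x4000) : Memento();
}

int main() {
  assert(FindMemento(false).is_null());
  assert(!FindMemento(true).is_null());
  return 0;
}
```
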
// Returns false if not able to reserve.
bool ReserveSpace(Reservation* reservations, std::vector<Address>* maps);
@@ -610,6 +495,7 @@ class Heap {
void AddNearHeapLimitCallback(v8::NearHeapLimitCallback, void* data);
void RemoveNearHeapLimitCallback(v8::NearHeapLimitCallback callback,
size_t heap_limit);
+ void AutomaticallyRestoreInitialHeapLimit(double threshold_percent);
double MonotonicallyIncreasingTimeInMs();
@@ -632,25 +518,18 @@ class Heap {
inline int NextDebuggingId();
inline int GetNextTemplateSerialNumber();
- void SetSerializedObjects(FixedArray* objects);
- void SetSerializedGlobalProxySizes(FixedArray* sizes);
+ void SetSerializedObjects(FixedArray objects);
+ void SetSerializedGlobalProxySizes(FixedArray sizes);
// For post mortem debugging.
void RememberUnmappedPage(Address page, bool compacted);
int64_t external_memory_hard_limit() { return MaxOldGenerationSize() / 2; }
- int64_t external_memory() { return external_memory_; }
- void update_external_memory(int64_t delta) { external_memory_ += delta; }
-
- void update_external_memory_concurrently_freed(intptr_t freed) {
- external_memory_concurrently_freed_ += freed;
- }
-
- void account_external_memory_concurrently_freed() {
- external_memory_ -= external_memory_concurrently_freed_;
- external_memory_concurrently_freed_ = 0;
- }
+ V8_INLINE int64_t external_memory();
+ V8_INLINE void update_external_memory(int64_t delta);
+ V8_INLINE void update_external_memory_concurrently_freed(intptr_t freed);
+ V8_INLINE void account_external_memory_concurrently_freed();
size_t backing_store_bytes() const { return backing_store_bytes_; }
@@ -662,11 +541,10 @@ class Heap {
// by runtime. Allocations of target space for object evacuation do not
// trigger the event. In order to track ALL allocations one must turn off
// FLAG_inline_new.
- inline void OnAllocationEvent(HeapObject* object, int size_in_bytes);
+ inline void OnAllocationEvent(HeapObject object, int size_in_bytes);
// This event is triggered after object is moved to a new place.
- inline void OnMoveEvent(HeapObject* target, HeapObject* source,
- int size_in_bytes);
+ void OnMoveEvent(HeapObject target, HeapObject source, int size_in_bytes);
inline bool CanAllocateInReadOnlySpace();
bool deserialization_complete() const { return deserialization_complete_; }
@@ -737,6 +615,7 @@ class Heap {
CodeSpace* code_space() { return code_space_; }
MapSpace* map_space() { return map_space_; }
LargeObjectSpace* lo_space() { return lo_space_; }
+ CodeLargeObjectSpace* code_lo_space() { return code_lo_space_; }
NewLargeObjectSpace* new_lo_space() { return new_lo_space_; }
ReadOnlySpace* read_only_space() { return read_only_space_; }
@@ -771,79 +650,22 @@ class Heap {
// ===========================================================================
// Root set access. ==========================================================
// ===========================================================================
- friend class ReadOnlyRoots;
- public:
- RootsTable& roots_table() { return roots_; }
+ // Shortcut to the roots table stored in the Isolate.
+ V8_INLINE RootsTable& roots_table();
// Heap root getters.
-#define ROOT_ACCESSOR(type, name, CamelName) inline type* name();
+#define ROOT_ACCESSOR(type, name, CamelName) inline type name();
MUTABLE_ROOT_LIST(ROOT_ACCESSOR)
#undef ROOT_ACCESSOR
- Object* root(RootIndex index) { return roots_[index]; }
- Handle<Object> root_handle(RootIndex index) {
- return Handle<Object>(&roots_[index]);
- }
-
- bool IsRootHandleLocation(Object** handle_location, RootIndex* index) const {
- return roots_.IsRootHandleLocation(handle_location, index);
- }
-
- template <typename T>
- bool IsRootHandle(Handle<T> handle, RootIndex* index) const {
- return roots_.IsRootHandle(handle, index);
- }
-
- // Generated code can embed this address to get access to the roots.
- Object** roots_array_start() { return roots_.roots_; }
+ V8_INLINE void SetRootMaterializedObjects(FixedArray objects);
+ V8_INLINE void SetRootScriptList(Object value);
+ V8_INLINE void SetRootStringTable(StringTable value);
+ V8_INLINE void SetRootNoScriptSharedFunctionInfos(Object value);
+ V8_INLINE void SetMessageListeners(TemplateList value);
- ExternalReferenceTable* external_reference_table() {
- DCHECK(external_reference_table_.is_initialized());
- return &external_reference_table_;
- }
-
- static constexpr int roots_to_external_reference_table_offset() {
- return kRootsExternalReferenceTableOffset;
- }
-
- static constexpr int roots_to_builtins_offset() {
- return kRootsBuiltinsOffset;
- }
-
- static constexpr int root_register_addressable_end_offset() {
- return kRootRegisterAddressableEndOffset;
- }
-
- Address root_register_addressable_end() {
- return reinterpret_cast<Address>(roots_array_start()) +
- kRootRegisterAddressableEndOffset;
- }
-
- // Sets the stub_cache_ (only used when expanding the dictionary).
- void SetRootCodeStubs(SimpleNumberDictionary* value);
-
- void SetRootMaterializedObjects(FixedArray* objects) {
- roots_[RootIndex::kMaterializedObjects] = objects;
- }
-
- void SetRootScriptList(Object* value) {
- roots_[RootIndex::kScriptList] = value;
- }
-
- void SetRootStringTable(StringTable* value) {
- roots_[RootIndex::kStringTable] = value;
- }
-
- void SetRootNoScriptSharedFunctionInfos(Object* value) {
- roots_[RootIndex::kNoScriptSharedFunctionInfos] = value;
- }
-
- void SetMessageListeners(TemplateList* value) {
- roots_[RootIndex::kMessageListeners] = value;
- }
-
- // Set the stack limit in the roots_ array. Some architectures generate
+ // Set the stack limit in the roots table. Some architectures generate
// code that looks here, because it is faster than loading from the static
// jslimit_/real_jslimit_ variable in the StackGuard.
void SetStackLimits();
@@ -852,13 +674,27 @@ class Heap {
// snapshot blob, we need to reset it before serializing.
void ClearStackLimits();
- // Generated code can treat direct references to this root as constant.
- bool RootCanBeTreatedAsConstant(RootIndex root_index);
+ void RegisterStrongRoots(FullObjectSlot start, FullObjectSlot end);
+ void UnregisterStrongRoots(FullObjectSlot start);
+
+ void SetBuiltinsConstantsTable(FixedArray cache);
- void RegisterStrongRoots(Object** start, Object** end);
- void UnregisterStrongRoots(Object** start);
+ // A full copy of the interpreter entry trampoline, used as a template to
+ // create copies of the builtin at runtime. The copies are used to create
+ // better profiling information for ticks in bytecode execution. Note that
+ // this is always a copy of the full builtin, i.e. not the off-heap
+ // trampoline.
+ // See also: FLAG_interpreted_frames_native_stack.
+ void SetInterpreterEntryTrampolineForProfiling(Code code);
- void SetBuiltinsConstantsTable(FixedArray* cache);
+ // Add weak_factory into the dirty_js_weak_factories list.
+ void AddDirtyJSWeakFactory(
+ JSWeakFactory weak_factory,
+ std::function<void(HeapObject object, ObjectSlot slot, Object target)>
+ gc_notify_updated_slot);
+
+ void AddKeepDuringJobTarget(Handle<JSReceiver> target);
+ void ClearKeepDuringJobSet();
// ===========================================================================
// Inline allocation. ========================================================
@@ -916,15 +752,22 @@ class Heap {
// Builtins. =================================================================
// ===========================================================================
- Code* builtin(int index);
+ Code builtin(int index);
Address builtin_address(int index);
- void set_builtin(int index, HeapObject* builtin);
+ void set_builtin(int index, Code builtin);
// ===========================================================================
// Iterators. ================================================================
// ===========================================================================
+ // None of these methods iterate over the read-only roots. To do this use
+ // ReadOnlyRoots::Iterate. Read-only root iteration is not necessary for
+ // garbage collection and is usually only performed as part of
+ // (de)serialization or heap verification.
+
+ // Iterates over the strong roots and the weak roots.
void IterateRoots(RootVisitor* v, VisitMode mode);
+ // Iterates over the strong roots.
void IterateStrongRoots(RootVisitor* v, VisitMode mode);
// Iterates over entries in the smi roots list. Only interesting to the
// serializer/deserializer, since GC does not care about smis.
@@ -951,11 +794,11 @@ class Heap {
static intptr_t store_buffer_mask_constant();
static Address store_buffer_overflow_function_address();
- void ClearRecordedSlot(HeapObject* object, Object** slot);
+ void ClearRecordedSlot(HeapObject object, ObjectSlot slot);
void ClearRecordedSlotRange(Address start, Address end);
#ifdef DEBUG
- void VerifyClearedSlot(HeapObject* object, Object** slot);
+ void VerifyClearedSlot(HeapObject object, ObjectSlot slot);
#endif
// ===========================================================================
@@ -988,7 +831,7 @@ class Heap {
void FinalizeIncrementalMarkingAtomically(GarbageCollectionReason gc_reason);
void RegisterDeserializedObjectsForBlackAllocation(
- Reservation* reservations, const std::vector<HeapObject*>& large_objects,
+ Reservation* reservations, const std::vector<HeapObject>& large_objects,
const std::vector<Address>& maps);
IncrementalMarking* incremental_marking() { return incremental_marking_; }
@@ -1002,14 +845,14 @@ class Heap {
// The runtime uses this function to notify potentially unsafe object layout
// changes that require special synchronization with the concurrent marker.
// The old size is the size of the object before layout change.
- void NotifyObjectLayoutChange(HeapObject* object, int old_size,
+ void NotifyObjectLayoutChange(HeapObject object, int old_size,
const DisallowHeapAllocation&);
#ifdef VERIFY_HEAP
// This function checks that either
// - the map transition is safe,
// - or it was communicated to GC using NotifyObjectLayoutChange.
- void VerifyObjectLayoutChange(HeapObject* object, Map* new_map);
+ void VerifyObjectLayoutChange(HeapObject object, Map new_map);
#endif
// ===========================================================================
@@ -1022,13 +865,9 @@ class Heap {
void SetConstructStubInvokeDeoptPCOffset(int pc_offset);
void SetInterpreterEntryReturnPCOffset(int pc_offset);
- // Invalidates references in the given {code} object that are directly
- // embedded within the instruction stream. Mutates write-protected code.
- void InvalidateCodeEmbeddedObjects(Code* code);
-
// Invalidates references in the given {code} object that are referenced
// transitively from the deoptimization data. Mutates write-protected code.
- void InvalidateCodeDeoptimizationData(Code* code);
+ void InvalidateCodeDeoptimizationData(Code code);
void DeoptMarkedAllocationSites();
@@ -1045,8 +884,7 @@ class Heap {
void SetEmbedderHeapTracer(EmbedderHeapTracer* tracer);
EmbedderHeapTracer* GetEmbedderHeapTracer() const;
- void TracePossibleWrapper(JSObject* js_object);
- void RegisterExternallyReferencedObject(Object** object);
+ void RegisterExternallyReferencedObject(Address* location);
void SetEmbedderStackStateForNextFinalizaton(
EmbedderHeapTracer::EmbedderStackState stack_state);
@@ -1055,59 +893,54 @@ class Heap {
// ===========================================================================
// Registers an external string.
- inline void RegisterExternalString(String* string);
+ inline void RegisterExternalString(String string);
// Called when a string's resource is changed. The size of the payload is sent
// as argument of the method.
- inline void UpdateExternalString(String* string, size_t old_payload,
+ inline void UpdateExternalString(String string, size_t old_payload,
size_t new_payload);
// Finalizes an external string by deleting the associated external
// data and clearing the resource pointer.
- inline void FinalizeExternalString(String* string);
+ inline void FinalizeExternalString(String string);
- static String* UpdateNewSpaceReferenceInExternalStringTableEntry(
- Heap* heap, Object** pointer);
+ static String UpdateNewSpaceReferenceInExternalStringTableEntry(
+ Heap* heap, FullObjectSlot pointer);
// ===========================================================================
// Methods checking/returning the space of a given object/address. ===========
// ===========================================================================
// Returns whether the object resides in new space.
- static inline bool InNewSpace(Object* object);
- static inline bool InNewSpace(MaybeObject* object);
- static inline bool InNewSpace(HeapObject* heap_object);
- static inline bool InFromSpace(Object* object);
- static inline bool InFromSpace(MaybeObject* object);
- static inline bool InFromSpace(HeapObject* heap_object);
- static inline bool InToSpace(Object* object);
- static inline bool InToSpace(MaybeObject* object);
- static inline bool InToSpace(HeapObject* heap_object);
+ static inline bool InNewSpace(Object object);
+ static inline bool InNewSpace(MaybeObject object);
+ static inline bool InNewSpace(HeapObject heap_object);
+ static inline bool InFromSpace(Object object);
+ static inline bool InFromSpace(MaybeObject object);
+ static inline bool InFromSpace(HeapObject heap_object);
+ static inline bool InToSpace(Object object);
+ static inline bool InToSpace(MaybeObject object);
+ static inline bool InToSpace(HeapObject heap_object);
// Returns whether the object resides in old space.
- inline bool InOldSpace(Object* object);
+ inline bool InOldSpace(Object object);
// Returns whether the object resides in read-only space.
- inline bool InReadOnlySpace(Object* object);
+ inline bool InReadOnlySpace(Object object);
// Checks whether an address/object is in the heap (including auxiliary
// area and unused area).
- bool Contains(HeapObject* value);
+ bool Contains(HeapObject value);
// Checks whether an address/object is in a space.
// Currently used by tests, serialization and heap verification only.
- bool InSpace(HeapObject* value, AllocationSpace space);
+ bool InSpace(HeapObject value, AllocationSpace space);
// Slow methods that can be used for verification as they can also be used
// with off-heap Addresses.
- bool ContainsSlow(Address addr);
bool InSpaceSlow(Address addr, AllocationSpace space);
- inline bool InNewSpaceSlow(Address address);
- inline bool InOldSpaceSlow(Address address);
- // Find the heap which owns this HeapObject. Should never be called for
- // objects in RO space.
- static inline Heap* FromWritableHeapObject(const HeapObject* obj);
+ static inline Heap* FromWritableHeapObject(const HeapObject obj);
// ===========================================================================
// Object statistics tracking. ===============================================
@@ -1176,9 +1009,8 @@ class Heap {
// Returns the capacity of the old generation.
size_t OldGenerationCapacity();
- // Returns the amount of memory currently committed for the heap and memory
- // held alive by the unmapper.
- size_t CommittedMemoryOfHeapAndUnmapper();
+ // Returns the amount of memory currently held alive by the unmapper.
+ size_t CommittedMemoryOfUnmapper();
// Returns the amount of memory currently committed for the heap.
size_t CommittedMemory();
@@ -1285,6 +1117,8 @@ class Heap {
int gc_count() const { return gc_count_; }
+ bool is_current_gc_forced() const { return is_current_gc_forced_; }
+
// Returns the size of objects residing in non-new spaces.
// Excludes external memory held by those objects.
size_t OldGenerationSizeOfObjects();
@@ -1311,15 +1145,15 @@ class Heap {
// ===========================================================================
// Creates a filler object and returns a heap object immediately after it.
- V8_WARN_UNUSED_RESULT HeapObject* PrecedeWithFiller(HeapObject* object,
- int filler_size);
+ V8_WARN_UNUSED_RESULT HeapObject PrecedeWithFiller(HeapObject object,
+ int filler_size);
// Creates a filler object if needed for alignment and returns a heap object
// immediately after it. If any space is left after the returned object,
// another filler object is created so the over allocated memory is iterable.
- V8_WARN_UNUSED_RESULT HeapObject* AlignWithFiller(
- HeapObject* object, int object_size, int allocation_size,
- AllocationAlignment alignment);
+ V8_WARN_UNUSED_RESULT HeapObject
+ AlignWithFiller(HeapObject object, int object_size, int allocation_size,
+ AllocationAlignment alignment);
// ===========================================================================
// ArrayBuffer tracking. =====================================================
@@ -1329,8 +1163,8 @@ class Heap {
// in the registration/unregistration APIs. Consider dropping the "New" from
// "RegisterNewArrayBuffer" because one can re-register a previously
// unregistered buffer, too, and the name is confusing.
- void RegisterNewArrayBuffer(JSArrayBuffer* buffer);
- void UnregisterArrayBuffer(JSArrayBuffer* buffer);
+ void RegisterNewArrayBuffer(JSArrayBuffer buffer);
+ void UnregisterArrayBuffer(JSArrayBuffer buffer);
// ===========================================================================
// Allocation site tracking. =================================================
@@ -1339,8 +1173,7 @@ class Heap {
// Updates the AllocationSite of a given {object}. The entry (including the
// count) is cached on the local pretenuring feedback.
inline void UpdateAllocationSite(
- Map* map, HeapObject* object,
- PretenuringFeedbackMap* pretenuring_feedback);
+ Map map, HeapObject object, PretenuringFeedbackMap* pretenuring_feedback);
// Merges local pretenuring feedback into the global one. Note that this
// method needs to be called after evacuation, as allocation sites may be
@@ -1390,19 +1223,18 @@ class Heap {
// Stack frame support. ======================================================
// ===========================================================================
- // Returns the Code object for a given interior pointer. Returns nullptr if
- // {inner_pointer} is not contained within a Code object.
- Code* GcSafeFindCodeForInnerPointer(Address inner_pointer);
+ // Returns the Code object for a given interior pointer.
+ Code GcSafeFindCodeForInnerPointer(Address inner_pointer);
// Returns true if {addr} is contained within {code} and false otherwise.
// Mostly useful for debugging.
- bool GcSafeCodeContains(HeapObject* code, Address addr);
+ bool GcSafeCodeContains(Code code, Address addr);
// =============================================================================
#ifdef VERIFY_HEAP
// Verify the heap is in its normal state before or after a GC.
void Verify();
- void VerifyRememberedSetFor(HeapObject* object);
+ void VerifyRememberedSetFor(HeapObject object);
#endif
#ifdef V8_ENABLE_ALLOCATION_TIMEOUT
@@ -1448,8 +1280,8 @@ class Heap {
private:
class SkipStoreBufferScope;
- typedef String* (*ExternalStringTableUpdaterCallback)(Heap* heap,
- Object** pointer);
+ typedef String (*ExternalStringTableUpdaterCallback)(Heap* heap,
+ FullObjectSlot pointer);
// External strings table is a place where all external strings are
// registered. We need to keep track of such strings to properly
@@ -1459,8 +1291,8 @@ class Heap {
explicit ExternalStringTable(Heap* heap) : heap_(heap) {}
// Registers an external string.
- inline void AddString(String* string);
- bool Contains(HeapObject* obj);
+ inline void AddString(String string);
+ bool Contains(String string);
void IterateAll(RootVisitor* v);
void IterateNewSpaceStrings(RootVisitor* v);
@@ -1487,8 +1319,8 @@ class Heap {
// To speed up scavenge collections, new space strings are kept
// separate from old space strings.
- std::vector<Object*> new_space_strings_;
- std::vector<Object*> old_space_strings_;
+ std::vector<Object> new_space_strings_;
+ std::vector<Object> old_space_strings_;
DISALLOW_COPY_AND_ASSIGN(ExternalStringTable);
};
@@ -1518,7 +1350,7 @@ class Heap {
: callback(callback), gc_type(gc_type), data(data) {}
bool operator==(const GCCallbackTuple& other) const;
- GCCallbackTuple& operator=(const GCCallbackTuple& other);
+ GCCallbackTuple& operator=(const GCCallbackTuple& other) V8_NOEXCEPT;
v8::Isolate::GCCallbackWithData callback;
GCType gc_type;
@@ -1564,8 +1396,7 @@ class Heap {
return 0;
}
-#define ROOT_ACCESSOR(type, name, CamelName) \
- inline void set_##name(type* value);
+#define ROOT_ACCESSOR(type, name, CamelName) inline void set_##name(type value);
ROOT_LIST(ROOT_ACCESSOR)
#undef ROOT_ACCESSOR
@@ -1607,14 +1438,6 @@ class Heap {
void CreateInternalAccessorInfoObjects();
void CreateInitialObjects();
- // These five Create*EntryStub functions are here and forced to not be inlined
- // because of a gcc-4.4 bug that assigns wrong vtable entries.
- V8_NOINLINE void CreateJSEntryStub();
- V8_NOINLINE void CreateJSConstructEntryStub();
- V8_NOINLINE void CreateJSRunMicrotasksEntryStub();
-
- void CreateFixedStubs();
-
// Commits from space if it is uncommitted.
void EnsureFromSpaceIsCommitted();
@@ -1662,7 +1485,7 @@ class Heap {
double deadline_in_ms);
int NextAllocationTimeout(int current_timeout = 0);
- inline void UpdateAllocationsHash(HeapObject* object);
+ inline void UpdateAllocationsHash(HeapObject object);
inline void UpdateAllocationsHash(uint32_t value);
void PrintAllocationsHash();
@@ -1674,7 +1497,7 @@ class Heap {
void AddToRingBuffer(const char* string);
void GetFromRingBuffer(char* buffer);
- void CompactRetainedMaps(WeakArrayList* retained_maps);
+ void CompactRetainedMaps(WeakArrayList retained_maps);
void CollectGarbageOnMemoryPressure();
@@ -1697,8 +1520,8 @@ class Heap {
// - GCFinalizeMC: finalization of incremental full GC
// - GCFinalizeMCReduceMemory: finalization of incremental full GC with
// memory reduction
- HistogramTimer* GCTypeTimer(GarbageCollector collector);
- HistogramTimer* GCTypePriorityTimer(GarbageCollector collector);
+ TimedHistogram* GCTypeTimer(GarbageCollector collector);
+ TimedHistogram* GCTypePriorityTimer(GarbageCollector collector);
// ===========================================================================
// Pretenuring. ==============================================================
@@ -1710,7 +1533,7 @@ class Heap {
void ProcessPretenuringFeedback();
// Removes an entry from the global pretenuring storage.
- void RemoveAllocationSitePretenuringFeedback(AllocationSite* site);
+ void RemoveAllocationSitePretenuringFeedback(AllocationSite site);
// ===========================================================================
// Actual GC. ================================================================
@@ -1866,7 +1689,7 @@ class Heap {
// triggered and the allocation is retried. This is performed multiple times.
// If after that retry procedure the allocation still fails nullptr is
// returned.
- HeapObject* AllocateRawWithLightRetry(
+ HeapObject AllocateRawWithLightRetry(
int size, AllocationSpace space,
AllocationAlignment alignment = kWordAligned);
@@ -1876,25 +1699,25 @@ class Heap {
// If after that retry procedure the allocation still fails a "hammer"
// garbage collection is triggered which tries to significantly reduce memory.
// If the allocation still fails after that a fatal error is thrown.
- HeapObject* AllocateRawWithRetryOrFail(
+ HeapObject AllocateRawWithRetryOrFail(
int size, AllocationSpace space,
AllocationAlignment alignment = kWordAligned);
- HeapObject* AllocateRawCodeInLargeObjectSpace(int size);
+ HeapObject AllocateRawCodeInLargeObjectSpace(int size);
// Allocates a heap object based on the map.
- V8_WARN_UNUSED_RESULT AllocationResult Allocate(Map* map,
+ V8_WARN_UNUSED_RESULT AllocationResult Allocate(Map map,
AllocationSpace space);
// Takes a code object and checks if it is on memory which is not subject to
// compaction. This method will return a new code object on an immovable
// memory location if the original code object was movable.
- HeapObject* EnsureImmovableCode(HeapObject* heap_object, int object_size);
+ HeapObject EnsureImmovableCode(HeapObject heap_object, int object_size);
// Allocates a partial map for bootstrapping.
V8_WARN_UNUSED_RESULT AllocationResult
AllocatePartialMap(InstanceType instance_type, int instance_size);
- void FinalizePartialMap(Map* map);
+ void FinalizePartialMap(Map map);
// Allocate empty fixed typed array of given type.
V8_WARN_UNUSED_RESULT AllocationResult
@@ -1906,22 +1729,17 @@ class Heap {
// Retaining path tracing ====================================================
// ===========================================================================
- void AddRetainer(HeapObject* retainer, HeapObject* object);
- void AddEphemeronRetainer(HeapObject* retainer, HeapObject* object);
- void AddRetainingRoot(Root root, HeapObject* object);
+ void AddRetainer(HeapObject retainer, HeapObject object);
+ void AddEphemeronRetainer(HeapObject retainer, HeapObject object);
+ void AddRetainingRoot(Root root, HeapObject object);
// Returns true if the given object is a target of retaining path tracking.
// Stores the option corresponding to the object in the provided *option.
- bool IsRetainingPathTarget(HeapObject* object, RetainingPathOption* option);
- void PrintRetainingPath(HeapObject* object, RetainingPathOption option);
-
- // The amount of external memory registered through the API.
- int64_t external_memory_ = 0;
-
- // The limit when to trigger memory pressure from the API.
- int64_t external_memory_limit_ = kExternalAllocationSoftLimit;
+ bool IsRetainingPathTarget(HeapObject object, RetainingPathOption* option);
+ void PrintRetainingPath(HeapObject object, RetainingPathOption option);
- // Caches the amount of external memory registered at the last MC.
- int64_t external_memory_at_last_mark_compact_ = 0;
+#ifdef DEBUG
+ void IncrementObjectCounters();
+#endif // DEBUG
// The amount of memory that has been freed concurrently.
std::atomic<intptr_t> external_memory_concurrently_freed_{0};
@@ -1930,37 +1748,16 @@ class Heap {
// more expedient to get at the isolate directly from within Heap methods.
Isolate* isolate_ = nullptr;
- RootsTable roots_;
-
- // This table is accessed from builtin code compiled into the snapshot, and
- // thus its offset from roots_ must remain static. This is verified in
- // Isolate::Init() using runtime checks.
- static constexpr int kRootsExternalReferenceTableOffset =
- static_cast<int>(RootIndex::kRootListLength) * kPointerSize;
- ExternalReferenceTable external_reference_table_;
-
- // As external references above, builtins are accessed through an offset from
- // the roots register. Its offset from roots_ must remain static. This is
- // verified in Isolate::Init() using runtime checks.
- static constexpr int kRootsBuiltinsOffset =
- kRootsExternalReferenceTableOffset +
- ExternalReferenceTable::SizeInBytes();
- Object* builtins_[Builtins::builtin_count];
-
- // kRootRegister may be used to address any location that starts at the
- // Isolate and ends at this point. Fields past this point are not guaranteed
- // to live at a static offset from kRootRegister.
- static constexpr int kRootRegisterAddressableEndOffset =
- kRootsBuiltinsOffset + Builtins::builtin_count * kPointerSize;
-
size_t code_range_size_ = 0;
- size_t max_semi_space_size_ = 8 * (kPointerSize / 4) * MB;
+ size_t max_semi_space_size_ = 8 * (kSystemPointerSize / 4) * MB;
size_t initial_semispace_size_ = kMinSemiSpaceSizeInKB * KB;
- size_t max_old_generation_size_ = 700ul * (kPointerSize / 4) * MB;
+ size_t max_old_generation_size_ = 700ul * (kSystemPointerSize / 4) * MB;
size_t initial_max_old_generation_size_;
+ size_t initial_max_old_generation_size_threshold_;
size_t initial_old_generation_size_;
bool old_generation_size_configured_ = false;
size_t maximum_committed_ = 0;
+ size_t old_generation_capacity_after_bootstrap_ = 0;
// Backing store bytes (array buffers and external strings).
std::atomic<size_t> backing_store_bytes_{0};
@@ -1996,6 +1793,7 @@ class Heap {
CodeSpace* code_space_ = nullptr;
MapSpace* map_space_ = nullptr;
LargeObjectSpace* lo_space_ = nullptr;
+ CodeLargeObjectSpace* code_lo_space_ = nullptr;
NewLargeObjectSpace* new_lo_space_ = nullptr;
ReadOnlySpace* read_only_space_ = nullptr;
// Map from the space id to the space.
@@ -2067,8 +1865,8 @@ class Heap {
// Weak list heads, threaded through the objects.
// List heads are initialized lazily and contain the undefined_value at start.
- Object* native_contexts_list_;
- Object* allocation_sites_list_;
+ Object native_contexts_list_;
+ Object allocation_sites_list_;
std::vector<GCCallbackTuple> gc_epilogue_callbacks_;
std::vector<GCCallbackTuple> gc_prologue_callbacks_;
@@ -2163,6 +1961,8 @@ class Heap {
// the embedder and V8's GC.
GCCallbackFlags current_gc_callback_flags_;
+ bool is_current_gc_forced_;
+
ExternalStringTable external_string_table_;
base::Mutex relocation_mutex_;
@@ -2180,7 +1980,7 @@ class Heap {
bool force_oom_ = false;
bool delay_sweeper_tasks_for_testing_ = false;
- HeapObject* pending_layout_change_object_ = nullptr;
+ HeapObject pending_layout_change_object_;
base::Mutex unprotected_memory_chunks_mutex_;
std::unordered_set<MemoryChunk*> unprotected_memory_chunks_;
@@ -2193,11 +1993,11 @@ class Heap {
int allocation_timeout_ = 0;
#endif // V8_ENABLE_ALLOCATION_TIMEOUT
- std::map<HeapObject*, HeapObject*> retainer_;
- std::map<HeapObject*, Root> retaining_root_;
+ std::map<HeapObject, HeapObject, Object::Comparer> retainer_;
+ std::map<HeapObject, Root, Object::Comparer> retaining_root_;
// If an object is retained by an ephemeron, then the retaining key of the
// ephemeron is stored in this map.
- std::map<HeapObject*, HeapObject*> ephemeron_retainer_;
+ std::map<HeapObject, HeapObject, Object::Comparer> ephemeron_retainer_;
// For each index in the retaining_path_targets_ array, this map
// stores the option of the corresponding target.
std::map<int, RetainingPathOption> retaining_path_target_option_;
@@ -2227,6 +2027,7 @@ class Heap {
friend class ObjectStatsCollector;
friend class Page;
friend class PagedSpace;
+ friend class ReadOnlyRoots;
friend class Scavenger;
friend class ScavengerCollector;
friend class Space;
@@ -2267,21 +2068,22 @@ class HeapStats {
size_t* map_space_size; // 9
size_t* map_space_capacity; // 10
size_t* lo_space_size; // 11
- size_t* global_handle_count; // 12
- size_t* weak_global_handle_count; // 13
- size_t* pending_global_handle_count; // 14
- size_t* near_death_global_handle_count; // 15
- size_t* free_global_handle_count; // 16
- size_t* memory_allocator_size; // 17
- size_t* memory_allocator_capacity; // 18
- size_t* malloced_memory; // 19
- size_t* malloced_peak_memory; // 20
- size_t* objects_per_type; // 21
- size_t* size_per_type; // 22
- int* os_error; // 23
- char* last_few_messages; // 24
- char* js_stacktrace; // 25
- intptr_t* end_marker; // 26
+ size_t* code_lo_space_size; // 12
+ size_t* global_handle_count; // 13
+ size_t* weak_global_handle_count; // 14
+ size_t* pending_global_handle_count; // 15
+ size_t* near_death_global_handle_count; // 16
+ size_t* free_global_handle_count; // 17
+ size_t* memory_allocator_size; // 18
+ size_t* memory_allocator_capacity; // 19
+ size_t* malloced_memory; // 20
+ size_t* malloced_peak_memory; // 21
+ size_t* objects_per_type; // 22
+ size_t* size_per_type; // 23
+ int* os_error; // 24
+ char* last_few_messages; // 25
+ char* js_stacktrace; // 26
+ intptr_t* end_marker; // 27
};
@@ -2330,7 +2132,7 @@ class CodePageMemoryModificationScope {
// Disallow any GCs inside this scope, as a relocation of the underlying
// object would change the {MemoryChunk} that this scope targets.
- DisallowHeapAllocation no_heap_allocation_;
+ DISALLOW_HEAP_ALLOCATION(no_heap_allocation_);
};
// Visitor class to verify interior pointers in spaces that do not contain
@@ -2341,15 +2143,24 @@ class CodePageMemoryModificationScope {
class VerifyPointersVisitor : public ObjectVisitor, public RootVisitor {
public:
explicit VerifyPointersVisitor(Heap* heap) : heap_(heap) {}
- void VisitPointers(HeapObject* host, Object** start, Object** end) override;
- void VisitPointers(HeapObject* host, MaybeObject** start,
- MaybeObject** end) override;
- void VisitRootPointers(Root root, const char* description, Object** start,
- Object** end) override;
+ void VisitPointers(HeapObject host, ObjectSlot start,
+ ObjectSlot end) override;
+ void VisitPointers(HeapObject host, MaybeObjectSlot start,
+ MaybeObjectSlot end) override;
+ void VisitCodeTarget(Code host, RelocInfo* rinfo) override;
+ void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override;
+
+ void VisitRootPointers(Root root, const char* description,
+ FullObjectSlot start, FullObjectSlot end) override;
protected:
- virtual void VerifyPointers(HeapObject* host, MaybeObject** start,
- MaybeObject** end);
+ V8_INLINE void VerifyHeapObjectImpl(HeapObject heap_object);
+
+ template <typename TSlot>
+ V8_INLINE void VerifyPointersImpl(TSlot start, TSlot end);
+
+ virtual void VerifyPointers(HeapObject host, MaybeObjectSlot start,
+ MaybeObjectSlot end);
Heap* heap_;
};
@@ -2358,8 +2169,8 @@ class VerifyPointersVisitor : public ObjectVisitor, public RootVisitor {
// Verify that all objects are Smis.
class VerifySmisVisitor : public RootVisitor {
public:
- void VisitRootPointers(Root root, const char* description, Object** start,
- Object** end) override;
+ void VisitRootPointers(Root root, const char* description,
+ FullObjectSlot start, FullObjectSlot end) override;
};
// Space iterator for iterating over all the paged spaces of the heap: Map
@@ -2416,12 +2227,12 @@ class HeapIterator {
HeapObjectsFiltering filtering = kNoFiltering);
~HeapIterator();
- HeapObject* next();
+ HeapObject next();
private:
- HeapObject* NextObject();
+ HeapObject NextObject();
- DisallowHeapAllocation no_heap_allocation_;
+ DISALLOW_HEAP_ALLOCATION(no_heap_allocation_);
Heap* heap_;
HeapObjectsFiltering filtering_;
@@ -2440,7 +2251,7 @@ class WeakObjectRetainer {
  // Return whether this object should be retained. If a null object is
  // returned, the object has no references. Otherwise the address of the
  // retained object should be returned, as in some GC situations the
  // object has been moved.
- virtual Object* RetainAs(Object* object) = 0;
+ virtual Object RetainAs(Object object) = 0;
};
// -----------------------------------------------------------------------------
@@ -2449,7 +2260,7 @@ class AllocationObserver {
public:
explicit AllocationObserver(intptr_t step_size)
: step_size_(step_size), bytes_to_next_step_(step_size) {
- DCHECK_LE(kPointerSize, step_size);
+ DCHECK_LE(kTaggedSize, step_size);
}
virtual ~AllocationObserver() = default;
diff --git a/deps/v8/src/heap/incremental-marking-inl.h b/deps/v8/src/heap/incremental-marking-inl.h
index e19d62f4d4..7df67d3d27 100644
--- a/deps/v8/src/heap/incremental-marking-inl.h
+++ b/deps/v8/src/heap/incremental-marking-inl.h
@@ -15,7 +15,7 @@
namespace v8 {
namespace internal {
-void IncrementalMarking::TransferColor(HeapObject* from, HeapObject* to) {
+void IncrementalMarking::TransferColor(HeapObject from, HeapObject to) {
if (atomic_marking_state()->IsBlack(to)) {
DCHECK(black_allocation());
return;
@@ -33,25 +33,23 @@ void IncrementalMarking::TransferColor(HeapObject* from, HeapObject* to) {
}
}
-void IncrementalMarking::RecordWrite(HeapObject* obj, Object** slot,
- Object* value) {
- DCHECK_IMPLIES(slot != nullptr, !HasWeakHeapObjectTag(*slot));
+void IncrementalMarking::RecordWrite(HeapObject obj, ObjectSlot slot,
+ Object value) {
+ DCHECK_IMPLIES(slot.address() != kNullAddress, !HasWeakHeapObjectTag(*slot));
DCHECK(!HasWeakHeapObjectTag(value));
if (IsMarking() && value->IsHeapObject()) {
- RecordWriteSlow(obj, reinterpret_cast<HeapObjectReference**>(slot),
- HeapObject::cast(value));
+ RecordWriteSlow(obj, HeapObjectSlot(slot), HeapObject::cast(value));
}
}
-void IncrementalMarking::RecordMaybeWeakWrite(HeapObject* obj,
- MaybeObject** slot,
- MaybeObject* value) {
+void IncrementalMarking::RecordMaybeWeakWrite(HeapObject obj,
+ MaybeObjectSlot slot,
+ MaybeObject value) {
// When writing a weak reference, treat it as strong for the purposes of the
// marking barrier.
- HeapObject* heap_object;
+ HeapObject heap_object;
if (IsMarking() && value->GetHeapObject(&heap_object)) {
- RecordWriteSlow(obj, reinterpret_cast<HeapObjectReference**>(slot),
- heap_object);
+ RecordWriteSlow(obj, HeapObjectSlot(slot), heap_object);
}
}
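
The barrier entry points above now traffic in typed slot classes (ObjectSlot, MaybeObjectSlot, HeapObjectSlot) instead of raw Object** and MaybeObject** pointers, and the null check becomes an address comparison against kNullAddress. A minimal sketch of the slot pattern, with hypothetical names rather than V8's actual classes:

    #include <cstdint>

    using Address = uintptr_t;
    constexpr Address kNullAddress = 0;

    // A typed wrapper around a raw slot address. Unlike Object**, strong
    // and weak slots become distinct C++ types, and "no slot" is the
    // null address rather than a null pointer.
    class ObjectSlotSketch {
     public:
      explicit ObjectSlotSketch(Address address) : address_(address) {}
      Address address() const { return address_; }
      bool is_valid() const { return address_ != kNullAddress; }

     private:
      Address address_;
    };
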
diff --git a/deps/v8/src/heap/incremental-marking-job.cc b/deps/v8/src/heap/incremental-marking-job.cc
index 96eff0508e..836b491d8f 100644
--- a/deps/v8/src/heap/incremental-marking-job.cc
+++ b/deps/v8/src/heap/incremental-marking-job.cc
@@ -5,6 +5,7 @@
#include "src/heap/incremental-marking-job.h"
#include "src/base/platform/time.h"
+#include "src/heap/embedder-tracing.h"
#include "src/heap/heap-inl.h"
#include "src/heap/heap.h"
#include "src/heap/incremental-marking.h"
@@ -15,6 +16,29 @@
namespace v8 {
namespace internal {
+class IncrementalMarkingJob::Task : public CancelableTask {
+ public:
+ static void Step(Heap* heap,
+ EmbedderHeapTracer::EmbedderStackState stack_state);
+
+ Task(Isolate* isolate, IncrementalMarkingJob* job,
+ EmbedderHeapTracer::EmbedderStackState stack_state)
+ : CancelableTask(isolate),
+ isolate_(isolate),
+ job_(job),
+ stack_state_(stack_state) {}
+
+ // CancelableTask overrides.
+ void RunInternal() override;
+
+ Isolate* isolate() const { return isolate_; }
+
+ private:
+ Isolate* const isolate_;
+ IncrementalMarkingJob* const job_;
+ const EmbedderHeapTracer::EmbedderStackState stack_state_;
+};
+
void IncrementalMarkingJob::Start(Heap* heap) {
DCHECK(!heap->incremental_marking()->IsStopped());
ScheduleTask(heap);
@@ -26,19 +50,32 @@ void IncrementalMarkingJob::ScheduleTask(Heap* heap) {
task_pending_ = true;
auto taskrunner =
V8::GetCurrentPlatform()->GetForegroundTaskRunner(isolate);
- taskrunner->PostTask(base::make_unique<Task>(heap->isolate(), this));
+ if (taskrunner->NonNestableTasksEnabled()) {
+ taskrunner->PostNonNestableTask(base::make_unique<Task>(
+ heap->isolate(), this,
+ EmbedderHeapTracer::EmbedderStackState::kEmpty));
+ } else {
+ taskrunner->PostTask(base::make_unique<Task>(
+ heap->isolate(), this,
+ EmbedderHeapTracer::EmbedderStackState::kUnknown));
+ }
}
}
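
The scheduling branch above encodes a guarantee about the stack: a non-nestable task runs directly from the message loop, so no application frames that could hold heap pointers are live when it executes, while a nested task may run inside arbitrary C++ frames. A sketch of that decision, with hypothetical names:

    enum class StackStateSketch { kEmpty, kUnknown };

    // Non-nestable tasks run only from the top of the message loop, so
    // the stack is known to hold no heap pointers; otherwise we must
    // conservatively assume it might.
    StackStateSketch StackStateForTask(bool non_nestable_enabled) {
      return non_nestable_enabled ? StackStateSketch::kEmpty
                                  : StackStateSketch::kUnknown;
    }
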
-void IncrementalMarkingJob::Task::Step(Heap* heap) {
+void IncrementalMarkingJob::Task::Step(
+ Heap* heap, EmbedderHeapTracer::EmbedderStackState stack_state) {
const int kIncrementalMarkingDelayMs = 1;
double deadline =
heap->MonotonicallyIncreasingTimeInMs() + kIncrementalMarkingDelayMs;
heap->incremental_marking()->AdvanceIncrementalMarking(
deadline, i::IncrementalMarking::NO_GC_VIA_STACK_GUARD,
i::StepOrigin::kTask);
- heap->FinalizeIncrementalMarkingIfComplete(
- GarbageCollectionReason::kFinalizeMarkingViaTask);
+ {
+ EmbedderStackStateScope scope(heap->local_embedder_heap_tracer(),
+ stack_state);
+ heap->FinalizeIncrementalMarkingIfComplete(
+ GarbageCollectionReason::kFinalizeMarkingViaTask);
+ }
}
void IncrementalMarkingJob::Task::RunInternal() {
@@ -61,7 +98,7 @@ void IncrementalMarkingJob::Task::RunInternal() {
job_->task_pending_ = false;
if (!incremental_marking->IsStopped()) {
- Step(heap);
+ Step(heap, stack_state_);
if (!incremental_marking->IsStopped()) {
job_->ScheduleTask(heap);
}
diff --git a/deps/v8/src/heap/incremental-marking-job.h b/deps/v8/src/heap/incremental-marking-job.h
index 902989b613..a2202c7504 100644
--- a/deps/v8/src/heap/incremental-marking-job.h
+++ b/deps/v8/src/heap/incremental-marking-job.h
@@ -18,31 +18,18 @@ class Isolate;
// step and posts another task until the marking is completed.
class IncrementalMarkingJob {
public:
- class Task : public CancelableTask {
- public:
- explicit Task(Isolate* isolate, IncrementalMarkingJob* job)
- : CancelableTask(isolate), isolate_(isolate), job_(job) {}
- static void Step(Heap* heap);
- // CancelableTask overrides.
- void RunInternal() override;
+ IncrementalMarkingJob() = default;
- Isolate* isolate() { return isolate_; }
-
- private:
- Isolate* isolate_;
- IncrementalMarkingJob* job_;
- };
-
- IncrementalMarkingJob() : task_pending_(false) {}
-
- bool TaskPending() { return task_pending_; }
+ bool TaskPending() const { return task_pending_; }
void Start(Heap* heap);
void ScheduleTask(Heap* heap);
private:
- bool task_pending_;
+ class Task;
+
+ bool task_pending_ = false;
};
} // namespace internal
} // namespace v8
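
The header refactor above is a standard compile-time firewall: the task becomes a forward-declared private nested class defined only in the .cc file, keeping its dependencies out of the header, and a default member initializer replaces the hand-written constructor. A sketch of the shape, with hypothetical names:

    // job-sketch.h
    class JobSketch {
     public:
      JobSketch() = default;
      bool TaskPending() const { return task_pending_; }

     private:
      class Task;  // defined only in the .cc file

      bool task_pending_ = false;  // replaces the initializing constructor
    };
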
diff --git a/deps/v8/src/heap/incremental-marking.cc b/deps/v8/src/heap/incremental-marking.cc
index 239f416eaf..ea86e4f7c9 100644
--- a/deps/v8/src/heap/incremental-marking.cc
+++ b/deps/v8/src/heap/incremental-marking.cc
@@ -4,10 +4,10 @@
#include "src/heap/incremental-marking.h"
-#include "src/code-stubs.h"
#include "src/compilation-cache.h"
#include "src/conversions.h"
#include "src/heap/concurrent-marking.h"
+#include "src/heap/embedder-tracing.h"
#include "src/heap/gc-idle-time-handler.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/heap-inl.h"
@@ -18,6 +18,7 @@
#include "src/heap/objects-visiting.h"
#include "src/heap/sweeper.h"
#include "src/objects/hash-table-inl.h"
+#include "src/objects/slots-inl.h"
#include "src/tracing/trace-event.h"
#include "src/v8.h"
#include "src/visitors.h"
@@ -39,19 +40,8 @@ void IncrementalMarking::Observer::Step(int bytes_allocated, Address addr,
heap->isolate(),
RuntimeCallCounterId::kGC_Custom_IncrementalMarkingObserver);
incremental_marking_.AdvanceIncrementalMarkingOnAllocation();
- if (incremental_marking_.black_allocation() && addr != kNullAddress) {
- // AdvanceIncrementalMarkingOnAllocation can start black allocation.
- // Ensure that the new object is marked black.
- HeapObject* object = HeapObject::FromAddress(addr);
- if (incremental_marking_.marking_state()->IsWhite(object) &&
- !(Heap::InNewSpace(object) || heap->new_lo_space()->Contains(object))) {
- if (heap->lo_space()->Contains(object)) {
- incremental_marking_.marking_state()->WhiteToBlack(object);
- } else {
- Page::FromAddress(addr)->CreateBlackArea(addr, addr + size);
- }
- }
- }
+ // AdvanceIncrementalMarkingOnAllocation can start incremental marking.
+ incremental_marking_.EnsureBlackAllocated(addr, size);
}
IncrementalMarking::IncrementalMarking(
@@ -77,8 +67,8 @@ IncrementalMarking::IncrementalMarking(
SetState(STOPPED);
}
-bool IncrementalMarking::BaseRecordWrite(HeapObject* obj, Object* value) {
- HeapObject* value_heap_obj = HeapObject::cast(value);
+bool IncrementalMarking::BaseRecordWrite(HeapObject obj, Object value) {
+ HeapObject value_heap_obj = HeapObject::cast(value);
DCHECK(!marking_state()->IsImpossible(value_heap_obj));
DCHECK(!marking_state()->IsImpossible(obj));
#ifdef V8_CONCURRENT_MARKING
@@ -95,27 +85,28 @@ bool IncrementalMarking::BaseRecordWrite(HeapObject* obj, Object* value) {
return is_compacting_ && need_recording;
}
-void IncrementalMarking::RecordWriteSlow(HeapObject* obj,
- HeapObjectReference** slot,
- Object* value) {
- if (BaseRecordWrite(obj, value) && slot != nullptr) {
+void IncrementalMarking::RecordWriteSlow(HeapObject obj, HeapObjectSlot slot,
+ Object value) {
+ if (BaseRecordWrite(obj, value) && slot.address() != kNullAddress) {
+ // Object is not going to be rescanned, so we need to record the slot.
heap_->mark_compact_collector()->RecordSlot(obj, slot,
HeapObject::cast(value));
}
}
-int IncrementalMarking::RecordWriteFromCode(HeapObject* obj, MaybeObject** slot,
+int IncrementalMarking::RecordWriteFromCode(Address raw_obj,
+ Address slot_address,
Isolate* isolate) {
- DCHECK(obj->IsHeapObject());
+ HeapObject obj = HeapObject::cast(Object(raw_obj));
+ MaybeObjectSlot slot(slot_address);
isolate->heap()->incremental_marking()->RecordMaybeWeakWrite(obj, slot,
*slot);
  // Called by RecordWriteCodeStubAssembler, which doesn't accept void type
return 0;
}
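
As the updated header comment later in this diff explains, generated code can only pass machine words through an ExternalReference call, so the entry point now receives two raw Addresses and re-wraps them into typed values on the C++ side. A sketch of that re-typing step, with hypothetical stand-ins for V8's classes:

    #include <cstdint>

    using Address = uintptr_t;

    struct HeapObjectSketch {
      Address ptr;
      static HeapObjectSketch FromAddress(Address a) { return {a}; }
    };

    struct MaybeObjectSlotSketch {
      explicit MaybeObjectSlotSketch(Address a) : address(a) {}
      Address address;
    };

    // The stub hands over two untyped words; the runtime re-types them
    // before doing any real work.
    int RecordWriteFromCodeSketch(Address raw_obj, Address slot_address) {
      HeapObjectSketch obj = HeapObjectSketch::FromAddress(raw_obj);
      MaybeObjectSlotSketch slot(slot_address);
      (void)obj;
      (void)slot;
      return 0;  // the assembler call site expects a non-void return
    }
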
-void IncrementalMarking::RecordWriteIntoCode(Code* host, RelocInfo* rinfo,
- HeapObject* value) {
+void IncrementalMarking::RecordWriteIntoCode(Code host, RelocInfo* rinfo,
+ HeapObject value) {
DCHECK(IsMarking());
if (BaseRecordWrite(host, value)) {
// Object is not going to be rescanned. We need to record the slot.
@@ -123,7 +114,7 @@ void IncrementalMarking::RecordWriteIntoCode(Code* host, RelocInfo* rinfo,
}
}
-bool IncrementalMarking::WhiteToGreyAndPush(HeapObject* obj) {
+bool IncrementalMarking::WhiteToGreyAndPush(HeapObject obj) {
if (marking_state()->WhiteToGrey(obj)) {
marking_worklist()->Push(obj);
return true;
@@ -131,83 +122,42 @@ bool IncrementalMarking::WhiteToGreyAndPush(HeapObject* obj) {
return false;
}
-void IncrementalMarking::MarkBlackAndPush(HeapObject* obj) {
- // Marking left-trimmable fixed array black is unsafe because left-trimming
- // re-pushes only grey arrays onto the marking worklist.
- DCHECK(!obj->IsFixedArrayBase());
- // Color the object black and push it into the bailout deque.
+void IncrementalMarking::MarkBlackAndVisitObjectDueToLayoutChange(
+ HeapObject obj) {
+ TRACE_EVENT0("v8", "V8.GCIncrementalMarkingLayoutChange");
+ TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_INCREMENTAL_LAYOUT_CHANGE);
marking_state()->WhiteToGrey(obj);
if (marking_state()->GreyToBlack(obj)) {
- if (FLAG_concurrent_marking) {
- marking_worklist()->PushBailout(obj);
- } else {
- marking_worklist()->Push(obj);
- }
+ RevisitObject(obj);
}
}
-void IncrementalMarking::NotifyLeftTrimming(HeapObject* from, HeapObject* to) {
+void IncrementalMarking::NotifyLeftTrimming(HeapObject from, HeapObject to) {
DCHECK(IsMarking());
- DCHECK(MemoryChunk::FromAddress(from->address())->SweepingDone());
- DCHECK_EQ(MemoryChunk::FromAddress(from->address()),
- MemoryChunk::FromAddress(to->address()));
+ DCHECK(MemoryChunk::FromHeapObject(from)->SweepingDone());
+ DCHECK_EQ(MemoryChunk::FromHeapObject(from), MemoryChunk::FromHeapObject(to));
DCHECK_NE(from, to);
- MarkBit old_mark_bit = marking_state()->MarkBitFrom(from);
MarkBit new_mark_bit = marking_state()->MarkBitFrom(to);
if (black_allocation() && Marking::IsBlack<kAtomicity>(new_mark_bit)) {
    // Nothing to do if the object is in a black area.
return;
}
-
- bool marked_black_due_to_left_trimming = false;
- if (FLAG_concurrent_marking) {
- // We need to mark the array black before overwriting its map and length
- // so that the concurrent marker does not observe inconsistent state.
- Marking::WhiteToGrey<kAtomicity>(old_mark_bit);
- if (Marking::GreyToBlack<kAtomicity>(old_mark_bit)) {
- // The concurrent marker will not mark the array. We need to push the
- // new array start in marking deque to ensure that it will be marked.
- marked_black_due_to_left_trimming = true;
- }
- DCHECK(Marking::IsBlack<kAtomicity>(old_mark_bit));
- }
-
- if (Marking::IsBlack<kAtomicity>(old_mark_bit) &&
- !marked_black_due_to_left_trimming) {
- // The array was black before left trimming or was marked black by the
- // concurrent marker. Simply transfer the color.
- if (from->address() + kPointerSize == to->address()) {
- // The old and the new markbits overlap. The |to| object has the
- // grey color. To make it black, we need to set the second bit.
- DCHECK(new_mark_bit.Get<kAtomicity>());
- new_mark_bit.Next().Set<kAtomicity>();
- } else {
- bool success = Marking::WhiteToBlack<kAtomicity>(new_mark_bit);
- DCHECK(success);
- USE(success);
- }
- } else if (Marking::IsGrey<kAtomicity>(old_mark_bit) ||
- marked_black_due_to_left_trimming) {
- // The array was already grey or was marked black by this function.
- // Mark the new array grey and push it to marking deque.
- if (from->address() + kPointerSize == to->address()) {
- // The old and the new markbits overlap. The |to| object is either white
- // or grey. Set the first bit to make sure that it is grey.
- new_mark_bit.Set<kAtomicity>();
- DCHECK(!new_mark_bit.Next().Get<kAtomicity>());
- } else {
- bool success = Marking::WhiteToGrey<kAtomicity>(new_mark_bit);
- DCHECK(success);
- USE(success);
- }
- // Subsequent left-trimming will re-push only grey arrays.
- // Ensure that this array is grey.
- DCHECK(Marking::IsGrey<kAtomicity>(new_mark_bit));
- marking_worklist()->PushBailout(to);
- RestartIfNotMarking();
+ MarkBlackAndVisitObjectDueToLayoutChange(from);
+ DCHECK(marking_state()->IsBlack(from));
+ // Mark the new address as black.
+ if (from->address() + kTaggedSize == to->address()) {
+ // The old and the new markbits overlap. The |to| object has the
+ // grey color. To make it black, we need to set the second bit.
+ DCHECK(new_mark_bit.Get<kAtomicity>());
+ new_mark_bit.Next().Set<kAtomicity>();
+ } else {
+ bool success = Marking::WhiteToBlack<kAtomicity>(new_mark_bit);
+ DCHECK(success);
+ USE(success);
}
+ DCHECK(marking_state()->IsBlack(to));
}
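
The overlap case above relies on V8's two-bit marking encoding: white is (0,0), grey is (1,0), black is (1,1), with an object's mark bits starting at its first tagged word. When |to| begins exactly one tagged word after |from|, |to|'s first mark bit is |from|'s second bit, which blackening |from| already set, so |to| reads as grey and only the next bit needs setting. A sketch of the bit-level reasoning, assuming that encoding:

    #include <bitset>
    #include <cassert>
    #include <cstddef>

    // bitmap[i] is the mark bit for the i-th tagged word of the page.
    void MakeBlackViaOverlap(std::bitset<64>& bitmap, size_t from_index) {
      size_t to_index = from_index + 1;  // |to| starts one word later
      // |from| is black: bits (from_index, from_index + 1) are set, so
      // |to|'s first bit (to_index) is already set, i.e. |to| is grey.
      assert(bitmap[to_index]);
      bitmap[to_index + 1] = true;  // set the second bit: grey -> black
    }
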
class IncrementalMarkingRootMarkingVisitor : public RootVisitor {
@@ -217,18 +167,18 @@ class IncrementalMarkingRootMarkingVisitor : public RootVisitor {
: heap_(incremental_marking->heap()) {}
void VisitRootPointer(Root root, const char* description,
- Object** p) override {
+ FullObjectSlot p) override {
MarkObjectByPointer(p);
}
- void VisitRootPointers(Root root, const char* description, Object** start,
- Object** end) override {
- for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
+ void VisitRootPointers(Root root, const char* description,
+ FullObjectSlot start, FullObjectSlot end) override {
+ for (FullObjectSlot p = start; p < end; ++p) MarkObjectByPointer(p);
}
private:
- void MarkObjectByPointer(Object** p) {
- Object* obj = *p;
+ void MarkObjectByPointer(FullObjectSlot p) {
+ Object obj = *p;
if (!obj->IsHeapObject()) return;
heap_->incremental_marking()->WhiteToGreyAndPush(HeapObject::cast(obj));
@@ -262,6 +212,10 @@ void IncrementalMarking::DeactivateIncrementalWriteBarrier() {
for (LargePage* p : *heap_->lo_space()) {
p->SetOldGenerationPageFlags(false);
}
+
+ for (LargePage* p : *heap_->code_lo_space()) {
+ p->SetOldGenerationPageFlags(false);
+ }
}
@@ -288,6 +242,10 @@ void IncrementalMarking::ActivateIncrementalWriteBarrier() {
for (LargePage* p : *heap_->lo_space()) {
p->SetOldGenerationPageFlags(true);
}
+
+ for (LargePage* p : *heap_->code_lo_space()) {
+ p->SetOldGenerationPageFlags(true);
+ }
}
@@ -384,7 +342,7 @@ void IncrementalMarking::StartMarking() {
{
TRACE_GC(heap()->tracer(),
- GCTracer::Scope::MC_INCREMENTAL_WRAPPER_PROLOGUE);
+ GCTracer::Scope::MC_INCREMENTAL_EMBEDDER_PROLOGUE);
heap_->local_embedder_heap_tracer()->TracePrologue();
}
@@ -399,13 +357,7 @@ void IncrementalMarking::StartMarking() {
heap_->isolate()->compilation_cache()->MarkCompactPrologue();
-#ifdef V8_CONCURRENT_MARKING
- // The write-barrier does not check the color of the source object.
- // Start black allocation earlier to ensure faster marking progress.
- if (!black_allocation_) {
- StartBlackAllocation();
- }
-#endif
+ StartBlackAllocation();
// Mark strong roots grey.
IncrementalMarkingRootMarkingVisitor visitor(this);
@@ -422,7 +374,6 @@ void IncrementalMarking::StartMarking() {
}
void IncrementalMarking::StartBlackAllocation() {
- DCHECK(FLAG_black_allocation);
DCHECK(!black_allocation_);
DCHECK(IsMarking());
black_allocation_ = true;
@@ -436,7 +387,6 @@ void IncrementalMarking::StartBlackAllocation() {
}
void IncrementalMarking::PauseBlackAllocation() {
- DCHECK(FLAG_black_allocation);
DCHECK(IsMarking());
heap()->old_space()->UnmarkLinearAllocationArea();
heap()->map_space()->UnmarkLinearAllocationArea();
@@ -458,6 +408,22 @@ void IncrementalMarking::FinishBlackAllocation() {
}
}
+void IncrementalMarking::EnsureBlackAllocated(Address allocated, size_t size) {
+ if (black_allocation() && allocated != kNullAddress) {
+ HeapObject object = HeapObject::FromAddress(allocated);
+ if (marking_state()->IsWhite(object) &&
+ !(Heap::InNewSpace(object) ||
+ heap_->new_lo_space()->Contains(object))) {
+ if (heap_->IsLargeObject(object)) {
+ marking_state()->WhiteToBlack(object);
+ } else {
+ Page::FromAddress(allocated)->CreateBlackArea(allocated,
+ allocated + size);
+ }
+ }
+ }
+}
+
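
EnsureBlackAllocated centralizes the rule that objects allocated in the old generation while black allocation is active must come out black: large objects get their mark bit flipped directly, while regular pages get a whole black region so the linear allocation area stays consistent with the bitmap. A condensed sketch of the decision, in hypothetical predicate form:

    enum class BlackenAction { kNone, kWhiteToBlack, kCreateBlackArea };

    BlackenAction EnsureBlackAllocatedSketch(bool black_allocation,
                                             bool is_white,
                                             bool in_new_space,
                                             bool is_large_object) {
      if (!black_allocation || !is_white || in_new_space)
        return BlackenAction::kNone;
      return is_large_object ? BlackenAction::kWhiteToBlack
                             : BlackenAction::kCreateBlackArea;
    }
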
void IncrementalMarking::MarkRoots() {
DCHECK(!finalize_marking_completed_);
DCHECK(IsMarking());
@@ -466,12 +432,12 @@ void IncrementalMarking::MarkRoots() {
heap_->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG);
}
-bool IncrementalMarking::ShouldRetainMap(Map* map, int age) {
+bool IncrementalMarking::ShouldRetainMap(Map map, int age) {
if (age == 0) {
// The map has aged. Do not retain this map.
return false;
}
- Object* constructor = map->GetConstructor();
+ Object constructor = map->GetConstructor();
if (!constructor->IsHeapObject() ||
marking_state()->IsWhite(HeapObject::cast(constructor))) {
// The constructor is dead, no new objects with this map can
@@ -488,27 +454,27 @@ void IncrementalMarking::RetainMaps() {
// - GC is requested by tests or dev-tools (abort_incremental_marking_).
bool map_retaining_is_disabled = heap()->ShouldReduceMemory() ||
FLAG_retain_maps_for_n_gc == 0;
- WeakArrayList* retained_maps = heap()->retained_maps();
+ WeakArrayList retained_maps = heap()->retained_maps();
int length = retained_maps->length();
// The number_of_disposed_maps separates maps in the retained_maps
// array that were created before and after context disposal.
// We do not age and retain disposed maps to avoid memory leaks.
int number_of_disposed_maps = heap()->number_of_disposed_maps_;
for (int i = 0; i < length; i += 2) {
- MaybeObject* value = retained_maps->Get(i);
- HeapObject* map_heap_object;
+ MaybeObject value = retained_maps->Get(i);
+ HeapObject map_heap_object;
if (!value->GetHeapObjectIfWeak(&map_heap_object)) {
continue;
}
- int age = Smi::ToInt(retained_maps->Get(i + 1)->cast<Smi>());
+ int age = retained_maps->Get(i + 1).ToSmi().value();
int new_age;
- Map* map = Map::cast(map_heap_object);
+ Map map = Map::cast(map_heap_object);
if (i >= number_of_disposed_maps && !map_retaining_is_disabled &&
marking_state()->IsWhite(map)) {
if (ShouldRetainMap(map, age)) {
WhiteToGreyAndPush(map);
}
- Object* prototype = map->prototype();
+ Object prototype = map->prototype();
if (age > 0 && prototype->IsHeapObject() &&
marking_state()->IsWhite(HeapObject::cast(prototype))) {
// The prototype is not marked, age the map.
@@ -547,13 +513,6 @@ void IncrementalMarking::FinalizeIncrementally() {
finalize_marking_completed_ = true;
- if (FLAG_black_allocation && !heap()->ShouldReduceMemory() &&
- !black_allocation_) {
- // TODO(hpayer): Move to an earlier point as soon as we make faster marking
- // progress.
- StartBlackAllocation();
- }
-
if (FLAG_trace_incremental_marking) {
double end = heap_->MonotonicallyIncreasingTimeInMs();
double delta = end - start;
@@ -565,7 +524,7 @@ void IncrementalMarking::FinalizeIncrementally() {
void IncrementalMarking::UpdateMarkingWorklistAfterScavenge() {
if (!IsMarking()) return;
- Map* filler_map = ReadOnlyRoots(heap_).one_pointer_filler_map();
+ Map filler_map = ReadOnlyRoots(heap_).one_pointer_filler_map();
#ifdef ENABLE_MINOR_MC
MinorMarkCompactCollector::MarkingState* minor_marking_state =
@@ -574,8 +533,13 @@ void IncrementalMarking::UpdateMarkingWorklistAfterScavenge() {
void* minor_marking_state = nullptr;
#endif // ENABLE_MINOR_MC
- marking_worklist()->Update([this, filler_map, minor_marking_state](
- HeapObject* obj, HeapObject** out) -> bool {
+ marking_worklist()->Update([
+#ifdef DEBUG
+ // this is referred to inside the DCHECK.
+ this,
+#endif
+ filler_map, minor_marking_state](
+ HeapObject obj, HeapObject* out) -> bool {
DCHECK(obj->IsHeapObject());
// Only pointers to from space have to be updated.
if (Heap::InFromSpace(obj)) {
@@ -588,14 +552,13 @@ void IncrementalMarking::UpdateMarkingWorklistAfterScavenge() {
// them.
return false;
}
- HeapObject* dest = map_word.ToForwardingAddress();
+ HeapObject dest = map_word.ToForwardingAddress();
DCHECK_IMPLIES(marking_state()->IsWhite(obj), obj->IsFiller());
*out = dest;
return true;
} else if (Heap::InToSpace(obj)) {
// The object may be on a page that was moved in new space.
- DCHECK(
- Page::FromAddress(obj->address())->IsFlagSet(Page::SWEEP_TO_ITERATE));
+ DCHECK(Page::FromHeapObject(obj)->IsFlagSet(Page::SWEEP_TO_ITERATE));
#ifdef ENABLE_MINOR_MC
if (minor_marking_state->IsGrey(obj)) {
*out = obj;
@@ -606,8 +569,7 @@ void IncrementalMarking::UpdateMarkingWorklistAfterScavenge() {
} else {
// The object may be on a page that was moved from new to old space. Only
// applicable during minor MC garbage collections.
- if (Page::FromAddress(obj->address())
- ->IsFlagSet(Page::SWEEP_TO_ITERATE)) {
+ if (Page::FromHeapObject(obj)->IsFlagSet(Page::SWEEP_TO_ITERATE)) {
#ifdef ENABLE_MINOR_MC
if (minor_marking_state->IsGrey(obj)) {
*out = obj;
@@ -632,13 +594,13 @@ void IncrementalMarking::UpdateMarkingWorklistAfterScavenge() {
namespace {
template <typename T>
-T* ForwardingAddress(T* heap_obj) {
+T ForwardingAddress(T heap_obj) {
MapWord map_word = heap_obj->map_word();
if (map_word.IsForwardingAddress()) {
return T::cast(map_word.ToForwardingAddress());
} else if (Heap::InNewSpace(heap_obj)) {
- return nullptr;
+ return T();
} else {
return heap_obj;
}
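
With the migration to value-type object handles there is no nullptr to return for a dead object; a default-constructed value acts as the null sentinel and callers test is_null() instead of comparing pointers, as the updated lambdas below show. A sketch of the pattern, with a hypothetical mini handle:

    #include <cstdint>

    struct HandleSketch {
      uintptr_t ptr = 0;                        // default-constructed: null
      bool is_null() const { return ptr == 0; }
    };

    // Mirrors ForwardingAddress: a moved object yields its new location, a
    // dead new-space object yields the null value, anything else is as-is.
    HandleSketch Forward(HandleSketch obj, HandleSketch forwarded,
                         bool in_new_space) {
      if (!forwarded.is_null()) return forwarded;  // object was moved
      if (in_new_space) return HandleSketch();     // died during scavenge
      return obj;                                  // old generation: fixed
    }
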
@@ -647,31 +609,29 @@ T* ForwardingAddress(T* heap_obj) {
void IncrementalMarking::UpdateWeakReferencesAfterScavenge() {
weak_objects_->weak_references.Update(
- [](std::pair<HeapObject*, HeapObjectReference**> slot_in,
- std::pair<HeapObject*, HeapObjectReference**>* slot_out) -> bool {
- HeapObject* heap_obj = slot_in.first;
- HeapObject* forwarded = ForwardingAddress(heap_obj);
+ [](std::pair<HeapObject, HeapObjectSlot> slot_in,
+ std::pair<HeapObject, HeapObjectSlot>* slot_out) -> bool {
+ HeapObject heap_obj = slot_in.first;
+ HeapObject forwarded = ForwardingAddress(heap_obj);
- if (forwarded) {
+ if (!forwarded.is_null()) {
ptrdiff_t distance_to_slot =
- reinterpret_cast<Address>(slot_in.second) -
- reinterpret_cast<Address>(slot_in.first);
- Address new_slot =
- reinterpret_cast<Address>(forwarded) + distance_to_slot;
+ slot_in.second.address() - slot_in.first.ptr();
+ Address new_slot = forwarded.ptr() + distance_to_slot;
slot_out->first = forwarded;
- slot_out->second = reinterpret_cast<HeapObjectReference**>(new_slot);
+ slot_out->second = HeapObjectSlot(new_slot);
return true;
}
return false;
});
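
The update above exploits the fact that a recorded interior slot moves rigidly with its host object: the slot's offset within the host is invariant, so the new slot address is plain arithmetic on the forwarded base. A minimal sketch of that recomputation:

    #include <cstdint>

    using Address = uintptr_t;

    // A slot inside a moved object keeps its offset from the object start,
    // so only the base address changes.
    Address UpdateInteriorSlot(Address old_host, Address old_slot,
                               Address new_host) {
      Address distance_to_slot = old_slot - old_host;
      return new_host + distance_to_slot;
    }
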
weak_objects_->weak_objects_in_code.Update(
- [](std::pair<HeapObject*, Code*> slot_in,
- std::pair<HeapObject*, Code*>* slot_out) -> bool {
- HeapObject* heap_obj = slot_in.first;
- HeapObject* forwarded = ForwardingAddress(heap_obj);
+ [](std::pair<HeapObject, Code> slot_in,
+ std::pair<HeapObject, Code>* slot_out) -> bool {
+ HeapObject heap_obj = slot_in.first;
+ HeapObject forwarded = ForwardingAddress(heap_obj);
- if (forwarded) {
+ if (!forwarded.is_null()) {
slot_out->first = forwarded;
slot_out->second = slot_in.second;
return true;
@@ -680,10 +640,10 @@ void IncrementalMarking::UpdateWeakReferencesAfterScavenge() {
return false;
});
weak_objects_->ephemeron_hash_tables.Update(
- [](EphemeronHashTable* slot_in, EphemeronHashTable** slot_out) -> bool {
- EphemeronHashTable* forwarded = ForwardingAddress(slot_in);
+ [](EphemeronHashTable slot_in, EphemeronHashTable* slot_out) -> bool {
+ EphemeronHashTable forwarded = ForwardingAddress(slot_in);
- if (forwarded) {
+ if (!forwarded.is_null()) {
*slot_out = forwarded;
return true;
}
@@ -692,12 +652,12 @@ void IncrementalMarking::UpdateWeakReferencesAfterScavenge() {
});
auto ephemeron_updater = [](Ephemeron slot_in, Ephemeron* slot_out) -> bool {
- HeapObject* key = slot_in.key;
- HeapObject* value = slot_in.value;
- HeapObject* forwarded_key = ForwardingAddress(key);
- HeapObject* forwarded_value = ForwardingAddress(value);
+ HeapObject key = slot_in.key;
+ HeapObject value = slot_in.value;
+ HeapObject forwarded_key = ForwardingAddress(key);
+ HeapObject forwarded_value = ForwardingAddress(value);
- if (forwarded_key && forwarded_value) {
+ if (!forwarded_key.is_null() && !forwarded_value.is_null()) {
*slot_out = Ephemeron{forwarded_key, forwarded_value};
return true;
}
@@ -708,6 +668,12 @@ void IncrementalMarking::UpdateWeakReferencesAfterScavenge() {
weak_objects_->current_ephemerons.Update(ephemeron_updater);
weak_objects_->next_ephemerons.Update(ephemeron_updater);
weak_objects_->discovered_ephemerons.Update(ephemeron_updater);
+#ifdef DEBUG
+ weak_objects_->bytecode_flushing_candidates.Iterate(
+ [](SharedFunctionInfo candidate) {
+ DCHECK(!Heap::InNewSpace(candidate));
+ });
+#endif
}
void IncrementalMarking::UpdateMarkedBytesAfterScavenge(
@@ -717,13 +683,13 @@ void IncrementalMarking::UpdateMarkedBytesAfterScavenge(
Min(bytes_marked_ahead_of_schedule_, dead_bytes_in_new_space);
}
-bool IncrementalMarking::IsFixedArrayWithProgressBar(HeapObject* obj) {
+bool IncrementalMarking::IsFixedArrayWithProgressBar(HeapObject obj) {
if (!obj->IsFixedArray()) return false;
- MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
+ MemoryChunk* chunk = MemoryChunk::FromHeapObject(obj);
return chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR);
}
-int IncrementalMarking::VisitObject(Map* map, HeapObject* obj) {
+int IncrementalMarking::VisitObject(Map map, HeapObject obj) {
DCHECK(marking_state()->IsGrey(obj) || marking_state()->IsBlack(obj));
if (!marking_state()->GreyToBlack(obj)) {
// The object can already be black in these cases:
@@ -733,8 +699,11 @@ int IncrementalMarking::VisitObject(Map* map, HeapObject* obj) {
// 3. The object is a string that was colored black before
// unsafe layout change.
    // 4. The object is materialized by the deoptimizer.
+ // 5. The object is a descriptor array marked black by
+ // the descriptor array marking barrier.
DCHECK(obj->IsHashTable() || obj->IsPropertyArray() ||
- obj->IsFixedArray() || obj->IsJSObject() || obj->IsString());
+ obj->IsFixedArray() || obj->IsContext() || obj->IsJSObject() ||
+ obj->IsString() || obj->IsDescriptorArray());
}
DCHECK(marking_state()->IsBlack(obj));
WhiteToGreyAndPush(map);
@@ -743,38 +712,44 @@ int IncrementalMarking::VisitObject(Map* map, HeapObject* obj) {
return visitor.Visit(map, obj);
}
-void IncrementalMarking::ProcessBlackAllocatedObject(HeapObject* obj) {
+void IncrementalMarking::ProcessBlackAllocatedObject(HeapObject obj) {
if (IsMarking() && marking_state()->IsBlack(obj)) {
RevisitObject(obj);
}
}
-void IncrementalMarking::RevisitObject(HeapObject* obj) {
+void IncrementalMarking::RevisitObject(HeapObject obj) {
DCHECK(IsMarking());
- DCHECK(FLAG_concurrent_marking || marking_state()->IsBlack(obj));
- Page* page = Page::FromAddress(obj->address());
+ DCHECK(marking_state()->IsBlack(obj));
+ Page* page = Page::FromHeapObject(obj);
if (page->owner()->identity() == LO_SPACE) {
page->ResetProgressBar();
}
- Map* map = obj->map();
+ Map map = obj->map();
WhiteToGreyAndPush(map);
IncrementalMarkingMarkingVisitor visitor(heap()->mark_compact_collector(),
marking_state());
visitor.Visit(map, obj);
}
-template <WorklistToProcess worklist_to_process>
+void IncrementalMarking::VisitDescriptors(HeapObject host,
+ DescriptorArray descriptors,
+ int number_of_own_descriptors) {
+ IncrementalMarkingMarkingVisitor visitor(heap()->mark_compact_collector(),
+ marking_state());
+ // This is necessary because the Scavenger records slots only for the
+ // promoted black objects and the marking visitor of DescriptorArray skips
+ // the descriptors marked by the visitor.VisitDescriptors() below.
+ visitor.MarkDescriptorArrayBlack(host, descriptors);
+ visitor.VisitDescriptors(descriptors, number_of_own_descriptors);
+}
+
intptr_t IncrementalMarking::ProcessMarkingWorklist(
intptr_t bytes_to_process, ForceCompletionAction completion) {
intptr_t bytes_processed = 0;
while (bytes_processed < bytes_to_process || completion == FORCE_COMPLETION) {
- HeapObject* obj;
- if (worklist_to_process == WorklistToProcess::kBailout) {
- obj = marking_worklist()->PopBailout();
- } else {
- obj = marking_worklist()->Pop();
- }
- if (obj == nullptr) break;
+ HeapObject obj = marking_worklist()->Pop();
+ if (obj.is_null()) break;
// Left trimming may result in white, grey, or black filler objects on the
// marking deque. Ignore these objects.
if (obj->IsFiller()) {
@@ -785,39 +760,35 @@ intptr_t IncrementalMarking::ProcessMarkingWorklist(
int size = VisitObject(obj->map(), obj);
bytes_processed += size - unscanned_bytes_of_large_object_;
}
- // Report all found wrappers to the embedder. This is necessary as the
- // embedder could potentially invalidate wrappers as soon as V8 is done
- // with its incremental marking processing. Any cached wrappers could
- // result in broken pointers at this point.
- heap_->local_embedder_heap_tracer()->RegisterWrappersWithRemoteTracer();
return bytes_processed;
}
void IncrementalMarking::EmbedderStep(double duration_ms) {
- constexpr int kObjectsToProcessBeforeInterrupt = 100;
-
- TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_INCREMENTAL_WRAPPER_TRACING);
+ constexpr size_t kObjectsToProcessBeforeInterrupt = 500;
- const double deadline =
- heap_->MonotonicallyIncreasingTimeInMs() + duration_ms;
-
- HeapObject* object;
- int cnt = 0;
- while (marking_worklist()->embedder()->Pop(0, &object)) {
- heap_->TracePossibleWrapper(JSObject::cast(object));
- if (++cnt == kObjectsToProcessBeforeInterrupt) {
- cnt = 0;
- if (heap_->MonotonicallyIncreasingTimeInMs() > deadline) {
- break;
+ TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_INCREMENTAL_EMBEDDER_TRACING);
+ double deadline = heap_->MonotonicallyIncreasingTimeInMs() + duration_ms;
+ bool empty_worklist;
+ do {
+ {
+ LocalEmbedderHeapTracer::ProcessingScope scope(
+ heap_->local_embedder_heap_tracer());
+ HeapObject object;
+ size_t cnt = 0;
+ empty_worklist = true;
+ while (marking_worklist()->embedder()->Pop(0, &object)) {
+ scope.TracePossibleWrapper(JSObject::cast(object));
+ if (++cnt == kObjectsToProcessBeforeInterrupt) {
+ cnt = 0;
+ empty_worklist = false;
+ break;
+ }
}
}
- }
-
- heap_->local_embedder_heap_tracer()->RegisterWrappersWithRemoteTracer();
- if (!heap_->local_embedder_heap_tracer()
- ->ShouldFinalizeIncrementalMarking()) {
heap_->local_embedder_heap_tracer()->Trace(deadline);
- }
+ } while (!empty_worklist &&
+ (heap_->MonotonicallyIncreasingTimeInMs() < deadline));
+ heap_->local_embedder_heap_tracer()->SetEmbedderWorklistEmpty(empty_worklist);
}
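
The rewritten EmbedderStep drains the embedder worklist in fixed batches (now 500 objects, up from 100) and re-checks the deadline between batches, so one step can overshoot its budget by at most a single batch. A sketch of the batching loop under those assumptions, with hypothetical types:

    #include <deque>
    #include <functional>

    void DrainWithDeadline(std::deque<int>* worklist,
                           const std::function<double()>& now_ms,
                           double deadline_ms) {
      constexpr size_t kBatch = 500;
      bool empty_worklist;
      do {
        size_t count = 0;
        empty_worklist = true;
        while (!worklist->empty()) {
          worklist->pop_front();  // trace the popped object here
          if (++count == kBatch) {
            empty_worklist = false;  // interrupted; more work may remain
            break;
          }
        }
      } while (!empty_worklist && now_ms() < deadline_ms);
    }
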
void IncrementalMarking::Hurry() {
@@ -927,6 +898,11 @@ void IncrementalMarking::Epilogue() {
finalize_marking_completed_ = false;
}
+bool IncrementalMarking::ShouldDoEmbedderStep() {
+ return state_ == MARKING && FLAG_incremental_marking_wrappers &&
+ heap_->local_embedder_heap_tracer()->InUse();
+}
+
double IncrementalMarking::AdvanceIncrementalMarking(
double deadline_in_ms, CompletionAction completion_action,
StepOrigin step_origin) {
@@ -935,27 +911,22 @@ double IncrementalMarking::AdvanceIncrementalMarking(
TRACE_EVENT0("v8", "V8.GCIncrementalMarking");
TRACE_GC(heap_->tracer(), GCTracer::Scope::MC_INCREMENTAL);
DCHECK(!IsStopped());
- DCHECK_EQ(
- 0, heap_->local_embedder_heap_tracer()->NumberOfCachedWrappersToTrace());
double remaining_time_in_ms = 0.0;
- intptr_t step_size_in_bytes = GCIdleTimeHandler::EstimateMarkingStepSize(
- kStepSizeInMs,
- heap()->tracer()->IncrementalMarkingSpeedInBytesPerMillisecond());
-
- const bool incremental_wrapper_tracing =
- state_ == MARKING && FLAG_incremental_marking_wrappers &&
- heap_->local_embedder_heap_tracer()->InUse();
do {
- if (incremental_wrapper_tracing && trace_wrappers_toggle_) {
+ if (ShouldDoEmbedderStep() && trace_wrappers_toggle_) {
EmbedderStep(kStepSizeInMs);
} else {
+ const intptr_t step_size_in_bytes =
+ GCIdleTimeHandler::EstimateMarkingStepSize(
+ kStepSizeInMs,
+ heap()->tracer()->IncrementalMarkingSpeedInBytesPerMillisecond());
Step(step_size_in_bytes, completion_action, step_origin);
}
trace_wrappers_toggle_ = !trace_wrappers_toggle_;
remaining_time_in_ms =
deadline_in_ms - heap()->MonotonicallyIncreasingTimeInMs();
- } while (remaining_time_in_ms >= kStepSizeInMs && !IsComplete() &&
+ } while (remaining_time_in_ms > kStepSizeInMs && !IsComplete() &&
!marking_worklist()->IsEmpty());
return remaining_time_in_ms;
}
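
The loop above interleaves embedder tracing and V8 marking through a single toggle bit that flips every iteration, giving each side roughly alternate step slots while the deadline allows; the marking step size is now also recomputed per iteration from the current marking speed. A sketch of the alternation:

    #include <functional>

    void AdvanceSketch(bool* toggle, bool embedder_in_use,
                       const std::function<void()>& embedder_step,
                       const std::function<void()>& v8_step) {
      if (embedder_in_use && *toggle) {
        embedder_step();
      } else {
        v8_step();  // step size estimated from recent marking speed
      }
      *toggle = !*toggle;  // alternate on the next call
    }
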
@@ -985,24 +956,18 @@ size_t IncrementalMarking::StepSizeToKeepUpWithAllocations() {
}
size_t IncrementalMarking::StepSizeToMakeProgress() {
- // We increase step size gradually based on the time passed in order to
- // leave marking work to standalone tasks. The ramp up duration and the
- // target step count are chosen based on benchmarks.
- const int kRampUpIntervalMs = 300;
const size_t kTargetStepCount = 256;
const size_t kTargetStepCountAtOOM = 32;
+ const size_t kMaxStepSizeInByte = 256 * KB;
size_t oom_slack = heap()->new_space()->Capacity() + 64 * MB;
if (!heap()->CanExpandOldGeneration(oom_slack)) {
return heap()->OldGenerationSizeOfObjects() / kTargetStepCountAtOOM;
}
- size_t step_size = Max(initial_old_generation_size_ / kTargetStepCount,
- IncrementalMarking::kMinStepSizeInBytes);
- double time_passed_ms =
- heap_->MonotonicallyIncreasingTimeInMs() - start_time_ms_;
- double factor = Min(time_passed_ms / kRampUpIntervalMs, 1.0);
- return static_cast<size_t>(factor * step_size);
+ return Min(Max(initial_old_generation_size_ / kTargetStepCount,
+ IncrementalMarking::kMinStepSizeInBytes),
+ kMaxStepSizeInByte);
}
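
The new StepSizeToMakeProgress drops the time-based ramp-up in favor of a pure clamp: one step is 1/256 of the initial old generation, bounded below by the minimum step size and above by a 256 KB cap that keeps individual pauses short. A sketch of the clamp; the minimum value here is an assumption, not V8's actual constant:

    #include <algorithm>
    #include <cstddef>

    size_t StepSizeSketch(size_t initial_old_generation_size) {
      constexpr size_t KB = 1024;
      constexpr size_t kMinStepSizeInBytes = 64 * KB;  // assumed minimum
      constexpr size_t kTargetStepCount = 256;
      constexpr size_t kMaxStepSizeInBytes = 256 * KB;
      return std::min(
          std::max(initial_old_generation_size / kTargetStepCount,
                   kMinStepSizeInBytes),
          kMaxStepSizeInBytes);
    }
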
void IncrementalMarking::AdvanceIncrementalMarkingOnAllocation() {
@@ -1013,54 +978,64 @@ void IncrementalMarking::AdvanceIncrementalMarkingOnAllocation() {
return;
}
+ HistogramTimerScope incremental_marking_scope(
+ heap_->isolate()->counters()->gc_incremental_marking());
+ TRACE_EVENT0("v8", "V8.GCIncrementalMarking");
+ TRACE_GC(heap_->tracer(), GCTracer::Scope::MC_INCREMENTAL);
+
+ double embedder_step_time_ms = 0.0;
+ if (ShouldDoEmbedderStep() && trace_wrappers_toggle_) {
+ double start = heap_->MonotonicallyIncreasingTimeInMs();
+ EmbedderStep(kMaxStepSizeInMs);
+ embedder_step_time_ms = heap_->MonotonicallyIncreasingTimeInMs() - start;
+ }
+ trace_wrappers_toggle_ = !trace_wrappers_toggle_;
+
size_t bytes_to_process =
StepSizeToKeepUpWithAllocations() + StepSizeToMakeProgress();
+ if (bytes_to_process >= IncrementalMarking::kMinStepSizeInBytes &&
+ embedder_step_time_ms < kMaxStepSizeInMs) {
+ StepOnAllocation(bytes_to_process,
+ kMaxStepSizeInMs - embedder_step_time_ms);
+ }
+}
- if (bytes_to_process >= IncrementalMarking::kMinStepSizeInBytes) {
- HistogramTimerScope incremental_marking_scope(
- heap_->isolate()->counters()->gc_incremental_marking());
- TRACE_EVENT0("v8", "V8.GCIncrementalMarking");
- TRACE_GC(heap_->tracer(), GCTracer::Scope::MC_INCREMENTAL);
- // The first step after Scavenge will see many allocated bytes.
- // Cap the step size to distribute the marking work more uniformly.
- size_t max_step_size = GCIdleTimeHandler::EstimateMarkingStepSize(
- kMaxStepSizeInMs,
- heap()->tracer()->IncrementalMarkingSpeedInBytesPerMillisecond());
- bytes_to_process = Min(bytes_to_process, max_step_size);
- size_t bytes_processed = 0;
- if (FLAG_concurrent_marking) {
- bytes_processed = Step(bytes_to_process, GC_VIA_STACK_GUARD,
- StepOrigin::kV8, WorklistToProcess::kBailout);
- bytes_to_process = (bytes_processed >= bytes_to_process)
- ? 0
- : bytes_to_process - bytes_processed;
- size_t current_bytes_marked_concurrently =
- heap()->concurrent_marking()->TotalMarkedBytes();
- // The concurrent_marking()->TotalMarkedBytes() is not monothonic for a
- // short period of time when a concurrent marking task is finishing.
- if (current_bytes_marked_concurrently > bytes_marked_concurrently_) {
- bytes_marked_ahead_of_schedule_ +=
- current_bytes_marked_concurrently - bytes_marked_concurrently_;
- bytes_marked_concurrently_ = current_bytes_marked_concurrently;
- }
- }
- if (bytes_marked_ahead_of_schedule_ >= bytes_to_process) {
- // Steps performed in tasks and concurrently have put us ahead of
- // schedule. We skip processing of marking dequeue here and thus shift
- // marking time from inside V8 to standalone tasks.
- bytes_marked_ahead_of_schedule_ -= bytes_to_process;
- bytes_processed += bytes_to_process;
- bytes_to_process = IncrementalMarking::kMinStepSizeInBytes;
+void IncrementalMarking::StepOnAllocation(size_t bytes_to_process,
+ double max_step_size) {
+ // The first step after Scavenge will see many allocated bytes.
+ // Cap the step size to distribute the marking work more uniformly.
+ size_t step_size = GCIdleTimeHandler::EstimateMarkingStepSize(
+ max_step_size,
+ heap()->tracer()->IncrementalMarkingSpeedInBytesPerMillisecond());
+ bytes_to_process = Min(bytes_to_process, step_size);
+ size_t bytes_processed = 0;
+ if (FLAG_concurrent_marking) {
+ size_t current_bytes_marked_concurrently =
+ heap()->concurrent_marking()->TotalMarkedBytes();
+ // The concurrent_marking()->TotalMarkedBytes() is not monotonic for a
+ // short period of time when a concurrent marking task is finishing.
+ if (current_bytes_marked_concurrently > bytes_marked_concurrently_) {
+ bytes_marked_ahead_of_schedule_ +=
+ current_bytes_marked_concurrently - bytes_marked_concurrently_;
+ bytes_marked_concurrently_ = current_bytes_marked_concurrently;
}
- bytes_processed += Step(bytes_to_process, GC_VIA_STACK_GUARD,
- StepOrigin::kV8, WorklistToProcess::kAll);
- bytes_allocated_ -= Min(bytes_allocated_, bytes_processed);
}
+ if (bytes_marked_ahead_of_schedule_ >= bytes_to_process) {
+ // Steps performed in tasks and concurrently have put us ahead of
+ // schedule. We skip processing of the marking deque here and thus shift
+ // marking time from inside V8 to standalone tasks.
+ bytes_marked_ahead_of_schedule_ -= bytes_to_process;
+ bytes_processed += bytes_to_process;
+ bytes_to_process = IncrementalMarking::kMinStepSizeInBytes;
+ }
+ bytes_processed +=
+ Step(bytes_to_process, GC_VIA_STACK_GUARD, StepOrigin::kV8);
+ bytes_allocated_ -= Min(bytes_allocated_, bytes_processed);
}
size_t IncrementalMarking::Step(size_t bytes_to_process,
- CompletionAction action, StepOrigin step_origin,
- WorklistToProcess worklist_to_process) {
+ CompletionAction action,
+ StepOrigin step_origin) {
double start = heap_->MonotonicallyIncreasingTimeInMs();
if (state_ == SWEEPING) {
@@ -1087,13 +1062,7 @@ size_t IncrementalMarking::Step(size_t bytes_to_process,
}
#endif
- if (worklist_to_process == WorklistToProcess::kBailout) {
- bytes_processed =
- ProcessMarkingWorklist<WorklistToProcess::kBailout>(bytes_to_process);
- } else {
- bytes_processed =
- ProcessMarkingWorklist<WorklistToProcess::kAll>(bytes_to_process);
- }
+ bytes_processed = ProcessMarkingWorklist(bytes_to_process);
if (step_origin == StepOrigin::kTask) {
bytes_marked_ahead_of_schedule_ += bytes_processed;
diff --git a/deps/v8/src/heap/incremental-marking.h b/deps/v8/src/heap/incremental-marking.h
index ee774c230f..bbf12f6bc0 100644
--- a/deps/v8/src/heap/incremental-marking.h
+++ b/deps/v8/src/heap/incremental-marking.h
@@ -20,7 +20,6 @@ class Object;
class PagedSpace;
enum class StepOrigin { kV8, kTask };
-enum class WorklistToProcess { kAll, kBailout };
class V8_EXPORT_PRIVATE IncrementalMarking {
public:
@@ -98,9 +97,9 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
return &non_atomic_marking_state_;
}
- void NotifyLeftTrimming(HeapObject* from, HeapObject* to);
+ void NotifyLeftTrimming(HeapObject from, HeapObject to);
- V8_INLINE void TransferColor(HeapObject* from, HeapObject* to);
+ V8_INLINE void TransferColor(HeapObject from, HeapObject to);
State state() const {
DCHECK(state_ == STOPPED || FLAG_incremental_marking);
@@ -175,13 +174,18 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
void FinalizeSweeping();
size_t Step(size_t bytes_to_process, CompletionAction action,
- StepOrigin step_origin,
- WorklistToProcess worklist_to_process = WorklistToProcess::kAll);
+ StepOrigin step_origin);
+ void StepOnAllocation(size_t bytes_to_process, double max_step_size);
+
+ bool ShouldDoEmbedderStep();
void EmbedderStep(double duration);
inline void RestartIfNotMarking();
- static int RecordWriteFromCode(HeapObject* obj, MaybeObject** slot,
+ // {raw_obj} and {slot_address} are raw Address values instead of a
+ // HeapObject and a MaybeObjectSlot because this is called from
+ // generated code via ExternalReference.
+ static int RecordWriteFromCode(Address raw_obj, Address slot_address,
Isolate* isolate);
// Record a slot for compaction. Returns false for objects that are
@@ -190,34 +194,35 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
// No slots in white objects should be recorded, as some slots are typed and
// cannot be interpreted correctly if the underlying object does not survive
// the incremental cycle (stays white).
- V8_INLINE bool BaseRecordWrite(HeapObject* obj, Object* value);
- V8_INLINE void RecordWrite(HeapObject* obj, Object** slot, Object* value);
- V8_INLINE void RecordMaybeWeakWrite(HeapObject* obj, MaybeObject** slot,
- MaybeObject* value);
- void RevisitObject(HeapObject* obj);
-
- void RecordWriteSlow(HeapObject* obj, HeapObjectReference** slot,
- Object* value);
- void RecordWriteIntoCode(Code* host, RelocInfo* rinfo, HeapObject* value);
+ V8_INLINE bool BaseRecordWrite(HeapObject obj, Object value);
+ V8_INLINE void RecordWrite(HeapObject obj, ObjectSlot slot, Object value);
+ V8_INLINE void RecordMaybeWeakWrite(HeapObject obj, MaybeObjectSlot slot,
+ MaybeObject value);
+ void RevisitObject(HeapObject obj);
+ // Ensures that all descriptors in the range [0, number_of_own_descriptors)
+ // are visited.
+ void VisitDescriptors(HeapObject host, DescriptorArray array,
+ int number_of_own_descriptors);
+
+ void RecordWriteSlow(HeapObject obj, HeapObjectSlot slot, Object value);
+ void RecordWriteIntoCode(Code host, RelocInfo* rinfo, HeapObject value);
// Returns true if the function succeeds in transitioning the object
// from white to grey.
- bool WhiteToGreyAndPush(HeapObject* obj);
+ bool WhiteToGreyAndPush(HeapObject obj);
// This function is used to color the object black before it undergoes an
// unsafe layout change. This is a part of synchronization protocol with
// the concurrent marker.
- void MarkBlackAndPush(HeapObject* obj);
+ void MarkBlackAndVisitObjectDueToLayoutChange(HeapObject obj);
bool IsCompacting() { return IsMarking() && is_compacting_; }
- void ActivateGeneratedStub(Code* stub);
-
void NotifyIncompleteScanOfObject(int unscanned_bytes) {
unscanned_bytes_of_large_object_ = unscanned_bytes;
}
- void ProcessBlackAllocatedObject(HeapObject* obj);
+ void ProcessBlackAllocatedObject(HeapObject obj);
Heap* heap() const { return heap_; }
@@ -239,6 +244,10 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
void Deactivate();
+ // Ensures that the given region is black allocated if it is in the old
+ // generation.
+ void EnsureBlackAllocated(Address allocated, size_t size);
+
private:
class Observer : public AllocationObserver {
public:
@@ -259,7 +268,7 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
void FinishBlackAllocation();
void MarkRoots();
- bool ShouldRetainMap(Map* map, int age);
+ bool ShouldRetainMap(Map map, int age);
// Retain dying maps for <FLAG_retain_maps_for_n_gc> garbage collections to
// increase chances of reusing of map transition tree in future.
void RetainMaps();
@@ -272,15 +281,14 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
void DeactivateIncrementalWriteBarrierForSpace(NewSpace* space);
void DeactivateIncrementalWriteBarrier();
- template <WorklistToProcess worklist_to_process = WorklistToProcess::kAll>
V8_INLINE intptr_t ProcessMarkingWorklist(
intptr_t bytes_to_process,
ForceCompletionAction completion = DO_NOT_FORCE_COMPLETION);
- V8_INLINE bool IsFixedArrayWithProgressBar(HeapObject* object);
+ V8_INLINE bool IsFixedArrayWithProgressBar(HeapObject object);
// Visits the object and returns its size.
- V8_INLINE int VisitObject(Map* map, HeapObject* obj);
+ V8_INLINE int VisitObject(Map map, HeapObject obj);
void IncrementIdleMarkingDelayCounter();
diff --git a/deps/v8/src/heap/invalidated-slots-inl.h b/deps/v8/src/heap/invalidated-slots-inl.h
index 5e4610257e..583d443eda 100644
--- a/deps/v8/src/heap/invalidated-slots-inl.h
+++ b/deps/v8/src/heap/invalidated-slots-inl.h
@@ -31,7 +31,7 @@ bool InvalidatedSlotsFilter::IsValid(Address slot) {
DCHECK_LE(invalidated_end_, iterator_->first->address());
invalidated_start_ = iterator_->first->address();
invalidated_end_ = invalidated_start_ + iterator_->second;
- invalidated_object_ = nullptr;
+ invalidated_object_ = HeapObject();
invalidated_object_size_ = 0;
} else {
invalidated_start_ = sentinel_;
@@ -45,7 +45,7 @@ bool InvalidatedSlotsFilter::IsValid(Address slot) {
}
// The invalidated region includes the slot.
// Ask the object if the slot is valid.
- if (invalidated_object_ == nullptr) {
+ if (invalidated_object_.is_null()) {
invalidated_object_ = HeapObject::FromAddress(invalidated_start_);
DCHECK(!invalidated_object_->IsFiller());
invalidated_object_size_ =
diff --git a/deps/v8/src/heap/invalidated-slots.cc b/deps/v8/src/heap/invalidated-slots.cc
index 42042c63ef..a5b835441b 100644
--- a/deps/v8/src/heap/invalidated-slots.cc
+++ b/deps/v8/src/heap/invalidated-slots.cc
@@ -31,7 +31,6 @@ InvalidatedSlotsFilter::InvalidatedSlotsFilter(MemoryChunk* chunk) {
invalidated_end_ = sentinel_;
}
// These values will be lazily set when needed.
- invalidated_object_ = nullptr;
invalidated_object_size_ = 0;
#ifdef DEBUG
last_slot_ = chunk->area_start();
diff --git a/deps/v8/src/heap/invalidated-slots.h b/deps/v8/src/heap/invalidated-slots.h
index 641e8feb91..364bb22781 100644
--- a/deps/v8/src/heap/invalidated-slots.h
+++ b/deps/v8/src/heap/invalidated-slots.h
@@ -10,18 +10,17 @@
#include "src/allocation.h"
#include "src/base/atomic-utils.h"
+#include "src/objects/heap-object.h"
#include "src/utils.h"
namespace v8 {
namespace internal {
-class HeapObject;
-
// This data structure stores objects that went through object layout change
// that potentially invalidates slots recorded concurrently. The second part
// of each element is the size of the corresponding object before the layout
// change.
-using InvalidatedSlots = std::map<HeapObject*, int>;
+using InvalidatedSlots = std::map<HeapObject, int, Object::Comparer>;
// This class provides IsValid predicate that takes into account the set
// of invalidated objects in the given memory chunk.
@@ -40,7 +39,7 @@ class InvalidatedSlotsFilter {
Address sentinel_;
Address invalidated_start_;
Address invalidated_end_;
- HeapObject* invalidated_object_;
+ HeapObject invalidated_object_;
int invalidated_object_size_;
bool slots_in_free_space_are_valid_;
InvalidatedSlots empty_;
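
Because HeapObject is now a value type rather than a pointer, std::map keys can no longer rely on built-in pointer ordering; the Object::Comparer argument supplies an explicit ordering over the underlying word (the same change applied to the retainer maps in heap.h above). A hypothetical mini version of the pattern:

    #include <cstdint>
    #include <map>

    struct HeapObjectSketch {
      uintptr_t ptr;
    };

    struct ComparerSketch {
      bool operator()(const HeapObjectSketch& a,
                      const HeapObjectSketch& b) const {
        return a.ptr < b.ptr;  // order by the underlying address word
      }
    };

    using InvalidatedSlotsSketch =
        std::map<HeapObjectSketch, int, ComparerSketch>;
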
diff --git a/deps/v8/src/heap/item-parallel-job.cc b/deps/v8/src/heap/item-parallel-job.cc
index b536ccc5d4..85dd55c593 100644
--- a/deps/v8/src/heap/item-parallel-job.cc
+++ b/deps/v8/src/heap/item-parallel-job.cc
@@ -119,7 +119,7 @@ void ItemParallelJob::Run(const std::shared_ptr<Counters>& async_counters) {
// Wait for background tasks.
for (size_t i = 0; i < num_tasks; i++) {
if (cancelable_task_manager_->TryAbort(task_ids[i]) !=
- CancelableTaskManager::kTaskAborted) {
+ TryAbortResult::kTaskAborted) {
pending_tasks_->Wait();
}
}
diff --git a/deps/v8/src/heap/local-allocator-inl.h b/deps/v8/src/heap/local-allocator-inl.h
index 7263387465..b8f0bdc5b5 100644
--- a/deps/v8/src/heap/local-allocator-inl.h
+++ b/deps/v8/src/heap/local-allocator-inl.h
@@ -30,7 +30,7 @@ AllocationResult LocalAllocator::Allocate(AllocationSpace space,
}
}
-void LocalAllocator::FreeLast(AllocationSpace space, HeapObject* object,
+void LocalAllocator::FreeLast(AllocationSpace space, HeapObject object,
int object_size) {
switch (space) {
case NEW_SPACE:
@@ -46,7 +46,7 @@ void LocalAllocator::FreeLast(AllocationSpace space, HeapObject* object,
}
}
-void LocalAllocator::FreeLastInNewSpace(HeapObject* object, int object_size) {
+void LocalAllocator::FreeLastInNewSpace(HeapObject object, int object_size) {
if (!new_space_lab_.TryFreeLast(object, object_size)) {
// We couldn't free the last object so we have to write a proper filler.
heap_->CreateFillerObjectAt(object->address(), object_size,
@@ -54,7 +54,7 @@ void LocalAllocator::FreeLastInNewSpace(HeapObject* object, int object_size) {
}
}
-void LocalAllocator::FreeLastInOldSpace(HeapObject* object, int object_size) {
+void LocalAllocator::FreeLastInOldSpace(HeapObject object, int object_size) {
if (!compaction_spaces_.Get(OLD_SPACE)->TryFreeLast(object, object_size)) {
// We couldn't free the last object so we have to write a proper filler.
heap_->CreateFillerObjectAt(object->address(), object_size,
diff --git a/deps/v8/src/heap/local-allocator.h b/deps/v8/src/heap/local-allocator.h
index e84c7188c2..ad99f07fac 100644
--- a/deps/v8/src/heap/local-allocator.h
+++ b/deps/v8/src/heap/local-allocator.h
@@ -43,7 +43,7 @@ class LocalAllocator {
inline AllocationResult Allocate(AllocationSpace space, int object_size,
AllocationAlignment alignment);
- inline void FreeLast(AllocationSpace space, HeapObject* object,
+ inline void FreeLast(AllocationSpace space, HeapObject object,
int object_size);
private:
@@ -52,8 +52,8 @@ class LocalAllocator {
inline bool NewLocalAllocationBuffer();
inline AllocationResult AllocateInLAB(int object_size,
AllocationAlignment alignment);
- inline void FreeLastInNewSpace(HeapObject* object, int object_size);
- inline void FreeLastInOldSpace(HeapObject* object, int object_size);
+ inline void FreeLastInNewSpace(HeapObject object, int object_size);
+ inline void FreeLastInOldSpace(HeapObject object, int object_size);
Heap* const heap_;
NewSpace* const new_space_;
diff --git a/deps/v8/src/heap/mark-compact-inl.h b/deps/v8/src/heap/mark-compact-inl.h
index 449ca43e50..cb73fedca7 100644
--- a/deps/v8/src/heap/mark-compact-inl.h
+++ b/deps/v8/src/heap/mark-compact-inl.h
@@ -5,19 +5,22 @@
#ifndef V8_HEAP_MARK_COMPACT_INL_H_
#define V8_HEAP_MARK_COMPACT_INL_H_
+#include "src/assembler-inl.h"
#include "src/base/bits.h"
#include "src/heap/mark-compact.h"
#include "src/heap/objects-visiting-inl.h"
#include "src/heap/remembered-set.h"
#include "src/objects/js-collection-inl.h"
+#include "src/objects/js-weak-refs-inl.h"
+#include "src/objects/slots-inl.h"
+#include "src/transitions.h"
namespace v8 {
namespace internal {
template <typename ConcreteState, AccessMode access_mode>
-bool MarkingStateBase<ConcreteState, access_mode>::GreyToBlack(
- HeapObject* obj) {
- MemoryChunk* p = MemoryChunk::FromAddress(obj->address());
+bool MarkingStateBase<ConcreteState, access_mode>::GreyToBlack(HeapObject obj) {
+ MemoryChunk* p = MemoryChunk::FromHeapObject(obj);
MarkBit markbit = MarkBitFrom(p, obj->address());
if (!Marking::GreyToBlack<access_mode>(markbit)) return false;
static_cast<ConcreteState*>(this)->IncrementLiveBytes(p, obj->Size());
@@ -25,14 +28,13 @@ bool MarkingStateBase<ConcreteState, access_mode>::GreyToBlack(
}
template <typename ConcreteState, AccessMode access_mode>
-bool MarkingStateBase<ConcreteState, access_mode>::WhiteToGrey(
- HeapObject* obj) {
+bool MarkingStateBase<ConcreteState, access_mode>::WhiteToGrey(HeapObject obj) {
return Marking::WhiteToGrey<access_mode>(MarkBitFrom(obj));
}
template <typename ConcreteState, AccessMode access_mode>
bool MarkingStateBase<ConcreteState, access_mode>::WhiteToBlack(
- HeapObject* obj) {
+ HeapObject obj) {
return WhiteToGrey(obj) && GreyToBlack(obj);
}
@@ -43,24 +45,71 @@ MarkingVisitor<fixed_array_mode, retaining_path_mode,
MarkingState* marking_state)
: heap_(collector->heap()),
collector_(collector),
- marking_state_(marking_state) {}
+ marking_state_(marking_state),
+ mark_compact_epoch_(collector->epoch()) {}
template <FixedArrayVisitationMode fixed_array_mode,
TraceRetainingPathMode retaining_path_mode, typename MarkingState>
int MarkingVisitor<fixed_array_mode, retaining_path_mode,
- MarkingState>::VisitBytecodeArray(Map* map,
- BytecodeArray* array) {
+ MarkingState>::VisitBytecodeArray(Map map,
+ BytecodeArray array) {
int size = BytecodeArray::BodyDescriptor::SizeOf(map, array);
BytecodeArray::BodyDescriptor::IterateBody(map, array, size, this);
- array->MakeOlder();
+
+ if (!heap_->is_current_gc_forced()) {
+ array->MakeOlder();
+ }
return size;
}
template <FixedArrayVisitationMode fixed_array_mode,
TraceRetainingPathMode retaining_path_mode, typename MarkingState>
int MarkingVisitor<fixed_array_mode, retaining_path_mode,
- MarkingState>::VisitFixedArray(Map* map,
- FixedArray* object) {
+ MarkingState>::VisitDescriptorArray(Map map,
+ DescriptorArray array) {
+ int size = DescriptorArray::BodyDescriptor::SizeOf(map, array);
+ VisitPointers(array, array->GetFirstPointerSlot(),
+ array->GetDescriptorSlot(0));
+ VisitDescriptors(array, array->number_of_descriptors());
+ return size;
+}
+
+template <FixedArrayVisitationMode fixed_array_mode,
+ TraceRetainingPathMode retaining_path_mode, typename MarkingState>
+int MarkingVisitor<fixed_array_mode, retaining_path_mode, MarkingState>::
+ VisitSharedFunctionInfo(Map map, SharedFunctionInfo shared_info) {
+ int size = SharedFunctionInfo::BodyDescriptor::SizeOf(map, shared_info);
+ SharedFunctionInfo::BodyDescriptor::IterateBody(map, shared_info, size, this);
+
+ // If the SharedFunctionInfo has old bytecode, mark it as flushable;
+ // otherwise visit the function data field strongly.
+ if (shared_info->ShouldFlushBytecode()) {
+ collector_->AddBytecodeFlushingCandidate(shared_info);
+ } else {
+ VisitPointer(shared_info,
+ HeapObject::RawField(shared_info,
+ SharedFunctionInfo::kFunctionDataOffset));
+ }
+ return size;
+}
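
Note: the branch above is the heart of bytecode flushing: an old-bytecode SharedFunctionInfo is queued instead of having its function-data field visited strongly, so the bytecode can be dropped after marking unless something else kept it alive. A minimal sketch of that deferral; the types and fields are illustrative stand-ins.

    #include <initializer_list>
    #include <iostream>
    #include <queue>

    struct SharedInfo {
      bool has_bytecode = true;
      bool bytecode_is_old = false;
      bool ShouldFlushBytecode() const { return has_bytecode && bytecode_is_old; }
    };

    int main() {
      std::queue<SharedInfo*> flushing_candidates;
      SharedInfo hot{true, false};
      SharedInfo cold{true, true};
      for (SharedInfo* s : {&hot, &cold}) {
        if (s->ShouldFlushBytecode())
          flushing_candidates.push(s);  // defer: bytecode may be dropped
        // else: the function-data field is visited strongly, keeping it alive
      }
      while (!flushing_candidates.empty()) {  // after marking: flush survivors
        flushing_candidates.front()->has_bytecode = false;
        flushing_candidates.pop();
      }
      std::cout << hot.has_bytecode << cold.has_bytecode << "\n";  // prints 10
    }
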
+
+template <FixedArrayVisitationMode fixed_array_mode,
+ TraceRetainingPathMode retaining_path_mode, typename MarkingState>
+int MarkingVisitor<fixed_array_mode, retaining_path_mode,
+ MarkingState>::VisitJSFunction(Map map, JSFunction object) {
+ int size = Parent::VisitJSFunction(map, object);
+
+ // Check if the JSFunction needs to be reset due to bytecode being flushed.
+ if (FLAG_flush_bytecode && object->NeedsResetDueToFlushedBytecode()) {
+ collector_->AddFlushedJSFunction(object);
+ }
+
+ return size;
+}
+template <FixedArrayVisitationMode fixed_array_mode,
+ TraceRetainingPathMode retaining_path_mode, typename MarkingState>
+int MarkingVisitor<fixed_array_mode, retaining_path_mode,
+ MarkingState>::VisitFixedArray(Map map, FixedArray object) {
return (fixed_array_mode == FixedArrayVisitationMode::kRegular)
? Parent::VisitFixedArray(map, object)
: VisitFixedArrayIncremental(map, object);
@@ -71,10 +120,10 @@ template <FixedArrayVisitationMode fixed_array_mode,
template <typename T>
V8_INLINE int
MarkingVisitor<fixed_array_mode, retaining_path_mode,
- MarkingState>::VisitEmbedderTracingSubclass(Map* map,
- T* object) {
+ MarkingState>::VisitEmbedderTracingSubclass(Map map, T object) {
if (heap_->local_embedder_heap_tracer()->InUse()) {
- heap_->TracePossibleWrapper(object);
+ marking_worklist()->embedder()->Push(MarkCompactCollectorBase::kMainThread,
+ object);
}
int size = T::BodyDescriptor::SizeOf(map, object);
T::BodyDescriptor::IterateBody(map, object, size, this);
@@ -84,57 +133,56 @@ MarkingVisitor<fixed_array_mode, retaining_path_mode,
template <FixedArrayVisitationMode fixed_array_mode,
TraceRetainingPathMode retaining_path_mode, typename MarkingState>
int MarkingVisitor<fixed_array_mode, retaining_path_mode,
- MarkingState>::VisitJSApiObject(Map* map, JSObject* object) {
+ MarkingState>::VisitJSApiObject(Map map, JSObject object) {
return VisitEmbedderTracingSubclass(map, object);
}
template <FixedArrayVisitationMode fixed_array_mode,
TraceRetainingPathMode retaining_path_mode, typename MarkingState>
int MarkingVisitor<fixed_array_mode, retaining_path_mode,
- MarkingState>::VisitJSArrayBuffer(Map* map,
- JSArrayBuffer* object) {
+ MarkingState>::VisitJSArrayBuffer(Map map,
+ JSArrayBuffer object) {
return VisitEmbedderTracingSubclass(map, object);
}
template <FixedArrayVisitationMode fixed_array_mode,
TraceRetainingPathMode retaining_path_mode, typename MarkingState>
int MarkingVisitor<fixed_array_mode, retaining_path_mode,
- MarkingState>::VisitJSDataView(Map* map,
- JSDataView* object) {
+ MarkingState>::VisitJSDataView(Map map, JSDataView object) {
return VisitEmbedderTracingSubclass(map, object);
}
template <FixedArrayVisitationMode fixed_array_mode,
TraceRetainingPathMode retaining_path_mode, typename MarkingState>
int MarkingVisitor<fixed_array_mode, retaining_path_mode,
- MarkingState>::VisitJSTypedArray(Map* map,
- JSTypedArray* object) {
+ MarkingState>::VisitJSTypedArray(Map map,
+ JSTypedArray object) {
return VisitEmbedderTracingSubclass(map, object);
}
template <FixedArrayVisitationMode fixed_array_mode,
TraceRetainingPathMode retaining_path_mode, typename MarkingState>
int MarkingVisitor<fixed_array_mode, retaining_path_mode, MarkingState>::
- VisitEphemeronHashTable(Map* map, EphemeronHashTable* table) {
+ VisitEphemeronHashTable(Map map, EphemeronHashTable table) {
collector_->AddEphemeronHashTable(table);
for (int i = 0; i < table->Capacity(); i++) {
- Object** key_slot =
+ ObjectSlot key_slot =
table->RawFieldOfElementAt(EphemeronHashTable::EntryToIndex(i));
- HeapObject* key = HeapObject::cast(table->KeyAt(i));
+ HeapObject key = HeapObject::cast(table->KeyAt(i));
collector_->RecordSlot(table, key_slot, key);
- Object** value_slot =
+ ObjectSlot value_slot =
table->RawFieldOfElementAt(EphemeronHashTable::EntryToValueIndex(i));
if (marking_state()->IsBlackOrGrey(key)) {
VisitPointer(table, value_slot);
} else {
- Object* value_obj = *value_slot;
+ Object value_obj = *value_slot;
if (value_obj->IsHeapObject()) {
- HeapObject* value = HeapObject::cast(value_obj);
+ HeapObject value = HeapObject::cast(value_obj);
collector_->RecordSlot(table, value_slot, value);
// Revisit ephemerons with both key and value unreachable at end
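
Note: this visitor encodes ephemeron (EphemeronHashTable) semantics: a value must be treated as live only if its key is live, and key liveness may itself be established by other ephemerons, so unresolved pairs are revisited until a fixpoint. A self-contained sketch of that fixpoint with toy integer objects; V8 drains dedicated worklists rather than rescanning all pairs.

    #include <iostream>
    #include <set>
    #include <utility>
    #include <vector>

    using Obj = int;  // toy stand-in for a heap object identity

    int main() {
      std::set<Obj> live = {1};                        // marked so far
      std::vector<std::pair<Obj, Obj>> ephemerons = {  // {key, value} pairs
          {1, 2},   // key live -> value becomes live
          {2, 3},   // key made live above -> only found on a later scan
          {9, 4}};  // key dead -> value stays dead

      bool changed = true;
      while (changed) {  // rescan until no new object becomes live
        changed = false;
        for (const auto& [key, value] : ephemerons)
          if (live.count(key) && live.insert(value).second) changed = true;
      }
      for (Obj o : live) std::cout << o << " ";  // prints: 1 2 3
    }
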
@@ -152,23 +200,37 @@ int MarkingVisitor<fixed_array_mode, retaining_path_mode, MarkingState>::
template <FixedArrayVisitationMode fixed_array_mode,
TraceRetainingPathMode retaining_path_mode, typename MarkingState>
int MarkingVisitor<fixed_array_mode, retaining_path_mode,
- MarkingState>::VisitMap(Map* map, Map* object) {
- // When map collection is enabled we have to mark through map's transitions
- // and back pointers in a special way to make these links weak.
- int size = Map::BodyDescriptor::SizeOf(map, object);
- if (object->CanTransition()) {
- MarkMapContents(object);
- } else {
- Map::BodyDescriptor::IterateBody(map, object, size, this);
+ MarkingState>::VisitMap(Map meta_map, Map map) {
+ int size = Map::BodyDescriptor::SizeOf(meta_map, map);
+ if (map->CanTransition()) {
+ // Maps that can transition share their descriptor arrays and require
+ // special visiting logic to avoid memory leaks.
+ // Since descriptor arrays are potentially shared, ensure that only the
+ // descriptors that belong to this map are marked. The first time a
+ // non-empty descriptor array is marked, its header is also visited. The
+ // slot holding the descriptor array will be implicitly recorded when the
+ // pointer fields of this map are visited.
+ DescriptorArray descriptors = map->instance_descriptors();
+ MarkDescriptorArrayBlack(map, descriptors);
+ int number_of_own_descriptors = map->NumberOfOwnDescriptors();
+ if (number_of_own_descriptors) {
+ DCHECK_LE(number_of_own_descriptors,
+ descriptors->number_of_descriptors());
+ VisitDescriptors(descriptors, number_of_own_descriptors);
+ }
+ // Mark the pointer fields of the Map. Since the transitions array has
+ // been marked already, it is fine that one of these fields contains a
+ // pointer to it.
}
+ Map::BodyDescriptor::IterateBody(meta_map, map, size, this);
return size;
}
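
Note: transitioning maps share a single DescriptorArray along the transition tree, so each map marks only its own first NumberOfOwnDescriptors entries; descriptors reachable only through dead transitions can then be trimmed. A toy sketch of "mark only what you own" over a shared array; the structures are illustrative.

    #include <iostream>
    #include <vector>

    struct DescriptorArray {
      std::vector<bool> marked;  // one flag per descriptor slot
      explicit DescriptorArray(int n) : marked(n, false) {}
    };

    // Each map visits only the descriptors it owns.
    void VisitMapDescriptors(DescriptorArray& descs, int number_of_own) {
      for (int i = 0; i < number_of_own; ++i) descs.marked[i] = true;
    }

    int main() {
      DescriptorArray shared(4);       // shared by a whole transition chain
      VisitMapDescriptors(shared, 2);  // parent map owns descriptors 0..1
      VisitMapDescriptors(shared, 3);  // child map owns descriptors 0..2
      for (bool m : shared.marked) std::cout << m;  // 1110: slot 3 trimmable
    }
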
template <FixedArrayVisitationMode fixed_array_mode,
TraceRetainingPathMode retaining_path_mode, typename MarkingState>
int MarkingVisitor<fixed_array_mode, retaining_path_mode,
- MarkingState>::VisitTransitionArray(Map* map,
- TransitionArray* array) {
+ MarkingState>::VisitTransitionArray(Map map,
+ TransitionArray array) {
int size = TransitionArray::BodyDescriptor::SizeOf(map, array);
TransitionArray::BodyDescriptor::IterateBody(map, array, size, this);
collector_->AddTransitionArray(array);
@@ -177,46 +239,90 @@ int MarkingVisitor<fixed_array_mode, retaining_path_mode,
template <FixedArrayVisitationMode fixed_array_mode,
TraceRetainingPathMode retaining_path_mode, typename MarkingState>
-void MarkingVisitor<fixed_array_mode, retaining_path_mode,
- MarkingState>::VisitPointer(HeapObject* host, Object** p) {
- if (!(*p)->IsHeapObject()) return;
- HeapObject* target_object = HeapObject::cast(*p);
- collector_->RecordSlot(host, p, target_object);
- MarkObject(host, target_object);
+int MarkingVisitor<fixed_array_mode, retaining_path_mode,
+ MarkingState>::VisitJSWeakRef(Map map, JSWeakRef weak_ref) {
+ if (weak_ref->target()->IsHeapObject()) {
+ HeapObject target = HeapObject::cast(weak_ref->target());
+ if (marking_state()->IsBlackOrGrey(target)) {
+ // Record the slot inside the JSWeakRef, since the IterateBody below
+ // won't visit it.
+ ObjectSlot slot =
+ HeapObject::RawField(weak_ref, JSWeakCell::kTargetOffset);
+ collector_->RecordSlot(weak_ref, slot, target);
+ } else {
+ // JSWeakRef points to a potentially dead object. We have to process
+ // it when we know the liveness of the whole transitive closure.
+ collector_->AddWeakRef(weak_ref);
+ }
+ }
+ int size = JSWeakRef::BodyDescriptor::SizeOf(map, weak_ref);
+ JSWeakRef::BodyDescriptor::IterateBody(map, weak_ref, size, this);
+ return size;
+}
+
+template <FixedArrayVisitationMode fixed_array_mode,
+ TraceRetainingPathMode retaining_path_mode, typename MarkingState>
+int MarkingVisitor<fixed_array_mode, retaining_path_mode,
+ MarkingState>::VisitJSWeakCell(Map map,
+ JSWeakCell weak_cell) {
+ if (weak_cell->target()->IsHeapObject()) {
+ HeapObject target = HeapObject::cast(weak_cell->target());
+ if (marking_state()->IsBlackOrGrey(target)) {
+ // Record the slot inside the JSWeakCell, since the IterateBody below
+ // won't visit it.
+ ObjectSlot slot =
+ HeapObject::RawField(weak_cell, JSWeakCell::kTargetOffset);
+ collector_->RecordSlot(weak_cell, slot, target);
+ } else {
+ // JSWeakCell points to a potentially dead object. We have to process
+ // it when we know the liveness of the whole transitive closure.
+ collector_->AddWeakCell(weak_cell);
+ }
+ }
+ int size = JSWeakCell::BodyDescriptor::SizeOf(map, weak_cell);
+ JSWeakCell::BodyDescriptor::IterateBody(map, weak_cell, size, this);
+ return size;
}
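
Note: VisitJSWeakRef and VisitJSWeakCell above share one policy: if the weak target is already marked, record the slot and move on; otherwise defer the reference and clear it after marking if the target stayed unmarked. A compact sketch of that policy; the types and the liveness check are stand-ins.

    #include <iostream>
    #include <vector>

    struct WeakRef { int* target; };

    int main() {
      int live_obj = 1, dead_obj = 2;
      auto is_marked = [&](int* p) { return p == &live_obj; };  // fake marking

      WeakRef a{&live_obj}, b{&dead_obj};
      std::vector<WeakRef*> deferred;
      for (WeakRef* w : {&a, &b}) {
        if (is_marked(w->target)) {
          // target known live: record the slot, nothing to clear later
        } else {
          deferred.push_back(w);  // liveness unknown until marking finishes
        }
      }
      for (WeakRef* w : deferred)
        if (!is_marked(w->target)) w->target = nullptr;  // clear dead target
      std::cout << (a.target != nullptr) << (b.target != nullptr) << "\n";  // 10
    }
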
+// class template arguments
template <FixedArrayVisitationMode fixed_array_mode,
TraceRetainingPathMode retaining_path_mode, typename MarkingState>
+// method template arguments
+template <typename TSlot>
void MarkingVisitor<fixed_array_mode, retaining_path_mode,
- MarkingState>::VisitPointer(HeapObject* host,
- MaybeObject** p) {
- HeapObject* target_object;
- if ((*p)->GetHeapObjectIfStrong(&target_object)) {
- collector_->RecordSlot(host, reinterpret_cast<HeapObjectReference**>(p),
- target_object);
+ MarkingState>::VisitPointerImpl(HeapObject host,
+ TSlot slot) {
+ static_assert(std::is_same<TSlot, ObjectSlot>::value ||
+ std::is_same<TSlot, MaybeObjectSlot>::value,
+ "Only ObjectSlot and MaybeObjectSlot are expected here");
+ typename TSlot::TObject object = *slot;
+ HeapObject target_object;
+ if (object.GetHeapObjectIfStrong(&target_object)) {
+ collector_->RecordSlot(host, HeapObjectSlot(slot), target_object);
MarkObject(host, target_object);
- } else if ((*p)->GetHeapObjectIfWeak(&target_object)) {
+ } else if (TSlot::kCanBeWeak && object.GetHeapObjectIfWeak(&target_object)) {
if (marking_state()->IsBlackOrGrey(target_object)) {
// Weak references with live values are directly processed here to reduce
// the processing time of weak cells during the main GC pause.
- collector_->RecordSlot(host, reinterpret_cast<HeapObjectReference**>(p),
- target_object);
+ collector_->RecordSlot(host, HeapObjectSlot(slot), target_object);
} else {
// If we do not know about liveness of values of weak cells, we have to
// process them when we know the liveness of the whole transitive
// closure.
- collector_->AddWeakReference(host,
- reinterpret_cast<HeapObjectReference**>(p));
+ collector_->AddWeakReference(host, HeapObjectSlot(slot));
}
}
}
+// class template arguments
template <FixedArrayVisitationMode fixed_array_mode,
TraceRetainingPathMode retaining_path_mode, typename MarkingState>
+// method template arguments
+template <typename TSlot>
void MarkingVisitor<fixed_array_mode, retaining_path_mode,
- MarkingState>::VisitPointers(HeapObject* host,
- Object** start, Object** end) {
- for (Object** p = start; p < end; p++) {
+ MarkingState>::VisitPointersImpl(HeapObject host,
+ TSlot start, TSlot end) {
+ for (TSlot p = start; p < end; ++p) {
VisitPointer(host, p);
}
}
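
Note: VisitPointerImpl/VisitPointersImpl replace the old Object**/MaybeObject** overload pair with one body templated on the slot type; the static kCanBeWeak flag lets the weak-reference branch be compiled out entirely for strong slots. A minimal sketch of the pattern (C++17 if constexpr); the slot structs are illustrative.

    #include <iostream>

    struct ObjectSlot      { static constexpr bool kCanBeWeak = false; };
    struct MaybeObjectSlot { static constexpr bool kCanBeWeak = true;  };

    // One visitor body for both slot kinds; the weak branch disappears at
    // compile time when the slot type cannot hold weak references.
    template <typename TSlot>
    void VisitSlot(TSlot) {
      if constexpr (TSlot::kCanBeWeak)
        std::cout << "try strong, then weak handling\n";  // MaybeObjectSlot
      else
        std::cout << "strong-only handling\n";            // ObjectSlot
    }

    int main() {
      VisitSlot(ObjectSlot{});
      VisitSlot(MaybeObjectSlot{});
    }
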
@@ -224,60 +330,56 @@ void MarkingVisitor<fixed_array_mode, retaining_path_mode,
template <FixedArrayVisitationMode fixed_array_mode,
TraceRetainingPathMode retaining_path_mode, typename MarkingState>
void MarkingVisitor<fixed_array_mode, retaining_path_mode,
- MarkingState>::VisitPointers(HeapObject* host,
- MaybeObject** start,
- MaybeObject** end) {
- for (MaybeObject** p = start; p < end; p++) {
- VisitPointer(host, p);
- }
-}
-
-template <FixedArrayVisitationMode fixed_array_mode,
- TraceRetainingPathMode retaining_path_mode, typename MarkingState>
-void MarkingVisitor<fixed_array_mode, retaining_path_mode,
- MarkingState>::VisitEmbeddedPointer(Code* host,
+ MarkingState>::VisitEmbeddedPointer(Code host,
RelocInfo* rinfo) {
DCHECK(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
- HeapObject* object = HeapObject::cast(rinfo->target_object());
+ HeapObject object = HeapObject::cast(rinfo->target_object());
collector_->RecordRelocSlot(host, rinfo, object);
- if (!host->IsWeakObject(object)) {
- MarkObject(host, object);
- } else if (!marking_state()->IsBlackOrGrey(object)) {
- collector_->AddWeakObjectInCode(object, host);
+ if (!marking_state()->IsBlackOrGrey(object)) {
+ if (host->IsWeakObject(object)) {
+ collector_->AddWeakObjectInCode(object, host);
+ } else {
+ MarkObject(host, object);
+ }
}
}
template <FixedArrayVisitationMode fixed_array_mode,
TraceRetainingPathMode retaining_path_mode, typename MarkingState>
void MarkingVisitor<fixed_array_mode, retaining_path_mode,
- MarkingState>::VisitCodeTarget(Code* host,
+ MarkingState>::VisitCodeTarget(Code host,
RelocInfo* rinfo) {
DCHECK(RelocInfo::IsCodeTargetMode(rinfo->rmode()));
- Code* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
+ Code target = Code::GetCodeFromTargetAddress(rinfo->target_address());
collector_->RecordRelocSlot(host, rinfo, target);
MarkObject(host, target);
}
template <FixedArrayVisitationMode fixed_array_mode,
TraceRetainingPathMode retaining_path_mode, typename MarkingState>
-bool MarkingVisitor<fixed_array_mode, retaining_path_mode,
- MarkingState>::MarkObjectWithoutPush(HeapObject* host,
- HeapObject* object) {
- if (marking_state()->WhiteToBlack(object)) {
+void MarkingVisitor<fixed_array_mode, retaining_path_mode, MarkingState>::
+ MarkDescriptorArrayBlack(HeapObject host, DescriptorArray descriptors) {
+ // Note that WhiteToBlack is not sufficient here because it fails if the
+ // descriptor array is grey. So we need to do two steps: WhiteToGrey and
+ // GreyToBlack. Alternatively, we could check WhiteToGrey || WhiteToBlack.
+ if (marking_state()->WhiteToGrey(descriptors)) {
if (retaining_path_mode == TraceRetainingPathMode::kEnabled &&
V8_UNLIKELY(FLAG_track_retaining_path)) {
- heap_->AddRetainer(host, object);
+ heap_->AddRetainer(host, descriptors);
}
- return true;
}
- return false;
+ if (marking_state()->GreyToBlack(descriptors)) {
+ VisitPointers(descriptors, descriptors->GetFirstPointerSlot(),
+ descriptors->GetDescriptorSlot(0));
+ }
+ DCHECK(marking_state()->IsBlack(descriptors));
}
template <FixedArrayVisitationMode fixed_array_mode,
TraceRetainingPathMode retaining_path_mode, typename MarkingState>
void MarkingVisitor<fixed_array_mode, retaining_path_mode,
- MarkingState>::MarkObject(HeapObject* host,
- HeapObject* object) {
+ MarkingState>::MarkObject(HeapObject host,
+ HeapObject object) {
if (marking_state()->WhiteToGrey(object)) {
marking_worklist()->Push(object);
if (retaining_path_mode == TraceRetainingPathMode::kEnabled &&
@@ -290,81 +392,53 @@ void MarkingVisitor<fixed_array_mode, retaining_path_mode,
template <FixedArrayVisitationMode fixed_array_mode,
TraceRetainingPathMode retaining_path_mode, typename MarkingState>
int MarkingVisitor<fixed_array_mode, retaining_path_mode, MarkingState>::
- VisitFixedArrayIncremental(Map* map, FixedArray* object) {
- MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
- int object_size = FixedArray::BodyDescriptor::SizeOf(map, object);
+ VisitFixedArrayIncremental(Map map, FixedArray object) {
+ MemoryChunk* chunk = MemoryChunk::FromHeapObject(object);
+ int size = FixedArray::BodyDescriptor::SizeOf(map, object);
if (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR)) {
- DCHECK(!FLAG_use_marking_progress_bar ||
- chunk->owner()->identity() == LO_SPACE);
- // When using a progress bar for large fixed arrays, scan only a chunk of
- // the array and try to push it onto the marking deque again until it is
- // fully scanned. Fall back to scanning it through to the end in case this
- // fails because of a full deque.
- int start_offset =
+ DCHECK(FLAG_use_marking_progress_bar);
+ DCHECK(heap_->IsLargeObject(object));
+ int start =
Max(FixedArray::BodyDescriptor::kStartOffset, chunk->progress_bar());
- if (start_offset < object_size) {
- // Ensure that the object is either grey or black before pushing it
- // into marking worklist.
- marking_state()->WhiteToGrey(object);
- if (FLAG_concurrent_marking) {
- marking_worklist()->PushBailout(object);
- } else {
+ int end = Min(size, start + kProgressBarScanningChunk);
+ if (start < end) {
+ VisitPointers(object, HeapObject::RawField(object, start),
+ HeapObject::RawField(object, end));
+ chunk->set_progress_bar(end);
+ if (end < size) {
+ DCHECK(marking_state()->IsBlack(object));
+ // The object can be pushed back onto the marking worklist only after
+ // the progress bar has been updated.
marking_worklist()->Push(object);
- }
- DCHECK(marking_state()->IsGrey(object) ||
- marking_state()->IsBlack(object));
-
- int end_offset =
- Min(object_size, start_offset + kProgressBarScanningChunk);
- int already_scanned_offset = start_offset;
- VisitPointers(object, HeapObject::RawField(object, start_offset),
- HeapObject::RawField(object, end_offset));
- start_offset = end_offset;
- end_offset = Min(object_size, end_offset + kProgressBarScanningChunk);
- chunk->set_progress_bar(start_offset);
- if (start_offset < object_size) {
heap_->incremental_marking()->NotifyIncompleteScanOfObject(
- object_size - (start_offset - already_scanned_offset));
+ size - (end - start));
}
}
} else {
- FixedArray::BodyDescriptor::IterateBody(map, object, object_size, this);
+ FixedArray::BodyDescriptor::IterateBody(map, object, size, this);
}
- return object_size;
+ return size;
}
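
Note: VisitFixedArrayIncremental bounds each marking step on huge arrays by scanning at most kProgressBarScanningChunk at a time, persisting the resume offset on the chunk, and re-pushing the object only after the offset is updated, as the comment above stresses. A runnable sketch of that loop, using slots instead of bytes; the constant is assumed.

    #include <algorithm>
    #include <iostream>
    #include <queue>

    constexpr int kChunk = 4;  // assumed scanning chunk, in slots

    struct BigArray {
      int size = 10;     // total slots
      int progress = 0;  // next unscanned slot (the "progress bar")
    };

    int main() {
      BigArray a;
      std::queue<BigArray*> worklist;
      worklist.push(&a);
      while (!worklist.empty()) {
        BigArray* obj = worklist.front();
        worklist.pop();
        int start = obj->progress;
        int end = std::min(obj->size, start + kChunk);
        // ... visit slots [start, end) here ...
        obj->progress = end;  // update before re-queueing, never after
        if (end < obj->size) worklist.push(obj);
        std::cout << "scanned [" << start << "," << end << ")\n";
      }
    }
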
template <FixedArrayVisitationMode fixed_array_mode,
TraceRetainingPathMode retaining_path_mode, typename MarkingState>
-void MarkingVisitor<fixed_array_mode, retaining_path_mode,
- MarkingState>::MarkMapContents(Map* map) {
- // Since descriptor arrays are potentially shared, ensure that only the
- // descriptors that belong to this map are marked. The first time a non-empty
- // descriptor array is marked, its header is also visited. The slot holding
- // the descriptor array will be implicitly recorded when the pointer fields of
- // this map are visited. Prototype maps don't keep track of transitions, so
- // just mark the entire descriptor array.
- if (!map->is_prototype_map()) {
- DescriptorArray* descriptors = map->instance_descriptors();
- if (MarkObjectWithoutPush(map, descriptors) && descriptors->length() > 0) {
- VisitPointers(descriptors, descriptors->GetFirstElementAddress(),
- descriptors->GetDescriptorEndSlot(0));
- }
- int start = 0;
- int end = map->NumberOfOwnDescriptors();
- if (start < end) {
- VisitPointers(descriptors, descriptors->GetDescriptorStartSlot(start),
- descriptors->GetDescriptorEndSlot(end));
- }
+void MarkingVisitor<fixed_array_mode, retaining_path_mode, MarkingState>::
+ VisitDescriptors(DescriptorArray descriptors,
+ int number_of_own_descriptors) {
+ // Updating the number of marked descriptors is supported only for black
+ // descriptor arrays.
+ DCHECK(marking_state()->IsBlack(descriptors));
+ int16_t new_marked = static_cast<int16_t>(number_of_own_descriptors);
+ int16_t old_marked = descriptors->UpdateNumberOfMarkedDescriptors(
+ mark_compact_epoch_, new_marked);
+ if (old_marked < new_marked) {
+ VisitPointers(descriptors,
+ MaybeObjectSlot(descriptors->GetDescriptorSlot(old_marked)),
+ MaybeObjectSlot(descriptors->GetDescriptorSlot(new_marked)));
}
-
- // Mark the pointer fields of the Map. Since the transitions array has
- // been marked already, it is fine that one of these fields contains a
- // pointer to it.
- Map::BodyDescriptor::IterateBody(
- map->map(), map, Map::BodyDescriptor::SizeOf(map->map(), map), this);
}
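
Note: UpdateNumberOfMarkedDescriptors keys the per-array "already marked" counter to the collector's epoch (incremented in Finish(), see mark-compact.cc below), so a stale counter from the previous cycle reads as zero and each visitor scans only the delta [old_marked, new_marked). A sketch of that epoch guard; the layout is illustrative, and the real version is additionally int16_t-bounded and safe under concurrent marking.

    #include <algorithm>
    #include <iostream>

    struct DescriptorArray {
      unsigned epoch = 0;  // GC cycle the counter below belongs to
      int marked = 0;      // descriptors already visited this cycle

      // Returns the previously marked count for this epoch (0 if stale)
      // and raises the counter to new_marked.
      int UpdateNumberOfMarkedDescriptors(unsigned gc_epoch, int new_marked) {
        if (epoch != gc_epoch) { epoch = gc_epoch; marked = 0; }  // stale
        int old_marked = marked;
        marked = std::max(marked, new_marked);
        return old_marked;
      }
    };

    int main() {
      DescriptorArray d;
      std::cout << d.UpdateNumberOfMarkedDescriptors(1, 2) << "\n";  // 0
      std::cout << d.UpdateNumberOfMarkedDescriptors(1, 3) << "\n";  // 2
      std::cout << d.UpdateNumberOfMarkedDescriptors(2, 1) << "\n";  // 0: new cycle
    }
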
-void MarkCompactCollector::MarkObject(HeapObject* host, HeapObject* obj) {
+void MarkCompactCollector::MarkObject(HeapObject host, HeapObject obj) {
if (marking_state()->WhiteToGrey(obj)) {
marking_worklist()->Push(obj);
if (V8_UNLIKELY(FLAG_track_retaining_path)) {
@@ -373,7 +447,7 @@ void MarkCompactCollector::MarkObject(HeapObject* host, HeapObject* obj) {
}
}
-void MarkCompactCollector::MarkRootObject(Root root, HeapObject* obj) {
+void MarkCompactCollector::MarkRootObject(Root root, HeapObject obj) {
if (marking_state()->WhiteToGrey(obj)) {
marking_worklist()->Push(obj);
if (V8_UNLIKELY(FLAG_track_retaining_path)) {
@@ -384,7 +458,7 @@ void MarkCompactCollector::MarkRootObject(Root root, HeapObject* obj) {
#ifdef ENABLE_MINOR_MC
-void MinorMarkCompactCollector::MarkRootObject(HeapObject* obj) {
+void MinorMarkCompactCollector::MarkRootObject(HeapObject obj) {
if (Heap::InNewSpace(obj) && non_atomic_marking_state_.WhiteToGrey(obj)) {
worklist_->Push(kMainThread, obj);
}
@@ -392,7 +466,7 @@ void MinorMarkCompactCollector::MarkRootObject(HeapObject* obj) {
#endif
-void MarkCompactCollector::MarkExternallyReferencedObject(HeapObject* obj) {
+void MarkCompactCollector::MarkExternallyReferencedObject(HeapObject obj) {
if (marking_state()->WhiteToGrey(obj)) {
marking_worklist()->Push(obj);
if (V8_UNLIKELY(FLAG_track_retaining_path)) {
@@ -401,23 +475,34 @@ void MarkCompactCollector::MarkExternallyReferencedObject(HeapObject* obj) {
}
}
-void MarkCompactCollector::RecordSlot(HeapObject* object, Object** slot,
- HeapObject* target) {
- RecordSlot(object, reinterpret_cast<HeapObjectReference**>(slot), target);
+void MarkCompactCollector::RecordSlot(HeapObject object, ObjectSlot slot,
+ HeapObject target) {
+ RecordSlot(object, HeapObjectSlot(slot), target);
}
-void MarkCompactCollector::RecordSlot(HeapObject* object,
- HeapObjectReference** slot,
- HeapObject* target) {
- Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target));
- Page* source_page = Page::FromAddress(reinterpret_cast<Address>(object));
+void MarkCompactCollector::RecordSlot(HeapObject object, HeapObjectSlot slot,
+ HeapObject target) {
+ Page* target_page = Page::FromHeapObject(target);
+ Page* source_page = Page::FromHeapObject(object);
if (target_page->IsEvacuationCandidate<AccessMode::ATOMIC>() &&
!source_page->ShouldSkipEvacuationSlotRecording<AccessMode::ATOMIC>()) {
- RememberedSet<OLD_TO_OLD>::Insert(source_page,
- reinterpret_cast<Address>(slot));
+ RememberedSet<OLD_TO_OLD>::Insert(source_page, slot.address());
}
}
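
Note: RecordSlot filters remembered-set insertions: a slot goes into the OLD_TO_OLD set only when the target lies on an evacuation candidate page and the source page has not opted out of slot recording. A toy sketch of that filter; Page and the set are stand-ins.

    #include <cstdint>
    #include <iostream>
    #include <set>

    struct Page {
      bool is_evacuation_candidate = false;
      bool skip_slot_recording = false;
    };

    std::set<uintptr_t> old_to_old;  // remembered slot addresses

    void RecordSlot(Page* source, Page* target, uintptr_t slot_address) {
      if (target->is_evacuation_candidate && !source->skip_slot_recording)
        old_to_old.insert(slot_address);  // must be updated when target moves
    }

    int main() {
      Page src, candidate{true, false}, ordinary;
      RecordSlot(&src, &candidate, 0x1000);    // remembered
      RecordSlot(&src, &ordinary, 0x2000);     // filtered out
      std::cout << old_to_old.size() << "\n";  // 1
    }
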
+void MarkCompactCollector::AddTransitionArray(TransitionArray array) {
+ weak_objects_.transition_arrays.Push(kMainThread, array);
+}
+
+void MarkCompactCollector::AddBytecodeFlushingCandidate(
+ SharedFunctionInfo flush_candidate) {
+ weak_objects_.bytecode_flushing_candidates.Push(kMainThread, flush_candidate);
+}
+
+void MarkCompactCollector::AddFlushedJSFunction(JSFunction flushed_function) {
+ weak_objects_.flushed_js_functions.Push(kMainThread, flushed_function);
+}
+
template <LiveObjectIterationMode mode>
LiveObjectRange<mode>::iterator::iterator(MemoryChunk* chunk, Bitmap* bitmap,
Address start)
@@ -434,8 +519,6 @@ LiveObjectRange<mode>::iterator::iterator(MemoryChunk* chunk, Bitmap* bitmap,
cell_base_ = it_.CurrentCellBase();
current_cell_ = *it_.CurrentCell();
AdvanceToNextValidObject();
- } else {
- current_object_ = nullptr;
}
}
@@ -457,11 +540,11 @@ operator++(int) {
template <LiveObjectIterationMode mode>
void LiveObjectRange<mode>::iterator::AdvanceToNextValidObject() {
while (!it_.Done()) {
- HeapObject* object = nullptr;
+ HeapObject object;
int size = 0;
while (current_cell_ != 0) {
uint32_t trailing_zeros = base::bits::CountTrailingZeros(current_cell_);
- Address addr = cell_base_ + trailing_zeros * kPointerSize;
+ Address addr = cell_base_ + trailing_zeros * kTaggedSize;
// Clear the first bit of the found object.
current_cell_ &= ~(1u << trailing_zeros);
@@ -476,7 +559,7 @@ void LiveObjectRange<mode>::iterator::AdvanceToNextValidObject() {
// that case we can return immediately.
if (!it_.Advance()) {
DCHECK(HeapObject::FromAddress(addr)->map() == one_word_filler_map_);
- current_object_ = nullptr;
+ current_object_ = HeapObject();
return;
}
cell_base_ = it_.CurrentCellBase();
@@ -485,20 +568,19 @@ void LiveObjectRange<mode>::iterator::AdvanceToNextValidObject() {
second_bit_index = 1u << (trailing_zeros + 1);
}
- Map* map = nullptr;
+ Map map;
if (current_cell_ & second_bit_index) {
// We found a black object. If the black object is within a black area,
// make sure that we skip all set bits in the black area until the
// object ends.
- HeapObject* black_object = HeapObject::FromAddress(addr);
- map =
- base::AsAtomicPointer::Relaxed_Load(reinterpret_cast<Map**>(addr));
+ HeapObject black_object = HeapObject::FromAddress(addr);
+ map = Map::cast(ObjectSlot(addr).Acquire_Load());
size = black_object->SizeFromMap(map);
- Address end = addr + size - kPointerSize;
+ Address end = addr + size - kTaggedSize;
// One word filler objects do not borrow the second mark bit. We have
// to jump over the advancing and clearing part.
// Note that we know that we are at a one word filler when
- // object_start + object_size - kPointerSize == object_start.
+ // object_start + object_size - kTaggedSize == object_start.
if (addr != end) {
DCHECK_EQ(chunk_, MemoryChunk::FromAddress(end));
uint32_t end_mark_bit_index = chunk_->AddressToMarkbitIndex(end);
@@ -519,14 +601,13 @@ void LiveObjectRange<mode>::iterator::AdvanceToNextValidObject() {
object = black_object;
}
} else if ((mode == kGreyObjects || mode == kAllLiveObjects)) {
- map =
- base::AsAtomicPointer::Relaxed_Load(reinterpret_cast<Map**>(addr));
+ map = Map::cast(ObjectSlot(addr).Acquire_Load());
object = HeapObject::FromAddress(addr);
size = object->SizeFromMap(map);
}
// We found a live object.
- if (object != nullptr) {
+ if (!object.is_null()) {
// Do not use IsFiller() here. This may cause a data race for reading
// out the instance type when a new map is concurrently written into
// this object while iterating over the object.
@@ -538,7 +619,7 @@ void LiveObjectRange<mode>::iterator::AdvanceToNextValidObject() {
// 2) Left trimming may leave black or grey fillers behind because we
// do not clear the old location of the object start.
// We filter these objects out in the iterator.
- object = nullptr;
+ object = HeapObject();
} else {
break;
}
@@ -551,13 +632,13 @@ void LiveObjectRange<mode>::iterator::AdvanceToNextValidObject() {
current_cell_ = *it_.CurrentCell();
}
}
- if (object != nullptr) {
+ if (!object.is_null()) {
current_object_ = object;
current_size_ = size;
return;
}
}
- current_object_ = nullptr;
+ current_object_ = HeapObject();
}
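
Note: AdvanceToNextValidObject walks the marking bitmap cell by cell; every set bit in a 32-bit cell is one live object whose address is recovered with count-trailing-zeros, now scaled by kTaggedSize rather than kPointerSize. A sketch of that bit-scan (GCC/Clang __builtin_ctz; the addresses are made up).

    #include <cstdint>
    #include <iostream>

    constexpr uintptr_t kTaggedSize = 8;  // assumed tagged-word size

    int main() {
      uintptr_t cell_base = 0x10000;
      uint32_t cell = 0b10010010;  // three live objects in this cell
      while (cell != 0) {
        unsigned tz = __builtin_ctz(cell);            // lowest set bit index
        uintptr_t addr = cell_base + tz * kTaggedSize;
        cell &= cell - 1;                             // clear that bit
        std::cout << std::hex << addr << "\n";        // 10008, 10020, 10038
      }
    }
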
template <LiveObjectIterationMode mode>
diff --git a/deps/v8/src/heap/mark-compact.cc b/deps/v8/src/heap/mark-compact.cc
index 6f46bc57bf..0a6b5ed000 100644
--- a/deps/v8/src/heap/mark-compact.cc
+++ b/deps/v8/src/heap/mark-compact.cc
@@ -8,7 +8,6 @@
#include "src/base/utils/random-number-generator.h"
#include "src/cancelable-task.h"
-#include "src/code-stubs.h"
#include "src/compilation-cache.h"
#include "src/deoptimizer.h"
#include "src/execution.h"
@@ -28,7 +27,11 @@
#include "src/heap/sweeper.h"
#include "src/heap/worklist.h"
#include "src/ic/stub-cache.h"
+#include "src/objects/foreign.h"
#include "src/objects/hash-table-inl.h"
+#include "src/objects/js-objects-inl.h"
+#include "src/objects/maybe-object.h"
+#include "src/objects/slots-inl.h"
#include "src/transitions-inl.h"
#include "src/utils-inl.h"
#include "src/v8.h"
@@ -44,7 +47,7 @@ const char* Marking::kImpossibleBitPattern = "01";
// The following has to hold in order for {MarkingState::MarkBitFrom} to not
// produce invalid {kImpossibleBitPattern} in the marking bitmap by overlapping.
-STATIC_ASSERT(Heap::kMinObjectSizeInWords >= 2);
+STATIC_ASSERT(Heap::kMinObjectSizeInTaggedWords >= 2);
// =============================================================================
// Verifiers
@@ -62,31 +65,34 @@ class MarkingVerifier : public ObjectVisitor, public RootVisitor {
virtual Bitmap* bitmap(const MemoryChunk* chunk) = 0;
- virtual void VerifyPointers(Object** start, Object** end) = 0;
- virtual void VerifyPointers(MaybeObject** start, MaybeObject** end) = 0;
+ virtual void VerifyPointers(ObjectSlot start, ObjectSlot end) = 0;
+ virtual void VerifyPointers(MaybeObjectSlot start, MaybeObjectSlot end) = 0;
+ virtual void VerifyRootPointers(FullObjectSlot start, FullObjectSlot end) = 0;
- virtual bool IsMarked(HeapObject* object) = 0;
+ virtual bool IsMarked(HeapObject object) = 0;
- virtual bool IsBlackOrGrey(HeapObject* object) = 0;
+ virtual bool IsBlackOrGrey(HeapObject object) = 0;
- void VisitPointers(HeapObject* host, Object** start, Object** end) override {
+ void VisitPointers(HeapObject host, ObjectSlot start,
+ ObjectSlot end) override {
VerifyPointers(start, end);
}
- void VisitPointers(HeapObject* host, MaybeObject** start,
- MaybeObject** end) override {
+ void VisitPointers(HeapObject host, MaybeObjectSlot start,
+ MaybeObjectSlot end) override {
VerifyPointers(start, end);
}
- void VisitRootPointers(Root root, const char* description, Object** start,
- Object** end) override {
- VerifyPointers(start, end);
+ void VisitRootPointers(Root root, const char* description,
+ FullObjectSlot start, FullObjectSlot end) override {
+ VerifyRootPointers(start, end);
}
void VerifyRoots(VisitMode mode);
void VerifyMarkingOnPage(const Page* page, Address start, Address end);
void VerifyMarking(NewSpace* new_space);
void VerifyMarking(PagedSpace* paged_space);
+ void VerifyMarking(LargeObjectSpace* lo_space);
Heap* heap_;
};
@@ -97,7 +103,7 @@ void MarkingVerifier::VerifyRoots(VisitMode mode) {
void MarkingVerifier::VerifyMarkingOnPage(const Page* page, Address start,
Address end) {
- HeapObject* object;
+ HeapObject object;
Address next_object_must_be_here_or_later = start;
for (Address current = start; current < end;) {
object = HeapObject::FromAddress(current);
@@ -115,11 +121,11 @@ void MarkingVerifier::VerifyMarkingOnPage(const Page* page, Address start,
page->AddressToMarkbitIndex(current),
page->AddressToMarkbitIndex(next_object_must_be_here_or_later)) ||
bitmap(page)->AllBitsClearInRange(
- page->AddressToMarkbitIndex(current + kPointerSize * 2),
+ page->AddressToMarkbitIndex(current + kTaggedSize * 2),
page->AddressToMarkbitIndex(next_object_must_be_here_or_later)));
current = next_object_must_be_here_or_later;
} else {
- current += kPointerSize;
+ current += kTaggedSize;
}
}
}
@@ -146,6 +152,15 @@ void MarkingVerifier::VerifyMarking(PagedSpace* space) {
}
}
+void MarkingVerifier::VerifyMarking(LargeObjectSpace* lo_space) {
+ LargeObjectIterator it(lo_space);
+ for (HeapObject obj = it.Next(); !obj.is_null(); obj = it.Next()) {
+ if (IsBlackOrGrey(obj)) {
+ obj->Iterate(this);
+ }
+ }
+}
+
class FullMarkingVerifier : public MarkingVerifier {
public:
explicit FullMarkingVerifier(Heap* heap)
@@ -159,13 +174,8 @@ class FullMarkingVerifier : public MarkingVerifier {
VerifyMarking(heap_->old_space());
VerifyMarking(heap_->code_space());
VerifyMarking(heap_->map_space());
-
- LargeObjectIterator it(heap_->lo_space());
- for (HeapObject* obj = it.Next(); obj != nullptr; obj = it.Next()) {
- if (marking_state_->IsBlackOrGrey(obj)) {
- obj->Iterate(this);
- }
- }
+ VerifyMarking(heap_->lo_space());
+ VerifyMarking(heap_->code_lo_space());
}
protected:
@@ -173,41 +183,55 @@ class FullMarkingVerifier : public MarkingVerifier {
return marking_state_->bitmap(chunk);
}
- bool IsMarked(HeapObject* object) override {
+ bool IsMarked(HeapObject object) override {
return marking_state_->IsBlack(object);
}
- bool IsBlackOrGrey(HeapObject* object) override {
+ bool IsBlackOrGrey(HeapObject object) override {
return marking_state_->IsBlackOrGrey(object);
}
- void VerifyPointers(Object** start, Object** end) override {
- for (Object** current = start; current < end; current++) {
- if ((*current)->IsHeapObject()) {
- HeapObject* object = HeapObject::cast(*current);
- CHECK(marking_state_->IsBlackOrGrey(object));
- }
- }
+ void VerifyPointers(ObjectSlot start, ObjectSlot end) override {
+ VerifyPointersImpl(start, end);
}
- void VerifyPointers(MaybeObject** start, MaybeObject** end) override {
- for (MaybeObject** current = start; current < end; current++) {
- HeapObject* object;
- if ((*current)->GetHeapObjectIfStrong(&object)) {
- CHECK(marking_state_->IsBlackOrGrey(object));
- }
- }
+ void VerifyPointers(MaybeObjectSlot start, MaybeObjectSlot end) override {
+ VerifyPointersImpl(start, end);
+ }
+
+ void VerifyRootPointers(FullObjectSlot start, FullObjectSlot end) override {
+ VerifyPointersImpl(start, end);
}
- void VisitEmbeddedPointer(Code* host, RelocInfo* rinfo) override {
+ void VisitCodeTarget(Code host, RelocInfo* rinfo) override {
+ Code target = Code::GetCodeFromTargetAddress(rinfo->target_address());
+ VerifyHeapObjectImpl(target);
+ }
+
+ void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
DCHECK(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
if (!host->IsWeakObject(rinfo->target_object())) {
- Object* p = rinfo->target_object();
- VisitPointer(host, &p);
+ HeapObject object = rinfo->target_object();
+ VerifyHeapObjectImpl(object);
}
}
private:
+ V8_INLINE void VerifyHeapObjectImpl(HeapObject heap_object) {
+ CHECK(marking_state_->IsBlackOrGrey(heap_object));
+ }
+
+ template <typename TSlot>
+ V8_INLINE void VerifyPointersImpl(TSlot start, TSlot end) {
+ for (TSlot slot = start; slot < end; ++slot) {
+ typename TSlot::TObject object = *slot;
+ HeapObject heap_object;
+ if (object.GetHeapObjectIfStrong(&heap_object)) {
+ VerifyHeapObjectImpl(heap_object);
+ }
+ }
+ }
+
MarkCompactCollector::NonAtomicMarkingState* marking_state_;
};
@@ -215,18 +239,19 @@ class EvacuationVerifier : public ObjectVisitor, public RootVisitor {
public:
virtual void Run() = 0;
- void VisitPointers(HeapObject* host, Object** start, Object** end) override {
+ void VisitPointers(HeapObject host, ObjectSlot start,
+ ObjectSlot end) override {
VerifyPointers(start, end);
}
- void VisitPointers(HeapObject* host, MaybeObject** start,
- MaybeObject** end) override {
+ void VisitPointers(HeapObject host, MaybeObjectSlot start,
+ MaybeObjectSlot end) override {
VerifyPointers(start, end);
}
- void VisitRootPointers(Root root, const char* description, Object** start,
- Object** end) override {
- VerifyPointers(start, end);
+ void VisitRootPointers(Root root, const char* description,
+ FullObjectSlot start, FullObjectSlot end) override {
+ VerifyRootPointers(start, end);
}
protected:
@@ -234,8 +259,9 @@ class EvacuationVerifier : public ObjectVisitor, public RootVisitor {
inline Heap* heap() { return heap_; }
- virtual void VerifyPointers(Object** start, Object** end) = 0;
- virtual void VerifyPointers(MaybeObject** start, MaybeObject** end) = 0;
+ virtual void VerifyPointers(ObjectSlot start, ObjectSlot end) = 0;
+ virtual void VerifyPointers(MaybeObjectSlot start, MaybeObjectSlot end) = 0;
+ virtual void VerifyRootPointers(FullObjectSlot start, FullObjectSlot end) = 0;
void VerifyRoots(VisitMode mode);
void VerifyEvacuationOnPage(Address start, Address end);
@@ -252,7 +278,7 @@ void EvacuationVerifier::VerifyRoots(VisitMode mode) {
void EvacuationVerifier::VerifyEvacuationOnPage(Address start, Address end) {
Address current = start;
while (current < end) {
- HeapObject* object = HeapObject::FromAddress(current);
+ HeapObject object = HeapObject::FromAddress(current);
if (!object->IsFiller()) object->Iterate(this);
current += object->Size();
}
@@ -295,27 +321,37 @@ class FullEvacuationVerifier : public EvacuationVerifier {
}
protected:
- void VerifyPointers(Object** start, Object** end) override {
- for (Object** current = start; current < end; current++) {
- if ((*current)->IsHeapObject()) {
- HeapObject* object = HeapObject::cast(*current);
- if (Heap::InNewSpace(object)) {
- CHECK(Heap::InToSpace(object));
- }
- CHECK(!MarkCompactCollector::IsOnEvacuationCandidate(object));
+ V8_INLINE void VerifyHeapObjectImpl(HeapObject heap_object) {
+ CHECK_IMPLIES(Heap::InNewSpace(heap_object), Heap::InToSpace(heap_object));
+ CHECK(!MarkCompactCollector::IsOnEvacuationCandidate(heap_object));
+ }
+
+ template <typename TSlot>
+ void VerifyPointersImpl(TSlot start, TSlot end) {
+ for (TSlot current = start; current < end; ++current) {
+ typename TSlot::TObject object = *current;
+ HeapObject heap_object;
+ if (object.GetHeapObjectIfStrong(&heap_object)) {
+ VerifyHeapObjectImpl(heap_object);
}
}
}
- void VerifyPointers(MaybeObject** start, MaybeObject** end) override {
- for (MaybeObject** current = start; current < end; current++) {
- HeapObject* object;
- if ((*current)->GetHeapObjectIfStrong(&object)) {
- if (Heap::InNewSpace(object)) {
- CHECK(Heap::InToSpace(object));
- }
- CHECK(!MarkCompactCollector::IsOnEvacuationCandidate(object));
- }
- }
+
+ void VerifyPointers(ObjectSlot start, ObjectSlot end) override {
+ VerifyPointersImpl(start, end);
+ }
+ void VerifyPointers(MaybeObjectSlot start, MaybeObjectSlot end) override {
+ VerifyPointersImpl(start, end);
+ }
+ void VisitCodeTarget(Code host, RelocInfo* rinfo) override {
+ Code target = Code::GetCodeFromTargetAddress(rinfo->target_address());
+ VerifyHeapObjectImpl(target);
+ }
+ void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
+ VerifyHeapObjectImpl(rinfo->target_object());
+ }
+ void VerifyRootPointers(FullObjectSlot start, FullObjectSlot end) override {
+ VerifyPointersImpl(start, end);
}
};
@@ -474,7 +510,8 @@ void MarkCompactCollector::CollectGarbage() {
#ifdef VERIFY_HEAP
void MarkCompactCollector::VerifyMarkbitsAreDirty(PagedSpace* space) {
HeapObjectIterator iterator(space);
- while (HeapObject* object = iterator.Next()) {
+ for (HeapObject object = iterator.Next(); !object.is_null();
+ object = iterator.Next()) {
CHECK(non_atomic_marking_state()->IsBlack(object));
}
}
@@ -494,6 +531,14 @@ void MarkCompactCollector::VerifyMarkbitsAreClean(NewSpace* space) {
}
}
+void MarkCompactCollector::VerifyMarkbitsAreClean(LargeObjectSpace* space) {
+ LargeObjectIterator it(space);
+ for (HeapObject obj = it.Next(); !obj.is_null(); obj = it.Next()) {
+ CHECK(non_atomic_marking_state()->IsWhite(obj));
+ CHECK_EQ(0, non_atomic_marking_state()->live_bytes(
+ MemoryChunk::FromHeapObject(obj)));
+ }
+}
void MarkCompactCollector::VerifyMarkbitsAreClean() {
VerifyMarkbitsAreClean(heap_->old_space());
@@ -503,13 +548,9 @@ void MarkCompactCollector::VerifyMarkbitsAreClean() {
// Read-only space should always be black since we never collect any objects
// in it or linked from it.
VerifyMarkbitsAreDirty(heap_->read_only_space());
-
- LargeObjectIterator it(heap_->lo_space());
- for (HeapObject* obj = it.Next(); obj != nullptr; obj = it.Next()) {
- CHECK(non_atomic_marking_state()->IsWhite(obj));
- CHECK_EQ(0, non_atomic_marking_state()->live_bytes(
- MemoryChunk::FromAddress(obj->address())));
- }
+ VerifyMarkbitsAreClean(heap_->lo_space());
+ VerifyMarkbitsAreClean(heap_->code_lo_space());
+ VerifyMarkbitsAreClean(heap_->new_lo_space());
}
#endif // VERIFY_HEAP
@@ -753,7 +794,7 @@ void MarkCompactCollector::Prepare() {
heap()->memory_allocator()->unmapper()->PrepareForMarkCompact();
if (!was_marked_incrementally_) {
- TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WRAPPER_PROLOGUE);
+ TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_EMBEDDER_PROLOGUE);
heap_->local_embedder_heap_tracer()->TracePrologue();
}
@@ -779,9 +820,12 @@ void MarkCompactCollector::Prepare() {
void MarkCompactCollector::FinishConcurrentMarking(
ConcurrentMarking::StopRequest stop_request) {
- if (FLAG_concurrent_marking) {
+ // FinishConcurrentMarking is called for both concurrent and parallel
+ // marking. It is safe to call this function when tasks are already finished.
+ if (FLAG_parallel_marking || FLAG_concurrent_marking) {
heap()->concurrent_marking()->Stop(stop_request);
- heap()->concurrent_marking()->FlushLiveBytes(non_atomic_marking_state());
+ heap()->concurrent_marking()->FlushMemoryChunkData(
+ non_atomic_marking_state());
}
}
@@ -806,6 +850,8 @@ void MarkCompactCollector::VerifyMarking() {
void MarkCompactCollector::Finish() {
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_FINISH);
+ epoch_++;
+
#ifdef DEBUG
heap()->VerifyCountersBeforeConcurrentSweeping();
#endif
@@ -819,6 +865,7 @@ void MarkCompactCollector::Finish() {
// Clear the marking state of live large objects.
heap_->lo_space()->ClearMarkingStateOfLiveObjects();
+ heap_->code_lo_space()->ClearMarkingStateOfLiveObjects();
#ifdef DEBUG
DCHECK(state_ == SWEEP_SPACES || state_ == RELOCATE_OBJECTS);
@@ -845,17 +892,18 @@ class MarkCompactCollector::RootMarkingVisitor final : public RootVisitor {
explicit RootMarkingVisitor(MarkCompactCollector* collector)
: collector_(collector) {}
- void VisitRootPointer(Root root, const char* description, Object** p) final {
+ void VisitRootPointer(Root root, const char* description,
+ FullObjectSlot p) final {
MarkObjectByPointer(root, p);
}
- void VisitRootPointers(Root root, const char* description, Object** start,
- Object** end) final {
- for (Object** p = start; p < end; p++) MarkObjectByPointer(root, p);
+ void VisitRootPointers(Root root, const char* description,
+ FullObjectSlot start, FullObjectSlot end) final {
+ for (FullObjectSlot p = start; p < end; ++p) MarkObjectByPointer(root, p);
}
private:
- V8_INLINE void MarkObjectByPointer(Root root, Object** p) {
+ V8_INLINE void MarkObjectByPointer(Root root, FullObjectSlot p) {
if (!(*p)->IsHeapObject()) return;
collector_->MarkRootObject(root, HeapObject::cast(*p));
@@ -879,27 +927,34 @@ class MarkCompactCollector::CustomRootBodyMarkingVisitor final
explicit CustomRootBodyMarkingVisitor(MarkCompactCollector* collector)
: collector_(collector) {}
- void VisitPointer(HeapObject* host, Object** p) final {
+ void VisitPointer(HeapObject host, ObjectSlot p) final {
MarkObject(host, *p);
}
- void VisitPointers(HeapObject* host, Object** start, Object** end) final {
- for (Object** p = start; p < end; p++) {
+ void VisitPointers(HeapObject host, ObjectSlot start, ObjectSlot end) final {
+ for (ObjectSlot p = start; p < end; ++p) {
DCHECK(!HasWeakHeapObjectTag(*p));
MarkObject(host, *p);
}
}
- void VisitPointers(HeapObject* host, MaybeObject** start,
- MaybeObject** end) final {
+ void VisitPointers(HeapObject host, MaybeObjectSlot start,
+ MaybeObjectSlot end) final {
// At the moment, custom roots cannot contain weak pointers.
UNREACHABLE();
}
// VisitEmbedderPointer is defined by ObjectVisitor to call VisitPointers.
+ void VisitCodeTarget(Code host, RelocInfo* rinfo) override {
+ Code target = Code::GetCodeFromTargetAddress(rinfo->target_address());
+ MarkObject(host, target);
+ }
+ void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
+ MarkObject(host, rinfo->target_object());
+ }
private:
- void MarkObject(HeapObject* host, Object* object) {
+ V8_INLINE void MarkObject(HeapObject host, Object object) {
if (!object->IsHeapObject()) return;
collector_->MarkObject(host, HeapObject::cast(object));
}
@@ -909,22 +964,23 @@ class MarkCompactCollector::CustomRootBodyMarkingVisitor final
class InternalizedStringTableCleaner : public ObjectVisitor {
public:
- InternalizedStringTableCleaner(Heap* heap, HeapObject* table)
+ InternalizedStringTableCleaner(Heap* heap, HeapObject table)
: heap_(heap), pointers_removed_(0), table_(table) {}
- void VisitPointers(HeapObject* host, Object** start, Object** end) override {
+ void VisitPointers(HeapObject host, ObjectSlot start,
+ ObjectSlot end) override {
// Visit all HeapObject pointers in [start, end).
- Object* the_hole = ReadOnlyRoots(heap_).the_hole_value();
+ Object the_hole = ReadOnlyRoots(heap_).the_hole_value();
MarkCompactCollector::NonAtomicMarkingState* marking_state =
heap_->mark_compact_collector()->non_atomic_marking_state();
- for (Object** p = start; p < end; p++) {
- Object* o = *p;
+ for (ObjectSlot p = start; p < end; ++p) {
+ Object o = *p;
if (o->IsHeapObject()) {
- HeapObject* heap_object = HeapObject::cast(o);
+ HeapObject heap_object = HeapObject::cast(o);
if (marking_state->IsWhite(heap_object)) {
pointers_removed_++;
// Set the entry to the_hole_value (as deleted).
- *p = the_hole;
+ p.store(the_hole);
} else {
// StringTable contains only old space strings.
DCHECK(!Heap::InNewSpace(o));
@@ -934,8 +990,14 @@ class InternalizedStringTableCleaner : public ObjectVisitor {
}
}
- void VisitPointers(HeapObject* host, MaybeObject** start,
- MaybeObject** end) final {
+ void VisitPointers(HeapObject host, MaybeObjectSlot start,
+ MaybeObjectSlot end) final {
+ UNREACHABLE();
+ }
+
+ void VisitCodeTarget(Code host, RelocInfo* rinfo) final { UNREACHABLE(); }
+
+ void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) final {
UNREACHABLE();
}
@@ -946,32 +1008,32 @@ class InternalizedStringTableCleaner : public ObjectVisitor {
private:
Heap* heap_;
int pointers_removed_;
- HeapObject* table_;
+ HeapObject table_;
};
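
Note: InternalizedStringTableCleaner overwrites entries for unmarked strings with the hole sentinel rather than compacting in place, counting removals so the table's element count can be adjusted afterwards. A toy sketch with integers for entries and a fake liveness predicate.

    #include <iostream>
    #include <vector>

    int main() {
      const int kTheHole = -1;  // deleted-entry sentinel
      std::vector<int> table = {10, 11, 12, 13};
      auto is_marked = [](int entry) { return entry % 2 == 0; };  // fake

      int removed = 0;
      for (int& entry : table)
        if (entry != kTheHole && !is_marked(entry)) {
          entry = kTheHole;  // table stays densely indexed, entry is "deleted"
          ++removed;
        }
      std::cout << removed << "\n";  // 2
    }
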
class ExternalStringTableCleaner : public RootVisitor {
public:
explicit ExternalStringTableCleaner(Heap* heap) : heap_(heap) {}
- void VisitRootPointers(Root root, const char* description, Object** start,
- Object** end) override {
+ void VisitRootPointers(Root root, const char* description,
+ FullObjectSlot start, FullObjectSlot end) override {
// Visit all HeapObject pointers in [start, end).
MarkCompactCollector::NonAtomicMarkingState* marking_state =
heap_->mark_compact_collector()->non_atomic_marking_state();
- Object* the_hole = ReadOnlyRoots(heap_).the_hole_value();
- for (Object** p = start; p < end; p++) {
- Object* o = *p;
+ Object the_hole = ReadOnlyRoots(heap_).the_hole_value();
+ for (FullObjectSlot p = start; p < end; ++p) {
+ Object o = *p;
if (o->IsHeapObject()) {
- HeapObject* heap_object = HeapObject::cast(o);
+ HeapObject heap_object = HeapObject::cast(o);
if (marking_state->IsWhite(heap_object)) {
if (o->IsExternalString()) {
- heap_->FinalizeExternalString(String::cast(*p));
+ heap_->FinalizeExternalString(String::cast(o));
} else {
// The original external string may have been internalized.
DCHECK(o->IsThinString());
}
// Set the entry to the_hole_value (as deleted).
- *p = the_hole;
+ p.store(the_hole);
}
}
}
@@ -989,8 +1051,8 @@ class MarkCompactWeakObjectRetainer : public WeakObjectRetainer {
MarkCompactCollector::NonAtomicMarkingState* marking_state)
: marking_state_(marking_state) {}
- Object* RetainAs(Object* object) override {
- HeapObject* heap_object = HeapObject::cast(object);
+ Object RetainAs(Object object) override {
+ HeapObject heap_object = HeapObject::cast(object);
DCHECK(!marking_state_->IsGrey(heap_object));
if (marking_state_->IsBlack(heap_object)) {
return object;
@@ -999,9 +1061,9 @@ class MarkCompactWeakObjectRetainer : public WeakObjectRetainer {
// "dead" AllocationSites need to live long enough for a traversal of new
// space. These sites get a one-time reprieve.
- Object* nested = object;
+ Object nested = object;
while (nested->IsAllocationSite()) {
- AllocationSite* current_site = AllocationSite::cast(nested);
+ AllocationSite current_site = AllocationSite::cast(nested);
// MarkZombie will override the nested_site; read it first before
// marking.
nested = current_site->nested_site();
@@ -1011,7 +1073,7 @@ class MarkCompactWeakObjectRetainer : public WeakObjectRetainer {
return object;
} else {
- return nullptr;
+ return Object();
}
}
@@ -1024,69 +1086,68 @@ class RecordMigratedSlotVisitor : public ObjectVisitor {
explicit RecordMigratedSlotVisitor(MarkCompactCollector* collector)
: collector_(collector) {}
- inline void VisitPointer(HeapObject* host, Object** p) final {
+ inline void VisitPointer(HeapObject host, ObjectSlot p) final {
DCHECK(!HasWeakHeapObjectTag(*p));
- RecordMigratedSlot(host, reinterpret_cast<MaybeObject*>(*p),
- reinterpret_cast<Address>(p));
+ RecordMigratedSlot(host, MaybeObject::FromObject(*p), p.address());
}
- inline void VisitPointer(HeapObject* host, MaybeObject** p) final {
- RecordMigratedSlot(host, *p, reinterpret_cast<Address>(p));
+ inline void VisitPointer(HeapObject host, MaybeObjectSlot p) final {
+ RecordMigratedSlot(host, *p, p.address());
}
- inline void VisitPointers(HeapObject* host, Object** start,
- Object** end) final {
+ inline void VisitPointers(HeapObject host, ObjectSlot start,
+ ObjectSlot end) final {
while (start < end) {
VisitPointer(host, start);
++start;
}
}
- inline void VisitPointers(HeapObject* host, MaybeObject** start,
- MaybeObject** end) final {
+ inline void VisitPointers(HeapObject host, MaybeObjectSlot start,
+ MaybeObjectSlot end) final {
while (start < end) {
VisitPointer(host, start);
++start;
}
}
- inline void VisitCodeTarget(Code* host, RelocInfo* rinfo) override {
+ inline void VisitCodeTarget(Code host, RelocInfo* rinfo) override {
DCHECK_EQ(host, rinfo->host());
DCHECK(RelocInfo::IsCodeTargetMode(rinfo->rmode()));
- Code* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
+ Code target = Code::GetCodeFromTargetAddress(rinfo->target_address());
// The target is always in old space; we don't have to record the slot in
// the old-to-new remembered set.
DCHECK(!Heap::InNewSpace(target));
collector_->RecordRelocSlot(host, rinfo, target);
}
- inline void VisitEmbeddedPointer(Code* host, RelocInfo* rinfo) override {
+ inline void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
DCHECK_EQ(host, rinfo->host());
DCHECK(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
- HeapObject* object = HeapObject::cast(rinfo->target_object());
+ HeapObject object = HeapObject::cast(rinfo->target_object());
GenerationalBarrierForCode(host, rinfo, object);
collector_->RecordRelocSlot(host, rinfo, object);
}
// Entries that are skipped for recording.
- inline void VisitExternalReference(Code* host, RelocInfo* rinfo) final {}
- inline void VisitExternalReference(Foreign* host, Address* p) final {}
- inline void VisitRuntimeEntry(Code* host, RelocInfo* rinfo) final {}
- inline void VisitInternalReference(Code* host, RelocInfo* rinfo) final {}
+ inline void VisitExternalReference(Code host, RelocInfo* rinfo) final {}
+ inline void VisitExternalReference(Foreign host, Address* p) final {}
+ inline void VisitRuntimeEntry(Code host, RelocInfo* rinfo) final {}
+ inline void VisitInternalReference(Code host, RelocInfo* rinfo) final {}
protected:
- inline virtual void RecordMigratedSlot(HeapObject* host, MaybeObject* value,
+ inline virtual void RecordMigratedSlot(HeapObject host, MaybeObject value,
Address slot) {
if (value->IsStrongOrWeak()) {
- Page* p = Page::FromAddress(reinterpret_cast<Address>(value));
+ Page* p = Page::FromAddress(value.ptr());
if (p->InNewSpace()) {
DCHECK_IMPLIES(p->InToSpace(),
p->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION));
RememberedSet<OLD_TO_NEW>::Insert<AccessMode::NON_ATOMIC>(
- Page::FromAddress(slot), slot);
+ MemoryChunk::FromHeapObject(host), slot);
} else if (p->IsEvacuationCandidate()) {
RememberedSet<OLD_TO_OLD>::Insert<AccessMode::NON_ATOMIC>(
- Page::FromAddress(slot), slot);
+ MemoryChunk::FromHeapObject(host), slot);
}
}
}
@@ -1099,7 +1160,7 @@ class MigrationObserver {
explicit MigrationObserver(Heap* heap) : heap_(heap) {}
virtual ~MigrationObserver() = default;
- virtual void Move(AllocationSpace dest, HeapObject* src, HeapObject* dst,
+ virtual void Move(AllocationSpace dest, HeapObject src, HeapObject dst,
int size) = 0;
protected:
@@ -1110,7 +1171,7 @@ class ProfilingMigrationObserver final : public MigrationObserver {
public:
explicit ProfilingMigrationObserver(Heap* heap) : MigrationObserver(heap) {}
- inline void Move(AllocationSpace dest, HeapObject* src, HeapObject* dst,
+ inline void Move(AllocationSpace dest, HeapObject src, HeapObject dst,
int size) final {
if (dest == CODE_SPACE || (dest == OLD_SPACE && dst->IsBytecodeArray())) {
PROFILE(heap_->isolate(),
@@ -1123,7 +1184,7 @@ class ProfilingMigrationObserver final : public MigrationObserver {
class HeapObjectVisitor {
public:
virtual ~HeapObjectVisitor() = default;
- virtual bool Visit(HeapObject* object, int size) = 0;
+ virtual bool Visit(HeapObject object, int size) = 0;
};
class EvacuateVisitorBase : public HeapObjectVisitor {
@@ -1136,21 +1197,21 @@ class EvacuateVisitorBase : public HeapObjectVisitor {
protected:
enum MigrationMode { kFast, kObserved };
- typedef void (*MigrateFunction)(EvacuateVisitorBase* base, HeapObject* dst,
- HeapObject* src, int size,
+ typedef void (*MigrateFunction)(EvacuateVisitorBase* base, HeapObject dst,
+ HeapObject src, int size,
AllocationSpace dest);
template <MigrationMode mode>
- static void RawMigrateObject(EvacuateVisitorBase* base, HeapObject* dst,
- HeapObject* src, int size,
- AllocationSpace dest) {
+ static void RawMigrateObject(EvacuateVisitorBase* base, HeapObject dst,
+ HeapObject src, int size, AllocationSpace dest) {
Address dst_addr = dst->address();
Address src_addr = src->address();
DCHECK(base->heap_->AllowedToBeMigrated(src, dest));
- DCHECK(dest != LO_SPACE);
+ DCHECK_NE(dest, LO_SPACE);
+ DCHECK_NE(dest, CODE_LO_SPACE);
if (dest == OLD_SPACE) {
DCHECK_OBJECT_SIZE(size);
- DCHECK(IsAligned(size, kPointerSize));
+ DCHECK(IsAligned(size, kTaggedSize));
base->heap_->CopyBlock(dst_addr, src_addr, size);
if (mode != MigrationMode::kFast)
base->ExecuteMigrationObservers(dest, src, dst, size);
@@ -1169,8 +1230,7 @@ class EvacuateVisitorBase : public HeapObjectVisitor {
if (mode != MigrationMode::kFast)
base->ExecuteMigrationObservers(dest, src, dst, size);
}
- base::Relaxed_Store(reinterpret_cast<base::AtomicWord*>(src_addr),
- static_cast<base::AtomicWord>(dst_addr));
+ src->set_map_word(MapWord::FromForwardingAddress(dst));
}
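
Note: the raw Relaxed_Store of an address is replaced by writing a MapWord forwarding pointer into the evacuated object's map slot; any later reader distinguishes a forwarding address from a real map by its encoding. A sketch of one plausible encoding; the low tag bit here is an assumption for illustration, not V8's actual scheme.

    #include <cassert>
    #include <cstdint>

    struct MapWord {
      uintptr_t value;
      static MapWord FromForwardingAddress(uintptr_t dst) {
        return {dst | 1};  // assumed tag: low bit marks a forwarding pointer
      }
      bool IsForwardingAddress() const { return value & 1; }
      uintptr_t ToForwardingAddress() const { return value & ~uintptr_t{1}; }
    };

    int main() {
      MapWord w = MapWord::FromForwardingAddress(0x4000);
      assert(w.IsForwardingAddress());
      assert(w.ToForwardingAddress() == 0x4000);
    }
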
EvacuateVisitorBase(Heap* heap, LocalAllocator* local_allocator,
@@ -1181,9 +1241,8 @@ class EvacuateVisitorBase : public HeapObjectVisitor {
migration_function_ = RawMigrateObject<MigrationMode::kFast>;
}
- inline bool TryEvacuateObject(AllocationSpace target_space,
- HeapObject* object, int size,
- HeapObject** target_object) {
+ inline bool TryEvacuateObject(AllocationSpace target_space, HeapObject object,
+ int size, HeapObject* target_object) {
#ifdef VERIFY_HEAP
if (AbortCompactionForTesting(object)) return false;
#endif // VERIFY_HEAP
@@ -1198,25 +1257,25 @@ class EvacuateVisitorBase : public HeapObjectVisitor {
return false;
}
- inline void ExecuteMigrationObservers(AllocationSpace dest, HeapObject* src,
- HeapObject* dst, int size) {
+ inline void ExecuteMigrationObservers(AllocationSpace dest, HeapObject src,
+ HeapObject dst, int size) {
for (MigrationObserver* obs : observers_) {
obs->Move(dest, src, dst, size);
}
}
- inline void MigrateObject(HeapObject* dst, HeapObject* src, int size,
+ inline void MigrateObject(HeapObject dst, HeapObject src, int size,
AllocationSpace dest) {
migration_function_(this, dst, src, size, dest);
}
#ifdef VERIFY_HEAP
- bool AbortCompactionForTesting(HeapObject* object) {
+ bool AbortCompactionForTesting(HeapObject object) {
if (FLAG_stress_compaction) {
const uintptr_t mask = static_cast<uintptr_t>(FLAG_random_seed) &
- kPageAlignmentMask & ~kPointerAlignmentMask;
- if ((object->address() & kPageAlignmentMask) == mask) {
- Page* page = Page::FromAddress(object->address());
+ kPageAlignmentMask & ~kObjectAlignmentMask;
+ if ((object->ptr() & kPageAlignmentMask) == mask) {
+ Page* page = Page::FromHeapObject(object);
if (page->IsFlagSet(Page::COMPACTION_WAS_ABORTED_FOR_TESTING)) {
page->ClearFlag(Page::COMPACTION_WAS_ABORTED_FOR_TESTING);
} else {
@@ -1249,9 +1308,9 @@ class EvacuateNewSpaceVisitor final : public EvacuateVisitorBase {
local_pretenuring_feedback_(local_pretenuring_feedback),
is_incremental_marking_(heap->incremental_marking()->IsMarking()) {}
- inline bool Visit(HeapObject* object, int size) override {
+ inline bool Visit(HeapObject object, int size) override {
if (TryEvacuateWithoutCopy(object)) return true;
- HeapObject* target_object = nullptr;
+ HeapObject target_object;
if (heap_->ShouldBePromoted(object->address()) &&
TryEvacuateObject(OLD_SPACE, object, size, &target_object)) {
promoted_size_ += size;
@@ -1259,7 +1318,7 @@ class EvacuateNewSpaceVisitor final : public EvacuateVisitorBase {
}
heap_->UpdateAllocationSite(object->map(), object,
local_pretenuring_feedback_);
- HeapObject* target = nullptr;
+ HeapObject target;
AllocationSpace space = AllocateTargetObject(object, size, &target);
MigrateObject(HeapObject::cast(target), object, size, space);
semispace_copied_size_ += size;
@@ -1270,19 +1329,17 @@ class EvacuateNewSpaceVisitor final : public EvacuateVisitorBase {
intptr_t semispace_copied_size() { return semispace_copied_size_; }
private:
- inline bool TryEvacuateWithoutCopy(HeapObject* object) {
+ inline bool TryEvacuateWithoutCopy(HeapObject object) {
if (is_incremental_marking_) return false;
- Map* map = object->map();
+ Map map = object->map();
// Some objects can be evacuated without creating a copy.
if (map->visitor_id() == kVisitThinString) {
- HeapObject* actual = ThinString::cast(object)->unchecked_actual();
+ HeapObject actual = ThinString::cast(object)->unchecked_actual();
if (MarkCompactCollector::IsOnEvacuationCandidate(actual)) return false;
- base::Relaxed_Store(
- reinterpret_cast<base::AtomicWord*>(object->address()),
- reinterpret_cast<base::AtomicWord>(
- MapWord::FromForwardingAddress(actual).ToMap()));
+ object->map_slot().Relaxed_Store(
+ MapWord::FromForwardingAddress(actual).ToMap());
return true;
}
// TODO(mlippautz): Handle ConsString.
@@ -1290,8 +1347,8 @@ class EvacuateNewSpaceVisitor final : public EvacuateVisitorBase {
return false;
}
- inline AllocationSpace AllocateTargetObject(HeapObject* old_object, int size,
- HeapObject** target_object) {
+ inline AllocationSpace AllocateTargetObject(HeapObject old_object, int size,
+ HeapObject* target_object) {
AllocationAlignment alignment =
HeapObject::RequiredAlignment(old_object->map());
AllocationSpace space_allocated_in = NEW_SPACE;
@@ -1352,7 +1409,7 @@ class EvacuateNewSpacePageVisitor final : public HeapObjectVisitor {
}
}
- inline bool Visit(HeapObject* object, int size) override {
+ inline bool Visit(HeapObject object, int size) override {
if (mode == NEW_TO_NEW) {
heap_->UpdateAllocationSite(object->map(), object,
local_pretenuring_feedback_);
@@ -1378,11 +1435,10 @@ class EvacuateOldSpaceVisitor final : public EvacuateVisitorBase {
RecordMigratedSlotVisitor* record_visitor)
: EvacuateVisitorBase(heap, local_allocator, record_visitor) {}
- inline bool Visit(HeapObject* object, int size) override {
- HeapObject* target_object = nullptr;
- if (TryEvacuateObject(
- Page::FromAddress(object->address())->owner()->identity(), object,
- size, &target_object)) {
+ inline bool Visit(HeapObject object, int size) override {
+ HeapObject target_object;
+ if (TryEvacuateObject(Page::FromHeapObject(object)->owner()->identity(),
+ object, size, &target_object)) {
DCHECK(object->map_word().IsForwardingAddress());
return true;
}
@@ -1394,7 +1450,7 @@ class EvacuateRecordOnlyVisitor final : public HeapObjectVisitor {
public:
explicit EvacuateRecordOnlyVisitor(Heap* heap) : heap_(heap) {}
- inline bool Visit(HeapObject* object, int size) override {
+ inline bool Visit(HeapObject object, int size) override {
RecordMigratedSlotVisitor visitor(heap_->mark_compact_collector());
object->IterateBodyFast(&visitor);
return true;
@@ -1404,17 +1460,17 @@ class EvacuateRecordOnlyVisitor final : public HeapObjectVisitor {
Heap* heap_;
};
-bool MarkCompactCollector::IsUnmarkedHeapObject(Heap* heap, Object** p) {
- Object* o = *p;
+bool MarkCompactCollector::IsUnmarkedHeapObject(Heap* heap, FullObjectSlot p) {
+ Object o = *p;
if (!o->IsHeapObject()) return false;
- HeapObject* heap_object = HeapObject::cast(o);
+ HeapObject heap_object = HeapObject::cast(o);
return heap->mark_compact_collector()->non_atomic_marking_state()->IsWhite(
heap_object);
}
void MarkCompactCollector::MarkStringTable(
ObjectVisitor* custom_root_body_visitor) {
- StringTable* string_table = heap()->string_table();
+ StringTable string_table = heap()->string_table();
// Mark the string table itself.
if (marking_state()->WhiteToBlack(string_table)) {
// Explicitly mark the prefix.
@@ -1457,7 +1513,6 @@ void MarkCompactCollector::ProcessEphemeronsUntilFixpoint() {
GCTracer::Scope::MC_MARK_WEAK_CLOSURE_EPHEMERON_MARKING);
if (FLAG_parallel_marking) {
- DCHECK(FLAG_concurrent_marking);
heap_->concurrent_marking()->RescheduleTasksIfNeeded();
}
@@ -1471,6 +1526,7 @@ void MarkCompactCollector::ProcessEphemeronsUntilFixpoint() {
work_to_do = work_to_do || !marking_worklist()->IsEmpty() ||
heap()->concurrent_marking()->ephemeron_marked() ||
+ !marking_worklist()->IsEmbedderEmpty() ||
!heap()->local_embedder_heap_tracer()->IsRemoteTracingDone();
++iterations;
}
@@ -1516,7 +1572,7 @@ void MarkCompactCollector::ProcessEphemeronsLinear() {
TRACE_GC(heap()->tracer(),
GCTracer::Scope::MC_MARK_WEAK_CLOSURE_EPHEMERON_LINEAR);
CHECK(heap()->concurrent_marking()->IsStopped());
- std::unordered_multimap<HeapObject*, HeapObject*> key_to_values;
+ std::unordered_multimap<HeapObject, HeapObject, Object::Hasher> key_to_values;
Ephemeron ephemeron;
DCHECK(weak_objects_.current_ephemerons.IsEmpty());
@@ -1571,10 +1627,10 @@ void MarkCompactCollector::ProcessEphemeronsLinear() {
// This is the good case: newly_discovered stores all discovered
// objects. Now use key_to_values to see if discovered objects keep more
// objects alive due to ephemeron semantics.
- for (HeapObject* object : ephemeron_marking_.newly_discovered) {
+ for (HeapObject object : ephemeron_marking_.newly_discovered) {
auto range = key_to_values.equal_range(object);
for (auto it = range.first; it != range.second; ++it) {
- HeapObject* value = it->second;
+ HeapObject value = it->second;
MarkObject(object, value);
}
}
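
The loop above re-marks values whose ephemeron keys have become reachable and repeats until nothing new is discovered. A self-contained sketch of that fixpoint, with plain ints standing in for heap objects (MarkEphemeronsToFixpoint and Obj are illustrative names, not V8 API):

#include <set>
#include <unordered_map>

// Sketch: an ephemeron's value is live only if its key is live. Iterate
// until no new object gets marked; this is the simple quadratic fixpoint.
using Obj = int;

void MarkEphemeronsToFixpoint(
    std::set<Obj>* marked,
    const std::unordered_multimap<Obj, Obj>& key_to_values) {
  bool work_to_do = true;
  while (work_to_do) {
    work_to_do = false;
    for (const auto& entry : key_to_values) {
      if (marked->count(entry.first) && !marked->count(entry.second)) {
        marked->insert(entry.second);  // value kept alive through its key
        work_to_do = true;             // may unlock further ephemerons
      }
    }
  }
}
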
@@ -1597,12 +1653,15 @@ void MarkCompactCollector::ProcessEphemeronsLinear() {
void MarkCompactCollector::PerformWrapperTracing() {
if (heap_->local_embedder_heap_tracer()->InUse()) {
- TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WRAPPER_TRACING);
- HeapObject* object;
- while (marking_worklist()->embedder()->Pop(kMainThread, &object)) {
- heap_->TracePossibleWrapper(JSObject::cast(object));
+ TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_EMBEDDER_TRACING);
+ {
+ LocalEmbedderHeapTracer::ProcessingScope scope(
+ heap_->local_embedder_heap_tracer());
+ HeapObject object;
+ while (marking_worklist()->embedder()->Pop(kMainThread, &object)) {
+ scope.TracePossibleWrapper(JSObject::cast(object));
+ }
}
- heap_->local_embedder_heap_tracer()->RegisterWrappersWithRemoteTracer();
heap_->local_embedder_heap_tracer()->Trace(
std::numeric_limits<double>::infinity());
}
@@ -1615,9 +1674,9 @@ void MarkCompactCollector::ProcessMarkingWorklist() {
template <MarkCompactCollector::MarkingWorklistProcessingMode mode>
void MarkCompactCollector::ProcessMarkingWorklistInternal() {
- HeapObject* object;
+ HeapObject object;
MarkCompactMarkingVisitor visitor(this, marking_state());
- while ((object = marking_worklist()->Pop()) != nullptr) {
+ while (!(object = marking_worklist()->Pop()).is_null()) {
DCHECK(!object->IsFiller());
DCHECK(object->IsHeapObject());
DCHECK(heap()->Contains(object));
@@ -1627,14 +1686,13 @@ void MarkCompactCollector::ProcessMarkingWorklistInternal() {
kTrackNewlyDiscoveredObjects) {
AddNewlyDiscovered(object);
}
- Map* map = object->map();
+ Map map = object->map();
MarkObject(object, map);
visitor.Visit(map, object);
}
- DCHECK(marking_worklist()->IsBailoutEmpty());
}
-bool MarkCompactCollector::VisitEphemeron(HeapObject* key, HeapObject* value) {
+bool MarkCompactCollector::VisitEphemeron(HeapObject key, HeapObject value) {
if (marking_state()->IsBlackOrGrey(key)) {
if (marking_state()->WhiteToGrey(value)) {
marking_worklist()->Push(value);
@@ -1668,7 +1726,7 @@ void MarkCompactCollector::ProcessTopOptimizedFrame(ObjectVisitor* visitor) {
return;
}
if (it.frame()->type() == StackFrame::OPTIMIZED) {
- Code* code = it.frame()->LookupCode();
+ Code code = it.frame()->LookupCode();
if (!code->CanDeoptAt(it.frame()->pc())) {
Code::BodyDescriptor::IterateBody(code->map(), code, visitor);
}
@@ -1737,7 +1795,6 @@ void MarkCompactCollector::MarkLiveObjects() {
{
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_MAIN);
if (FLAG_parallel_marking) {
- DCHECK(FLAG_concurrent_marking);
heap_->concurrent_marking()->RescheduleTasksIfNeeded();
}
ProcessMarkingWorklist();
@@ -1756,14 +1813,16 @@ void MarkCompactCollector::MarkLiveObjects() {
// opportunistic as it may not discover graphs that are only reachable
// through ephemerons.
{
- TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WRAPPERS);
+ TRACE_GC(heap()->tracer(),
+ GCTracer::Scope::MC_MARK_EMBEDDER_TRACING_CLOSURE);
do {
// PerformWrapperTracing() also empties the work items collected by
// concurrent markers. As a result this call needs to happen at least
// once.
PerformWrapperTracing();
ProcessMarkingWorklist();
- } while (!heap_->local_embedder_heap_tracer()->IsRemoteTracingDone());
+ } while (!heap_->local_embedder_heap_tracer()->IsRemoteTracingDone() ||
+ !marking_worklist()->IsEmbedderEmpty());
DCHECK(marking_worklist()->IsEmbedderEmpty());
DCHECK(marking_worklist()->IsEmpty());
}
@@ -1807,10 +1866,6 @@ void MarkCompactCollector::MarkLiveObjects() {
{
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WEAK_CLOSURE_HARMONY);
ProcessEphemeronMarking();
- {
- TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WRAPPER_EPILOGUE);
- heap()->local_embedder_heap_tracer()->TraceEpilogue();
- }
DCHECK(marking_worklist()->IsEmbedderEmpty());
DCHECK(marking_worklist()->IsEmpty());
}
@@ -1835,7 +1890,7 @@ void MarkCompactCollector::ClearNonLiveReferences() {
// Prune the string table removing all strings only pointed to by the
// string table. Cannot use string_table() here because the string
// table is marked.
- StringTable* string_table = heap()->string_table();
+ StringTable string_table = heap()->string_table();
InternalizedStringTableCleaner internalized_visitor(heap(), string_table);
string_table->IterateElements(&internalized_visitor);
string_table->ElementsRemoved(internalized_visitor.PointersRemoved());
@@ -1846,6 +1901,16 @@ void MarkCompactCollector::ClearNonLiveReferences() {
}
{
+ TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_FLUSHABLE_BYTECODE);
+ ClearOldBytecodeCandidates();
+ }
+
+ {
+ TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_FLUSHED_JS_FUNCTIONS);
+ ClearFlushedJsFunctions();
+ }
+
+ {
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_WEAK_LISTS);
// Process the weak references.
MarkCompactWeakObjectRetainer mark_compact_object_retainer(
@@ -1859,36 +1924,47 @@ void MarkCompactCollector::ClearNonLiveReferences() {
// cleared.
ClearFullMapTransitions();
}
- ClearWeakReferences();
- MarkDependentCodeForDeoptimization();
+ {
+ TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_WEAK_REFERENCES);
+ ClearWeakReferences();
+ ClearWeakCollections();
+ ClearJSWeakCells();
+ }
- ClearWeakCollections();
+ MarkDependentCodeForDeoptimization();
DCHECK(weak_objects_.transition_arrays.IsEmpty());
DCHECK(weak_objects_.weak_references.IsEmpty());
DCHECK(weak_objects_.weak_objects_in_code.IsEmpty());
+ DCHECK(weak_objects_.js_weak_refs.IsEmpty());
+ DCHECK(weak_objects_.js_weak_cells.IsEmpty());
+ DCHECK(weak_objects_.bytecode_flushing_candidates.IsEmpty());
+ DCHECK(weak_objects_.flushed_js_functions.IsEmpty());
}
void MarkCompactCollector::MarkDependentCodeForDeoptimization() {
- std::pair<HeapObject*, Code*> weak_object_in_code;
+ std::pair<HeapObject, Code> weak_object_in_code;
while (weak_objects_.weak_objects_in_code.Pop(kMainThread,
&weak_object_in_code)) {
- HeapObject* object = weak_object_in_code.first;
- Code* code = weak_object_in_code.second;
+ HeapObject object = weak_object_in_code.first;
+ Code code = weak_object_in_code.second;
if (!non_atomic_marking_state()->IsBlackOrGrey(object) &&
- !code->marked_for_deoptimization()) {
- code->SetMarkedForDeoptimization("weak objects");
- code->InvalidateEmbeddedObjects(heap_);
- have_code_to_deoptimize_ = true;
+ !code->embedded_objects_cleared()) {
+ if (!code->marked_for_deoptimization()) {
+ code->SetMarkedForDeoptimization("weak objects");
+ have_code_to_deoptimize_ = true;
+ }
+ code->ClearEmbeddedObjects(heap_);
+ DCHECK(code->embedded_objects_cleared());
}
}
}
-void MarkCompactCollector::ClearPotentialSimpleMapTransition(Map* dead_target) {
+void MarkCompactCollector::ClearPotentialSimpleMapTransition(Map dead_target) {
DCHECK(non_atomic_marking_state()->IsWhite(dead_target));
- Object* potential_parent = dead_target->constructor_or_backpointer();
+ Object potential_parent = dead_target->constructor_or_backpointer();
if (potential_parent->IsMap()) {
- Map* parent = Map::cast(potential_parent);
+ Map parent = Map::cast(potential_parent);
DisallowHeapAllocation no_gc_obviously;
if (non_atomic_marking_state()->IsBlackOrGrey(parent) &&
TransitionsAccessor(isolate(), parent, &no_gc_obviously)
@@ -1898,14 +1974,14 @@ void MarkCompactCollector::ClearPotentialSimpleMapTransition(Map* dead_target) {
}
}
-void MarkCompactCollector::ClearPotentialSimpleMapTransition(Map* map,
- Map* dead_target) {
+void MarkCompactCollector::ClearPotentialSimpleMapTransition(Map map,
+ Map dead_target) {
DCHECK(!map->is_prototype_map());
DCHECK(!dead_target->is_prototype_map());
DCHECK_EQ(map->raw_transitions(), HeapObjectReference::Weak(dead_target));
// Take ownership of the descriptor array.
int number_of_own_descriptors = map->NumberOfOwnDescriptors();
- DescriptorArray* descriptors = map->instance_descriptors();
+ DescriptorArray descriptors = map->instance_descriptors();
if (descriptors == dead_target->instance_descriptors() &&
number_of_own_descriptors > 0) {
TrimDescriptorArray(map, descriptors);
@@ -1913,21 +1989,120 @@ void MarkCompactCollector::ClearPotentialSimpleMapTransition(Map* map,
}
}
+void MarkCompactCollector::FlushBytecodeFromSFI(
+ SharedFunctionInfo shared_info) {
+ DCHECK(shared_info->HasBytecodeArray());
+
+ // Retain objects required for uncompiled data.
+ String inferred_name = shared_info->inferred_name();
+ int start_position = shared_info->StartPosition();
+ int end_position = shared_info->EndPosition();
+ int function_literal_id = shared_info->FunctionLiteralId(isolate());
+
+ shared_info->DiscardCompiledMetadata(
+ isolate(), [](HeapObject object, ObjectSlot slot, HeapObject target) {
+ RecordSlot(object, slot, target);
+ });
+
+ // The size of the bytecode array should always be at least as large as an
+ // UncompiledData object.
+ STATIC_ASSERT(BytecodeArray::SizeFor(0) >=
+ UncompiledDataWithoutPreparseData::kSize);
+
+ // Replace bytecode array with an uncompiled data array.
+ HeapObject compiled_data = shared_info->GetBytecodeArray();
+ Address compiled_data_start = compiled_data->address();
+ int compiled_data_size = compiled_data->Size();
+ MemoryChunk* chunk = MemoryChunk::FromAddress(compiled_data_start);
+
+ // Clear any recorded slots for the compiled data, since they are now invalid.
+ RememberedSet<OLD_TO_NEW>::RemoveRange(
+ chunk, compiled_data_start, compiled_data_start + compiled_data_size,
+ SlotSet::PREFREE_EMPTY_BUCKETS);
+ RememberedSet<OLD_TO_OLD>::RemoveRange(
+ chunk, compiled_data_start, compiled_data_start + compiled_data_size,
+ SlotSet::PREFREE_EMPTY_BUCKETS);
+
+ // Swap the map, using set_map_after_allocation to avoid verify heap checks
+ // which are not necessary since we are doing this during the GC atomic pause.
+ compiled_data->set_map_after_allocation(
+ ReadOnlyRoots(heap()).uncompiled_data_without_preparse_data_map(),
+ SKIP_WRITE_BARRIER);
+
+ // Create a filler object for any leftover space in the bytecode array.
+ if (!heap()->IsLargeObject(compiled_data)) {
+ heap()->CreateFillerObjectAt(
+ compiled_data->address() + UncompiledDataWithoutPreparseData::kSize,
+ compiled_data_size - UncompiledDataWithoutPreparseData::kSize,
+ ClearRecordedSlots::kNo);
+ }
+
+ // Initialize the uncompiled data.
+ UncompiledData uncompiled_data = UncompiledData::cast(compiled_data);
+ UncompiledData::Initialize(
+ uncompiled_data, inferred_name, start_position, end_position,
+ function_literal_id,
+ [](HeapObject object, ObjectSlot slot, HeapObject target) {
+ RecordSlot(object, slot, target);
+ });
+
+ // Mark the uncompiled data as black, and ensure all fields have already been
+ // marked.
+ DCHECK(non_atomic_marking_state()->IsBlackOrGrey(inferred_name));
+ non_atomic_marking_state()->WhiteToBlack(uncompiled_data);
+
+ // Use the raw function data setter to avoid validity checks, since we're
+ // performing the unusual task of decompiling.
+ shared_info->set_function_data(uncompiled_data);
+ DCHECK(!shared_info->is_compiled());
+}
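
FlushBytecodeFromSFI reuses the BytecodeArray's storage for the smaller UncompiledData and plugs the tail with a filler so a linear heap walk still sees a parsable sequence of objects. A toy sketch of that in-place shrink, with a made-up one-byte tag header standing in for the map word:

#include <cassert>
#include <cstring>

// Sketch: when an object is repurposed as a smaller one in place, the
// leftover tail must still parse as a valid (filler) object so a linear
// heap walk can step over it.
enum class Tag : unsigned char { kBytecode, kUncompiledData, kFiller };

void ShrinkInPlace(unsigned char* obj, int old_size, int new_size) {
  assert(new_size >= 1 && new_size <= old_size);
  obj[0] = static_cast<unsigned char>(Tag::kUncompiledData);  // swap the "map"
  if (old_size > new_size) {
    unsigned char* filler = obj + new_size;
    filler[0] = static_cast<unsigned char>(Tag::kFiller);
    // A real filler would also encode its size; zero the rest for clarity.
    std::memset(filler + 1, 0, static_cast<size_t>(old_size - new_size - 1));
  }
}
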
+
+void MarkCompactCollector::ClearOldBytecodeCandidates() {
+ DCHECK(FLAG_flush_bytecode ||
+ weak_objects_.bytecode_flushing_candidates.IsEmpty());
+ SharedFunctionInfo flushing_candidate;
+ while (weak_objects_.bytecode_flushing_candidates.Pop(kMainThread,
+ &flushing_candidate)) {
+ // If the BytecodeArray is dead, flush it, which will replace the field with
+ // an uncompiled data object.
+ if (!non_atomic_marking_state()->IsBlackOrGrey(
+ flushing_candidate->GetBytecodeArray())) {
+ FlushBytecodeFromSFI(flushing_candidate);
+ }
+
+ // Now record the slot, which either has been updated to point at uncompiled
+ // data or still holds the live BytecodeArray.
+ ObjectSlot slot = HeapObject::RawField(
+ flushing_candidate, SharedFunctionInfo::kFunctionDataOffset);
+ RecordSlot(flushing_candidate, slot, HeapObject::cast(*slot));
+ }
+}
+
+void MarkCompactCollector::ClearFlushedJsFunctions() {
+ DCHECK(FLAG_flush_bytecode || weak_objects_.flushed_js_functions.IsEmpty());
+ JSFunction flushed_js_function;
+ while (weak_objects_.flushed_js_functions.Pop(kMainThread,
+ &flushed_js_function)) {
+ flushed_js_function->ResetIfBytecodeFlushed();
+ }
+}
+
void MarkCompactCollector::ClearFullMapTransitions() {
- TransitionArray* array;
+ TransitionArray array;
while (weak_objects_.transition_arrays.Pop(kMainThread, &array)) {
int num_transitions = array->number_of_entries();
if (num_transitions > 0) {
- Map* map;
+ Map map;
// The array might contain "undefined" elements because it's not yet
// filled. Allow it.
if (array->GetTargetIfExists(0, isolate(), &map)) {
- DCHECK_NOT_NULL(map); // Weak pointers aren't cleared yet.
- Map* parent = Map::cast(map->constructor_or_backpointer());
+ DCHECK(!map.is_null()); // Weak pointers aren't cleared yet.
+ Map parent = Map::cast(map->constructor_or_backpointer());
bool parent_is_alive =
non_atomic_marking_state()->IsBlackOrGrey(parent);
- DescriptorArray* descriptors =
- parent_is_alive ? parent->instance_descriptors() : nullptr;
+ DescriptorArray descriptors = parent_is_alive
+ ? parent->instance_descriptors()
+ : DescriptorArray();
bool descriptors_owner_died =
CompactTransitionArray(parent, array, descriptors);
if (descriptors_owner_died) {
@@ -1938,32 +2113,32 @@ void MarkCompactCollector::ClearFullMapTransitions() {
}
}
-bool MarkCompactCollector::CompactTransitionArray(
- Map* map, TransitionArray* transitions, DescriptorArray* descriptors) {
+bool MarkCompactCollector::CompactTransitionArray(Map map,
+ TransitionArray transitions,
+ DescriptorArray descriptors) {
DCHECK(!map->is_prototype_map());
int num_transitions = transitions->number_of_entries();
bool descriptors_owner_died = false;
int transition_index = 0;
// Compact all live transitions to the left.
for (int i = 0; i < num_transitions; ++i) {
- Map* target = transitions->GetTarget(i);
+ Map target = transitions->GetTarget(i);
DCHECK_EQ(target->constructor_or_backpointer(), map);
if (non_atomic_marking_state()->IsWhite(target)) {
- if (descriptors != nullptr &&
+ if (!descriptors.is_null() &&
target->instance_descriptors() == descriptors) {
DCHECK(!target->is_prototype_map());
descriptors_owner_died = true;
}
} else {
if (i != transition_index) {
- Name* key = transitions->GetKey(i);
+ Name key = transitions->GetKey(i);
transitions->SetKey(transition_index, key);
- HeapObjectReference** key_slot =
- transitions->GetKeySlot(transition_index);
+ HeapObjectSlot key_slot = transitions->GetKeySlot(transition_index);
RecordSlot(transitions, key_slot, key);
- MaybeObject* raw_target = transitions->GetRawTarget(i);
+ MaybeObject raw_target = transitions->GetRawTarget(i);
transitions->SetRawTarget(transition_index, raw_target);
- HeapObjectReference** target_slot =
+ HeapObjectSlot target_slot =
transitions->GetTargetSlot(transition_index);
RecordSlot(transitions, target_slot, raw_target->GetHeapObject());
}
@@ -1988,26 +2163,44 @@ bool MarkCompactCollector::CompactTransitionArray(
return descriptors_owner_died;
}
-void MarkCompactCollector::TrimDescriptorArray(Map* map,
- DescriptorArray* descriptors) {
+void MarkCompactCollector::RightTrimDescriptorArray(DescriptorArray array,
+ int descriptors_to_trim) {
+ int old_nof_all_descriptors = array->number_of_all_descriptors();
+ int new_nof_all_descriptors = old_nof_all_descriptors - descriptors_to_trim;
+ DCHECK_LT(0, descriptors_to_trim);
+ DCHECK_LE(0, new_nof_all_descriptors);
+ Address start = array->GetDescriptorSlot(new_nof_all_descriptors).address();
+ Address end = array->GetDescriptorSlot(old_nof_all_descriptors).address();
+ RememberedSet<OLD_TO_NEW>::RemoveRange(MemoryChunk::FromHeapObject(array),
+ start, end,
+ SlotSet::PREFREE_EMPTY_BUCKETS);
+ RememberedSet<OLD_TO_OLD>::RemoveRange(MemoryChunk::FromHeapObject(array),
+ start, end,
+ SlotSet::PREFREE_EMPTY_BUCKETS);
+ heap()->CreateFillerObjectAt(start, static_cast<int>(end - start),
+ ClearRecordedSlots::kNo);
+ array->set_number_of_all_descriptors(new_nof_all_descriptors);
+}
+
+void MarkCompactCollector::TrimDescriptorArray(Map map,
+ DescriptorArray descriptors) {
int number_of_own_descriptors = map->NumberOfOwnDescriptors();
if (number_of_own_descriptors == 0) {
DCHECK(descriptors == ReadOnlyRoots(heap_).empty_descriptor_array());
return;
}
-
- int number_of_descriptors = descriptors->number_of_descriptors_storage();
- int to_trim = number_of_descriptors - number_of_own_descriptors;
+ // TODO(ulan): Trim only if slack is greater than some percentage threshold.
+ int to_trim =
+ descriptors->number_of_all_descriptors() - number_of_own_descriptors;
if (to_trim > 0) {
- heap_->RightTrimWeakFixedArray(descriptors,
- to_trim * DescriptorArray::kEntrySize);
- descriptors->SetNumberOfDescriptors(number_of_own_descriptors);
+ descriptors->set_number_of_descriptors(number_of_own_descriptors);
+ RightTrimDescriptorArray(descriptors, to_trim);
TrimEnumCache(map, descriptors);
descriptors->Sort();
if (FLAG_unbox_double_fields) {
- LayoutDescriptor* layout_descriptor = map->layout_descriptor();
+ LayoutDescriptor layout_descriptor = map->layout_descriptor();
layout_descriptor = layout_descriptor->Trim(heap_, map, descriptors,
number_of_own_descriptors);
SLOW_DCHECK(layout_descriptor->IsConsistentWithMap(map, true));
@@ -2017,21 +2210,20 @@ void MarkCompactCollector::TrimDescriptorArray(Map* map,
map->set_owns_descriptors(true);
}
-void MarkCompactCollector::TrimEnumCache(Map* map,
- DescriptorArray* descriptors) {
+void MarkCompactCollector::TrimEnumCache(Map map, DescriptorArray descriptors) {
int live_enum = map->EnumLength();
if (live_enum == kInvalidEnumCacheSentinel) {
live_enum = map->NumberOfEnumerableProperties();
}
if (live_enum == 0) return descriptors->ClearEnumCache();
- EnumCache* enum_cache = descriptors->GetEnumCache();
+ EnumCache enum_cache = descriptors->enum_cache();
- FixedArray* keys = enum_cache->keys();
+ FixedArray keys = enum_cache->keys();
int to_trim = keys->length() - live_enum;
if (to_trim <= 0) return;
heap_->RightTrimFixedArray(keys, to_trim);
- FixedArray* indices = enum_cache->indices();
+ FixedArray indices = enum_cache->indices();
to_trim = indices->length() - live_enum;
if (to_trim <= 0) return;
heap_->RightTrimFixedArray(indices, to_trim);
@@ -2039,13 +2231,13 @@ void MarkCompactCollector::TrimEnumCache(Map* map,
void MarkCompactCollector::ClearWeakCollections() {
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_WEAK_COLLECTIONS);
- EphemeronHashTable* table;
+ EphemeronHashTable table;
while (weak_objects_.ephemeron_hash_tables.Pop(kMainThread, &table)) {
for (int i = 0; i < table->Capacity(); i++) {
- HeapObject* key = HeapObject::cast(table->KeyAt(i));
+ HeapObject key = HeapObject::cast(table->KeyAt(i));
#ifdef VERIFY_HEAP
- Object* value = table->ValueAt(i);
+ Object value = table->ValueAt(i);
if (value->IsHeapObject()) {
CHECK_IMPLIES(
@@ -2062,22 +2254,81 @@ void MarkCompactCollector::ClearWeakCollections() {
void MarkCompactCollector::ClearWeakReferences() {
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_WEAK_REFERENCES);
- std::pair<HeapObject*, HeapObjectReference**> slot;
+ std::pair<HeapObject, HeapObjectSlot> slot;
+ HeapObjectReference cleared_weak_ref =
+ HeapObjectReference::ClearedValue(isolate());
while (weak_objects_.weak_references.Pop(kMainThread, &slot)) {
- HeapObject* value;
- HeapObjectReference** location = slot.second;
+ HeapObject value;
+ // The slot could have been overwritten, so we have to treat it
+ // as MaybeObjectSlot.
+ MaybeObjectSlot location(slot.second);
if ((*location)->GetHeapObjectIfWeak(&value)) {
DCHECK(!value->IsCell());
if (non_atomic_marking_state()->IsBlackOrGrey(value)) {
// The value of the weak reference is alive.
- RecordSlot(slot.first, location, value);
+ RecordSlot(slot.first, HeapObjectSlot(location), value);
} else {
if (value->IsMap()) {
// The map is non-live.
ClearPotentialSimpleMapTransition(Map::cast(value));
}
- *location = HeapObjectReference::ClearedValue();
+ location.store(cleared_weak_ref);
+ }
+ }
+ }
+}
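
ClearWeakReferences pops each recorded weak slot and either re-records a live target or overwrites the slot with the cleared sentinel. Reduced to its essentials over a vector of slots and a mark set (the types and the sentinel value are invented for illustration):

#include <set>
#include <vector>

// Sketch: after marking, a weak slot either still points at a live target
// or is overwritten with a cleared sentinel.
using Obj = int;
constexpr Obj kClearedWeakRef = -1;

void ClearDeadWeakSlots(const std::vector<Obj*>& weak_slots,
                        const std::set<Obj>& marked) {
  for (Obj* slot : weak_slots) {
    if (marked.count(*slot) == 0) *slot = kClearedWeakRef;
  }
}
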
+
+void MarkCompactCollector::ClearJSWeakCells() {
+ if (!FLAG_harmony_weak_refs) {
+ return;
+ }
+ JSWeakRef weak_ref;
+ while (weak_objects_.js_weak_refs.Pop(kMainThread, &weak_ref)) {
+ // We do not insert cleared weak refs into the list, so the target
+ // cannot be undefined here.
+ JSReceiver target = JSReceiver::cast(weak_ref->target());
+ if (!non_atomic_marking_state()->IsBlackOrGrey(target)) {
+ weak_ref->set_target(ReadOnlyRoots(isolate()).undefined_value());
+ } else {
+ // The value of the JSWeakRef is alive.
+ ObjectSlot slot =
+ HeapObject::RawField(weak_ref, JSWeakRef::kTargetOffset);
+ RecordSlot(weak_ref, slot, target);
+ }
+ }
+ JSWeakCell weak_cell;
+ while (weak_objects_.js_weak_cells.Pop(kMainThread, &weak_cell)) {
+ // We do not insert cleared weak cells into the list, so the value
+ // cannot be a Smi here.
+ HeapObject target = HeapObject::cast(weak_cell->target());
+ if (!non_atomic_marking_state()->IsBlackOrGrey(target)) {
+ // The value of the JSWeakCell is dead.
+ JSWeakFactory weak_factory = JSWeakFactory::cast(weak_cell->factory());
+ if (!weak_factory->scheduled_for_cleanup()) {
+ heap()->AddDirtyJSWeakFactory(
+ weak_factory,
+ [](HeapObject object, ObjectSlot slot, Object target) {
+ if (target->IsHeapObject()) {
+ RecordSlot(object, slot, HeapObject::cast(target));
+ }
+ });
}
+ // We're modifying the pointers in JSWeakCell and JSWeakFactory during GC;
+ // thus we need to record the slots it writes. The normal write barrier is
+ // not enough, since it's disabled before GC.
+ weak_cell->Nullify(isolate(),
+ [](HeapObject object, ObjectSlot slot, Object target) {
+ if (target->IsHeapObject()) {
+ RecordSlot(object, slot, HeapObject::cast(target));
+ }
+ });
+ DCHECK(weak_factory->NeedsCleanup());
+ DCHECK(weak_factory->scheduled_for_cleanup());
+ } else {
+ // The value of the JSWeakCell is alive.
+ ObjectSlot slot =
+ HeapObject::RawField(weak_cell, JSWeakCell::kTargetOffset);
+ RecordSlot(weak_cell, slot, HeapObject::cast(*slot));
}
}
}
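
The weak-cell path above additionally schedules the owning factory for cleanup the first time one of its cells dies, so user-visible callbacks run once per dirty factory. A sketch under the same assumption, with hypothetical Cell and Factory structs in place of the JS objects:

#include <vector>

// Sketch: when a cell's target dies, null the target and queue the owning
// factory for user-visible cleanup exactly once.
struct Factory { bool scheduled_for_cleanup = false; };
struct Cell { int* target; Factory* factory; };

void NullifyDeadCells(std::vector<Cell>* cells,
                      std::vector<Factory*>* dirty_factories,
                      bool (*is_live)(const int*)) {
  for (Cell& cell : *cells) {
    if (cell.target == nullptr || is_live(cell.target)) continue;
    cell.target = nullptr;
    if (!cell.factory->scheduled_for_cleanup) {
      cell.factory->scheduled_for_cleanup = true;
      dirty_factories->push_back(cell.factory);
    }
  }
}
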
@@ -2090,14 +2341,25 @@ void MarkCompactCollector::AbortWeakObjects() {
weak_objects_.discovered_ephemerons.Clear();
weak_objects_.weak_references.Clear();
weak_objects_.weak_objects_in_code.Clear();
+ weak_objects_.js_weak_refs.Clear();
+ weak_objects_.js_weak_cells.Clear();
+ weak_objects_.bytecode_flushing_candidates.Clear();
+ weak_objects_.flushed_js_functions.Clear();
}
-void MarkCompactCollector::RecordRelocSlot(Code* host, RelocInfo* rinfo,
- Object* target) {
- Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target));
- Page* source_page = Page::FromAddress(reinterpret_cast<Address>(host));
+bool MarkCompactCollector::IsOnEvacuationCandidate(MaybeObject obj) {
+ return Page::FromAddress(obj.ptr())->IsEvacuationCandidate();
+}
+
+MarkCompactCollector::RecordRelocSlotInfo
+MarkCompactCollector::PrepareRecordRelocSlot(Code host, RelocInfo* rinfo,
+ HeapObject target) {
+ RecordRelocSlotInfo result;
+ result.should_record = false;
+ Page* target_page = Page::FromHeapObject(target);
+ Page* source_page = Page::FromHeapObject(host);
if (target_page->IsEvacuationCandidate() &&
- (rinfo->host() == nullptr ||
+ (rinfo->host().is_null() ||
!source_page->ShouldSkipEvacuationSlotRecording())) {
RelocInfo::Mode rmode = rinfo->rmode();
Address addr = rinfo->pc();
@@ -2111,29 +2373,91 @@ void MarkCompactCollector::RecordRelocSlot(Code* host, RelocInfo* rinfo,
slot_type = OBJECT_SLOT;
}
}
- RememberedSet<OLD_TO_OLD>::InsertTyped(
- source_page, reinterpret_cast<Address>(host), slot_type, addr);
+ uintptr_t offset = addr - source_page->address();
+ DCHECK_LT(offset, static_cast<uintptr_t>(TypedSlotSet::kMaxOffset));
+ result.should_record = true;
+ result.memory_chunk = source_page;
+ result.slot_type = slot_type;
+ result.offset = static_cast<uint32_t>(offset);
+ }
+ return result;
+}
+
+void MarkCompactCollector::RecordRelocSlot(Code host, RelocInfo* rinfo,
+ HeapObject target) {
+ RecordRelocSlotInfo info = PrepareRecordRelocSlot(host, rinfo, target);
+ if (info.should_record) {
+ RememberedSet<OLD_TO_OLD>::InsertTyped(info.memory_chunk, info.slot_type,
+ info.offset);
}
}
-template <AccessMode access_mode>
-static inline SlotCallbackResult UpdateSlot(
- MaybeObject** slot, MaybeObject* old, HeapObject* heap_obj,
- HeapObjectReferenceType reference_type) {
+namespace {
+
+// A missing specialization such as MakeSlotValue<FullObjectSlot, WEAK>() turns
+// an attempt to store a weak reference into a strong-only slot into a
+// compilation error.
+template <typename TSlot, HeapObjectReferenceType reference_type>
+typename TSlot::TObject MakeSlotValue(HeapObject heap_object);
+
+template <>
+Object MakeSlotValue<ObjectSlot, HeapObjectReferenceType::STRONG>(
+ HeapObject heap_object) {
+ return heap_object;
+}
+
+template <>
+MaybeObject MakeSlotValue<MaybeObjectSlot, HeapObjectReferenceType::STRONG>(
+ HeapObject heap_object) {
+ return HeapObjectReference::Strong(heap_object);
+}
+
+template <>
+MaybeObject MakeSlotValue<MaybeObjectSlot, HeapObjectReferenceType::WEAK>(
+ HeapObject heap_object) {
+ return HeapObjectReference::Weak(heap_object);
+}
+
+#ifdef V8_COMPRESS_POINTERS
+template <>
+Object MakeSlotValue<FullObjectSlot, HeapObjectReferenceType::STRONG>(
+ HeapObject heap_object) {
+ return heap_object;
+}
+
+template <>
+MaybeObject MakeSlotValue<FullMaybeObjectSlot, HeapObjectReferenceType::STRONG>(
+ HeapObject heap_object) {
+ return HeapObjectReference::Strong(heap_object);
+}
+
+// The following specialization
+// MakeSlotValue<FullMaybeObjectSlot, HeapObjectReferenceType::WEAK>()
+// is not used.
+#endif
+
+template <AccessMode access_mode, HeapObjectReferenceType reference_type,
+ typename TSlot>
+static inline SlotCallbackResult UpdateSlot(TSlot slot,
+ typename TSlot::TObject old,
+ HeapObject heap_obj) {
+ static_assert(
+ std::is_same<TSlot, FullObjectSlot>::value ||
+ std::is_same<TSlot, ObjectSlot>::value ||
+ std::is_same<TSlot, FullMaybeObjectSlot>::value ||
+ std::is_same<TSlot, MaybeObjectSlot>::value,
+ "Only [Full]ObjectSlot and [Full]MaybeObjectSlot are expected here");
MapWord map_word = heap_obj->map_word();
if (map_word.IsForwardingAddress()) {
DCHECK(Heap::InFromSpace(heap_obj) ||
MarkCompactCollector::IsOnEvacuationCandidate(heap_obj) ||
- Page::FromAddress(heap_obj->address())
- ->IsFlagSet(Page::COMPACTION_WAS_ABORTED));
- MaybeObject* target =
- reference_type == HeapObjectReferenceType::WEAK
- ? HeapObjectReference::Weak(map_word.ToForwardingAddress())
- : HeapObjectReference::Strong(map_word.ToForwardingAddress());
+ Page::FromHeapObject(heap_obj)->IsFlagSet(
+ Page::COMPACTION_WAS_ABORTED));
+ typename TSlot::TObject target =
+ MakeSlotValue<TSlot, reference_type>(map_word.ToForwardingAddress());
if (access_mode == AccessMode::NON_ATOMIC) {
- *slot = target;
+ slot.store(target);
} else {
- base::AsAtomicPointer::Release_CompareAndSwap(slot, old, target);
+ slot.Release_CompareAndSwap(old, target);
}
DCHECK(!Heap::InFromSpace(target));
DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(target));
@@ -2144,113 +2468,113 @@ static inline SlotCallbackResult UpdateSlot(
return REMOVE_SLOT;
}
-template <AccessMode access_mode>
-static inline SlotCallbackResult UpdateSlot(MaybeObject** slot) {
- MaybeObject* obj = base::AsAtomicPointer::Relaxed_Load(slot);
- HeapObject* heap_obj;
- if (obj->GetHeapObjectIfWeak(&heap_obj)) {
- UpdateSlot<access_mode>(slot, obj, heap_obj, HeapObjectReferenceType::WEAK);
+template <AccessMode access_mode, typename TSlot>
+static inline SlotCallbackResult UpdateSlot(TSlot slot) {
+ typename TSlot::TObject obj = slot.Relaxed_Load();
+ HeapObject heap_obj;
+ if (TSlot::kCanBeWeak && obj->GetHeapObjectIfWeak(&heap_obj)) {
+ UpdateSlot<access_mode, HeapObjectReferenceType::WEAK>(slot, obj, heap_obj);
} else if (obj->GetHeapObjectIfStrong(&heap_obj)) {
- return UpdateSlot<access_mode>(slot, obj, heap_obj,
- HeapObjectReferenceType::STRONG);
+ return UpdateSlot<access_mode, HeapObjectReferenceType::STRONG>(slot, obj,
+ heap_obj);
}
return REMOVE_SLOT;
}
-template <AccessMode access_mode>
-static inline SlotCallbackResult UpdateStrongSlot(MaybeObject** maybe_slot) {
- DCHECK((*maybe_slot)->IsSmi() || (*maybe_slot)->IsStrong());
- Object** slot = reinterpret_cast<Object**>(maybe_slot);
- Object* obj = base::AsAtomicPointer::Relaxed_Load(slot);
- if (obj->IsHeapObject()) {
- HeapObject* heap_obj = HeapObject::cast(obj);
- return UpdateSlot<access_mode>(maybe_slot, MaybeObject::FromObject(obj),
- heap_obj, HeapObjectReferenceType::STRONG);
+template <AccessMode access_mode, typename TSlot>
+static inline SlotCallbackResult UpdateStrongSlot(TSlot slot) {
+ DCHECK(!HasWeakHeapObjectTag((*slot).ptr()));
+ typename TSlot::TObject obj = slot.Relaxed_Load();
+ HeapObject heap_obj;
+ if (obj.GetHeapObject(&heap_obj)) {
+ return UpdateSlot<access_mode, HeapObjectReferenceType::STRONG>(slot, obj,
+ heap_obj);
}
return REMOVE_SLOT;
}
+} // namespace
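
The templated UpdateSlot above dispatches on access mode at compile time; in the concurrent case a compare-and-swap is used, and losing the race is harmless because every contender installs the same forwarding target. A reduced sketch with std::atomic standing in for V8's slot types:

#include <atomic>

// Sketch: update a slot to an object's forwarding address. Concurrent
// updaters race benignly: whoever wins the CAS installs the same target.
enum class AccessMode { ATOMIC, NON_ATOMIC };

template <AccessMode mode>
void UpdateSlotTo(std::atomic<void*>* slot, void* old_value, void* forwarded) {
  if constexpr (mode == AccessMode::NON_ATOMIC) {
    slot->store(forwarded, std::memory_order_relaxed);
  } else {
    // Failure is fine; another thread already wrote the same value.
    slot->compare_exchange_strong(old_value, forwarded,
                                  std::memory_order_release);
  }
}
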
+
// Visitor for updating root pointers and to-space pointers.
// It does not expect to encounter pointers to dead objects.
-// TODO(ulan): Remove code object specific functions. This visitor
-// nevers visits code objects.
class PointersUpdatingVisitor : public ObjectVisitor, public RootVisitor {
public:
- explicit PointersUpdatingVisitor(Heap* heap) : heap_(heap) {}
+ PointersUpdatingVisitor() {}
- void VisitPointer(HeapObject* host, Object** p) override {
+ void VisitPointer(HeapObject host, ObjectSlot p) override {
UpdateStrongSlotInternal(p);
}
- void VisitPointer(HeapObject* host, MaybeObject** p) override {
+ void VisitPointer(HeapObject host, MaybeObjectSlot p) override {
UpdateSlotInternal(p);
}
- void VisitPointers(HeapObject* host, Object** start, Object** end) override {
- for (Object** p = start; p < end; p++) {
+ void VisitPointers(HeapObject host, ObjectSlot start,
+ ObjectSlot end) override {
+ for (ObjectSlot p = start; p < end; ++p) {
UpdateStrongSlotInternal(p);
}
}
- void VisitPointers(HeapObject* host, MaybeObject** start,
- MaybeObject** end) final {
- for (MaybeObject** p = start; p < end; p++) {
+ void VisitPointers(HeapObject host, MaybeObjectSlot start,
+ MaybeObjectSlot end) final {
+ for (MaybeObjectSlot p = start; p < end; ++p) {
UpdateSlotInternal(p);
}
}
void VisitRootPointer(Root root, const char* description,
- Object** p) override {
- UpdateStrongSlotInternal(p);
+ FullObjectSlot p) override {
+ UpdateRootSlotInternal(p);
}
- void VisitRootPointers(Root root, const char* description, Object** start,
- Object** end) override {
- for (Object** p = start; p < end; p++) UpdateStrongSlotInternal(p);
+ void VisitRootPointers(Root root, const char* description,
+ FullObjectSlot start, FullObjectSlot end) override {
+ for (FullObjectSlot p = start; p < end; ++p) {
+ UpdateRootSlotInternal(p);
+ }
}
- void VisitEmbeddedPointer(Code* host, RelocInfo* rinfo) override {
- UpdateTypedSlotHelper::UpdateEmbeddedPointer(
- heap_, rinfo, UpdateStrongMaybeObjectSlotInternal);
+ void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
+ // This visitor never visits code objects.
+ UNREACHABLE();
}
- void VisitCodeTarget(Code* host, RelocInfo* rinfo) override {
- UpdateTypedSlotHelper::UpdateCodeTarget(
- rinfo, UpdateStrongMaybeObjectSlotInternal);
+ void VisitCodeTarget(Code host, RelocInfo* rinfo) override {
+ // This visitor never visits code objects.
+ UNREACHABLE();
}
private:
+ static inline SlotCallbackResult UpdateRootSlotInternal(FullObjectSlot slot) {
+ return UpdateStrongSlot<AccessMode::NON_ATOMIC>(slot);
+ }
+
static inline SlotCallbackResult UpdateStrongMaybeObjectSlotInternal(
- MaybeObject** slot) {
- DCHECK(!(*slot)->IsWeakOrCleared());
- return UpdateStrongSlotInternal(reinterpret_cast<Object**>(slot));
+ MaybeObjectSlot slot) {
+ return UpdateStrongSlot<AccessMode::NON_ATOMIC>(slot);
}
- static inline SlotCallbackResult UpdateStrongSlotInternal(Object** slot) {
- DCHECK(!HasWeakHeapObjectTag(*slot));
- return UpdateStrongSlot<AccessMode::NON_ATOMIC>(
- reinterpret_cast<MaybeObject**>(slot));
+ static inline SlotCallbackResult UpdateStrongSlotInternal(ObjectSlot slot) {
+ return UpdateStrongSlot<AccessMode::NON_ATOMIC>(slot);
}
- static inline SlotCallbackResult UpdateSlotInternal(MaybeObject** slot) {
+ static inline SlotCallbackResult UpdateSlotInternal(MaybeObjectSlot slot) {
return UpdateSlot<AccessMode::NON_ATOMIC>(slot);
}
-
- Heap* heap_;
};
-static String* UpdateReferenceInExternalStringTableEntry(Heap* heap,
- Object** p) {
+static String UpdateReferenceInExternalStringTableEntry(Heap* heap,
+ FullObjectSlot p) {
MapWord map_word = HeapObject::cast(*p)->map_word();
if (map_word.IsForwardingAddress()) {
- String* new_string = String::cast(map_word.ToForwardingAddress());
+ String new_string = String::cast(map_word.ToForwardingAddress());
if (new_string->IsExternalString()) {
MemoryChunk::MoveExternalBackingStoreBytes(
ExternalBackingStoreType::kExternalString,
- Page::FromAddress(reinterpret_cast<Address>(*p)),
- Page::FromHeapObject(new_string),
+ Page::FromAddress((*p).ptr()), Page::FromHeapObject(new_string),
ExternalString::cast(new_string)->ExternalPayloadSize());
}
return new_string;
@@ -2270,6 +2594,8 @@ void MarkCompactCollector::EvacuatePrologue() {
new_space->Flip();
new_space->ResetLinearAllocationArea();
+ heap()->new_lo_space()->Flip();
+
// Old space.
DCHECK(old_space_evacuation_pages_.empty());
old_space_evacuation_pages_ = std::move(evacuation_candidates_);
@@ -2283,6 +2609,8 @@ void MarkCompactCollector::EvacuateEpilogue() {
heap()->new_space()->set_age_mark(heap()->new_space()->top());
// Deallocate unmarked large objects.
heap()->lo_space()->FreeUnmarkedObjects();
+ heap()->code_lo_space()->FreeUnmarkedObjects();
+ heap()->new_lo_space()->FreeUnmarkedObjects();
// Old space. Deallocate evacuated candidate pages.
ReleaseEvacuationCandidates();
// Give pages that are queued to be freed back to the OS.
@@ -2318,10 +2646,11 @@ class Evacuator : public Malloced {
// NewSpacePages with more live bytes than this threshold qualify for fast
// evacuation.
- static int PageEvacuationThreshold() {
+ static intptr_t NewSpacePageEvacuationThreshold() {
if (FLAG_page_promotion)
- return FLAG_page_promotion_threshold * Page::kAllocatableMemory / 100;
- return Page::kAllocatableMemory + kPointerSize;
+ return FLAG_page_promotion_threshold *
+ MemoryChunkLayout::AllocatableMemoryInDataPage() / 100;
+ return MemoryChunkLayout::AllocatableMemoryInDataPage() + kTaggedSize;
}
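
NewSpacePageEvacuationThreshold converts the FLAG_page_promotion_threshold percentage into a byte count; with promotion disabled it returns one word more than a page can hold, so the check can never pass. The same arithmetic sketched with an assumed page capacity:

#include <cstdint>

// Sketch: pages with more live bytes than this threshold are moved
// wholesale instead of object by object. The page capacity is assumed.
constexpr intptr_t kAllocatableMemoryInDataPage = 500 * 1024;

intptr_t NewSpacePageEvacuationThreshold(bool page_promotion,
                                         int promotion_threshold_percent) {
  if (page_promotion)
    return promotion_threshold_percent * kAllocatableMemoryInDataPage / 100;
  // One word above capacity: unreachable, so promotion never triggers.
  return kAllocatableMemoryInDataPage + static_cast<intptr_t>(sizeof(void*));
}
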
Evacuator(Heap* heap, RecordMigratedSlotVisitor* record_visitor)
@@ -2341,7 +2670,7 @@ class Evacuator : public Malloced {
virtual ~Evacuator() = default;
- void EvacuatePage(Page* page);
+ void EvacuatePage(MemoryChunk* chunk);
void AddObserver(MigrationObserver* observer) {
new_space_visitor_.AddObserver(observer);
@@ -2358,7 +2687,8 @@ class Evacuator : public Malloced {
static const int kInitialLocalPretenuringFeedbackCapacity = 256;
// |saved_live_bytes| returns the live bytes of the page that was processed.
- virtual void RawEvacuatePage(Page* page, intptr_t* saved_live_bytes) = 0;
+ virtual void RawEvacuatePage(MemoryChunk* chunk,
+ intptr_t* saved_live_bytes) = 0;
inline Heap* heap() { return heap_; }
@@ -2386,29 +2716,30 @@ class Evacuator : public Malloced {
intptr_t bytes_compacted_;
};
-void Evacuator::EvacuatePage(Page* page) {
+void Evacuator::EvacuatePage(MemoryChunk* chunk) {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"), "Evacuator::EvacuatePage");
- DCHECK(page->SweepingDone());
+ DCHECK(chunk->SweepingDone());
intptr_t saved_live_bytes = 0;
double evacuation_time = 0.0;
{
AlwaysAllocateScope always_allocate(heap()->isolate());
TimedScope timed_scope(&evacuation_time);
- RawEvacuatePage(page, &saved_live_bytes);
+ RawEvacuatePage(chunk, &saved_live_bytes);
}
ReportCompactionProgress(evacuation_time, saved_live_bytes);
if (FLAG_trace_evacuation) {
- PrintIsolate(
- heap()->isolate(),
- "evacuation[%p]: page=%p new_space=%d "
- "page_evacuation=%d executable=%d contains_age_mark=%d "
- "live_bytes=%" V8PRIdPTR " time=%f success=%d\n",
- static_cast<void*>(this), static_cast<void*>(page), page->InNewSpace(),
- page->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION) ||
- page->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION),
- page->IsFlagSet(MemoryChunk::IS_EXECUTABLE),
- page->Contains(heap()->new_space()->age_mark()), saved_live_bytes,
- evacuation_time, page->IsFlagSet(Page::COMPACTION_WAS_ABORTED));
+ PrintIsolate(heap()->isolate(),
+ "evacuation[%p]: page=%p new_space=%d "
+ "page_evacuation=%d executable=%d contains_age_mark=%d "
+ "live_bytes=%" V8PRIdPTR " time=%f success=%d\n",
+ static_cast<void*>(this), static_cast<void*>(chunk),
+ chunk->InNewSpace(),
+ chunk->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION) ||
+ chunk->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION),
+ chunk->IsFlagSet(MemoryChunk::IS_EXECUTABLE),
+ chunk->Contains(heap()->new_space()->age_mark()),
+ saved_live_bytes, evacuation_time,
+ chunk->IsFlagSet(Page::COMPACTION_WAS_ABORTED));
}
}
@@ -2439,51 +2770,51 @@ class FullEvacuator : public Evacuator {
}
protected:
- void RawEvacuatePage(Page* page, intptr_t* live_bytes) override;
+ void RawEvacuatePage(MemoryChunk* chunk, intptr_t* live_bytes) override;
MarkCompactCollector* collector_;
};
-void FullEvacuator::RawEvacuatePage(Page* page, intptr_t* live_bytes) {
- const EvacuationMode evacuation_mode = ComputeEvacuationMode(page);
+void FullEvacuator::RawEvacuatePage(MemoryChunk* chunk, intptr_t* live_bytes) {
+ const EvacuationMode evacuation_mode = ComputeEvacuationMode(chunk);
TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
"FullEvacuator::RawEvacuatePage", "evacuation_mode",
evacuation_mode);
MarkCompactCollector::NonAtomicMarkingState* marking_state =
collector_->non_atomic_marking_state();
- *live_bytes = marking_state->live_bytes(page);
- HeapObject* failed_object = nullptr;
+ *live_bytes = marking_state->live_bytes(chunk);
+ HeapObject failed_object;
switch (evacuation_mode) {
case kObjectsNewToOld:
LiveObjectVisitor::VisitBlackObjectsNoFail(
- page, marking_state, &new_space_visitor_,
+ chunk, marking_state, &new_space_visitor_,
LiveObjectVisitor::kClearMarkbits);
// ArrayBufferTracker will be updated during pointers updating.
break;
case kPageNewToOld:
LiveObjectVisitor::VisitBlackObjectsNoFail(
- page, marking_state, &new_to_old_page_visitor_,
+ chunk, marking_state, &new_to_old_page_visitor_,
LiveObjectVisitor::kKeepMarking);
new_to_old_page_visitor_.account_moved_bytes(
- marking_state->live_bytes(page));
+ marking_state->live_bytes(chunk));
// ArrayBufferTracker will be updated during sweeping.
break;
case kPageNewToNew:
LiveObjectVisitor::VisitBlackObjectsNoFail(
- page, marking_state, &new_to_new_page_visitor_,
+ chunk, marking_state, &new_to_new_page_visitor_,
LiveObjectVisitor::kKeepMarking);
new_to_new_page_visitor_.account_moved_bytes(
- marking_state->live_bytes(page));
+ marking_state->live_bytes(chunk));
// ArrayBufferTracker will be updated during sweeping.
break;
case kObjectsOldToOld: {
const bool success = LiveObjectVisitor::VisitBlackObjects(
- page, marking_state, &old_space_visitor_,
+ chunk, marking_state, &old_space_visitor_,
LiveObjectVisitor::kClearMarkbits, &failed_object);
if (!success) {
// Aborted compaction page. Actual processing happens on the main
// thread for simplicity reasons.
- collector_->ReportAbortedEvacuationCandidate(failed_object, page);
+ collector_->ReportAbortedEvacuationCandidate(failed_object, chunk);
} else {
// ArrayBufferTracker will be updated during pointers updating.
}
@@ -2492,14 +2823,14 @@ void FullEvacuator::RawEvacuatePage(Page* page, intptr_t* live_bytes) {
}
}
-class PageEvacuationItem : public ItemParallelJob::Item {
+class EvacuationItem : public ItemParallelJob::Item {
public:
- explicit PageEvacuationItem(Page* page) : page_(page) {}
- ~PageEvacuationItem() override = default;
- Page* page() const { return page_; }
+ explicit EvacuationItem(MemoryChunk* chunk) : chunk_(chunk) {}
+ ~EvacuationItem() override = default;
+ MemoryChunk* chunk() const { return chunk_; }
private:
- Page* page_;
+ MemoryChunk* chunk_;
};
class PageEvacuationTask : public ItemParallelJob::Task {
@@ -2511,9 +2842,9 @@ class PageEvacuationTask : public ItemParallelJob::Task {
void RunInParallel() override {
TRACE_BACKGROUND_GC(tracer_, evacuator_->GetBackgroundTracingScope());
- PageEvacuationItem* item = nullptr;
- while ((item = GetItem<PageEvacuationItem>()) != nullptr) {
- evacuator_->EvacuatePage(item->page());
+ EvacuationItem* item = nullptr;
+ while ((item = GetItem<EvacuationItem>()) != nullptr) {
+ evacuator_->EvacuatePage(item->chunk());
item->MarkFinished();
}
};
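
PageEvacuationTask::RunInParallel has each worker pop items from a shared job until it drains. A thread-based sketch where an atomic cursor stands in for V8's ItemParallelJob claim protocol (Chunk and EvacuateAll are illustrative):

#include <atomic>
#include <cstddef>
#include <thread>
#include <vector>

// Sketch: N workers drain a shared list of evacuation items; an atomic
// cursor replaces a per-item claim protocol.
struct Chunk {};

void EvacuateAll(const std::vector<Chunk*>& items, int num_tasks,
                 void (*evacuate)(Chunk*)) {
  std::atomic<size_t> cursor{0};
  std::vector<std::thread> workers;
  for (int i = 0; i < num_tasks; ++i) {
    workers.emplace_back([&] {
      for (size_t j = cursor.fetch_add(1); j < items.size();
           j = cursor.fetch_add(1)) {
        evacuate(items[j]);
      }
    });
  }
  for (std::thread& w : workers) w.join();
}
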
@@ -2571,7 +2902,7 @@ bool MarkCompactCollectorBase::ShouldMovePage(Page* p, intptr_t live_bytes) {
const bool reduce_memory = heap()->ShouldReduceMemory();
const Address age_mark = heap()->new_space()->age_mark();
return !reduce_memory && !p->NeverEvacuate() &&
- (live_bytes > Evacuator::PageEvacuationThreshold()) &&
+ (live_bytes > Evacuator::NewSpacePageEvacuationThreshold()) &&
!p->Contains(age_mark) && heap()->CanExpandOldGeneration(live_bytes);
}
@@ -2582,7 +2913,7 @@ void MarkCompactCollector::EvacuatePagesInParallel() {
for (Page* page : old_space_evacuation_pages_) {
live_bytes += non_atomic_marking_state()->live_bytes(page);
- evacuation_job.AddItem(new PageEvacuationItem(page));
+ evacuation_job.AddItem(new EvacuationItem(page));
}
for (Page* page : new_space_evacuation_pages_) {
@@ -2601,8 +2932,25 @@ void MarkCompactCollector::EvacuatePagesInParallel() {
EvacuateNewSpacePageVisitor<NEW_TO_NEW>::Move(page);
}
}
- evacuation_job.AddItem(new PageEvacuationItem(page));
+ evacuation_job.AddItem(new EvacuationItem(page));
+ }
+
+ // Promote young generation large objects.
+ LargePage* current = heap()->new_lo_space()->first_page();
+ IncrementalMarking::NonAtomicMarkingState* marking_state =
+ heap()->incremental_marking()->non_atomic_marking_state();
+ while (current) {
+ LargePage* next_current = current->next_page();
+ HeapObject object = current->GetObject();
+ DCHECK(!marking_state->IsGrey(object));
+ if (marking_state->IsBlack(object)) {
+ heap_->lo_space()->PromoteNewLargeObject(current);
+ current->SetFlag(Page::PAGE_NEW_OLD_PROMOTION);
+ evacuation_job.AddItem(new EvacuationItem(current));
+ }
+ current = next_current;
}
+
if (evacuation_job.NumberOfItems() == 0) return;
RecordMigratedSlotVisitor record_visitor(this);
@@ -2613,9 +2961,9 @@ void MarkCompactCollector::EvacuatePagesInParallel() {
class EvacuationWeakObjectRetainer : public WeakObjectRetainer {
public:
- Object* RetainAs(Object* object) override {
+ Object RetainAs(Object object) override {
if (object->IsHeapObject()) {
- HeapObject* heap_object = HeapObject::cast(object);
+ HeapObject heap_object = HeapObject::cast(object);
MapWord map_word = heap_object->map_word();
if (map_word.IsForwardingAddress()) {
return map_word.ToForwardingAddress();
@@ -2625,11 +2973,6 @@ class EvacuationWeakObjectRetainer : public WeakObjectRetainer {
}
};
-// Return true if the given code is deoptimized or will be deoptimized.
-bool MarkCompactCollector::WillBeDeoptimized(Code* code) {
- return code->is_optimized_code() && code->marked_for_deoptimization();
-}
-
void MarkCompactCollector::RecordLiveSlotsOnPage(Page* page) {
EvacuateRecordOnlyVisitor visitor(heap());
LiveObjectVisitor::VisitBlackObjectsNoFail(page, non_atomic_marking_state(),
@@ -2642,12 +2985,12 @@ bool LiveObjectVisitor::VisitBlackObjects(MemoryChunk* chunk,
MarkingState* marking_state,
Visitor* visitor,
IterationMode iteration_mode,
- HeapObject** failed_object) {
+ HeapObject* failed_object) {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
"LiveObjectVisitor::VisitBlackObjects");
for (auto object_and_size :
LiveObjectRange<kBlackObjects>(chunk, marking_state->bitmap(chunk))) {
- HeapObject* const object = object_and_size.first;
+ HeapObject const object = object_and_size.first;
if (!visitor->Visit(object, object_and_size.second)) {
if (iteration_mode == kClearMarkbits) {
marking_state->bitmap(chunk)->ClearRange(
@@ -2671,13 +3014,22 @@ void LiveObjectVisitor::VisitBlackObjectsNoFail(MemoryChunk* chunk,
IterationMode iteration_mode) {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
"LiveObjectVisitor::VisitBlackObjectsNoFail");
- for (auto object_and_size :
- LiveObjectRange<kBlackObjects>(chunk, marking_state->bitmap(chunk))) {
- HeapObject* const object = object_and_size.first;
+ DCHECK_NE(chunk->owner()->identity(), NEW_LO_SPACE);
+ if (chunk->owner()->identity() == LO_SPACE) {
+ HeapObject object = reinterpret_cast<LargePage*>(chunk)->GetObject();
DCHECK(marking_state->IsBlack(object));
- const bool success = visitor->Visit(object, object_and_size.second);
+ const bool success = visitor->Visit(object, object->Size());
USE(success);
DCHECK(success);
+ } else {
+ for (auto object_and_size :
+ LiveObjectRange<kBlackObjects>(chunk, marking_state->bitmap(chunk))) {
+ HeapObject const object = object_and_size.first;
+ DCHECK(marking_state->IsBlack(object));
+ const bool success = visitor->Visit(object, object_and_size.second);
+ USE(success);
+ DCHECK(success);
+ }
}
if (iteration_mode == kClearMarkbits) {
marking_state->ClearLiveness(chunk);
@@ -2693,7 +3045,7 @@ void LiveObjectVisitor::VisitGreyObjectsNoFail(MemoryChunk* chunk,
"LiveObjectVisitor::VisitGreyObjectsNoFail");
for (auto object_and_size :
LiveObjectRange<kGreyObjects>(chunk, marking_state->bitmap(chunk))) {
- HeapObject* const object = object_and_size.first;
+ HeapObject const object = object_and_size.first;
DCHECK(marking_state->IsGrey(object));
const bool success = visitor->Visit(object, object_and_size.second);
USE(success);
@@ -2717,7 +3069,7 @@ void LiveObjectVisitor::RecomputeLiveBytes(MemoryChunk* chunk,
void MarkCompactCollector::Evacuate() {
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE);
- base::LockGuard<base::Mutex> guard(heap()->relocation_mutex());
+ base::MutexGuard guard(heap()->relocation_mutex());
{
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_PROLOGUE);
@@ -2839,10 +3191,10 @@ class ToSpaceUpdatingItem : public UpdatingItem {
void ProcessVisitAll() {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
"ToSpaceUpdatingItem::ProcessVisitAll");
- PointersUpdatingVisitor visitor(chunk_->heap());
+ PointersUpdatingVisitor visitor;
for (Address cur = start_; cur < end_;) {
- HeapObject* object = HeapObject::FromAddress(cur);
- Map* map = object->map();
+ HeapObject object = HeapObject::FromAddress(cur);
+ Map map = object->map();
int size = object->SizeFromMap(map);
object->IterateBodyFast(map, size, &visitor);
cur += size;
@@ -2854,7 +3206,7 @@ class ToSpaceUpdatingItem : public UpdatingItem {
"ToSpaceUpdatingItem::ProcessVisitLive");
// For young generation evacuations we want to visit grey objects; for
// full MC, we need to visit black objects.
- PointersUpdatingVisitor visitor(chunk_->heap());
+ PointersUpdatingVisitor visitor;
for (auto object_and_size : LiveObjectRange<kAllLiveObjects>(
chunk_, marking_state_->bitmap(chunk_))) {
object_and_size.first->IterateBodyFast(&visitor);
@@ -2882,27 +3234,31 @@ class RememberedSetUpdatingItem : public UpdatingItem {
void Process() override {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
"RememberedSetUpdatingItem::Process");
- base::LockGuard<base::Mutex> guard(chunk_->mutex());
+ base::MutexGuard guard(chunk_->mutex());
CodePageMemoryModificationScope memory_modification_scope(chunk_);
UpdateUntypedPointers();
UpdateTypedPointers();
}
private:
- inline SlotCallbackResult CheckAndUpdateOldToNewSlot(Address slot_address) {
- MaybeObject** slot = reinterpret_cast<MaybeObject**>(slot_address);
- HeapObject* heap_object;
- if (!(*slot)->GetHeapObject(&heap_object)) {
+ template <typename TSlot>
+ inline SlotCallbackResult CheckAndUpdateOldToNewSlot(TSlot slot) {
+ static_assert(
+ std::is_same<TSlot, FullMaybeObjectSlot>::value ||
+ std::is_same<TSlot, MaybeObjectSlot>::value,
+ "Only FullMaybeObjectSlot and MaybeObjectSlot are expected here");
+ using THeapObjectSlot = typename TSlot::THeapObjectSlot;
+ HeapObject heap_object;
+ if (!(*slot).GetHeapObject(&heap_object)) {
return REMOVE_SLOT;
}
if (Heap::InFromSpace(heap_object)) {
MapWord map_word = heap_object->map_word();
if (map_word.IsForwardingAddress()) {
- HeapObjectReference::Update(
- reinterpret_cast<HeapObjectReference**>(slot),
- map_word.ToForwardingAddress());
+ HeapObjectReference::Update(THeapObjectSlot(slot),
+ map_word.ToForwardingAddress());
}
- bool success = (*slot)->GetHeapObject(&heap_object);
+ bool success = (*slot).GetHeapObject(&heap_object);
USE(success);
DCHECK(success);
// If the object was in from space before and is after executing the
@@ -2918,7 +3274,7 @@ class RememberedSetUpdatingItem : public UpdatingItem {
// if the slot was already updated during old->old updating.
// In case the page has been moved, check markbits to determine liveness
// of the slot. In the other case, the slot can just be kept.
- if (Page::FromAddress(heap_object->address())
+ if (Page::FromHeapObject(heap_object)
->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION)) {
// IsBlackOrGrey is required because objects are marked as grey for
// the young generation collector while they are black for the full
@@ -2940,7 +3296,9 @@ class RememberedSetUpdatingItem : public UpdatingItem {
if (chunk_->slot_set<OLD_TO_NEW, AccessMode::NON_ATOMIC>() != nullptr) {
RememberedSet<OLD_TO_NEW>::Iterate(
chunk_,
- [this](Address slot) { return CheckAndUpdateOldToNewSlot(slot); },
+ [this](MaybeObjectSlot slot) {
+ return CheckAndUpdateOldToNewSlot(slot);
+ },
SlotSet::PREFREE_EMPTY_BUCKETS);
}
if ((updating_mode_ == RememberedSetUpdatingMode::ALL) &&
@@ -2948,10 +3306,9 @@ class RememberedSetUpdatingItem : public UpdatingItem {
InvalidatedSlotsFilter filter(chunk_);
RememberedSet<OLD_TO_OLD>::Iterate(
chunk_,
- [&filter](Address slot) {
- if (!filter.IsValid(slot)) return REMOVE_SLOT;
- return UpdateSlot<AccessMode::NON_ATOMIC>(
- reinterpret_cast<MaybeObject**>(slot));
+ [&filter](MaybeObjectSlot slot) {
+ if (!filter.IsValid(slot.address())) return REMOVE_SLOT;
+ return UpdateSlot<AccessMode::NON_ATOMIC>(slot);
},
SlotSet::PREFREE_EMPTY_BUCKETS);
}
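
The remembered-set callbacks return KEEP_SLOT or REMOVE_SLOT, and iteration prunes the entries the callback no longer needs. The same contract sketched over a plain vector of recorded slots instead of V8's per-page slot sets:

#include <algorithm>
#include <functional>
#include <vector>

// Sketch: iterate recorded slots, let the callback update each one, and
// drop the entries it reports as no longer needed.
enum SlotCallbackResult { KEEP_SLOT, REMOVE_SLOT };

void IterateSlots(std::vector<void**>* slots,
                  const std::function<SlotCallbackResult(void**)>& callback) {
  slots->erase(
      std::remove_if(slots->begin(), slots->end(),
                     [&](void** slot) { return callback(slot) == REMOVE_SLOT; }),
      slots->end());
}
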
@@ -2959,7 +3316,7 @@ class RememberedSetUpdatingItem : public UpdatingItem {
chunk_->invalidated_slots() != nullptr) {
#ifdef DEBUG
for (auto object_size : *chunk_->invalidated_slots()) {
- HeapObject* object = object_size.first;
+ HeapObject object = object_size.first;
int size = object_size.second;
DCHECK_LE(object->SizeFromMap(object->map()), size);
}
@@ -2975,11 +3332,11 @@ class RememberedSetUpdatingItem : public UpdatingItem {
nullptr) {
CHECK_NE(chunk_->owner(), heap_->map_space());
const auto check_and_update_old_to_new_slot_fn =
- [this](MaybeObject** slot) {
- return CheckAndUpdateOldToNewSlot(reinterpret_cast<Address>(slot));
+ [this](FullMaybeObjectSlot slot) {
+ return CheckAndUpdateOldToNewSlot(slot);
};
RememberedSet<OLD_TO_NEW>::IterateTyped(
- chunk_, [=](SlotType slot_type, Address host_addr, Address slot) {
+ chunk_, [=](SlotType slot_type, Address slot) {
return UpdateTypedSlotHelper::UpdateTypedSlot(
heap_, slot_type, slot, check_and_update_old_to_new_slot_fn);
});
@@ -2989,12 +3346,12 @@ class RememberedSetUpdatingItem : public UpdatingItem {
nullptr)) {
CHECK_NE(chunk_->owner(), heap_->map_space());
RememberedSet<OLD_TO_OLD>::IterateTyped(
- chunk_, [this](SlotType slot_type, Address host_addr, Address slot) {
+ chunk_, [=](SlotType slot_type, Address slot) {
// Using UpdateStrongSlot is OK here, because there are no weak
// typed slots.
return UpdateTypedSlotHelper::UpdateTypedSlot(
heap_, slot_type, slot,
- UpdateStrongSlot<AccessMode::NON_ATOMIC>);
+ UpdateStrongSlot<AccessMode::NON_ATOMIC, FullMaybeObjectSlot>);
});
}
}
@@ -3017,30 +3374,6 @@ UpdatingItem* MarkCompactCollector::CreateRememberedSetUpdatingItem(
heap(), non_atomic_marking_state(), chunk, updating_mode);
}
-class GlobalHandlesUpdatingItem : public UpdatingItem {
- public:
- GlobalHandlesUpdatingItem(Heap* heap, GlobalHandles* global_handles,
- size_t start, size_t end)
- : heap_(heap),
- global_handles_(global_handles),
- start_(start),
- end_(end) {}
- ~GlobalHandlesUpdatingItem() override = default;
-
- void Process() override {
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
- "GlobalHandlesUpdatingItem::Process");
- PointersUpdatingVisitor updating_visitor(heap_);
- global_handles_->IterateNewSpaceRoots(&updating_visitor, start_, end_);
- }
-
- private:
- Heap* heap_;
- GlobalHandles* global_handles_;
- size_t start_;
- size_t end_;
-};
-
// Update array buffers on a page that has been evacuated by copying objects.
// Target page exclusivity in old space is guaranteed by the fact that
// evacuation tasks either (a) retrieved a fresh page, or (b) retrieved all
@@ -3160,7 +3493,7 @@ int MarkCompactCollector::CollectOldSpaceArrayBufferTrackerItems(
void MarkCompactCollector::UpdatePointersAfterEvacuation() {
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS);
- PointersUpdatingVisitor updating_visitor(heap());
+ PointersUpdatingVisitor updating_visitor;
{
TRACE_GC(heap()->tracer(),
@@ -3181,6 +3514,8 @@ void MarkCompactCollector::UpdatePointersAfterEvacuation() {
&updating_job, heap()->code_space(), RememberedSetUpdatingMode::ALL);
remembered_set_pages += CollectRememberedSetUpdatingItems(
&updating_job, heap()->lo_space(), RememberedSetUpdatingMode::ALL);
+ remembered_set_pages += CollectRememberedSetUpdatingItems(
+ &updating_job, heap()->code_lo_space(), RememberedSetUpdatingMode::ALL);
const int remembered_set_tasks =
remembered_set_pages == 0
? 0
@@ -3243,15 +3578,16 @@ void MarkCompactCollector::UpdatePointersAfterEvacuation() {
}
void MarkCompactCollector::ReportAbortedEvacuationCandidate(
- HeapObject* failed_object, Page* page) {
- base::LockGuard<base::Mutex> guard(&mutex_);
+ HeapObject failed_object, MemoryChunk* chunk) {
+ base::MutexGuard guard(&mutex_);
- aborted_evacuation_candidates_.push_back(std::make_pair(failed_object, page));
+ aborted_evacuation_candidates_.push_back(
+ std::make_pair(failed_object, static_cast<Page*>(chunk)));
}
void MarkCompactCollector::PostProcessEvacuationCandidates() {
for (auto object_and_page : aborted_evacuation_candidates_) {
- HeapObject* failed_object = object_and_page.first;
+ HeapObject failed_object = object_and_page.first;
Page* page = object_and_page.second;
page->SetFlag(Page::COMPACTION_WAS_ABORTED);
// Aborted compaction page. We have to record slots here, since we
@@ -3392,7 +3728,7 @@ void MarkCompactCollector::MarkingWorklist::PrintWorklist(
const char* worklist_name, ConcurrentMarkingWorklist* worklist) {
std::map<InstanceType, int> count;
int total_count = 0;
- worklist->IterateGlobalPool([&count, &total_count](HeapObject* obj) {
+ worklist->IterateGlobalPool([&count, &total_count](HeapObject obj) {
++total_count;
count[obj->map()->instance_type()]++;
});
@@ -3430,11 +3766,11 @@ class YoungGenerationMarkingVerifier : public MarkingVerifier {
return marking_state_->bitmap(chunk);
}
- bool IsMarked(HeapObject* object) override {
+ bool IsMarked(HeapObject object) override {
return marking_state_->IsGrey(object);
}
- bool IsBlackOrGrey(HeapObject* object) override {
+ bool IsBlackOrGrey(HeapObject object) override {
return marking_state_->IsBlackOrGrey(object);
}
@@ -3443,31 +3779,43 @@ class YoungGenerationMarkingVerifier : public MarkingVerifier {
VerifyMarking(heap_->new_space());
}
- void VerifyPointers(Object** start, Object** end) override {
- for (Object** current = start; current < end; current++) {
- DCHECK(!HasWeakHeapObjectTag(*current));
- if ((*current)->IsHeapObject()) {
- HeapObject* object = HeapObject::cast(*current);
- if (!Heap::InNewSpace(object)) return;
- CHECK(IsMarked(object));
- }
- }
+ protected:
+ void VerifyPointers(ObjectSlot start, ObjectSlot end) override {
+ VerifyPointersImpl(start, end);
+ }
+
+ void VerifyPointers(MaybeObjectSlot start, MaybeObjectSlot end) override {
+ VerifyPointersImpl(start, end);
+ }
+
+ void VisitCodeTarget(Code host, RelocInfo* rinfo) override {
+ Code target = Code::GetCodeFromTargetAddress(rinfo->target_address());
+ VerifyHeapObjectImpl(target);
+ }
+ void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
+ VerifyHeapObjectImpl(rinfo->target_object());
+ }
+ void VerifyRootPointers(FullObjectSlot start, FullObjectSlot end) override {
+ VerifyPointersImpl(start, end);
}
- void VerifyPointers(MaybeObject** start, MaybeObject** end) override {
- for (MaybeObject** current = start; current < end; current++) {
- HeapObject* object;
+ private:
+ V8_INLINE void VerifyHeapObjectImpl(HeapObject heap_object) {
+ CHECK_IMPLIES(Heap::InNewSpace(heap_object), IsMarked(heap_object));
+ }
+
+ template <typename TSlot>
+ V8_INLINE void VerifyPointersImpl(TSlot start, TSlot end) {
+ for (TSlot slot = start; slot < end; ++slot) {
+ typename TSlot::TObject object = *slot;
+ HeapObject heap_object;
// Minor MC treats weak references as strong.
- if ((*current)->GetHeapObject(&object)) {
- if (!Heap::InNewSpace(object)) {
- continue;
- }
- CHECK(IsMarked(object));
+ if (object.GetHeapObject(&heap_object)) {
+ VerifyHeapObjectImpl(heap_object);
}
}
}
- private:
MinorMarkCompactCollector::NonAtomicMarkingState* marking_state_;
};
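
The verifiers above fold their separate strong- and weak-slot overloads into one VerifyPointersImpl, keyed on the slot type's nested TObject typedef so the same loop can read Object through ObjectSlot and MaybeObject through MaybeObjectSlot. The pattern in isolation, with simplified stand-in types:

// Simplified stand-ins: each slot flavor names its dereferenced type.
struct StrongRef { bool GetHeapObject(int* out) const { *out = 42; return true; } };
struct WeakRef   { bool GetHeapObject(int* out) const { return false; } };

template <typename TRef>
struct Slot {
  using TObject = TRef;
  TObject operator*() const { return {}; }
  Slot& operator++() { ++index; return *this; }
  bool operator<(Slot other) const { return index < other.index; }
  int index = 0;
};

// One templated loop replaces the duplicated strong/weak overloads.
template <typename TSlot>
int CountReachable(TSlot start, TSlot end) {
  int reachable = 0;
  for (TSlot slot = start; slot < end; ++slot) {
    typename TSlot::TObject object = *slot;
    int heap_object;
    if (object.GetHeapObject(&heap_object)) ++reachable;
  }
  return reachable;
}

// CountReachable(Slot<StrongRef>{0}, Slot<StrongRef>{4}) == 4
// CountReachable(Slot<WeakRef>{0},   Slot<WeakRef>{4})   == 0
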
@@ -3485,41 +3833,42 @@ class YoungGenerationEvacuationVerifier : public EvacuationVerifier {
}
protected:
- void VerifyPointers(Object** start, Object** end) override {
- for (Object** current = start; current < end; current++) {
- if ((*current)->IsHeapObject()) {
- HeapObject* object = HeapObject::cast(*current);
- CHECK_IMPLIES(Heap::InNewSpace(object), Heap::InToSpace(object));
- }
- }
+ V8_INLINE void VerifyHeapObjectImpl(HeapObject heap_object) {
+ CHECK_IMPLIES(Heap::InNewSpace(heap_object), Heap::InToSpace(heap_object));
}
- void VerifyPointers(MaybeObject** start, MaybeObject** end) override {
- for (MaybeObject** current = start; current < end; current++) {
- HeapObject* object;
- if ((*current)->GetHeapObject(&object)) {
- CHECK_IMPLIES(Heap::InNewSpace(object), Heap::InToSpace(object));
+
+ template <typename TSlot>
+ void VerifyPointersImpl(TSlot start, TSlot end) {
+ for (TSlot current = start; current < end; ++current) {
+ typename TSlot::TObject object = *current;
+ HeapObject heap_object;
+ if (object.GetHeapObject(&heap_object)) {
+ VerifyHeapObjectImpl(heap_object);
}
}
}
+
+ void VerifyPointers(ObjectSlot start, ObjectSlot end) override {
+ VerifyPointersImpl(start, end);
+ }
+ void VerifyPointers(MaybeObjectSlot start, MaybeObjectSlot end) override {
+ VerifyPointersImpl(start, end);
+ }
+ void VisitCodeTarget(Code host, RelocInfo* rinfo) override {
+ Code target = Code::GetCodeFromTargetAddress(rinfo->target_address());
+ VerifyHeapObjectImpl(target);
+ }
+ void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
+ VerifyHeapObjectImpl(rinfo->target_object());
+ }
+ void VerifyRootPointers(FullObjectSlot start, FullObjectSlot end) override {
+ VerifyPointersImpl(start, end);
+ }
};
#endif // VERIFY_HEAP
-template <class ParallelItem>
-void SeedGlobalHandles(Heap* heap, GlobalHandles* global_handles,
- ItemParallelJob* job) {
- // Create batches of global handles.
- const size_t kGlobalHandlesBufferSize = 1000;
- const size_t new_space_nodes = global_handles->NumberOfNewSpaceNodes();
- for (size_t start = 0; start < new_space_nodes;
- start += kGlobalHandlesBufferSize) {
- size_t end = start + kGlobalHandlesBufferSize;
- if (end > new_space_nodes) end = new_space_nodes;
- job->AddItem(new ParallelItem(heap, global_handles, start, end));
- }
-}
-
-bool IsUnmarkedObjectForYoungGeneration(Heap* heap, Object** p) {
+bool IsUnmarkedObjectForYoungGeneration(Heap* heap, FullObjectSlot p) {
DCHECK_IMPLIES(Heap::InNewSpace(*p), Heap::InToSpace(*p));
return Heap::InNewSpace(*p) && !heap->minor_mark_compact_collector()
->non_atomic_marking_state()
@@ -3536,43 +3885,54 @@ class YoungGenerationMarkingVisitor final
MinorMarkCompactCollector::MarkingWorklist* global_worklist, int task_id)
: worklist_(global_worklist, task_id), marking_state_(marking_state) {}
- V8_INLINE void VisitPointers(HeapObject* host, Object** start,
- Object** end) final {
- for (Object** p = start; p < end; p++) {
- VisitPointer(host, p);
- }
+ V8_INLINE void VisitPointers(HeapObject host, ObjectSlot start,
+ ObjectSlot end) final {
+ VisitPointersImpl(host, start, end);
}
- V8_INLINE void VisitPointers(HeapObject* host, MaybeObject** start,
- MaybeObject** end) final {
- for (MaybeObject** p = start; p < end; p++) {
- VisitPointer(host, p);
- }
+ V8_INLINE void VisitPointers(HeapObject host, MaybeObjectSlot start,
+ MaybeObjectSlot end) final {
+ VisitPointersImpl(host, start, end);
}
- V8_INLINE void VisitPointer(HeapObject* host, Object** slot) final {
- Object* target = *slot;
- DCHECK(!HasWeakHeapObjectTag(target));
- if (Heap::InNewSpace(target)) {
- HeapObject* target_object = HeapObject::cast(target);
- MarkObjectViaMarkingWorklist(target_object);
+ V8_INLINE void VisitPointer(HeapObject host, ObjectSlot slot) final {
+ VisitPointerImpl(host, slot);
+ }
+
+ V8_INLINE void VisitPointer(HeapObject host, MaybeObjectSlot slot) final {
+ VisitPointerImpl(host, slot);
+ }
+
+ V8_INLINE void VisitCodeTarget(Code host, RelocInfo* rinfo) final {
+ // Code objects are not expected in new space.
+ UNREACHABLE();
+ }
+
+ V8_INLINE void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) final {
+ // Code objects are not expected in new space.
+ UNREACHABLE();
+ }
+
+ private:
+ template <typename TSlot>
+ V8_INLINE void VisitPointersImpl(HeapObject host, TSlot start, TSlot end) {
+ for (TSlot slot = start; slot < end; ++slot) {
+ VisitPointer(host, slot);
}
}
- V8_INLINE void VisitPointer(HeapObject* host, MaybeObject** slot) final {
- MaybeObject* target = *slot;
+ template <typename TSlot>
+ V8_INLINE void VisitPointerImpl(HeapObject host, TSlot slot) {
+ typename TSlot::TObject target = *slot;
if (Heap::InNewSpace(target)) {
- HeapObject* target_object;
- // Treat weak references as strong. TODO(marja): Proper weakness handling
- // for minor-mcs.
- if (target->GetHeapObject(&target_object)) {
- MarkObjectViaMarkingWorklist(target_object);
- }
+ // Treat weak references as strong.
+ // TODO(marja): Proper weakness handling for minor-mcs.
+ HeapObject target_object = target.GetHeapObject();
+ MarkObjectViaMarkingWorklist(target_object);
}
}
- private:
- inline void MarkObjectViaMarkingWorklist(HeapObject* object) {
+ inline void MarkObjectViaMarkingWorklist(HeapObject object) {
if (marking_state_->WhiteToGrey(object)) {
// Marking deque overflow is unsupported for the young generation.
CHECK(worklist_.Push(object));
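
MarkObjectViaMarkingWorklist above is the classic tri-color step: a white object is flipped to grey exactly once and queued so its fields are scanned later; the CHECK encodes that worklist overflow is unsupported for the young generation. A compact single-threaded model of that invariant (illustrative, not V8 code):

#include <queue>
#include <unordered_map>
#include <vector>

enum class Color { kWhite, kGrey, kBlack };

// Single-threaded model of grey-queue marking.
struct MarkerSketch {
  std::unordered_map<int, Color> color;             // object id -> color
  std::unordered_map<int, std::vector<int>> edges;  // object id -> children
  std::queue<int> worklist;

  bool WhiteToGrey(int obj) {
    Color& c = color[obj];  // absent entries default to kWhite
    if (c != Color::kWhite) return false;
    c = Color::kGrey;
    return true;
  }

  void MarkObject(int obj) {
    if (WhiteToGrey(obj)) worklist.push(obj);  // queue for field scanning
  }

  void Drain() {
    while (!worklist.empty()) {
      int obj = worklist.front();
      worklist.pop();
      color[obj] = Color::kBlack;               // fields fully visited
      for (int child : edges[obj]) MarkObject(child);
    }
  }
};
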
@@ -3632,7 +3992,7 @@ class YoungGenerationMigrationObserver final : public MigrationObserver {
: MigrationObserver(heap),
mark_compact_collector_(mark_compact_collector) {}
- inline void Move(AllocationSpace dest, HeapObject* src, HeapObject* dst,
+ inline void Move(AllocationSpace dest, HeapObject src, HeapObject dst,
int size) final {
// Migrate color to old generation marking in case the object survived young
// generation garbage collection.
@@ -3655,22 +4015,22 @@ class YoungGenerationRecordMigratedSlotVisitor final
MarkCompactCollector* collector)
: RecordMigratedSlotVisitor(collector) {}
- void VisitCodeTarget(Code* host, RelocInfo* rinfo) final { UNREACHABLE(); }
- void VisitEmbeddedPointer(Code* host, RelocInfo* rinfo) final {
+ void VisitCodeTarget(Code host, RelocInfo* rinfo) final { UNREACHABLE(); }
+ void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) final {
UNREACHABLE();
}
private:
// Only record slots for host objects that are considered as live by the full
// collector.
- inline bool IsLive(HeapObject* object) {
+ inline bool IsLive(HeapObject object) {
return collector_->non_atomic_marking_state()->IsBlack(object);
}
- inline void RecordMigratedSlot(HeapObject* host, MaybeObject* value,
+ inline void RecordMigratedSlot(HeapObject host, MaybeObject value,
Address slot) final {
if (value->IsStrongOrWeak()) {
- Page* p = Page::FromAddress(reinterpret_cast<Address>(value));
+ Page* p = Page::FromAddress(value.ptr());
if (p->InNewSpace()) {
DCHECK_IMPLIES(p->InToSpace(),
p->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION));
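
RecordMigratedSlot above re-registers interesting pointers after evacuation: when the migrated slot still points into new space, the slot is put back into the host page's OLD_TO_NEW remembered set so the next scavenge can find it. A schematic of that decision with made-up helpers:

#include <cstdint>
#include <set>
#include <utility>

using Address = uintptr_t;

enum RememberedSetKind { OLD_TO_NEW, OLD_TO_OLD };

// Made-up stand-ins for the Page/Heap queries used by the real visitor.
struct MigrationRecorderSketch {
  Address new_space_start = 0x100000, new_space_end = 0x200000;
  std::set<std::pair<RememberedSetKind, Address>> remembered;

  bool InNewSpace(Address value) const {
    return value >= new_space_start && value < new_space_end;
  }

  void RecordMigratedSlot(Address value, Address slot) {
    if (value == 0) return;                    // Smi or cleared: nothing to do
    if (InNewSpace(value)) {
      remembered.insert({OLD_TO_NEW, slot});   // next scavenge must visit slot
    }
    // (The real visitor also records OLD_TO_OLD entries for pointers into
    // compaction candidates; omitted here.)
  }
};
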
@@ -3688,14 +4048,12 @@ void MinorMarkCompactCollector::UpdatePointersAfterEvacuation() {
TRACE_GC(heap()->tracer(),
GCTracer::Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS);
- PointersUpdatingVisitor updating_visitor(heap());
+ PointersUpdatingVisitor updating_visitor;
ItemParallelJob updating_job(isolate()->cancelable_task_manager(),
&page_parallel_job_semaphore_);
CollectNewSpaceArrayBufferTrackerItems(&updating_job);
// Create batches of global handles.
- SeedGlobalHandles<GlobalHandlesUpdatingItem>(
- heap(), isolate()->global_handles(), &updating_job);
const int to_space_tasks = CollectToSpaceUpdatingItems(&updating_job);
int remembered_set_pages = 0;
remembered_set_pages += CollectRememberedSetUpdatingItems(
@@ -3710,6 +4068,9 @@ void MinorMarkCompactCollector::UpdatePointersAfterEvacuation() {
remembered_set_pages += CollectRememberedSetUpdatingItems(
&updating_job, heap()->lo_space(),
RememberedSetUpdatingMode::OLD_TO_NEW_ONLY);
+ remembered_set_pages += CollectRememberedSetUpdatingItems(
+ &updating_job, heap()->code_lo_space(),
+ RememberedSetUpdatingMode::OLD_TO_NEW_ONLY);
const int remembered_set_tasks =
remembered_set_pages == 0 ? 0
: NumberOfParallelPointerUpdateTasks(
@@ -3751,19 +4112,20 @@ class MinorMarkCompactCollector::RootMarkingVisitor : public RootVisitor {
explicit RootMarkingVisitor(MinorMarkCompactCollector* collector)
: collector_(collector) {}
- void VisitRootPointer(Root root, const char* description, Object** p) final {
+ void VisitRootPointer(Root root, const char* description,
+ FullObjectSlot p) final {
MarkObjectByPointer(p);
}
- void VisitRootPointers(Root root, const char* description, Object** start,
- Object** end) final {
- for (Object** p = start; p < end; p++) {
+ void VisitRootPointers(Root root, const char* description,
+ FullObjectSlot start, FullObjectSlot end) final {
+ for (FullObjectSlot p = start; p < end; ++p) {
MarkObjectByPointer(p);
}
}
private:
- V8_INLINE void MarkObjectByPointer(Object** p) {
+ V8_INLINE void MarkObjectByPointer(FullObjectSlot p) {
if (!(*p)->IsHeapObject()) return;
collector_->MarkRootObject(HeapObject::cast(*p));
}
@@ -3808,7 +4170,7 @@ void MinorMarkCompactCollector::CollectGarbage() {
if (FLAG_concurrent_marking) {
// Ensure that concurrent marker does not track pages that are
// going to be unmapped.
- heap()->concurrent_marking()->ClearLiveness(p);
+ heap()->concurrent_marking()->ClearMemoryChunkData(p);
}
}
}
@@ -3832,11 +4194,10 @@ void MinorMarkCompactCollector::MakeIterable(
// remove here.
MarkCompactCollector* full_collector = heap()->mark_compact_collector();
Address free_start = p->area_start();
- DCHECK_EQ(0, free_start % (32 * kPointerSize));
for (auto object_and_size :
LiveObjectRange<kGreyObjects>(p, marking_state()->bitmap(p))) {
- HeapObject* const object = object_and_size.first;
+ HeapObject const object = object_and_size.first;
DCHECK(non_atomic_marking_state()->IsGrey(object));
Address free_end = object->address();
if (free_end != free_start) {
@@ -3851,7 +4212,7 @@ void MinorMarkCompactCollector::MakeIterable(
p->heap()->CreateFillerObjectAt(free_start, static_cast<int>(size),
ClearRecordedSlots::kNo);
}
- Map* map = object->synchronized_map();
+ Map map = object->synchronized_map();
int size = object->SizeFromMap(map);
free_start = free_end + size;
}
@@ -3885,15 +4246,15 @@ class YoungGenerationExternalStringTableCleaner : public RootVisitor {
: heap_(collector->heap()),
marking_state_(collector->non_atomic_marking_state()) {}
- void VisitRootPointers(Root root, const char* description, Object** start,
- Object** end) override {
+ void VisitRootPointers(Root root, const char* description,
+ FullObjectSlot start, FullObjectSlot end) override {
DCHECK_EQ(static_cast<int>(root),
static_cast<int>(Root::kExternalStringsTable));
// Visit all HeapObject pointers in [start, end).
- for (Object** p = start; p < end; p++) {
- Object* o = *p;
+ for (FullObjectSlot p = start; p < end; ++p) {
+ Object o = *p;
if (o->IsHeapObject()) {
- HeapObject* heap_object = HeapObject::cast(o);
+ HeapObject heap_object = HeapObject::cast(o);
if (marking_state_->IsWhite(heap_object)) {
if (o->IsExternalString()) {
heap_->FinalizeExternalString(String::cast(*p));
@@ -3902,7 +4263,7 @@ class YoungGenerationExternalStringTableCleaner : public RootVisitor {
DCHECK(o->IsThinString());
}
// Set the entry to the_hole_value (as deleted).
- *p = ReadOnlyRoots(heap_).the_hole_value();
+ p.store(ReadOnlyRoots(heap_).the_hole_value());
}
}
}
@@ -3921,8 +4282,8 @@ class MinorMarkCompactWeakObjectRetainer : public WeakObjectRetainer {
MinorMarkCompactCollector* collector)
: marking_state_(collector->non_atomic_marking_state()) {}
- Object* RetainAs(Object* object) override {
- HeapObject* heap_object = HeapObject::cast(object);
+ Object RetainAs(Object object) override {
+ HeapObject heap_object = HeapObject::cast(object);
if (!Heap::InNewSpace(heap_object)) return object;
// Young generation marking only marks to grey instead of black.
@@ -3930,7 +4291,7 @@ class MinorMarkCompactWeakObjectRetainer : public WeakObjectRetainer {
if (marking_state_->IsGrey(heap_object)) {
return object;
}
- return nullptr;
+ return Object();
}
private:
@@ -3989,7 +4350,6 @@ UpdatingItem* MinorMarkCompactCollector::CreateRememberedSetUpdatingItem(
}
class MarkingItem;
-class GlobalHandlesMarkingItem;
class PageMarkingItem;
class RootMarkingItem;
class YoungGenerationMarkingTask;
@@ -4036,9 +4396,9 @@ class YoungGenerationMarkingTask : public ItemParallelJob::Task {
}
};
- void MarkObject(Object* object) {
+ void MarkObject(Object object) {
if (!Heap::InNewSpace(object)) return;
- HeapObject* heap_object = HeapObject::cast(object);
+ HeapObject heap_object = HeapObject::cast(object);
if (marking_state_->WhiteToGrey(heap_object)) {
const int size = visitor_.Visit(heap_object);
IncrementLiveBytes(heap_object, size);
@@ -4047,7 +4407,7 @@ class YoungGenerationMarkingTask : public ItemParallelJob::Task {
private:
void EmptyLocalMarkingWorklist() {
- HeapObject* object = nullptr;
+ HeapObject object;
while (marking_worklist_.Pop(&object)) {
const int size = visitor_.Visit(object);
IncrementLiveBytes(object, size);
@@ -4055,16 +4415,15 @@ class YoungGenerationMarkingTask : public ItemParallelJob::Task {
}
void EmptyMarkingWorklist() {
- HeapObject* object = nullptr;
+ HeapObject object;
while (marking_worklist_.Pop(&object)) {
const int size = visitor_.Visit(object);
IncrementLiveBytes(object, size);
}
}
- void IncrementLiveBytes(HeapObject* object, intptr_t bytes) {
- local_live_bytes_[Page::FromAddress(reinterpret_cast<Address>(object))] +=
- bytes;
+ void IncrementLiveBytes(HeapObject object, intptr_t bytes) {
+ local_live_bytes_[Page::FromHeapObject(object)] += bytes;
}
void FlushLiveBytes() {
@@ -4089,7 +4448,7 @@ class PageMarkingItem : public MarkingItem {
void Process(YoungGenerationMarkingTask* task) override {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
"PageMarkingItem::Process");
- base::LockGuard<base::Mutex> guard(chunk_->mutex());
+ base::MutexGuard guard(chunk_->mutex());
MarkUntypedPointers(task);
MarkTypedPointers(task);
}
@@ -4098,33 +4457,37 @@ class PageMarkingItem : public MarkingItem {
inline Heap* heap() { return chunk_->heap(); }
void MarkUntypedPointers(YoungGenerationMarkingTask* task) {
- RememberedSet<OLD_TO_NEW>::Iterate(
- chunk_,
- [this, task](Address slot) { return CheckAndMarkObject(task, slot); },
- SlotSet::PREFREE_EMPTY_BUCKETS);
+ RememberedSet<OLD_TO_NEW>::Iterate(chunk_,
+ [this, task](MaybeObjectSlot slot) {
+ return CheckAndMarkObject(task, slot);
+ },
+ SlotSet::PREFREE_EMPTY_BUCKETS);
}
void MarkTypedPointers(YoungGenerationMarkingTask* task) {
RememberedSet<OLD_TO_NEW>::IterateTyped(
- chunk_,
- [this, task](SlotType slot_type, Address host_addr, Address slot) {
+ chunk_, [=](SlotType slot_type, Address slot) {
return UpdateTypedSlotHelper::UpdateTypedSlot(
- heap(), slot_type, slot, [this, task](MaybeObject** slot) {
- return CheckAndMarkObject(task,
- reinterpret_cast<Address>(slot));
+ heap(), slot_type, slot, [this, task](FullMaybeObjectSlot slot) {
+ return CheckAndMarkObject(task, slot);
});
});
}
- SlotCallbackResult CheckAndMarkObject(YoungGenerationMarkingTask* task,
- Address slot_address) {
- MaybeObject* object = *reinterpret_cast<MaybeObject**>(slot_address);
+ template <typename TSlot>
+ V8_INLINE SlotCallbackResult
+ CheckAndMarkObject(YoungGenerationMarkingTask* task, TSlot slot) {
+ static_assert(
+ std::is_same<TSlot, FullMaybeObjectSlot>::value ||
+ std::is_same<TSlot, MaybeObjectSlot>::value,
+ "Only FullMaybeObjectSlot and MaybeObjectSlot are expected here");
+ MaybeObject object = *slot;
if (Heap::InNewSpace(object)) {
// Marking happens before flipping the young generation, so the object
// has to be in ToSpace.
DCHECK(Heap::InToSpace(object));
- HeapObject* heap_object;
- bool success = object->GetHeapObject(&heap_object);
+ HeapObject heap_object;
+ bool success = object.GetHeapObject(&heap_object);
USE(success);
DCHECK(success);
task->MarkObject(heap_object);
@@ -4139,51 +4502,6 @@ class PageMarkingItem : public MarkingItem {
int slots_;
};
-class GlobalHandlesMarkingItem : public MarkingItem {
- public:
- GlobalHandlesMarkingItem(Heap* heap, GlobalHandles* global_handles,
- size_t start, size_t end)
- : global_handles_(global_handles), start_(start), end_(end) {}
- ~GlobalHandlesMarkingItem() override = default;
-
- void Process(YoungGenerationMarkingTask* task) override {
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
- "GlobalHandlesMarkingItem::Process");
- GlobalHandlesRootMarkingVisitor visitor(task);
- global_handles_
- ->IterateNewSpaceStrongAndDependentRootsAndIdentifyUnmodified(
- &visitor, start_, end_);
- }
-
- private:
- class GlobalHandlesRootMarkingVisitor : public RootVisitor {
- public:
- explicit GlobalHandlesRootMarkingVisitor(YoungGenerationMarkingTask* task)
- : task_(task) {}
-
- void VisitRootPointer(Root root, const char* description,
- Object** p) override {
- DCHECK_EQ(Root::kGlobalHandles, root);
- task_->MarkObject(*p);
- }
-
- void VisitRootPointers(Root root, const char* description, Object** start,
- Object** end) override {
- DCHECK_EQ(Root::kGlobalHandles, root);
- for (Object** p = start; p < end; p++) {
- task_->MarkObject(*p);
- }
- }
-
- private:
- YoungGenerationMarkingTask* task_;
- };
-
- GlobalHandles* global_handles_;
- size_t start_;
- size_t end_;
-};
-
void MinorMarkCompactCollector::MarkRootSetInParallel(
RootMarkingVisitor* root_visitor) {
std::atomic<int> slots;
@@ -4194,10 +4512,9 @@ void MinorMarkCompactCollector::MarkRootSetInParallel(
// Seed the root set (roots + old->new set).
{
TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK_SEED);
+ isolate()->global_handles()->IdentifyWeakUnmodifiedObjects(
+ &JSObject::IsUnmodifiedApiObject);
heap()->IterateRoots(root_visitor, VISIT_ALL_IN_MINOR_MC_MARK);
- // Create batches of global handles.
- SeedGlobalHandles<GlobalHandlesMarkingItem>(
- heap(), isolate()->global_handles(), &job);
// Create items for each page.
RememberedSet<OLD_TO_NEW>::IterateMemoryChunks(
heap(), [&job, &slots](MemoryChunk* chunk) {
@@ -4254,7 +4571,7 @@ void MinorMarkCompactCollector::MarkLiveObjects() {
void MinorMarkCompactCollector::ProcessMarkingWorklist() {
MarkingWorklist::View marking_worklist(worklist(), kMainMarker);
- HeapObject* object = nullptr;
+ HeapObject object;
while (marking_worklist.Pop(&object)) {
DCHECK(!object->IsFiller());
DCHECK(object->IsHeapObject());
@@ -4267,7 +4584,7 @@ void MinorMarkCompactCollector::ProcessMarkingWorklist() {
void MinorMarkCompactCollector::Evacuate() {
TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_EVACUATE);
- base::LockGuard<base::Mutex> guard(heap()->relocation_mutex());
+ base::MutexGuard guard(heap()->relocation_mutex());
{
TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_EVACUATE_PROLOGUE);
@@ -4321,63 +4638,67 @@ class YoungGenerationEvacuator : public Evacuator {
}
protected:
- void RawEvacuatePage(Page* page, intptr_t* live_bytes) override;
+ void RawEvacuatePage(MemoryChunk* chunk, intptr_t* live_bytes) override;
MinorMarkCompactCollector* collector_;
};
-void YoungGenerationEvacuator::RawEvacuatePage(Page* page,
+void YoungGenerationEvacuator::RawEvacuatePage(MemoryChunk* chunk,
intptr_t* live_bytes) {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
"YoungGenerationEvacuator::RawEvacuatePage");
MinorMarkCompactCollector::NonAtomicMarkingState* marking_state =
collector_->non_atomic_marking_state();
- *live_bytes = marking_state->live_bytes(page);
- switch (ComputeEvacuationMode(page)) {
+ *live_bytes = marking_state->live_bytes(chunk);
+ switch (ComputeEvacuationMode(chunk)) {
case kObjectsNewToOld:
LiveObjectVisitor::VisitGreyObjectsNoFail(
- page, marking_state, &new_space_visitor_,
+ chunk, marking_state, &new_space_visitor_,
LiveObjectVisitor::kClearMarkbits);
// ArrayBufferTracker will be updated during pointers updating.
break;
case kPageNewToOld:
LiveObjectVisitor::VisitGreyObjectsNoFail(
- page, marking_state, &new_to_old_page_visitor_,
+ chunk, marking_state, &new_to_old_page_visitor_,
LiveObjectVisitor::kKeepMarking);
new_to_old_page_visitor_.account_moved_bytes(
- marking_state->live_bytes(page));
- // TODO(mlippautz): If cleaning array buffers is too slow here we can
- // delay it until the next GC.
- ArrayBufferTracker::FreeDead(page, marking_state);
- if (heap()->ShouldZapGarbage()) {
- collector_->MakeIterable(page, MarkingTreatmentMode::KEEP,
- ZAP_FREE_SPACE);
- } else if (heap()->incremental_marking()->IsMarking()) {
- // When incremental marking is on, we need to clear the mark bits of
- // the full collector. We cannot yet discard the young generation mark
- // bits as they are still relevant for pointers updating.
- collector_->MakeIterable(page, MarkingTreatmentMode::KEEP,
- IGNORE_FREE_SPACE);
+ marking_state->live_bytes(chunk));
+ if (chunk->owner()->identity() != NEW_LO_SPACE) {
+ // TODO(mlippautz): If cleaning array buffers is too slow here we can
+ // delay it until the next GC.
+ ArrayBufferTracker::FreeDead(static_cast<Page*>(chunk), marking_state);
+ if (heap()->ShouldZapGarbage()) {
+ collector_->MakeIterable(static_cast<Page*>(chunk),
+ MarkingTreatmentMode::KEEP, ZAP_FREE_SPACE);
+ } else if (heap()->incremental_marking()->IsMarking()) {
+ // When incremental marking is on, we need to clear the mark bits of
+ // the full collector. We cannot yet discard the young generation mark
+ // bits as they are still relevant for pointers updating.
+ collector_->MakeIterable(static_cast<Page*>(chunk),
+ MarkingTreatmentMode::KEEP,
+ IGNORE_FREE_SPACE);
+ }
}
break;
case kPageNewToNew:
LiveObjectVisitor::VisitGreyObjectsNoFail(
- page, marking_state, &new_to_new_page_visitor_,
+ chunk, marking_state, &new_to_new_page_visitor_,
LiveObjectVisitor::kKeepMarking);
new_to_new_page_visitor_.account_moved_bytes(
- marking_state->live_bytes(page));
+ marking_state->live_bytes(chunk));
+ DCHECK_NE(chunk->owner()->identity(), NEW_LO_SPACE);
// TODO(mlippautz): If cleaning array buffers is too slow here we can
// delay it until the next GC.
- ArrayBufferTracker::FreeDead(page, marking_state);
+ ArrayBufferTracker::FreeDead(static_cast<Page*>(chunk), marking_state);
if (heap()->ShouldZapGarbage()) {
- collector_->MakeIterable(page, MarkingTreatmentMode::KEEP,
- ZAP_FREE_SPACE);
+ collector_->MakeIterable(static_cast<Page*>(chunk),
+ MarkingTreatmentMode::KEEP, ZAP_FREE_SPACE);
} else if (heap()->incremental_marking()->IsMarking()) {
// When incremental marking is on, we need to clear the mark bits of
// the full collector. We cannot yet discard the young generation mark
// bits as they are still relevant for pointers updating.
- collector_->MakeIterable(page, MarkingTreatmentMode::KEEP,
- IGNORE_FREE_SPACE);
+ collector_->MakeIterable(static_cast<Page*>(chunk),
+ MarkingTreatmentMode::KEEP, IGNORE_FREE_SPACE);
}
break;
case kObjectsOldToOld:
@@ -4404,7 +4725,7 @@ void MinorMarkCompactCollector::EvacuatePagesInParallel() {
EvacuateNewSpacePageVisitor<NEW_TO_NEW>::Move(page);
}
}
- evacuation_job.AddItem(new PageEvacuationItem(page));
+ evacuation_job.AddItem(new EvacuationItem(page));
}
if (evacuation_job.NumberOfItems() == 0) return;
diff --git a/deps/v8/src/heap/mark-compact.h b/deps/v8/src/heap/mark-compact.h
index c4ab5b2b9c..0d2f4c0434 100644
--- a/deps/v8/src/heap/mark-compact.h
+++ b/deps/v8/src/heap/mark-compact.h
@@ -13,6 +13,8 @@
#include "src/heap/spaces.h"
#include "src/heap/sweeper.h"
#include "src/heap/worklist.h"
+#include "src/objects/heap-object.h" // For Worklist<HeapObject, ...>
+#include "src/objects/js-weak-refs.h" // For Worklist<JSWeakCell, ...>
namespace v8 {
namespace internal {
@@ -29,43 +31,43 @@ class YoungGenerationMarkingVisitor;
template <typename ConcreteState, AccessMode access_mode>
class MarkingStateBase {
public:
- V8_INLINE MarkBit MarkBitFrom(HeapObject* obj) {
- return MarkBitFrom(MemoryChunk::FromAddress(obj->address()),
- obj->address());
+ V8_INLINE MarkBit MarkBitFrom(HeapObject obj) {
+ return MarkBitFrom(MemoryChunk::FromHeapObject(obj), obj->ptr());
}
+ // {addr} may be tagged or aligned.
V8_INLINE MarkBit MarkBitFrom(MemoryChunk* p, Address addr) {
return static_cast<ConcreteState*>(this)->bitmap(p)->MarkBitFromIndex(
p->AddressToMarkbitIndex(addr));
}
- Marking::ObjectColor Color(HeapObject* obj) {
+ Marking::ObjectColor Color(HeapObject obj) {
return Marking::Color(MarkBitFrom(obj));
}
- V8_INLINE bool IsImpossible(HeapObject* obj) {
+ V8_INLINE bool IsImpossible(HeapObject obj) {
return Marking::IsImpossible<access_mode>(MarkBitFrom(obj));
}
- V8_INLINE bool IsBlack(HeapObject* obj) {
+ V8_INLINE bool IsBlack(HeapObject obj) {
return Marking::IsBlack<access_mode>(MarkBitFrom(obj));
}
- V8_INLINE bool IsWhite(HeapObject* obj) {
+ V8_INLINE bool IsWhite(HeapObject obj) {
return Marking::IsWhite<access_mode>(MarkBitFrom(obj));
}
- V8_INLINE bool IsGrey(HeapObject* obj) {
+ V8_INLINE bool IsGrey(HeapObject obj) {
return Marking::IsGrey<access_mode>(MarkBitFrom(obj));
}
- V8_INLINE bool IsBlackOrGrey(HeapObject* obj) {
+ V8_INLINE bool IsBlackOrGrey(HeapObject obj) {
return Marking::IsBlackOrGrey<access_mode>(MarkBitFrom(obj));
}
- V8_INLINE bool WhiteToGrey(HeapObject* obj);
- V8_INLINE bool WhiteToBlack(HeapObject* obj);
- V8_INLINE bool GreyToBlack(HeapObject* obj);
+ V8_INLINE bool WhiteToGrey(HeapObject obj);
+ V8_INLINE bool WhiteToBlack(HeapObject obj);
+ V8_INLINE bool GreyToBlack(HeapObject obj);
void ClearLiveness(MemoryChunk* chunk) {
static_cast<ConcreteState*>(this)->bitmap(chunk)->Clear();
@@ -76,13 +78,9 @@ class MarkingStateBase {
class MarkBitCellIterator {
public:
MarkBitCellIterator(MemoryChunk* chunk, Bitmap* bitmap) : chunk_(chunk) {
- DCHECK(Bitmap::IsCellAligned(
- chunk_->AddressToMarkbitIndex(chunk_->area_start())));
- DCHECK(Bitmap::IsCellAligned(
- chunk_->AddressToMarkbitIndex(chunk_->area_end())));
last_cell_index_ =
Bitmap::IndexToCell(chunk_->AddressToMarkbitIndex(chunk_->area_end()));
- cell_base_ = chunk_->area_start();
+ cell_base_ = chunk_->address();
cell_index_ =
Bitmap::IndexToCell(chunk_->AddressToMarkbitIndex(cell_base_));
cells_ = bitmap->cells();
@@ -105,7 +103,7 @@ class MarkBitCellIterator {
}
V8_WARN_UNUSED_RESULT inline bool Advance() {
- cell_base_ += Bitmap::kBitsPerCell * kPointerSize;
+ cell_base_ += Bitmap::kBitsPerCell * kTaggedSize;
return ++cell_index_ != last_cell_index_;
}
@@ -115,7 +113,7 @@ class MarkBitCellIterator {
DCHECK_LE(new_cell_index, last_cell_index_);
unsigned int diff = new_cell_index - cell_index_;
cell_index_ = new_cell_index;
- cell_base_ += diff * (Bitmap::kBitsPerCell * kPointerSize);
+ cell_base_ += diff * (Bitmap::kBitsPerCell * kTaggedSize);
return true;
}
return false;
@@ -148,7 +146,7 @@ class LiveObjectRange {
public:
class iterator {
public:
- using value_type = std::pair<HeapObject*, int /* size */>;
+ using value_type = std::pair<HeapObject, int /* size */>;
using pointer = const value_type*;
using reference = const value_type&;
using iterator_category = std::forward_iterator_tag;
@@ -172,13 +170,13 @@ class LiveObjectRange {
inline void AdvanceToNextValidObject();
MemoryChunk* const chunk_;
- Map* const one_word_filler_map_;
- Map* const two_word_filler_map_;
- Map* const free_space_map_;
+ Map const one_word_filler_map_;
+ Map const two_word_filler_map_;
+ Map const free_space_map_;
MarkBitCellIterator it_;
Address cell_base_;
MarkBit::CellType current_cell_;
- HeapObject* current_object_;
+ HeapObject current_object_;
int current_size_;
};
@@ -211,7 +209,7 @@ class LiveObjectVisitor : AllStatic {
template <class Visitor, typename MarkingState>
static bool VisitBlackObjects(MemoryChunk* chunk, MarkingState* state,
Visitor* visitor, IterationMode iteration_mode,
- HeapObject** failed_object);
+ HeapObject* failed_object);
// Visits black objects on a MemoryChunk. The visitor is not allowed to fail
// visitation for an object.
@@ -238,6 +236,8 @@ enum class RememberedSetUpdatingMode { ALL, OLD_TO_NEW_ONLY };
// Base class for minor and full MC collectors.
class MarkCompactCollectorBase {
public:
+ static const int kMainThread = 0;
+
virtual ~MarkCompactCollectorBase() = default;
virtual void SetUp() = 0;
@@ -248,7 +248,6 @@ class MarkCompactCollectorBase {
inline Isolate* isolate();
protected:
- static const int kMainThread = 0;
explicit MarkCompactCollectorBase(Heap* heap)
: heap_(heap), old_to_new_slots_(0) {}
@@ -341,7 +340,10 @@ class IncrementalMarkingState final
: public MarkingStateBase<IncrementalMarkingState, AccessMode::ATOMIC> {
public:
Bitmap* bitmap(const MemoryChunk* chunk) const {
- return Bitmap::FromAddress(chunk->address() + MemoryChunk::kHeaderSize);
+ DCHECK_EQ(reinterpret_cast<intptr_t>(&chunk->marking_bitmap_) -
+ reinterpret_cast<intptr_t>(chunk),
+ MemoryChunk::kMarkBitmapOffset);
+ return chunk->marking_bitmap_;
}
// Concurrent marking uses local live bytes.
@@ -362,7 +364,10 @@ class MajorAtomicMarkingState final
: public MarkingStateBase<MajorAtomicMarkingState, AccessMode::ATOMIC> {
public:
Bitmap* bitmap(const MemoryChunk* chunk) const {
- return Bitmap::FromAddress(chunk->address() + MemoryChunk::kHeaderSize);
+ DCHECK_EQ(reinterpret_cast<intptr_t>(&chunk->marking_bitmap_) -
+ reinterpret_cast<intptr_t>(chunk),
+ MemoryChunk::kMarkBitmapOffset);
+ return chunk->marking_bitmap_;
}
void IncrementLiveBytes(MemoryChunk* chunk, intptr_t by) {
@@ -383,7 +388,10 @@ class MajorNonAtomicMarkingState final
AccessMode::NON_ATOMIC> {
public:
Bitmap* bitmap(const MemoryChunk* chunk) const {
- return Bitmap::FromAddress(chunk->address() + MemoryChunk::kHeaderSize);
+ DCHECK_EQ(reinterpret_cast<intptr_t>(&chunk->marking_bitmap_) -
+ reinterpret_cast<intptr_t>(chunk),
+ MemoryChunk::kMarkBitmapOffset);
+ return chunk->marking_bitmap_;
}
void IncrementLiveBytes(MemoryChunk* chunk, intptr_t by) {
@@ -400,19 +408,19 @@ class MajorNonAtomicMarkingState final
};
struct Ephemeron {
- HeapObject* key;
- HeapObject* value;
+ HeapObject key;
+ HeapObject value;
};
typedef Worklist<Ephemeron, 64> EphemeronWorklist;
// Weak objects encountered during marking.
struct WeakObjects {
- Worklist<TransitionArray*, 64> transition_arrays;
+ Worklist<TransitionArray, 64> transition_arrays;
// Keep track of all EphemeronHashTables in the heap to process
// them in the atomic pause.
- Worklist<EphemeronHashTable*, 64> ephemeron_hash_tables;
+ Worklist<EphemeronHashTable, 64> ephemeron_hash_tables;
// Keep track of all ephemerons for concurrent marking tasks. Only store
// ephemerons in these Worklists if both key and value are unreachable at the
@@ -434,12 +442,18 @@ struct WeakObjects {
// TODO(marja): For old space, we only need the slot, not the host
// object. Optimize this by adding a different storage for old space.
- Worklist<std::pair<HeapObject*, HeapObjectReference**>, 64> weak_references;
- Worklist<std::pair<HeapObject*, Code*>, 64> weak_objects_in_code;
+ Worklist<std::pair<HeapObject, HeapObjectSlot>, 64> weak_references;
+ Worklist<std::pair<HeapObject, Code>, 64> weak_objects_in_code;
+
+ Worklist<JSWeakRef, 64> js_weak_refs;
+ Worklist<JSWeakCell, 64> js_weak_cells;
+
+ Worklist<SharedFunctionInfo, 64> bytecode_flushing_candidates;
+ Worklist<JSFunction, 64> flushed_js_functions;
};
struct EphemeronMarking {
- std::vector<HeapObject*> newly_discovered;
+ std::vector<HeapObject> newly_discovered;
bool newly_discovered_overflowed;
size_t newly_discovered_limit;
};
@@ -455,64 +469,42 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
using NonAtomicMarkingState = MajorNonAtomicMarkingState;
- // Wrapper for the shared and bailout worklists.
+ // Wrapper for the shared worklist.
class MarkingWorklist {
public:
- using ConcurrentMarkingWorklist = Worklist<HeapObject*, 64>;
- using EmbedderTracingWorklist = Worklist<HeapObject*, 16>;
+ using ConcurrentMarkingWorklist = Worklist<HeapObject, 64>;
+ using EmbedderTracingWorklist = Worklist<HeapObject, 16>;
// The heap parameter is not used but needed to match the sequential case.
explicit MarkingWorklist(Heap* heap) {}
- void Push(HeapObject* object) {
+ void Push(HeapObject object) {
bool success = shared_.Push(kMainThread, object);
USE(success);
DCHECK(success);
}
- void PushBailout(HeapObject* object) {
- bool success = bailout_.Push(kMainThread, object);
- USE(success);
- DCHECK(success);
- }
-
- HeapObject* Pop() {
- HeapObject* result;
-#ifdef V8_CONCURRENT_MARKING
- if (bailout_.Pop(kMainThread, &result)) return result;
-#endif
+ HeapObject Pop() {
+ HeapObject result;
if (shared_.Pop(kMainThread, &result)) return result;
#ifdef V8_CONCURRENT_MARKING
// The expectation is that this work list is empty almost all the time
// and we can thus avoid the emptiness checks by putting it last.
if (on_hold_.Pop(kMainThread, &result)) return result;
#endif
- return nullptr;
- }
-
- HeapObject* PopBailout() {
-#ifdef V8_CONCURRENT_MARKING
- HeapObject* result;
- if (bailout_.Pop(kMainThread, &result)) return result;
-#endif
- return nullptr;
+ return HeapObject();
}
void Clear() {
- bailout_.Clear();
shared_.Clear();
on_hold_.Clear();
embedder_.Clear();
}
- bool IsBailoutEmpty() { return bailout_.IsLocalEmpty(kMainThread); }
-
bool IsEmpty() {
- return bailout_.IsLocalEmpty(kMainThread) &&
- shared_.IsLocalEmpty(kMainThread) &&
+ return shared_.IsLocalEmpty(kMainThread) &&
on_hold_.IsLocalEmpty(kMainThread) &&
- bailout_.IsGlobalPoolEmpty() && shared_.IsGlobalPoolEmpty() &&
- on_hold_.IsGlobalPoolEmpty();
+ shared_.IsGlobalPoolEmpty() && on_hold_.IsGlobalPoolEmpty();
}
bool IsEmbedderEmpty() {
@@ -521,31 +513,27 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
}
int Size() {
- return static_cast<int>(bailout_.LocalSize(kMainThread) +
- shared_.LocalSize(kMainThread) +
+ return static_cast<int>(shared_.LocalSize(kMainThread) +
on_hold_.LocalSize(kMainThread));
}
// Calls the specified callback on each element of the deques and replaces
// the element with the result of the callback. If the callback returns
// nullptr then the element is removed from the deque.
- // The callback must accept HeapObject* and return HeapObject*.
+ // The callback must accept HeapObject and return HeapObject.
template <typename Callback>
void Update(Callback callback) {
- bailout_.Update(callback);
shared_.Update(callback);
on_hold_.Update(callback);
embedder_.Update(callback);
}
ConcurrentMarkingWorklist* shared() { return &shared_; }
- ConcurrentMarkingWorklist* bailout() { return &bailout_; }
ConcurrentMarkingWorklist* on_hold() { return &on_hold_; }
EmbedderTracingWorklist* embedder() { return &embedder_; }
void Print() {
PrintWorklist("shared", &shared_);
- PrintWorklist("bailout", &bailout_);
PrintWorklist("on_hold", &on_hold_);
}
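
The Pop ordering above is deliberate: the shared worklist is drained first and on_hold is probed last, since the comment notes it is empty almost all the time. A trimmed model of that two-list priority (illustrative types):

#include <vector>

struct TwoListWorklist {
  std::vector<int> shared, on_hold;

  bool Pop(int* out) {
    if (!shared.empty()) { *out = shared.back(); shared.pop_back(); return true; }
    if (!on_hold.empty()) { *out = on_hold.back(); on_hold.pop_back(); return true; }
    return false;  // corresponds to returning an empty HeapObject()
  }
};
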
@@ -557,11 +545,6 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
// Worklist used for most objects.
ConcurrentMarkingWorklist shared_;
- // Concurrent marking uses this worklist to bail out of concurrently
- // marking certain object types. These objects are handled later in a STW
- // pause after concurrent marking has finished.
- ConcurrentMarkingWorklist bailout_;
-
// Concurrent marking uses this worklist to bail out of marking objects
// in new space's linear allocation area. Used to avoid black allocation
// for new space. This allows the compiler to remove write barriers
@@ -609,22 +592,25 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
void AbortCompaction();
- static inline bool IsOnEvacuationCandidate(Object* obj) {
- return Page::FromAddress(reinterpret_cast<Address>(obj))
- ->IsEvacuationCandidate();
+ static inline bool IsOnEvacuationCandidate(Object obj) {
+ return Page::FromAddress(obj->ptr())->IsEvacuationCandidate();
}
- static inline bool IsOnEvacuationCandidate(MaybeObject* obj) {
- return Page::FromAddress(reinterpret_cast<Address>(obj))
- ->IsEvacuationCandidate();
- }
+ static bool IsOnEvacuationCandidate(MaybeObject obj);
- void RecordRelocSlot(Code* host, RelocInfo* rinfo, Object* target);
- V8_INLINE static void RecordSlot(HeapObject* object, Object** slot,
- HeapObject* target);
- V8_INLINE static void RecordSlot(HeapObject* object,
- HeapObjectReference** slot,
- HeapObject* target);
+ struct RecordRelocSlotInfo {
+ MemoryChunk* memory_chunk;
+ SlotType slot_type;
+ bool should_record;
+ uint32_t offset;
+ };
+ static RecordRelocSlotInfo PrepareRecordRelocSlot(Code host, RelocInfo* rinfo,
+ HeapObject target);
+ static void RecordRelocSlot(Code host, RelocInfo* rinfo, HeapObject target);
+ V8_INLINE static void RecordSlot(HeapObject object, ObjectSlot slot,
+ HeapObject target);
+ V8_INLINE static void RecordSlot(HeapObject object, HeapObjectSlot slot,
+ HeapObject target);
void RecordLiveSlotsOnPage(Page* page);
void UpdateSlots(SlotsBuffer* buffer);
@@ -648,29 +634,38 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
WeakObjects* weak_objects() { return &weak_objects_; }
- void AddTransitionArray(TransitionArray* array) {
- weak_objects_.transition_arrays.Push(kMainThread, array);
- }
+ inline void AddTransitionArray(TransitionArray array);
- void AddEphemeronHashTable(EphemeronHashTable* table) {
+ void AddEphemeronHashTable(EphemeronHashTable table) {
weak_objects_.ephemeron_hash_tables.Push(kMainThread, table);
}
- void AddEphemeron(HeapObject* key, HeapObject* value) {
+ void AddEphemeron(HeapObject key, HeapObject value) {
weak_objects_.discovered_ephemerons.Push(kMainThread,
Ephemeron{key, value});
}
- void AddWeakReference(HeapObject* host, HeapObjectReference** slot) {
+ void AddWeakReference(HeapObject host, HeapObjectSlot slot) {
weak_objects_.weak_references.Push(kMainThread, std::make_pair(host, slot));
}
- void AddWeakObjectInCode(HeapObject* object, Code* code) {
+ void AddWeakObjectInCode(HeapObject object, Code code) {
weak_objects_.weak_objects_in_code.Push(kMainThread,
std::make_pair(object, code));
}
- void AddNewlyDiscovered(HeapObject* object) {
+ void AddWeakRef(JSWeakRef weak_ref) {
+ weak_objects_.js_weak_refs.Push(kMainThread, weak_ref);
+ }
+
+ void AddWeakCell(JSWeakCell weak_cell) {
+ weak_objects_.js_weak_cells.Push(kMainThread, weak_cell);
+ }
+
+ inline void AddBytecodeFlushingCandidate(SharedFunctionInfo flush_candidate);
+ inline void AddFlushedJSFunction(JSFunction flushed_function);
+
+ void AddNewlyDiscovered(HeapObject object) {
if (ephemeron_marking_.newly_discovered_overflowed) return;
if (ephemeron_marking_.newly_discovered.size() <
@@ -701,14 +696,15 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
void VerifyMarkbitsAreDirty(PagedSpace* space);
void VerifyMarkbitsAreClean(PagedSpace* space);
void VerifyMarkbitsAreClean(NewSpace* space);
+ void VerifyMarkbitsAreClean(LargeObjectSpace* space);
#endif
+ unsigned epoch() const { return epoch_; }
+
private:
explicit MarkCompactCollector(Heap* heap);
~MarkCompactCollector() override;
- bool WillBeDeoptimized(Code* code);
-
void ComputeEvacuationHeuristics(size_t area_size,
int* target_fragmentation_percent,
size_t* max_evacuated_bytes);
@@ -722,14 +718,14 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
// Marks the object black and adds it to the marking work list.
// This is for non-incremental marking only.
- V8_INLINE void MarkObject(HeapObject* host, HeapObject* obj);
+ V8_INLINE void MarkObject(HeapObject host, HeapObject obj);
// Marks the object black and adds it to the marking work list.
// This is for non-incremental marking only.
- V8_INLINE void MarkRootObject(Root root, HeapObject* obj);
+ V8_INLINE void MarkRootObject(Root root, HeapObject obj);
// Used by wrapper tracing.
- V8_INLINE void MarkExternallyReferencedObject(HeapObject* obj);
+ V8_INLINE void MarkExternallyReferencedObject(HeapObject obj);
// Mark the heap roots and all objects reachable from them.
void MarkRoots(RootVisitor* root_visitor,
@@ -747,9 +743,6 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
// otherwise they can die and try to deoptimize the underlying code.
void ProcessTopOptimizedFrame(ObjectVisitor* visitor);
- // Collects a list of dependent code from maps embedded in optimize code.
- DependentCode* DependentCodeListFromNonLiveMaps();
-
// Drains the main thread marking work list. Will mark all pending objects
// if no concurrent threads are running.
void ProcessMarkingWorklist() override;
@@ -764,7 +757,7 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
// Implements ephemeron semantics: Marks value if key is already reachable.
// Returns true if value was actually marked.
- bool VisitEphemeron(HeapObject* key, HeapObject* value);
+ bool VisitEphemeron(HeapObject key, HeapObject value);
// Marks ephemerons and drains marking worklist iteratively
// until a fixpoint is reached.
@@ -783,7 +776,7 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
// Callback function for telling whether the object *p is an unmarked
// heap object.
- static bool IsUnmarkedHeapObject(Heap* heap, Object** p);
+ static bool IsUnmarkedHeapObject(Heap* heap, FullObjectSlot p);
// Clear non-live references in weak cells, transition and descriptor arrays,
// and deoptimize dependent code of non-live maps.
@@ -792,15 +785,26 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
// Checks if the given weak cell is a simple transition from the parent map
// of the given dead target. If so it clears the transition and trims
// the descriptor array of the parent if needed.
- void ClearPotentialSimpleMapTransition(Map* dead_target);
- void ClearPotentialSimpleMapTransition(Map* map, Map* dead_target);
+ void ClearPotentialSimpleMapTransition(Map dead_target);
+ void ClearPotentialSimpleMapTransition(Map map, Map dead_target);
+
+ // Flushes a weakly held bytecode array from a shared function info.
+ void FlushBytecodeFromSFI(SharedFunctionInfo shared_info);
+
+ // Clears bytecode arrays that have not been executed for multiple
+ // collections.
+ void ClearOldBytecodeCandidates();
+
+ // Resets any JSFunctions which have had their bytecode flushed.
+ void ClearFlushedJsFunctions();
+
// Compact every array in the global list of transition arrays and
// trim the corresponding descriptor array if a transition target is non-live.
void ClearFullMapTransitions();
- bool CompactTransitionArray(Map* map, TransitionArray* transitions,
- DescriptorArray* descriptors);
- void TrimDescriptorArray(Map* map, DescriptorArray* descriptors);
- void TrimEnumCache(Map* map, DescriptorArray* descriptors);
+ void TrimDescriptorArray(Map map, DescriptorArray descriptors);
+ void TrimEnumCache(Map map, DescriptorArray descriptors);
+ bool CompactTransitionArray(Map map, TransitionArray transitions,
+ DescriptorArray descriptors);
// After all reachable objects have been marked those weak map entries
// with an unreachable key are removed from all encountered weak maps.
@@ -812,6 +816,11 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
// the dead map via weak cell, then this function also clears the map
// transition.
void ClearWeakReferences();
+
+ // Goes through the list of encountered JSWeakCells and clears those with dead
+ // values.
+ void ClearJSWeakCells();
+
void AbortWeakObjects();
// Starts sweeping of spaces by contributing on the main thread and setting
@@ -835,12 +844,15 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
void ReleaseEvacuationCandidates();
void PostProcessEvacuationCandidates();
- void ReportAbortedEvacuationCandidate(HeapObject* failed_object, Page* page);
+ void ReportAbortedEvacuationCandidate(HeapObject failed_object,
+ MemoryChunk* chunk);
static const int kEphemeronChunkSize = 8 * KB;
int NumberOfParallelEphemeronVisitingTasks(size_t elements);
+ void RightTrimDescriptorArray(DescriptorArray array, int descriptors_to_trim);
+
base::Mutex mutex_;
base::Semaphore page_parallel_job_semaphore_;
@@ -880,13 +892,18 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
// Pages that are actually processed during evacuation.
std::vector<Page*> old_space_evacuation_pages_;
std::vector<Page*> new_space_evacuation_pages_;
- std::vector<std::pair<HeapObject*, Page*>> aborted_evacuation_candidates_;
+ std::vector<std::pair<HeapObject, Page*>> aborted_evacuation_candidates_;
Sweeper* sweeper_;
MarkingState marking_state_;
NonAtomicMarkingState non_atomic_marking_state_;
+ // Counts the number of mark-compact collections. This is used for marking
+ // descriptor arrays. See NumberOfMarkedDescriptors. Only lower two bits are
+ // used, so it is okay if this counter overflows and wraps around.
+ unsigned epoch_ = 0;
+
friend class EphemeronHashTableMarkingTask;
friend class FullEvacuator;
friend class Heap;
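
The epoch_ comment above depends on unsigned wraparound being harmless because consumers only ever compare the low two bits. A worked version of that check (illustrative, not V8's NumberOfMarkedDescriptors code):

// Only the low two bits of the collection counter are persisted next to the
// marked-descriptor count, so comparisons survive wraparound.
struct EpochSketch {
  unsigned epoch = 0;

  unsigned StoredBits() const { return epoch & 3; }
  bool SameCollection(unsigned stored_bits) const {
    return StoredBits() == stored_bits;
  }
  void OnCollectionStart() { ++epoch; }  // overflow wraps; low bits stay valid
};

// Example: epoch 4294967295 stores bits 3; after one more collection the
// counter wraps to 0 and stores bits 0, still distinct from 3.
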
@@ -909,49 +926,69 @@ class MarkingVisitor final
V8_INLINE bool ShouldVisitMapPointer() { return false; }
- V8_INLINE int VisitBytecodeArray(Map* map, BytecodeArray* object);
- V8_INLINE int VisitEphemeronHashTable(Map* map, EphemeronHashTable* object);
- V8_INLINE int VisitFixedArray(Map* map, FixedArray* object);
- V8_INLINE int VisitJSApiObject(Map* map, JSObject* object);
- V8_INLINE int VisitJSArrayBuffer(Map* map, JSArrayBuffer* object);
- V8_INLINE int VisitJSDataView(Map* map, JSDataView* object);
- V8_INLINE int VisitJSTypedArray(Map* map, JSTypedArray* object);
- V8_INLINE int VisitMap(Map* map, Map* object);
- V8_INLINE int VisitTransitionArray(Map* map, TransitionArray* object);
+ V8_INLINE int VisitBytecodeArray(Map map, BytecodeArray object);
+ V8_INLINE int VisitDescriptorArray(Map map, DescriptorArray object);
+ V8_INLINE int VisitEphemeronHashTable(Map map, EphemeronHashTable object);
+ V8_INLINE int VisitFixedArray(Map map, FixedArray object);
+ V8_INLINE int VisitJSApiObject(Map map, JSObject object);
+ V8_INLINE int VisitJSArrayBuffer(Map map, JSArrayBuffer object);
+ V8_INLINE int VisitJSFunction(Map map, JSFunction object);
+ V8_INLINE int VisitJSDataView(Map map, JSDataView object);
+ V8_INLINE int VisitJSTypedArray(Map map, JSTypedArray object);
+ V8_INLINE int VisitMap(Map map, Map object);
+ V8_INLINE int VisitSharedFunctionInfo(Map map, SharedFunctionInfo object);
+ V8_INLINE int VisitTransitionArray(Map map, TransitionArray object);
+ V8_INLINE int VisitJSWeakCell(Map map, JSWeakCell object);
+ V8_INLINE int VisitJSWeakRef(Map map, JSWeakRef object);
// ObjectVisitor implementation.
- V8_INLINE void VisitPointer(HeapObject* host, Object** p) final;
- V8_INLINE void VisitPointer(HeapObject* host, MaybeObject** p) final;
- V8_INLINE void VisitPointers(HeapObject* host, Object** start,
- Object** end) final;
- V8_INLINE void VisitPointers(HeapObject* host, MaybeObject** start,
- MaybeObject** end) final;
- V8_INLINE void VisitEmbeddedPointer(Code* host, RelocInfo* rinfo) final;
- V8_INLINE void VisitCodeTarget(Code* host, RelocInfo* rinfo) final;
+ V8_INLINE void VisitPointer(HeapObject host, ObjectSlot p) final {
+ VisitPointerImpl(host, p);
+ }
+ V8_INLINE void VisitPointer(HeapObject host, MaybeObjectSlot p) final {
+ VisitPointerImpl(host, p);
+ }
+ V8_INLINE void VisitPointers(HeapObject host, ObjectSlot start,
+ ObjectSlot end) final {
+ VisitPointersImpl(host, start, end);
+ }
+ V8_INLINE void VisitPointers(HeapObject host, MaybeObjectSlot start,
+ MaybeObjectSlot end) final {
+ VisitPointersImpl(host, start, end);
+ }
+ V8_INLINE void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) final;
+ V8_INLINE void VisitCodeTarget(Code host, RelocInfo* rinfo) final;
// Weak list pointers should be ignored during marking. The lists are
// reconstructed after GC.
- void VisitCustomWeakPointers(HeapObject* host, Object** start,
- Object** end) final {}
+ void VisitCustomWeakPointers(HeapObject host, ObjectSlot start,
+ ObjectSlot end) final {}
+
+ V8_INLINE void VisitDescriptors(DescriptorArray descriptors,
+ int number_of_own_descriptors);
+ // Marks the descriptor array black without pushing it on the marking work
+ // list and visits its header.
+ V8_INLINE void MarkDescriptorArrayBlack(HeapObject host,
+ DescriptorArray descriptors);
private:
// Granularity in which FixedArrays are scanned if |fixed_array_mode|
// is true.
- static const int kProgressBarScanningChunk = 32 * 1024;
+ static const int kProgressBarScanningChunk = 32 * KB;
- V8_INLINE int VisitFixedArrayIncremental(Map* map, FixedArray* object);
+ template <typename TSlot>
+ V8_INLINE void VisitPointerImpl(HeapObject host, TSlot p);
- template <typename T>
- V8_INLINE int VisitEmbedderTracingSubclass(Map* map, T* object);
+ template <typename TSlot>
+ V8_INLINE void VisitPointersImpl(HeapObject host, TSlot start, TSlot end);
- V8_INLINE void MarkMapContents(Map* map);
+ V8_INLINE int VisitFixedArrayIncremental(Map map, FixedArray object);
- // Marks the object black without pushing it on the marking work list. Returns
- // true if the object needed marking and false otherwise.
- V8_INLINE bool MarkObjectWithoutPush(HeapObject* host, HeapObject* object);
+ template <typename T>
+ V8_INLINE int VisitEmbedderTracingSubclass(Map map, T object);
// Marks the object grey and pushes it on the marking work list.
- V8_INLINE void MarkObject(HeapObject* host, HeapObject* obj);
+ V8_INLINE void MarkObject(HeapObject host, HeapObject obj);
MarkingState* marking_state() { return marking_state_; }
@@ -962,6 +999,7 @@ class MarkingVisitor final
Heap* const heap_;
MarkCompactCollector* const collector_;
MarkingState* const marking_state_;
+ const unsigned mark_compact_epoch_;
};
class EvacuationScope {
@@ -1003,7 +1041,7 @@ class MinorMarkCompactCollector final : public MarkCompactCollectorBase {
void CleanupSweepToIteratePages();
private:
- using MarkingWorklist = Worklist<HeapObject*, 64 /* segment size */>;
+ using MarkingWorklist = Worklist<HeapObject, 64 /* segment size */>;
class RootMarkingVisitor;
static const int kNumMarkers = 8;
@@ -1017,7 +1055,7 @@ class MinorMarkCompactCollector final : public MarkCompactCollectorBase {
void MarkLiveObjects() override;
void MarkRootSetInParallel(RootMarkingVisitor* root_visitor);
- V8_INLINE void MarkRootObject(HeapObject* obj);
+ V8_INLINE void MarkRootObject(HeapObject obj);
void ProcessMarkingWorklist() override;
void ClearNonLiveReferences() override;
diff --git a/deps/v8/src/heap/marking.h b/deps/v8/src/heap/marking.h
index ccf25d6549..df73f1c5c1 100644
--- a/deps/v8/src/heap/marking.h
+++ b/deps/v8/src/heap/marking.h
@@ -98,10 +98,10 @@ class V8_EXPORT_PRIVATE Bitmap {
static const uint32_t kBytesPerCell = kBitsPerCell / kBitsPerByte;
static const uint32_t kBytesPerCellLog2 = kBitsPerCellLog2 - kBitsPerByteLog2;
- static const size_t kLength = (1 << kPageSizeBits) >> (kPointerSizeLog2);
+ static const size_t kLength = (1 << kPageSizeBits) >> (kTaggedSizeLog2);
static const size_t kSize = (1 << kPageSizeBits) >>
- (kPointerSizeLog2 + kBitsPerByteLog2);
+ (kTaggedSizeLog2 + kBitsPerByteLog2);
static int CellsForLength(int length) {
return (length + kBitsPerCell - 1) >> kBitsPerCellLog2;
@@ -122,10 +122,6 @@ class V8_EXPORT_PRIVATE Bitmap {
return index & ~kBitIndexMask;
}
- V8_INLINE static bool IsCellAligned(uint32_t index) {
- return (index & kBitIndexMask) == 0;
- }
-
V8_INLINE MarkBit::CellType* cells() {
return reinterpret_cast<MarkBit::CellType*>(this);
}
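
Plugging numbers into the Bitmap constants above makes the kPointerSizeLog2 -> kTaggedSizeLog2 switch concrete; without pointer compression the two logarithms are equal, so the computed sizes do not change. A checked instance, assuming 512 KB pages (kPageSizeBits == 19) and 8-byte tagged words:

#include <cstddef>

constexpr size_t kPageSizeBits = 19;     // assumed: 512 KB pages
constexpr size_t kTaggedSizeLog2 = 3;    // assumed: 8-byte tagged words
constexpr size_t kBitsPerByteLog2 = 3;

// One mark bit per possible tagged-aligned word on the page.
constexpr size_t kLength = (size_t{1} << kPageSizeBits) >> kTaggedSizeLog2;
constexpr size_t kSize =
    (size_t{1} << kPageSizeBits) >> (kTaggedSizeLog2 + kBitsPerByteLog2);

static_assert(kLength == 64 * 1024, "64K mark bits per 512 KB page");
static_assert(kSize == 8 * 1024, "bitmap occupies 8 KB per page");
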
diff --git a/deps/v8/src/heap/memory-reducer.cc b/deps/v8/src/heap/memory-reducer.cc
index 4af7df87fd..475728b769 100644
--- a/deps/v8/src/heap/memory-reducer.cc
+++ b/deps/v8/src/heap/memory-reducer.cc
@@ -7,6 +7,7 @@
#include "src/flags.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/heap-inl.h"
+#include "src/heap/incremental-marking.h"
#include "src/utils.h"
#include "src/v8.h"
diff --git a/deps/v8/src/heap/memory-reducer.h b/deps/v8/src/heap/memory-reducer.h
index 90a955150e..2b0a8b81bb 100644
--- a/deps/v8/src/heap/memory-reducer.h
+++ b/deps/v8/src/heap/memory-reducer.h
@@ -13,6 +13,10 @@
namespace v8 {
namespace internal {
+namespace heap {
+class HeapTester;
+} // namespace heap
+
class Heap;
@@ -161,7 +165,7 @@ class V8_EXPORT_PRIVATE MemoryReducer {
double js_calls_sample_time_ms_;
// Used in cctest.
- friend class HeapTester;
+ friend class heap::HeapTester;
DISALLOW_COPY_AND_ASSIGN(MemoryReducer);
};
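Qualifying the friend declaration is more than style: an unqualified friend class HeapTester; inside namespace internal would declare and befriend v8::internal::HeapTester, silently missing the test fixture that actually lives in v8::internal::heap, hence the forward declaration added above. A toy reproduction of the lookup rule (names invented):

namespace outer {
namespace inner {
class Tester;  // the class we actually want as a friend
}  // namespace inner

class Widget {
 private:
  int state_ = 0;
  // friend class Tester;      // would declare and befriend outer::Tester
  friend class inner::Tester;  // befriends the intended class
};
}  // namespace outer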
diff --git a/deps/v8/src/heap/object-stats.cc b/deps/v8/src/heap/object-stats.cc
index bb069d19f4..10fd67e907 100644
--- a/deps/v8/src/heap/object-stats.cc
+++ b/deps/v8/src/heap/object-stats.cc
@@ -15,11 +15,14 @@
#include "src/heap/heap-inl.h"
#include "src/heap/mark-compact.h"
#include "src/isolate.h"
+#include "src/memcopy.h"
#include "src/objects/compilation-cache-inl.h"
+#include "src/objects/heap-object.h"
#include "src/objects/js-collection-inl.h"
#include "src/objects/literal-objects-inl.h"
+#include "src/objects/slots.h"
#include "src/objects/templates.h"
-#include "src/utils.h"
+#include "src/ostreams.h"
namespace v8 {
namespace internal {
@@ -37,13 +40,13 @@ class FieldStatsCollector : public ObjectVisitor {
unboxed_double_fields_count_(unboxed_double_fields_count),
raw_fields_count_(raw_fields_count) {}
- void RecordStats(HeapObject* host) {
+ void RecordStats(HeapObject host) {
size_t old_pointer_fields_count = *tagged_fields_count_;
host->Iterate(this);
size_t tagged_fields_count_in_object =
*tagged_fields_count_ - old_pointer_fields_count;
- int object_size_in_words = host->Size() / kPointerSize;
+ int object_size_in_words = host->Size() / kTaggedSize;
DCHECK_LE(tagged_fields_count_in_object, object_size_in_words);
size_t raw_fields_count_in_object =
object_size_in_words - tagged_fields_count_in_object;
@@ -66,14 +69,24 @@ class FieldStatsCollector : public ObjectVisitor {
*raw_fields_count_ += raw_fields_count_in_object;
}
- void VisitPointers(HeapObject* host, Object** start, Object** end) override {
+ void VisitPointers(HeapObject host, ObjectSlot start,
+ ObjectSlot end) override {
*tagged_fields_count_ += (end - start);
}
- void VisitPointers(HeapObject* host, MaybeObject** start,
- MaybeObject** end) override {
+ void VisitPointers(HeapObject host, MaybeObjectSlot start,
+ MaybeObjectSlot end) override {
*tagged_fields_count_ += (end - start);
}
+ void VisitCodeTarget(Code host, RelocInfo* rinfo) override {
+ // Code target is most likely encoded as a relative 32-bit offset and not
+ // as a full tagged value, so there's nothing to count.
+ }
+
+ void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
+ *tagged_fields_count_ += 1;
+ }
+
private:
struct JSObjectFieldStats {
JSObjectFieldStats()
@@ -82,9 +95,10 @@ class FieldStatsCollector : public ObjectVisitor {
unsigned embedded_fields_count_ : kDescriptorIndexBitCount;
unsigned unboxed_double_fields_count_ : kDescriptorIndexBitCount;
};
- std::unordered_map<Map*, JSObjectFieldStats> field_stats_cache_;
+ std::unordered_map<Map, JSObjectFieldStats, Object::Hasher>
+ field_stats_cache_;
- JSObjectFieldStats GetInobjectFieldStats(Map* map);
+ JSObjectFieldStats GetInobjectFieldStats(Map map);
size_t* const tagged_fields_count_;
size_t* const embedder_fields_count_;
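Note the second template argument added to field_stats_cache_ above (and to virtual_objects_ further down): once Map and HeapObject are value types, the standard std::hash<T*> specialization for pointers no longer applies, so an explicit hasher (Object::Hasher in the diff) is threaded through the unordered containers. The equivalent shape on a toy handle:

#include <cstdint>
#include <unordered_map>
#include <unordered_set>

struct HandleSketch {
  uintptr_t ptr = 0;
  bool operator==(const HandleSketch& o) const { return ptr == o.ptr; }
};

struct HasherSketch {  // plays the role of Object::Hasher
  size_t operator()(const HandleSketch& h) const {
    return static_cast<size_t>(h.ptr >> 3);  // strip low tag bits
  }
};

using VirtualObjects = std::unordered_set<HandleSketch, HasherSketch>;
using FieldStatsCache = std::unordered_map<HandleSketch, int, HasherSketch>;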
@@ -93,7 +107,7 @@ class FieldStatsCollector : public ObjectVisitor {
};
FieldStatsCollector::JSObjectFieldStats
-FieldStatsCollector::GetInobjectFieldStats(Map* map) {
+FieldStatsCollector::GetInobjectFieldStats(Map map) {
auto iter = field_stats_cache_.find(map);
if (iter != field_stats_cache_.end()) {
return iter->second;
@@ -103,7 +117,7 @@ FieldStatsCollector::GetInobjectFieldStats(Map* map) {
stats.embedded_fields_count_ = JSObject::GetEmbedderFieldCount(map);
if (!map->is_dictionary_map()) {
int nof = map->NumberOfOwnDescriptors();
- DescriptorArray* descriptors = map->instance_descriptors();
+ DescriptorArray descriptors = map->instance_descriptors();
for (int descriptor = 0; descriptor < nof; descriptor++) {
PropertyDetails details = descriptors->GetDetails(descriptor);
if (details.location() == kField) {
@@ -189,11 +203,12 @@ void ObjectStats::PrintJSON(const char* key) {
PrintF("{ ");
PrintKeyAndId(key, gc_count);
PrintF("\"type\": \"field_data\"");
- PrintF(", \"tagged_fields\": %zu", tagged_fields_count_ * kPointerSize);
- PrintF(", \"embedder_fields\": %zu", embedder_fields_count_ * kPointerSize);
+ PrintF(", \"tagged_fields\": %zu", tagged_fields_count_ * kTaggedSize);
+ PrintF(", \"embedder_fields\": %zu",
+ embedder_fields_count_ * kEmbedderDataSlotSize);
PrintF(", \"unboxed_double_fields\": %zu",
unboxed_double_fields_count_ * kDoubleSize);
- PrintF(", \"other_raw_fields\": %zu", raw_fields_count_ * kPointerSize);
+ PrintF(", \"other_raw_fields\": %zu", raw_fields_count_ * kSystemPointerSize);
PrintF(" }\n");
// bucket_sizes
PrintF("{ ");
@@ -243,11 +258,13 @@ void ObjectStats::Dump(std::stringstream& stream) {
// field_data
stream << "\"field_data\":{";
- stream << "\"tagged_fields\":" << (tagged_fields_count_ * kPointerSize);
- stream << ",\"embedder_fields\":" << (embedder_fields_count_ * kPointerSize);
+ stream << "\"tagged_fields\":" << (tagged_fields_count_ * kTaggedSize);
+ stream << ",\"embedder_fields\":"
+ << (embedder_fields_count_ * kEmbedderDataSlotSize);
stream << ",\"unboxed_double_fields\": "
<< (unboxed_double_fields_count_ * kDoubleSize);
- stream << ",\"other_raw_fields\":" << (raw_fields_count_ * kPointerSize);
+ stream << ",\"other_raw_fields\":"
+ << (raw_fields_count_ * kSystemPointerSize);
stream << "}, ";
stream << "\"bucket_sizes\":[";
@@ -272,7 +289,7 @@ void ObjectStats::Dump(std::stringstream& stream) {
}
void ObjectStats::CheckpointObjectStats() {
- base::LockGuard<base::Mutex> lock_guard(object_stats_mutex.Pointer());
+ base::MutexGuard lock_guard(object_stats_mutex.Pointer());
MemCopy(object_counts_last_time_, object_counts_, sizeof(object_counts_));
MemCopy(object_sizes_last_time_, object_sizes_, sizeof(object_sizes_));
ClearObjectStats();
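base::MutexGuard is, in effect, shorthand for the older base::LockGuard<base::Mutex> spelling; the behavior is unchanged, it is the same scoped RAII lock. The standard-library analogue:

#include <mutex>

std::mutex stats_mutex;
int counts[8];
int counts_last_time[8];

void CheckpointSketch() {
  // Before: base::LockGuard<base::Mutex> lock_guard(&stats_mutex);
  // After:  base::MutexGuard lock_guard(&stats_mutex);
  std::lock_guard<std::mutex> lock_guard(stats_mutex);
  for (int i = 0; i < 8; i++) counts_last_time[i] = counts[i];
}  // unlocked automatically on scope exit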
@@ -326,7 +343,7 @@ class ObjectStatsCollectorImpl {
void CollectGlobalStatistics();
enum class CollectFieldStats { kNo, kYes };
- void CollectStatistics(HeapObject* obj, Phase phase,
+ void CollectStatistics(HeapObject obj, Phase phase,
CollectFieldStats collect_field_stats);
private:
@@ -337,7 +354,7 @@ class ObjectStatsCollectorImpl {
Isolate* isolate() { return heap_->isolate(); }
- bool RecordVirtualObjectStats(HeapObject* parent, HeapObject* obj,
+ bool RecordVirtualObjectStats(HeapObject parent, HeapObject obj,
ObjectStats::VirtualInstanceType type,
size_t size, size_t over_allocated,
CowMode check_cow_array = kCheckCow);
@@ -345,53 +362,53 @@ class ObjectStatsCollectorImpl {
ObjectStats::VirtualInstanceType type,
size_t size);
// Gets size from |obj| and assumes no over-allocating.
- bool RecordSimpleVirtualObjectStats(HeapObject* parent, HeapObject* obj,
+ bool RecordSimpleVirtualObjectStats(HeapObject parent, HeapObject obj,
ObjectStats::VirtualInstanceType type);
// For HashTable it is possible to compute over-allocated memory.
- void RecordHashTableVirtualObjectStats(HeapObject* parent,
- FixedArray* hash_table,
+ void RecordHashTableVirtualObjectStats(HeapObject parent,
+ FixedArray hash_table,
ObjectStats::VirtualInstanceType type);
- bool SameLiveness(HeapObject* obj1, HeapObject* obj2);
- bool CanRecordFixedArray(FixedArrayBase* array);
- bool IsCowArray(FixedArrayBase* array);
+ bool SameLiveness(HeapObject obj1, HeapObject obj2);
+ bool CanRecordFixedArray(FixedArrayBase array);
+ bool IsCowArray(FixedArrayBase array);
// Blacklist for objects that should not be recorded using
// VirtualObjectStats and RecordSimpleVirtualObjectStats. For recording those
// objects, dispatch to the low-level ObjectStats::RecordObjectStats manually.
- bool ShouldRecordObject(HeapObject* object, CowMode check_cow_array);
+ bool ShouldRecordObject(HeapObject object, CowMode check_cow_array);
- void RecordObjectStats(HeapObject* obj, InstanceType type, size_t size);
+ void RecordObjectStats(HeapObject obj, InstanceType type, size_t size);
// Specific recursion into constant pool or embedded code objects. Records
// FixedArrays and Tuple2.
void RecordVirtualObjectsForConstantPoolOrEmbeddedObjects(
- HeapObject* parent, HeapObject* object,
+ HeapObject parent, HeapObject object,
ObjectStats::VirtualInstanceType type);
// Details.
- void RecordVirtualAllocationSiteDetails(AllocationSite* site);
- void RecordVirtualBytecodeArrayDetails(BytecodeArray* bytecode);
- void RecordVirtualCodeDetails(Code* code);
- void RecordVirtualContext(Context* context);
- void RecordVirtualFeedbackVectorDetails(FeedbackVector* vector);
- void RecordVirtualFixedArrayDetails(FixedArray* array);
- void RecordVirtualFunctionTemplateInfoDetails(FunctionTemplateInfo* fti);
- void RecordVirtualJSGlobalObjectDetails(JSGlobalObject* object);
- void RecordVirtualJSCollectionDetails(JSObject* object);
- void RecordVirtualJSObjectDetails(JSObject* object);
- void RecordVirtualMapDetails(Map* map);
- void RecordVirtualScriptDetails(Script* script);
- void RecordVirtualExternalStringDetails(ExternalString* script);
- void RecordVirtualSharedFunctionInfoDetails(SharedFunctionInfo* info);
- void RecordVirtualJSFunctionDetails(JSFunction* function);
+ void RecordVirtualAllocationSiteDetails(AllocationSite site);
+ void RecordVirtualBytecodeArrayDetails(BytecodeArray bytecode);
+ void RecordVirtualCodeDetails(Code code);
+ void RecordVirtualContext(Context context);
+ void RecordVirtualFeedbackVectorDetails(FeedbackVector vector);
+ void RecordVirtualFixedArrayDetails(FixedArray array);
+ void RecordVirtualFunctionTemplateInfoDetails(FunctionTemplateInfo fti);
+ void RecordVirtualJSGlobalObjectDetails(JSGlobalObject object);
+ void RecordVirtualJSCollectionDetails(JSObject object);
+ void RecordVirtualJSObjectDetails(JSObject object);
+ void RecordVirtualMapDetails(Map map);
+ void RecordVirtualScriptDetails(Script script);
+ void RecordVirtualExternalStringDetails(ExternalString script);
+ void RecordVirtualSharedFunctionInfoDetails(SharedFunctionInfo info);
+ void RecordVirtualJSFunctionDetails(JSFunction function);
void RecordVirtualArrayBoilerplateDescription(
- ArrayBoilerplateDescription* description);
+ ArrayBoilerplateDescription description);
Heap* heap_;
ObjectStats* stats_;
MarkCompactCollector::NonAtomicMarkingState* marking_state_;
- std::unordered_set<HeapObject*> virtual_objects_;
+ std::unordered_set<HeapObject, Object::Hasher> virtual_objects_;
std::unordered_set<Address> external_resources_;
FieldStatsCollector field_stats_collector_;
};
@@ -406,10 +423,10 @@ ObjectStatsCollectorImpl::ObjectStatsCollectorImpl(Heap* heap,
&stats->tagged_fields_count_, &stats->embedder_fields_count_,
&stats->unboxed_double_fields_count_, &stats->raw_fields_count_) {}
-bool ObjectStatsCollectorImpl::ShouldRecordObject(HeapObject* obj,
+bool ObjectStatsCollectorImpl::ShouldRecordObject(HeapObject obj,
CowMode check_cow_array) {
if (obj->IsFixedArrayExact()) {
- FixedArray* fixed_array = FixedArray::cast(obj);
+ FixedArray fixed_array = FixedArray::cast(obj);
bool cow_check = check_cow_array == kIgnoreCow || !IsCowArray(fixed_array);
return CanRecordFixedArray(fixed_array) && cow_check;
}
@@ -418,7 +435,7 @@ bool ObjectStatsCollectorImpl::ShouldRecordObject(HeapObject* obj,
}
void ObjectStatsCollectorImpl::RecordHashTableVirtualObjectStats(
- HeapObject* parent, FixedArray* hash_table,
+ HeapObject parent, FixedArray hash_table,
ObjectStats::VirtualInstanceType type) {
CHECK(hash_table->IsHashTable());
// TODO(mlippautz): Implement over allocation for hash tables.
@@ -427,14 +444,13 @@ void ObjectStatsCollectorImpl::RecordHashTableVirtualObjectStats(
}
bool ObjectStatsCollectorImpl::RecordSimpleVirtualObjectStats(
- HeapObject* parent, HeapObject* obj,
- ObjectStats::VirtualInstanceType type) {
+ HeapObject parent, HeapObject obj, ObjectStats::VirtualInstanceType type) {
return RecordVirtualObjectStats(parent, obj, type, obj->Size(),
ObjectStats::kNoOverAllocation, kCheckCow);
}
bool ObjectStatsCollectorImpl::RecordVirtualObjectStats(
- HeapObject* parent, HeapObject* obj, ObjectStats::VirtualInstanceType type,
+ HeapObject parent, HeapObject obj, ObjectStats::VirtualInstanceType type,
size_t size, size_t over_allocated, CowMode check_cow_array) {
if (!SameLiveness(parent, obj) || !ShouldRecordObject(obj, check_cow_array)) {
return false;
@@ -457,9 +473,9 @@ void ObjectStatsCollectorImpl::RecordExternalResourceStats(
}
void ObjectStatsCollectorImpl::RecordVirtualAllocationSiteDetails(
- AllocationSite* site) {
+ AllocationSite site) {
if (!site->PointsToLiteral()) return;
- JSObject* boilerplate = site->boilerplate();
+ JSObject boilerplate = site->boilerplate();
if (boilerplate->IsJSArray()) {
RecordSimpleVirtualObjectStats(site, boilerplate,
ObjectStats::JS_ARRAY_BOILERPLATE_TYPE);
@@ -471,22 +487,22 @@ void ObjectStatsCollectorImpl::RecordVirtualAllocationSiteDetails(
if (boilerplate->HasFastProperties()) {
// We'll mis-classify the empty_property_array here. Given that there is a
// single instance, this is negligible.
- PropertyArray* properties = boilerplate->property_array();
+ PropertyArray properties = boilerplate->property_array();
RecordSimpleVirtualObjectStats(
site, properties, ObjectStats::BOILERPLATE_PROPERTY_ARRAY_TYPE);
} else {
- NameDictionary* properties = boilerplate->property_dictionary();
+ NameDictionary properties = boilerplate->property_dictionary();
RecordSimpleVirtualObjectStats(
site, properties, ObjectStats::BOILERPLATE_PROPERTY_DICTIONARY_TYPE);
}
}
- FixedArrayBase* elements = boilerplate->elements();
+ FixedArrayBase elements = boilerplate->elements();
RecordSimpleVirtualObjectStats(site, elements,
ObjectStats::BOILERPLATE_ELEMENTS_TYPE);
}
void ObjectStatsCollectorImpl::RecordVirtualFunctionTemplateInfoDetails(
- FunctionTemplateInfo* fti) {
+ FunctionTemplateInfo fti) {
// named_property_handler and indexed_property_handler are recorded as
// INTERCEPTOR_INFO_TYPE.
if (!fti->call_code()->IsUndefined(isolate())) {
@@ -494,62 +510,62 @@ void ObjectStatsCollectorImpl::RecordVirtualFunctionTemplateInfoDetails(
fti, CallHandlerInfo::cast(fti->call_code()),
ObjectStats::FUNCTION_TEMPLATE_INFO_ENTRIES_TYPE);
}
- if (!fti->instance_call_handler()->IsUndefined(isolate())) {
+ if (!fti->GetInstanceCallHandler()->IsUndefined(isolate())) {
RecordSimpleVirtualObjectStats(
- fti, CallHandlerInfo::cast(fti->instance_call_handler()),
+ fti, CallHandlerInfo::cast(fti->GetInstanceCallHandler()),
ObjectStats::FUNCTION_TEMPLATE_INFO_ENTRIES_TYPE);
}
}
void ObjectStatsCollectorImpl::RecordVirtualJSGlobalObjectDetails(
- JSGlobalObject* object) {
+ JSGlobalObject object) {
// Properties.
- GlobalDictionary* properties = object->global_dictionary();
+ GlobalDictionary properties = object->global_dictionary();
RecordHashTableVirtualObjectStats(object, properties,
ObjectStats::GLOBAL_PROPERTIES_TYPE);
// Elements.
- FixedArrayBase* elements = object->elements();
+ FixedArrayBase elements = object->elements();
RecordSimpleVirtualObjectStats(object, elements,
ObjectStats::GLOBAL_ELEMENTS_TYPE);
}
void ObjectStatsCollectorImpl::RecordVirtualJSCollectionDetails(
- JSObject* object) {
+ JSObject object) {
if (object->IsJSMap()) {
RecordSimpleVirtualObjectStats(
object, FixedArray::cast(JSMap::cast(object)->table()),
- ObjectStats::JS_COLLETION_TABLE_TYPE);
+ ObjectStats::JS_COLLECTION_TABLE_TYPE);
}
if (object->IsJSSet()) {
RecordSimpleVirtualObjectStats(
object, FixedArray::cast(JSSet::cast(object)->table()),
- ObjectStats::JS_COLLETION_TABLE_TYPE);
+ ObjectStats::JS_COLLECTION_TABLE_TYPE);
}
}
-void ObjectStatsCollectorImpl::RecordVirtualJSObjectDetails(JSObject* object) {
+void ObjectStatsCollectorImpl::RecordVirtualJSObjectDetails(JSObject object) {
// JSGlobalObject is recorded separately.
if (object->IsJSGlobalObject()) return;
// Properties.
if (object->HasFastProperties()) {
- PropertyArray* properties = object->property_array();
+ PropertyArray properties = object->property_array();
CHECK_EQ(PROPERTY_ARRAY_TYPE, properties->map()->instance_type());
} else {
- NameDictionary* properties = object->property_dictionary();
+ NameDictionary properties = object->property_dictionary();
RecordHashTableVirtualObjectStats(
object, properties, ObjectStats::OBJECT_PROPERTY_DICTIONARY_TYPE);
}
// Elements.
- FixedArrayBase* elements = object->elements();
+ FixedArrayBase elements = object->elements();
RecordSimpleVirtualObjectStats(object, elements, ObjectStats::ELEMENTS_TYPE);
}
static ObjectStats::VirtualInstanceType GetFeedbackSlotType(
- MaybeObject* maybe_obj, FeedbackSlotKind kind, Isolate* isolate) {
+ MaybeObject maybe_obj, FeedbackSlotKind kind, Isolate* isolate) {
if (maybe_obj->IsCleared())
return ObjectStats::FEEDBACK_VECTOR_SLOT_OTHER_TYPE;
- Object* obj = maybe_obj->GetHeapObjectOrSmi();
+ Object obj = maybe_obj->GetHeapObjectOrSmi();
switch (kind) {
case FeedbackSlotKind::kCall:
if (obj == *isolate->factory()->uninitialized_symbol() ||
@@ -591,64 +607,61 @@ static ObjectStats::VirtualInstanceType GetFeedbackSlotType(
}
void ObjectStatsCollectorImpl::RecordVirtualFeedbackVectorDetails(
- FeedbackVector* vector) {
- if (virtual_objects_.find(vector) == virtual_objects_.end()) {
- // Manually insert the feedback vector into the virtual object list, since
- // we're logging its component parts separately.
- virtual_objects_.insert(vector);
-
- size_t calculated_size = 0;
-
- // Log the feedback vector's header (fixed fields).
- size_t header_size =
- reinterpret_cast<Address>(vector->slots_start()) - vector->address();
- stats_->RecordVirtualObjectStats(ObjectStats::FEEDBACK_VECTOR_HEADER_TYPE,
- header_size,
- ObjectStats::kNoOverAllocation);
- calculated_size += header_size;
-
- // Iterate over the feedback slots and log each one.
- if (!vector->shared_function_info()->HasFeedbackMetadata()) return;
-
- FeedbackMetadataIterator it(vector->metadata());
- while (it.HasNext()) {
- FeedbackSlot slot = it.Next();
- // Log the entry (or entries) taken up by this slot.
- size_t slot_size = it.entry_size() * kPointerSize;
- stats_->RecordVirtualObjectStats(
- GetFeedbackSlotType(vector->Get(slot), it.kind(), heap_->isolate()),
- slot_size, ObjectStats::kNoOverAllocation);
- calculated_size += slot_size;
-
- // Log the monomorphic/polymorphic helper objects that this slot owns.
- for (int i = 0; i < it.entry_size(); i++) {
- MaybeObject* raw_object = vector->get(slot.ToInt() + i);
- HeapObject* object;
- if (raw_object->GetHeapObject(&object)) {
- if (object->IsCell() || object->IsWeakFixedArray()) {
- RecordSimpleVirtualObjectStats(
- vector, object, ObjectStats::FEEDBACK_VECTOR_ENTRY_TYPE);
- }
+ FeedbackVector vector) {
+ if (virtual_objects_.find(vector) != virtual_objects_.end()) return;
+ // Manually insert the feedback vector into the virtual object list, since
+ // we're logging its component parts separately.
+ virtual_objects_.insert(vector);
+
+ size_t calculated_size = 0;
+
+ // Log the feedback vector's header (fixed fields).
+ size_t header_size = vector->slots_start().address() - vector->address();
+ stats_->RecordVirtualObjectStats(ObjectStats::FEEDBACK_VECTOR_HEADER_TYPE,
+ header_size, ObjectStats::kNoOverAllocation);
+ calculated_size += header_size;
+
+ // Iterate over the feedback slots and log each one.
+ if (!vector->shared_function_info()->HasFeedbackMetadata()) return;
+
+ FeedbackMetadataIterator it(vector->metadata());
+ while (it.HasNext()) {
+ FeedbackSlot slot = it.Next();
+ // Log the entry (or entries) taken up by this slot.
+ size_t slot_size = it.entry_size() * kTaggedSize;
+ stats_->RecordVirtualObjectStats(
+ GetFeedbackSlotType(vector->Get(slot), it.kind(), heap_->isolate()),
+ slot_size, ObjectStats::kNoOverAllocation);
+ calculated_size += slot_size;
+
+ // Log the monomorphic/polymorphic helper objects that this slot owns.
+ for (int i = 0; i < it.entry_size(); i++) {
+ MaybeObject raw_object = vector->get(slot.ToInt() + i);
+ HeapObject object;
+ if (raw_object->GetHeapObject(&object)) {
+ if (object->IsCell() || object->IsWeakFixedArray()) {
+ RecordSimpleVirtualObjectStats(
+ vector, object, ObjectStats::FEEDBACK_VECTOR_ENTRY_TYPE);
}
}
}
-
- CHECK_EQ(calculated_size, vector->Size());
}
+
+ CHECK_EQ(calculated_size, vector->Size());
}
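The rewrite of RecordVirtualFeedbackVectorDetails is behavior-preserving: the if (not yet seen) wrapper becomes a guard clause, un-indenting the body one level, and the CHECK_EQ stays at the end of the logged path. The pattern in miniature:

#include <unordered_set>

std::unordered_set<int> seen;

void RecordSketch(int vector_id) {
  if (seen.count(vector_id) != 0) return;  // early return replaces the wrapper
  seen.insert(vector_id);
  // ... log header and slots at function scope rather than nested ...
}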
void ObjectStatsCollectorImpl::RecordVirtualFixedArrayDetails(
- FixedArray* array) {
+ FixedArray array) {
if (IsCowArray(array)) {
- RecordVirtualObjectStats(nullptr, array, ObjectStats::COW_ARRAY_TYPE,
+ RecordVirtualObjectStats(HeapObject(), array, ObjectStats::COW_ARRAY_TYPE,
array->Size(), ObjectStats::kNoOverAllocation,
kIgnoreCow);
}
}
void ObjectStatsCollectorImpl::CollectStatistics(
- HeapObject* obj, Phase phase, CollectFieldStats collect_field_stats) {
- Map* map = obj->map();
+ HeapObject obj, Phase phase, CollectFieldStats collect_field_stats) {
+ Map map = obj->map();
switch (phase) {
case kPhase1:
if (obj->IsFeedbackVector()) {
@@ -702,42 +715,39 @@ void ObjectStatsCollectorImpl::CollectStatistics(
void ObjectStatsCollectorImpl::CollectGlobalStatistics() {
// Iterate boilerplates first to disambiguate them from regular JS objects.
- Object* list = heap_->allocation_sites_list();
+ Object list = heap_->allocation_sites_list();
while (list->IsAllocationSite()) {
- AllocationSite* site = AllocationSite::cast(list);
+ AllocationSite site = AllocationSite::cast(list);
RecordVirtualAllocationSiteDetails(site);
list = site->weak_next();
}
// FixedArray.
- RecordSimpleVirtualObjectStats(nullptr, heap_->serialized_objects(),
+ RecordSimpleVirtualObjectStats(HeapObject(), heap_->serialized_objects(),
ObjectStats::SERIALIZED_OBJECTS_TYPE);
- RecordSimpleVirtualObjectStats(nullptr, heap_->number_string_cache(),
+ RecordSimpleVirtualObjectStats(HeapObject(), heap_->number_string_cache(),
ObjectStats::NUMBER_STRING_CACHE_TYPE);
RecordSimpleVirtualObjectStats(
- nullptr, heap_->single_character_string_cache(),
+ HeapObject(), heap_->single_character_string_cache(),
ObjectStats::SINGLE_CHARACTER_STRING_CACHE_TYPE);
- RecordSimpleVirtualObjectStats(nullptr, heap_->string_split_cache(),
+ RecordSimpleVirtualObjectStats(HeapObject(), heap_->string_split_cache(),
ObjectStats::STRING_SPLIT_CACHE_TYPE);
- RecordSimpleVirtualObjectStats(nullptr, heap_->regexp_multiple_cache(),
+ RecordSimpleVirtualObjectStats(HeapObject(), heap_->regexp_multiple_cache(),
ObjectStats::REGEXP_MULTIPLE_CACHE_TYPE);
- RecordSimpleVirtualObjectStats(nullptr, heap_->retained_maps(),
+ RecordSimpleVirtualObjectStats(HeapObject(), heap_->retained_maps(),
ObjectStats::RETAINED_MAPS_TYPE);
// WeakArrayList.
RecordSimpleVirtualObjectStats(
- nullptr, WeakArrayList::cast(heap_->noscript_shared_function_infos()),
+ HeapObject(),
+ WeakArrayList::cast(heap_->noscript_shared_function_infos()),
ObjectStats::NOSCRIPT_SHARED_FUNCTION_INFOS_TYPE);
- RecordSimpleVirtualObjectStats(nullptr,
+ RecordSimpleVirtualObjectStats(HeapObject(),
WeakArrayList::cast(heap_->script_list()),
ObjectStats::SCRIPT_LIST_TYPE);
-
- // HashTable.
- RecordHashTableVirtualObjectStats(nullptr, heap_->code_stubs(),
- ObjectStats::CODE_STUBS_TABLE_TYPE);
}
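With value-type handles there is no nullptr to pass for "no parent"; a default-constructed HeapObject() becomes the sentinel, and callees such as SameLiveness below switch from == nullptr to is_null(). A sketch of the convention (types invented):

#include <cstdint>

struct ObjSketch {
  uintptr_t ptr = 0;
  bool is_null() const { return ptr == 0; }
};

bool SameLivenessSketch(ObjSketch a, ObjSketch b) {
  // Before: return obj1 == nullptr || obj2 == nullptr || colors match;
  return a.is_null() || b.is_null() /* || colors match */;
}

void RecordTopLevelSketch(ObjSketch obj) {
  SameLivenessSketch(ObjSketch{}, obj);  // ObjSketch{} is the old nullptr parent
}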
-void ObjectStatsCollectorImpl::RecordObjectStats(HeapObject* obj,
+void ObjectStatsCollectorImpl::RecordObjectStats(HeapObject obj,
InstanceType type,
size_t size) {
if (virtual_objects_.find(obj) == virtual_objects_.end()) {
@@ -745,32 +755,31 @@ void ObjectStatsCollectorImpl::RecordObjectStats(HeapObject* obj,
}
}
-bool ObjectStatsCollectorImpl::CanRecordFixedArray(FixedArrayBase* array) {
+bool ObjectStatsCollectorImpl::CanRecordFixedArray(FixedArrayBase array) {
ReadOnlyRoots roots(heap_);
return array != roots.empty_fixed_array() &&
array != roots.empty_sloppy_arguments_elements() &&
array != roots.empty_slow_element_dictionary() &&
- array != heap_->empty_property_dictionary();
+ array != roots.empty_property_dictionary();
}
-bool ObjectStatsCollectorImpl::IsCowArray(FixedArrayBase* array) {
+bool ObjectStatsCollectorImpl::IsCowArray(FixedArrayBase array) {
return array->map() == ReadOnlyRoots(heap_).fixed_cow_array_map();
}
-bool ObjectStatsCollectorImpl::SameLiveness(HeapObject* obj1,
- HeapObject* obj2) {
- return obj1 == nullptr || obj2 == nullptr ||
+bool ObjectStatsCollectorImpl::SameLiveness(HeapObject obj1, HeapObject obj2) {
+ return obj1.is_null() || obj2.is_null() ||
marking_state_->Color(obj1) == marking_state_->Color(obj2);
}
-void ObjectStatsCollectorImpl::RecordVirtualMapDetails(Map* map) {
+void ObjectStatsCollectorImpl::RecordVirtualMapDetails(Map map) {
// TODO(mlippautz): map->dependent_code(): DEPENDENT_CODE_TYPE.
- DescriptorArray* array = map->instance_descriptors();
+ DescriptorArray array = map->instance_descriptors();
if (map->owns_descriptors() &&
array != ReadOnlyRoots(heap_).empty_descriptor_array()) {
// DescriptorArray has its own instance type.
- EnumCache* enum_cache = array->GetEnumCache();
+ EnumCache enum_cache = array->enum_cache();
RecordSimpleVirtualObjectStats(array, enum_cache->keys(),
ObjectStats::ENUM_CACHE_TYPE);
RecordSimpleVirtualObjectStats(array, enum_cache->indices(),
@@ -779,8 +788,8 @@ void ObjectStatsCollectorImpl::RecordVirtualMapDetails(Map* map) {
if (map->is_prototype_map()) {
if (map->prototype_info()->IsPrototypeInfo()) {
- PrototypeInfo* info = PrototypeInfo::cast(map->prototype_info());
- Object* users = info->prototype_users();
+ PrototypeInfo info = PrototypeInfo::cast(map->prototype_info());
+ Object users = info->prototype_users();
if (users->IsWeakFixedArray()) {
RecordSimpleVirtualObjectStats(map, WeakArrayList::cast(users),
ObjectStats::PROTOTYPE_USERS_TYPE);
@@ -789,18 +798,18 @@ void ObjectStatsCollectorImpl::RecordVirtualMapDetails(Map* map) {
}
}
-void ObjectStatsCollectorImpl::RecordVirtualScriptDetails(Script* script) {
+void ObjectStatsCollectorImpl::RecordVirtualScriptDetails(Script script) {
RecordSimpleVirtualObjectStats(
script, script->shared_function_infos(),
ObjectStats::SCRIPT_SHARED_FUNCTION_INFOS_TYPE);
// Log the size of external source code.
- Object* raw_source = script->source();
+ Object raw_source = script->source();
if (raw_source->IsExternalString()) {
// The contents of external strings aren't on the heap, so we have to record
// them manually. The on-heap String object is recorded independently in
// the normal pass.
- ExternalString* string = ExternalString::cast(raw_source);
+ ExternalString string = ExternalString::cast(raw_source);
Address resource = string->resource_as_address();
size_t off_heap_size = string->ExternalPayloadSize();
RecordExternalResourceStats(
@@ -810,7 +819,7 @@ void ObjectStatsCollectorImpl::RecordVirtualScriptDetails(Script* script) {
: ObjectStats::SCRIPT_SOURCE_EXTERNAL_TWO_BYTE_TYPE,
off_heap_size);
} else if (raw_source->IsString()) {
- String* source = String::cast(raw_source);
+ String source = String::cast(raw_source);
RecordSimpleVirtualObjectStats(
script, source,
source->IsOneByteRepresentation()
@@ -820,7 +829,7 @@ void ObjectStatsCollectorImpl::RecordVirtualScriptDetails(Script* script) {
}
void ObjectStatsCollectorImpl::RecordVirtualExternalStringDetails(
- ExternalString* string) {
+ ExternalString string) {
// Track the external string resource size in a separate category.
Address resource = string->resource_as_address();
@@ -834,24 +843,24 @@ void ObjectStatsCollectorImpl::RecordVirtualExternalStringDetails(
}
void ObjectStatsCollectorImpl::RecordVirtualSharedFunctionInfoDetails(
- SharedFunctionInfo* info) {
+ SharedFunctionInfo info) {
// Uncompiled SharedFunctionInfo gets its own category.
if (!info->is_compiled()) {
RecordSimpleVirtualObjectStats(
- nullptr, info, ObjectStats::UNCOMPILED_SHARED_FUNCTION_INFO_TYPE);
+ HeapObject(), info, ObjectStats::UNCOMPILED_SHARED_FUNCTION_INFO_TYPE);
}
}
void ObjectStatsCollectorImpl::RecordVirtualJSFunctionDetails(
- JSFunction* function) {
+ JSFunction function) {
// Uncompiled JSFunctions get their own category.
if (!function->is_compiled()) {
- RecordSimpleVirtualObjectStats(nullptr, function,
+ RecordSimpleVirtualObjectStats(HeapObject(), function,
ObjectStats::UNCOMPILED_JS_FUNCTION_TYPE);
}
}
void ObjectStatsCollectorImpl::RecordVirtualArrayBoilerplateDescription(
- ArrayBoilerplateDescription* description) {
+ ArrayBoilerplateDescription description) {
RecordVirtualObjectsForConstantPoolOrEmbeddedObjects(
description, description->constant_elements(),
ObjectStats::ARRAY_BOILERPLATE_DESCRIPTION_ELEMENTS_TYPE);
@@ -859,13 +868,13 @@ void ObjectStatsCollectorImpl::RecordVirtualArrayBoilerplateDescription(
void ObjectStatsCollectorImpl::
RecordVirtualObjectsForConstantPoolOrEmbeddedObjects(
- HeapObject* parent, HeapObject* object,
+ HeapObject parent, HeapObject object,
ObjectStats::VirtualInstanceType type) {
if (!RecordSimpleVirtualObjectStats(parent, object, type)) return;
if (object->IsFixedArrayExact()) {
- FixedArray* array = FixedArray::cast(object);
+ FixedArray array = FixedArray::cast(object);
for (int i = 0; i < array->length(); i++) {
- Object* entry = array->get(i);
+ Object entry = array->get(i);
if (!entry->IsHeapObject()) continue;
RecordVirtualObjectsForConstantPoolOrEmbeddedObjects(
array, HeapObject::cast(entry), type);
@@ -874,15 +883,15 @@ void ObjectStatsCollectorImpl::
}
void ObjectStatsCollectorImpl::RecordVirtualBytecodeArrayDetails(
- BytecodeArray* bytecode) {
+ BytecodeArray bytecode) {
RecordSimpleVirtualObjectStats(
bytecode, bytecode->constant_pool(),
ObjectStats::BYTECODE_ARRAY_CONSTANT_POOL_TYPE);
// FixedArrays on the constant pool are used for holding descriptor information.
// They are shared with optimized code.
- FixedArray* constant_pool = FixedArray::cast(bytecode->constant_pool());
+ FixedArray constant_pool = FixedArray::cast(bytecode->constant_pool());
for (int i = 0; i < constant_pool->length(); i++) {
- Object* entry = constant_pool->get(i);
+ Object entry = constant_pool->get(i);
if (entry->IsFixedArrayExact()) {
RecordVirtualObjectsForConstantPoolOrEmbeddedObjects(
constant_pool, HeapObject::cast(entry),
@@ -892,6 +901,8 @@ void ObjectStatsCollectorImpl::RecordVirtualBytecodeArrayDetails(
RecordSimpleVirtualObjectStats(
bytecode, bytecode->handler_table(),
ObjectStats::BYTECODE_ARRAY_HANDLER_TABLE_TYPE);
+ RecordSimpleVirtualObjectStats(bytecode, bytecode->SourcePositionTable(),
+ ObjectStats::SOURCE_POSITION_TABLE_TYPE);
}
namespace {
@@ -912,14 +923,14 @@ ObjectStats::VirtualInstanceType CodeKindToVirtualInstanceType(
} // namespace
-void ObjectStatsCollectorImpl::RecordVirtualCodeDetails(Code* code) {
- RecordSimpleVirtualObjectStats(nullptr, code,
+void ObjectStatsCollectorImpl::RecordVirtualCodeDetails(Code code) {
+ RecordSimpleVirtualObjectStats(HeapObject(), code,
CodeKindToVirtualInstanceType(code->kind()));
RecordSimpleVirtualObjectStats(code, code->deoptimization_data(),
ObjectStats::DEOPTIMIZATION_DATA_TYPE);
RecordSimpleVirtualObjectStats(code, code->relocation_info(),
ObjectStats::RELOC_INFO_TYPE);
- Object* source_position_table = code->source_position_table();
+ Object source_position_table = code->source_position_table();
if (source_position_table->IsSourcePositionTableWithFrameCache()) {
RecordSimpleVirtualObjectStats(
code,
@@ -932,7 +943,7 @@ void ObjectStatsCollectorImpl::RecordVirtualCodeDetails(Code* code) {
ObjectStats::SOURCE_POSITION_TABLE_TYPE);
}
if (code->kind() == Code::Kind::OPTIMIZED_FUNCTION) {
- DeoptimizationData* input_data =
+ DeoptimizationData input_data =
DeoptimizationData::cast(code->deoptimization_data());
if (input_data->length() > 0) {
RecordSimpleVirtualObjectStats(code->deoptimization_data(),
@@ -944,7 +955,7 @@ void ObjectStatsCollectorImpl::RecordVirtualCodeDetails(Code* code) {
for (RelocIterator it(code, mode_mask); !it.done(); it.next()) {
RelocInfo::Mode mode = it.rinfo()->rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
- Object* target = it.rinfo()->target_object();
+ Object target = it.rinfo()->target_object();
if (target->IsFixedArrayExact()) {
RecordVirtualObjectsForConstantPoolOrEmbeddedObjects(
code, HeapObject::cast(target), ObjectStats::EMBEDDED_OBJECT_TYPE);
@@ -953,13 +964,13 @@ void ObjectStatsCollectorImpl::RecordVirtualCodeDetails(Code* code) {
}
}
-void ObjectStatsCollectorImpl::RecordVirtualContext(Context* context) {
+void ObjectStatsCollectorImpl::RecordVirtualContext(Context context) {
if (context->IsNativeContext()) {
RecordObjectStats(context, NATIVE_CONTEXT_TYPE, context->Size());
} else if (context->IsFunctionContext()) {
RecordObjectStats(context, FUNCTION_CONTEXT_TYPE, context->Size());
} else {
- RecordSimpleVirtualObjectStats(nullptr, context,
+ RecordSimpleVirtualObjectStats(HeapObject(), context,
ObjectStats::OTHER_CONTEXT_TYPE);
}
}
@@ -975,7 +986,7 @@ class ObjectStatsVisitor {
heap->mark_compact_collector()->non_atomic_marking_state()),
phase_(phase) {}
- bool Visit(HeapObject* obj, int size) {
+ bool Visit(HeapObject obj, int size) {
if (marking_state_->IsBlack(obj)) {
live_collector_->CollectStatistics(
obj, phase_, ObjectStatsCollectorImpl::CollectFieldStats::kYes);
@@ -998,11 +1009,11 @@ namespace {
void IterateHeap(Heap* heap, ObjectStatsVisitor* visitor) {
SpaceIterator space_it(heap);
- HeapObject* obj = nullptr;
+ HeapObject obj;
while (space_it.has_next()) {
std::unique_ptr<ObjectIterator> it(space_it.next()->GetObjectIterator());
ObjectIterator* obj_it = it.get();
- while ((obj = obj_it->Next()) != nullptr) {
+ for (obj = obj_it->Next(); !obj.is_null(); obj = obj_it->Next()) {
visitor->Visit(obj, obj->Size());
}
}
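The loop above is the same migration at work: a value handle cannot be compared against nullptr, so the assign-and-test while loop becomes a for loop over is_null(). The shape, with an assumed iterator interface rather than V8's ObjectIterator:

#include <vector>

struct Handle {
  int id = 0;
  bool is_null() const { return id == 0; }
};

struct IteratorSketch {
  std::vector<Handle> items;
  size_t i = 0;
  Handle Next() { return i < items.size() ? items[i++] : Handle{}; }
};

void VisitAllSketch(IteratorSketch* it) {
  // Before: while ((obj = it->Next()) != nullptr) { ... }
  for (Handle obj = it->Next(); !obj.is_null(); obj = it->Next()) {
    // visitor->Visit(obj, ...);
  }
}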
diff --git a/deps/v8/src/heap/object-stats.h b/deps/v8/src/heap/object-stats.h
index 7914f09881..b7f3aefd7f 100644
--- a/deps/v8/src/heap/object-stats.h
+++ b/deps/v8/src/heap/object-stats.h
@@ -22,7 +22,6 @@
V(BOILERPLATE_PROPERTY_DICTIONARY_TYPE) \
V(BYTECODE_ARRAY_CONSTANT_POOL_TYPE) \
V(BYTECODE_ARRAY_HANDLER_TABLE_TYPE) \
- V(CODE_STUBS_TABLE_TYPE) \
V(COW_ARRAY_TYPE) \
V(DEOPTIMIZATION_DATA_TYPE) \
V(DEPENDENT_CODE_TYPE) \
@@ -44,7 +43,7 @@
V(GLOBAL_ELEMENTS_TYPE) \
V(GLOBAL_PROPERTIES_TYPE) \
V(JS_ARRAY_BOILERPLATE_TYPE) \
- V(JS_COLLETION_TABLE_TYPE) \
+ V(JS_COLLECTION_TABLE_TYPE) \
V(JS_OBJECT_BOILERPLATE_TYPE) \
V(NOSCRIPT_SHARED_FUNCTION_INFOS_TYPE) \
V(NUMBER_STRING_CACHE_TYPE) \
diff --git a/deps/v8/src/heap/objects-visiting-inl.h b/deps/v8/src/heap/objects-visiting-inl.h
index c7a4f70f01..6f2d2c58d1 100644
--- a/deps/v8/src/heap/objects-visiting-inl.h
+++ b/deps/v8/src/heap/objects-visiting-inl.h
@@ -10,41 +10,41 @@
#include "src/heap/array-buffer-tracker.h"
#include "src/heap/embedder-tracing.h"
#include "src/heap/mark-compact.h"
-#include "src/macro-assembler.h"
#include "src/objects-body-descriptors-inl.h"
#include "src/objects-inl.h"
+#include "src/objects/free-space-inl.h"
+#include "src/objects/js-weak-refs-inl.h"
+#include "src/objects/oddball.h"
+#include "src/wasm/wasm-objects.h"
namespace v8 {
namespace internal {
template <typename ResultType, typename ConcreteVisitor>
template <typename T>
-T* HeapVisitor<ResultType, ConcreteVisitor>::Cast(HeapObject* object) {
+T HeapVisitor<ResultType, ConcreteVisitor>::Cast(HeapObject object) {
return T::cast(object);
}
template <typename ResultType, typename ConcreteVisitor>
-ResultType HeapVisitor<ResultType, ConcreteVisitor>::Visit(HeapObject* object) {
+ResultType HeapVisitor<ResultType, ConcreteVisitor>::Visit(HeapObject object) {
return Visit(object->map(), object);
}
template <typename ResultType, typename ConcreteVisitor>
-ResultType HeapVisitor<ResultType, ConcreteVisitor>::Visit(Map* map,
- HeapObject* object) {
+ResultType HeapVisitor<ResultType, ConcreteVisitor>::Visit(Map map,
+ HeapObject object) {
ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this);
switch (map->visitor_id()) {
-#define CASE(type) \
- case kVisit##type: \
- return visitor->Visit##type(map, \
- ConcreteVisitor::template Cast<type>(object));
+#define CASE(TypeName, Type) \
+ case kVisit##TypeName: \
+ return visitor->Visit##TypeName( \
+ map, ConcreteVisitor::template Cast<TypeName>(object));
TYPED_VISITOR_ID_LIST(CASE)
#undef CASE
case kVisitShortcutCandidate:
return visitor->VisitShortcutCandidate(
map, ConcreteVisitor::template Cast<ConsString>(object));
- case kVisitNativeContext:
- return visitor->VisitNativeContext(
- map, ConcreteVisitor::template Cast<Context>(object));
case kVisitDataObject:
return visitor->VisitDataObject(map, object);
case kVisitJSObjectFast:
@@ -69,15 +69,15 @@ ResultType HeapVisitor<ResultType, ConcreteVisitor>::Visit(Map* map,
template <typename ResultType, typename ConcreteVisitor>
void HeapVisitor<ResultType, ConcreteVisitor>::VisitMapPointer(
- HeapObject* host, HeapObject** map) {
- static_cast<ConcreteVisitor*>(this)->VisitPointer(
- host, reinterpret_cast<Object**>(map));
+ HeapObject host, MapWordSlot map_slot) {
+ DCHECK(!host->map_word().IsForwardingAddress());
+ static_cast<ConcreteVisitor*>(this)->VisitPointer(host, ObjectSlot(map_slot));
}
-#define VISIT(type) \
+#define VISIT(TypeName, Type) \
template <typename ResultType, typename ConcreteVisitor> \
- ResultType HeapVisitor<ResultType, ConcreteVisitor>::Visit##type( \
- Map* map, type* object) { \
+ ResultType HeapVisitor<ResultType, ConcreteVisitor>::Visit##TypeName( \
+ Map map, Type object) { \
ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this); \
if (!visitor->ShouldVisit(object)) return ResultType(); \
if (!visitor->AllowDefaultJSObjectVisit()) { \
@@ -85,10 +85,10 @@ void HeapVisitor<ResultType, ConcreteVisitor>::VisitMapPointer(
"Implement custom visitor for new JSObject subclass in " \
"concurrent marker"); \
} \
- int size = type::BodyDescriptor::SizeOf(map, object); \
+ int size = TypeName::BodyDescriptor::SizeOf(map, object); \
if (visitor->ShouldVisitMapPointer()) \
visitor->VisitMapPointer(object, object->map_slot()); \
- type::BodyDescriptor::IterateBody(map, object, size, visitor); \
+ TypeName::BodyDescriptor::IterateBody(map, object, size, visitor); \
return static_cast<ResultType>(size); \
}
TYPED_VISITOR_ID_LIST(VISIT)
@@ -96,36 +96,25 @@ TYPED_VISITOR_ID_LIST(VISIT)
template <typename ResultType, typename ConcreteVisitor>
ResultType HeapVisitor<ResultType, ConcreteVisitor>::VisitShortcutCandidate(
- Map* map, ConsString* object) {
+ Map map, ConsString object) {
return static_cast<ConcreteVisitor*>(this)->VisitConsString(map, object);
}
template <typename ResultType, typename ConcreteVisitor>
-ResultType HeapVisitor<ResultType, ConcreteVisitor>::VisitNativeContext(
- Map* map, Context* object) {
- ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this);
- if (!visitor->ShouldVisit(object)) return ResultType();
- int size = Context::BodyDescriptor::SizeOf(map, object);
- if (visitor->ShouldVisitMapPointer())
- visitor->VisitMapPointer(object, object->map_slot());
- Context::BodyDescriptor::IterateBody(map, object, size, visitor);
- return static_cast<ResultType>(size);
-}
-
-template <typename ResultType, typename ConcreteVisitor>
ResultType HeapVisitor<ResultType, ConcreteVisitor>::VisitDataObject(
- Map* map, HeapObject* object) {
+ Map map, HeapObject object) {
ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this);
if (!visitor->ShouldVisit(object)) return ResultType();
int size = map->instance_size();
- if (visitor->ShouldVisitMapPointer())
+ if (visitor->ShouldVisitMapPointer()) {
visitor->VisitMapPointer(object, object->map_slot());
+ }
return static_cast<ResultType>(size);
}
template <typename ResultType, typename ConcreteVisitor>
ResultType HeapVisitor<ResultType, ConcreteVisitor>::VisitJSObjectFast(
- Map* map, JSObject* object) {
+ Map map, JSObject object) {
ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this);
if (!visitor->ShouldVisit(object)) return ResultType();
int size = JSObject::FastBodyDescriptor::SizeOf(map, object);
@@ -137,7 +126,7 @@ ResultType HeapVisitor<ResultType, ConcreteVisitor>::VisitJSObjectFast(
template <typename ResultType, typename ConcreteVisitor>
ResultType HeapVisitor<ResultType, ConcreteVisitor>::VisitJSApiObject(
- Map* map, JSObject* object) {
+ Map map, JSObject object) {
ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this);
if (!visitor->ShouldVisit(object)) return ResultType();
int size = JSObject::BodyDescriptor::SizeOf(map, object);
@@ -149,50 +138,67 @@ ResultType HeapVisitor<ResultType, ConcreteVisitor>::VisitJSApiObject(
template <typename ResultType, typename ConcreteVisitor>
ResultType HeapVisitor<ResultType, ConcreteVisitor>::VisitStruct(
- Map* map, HeapObject* object) {
+ Map map, HeapObject object) {
ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this);
if (!visitor->ShouldVisit(object)) return ResultType();
int size = map->instance_size();
- if (visitor->ShouldVisitMapPointer())
+ if (visitor->ShouldVisitMapPointer()) {
visitor->VisitMapPointer(object, object->map_slot());
+ }
StructBodyDescriptor::IterateBody(map, object, size, visitor);
return static_cast<ResultType>(size);
}
template <typename ResultType, typename ConcreteVisitor>
ResultType HeapVisitor<ResultType, ConcreteVisitor>::VisitFreeSpace(
- Map* map, FreeSpace* object) {
+ Map map, FreeSpace object) {
ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this);
if (!visitor->ShouldVisit(object)) return ResultType();
- if (visitor->ShouldVisitMapPointer())
+ if (visitor->ShouldVisitMapPointer()) {
visitor->VisitMapPointer(object, object->map_slot());
- return static_cast<ResultType>(FreeSpace::cast(object)->size());
+ }
+ return static_cast<ResultType>(object->size());
}
template <typename ConcreteVisitor>
-int NewSpaceVisitor<ConcreteVisitor>::VisitNativeContext(Map* map,
- Context* object) {
+int NewSpaceVisitor<ConcreteVisitor>::VisitNativeContext(Map map,
+ NativeContext object) {
ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this);
- int size = Context::BodyDescriptor::SizeOf(map, object);
- Context::BodyDescriptor::IterateBody(map, object, size, visitor);
+ int size = NativeContext::BodyDescriptor::SizeOf(map, object);
+ NativeContext::BodyDescriptor::IterateBody(map, object, size, visitor);
return size;
}
template <typename ConcreteVisitor>
-int NewSpaceVisitor<ConcreteVisitor>::VisitJSApiObject(Map* map,
- JSObject* object) {
+int NewSpaceVisitor<ConcreteVisitor>::VisitJSApiObject(Map map,
+ JSObject object) {
ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this);
return visitor->VisitJSObject(map, object);
}
+template <typename ConcreteVisitor>
+int NewSpaceVisitor<ConcreteVisitor>::VisitSharedFunctionInfo(
+ Map map, SharedFunctionInfo object) {
+ UNREACHABLE();
+ return 0;
+}
+
+template <typename ConcreteVisitor>
+int NewSpaceVisitor<ConcreteVisitor>::VisitJSWeakCell(Map map,
+ JSWeakCell js_weak_cell) {
+ UNREACHABLE();
+ return 0;
+}
+
template <typename ResultType, typename ConcreteVisitor>
ResultType HeapVisitor<ResultType, ConcreteVisitor>::VisitWeakArray(
- Map* map, HeapObject* object) {
+ Map map, HeapObject object) {
ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this);
if (!visitor->ShouldVisit(object)) return ResultType();
int size = WeakArrayBodyDescriptor::SizeOf(map, object);
- if (visitor->ShouldVisitMapPointer())
+ if (visitor->ShouldVisitMapPointer()) {
visitor->VisitMapPointer(object, object->map_slot());
+ }
WeakArrayBodyDescriptor::IterateBody(map, object, size, visitor);
return size;
}
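TYPED_VISITOR_ID_LIST (see objects-visiting.h below) now passes (TypeName, Type) pairs, so the CASE and VISIT macros in this file take two parameters, presumably so the visitor-id name and the dispatched C++ type can diverge. A reduced model of the two-parameter X-macro:

#include <iostream>

struct Apple {};
struct Banana {};

#define FRUIT_LIST(V) \
  V(Apple, Apple)     \
  V(RipeBanana, Banana)  /* id and dispatch type may diverge */

#define DECLARE_VISIT(Name, Type) void Visit##Name(const Type& fruit);
FRUIT_LIST(DECLARE_VISIT)
#undef DECLARE_VISIT

void VisitApple(const Apple&) { std::cout << "apple\n"; }
void VisitRipeBanana(const Banana&) { std::cout << "ripe banana\n"; }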
diff --git a/deps/v8/src/heap/objects-visiting.cc b/deps/v8/src/heap/objects-visiting.cc
index 594b837f69..063dae512f 100644
--- a/deps/v8/src/heap/objects-visiting.cc
+++ b/deps/v8/src/heap/objects-visiting.cc
@@ -24,42 +24,41 @@ static bool MustRecordSlots(Heap* heap) {
template <class T>
struct WeakListVisitor;
-
template <class T>
-Object* VisitWeakList(Heap* heap, Object* list, WeakObjectRetainer* retainer) {
- Object* undefined = ReadOnlyRoots(heap).undefined_value();
- Object* head = undefined;
- T* tail = nullptr;
+Object VisitWeakList(Heap* heap, Object list, WeakObjectRetainer* retainer) {
+ Object undefined = ReadOnlyRoots(heap).undefined_value();
+ Object head = undefined;
+ T tail;
bool record_slots = MustRecordSlots(heap);
while (list != undefined) {
// Check whether to keep the candidate in the list.
- T* candidate = reinterpret_cast<T*>(list);
+ T candidate = T::cast(list);
- Object* retained = retainer->RetainAs(list);
+ Object retained = retainer->RetainAs(list);
// Move to the next element before the WeakNext is cleared.
list = WeakListVisitor<T>::WeakNext(candidate);
- if (retained != nullptr) {
+ if (retained != Object()) {
if (head == undefined) {
// First element in the list.
head = retained;
} else {
// Subsequent elements in the list.
- DCHECK_NOT_NULL(tail);
+ DCHECK(!tail.is_null());
WeakListVisitor<T>::SetWeakNext(tail, retained);
if (record_slots) {
- HeapObject* slot_holder = WeakListVisitor<T>::WeakNextHolder(tail);
+ HeapObject slot_holder = WeakListVisitor<T>::WeakNextHolder(tail);
int slot_offset = WeakListVisitor<T>::WeakNextOffset();
- Object** slot = HeapObject::RawField(slot_holder, slot_offset);
+ ObjectSlot slot = HeapObject::RawField(slot_holder, slot_offset);
MarkCompactCollector::RecordSlot(slot_holder, slot,
HeapObject::cast(retained));
}
}
// Retained object is new tail.
DCHECK(!retained->IsUndefined(heap->isolate()));
- candidate = reinterpret_cast<T*>(retained);
+ candidate = T::cast(retained);
tail = candidate;
// tail is a live object, visit it.
@@ -71,16 +70,15 @@ Object* VisitWeakList(Heap* heap, Object* list, WeakObjectRetainer* retainer) {
}
// Terminate the list if there is one or more elements.
- if (tail != nullptr) WeakListVisitor<T>::SetWeakNext(tail, undefined);
+ if (!tail.is_null()) WeakListVisitor<T>::SetWeakNext(tail, undefined);
return head;
}
-
template <class T>
-static void ClearWeakList(Heap* heap, Object* list) {
- Object* undefined = ReadOnlyRoots(heap).undefined_value();
+static void ClearWeakList(Heap* heap, Object list) {
+ Object undefined = ReadOnlyRoots(heap).undefined_value();
while (list != undefined) {
- T* candidate = reinterpret_cast<T*>(list);
+ T candidate = T::cast(list);
list = WeakListVisitor<T>::WeakNext(candidate);
WeakListVisitor<T>::SetWeakNext(candidate, undefined);
}
@@ -88,24 +86,24 @@ static void ClearWeakList(Heap* heap, Object* list) {
template <>
struct WeakListVisitor<Code> {
- static void SetWeakNext(Code* code, Object* next) {
+ static void SetWeakNext(Code code, Object next) {
code->code_data_container()->set_next_code_link(next,
UPDATE_WEAK_WRITE_BARRIER);
}
- static Object* WeakNext(Code* code) {
+ static Object WeakNext(Code code) {
return code->code_data_container()->next_code_link();
}
- static HeapObject* WeakNextHolder(Code* code) {
+ static HeapObject WeakNextHolder(Code code) {
return code->code_data_container();
}
static int WeakNextOffset() { return CodeDataContainer::kNextCodeLinkOffset; }
- static void VisitLiveObject(Heap*, Code*, WeakObjectRetainer*) {}
+ static void VisitLiveObject(Heap*, Code, WeakObjectRetainer*) {}
- static void VisitPhantomObject(Heap* heap, Code* code) {
+ static void VisitPhantomObject(Heap* heap, Code code) {
// Even though the code is dying, its code_data_container can still be
// alive. Clear the next_code_link slot to avoid a dangling pointer.
SetWeakNext(code, ReadOnlyRoots(heap).undefined_value());
@@ -115,27 +113,27 @@ struct WeakListVisitor<Code> {
template <>
struct WeakListVisitor<Context> {
- static void SetWeakNext(Context* context, Object* next) {
+ static void SetWeakNext(Context context, Object next) {
context->set(Context::NEXT_CONTEXT_LINK, next, UPDATE_WEAK_WRITE_BARRIER);
}
- static Object* WeakNext(Context* context) {
+ static Object WeakNext(Context context) {
return context->next_context_link();
}
- static HeapObject* WeakNextHolder(Context* context) { return context; }
+ static HeapObject WeakNextHolder(Context context) { return context; }
static int WeakNextOffset() {
return FixedArray::SizeFor(Context::NEXT_CONTEXT_LINK);
}
- static void VisitLiveObject(Heap* heap, Context* context,
+ static void VisitLiveObject(Heap* heap, Context context,
WeakObjectRetainer* retainer) {
if (heap->gc_state() == Heap::MARK_COMPACT) {
// Record the slots of the weak entries in the native context.
for (int idx = Context::FIRST_WEAK_SLOT;
idx < Context::NATIVE_CONTEXT_SLOTS; ++idx) {
- Object** slot = Context::cast(context)->RawFieldOfElementAt(idx);
+ ObjectSlot slot = context->RawField(Context::OffsetOfElementAt(idx));
MarkCompactCollector::RecordSlot(context, slot,
HeapObject::cast(*slot));
}
@@ -147,24 +145,23 @@ struct WeakListVisitor<Context> {
}
template <class T>
- static void DoWeakList(Heap* heap, Context* context,
+ static void DoWeakList(Heap* heap, Context context,
WeakObjectRetainer* retainer, int index) {
// Visit the weak list, removing dead intermediate elements.
- Object* list_head = VisitWeakList<T>(heap, context->get(index), retainer);
+ Object list_head = VisitWeakList<T>(heap, context->get(index), retainer);
// Update the list head.
context->set(index, list_head, UPDATE_WRITE_BARRIER);
if (MustRecordSlots(heap)) {
// Record the updated slot if necessary.
- Object** head_slot =
- HeapObject::RawField(context, FixedArray::SizeFor(index));
+ ObjectSlot head_slot = context->RawField(FixedArray::SizeFor(index));
heap->mark_compact_collector()->RecordSlot(context, head_slot,
HeapObject::cast(list_head));
}
}
- static void VisitPhantomObject(Heap* heap, Context* context) {
+ static void VisitPhantomObject(Heap* heap, Context context) {
ClearWeakList<Code>(heap, context->get(Context::OPTIMIZED_CODE_LIST));
ClearWeakList<Code>(heap, context->get(Context::DEOPTIMIZED_CODE_LIST));
}
@@ -173,26 +170,25 @@ struct WeakListVisitor<Context> {
template <>
struct WeakListVisitor<AllocationSite> {
- static void SetWeakNext(AllocationSite* obj, Object* next) {
+ static void SetWeakNext(AllocationSite obj, Object next) {
obj->set_weak_next(next, UPDATE_WEAK_WRITE_BARRIER);
}
- static Object* WeakNext(AllocationSite* obj) { return obj->weak_next(); }
+ static Object WeakNext(AllocationSite obj) { return obj->weak_next(); }
- static HeapObject* WeakNextHolder(AllocationSite* obj) { return obj; }
+ static HeapObject WeakNextHolder(AllocationSite obj) { return obj; }
static int WeakNextOffset() { return AllocationSite::kWeakNextOffset; }
- static void VisitLiveObject(Heap*, AllocationSite*, WeakObjectRetainer*) {}
+ static void VisitLiveObject(Heap*, AllocationSite, WeakObjectRetainer*) {}
- static void VisitPhantomObject(Heap*, AllocationSite*) {}
+ static void VisitPhantomObject(Heap*, AllocationSite) {}
};
+template Object VisitWeakList<Context>(Heap* heap, Object list,
+ WeakObjectRetainer* retainer);
-template Object* VisitWeakList<Context>(Heap* heap, Object* list,
- WeakObjectRetainer* retainer);
-
-template Object* VisitWeakList<AllocationSite>(Heap* heap, Object* list,
- WeakObjectRetainer* retainer);
+template Object VisitWeakList<AllocationSite>(Heap* heap, Object list,
+ WeakObjectRetainer* retainer);
} // namespace internal
} // namespace v8
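The explicit instantiations above simply track the new value-type signature: VisitWeakList is defined in this .cc file, so each specialization a client needs must be instantiated here for the linker to find it. The mechanism on a toy template:

template <class T>
int CountBytesSketch(int n) {
  return n * static_cast<int>(sizeof(T));  // defined only in this file
}

// Emit concrete symbols into this translation unit:
template int CountBytesSketch<char>(int n);
template int CountBytesSketch<double>(int n);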
diff --git a/deps/v8/src/heap/objects-visiting.h b/deps/v8/src/heap/objects-visiting.h
index 147af52c7e..696b12a31c 100644
--- a/deps/v8/src/heap/objects-visiting.h
+++ b/deps/v8/src/heap/objects-visiting.h
@@ -20,54 +20,67 @@ namespace internal {
class BigInt;
class BytecodeArray;
class DataHandler;
+class EmbedderDataArray;
class JSArrayBuffer;
class JSDataView;
class JSRegExp;
class JSTypedArray;
+class JSWeakCell;
+class JSWeakRef;
class JSWeakCollection;
-class UncompiledDataWithoutPreParsedScope;
-class UncompiledDataWithPreParsedScope;
-
-#define TYPED_VISITOR_ID_LIST(V) \
- V(AllocationSite) \
- V(BigInt) \
- V(ByteArray) \
- V(BytecodeArray) \
- V(Cell) \
- V(Code) \
- V(CodeDataContainer) \
- V(ConsString) \
- V(DataHandler) \
- V(EphemeronHashTable) \
- V(FeedbackCell) \
- V(FeedbackVector) \
- V(FixedArray) \
- V(FixedDoubleArray) \
- V(FixedFloat64Array) \
- V(FixedTypedArrayBase) \
- V(JSArrayBuffer) \
- V(JSDataView) \
- V(JSObject) \
- V(JSTypedArray) \
- V(JSWeakCollection) \
- V(Map) \
- V(Oddball) \
- V(PreParsedScopeData) \
- V(PropertyArray) \
- V(PropertyCell) \
- V(PrototypeInfo) \
- V(SeqOneByteString) \
- V(SeqTwoByteString) \
- V(SharedFunctionInfo) \
- V(SlicedString) \
- V(SmallOrderedHashMap) \
- V(SmallOrderedHashSet) \
- V(Symbol) \
- V(ThinString) \
- V(TransitionArray) \
- V(UncompiledDataWithoutPreParsedScope) \
- V(UncompiledDataWithPreParsedScope) \
- V(WasmInstanceObject)
+class NativeContext;
+class UncompiledDataWithoutPreparseData;
+class UncompiledDataWithPreparseData;
+class WasmInstanceObject;
+
+#define TYPED_VISITOR_ID_LIST(V) \
+ V(AllocationSite, AllocationSite) \
+ V(BigInt, BigInt) \
+ V(ByteArray, ByteArray) \
+ V(BytecodeArray, BytecodeArray) \
+ V(Cell, Cell) \
+ V(Code, Code) \
+ V(CodeDataContainer, CodeDataContainer) \
+ V(ConsString, ConsString) \
+ V(Context, Context) \
+ V(DataHandler, DataHandler) \
+ V(DescriptorArray, DescriptorArray) \
+ V(EmbedderDataArray, EmbedderDataArray) \
+ V(EphemeronHashTable, EphemeronHashTable) \
+ V(FeedbackCell, FeedbackCell) \
+ V(FeedbackVector, FeedbackVector) \
+ V(FixedArray, FixedArray) \
+ V(FixedDoubleArray, FixedDoubleArray) \
+ V(FixedFloat64Array, FixedFloat64Array) \
+ V(FixedTypedArrayBase, FixedTypedArrayBase) \
+ V(JSArrayBuffer, JSArrayBuffer) \
+ V(JSDataView, JSDataView) \
+ V(JSFunction, JSFunction) \
+ V(JSObject, JSObject) \
+ V(JSTypedArray, JSTypedArray) \
+ V(JSWeakCell, JSWeakCell) \
+ V(JSWeakCollection, JSWeakCollection) \
+ V(JSWeakRef, JSWeakRef) \
+ V(Map, Map) \
+ V(NativeContext, NativeContext) \
+ V(Oddball, Oddball) \
+ V(PreparseData, PreparseData) \
+ V(PropertyArray, PropertyArray) \
+ V(PropertyCell, PropertyCell) \
+ V(PrototypeInfo, PrototypeInfo) \
+ V(SeqOneByteString, SeqOneByteString) \
+ V(SeqTwoByteString, SeqTwoByteString) \
+ V(SharedFunctionInfo, SharedFunctionInfo) \
+ V(SlicedString, SlicedString) \
+ V(SmallOrderedHashMap, SmallOrderedHashMap) \
+ V(SmallOrderedHashSet, SmallOrderedHashSet) \
+ V(SmallOrderedNameDictionary, SmallOrderedNameDictionary) \
+ V(Symbol, Symbol) \
+ V(ThinString, ThinString) \
+ V(TransitionArray, TransitionArray) \
+ V(UncompiledDataWithoutPreparseData, UncompiledDataWithoutPreparseData) \
+ V(UncompiledDataWithPreparseData, UncompiledDataWithPreparseData) \
+ V(WasmInstanceObject, WasmInstanceObject)
// The base class for visitors that need to dispatch on object type. The default
// behavior of all visit functions is to iterate body of the given object using
@@ -83,36 +96,36 @@ class UncompiledDataWithPreParsedScope;
template <typename ResultType, typename ConcreteVisitor>
class HeapVisitor : public ObjectVisitor {
public:
- V8_INLINE ResultType Visit(HeapObject* object);
- V8_INLINE ResultType Visit(Map* map, HeapObject* object);
+ V8_INLINE ResultType Visit(HeapObject object);
+ V8_INLINE ResultType Visit(Map map, HeapObject object);
protected:
// A guard predicate for visiting the object.
// If it returns false then the default implementations of the Visit*
// functions bail out of iterating the object pointers.
- V8_INLINE bool ShouldVisit(HeapObject* object) { return true; }
+ V8_INLINE bool ShouldVisit(HeapObject object) { return true; }
// Guard predicate for visiting the objects map pointer separately.
V8_INLINE bool ShouldVisitMapPointer() { return true; }
// A callback for visiting the map pointer in the object header.
- V8_INLINE void VisitMapPointer(HeapObject* host, HeapObject** map);
+ V8_INLINE void VisitMapPointer(HeapObject host, MapWordSlot map_slot);
// If this predicate returns false, then the heap visitor will fail
// in the default Visit implementation for subclasses of JSObject.
V8_INLINE bool AllowDefaultJSObjectVisit() { return true; }
-#define VISIT(type) V8_INLINE ResultType Visit##type(Map* map, type* object);
+#define VISIT(TypeName, Type) \
+ V8_INLINE ResultType Visit##TypeName(Map map, Type object);
TYPED_VISITOR_ID_LIST(VISIT)
#undef VISIT
- V8_INLINE ResultType VisitShortcutCandidate(Map* map, ConsString* object);
- V8_INLINE ResultType VisitNativeContext(Map* map, Context* object);
- V8_INLINE ResultType VisitDataObject(Map* map, HeapObject* object);
- V8_INLINE ResultType VisitJSObjectFast(Map* map, JSObject* object);
- V8_INLINE ResultType VisitJSApiObject(Map* map, JSObject* object);
- V8_INLINE ResultType VisitStruct(Map* map, HeapObject* object);
- V8_INLINE ResultType VisitFreeSpace(Map* map, FreeSpace* object);
- V8_INLINE ResultType VisitWeakArray(Map* map, HeapObject* object);
+ V8_INLINE ResultType VisitShortcutCandidate(Map map, ConsString object);
+ V8_INLINE ResultType VisitDataObject(Map map, HeapObject object);
+ V8_INLINE ResultType VisitJSObjectFast(Map map, JSObject object);
+ V8_INLINE ResultType VisitJSApiObject(Map map, JSObject object);
+ V8_INLINE ResultType VisitStruct(Map map, HeapObject object);
+ V8_INLINE ResultType VisitFreeSpace(Map map, FreeSpace object);
+ V8_INLINE ResultType VisitWeakArray(Map map, HeapObject object);
template <typename T>
- static V8_INLINE T* Cast(HeapObject* object);
+ static V8_INLINE T Cast(HeapObject object);
};
template <typename ConcreteVisitor>
@@ -122,18 +135,16 @@ class NewSpaceVisitor : public HeapVisitor<int, ConcreteVisitor> {
// Special cases for young generation.
- V8_INLINE int VisitNativeContext(Map* map, Context* object);
- V8_INLINE int VisitJSApiObject(Map* map, JSObject* object);
+ V8_INLINE int VisitNativeContext(Map map, NativeContext object);
+ V8_INLINE int VisitJSApiObject(Map map, JSObject object);
- int VisitBytecodeArray(Map* map, BytecodeArray* object) {
+ int VisitBytecodeArray(Map map, BytecodeArray object) {
UNREACHABLE();
return 0;
}
- int VisitSharedFunctionInfo(Map* map, SharedFunctionInfo* object) {
- UNREACHABLE();
- return 0;
- }
+ int VisitSharedFunctionInfo(Map map, SharedFunctionInfo object);
+ int VisitJSWeakCell(Map map, JSWeakCell js_weak_cell);
};
class WeakObjectRetainer;
@@ -144,7 +155,9 @@ class WeakObjectRetainer;
// pointers. The template parameter T is a WeakListVisitor that defines how to
// access the next-element pointers.
template <class T>
-Object* VisitWeakList(Heap* heap, Object* list, WeakObjectRetainer* retainer);
+Object VisitWeakList(Heap* heap, Object list, WeakObjectRetainer* retainer);
+template <class T>
+Object VisitWeakList2(Heap* heap, Object list, WeakObjectRetainer* retainer);
} // namespace internal
} // namespace v8
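
The VISIT macro above now takes two parameters, so the visitor id (TypeName) and the C++ parameter type (Type) can differ; the visible list pairs them identically, but the split lets one parameter type back several visitor ids. A minimal, self-contained sketch of this two-parameter X-macro dispatch, using stand-in types rather than V8's real ones:

    #include <iostream>

    struct Map {};
    struct SeqOneByteString {};
    struct JSObject {};

    // Each entry pairs a visitor name with the parameter type it receives.
    #define TYPED_VISITOR_LIST(V)           \
      V(SeqOneByteString, SeqOneByteString) \
      V(JSObjectFast, JSObject)

    class Visitor {
     public:
    #define VISIT(TypeName, Type)                 \
      int Visit##TypeName(Map map, Type object) { \
        std::cout << "Visit" #TypeName "\n";      \
        return 0;                                 \
      }
      TYPED_VISITOR_LIST(VISIT)
    #undef VISIT
    };

    int main() {
      Visitor v;
      v.VisitJSObjectFast(Map{}, JSObject{});  // prints "VisitJSObjectFast"
    }

Expanding TYPED_VISITOR_LIST(VISIT) inside the class declares VisitSeqOneByteString and VisitJSObjectFast without spelling out each signature by hand, which is how the header generates its Visit##TypeName declarations.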
diff --git a/deps/v8/src/heap/remembered-set.h b/deps/v8/src/heap/remembered-set.h
index e59457b10d..b6b442b56e 100644
--- a/deps/v8/src/heap/remembered-set.h
+++ b/deps/v8/src/heap/remembered-set.h
@@ -116,7 +116,7 @@ class RememberedSet : public AllStatic {
// The callback should take (MemoryChunk* chunk) and return void.
template <typename Callback>
static void IterateMemoryChunks(Heap* heap, Callback callback) {
- MemoryChunkIterator it(heap);
+ OldGenerationMemoryChunkIterator it(heap);
MemoryChunk* chunk;
while ((chunk = it.next()) != nullptr) {
SlotSet* slots = chunk->slot_set<type>();
@@ -190,21 +190,21 @@ class RememberedSet : public AllStatic {
// Given a page and a typed slot in that page, this function adds the slot
// to the remembered set.
- static void InsertTyped(Page* page, Address host_addr, SlotType slot_type,
- Address slot_addr) {
+ static void InsertTyped(MemoryChunk* memory_chunk, SlotType slot_type,
+ uint32_t offset) {
+ TypedSlotSet* slot_set = memory_chunk->typed_slot_set<type>();
+ if (slot_set == nullptr) {
+ slot_set = memory_chunk->AllocateTypedSlotSet<type>();
+ }
+ slot_set->Insert(slot_type, offset);
+ }
+
+ static void MergeTyped(MemoryChunk* page, std::unique_ptr<TypedSlots> slots) {
TypedSlotSet* slot_set = page->typed_slot_set<type>();
if (slot_set == nullptr) {
slot_set = page->AllocateTypedSlotSet<type>();
}
- if (host_addr == kNullAddress) {
- host_addr = page->address();
- }
- uintptr_t offset = slot_addr - page->address();
- uintptr_t host_offset = host_addr - page->address();
- DCHECK_LT(offset, static_cast<uintptr_t>(TypedSlotSet::kMaxOffset));
- DCHECK_LT(host_offset, static_cast<uintptr_t>(TypedSlotSet::kMaxOffset));
- slot_set->Insert(slot_type, static_cast<uint32_t>(host_offset),
- static_cast<uint32_t>(offset));
+ slot_set->Merge(slots.get());
}
// Given a page and a range of typed slots in that page, this function removes
@@ -213,8 +213,7 @@ class RememberedSet : public AllStatic {
TypedSlotSet* slots = page->typed_slot_set<type>();
if (slots != nullptr) {
slots->Iterate(
- [start, end](SlotType slot_type, Address host_addr,
- Address slot_addr) {
+ [=](SlotType slot_type, Address slot_addr) {
return start <= slot_addr && slot_addr < end ? REMOVE_SLOT
: KEEP_SLOT;
},
@@ -223,7 +222,7 @@ class RememberedSet : public AllStatic {
}
// Iterates and filters the remembered set with the given callback.
- // The callback should take (SlotType slot_type, SlotAddress slot) and return
+ // The callback should take (SlotType slot_type, Address addr) and return
// SlotCallbackResult.
template <typename Callback>
static void IterateTyped(Heap* heap, RememberedSetIterationMode mode,
@@ -237,7 +236,7 @@ class RememberedSet : public AllStatic {
// Iterates and filters typed old to old pointers in the given memory chunk
// with the given callback. The callback should take (SlotType slot_type,
- // Address slot_addr) and return SlotCallbackResult.
+ // Address addr) and return SlotCallbackResult.
template <typename Callback>
static void IterateTyped(MemoryChunk* chunk, Callback callback) {
TypedSlotSet* slots = chunk->typed_slot_set<type>();
@@ -252,7 +251,7 @@ class RememberedSet : public AllStatic {
// Clear all old to old slots from the remembered set.
static void ClearAll(Heap* heap) {
STATIC_ASSERT(type == OLD_TO_OLD);
- MemoryChunkIterator it(heap);
+ OldGenerationMemoryChunkIterator it(heap);
MemoryChunk* chunk;
while ((chunk = it.next()) != nullptr) {
chunk->ReleaseSlotSet<OLD_TO_OLD>();
@@ -261,45 +260,66 @@ class RememberedSet : public AllStatic {
}
}
- // Eliminates all stale slots from the remembered set, i.e.
- // slots that are not part of live objects anymore. This method must be
- // called after marking, when the whole transitive closure is known and
- // must be called before sweeping when mark bits are still intact.
- static void ClearInvalidTypedSlots(Heap* heap, MemoryChunk* chunk);
-
private:
- static bool IsValidSlot(Heap* heap, MemoryChunk* chunk, Object** slot);
+ static bool IsValidSlot(Heap* heap, MemoryChunk* chunk, ObjectSlot slot);
};
class UpdateTypedSlotHelper {
public:
+ // Updates a typed slot using an untyped slot callback.
+ // The callback accepts MaybeObjectSlot and returns SlotCallbackResult.
+ template <typename Callback>
+ static SlotCallbackResult UpdateTypedSlot(Heap* heap, SlotType slot_type,
+ Address addr, Callback callback) {
+ switch (slot_type) {
+ case CODE_TARGET_SLOT: {
+ RelocInfo rinfo(addr, RelocInfo::CODE_TARGET, 0, Code());
+ return UpdateCodeTarget(&rinfo, callback);
+ }
+ case CODE_ENTRY_SLOT: {
+ return UpdateCodeEntry(addr, callback);
+ }
+ case EMBEDDED_OBJECT_SLOT: {
+ RelocInfo rinfo(addr, RelocInfo::EMBEDDED_OBJECT, 0, Code());
+ return UpdateEmbeddedPointer(heap, &rinfo, callback);
+ }
+ case OBJECT_SLOT: {
+ // TODO(ishell): the incoming addr represents MaybeObjectSlot(addr).
+ STATIC_ASSERT(kTaggedSize == kSystemPointerSize);
+ return callback(FullMaybeObjectSlot(addr));
+ }
+ case CLEARED_SLOT:
+ break;
+ }
+ UNREACHABLE();
+ }
+
+ private:
// Updates a code entry slot using an untyped slot callback.
- // The callback accepts Object** and returns SlotCallbackResult.
+ // The callback accepts FullMaybeObjectSlot and returns SlotCallbackResult.
template <typename Callback>
static SlotCallbackResult UpdateCodeEntry(Address entry_address,
Callback callback) {
- Object* code = Code::GetObjectFromEntryAddress(entry_address);
- Object* old_code = code;
- SlotCallbackResult result =
- callback(reinterpret_cast<MaybeObject**>(&code));
- DCHECK(!HasWeakHeapObjectTag(code));
+ Code code = Code::GetObjectFromEntryAddress(entry_address);
+ Code old_code = code;
+ SlotCallbackResult result = callback(FullMaybeObjectSlot(&code));
+ DCHECK(!HasWeakHeapObjectTag(code.ptr()));
if (code != old_code) {
- Memory<Address>(entry_address) = reinterpret_cast<Code*>(code)->entry();
+ Memory<Address>(entry_address) = code->entry();
}
return result;
}
// Updates a code target slot using an untyped slot callback.
- // The callback accepts Object** and returns SlotCallbackResult.
+ // The callback accepts FullMaybeObjectSlot and returns SlotCallbackResult.
template <typename Callback>
static SlotCallbackResult UpdateCodeTarget(RelocInfo* rinfo,
Callback callback) {
DCHECK(RelocInfo::IsCodeTargetMode(rinfo->rmode()));
- Code* old_target = Code::GetCodeFromTargetAddress(rinfo->target_address());
- Object* new_target = old_target;
- SlotCallbackResult result =
- callback(reinterpret_cast<MaybeObject**>(&new_target));
- DCHECK(!HasWeakHeapObjectTag(new_target));
+ Code old_target = Code::GetCodeFromTargetAddress(rinfo->target_address());
+ Code new_target = old_target;
+ SlotCallbackResult result = callback(FullMaybeObjectSlot(&new_target));
+ DCHECK(!HasWeakHeapObjectTag(new_target.ptr()));
if (new_target != old_target) {
rinfo->set_target_address(
Code::cast(new_target)->raw_instruction_start());
@@ -308,47 +328,20 @@ class UpdateTypedSlotHelper {
}
// Updates an embedded pointer slot using an untyped slot callback.
- // The callback accepts Object** and returns SlotCallbackResult.
+ // The callback accepts FullMaybeObjectSlot and returns SlotCallbackResult.
template <typename Callback>
static SlotCallbackResult UpdateEmbeddedPointer(Heap* heap, RelocInfo* rinfo,
Callback callback) {
DCHECK(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
- HeapObject* old_target = rinfo->target_object();
- Object* new_target = old_target;
- SlotCallbackResult result =
- callback(reinterpret_cast<MaybeObject**>(&new_target));
- DCHECK(!HasWeakHeapObjectTag(new_target));
+ HeapObject old_target = rinfo->target_object();
+ HeapObject new_target = old_target;
+ SlotCallbackResult result = callback(FullMaybeObjectSlot(&new_target));
+ DCHECK(!HasWeakHeapObjectTag(new_target->ptr()));
if (new_target != old_target) {
rinfo->set_target_object(heap, HeapObject::cast(new_target));
}
return result;
}
-
- // Updates a typed slot using an untyped slot callback.
- // The callback accepts MaybeObject** and returns SlotCallbackResult.
- template <typename Callback>
- static SlotCallbackResult UpdateTypedSlot(Heap* heap, SlotType slot_type,
- Address addr, Callback callback) {
- switch (slot_type) {
- case CODE_TARGET_SLOT: {
- RelocInfo rinfo(addr, RelocInfo::CODE_TARGET, 0, nullptr);
- return UpdateCodeTarget(&rinfo, callback);
- }
- case CODE_ENTRY_SLOT: {
- return UpdateCodeEntry(addr, callback);
- }
- case EMBEDDED_OBJECT_SLOT: {
- RelocInfo rinfo(addr, RelocInfo::EMBEDDED_OBJECT, 0, nullptr);
- return UpdateEmbeddedPointer(heap, &rinfo, callback);
- }
- case OBJECT_SLOT: {
- return callback(reinterpret_cast<MaybeObject**>(addr));
- }
- case CLEARED_SLOT:
- break;
- }
- UNREACHABLE();
- }
};
inline SlotType SlotTypeForRelocInfoMode(RelocInfo::Mode rmode) {
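
Note the shape change in InsertTyped above: the old form stored absolute host and slot addresses per entry (guarded by kMaxOffset DCHECKs), while the new form records a single 32-bit chunk-relative offset, and MergeTyped folds whole pre-collected TypedSlots buffers into a page's set. A hedged sketch of the offset encoding, with stand-in types (the real bound is TypedSlotSet::kMaxOffset):

    #include <cassert>
    #include <cstdint>

    using Address = uintptr_t;

    struct MemoryChunk {
      Address base;
      Address address() const { return base; }
    };

    // Convert an absolute slot address into the chunk-relative offset that
    // TypedSlotSet::Insert(slot_type, offset) stores.
    uint32_t ToTypedSlotOffset(const MemoryChunk* chunk, Address slot_addr) {
      assert(slot_addr >= chunk->address());
      uintptr_t offset = slot_addr - chunk->address();
      assert(offset <= UINT32_MAX);  // must fit the 32-bit encoding
      return static_cast<uint32_t>(offset);
    }

    int main() {
      MemoryChunk chunk{0x40000000};
      assert(ToTypedSlotOffset(&chunk, 0x40000010) == 0x10);
    }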
diff --git a/deps/v8/src/heap/scavenge-job.cc b/deps/v8/src/heap/scavenge-job.cc
index 5848d5342e..14e7d000ca 100644
--- a/deps/v8/src/heap/scavenge-job.cc
+++ b/deps/v8/src/heap/scavenge-job.cc
@@ -8,6 +8,7 @@
#include "src/heap/gc-tracer.h"
#include "src/heap/heap-inl.h"
#include "src/heap/heap.h"
+#include "src/heap/spaces.h"
#include "src/isolate.h"
#include "src/v8.h"
#include "src/vm-state-inl.h"
diff --git a/deps/v8/src/heap/scavenger-inl.h b/deps/v8/src/heap/scavenger-inl.h
index 376b5e75aa..bfd8e11ff8 100644
--- a/deps/v8/src/heap/scavenger-inl.h
+++ b/deps/v8/src/heap/scavenger-inl.h
@@ -11,17 +11,18 @@
#include "src/heap/local-allocator-inl.h"
#include "src/objects-inl.h"
#include "src/objects/map.h"
+#include "src/objects/slots-inl.h"
namespace v8 {
namespace internal {
-void Scavenger::PromotionList::View::PushRegularObject(HeapObject* object,
+void Scavenger::PromotionList::View::PushRegularObject(HeapObject object,
int size) {
promotion_list_->PushRegularObject(task_id_, object, size);
}
-void Scavenger::PromotionList::View::PushLargeObject(HeapObject* object,
- Map* map, int size) {
+void Scavenger::PromotionList::View::PushLargeObject(HeapObject object, Map map,
+ int size) {
promotion_list_->PushLargeObject(task_id_, object, map, size);
}
@@ -45,13 +46,13 @@ bool Scavenger::PromotionList::View::ShouldEagerlyProcessPromotionList() {
return promotion_list_->ShouldEagerlyProcessPromotionList(task_id_);
}
-void Scavenger::PromotionList::PushRegularObject(int task_id,
- HeapObject* object, int size) {
+void Scavenger::PromotionList::PushRegularObject(int task_id, HeapObject object,
+ int size) {
regular_object_promotion_list_.Push(task_id, ObjectAndSize(object, size));
}
-void Scavenger::PromotionList::PushLargeObject(int task_id, HeapObject* object,
- Map* map, int size) {
+void Scavenger::PromotionList::PushLargeObject(int task_id, HeapObject object,
+ Map map, int size) {
large_object_promotion_list_.Push(task_id, {object, map, size});
}
@@ -109,11 +110,11 @@ bool Scavenger::ContainsOnlyData(VisitorId visitor_id) {
return false;
}
-void Scavenger::PageMemoryFence(MaybeObject* object) {
+void Scavenger::PageMemoryFence(MaybeObject object) {
#ifdef THREAD_SANITIZER
// Perform a dummy acquire load to tell TSAN that there is no data race
// with page initialization.
- HeapObject* heap_object;
+ HeapObject heap_object;
if (object->GetHeapObject(&heap_object)) {
MemoryChunk* chunk = MemoryChunk::FromAddress(heap_object->address());
CHECK_NOT_NULL(chunk->synchronized_heap());
@@ -121,16 +122,15 @@ void Scavenger::PageMemoryFence(MaybeObject* object) {
#endif
}
-bool Scavenger::MigrateObject(Map* map, HeapObject* source, HeapObject* target,
+bool Scavenger::MigrateObject(Map map, HeapObject source, HeapObject target,
int size) {
// Copy the content of source to target.
target->set_map_word(MapWord::FromMap(map));
- heap()->CopyBlock(target->address() + kPointerSize,
- source->address() + kPointerSize, size - kPointerSize);
+ heap()->CopyBlock(target->address() + kTaggedSize,
+ source->address() + kTaggedSize, size - kTaggedSize);
- HeapObject* old = base::AsAtomicPointer::Release_CompareAndSwap(
- reinterpret_cast<HeapObject**>(source->address()), map,
- MapWord::FromForwardingAddress(target).ToMap());
+ Object old = source->map_slot().Release_CompareAndSwap(
+ map, MapWord::FromForwardingAddress(target).ToMap());
if (old != map) {
// Other task migrated the object.
return false;
@@ -147,16 +147,20 @@ bool Scavenger::MigrateObject(Map* map, HeapObject* source, HeapObject* target,
return true;
}
-CopyAndForwardResult Scavenger::SemiSpaceCopyObject(Map* map,
- HeapObjectReference** slot,
- HeapObject* object,
+template <typename THeapObjectSlot>
+CopyAndForwardResult Scavenger::SemiSpaceCopyObject(Map map,
+ THeapObjectSlot slot,
+ HeapObject object,
int object_size) {
+ static_assert(std::is_same<THeapObjectSlot, FullHeapObjectSlot>::value ||
+ std::is_same<THeapObjectSlot, HeapObjectSlot>::value,
+ "Only FullHeapObjectSlot and HeapObjectSlot are expected here");
DCHECK(heap()->AllowedToBeMigrated(object, NEW_SPACE));
AllocationAlignment alignment = HeapObject::RequiredAlignment(map);
AllocationResult allocation =
allocator_.Allocate(NEW_SPACE, object_size, alignment);
- HeapObject* target = nullptr;
+ HeapObject target;
if (allocation.To(&target)) {
DCHECK(heap()->incremental_marking()->non_atomic_marking_state()->IsWhite(
target));
@@ -179,15 +183,18 @@ CopyAndForwardResult Scavenger::SemiSpaceCopyObject(Map* map,
return CopyAndForwardResult::FAILURE;
}
-CopyAndForwardResult Scavenger::PromoteObject(Map* map,
- HeapObjectReference** slot,
- HeapObject* object,
+template <typename THeapObjectSlot>
+CopyAndForwardResult Scavenger::PromoteObject(Map map, THeapObjectSlot slot,
+ HeapObject object,
int object_size) {
+ static_assert(std::is_same<THeapObjectSlot, FullHeapObjectSlot>::value ||
+ std::is_same<THeapObjectSlot, HeapObjectSlot>::value,
+ "Only FullHeapObjectSlot and HeapObjectSlot are expected here");
AllocationAlignment alignment = HeapObject::RequiredAlignment(map);
AllocationResult allocation =
allocator_.Allocate(OLD_SPACE, object_size, alignment);
- HeapObject* target = nullptr;
+ HeapObject target;
if (allocation.To(&target)) {
DCHECK(heap()->incremental_marking()->non_atomic_marking_state()->IsWhite(
target));
@@ -218,15 +225,16 @@ SlotCallbackResult Scavenger::RememberedSetEntryNeeded(
: REMOVE_SLOT;
}
-bool Scavenger::HandleLargeObject(Map* map, HeapObject* object,
- int object_size) {
- if (V8_UNLIKELY(FLAG_young_generation_large_objects &&
- object_size > kMaxNewSpaceHeapObjectSize)) {
+bool Scavenger::HandleLargeObject(Map map, HeapObject object, int object_size) {
+ // TODO(hpayer): Make this check size-based, i.e.
+ // object_size > kMaxRegularHeapObjectSize
+ if (V8_UNLIKELY(
+ FLAG_young_generation_large_objects &&
+ MemoryChunk::FromHeapObject(object)->IsInNewLargeObjectSpace())) {
DCHECK_EQ(NEW_LO_SPACE,
MemoryChunk::FromHeapObject(object)->owner()->identity());
- if (base::AsAtomicPointer::Release_CompareAndSwap(
- reinterpret_cast<HeapObject**>(object->address()), map,
- MapWord::FromForwardingAddress(object).ToMap()) == map) {
+ if (object->map_slot().Release_CompareAndSwap(
+ map, MapWord::FromForwardingAddress(object).ToMap()) == map) {
surviving_new_large_objects_.insert({object, map});
if (!ContainsOnlyData(map->visitor_id())) {
@@ -238,11 +246,14 @@ bool Scavenger::HandleLargeObject(Map* map, HeapObject* object,
return false;
}
-SlotCallbackResult Scavenger::EvacuateObjectDefault(Map* map,
- HeapObjectReference** slot,
- HeapObject* object,
+template <typename THeapObjectSlot>
+SlotCallbackResult Scavenger::EvacuateObjectDefault(Map map,
+ THeapObjectSlot slot,
+ HeapObject object,
int object_size) {
- SLOW_DCHECK(object_size <= Page::kAllocatableMemory);
+ static_assert(std::is_same<THeapObjectSlot, FullHeapObjectSlot>::value ||
+ std::is_same<THeapObjectSlot, HeapObjectSlot>::value,
+ "Only FullHeapObjectSlot and HeapObjectSlot are expected here");
SLOW_DCHECK(object->SizeFromMap(map) == object_size);
CopyAndForwardResult result;
@@ -250,6 +261,9 @@ SlotCallbackResult Scavenger::EvacuateObjectDefault(Map* map,
return REMOVE_SLOT;
}
+ SLOW_DCHECK(static_cast<size_t>(object_size) <=
+ MemoryChunkLayout::AllocatableMemoryInDataPage());
+
if (!heap()->ShouldBePromoted(object->address())) {
// A semi-space copy may fail due to fragmentation. In that case, we
// try to promote the object.
@@ -277,69 +291,75 @@ SlotCallbackResult Scavenger::EvacuateObjectDefault(Map* map,
UNREACHABLE();
}
-SlotCallbackResult Scavenger::EvacuateThinString(Map* map, HeapObject** slot,
- ThinString* object,
+template <typename THeapObjectSlot>
+SlotCallbackResult Scavenger::EvacuateThinString(Map map, THeapObjectSlot slot,
+ ThinString object,
int object_size) {
+ static_assert(std::is_same<THeapObjectSlot, FullHeapObjectSlot>::value ||
+ std::is_same<THeapObjectSlot, HeapObjectSlot>::value,
+ "Only FullHeapObjectSlot and HeapObjectSlot are expected here");
if (!is_incremental_marking_) {
// The ThinString should die after Scavenge, so avoid writing the proper
// forwarding pointer and instead just signal the actual object as a forwarded
// reference.
- String* actual = object->actual();
+ String actual = object->actual();
// ThinStrings always refer to internalized strings, which are always in old
// space.
DCHECK(!Heap::InNewSpace(actual));
- *slot = actual;
+ slot.StoreHeapObject(actual);
return REMOVE_SLOT;
}
- return EvacuateObjectDefault(
- map, reinterpret_cast<HeapObjectReference**>(slot), object, object_size);
+ return EvacuateObjectDefault(map, slot, object, object_size);
}
-SlotCallbackResult Scavenger::EvacuateShortcutCandidate(Map* map,
- HeapObject** slot,
- ConsString* object,
+template <typename THeapObjectSlot>
+SlotCallbackResult Scavenger::EvacuateShortcutCandidate(Map map,
+ THeapObjectSlot slot,
+ ConsString object,
int object_size) {
+ static_assert(std::is_same<THeapObjectSlot, FullHeapObjectSlot>::value ||
+ std::is_same<THeapObjectSlot, HeapObjectSlot>::value,
+ "Only FullHeapObjectSlot and HeapObjectSlot are expected here");
DCHECK(IsShortcutCandidate(map->instance_type()));
if (!is_incremental_marking_ &&
object->unchecked_second() == ReadOnlyRoots(heap()).empty_string()) {
- HeapObject* first = HeapObject::cast(object->unchecked_first());
+ HeapObject first = HeapObject::cast(object->unchecked_first());
- *slot = first;
+ slot.StoreHeapObject(first);
if (!Heap::InNewSpace(first)) {
- base::AsAtomicPointer::Release_Store(
- reinterpret_cast<Map**>(object->address()),
+ object->map_slot().Release_Store(
MapWord::FromForwardingAddress(first).ToMap());
return REMOVE_SLOT;
}
MapWord first_word = first->synchronized_map_word();
if (first_word.IsForwardingAddress()) {
- HeapObject* target = first_word.ToForwardingAddress();
+ HeapObject target = first_word.ToForwardingAddress();
- *slot = target;
- base::AsAtomicPointer::Release_Store(
- reinterpret_cast<Map**>(object->address()),
+ slot.StoreHeapObject(target);
+ object->map_slot().Release_Store(
MapWord::FromForwardingAddress(target).ToMap());
return Heap::InToSpace(target) ? KEEP_SLOT : REMOVE_SLOT;
}
- Map* map = first_word.ToMap();
- SlotCallbackResult result = EvacuateObjectDefault(
- map, reinterpret_cast<HeapObjectReference**>(slot), first,
- first->SizeFromMap(map));
- base::AsAtomicPointer::Release_Store(
- reinterpret_cast<Map**>(object->address()),
- MapWord::FromForwardingAddress(*slot).ToMap());
+ Map map = first_word.ToMap();
+ SlotCallbackResult result =
+ EvacuateObjectDefault(map, slot, first, first->SizeFromMap(map));
+ object->map_slot().Release_Store(
+ MapWord::FromForwardingAddress(slot.ToHeapObject()).ToMap());
return result;
}
- return EvacuateObjectDefault(
- map, reinterpret_cast<HeapObjectReference**>(slot), object, object_size);
+ return EvacuateObjectDefault(map, slot, object, object_size);
}
-SlotCallbackResult Scavenger::EvacuateObject(HeapObjectReference** slot,
- Map* map, HeapObject* source) {
+template <typename THeapObjectSlot>
+SlotCallbackResult Scavenger::EvacuateObject(THeapObjectSlot slot, Map map,
+ HeapObject source) {
+ static_assert(std::is_same<THeapObjectSlot, FullHeapObjectSlot>::value ||
+ std::is_same<THeapObjectSlot, HeapObjectSlot>::value,
+ "Only FullHeapObjectSlot and HeapObjectSlot are expected here");
SLOW_DCHECK(Heap::InFromSpace(source));
SLOW_DCHECK(!MapWord::FromMap(map).IsForwardingAddress());
int size = source->SizeFromMap(map);
@@ -349,21 +369,24 @@ SlotCallbackResult Scavenger::EvacuateObject(HeapObjectReference** slot,
case kVisitThinString:
// At the moment we don't allow weak pointers to thin strings.
DCHECK(!(*slot)->IsWeak());
- return EvacuateThinString(map, reinterpret_cast<HeapObject**>(slot),
- reinterpret_cast<ThinString*>(source), size);
+ return EvacuateThinString(map, slot, ThinString::unchecked_cast(source),
+ size);
case kVisitShortcutCandidate:
DCHECK(!(*slot)->IsWeak());
// At the moment we don't allow weak pointers to cons strings.
return EvacuateShortcutCandidate(
- map, reinterpret_cast<HeapObject**>(slot),
- reinterpret_cast<ConsString*>(source), size);
+ map, slot, ConsString::unchecked_cast(source), size);
default:
return EvacuateObjectDefault(map, slot, source, size);
}
}
-SlotCallbackResult Scavenger::ScavengeObject(HeapObjectReference** p,
- HeapObject* object) {
+template <typename THeapObjectSlot>
+SlotCallbackResult Scavenger::ScavengeObject(THeapObjectSlot p,
+ HeapObject object) {
+ static_assert(std::is_same<THeapObjectSlot, FullHeapObjectSlot>::value ||
+ std::is_same<THeapObjectSlot, HeapObjectSlot>::value,
+ "Only FullHeapObjectSlot and HeapObjectSlot are expected here");
DCHECK(Heap::InFromSpace(object));
// Synchronized load that consumes the publishing CAS of MigrateObject.
@@ -372,36 +395,44 @@ SlotCallbackResult Scavenger::ScavengeObject(HeapObjectReference** p,
// If the first word is a forwarding address, the object has already been
// copied.
if (first_word.IsForwardingAddress()) {
- HeapObject* dest = first_word.ToForwardingAddress();
+ HeapObject dest = first_word.ToForwardingAddress();
DCHECK(Heap::InFromSpace(*p));
if ((*p)->IsWeak()) {
- *p = HeapObjectReference::Weak(dest);
+ p.store(HeapObjectReference::Weak(dest));
} else {
DCHECK((*p)->IsStrong());
- *p = HeapObjectReference::Strong(dest);
+ p.store(HeapObjectReference::Strong(dest));
}
- DCHECK(Heap::InToSpace(dest) || !Heap::InNewSpace((dest)));
+ DCHECK_IMPLIES(Heap::InNewSpace(dest),
+ (Heap::InToSpace(dest) ||
+ MemoryChunk::FromHeapObject(dest)->owner()->identity() ==
+ NEW_LO_SPACE));
+
return Heap::InToSpace(dest) ? KEEP_SLOT : REMOVE_SLOT;
}
- Map* map = first_word.ToMap();
+ Map map = first_word.ToMap();
// AllocationMementos are unrooted and shouldn't survive a scavenge
DCHECK_NE(ReadOnlyRoots(heap()).allocation_memento_map(), map);
// Call the slow part of scavenge object.
return EvacuateObject(p, map, object);
}
-SlotCallbackResult Scavenger::CheckAndScavengeObject(Heap* heap,
- Address slot_address) {
- MaybeObject** slot = reinterpret_cast<MaybeObject**>(slot_address);
- MaybeObject* object = *slot;
+template <typename TSlot>
+SlotCallbackResult Scavenger::CheckAndScavengeObject(Heap* heap, TSlot slot) {
+ static_assert(
+ std::is_same<TSlot, FullMaybeObjectSlot>::value ||
+ std::is_same<TSlot, MaybeObjectSlot>::value,
+ "Only FullMaybeObjectSlot and MaybeObjectSlot are expected here");
+ using THeapObjectSlot = typename TSlot::THeapObjectSlot;
+ MaybeObject object = *slot;
if (Heap::InFromSpace(object)) {
- HeapObject* heap_object = object->GetHeapObject();
- DCHECK(heap_object->IsHeapObject());
+ HeapObject heap_object = object->GetHeapObject();
- SlotCallbackResult result = ScavengeObject(
- reinterpret_cast<HeapObjectReference**>(slot), heap_object);
- DCHECK_IMPLIES(result == REMOVE_SLOT, !Heap::InNewSpace(*slot));
+ SlotCallbackResult result =
+ ScavengeObject(THeapObjectSlot(slot), heap_object);
+ DCHECK_IMPLIES(result == REMOVE_SLOT,
+ !heap->IsInYoungGeneration((*slot)->GetHeapObject()));
return result;
} else if (Heap::InToSpace(object)) {
// Already updated slot. This can happen when processing of the work list
@@ -413,28 +444,55 @@ SlotCallbackResult Scavenger::CheckAndScavengeObject(Heap* heap,
return REMOVE_SLOT;
}
-void ScavengeVisitor::VisitPointers(HeapObject* host, Object** start,
- Object** end) {
- for (Object** p = start; p < end; p++) {
- Object* object = *p;
- if (!Heap::InNewSpace(object)) continue;
- scavenger_->ScavengeObject(reinterpret_cast<HeapObjectReference**>(p),
- reinterpret_cast<HeapObject*>(object));
+void ScavengeVisitor::VisitPointers(HeapObject host, ObjectSlot start,
+ ObjectSlot end) {
+ return VisitPointersImpl(host, start, end);
+}
+
+void ScavengeVisitor::VisitPointers(HeapObject host, MaybeObjectSlot start,
+ MaybeObjectSlot end) {
+ return VisitPointersImpl(host, start, end);
+}
+
+void ScavengeVisitor::VisitCodeTarget(Code host, RelocInfo* rinfo) {
+ Code target = Code::GetCodeFromTargetAddress(rinfo->target_address());
+#ifdef DEBUG
+ Code old_target = target;
+#endif
+ FullObjectSlot slot(&target);
+ VisitHeapObjectImpl(slot, target);
+ // Code objects are never in new-space, so the slot contents must not change.
+ DCHECK_EQ(old_target, target);
+}
+
+void ScavengeVisitor::VisitEmbeddedPointer(Code host, RelocInfo* rinfo) {
+ HeapObject heap_object = rinfo->target_object();
+#ifdef DEBUG
+ HeapObject old_heap_object = heap_object;
+#endif
+ FullObjectSlot slot(&heap_object);
+ VisitHeapObjectImpl(slot, heap_object);
+ // We don't embed new-space objects into code, so the slot contents must not
+ // change.
+ DCHECK_EQ(old_heap_object, heap_object);
+}
+
+template <typename TSlot>
+void ScavengeVisitor::VisitHeapObjectImpl(TSlot slot, HeapObject heap_object) {
+ if (Heap::InNewSpace(heap_object)) {
+ scavenger_->ScavengeObject(HeapObjectSlot(slot), heap_object);
}
}
-void ScavengeVisitor::VisitPointers(HeapObject* host, MaybeObject** start,
- MaybeObject** end) {
- for (MaybeObject** p = start; p < end; p++) {
- MaybeObject* object = *p;
- if (!Heap::InNewSpace(object)) continue;
- // Treat the weak reference as strong.
- HeapObject* heap_object;
- if (object->GetHeapObject(&heap_object)) {
- scavenger_->ScavengeObject(reinterpret_cast<HeapObjectReference**>(p),
- heap_object);
- } else {
- UNREACHABLE();
+template <typename TSlot>
+void ScavengeVisitor::VisitPointersImpl(HeapObject host, TSlot start,
+ TSlot end) {
+ for (TSlot slot = start; slot < end; ++slot) {
+ typename TSlot::TObject object = *slot;
+ HeapObject heap_object;
+ // Treat weak references as strong.
+ if (object.GetHeapObject(&heap_object)) {
+ VisitHeapObjectImpl(slot, heap_object);
}
}
}
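
Every templated Scavenger method in this file repeats the same static_assert guard, constraining the slot parameter to exactly the two slot flavors so that a stray pointer type fails at compile time rather than misbehaving at runtime. A compile-time sketch of the idiom with placeholder types:

    #include <type_traits>

    struct FullHeapObjectSlot {};
    struct HeapObjectSlot {};
    struct HeapObject {};

    template <typename THeapObjectSlot>
    void ScavengeSlot(THeapObjectSlot slot, HeapObject object) {
      static_assert(
          std::is_same<THeapObjectSlot, FullHeapObjectSlot>::value ||
              std::is_same<THeapObjectSlot, HeapObjectSlot>::value,
          "Only FullHeapObjectSlot and HeapObjectSlot are expected here");
      // Placeholder body: copy |object|, write the forwarding ref to |slot|.
      (void)slot;
      (void)object;
    }

    int main() {
      ScavengeSlot(FullHeapObjectSlot{}, HeapObject{});  // ok
      ScavengeSlot(HeapObjectSlot{}, HeapObject{});      // ok
      // ScavengeSlot(42, HeapObject{});  // rejected by the static_assert
    }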
diff --git a/deps/v8/src/heap/scavenger.cc b/deps/v8/src/heap/scavenger.cc
index 4c63ed099a..76939a87e8 100644
--- a/deps/v8/src/heap/scavenger.cc
+++ b/deps/v8/src/heap/scavenger.cc
@@ -76,36 +76,48 @@ class IterateAndScavengePromotedObjectsVisitor final : public ObjectVisitor {
bool record_slots)
: heap_(heap), scavenger_(scavenger), record_slots_(record_slots) {}
- inline void VisitPointers(HeapObject* host, Object** start,
- Object** end) final {
- for (Object** slot = start; slot < end; ++slot) {
- Object* target = *slot;
- DCHECK(!HasWeakHeapObjectTag(target));
- if (target->IsHeapObject()) {
- HandleSlot(host, reinterpret_cast<Address>(slot),
- HeapObject::cast(target));
- }
- }
+ V8_INLINE void VisitPointers(HeapObject host, ObjectSlot start,
+ ObjectSlot end) final {
+ VisitPointersImpl(host, start, end);
+ }
+
+ V8_INLINE void VisitPointers(HeapObject host, MaybeObjectSlot start,
+ MaybeObjectSlot end) final {
+ VisitPointersImpl(host, start, end);
}
- inline void VisitPointers(HeapObject* host, MaybeObject** start,
- MaybeObject** end) final {
- // Treat weak references as strong. TODO(marja): Proper weakness handling in
- // the young generation.
- for (MaybeObject** slot = start; slot < end; ++slot) {
- MaybeObject* target = *slot;
- HeapObject* heap_object;
- if (target->GetHeapObject(&heap_object)) {
- HandleSlot(host, reinterpret_cast<Address>(slot), heap_object);
+ V8_INLINE void VisitCodeTarget(Code host, RelocInfo* rinfo) final {
+ Code target = Code::GetCodeFromTargetAddress(rinfo->target_address());
+ HandleSlot(host, FullHeapObjectSlot(&target), target);
+ }
+ V8_INLINE void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) final {
+ HeapObject heap_object = rinfo->target_object();
+ HandleSlot(host, FullHeapObjectSlot(&heap_object), heap_object);
+ }
+
+ private:
+ template <typename TSlot>
+ V8_INLINE void VisitPointersImpl(HeapObject host, TSlot start, TSlot end) {
+ using THeapObjectSlot = typename TSlot::THeapObjectSlot;
+ // Treat weak references as strong.
+ // TODO(marja): Proper weakness handling in the young generation.
+ for (TSlot slot = start; slot < end; ++slot) {
+ typename TSlot::TObject object = *slot;
+ HeapObject heap_object;
+ if (object.GetHeapObject(&heap_object)) {
+ HandleSlot(host, THeapObjectSlot(slot), heap_object);
}
}
}
- inline void HandleSlot(HeapObject* host, Address slot_address,
- HeapObject* target) {
- HeapObjectReference** slot =
- reinterpret_cast<HeapObjectReference**>(slot_address);
- scavenger_->PageMemoryFence(reinterpret_cast<MaybeObject*>(target));
+ template <typename THeapObjectSlot>
+ V8_INLINE void HandleSlot(HeapObject host, THeapObjectSlot slot,
+ HeapObject target) {
+ static_assert(
+ std::is_same<THeapObjectSlot, FullHeapObjectSlot>::value ||
+ std::is_same<THeapObjectSlot, HeapObjectSlot>::value,
+ "Only FullHeapObjectSlot and HeapObjectSlot are expected here");
+ scavenger_->PageMemoryFence(MaybeObject::FromObject(target));
if (Heap::InFromSpace(target)) {
SlotCallbackResult result = scavenger_->ScavengeObject(slot, target);
@@ -115,31 +127,31 @@ class IterateAndScavengePromotedObjectsVisitor final : public ObjectVisitor {
if (result == KEEP_SLOT) {
SLOW_DCHECK(target->IsHeapObject());
- RememberedSet<OLD_TO_NEW>::Insert(Page::FromAddress(slot_address),
- slot_address);
+ RememberedSet<OLD_TO_NEW>::Insert(MemoryChunk::FromHeapObject(host),
+ slot.address());
}
SLOW_DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(
HeapObject::cast(target)));
} else if (record_slots_ && MarkCompactCollector::IsOnEvacuationCandidate(
HeapObject::cast(target))) {
- heap_->mark_compact_collector()->RecordSlot(host, slot, target);
+ heap_->mark_compact_collector()->RecordSlot(host, ObjectSlot(slot),
+ target);
}
}
- private:
Heap* const heap_;
Scavenger* const scavenger_;
const bool record_slots_;
};
-static bool IsUnscavengedHeapObject(Heap* heap, Object** p) {
+static bool IsUnscavengedHeapObject(Heap* heap, FullObjectSlot p) {
return Heap::InFromSpace(*p) &&
!HeapObject::cast(*p)->map_word().IsForwardingAddress();
}
class ScavengeWeakObjectRetainer : public WeakObjectRetainer {
public:
- Object* RetainAs(Object* object) override {
+ Object RetainAs(Object object) override {
if (!Heap::InFromSpace(object)) {
return object;
}
@@ -148,7 +160,7 @@ class ScavengeWeakObjectRetainer : public WeakObjectRetainer {
if (map_word.IsForwardingAddress()) {
return map_word.ToForwardingAddress();
}
- return nullptr;
+ return Object();
}
};
@@ -156,13 +168,14 @@ ScavengerCollector::ScavengerCollector(Heap* heap)
: isolate_(heap->isolate()), heap_(heap), parallel_scavenge_semaphore_(0) {}
void ScavengerCollector::CollectGarbage() {
+ DCHECK(surviving_new_large_objects_.empty());
ItemParallelJob job(isolate_->cancelable_task_manager(),
&parallel_scavenge_semaphore_);
const int kMainThreadId = 0;
Scavenger* scavengers[kMaxScavengerTasks];
const bool is_logging = isolate_->LogObjectRelocation();
const int num_scavenge_tasks = NumberOfScavengeTasks();
- OneshotBarrier barrier;
+ OneshotBarrier barrier(base::TimeDelta::FromMilliseconds(kMaxWaitTimeMs));
Scavenger::CopiedList copied_list(num_scavenge_tasks);
Scavenger::PromotionList promotion_list(num_scavenge_tasks);
for (int i = 0; i < num_scavenge_tasks; i++) {
@@ -255,7 +268,7 @@ void ScavengerCollector::CollectGarbage() {
// going to be unmapped.
for (Page* p :
PageRange(heap_->new_space()->from_space().first_page(), nullptr)) {
- heap_->concurrent_marking()->ClearLiveness(p);
+ heap_->concurrent_marking()->ClearMemoryChunkData(p);
}
}
@@ -271,6 +284,11 @@ void ScavengerCollector::CollectGarbage() {
}
heap_->array_buffer_collector()->FreeAllocations();
+ // Since we promote all surviving large objects immediately, all remaining
+ // large objects must be dead.
+ // TODO(hpayer): Don't free all as soon as we have an intermediate generation.
+ heap_->new_lo_space()->FreeAllObjects();
+
RememberedSet<OLD_TO_NEW>::IterateMemoryChunks(heap_, [](MemoryChunk* chunk) {
if (chunk->SweepingDone()) {
RememberedSet<OLD_TO_NEW>::FreeEmptyBuckets(chunk);
@@ -281,24 +299,20 @@ void ScavengerCollector::CollectGarbage() {
// Update how much has survived scavenge.
heap_->IncrementYoungSurvivorsCounter(heap_->SurvivedNewSpaceObjectSize());
-
- // Scavenger may find new wrappers by iterating objects promoted onto a black
- // page.
- heap_->local_embedder_heap_tracer()->RegisterWrappersWithRemoteTracer();
}
void ScavengerCollector::HandleSurvivingNewLargeObjects() {
for (SurvivingNewLargeObjectMapEntry update_info :
surviving_new_large_objects_) {
- HeapObject* object = update_info.first;
- Map* map = update_info.second;
+ HeapObject object = update_info.first;
+ Map map = update_info.second;
// Order is important here. We have to re-install the map to have access
// to meta-data like size during page promotion.
object->set_map_word(MapWord::FromMap(map));
LargePage* page = LargePage::FromHeapObject(object);
heap_->lo_space()->PromoteNewLargeObject(page);
}
- DCHECK(heap_->new_lo_space()->IsEmpty());
+ surviving_new_large_objects_.clear();
}
void ScavengerCollector::MergeSurvivingNewLargeObjects(
@@ -340,7 +354,7 @@ Scavenger::Scavenger(ScavengerCollector* collector, Heap* heap, bool is_logging,
is_incremental_marking_(heap->incremental_marking()->IsMarking()),
is_compacting_(heap->incremental_marking()->IsCompacting()) {}
-void Scavenger::IterateAndScavengePromotedObject(HeapObject* target, Map* map,
+void Scavenger::IterateAndScavengePromotedObject(HeapObject target, Map map,
int size) {
// We are not collecting slots on new space objects during mutation, thus we
// have to scan for pointers to evacuation candidates when we promote
@@ -365,18 +379,18 @@ void Scavenger::AddPageToSweeperIfNecessary(MemoryChunk* page) {
}
void Scavenger::ScavengePage(MemoryChunk* page) {
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"), "Scavenger::ScavengePage");
CodePageMemoryModificationScope memory_modification_scope(page);
- RememberedSet<OLD_TO_NEW>::Iterate(
- page,
- [this](Address addr) { return CheckAndScavengeObject(heap_, addr); },
- SlotSet::KEEP_EMPTY_BUCKETS);
+ RememberedSet<OLD_TO_NEW>::Iterate(page,
+ [this](MaybeObjectSlot addr) {
+ return CheckAndScavengeObject(heap_,
+ addr);
+ },
+ SlotSet::KEEP_EMPTY_BUCKETS);
RememberedSet<OLD_TO_NEW>::IterateTyped(
- page, [this](SlotType type, Address host_addr, Address addr) {
+ page, [=](SlotType type, Address addr) {
return UpdateTypedSlotHelper::UpdateTypedSlot(
- heap_, type, addr, [this](MaybeObject** addr) {
- return CheckAndScavengeObject(heap(),
- reinterpret_cast<Address>(addr));
+ heap_, type, addr, [this](FullMaybeObjectSlot slot) {
+ return CheckAndScavengeObject(heap(), slot);
});
});
@@ -384,7 +398,6 @@ void Scavenger::ScavengePage(MemoryChunk* page) {
}
void Scavenger::Process(OneshotBarrier* barrier) {
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"), "Scavenger::Process");
ScavengeVisitor scavenge_visitor(this);
const bool have_barrier = barrier != nullptr;
@@ -406,7 +419,7 @@ void Scavenger::Process(OneshotBarrier* barrier) {
struct PromotionListEntry entry;
while (promotion_list_.Pop(&entry)) {
- HeapObject* target = entry.heap_object;
+ HeapObject target = entry.heap_object;
DCHECK(!target->IsMap());
IterateAndScavengePromotedObject(target, entry.map, entry.size);
done = false;
@@ -428,24 +441,24 @@ void Scavenger::Finalize() {
}
void RootScavengeVisitor::VisitRootPointer(Root root, const char* description,
- Object** p) {
+ FullObjectSlot p) {
DCHECK(!HasWeakHeapObjectTag(*p));
ScavengePointer(p);
}
void RootScavengeVisitor::VisitRootPointers(Root root, const char* description,
- Object** start, Object** end) {
+ FullObjectSlot start,
+ FullObjectSlot end) {
// Copy all HeapObject pointers in [start, end)
- for (Object** p = start; p < end; p++) ScavengePointer(p);
+ for (FullObjectSlot p = start; p < end; ++p) ScavengePointer(p);
}
-void RootScavengeVisitor::ScavengePointer(Object** p) {
- Object* object = *p;
+void RootScavengeVisitor::ScavengePointer(FullObjectSlot p) {
+ Object object = *p;
DCHECK(!HasWeakHeapObjectTag(object));
if (!Heap::InNewSpace(object)) return;
- scavenger_->ScavengeObject(reinterpret_cast<HeapObjectReference**>(p),
- reinterpret_cast<HeapObject*>(object));
+ scavenger_->ScavengeObject(FullHeapObjectSlot(p), HeapObject::cast(object));
}
RootScavengeVisitor::RootScavengeVisitor(Scavenger* scavenger)
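
A detail worth calling out from the scavenger-inl.h hunks earlier: MigrateObject and HandleLargeObject now publish the forwarding address through map_slot().Release_CompareAndSwap instead of a reinterpret_cast'ed base::AsAtomicPointer. A minimal sketch of that handshake between racing scavenger tasks, modeling the map word as a plain std::atomic (names and types here are illustrative):

    #include <atomic>
    #include <cstdint>

    using MapWord = uintptr_t;

    // Returns true if this task won the race and installed the forwarding
    // address; false if another task migrated the object first. The release
    // order pairs with the acquire load that synchronized_map_word()
    // performs in ScavengeObject.
    bool TryInstallForwardingAddress(std::atomic<MapWord>* map_slot,
                                     MapWord old_map, MapWord forwarding) {
      MapWord expected = old_map;
      return map_slot->compare_exchange_strong(expected, forwarding,
                                               std::memory_order_release,
                                               std::memory_order_relaxed);
    }

    int main() {
      std::atomic<MapWord> map_word{0x1000};
      bool won = TryInstallForwardingAddress(&map_word, 0x1000, 0x2001);
      return won ? 0 : 1;
    }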
diff --git a/deps/v8/src/heap/scavenger.h b/deps/v8/src/heap/scavenger.h
index b984102c6b..0dfe44628a 100644
--- a/deps/v8/src/heap/scavenger.h
+++ b/deps/v8/src/heap/scavenger.h
@@ -22,13 +22,15 @@ enum class CopyAndForwardResult {
FAILURE
};
-using ObjectAndSize = std::pair<HeapObject*, int>;
-using SurvivingNewLargeObjectsMap = std::unordered_map<HeapObject*, Map*>;
-using SurvivingNewLargeObjectMapEntry = std::pair<HeapObject*, Map*>;
+using ObjectAndSize = std::pair<HeapObject, int>;
+using SurvivingNewLargeObjectsMap =
+ std::unordered_map<HeapObject, Map, Object::Hasher>;
+using SurvivingNewLargeObjectMapEntry = std::pair<HeapObject, Map>;
class ScavengerCollector {
public:
static const int kMaxScavengerTasks = 8;
+ static const int kMaxWaitTimeMs = 2;
explicit ScavengerCollector(Heap* heap);
@@ -53,8 +55,8 @@ class ScavengerCollector {
class Scavenger {
public:
struct PromotionListEntry {
- HeapObject* heap_object;
- Map* map;
+ HeapObject heap_object;
+ Map map;
int size;
};
@@ -65,8 +67,8 @@ class Scavenger {
View(PromotionList* promotion_list, int task_id)
: promotion_list_(promotion_list), task_id_(task_id) {}
- inline void PushRegularObject(HeapObject* object, int size);
- inline void PushLargeObject(HeapObject* object, Map* map, int size);
+ inline void PushRegularObject(HeapObject object, int size);
+ inline void PushLargeObject(HeapObject object, Map map, int size);
inline bool IsEmpty();
inline size_t LocalPushSegmentSize();
inline bool Pop(struct PromotionListEntry* entry);
@@ -82,8 +84,8 @@ class Scavenger {
: regular_object_promotion_list_(num_tasks),
large_object_promotion_list_(num_tasks) {}
- inline void PushRegularObject(int task_id, HeapObject* object, int size);
- inline void PushLargeObject(int task_id, HeapObject* object, Map* map,
+ inline void PushRegularObject(int task_id, HeapObject object, int size);
+ inline void PushLargeObject(int task_id, HeapObject object, Map map,
int size);
inline bool IsEmpty();
inline size_t LocalPushSegmentSize(int task_id);
@@ -134,59 +136,64 @@ class Scavenger {
inline Heap* heap() { return heap_; }
- inline void PageMemoryFence(MaybeObject* object);
+ inline void PageMemoryFence(MaybeObject object);
void AddPageToSweeperIfNecessary(MemoryChunk* page);
- // Potentially scavenges an object referenced from |slot_address| if it is
+ // Potentially scavenges an object referenced from |slot| if it is
// indeed a HeapObject and resides in from space.
- inline SlotCallbackResult CheckAndScavengeObject(Heap* heap,
- Address slot_address);
+ template <typename TSlot>
+ inline SlotCallbackResult CheckAndScavengeObject(Heap* heap, TSlot slot);
// Scavenges an object |object| referenced from slot |p|. |object| is required
// to be in from space.
- inline SlotCallbackResult ScavengeObject(HeapObjectReference** p,
- HeapObject* object);
+ template <typename THeapObjectSlot>
+ inline SlotCallbackResult ScavengeObject(THeapObjectSlot p,
+ HeapObject object);
// Copies |source| to |target| and sets the forwarding pointer in |source|.
- V8_INLINE bool MigrateObject(Map* map, HeapObject* source, HeapObject* target,
+ V8_INLINE bool MigrateObject(Map map, HeapObject source, HeapObject target,
int size);
V8_INLINE SlotCallbackResult
RememberedSetEntryNeeded(CopyAndForwardResult result);
- V8_INLINE CopyAndForwardResult SemiSpaceCopyObject(Map* map,
- HeapObjectReference** slot,
- HeapObject* object,
+ template <typename THeapObjectSlot>
+ V8_INLINE CopyAndForwardResult SemiSpaceCopyObject(Map map,
+ THeapObjectSlot slot,
+ HeapObject object,
int object_size);
- V8_INLINE CopyAndForwardResult PromoteObject(Map* map,
- HeapObjectReference** slot,
- HeapObject* object,
+ template <typename THeapObjectSlot>
+ V8_INLINE CopyAndForwardResult PromoteObject(Map map, THeapObjectSlot slot,
+ HeapObject object,
int object_size);
- V8_INLINE SlotCallbackResult EvacuateObject(HeapObjectReference** slot,
- Map* map, HeapObject* source);
+ template <typename THeapObjectSlot>
+ V8_INLINE SlotCallbackResult EvacuateObject(THeapObjectSlot slot, Map map,
+ HeapObject source);
- V8_INLINE bool HandleLargeObject(Map* map, HeapObject* object,
- int object_size);
+ V8_INLINE bool HandleLargeObject(Map map, HeapObject object, int object_size);
// Different cases for object evacuation.
- V8_INLINE SlotCallbackResult EvacuateObjectDefault(Map* map,
- HeapObjectReference** slot,
- HeapObject* object,
+ template <typename THeapObjectSlot>
+ V8_INLINE SlotCallbackResult EvacuateObjectDefault(Map map,
+ THeapObjectSlot slot,
+ HeapObject object,
int object_size);
- inline SlotCallbackResult EvacuateThinString(Map* map, HeapObject** slot,
- ThinString* object,
+ template <typename THeapObjectSlot>
+ inline SlotCallbackResult EvacuateThinString(Map map, THeapObjectSlot slot,
+ ThinString object,
int object_size);
- inline SlotCallbackResult EvacuateShortcutCandidate(Map* map,
- HeapObject** slot,
- ConsString* object,
+ template <typename THeapObjectSlot>
+ inline SlotCallbackResult EvacuateShortcutCandidate(Map map,
+ THeapObjectSlot slot,
+ ConsString object,
int object_size);
- void IterateAndScavengePromotedObject(HeapObject* target, Map* map, int size);
+ void IterateAndScavengePromotedObject(HeapObject target, Map map, int size);
static inline bool ContainsOnlyData(VisitorId visitor_id);
@@ -214,12 +221,13 @@ class RootScavengeVisitor final : public RootVisitor {
public:
explicit RootScavengeVisitor(Scavenger* scavenger);
- void VisitRootPointer(Root root, const char* description, Object** p) final;
- void VisitRootPointers(Root root, const char* description, Object** start,
- Object** end) final;
+ void VisitRootPointer(Root root, const char* description,
+ FullObjectSlot p) final;
+ void VisitRootPointers(Root root, const char* description,
+ FullObjectSlot start, FullObjectSlot end) final;
private:
- void ScavengePointer(Object** p);
+ void ScavengePointer(FullObjectSlot p);
Scavenger* const scavenger_;
};
@@ -228,12 +236,22 @@ class ScavengeVisitor final : public NewSpaceVisitor<ScavengeVisitor> {
public:
explicit ScavengeVisitor(Scavenger* scavenger);
- V8_INLINE void VisitPointers(HeapObject* host, Object** start,
- Object** end) final;
- V8_INLINE void VisitPointers(HeapObject* host, MaybeObject** start,
- MaybeObject** end) final;
+ V8_INLINE void VisitPointers(HeapObject host, ObjectSlot start,
+ ObjectSlot end) final;
+
+ V8_INLINE void VisitPointers(HeapObject host, MaybeObjectSlot start,
+ MaybeObjectSlot end) final;
+
+ V8_INLINE void VisitCodeTarget(Code host, RelocInfo* rinfo) final;
+ V8_INLINE void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) final;
private:
+ template <typename TSlot>
+ V8_INLINE void VisitHeapObjectImpl(TSlot slot, HeapObject heap_object);
+
+ template <typename TSlot>
+ V8_INLINE void VisitPointersImpl(HeapObject host, TSlot start, TSlot end);
+
Scavenger* const scavenger_;
};
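
A knock-on effect of passing HeapObject and Map by value shows up in the aliases above: SurvivingNewLargeObjectsMap must now name a hasher (Object::Hasher) explicitly, since std::hash has no specialization for the new value types. A stand-alone sketch of the same shape, with hypothetical stand-ins for the real classes:

    #include <cstddef>
    #include <cstdint>
    #include <unordered_map>

    struct HeapObject {
      uintptr_t ptr_;
      bool operator==(const HeapObject& o) const { return ptr_ == o.ptr_; }
    };
    struct Map {
      uintptr_t ptr_;
    };

    // Plays the role of Object::Hasher: hash the tagged word directly.
    struct ObjectHasher {
      std::size_t operator()(const HeapObject& o) const {
        return static_cast<std::size_t>(o.ptr_ >> 3);  // drop low tag bits
      }
    };

    using SurvivingNewLargeObjectsMap =
        std::unordered_map<HeapObject, Map, ObjectHasher>;

    int main() {
      SurvivingNewLargeObjectsMap surviving;
      surviving.insert({HeapObject{0x12340}, Map{0x5678}});
      return surviving.count(HeapObject{0x12340}) == 1 ? 0 : 1;
    }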
diff --git a/deps/v8/src/heap/setup-heap-internal.cc b/deps/v8/src/heap/setup-heap-internal.cc
index 5790b82907..14f2842d83 100644
--- a/deps/v8/src/heap/setup-heap-internal.cc
+++ b/deps/v8/src/heap/setup-heap-internal.cc
@@ -5,7 +5,6 @@
#include "src/setup-isolate.h"
#include "src/accessors.h"
-#include "src/ast/context-slot-cache.h"
#include "src/compilation-cache.h"
#include "src/contexts.h"
#include "src/heap-symbols.h"
@@ -17,18 +16,25 @@
#include "src/lookup-cache.h"
#include "src/objects-inl.h"
#include "src/objects/arguments.h"
+#include "src/objects/cell-inl.h"
#include "src/objects/data-handler.h"
#include "src/objects/debug-objects.h"
#include "src/objects/descriptor-array.h"
#include "src/objects/dictionary.h"
+#include "src/objects/foreign.h"
+#include "src/objects/heap-number.h"
+#include "src/objects/instance-type-inl.h"
+#include "src/objects/js-generator.h"
+#include "src/objects/js-weak-refs.h"
#include "src/objects/literal-objects-inl.h"
#include "src/objects/map.h"
-#include "src/objects/microtask-queue.h"
#include "src/objects/microtask.h"
#include "src/objects/module.h"
+#include "src/objects/oddball-inl.h"
#include "src/objects/promise.h"
#include "src/objects/script.h"
#include "src/objects/shared-function-info.h"
+#include "src/objects/smi.h"
#include "src/objects/stack-frame-info.h"
#include "src/objects/string.h"
#include "src/regexp/jsregexp.h"
@@ -99,7 +105,7 @@ AllocationResult Heap::AllocateMap(InstanceType instance_type,
!Map::CanHaveFastTransitionableElementsKind(instance_type),
IsDictionaryElementsKind(elements_kind) ||
IsTerminalElementsKind(elements_kind));
- HeapObject* result = nullptr;
+ HeapObject result;
// JSObjects have maps with a mutable prototype_validity_cell, so they cannot
// go in RO_SPACE.
AllocationResult allocation =
@@ -108,7 +114,7 @@ AllocationResult Heap::AllocateMap(InstanceType instance_type,
result->set_map_after_allocation(ReadOnlyRoots(this).meta_map(),
SKIP_WRITE_BARRIER);
- Map* map = isolate()->factory()->InitializeMap(
+ Map map = isolate()->factory()->InitializeMap(
Map::cast(result), instance_type, instance_size, elements_kind,
inobject_properties);
@@ -117,13 +123,14 @@ AllocationResult Heap::AllocateMap(InstanceType instance_type,
AllocationResult Heap::AllocatePartialMap(InstanceType instance_type,
int instance_size) {
- Object* result = nullptr;
+ Object result;
AllocationResult allocation = AllocateRaw(Map::kSize, RO_SPACE);
if (!allocation.To(&result)) return allocation;
// Map::cast cannot be used due to uninitialized map field.
- Map* map = reinterpret_cast<Map*>(result);
+ Map map = Map::unchecked_cast(result);
map->set_map_after_allocation(
- reinterpret_cast<Map*>(root(RootIndex::kMetaMap)), SKIP_WRITE_BARRIER);
+ Map::unchecked_cast(isolate()->root(RootIndex::kMetaMap)),
+ SKIP_WRITE_BARRIER);
map->set_instance_type(instance_type);
map->set_instance_size(instance_size);
// Initialize to only containing tagged fields.
@@ -147,11 +154,11 @@ AllocationResult Heap::AllocatePartialMap(InstanceType instance_type,
return map;
}
-void Heap::FinalizePartialMap(Map* map) {
+void Heap::FinalizePartialMap(Map map) {
ReadOnlyRoots roots(this);
map->set_dependent_code(DependentCode::cast(roots.empty_weak_fixed_array()));
- map->set_raw_transitions(MaybeObject::FromSmi(Smi::kZero));
- map->set_instance_descriptors(roots.empty_descriptor_array());
+ map->set_raw_transitions(MaybeObject::FromSmi(Smi::zero()));
+ map->SetInstanceDescriptors(isolate(), roots.empty_descriptor_array(), 0);
if (FLAG_unbox_double_fields) {
map->set_layout_descriptor(LayoutDescriptor::FastPointerLayout());
}
@@ -159,10 +166,10 @@ void Heap::FinalizePartialMap(Map* map) {
map->set_constructor_or_backpointer(roots.null_value());
}
-AllocationResult Heap::Allocate(Map* map, AllocationSpace space) {
+AllocationResult Heap::Allocate(Map map, AllocationSpace space) {
DCHECK(map->instance_type() != MAP_TYPE);
int size = map->instance_size();
- HeapObject* result = nullptr;
+ HeapObject result;
AllocationResult allocation = AllocateRaw(size, space);
if (!allocation.To(&result)) return allocation;
// New space objects are allocated white.
@@ -176,7 +183,7 @@ AllocationResult Heap::AllocateEmptyFixedTypedArray(
ExternalArrayType array_type) {
int size = OBJECT_POINTER_ALIGN(FixedTypedArrayBase::kDataOffset);
- HeapObject* object = nullptr;
+ HeapObject object;
AllocationResult allocation = AllocateRaw(
size, RO_SPACE,
array_type == kExternalFloat64Array ? kDoubleAligned : kWordAligned);
@@ -185,7 +192,7 @@ AllocationResult Heap::AllocateEmptyFixedTypedArray(
object->set_map_after_allocation(
ReadOnlyRoots(this).MapForFixedTypedArray(array_type),
SKIP_WRITE_BARRIER);
- FixedTypedArrayBase* elements = FixedTypedArrayBase::cast(object);
+ FixedTypedArrayBase elements = FixedTypedArrayBase::cast(object);
elements->set_base_pointer(elements, SKIP_WRITE_BARRIER);
elements->set_external_pointer(
reinterpret_cast<void*>(
@@ -196,13 +203,13 @@ AllocationResult Heap::AllocateEmptyFixedTypedArray(
}
bool Heap::CreateInitialMaps() {
- HeapObject* obj = nullptr;
+ HeapObject obj;
{
AllocationResult allocation = AllocatePartialMap(MAP_TYPE, Map::kSize);
if (!allocation.To(&obj)) return false;
}
// Map::cast cannot be used due to uninitialized map field.
- Map* new_meta_map = reinterpret_cast<Map*>(obj);
+ Map new_meta_map = Map::unchecked_cast(obj);
set_meta_map(new_meta_map);
new_meta_map->set_map_after_allocation(new_meta_map);
@@ -210,7 +217,7 @@ bool Heap::CreateInitialMaps() {
{ // Partial map allocation
#define ALLOCATE_PARTIAL_MAP(instance_type, size, field_name) \
{ \
- Map* map; \
+ Map map; \
if (!AllocatePartialMap((instance_type), (size)).To(&map)) return false; \
set_##field_name##_map(map); \
}
@@ -290,9 +297,9 @@ bool Heap::CreateInitialMaps() {
// Set up the struct maps first (needed for the EnumCache).
for (unsigned i = 0; i < arraysize(struct_table); i++) {
const StructTable& entry = struct_table[i];
- Map* map;
+ Map map;
if (!AllocatePartialMap(entry.type, entry.size).To(&map)) return false;
- roots_[entry.index] = map;
+ roots_table()[entry.index] = map->ptr();
}
// Allocate the empty enum cache.
@@ -306,19 +313,14 @@ bool Heap::CreateInitialMaps() {
// Allocate the empty descriptor array.
{
- STATIC_ASSERT(DescriptorArray::kFirstIndex != 0);
- int length = DescriptorArray::kFirstIndex;
- int size = WeakFixedArray::SizeFor(length);
+ int size = DescriptorArray::SizeFor(0);
if (!AllocateRaw(size, RO_SPACE).To(&obj)) return false;
obj->set_map_after_allocation(roots.descriptor_array_map(),
SKIP_WRITE_BARRIER);
- DescriptorArray::cast(obj)->set_length(length);
+ DescriptorArray array = DescriptorArray::cast(obj);
+ array->Initialize(roots.empty_enum_cache(), roots.undefined_value(), 0, 0);
}
set_empty_descriptor_array(DescriptorArray::cast(obj));
- DescriptorArray::cast(obj)->SetNumberOfDescriptors(0);
- WeakFixedArray::cast(obj)->Set(
- DescriptorArray::kEnumCacheIndex,
- MaybeObject::FromObject(roots.empty_enum_cache()));
// Fix the instance_descriptors for the existing maps.
FinalizePartialMap(roots.meta_map());
@@ -334,16 +336,16 @@ bool Heap::CreateInitialMaps() {
FinalizePartialMap(roots.the_hole_map());
for (unsigned i = 0; i < arraysize(struct_table); ++i) {
const StructTable& entry = struct_table[i];
- FinalizePartialMap(Map::cast(roots_[entry.index]));
+ FinalizePartialMap(Map::cast(Object(roots_table()[entry.index])));
}
{ // Map allocation
#define ALLOCATE_MAP(instance_type, size, field_name) \
- { \
- Map* map; \
- if (!AllocateMap((instance_type), size).To(&map)) return false; \
- set_##field_name##_map(map); \
- }
+ { \
+ Map map; \
+ if (!AllocateMap((instance_type), size).To(&map)) return false; \
+ set_##field_name##_map(map); \
+ }
#define ALLOCATE_VARSIZE_MAP(instance_type, field_name) \
ALLOCATE_MAP(instance_type, kVariableSizeSentinel, field_name)
@@ -380,24 +382,21 @@ bool Heap::CreateInitialMaps() {
for (unsigned i = 0; i < arraysize(string_type_table); i++) {
const StringTypeTable& entry = string_type_table[i];
- {
- AllocationResult allocation = AllocateMap(entry.type, entry.size);
- if (!allocation.To(&obj)) return false;
- }
- Map* map = Map::cast(obj);
+ Map map;
+ if (!AllocateMap(entry.type, entry.size).To(&map)) return false;
map->SetConstructorFunctionIndex(Context::STRING_FUNCTION_INDEX);
// Mark cons string maps as unstable, because their objects can change
// maps during GC.
if (StringShape(entry.type).IsCons()) map->mark_unstable();
- roots_[entry.index] = map;
+ roots_table()[entry.index] = map->ptr();
}
{ // Create a separate external one byte string map for native sources.
+ Map map;
AllocationResult allocation =
AllocateMap(UNCACHED_EXTERNAL_ONE_BYTE_STRING_TYPE,
ExternalOneByteString::kUncachedSize);
- if (!allocation.To(&obj)) return false;
- Map* map = Map::cast(obj);
+ if (!allocation.To(&map)) return false;
map->SetConstructorFunctionIndex(Context::STRING_FUNCTION_INDEX);
set_native_source_string_map(map);
}
@@ -411,6 +410,8 @@ bool Heap::CreateInitialMaps() {
ALLOCATE_VARSIZE_MAP(PROPERTY_ARRAY_TYPE, property_array)
ALLOCATE_VARSIZE_MAP(SMALL_ORDERED_HASH_MAP_TYPE, small_ordered_hash_map)
ALLOCATE_VARSIZE_MAP(SMALL_ORDERED_HASH_SET_TYPE, small_ordered_hash_set)
+ ALLOCATE_VARSIZE_MAP(SMALL_ORDERED_NAME_DICTIONARY_TYPE,
+ small_ordered_name_dictionary)
#define ALLOCATE_FIXED_TYPED_ARRAY_MAP(Type, type, TYPE, ctype) \
ALLOCATE_VARSIZE_MAP(FIXED_##TYPE##_ARRAY_TYPE, fixed_##type##_array)
@@ -425,7 +426,7 @@ bool Heap::CreateInitialMaps() {
ALLOCATE_MAP(CELL_TYPE, Cell::kSize, cell);
{
// The invalid_prototype_validity_cell is needed for JSObject maps.
- Smi* value = Smi::FromInt(Map::kPrototypeChainInvalid);
+ Smi value = Smi::FromInt(Map::kPrototypeChainInvalid);
AllocationResult alloc = AllocateRaw(Cell::kSize, OLD_SPACE);
if (!alloc.To(&obj)) return false;
obj->set_map_after_allocation(roots.cell_map(), SKIP_WRITE_BARRIER);
@@ -434,8 +435,8 @@ bool Heap::CreateInitialMaps() {
}
ALLOCATE_MAP(PROPERTY_CELL_TYPE, PropertyCell::kSize, global_property_cell)
- ALLOCATE_MAP(FILLER_TYPE, kPointerSize, one_pointer_filler)
- ALLOCATE_MAP(FILLER_TYPE, 2 * kPointerSize, two_pointer_filler)
+ ALLOCATE_MAP(FILLER_TYPE, kTaggedSize, one_pointer_filler)
+ ALLOCATE_MAP(FILLER_TYPE, 2 * kTaggedSize, two_pointer_filler)
// The "no closures" and "one closure" FeedbackCell maps need
// to be marked unstable because their objects can change maps.
@@ -444,12 +445,15 @@ bool Heap::CreateInitialMaps() {
ALLOCATE_MAP(FEEDBACK_CELL_TYPE, FeedbackCell::kSize, one_closure_cell)
roots.one_closure_cell_map()->mark_unstable();
ALLOCATE_MAP(FEEDBACK_CELL_TYPE, FeedbackCell::kSize, many_closures_cell)
+ ALLOCATE_MAP(FEEDBACK_CELL_TYPE, FeedbackCell::kSize, no_feedback_cell)
+ roots.no_feedback_cell_map()->mark_unstable();
ALLOCATE_VARSIZE_MAP(TRANSITION_ARRAY_TYPE, transition_array)
ALLOCATE_VARSIZE_MAP(HASH_TABLE_TYPE, hash_table)
ALLOCATE_VARSIZE_MAP(ORDERED_HASH_MAP_TYPE, ordered_hash_map)
ALLOCATE_VARSIZE_MAP(ORDERED_HASH_SET_TYPE, ordered_hash_set)
+ ALLOCATE_VARSIZE_MAP(ORDERED_NAME_DICTIONARY_TYPE, ordered_name_dictionary)
ALLOCATE_VARSIZE_MAP(NAME_DICTIONARY_TYPE, name_dictionary)
ALLOCATE_VARSIZE_MAP(GLOBAL_DICTIONARY_TYPE, global_dictionary)
ALLOCATE_VARSIZE_MAP(NUMBER_DICTIONARY_TYPE, number_dictionary)
@@ -457,6 +461,7 @@ bool Heap::CreateInitialMaps() {
simple_number_dictionary)
ALLOCATE_VARSIZE_MAP(STRING_TABLE_TYPE, string_table)
+ ALLOCATE_VARSIZE_MAP(EMBEDDER_DATA_ARRAY_TYPE, embedder_data_array)
ALLOCATE_VARSIZE_MAP(EPHEMERON_HASH_TABLE_TYPE, ephemeron_hash_table)
ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, array_list)
@@ -475,8 +480,7 @@ bool Heap::CreateInitialMaps() {
ALLOCATE_VARSIZE_MAP(OBJECT_BOILERPLATE_DESCRIPTION_TYPE,
object_boilerplate_description)
- ALLOCATE_VARSIZE_MAP(NATIVE_CONTEXT_TYPE, native_context)
- roots.native_context_map()->set_visitor_id(kVisitNativeContext);
+ ALLOCATE_MAP(NATIVE_CONTEXT_TYPE, NativeContext::kSize, native_context)
ALLOCATE_MAP(CALL_HANDLER_INFO_TYPE, CallHandlerInfo::kSize,
side_effect_call_handler_info)
@@ -485,13 +489,13 @@ bool Heap::CreateInitialMaps() {
ALLOCATE_MAP(CALL_HANDLER_INFO_TYPE, CallHandlerInfo::kSize,
next_call_side_effect_free_call_handler_info)
- ALLOCATE_VARSIZE_MAP(PRE_PARSED_SCOPE_DATA_TYPE, pre_parsed_scope_data)
- ALLOCATE_MAP(UNCOMPILED_DATA_WITHOUT_PRE_PARSED_SCOPE_TYPE,
- UncompiledDataWithoutPreParsedScope::kSize,
- uncompiled_data_without_pre_parsed_scope)
- ALLOCATE_MAP(UNCOMPILED_DATA_WITH_PRE_PARSED_SCOPE_TYPE,
- UncompiledDataWithPreParsedScope::kSize,
- uncompiled_data_with_pre_parsed_scope)
+ ALLOCATE_VARSIZE_MAP(PREPARSE_DATA_TYPE, preparse_data)
+ ALLOCATE_MAP(UNCOMPILED_DATA_WITHOUT_PREPARSE_DATA_TYPE,
+ UncompiledDataWithoutPreparseData::kSize,
+ uncompiled_data_without_preparse_data)
+ ALLOCATE_MAP(UNCOMPILED_DATA_WITH_PREPARSE_DATA_TYPE,
+ UncompiledDataWithPreparseData::kSize,
+ uncompiled_data_with_preparse_data)
ALLOCATE_MAP(SHARED_FUNCTION_INFO_TYPE, SharedFunctionInfo::kAlignedSize,
shared_function_info)
@@ -499,7 +503,9 @@ bool Heap::CreateInitialMaps() {
code_data_container)
ALLOCATE_MAP(JS_MESSAGE_OBJECT_TYPE, JSMessageObject::kSize, message_object)
- ALLOCATE_MAP(JS_OBJECT_TYPE, JSObject::kHeaderSize + kPointerSize, external)
+ ALLOCATE_MAP(JS_OBJECT_TYPE,
+ JSObject::kHeaderSizeForEmbedderFields + kEmbedderDataSlotSize,
+ external)
external_map()->set_is_extensible(false);
#undef ALLOCATE_PRIMITIVE_MAP
#undef ALLOCATE_VARSIZE_MAP
@@ -574,12 +580,13 @@ bool Heap::CreateInitialMaps() {
set_empty_property_array(PropertyArray::cast(obj));
}
-#define ALLOCATE_EMPTY_FIXED_TYPED_ARRAY(Type, type, TYPE, ctype) \
- { \
- FixedTypedArrayBase* obj; \
- if (!AllocateEmptyFixedTypedArray(kExternal##Type##Array).To(&obj)) \
- return false; \
- set_empty_fixed_##type##_array(obj); \
+#define ALLOCATE_EMPTY_FIXED_TYPED_ARRAY(Type, type, TYPE, ctype) \
+ { \
+ FixedTypedArrayBase obj; \
+ if (!AllocateEmptyFixedTypedArray(kExternal##Type##Array).To(&obj)) { \
+ return false; \
+ } \
+ set_empty_fixed_##type##_array(obj); \
}
TYPED_ARRAYS(ALLOCATE_EMPTY_FIXED_TYPED_ARRAY)
@@ -622,9 +629,15 @@ void Heap::CreateInitialObjects() {
set_minus_infinity_value(
*factory->NewHeapNumber(-V8_INFINITY, TENURED_READ_ONLY));
- set_hash_seed(*factory->NewByteArray(kInt64Size, TENURED));
+ set_hash_seed(*factory->NewByteArray(kInt64Size, TENURED_READ_ONLY));
InitializeHashSeed();
+ // There's no "current microtask" at the beginning.
+ set_current_microtask(roots.undefined_value());
+
+ set_dirty_js_weak_factories(roots.undefined_value());
+ set_weak_refs_keep_during_job(roots.undefined_value());
+
// Allocate cache for single character one byte strings.
set_single_character_string_cache(
*factory->NewFixedArray(String::kMaxOneByteCharCode + 1, TENURED));
@@ -635,7 +648,7 @@ void Heap::CreateInitialObjects() {
for (unsigned i = 0; i < arraysize(constant_string_table); i++) {
Handle<String> str =
factory->InternalizeUtf8String(constant_string_table[i].contents);
- roots_[constant_string_table[i].index] = *str;
+ roots_table()[constant_string_table[i].index] = str->ptr();
}
// Allocate
@@ -695,9 +708,7 @@ void Heap::CreateInitialObjects() {
set_self_reference_marker(
*factory->NewSelfReferenceMarker(TENURED_READ_ONLY));
- // Create the code_stubs dictionary. The initial size is set to avoid
- // expanding the dictionary during bootstrapping.
- set_code_stubs(*SimpleNumberDictionary::New(isolate(), 128));
+ set_interpreter_entry_trampoline_for_profiling(roots.undefined_value());
{
HandleScope scope(isolate());
@@ -705,7 +716,7 @@ void Heap::CreateInitialObjects() {
{ \
Handle<Symbol> symbol( \
isolate()->factory()->NewPrivateSymbol(TENURED_READ_ONLY)); \
- roots_[RootIndex::k##name] = *symbol; \
+ roots_table()[RootIndex::k##name] = symbol->ptr(); \
}
PRIVATE_SYMBOL_LIST_GENERATOR(SYMBOL_INIT, /* not used */)
#undef SYMBOL_INIT
@@ -713,22 +724,20 @@ void Heap::CreateInitialObjects() {
{
HandleScope scope(isolate());
-#define SYMBOL_INIT(_, name, description) \
- Handle<Symbol> name = factory->NewSymbol(TENURED_READ_ONLY); \
- Handle<String> name##d = \
- factory->NewStringFromStaticChars(#description, TENURED_READ_ONLY); \
- name->set_name(*name##d); \
- roots_[RootIndex::k##name] = *name;
+#define SYMBOL_INIT(_, name, description) \
+ Handle<Symbol> name = factory->NewSymbol(TENURED_READ_ONLY); \
+ Handle<String> name##d = factory->InternalizeUtf8String(#description); \
+ name->set_name(*name##d); \
+ roots_table()[RootIndex::k##name] = name->ptr();
PUBLIC_SYMBOL_LIST_GENERATOR(SYMBOL_INIT, /* not used */)
#undef SYMBOL_INIT
-#define SYMBOL_INIT(_, name, description) \
- Handle<Symbol> name = factory->NewSymbol(TENURED_READ_ONLY); \
- Handle<String> name##d = \
- factory->NewStringFromStaticChars(#description, TENURED_READ_ONLY); \
- name->set_is_well_known_symbol(true); \
- name->set_name(*name##d); \
- roots_[RootIndex::k##name] = *name;
+#define SYMBOL_INIT(_, name, description) \
+ Handle<Symbol> name = factory->NewSymbol(TENURED_READ_ONLY); \
+ Handle<String> name##d = factory->InternalizeUtf8String(#description); \
+ name->set_is_well_known_symbol(true); \
+ name->set_name(*name##d); \
+ roots_table()[RootIndex::k##name] = name->ptr();
WELL_KNOWN_SYMBOL_LIST_GENERATOR(SYMBOL_INIT, /* not used */)
#undef SYMBOL_INIT
@@ -736,8 +745,8 @@ void Heap::CreateInitialObjects() {
to_string_tag_symbol->set_is_interesting_symbol(true);
}
- Handle<NameDictionary> empty_property_dictionary =
- NameDictionary::New(isolate(), 1, TENURED, USE_CUSTOM_MINIMUM_CAPACITY);
+ Handle<NameDictionary> empty_property_dictionary = NameDictionary::New(
+ isolate(), 1, TENURED_READ_ONLY, USE_CUSTOM_MINIMUM_CAPACITY);
DCHECK(!empty_property_dictionary->HasSufficientCapacityToAdd(1));
set_empty_property_dictionary(*empty_property_dictionary);
@@ -759,7 +768,9 @@ void Heap::CreateInitialObjects() {
factory->NewManyClosuresCell(factory->undefined_value());
set_many_closures_cell(*many_closures_cell);
- set_default_microtask_queue(*factory->NewMicrotaskQueue());
+ // Allocate FeedbackCell for cases where we don't collect feedback.
+ Handle<FeedbackCell> no_feedback_cell = factory->NewNoFeedbackCell();
+ set_no_feedback_cell(*no_feedback_cell);
{
Handle<FixedArray> empty_sloppy_arguments_elements =
@@ -788,11 +799,11 @@ void Heap::CreateInitialObjects() {
// Handling of script id generation is in Heap::NextScriptId().
set_last_script_id(Smi::FromInt(v8::UnboundScript::kNoScriptId));
set_last_debugging_id(Smi::FromInt(DebugInfo::kNoDebuggingId));
- set_next_template_serial_number(Smi::kZero);
+ set_next_template_serial_number(Smi::zero());
// Allocate the empty OrderedHashMap.
Handle<FixedArray> empty_ordered_hash_map = factory->NewFixedArray(
- OrderedHashMap::kHashTableStartIndex, TENURED_READ_ONLY);
+ OrderedHashMap::HashTableStartIndex(), TENURED_READ_ONLY);
empty_ordered_hash_map->set_map_no_write_barrier(
*factory->ordered_hash_map_map());
for (int i = 0; i < empty_ordered_hash_map->length(); ++i) {
@@ -802,7 +813,7 @@ void Heap::CreateInitialObjects() {
// Allocate the empty OrderedHashSet.
Handle<FixedArray> empty_ordered_hash_set = factory->NewFixedArray(
- OrderedHashSet::kHashTableStartIndex, TENURED_READ_ONLY);
+ OrderedHashSet::HashTableStartIndex(), TENURED_READ_ONLY);
empty_ordered_hash_set->set_map_no_write_barrier(
*factory->ordered_hash_set_map());
for (int i = 0; i < empty_ordered_hash_set->length(); ++i) {
@@ -839,6 +850,14 @@ void Heap::CreateInitialObjects() {
cell->set_value(Smi::FromInt(Isolate::kProtectorValid));
set_array_iterator_protector(*cell);
+ cell = factory->NewPropertyCell(factory->empty_string());
+ cell->set_value(Smi::FromInt(Isolate::kProtectorValid));
+ set_map_iterator_protector(*cell);
+
+ cell = factory->NewPropertyCell(factory->empty_string());
+ cell->set_value(Smi::FromInt(Isolate::kProtectorValid));
+ set_set_iterator_protector(*cell);
+
Handle<Cell> is_concat_spreadable_cell = factory->NewCell(
handle(Smi::FromInt(Isolate::kProtectorValid), isolate()));
set_is_concat_spreadable_protector(*is_concat_spreadable_cell);
@@ -857,6 +876,10 @@ void Heap::CreateInitialObjects() {
cell = factory->NewPropertyCell(factory->empty_string());
cell->set_value(Smi::FromInt(Isolate::kProtectorValid));
+ set_regexp_species_protector(*cell);
+
+ cell = factory->NewPropertyCell(factory->empty_string());
+ cell->set_value(Smi::FromInt(Isolate::kProtectorValid));
set_string_iterator_protector(*cell);
Handle<Cell> string_length_overflow_cell = factory->NewCell(
@@ -865,7 +888,7 @@ void Heap::CreateInitialObjects() {
cell = factory->NewPropertyCell(factory->empty_string());
cell->set_value(Smi::FromInt(Isolate::kProtectorValid));
- set_array_buffer_neutering_protector(*cell);
+ set_array_buffer_detaching_protector(*cell);
cell = factory->NewPropertyCell(factory->empty_string());
cell->set_value(Smi::FromInt(Isolate::kProtectorValid));
@@ -884,6 +907,9 @@ void Heap::CreateInitialObjects() {
set_noscript_shared_function_infos(roots.empty_weak_array_list());
+ set_off_heap_trampoline_relocation_info(
+ *Builtins::GenerateOffHeapTrampolineRelocInfo(isolate_));
+
// Evaluate the hash values which will then be cached in the strings.
isolate()->factory()->zero_string()->Hash();
isolate()->factory()->one_string()->Hash();
@@ -891,9 +917,6 @@ void Heap::CreateInitialObjects() {
// Initialize builtins constants table.
set_builtins_constants_table(roots.empty_fixed_array());
- // Initialize context slot cache.
- isolate_->context_slot_cache()->Clear();
-
// Initialize descriptor cache.
isolate_->descriptor_lookup_cache()->Clear();
@@ -904,19 +927,21 @@ void Heap::CreateInitialObjects() {
void Heap::CreateInternalAccessorInfoObjects() {
Isolate* isolate = this->isolate();
HandleScope scope(isolate);
- Handle<AccessorInfo> acessor_info;
+ Handle<AccessorInfo> accessor_info;
#define INIT_ACCESSOR_INFO(_, accessor_name, AccessorName, ...) \
- acessor_info = Accessors::Make##AccessorName##Info(isolate); \
- roots_[RootIndex::k##AccessorName##Accessor] = *acessor_info;
+ accessor_info = Accessors::Make##AccessorName##Info(isolate); \
+ roots_table()[RootIndex::k##AccessorName##Accessor] = accessor_info->ptr();
ACCESSOR_INFO_LIST_GENERATOR(INIT_ACCESSOR_INFO, /* not used */)
#undef INIT_ACCESSOR_INFO
#define INIT_SIDE_EFFECT_FLAG(_, accessor_name, AccessorName, GetterType, \
SetterType) \
- AccessorInfo::cast(roots_[RootIndex::k##AccessorName##Accessor]) \
+ AccessorInfo::cast( \
+ Object(roots_table()[RootIndex::k##AccessorName##Accessor])) \
->set_getter_side_effect_type(SideEffectType::GetterType); \
- AccessorInfo::cast(roots_[RootIndex::k##AccessorName##Accessor]) \
+ AccessorInfo::cast( \
+ Object(roots_table()[RootIndex::k##AccessorName##Accessor])) \
->set_setter_side_effect_type(SideEffectType::SetterType);
ACCESSOR_INFO_LIST_GENERATOR(INIT_SIDE_EFFECT_FLAG, /* not used */)
#undef INIT_SIDE_EFFECT_FLAG
diff --git a/deps/v8/src/heap/slot-set.cc b/deps/v8/src/heap/slot-set.cc
new file mode 100644
index 0000000000..12cf6bab5a
--- /dev/null
+++ b/deps/v8/src/heap/slot-set.cc
@@ -0,0 +1,99 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/slot-set.h"
+
+namespace v8 {
+namespace internal {
+
+TypedSlots::~TypedSlots() {
+ Chunk* chunk = head_;
+ while (chunk != nullptr) {
+ Chunk* next = chunk->next;
+ delete[] chunk->buffer;
+ delete chunk;
+ chunk = next;
+ }
+ head_ = nullptr;
+ tail_ = nullptr;
+}
+
+void TypedSlots::Insert(SlotType type, uint32_t offset) {
+ TypedSlot slot = {TypeField::encode(type) | OffsetField::encode(offset)};
+ Chunk* chunk = EnsureChunk();
+ DCHECK_LT(chunk->count, chunk->capacity);
+ chunk->buffer[chunk->count] = slot;
+ ++chunk->count;
+}
+
+void TypedSlots::Merge(TypedSlots* other) {
+ if (other->head_ == nullptr) {
+ return;
+ }
+ if (head_ == nullptr) {
+ head_ = other->head_;
+ tail_ = other->tail_;
+ } else {
+ tail_->next = other->head_;
+ tail_ = other->tail_;
+ }
+ other->head_ = nullptr;
+ other->tail_ = nullptr;
+}
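+
+// A minimal usage sketch (slot types and offsets are illustrative): Merge()
+// splices the chunk list of |other| onto this set and leaves |other| empty,
+// so the caller may safely delete or reuse it afterwards.
+//
+//   TypedSlots a, b;
+//   a.Insert(some_slot_type, 0x10);  // any non-CLEARED_SLOT SlotType
+//   b.Insert(some_slot_type, 0x20);
+//   a.Merge(&b);  // b's head_ and tail_ are nullptr afterwards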
+
+TypedSlots::Chunk* TypedSlots::EnsureChunk() {
+ if (!head_) {
+ head_ = tail_ = NewChunk(nullptr, kInitialBufferSize);
+ }
+ if (head_->count == head_->capacity) {
+ head_ = NewChunk(head_, NextCapacity(head_->capacity));
+ }
+ return head_;
+}
+
+TypedSlots::Chunk* TypedSlots::NewChunk(Chunk* next, int capacity) {
+ Chunk* chunk = new Chunk;
+ chunk->next = next;
+ chunk->buffer = new TypedSlot[capacity];
+ chunk->capacity = capacity;
+ chunk->count = 0;
+ return chunk;
+}
+
+TypedSlotSet::~TypedSlotSet() { FreeToBeFreedChunks(); }
+
+void TypedSlotSet::FreeToBeFreedChunks() {
+ base::MutexGuard guard(&to_be_freed_chunks_mutex_);
+ std::stack<std::unique_ptr<Chunk>> empty;
+ to_be_freed_chunks_.swap(empty);
+}
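+
+// Note on the swap idiom above: swapping with a default-constructed stack
+// releases every queued chunk in one step; the unique_ptr elements are
+// destroyed when |empty| goes out of scope, still under the mutex.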
+
+void TypedSlotSet::ClearInvalidSlots(
+ const std::map<uint32_t, uint32_t>& invalid_ranges) {
+ Chunk* chunk = LoadHead();
+ while (chunk != nullptr) {
+ TypedSlot* buffer = chunk->buffer;
+ int count = chunk->count;
+ for (int i = 0; i < count; i++) {
+ TypedSlot slot = LoadTypedSlot(buffer + i);
+ SlotType type = TypeField::decode(slot.type_and_offset);
+ if (type == CLEARED_SLOT) continue;
+ uint32_t offset = OffsetField::decode(slot.type_and_offset);
+ std::map<uint32_t, uint32_t>::const_iterator upper_bound =
+ invalid_ranges.upper_bound(offset);
+ if (upper_bound == invalid_ranges.begin()) continue;
+ // upper_bound points to the invalid range after the given slot. Hence,
+ // we have to go to the previous element.
+ upper_bound--;
+ DCHECK_LE(upper_bound->first, offset);
+ if (upper_bound->second > offset) {
+ ClearTypedSlot(buffer + i);
+ }
+ }
+ chunk = LoadNext(chunk);
+ }
+}
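+
+// The lookup above is the standard std::map interval idiom: keys are range
+// starts, values are range ends, and upper_bound() returns the first range
+// starting strictly after the queried offset. A self-contained sketch with
+// illustrative values:
+//
+//   std::map<uint32_t, uint32_t> ranges = {{8, 16}, {32, 48}};
+//   auto it = ranges.upper_bound(12);  // -> {32, 48}, first key > 12
+//   if (it != ranges.begin()) {
+//     --it;                            // -> {8, 16}, candidate range
+//     bool invalid = 12 < it->second;  // true: 12 lies in [8, 16)
+//   }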
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/heap/slot-set.h b/deps/v8/src/heap/slot-set.h
index 7423665bcb..2d9fb327be 100644
--- a/deps/v8/src/heap/slot-set.h
+++ b/deps/v8/src/heap/slot-set.h
@@ -11,8 +11,13 @@
#include "src/allocation.h"
#include "src/base/atomic-utils.h"
#include "src/base/bits.h"
+#include "src/objects/slots.h"
#include "src/utils.h"
+#ifdef V8_COMPRESS_POINTERS
+#include "src/ptr-compr.h"
+#endif
+
namespace v8 {
namespace internal {
@@ -182,8 +187,8 @@ class SlotSet : public Malloced {
// This method should only be called on the main thread.
//
// Sample usage:
- // Iterate([](Address slot_address) {
- // if (good(slot_address)) return KEEP_SLOT;
+ // Iterate([](MaybeObjectSlot slot) {
+ // if (good(slot)) return KEEP_SLOT;
// else return REMOVE_SLOT;
// });
template <typename Callback>
@@ -202,8 +207,8 @@ class SlotSet : public Malloced {
while (cell) {
int bit_offset = base::bits::CountTrailingZeros(cell);
uint32_t bit_mask = 1u << bit_offset;
- uint32_t slot = (cell_offset + bit_offset) << kPointerSizeLog2;
- if (callback(page_start_ + slot) == KEEP_SLOT) {
+ uint32_t slot = (cell_offset + bit_offset) << kTaggedSizeLog2;
+ if (callback(MaybeObjectSlot(page_start_ + slot)) == KEEP_SLOT) {
++in_bucket_count;
} else {
mask |= bit_mask;
@@ -226,7 +231,7 @@ class SlotSet : public Malloced {
}
int NumberOfPreFreedEmptyBuckets() {
- base::LockGuard<base::Mutex> guard(&to_be_freed_buckets_mutex_);
+ base::MutexGuard guard(&to_be_freed_buckets_mutex_);
return static_cast<int>(to_be_freed_buckets_.size());
}
@@ -253,7 +258,7 @@ class SlotSet : public Malloced {
}
void FreeToBeFreedBuckets() {
- base::LockGuard<base::Mutex> guard(&to_be_freed_buckets_mutex_);
+ base::MutexGuard guard(&to_be_freed_buckets_mutex_);
while (!to_be_freed_buckets_.empty()) {
Bucket top = to_be_freed_buckets_.top();
to_be_freed_buckets_.pop();
@@ -264,7 +269,7 @@ class SlotSet : public Malloced {
private:
typedef uint32_t* Bucket;
- static const int kMaxSlots = (1 << kPageSizeBits) / kPointerSize;
+ static const int kMaxSlots = (1 << kPageSizeBits) / kTaggedSize;
static const int kCellsPerBucket = 32;
static const int kCellsPerBucketLog2 = 5;
static const int kBitsPerCell = 32;
@@ -294,7 +299,7 @@ class SlotSet : public Malloced {
void PreFreeEmptyBucket(int bucket_index) {
Bucket bucket = LoadBucket(&buckets_[bucket_index]);
if (bucket != nullptr) {
- base::LockGuard<base::Mutex> guard(&to_be_freed_buckets_mutex_);
+ base::MutexGuard guard(&to_be_freed_buckets_mutex_);
to_be_freed_buckets_.push(bucket);
StoreBucket(&buckets_[bucket_index], nullptr);
}
@@ -370,8 +375,8 @@ class SlotSet : public Malloced {
// Converts the slot offset into bucket/cell/bit index.
void SlotToIndices(int slot_offset, int* bucket_index, int* cell_index,
int* bit_index) {
- DCHECK_EQ(slot_offset % kPointerSize, 0);
- int slot = slot_offset >> kPointerSizeLog2;
+ DCHECK(IsAligned(slot_offset, kTaggedSize));
+ int slot = slot_offset >> kTaggedSizeLog2;
DCHECK(slot >= 0 && slot <= kMaxSlots);
*bucket_index = slot >> kBitsPerBucketLog2;
*cell_index = (slot >> kBitsPerCellLog2) & (kCellsPerBucket - 1);
@@ -392,101 +397,56 @@ enum SlotType {
CLEARED_SLOT
};
-// Data structure for maintaining a multiset of typed slots in a page.
+// Data structure for maintaining a list of typed slots in a page.
// Typed slots can only appear in Code and JSFunction objects, so
// the maximum possible offset is limited by LargePage::kMaxCodePageSize.
// The implementation is a chain of chunks, where each chunk is an array of
// encoded (slot type, slot offset) pairs.
// There is no duplicate detection and we do not expect many duplicates because
// typed slots contain V8 internal pointers that are not directly exposed to JS.
-class TypedSlotSet {
+class V8_EXPORT_PRIVATE TypedSlots {
public:
- enum IterationMode { PREFREE_EMPTY_CHUNKS, KEEP_EMPTY_CHUNKS };
-
- typedef std::pair<SlotType, uint32_t> TypeAndOffset;
+ static const int kMaxOffset = 1 << 29;
+ TypedSlots() = default;
+ virtual ~TypedSlots();
+ void Insert(SlotType type, uint32_t offset);
+ void Merge(TypedSlots* other);
+ protected:
+ class OffsetField : public BitField<int, 0, 29> {};
+ class TypeField : public BitField<SlotType, 29, 3> {};
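+ // Encoding sketch: a TypedSlot packs the 3-bit SlotType into bits 29..31
+ // and the 29-bit page offset into bits 0..28 of a single uint32_t:
+ //   uint32_t packed = TypeField::encode(type) | OffsetField::encode(offset);
+ //   SlotType t      = TypeField::decode(packed);
+ //   uint32_t offset = OffsetField::decode(packed);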
struct TypedSlot {
- TypedSlot() : type_and_offset_(0), host_offset_(0) {}
-
- TypedSlot(SlotType type, uint32_t host_offset, uint32_t offset)
- : type_and_offset_(TypeField::encode(type) |
- OffsetField::encode(offset)),
- host_offset_(host_offset) {}
-
- bool operator==(const TypedSlot other) {
- return type_and_offset() == other.type_and_offset() &&
- host_offset() == other.host_offset();
- }
-
- bool operator!=(const TypedSlot other) { return !(*this == other); }
-
- SlotType type() const { return TypeField::decode(type_and_offset()); }
-
- uint32_t offset() const { return OffsetField::decode(type_and_offset()); }
-
- TypeAndOffset GetTypeAndOffset() const {
- uint32_t t_and_o = type_and_offset();
- return std::make_pair(TypeField::decode(t_and_o),
- OffsetField::decode(t_and_o));
- }
-
- uint32_t type_and_offset() const {
- return base::AsAtomic32::Acquire_Load(&type_and_offset_);
- }
-
- uint32_t host_offset() const {
- return base::AsAtomic32::Acquire_Load(&host_offset_);
- }
-
- void Set(TypedSlot slot) {
- base::AsAtomic32::Release_Store(&type_and_offset_,
- slot.type_and_offset());
- base::AsAtomic32::Release_Store(&host_offset_, slot.host_offset());
- }
-
- void Clear() {
- base::AsAtomic32::Release_Store(
- &type_and_offset_,
- TypeField::encode(CLEARED_SLOT) | OffsetField::encode(0));
- base::AsAtomic32::Release_Store(&host_offset_, 0);
- }
-
- uint32_t type_and_offset_;
- uint32_t host_offset_;
+ uint32_t type_and_offset;
};
- static const int kMaxOffset = 1 << 29;
+ struct Chunk {
+ Chunk* next;
+ TypedSlot* buffer;
+ int32_t capacity;
+ int32_t count;
+ };
+ static const int kInitialBufferSize = 100;
+ static const int kMaxBufferSize = 16 * KB;
+ static int NextCapacity(int capacity) {
+ return Min(kMaxBufferSize, capacity * 2);
+ }
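+ // Growth sketch: successive chunk capacities double from
+ // kInitialBufferSize (100, 200, 400, ...) and clamp at kMaxBufferSize,
+ // i.e. 16 * KB = 16384 TypedSlot entries per chunk thereafter.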
+ Chunk* EnsureChunk();
+ Chunk* NewChunk(Chunk* next, int capacity);
+ Chunk* head_ = nullptr;
+ Chunk* tail_ = nullptr;
+};
- explicit TypedSlotSet(Address page_start)
- : page_start_(page_start), top_(new Chunk(nullptr, kInitialBufferSize)) {}
+// A multiset of per-page typed slots that allows concurrent iteration and
+// clearing of invalid slots.
+class V8_EXPORT_PRIVATE TypedSlotSet : public TypedSlots {
+ public:
+ // The PREFREE_EMPTY_CHUNKS mode indicates that chunks detected as empty
+ // during the iteration are queued in to_be_freed_chunks_, which are
+ // then freed in FreeToBeFreedChunks.
+ enum IterationMode { PREFREE_EMPTY_CHUNKS, KEEP_EMPTY_CHUNKS };
- ~TypedSlotSet() {
- Chunk* chunk = load_top();
- while (chunk != nullptr) {
- Chunk* n = chunk->next();
- delete chunk;
- chunk = n;
- }
- FreeToBeFreedChunks();
- }
+ explicit TypedSlotSet(Address page_start) : page_start_(page_start) {}
- // The slot offset specifies a slot at address page_start_ + offset.
- // This method can only be called on the main thread.
- void Insert(SlotType type, uint32_t host_offset, uint32_t offset) {
- TypedSlot slot(type, host_offset, offset);
- Chunk* top_chunk = load_top();
- if (!top_chunk) {
- top_chunk = new Chunk(nullptr, kInitialBufferSize);
- set_top(top_chunk);
- }
- if (!top_chunk->AddSlot(slot)) {
- Chunk* new_top_chunk =
- new Chunk(top_chunk, NextCapacity(top_chunk->capacity()));
- bool added = new_top_chunk->AddSlot(slot);
- set_top(new_top_chunk);
- DCHECK(added);
- USE(added);
- }
- }
+ ~TypedSlotSet() override;
// Iterate over all slots in the set and for each slot invoke the callback.
// If the callback returns REMOVE_SLOT then the slot is removed from the set.
@@ -497,145 +457,82 @@ class TypedSlotSet {
// if (good(slot_type, slot_address)) return KEEP_SLOT;
// else return REMOVE_SLOT;
// });
+ // This can run concurrently to ClearInvalidSlots().
template <typename Callback>
int Iterate(Callback callback, IterationMode mode) {
STATIC_ASSERT(CLEARED_SLOT < 8);
- Chunk* chunk = load_top();
+ Chunk* chunk = head_;
Chunk* previous = nullptr;
int new_count = 0;
while (chunk != nullptr) {
- TypedSlot* buf = chunk->buffer();
+ TypedSlot* buffer = chunk->buffer;
+ int count = chunk->count;
bool empty = true;
- for (int i = 0; i < chunk->count(); i++) {
- // Order is important here. We have to read out the slot type last to
- // observe the concurrent removal case consistently.
- Address host_addr = page_start_ + buf[i].host_offset();
- TypeAndOffset type_and_offset = buf[i].GetTypeAndOffset();
- SlotType type = type_and_offset.first;
+ for (int i = 0; i < count; i++) {
+ TypedSlot slot = LoadTypedSlot(buffer + i);
+ SlotType type = TypeField::decode(slot.type_and_offset);
if (type != CLEARED_SLOT) {
- Address addr = page_start_ + type_and_offset.second;
- if (callback(type, host_addr, addr) == KEEP_SLOT) {
+ uint32_t offset = OffsetField::decode(slot.type_and_offset);
+ Address addr = page_start_ + offset;
+ if (callback(type, addr) == KEEP_SLOT) {
new_count++;
empty = false;
} else {
- buf[i].Clear();
+ ClearTypedSlot(buffer + i);
}
}
}
-
- Chunk* n = chunk->next();
+ Chunk* next = chunk->next;
if (mode == PREFREE_EMPTY_CHUNKS && empty) {
// We remove the chunk from the list but let it still point to its next
// chunk to allow concurrent iteration.
if (previous) {
- previous->set_next(n);
+ StoreNext(previous, next);
} else {
- set_top(n);
+ StoreHead(next);
}
- base::LockGuard<base::Mutex> guard(&to_be_freed_chunks_mutex_);
- to_be_freed_chunks_.push(chunk);
+ base::MutexGuard guard(&to_be_freed_chunks_mutex_);
+ to_be_freed_chunks_.push(std::unique_ptr<Chunk>(chunk));
} else {
previous = chunk;
}
- chunk = n;
+ chunk = next;
}
return new_count;
}
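
  // A minimal caller sketch (IsStillValid is a hypothetical predicate):
  //
  //   int surviving = typed_slot_set->Iterate(
  //       [](SlotType type, Address addr) {
  //         return IsStillValid(type, addr) ? KEEP_SLOT : REMOVE_SLOT;
  //       },
  //       TypedSlotSet::PREFREE_EMPTY_CHUNKS);
  //   typed_slot_set->FreeToBeFreedChunks();  // once iteration has finished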
- void FreeToBeFreedChunks() {
- base::LockGuard<base::Mutex> guard(&to_be_freed_chunks_mutex_);
- while (!to_be_freed_chunks_.empty()) {
- Chunk* top = to_be_freed_chunks_.top();
- to_be_freed_chunks_.pop();
- delete top;
- }
- }
+ // Clears all slots that have the offset in the specified ranges.
+ // This can run concurrently to Iterate().
+ void ClearInvalidSlots(const std::map<uint32_t, uint32_t>& invalid_ranges);
- void RemoveInvaldSlots(std::map<uint32_t, uint32_t>& invalid_ranges) {
- Chunk* chunk = load_top();
- while (chunk != nullptr) {
- TypedSlot* buf = chunk->buffer();
- for (int i = 0; i < chunk->count(); i++) {
- uint32_t host_offset = buf[i].host_offset();
- std::map<uint32_t, uint32_t>::iterator upper_bound =
- invalid_ranges.upper_bound(host_offset);
- if (upper_bound == invalid_ranges.begin()) continue;
- // upper_bounds points to the invalid range after the given slot. Hence,
- // we have to go to the previous element.
- upper_bound--;
- DCHECK_LE(upper_bound->first, host_offset);
- if (upper_bound->second > host_offset) {
- buf[i].Clear();
- }
- }
- chunk = chunk->next();
- }
- }
+ // Frees empty chunks accumulated by PREFREE_EMPTY_CHUNKS.
+ void FreeToBeFreedChunks();
private:
- static const int kInitialBufferSize = 100;
- static const int kMaxBufferSize = 16 * KB;
-
- static int NextCapacity(int capacity) {
- return Min(kMaxBufferSize, capacity * 2);
+ // Atomic operations used by Iterate and ClearInvalidSlots:
+ Chunk* LoadNext(Chunk* chunk) {
+ return base::AsAtomicPointer::Relaxed_Load(&chunk->next);
+ }
+ void StoreNext(Chunk* chunk, Chunk* next) {
+ return base::AsAtomicPointer::Relaxed_Store(&chunk->next, next);
+ }
+ Chunk* LoadHead() { return base::AsAtomicPointer::Relaxed_Load(&head_); }
+ void StoreHead(Chunk* chunk) {
+ base::AsAtomicPointer::Relaxed_Store(&head_, chunk);
+ }
+ TypedSlot LoadTypedSlot(TypedSlot* slot) {
+ return TypedSlot{base::AsAtomic32::Relaxed_Load(&slot->type_and_offset)};
+ }
+ void ClearTypedSlot(TypedSlot* slot) {
+ // Order is important here and should match that of LoadTypedSlot.
+ base::AsAtomic32::Relaxed_Store(
+ &slot->type_and_offset,
+ TypeField::encode(CLEARED_SLOT) | OffsetField::encode(0));
}
-
- class OffsetField : public BitField<int, 0, 29> {};
- class TypeField : public BitField<SlotType, 29, 3> {};
-
- struct Chunk : Malloced {
- explicit Chunk(Chunk* next_chunk, int chunk_capacity) {
- next_ = next_chunk;
- buffer_ = NewArray<TypedSlot>(chunk_capacity);
- capacity_ = chunk_capacity;
- count_ = 0;
- }
-
- ~Chunk() { DeleteArray(buffer_); }
-
- bool AddSlot(TypedSlot slot) {
- int current_count = count();
- if (current_count == capacity()) return false;
- TypedSlot* current_buffer = buffer();
- // Order is important here. We have to write the slot first before
- // increasing the counter to guarantee that a consistent state is
- // observed by concurrent threads.
- current_buffer[current_count].Set(slot);
- set_count(current_count + 1);
- return true;
- }
-
- Chunk* next() const { return base::AsAtomicPointer::Acquire_Load(&next_); }
-
- void set_next(Chunk* n) {
- return base::AsAtomicPointer::Release_Store(&next_, n);
- }
-
- TypedSlot* buffer() const { return buffer_; }
-
- int32_t capacity() const { return capacity_; }
-
- int32_t count() const { return base::AsAtomic32::Acquire_Load(&count_); }
-
- void set_count(int32_t new_value) {
- base::AsAtomic32::Release_Store(&count_, new_value);
- }
-
- private:
- Chunk* next_;
- TypedSlot* buffer_;
- int32_t capacity_;
- int32_t count_;
- };
-
- Chunk* load_top() { return base::AsAtomicPointer::Acquire_Load(&top_); }
-
- void set_top(Chunk* c) { base::AsAtomicPointer::Release_Store(&top_, c); }
Address page_start_;
- Chunk* top_;
base::Mutex to_be_freed_chunks_mutex_;
- std::stack<Chunk*> to_be_freed_chunks_;
+ std::stack<std::unique_ptr<Chunk>> to_be_freed_chunks_;
};
} // namespace internal
diff --git a/deps/v8/src/heap/spaces-inl.h b/deps/v8/src/heap/spaces-inl.h
index 7162769e5e..ed996d0c73 100644
--- a/deps/v8/src/heap/spaces-inl.h
+++ b/deps/v8/src/heap/spaces-inl.h
@@ -42,42 +42,42 @@ PageRange::PageRange(Address start, Address limit)
// -----------------------------------------------------------------------------
// SemiSpaceIterator
-HeapObject* SemiSpaceIterator::Next() {
+HeapObject SemiSpaceIterator::Next() {
while (current_ != limit_) {
if (Page::IsAlignedToPageSize(current_)) {
Page* page = Page::FromAllocationAreaAddress(current_);
page = page->next_page();
DCHECK(page);
current_ = page->area_start();
- if (current_ == limit_) return nullptr;
+ if (current_ == limit_) return HeapObject();
}
- HeapObject* object = HeapObject::FromAddress(current_);
+ HeapObject object = HeapObject::FromAddress(current_);
current_ += object->Size();
if (!object->IsFiller()) {
return object;
}
}
- return nullptr;
+ return HeapObject();
}
// -----------------------------------------------------------------------------
// HeapObjectIterator
-HeapObject* HeapObjectIterator::Next() {
+HeapObject HeapObjectIterator::Next() {
do {
- HeapObject* next_obj = FromCurrentPage();
- if (next_obj != nullptr) return next_obj;
+ HeapObject next_obj = FromCurrentPage();
+ if (!next_obj.is_null()) return next_obj;
} while (AdvanceToNextPage());
- return nullptr;
+ return HeapObject();
}
-HeapObject* HeapObjectIterator::FromCurrentPage() {
+HeapObject HeapObjectIterator::FromCurrentPage() {
while (cur_addr_ != cur_end_) {
if (cur_addr_ == space_->top() && cur_addr_ != space_->limit()) {
cur_addr_ = space_->limit();
continue;
}
- HeapObject* obj = HeapObject::FromAddress(cur_addr_);
+ HeapObject obj = HeapObject::FromAddress(cur_addr_);
const int obj_size = obj->Size();
cur_addr_ += obj_size;
DCHECK_LE(cur_addr_, cur_end_);
@@ -91,7 +91,7 @@ HeapObject* HeapObjectIterator::FromCurrentPage() {
return obj;
}
}
- return nullptr;
+ return HeapObject();
}
void Space::IncrementExternalBackingStoreBytes(ExternalBackingStoreType type,
@@ -118,13 +118,12 @@ void Space::MoveExternalBackingStoreBytes(ExternalBackingStoreType type,
// -----------------------------------------------------------------------------
// SemiSpace
-bool SemiSpace::Contains(HeapObject* o) {
- return id_ == kToSpace
- ? MemoryChunk::FromAddress(o->address())->InToSpace()
- : MemoryChunk::FromAddress(o->address())->InFromSpace();
+bool SemiSpace::Contains(HeapObject o) {
+ return id_ == kToSpace ? MemoryChunk::FromHeapObject(o)->InToSpace()
+ : MemoryChunk::FromHeapObject(o)->InFromSpace();
}
-bool SemiSpace::Contains(Object* o) {
+bool SemiSpace::Contains(Object o) {
return o->IsHeapObject() && Contains(HeapObject::cast(o));
}
@@ -138,12 +137,12 @@ bool SemiSpace::ContainsSlow(Address a) {
// --------------------------------------------------------------------------
// NewSpace
-bool NewSpace::Contains(HeapObject* o) {
- return MemoryChunk::FromAddress(o->address())->InNewSpace();
+bool NewSpace::Contains(Object o) {
+ return o->IsHeapObject() && Contains(HeapObject::cast(o));
}
-bool NewSpace::Contains(Object* o) {
- return o->IsHeapObject() && Contains(HeapObject::cast(o));
+bool NewSpace::Contains(HeapObject o) {
+ return MemoryChunk::FromHeapObject(o)->InNewSpace();
}
bool NewSpace::ContainsSlow(Address a) {
@@ -154,21 +153,16 @@ bool NewSpace::ToSpaceContainsSlow(Address a) {
return to_space_.ContainsSlow(a);
}
-bool NewSpace::FromSpaceContainsSlow(Address a) {
- return from_space_.ContainsSlow(a);
-}
-
-bool NewSpace::ToSpaceContains(Object* o) { return to_space_.Contains(o); }
-bool NewSpace::FromSpaceContains(Object* o) { return from_space_.Contains(o); }
+bool NewSpace::ToSpaceContains(Object o) { return to_space_.Contains(o); }
+bool NewSpace::FromSpaceContains(Object o) { return from_space_.Contains(o); }
bool PagedSpace::Contains(Address addr) {
- if (heap()->lo_space()->FindPage(addr)) return false;
- return MemoryChunk::FromAnyPointerAddress(heap(), addr)->owner() == this;
+ return MemoryChunk::FromAnyPointerAddress(addr)->owner() == this;
}
-bool PagedSpace::Contains(Object* o) {
- if (!o->IsHeapObject()) return false;
- return Page::FromAddress(HeapObject::cast(o)->address())->owner() == this;
+bool PagedSpace::Contains(Object o) {
+ if (!o.IsHeapObject()) return false;
+ return Page::FromAddress(o.ptr())->owner() == this;
}
void PagedSpace::UnlinkFreeListCategories(Page* page) {
@@ -193,7 +187,7 @@ size_t PagedSpace::RelinkFreeListCategories(Page* page) {
return added;
}
-bool PagedSpace::TryFreeLast(HeapObject* object, int object_size) {
+bool PagedSpace::TryFreeLast(HeapObject object, int object_size) {
if (allocation_info_.top() != kNullAddress) {
const Address object_address = object->address();
if ((allocation_info_.top() - object_size) == object_address) {
@@ -204,12 +198,18 @@ bool PagedSpace::TryFreeLast(HeapObject* object, int object_size) {
return false;
}
-MemoryChunk* MemoryChunk::FromAnyPointerAddress(Heap* heap, Address addr) {
- MemoryChunk* chunk = heap->lo_space()->FindPage(addr);
- if (chunk == nullptr) {
- chunk = MemoryChunk::FromAddress(addr);
+bool MemoryChunk::HasHeaderSentinel(Address slot_addr) {
+ Address base = BaseAddress(slot_addr);
+ if (slot_addr < base + kHeaderSize) return false;
+ return HeapObject::FromAddress(base) ==
+ ObjectSlot(base + kHeaderSentinelOffset).Relaxed_Load();
+}
+
+MemoryChunk* MemoryChunk::FromAnyPointerAddress(Address addr) {
+ while (!HasHeaderSentinel(addr)) {
+ addr = BaseAddress(addr) - 1;
}
- return chunk;
+ return FromAddress(addr);
}
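
// Why the walk-back above terminates: LargePage::Initialize() stores
// kNullAddress at kHeaderSentinelOffset of every interior page boundary of a
// large object, while MemoryChunk::Initialize() stores a pointer to the
// chunk's own base. An interior address therefore fails HasHeaderSentinel(),
// and BaseAddress(addr) - 1 lands in the previous aligned page, so the loop
// steps backwards one page at a time until it reaches the real header.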
void MemoryChunk::IncrementExternalBackingStoreBytes(
@@ -234,6 +234,10 @@ void MemoryChunk::MoveExternalBackingStoreBytes(ExternalBackingStoreType type,
amount);
}
+bool MemoryChunk::IsInNewLargeObjectSpace() const {
+ return owner()->identity() == NEW_LO_SPACE;
+}
+
void Page::MarkNeverAllocateForTesting() {
DCHECK(this->owner()->identity() != NEW_SPACE);
DCHECK(!IsFlagSet(NEVER_ALLOCATE_ON_PAGE));
@@ -259,15 +263,20 @@ void Page::ClearEvacuationCandidate() {
InitializeFreeListCategories();
}
-MemoryChunkIterator::MemoryChunkIterator(Heap* heap)
+HeapObject LargePage::GetObject() {
+ return HeapObject::FromAddress(area_start());
+}
+
+OldGenerationMemoryChunkIterator::OldGenerationMemoryChunkIterator(Heap* heap)
: heap_(heap),
state_(kOldSpaceState),
old_iterator_(heap->old_space()->begin()),
code_iterator_(heap->code_space()->begin()),
map_iterator_(heap->map_space()->begin()),
- lo_iterator_(heap->lo_space()->begin()) {}
+ lo_iterator_(heap->lo_space()->begin()),
+ code_lo_iterator_(heap->code_lo_space()->begin()) {}
-MemoryChunk* MemoryChunkIterator::next() {
+MemoryChunk* OldGenerationMemoryChunkIterator::next() {
switch (state_) {
case kOldSpaceState: {
if (old_iterator_ != heap_->old_space()->end()) return *(old_iterator_++);
@@ -287,6 +296,12 @@ MemoryChunk* MemoryChunkIterator::next() {
}
case kLargeObjectState: {
if (lo_iterator_ != heap_->lo_space()->end()) return *(lo_iterator_++);
+ state_ = kCodeLargeObjectState;
+ V8_FALLTHROUGH;
+ }
+ case kCodeLargeObjectState: {
+ if (code_lo_iterator_ != heap_->code_lo_space()->end())
+ return *(code_lo_iterator_++);
state_ = kFinishedState;
V8_FALLTHROUGH;
}
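
// Iteration order sketch: next() yields old-space pages, then code-space,
// map-space, regular large-object and (new in this patch) code large-object
// pages; once kFinishedState is reached the iterator is exhausted.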
@@ -332,7 +347,7 @@ bool PagedSpace::EnsureLinearAllocationArea(int size_in_bytes) {
return SlowRefillLinearAllocationArea(size_in_bytes);
}
-HeapObject* PagedSpace::AllocateLinearly(int size_in_bytes) {
+HeapObject PagedSpace::AllocateLinearly(int size_in_bytes) {
Address current_top = allocation_info_.top();
Address new_top = current_top + size_in_bytes;
DCHECK_LE(new_top, allocation_info_.limit());
@@ -340,13 +355,13 @@ HeapObject* PagedSpace::AllocateLinearly(int size_in_bytes) {
return HeapObject::FromAddress(current_top);
}
-HeapObject* PagedSpace::TryAllocateLinearlyAligned(
+HeapObject PagedSpace::TryAllocateLinearlyAligned(
int* size_in_bytes, AllocationAlignment alignment) {
Address current_top = allocation_info_.top();
int filler_size = Heap::GetFillToAlign(current_top, alignment);
Address new_top = current_top + filler_size + *size_in_bytes;
- if (new_top > allocation_info_.limit()) return nullptr;
+ if (new_top > allocation_info_.limit()) return HeapObject();
allocation_info_.set_top(new_top);
if (filler_size > 0) {
@@ -364,8 +379,8 @@ AllocationResult PagedSpace::AllocateRawUnaligned(
if (!EnsureLinearAllocationArea(size_in_bytes)) {
return AllocationResult::Retry(identity());
}
- HeapObject* object = AllocateLinearly(size_in_bytes);
- DCHECK_NOT_NULL(object);
+ HeapObject object = AllocateLinearly(size_in_bytes);
+ DCHECK(!object.is_null());
if (update_skip_list == UPDATE_SKIP_LIST && identity() == CODE_SPACE) {
SkipList::Update(object->address(), size_in_bytes);
}
@@ -379,8 +394,8 @@ AllocationResult PagedSpace::AllocateRawAligned(int size_in_bytes,
DCHECK(identity() == OLD_SPACE || identity() == RO_SPACE);
DCHECK_IMPLIES(identity() == RO_SPACE, heap()->CanAllocateInReadOnlySpace());
int allocation_size = size_in_bytes;
- HeapObject* object = TryAllocateLinearlyAligned(&allocation_size, alignment);
- if (object == nullptr) {
+ HeapObject object = TryAllocateLinearlyAligned(&allocation_size, alignment);
+ if (object.is_null()) {
// We don't know exactly how much filler we need to align until space is
// allocated, so assume the worst case.
int filler_size = Heap::GetMaximumFillToAlign(alignment);
@@ -390,7 +405,7 @@ AllocationResult PagedSpace::AllocateRawAligned(int size_in_bytes,
}
allocation_size = size_in_bytes;
object = TryAllocateLinearlyAligned(&allocation_size, alignment);
- DCHECK_NOT_NULL(object);
+ DCHECK(!object.is_null());
}
MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object->address(), size_in_bytes);
return object;
@@ -420,14 +435,14 @@ AllocationResult PagedSpace::AllocateRaw(int size_in_bytes,
#else
AllocationResult result = AllocateRawUnaligned(size_in_bytes);
#endif
- HeapObject* heap_obj = nullptr;
+ HeapObject heap_obj;
if (!result.IsRetry() && result.To(&heap_obj) && !is_local()) {
- DCHECK_IMPLIES(
- heap()->incremental_marking()->black_allocation(),
- heap()->incremental_marking()->marking_state()->IsBlack(heap_obj));
AllocationStep(static_cast<int>(size_in_bytes + bytes_since_last),
heap_obj->address(), size_in_bytes);
StartNextInlineAllocationStep();
+ DCHECK_IMPLIES(
+ heap()->incremental_marking()->black_allocation(),
+ heap()->incremental_marking()->marking_state()->IsBlack(heap_obj));
}
return result;
}
@@ -455,7 +470,7 @@ AllocationResult NewSpace::AllocateRawAligned(int size_in_bytes,
aligned_size_in_bytes = size_in_bytes + filler_size;
}
- HeapObject* obj = HeapObject::FromAddress(top);
+ HeapObject obj = HeapObject::FromAddress(top);
allocation_info_.set_top(top + aligned_size_in_bytes);
DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
@@ -480,7 +495,7 @@ AllocationResult NewSpace::AllocateRawUnaligned(int size_in_bytes) {
top = allocation_info_.top();
}
- HeapObject* obj = HeapObject::FromAddress(top);
+ HeapObject obj = HeapObject::FromAddress(top);
allocation_info_.set_top(top + size_in_bytes);
DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
@@ -509,7 +524,7 @@ AllocationResult NewSpace::AllocateRaw(int size_in_bytes,
V8_WARN_UNUSED_RESULT inline AllocationResult NewSpace::AllocateRawSynchronized(
int size_in_bytes, AllocationAlignment alignment) {
- base::LockGuard<base::Mutex> guard(&mutex_);
+ base::MutexGuard guard(&mutex_);
return AllocateRaw(size_in_bytes, alignment);
}
@@ -517,7 +532,7 @@ LocalAllocationBuffer LocalAllocationBuffer::FromResult(Heap* heap,
AllocationResult result,
intptr_t size) {
if (result.IsRetry()) return InvalidBuffer();
- HeapObject* obj = nullptr;
+ HeapObject obj;
bool ok = result.To(&obj);
USE(ok);
DCHECK(ok);
@@ -535,7 +550,7 @@ bool LocalAllocationBuffer::TryMerge(LocalAllocationBuffer* other) {
return false;
}
-bool LocalAllocationBuffer::TryFreeLast(HeapObject* object, int object_size) {
+bool LocalAllocationBuffer::TryFreeLast(HeapObject object, int object_size) {
if (IsValid()) {
const Address object_address = object->address();
if ((allocation_info_.top() - object_size) == object_address) {
diff --git a/deps/v8/src/heap/spaces.cc b/deps/v8/src/heap/spaces.cc
index dcacea0afc..606279292a 100644
--- a/deps/v8/src/heap/spaces.cc
+++ b/deps/v8/src/heap/spaces.cc
@@ -22,8 +22,10 @@
#include "src/heap/sweeper.h"
#include "src/msan.h"
#include "src/objects-inl.h"
+#include "src/objects/free-space-inl.h"
#include "src/objects/js-array-buffer-inl.h"
#include "src/objects/js-array-inl.h"
+#include "src/ostreams.h"
#include "src/snapshot/snapshot.h"
#include "src/v8.h"
#include "src/vm-state-inl.h"
@@ -31,6 +33,14 @@
namespace v8 {
namespace internal {
+// These checks are here to ensure that the lower 32 bits of any real heap
+// object can't overlap with the lower 32 bits of cleared weak reference value
+// and therefore it's enough to compare only the lower 32 bits of a MaybeObject
+// in order to figure out if it's a cleared weak reference or not.
+STATIC_ASSERT(kClearedWeakHeapObjectLower32 > 0);
+STATIC_ASSERT(kClearedWeakHeapObjectLower32 < Page::kHeaderSize);
+STATIC_ASSERT(kClearedWeakHeapObjectLower32 < LargePage::kHeaderSize);
+
// ----------------------------------------------------------------------------
// HeapObjectIterator
@@ -98,7 +108,7 @@ static base::LazyInstance<CodeRangeAddressHint>::type code_range_address_hint =
LAZY_INSTANCE_INITIALIZER;
Address CodeRangeAddressHint::GetAddressHint(size_t code_range_size) {
- base::LockGuard<base::Mutex> guard(&mutex_);
+ base::MutexGuard guard(&mutex_);
auto it = recently_freed_.find(code_range_size);
if (it == recently_freed_.end() || it->second.empty()) {
return reinterpret_cast<Address>(GetRandomMmapAddr());
@@ -110,7 +120,7 @@ Address CodeRangeAddressHint::GetAddressHint(size_t code_range_size) {
void CodeRangeAddressHint::NotifyFreedCodeRange(Address code_range_start,
size_t code_range_size) {
- base::LockGuard<base::Mutex> guard(&mutex_);
+ base::MutexGuard guard(&mutex_);
recently_freed_[code_range_size].push_back(code_range_start);
}
@@ -121,7 +131,7 @@ void CodeRangeAddressHint::NotifyFreedCodeRange(Address code_range_start,
MemoryAllocator::MemoryAllocator(Isolate* isolate, size_t capacity,
size_t code_range_size)
: isolate_(isolate),
- data_page_allocator_(GetPlatformPageAllocator()),
+ data_page_allocator_(isolate->page_allocator()),
code_page_allocator_(nullptr),
capacity_(RoundUp(capacity, Page::kPageSize)),
size_(0),
@@ -154,7 +164,7 @@ void MemoryAllocator::InitializeCodePageAllocator(
requested += RoundUp(reserved_area, MemoryChunk::kPageSize);
// Fulfilling both the reserved pages requirement and huge code area
// alignments is not supported (requires re-implementation).
- DCHECK_LE(kCodeRangeAreaAlignment, page_allocator->AllocatePageSize());
+ DCHECK_LE(kMinExpectedOSPageSize, page_allocator->AllocatePageSize());
}
DCHECK(!kRequiresCodeRange || requested <= kMaximalCodeRangeSize);
@@ -163,7 +173,7 @@ void MemoryAllocator::InitializeCodePageAllocator(
page_allocator->AllocatePageSize());
VirtualMemory reservation(
page_allocator, requested, reinterpret_cast<void*>(hint),
- Max(kCodeRangeAreaAlignment, page_allocator->AllocatePageSize()));
+ Max(kMinExpectedOSPageSize, page_allocator->AllocatePageSize()));
if (!reservation.IsReserved()) {
V8::FatalProcessOutOfMemory(isolate_,
"CodeRange setup: allocate virtual memory");
@@ -190,7 +200,7 @@ void MemoryAllocator::InitializeCodePageAllocator(
size_t size =
RoundDown(reservation.size() - (aligned_base - base) - reserved_area,
MemoryChunk::kPageSize);
- DCHECK(IsAligned(aligned_base, kCodeRangeAreaAlignment));
+ DCHECK(IsAligned(aligned_base, kMinExpectedOSPageSize));
LOG(isolate_,
NewEvent("CodeRange", reinterpret_cast<void*>(reservation.address()),
@@ -283,7 +293,7 @@ void MemoryAllocator::Unmapper::FreeQueuedChunks() {
void MemoryAllocator::Unmapper::CancelAndWaitForPendingTasks() {
for (int i = 0; i < pending_unmapping_tasks_; i++) {
if (heap_->isolate()->cancelable_task_manager()->TryAbort(task_ids_[i]) !=
- CancelableTaskManager::kTaskAborted) {
+ TryAbortResult::kTaskAborted) {
pending_unmapping_tasks_semaphore_.Wait();
}
}
@@ -360,8 +370,13 @@ void MemoryAllocator::Unmapper::TearDown() {
}
}
+size_t MemoryAllocator::Unmapper::NumberOfCommittedChunks() {
+ base::MutexGuard guard(&mutex_);
+ return chunks_[kRegular].size() + chunks_[kNonRegular].size();
+}
+
int MemoryAllocator::Unmapper::NumberOfChunks() {
- base::LockGuard<base::Mutex> guard(&mutex_);
+ base::MutexGuard guard(&mutex_);
size_t result = 0;
for (int i = 0; i < kNumberOfChunkQueues; i++) {
result += chunks_[i].size();
@@ -370,7 +385,7 @@ int MemoryAllocator::Unmapper::NumberOfChunks() {
}
size_t MemoryAllocator::Unmapper::CommittedBufferedMemory() {
- base::LockGuard<base::Mutex> guard(&mutex_);
+ base::MutexGuard guard(&mutex_);
size_t sum = 0;
// kPooled chunks are already uncommitted. We only have to account for
@@ -446,6 +461,73 @@ Address MemoryAllocator::AllocateAlignedMemory(
return base;
}
+void MemoryChunk::DiscardUnusedMemory(Address addr, size_t size) {
+ base::AddressRegion memory_area =
+ MemoryAllocator::ComputeDiscardMemoryArea(addr, size);
+ if (memory_area.size() != 0) {
+ MemoryAllocator* memory_allocator = heap_->memory_allocator();
+ v8::PageAllocator* page_allocator =
+ memory_allocator->page_allocator(executable());
+ CHECK(page_allocator->DiscardSystemPages(
+ reinterpret_cast<void*>(memory_area.begin()), memory_area.size()));
+ }
+}
+
+size_t MemoryChunkLayout::CodePageGuardStartOffset() {
+ // We are guarding code pages: the first OS page after the header
+ // will be protected as non-writable.
+ return ::RoundUp(Page::kHeaderSize, MemoryAllocator::GetCommitPageSize());
+}
+
+size_t MemoryChunkLayout::CodePageGuardSize() {
+ return MemoryAllocator::GetCommitPageSize();
+}
+
+intptr_t MemoryChunkLayout::ObjectStartOffsetInCodePage() {
+ // We are guarding code pages: the first OS page after the header
+ // will be protected as non-writable.
+ return CodePageGuardStartOffset() + CodePageGuardSize();
+}
+
+intptr_t MemoryChunkLayout::ObjectEndOffsetInCodePage() {
+ // We are guarding code pages: the last OS page will be protected as
+ // non-writable.
+ return Page::kPageSize -
+ static_cast<int>(MemoryAllocator::GetCommitPageSize());
+}
+
+size_t MemoryChunkLayout::AllocatableMemoryInCodePage() {
+ size_t memory = ObjectEndOffsetInCodePage() - ObjectStartOffsetInCodePage();
+ DCHECK_LE(kMaxRegularHeapObjectSize, memory);
+ return memory;
+}
+
+intptr_t MemoryChunkLayout::ObjectStartOffsetInDataPage() {
+ return RoundUp(MemoryChunk::kHeaderSize, kTaggedSize);
+}
+
+size_t MemoryChunkLayout::ObjectStartOffsetInMemoryChunk(
+ AllocationSpace space) {
+ if (space == CODE_SPACE) {
+ return ObjectStartOffsetInCodePage();
+ }
+ return ObjectStartOffsetInDataPage();
+}
+
+size_t MemoryChunkLayout::AllocatableMemoryInDataPage() {
+ size_t memory = MemoryChunk::kPageSize - ObjectStartOffsetInDataPage();
+ DCHECK_LE(kMaxRegularHeapObjectSize, memory);
+ return memory;
+}
+
+size_t MemoryChunkLayout::AllocatableMemoryInMemoryChunk(
+ AllocationSpace space) {
+ if (space == CODE_SPACE) {
+ return AllocatableMemoryInCodePage();
+ }
+ return AllocatableMemoryInDataPage();
+}
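+
+// Layout sketch, assuming a 4 KB commit page size and a header smaller than
+// one OS page (illustrative numbers only):
+//   CodePageGuardStartOffset()    == 4 KB  (header rounded up to a page)
+//   CodePageGuardSize()           == 4 KB  (one guard page)
+//   ObjectStartOffsetInCodePage() == 8 KB  (header page + guard page)
+//   ObjectEndOffsetInCodePage()   == kPageSize - 4 KB  (trailing guard page)
+// A code page therefore gives up three OS pages, while a data page only
+// rounds MemoryChunk::kHeaderSize up to kTaggedSize.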
+
Heap* MemoryChunk::synchronized_heap() {
return reinterpret_cast<Heap*>(
base::Acquire_Load(reinterpret_cast<base::AtomicWord*>(&heap_)));
@@ -463,12 +545,16 @@ void MemoryChunk::InitializationMemoryFence() {
#endif
}
-void MemoryChunk::SetReadAndExecutable() {
+void MemoryChunk::DecrementWriteUnprotectCounterAndMaybeSetPermissions(
+ PageAllocator::Permission permission) {
+ DCHECK(permission == PageAllocator::kRead ||
+ permission == PageAllocator::kReadExecute);
DCHECK(IsFlagSet(MemoryChunk::IS_EXECUTABLE));
- DCHECK(owner()->identity() == CODE_SPACE || owner()->identity() == LO_SPACE);
+ DCHECK(owner()->identity() == CODE_SPACE ||
+ owner()->identity() == CODE_LO_SPACE);
// Decrementing the write_unprotect_counter_ and changing the page
// protection mode has to be atomic.
- base::LockGuard<base::Mutex> guard(page_protection_change_mutex_);
+ base::MutexGuard guard(page_protection_change_mutex_);
if (write_unprotect_counter_ == 0) {
// This is a corner case that may happen when we have a
// CodeSpaceMemoryModificationScope open and this page was newly
@@ -479,44 +565,65 @@ void MemoryChunk::SetReadAndExecutable() {
DCHECK_LT(write_unprotect_counter_, kMaxWriteUnprotectCounter);
if (write_unprotect_counter_ == 0) {
Address protect_start =
- address() + MemoryAllocator::CodePageAreaStartOffset();
+ address() + MemoryChunkLayout::ObjectStartOffsetInCodePage();
size_t page_size = MemoryAllocator::GetCommitPageSize();
- DCHECK(IsAddressAligned(protect_start, page_size));
+ DCHECK(IsAligned(protect_start, page_size));
size_t protect_size = RoundUp(area_size(), page_size);
- CHECK(reservation_.SetPermissions(protect_start, protect_size,
- PageAllocator::kReadExecute));
+ CHECK(reservation_.SetPermissions(protect_start, protect_size, permission));
}
}
+void MemoryChunk::SetReadable() {
+ DecrementWriteUnprotectCounterAndMaybeSetPermissions(PageAllocator::kRead);
+}
+
+void MemoryChunk::SetReadAndExecutable() {
+ DCHECK(!FLAG_jitless);
+ DecrementWriteUnprotectCounterAndMaybeSetPermissions(
+ PageAllocator::kReadExecute);
+}
+
void MemoryChunk::SetReadAndWritable() {
DCHECK(IsFlagSet(MemoryChunk::IS_EXECUTABLE));
- DCHECK(owner()->identity() == CODE_SPACE || owner()->identity() == LO_SPACE);
+ DCHECK(owner()->identity() == CODE_SPACE ||
+ owner()->identity() == CODE_LO_SPACE);
// Incrementing the write_unprotect_counter_ and changing the page
// protection mode has to be atomic.
- base::LockGuard<base::Mutex> guard(page_protection_change_mutex_);
+ base::MutexGuard guard(page_protection_change_mutex_);
write_unprotect_counter_++;
DCHECK_LE(write_unprotect_counter_, kMaxWriteUnprotectCounter);
if (write_unprotect_counter_ == 1) {
Address unprotect_start =
- address() + MemoryAllocator::CodePageAreaStartOffset();
+ address() + MemoryChunkLayout::ObjectStartOffsetInCodePage();
size_t page_size = MemoryAllocator::GetCommitPageSize();
- DCHECK(IsAddressAligned(unprotect_start, page_size));
+ DCHECK(IsAligned(unprotect_start, page_size));
size_t unprotect_size = RoundUp(area_size(), page_size);
CHECK(reservation_.SetPermissions(unprotect_start, unprotect_size,
PageAllocator::kReadWrite));
}
}
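
// Counter sketch (hypothetical nesting of two code-modification scopes):
//   SetReadAndWritable()    0 -> 1 : flips RX to RW
//   SetReadAndWritable()    1 -> 2 : no permission change
//   SetReadAndExecutable()  2 -> 1 : no permission change
//   SetReadAndExecutable()  1 -> 0 : flips RW back to RX
// Permissions change only on the 0 <-> 1 edges, which is what makes the
// scopes safely reentrant.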
+namespace {
+
+PageAllocator::Permission DefaultWritableCodePermissions() {
+ return FLAG_jitless ? PageAllocator::kReadWrite
+ : PageAllocator::kReadWriteExecute;
+}
+
+} // namespace
+
MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
Address area_start, Address area_end,
Executability executable, Space* owner,
VirtualMemory reservation) {
MemoryChunk* chunk = FromAddress(base);
- DCHECK(base == chunk->address());
+ DCHECK_EQ(base, chunk->address());
chunk->heap_ = heap;
chunk->size_ = size;
+ chunk->header_sentinel_ = HeapObject::FromAddress(base).ptr();
+ DCHECK(HasHeaderSentinel(area_start));
chunk->area_start_ = area_start;
chunk->area_end_ = area_end;
chunk->flags_ = Flags(NO_FLAGS);
@@ -539,6 +646,7 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
chunk->allocated_bytes_ = chunk->area_size();
chunk->wasted_memory_ = 0;
chunk->young_generation_bitmap_ = nullptr;
+ chunk->marking_bitmap_ = nullptr;
chunk->local_tracker_ = nullptr;
chunk->external_backing_store_bytes_[ExternalBackingStoreType::kArrayBuffer] =
@@ -550,14 +658,15 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
chunk->categories_[i] = nullptr;
}
+ chunk->AllocateMarkingBitmap();
if (owner->identity() == RO_SPACE) {
heap->incremental_marking()
->non_atomic_marking_state()
->bitmap(chunk)
->MarkAllBits();
} else {
- heap->incremental_marking()->non_atomic_marking_state()->ClearLiveness(
- chunk);
+ heap->incremental_marking()->non_atomic_marking_state()->SetLiveBytes(chunk,
+ 0);
}
DCHECK_EQ(kFlagsOffset, OFFSET_OF(MemoryChunk, flags_));
@@ -569,10 +678,10 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
heap->code_space_memory_modification_scope_depth();
} else {
size_t page_size = MemoryAllocator::GetCommitPageSize();
- DCHECK(IsAddressAligned(area_start, page_size));
+ DCHECK(IsAligned(area_start, page_size));
size_t area_size = RoundUp(area_end - area_start, page_size);
CHECK(reservation.SetPermissions(area_start, area_size,
- PageAllocator::kReadWriteExecute));
+ DefaultWritableCodePermissions()));
}
}
@@ -583,7 +692,9 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
Page* PagedSpace::InitializePage(MemoryChunk* chunk, Executability executable) {
Page* page = static_cast<Page*>(chunk);
- DCHECK_GE(Page::kAllocatableMemory, page->area_size());
+ DCHECK_EQ(MemoryChunkLayout::AllocatableMemoryInMemoryChunk(
+ page->owner()->identity()),
+ page->area_size());
// Make sure that categories are initialized before freeing the area.
page->ResetAllocatedBytes();
page->SetOldGenerationPageFlags(heap()->incremental_marking()->IsMarking());
@@ -627,6 +738,15 @@ LargePage* LargePage::Initialize(Heap* heap, MemoryChunk* chunk,
MSAN_ALLOCATED_UNINITIALIZED_MEMORY(chunk->area_start(), chunk->area_size());
+ // Initialize the sentinel value for each page boundary since the mutator
+ // may initialize the object starting from its end.
+ Address sentinel = chunk->address() + MemoryChunk::kHeaderSentinelOffset +
+ MemoryChunk::kPageSize;
+ while (sentinel < chunk->area_end()) {
+ *reinterpret_cast<intptr_t*>(sentinel) = kNullAddress;
+ sentinel += MemoryChunk::kPageSize;
+ }
+
LargePage* page = static_cast<LargePage*>(chunk);
page->list_node().Initialize();
return page;
@@ -671,10 +791,6 @@ size_t MemoryChunk::CommittedPhysicalMemory() {
return high_water_mark_;
}
-bool MemoryChunk::IsPagedSpace() const {
- return owner()->identity() != LO_SPACE;
-}
-
bool MemoryChunk::InOldSpace() const {
return owner()->identity() == OLD_SPACE;
}
@@ -719,7 +835,7 @@ MemoryChunk* MemoryAllocator::AllocateChunk(size_t reserve_area_size,
// Non-executable
// +----------------------------+<- base aligned with MemoryChunk::kAlignment
// | Header |
- // +----------------------------+<- area_start_ (base + kObjectStartOffset)
+ // +----------------------------+<- area_start_ (base + area_start_)
// | Area |
// +----------------------------+<- area_end_ (area_start + commit_area_size)
// | Committed but not used |
@@ -729,13 +845,15 @@ MemoryChunk* MemoryAllocator::AllocateChunk(size_t reserve_area_size,
//
if (executable == EXECUTABLE) {
- chunk_size = ::RoundUp(
- CodePageAreaStartOffset() + reserve_area_size + CodePageGuardSize(),
- GetCommitPageSize());
+ chunk_size = ::RoundUp(MemoryChunkLayout::ObjectStartOffsetInCodePage() +
+ reserve_area_size +
+ MemoryChunkLayout::CodePageGuardSize(),
+ GetCommitPageSize());
// Size of header (not executable) plus area (executable).
size_t commit_size = ::RoundUp(
- CodePageGuardStartOffset() + commit_area_size, GetCommitPageSize());
+ MemoryChunkLayout::CodePageGuardStartOffset() + commit_area_size,
+ GetCommitPageSize());
base =
AllocateAlignedMemory(chunk_size, commit_size, MemoryChunk::kAlignment,
executable, address_hint, &reservation);
@@ -744,18 +862,20 @@ MemoryChunk* MemoryAllocator::AllocateChunk(size_t reserve_area_size,
size_executable_ += reservation.size();
if (Heap::ShouldZapGarbage()) {
- ZapBlock(base, CodePageGuardStartOffset(), kZapValue);
- ZapBlock(base + CodePageAreaStartOffset(), commit_area_size, kZapValue);
+ ZapBlock(base, MemoryChunkLayout::CodePageGuardStartOffset(), kZapValue);
+ ZapBlock(base + MemoryChunkLayout::ObjectStartOffsetInCodePage(),
+ commit_area_size, kZapValue);
}
- area_start = base + CodePageAreaStartOffset();
+ area_start = base + MemoryChunkLayout::ObjectStartOffsetInCodePage();
area_end = area_start + commit_area_size;
} else {
- chunk_size = ::RoundUp(MemoryChunk::kObjectStartOffset + reserve_area_size,
- GetCommitPageSize());
- size_t commit_size =
- ::RoundUp(MemoryChunk::kObjectStartOffset + commit_area_size,
- GetCommitPageSize());
+ chunk_size = ::RoundUp(
+ MemoryChunkLayout::ObjectStartOffsetInDataPage() + reserve_area_size,
+ GetCommitPageSize());
+ size_t commit_size = ::RoundUp(
+ MemoryChunkLayout::ObjectStartOffsetInDataPage() + commit_area_size,
+ GetCommitPageSize());
base =
AllocateAlignedMemory(chunk_size, commit_size, MemoryChunk::kAlignment,
executable, address_hint, &reservation);
@@ -763,10 +883,13 @@ MemoryChunk* MemoryAllocator::AllocateChunk(size_t reserve_area_size,
if (base == kNullAddress) return nullptr;
if (Heap::ShouldZapGarbage()) {
- ZapBlock(base, Page::kObjectStartOffset + commit_area_size, kZapValue);
+ ZapBlock(
+ base,
+ MemoryChunkLayout::ObjectStartOffsetInDataPage() + commit_area_size,
+ kZapValue);
}
- area_start = base + Page::kObjectStartOffset;
+ area_start = base + MemoryChunkLayout::ObjectStartOffsetInDataPage();
area_end = area_start + commit_area_size;
}
@@ -852,7 +975,7 @@ size_t Page::AvailableInFreeList() {
namespace {
// Skips filler starting from the given filler until the end address.
// Returns the first address after the skipped fillers.
-Address SkipFillers(HeapObject* filler, Address end) {
+Address SkipFillers(HeapObject filler, Address end) {
Address addr = filler->address();
while (addr < end) {
filler = HeapObject::FromAddress(addr);
@@ -872,7 +995,7 @@ size_t Page::ShrinkToHighWaterMark() {
// Shrink pages to high water mark. The water mark points either to a filler
// or the area_end.
- HeapObject* filler = HeapObject::FromAddress(HighWaterMark());
+ HeapObject filler = HeapObject::FromAddress(HighWaterMark());
if (filler->address() == area_end()) return 0;
CHECK(filler->IsFiller());
// Ensure that no objects were allocated in [filler, area_end) region.
@@ -940,7 +1063,7 @@ void MemoryAllocator::PartialFreeMemory(MemoryChunk* chunk, Address start_free,
size_t page_size = GetCommitPageSize();
DCHECK_EQ(0, chunk->area_end_ % static_cast<Address>(page_size));
DCHECK_EQ(chunk->address() + chunk->size(),
- chunk->area_end() + CodePageGuardSize());
+ chunk->area_end() + MemoryChunkLayout::CodePageGuardSize());
reservation->SetPermissions(chunk->area_end_, page_size,
PageAllocator::kNoAccess);
}
@@ -1023,23 +1146,26 @@ void MemoryAllocator::Free(MemoryChunk* chunk) {
}
}
-template void MemoryAllocator::Free<MemoryAllocator::kFull>(MemoryChunk* chunk);
+template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) void MemoryAllocator::Free<
+ MemoryAllocator::kFull>(MemoryChunk* chunk);
-template void MemoryAllocator::Free<MemoryAllocator::kAlreadyPooled>(
- MemoryChunk* chunk);
+template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) void MemoryAllocator::Free<
+ MemoryAllocator::kAlreadyPooled>(MemoryChunk* chunk);
-template void MemoryAllocator::Free<MemoryAllocator::kPreFreeAndQueue>(
- MemoryChunk* chunk);
+template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) void MemoryAllocator::Free<
+ MemoryAllocator::kPreFreeAndQueue>(MemoryChunk* chunk);
-template void MemoryAllocator::Free<MemoryAllocator::kPooledAndQueue>(
- MemoryChunk* chunk);
+template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) void MemoryAllocator::Free<
+ MemoryAllocator::kPooledAndQueue>(MemoryChunk* chunk);
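// The EXPORT_TEMPLATE_DEFINE macro pairs with EXPORT_TEMPLATE_DECLARE in the
// header (see the new include of src/base/export-template.h in spaces.h
// below) so that these explicit instantiations carry V8_EXPORT_PRIVATE in
// component builds. A minimal sketch of the pattern, with Foo/FooType
// standing in for any template and argument:
//
//   // foo.h
//   extern template EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
//       void Foo<FooType>(FooType* arg);
//   // foo.cc
//   template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
//       void Foo<FooType>(FooType* arg);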
template <MemoryAllocator::AllocationMode alloc_mode, typename SpaceType>
Page* MemoryAllocator::AllocatePage(size_t size, SpaceType* owner,
Executability executable) {
MemoryChunk* chunk = nullptr;
if (alloc_mode == kPooled) {
- DCHECK_EQ(size, static_cast<size_t>(MemoryChunk::kAllocatableMemory));
+ DCHECK_EQ(size, static_cast<size_t>(
+ MemoryChunkLayout::AllocatableMemoryInMemoryChunk(
+ owner->identity())));
DCHECK_EQ(executable, NOT_EXECUTABLE);
chunk = AllocatePagePooled(owner);
}
@@ -1050,15 +1176,15 @@ Page* MemoryAllocator::AllocatePage(size_t size, SpaceType* owner,
return owner->InitializePage(chunk, executable);
}
-template Page*
-MemoryAllocator::AllocatePage<MemoryAllocator::kRegular, PagedSpace>(
- size_t size, PagedSpace* owner, Executability executable);
-template Page*
-MemoryAllocator::AllocatePage<MemoryAllocator::kRegular, SemiSpace>(
- size_t size, SemiSpace* owner, Executability executable);
-template Page*
-MemoryAllocator::AllocatePage<MemoryAllocator::kPooled, SemiSpace>(
- size_t size, SemiSpace* owner, Executability executable);
+template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
+ Page* MemoryAllocator::AllocatePage<MemoryAllocator::kRegular, PagedSpace>(
+ size_t size, PagedSpace* owner, Executability executable);
+template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
+ Page* MemoryAllocator::AllocatePage<MemoryAllocator::kRegular, SemiSpace>(
+ size_t size, SemiSpace* owner, Executability executable);
+template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
+ Page* MemoryAllocator::AllocatePage<MemoryAllocator::kPooled, SemiSpace>(
+ size_t size, SemiSpace* owner, Executability executable);
LargePage* MemoryAllocator::AllocateLargePage(size_t size,
LargeObjectSpace* owner,
@@ -1074,7 +1200,9 @@ MemoryChunk* MemoryAllocator::AllocatePagePooled(SpaceType* owner) {
if (chunk == nullptr) return nullptr;
const int size = MemoryChunk::kPageSize;
const Address start = reinterpret_cast<Address>(chunk);
- const Address area_start = start + MemoryChunk::kObjectStartOffset;
+ const Address area_start =
+ start +
+ MemoryChunkLayout::ObjectStartOffsetInMemoryChunk(owner->identity());
const Address area_end = start + size;
// Pooled pages are always regular data pages.
DCHECK_NE(CODE_SPACE, owner->identity());
@@ -1091,31 +1219,10 @@ MemoryChunk* MemoryAllocator::AllocatePagePooled(SpaceType* owner) {
void MemoryAllocator::ZapBlock(Address start, size_t size,
uintptr_t zap_value) {
- DCHECK_EQ(start % kPointerSize, 0);
- DCHECK_EQ(size % kPointerSize, 0);
- for (size_t s = 0; s + kPointerSize <= size; s += kPointerSize) {
- Memory<Address>(start + s) = static_cast<Address>(zap_value);
- }
-}
-
-size_t MemoryAllocator::CodePageGuardStartOffset() {
- // We are guarding code pages: the first OS page after the header
- // will be protected as non-writable.
- return ::RoundUp(Page::kObjectStartOffset, GetCommitPageSize());
-}
-
-size_t MemoryAllocator::CodePageGuardSize() { return GetCommitPageSize(); }
-
-size_t MemoryAllocator::CodePageAreaStartOffset() {
- // We are guarding code pages: the first OS page after the header
- // will be protected as non-writable.
- return CodePageGuardStartOffset() + CodePageGuardSize();
-}
-
-size_t MemoryAllocator::CodePageAreaEndOffset() {
- // We are guarding code pages: the last OS page will be protected as
- // non-writable.
- return Page::kPageSize - static_cast<int>(GetCommitPageSize());
+ DCHECK(IsAligned(start, kTaggedSize));
+ DCHECK(IsAligned(size, kTaggedSize));
+ MemsetTagged(ObjectSlot(start), Object(static_cast<Address>(zap_value)),
+ size >> kTaggedSizeLog2);
}
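// A hedged sketch of what the MemsetTagged fill above amounts to, assuming
// ObjectSlot's store and increment behave as used elsewhere in this patch
// (the real helper is a library routine, not this loop):
//
//   ObjectSlot slot(start);
//   Object zap(static_cast<Address>(zap_value));
//   for (size_t i = 0; i < (size >> kTaggedSizeLog2); ++i, ++slot) {
//     slot.store(zap);
//   }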
intptr_t MemoryAllocator::GetCommitPageSize() {
@@ -1127,17 +1234,31 @@ intptr_t MemoryAllocator::GetCommitPageSize() {
}
}
+base::AddressRegion MemoryAllocator::ComputeDiscardMemoryArea(Address addr,
+ size_t size) {
+ size_t page_size = MemoryAllocator::GetCommitPageSize();
+ if (size < page_size + FreeSpace::kSize) {
+ return base::AddressRegion(0, 0);
+ }
+ Address discardable_start = RoundUp(addr + FreeSpace::kSize, page_size);
+ Address discardable_end = RoundDown(addr + size, page_size);
+ if (discardable_start >= discardable_end) return base::AddressRegion(0, 0);
+ return base::AddressRegion(discardable_start,
+ discardable_end - discardable_start);
+}
+
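// Worked example (illustrative numbers, assuming a 4 KiB commit page size and
// a 16-byte FreeSpace header): for a free block at addr = 0xF00 with
// size = 0x3000, discardable_start = RoundUp(0xF10, 0x1000) = 0x1000 and
// discardable_end = RoundDown(0x3F00, 0x1000) = 0x3000, so the two whole OS
// pages in [0x1000, 0x3000) can be discarded while the FreeSpace header and
// the trailing partial page stay committed.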
bool MemoryAllocator::CommitExecutableMemory(VirtualMemory* vm, Address start,
size_t commit_size,
size_t reserved_size) {
const size_t page_size = GetCommitPageSize();
// All addresses and sizes must be aligned to the commit page size.
- DCHECK(IsAddressAligned(start, page_size));
+ DCHECK(IsAligned(start, page_size));
DCHECK_EQ(0, commit_size % page_size);
DCHECK_EQ(0, reserved_size % page_size);
- const size_t guard_size = CodePageGuardSize();
- const size_t pre_guard_offset = CodePageGuardStartOffset();
- const size_t code_area_offset = CodePageAreaStartOffset();
+ const size_t guard_size = MemoryChunkLayout::CodePageGuardSize();
+ const size_t pre_guard_offset = MemoryChunkLayout::CodePageGuardStartOffset();
+ const size_t code_area_offset =
+ MemoryChunkLayout::ObjectStartOffsetInCodePage();
// reserved_size includes two guard regions, commit_size does not.
DCHECK_LE(commit_size, reserved_size - 2 * guard_size);
const Address pre_guard_page = start + pre_guard_offset;
@@ -1189,8 +1310,9 @@ void MemoryChunk::ReleaseAllocatedMemory() {
ReleaseInvalidatedSlots();
if (local_tracker_ != nullptr) ReleaseLocalTracker();
if (young_generation_bitmap_ != nullptr) ReleaseYoungGenerationBitmap();
+ if (marking_bitmap_ != nullptr) ReleaseMarkingBitmap();
- if (IsPagedSpace()) {
+ if (!heap_->IsLargeMemoryChunk(this)) {
Page* page = static_cast<Page*>(this);
page->ReleaseFreeListCategories();
}
@@ -1275,7 +1397,7 @@ void MemoryChunk::ReleaseInvalidatedSlots() {
}
}
-void MemoryChunk::RegisterObjectWithInvalidatedSlots(HeapObject* object,
+void MemoryChunk::RegisterObjectWithInvalidatedSlots(HeapObject object,
int size) {
if (!ShouldSkipEvacuationSlotRecording()) {
if (invalidated_slots() == nullptr) {
@@ -1286,7 +1408,7 @@ void MemoryChunk::RegisterObjectWithInvalidatedSlots(HeapObject* object,
}
}
-bool MemoryChunk::RegisteredObjectWithInvalidatedSlots(HeapObject* object) {
+bool MemoryChunk::RegisteredObjectWithInvalidatedSlots(HeapObject object) {
if (ShouldSkipEvacuationSlotRecording()) {
// Invalidated slots do not matter if we are not recording slots.
return true;
@@ -1297,8 +1419,8 @@ bool MemoryChunk::RegisteredObjectWithInvalidatedSlots(HeapObject* object) {
return invalidated_slots()->find(object) != invalidated_slots()->end();
}
-void MemoryChunk::MoveObjectWithInvalidatedSlots(HeapObject* old_start,
- HeapObject* new_start) {
+void MemoryChunk::MoveObjectWithInvalidatedSlots(HeapObject old_start,
+ HeapObject new_start) {
DCHECK_LT(old_start, new_start);
DCHECK_EQ(MemoryChunk::FromHeapObject(old_start),
MemoryChunk::FromHeapObject(new_start));
@@ -1330,6 +1452,17 @@ void MemoryChunk::ReleaseYoungGenerationBitmap() {
young_generation_bitmap_ = nullptr;
}
+void MemoryChunk::AllocateMarkingBitmap() {
+ DCHECK_NULL(marking_bitmap_);
+ marking_bitmap_ = static_cast<Bitmap*>(calloc(1, Bitmap::kSize));
+}
+
+void MemoryChunk::ReleaseMarkingBitmap() {
+ DCHECK_NOT_NULL(marking_bitmap_);
+ free(marking_bitmap_);
+ marking_bitmap_ = nullptr;
+}
+
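// With the marking bitmap held out of line, a mark-bit lookup dereferences
// marking_bitmap_ instead of a fixed header offset. Rough shape of the lookup
// (a sketch; the real accessors live on the marking state and Bitmap classes):
//
//   uint32_t index = chunk->AddressToMarkbitIndex(obj->address());
//   MarkBit mark_bit = chunk->marking_bitmap_->MarkBitFromIndex(index);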
// -----------------------------------------------------------------------------
// PagedSpace implementation
@@ -1380,7 +1513,7 @@ intptr_t Space::GetNextInlineAllocationStepSize() {
PagedSpace::PagedSpace(Heap* heap, AllocationSpace space,
Executability executable)
: SpaceWithLinearArea(heap, space), executable_(executable) {
- area_size_ = MemoryAllocator::PageAreaSize(space);
+ area_size_ = MemoryChunkLayout::AllocatableMemoryInMemoryChunk(space);
accounting_stats_.Clear();
}
@@ -1411,12 +1544,12 @@ void PagedSpace::RefillFreeList() {
if (is_local()) {
DCHECK_NE(this, p->owner());
PagedSpace* owner = reinterpret_cast<PagedSpace*>(p->owner());
- base::LockGuard<base::Mutex> guard(owner->mutex());
+ base::MutexGuard guard(owner->mutex());
owner->RefineAllocatedBytesAfterSweeping(p);
owner->RemovePage(p);
added += AddPage(p);
} else {
- base::LockGuard<base::Mutex> guard(mutex());
+ base::MutexGuard guard(mutex());
DCHECK_EQ(this, p->owner());
RefineAllocatedBytesAfterSweeping(p);
added += RelinkFreeListCategories(p);
@@ -1428,7 +1561,7 @@ void PagedSpace::RefillFreeList() {
}
void PagedSpace::MergeCompactionSpace(CompactionSpace* other) {
- base::LockGuard<base::Mutex> guard(mutex());
+ base::MutexGuard guard(mutex());
DCHECK(identity() == other->identity());
// Unmerged fields:
@@ -1491,7 +1624,7 @@ void PagedSpace::RefineAllocatedBytesAfterSweeping(Page* page) {
}
Page* PagedSpace::RemovePageSafe(int size_in_bytes) {
- base::LockGuard<base::Mutex> guard(mutex());
+ base::MutexGuard guard(mutex());
// Check for pages that still contain free list entries. Bail out for smaller
// categories.
const int minimum_category =
@@ -1567,7 +1700,7 @@ void PagedSpace::ShrinkImmortalImmovablePages() {
bool PagedSpace::Expand() {
// Always lock against the main space as we can only adjust capacity and
// pages concurrently for the main paged space.
- base::LockGuard<base::Mutex> guard(heap()->paged_space(identity())->mutex());
+ base::MutexGuard guard(heap()->paged_space(identity())->mutex());
const int size = AreaSize();
@@ -1581,6 +1714,7 @@ bool PagedSpace::Expand() {
AddPage(page);
Free(page->area_start(), page->area_size(),
SpaceAccountingMode::kSpaceAccounted);
+ heap()->NotifyOldGenerationExpansion();
return true;
}
@@ -1732,6 +1866,14 @@ void PagedSpace::ReleasePage(Page* page) {
heap()->memory_allocator()->Free<MemoryAllocator::kPreFreeAndQueue>(page);
}
+void PagedSpace::SetReadable() {
+ DCHECK(identity() == CODE_SPACE);
+ for (Page* page : *this) {
+ CHECK(heap()->memory_allocator()->IsMemoryChunkExecutable(page));
+ page->SetReadable();
+ }
+}
+
void PagedSpace::SetReadAndExecutable() {
DCHECK(identity() == CODE_SPACE);
for (Page* page : *this) {
@@ -1753,7 +1895,7 @@ std::unique_ptr<ObjectIterator> PagedSpace::GetObjectIterator() {
}
bool PagedSpace::RefillLinearAllocationAreaFromFreeList(size_t size_in_bytes) {
- DCHECK(IsAligned(size_in_bytes, kPointerSize));
+ DCHECK(IsAligned(size_in_bytes, kTaggedSize));
DCHECK_LE(top(), limit());
#ifdef DEBUG
if (top() != limit()) {
@@ -1775,8 +1917,8 @@ bool PagedSpace::RefillLinearAllocationAreaFromFreeList(size_t size_in_bytes) {
}
size_t new_node_size = 0;
- FreeSpace* new_node = free_list_.Allocate(size_in_bytes, &new_node_size);
- if (new_node == nullptr) return false;
+ FreeSpace new_node = free_list_.Allocate(size_in_bytes, &new_node_size);
+ if (new_node.is_null()) return false;
DCHECK_GE(new_node_size, size_in_bytes);
@@ -1787,7 +1929,7 @@ bool PagedSpace::RefillLinearAllocationAreaFromFreeList(size_t size_in_bytes) {
// Memory in the linear allocation area is counted as allocated. We may free
// a little of this again immediately - see below.
- Page* page = Page::FromAddress(new_node->address());
+ Page* page = Page::FromHeapObject(new_node);
IncreaseAllocatedBytes(new_node_size, page);
Address start = new_node->address();
@@ -1836,13 +1978,12 @@ void PagedSpace::Verify(Isolate* isolate, ObjectVisitor* visitor) {
Address end_of_previous_object = page->area_start();
Address top = page->area_end();
- for (HeapObject* object = it.Next(); object != nullptr;
- object = it.Next()) {
+ for (HeapObject object = it.Next(); !object.is_null(); object = it.Next()) {
CHECK(end_of_previous_object <= object->address());
// The first word should be a map, and we expect all map pointers to
// be in map space.
- Map* map = object->map();
+ Map map = object->map();
CHECK(map->IsMap());
CHECK(heap()->map_space()->Contains(map) ||
heap()->read_only_space()->Contains(map));
@@ -1864,11 +2005,11 @@ void PagedSpace::Verify(Isolate* isolate, ObjectVisitor* visitor) {
end_of_previous_object = object->address() + size;
if (object->IsExternalString()) {
- ExternalString* external_string = ExternalString::cast(object);
+ ExternalString external_string = ExternalString::cast(object);
size_t size = external_string->ExternalPayloadSize();
external_page_bytes[ExternalBackingStoreType::kExternalString] += size;
} else if (object->IsJSArrayBuffer()) {
- JSArrayBuffer* array_buffer = JSArrayBuffer::cast(object);
+ JSArrayBuffer array_buffer = JSArrayBuffer::cast(object);
if (ArrayBufferTracker::IsTracked(array_buffer)) {
size_t size = array_buffer->byte_length();
external_page_bytes[ExternalBackingStoreType::kArrayBuffer] += size;
@@ -1898,8 +2039,7 @@ void PagedSpace::VerifyLiveBytes() {
CHECK(page->SweepingDone());
HeapObjectIterator it(page);
int black_size = 0;
- for (HeapObject* object = it.Next(); object != nullptr;
- object = it.Next()) {
+ for (HeapObject object = it.Next(); !object.is_null(); object = it.Next()) {
// All the interior pointers should be contained in the heap.
if (marking_state->IsBlack(object)) {
black_size += object->Size();
@@ -1919,8 +2059,7 @@ void PagedSpace::VerifyCountersAfterSweeping() {
total_capacity += page->area_size();
HeapObjectIterator it(page);
size_t real_allocated = 0;
- for (HeapObject* object = it.Next(); object != nullptr;
- object = it.Next()) {
+ for (HeapObject object = it.Next(); !object.is_null(); object = it.Next()) {
if (!object->IsFiller()) {
real_allocated += object->Size();
}
@@ -2072,7 +2211,8 @@ bool SemiSpace::EnsureCurrentCapacity() {
actual_pages++;
current_page =
heap()->memory_allocator()->AllocatePage<MemoryAllocator::kPooled>(
- Page::kAllocatableMemory, this, NOT_EXECUTABLE);
+ MemoryChunkLayout::AllocatableMemoryInDataPage(), this,
+ NOT_EXECUTABLE);
if (current_page == nullptr) return false;
DCHECK_NOT_NULL(current_page);
memory_chunk_list_.PushBack(current_page);
@@ -2101,8 +2241,9 @@ LinearAllocationArea LocalAllocationBuffer::Close() {
}
LocalAllocationBuffer::LocalAllocationBuffer(
- Heap* heap, LinearAllocationArea allocation_info)
- : heap_(heap), allocation_info_(allocation_info) {
+ Heap* heap, LinearAllocationArea allocation_info) V8_NOEXCEPT
+ : heap_(heap),
+ allocation_info_(allocation_info) {
if (IsValid()) {
heap_->CreateFillerObjectAt(
allocation_info_.top(),
@@ -2111,15 +2252,13 @@ LocalAllocationBuffer::LocalAllocationBuffer(
}
}
-
-LocalAllocationBuffer::LocalAllocationBuffer(
- const LocalAllocationBuffer& other) {
+LocalAllocationBuffer::LocalAllocationBuffer(const LocalAllocationBuffer& other)
+ V8_NOEXCEPT {
*this = other;
}
-
LocalAllocationBuffer& LocalAllocationBuffer::operator=(
- const LocalAllocationBuffer& other) {
+ const LocalAllocationBuffer& other) V8_NOEXCEPT {
Close();
heap_ = other.heap_;
allocation_info_ = other.allocation_info_;
@@ -2139,8 +2278,10 @@ void NewSpace::UpdateLinearAllocationArea() {
Address new_top = to_space_.page_low();
MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
allocation_info_.Reset(new_top, to_space_.page_high());
- original_top_ = top();
- original_limit_ = limit();
+ // The order of the following two stores is important.
+ // See the corresponding loads in ConcurrentMarking::Run.
+ original_limit_.store(limit(), std::memory_order_relaxed);
+ original_top_.store(top(), std::memory_order_release);
StartNextInlineAllocationStep();
DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}
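// The release store to original_top_ publishes the preceding relaxed store to
// original_limit_: a thread that reads original_top_ with acquire semantics is
// then guaranteed to also observe the matching limit. Assumed shape of the
// reader side (the actual loads live in ConcurrentMarking::Run):
//
//   Address top = original_top_.load(std::memory_order_acquire);
//   Address limit = original_limit_.load(std::memory_order_relaxed);
//   // [top, limit) is now a consistent snapshot of the allocation area.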
@@ -2156,7 +2297,7 @@ void NewSpace::ResetLinearAllocationArea() {
for (Page* p : to_space_) {
marking_state->ClearLiveness(p);
// Concurrent marking may have local live bytes for this page.
- heap()->concurrent_marking()->ClearLiveness(p);
+ heap()->concurrent_marking()->ClearMemoryChunkData(p);
}
}
@@ -2174,7 +2315,7 @@ void PagedSpace::UpdateInlineAllocationLimit(size_t min_size) {
bool NewSpace::AddFreshPage() {
Address top = allocation_info_.top();
- DCHECK(!Page::IsAtObjectStart(top));
+ DCHECK(!OldSpace::IsAtPageStart(top));
// Do a step to account for memory allocated on previous page.
InlineAllocationStep(top, top, kNullAddress, 0);
@@ -2195,7 +2336,7 @@ bool NewSpace::AddFreshPage() {
bool NewSpace::AddFreshPageSynchronized() {
- base::LockGuard<base::Mutex> guard(&mutex_);
+ base::MutexGuard guard(&mutex_);
return AddFreshPage();
}
@@ -2234,7 +2375,9 @@ bool NewSpace::EnsureAllocation(int size_in_bytes,
}
size_t LargeObjectSpace::Available() {
- return ObjectSizeFor(heap()->memory_allocator()->Available());
+ // We return zero here since we cannot take advantage of already allocated
+ // large object memory.
+ return 0;
}
void SpaceWithLinearArea::StartNextInlineAllocationStep() {
@@ -2330,11 +2473,11 @@ void NewSpace::Verify(Isolate* isolate) {
CHECK(!Page::FromAllocationAreaAddress(current)->ContainsLimit(top()) ||
current < top());
- HeapObject* object = HeapObject::FromAddress(current);
+ HeapObject object = HeapObject::FromAddress(current);
// The first word should be a map, and we expect all map pointers to
// be in map space or read-only space.
- Map* map = object->map();
+ Map map = object->map();
CHECK(map->IsMap());
CHECK(heap()->map_space()->Contains(map) ||
heap()->read_only_space()->Contains(map));
@@ -2352,11 +2495,11 @@ void NewSpace::Verify(Isolate* isolate) {
object->IterateBody(map, size, &visitor);
if (object->IsExternalString()) {
- ExternalString* external_string = ExternalString::cast(object);
+ ExternalString external_string = ExternalString::cast(object);
size_t size = external_string->ExternalPayloadSize();
external_space_bytes[ExternalBackingStoreType::kExternalString] += size;
} else if (object->IsJSArrayBuffer()) {
- JSArrayBuffer* array_buffer = JSArrayBuffer::cast(object);
+ JSArrayBuffer array_buffer = JSArrayBuffer::cast(object);
if (ArrayBufferTracker::IsTracked(array_buffer)) {
size_t size = array_buffer->byte_length();
external_space_bytes[ExternalBackingStoreType::kArrayBuffer] += size;
@@ -2411,7 +2554,8 @@ bool SemiSpace::Commit() {
for (int pages_added = 0; pages_added < num_pages; pages_added++) {
Page* new_page =
heap()->memory_allocator()->AllocatePage<MemoryAllocator::kPooled>(
- Page::kAllocatableMemory, this, NOT_EXECUTABLE);
+ MemoryChunkLayout::AllocatableMemoryInDataPage(), this,
+ NOT_EXECUTABLE);
if (new_page == nullptr) {
if (pages_added) RewindPages(pages_added);
return false;
@@ -2468,7 +2612,8 @@ bool SemiSpace::GrowTo(size_t new_capacity) {
for (int pages_added = 0; pages_added < delta_pages; pages_added++) {
Page* new_page =
heap()->memory_allocator()->AllocatePage<MemoryAllocator::kPooled>(
- Page::kAllocatableMemory, this, NOT_EXECUTABLE);
+ MemoryChunkLayout::AllocatableMemoryInDataPage(), this,
+ NOT_EXECUTABLE);
if (new_page == nullptr) {
if (pages_added) RewindPages(pages_added);
return false;
@@ -2696,19 +2841,19 @@ size_t NewSpace::CommittedPhysicalMemory() {
void FreeListCategory::Reset() {
- set_top(nullptr);
+ set_top(FreeSpace());
set_prev(nullptr);
set_next(nullptr);
available_ = 0;
}
-FreeSpace* FreeListCategory::PickNodeFromList(size_t minimum_size,
- size_t* node_size) {
+FreeSpace FreeListCategory::PickNodeFromList(size_t minimum_size,
+ size_t* node_size) {
DCHECK(page()->CanAllocate());
- FreeSpace* node = top();
- if (node == nullptr || static_cast<size_t>(node->Size()) < minimum_size) {
+ FreeSpace node = top();
+ if (node.is_null() || static_cast<size_t>(node->Size()) < minimum_size) {
*node_size = 0;
- return nullptr;
+ return FreeSpace();
}
set_top(node->next());
*node_size = node->Size();
@@ -2716,11 +2861,11 @@ FreeSpace* FreeListCategory::PickNodeFromList(size_t minimum_size,
return node;
}
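// FreeSpace is now a value type wrapping a tagged pointer, so the empty case
// is spelled FreeSpace() / is_null() rather than nullptr. Sketch of the idiom
// this file now uses throughout:
//
//   FreeSpace node = top();
//   if (node.is_null()) return FreeSpace();  // no node of sufficient size
//   set_top(node->next());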
-FreeSpace* FreeListCategory::SearchForNodeInList(size_t minimum_size,
- size_t* node_size) {
+FreeSpace FreeListCategory::SearchForNodeInList(size_t minimum_size,
+ size_t* node_size) {
DCHECK(page()->CanAllocate());
- FreeSpace* prev_non_evac_node = nullptr;
- for (FreeSpace* cur_node = top(); cur_node != nullptr;
+ FreeSpace prev_non_evac_node;
+ for (FreeSpace cur_node = top(); !cur_node.is_null();
cur_node = cur_node->next()) {
size_t size = cur_node->size();
if (size >= minimum_size) {
@@ -2729,9 +2874,8 @@ FreeSpace* FreeListCategory::SearchForNodeInList(size_t minimum_size,
if (cur_node == top()) {
set_top(cur_node->next());
}
- if (prev_non_evac_node != nullptr) {
- MemoryChunk* chunk =
- MemoryChunk::FromAddress(prev_non_evac_node->address());
+ if (!prev_non_evac_node.is_null()) {
+ MemoryChunk* chunk = MemoryChunk::FromHeapObject(prev_non_evac_node);
if (chunk->owner()->identity() == CODE_SPACE) {
chunk->heap()->UnprotectAndRegisterMemoryChunk(chunk);
}
@@ -2743,13 +2887,13 @@ FreeSpace* FreeListCategory::SearchForNodeInList(size_t minimum_size,
prev_non_evac_node = cur_node;
}
- return nullptr;
+ return FreeSpace();
}
void FreeListCategory::Free(Address start, size_t size_in_bytes,
FreeMode mode) {
DCHECK(page()->CanAllocate());
- FreeSpace* free_space = FreeSpace::cast(HeapObject::FromAddress(start));
+ FreeSpace free_space = FreeSpace::cast(HeapObject::FromAddress(start));
free_space->set_next(top());
set_top(free_space);
available_ += size_in_bytes;
@@ -2760,11 +2904,14 @@ void FreeListCategory::Free(Address start, size_t size_in_bytes,
void FreeListCategory::RepairFreeList(Heap* heap) {
- FreeSpace* n = top();
- while (n != nullptr) {
- Map** map_location = reinterpret_cast<Map**>(n->address());
- if (*map_location == nullptr) {
- *map_location = ReadOnlyRoots(heap).free_space_map();
+ FreeSpace n = top();
+ while (!n.is_null()) {
+ MapWordSlot map_location = n.map_slot();
+ // We can't use .is_null() here because *map_location returns an
+ // Object (for which "is null" is not defined, as it would be
+ // indistinguishable from "is Smi(0)"). Only HeapObject has "is_null()".
+ if (*map_location == Map()) {
+ map_location.store(ReadOnlyRoots(heap).free_space_map());
} else {
DCHECK(*map_location == ReadOnlyRoots(heap).free_space_map());
}
@@ -2814,14 +2961,14 @@ size_t FreeList::Free(Address start, size_t size_in_bytes, FreeMode mode) {
return 0;
}
-FreeSpace* FreeList::FindNodeIn(FreeListCategoryType type, size_t minimum_size,
- size_t* node_size) {
+FreeSpace FreeList::FindNodeIn(FreeListCategoryType type, size_t minimum_size,
+ size_t* node_size) {
FreeListCategoryIterator it(this, type);
- FreeSpace* node = nullptr;
+ FreeSpace node;
while (it.HasNext()) {
FreeListCategory* current = it.Next();
node = current->PickNodeFromList(minimum_size, node_size);
- if (node != nullptr) {
+ if (!node.is_null()) {
DCHECK(IsVeryLong() || Available() == SumFreeLists());
return node;
}
@@ -2830,26 +2977,25 @@ FreeSpace* FreeList::FindNodeIn(FreeListCategoryType type, size_t minimum_size,
return node;
}
-FreeSpace* FreeList::TryFindNodeIn(FreeListCategoryType type,
- size_t minimum_size, size_t* node_size) {
- if (categories_[type] == nullptr) return nullptr;
- FreeSpace* node =
- categories_[type]->PickNodeFromList(minimum_size, node_size);
- if (node != nullptr) {
+FreeSpace FreeList::TryFindNodeIn(FreeListCategoryType type,
+ size_t minimum_size, size_t* node_size) {
+ if (categories_[type] == nullptr) return FreeSpace();
+ FreeSpace node = categories_[type]->PickNodeFromList(minimum_size, node_size);
+ if (!node.is_null()) {
DCHECK(IsVeryLong() || Available() == SumFreeLists());
}
return node;
}
-FreeSpace* FreeList::SearchForNodeInList(FreeListCategoryType type,
- size_t* node_size,
- size_t minimum_size) {
+FreeSpace FreeList::SearchForNodeInList(FreeListCategoryType type,
+ size_t* node_size,
+ size_t minimum_size) {
FreeListCategoryIterator it(this, type);
- FreeSpace* node = nullptr;
+ FreeSpace node;
while (it.HasNext()) {
FreeListCategory* current = it.Next();
node = current->SearchForNodeInList(minimum_size, node_size);
- if (node != nullptr) {
+ if (!node.is_null()) {
DCHECK(IsVeryLong() || Available() == SumFreeLists());
return node;
}
@@ -2860,33 +3006,33 @@ FreeSpace* FreeList::SearchForNodeInList(FreeListCategoryType type,
return node;
}
-FreeSpace* FreeList::Allocate(size_t size_in_bytes, size_t* node_size) {
+FreeSpace FreeList::Allocate(size_t size_in_bytes, size_t* node_size) {
DCHECK_GE(kMaxBlockSize, size_in_bytes);
- FreeSpace* node = nullptr;
+ FreeSpace node;
// First try the allocation fast path: try to allocate the minimum element
// size of a free list category. This operation is constant time.
FreeListCategoryType type =
SelectFastAllocationFreeListCategoryType(size_in_bytes);
- for (int i = type; i < kHuge && node == nullptr; i++) {
+ for (int i = type; i < kHuge && node.is_null(); i++) {
node = FindNodeIn(static_cast<FreeListCategoryType>(i), size_in_bytes,
node_size);
}
- if (node == nullptr) {
+ if (node.is_null()) {
// Next search the huge list for free list nodes. This takes linear time in
// the number of huge elements.
node = SearchForNodeInList(kHuge, node_size, size_in_bytes);
}
- if (node == nullptr && type != kHuge) {
+ if (node.is_null() && type != kHuge) {
// We didn't find anything in the huge list. Now search the best fitting
// free list for a node that has at least the requested size.
type = SelectFreeListCategoryType(size_in_bytes);
node = TryFindNodeIn(type, size_in_bytes, node_size);
}
- if (node != nullptr) {
- Page::FromAddress(node->address())->IncreaseAllocatedBytes(*node_size);
+ if (!node.is_null()) {
+ Page::FromHeapObject(node)->IncreaseAllocatedBytes(*node_size);
}
DCHECK(IsVeryLong() || Available() == SumFreeLists());
@@ -2971,9 +3117,12 @@ void FreeList::PrintCategories(FreeListCategoryType type) {
#ifdef DEBUG
size_t FreeListCategory::SumFreeList() {
size_t sum = 0;
- FreeSpace* cur = top();
- while (cur != nullptr) {
- DCHECK(cur->map() == page()->heap()->root(RootIndex::kFreeSpaceMap));
+ FreeSpace cur = top();
+ while (!cur.is_null()) {
+ // We can't use "cur->map()" here because both cur's map and the
+ // root can be null during bootstrapping.
+ DCHECK_EQ(*cur->map_slot(),
+ page()->heap()->isolate()->root(RootIndex::kFreeSpaceMap));
sum += cur->relaxed_read_size();
cur = cur->next();
}
@@ -2982,8 +3131,8 @@ size_t FreeListCategory::SumFreeList() {
int FreeListCategory::FreeListLength() {
int length = 0;
- FreeSpace* cur = top();
- while (cur != nullptr) {
+ FreeSpace cur = top();
+ while (!cur.is_null()) {
length++;
cur = cur->next();
if (length == kVeryLongFreeList) return length;
@@ -3034,34 +3183,6 @@ size_t PagedSpace::SizeOfObjects() {
return Size() - (limit() - top());
}
-// After we have booted, we have created a map which represents free space
-// on the heap. If there was already a free list then the elements on it
-// were created with the wrong FreeSpaceMap (normally nullptr), so we need to
-// fix them.
-void PagedSpace::RepairFreeListsAfterDeserialization() {
- free_list_.RepairLists(heap());
- // Each page may have a small free space that is not tracked by a free list.
- // Those free spaces still contain null as their map pointer.
- // Overwrite them with new fillers.
- for (Page* page : *this) {
- int size = static_cast<int>(page->wasted_memory());
- if (size == 0) {
- // If there is no wasted memory then all free space is in the free list.
- continue;
- }
- Address start = page->HighWaterMark();
- Address end = page->area_end();
- if (start < end - size) {
- // A region at the high watermark is already in free list.
- HeapObject* filler = HeapObject::FromAddress(start);
- CHECK(filler->IsFiller());
- start += filler->Size();
- }
- CHECK_EQ(size, static_cast<int>(end - start));
- heap()->CreateFillerObjectAt(start, size, ClearRecordedSlots::kNo);
- }
-}
-
bool PagedSpace::SweepAndRetryAllocation(int size_in_bytes) {
MarkCompactCollector* collector = heap()->mark_compact_collector();
if (collector->sweeping_in_progress()) {
@@ -3159,7 +3280,7 @@ bool PagedSpace::RawSlowRefillLinearAllocationArea(int size_in_bytes) {
// MapSpace implementation
#ifdef VERIFY_HEAP
-void MapSpace::VerifyObject(HeapObject* object) { CHECK(object->IsMap()); }
+void MapSpace::VerifyObject(HeapObject object) { CHECK(object->IsMap()); }
#endif
ReadOnlySpace::ReadOnlySpace(Heap* heap)
@@ -3179,7 +3300,8 @@ void ReadOnlyPage::MakeHeaderRelocatable() {
void ReadOnlySpace::SetPermissionsForPages(PageAllocator::Permission access) {
const size_t page_size = MemoryAllocator::GetCommitPageSize();
- const size_t area_start_offset = RoundUp(Page::kObjectStartOffset, page_size);
+ const size_t area_start_offset =
+ RoundUp(MemoryChunkLayout::ObjectStartOffsetInDataPage(), page_size);
MemoryAllocator* memory_allocator = heap()->memory_allocator();
for (Page* p : *this) {
ReadOnlyPage* page = static_cast<ReadOnlyPage*>(p);
@@ -3196,13 +3318,41 @@ void ReadOnlySpace::SetPermissionsForPages(PageAllocator::Permission access) {
}
}
+// After we have booted, we have created a map which represents free space
+// on the heap. If there was already a free list then the elements on it
+// were created with the wrong FreeSpaceMap (normally nullptr), so we need to
+// fix them.
+void ReadOnlySpace::RepairFreeListsAfterDeserialization() {
+ free_list_.RepairLists(heap());
+ // Each page may have a small free space that is not tracked by a free list.
+ // Those free spaces still contain null as their map pointer.
+ // Overwrite them with new fillers.
+ for (Page* page : *this) {
+ int size = static_cast<int>(page->wasted_memory());
+ if (size == 0) {
+ // If there is no wasted memory then all free space is in the free list.
+ continue;
+ }
+ Address start = page->HighWaterMark();
+ Address end = page->area_end();
+ if (start < end - size) {
+ // A region at the high watermark is already in free list.
+ HeapObject filler = HeapObject::FromAddress(start);
+ CHECK(filler->IsFiller());
+ start += filler->Size();
+ }
+ CHECK_EQ(size, static_cast<int>(end - start));
+ heap()->CreateFillerObjectAt(start, size, ClearRecordedSlots::kNo);
+ }
+}
+
void ReadOnlySpace::ClearStringPaddingIfNeeded() {
if (is_string_padding_cleared_) return;
WritableScope writable_scope(this);
for (Page* page : *this) {
HeapObjectIterator iterator(page);
- for (HeapObject* o = iterator.Next(); o != nullptr; o = iterator.Next()) {
+ for (HeapObject o = iterator.Next(); !o.is_null(); o = iterator.Next()) {
if (o->IsSeqOneByteString()) {
SeqOneByteString::cast(o)->clear_padding();
} else if (o->IsSeqTwoByteString()) {
@@ -3255,16 +3405,14 @@ LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space) {
current_ = space->first_page();
}
-HeapObject* LargeObjectIterator::Next() {
-  if (current_ == nullptr) return nullptr;
-
-  HeapObject* object = current_->GetObject();
+HeapObject LargeObjectIterator::Next() {
+  if (current_ == nullptr) return HeapObject();
+ HeapObject object = current_->GetObject();
current_ = current_->next_page();
return object;
}
-
// -----------------------------------------------------------------------------
// LargeObjectSpace
@@ -3289,6 +3437,10 @@ void LargeObjectSpace::TearDown() {
}
}
+AllocationResult LargeObjectSpace::AllocateRaw(int object_size) {
+ return AllocateRaw(object_size, NOT_EXECUTABLE);
+}
+
AllocationResult LargeObjectSpace::AllocateRaw(int object_size,
Executability executable) {
// Check if we want to force a GC before growing the old space further.
@@ -3301,7 +3453,7 @@ AllocationResult LargeObjectSpace::AllocateRaw(int object_size,
LargePage* page = AllocateLargePage(object_size, executable);
if (page == nullptr) return AllocationResult::Retry(identity());
page->SetOldGenerationPageFlags(heap()->incremental_marking()->IsMarking());
- HeapObject* object = page->GetObject();
+ HeapObject object = page->GetObject();
heap()->StartIncrementalMarkingIfAllocationLimitIsReached(
heap()->GCFlagsForIncrementalMarking(),
kGCCallbackScheduleIdleGarbageCollection);
@@ -3312,6 +3464,7 @@ AllocationResult LargeObjectSpace::AllocateRaw(int object_size,
heap()->incremental_marking()->black_allocation(),
heap()->incremental_marking()->marking_state()->IsBlack(object));
page->InitializationMemoryFence();
+ heap()->NotifyOldGenerationExpansion();
return object;
}
@@ -3324,15 +3477,8 @@ LargePage* LargeObjectSpace::AllocateLargePage(int object_size,
Register(page, object_size);
-  HeapObject* object = page->GetObject();
-  if (Heap::ShouldZapGarbage()) {
-    // Make the object consistent so the heap can be verified in OldSpaceStep.
-    // We only need to do this in debug builds or if verify_heap is on.
-    reinterpret_cast<Object**>(object->address())[0] =
-        ReadOnlyRoots(heap()).fixed_array_map();
-    reinterpret_cast<Object**>(object->address())[1] = Smi::kZero;
-  }
+  HeapObject object = page->GetObject();
heap()->CreateFillerObjectAt(object->address(), object_size,
ClearRecordedSlots::kNo);
AllocationStep(object_size, object->address(), object_size);
@@ -3349,7 +3495,7 @@ size_t LargeObjectSpace::CommittedPhysicalMemory() {
// GC support
-Object* LargeObjectSpace::FindObject(Address a) {
+Object LargeObjectSpace::FindObject(Address a) {
LargePage* page = FindPage(a);
if (page != nullptr) {
return page->GetObject();
@@ -3357,11 +3503,6 @@ Object* LargeObjectSpace::FindObject(Address a) {
return Smi::kZero; // Signaling not found.
}
-LargePage* LargeObjectSpace::FindPageThreadSafe(Address a) {
- base::LockGuard<base::Mutex> guard(&chunk_map_mutex_);
- return FindPage(a);
-}
-
LargePage* LargeObjectSpace::FindPage(Address a) {
const Address key = MemoryChunk::FromAddress(a)->address();
auto it = chunk_map_.find(key);
@@ -3379,10 +3520,10 @@ void LargeObjectSpace::ClearMarkingStateOfLiveObjects() {
IncrementalMarking::NonAtomicMarkingState* marking_state =
heap()->incremental_marking()->non_atomic_marking_state();
LargeObjectIterator it(this);
- for (HeapObject* obj = it.Next(); obj != nullptr; obj = it.Next()) {
+ for (HeapObject obj = it.Next(); !obj.is_null(); obj = it.Next()) {
if (marking_state->IsBlackOrGrey(obj)) {
Marking::MarkWhite(marking_state->MarkBitFrom(obj));
- MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
+ MemoryChunk* chunk = MemoryChunk::FromHeapObject(obj);
RememberedSet<OLD_TO_NEW>::FreeEmptyBuckets(chunk);
chunk->ResetProgressBar();
marking_state->SetLiveBytes(chunk, 0);
@@ -3394,7 +3535,7 @@ void LargeObjectSpace::ClearMarkingStateOfLiveObjects() {
void LargeObjectSpace::InsertChunkMapEntries(LargePage* page) {
// There may be concurrent access on the chunk map. We have to take the lock
// here.
- base::LockGuard<base::Mutex> guard(&chunk_map_mutex_);
+ base::MutexGuard guard(&chunk_map_mutex_);
for (Address current = reinterpret_cast<Address>(page);
current < reinterpret_cast<Address>(page) + page->size();
current += MemoryChunk::kPageSize) {
@@ -3452,10 +3593,12 @@ void LargeObjectSpace::FreeUnmarkedObjects() {
LargePage* current = first_page();
IncrementalMarking::NonAtomicMarkingState* marking_state =
heap()->incremental_marking()->non_atomic_marking_state();
+ // Right-trimming does not update the objects_size_ counter. We are lazily
+ // updating it after every GC.
objects_size_ = 0;
while (current) {
LargePage* next_current = current->next_page();
- HeapObject* object = current->GetObject();
+ HeapObject object = current->GetObject();
DCHECK(!marking_state->IsGrey(object));
if (marking_state->IsBlack(object)) {
Address free_start;
@@ -3490,14 +3633,12 @@ void LargeObjectSpace::FreeUnmarkedObjects() {
}
}
-
-bool LargeObjectSpace::Contains(HeapObject* object) {
- Address address = object->address();
- MemoryChunk* chunk = MemoryChunk::FromAddress(address);
+bool LargeObjectSpace::Contains(HeapObject object) {
+ MemoryChunk* chunk = MemoryChunk::FromHeapObject(object);
bool owned = (chunk->owner() == this);
- SLOW_DCHECK(!owned || FindObject(address)->IsHeapObject());
+ SLOW_DCHECK(!owned || FindObject(object->address())->IsHeapObject());
return owned;
}
@@ -3520,25 +3661,31 @@ void LargeObjectSpace::Verify(Isolate* isolate) {
chunk = chunk->next_page()) {
// Each chunk contains an object that starts at the large object page's
// object area start.
- HeapObject* object = chunk->GetObject();
- Page* page = Page::FromAddress(object->address());
+ HeapObject object = chunk->GetObject();
+ Page* page = Page::FromHeapObject(object);
CHECK(object->address() == page->area_start());
// The first word should be a map, and we expect all map pointers to be
// in map space or read-only space.
- Map* map = object->map();
+ Map map = object->map();
CHECK(map->IsMap());
CHECK(heap()->map_space()->Contains(map) ||
heap()->read_only_space()->Contains(map));
// We have only the following types in the large object space:
- CHECK(object->IsAbstractCode() || object->IsSeqString() ||
+ if (!(object->IsAbstractCode() || object->IsSeqString() ||
object->IsExternalString() || object->IsThinString() ||
object->IsFixedArray() || object->IsFixedDoubleArray() ||
object->IsWeakFixedArray() || object->IsWeakArrayList() ||
object->IsPropertyArray() || object->IsByteArray() ||
object->IsFeedbackVector() || object->IsBigInt() ||
- object->IsFreeSpace() || object->IsFeedbackMetadata());
+ object->IsFreeSpace() || object->IsFeedbackMetadata() ||
+ object->IsContext() ||
+ object->IsUncompiledDataWithoutPreparseData() ||
+ object->IsPreparseData())) {
+ FATAL("Found invalid Object (instance_type=%i) in large object space.",
+ object->map()->instance_type());
+ }
// The object itself should look OK.
object->ObjectVerify(isolate);
@@ -3552,21 +3699,21 @@ void LargeObjectSpace::Verify(Isolate* isolate) {
VerifyPointersVisitor code_visitor(heap());
object->IterateBody(map, object->Size(), &code_visitor);
} else if (object->IsFixedArray()) {
- FixedArray* array = FixedArray::cast(object);
+ FixedArray array = FixedArray::cast(object);
for (int j = 0; j < array->length(); j++) {
- Object* element = array->get(j);
+ Object element = array->get(j);
if (element->IsHeapObject()) {
- HeapObject* element_object = HeapObject::cast(element);
+ HeapObject element_object = HeapObject::cast(element);
CHECK(heap()->Contains(element_object));
CHECK(element_object->map()->IsMap());
}
}
} else if (object->IsPropertyArray()) {
- PropertyArray* array = PropertyArray::cast(object);
+ PropertyArray array = PropertyArray::cast(object);
for (int j = 0; j < array->length(); j++) {
- Object* property = array->get(j);
+ Object property = array->get(j);
if (property->IsHeapObject()) {
- HeapObject* property_object = HeapObject::cast(property);
+ HeapObject property_object = HeapObject::cast(property);
CHECK(heap()->Contains(property_object));
CHECK(property_object->map()->IsMap());
}
@@ -3588,7 +3735,7 @@ void LargeObjectSpace::Verify(Isolate* isolate) {
void LargeObjectSpace::Print() {
StdoutStream os;
LargeObjectIterator it(this);
- for (HeapObject* obj = it.Next(); obj != nullptr; obj = it.Next()) {
+ for (HeapObject obj = it.Next(); !obj.is_null(); obj = it.Next()) {
obj->Print(os);
}
}
@@ -3600,7 +3747,7 @@ void Page::Print() {
printf(" --------------------------------------\n");
HeapObjectIterator objects(this);
unsigned mark_size = 0;
- for (HeapObject* object = objects.Next(); object != nullptr;
+ for (HeapObject object = objects.Next(); !object.is_null();
object = objects.Next()) {
bool is_marked =
heap()->incremental_marking()->marking_state()->IsBlackOrGrey(object);
@@ -3643,5 +3790,27 @@ void NewLargeObjectSpace::Flip() {
chunk->ClearFlag(MemoryChunk::IN_TO_SPACE);
}
}
+
+void NewLargeObjectSpace::FreeAllObjects() {
+ LargePage* current = first_page();
+ while (current) {
+ LargePage* next_current = current->next_page();
+ Unregister(current, static_cast<size_t>(current->GetObject()->Size()));
+ heap()->memory_allocator()->Free<MemoryAllocator::kPreFreeAndQueue>(
+ current);
+ current = next_current;
+ }
+ // Right-trimming does not update the objects_size_ counter. We are lazily
+ // updating it after every GC.
+ objects_size_ = 0;
+}
+
+CodeLargeObjectSpace::CodeLargeObjectSpace(Heap* heap)
+ : LargeObjectSpace(heap, CODE_LO_SPACE) {}
+
+AllocationResult CodeLargeObjectSpace::AllocateRaw(int object_size) {
+ return LargeObjectSpace::AllocateRaw(object_size, EXECUTABLE);
+}
+
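// CodeLargeObjectSpace simply pins the EXECUTABLE flavor of AllocateRaw.
// Hedged usage sketch (code_lo_space is a hypothetical pointer to this space):
//
//   AllocationResult result = code_lo_space->AllocateRaw(object_size);
//   if (result.IsRetry()) {
//     // Trigger a GC and retry; the space returned Retry(CODE_LO_SPACE).
//   }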
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/heap/spaces.h b/deps/v8/src/heap/spaces.h
index 018e9da47b..e0bd39ea2b 100644
--- a/deps/v8/src/heap/spaces.h
+++ b/deps/v8/src/heap/spaces.h
@@ -15,6 +15,7 @@
#include "src/allocation.h"
#include "src/base/atomic-utils.h"
#include "src/base/bounded-page-allocator.h"
+#include "src/base/export-template.h"
#include "src/base/iterator.h"
#include "src/base/list.h"
#include "src/base/platform/mutex.h"
@@ -25,6 +26,8 @@
#include "src/heap/invalidated-slots.h"
#include "src/heap/marking.h"
#include "src/objects.h"
+#include "src/objects/free-space.h"
+#include "src/objects/heap-object.h"
#include "src/objects/map.h"
#include "src/utils.h"
@@ -45,6 +48,7 @@ class LinearAllocationArea;
class LocalArrayBufferTracker;
class MemoryAllocator;
class MemoryChunk;
+class MemoryChunkLayout;
class Page;
class PagedSpace;
class SemiSpace;
@@ -121,9 +125,6 @@ class Space;
#define DCHECK_CODEOBJECT_SIZE(size, code_space) \
DCHECK((0 < size) && (size <= code_space->AreaSize()))
-#define DCHECK_PAGE_OFFSET(offset) \
- DCHECK((Page::kObjectStartOffset <= offset) && (offset <= Page::kPageSize))
-
enum FreeListCategoryType {
kTiniest,
kTiny,
@@ -156,14 +157,12 @@ class FreeListCategory {
page_(page),
type_(kInvalidCategory),
available_(0),
- top_(nullptr),
prev_(nullptr),
next_(nullptr) {}
void Initialize(FreeListCategoryType type) {
type_ = type;
available_ = 0;
- top_ = nullptr;
prev_ = nullptr;
next_ = nullptr;
}
@@ -183,16 +182,16 @@ class FreeListCategory {
// Performs a single try to pick a node of at least |minimum_size| from the
// category. Stores the actual size in |node_size|. Returns an empty
// FreeSpace if no node is found.
- FreeSpace* PickNodeFromList(size_t minimum_size, size_t* node_size);
+ FreeSpace PickNodeFromList(size_t minimum_size, size_t* node_size);
// Picks a node of at least |minimum_size| from the category. Stores the
// actual size in |node_size|. Returns an empty FreeSpace if no node is found.
- FreeSpace* SearchForNodeInList(size_t minimum_size, size_t* node_size);
+ FreeSpace SearchForNodeInList(size_t minimum_size, size_t* node_size);
inline FreeList* owner();
inline Page* page() const { return page_; }
inline bool is_linked();
- bool is_empty() { return top() == nullptr; }
+ bool is_empty() { return top().is_null(); }
size_t available() const { return available_; }
void set_free_list(FreeList* free_list) { free_list_ = free_list; }
@@ -207,8 +206,8 @@ class FreeListCategory {
// {kVeryLongFreeList} by manually walking the list.
static const int kVeryLongFreeList = 500;
- FreeSpace* top() { return top_; }
- void set_top(FreeSpace* top) { top_ = top; }
+ FreeSpace top() { return top_; }
+ void set_top(FreeSpace top) { top_ = top; }
FreeListCategory* prev() { return prev_; }
void set_prev(FreeListCategory* prev) { prev_ = prev; }
FreeListCategory* next() { return next_; }
@@ -227,8 +226,8 @@ class FreeListCategory {
// category.
size_t available_;
- // |top_|: Points to the top FreeSpace* in the free list category.
- FreeSpace* top_;
+ // |top_|: Points to the top FreeSpace in the free list category.
+ FreeSpace top_;
FreeListCategory* prev_;
FreeListCategory* next_;
@@ -239,6 +238,19 @@ class FreeListCategory {
DISALLOW_IMPLICIT_CONSTRUCTORS(FreeListCategory);
};
+class MemoryChunkLayout {
+ public:
+ static size_t CodePageGuardStartOffset();
+ static size_t CodePageGuardSize();
+ static intptr_t ObjectStartOffsetInCodePage();
+ static intptr_t ObjectEndOffsetInCodePage();
+ static size_t AllocatableMemoryInCodePage();
+ static intptr_t ObjectStartOffsetInDataPage();
+ V8_EXPORT_PRIVATE static size_t AllocatableMemoryInDataPage();
+ static size_t ObjectStartOffsetInMemoryChunk(AllocationSpace space);
+ static size_t AllocatableMemoryInMemoryChunk(AllocationSpace space);
+};
+
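// Layout queries are now per-space rather than a single kObjectStartOffset
// constant. Hedged usage sketch (space here stands for any Space*):
//
//   size_t start_offset =
//       MemoryChunkLayout::ObjectStartOffsetInMemoryChunk(space->identity());
//   size_t usable_bytes =
//       MemoryChunkLayout::AllocatableMemoryInMemoryChunk(space->identity());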
// MemoryChunk represents a memory region owned by a specific space.
// It is divided into the header and the body. Chunk start is always
// 1MB aligned. Start of the body is aligned so it can accommodate
@@ -346,82 +358,71 @@ class MemoryChunk {
static const intptr_t kSizeOffset = 0;
static const intptr_t kFlagsOffset = kSizeOffset + kSizetSize;
- static const intptr_t kAreaStartOffset = kFlagsOffset + kIntptrSize;
- static const intptr_t kAreaEndOffset = kAreaStartOffset + kPointerSize;
- static const intptr_t kReservationOffset = kAreaEndOffset + kPointerSize;
- static const intptr_t kOwnerOffset = kReservationOffset + 2 * kPointerSize;
-
- static const size_t kMinHeaderSize =
- kSizeOffset // NOLINT
- + kSizetSize // size_t size
- + kUIntptrSize // uintptr_t flags_
- + kPointerSize // Address area_start_
- + kPointerSize // Address area_end_
- + 3 * kPointerSize // VirtualMemory reservation_
- + kPointerSize // Address owner_
- + kPointerSize // Heap* heap_
- + kIntptrSize // intptr_t progress_bar_
- + kIntptrSize // std::atomic<intptr_t> live_byte_count_
- + kPointerSize * NUMBER_OF_REMEMBERED_SET_TYPES // SlotSet* array
- + kPointerSize * NUMBER_OF_REMEMBERED_SET_TYPES // TypedSlotSet* array
- + kPointerSize // InvalidatedSlots* invalidated_slots_
- + kPointerSize // SkipList* skip_list_
- + kPointerSize // std::atomic<intptr_t> high_water_mark_
- + kPointerSize // base::Mutex* mutex_
- +
- kPointerSize // std::atomic<ConcurrentSweepingState> concurrent_sweeping_
- + kPointerSize // base::Mutex* page_protection_change_mutex_
- + kPointerSize // unitptr_t write_unprotect_counter_
+ static const intptr_t kMarkBitmapOffset = kFlagsOffset + kUIntptrSize;
+ static const intptr_t kReservationOffset =
+ kMarkBitmapOffset + kSystemPointerSize;
+ static const intptr_t kHeapOffset =
+ kReservationOffset + 3 * kSystemPointerSize;
+ static const intptr_t kHeaderSentinelOffset =
+ kHeapOffset + kSystemPointerSize;
+
+ static const size_t kHeaderSize =
+ kSizeOffset // NOLINT
+ + kSizetSize // size_t size
+ + kUIntptrSize // uintptr_t flags_
+ + kSystemPointerSize // Bitmap* marking_bitmap_
+ + 3 * kSystemPointerSize // VirtualMemory reservation_
+ + kSystemPointerSize // Heap* heap_
+ + kSystemPointerSize // Address header_sentinel_
+ + kSystemPointerSize // Address area_start_
+ + kSystemPointerSize // Address area_end_
+ + kSystemPointerSize // Address owner_
+ + kIntptrSize // intptr_t progress_bar_
+ + kIntptrSize // std::atomic<intptr_t> live_byte_count_
+ + kSystemPointerSize * NUMBER_OF_REMEMBERED_SET_TYPES // SlotSet* array
+ + kSystemPointerSize *
+ NUMBER_OF_REMEMBERED_SET_TYPES // TypedSlotSet* array
+ + kSystemPointerSize // InvalidatedSlots* invalidated_slots_
+ + kSystemPointerSize // SkipList* skip_list_
+ + kSystemPointerSize // std::atomic<intptr_t> high_water_mark_
+ + kSystemPointerSize // base::Mutex* mutex_
+ + kSystemPointerSize // std::atomic<ConcurrentSweepingState>
+ // concurrent_sweeping_
+ + kSystemPointerSize // base::Mutex* page_protection_change_mutex_
+ kSystemPointerSize // uintptr_t write_unprotect_counter_
+ kSizetSize * ExternalBackingStoreType::kNumTypes
// std::atomic<size_t> external_backing_store_bytes_
- + kSizetSize // size_t allocated_bytes_
- + kSizetSize // size_t wasted_memory_
- + kPointerSize * 2 // base::ListNode
- + kPointerSize * kNumberOfCategories
+ + kSizetSize // size_t allocated_bytes_
+ + kSizetSize // size_t wasted_memory_
+ + kSystemPointerSize * 2 // base::ListNode
+ + kSystemPointerSize * kNumberOfCategories
// FreeListCategory categories_[kNumberOfCategories]
- + kPointerSize // LocalArrayBufferTracker* local_tracker_
- + kIntptrSize // std::atomic<intptr_t> young_generation_live_byte_count_
- + kPointerSize; // Bitmap* young_generation_bitmap_
-
- // We add some more space to the computed header size to amount for missing
- // alignment requirements in our computation.
- // Try to get kHeaderSize properly aligned on 32-bit and 64-bit machines.
- static const size_t kHeaderSize = kMinHeaderSize;
-
- static const int kBodyOffset =
- CODE_POINTER_ALIGN(kHeaderSize + Bitmap::kSize);
-
- // The start offset of the object area in a page. Aligned to both maps and
- // code alignment to be suitable for both. Also aligned to 32 words because
- // the marking bitmap is arranged in 32 bit chunks.
- static const int kObjectStartAlignment = 32 * kPointerSize;
- static const int kObjectStartOffset =
- kBodyOffset - 1 +
- (kObjectStartAlignment - (kBodyOffset - 1) % kObjectStartAlignment);
+ + kSystemPointerSize // LocalArrayBufferTracker* local_tracker_
+ + kIntptrSize // std::atomic<intptr_t> young_generation_live_byte_count_
+ + kSystemPointerSize; // Bitmap* young_generation_bitmap_
// Page size in bytes. This must be a multiple of the OS page size.
static const int kPageSize = 1 << kPageSizeBits;
- static const int kAllocatableMemory = kPageSize - kObjectStartOffset;
-
// Maximum number of nested code memory modification scopes.
// TODO(6792,mstarzinger): Drop to 3 or lower once WebAssembly is off heap.
static const int kMaxWriteUnprotectCounter = 4;
+ static Address BaseAddress(Address a) { return a & ~kAlignmentMask; }
+
// Only works if the pointer is in the first kPageSize of the MemoryChunk.
static MemoryChunk* FromAddress(Address a) {
- return reinterpret_cast<MemoryChunk*>(a & ~kAlignmentMask);
+ return reinterpret_cast<MemoryChunk*>(BaseAddress(a));
}
// Only works if the object is in the first kPageSize of the MemoryChunk.
- static MemoryChunk* FromHeapObject(const HeapObject* o) {
- return reinterpret_cast<MemoryChunk*>(reinterpret_cast<Address>(o) &
- ~kAlignmentMask);
+ static MemoryChunk* FromHeapObject(const HeapObject o) {
+ return reinterpret_cast<MemoryChunk*>(BaseAddress(o.ptr()));
}
void SetOldGenerationPageFlags(bool is_marking);
void SetYoungGenerationPageFlags(bool is_marking);
- static inline MemoryChunk* FromAnyPointerAddress(Heap* heap, Address addr);
+ static inline MemoryChunk* FromAnyPointerAddress(Address addr);
static inline void UpdateHighWaterMark(Address mark) {
if (mark == kNullAddress) return;
@@ -442,6 +443,8 @@ class MemoryChunk {
ExternalBackingStoreType type, MemoryChunk* from, MemoryChunk* to,
size_t amount);
+ void DiscardUnusedMemory(Address addr, size_t size);
+
Address address() const {
return reinterpret_cast<Address>(const_cast<MemoryChunk*>(this));
}
@@ -512,11 +515,11 @@ class MemoryChunk {
InvalidatedSlots* AllocateInvalidatedSlots();
void ReleaseInvalidatedSlots();
- void RegisterObjectWithInvalidatedSlots(HeapObject* object, int size);
+ void RegisterObjectWithInvalidatedSlots(HeapObject object, int size);
// Updates invalidated_slots after array left-trimming.
- void MoveObjectWithInvalidatedSlots(HeapObject* old_start,
- HeapObject* new_start);
- bool RegisteredObjectWithInvalidatedSlots(HeapObject* object);
+ void MoveObjectWithInvalidatedSlots(HeapObject old_start,
+ HeapObject new_start);
+ bool RegisteredObjectWithInvalidatedSlots(HeapObject object);
InvalidatedSlots* invalidated_slots() { return invalidated_slots_; }
void ReleaseLocalTracker();
@@ -524,6 +527,9 @@ class MemoryChunk {
void AllocateYoungGenerationBitmap();
void ReleaseYoungGenerationBitmap();
+ void AllocateMarkingBitmap();
+ void ReleaseMarkingBitmap();
+
Address area_start() { return area_start_; }
Address area_end() { return area_end_; }
size_t area_size() { return static_cast<size_t>(area_end() - area_start()); }
@@ -534,13 +540,13 @@ class MemoryChunk {
Address HighWaterMark() { return address() + high_water_mark_; }
int progress_bar() {
- DCHECK(IsFlagSet(HAS_PROGRESS_BAR));
- return static_cast<int>(progress_bar_);
+ DCHECK(IsFlagSet<AccessMode::ATOMIC>(HAS_PROGRESS_BAR));
+ return static_cast<int>(progress_bar_.load(std::memory_order_relaxed));
}
void set_progress_bar(int progress_bar) {
- DCHECK(IsFlagSet(HAS_PROGRESS_BAR));
- progress_bar_ = progress_bar;
+ DCHECK(IsFlagSet<AccessMode::ATOMIC>(HAS_PROGRESS_BAR));
+ progress_bar_.store(progress_bar, std::memory_order_relaxed);
}
void ResetProgressBar() {
@@ -559,12 +565,15 @@ class MemoryChunk {
return external_backing_store_bytes_[type];
}
+ // Some callers rely on the fact that this can operate on both
+ // tagged and aligned object addresses.
inline uint32_t AddressToMarkbitIndex(Address addr) const {
- return static_cast<uint32_t>(addr - this->address()) >> kPointerSizeLog2;
+ return static_cast<uint32_t>(addr - this->address()) >>
+ kSystemPointerSizeLog2;
}
inline Address MarkbitIndexToAddress(uint32_t index) const {
- return this->address() + (index << kPointerSizeLog2);
+ return this->address() + (index << kSystemPointerSizeLog2);
}
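// The two conversions invert each other at kSystemPointerSize granularity:
// for any pointer-aligned addr on this chunk,
//   MarkbitIndexToAddress(AddressToMarkbitIndex(addr)) == addr,
// and a tagged address (addr + kHeapObjectTag) maps to the same index as its
// aligned base, since kHeapObjectTag is smaller than the pointer size. That
// is what the "tagged and aligned" comment above relies on.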
template <AccessMode access_mode = AccessMode::NON_ATOMIC>
@@ -634,19 +643,30 @@ class MemoryChunk {
bool InLargeObjectSpace() const;
+ inline bool IsInNewLargeObjectSpace() const;
+
Space* owner() const { return owner_; }
void set_owner(Space* space) { owner_ = space; }
- bool IsPagedSpace() const;
+ static inline bool HasHeaderSentinel(Address slot_addr);
// Emits a memory barrier. For TSAN builds the other thread needs to perform
// MemoryChunk::synchronized_heap() to simulate the barrier.
void InitializationMemoryFence();
+ void SetReadable();
void SetReadAndExecutable();
void SetReadAndWritable();
+ void SetDefaultCodePermissions() {
+ if (FLAG_jitless) {
+ SetReadable();
+ } else {
+ SetReadAndExecutable();
+ }
+ }
+
base::ListNode<MemoryChunk>& list_node() { return list_node_; }
protected:
@@ -658,26 +678,39 @@ class MemoryChunk {
// Should be called when memory chunk is about to be freed.
void ReleaseAllocatedMemory();
+ // Sets the requested page permissions only if the write unprotect counter
+ // has reached 0.
+ void DecrementWriteUnprotectCounterAndMaybeSetPermissions(
+ PageAllocator::Permission permission);
+
VirtualMemory* reserved_memory() { return &reservation_; }
size_t size_;
uintptr_t flags_;
- // Start and end of allocatable memory on this chunk.
- Address area_start_;
- Address area_end_;
+ Bitmap* marking_bitmap_;
// If the chunk needs to remember its memory reservation, it is stored here.
VirtualMemory reservation_;
+ Heap* heap_;
+
+ // This is used to distinguish the memory chunk header from the interior of a
+ // large page. The memory chunk header stores here an impossible tagged
+ // pointer: the tagged pointer of the page start. A field in a large object is
+ // guaranteed not to contain such a pointer.
+ Address header_sentinel_;
+
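// Sketch of how the sentinel could be written during chunk initialization
// (assumed shape, for illustration):
//
//   chunk->header_sentinel_ = HeapObject::FromAddress(base).ptr();
//
// Since base is the page start, this tagged value cannot occur as a field
// inside a live object on the same page.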
+ // Start and end of allocatable memory on this chunk.
+ Address area_start_;
+ Address area_end_;
+
// The space owning this memory chunk.
std::atomic<Space*> owner_;
- Heap* heap_;
-
// Used by the incremental marker to keep track of the scanning progress in
// large objects that have a progress bar and are scanned in increments.
- intptr_t progress_bar_;
+ std::atomic<intptr_t> progress_bar_;
// Count of bytes marked black on page.
std::atomic<intptr_t> live_byte_count_;
@@ -749,12 +782,8 @@ class MemoryChunk {
friend class PagedSpace;
};
-static_assert(sizeof(std::atomic<intptr_t>) == kPointerSize,
- "sizeof(std::atomic<intptr_t>) == kPointerSize");
-
-static_assert(kMaxRegularHeapObjectSize <= MemoryChunk::kAllocatableMemory,
- "kMaxRegularHeapObjectSize <= MemoryChunk::kAllocatableMemory");
-
+static_assert(sizeof(std::atomic<intptr_t>) == kSystemPointerSize,
+ "sizeof(std::atomic<intptr_t>) == kSystemPointerSize");
// -----------------------------------------------------------------------------
// A page is a memory chunk of size 512K. Large object pages may be larger.
@@ -778,17 +807,16 @@ class Page : public MemoryChunk {
static Page* FromAddress(Address addr) {
return reinterpret_cast<Page*>(addr & ~kPageAlignmentMask);
}
- static Page* FromHeapObject(const HeapObject* o) {
- return reinterpret_cast<Page*>(reinterpret_cast<Address>(o) &
- ~kAlignmentMask);
+ static Page* FromHeapObject(const HeapObject o) {
+ return reinterpret_cast<Page*>(o.ptr() & ~kAlignmentMask);
}
  // Returns the page containing the address provided. The address can
  // potentially point right after the end of the page. To also be safe for
  // tagged values we subtract one word. The valid address range is
- // [page_addr + kObjectStartOffset .. page_addr + kPageSize + kPointerSize].
+ // [page_addr + area_start_ .. page_addr + kPageSize + kTaggedSize].
static Page* FromAllocationAreaAddress(Address address) {
- return Page::FromAddress(address - kPointerSize);
+ return Page::FromAddress(address - kTaggedSize);
}
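
A minimal sketch of the subtract-one-word trick with illustrative constants: the allocation top may sit exactly at the page end, and masking the raw address would then name the next page:

#include <cassert>
#include <cstdint>

constexpr uintptr_t kPageSize = 512 * 1024;
constexpr uintptr_t kPageAlignmentMask = kPageSize - 1;
constexpr uintptr_t kTaggedSize = 8;

uintptr_t PageFromAddress(uintptr_t addr) { return addr & ~kPageAlignmentMask; }

uintptr_t PageFromAllocationAreaAddress(uintptr_t addr) {
  return PageFromAddress(addr - kTaggedSize);
}

int main() {
  uintptr_t page = 8 * kPageSize;            // some page start
  uintptr_t top = page + kPageSize;          // top exactly at the page end
  assert(PageFromAddress(top) == page + kPageSize);    // next page: wrong
  assert(PageFromAllocationAreaAddress(top) == page);  // correct page
}
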
// Checks if address1 and address2 are on the same new space page.
@@ -801,10 +829,6 @@ class Page : public MemoryChunk {
return (addr & kPageAlignmentMask) == 0;
}
- static bool IsAtObjectStart(Address addr) {
- return (addr & kPageAlignmentMask) == kObjectStartOffset;
- }
-
static Page* ConvertNewToOld(Page* old_page);
inline void MarkNeverAllocateForTesting();
@@ -826,8 +850,10 @@ class Page : public MemoryChunk {
  // Returns the address for a given offset into this page.
Address OffsetToAddress(size_t offset) {
- DCHECK_PAGE_OFFSET(offset);
- return address() + offset;
+ Address address_in_page = address() + offset;
+ DCHECK_GE(address_in_page, area_start_);
+ DCHECK_LT(address_in_page, area_end_);
+ return address_in_page;
}
// WaitUntilSweepingCompleted only works when concurrent sweeping is in
@@ -908,11 +934,11 @@ class LargePage : public MemoryChunk {
// x64 and ia32 architectures.
static const int kMaxCodePageSize = 512 * MB;
- static LargePage* FromHeapObject(const HeapObject* o) {
+ static LargePage* FromHeapObject(const HeapObject o) {
return static_cast<LargePage*>(MemoryChunk::FromHeapObject(o));
}
- HeapObject* GetObject() { return HeapObject::FromAddress(area_start()); }
+ inline HeapObject GetObject();
inline LargePage* next_page() {
return static_cast<LargePage*>(list_node_.next());
@@ -1001,7 +1027,7 @@ class Space : public Malloced {
if (id_ == CODE_SPACE) {
return RoundDown(size, kCodeAlignment);
} else {
- return RoundDown(size, kPointerSize);
+ return RoundDown(size, kTaggedSize);
}
}
@@ -1116,7 +1142,7 @@ class SkipList {
void AddObject(Address addr, int size) {
int start_region = RegionNumber(addr);
- int end_region = RegionNumber(addr + size - kPointerSize);
+ int end_region = RegionNumber(addr + size - kTaggedSize);
for (int idx = start_region; idx <= end_region; idx++) {
if (starts_[idx] > addr) {
starts_[idx] = addr;
@@ -1178,7 +1204,8 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
}
void AddMemoryChunkSafe(MemoryChunk* chunk) {
- if (chunk->IsPagedSpace() && chunk->executable() != EXECUTABLE) {
+ if (!heap_->IsLargeMemoryChunk(chunk) &&
+ chunk->executable() != EXECUTABLE) {
AddMemoryChunkSafe<kRegular>(chunk);
} else {
AddMemoryChunkSafe<kNonRegular>(chunk);
@@ -1202,11 +1229,12 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
return chunk;
}
- void FreeQueuedChunks();
+ V8_EXPORT_PRIVATE void FreeQueuedChunks();
void CancelAndWaitForPendingTasks();
void PrepareForMarkCompact();
void EnsureUnmappingCompleted();
- void TearDown();
+ V8_EXPORT_PRIVATE void TearDown();
+ size_t NumberOfCommittedChunks();
int NumberOfChunks();
size_t CommittedBufferedMemory();
@@ -1229,13 +1257,13 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
template <ChunkQueueType type>
void AddMemoryChunkSafe(MemoryChunk* chunk) {
- base::LockGuard<base::Mutex> guard(&mutex_);
+ base::MutexGuard guard(&mutex_);
chunks_[type].push_back(chunk);
}
template <ChunkQueueType type>
MemoryChunk* GetMemoryChunkSafe() {
- base::LockGuard<base::Mutex> guard(&mutex_);
+ base::MutexGuard guard(&mutex_);
if (chunks_[type].empty()) return nullptr;
MemoryChunk* chunk = chunks_[type].back();
chunks_[type].pop_back();
@@ -1273,26 +1301,14 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
kPooledAndQueue,
};
- static size_t CodePageGuardStartOffset();
-
- static size_t CodePageGuardSize();
-
- static size_t CodePageAreaStartOffset();
-
- static size_t CodePageAreaEndOffset();
-
- static size_t CodePageAreaSize() {
- return CodePageAreaEndOffset() - CodePageAreaStartOffset();
- }
-
- static size_t PageAreaSize(AllocationSpace space) {
- DCHECK_NE(LO_SPACE, space);
- return (space == CODE_SPACE) ? CodePageAreaSize()
- : Page::kAllocatableMemory;
- }
-
static intptr_t GetCommitPageSize();
+  // Computes the discardable memory area within a given memory area
+  // [addr, addr+size) and returns the result as a base::AddressRegion. If no
+  // memory within the area is discardable, an empty region is returned.
+ static base::AddressRegion ComputeDiscardMemoryArea(Address addr,
+ size_t size);
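
A sketch of the assumed semantics: only whole commit pages lying inside [addr, addr+size) can be discarded, so the start rounds up and the end rounds down to a commit-page boundary. This is a plausible reading of the comment, not the actual implementation:

#include <cassert>
#include <cstdint>

struct AddressRegion {
  uintptr_t begin = 0;
  uintptr_t size = 0;
};

AddressRegion ComputeDiscardMemoryArea(uintptr_t addr, uintptr_t size,
                                       uintptr_t commit_page_size) {
  uintptr_t begin = (addr + commit_page_size - 1) & ~(commit_page_size - 1);
  uintptr_t end = (addr + size) & ~(commit_page_size - 1);
  if (begin >= end) return {};  // no full commit page in the range
  return {begin, end - begin};
}

int main() {
  AddressRegion r = ComputeDiscardMemoryArea(0x1100, 0x3000, 0x1000);
  assert(r.begin == 0x2000 && r.size == 0x2000);  // [0x2000, 0x4000)
}
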
+
MemoryAllocator(Isolate* isolate, size_t max_capacity,
size_t code_range_size);
@@ -1303,12 +1319,14 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
// should be tried first.
template <MemoryAllocator::AllocationMode alloc_mode = kRegular,
typename SpaceType>
+ EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
Page* AllocatePage(size_t size, SpaceType* owner, Executability executable);
LargePage* AllocateLargePage(size_t size, LargeObjectSpace* owner,
Executability executable);
template <MemoryAllocator::FreeMode mode = kFull>
+ EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
void Free(MemoryChunk* chunk);
// Returns allocated spaces in bytes.
@@ -1323,11 +1341,6 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
return capacity_ < size ? 0 : capacity_ - size;
}
- // Returns maximum available bytes that the old space can have.
- size_t MaxAvailable() {
- return (Available() / Page::kPageSize) * Page::kAllocatableMemory;
- }
-
// Returns an indication of whether a pointer is in a space that has
// been allocated by this MemoryAllocator.
V8_INLINE bool IsOutsideAllocatedSpace(Address address) {
@@ -1542,7 +1555,7 @@ MemoryAllocator::AllocatePage<MemoryAllocator::kPooled, SemiSpace>(
class V8_EXPORT_PRIVATE ObjectIterator : public Malloced {
public:
virtual ~ObjectIterator() = default;
- virtual HeapObject* Next() = 0;
+ virtual HeapObject Next() = 0;
};
template <class PAGE_TYPE>
@@ -1601,11 +1614,11 @@ class V8_EXPORT_PRIVATE HeapObjectIterator : public ObjectIterator {
// Advance to the next object, skipping free spaces and other fillers and
// skipping the special garbage section of which there is one per space.
// Returns nullptr when the iteration has ended.
- inline HeapObject* Next() override;
+ inline HeapObject Next() override;
private:
// Fast (inlined) path of next().
- inline HeapObject* FromCurrentPage();
+ inline HeapObject FromCurrentPage();
// Slow path of next(), goes into the next page. Returns false if the
// iteration has ended.
@@ -1821,8 +1834,8 @@ class V8_EXPORT_PRIVATE FreeList {
// bytes. Returns the actual node size in node_size which can be bigger than
// size_in_bytes. This method returns null if the allocation request cannot be
// handled by the free list.
- V8_WARN_UNUSED_RESULT FreeSpace* Allocate(size_t size_in_bytes,
- size_t* node_size);
+ V8_WARN_UNUSED_RESULT FreeSpace Allocate(size_t size_in_bytes,
+ size_t* node_size);
// Clear the free list.
void Reset();
@@ -1907,14 +1920,17 @@ class V8_EXPORT_PRIVATE FreeList {
};
// The size range of blocks, in bytes.
- static const size_t kMinBlockSize = 3 * kPointerSize;
- static const size_t kMaxBlockSize = Page::kAllocatableMemory;
-
- static const size_t kTiniestListMax = 0xa * kPointerSize;
- static const size_t kTinyListMax = 0x1f * kPointerSize;
- static const size_t kSmallListMax = 0xff * kPointerSize;
- static const size_t kMediumListMax = 0x7ff * kPointerSize;
- static const size_t kLargeListMax = 0x3fff * kPointerSize;
+ static const size_t kMinBlockSize = 3 * kTaggedSize;
+
+ // This is a conservative upper bound. The actual maximum block size takes
+ // padding and alignment of data and code pages into account.
+ static const size_t kMaxBlockSize = Page::kPageSize;
+
+ static const size_t kTiniestListMax = 0xa * kTaggedSize;
+ static const size_t kTinyListMax = 0x1f * kTaggedSize;
+ static const size_t kSmallListMax = 0xff * kTaggedSize;
+ static const size_t kMediumListMax = 0x7ff * kTaggedSize;
+ static const size_t kLargeListMax = 0x3fff * kTaggedSize;
static const size_t kTinyAllocationMax = kTiniestListMax;
static const size_t kSmallAllocationMax = kTinyListMax;
static const size_t kMediumAllocationMax = kSmallListMax;
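
For concreteness, with kTaggedSize == 8 (a 64-bit build without pointer compression, an assumption) the bounds above evaluate to these byte sizes:

#include <cstddef>

constexpr size_t kTaggedSize = 8;                       // assumed
constexpr size_t kMinBlockSize = 3 * kTaggedSize;       // 24 bytes
constexpr size_t kTiniestListMax = 0xa * kTaggedSize;   // 80 bytes
constexpr size_t kTinyListMax = 0x1f * kTaggedSize;     // 248 bytes
constexpr size_t kSmallListMax = 0xff * kTaggedSize;    // 2,040 bytes
constexpr size_t kMediumListMax = 0x7ff * kTaggedSize;  // 16,376 bytes
constexpr size_t kLargeListMax = 0x3fff * kTaggedSize;  // 131,064 bytes

static_assert(kLargeListMax == 131064, "16383 tagged words");
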
@@ -1922,18 +1938,18 @@ class V8_EXPORT_PRIVATE FreeList {
// Walks all available categories for a given |type| and tries to retrieve
// a node. Returns nullptr if the category is empty.
- FreeSpace* FindNodeIn(FreeListCategoryType type, size_t minimum_size,
- size_t* node_size);
+ FreeSpace FindNodeIn(FreeListCategoryType type, size_t minimum_size,
+ size_t* node_size);
// Tries to retrieve a node from the first category in a given |type|.
// Returns nullptr if the category is empty or the top entry is smaller
// than minimum_size.
- FreeSpace* TryFindNodeIn(FreeListCategoryType type, size_t minimum_size,
- size_t* node_size);
+ FreeSpace TryFindNodeIn(FreeListCategoryType type, size_t minimum_size,
+ size_t* node_size);
// Searches a given |type| for a node of at least |minimum_size|.
- FreeSpace* SearchForNodeInList(FreeListCategoryType type, size_t* node_size,
- size_t minimum_size);
+ FreeSpace SearchForNodeInList(FreeListCategoryType type, size_t* node_size,
+ size_t minimum_size);
// The tiny categories are not used for fast allocation.
FreeListCategoryType SelectFastAllocationFreeListCategoryType(
@@ -1993,8 +2009,9 @@ class LocalAllocationBuffer {
~LocalAllocationBuffer() { Close(); }
// Convert to C++11 move-semantics once allowed by the style guide.
- LocalAllocationBuffer(const LocalAllocationBuffer& other);
- LocalAllocationBuffer& operator=(const LocalAllocationBuffer& other);
+ LocalAllocationBuffer(const LocalAllocationBuffer& other) V8_NOEXCEPT;
+ LocalAllocationBuffer& operator=(const LocalAllocationBuffer& other)
+ V8_NOEXCEPT;
V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRawAligned(
int size_in_bytes, AllocationAlignment alignment);
@@ -2005,13 +2022,14 @@ class LocalAllocationBuffer {
// Returns true if the merge was successful, false otherwise.
inline bool TryMerge(LocalAllocationBuffer* other);
- inline bool TryFreeLast(HeapObject* object, int object_size);
+ inline bool TryFreeLast(HeapObject object, int object_size);
// Close a LAB, effectively invalidating it. Returns the unused area.
LinearAllocationArea Close();
private:
- LocalAllocationBuffer(Heap* heap, LinearAllocationArea allocation_info);
+ LocalAllocationBuffer(Heap* heap,
+ LinearAllocationArea allocation_info) V8_NOEXCEPT;
Heap* heap_;
LinearAllocationArea allocation_info_;
@@ -2087,16 +2105,12 @@ class V8_EXPORT_PRIVATE PagedSpace
// Checks whether an object/address is in this space.
inline bool Contains(Address a);
- inline bool Contains(Object* o);
+ inline bool Contains(Object o);
bool ContainsSlow(Address addr);
// Does the space need executable memory?
Executability executable() { return executable_; }
- // During boot the free_space_map is created, and afterwards we may need
- // to write it into the free list nodes that were already created.
- void RepairFreeListsAfterDeserialization();
-
// Prepares for a mark-compact GC.
void PrepareForMarkCompact();
@@ -2186,7 +2200,7 @@ class V8_EXPORT_PRIVATE PagedSpace
return size_in_bytes - wasted;
}
- inline bool TryFreeLast(HeapObject* object, int object_size);
+ inline bool TryFreeLast(HeapObject object, int object_size);
void ResetFreeList();
@@ -2223,9 +2237,18 @@ class V8_EXPORT_PRIVATE PagedSpace
// be used for allocation.
Page* RemovePageSafe(int size_in_bytes);
+ void SetReadable();
void SetReadAndExecutable();
void SetReadAndWritable();
+ void SetDefaultCodePermissions() {
+ if (FLAG_jitless) {
+ SetReadable();
+ } else {
+ SetReadAndExecutable();
+ }
+ }
+
#ifdef VERIFY_HEAP
// Verify integrity of this space.
virtual void Verify(Isolate* isolate, ObjectVisitor* visitor);
@@ -2234,7 +2257,7 @@ class V8_EXPORT_PRIVATE PagedSpace
// Overridden by subclasses to verify space-specific object
// properties (e.g., only maps or free-list nodes are in map space).
- virtual void VerifyObject(HeapObject* obj) {}
+ virtual void VerifyObject(HeapObject obj) {}
#endif
#ifdef DEBUG
@@ -2324,13 +2347,13 @@ class V8_EXPORT_PRIVATE PagedSpace
inline bool EnsureLinearAllocationArea(int size_in_bytes);
// Allocates an object from the linear allocation area. Assumes that the
  // linear allocation area is large enough to fit the object.
- inline HeapObject* AllocateLinearly(int size_in_bytes);
+ inline HeapObject AllocateLinearly(int size_in_bytes);
// Tries to allocate an aligned object from the linear allocation area.
// Returns nullptr if the linear allocation area does not fit the object.
// Otherwise, returns the object pointer and writes the allocation size
  // (object size + alignment filler size) to size_in_bytes.
- inline HeapObject* TryAllocateLinearlyAligned(int* size_in_bytes,
- AllocationAlignment alignment);
+ inline HeapObject TryAllocateLinearlyAligned(int* size_in_bytes,
+ AllocationAlignment alignment);
V8_WARN_UNUSED_RESULT bool RefillLinearAllocationAreaFromFreeList(
size_t size_in_bytes);
@@ -2397,8 +2420,8 @@ class SemiSpace : public Space {
current_page_(nullptr),
pages_used_(0) {}
- inline bool Contains(HeapObject* o);
- inline bool Contains(Object* o);
+ inline bool Contains(HeapObject o);
+ inline bool Contains(Object o);
inline bool ContainsSlow(Address a);
void SetUp(size_t initial_capacity, size_t maximum_capacity);
@@ -2557,7 +2580,7 @@ class SemiSpaceIterator : public ObjectIterator {
// Create an iterator over the allocated objects in the given to-space.
explicit SemiSpaceIterator(NewSpace* space);
- inline HeapObject* Next() override;
+ inline HeapObject Next() override;
private:
void Initialize(Address start, Address end);
@@ -2583,9 +2606,9 @@ class NewSpace : public SpaceWithLinearArea {
~NewSpace() override { TearDown(); }
- inline bool Contains(HeapObject* o);
inline bool ContainsSlow(Address a);
- inline bool Contains(Object* o);
+ inline bool Contains(Object o);
+ inline bool Contains(HeapObject o);
// Tears down the space. Heap memory was not allocated by the space, so it
// is not deallocated here.
@@ -2604,7 +2627,8 @@ class NewSpace : public SpaceWithLinearArea {
// Return the allocated bytes in the active semispace.
size_t Size() override {
DCHECK_GE(top(), to_space_.page_low());
- return to_space_.pages_used() * Page::kAllocatableMemory +
+ return to_space_.pages_used() *
+ MemoryChunkLayout::AllocatableMemoryInDataPage() +
static_cast<size_t>(top() - to_space_.page_low());
}
@@ -2614,7 +2638,7 @@ class NewSpace : public SpaceWithLinearArea {
size_t Capacity() {
SLOW_DCHECK(to_space_.current_capacity() == from_space_.current_capacity());
return (to_space_.current_capacity() / Page::kPageSize) *
- Page::kAllocatableMemory;
+ MemoryChunkLayout::AllocatableMemoryInDataPage();
}
// Return the current size of a semispace, allocatable and non-allocatable
@@ -2669,7 +2693,7 @@ class NewSpace : public SpaceWithLinearArea {
}
while (current_page != last_page) {
DCHECK_NE(current_page, age_mark_page);
- allocated += Page::kAllocatableMemory;
+ allocated += MemoryChunkLayout::AllocatableMemoryInDataPage();
current_page = current_page->next_page();
}
DCHECK_GE(top(), current_page->area_start());
@@ -2701,13 +2725,17 @@ class NewSpace : public SpaceWithLinearArea {
}
void ResetOriginalTop() {
- DCHECK_GE(top(), original_top());
- DCHECK_LE(top(), original_limit());
- original_top_ = top();
+ DCHECK_GE(top(), original_top_);
+ DCHECK_LE(top(), original_limit_);
+ original_top_.store(top(), std::memory_order_release);
}
- Address original_top() { return original_top_; }
- Address original_limit() { return original_limit_; }
+ Address original_top_acquire() {
+ return original_top_.load(std::memory_order_acquire);
+ }
+ Address original_limit_relaxed() {
+ return original_limit_.load(std::memory_order_relaxed);
+ }
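
The acquire/release pair above is the standard publication pattern; a minimal sketch of the guarantee it buys when one thread bumps the top while another reads it:

#include <atomic>
#include <cstdint>

std::atomic<uintptr_t> original_top{0};

void PublishTop(uintptr_t new_top) {
  // ... initialize the objects below new_top first ...
  original_top.store(new_top, std::memory_order_release);
}

uintptr_t ReadTop() {
  // A reader that observes new_top also observes the writes made before it.
  return original_top.load(std::memory_order_acquire);
}
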
// Return the address of the first allocatable address in the active
// semispace. This may be the address where the first object resides.
@@ -2741,9 +2769,8 @@ class NewSpace : public SpaceWithLinearArea {
void UpdateInlineAllocationLimit(size_t size_in_bytes) override;
inline bool ToSpaceContainsSlow(Address a);
- inline bool FromSpaceContainsSlow(Address a);
- inline bool ToSpaceContains(Object* o);
- inline bool FromSpaceContains(Object* o);
+ inline bool ToSpaceContains(Object o);
+ inline bool FromSpaceContains(Object o);
// Try to switch the active semispace to a new, empty, page.
// Returns false if this isn't possible or reasonable (i.e., there
@@ -2874,6 +2901,11 @@ class OldSpace : public PagedSpace {
// Creates an old space object. The constructor does not allocate pages
// from OS.
explicit OldSpace(Heap* heap) : PagedSpace(heap, OLD_SPACE, NOT_EXECUTABLE) {}
+
+ static bool IsAtPageStart(Address addr) {
+ return static_cast<intptr_t>(addr & kPageAlignmentMask) ==
+ MemoryChunkLayout::ObjectStartOffsetInDataPage();
+ }
};
// -----------------------------------------------------------------------------
@@ -2886,7 +2918,6 @@ class CodeSpace : public PagedSpace {
explicit CodeSpace(Heap* heap) : PagedSpace(heap, CODE_SPACE, EXECUTABLE) {}
};
-
// For contiguous spaces, top should be in the space (or at the end) and limit
// should be the end of the space.
#define DCHECK_SEMISPACE_ALLOCATION_INFO(info, space) \
@@ -2912,7 +2943,7 @@ class MapSpace : public PagedSpace {
}
#ifdef VERIFY_HEAP
- void VerifyObject(HeapObject* obj) override;
+ void VerifyObject(HeapObject obj) override;
#endif
};
@@ -2940,6 +2971,10 @@ class ReadOnlySpace : public PagedSpace {
void ClearStringPaddingIfNeeded();
void MarkAsReadOnly();
+ // During boot the free_space_map is created, and afterwards we may need
+ // to write it into the free list nodes that were already created.
+ void RepairFreeListsAfterDeserialization();
+
private:
void MarkAsReadWrite();
void SetPermissionsForPages(PageAllocator::Permission access);
@@ -2954,9 +2989,7 @@ class ReadOnlySpace : public PagedSpace {
// -----------------------------------------------------------------------------
// Large objects ( > kMaxRegularHeapObjectSize ) are allocated and
-// managed by the large object space. A large object is allocated from OS
-// heap with extra padding bytes (Page::kPageSize + Page::kObjectStartOffset).
-// A large object always starts at Page::kObjectStartOffset to a page.
+// managed by the large object space.
// Large objects do not move during garbage collections.
class LargeObjectSpace : public Space {
@@ -2971,13 +3004,8 @@ class LargeObjectSpace : public Space {
// Releases internal resources, frees objects in this space.
void TearDown();
- static size_t ObjectSizeFor(size_t chunk_size) {
- if (chunk_size <= (Page::kPageSize + Page::kObjectStartOffset)) return 0;
- return chunk_size - Page::kPageSize - Page::kObjectStartOffset;
- }
-
- V8_WARN_UNUSED_RESULT AllocationResult AllocateRaw(int object_size,
- Executability executable);
+ V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT AllocationResult
+ AllocateRaw(int object_size);
// Available bytes for objects in this space.
size_t Available() override;
@@ -2992,10 +3020,7 @@ class LargeObjectSpace : public Space {
  // Finds an object for a given address; returns a Smi if it is not found.
  // The function iterates through all objects in this space and may be slow.
- Object* FindObject(Address a);
-
- // Takes the chunk_map_mutex_ and calls FindPage after that.
- LargePage* FindPageThreadSafe(Address a);
+ Object FindObject(Address a);
// Finds a large object page containing the given address, returns nullptr
// if such a page doesn't exist.
@@ -3014,7 +3039,7 @@ class LargeObjectSpace : public Space {
void PromoteNewLargeObject(LargePage* page);
// Checks whether a heap object is in this space; O(1).
- bool Contains(HeapObject* obj);
+ bool Contains(HeapObject obj);
// Checks whether an address is in the object area in this space. Iterates
// all objects in the space. May be slow.
bool ContainsSlow(Address addr) { return FindObject(addr)->IsHeapObject(); }
@@ -3049,12 +3074,14 @@ class LargeObjectSpace : public Space {
protected:
LargePage* AllocateLargePage(int object_size, Executability executable);
+ V8_WARN_UNUSED_RESULT AllocationResult AllocateRaw(int object_size,
+ Executability executable);
- private:
size_t size_; // allocated bytes
int page_count_; // number of chunks
size_t objects_size_; // size of objects
+ private:
// The chunk_map_mutex_ has to be used when the chunk map is accessed
// concurrently.
base::Mutex chunk_map_mutex_;
@@ -3075,13 +3102,23 @@ class NewLargeObjectSpace : public LargeObjectSpace {
size_t Available() override;
void Flip();
+
+ void FreeAllObjects();
+};
+
+class CodeLargeObjectSpace : public LargeObjectSpace {
+ public:
+ explicit CodeLargeObjectSpace(Heap* heap);
+
+ V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT AllocationResult
+ AllocateRaw(int object_size);
};
class LargeObjectIterator : public ObjectIterator {
public:
explicit LargeObjectIterator(LargeObjectSpace* space);
- HeapObject* Next() override;
+ HeapObject Next() override;
private:
LargePage* current_;
@@ -3089,9 +3126,9 @@ class LargeObjectIterator : public ObjectIterator {
// Iterates over the chunks (pages and large object pages) that can contain
// pointers to new space or to evacuation candidates.
-class MemoryChunkIterator {
+class OldGenerationMemoryChunkIterator {
public:
- inline explicit MemoryChunkIterator(Heap* heap);
+ inline explicit OldGenerationMemoryChunkIterator(Heap* heap);
// Return nullptr when the iterator is done.
inline MemoryChunk* next();
@@ -3102,6 +3139,7 @@ class MemoryChunkIterator {
kMapState,
kCodeState,
kLargeObjectState,
+ kCodeLargeObjectState,
kFinishedState
};
Heap* heap_;
@@ -3110,6 +3148,7 @@ class MemoryChunkIterator {
PageIterator code_iterator_;
PageIterator map_iterator_;
LargePageIterator lo_iterator_;
+ LargePageIterator code_lo_iterator_;
};
} // namespace internal
diff --git a/deps/v8/src/heap/store-buffer.cc b/deps/v8/src/heap/store-buffer.cc
index f737eb099d..637b8062d4 100644
--- a/deps/v8/src/heap/store-buffer.cc
+++ b/deps/v8/src/heap/store-buffer.cc
@@ -6,6 +6,7 @@
#include <algorithm>
+#include "src/base/bits.h"
#include "src/base/macros.h"
#include "src/base/template-utils.h"
#include "src/counters.h"
@@ -32,9 +33,16 @@ StoreBuffer::StoreBuffer(Heap* heap)
void StoreBuffer::SetUp() {
v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
- const size_t requested_size = kStoreBufferSize * kStoreBuffers;
+  // Round up the requested size in order to fulfill the VirtualMemory's
+  // requirements on the requested size alignment. This may cause a bit of
+  // memory wastage if the actual CommitPageSize() is bigger than the
+  // kMinExpectedOSPageSize value, but this is a trade-off for keeping the
+  // store buffer overflow check in write barriers cheap.
+ const size_t requested_size = RoundUp(kStoreBufferSize * kStoreBuffers,
+ page_allocator->CommitPageSize());
// Allocate buffer memory aligned at least to kStoreBufferSize. This lets us
// use a bit test to detect the ends of the buffers.
+ STATIC_ASSERT(base::bits::IsPowerOfTwo(kStoreBufferSize));
const size_t alignment =
std::max<size_t>(kStoreBufferSize, page_allocator->AllocatePageSize());
void* hint = AlignedAddress(heap_->GetRandomMmapAddr(), alignment);
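
A small illustration of the bit test mentioned above, with an assumed buffer size: because the buffers are contiguous and aligned to kStoreBufferSize, a slot pointer reaches the end of a buffer exactly when its low bits wrap to zero:

#include <cassert>
#include <cstdint>

constexpr uintptr_t kStoreBufferSize = 1 << 14;  // a power of two
constexpr uintptr_t kStoreBufferMask = kStoreBufferSize - 1;

bool AtBufferEnd(uintptr_t slot_addr) {
  return (slot_addr & kStoreBufferMask) == 0;
}

int main() {
  uintptr_t base = 4 * kStoreBufferSize;  // allocation aligned to the size
  assert(!AtBufferEnd(base + 8));
  assert(AtBufferEnd(base + kStoreBufferSize));  // first buffer just filled
}
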
@@ -47,9 +55,9 @@ void StoreBuffer::SetUp() {
const size_t allocated_size = reservation.size();
start_[0] = reinterpret_cast<Address*>(start);
- limit_[0] = start_[0] + (kStoreBufferSize / kPointerSize);
+ limit_[0] = start_[0] + (kStoreBufferSize / kSystemPointerSize);
start_[1] = limit_[0];
- limit_[1] = start_[1] + (kStoreBufferSize / kPointerSize);
+ limit_[1] = start_[1] + (kStoreBufferSize / kSystemPointerSize);
// Sanity check the buffers.
Address* vm_limit = reinterpret_cast<Address*>(start + allocated_size);
@@ -133,7 +141,7 @@ int StoreBuffer::StoreBufferOverflow(Isolate* isolate) {
}
void StoreBuffer::FlipStoreBuffers() {
- base::LockGuard<base::Mutex> guard(&mutex_);
+ base::MutexGuard guard(&mutex_);
int other = (current_ + 1) % kStoreBuffers;
MoveEntriesToRememberedSet(other);
lazy_top_[current_] = top_;
@@ -152,14 +160,15 @@ void StoreBuffer::MoveEntriesToRememberedSet(int index) {
DCHECK_GE(index, 0);
DCHECK_LT(index, kStoreBuffers);
Address last_inserted_addr = kNullAddress;
+ MemoryChunk* chunk = nullptr;
- // We are taking the chunk map mutex here because the page lookup of addr
- // below may require us to check if addr is part of a large page.
- base::LockGuard<base::Mutex> guard(heap_->lo_space()->chunk_map_mutex());
for (Address* current = start_[index]; current < lazy_top_[index];
current++) {
Address addr = *current;
- MemoryChunk* chunk = MemoryChunk::FromAnyPointerAddress(heap_, addr);
+ if (chunk == nullptr ||
+ MemoryChunk::BaseAddress(addr) != chunk->address()) {
+ chunk = MemoryChunk::FromAnyPointerAddress(addr);
+ }
if (IsDeletionAddress(addr)) {
last_inserted_addr = kNullAddress;
current++;
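
The loop above caches the last chunk because consecutive store-buffer entries tend to land on the same chunk; a standalone sketch of that effect, assuming 256 KB chunk alignment and stubbing the slow-path lookup with a counter:

#include <cassert>
#include <cstdint>

constexpr uintptr_t kChunkAlignment = 256 * 1024;

uintptr_t BaseAddress(uintptr_t addr) { return addr & ~(kChunkAlignment - 1); }

int lookups = 0;
uintptr_t LookupChunkBase(uintptr_t addr) {  // stands in for the slow path
  ++lookups;
  return BaseAddress(addr);
}

int main() {
  uintptr_t cached_base = 0;
  bool have_cached = false;
  uintptr_t addrs[] = {0x40010, 0x40020, 0x40030, 0x80010};
  for (uintptr_t addr : addrs) {
    if (!have_cached || BaseAddress(addr) != cached_base) {
      cached_base = LookupChunkBase(addr);
      have_cached = true;
    }
  }
  assert(lookups == 2);  // one lookup per distinct chunk, not per entry
}
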
@@ -184,7 +193,7 @@ void StoreBuffer::MoveEntriesToRememberedSet(int index) {
}
void StoreBuffer::MoveAllEntriesToRememberedSet() {
- base::LockGuard<base::Mutex> guard(&mutex_);
+ base::MutexGuard guard(&mutex_);
int other = (current_ + 1) % kStoreBuffers;
MoveEntriesToRememberedSet(other);
lazy_top_[current_] = top_;
@@ -193,7 +202,7 @@ void StoreBuffer::MoveAllEntriesToRememberedSet() {
}
void StoreBuffer::ConcurrentlyProcessStoreBuffer() {
- base::LockGuard<base::Mutex> guard(&mutex_);
+ base::MutexGuard guard(&mutex_);
int other = (current_ + 1) % kStoreBuffers;
MoveEntriesToRememberedSet(other);
task_running_ = false;
diff --git a/deps/v8/src/heap/store-buffer.h b/deps/v8/src/heap/store-buffer.h
index 4dbb471b7a..d1be45f3e5 100644
--- a/deps/v8/src/heap/store-buffer.h
+++ b/deps/v8/src/heap/store-buffer.h
@@ -23,13 +23,16 @@ namespace internal {
// one is the end address of the invalid range or null if there is just one slot
// that needs to be removed from the remembered set. On buffer overflow the
// slots are moved to the remembered set.
+// Store buffer entries are always full pointers.
class StoreBuffer {
public:
enum StoreBufferMode { IN_GC, NOT_IN_GC };
- static const int kStoreBufferSize = 1 << (11 + kPointerSizeLog2);
- static const int kStoreBufferMask = kStoreBufferSize - 1;
static const int kStoreBuffers = 2;
+ static const int kStoreBufferSize =
+ Max(static_cast<int>(kMinExpectedOSPageSize / kStoreBuffers),
+ 1 << (11 + kSystemPointerSizeLog2));
+ static const int kStoreBufferMask = kStoreBufferSize - 1;
static const intptr_t kDeletionTag = 1;
V8_EXPORT_PRIVATE static int StoreBufferOverflow(Isolate* isolate);
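
Worked out under two assumptions, kSystemPointerSizeLog2 == 3 (a 64-bit build) and kMinExpectedOSPageSize == 64 KB, the new definition picks the larger of the two terms; std::max stands in for V8's Max helper:

#include <algorithm>
#include <cstddef>

constexpr size_t KB = 1024;
constexpr size_t kMinExpectedOSPageSize = 64 * KB;  // assumed value
constexpr int kStoreBuffers = 2;
constexpr int kSystemPointerSizeLog2 = 3;

constexpr int kStoreBufferSize =
    std::max(static_cast<int>(kMinExpectedOSPageSize / kStoreBuffers),
             1 << (11 + kSystemPointerSizeLog2));  // max(32 KB, 16 KB)

static_assert(kStoreBufferSize == 32 * KB, "two buffers cover one OS page");
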
diff --git a/deps/v8/src/heap/stress-marking-observer.cc b/deps/v8/src/heap/stress-marking-observer.cc
index 710282d573..091f279a78 100644
--- a/deps/v8/src/heap/stress-marking-observer.cc
+++ b/deps/v8/src/heap/stress-marking-observer.cc
@@ -3,6 +3,7 @@
// found in the LICENSE file.
#include "src/heap/stress-marking-observer.h"
+#include "src/heap/incremental-marking.h"
namespace v8 {
namespace internal {
@@ -15,6 +16,7 @@ void StressMarkingObserver::Step(int bytes_allocated, Address soon_object,
size_t size) {
heap_.StartIncrementalMarkingIfAllocationLimitIsReached(Heap::kNoGCFlags,
kNoGCCallbackFlags);
+ heap_.incremental_marking()->EnsureBlackAllocated(soon_object, size);
}
} // namespace internal
diff --git a/deps/v8/src/heap/sweeper.cc b/deps/v8/src/heap/sweeper.cc
index 4f5ad18bec..3b1d9a7727 100644
--- a/deps/v8/src/heap/sweeper.cc
+++ b/deps/v8/src/heap/sweeper.cc
@@ -15,6 +15,20 @@
namespace v8 {
namespace internal {
+Sweeper::Sweeper(Heap* heap, MajorNonAtomicMarkingState* marking_state)
+ : heap_(heap),
+ marking_state_(marking_state),
+ num_tasks_(0),
+ pending_sweeper_tasks_semaphore_(0),
+ incremental_sweeper_pending_(false),
+ sweeping_in_progress_(false),
+ num_sweeping_tasks_(0),
+ stop_sweeper_tasks_(false),
+ iterability_task_semaphore_(0),
+ iterability_in_progress_(false),
+ iterability_task_started_(false),
+ should_reduce_memory_(false) {}
+
Sweeper::PauseOrCompleteScope::PauseOrCompleteScope(Sweeper* sweeper)
: sweeper_(sweeper) {
sweeper_->stop_sweeper_tasks_ = true;
@@ -136,6 +150,7 @@ void Sweeper::StartSweeping() {
CHECK(!stop_sweeper_tasks_);
sweeping_in_progress_ = true;
iterability_in_progress_ = true;
+ should_reduce_memory_ = heap_->ShouldReduceMemory();
MajorNonAtomicMarkingState* marking_state =
heap_->mark_compact_collector()->non_atomic_marking_state();
ForAllSweepingSpaces([this, marking_state](AllocationSpace space) {
@@ -181,7 +196,7 @@ void Sweeper::SweepOrWaitUntilSweepingCompleted(Page* page) {
}
Page* Sweeper::GetSweptPageSafe(PagedSpace* space) {
- base::LockGuard<base::Mutex> guard(&mutex_);
+ base::MutexGuard guard(&mutex_);
SweptList& list = swept_list_[GetSweepSpaceIndex(space->identity())];
if (!list.empty()) {
auto last_page = list.back();
@@ -196,7 +211,7 @@ void Sweeper::AbortAndWaitForTasks() {
for (int i = 0; i < num_tasks_; i++) {
if (heap_->isolate()->cancelable_task_manager()->TryAbort(task_ids_[i]) !=
- CancelableTaskManager::kTaskAborted) {
+ TryAbortResult::kTaskAborted) {
pending_sweeper_tasks_semaphore_.Wait();
} else {
// Aborted case.
@@ -249,7 +264,6 @@ int Sweeper::RawSweep(Page* p, FreeListRebuildingMode free_list_mode,
ArrayBufferTracker::FreeDead(p, marking_state_);
Address free_start = p->area_start();
- DCHECK_EQ(0, free_start % (32 * kPointerSize));
// If we use the skip list for code space pages, we have to lock the skip
// list because it could be accessed concurrently by the runtime or the
@@ -272,7 +286,7 @@ int Sweeper::RawSweep(Page* p, FreeListRebuildingMode free_list_mode,
for (auto object_and_size :
LiveObjectRange<kBlackObjects>(p, marking_state_->bitmap(p))) {
- HeapObject* const object = object_and_size.first;
+ HeapObject const object = object_and_size.first;
DCHECK(marking_state_->IsBlack(object));
Address free_end = object->address();
if (free_end != free_start) {
@@ -290,6 +304,7 @@ int Sweeper::RawSweep(Page* p, FreeListRebuildingMode free_list_mode,
free_start, static_cast<int>(size), ClearRecordedSlots::kNo,
ClearFreedMemoryMode::kClearFreedMemory);
}
+ if (should_reduce_memory_) p->DiscardUnusedMemory(free_start, size);
RememberedSet<OLD_TO_NEW>::RemoveRange(p, free_start, free_end,
SlotSet::KEEP_EMPTY_BUCKETS);
RememberedSet<OLD_TO_OLD>::RemoveRange(p, free_start, free_end,
@@ -300,13 +315,13 @@ int Sweeper::RawSweep(Page* p, FreeListRebuildingMode free_list_mode,
static_cast<uint32_t>(free_end - p->address())));
}
}
- Map* map = object->synchronized_map();
+ Map map = object->synchronized_map();
int size = object->SizeFromMap(map);
live_bytes += size;
if (rebuild_skip_list) {
int new_region_start = SkipList::RegionNumber(free_end);
int new_region_end =
- SkipList::RegionNumber(free_end + size - kPointerSize);
+ SkipList::RegionNumber(free_end + size - kTaggedSize);
if (new_region_start != curr_region || new_region_end != curr_region) {
skip_list->AddObject(free_end, size);
curr_region = new_region_end;
@@ -330,7 +345,7 @@ int Sweeper::RawSweep(Page* p, FreeListRebuildingMode free_list_mode,
ClearRecordedSlots::kNo,
ClearFreedMemoryMode::kClearFreedMemory);
}
-
+ if (should_reduce_memory_) p->DiscardUnusedMemory(free_start, size);
RememberedSet<OLD_TO_NEW>::RemoveRange(p, free_start, p->area_end(),
SlotSet::KEEP_EMPTY_BUCKETS);
RememberedSet<OLD_TO_OLD>::RemoveRange(p, free_start, p->area_end(),
@@ -346,11 +361,11 @@ int Sweeper::RawSweep(Page* p, FreeListRebuildingMode free_list_mode,
if (!free_ranges.empty()) {
TypedSlotSet* old_to_new = p->typed_slot_set<OLD_TO_NEW>();
if (old_to_new != nullptr) {
- old_to_new->RemoveInvaldSlots(free_ranges);
+ old_to_new->ClearInvalidSlots(free_ranges);
}
TypedSlotSet* old_to_old = p->typed_slot_set<OLD_TO_OLD>();
if (old_to_old != nullptr) {
- old_to_old->RemoveInvaldSlots(free_ranges);
+ old_to_old->ClearInvalidSlots(free_ranges);
}
}
@@ -411,7 +426,7 @@ int Sweeper::ParallelSweepPage(Page* page, AllocationSpace identity) {
DCHECK(IsValidSweepingSpace(identity));
int max_freed = 0;
{
- base::LockGuard<base::Mutex> guard(page->mutex());
+ base::MutexGuard guard(page->mutex());
// If this page was already swept in the meantime, we can return here.
if (page->SweepingDone()) return 0;
@@ -438,7 +453,7 @@ int Sweeper::ParallelSweepPage(Page* page, AllocationSpace identity) {
}
{
- base::LockGuard<base::Mutex> guard(&mutex_);
+ base::MutexGuard guard(&mutex_);
swept_list_[GetSweepSpaceIndex(identity)].push_back(page);
}
return max_freed;
@@ -457,7 +472,7 @@ void Sweeper::ScheduleIncrementalSweepingTask() {
void Sweeper::AddPage(AllocationSpace space, Page* page,
Sweeper::AddPageMode mode) {
- base::LockGuard<base::Mutex> guard(&mutex_);
+ base::MutexGuard guard(&mutex_);
DCHECK(IsValidSweepingSpace(space));
DCHECK(!FLAG_concurrent_sweeping || !AreSweeperTasksRunning());
if (mode == Sweeper::REGULAR) {
@@ -483,7 +498,7 @@ void Sweeper::PrepareToBeSweptPage(AllocationSpace space, Page* page) {
}
Page* Sweeper::GetSweepingPageSafe(AllocationSpace space) {
- base::LockGuard<base::Mutex> guard(&mutex_);
+ base::MutexGuard guard(&mutex_);
DCHECK(IsValidSweepingSpace(space));
int space_index = GetSweepSpaceIndex(space);
Page* page = nullptr;
@@ -509,7 +524,7 @@ void Sweeper::EnsureIterabilityCompleted() {
if (FLAG_concurrent_sweeping && iterability_task_started_) {
if (heap_->isolate()->cancelable_task_manager()->TryAbort(
- iterability_task_id_) != CancelableTaskManager::kTaskAborted) {
+ iterability_task_id_) != TryAbortResult::kTaskAborted) {
iterability_task_semaphore_.Wait();
}
iterability_task_started_ = false;
diff --git a/deps/v8/src/heap/sweeper.h b/deps/v8/src/heap/sweeper.h
index 90a429b3ea..ff806a0af6 100644
--- a/deps/v8/src/heap/sweeper.h
+++ b/deps/v8/src/heap/sweeper.h
@@ -77,18 +77,7 @@ class Sweeper {
};
enum AddPageMode { REGULAR, READD_TEMPORARY_REMOVED_PAGE };
- Sweeper(Heap* heap, MajorNonAtomicMarkingState* marking_state)
- : heap_(heap),
- marking_state_(marking_state),
- num_tasks_(0),
- pending_sweeper_tasks_semaphore_(0),
- incremental_sweeper_pending_(false),
- sweeping_in_progress_(false),
- num_sweeping_tasks_(0),
- stop_sweeper_tasks_(false),
- iterability_task_semaphore_(0),
- iterability_in_progress_(false),
- iterability_task_started_(false) {}
+ Sweeper(Heap* heap, MajorNonAtomicMarkingState* marking_state);
bool sweeping_in_progress() const { return sweeping_in_progress_; }
@@ -196,6 +185,7 @@ class Sweeper {
base::Semaphore iterability_task_semaphore_;
bool iterability_in_progress_;
bool iterability_task_started_;
+ bool should_reduce_memory_;
};
} // namespace internal
diff --git a/deps/v8/src/heap/worklist.h b/deps/v8/src/heap/worklist.h
index db6e572df7..c086b87e59 100644
--- a/deps/v8/src/heap/worklist.h
+++ b/deps/v8/src/heap/worklist.h
@@ -62,6 +62,7 @@ class Worklist {
Worklist() : Worklist(kMaxNumTasks) {}
explicit Worklist(int num_tasks) : num_tasks_(num_tasks) {
+ DCHECK_LE(num_tasks, kMaxNumTasks);
for (int i = 0; i < num_tasks_; i++) {
private_push_segment(i) = NewSegment();
private_pop_segment(i) = NewSegment();
@@ -282,13 +283,13 @@ class Worklist {
}
V8_INLINE void Push(Segment* segment) {
- base::LockGuard<base::Mutex> guard(&lock_);
+ base::MutexGuard guard(&lock_);
segment->set_next(top_);
set_top(segment);
}
V8_INLINE bool Pop(Segment** segment) {
- base::LockGuard<base::Mutex> guard(&lock_);
+ base::MutexGuard guard(&lock_);
if (top_ != nullptr) {
*segment = top_;
set_top(top_->next());
@@ -302,7 +303,7 @@ class Worklist {
}
void Clear() {
- base::LockGuard<base::Mutex> guard(&lock_);
+ base::MutexGuard guard(&lock_);
Segment* current = top_;
while (current != nullptr) {
Segment* tmp = current;
@@ -315,7 +316,7 @@ class Worklist {
// See Worklist::Update.
template <typename Callback>
void Update(Callback callback) {
- base::LockGuard<base::Mutex> guard(&lock_);
+ base::MutexGuard guard(&lock_);
Segment* prev = nullptr;
Segment* current = top_;
while (current != nullptr) {
@@ -339,7 +340,7 @@ class Worklist {
// See Worklist::Iterate.
template <typename Callback>
void Iterate(Callback callback) {
- base::LockGuard<base::Mutex> guard(&lock_);
+ base::MutexGuard guard(&lock_);
for (Segment* current = top_; current != nullptr;
current = current->next()) {
current->Iterate(callback);
@@ -349,7 +350,7 @@ class Worklist {
std::pair<Segment*, Segment*> Extract() {
Segment* top = nullptr;
{
- base::LockGuard<base::Mutex> guard(&lock_);
+ base::MutexGuard guard(&lock_);
if (top_ == nullptr) return std::make_pair(nullptr, nullptr);
top = top_;
set_top(nullptr);
@@ -362,7 +363,7 @@ class Worklist {
void MergeList(Segment* start, Segment* end) {
if (start == nullptr) return;
{
- base::LockGuard<base::Mutex> guard(&lock_);
+ base::MutexGuard guard(&lock_);
end->set_next(top_);
set_top(start);
}
diff --git a/deps/v8/src/ia32/assembler-ia32-inl.h b/deps/v8/src/ia32/assembler-ia32-inl.h
index 4598395642..dabeabb41b 100644
--- a/deps/v8/src/ia32/assembler-ia32-inl.h
+++ b/deps/v8/src/ia32/assembler-ia32-inl.h
@@ -55,11 +55,10 @@ bool CpuFeatures::SupportsWasmSimd128() { return IsSupported(SSE4_1); }
void RelocInfo::apply(intptr_t delta) {
DCHECK_EQ(kApplyMask, (RelocInfo::ModeMask(RelocInfo::CODE_TARGET) |
RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE) |
- RelocInfo::ModeMask(RelocInfo::JS_TO_WASM_CALL) |
RelocInfo::ModeMask(RelocInfo::OFF_HEAP_TARGET) |
RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY)));
if (IsRuntimeEntry(rmode_) || IsCodeTarget(rmode_) ||
- IsJsToWasmCall(rmode_) || IsOffHeapTarget(rmode_)) {
+ IsOffHeapTarget(rmode_)) {
int32_t* p = reinterpret_cast<int32_t*>(pc_);
*p -= delta; // Relocate entry.
} else if (IsInternalReference(rmode_)) {
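
The *p -= delta adjustment above keeps a pc-relative operand pointing at the same absolute target after the code buffer moves; a minimal illustration of the invariant target == pc + rel:

#include <cassert>
#include <cstdint>

int main() {
  intptr_t target = 0x5000;    // absolute target, does not move
  intptr_t pc = 0x1000;        // location of the relative operand
  intptr_t rel = target - pc;  // value stored in the instruction
  intptr_t delta = 0x200;      // buffer moved forward by delta
  pc += delta;
  rel -= delta;                // the adjustment RelocInfo::apply makes
  assert(pc + rel == target);
}
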
@@ -76,9 +75,7 @@ Address RelocInfo::target_address() {
}
Address RelocInfo::target_address_address() {
- DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_) || IsWasmCall(rmode_) ||
- IsWasmStubCall(rmode_) || IsEmbeddedObject(rmode_) ||
- IsExternalReference(rmode_) || IsOffHeapTarget(rmode_));
+ DCHECK(HasTargetAddressAddress());
return pc_;
}
@@ -92,9 +89,9 @@ int RelocInfo::target_address_size() {
return Assembler::kSpecialTargetSize;
}
-HeapObject* RelocInfo::target_object() {
+HeapObject RelocInfo::target_object() {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- return HeapObject::cast(Memory<Object*>(pc_));
+ return HeapObject::cast(Object(Memory<Address>(pc_)));
}
Handle<HeapObject> RelocInfo::target_object_handle(Assembler* origin) {
@@ -102,20 +99,19 @@ Handle<HeapObject> RelocInfo::target_object_handle(Assembler* origin) {
return Handle<HeapObject>::cast(Memory<Handle<Object>>(pc_));
}
-void RelocInfo::set_target_object(Heap* heap, HeapObject* target,
+void RelocInfo::set_target_object(Heap* heap, HeapObject target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- Memory<Object*>(pc_) = target;
+ Memory<Address>(pc_) = target->ptr();
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
Assembler::FlushICache(pc_, sizeof(Address));
}
- if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != nullptr) {
+ if (write_barrier_mode == UPDATE_WRITE_BARRIER && !host().is_null()) {
WriteBarrierForCode(host(), this, target);
}
}
-
Address RelocInfo::target_external_reference() {
DCHECK(rmode_ == RelocInfo::EXTERNAL_REFERENCE);
return Memory<Address>(pc_);
@@ -278,9 +274,10 @@ Address Assembler::target_address_from_return_address(Address pc) {
}
void Assembler::deserialization_set_special_target_at(
- Address instruction_payload, Code* code, Address target) {
+ Address instruction_payload, Code code, Address target) {
set_target_address_at(instruction_payload,
- code ? code->constant_pool() : kNullAddress, target);
+ !code.is_null() ? code->constant_pool() : kNullAddress,
+ target);
}
int Assembler::deserialization_special_target_size(
@@ -323,10 +320,6 @@ void Assembler::deserialization_set_target_internal_reference_at(
void Operand::set_sib(ScaleFactor scale, Register index, Register base) {
-#ifdef DEBUG
- AddUsedRegister(index);
- AddUsedRegister(base);
-#endif
DCHECK_EQ(len_, 1);
DCHECK_EQ(scale & -4, 0);
// Use SIB with no index register only for base esp.
diff --git a/deps/v8/src/ia32/assembler-ia32.cc b/deps/v8/src/ia32/assembler-ia32.cc
index ff589c820b..b0c359034a 100644
--- a/deps/v8/src/ia32/assembler-ia32.cc
+++ b/deps/v8/src/ia32/assembler-ia32.cc
@@ -50,7 +50,6 @@
#include "src/assembler-inl.h"
#include "src/base/bits.h"
#include "src/base/cpu.h"
-#include "src/code-stubs.h"
#include "src/conversions-inl.h"
#include "src/deoptimizer.h"
#include "src/disassembler.h"
@@ -70,13 +69,6 @@ Immediate Immediate::EmbeddedNumber(double value) {
return result;
}
-Immediate Immediate::EmbeddedCode(CodeStub* stub) {
- Immediate result(0, RelocInfo::CODE_TARGET);
- result.is_heap_object_request_ = true;
- result.value_.heap_object_request = HeapObjectRequest(stub);
- return result;
-}
-
Immediate Immediate::EmbeddedStringConstant(const StringConstantBase* str) {
Immediate result(0, RelocInfo::EMBEDDED_OBJECT);
result.is_heap_object_request_ = true;
@@ -198,7 +190,6 @@ void Displacement::init(Label* L, Type type) {
const int RelocInfo::kApplyMask =
RelocInfo::ModeMask(RelocInfo::CODE_TARGET) |
RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE) |
- RelocInfo::ModeMask(RelocInfo::JS_TO_WASM_CALL) |
RelocInfo::ModeMask(RelocInfo::OFF_HEAP_TARGET) |
RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY);
@@ -210,28 +201,10 @@ bool RelocInfo::IsCodedSpecially() {
return RelocInfo::ModeMask(rmode_) & kApplyMask;
}
-
bool RelocInfo::IsInConstantPool() {
return false;
}
-int RelocInfo::GetDeoptimizationId(Isolate* isolate, DeoptimizeKind kind) {
- DCHECK(IsRuntimeEntry(rmode_));
- return Deoptimizer::GetDeoptimizationId(isolate, target_address(), kind);
-}
-
-void RelocInfo::set_js_to_wasm_address(Address address,
- ICacheFlushMode icache_flush_mode) {
- DCHECK_EQ(rmode_, JS_TO_WASM_CALL);
- Assembler::set_target_address_at(pc_, constant_pool_, address,
- icache_flush_mode);
-}
-
-Address RelocInfo::js_to_wasm_address() const {
- DCHECK_EQ(rmode_, JS_TO_WASM_CALL);
- return Assembler::target_address_at(pc_, constant_pool_);
-}
-
uint32_t RelocInfo::wasm_call_tag() const {
DCHECK(rmode_ == WASM_CALL || rmode_ == WASM_STUB_CALL);
return Memory<uint32_t>(pc_);
@@ -316,10 +289,6 @@ void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
object =
isolate->factory()->NewHeapNumber(request.heap_number(), TENURED);
break;
- case HeapObjectRequest::kCodeStub:
- request.code_stub()->set_isolate(isolate);
- object = request.code_stub()->GetCode();
- break;
case HeapObjectRequest::kStringConstant: {
const StringConstantBase* str = request.string();
CHECK_NOT_NULL(str);
@@ -327,7 +296,7 @@ void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
break;
}
}
- Address pc = reinterpret_cast<Address>(buffer_) + request.offset();
+ Address pc = reinterpret_cast<Address>(buffer_start_) + request.offset();
Memory<Handle<Object>>(pc) = object;
}
}
@@ -339,20 +308,14 @@ void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
#define EMIT(x) \
*pc_++ = (x)
-Assembler::Assembler(const AssemblerOptions& options, void* buffer,
- int buffer_size)
- : AssemblerBase(options, buffer, buffer_size) {
-// Clear the buffer in debug mode unless it was provided by the
-// caller in which case we can't be sure it's okay to overwrite
-// existing code in it.
-#ifdef DEBUG
- if (own_buffer_) ZapCode(reinterpret_cast<Address>(buffer_), buffer_size_);
-#endif
-
- reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);
+Assembler::Assembler(const AssemblerOptions& options,
+ std::unique_ptr<AssemblerBuffer> buffer)
+ : AssemblerBase(options, std::move(buffer)) {
+ reloc_info_writer.Reposition(buffer_start_ + buffer_->size(), pc_);
}
void Assembler::GetCode(Isolate* isolate, CodeDesc* desc) {
+ int code_comments_size = WriteCodeComments();
// Finalize code (at this point overflow() may be true, but the gap ensures
// that we are still not overlapping instructions and relocation info).
DCHECK(pc_ <= reloc_info_writer.pos()); // No overlap.
@@ -360,15 +323,19 @@ void Assembler::GetCode(Isolate* isolate, CodeDesc* desc) {
AllocateAndInstallRequestedHeapObjects(isolate);
// Set up code descriptor.
- desc->buffer = buffer_;
- desc->buffer_size = buffer_size_;
+ desc->buffer = buffer_start_;
+ desc->buffer_size = buffer_->size();
desc->instr_size = pc_offset();
- desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
+ desc->reloc_size =
+ (buffer_start_ + desc->buffer_size) - reloc_info_writer.pos();
desc->origin = this;
desc->constant_pool_size = 0;
desc->unwinding_info_size = 0;
desc->unwinding_info = nullptr;
+ desc->code_comments_size = code_comments_size;
+}
+void Assembler::FinalizeJumpOptimizationInfo() {
// Collection stage
auto jump_opt = jump_optimization_info();
if (jump_opt && jump_opt->is_collecting()) {
@@ -393,7 +360,6 @@ void Assembler::GetCode(Isolate* isolate, CodeDesc* desc) {
}
}
-
void Assembler::Align(int m) {
DCHECK(base::bits::IsPowerOfTwo(m));
int mask = m - 1;
@@ -413,7 +379,6 @@ bool Assembler::IsNop(Address addr) {
void Assembler::Nop(int bytes) {
EnsureSpace ensure_space(this);
-
// Multi byte nops from http://support.amd.com/us/Processor_TechDocs/40546.pdf
while (bytes > 0) {
switch (bytes) {
@@ -501,7 +466,6 @@ void Assembler::pushad() {
void Assembler::popad() {
EnsureSpace ensure_space(this);
- AssertIsAddressable(ebx);
EMIT(0x61);
}
@@ -538,7 +502,6 @@ void Assembler::push_imm32(int32_t imm32) {
void Assembler::push(Register src) {
- AssertIsAddressable(src);
EnsureSpace ensure_space(this);
EMIT(0x50 | src.code());
}
@@ -551,7 +514,6 @@ void Assembler::push(Operand src) {
void Assembler::pop(Register dst) {
- AssertIsAddressable(dst);
DCHECK_NOT_NULL(reloc_info_writer.last_pc());
EnsureSpace ensure_space(this);
EMIT(0x58 | dst.code());
@@ -623,7 +585,6 @@ void Assembler::mov_w(Operand dst, const Immediate& src) {
void Assembler::mov(Register dst, int32_t imm32) {
- AssertIsAddressable(dst);
EnsureSpace ensure_space(this);
EMIT(0xB8 | dst.code());
emit(imm32);
@@ -631,14 +592,12 @@ void Assembler::mov(Register dst, int32_t imm32) {
void Assembler::mov(Register dst, const Immediate& x) {
- AssertIsAddressable(dst);
EnsureSpace ensure_space(this);
EMIT(0xB8 | dst.code());
emit(x);
}
void Assembler::mov(Register dst, Handle<HeapObject> handle) {
- AssertIsAddressable(dst);
EnsureSpace ensure_space(this);
EMIT(0xB8 | dst.code());
emit(handle);
@@ -652,8 +611,6 @@ void Assembler::mov(Register dst, Operand src) {
void Assembler::mov(Register dst, Register src) {
- AssertIsAddressable(src);
- AssertIsAddressable(dst);
EnsureSpace ensure_space(this);
EMIT(0x89);
EMIT(0xC0 | src.code() << 3 | dst.code());
@@ -687,6 +644,7 @@ void Assembler::mov(Operand dst, Register src) {
}
void Assembler::movsx_b(Register dst, Operand src) {
+ DCHECK_IMPLIES(src.is_reg_only(), src.reg().is_byte_register());
EnsureSpace ensure_space(this);
EMIT(0x0F);
EMIT(0xBE);
@@ -701,6 +659,7 @@ void Assembler::movsx_w(Register dst, Operand src) {
}
void Assembler::movzx_b(Register dst, Operand src) {
+ DCHECK_IMPLIES(src.is_reg_only(), src.reg().is_byte_register());
EnsureSpace ensure_space(this);
EMIT(0x0F);
EMIT(0xB6);
@@ -758,8 +717,6 @@ void Assembler::stos() {
void Assembler::xchg(Register dst, Register src) {
- AssertIsAddressable(src);
- AssertIsAddressable(dst);
EnsureSpace ensure_space(this);
if (src == eax || dst == eax) { // Single-byte encoding.
EMIT(0x90 | (src == eax ? dst.code() : src.code()));
@@ -990,7 +947,6 @@ void Assembler::cmpw_ax(Operand op) {
void Assembler::dec_b(Register dst) {
- AssertIsAddressable(dst);
CHECK(dst.is_byte_register());
EnsureSpace ensure_space(this);
EMIT(0xFE);
@@ -1005,7 +961,6 @@ void Assembler::dec_b(Operand dst) {
void Assembler::dec(Register dst) {
- AssertIsAddressable(dst);
EnsureSpace ensure_space(this);
EMIT(0x48 | dst.code());
}
@@ -1036,7 +991,6 @@ void Assembler::div(Operand src) {
void Assembler::imul(Register reg) {
- AssertIsAddressable(reg);
EnsureSpace ensure_space(this);
EMIT(0xF7);
EMIT(0xE8 | reg.code());
@@ -1069,7 +1023,6 @@ void Assembler::imul(Register dst, Operand src, int32_t imm32) {
void Assembler::inc(Register dst) {
- AssertIsAddressable(dst);
EnsureSpace ensure_space(this);
EMIT(0x40 | dst.code());
}
@@ -1088,7 +1041,6 @@ void Assembler::lea(Register dst, Operand src) {
void Assembler::mul(Register src) {
- AssertIsAddressable(src);
EnsureSpace ensure_space(this);
EMIT(0xF7);
EMIT(0xE0 | src.code());
@@ -1096,14 +1048,12 @@ void Assembler::mul(Register src) {
void Assembler::neg(Register dst) {
- AssertIsAddressable(dst);
EnsureSpace ensure_space(this);
EMIT(0xF7);
EMIT(0xD8 | dst.code());
}
void Assembler::neg(Operand dst) {
- AllowExplicitEbxAccessScope register_used_for_regcode(this);
EnsureSpace ensure_space(this);
EMIT(0xF7);
emit_operand(ebx, dst);
@@ -1111,7 +1061,6 @@ void Assembler::neg(Operand dst) {
void Assembler::not_(Register dst) {
- AssertIsAddressable(dst);
EnsureSpace ensure_space(this);
EMIT(0xF7);
EMIT(0xD0 | dst.code());
@@ -1148,7 +1097,6 @@ void Assembler::or_(Operand dst, Register src) {
void Assembler::rcl(Register dst, uint8_t imm8) {
- AssertIsAddressable(dst);
EnsureSpace ensure_space(this);
DCHECK(is_uint5(imm8)); // illegal shift count
if (imm8 == 1) {
@@ -1163,7 +1111,6 @@ void Assembler::rcl(Register dst, uint8_t imm8) {
void Assembler::rcr(Register dst, uint8_t imm8) {
- AssertIsAddressable(dst);
EnsureSpace ensure_space(this);
DCHECK(is_uint5(imm8)); // illegal shift count
if (imm8 == 1) {
@@ -1321,7 +1268,6 @@ void Assembler::test(Register reg, const Immediate& imm) {
return;
}
- AssertIsAddressable(reg);
EnsureSpace ensure_space(this);
// This is not using emit_arith because test doesn't support
// sign-extension of 8-bit operands.
@@ -1362,7 +1308,6 @@ void Assembler::test(Operand op, const Immediate& imm) {
}
void Assembler::test_b(Register reg, Immediate imm8) {
- AssertIsAddressable(reg);
DCHECK(imm8.is_uint8());
EnsureSpace ensure_space(this);
// Only use test against byte for registers that have a byte
@@ -1392,7 +1337,6 @@ void Assembler::test_b(Operand op, Immediate imm8) {
}
void Assembler::test_w(Register reg, Immediate imm16) {
- AssertIsAddressable(reg);
DCHECK(imm16.is_int16() || imm16.is_uint16());
EnsureSpace ensure_space(this);
if (reg == eax) {
@@ -1449,7 +1393,6 @@ void Assembler::xor_(Operand dst, const Immediate& x) {
}
void Assembler::bswap(Register dst) {
- AssertIsAddressable(dst);
EnsureSpace ensure_space(this);
EMIT(0x0F);
EMIT(0xC8 + dst.code());
@@ -1561,10 +1504,10 @@ void Assembler::bind_to(Label* L, int pos) {
Displacement disp = disp_at(L);
int fixup_pos = L->pos();
if (disp.type() == Displacement::CODE_ABSOLUTE) {
- long_at_put(fixup_pos, reinterpret_cast<int>(buffer_ + pos));
+ long_at_put(fixup_pos, reinterpret_cast<int>(buffer_start_ + pos));
internal_reference_positions_.push_back(fixup_pos);
} else if (disp.type() == Displacement::CODE_RELATIVE) {
- // Relative to Code* heap object pointer.
+ // Relative to Code heap object pointer.
long_at_put(fixup_pos, pos + Code::kHeaderSize - kHeapObjectTag);
} else {
if (disp.type() == Displacement::UNCONDITIONAL_JUMP) {
@@ -1678,12 +1621,6 @@ void Assembler::call(Handle<Code> code, RelocInfo::Mode rmode) {
emit(code, rmode);
}
-void Assembler::call(CodeStub* stub) {
- EnsureSpace ensure_space(this);
- EMIT(0xE8);
- emit(Immediate::EmbeddedCode(stub));
-}
-
void Assembler::jmp_rel(int offset) {
EnsureSpace ensure_space(this);
const int short_size = 2;
@@ -1878,7 +1815,6 @@ void Assembler::fld_d(Operand adr) {
}
void Assembler::fstp_s(Operand adr) {
- AllowExplicitEbxAccessScope register_used_for_regcode(this);
EnsureSpace ensure_space(this);
EMIT(0xD9);
emit_operand(ebx, adr);
@@ -1891,7 +1827,6 @@ void Assembler::fst_s(Operand adr) {
}
void Assembler::fstp_d(Operand adr) {
- AllowExplicitEbxAccessScope register_used_for_regcode(this);
EnsureSpace ensure_space(this);
EMIT(0xDD);
emit_operand(ebx, adr);
@@ -1916,7 +1851,6 @@ void Assembler::fild_d(Operand adr) {
}
void Assembler::fistp_s(Operand adr) {
- AllowExplicitEbxAccessScope register_used_for_regcode(this);
EnsureSpace ensure_space(this);
EMIT(0xDB);
emit_operand(ebx, adr);
@@ -2204,7 +2138,6 @@ void Assembler::sahf() {
void Assembler::setcc(Condition cc, Register reg) {
- AssertIsAddressable(reg);
DCHECK(reg.is_byte_register());
EnsureSpace ensure_space(this);
EMIT(0x0F);
@@ -2214,6 +2147,8 @@ void Assembler::setcc(Condition cc, Register reg) {
void Assembler::cvttss2si(Register dst, Operand src) {
EnsureSpace ensure_space(this);
+ // The [src] might contain ebx's register code, but in
+ // this case, it refers to xmm3, so it is OK to emit.
EMIT(0xF3);
EMIT(0x0F);
EMIT(0x2C);
@@ -2222,6 +2157,8 @@ void Assembler::cvttss2si(Register dst, Operand src) {
void Assembler::cvttsd2si(Register dst, Operand src) {
EnsureSpace ensure_space(this);
+ // The [src] might contain ebx's register code, but in
+ // this case, it refers to xmm3, so it is OK to emit.
EMIT(0xF2);
EMIT(0x0F);
EMIT(0x2C);
@@ -3185,7 +3122,6 @@ void Assembler::vinstr(byte op, XMMRegister dst, XMMRegister src1, Operand src2,
}
void Assembler::emit_sse_operand(XMMRegister reg, Operand adr) {
- AllowExplicitEbxAccessScope accessing_xmm_register(this);
Register ireg = Register::from_code(reg.code());
emit_operand(ireg, adr);
}
@@ -3197,13 +3133,11 @@ void Assembler::emit_sse_operand(XMMRegister dst, XMMRegister src) {
void Assembler::emit_sse_operand(Register dst, XMMRegister src) {
- AssertIsAddressable(dst);
EMIT(0xC0 | dst.code() << 3 | src.code());
}
void Assembler::emit_sse_operand(XMMRegister dst, Register src) {
- AssertIsAddressable(src);
EMIT(0xC0 | (dst.code() << 3) | src.code());
}
@@ -3231,56 +3165,51 @@ void Assembler::emit_vex_prefix(Register vreg, VectorLength l, SIMDPrefix pp,
void Assembler::GrowBuffer() {
DCHECK(buffer_overflow());
- if (!own_buffer_) FATAL("external code buffer is too small");
+ DCHECK_EQ(buffer_start_, buffer_->start());
// Compute new buffer size.
- CodeDesc desc; // the new buffer
- desc.buffer_size = 2 * buffer_size_;
+ int old_size = buffer_->size();
+ int new_size = 2 * old_size;
// Some internal data structures overflow for very large buffers,
// they must ensure that kMaximalBufferSize is not too large.
- if (desc.buffer_size > kMaximalBufferSize) {
+ if (new_size > kMaximalBufferSize) {
V8::FatalProcessOutOfMemory(nullptr, "Assembler::GrowBuffer");
}
// Set up new buffer.
- desc.buffer = NewArray<byte>(desc.buffer_size);
- desc.origin = this;
- desc.instr_size = pc_offset();
- desc.reloc_size = (buffer_ + buffer_size_) - (reloc_info_writer.pos());
-
- // Clear the buffer in debug mode. Use 'int3' instructions to make
- // sure to get into problems if we ever run uninitialized code.
-#ifdef DEBUG
- ZapCode(reinterpret_cast<Address>(desc.buffer), desc.buffer_size);
-#endif
+ std::unique_ptr<AssemblerBuffer> new_buffer = buffer_->Grow(new_size);
+ DCHECK_EQ(new_size, new_buffer->size());
+ byte* new_start = new_buffer->start();
// Copy the data.
- int pc_delta = desc.buffer - buffer_;
- int rc_delta = (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_);
- MemMove(desc.buffer, buffer_, desc.instr_size);
+ intptr_t pc_delta = new_start - buffer_start_;
+ intptr_t rc_delta = (new_start + new_size) - (buffer_start_ + old_size);
+ size_t reloc_size = (buffer_start_ + old_size) - reloc_info_writer.pos();
+ MemMove(new_start, buffer_start_, pc_offset());
MemMove(rc_delta + reloc_info_writer.pos(), reloc_info_writer.pos(),
- desc.reloc_size);
+ reloc_size);
// Switch buffers.
- DeleteArray(buffer_);
- buffer_ = desc.buffer;
- buffer_size_ = desc.buffer_size;
+ buffer_ = std::move(new_buffer);
+ buffer_start_ = new_start;
pc_ += pc_delta;
reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
reloc_info_writer.last_pc() + pc_delta);
// Relocate internal references.
for (auto pos : internal_reference_positions_) {
- int32_t* p = reinterpret_cast<int32_t*>(buffer_ + pos);
+ int32_t* p = reinterpret_cast<int32_t*>(buffer_start_ + pos);
*p += pc_delta;
}
// Relocate pc-relative references.
- int mode_mask = RelocInfo::ModeMask(RelocInfo::JS_TO_WASM_CALL) |
- RelocInfo::ModeMask(RelocInfo::OFF_HEAP_TARGET);
+ int mode_mask = RelocInfo::ModeMask(RelocInfo::OFF_HEAP_TARGET);
DCHECK_EQ(mode_mask, RelocInfo::kApplyMask & mode_mask);
- for (RelocIterator it(desc, mode_mask); !it.done(); it.next()) {
+ Vector<byte> instructions{buffer_start_, static_cast<size_t>(pc_offset())};
+ Vector<const byte> reloc_info{reloc_info_writer.pos(), reloc_size};
+ for (RelocIterator it(instructions, reloc_info, 0, mode_mask); !it.done();
+ it.next()) {
it.rinfo()->apply(pc_delta);
}
@@ -3289,7 +3218,6 @@ void Assembler::GrowBuffer() {
void Assembler::emit_arith_b(int op1, int op2, Register dst, int imm8) {
- AssertIsAddressable(dst);
DCHECK(is_uint8(op1) && is_uint8(op2)); // wrong opcode
DCHECK(is_uint8(imm8));
DCHECK_EQ(op1 & 0x01, 0); // should be 8bit operation
@@ -3300,7 +3228,6 @@ void Assembler::emit_arith_b(int op1, int op2, Register dst, int imm8) {
void Assembler::emit_arith(int sel, Operand dst, const Immediate& x) {
- AssertIsAddressable(dst);
DCHECK((0 <= sel) && (sel <= 7));
Register ireg = Register::from_code(sel);
if (x.is_int8()) {
@@ -3327,16 +3254,13 @@ void Assembler::emit_operand(XMMRegister reg, Operand adr) {
}
void Assembler::emit_operand(int code, Operand adr) {
- AssertIsAddressable(adr);
- AssertIsAddressable(Register::from_code(code));
// Isolate-independent code may not embed relocatable addresses.
DCHECK(!options().isolate_independent_code ||
adr.rmode_ != RelocInfo::CODE_TARGET);
DCHECK(!options().isolate_independent_code ||
adr.rmode_ != RelocInfo::EMBEDDED_OBJECT);
- // TODO(jgruber,v8:6666): Enable once kRootRegister exists.
- // DCHECK(!options().isolate_independent_code ||
- // adr.rmode_ != RelocInfo::EXTERNAL_REFERENCE);
+ DCHECK(!options().isolate_independent_code ||
+ adr.rmode_ != RelocInfo::EXTERNAL_REFERENCE);
const unsigned length = adr.len_;
DCHECK_GT(length, 0);
@@ -3364,7 +3288,7 @@ void Assembler::emit_operand(int code, Operand adr) {
void Assembler::emit_label(Label* label) {
if (label->is_bound()) {
internal_reference_positions_.push_back(pc_offset());
- emit(reinterpret_cast<uint32_t>(buffer_ + label->pos()));
+ emit(reinterpret_cast<uint32_t>(buffer_start_ + label->pos()));
} else {
emit_disp(label, Displacement::CODE_ABSOLUTE);
}
@@ -3406,20 +3330,10 @@ void Assembler::dd(Label* label) {
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
if (!ShouldRecordRelocInfo(rmode)) return;
- RelocInfo rinfo(reinterpret_cast<Address>(pc_), rmode, data, nullptr);
+ RelocInfo rinfo(reinterpret_cast<Address>(pc_), rmode, data, Code());
reloc_info_writer.Write(&rinfo);
}
-#ifdef DEBUG
-void Assembler::AssertIsAddressable(const Operand& operand) {
- DCHECK(is_ebx_addressable_ || !operand.UsesEbx());
-}
-
-void Assembler::AssertIsAddressable(const Register& reg) {
- DCHECK(is_ebx_addressable_ || reg != ebx);
-}
-#endif // DEBUG
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/ia32/assembler-ia32.h b/deps/v8/src/ia32/assembler-ia32.h
index b721542f13..96bf2c7eeb 100644
--- a/deps/v8/src/ia32/assembler-ia32.h
+++ b/deps/v8/src/ia32/assembler-ia32.h
@@ -41,122 +41,16 @@
#include "src/assembler.h"
#include "src/ia32/constants-ia32.h"
+#include "src/ia32/register-ia32.h"
#include "src/ia32/sse-instr.h"
#include "src/isolate.h"
+#include "src/label.h"
+#include "src/objects/smi.h"
#include "src/utils.h"
namespace v8 {
namespace internal {
-#define GENERAL_REGISTERS(V) \
- V(eax) \
- V(ecx) \
- V(edx) \
- V(ebx) \
- V(esp) \
- V(ebp) \
- V(esi) \
- V(edi)
-
-#define ALLOCATABLE_GENERAL_REGISTERS(V) \
- V(eax) \
- V(ecx) \
- V(edx) \
- V(ebx) \
- V(esi) \
- V(edi)
-
-#define DOUBLE_REGISTERS(V) \
- V(xmm0) \
- V(xmm1) \
- V(xmm2) \
- V(xmm3) \
- V(xmm4) \
- V(xmm5) \
- V(xmm6) \
- V(xmm7)
-
-#define FLOAT_REGISTERS DOUBLE_REGISTERS
-#define SIMD128_REGISTERS DOUBLE_REGISTERS
-
-#define ALLOCATABLE_DOUBLE_REGISTERS(V) \
- V(xmm1) \
- V(xmm2) \
- V(xmm3) \
- V(xmm4) \
- V(xmm5) \
- V(xmm6) \
- V(xmm7)
-
-enum RegisterCode {
-#define REGISTER_CODE(R) kRegCode_##R,
- GENERAL_REGISTERS(REGISTER_CODE)
-#undef REGISTER_CODE
- kRegAfterLast
-};
-
-class Register : public RegisterBase<Register, kRegAfterLast> {
- public:
- bool is_byte_register() const { return reg_code_ <= 3; }
-
- private:
- friend class RegisterBase<Register, kRegAfterLast>;
- explicit constexpr Register(int code) : RegisterBase(code) {}
-};
-
-ASSERT_TRIVIALLY_COPYABLE(Register);
-static_assert(sizeof(Register) == sizeof(int),
- "Register can efficiently be passed by value");
-
-#define DEFINE_REGISTER(R) \
- constexpr Register R = Register::from_code<kRegCode_##R>();
-GENERAL_REGISTERS(DEFINE_REGISTER)
-#undef DEFINE_REGISTER
-constexpr Register no_reg = Register::no_reg();
-
-constexpr bool kPadArguments = false;
-constexpr bool kSimpleFPAliasing = true;
-constexpr bool kSimdMaskRegisters = false;
-
-enum DoubleCode {
-#define REGISTER_CODE(R) kDoubleCode_##R,
- DOUBLE_REGISTERS(REGISTER_CODE)
-#undef REGISTER_CODE
- kDoubleAfterLast
-};
-
-class XMMRegister : public RegisterBase<XMMRegister, kDoubleAfterLast> {
- friend class RegisterBase<XMMRegister, kDoubleAfterLast>;
- explicit constexpr XMMRegister(int code) : RegisterBase(code) {}
-};
-
-typedef XMMRegister FloatRegister;
-
-typedef XMMRegister DoubleRegister;
-
-typedef XMMRegister Simd128Register;
-
-#define DEFINE_REGISTER(R) \
- constexpr DoubleRegister R = DoubleRegister::from_code<kDoubleCode_##R>();
-DOUBLE_REGISTERS(DEFINE_REGISTER)
-#undef DEFINE_REGISTER
-constexpr DoubleRegister no_dreg = DoubleRegister::no_reg();
-
-// Note that the bit values must match those used in actual instruction encoding
-constexpr int kNumRegs = 8;
-
-// Caller-saved registers
-constexpr RegList kJSCallerSaved =
- Register::ListOf<eax, ecx, edx,
- ebx, // used as a caller-saved register in JavaScript code
- edi // callee function
- >();
-
-constexpr int kNumJSCallerSaved = 5;
-
-// Number of registers for which space is reserved in safepoints.
-constexpr int kNumSafepointRegisters = 8;
-
enum Condition {
// any value < 0 is considered no_condition
no_condition = -1,
@@ -218,11 +112,10 @@ class Immediate {
: Immediate(ext.address(), RelocInfo::EXTERNAL_REFERENCE) {}
inline explicit Immediate(Handle<HeapObject> handle)
: Immediate(handle.address(), RelocInfo::EMBEDDED_OBJECT) {}
- inline explicit Immediate(Smi* value)
- : Immediate(reinterpret_cast<intptr_t>(value)) {}
+ inline explicit Immediate(Smi value)
+ : Immediate(static_cast<intptr_t>(value.ptr())) {}
static Immediate EmbeddedNumber(double number); // Smi or HeapNumber.
- static Immediate EmbeddedCode(CodeStub* code);
static Immediate EmbeddedStringConstant(const StringConstantBase* str);
static Immediate CodeRelativeOffset(Label* label) {
@@ -246,6 +139,14 @@ class Immediate {
return value_.immediate;
}
+ bool is_embedded_object() const {
+ return !is_heap_object_request() && rmode() == RelocInfo::EMBEDDED_OBJECT;
+ }
+
+ Handle<HeapObject> embedded_object() const {
+ return Handle<HeapObject>(reinterpret_cast<Address*>(immediate()));
+ }
+
bool is_external_reference() const {
return rmode() == RelocInfo::EXTERNAL_REFERENCE;
}
@@ -361,17 +262,10 @@ class V8_EXPORT_PRIVATE Operand {
// register.
Register reg() const;
-#ifdef DEBUG
- bool UsesEbx() const { return uses_ebx_; }
-#endif // DEBUG
-
private:
// Set the ModRM byte without an encoded 'reg' register. The
// register is encoded later as part of the emit_operand operation.
inline void set_modrm(int mod, Register rm) {
-#ifdef DEBUG
- AddUsedRegister(rm);
-#endif
DCHECK_EQ(mod & -4, 0);
buf_[0] = mod << 6 | rm.code();
len_ = 1;
@@ -398,23 +292,12 @@ class V8_EXPORT_PRIVATE Operand {
// Only valid if len_ > 4.
RelocInfo::Mode rmode_ = RelocInfo::NONE;
-#ifdef DEBUG
- // TODO(v8:6666): Remove once kRootRegister support is complete.
- bool uses_ebx_ = false;
- void AddUsedRegister(Register reg) {
- if (reg == ebx) uses_ebx_ = true;
- }
-#endif // DEBUG
-
// TODO(clemensh): Get rid of this friendship, or make Operand immutable.
friend class Assembler;
};
ASSERT_TRIVIALLY_COPYABLE(Operand);
-// TODO(v8:6666): Re-enable globally once kRootRegister support is complete.
-#ifndef DEBUG
static_assert(sizeof(Operand) <= 2 * kPointerSize,
"Operand must be small enough to pass it by value");
-#endif
// -----------------------------------------------------------------------------
// A Displacement describes the 32bit immediate field of an instruction which
@@ -485,15 +368,9 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// for a detailed comment on the layout (globals.h).
//
// If the provided buffer is nullptr, the assembler allocates and grows its
- // own buffer, and buffer_size determines the initial buffer size. The buffer
- // is owned by the assembler and deallocated upon destruction of the
- // assembler.
- //
- // If the provided buffer is not nullptr, the assembler uses the provided
- // buffer for code generation and assumes its size to be buffer_size. If the
- // buffer is too small, a fatal error occurs. No deallocation of the buffer is
- // done upon destruction of the assembler.
- Assembler(const AssemblerOptions& options, void* buffer, int buffer_size);
+ // own buffer. Otherwise it takes ownership of the provided buffer.
+ explicit Assembler(const AssemblerOptions&,
+ std::unique_ptr<AssemblerBuffer> = {});
virtual ~Assembler() {}
// GetCode emits any pending (non-emitted) code and fills the descriptor
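
The constructor change above collapses the old two-mode contract into a single ownership transfer. A sketch of the interface this implies — the start()/size()/Grow() members are inferred from their uses in GrowBuffer earlier in this patch, so treat the exact signatures as assumptions:

    class AssemblerBuffer {  // assumed shape, inferred from this patch
     public:
      virtual ~AssemblerBuffer() = default;
      virtual byte* start() const = 0;
      virtual int size() const = 0;
      virtual std::unique_ptr<AssemblerBuffer> Grow(int new_size) = 0;
    };

    // Internal, growable buffer:          Assembler masm(options);
    // Caller-owned buffer, handed over:   Assembler masm(options, std::move(my_buffer));
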
@@ -501,6 +378,8 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// Assembler functions are invoked in between GetCode() calls.
void GetCode(Isolate* isolate, CodeDesc* desc);
+ void FinalizeJumpOptimizationInfo();
+
// Read/Modify the code target in the branch/call instruction at pc.
// The isolate argument is unused (and may be nullptr) when skipping flushing.
inline static Address target_address_at(Address pc, Address constant_pool);
@@ -515,7 +394,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// This sets the branch destination (which is in the instruction on x86).
// This is for calls and branches within generated code.
inline static void deserialization_set_special_target_at(
- Address instruction_payload, Code* code, Address target);
+ Address instruction_payload, Code code, Address target);
// Get the size of the special target encoded at 'instruction_payload'.
inline static int deserialization_special_target_size(
@@ -532,8 +411,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// and the return address
static constexpr int kCallTargetAddressOffset = kPointerSize;
- static constexpr int kCallInstructionLength = 5;
-
// One byte opcode for test al, 0xXX.
static constexpr byte kTestAlByte = 0xA8;
// One byte opcode for nop.
@@ -851,7 +728,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void call(Register reg) { call(Operand(reg)); }
void call(Operand adr);
void call(Handle<Code> code, RelocInfo::Mode rmode);
- void call(CodeStub* stub);
void wasm_call(Address address, RelocInfo::Mode rmode);
// Jumps
@@ -1733,9 +1609,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
return pc_offset() - label->pos();
}
- // Use --code-comments to enable.
- void RecordComment(const char* msg);
-
// Record a deoptimization reason that can be used by a log or cpu profiler.
// Use --trace-deopt to enable.
void RecordDeoptReason(DeoptimizeReason reason, SourcePosition position,
@@ -1762,45 +1635,14 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
static bool IsNop(Address addr);
int relocation_writer_size() {
- return (buffer_ + buffer_size_) - reloc_info_writer.pos();
+ return (buffer_start_ + buffer_->size()) - reloc_info_writer.pos();
}
// Avoid overflows for displacements etc.
static constexpr int kMaximalBufferSize = 512 * MB;
- byte byte_at(int pos) { return buffer_[pos]; }
- void set_byte_at(int pos, byte value) { buffer_[pos] = value; }
-
- void PatchConstantPoolAccessInstruction(int pc_offset, int offset,
- ConstantPoolEntry::Access access,
- ConstantPoolEntry::Type type) {
- // No embedded constant pool support.
- UNREACHABLE();
- }
-
- // Temporary helper data structures while adding kRootRegister support to ia32
- // builtins. The SupportsRootRegisterScope is intended to mark each builtin
- // and helper that fully supports the root register, i.e. that does not
- // clobber ebx. The AllowExplicitEbxAccessScope marks regions that are allowed
- // to clobber ebx, e.g. when ebx is spilled and restored.
- // TODO(v8:6666): Remove once kRootRegister is fully supported.
- template <bool new_value>
- class SetRootRegisterSupportScope final {
- public:
- explicit SetRootRegisterSupportScope(Assembler* assembler)
- : assembler_(assembler), old_value_(assembler->is_ebx_addressable_) {
- assembler_->is_ebx_addressable_ = new_value;
- }
- ~SetRootRegisterSupportScope() {
- assembler_->is_ebx_addressable_ = old_value_;
- }
-
- private:
- Assembler* assembler_;
- const bool old_value_;
- };
- typedef SetRootRegisterSupportScope<false> SupportsRootRegisterScope;
- typedef SetRootRegisterSupportScope<true> AllowExplicitEbxAccessScope;
+ byte byte_at(int pos) { return buffer_start_[pos]; }
+ void set_byte_at(int pos, byte value) { buffer_start_[pos] = value; }
protected:
void emit_sse_operand(XMMRegister reg, Operand adr);
@@ -1808,18 +1650,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void emit_sse_operand(Register dst, XMMRegister src);
void emit_sse_operand(XMMRegister dst, Register src);
- byte* addr_at(int pos) { return buffer_ + pos; }
-
-#ifdef DEBUG
- // TODO(v8:6666): Remove once kRootRegister is fully supported.
- void AssertIsAddressable(const Register& reg);
- void AssertIsAddressable(const Operand& operand);
-#else
- // An empty inline definition to avoid slowing down release builds.
- void AssertIsAddressable(const Register&) {}
- void AssertIsAddressable(const Operand&) {}
-#endif // DEBUG
- bool is_ebx_addressable_ = true;
+ byte* addr_at(int pos) { return buffer_start_ + pos; }
private:
uint32_t long_at(int pos) {
@@ -1902,6 +1733,8 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void AllocateAndInstallRequestedHeapObjects(Isolate* isolate);
+ int WriteCodeComments();
+
friend class EnsureSpace;
// Internal reference positions, required for (potential) patching in
diff --git a/deps/v8/src/ia32/code-stubs-ia32.cc b/deps/v8/src/ia32/code-stubs-ia32.cc
deleted file mode 100644
index 63cd5a9621..0000000000
--- a/deps/v8/src/ia32/code-stubs-ia32.cc
+++ /dev/null
@@ -1,514 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#if V8_TARGET_ARCH_IA32
-
-#include "src/api-arguments-inl.h"
-#include "src/assembler-inl.h"
-#include "src/base/bits.h"
-#include "src/bootstrapper.h"
-#include "src/code-stubs.h"
-#include "src/frame-constants.h"
-#include "src/frames.h"
-#include "src/heap/heap-inl.h"
-#include "src/ic/ic.h"
-#include "src/ic/stub-cache.h"
-#include "src/isolate.h"
-#include "src/objects/api-callbacks.h"
-#include "src/regexp/jsregexp.h"
-#include "src/regexp/regexp-macro-assembler.h"
-#include "src/runtime/runtime.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm)
-
-void JSEntryStub::Generate(MacroAssembler* masm) {
- Label invoke, handler_entry, exit;
- Label not_outermost_js, not_outermost_js_2;
-
- ProfileEntryHookStub::MaybeCallEntryHook(masm);
-
- // Set up frame.
- __ push(ebp);
- __ mov(ebp, esp);
-
- // Push marker in two places.
- StackFrame::Type marker = type();
- __ push(Immediate(StackFrame::TypeToMarker(marker))); // marker
- ExternalReference context_address =
- ExternalReference::Create(IsolateAddressId::kContextAddress, isolate());
- __ push(__ StaticVariable(context_address)); // context
- // Save callee-saved registers (C calling conventions).
- __ push(edi);
- __ push(esi);
- __ push(ebx);
-
- __ InitializeRootRegister();
- Assembler::SupportsRootRegisterScope supports_root_register(masm);
-
- // Save copies of the top frame descriptor on the stack.
- ExternalReference c_entry_fp =
- ExternalReference::Create(IsolateAddressId::kCEntryFPAddress, isolate());
- __ push(__ StaticVariable(c_entry_fp));
-
- // If this is the outermost JS call, set js_entry_sp value.
- ExternalReference js_entry_sp =
- ExternalReference::Create(IsolateAddressId::kJSEntrySPAddress, isolate());
- __ cmp(__ StaticVariable(js_entry_sp), Immediate(0));
- __ j(not_equal, &not_outermost_js, Label::kNear);
- __ mov(__ StaticVariable(js_entry_sp), ebp);
- __ push(Immediate(StackFrame::OUTERMOST_JSENTRY_FRAME));
- __ jmp(&invoke, Label::kNear);
- __ bind(&not_outermost_js);
- __ push(Immediate(StackFrame::INNER_JSENTRY_FRAME));
-
- // Jump to a faked try block that does the invoke, with a faked catch
- // block that sets the pending exception.
- __ jmp(&invoke);
- __ bind(&handler_entry);
- handler_offset_ = handler_entry.pos();
- // Caught exception: Store result (exception) in the pending exception
- // field in the JSEnv and return a failure sentinel.
- ExternalReference pending_exception = ExternalReference::Create(
- IsolateAddressId::kPendingExceptionAddress, isolate());
- __ mov(__ StaticVariable(pending_exception), eax);
- __ mov(eax, Immediate(isolate()->factory()->exception()));
- __ jmp(&exit);
-
- // Invoke: Link this frame into the handler chain.
- __ bind(&invoke);
- __ PushStackHandler();
-
- // Invoke the function by calling through JS entry trampoline builtin and
- // pop the faked function when we return. Notice that we cannot store a
- // reference to the trampoline code directly in this stub, because the
- // builtin stubs may not have been generated yet.
- __ Call(EntryTrampoline(), RelocInfo::CODE_TARGET);
-
- // Unlink this frame from the handler chain.
- __ PopStackHandler();
-
- __ bind(&exit);
-
- __ VerifyRootRegister();
-
- // Check if the current stack frame is marked as the outermost JS frame.
- Assembler::AllowExplicitEbxAccessScope exiting_js(masm);
- __ pop(ebx);
- __ cmp(ebx, Immediate(StackFrame::OUTERMOST_JSENTRY_FRAME));
- __ j(not_equal, &not_outermost_js_2);
- __ mov(__ StaticVariable(js_entry_sp), Immediate(0));
- __ bind(&not_outermost_js_2);
-
- // Restore the top frame descriptor from the stack.
- __ pop(__ StaticVariable(ExternalReference::Create(
- IsolateAddressId::kCEntryFPAddress, isolate())));
-
- // Restore callee-saved registers (C calling conventions).
- __ pop(ebx);
- __ pop(esi);
- __ pop(edi);
- __ add(esp, Immediate(2 * kPointerSize)); // remove markers
-
- // Restore frame pointer and return.
- __ pop(ebp);
- __ ret(0);
-}
-
-void ProfileEntryHookStub::MaybeCallEntryHookDelayed(TurboAssembler* tasm,
- Zone* zone) {
- if (tasm->isolate()->function_entry_hook() != nullptr) {
- tasm->CallStubDelayed(new (zone) ProfileEntryHookStub(nullptr));
- }
-}
-
-void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
- if (masm->isolate()->function_entry_hook() != nullptr) {
- ProfileEntryHookStub stub(masm->isolate());
- masm->CallStub(&stub);
- }
-}
-
-
-void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
- Assembler::SupportsRootRegisterScope supports_root_register(masm);
-
- // Save volatile registers.
- const int kNumSavedRegisters = 3;
- __ push(eax);
- __ push(ecx);
- __ push(edx);
-
- // Calculate and push the original stack pointer.
- __ lea(eax, Operand(esp, (kNumSavedRegisters + 1) * kPointerSize));
- __ push(eax);
-
- // Retrieve our return address and use it to calculate the calling
- // function's address.
- __ mov(eax, Operand(esp, (kNumSavedRegisters + 1) * kPointerSize));
- __ sub(eax, Immediate(Assembler::kCallInstructionLength));
- __ push(eax);
-
- // Call the entry hook.
- DCHECK_NOT_NULL(isolate()->function_entry_hook());
- __ call(FUNCTION_ADDR(isolate()->function_entry_hook()),
- RelocInfo::RUNTIME_ENTRY);
- __ add(esp, Immediate(2 * kPointerSize));
-
- // Restore volatile registers.
- __ pop(edx);
- __ pop(ecx);
- __ pop(eax);
-
- __ ret(0);
-}
-
-// Generates an Operand for saving parameters after PrepareCallApiFunction.
-static Operand ApiParameterOperand(int index) {
- return Operand(esp, index * kPointerSize);
-}
-
-
-// Prepares the stack for arguments (alignment and so on) and reserves
-// space for the return value if needed (assuming the return value is a
-// handle). Arguments must be stored in ApiParameterOperand(0),
-// ApiParameterOperand(1) etc. Saves the context (esi). If space was
-// reserved for the return value, stores the pointer to the reserved
-// slot into esi.
-static void PrepareCallApiFunction(MacroAssembler* masm, int argc) {
- __ EnterApiExitFrame(argc);
- if (__ emit_debug_code()) {
- __ mov(esi, Immediate(bit_cast<int32_t>(kZapValue)));
- }
-}
-
-// Calls an API function. Allocates HandleScope, extracts returned value
-// from handle and propagates exceptions. Clobbers esi, edi and
-// caller-save registers. Restores context. On return removes
-// stack_space * kPointerSize (GCed).
-static void CallApiFunctionAndReturn(MacroAssembler* masm,
- Register function_address,
- ExternalReference thunk_ref,
- Operand thunk_last_arg, int stack_space,
- Operand* stack_space_operand,
- Operand return_value_operand) {
- Assembler::SupportsRootRegisterScope supports_root_register(masm);
- Isolate* isolate = masm->isolate();
-
- ExternalReference next_address =
- ExternalReference::handle_scope_next_address(isolate);
- ExternalReference limit_address =
- ExternalReference::handle_scope_limit_address(isolate);
- ExternalReference level_address =
- ExternalReference::handle_scope_level_address(isolate);
-
- DCHECK(edx == function_address);
- // Allocate HandleScope in callee-save registers.
- __ mov(esi, __ StaticVariable(next_address));
- __ mov(edi, __ StaticVariable(limit_address));
- __ add(__ StaticVariable(level_address), Immediate(1));
-
- if (FLAG_log_timer_events) {
- FrameScope frame(masm, StackFrame::MANUAL);
- __ PushSafepointRegisters();
- __ PrepareCallCFunction(1, eax);
- __ mov(Operand(esp, 0),
- Immediate(ExternalReference::isolate_address(isolate)));
- __ CallCFunction(ExternalReference::log_enter_external_function(), 1);
- __ PopSafepointRegisters();
- }
-
-
- Label profiler_disabled;
- Label end_profiler_check;
- __ mov(eax, Immediate(ExternalReference::is_profiling_address(isolate)));
- __ cmpb(Operand(eax, 0), Immediate(0));
- __ j(zero, &profiler_disabled);
-
- // Additional parameter is the address of the actual getter function.
- __ mov(thunk_last_arg, function_address);
- // Call the api function.
- __ mov(eax, Immediate(thunk_ref));
- __ call(eax);
- __ jmp(&end_profiler_check);
-
- __ bind(&profiler_disabled);
- // Call the api function.
- __ call(function_address);
- __ bind(&end_profiler_check);
-
- if (FLAG_log_timer_events) {
- FrameScope frame(masm, StackFrame::MANUAL);
- __ PushSafepointRegisters();
- __ PrepareCallCFunction(1, eax);
- __ mov(Operand(esp, 0),
- Immediate(ExternalReference::isolate_address(isolate)));
- __ CallCFunction(ExternalReference::log_leave_external_function(), 1);
- __ PopSafepointRegisters();
- }
-
- Label prologue;
- // Load the value from ReturnValue
- __ mov(eax, return_value_operand);
-
- Label promote_scheduled_exception;
- Label delete_allocated_handles;
- Label leave_exit_frame;
-
- __ bind(&prologue);
- // No more valid handles (the result handle was the last one). Restore
- // previous handle scope.
- __ mov(__ StaticVariable(next_address), esi);
- __ sub(__ StaticVariable(level_address), Immediate(1));
- __ Assert(above_equal, AbortReason::kInvalidHandleScopeLevel);
- __ cmp(edi, __ StaticVariable(limit_address));
- __ j(not_equal, &delete_allocated_handles);
-
- // Leave the API exit frame.
- __ bind(&leave_exit_frame);
- if (stack_space_operand != nullptr) {
- __ mov(edx, *stack_space_operand);
- }
- __ LeaveApiExitFrame();
-
- // Check if the function scheduled an exception.
- ExternalReference scheduled_exception_address =
- ExternalReference::scheduled_exception_address(isolate);
- __ cmp(__ StaticVariable(scheduled_exception_address),
- Immediate(isolate->factory()->the_hole_value()));
- __ j(not_equal, &promote_scheduled_exception);
-
-#if DEBUG
- // Check if the function returned a valid JavaScript value.
- Label ok;
- Register return_value = eax;
- Register map = ecx;
-
- __ JumpIfSmi(return_value, &ok, Label::kNear);
- __ mov(map, FieldOperand(return_value, HeapObject::kMapOffset));
-
- __ CmpInstanceType(map, LAST_NAME_TYPE);
- __ j(below_equal, &ok, Label::kNear);
-
- __ CmpInstanceType(map, FIRST_JS_RECEIVER_TYPE);
- __ j(above_equal, &ok, Label::kNear);
-
- __ cmp(map, isolate->factory()->heap_number_map());
- __ j(equal, &ok, Label::kNear);
-
- __ cmp(return_value, isolate->factory()->undefined_value());
- __ j(equal, &ok, Label::kNear);
-
- __ cmp(return_value, isolate->factory()->true_value());
- __ j(equal, &ok, Label::kNear);
-
- __ cmp(return_value, isolate->factory()->false_value());
- __ j(equal, &ok, Label::kNear);
-
- __ cmp(return_value, isolate->factory()->null_value());
- __ j(equal, &ok, Label::kNear);
-
- __ Abort(AbortReason::kAPICallReturnedInvalidObject);
-
- __ bind(&ok);
-#endif
-
- if (stack_space_operand != nullptr) {
- DCHECK_EQ(0, stack_space);
- __ pop(ecx);
- __ add(esp, edx);
- __ jmp(ecx);
- } else {
- __ ret(stack_space * kPointerSize);
- }
-
- // Re-throw by promoting a scheduled exception.
- __ bind(&promote_scheduled_exception);
- __ TailCallRuntime(Runtime::kPromoteScheduledException);
-
- // HandleScope limit has changed. Delete allocated extensions.
- ExternalReference delete_extensions =
- ExternalReference::delete_handle_scope_extensions();
- __ bind(&delete_allocated_handles);
- __ mov(__ StaticVariable(limit_address), edi);
- __ mov(edi, eax);
- __ mov(Operand(esp, 0),
- Immediate(ExternalReference::isolate_address(isolate)));
- __ mov(eax, Immediate(delete_extensions));
- __ call(eax);
- __ mov(eax, edi);
- __ jmp(&leave_exit_frame);
-}
-
-void CallApiCallbackStub::Generate(MacroAssembler* masm) {
- Assembler::SupportsRootRegisterScope supports_root_register(masm);
-
- // ----------- S t a t e -------------
- // -- eax : call_data
- // -- ecx : holder
- // -- edx : api_function_address
- // -- esi : context
- // --
- // -- esp[0] : return address
- // -- esp[4] : last argument
- // -- ...
- // -- esp[argc * 4] : first argument
- // -- esp[(argc + 1) * 4] : receiver
- // -----------------------------------
-
- Register call_data = eax;
- Register holder = ecx;
- Register api_function_address = edx;
- Register return_address = edi;
-
- typedef FunctionCallbackArguments FCA;
-
- STATIC_ASSERT(FCA::kArgsLength == 6);
- STATIC_ASSERT(FCA::kNewTargetIndex == 5);
- STATIC_ASSERT(FCA::kDataIndex == 4);
- STATIC_ASSERT(FCA::kReturnValueOffset == 3);
- STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
- STATIC_ASSERT(FCA::kIsolateIndex == 1);
- STATIC_ASSERT(FCA::kHolderIndex == 0);
-
- __ pop(return_address);
-
- // new target
- __ PushRoot(RootIndex::kUndefinedValue);
-
- // call data
- __ push(call_data);
-
- // return value
- __ PushRoot(RootIndex::kUndefinedValue);
- // return value default
- __ PushRoot(RootIndex::kUndefinedValue);
- // isolate
- __ push(Immediate(ExternalReference::isolate_address(isolate())));
- // holder
- __ push(holder);
-
- Register scratch = call_data;
-
- __ mov(scratch, esp);
-
- // push return address
- __ push(return_address);
-
- // The API function gets a reference to the v8::Arguments. If the CPU
- // profiler is enabled, a wrapper function is called instead and the
- // address of the callback must be passed as an additional parameter,
- // so always allocate space for it.
- const int kApiArgc = 1 + 1;
-
- // Allocate the v8::Arguments structure in the arguments' space since
- // it's not controlled by GC.
- const int kApiStackSpace = 3;
-
- PrepareCallApiFunction(masm, kApiArgc + kApiStackSpace);
-
- // FunctionCallbackInfo::implicit_args_.
- __ mov(ApiParameterOperand(2), scratch);
- __ add(scratch, Immediate((argc() + FCA::kArgsLength - 1) * kPointerSize));
- // FunctionCallbackInfo::values_.
- __ mov(ApiParameterOperand(3), scratch);
- // FunctionCallbackInfo::length_.
- __ Move(ApiParameterOperand(4), Immediate(argc()));
-
- // v8::InvocationCallback's argument.
- __ lea(scratch, ApiParameterOperand(2));
- __ mov(ApiParameterOperand(0), scratch);
-
- ExternalReference thunk_ref = ExternalReference::invoke_function_callback();
-
- // Stores return the first JS argument.
- int return_value_offset = 2 + FCA::kReturnValueOffset;
- Operand return_value_operand(ebp, return_value_offset * kPointerSize);
- const int stack_space = argc() + FCA::kArgsLength + 1;
- Operand* stack_space_operand = nullptr;
- CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
- ApiParameterOperand(1), stack_space,
- stack_space_operand, return_value_operand);
-}
-
-
-void CallApiGetterStub::Generate(MacroAssembler* masm) {
- Assembler::SupportsRootRegisterScope supports_root_register(masm);
-
- // Build v8::PropertyCallbackInfo::args_ array on the stack and push property
- // name below the exit frame to make GC aware of them.
- STATIC_ASSERT(PropertyCallbackArguments::kShouldThrowOnErrorIndex == 0);
- STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 1);
- STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 2);
- STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 3);
- STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 4);
- STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 5);
- STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 6);
- STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 7);
-
- Register receiver = ApiGetterDescriptor::ReceiverRegister();
- Register holder = ApiGetterDescriptor::HolderRegister();
- Register callback = ApiGetterDescriptor::CallbackRegister();
- Register scratch = edi;
- DCHECK(!AreAliased(receiver, holder, callback, scratch));
-
- __ pop(scratch); // Pop return address to extend the frame.
- __ push(receiver);
- __ push(FieldOperand(callback, AccessorInfo::kDataOffset));
- __ PushRoot(RootIndex::kUndefinedValue); // ReturnValue
- // ReturnValue default value
- __ PushRoot(RootIndex::kUndefinedValue);
- __ push(Immediate(ExternalReference::isolate_address(isolate())));
- __ push(holder);
- __ push(Immediate(Smi::kZero)); // should_throw_on_error -> false
- __ push(FieldOperand(callback, AccessorInfo::kNameOffset));
- __ push(scratch); // Restore return address.
-
- // v8::PropertyCallbackInfo::args_ array and name handle.
- const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
-
- // Allocate v8::PropertyCallbackInfo object, arguments for callback and
- // space for optional callback address parameter (in case CPU profiler is
- // active) in non-GCed stack space.
- const int kApiArgc = 3 + 1;
-
- // Load address of v8::PropertyAccessorInfo::args_ array.
- __ lea(scratch, Operand(esp, 2 * kPointerSize));
-
- PrepareCallApiFunction(masm, kApiArgc);
- // Create the v8::PropertyCallbackInfo object on the stack and initialize
- // its args_ field.
- Operand info_object = ApiParameterOperand(3);
- __ mov(info_object, scratch);
-
- // Name as handle.
- __ sub(scratch, Immediate(kPointerSize));
- __ mov(ApiParameterOperand(0), scratch);
- // Arguments pointer.
- __ lea(scratch, info_object);
- __ mov(ApiParameterOperand(1), scratch);
- // Reserve space for optional callback address parameter.
- Operand thunk_last_arg = ApiParameterOperand(2);
-
- ExternalReference thunk_ref =
- ExternalReference::invoke_accessor_getter_callback();
-
- __ mov(scratch, FieldOperand(callback, AccessorInfo::kJsGetterOffset));
- Register function_address = edx;
- __ mov(function_address,
- FieldOperand(scratch, Foreign::kForeignAddressOffset));
- // +3 is to skip the prologue, return address and name handle.
- Operand return_value_operand(
- ebp, (PropertyCallbackArguments::kReturnValueOffset + 3) * kPointerSize);
- CallApiFunctionAndReturn(masm, function_address, thunk_ref, thunk_last_arg,
- kStackUnwindSpace, nullptr, return_value_operand);
-}
-
-#undef __
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_TARGET_ARCH_IA32
diff --git a/deps/v8/src/ia32/codegen-ia32.cc b/deps/v8/src/ia32/codegen-ia32.cc
deleted file mode 100644
index 78790b75d0..0000000000
--- a/deps/v8/src/ia32/codegen-ia32.cc
+++ /dev/null
@@ -1,467 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#if V8_TARGET_ARCH_IA32
-
-#include "src/codegen.h"
-#include "src/heap/factory-inl.h"
-#include "src/heap/heap.h"
-#include "src/macro-assembler.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ masm.
-
-UnaryMathFunction CreateSqrtFunction() {
- v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
- size_t allocated = 0;
- byte* buffer = AllocatePage(page_allocator,
- page_allocator->GetRandomMmapAddr(), &allocated);
- if (buffer == nullptr) return nullptr;
-
- MacroAssembler masm(AssemblerOptions{}, buffer, static_cast<int>(allocated));
- // esp[1 * kPointerSize]: raw double input
- // esp[0 * kPointerSize]: return address
- // Move double input into registers.
- {
- __ movsd(xmm0, Operand(esp, 1 * kPointerSize));
- __ sqrtsd(xmm0, xmm0);
- __ movsd(Operand(esp, 1 * kPointerSize), xmm0);
- // Load result into floating point register as return value.
- __ fld_d(Operand(esp, 1 * kPointerSize));
- __ Ret();
- }
-
- CodeDesc desc;
- masm.GetCode(nullptr, &desc);
- DCHECK(!RelocInfo::RequiresRelocationAfterCodegen(desc));
-
- Assembler::FlushICache(buffer, allocated);
- CHECK(SetPermissions(page_allocator, buffer, allocated,
- PageAllocator::kReadExecute));
- return FUNCTION_CAST<UnaryMathFunction>(buffer);
-}
-
-
-// Helper functions for CreateMemMoveFunction.
-#undef __
-#define __ ACCESS_MASM(masm)
-
-enum Direction { FORWARD, BACKWARD };
-enum Alignment { MOVE_ALIGNED, MOVE_UNALIGNED };
-
-// Expects registers:
-// esi - source, aligned if alignment == MOVE_ALIGNED
-// edi - destination, always aligned
-// ecx - count (copy size in bytes)
-// edx - loop count (number of 64 byte chunks)
-void MemMoveEmitMainLoop(MacroAssembler* masm,
- Label* move_last_15,
- Direction direction,
- Alignment alignment) {
- Register src = esi;
- Register dst = edi;
- Register count = ecx;
- Register loop_count = edx;
- Label loop, move_last_31, move_last_63;
- __ cmp(loop_count, 0);
- __ j(equal, &move_last_63);
- __ bind(&loop);
- // Main loop. Copy in 64 byte chunks.
- if (direction == BACKWARD) __ sub(src, Immediate(0x40));
- __ movdq(alignment == MOVE_ALIGNED, xmm0, Operand(src, 0x00));
- __ movdq(alignment == MOVE_ALIGNED, xmm1, Operand(src, 0x10));
- __ movdq(alignment == MOVE_ALIGNED, xmm2, Operand(src, 0x20));
- __ movdq(alignment == MOVE_ALIGNED, xmm3, Operand(src, 0x30));
- if (direction == FORWARD) __ add(src, Immediate(0x40));
- if (direction == BACKWARD) __ sub(dst, Immediate(0x40));
- __ movdqa(Operand(dst, 0x00), xmm0);
- __ movdqa(Operand(dst, 0x10), xmm1);
- __ movdqa(Operand(dst, 0x20), xmm2);
- __ movdqa(Operand(dst, 0x30), xmm3);
- if (direction == FORWARD) __ add(dst, Immediate(0x40));
- __ dec(loop_count);
- __ j(not_zero, &loop);
- // At most 63 bytes left to copy.
- __ bind(&move_last_63);
- __ test(count, Immediate(0x20));
- __ j(zero, &move_last_31);
- if (direction == BACKWARD) __ sub(src, Immediate(0x20));
- __ movdq(alignment == MOVE_ALIGNED, xmm0, Operand(src, 0x00));
- __ movdq(alignment == MOVE_ALIGNED, xmm1, Operand(src, 0x10));
- if (direction == FORWARD) __ add(src, Immediate(0x20));
- if (direction == BACKWARD) __ sub(dst, Immediate(0x20));
- __ movdqa(Operand(dst, 0x00), xmm0);
- __ movdqa(Operand(dst, 0x10), xmm1);
- if (direction == FORWARD) __ add(dst, Immediate(0x20));
- // At most 31 bytes left to copy.
- __ bind(&move_last_31);
- __ test(count, Immediate(0x10));
- __ j(zero, move_last_15);
- if (direction == BACKWARD) __ sub(src, Immediate(0x10));
- __ movdq(alignment == MOVE_ALIGNED, xmm0, Operand(src, 0));
- if (direction == FORWARD) __ add(src, Immediate(0x10));
- if (direction == BACKWARD) __ sub(dst, Immediate(0x10));
- __ movdqa(Operand(dst, 0), xmm0);
- if (direction == FORWARD) __ add(dst, Immediate(0x10));
-}
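
What the emitted loop does, restated as a C++ sketch for the forward, post-alignment case (std::memcpy stands in for the XMM moves; the real code also handles the backward direction by pre-decrementing the pointers):

    void CopyChunksForward(byte* dst, const byte* src, size_t count) {
      size_t loop_count = count >> 6;        // number of 64-byte chunks
      while (loop_count--) {                 // main loop: four XMM loads/stores
        std::memcpy(dst, src, 64);
        dst += 64; src += 64;
      }
      if (count & 0x20) { std::memcpy(dst, src, 32); dst += 32; src += 32; }
      if (count & 0x10) { std::memcpy(dst, src, 16); dst += 16; src += 16; }
      // At most 15 bytes remain; the caller's move_last_15 label covers them.
    }
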
-
-
-void MemMoveEmitPopAndReturn(MacroAssembler* masm) {
- __ pop(esi);
- __ pop(edi);
- __ ret(0);
-}
-
-
-#undef __
-#define __ masm.
-
-
-class LabelConverter {
- public:
- explicit LabelConverter(byte* buffer) : buffer_(buffer) {}
- int32_t address(Label* l) const {
- return reinterpret_cast<int32_t>(buffer_) + l->pos();
- }
- private:
- byte* buffer_;
-};
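
LabelConverter works only because, as the comment a few lines below notes, the buffer is fixed and unmovable: a bound label's absolute address is simply the buffer base plus its offset. The same computation as a sketch:

    // Absolute address of a bound label; valid only while the buffer never
    // moves. These addresses are emitted into the jump tables via dd().
    int32_t AbsoluteAddress(byte* buffer, const Label* l) {
      return reinterpret_cast<int32_t>(buffer) + l->pos();
    }
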
-
-MemMoveFunction CreateMemMoveFunction() {
- v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
- size_t allocated = 0;
- byte* buffer = AllocatePage(page_allocator,
- page_allocator->GetRandomMmapAddr(), &allocated);
- if (buffer == nullptr) return nullptr;
-
- MacroAssembler masm(AssemblerOptions{}, buffer, static_cast<int>(allocated));
- LabelConverter conv(buffer);
-
- // Generated code is put into a fixed, unmovable buffer, and not into
- // the V8 heap. We can't, and don't, refer to any relocatable addresses
- // (e.g. the JavaScript nan-object).
-
- // 32-bit C declaration function calls pass arguments on stack.
-
- // Stack layout:
- // esp[12]: Third argument, size.
- // esp[8]: Second argument, source pointer.
- // esp[4]: First argument, destination pointer.
- // esp[0]: return address
-
- const int kDestinationOffset = 1 * kPointerSize;
- const int kSourceOffset = 2 * kPointerSize;
- const int kSizeOffset = 3 * kPointerSize;
-
- // When copying up to this many bytes, use special "small" handlers.
- const size_t kSmallCopySize = 8;
- // When copying up to this many bytes, use special "medium" handlers.
- const size_t kMediumCopySize = 63;
- // When the non-overlapping region of src and dst is smaller than this,
- // use a more careful implementation (slightly slower).
- const size_t kMinMoveDistance = 16;
- // Note that these values are dictated by the implementation below;
- // do not just change them and hope things will work!
-
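
The three constants above drive the size-class dispatch that the jumps below implement. Sketched in C++ (the handler names are placeholders for the labeled blocks in the generated code, not real functions):

    void SmallCopy(byte* dst, const byte* src, size_t n);    // count <= 8
    void MediumCopy(byte* dst, const byte* src, size_t n);   // 9..63
    void ForwardCopy(byte* dst, const byte* src, size_t n);
    void BackwardCopy(byte* dst, const byte* src, size_t n);

    void MemMoveDispatch(byte* dst, const byte* src, size_t count) {
      if (dst == src) return;
      if (count <= 8) return SmallCopy(dst, src, count);     // kSmallCopySize
      if (count <= 63) return MediumCopy(dst, src, count);   // kMediumCopySize
      // Large copies pick a direction; each direction falls back to a
      // byte-wise "much overlap" path when |dst - src| < kMinMoveDistance.
      if (dst < src) ForwardCopy(dst, src, count);
      else BackwardCopy(dst, src, count);
    }
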
- int stack_offset = 0; // Update if we change the stack height.
-
- Label backward, backward_much_overlap;
- Label forward_much_overlap, small_size, medium_size, pop_and_return;
- __ push(edi);
- __ push(esi);
- stack_offset += 2 * kPointerSize;
- Register dst = edi;
- Register src = esi;
- Register count = ecx;
- Register loop_count = edx;
- __ mov(dst, Operand(esp, stack_offset + kDestinationOffset));
- __ mov(src, Operand(esp, stack_offset + kSourceOffset));
- __ mov(count, Operand(esp, stack_offset + kSizeOffset));
-
- __ cmp(dst, src);
- __ j(equal, &pop_and_return);
-
- __ prefetch(Operand(src, 0), 1);
- __ cmp(count, kSmallCopySize);
- __ j(below_equal, &small_size);
- __ cmp(count, kMediumCopySize);
- __ j(below_equal, &medium_size);
- __ cmp(dst, src);
- __ j(above, &backward);
-
- {
- // |dst| is a lower address than |src|. Copy front-to-back.
- Label unaligned_source, move_last_15, skip_last_move;
- __ mov(eax, src);
- __ sub(eax, dst);
- __ cmp(eax, kMinMoveDistance);
- __ j(below, &forward_much_overlap);
- // Copy first 16 bytes.
- __ movdqu(xmm0, Operand(src, 0));
- __ movdqu(Operand(dst, 0), xmm0);
- // Determine distance to alignment: 16 - (dst & 0xF).
- __ mov(edx, dst);
- __ and_(edx, 0xF);
- __ neg(edx);
- __ add(edx, Immediate(16));
- __ add(dst, edx);
- __ add(src, edx);
- __ sub(count, edx);
- // dst is now aligned. Main copy loop.
- __ mov(loop_count, count);
- __ shr(loop_count, 6);
- // Check if src is also aligned.
- __ test(src, Immediate(0xF));
- __ j(not_zero, &unaligned_source);
- // Copy loop for aligned source and destination.
- MemMoveEmitMainLoop(&masm, &move_last_15, FORWARD, MOVE_ALIGNED);
- // At most 15 bytes to copy. Copy 16 bytes at end of string.
- __ bind(&move_last_15);
- __ and_(count, 0xF);
- __ j(zero, &skip_last_move, Label::kNear);
- __ movdqu(xmm0, Operand(src, count, times_1, -0x10));
- __ movdqu(Operand(dst, count, times_1, -0x10), xmm0);
- __ bind(&skip_last_move);
- MemMoveEmitPopAndReturn(&masm);
-
- // Copy loop for unaligned source and aligned destination.
- __ bind(&unaligned_source);
- MemMoveEmitMainLoop(&masm, &move_last_15, FORWARD, MOVE_UNALIGNED);
- __ jmp(&move_last_15);
-
- // Less than kMinMoveDistance offset between dst and src.
- Label loop_until_aligned, last_15_much_overlap;
- __ bind(&loop_until_aligned);
- __ mov_b(eax, Operand(src, 0));
- __ inc(src);
- __ mov_b(Operand(dst, 0), eax);
- __ inc(dst);
- __ dec(count);
- __ bind(&forward_much_overlap); // Entry point into this block.
- __ test(dst, Immediate(0xF));
- __ j(not_zero, &loop_until_aligned);
- // dst is now aligned, src can't be. Main copy loop.
- __ mov(loop_count, count);
- __ shr(loop_count, 6);
- MemMoveEmitMainLoop(&masm, &last_15_much_overlap,
- FORWARD, MOVE_UNALIGNED);
- __ bind(&last_15_much_overlap);
- __ and_(count, 0xF);
- __ j(zero, &pop_and_return);
- __ cmp(count, kSmallCopySize);
- __ j(below_equal, &small_size);
- __ jmp(&medium_size);
- }
-
- {
- // |dst| is a higher address than |src|. Copy backwards.
- Label unaligned_source, move_first_15, skip_last_move;
- __ bind(&backward);
- // |dst| and |src| always point to the end of what's left to copy.
- __ add(dst, count);
- __ add(src, count);
- __ mov(eax, dst);
- __ sub(eax, src);
- __ cmp(eax, kMinMoveDistance);
- __ j(below, &backward_much_overlap);
- // Copy last 16 bytes.
- __ movdqu(xmm0, Operand(src, -0x10));
- __ movdqu(Operand(dst, -0x10), xmm0);
- // Find distance to alignment: dst & 0xF
- __ mov(edx, dst);
- __ and_(edx, 0xF);
- __ sub(dst, edx);
- __ sub(src, edx);
- __ sub(count, edx);
- // dst is now aligned. Main copy loop.
- __ mov(loop_count, count);
- __ shr(loop_count, 6);
- // Check if src is also aligned.
- __ test(src, Immediate(0xF));
- __ j(not_zero, &unaligned_source);
- // Copy loop for aligned source and destination.
- MemMoveEmitMainLoop(&masm, &move_first_15, BACKWARD, MOVE_ALIGNED);
- // At most 15 bytes to copy. Copy 16 bytes at beginning of string.
- __ bind(&move_first_15);
- __ and_(count, 0xF);
- __ j(zero, &skip_last_move, Label::kNear);
- __ sub(src, count);
- __ sub(dst, count);
- __ movdqu(xmm0, Operand(src, 0));
- __ movdqu(Operand(dst, 0), xmm0);
- __ bind(&skip_last_move);
- MemMoveEmitPopAndReturn(&masm);
-
- // Copy loop for unaligned source and aligned destination.
- __ bind(&unaligned_source);
- MemMoveEmitMainLoop(&masm, &move_first_15, BACKWARD, MOVE_UNALIGNED);
- __ jmp(&move_first_15);
-
- // Less than kMinMoveDistance offset between dst and src.
- Label loop_until_aligned, first_15_much_overlap;
- __ bind(&loop_until_aligned);
- __ dec(src);
- __ dec(dst);
- __ mov_b(eax, Operand(src, 0));
- __ mov_b(Operand(dst, 0), eax);
- __ dec(count);
- __ bind(&backward_much_overlap); // Entry point into this block.
- __ test(dst, Immediate(0xF));
- __ j(not_zero, &loop_until_aligned);
- // dst is now aligned, src can't be. Main copy loop.
- __ mov(loop_count, count);
- __ shr(loop_count, 6);
- MemMoveEmitMainLoop(&masm, &first_15_much_overlap,
- BACKWARD, MOVE_UNALIGNED);
- __ bind(&first_15_much_overlap);
- __ and_(count, 0xF);
- __ j(zero, &pop_and_return);
- // Small/medium handlers expect dst/src to point to the beginning.
- __ sub(dst, count);
- __ sub(src, count);
- __ cmp(count, kSmallCopySize);
- __ j(below_equal, &small_size);
- __ jmp(&medium_size);
- }
- {
- // Special handlers for 9 <= copy_size < 64. No assumptions about
- // alignment or move distance, so all reads must be unaligned and
- // must happen before any writes.
- Label medium_handlers, f9_16, f17_32, f33_48, f49_63;
-
- __ bind(&f9_16);
- __ movsd(xmm0, Operand(src, 0));
- __ movsd(xmm1, Operand(src, count, times_1, -8));
- __ movsd(Operand(dst, 0), xmm0);
- __ movsd(Operand(dst, count, times_1, -8), xmm1);
- MemMoveEmitPopAndReturn(&masm);
-
- __ bind(&f17_32);
- __ movdqu(xmm0, Operand(src, 0));
- __ movdqu(xmm1, Operand(src, count, times_1, -0x10));
- __ movdqu(Operand(dst, 0x00), xmm0);
- __ movdqu(Operand(dst, count, times_1, -0x10), xmm1);
- MemMoveEmitPopAndReturn(&masm);
-
- __ bind(&f33_48);
- __ movdqu(xmm0, Operand(src, 0x00));
- __ movdqu(xmm1, Operand(src, 0x10));
- __ movdqu(xmm2, Operand(src, count, times_1, -0x10));
- __ movdqu(Operand(dst, 0x00), xmm0);
- __ movdqu(Operand(dst, 0x10), xmm1);
- __ movdqu(Operand(dst, count, times_1, -0x10), xmm2);
- MemMoveEmitPopAndReturn(&masm);
-
- __ bind(&f49_63);
- __ movdqu(xmm0, Operand(src, 0x00));
- __ movdqu(xmm1, Operand(src, 0x10));
- __ movdqu(xmm2, Operand(src, 0x20));
- __ movdqu(xmm3, Operand(src, count, times_1, -0x10));
- __ movdqu(Operand(dst, 0x00), xmm0);
- __ movdqu(Operand(dst, 0x10), xmm1);
- __ movdqu(Operand(dst, 0x20), xmm2);
- __ movdqu(Operand(dst, count, times_1, -0x10), xmm3);
- MemMoveEmitPopAndReturn(&masm);
-
- __ bind(&medium_handlers);
- __ dd(conv.address(&f9_16));
- __ dd(conv.address(&f17_32));
- __ dd(conv.address(&f33_48));
- __ dd(conv.address(&f49_63));
-
- __ bind(&medium_size); // Entry point into this block.
- __ mov(eax, count);
- __ dec(eax);
- __ shr(eax, 4);
- if (FLAG_debug_code) {
- Label ok;
- __ cmp(eax, 3);
- __ j(below_equal, &ok);
- __ int3();
- __ bind(&ok);
- }
- __ mov(eax, Operand(eax, times_4, conv.address(&medium_handlers)));
- __ jmp(eax);
- }
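
The index computed above is (count - 1) >> 4, which maps each medium size onto its handler:

    // (count - 1) >> 4 selects the handler:
    //   count  9..16 -> ( 8..15) >> 4 == 0 -> f9_16
    //   count 17..32 -> (16..31) >> 4 == 1 -> f17_32
    //   count 33..48 -> (32..47) >> 4 == 2 -> f33_48
    //   count 49..63 -> (48..62) >> 4 == 3 -> f49_63
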
- {
- // Specialized copiers for copy_size <= 8 bytes.
- Label small_handlers, f0, f1, f2, f3, f4, f5_8;
- __ bind(&f0);
- MemMoveEmitPopAndReturn(&masm);
-
- __ bind(&f1);
- __ mov_b(eax, Operand(src, 0));
- __ mov_b(Operand(dst, 0), eax);
- MemMoveEmitPopAndReturn(&masm);
-
- __ bind(&f2);
- __ mov_w(eax, Operand(src, 0));
- __ mov_w(Operand(dst, 0), eax);
- MemMoveEmitPopAndReturn(&masm);
-
- __ bind(&f3);
- __ mov_w(eax, Operand(src, 0));
- __ mov_b(edx, Operand(src, 2));
- __ mov_w(Operand(dst, 0), eax);
- __ mov_b(Operand(dst, 2), edx);
- MemMoveEmitPopAndReturn(&masm);
-
- __ bind(&f4);
- __ mov(eax, Operand(src, 0));
- __ mov(Operand(dst, 0), eax);
- MemMoveEmitPopAndReturn(&masm);
-
- __ bind(&f5_8);
- __ mov(eax, Operand(src, 0));
- __ mov(edx, Operand(src, count, times_1, -4));
- __ mov(Operand(dst, 0), eax);
- __ mov(Operand(dst, count, times_1, -4), edx);
- MemMoveEmitPopAndReturn(&masm);
-
- __ bind(&small_handlers);
- __ dd(conv.address(&f0));
- __ dd(conv.address(&f1));
- __ dd(conv.address(&f2));
- __ dd(conv.address(&f3));
- __ dd(conv.address(&f4));
- __ dd(conv.address(&f5_8));
- __ dd(conv.address(&f5_8));
- __ dd(conv.address(&f5_8));
- __ dd(conv.address(&f5_8));
-
- __ bind(&small_size); // Entry point into this block.
- if (FLAG_debug_code) {
- Label ok;
- __ cmp(count, 8);
- __ j(below_equal, &ok);
- __ int3();
- __ bind(&ok);
- }
- __ mov(eax, Operand(count, times_4, conv.address(&small_handlers)));
- __ jmp(eax);
- }
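
The small table is indexed directly by count, with f5_8 repeated so counts 5 through 8 share one handler; its two possibly overlapping 4-byte moves are safe because both reads happen before both writes:

    // table[count]:  f0  f1  f2  f3  f4  f5_8  f5_8  f5_8  f5_8
    // e.g. count == 6: read [src, src+4) and [src+2, src+6), then write the
    // same two dwords to dst -- bytes 2..3 are written twice with identical
    // values, so the result is correct.
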
-
- __ bind(&pop_and_return);
- MemMoveEmitPopAndReturn(&masm);
-
- CodeDesc desc;
- masm.GetCode(nullptr, &desc);
- DCHECK(!RelocInfo::RequiresRelocationAfterCodegen(desc));
- Assembler::FlushICache(buffer, allocated);
- CHECK(SetPermissions(page_allocator, buffer, allocated,
- PageAllocator::kReadExecute));
- // TODO(jkummerow): It would be nice to register this code creation event
- // with the PROFILE / GDBJIT system.
- return FUNCTION_CAST<MemMoveFunction>(buffer);
-}
-
-
-#undef __
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_TARGET_ARCH_IA32
diff --git a/deps/v8/src/ia32/constants-ia32.h b/deps/v8/src/ia32/constants-ia32.h
index ed1104eef9..38ad1280f1 100644
--- a/deps/v8/src/ia32/constants-ia32.h
+++ b/deps/v8/src/ia32/constants-ia32.h
@@ -15,10 +15,6 @@ namespace internal {
// currently no root register is present.
constexpr int kRootRegisterBias = 0;
-// Used temporarily to track clobbering of the root register.
-// TODO(v8:6666): Remove this once use the root register.
-constexpr size_t kRootRegisterSentinel = 0xcafeca11;
-
// TODO(sigurds): Change this value once we use relative jumps.
constexpr size_t kMaxPCRelativeCodeRangeInMB = 0;
} // namespace internal
diff --git a/deps/v8/src/ia32/cpu-ia32.cc b/deps/v8/src/ia32/cpu-ia32.cc
index 8de6d1eeb1..73b71e8dde 100644
--- a/deps/v8/src/ia32/cpu-ia32.cc
+++ b/deps/v8/src/ia32/cpu-ia32.cc
@@ -10,8 +10,7 @@
#if V8_TARGET_ARCH_IA32
-#include "src/assembler.h"
-#include "src/macro-assembler.h"
+#include "src/cpu-features.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/ia32/deoptimizer-ia32.cc b/deps/v8/src/ia32/deoptimizer-ia32.cc
index 2c68241fc2..01da1c61b8 100644
--- a/deps/v8/src/ia32/deoptimizer-ia32.cc
+++ b/deps/v8/src/ia32/deoptimizer-ia32.cc
@@ -4,23 +4,21 @@
#if V8_TARGET_ARCH_IA32
-#include "src/assembler-inl.h"
#include "src/deoptimizer.h"
#include "src/frame-constants.h"
+#include "src/macro-assembler.h"
#include "src/register-configuration.h"
#include "src/safepoint-table.h"
namespace v8 {
namespace internal {
-const int Deoptimizer::table_entry_size_ = 10;
+#define __ masm->
-#define __ masm()->
-
-void Deoptimizer::TableEntryGenerator::Generate() {
- Assembler::SupportsRootRegisterScope supports_root_register(masm());
-
- GeneratePrologue();
+void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
+ Isolate* isolate,
+ DeoptimizeKind deopt_kind) {
+ NoRootArrayScope no_root_array(masm);
// Save all general purpose registers before messing with them.
const int kNumberOfRegisters = Register::kNumRegisters;
@@ -48,19 +46,18 @@ void Deoptimizer::TableEntryGenerator::Generate() {
__ pushad();
ExternalReference c_entry_fp_address =
- ExternalReference::Create(IsolateAddressId::kCEntryFPAddress, isolate());
- __ mov(masm()->StaticVariable(c_entry_fp_address), ebp);
+ ExternalReference::Create(IsolateAddressId::kCEntryFPAddress, isolate);
+ __ mov(masm->ExternalReferenceAsOperand(c_entry_fp_address, esi), ebp);
const int kSavedRegistersAreaSize =
kNumberOfRegisters * kPointerSize + kDoubleRegsSize + kFloatRegsSize;
- // Get the bailout id from the stack.
- __ mov(esi, Operand(esp, kSavedRegistersAreaSize));
+ // The bailout id is passed in ebx by the caller.
// Get the address of the location in the code object
// and compute the fp-to-sp delta in register edx.
- __ mov(ecx, Operand(esp, kSavedRegistersAreaSize + 1 * kPointerSize));
- __ lea(edx, Operand(esp, kSavedRegistersAreaSize + 2 * kPointerSize));
+ __ mov(ecx, Operand(esp, kSavedRegistersAreaSize));
+ __ lea(edx, Operand(esp, kSavedRegistersAreaSize + 1 * kPointerSize));
__ sub(edx, ebp);
__ neg(edx);
@@ -75,14 +72,14 @@ void Deoptimizer::TableEntryGenerator::Generate() {
__ bind(&context_check);
__ mov(Operand(esp, 0 * kPointerSize), eax); // Function.
__ mov(Operand(esp, 1 * kPointerSize),
- Immediate(static_cast<int>(deopt_kind())));
- __ mov(Operand(esp, 2 * kPointerSize), esi); // Bailout id.
+ Immediate(static_cast<int>(deopt_kind)));
+ __ mov(Operand(esp, 2 * kPointerSize), ebx); // Bailout id.
__ mov(Operand(esp, 3 * kPointerSize), ecx); // Code address or 0.
__ mov(Operand(esp, 4 * kPointerSize), edx); // Fp-to-sp delta.
__ mov(Operand(esp, 5 * kPointerSize),
- Immediate(ExternalReference::isolate_address(isolate())));
+ Immediate(ExternalReference::isolate_address(isolate)));
{
- AllowExternalCallThatCantCauseGC scope(masm());
+ AllowExternalCallThatCantCauseGC scope(masm);
__ CallCFunction(ExternalReference::new_deoptimizer_function(), 6);
}
@@ -118,8 +115,8 @@ void Deoptimizer::TableEntryGenerator::Generate() {
// and check that the generated code never deoptimizes with unbalanced stack.
__ fnclex();
- // Remove the bailout id, return address and the double registers.
- __ add(esp, Immediate(kDoubleRegsSize + 2 * kPointerSize));
+ // Remove the return address and the double registers.
+ __ add(esp, Immediate(kDoubleRegsSize + 1 * kPointerSize));
// Compute a pointer to the unwinding limit in register ecx; that is
// the first stack slot not part of the input frame.
@@ -145,7 +142,7 @@ void Deoptimizer::TableEntryGenerator::Generate() {
__ PrepareCallCFunction(1, esi);
__ mov(Operand(esp, 0 * kPointerSize), eax);
{
- AllowExternalCallThatCantCauseGC scope(masm());
+ AllowExternalCallThatCantCauseGC scope(masm);
__ CallCFunction(ExternalReference::compute_output_frames_function(), 1);
}
__ pop(eax);
@@ -197,27 +194,14 @@ void Deoptimizer::TableEntryGenerator::Generate() {
}
// Restore the registers from the stack.
- Assembler::AllowExplicitEbxAccessScope restoring_spilled_value(masm());
__ popad();
+ __ InitializeRootRegister();
+
// Return to the continuation point.
__ ret(0);
}
-
-void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
- // Create a sequence of deoptimization entries.
- Label done;
- for (int i = 0; i < count(); i++) {
- int start = masm()->pc_offset();
- USE(start);
- __ push_imm32(i);
- __ jmp(&done);
- DCHECK(masm()->pc_offset() - start == table_entry_size_);
- }
- __ bind(&done);
-}
-
bool Deoptimizer::PadTopOfStackRegister() { return false; }
void FrameDescription::SetCallerPc(unsigned offset, intptr_t value) {
diff --git a/deps/v8/src/ia32/frame-constants-ia32.h b/deps/v8/src/ia32/frame-constants-ia32.h
index 5683fdd9e3..20f9450e7d 100644
--- a/deps/v8/src/ia32/frame-constants-ia32.h
+++ b/deps/v8/src/ia32/frame-constants-ia32.h
@@ -13,13 +13,23 @@ namespace internal {
class EntryFrameConstants : public AllStatic {
public:
+ // This is the offset to where JSEntry pushes the current value of
+ // Isolate::c_entry_fp onto the stack.
static constexpr int kCallerFPOffset = -6 * kPointerSize;
- static constexpr int kNewTargetArgOffset = +2 * kPointerSize;
- static constexpr int kFunctionArgOffset = +3 * kPointerSize;
- static constexpr int kReceiverArgOffset = +4 * kPointerSize;
- static constexpr int kArgcOffset = +5 * kPointerSize;
- static constexpr int kArgvOffset = +6 * kPointerSize;
+ // EntryFrame is used by JSEntry, JSConstructEntry and JSRunMicrotasksEntry.
+ // All of them take |root_register_value| as the first parameter.
+ static constexpr int kRootRegisterValueOffset = +2 * kPointerSize;
+
+ // Rest of parameters passed to JSEntry and JSConstructEntry.
+ static constexpr int kNewTargetArgOffset = +3 * kPointerSize;
+ static constexpr int kFunctionArgOffset = +4 * kPointerSize;
+ static constexpr int kReceiverArgOffset = +5 * kPointerSize;
+ static constexpr int kArgcOffset = +6 * kPointerSize;
+ static constexpr int kArgvOffset = +7 * kPointerSize;
+
+ // Rest of parameters passed to JSRunMicrotasksEntry.
+ static constexpr int kMicrotaskQueueArgOffset = +3 * kPointerSize;
};
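
Read against a standard cdecl prologue (push ebp; mov ebp, esp), the new constants describe this stack picture inside JSEntry; slot numbers are in kPointerSize units from ebp (a sketch reconstructed from the offsets above):

    // ebp + 0 : saved caller ebp
    // ebp + 1 : return address
    // ebp + 2 : root_register_value   (kRootRegisterValueOffset, 1st C arg)
    // ebp + 3 : new_target            (kNewTargetArgOffset)
    // ebp + 4 : function              (kFunctionArgOffset)
    // ebp + 5 : receiver              (kReceiverArgOffset)
    // ebp + 6 : argc                  (kArgcOffset)
    // ebp + 7 : argv                  (kArgvOffset)
    // JSRunMicrotasksEntry instead takes the microtask queue at ebp + 3
    // (kMicrotaskQueueArgOffset).
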
class ExitFrameConstants : public TypedFrameConstants {
@@ -40,7 +50,7 @@ class ExitFrameConstants : public TypedFrameConstants {
class WasmCompileLazyFrameConstants : public TypedFrameConstants {
public:
- static constexpr int kNumberOfSavedGpParamRegs = 5;
+ static constexpr int kNumberOfSavedGpParamRegs = 4;
static constexpr int kNumberOfSavedFpParamRegs = 6;
// FP-relative.
diff --git a/deps/v8/src/ia32/interface-descriptors-ia32.cc b/deps/v8/src/ia32/interface-descriptors-ia32.cc
index e2c04f7525..48a279b6e4 100644
--- a/deps/v8/src/ia32/interface-descriptors-ia32.cc
+++ b/deps/v8/src/ia32/interface-descriptors-ia32.cc
@@ -6,6 +6,8 @@
#include "src/interface-descriptors.h"
+#include "src/frames.h"
+
namespace v8 {
namespace internal {
@@ -23,7 +25,7 @@ void CallInterfaceDescriptor::DefaultInitializePlatformSpecific(
void RecordWriteDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- static const Register default_stub_registers[] = {ecx, edx, edi,
+ static const Register default_stub_registers[] = {ecx, edx, esi, edi,
kReturnRegister0};
data->RestrictAllocatableRegisters(default_stub_registers,
@@ -71,12 +73,6 @@ void TypeofDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void CallFunctionDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {edi};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
void CallTrampolineDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// eax : number of arguments
@@ -210,10 +206,9 @@ void ArgumentsAdaptorDescriptor::InitializePlatformSpecific(
void ApiCallbackDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
- JavaScriptFrame::context_register(), // callee context
- eax, // call_data
- ecx, // holder
- edx, // api_function_address
+ JavaScriptFrame::context_register(), // kTargetContext
+ edx, // kApiFunctionAddress
+ ecx, // kArgc
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
@@ -262,6 +257,11 @@ void FrameDropperTrampolineDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+void RunMicrotasksEntryDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ data->InitializePlatformSpecific(0, nullptr);
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/ia32/macro-assembler-ia32.cc b/deps/v8/src/ia32/macro-assembler-ia32.cc
index 82cea88ac4..2bde18e0fd 100644
--- a/deps/v8/src/ia32/macro-assembler-ia32.cc
+++ b/deps/v8/src/ia32/macro-assembler-ia32.cc
@@ -10,17 +10,22 @@
#include "src/bootstrapper.h"
#include "src/callable.h"
#include "src/code-factory.h"
-#include "src/code-stubs.h"
+#include "src/counters.h"
#include "src/debug/debug.h"
#include "src/external-reference-table.h"
#include "src/frame-constants.h"
#include "src/frames-inl.h"
-#include "src/instruction-stream.h"
+#include "src/ia32/assembler-ia32-inl.h"
+#include "src/macro-assembler.h"
#include "src/runtime/runtime.h"
+#include "src/snapshot/embedded-data.h"
#include "src/snapshot/snapshot.h"
-#include "src/ia32/assembler-ia32-inl.h"
+// Satisfy the cpplint check, but don't include the platform-specific header.
+// It is included recursively via macro-assembler.h.
+#if 0
#include "src/ia32/macro-assembler-ia32.h"
+#endif
namespace v8 {
namespace internal {
@@ -28,61 +33,56 @@ namespace internal {
// -------------------------------------------------------------------------
// MacroAssembler implementation.
-MacroAssembler::MacroAssembler(Isolate* isolate,
- const AssemblerOptions& options, void* buffer,
- int size, CodeObjectRequired create_code_object)
- : TurboAssembler(isolate, options, buffer, size, create_code_object) {
- if (create_code_object == CodeObjectRequired::kYes) {
- // Unlike TurboAssembler, which can be used off the main thread and may not
- // allocate, macro assembler creates its own copy of the self-reference
- // marker in order to disambiguate between self-references during nested
- // code generation (e.g.: codegen of the current object triggers stub
- // compilation through CodeStub::GetCode()).
- code_object_ = Handle<HeapObject>::New(
- *isolate->factory()->NewSelfReferenceMarker(), isolate);
- }
-
-#ifdef V8_EMBEDDED_BUILTINS
- // Fake it as long as we use indirections through an embedded external
- // reference. This will let us implement indirections without a real
- // root register.
- // TODO(jgruber, v8:6666): Remove once a real root register exists.
- if (FLAG_embedded_builtins) set_root_array_available(true);
-#endif // V8_EMBEDDED_BUILTINS
+void TurboAssembler::InitializeRootRegister() {
+ ExternalReference isolate_root = ExternalReference::isolate_root(isolate());
+ Move(kRootRegister, Immediate(isolate_root));
}
void TurboAssembler::LoadRoot(Register destination, RootIndex index) {
- // TODO(jgruber, v8:6666): Support loads through the root register once it
- // exists.
- if (isolate()->heap()->RootCanBeTreatedAsConstant(index)) {
- Handle<Object> object = isolate()->heap()->root_handle(index);
+ if (root_array_available()) {
+ mov(destination,
+ Operand(kRootRegister, RootRegisterOffsetForRootIndex(index)));
+ return;
+ }
+
+ if (RootsTable::IsImmortalImmovable(index)) {
+ Handle<Object> object = isolate()->root_handle(index);
if (object->IsSmi()) {
mov(destination, Immediate(Smi::cast(*object)));
return;
- } else if (!options().isolate_independent_code) {
+ } else {
DCHECK(object->IsHeapObject());
mov(destination, Handle<HeapObject>::cast(object));
return;
}
}
- ExternalReference roots_array_start =
- ExternalReference::roots_array_start(isolate());
- mov(destination, Immediate(static_cast<int>(index)));
- mov(destination,
- StaticArray(destination, times_pointer_size, roots_array_start));
+
+ ExternalReference isolate_root = ExternalReference::isolate_root(isolate());
+ lea(destination,
+ Operand(isolate_root.address(), RelocInfo::EXTERNAL_REFERENCE));
+ mov(destination, Operand(destination, RootRegisterOffsetForRootIndex(index)));
}
-void MacroAssembler::CompareRoot(Register with, Register scratch,
+void TurboAssembler::CompareRoot(Register with, Register scratch,
RootIndex index) {
- ExternalReference roots_array_start =
- ExternalReference::roots_array_start(isolate());
- mov(scratch, Immediate(static_cast<int>(index)));
- cmp(with, StaticArray(scratch, times_pointer_size, roots_array_start));
+ if (root_array_available()) {
+ CompareRoot(with, index);
+ } else {
+ ExternalReference isolate_root = ExternalReference::isolate_root(isolate());
+ lea(scratch,
+ Operand(isolate_root.address(), RelocInfo::EXTERNAL_REFERENCE));
+ cmp(with, Operand(scratch, RootRegisterOffsetForRootIndex(index)));
+ }
}
-void MacroAssembler::CompareRoot(Register with, RootIndex index) {
- DCHECK(isolate()->heap()->RootCanBeTreatedAsConstant(index));
- Handle<Object> object = isolate()->heap()->root_handle(index);
+void TurboAssembler::CompareRoot(Register with, RootIndex index) {
+ if (root_array_available()) {
+ cmp(with, Operand(kRootRegister, RootRegisterOffsetForRootIndex(index)));
+ return;
+ }
+
+ DCHECK(RootsTable::IsImmortalImmovable(index));
+ Handle<Object> object = isolate()->root_handle(index);
if (object->IsHeapObject()) {
cmp(with, Handle<HeapObject>::cast(object));
} else {
@@ -90,19 +90,38 @@ void MacroAssembler::CompareRoot(Register with, RootIndex index) {
}
}
-void MacroAssembler::CompareRoot(Operand with, RootIndex index) {
- DCHECK(isolate()->heap()->RootCanBeTreatedAsConstant(index));
- Handle<Object> object = isolate()->heap()->root_handle(index);
- if (object->IsHeapObject()) {
- cmp(with, Handle<HeapObject>::cast(object));
+void TurboAssembler::CompareStackLimit(Register with) {
+ if (root_array_available()) {
+ CompareRoot(with, RootIndex::kStackLimit);
} else {
- cmp(with, Immediate(Smi::cast(*object)));
+ DCHECK(!options().isolate_independent_code);
+ ExternalReference ref =
+ ExternalReference::address_of_stack_limit(isolate());
+ cmp(with, Operand(ref.address(), RelocInfo::EXTERNAL_REFERENCE));
+ }
+}
+
+void TurboAssembler::CompareRealStackLimit(Register with) {
+ if (root_array_available()) {
+ CompareRoot(with, RootIndex::kRealStackLimit);
+ } else {
+ DCHECK(!options().isolate_independent_code);
+ ExternalReference ref =
+ ExternalReference::address_of_real_stack_limit(isolate());
+ cmp(with, Operand(ref.address(), RelocInfo::EXTERNAL_REFERENCE));
}
}
void MacroAssembler::PushRoot(RootIndex index) {
- DCHECK(isolate()->heap()->RootCanBeTreatedAsConstant(index));
- Handle<Object> object = isolate()->heap()->root_handle(index);
+ if (root_array_available()) {
+ DCHECK(RootsTable::IsImmortalImmovable(index));
+ push(Operand(kRootRegister, RootRegisterOffsetForRootIndex(index)));
+ return;
+ }
+
+ // TODO(v8:6666): Add a scratch register or remove all uses.
+ DCHECK(RootsTable::IsImmortalImmovable(index));
+ Handle<Object> object = isolate()->root_handle(index);
if (object->IsHeapObject()) {
Push(Handle<HeapObject>::cast(object));
} else {
@@ -110,13 +129,67 @@ void MacroAssembler::PushRoot(RootIndex index) {
}
}
+Operand TurboAssembler::ExternalReferenceAsOperand(ExternalReference reference,
+ Register scratch) {
+ // TODO(jgruber): Add support for enable_root_array_delta_access.
+ if (root_array_available() && options().isolate_independent_code) {
+ if (IsAddressableThroughRootRegister(isolate(), reference)) {
+ // Some external references can be efficiently loaded as an offset from
+ // kRootRegister.
+ intptr_t offset =
+ RootRegisterOffsetForExternalReference(isolate(), reference);
+ return Operand(kRootRegister, offset);
+ } else {
+ // Otherwise, do a memory load from the external reference table.
+ mov(scratch, Operand(kRootRegister,
+ RootRegisterOffsetForExternalReferenceTableEntry(
+ isolate(), reference)));
+ return Operand(scratch, 0);
+ }
+ }
+ Move(scratch, Immediate(reference));
+ return Operand(scratch, 0);
+}
+
+// TODO(v8:6666): If possible, refactor into a platform-independent function in
+// TurboAssembler.
+Operand TurboAssembler::ExternalReferenceAddressAsOperand(
+ ExternalReference reference) {
+ DCHECK(FLAG_embedded_builtins);
+ DCHECK(root_array_available());
+ DCHECK(options().isolate_independent_code);
+ return Operand(
+ kRootRegister,
+ RootRegisterOffsetForExternalReferenceTableEntry(isolate(), reference));
+}
+
+// TODO(v8:6666): If possible, refactor into a platform-independent function in
+// TurboAssembler.
+Operand TurboAssembler::HeapObjectAsOperand(Handle<HeapObject> object) {
+ DCHECK(FLAG_embedded_builtins);
+ DCHECK(root_array_available());
+
+ int builtin_index;
+ RootIndex root_index;
+ if (isolate()->roots_table().IsRootHandle(object, &root_index)) {
+ return Operand(kRootRegister, RootRegisterOffsetForRootIndex(root_index));
+ } else if (isolate()->builtins()->IsBuiltinHandle(object, &builtin_index)) {
+ return Operand(kRootRegister,
+ RootRegisterOffsetForBuiltinIndex(builtin_index));
+ } else if (object.is_identical_to(code_object_) &&
+ Builtins::IsBuiltinId(maybe_builtin_index_)) {
+ return Operand(kRootRegister,
+ RootRegisterOffsetForBuiltinIndex(maybe_builtin_index_));
+ } else {
+ // Objects in the constants table need an additional indirection, which
+ // cannot be represented as a single Operand.
+ UNREACHABLE();
+ }
+}
+
void TurboAssembler::LoadFromConstantsTable(Register destination,
int constant_index) {
- DCHECK(!is_ebx_addressable_);
- DCHECK(isolate()->heap()->RootCanBeTreatedAsConstant(
- RootIndex::kBuiltinsConstantsTable));
- // TODO(jgruber): LoadRoot should be a register-relative load once we have
- // the kRootRegister.
+ DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kBuiltinsConstantsTable));
LoadRoot(destination, RootIndex::kBuiltinsConstantsTable);
mov(destination,
FieldOperand(destination,
@@ -125,44 +198,30 @@ void TurboAssembler::LoadFromConstantsTable(Register destination,
void TurboAssembler::LoadRootRegisterOffset(Register destination,
intptr_t offset) {
- DCHECK(!is_ebx_addressable_);
DCHECK(is_int32(offset));
- // TODO(jgruber): Register-relative load once kRootRegister exists.
- mov(destination, Immediate(ExternalReference::roots_array_start(isolate())));
- if (offset != 0) {
- add(destination, Immediate(offset));
+ DCHECK(root_array_available());
+ if (offset == 0) {
+ mov(destination, kRootRegister);
+ } else {
+ lea(destination, Operand(kRootRegister, static_cast<int32_t>(offset)));
}
}
void TurboAssembler::LoadRootRelative(Register destination, int32_t offset) {
- DCHECK(!is_ebx_addressable_);
- // TODO(jgruber): Register-relative load once kRootRegister exists.
- LoadRootRegisterOffset(destination, offset);
- mov(destination, Operand(destination, 0));
+ DCHECK(root_array_available());
+ mov(destination, Operand(kRootRegister, offset));
}
void TurboAssembler::LoadAddress(Register destination,
ExternalReference source) {
- if (FLAG_embedded_builtins) {
- if (root_array_available_ && options().isolate_independent_code) {
- IndirectLoadExternalReference(destination, source);
- return;
- }
+ // TODO(jgruber): Add support for enable_root_array_delta_access.
+ if (root_array_available() && options().isolate_independent_code) {
+ IndirectLoadExternalReference(destination, source);
+ return;
}
mov(destination, Immediate(source));
}
-Operand TurboAssembler::StaticVariable(const ExternalReference& ext) {
- // TODO(jgruber,v8:6666): Root-relative operand once kRootRegister exists.
- return Operand(ext.address(), RelocInfo::EXTERNAL_REFERENCE);
-}
-
-Operand TurboAssembler::StaticArray(Register index, ScaleFactor scale,
- const ExternalReference& ext) {
- // TODO(jgruber,v8:6666): Root-relative operand once kRootRegister exists.
- return Operand(index, scale, ext.address(), RelocInfo::EXTERNAL_REFERENCE);
-}
-
static constexpr Register saved_regs[] = {eax, ecx, edx};
static constexpr int kNumberOfSavedRegs = sizeof(saved_regs) / sizeof(Register);
@@ -312,25 +371,43 @@ void TurboAssembler::RestoreRegisters(RegList registers) {
void TurboAssembler::CallRecordWriteStub(
Register object, Register address,
RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode) {
+ CallRecordWriteStub(
+ object, address, remembered_set_action, fp_mode,
+ isolate()->builtins()->builtin_handle(Builtins::kRecordWrite),
+ kNullAddress);
+}
+
+void TurboAssembler::CallRecordWriteStub(
+ Register object, Register address,
+ RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode,
+ Address wasm_target) {
+ CallRecordWriteStub(object, address, remembered_set_action, fp_mode,
+ Handle<Code>::null(), wasm_target);
+}
+
+void TurboAssembler::CallRecordWriteStub(
+ Register object, Register address,
+ RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode,
+ Handle<Code> code_target, Address wasm_target) {
+ DCHECK_NE(code_target.is_null(), wasm_target == kNullAddress);
// TODO(albertnetymk): For now we ignore remembered_set_action and fp_mode,
// i.e. we always emit the remembered set and save FP registers in
// RecordWriteStub. If a large performance regression is observed, we should
// use these values to avoid unnecessary work.
- Callable const callable =
- Builtins::CallableFor(isolate(), Builtins::kRecordWrite);
- RegList registers = callable.descriptor().allocatable_registers();
+ RecordWriteDescriptor descriptor;
+ RegList registers = descriptor.allocatable_registers();
SaveRegisters(registers);
- Register object_parameter(callable.descriptor().GetRegisterParameter(
- RecordWriteDescriptor::kObject));
+ Register object_parameter(
+ descriptor.GetRegisterParameter(RecordWriteDescriptor::kObject));
Register slot_parameter(
- callable.descriptor().GetRegisterParameter(RecordWriteDescriptor::kSlot));
- Register remembered_set_parameter(callable.descriptor().GetRegisterParameter(
- RecordWriteDescriptor::kRememberedSet));
- Register fp_mode_parameter(callable.descriptor().GetRegisterParameter(
- RecordWriteDescriptor::kFPMode));
+ descriptor.GetRegisterParameter(RecordWriteDescriptor::kSlot));
+ Register remembered_set_parameter(
+ descriptor.GetRegisterParameter(RecordWriteDescriptor::kRememberedSet));
+ Register fp_mode_parameter(
+ descriptor.GetRegisterParameter(RecordWriteDescriptor::kFPMode));
push(object);
push(address);
@@ -340,7 +417,12 @@ void TurboAssembler::CallRecordWriteStub(
Move(remembered_set_parameter, Smi::FromEnum(remembered_set_action));
Move(fp_mode_parameter, Smi::FromEnum(fp_mode));
- Call(callable.code(), RelocInfo::CODE_TARGET);
+ if (code_target.is_null()) {
+ // Use {wasm_call} for a direct Wasm call within a module.
+ wasm_call(wasm_target, RelocInfo::WASM_STUB_CALL);
+ } else {
+ Call(code_target, RelocInfo::CODE_TARGET);
+ }
RestoreRegisters(registers);
}
@@ -393,7 +475,7 @@ void MacroAssembler::RecordWrite(Register object, Register address,
// Count number of write barriers in generated code.
isolate()->counters()->write_barriers_static()->Increment();
- IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1);
+ IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, value);
// Clobber clobbered registers when running with the debug-code flag
// turned on to provoke errors.
@@ -405,12 +487,15 @@ void MacroAssembler::RecordWrite(Register object, Register address,
void MacroAssembler::MaybeDropFrames() {
// Check whether we need to drop frames to restart a function on the stack.
+ Label dont_drop;
ExternalReference restart_fp =
ExternalReference::debug_restart_fp_address(isolate());
- mov(eax, StaticVariable(restart_fp));
+ mov(eax, ExternalReferenceAsOperand(restart_fp, eax));
test(eax, eax);
- j(not_zero, BUILTIN_CODE(isolate(), FrameDropperTrampoline),
- RelocInfo::CODE_TARGET);
+ j(zero, &dont_drop, Label::kNear);
+
+ Jump(BUILTIN_CODE(isolate(), FrameDropperTrampoline), RelocInfo::CODE_TARGET);
+ bind(&dont_drop);
}
void TurboAssembler::Cvtsi2ss(XMMRegister dst, Operand src) {
@@ -456,13 +541,13 @@ void TurboAssembler::Cvttss2ui(Register dst, Operand src, XMMRegister tmp) {
bind(&done);
}
-void TurboAssembler::Cvtui2sd(XMMRegister dst, Operand src) {
+void TurboAssembler::Cvtui2sd(XMMRegister dst, Operand src, Register scratch) {
Label done;
cmp(src, Immediate(0));
ExternalReference uint32_bias = ExternalReference::address_of_uint32_bias();
Cvtsi2sd(dst, src);
j(not_sign, &done, Label::kNear);
- addsd(dst, StaticVariable(uint32_bias));
+ addsd(dst, ExternalReferenceAsOperand(uint32_bias, scratch));
bind(&done);
}
@@ -612,6 +697,10 @@ void MacroAssembler::AssertGeneratorObject(Register object) {
CmpInstanceType(map, JS_GENERATOR_OBJECT_TYPE);
j(equal, &do_check, Label::kNear);
+ // Check if JSAsyncFunctionObject.
+ CmpInstanceType(map, JS_ASYNC_FUNCTION_OBJECT_TYPE);
+ j(equal, &do_check, Label::kNear);
+
// Check if JSAsyncGeneratorObject.
CmpInstanceType(map, JS_ASYNC_GENERATOR_OBJECT_TYPE);
@@ -622,14 +711,15 @@ void MacroAssembler::AssertGeneratorObject(Register object) {
Check(equal, AbortReason::kOperandIsNotAGeneratorObject);
}
-void MacroAssembler::AssertUndefinedOrAllocationSite(Register object) {
+void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
+ Register scratch) {
if (emit_debug_code()) {
Label done_checking;
AssertNotSmi(object);
- cmp(object, isolate()->factory()->undefined_value());
+ CompareRoot(object, scratch, RootIndex::kUndefinedValue);
j(equal, &done_checking);
- cmp(FieldOperand(object, 0),
- Immediate(isolate()->factory()->allocation_site_map()));
+ LoadRoot(scratch, RootIndex::kAllocationSiteWithWeakNextMap);
+ cmp(FieldOperand(object, 0), scratch);
Assert(equal, AbortReason::kExpectedUndefinedOrCell);
bind(&done_checking);
}
@@ -695,24 +785,8 @@ void TurboAssembler::AllocateStackFrame(Register bytes_scratch) {
}
#endif
-void MacroAssembler::EnterBuiltinFrame(Register context, Register target,
- Register argc) {
- Push(ebp);
- Move(ebp, esp);
- Push(context);
- Push(target);
- Push(argc);
-}
-
-void MacroAssembler::LeaveBuiltinFrame(Register context, Register target,
- Register argc) {
- Pop(argc);
- Pop(target);
- Pop(context);
- leave();
-}
-
-void MacroAssembler::EnterExitFramePrologue(StackFrame::Type frame_type) {
+void MacroAssembler::EnterExitFramePrologue(StackFrame::Type frame_type,
+ Register scratch) {
DCHECK(frame_type == StackFrame::EXIT ||
frame_type == StackFrame::BUILTIN_EXIT);
@@ -728,7 +802,8 @@ void MacroAssembler::EnterExitFramePrologue(StackFrame::Type frame_type) {
DCHECK_EQ(-2 * kPointerSize, ExitFrameConstants::kSPOffset);
push(Immediate(0)); // Saved entry sp, patched before call.
DCHECK_EQ(-3 * kPointerSize, ExitFrameConstants::kCodeOffset);
- push(Immediate(CodeObject())); // Accessed from ExitFrame::code_slot.
+ Move(scratch, CodeObject());
+ push(scratch); // Accessed from ExitFrame::code_slot.
STATIC_ASSERT(edx == kRuntimeCallFunctionRegister);
STATIC_ASSERT(esi == kContextRegister);
@@ -740,9 +815,11 @@ void MacroAssembler::EnterExitFramePrologue(StackFrame::Type frame_type) {
ExternalReference::Create(IsolateAddressId::kContextAddress, isolate());
ExternalReference c_function_address =
ExternalReference::Create(IsolateAddressId::kCFunctionAddress, isolate());
- mov(StaticVariable(c_entry_fp_address), ebp);
- mov(StaticVariable(context_address), esi);
- mov(StaticVariable(c_function_address), edx);
+
+ DCHECK(!AreAliased(scratch, ebp, esi, edx));
+ mov(ExternalReferenceAsOperand(c_entry_fp_address, scratch), ebp);
+ mov(ExternalReferenceAsOperand(context_address, scratch), esi);
+ mov(ExternalReferenceAsOperand(c_function_address, scratch), edx);
}
@@ -773,7 +850,7 @@ void MacroAssembler::EnterExitFrameEpilogue(int argc, bool save_doubles) {
void MacroAssembler::EnterExitFrame(int argc, bool save_doubles,
StackFrame::Type frame_type) {
- EnterExitFramePrologue(frame_type);
+ EnterExitFramePrologue(frame_type, edi);
// Set up argc and argv in callee-saved registers.
int offset = StandardFrameConstants::kCallerSPOffset - kPointerSize;
@@ -784,9 +861,8 @@ void MacroAssembler::EnterExitFrame(int argc, bool save_doubles,
EnterExitFrameEpilogue(argc, save_doubles);
}
-
-void MacroAssembler::EnterApiExitFrame(int argc) {
- EnterExitFramePrologue(StackFrame::EXIT);
+void MacroAssembler::EnterApiExitFrame(int argc, Register scratch) {
+ EnterExitFramePrologue(StackFrame::EXIT, scratch);
EnterExitFrameEpilogue(argc, false);
}
@@ -820,18 +896,21 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles, bool pop_arguments) {
}
void MacroAssembler::LeaveExitFrameEpilogue() {
+ // Clear the top frame.
+ ExternalReference c_entry_fp_address =
+ ExternalReference::Create(IsolateAddressId::kCEntryFPAddress, isolate());
+ mov(ExternalReferenceAsOperand(c_entry_fp_address, esi), Immediate(0));
+
// Restore current context from top and clear it in debug mode.
ExternalReference context_address =
ExternalReference::Create(IsolateAddressId::kContextAddress, isolate());
- mov(esi, StaticVariable(context_address));
+ mov(esi, ExternalReferenceAsOperand(context_address, esi));
#ifdef DEBUG
- mov(StaticVariable(context_address), Immediate(Context::kInvalidContext));
+ push(eax);
+ mov(ExternalReferenceAsOperand(context_address, eax),
+ Immediate(Context::kInvalidContext));
+ pop(eax);
#endif
-
- // Clear the top frame.
- ExternalReference c_entry_fp_address =
- ExternalReference::Create(IsolateAddressId::kCEntryFPAddress, isolate());
- mov(StaticVariable(c_entry_fp_address), Immediate(0));
}
void MacroAssembler::LeaveApiExitFrame() {
@@ -841,8 +920,7 @@ void MacroAssembler::LeaveApiExitFrame() {
LeaveExitFrameEpilogue();
}
-
-void MacroAssembler::PushStackHandler() {
+void MacroAssembler::PushStackHandler(Register scratch) {
// Adjust this code if not the case.
STATIC_ASSERT(StackHandlerConstants::kSize == 2 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
@@ -852,40 +930,20 @@ void MacroAssembler::PushStackHandler() {
// Link the current handler as the next handler.
ExternalReference handler_address =
ExternalReference::Create(IsolateAddressId::kHandlerAddress, isolate());
- push(StaticVariable(handler_address));
+ push(ExternalReferenceAsOperand(handler_address, scratch));
// Set this new handler as the current one.
- mov(StaticVariable(handler_address), esp);
+ mov(ExternalReferenceAsOperand(handler_address, scratch), esp);
}
-
-void MacroAssembler::PopStackHandler() {
+void MacroAssembler::PopStackHandler(Register scratch) {
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
ExternalReference handler_address =
ExternalReference::Create(IsolateAddressId::kHandlerAddress, isolate());
- pop(StaticVariable(handler_address));
+ pop(ExternalReferenceAsOperand(handler_address, scratch));
add(esp, Immediate(StackHandlerConstants::kSize - kPointerSize));
}
-
-void MacroAssembler::CallStub(CodeStub* stub) {
- DCHECK(AllowThisStubCall(stub)); // Calls are not allowed in some stubs.
- Call(stub->GetCode(), RelocInfo::CODE_TARGET);
-}
-
-void TurboAssembler::CallStubDelayed(CodeStub* stub) {
- DCHECK(AllowThisStubCall(stub)); // Calls are not allowed in some stubs.
- call(stub);
-}
-
-void MacroAssembler::TailCallStub(CodeStub* stub) {
- Jump(stub->GetCode(), RelocInfo::CODE_TARGET);
-}
-
-bool TurboAssembler::AllowThisStubCall(CodeStub* stub) {
- return has_frame() || !stub->SometimesSetsUpAFrame();
-}
-
void MacroAssembler::CallRuntime(const Runtime::Function* f,
int num_arguments,
SaveFPRegsMode save_doubles) {
@@ -916,8 +974,7 @@ void TurboAssembler::CallRuntimeWithCEntry(Runtime::FunctionId fid,
Move(kRuntimeCallFunctionRegister, Immediate(ExternalReference::Create(f)));
DCHECK(!AreAliased(centry, kRuntimeCallArgCountRegister,
kRuntimeCallFunctionRegister));
- add(centry, Immediate(Code::kHeaderSize - kHeapObjectTag));
- Call(centry);
+ CallCodeObject(centry);
}
void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
@@ -1102,7 +1159,9 @@ void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
ExternalReference debug_hook_active =
ExternalReference::debug_hook_on_function_call_address(isolate());
- cmpb(StaticVariable(debug_hook_active), Immediate(0));
+ push(eax);
+ cmpb(ExternalReferenceAsOperand(debug_hook_active, eax), Immediate(0));
+ pop(eax);
j(equal, &skip_hook);
{
@@ -1161,7 +1220,7 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
// Clear the new.target register if not given.
if (!new_target.is_valid()) {
- mov(edx, isolate()->factory()->undefined_value());
+ Move(edx, isolate()->factory()->undefined_value());
}
Label done;
@@ -1174,12 +1233,11 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
// call sites.
static_assert(kJavaScriptCallCodeStartRegister == ecx, "ABI mismatch");
mov(ecx, FieldOperand(function, JSFunction::kCodeOffset));
- add(ecx, Immediate(Code::kHeaderSize - kHeapObjectTag));
if (flag == CALL_FUNCTION) {
- call(ecx);
+ CallCodeObject(ecx);
} else {
DCHECK(flag == JUMP_FUNCTION);
- jmp(ecx);
+ JumpCodeObject(ecx);
}
bind(&done);
}
@@ -1234,6 +1292,18 @@ void TurboAssembler::Ret(int bytes_dropped, Register scratch) {
}
}
+void TurboAssembler::Push(Immediate value) {
+ if (root_array_available() && options().isolate_independent_code) {
+ if (value.is_embedded_object()) {
+ Push(HeapObjectAsOperand(value.embedded_object()));
+ return;
+ } else if (value.is_external_reference()) {
+ Push(ExternalReferenceAddressAsOperand(value.external_reference()));
+ return;
+ }
+ }
+ push(value);
+}
void MacroAssembler::Drop(int stack_elements) {
if (stack_elements > 0) {
@@ -1257,10 +1327,27 @@ void TurboAssembler::Move(Register dst, const Immediate& src) {
}
}
-void TurboAssembler::Move(Operand dst, const Immediate& src) { mov(dst, src); }
+void TurboAssembler::Move(Operand dst, const Immediate& src) {
+ // Since there's no scratch register available, take a detour through the
+ // stack.
+ if (root_array_available() && options().isolate_independent_code) {
+ if (src.is_embedded_object() || src.is_external_reference() ||
+ src.is_heap_object_request()) {
+ Push(src);
+ pop(dst);
+ return;
+ }
+ }
+
+ if (src.is_embedded_object()) {
+ mov(dst, src.embedded_object());
+ } else {
+ mov(dst, src);
+ }
+}
void TurboAssembler::Move(Register dst, Handle<HeapObject> src) {
- if (root_array_available_ && options().isolate_independent_code) {
+ if (root_array_available() && options().isolate_independent_code) {
IndirectLoadConstant(dst, src);
return;
}
@@ -1579,16 +1666,18 @@ void TurboAssembler::Popcnt(Register dst, Operand src) {
}
void MacroAssembler::LoadWeakValue(Register in_out, Label* target_if_cleared) {
- cmp(in_out, Immediate(kClearedWeakHeapObject));
+ cmp(in_out, Immediate(kClearedWeakHeapObjectLower32));
j(equal, target_if_cleared);
and_(in_out, Immediate(~kWeakHeapObjectMask));
}
-void MacroAssembler::IncrementCounter(StatsCounter* counter, int value) {
+void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
+ Register scratch) {
DCHECK_GT(value, 0);
if (FLAG_native_code_counters && counter->Enabled()) {
- Operand operand = StaticVariable(ExternalReference::Create(counter));
+ Operand operand =
+ ExternalReferenceAsOperand(ExternalReference::Create(counter), scratch);
if (value == 1) {
inc(operand);
} else {
@@ -1597,11 +1686,12 @@ void MacroAssembler::IncrementCounter(StatsCounter* counter, int value) {
}
}
-
-void MacroAssembler::DecrementCounter(StatsCounter* counter, int value) {
+void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
+ Register scratch) {
DCHECK_GT(value, 0);
if (FLAG_native_code_counters && counter->Enabled()) {
- Operand operand = StaticVariable(ExternalReference::Create(counter));
+ Operand operand =
+ ExternalReferenceAsOperand(ExternalReference::Create(counter), scratch);
if (value == 1) {
dec(operand);
} else {
@@ -1696,7 +1786,7 @@ void TurboAssembler::PrepareCallCFunction(int num_arguments, Register scratch) {
void TurboAssembler::CallCFunction(ExternalReference function,
int num_arguments) {
// Trashing eax is ok as it will be the return value.
- mov(eax, Immediate(function));
+ Move(eax, Immediate(function));
CallCFunction(eax, num_arguments);
}
@@ -1708,7 +1798,39 @@ void TurboAssembler::CallCFunction(Register function, int num_arguments) {
CheckStackAlignment();
}
+ // Save the frame pointer and PC so that the stack layout remains iterable,
+ // even without an ExitFrame, which normally exists between JS and C frames.
+ if (isolate() != nullptr) {
+ // Get the current PC via call, pop. This gets the return address pushed to
+ // the stack by call.
+ Label get_pc;
+ call(&get_pc);
+ bind(&get_pc);
+ // Find two caller-saved scratch registers.
+ Register scratch1 = eax;
+ Register scratch2 = ecx;
+ if (function == eax) scratch1 = edx;
+ if (function == ecx) scratch2 = edx;
+ pop(scratch1);
+ mov(ExternalReferenceAsOperand(
+ ExternalReference::fast_c_call_caller_pc_address(isolate()),
+ scratch2),
+ scratch1);
+ mov(ExternalReferenceAsOperand(
+ ExternalReference::fast_c_call_caller_fp_address(isolate()),
+ scratch2),
+ ebp);
+ }
+
call(function);
+
+ if (isolate() != nullptr) {
+ // We don't unset the PC; the FP is the source of truth.
+ mov(ExternalReferenceAsOperand(
+ ExternalReference::fast_c_call_caller_fp_address(isolate()), edx),
+ Immediate(0));
+ }
+
if (base::OS::ActivationFrameAlignment() != 0) {
mov(esp, Operand(esp, num_arguments * kPointerSize));
} else {
@@ -1717,59 +1839,105 @@ void TurboAssembler::CallCFunction(Register function, int num_arguments) {
}
void TurboAssembler::Call(Handle<Code> code_object, RelocInfo::Mode rmode) {
- if (FLAG_embedded_builtins) {
- // TODO(jgruber): Pc-relative builtin-to-builtin calls.
- if (root_array_available_ && options().isolate_independent_code) {
- // TODO(jgruber): There's no scratch register on ia32. Any call that
- // requires loading a code object from the builtins constant table must:
- // 1) spill two scratch registers, 2) load the target into scratch1, 3)
- // store the target into a virtual register on the isolate using scratch2,
- // 4) restore both scratch registers, and finally 5) call through the
- // virtual register. All affected call sites should vanish once all
- // builtins are embedded on ia32.
- UNREACHABLE();
- } else if (options().inline_offheap_trampolines) {
- int builtin_index = Builtins::kNoBuiltinId;
- if (isolate()->builtins()->IsBuiltinHandle(code_object, &builtin_index) &&
- Builtins::IsIsolateIndependent(builtin_index)) {
- // Inline the trampoline.
- RecordCommentForOffHeapTrampoline(builtin_index);
- CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
- EmbeddedData d = EmbeddedData::FromBlob();
- Address entry = d.InstructionStartOfBuiltin(builtin_index);
- call(entry, RelocInfo::OFF_HEAP_TARGET);
- return;
- }
+ DCHECK_IMPLIES(options().isolate_independent_code,
+ Builtins::IsIsolateIndependentBuiltin(*code_object));
+ if (options().inline_offheap_trampolines) {
+ int builtin_index = Builtins::kNoBuiltinId;
+ if (isolate()->builtins()->IsBuiltinHandle(code_object, &builtin_index) &&
+ Builtins::IsIsolateIndependent(builtin_index)) {
+ // Inline the trampoline.
+ RecordCommentForOffHeapTrampoline(builtin_index);
+ CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
+ EmbeddedData d = EmbeddedData::FromBlob();
+ Address entry = d.InstructionStartOfBuiltin(builtin_index);
+ call(entry, RelocInfo::OFF_HEAP_TARGET);
+ return;
}
}
DCHECK(RelocInfo::IsCodeTarget(rmode));
call(code_object, rmode);
}
+void TurboAssembler::CallBuiltinPointer(Register builtin_pointer) {
+ STATIC_ASSERT(kSystemPointerSize == 4);
+ STATIC_ASSERT(kSmiShiftSize == 0);
+ STATIC_ASSERT(kSmiTagSize == 1);
+ STATIC_ASSERT(kSmiTag == 0);
+
+ // The builtin_pointer register contains the builtin index as a Smi.
+ // Untagging is folded into the indexing operand below (we use times_2 instead
+ // of times_4 since smis are already shifted by one).
+ mov(builtin_pointer, Operand(kRootRegister, builtin_pointer, times_2,
+ IsolateData::builtin_entry_table_offset()));
+ call(builtin_pointer);
+}
+
+void TurboAssembler::LoadCodeObjectEntry(Register destination,
+ Register code_object) {
+ // Code objects are called differently depending on whether we are generating
+ // builtin code (which will later be embedded into the binary) or compiling
+ // user JS code at runtime.
+ // * Builtin code runs in --jitless mode and thus must not call into on-heap
+ // Code targets. Instead, we dispatch through the builtins entry table.
+ // * Codegen at runtime does not have this restriction and we can use the
+ // shorter, branchless instruction sequence. The assumption here is that
+ // targets are usually generated code and not builtin Code objects.
+
+ if (options().isolate_independent_code) {
+ DCHECK(root_array_available());
+ Label if_code_is_builtin, out;
+
+ // Check whether the Code object is a builtin. If so, call its (off-heap)
+ // entry point directly without going through the (on-heap) trampoline.
+ // Otherwise, just call the Code object as always.
+ cmp(FieldOperand(code_object, Code::kBuiltinIndexOffset),
+ Immediate(Builtins::kNoBuiltinId));
+ j(not_equal, &if_code_is_builtin);
+
+ // A non-builtin Code object; the entry point is at
+ // Code::raw_instruction_start().
+ Move(destination, code_object);
+ add(destination, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ jmp(&out);
+
+ // A builtin Code object; the entry point is loaded from the builtin
+ // entry table.
+ bind(&if_code_is_builtin);
+ mov(destination, FieldOperand(code_object, Code::kBuiltinIndexOffset));
+ mov(destination, Operand(kRootRegister, destination, times_pointer_size,
+ IsolateData::builtin_entry_table_offset()));
+
+ bind(&out);
+ } else {
+ Move(destination, code_object);
+ add(destination, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ }
+}
+
+void TurboAssembler::CallCodeObject(Register code_object) {
+ LoadCodeObjectEntry(code_object, code_object);
+ call(code_object);
+}
+
+void TurboAssembler::JumpCodeObject(Register code_object) {
+ LoadCodeObjectEntry(code_object, code_object);
+ jmp(code_object);
+}
+
void TurboAssembler::Jump(Handle<Code> code_object, RelocInfo::Mode rmode) {
- if (FLAG_embedded_builtins) {
- // TODO(jgruber): Pc-relative builtin-to-builtin calls.
- if (root_array_available_ && options().isolate_independent_code) {
- // TODO(jgruber): There's no scratch register on ia32. Any call that
- // requires loading a code object from the builtins constant table must:
- // 1) spill two scratch registers, 2) load the target into scratch1, 3)
- // store the target into a virtual register on the isolate using scratch2,
- // 4) restore both scratch registers, and finally 5) call through the
- // virtual register. All affected call sites should vanish once all
- // builtins are embedded on ia32.
- UNREACHABLE();
- } else if (options().inline_offheap_trampolines) {
- int builtin_index = Builtins::kNoBuiltinId;
- if (isolate()->builtins()->IsBuiltinHandle(code_object, &builtin_index) &&
- Builtins::IsIsolateIndependent(builtin_index)) {
- // Inline the trampoline.
- RecordCommentForOffHeapTrampoline(builtin_index);
- CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
- EmbeddedData d = EmbeddedData::FromBlob();
- Address entry = d.InstructionStartOfBuiltin(builtin_index);
- jmp(entry, RelocInfo::OFF_HEAP_TARGET);
- return;
- }
+ DCHECK_IMPLIES(options().isolate_independent_code,
+ Builtins::IsIsolateIndependentBuiltin(*code_object));
+ if (options().inline_offheap_trampolines) {
+ int builtin_index = Builtins::kNoBuiltinId;
+ if (isolate()->builtins()->IsBuiltinHandle(code_object, &builtin_index) &&
+ Builtins::IsIsolateIndependent(builtin_index)) {
+ // Inline the trampoline.
+ RecordCommentForOffHeapTrampoline(builtin_index);
+ CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
+ EmbeddedData d = EmbeddedData::FromBlob();
+ Address entry = d.InstructionStartOfBuiltin(builtin_index);
+ jmp(entry, RelocInfo::OFF_HEAP_TARGET);
+ return;
}
}
DCHECK(RelocInfo::IsCodeTarget(rmode));
@@ -1862,8 +2030,11 @@ void TurboAssembler::ComputeCodeStartAddress(Register dst) {
}
}
-void TurboAssembler::ResetSpeculationPoisonRegister() {
- mov(kSpeculationPoisonRegister, Immediate(-1));
+void TurboAssembler::CallForDeoptimization(Address target, int deopt_id) {
+ NoRootArrayScope no_root_array(this);
+ // Save the deopt id in ebx (we don't need the roots array from now on).
+ mov(ebx, deopt_id);
+ call(target, RelocInfo::RUNTIME_ENTRY);
}
} // namespace internal
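
The times_2 scale in CallBuiltinPointer above is where the Smi untagging happens. A hedged arithmetic check, assuming 32-bit Smis tagged as index << 1:

// smi               = builtin_index << 1
// table byte offset = builtin_index * kSystemPointerSize  (4 bytes on ia32)
//                   = (builtin_index << 1) * 2
//                   = smi scaled by times_2
// So Operand(kRootRegister, smi, times_2, table_offset) indexes the builtin
// entry table directly from the tagged value, with no separate untag step.
static_assert(kSystemPointerSize == 4, "ia32 system pointers are 4 bytes");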
diff --git a/deps/v8/src/ia32/macro-assembler-ia32.h b/deps/v8/src/ia32/macro-assembler-ia32.h
index bdb04fb222..d26152663a 100644
--- a/deps/v8/src/ia32/macro-assembler-ia32.h
+++ b/deps/v8/src/ia32/macro-assembler-ia32.h
@@ -2,6 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#ifndef INCLUDED_FROM_MACRO_ASSEMBLER_H
+#error This header must be included via macro-assembler.h
+#endif
+
#ifndef V8_IA32_MACRO_ASSEMBLER_IA32_H_
#define V8_IA32_MACRO_ASSEMBLER_IA32_H_
@@ -9,46 +13,10 @@
#include "src/bailout-reason.h"
#include "src/globals.h"
#include "src/ia32/assembler-ia32.h"
-#include "src/turbo-assembler.h"
namespace v8 {
namespace internal {
-// Give alias names to registers for calling conventions.
-constexpr Register kReturnRegister0 = eax;
-constexpr Register kReturnRegister1 = edx;
-constexpr Register kReturnRegister2 = edi;
-constexpr Register kJSFunctionRegister = edi;
-constexpr Register kContextRegister = esi;
-constexpr Register kAllocateSizeRegister = edx;
-constexpr Register kSpeculationPoisonRegister = ebx;
-constexpr Register kInterpreterAccumulatorRegister = eax;
-constexpr Register kInterpreterBytecodeOffsetRegister = edx;
-constexpr Register kInterpreterBytecodeArrayRegister = edi;
-constexpr Register kInterpreterDispatchTableRegister = esi;
-
-constexpr Register kJavaScriptCallArgCountRegister = eax;
-constexpr Register kJavaScriptCallCodeStartRegister = ecx;
-constexpr Register kJavaScriptCallTargetRegister = kJSFunctionRegister;
-constexpr Register kJavaScriptCallNewTargetRegister = edx;
-
-// The ExtraArg1Register not part of the real JS calling convention and is
-// mostly there to simplify consistent interface descriptor definitions across
-// platforms. Note that on ia32 it aliases kJavaScriptCallCodeStartRegister.
-constexpr Register kJavaScriptCallExtraArg1Register = ecx;
-
-// The off-heap trampoline does not need a register on ia32 (it uses a
-// pc-relative call instead).
-constexpr Register kOffHeapTrampolineRegister = no_reg;
-
-constexpr Register kRuntimeCallFunctionRegister = edx;
-constexpr Register kRuntimeCallArgCountRegister = eax;
-constexpr Register kRuntimeCallArgvRegister = ecx;
-constexpr Register kWasmInstanceRegister = esi;
-
-// TODO(v8:6666): Implement full support.
-constexpr Register kRootRegister = ebx;
-
// Convenience for platform-independent signatures. We do not normally
// distinguish memory operands from other operands on ia32.
typedef Operand MemOperand;
@@ -58,14 +26,9 @@ enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
public:
- TurboAssembler(const AssemblerOptions& options, void* buffer, int buffer_size)
- : TurboAssemblerBase(options, buffer, buffer_size) {}
-
- TurboAssembler(Isolate* isolate, const AssemblerOptions& options,
- void* buffer, int buffer_size,
- CodeObjectRequired create_code_object)
- : TurboAssemblerBase(isolate, options, buffer, buffer_size,
- create_code_object) {}
+ template <typename... Args>
+ explicit TurboAssembler(Args&&... args)
+ : TurboAssemblerBase(std::forward<Args>(args)...) {}
void CheckPageFlag(Register object, Register scratch, int mask, Condition cc,
Label* condition_met,
@@ -109,28 +72,9 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// Check that the stack is aligned.
void CheckStackAlignment();
- void InitializeRootRegister() {
- // For now, only check sentinel value for root register.
- // TODO(jgruber,v8:6666): Implement root register.
- if (FLAG_ia32_verify_root_register && FLAG_embedded_builtins) {
- mov(kRootRegister, kRootRegisterSentinel);
- }
- }
-
- void VerifyRootRegister() {
- if (FLAG_ia32_verify_root_register && FLAG_embedded_builtins) {
- Assembler::AllowExplicitEbxAccessScope read_only_access(this);
- Label root_register_ok;
- cmp(kRootRegister, kRootRegisterSentinel);
- j(equal, &root_register_ok);
- int3();
- bind(&root_register_ok);
- }
- }
-
// Move a constant into a destination using the most efficient encoding.
void Move(Register dst, const Immediate& src);
- void Move(Register dst, Smi* src) { Move(dst, Immediate(src)); }
+ void Move(Register dst, Smi src) { Move(dst, Immediate(src)); }
void Move(Register dst, Handle<HeapObject> src);
void Move(Register dst, Register src);
void Move(Operand dst, const Immediate& src);
@@ -145,21 +89,20 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void Call(Label* target) { call(target); }
void Call(Handle<Code> code_object, RelocInfo::Mode rmode);
- void Jump(Handle<Code> code_object, RelocInfo::Mode rmode);
+ void CallBuiltinPointer(Register builtin_pointer) override;
+
+ void LoadCodeObjectEntry(Register destination, Register code_object) override;
+ void CallCodeObject(Register code_object) override;
+ void JumpCodeObject(Register code_object) override;
void RetpolineCall(Register reg);
void RetpolineCall(Address destination, RelocInfo::Mode rmode);
- void RetpolineJump(Register reg);
+ void Jump(Handle<Code> code_object, RelocInfo::Mode rmode);
- void CallForDeoptimization(Address target, int deopt_id,
- RelocInfo::Mode rmode) {
- USE(deopt_id);
- call(target, rmode);
- }
+ void RetpolineJump(Register reg);
- inline bool AllowThisStubCall(CodeStub* stub);
- void CallStubDelayed(CodeStub* stub);
+ void CallForDeoptimization(Address target, int deopt_id);
// Call a runtime routine. This expects {centry} to contain a fitting CEntry
// builtin for the target runtime function and uses an indirect call.
@@ -242,14 +185,11 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void Ret();
- void LoadRoot(Register destination, RootIndex index) override;
+ // Root register utility functions.
- void MoveForRootRegisterRefactoring(Register dst, Register src) {
- // TODO(v8:6666): When rewriting ia32 ASM builtins to not clobber the
- // kRootRegister ebx, most call sites of this wrapper function can probably
- // be removed.
- Move(dst, src);
- }
+ void InitializeRootRegister();
+
+ void LoadRoot(Register destination, RootIndex index) override;
// Indirect root-relative loads.
void LoadFromConstantsTable(Register destination,
@@ -257,24 +197,23 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void LoadRootRegisterOffset(Register destination, intptr_t offset) override;
void LoadRootRelative(Register destination, int32_t offset) override;
- void LoadAddress(Register destination, ExternalReference source);
+ // Returns an Operand pointing to an external reference.
+ // May emit code to set up the scratch register. The returned operand is
+ // only guaranteed to be correct as long as the scratch register
+ // isn't changed.
+ // If the operand is used more than once, use a scratch register
+ // that is guaranteed not to be clobbered.
+ Operand ExternalReferenceAsOperand(ExternalReference reference,
+ Register scratch);
+ Operand ExternalReferenceAddressAsOperand(ExternalReference reference);
+ Operand HeapObjectAsOperand(Handle<HeapObject> object);
- void PushRootRegister() {
- // Check that a NoRootArrayScope exists.
- CHECK(!root_array_available());
- push(kRootRegister);
- }
- void PopRootRegister() {
- // Check that a NoRootArrayScope exists.
- CHECK(!root_array_available());
- pop(kRootRegister);
- }
+ void LoadAddress(Register destination, ExternalReference source);
- // Wrapper functions to ensure external reference operands produce
- // isolate-independent code if needed.
- Operand StaticVariable(const ExternalReference& ext);
- Operand StaticArray(Register index, ScaleFactor scale,
- const ExternalReference& ext);
+ void CompareStackLimit(Register with);
+ void CompareRealStackLimit(Register with);
+ void CompareRoot(Register with, RootIndex index);
+ void CompareRoot(Register with, Register scratch, RootIndex index);
// Return and drop arguments from stack, where the number of arguments
// may be bigger than 2^16 - 1. Requires a scratch register.
@@ -427,8 +366,10 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
Cvttss2ui(dst, Operand(src), tmp);
}
void Cvttss2ui(Register dst, Operand src, XMMRegister tmp);
- void Cvtui2sd(XMMRegister dst, Register src) { Cvtui2sd(dst, Operand(src)); }
- void Cvtui2sd(XMMRegister dst, Operand src);
+ void Cvtui2sd(XMMRegister dst, Register src, Register scratch) {
+ Cvtui2sd(dst, Operand(src), scratch);
+ }
+ void Cvtui2sd(XMMRegister dst, Operand src, Register scratch);
void Cvttsd2ui(Register dst, XMMRegister src, XMMRegister tmp) {
Cvttsd2ui(dst, Operand(src), tmp);
}
@@ -436,9 +377,9 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void Push(Register src) { push(src); }
void Push(Operand src) { push(src); }
- void Push(Immediate value) { push(value); }
+ void Push(Immediate value);
void Push(Handle<HeapObject> handle) { push(Immediate(handle)); }
- void Push(Smi* smi) { Push(Immediate(smi)); }
+ void Push(Smi smi) { Push(Immediate(smi)); }
void SaveRegisters(RegList registers);
void RestoreRegisters(RegList registers);
@@ -446,6 +387,9 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void CallRecordWriteStub(Register object, Register address,
RememberedSetAction remembered_set_action,
SaveFPRegsMode fp_mode);
+ void CallRecordWriteStub(Register object, Register address,
+ RememberedSetAction remembered_set_action,
+ SaveFPRegsMode fp_mode, Address wasm_target);
// Calculate how much stack space (in bytes) is required to store caller
// registers, excluding those specified in the arguments.
@@ -474,22 +418,21 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// This is an alternative to embedding the {CodeObject} handle as a reference.
void ComputeCodeStartAddress(Register dst);
- void ResetSpeculationPoisonRegister();
+ // TODO(860429): Remove remaining poisoning infrastructure on ia32.
+ void ResetSpeculationPoisonRegister() { UNREACHABLE(); }
+
+ void CallRecordWriteStub(Register object, Register address,
+ RememberedSetAction remembered_set_action,
+ SaveFPRegsMode fp_mode, Handle<Code> code_target,
+ Address wasm_target);
};
// MacroAssembler implements a collection of frequently used macros.
-class MacroAssembler : public TurboAssembler {
+class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
public:
- MacroAssembler(const AssemblerOptions& options, void* buffer, int size)
- : TurboAssembler(options, buffer, size) {}
-
- MacroAssembler(Isolate* isolate, void* buffer, int size,
- CodeObjectRequired create_code_object)
- : MacroAssembler(isolate, AssemblerOptions::Default(isolate), buffer,
- size, create_code_object) {}
-
- MacroAssembler(Isolate* isolate, const AssemblerOptions& options,
- void* buffer, int size, CodeObjectRequired create_code_object);
+ template <typename... Args>
+ explicit MacroAssembler(Args&&... args)
+ : TurboAssembler(std::forward<Args>(args)...) {}
// Load a register with a long value as efficiently as possible.
void Set(Register dst, int32_t x) {
@@ -501,12 +444,6 @@ class MacroAssembler : public TurboAssembler {
}
void Set(Operand dst, int32_t x) { mov(dst, Immediate(x)); }
- // Operations on roots in the root-array.
- void CompareRoot(Register with, Register scratch, RootIndex index);
- // These methods can only be used with constant roots (i.e. non-writable
- // and not in new space).
- void CompareRoot(Register with, RootIndex index);
- void CompareRoot(Operand with, RootIndex index);
void PushRoot(RootIndex index);
// Compare the object in a register to a value and jump if they are equal.
@@ -515,11 +452,6 @@ class MacroAssembler : public TurboAssembler {
CompareRoot(with, index);
j(equal, if_equal, if_equal_distance);
}
- void JumpIfRoot(Operand with, RootIndex index, Label* if_equal,
- Label::Distance if_equal_distance = Label::kFar) {
- CompareRoot(with, index);
- j(equal, if_equal, if_equal_distance);
- }
// Compare the object in a register to a value and jump if they are not equal.
void JumpIfNotRoot(Register with, RootIndex index, Label* if_not_equal,
@@ -527,11 +459,6 @@ class MacroAssembler : public TurboAssembler {
CompareRoot(with, index);
j(not_equal, if_not_equal, if_not_equal_distance);
}
- void JumpIfNotRoot(Operand with, RootIndex index, Label* if_not_equal,
- Label::Distance if_not_equal_distance = Label::kFar) {
- CompareRoot(with, index);
- j(not_equal, if_not_equal, if_not_equal_distance);
- }
// ---------------------------------------------------------------------------
// GC Support
@@ -565,7 +492,7 @@ class MacroAssembler : public TurboAssembler {
// esi.
void EnterExitFrame(int argc, bool save_doubles, StackFrame::Type frame_type);
- void EnterApiExitFrame(int argc);
+ void EnterApiExitFrame(int argc, Register scratch);
// Leave the current exit frame. Expects the return value in
// register eax:edx (untouched) and the pointer to the first
@@ -678,26 +605,20 @@ class MacroAssembler : public TurboAssembler {
// Abort execution if argument is not undefined or an AllocationSite, enabled
// via --debug-code.
- void AssertUndefinedOrAllocationSite(Register object);
+ void AssertUndefinedOrAllocationSite(Register object, Register scratch);
// ---------------------------------------------------------------------------
// Exception handling
// Push a new stack handler and link it into stack handler chain.
- void PushStackHandler();
+ void PushStackHandler(Register scratch);
// Unlink the stack handler on top of the stack from the stack handler chain.
- void PopStackHandler();
+ void PopStackHandler(Register scratch);
// ---------------------------------------------------------------------------
// Runtime calls
- // Call a code stub. Generate the code if necessary.
- void CallStub(CodeStub* stub);
-
- // Tail call a code stub (jump). Generate the code if necessary.
- void TailCallStub(CodeStub* stub);
-
// Call a runtime routine.
void CallRuntime(const Runtime::Function* f, int num_arguments,
SaveFPRegsMode save_doubles = kDontSaveFPRegs);
@@ -744,16 +665,13 @@ class MacroAssembler : public TurboAssembler {
// ---------------------------------------------------------------------------
// StatsCounter support
- void IncrementCounter(StatsCounter* counter, int value);
- void DecrementCounter(StatsCounter* counter, int value);
+ void IncrementCounter(StatsCounter* counter, int value, Register scratch);
+ void DecrementCounter(StatsCounter* counter, int value, Register scratch);
static int SafepointRegisterStackIndex(Register reg) {
return SafepointRegisterStackIndex(reg.code());
}
- void EnterBuiltinFrame(Register context, Register target, Register argc);
- void LeaveBuiltinFrame(Register context, Register target, Register argc);
-
private:
// Helper functions for generating invokes.
void InvokePrologue(const ParameterCount& expected,
@@ -761,7 +679,7 @@ class MacroAssembler : public TurboAssembler {
bool* definitely_mismatches, InvokeFlag flag,
Label::Distance done_distance);
- void EnterExitFramePrologue(StackFrame::Type frame_type);
+ void EnterExitFramePrologue(StackFrame::Type frame_type, Register scratch);
void EnterExitFrameEpilogue(int argc, bool save_doubles);
void LeaveExitFrameEpilogue();
@@ -777,6 +695,8 @@ class MacroAssembler : public TurboAssembler {
// Needs access to SafepointRegisterStackIndex for compiled frame
// traversal.
friend class StandardFrame;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(MacroAssembler);
};
// -----------------------------------------------------------------------------
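
A recurring theme in this header: with ebx reserved as kRootRegister, ia32 no longer has a free scratch register, so helpers such as PushStackHandler, IncrementCounter, and EnterApiExitFrame now take an explicit scratch parameter. A hedged usage sketch, assuming a call site where edi happens to be dead (the wrapper function and register choice are assumptions):

// Hedged sketch: threading an explicit scratch register through the new
// signatures. edi is assumed dead here and is clobbered by the helpers.
void UseScratchThreadedHelpers(MacroAssembler* masm, StatsCounter* counter) {
  masm->PushStackHandler(edi);              // was PushStackHandler()
  masm->IncrementCounter(counter, 1, edi);  // was IncrementCounter(counter, 1)
  masm->PopStackHandler(edi);
}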
diff --git a/deps/v8/src/ia32/register-ia32.h b/deps/v8/src/ia32/register-ia32.h
new file mode 100644
index 0000000000..b1e213b4d7
--- /dev/null
+++ b/deps/v8/src/ia32/register-ia32.h
@@ -0,0 +1,166 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_IA32_REGISTER_IA32_H_
+#define V8_IA32_REGISTER_IA32_H_
+
+#include "src/register.h"
+#include "src/reglist.h"
+
+namespace v8 {
+namespace internal {
+
+#define GENERAL_REGISTERS(V) \
+ V(eax) \
+ V(ecx) \
+ V(edx) \
+ V(ebx) \
+ V(esp) \
+ V(ebp) \
+ V(esi) \
+ V(edi)
+
+#define ALLOCATABLE_GENERAL_REGISTERS(V) \
+ V(eax) \
+ V(ecx) \
+ V(edx) \
+ V(esi) \
+ V(edi)
+
+#define DOUBLE_REGISTERS(V) \
+ V(xmm0) \
+ V(xmm1) \
+ V(xmm2) \
+ V(xmm3) \
+ V(xmm4) \
+ V(xmm5) \
+ V(xmm6) \
+ V(xmm7)
+
+#define FLOAT_REGISTERS DOUBLE_REGISTERS
+#define SIMD128_REGISTERS DOUBLE_REGISTERS
+
+#define ALLOCATABLE_DOUBLE_REGISTERS(V) \
+ V(xmm1) \
+ V(xmm2) \
+ V(xmm3) \
+ V(xmm4) \
+ V(xmm5) \
+ V(xmm6) \
+ V(xmm7)
+
+enum RegisterCode {
+#define REGISTER_CODE(R) kRegCode_##R,
+ GENERAL_REGISTERS(REGISTER_CODE)
+#undef REGISTER_CODE
+ kRegAfterLast
+};
+
+class Register : public RegisterBase<Register, kRegAfterLast> {
+ public:
+ bool is_byte_register() const { return reg_code_ <= 3; }
+
+ private:
+ friend class RegisterBase<Register, kRegAfterLast>;
+ explicit constexpr Register(int code) : RegisterBase(code) {}
+};
+
+ASSERT_TRIVIALLY_COPYABLE(Register);
+static_assert(sizeof(Register) == sizeof(int),
+ "Register can efficiently be passed by value");
+
+#define DEFINE_REGISTER(R) \
+ constexpr Register R = Register::from_code<kRegCode_##R>();
+GENERAL_REGISTERS(DEFINE_REGISTER)
+#undef DEFINE_REGISTER
+constexpr Register no_reg = Register::no_reg();
+
+constexpr bool kPadArguments = false;
+constexpr bool kSimpleFPAliasing = true;
+constexpr bool kSimdMaskRegisters = false;
+
+enum DoubleCode {
+#define REGISTER_CODE(R) kDoubleCode_##R,
+ DOUBLE_REGISTERS(REGISTER_CODE)
+#undef REGISTER_CODE
+ kDoubleAfterLast
+};
+
+class XMMRegister : public RegisterBase<XMMRegister, kDoubleAfterLast> {
+ friend class RegisterBase<XMMRegister, kDoubleAfterLast>;
+ explicit constexpr XMMRegister(int code) : RegisterBase(code) {}
+};
+
+typedef XMMRegister FloatRegister;
+
+typedef XMMRegister DoubleRegister;
+
+typedef XMMRegister Simd128Register;
+
+#define DEFINE_REGISTER(R) \
+ constexpr DoubleRegister R = DoubleRegister::from_code<kDoubleCode_##R>();
+DOUBLE_REGISTERS(DEFINE_REGISTER)
+#undef DEFINE_REGISTER
+constexpr DoubleRegister no_dreg = DoubleRegister::no_reg();
+
+// Note that the bit values must match those used in actual instruction encoding.
+constexpr int kNumRegs = 8;
+
+// Caller-saved registers
+constexpr RegList kJSCallerSaved =
+ Register::ListOf<eax, ecx, edx,
+ ebx, // used as a caller-saved register in JavaScript code
+ edi // callee function
+ >();
+
+constexpr int kNumJSCallerSaved = 5;
+
+// Number of registers for which space is reserved in safepoints.
+constexpr int kNumSafepointRegisters = 8;
+
+// Define {RegisterName} methods for the register types.
+DEFINE_REGISTER_NAMES(Register, GENERAL_REGISTERS)
+DEFINE_REGISTER_NAMES(XMMRegister, DOUBLE_REGISTERS)
+
+// Give alias names to registers for calling conventions.
+constexpr Register kReturnRegister0 = eax;
+constexpr Register kReturnRegister1 = edx;
+constexpr Register kReturnRegister2 = edi;
+constexpr Register kJSFunctionRegister = edi;
+constexpr Register kContextRegister = esi;
+constexpr Register kAllocateSizeRegister = edx;
+constexpr Register kInterpreterAccumulatorRegister = eax;
+constexpr Register kInterpreterBytecodeOffsetRegister = edx;
+constexpr Register kInterpreterBytecodeArrayRegister = edi;
+constexpr Register kInterpreterDispatchTableRegister = esi;
+
+constexpr Register kJavaScriptCallArgCountRegister = eax;
+constexpr Register kJavaScriptCallCodeStartRegister = ecx;
+constexpr Register kJavaScriptCallTargetRegister = kJSFunctionRegister;
+constexpr Register kJavaScriptCallNewTargetRegister = edx;
+
+// The ExtraArg1Register is not part of the real JS calling convention and is
+// mostly there to simplify consistent interface descriptor definitions across
+// platforms. Note that on ia32 it aliases kJavaScriptCallCodeStartRegister.
+constexpr Register kJavaScriptCallExtraArg1Register = ecx;
+
+// The off-heap trampoline does not need a register on ia32 (it uses a
+// pc-relative call instead).
+constexpr Register kOffHeapTrampolineRegister = no_reg;
+
+constexpr Register kRuntimeCallFunctionRegister = edx;
+constexpr Register kRuntimeCallArgCountRegister = eax;
+constexpr Register kRuntimeCallArgvRegister = ecx;
+constexpr Register kWasmInstanceRegister = esi;
+constexpr Register kWasmCompileLazyFuncIndexRegister = edi;
+
+constexpr Register kRootRegister = ebx;
+
+// TODO(860429): Remove remaining poisoning infrastructure on ia32.
+constexpr Register kSpeculationPoisonRegister = no_reg;
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_IA32_REGISTER_IA32_H_
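
The X-macro pattern in this new header generates both the RegisterCode enum and the constexpr register constants from one list, so the C++ names and the hardware encodings cannot drift apart. A hedged expansion of what GENERAL_REGISTERS(DEFINE_REGISTER) produces for its first entry (the example_ prefix avoids redefining the real constant):

// Hedged sketch: the single-entry expansion of the DEFINE_REGISTER macro.
constexpr Register example_eax = Register::from_code<kRegCode_eax>();
// eax is first in GENERAL_REGISTERS, so its code is 0, matching the ia32
// instruction encoding for eax.
static_assert(kRegCode_eax == 0, "eax encodes as 0 in ia32 instructions");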
diff --git a/deps/v8/src/ia32/simulator-ia32.cc b/deps/v8/src/ia32/simulator-ia32.cc
deleted file mode 100644
index d696e4b45e..0000000000
--- a/deps/v8/src/ia32/simulator-ia32.cc
+++ /dev/null
@@ -1,7 +0,0 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/ia32/simulator-ia32.h"
-
-// Since there is no simulator for the ia32 architecture this file is empty.
diff --git a/deps/v8/src/ia32/simulator-ia32.h b/deps/v8/src/ia32/simulator-ia32.h
deleted file mode 100644
index a55c1fefb8..0000000000
--- a/deps/v8/src/ia32/simulator-ia32.h
+++ /dev/null
@@ -1,10 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_IA32_SIMULATOR_IA32_H_
-#define V8_IA32_SIMULATOR_IA32_H_
-
-// Since there is no simulator for the ia32 architecture this file is empty.
-
-#endif // V8_IA32_SIMULATOR_IA32_H_
diff --git a/deps/v8/src/ic/accessor-assembler.cc b/deps/v8/src/ic/accessor-assembler.cc
index 0cb326763c..0bbc9391b0 100644
--- a/deps/v8/src/ic/accessor-assembler.cc
+++ b/deps/v8/src/ic/accessor-assembler.cc
@@ -6,14 +6,17 @@
#include "src/ast/ast.h"
#include "src/code-factory.h"
-#include "src/code-stubs.h"
#include "src/counters.h"
#include "src/ic/handler-configuration.h"
#include "src/ic/ic.h"
#include "src/ic/keyed-store-generic.h"
#include "src/ic/stub-cache.h"
#include "src/objects-inl.h"
+#include "src/objects/cell.h"
+#include "src/objects/foreign.h"
+#include "src/objects/heap-number.h"
#include "src/objects/module.h"
+#include "src/objects/smi.h"
namespace v8 {
namespace internal {
@@ -59,7 +62,7 @@ TNode<MaybeObject> AccessorAssembler::LoadHandlerDataField(
USE(minimum_size);
CSA_ASSERT(this, UintPtrGreaterThanOrEqual(
LoadMapInstanceSizeInWords(handler_map),
- IntPtrConstant(minimum_size / kPointerSize)));
+ IntPtrConstant(minimum_size / kTaggedSize)));
return LoadMaybeWeakObjectField(handler, offset);
}
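
This hunk is part of a mechanical rename that runs through the whole file:
offsets over tagged fields now scale by kTaggedSize instead of kPointerSize,
while raw off-heap pointers (later in this file) use kSystemPointerSize. The
split only matters once tagged values can be smaller than machine words, e.g.
under pointer compression. A hedged sketch of the distinction (the
V8_COMPRESS_POINTERS switch and the value 4 are assumptions for illustration,
not read off this diff):

    #include <cstddef>
    #include <cstdio>

    constexpr size_t kSystemPointerSize = sizeof(void*);
    #ifdef V8_COMPRESS_POINTERS
    constexpr size_t kTaggedSize = 4;  // compressed tagged values
    #else
    constexpr size_t kTaggedSize = kSystemPointerSize;  // uncompressed: equal
    #endif

    int main() {
      // Offsets into heap objects holding tagged fields scale by kTaggedSize;
      // offsets into raw off-heap tables scale by kSystemPointerSize.
      std::printf("tagged=%zu system=%zu\n", kTaggedSize, kSystemPointerSize);
    }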
@@ -87,7 +90,7 @@ TNode<MaybeObject> AccessorAssembler::TryMonomorphicCase(
TNode<MaybeObject> handler = UncheckedCast<MaybeObject>(
Load(MachineType::AnyTagged(), vector,
- IntPtrAdd(offset, IntPtrConstant(header_size + kPointerSize))));
+ IntPtrAdd(offset, IntPtrConstant(header_size + kTaggedSize))));
*var_handler = handler;
Goto(if_handler);
@@ -159,7 +162,7 @@ void AccessorAssembler::HandlePolymorphicCase(
// Found, now call handler.
TNode<MaybeObject> handler =
- LoadWeakFixedArrayElement(feedback, index, kPointerSize);
+ LoadWeakFixedArrayElement(feedback, index, kTaggedSize);
*var_handler = handler;
Goto(if_handler);
@@ -214,7 +217,8 @@ void AccessorAssembler::HandleLoadCallbackProperty(const LoadICParameters* p,
TNode<WordT> handler_word,
ExitPoint* exit_point) {
Comment("native_data_property_load");
- Node* descriptor = DecodeWord<LoadHandler::DescriptorBits>(handler_word);
+ TNode<IntPtrT> descriptor =
+ Signed(DecodeWord<LoadHandler::DescriptorBits>(handler_word));
Label runtime(this, Label::kDeferred);
Callable callable = CodeFactory::ApiGetter(isolate());
@@ -270,9 +274,10 @@ void AccessorAssembler::HandleLoadAccessor(
Goto(&load);
BIND(&load);
- Callable callable = CodeFactory::CallApiCallback(isolate(), 0);
- exit_point->Return(CallStub(callable, nullptr, context, data,
- api_holder.value(), callback, p->receiver));
+ Callable callable = CodeFactory::CallApiCallback(isolate());
+ TNode<IntPtrT> argc = IntPtrConstant(0);
+ exit_point->Return(CallStub(callable, nullptr, context, callback, argc,
+ data, api_holder.value(), p->receiver));
}
BIND(&runtime);
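
The CallApiCallback change visible here (and again in
HandleStoreICProtoHandler below) replaces per-arity stubs --
CodeFactory::CallApiCallback(isolate(), 0) -- with a single callable that
takes argc as an explicit argument. A toy contrast of the two shapes, not V8
code:

    #include <cstdio>

    // Before (conceptually): a distinct callable instantiated per argc.
    template <int kArgc>
    int CallApiCallbackSpecialized() { return kArgc; }

    // After: a single callable; argc travels as an ordinary argument.
    int CallApiCallback(int argc) { return argc; }

    int main() {
      std::printf("%d %d\n", CallApiCallbackSpecialized<0>(),
                  CallApiCallback(1));
    }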
@@ -287,7 +292,7 @@ void AccessorAssembler::HandleLoadField(Node* holder, Node* handler_word,
ExitPoint* exit_point) {
Comment("field_load");
Node* index = DecodeWord<LoadHandler::FieldIndexBits>(handler_word);
- Node* offset = IntPtrMul(index, IntPtrConstant(kPointerSize));
+ Node* offset = IntPtrMul(index, IntPtrConstant(kTaggedSize));
Label inobject(this), out_of_object(this);
Branch(IsSetWord<LoadHandler::IsInobjectBits>(handler_word), &inobject,
@@ -324,22 +329,15 @@ void AccessorAssembler::HandleLoadField(Node* holder, Node* handler_word,
}
}
-TNode<Object> AccessorAssembler::LoadDescriptorValue(TNode<Map> map,
- Node* descriptor) {
- return CAST(LoadDescriptorValueOrFieldType(map, descriptor));
+TNode<Object> AccessorAssembler::LoadDescriptorValue(
+ TNode<Map> map, TNode<IntPtrT> descriptor_entry) {
+ return CAST(LoadDescriptorValueOrFieldType(map, descriptor_entry));
}
TNode<MaybeObject> AccessorAssembler::LoadDescriptorValueOrFieldType(
- TNode<Map> map, SloppyTNode<IntPtrT> descriptor) {
+ TNode<Map> map, TNode<IntPtrT> descriptor_entry) {
TNode<DescriptorArray> descriptors = LoadMapDescriptors(map);
- TNode<IntPtrT> scaled_descriptor =
- IntPtrMul(descriptor, IntPtrConstant(DescriptorArray::kEntrySize));
- TNode<IntPtrT> value_index = IntPtrAdd(
- scaled_descriptor, IntPtrConstant(DescriptorArray::kFirstIndex +
- DescriptorArray::kEntryValueIndex));
- CSA_ASSERT(this, UintPtrLessThan(descriptor, LoadAndUntagWeakFixedArrayLength(
- descriptors)));
- return LoadWeakFixedArrayElement(descriptors, value_index);
+ return LoadFieldTypeByDescriptorEntry(descriptors, descriptor_entry);
}
void AccessorAssembler::HandleLoadICSmiHandlerCase(
@@ -496,7 +494,8 @@ void AccessorAssembler::HandleLoadICSmiHandlerCase(
BIND(&constant);
{
Comment("constant_load");
- Node* descriptor = DecodeWord<LoadHandler::DescriptorBits>(handler_word);
+ TNode<IntPtrT> descriptor =
+ Signed(DecodeWord<LoadHandler::DescriptorBits>(handler_word));
Node* value = LoadDescriptorValue(LoadMap(holder), descriptor);
exit_point->Return(value);
@@ -525,7 +524,8 @@ void AccessorAssembler::HandleLoadICSmiHandlerCase(
BIND(&accessor);
{
Comment("accessor_load");
- Node* descriptor = DecodeWord<LoadHandler::DescriptorBits>(handler_word);
+ TNode<IntPtrT> descriptor =
+ Signed(DecodeWord<LoadHandler::DescriptorBits>(handler_word));
Node* accessor_pair = LoadDescriptorValue(LoadMap(holder), descriptor);
CSA_ASSERT(this, IsAccessorPair(accessor_pair));
Node* getter = LoadObjectField(accessor_pair, AccessorPair::kGetterOffset);
@@ -826,10 +826,14 @@ void AccessorAssembler::JumpIfDataProperty(Node* details, Label* writable,
void AccessorAssembler::HandleStoreICNativeDataProperty(
const StoreICParameters* p, Node* holder, Node* handler_word) {
Comment("native_data_property_store");
- Node* descriptor = DecodeWord<StoreHandler::DescriptorBits>(handler_word);
+ TNode<IntPtrT> descriptor =
+ Signed(DecodeWord<StoreHandler::DescriptorBits>(handler_word));
Node* accessor_info = LoadDescriptorValue(LoadMap(holder), descriptor);
CSA_CHECK(this, IsAccessorInfo(accessor_info));
+ // TODO(8580): Get the language mode lazily when required to avoid the
+ // computation of GetLanguageMode here. Also make the computation of the
+ // language mode independent of the feedback vector.
Node* language_mode = GetLanguageMode(p->vector, p->slot);
TailCallRuntime(Runtime::kStoreCallbackProperty, p->context, p->receiver,
@@ -985,13 +989,11 @@ void AccessorAssembler::HandleStoreICTransitionMapHandlerCase(
TNode<IntPtrT> last_key_index = UncheckedCast<IntPtrT>(IntPtrAdd(
IntPtrConstant(DescriptorArray::ToKeyIndex(-1)), IntPtrMul(nof, factor)));
if (flags & kValidateTransitionHandler) {
- Node* key = LoadWeakFixedArrayElement(descriptors, last_key_index);
+ TNode<Name> key = LoadKeyByKeyIndex(descriptors, last_key_index);
GotoIf(WordNotEqual(key, p->name), miss);
} else {
- CSA_ASSERT(this,
- WordEqual(BitcastMaybeObjectToWord(LoadWeakFixedArrayElement(
- descriptors, last_key_index)),
- p->name));
+ CSA_ASSERT(this, WordEqual(LoadKeyByKeyIndex(descriptors, last_key_index),
+ p->name));
}
Node* details = LoadDetailsByKeyIndex(descriptors, last_key_index);
if (flags & kValidateTransitionHandler) {
@@ -1062,10 +1064,10 @@ void AccessorAssembler::CheckFieldType(TNode<DescriptorArray> descriptors,
GotoIf(TaggedIsSmi(value), bailout);
TNode<MaybeObject> field_type = LoadFieldTypeByKeyIndex(
descriptors, UncheckedCast<IntPtrT>(name_index));
- intptr_t kNoneType = reinterpret_cast<intptr_t>(FieldType::None());
- intptr_t kAnyType = reinterpret_cast<intptr_t>(FieldType::Any());
- DCHECK_NE(kNoneType, kClearedWeakHeapObject);
- DCHECK_NE(kAnyType, kClearedWeakHeapObject);
+ const Address kNoneType = FieldType::None().ptr();
+ const Address kAnyType = FieldType::Any().ptr();
+ DCHECK_NE(static_cast<uint32_t>(kNoneType), kClearedWeakHeapObjectLower32);
+ DCHECK_NE(static_cast<uint32_t>(kAnyType), kClearedWeakHeapObjectLower32);
// FieldType::None can't hold any value.
GotoIf(WordEqual(BitcastMaybeObjectToWord(field_type),
IntPtrConstant(kNoneType)),
@@ -1128,7 +1130,7 @@ void AccessorAssembler::OverwriteExistingFastDataProperty(
BIND(&inobject);
{
- Node* field_offset = TimesPointerSize(field_index);
+ Node* field_offset = TimesTaggedSize(field_index);
Label tagged_rep(this), double_rep(this);
Branch(
Word32Equal(representation, Int32Constant(Representation::kDouble)),
@@ -1253,7 +1255,8 @@ void AccessorAssembler::CheckPrototypeValidityCell(Node* maybe_validity_cell,
void AccessorAssembler::HandleStoreAccessor(const StoreICParameters* p,
Node* holder, Node* handler_word) {
Comment("accessor_store");
- Node* descriptor = DecodeWord<StoreHandler::DescriptorBits>(handler_word);
+ TNode<IntPtrT> descriptor =
+ Signed(DecodeWord<StoreHandler::DescriptorBits>(handler_word));
Node* accessor_pair = LoadDescriptorValue(LoadMap(holder), descriptor);
CSA_ASSERT(this, IsAccessorPair(accessor_pair));
Node* setter = LoadObjectField(accessor_pair, AccessorPair::kSetterOffset);
@@ -1417,9 +1420,10 @@ void AccessorAssembler::HandleStoreICProtoHandler(
Goto(&store);
BIND(&store);
- Callable callable = CodeFactory::CallApiCallback(isolate(), 1);
- Return(CallStub(callable, nullptr, context, data, api_holder.value(),
- callback, p->receiver, p->value));
+ Callable callable = CodeFactory::CallApiCallback(isolate());
+ TNode<IntPtrT> argc = IntPtrConstant(1);
+ Return(CallStub(callable, nullptr, context, callback, argc, data,
+ api_holder.value(), p->receiver, p->value));
}
BIND(&if_store_global_proxy);
@@ -1450,6 +1454,9 @@ void AccessorAssembler::HandleStoreToProxy(const StoreICParameters* p,
Label if_index(this), if_unique_name(this),
to_name_failed(this, Label::kDeferred);
+ // TODO(8580): Get the language mode lazily when required to avoid the
+ // computation of GetLanguageMode here. Also make the computation of the
+ // language mode independent of the feedback vector.
Node* language_mode = GetLanguageMode(p->vector, p->slot);
if (support_elements == kSupportElements) {
@@ -1584,7 +1591,8 @@ Node* AccessorAssembler::PrepareValueForStore(Node* handler_word, Node* holder,
IntPtrConstant(StoreHandler::kConstField)),
&done);
}
- Node* descriptor = DecodeWord<StoreHandler::DescriptorBits>(handler_word);
+ TNode<IntPtrT> descriptor =
+ Signed(DecodeWord<StoreHandler::DescriptorBits>(handler_word));
TNode<MaybeObject> maybe_field_type =
LoadDescriptorValueOrFieldType(LoadMap(holder), descriptor);
@@ -1713,7 +1721,7 @@ void AccessorAssembler::StoreNamedField(Node* handler_word, Node* object,
}
Node* index = DecodeWord<StoreHandler::FieldIndexBits>(handler_word);
- Node* offset = IntPtrMul(index, IntPtrConstant(kPointerSize));
+ Node* offset = IntPtrMul(index, IntPtrConstant(kTaggedSize));
if (representation.IsDouble()) {
if (!FLAG_unbox_double_fields || !is_inobject) {
// Load the mutable heap number.
@@ -1867,7 +1875,7 @@ void AccessorAssembler::EmitElementLoad(
BIND(&if_typed_array);
{
Comment("typed elements");
- // Check if buffer has been neutered.
+ // Check if buffer has been detached.
Node* buffer = LoadObjectField(object, JSArrayBufferView::kBufferOffset);
GotoIf(IsDetachedBuffer(buffer), miss);
@@ -2305,15 +2313,6 @@ void AccessorAssembler::TryProbeStubCacheTable(
Node* name, Node* map, Label* if_handler,
TVariable<MaybeObject>* var_handler, Label* if_miss) {
StubCache::Table table = static_cast<StubCache::Table>(table_id);
-#ifdef DEBUG
- if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
- Goto(if_miss);
- return;
- } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
- Goto(if_miss);
- return;
- }
-#endif
// The {table_offset} holds the entry offset times four (due to masking
// and shifting optimizations).
const int kMultiplier = sizeof(StubCache::Entry) >> Name::kHashShift;
@@ -2326,18 +2325,19 @@ void AccessorAssembler::TryProbeStubCacheTable(
GotoIf(WordNotEqual(name, entry_key), if_miss);
// Get the map entry from the cache.
- DCHECK_EQ(kPointerSize * 2, stub_cache->map_reference(table).address() -
- stub_cache->key_reference(table).address());
+ DCHECK_EQ(kSystemPointerSize * 2,
+ stub_cache->map_reference(table).address() -
+ stub_cache->key_reference(table).address());
Node* entry_map =
Load(MachineType::Pointer(), key_base,
- IntPtrAdd(entry_offset, IntPtrConstant(kPointerSize * 2)));
+ IntPtrAdd(entry_offset, IntPtrConstant(kSystemPointerSize * 2)));
GotoIf(WordNotEqual(map, entry_map), if_miss);
- DCHECK_EQ(kPointerSize, stub_cache->value_reference(table).address() -
- stub_cache->key_reference(table).address());
+ DCHECK_EQ(kSystemPointerSize, stub_cache->value_reference(table).address() -
+ stub_cache->key_reference(table).address());
TNode<MaybeObject> handler = ReinterpretCast<MaybeObject>(
- Load(MachineType::TaggedPointer(), key_base,
- IntPtrAdd(entry_offset, IntPtrConstant(kPointerSize))));
+ Load(MachineType::AnyTagged(), key_base,
+ IntPtrAdd(entry_offset, IntPtrConstant(kSystemPointerSize))));
// We found the handler.
*var_handler = handler;
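
The probe math above relies on the hash already being the entry index times
four, so kMultiplier = sizeof(StubCache::Entry) >> Name::kHashShift converts
it into a byte offset. A back-of-envelope check under assumed values (a
three-word Entry and kHashShift == 2; both are illustrative):

    #include <cstdint>
    #include <cstdio>

    struct Entry { uintptr_t key, value, map; };  // hypothetical layout
    constexpr int kHashShift = 2;

    int main() {
      // The probed hash already equals entry_index * 4, so multiplying by
      // sizeof(Entry) / 4 yields a byte offset into the table.
      constexpr int kMultiplier = sizeof(Entry) >> kHashShift;
      int entry_index_times_4 = 8;  // entry index 2
      int entry_offset = entry_index_times_4 * kMultiplier;
      std::printf("%d == %d\n", entry_offset,
                  2 * static_cast<int>(sizeof(Entry)));
    }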
@@ -2393,6 +2393,8 @@ void AccessorAssembler::LoadIC_BytecodeHandler(const LoadICParameters* p,
// accident.
Label stub_call(this, Label::kDeferred), miss(this, Label::kDeferred);
+ GotoIf(IsUndefined(p->vector), &miss);
+
// Inlined fast path.
{
Comment("LoadIC_BytecodeHandler_fast");
@@ -2528,7 +2530,7 @@ void AccessorAssembler::LoadIC_Uninitialized(const LoadICParameters* p) {
LoadRoot(RootIndex::kpremonomorphic_symbol),
SKIP_WRITE_BARRIER, 0, SMI_PARAMETERS);
StoreWeakReferenceInFeedbackVector(p->vector, p->slot, receiver_map,
- kPointerSize, SMI_PARAMETERS);
+ kTaggedSize, SMI_PARAMETERS);
{
// Special case for Function.prototype load, because it's very common
@@ -2579,7 +2581,8 @@ void AccessorAssembler::LoadGlobalIC(TNode<FeedbackVector> vector, Node* slot,
TNode<Context> context = lazy_context();
TNode<Name> name = lazy_name();
exit_point->ReturnCallRuntime(Runtime::kLoadGlobalIC_Miss, context, name,
- ParameterToTagged(slot, slot_mode), vector);
+ ParameterToTagged(slot, slot_mode), vector,
+ SmiConstant(typeof_mode));
}
}
@@ -2631,7 +2634,7 @@ void AccessorAssembler::LoadGlobalIC_TryHandlerCase(
Label call_handler(this), non_smi(this);
TNode<MaybeObject> feedback_element =
- LoadFeedbackVectorSlot(vector, slot, kPointerSize, slot_mode);
+ LoadFeedbackVectorSlot(vector, slot, kTaggedSize, slot_mode);
TNode<Object> handler = CAST(feedback_element);
GotoIf(WordEqual(handler, LoadRoot(RootIndex::kuninitialized_symbol)), miss);
@@ -2852,7 +2855,7 @@ void AccessorAssembler::KeyedLoadICPolymorphicName(const LoadICParameters* p) {
// Check if we have a matching handler for the {receiver_map}.
TNode<MaybeObject> feedback_element =
- LoadFeedbackVectorSlot(vector, slot, kPointerSize, SMI_PARAMETERS);
+ LoadFeedbackVectorSlot(vector, slot, kTaggedSize, SMI_PARAMETERS);
TNode<WeakFixedArray> array = CAST(feedback_element);
HandlePolymorphicCase(receiver_map, array, &if_handler, &var_handler, &miss,
1);
@@ -2954,7 +2957,7 @@ void AccessorAssembler::StoreGlobalIC(const StoreICParameters* pp) {
{
Comment("StoreGlobalIC_try_handler");
TNode<MaybeObject> handler = LoadFeedbackVectorSlot(
- pp->vector, pp->slot, kPointerSize, SMI_PARAMETERS);
+ pp->vector, pp->slot, kTaggedSize, SMI_PARAMETERS);
GotoIf(WordEqual(handler, LoadRoot(RootIndex::kuninitialized_symbol)),
&miss);
@@ -3108,7 +3111,7 @@ void AccessorAssembler::KeyedStoreIC(const StoreICParameters* p) {
// If the name comparison succeeded, we know we have a feedback vector
// with at least one map/handler pair.
TNode<MaybeObject> feedback_element = LoadFeedbackVectorSlot(
- p->vector, p->slot, kPointerSize, SMI_PARAMETERS);
+ p->vector, p->slot, kTaggedSize, SMI_PARAMETERS);
TNode<WeakFixedArray> array = CAST(feedback_element);
HandlePolymorphicCase(receiver_map, array, &if_handler, &var_handler,
&miss, 1);
@@ -3191,9 +3194,8 @@ void AccessorAssembler::StoreInArrayLiteralIC(const StoreICParameters* p) {
BIND(&miss);
{
Comment("StoreInArrayLiteralIC_miss");
- // TODO(neis): Introduce Runtime::kStoreInArrayLiteralIC_Miss.
- TailCallRuntime(Runtime::kKeyedStoreIC_Miss, p->context, p->value, p->slot,
- p->vector, p->receiver, p->name);
+ TailCallRuntime(Runtime::kStoreInArrayLiteralIC_Miss, p->context, p->value,
+ p->slot, p->vector, p->receiver, p->name);
}
}
@@ -3517,7 +3519,7 @@ void AccessorAssembler::GenerateCloneObjectIC_Slow() {
Label did_set_proto_if_needed(this);
TNode<BoolT> is_null_proto = SmiNotEqual(
SmiAnd(flags, SmiConstant(ObjectLiteral::kHasNullPrototype)),
- SmiConstant(Smi::kZero));
+ SmiConstant(Smi::zero()));
GotoIfNot(is_null_proto, &did_set_proto_if_needed);
CallRuntime(Runtime::kInternalSetPrototype, context, result,
@@ -3547,13 +3549,12 @@ void AccessorAssembler::GenerateCloneObjectIC_Slow() {
GotoIfNot(IsEmptyFixedArray(LoadElements(CAST(source))), &call_runtime);
- ForEachEnumerableOwnProperty(
- context, map, CAST(source),
- [=](TNode<Name> key, TNode<Object> value) {
- KeyedStoreGenericGenerator::SetPropertyInLiteral(state(), context,
- result, key, value);
- },
- &call_runtime);
+ ForEachEnumerableOwnProperty(context, map, CAST(source),
+ [=](TNode<Name> key, TNode<Object> value) {
+ SetPropertyInLiteral(context, result, key,
+ value);
+ },
+ &call_runtime);
Goto(&done);
BIND(&call_runtime);
@@ -3619,6 +3620,8 @@ void AccessorAssembler::GenerateCloneObjectIC() {
auto mode = INTPTR_PARAMETERS;
var_properties = CAST(AllocatePropertyArray(length, mode));
+ FillPropertyArrayWithUndefined(var_properties.value(), IntPtrConstant(0),
+ length, mode);
CopyPropertyArrayValues(source_properties, var_properties.value(), length,
SKIP_WRITE_BARRIER, mode, DestroySource::kNo);
}
@@ -3638,7 +3641,7 @@ void AccessorAssembler::GenerateCloneObjectIC() {
TNode<IntPtrT> result_start =
LoadMapInobjectPropertiesStartInWords(result_map);
TNode<IntPtrT> field_offset_difference =
- TimesPointerSize(IntPtrSub(result_start, source_start));
+ TimesTaggedSize(IntPtrSub(result_start, source_start));
// If MutableHeapNumbers may be present in-object, allocations may occur
// within this loop, thus the write barrier is required.
@@ -3651,7 +3654,7 @@ void AccessorAssembler::GenerateCloneObjectIC() {
source_start, source_size,
[=](Node* field_index) {
TNode<IntPtrT> field_offset =
- TimesPointerSize(UncheckedCast<IntPtrT>(field_index));
+ TimesTaggedSize(UncheckedCast<IntPtrT>(field_index));
if (may_use_mutable_heap_numbers) {
TNode<Object> field = LoadObjectField(source, field_offset);
@@ -3661,13 +3664,11 @@ void AccessorAssembler::GenerateCloneObjectIC() {
StoreObjectField(object, result_offset, field);
} else {
// Copy fields as raw data.
- TNode<IntPtrT> field = UncheckedCast<IntPtrT>(
- LoadObjectField(source, field_offset, MachineType::IntPtr()));
+ TNode<IntPtrT> field =
+ LoadObjectField<IntPtrT>(source, field_offset);
TNode<IntPtrT> result_offset =
IntPtrAdd(field_offset, field_offset_difference);
- StoreObjectFieldNoWriteBarrier(
- object, result_offset, field,
- MachineType::IntPtr().representation());
+ StoreObjectFieldNoWriteBarrier(object, result_offset, field);
}
},
1, INTPTR_PARAMETERS, IndexAdvanceMode::kPost);
diff --git a/deps/v8/src/ic/accessor-assembler.h b/deps/v8/src/ic/accessor-assembler.h
index 3d92ab26c3..1022d0f160 100644
--- a/deps/v8/src/ic/accessor-assembler.h
+++ b/deps/v8/src/ic/accessor-assembler.h
@@ -148,9 +148,10 @@ class AccessorAssembler : public CodeStubAssembler {
TVariable<MaybeObject>* var_handler, Label* if_handler,
Label* miss, ExitPoint* exit_point);
- TNode<Object> LoadDescriptorValue(TNode<Map> map, Node* descriptor);
+ TNode<Object> LoadDescriptorValue(TNode<Map> map,
+ TNode<IntPtrT> descriptor_entry);
TNode<MaybeObject> LoadDescriptorValueOrFieldType(
- TNode<Map> map, SloppyTNode<IntPtrT> descriptor);
+ TNode<Map> map, TNode<IntPtrT> descriptor_entry);
void LoadIC_Uninitialized(const LoadICParameters* p);
diff --git a/deps/v8/src/ic/call-optimization.cc b/deps/v8/src/ic/call-optimization.cc
index 41928f1c08..f1acbe112b 100644
--- a/deps/v8/src/ic/call-optimization.cc
+++ b/deps/v8/src/ic/call-optimization.cc
@@ -20,16 +20,16 @@ CallOptimization::CallOptimization(Isolate* isolate, Handle<Object> function) {
}
}
-Context* CallOptimization::GetAccessorContext(Map* holder_map) const {
+Context CallOptimization::GetAccessorContext(Map holder_map) const {
if (is_constant_call()) {
return constant_function_->context()->native_context();
}
- JSFunction* constructor = JSFunction::cast(holder_map->GetConstructor());
+ JSFunction constructor = JSFunction::cast(holder_map->GetConstructor());
return constructor->context()->native_context();
}
-bool CallOptimization::IsCrossContextLazyAccessorPair(Context* native_context,
- Map* holder_map) const {
+bool CallOptimization::IsCrossContextLazyAccessorPair(Context native_context,
+ Map holder_map) const {
DCHECK(native_context->IsNativeContext());
if (is_constant_call()) return false;
return native_context != GetAccessorContext(holder_map);
@@ -48,7 +48,7 @@ Handle<JSObject> CallOptimization::LookupHolderOfExpectedType(
return Handle<JSObject>::null();
}
if (object_map->has_hidden_prototype()) {
- JSObject* raw_prototype = JSObject::cast(object_map->prototype());
+ JSObject raw_prototype = JSObject::cast(object_map->prototype());
Handle<JSObject> prototype(raw_prototype, raw_prototype->GetIsolate());
object_map = handle(prototype->map(), prototype->GetIsolate());
if (expected_receiver_type_->IsTemplateFor(*object_map)) {
@@ -83,9 +83,9 @@ bool CallOptimization::IsCompatibleReceiverMap(Handle<Map> map,
if (api_holder.is_identical_to(holder)) return true;
// Check if holder is in prototype chain of api_holder.
{
- JSObject* object = *api_holder;
+ JSObject object = *api_holder;
while (true) {
- Object* prototype = object->map()->prototype();
+ Object prototype = object->map()->prototype();
if (!prototype->IsJSObject()) return false;
if (prototype == *holder) return true;
object = JSObject::cast(prototype);
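
The Map*/Context* to Map/Context rewrites in this file are part of the
commit-wide migration from raw C++ pointers to value types wrapping a tagged
Address: nullptr checks become is_null(), and identity is read via ptr(). A
minimal sketch of the wrapper shape, not the real object hierarchy:

    #include <cstdint>

    using Address = uintptr_t;
    constexpr Address kNullAddress = 0;

    class HeapObject {
     public:
      constexpr HeapObject() : ptr_(kNullAddress) {}
      constexpr explicit HeapObject(Address ptr) : ptr_(ptr) {}
      constexpr Address ptr() const { return ptr_; }
      constexpr bool is_null() const { return ptr_ == kNullAddress; }

     private:
      Address ptr_;  // the only state: a tagged pointer, copied by value
    };

    class Map : public HeapObject {
     public:
      using HeapObject::HeapObject;
    };

    int main() {
      Map first_map;                       // was: Map* first_map = nullptr;
      return first_map.is_null() ? 0 : 1;  // was: first_map == nullptr
    }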
diff --git a/deps/v8/src/ic/call-optimization.h b/deps/v8/src/ic/call-optimization.h
index e3115bdbcd..62317dc659 100644
--- a/deps/v8/src/ic/call-optimization.h
+++ b/deps/v8/src/ic/call-optimization.h
@@ -5,8 +5,7 @@
#ifndef V8_IC_CALL_OPTIMIZATION_H_
#define V8_IC_CALL_OPTIMIZATION_H_
-#include "src/code-stubs.h"
-#include "src/macro-assembler.h"
+#include "src/api-arguments.h"
#include "src/objects.h"
namespace v8 {
@@ -16,9 +15,9 @@ class CallOptimization {
public:
CallOptimization(Isolate* isolate, Handle<Object> function);
- Context* GetAccessorContext(Map* holder_map) const;
- bool IsCrossContextLazyAccessorPair(Context* native_context,
- Map* holder_map) const;
+ Context GetAccessorContext(Map holder_map) const;
+ bool IsCrossContextLazyAccessorPair(Context native_context,
+ Map holder_map) const;
bool is_constant_call() const { return !constant_function_.is_null(); }
diff --git a/deps/v8/src/ic/handler-configuration-inl.h b/deps/v8/src/ic/handler-configuration-inl.h
index f95f0d7aab..85dabc9954 100644
--- a/deps/v8/src/ic/handler-configuration-inl.h
+++ b/deps/v8/src/ic/handler-configuration-inl.h
@@ -10,6 +10,8 @@
#include "src/field-index-inl.h"
#include "src/handles-inl.h"
#include "src/objects-inl.h"
+#include "src/objects/data-handler-inl.h"
+#include "src/objects/smi.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -17,10 +19,12 @@
namespace v8 {
namespace internal {
+OBJECT_CONSTRUCTORS_IMPL(LoadHandler, DataHandler)
+
CAST_ACCESSOR(LoadHandler)
// Decodes kind from Smi-handler.
-LoadHandler::Kind LoadHandler::GetHandlerKind(Smi* smi_handler) {
+LoadHandler::Kind LoadHandler::GetHandlerKind(Smi smi_handler) {
return KindBits::decode(smi_handler->value());
}
@@ -109,6 +113,8 @@ Handle<Smi> LoadHandler::LoadIndexedString(Isolate* isolate,
return handle(Smi::FromInt(config), isolate);
}
+OBJECT_CONSTRUCTORS_IMPL(StoreHandler, DataHandler)
+
CAST_ACCESSOR(StoreHandler)
Handle<Smi> StoreHandler::StoreGlobalProxy(Isolate* isolate) {
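
GetHandlerKind above now takes a Smi by value but still just decodes a bit
field out of the Smi payload. A self-contained sketch of that decode pattern,
with made-up field widths and kind values:

    #include <cstdint>
    #include <cstdio>

    template <typename T, int kShift, int kSize>
    struct BitField {
      static constexpr uint32_t kMask = ((uint32_t{1} << kSize) - 1) << kShift;
      static constexpr uint32_t encode(T v) {
        return static_cast<uint32_t>(v) << kShift;
      }
      static constexpr T decode(uint32_t value) {
        return static_cast<T>((value & kMask) >> kShift);
      }
    };

    enum class Kind : uint32_t { kElement, kNormal, kField, kConstant };
    using KindBits = BitField<Kind, 0, 4>;  // low 4 bits hold the kind

    int main() {
      // Pack a kind plus some other payload, then recover the kind.
      uint32_t raw_handler = KindBits::encode(Kind::kField) | (7u << 4);
      std::printf("kind=%u\n",
                  static_cast<uint32_t>(KindBits::decode(raw_handler)));
    }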
diff --git a/deps/v8/src/ic/handler-configuration.cc b/deps/v8/src/ic/handler-configuration.cc
index 73ab0de645..465d7beb21 100644
--- a/deps/v8/src/ic/handler-configuration.cc
+++ b/deps/v8/src/ic/handler-configuration.cc
@@ -4,7 +4,7 @@
#include "src/ic/handler-configuration.h"
-#include "src/code-stubs.h"
+#include "src/code-factory.h"
#include "src/ic/handler-configuration-inl.h"
#include "src/objects/data-handler-inl.h"
#include "src/objects/maybe-object.h"
@@ -165,10 +165,10 @@ Handle<Object> LoadHandler::LoadFullChain(Isolate* isolate,
}
// static
-KeyedAccessLoadMode LoadHandler::GetKeyedAccessLoadMode(MaybeObject* handler) {
+KeyedAccessLoadMode LoadHandler::GetKeyedAccessLoadMode(MaybeObject handler) {
DisallowHeapAllocation no_gc;
if (handler->IsSmi()) {
- int const raw_handler = handler->cast<Smi>()->value();
+ int const raw_handler = handler.ToSmi().value();
Kind const kind = KindBits::decode(raw_handler);
if ((kind == kElement || kind == kIndexedString) &&
AllowOutOfBoundsBits::decode(raw_handler)) {
@@ -182,12 +182,8 @@ KeyedAccessLoadMode LoadHandler::GetKeyedAccessLoadMode(MaybeObject* handler) {
Handle<Object> StoreHandler::StoreElementTransition(
Isolate* isolate, Handle<Map> receiver_map, Handle<Map> transition,
KeyedAccessStoreMode store_mode) {
- bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE;
- ElementsKind elements_kind = receiver_map->elements_kind();
- Handle<Code> stub = ElementsTransitionAndStoreStub(
- isolate, elements_kind, transition->elements_kind(),
- is_js_array, store_mode)
- .GetCode();
+ Handle<Code> stub =
+ CodeFactory::ElementsTransitionAndStore(isolate, store_mode).code();
Handle<Object> validity_cell =
Map::GetOrCreatePrototypeChainValidityCell(receiver_map, isolate);
Handle<StoreHandler> handler = isolate->factory()->NewStoreHandler(1);
diff --git a/deps/v8/src/ic/handler-configuration.h b/deps/v8/src/ic/handler-configuration.h
index 72ab68140e..1d9658e118 100644
--- a/deps/v8/src/ic/handler-configuration.h
+++ b/deps/v8/src/ic/handler-configuration.h
@@ -108,7 +108,7 @@ class LoadHandler final : public DataHandler {
kSmiValueSize - LookupOnReceiverBits::kNext> {};
// Decodes kind from Smi-handler.
- static inline Kind GetHandlerKind(Smi* smi_handler);
+ static inline Kind GetHandlerKind(Smi smi_handler);
// Creates a Smi-handler for loading a property from a slow object.
static inline Handle<Smi> LoadNormal(Isolate* isolate);
@@ -178,7 +178,9 @@ class LoadHandler final : public DataHandler {
KeyedAccessLoadMode load_mode);
// Decodes the KeyedAccessLoadMode from a {handler}.
- static KeyedAccessLoadMode GetKeyedAccessLoadMode(MaybeObject* handler);
+ static KeyedAccessLoadMode GetKeyedAccessLoadMode(MaybeObject handler);
+
+ OBJECT_CONSTRUCTORS(LoadHandler, DataHandler)
};
// A set of bit fields representing Smi handlers for stores and a HeapObject
@@ -298,6 +300,8 @@ class StoreHandler final : public DataHandler {
static inline Handle<Smi> StoreField(Isolate* isolate, Kind kind,
int descriptor, FieldIndex field_index,
Representation representation);
+
+ OBJECT_CONSTRUCTORS(StoreHandler, DataHandler)
};
} // namespace internal
diff --git a/deps/v8/src/ic/ic-inl.h b/deps/v8/src/ic/ic-inl.h
index 101703dc28..0616340a62 100644
--- a/deps/v8/src/ic/ic-inl.h
+++ b/deps/v8/src/ic/ic-inl.h
@@ -9,7 +9,8 @@
#include "src/assembler-inl.h"
#include "src/debug/debug.h"
-#include "src/macro-assembler.h"
+#include "src/frames-inl.h"
+#include "src/handles-inl.h"
#include "src/prototype.h"
namespace v8 {
@@ -47,9 +48,9 @@ void IC::update_receiver_map(Handle<Object> receiver) {
}
}
-bool IC::IsHandler(MaybeObject* object) {
- HeapObject* heap_object;
- return (object->IsSmi() && (object != nullptr)) ||
+bool IC::IsHandler(MaybeObject object) {
+ HeapObject heap_object;
+ return (object->IsSmi() && (object.ptr() != kNullAddress)) ||
(object->GetHeapObjectIfWeak(&heap_object) &&
(heap_object->IsMap() || heap_object->IsPropertyCell())) ||
(object->GetHeapObjectIfStrong(&heap_object) &&
@@ -60,13 +61,21 @@ bool IC::AddressIsDeoptimizedCode() const {
return AddressIsDeoptimizedCode(isolate(), address());
}
-
+// static
bool IC::AddressIsDeoptimizedCode(Isolate* isolate, Address address) {
- Code* host =
+ Code host =
isolate->inner_pointer_to_code_cache()->GetCacheEntry(address)->code;
return (host->kind() == Code::OPTIMIZED_FUNCTION &&
host->marked_for_deoptimization());
}
+
+bool IC::vector_needs_update() {
+ if (state() == NO_FEEDBACK) return false;
+ return (!vector_set_ &&
+ (state() != MEGAMORPHIC ||
+ nexus()->GetFeedbackExtra().ToSmi().value() != ELEMENT));
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/ic/ic-stats.cc b/deps/v8/src/ic/ic-stats.cc
index 0c33863d3d..2a62dcac9f 100644
--- a/deps/v8/src/ic/ic-stats.cc
+++ b/deps/v8/src/ic/ic-stats.cc
@@ -54,36 +54,38 @@ void ICStats::Dump() {
Reset();
}
-const char* ICStats::GetOrCacheScriptName(Script* script) {
- if (script_name_map_.find(script) != script_name_map_.end()) {
- return script_name_map_[script].get();
+const char* ICStats::GetOrCacheScriptName(Script script) {
+ Address script_ptr = script.ptr();
+ if (script_name_map_.find(script_ptr) != script_name_map_.end()) {
+ return script_name_map_[script_ptr].get();
}
- Object* script_name_raw = script->name();
+ Object script_name_raw = script->name();
if (script_name_raw->IsString()) {
- String* script_name = String::cast(script_name_raw);
+ String script_name = String::cast(script_name_raw);
char* c_script_name =
script_name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL)
.release();
script_name_map_.insert(
- std::make_pair(script, std::unique_ptr<char[]>(c_script_name)));
+ std::make_pair(script_ptr, std::unique_ptr<char[]>(c_script_name)));
return c_script_name;
} else {
script_name_map_.insert(
- std::make_pair(script, std::unique_ptr<char[]>(nullptr)));
+ std::make_pair(script_ptr, std::unique_ptr<char[]>(nullptr)));
return nullptr;
}
return nullptr;
}
-const char* ICStats::GetOrCacheFunctionName(JSFunction* function) {
- if (function_name_map_.find(function) != function_name_map_.end()) {
- return function_name_map_[function].get();
+const char* ICStats::GetOrCacheFunctionName(JSFunction function) {
+ Address function_ptr = function.ptr();
+ if (function_name_map_.find(function_ptr) != function_name_map_.end()) {
+ return function_name_map_[function_ptr].get();
}
- SharedFunctionInfo* shared = function->shared();
+ SharedFunctionInfo shared = function->shared();
ic_infos_[pos_].is_optimized = function->IsOptimized();
char* function_name = shared->DebugName()->ToCString().release();
function_name_map_.insert(
- std::make_pair(function, std::unique_ptr<char[]>(function_name)));
+ std::make_pair(function_ptr, std::unique_ptr<char[]>(function_name)));
return function_name;
}
diff --git a/deps/v8/src/ic/ic-stats.h b/deps/v8/src/ic/ic-stats.h
index a3015d0a6a..76c65c3862 100644
--- a/deps/v8/src/ic/ic-stats.h
+++ b/deps/v8/src/ic/ic-stats.h
@@ -10,6 +10,7 @@
#include <unordered_map>
#include <vector>
+#include "include/v8-internal.h" // For Address.
#include "src/base/atomicops.h"
#include "src/base/lazy-instance.h"
@@ -58,16 +59,18 @@ class ICStats {
DCHECK(pos_ >= 0 && pos_ < MAX_IC_INFO);
return ic_infos_[pos_];
}
- const char* GetOrCacheScriptName(Script* script);
- const char* GetOrCacheFunctionName(JSFunction* function);
+ const char* GetOrCacheScriptName(Script script);
+ const char* GetOrCacheFunctionName(JSFunction function);
V8_INLINE static ICStats* instance() { return instance_.Pointer(); }
private:
static base::LazyInstance<ICStats>::type instance_;
base::Atomic32 enabled_;
std::vector<ICInfo> ic_infos_;
- std::unordered_map<Script*, std::unique_ptr<char[]>> script_name_map_;
- std::unordered_map<JSFunction*, std::unique_ptr<char[]>> function_name_map_;
+ // Keys are Script pointers; uses raw Address to keep includes light.
+ std::unordered_map<Address, std::unique_ptr<char[]>> script_name_map_;
+ // Keys are JSFunction pointers; uses raw Address to keep includes light.
+ std::unordered_map<Address, std::unique_ptr<char[]>> function_name_map_;
int pos_;
};
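
With Script and JSFunction no longer being C++ pointer types, the caches key
on the tagged Address instead. A standalone sketch of such an Address-keyed
name cache (the class and method names are illustrative):

    #include <cstdint>
    #include <cstring>
    #include <memory>
    #include <unordered_map>

    using Address = uintptr_t;

    class NameCache {
     public:
      // Returns a cached C string for the object at `key`, computing it once.
      const char* GetOrCache(Address key, const char* computed) {
        auto it = map_.find(key);
        if (it != map_.end()) return it->second.get();
        std::unique_ptr<char[]> copy(new char[std::strlen(computed) + 1]);
        std::strcpy(copy.get(), computed);
        const char* result = copy.get();
        map_.emplace(key, std::move(copy));
        return result;
      }

     private:
      std::unordered_map<Address, std::unique_ptr<char[]>> map_;
    };

    int main() {
      NameCache cache;
      const char* a = cache.GetOrCache(0x1000, "foo.js");
      const char* b = cache.GetOrCache(0x1000, "ignored");  // cache hit
      return a == b ? 0 : 1;
    }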
diff --git a/deps/v8/src/ic/ic.cc b/deps/v8/src/ic/ic.cc
index 3ca62d0bb4..c607679a2a 100644
--- a/deps/v8/src/ic/ic.cc
+++ b/deps/v8/src/ic/ic.cc
@@ -10,6 +10,7 @@
#include "src/arguments-inl.h"
#include "src/ast/ast.h"
#include "src/base/bits.h"
+#include "src/code-factory.h"
#include "src/conversions.h"
#include "src/execution.h"
#include "src/field-type.h"
@@ -21,12 +22,13 @@
#include "src/ic/ic-stats.h"
#include "src/ic/stub-cache.h"
#include "src/isolate-inl.h"
-#include "src/macro-assembler.h"
#include "src/objects/api-callbacks.h"
#include "src/objects/data-handler-inl.h"
#include "src/objects/hash-table-inl.h"
+#include "src/objects/heap-number-inl.h"
#include "src/objects/js-array-inl.h"
#include "src/objects/module-inl.h"
+#include "src/objects/struct-inl.h"
#include "src/prototype.h"
#include "src/runtime-profiler.h"
#include "src/runtime/runtime-utils.h"
@@ -39,6 +41,8 @@ namespace internal {
char IC::TransitionMarkFromState(IC::State state) {
switch (state) {
+ case NO_FEEDBACK:
+ UNREACHABLE();
case UNINITIALIZED:
return '0';
case PREMONOMORPHIC:
@@ -93,7 +97,7 @@ void IC::TraceIC(const char* type, Handle<Object> name, State old_state,
State new_state) {
if (V8_LIKELY(!FLAG_ic_stats)) return;
- Map* map = nullptr;
+ Map map;
if (!receiver_map().is_null()) {
map = *receiver_map();
}
@@ -123,10 +127,10 @@ void IC::TraceIC(const char* type, Handle<Object> name, State old_state,
ic_info.type = keyed_prefix ? "Keyed" : "";
ic_info.type += type;
- Object* maybe_function =
- Memory<Object*>(fp_ + JavaScriptFrameConstants::kFunctionOffset);
+ Object maybe_function =
+ Object(Memory<Address>(fp_ + JavaScriptFrameConstants::kFunctionOffset));
DCHECK(maybe_function->IsJSFunction());
- JSFunction* function = JSFunction::cast(maybe_function);
+ JSFunction function = JSFunction::cast(maybe_function);
int code_offset = 0;
if (function->IsInterpreted()) {
code_offset = InterpretedFrame::GetBytecodeOffset(fp());
@@ -144,8 +148,8 @@ void IC::TraceIC(const char* type, Handle<Object> name, State old_state,
ic_info.state += TransitionMarkFromState(new_state);
ic_info.state += modifier;
ic_info.state += ")";
- ic_info.map = reinterpret_cast<void*>(map);
- if (map != nullptr) {
+ ic_info.map = reinterpret_cast<void*>(map.ptr());
+ if (!map.is_null()) {
ic_info.is_dictionary_map = map->is_dictionary_map();
ic_info.number_of_own_descriptors = map->NumberOfOwnDescriptors();
ic_info.instance_type = std::to_string(map->instance_type());
@@ -154,10 +158,11 @@ void IC::TraceIC(const char* type, Handle<Object> name, State old_state,
ICStats::instance()->End();
}
-IC::IC(Isolate* isolate, Handle<FeedbackVector> vector, FeedbackSlot slot)
+IC::IC(Isolate* isolate, Handle<FeedbackVector> vector, FeedbackSlot slot,
+ FeedbackSlotKind kind)
: isolate_(isolate),
vector_set_(false),
- kind_(FeedbackSlotKind::kInvalid),
+ kind_(kind),
target_maps_set_(false),
slow_stub_reason_(nullptr),
nexus_(vector, slot) {
@@ -194,12 +199,12 @@ IC::IC(Isolate* isolate, Handle<FeedbackVector> vector, FeedbackSlot slot)
constant_pool_address_ = constant_pool;
}
pc_address_ = StackFrame::ResolveReturnAddressLocation(pc_address);
- kind_ = nexus_.kind();
- state_ = nexus_.StateFromFeedback();
+ DCHECK_IMPLIES(!vector.is_null(), kind_ == nexus_.kind());
+ state_ = (vector.is_null()) ? NO_FEEDBACK : nexus_.StateFromFeedback();
old_state_ = state_;
}
-JSFunction* IC::GetHostFunction() const {
+JSFunction IC::GetHostFunction() const {
// Compute the JavaScript frame for the frame pointer of this IC
// structure. We need this to be able to find the function
// corresponding to the frame.
@@ -257,8 +262,8 @@ bool IC::ShouldRecomputeHandler(Handle<String> name) {
// would transition to.
if (maybe_handler_.is_null()) {
if (!receiver_map()->IsJSObjectMap()) return false;
- Map* first_map = FirstTargetMap();
- if (first_map == nullptr) return false;
+ Map first_map = FirstTargetMap();
+ if (first_map.is_null()) return false;
Handle<Map> old_map(first_map, isolate());
if (old_map->is_deprecated()) return true;
return IsMoreGeneralElementsKindTransition(old_map->elements_kind(),
@@ -272,7 +277,7 @@ bool IC::RecomputeHandlerForName(Handle<Object> name) {
if (is_keyed()) {
// Determine whether the failure is due to a name failure.
if (!name->IsName()) return false;
- Name* stub_name = nexus()->FindFirstName();
+ Name stub_name = nexus()->FindFirstName();
if (*name != stub_name) return false;
}
@@ -281,6 +286,7 @@ bool IC::RecomputeHandlerForName(Handle<Object> name) {
void IC::UpdateState(Handle<Object> receiver, Handle<Object> name) {
+ if (state() == NO_FEEDBACK) return;
update_receiver_map(receiver);
if (!name->IsString()) return;
if (state() != MONOMORPHIC && state() != POLYMORPHIC) return;
@@ -294,9 +300,8 @@ void IC::UpdateState(Handle<Object> receiver, Handle<Object> name) {
}
}
-
-MaybeHandle<Object> IC::TypeError(MessageTemplate::Template index,
- Handle<Object> object, Handle<Object> key) {
+MaybeHandle<Object> IC::TypeError(MessageTemplate index, Handle<Object> object,
+ Handle<Object> key) {
HandleScope scope(isolate());
THROW_NEW_ERROR(isolate(), NewTypeError(index, key, object), Object);
}
@@ -310,15 +315,15 @@ MaybeHandle<Object> IC::ReferenceError(Handle<Name> name) {
// static
void IC::OnFeedbackChanged(Isolate* isolate, FeedbackNexus* nexus,
- JSFunction* host_function, const char* reason) {
- FeedbackVector* vector = nexus->vector();
+ JSFunction host_function, const char* reason) {
+ FeedbackVector vector = nexus->vector();
FeedbackSlot slot = nexus->slot();
OnFeedbackChanged(isolate, vector, slot, host_function, reason);
}
// static
-void IC::OnFeedbackChanged(Isolate* isolate, FeedbackVector* vector,
- FeedbackSlot slot, JSFunction* host_function,
+void IC::OnFeedbackChanged(Isolate* isolate, FeedbackVector vector,
+ FeedbackSlot slot, JSFunction host_function,
const char* reason) {
if (FLAG_trace_opt_verbose) {
// TODO(leszeks): The host function is only needed for this print, we could
@@ -420,10 +425,12 @@ void IC::ConfigureVectorState(Handle<Name> name, MapHandles const& maps,
}
MaybeHandle<Object> LoadIC::Load(Handle<Object> object, Handle<Name> name) {
+ bool use_ic = (state() != NO_FEEDBACK) && FLAG_use_ic;
+
// If the object is undefined or null it's illegal to try to get any
// of its properties; throw a TypeError in that case.
if (object->IsNullOrUndefined(isolate())) {
- if (FLAG_use_ic && state() != PREMONOMORPHIC) {
+ if (use_ic && state() != PREMONOMORPHIC) {
// Ensure the IC state progresses.
TRACE_HANDLER_STATS(isolate(), LoadIC_NonReceiver);
update_receiver_map(object);
@@ -437,7 +444,7 @@ MaybeHandle<Object> LoadIC::Load(Handle<Object> object, Handle<Name> name) {
return TypeError(MessageTemplate::kNonObjectPropertyLoad, object, name);
}
- bool use_ic = MigrateDeprecated(object) ? false : FLAG_use_ic;
+ if (MigrateDeprecated(object)) use_ic = false;
if (state() != UNINITIALIZED) {
JSObject::MakePrototypesFast(object, kStartAtReceiver, isolate());
@@ -448,9 +455,11 @@ MaybeHandle<Object> LoadIC::Load(Handle<Object> object, Handle<Name> name) {
LookupForRead(isolate(), &it);
if (name->IsPrivate()) {
- if (name->IsPrivateField() && !it.IsFound()) {
- return TypeError(MessageTemplate::kInvalidPrivateFieldAccess, object,
- name);
+ if (name->IsPrivateName() && !it.IsFound()) {
+ Handle<String> name_string(String::cast(Symbol::cast(*name)->name()),
+ isolate());
+ return TypeError(MessageTemplate::kInvalidPrivateFieldRead, object,
+ name_string);
}
// IC handling of private symbols/fields lookup on JSProxy is not
@@ -491,17 +500,20 @@ MaybeHandle<Object> LoadGlobalIC::Load(Handle<Name> name) {
ScriptContextTable::LookupResult lookup_result;
if (ScriptContextTable::Lookup(isolate(), script_contexts, str_name,
&lookup_result)) {
- Handle<Object> result = FixedArray::get(
- *ScriptContextTable::GetContext(isolate(), script_contexts,
- lookup_result.context_index),
- lookup_result.slot_index, isolate());
+ Handle<Context> script_context = ScriptContextTable::GetContext(
+ isolate(), script_contexts, lookup_result.context_index);
+
+ Handle<Object> result(script_context->get(lookup_result.slot_index),
+ isolate());
+
if (result->IsTheHole(isolate())) {
// Do not install stubs and stay pre-monomorphic for
// uninitialized accesses.
return ReferenceError(name);
}
- if (FLAG_use_ic) {
+ bool use_ic = (state() != NO_FEEDBACK) && FLAG_use_ic;
+ if (use_ic) {
if (nexus()->ConfigureLexicalVarMode(lookup_result.context_index,
lookup_result.slot_index)) {
TRACE_HANDLER_STATS(isolate(), LoadGlobalIC_LoadScriptContextField);
@@ -617,15 +629,14 @@ void IC::CopyICToMegamorphicCache(Handle<Name> name) {
}
}
-
-bool IC::IsTransitionOfMonomorphicTarget(Map* source_map, Map* target_map) {
- if (source_map == nullptr) return true;
- if (target_map == nullptr) return false;
+bool IC::IsTransitionOfMonomorphicTarget(Map source_map, Map target_map) {
+ if (source_map.is_null()) return true;
+ if (target_map.is_null()) return false;
if (source_map->is_abandoned_prototype_map()) return false;
ElementsKind target_elements_kind = target_map->elements_kind();
bool more_general_transition = IsMoreGeneralElementsKindTransition(
source_map->elements_kind(), target_elements_kind);
- Map* transitioned_map = nullptr;
+ Map transitioned_map;
if (more_general_transition) {
MapHandles map_list;
map_list.push_back(handle(target_map, isolate_));
@@ -644,6 +655,8 @@ void IC::PatchCache(Handle<Name> name, const MaybeObjectHandle& handler) {
// Currently only load and store ICs support non-code handlers.
DCHECK(IsAnyLoad() || IsAnyStore());
switch (state()) {
+ case NO_FEEDBACK:
+ break;
case UNINITIALIZED:
case PREMONOMORPHIC:
UpdateMonomorphicIC(handler, name);
@@ -1074,8 +1087,9 @@ Handle<Object> KeyedLoadIC::LoadElementHandler(Handle<Map> receiver_map,
!receiver_map->GetIndexedInterceptor()->getter()->IsUndefined(
isolate()) &&
!receiver_map->GetIndexedInterceptor()->non_masking()) {
+ // TODO(jgruber): Update counter name.
TRACE_HANDLER_STATS(isolate(), KeyedLoadIC_LoadIndexedInterceptorStub);
- return LoadIndexedInterceptorStub(isolate()).GetCode();
+ return BUILTIN_CODE(isolate(), LoadIndexedInterceptorIC);
}
InstanceType instance_type = receiver_map->instance_type();
if (instance_type < FIRST_NONSTRING_TYPE) {
@@ -1092,8 +1106,9 @@ Handle<Object> KeyedLoadIC::LoadElementHandler(Handle<Map> receiver_map,
ElementsKind elements_kind = receiver_map->elements_kind();
if (IsSloppyArgumentsElementsKind(elements_kind)) {
+ // TODO(jgruber): Update counter name.
TRACE_HANDLER_STATS(isolate(), KeyedLoadIC_KeyedLoadSloppyArgumentsStub);
- return KeyedLoadSloppyArgumentsStub(isolate()).GetCode();
+ return BUILTIN_CODE(isolate(), KeyedLoadIC_SloppyArguments);
}
bool is_js_array = instance_type == JS_ARRAY_TYPE;
if (elements_kind == DICTIONARY_ELEMENTS) {
@@ -1129,9 +1144,9 @@ void KeyedLoadIC::LoadElementPolymorphicHandlers(
// among receiver_maps as unstable because the optimizing compilers may
// generate an elements kind transition for this kind of receivers.
if (receiver_map->is_stable()) {
- Map* tmap = receiver_map->FindElementsKindTransitionedMap(isolate(),
- *receiver_maps);
- if (tmap != nullptr) {
+ Map tmap = receiver_map->FindElementsKindTransitionedMap(isolate(),
+ *receiver_maps);
+ if (!tmap.is_null()) {
receiver_map->NotifyLeafMapLayoutChange(isolate());
}
}
@@ -1143,8 +1158,8 @@ void KeyedLoadIC::LoadElementPolymorphicHandlers(
namespace {
bool ConvertKeyToIndex(Handle<Object> receiver, Handle<Object> key,
- uint32_t* index) {
- if (!FLAG_use_ic) return false;
+ uint32_t* index, InlineCacheState state) {
+ if (!FLAG_use_ic || state == NO_FEEDBACK) return false;
if (receiver->IsAccessCheckNeeded() || receiver->IsJSValue()) return false;
// For regular JSReceiver or String receivers, the {key} must be a positive
@@ -1238,7 +1253,7 @@ MaybeHandle<Object> KeyedLoadIC::Load(Handle<Object> object,
ASSIGN_RETURN_ON_EXCEPTION(isolate(), load_handle,
LoadIC::Load(object, Handle<Name>::cast(key)),
Object);
- } else if (ConvertKeyToIndex(object, key, &index)) {
+ } else if (ConvertKeyToIndex(object, key, &index, state())) {
KeyedAccessLoadMode load_mode = GetLoadMode(isolate(), object, index);
UpdateLoadElement(Handle<HeapObject>::cast(object), load_mode);
if (is_vector_set()) {
@@ -1279,7 +1294,7 @@ bool StoreIC::LookupForWrite(LookupIterator* it, Handle<Object> value,
return true;
case LookupIterator::INTERCEPTOR: {
Handle<JSObject> holder = it->GetHolder<JSObject>();
- InterceptorInfo* info = holder->GetNamedInterceptor();
+ InterceptorInfo info = holder->GetNamedInterceptor();
if (it->HolderIsReceiverOrHiddenPrototype()) {
return !info->non_masking() && receiver.is_identical_to(holder) &&
!info->setter()->IsUndefined(isolate());
@@ -1350,8 +1365,8 @@ MaybeHandle<Object> StoreGlobalIC::Store(Handle<Name> name,
return TypeError(MessageTemplate::kConstAssign, global, name);
}
- Handle<Object> previous_value =
- FixedArray::get(*script_context, lookup_result.slot_index, isolate());
+ Handle<Object> previous_value(script_context->get(lookup_result.slot_index),
+ isolate());
if (previous_value->IsTheHole(isolate())) {
// Do not install stubs and stay pre-monomorphic for
@@ -1359,7 +1374,8 @@ MaybeHandle<Object> StoreGlobalIC::Store(Handle<Name> name,
return ReferenceError(name);
}
- if (FLAG_use_ic) {
+ bool use_ic = (state() != NO_FEEDBACK) && FLAG_use_ic;
+ if (use_ic) {
if (nexus()->ConfigureLexicalVarMode(lookup_result.context_index,
lookup_result.slot_index)) {
TRACE_HANDLER_STATS(isolate(), StoreGlobalIC_StoreScriptContextField);
@@ -1392,10 +1408,11 @@ MaybeHandle<Object> StoreIC::Store(Handle<Object> object, Handle<Name> name,
return result;
}
+ bool use_ic = (state() != NO_FEEDBACK) && FLAG_use_ic;
// If the object is undefined or null it's illegal to try to set any
// properties on it; throw a TypeError in that case.
if (object->IsNullOrUndefined(isolate())) {
- if (FLAG_use_ic && state() != PREMONOMORPHIC) {
+ if (use_ic && state() != PREMONOMORPHIC) {
// Ensure the IC state progresses.
TRACE_HANDLER_STATS(isolate(), StoreIC_NonReceiver);
update_receiver_map(object);
@@ -1409,12 +1426,13 @@ MaybeHandle<Object> StoreIC::Store(Handle<Object> object, Handle<Name> name,
JSObject::MakePrototypesFast(object, kStartAtPrototype, isolate());
}
LookupIterator it(isolate(), object, name);
- bool use_ic = FLAG_use_ic;
if (name->IsPrivate()) {
- if (name->IsPrivateField() && !it.IsFound()) {
- return TypeError(MessageTemplate::kInvalidPrivateFieldAccess, object,
- name);
+ if (name->IsPrivateName() && !it.IsFound()) {
+ Handle<String> name_string(String::cast(Symbol::cast(*name)->name()),
+ isolate());
+ return TypeError(MessageTemplate::kInvalidPrivateFieldWrite, object,
+ name_string);
}
// IC handling of private fields/symbols stores on JSProxy is not
@@ -1501,9 +1519,9 @@ MaybeObjectHandle StoreIC::ComputeHandler(LookupIterator* lookup) {
USE(holder);
DCHECK(!holder->GetNamedInterceptor()->setter()->IsUndefined(isolate()));
+ // TODO(jgruber): Update counter name.
TRACE_HANDLER_STATS(isolate(), StoreIC_StoreInterceptorStub);
- StoreInterceptorStub stub(isolate());
- return MaybeObjectHandle(stub.GetCode());
+ return MaybeObjectHandle(BUILTIN_CODE(isolate(), StoreInterceptorIC));
}
case LookupIterator::ACCESSOR: {
@@ -1840,39 +1858,40 @@ Handle<Object> KeyedStoreIC::StoreElementHandler(
}
// TODO(ishell): move to StoreHandler::StoreElement().
- ElementsKind elements_kind = receiver_map->elements_kind();
- bool is_jsarray = receiver_map->instance_type() == JS_ARRAY_TYPE;
- Handle<Code> stub;
+ Handle<Code> code;
if (receiver_map->has_sloppy_arguments_elements()) {
+ // TODO(jgruber): Update counter name.
TRACE_HANDLER_STATS(isolate(), KeyedStoreIC_KeyedStoreSloppyArgumentsStub);
- stub = KeyedStoreSloppyArgumentsStub(isolate(), store_mode).GetCode();
+ code =
+ CodeFactory::KeyedStoreIC_SloppyArguments(isolate(), store_mode).code();
} else if (receiver_map->has_fast_elements() ||
receiver_map->has_fixed_typed_array_elements()) {
TRACE_HANDLER_STATS(isolate(), KeyedStoreIC_StoreFastElementStub);
- stub =
- StoreFastElementStub(isolate(), is_jsarray, elements_kind, store_mode)
- .GetCode();
- if (receiver_map->has_fixed_typed_array_elements()) return stub;
+ code = CodeFactory::StoreFastElementIC(isolate(), store_mode).code();
+ if (receiver_map->has_fixed_typed_array_elements()) return code;
} else if (IsStoreInArrayLiteralICKind(kind())) {
+ // TODO(jgruber): Update counter name.
TRACE_HANDLER_STATS(isolate(), StoreInArrayLiteralIC_SlowStub);
- stub = StoreInArrayLiteralSlowStub(isolate(), store_mode).GetCode();
+ code =
+ CodeFactory::StoreInArrayLiteralIC_Slow(isolate(), store_mode).code();
} else {
+ // TODO(jgruber): Update counter name.
TRACE_HANDLER_STATS(isolate(), KeyedStoreIC_StoreElementStub);
- DCHECK_EQ(DICTIONARY_ELEMENTS, elements_kind);
- stub = StoreSlowElementStub(isolate(), store_mode).GetCode();
+ DCHECK_EQ(DICTIONARY_ELEMENTS, receiver_map->elements_kind());
+ code = CodeFactory::KeyedStoreIC_Slow(isolate(), store_mode).code();
}
- if (IsStoreInArrayLiteralICKind(kind())) return stub;
+ if (IsStoreInArrayLiteralICKind(kind())) return code;
Handle<Object> validity_cell =
Map::GetOrCreatePrototypeChainValidityCell(receiver_map, isolate());
if (validity_cell->IsSmi()) {
// There's no prototype validity cell to check, so we can just use the stub.
- return stub;
+ return code;
}
Handle<StoreHandler> handler = isolate()->factory()->NewStoreHandler(0);
handler->set_validity_cell(*validity_cell);
- handler->set_smi_handler(*stub);
+ handler->set_smi_handler(*code);
return handler;
}
@@ -1904,9 +1923,9 @@ void KeyedStoreIC::StoreElementPolymorphicHandlers(
} else {
{
- Map* tmap = receiver_map->FindElementsKindTransitionedMap(
+ Map tmap = receiver_map->FindElementsKindTransitionedMap(
isolate(), *receiver_maps);
- if (tmap != nullptr) {
+ if (!tmap.is_null()) {
if (receiver_map->is_stable()) {
receiver_map->NotifyLeafMapLayoutChange(isolate());
}
@@ -2019,8 +2038,9 @@ MaybeHandle<Object> KeyedStoreIC::Store(Handle<Object> object,
JSObject::MakePrototypesFast(object, kStartAtPrototype, isolate());
- bool use_ic = FLAG_use_ic && !object->IsStringWrapper() &&
- !object->IsAccessCheckNeeded() && !object->IsJSGlobalProxy();
+ bool use_ic = (state() != NO_FEEDBACK) && FLAG_use_ic &&
+ !object->IsStringWrapper() && !object->IsAccessCheckNeeded() &&
+ !object->IsJSGlobalProxy();
if (use_ic && !object->IsSmi()) {
// Don't use ICs for maps of the objects in Array's prototype chain. We
// expect to be able to trap element sets to objects with those maps in
@@ -2120,7 +2140,7 @@ void StoreInArrayLiteralIC::Store(Handle<JSArray> array, Handle<Object> index,
DCHECK(!array->map()->IsMapInArrayPrototypeChain(isolate()));
DCHECK(index->IsNumber());
- if (!FLAG_use_ic || MigrateDeprecated(array)) {
+ if (!FLAG_use_ic || state() == NO_FEEDBACK || MigrateDeprecated(array)) {
StoreOwnElement(isolate(), array, index, value);
TraceIC("StoreInArrayLiteralIC", index);
return;
@@ -2155,6 +2175,20 @@ void StoreInArrayLiteralIC::Store(Handle<JSArray> array, Handle<Object> index,
// ----------------------------------------------------------------------------
// Static IC stub generators.
//
+namespace {
+
+// TODO(8580): Compute the language mode lazily to avoid the expensive
+// computation of language mode here.
+LanguageMode GetLanguageMode(Handle<FeedbackVector> vector, Context context) {
+ LanguageMode language_mode = vector->shared_function_info()->language_mode();
+ if (context->scope_info()->language_mode() > language_mode) {
+ return context->scope_info()->language_mode();
+ }
+ return language_mode;
+}
+
+} // namespace
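
The helper above picks the stricter of the function's own language mode and
the context's. Reduced to its core, assuming enum values where stricter
compares greater:

    #include <algorithm>
    #include <cstdio>

    enum class LanguageMode { kSloppy = 0, kStrict = 1 };

    LanguageMode Effective(LanguageMode function_mode,
                           LanguageMode context_mode) {
      // The effective mode is whichever of the two is stricter.
      return std::max(function_mode, context_mode);
    }

    int main() {
      auto m = Effective(LanguageMode::kSloppy, LanguageMode::kStrict);
      std::printf("%d\n", static_cast<int>(m));  // 1: strict wins
    }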
RUNTIME_FUNCTION(Runtime_LoadIC_Miss) {
HandleScope scope(isolate);
@@ -2163,27 +2197,39 @@ RUNTIME_FUNCTION(Runtime_LoadIC_Miss) {
Handle<Object> receiver = args.at(0);
Handle<Name> key = args.at<Name>(1);
Handle<Smi> slot = args.at<Smi>(2);
- Handle<FeedbackVector> vector = args.at<FeedbackVector>(3);
+ Handle<HeapObject> maybe_vector = args.at<HeapObject>(3);
FeedbackSlot vector_slot = FeedbackVector::ToSlot(slot->value());
+
+ Handle<FeedbackVector> vector = Handle<FeedbackVector>();
+ if (!maybe_vector->IsUndefined()) {
+ DCHECK(maybe_vector->IsFeedbackVector());
+ vector = Handle<FeedbackVector>::cast(maybe_vector);
+ }
// A monomorphic or polymorphic KeyedLoadIC with a string key can call the
// LoadIC miss handler if the handler misses. Since the vector Nexus is
// set up outside the IC, handle that here.
- FeedbackSlotKind kind = vector->GetKind(vector_slot);
+ // The only case where we call without a vector is from the LoadNamedProperty
+ // bytecode handler. Also, when there is no feedback vector, there is no
+ // difference between the LoadProperty and LoadKeyed kinds.
+ FeedbackSlotKind kind = FeedbackSlotKind::kLoadProperty;
+ if (!vector.is_null()) {
+ kind = vector->GetKind(vector_slot);
+ }
if (IsLoadICKind(kind)) {
- LoadIC ic(isolate, vector, vector_slot);
+ LoadIC ic(isolate, vector, vector_slot, kind);
ic.UpdateState(receiver, key);
RETURN_RESULT_OR_FAILURE(isolate, ic.Load(receiver, key));
} else if (IsLoadGlobalICKind(kind)) {
DCHECK_EQ(isolate->native_context()->global_proxy(), *receiver);
receiver = isolate->global_object();
- LoadGlobalIC ic(isolate, vector, vector_slot);
+ LoadGlobalIC ic(isolate, vector, vector_slot, kind);
ic.UpdateState(receiver, key);
RETURN_RESULT_OR_FAILURE(isolate, ic.Load(key));
} else {
DCHECK(IsKeyedLoadICKind(kind));
- KeyedLoadIC ic(isolate, vector, vector_slot);
+ KeyedLoadIC ic(isolate, vector, vector_slot, kind);
ic.UpdateState(receiver, key);
RETURN_RESULT_OR_FAILURE(isolate, ic.Load(receiver, key));
}
@@ -2191,15 +2237,26 @@ RUNTIME_FUNCTION(Runtime_LoadIC_Miss) {
RUNTIME_FUNCTION(Runtime_LoadGlobalIC_Miss) {
HandleScope scope(isolate);
- DCHECK_EQ(3, args.length());
+ DCHECK_EQ(4, args.length());
// Runtime functions don't follow the IC's calling convention.
Handle<JSGlobalObject> global = isolate->global_object();
Handle<String> name = args.at<String>(0);
Handle<Smi> slot = args.at<Smi>(1);
- Handle<FeedbackVector> vector = args.at<FeedbackVector>(2);
+ Handle<HeapObject> maybe_vector = args.at<HeapObject>(2);
+ CONVERT_INT32_ARG_CHECKED(typeof_value, 3);
+ TypeofMode typeof_mode = static_cast<TypeofMode>(typeof_value);
FeedbackSlot vector_slot = FeedbackVector::ToSlot(slot->value());
- LoadGlobalIC ic(isolate, vector, vector_slot);
+ Handle<FeedbackVector> vector = Handle<FeedbackVector>();
+ if (!maybe_vector->IsUndefined()) {
+ DCHECK(maybe_vector->IsFeedbackVector());
+ vector = Handle<FeedbackVector>::cast(maybe_vector);
+ }
+
+ FeedbackSlotKind kind = (typeof_mode == TypeofMode::INSIDE_TYPEOF)
+ ? FeedbackSlotKind::kLoadGlobalInsideTypeof
+ : FeedbackSlotKind::kLoadGlobalNotInsideTypeof;
+ LoadGlobalIC ic(isolate, vector, vector_slot, kind);
ic.UpdateState(global, name);
Handle<Object> result;
@@ -2221,8 +2278,8 @@ RUNTIME_FUNCTION(Runtime_LoadGlobalIC_Slow) {
&lookup_result)) {
Handle<Context> script_context = ScriptContextTable::GetContext(
isolate, script_contexts, lookup_result.context_index);
- Handle<Object> result =
- FixedArray::get(*script_context, lookup_result.slot_index, isolate);
+ Handle<Object> result(script_context->get(lookup_result.slot_index),
+ isolate);
if (*result == ReadOnlyRoots(isolate).the_hole_value()) {
THROW_NEW_ERROR_RETURN_FAILURE(
isolate, NewReferenceError(MessageTemplate::kNotDefined, name));
@@ -2258,9 +2315,15 @@ RUNTIME_FUNCTION(Runtime_KeyedLoadIC_Miss) {
Handle<Object> receiver = args.at(0);
Handle<Object> key = args.at(1);
Handle<Smi> slot = args.at<Smi>(2);
- Handle<FeedbackVector> vector = args.at<FeedbackVector>(3);
+ Handle<HeapObject> maybe_vector = args.at<HeapObject>(3);
+
+ Handle<FeedbackVector> vector = Handle<FeedbackVector>();
+ if (!maybe_vector->IsUndefined()) {
+ DCHECK(maybe_vector->IsFeedbackVector());
+ vector = Handle<FeedbackVector>::cast(maybe_vector);
+ }
FeedbackSlot vector_slot = FeedbackVector::ToSlot(slot->value());
- KeyedLoadIC ic(isolate, vector, vector_slot);
+ KeyedLoadIC ic(isolate, vector, vector_slot, FeedbackSlotKind::kLoadKeyed);
ic.UpdateState(receiver, key);
RETURN_RESULT_OR_FAILURE(isolate, ic.Load(receiver, key));
}
@@ -2274,26 +2337,51 @@ RUNTIME_FUNCTION(Runtime_StoreIC_Miss) {
Handle<FeedbackVector> vector = args.at<FeedbackVector>(2);
Handle<Object> receiver = args.at(3);
Handle<Name> key = args.at<Name>(4);
+
FeedbackSlot vector_slot = FeedbackVector::ToSlot(slot->value());
FeedbackSlotKind kind = vector->GetKind(vector_slot);
+ LanguageMode language_mode = GetLanguageMode(vector, isolate->context());
if (IsStoreICKind(kind) || IsStoreOwnICKind(kind)) {
- StoreIC ic(isolate, vector, vector_slot);
+ StoreIC ic(isolate, vector, vector_slot, kind, language_mode);
ic.UpdateState(receiver, key);
RETURN_RESULT_OR_FAILURE(isolate, ic.Store(receiver, key, value));
} else if (IsStoreGlobalICKind(kind)) {
DCHECK_EQ(isolate->native_context()->global_proxy(), *receiver);
receiver = isolate->global_object();
- StoreGlobalIC ic(isolate, vector, vector_slot);
+ StoreGlobalIC ic(isolate, vector, vector_slot, kind, language_mode);
ic.UpdateState(receiver, key);
RETURN_RESULT_OR_FAILURE(isolate, ic.Store(key, value));
} else {
DCHECK(IsKeyedStoreICKind(kind));
- KeyedStoreIC ic(isolate, vector, vector_slot);
+ KeyedStoreIC ic(isolate, vector, vector_slot, kind, language_mode);
ic.UpdateState(receiver, key);
RETURN_RESULT_OR_FAILURE(isolate, ic.Store(receiver, key, value));
}
}
+RUNTIME_FUNCTION(Runtime_StoreICNoFeedback_Miss) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(5, args.length());
+ Handle<Object> value = args.at(0);
+ Handle<Object> receiver = args.at(1);
+ Handle<Name> key = args.at<Name>(2);
+ CONVERT_LANGUAGE_MODE_ARG_CHECKED(language_mode, 3);
+ CONVERT_INT32_ARG_CHECKED(is_own_property_value, 4);
+ NamedPropertyType property_type =
+ static_cast<NamedPropertyType>(is_own_property_value);
+
+ FeedbackSlotKind kind = (language_mode == LanguageMode::kStrict)
+ ? FeedbackSlotKind::kStoreNamedStrict
+ : FeedbackSlotKind::kStoreNamedSloppy;
+ if (property_type == NamedPropertyType::kOwn) {
+ language_mode = LanguageMode::kStrict;
+ kind = FeedbackSlotKind::kStoreOwnNamed;
+ }
+ StoreIC ic(isolate, Handle<FeedbackVector>(), FeedbackSlot(), kind,
+ language_mode);
+ RETURN_RESULT_OR_FAILURE(isolate, ic.Store(receiver, key, value));
+}
+
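
A compilable sketch of the slot-kind selection in Runtime_StoreICNoFeedback_Miss above; the enums are stand-ins mirroring the patch, and the notable wrinkle is that own-property definitions force strict semantics:

    // Stand-in enums mirroring the patch; real definitions live in V8 headers.
    enum class LanguageMode { kSloppy, kStrict };
    enum class NamedPropertyType : bool { kNotOwn, kOwn };
    enum class FeedbackSlotKind {
      kStoreNamedSloppy, kStoreNamedStrict, kStoreOwnNamed
    };

    FeedbackSlotKind SelectStoreKind(LanguageMode* language_mode,
                                     NamedPropertyType property_type) {
      FeedbackSlotKind kind = (*language_mode == LanguageMode::kStrict)
                                  ? FeedbackSlotKind::kStoreNamedStrict
                                  : FeedbackSlotKind::kStoreNamedSloppy;
      if (property_type == NamedPropertyType::kOwn) {
        // Own-property definitions always store with strict semantics,
        // regardless of the surrounding language mode.
        *language_mode = LanguageMode::kStrict;
        kind = FeedbackSlotKind::kStoreOwnNamed;
      }
      return kind;
    }
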
RUNTIME_FUNCTION(Runtime_StoreGlobalIC_Miss) {
HandleScope scope(isolate);
DCHECK_EQ(4, args.length());
@@ -2302,24 +2390,43 @@ RUNTIME_FUNCTION(Runtime_StoreGlobalIC_Miss) {
Handle<Smi> slot = args.at<Smi>(1);
Handle<FeedbackVector> vector = args.at<FeedbackVector>(2);
Handle<Name> key = args.at<Name>(3);
+
FeedbackSlot vector_slot = FeedbackVector::ToSlot(slot->value());
- StoreGlobalIC ic(isolate, vector, vector_slot);
+ FeedbackSlotKind kind = vector->GetKind(vector_slot);
+ LanguageMode language_mode = GetLanguageMode(vector, isolate->context());
+ StoreGlobalIC ic(isolate, vector, vector_slot, kind, language_mode);
Handle<JSGlobalObject> global = isolate->global_object();
ic.UpdateState(global, key);
RETURN_RESULT_OR_FAILURE(isolate, ic.Store(key, value));
}
+RUNTIME_FUNCTION(Runtime_StoreGlobalICNoFeedback_Miss) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(3, args.length());
+ // Runtime functions don't follow the IC's calling convention.
+ Handle<Object> value = args.at(0);
+ Handle<Name> key = args.at<Name>(1);
+ CONVERT_LANGUAGE_MODE_ARG_CHECKED(language_mode, 2);
+
+ FeedbackSlotKind kind = (language_mode == LanguageMode::kStrict)
+ ? FeedbackSlotKind::kStoreGlobalStrict
+ : FeedbackSlotKind::kStoreGlobalSloppy;
+ StoreGlobalIC ic(isolate, Handle<FeedbackVector>(), FeedbackSlot(), kind,
+ language_mode);
+ RETURN_RESULT_OR_FAILURE(isolate, ic.Store(key, value));
+}
+
RUNTIME_FUNCTION(Runtime_StoreGlobalIC_Slow) {
HandleScope scope(isolate);
DCHECK_EQ(5, args.length());
// Runtime functions don't follow the IC's calling convention.
Handle<Object> value = args.at(0);
- Handle<Smi> slot = args.at<Smi>(1);
Handle<FeedbackVector> vector = args.at<FeedbackVector>(2);
CONVERT_ARG_HANDLE_CHECKED(String, name, 4);
#ifdef DEBUG
{
+ Handle<Smi> slot = args.at<Smi>(1);
FeedbackSlot vector_slot = FeedbackVector::ToSlot(slot->value());
FeedbackSlotKind slot_kind = vector->GetKind(vector_slot);
DCHECK(IsStoreGlobalICKind(slot_kind));
@@ -2343,8 +2450,8 @@ RUNTIME_FUNCTION(Runtime_StoreGlobalIC_Slow) {
isolate, NewTypeError(MessageTemplate::kConstAssign, global, name));
}
- Handle<Object> previous_value =
- FixedArray::get(*script_context, lookup_result.slot_index, isolate);
+ Handle<Object> previous_value(script_context->get(lookup_result.slot_index),
+ isolate);
if (previous_value->IsTheHole(isolate)) {
THROW_NEW_ERROR_RETURN_FAILURE(
@@ -2355,8 +2462,7 @@ RUNTIME_FUNCTION(Runtime_StoreGlobalIC_Slow) {
return *value;
}
- FeedbackSlot vector_slot = FeedbackVector::ToSlot(slot->value());
- LanguageMode language_mode = vector->GetLanguageMode(vector_slot);
+ LanguageMode language_mode = GetLanguageMode(vector, isolate->context());
RETURN_RESULT_OR_FAILURE(
isolate,
Runtime::SetObjectProperty(isolate, global, name, value, language_mode,
@@ -2372,13 +2478,15 @@ RUNTIME_FUNCTION(Runtime_KeyedStoreIC_Miss) {
Handle<FeedbackVector> vector = args.at<FeedbackVector>(2);
Handle<Object> receiver = args.at(3);
Handle<Object> key = args.at(4);
+
+ LanguageMode language_mode = GetLanguageMode(vector, isolate->context());
FeedbackSlot vector_slot = FeedbackVector::ToSlot(slot->value());
FeedbackSlotKind kind = vector->GetKind(vector_slot);
// The elements store stubs miss into this function, but they are shared by
// different ICs.
if (IsKeyedStoreICKind(kind)) {
- KeyedStoreIC ic(isolate, vector, vector_slot);
+ KeyedStoreIC ic(isolate, vector, vector_slot, kind, language_mode);
ic.UpdateState(receiver, key);
RETURN_RESULT_OR_FAILURE(isolate, ic.Store(receiver, key, value));
} else {
@@ -2392,19 +2500,54 @@ RUNTIME_FUNCTION(Runtime_KeyedStoreIC_Miss) {
}
}
-RUNTIME_FUNCTION(Runtime_KeyedStoreIC_Slow) {
+RUNTIME_FUNCTION(Runtime_KeyedStoreICNoFeedback_Miss) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(4, args.length());
+ // Runtime functions don't follow the IC's calling convention.
+ Handle<Object> value = args.at(0);
+ Handle<Object> receiver = args.at(1);
+ Handle<Object> key = args.at(2);
+ CONVERT_LANGUAGE_MODE_ARG_CHECKED(language_mode, 3);
+
+ FeedbackSlotKind kind = (language_mode == LanguageMode::kStrict)
+ ? FeedbackSlotKind::kStoreKeyedStrict
+ : FeedbackSlotKind::kStoreKeyedSloppy;
+ KeyedStoreIC ic(isolate, Handle<FeedbackVector>(), FeedbackSlot(), kind,
+ language_mode);
+ RETURN_RESULT_OR_FAILURE(isolate, ic.Store(receiver, key, value));
+}
+
+RUNTIME_FUNCTION(Runtime_StoreInArrayLiteralIC_Miss) {
HandleScope scope(isolate);
DCHECK_EQ(5, args.length());
// Runtime functions don't follow the IC's calling convention.
Handle<Object> value = args.at(0);
Handle<Smi> slot = args.at<Smi>(1);
+ Handle<HeapObject> maybe_vector = args.at<HeapObject>(2);
+ Handle<Object> receiver = args.at(3);
+ Handle<Object> key = args.at(4);
+ Handle<FeedbackVector> vector = Handle<FeedbackVector>();
+ if (!maybe_vector->IsUndefined()) {
+ DCHECK(maybe_vector->IsFeedbackVector());
+ vector = Handle<FeedbackVector>::cast(maybe_vector);
+ }
+ DCHECK(receiver->IsJSArray());
+ DCHECK(key->IsNumber());
+ FeedbackSlot vector_slot = FeedbackVector::ToSlot(slot->value());
+ StoreInArrayLiteralIC ic(isolate, vector, vector_slot);
+ ic.Store(Handle<JSArray>::cast(receiver), key, value);
+ return *value;
+}
+
+RUNTIME_FUNCTION(Runtime_KeyedStoreIC_Slow) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(5, args.length());
+ // Runtime functions don't follow the IC's calling convention.
+ Handle<Object> value = args.at(0);
Handle<FeedbackVector> vector = args.at<FeedbackVector>(2);
Handle<Object> object = args.at(3);
Handle<Object> key = args.at(4);
- FeedbackSlot vector_slot = FeedbackVector::ToSlot(slot->value());
- FeedbackSlotKind kind = vector->GetKind(vector_slot);
- DCHECK(IsStoreICKind(kind) || IsKeyedStoreICKind(kind));
- LanguageMode language_mode = GetLanguageModeFromSlotKind(kind);
+ LanguageMode language_mode = GetLanguageMode(vector, isolate->context());
RETURN_RESULT_OR_FAILURE(
isolate,
Runtime::SetObjectProperty(isolate, object, key, value, language_mode,
@@ -2445,7 +2588,7 @@ RUNTIME_FUNCTION(Runtime_ElementsTransitionAndStoreIC_Miss) {
return *value;
} else {
DCHECK(IsKeyedStoreICKind(kind) || IsStoreICKind(kind));
- LanguageMode language_mode = GetLanguageModeFromSlotKind(kind);
+ LanguageMode language_mode = GetLanguageMode(vector, isolate->context());
RETURN_RESULT_OR_FAILURE(
isolate,
Runtime::SetObjectProperty(isolate, object, key, value, language_mode,
@@ -2462,12 +2605,12 @@ static bool CanFastCloneObject(Handle<Map> map) {
return false;
}
- DescriptorArray* descriptors = map->instance_descriptors();
+ DescriptorArray descriptors = map->instance_descriptors();
for (int i = 0; i < map->NumberOfOwnDescriptors(); i++) {
PropertyDetails details = descriptors->GetDetails(i);
- Name* key = descriptors->GetKey(i);
+ Name key = descriptors->GetKey(i);
if (details.kind() != kData || !details.IsEnumerable() ||
- key->IsPrivateField()) {
+ key->IsPrivateName()) {
return false;
}
}
@@ -2489,7 +2632,7 @@ static Handle<Map> FastCloneObjectMap(Isolate* isolate,
initial_map->GetInObjectProperties()) {
int inobject_properties = source_map->GetInObjectProperties();
int instance_size =
- JSObject::kHeaderSize + kPointerSize * inobject_properties;
+ JSObject::kHeaderSize + kTaggedSize * inobject_properties;
int unused = source_map->UnusedInObjectProperties();
DCHECK(instance_size <= JSObject::kMaxInstanceSize);
map = Map::CopyInitialMap(isolate, map, instance_size, inobject_properties,
@@ -2519,7 +2662,7 @@ static Handle<Map> FastCloneObjectMap(Isolate* isolate,
isolate, source_descriptors, size, slack);
Handle<LayoutDescriptor> layout =
LayoutDescriptor::New(isolate, map, descriptors, size);
- map->InitializeDescriptors(*descriptors, *layout);
+ map->InitializeDescriptors(isolate, *descriptors, *layout);
map->CopyUnusedPropertyFieldsAdjustedForInstanceSize(*source_map);
// Update bitfields
@@ -2560,7 +2703,14 @@ RUNTIME_FUNCTION(Runtime_CloneObjectIC_Miss) {
MigrateDeprecated(source);
FeedbackSlot slot = FeedbackVector::ToSlot(args.smi_at(2));
- Handle<FeedbackVector> vector = args.at<FeedbackVector>(3);
+ Handle<HeapObject> maybe_vector = args.at<HeapObject>(3);
+ if (maybe_vector->IsUndefined()) {
+ RETURN_RESULT_OR_FAILURE(isolate,
+ CloneObjectSlowPath(isolate, source, flags));
+ }
+
+ DCHECK(maybe_vector->IsFeedbackVector());
+ Handle<FeedbackVector> vector = Handle<FeedbackVector>::cast(maybe_vector);
FeedbackNexus nexus(vector, slot);
Handle<Map> source_map(source->map(), isolate);
@@ -2578,15 +2728,6 @@ RUNTIME_FUNCTION(Runtime_CloneObjectIC_Miss) {
return *result_map;
}
-RUNTIME_FUNCTION(Runtime_CloneObjectIC_Slow) {
- HandleScope scope(isolate);
- DCHECK_EQ(2, args.length());
- Handle<HeapObject> source = args.at<HeapObject>(0);
- int flags = args.smi_at(1);
- RETURN_RESULT_OR_FAILURE(isolate,
- CloneObjectSlowPath(isolate, source, flags));
-}
-
RUNTIME_FUNCTION(Runtime_StoreCallbackProperty) {
Handle<JSObject> receiver = args.at<JSObject>(0);
Handle<JSObject> holder = args.at<JSObject>(1);
@@ -2638,7 +2779,7 @@ RUNTIME_FUNCTION(Runtime_LoadAccessorProperty) {
int handler_kind = args.smi_at(1);
Handle<CallHandlerInfo> call_handler_info = args.at<CallHandlerInfo>(2);
- Object* holder = *receiver;
+ Object holder = *receiver;
if (handler_kind == LoadHandler::kApiGetterHolderIsPrototype) {
holder = receiver->map()->prototype();
} else {
@@ -2647,7 +2788,7 @@ RUNTIME_FUNCTION(Runtime_LoadAccessorProperty) {
// Call the accessor without additional arguments.
FunctionCallbackArguments custom(isolate, call_handler_info->data(),
- *receiver, holder, nullptr, nullptr, 0);
+ *receiver, holder, HeapObject(), nullptr, 0);
Handle<Object> result_handle = custom.Call(*call_handler_info);
RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
if (result_handle.is_null()) return ReadOnlyRoots(isolate).undefined_value();
@@ -2718,7 +2859,7 @@ RUNTIME_FUNCTION(Runtime_StorePropertyWithInterceptor) {
Handle<JSObject> receiver = args.at<JSObject>(3);
Handle<Name> name = args.at<Name>(4);
FeedbackSlot vector_slot = FeedbackVector::ToSlot(slot->value());
- LanguageMode language_mode = vector->GetLanguageMode(vector_slot);
+ LanguageMode language_mode = GetLanguageMode(vector, isolate->context());
// TODO(ishell): Cache interceptor_holder in the store handler like we do
// for LoadHandler::kInterceptor case.
diff --git a/deps/v8/src/ic/ic.h b/deps/v8/src/ic/ic.h
index 05bde1ff61..9ed469410f 100644
--- a/deps/v8/src/ic/ic.h
+++ b/deps/v8/src/ic/ic.h
@@ -11,14 +11,16 @@
#include "src/heap/factory.h"
#include "src/ic/stub-cache.h"
#include "src/isolate.h"
-#include "src/macro-assembler.h"
-#include "src/messages.h"
+#include "src/message-template.h"
#include "src/objects/map.h"
#include "src/objects/maybe-object.h"
+#include "src/objects/smi.h"
namespace v8 {
namespace internal {
+enum class NamedPropertyType : bool { kNotOwn, kOwn };
+
//
// IC is the base class for LoadIC, StoreIC, KeyedLoadIC, and KeyedStoreIC.
//
@@ -35,7 +37,8 @@ class IC {
// Construct the IC structure with the given number of extra
// JavaScript frames on the stack.
- IC(Isolate* isolate, Handle<FeedbackVector> vector, FeedbackSlot slot);
+ IC(Isolate* isolate, Handle<FeedbackVector> vector, FeedbackSlot slot,
+ FeedbackSlotKind kind);
virtual ~IC() = default;
State state() const { return state_; }
@@ -59,15 +62,15 @@ class IC {
IsKeyedStoreIC() || IsStoreInArrayLiteralICKind(kind());
}
- static inline bool IsHandler(MaybeObject* object);
+ static inline bool IsHandler(MaybeObject object);
  // Notify the IC system that feedback has changed.
- static void OnFeedbackChanged(Isolate* isolate, FeedbackVector* vector,
- FeedbackSlot slot, JSFunction* host_function,
+ static void OnFeedbackChanged(Isolate* isolate, FeedbackVector vector,
+ FeedbackSlot slot, JSFunction host_function,
const char* reason);
static void OnFeedbackChanged(Isolate* isolate, FeedbackNexus* nexus,
- JSFunction* host_function, const char* reason);
+ JSFunction host_function, const char* reason);
protected:
Address fp() const { return fp_; }
@@ -78,18 +81,14 @@ class IC {
Isolate* isolate() const { return isolate_; }
// Get the caller function object.
- JSFunction* GetHostFunction() const;
+ JSFunction GetHostFunction() const;
inline bool AddressIsDeoptimizedCode() const;
inline static bool AddressIsDeoptimizedCode(Isolate* isolate,
Address address);
bool is_vector_set() { return vector_set_; }
- bool vector_needs_update() {
- return (!vector_set_ &&
- (state() != MEGAMORPHIC ||
- Smi::ToInt(nexus()->GetFeedbackExtra()->cast<Smi>()) != ELEMENT));
- }
+ inline bool vector_needs_update();
// Configure for most states.
bool ConfigureVectorState(IC::State new_state, Handle<Object> key);
@@ -109,8 +108,8 @@ class IC {
void TraceIC(const char* type, Handle<Object> name, State old_state,
State new_state);
- MaybeHandle<Object> TypeError(MessageTemplate::Template,
- Handle<Object> object, Handle<Object> key);
+ MaybeHandle<Object> TypeError(MessageTemplate, Handle<Object> object,
+ Handle<Object> key);
MaybeHandle<Object> ReferenceError(Handle<Name> name);
void TraceHandlerCacheHitStats(LookupIterator* lookup);
@@ -123,7 +122,7 @@ class IC {
StubCache* stub_cache();
void CopyICToMegamorphicCache(Handle<Name> name);
- bool IsTransitionOfMonomorphicTarget(Map* source_map, Map* target_map);
+ bool IsTransitionOfMonomorphicTarget(Map source_map, Map target_map);
void PatchCache(Handle<Name> name, Handle<Object> handler);
void PatchCache(Handle<Name> name, const MaybeObjectHandle& handler);
FeedbackSlotKind kind() const { return kind_; }
@@ -151,9 +150,9 @@ class IC {
}
}
- Map* FirstTargetMap() {
+ Map FirstTargetMap() {
FindTargetMaps();
- return !target_maps_.empty() ? *target_maps_[0] : nullptr;
+ return !target_maps_.empty() ? *target_maps_[0] : Map();
}
State saved_state() const {
@@ -208,8 +207,9 @@ class IC {
class LoadIC : public IC {
public:
- LoadIC(Isolate* isolate, Handle<FeedbackVector> vector, FeedbackSlot slot)
- : IC(isolate, vector, slot) {
+ LoadIC(Isolate* isolate, Handle<FeedbackVector> vector, FeedbackSlot slot,
+ FeedbackSlotKind kind)
+ : IC(isolate, vector, slot, kind) {
DCHECK(IsAnyLoad());
}
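
Every constructor change in this header follows from the same constraint: a miss handler may now run without a feedback vector, so the slot kind can no longer be read off the vector and must travel as an explicit argument. For contrast, a sketch of the derivation this replaces (hypothetical):

    // Pre-patch style derivation (sketch): impossible once `vector` may be
    // a null handle, because GetKind() needs a live FeedbackVector.
    FeedbackSlotKind kind = vector->GetKind(slot);
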
@@ -243,8 +243,8 @@ class LoadIC : public IC {
class LoadGlobalIC : public LoadIC {
public:
LoadGlobalIC(Isolate* isolate, Handle<FeedbackVector> vector,
- FeedbackSlot slot)
- : LoadIC(isolate, vector, slot) {}
+ FeedbackSlot slot, FeedbackSlotKind kind)
+ : LoadIC(isolate, vector, slot, kind) {}
V8_WARN_UNUSED_RESULT MaybeHandle<Object> Load(Handle<Name> name);
@@ -257,8 +257,8 @@ class LoadGlobalIC : public LoadIC {
class KeyedLoadIC : public LoadIC {
public:
KeyedLoadIC(Isolate* isolate, Handle<FeedbackVector> vector,
- FeedbackSlot slot)
- : LoadIC(isolate, vector, slot) {}
+ FeedbackSlot slot, FeedbackSlotKind kind)
+ : LoadIC(isolate, vector, slot, kind) {}
V8_WARN_UNUSED_RESULT MaybeHandle<Object> Load(Handle<Object> object,
Handle<Object> key);
@@ -287,12 +287,13 @@ class KeyedLoadIC : public LoadIC {
class StoreIC : public IC {
public:
- StoreIC(Isolate* isolate, Handle<FeedbackVector> vector, FeedbackSlot slot)
- : IC(isolate, vector, slot) {
+ StoreIC(Isolate* isolate, Handle<FeedbackVector> vector, FeedbackSlot slot,
+ FeedbackSlotKind kind, LanguageMode language_mode)
+ : IC(isolate, vector, slot, kind), language_mode_(language_mode) {
DCHECK(IsAnyStore());
}
- LanguageMode language_mode() const { return nexus()->GetLanguageMode(); }
+ LanguageMode language_mode() const { return language_mode_; }
V8_WARN_UNUSED_RESULT MaybeHandle<Object> Store(
Handle<Object> object, Handle<Name> name, Handle<Object> value,
@@ -313,6 +314,11 @@ class StoreIC : public IC {
void UpdateCaches(LookupIterator* lookup, Handle<Object> value,
StoreOrigin store_origin);
+ // TODO(v8:8580): Instead of storing the language mode, compute it lazily
+ // from the closure and context when needed. We only need it when throwing
+ // exceptions, so it is OK to be slow.
+ LanguageMode language_mode_;
+
private:
MaybeObjectHandle ComputeHandler(LookupIterator* lookup);
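
A hedged sketch of the lazy computation the TODO above proposes; an accessor for the IC's vector is assumed here, while GetLanguageMode(vector, context) is the helper the Runtime_*_Miss handlers in ic.cc already call:

    // Hypothetical lazy lookup per TODO(v8:8580): only throw paths need the
    // language mode, so it could be recovered on demand instead of stored.
    // Assumes an ic.vector() accessor exists; GetLanguageMode() is the
    // helper already used by the miss handlers in this patch.
    LanguageMode LazyLanguageMode(const StoreIC& ic, Isolate* isolate) {
      return GetLanguageMode(ic.vector(), isolate->context());
    }
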
@@ -322,8 +328,9 @@ class StoreIC : public IC {
class StoreGlobalIC : public StoreIC {
public:
StoreGlobalIC(Isolate* isolate, Handle<FeedbackVector> vector,
- FeedbackSlot slot)
- : StoreIC(isolate, vector, slot) {}
+ FeedbackSlot slot, FeedbackSlotKind kind,
+ LanguageMode language_mode)
+ : StoreIC(isolate, vector, slot, kind, language_mode) {}
V8_WARN_UNUSED_RESULT MaybeHandle<Object> Store(Handle<Name> name,
Handle<Object> value);
@@ -347,8 +354,9 @@ class KeyedStoreIC : public StoreIC {
}
KeyedStoreIC(Isolate* isolate, Handle<FeedbackVector> vector,
- FeedbackSlot slot)
- : StoreIC(isolate, vector, slot) {}
+ FeedbackSlot slot, FeedbackSlotKind kind,
+ LanguageMode language_mode)
+ : StoreIC(isolate, vector, slot, kind, language_mode) {}
V8_WARN_UNUSED_RESULT MaybeHandle<Object> Store(Handle<Object> object,
Handle<Object> name,
@@ -381,7 +389,9 @@ class StoreInArrayLiteralIC : public KeyedStoreIC {
public:
StoreInArrayLiteralIC(Isolate* isolate, Handle<FeedbackVector> vector,
FeedbackSlot slot)
- : KeyedStoreIC(isolate, vector, slot) {
+ : KeyedStoreIC(isolate, vector, slot,
+ FeedbackSlotKind::kStoreInArrayLiteral,
+ LanguageMode::kStrict) {
DCHECK(IsStoreInArrayLiteralICKind(kind()));
}
diff --git a/deps/v8/src/ic/keyed-store-generic.cc b/deps/v8/src/ic/keyed-store-generic.cc
index 2b2f15bb82..2a8bd37130 100644
--- a/deps/v8/src/ic/keyed-store-generic.cc
+++ b/deps/v8/src/ic/keyed-store-generic.cc
@@ -20,10 +20,13 @@ using Node = compiler::Node;
template <class T>
using TNode = compiler::TNode<T>;
+enum class StoreMode { kOrdinary, kInLiteral };
+
class KeyedStoreGenericAssembler : public AccessorAssembler {
public:
- explicit KeyedStoreGenericAssembler(compiler::CodeAssemblerState* state)
- : AccessorAssembler(state) {}
+ explicit KeyedStoreGenericAssembler(compiler::CodeAssemblerState* state,
+ StoreMode mode)
+ : AccessorAssembler(state), mode_(mode) {}
void KeyedStoreGeneric();
@@ -43,19 +46,9 @@ class KeyedStoreGenericAssembler : public AccessorAssembler {
TNode<Object> key, TNode<Object> value,
LanguageMode language_mode);
- // Set an own property
- void SetPropertyInLiteral(TNode<Context> context, TNode<JSObject> receiver,
- TNode<Map> map, TNode<Name> key,
- TNode<Object> value) {
- Label done(this);
- ExitPoint exit_point(this,
- [this, &done](Node* result) { this->Goto(&done); });
- EmitGenericPropertyStoreInLiteral(context, receiver, map, key, value,
- &exit_point);
- BIND(&done);
- }
-
private:
+ StoreMode mode_;
+
enum UpdateLength {
kDontChangeLength,
kIncrementLengthByOne,
@@ -90,12 +83,6 @@ class KeyedStoreGenericAssembler : public AccessorAssembler {
Nothing<LanguageMode>());
}
- void EmitGenericPropertyStoreInLiteral(TNode<Context> context,
- TNode<JSObject> receiver,
- TNode<Map> map, TNode<Name> key,
- TNode<Object> value,
- ExitPoint* exit_point);
-
void BranchIfPrototypesHaveNonFastElements(Node* receiver_map,
Label* non_fast_elements,
Label* only_fast_elements);
@@ -133,16 +120,28 @@ class KeyedStoreGenericAssembler : public AccessorAssembler {
TNode<Map> FindCandidateStoreICTransitionMapHandler(TNode<Map> map,
TNode<Name> name,
Label* slow);
+
+ bool IsKeyedStore() const { return mode_ == StoreMode::kOrdinary; }
+ bool IsStoreInLiteral() const { return mode_ == StoreMode::kInLiteral; }
+
+ bool ShouldCheckPrototype() const { return IsKeyedStore(); }
+ bool ShouldReconfigureExisting() const { return IsStoreInLiteral(); }
+ bool ShouldCallSetter() const { return IsKeyedStore(); }
+ bool ShouldCheckPrototypeValidity() const {
+ // We don't do this for "in-literal" stores, because it is impossible for
+    // the target object to be a "prototype".
+ return !IsStoreInLiteral();
+ }
};
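
Taken together, the predicates above pin down how the two store modes diverge; an informal summary derived from this file alone:

    // StoreMode behavior matrix (informal summary of the predicates above):
    //
    //                                  kOrdinary    kInLiteral
    //   walk prototype chain              yes           no
    //   call JS setters                   yes           no
    //   reconfigure existing accessor     no            yes
    //   check prototype validity cell     yes           no
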
void KeyedStoreGenericGenerator::Generate(compiler::CodeAssemblerState* state) {
- KeyedStoreGenericAssembler assembler(state);
+ KeyedStoreGenericAssembler assembler(state, StoreMode::kOrdinary);
assembler.KeyedStoreGeneric();
}
void StoreICUninitializedGenerator::Generate(
compiler::CodeAssemblerState* state) {
- KeyedStoreGenericAssembler assembler(state);
+ KeyedStoreGenericAssembler assembler(state, StoreMode::kOrdinary);
assembler.StoreIC_Uninitialized();
}
@@ -150,7 +149,7 @@ void KeyedStoreGenericGenerator::SetProperty(
compiler::CodeAssemblerState* state, TNode<Context> context,
TNode<JSReceiver> receiver, TNode<BoolT> is_simple_receiver,
TNode<Name> name, TNode<Object> value, LanguageMode language_mode) {
- KeyedStoreGenericAssembler assembler(state);
+ KeyedStoreGenericAssembler assembler(state, StoreMode::kOrdinary);
assembler.SetProperty(context, receiver, is_simple_receiver, name, value,
language_mode);
}
@@ -159,16 +158,15 @@ void KeyedStoreGenericGenerator::SetProperty(
compiler::CodeAssemblerState* state, TNode<Context> context,
TNode<Object> receiver, TNode<Object> key, TNode<Object> value,
LanguageMode language_mode) {
- KeyedStoreGenericAssembler assembler(state);
+ KeyedStoreGenericAssembler assembler(state, StoreMode::kOrdinary);
assembler.SetProperty(context, receiver, key, value, language_mode);
}
void KeyedStoreGenericGenerator::SetPropertyInLiteral(
compiler::CodeAssemblerState* state, TNode<Context> context,
- TNode<JSObject> receiver, TNode<Name> key, TNode<Object> value) {
- KeyedStoreGenericAssembler assembler(state);
- TNode<Map> map = assembler.LoadMap(receiver);
- assembler.SetPropertyInLiteral(context, receiver, map, key, value);
+ TNode<JSObject> receiver, TNode<Object> key, TNode<Object> value) {
+ KeyedStoreGenericAssembler assembler(state, StoreMode::kInLiteral);
+ assembler.SetProperty(context, receiver, key, value, LanguageMode::kStrict);
}
void KeyedStoreGenericAssembler::BranchIfPrototypesHaveNonFastElements(
@@ -314,8 +312,7 @@ void KeyedStoreGenericAssembler::StoreElementWithCapacity(
// The length property is non-configurable, so it's guaranteed to always
// be the first property.
TNode<DescriptorArray> descriptors = LoadMapDescriptors(receiver_map);
- TNode<Uint32T> details = LoadDetailsByKeyIndex(
- descriptors, IntPtrConstant(DescriptorArray::ToKeyIndex(0)));
+ TNode<Uint32T> details = LoadDetailsByDescriptorEntry(descriptors, 0);
GotoIf(IsSetWord32(details, PropertyDetails::kAttributesReadOnlyMask),
slow);
}
@@ -631,7 +628,8 @@ void KeyedStoreGenericAssembler::LookupPropertyOnPrototypeChain(
JumpIfDataProperty(details, &ok_to_write, readonly);
// Accessor case.
- // TODO(jkummerow): Implement a trimmed-down LoadAccessorFromFastObject.
+ // TODO(jkummerow): Implement a trimmed-down
+ // LoadAccessorFromFastObject.
VARIABLE(var_details, MachineRepresentation::kWord32);
LoadPropertyFromFastObject(holder, holder_map, descriptors, name_index,
&var_details, var_accessor_pair);
@@ -647,11 +645,15 @@ void KeyedStoreGenericAssembler::LookupPropertyOnPrototypeChain(
LoadDetailsByKeyIndex<NameDictionary>(dictionary, entry);
JumpIfDataProperty(details, &ok_to_write, readonly);
- // Accessor case.
- var_accessor_pair->Bind(
- LoadValueByKeyIndex<NameDictionary>(dictionary, entry));
- var_accessor_holder->Bind(holder);
- Goto(accessor);
+ if (accessor != nullptr) {
+ // Accessor case.
+ var_accessor_pair->Bind(
+ LoadValueByKeyIndex<NameDictionary>(dictionary, entry));
+ var_accessor_holder->Bind(holder);
+ Goto(accessor);
+ } else {
+ Goto(&ok_to_write);
+ }
}
BIND(&found_global);
@@ -667,10 +669,14 @@ void KeyedStoreGenericAssembler::LookupPropertyOnPrototypeChain(
property_cell, PropertyCell::kDetailsOffset);
JumpIfDataProperty(details, &ok_to_write, readonly);
- // Accessor case.
- var_accessor_pair->Bind(value);
- var_accessor_holder->Bind(holder);
- Goto(accessor);
+ if (accessor != nullptr) {
+ // Accessor case.
+ var_accessor_pair->Bind(value);
+ var_accessor_holder->Bind(holder);
+ Goto(accessor);
+ } else {
+ Goto(&ok_to_write);
+ }
}
}
@@ -739,7 +745,7 @@ TNode<Map> KeyedStoreGenericAssembler::FindCandidateStoreICTransitionMapHandler(
STATIC_ASSERT(NONE == 0);
const int kKeyToTargetOffset = (TransitionArray::kEntryTargetIndex -
TransitionArray::kEntryKeyIndex) *
- kPointerSize;
+ kTaggedSize;
var_transition_map = CAST(GetHeapObjectAssumeWeak(
LoadArrayElement(transitions, WeakFixedArray::kHeaderSize,
var_name_index.value(), kKeyToTargetOffset)));
@@ -758,8 +764,8 @@ void KeyedStoreGenericAssembler::EmitGenericPropertyStore(
CSA_ASSERT(this, IsSimpleObjectMap(receiver_map));
VARIABLE(var_accessor_pair, MachineRepresentation::kTagged);
VARIABLE(var_accessor_holder, MachineRepresentation::kTagged);
- Label stub_cache(this), fast_properties(this), dictionary_properties(this),
- accessor(this), readonly(this);
+ Label fast_properties(this), dictionary_properties(this), accessor(this),
+ readonly(this);
Node* bitfield3 = LoadMapBitField3(receiver_map);
Branch(IsSetWord32<Map::IsDictionaryMapBit>(bitfield3),
&dictionary_properties, &fast_properties);
@@ -778,15 +784,21 @@ void KeyedStoreGenericAssembler::EmitGenericPropertyStore(
TNode<IntPtrT> name_index = var_name_index.value();
Node* details = LoadDetailsByKeyIndex(descriptors, name_index);
Label data_property(this);
- JumpIfDataProperty(details, &data_property, &readonly);
+ JumpIfDataProperty(details, &data_property,
+ ShouldReconfigureExisting() ? nullptr : &readonly);
- // Accessor case.
- // TODO(jkummerow): Implement a trimmed-down LoadAccessorFromFastObject.
- VARIABLE(var_details, MachineRepresentation::kWord32);
- LoadPropertyFromFastObject(receiver, receiver_map, descriptors,
- name_index, &var_details, &var_accessor_pair);
- var_accessor_holder.Bind(receiver);
- Goto(&accessor);
+ if (ShouldCallSetter()) {
+ // Accessor case.
+ // TODO(jkummerow): Implement a trimmed-down LoadAccessorFromFastObject.
+ VARIABLE(var_details, MachineRepresentation::kWord32);
+ LoadPropertyFromFastObject(receiver, receiver_map, descriptors,
+ name_index, &var_details,
+ &var_accessor_pair);
+ var_accessor_holder.Bind(receiver);
+ Goto(&accessor);
+ } else {
+ Goto(&data_property);
+ }
BIND(&data_property);
{
@@ -804,10 +816,11 @@ void KeyedStoreGenericAssembler::EmitGenericPropertyStore(
receiver_map, CAST(p->name), slow);
// Validate the transition handler candidate and apply the transition.
- HandleStoreICTransitionMapHandlerCase(
- p, transition_map, slow,
- StoreTransitionMapFlags(kCheckPrototypeValidity |
- kValidateTransitionHandler));
+ StoreTransitionMapFlags flags = kValidateTransitionHandler;
+ if (ShouldCheckPrototypeValidity()) {
+ flags = StoreTransitionMapFlags(flags | kCheckPrototypeValidity);
+ }
+ HandleStoreICTransitionMapHandlerCase(p, transition_map, slow, flags);
exit_point->Return(p->value);
}
}
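
The transition path composes StoreTransitionMapFlags bit by bit. A compilable sketch with stand-in enumerator values (the real ones live in the accessor assembler):

    enum StoreTransitionMapFlags {
      kValidateTransitionHandler = 1 << 0,  // stand-in value
      kCheckPrototypeValidity = 1 << 1,     // stand-in value
    };

    StoreTransitionMapFlags TransitionFlags(bool check_prototype_validity) {
      StoreTransitionMapFlags flags = kValidateTransitionHandler;
      if (check_prototype_validity) {
        // Plain enums don't define operator|, hence the explicit cast,
        // exactly as in the assembler code above.
        flags = StoreTransitionMapFlags(flags | kCheckPrototypeValidity);
      }
      return flags;
    }
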
@@ -829,13 +842,18 @@ void KeyedStoreGenericAssembler::EmitGenericPropertyStore(
Label overwrite(this);
TNode<Uint32T> details = LoadDetailsByKeyIndex<NameDictionary>(
properties, var_name_index.value());
- JumpIfDataProperty(details, &overwrite, &readonly);
+ JumpIfDataProperty(details, &overwrite,
+ ShouldReconfigureExisting() ? nullptr : &readonly);
- // Accessor case.
- var_accessor_pair.Bind(LoadValueByKeyIndex<NameDictionary>(
- properties, var_name_index.value()));
- var_accessor_holder.Bind(receiver);
- Goto(&accessor);
+ if (ShouldCallSetter()) {
+ // Accessor case.
+ var_accessor_pair.Bind(LoadValueByKeyIndex<NameDictionary>(
+ properties, var_name_index.value()));
+ var_accessor_holder.Bind(receiver);
+ Goto(&accessor);
+ } else {
+ Goto(&overwrite);
+ }
BIND(&overwrite);
{
@@ -855,9 +873,13 @@ void KeyedStoreGenericAssembler::EmitGenericPropertyStore(
Branch(IsSetWord32<Map::IsExtensibleBit>(bitfield2), &extensible, slow);
BIND(&extensible);
- LookupPropertyOnPrototypeChain(receiver_map, p->name, &accessor,
- &var_accessor_pair, &var_accessor_holder,
- &readonly, slow);
+ if (ShouldCheckPrototype()) {
+ DCHECK(ShouldCallSetter());
+ LookupPropertyOnPrototypeChain(
+ receiver_map, p->name, &accessor, &var_accessor_pair,
+ &var_accessor_holder,
+ ShouldReconfigureExisting() ? nullptr : &readonly, slow);
+ }
Label add_dictionary_property_slow(this);
InvalidateValidityCellIfPrototype(receiver_map, bitfield2);
Add<NameDictionary>(properties, CAST(p->name), p->value,
@@ -870,23 +892,56 @@ void KeyedStoreGenericAssembler::EmitGenericPropertyStore(
}
}
- BIND(&accessor);
- {
- Label not_callable(this);
- Node* accessor_pair = var_accessor_pair.value();
- GotoIf(IsAccessorInfoMap(LoadMap(accessor_pair)), slow);
- CSA_ASSERT(this, HasInstanceType(accessor_pair, ACCESSOR_PAIR_TYPE));
- Node* setter = LoadObjectField(accessor_pair, AccessorPair::kSetterOffset);
- Node* setter_map = LoadMap(setter);
- // FunctionTemplateInfo setters are not supported yet.
- GotoIf(IsFunctionTemplateInfoMap(setter_map), slow);
- GotoIfNot(IsCallableMap(setter_map), &not_callable);
-
- Callable callable = CodeFactory::Call(isolate());
- CallJS(callable, p->context, setter, receiver, p->value);
- exit_point->Return(p->value);
-
- BIND(&not_callable);
+ if (ShouldCallSetter()) {
+ BIND(&accessor);
+ {
+ Label not_callable(this);
+ Node* accessor_pair = var_accessor_pair.value();
+ GotoIf(IsAccessorInfoMap(LoadMap(accessor_pair)), slow);
+ CSA_ASSERT(this, HasInstanceType(accessor_pair, ACCESSOR_PAIR_TYPE));
+ Node* setter =
+ LoadObjectField(accessor_pair, AccessorPair::kSetterOffset);
+ Node* setter_map = LoadMap(setter);
+ // FunctionTemplateInfo setters are not supported yet.
+ GotoIf(IsFunctionTemplateInfoMap(setter_map), slow);
+ GotoIfNot(IsCallableMap(setter_map), &not_callable);
+
+ Callable callable = CodeFactory::Call(isolate());
+ CallJS(callable, p->context, setter, receiver, p->value);
+ exit_point->Return(p->value);
+
+ BIND(&not_callable);
+ {
+ bool handle_strict = true;
+ Label strict(this);
+ LanguageMode language_mode;
+ if (maybe_language_mode.To(&language_mode)) {
+ if (language_mode == LanguageMode::kStrict) {
+ Goto(&strict);
+ } else {
+ handle_strict = false;
+ exit_point->Return(p->value);
+ }
+ } else {
+ BranchIfStrictMode(p->vector, p->slot, &strict);
+ exit_point->Return(p->value);
+ }
+
+ if (handle_strict) {
+ BIND(&strict);
+ {
+ exit_point->ReturnCallRuntime(
+ Runtime::kThrowTypeError, p->context,
+ SmiConstant(MessageTemplate::kNoSetterInCallback), p->name,
+ var_accessor_holder.value());
+ }
+ }
+ }
+ }
+ }
+
+ if (!ShouldReconfigureExisting()) {
+ BIND(&readonly);
{
bool handle_strict = true;
Label strict(this);
@@ -902,42 +957,16 @@ void KeyedStoreGenericAssembler::EmitGenericPropertyStore(
BranchIfStrictMode(p->vector, p->slot, &strict);
exit_point->Return(p->value);
}
-
if (handle_strict) {
BIND(&strict);
{
- ThrowTypeError(p->context, MessageTemplate::kNoSetterInCallback,
- p->name, var_accessor_holder.value());
+ Node* type = Typeof(p->receiver);
+ ThrowTypeError(p->context, MessageTemplate::kStrictReadOnlyProperty,
+ p->name, type, p->receiver);
}
}
}
}
-
- BIND(&readonly);
- {
- bool handle_strict = true;
- Label strict(this);
- LanguageMode language_mode;
- if (maybe_language_mode.To(&language_mode)) {
- if (language_mode == LanguageMode::kStrict) {
- Goto(&strict);
- } else {
- handle_strict = false;
- exit_point->Return(p->value);
- }
- } else {
- BranchIfStrictMode(p->vector, p->slot, &strict);
- exit_point->Return(p->value);
- }
- if (handle_strict) {
- BIND(&strict);
- {
- Node* type = Typeof(p->receiver);
- ThrowTypeError(p->context, MessageTemplate::kStrictReadOnlyProperty,
- p->name, type, p->receiver);
- }
- }
- }
}
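
Both throw paths in this function share the same compile-time/runtime split on the language mode. A standalone sketch of the decision, with std::optional standing in for V8's Maybe<LanguageMode>:

    #include <optional>

    enum class LanguageMode { kSloppy, kStrict };

    // Returns whether a strict-mode throw path must be emitted at all. When
    // the mode is statically known to be sloppy, the assembler above skips
    // binding the &strict label entirely (handle_strict == false).
    bool NeedsStrictThrowPath(std::optional<LanguageMode> maybe_mode) {
      if (maybe_mode.has_value()) {
        return *maybe_mode == LanguageMode::kStrict;  // decided statically
      }
      return true;  // unknown statically: emit BranchIfStrictMode + throw
    }
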
// Helper that is used by the public KeyedStoreGeneric and by SetProperty.
@@ -945,7 +974,7 @@ void KeyedStoreGenericAssembler::KeyedStoreGeneric(
TNode<Context> context, TNode<Object> receiver, TNode<Object> key,
TNode<Object> value, Maybe<LanguageMode> language_mode, TNode<Smi> slot,
TNode<FeedbackVector> vector) {
- TVARIABLE(WordT, var_index);
+ TVARIABLE(IntPtrT, var_index);
TVARIABLE(Object, var_unique, key);
Label if_index(this), if_unique_name(this), not_internalized(this),
slow(this);
@@ -989,19 +1018,25 @@ void KeyedStoreGenericAssembler::KeyedStoreGeneric(
BIND(&slow);
{
- Comment("KeyedStoreGeneric_slow");
- if (language_mode.IsJust()) {
- TailCallRuntime(Runtime::kSetKeyedProperty, context, receiver, key, value,
- SmiConstant(language_mode.FromJust()));
+ if (IsKeyedStore()) {
+ Comment("KeyedStoreGeneric_slow");
+ if (language_mode.IsJust()) {
+ TailCallRuntime(Runtime::kSetKeyedProperty, context, receiver, key,
+ value, SmiConstant(language_mode.FromJust()));
+ } else {
+ TVARIABLE(Smi, var_language_mode, SmiConstant(LanguageMode::kStrict));
+ Label call_runtime(this);
+ BranchIfStrictMode(vector, slot, &call_runtime);
+ var_language_mode = SmiConstant(LanguageMode::kSloppy);
+ Goto(&call_runtime);
+ BIND(&call_runtime);
+ TailCallRuntime(Runtime::kSetKeyedProperty, context, receiver, key,
+ value, var_language_mode.value());
+ }
} else {
- TVARIABLE(Smi, var_language_mode, SmiConstant(LanguageMode::kStrict));
- Label call_runtime(this);
- BranchIfStrictMode(vector, slot, &call_runtime);
- var_language_mode = SmiConstant(LanguageMode::kSloppy);
- Goto(&call_runtime);
- BIND(&call_runtime);
- TailCallRuntime(Runtime::kSetKeyedProperty, context, receiver, key, value,
- var_language_mode.value());
+ DCHECK(IsStoreInLiteral());
+ TailCallRuntime(Runtime::kStoreDataPropertyInLiteral, context, receiver,
+ key, value);
}
}
}
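
The slow path now dispatches purely on the assembler's mode. A standalone sketch of that dispatch, with RuntimeId standing in for V8's runtime-function ids:

    enum class StoreMode { kOrdinary, kInLiteral };
    enum class RuntimeId { kSetKeyedProperty, kStoreDataPropertyInLiteral };

    RuntimeId SlowPathRuntime(StoreMode mode) {
      // Ordinary stores additionally pass a language-mode Smi, resolved
      // statically when known and via BranchIfStrictMode otherwise.
      return mode == StoreMode::kOrdinary
                 ? RuntimeId::kSetKeyedProperty
                 : RuntimeId::kStoreDataPropertyInLiteral;
    }
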
@@ -1087,132 +1122,18 @@ void KeyedStoreGenericAssembler::SetProperty(TNode<Context> context,
BIND(&slow);
{
- CallRuntime(Runtime::kSetKeyedProperty, context, receiver, unique_name,
- value, SmiConstant(language_mode));
+ if (IsStoreInLiteral()) {
+ CallRuntime(Runtime::kStoreDataPropertyInLiteral, context, receiver,
+ unique_name, value);
+ } else {
+ CallRuntime(Runtime::kSetKeyedProperty, context, receiver, unique_name,
+ value, SmiConstant(language_mode));
+ }
Goto(&done);
}
BIND(&done);
}
-// Sets data properties as in PropertyDefinitionEvaluation --- Does not invoke
-// own setters or traverse the prototype chain.
-void KeyedStoreGenericAssembler::EmitGenericPropertyStoreInLiteral(
- TNode<Context> context, TNode<JSObject> receiver, TNode<Map> map,
- TNode<Name> key, TNode<Object> value, ExitPoint* exit_point) {
- CSA_ASSERT(this, IsSimpleObjectMap(map));
-
- // This should only be used for storing data properties in object literals.
- CSA_ASSERT(this, HasInstanceType(receiver, JS_OBJECT_TYPE));
-
- Label stub_cache(this), fast_properties(this), dictionary_properties(this),
- accessor(this), call_runtime(this, Label::kDeferred), done(this);
- TNode<Uint32T> bit_field3 = LoadMapBitField3(map);
- Branch(IsSetWord32<Map::IsDictionaryMapBit>(bit_field3),
- &dictionary_properties, &fast_properties);
-
- BIND(&fast_properties);
- {
- Comment("fast property store");
- TNode<DescriptorArray> descriptors = LoadMapDescriptors(map);
- Label descriptor_found(this), lookup_transition(this);
-
- TVARIABLE(IntPtrT, var_name_index);
- DescriptorLookup(key, descriptors, bit_field3, &descriptor_found,
- &var_name_index, &lookup_transition);
-
- BIND(&descriptor_found);
- {
- TNode<IntPtrT> name_index = var_name_index.value();
- TNode<Uint32T> details = LoadDetailsByKeyIndex(descriptors, name_index);
- Label data_property(this);
- JumpIfDataProperty(details, &data_property, nullptr);
-
- // Reconfigure the accessor to a data property via runtime call.
-    // TODO(caitp): reconfigure the property details inline here.
- Goto(&call_runtime);
-
- BIND(&data_property);
- {
- // TODO(caitp): consider only checking for names associated with
- // protectors that can apply to non-prototype JSObjects (currently, only
- // [Symbol.isConcatSpreadable]), and request this behaviour with an
- // enum parameter.
- CheckForAssociatedProtector(key, &call_runtime);
- OverwriteExistingFastDataProperty(receiver, map, descriptors,
- name_index, details, value,
- &call_runtime, false);
- exit_point->Return(value);
- }
- }
-
- BIND(&lookup_transition);
- {
- Comment("lookup transition");
- TNode<Map> transition_map =
- FindCandidateStoreICTransitionMapHandler(map, key, &call_runtime);
-
- // Validate the transition handler candidate and apply the transition.
- StoreICParameters p(context, receiver, key, value, nullptr, nullptr);
- HandleStoreICTransitionMapHandlerCase(&p, transition_map, &call_runtime,
- kValidateTransitionHandler);
- exit_point->Return(value);
- }
- }
-
- BIND(&dictionary_properties);
- {
- Comment("dictionary property store");
- TVARIABLE(IntPtrT, var_name_index);
- Label dictionary_found(this, &var_name_index), not_found(this);
- TNode<NameDictionary> properties = CAST(LoadSlowProperties(receiver));
- NameDictionaryLookup<NameDictionary>(properties, key, &dictionary_found,
- &var_name_index, &not_found);
- BIND(&dictionary_found);
- {
- Label overwrite(this);
- TNode<Uint32T> details = LoadDetailsByKeyIndex<NameDictionary>(
- properties, var_name_index.value());
- JumpIfDataProperty(details, &overwrite, nullptr);
-
- // Reconfigure the accessor to a data property via runtime call.
- Goto(&call_runtime);
-
- BIND(&overwrite);
- {
- // See above TODO regarding non-pertinent checks
- CheckForAssociatedProtector(key, &call_runtime);
- StoreValueByKeyIndex<NameDictionary>(properties, var_name_index.value(),
- value);
- exit_point->Return(value);
- }
- }
-
- BIND(&not_found);
- {
- // See above TODO regarding non-pertinent checks
- CheckForAssociatedProtector(key, &call_runtime);
-
- // This method should always be invoked on a new JSObject literal ---
- // it should be impossible for the object to be made non-extensible, or to
-    // be a prototype map.
- CSA_ASSERT(this, IsExtensibleNonPrototypeMap(map));
-
- Label add_dictionary_property_slow(this);
- Add<NameDictionary>(properties, key, value,
- &add_dictionary_property_slow);
- exit_point->Return(value);
-
- BIND(&add_dictionary_property_slow);
- exit_point->ReturnCallRuntime(Runtime::kAddDictionaryProperty, context,
- receiver, key, value);
- }
- }
-
- BIND(&call_runtime);
- exit_point->ReturnCallRuntime(Runtime::kStoreDataPropertyInLiteral, context,
- receiver, key, value);
-}
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/ic/keyed-store-generic.h b/deps/v8/src/ic/keyed-store-generic.h
index 9442a54935..9ab8db7864 100644
--- a/deps/v8/src/ic/keyed-store-generic.h
+++ b/deps/v8/src/ic/keyed-store-generic.h
@@ -33,7 +33,7 @@ class KeyedStoreGenericGenerator {
static void SetPropertyInLiteral(compiler::CodeAssemblerState* state,
TNode<Context> context,
- TNode<JSObject> receiver, TNode<Name> key,
+ TNode<JSObject> receiver, TNode<Object> key,
TNode<Object> value);
};
diff --git a/deps/v8/src/ic/stub-cache.cc b/deps/v8/src/ic/stub-cache.cc
index 4958726a22..8567799f3f 100644
--- a/deps/v8/src/ic/stub-cache.cc
+++ b/deps/v8/src/ic/stub-cache.cc
@@ -16,7 +16,7 @@ namespace internal {
StubCache::StubCache(Isolate* isolate) : isolate_(isolate) {
// Ensure the nullptr (aka Smi::kZero) which StubCache::Get() returns
  // when the entry is not found is not considered a handler.
- DCHECK(!IC::IsHandler(nullptr));
+ DCHECK(!IC::IsHandler(MaybeObject()));
}
void StubCache::Initialize() {
@@ -28,7 +28,7 @@ void StubCache::Initialize() {
// Hash algorithm for the primary table. This algorithm is replicated in
// assembler for every architecture. Returns an index into the table that
// is scaled by 1 << kCacheIndexShift.
-int StubCache::PrimaryOffset(Name* name, Map* map) {
+int StubCache::PrimaryOffset(Name name, Map map) {
STATIC_ASSERT(kCacheIndexShift == Name::kHashShift);
// Compute the hash of the name (use entire hash field).
DCHECK(name->HasHashCode());
@@ -36,8 +36,7 @@ int StubCache::PrimaryOffset(Name* name, Map* map) {
// Using only the low bits in 64-bit mode is unlikely to increase the
// risk of collision even if the heap is spread over an area larger than
// 4Gb (and not at all if it isn't).
- uint32_t map_low32bits =
- static_cast<uint32_t>(reinterpret_cast<uintptr_t>(map));
+ uint32_t map_low32bits = static_cast<uint32_t>(map.ptr());
// Base the offset on a simple combination of name and map.
uint32_t key = map_low32bits + field;
return key & ((kPrimaryTableSize - 1) << kCacheIndexShift);
@@ -46,88 +45,95 @@ int StubCache::PrimaryOffset(Name* name, Map* map) {
// Hash algorithm for the secondary table. This algorithm is replicated in
// assembler for every architecture. Returns an index into the table that
// is scaled by 1 << kCacheIndexShift.
-int StubCache::SecondaryOffset(Name* name, int seed) {
+int StubCache::SecondaryOffset(Name name, int seed) {
// Use the seed from the primary cache in the secondary cache.
- uint32_t name_low32bits =
- static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name));
+ uint32_t name_low32bits = static_cast<uint32_t>(name.ptr());
uint32_t key = (seed - name_low32bits) + kSecondaryMagic;
return key & ((kSecondaryTableSize - 1) << kCacheIndexShift);
}
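
With Name and Map as value types, both hashes read the low pointer bits via .ptr() instead of a reinterpret_cast. A compilable standalone rendition of the arithmetic; the table sizes and shift are assumptions, while kSecondaryMagic is the patch's own constant:

    #include <cstdint>

    constexpr int kCacheIndexShift = 3;        // assumption: Name::kHashShift
    constexpr int kPrimaryTableSize = 2048;    // assumption
    constexpr int kSecondaryTableSize = 512;   // assumption
    constexpr uint32_t kSecondaryMagic = 0xb16ca6e5;  // value from this patch

    int PrimaryOffset(uint32_t hash_field, uintptr_t map_ptr) {
      uint32_t map_low32bits = static_cast<uint32_t>(map_ptr);
      uint32_t key = map_low32bits + hash_field;
      return key & ((kPrimaryTableSize - 1) << kCacheIndexShift);
    }

    int SecondaryOffset(uintptr_t name_ptr, int seed) {
      uint32_t name_low32bits = static_cast<uint32_t>(name_ptr);
      uint32_t key = (seed - name_low32bits) + kSecondaryMagic;
      return key & ((kSecondaryTableSize - 1) << kCacheIndexShift);
    }
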
+int StubCache::PrimaryOffsetForTesting(Name name, Map map) {
+ return PrimaryOffset(name, map);
+}
+
+int StubCache::SecondaryOffsetForTesting(Name name, int seed) {
+ return SecondaryOffset(name, seed);
+}
+
#ifdef DEBUG
namespace {
-bool CommonStubCacheChecks(StubCache* stub_cache, Name* name, Map* map,
- MaybeObject* handler) {
+bool CommonStubCacheChecks(StubCache* stub_cache, Name name, Map map,
+ MaybeObject handler) {
// Validate that the name and handler do not move on scavenge, and that we
// can use identity checks instead of structural equality checks.
DCHECK(!Heap::InNewSpace(name));
DCHECK(!Heap::InNewSpace(handler));
DCHECK(name->IsUniqueName());
DCHECK(name->HasHashCode());
- if (handler) DCHECK(IC::IsHandler(handler));
+ if (handler->ptr() != kNullAddress) DCHECK(IC::IsHandler(handler));
return true;
}
} // namespace
#endif
-MaybeObject* StubCache::Set(Name* name, Map* map, MaybeObject* handler) {
+void StubCache::Set(Name name, Map map, MaybeObject handler) {
DCHECK(CommonStubCacheChecks(this, name, map, handler));
// Compute the primary entry.
int primary_offset = PrimaryOffset(name, map);
Entry* primary = entry(primary_, primary_offset);
- MaybeObject* old_handler = primary->value;
+ MaybeObject old_handler(primary->value);
// If the primary entry has useful data in it, we retire it to the
// secondary cache before overwriting it.
if (old_handler != MaybeObject::FromObject(
- isolate_->builtins()->builtin(Builtins::kIllegal))) {
- Map* old_map = primary->map;
- int seed = PrimaryOffset(primary->key, old_map);
- int secondary_offset = SecondaryOffset(primary->key, seed);
+ isolate_->builtins()->builtin(Builtins::kIllegal)) &&
+ primary->map != kNullAddress) {
+ Map old_map = Map::cast(Object(primary->map));
+ int seed = PrimaryOffset(Name::cast(Object(primary->key)), old_map);
+ int secondary_offset =
+ SecondaryOffset(Name::cast(Object(primary->key)), seed);
Entry* secondary = entry(secondary_, secondary_offset);
*secondary = *primary;
}
// Update primary cache.
- primary->key = name;
- primary->value = handler;
- primary->map = map;
+ primary->key = name.ptr();
+ primary->value = handler.ptr();
+ primary->map = map.ptr();
isolate()->counters()->megamorphic_stub_cache_updates()->Increment();
- return handler;
}
-MaybeObject* StubCache::Get(Name* name, Map* map) {
- DCHECK(CommonStubCacheChecks(this, name, map, nullptr));
+MaybeObject StubCache::Get(Name name, Map map) {
+ DCHECK(CommonStubCacheChecks(this, name, map, MaybeObject()));
int primary_offset = PrimaryOffset(name, map);
Entry* primary = entry(primary_, primary_offset);
- if (primary->key == name && primary->map == map) {
- return primary->value;
+ if (primary->key == name.ptr() && primary->map == map.ptr()) {
+ return MaybeObject(primary->value);
}
int secondary_offset = SecondaryOffset(name, primary_offset);
Entry* secondary = entry(secondary_, secondary_offset);
- if (secondary->key == name && secondary->map == map) {
- return secondary->value;
+ if (secondary->key == name.ptr() && secondary->map == map.ptr()) {
+ return MaybeObject(secondary->value);
}
- return nullptr;
+ return MaybeObject();
}
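
Set() no longer returns the handler, and Get() signals a miss with a default-constructed MaybeObject instead of nullptr. A call-site sketch using only calls visible in this patch (live cache, name, map, and handler are assumed):

    cache->Set(name, map, handler);            // now returns void
    MaybeObject found = cache->Get(name, map);
    if (found->ptr() != kNullAddress) {
      DCHECK(IC::IsHandler(found));            // hit: weak or strong ref
    } else {
      // miss: MaybeObject() sentinel, rejected by IC::IsHandler()
    }
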
-
void StubCache::Clear() {
- MaybeObject* empty = MaybeObject::FromObject(
+ MaybeObject empty = MaybeObject::FromObject(
isolate_->builtins()->builtin(Builtins::kIllegal));
- Name* empty_string = ReadOnlyRoots(isolate()).empty_string();
+ Name empty_string = ReadOnlyRoots(isolate()).empty_string();
for (int i = 0; i < kPrimaryTableSize; i++) {
- primary_[i].key = empty_string;
- primary_[i].map = nullptr;
- primary_[i].value = empty;
+ primary_[i].key = empty_string.ptr();
+ primary_[i].map = kNullAddress;
+ primary_[i].value = empty.ptr();
}
for (int j = 0; j < kSecondaryTableSize; j++) {
- secondary_[j].key = empty_string;
- secondary_[j].map = nullptr;
- secondary_[j].value = empty;
+ secondary_[j].key = empty_string.ptr();
+ secondary_[j].map = kNullAddress;
+ secondary_[j].value = empty.ptr();
}
}
diff --git a/deps/v8/src/ic/stub-cache.h b/deps/v8/src/ic/stub-cache.h
index 5cff496b15..0b6f9d43d1 100644
--- a/deps/v8/src/ic/stub-cache.h
+++ b/deps/v8/src/ic/stub-cache.h
@@ -5,7 +5,6 @@
#ifndef V8_IC_STUB_CACHE_H_
#define V8_IC_STUB_CACHE_H_
-#include "src/macro-assembler.h"
#include "src/objects/name.h"
namespace v8 {
@@ -33,15 +32,21 @@ class SCTableReference {
class StubCache {
public:
struct Entry {
- Name* key;
- MaybeObject* value;
- Map* map;
+ // The values here have plain Address types because they are read
+ // directly from generated code. As a nice side effect, this keeps
+ // #includes lightweight.
+ Address key;
+ // {value} is a tagged heap object reference (weak or strong), equivalent
+ // to a MaybeObject's payload.
+ Address value;
+ // {map} is a tagged Map pointer, or nullptr.
+ Address map;
};
void Initialize();
// Access cache for entry hash(name, map).
- MaybeObject* Set(Name* name, Map* map, MaybeObject* handler);
- MaybeObject* Get(Name* name, Map* map);
+ void Set(Name name, Map map, MaybeObject handler);
+ MaybeObject Get(Name name, Map map);
// Clear the lookup table (@ mark compact collection).
void Clear();
@@ -87,13 +92,8 @@ class StubCache {
// Some magic number used in the secondary hash computation.
static const int kSecondaryMagic = 0xb16ca6e5;
- static int PrimaryOffsetForTesting(Name* name, Map* map) {
- return PrimaryOffset(name, map);
- }
-
- static int SecondaryOffsetForTesting(Name* name, int seed) {
- return SecondaryOffset(name, seed);
- }
+ static int PrimaryOffsetForTesting(Name name, Map map);
+ static int SecondaryOffsetForTesting(Name name, int seed);
// The constructor is made public only for the purposes of testing.
explicit StubCache(Isolate* isolate);
@@ -109,12 +109,12 @@ class StubCache {
// Hash algorithm for the primary table. This algorithm is replicated in
// assembler for every architecture. Returns an index into the table that
// is scaled by 1 << kCacheIndexShift.
- static int PrimaryOffset(Name* name, Map* map);
+ static int PrimaryOffset(Name name, Map map);
// Hash algorithm for the secondary table. This algorithm is replicated in
// assembler for every architecture. Returns an index into the table that
// is scaled by 1 << kCacheIndexShift.
- static int SecondaryOffset(Name* name, int seed);
+ static int SecondaryOffset(Name name, int seed);
// Compute the entry for a given offset in exactly the same way as
// we do in generated code. We generate an hash code that already
diff --git a/deps/v8/src/identity-map.cc b/deps/v8/src/identity-map.cc
index c6d74d2bd8..bd0377aff0 100644
--- a/deps/v8/src/identity-map.cc
+++ b/deps/v8/src/identity-map.cc
@@ -5,7 +5,8 @@
#include "src/identity-map.h"
#include "src/base/functional.h"
-#include "src/heap/heap-inl.h"
+#include "src/heap/heap.h"
+#include "src/roots-inl.h"
namespace v8 {
namespace internal {
@@ -22,7 +23,7 @@ IdentityMapBase::~IdentityMapBase() {
void IdentityMapBase::Clear() {
if (keys_) {
DCHECK(!is_iterable());
- heap_->UnregisterStrongRoots(keys_);
+ heap_->UnregisterStrongRoots(FullObjectSlot(keys_));
DeleteArray(keys_);
DeleteArray(values_);
keys_ = nullptr;
@@ -43,9 +44,9 @@ void IdentityMapBase::DisableIteration() {
is_iterable_ = false;
}
-int IdentityMapBase::ScanKeysFor(Object* address) const {
+int IdentityMapBase::ScanKeysFor(Address address) const {
int start = Hash(address) & mask_;
- Object* not_mapped = ReadOnlyRoots(heap_).not_mapped_symbol();
+ Address not_mapped = ReadOnlyRoots(heap_).not_mapped_symbol().ptr();
for (int index = start; index < capacity_; index++) {
if (keys_[index] == address) return index; // Found.
if (keys_[index] == not_mapped) return -1; // Not found.
@@ -57,8 +58,8 @@ int IdentityMapBase::ScanKeysFor(Object* address) const {
return -1;
}
-int IdentityMapBase::InsertKey(Object* address) {
- Object* not_mapped = ReadOnlyRoots(heap_).not_mapped_symbol();
+int IdentityMapBase::InsertKey(Address address) {
+ Address not_mapped = ReadOnlyRoots(heap_).not_mapped_symbol().ptr();
while (true) {
int start = Hash(address) & mask_;
int limit = capacity_ / 2;
@@ -80,7 +81,7 @@ int IdentityMapBase::InsertKey(Object* address) {
bool IdentityMapBase::DeleteIndex(int index, void** deleted_value) {
if (deleted_value != nullptr) *deleted_value = values_[index];
- Object* not_mapped = ReadOnlyRoots(heap_).not_mapped_symbol();
+ Address not_mapped = ReadOnlyRoots(heap_).not_mapped_symbol().ptr();
DCHECK_NE(keys_[index], not_mapped);
keys_[index] = not_mapped;
values_[index] = nullptr;
@@ -97,7 +98,7 @@ bool IdentityMapBase::DeleteIndex(int index, void** deleted_value) {
int next_index = index;
for (;;) {
next_index = (next_index + 1) & mask_;
- Object* key = keys_[next_index];
+ Address key = keys_[next_index];
if (key == not_mapped) break;
int expected_index = Hash(key) & mask_;
@@ -118,7 +119,7 @@ bool IdentityMapBase::DeleteIndex(int index, void** deleted_value) {
return true;
}
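
DeleteIndex implements the classic backward-shift deletion for linear probing: once a slot is emptied, later entries in the same probe chain whose home slot lies cyclically at or before the hole must slide back, or lookups for them would stop at the hole. A compilable standalone rendition of that invariant (simplified; the version above interleaves it with table shrinking):

    #include <cstdint>
    #include <vector>

    using Address = uintptr_t;

    // Is `home` cyclically inside the probe segment (hole, current]?
    static bool InSegment(int home, int hole, int current) {
      return hole < current ? (hole < home && home <= current)
                            : (home > hole || home <= current);
    }

    void BackwardShiftDelete(std::vector<Address>& keys,
                             std::vector<void*>& values, int hole,
                             Address not_mapped, int mask,
                             int (*home_of)(Address)) {
      keys[hole] = not_mapped;
      values[hole] = nullptr;
      int current = hole;
      for (;;) {
        current = (current + 1) & mask;
        Address key = keys[current];
        if (key == not_mapped) break;  // end of the probe chain
        int home = home_of(key) & mask;
        if (!InSegment(home, hole, current)) {
          // The entry's home slot falls outside (hole, current]; a lookup
          // starting at `home` would hit the hole first, so move it back.
          keys[hole] = key;
          values[hole] = values[current];
          keys[current] = not_mapped;
          values[current] = nullptr;
          hole = current;
        }
      }
    }
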
-int IdentityMapBase::Lookup(Object* key) const {
+int IdentityMapBase::Lookup(Address key) const {
int index = ScanKeysFor(key);
if (index < 0 && gc_counter_ != heap_->gc_count()) {
// Miss; rehash if there was a GC, then lookup again.
@@ -128,7 +129,7 @@ int IdentityMapBase::Lookup(Object* key) const {
return index;
}
-int IdentityMapBase::LookupOrInsert(Object* key) {
+int IdentityMapBase::LookupOrInsert(Address key) {
// Perform an optimistic lookup.
int index = ScanKeysFor(key);
if (index < 0) {
@@ -140,17 +141,16 @@ int IdentityMapBase::LookupOrInsert(Object* key) {
return index;
}
-int IdentityMapBase::Hash(Object* address) const {
- CHECK_NE(address, ReadOnlyRoots(heap_).not_mapped_symbol());
- uintptr_t raw_address = reinterpret_cast<uintptr_t>(address);
- return static_cast<int>(hasher_(raw_address));
+int IdentityMapBase::Hash(Address address) const {
+ CHECK_NE(address, ReadOnlyRoots(heap_).not_mapped_symbol().ptr());
+ return static_cast<int>(hasher_(address));
}
// Searches this map for the given key using the object's address
// as the identity, returning:
// found => a pointer to the storage location for the value
// not found => a pointer to a new storage location for the value
-IdentityMapBase::RawEntry IdentityMapBase::GetEntry(Object* key) {
+IdentityMapBase::RawEntry IdentityMapBase::GetEntry(Address key) {
CHECK(!is_iterable()); // Don't allow insertion while iterable.
if (capacity_ == 0) {
// Allocate the initial storage for keys and values.
@@ -158,13 +158,14 @@ IdentityMapBase::RawEntry IdentityMapBase::GetEntry(Object* key) {
mask_ = kInitialIdentityMapSize - 1;
gc_counter_ = heap_->gc_count();
- keys_ = reinterpret_cast<Object**>(NewPointerArray(capacity_));
- Object* not_mapped = ReadOnlyRoots(heap_).not_mapped_symbol();
+ keys_ = reinterpret_cast<Address*>(NewPointerArray(capacity_));
+ Address not_mapped = ReadOnlyRoots(heap_).not_mapped_symbol().ptr();
for (int i = 0; i < capacity_; i++) keys_[i] = not_mapped;
values_ = NewPointerArray(capacity_);
memset(values_, 0, sizeof(void*) * capacity_);
- heap_->RegisterStrongRoots(keys_, keys_ + capacity_);
+ heap_->RegisterStrongRoots(FullObjectSlot(keys_),
+ FullObjectSlot(keys_ + capacity_));
}
int index = LookupOrInsert(key);
return &values_[index];
@@ -174,7 +175,7 @@ IdentityMapBase::RawEntry IdentityMapBase::GetEntry(Object* key) {
// as the identity, returning:
// found => a pointer to the storage location for the value
// not found => {nullptr}
-IdentityMapBase::RawEntry IdentityMapBase::FindEntry(Object* key) const {
+IdentityMapBase::RawEntry IdentityMapBase::FindEntry(Address key) const {
// Don't allow find by key while iterable (might rehash).
CHECK(!is_iterable());
if (size_ == 0) return nullptr;
@@ -186,7 +187,7 @@ IdentityMapBase::RawEntry IdentityMapBase::FindEntry(Object* key) const {
// Deletes the given key from the map using the object's address as the
// identity, returning true iff the key was found (in which case, the value
// argument will be set to the deleted entry's value).
-bool IdentityMapBase::DeleteEntry(Object* key, void** deleted_value) {
+bool IdentityMapBase::DeleteEntry(Address key, void** deleted_value) {
CHECK(!is_iterable()); // Don't allow deletion by key while iterable.
if (size_ == 0) return false;
int index = Lookup(key);
@@ -194,10 +195,10 @@ bool IdentityMapBase::DeleteEntry(Object* key, void** deleted_value) {
return DeleteIndex(index, deleted_value);
}
-Object* IdentityMapBase::KeyAtIndex(int index) const {
+Address IdentityMapBase::KeyAtIndex(int index) const {
DCHECK_LE(0, index);
DCHECK_LT(index, capacity_);
- DCHECK_NE(keys_[index], ReadOnlyRoots(heap_).not_mapped_symbol());
+ DCHECK_NE(keys_[index], ReadOnlyRoots(heap_).not_mapped_symbol().ptr());
CHECK(is_iterable()); // Must be iterable to access by index;
return keys_[index];
}
@@ -205,7 +206,7 @@ Object* IdentityMapBase::KeyAtIndex(int index) const {
IdentityMapBase::RawEntry IdentityMapBase::EntryAtIndex(int index) const {
DCHECK_LE(0, index);
DCHECK_LT(index, capacity_);
- DCHECK_NE(keys_[index], ReadOnlyRoots(heap_).not_mapped_symbol());
+ DCHECK_NE(keys_[index], ReadOnlyRoots(heap_).not_mapped_symbol().ptr());
CHECK(is_iterable()); // Must be iterable to access by index;
return &values_[index];
}
@@ -214,7 +215,7 @@ int IdentityMapBase::NextIndex(int index) const {
DCHECK_LE(-1, index);
DCHECK_LE(index, capacity_);
CHECK(is_iterable()); // Must be iterable to access by index;
- Object* not_mapped = ReadOnlyRoots(heap_).not_mapped_symbol();
+ Address not_mapped = ReadOnlyRoots(heap_).not_mapped_symbol().ptr();
for (++index; index < capacity_; ++index) {
if (keys_[index] != not_mapped) {
return index;
@@ -228,11 +229,11 @@ void IdentityMapBase::Rehash() {
// Record the current GC counter.
gc_counter_ = heap_->gc_count();
// Assume that most objects won't be moved.
- std::vector<std::pair<Object*, void*>> reinsert;
+ std::vector<std::pair<Address, void*>> reinsert;
// Search the table looking for keys that wouldn't be found with their
// current hashcode and evacuate them.
int last_empty = -1;
- Object* not_mapped = ReadOnlyRoots(heap_).not_mapped_symbol();
+ Address not_mapped = ReadOnlyRoots(heap_).not_mapped_symbol().ptr();
for (int i = 0; i < capacity_; i++) {
if (keys_[i] == not_mapped) {
last_empty = i;
@@ -240,7 +241,7 @@ void IdentityMapBase::Rehash() {
int pos = Hash(keys_[i]) & mask_;
if (pos <= last_empty || pos > i) {
// Evacuate an entry that is in the wrong place.
- reinsert.push_back(std::pair<Object*, void*>(keys_[i], values_[i]));
+ reinsert.push_back(std::pair<Address, void*>(keys_[i], values_[i]));
keys_[i] = not_mapped;
values_[i] = nullptr;
last_empty = i;
@@ -261,7 +262,7 @@ void IdentityMapBase::Resize(int new_capacity) {
// Resize the internal storage and reinsert all the key/value pairs.
DCHECK_GT(new_capacity, size_);
int old_capacity = capacity_;
- Object** old_keys = keys_;
+ Address* old_keys = keys_;
void** old_values = values_;
capacity_ = new_capacity;
@@ -269,8 +270,8 @@ void IdentityMapBase::Resize(int new_capacity) {
gc_counter_ = heap_->gc_count();
size_ = 0;
- keys_ = reinterpret_cast<Object**>(NewPointerArray(capacity_));
- Object* not_mapped = ReadOnlyRoots(heap_).not_mapped_symbol();
+ keys_ = reinterpret_cast<Address*>(NewPointerArray(capacity_));
+ Address not_mapped = ReadOnlyRoots(heap_).not_mapped_symbol().ptr();
for (int i = 0; i < capacity_; i++) keys_[i] = not_mapped;
values_ = NewPointerArray(capacity_);
memset(values_, 0, sizeof(void*) * capacity_);
@@ -283,8 +284,9 @@ void IdentityMapBase::Resize(int new_capacity) {
}
// Unregister old keys and register new keys.
- heap_->UnregisterStrongRoots(old_keys);
- heap_->RegisterStrongRoots(keys_, keys_ + capacity_);
+ heap_->UnregisterStrongRoots(FullObjectSlot(old_keys));
+ heap_->RegisterStrongRoots(FullObjectSlot(keys_),
+ FullObjectSlot(keys_ + capacity_));
// Delete old storage.
DeleteArray(old_keys);
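
The root-registration change is the subtle part of Resize(): once keys_ is a plain Address array, the GC can no longer discover those references through C++ types, so the whole range is registered as strong roots, with FullObjectSlot adapting a raw Address* into a slot the collector can read and rewrite when objects move. A sketch of what such an adapter must expose, under assumed semantics and with a hypothetical name:

    #include <cstdint>

    using Address = uintptr_t;

    // Hypothetical slot adapter: gives the GC typed access to one
    // pointer-sized table cell so it can relocate the key in place.
    class FullObjectSlotSketch {
     public:
      explicit FullObjectSlotSketch(Address* location) : location_(location) {}
      Address load() const { return *location_; }              // GC reads the key
      void store(Address value) const { *location_ = value; }  // GC rewrites it
     private:
      Address* location_;
    };
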
diff --git a/deps/v8/src/identity-map.h b/deps/v8/src/identity-map.h
index 8598c6c1da..5c8f37df46 100644
--- a/deps/v8/src/identity-map.h
+++ b/deps/v8/src/identity-map.h
@@ -7,6 +7,7 @@
#include "src/base/functional.h"
#include "src/handles.h"
+#include "src/objects/heap-object.h"
namespace v8 {
namespace internal {
@@ -41,12 +42,12 @@ class IdentityMapBase {
is_iterable_(false) {}
virtual ~IdentityMapBase();
- RawEntry GetEntry(Object* key);
- RawEntry FindEntry(Object* key) const;
- bool DeleteEntry(Object* key, void** deleted_value);
+ RawEntry GetEntry(Address key);
+ RawEntry FindEntry(Address key) const;
+ bool DeleteEntry(Address key, void** deleted_value);
void Clear();
- Object* KeyAtIndex(int index) const;
+ Address KeyAtIndex(int index) const;
V8_EXPORT_PRIVATE RawEntry EntryAtIndex(int index) const;
V8_EXPORT_PRIVATE int NextIndex(int index) const;
@@ -59,14 +60,14 @@ class IdentityMapBase {
private:
// Internal implementation should not be called directly by subclasses.
- int ScanKeysFor(Object* address) const;
- int InsertKey(Object* address);
- int Lookup(Object* key) const;
- int LookupOrInsert(Object* key);
+ int ScanKeysFor(Address address) const;
+ int InsertKey(Address address);
+ int Lookup(Address key) const;
+ int LookupOrInsert(Address key);
bool DeleteIndex(int index, void** deleted_value);
void Rehash();
void Resize(int new_capacity);
- int Hash(Object* address) const;
+ int Hash(Address address) const;
base::hash<uintptr_t> hasher_;
Heap* heap_;
@@ -74,7 +75,7 @@ class IdentityMapBase {
int size_;
int capacity_;
int mask_;
- Object** keys_;
+ Address* keys_;
void** values_;
bool is_iterable_;
@@ -100,25 +101,29 @@ class IdentityMap : public IdentityMapBase {
// found => a pointer to the storage location for the value
// not found => a pointer to a new storage location for the value
V* Get(Handle<Object> key) { return Get(*key); }
- V* Get(Object* key) { return reinterpret_cast<V*>(GetEntry(key)); }
+ V* Get(Object key) { return reinterpret_cast<V*>(GetEntry(key.ptr())); }
// Searches this map for the given key using the object's address
// as the identity, returning:
// found => a pointer to the storage location for the value
// not found => {nullptr}
V* Find(Handle<Object> key) const { return Find(*key); }
- V* Find(Object* key) const { return reinterpret_cast<V*>(FindEntry(key)); }
+ V* Find(Object key) const {
+ return reinterpret_cast<V*>(FindEntry(key.ptr()));
+ }
// Set the value for the given key.
void Set(Handle<Object> key, V v) { Set(*key, v); }
- void Set(Object* key, V v) { *(reinterpret_cast<V*>(GetEntry(key))) = v; }
+ void Set(Object key, V v) {
+ *(reinterpret_cast<V*>(GetEntry(key.ptr()))) = v;
+ }
bool Delete(Handle<Object> key, V* deleted_value) {
return Delete(*key, deleted_value);
}
- bool Delete(Object* key, V* deleted_value) {
+ bool Delete(Object key, V* deleted_value) {
void* v = nullptr;
- bool deleted_something = DeleteEntry(key, &v);
+ bool deleted_something = DeleteEntry(key.ptr(), &v);
if (deleted_value != nullptr && deleted_something) {
*deleted_value = *reinterpret_cast<V*>(&v);
}
@@ -137,7 +142,7 @@ class IdentityMap : public IdentityMapBase {
return *this;
}
- Object* key() const { return map_->KeyAtIndex(index_); }
+ Object key() const { return Object(map_->KeyAtIndex(index_)); }
V* entry() const {
return reinterpret_cast<V*>(map_->EntryAtIndex(index_));
}
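
The IdentityMap<V> surface above keeps its handle-friendly overloads while Object now travels by value. A usage sketch, assuming a V8-internal caller with a live Heap* and assuming the AllocationPolicy parameter and constructor shape match the rest of the tree (neither is shown in this hunk):

    #include "src/identity-map.h"

    namespace v8 {
    namespace internal {

    void IdentityMapExample(Heap* heap, Handle<Object> key) {
      IdentityMap<int, FreeStoreAllocationPolicy> map(heap);
      map.Set(key, 42);                 // insert or overwrite by identity
      int* slot = map.Find(key);        // nullptr when the key is absent
      if (slot != nullptr) *slot = 43;  // mutate the stored value in place
      int removed = 0;
      bool found = map.Delete(key, &removed);  // found == true, removed == 43
      (void)found;
    }

    }  // namespace internal
    }  // namespace v8
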
diff --git a/deps/v8/src/inspector/BUILD.gn b/deps/v8/src/inspector/BUILD.gn
index 699b1bcbd4..10d476d9ee 100644
--- a/deps/v8/src/inspector/BUILD.gn
+++ b/deps/v8/src/inspector/BUILD.gn
@@ -30,9 +30,9 @@ _protocol_generated = [
action("protocol_compatibility") {
visibility = [ ":*" ] # Only targets in this file can depend on this.
- script = "$_inspector_protocol/CheckProtocolCompatibility.py"
+ script = "$_inspector_protocol/check_protocol_compatibility.py"
inputs = [
- "js_protocol.json",
+ "js_protocol.pdl",
]
_stamp = "$target_gen_dir/js_protocol.stamp"
outputs = [
@@ -41,7 +41,7 @@ action("protocol_compatibility") {
args = [
"--stamp",
rebase_path(_stamp, root_build_dir),
- rebase_path("js_protocol.json", root_build_dir),
+ rebase_path("js_protocol.pdl", root_build_dir),
]
}
@@ -55,28 +55,12 @@ inspector_protocol_generate("protocol_generated_sources") {
out_dir = target_gen_dir
config_file = "inspector_protocol_config.json"
inputs = [
- "js_protocol.json",
+ "js_protocol.pdl",
"inspector_protocol_config.json",
]
outputs = _protocol_generated
}
-action("inspector_injected_script") {
- visibility = [ ":*" ] # Only targets in this file can depend on this.
- script = "build/xxd.py"
- inputs = [
- "injected-script-source.js",
- ]
- outputs = [
- "$target_gen_dir/injected-script-source.h",
- ]
- args = [
- "InjectedScriptSource_js",
- rebase_path("injected-script-source.js", root_build_dir),
- rebase_path("$target_gen_dir/injected-script-source.h", root_build_dir),
- ]
-}
-
config("inspector_config") {
visibility = [ ":*" ] # Only targets in this file can depend on this.
if (is_component_build) {
@@ -86,7 +70,6 @@ config("inspector_config") {
v8_source_set("inspector") {
deps = [
- ":inspector_injected_script",
":protocol_generated_sources",
]
configs = [ ":inspector_config" ]
@@ -101,8 +84,9 @@ v8_source_set("inspector") {
"../../include/v8-inspector-protocol.h",
"../../include/v8-inspector.h",
]
- sources += get_target_outputs(":inspector_injected_script")
sources += [
+ "custom-preview.cc",
+ "custom-preview.h",
"injected-script.cc",
"injected-script.h",
"inspected-context.cc",
@@ -129,18 +113,12 @@ v8_source_set("inspector") {
"v8-debugger-script.h",
"v8-debugger.cc",
"v8-debugger.h",
- "v8-function-call.cc",
- "v8-function-call.h",
"v8-heap-profiler-agent-impl.cc",
"v8-heap-profiler-agent-impl.h",
- "v8-injected-script-host.cc",
- "v8-injected-script-host.h",
"v8-inspector-impl.cc",
"v8-inspector-impl.h",
"v8-inspector-session-impl.cc",
"v8-inspector-session-impl.h",
- "v8-internal-value-type.cc",
- "v8-internal-value-type.h",
"v8-profiler-agent-impl.cc",
"v8-profiler-agent-impl.h",
"v8-regex.cc",
@@ -153,7 +131,18 @@ v8_source_set("inspector") {
"v8-stack-trace-impl.h",
"v8-value-utils.cc",
"v8-value-utils.h",
+ "value-mirror.cc",
+ "value-mirror.h",
"wasm-translation.cc",
"wasm-translation.h",
]
}
+
+# Target to generate all .cc files.
+group("v8_generated_cc_files") {
+ testonly = true
+
+ deps = [
+ ":protocol_generated_sources",
+ ]
+}
diff --git a/deps/v8/src/inspector/DEPS b/deps/v8/src/inspector/DEPS
index 8624d47bf4..19a30512ce 100644
--- a/deps/v8/src/inspector/DEPS
+++ b/deps/v8/src/inspector/DEPS
@@ -10,7 +10,6 @@ include_rules = [
"+src/conversions.h",
"+src/flags.h",
"+src/v8memory.h",
- "+src/unicode-cache.h",
"+src/inspector",
"+src/tracing",
"+src/debug/debug-interface.h",
diff --git a/deps/v8/src/inspector/OWNERS b/deps/v8/src/inspector/OWNERS
index 3cfeff35c4..848dee532a 100644
--- a/deps/v8/src/inspector/OWNERS
+++ b/deps/v8/src/inspector/OWNERS
@@ -16,4 +16,7 @@ per-file js_protocol.pdl=set noparent
per-file js_protocol.pdl=dgozman@chromium.org
per-file js_protocol.pdl=pfeldman@chromium.org
+per-file PRESUBMIT.py=machenbach@chromium.org
+per-file PRESUBMIT.py=sergiyb@chromium.org
+
# COMPONENT: Platform>DevTools>JavaScript
diff --git a/deps/v8/src/inspector/PRESUBMIT.py b/deps/v8/src/inspector/PRESUBMIT.py
deleted file mode 100644
index 8b7a5cb320..0000000000
--- a/deps/v8/src/inspector/PRESUBMIT.py
+++ /dev/null
@@ -1,67 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2016 the V8 project authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-"""v8_inspect presubmit script
-
-See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
-for more details about the presubmit API built into gcl.
-"""
-
-compile_note = "Be sure to run your patch by the compile-scripts.py script prior to committing!"
-
-
-def _CompileScripts(input_api, output_api):
- local_paths = [f.LocalPath() for f in input_api.AffectedFiles()]
-
- compilation_related_files = [
- "js_protocol.json"
- "compile-scripts.js",
- "injected-script-source.js",
- "injected_script_externs.js",
- "check_injected_script_source.js"
- ]
-
- for file in compilation_related_files:
- if (any(file in path for path in local_paths)):
- script_path = input_api.os_path.join(input_api.PresubmitLocalPath(),
- "build", "compile-scripts.py")
- proc = input_api.subprocess.Popen(
- [input_api.python_executable, script_path],
- stdout=input_api.subprocess.PIPE,
- stderr=input_api.subprocess.STDOUT)
- out, _ = proc.communicate()
- if "ERROR" in out or "WARNING" in out or proc.returncode:
- return [output_api.PresubmitError(out)]
- if "NOTE" in out:
- return [output_api.PresubmitPromptWarning(out + compile_note)]
- return []
- return []
-
-
-def CheckChangeOnUpload(input_api, output_api):
- results = []
- results.extend(_CompileScripts(input_api, output_api))
- return results
-
-
-def CheckChangeOnCommit(input_api, output_api):
- results = []
- results.extend(_CompileScripts(input_api, output_api))
- return results
-
-def PostUploadHook(cl, change, output_api):
- """git cl upload will call this hook after the issue is created/modified.
-
- This hook adds extra try bots to the CL description in order to run layout
- tests in addition to CQ try bots.
- """
- return output_api.EnsureCQIncludeTrybotsAreAdded(
- cl,
- [
- 'master.tryserver.blink:linux_trusty_blink_rel',
- 'luci.chromium.try:linux_chromium_headless_rel',
- ],
- 'Automatically added layout test trybots to run tests on CQ.')
diff --git a/deps/v8/src/inspector/build/check_injected_script_source.py b/deps/v8/src/inspector/build/check_injected_script_source.py
deleted file mode 100644
index 0f2509cd8c..0000000000
--- a/deps/v8/src/inspector/build/check_injected_script_source.py
+++ /dev/null
@@ -1,88 +0,0 @@
-#!/usr/bin/env python
-# Copyright (c) 2014 Google Inc. All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#
-# Copied from blink:
-# WebKit/Source/devtools/scripts/check_injected_script_source.py
-#
-
-import re
-import sys
-import os
-
-
-def validate_injected_script(fileName):
- f = open(fileName, "r")
- lines = f.readlines()
- f.close()
-
- proto_functions = "|".join([
- # Array.prototype.*
- "concat", "every", "filter", "forEach", "indexOf", "join", "lastIndexOf", "map", "pop",
- "push", "reduce", "reduceRight", "reverse", "shift", "slice", "some", "sort", "splice", "toLocaleString", "toString", "unshift",
- # Function.prototype.*
- "apply", "bind", "call", "isGenerator", "toSource",
- # Object.prototype.*
- "toString",
- ])
-
- global_functions = "|".join([
- "eval", "uneval", "isFinite", "isNaN", "parseFloat", "parseInt", "decodeURI", "decodeURIComponent",
- "encodeURI", "encodeURIComponent", "escape", "unescape", "Map", "Set"
- ])
-
- # Black list:
- # - instanceof, since e.g. "obj instanceof Error" may throw if Error is overridden and is not a function
- # - Object.prototype.toString()
- # - Array.prototype.*
- # - Function.prototype.*
- # - Math.*
- # - Global functions
- black_list_call_regex = re.compile(r"\sinstanceof\s+\w*|\bMath\.\w+\(|(?<!InjectedScriptHost)\.(" + proto_functions + r")\(|[^\.]\b(" + global_functions + r")\(")
-
- errors_found = False
- for i, line in enumerate(lines):
- if line.find("suppressBlacklist") != -1:
- continue
- for match in re.finditer(black_list_call_regex, line):
- errors_found = True
- print "ERROR: Black listed expression in %s at line %02d column %02d: %s" % (os.path.basename(fileName), i + 1, match.start(), match.group(0))
-
- if not errors_found:
- print "OK"
-
-
-def main(argv):
- if len(argv) < 2:
- print('ERROR: Usage: %s path/to/injected-script-source.js' % argv[0])
- return 1
-
- validate_injected_script(argv[1])
-
-if __name__ == '__main__':
- sys.exit(main(sys.argv))
diff --git a/deps/v8/src/inspector/build/closure-compiler.tar.gz.sha1 b/deps/v8/src/inspector/build/closure-compiler.tar.gz.sha1
deleted file mode 100644
index 5366f51b21..0000000000
--- a/deps/v8/src/inspector/build/closure-compiler.tar.gz.sha1
+++ /dev/null
@@ -1 +0,0 @@
-69937d3c239ca63e4c9045718886ddd096ffc054 \ No newline at end of file
diff --git a/deps/v8/src/inspector/build/compile-scripts.py b/deps/v8/src/inspector/build/compile-scripts.py
deleted file mode 100755
index afcbce722d..0000000000
--- a/deps/v8/src/inspector/build/compile-scripts.py
+++ /dev/null
@@ -1,150 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2016 the V8 project authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-import os
-import os.path as path
-import generate_protocol_externs
-import re
-import subprocess
-import sys
-
-if len(sys.argv) == 2 and sys.argv[1] == '--help':
- print("Usage: %s" % path.basename(sys.argv[0]))
- sys.exit(0)
-
-java_required_major = 1
-java_required_minor = 7
-
-v8_inspector_path = path.dirname(path.dirname(path.abspath(__file__)))
-
-protocol_externs_file = path.join(v8_inspector_path, 'protocol_externs.js')
-injected_script_source_name = path.join(v8_inspector_path,
- 'injected-script-source.js')
-injected_script_externs_file = path.join(v8_inspector_path,
- 'injected_script_externs.js')
-
-generate_protocol_externs.generate_protocol_externs(protocol_externs_file,
- path.join(v8_inspector_path, 'js_protocol.json'))
-
-error_warning_regex = re.compile(r'WARNING|ERROR')
-
-closure_compiler_jar = path.join(v8_inspector_path, 'build',
- 'closure-compiler', 'closure-compiler.jar')
-
-common_closure_args = [
- '--checks_only',
- '--warning_level', 'VERBOSE'
-]
-
-# Error reporting and checking.
-errors_found = False
-
-def popen(arguments):
- return subprocess.Popen(arguments, stdout=subprocess.PIPE,
- stderr=subprocess.STDOUT)
-
-def error_excepthook(exctype, value, traceback):
- print 'ERROR:'
- sys.__excepthook__(exctype, value, traceback)
-sys.excepthook = error_excepthook
-
-def has_errors(output):
- return re.search(error_warning_regex, output) != None
-
-# Find java. Based on
-# http://stackoverflow.com/questions/377017/test-if-executable-exists-in-python.
-def which(program):
- def is_exe(fpath):
- return path.isfile(fpath) and os.access(fpath, os.X_OK)
-
- fpath, fname = path.split(program)
- if fpath:
- if is_exe(program):
- return program
- else:
- for part in os.environ['PATH'].split(os.pathsep):
- part = part.strip('"')
- exe_file = path.join(part, program)
- if is_exe(exe_file):
- return exe_file
- return None
-
-def find_java():
- exec_command = None
- has_server_jvm = True
- java_path = which('java')
- if not java_path:
- java_path = which('java.exe')
-
- if not java_path:
- print 'NOTE: No Java executable found in $PATH.'
- sys.exit(0)
-
- is_ok = False
- java_version_out, _ = popen([java_path, '-version']).communicate()
- java_build_regex = re.compile(r'^\w+ version "(\d+)\.(\d+)')
- # pylint: disable=E1103
- match = re.search(java_build_regex, java_version_out)
- if match:
- major = int(match.group(1))
- minor = int(match.group(2))
- is_ok = major >= java_required_major and minor >= java_required_minor
- if is_ok:
- exec_command = [java_path, '-Xms1024m', '-server',
- '-XX:+TieredCompilation']
- check_server_proc = popen(exec_command + ['-version'])
- check_server_proc.communicate()
- if check_server_proc.returncode != 0:
- # Not all Java installs have server JVMs.
- exec_command = exec_command.remove('-server')
- has_server_jvm = False
-
- if not is_ok:
- print 'NOTE: Java executable version %d.%d or above not found in $PATH.' % (java_required_major, java_required_minor)
- sys.exit(0)
- print 'Java executable: %s%s' % (java_path, '' if has_server_jvm else ' (no server JVM)')
- return exec_command
-
-java_exec = find_java()
-
-spawned_compiler_command = java_exec + [
- '-jar',
- closure_compiler_jar
-] + common_closure_args
-
-print 'Compiling injected-script-source.js...'
-
-command = spawned_compiler_command + [
- '--externs', injected_script_externs_file,
- '--externs', protocol_externs_file,
- '--js', injected_script_source_name
-]
-
-injected_script_compile_proc = popen(command)
-
-print 'Validating injected-script-source.js...'
-injectedscript_check_script_path = path.join(v8_inspector_path, 'build',
- 'check_injected_script_source.py')
-validate_injected_script_proc = popen([sys.executable,
- injectedscript_check_script_path, injected_script_source_name])
-
-print
-
-(injected_script_compile_out, _) = injected_script_compile_proc.communicate()
-print 'injected-script-source.js compilation output:%s' % os.linesep
-print injected_script_compile_out
-errors_found |= has_errors(injected_script_compile_out)
-
-(validate_injected_script_out, _) = validate_injected_script_proc.communicate()
-print 'Validate injected-script-source.js output:%s' % os.linesep
-print validate_injected_script_out if validate_injected_script_out else '<empty>'
-errors_found |= has_errors(validate_injected_script_out)
-
-os.remove(protocol_externs_file)
-
-if errors_found:
- print 'ERRORS DETECTED'
- sys.exit(1)
diff --git a/deps/v8/src/inspector/build/generate_protocol_externs.py b/deps/v8/src/inspector/build/generate_protocol_externs.py
deleted file mode 100755
index c2ba2c5b84..0000000000
--- a/deps/v8/src/inspector/build/generate_protocol_externs.py
+++ /dev/null
@@ -1,246 +0,0 @@
-#!/usr/bin/env python
-# Copyright (c) 2011 Google Inc. All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-import os
-import re
-import json
-
-type_traits = {
- "any": "*",
- "string": "string",
- "integer": "number",
- "number": "number",
- "boolean": "boolean",
- "array": "!Array.<*>",
- "object": "!Object",
-}
-
-promisified_domains = {
- "Accessibility",
- "Animation",
- "CSS",
- "Emulation",
- "Profiler"
-}
-
-ref_types = {}
-
-def full_qualified_type_id(domain_name, type_id):
- if type_id.find(".") == -1:
- return "%s.%s" % (domain_name, type_id)
- return type_id
-
-
-def fix_camel_case(name):
- prefix = ""
- if name[0] == "-":
- prefix = "Negative"
- name = name[1:]
- refined = re.sub(r'-(\w)', lambda pat: pat.group(1).upper(), name)
- refined = to_title_case(refined)
- return prefix + re.sub(r'(?i)HTML|XML|WML|API', lambda pat: pat.group(0).upper(), refined)
-
-
-def to_title_case(name):
- return name[:1].upper() + name[1:]
-
-
-def generate_enum(name, json):
- enum_members = []
- for member in json["enum"]:
- enum_members.append(" %s: \"%s\"" % (fix_camel_case(member), member))
- return "\n/** @enum {string} */\n%s = {\n%s\n};\n" % (name, (",\n".join(enum_members)))
-
-
-def param_type(domain_name, param):
- if "type" in param:
- if param["type"] == "array":
- items = param["items"]
- return "!Array.<%s>" % param_type(domain_name, items)
- else:
- return type_traits[param["type"]]
- if "$ref" in param:
- type_id = full_qualified_type_id(domain_name, param["$ref"])
- if type_id in ref_types:
- return ref_types[type_id]
- else:
- print "Type not found: " + type_id
- return "!! Type not found: " + type_id
-
-
-def load_schema(file, domains):
- input_file = open(file, "r")
- json_string = input_file.read()
- parsed_json = json.loads(json_string)
- domains.extend(parsed_json["domains"])
-
-
-def generate_protocol_externs(output_path, file1):
- domains = []
- load_schema(file1, domains)
- output_file = open(output_path, "w")
-
- output_file.write(
-"""
-var InspectorBackend = {}
-
-var Protocol = {};
-/** @typedef {string}*/
-Protocol.Error;
-""")
-
- for domain in domains:
- domain_name = domain["domain"]
- if "types" in domain:
- for type in domain["types"]:
- type_id = full_qualified_type_id(domain_name, type["id"])
- ref_types[type_id] = "%sAgent.%s" % (domain_name, type["id"])
-
- for domain in domains:
- domain_name = domain["domain"]
- promisified = domain_name in promisified_domains
-
- output_file.write("\n\n/**\n * @constructor\n*/\n")
- output_file.write("Protocol.%sAgent = function(){};\n" % domain_name)
-
- if "commands" in domain:
- for command in domain["commands"]:
- output_file.write("\n/**\n")
- params = []
- has_return_value = "returns" in command
- explicit_parameters = promisified and has_return_value
- if ("parameters" in command):
- for in_param in command["parameters"]:
- # All parameters are not optional in case of promisified domain with return value.
- if (not explicit_parameters and "optional" in in_param):
- params.append("opt_%s" % in_param["name"])
- output_file.write(" * @param {%s=} opt_%s\n" % (param_type(domain_name, in_param), in_param["name"]))
- else:
- params.append(in_param["name"])
- output_file.write(" * @param {%s} %s\n" % (param_type(domain_name, in_param), in_param["name"]))
- returns = []
- returns.append("?Protocol.Error")
- if ("error" in command):
- returns.append("%s=" % param_type(domain_name, command["error"]))
- if (has_return_value):
- for out_param in command["returns"]:
- if ("optional" in out_param):
- returns.append("%s=" % param_type(domain_name, out_param))
- else:
- returns.append("%s" % param_type(domain_name, out_param))
- callback_return_type = "void="
- if explicit_parameters:
- callback_return_type = "T"
- elif promisified:
- callback_return_type = "T="
- output_file.write(" * @param {function(%s):%s} opt_callback\n" % (", ".join(returns), callback_return_type))
- if (promisified):
- output_file.write(" * @return {!Promise.<T>}\n")
- output_file.write(" * @template T\n")
- params.append("opt_callback")
-
- output_file.write(" */\n")
- output_file.write("Protocol.%sAgent.prototype.%s = function(%s) {}\n" % (domain_name, command["name"], ", ".join(params)))
- output_file.write("/** @param {function(%s):void=} opt_callback */\n" % ", ".join(returns))
- output_file.write("Protocol.%sAgent.prototype.invoke_%s = function(obj, opt_callback) {}\n" % (domain_name, command["name"]))
-
- output_file.write("\n\n\nvar %sAgent = function(){};\n" % domain_name)
-
- if "types" in domain:
- for type in domain["types"]:
- if type["type"] == "object":
- typedef_args = []
- if "properties" in type:
- for property in type["properties"]:
- suffix = ""
- if ("optional" in property):
- suffix = "|undefined"
- if "enum" in property:
- enum_name = "%sAgent.%s%s" % (domain_name, type["id"], to_title_case(property["name"]))
- output_file.write(generate_enum(enum_name, property))
- typedef_args.append("%s:(%s%s)" % (property["name"], enum_name, suffix))
- else:
- typedef_args.append("%s:(%s%s)" % (property["name"], param_type(domain_name, property), suffix))
- if (typedef_args):
- output_file.write("\n/** @typedef {!{%s}} */\n%sAgent.%s;\n" % (", ".join(typedef_args), domain_name, type["id"]))
- else:
- output_file.write("\n/** @typedef {!Object} */\n%sAgent.%s;\n" % (domain_name, type["id"]))
- elif type["type"] == "string" and "enum" in type:
- output_file.write(generate_enum("%sAgent.%s" % (domain_name, type["id"]), type))
- elif type["type"] == "array":
- output_file.write("\n/** @typedef {!Array.<!%s>} */\n%sAgent.%s;\n" % (param_type(domain_name, type["items"]), domain_name, type["id"]))
- else:
- output_file.write("\n/** @typedef {%s} */\n%sAgent.%s;\n" % (type_traits[type["type"]], domain_name, type["id"]))
-
- output_file.write("/** @interface */\n")
- output_file.write("%sAgent.Dispatcher = function() {};\n" % domain_name)
- if "events" in domain:
- for event in domain["events"]:
- params = []
- if ("parameters" in event):
- output_file.write("/**\n")
- for param in event["parameters"]:
- if ("optional" in param):
- params.append("opt_%s" % param["name"])
- output_file.write(" * @param {%s=} opt_%s\n" % (param_type(domain_name, param), param["name"]))
- else:
- params.append(param["name"])
- output_file.write(" * @param {%s} %s\n" % (param_type(domain_name, param), param["name"]))
- output_file.write(" */\n")
- output_file.write("%sAgent.Dispatcher.prototype.%s = function(%s) {};\n" % (domain_name, event["name"], ", ".join(params)))
-
- output_file.write("\n/** @constructor\n * @param {!Object.<string, !Object>} agentsMap\n */\n")
- output_file.write("Protocol.Agents = function(agentsMap){this._agentsMap;};\n")
- output_file.write("/**\n * @param {string} domain\n * @param {!Object} dispatcher\n */\n")
- output_file.write("Protocol.Agents.prototype.registerDispatcher = function(domain, dispatcher){};\n")
- for domain in domains:
- domain_name = domain["domain"]
- uppercase_length = 0
- while uppercase_length < len(domain_name) and domain_name[uppercase_length].isupper():
- uppercase_length += 1
-
- output_file.write("/** @return {!Protocol.%sAgent}*/\n" % domain_name)
- output_file.write("Protocol.Agents.prototype.%s = function(){};\n" % (domain_name[:uppercase_length].lower() + domain_name[uppercase_length:] + "Agent"))
-
- output_file.write("/**\n * @param {!%sAgent.Dispatcher} dispatcher\n */\n" % domain_name)
- output_file.write("Protocol.Agents.prototype.register%sDispatcher = function(dispatcher) {}\n" % domain_name)
-
-
- output_file.close()
-
-if __name__ == "__main__":
- import sys
- import os.path
- program_name = os.path.basename(__file__)
- if len(sys.argv) < 4 or sys.argv[1] != "-o":
- sys.stderr.write("Usage: %s -o OUTPUT_FILE INPUT_FILE\n" % program_name)
- exit(1)
- output_path = sys.argv[2]
- input_path = sys.argv[3]
- generate_protocol_externs(output_path, input_path)
diff --git a/deps/v8/src/inspector/build/rjsmin.py b/deps/v8/src/inspector/build/rjsmin.py
deleted file mode 100755
index 8357a6dcc1..0000000000
--- a/deps/v8/src/inspector/build/rjsmin.py
+++ /dev/null
@@ -1,295 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2011 - 2013
-# André Malo or his licensors, as applicable
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-r"""
-=====================
- Javascript Minifier
-=====================
-
-rJSmin is a javascript minifier written in python.
-
-The minifier is based on the semantics of `jsmin.c by Douglas Crockford`_\.
-
-The module is a re-implementation aiming for speed, so it can be used at
-runtime (rather than during a preprocessing step). Usually it produces the
-same results as the original ``jsmin.c``. It differs in the following ways:
-
-- there is no error detection: unterminated string, regex and comment
- literals are treated as regular javascript code and minified as such.
-- Control characters inside string and regex literals are left untouched; they
- are not converted to spaces (nor to \n)
-- Newline characters are not allowed inside string and regex literals, except
- for line continuations in string literals (ECMA-5).
-- "return /regex/" is recognized correctly.
-- "+ +" and "- -" sequences are not collapsed to '++' or '--'
-- Newlines before ! operators are removed more sensibly
-- rJSmin does not handle streams, but only complete strings. (However, the
- module provides a "streamy" interface).
-
-Since most parts of the logic are handled by the regex engine it's way
-faster than the original python port of ``jsmin.c`` by Baruch Even. The speed
-factor varies between about 6 and 55 depending on input and python version
-(it gets faster the more compressed the input already is). Compared to the
-speed-refactored python port by Dave St.Germain the performance gain is less
-dramatic but still between 1.2 and 7. See the docs/BENCHMARKS file for
-details.
-
-rjsmin.c is a reimplementation of rjsmin.py in C and speeds it up even more.
-
-Both python 2 and python 3 are supported.
-
-.. _jsmin.c by Douglas Crockford:
- http://www.crockford.com/javascript/jsmin.c
-"""
-__author__ = "Andr\xe9 Malo"
-__author__ = getattr(__author__, 'decode', lambda x: __author__)('latin-1')
-__docformat__ = "restructuredtext en"
-__license__ = "Apache License, Version 2.0"
-__version__ = '1.0.7'
-__all__ = ['jsmin']
-
-import re as _re
-
-
-def _make_jsmin(python_only=False):
- """
- Generate JS minifier based on `jsmin.c by Douglas Crockford`_
-
- .. _jsmin.c by Douglas Crockford:
- http://www.crockford.com/javascript/jsmin.c
-
- :Parameters:
- `python_only` : ``bool``
- Use only the python variant. If true, the c extension is not even
- tried to be loaded.
-
- :Return: Minifier
- :Rtype: ``callable``
- """
- # pylint: disable = R0912, R0914, W0612
- if not python_only:
- try:
- import _rjsmin
- except ImportError:
- pass
- else:
- return _rjsmin.jsmin
- try:
- xrange
- except NameError:
- xrange = range # pylint: disable = W0622
-
- space_chars = r'[\000-\011\013\014\016-\040]'
-
- line_comment = r'(?://[^\r\n]*)'
- space_comment = r'(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/)'
- string1 = \
- r'(?:\047[^\047\\\r\n]*(?:\\(?:[^\r\n]|\r?\n|\r)[^\047\\\r\n]*)*\047)'
- string2 = r'(?:"[^"\\\r\n]*(?:\\(?:[^\r\n]|\r?\n|\r)[^"\\\r\n]*)*")'
- strings = r'(?:%s|%s)' % (string1, string2)
-
- charclass = r'(?:\[[^\\\]\r\n]*(?:\\[^\r\n][^\\\]\r\n]*)*\])'
- nospecial = r'[^/\\\[\r\n]'
- regex = r'(?:/(?![\r\n/*])%s*(?:(?:\\[^\r\n]|%s)%s*)*/)' % (
- nospecial, charclass, nospecial)
- space = r'(?:%s|%s)' % (space_chars, space_comment)
- newline = r'(?:%s?[\r\n])' % line_comment
-
- def fix_charclass(result):
- """ Fixup string of chars to fit into a regex char class """
- pos = result.find('-')
- if pos >= 0:
- result = r'%s%s-' % (result[:pos], result[pos + 1:])
-
- def sequentize(string):
- """
- Notate consecutive characters as sequence
-
- (1-4 instead of 1234)
- """
- first, last, result = None, None, []
- for char in map(ord, string):
- if last is None:
- first = last = char
- elif last + 1 == char:
- last = char
- else:
- result.append((first, last))
- first = last = char
- if last is not None:
- result.append((first, last))
- return ''.join(['%s%s%s' % (
- chr(first),
- last > first + 1 and '-' or '',
- last != first and chr(last) or '') for first, last in result])
-
- return _re.sub(r'([\000-\040\047])', # for better portability
- lambda m: '\\%03o' % ord(m.group(1)), (sequentize(result)
- .replace('\\', '\\\\')
- .replace('[', '\\[')
- .replace(']', '\\]')))
-
- def id_literal_(what):
- """ Make id_literal like char class """
- match = _re.compile(what).match
- result = ''.join([chr(c) for c in xrange(127) if not match(chr(c))])
- return '[^%s]' % fix_charclass(result)
-
- def not_id_literal_(keep):
- """ Make negated id_literal like char class """
- match = _re.compile(id_literal_(keep)).match
- result = ''.join([chr(c) for c in xrange(127) if not match(chr(c))])
- return r'[%s]' % fix_charclass(result)
-
- not_id_literal = not_id_literal_(r'[a-zA-Z0-9_$]')
- preregex1 = r'[(,=:\[!&|?{};\r\n]'
- preregex2 = r'%(not_id_literal)sreturn' % locals()
-
- id_literal = id_literal_(r'[a-zA-Z0-9_$]')
- id_literal_open = id_literal_(r'[a-zA-Z0-9_${\[(!+-]')
- id_literal_close = id_literal_(r'[a-zA-Z0-9_$}\])"\047+-]')
-
- dull = r'[^\047"/\000-\040]'
-
- space_sub = _re.compile((
- r'(%(dull)s+)'
- r'|(%(strings)s%(dull)s*)'
- r'|(?<=%(preregex1)s)'
- r'%(space)s*(?:%(newline)s%(space)s*)*'
- r'(%(regex)s%(dull)s*)'
- r'|(?<=%(preregex2)s)'
- r'%(space)s*(?:%(newline)s%(space)s)*'
- r'(%(regex)s%(dull)s*)'
- r'|(?<=%(id_literal_close)s)'
- r'%(space)s*(?:(%(newline)s)%(space)s*)+'
- r'(?=%(id_literal_open)s)'
- r'|(?<=%(id_literal)s)(%(space)s)+(?=%(id_literal)s)'
- r'|(?<=\+)(%(space)s)+(?=\+)'
- r'|(?<=-)(%(space)s)+(?=-)'
- r'|%(space)s+'
- r'|(?:%(newline)s%(space)s*)+') % locals()).sub
- #print space_sub.__self__.pattern
-
- def space_subber(match):
- """ Substitution callback """
- # pylint: disable = C0321, R0911
- groups = match.groups()
- if groups[0]:
- return groups[0]
- elif groups[1]:
- return groups[1]
- elif groups[2]:
- return groups[2]
- elif groups[3]:
- return groups[3]
- elif groups[4]:
- return '\n'
- elif groups[5] or groups[6] or groups[7]:
- return ' '
- else:
- return ''
-
- def jsmin(script): # pylint: disable = W0621
- r"""
- Minify javascript based on `jsmin.c by Douglas Crockford`_\.
-
- Instead of parsing the stream char by char, it uses a regular
- expression approach which minifies the whole script with one big
- substitution regex.
-
- .. _jsmin.c by Douglas Crockford:
- http://www.crockford.com/javascript/jsmin.c
-
- :Parameters:
- `script` : ``str``
- Script to minify
-
- :Return: Minified script
- :Rtype: ``str``
- """
- return space_sub(space_subber, '\n%s\n' % script).strip()
-
- return jsmin
-
-jsmin = _make_jsmin()
-
-
-def jsmin_for_posers(script):
- r"""
- Minify javascript based on `jsmin.c by Douglas Crockford`_\.
-
- Instead of parsing the stream char by char, it uses a regular
- expression approach which minifies the whole script with one big
- substitution regex.
-
- .. _jsmin.c by Douglas Crockford:
- http://www.crockford.com/javascript/jsmin.c
-
- :Warning: This function is the digest of a _make_jsmin() call. It just
- utilizes the resulting regex. It's just for fun here and may
- vanish any time. Use the `jsmin` function instead.
-
- :Parameters:
- `script` : ``str``
- Script to minify
-
- :Return: Minified script
- :Rtype: ``str``
- """
- def subber(match):
- """ Substitution callback """
- groups = match.groups()
- return (
- groups[0] or
- groups[1] or
- groups[2] or
- groups[3] or
- (groups[4] and '\n') or
- (groups[5] and ' ') or
- (groups[6] and ' ') or
- (groups[7] and ' ') or
- '')
-
- return _re.sub(
- r'([^\047"/\000-\040]+)|((?:(?:\047[^\047\\\r\n]*(?:\\(?:[^\r\n]|\r?'
- r'\n|\r)[^\047\\\r\n]*)*\047)|(?:"[^"\\\r\n]*(?:\\(?:[^\r\n]|\r?\n|'
- r'\r)[^"\\\r\n]*)*"))[^\047"/\000-\040]*)|(?<=[(,=:\[!&|?{};\r\n])(?'
- r':[\000-\011\013\014\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/))*'
- r'(?:(?:(?://[^\r\n]*)?[\r\n])(?:[\000-\011\013\014\016-\040]|(?:/\*'
- r'[^*]*\*+(?:[^/*][^*]*\*+)*/))*)*((?:/(?![\r\n/*])[^/\\\[\r\n]*(?:('
- r'?:\\[^\r\n]|(?:\[[^\\\]\r\n]*(?:\\[^\r\n][^\\\]\r\n]*)*\]))[^/\\\['
- r'\r\n]*)*/)[^\047"/\000-\040]*)|(?<=[\000-#%-,./:-@\[-^`{-~-]return'
- r')(?:[\000-\011\013\014\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/'
- r'))*(?:(?:(?://[^\r\n]*)?[\r\n])(?:[\000-\011\013\014\016-\040]|(?:'
- r'/\*[^*]*\*+(?:[^/*][^*]*\*+)*/)))*((?:/(?![\r\n/*])[^/\\\[\r\n]*(?'
- r':(?:\\[^\r\n]|(?:\[[^\\\]\r\n]*(?:\\[^\r\n][^\\\]\r\n]*)*\]))[^/'
- r'\\\[\r\n]*)*/)[^\047"/\000-\040]*)|(?<=[^\000-!#%&(*,./:-@\[\\^`{|'
- r'~])(?:[\000-\011\013\014\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)'
- r'*/))*(?:((?:(?://[^\r\n]*)?[\r\n]))(?:[\000-\011\013\014\016-\040]'
- r'|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/))*)+(?=[^\000-\040"#%-\047)*,./'
- r':-@\\-^`|-~])|(?<=[^\000-#%-,./:-@\[-^`{-~-])((?:[\000-\011\013\01'
- r'4\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/)))+(?=[^\000-#%-,./:'
- r'-@\[-^`{-~-])|(?<=\+)((?:[\000-\011\013\014\016-\040]|(?:/\*[^*]*'
- r'\*+(?:[^/*][^*]*\*+)*/)))+(?=\+)|(?<=-)((?:[\000-\011\013\014\016-'
- r'\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/)))+(?=-)|(?:[\000-\011\013'
- r'\014\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/))+|(?:(?:(?://[^'
- r'\r\n]*)?[\r\n])(?:[\000-\011\013\014\016-\040]|(?:/\*[^*]*\*+(?:[^'
- r'/*][^*]*\*+)*/))*)+', subber, '\n%s\n' % script).strip()
-
-
-if __name__ == '__main__':
- import sys as _sys
- _sys.stdout.write(jsmin(_sys.stdin.read()))
diff --git a/deps/v8/src/inspector/build/xxd.py b/deps/v8/src/inspector/build/xxd.py
deleted file mode 100644
index 5a63a7cb8d..0000000000
--- a/deps/v8/src/inspector/build/xxd.py
+++ /dev/null
@@ -1,28 +0,0 @@
-# Copyright 2016 the V8 project authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-"""Represent a file as a C++ constant string.
-
-Usage:
-python xxd.py VAR SOURCE DEST
-"""
-
-
-import sys
-import rjsmin
-
-
-def main():
- variable_name, input_filename, output_filename = sys.argv[1:]
- with open(input_filename) as input_file:
- input_text = input_file.read()
- input_text = rjsmin.jsmin(input_text)
- hex_values = ['0x{0:02x}'.format(ord(char)) for char in input_text]
- const_declaration = 'const char %s[] = {\n%s\n};\n' % (
- variable_name, ', '.join(hex_values))
- with open(output_filename, 'w') as output_file:
- output_file.write(const_declaration)
-
-if __name__ == '__main__':
- sys.exit(main())
diff --git a/deps/v8/src/inspector/custom-preview.cc b/deps/v8/src/inspector/custom-preview.cc
new file mode 100644
index 0000000000..63d1d74ab8
--- /dev/null
+++ b/deps/v8/src/inspector/custom-preview.cc
@@ -0,0 +1,388 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/inspector/custom-preview.h"
+
+#include "src/debug/debug-interface.h"
+#include "src/inspector/injected-script.h"
+#include "src/inspector/inspected-context.h"
+#include "src/inspector/string-util.h"
+#include "src/inspector/v8-console-message.h"
+#include "src/inspector/v8-inspector-impl.h"
+#include "src/inspector/v8-stack-trace-impl.h"
+
+namespace v8_inspector {
+
+using protocol::Runtime::CustomPreview;
+
+namespace {
+void reportError(v8::Local<v8::Context> context, const v8::TryCatch& tryCatch) {
+ DCHECK(tryCatch.HasCaught());
+ v8::Isolate* isolate = context->GetIsolate();
+ V8InspectorImpl* inspector =
+ static_cast<V8InspectorImpl*>(v8::debug::GetInspector(isolate));
+ int contextId = InspectedContext::contextId(context);
+ int groupId = inspector->contextGroupId(contextId);
+ v8::Local<v8::String> message = tryCatch.Message()->Get();
+ v8::Local<v8::String> prefix =
+ toV8String(isolate, "Custom Formatter Failed: ");
+ message = v8::String::Concat(isolate, prefix, message);
+ std::vector<v8::Local<v8::Value>> arguments;
+ arguments.push_back(message);
+ V8ConsoleMessageStorage* storage =
+ inspector->ensureConsoleMessageStorage(groupId);
+ if (!storage) return;
+ storage->addMessage(V8ConsoleMessage::createForConsoleAPI(
+ context, contextId, groupId, inspector,
+ inspector->client()->currentTimeMS(), ConsoleAPIType::kError, arguments,
+ String16(), nullptr));
+}
+
+void reportError(v8::Local<v8::Context> context, const v8::TryCatch& tryCatch,
+ const String16& message) {
+ v8::Isolate* isolate = context->GetIsolate();
+ isolate->ThrowException(toV8String(isolate, message));
+ reportError(context, tryCatch);
+}
+
+InjectedScript* getInjectedScript(v8::Local<v8::Context> context,
+ int sessionId) {
+ v8::Isolate* isolate = context->GetIsolate();
+ V8InspectorImpl* inspector =
+ static_cast<V8InspectorImpl*>(v8::debug::GetInspector(isolate));
+ InspectedContext* inspectedContext =
+ inspector->getContext(InspectedContext::contextId(context));
+ if (!inspectedContext) return nullptr;
+ return inspectedContext->getInjectedScript(sessionId);
+}
+
+bool substituteObjectTags(int sessionId, const String16& groupName,
+ v8::Local<v8::Context> context,
+ v8::Local<v8::Array> jsonML, int maxDepth) {
+ if (!jsonML->Length()) return true;
+ v8::Isolate* isolate = context->GetIsolate();
+ v8::TryCatch tryCatch(isolate);
+
+ if (maxDepth <= 0) {
+ reportError(context, tryCatch,
+ "Too deep hierarchy of inlined custom previews");
+ return false;
+ }
+
+ v8::Local<v8::Value> firstValue;
+ if (!jsonML->Get(context, 0).ToLocal(&firstValue)) {
+ reportError(context, tryCatch);
+ return false;
+ }
+ v8::Local<v8::String> objectLiteral = toV8String(isolate, "object");
+ if (jsonML->Length() == 2 && firstValue->IsString() &&
+ firstValue.As<v8::String>()->StringEquals(objectLiteral)) {
+ v8::Local<v8::Value> attributesValue;
+ if (!jsonML->Get(context, 1).ToLocal(&attributesValue)) {
+ reportError(context, tryCatch);
+ return false;
+ }
+ if (!attributesValue->IsObject()) {
+ reportError(context, tryCatch, "attributes should be an Object");
+ return false;
+ }
+ v8::Local<v8::Object> attributes = attributesValue.As<v8::Object>();
+ v8::Local<v8::Value> originValue;
+ if (!attributes->Get(context, objectLiteral).ToLocal(&originValue)) {
+ reportError(context, tryCatch);
+ return false;
+ }
+ if (originValue->IsUndefined()) {
+ reportError(context, tryCatch,
+ "obligatory attribute \"object\" isn't specified");
+ return false;
+ }
+
+ v8::Local<v8::Value> configValue;
+ if (!attributes->Get(context, toV8String(isolate, "config"))
+ .ToLocal(&configValue)) {
+ reportError(context, tryCatch);
+ return false;
+ }
+
+ InjectedScript* injectedScript = getInjectedScript(context, sessionId);
+ if (!injectedScript) {
+ reportError(context, tryCatch, "cannot find context with specified id");
+ return false;
+ }
+ std::unique_ptr<protocol::Runtime::RemoteObject> wrapper;
+ protocol::Response response =
+ injectedScript->wrapObject(originValue, groupName, WrapMode::kNoPreview,
+ configValue, maxDepth - 1, &wrapper);
+ if (!response.isSuccess() || !wrapper) {
+ reportError(context, tryCatch, "cannot wrap value");
+ return false;
+ }
+ v8::Local<v8::Value> jsonWrapper;
+ String16 serialized = wrapper->serialize();
+ if (!v8::JSON::Parse(context, toV8String(isolate, serialized))
+ .ToLocal(&jsonWrapper)) {
+ reportError(context, tryCatch, "cannot wrap value");
+ return false;
+ }
+ if (jsonML->Set(context, 1, jsonWrapper).IsNothing()) {
+ reportError(context, tryCatch);
+ return false;
+ }
+ } else {
+ for (uint32_t i = 0; i < jsonML->Length(); ++i) {
+ v8::Local<v8::Value> value;
+ if (!jsonML->Get(context, i).ToLocal(&value)) {
+ reportError(context, tryCatch);
+ return false;
+ }
+ if (value->IsArray() && value.As<v8::Array>()->Length() > 0 &&
+ !substituteObjectTags(sessionId, groupName, context,
+ value.As<v8::Array>(), maxDepth - 1)) {
+ return false;
+ }
+ }
+ }
+ return true;
+}
+
+void bodyCallback(const v8::FunctionCallbackInfo<v8::Value>& info) {
+ v8::Isolate* isolate = info.GetIsolate();
+ v8::TryCatch tryCatch(isolate);
+ v8::Local<v8::Context> context = isolate->GetCurrentContext();
+ v8::Local<v8::Object> bodyConfig = info.Data().As<v8::Object>();
+
+ v8::Local<v8::Value> objectValue;
+ if (!bodyConfig->Get(context, toV8String(isolate, "object"))
+ .ToLocal(&objectValue)) {
+ reportError(context, tryCatch);
+ return;
+ }
+ if (!objectValue->IsObject()) {
+ reportError(context, tryCatch, "object should be an Object");
+ return;
+ }
+ v8::Local<v8::Object> object = objectValue.As<v8::Object>();
+
+ v8::Local<v8::Value> formatterValue;
+ if (!bodyConfig->Get(context, toV8String(isolate, "formatter"))
+ .ToLocal(&formatterValue)) {
+ reportError(context, tryCatch);
+ return;
+ }
+ if (!formatterValue->IsObject()) {
+ reportError(context, tryCatch, "formatter should be an Object");
+ return;
+ }
+ v8::Local<v8::Object> formatter = formatterValue.As<v8::Object>();
+
+ v8::Local<v8::Value> bodyValue;
+ if (!formatter->Get(context, toV8String(isolate, "body"))
+ .ToLocal(&bodyValue)) {
+ reportError(context, tryCatch);
+ return;
+ }
+ if (!bodyValue->IsFunction()) {
+ reportError(context, tryCatch, "body should be a Function");
+ return;
+ }
+ v8::Local<v8::Function> bodyFunction = bodyValue.As<v8::Function>();
+
+ v8::Local<v8::Value> configValue;
+ if (!bodyConfig->Get(context, toV8String(isolate, "config"))
+ .ToLocal(&configValue)) {
+ reportError(context, tryCatch);
+ return;
+ }
+
+ v8::Local<v8::Value> sessionIdValue;
+ if (!bodyConfig->Get(context, toV8String(isolate, "sessionId"))
+ .ToLocal(&sessionIdValue)) {
+ reportError(context, tryCatch);
+ return;
+ }
+ if (!sessionIdValue->IsInt32()) {
+ reportError(context, tryCatch, "sessionId should be an Int32");
+ return;
+ }
+
+ v8::Local<v8::Value> groupNameValue;
+ if (!bodyConfig->Get(context, toV8String(isolate, "groupName"))
+ .ToLocal(&groupNameValue)) {
+ reportError(context, tryCatch);
+ return;
+ }
+ if (!groupNameValue->IsString()) {
+ reportError(context, tryCatch, "groupName should be a string");
+ return;
+ }
+
+ v8::Local<v8::Value> formattedValue;
+ v8::Local<v8::Value> args[] = {object, configValue};
+ if (!bodyFunction->Call(context, formatter, 2, args)
+ .ToLocal(&formattedValue)) {
+ reportError(context, tryCatch);
+ return;
+ }
+ if (!formattedValue->IsArray()) {
+ reportError(context, tryCatch, "body should return an Array");
+ return;
+ }
+ v8::Local<v8::Array> jsonML = formattedValue.As<v8::Array>();
+ if (jsonML->Length() &&
+ !substituteObjectTags(
+ sessionIdValue.As<v8::Int32>()->Value(),
+ toProtocolString(isolate, groupNameValue.As<v8::String>()), context,
+ jsonML, kMaxCustomPreviewDepth)) {
+ return;
+ }
+ info.GetReturnValue().Set(jsonML);
+}
+} // anonymous namespace
+
+void generateCustomPreview(int sessionId, const String16& groupName,
+ v8::Local<v8::Context> context,
+ v8::Local<v8::Object> object,
+ v8::MaybeLocal<v8::Value> maybeConfig, int maxDepth,
+ std::unique_ptr<CustomPreview>* preview) {
+ v8::Isolate* isolate = context->GetIsolate();
+ v8::MicrotasksScope microtasksScope(isolate,
+ v8::MicrotasksScope::kDoNotRunMicrotasks);
+ v8::TryCatch tryCatch(isolate);
+
+ v8::Local<v8::Value> configValue;
+ if (!maybeConfig.ToLocal(&configValue)) configValue = v8::Undefined(isolate);
+
+ v8::Local<v8::Object> global = context->Global();
+ v8::Local<v8::Value> formattersValue;
+ if (!global->Get(context, toV8String(isolate, "devtoolsFormatters"))
+ .ToLocal(&formattersValue)) {
+ reportError(context, tryCatch);
+ return;
+ }
+ if (!formattersValue->IsArray()) return;
+ v8::Local<v8::Array> formatters = formattersValue.As<v8::Array>();
+ v8::Local<v8::String> headerLiteral = toV8String(isolate, "header");
+ v8::Local<v8::String> hasBodyLiteral = toV8String(isolate, "hasBody");
+ for (uint32_t i = 0; i < formatters->Length(); ++i) {
+ v8::Local<v8::Value> formatterValue;
+ if (!formatters->Get(context, i).ToLocal(&formatterValue)) {
+ reportError(context, tryCatch);
+ return;
+ }
+ if (!formatterValue->IsObject()) {
+ reportError(context, tryCatch, "formatter should be an Object");
+ return;
+ }
+ v8::Local<v8::Object> formatter = formatterValue.As<v8::Object>();
+
+ v8::Local<v8::Value> headerValue;
+ if (!formatter->Get(context, headerLiteral).ToLocal(&headerValue)) {
+ reportError(context, tryCatch);
+ return;
+ }
+ if (!headerValue->IsFunction()) {
+ reportError(context, tryCatch, "header should be a Function");
+ return;
+ }
+ v8::Local<v8::Function> headerFunction = headerValue.As<v8::Function>();
+
+ v8::Local<v8::Value> formattedValue;
+ v8::Local<v8::Value> args[] = {object, configValue};
+ if (!headerFunction->Call(context, formatter, 2, args)
+ .ToLocal(&formattedValue)) {
+ reportError(context, tryCatch);
+ return;
+ }
+ if (!formattedValue->IsArray()) continue;
+ v8::Local<v8::Array> jsonML = formattedValue.As<v8::Array>();
+
+ v8::Local<v8::Value> hasBodyFunctionValue;
+ if (!formatter->Get(context, hasBodyLiteral)
+ .ToLocal(&hasBodyFunctionValue)) {
+ reportError(context, tryCatch);
+ return;
+ }
+ if (!hasBodyFunctionValue->IsFunction()) continue;
+ v8::Local<v8::Function> hasBodyFunction =
+ hasBodyFunctionValue.As<v8::Function>();
+ v8::Local<v8::Value> hasBodyValue;
+ if (!hasBodyFunction->Call(context, formatter, 2, args)
+ .ToLocal(&hasBodyValue)) {
+ reportError(context, tryCatch);
+ return;
+ }
+ bool hasBody = hasBodyValue->ToBoolean(isolate)->Value();
+
+ if (jsonML->Length() && !substituteObjectTags(sessionId, groupName, context,
+ jsonML, maxDepth)) {
+ return;
+ }
+
+ v8::Local<v8::String> header;
+ if (!v8::JSON::Stringify(context, jsonML).ToLocal(&header)) {
+ reportError(context, tryCatch);
+ return;
+ }
+
+ v8::Local<v8::Function> bodyFunction;
+ if (hasBody) {
+ v8::Local<v8::Object> bodyConfig = v8::Object::New(isolate);
+ if (bodyConfig
+ ->CreateDataProperty(context, toV8String(isolate, "sessionId"),
+ v8::Integer::New(isolate, sessionId))
+ .IsNothing()) {
+ reportError(context, tryCatch);
+ return;
+ }
+ if (bodyConfig
+ ->CreateDataProperty(context, toV8String(isolate, "formatter"),
+ formatter)
+ .IsNothing()) {
+ reportError(context, tryCatch);
+ return;
+ }
+ if (bodyConfig
+ ->CreateDataProperty(context, toV8String(isolate, "groupName"),
+ toV8String(isolate, groupName))
+ .IsNothing()) {
+ reportError(context, tryCatch);
+ return;
+ }
+ if (bodyConfig
+ ->CreateDataProperty(context, toV8String(isolate, "config"),
+ configValue)
+ .IsNothing()) {
+ reportError(context, tryCatch);
+ return;
+ }
+ if (bodyConfig
+ ->CreateDataProperty(context, toV8String(isolate, "object"),
+ object)
+ .IsNothing()) {
+ reportError(context, tryCatch);
+ return;
+ }
+ if (!v8::Function::New(context, bodyCallback, bodyConfig)
+ .ToLocal(&bodyFunction)) {
+ reportError(context, tryCatch);
+ return;
+ }
+ }
+ *preview = CustomPreview::create()
+ .setHeader(toProtocolString(isolate, header))
+ .build();
+ if (!bodyFunction.IsEmpty()) {
+ InjectedScript* injectedScript = getInjectedScript(context, sessionId);
+ if (!injectedScript) {
+ reportError(context, tryCatch, "cannot find context with specified id");
+ return;
+ }
+ (*preview)->setBodyGetterId(
+ injectedScript->bindObject(bodyFunction, groupName));
+ }
+ return;
+ }
+}
+} // namespace v8_inspector
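
Everything above hinges on the jsonML convention shared with the DevTools front end: a formatter's header function returns nested arrays of the form [tag, {attributes}, children...], and substituteObjectTags() rewrites exactly the two-element ["object", {...}] nodes into wrapped RemoteObjects, recursing through every other array child. A hypothetical payload illustrating that shape (not a fixture from this tree), kept as a C++ literal for consistency:

    // Illustration only: one "object" tag nested inside a "span" node. The
    // substitution step replaces the attributes element of the inner node
    // with the JSON of a wrapped RemoteObject; "config" is optional.
    constexpr const char* kHeaderJsonML =
        R"json(["span", {},
                "vector (len=2): ",
                ["object", {"object": {}, "config": {"expand": true}}]])json";
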
diff --git a/deps/v8/src/inspector/custom-preview.h b/deps/v8/src/inspector/custom-preview.h
new file mode 100644
index 0000000000..1ae8e25a4c
--- /dev/null
+++ b/deps/v8/src/inspector/custom-preview.h
@@ -0,0 +1,22 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INSPECTOR_CUSTOM_PREVIEW_H_
+#define V8_INSPECTOR_CUSTOM_PREVIEW_H_
+
+#include "src/inspector/protocol/Protocol.h"
+#include "src/inspector/protocol/Runtime.h"
+
+namespace v8_inspector {
+
+const int kMaxCustomPreviewDepth = 20;
+
+void generateCustomPreview(
+ int sessionId, const String16& groupName, v8::Local<v8::Context> context,
+ v8::Local<v8::Object> object, v8::MaybeLocal<v8::Value> config,
+ int maxDepth, std::unique_ptr<protocol::Runtime::CustomPreview>* preview);
+
+} // namespace v8_inspector
+
+#endif // V8_INSPECTOR_CUSTOM_PREVIEW_H_
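
The header above is the entire surface exposed to the rest of the inspector. A caller-side sketch (V8-internal context assumed; names other than the two declared above are hypothetical), showing that a formatter miss is signalled simply by the out-parameter staying null:

    #include <memory>

    #include "src/inspector/custom-preview.h"

    namespace v8_inspector {

    void ExampleGeneratePreview(int sessionId, const String16& groupName,
                                v8::Local<v8::Context> context,
                                v8::Local<v8::Object> object) {
      std::unique_ptr<protocol::Runtime::CustomPreview> preview;
      generateCustomPreview(sessionId, groupName, context, object,
                            v8::MaybeLocal<v8::Value>() /* no config */,
                            kMaxCustomPreviewDepth, &preview);
      if (preview) {
        // A registered devtoolsFormatters entry claimed the object; attach
        // the preview to the outgoing RemoteObject here.
      }
    }

    }  // namespace v8_inspector
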
diff --git a/deps/v8/src/inspector/injected-script-source.js b/deps/v8/src/inspector/injected-script-source.js
deleted file mode 100644
index 380091cb4a..0000000000
--- a/deps/v8/src/inspector/injected-script-source.js
+++ /dev/null
@@ -1,1116 +0,0 @@
-/*
- * Copyright (C) 2007 Apple Inc. All rights reserved.
- * Copyright (C) 2013 Google Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
- * its contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-"use strict";
-
-/**
- * @param {!InjectedScriptHostClass} InjectedScriptHost
- * @param {!Window|!WorkerGlobalScope} inspectedGlobalObject
- * @param {number} injectedScriptId
- * @suppress {uselessCode}
- */
-(function (InjectedScriptHost, inspectedGlobalObject, injectedScriptId) {
-
-/**
- * @param {!Array.<T>} array
- * @param {...} var_args
- * @template T
- */
-function push(array, var_args)
-{
- for (var i = 1; i < arguments.length; ++i)
- array[array.length] = arguments[i];
-}
-
-/**
- * @param {*} obj
- * @return {string}
- * @suppress {uselessCode}
- */
-function toString(obj)
-{
- // We don't use String(obj) because String could be overridden.
- // Also the ("" + obj) expression may throw.
- try {
- return "" + obj;
- } catch (e) {
- var name = InjectedScriptHost.internalConstructorName(obj) || InjectedScriptHost.subtype(obj) || (typeof obj);
- return "#<" + name + ">";
- }
-}
-
-/**
- * TODO(luoe): remove type-check suppression once bigint is supported by closure.
- * @suppress {checkTypes}
- * @param {*} obj
- * @return {string}
- */
-function toStringDescription(obj)
-{
- if (typeof obj === "number" && obj === 0 && 1 / obj < 0)
- return "-0"; // Negative zero.
- if (typeof obj === "bigint")
- return toString(obj) + "n";
- return toString(obj);
-}
-
-/**
- * FireBug's array detection.
- * @param {*} obj
- * @return {boolean}
- */
-function isArrayLike(obj)
-{
- if (typeof obj !== "object")
- return false;
- var splice = InjectedScriptHost.getProperty(obj, "splice");
- if (typeof splice === "function") {
- if (!InjectedScriptHost.objectHasOwnProperty(/** @type {!Object} */ (obj), "length"))
- return false;
- var len = InjectedScriptHost.getProperty(obj, "length");
- // Is len a uint32 (and not -0)?
- return typeof len === "number" && len >>> 0 === len && (len > 0 || 1 / len > 0);
- }
- return false;
-}
-
-/**
- * @param {number} a
- * @param {number} b
- * @return {number}
- */
-function max(a, b)
-{
- return a > b ? a : b;
-}
-
-/**
- * FIXME: Remove once ES6 is supported natively by JS compiler.
- * @param {*} obj
- * @return {boolean}
- */
-function isSymbol(obj)
-{
- var type = typeof obj;
- return (type === "symbol");
-}
-
-/**
- * DOM attributes whose getters have observable side effects, in the form of
- * {interfaceName1: {attributeName1: true,
- * attributeName2: true,
- * ...},
- * interfaceName2: {...},
- * ...}
- * @type {!Object<string, !Object<string, boolean>>}
- * @const
- */
-var domAttributesWithObservableSideEffectOnGet = {
- Request: { body: true, __proto__: null },
- Response: { body: true, __proto__: null },
- __proto__: null
-};
-
-/**
- * @param {!Object} object
- * @param {string} attribute
- * @return {boolean}
- */
-function doesAttributeHaveObservableSideEffectOnGet(object, attribute)
-{
- for (var interfaceName in domAttributesWithObservableSideEffectOnGet) {
- var interfaceFunction = inspectedGlobalObject[interfaceName];
- // The instanceof call is safe after the typeof check.
- var isInstance = typeof interfaceFunction === "function" && /* suppressBlacklist */ object instanceof interfaceFunction;
- if (isInstance)
- return attribute in domAttributesWithObservableSideEffectOnGet[interfaceName];
- }
- return false;
-}
-
-/**
- * @constructor
- */
-var InjectedScript = function()
-{
-};
-InjectedScriptHost.nullifyPrototype(InjectedScript);
-
-/**
- * @type {!Object<string, boolean>}
- * @const
- */
-InjectedScript.primitiveTypes = {
- "undefined": true,
- "boolean": true,
- "number": true,
- "string": true,
- "bigint": true,
- __proto__: null
-};
-
-/**
- * @type {!Object<string, string>}
- * @const
- */
-InjectedScript.closureTypes = {
- "local": "Local",
- "closure": "Closure",
- "catch": "Catch",
- "block": "Block",
- "script": "Script",
- "with": "With Block",
- "global": "Global",
- "eval": "Eval",
- "module": "Module",
- __proto__: null
-};
-
-InjectedScript.prototype = {
- /**
- * @param {*} object
- * @return {boolean}
- */
- isPrimitiveValue: function(object)
- {
- // FIXME(33716): typeof document.all is always 'undefined'.
- return InjectedScript.primitiveTypes[typeof object] && !this._isHTMLAllCollection(object);
- },
-
- /**
- * @param {*} object
- * @return {boolean}
- */
- _shouldPassByValue: function(object)
- {
- return typeof object === "object" && InjectedScriptHost.subtype(object) === "internal#location";
- },
-
- /**
- * @param {*} object
- * @param {string} groupName
- * @param {boolean} forceValueType
- * @param {boolean} generatePreview
- * @return {!RuntimeAgent.RemoteObject}
- */
- wrapObject: function(object, groupName, forceValueType, generatePreview)
- {
- return this._wrapObject(object, groupName, forceValueType, generatePreview);
- },
-
- /**
- * @param {!Object} table
- * @param {!Array.<string>|string|boolean} columns
- * @return {!RuntimeAgent.RemoteObject}
- */
- wrapTable: function(table, columns)
- {
- var columnNames = null;
- if (typeof columns === "string")
- columns = [columns];
- if (InjectedScriptHost.subtype(columns) === "array") {
- columnNames = [];
- InjectedScriptHost.nullifyPrototype(columnNames);
- for (var i = 0; i < columns.length; ++i)
- columnNames[i] = toString(columns[i]);
- }
- return this._wrapObject(table, "console", false, true, columnNames, true);
- },
-
- /**
- * This method cannot throw.
- * @param {*} object
- * @param {string=} objectGroupName
- * @param {boolean=} forceValueType
- * @param {boolean=} generatePreview
- * @param {?Array.<string>=} columnNames
- * @param {boolean=} isTable
- * @param {boolean=} doNotBind
- * @param {*=} customObjectConfig
- * @return {!RuntimeAgent.RemoteObject}
- * @suppress {checkTypes}
- */
- _wrapObject: function(object, objectGroupName, forceValueType, generatePreview, columnNames, isTable, doNotBind, customObjectConfig)
- {
- try {
- return new InjectedScript.RemoteObject(object, objectGroupName, doNotBind, forceValueType, generatePreview, columnNames, isTable, undefined, customObjectConfig);
- } catch (e) {
- try {
- var description = injectedScript._describe(e);
- } catch (ex) {
- var description = "<failed to convert exception to string>";
- }
- return new InjectedScript.RemoteObject(description);
- }
- },
-
- /**
- * @param {!Object|symbol} object
- * @param {string=} objectGroupName
- * @return {string}
- */
- _bind: function(object, objectGroupName)
- {
- var id = InjectedScriptHost.bind(object, objectGroupName || "");
- return "{\"injectedScriptId\":" + injectedScriptId + ",\"id\":" + id + "}";
- },
-
- /**
- * @param {!Object} object
- * @param {string} objectGroupName
- * @param {boolean} ownProperties
- * @param {boolean} accessorPropertiesOnly
- * @param {boolean} generatePreview
- * @return {!Array<!RuntimeAgent.PropertyDescriptor>|boolean}
- */
- getProperties: function(object, objectGroupName, ownProperties, accessorPropertiesOnly, generatePreview)
- {
- var subtype = this._subtype(object);
- if (subtype === "internal#scope") {
- // Internally, a scope is an object holding the scope variables plus
- // additional information such as the scope type. We use the additional
- // information for previews and report only the variables as scope properties.
- object = object.object;
- }
-
- // Go over properties, wrap object values.
- var descriptors = this._propertyDescriptors(object, addPropertyIfNeeded, ownProperties, accessorPropertiesOnly);
- for (var i = 0; i < descriptors.length; ++i) {
- var descriptor = descriptors[i];
- if ("get" in descriptor)
- descriptor.get = this._wrapObject(descriptor.get, objectGroupName);
- if ("set" in descriptor)
- descriptor.set = this._wrapObject(descriptor.set, objectGroupName);
- if ("value" in descriptor)
- descriptor.value = this._wrapObject(descriptor.value, objectGroupName, false, generatePreview);
- if (!("configurable" in descriptor))
- descriptor.configurable = false;
- if (!("enumerable" in descriptor))
- descriptor.enumerable = false;
- if ("symbol" in descriptor)
- descriptor.symbol = this._wrapObject(descriptor.symbol, objectGroupName);
- }
- return descriptors;
-
- /**
- * @param {!Array<!Object>} descriptors
- * @param {!Object} descriptor
- * @return {boolean}
- */
- function addPropertyIfNeeded(descriptors, descriptor) {
- push(descriptors, descriptor);
- return true;
- }
- },
-
- /**
- * @param {!Object} object
- * @return {?Object}
- */
- _objectPrototype: function(object)
- {
- if (InjectedScriptHost.subtype(object) === "proxy")
- return null;
- try {
- return InjectedScriptHost.getPrototypeOf(object);
- } catch (e) {
- return null;
- }
- },
-
- /**
- * @param {!Object} object
- * @param {!function(!Array<!Object>, !Object)} addPropertyIfNeeded
- * @param {boolean=} ownProperties
- * @param {boolean=} accessorPropertiesOnly
- * @param {?Array<string>=} propertyNamesOnly
- * @return {!Array<!Object>}
- */
- _propertyDescriptors: function(object, addPropertyIfNeeded, ownProperties, accessorPropertiesOnly, propertyNamesOnly)
- {
- var descriptors = [];
- InjectedScriptHost.nullifyPrototype(descriptors);
- var propertyProcessed = { __proto__: null };
- var subtype = InjectedScriptHost.subtype(object);
-
- /**
- * @param {!Object} o
- * @param {!Array<string|number|symbol>=} properties
- * @param {number=} objectLength
- * @return {boolean}
- */
- function process(o, properties, objectLength)
- {
- // When properties is not provided, iterate over the object's indices.
- var length = properties ? properties.length : objectLength;
- for (var i = 0; i < length; ++i) {
- var property = properties ? properties[i] : ("" + i);
- if (propertyProcessed[property])
- continue;
- propertyProcessed[property] = true;
- var name;
- if (isSymbol(property))
- name = /** @type {string} */ (injectedScript._describe(property));
- else
- name = typeof property === "number" ? ("" + property) : /** @type {string} */(property);
-
- if (subtype === "internal#scopeList" && name === "length")
- continue;
-
- var descriptor;
- try {
- var nativeAccessorDescriptor = InjectedScriptHost.nativeAccessorDescriptor(o, property);
- if (nativeAccessorDescriptor && !nativeAccessorDescriptor.isBuiltin) {
- descriptor = { __proto__: null };
- if (nativeAccessorDescriptor.hasGetter)
- descriptor.get = function nativeGetter() { return o[property]; };
- if (nativeAccessorDescriptor.hasSetter)
- descriptor.set = function nativeSetter(v) { o[property] = v; };
- } else {
- descriptor = InjectedScriptHost.getOwnPropertyDescriptor(o, property);
- if (descriptor) {
- InjectedScriptHost.nullifyPrototype(descriptor);
- }
- }
- var isAccessorProperty = descriptor && ("get" in descriptor || "set" in descriptor);
- if (accessorPropertiesOnly && !isAccessorProperty)
- continue;
- // Special case for Symbol.prototype.description where the receiver of the getter is not an actual object.
- // Should only occur for nested previews.
- var isSymbolDescription = isSymbol(object) && name === 'description';
- if (isSymbolDescription || (descriptor && "get" in descriptor && "set" in descriptor && name !== "__proto__" &&
- InjectedScriptHost.formatAccessorsAsProperties(object, descriptor.get) &&
- !doesAttributeHaveObservableSideEffectOnGet(object, name))) {
- descriptor.value = object[property];
- descriptor.isOwn = true;
- delete descriptor.get;
- delete descriptor.set;
- }
- } catch (e) {
- if (accessorPropertiesOnly)
- continue;
- descriptor = { value: e, wasThrown: true, __proto__: null };
- }
-
- // Not all bindings provide proper descriptors. Fall back to a
- // non-configurable, non-enumerable, non-writable property.
- if (!descriptor) {
- try {
- descriptor = { value: o[property], writable: false, __proto__: null };
- } catch (e) {
- // Silent catch.
- continue;
- }
- }
-
- descriptor.name = name;
- if (o === object)
- descriptor.isOwn = true;
- if (isSymbol(property))
- descriptor.symbol = property;
- if (!addPropertyIfNeeded(descriptors, descriptor))
- return false;
- }
- return true;
- }
-
- if (propertyNamesOnly) {
- for (var i = 0; i < propertyNamesOnly.length; ++i) {
- var name = propertyNamesOnly[i];
- for (var o = object; this._isDefined(o); o = this._objectPrototype(/** @type {!Object} */ (o))) {
- o = /** @type {!Object} */ (o);
- if (InjectedScriptHost.objectHasOwnProperty(o, name)) {
- if (!process(o, [name]))
- return descriptors;
- break;
- }
- if (ownProperties)
- break;
- }
- }
- return descriptors;
- }
-
- var skipGetOwnPropertyNames;
- try {
- skipGetOwnPropertyNames = subtype === "typedarray" && object.length > 500000;
- } catch (e) {
- }
-
- for (var o = object; this._isDefined(o); o = this._objectPrototype(/** @type {!Object} */ (o))) {
- o = /** @type {!Object} */ (o);
- if (InjectedScriptHost.subtype(o) === "proxy")
- continue;
-
- var typedArrays = subtype === "arraybuffer" ? InjectedScriptHost.typedArrayProperties(o) || [] : [];
- for (var i = 0; i < typedArrays.length; i += 2)
- addPropertyIfNeeded(descriptors, { name: typedArrays[i], value: typedArrays[i + 1], isOwn: true, enumerable: false, configurable: false, __proto__: null });
-
- try {
- if (skipGetOwnPropertyNames && o === object) {
- if (!process(o, undefined, o.length))
- return descriptors;
- } else {
- // First call Object.keys() to enforce ordering of the property descriptors.
- if (!process(o, InjectedScriptHost.keys(o)))
- return descriptors;
- if (!process(o, InjectedScriptHost.getOwnPropertyNames(o)))
- return descriptors;
- }
- if (!process(o, InjectedScriptHost.getOwnPropertySymbols(o)))
- return descriptors;
-
- if (ownProperties) {
- var proto = this._objectPrototype(o);
- if (proto && !accessorPropertiesOnly) {
- var descriptor = { name: "__proto__", value: proto, writable: true, configurable: true, enumerable: false, isOwn: true, __proto__: null };
- if (!addPropertyIfNeeded(descriptors, descriptor))
- return descriptors;
- }
- }
- } catch (e) {
- }
-
- if (ownProperties)
- break;
- }
- return descriptors;
- },
-
- /**
- * @param {string|undefined} objectGroupName
- * @param {*} jsonMLObject
- * @throws {string} error message
- */
- _substituteObjectTagsInCustomPreview: function(objectGroupName, jsonMLObject)
- {
- var maxCustomPreviewRecursionDepth = 20;
- this._customPreviewRecursionDepth = (this._customPreviewRecursionDepth || 0) + 1;
- try {
- if (this._customPreviewRecursionDepth >= maxCustomPreviewRecursionDepth)
- throw new Error("Too deep hierarchy of inlined custom previews");
-
- if (!isArrayLike(jsonMLObject))
- return;
-
- if (jsonMLObject[0] === "object") {
- var attributes = jsonMLObject[1];
- var originObject = attributes["object"];
- var config = attributes["config"];
- if (typeof originObject === "undefined")
- throw new Error("Illegal format: obligatory attribute \"object\" isn't specified");
-
- jsonMLObject[1] = this._wrapObject(originObject, objectGroupName, false, false, null, false, false, config);
- return;
- }
-
- for (var i = 0; i < jsonMLObject.length; ++i)
- this._substituteObjectTagsInCustomPreview(objectGroupName, jsonMLObject[i]);
- } finally {
- this._customPreviewRecursionDepth--;
- }
- },
-
- /**
- * @param {*} object
- * @return {boolean}
- */
- _isDefined: function(object)
- {
- return !!object || this._isHTMLAllCollection(object);
- },
-
- /**
- * @param {*} object
- * @return {boolean}
- */
- _isHTMLAllCollection: function(object)
- {
- // document.all is reported as undefined, but we still want to process it.
- return (typeof object === "undefined") && !!InjectedScriptHost.subtype(object);
- },
-
- /**
- * @param {*} obj
- * @return {?string}
- */
- _subtype: function(obj)
- {
- if (obj === null)
- return "null";
-
- if (this.isPrimitiveValue(obj))
- return null;
-
- var subtype = InjectedScriptHost.subtype(obj);
- if (subtype)
- return subtype;
-
- if (isArrayLike(obj))
- return "array";
-
- // If the owning frame has navigated somewhere else, window properties will be undefined.
- return null;
- },
-
- /**
- * @param {*} obj
- * @return {?string}
- */
- _describe: function(obj)
- {
- if (this.isPrimitiveValue(obj))
- return null;
-
- var subtype = this._subtype(obj);
-
- if (subtype === "regexp")
- return toString(obj);
-
- if (subtype === "date")
- return toString(obj);
-
- if (subtype === "node") {
- // We should warm up the Blink DOM binding before calling anything;
- // see crbug.com/827585 for details.
- InjectedScriptHost.getOwnPropertyDescriptor(/** @type {!Object} */(obj), "nodeName");
- var description = "";
- var nodeName = InjectedScriptHost.getProperty(obj, "nodeName");
- if (nodeName) {
- description = nodeName.toLowerCase();
- } else {
- var constructor = InjectedScriptHost.getProperty(obj, "constructor");
- if (constructor)
- description = (InjectedScriptHost.getProperty(constructor, "name") || "").toLowerCase();
- }
-
- var nodeType = InjectedScriptHost.getProperty(obj, "nodeType");
- switch (nodeType) {
- case 1 /* Node.ELEMENT_NODE */:
- var id = InjectedScriptHost.getProperty(obj, "id");
- description += id ? "#" + id : "";
- var className = InjectedScriptHost.getProperty(obj, "className");
- description += (className && typeof className === "string") ? "." + className.trim().replace(/\s+/g, ".") : "";
- break;
- case 10 /*Node.DOCUMENT_TYPE_NODE */:
- description = "<!DOCTYPE " + description + ">";
- break;
- }
- return description;
- }
-
- if (subtype === "proxy")
- return "Proxy";
-
- var className = InjectedScriptHost.internalConstructorName(obj);
- if (subtype === "array" || subtype === "typedarray") {
- if (typeof obj.length === "number")
- return className + "(" + obj.length + ")";
- return className;
- }
-
- if (subtype === "map" || subtype === "set" || subtype === "blob") {
- if (typeof obj.size === "number")
- return className + "(" + obj.size + ")";
- return className;
- }
-
- if (subtype === "arraybuffer" || subtype === "dataview") {
- if (typeof obj.byteLength === "number")
- return className + "(" + obj.byteLength + ")";
- return className;
- }
-
- if (typeof obj === "function")
- return toString(obj);
-
- if (isSymbol(obj)) {
- try {
- // This isn't safe because Symbol.prototype.toString can be overridden.
- return /* suppressBlacklist */ obj.toString() || "Symbol";
- } catch (e) {
- return "Symbol";
- }
- }
-
- if (InjectedScriptHost.subtype(obj) === "error") {
- try {
- const stack = obj.stack;
- if (stack.substr(0, className.length) === className)
- return stack;
- const message = obj.message;
- const index = /* suppressBlacklist */ stack.indexOf(message);
- const messageWithStack = index !== -1 ? stack.substr(index) : message;
- return className + ': ' + messageWithStack;
- } catch(e) {
- return className;
- }
- }
-
- if (subtype === "internal#entry") {
- if ("key" in obj)
- return "{" + this._describeIncludingPrimitives(obj.key) + " => " + this._describeIncludingPrimitives(obj.value) + "}";
- return this._describeIncludingPrimitives(obj.value);
- }
-
- if (subtype === "internal#scopeList")
- return "Scopes[" + obj.length + "]";
-
- if (subtype === "internal#scope")
- return (InjectedScript.closureTypes[obj.type] || "Unknown") + (obj.name ? " (" + obj.name + ")" : "");
-
- return className;
- },
-
- /**
- * @param {*} value
- * @return {string}
- */
- _describeIncludingPrimitives: function(value)
- {
- if (typeof value === "string")
- return "\"" + value.replace(/\n/g, "\u21B5") + "\"";
- if (value === null)
- return "" + value;
- return this.isPrimitiveValue(value) ? toStringDescription(value) : (this._describe(value) || "");
- },
-
- /**
- * @param {boolean} enabled
- */
- setCustomObjectFormatterEnabled: function(enabled)
- {
- this._customObjectFormatterEnabled = enabled;
- }
-};
-
-/**
- * @type {!InjectedScript}
- * @const
- */
-var injectedScript = new InjectedScript();
-
-/**
- * @constructor
- * @param {*} object
- * @param {string=} objectGroupName
- * @param {boolean=} doNotBind
- * @param {boolean=} forceValueType
- * @param {boolean=} generatePreview
- * @param {?Array.<string>=} columnNames
- * @param {boolean=} isTable
- * @param {boolean=} skipEntriesPreview
- * @param {*=} customObjectConfig
- */
-InjectedScript.RemoteObject = function(object, objectGroupName, doNotBind, forceValueType, generatePreview, columnNames, isTable, skipEntriesPreview, customObjectConfig)
-{
- this.type = typeof object;
- if (this.type === "undefined" && injectedScript._isHTMLAllCollection(object))
- this.type = "object";
-
- if (injectedScript.isPrimitiveValue(object) || object === null || forceValueType) {
- // We don't send undefined values over JSON.
- if (this.type !== "undefined")
- this.value = object;
-
- // A null value is reported as an object with the "null" subtype.
- if (object === null)
- this.subtype = "null";
-
- // Provide user-friendly number values.
- if (this.type === "number") {
- this.description = toStringDescription(object);
- switch (this.description) {
- case "NaN":
- case "Infinity":
- case "-Infinity":
- case "-0":
- delete this.value;
- this.unserializableValue = this.description;
- break;
- }
- }
-
- // The "n" suffix of bigint primitives are not JSON serializable.
- if (this.type === "bigint") {
- delete this.value;
- this.description = toStringDescription(object);
- this.unserializableValue = this.description;
- }
-
- return;
- }
-
- if (injectedScript._shouldPassByValue(object)) {
- this.value = object;
- this.subtype = injectedScript._subtype(object);
- this.description = injectedScript._describeIncludingPrimitives(object);
- return;
- }
-
- object = /** @type {!Object} */ (object);
-
- if (!doNotBind)
- this.objectId = injectedScript._bind(object, objectGroupName);
- var subtype = injectedScript._subtype(object);
- if (subtype)
- this.subtype = subtype;
- var className = InjectedScriptHost.internalConstructorName(object);
- if (className)
- this.className = className;
- this.description = injectedScript._describe(object);
-
- if (generatePreview && this.type === "object") {
- if (this.subtype === "proxy")
- this.preview = this._generatePreview(InjectedScriptHost.proxyTargetValue(object), undefined, columnNames, isTable, skipEntriesPreview);
- else
- this.preview = this._generatePreview(object, undefined, columnNames, isTable, skipEntriesPreview);
- }
-
- if (injectedScript._customObjectFormatterEnabled) {
- var customPreview = this._customPreview(object, objectGroupName, customObjectConfig);
- if (customPreview)
- this.customPreview = customPreview;
- }
-};
-
-InjectedScript.RemoteObject.prototype = {
-
- /**
- * @param {*} object
- * @param {string=} objectGroupName
- * @param {*=} customObjectConfig
- * @return {?RuntimeAgent.CustomPreview}
- */
- _customPreview: function(object, objectGroupName, customObjectConfig)
- {
- /**
- * @param {!Error} error
- */
- function logError(error)
- {
- // We use user code to generate custom output for the object, so we can use user code to report errors too.
- Promise.resolve().then(/* suppressBlacklist */ inspectedGlobalObject.console.error.bind(inspectedGlobalObject.console, "Custom Formatter Failed: " + error.message));
- }
-
- /**
- * @param {*} object
- * @param {*=} customObjectConfig
- * @return {*}
- */
- function wrap(object, customObjectConfig)
- {
- return injectedScript._wrapObject(object, objectGroupName, false, false, null, false, false, customObjectConfig);
- }
-
- try {
- var formatters = inspectedGlobalObject["devtoolsFormatters"];
- if (!formatters || !isArrayLike(formatters))
- return null;
-
- for (var i = 0; i < formatters.length; ++i) {
- try {
- var formatted = formatters[i].header(object, customObjectConfig);
- if (!formatted)
- continue;
-
- var hasBody = formatters[i].hasBody(object, customObjectConfig);
- injectedScript._substituteObjectTagsInCustomPreview(objectGroupName, formatted);
- var formatterObjectId = injectedScript._bind(formatters[i], objectGroupName);
- var bindRemoteObjectFunctionId = injectedScript._bind(wrap, objectGroupName);
- var result = {header: JSON.stringify(formatted), hasBody: !!hasBody, formatterObjectId: formatterObjectId, bindRemoteObjectFunctionId: bindRemoteObjectFunctionId};
- if (customObjectConfig)
- result["configObjectId"] = injectedScript._bind(customObjectConfig, objectGroupName);
- return result;
- } catch (e) {
- logError(e);
- }
- }
- } catch (e) {
- logError(e);
- }
- return null;
- },
-
- /**
- * @return {!RuntimeAgent.ObjectPreview} preview
- */
- _createEmptyPreview: function()
- {
- var preview = {
- type: /** @type {!RuntimeAgent.ObjectPreviewType.<string>} */ (this.type),
- description: this.description || toStringDescription(this.value),
- overflow: false,
- properties: [],
- __proto__: null
- };
- InjectedScriptHost.nullifyPrototype(preview.properties);
- if (this.subtype)
- preview.subtype = /** @type {!RuntimeAgent.ObjectPreviewSubtype.<string>} */ (this.subtype);
- return preview;
- },
-
- /**
- * @param {!Object} object
- * @param {?Array.<string>=} firstLevelKeys
- * @param {?Array.<string>=} secondLevelKeys
- * @param {boolean=} isTable
- * @param {boolean=} skipEntriesPreview
- * @return {!RuntimeAgent.ObjectPreview} preview
- */
- _generatePreview: function(object, firstLevelKeys, secondLevelKeys, isTable, skipEntriesPreview)
- {
- var preview = this._createEmptyPreview();
- var firstLevelKeysCount = firstLevelKeys ? firstLevelKeys.length : 0;
- var propertiesThreshold = {
- properties: isTable ? 1000 : max(5, firstLevelKeysCount),
- indexes: isTable ? 1000 : max(100, firstLevelKeysCount),
- __proto__: null
- };
- var subtype = this.subtype;
- var primitiveString;
-
- try {
- var descriptors = [];
- InjectedScriptHost.nullifyPrototype(descriptors);
-
- // Add internal properties to preview.
- var rawInternalProperties = InjectedScriptHost.getInternalProperties(object) || [];
- var internalProperties = [];
- InjectedScriptHost.nullifyPrototype(rawInternalProperties);
- InjectedScriptHost.nullifyPrototype(internalProperties);
- var entries = null;
- for (var i = 0; i < rawInternalProperties.length; i += 2) {
- if (rawInternalProperties[i] === "[[Entries]]") {
- entries = /** @type {!Array<*>} */(rawInternalProperties[i + 1]);
- continue;
- }
- if (rawInternalProperties[i] === "[[PrimitiveValue]]" && typeof rawInternalProperties[i + 1] === 'string')
- primitiveString = rawInternalProperties[i + 1];
- var internalPropertyDescriptor = {
- name: rawInternalProperties[i],
- value: rawInternalProperties[i + 1],
- isOwn: true,
- enumerable: true,
- __proto__: null
- };
- push(descriptors, internalPropertyDescriptor);
- }
- var naturalDescriptors = injectedScript._propertyDescriptors(object, addPropertyIfNeeded, false /* ownProperties */, undefined /* accessorPropertiesOnly */, firstLevelKeys);
- for (var i = 0; i < naturalDescriptors.length; i++)
- push(descriptors, naturalDescriptors[i]);
-
- this._appendPropertyPreviewDescriptors(preview, descriptors, secondLevelKeys, isTable);
-
- if (subtype === "map" || subtype === "set" || subtype === "weakmap" || subtype === "weakset" || subtype === "iterator")
- this._appendEntriesPreview(entries, preview, skipEntriesPreview);
-
- } catch (e) {}
-
- return preview;
-
- /**
- * @param {!Array<!Object>} descriptors
- * @param {!Object} descriptor
- * @return {boolean}
- */
- function addPropertyIfNeeded(descriptors, descriptor) {
- if (descriptor.wasThrown)
- return true;
-
- // Ignore __proto__ property.
- if (descriptor.name === "__proto__")
- return true;
-
- // Ignore length property of array.
- if ((subtype === "array" || subtype === "typedarray") && descriptor.name === "length")
- return true;
-
- // Ignore size property of map, set.
- if ((subtype === "map" || subtype === "set") && descriptor.name === "size")
- return true;
-
- // Ignore internal typed-array views in ArrayBuffer previews.
- if (subtype === 'arraybuffer' && (descriptor.name === "[[Int8Array]]" || descriptor.name === "[[Uint8Array]]" || descriptor.name === "[[Int16Array]]" || descriptor.name === "[[Int32Array]]"))
- return true;
-
- // Never preview prototype properties.
- if (!descriptor.isOwn)
- return true;
-
- // Ignore computed properties unless they have getters.
- if (!("value" in descriptor) && !descriptor.get)
- return true;
-
- // Ignore index properties when there is a primitive string.
- if (primitiveString && primitiveString[descriptor.name] === descriptor.value)
- return true;
-
- if (toString(descriptor.name >>> 0) === descriptor.name)
- propertiesThreshold.indexes--;
- else
- propertiesThreshold.properties--;
-
- var canContinue = propertiesThreshold.indexes >= 0 && propertiesThreshold.properties >= 0;
- if (!canContinue) {
- preview.overflow = true;
- return false;
- }
- push(descriptors, descriptor);
- return true;
- }
- },
-
- /**
- * @param {!RuntimeAgent.ObjectPreview} preview
- * @param {!Array.<*>|!Iterable.<*>} descriptors
- * @param {?Array.<string>=} secondLevelKeys
- * @param {boolean=} isTable
- */
- _appendPropertyPreviewDescriptors: function(preview, descriptors, secondLevelKeys, isTable)
- {
- for (var i = 0; i < descriptors.length; ++i) {
- var descriptor = descriptors[i];
- var name = descriptor.name;
- var value = descriptor.value;
- var type = typeof value;
-
- // Special-case HTMLAll.
- if (type === "undefined" && injectedScript._isHTMLAllCollection(value))
- type = "object";
-
- // Ignore computed properties unless they have getters.
- if (descriptor.get && !("value" in descriptor)) {
- push(preview.properties, { name: name, type: "accessor", __proto__: null });
- continue;
- }
-
- // Render own properties.
- if (value === null) {
- push(preview.properties, { name: name, type: "object", subtype: "null", value: "null", __proto__: null });
- continue;
- }
-
- var maxLength = 100;
- if (InjectedScript.primitiveTypes[type]) {
- var valueString = type === "string" ? value : toStringDescription(value);
- if (valueString.length > maxLength)
- valueString = this._abbreviateString(valueString, maxLength, true);
- push(preview.properties, { name: name, type: type, value: valueString, __proto__: null });
- continue;
- }
-
- var property = { name: name, type: type, __proto__: null };
- var subtype = injectedScript._subtype(value);
- if (subtype)
- property.subtype = subtype;
-
- if (secondLevelKeys === null || secondLevelKeys) {
- var subPreview = this._generatePreview(value, secondLevelKeys || undefined, undefined, isTable);
- property.valuePreview = subPreview;
- if (subPreview.overflow)
- preview.overflow = true;
- } else {
- var description = "";
- if (type !== "function")
- description = this._abbreviateString(/** @type {string} */ (injectedScript._describe(value)), maxLength, subtype === "regexp");
- property.value = description;
- }
- push(preview.properties, property);
- }
- },
-
- /**
- * @param {?Array<*>} entries
- * @param {!RuntimeAgent.ObjectPreview} preview
- * @param {boolean=} skipEntriesPreview
- */
- _appendEntriesPreview: function(entries, preview, skipEntriesPreview)
- {
- if (!entries)
- return;
- if (skipEntriesPreview) {
- if (entries.length)
- preview.overflow = true;
- return;
- }
- preview.entries = [];
- InjectedScriptHost.nullifyPrototype(preview.entries);
- var entriesThreshold = 5;
- for (var i = 0; i < entries.length; ++i) {
- if (preview.entries.length >= entriesThreshold) {
- preview.overflow = true;
- break;
- }
- var entry = entries[i];
- InjectedScriptHost.nullifyPrototype(entry);
- var previewEntry = {
- value: generateValuePreview(entry.value),
- __proto__: null
- };
- if ("key" in entry)
- previewEntry.key = generateValuePreview(entry.key);
- push(preview.entries, previewEntry);
- }
-
- /**
- * @param {*} value
- * @return {!RuntimeAgent.ObjectPreview}
- */
- function generateValuePreview(value)
- {
- var remoteObject = new InjectedScript.RemoteObject(value, undefined, true, undefined, true, undefined, undefined, true);
- var valuePreview = remoteObject.preview || remoteObject._createEmptyPreview();
- return valuePreview;
- }
- },
-
- /**
- * @param {string} string
- * @param {number} maxLength
- * @param {boolean=} middle
- * @return {string}
- */
- _abbreviateString: function(string, maxLength, middle)
- {
- if (string.length <= maxLength)
- return string;
- if (middle) {
- var leftHalf = maxLength >> 1;
- var rightHalf = maxLength - leftHalf - 1;
- return string.substr(0, leftHalf) + "\u2026" + string.substr(string.length - rightHalf, rightHalf);
- }
- return string.substr(0, maxLength) + "\u2026";
- },
-
- __proto__: null
-};
-
-return injectedScript;
-})
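
The file deleted above implemented the inspector's object inspection logic in JavaScript, evaluated inside the inspected context. Its _customPreview path consumed page-provided formatters from window.devtoolsFormatters, calling each formatter's header() and hasBody() hooks (see the deleted code above). As a minimal sketch of a page-side formatter in that contract (the dateFormatter name and its JsonML output are illustrative, not part of this patch):

    // A page-side custom formatter in the contract _customPreview consumed:
    // header() returns JsonML, or null to decline the object;
    // hasBody() reports whether an expandable body exists.
    const dateFormatter = {
      header(obj /*, config */) {
        if (!(obj instanceof Date)) return null;  // decline non-Dates
        return ["span", {"style": "color: purple"}, "Date(" + obj.toISOString() + ")"];
      },
      hasBody() { return false; }
    };
    window.devtoolsFormatters = window.devtoolsFormatters || [];
    window.devtoolsFormatters.push(dateFormatter);
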
diff --git a/deps/v8/src/inspector/injected-script.cc b/deps/v8/src/inspector/injected-script.cc
index 296dc4c631..f1eb4fecf3 100644
--- a/deps/v8/src/inspector/injected-script.cc
+++ b/deps/v8/src/inspector/injected-script.cc
@@ -30,25 +30,26 @@
#include "src/inspector/injected-script.h"
-#include "src/inspector/injected-script-source.h"
+#include <cmath>
+#include <unordered_set>
+
+#include "src/inspector/custom-preview.h"
#include "src/inspector/inspected-context.h"
#include "src/inspector/protocol/Protocol.h"
#include "src/inspector/remote-object-id.h"
#include "src/inspector/string-util.h"
#include "src/inspector/v8-console.h"
-#include "src/inspector/v8-function-call.h"
-#include "src/inspector/v8-injected-script-host.h"
#include "src/inspector/v8-inspector-impl.h"
#include "src/inspector/v8-inspector-session-impl.h"
#include "src/inspector/v8-stack-trace-impl.h"
#include "src/inspector/v8-value-utils.h"
+#include "src/inspector/value-mirror.h"
#include "include/v8-inspector.h"
namespace v8_inspector {
namespace {
-static const char privateKeyName[] = "v8-inspector#injectedScript";
static const char kGlobalHandleLabel[] = "DevTools console";
static bool isResolvableNumberLike(String16 query) {
return query == "Infinity" || query == "-Infinity" || query == "NaN";
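
isResolvableNumberLike exists because JSON, the protocol's wire format, cannot carry these values; they travel in the string field unserializableValue instead (as did -0 and bigint in the deleted injected script). A quick sketch of what plain JSON does to them:

    // Why NaN, Infinity and friends need special handling on the protocol:
    JSON.stringify(NaN);       // "null"
    JSON.stringify(Infinity);  // "null"
    JSON.stringify(-0);        // "0"  (the sign is lost)
    // JSON.stringify(1n) throws a TypeError: BigInt is not serializable.
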
@@ -66,8 +67,7 @@ class InjectedScript::ProtocolPromiseHandler {
static bool add(V8InspectorSessionImpl* session,
v8::Local<v8::Context> context, v8::Local<v8::Value> value,
int executionContextId, const String16& objectGroup,
- bool returnByValue, bool generatePreview,
- EvaluateCallback* callback) {
+ WrapMode wrapMode, EvaluateCallback* callback) {
v8::Local<v8::Promise::Resolver> resolver;
if (!v8::Promise::Resolver::New(context).ToLocal(&resolver)) {
callback->sendFailure(Response::InternalError());
@@ -80,9 +80,8 @@ class InjectedScript::ProtocolPromiseHandler {
v8::Local<v8::Promise> promise = resolver->GetPromise();
V8InspectorImpl* inspector = session->inspector();
- ProtocolPromiseHandler* handler =
- new ProtocolPromiseHandler(session, executionContextId, objectGroup,
- returnByValue, generatePreview, callback);
+ ProtocolPromiseHandler* handler = new ProtocolPromiseHandler(
+ session, executionContextId, objectGroup, wrapMode, callback);
v8::Local<v8::Value> wrapper = handler->m_wrapper.Get(inspector->isolate());
v8::Local<v8::Function> thenCallbackFunction =
v8::Function::New(context, thenCallback, wrapper, 0,
@@ -130,15 +129,13 @@ class InjectedScript::ProtocolPromiseHandler {
ProtocolPromiseHandler(V8InspectorSessionImpl* session,
int executionContextId, const String16& objectGroup,
- bool returnByValue, bool generatePreview,
- EvaluateCallback* callback)
+ WrapMode wrapMode, EvaluateCallback* callback)
: m_inspector(session->inspector()),
m_sessionId(session->sessionId()),
m_contextGroupId(session->contextGroupId()),
m_executionContextId(executionContextId),
m_objectGroup(objectGroup),
- m_returnByValue(returnByValue),
- m_generatePreview(generatePreview),
+ m_wrapMode(wrapMode),
m_callback(std::move(callback)),
m_wrapper(m_inspector->isolate(),
v8::External::New(m_inspector->isolate(), this)) {
@@ -170,9 +167,8 @@ class InjectedScript::ProtocolPromiseHandler {
scope.injectedScript()->takeEvaluateCallback(m_callback);
if (!callback) return;
std::unique_ptr<protocol::Runtime::RemoteObject> wrappedValue;
- response = scope.injectedScript()->wrapObject(
- result, m_objectGroup, m_returnByValue, m_generatePreview,
- &wrappedValue);
+ response = scope.injectedScript()->wrapObject(result, m_objectGroup,
+ m_wrapMode, &wrappedValue);
if (!response.isSuccess()) {
callback->sendFailure(response);
return;
@@ -192,9 +188,8 @@ class InjectedScript::ProtocolPromiseHandler {
scope.injectedScript()->takeEvaluateCallback(m_callback);
if (!callback) return;
std::unique_ptr<protocol::Runtime::RemoteObject> wrappedValue;
- response = scope.injectedScript()->wrapObject(
- result, m_objectGroup, m_returnByValue, m_generatePreview,
- &wrappedValue);
+ response = scope.injectedScript()->wrapObject(result, m_objectGroup,
+ m_wrapMode, &wrappedValue);
if (!response.isSuccess()) {
callback->sendFailure(response);
return;
@@ -252,116 +247,139 @@ class InjectedScript::ProtocolPromiseHandler {
int m_contextGroupId;
int m_executionContextId;
String16 m_objectGroup;
- bool m_returnByValue;
- bool m_generatePreview;
+ WrapMode m_wrapMode;
EvaluateCallback* m_callback;
v8::Global<v8::External> m_wrapper;
};
-std::unique_ptr<InjectedScript> InjectedScript::create(
- InspectedContext* inspectedContext, int sessionId) {
- v8::Isolate* isolate = inspectedContext->isolate();
- v8::HandleScope handles(isolate);
- v8::TryCatch tryCatch(isolate);
- v8::Local<v8::Context> context = inspectedContext->context();
- v8::debug::PostponeInterruptsScope postponeInterrupts(isolate);
- v8::Context::Scope scope(context);
- v8::MicrotasksScope microtasksScope(isolate,
- v8::MicrotasksScope::kDoNotRunMicrotasks);
-
- // Inject JavaScript into the context. The compiled script is expected to
- // evaluate to a single anonymous function (anonymous to avoid cluttering
- // the global object with the inspector's machinery). The function is
- // called a few lines below with the InjectedScriptHost wrapper, the
- // injected script id, and an explicit reference to the inspected global
- // object, and is expected to create and configure the InjectedScript
- // instance that the inspector will use.
- StringView injectedScriptSource(
- reinterpret_cast<const uint8_t*>(InjectedScriptSource_js),
- sizeof(InjectedScriptSource_js));
- v8::Local<v8::Value> value;
- if (!inspectedContext->inspector()
- ->compileAndRunInternalScript(
- context, toV8String(isolate, injectedScriptSource))
- .ToLocal(&value)) {
- return nullptr;
- }
- DCHECK(value->IsFunction());
- v8::Local<v8::Object> scriptHostWrapper =
- V8InjectedScriptHost::create(context, inspectedContext->inspector());
- v8::Local<v8::Function> function = v8::Local<v8::Function>::Cast(value);
- v8::Local<v8::Object> windowGlobal = context->Global();
- v8::Local<v8::Value> info[] = {
- scriptHostWrapper, windowGlobal,
- v8::Number::New(isolate, inspectedContext->contextId())};
-
- int contextGroupId = inspectedContext->contextGroupId();
- int contextId = inspectedContext->contextId();
- V8InspectorImpl* inspector = inspectedContext->inspector();
- v8::Local<v8::Value> injectedScriptValue;
- if (!function->Call(context, windowGlobal, arraysize(info), info)
- .ToLocal(&injectedScriptValue))
- return nullptr;
- if (inspector->getContext(contextGroupId, contextId) != inspectedContext)
- return nullptr;
- if (!injectedScriptValue->IsObject()) return nullptr;
-
- std::unique_ptr<InjectedScript> injectedScript(new InjectedScript(
- inspectedContext, injectedScriptValue.As<v8::Object>(), sessionId));
- v8::Local<v8::Private> privateKey = v8::Private::ForApi(
- isolate, v8::String::NewFromUtf8(isolate, privateKeyName,
- v8::NewStringType::kInternalized)
- .ToLocalChecked());
- scriptHostWrapper->SetPrivate(
- context, privateKey, v8::External::New(isolate, injectedScript.get()));
- return injectedScript;
-}
-
-InjectedScript::InjectedScript(InspectedContext* context,
- v8::Local<v8::Object> object, int sessionId)
- : m_context(context),
- m_value(context->isolate(), object),
- m_sessionId(sessionId) {}
+InjectedScript::InjectedScript(InspectedContext* context, int sessionId)
+ : m_context(context), m_sessionId(sessionId) {}
InjectedScript::~InjectedScript() { discardEvaluateCallbacks(); }
+namespace {
+class PropertyAccumulator : public ValueMirror::PropertyAccumulator {
+ public:
+ explicit PropertyAccumulator(std::vector<PropertyMirror>* mirrors)
+ : m_mirrors(mirrors) {}
+ bool Add(PropertyMirror mirror) override {
+ m_mirrors->push_back(std::move(mirror));
+ return true;
+ }
+
+ private:
+ std::vector<PropertyMirror>* m_mirrors;
+};
+} // anonymous namespace
+
Response InjectedScript::getProperties(
v8::Local<v8::Object> object, const String16& groupName, bool ownProperties,
- bool accessorPropertiesOnly, bool generatePreview,
+ bool accessorPropertiesOnly, WrapMode wrapMode,
std::unique_ptr<Array<PropertyDescriptor>>* properties,
Maybe<protocol::Runtime::ExceptionDetails>* exceptionDetails) {
v8::HandleScope handles(m_context->isolate());
v8::Local<v8::Context> context = m_context->context();
- V8FunctionCall function(m_context->inspector(), m_context->context(),
- v8Value(), "getProperties");
- function.appendArgument(object);
- function.appendArgument(groupName);
- function.appendArgument(ownProperties);
- function.appendArgument(accessorPropertiesOnly);
- function.appendArgument(generatePreview);
-
- v8::TryCatch tryCatch(m_context->isolate());
- v8::Local<v8::Value> resultValue = function.callWithoutExceptionHandling();
- if (tryCatch.HasCaught()) {
- Response response = createExceptionDetails(
- tryCatch, groupName, generatePreview, exceptionDetails);
+ v8::Isolate* isolate = m_context->isolate();
+ int sessionId = m_sessionId;
+ v8::TryCatch tryCatch(isolate);
+
+ *properties = Array<PropertyDescriptor>::create();
+ std::vector<PropertyMirror> mirrors;
+ PropertyAccumulator accumulator(&mirrors);
+ if (!ValueMirror::getProperties(context, object, ownProperties,
+ accessorPropertiesOnly, &accumulator)) {
+ return createExceptionDetails(tryCatch, groupName, wrapMode,
+ exceptionDetails);
+ }
+ for (const PropertyMirror& mirror : mirrors) {
+ std::unique_ptr<PropertyDescriptor> descriptor =
+ PropertyDescriptor::create()
+ .setName(mirror.name)
+ .setConfigurable(mirror.configurable)
+ .setEnumerable(mirror.enumerable)
+ .setIsOwn(mirror.isOwn)
+ .build();
+ Response response;
+ std::unique_ptr<RemoteObject> remoteObject;
+ if (mirror.value) {
+ response = wrapObjectMirror(*mirror.value, groupName, wrapMode,
+ v8::MaybeLocal<v8::Value>(),
+ kMaxCustomPreviewDepth, &remoteObject);
+ if (!response.isSuccess()) return response;
+ descriptor->setValue(std::move(remoteObject));
+ descriptor->setWritable(mirror.writable);
+ }
+ if (mirror.getter) {
+ response =
+ mirror.getter->buildRemoteObject(context, wrapMode, &remoteObject);
+ if (!response.isSuccess()) return response;
+ response =
+ bindRemoteObjectIfNeeded(sessionId, context, mirror.getter->v8Value(),
+ groupName, remoteObject.get());
+ if (!response.isSuccess()) return response;
+ descriptor->setGet(std::move(remoteObject));
+ }
+ if (mirror.setter) {
+ response =
+ mirror.setter->buildRemoteObject(context, wrapMode, &remoteObject);
+ if (!response.isSuccess()) return response;
+ response =
+ bindRemoteObjectIfNeeded(sessionId, context, mirror.setter->v8Value(),
+ groupName, remoteObject.get());
+ if (!response.isSuccess()) return response;
+ descriptor->setSet(std::move(remoteObject));
+ }
+ if (mirror.symbol) {
+ response =
+ mirror.symbol->buildRemoteObject(context, wrapMode, &remoteObject);
+ if (!response.isSuccess()) return response;
+ response =
+ bindRemoteObjectIfNeeded(sessionId, context, mirror.symbol->v8Value(),
+ groupName, remoteObject.get());
+ if (!response.isSuccess()) return response;
+ descriptor->setSymbol(std::move(remoteObject));
+ }
+ if (mirror.exception) {
+ response =
+ mirror.exception->buildRemoteObject(context, wrapMode, &remoteObject);
+ if (!response.isSuccess()) return response;
+ response = bindRemoteObjectIfNeeded(sessionId, context,
+ mirror.exception->v8Value(),
+ groupName, remoteObject.get());
+ if (!response.isSuccess()) return response;
+ descriptor->setValue(std::move(remoteObject));
+ descriptor->setWasThrown(true);
+ }
+ (*properties)->addItem(std::move(descriptor));
+ }
+ return Response::OK();
+}
+
+Response InjectedScript::getInternalProperties(
+ v8::Local<v8::Value> value, const String16& groupName,
+ std::unique_ptr<protocol::Array<InternalPropertyDescriptor>>* result) {
+ *result = protocol::Array<InternalPropertyDescriptor>::create();
+ v8::Local<v8::Context> context = m_context->context();
+ int sessionId = m_sessionId;
+ std::vector<InternalPropertyMirror> wrappers;
+ if (value->IsObject()) {
+ ValueMirror::getInternalProperties(m_context->context(),
+ value.As<v8::Object>(), &wrappers);
+ }
+ for (size_t i = 0; i < wrappers.size(); ++i) {
+ std::unique_ptr<RemoteObject> remoteObject;
+ Response response = wrappers[i].value->buildRemoteObject(
+ m_context->context(), WrapMode::kNoPreview, &remoteObject);
if (!response.isSuccess()) return response;
- // FIXME: make properties optional
- *properties = Array<PropertyDescriptor>::create();
- return Response::OK();
+ response = bindRemoteObjectIfNeeded(sessionId, context,
+ wrappers[i].value->v8Value(), groupName,
+ remoteObject.get());
+ if (!response.isSuccess()) return response;
+ (*result)->addItem(InternalPropertyDescriptor::create()
+ .setName(wrappers[i].name)
+ .setValue(std::move(remoteObject))
+ .build());
}
- if (resultValue.IsEmpty()) return Response::InternalError();
- std::unique_ptr<protocol::Value> protocolValue;
- Response response = toProtocolValue(context, resultValue, &protocolValue);
- if (!response.isSuccess()) return response;
- protocol::ErrorSupport errors;
- std::unique_ptr<Array<PropertyDescriptor>> result =
- Array<PropertyDescriptor>::fromValue(protocolValue.get(), &errors);
- if (errors.hasErrors()) return Response::Error(errors.errors());
- *properties = std::move(result);
return Response::OK();
}
@@ -378,67 +396,108 @@ void InjectedScript::releaseObject(const String16& objectId) {
}
Response InjectedScript::wrapObject(
- v8::Local<v8::Value> value, const String16& groupName, bool forceValueType,
- bool generatePreview,
- std::unique_ptr<protocol::Runtime::RemoteObject>* result) const {
- v8::HandleScope handles(m_context->isolate());
- v8::Local<v8::Value> wrappedObject;
+ v8::Local<v8::Value> value, const String16& groupName, WrapMode wrapMode,
+ std::unique_ptr<protocol::Runtime::RemoteObject>* result) {
+ return wrapObject(value, groupName, wrapMode, v8::MaybeLocal<v8::Value>(),
+ kMaxCustomPreviewDepth, result);
+}
+
+Response InjectedScript::wrapObject(
+ v8::Local<v8::Value> value, const String16& groupName, WrapMode wrapMode,
+ v8::MaybeLocal<v8::Value> customPreviewConfig, int maxCustomPreviewDepth,
+ std::unique_ptr<protocol::Runtime::RemoteObject>* result) {
v8::Local<v8::Context> context = m_context->context();
- Response response = wrapValue(value, groupName, forceValueType,
- generatePreview, &wrappedObject);
+ v8::Context::Scope contextScope(context);
+ std::unique_ptr<ValueMirror> mirror = ValueMirror::create(context, value);
+ if (!mirror) return Response::InternalError();
+ return wrapObjectMirror(*mirror, groupName, wrapMode, customPreviewConfig,
+ maxCustomPreviewDepth, result);
+}
+
+Response InjectedScript::wrapObjectMirror(
+ const ValueMirror& mirror, const String16& groupName, WrapMode wrapMode,
+ v8::MaybeLocal<v8::Value> customPreviewConfig, int maxCustomPreviewDepth,
+ std::unique_ptr<protocol::Runtime::RemoteObject>* result) {
+ int customPreviewEnabled = m_customPreviewEnabled;
+ int sessionId = m_sessionId;
+ v8::Local<v8::Context> context = m_context->context();
+ v8::Context::Scope contextScope(context);
+ Response response = mirror.buildRemoteObject(context, wrapMode, result);
if (!response.isSuccess()) return response;
- protocol::ErrorSupport errors;
- std::unique_ptr<protocol::Value> protocolValue;
- response = toProtocolValue(context, wrappedObject, &protocolValue);
+ v8::Local<v8::Value> value = mirror.v8Value();
+ response = bindRemoteObjectIfNeeded(sessionId, context, value, groupName,
+ result->get());
if (!response.isSuccess()) return response;
-
- *result =
- protocol::Runtime::RemoteObject::fromValue(protocolValue.get(), &errors);
- if (!result->get()) return Response::Error(errors.errors());
- return Response::OK();
-}
-
-Response InjectedScript::wrapValue(v8::Local<v8::Value> value,
- const String16& groupName,
- bool forceValueType, bool generatePreview,
- v8::Local<v8::Value>* result) const {
- V8FunctionCall function(m_context->inspector(), m_context->context(),
- v8Value(), "wrapObject");
- function.appendArgument(value);
- function.appendArgument(groupName);
- function.appendArgument(forceValueType);
- function.appendArgument(generatePreview);
- bool hadException = false;
- *result = function.call(hadException);
- if (hadException || result->IsEmpty()) return Response::InternalError();
+ if (customPreviewEnabled && value->IsObject()) {
+ std::unique_ptr<protocol::Runtime::CustomPreview> customPreview;
+ generateCustomPreview(sessionId, groupName, context, value.As<v8::Object>(),
+ customPreviewConfig, maxCustomPreviewDepth,
+ &customPreview);
+ if (customPreview) (*result)->setCustomPreview(std::move(customPreview));
+ }
return Response::OK();
}
std::unique_ptr<protocol::Runtime::RemoteObject> InjectedScript::wrapTable(
- v8::Local<v8::Value> table, v8::Local<v8::Value> columns) const {
- v8::HandleScope handles(m_context->isolate());
+ v8::Local<v8::Object> table, v8::MaybeLocal<v8::Array> maybeColumns) {
+ using protocol::Runtime::RemoteObject;
+ using protocol::Runtime::ObjectPreview;
+ using protocol::Runtime::PropertyPreview;
+ using protocol::Array;
+
+ v8::Isolate* isolate = m_context->isolate();
+ v8::HandleScope handles(isolate);
v8::Local<v8::Context> context = m_context->context();
- V8FunctionCall function(m_context->inspector(), context, v8Value(),
- "wrapTable");
- function.appendArgument(table);
- if (columns.IsEmpty())
- function.appendArgument(false);
- else
- function.appendArgument(columns);
- bool hadException = false;
- v8::Local<v8::Value> r = function.call(hadException);
- if (hadException || r.IsEmpty()) return nullptr;
- std::unique_ptr<protocol::Value> protocolValue;
- Response response = toProtocolValue(context, r, &protocolValue);
- if (!response.isSuccess()) return nullptr;
- protocol::ErrorSupport errors;
- return protocol::Runtime::RemoteObject::fromValue(protocolValue.get(),
- &errors);
+
+ std::unique_ptr<RemoteObject> remoteObject;
+ Response response =
+ wrapObject(table, "console", WrapMode::kNoPreview, &remoteObject);
+ if (!remoteObject || !response.isSuccess()) return nullptr;
+
+ auto mirror = ValueMirror::create(context, table);
+ std::unique_ptr<ObjectPreview> preview;
+ int limit = 1000;
+ mirror->buildObjectPreview(context, true /* generatePreviewForTable */,
+ &limit, &limit, &preview);
+ if (!preview) return nullptr;
+
+ Array<PropertyPreview>* columns = preview->getProperties();
+ std::unordered_set<String16> selectedColumns;
+ v8::Local<v8::Array> v8Columns;
+ if (maybeColumns.ToLocal(&v8Columns)) {
+ for (uint32_t i = 0; i < v8Columns->Length(); ++i) {
+ v8::Local<v8::Value> column;
+ if (v8Columns->Get(context, i).ToLocal(&column) && column->IsString()) {
+ selectedColumns.insert(
+ toProtocolString(isolate, column.As<v8::String>()));
+ }
+ }
+ }
+ if (!selectedColumns.empty()) {
+ for (size_t i = 0; i < columns->length(); ++i) {
+ ObjectPreview* columnPreview = columns->get(i)->getValuePreview(nullptr);
+ if (!columnPreview) continue;
+
+ std::unique_ptr<Array<PropertyPreview>> filtered =
+ Array<PropertyPreview>::create();
+ Array<PropertyPreview>* columns = columnPreview->getProperties();
+ for (size_t j = 0; j < columns->length(); ++j) {
+ PropertyPreview* property = columns->get(j);
+ if (selectedColumns.find(property->getName()) !=
+ selectedColumns.end()) {
+ filtered->addItem(property->clone());
+ }
+ }
+ columnPreview->setProperties(std::move(filtered));
+ }
+ }
+ remoteObject->setPreview(std::move(preview));
+ return remoteObject;
}
void InjectedScript::addPromiseCallback(
V8InspectorSessionImpl* session, v8::MaybeLocal<v8::Value> value,
- const String16& objectGroup, bool returnByValue, bool generatePreview,
+ const String16& objectGroup, WrapMode wrapMode,
std::unique_ptr<EvaluateCallback> callback) {
if (value.IsEmpty()) {
callback->sendFailure(Response::InternalError());
@@ -448,8 +507,7 @@ void InjectedScript::addPromiseCallback(
v8::MicrotasksScope::kRunMicrotasks);
if (ProtocolPromiseHandler::add(
session, m_context->context(), value.ToLocalChecked(),
- m_context->contextId(), objectGroup, returnByValue, generatePreview,
- callback.get())) {
+ m_context->contextId(), objectGroup, wrapMode, callback.get())) {
m_evaluateCallbacks.insert(callback.release());
}
}
@@ -496,17 +554,7 @@ void InjectedScript::releaseObjectGroup(const String16& objectGroup) {
}
void InjectedScript::setCustomObjectFormatterEnabled(bool enabled) {
- v8::HandleScope handles(m_context->isolate());
- V8FunctionCall function(m_context->inspector(), m_context->context(),
- v8Value(), "setCustomObjectFormatterEnabled");
- function.appendArgument(enabled);
- bool hadException = false;
- function.call(hadException);
- DCHECK(!hadException);
-}
-
-v8::Local<v8::Value> InjectedScript::v8Value() const {
- return m_value.Get(m_context->isolate());
+ m_customPreviewEnabled = enabled;
}
v8::Local<v8::Value> InjectedScript::lastEvaluationResult() const {
@@ -560,7 +608,7 @@ Response InjectedScript::resolveCallArgument(
Response InjectedScript::createExceptionDetails(
const v8::TryCatch& tryCatch, const String16& objectGroup,
- bool generatePreview, Maybe<protocol::Runtime::ExceptionDetails>* result) {
+ WrapMode wrapMode, Maybe<protocol::Runtime::ExceptionDetails>* result) {
if (!tryCatch.HasCaught()) return Response::InternalError();
v8::Local<v8::Message> message = tryCatch.Message();
v8::Local<v8::Value> exception = tryCatch.Exception();
@@ -596,8 +644,10 @@ Response InjectedScript::createExceptionDetails(
if (!exception.IsEmpty()) {
std::unique_ptr<protocol::Runtime::RemoteObject> wrapped;
Response response =
- wrapObject(exception, objectGroup, false /* forceValueType */,
- generatePreview && !exception->IsNativeError(), &wrapped);
+ wrapObject(exception, objectGroup,
+ exception->IsNativeError() ? WrapMode::kNoPreview
+ : WrapMode::kWithPreview,
+ &wrapped);
if (!response.isSuccess()) return response;
exceptionDetails->setException(std::move(wrapped));
}
@@ -607,15 +657,14 @@ Response InjectedScript::createExceptionDetails(
Response InjectedScript::wrapEvaluateResult(
v8::MaybeLocal<v8::Value> maybeResultValue, const v8::TryCatch& tryCatch,
- const String16& objectGroup, bool returnByValue, bool generatePreview,
+ const String16& objectGroup, WrapMode wrapMode,
std::unique_ptr<protocol::Runtime::RemoteObject>* result,
Maybe<protocol::Runtime::ExceptionDetails>* exceptionDetails) {
v8::Local<v8::Value> resultValue;
if (!tryCatch.HasCaught()) {
if (!maybeResultValue.ToLocal(&resultValue))
return Response::InternalError();
- Response response = wrapObject(resultValue, objectGroup, returnByValue,
- generatePreview, result);
+ Response response = wrapObject(resultValue, objectGroup, wrapMode, result);
if (!response.isSuccess()) return response;
if (objectGroup == "console") {
m_lastEvaluationResult.Reset(m_context->isolate(), resultValue);
@@ -627,12 +676,14 @@ Response InjectedScript::wrapEvaluateResult(
}
v8::Local<v8::Value> exception = tryCatch.Exception();
Response response =
- wrapObject(exception, objectGroup, false,
- generatePreview && !exception->IsNativeError(), result);
+ wrapObject(exception, objectGroup,
+ exception->IsNativeError() ? WrapMode::kNoPreview
+ : WrapMode::kWithPreview,
+ result);
if (!response.isSuccess()) return response;
// We send exception in result for compatibility reasons, even though it's
// accessible through exceptionDetails.exception.
- response = createExceptionDetails(tryCatch, objectGroup, generatePreview,
+ response = createExceptionDetails(tryCatch, objectGroup, wrapMode,
exceptionDetails);
if (!response.isSuccess()) return response;
}
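
As the comment above notes, a thrown value is reported twice for compatibility. A sketch of the resulting Runtime.evaluate response (field names per the Chrome DevTools Protocol, values made up):

    const evaluateResponse = {
      result: { type: "object", subtype: "error", className: "TypeError",
                description: "TypeError: x is not a function",
                objectId: '{"injectedScriptId":1,"id":12}' },
      exceptionDetails: {
        exceptionId: 1, text: "Uncaught", lineNumber: 3, columnNumber: 2,
        exception: { /* the same RemoteObject as result */ }
      }
    };
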
@@ -781,33 +832,44 @@ Response InjectedScript::CallFrameScope::findInjectedScript(
return session->findInjectedScript(remoteId.get(), m_injectedScript);
}
-InjectedScript* InjectedScript::fromInjectedScriptHost(
- v8::Isolate* isolate, v8::Local<v8::Object> injectedScriptObject) {
- v8::HandleScope handleScope(isolate);
- v8::Local<v8::Context> context = isolate->GetCurrentContext();
- v8::Local<v8::Private> privateKey = v8::Private::ForApi(
- isolate, v8::String::NewFromUtf8(isolate, privateKeyName,
- v8::NewStringType::kInternalized)
- .ToLocalChecked());
- v8::Local<v8::Value> value =
- injectedScriptObject->GetPrivate(context, privateKey).ToLocalChecked();
- DCHECK(value->IsExternal());
- v8::Local<v8::External> external = value.As<v8::External>();
- return static_cast<InjectedScript*>(external->Value());
-}
-
-int InjectedScript::bindObject(v8::Local<v8::Value> value,
- const String16& groupName) {
+String16 InjectedScript::bindObject(v8::Local<v8::Value> value,
+ const String16& groupName) {
if (m_lastBoundObjectId <= 0) m_lastBoundObjectId = 1;
int id = m_lastBoundObjectId++;
m_idToWrappedObject[id].Reset(m_context->isolate(), value);
m_idToWrappedObject[id].AnnotateStrongRetainer(kGlobalHandleLabel);
-
if (!groupName.isEmpty() && id > 0) {
m_idToObjectGroupName[id] = groupName;
m_nameToObjectGroup[groupName].push_back(id);
}
- return id;
+ // TODO(dgozman): get rid of "injectedScript" notion.
+ return String16::concat(
+ "{\"injectedScriptId\":", String16::fromInteger(m_context->contextId()),
+ ",\"id\":", String16::fromInteger(id), "}");
+}
+
+// static
+Response InjectedScript::bindRemoteObjectIfNeeded(
+ int sessionId, v8::Local<v8::Context> context, v8::Local<v8::Value> value,
+ const String16& groupName, protocol::Runtime::RemoteObject* remoteObject) {
+ if (!remoteObject) return Response::OK();
+ if (remoteObject->hasValue()) return Response::OK();
+ if (remoteObject->hasUnserializableValue()) return Response::OK();
+ if (remoteObject->getType() != RemoteObject::TypeEnum::Undefined) {
+ v8::Isolate* isolate = context->GetIsolate();
+ V8InspectorImpl* inspector =
+ static_cast<V8InspectorImpl*>(v8::debug::GetInspector(isolate));
+ InspectedContext* inspectedContext =
+ inspector->getContext(InspectedContext::contextId(context));
+ InjectedScript* injectedScript =
+ inspectedContext ? inspectedContext->getInjectedScript(sessionId)
+ : nullptr;
+ if (!injectedScript) {
+ return Response::Error("Cannot find context with specified id");
+ }
+ remoteObject->setObjectId(injectedScript->bindObject(value, groupName));
+ }
+ return Response::OK();
}
void InjectedScript::unbindObject(int id) {
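
bindObject now serializes the object id in C++, in the same {"injectedScriptId":...,"id":...} shape the deleted _bind() produced (the TODO above notes the "injectedScript" name is historical). Clients should treat the id as opaque; it is parsed below only to illustrate the structure:

    const objectId = '{"injectedScriptId":3,"id":17}';
    const { injectedScriptId, id } = JSON.parse(objectId);
    console.log(injectedScriptId, id);  // -> 3 17
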
diff --git a/deps/v8/src/inspector/injected-script.h b/deps/v8/src/inspector/injected-script.h
index a5fb681060..c2a4744c13 100644
--- a/deps/v8/src/inspector/injected-script.h
+++ b/deps/v8/src/inspector/injected-script.h
@@ -46,9 +46,10 @@
namespace v8_inspector {
class RemoteObjectId;
-class V8FunctionCall;
class V8InspectorImpl;
class V8InspectorSessionImpl;
+class ValueMirror;
+enum class WrapMode;
using protocol::Maybe;
using protocol::Response;
@@ -65,33 +66,44 @@ class EvaluateCallback {
class InjectedScript final {
public:
- static std::unique_ptr<InjectedScript> create(InspectedContext*,
- int sessionId);
+ InjectedScript(InspectedContext*, int sessionId);
~InjectedScript();
- static InjectedScript* fromInjectedScriptHost(v8::Isolate* isolate,
- v8::Local<v8::Object>);
InspectedContext* context() const { return m_context; }
Response getProperties(
v8::Local<v8::Object>, const String16& groupName, bool ownProperties,
- bool accessorPropertiesOnly, bool generatePreview,
+ bool accessorPropertiesOnly, WrapMode wrapMode,
std::unique_ptr<protocol::Array<protocol::Runtime::PropertyDescriptor>>*
result,
Maybe<protocol::Runtime::ExceptionDetails>*);
+
+ Response getInternalProperties(
+ v8::Local<v8::Value>, const String16& groupName,
+ std::unique_ptr<
+ protocol::Array<protocol::Runtime::InternalPropertyDescriptor>>*
+ result);
+
void releaseObject(const String16& objectId);
- Response wrapObject(
- v8::Local<v8::Value>, const String16& groupName, bool forceValueType,
- bool generatePreview,
- std::unique_ptr<protocol::Runtime::RemoteObject>* result) const;
+ Response wrapObject(v8::Local<v8::Value>, const String16& groupName,
+ WrapMode wrapMode,
+ std::unique_ptr<protocol::Runtime::RemoteObject>* result);
+ Response wrapObject(v8::Local<v8::Value>, const String16& groupName,
+ WrapMode wrapMode,
+ v8::MaybeLocal<v8::Value> customPreviewConfig,
+ int maxCustomPreviewDepth,
+ std::unique_ptr<protocol::Runtime::RemoteObject>* result);
+ Response wrapObjectMirror(
+ const ValueMirror& mirror, const String16& groupName, WrapMode wrapMode,
+ v8::MaybeLocal<v8::Value> customPreviewConfig, int maxCustomPreviewDepth,
+ std::unique_ptr<protocol::Runtime::RemoteObject>* result);
std::unique_ptr<protocol::Runtime::RemoteObject> wrapTable(
- v8::Local<v8::Value> table, v8::Local<v8::Value> columns) const;
+ v8::Local<v8::Object> table, v8::MaybeLocal<v8::Array> columns);
void addPromiseCallback(V8InspectorSessionImpl* session,
v8::MaybeLocal<v8::Value> value,
- const String16& objectGroup, bool returnByValue,
- bool generatePreview,
+ const String16& objectGroup, WrapMode wrapMode,
std::unique_ptr<EvaluateCallback> callback);
Response findObject(const RemoteObjectId&, v8::Local<v8::Value>*) const;
@@ -102,18 +114,16 @@ class InjectedScript final {
v8::Local<v8::Value>* result);
Response createExceptionDetails(
- const v8::TryCatch&, const String16& groupName, bool generatePreview,
+ const v8::TryCatch&, const String16& groupName, WrapMode wrapMode,
Maybe<protocol::Runtime::ExceptionDetails>* result);
Response wrapEvaluateResult(
v8::MaybeLocal<v8::Value> maybeResultValue, const v8::TryCatch&,
- const String16& objectGroup, bool returnByValue, bool generatePreview,
+ const String16& objectGroup, WrapMode wrapMode,
std::unique_ptr<protocol::Runtime::RemoteObject>* result,
Maybe<protocol::Runtime::ExceptionDetails>*);
v8::Local<v8::Value> lastEvaluationResult() const;
void setLastEvaluationResult(v8::Local<v8::Value> result);
- int bindObject(v8::Local<v8::Value>, const String16& groupName);
-
class Scope {
public:
Response initialize();
@@ -124,6 +134,7 @@ class InjectedScript final {
v8::Local<v8::Context> context() const { return m_context; }
InjectedScript* injectedScript() const { return m_injectedScript; }
const v8::TryCatch& tryCatch() const { return m_tryCatch; }
+ V8InspectorImpl* inspector() const { return m_inspector; }
protected:
explicit Scope(V8InspectorSessionImpl*);
@@ -191,23 +202,22 @@ class InjectedScript final {
DISALLOW_COPY_AND_ASSIGN(CallFrameScope);
};
+ String16 bindObject(v8::Local<v8::Value>, const String16& groupName);
private:
- InjectedScript(InspectedContext*, v8::Local<v8::Object>, int sessionId);
- v8::Local<v8::Value> v8Value() const;
- Response wrapValue(v8::Local<v8::Value>, const String16& groupName,
- bool forceValueType, bool generatePreview,
- v8::Local<v8::Value>* result) const;
v8::Local<v8::Object> commandLineAPI();
void unbindObject(int id);
+ static Response bindRemoteObjectIfNeeded(
+ int sessionId, v8::Local<v8::Context> context, v8::Local<v8::Value>,
+ const String16& groupName, protocol::Runtime::RemoteObject* remoteObject);
+
class ProtocolPromiseHandler;
void discardEvaluateCallbacks();
std::unique_ptr<EvaluateCallback> takeEvaluateCallback(
EvaluateCallback* callback);
InspectedContext* m_context;
- v8::Global<v8::Value> m_value;
int m_sessionId;
v8::Global<v8::Value> m_lastEvaluationResult;
v8::Global<v8::Object> m_commandLineAPI;
@@ -216,6 +226,7 @@ class InjectedScript final {
std::unordered_map<int, String16> m_idToObjectGroupName;
std::unordered_map<String16, std::vector<int>> m_nameToObjectGroup;
std::unordered_set<EvaluateCallback*> m_evaluateCallbacks;
+ bool m_customPreviewEnabled = false;
DISALLOW_COPY_AND_ASSIGN(InjectedScript);
};
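Across these signatures, the (returnByValue, generatePreview) boolean pair is replaced by the single WrapMode enum declared later in this patch (v8-debugger.h). A sketch of the mapping, mirroring the logic in evaluateOnCallFrame further below; kForceValue takes precedence over preview generation:

    enum class WrapMode { kForceValue, kNoPreview, kWithPreview };

    // Bool-pair-to-enum migration used throughout this patch.
    WrapMode toWrapMode(bool returnByValue, bool generatePreview) {
      if (returnByValue) return WrapMode::kForceValue;
      return generatePreview ? WrapMode::kWithPreview : WrapMode::kNoPreview;
    }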
diff --git a/deps/v8/src/inspector/injected_script_externs.js b/deps/v8/src/inspector/injected_script_externs.js
deleted file mode 100644
index d293b8547d..0000000000
--- a/deps/v8/src/inspector/injected_script_externs.js
+++ /dev/null
@@ -1,122 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-/** @interface */
-function InjectedScriptHostClass()
-{
-}
-
-/**
- * @param {*} obj
- */
-InjectedScriptHostClass.prototype.nullifyPrototype = function(obj) {}
-
-/**
- * @param {*} obj
- * @param {string} name
- * @return {*}
- */
-InjectedScriptHostClass.prototype.getProperty = function(obj, name) {}
-
-/**
- * @param {*} obj
- * @return {string}
- */
-InjectedScriptHostClass.prototype.internalConstructorName = function(obj) {}
-
-/**
- * @param {*} obj
- * @param {function()|undefined} func
- * @return {boolean}
- */
-InjectedScriptHostClass.prototype.formatAccessorsAsProperties = function(obj, func) {}
-
-/**
- * @param {*} obj
- * @return {string}
- */
-InjectedScriptHostClass.prototype.subtype = function(obj) {}
-
-/**
- * @param {*} obj
- * @return {boolean}
- */
-InjectedScriptHostClass.prototype.isTypedArray = function(obj) {}
-
-/**
- * @param {*} obj
- * @return {!Array.<*>}
- */
-InjectedScriptHostClass.prototype.getInternalProperties = function(obj) {}
-
-/**
- * @param {!Object} object
- * @param {string} propertyName
- * @return {boolean}
- */
-InjectedScriptHostClass.prototype.objectHasOwnProperty = function(object, propertyName) {}
-
-/**
- * @param {*} value
- * @param {string} groupName
- * @return {number}
- */
-InjectedScriptHostClass.prototype.bind = function(value, groupName) {}
-
-/**
- * @param {!Object} object
- * @return {!Object}
- */
-InjectedScriptHostClass.prototype.proxyTargetValue = function(object) {}
-
-/**
- * @param {!Object} obj
- * @return {!Array<string>}
- */
-InjectedScriptHostClass.prototype.keys = function(obj) {}
-
-/**
- * @param {!Object} obj
- * @return {Object}
- */
-InjectedScriptHostClass.prototype.getPrototypeOf = function(obj) {}
-
-/**
- * @param {!Object} obj
- * @param {string} prop
- * @return {Object}
- */
-InjectedScriptHostClass.prototype.getOwnPropertyDescriptor = function(obj, prop) {}
-
-/**
- * @param {!Object} obj
- * @return {!Array<string>}
- */
-InjectedScriptHostClass.prototype.getOwnPropertyNames = function(obj) {}
-
-/**
- * @param {!Object} obj
- * @return {!Array<symbol>}
- */
-InjectedScriptHostClass.prototype.getOwnPropertySymbols = function(obj) {}
-
-/**
- * @param {!Object} obj
- * @param {string|symbol} name
- * @return {{isBuiltin:boolean, hasGetter:boolean, hasSetter:boolean}|undefined}
- */
-InjectedScriptHostClass.prototype.nativeAccessorDescriptor = function(obj, name) {}
-
-/**
- * @param {!Object} arrayBuffer
- * @return {Array<Object>|undefined}
- */
-InjectedScriptHostClass.prototype.typedArrayProperties = function(arrayBuffer) {}
-
-/** @type {!InjectedScriptHostClass} */
-var InjectedScriptHost;
-/** @type {!Window} */
-var inspectedGlobalObject;
-/** @type {number} */
-var injectedScriptId;
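The externs above describe the JS InjectedScriptHost interface for the Closure compiler; with the script-host object removed and inspection moved to the native ValueMirror machinery (forward-declared in injected-script.h earlier in this patch), they have no remaining consumer and are deleted wholesale.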
diff --git a/deps/v8/src/inspector/inspected-context.cc b/deps/v8/src/inspector/inspected-context.cc
index bb98c87158..92f64c2cb9 100644
--- a/deps/v8/src/inspector/inspected-context.cc
+++ b/deps/v8/src/inspector/inspected-context.cc
@@ -109,18 +109,40 @@ InjectedScript* InspectedContext::getInjectedScript(int sessionId) {
return it == m_injectedScripts.end() ? nullptr : it->second.get();
}
-bool InspectedContext::createInjectedScript(int sessionId) {
+InjectedScript* InspectedContext::createInjectedScript(int sessionId) {
std::unique_ptr<InjectedScript> injectedScript =
- InjectedScript::create(this, sessionId);
- // InjectedScript::create can destroy |this|.
- if (!injectedScript) return false;
+ v8::base::make_unique<InjectedScript>(this, sessionId);
CHECK(m_injectedScripts.find(sessionId) == m_injectedScripts.end());
m_injectedScripts[sessionId] = std::move(injectedScript);
- return true;
+ return getInjectedScript(sessionId);
}
void InspectedContext::discardInjectedScript(int sessionId) {
m_injectedScripts.erase(sessionId);
}
+bool InspectedContext::addInternalObject(v8::Local<v8::Object> object,
+ V8InternalValueType type) {
+ if (m_internalObjects.IsEmpty()) {
+ m_internalObjects.Reset(isolate(), v8::debug::WeakMap::New(isolate()));
+ }
+ return !m_internalObjects.Get(isolate())
+ ->Set(m_context.Get(isolate()), object,
+ v8::Integer::New(isolate(), static_cast<int>(type)))
+ .IsEmpty();
+}
+
+V8InternalValueType InspectedContext::getInternalType(
+ v8::Local<v8::Object> object) {
+ if (m_internalObjects.IsEmpty()) return V8InternalValueType::kNone;
+ v8::Local<v8::Value> typeValue;
+ if (!m_internalObjects.Get(isolate())
+ ->Get(m_context.Get(isolate()), object)
+ .ToLocal(&typeValue) ||
+ !typeValue->IsUint32()) {
+ return V8InternalValueType::kNone;
+ }
+ return static_cast<V8InternalValueType>(typeValue.As<v8::Int32>()->Value());
+}
+
} // namespace v8_inspector
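A usage sketch of the new per-context tagging, assuming an initialized InspectedContext* ctx and its isolate; the v8::debug::WeakMap keys are weak, so a tag never keeps its object alive. Note that getInternalType validates the stored value with IsUint32() but reads it back through As<v8::Int32>(); the enum constants are small non-negative integers, so the two views agree.

    // Sketch only: tag a synthetic scope object, then read the tag back.
    void tagScopeObject(v8::Isolate* isolate, InspectedContext* ctx) {
      v8::HandleScope handleScope(isolate);
      v8::Local<v8::Object> scope = v8::Object::New(isolate);
      if (!ctx->addInternalObject(scope, V8InternalValueType::kScope)) return;
      DCHECK(ctx->getInternalType(scope) == V8InternalValueType::kScope);
    }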
diff --git a/deps/v8/src/inspector/inspected-context.h b/deps/v8/src/inspector/inspected-context.h
index ef0a0ca52a..4ec52dc1e4 100644
--- a/deps/v8/src/inspector/inspected-context.h
+++ b/deps/v8/src/inspector/inspected-context.h
@@ -9,6 +9,7 @@
#include <unordered_set>
#include "src/base/macros.h"
+#include "src/debug/debug-interface.h"
#include "src/inspector/string-16.h"
#include "include/v8.h"
@@ -20,6 +21,8 @@ class InjectedScriptHost;
class V8ContextInfo;
class V8InspectorImpl;
+enum class V8InternalValueType { kNone, kEntry, kScope, kScopeList };
+
class InspectedContext {
public:
~InspectedContext();
@@ -40,9 +43,13 @@ class InspectedContext {
V8InspectorImpl* inspector() const { return m_inspector; }
InjectedScript* getInjectedScript(int sessionId);
- bool createInjectedScript(int sessionId);
+ InjectedScript* createInjectedScript(int sessionId);
void discardInjectedScript(int sessionId);
+ bool addInternalObject(v8::Local<v8::Object> object,
+ V8InternalValueType type);
+ V8InternalValueType getInternalType(v8::Local<v8::Object> object);
+
private:
friend class V8InspectorImpl;
InspectedContext(V8InspectorImpl*, const V8ContextInfo&, int contextId);
@@ -59,6 +66,7 @@ class InspectedContext {
std::unordered_set<int> m_reportedSessionIds;
std::unordered_map<int, std::unique_ptr<InjectedScript>> m_injectedScripts;
WeakCallbackData* m_weakCallbackData;
+ v8::Global<v8::debug::WeakMap> m_internalObjects;
DISALLOW_COPY_AND_ASSIGN(InspectedContext);
};
diff --git a/deps/v8/src/inspector/inspector_protocol_config.json b/deps/v8/src/inspector/inspector_protocol_config.json
index fa073128b3..82927b44e6 100644
--- a/deps/v8/src/inspector/inspector_protocol_config.json
+++ b/deps/v8/src/inspector/inspector_protocol_config.json
@@ -16,7 +16,6 @@
},
{
"domain": "Debugger",
- "async": [ "scheduleStepIntoAsync" ],
"exported": ["SearchMatch", "paused.reason"]
},
{
diff --git a/deps/v8/src/inspector/js_protocol.json b/deps/v8/src/inspector/js_protocol.json
index 23f8875063..ea6c995088 100644
--- a/deps/v8/src/inspector/js_protocol.json
+++ b/deps/v8/src/inspector/js_protocol.json
@@ -546,11 +546,6 @@
"description": "Resumes JavaScript execution."
},
{
- "name": "scheduleStepIntoAsync",
- "description": "This method is deprecated - use Debugger.stepInto with breakOnAsyncCall and\nDebugger.pauseOnAsyncTask instead. Steps into next scheduled async task if any is scheduled\nbefore next pause. Returns success when async task is actually scheduled, returns error if no\ntask were scheduled or another scheduleStepIntoAsync was called.",
- "experimental": true
- },
- {
"name": "searchInContent",
"description": "Searches for given string in script content.",
"parameters": [
@@ -1189,6 +1184,11 @@
"type": "number"
},
{
+ "name": "id",
+ "description": "Node id. Ids are unique across all profiles collected between startSampling and stopSampling.",
+ "type": "integer"
+ },
+ {
"name": "children",
"description": "Child nodes.",
"type": "array",
@@ -1199,13 +1199,42 @@
]
},
{
+ "id": "SamplingHeapProfileSample",
+ "description": "A single sample from a sampling profile.",
+ "type": "object",
+ "properties": [
+ {
+ "name": "size",
+ "description": "Allocation size in bytes attributed to the sample.",
+ "type": "number"
+ },
+ {
+ "name": "nodeId",
+ "description": "Id of the corresponding profile tree node.",
+ "type": "integer"
+ },
+ {
+ "name": "ordinal",
+ "description": "Time-ordered sample ordinal number. It is unique across all profiles retrieved\nbetween startSampling and stopSampling.",
+ "type": "number"
+ }
+ ]
+ },
+ {
"id": "SamplingHeapProfile",
- "description": "Profile.",
+ "description": "Sampling profile.",
"type": "object",
"properties": [
{
"name": "head",
"$ref": "SamplingHeapProfileNode"
+ },
+ {
+ "name": "samples",
+ "type": "array",
+ "items": {
+ "$ref": "SamplingHeapProfileSample"
+ }
}
]
}
@@ -1856,7 +1885,9 @@
"error",
"proxy",
"promise",
- "typedarray"
+ "typedarray",
+ "arraybuffer",
+ "dataview"
]
},
{
@@ -1911,22 +1942,12 @@
"properties": [
{
"name": "header",
+ "description": "The JSON-stringified result of formatter.header(object, config) call.\nIt contains json ML array that represents RemoteObject.",
"type": "string"
},
{
- "name": "hasBody",
- "type": "boolean"
- },
- {
- "name": "formatterObjectId",
- "$ref": "RemoteObjectId"
- },
- {
- "name": "bindRemoteObjectFunctionId",
- "$ref": "RemoteObjectId"
- },
- {
- "name": "configObjectId",
+ "name": "bodyGetterId",
+ "description": "If formatter returns true as a result of formatter.hasBody call then bodyGetterId will\ncontain RemoteObjectId for the function that returns result of formatter.body(object, config) call.\nThe result value is json ML array.",
"optional": true,
"$ref": "RemoteObjectId"
}
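The eager formatterObjectId/bindRemoteObjectFunctionId/configObjectId triple collapses into one optional bodyGetterId: the front-end can render header immediately (it is already a JSON-stringified JSONML array) and fetches the body lazily, typically by invoking the bodyGetterId function through Runtime.callFunctionOn once the user expands the object.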
diff --git a/deps/v8/src/inspector/js_protocol.pdl b/deps/v8/src/inspector/js_protocol.pdl
index a7e368d2b8..93c988ea30 100644
--- a/deps/v8/src/inspector/js_protocol.pdl
+++ b/deps/v8/src/inspector/js_protocol.pdl
@@ -259,12 +259,6 @@ domain Debugger
# Resumes JavaScript execution.
command resume
- # This method is deprecated - use Debugger.stepInto with breakOnAsyncCall and
- # Debugger.pauseOnAsyncTask instead. Steps into next scheduled async task if any is scheduled
- # before next pause. Returns success when async task is actually scheduled, returns error if no
- # task were scheduled or another scheduleStepIntoAsync was called.
- experimental command scheduleStepIntoAsync
-
# Searches for given string in script content.
command searchInContent
parameters
@@ -556,13 +550,27 @@ experimental domain HeapProfiler
Runtime.CallFrame callFrame
# Allocations size in bytes for the node excluding children.
number selfSize
+ # Node id. Ids are unique across all profiles collected between startSampling and stopSampling.
+ integer id
# Child nodes.
array of SamplingHeapProfileNode children
- # Profile.
+ # A single sample from a sampling profile.
+ type SamplingHeapProfileSample extends object
+ properties
+ # Allocation size in bytes attributed to the sample.
+ number size
+ # Id of the corresponding profile tree node.
+ integer nodeId
+ # Time-ordered sample ordinal number. It is unique across all profiles retrieved
+ # between startSampling and stopSampling.
+ number ordinal
+
+ # Sampling profile.
type SamplingHeapProfile extends object
properties
SamplingHeapProfileNode head
+ array of SamplingHeapProfileSample samples
# Enables console to refer to the node with given id via $x (see Command Line API for more details
# $x functions).
@@ -873,6 +881,8 @@ domain Runtime
proxy
promise
typedarray
+ arraybuffer
+ dataview
# Object class (constructor) name. Specified for `object` type values only.
optional string className
# Remote object value in case of primitive values or JSON values (if it was requested).
@@ -890,11 +900,13 @@ domain Runtime
experimental type CustomPreview extends object
properties
+ # The JSON-stringified result of formatter.header(object, config) call.
+ # It contains json ML array that represents RemoteObject.
string header
- boolean hasBody
- RemoteObjectId formatterObjectId
- RemoteObjectId bindRemoteObjectFunctionId
- optional RemoteObjectId configObjectId
+ # If formatter returns true as a result of formatter.hasBody call then bodyGetterId will
+ # contain RemoteObjectId for the function that returns result of formatter.body(object, config) call.
+ # The result value is json ML array.
+ optional RemoteObjectId bodyGetterId
# Object containing abbreviated remote object value.
experimental type ObjectPreview extends object
diff --git a/deps/v8/src/inspector/string-16.cc b/deps/v8/src/inspector/string-16.cc
index eb77ddd5fb..30219a062d 100644
--- a/deps/v8/src/inspector/string-16.cc
+++ b/deps/v8/src/inspector/string-16.cc
@@ -370,14 +370,6 @@ static inline void putUTF8Triple(char*& buffer, UChar ch) {
} // namespace
-String16::String16() = default;
-
-String16::String16(const String16& other) = default;
-
-String16::String16(String16&& other) V8_NOEXCEPT
- : m_impl(std::move(other.m_impl)),
- hash_code(other.hash_code) {}
-
String16::String16(const UChar* characters, size_t size)
: m_impl(characters, size) {}
@@ -393,14 +385,6 @@ String16::String16(const char* characters, size_t size) {
String16::String16(const std::basic_string<UChar>& impl) : m_impl(impl) {}
-String16& String16::operator=(const String16& other) = default;
-
-String16& String16::operator=(String16&& other) V8_NOEXCEPT {
- m_impl = std::move(other.m_impl);
- hash_code = other.hash_code;
- return *this;
-}
-
// static
String16 String16::fromInteger(int number) {
char arr[50];
diff --git a/deps/v8/src/inspector/string-16.h b/deps/v8/src/inspector/string-16.h
index 7c6f9e992d..56df993332 100644
--- a/deps/v8/src/inspector/string-16.h
+++ b/deps/v8/src/inspector/string-16.h
@@ -22,17 +22,17 @@ class String16 {
public:
static const size_t kNotFound = static_cast<size_t>(-1);
- String16();
- String16(const String16& other);
- String16(String16&& other) V8_NOEXCEPT;
+ String16() = default;
+ String16(const String16&) V8_NOEXCEPT = default;
+ String16(String16&&) V8_NOEXCEPT = default;
String16(const UChar* characters, size_t size);
String16(const UChar* characters); // NOLINT(runtime/explicit)
String16(const char* characters); // NOLINT(runtime/explicit)
String16(const char* characters, size_t size);
explicit String16(const std::basic_string<UChar>& impl);
- String16& operator=(const String16& other);
- String16& operator=(String16&& other) V8_NOEXCEPT;
+ String16& operator=(const String16&) V8_NOEXCEPT = default;
+ String16& operator=(String16&&) V8_NOEXCEPT = default;
static String16 fromInteger(int);
static String16 fromInteger(size_t);
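Defaulting the special members lets the compiler derive them from std::basic_string<UChar>, with V8_NOEXCEPT keeping the moves non-throwing. A self-contained sketch of the idiom with a simplified stand-in type:

    #include <cstddef>
    #include <string>
    #include <type_traits>

    class Str16 {  // hypothetical stand-in for String16
     public:
      Str16() = default;
      Str16(const Str16&) = default;
      Str16(Str16&&) noexcept = default;
      Str16& operator=(const Str16&) = default;
      Str16& operator=(Str16&&) noexcept = default;

     private:
      std::basic_string<char16_t> m_impl;
      mutable std::size_t hash_code = 0;
    };

    // The defaulted move is derived from basic_string's noexcept move.
    static_assert(std::is_nothrow_move_constructible<Str16>::value,
                  "moves must not throw");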
diff --git a/deps/v8/src/inspector/string-util.cc b/deps/v8/src/inspector/string-util.cc
index 79c5dcc9cf..2992f08530 100644
--- a/deps/v8/src/inspector/string-util.cc
+++ b/deps/v8/src/inspector/string-util.cc
@@ -7,7 +7,6 @@
#include "src/base/platform/platform.h"
#include "src/conversions.h"
#include "src/inspector/protocol/Protocol.h"
-#include "src/unicode-cache.h"
namespace v8_inspector {
@@ -99,10 +98,9 @@ namespace protocol {
// static
double StringUtil::toDouble(const char* s, size_t len, bool* isOk) {
- v8::internal::UnicodeCache unicode_cache;
int flags = v8::internal::ALLOW_HEX | v8::internal::ALLOW_OCTAL |
v8::internal::ALLOW_BINARY;
- double result = StringToDouble(&unicode_cache, s, flags);
+ double result = v8::internal::StringToDouble(s, flags);
*isOk = !std::isnan(result);
return result;
}
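StringToDouble no longer takes a UnicodeCache, so the call site drops the per-call cache object. A rough standard-library analogue of the *isOk contract, for illustration only (the real conversion uses v8::internal::StringToDouble and its ALLOW_* flags, which accept more formats than strtod):

    #include <cmath>
    #include <cstdlib>

    double toDoubleSketch(const char* s, bool* isOk) {
      char* end = nullptr;
      double result = std::strtod(s, &end);
      *isOk = end != s && !std::isnan(result);  // NaN or no progress => failure
      return result;
    }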
diff --git a/deps/v8/src/inspector/string-util.h b/deps/v8/src/inspector/string-util.h
index 97aaa93eb7..4ab39bd6d1 100644
--- a/deps/v8/src/inspector/string-util.h
+++ b/deps/v8/src/inspector/string-util.h
@@ -5,6 +5,7 @@
#ifndef V8_INSPECTOR_STRING_UTIL_H_
#define V8_INSPECTOR_STRING_UTIL_H_
+#include <stdint.h>
#include <memory>
#include "src/base/logging.h"
@@ -60,6 +61,19 @@ class StringUtil {
static std::unique_ptr<protocol::Value> parseJSON(const StringView& json);
};
+// A read-only sequence of uninterpreted bytes with reference-counted storage.
+// Though the templates for generating the protocol bindings reference
+// this type, js_protocol.pdl doesn't have a field of type 'binary', so
+// it's unnecessary to provide an implementation here.
+class Binary {
+ public:
+ const uint8_t* data() const { UNIMPLEMENTED(); }
+ size_t size() const { UNIMPLEMENTED(); }
+ String toBase64() const { UNIMPLEMENTED(); }
+ static Binary fromBase64(const String& base64, bool* success) {
+ UNIMPLEMENTED();
+ }
+};
} // namespace protocol
v8::Local<v8::String> toV8String(v8::Isolate*, const String16&);
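Since js_protocol.pdl declares no binary fields, these stubs are never instantiated; should a future protocol revision add one, each accessor fails fast through UNIMPLEMENTED() rather than returning an unspecified value.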
diff --git a/deps/v8/src/inspector/v8-console-message.cc b/deps/v8/src/inspector/v8-console-message.cc
index 6d39deeb4c..687dfd2217 100644
--- a/deps/v8/src/inspector/v8-console-message.cc
+++ b/deps/v8/src/inspector/v8-console-message.cc
@@ -259,19 +259,33 @@ V8ConsoleMessage::wrapArguments(V8InspectorSessionImpl* session,
std::unique_ptr<protocol::Array<protocol::Runtime::RemoteObject>> args =
protocol::Array<protocol::Runtime::RemoteObject>::create();
- if (m_type == ConsoleAPIType::kTable && generatePreview) {
- v8::Local<v8::Value> table = m_arguments[0]->Get(isolate);
- v8::Local<v8::Value> columns = m_arguments.size() > 1
- ? m_arguments[1]->Get(isolate)
- : v8::Local<v8::Value>();
+
+ v8::Local<v8::Value> value = m_arguments[0]->Get(isolate);
+ if (value->IsObject() && m_type == ConsoleAPIType::kTable &&
+ generatePreview) {
+ v8::MaybeLocal<v8::Array> columns;
+ if (m_arguments.size() > 1) {
+ v8::Local<v8::Value> secondArgument = m_arguments[1]->Get(isolate);
+ if (secondArgument->IsArray()) {
+ columns = v8::Local<v8::Array>::Cast(secondArgument);
+ } else if (secondArgument->IsString()) {
+ v8::TryCatch tryCatch(isolate);
+ v8::Local<v8::Array> array = v8::Array::New(isolate);
+ if (array->Set(context, 0, secondArgument).IsJust()) {
+ columns = array;
+ }
+ }
+ }
std::unique_ptr<protocol::Runtime::RemoteObject> wrapped =
- session->wrapTable(context, table, columns);
+ session->wrapTable(context, v8::Local<v8::Object>::Cast(value),
+ columns);
inspectedContext = inspector->getContext(contextGroupId, contextId);
if (!inspectedContext) return nullptr;
- if (wrapped)
+ if (wrapped) {
args->addItem(std::move(wrapped));
- else
+ } else {
args = nullptr;
+ }
} else {
for (size_t i = 0; i < m_arguments.size(); ++i) {
std::unique_ptr<protocol::Runtime::RemoteObject> wrapped =
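console.table now requires an object as its first argument before taking the table path, and a string second argument is normalized into a one-element array, so console.table(rows, "name") behaves like console.table(rows, ["name"]); a second argument of any other type is simply ignored, while a non-object first argument falls through to the generic per-argument wrapping below.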
diff --git a/deps/v8/src/inspector/v8-console.cc b/deps/v8/src/inspector/v8-console.cc
index ef4c7ccd1d..ccf6e4aea2 100644
--- a/deps/v8/src/inspector/v8-console.cc
+++ b/deps/v8/src/inspector/v8-console.cc
@@ -600,9 +600,8 @@ static void inspectImpl(const v8::FunctionCallbackInfo<v8::Value>& info,
InjectedScript* injectedScript = helper.injectedScript(sessionId);
if (!injectedScript) return;
std::unique_ptr<protocol::Runtime::RemoteObject> wrappedObject;
- protocol::Response response =
- injectedScript->wrapObject(value, "", false /** forceValueType */,
- false /** generatePreview */, &wrappedObject);
+ protocol::Response response = injectedScript->wrapObject(
+ value, "", WrapMode::kNoPreview, &wrappedObject);
if (!response.isSuccess()) return;
std::unique_ptr<protocol::DictionaryValue> hints =
diff --git a/deps/v8/src/inspector/v8-debugger-agent-impl.cc b/deps/v8/src/inspector/v8-debugger-agent-impl.cc
index d227526d64..8b87bf4a50 100644
--- a/deps/v8/src/inspector/v8-debugger-agent-impl.cc
+++ b/deps/v8/src/inspector/v8-debugger-agent-impl.cc
@@ -260,8 +260,9 @@ Response buildScopes(v8::Isolate* isolate, v8::debug::ScopeIterator* iterator,
for (; !iterator->Done(); iterator->Advance()) {
std::unique_ptr<RemoteObject> object;
- Response result = injectedScript->wrapObject(
- iterator->GetObject(), kBacktraceObjectGroup, false, false, &object);
+ Response result =
+ injectedScript->wrapObject(iterator->GetObject(), kBacktraceObjectGroup,
+ WrapMode::kNoPreview, &object);
if (!result.isSuccess()) return result;
auto scope = Scope::create()
@@ -1003,16 +1004,6 @@ Response V8DebuggerAgentImpl::stepOut() {
return Response::OK();
}
-void V8DebuggerAgentImpl::scheduleStepIntoAsync(
- std::unique_ptr<ScheduleStepIntoAsyncCallback> callback) {
- if (!isPaused()) {
- callback->sendFailure(Response::Error(kDebuggerNotPaused));
- return;
- }
- m_debugger->scheduleStepIntoAsync(std::move(callback),
- m_session->contextGroupId());
-}
-
Response V8DebuggerAgentImpl::pauseOnAsyncCall(
std::unique_ptr<protocol::Runtime::StackTraceId> inParentStackTraceId) {
bool isOk = false;
@@ -1073,7 +1064,7 @@ Response V8DebuggerAgentImpl::evaluateOnCallFrame(
v8::MaybeLocal<v8::Value> maybeResultValue;
{
- V8InspectorImpl::EvaluateScope evaluateScope(m_isolate);
+ V8InspectorImpl::EvaluateScope evaluateScope(scope);
if (timeout.isJust()) {
response = evaluateScope.setTimeout(timeout.fromJust() / 1000.0);
if (!response.isSuccess()) return response;
@@ -1085,10 +1076,12 @@ Response V8DebuggerAgentImpl::evaluateOnCallFrame(
// context or session.
response = scope.initialize();
if (!response.isSuccess()) return response;
+ WrapMode mode = generatePreview.fromMaybe(false) ? WrapMode::kWithPreview
+ : WrapMode::kNoPreview;
+ if (returnByValue.fromMaybe(false)) mode = WrapMode::kForceValue;
return scope.injectedScript()->wrapEvaluateResult(
- maybeResultValue, scope.tryCatch(), objectGroup.fromMaybe(""),
- returnByValue.fromMaybe(false), generatePreview.fromMaybe(false), result,
- exceptionDetails);
+ maybeResultValue, scope.tryCatch(), objectGroup.fromMaybe(""), mode,
+ result, exceptionDetails);
}
Response V8DebuggerAgentImpl::setVariableValue(
@@ -1268,8 +1261,9 @@ Response V8DebuggerAgentImpl::currentCallFrames(
if (injectedScript) {
v8::Local<v8::Value> receiver;
if (iterator->GetReceiver().ToLocal(&receiver)) {
- res = injectedScript->wrapObject(receiver, kBacktraceObjectGroup, false,
- false, &protocolReceiver);
+ res =
+ injectedScript->wrapObject(receiver, kBacktraceObjectGroup,
+ WrapMode::kNoPreview, &protocolReceiver);
if (!res.isSuccess()) return res;
}
}
@@ -1320,7 +1314,7 @@ Response V8DebuggerAgentImpl::currentCallFrames(
if (!returnValue.IsEmpty() && injectedScript) {
std::unique_ptr<RemoteObject> value;
res = injectedScript->wrapObject(returnValue, kBacktraceObjectGroup,
- false, false, &value);
+ WrapMode::kNoPreview, &value);
if (!res.isSuccess()) return res;
frame->setReturnValue(std::move(value));
}
@@ -1418,8 +1412,9 @@ void V8DebuggerAgentImpl::didParseSource(
std::unique_ptr<V8StackTraceImpl> stack =
V8StackTraceImpl::capture(m_inspector->debugger(), contextGroupId, 1);
std::unique_ptr<protocol::Runtime::StackTrace> stackTrace =
- stack && !stack->isEmpty() ? stack->buildInspectorObjectImpl(m_debugger)
- : nullptr;
+ stack && !stack->isEmpty()
+ ? stack->buildInspectorObjectImpl(m_debugger, 0)
+ : nullptr;
if (success) {
// TODO(herhut, dgozman): Report correct length for WASM if needed for
// coverage. Or do not send the length at all and change coverage instead.
@@ -1527,8 +1522,8 @@ void V8DebuggerAgentImpl::didPause(
? protocol::Debugger::Paused::ReasonEnum::PromiseRejection
: protocol::Debugger::Paused::ReasonEnum::Exception;
std::unique_ptr<protocol::Runtime::RemoteObject> obj;
- injectedScript->wrapObject(exception, kBacktraceObjectGroup, false, false,
- &obj);
+ injectedScript->wrapObject(exception, kBacktraceObjectGroup,
+ WrapMode::kNoPreview, &obj);
std::unique_ptr<protocol::DictionaryValue> breakAuxData;
if (obj) {
breakAuxData = obj->toValue();
diff --git a/deps/v8/src/inspector/v8-debugger-agent-impl.h b/deps/v8/src/inspector/v8-debugger-agent-impl.h
index 9806c85f48..d048dff610 100644
--- a/deps/v8/src/inspector/v8-debugger-agent-impl.h
+++ b/deps/v8/src/inspector/v8-debugger-agent-impl.h
@@ -96,8 +96,6 @@ class V8DebuggerAgentImpl : public protocol::Debugger::Backend {
Response stepOver() override;
Response stepInto(Maybe<bool> inBreakOnAsyncCall) override;
Response stepOut() override;
- void scheduleStepIntoAsync(
- std::unique_ptr<ScheduleStepIntoAsyncCallback> callback) override;
Response pauseOnAsyncCall(std::unique_ptr<protocol::Runtime::StackTraceId>
inParentStackTraceId) override;
Response setPauseOnExceptions(const String16& pauseState) override;
diff --git a/deps/v8/src/inspector/v8-debugger-script.cc b/deps/v8/src/inspector/v8-debugger-script.cc
index c1efd2dba1..d9dda1fb01 100644
--- a/deps/v8/src/inspector/v8-debugger-script.cc
+++ b/deps/v8/src/inspector/v8-debugger-script.cc
@@ -417,8 +417,9 @@ class WasmVirtualScript : public V8DebuggerScript {
private:
static const String16& emptyString() {
- static const String16 singleEmptyString;
- return singleEmptyString;
+ // On the heap and leaked so that no destructor needs to run at exit time.
+ static const String16* singleEmptyString = new String16;
+ return *singleEmptyString;
}
v8::Local<v8::debug::Script> script() const override {
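A self-contained sketch of the intentional-leak idiom used here, commonly applied to function-local statics that would otherwise register exit-time destructors:

    #include <string>

    const std::string& emptyString() {
      // Heap-allocated once and never freed: one allocation for the whole
      // process lifetime, and no destructor to run at shutdown.
      static const std::string* kEmpty = new std::string();
      return *kEmpty;
    }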
diff --git a/deps/v8/src/inspector/v8-debugger.cc b/deps/v8/src/inspector/v8-debugger.cc
index 5f826b56a9..1539446452 100644
--- a/deps/v8/src/inspector/v8-debugger.cc
+++ b/deps/v8/src/inspector/v8-debugger.cc
@@ -10,7 +10,6 @@
#include "src/inspector/v8-debugger-agent-impl.h"
#include "src/inspector/v8-inspector-impl.h"
#include "src/inspector/v8-inspector-session-impl.h"
-#include "src/inspector/v8-internal-value-type.h"
#include "src/inspector/v8-runtime-agent-impl.h"
#include "src/inspector/v8-stack-trace-impl.h"
#include "src/inspector/v8-value-utils.h"
@@ -24,102 +23,6 @@ namespace {
static const int kMaxAsyncTaskStacks = 128 * 1024;
static const int kNoBreakpointId = 0;
-v8::MaybeLocal<v8::Array> collectionsEntries(v8::Local<v8::Context> context,
- v8::Local<v8::Value> value) {
- v8::Isolate* isolate = context->GetIsolate();
- v8::Local<v8::Array> entries;
- bool isKeyValue = false;
- if (!value->IsObject() ||
- !value.As<v8::Object>()->PreviewEntries(&isKeyValue).ToLocal(&entries)) {
- return v8::MaybeLocal<v8::Array>();
- }
-
- v8::Local<v8::Array> wrappedEntries = v8::Array::New(isolate);
- CHECK(!isKeyValue || wrappedEntries->Length() % 2 == 0);
- if (!wrappedEntries->SetPrototype(context, v8::Null(isolate))
- .FromMaybe(false))
- return v8::MaybeLocal<v8::Array>();
- for (uint32_t i = 0; i < entries->Length(); i += isKeyValue ? 2 : 1) {
- v8::Local<v8::Value> item;
- if (!entries->Get(context, i).ToLocal(&item)) continue;
- v8::Local<v8::Value> value;
- if (isKeyValue && !entries->Get(context, i + 1).ToLocal(&value)) continue;
- v8::Local<v8::Object> wrapper = v8::Object::New(isolate);
- if (!wrapper->SetPrototype(context, v8::Null(isolate)).FromMaybe(false))
- continue;
- createDataProperty(
- context, wrapper,
- toV8StringInternalized(isolate, isKeyValue ? "key" : "value"), item);
- if (isKeyValue) {
- createDataProperty(context, wrapper,
- toV8StringInternalized(isolate, "value"), value);
- }
- createDataProperty(context, wrappedEntries, wrappedEntries->Length(),
- wrapper);
- }
- if (!markArrayEntriesAsInternal(context, wrappedEntries,
- V8InternalValueType::kEntry)) {
- return v8::MaybeLocal<v8::Array>();
- }
- return wrappedEntries;
-}
-
-v8::MaybeLocal<v8::Object> buildLocation(v8::Local<v8::Context> context,
- int scriptId, int lineNumber,
- int columnNumber) {
- if (scriptId == v8::UnboundScript::kNoScriptId)
- return v8::MaybeLocal<v8::Object>();
- if (lineNumber == v8::Function::kLineOffsetNotFound ||
- columnNumber == v8::Function::kLineOffsetNotFound) {
- return v8::MaybeLocal<v8::Object>();
- }
- v8::Isolate* isolate = context->GetIsolate();
- v8::Local<v8::Object> location = v8::Object::New(isolate);
- if (!location->SetPrototype(context, v8::Null(isolate)).FromMaybe(false)) {
- return v8::MaybeLocal<v8::Object>();
- }
- if (!createDataProperty(context, location,
- toV8StringInternalized(isolate, "scriptId"),
- toV8String(isolate, String16::fromInteger(scriptId)))
- .FromMaybe(false)) {
- return v8::MaybeLocal<v8::Object>();
- }
- if (!createDataProperty(context, location,
- toV8StringInternalized(isolate, "lineNumber"),
- v8::Integer::New(isolate, lineNumber))
- .FromMaybe(false)) {
- return v8::MaybeLocal<v8::Object>();
- }
- if (!createDataProperty(context, location,
- toV8StringInternalized(isolate, "columnNumber"),
- v8::Integer::New(isolate, columnNumber))
- .FromMaybe(false)) {
- return v8::MaybeLocal<v8::Object>();
- }
- if (!markAsInternal(context, location, V8InternalValueType::kLocation)) {
- return v8::MaybeLocal<v8::Object>();
- }
- return location;
-}
-
-v8::MaybeLocal<v8::Object> generatorObjectLocation(
- v8::Local<v8::Context> context, v8::Local<v8::Value> value) {
- if (!value->IsGeneratorObject()) return v8::MaybeLocal<v8::Object>();
- v8::Local<v8::debug::GeneratorObject> generatorObject =
- v8::debug::GeneratorObject::Cast(value);
- if (!generatorObject->IsSuspended()) {
- v8::Local<v8::Function> func = generatorObject->Function();
- return buildLocation(context, func->ScriptId(), func->GetScriptLineNumber(),
- func->GetScriptColumnNumber());
- }
- v8::Local<v8::debug::Script> script;
- if (!generatorObject->Script().ToLocal(&script))
- return v8::MaybeLocal<v8::Object>();
- v8::debug::Location suspendedLocation = generatorObject->SuspendedLocation();
- return buildLocation(context, script->Id(), suspendedLocation.GetLineNumber(),
- suspendedLocation.GetColumnNumber());
-}
-
template <typename Map>
void cleanupExpiredWeakPointers(Map& map) {
for (auto it = map.begin(); it != map.end();) {
@@ -376,19 +279,6 @@ bool V8Debugger::asyncStepOutOfFunction(int targetContextGroupId,
return true;
}
-void V8Debugger::scheduleStepIntoAsync(
- std::unique_ptr<ScheduleStepIntoAsyncCallback> callback,
- int targetContextGroupId) {
- DCHECK(isPaused());
- DCHECK(targetContextGroupId);
- if (m_stepIntoAsyncCallback) {
- m_stepIntoAsyncCallback->sendFailure(Response::Error(
- "Current scheduled step into async was overriden with new one."));
- }
- m_targetContextGroupId = targetContextGroupId;
- m_stepIntoAsyncCallback = std::move(callback);
-}
-
void V8Debugger::pauseOnAsyncCall(int targetContextGroupId, uintptr_t task,
const String16& debuggerId) {
DCHECK(targetContextGroupId);
@@ -415,19 +305,22 @@ void V8Debugger::terminateExecution(
m_isolate->TerminateExecution();
}
-void V8Debugger::terminateExecutionCompletedCallback(v8::Isolate* isolate) {
- isolate->RemoveCallCompletedCallback(
+void V8Debugger::reportTermination() {
+ if (!m_terminateExecutionCallback) return;
+ m_isolate->RemoveCallCompletedCallback(
&V8Debugger::terminateExecutionCompletedCallback);
- isolate->RemoveMicrotasksCompletedCallback(
+ m_isolate->RemoveMicrotasksCompletedCallback(
&V8Debugger::terminateExecutionCompletedCallback);
+ m_isolate->CancelTerminateExecution();
+ m_terminateExecutionCallback->sendSuccess();
+ m_terminateExecutionCallback.reset();
+}
+
+void V8Debugger::terminateExecutionCompletedCallback(v8::Isolate* isolate) {
V8InspectorImpl* inspector =
static_cast<V8InspectorImpl*>(v8::debug::GetInspector(isolate));
V8Debugger* debugger = inspector->debugger();
- debugger->m_isolate->CancelTerminateExecution();
- if (debugger->m_terminateExecutionCallback) {
- debugger->m_terminateExecutionCallback->sendSuccess();
- debugger->m_terminateExecutionCallback.reset();
- }
+ debugger->reportTermination();
}
Response V8Debugger::continueToLocation(
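reportTermination centralizes the teardown that the static callback used to do inline. A minimal sketch of the underlying isolate pattern, assuming only a v8::Isolate* (the notification hook is left as a comment):

    #include "include/v8.h"

    static void OnTerminated(v8::Isolate* isolate) {
      isolate->RemoveCallCompletedCallback(&OnTerminated);
      isolate->RemoveMicrotasksCompletedCallback(&OnTerminated);
      isolate->CancelTerminateExecution();
      // notify whoever requested the termination here
    }

    void RequestTermination(v8::Isolate* isolate) {
      // Fire OnTerminated once execution actually unwinds, whether the
      // last frame was a regular call or a microtask.
      isolate->AddCallCompletedCallback(&OnTerminated);
      isolate->AddMicrotasksCompletedCallback(&OnTerminated);
      isolate->TerminateExecution();
    }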
@@ -490,11 +383,6 @@ void V8Debugger::handleProgramBreak(
return;
}
m_targetContextGroupId = 0;
- if (m_stepIntoAsyncCallback) {
- m_stepIntoAsyncCallback->sendFailure(
- Response::Error("No async tasks were scheduled before pause."));
- m_stepIntoAsyncCallback.reset();
- }
m_breakRequested = false;
m_pauseOnAsyncCall = false;
m_taskWithScheduledBreak = nullptr;
@@ -563,7 +451,8 @@ size_t V8Debugger::nearHeapLimitCallback(void* data, size_t current_heap_limit,
V8Debugger* thisPtr = static_cast<V8Debugger*>(data);
thisPtr->m_originalHeapLimit = current_heap_limit;
thisPtr->m_scheduledOOMBreak = true;
- v8::Local<v8::Context> context = thisPtr->m_isolate->GetEnteredContext();
+ v8::Local<v8::Context> context =
+ thisPtr->m_isolate->GetEnteredOrMicrotaskContext();
thisPtr->m_targetContextGroupId =
context.IsEmpty() ? 0 : thisPtr->m_inspector->contextGroupId(context);
thisPtr->m_isolate->RequestInterrupt(
@@ -714,28 +603,50 @@ v8::MaybeLocal<v8::Value> V8Debugger::getTargetScopes(
for (; !iterator->Done(); iterator->Advance()) {
v8::Local<v8::Object> scope = v8::Object::New(m_isolate);
- if (!markAsInternal(context, scope, V8InternalValueType::kScope)) {
+ if (!addInternalObject(context, scope, V8InternalValueType::kScope))
return v8::MaybeLocal<v8::Value>();
- }
- String16 type = v8_inspector::scopeType(iterator->GetType());
- String16 name;
- v8::Local<v8::Value> maybe_name = iterator->GetFunctionDebugName();
- if (!maybe_name->IsUndefined()) {
- name = toProtocolStringWithTypeCheck(m_isolate, maybe_name);
+ String16 nameSuffix = toProtocolStringWithTypeCheck(
+ m_isolate, iterator->GetFunctionDebugName());
+ String16 description;
+ if (nameSuffix.length()) nameSuffix = " (" + nameSuffix + ")";
+ switch (iterator->GetType()) {
+ case v8::debug::ScopeIterator::ScopeTypeGlobal:
+ description = "Global" + nameSuffix;
+ break;
+ case v8::debug::ScopeIterator::ScopeTypeLocal:
+ description = "Local" + nameSuffix;
+ break;
+ case v8::debug::ScopeIterator::ScopeTypeWith:
+ description = "With Block" + nameSuffix;
+ break;
+ case v8::debug::ScopeIterator::ScopeTypeClosure:
+ description = "Closure" + nameSuffix;
+ break;
+ case v8::debug::ScopeIterator::ScopeTypeCatch:
+ description = "Catch" + nameSuffix;
+ break;
+ case v8::debug::ScopeIterator::ScopeTypeBlock:
+ description = "Block" + nameSuffix;
+ break;
+ case v8::debug::ScopeIterator::ScopeTypeScript:
+ description = "Script" + nameSuffix;
+ break;
+ case v8::debug::ScopeIterator::ScopeTypeEval:
+ description = "Eval" + nameSuffix;
+ break;
+ case v8::debug::ScopeIterator::ScopeTypeModule:
+ description = "Module" + nameSuffix;
+ break;
}
v8::Local<v8::Object> object = iterator->GetObject();
createDataProperty(context, scope,
- toV8StringInternalized(m_isolate, "type"),
- toV8String(m_isolate, type));
- createDataProperty(context, scope,
- toV8StringInternalized(m_isolate, "name"),
- toV8String(m_isolate, name));
+ toV8StringInternalized(m_isolate, "description"),
+ toV8String(m_isolate, description));
createDataProperty(context, scope,
toV8StringInternalized(m_isolate, "object"), object);
createDataProperty(context, result, result->Length(), scope);
}
- if (!markAsInternal(context, v8::Local<v8::Array>::Cast(result),
- V8InternalValueType::kScopeList))
+ if (!addInternalObject(context, result, V8InternalValueType::kScopeList))
return v8::MaybeLocal<v8::Value>();
return result;
}
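Rather than shipping separate type and name properties for the front-end to assemble, each synthetic scope object now carries one ready-made description string, e.g. "Closure (makeCounter)" for a named closure (makeCounter being a hypothetical function name) or plain "Global" when there is no debug name.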
@@ -750,21 +661,42 @@ v8::MaybeLocal<v8::Value> V8Debugger::generatorScopes(
return getTargetScopes(context, generator, GENERATOR);
}
-v8::MaybeLocal<v8::Uint32> V8Debugger::stableObjectId(
+v8::MaybeLocal<v8::Array> V8Debugger::collectionsEntries(
v8::Local<v8::Context> context, v8::Local<v8::Value> value) {
- DCHECK(value->IsObject());
- if (m_stableObjectId.IsEmpty()) {
- m_stableObjectId.Reset(m_isolate, v8::debug::WeakMap::New(m_isolate));
+ v8::Isolate* isolate = context->GetIsolate();
+ v8::Local<v8::Array> entries;
+ bool isKeyValue = false;
+ if (!value->IsObject() ||
+ !value.As<v8::Object>()->PreviewEntries(&isKeyValue).ToLocal(&entries)) {
+ return v8::MaybeLocal<v8::Array>();
}
- v8::Local<v8::debug::WeakMap> stableObjectId =
- m_stableObjectId.Get(m_isolate);
- v8::Local<v8::Value> idValue;
- if (!stableObjectId->Get(context, value).ToLocal(&idValue) ||
- !idValue->IsUint32()) {
- idValue = v8::Integer::NewFromUnsigned(m_isolate, ++m_lastStableObjectId);
- stableObjectId->Set(context, value, idValue).ToLocalChecked();
+
+ v8::Local<v8::Array> wrappedEntries = v8::Array::New(isolate);
+ CHECK(!isKeyValue || wrappedEntries->Length() % 2 == 0);
+ if (!wrappedEntries->SetPrototype(context, v8::Null(isolate))
+ .FromMaybe(false))
+ return v8::MaybeLocal<v8::Array>();
+ for (uint32_t i = 0; i < entries->Length(); i += isKeyValue ? 2 : 1) {
+ v8::Local<v8::Value> item;
+ if (!entries->Get(context, i).ToLocal(&item)) continue;
+ v8::Local<v8::Value> value;
+ if (isKeyValue && !entries->Get(context, i + 1).ToLocal(&value)) continue;
+ v8::Local<v8::Object> wrapper = v8::Object::New(isolate);
+ if (!wrapper->SetPrototype(context, v8::Null(isolate)).FromMaybe(false))
+ continue;
+ createDataProperty(
+ context, wrapper,
+ toV8StringInternalized(isolate, isKeyValue ? "key" : "value"), item);
+ if (isKeyValue) {
+ createDataProperty(context, wrapper,
+ toV8StringInternalized(isolate, "value"), value);
+ }
+ if (!addInternalObject(context, wrapper, V8InternalValueType::kEntry))
+ continue;
+ createDataProperty(context, wrappedEntries, wrappedEntries->Length(),
+ wrapper);
}
- return idValue.As<v8::Uint32>();
+ return wrappedEntries;
}
v8::MaybeLocal<v8::Array> V8Debugger::internalProperties(
@@ -772,34 +704,6 @@ v8::MaybeLocal<v8::Array> V8Debugger::internalProperties(
v8::Local<v8::Array> properties;
if (!v8::debug::GetInternalProperties(m_isolate, value).ToLocal(&properties))
return v8::MaybeLocal<v8::Array>();
- if (value->IsObject()) {
- v8::Local<v8::Uint32> id;
- if (stableObjectId(context, value).ToLocal(&id)) {
- createDataProperty(
- context, properties, properties->Length(),
- toV8StringInternalized(m_isolate, "[[StableObjectId]]"));
- createDataProperty(context, properties, properties->Length(), id);
- }
- }
- if (value->IsFunction()) {
- v8::Local<v8::Function> function = value.As<v8::Function>();
- v8::Local<v8::Object> location;
- if (buildLocation(context, function->ScriptId(),
- function->GetScriptLineNumber(),
- function->GetScriptColumnNumber())
- .ToLocal(&location)) {
- createDataProperty(
- context, properties, properties->Length(),
- toV8StringInternalized(m_isolate, "[[FunctionLocation]]"));
- createDataProperty(context, properties, properties->Length(), location);
- }
- if (function->IsGeneratorFunction()) {
- createDataProperty(context, properties, properties->Length(),
- toV8StringInternalized(m_isolate, "[[IsGenerator]]"));
- createDataProperty(context, properties, properties->Length(),
- v8::True(m_isolate));
- }
- }
v8::Local<v8::Array> entries;
if (collectionsEntries(context, value).ToLocal(&entries)) {
createDataProperty(context, properties, properties->Length(),
@@ -807,13 +711,6 @@ v8::MaybeLocal<v8::Array> V8Debugger::internalProperties(
createDataProperty(context, properties, properties->Length(), entries);
}
if (value->IsGeneratorObject()) {
- v8::Local<v8::Object> location;
- if (generatorObjectLocation(context, value).ToLocal(&location)) {
- createDataProperty(
- context, properties, properties->Length(),
- toV8StringInternalized(m_isolate, "[[GeneratorLocation]]"));
- createDataProperty(context, properties, properties->Length(), location);
- }
v8::Local<v8::Value> scopes;
if (generatorScopes(context, value).ToLocal(&scopes)) {
createDataProperty(context, properties, properties->Length(),
@@ -1032,26 +929,18 @@ void V8Debugger::asyncTaskFinishedForStack(void* task) {
}
void V8Debugger::asyncTaskCandidateForStepping(void* task, bool isLocal) {
+ if (!m_pauseOnAsyncCall) return;
int contextGroupId = currentContextGroupId();
- if (m_pauseOnAsyncCall && contextGroupId) {
- if (isLocal) {
- m_scheduledAsyncCall = v8_inspector::V8StackTraceId(
- reinterpret_cast<uintptr_t>(task), std::make_pair(0, 0));
- } else {
- m_scheduledAsyncCall = v8_inspector::V8StackTraceId(
- reinterpret_cast<uintptr_t>(task), debuggerIdFor(contextGroupId));
- }
- breakProgram(m_targetContextGroupId);
- m_scheduledAsyncCall = v8_inspector::V8StackTraceId();
- return;
- }
- if (!m_stepIntoAsyncCallback) return;
- DCHECK(m_targetContextGroupId);
if (contextGroupId != m_targetContextGroupId) return;
- m_taskWithScheduledBreak = task;
- v8::debug::ClearStepping(m_isolate);
- m_stepIntoAsyncCallback->sendSuccess();
- m_stepIntoAsyncCallback.reset();
+ if (isLocal) {
+ m_scheduledAsyncCall = v8_inspector::V8StackTraceId(
+ reinterpret_cast<uintptr_t>(task), std::make_pair(0, 0));
+ } else {
+ m_scheduledAsyncCall = v8_inspector::V8StackTraceId(
+ reinterpret_cast<uintptr_t>(task), debuggerIdFor(contextGroupId));
+ }
+ breakProgram(m_targetContextGroupId);
+ m_scheduledAsyncCall = v8_inspector::V8StackTraceId();
}
void V8Debugger::asyncTaskStartedForStepping(void* task) {
@@ -1196,6 +1085,15 @@ std::pair<int64_t, int64_t> V8Debugger::debuggerIdFor(
return std::make_pair(0, 0);
}
+bool V8Debugger::addInternalObject(v8::Local<v8::Context> context,
+ v8::Local<v8::Object> object,
+ V8InternalValueType type) {
+ int contextId = InspectedContext::contextId(context);
+ InspectedContext* inspectedContext = m_inspector->getContext(contextId);
+ return inspectedContext ? inspectedContext->addInternalObject(object, type)
+ : false;
+}
+
void V8Debugger::dumpAsyncTaskStacksStateForTest() {
fprintf(stdout, "Async stacks count: %d\n", m_asyncStacksCount);
fprintf(stdout, "Scheduled async tasks: %zu\n", m_asyncTaskStacks.size());
diff --git a/deps/v8/src/inspector/v8-debugger.h b/deps/v8/src/inspector/v8-debugger.h
index a99653add6..91d8c7e248 100644
--- a/deps/v8/src/inspector/v8-debugger.h
+++ b/deps/v8/src/inspector/v8-debugger.h
@@ -11,7 +11,7 @@
#include <vector>
#include "src/base/macros.h"
-#include "src/debug/debug-interface.h"
+#include "src/inspector/inspected-context.h"
#include "src/inspector/protocol/Debugger.h"
#include "src/inspector/protocol/Forward.h"
#include "src/inspector/protocol/Runtime.h"
@@ -30,9 +30,9 @@ class V8InspectorImpl;
class V8StackTraceImpl;
struct V8StackTraceId;
+enum class WrapMode { kForceValue, kNoPreview, kWithPreview };
+
using protocol::Response;
-using ScheduleStepIntoAsyncCallback =
- protocol::Debugger::Backend::ScheduleStepIntoAsyncCallback;
using TerminateExecutionCallback =
protocol::Runtime::Backend::TerminateExecutionCallback;
@@ -59,9 +59,6 @@ class V8Debugger : public v8::debug::DebugDelegate,
void stepIntoStatement(int targetContextGroupId, bool breakOnAsyncCall);
void stepOverStatement(int targetContextGroupId);
void stepOutOfFunction(int targetContextGroupId);
- void scheduleStepIntoAsync(
- std::unique_ptr<ScheduleStepIntoAsyncCallback> callback,
- int targetContextGroupId);
void pauseOnAsyncCall(int targetContextGroupId, uintptr_t task,
const String16& debuggerId);
@@ -134,14 +131,19 @@ class V8Debugger : public v8::debug::DebugDelegate,
std::shared_ptr<AsyncStackTrace> stackTraceFor(int contextGroupId,
const V8StackTraceId& id);
+ void reportTermination();
+
private:
+ bool addInternalObject(v8::Local<v8::Context> context,
+ v8::Local<v8::Object> object,
+ V8InternalValueType type);
+
void clearContinueToLocation();
bool shouldContinueToCurrentLocation();
static size_t nearHeapLimitCallback(void* data, size_t current_heap_limit,
size_t initial_heap_limit);
static void terminateExecutionCompletedCallback(v8::Isolate* isolate);
-
void handleProgramBreak(
v8::Local<v8::Context> pausedContext, v8::Local<v8::Value> exception,
const std::vector<v8::debug::BreakpointId>& hitBreakpoints,
@@ -160,6 +162,8 @@ class V8Debugger : public v8::debug::DebugDelegate,
v8::Local<v8::Function>);
v8::MaybeLocal<v8::Value> generatorScopes(v8::Local<v8::Context>,
v8::Local<v8::Value>);
+ v8::MaybeLocal<v8::Array> collectionsEntries(v8::Local<v8::Context> context,
+ v8::Local<v8::Value> value);
void asyncTaskScheduledForStack(const String16& taskName, void* task,
bool recurring);
@@ -191,12 +195,10 @@ class V8Debugger : public v8::debug::DebugDelegate,
int currentContextGroupId();
bool asyncStepOutOfFunction(int targetContextGroupId, bool onlyAtReturn);
- v8::MaybeLocal<v8::Uint32> stableObjectId(v8::Local<v8::Context>,
- v8::Local<v8::Value>);
-
v8::Isolate* m_isolate;
V8InspectorImpl* m_inspector;
int m_enableCount;
+
int m_breakpointsActiveCount = 0;
int m_ignoreScriptParsedEventsCounter;
size_t m_originalHeapLimit = 0;
@@ -231,7 +233,6 @@ class V8Debugger : public v8::debug::DebugDelegate,
void* m_taskWithScheduledBreak = nullptr;
String16 m_taskWithScheduledBreakDebuggerId;
- std::unique_ptr<ScheduleStepIntoAsyncCallback> m_stepIntoAsyncCallback;
bool m_breakRequested = false;
v8::debug::ExceptionBreakState m_pauseOnExceptionsState;
@@ -250,9 +251,6 @@ class V8Debugger : public v8::debug::DebugDelegate,
std::unique_ptr<TerminateExecutionCallback> m_terminateExecutionCallback;
- uint32_t m_lastStableObjectId = 0;
- v8::Global<v8::debug::WeakMap> m_stableObjectId;
-
WasmTranslation m_wasmTranslation;
DISALLOW_COPY_AND_ASSIGN(V8Debugger);
diff --git a/deps/v8/src/inspector/v8-function-call.cc b/deps/v8/src/inspector/v8-function-call.cc
deleted file mode 100644
index ebeb7a3d07..0000000000
--- a/deps/v8/src/inspector/v8-function-call.cc
+++ /dev/null
@@ -1,115 +0,0 @@
-/*
- * Copyright (C) 2009 Google Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Google Inc. nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "src/inspector/v8-function-call.h"
-
-#include "src/inspector/inspected-context.h"
-#include "src/inspector/string-util.h"
-#include "src/inspector/v8-debugger.h"
-#include "src/inspector/v8-inspector-impl.h"
-
-#include "include/v8-inspector.h"
-
-namespace v8_inspector {
-
-V8FunctionCall::V8FunctionCall(V8InspectorImpl* inspector,
- v8::Local<v8::Context> context,
- v8::Local<v8::Value> value, const String16& name)
- : m_inspector(inspector),
- m_context(context),
- m_name(toV8String(context->GetIsolate(), name)),
- m_value(value) {}
-
-void V8FunctionCall::appendArgument(v8::Local<v8::Value> value) {
- m_arguments.push_back(value);
-}
-
-void V8FunctionCall::appendArgument(const String16& argument) {
- m_arguments.push_back(toV8String(m_context->GetIsolate(), argument));
-}
-
-void V8FunctionCall::appendArgument(int argument) {
- m_arguments.push_back(v8::Number::New(m_context->GetIsolate(), argument));
-}
-
-void V8FunctionCall::appendArgument(bool argument) {
- m_arguments.push_back(argument ? v8::True(m_context->GetIsolate())
- : v8::False(m_context->GetIsolate()));
-}
-
-v8::Local<v8::Value> V8FunctionCall::call(bool& hadException,
- bool reportExceptions) {
- v8::TryCatch tryCatch(m_context->GetIsolate());
- tryCatch.SetVerbose(reportExceptions);
-
- v8::Local<v8::Value> result = callWithoutExceptionHandling();
- hadException = tryCatch.HasCaught();
- return result;
-}
-
-v8::Local<v8::Value> V8FunctionCall::callWithoutExceptionHandling() {
- v8::Context::Scope contextScope(m_context);
-
- v8::Local<v8::Object> thisObject = v8::Local<v8::Object>::Cast(m_value);
- v8::Local<v8::Value> value;
- if (!thisObject->Get(m_context, m_name).ToLocal(&value))
- return v8::Local<v8::Value>();
-
- DCHECK(value->IsFunction());
-
- v8::Local<v8::Function> function = v8::Local<v8::Function>::Cast(value);
- std::unique_ptr<v8::Local<v8::Value>[]> info(
- new v8::Local<v8::Value>[m_arguments.size()]);
- for (size_t i = 0; i < m_arguments.size(); ++i) {
- info[i] = m_arguments[i];
- DCHECK(!info[i].IsEmpty());
- }
-
- int contextGroupId = m_inspector->contextGroupId(m_context);
- if (contextGroupId) {
- m_inspector->client()->muteMetrics(contextGroupId);
- m_inspector->muteExceptions(contextGroupId);
- }
- v8::MicrotasksScope microtasksScope(m_context->GetIsolate(),
- v8::MicrotasksScope::kDoNotRunMicrotasks);
- v8::Isolate::AllowJavascriptExecutionScope(m_context->GetIsolate());
- v8::MaybeLocal<v8::Value> maybeResult = function->Call(
- m_context, thisObject, static_cast<int>(m_arguments.size()), info.get());
- if (contextGroupId) {
- m_inspector->client()->unmuteMetrics(contextGroupId);
- m_inspector->unmuteExceptions(contextGroupId);
- }
-
- v8::Local<v8::Value> result;
- if (!maybeResult.ToLocal(&result)) return v8::Local<v8::Value>();
- return result;
-}
-
-} // namespace v8_inspector
diff --git a/deps/v8/src/inspector/v8-function-call.h b/deps/v8/src/inspector/v8-function-call.h
deleted file mode 100644
index 28a5886c91..0000000000
--- a/deps/v8/src/inspector/v8-function-call.h
+++ /dev/null
@@ -1,65 +0,0 @@
-/*
- * Copyright (C) 2009 Google Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Google Inc. nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef V8_INSPECTOR_V8_FUNCTION_CALL_H_
-#define V8_INSPECTOR_V8_FUNCTION_CALL_H_
-
-#include "src/inspector/string-16.h"
-
-#include "include/v8.h"
-
-namespace v8_inspector {
-
-class V8InspectorImpl;
-
-class V8FunctionCall {
- public:
- V8FunctionCall(V8InspectorImpl*, v8::Local<v8::Context>, v8::Local<v8::Value>,
- const String16& name);
-
- void appendArgument(v8::Local<v8::Value>);
- void appendArgument(const String16&);
- void appendArgument(int);
- void appendArgument(bool);
-
- v8::Local<v8::Value> call(bool& hadException, bool reportExceptions = true);
- v8::Local<v8::Value> callWithoutExceptionHandling();
-
- protected:
- V8InspectorImpl* m_inspector;
- v8::Local<v8::Context> m_context;
- std::vector<v8::Local<v8::Value>> m_arguments;
- v8::Local<v8::String> m_name;
- v8::Local<v8::Value> m_value;
-};
-
-} // namespace v8_inspector
-
-#endif // V8_INSPECTOR_V8_FUNCTION_CALL_H_
diff --git a/deps/v8/src/inspector/v8-heap-profiler-agent-impl.cc b/deps/v8/src/inspector/v8-heap-profiler-agent-impl.cc
index e50fe0e893..b1d60877fe 100644
--- a/deps/v8/src/inspector/v8-heap-profiler-agent-impl.cc
+++ b/deps/v8/src/inspector/v8-heap-profiler-agent-impl.cc
@@ -357,6 +357,7 @@ buildSampingHeapProfileNode(v8::Isolate* isolate,
.setCallFrame(std::move(callFrame))
.setSelfSize(selfSize)
.setChildren(std::move(children))
+ .setId(node->node_id)
.build();
return result;
}
@@ -376,15 +377,25 @@ Response V8HeapProfilerAgentImpl::stopSampling(
Response V8HeapProfilerAgentImpl::getSamplingProfile(
std::unique_ptr<protocol::HeapProfiler::SamplingHeapProfile>* profile) {
v8::HeapProfiler* profiler = m_isolate->GetHeapProfiler();
- v8::HandleScope scope(
- m_isolate); // v8::AllocationProfile contains Local handles.
+ // Need a scope as v8::AllocationProfile contains Local handles.
+ v8::HandleScope scope(m_isolate);
std::unique_ptr<v8::AllocationProfile> v8Profile(
profiler->GetAllocationProfile());
if (!v8Profile)
return Response::Error("V8 sampling heap profiler was not started.");
v8::AllocationProfile::Node* root = v8Profile->GetRootNode();
+ auto samples = protocol::Array<
+ protocol::HeapProfiler::SamplingHeapProfileSample>::create();
+ for (const auto& sample : v8Profile->GetSamples()) {
+ samples->addItem(protocol::HeapProfiler::SamplingHeapProfileSample::create()
+ .setSize(sample.size * sample.count)
+ .setNodeId(sample.node_id)
+ .setOrdinal(static_cast<double>(sample.sample_id))
+ .build());
+ }
*profile = protocol::HeapProfiler::SamplingHeapProfile::create()
.setHead(buildSampingHeapProfileNode(m_isolate, root))
+ .setSamples(std::move(samples))
.build();
return Response::OK();
}
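
The hunk above extends the sampling-profile response with per-sample records; each reported size is the per-object size multiplied by the number of objects attributed to the sample. A minimal sketch of consuming the same v8::AllocationProfile data from the embedder side, assuming only the API the hunk calls (names are illustrative):

    #include <memory>
    #include "include/v8-profiler.h"

    void DumpSamples(v8::Isolate* isolate) {
      // A HandleScope is required: AllocationProfile holds Local handles.
      v8::HandleScope scope(isolate);
      std::unique_ptr<v8::AllocationProfile> profile(
          isolate->GetHeapProfiler()->GetAllocationProfile());
      if (!profile) return;  // Sampling heap profiler was not started.
      for (const auto& sample : profile->GetSamples()) {
        // Mirrors setSize() above: per-object size times object count.
        size_t totalSize = sample.size * sample.count;
        (void)totalSize;  // sample.node_id ties the sample to a tree node.
      }
    }
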
diff --git a/deps/v8/src/inspector/v8-injected-script-host.cc b/deps/v8/src/inspector/v8-injected-script-host.cc
deleted file mode 100644
index d9c1d59aa8..0000000000
--- a/deps/v8/src/inspector/v8-injected-script-host.cc
+++ /dev/null
@@ -1,427 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/inspector/v8-injected-script-host.h"
-
-#include "src/base/macros.h"
-#include "src/debug/debug-interface.h"
-#include "src/inspector/injected-script.h"
-#include "src/inspector/string-util.h"
-#include "src/inspector/v8-debugger.h"
-#include "src/inspector/v8-inspector-impl.h"
-#include "src/inspector/v8-internal-value-type.h"
-#include "src/inspector/v8-value-utils.h"
-
-#include "include/v8-inspector.h"
-
-namespace v8_inspector {
-
-namespace {
-
-void setFunctionProperty(v8::Local<v8::Context> context,
- v8::Local<v8::Object> obj, const char* name,
- v8::FunctionCallback callback,
- v8::Local<v8::External> external) {
- v8::Local<v8::String> funcName =
- toV8StringInternalized(context->GetIsolate(), name);
- v8::Local<v8::Function> func;
- if (!v8::Function::New(context, callback, external, 0,
- v8::ConstructorBehavior::kThrow)
- .ToLocal(&func))
- return;
- func->SetName(funcName);
- createDataProperty(context, obj, funcName, func);
-}
-
-V8InspectorImpl* unwrapInspector(
- const v8::FunctionCallbackInfo<v8::Value>& info) {
- DCHECK(!info.Data().IsEmpty());
- DCHECK(info.Data()->IsExternal());
- V8InspectorImpl* inspector =
- static_cast<V8InspectorImpl*>(info.Data().As<v8::External>()->Value());
- DCHECK(inspector);
- return inspector;
-}
-
-template <typename TypedArray>
-void addTypedArrayProperty(std::vector<v8::Local<v8::Value>>* props,
- v8::Isolate* isolate,
- v8::Local<v8::ArrayBuffer> arraybuffer,
- String16 name, size_t length) {
- props->push_back(toV8String(isolate, name));
- props->push_back(TypedArray::New(arraybuffer, 0, length));
-}
-
-} // namespace
-
-v8::Local<v8::Object> V8InjectedScriptHost::create(
- v8::Local<v8::Context> context, V8InspectorImpl* inspector) {
- v8::Isolate* isolate = inspector->isolate();
- v8::Local<v8::Object> injectedScriptHost = v8::Object::New(isolate);
- bool success = injectedScriptHost->SetPrototype(context, v8::Null(isolate))
- .FromMaybe(false);
- DCHECK(success);
- USE(success);
- v8::Local<v8::External> debuggerExternal =
- v8::External::New(isolate, inspector);
- setFunctionProperty(context, injectedScriptHost, "nullifyPrototype",
- V8InjectedScriptHost::nullifyPrototypeCallback,
- debuggerExternal);
- setFunctionProperty(context, injectedScriptHost, "getProperty",
- V8InjectedScriptHost::getPropertyCallback,
- debuggerExternal);
- setFunctionProperty(context, injectedScriptHost, "internalConstructorName",
- V8InjectedScriptHost::internalConstructorNameCallback,
- debuggerExternal);
- setFunctionProperty(
- context, injectedScriptHost, "formatAccessorsAsProperties",
- V8InjectedScriptHost::formatAccessorsAsProperties, debuggerExternal);
- setFunctionProperty(context, injectedScriptHost, "subtype",
- V8InjectedScriptHost::subtypeCallback, debuggerExternal);
- setFunctionProperty(context, injectedScriptHost, "getInternalProperties",
- V8InjectedScriptHost::getInternalPropertiesCallback,
- debuggerExternal);
- setFunctionProperty(context, injectedScriptHost, "objectHasOwnProperty",
- V8InjectedScriptHost::objectHasOwnPropertyCallback,
- debuggerExternal);
- setFunctionProperty(context, injectedScriptHost, "bind",
- V8InjectedScriptHost::bindCallback, debuggerExternal);
- setFunctionProperty(context, injectedScriptHost, "proxyTargetValue",
- V8InjectedScriptHost::proxyTargetValueCallback,
- debuggerExternal);
- setFunctionProperty(context, injectedScriptHost, "nativeAccessorDescriptor",
- V8InjectedScriptHost::nativeAccessorDescriptorCallback,
- debuggerExternal);
- setFunctionProperty(context, injectedScriptHost, "typedArrayProperties",
- V8InjectedScriptHost::typedArrayPropertiesCallback,
- debuggerExternal);
- createDataProperty(context, injectedScriptHost,
- toV8StringInternalized(isolate, "keys"),
- v8::debug::GetBuiltin(isolate, v8::debug::kObjectKeys));
- createDataProperty(
- context, injectedScriptHost,
- toV8StringInternalized(isolate, "getPrototypeOf"),
- v8::debug::GetBuiltin(isolate, v8::debug::kObjectGetPrototypeOf));
- createDataProperty(
- context, injectedScriptHost,
- toV8StringInternalized(isolate, "getOwnPropertyDescriptor"),
- v8::debug::GetBuiltin(isolate,
- v8::debug::kObjectGetOwnPropertyDescriptor));
- createDataProperty(
- context, injectedScriptHost,
- toV8StringInternalized(isolate, "getOwnPropertyNames"),
- v8::debug::GetBuiltin(isolate, v8::debug::kObjectGetOwnPropertyNames));
- createDataProperty(
- context, injectedScriptHost,
- toV8StringInternalized(isolate, "getOwnPropertySymbols"),
- v8::debug::GetBuiltin(isolate, v8::debug::kObjectGetOwnPropertySymbols));
- return injectedScriptHost;
-}
-
-void V8InjectedScriptHost::nullifyPrototypeCallback(
- const v8::FunctionCallbackInfo<v8::Value>& info) {
- CHECK_EQ(1, info.Length());
- DCHECK(info[0]->IsObject());
- if (!info[0]->IsObject()) return;
- v8::Isolate* isolate = info.GetIsolate();
- info[0]
- .As<v8::Object>()
- ->SetPrototype(isolate->GetCurrentContext(), v8::Null(isolate))
- .ToChecked();
-}
-
-void V8InjectedScriptHost::getPropertyCallback(
- const v8::FunctionCallbackInfo<v8::Value>& info) {
- CHECK(info.Length() == 2 && info[1]->IsString());
- if (!info[0]->IsObject()) return;
- v8::Isolate* isolate = info.GetIsolate();
- v8::Local<v8::Context> context = isolate->GetCurrentContext();
- v8::TryCatch tryCatch(isolate);
- v8::Isolate::DisallowJavascriptExecutionScope throwJs(
- isolate, v8::Isolate::DisallowJavascriptExecutionScope::THROW_ON_FAILURE);
- v8::Local<v8::Value> property;
- if (info[0]
- .As<v8::Object>()
- ->Get(context, v8::Local<v8::String>::Cast(info[1]))
- .ToLocal(&property)) {
- info.GetReturnValue().Set(property);
- }
-}
-
-void V8InjectedScriptHost::internalConstructorNameCallback(
- const v8::FunctionCallbackInfo<v8::Value>& info) {
- if (info.Length() < 1 || !info[0]->IsObject()) return;
-
- v8::Local<v8::Object> object = info[0].As<v8::Object>();
- info.GetReturnValue().Set(object->GetConstructorName());
-}
-
-void V8InjectedScriptHost::formatAccessorsAsProperties(
- const v8::FunctionCallbackInfo<v8::Value>& info) {
- DCHECK_EQ(info.Length(), 2);
- info.GetReturnValue().Set(false);
- if (!info[1]->IsFunction()) return;
- // Check that function is user-defined.
- if (info[1].As<v8::Function>()->ScriptId() != v8::UnboundScript::kNoScriptId)
- return;
- info.GetReturnValue().Set(
- unwrapInspector(info)->client()->formatAccessorsAsProperties(info[0]));
-}
-
-void V8InjectedScriptHost::subtypeCallback(
- const v8::FunctionCallbackInfo<v8::Value>& info) {
- if (info.Length() < 1) return;
-
- v8::Isolate* isolate = info.GetIsolate();
- v8::Local<v8::Value> value = info[0];
- if (value->IsObject()) {
- v8::Local<v8::Value> internalType = v8InternalValueTypeFrom(
- isolate->GetCurrentContext(), v8::Local<v8::Object>::Cast(value));
- if (internalType->IsString()) {
- info.GetReturnValue().Set(internalType);
- return;
- }
- }
- if (value->IsArray() || value->IsArgumentsObject()) {
- info.GetReturnValue().Set(toV8StringInternalized(isolate, "array"));
- return;
- }
- if (value->IsTypedArray()) {
- info.GetReturnValue().Set(toV8StringInternalized(isolate, "typedarray"));
- return;
- }
- if (value->IsDate()) {
- info.GetReturnValue().Set(toV8StringInternalized(isolate, "date"));
- return;
- }
- if (value->IsRegExp()) {
- info.GetReturnValue().Set(toV8StringInternalized(isolate, "regexp"));
- return;
- }
- if (value->IsMap()) {
- info.GetReturnValue().Set(toV8StringInternalized(isolate, "map"));
- return;
- }
- if (value->IsWeakMap()) {
- info.GetReturnValue().Set(toV8StringInternalized(isolate, "weakmap"));
- return;
- }
- if (value->IsSet()) {
- info.GetReturnValue().Set(toV8StringInternalized(isolate, "set"));
- return;
- }
- if (value->IsWeakSet()) {
- info.GetReturnValue().Set(toV8StringInternalized(isolate, "weakset"));
- return;
- }
- if (value->IsMapIterator() || value->IsSetIterator()) {
- info.GetReturnValue().Set(toV8StringInternalized(isolate, "iterator"));
- return;
- }
- if (value->IsGeneratorObject()) {
- info.GetReturnValue().Set(toV8StringInternalized(isolate, "generator"));
- return;
- }
- if (value->IsNativeError()) {
- info.GetReturnValue().Set(toV8StringInternalized(isolate, "error"));
- return;
- }
- if (value->IsProxy()) {
- info.GetReturnValue().Set(toV8StringInternalized(isolate, "proxy"));
- return;
- }
- if (value->IsPromise()) {
- info.GetReturnValue().Set(toV8StringInternalized(isolate, "promise"));
- return;
- }
- if (value->IsArrayBuffer() || value->IsSharedArrayBuffer()) {
- info.GetReturnValue().Set(toV8StringInternalized(isolate, "arraybuffer"));
- return;
- }
- if (value->IsDataView()) {
- info.GetReturnValue().Set(toV8StringInternalized(isolate, "dataview"));
- return;
- }
- std::unique_ptr<StringBuffer> subtype =
- unwrapInspector(info)->client()->valueSubtype(value);
- if (subtype) {
- info.GetReturnValue().Set(toV8String(isolate, subtype->string()));
- return;
- }
-}
-
-void V8InjectedScriptHost::getInternalPropertiesCallback(
- const v8::FunctionCallbackInfo<v8::Value>& info) {
- if (info.Length() < 1) return;
-
- std::unordered_set<String16> allowedProperties;
- if (info[0]->IsBooleanObject() || info[0]->IsNumberObject() ||
- info[0]->IsStringObject() || info[0]->IsSymbolObject() ||
- info[0]->IsBigIntObject()) {
- allowedProperties.insert(String16("[[PrimitiveValue]]"));
- } else if (info[0]->IsPromise()) {
- allowedProperties.insert(String16("[[PromiseStatus]]"));
- allowedProperties.insert(String16("[[PromiseValue]]"));
- } else if (info[0]->IsGeneratorObject()) {
- allowedProperties.insert(String16("[[GeneratorStatus]]"));
- } else if (info[0]->IsMap() || info[0]->IsWeakMap() || info[0]->IsSet() ||
- info[0]->IsWeakSet() || info[0]->IsMapIterator() ||
- info[0]->IsSetIterator()) {
- allowedProperties.insert(String16("[[Entries]]"));
- }
- if (!allowedProperties.size()) return;
-
- v8::Isolate* isolate = info.GetIsolate();
- v8::Local<v8::Array> allProperties;
- if (!unwrapInspector(info)
- ->debugger()
- ->internalProperties(isolate->GetCurrentContext(), info[0])
- .ToLocal(&allProperties) ||
- !allProperties->IsArray() || allProperties->Length() % 2 != 0)
- return;
-
- {
- v8::Local<v8::Context> context = isolate->GetCurrentContext();
- v8::TryCatch tryCatch(isolate);
- v8::Isolate::DisallowJavascriptExecutionScope throwJs(
- isolate,
- v8::Isolate::DisallowJavascriptExecutionScope::THROW_ON_FAILURE);
-
- v8::Local<v8::Array> properties = v8::Array::New(isolate);
- if (tryCatch.HasCaught()) return;
-
- uint32_t outputIndex = 0;
- for (uint32_t i = 0; i < allProperties->Length(); i += 2) {
- v8::Local<v8::Value> key;
- if (!allProperties->Get(context, i).ToLocal(&key)) continue;
- if (tryCatch.HasCaught()) {
- tryCatch.Reset();
- continue;
- }
- String16 keyString = toProtocolStringWithTypeCheck(isolate, key);
- if (keyString.isEmpty() ||
- allowedProperties.find(keyString) == allowedProperties.end())
- continue;
- v8::Local<v8::Value> value;
- if (!allProperties->Get(context, i + 1).ToLocal(&value)) continue;
- if (tryCatch.HasCaught()) {
- tryCatch.Reset();
- continue;
- }
- createDataProperty(context, properties, outputIndex++, key);
- createDataProperty(context, properties, outputIndex++, value);
- }
- info.GetReturnValue().Set(properties);
- }
-}
-
-void V8InjectedScriptHost::objectHasOwnPropertyCallback(
- const v8::FunctionCallbackInfo<v8::Value>& info) {
- if (info.Length() < 2 || !info[0]->IsObject() || !info[1]->IsString()) return;
- bool result = info[0]
- .As<v8::Object>()
- ->HasOwnProperty(info.GetIsolate()->GetCurrentContext(),
- v8::Local<v8::String>::Cast(info[1]))
- .FromMaybe(false);
- info.GetReturnValue().Set(v8::Boolean::New(info.GetIsolate(), result));
-}
-
-void V8InjectedScriptHost::bindCallback(
- const v8::FunctionCallbackInfo<v8::Value>& info) {
- if (info.Length() < 2 || !info[1]->IsString()) return;
- InjectedScript* injectedScript =
- InjectedScript::fromInjectedScriptHost(info.GetIsolate(), info.Holder());
- if (!injectedScript) return;
-
- v8::Local<v8::Context> context = info.GetIsolate()->GetCurrentContext();
- v8::Local<v8::String> v8groupName =
- info[1]->ToString(context).ToLocalChecked();
- String16 groupName =
- toProtocolStringWithTypeCheck(info.GetIsolate(), v8groupName);
- int id = injectedScript->bindObject(info[0], groupName);
- info.GetReturnValue().Set(id);
-}
-
-void V8InjectedScriptHost::proxyTargetValueCallback(
- const v8::FunctionCallbackInfo<v8::Value>& info) {
- if (info.Length() != 1 || !info[0]->IsProxy()) {
- UNREACHABLE();
- return;
- }
- v8::Local<v8::Value> target = info[0].As<v8::Proxy>();
- while (target->IsProxy())
- target = v8::Local<v8::Proxy>::Cast(target)->GetTarget();
- info.GetReturnValue().Set(target);
-}
-
-void V8InjectedScriptHost::nativeAccessorDescriptorCallback(
- const v8::FunctionCallbackInfo<v8::Value>& info) {
- v8::Isolate* isolate = info.GetIsolate();
- if (info.Length() != 2 || !info[0]->IsObject() || !info[1]->IsName()) {
- info.GetReturnValue().Set(v8::Undefined(isolate));
- return;
- }
- v8::Local<v8::Context> context = isolate->GetCurrentContext();
- int flags = v8::debug::GetNativeAccessorDescriptor(
- context, v8::Local<v8::Object>::Cast(info[0]),
- v8::Local<v8::Name>::Cast(info[1]));
- if (flags == static_cast<int>(v8::debug::NativeAccessorType::None)) {
- info.GetReturnValue().Set(v8::Undefined(isolate));
- return;
- }
-
- bool isBuiltin =
- flags & static_cast<int>(v8::debug::NativeAccessorType::IsBuiltin);
- bool hasGetter =
- flags & static_cast<int>(v8::debug::NativeAccessorType::HasGetter);
- bool hasSetter =
- flags & static_cast<int>(v8::debug::NativeAccessorType::HasSetter);
- v8::Local<v8::Object> result = v8::Object::New(isolate);
- result->SetPrototype(context, v8::Null(isolate)).ToChecked();
- createDataProperty(context, result, toV8String(isolate, "isBuiltin"),
- v8::Boolean::New(isolate, isBuiltin));
- createDataProperty(context, result, toV8String(isolate, "hasGetter"),
- v8::Boolean::New(isolate, hasGetter));
- createDataProperty(context, result, toV8String(isolate, "hasSetter"),
- v8::Boolean::New(isolate, hasSetter));
- info.GetReturnValue().Set(result);
-}
-
-void V8InjectedScriptHost::typedArrayPropertiesCallback(
- const v8::FunctionCallbackInfo<v8::Value>& info) {
- v8::Isolate* isolate = info.GetIsolate();
- if (info.Length() != 1 || !info[0]->IsArrayBuffer()) return;
-
- v8::TryCatch tryCatch(isolate);
- v8::Isolate::DisallowJavascriptExecutionScope throwJs(
- isolate, v8::Isolate::DisallowJavascriptExecutionScope::THROW_ON_FAILURE);
- v8::Local<v8::ArrayBuffer> arrayBuffer = info[0].As<v8::ArrayBuffer>();
- size_t length = arrayBuffer->ByteLength();
- if (length == 0) return;
- std::vector<v8::Local<v8::Value>> arrays_vector;
- addTypedArrayProperty<v8::Int8Array>(&arrays_vector, isolate, arrayBuffer,
- "[[Int8Array]]", length);
- addTypedArrayProperty<v8::Uint8Array>(&arrays_vector, isolate, arrayBuffer,
- "[[Uint8Array]]", length);
-
- if (length % 2 == 0) {
- addTypedArrayProperty<v8::Int16Array>(&arrays_vector, isolate, arrayBuffer,
- "[[Int16Array]]", length / 2);
- }
- if (length % 4 == 0) {
- addTypedArrayProperty<v8::Int32Array>(&arrays_vector, isolate, arrayBuffer,
- "[[Int32Array]]", length / 4);
- }
-
- if (tryCatch.HasCaught()) return;
- v8::Local<v8::Context> context = isolate->GetCurrentContext();
- v8::Local<v8::Array> arrays =
- v8::Array::New(isolate, static_cast<uint32_t>(arrays_vector.size()));
- for (uint32_t i = 0; i < static_cast<uint32_t>(arrays_vector.size()); i++)
- createDataProperty(context, arrays, i, arrays_vector[i]);
- if (tryCatch.HasCaught()) return;
- info.GetReturnValue().Set(arrays);
-}
-
-} // namespace v8_inspector
diff --git a/deps/v8/src/inspector/v8-injected-script-host.h b/deps/v8/src/inspector/v8-injected-script-host.h
deleted file mode 100644
index 6a3ee3d386..0000000000
--- a/deps/v8/src/inspector/v8-injected-script-host.h
+++ /dev/null
@@ -1,53 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_INSPECTOR_V8_INJECTED_SCRIPT_HOST_H_
-#define V8_INSPECTOR_V8_INJECTED_SCRIPT_HOST_H_
-
-#include "include/v8.h"
-
-namespace v8_inspector {
-
-class V8InspectorImpl;
-
-// SECURITY NOTE: Although the InjectedScriptHost is intended for use solely by
-// the inspector,
-// a reference to the InjectedScriptHost may be leaked to the page being
-// inspected. Thus, the
-// InjectedScriptHost must never implement methods that have more power over
-// the page than the
-// page already has itself (e.g. origin restriction bypasses).
-
-class V8InjectedScriptHost {
- public:
- // We expect that debugger outlives any JS context and thus
- // V8InjectedScriptHost (owned by JS)
- // is destroyed before inspector.
- static v8::Local<v8::Object> create(v8::Local<v8::Context>, V8InspectorImpl*);
-
- private:
- static void nullifyPrototypeCallback(
- const v8::FunctionCallbackInfo<v8::Value>&);
- static void getPropertyCallback(const v8::FunctionCallbackInfo<v8::Value>&);
- static void internalConstructorNameCallback(
- const v8::FunctionCallbackInfo<v8::Value>&);
- static void formatAccessorsAsProperties(
- const v8::FunctionCallbackInfo<v8::Value>&);
- static void subtypeCallback(const v8::FunctionCallbackInfo<v8::Value>&);
- static void getInternalPropertiesCallback(
- const v8::FunctionCallbackInfo<v8::Value>&);
- static void objectHasOwnPropertyCallback(
- const v8::FunctionCallbackInfo<v8::Value>&);
- static void bindCallback(const v8::FunctionCallbackInfo<v8::Value>&);
- static void proxyTargetValueCallback(
- const v8::FunctionCallbackInfo<v8::Value>&);
- static void nativeAccessorDescriptorCallback(
- const v8::FunctionCallbackInfo<v8::Value>&);
- static void typedArrayPropertiesCallback(
- const v8::FunctionCallbackInfo<v8::Value>&);
-};
-
-} // namespace v8_inspector
-
-#endif // V8_INSPECTOR_V8_INJECTED_SCRIPT_HOST_H_
diff --git a/deps/v8/src/inspector/v8-inspector-impl.cc b/deps/v8/src/inspector/v8-inspector-impl.cc
index 5422b5e12f..b764118079 100644
--- a/deps/v8/src/inspector/v8-inspector-impl.cc
+++ b/deps/v8/src/inspector/v8-inspector-impl.cc
@@ -188,14 +188,8 @@ InspectedContext* V8InspectorImpl::getContext(int contextId) const {
return getContext(contextGroupId(contextId), contextId);
}
-v8::MaybeLocal<v8::Context> V8InspectorImpl::contextById(
- int groupId, v8::Maybe<int> contextId) {
- if (contextId.IsNothing()) {
- v8::Local<v8::Context> context =
- client()->ensureDefaultContextInGroup(groupId);
- return context.IsEmpty() ? v8::MaybeLocal<v8::Context>() : context;
- }
- InspectedContext* context = getContext(contextId.FromJust());
+v8::MaybeLocal<v8::Context> V8InspectorImpl::contextById(int contextId) {
+ InspectedContext* context = getContext(contextId);
return context ? context->context() : v8::MaybeLocal<v8::Context>();
}
@@ -247,10 +241,15 @@ void V8InspectorImpl::contextCollected(int groupId, int contextId) {
void V8InspectorImpl::resetContextGroup(int contextGroupId) {
m_consoleStorageMap.erase(contextGroupId);
m_muteExceptionsMap.erase(contextGroupId);
+ std::vector<int> contextIdsToClear;
+ forEachContext(contextGroupId,
+ [&contextIdsToClear](InspectedContext* context) {
+ contextIdsToClear.push_back(context->contextId());
+ });
+ m_debugger->wasmTranslation()->Clear(m_isolate, contextIdsToClear);
forEachSession(contextGroupId,
[](V8InspectorSessionImpl* session) { session->reset(); });
m_contexts.erase(contextGroupId);
- m_debugger->wasmTranslation()->Clear();
}
void V8InspectorImpl::idleStarted() { m_isolate->SetIdle(true); }
@@ -394,8 +393,11 @@ void V8InspectorImpl::forEachSession(
}
}
-V8InspectorImpl::EvaluateScope::EvaluateScope(v8::Isolate* isolate)
- : m_isolate(isolate), m_safeForTerminationScope(isolate) {}
+V8InspectorImpl::EvaluateScope::EvaluateScope(
+ const InjectedScript::Scope& scope)
+ : m_scope(scope),
+ m_isolate(scope.inspector()->isolate()),
+ m_safeForTerminationScope(m_isolate) {}
struct V8InspectorImpl::EvaluateScope::CancelToken {
v8::base::Mutex m_mutex;
@@ -403,8 +405,11 @@ struct V8InspectorImpl::EvaluateScope::CancelToken {
};
V8InspectorImpl::EvaluateScope::~EvaluateScope() {
+ if (m_scope.tryCatch().HasTerminated()) {
+ m_scope.inspector()->debugger()->reportTermination();
+ }
if (m_cancelToken) {
- v8::base::LockGuard<v8::base::Mutex> lock(&m_cancelToken->m_mutex);
+ v8::base::MutexGuard lock(&m_cancelToken->m_mutex);
m_cancelToken->m_canceled = true;
m_isolate->CancelTerminateExecution();
}
@@ -418,7 +423,7 @@ class V8InspectorImpl::EvaluateScope::TerminateTask : public v8::Task {
void Run() override {
// CancelToken contains m_canceled bool which may be changed from main
// thread, so lock mutex first.
- v8::base::LockGuard<v8::base::Mutex> lock(&m_token->m_mutex);
+ v8::base::MutexGuard lock(&m_token->m_mutex);
if (m_token->m_canceled) return;
m_isolate->TerminateExecution();
}
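
The hunks above also migrate v8::base::LockGuard<v8::base::Mutex> to the shorter v8::base::MutexGuard alias. The cancellation handshake they guard is worth spelling out: both the main thread and the terminate task take the token's mutex, so a late-running TerminateTask can never terminate an isolate whose evaluation has already completed. A minimal sketch, assuming the v8::base API used above (names are illustrative):

    #include "include/v8.h"
    #include "src/base/platform/mutex.h"

    struct CancelToken {
      v8::base::Mutex m_mutex;
      bool m_canceled = false;
    };

    // Task side: terminate only if the evaluation is still in flight.
    void RunTerminateTask(CancelToken* token, v8::Isolate* isolate) {
      v8::base::MutexGuard lock(&token->m_mutex);
      if (token->m_canceled) return;
      isolate->TerminateExecution();
    }

    // Main-thread side (EvaluateScope destructor): flip the flag under the
    // same lock, then clear any termination already requested.
    void Cancel(CancelToken* token, v8::Isolate* isolate) {
      v8::base::MutexGuard lock(&token->m_mutex);
      token->m_canceled = true;
      isolate->CancelTerminateExecution();
    }
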
diff --git a/deps/v8/src/inspector/v8-inspector-impl.h b/deps/v8/src/inspector/v8-inspector-impl.h
index 70eaf0eb20..5b89cb0920 100644
--- a/deps/v8/src/inspector/v8-inspector-impl.h
+++ b/deps/v8/src/inspector/v8-inspector-impl.h
@@ -37,6 +37,7 @@
#include "src/base/macros.h"
#include "src/base/platform/mutex.h"
+#include "src/inspector/injected-script.h"
#include "src/inspector/protocol/Protocol.h"
#include "include/v8-inspector.h"
@@ -78,8 +79,7 @@ class V8InspectorImpl : public V8Inspector {
const StringView& state) override;
void contextCreated(const V8ContextInfo&) override;
void contextDestroyed(v8::Local<v8::Context>) override;
- v8::MaybeLocal<v8::Context> contextById(int groupId,
- v8::Maybe<int> contextId) override;
+ v8::MaybeLocal<v8::Context> contextById(int contextId) override;
void contextCollected(int contextGroupId, int contextId);
void resetContextGroup(int contextGroupId) override;
void idleStarted() override;
@@ -127,15 +127,17 @@ class V8InspectorImpl : public V8Inspector {
class EvaluateScope {
public:
- explicit EvaluateScope(v8::Isolate* isolate);
+ explicit EvaluateScope(const InjectedScript::Scope& scope);
~EvaluateScope();
protocol::Response setTimeout(double timeout);
private:
- v8::Isolate* m_isolate;
class TerminateTask;
struct CancelToken;
+
+ const InjectedScript::Scope& m_scope;
+ v8::Isolate* m_isolate;
std::shared_ptr<CancelToken> m_cancelToken;
v8::Isolate::SafeForTerminationScope m_safeForTerminationScope;
};
diff --git a/deps/v8/src/inspector/v8-inspector-session-impl.cc b/deps/v8/src/inspector/v8-inspector-session-impl.cc
index d37f87a2a7..db05b24102 100644
--- a/deps/v8/src/inspector/v8-inspector-session-impl.cc
+++ b/deps/v8/src/inspector/v8-inspector-session-impl.cc
@@ -203,12 +203,7 @@ Response V8InspectorSessionImpl::findInjectedScript(
if (!context) return Response::Error("Cannot find context with specified id");
injectedScript = context->getInjectedScript(m_sessionId);
if (!injectedScript) {
- if (!context->createInjectedScript(m_sessionId)) {
- if (m_inspector->isolate()->IsExecutionTerminating())
- return Response::Error("Execution was terminated");
- return Response::Error("Cannot access specified execution context");
- }
- injectedScript = context->getInjectedScript(m_sessionId);
+ injectedScript = context->createInjectedScript(m_sessionId);
if (m_customObjectFormatterEnabled)
injectedScript->setCustomObjectFormatterEnabled(true);
}
@@ -285,14 +280,16 @@ V8InspectorSessionImpl::wrapObject(v8::Local<v8::Context> context,
findInjectedScript(InspectedContext::contextId(context), injectedScript);
if (!injectedScript) return nullptr;
std::unique_ptr<protocol::Runtime::RemoteObject> result;
- injectedScript->wrapObject(value, groupName, false, generatePreview, &result);
+ injectedScript->wrapObject(
+ value, groupName,
+ generatePreview ? WrapMode::kWithPreview : WrapMode::kNoPreview, &result);
return result;
}
std::unique_ptr<protocol::Runtime::RemoteObject>
V8InspectorSessionImpl::wrapTable(v8::Local<v8::Context> context,
- v8::Local<v8::Value> table,
- v8::Local<v8::Value> columns) {
+ v8::Local<v8::Object> table,
+ v8::MaybeLocal<v8::Array> columns) {
InjectedScript* injectedScript = nullptr;
findInjectedScript(InspectedContext::contextId(context), injectedScript);
if (!injectedScript) return nullptr;
diff --git a/deps/v8/src/inspector/v8-inspector-session-impl.h b/deps/v8/src/inspector/v8-inspector-session-impl.h
index 5053d4dd78..461cc0a2f0 100644
--- a/deps/v8/src/inspector/v8-inspector-session-impl.h
+++ b/deps/v8/src/inspector/v8-inspector-session-impl.h
@@ -55,8 +55,8 @@ class V8InspectorSessionImpl : public V8InspectorSession,
v8::Local<v8::Context>, v8::Local<v8::Value>, const String16& groupName,
bool generatePreview);
std::unique_ptr<protocol::Runtime::RemoteObject> wrapTable(
- v8::Local<v8::Context>, v8::Local<v8::Value> table,
- v8::Local<v8::Value> columns);
+ v8::Local<v8::Context>, v8::Local<v8::Object> table,
+ v8::MaybeLocal<v8::Array> columns);
std::vector<std::unique_ptr<protocol::Schema::Domain>> supportedDomainsImpl();
Response unwrapObject(const String16& objectId, v8::Local<v8::Value>*,
v8::Local<v8::Context>*, String16* objectGroup);
diff --git a/deps/v8/src/inspector/v8-internal-value-type.cc b/deps/v8/src/inspector/v8-internal-value-type.cc
deleted file mode 100644
index 54e839e64e..0000000000
--- a/deps/v8/src/inspector/v8-internal-value-type.cc
+++ /dev/null
@@ -1,75 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/inspector/v8-internal-value-type.h"
-
-#include "src/inspector/string-util.h"
-
-namespace v8_inspector {
-
-namespace {
-
-v8::Local<v8::Private> internalSubtypePrivate(v8::Isolate* isolate) {
- return v8::Private::ForApi(
- isolate,
- toV8StringInternalized(isolate, "V8InternalType#internalSubtype"));
-}
-
-v8::Local<v8::String> subtypeForInternalType(v8::Isolate* isolate,
- V8InternalValueType type) {
- switch (type) {
- case V8InternalValueType::kEntry:
- return toV8StringInternalized(isolate, "internal#entry");
- case V8InternalValueType::kLocation:
- return toV8StringInternalized(isolate, "internal#location");
- case V8InternalValueType::kScope:
- return toV8StringInternalized(isolate, "internal#scope");
- case V8InternalValueType::kScopeList:
- return toV8StringInternalized(isolate, "internal#scopeList");
- }
- UNREACHABLE();
-}
-
-} // namespace
-
-bool markAsInternal(v8::Local<v8::Context> context,
- v8::Local<v8::Object> object, V8InternalValueType type) {
- v8::Isolate* isolate = context->GetIsolate();
- v8::Local<v8::Private> privateValue = internalSubtypePrivate(isolate);
- v8::Local<v8::String> subtype = subtypeForInternalType(isolate, type);
- return object->SetPrivate(context, privateValue, subtype).FromMaybe(false);
-}
-
-bool markArrayEntriesAsInternal(v8::Local<v8::Context> context,
- v8::Local<v8::Array> array,
- V8InternalValueType type) {
- v8::Isolate* isolate = context->GetIsolate();
- v8::Local<v8::Private> privateValue = internalSubtypePrivate(isolate);
- v8::Local<v8::String> subtype = subtypeForInternalType(isolate, type);
- for (uint32_t i = 0; i < array->Length(); ++i) {
- v8::Local<v8::Value> entry;
- if (!array->Get(context, i).ToLocal(&entry) || !entry->IsObject())
- return false;
- if (!entry.As<v8::Object>()
- ->SetPrivate(context, privateValue, subtype)
- .FromMaybe(false))
- return false;
- }
- return true;
-}
-
-v8::Local<v8::Value> v8InternalValueTypeFrom(v8::Local<v8::Context> context,
- v8::Local<v8::Object> object) {
- v8::Isolate* isolate = context->GetIsolate();
- v8::Local<v8::Private> privateValue = internalSubtypePrivate(isolate);
- if (!object->HasPrivate(context, privateValue).FromMaybe(false))
- return v8::Null(isolate);
- v8::Local<v8::Value> subtypeValue;
- if (!object->GetPrivate(context, privateValue).ToLocal(&subtypeValue) ||
- !subtypeValue->IsString())
- return v8::Null(isolate);
- return subtypeValue;
-}
-
-} // namespace v8_inspector
diff --git a/deps/v8/src/inspector/v8-internal-value-type.h b/deps/v8/src/inspector/v8-internal-value-type.h
deleted file mode 100644
index 991919a82e..0000000000
--- a/deps/v8/src/inspector/v8-internal-value-type.h
+++ /dev/null
@@ -1,23 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_INSPECTOR_V8_INTERNAL_VALUE_TYPE_H_
-#define V8_INSPECTOR_V8_INTERNAL_VALUE_TYPE_H_
-
-#include "include/v8.h"
-
-namespace v8_inspector {
-
-enum class V8InternalValueType { kEntry, kLocation, kScope, kScopeList };
-
-bool markAsInternal(v8::Local<v8::Context>, v8::Local<v8::Object>,
- V8InternalValueType);
-bool markArrayEntriesAsInternal(v8::Local<v8::Context>, v8::Local<v8::Array>,
- V8InternalValueType);
-v8::Local<v8::Value> v8InternalValueTypeFrom(v8::Local<v8::Context>,
- v8::Local<v8::Object>);
-
-} // namespace v8_inspector
-
-#endif // V8_INSPECTOR_V8_INTERNAL_VALUE_TYPE_H_
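
The two deletions above retire the v8::Private-based tagging that marked inspector-created objects with an internal subtype; in the new value-mirror.cc later in this patch, v8InternalValueTypeFrom instead asks InspectedContext::getInternalType for that information. A minimal sketch of the retired pattern, using only calls visible in the deleted file (the wrapper function is illustrative):

    #include "include/v8.h"
    #include "src/inspector/string-util.h"

    // Stamp an object with an inspector-internal subtype via a private
    // property, as the deleted markAsInternal() did for kEntry.
    bool TagAsInternalEntry(v8::Local<v8::Context> context,
                            v8::Local<v8::Object> object) {
      v8::Isolate* isolate = context->GetIsolate();
      v8::Local<v8::Private> key = v8::Private::ForApi(
          isolate,
          toV8StringInternalized(isolate, "V8InternalType#internalSubtype"));
      return object
          ->SetPrivate(context, key,
                       toV8StringInternalized(isolate, "internal#entry"))
          .FromMaybe(false);
    }
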
diff --git a/deps/v8/src/inspector/v8-runtime-agent-impl.cc b/deps/v8/src/inspector/v8-runtime-agent-impl.cc
index 9e3697cf9e..cedb637399 100644
--- a/deps/v8/src/inspector/v8-runtime-agent-impl.cc
+++ b/deps/v8/src/inspector/v8-runtime-agent-impl.cc
@@ -90,14 +90,14 @@ template <typename ProtocolCallback>
bool wrapEvaluateResultAsync(InjectedScript* injectedScript,
v8::MaybeLocal<v8::Value> maybeResultValue,
const v8::TryCatch& tryCatch,
- const String16& objectGroup, bool returnByValue,
- bool generatePreview, ProtocolCallback* callback) {
+ const String16& objectGroup, WrapMode wrapMode,
+ ProtocolCallback* callback) {
std::unique_ptr<RemoteObject> result;
Maybe<protocol::Runtime::ExceptionDetails> exceptionDetails;
Response response = injectedScript->wrapEvaluateResult(
- maybeResultValue, tryCatch, objectGroup, returnByValue, generatePreview,
- &result, &exceptionDetails);
+ maybeResultValue, tryCatch, objectGroup, wrapMode, &result,
+ &exceptionDetails);
if (response.isSuccess()) {
callback->sendSuccess(std::move(result), std::move(exceptionDetails));
return true;
@@ -110,8 +110,8 @@ void innerCallFunctionOn(
V8InspectorSessionImpl* session, InjectedScript::Scope& scope,
v8::Local<v8::Value> recv, const String16& expression,
Maybe<protocol::Array<protocol::Runtime::CallArgument>> optionalArguments,
- bool silent, bool returnByValue, bool generatePreview, bool userGesture,
- bool awaitPromise, const String16& objectGroup,
+ bool silent, WrapMode wrapMode, bool userGesture, bool awaitPromise,
+ const String16& objectGroup,
std::unique_ptr<V8RuntimeAgentImpl::CallFunctionOnCallback> callback) {
V8InspectorImpl* inspector = session->inspector();
@@ -159,7 +159,7 @@ void innerCallFunctionOn(
if (scope.tryCatch().HasCaught()) {
wrapEvaluateResultAsync(scope.injectedScript(), maybeFunctionValue,
- scope.tryCatch(), objectGroup, false, false,
+ scope.tryCatch(), objectGroup, WrapMode::kNoPreview,
callback.get());
return;
}
@@ -189,13 +189,13 @@ void innerCallFunctionOn(
if (!awaitPromise || scope.tryCatch().HasCaught()) {
wrapEvaluateResultAsync(scope.injectedScript(), maybeResultValue,
- scope.tryCatch(), objectGroup, returnByValue,
- generatePreview, callback.get());
+ scope.tryCatch(), objectGroup, wrapMode,
+ callback.get());
return;
}
scope.injectedScript()->addPromiseCallback(
- session, maybeResultValue, objectGroup, returnByValue, generatePreview,
+ session, maybeResultValue, objectGroup, wrapMode,
EvaluateCallbackWrapper<V8RuntimeAgentImpl::CallFunctionOnCallback>::wrap(
std::move(callback)));
}
@@ -261,7 +261,7 @@ void V8RuntimeAgentImpl::evaluate(
scope.allowCodeGenerationFromStrings();
v8::MaybeLocal<v8::Value> maybeResultValue;
{
- V8InspectorImpl::EvaluateScope evaluateScope(m_inspector->isolate());
+ V8InspectorImpl::EvaluateScope evaluateScope(scope);
if (timeout.isJust()) {
response = evaluateScope.setTimeout(timeout.fromJust() / 1000.0);
if (!response.isSuccess()) {
@@ -284,16 +284,17 @@ void V8RuntimeAgentImpl::evaluate(
return;
}
+ WrapMode mode = generatePreview.fromMaybe(false) ? WrapMode::kWithPreview
+ : WrapMode::kNoPreview;
+ if (returnByValue.fromMaybe(false)) mode = WrapMode::kForceValue;
if (!awaitPromise.fromMaybe(false) || scope.tryCatch().HasCaught()) {
wrapEvaluateResultAsync(scope.injectedScript(), maybeResultValue,
- scope.tryCatch(), objectGroup.fromMaybe(""),
- returnByValue.fromMaybe(false),
- generatePreview.fromMaybe(false), callback.get());
+ scope.tryCatch(), objectGroup.fromMaybe(""), mode,
+ callback.get());
return;
}
scope.injectedScript()->addPromiseCallback(
- m_session, maybeResultValue, objectGroup.fromMaybe(""),
- returnByValue.fromMaybe(false), generatePreview.fromMaybe(false),
+ m_session, maybeResultValue, objectGroup.fromMaybe(""), mode,
EvaluateCallbackWrapper<EvaluateCallback>::wrap(std::move(callback)));
}
@@ -312,9 +313,11 @@ void V8RuntimeAgentImpl::awaitPromise(
Response::Error("Could not find promise with given id"));
return;
}
+ WrapMode mode = generatePreview.fromMaybe(false) ? WrapMode::kWithPreview
+ : WrapMode::kNoPreview;
+ if (returnByValue.fromMaybe(false)) mode = WrapMode::kForceValue;
scope.injectedScript()->addPromiseCallback(
- m_session, scope.object(), scope.objectGroupName(),
- returnByValue.fromMaybe(false), generatePreview.fromMaybe(false),
+ m_session, scope.object(), scope.objectGroupName(), mode,
EvaluateCallbackWrapper<AwaitPromiseCallback>::wrap(std::move(callback)));
}
@@ -335,6 +338,9 @@ void V8RuntimeAgentImpl::callFunctionOn(
"Either ObjectId or executionContextId must be specified"));
return;
}
+ WrapMode mode = generatePreview.fromMaybe(false) ? WrapMode::kWithPreview
+ : WrapMode::kNoPreview;
+ if (returnByValue.fromMaybe(false)) mode = WrapMode::kForceValue;
if (objectId.isJust()) {
InjectedScript::ObjectScope scope(m_session, objectId.fromJust());
Response response = scope.initialize();
@@ -342,14 +348,13 @@ void V8RuntimeAgentImpl::callFunctionOn(
callback->sendFailure(response);
return;
}
- innerCallFunctionOn(
- m_session, scope, scope.object(), expression,
- std::move(optionalArguments), silent.fromMaybe(false),
- returnByValue.fromMaybe(false), generatePreview.fromMaybe(false),
- userGesture.fromMaybe(false), awaitPromise.fromMaybe(false),
- objectGroup.isJust() ? objectGroup.fromMaybe(String16())
- : scope.objectGroupName(),
- std::move(callback));
+ innerCallFunctionOn(m_session, scope, scope.object(), expression,
+ std::move(optionalArguments), silent.fromMaybe(false),
+ mode, userGesture.fromMaybe(false),
+ awaitPromise.fromMaybe(false),
+ objectGroup.isJust() ? objectGroup.fromMaybe(String16())
+ : scope.objectGroupName(),
+ std::move(callback));
} else {
int contextId = 0;
Response response =
@@ -365,12 +370,11 @@ void V8RuntimeAgentImpl::callFunctionOn(
callback->sendFailure(response);
return;
}
- innerCallFunctionOn(
- m_session, scope, scope.context()->Global(), expression,
- std::move(optionalArguments), silent.fromMaybe(false),
- returnByValue.fromMaybe(false), generatePreview.fromMaybe(false),
- userGesture.fromMaybe(false), awaitPromise.fromMaybe(false),
- objectGroup.fromMaybe(""), std::move(callback));
+ innerCallFunctionOn(m_session, scope, scope.context()->Global(), expression,
+ std::move(optionalArguments), silent.fromMaybe(false),
+ mode, userGesture.fromMaybe(false),
+ awaitPromise.fromMaybe(false),
+ objectGroup.fromMaybe(""), std::move(callback));
}
}
@@ -397,40 +401,18 @@ Response V8RuntimeAgentImpl::getProperties(
v8::Local<v8::Object> object = scope.object().As<v8::Object>();
response = scope.injectedScript()->getProperties(
object, scope.objectGroupName(), ownProperties.fromMaybe(false),
- accessorPropertiesOnly.fromMaybe(false), generatePreview.fromMaybe(false),
+ accessorPropertiesOnly.fromMaybe(false),
+ generatePreview.fromMaybe(false) ? WrapMode::kWithPreview
+ : WrapMode::kNoPreview,
result, exceptionDetails);
if (!response.isSuccess()) return response;
if (exceptionDetails->isJust() || accessorPropertiesOnly.fromMaybe(false))
return Response::OK();
- v8::Local<v8::Array> propertiesArray;
- if (!m_inspector->debugger()
- ->internalProperties(scope.context(), scope.object())
- .ToLocal(&propertiesArray)) {
- return Response::InternalError();
- }
std::unique_ptr<protocol::Array<InternalPropertyDescriptor>>
- propertiesProtocolArray =
- protocol::Array<InternalPropertyDescriptor>::create();
- for (uint32_t i = 0; i < propertiesArray->Length(); i += 2) {
- v8::Local<v8::Value> name;
- if (!propertiesArray->Get(scope.context(), i).ToLocal(&name) ||
- !name->IsString()) {
- return Response::InternalError();
- }
- v8::Local<v8::Value> value;
- if (!propertiesArray->Get(scope.context(), i + 1).ToLocal(&value))
- return Response::InternalError();
- std::unique_ptr<RemoteObject> wrappedValue;
- protocol::Response response = scope.injectedScript()->wrapObject(
- value, scope.objectGroupName(), false, false, &wrappedValue);
- if (!response.isSuccess()) return response;
- propertiesProtocolArray->addItem(
- InternalPropertyDescriptor::create()
- .setName(
- toProtocolString(m_inspector->isolate(), name.As<v8::String>()))
- .setValue(std::move(wrappedValue))
- .build());
- }
+ propertiesProtocolArray;
+ response = scope.injectedScript()->getInternalProperties(
+ object, scope.objectGroupName(), &propertiesProtocolArray);
+ if (!response.isSuccess()) return response;
if (propertiesProtocolArray->length())
*internalProperties = std::move(propertiesProtocolArray);
return Response::OK();
@@ -499,7 +481,7 @@ Response V8RuntimeAgentImpl::compileScript(
if (!isOk) {
if (scope.tryCatch().HasCaught()) {
response = scope.injectedScript()->createExceptionDetails(
- scope.tryCatch(), String16(), false, exceptionDetails);
+ scope.tryCatch(), String16(), WrapMode::kNoPreview, exceptionDetails);
if (!response.isSuccess()) return response;
return Response::OK();
} else {
@@ -577,17 +559,18 @@ void V8RuntimeAgentImpl::runScript(
return;
}
+ WrapMode mode = generatePreview.fromMaybe(false) ? WrapMode::kWithPreview
+ : WrapMode::kNoPreview;
+ if (returnByValue.fromMaybe(false)) mode = WrapMode::kForceValue;
if (!awaitPromise.fromMaybe(false) || scope.tryCatch().HasCaught()) {
wrapEvaluateResultAsync(scope.injectedScript(), maybeResultValue,
- scope.tryCatch(), objectGroup.fromMaybe(""),
- returnByValue.fromMaybe(false),
- generatePreview.fromMaybe(false), callback.get());
+ scope.tryCatch(), objectGroup.fromMaybe(""), mode,
+ callback.get());
return;
}
scope.injectedScript()->addPromiseCallback(
- m_session, maybeResultValue.ToLocalChecked(),
- objectGroup.fromMaybe(""), returnByValue.fromMaybe(false),
- generatePreview.fromMaybe(false),
+ m_session, maybeResultValue.ToLocalChecked(), objectGroup.fromMaybe(""),
+ mode,
EvaluateCallbackWrapper<RunScriptCallback>::wrap(std::move(callback)));
}
@@ -603,8 +586,8 @@ Response V8RuntimeAgentImpl::queryObjects(
v8::Local<v8::Array> resultArray = m_inspector->debugger()->queryObjects(
scope.context(), v8::Local<v8::Object>::Cast(scope.object()));
return scope.injectedScript()->wrapObject(
- resultArray, objectGroup.fromMaybe(scope.objectGroupName()), false, false,
- objects);
+ resultArray, objectGroup.fromMaybe(scope.objectGroupName()),
+ WrapMode::kNoPreview, objects);
}
Response V8RuntimeAgentImpl::globalLexicalScopeNames(
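
Every hunk in this file repeats the same folding of the returnByValue/generatePreview flag pair into a single WrapMode, with kForceValue taking precedence over kWithPreview. A minimal sketch of that mapping, pulled into one place (the enumerator names come from the hunks above; the declaration order and helper are illustrative):

    enum class WrapMode { kForceValue, kNoPreview, kWithPreview };

    WrapMode WrapModeFor(bool returnByValue, bool generatePreview) {
      if (returnByValue) return WrapMode::kForceValue;
      return generatePreview ? WrapMode::kWithPreview : WrapMode::kNoPreview;
    }
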
diff --git a/deps/v8/src/inspector/v8-stack-trace-impl.cc b/deps/v8/src/inspector/v8-stack-trace-impl.cc
index ae41344a7b..8bf16b4baf 100644
--- a/deps/v8/src/inspector/v8-stack-trace-impl.cc
+++ b/deps/v8/src/inspector/v8-stack-trace-impl.cc
@@ -268,9 +268,15 @@ StringView V8StackTraceImpl::topFunctionName() const {
std::unique_ptr<protocol::Runtime::StackTrace>
V8StackTraceImpl::buildInspectorObjectImpl(V8Debugger* debugger) const {
+ return buildInspectorObjectImpl(debugger, m_maxAsyncDepth);
+}
+
+std::unique_ptr<protocol::Runtime::StackTrace>
+V8StackTraceImpl::buildInspectorObjectImpl(V8Debugger* debugger,
+ int maxAsyncDepth) const {
return buildInspectorObjectCommon(debugger, m_frames, String16(),
m_asyncParent.lock(), m_externalParent,
- m_maxAsyncDepth);
+ maxAsyncDepth);
}
std::unique_ptr<protocol::Runtime::API::StackTrace>
diff --git a/deps/v8/src/inspector/v8-stack-trace-impl.h b/deps/v8/src/inspector/v8-stack-trace-impl.h
index abda0f12ee..1142cfaa82 100644
--- a/deps/v8/src/inspector/v8-stack-trace-impl.h
+++ b/deps/v8/src/inspector/v8-stack-trace-impl.h
@@ -63,6 +63,9 @@ class V8StackTraceImpl : public V8StackTrace {
std::unique_ptr<protocol::Runtime::StackTrace> buildInspectorObjectImpl(
V8Debugger* debugger) const;
+ std::unique_ptr<protocol::Runtime::StackTrace> buildInspectorObjectImpl(
+ V8Debugger* debugger, int maxAsyncDepth) const;
+
// V8StackTrace implementation.
// This method drops the async stack trace.
std::unique_ptr<V8StackTrace> clone() override;
diff --git a/deps/v8/src/inspector/v8-value-utils.cc b/deps/v8/src/inspector/v8-value-utils.cc
index feaffd36d0..dd73c2919d 100644
--- a/deps/v8/src/inspector/v8-value-utils.cc
+++ b/deps/v8/src/inspector/v8-value-utils.cc
@@ -6,102 +6,6 @@
namespace v8_inspector {
-namespace {
-
-protocol::Response toProtocolValue(v8::Local<v8::Context> context,
- v8::Local<v8::Value> value, int maxDepth,
- std::unique_ptr<protocol::Value>* result) {
- using protocol::Response;
- if (value.IsEmpty()) {
- UNREACHABLE();
- }
-
- if (!maxDepth) return Response::Error("Object reference chain is too long");
- maxDepth--;
-
- if (value->IsNull() || value->IsUndefined()) {
- *result = protocol::Value::null();
- return Response::OK();
- }
- if (value->IsBoolean()) {
- *result =
- protocol::FundamentalValue::create(value.As<v8::Boolean>()->Value());
- return Response::OK();
- }
- if (value->IsNumber()) {
- double doubleValue = value.As<v8::Number>()->Value();
- int intValue = static_cast<int>(doubleValue);
- if (intValue == doubleValue) {
- *result = protocol::FundamentalValue::create(intValue);
- return Response::OK();
- }
- *result = protocol::FundamentalValue::create(doubleValue);
- return Response::OK();
- }
- if (value->IsString()) {
- *result = protocol::StringValue::create(
- toProtocolString(context->GetIsolate(), value.As<v8::String>()));
- return Response::OK();
- }
- if (value->IsArray()) {
- v8::Local<v8::Array> array = value.As<v8::Array>();
- std::unique_ptr<protocol::ListValue> inspectorArray =
- protocol::ListValue::create();
- uint32_t length = array->Length();
- for (uint32_t i = 0; i < length; i++) {
- v8::Local<v8::Value> value;
- if (!array->Get(context, i).ToLocal(&value))
- return Response::InternalError();
- std::unique_ptr<protocol::Value> element;
- Response response = toProtocolValue(context, value, maxDepth, &element);
- if (!response.isSuccess()) return response;
- inspectorArray->pushValue(std::move(element));
- }
- *result = std::move(inspectorArray);
- return Response::OK();
- }
- if (value->IsObject()) {
- std::unique_ptr<protocol::DictionaryValue> jsonObject =
- protocol::DictionaryValue::create();
- v8::Local<v8::Object> object = v8::Local<v8::Object>::Cast(value);
- v8::Local<v8::Array> propertyNames;
- if (!object->GetPropertyNames(context).ToLocal(&propertyNames))
- return Response::InternalError();
- uint32_t length = propertyNames->Length();
- for (uint32_t i = 0; i < length; i++) {
- v8::Local<v8::Value> name;
- if (!propertyNames->Get(context, i).ToLocal(&name))
- return Response::InternalError();
- // FIXME(yurys): v8::Object should support GetOwnPropertyNames
- if (name->IsString()) {
- v8::Maybe<bool> hasRealNamedProperty = object->HasRealNamedProperty(
- context, v8::Local<v8::String>::Cast(name));
- if (hasRealNamedProperty.IsNothing() ||
- !hasRealNamedProperty.FromJust())
- continue;
- }
- v8::Local<v8::String> propertyName;
- if (!name->ToString(context).ToLocal(&propertyName)) continue;
- v8::Local<v8::Value> property;
- if (!object->Get(context, name).ToLocal(&property))
- return Response::InternalError();
- if (property->IsUndefined()) continue;
- std::unique_ptr<protocol::Value> propertyValue;
- Response response =
- toProtocolValue(context, property, maxDepth, &propertyValue);
- if (!response.isSuccess()) return response;
- jsonObject->setValue(
- toProtocolString(context->GetIsolate(), propertyName),
- std::move(propertyValue));
- }
- *result = std::move(jsonObject);
- return Response::OK();
- }
- return Response::Error("Object couldn't be returned by value");
-}
-
-} // namespace
-
v8::Maybe<bool> createDataProperty(v8::Local<v8::Context> context,
v8::Local<v8::Object> object,
v8::Local<v8::Name> key,
@@ -122,11 +26,4 @@ v8::Maybe<bool> createDataProperty(v8::Local<v8::Context> context,
v8::Isolate::DisallowJavascriptExecutionScope::THROW_ON_FAILURE);
return array->CreateDataProperty(context, index, value);
}
-
-protocol::Response toProtocolValue(v8::Local<v8::Context> context,
- v8::Local<v8::Value> value,
- std::unique_ptr<protocol::Value>* result) {
- return toProtocolValue(context, value, 1000, result);
-}
-
} // namespace v8_inspector
diff --git a/deps/v8/src/inspector/v8-value-utils.h b/deps/v8/src/inspector/v8-value-utils.h
index 029fee224b..6817d9fbb6 100644
--- a/deps/v8/src/inspector/v8-value-utils.h
+++ b/deps/v8/src/inspector/v8-value-utils.h
@@ -18,9 +18,6 @@ v8::Maybe<bool> createDataProperty(v8::Local<v8::Context>,
v8::Maybe<bool> createDataProperty(v8::Local<v8::Context>, v8::Local<v8::Array>,
int index, v8::Local<v8::Value>);
-protocol::Response toProtocolValue(v8::Local<v8::Context>, v8::Local<v8::Value>,
- std::unique_ptr<protocol::Value>* result);
-
} // namespace v8_inspector
#endif // V8_INSPECTOR_V8_VALUE_UTILS_H_
diff --git a/deps/v8/src/inspector/value-mirror.cc b/deps/v8/src/inspector/value-mirror.cc
new file mode 100644
index 0000000000..aac6481828
--- /dev/null
+++ b/deps/v8/src/inspector/value-mirror.cc
@@ -0,0 +1,1617 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/inspector/value-mirror.h"
+
+#include <algorithm>
+#include <cmath>
+
+#include "src/debug/debug-interface.h"
+#include "src/inspector/v8-debugger.h"
+#include "src/inspector/v8-inspector-impl.h"
+#include "src/inspector/v8-value-utils.h"
+
+namespace v8_inspector {
+
+using protocol::Response;
+using protocol::Runtime::RemoteObject;
+using protocol::Runtime::ObjectPreview;
+using protocol::Runtime::PropertyPreview;
+using protocol::Runtime::EntryPreview;
+using protocol::Runtime::InternalPropertyDescriptor;
+
+namespace {
+V8InspectorClient* clientFor(v8::Local<v8::Context> context) {
+ return static_cast<V8InspectorImpl*>(
+ v8::debug::GetInspector(context->GetIsolate()))
+ ->client();
+}
+
+V8InternalValueType v8InternalValueTypeFrom(v8::Local<v8::Context> context,
+ v8::Local<v8::Value> value) {
+ if (!value->IsObject()) return V8InternalValueType::kNone;
+ V8InspectorImpl* inspector = static_cast<V8InspectorImpl*>(
+ v8::debug::GetInspector(context->GetIsolate()));
+ int contextId = InspectedContext::contextId(context);
+ InspectedContext* inspectedContext = inspector->getContext(contextId);
+ if (!inspectedContext) return V8InternalValueType::kNone;
+ return inspectedContext->getInternalType(value.As<v8::Object>());
+}
+
+Response toProtocolValue(v8::Local<v8::Context> context,
+ v8::Local<v8::Value> value, int maxDepth,
+ std::unique_ptr<protocol::Value>* result) {
+ if (!maxDepth) return Response::Error("Object reference chain is too long");
+ maxDepth--;
+
+ if (value->IsNull() || value->IsUndefined()) {
+ *result = protocol::Value::null();
+ return Response::OK();
+ }
+ if (value->IsBoolean()) {
+ *result =
+ protocol::FundamentalValue::create(value.As<v8::Boolean>()->Value());
+ return Response::OK();
+ }
+ if (value->IsNumber()) {
+ double doubleValue = value.As<v8::Number>()->Value();
+ int intValue = static_cast<int>(doubleValue);
+ if (intValue == doubleValue) {
+ *result = protocol::FundamentalValue::create(intValue);
+ return Response::OK();
+ }
+ *result = protocol::FundamentalValue::create(doubleValue);
+ return Response::OK();
+ }
+ if (value->IsString()) {
+ *result = protocol::StringValue::create(
+ toProtocolString(context->GetIsolate(), value.As<v8::String>()));
+ return Response::OK();
+ }
+ if (value->IsArray()) {
+ v8::Local<v8::Array> array = value.As<v8::Array>();
+ std::unique_ptr<protocol::ListValue> inspectorArray =
+ protocol::ListValue::create();
+ uint32_t length = array->Length();
+ for (uint32_t i = 0; i < length; i++) {
+ v8::Local<v8::Value> value;
+ if (!array->Get(context, i).ToLocal(&value))
+ return Response::InternalError();
+ std::unique_ptr<protocol::Value> element;
+ Response response = toProtocolValue(context, value, maxDepth, &element);
+ if (!response.isSuccess()) return response;
+ inspectorArray->pushValue(std::move(element));
+ }
+ *result = std::move(inspectorArray);
+ return Response::OK();
+ }
+ if (value->IsObject()) {
+ std::unique_ptr<protocol::DictionaryValue> jsonObject =
+ protocol::DictionaryValue::create();
+ v8::Local<v8::Object> object = v8::Local<v8::Object>::Cast(value);
+ v8::Local<v8::Array> propertyNames;
+ if (!object->GetPropertyNames(context).ToLocal(&propertyNames))
+ return Response::InternalError();
+ uint32_t length = propertyNames->Length();
+ for (uint32_t i = 0; i < length; i++) {
+ v8::Local<v8::Value> name;
+ if (!propertyNames->Get(context, i).ToLocal(&name))
+ return Response::InternalError();
+ // FIXME(yurys): v8::Object should support GetOwnPropertyNames
+ if (name->IsString()) {
+ v8::Maybe<bool> hasRealNamedProperty = object->HasRealNamedProperty(
+ context, v8::Local<v8::String>::Cast(name));
+ if (hasRealNamedProperty.IsNothing() ||
+ !hasRealNamedProperty.FromJust())
+ continue;
+ }
+ v8::Local<v8::String> propertyName;
+ if (!name->ToString(context).ToLocal(&propertyName)) continue;
+ v8::Local<v8::Value> property;
+ if (!object->Get(context, name).ToLocal(&property))
+ return Response::InternalError();
+ if (property->IsUndefined()) continue;
+ std::unique_ptr<protocol::Value> propertyValue;
+ Response response =
+ toProtocolValue(context, property, maxDepth, &propertyValue);
+ if (!response.isSuccess()) return response;
+ jsonObject->setValue(
+ toProtocolString(context->GetIsolate(), propertyName),
+ std::move(propertyValue));
+ }
+ *result = std::move(jsonObject);
+ return Response::OK();
+ }
+ return Response::Error("Object couldn't be returned by value");
+}
+
+Response toProtocolValue(v8::Local<v8::Context> context,
+ v8::Local<v8::Value> value,
+ std::unique_ptr<protocol::Value>* result) {
+ if (value->IsUndefined()) return Response::OK();
+ return toProtocolValue(context, value, 1000, result);
+}
+
+enum AbbreviateMode { kMiddle, kEnd };
+
+String16 abbreviateString(const String16& value, AbbreviateMode mode) {
+ const size_t maxLength = 100;
+ if (value.length() <= maxLength) return value;
+ UChar ellipsis = static_cast<UChar>(0x2026);
+ if (mode == kMiddle) {
+ return String16::concat(
+ value.substring(0, maxLength / 2), String16(&ellipsis, 1),
+ value.substring(value.length() - maxLength / 2 + 1));
+ }
+ return String16::concat(value.substring(0, maxLength - 1), ellipsis);
+}
+
+String16 descriptionForSymbol(v8::Local<v8::Context> context,
+ v8::Local<v8::Symbol> symbol) {
+ return String16::concat(
+ "Symbol(",
+ toProtocolStringWithTypeCheck(context->GetIsolate(), symbol->Name()),
+ ")");
+}
+
+String16 descriptionForBigInt(v8::Local<v8::Context> context,
+ v8::Local<v8::BigInt> value) {
+ v8::Isolate* isolate = context->GetIsolate();
+ v8::TryCatch tryCatch(isolate);
+ v8::Local<v8::String> description;
+ if (!value->ToString(context).ToLocal(&description)) return String16();
+ return toProtocolString(isolate, description) + "n";
+}
+
+String16 descriptionForPrimitiveType(v8::Local<v8::Context> context,
+ v8::Local<v8::Value> value) {
+ if (value->IsUndefined()) return RemoteObject::TypeEnum::Undefined;
+ if (value->IsNull()) return RemoteObject::SubtypeEnum::Null;
+ if (value->IsBoolean()) {
+ return value.As<v8::Boolean>()->Value() ? "true" : "false";
+ }
+ if (value->IsString()) {
+ return toProtocolString(context->GetIsolate(), value.As<v8::String>());
+ }
+ UNREACHABLE();
+ return String16();
+}
+
+String16 descriptionForRegExp(v8::Isolate* isolate,
+ v8::Local<v8::RegExp> value) {
+ String16Builder description;
+ description.append('/');
+ description.append(toProtocolString(isolate, value->GetSource()));
+ description.append('/');
+ v8::RegExp::Flags flags = value->GetFlags();
+ if (flags & v8::RegExp::Flags::kGlobal) description.append('g');
+ if (flags & v8::RegExp::Flags::kIgnoreCase) description.append('i');
+ if (flags & v8::RegExp::Flags::kMultiline) description.append('m');
+ if (flags & v8::RegExp::Flags::kDotAll) description.append('s');
+ if (flags & v8::RegExp::Flags::kUnicode) description.append('u');
+ if (flags & v8::RegExp::Flags::kSticky) description.append('y');
+ return description.toString();
+}
+
+enum class ErrorType { kNative, kClient };
+
+String16 descriptionForError(v8::Local<v8::Context> context,
+ v8::Local<v8::Object> object, ErrorType type) {
+ v8::Isolate* isolate = context->GetIsolate();
+ v8::TryCatch tryCatch(isolate);
+ String16 className = toProtocolString(isolate, object->GetConstructorName());
+ v8::Local<v8::Value> stackValue;
+ if (!object->Get(context, toV8String(isolate, "stack"))
+ .ToLocal(&stackValue) ||
+ !stackValue->IsString()) {
+ return className;
+ }
+ String16 stack = toProtocolString(isolate, stackValue.As<v8::String>());
+ String16 description = stack;
+ if (type == ErrorType::kClient) {
+ if (stack.substring(0, className.length()) != className) {
+ v8::Local<v8::Value> messageValue;
+ if (!object->Get(context, toV8String(isolate, "message"))
+ .ToLocal(&messageValue) ||
+ !messageValue->IsString()) {
+ return stack;
+ }
+ String16 message = toProtocolStringWithTypeCheck(isolate, messageValue);
+ size_t index = stack.find(message);
+ String16 stackWithoutMessage =
+ index != String16::kNotFound
+ ? stack.substring(index + message.length())
+ : String16();
+ description = className + ": " + message + stackWithoutMessage;
+ }
+ }
+ return description;
+}
+
+String16 descriptionForObject(v8::Isolate* isolate,
+ v8::Local<v8::Object> object) {
+ return toProtocolString(isolate, object->GetConstructorName());
+}
+
+String16 descriptionForDate(v8::Local<v8::Context> context,
+ v8::Local<v8::Date> date) {
+ v8::Isolate* isolate = context->GetIsolate();
+ v8::TryCatch tryCatch(isolate);
+ v8::Local<v8::String> description;
+ if (!date->ToString(context).ToLocal(&description)) {
+ return descriptionForObject(isolate, date);
+ }
+ return toProtocolString(isolate, description);
+}
+
+String16 descriptionForScopeList(v8::Local<v8::Array> list) {
+ return String16::concat(
+ "Scopes[", String16::fromInteger(static_cast<size_t>(list->Length())),
+ ']');
+}
+
+String16 descriptionForScope(v8::Local<v8::Context> context,
+ v8::Local<v8::Object> object) {
+ v8::Isolate* isolate = context->GetIsolate();
+ v8::Local<v8::Value> value;
+ if (!object->GetRealNamedProperty(context, toV8String(isolate, "description"))
+ .ToLocal(&value)) {
+ return String16();
+ }
+ return toProtocolStringWithTypeCheck(isolate, value);
+}
+
+String16 descriptionForCollection(v8::Isolate* isolate,
+ v8::Local<v8::Object> object, size_t length) {
+ String16 className = toProtocolString(isolate, object->GetConstructorName());
+ return String16::concat(className, '(', String16::fromInteger(length), ')');
+}
+
+String16 descriptionForEntry(v8::Local<v8::Context> context,
+ v8::Local<v8::Object> object) {
+ v8::Isolate* isolate = context->GetIsolate();
+ String16 key;
+ v8::Local<v8::Value> tmp;
+ if (object->GetRealNamedProperty(context, toV8String(isolate, "key"))
+ .ToLocal(&tmp)) {
+ auto wrapper = ValueMirror::create(context, tmp);
+ if (wrapper) {
+ std::unique_ptr<ObjectPreview> preview;
+ int limit = 5;
+ wrapper->buildEntryPreview(context, &limit, &limit, &preview);
+ if (preview) {
+ key = preview->getDescription(String16());
+ if (preview->getType() == RemoteObject::TypeEnum::String) {
+ key = String16::concat('\"', key, '\"');
+ }
+ }
+ }
+ }
+
+ String16 value;
+ if (object->GetRealNamedProperty(context, toV8String(isolate, "value"))
+ .ToLocal(&tmp)) {
+ auto wrapper = ValueMirror::create(context, tmp);
+ if (wrapper) {
+ std::unique_ptr<ObjectPreview> preview;
+ int limit = 5;
+ wrapper->buildEntryPreview(context, &limit, &limit, &preview);
+ if (preview) {
+ value = preview->getDescription(String16());
+ if (preview->getType() == RemoteObject::TypeEnum::String) {
+ value = String16::concat('\"', value, '\"');
+ }
+ }
+ }
+ }
+
+ return key.length() ? ("{" + key + " => " + value + "}") : value;
+}
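+// Illustration: a Map entry with key "a" and value 1 previews as
+// {"a" => 1}; Set-like entries carry no "key" property and preview as the
+// bare value.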
+
+String16 descriptionForFunction(v8::Local<v8::Context> context,
+ v8::Local<v8::Function> value) {
+ v8::Isolate* isolate = context->GetIsolate();
+ v8::TryCatch tryCatch(isolate);
+ v8::Local<v8::String> description;
+ if (!value->ToString(context).ToLocal(&description)) {
+ return descriptionForObject(isolate, value);
+ }
+ return toProtocolString(isolate, description);
+}
+
+class PrimitiveValueMirror final : public ValueMirror {
+ public:
+ PrimitiveValueMirror(v8::Local<v8::Value> value, const String16& type)
+ : m_value(value), m_type(type) {}
+
+ v8::Local<v8::Value> v8Value() const override { return m_value; }
+ Response buildRemoteObject(
+ v8::Local<v8::Context> context, WrapMode mode,
+ std::unique_ptr<RemoteObject>* result) const override {
+ std::unique_ptr<protocol::Value> protocolValue;
+ toProtocolValue(context, m_value, &protocolValue);
+ *result = RemoteObject::create()
+ .setType(m_type)
+ .setValue(std::move(protocolValue))
+ .build();
+ if (m_value->IsNull())
+ (*result)->setSubtype(RemoteObject::SubtypeEnum::Null);
+ return Response::OK();
+ }
+
+ void buildEntryPreview(
+ v8::Local<v8::Context> context, int* nameLimit, int* indexLimit,
+ std::unique_ptr<ObjectPreview>* preview) const override {
+ *preview =
+ ObjectPreview::create()
+ .setType(m_type)
+ .setDescription(descriptionForPrimitiveType(context, m_value))
+ .setOverflow(false)
+ .setProperties(protocol::Array<PropertyPreview>::create())
+ .build();
+ if (m_value->IsNull())
+ (*preview)->setSubtype(RemoteObject::SubtypeEnum::Null);
+ }
+
+ void buildPropertyPreview(
+ v8::Local<v8::Context> context, const String16& name,
+ std::unique_ptr<PropertyPreview>* preview) const override {
+ *preview = PropertyPreview::create()
+ .setName(name)
+ .setValue(abbreviateString(
+ descriptionForPrimitiveType(context, m_value), kMiddle))
+ .setType(m_type)
+ .build();
+ if (m_value->IsNull())
+ (*preview)->setSubtype(RemoteObject::SubtypeEnum::Null);
+ }
+
+ private:
+ v8::Local<v8::Value> m_value;
+ String16 m_type;
+ String16 m_subtype;
+};
+
+class NumberMirror final : public ValueMirror {
+ public:
+ explicit NumberMirror(v8::Local<v8::Number> value) : m_value(value) {}
+ v8::Local<v8::Value> v8Value() const override { return m_value; }
+
+ Response buildRemoteObject(
+ v8::Local<v8::Context> context, WrapMode mode,
+ std::unique_ptr<RemoteObject>* result) const override {
+ bool unserializable = false;
+ String16 descriptionValue = description(&unserializable);
+ *result = RemoteObject::create()
+ .setType(RemoteObject::TypeEnum::Number)
+ .setDescription(descriptionValue)
+ .build();
+ if (unserializable) {
+ (*result)->setUnserializableValue(descriptionValue);
+ } else {
+ (*result)->setValue(protocol::FundamentalValue::create(m_value->Value()));
+ }
+ return Response::OK();
+ }
+ void buildPropertyPreview(
+ v8::Local<v8::Context> context, const String16& name,
+ std::unique_ptr<PropertyPreview>* result) const override {
+ bool unserializable = false;
+ *result = PropertyPreview::create()
+ .setName(name)
+ .setType(RemoteObject::TypeEnum::Number)
+ .setValue(description(&unserializable))
+ .build();
+ }
+ void buildEntryPreview(
+ v8::Local<v8::Context> context, int* nameLimit, int* indexLimit,
+ std::unique_ptr<ObjectPreview>* preview) const override {
+ bool unserializable = false;
+ *preview = ObjectPreview::create()
+ .setType(RemoteObject::TypeEnum::Number)
+ .setDescription(description(&unserializable))
+ .setOverflow(false)
+ .setProperties(protocol::Array<PropertyPreview>::create())
+ .build();
+ }
+
+ private:
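+  // NaN, -0 and +/-Infinity have no JSON encoding, so they are flagged as
+  // unserializable and travel to the client as their description string
+  // (via setUnserializableValue in buildRemoteObject above).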
+ String16 description(bool* unserializable) const {
+ *unserializable = true;
+ double rawValue = m_value->Value();
+ if (std::isnan(rawValue)) return "NaN";
+ if (rawValue == 0.0 && std::signbit(rawValue)) return "-0";
+ if (std::isinf(rawValue)) {
+ return std::signbit(rawValue) ? "-Infinity" : "Infinity";
+ }
+ *unserializable = false;
+ return String16::fromDouble(rawValue);
+ }
+
+ v8::Local<v8::Number> m_value;
+};
+
+class BigIntMirror final : public ValueMirror {
+ public:
+ explicit BigIntMirror(v8::Local<v8::BigInt> value) : m_value(value) {}
+
+ Response buildRemoteObject(
+ v8::Local<v8::Context> context, WrapMode mode,
+ std::unique_ptr<RemoteObject>* result) const override {
+ String16 description = descriptionForBigInt(context, m_value);
+ *result = RemoteObject::create()
+ .setType(RemoteObject::TypeEnum::Bigint)
+ .setUnserializableValue(description)
+ .setDescription(description)
+ .build();
+ return Response::OK();
+ }
+
+ void buildPropertyPreview(v8::Local<v8::Context> context,
+ const String16& name,
+ std::unique_ptr<protocol::Runtime::PropertyPreview>*
+ preview) const override {
+ *preview = PropertyPreview::create()
+ .setName(name)
+ .setType(RemoteObject::TypeEnum::Bigint)
+ .setValue(abbreviateString(
+ descriptionForBigInt(context, m_value), kMiddle))
+ .build();
+ }
+
+ void buildEntryPreview(v8::Local<v8::Context> context, int* nameLimit,
+ int* indexLimit,
+ std::unique_ptr<protocol::Runtime::ObjectPreview>*
+ preview) const override {
+ *preview = ObjectPreview::create()
+ .setType(RemoteObject::TypeEnum::Bigint)
+ .setDescription(descriptionForBigInt(context, m_value))
+ .setOverflow(false)
+ .setProperties(protocol::Array<PropertyPreview>::create())
+ .build();
+ }
+
+ v8::Local<v8::Value> v8Value() const override { return m_value; }
+
+ private:
+ v8::Local<v8::BigInt> m_value;
+};
+
+class SymbolMirror final : public ValueMirror {
+ public:
+ explicit SymbolMirror(v8::Local<v8::Value> value)
+ : m_symbol(value.As<v8::Symbol>()) {}
+
+ Response buildRemoteObject(
+ v8::Local<v8::Context> context, WrapMode mode,
+ std::unique_ptr<RemoteObject>* result) const override {
+ if (mode == WrapMode::kForceValue) {
+ return Response::Error("Object couldn't be returned by value");
+ }
+ *result = RemoteObject::create()
+ .setType(RemoteObject::TypeEnum::Symbol)
+ .setDescription(descriptionForSymbol(context, m_symbol))
+ .build();
+ return Response::OK();
+ }
+
+ void buildPropertyPreview(v8::Local<v8::Context> context,
+ const String16& name,
+ std::unique_ptr<protocol::Runtime::PropertyPreview>*
+ preview) const override {
+ *preview = PropertyPreview::create()
+ .setName(name)
+ .setType(RemoteObject::TypeEnum::Symbol)
+ .setValue(abbreviateString(
+ descriptionForSymbol(context, m_symbol), kEnd))
+ .build();
+ }
+
+ v8::Local<v8::Value> v8Value() const override { return m_symbol; }
+
+ private:
+ v8::Local<v8::Symbol> m_symbol;
+};
+
+class LocationMirror final : public ValueMirror {
+ public:
+ static std::unique_ptr<LocationMirror> create(
+ v8::Local<v8::Function> function) {
+ return create(function, function->ScriptId(),
+ function->GetScriptLineNumber(),
+ function->GetScriptColumnNumber());
+ }
+ static std::unique_ptr<LocationMirror> createForGenerator(
+ v8::Local<v8::Value> value) {
+ v8::Local<v8::debug::GeneratorObject> generatorObject =
+ v8::debug::GeneratorObject::Cast(value);
+ if (!generatorObject->IsSuspended()) {
+ return create(generatorObject->Function());
+ }
+ v8::Local<v8::debug::Script> script;
+ if (!generatorObject->Script().ToLocal(&script)) return nullptr;
+ v8::debug::Location suspendedLocation =
+ generatorObject->SuspendedLocation();
+ return create(value, script->Id(), suspendedLocation.GetLineNumber(),
+ suspendedLocation.GetColumnNumber());
+ }
+
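+  // The wire format is a plain {scriptId, lineNumber, columnNumber}
+  // dictionary tagged with the "internal#location" subtype below.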
+ Response buildRemoteObject(
+ v8::Local<v8::Context> context, WrapMode mode,
+ std::unique_ptr<RemoteObject>* result) const override {
+ auto location = protocol::DictionaryValue::create();
+ location->setString("scriptId", String16::fromInteger(m_scriptId));
+ location->setInteger("lineNumber", m_lineNumber);
+ location->setInteger("columnNumber", m_columnNumber);
+ *result = RemoteObject::create()
+ .setType(RemoteObject::TypeEnum::Object)
+ .setSubtype("internal#location")
+ .setDescription("Object")
+ .setValue(std::move(location))
+ .build();
+ return Response::OK();
+ }
+ v8::Local<v8::Value> v8Value() const override { return m_value; }
+
+ private:
+ static std::unique_ptr<LocationMirror> create(v8::Local<v8::Value> value,
+ int scriptId, int lineNumber,
+ int columnNumber) {
+ if (scriptId == v8::UnboundScript::kNoScriptId) return nullptr;
+ if (lineNumber == v8::Function::kLineOffsetNotFound ||
+ columnNumber == v8::Function::kLineOffsetNotFound) {
+ return nullptr;
+ }
+ return std::unique_ptr<LocationMirror>(
+ new LocationMirror(value, scriptId, lineNumber, columnNumber));
+ }
+
+ LocationMirror(v8::Local<v8::Value> value, int scriptId, int lineNumber,
+ int columnNumber)
+ : m_value(value),
+ m_scriptId(scriptId),
+ m_lineNumber(lineNumber),
+ m_columnNumber(columnNumber) {}
+
+ v8::Local<v8::Value> m_value;
+ int m_scriptId;
+ int m_lineNumber;
+ int m_columnNumber;
+};
+
+class FunctionMirror final : public ValueMirror {
+ public:
+ explicit FunctionMirror(v8::Local<v8::Value> value)
+ : m_value(value.As<v8::Function>()) {}
+
+ v8::Local<v8::Value> v8Value() const override { return m_value; }
+
+ Response buildRemoteObject(
+ v8::Local<v8::Context> context, WrapMode mode,
+ std::unique_ptr<RemoteObject>* result) const override {
+ // TODO(alph): drop this functionality.
+ if (mode == WrapMode::kForceValue) {
+ std::unique_ptr<protocol::Value> protocolValue;
+ Response response = toProtocolValue(context, m_value, &protocolValue);
+ if (!response.isSuccess()) return response;
+ *result = RemoteObject::create()
+ .setType(RemoteObject::TypeEnum::Function)
+ .setValue(std::move(protocolValue))
+ .build();
+ } else {
+ *result = RemoteObject::create()
+ .setType(RemoteObject::TypeEnum::Function)
+ .setClassName(toProtocolStringWithTypeCheck(
+ context->GetIsolate(), m_value->GetConstructorName()))
+ .setDescription(descriptionForFunction(context, m_value))
+ .build();
+ }
+ return Response::OK();
+ }
+
+ void buildPropertyPreview(
+ v8::Local<v8::Context> context, const String16& name,
+ std::unique_ptr<PropertyPreview>* result) const override {
+ *result = PropertyPreview::create()
+ .setName(name)
+ .setType(RemoteObject::TypeEnum::Function)
+ .setValue(String16())
+ .build();
+ }
+ void buildEntryPreview(
+ v8::Local<v8::Context> context, int* nameLimit, int* indexLimit,
+ std::unique_ptr<ObjectPreview>* preview) const override {
+ *preview = ObjectPreview::create()
+ .setType(RemoteObject::TypeEnum::Function)
+ .setDescription(descriptionForFunction(context, m_value))
+ .setOverflow(false)
+ .setProperties(protocol::Array<PropertyPreview>::create())
+ .build();
+ }
+
+ private:
+ v8::Local<v8::Function> m_value;
+};
+
+bool isArrayLike(v8::Local<v8::Context> context, v8::Local<v8::Value> value,
+ size_t* length) {
+ if (!value->IsObject()) return false;
+ v8::Isolate* isolate = context->GetIsolate();
+ v8::TryCatch tryCatch(isolate);
+ v8::MicrotasksScope microtasksScope(isolate,
+ v8::MicrotasksScope::kDoNotRunMicrotasks);
+ v8::Local<v8::Object> object = value.As<v8::Object>();
+ v8::Local<v8::Value> spliceValue;
+ if (!object->IsArgumentsObject() &&
+ (!object->GetRealNamedProperty(context, toV8String(isolate, "splice"))
+ .ToLocal(&spliceValue) ||
+ !spliceValue->IsFunction())) {
+ return false;
+ }
+ v8::Local<v8::Value> lengthValue;
+ v8::Maybe<bool> result =
+ object->HasOwnProperty(context, toV8String(isolate, "length"));
+ if (result.IsNothing()) return false;
+ if (!result.FromJust() ||
+ !object->Get(context, toV8String(isolate, "length"))
+ .ToLocal(&lengthValue) ||
+ !lengthValue->IsUint32()) {
+ return false;
+ }
+ *length = v8::Local<v8::Uint32>::Cast(lengthValue)->Value();
+ return true;
+}
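+// Illustration: an arguments object, or {splice: function() {}, length: 3},
+// counts as array-like; {length: 3} alone does not, since a callable
+// "splice" (or being an arguments object) is required in addition to an own
+// Uint32 "length".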
+
+struct EntryMirror {
+ std::unique_ptr<ValueMirror> key;
+ std::unique_ptr<ValueMirror> value;
+
+ static bool getEntries(v8::Local<v8::Context> context,
+ v8::Local<v8::Object> object, size_t limit,
+ bool* overflow, std::vector<EntryMirror>* mirrors) {
+ bool isKeyValue = false;
+ v8::Local<v8::Array> entries;
+ if (!object->PreviewEntries(&isKeyValue).ToLocal(&entries)) return false;
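+    // When isKeyValue is true, |entries| is the flattened sequence
+    // key0, value0, key1, value1, ...; otherwise each slot is a bare value.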
+ for (uint32_t i = 0; i < entries->Length(); i += isKeyValue ? 2 : 1) {
+ v8::Local<v8::Value> tmp;
+
+ std::unique_ptr<ValueMirror> keyMirror;
+ if (isKeyValue && entries->Get(context, i).ToLocal(&tmp)) {
+ keyMirror = ValueMirror::create(context, tmp);
+ }
+ std::unique_ptr<ValueMirror> valueMirror;
+ if (entries->Get(context, isKeyValue ? i + 1 : i).ToLocal(&tmp)) {
+ valueMirror = ValueMirror::create(context, tmp);
+ } else {
+ continue;
+ }
+ if (mirrors->size() == limit) {
+ *overflow = true;
+ return true;
+ }
+ mirrors->emplace_back(
+ EntryMirror{std::move(keyMirror), std::move(valueMirror)});
+ }
+ return mirrors->size() > 0;
+ }
+};
+
+class PreviewPropertyAccumulator : public ValueMirror::PropertyAccumulator {
+ public:
+ PreviewPropertyAccumulator(const std::vector<String16>& blacklist,
+ int skipIndex, int* nameLimit, int* indexLimit,
+ bool* overflow,
+ std::vector<PropertyMirror>* mirrors)
+ : m_blacklist(blacklist),
+ m_skipIndex(skipIndex),
+ m_nameLimit(nameLimit),
+ m_indexLimit(indexLimit),
+ m_overflow(overflow),
+ m_mirrors(mirrors) {}
+
+ bool Add(PropertyMirror mirror) override {
+ if (mirror.exception) return true;
+ if ((!mirror.getter || !mirror.getter->v8Value()->IsFunction()) &&
+ !mirror.value) {
+ return true;
+ }
+ if (!mirror.isOwn) return true;
+ if (std::find(m_blacklist.begin(), m_blacklist.end(), mirror.name) !=
+ m_blacklist.end()) {
+ return true;
+ }
+ if (mirror.isIndex && m_skipIndex > 0) {
+ --m_skipIndex;
+ if (m_skipIndex > 0) return true;
+ }
+ int* limit = mirror.isIndex ? m_indexLimit : m_nameLimit;
+ if (!*limit) {
+ *m_overflow = true;
+ return false;
+ }
+ --*limit;
+ m_mirrors->push_back(std::move(mirror));
+ return true;
+ }
+
+ private:
+ std::vector<String16> m_blacklist;
+ int m_skipIndex;
+ int* m_nameLimit;
+ int* m_indexLimit;
+ bool* m_overflow;
+ std::vector<PropertyMirror>* m_mirrors;
+};
+
+bool getPropertiesForPreview(v8::Local<v8::Context> context,
+ v8::Local<v8::Object> object, int* nameLimit,
+ int* indexLimit, bool* overflow,
+ std::vector<PropertyMirror>* properties) {
+ std::vector<String16> blacklist;
+ size_t length = 0;
+ if (object->IsArray() || isArrayLike(context, object, &length) ||
+ object->IsStringObject()) {
+ blacklist.push_back("length");
+ } else {
+ auto clientSubtype = clientFor(context)->valueSubtype(object);
+ if (clientSubtype && toString16(clientSubtype->string()) == "array") {
+ blacklist.push_back("length");
+ }
+ }
+ if (object->IsArrayBuffer() || object->IsSharedArrayBuffer()) {
+ blacklist.push_back("[[Int8Array]]");
+ blacklist.push_back("[[Uint8Array]]");
+ blacklist.push_back("[[Int16Array]]");
+ blacklist.push_back("[[Int32Array]]");
+ }
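+  // These synthetic views are contributed by addTypedArrayViews() during
+  // property enumeration; previews filter them out to stay compact.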
+ int skipIndex = object->IsStringObject()
+ ? object.As<v8::StringObject>()->ValueOf()->Length() + 1
+ : -1;
+ PreviewPropertyAccumulator accumulator(blacklist, skipIndex, nameLimit,
+ indexLimit, overflow, properties);
+ return ValueMirror::getProperties(context, object, false, false,
+ &accumulator);
+}
+
+void getInternalPropertiesForPreview(
+ v8::Local<v8::Context> context, v8::Local<v8::Object> object,
+ int* nameLimit, bool* overflow,
+ std::vector<InternalPropertyMirror>* properties) {
+ std::vector<InternalPropertyMirror> mirrors;
+ ValueMirror::getInternalProperties(context, object, &mirrors);
+ std::vector<String16> whitelist;
+ if (object->IsBooleanObject() || object->IsNumberObject() ||
+ object->IsStringObject() || object->IsSymbolObject() ||
+ object->IsBigIntObject()) {
+ whitelist.emplace_back("[[PrimitiveValue]]");
+ } else if (object->IsPromise()) {
+ whitelist.emplace_back("[[PromiseStatus]]");
+ whitelist.emplace_back("[[PromiseValue]]");
+ } else if (object->IsGeneratorObject()) {
+ whitelist.emplace_back("[[GeneratorStatus]]");
+ }
+ for (auto& mirror : mirrors) {
+ if (std::find(whitelist.begin(), whitelist.end(), mirror.name) ==
+ whitelist.end()) {
+ continue;
+ }
+ if (!*nameLimit) {
+ *overflow = true;
+ return;
+ }
+ --*nameLimit;
+ properties->push_back(std::move(mirror));
+ }
+}
+
+class ObjectMirror final : public ValueMirror {
+ public:
+ ObjectMirror(v8::Local<v8::Value> value, const String16& description)
+ : m_value(value.As<v8::Object>()),
+ m_description(description),
+ m_hasSubtype(false) {}
+ ObjectMirror(v8::Local<v8::Value> value, const String16& subtype,
+ const String16& description)
+ : m_value(value.As<v8::Object>()),
+ m_description(description),
+ m_hasSubtype(true),
+ m_subtype(subtype) {}
+
+ v8::Local<v8::Value> v8Value() const override { return m_value; }
+
+ Response buildRemoteObject(
+ v8::Local<v8::Context> context, WrapMode mode,
+ std::unique_ptr<RemoteObject>* result) const override {
+ if (mode == WrapMode::kForceValue) {
+ std::unique_ptr<protocol::Value> protocolValue;
+ Response response = toProtocolValue(context, m_value, &protocolValue);
+ if (!response.isSuccess()) return response;
+ *result = RemoteObject::create()
+ .setType(RemoteObject::TypeEnum::Object)
+ .setValue(std::move(protocolValue))
+ .build();
+ } else {
+ v8::Isolate* isolate = context->GetIsolate();
+ *result = RemoteObject::create()
+ .setType(RemoteObject::TypeEnum::Object)
+ .setClassName(toProtocolString(
+ isolate, m_value->GetConstructorName()))
+ .setDescription(m_description)
+ .build();
+ if (m_hasSubtype) (*result)->setSubtype(m_subtype);
+ if (mode == WrapMode::kWithPreview) {
+ std::unique_ptr<ObjectPreview> previewValue;
+ int nameLimit = 5;
+ int indexLimit = 100;
+ buildObjectPreview(context, false, &nameLimit, &indexLimit,
+ &previewValue);
+ (*result)->setPreview(std::move(previewValue));
+ }
+ }
+ return Response::OK();
+ }
+
+ void buildObjectPreview(
+ v8::Local<v8::Context> context, bool generatePreviewForTable,
+ int* nameLimit, int* indexLimit,
+ std::unique_ptr<ObjectPreview>* result) const override {
+ buildObjectPreviewInternal(context, false /* forEntry */,
+ generatePreviewForTable, nameLimit, indexLimit,
+ result);
+ }
+
+ void buildEntryPreview(
+ v8::Local<v8::Context> context, int* nameLimit, int* indexLimit,
+ std::unique_ptr<ObjectPreview>* result) const override {
+ buildObjectPreviewInternal(context, true /* forEntry */,
+ false /* generatePreviewForTable */, nameLimit,
+ indexLimit, result);
+ }
+
+ void buildPropertyPreview(
+ v8::Local<v8::Context> context, const String16& name,
+ std::unique_ptr<PropertyPreview>* result) const override {
+ *result = PropertyPreview::create()
+ .setName(name)
+ .setType(RemoteObject::TypeEnum::Object)
+ .setValue(abbreviateString(
+ m_description,
+ m_subtype == RemoteObject::SubtypeEnum::Regexp ? kMiddle
+ : kEnd))
+ .build();
+ if (m_hasSubtype) (*result)->setSubtype(m_subtype);
+ }
+
+ private:
+ void buildObjectPreviewInternal(
+ v8::Local<v8::Context> context, bool forEntry,
+ bool generatePreviewForTable, int* nameLimit, int* indexLimit,
+ std::unique_ptr<ObjectPreview>* result) const {
+ std::unique_ptr<protocol::Array<PropertyPreview>> properties =
+ protocol::Array<PropertyPreview>::create();
+ std::unique_ptr<protocol::Array<EntryPreview>> entriesPreview;
+ bool overflow = false;
+
+ v8::Local<v8::Value> value = m_value;
+ while (value->IsProxy()) value = value.As<v8::Proxy>()->GetTarget();
+ if (value->IsObject() && !value->IsProxy()) {
+ v8::Local<v8::Object> objectForPreview = value.As<v8::Object>();
+ std::vector<InternalPropertyMirror> internalProperties;
+ getInternalPropertiesForPreview(context, objectForPreview, nameLimit,
+ &overflow, &internalProperties);
+ for (size_t i = 0; i < internalProperties.size(); ++i) {
+ std::unique_ptr<PropertyPreview> propertyPreview;
+ internalProperties[i].value->buildPropertyPreview(
+ context, internalProperties[i].name, &propertyPreview);
+ if (propertyPreview) {
+ properties->addItem(std::move(propertyPreview));
+ }
+ }
+
+ std::vector<PropertyMirror> mirrors;
+ if (getPropertiesForPreview(context, objectForPreview, nameLimit,
+ indexLimit, &overflow, &mirrors)) {
+ for (size_t i = 0; i < mirrors.size(); ++i) {
+ std::unique_ptr<PropertyPreview> preview;
+ std::unique_ptr<ObjectPreview> valuePreview;
+ if (mirrors[i].value) {
+ mirrors[i].value->buildPropertyPreview(context, mirrors[i].name,
+ &preview);
+ if (generatePreviewForTable) {
+ int tableLimit = 1000;
+ mirrors[i].value->buildObjectPreview(context, false, &tableLimit,
+ &tableLimit, &valuePreview);
+ }
+ } else {
+ preview = PropertyPreview::create()
+ .setName(mirrors[i].name)
+ .setType(PropertyPreview::TypeEnum::Accessor)
+ .build();
+ }
+ if (valuePreview) {
+ preview->setValuePreview(std::move(valuePreview));
+ }
+ properties->addItem(std::move(preview));
+ }
+ }
+
+ std::vector<EntryMirror> entries;
+ if (EntryMirror::getEntries(context, objectForPreview, 5, &overflow,
+ &entries)) {
+ if (forEntry) {
+ overflow = true;
+ } else {
+ entriesPreview = protocol::Array<EntryPreview>::create();
+ for (const auto& entry : entries) {
+ std::unique_ptr<ObjectPreview> valuePreview;
+ entry.value->buildEntryPreview(context, nameLimit, indexLimit,
+ &valuePreview);
+ if (!valuePreview) continue;
+ std::unique_ptr<ObjectPreview> keyPreview;
+ if (entry.key) {
+ entry.key->buildEntryPreview(context, nameLimit, indexLimit,
+ &keyPreview);
+ if (!keyPreview) continue;
+ }
+ std::unique_ptr<EntryPreview> entryPreview =
+ EntryPreview::create()
+ .setValue(std::move(valuePreview))
+ .build();
+ if (keyPreview) entryPreview->setKey(std::move(keyPreview));
+ entriesPreview->addItem(std::move(entryPreview));
+ }
+ }
+ }
+ }
+ *result = ObjectPreview::create()
+ .setType(RemoteObject::TypeEnum::Object)
+ .setDescription(m_description)
+ .setOverflow(overflow)
+ .setProperties(std::move(properties))
+ .build();
+ if (m_hasSubtype) (*result)->setSubtype(m_subtype);
+ if (entriesPreview) (*result)->setEntries(std::move(entriesPreview));
+ }
+
+ v8::Local<v8::Object> m_value;
+ String16 m_description;
+ bool m_hasSubtype;
+ String16 m_subtype;
+};
+
+void nativeGetterCallback(const v8::FunctionCallbackInfo<v8::Value>& info) {
+ v8::Local<v8::Object> data = info.Data().As<v8::Object>();
+ v8::Isolate* isolate = info.GetIsolate();
+ v8::Local<v8::Context> context = isolate->GetCurrentContext();
+ v8::Local<v8::Value> name;
+ if (!data->GetRealNamedProperty(context, toV8String(isolate, "name"))
+ .ToLocal(&name)) {
+ return;
+ }
+ v8::Local<v8::Value> object;
+ if (!data->GetRealNamedProperty(context, toV8String(isolate, "object"))
+ .ToLocal(&object) ||
+ !object->IsObject()) {
+ return;
+ }
+ v8::Local<v8::Value> value;
+ if (!object.As<v8::Object>()->Get(context, name).ToLocal(&value)) return;
+ info.GetReturnValue().Set(value);
+}
+
+std::unique_ptr<ValueMirror> createNativeGetter(v8::Local<v8::Context> context,
+ v8::Local<v8::Value> object,
+ v8::Local<v8::Name> name) {
+ v8::Isolate* isolate = context->GetIsolate();
+ v8::TryCatch tryCatch(isolate);
+
+ v8::Local<v8::Object> data = v8::Object::New(isolate);
+ if (data->Set(context, toV8String(isolate, "name"), name).IsNothing()) {
+ return nullptr;
+ }
+ if (data->Set(context, toV8String(isolate, "object"), object).IsNothing()) {
+ return nullptr;
+ }
+
+ v8::Local<v8::Function> function;
+ if (!v8::Function::New(context, nativeGetterCallback, data, 0,
+ v8::ConstructorBehavior::kThrow)
+ .ToLocal(&function)) {
+ return nullptr;
+ }
+ return ValueMirror::create(context, function);
+}
+
+void nativeSetterCallback(const v8::FunctionCallbackInfo<v8::Value>& info) {
+ if (info.Length() < 1) return;
+ v8::Local<v8::Object> data = info.Data().As<v8::Object>();
+ v8::Isolate* isolate = info.GetIsolate();
+ v8::Local<v8::Context> context = isolate->GetCurrentContext();
+ v8::Local<v8::Value> name;
+ if (!data->GetRealNamedProperty(context, toV8String(isolate, "name"))
+ .ToLocal(&name)) {
+ return;
+ }
+ v8::Local<v8::Value> object;
+ if (!data->GetRealNamedProperty(context, toV8String(isolate, "object"))
+ .ToLocal(&object) ||
+ !object->IsObject()) {
+ return;
+ }
+  // The Maybe<bool> result is consumed via IsNothing() to satisfy
+  // V8_WARN_UNUSED_RESULT; the callback itself returns nothing.
+  if (!object.As<v8::Object>()->Set(context, name, info[0]).IsNothing()) return;
+}
+
+std::unique_ptr<ValueMirror> createNativeSetter(v8::Local<v8::Context> context,
+ v8::Local<v8::Value> object,
+ v8::Local<v8::Name> name) {
+ v8::Isolate* isolate = context->GetIsolate();
+ v8::TryCatch tryCatch(isolate);
+
+ v8::Local<v8::Object> data = v8::Object::New(isolate);
+ if (data->Set(context, toV8String(isolate, "name"), name).IsNothing()) {
+ return nullptr;
+ }
+ if (data->Set(context, toV8String(isolate, "object"), object).IsNothing()) {
+ return nullptr;
+ }
+
+ v8::Local<v8::Function> function;
+ if (!v8::Function::New(context, nativeSetterCallback, data, 1,
+ v8::ConstructorBehavior::kThrow)
+ .ToLocal(&function)) {
+ return nullptr;
+ }
+ return ValueMirror::create(context, function);
+}
+
+bool doesAttributeHaveObservableSideEffectOnGet(v8::Local<v8::Context> context,
+ v8::Local<v8::Object> object,
+ v8::Local<v8::Name> name) {
+ // TODO(dgozman): we should remove this, annotate more embedder properties as
+ // side-effect free, and call all getters which do not produce side effects.
+ if (!name->IsString()) return false;
+ v8::Isolate* isolate = context->GetIsolate();
+ if (!name.As<v8::String>()->StringEquals(toV8String(isolate, "body"))) {
+ return false;
+ }
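+  // "body" is special-cased presumably because the Request/Response body
+  // getter materializes a ReadableStream, an observable effect a debugger
+  // evaluation must not trigger.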
+
+ v8::TryCatch tryCatch(isolate);
+ v8::Local<v8::Value> request;
+ if (context->Global()
+ ->GetRealNamedProperty(context, toV8String(isolate, "Request"))
+ .ToLocal(&request)) {
+ if (request->IsObject() &&
+ object->InstanceOf(context, request.As<v8::Object>())
+ .FromMaybe(false)) {
+ return true;
+ }
+ }
+ if (tryCatch.HasCaught()) tryCatch.Reset();
+
+ v8::Local<v8::Value> response;
+ if (context->Global()
+ ->GetRealNamedProperty(context, toV8String(isolate, "Response"))
+ .ToLocal(&response)) {
+ if (response->IsObject() &&
+ object->InstanceOf(context, response.As<v8::Object>())
+ .FromMaybe(false)) {
+ return true;
+ }
+ }
+ return false;
+}
+template <typename ArrayView, typename ArrayBuffer>
+void addTypedArrayView(v8::Local<v8::Context> context,
+ v8::Local<ArrayBuffer> buffer, size_t length,
+ const char* name,
+ ValueMirror::PropertyAccumulator* accumulator) {
+ accumulator->Add(PropertyMirror{
+ String16(name), false, false, false, true, false,
+ ValueMirror::create(context, ArrayView::New(buffer, 0, length)), nullptr,
+ nullptr, nullptr, nullptr});
+}
+
+template <typename ArrayBuffer>
+void addTypedArrayViews(v8::Local<v8::Context> context,
+ v8::Local<ArrayBuffer> buffer,
+ ValueMirror::PropertyAccumulator* accumulator) {
+ // TODO(alph): these should be internal properties.
+ size_t length = buffer->ByteLength();
+ addTypedArrayView<v8::Int8Array>(context, buffer, length, "[[Int8Array]]",
+ accumulator);
+ addTypedArrayView<v8::Uint8Array>(context, buffer, length, "[[Uint8Array]]",
+ accumulator);
+ if (buffer->ByteLength() % 2 == 0) {
+ addTypedArrayView<v8::Int16Array>(context, buffer, length / 2,
+ "[[Int16Array]]", accumulator);
+ }
+ if (buffer->ByteLength() % 4 == 0) {
+ addTypedArrayView<v8::Int32Array>(context, buffer, length / 4,
+ "[[Int32Array]]", accumulator);
+ }
+}
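+// Illustration: a 6-byte buffer gets [[Int8Array]] and [[Uint8Array]] views
+// of length 6 and an [[Int16Array]] view of length 3, but no [[Int32Array]]
+// view, since 6 is not a multiple of 4.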
+} // anonymous namespace
+
+ValueMirror::~ValueMirror() = default;
+
+// static
+bool ValueMirror::getProperties(v8::Local<v8::Context> context,
+ v8::Local<v8::Object> object,
+ bool ownProperties, bool accessorPropertiesOnly,
+ PropertyAccumulator* accumulator) {
+ v8::Isolate* isolate = context->GetIsolate();
+ v8::TryCatch tryCatch(isolate);
+ v8::Local<v8::Set> set = v8::Set::New(isolate);
+
+ v8::MicrotasksScope microtasksScope(isolate,
+ v8::MicrotasksScope::kDoNotRunMicrotasks);
+ V8InternalValueType internalType = v8InternalValueTypeFrom(context, object);
+ if (internalType == V8InternalValueType::kScope) {
+ v8::Local<v8::Value> value;
+ if (!object->Get(context, toV8String(isolate, "object")).ToLocal(&value) ||
+ !value->IsObject()) {
+ return false;
+ } else {
+ object = value.As<v8::Object>();
+ }
+ }
+ if (internalType == V8InternalValueType::kScopeList) {
+ if (!set->Add(context, toV8String(isolate, "length")).ToLocal(&set)) {
+ return false;
+ }
+ }
+ bool shouldSkipProto = internalType == V8InternalValueType::kScopeList;
+
+ bool formatAccessorsAsProperties =
+ clientFor(context)->formatAccessorsAsProperties(object);
+
+ if (object->IsArrayBuffer()) {
+ addTypedArrayViews(context, object.As<v8::ArrayBuffer>(), accumulator);
+ }
+ if (object->IsSharedArrayBuffer()) {
+ addTypedArrayViews(context, object.As<v8::SharedArrayBuffer>(),
+ accumulator);
+ }
+
+ for (auto iterator = v8::debug::PropertyIterator::Create(object);
+ !iterator->Done(); iterator->Advance()) {
+ bool isOwn = iterator->is_own();
+ if (!isOwn && ownProperties) break;
+ v8::Local<v8::Name> v8Name = iterator->name();
+ v8::Maybe<bool> result = set->Has(context, v8Name);
+ if (result.IsNothing()) return false;
+ if (result.FromJust()) continue;
+ if (!set->Add(context, v8Name).ToLocal(&set)) return false;
+
+ String16 name;
+ std::unique_ptr<ValueMirror> symbolMirror;
+ if (v8Name->IsString()) {
+ name = toProtocolString(isolate, v8Name.As<v8::String>());
+ } else {
+ v8::Local<v8::Symbol> symbol = v8Name.As<v8::Symbol>();
+ name = descriptionForSymbol(context, symbol);
+ symbolMirror = ValueMirror::create(context, symbol);
+ }
+
+ v8::PropertyAttribute attributes;
+ std::unique_ptr<ValueMirror> valueMirror;
+ std::unique_ptr<ValueMirror> getterMirror;
+ std::unique_ptr<ValueMirror> setterMirror;
+ std::unique_ptr<ValueMirror> exceptionMirror;
+ bool writable = false;
+ bool enumerable = false;
+ bool configurable = false;
+
+ bool isAccessorProperty = false;
+ v8::TryCatch tryCatch(isolate);
+ if (!iterator->attributes().To(&attributes)) {
+ exceptionMirror = ValueMirror::create(context, tryCatch.Exception());
+ } else {
+ if (iterator->is_native_accessor()) {
+ if (iterator->has_native_getter()) {
+ getterMirror = createNativeGetter(context, object, v8Name);
+ }
+ if (iterator->has_native_setter()) {
+ setterMirror = createNativeSetter(context, object, v8Name);
+ }
+ writable = !(attributes & v8::PropertyAttribute::ReadOnly);
+ enumerable = !(attributes & v8::PropertyAttribute::DontEnum);
+ configurable = !(attributes & v8::PropertyAttribute::DontDelete);
+ isAccessorProperty = getterMirror || setterMirror;
+ } else {
+ v8::TryCatch tryCatch(isolate);
+ v8::debug::PropertyDescriptor descriptor;
+ if (!iterator->descriptor().To(&descriptor)) {
+ exceptionMirror = ValueMirror::create(context, tryCatch.Exception());
+ } else {
+ writable = descriptor.has_writable ? descriptor.writable : false;
+ enumerable =
+ descriptor.has_enumerable ? descriptor.enumerable : false;
+ configurable =
+ descriptor.has_configurable ? descriptor.configurable : false;
+ if (!descriptor.value.IsEmpty()) {
+ valueMirror = ValueMirror::create(context, descriptor.value);
+ }
+ bool getterIsNativeFunction = false;
+ if (!descriptor.get.IsEmpty()) {
+ v8::Local<v8::Value> get = descriptor.get;
+ getterMirror = ValueMirror::create(context, get);
+ getterIsNativeFunction =
+ get->IsFunction() && get.As<v8::Function>()->ScriptId() ==
+ v8::UnboundScript::kNoScriptId;
+ }
+ if (!descriptor.set.IsEmpty()) {
+ setterMirror = ValueMirror::create(context, descriptor.set);
+ }
+ isAccessorProperty = getterMirror || setterMirror;
+ bool isSymbolDescription =
+ object->IsSymbol() && name == "description";
+ if (isSymbolDescription ||
+ (name != "__proto__" && getterIsNativeFunction &&
+ formatAccessorsAsProperties &&
+ !doesAttributeHaveObservableSideEffectOnGet(context, object,
+ v8Name))) {
+ v8::TryCatch tryCatch(isolate);
+ v8::Local<v8::Value> value;
+ if (object->Get(context, v8Name).ToLocal(&value)) {
+ valueMirror = ValueMirror::create(context, value);
+ isOwn = true;
+ setterMirror = nullptr;
+ getterMirror = nullptr;
+ }
+ }
+ }
+ }
+ }
+ if (accessorPropertiesOnly && !isAccessorProperty) continue;
+ auto mirror = PropertyMirror{name,
+ writable,
+ configurable,
+ enumerable,
+ isOwn,
+ iterator->is_array_index(),
+ std::move(valueMirror),
+ std::move(getterMirror),
+ std::move(setterMirror),
+ std::move(symbolMirror),
+ std::move(exceptionMirror)};
+ if (!accumulator->Add(std::move(mirror))) return true;
+ }
+ if (!shouldSkipProto && ownProperties && !object->IsProxy() &&
+ !accessorPropertiesOnly) {
+ v8::Local<v8::Value> prototype = object->GetPrototype();
+ if (prototype->IsObject()) {
+ accumulator->Add(PropertyMirror{String16("__proto__"), true, true, false,
+ true, false,
+ ValueMirror::create(context, prototype),
+ nullptr, nullptr, nullptr, nullptr});
+ }
+ }
+ return true;
+}
+
+// static
+void ValueMirror::getInternalProperties(
+ v8::Local<v8::Context> context, v8::Local<v8::Object> object,
+ std::vector<InternalPropertyMirror>* mirrors) {
+ v8::Isolate* isolate = context->GetIsolate();
+ v8::MicrotasksScope microtasksScope(isolate,
+ v8::MicrotasksScope::kDoNotRunMicrotasks);
+ v8::TryCatch tryCatch(isolate);
+ if (object->IsFunction()) {
+ v8::Local<v8::Function> function = object.As<v8::Function>();
+ auto location = LocationMirror::create(function);
+ if (location) {
+ mirrors->emplace_back(InternalPropertyMirror{
+ String16("[[FunctionLocation]]"), std::move(location)});
+ }
+ if (function->IsGeneratorFunction()) {
+ mirrors->emplace_back(InternalPropertyMirror{
+ String16("[[IsGenerator]]"),
+ ValueMirror::create(context, v8::True(context->GetIsolate()))});
+ }
+ }
+ if (object->IsGeneratorObject()) {
+ auto location = LocationMirror::createForGenerator(object);
+ if (location) {
+ mirrors->emplace_back(InternalPropertyMirror{
+ String16("[[GeneratorLocation]]"), std::move(location)});
+ }
+ }
+ V8Debugger* debugger =
+ static_cast<V8InspectorImpl*>(v8::debug::GetInspector(isolate))
+ ->debugger();
+ v8::Local<v8::Array> properties;
+ if (debugger->internalProperties(context, object).ToLocal(&properties)) {
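+    // |properties| is a flat array of alternating name/value pairs, hence
+    // the stride-two loop below.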
+ for (uint32_t i = 0; i < properties->Length(); i += 2) {
+ v8::Local<v8::Value> name;
+ if (!properties->Get(context, i).ToLocal(&name) || !name->IsString()) {
+ tryCatch.Reset();
+ continue;
+ }
+ v8::Local<v8::Value> value;
+ if (!properties->Get(context, i + 1).ToLocal(&value)) {
+ tryCatch.Reset();
+ continue;
+ }
+ auto wrapper = ValueMirror::create(context, value);
+ if (wrapper) {
+ mirrors->emplace_back(InternalPropertyMirror{
+ toProtocolStringWithTypeCheck(context->GetIsolate(), name),
+ std::move(wrapper)});
+ }
+ }
+ }
+}
+
+String16 descriptionForNode(v8::Local<v8::Context> context,
+ v8::Local<v8::Value> value) {
+ if (!value->IsObject()) return String16();
+ v8::Local<v8::Object> object = value.As<v8::Object>();
+ v8::Isolate* isolate = context->GetIsolate();
+ v8::TryCatch tryCatch(isolate);
+ v8::Local<v8::Value> nodeName;
+ if (!object->Get(context, toV8String(isolate, "nodeName"))
+ .ToLocal(&nodeName)) {
+ return String16();
+ }
+ String16 description;
+ v8::Local<v8::Function> toLowerCase =
+ v8::debug::GetBuiltin(isolate, v8::debug::kStringToLowerCase);
+ if (nodeName->IsString()) {
+ if (!toLowerCase->Call(context, nodeName, 0, nullptr).ToLocal(&nodeName))
+ return String16();
+ if (nodeName->IsString()) {
+ description = toProtocolString(isolate, nodeName.As<v8::String>());
+ }
+ }
+ if (!description.length()) {
+ v8::Local<v8::Value> value;
+ if (!object->Get(context, toV8String(isolate, "constructor"))
+ .ToLocal(&value) ||
+ !value->IsObject()) {
+ return String16();
+ }
+ if (!value.As<v8::Object>()
+ ->Get(context, toV8String(isolate, "name"))
+ .ToLocal(&value) ||
+ !value->IsString()) {
+ return String16();
+ }
+ description = toProtocolString(isolate, value.As<v8::String>());
+ }
+ v8::Local<v8::Value> nodeType;
+ if (!object->Get(context, toV8String(isolate, "nodeType"))
+ .ToLocal(&nodeType) ||
+ !nodeType->IsInt32()) {
+ return description;
+ }
+  if (nodeType.As<v8::Int32>()->Value() == 1) {  // ELEMENT_NODE
+ v8::Local<v8::Value> idValue;
+ if (!object->Get(context, toV8String(isolate, "id")).ToLocal(&idValue)) {
+ return description;
+ }
+ if (idValue->IsString()) {
+ String16 id = toProtocolString(isolate, idValue.As<v8::String>());
+ if (id.length()) {
+ description = String16::concat(description, '#', id);
+ }
+ }
+ v8::Local<v8::Value> classNameValue;
+ if (!object->Get(context, toV8String(isolate, "className"))
+ .ToLocal(&classNameValue)) {
+ return description;
+ }
+ if (classNameValue->IsString() &&
+ classNameValue.As<v8::String>()->Length()) {
+ String16 classes =
+ toProtocolString(isolate, classNameValue.As<v8::String>());
+ String16Builder output;
+ bool previousIsDot = false;
+ for (size_t i = 0; i < classes.length(); ++i) {
+ if (classes[i] == ' ') {
+ if (!previousIsDot) {
+ output.append('.');
+ previousIsDot = true;
+ }
+ } else {
+ output.append(classes[i]);
+ previousIsDot = classes[i] == '.';
+ }
+ }
+ description = String16::concat(description, '.', output.toString());
+ }
+  } else if (nodeType.As<v8::Int32>()->Value() == 10) {  // DOCUMENT_TYPE_NODE
+ return String16::concat("<!DOCTYPE ", description, '>');
+ }
+ return description;
+}
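+// Illustration: an element <div id="app" class="col main"> is described as
+// "div#app.col.main" -- the lowercased tag name, "#" plus the id, and the
+// class list joined with dots (runs of spaces collapse to a single dot).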
+
+std::unique_ptr<ValueMirror> clientMirror(v8::Local<v8::Context> context,
+ v8::Local<v8::Value> value,
+ const String16& subtype) {
+ // TODO(alph): description and length retrieval should move to embedder.
+ if (subtype == "node") {
+ return v8::base::make_unique<ObjectMirror>(
+ value, subtype, descriptionForNode(context, value));
+ }
+ if (subtype == "error") {
+ return v8::base::make_unique<ObjectMirror>(
+ value, RemoteObject::SubtypeEnum::Error,
+ descriptionForError(context, value.As<v8::Object>(),
+ ErrorType::kClient));
+ }
+ if (subtype == "array" && value->IsObject()) {
+ v8::Isolate* isolate = context->GetIsolate();
+ v8::TryCatch tryCatch(isolate);
+ v8::Local<v8::Object> object = value.As<v8::Object>();
+ v8::Local<v8::Value> lengthValue;
+ if (object->Get(context, toV8String(isolate, "length"))
+ .ToLocal(&lengthValue)) {
+ if (lengthValue->IsInt32()) {
+ return v8::base::make_unique<ObjectMirror>(
+ value, RemoteObject::SubtypeEnum::Array,
+ descriptionForCollection(isolate, object,
+ lengthValue.As<v8::Int32>()->Value()));
+ }
+ }
+ }
+ return v8::base::make_unique<ObjectMirror>(
+ value,
+ descriptionForObject(context->GetIsolate(), value.As<v8::Object>()));
+}
+
+std::unique_ptr<ValueMirror> ValueMirror::create(v8::Local<v8::Context> context,
+ v8::Local<v8::Value> value) {
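+  // Dispatch order matters: most primitives are matched first, but undefined
+  // is deferred until after the embedder subtype check (the client may
+  // attach a subtype to it), and a plain ObjectMirror is the final fallback.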
+ if (value->IsNull()) {
+ return v8::base::make_unique<PrimitiveValueMirror>(
+ value, RemoteObject::TypeEnum::Object);
+ }
+ if (value->IsBoolean()) {
+ return v8::base::make_unique<PrimitiveValueMirror>(
+ value, RemoteObject::TypeEnum::Boolean);
+ }
+ if (value->IsNumber()) {
+ return v8::base::make_unique<NumberMirror>(value.As<v8::Number>());
+ }
+ v8::Isolate* isolate = context->GetIsolate();
+ if (value->IsString()) {
+ return v8::base::make_unique<PrimitiveValueMirror>(
+ value, RemoteObject::TypeEnum::String);
+ }
+ if (value->IsBigInt()) {
+ return v8::base::make_unique<BigIntMirror>(value.As<v8::BigInt>());
+ }
+ if (value->IsSymbol()) {
+ return v8::base::make_unique<SymbolMirror>(value.As<v8::Symbol>());
+ }
+ auto clientSubtype = (value->IsUndefined() || value->IsObject())
+ ? clientFor(context)->valueSubtype(value)
+ : nullptr;
+ if (clientSubtype) {
+ String16 subtype = toString16(clientSubtype->string());
+ return clientMirror(context, value, subtype);
+ }
+ if (value->IsUndefined()) {
+ return v8::base::make_unique<PrimitiveValueMirror>(
+ value, RemoteObject::TypeEnum::Undefined);
+ }
+ if (value->IsRegExp()) {
+ return v8::base::make_unique<ObjectMirror>(
+ value, RemoteObject::SubtypeEnum::Regexp,
+ descriptionForRegExp(isolate, value.As<v8::RegExp>()));
+ }
+ if (value->IsFunction()) {
+ return v8::base::make_unique<FunctionMirror>(value);
+ }
+ if (value->IsProxy()) {
+ return v8::base::make_unique<ObjectMirror>(
+ value, RemoteObject::SubtypeEnum::Proxy, "Proxy");
+ }
+ if (value->IsDate()) {
+ return v8::base::make_unique<ObjectMirror>(
+ value, RemoteObject::SubtypeEnum::Date,
+ descriptionForDate(context, value.As<v8::Date>()));
+ }
+ if (value->IsPromise()) {
+ v8::Local<v8::Promise> promise = value.As<v8::Promise>();
+ return v8::base::make_unique<ObjectMirror>(
+ promise, RemoteObject::SubtypeEnum::Promise,
+ descriptionForObject(isolate, promise));
+ }
+ if (value->IsNativeError()) {
+ return v8::base::make_unique<ObjectMirror>(
+ value, RemoteObject::SubtypeEnum::Error,
+ descriptionForError(context, value.As<v8::Object>(),
+ ErrorType::kNative));
+ }
+ if (value->IsMap()) {
+ v8::Local<v8::Map> map = value.As<v8::Map>();
+ return v8::base::make_unique<ObjectMirror>(
+ value, RemoteObject::SubtypeEnum::Map,
+ descriptionForCollection(isolate, map, map->Size()));
+ }
+ if (value->IsSet()) {
+ v8::Local<v8::Set> set = value.As<v8::Set>();
+ return v8::base::make_unique<ObjectMirror>(
+ value, RemoteObject::SubtypeEnum::Set,
+ descriptionForCollection(isolate, set, set->Size()));
+ }
+ if (value->IsWeakMap()) {
+ return v8::base::make_unique<ObjectMirror>(
+ value, RemoteObject::SubtypeEnum::Weakmap,
+ descriptionForObject(isolate, value.As<v8::Object>()));
+ }
+ if (value->IsWeakSet()) {
+ return v8::base::make_unique<ObjectMirror>(
+ value, RemoteObject::SubtypeEnum::Weakset,
+ descriptionForObject(isolate, value.As<v8::Object>()));
+ }
+ if (value->IsMapIterator() || value->IsSetIterator()) {
+ return v8::base::make_unique<ObjectMirror>(
+ value, RemoteObject::SubtypeEnum::Iterator,
+ descriptionForObject(isolate, value.As<v8::Object>()));
+ }
+ if (value->IsGeneratorObject()) {
+ v8::Local<v8::Object> object = value.As<v8::Object>();
+ return v8::base::make_unique<ObjectMirror>(
+ object, RemoteObject::SubtypeEnum::Generator,
+ descriptionForObject(isolate, object));
+ }
+ if (value->IsTypedArray()) {
+ v8::Local<v8::TypedArray> array = value.As<v8::TypedArray>();
+ return v8::base::make_unique<ObjectMirror>(
+ value, RemoteObject::SubtypeEnum::Typedarray,
+ descriptionForCollection(isolate, array, array->Length()));
+ }
+ if (value->IsArrayBuffer()) {
+ v8::Local<v8::ArrayBuffer> buffer = value.As<v8::ArrayBuffer>();
+ return v8::base::make_unique<ObjectMirror>(
+ value, RemoteObject::SubtypeEnum::Arraybuffer,
+ descriptionForCollection(isolate, buffer, buffer->ByteLength()));
+ }
+ if (value->IsSharedArrayBuffer()) {
+ v8::Local<v8::SharedArrayBuffer> buffer = value.As<v8::SharedArrayBuffer>();
+ return v8::base::make_unique<ObjectMirror>(
+ value, RemoteObject::SubtypeEnum::Arraybuffer,
+ descriptionForCollection(isolate, buffer, buffer->ByteLength()));
+ }
+ if (value->IsDataView()) {
+ v8::Local<v8::DataView> view = value.As<v8::DataView>();
+ return v8::base::make_unique<ObjectMirror>(
+ value, RemoteObject::SubtypeEnum::Dataview,
+ descriptionForCollection(isolate, view, view->ByteLength()));
+ }
+ V8InternalValueType internalType =
+ v8InternalValueTypeFrom(context, v8::Local<v8::Object>::Cast(value));
+ if (value->IsArray() && internalType == V8InternalValueType::kScopeList) {
+ return v8::base::make_unique<ObjectMirror>(
+ value, "internal#scopeList",
+ descriptionForScopeList(value.As<v8::Array>()));
+ }
+ if (value->IsObject() && internalType == V8InternalValueType::kEntry) {
+ return v8::base::make_unique<ObjectMirror>(
+ value, "internal#entry",
+ descriptionForEntry(context, value.As<v8::Object>()));
+ }
+ if (value->IsObject() && internalType == V8InternalValueType::kScope) {
+ return v8::base::make_unique<ObjectMirror>(
+ value, "internal#scope",
+ descriptionForScope(context, value.As<v8::Object>()));
+ }
+ size_t length = 0;
+ if (value->IsArray() || isArrayLike(context, value, &length)) {
+ length = value->IsArray() ? value.As<v8::Array>()->Length() : length;
+ return v8::base::make_unique<ObjectMirror>(
+ value, RemoteObject::SubtypeEnum::Array,
+ descriptionForCollection(isolate, value.As<v8::Object>(), length));
+ }
+ if (value->IsObject()) {
+ return v8::base::make_unique<ObjectMirror>(
+ value, descriptionForObject(isolate, value.As<v8::Object>()));
+ }
+ return nullptr;
+}
+} // namespace v8_inspector
diff --git a/deps/v8/src/inspector/value-mirror.h b/deps/v8/src/inspector/value-mirror.h
new file mode 100644
index 0000000000..0a464b30bf
--- /dev/null
+++ b/deps/v8/src/inspector/value-mirror.h
@@ -0,0 +1,82 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INSPECTOR_VALUE_MIRROR_H_
+#define V8_INSPECTOR_VALUE_MIRROR_H_
+
+#include <memory>
+
+#include "src/base/macros.h"
+#include "src/inspector/protocol/Protocol.h"
+#include "src/inspector/protocol/Runtime.h"
+#include "src/inspector/string-16.h"
+
+#include "include/v8-inspector.h"
+#include "include/v8.h"
+
+namespace v8_inspector {
+
+class ValueMirror;
+enum class WrapMode;
+
+struct InternalPropertyMirror {
+ String16 name;
+ std::unique_ptr<ValueMirror> value;
+};
+
+struct PropertyMirror {
+ String16 name;
+ bool writable;
+ bool configurable;
+ bool enumerable;
+ bool isOwn;
+ bool isIndex;
+ std::unique_ptr<ValueMirror> value;
+ std::unique_ptr<ValueMirror> getter;
+ std::unique_ptr<ValueMirror> setter;
+ std::unique_ptr<ValueMirror> symbol;
+ std::unique_ptr<ValueMirror> exception;
+};
+
+class ValueMirror {
+ public:
+ virtual ~ValueMirror();
+
+ static std::unique_ptr<ValueMirror> create(v8::Local<v8::Context> context,
+ v8::Local<v8::Value> value);
+ virtual protocol::Response buildRemoteObject(
+ v8::Local<v8::Context> context, WrapMode mode,
+ std::unique_ptr<protocol::Runtime::RemoteObject>* result) const = 0;
+ virtual void buildPropertyPreview(
+ v8::Local<v8::Context> context, const String16& name,
+ std::unique_ptr<protocol::Runtime::PropertyPreview>*) const {}
+ virtual void buildObjectPreview(
+ v8::Local<v8::Context> context, bool generatePreviewForTable,
+ int* nameLimit, int* indexLimit,
+ std::unique_ptr<protocol::Runtime::ObjectPreview>*) const {}
+ virtual void buildEntryPreview(
+ v8::Local<v8::Context> context, int* nameLimit, int* indexLimit,
+ std::unique_ptr<protocol::Runtime::ObjectPreview>*) const {}
+ virtual v8::Local<v8::Value> v8Value() const = 0;
+
+ class PropertyAccumulator {
+ public:
+ virtual ~PropertyAccumulator() = default;
+ virtual bool Add(PropertyMirror mirror) = 0;
+ };
+ static bool getProperties(v8::Local<v8::Context> context,
+ v8::Local<v8::Object> object, bool ownProperties,
+ bool accessorPropertiesOnly,
+ PropertyAccumulator* accumulator);
+ static void getInternalProperties(
+ v8::Local<v8::Context> context, v8::Local<v8::Object> object,
+ std::vector<InternalPropertyMirror>* mirrors);
+};
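+// Minimal usage sketch (assuming a live context and value):
+//   std::unique_ptr<ValueMirror> mirror = ValueMirror::create(context, value);
+//   std::unique_ptr<protocol::Runtime::RemoteObject> remote;
+//   if (mirror) mirror->buildRemoteObject(context, mode, &remote);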
+} // namespace v8_inspector
+
+#endif // V8_INSPECTOR_VALUE_MIRROR_H_
diff --git a/deps/v8/src/inspector/wasm-translation.cc b/deps/v8/src/inspector/wasm-translation.cc
index c5d1f8c6a2..4836a6bc4a 100644
--- a/deps/v8/src/inspector/wasm-translation.cc
+++ b/deps/v8/src/inspector/wasm-translation.cc
@@ -67,52 +67,13 @@ class WasmTranslation::TranslatorImpl {
column(column) {}
};
- virtual void Init(v8::Isolate*, WasmTranslation*, V8DebuggerAgentImpl*) = 0;
- virtual void Translate(TransLocation*) = 0;
- virtual void TranslateBack(TransLocation*) = 0;
- virtual const WasmSourceInformation& GetSourceInformation(v8::Isolate*,
- int index) = 0;
- virtual const String16 GetHash(v8::Isolate*, int index) = 0;
-
- virtual ~TranslatorImpl() = default;
-
- class RawTranslator;
- class DisassemblingTranslator;
-};
-
-class WasmTranslation::TranslatorImpl::RawTranslator
- : public WasmTranslation::TranslatorImpl {
- public:
- void Init(v8::Isolate*, WasmTranslation*, V8DebuggerAgentImpl*) override {}
- void Translate(TransLocation*) override {}
- void TranslateBack(TransLocation*) override {}
- const WasmSourceInformation& GetSourceInformation(v8::Isolate*,
- int index) override {
- // NOTE(mmarchini): prior to 3.9, clang won't accept const object
- // instantiations with non-user-provided default constructors, unless an
- // empty initializer is explicitly given. Node.js still supports older
- // clang versions, therefore we must take care when using const objects
- // with default constructors. For more informations, please refer to CWG
- // 253 (http://open-std.org/jtc1/sc22/wg21/docs/cwg_defects.html#253)
- static const WasmSourceInformation singleEmptySourceInformation = {};
- return singleEmptySourceInformation;
- }
- const String16 GetHash(v8::Isolate*, int index) override {
- // TODO(herhut): Find useful hash default value.
- return String16();
+ TranslatorImpl(v8::Isolate* isolate, v8::Local<v8::debug::WasmScript> script)
+ : script_(isolate, script) {
+ script_.AnnotateStrongRetainer(kGlobalScriptHandleLabel);
}
-};
-
-class WasmTranslation::TranslatorImpl::DisassemblingTranslator
- : public WasmTranslation::TranslatorImpl {
-
- public:
- DisassemblingTranslator(v8::Isolate* isolate,
- v8::Local<v8::debug::WasmScript> script)
- : script_(isolate, script) {}
void Init(v8::Isolate* isolate, WasmTranslation* translation,
- V8DebuggerAgentImpl* agent) override {
+ V8DebuggerAgentImpl* agent) {
// Register fake scripts for each function in this wasm module/script.
v8::Local<v8::debug::WasmScript> script = script_.Get(isolate);
int num_functions = script->NumFunctions();
@@ -127,7 +88,7 @@ class WasmTranslation::TranslatorImpl::DisassemblingTranslator
}
}
- void Translate(TransLocation* loc) override {
+ void Translate(TransLocation* loc) {
const OffsetTable& offset_table = GetOffsetTable(loc);
DCHECK(!offset_table.empty());
uint32_t byte_offset = static_cast<uint32_t>(loc->column);
@@ -160,7 +121,7 @@ class WasmTranslation::TranslatorImpl::DisassemblingTranslator
(entry.line == loc.line && entry.column < loc.column);
}
- void TranslateBack(TransLocation* loc) override {
+ void TranslateBack(TransLocation* loc) {
v8::Isolate* isolate = loc->translation->isolate_;
int func_index = GetFunctionIndexFromFakeScriptId(loc->script_id);
const OffsetTable& reverse_table = GetReverseTable(isolate, func_index);
@@ -192,7 +153,7 @@ class WasmTranslation::TranslatorImpl::DisassemblingTranslator
}
const WasmSourceInformation& GetSourceInformation(v8::Isolate* isolate,
- int index) override {
+ int index) {
auto it = source_informations_.find(index);
if (it != source_informations_.end()) return it->second;
v8::HandleScope scope(isolate);
@@ -207,7 +168,7 @@ class WasmTranslation::TranslatorImpl::DisassemblingTranslator
return inserted.first->second;
}
- const String16 GetHash(v8::Isolate* isolate, int index) override {
+ const String16 GetHash(v8::Isolate* isolate, int index) {
v8::HandleScope scope(isolate);
v8::Local<v8::debug::WasmScript> script = script_.Get(isolate);
uint32_t hash = script->GetFunctionHash(index);
@@ -216,6 +177,12 @@ class WasmTranslation::TranslatorImpl::DisassemblingTranslator
return builder.toString();
}
+ int GetContextId(v8::Isolate* isolate) {
+ v8::HandleScope scope(isolate);
+ v8::Local<v8::debug::WasmScript> script = script_.Get(isolate);
+ return script->ContextId().FromMaybe(0);
+ }
+
private:
String16 GetFakeScriptUrl(v8::Isolate* isolate, int func_index) {
v8::Local<v8::debug::WasmScript> script = script_.Get(isolate);
@@ -279,6 +246,9 @@ class WasmTranslation::TranslatorImpl::DisassemblingTranslator
return GetSourceInformation(isolate, func_index).reverse_offset_table;
}
+ static constexpr char kGlobalScriptHandleLabel[] =
+ "WasmTranslation::TranslatorImpl::script_";
+
v8::Global<v8::debug::WasmScript> script_;
// We assume to only disassemble a subset of the functions, so store them in a
@@ -286,22 +256,16 @@ class WasmTranslation::TranslatorImpl::DisassemblingTranslator
std::unordered_map<int, WasmSourceInformation> source_informations_;
};
-WasmTranslation::WasmTranslation(v8::Isolate* isolate)
- : isolate_(isolate), mode_(Disassemble) {}
+constexpr char WasmTranslation::TranslatorImpl::kGlobalScriptHandleLabel[];
+
+WasmTranslation::WasmTranslation(v8::Isolate* isolate) : isolate_(isolate) {}
WasmTranslation::~WasmTranslation() { Clear(); }
void WasmTranslation::AddScript(v8::Local<v8::debug::WasmScript> script,
V8DebuggerAgentImpl* agent) {
std::unique_ptr<TranslatorImpl> impl;
- switch (mode_) {
- case Raw:
- impl.reset(new TranslatorImpl::RawTranslator());
- break;
- case Disassemble:
- impl.reset(new TranslatorImpl::DisassemblingTranslator(isolate_, script));
- break;
- }
+ impl.reset(new TranslatorImpl(isolate_, script));
DCHECK(impl);
auto inserted =
wasm_translators_.insert(std::make_pair(script->Id(), std::move(impl)));
@@ -316,6 +280,32 @@ void WasmTranslation::Clear() {
fake_scripts_.clear();
}
+void WasmTranslation::Clear(v8::Isolate* isolate,
+ const std::vector<int>& contextIdsToClear) {
+ for (auto iter = fake_scripts_.begin(); iter != fake_scripts_.end();) {
+ auto contextId = iter->second->GetContextId(isolate);
+ auto it = std::find(std::begin(contextIdsToClear),
+ std::end(contextIdsToClear), contextId);
+ if (it != std::end(contextIdsToClear)) {
+ iter = fake_scripts_.erase(iter);
+ } else {
+ ++iter;
+ }
+ }
+
+ for (auto iter = wasm_translators_.begin();
+ iter != wasm_translators_.end();) {
+ auto contextId = iter->second->GetContextId(isolate);
+ auto it = std::find(std::begin(contextIdsToClear),
+ std::end(contextIdsToClear), contextId);
+ if (it != std::end(contextIdsToClear)) {
+ iter = wasm_translators_.erase(iter);
+ } else {
+ ++iter;
+ }
+ }
+}
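+// The contextIdsToClear list is presumably small, so the linear std::find
+// per script is acceptable; scripts from other contexts are kept.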
+
const String16& WasmTranslation::GetSource(const String16& script_id,
int func_index) {
auto it = fake_scripts_.find(script_id);
diff --git a/deps/v8/src/inspector/wasm-translation.h b/deps/v8/src/inspector/wasm-translation.h
index 0df1f58e21..2d41822e59 100644
--- a/deps/v8/src/inspector/wasm-translation.h
+++ b/deps/v8/src/inspector/wasm-translation.h
@@ -19,14 +19,9 @@ class V8DebuggerAgentImpl;
class WasmTranslation {
public:
- enum Mode { Raw, Disassemble };
-
explicit WasmTranslation(v8::Isolate* isolate);
~WasmTranslation();
- // Set translation mode.
- void SetMode(Mode mode) { mode_ = mode; }
-
// Make a wasm script known to the translation. This will trigger a number of
// didParseScript calls to the given debugger agent.
// Only locations referencing a registered script will be translated by the
@@ -37,6 +32,9 @@ class WasmTranslation {
// Clear all registered scripts.
void Clear();
+ // Clear all registered scripts for context group.
+ void Clear(v8::Isolate* isolate, const std::vector<int>& contextIdsToClear);
+
// Translate a location as generated by V8 to a location that should be sent
// over protocol.
// Does nothing for locations referencing a script which was not registered
@@ -72,7 +70,6 @@ class WasmTranslation {
v8::Isolate* isolate_;
std::unordered_map<int, std::unique_ptr<TranslatorImpl>> wasm_translators_;
std::unordered_map<String16, TranslatorImpl*> fake_scripts_;
- Mode mode_;
DISALLOW_COPY_AND_ASSIGN(WasmTranslation);
};
diff --git a/deps/v8/src/instruction-stream.cc b/deps/v8/src/instruction-stream.cc
deleted file mode 100644
index 20cb4ece16..0000000000
--- a/deps/v8/src/instruction-stream.cc
+++ /dev/null
@@ -1,84 +0,0 @@
-// Copyright 2018 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/instruction-stream.h"
-
-#include "src/builtins/builtins.h"
-#include "src/objects-inl.h"
-#include "src/snapshot/snapshot.h"
-
-namespace v8 {
-namespace internal {
-
-// static
-bool InstructionStream::PcIsOffHeap(Isolate* isolate, Address pc) {
- if (FLAG_embedded_builtins) {
- const Address start = reinterpret_cast<Address>(isolate->embedded_blob());
- return start <= pc && pc < start + isolate->embedded_blob_size();
- } else {
- return false;
- }
-}
-
-// static
-Code* InstructionStream::TryLookupCode(Isolate* isolate, Address address) {
- if (!PcIsOffHeap(isolate, address)) return nullptr;
-
- EmbeddedData d = EmbeddedData::FromBlob();
-
- int l = 0, r = Builtins::builtin_count;
- while (l < r) {
- const int mid = (l + r) / 2;
- Address start = d.InstructionStartOfBuiltin(mid);
- Address end = start + d.InstructionSizeOfBuiltin(mid);
-
- if (address < start) {
- r = mid;
- } else if (address >= end) {
- l = mid + 1;
- } else {
- return isolate->builtins()->builtin(mid);
- }
- }
-
- UNREACHABLE();
-}
-
-// static
-void InstructionStream::CreateOffHeapInstructionStream(Isolate* isolate,
- uint8_t** data,
- uint32_t* size) {
- EmbeddedData d = EmbeddedData::FromIsolate(isolate);
-
- v8::PageAllocator* page_allocator = v8::internal::GetPlatformPageAllocator();
- const uint32_t page_size =
- static_cast<uint32_t>(page_allocator->AllocatePageSize());
- const uint32_t allocated_size = RoundUp(d.size(), page_size);
-
- uint8_t* allocated_bytes = static_cast<uint8_t*>(
- AllocatePages(page_allocator, isolate->heap()->GetRandomMmapAddr(),
- allocated_size, page_size, PageAllocator::kReadWrite));
- CHECK_NOT_NULL(allocated_bytes);
-
- std::memcpy(allocated_bytes, d.data(), d.size());
- CHECK(SetPermissions(page_allocator, allocated_bytes, allocated_size,
- PageAllocator::kReadExecute));
-
- *data = allocated_bytes;
- *size = d.size();
-
- d.Dispose();
-}
-
-// static
-void InstructionStream::FreeOffHeapInstructionStream(uint8_t* data,
- uint32_t size) {
- v8::PageAllocator* page_allocator = v8::internal::GetPlatformPageAllocator();
- const uint32_t page_size =
- static_cast<uint32_t>(page_allocator->AllocatePageSize());
- CHECK(FreePages(page_allocator, data, RoundUp(size, page_size)));
-}
-
-} // namespace internal
-} // namespace v8
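
The deleted TryLookupCode above is a plain binary search over builtins laid out contiguously in the embedded blob: builtin i occupies the half-open range [start, end), ranges are sorted by index, and the trailing UNREACHABLE() is justified because the early PcIsOffHeap check already guaranteed the address lies inside the blob. The same search over hypothetical range data, as a standalone sketch:

#include <cassert>
#include <cstdint>
#include <vector>

struct Range { uintptr_t start, end; };  // [start, end), sorted, contiguous

// Returns the index of the range containing `address`.
// Precondition: address lies within [ranges.front().start, ranges.back().end).
int FindContainingRange(const std::vector<Range>& ranges, uintptr_t address) {
  int l = 0, r = static_cast<int>(ranges.size());
  while (l < r) {
    const int mid = (l + r) / 2;
    if (address < ranges[mid].start) {
      r = mid;
    } else if (address >= ranges[mid].end) {
      l = mid + 1;
    } else {
      return mid;
    }
  }
  assert(false && "precondition violated: address outside all ranges");
  return -1;
}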
diff --git a/deps/v8/src/instruction-stream.h b/deps/v8/src/instruction-stream.h
deleted file mode 100644
index 25129871db..0000000000
--- a/deps/v8/src/instruction-stream.h
+++ /dev/null
@@ -1,39 +0,0 @@
-// Copyright 2018 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_INSTRUCTION_STREAM_H_
-#define V8_INSTRUCTION_STREAM_H_
-
-#include "src/base/macros.h"
-#include "src/globals.h"
-
-namespace v8 {
-namespace internal {
-
-class Code;
-class Isolate;
-
-// Wraps an off-heap instruction stream.
-// TODO(jgruber,v8:6666): Remove this class.
-class InstructionStream final : public AllStatic {
- public:
- // Returns true, iff the given pc points into an off-heap instruction stream.
- static bool PcIsOffHeap(Isolate* isolate, Address pc);
-
- // Returns the corresponding Code object if it exists, and nullptr otherwise.
- static Code* TryLookupCode(Isolate* isolate, Address address);
-
- // During snapshot creation, we first create an executable off-heap area
- // containing all off-heap code. The area is guaranteed to be contiguous.
- // Note that this only applies when building the snapshot, e.g. for
- // mksnapshot. Otherwise, off-heap code is embedded directly into the binary.
- static void CreateOffHeapInstructionStream(Isolate* isolate, uint8_t** data,
- uint32_t* size);
- static void FreeOffHeapInstructionStream(uint8_t* data, uint32_t size);
-};
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_INSTRUCTION_STREAM_H_
diff --git a/deps/v8/src/interface-descriptors.cc b/deps/v8/src/interface-descriptors.cc
index 3eca6f65b4..1877314f0a 100644
--- a/deps/v8/src/interface-descriptors.cc
+++ b/deps/v8/src/interface-descriptors.cc
@@ -4,6 +4,8 @@
#include "src/interface-descriptors.h"
+#include "src/macro-assembler.h"
+
namespace v8 {
namespace internal {
@@ -13,6 +15,9 @@ void CallInterfaceDescriptorData::InitializePlatformSpecific(
register_param_count_ = register_parameter_count;
+ // UBSan doesn't like creating zero-length arrays.
+ if (register_parameter_count == 0) return;
+
// InterfaceDescriptor owns a copy of the registers array.
register_params_ = NewArray<Register>(register_parameter_count, no_reg);
for (int i = 0; i < register_parameter_count; i++) {
@@ -43,7 +48,7 @@ void CallInterfaceDescriptorData::InitializePlatformIndependent(
for (int i = 0; i < types_length; i++) machine_types_[i] = machine_types[i];
}
- DCHECK(AllStackParametersAreTagged());
+ if (!(flags_ & kNoStackScan)) DCHECK(AllStackParametersAreTagged());
}
#ifdef DEBUG
@@ -123,6 +128,11 @@ const char* CallInterfaceDescriptor::DebugName() const {
return "";
}
+#if !defined(V8_TARGET_ARCH_MIPS) && !defined(V8_TARGET_ARCH_MIPS64)
+bool CallInterfaceDescriptor::IsValidFloatParameterRegister(Register reg) {
+ return true;
+}
+#endif
void VoidDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
@@ -259,6 +269,11 @@ void TypeConversionStackParameterDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(0, nullptr);
}
+void AsyncFunctionStackParameterDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ data->InitializePlatformSpecific(0, nullptr);
+}
+
void LoadWithVectorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {ReceiverRegister(), NameRegister(), SlotRegister(),
@@ -311,7 +326,7 @@ void GrowArrayElementsDescriptor::InitializePlatformSpecific(
void NewArgumentsElementsDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- DefaultInitializePlatformSpecific(data, 3);
+ DefaultInitializePlatformSpecific(data, kParameterCount);
}
void ArrayNoArgumentConstructorDescriptor::InitializePlatformSpecific(
@@ -339,7 +354,7 @@ void ArrayNArgumentsConstructorDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void WasmGrowMemoryDescriptor::InitializePlatformSpecific(
+void WasmMemoryGrowDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
DefaultInitializePlatformSpecific(data, kParameterCount);
}
@@ -349,10 +364,48 @@ void WasmThrowDescriptor::InitializePlatformSpecific(
DefaultInitializePlatformSpecific(data, kParameterCount);
}
+void WasmAtomicWakeDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ DefaultInitializePlatformSpecific(data, kParameterCount);
+}
+
+#if !defined(V8_TARGET_ARCH_MIPS) && !defined(V8_TARGET_ARCH_MIPS64)
+void WasmI32AtomicWaitDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ DefaultInitializePlatformSpecific(data, kParameterCount);
+}
+
+void WasmI64AtomicWaitDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ DefaultInitializePlatformSpecific(data, kParameterCount);
+}
+#endif
+
void CloneObjectWithVectorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
DefaultInitializePlatformSpecific(data, kParameterCount);
}
+// static
+Register RunMicrotasksDescriptor::MicrotaskQueueRegister() {
+ return CallDescriptors::call_descriptor_data(CallDescriptors::RunMicrotasks)
+ ->register_param(0);
+}
+
+void RunMicrotasksDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ DefaultInitializePlatformSpecific(data, kParameterCount);
+}
+
+void BigIntToWasmI64Descriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ DefaultInitializePlatformSpecific(data, kParameterCount);
+}
+
+void BigIntToI64Descriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ DefaultInitializePlatformSpecific(data, kParameterCount);
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/interface-descriptors.h b/deps/v8/src/interface-descriptors.h
index ae64b05582..118b2de8ed 100644
--- a/deps/v8/src/interface-descriptors.h
+++ b/deps/v8/src/interface-descriptors.h
@@ -7,75 +7,81 @@
#include <memory>
-#include "src/assembler.h"
#include "src/globals.h"
#include "src/isolate.h"
-#include "src/macro-assembler.h"
+#include "src/machine-type.h"
+#include "src/register-arch.h"
namespace v8 {
namespace internal {
#define INTERFACE_DESCRIPTOR_LIST(V) \
- V(CppBuiltinAdaptor) \
- V(CEntry1ArgvOnStack) \
- V(Allocate) \
- V(Void) \
- V(ContextOnly) \
- V(NoContext) \
- V(Load) \
- V(LoadWithVector) \
- V(LoadGlobal) \
- V(LoadGlobalWithVector) \
- V(Store) \
- V(StoreWithVector) \
- V(StoreTransition) \
- V(StoreGlobal) \
- V(StoreGlobalWithVector) \
- V(FastNewFunctionContext) \
- V(FastNewObject) \
- V(RecordWrite) \
- V(TypeConversion) \
- V(TypeConversionStackParameter) \
- V(Typeof) \
- V(CallFunction) \
- V(CallVarargs) \
- V(CallForwardVarargs) \
- V(CallWithSpread) \
- V(CallWithArrayLike) \
- V(CallTrampoline) \
- V(ConstructStub) \
- V(ConstructVarargs) \
- V(ConstructForwardVarargs) \
- V(ConstructWithSpread) \
- V(ConstructWithArrayLike) \
- V(JSTrampoline) \
V(Abort) \
+ V(Allocate) \
V(AllocateHeapNumber) \
+ V(ApiCallback) \
+ V(ApiGetter) \
+ V(ArgumentsAdaptor) \
V(ArrayConstructor) \
+ V(ArrayNArgumentsConstructor) \
V(ArrayNoArgumentConstructor) \
V(ArraySingleArgumentConstructor) \
- V(ArrayNArgumentsConstructor) \
- V(Compare) \
+ V(AsyncFunctionStackParameter) \
+ V(BigIntToI64) \
+ V(BigIntToWasmI64) \
V(BinaryOp) \
- V(StringAt) \
- V(StringSubstring) \
+ V(CallForwardVarargs) \
+ V(CallTrampoline) \
+ V(CallVarargs) \
+ V(CallWithArrayLike) \
+ V(CallWithSpread) \
+ V(CEntry1ArgvOnStack) \
+ V(CloneObjectWithVector) \
+ V(Compare) \
+ V(ConstructForwardVarargs) \
+ V(ConstructStub) \
+ V(ConstructVarargs) \
+ V(ConstructWithArrayLike) \
+ V(ConstructWithSpread) \
+ V(ContextOnly) \
+ V(CppBuiltinAdaptor) \
+ V(FastNewFunctionContext) \
+ V(FastNewObject) \
+ V(FrameDropperTrampoline) \
V(GetProperty) \
- V(ArgumentsAdaptor) \
- V(ApiCallback) \
- V(ApiGetter) \
V(GrowArrayElements) \
- V(NewArgumentsElements) \
+ V(InterpreterCEntry1) \
+ V(InterpreterCEntry2) \
V(InterpreterDispatch) \
V(InterpreterPushArgsThenCall) \
V(InterpreterPushArgsThenConstruct) \
- V(InterpreterCEntry1) \
- V(InterpreterCEntry2) \
+ V(JSTrampoline) \
+ V(Load) \
+ V(LoadGlobal) \
+ V(LoadGlobalWithVector) \
+ V(LoadWithVector) \
+ V(NewArgumentsElements) \
+ V(NoContext) \
+ V(RecordWrite) \
V(ResumeGenerator) \
- V(FrameDropperTrampoline) \
+ V(RunMicrotasksEntry) \
V(RunMicrotasks) \
- V(WasmGrowMemory) \
+ V(Store) \
+ V(StoreGlobal) \
+ V(StoreGlobalWithVector) \
+ V(StoreTransition) \
+ V(StoreWithVector) \
+ V(StringAt) \
+ V(StringSubstring) \
+ V(TypeConversion) \
+ V(TypeConversionStackParameter) \
+ V(Typeof) \
+ V(Void) \
+ V(WasmAtomicWake) \
+ V(WasmI32AtomicWait) \
+ V(WasmI64AtomicWait) \
+ V(WasmMemoryGrow) \
V(WasmThrow) \
- V(CloneObjectWithVector) \
BUILTIN_LIST_TFS(V)
class V8_EXPORT_PRIVATE CallInterfaceDescriptorData {
@@ -83,6 +89,11 @@ class V8_EXPORT_PRIVATE CallInterfaceDescriptorData {
enum Flag {
kNoFlags = 0u,
kNoContext = 1u << 0,
+
+ // Indicates that the code uses a special frame that does not scan the
+ // stack arguments, e.g. EntryFrame. This allows the code to use untagged
+ // stack arguments.
+ kNoStackScan = 1u << 1,
};
typedef base::Flags<Flag> Flags;
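
The new kNoStackScan bit is what relaxes the DCHECK in InitializePlatformIndependent shown earlier: descriptors whose frames are scanned by the GC must keep every stack parameter tagged, while EntryFrame-style descriptors may pass raw, untagged values. A rough sketch of that gating with simplified stand-in types, not V8's actual classes:

#include <cassert>
#include <cstdint>
#include <vector>

enum Flag : uint32_t {
  kNoFlags = 0u,
  kNoContext = 1u << 0,
  kNoStackScan = 1u << 1,  // frame does not scan stack args on GC
};

struct DescriptorData {
  uint32_t flags = kNoFlags;
  std::vector<bool> stack_param_is_tagged;  // one entry per stack parameter

  void CheckStackParameters() const {
    if (flags & kNoStackScan) return;  // untagged stack args are allowed
    for (bool tagged : stack_param_is_tagged) {
      assert(tagged && "stack-scanned frames require tagged stack params");
    }
  }
};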
@@ -142,7 +153,8 @@ class V8_EXPORT_PRIVATE CallInterfaceDescriptorData {
private:
bool IsInitializedPlatformSpecific() const {
const bool initialized =
- register_param_count_ >= 0 && register_params_ != nullptr;
+ (register_param_count_ == 0 && register_params_ == nullptr) ||
+ (register_param_count_ > 0 && register_params_ != nullptr);
// Platform-specific initialization happens before platform-independent.
return initialized;
}
@@ -214,7 +226,7 @@ class V8_EXPORT_PRIVATE CallInterfaceDescriptor {
CallInterfaceDescriptor() : data_(nullptr) {}
virtual ~CallInterfaceDescriptor() = default;
- CallInterfaceDescriptor(CallDescriptors::Key key)
+ explicit CallInterfaceDescriptor(CallDescriptors::Key key)
: data_(CallDescriptors::call_descriptor_data(key)) {}
Flags flags() const { return data()->flags(); }
@@ -286,6 +298,20 @@ class V8_EXPORT_PRIVATE CallInterfaceDescriptor {
static void JSDefaultInitializePlatformSpecific(
CallInterfaceDescriptorData* data, int non_js_register_parameter_count);
+ // Checks that floating-point parameters are assigned valid registers.
+ bool CheckFloatingPointParameters(CallInterfaceDescriptorData* data) {
+ for (int i = 0; i < data->register_param_count(); i++) {
+ if (IsFloatingPoint(data->param_type(i).representation())) {
+ if (!IsValidFloatParameterRegister(data->register_param(i))) {
+ return false;
+ }
+ }
+ }
+ return true;
+ }
+
+ bool IsValidFloatParameterRegister(Register reg);
+
private:
// {CallDescriptors} is allowed to call the private {Initialize} method.
friend class CallDescriptors;
@@ -299,6 +325,7 @@ class V8_EXPORT_PRIVATE CallInterfaceDescriptor {
InitializePlatformSpecific(data);
InitializePlatformIndependent(data);
DCHECK(data->IsInitialized());
+ DCHECK(CheckFloatingPointParameters(data));
}
};
@@ -382,6 +409,20 @@ STATIC_ASSERT(kMaxTFSBuiltinRegisterParams <= kMaxBuiltinRegisterParams);
kParameterCount \
};
+// This is valid only for builtins that use EntryFrame, which does not scan
+// stack arguments on GC.
+#define DEFINE_PARAMETERS_ENTRY(...) \
+ static constexpr int kDescriptorFlags = \
+ CallInterfaceDescriptorData::kNoContext | \
+ CallInterfaceDescriptorData::kNoStackScan; \
+ static constexpr int kReturnCount = 1; \
+ enum ParameterIndices { \
+ __dummy = -1, /* to be able to pass zero arguments */ \
+ ##__VA_ARGS__, \
+ \
+ kParameterCount \
+ };
+
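
The `__dummy = -1` enumerator is what keeps the variadic enum well-formed when no parameters are passed: `##__VA_ARGS__` swallows the preceding comma, and counting resumes at 0 for the first real name. For the RunMicrotasksEntryDescriptor defined further down, the expansion should look roughly like:

// DEFINE_PARAMETERS_ENTRY(kRootRegisterValue, kMicrotaskQueue) expands to:
static constexpr int kDescriptorFlags =
    CallInterfaceDescriptorData::kNoContext |
    CallInterfaceDescriptorData::kNoStackScan;
static constexpr int kReturnCount = 1;
enum ParameterIndices {
  __dummy = -1,        // placeholder so zero arguments stay legal
  kRootRegisterValue,  // == 0
  kMicrotaskQueue,     // == 1
  kParameterCount      // == 2
};
// With zero arguments, only __dummy (-1) precedes kParameterCount,
// which therefore evaluates to 0.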
#define DEFINE_PARAMETERS(...) DEFINE_RESULT_AND_PARAMETERS(1, ##__VA_ARGS__)
#define DEFINE_PARAMETERS_NO_CONTEXT(...) \
@@ -438,6 +479,13 @@ class V8_EXPORT_PRIVATE VoidDescriptor : public CallInterfaceDescriptor {
DECLARE_DESCRIPTOR(VoidDescriptor, CallInterfaceDescriptor)
};
+// Dummy descriptor used to mark builtins that do not yet have a proper
+// descriptor associated.
+typedef VoidDescriptor DummyDescriptor;
+
+// Dummy descriptor that marks builtins with C calling convention.
+typedef VoidDescriptor CCallDescriptor;
+
class AllocateDescriptor : public CallInterfaceDescriptor {
public:
DEFINE_PARAMETERS_NO_CONTEXT(kRequestedSize)
@@ -668,7 +716,7 @@ class FastNewObjectDescriptor : public CallInterfaceDescriptor {
class RecordWriteDescriptor final : public CallInterfaceDescriptor {
public:
- DEFINE_PARAMETERS(kObject, kSlot, kRememberedSet, kFPMode)
+ DEFINE_PARAMETERS_NO_CONTEXT(kObject, kSlot, kRememberedSet, kFPMode)
DEFINE_PARAMETER_TYPES(MachineType::TaggedPointer(), // kObject
MachineType::Pointer(), // kSlot
MachineType::TaggedSigned(), // kRememberedSet
@@ -695,6 +743,15 @@ class TypeConversionStackParameterDescriptor final
CallInterfaceDescriptor)
};
+class AsyncFunctionStackParameterDescriptor final
+ : public CallInterfaceDescriptor {
+ public:
+ DEFINE_PARAMETERS(kPromise, kResult)
+ DEFINE_PARAMETER_TYPES(MachineType::TaggedPointer(), MachineType::AnyTagged())
+ DECLARE_DESCRIPTOR(AsyncFunctionStackParameterDescriptor,
+ CallInterfaceDescriptor)
+};
+
class GetPropertyDescriptor final : public CallInterfaceDescriptor {
public:
DEFINE_PARAMETERS(kObject, kKey)
@@ -796,13 +853,6 @@ class ConstructStubDescriptor : public CallInterfaceDescriptor {
DECLARE_DESCRIPTOR(ConstructStubDescriptor, CallInterfaceDescriptor)
};
-class CallFunctionDescriptor : public CallInterfaceDescriptor {
- public:
- DEFINE_PARAMETERS(kTarget)
- DEFINE_PARAMETER_TYPES(MachineType::AnyTagged())
- DECLARE_DESCRIPTOR(CallFunctionDescriptor, CallInterfaceDescriptor)
-};
-
class AbortDescriptor : public CallInterfaceDescriptor {
public:
DEFINE_PARAMETERS_NO_CONTEXT(kMessageOrMessageId)
@@ -943,15 +993,18 @@ class CEntry1ArgvOnStackDescriptor : public CallInterfaceDescriptor {
class ApiCallbackDescriptor : public CallInterfaceDescriptor {
public:
- // TODO(jgruber): This could be simplified to pass call data on the stack
- // since this is what the CallApiCallbackStub anyways. This would free a
- // register.
- DEFINE_PARAMETERS_NO_CONTEXT(kTargetContext, kCallData, kHolder,
- kApiFunctionAddress)
+ DEFINE_PARAMETERS_NO_CONTEXT(kTargetContext, // register argument
+ kApiFunctionAddress, // register argument
+ kArgc, // register argument
+ kCallData, // stack argument 1
+ kHolder) // stack argument 2
+ // receiver is implicit stack argument 3
+ // argv are implicit stack arguments [4, 4 + kArgc[
DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kTargetContext
+ MachineType::Pointer(), // kApiFunctionAddress
+ MachineType::IntPtr(), // kArgc
MachineType::AnyTagged(), // kCallData
- MachineType::AnyTagged(), // kHolder
- MachineType::Pointer()) // kApiFunctionAddress
+ MachineType::AnyTagged()) // kHolder
DECLARE_DESCRIPTOR(ApiCallbackDescriptor, CallInterfaceDescriptor)
};
@@ -1072,18 +1125,29 @@ class FrameDropperTrampolineDescriptor final : public CallInterfaceDescriptor {
DECLARE_DESCRIPTOR(FrameDropperTrampolineDescriptor, CallInterfaceDescriptor)
};
+class RunMicrotasksEntryDescriptor final : public CallInterfaceDescriptor {
+ public:
+ DEFINE_PARAMETERS_ENTRY(kRootRegisterValue, kMicrotaskQueue)
+ DEFINE_PARAMETER_TYPES(MachineType::Pointer(), // kRootRegisterValue
+ MachineType::Pointer()) // kMicrotaskQueue
+ DECLARE_DESCRIPTOR(RunMicrotasksEntryDescriptor, CallInterfaceDescriptor)
+};
+
class RunMicrotasksDescriptor final : public CallInterfaceDescriptor {
public:
- DEFINE_PARAMETERS()
- DECLARE_DEFAULT_DESCRIPTOR(RunMicrotasksDescriptor, CallInterfaceDescriptor)
+ DEFINE_PARAMETERS(kMicrotaskQueue)
+ DEFINE_PARAMETER_TYPES(MachineType::Pointer())
+ DECLARE_DESCRIPTOR(RunMicrotasksDescriptor, CallInterfaceDescriptor)
+
+ static Register MicrotaskQueueRegister();
};
-class WasmGrowMemoryDescriptor final : public CallInterfaceDescriptor {
+class WasmMemoryGrowDescriptor final : public CallInterfaceDescriptor {
public:
DEFINE_PARAMETERS_NO_CONTEXT(kNumPages)
DEFINE_RESULT_AND_PARAMETER_TYPES(MachineType::Int32(), // result 1
MachineType::Int32()) // kNumPages
- DECLARE_DESCRIPTOR(WasmGrowMemoryDescriptor, CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(WasmMemoryGrowDescriptor, CallInterfaceDescriptor)
};
class WasmThrowDescriptor final : public CallInterfaceDescriptor {
@@ -1094,6 +1158,53 @@ class WasmThrowDescriptor final : public CallInterfaceDescriptor {
DECLARE_DESCRIPTOR(WasmThrowDescriptor, CallInterfaceDescriptor)
};
+class BigIntToWasmI64Descriptor final : public CallInterfaceDescriptor {
+ public:
+ DEFINE_PARAMETERS_NO_CONTEXT(kArgument)
+ DEFINE_PARAMETER_TYPES(MachineType::Int64()) // kArgument
+ DECLARE_DESCRIPTOR(BigIntToWasmI64Descriptor, CallInterfaceDescriptor)
+};
+
+class BigIntToI64Descriptor final : public CallInterfaceDescriptor {
+ public:
+ DEFINE_PARAMETERS(kArgument)
+ DEFINE_RESULT_AND_PARAMETER_TYPES(MachineType::Int64(), // result 1
+ MachineType::AnyTagged()) // kArgument
+ DECLARE_DESCRIPTOR(BigIntToI64Descriptor, CallInterfaceDescriptor)
+};
+
+class WasmAtomicWakeDescriptor final : public CallInterfaceDescriptor {
+ public:
+ DEFINE_PARAMETERS_NO_CONTEXT(kAddress, kCount)
+ DEFINE_RESULT_AND_PARAMETER_TYPES(MachineType::Uint32(), // result 1
+ MachineType::Uint32(), // kAddress
+ MachineType::Uint32()) // kCount
+ DECLARE_DESCRIPTOR(WasmAtomicWakeDescriptor, CallInterfaceDescriptor)
+};
+
+class WasmI32AtomicWaitDescriptor final : public CallInterfaceDescriptor {
+ public:
+ DEFINE_PARAMETERS_NO_CONTEXT(kAddress, kExpectedValue, kTimeout)
+ DEFINE_RESULT_AND_PARAMETER_TYPES(MachineType::Uint32(), // result 1
+ MachineType::Uint32(), // kAddress
+ MachineType::Int32(), // kExpectedValue
+ MachineType::Float64()) // kTimeout
+ DECLARE_DESCRIPTOR(WasmI32AtomicWaitDescriptor, CallInterfaceDescriptor)
+};
+
+class WasmI64AtomicWaitDescriptor final : public CallInterfaceDescriptor {
+ public:
+ DEFINE_PARAMETERS_NO_CONTEXT(kAddress, kExpectedValueHigh, kExpectedValueLow,
+ kTimeout)
+ DEFINE_RESULT_AND_PARAMETER_TYPES(
+ MachineType::Uint32(), // result 1
+ MachineType::Uint32(), // kAddress
+ MachineType::Uint32(), // kExpectedValueHigh
+ MachineType::Uint32(), // kExpectedValueLow
+ MachineType::Float64()) // kTimeout
+ DECLARE_DESCRIPTOR(WasmI64AtomicWaitDescriptor, CallInterfaceDescriptor)
+};
+
class CloneObjectWithVectorDescriptor final : public CallInterfaceDescriptor {
public:
DEFINE_PARAMETERS(kSource, kFlags, kSlot, kVector)
diff --git a/deps/v8/src/interpreter/bytecode-array-accessor.cc b/deps/v8/src/interpreter/bytecode-array-accessor.cc
index 3ec2cc595b..e455cfd065 100644
--- a/deps/v8/src/interpreter/bytecode-array-accessor.cc
+++ b/deps/v8/src/interpreter/bytecode-array-accessor.cc
@@ -197,11 +197,11 @@ Runtime::FunctionId BytecodeArrayAccessor::GetIntrinsicIdOperand(
static_cast<IntrinsicsHelper::IntrinsicId>(raw_id));
}
-Object* BytecodeArrayAccessor::GetConstantAtIndex(int index) const {
+Object BytecodeArrayAccessor::GetConstantAtIndex(int index) const {
return bytecode_array()->constant_pool()->get(index);
}
-Object* BytecodeArrayAccessor::GetConstantForIndexOperand(
+Object BytecodeArrayAccessor::GetConstantForIndexOperand(
int operand_index) const {
return GetConstantAtIndex(GetIndexOperand(operand_index));
}
@@ -215,7 +215,7 @@ int BytecodeArrayAccessor::GetJumpTargetOffset() const {
}
return GetAbsoluteOffset(relative_offset);
} else if (interpreter::Bytecodes::IsJumpConstant(bytecode)) {
- Smi* smi = Smi::cast(GetConstantForIndexOperand(0));
+ Smi smi = Smi::cast(GetConstantForIndexOperand(0));
return GetAbsoluteOffset(smi->value());
} else {
UNREACHABLE();
@@ -285,7 +285,7 @@ JumpTableTargetOffsets::iterator::iterator(
int case_value, int table_offset, int table_end,
const BytecodeArrayAccessor* accessor)
: accessor_(accessor),
- current_(Smi::kZero),
+ current_(Smi::zero()),
index_(case_value),
table_offset_(table_offset),
table_end_(table_end) {
@@ -317,7 +317,7 @@ bool JumpTableTargetOffsets::iterator::operator!=(
void JumpTableTargetOffsets::iterator::UpdateAndAdvanceToValid() {
if (table_offset_ >= table_end_) return;
- Object* current = accessor_->GetConstantAtIndex(table_offset_);
+ Object current = accessor_->GetConstantAtIndex(table_offset_);
while (!current->IsSmi()) {
DCHECK(current->IsTheHole());
++table_offset_;
diff --git a/deps/v8/src/interpreter/bytecode-array-accessor.h b/deps/v8/src/interpreter/bytecode-array-accessor.h
index e36eed8ade..db33b6f6ac 100644
--- a/deps/v8/src/interpreter/bytecode-array-accessor.h
+++ b/deps/v8/src/interpreter/bytecode-array-accessor.h
@@ -10,6 +10,7 @@
#include "src/interpreter/bytecode-register.h"
#include "src/interpreter/bytecodes.h"
#include "src/objects.h"
+#include "src/objects/smi.h"
#include "src/runtime/runtime.h"
namespace v8 {
@@ -42,7 +43,7 @@ class V8_EXPORT_PRIVATE JumpTableTargetOffsets final {
void UpdateAndAdvanceToValid();
const BytecodeArrayAccessor* accessor_;
- Smi* current_;
+ Smi current_;
int index_;
int table_offset_;
int table_end_;
@@ -92,8 +93,8 @@ class V8_EXPORT_PRIVATE BytecodeArrayAccessor {
Runtime::FunctionId GetRuntimeIdOperand(int operand_index) const;
Runtime::FunctionId GetIntrinsicIdOperand(int operand_index) const;
uint32_t GetNativeContextIndexOperand(int operand_index) const;
- Object* GetConstantAtIndex(int offset) const;
- Object* GetConstantForIndexOperand(int operand_index) const;
+ Object GetConstantAtIndex(int offset) const;
+ Object GetConstantForIndexOperand(int operand_index) const;
// Returns the absolute offset of the branch target at the current bytecode.
// It is an error to call this method if the bytecode is not for a jump or
diff --git a/deps/v8/src/interpreter/bytecode-array-builder.cc b/deps/v8/src/interpreter/bytecode-array-builder.cc
index a7c95aae7b..2183068576 100644
--- a/deps/v8/src/interpreter/bytecode-array-builder.cc
+++ b/deps/v8/src/interpreter/bytecode-array-builder.cc
@@ -13,6 +13,7 @@
#include "src/interpreter/bytecode-source-info.h"
#include "src/interpreter/interpreter-intrinsics.h"
#include "src/objects-inl.h"
+#include "src/objects/smi.h"
namespace v8 {
namespace internal {
@@ -393,7 +394,7 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::BinaryOperation(Token::Value op,
}
BytecodeArrayBuilder& BytecodeArrayBuilder::BinaryOperationSmiLiteral(
- Token::Value op, Smi* literal, int feedback_slot) {
+ Token::Value op, Smi literal, int feedback_slot) {
switch (op) {
case Token::Value::ADD:
OutputAddSmi(literal->value(), feedback_slot);
@@ -571,8 +572,7 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::LoadConstantPoolEntry(
return *this;
}
-BytecodeArrayBuilder& BytecodeArrayBuilder::LoadLiteral(
- v8::internal::Smi* smi) {
+BytecodeArrayBuilder& BytecodeArrayBuilder::LoadLiteral(Smi smi) {
int32_t raw_smi = smi->value();
if (raw_smi == 0) {
OutputLdaZero();
@@ -994,10 +994,8 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::CreateArrayFromIterable() {
}
BytecodeArrayBuilder& BytecodeArrayBuilder::CreateObjectLiteral(
- size_t constant_properties_entry, int literal_index, int flags,
- Register output) {
- OutputCreateObjectLiteral(constant_properties_entry, literal_index, flags,
- output);
+ size_t constant_properties_entry, int literal_index, int flags) {
+ OutputCreateObjectLiteral(constant_properties_entry, literal_index, flags);
return *this;
}
@@ -1344,6 +1342,8 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::MarkHandler(
BytecodeArrayBuilder& BytecodeArrayBuilder::MarkTryBegin(int handler_id,
Register context) {
+ // TODO(leszeks): Do we need to start a new basic block here? Could we simply
+ // get the current bytecode offset from the array writer instead?
BytecodeLabel try_begin;
Bind(&try_begin);
handler_table_builder()->SetTryRegionStart(handler_id, try_begin.offset());
diff --git a/deps/v8/src/interpreter/bytecode-array-builder.h b/deps/v8/src/interpreter/bytecode-array-builder.h
index bf5909d8e4..d362ffffa4 100644
--- a/deps/v8/src/interpreter/bytecode-array-builder.h
+++ b/deps/v8/src/interpreter/bytecode-array-builder.h
@@ -70,7 +70,7 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final {
// Constant loads to accumulator.
BytecodeArrayBuilder& LoadConstantPoolEntry(size_t entry);
- BytecodeArrayBuilder& LoadLiteral(v8::internal::Smi* value);
+ BytecodeArrayBuilder& LoadLiteral(Smi value);
BytecodeArrayBuilder& LoadLiteral(double value);
BytecodeArrayBuilder& LoadLiteral(const AstRawString* raw_string);
BytecodeArrayBuilder& LoadLiteral(const Scope* scope);
@@ -246,8 +246,7 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final {
BytecodeArrayBuilder& CreateEmptyArrayLiteral(int literal_index);
BytecodeArrayBuilder& CreateArrayFromIterable();
BytecodeArrayBuilder& CreateObjectLiteral(size_t constant_properties_entry,
- int literal_index, int flags,
- Register output);
+ int literal_index, int flags);
BytecodeArrayBuilder& CreateEmptyObjectLiteral();
BytecodeArrayBuilder& CloneObject(Register source, int flags,
int feedback_slot);
@@ -349,7 +348,7 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final {
int feedback_slot);
// Same as above, but lhs in the accumulator and rhs in |literal|.
BytecodeArrayBuilder& BinaryOperationSmiLiteral(Token::Value binop,
- Smi* literal,
+ Smi literal,
int feedback_slot);
// Unary and Count Operators (value stored in accumulator).
@@ -408,7 +407,6 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final {
BytecodeArrayBuilder& JumpIfTrue(ToBooleanMode mode, BytecodeLabel* label);
BytecodeArrayBuilder& JumpIfFalse(ToBooleanMode mode, BytecodeLabel* label);
- BytecodeArrayBuilder& JumpIfNotHole(BytecodeLabel* label);
BytecodeArrayBuilder& JumpIfJSReceiver(BytecodeLabel* label);
BytecodeArrayBuilder& JumpIfNull(BytecodeLabel* label);
BytecodeArrayBuilder& JumpIfNotNull(BytecodeLabel* label);
@@ -522,6 +520,9 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final {
}
bool RequiresImplicitReturn() const { return !return_seen_in_block_; }
+ bool RemainderOfBlockIsDead() const {
+ return bytecode_array_writer_.RemainderOfBlockIsDead();
+ }
// Returns the raw operand value for the given register or register list.
uint32_t GetInputRegisterOperand(Register reg);
diff --git a/deps/v8/src/interpreter/bytecode-array-writer.cc b/deps/v8/src/interpreter/bytecode-array-writer.cc
index 3dbd009879..a563ff4fc3 100644
--- a/deps/v8/src/interpreter/bytecode-array-writer.cc
+++ b/deps/v8/src/interpreter/bytecode-array-writer.cc
@@ -42,7 +42,7 @@ Handle<BytecodeArray> BytecodeArrayWriter::ToBytecodeArray(
DCHECK_EQ(0, unbound_jumps_);
int bytecode_size = static_cast<int>(bytecodes()->size());
- int frame_size = register_count * kPointerSize;
+ int frame_size = register_count * kSystemPointerSize;
Handle<FixedArray> constant_pool =
constant_array_builder()->ToFixedArray(isolate);
Handle<ByteArray> source_position_table =
diff --git a/deps/v8/src/interpreter/bytecode-array-writer.h b/deps/v8/src/interpreter/bytecode-array-writer.h
index 9700d2c1cf..e6db2fce22 100644
--- a/deps/v8/src/interpreter/bytecode-array-writer.h
+++ b/deps/v8/src/interpreter/bytecode-array-writer.h
@@ -45,6 +45,8 @@ class V8_EXPORT_PRIVATE BytecodeArrayWriter final {
int parameter_count,
Handle<ByteArray> handler_table);
+ bool RemainderOfBlockIsDead() const { return exit_seen_in_block_; }
+
private:
// Maximum sized packed bytecode is composed of a prefix bytecode,
// plus the actual bytecode, plus the maximum number of operands times
diff --git a/deps/v8/src/interpreter/bytecode-flags.cc b/deps/v8/src/interpreter/bytecode-flags.cc
index 57ff5cd850..59dcf54132 100644
--- a/deps/v8/src/interpreter/bytecode-flags.cc
+++ b/deps/v8/src/interpreter/bytecode-flags.cc
@@ -7,7 +7,6 @@
#include "src/ast/ast-value-factory.h"
#include "src/ast/ast.h"
#include "src/builtins/builtins-constructor.h"
-#include "src/code-stubs.h"
#include "src/objects-inl.h"
namespace v8 {
@@ -31,10 +30,10 @@ uint8_t CreateObjectLiteralFlags::Encode(int runtime_flags,
}
// static
-uint8_t CreateClosureFlags::Encode(bool pretenure, bool is_function_scope) {
+uint8_t CreateClosureFlags::Encode(bool pretenure, bool is_function_scope,
+ bool might_always_opt) {
uint8_t result = PretenuredBit::encode(pretenure);
- if (!FLAG_always_opt && !FLAG_prepare_always_opt &&
- pretenure == NOT_TENURED && is_function_scope) {
+ if (!might_always_opt && pretenure == NOT_TENURED && is_function_scope) {
result |= FastNewClosureBit::encode(true);
}
return result;
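
PretenuredBit and FastNewClosureBit pack the two flags into a single byte via V8's BitField8 template. A minimal re-implementation of the pattern (not V8's actual template) that shows the encode/decode mechanics:

#include <cstdint>

// Minimal stand-in for a BitField8<bool, shift, 1>-style helper.
template <int kShift>
struct BoolBit {
  static constexpr uint8_t kMask = uint8_t{1} << kShift;
  static uint8_t encode(bool value) { return value ? kMask : 0; }
  static bool decode(uint8_t packed) { return (packed & kMask) != 0; }
  static constexpr int kNext = kShift + 1;
};

using PretenuredBit = BoolBit<0>;
using FastNewClosureBit = BoolBit<PretenuredBit::kNext>;

uint8_t Encode(bool pretenure, bool fast_path) {
  uint8_t result = PretenuredBit::encode(pretenure);
  if (fast_path) result |= FastNewClosureBit::encode(true);
  return result;
}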
diff --git a/deps/v8/src/interpreter/bytecode-flags.h b/deps/v8/src/interpreter/bytecode-flags.h
index 0e0ae256ed..6f05770192 100644
--- a/deps/v8/src/interpreter/bytecode-flags.h
+++ b/deps/v8/src/interpreter/bytecode-flags.h
@@ -43,7 +43,8 @@ class CreateClosureFlags {
class PretenuredBit : public BitField8<bool, 0, 1> {};
class FastNewClosureBit : public BitField8<bool, PretenuredBit::kNext, 1> {};
- static uint8_t Encode(bool pretenure, bool is_function_scope);
+ static uint8_t Encode(bool pretenure, bool is_function_scope,
+ bool might_always_opt);
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(CreateClosureFlags);
diff --git a/deps/v8/src/interpreter/bytecode-generator.cc b/deps/v8/src/interpreter/bytecode-generator.cc
index 48682439fb..00b1916c92 100644
--- a/deps/v8/src/interpreter/bytecode-generator.cc
+++ b/deps/v8/src/interpreter/bytecode-generator.cc
@@ -8,7 +8,6 @@
#include "src/ast/ast-source-ranges.h"
#include "src/ast/scopes.h"
#include "src/builtins/builtins-constructor.h"
-#include "src/code-stubs.h"
#include "src/compiler.h"
#include "src/interpreter/bytecode-flags.h"
#include "src/interpreter/bytecode-jump-table.h"
@@ -18,6 +17,7 @@
#include "src/objects-inl.h"
#include "src/objects/debug-objects.h"
#include "src/objects/literal-objects-inl.h"
+#include "src/objects/smi.h"
#include "src/parsing/parse-info.h"
#include "src/parsing/token.h"
#include "src/unoptimized-compilation-info.h"
@@ -97,7 +97,8 @@ class BytecodeGenerator::ContextScope {
class BytecodeGenerator::ControlScope {
public:
explicit ControlScope(BytecodeGenerator* generator)
- : generator_(generator), outer_(generator->execution_control()),
+ : generator_(generator),
+ outer_(generator->execution_control()),
context_(generator->execution_context()) {
generator_->set_execution_control(this);
}
@@ -158,6 +159,16 @@ class BytecodeGenerator::ControlScope {
// paths going through the finally-block to dispatch after leaving the block.
class BytecodeGenerator::ControlScope::DeferredCommands final {
public:
+ // Fixed value tokens for paths we know we need.
+ // Fallthrough is set to -1 to make it the fallthrough case of the jump table,
+ // where the remaining cases start at 0.
+ static const int kFallthroughToken = -1;
+ // TODO(leszeks): Rethrow being 0 makes it use up a valuable LdaZero, which
+ // means that other commands (such as break or return) have to use LdaSmi.
+ // This can very slightly bloat bytecode, so perhaps token values should all
+ // be shifted down by 1.
+ static const int kRethrowToken = 0;
+
DeferredCommands(BytecodeGenerator* generator, Register token_register,
Register result_register)
: generator_(generator),
@@ -165,8 +176,13 @@ class BytecodeGenerator::ControlScope::DeferredCommands final {
token_register_(token_register),
result_register_(result_register),
return_token_(-1),
- async_return_token_(-1),
- rethrow_token_(-1) {}
+ async_return_token_(-1) {
+ // There's always a rethrow path.
+ // TODO(leszeks): We could decouple deferred_ index and token to allow us
+ // to still push this lazily.
+ STATIC_ASSERT(kRethrowToken == 0);
+ deferred_.push_back({CMD_RETHROW, nullptr, kRethrowToken});
+ }
// One recorded control-flow command.
struct Entry {
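
The payoff of fixed tokens comes at dispatch time: ApplyDeferredCommands emits a switch over the token register in which kFallthroughToken (-1) is the jump table's fallthrough case and the recorded commands occupy the dense range starting at kRethrowToken (0). A schematic of that dispatch in plain C++, standing in for the emitted bytecode:

// Tokens as laid out by DeferredCommands: -1 falls through, 0..n-1 dispatch.
constexpr int kFallthroughToken = -1;
constexpr int kRethrowToken = 0;

void DispatchAfterFinally(int token) {
  switch (token) {  // becomes a dense jump table over [0, n)
    case kRethrowToken:
      // Re-throw the exception saved in the result register.
      break;
    case 1:  // e.g. a recorded CMD_RETURN entry
      // Return the value saved in the result register.
      break;
    default:
      // kFallthroughToken: no deferred command, resume straight-line code.
      break;
  }
}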
@@ -211,7 +227,7 @@ class BytecodeGenerator::ControlScope::DeferredCommands final {
// Records the dispatch token to be used to identify the implicit fall-through
// path at the end of a try-block into the corresponding finally-block.
void RecordFallThroughPath() {
- builder()->LoadLiteral(Smi::FromInt(-1));
+ builder()->LoadLiteral(Smi::FromInt(kFallthroughToken));
builder()->StoreAccumulatorInRegister(token_register_);
// Since we're not saving the accumulator in the result register, shove a
// harmless value there instead so that it is still considered "killed" in
@@ -277,7 +293,7 @@ class BytecodeGenerator::ControlScope::DeferredCommands final {
case CMD_ASYNC_RETURN:
return GetAsyncReturnToken();
case CMD_RETHROW:
- return GetRethrowToken();
+ return kRethrowToken;
default:
// TODO(leszeks): We could also search for entries with the same
// command and statement.
@@ -299,13 +315,6 @@ class BytecodeGenerator::ControlScope::DeferredCommands final {
return async_return_token_;
}
- int GetRethrowToken() {
- if (rethrow_token_ == -1) {
- rethrow_token_ = GetNewTokenForCommand(CMD_RETHROW, nullptr);
- }
- return rethrow_token_;
- }
-
int GetNewTokenForCommand(Command command, Statement* statement) {
int token = static_cast<int>(deferred_.size());
deferred_.push_back({command, statement, token});
@@ -320,7 +329,6 @@ class BytecodeGenerator::ControlScope::DeferredCommands final {
// Tokens for commands that don't need a statement.
int return_token_;
int async_return_token_;
- int rethrow_token_;
};
// Scoped class for dealing with control flow reaching the function level.
@@ -552,6 +560,8 @@ class BytecodeGenerator::RegisterAllocationScope final {
outer_next_register_index_);
}
+ BytecodeGenerator* generator() const { return generator_; }
+
private:
BytecodeGenerator* generator_;
int outer_next_register_index_;
@@ -559,21 +569,47 @@ class BytecodeGenerator::RegisterAllocationScope final {
DISALLOW_COPY_AND_ASSIGN(RegisterAllocationScope);
};
+class BytecodeGenerator::AccumulatorPreservingScope final {
+ public:
+ explicit AccumulatorPreservingScope(BytecodeGenerator* generator,
+ AccumulatorPreservingMode mode)
+ : generator_(generator) {
+ if (mode == AccumulatorPreservingMode::kPreserve) {
+ saved_accumulator_register_ =
+ generator_->register_allocator()->NewRegister();
+ generator_->builder()->StoreAccumulatorInRegister(
+ saved_accumulator_register_);
+ }
+ }
+
+ ~AccumulatorPreservingScope() {
+ if (saved_accumulator_register_.is_valid()) {
+ generator_->builder()->LoadAccumulatorWithRegister(
+ saved_accumulator_register_);
+ }
+ }
+
+ private:
+ BytecodeGenerator* generator_;
+ Register saved_accumulator_register_;
+
+ DISALLOW_COPY_AND_ASSIGN(AccumulatorPreservingScope);
+};
+
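
The destructor keys off saved_accumulator_register_.is_valid(): in kDontPreserve mode the register is never allocated, so destruction is a no-op. A hypothetical call site, mirroring how the for-in assignment path further down requests kPreserve:

// Sketch only: inside a BytecodeGenerator visitor where the accumulator
// holds a live value that evaluating the assignment target would clobber.
{
  AccumulatorPreservingScope preserve(this,
                                      AccumulatorPreservingMode::kPreserve);
  // ... emit bytecode that freely overwrites the accumulator ...
}  // scope exit reloads the saved value into the accumulator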
// Scoped base class for determining how the result of an expression will be
// used.
class BytecodeGenerator::ExpressionResultScope {
public:
ExpressionResultScope(BytecodeGenerator* generator, Expression::Context kind)
- : generator_(generator),
- outer_(generator->execution_result()),
+ : outer_(generator->execution_result()),
allocator_(generator),
kind_(kind),
type_hint_(TypeHint::kAny) {
- generator_->set_execution_result(this);
+ generator->set_execution_result(this);
}
- virtual ~ExpressionResultScope() {
- generator_->set_execution_result(outer_);
+ ~ExpressionResultScope() {
+ allocator_.generator()->set_execution_result(outer_);
}
bool IsEffect() const { return kind_ == Expression::kEffect; }
@@ -599,7 +635,6 @@ class BytecodeGenerator::ExpressionResultScope {
TypeHint type_hint() const { return type_hint_; }
private:
- BytecodeGenerator* generator_;
ExpressionResultScope* outer_;
RegisterAllocationScope allocator_;
Expression::Context kind_;
@@ -639,9 +674,7 @@ class BytecodeGenerator::TestResultScope final : public ExpressionResultScope {
// Used when code special cases for TestResultScope and consumes any
// possible value by testing and jumping to a then/else label.
- void SetResultConsumedByTest() {
- result_consumed_by_test_ = true;
- }
+ void SetResultConsumedByTest() { result_consumed_by_test_ = true; }
bool result_consumed_by_test() { return result_consumed_by_test_; }
// Inverts the control flow of the operation, swapping the then and else
@@ -730,7 +763,7 @@ class BytecodeGenerator::GlobalDeclarationsBuilder final : public ZoneObject {
data->set(array_index++, *declaration.name->string());
data->set(array_index++, Smi::FromInt(declaration.slot.ToInt()));
- Object* undefined_or_literal_slot;
+ Object undefined_or_literal_slot;
if (declaration.literal_slot.IsInvalid()) {
undefined_or_literal_slot = ReadOnlyRoots(isolate).undefined_value();
} else {
@@ -871,7 +904,7 @@ class BytecodeGenerator::IteratorRecord final {
static bool IsInEagerLiterals(
FunctionLiteral* literal,
- const ZoneVector<FunctionLiteral*>& eager_literals) {
+ const std::vector<FunctionLiteral*>& eager_literals) {
for (FunctionLiteral* eager_literal : eager_literals) {
if (literal == eager_literal) return true;
}
@@ -883,7 +916,7 @@ static bool IsInEagerLiterals(
BytecodeGenerator::BytecodeGenerator(
UnoptimizedCompilationInfo* info,
const AstStringConstants* ast_string_constants,
- ZoneVector<FunctionLiteral*>* eager_inner_literals)
+ std::vector<FunctionLiteral*>* eager_inner_literals)
: zone_(info->zone()),
builder_(zone(), info->num_parameters_including_this(),
info->scope()->num_stack_slots(), info->feedback_vector_spec(),
@@ -922,6 +955,12 @@ BytecodeGenerator::BytecodeGenerator(
Handle<BytecodeArray> BytecodeGenerator::FinalizeBytecode(
Isolate* isolate, Handle<Script> script) {
DCHECK(ThreadId::Current().Equals(isolate->thread_id()));
+#ifdef DEBUG
+ // Unoptimized compilation should be context-independent. Verify that we don't
+ // access the native context by nulling it out during finalization.
+ SaveContext save(isolate);
+ isolate->set_context(Context());
+#endif
AllocateDeferredConstants(isolate, script);
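
The DEBUG block relies on SaveContext being a RAII guard: the isolate's context is nulled for the duration of finalization and restored on scope exit, so any code path that touches the native context trips immediately. The same poison-during-a-scope pattern in miniature, with illustrative types rather than V8's:

#include <cassert>

struct Isolate {
  void* context = nullptr;
};

// RAII guard: remembers the current context and restores it on destruction.
class SaveContextGuard {
 public:
  explicit SaveContextGuard(Isolate* isolate)
      : isolate_(isolate), saved_(isolate->context) {}
  ~SaveContextGuard() { isolate_->context = saved_; }

 private:
  Isolate* isolate_;
  void* saved_;
};

void FinalizeContextIndependent(Isolate* isolate) {
  SaveContextGuard save(isolate);
  isolate->context = nullptr;  // poison: accidental use now fails loudly
  // ... finalization work that must not read the native context ...
  assert(isolate->context == nullptr);
}  // context restored here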
@@ -1075,7 +1114,7 @@ void BytecodeGenerator::GenerateBytecodeBody() {
// Create a generator object if necessary and initialize the
// {.generator_object} variable.
- if (info()->literal()->CanSuspend()) {
+ if (IsResumableFunction(info()->literal()->kind())) {
BuildGeneratorObjectVariableInitialization();
}
@@ -1104,9 +1143,9 @@ void BytecodeGenerator::GenerateBytecodeBody() {
// The derived constructor case is handled in VisitCallSuper.
if (IsBaseConstructor(function_kind()) &&
- info()->literal()->requires_instance_fields_initializer()) {
- BuildInstanceFieldInitialization(Register::function_closure(),
- builder()->Receiver());
+ info()->literal()->requires_instance_members_initializer()) {
+ BuildInstanceMemberInitialization(Register::function_closure(),
+ builder()->Receiver());
}
// Visit statements in the function body.
@@ -1121,7 +1160,7 @@ void BytecodeGenerator::GenerateBytecodeBody() {
}
void BytecodeGenerator::AllocateTopLevelRegisters() {
- if (info()->literal()->CanSuspend()) {
+ if (IsResumableFunction(info()->literal()->kind())) {
// Either directly use generator_object_var or allocate a new register for
// the incoming generator object.
Variable* generator_object_var = closure_scope()->generator_object_var();
@@ -1181,7 +1220,7 @@ void BytecodeGenerator::VisitBlockDeclarationsAndStatements(Block* stmt) {
}
void BytecodeGenerator::VisitVariableDeclaration(VariableDeclaration* decl) {
- Variable* variable = decl->proxy()->var();
+ Variable* variable = decl->var();
switch (variable->location()) {
case VariableLocation::UNALLOCATED: {
DCHECK(!variable->binding_needs_init());
@@ -1210,7 +1249,7 @@ void BytecodeGenerator::VisitVariableDeclaration(VariableDeclaration* decl) {
}
break;
case VariableLocation::LOOKUP: {
- DCHECK_EQ(VariableMode::kVar, variable->mode());
+ DCHECK_EQ(VariableMode::kDynamic, variable->mode());
DCHECK(!variable->binding_needs_init());
Register name = register_allocator()->NewRegister();
@@ -1232,9 +1271,10 @@ void BytecodeGenerator::VisitVariableDeclaration(VariableDeclaration* decl) {
}
void BytecodeGenerator::VisitFunctionDeclaration(FunctionDeclaration* decl) {
- Variable* variable = decl->proxy()->var();
+ Variable* variable = decl->var();
DCHECK(variable->mode() == VariableMode::kLet ||
- variable->mode() == VariableMode::kVar);
+ variable->mode() == VariableMode::kVar ||
+ variable->mode() == VariableMode::kDynamic);
switch (variable->location()) {
case VariableLocation::UNALLOCATED: {
FeedbackSlot slot =
@@ -1247,13 +1287,13 @@ void BytecodeGenerator::VisitFunctionDeclaration(FunctionDeclaration* decl) {
}
case VariableLocation::PARAMETER:
case VariableLocation::LOCAL: {
- VisitForAccumulatorValue(decl->fun());
+ VisitFunctionLiteral(decl->fun());
BuildVariableAssignment(variable, Token::INIT, HoleCheckMode::kElided);
break;
}
case VariableLocation::CONTEXT: {
DCHECK_EQ(0, execution_context()->ContextChainDepth(variable->scope()));
- VisitForAccumulatorValue(decl->fun());
+ VisitFunctionLiteral(decl->fun());
builder()->StoreContextSlot(execution_context()->reg(), variable->index(),
0);
break;
@@ -1263,7 +1303,7 @@ void BytecodeGenerator::VisitFunctionDeclaration(FunctionDeclaration* decl) {
builder()
->LoadLiteral(variable->raw_name())
.StoreAccumulatorInRegister(args[0]);
- VisitForAccumulatorValue(decl->fun());
+ VisitFunctionLiteral(decl->fun());
builder()->StoreAccumulatorInRegister(args[1]).CallRuntime(
Runtime::kDeclareEvalFunction, args);
break;
@@ -1291,8 +1331,7 @@ void BytecodeGenerator::VisitModuleNamespaceImports() {
->LoadLiteral(Smi::FromInt(entry->module_request))
.StoreAccumulatorInRegister(module_request)
.CallRuntime(Runtime::kGetModuleNamespace, module_request);
- Variable* var = closure_scope()->LookupLocal(entry->local_name);
- DCHECK_NOT_NULL(var);
+ Variable* var = closure_scope()->LookupInModule(entry->local_name);
BuildVariableAssignment(var, Token::INIT, HoleCheckMode::kElided);
}
}
@@ -1326,13 +1365,14 @@ void BytecodeGenerator::VisitDeclarations(Declaration::List* declarations) {
globals_builder_ = new (zone()) GlobalDeclarationsBuilder(zone());
}
-void BytecodeGenerator::VisitStatements(ZonePtrList<Statement>* statements) {
+void BytecodeGenerator::VisitStatements(
+ const ZonePtrList<Statement>* statements) {
for (int i = 0; i < statements->length(); i++) {
// Allocate an outer register allocation scope for the statement.
RegisterAllocationScope allocation_scope(this);
Statement* stmt = statements->at(i);
Visit(stmt);
- if (stmt->IsJump()) break;
+ if (builder()->RemainderOfBlockIsDead()) break;
}
}
@@ -1341,8 +1381,7 @@ void BytecodeGenerator::VisitExpressionStatement(ExpressionStatement* stmt) {
VisitForEffect(stmt->expression());
}
-void BytecodeGenerator::VisitEmptyStatement(EmptyStatement* stmt) {
-}
+void BytecodeGenerator::VisitEmptyStatement(EmptyStatement* stmt) {}
void BytecodeGenerator::VisitIfStatement(IfStatement* stmt) {
ConditionalControlFlowBuilder conditional_builder(
@@ -1463,6 +1502,107 @@ void BytecodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
}
}
+template <typename TryBodyFunc, typename CatchBodyFunc>
+void BytecodeGenerator::BuildTryCatch(
+ TryBodyFunc try_body_func, CatchBodyFunc catch_body_func,
+ HandlerTable::CatchPrediction catch_prediction,
+ TryCatchStatement* stmt_for_coverage) {
+ TryCatchBuilder try_control_builder(
+ builder(),
+ stmt_for_coverage == nullptr ? nullptr : block_coverage_builder_,
+ stmt_for_coverage, catch_prediction);
+
+ // Preserve the context in a dedicated register, so that it can be restored
+ // when the handler is entered by the stack-unwinding machinery.
+ // TODO(mstarzinger): Be smarter about register allocation.
+ Register context = register_allocator()->NewRegister();
+ builder()->MoveRegister(Register::current_context(), context);
+
+ // Evaluate the try-block inside a control scope. This simulates a handler
+ // that is intercepting 'throw' control commands.
+ try_control_builder.BeginTry(context);
+ {
+ ControlScopeForTryCatch scope(this, &try_control_builder);
+ try_body_func();
+ }
+ try_control_builder.EndTry();
+
+ catch_body_func(context);
+
+ try_control_builder.EndCatch();
+}
+
+template <typename TryBodyFunc, typename FinallyBodyFunc>
+void BytecodeGenerator::BuildTryFinally(
+ TryBodyFunc try_body_func, FinallyBodyFunc finally_body_func,
+ HandlerTable::CatchPrediction catch_prediction,
+ TryFinallyStatement* stmt_for_coverage) {
+ // We can't know whether the finally block will override ("catch") an
+ // exception thrown in the try block, so we just adopt the outer prediction.
+ TryFinallyBuilder try_control_builder(
+ builder(),
+ stmt_for_coverage == nullptr ? nullptr : block_coverage_builder_,
+ stmt_for_coverage, catch_prediction);
+
+ // We keep a record of all paths that enter the finally-block to be able to
+ // dispatch to the correct continuation point after the statements in the
+ // finally-block have been evaluated.
+ //
+ // The try-finally construct can enter the finally-block in three ways:
+ // 1. By exiting the try-block normally, falling through at the end.
+ // 2. By exiting the try-block with a function-local control flow transfer
+ // (i.e. through break/continue/return statements).
+ // 3. By exiting the try-block with a thrown exception.
+ //
+ // The result register semantics depend on how the block was entered:
+ // - ReturnStatement: It represents the return value being returned.
+ // - ThrowStatement: It represents the exception being thrown.
+ // - BreakStatement/ContinueStatement: Undefined and not used.
+ // - Falling through into finally-block: Undefined and not used.
+ Register token = register_allocator()->NewRegister();
+ Register result = register_allocator()->NewRegister();
+ ControlScope::DeferredCommands commands(this, token, result);
+
+ // Preserve the context in a dedicated register, so that it can be restored
+ // when the handler is entered by the stack-unwinding machinery.
+ // TODO(mstarzinger): Be smarter about register allocation.
+ Register context = register_allocator()->NewRegister();
+ builder()->MoveRegister(Register::current_context(), context);
+
+ // Evaluate the try-block inside a control scope. This simulates a handler
+ // that is intercepting all control commands.
+ try_control_builder.BeginTry(context);
+ {
+ ControlScopeForTryFinally scope(this, &try_control_builder, &commands);
+ try_body_func();
+ }
+ try_control_builder.EndTry();
+
+ // Record fall-through and exception cases.
+ commands.RecordFallThroughPath();
+ try_control_builder.LeaveTry();
+ try_control_builder.BeginHandler();
+ commands.RecordHandlerReThrowPath();
+
+ // Pending message object is saved on entry.
+ try_control_builder.BeginFinally();
+ Register message = context; // Reuse register.
+
+ // Clear message object as we enter the finally block.
+ builder()->LoadTheHole().SetPendingMessage().StoreAccumulatorInRegister(
+ message);
+
+ // Evaluate the finally-block.
+ finally_body_func(token);
+ try_control_builder.EndFinally();
+
+ // Pending message object is restored on exit.
+ builder()->LoadAccumulatorWithRegister(message).SetPendingMessage();
+
+ // Dynamic dispatch after the finally-block.
+ commands.ApplyDeferredCommands();
+}
+
void BytecodeGenerator::VisitIterationBody(IterationStatement* stmt,
LoopBuilder* loop_builder) {
loop_builder->LoopBody();
@@ -1540,76 +1680,6 @@ void BytecodeGenerator::VisitForStatement(ForStatement* stmt) {
loop_builder.JumpToHeader(loop_depth_);
}
-void BytecodeGenerator::VisitForInAssignment(Expression* expr) {
- DCHECK(expr->IsValidReferenceExpression());
-
- // Evaluate assignment starting with the value to be stored in the
- // accumulator.
- Property* property = expr->AsProperty();
- LhsKind assign_type = Property::GetAssignType(property);
- switch (assign_type) {
- case VARIABLE: {
- VariableProxy* proxy = expr->AsVariableProxy();
- BuildVariableAssignment(proxy->var(), Token::ASSIGN,
- proxy->hole_check_mode());
- break;
- }
- case NAMED_PROPERTY: {
- RegisterAllocationScope register_scope(this);
- Register value = register_allocator()->NewRegister();
- builder()->StoreAccumulatorInRegister(value);
- Register object = VisitForRegisterValue(property->obj());
- const AstRawString* name =
- property->key()->AsLiteral()->AsRawPropertyName();
- builder()->LoadAccumulatorWithRegister(value);
- FeedbackSlot slot = GetCachedStoreICSlot(property->obj(), name);
- builder()->StoreNamedProperty(object, name, feedback_index(slot),
- language_mode());
- builder()->LoadAccumulatorWithRegister(value);
- break;
- }
- case KEYED_PROPERTY: {
- RegisterAllocationScope register_scope(this);
- Register value = register_allocator()->NewRegister();
- builder()->StoreAccumulatorInRegister(value);
- Register object = VisitForRegisterValue(property->obj());
- Register key = VisitForRegisterValue(property->key());
- builder()->LoadAccumulatorWithRegister(value);
- FeedbackSlot slot = feedback_spec()->AddKeyedStoreICSlot(language_mode());
- builder()->StoreKeyedProperty(object, key, feedback_index(slot),
- language_mode());
- builder()->LoadAccumulatorWithRegister(value);
- break;
- }
- case NAMED_SUPER_PROPERTY: {
- RegisterAllocationScope register_scope(this);
- RegisterList args = register_allocator()->NewRegisterList(4);
- builder()->StoreAccumulatorInRegister(args[3]);
- SuperPropertyReference* super_property =
- property->obj()->AsSuperPropertyReference();
- VisitForRegisterValue(super_property->this_var(), args[0]);
- VisitForRegisterValue(super_property->home_object(), args[1]);
- builder()
- ->LoadLiteral(property->key()->AsLiteral()->AsRawPropertyName())
- .StoreAccumulatorInRegister(args[2])
- .CallRuntime(StoreToSuperRuntimeId(), args);
- break;
- }
- case KEYED_SUPER_PROPERTY: {
- RegisterAllocationScope register_scope(this);
- RegisterList args = register_allocator()->NewRegisterList(4);
- builder()->StoreAccumulatorInRegister(args[3]);
- SuperPropertyReference* super_property =
- property->obj()->AsSuperPropertyReference();
- VisitForRegisterValue(super_property->this_var(), args[0]);
- VisitForRegisterValue(super_property->home_object(), args[1]);
- VisitForRegisterValue(property->key(), args[2]);
- builder()->CallRuntime(StoreKeyedToSuperRuntimeId(), args);
- break;
- }
- }
-}
-
void BytecodeGenerator::VisitForInStatement(ForInStatement* stmt) {
if (stmt->subject()->IsNullLiteral() ||
stmt->subject()->IsUndefinedLiteral()) {
@@ -1636,7 +1706,7 @@ void BytecodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Set up loop counter
Register index = register_allocator()->NewRegister();
- builder()->LoadLiteral(Smi::kZero);
+ builder()->LoadLiteral(Smi::zero());
builder()->StoreAccumulatorInRegister(index);
// The loop
@@ -1649,7 +1719,18 @@ void BytecodeGenerator::VisitForInStatement(ForInStatement* stmt) {
builder()->ForInNext(receiver, index, triple.Truncate(2),
feedback_index(slot));
loop_builder.ContinueIfUndefined();
- VisitForInAssignment(stmt->each());
+
+ // Assign accumulator value to the 'each' target.
+ {
+ EffectResultScope scope(this);
+ // Make sure to preserve the accumulator across the PrepareAssignmentLhs
+ // call.
+ AssignmentLhsData lhs_data = PrepareAssignmentLhs(
+ stmt->each(), AccumulatorPreservingMode::kPreserve);
+ builder()->SetExpressionPosition(stmt->each());
+ BuildAssignment(lhs_data, Token::ASSIGN, LookupHoistingMode::kNormal);
+ }
+
VisitIterationBody(stmt, &loop_builder);
builder()->ForInStep(index);
builder()->StoreAccumulatorInRegister(index);
@@ -1659,22 +1740,94 @@ void BytecodeGenerator::VisitForInStatement(ForInStatement* stmt) {
builder()->Bind(&subject_undefined_label);
}
+// Desugar a for-of statement into an application of the iteration protocol.
+//
+// for (EACH of SUBJECT) BODY
+//
+// becomes
+//
+// iterator = %GetIterator(SUBJECT)
+// try {
+//
+// loop {
+// // Make sure we are considered 'done' if .next(), .done or .value fail.
+// done = true
+// value = iterator.next()
+// if (value.done) break;
+// value = value.value
+// done = false
+//
+// EACH = value
+// BODY
+// }
+// done = true
+//
+// } catch(e) {
+// iteration_continuation = RETHROW
+// } finally {
+// %FinalizeIteration(iterator, done, iteration_continuation)
+// }
void BytecodeGenerator::VisitForOfStatement(ForOfStatement* stmt) {
- LoopBuilder loop_builder(builder(), block_coverage_builder_, stmt);
+ EffectResultScope effect_scope(this);
- builder()->SetExpressionAsStatementPosition(stmt->assign_iterator());
- VisitForEffect(stmt->assign_iterator());
- VisitForEffect(stmt->assign_next());
+ builder()->SetExpressionAsStatementPosition(stmt->subject());
+ VisitForAccumulatorValue(stmt->subject());
- loop_builder.LoopHeader();
- builder()->SetExpressionAsStatementPosition(stmt->next_result());
- VisitForEffect(stmt->next_result());
- TypeHint type_hint = VisitForAccumulatorValue(stmt->result_done());
- loop_builder.BreakIfTrue(ToBooleanModeFromTypeHint(type_hint));
+ // Store the iterator in a dedicated register so that it can be closed on
+ // exit, and the 'done' value in a dedicated register so that it can be
+ // changed and accessed independently of the iteration result.
+ IteratorRecord iterator = BuildGetIteratorRecord(stmt->type());
+ Register done = register_allocator()->NewRegister();
+ builder()->LoadFalse();
+ builder()->StoreAccumulatorInRegister(done);
+
+ BuildTryFinally(
+ // Try block.
+ [&]() {
+ Register next_result = register_allocator()->NewRegister();
+
+ LoopBuilder loop_builder(builder(), block_coverage_builder_, stmt);
+ loop_builder.LoopHeader();
+
+ builder()->LoadTrue().StoreAccumulatorInRegister(done);
+
+ // Call the iterator's .next() method. Break from the loop if the `done`
+ // property is truthy, otherwise load the value from the iterator result
+ // and assign it to the 'each' target.
+ builder()->SetExpressionAsStatementPosition(stmt->each());
+ BuildIteratorNext(iterator, next_result);
+ builder()->LoadNamedProperty(
+ next_result, ast_string_constants()->done_string(),
+ feedback_index(feedback_spec()->AddLoadICSlot()));
+ loop_builder.BreakIfTrue(ToBooleanMode::kConvertToBoolean);
- VisitForEffect(stmt->assign_each());
- VisitIterationBody(stmt, &loop_builder);
- loop_builder.JumpToHeader(loop_depth_);
+ builder()
+ // value = value.value
+ ->LoadNamedProperty(
+ next_result, ast_string_constants()->value_string(),
+ feedback_index(feedback_spec()->AddLoadICSlot()));
+ // Set done = false before the assignment to 'each' happens, so that done
+ // is false if the assignment throws.
+ builder()
+ ->StoreAccumulatorInRegister(next_result)
+ .LoadFalse()
+ .StoreAccumulatorInRegister(done);
+
+ // Assign to the 'each' target.
+ AssignmentLhsData lhs_data = PrepareAssignmentLhs(stmt->each());
+ builder()->LoadAccumulatorWithRegister(next_result);
+ BuildAssignment(lhs_data, Token::ASSIGN, LookupHoistingMode::kNormal);
+
+ VisitIterationBody(stmt, &loop_builder);
+
+ loop_builder.JumpToHeader(loop_depth_);
+ },
+ // Finally block.
+ [&](Register iteration_continuation_token) {
+ // Finish the iteration in the finally block.
+ BuildFinalizeIteration(iterator, done, iteration_continuation_token);
+ },
+ HandlerTable::UNCAUGHT);
}
void BytecodeGenerator::VisitTryCatchStatement(TryCatchStatement* stmt) {
@@ -1684,111 +1837,45 @@ void BytecodeGenerator::VisitTryCatchStatement(TryCatchStatement* stmt) {
HandlerTable::CatchPrediction outer_catch_prediction = catch_prediction();
set_catch_prediction(stmt->GetCatchPrediction(outer_catch_prediction));
- TryCatchBuilder try_control_builder(builder(), block_coverage_builder_, stmt,
- catch_prediction());
-
- // Preserve the context in a dedicated register, so that it can be restored
- // when the handler is entered by the stack-unwinding machinery.
- // TODO(mstarzinger): Be smarter about register allocation.
- Register context = register_allocator()->NewRegister();
- builder()->MoveRegister(Register::current_context(), context);
-
- // Evaluate the try-block inside a control scope. This simulates a handler
- // that is intercepting 'throw' control commands.
- try_control_builder.BeginTry(context);
- {
- ControlScopeForTryCatch scope(this, &try_control_builder);
- Visit(stmt->try_block());
- set_catch_prediction(outer_catch_prediction);
- }
- try_control_builder.EndTry();
-
- if (stmt->scope()) {
- // Create a catch scope that binds the exception.
- BuildNewLocalCatchContext(stmt->scope());
- builder()->StoreAccumulatorInRegister(context);
- }
+ BuildTryCatch(
+ // Try body.
+ [&]() {
+ Visit(stmt->try_block());
+ set_catch_prediction(outer_catch_prediction);
+ },
+ // Catch body.
+ [&](Register context) {
+ if (stmt->scope()) {
+ // Create a catch scope that binds the exception.
+ BuildNewLocalCatchContext(stmt->scope());
+ builder()->StoreAccumulatorInRegister(context);
+ }
- // If requested, clear message object as we enter the catch block.
- if (stmt->ShouldClearPendingException(outer_catch_prediction)) {
- builder()->LoadTheHole().SetPendingMessage();
- }
+ // If requested, clear message object as we enter the catch block.
+ if (stmt->ShouldClearPendingException(outer_catch_prediction)) {
+ builder()->LoadTheHole().SetPendingMessage();
+ }
- // Load the catch context into the accumulator.
- builder()->LoadAccumulatorWithRegister(context);
+ // Load the catch context into the accumulator.
+ builder()->LoadAccumulatorWithRegister(context);
- // Evaluate the catch-block.
- if (stmt->scope()) {
- VisitInScope(stmt->catch_block(), stmt->scope());
- } else {
- VisitBlock(stmt->catch_block());
- }
- try_control_builder.EndCatch();
+ // Evaluate the catch-block.
+ if (stmt->scope()) {
+ VisitInScope(stmt->catch_block(), stmt->scope());
+ } else {
+ VisitBlock(stmt->catch_block());
+ }
+ },
+ catch_prediction(), stmt);
}
void BytecodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
- // We can't know whether the finally block will override ("catch") an
- // exception thrown in the try block, so we just adopt the outer prediction.
- TryFinallyBuilder try_control_builder(builder(), block_coverage_builder_,
- stmt, catch_prediction());
-
- // We keep a record of all paths that enter the finally-block to be able to
- // dispatch to the correct continuation point after the statements in the
- // finally-block have been evaluated.
- //
- // The try-finally construct can enter the finally-block in three ways:
- // 1. By exiting the try-block normally, falling through at the end.
- // 2. By exiting the try-block with a function-local control flow transfer
- // (i.e. through break/continue/return statements).
- // 3. By exiting the try-block with a thrown exception.
- //
- // The result register semantics depend on how the block was entered:
- // - ReturnStatement: It represents the return value being returned.
- // - ThrowStatement: It represents the exception being thrown.
- // - BreakStatement/ContinueStatement: Undefined and not used.
- // - Falling through into finally-block: Undefined and not used.
- Register token = register_allocator()->NewRegister();
- Register result = register_allocator()->NewRegister();
- ControlScope::DeferredCommands commands(this, token, result);
-
- // Preserve the context in a dedicated register, so that it can be restored
- // when the handler is entered by the stack-unwinding machinery.
- // TODO(mstarzinger): Be smarter about register allocation.
- Register context = register_allocator()->NewRegister();
- builder()->MoveRegister(Register::current_context(), context);
-
- // Evaluate the try-block inside a control scope. This simulates a handler
- // that is intercepting all control commands.
- try_control_builder.BeginTry(context);
- {
- ControlScopeForTryFinally scope(this, &try_control_builder, &commands);
- Visit(stmt->try_block());
- }
- try_control_builder.EndTry();
-
- // Record fall-through and exception cases.
- commands.RecordFallThroughPath();
- try_control_builder.LeaveTry();
- try_control_builder.BeginHandler();
- commands.RecordHandlerReThrowPath();
-
- // Pending message object is saved on entry.
- try_control_builder.BeginFinally();
- Register message = context; // Reuse register.
-
- // Clear message object as we enter the finally block.
- builder()->LoadTheHole().SetPendingMessage().StoreAccumulatorInRegister(
- message);
-
- // Evaluate the finally-block.
- Visit(stmt->finally_block());
- try_control_builder.EndFinally();
-
- // Pending message object is restored on exit.
- builder()->LoadAccumulatorWithRegister(message).SetPendingMessage();
-
- // Dynamic dispatch after the finally-block.
- commands.ApplyDeferredCommands();
+ BuildTryFinally(
+ // Try block.
+ [&]() { Visit(stmt->try_block()); },
+ // Finally block.
+ [&](Register body_continuation_token) { Visit(stmt->finally_block()); },
+ catch_prediction(), stmt);
}
void BytecodeGenerator::VisitDebuggerStatement(DebuggerStatement* stmt) {
@@ -1799,7 +1886,8 @@ void BytecodeGenerator::VisitDebuggerStatement(DebuggerStatement* stmt) {
void BytecodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
DCHECK(expr->scope()->outer_scope() == current_scope());
uint8_t flags = CreateClosureFlags::Encode(
- expr->pretenure(), closure_scope()->is_function_scope());
+ expr->pretenure(), closure_scope()->is_function_scope(),
+ info()->might_always_opt());
size_t entry = builder()->AllocateDeferredConstantPoolEntry();
FeedbackSlot slot = GetCachedCreateClosureSlot(expr);
builder()->CreateClosure(entry, feedback_index(slot), flags);
@@ -1819,14 +1907,11 @@ bool BytecodeGenerator::ShouldOptimizeAsOneShot() const {
if (loop_depth_ > 0) return false;
- // A non-top-level iife is likely to be executed multiple times and so
- // shouldn't be optimized as one-shot.
- bool is_toplevel_iife = info()->literal()->is_iife() &&
- current_scope()->outer_scope()->is_script_scope();
- return info()->literal()->is_toplevel() || is_toplevel_iife;
+ return info()->literal()->is_toplevel() ||
+ info()->literal()->is_oneshot_iife();
}
-void BytecodeGenerator::BuildClassLiteral(ClassLiteral* expr) {
+void BytecodeGenerator::BuildClassLiteral(ClassLiteral* expr, Register name) {
size_t class_boilerplate_entry =
builder()->AllocateDeferredConstantPoolEntry();
class_literals_.push_back(std::make_pair(expr, class_boilerplate_entry));
@@ -1859,7 +1944,6 @@ void BytecodeGenerator::BuildClassLiteral(ClassLiteral* expr) {
for (int i = 0; i < expr->properties()->length(); i++) {
ClassLiteral::Property* property = expr->properties()->at(i);
if (property->is_computed_name()) {
- DCHECK_NE(property->kind(), ClassLiteral::Property::PRIVATE_FIELD);
Register key = register_allocator()->GrowRegisterList(&args);
builder()->SetExpressionAsStatementPosition(property->key());
@@ -1881,7 +1965,8 @@ void BytecodeGenerator::BuildClassLiteral(ClassLiteral* expr) {
.Bind(&done);
}
- if (property->kind() == ClassLiteral::Property::PUBLIC_FIELD) {
+ if (property->kind() == ClassLiteral::Property::FIELD) {
+ DCHECK(!property->is_private());
+ // Initialize the field's name variable with the computed name.
DCHECK_NOT_NULL(property->computed_name_var());
builder()->LoadAccumulatorWithRegister(key);
@@ -1890,16 +1975,22 @@ void BytecodeGenerator::BuildClassLiteral(ClassLiteral* expr) {
}
}
- if (property->kind() == ClassLiteral::Property::PUBLIC_FIELD) {
+ if (property->kind() == ClassLiteral::Property::FIELD) {
+ if (property->is_private()) {
+ RegisterAllocationScope private_name_register_scope(this);
+ Register private_name = register_allocator()->NewRegister();
+ VisitForRegisterValue(property->key(), private_name);
+ builder()
+ ->LoadLiteral(property->key()->AsLiteral()->AsRawPropertyName())
+ .StoreAccumulatorInRegister(private_name)
+ .CallRuntime(Runtime::kCreatePrivateNameSymbol, private_name);
+ DCHECK_NOT_NULL(property->private_name_var());
+ BuildVariableAssignment(property->private_name_var(), Token::INIT,
+ HoleCheckMode::kElided);
+ }
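+ // For example (illustrative), `class C { #x = 1; }` creates a fresh
+ // private name symbol for #x here, at class definition time; the
+ // field's value is still computed later, in the initializer function.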
+ // We don't compute the field's value here, but instead do it in the
// initializer function.
continue;
- } else if (property->kind() == ClassLiteral::Property::PRIVATE_FIELD) {
- builder()->CallRuntime(Runtime::kCreatePrivateFieldSymbol);
- DCHECK_NOT_NULL(property->private_field_name_var());
- BuildVariableAssignment(property->private_field_name_var(), Token::INIT,
- HoleCheckMode::kElided);
- continue;
}
Register value = register_allocator()->GrowRegisterList(&args);
@@ -1920,12 +2011,12 @@ void BytecodeGenerator::BuildClassLiteral(ClassLiteral* expr) {
HoleCheckMode::kElided);
}
- if (expr->instance_fields_initializer_function() != nullptr) {
+ if (expr->instance_members_initializer_function() != nullptr) {
Register initializer =
- VisitForRegisterValue(expr->instance_fields_initializer_function());
+ VisitForRegisterValue(expr->instance_members_initializer_function());
if (FunctionLiteral::NeedsHomeObject(
- expr->instance_fields_initializer_function())) {
+ expr->instance_members_initializer_function())) {
FeedbackSlot slot = feedback_spec()->AddStoreICSlot(language_mode());
builder()->LoadAccumulatorWithRegister(prototype).StoreHomeObjectProperty(
initializer, feedback_index(slot), language_mode());
@@ -1939,6 +2030,24 @@ void BytecodeGenerator::BuildClassLiteral(ClassLiteral* expr) {
}
if (expr->static_fields_initializer() != nullptr) {
+ // TODO(gsathya): This can be optimized away to be a part of the
+ // class boilerplate in the future. The name argument can be
+ // passed to the DefineClass runtime function and have it set
+ // there.
+ if (name.is_valid()) {
+ Register key = register_allocator()->NewRegister();
+ builder()
+ ->LoadLiteral(ast_string_constants()->name_string())
+ .StoreAccumulatorInRegister(key);
+
+ DataPropertyInLiteralFlags data_property_flags =
+ DataPropertyInLiteralFlag::kNoFlags;
+ FeedbackSlot slot =
+ feedback_spec()->AddStoreDataPropertyInLiteralICSlot();
+ builder()->LoadAccumulatorWithRegister(name).StoreDataPropertyInLiteral(
+ class_constructor, key, data_property_flags, feedback_index(slot));
+ }
+
RegisterList args = register_allocator()->NewRegisterList(1);
Register initializer =
VisitForRegisterValue(expr->static_fields_initializer());
@@ -1960,19 +2069,23 @@ void BytecodeGenerator::BuildClassLiteral(ClassLiteral* expr) {
}
void BytecodeGenerator::VisitClassLiteral(ClassLiteral* expr) {
+ VisitClassLiteral(expr, Register::invalid_value());
+}
+
+void BytecodeGenerator::VisitClassLiteral(ClassLiteral* expr, Register name) {
CurrentScope current_scope(this, expr->scope());
DCHECK_NOT_NULL(expr->scope());
if (expr->scope()->NeedsContext()) {
BuildNewLocalBlockContext(expr->scope());
ContextScope scope(this, expr->scope());
- BuildClassLiteral(expr);
+ BuildClassLiteral(expr, name);
} else {
- BuildClassLiteral(expr);
+ BuildClassLiteral(expr, name);
}
}
-void BytecodeGenerator::VisitInitializeClassFieldsStatement(
- InitializeClassFieldsStatement* stmt) {
+void BytecodeGenerator::VisitInitializeClassMembersStatement(
+ InitializeClassMembersStatement* stmt) {
RegisterList args = register_allocator()->NewRegisterList(3);
Register constructor = args[0], key = args[1], value = args[2];
builder()->MoveRegister(builder()->Receiver(), constructor);
@@ -1981,17 +2094,19 @@ void BytecodeGenerator::VisitInitializeClassFieldsStatement(
ClassLiteral::Property* property = stmt->fields()->at(i);
if (property->is_computed_name()) {
- DCHECK_EQ(property->kind(), ClassLiteral::Property::PUBLIC_FIELD);
+ DCHECK_EQ(property->kind(), ClassLiteral::Property::FIELD);
+ DCHECK(!property->is_private());
Variable* var = property->computed_name_var();
DCHECK_NOT_NULL(var);
// The computed name is already evaluated and stored in a
// variable at class definition time.
BuildVariableLoad(var, HoleCheckMode::kElided);
builder()->StoreAccumulatorInRegister(key);
- } else if (property->kind() == ClassLiteral::Property::PRIVATE_FIELD) {
- Variable* private_field_name_var = property->private_field_name_var();
- DCHECK_NOT_NULL(private_field_name_var);
- BuildVariableLoad(private_field_name_var, HoleCheckMode::kElided);
+ } else if (property->kind() == ClassLiteral::Property::FIELD &&
+ property->is_private()) {
+ Variable* private_name_var = property->private_name_var();
+ DCHECK_NOT_NULL(private_name_var);
+ BuildVariableLoad(private_name_var, HoleCheckMode::kElided);
builder()->StoreAccumulatorInRegister(key);
} else {
BuildLoadPropertyKey(property, key);
@@ -2002,15 +2117,16 @@ void BytecodeGenerator::VisitInitializeClassFieldsStatement(
VisitSetHomeObject(value, constructor, property);
Runtime::FunctionId function_id =
- property->kind() == ClassLiteral::Property::PUBLIC_FIELD
+ property->kind() == ClassLiteral::Property::FIELD &&
+ !property->is_private()
? Runtime::kCreateDataProperty
: Runtime::kAddPrivateField;
builder()->CallRuntime(function_id, args);
}
}
-void BytecodeGenerator::BuildInstanceFieldInitialization(Register constructor,
- Register instance) {
+void BytecodeGenerator::BuildInstanceMemberInitialization(Register constructor,
+ Register instance) {
RegisterList args = register_allocator()->NewRegisterList(1);
Register initializer = register_allocator()->NewRegister();
@@ -2127,7 +2243,9 @@ void BytecodeGenerator::BuildCreateObjectLiteral(Register literal,
// optimize once the CreateShallowObjectLiteral stub is in sync with the TF
// optimizations.
int literal_index = feedback_index(feedback_spec()->AddLiteralSlot());
- builder()->CreateObjectLiteral(entry, literal_index, flags, literal);
+ builder()
+ ->CreateObjectLiteral(entry, literal_index, flags)
+ .StoreAccumulatorInRegister(literal);
}
}
@@ -2317,7 +2435,20 @@ void BytecodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Register key = register_allocator()->NewRegister();
BuildLoadPropertyKey(property, key);
builder()->SetExpressionPosition(property->value());
- Register value = VisitForRegisterValue(property->value());
+ Register value;
+
+ // Static class fields require the name property to be set on
+ // the class, meaning we can't wait until the
+ // StoreDataPropertyInLiteral call later to set the name.
+ if (property->value()->IsClassLiteral() &&
+ property->value()->AsClassLiteral()->static_fields_initializer() !=
+ nullptr) {
+ value = register_allocator()->NewRegister();
+ VisitClassLiteral(property->value()->AsClassLiteral(), key);
+ builder()->StoreAccumulatorInRegister(value);
+ } else {
+ value = VisitForRegisterValue(property->value());
+ }
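+ // (Illustrative: in `var o = { C: class { static f = 1; } }`, the class
+ // literal is visited with `key` as its name register, so the
+ // constructor's 'name' is "C" before the static initializer runs.)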
VisitSetHomeObject(value, literal, property);
DataPropertyInLiteralFlags data_property_flags =
@@ -2369,16 +2500,25 @@ void BytecodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
builder()->LoadAccumulatorWithRegister(literal);
}
-void BytecodeGenerator::BuildArrayLiteralSpread(Spread* spread, Register array,
- Register index,
- FeedbackSlot index_slot,
- FeedbackSlot element_slot) {
- RegisterAllocationScope register_scope(this);
- Register value = register_allocator()->NewRegister();
-
- builder()->SetExpressionAsStatementPosition(spread->expression());
- IteratorRecord iterator =
- BuildGetIteratorRecord(spread->expression(), IteratorType::kNormal);
+// Fill an array with values from an iterator, starting at a given index. The
+// loop is guaranteed to terminate only once the iterator is exhausted, or if
+// one of iterator.next(), value.done, or value.value throws.
+//
+// In pseudocode:
+//
+// loop {
+// value = iterator.next()
+// if (value.done) break;
+// value = value.value
+// array[index++] = value
+// }
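+//
+// This loop is used both for spreads in array literals (e.g. `[...xs]`) and
+// for the rest element of a destructuring array assignment
+// (e.g. `[a, ...rest] = xs`).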
+void BytecodeGenerator::BuildFillArrayWithIterator(
+ IteratorRecord iterator, Register array, Register index, Register value,
+ FeedbackSlot next_value_slot, FeedbackSlot next_done_slot,
+ FeedbackSlot index_slot, FeedbackSlot element_slot) {
+ DCHECK(array.is_valid());
+ DCHECK(index.is_valid());
+ DCHECK(value.is_valid());
LoopBuilder loop_builder(builder(), nullptr, nullptr);
loop_builder.LoopHeader();
@@ -2396,8 +2536,7 @@ void BytecodeGenerator::BuildArrayLiteralSpread(Spread* spread, Register array,
builder()
// value = value.value
->LoadNamedProperty(value, ast_string_constants()->value_string(),
- feedback_index(feedback_spec()->AddLoadICSlot()))
- .StoreAccumulatorInRegister(value)
+ feedback_index(next_value_slot))
// array[index] = value
.StoreInArrayLiteral(array, index, feedback_index(element_slot))
// index++
@@ -2409,7 +2548,7 @@ void BytecodeGenerator::BuildArrayLiteralSpread(Spread* spread, Register array,
}
void BytecodeGenerator::BuildCreateArrayLiteral(
- ZonePtrList<Expression>* elements, ArrayLiteral* expr) {
+ const ZonePtrList<Expression>* elements, ArrayLiteral* expr) {
RegisterAllocationScope register_scope(this);
Register index = register_allocator()->NewRegister();
Register array = register_allocator()->NewRegister();
@@ -2519,9 +2658,20 @@ void BytecodeGenerator::BuildCreateArrayLiteral(
for (; current != end; ++current) {
Expression* subexpr = *current;
if (subexpr->IsSpread()) {
+ RegisterAllocationScope scope(this);
+ builder()->SetExpressionAsStatementPosition(
+ subexpr->AsSpread()->expression());
+ VisitForAccumulatorValue(subexpr->AsSpread()->expression());
+ IteratorRecord iterator = BuildGetIteratorRecord(IteratorType::kNormal);
+
+ Register value = register_allocator()->NewRegister();
+ FeedbackSlot next_value_load_slot = feedback_spec()->AddLoadICSlot();
+ FeedbackSlot next_done_load_slot = feedback_spec()->AddLoadICSlot();
FeedbackSlot real_index_slot = index_slot.Get();
- BuildArrayLiteralSpread(subexpr->AsSpread(), array, index,
- real_index_slot, element_slot.Get());
+ FeedbackSlot real_element_slot = element_slot.Get();
+ BuildFillArrayWithIterator(iterator, array, index, value,
+ next_value_load_slot, next_done_load_slot,
+ real_index_slot, real_element_slot);
} else if (!subexpr->IsTheHoleLiteral()) {
// literal[index++] = subexpr
VisitForAccumulatorValue(subexpr);
@@ -2712,18 +2862,13 @@ void BytecodeGenerator::BuildAsyncReturn(int source_position) {
.CallRuntime(Runtime::kInlineAsyncGeneratorResolve, args);
} else {
DCHECK(IsAsyncFunction(info()->literal()->kind()));
- RegisterList args = register_allocator()->NewRegisterList(2);
- Register promise = args[0];
- Register return_value = args[1];
- builder()->StoreAccumulatorInRegister(return_value);
-
- Variable* var_promise = closure_scope()->promise_var();
- DCHECK_NOT_NULL(var_promise);
- BuildVariableLoad(var_promise, HoleCheckMode::kElided);
+ RegisterList args = register_allocator()->NewRegisterList(3);
builder()
- ->StoreAccumulatorInRegister(promise)
- .CallRuntime(Runtime::kInlineResolvePromise, args)
- .LoadAccumulatorWithRegister(promise);
+ ->MoveRegister(generator_object(), args[0]) // generator
+ .StoreAccumulatorInRegister(args[1]) // value
+ .LoadBoolean(info()->literal()->CanSuspend())
+ .StoreAccumulatorInRegister(args[2]) // can_suspend
+ .CallRuntime(Runtime::kInlineAsyncFunctionResolve, args);
}
BuildReturn(source_position);
@@ -2863,18 +3008,18 @@ void BytecodeGenerator::BuildVariableAssignment(
}
}
-void BytecodeGenerator::BuildLoadNamedProperty(Property* property,
+void BytecodeGenerator::BuildLoadNamedProperty(const Expression* object_expr,
Register object,
const AstRawString* name) {
if (ShouldOptimizeAsOneShot()) {
builder()->LoadNamedPropertyNoFeedback(object, name);
} else {
- FeedbackSlot slot = GetCachedLoadICSlot(property->obj(), name);
+ FeedbackSlot slot = GetCachedLoadICSlot(object_expr, name);
builder()->LoadNamedProperty(object, name, feedback_index(slot));
}
}
-void BytecodeGenerator::BuildStoreNamedProperty(Property* property,
+void BytecodeGenerator::BuildStoreNamedProperty(const Expression* object_expr,
Register object,
const AstRawString* name) {
Register value;
@@ -2886,7 +3031,7 @@ void BytecodeGenerator::BuildStoreNamedProperty(Property* property,
if (ShouldOptimizeAsOneShot()) {
builder()->StoreNamedPropertyNoFeedback(object, name, language_mode());
} else {
- FeedbackSlot slot = GetCachedStoreICSlot(property->obj(), name);
+ FeedbackSlot slot = GetCachedStoreICSlot(object_expr, name);
builder()->StoreNamedProperty(object, name, feedback_index(slot),
language_mode());
}
@@ -2896,35 +3041,69 @@ void BytecodeGenerator::BuildStoreNamedProperty(Property* property,
}
}
-void BytecodeGenerator::VisitAssignment(Assignment* expr) {
- DCHECK(expr->target()->IsValidReferenceExpression() ||
- (expr->op() == Token::INIT && expr->target()->IsVariableProxy() &&
- expr->target()->AsVariableProxy()->is_this()));
- Register object, key;
- RegisterList super_property_args;
- const AstRawString* name;
-
+// static
+BytecodeGenerator::AssignmentLhsData
+BytecodeGenerator::AssignmentLhsData::NonProperty(Expression* expr) {
+ return AssignmentLhsData(NON_PROPERTY, expr, RegisterList(), Register(),
+ Register(), nullptr, nullptr);
+}
+// static
+BytecodeGenerator::AssignmentLhsData
+BytecodeGenerator::AssignmentLhsData::NamedProperty(Expression* object_expr,
+ Register object,
+ const AstRawString* name) {
+ return AssignmentLhsData(NAMED_PROPERTY, nullptr, RegisterList(), object,
+ Register(), object_expr, name);
+}
+// static
+BytecodeGenerator::AssignmentLhsData
+BytecodeGenerator::AssignmentLhsData::KeyedProperty(Register object,
+ Register key) {
+ return AssignmentLhsData(KEYED_PROPERTY, nullptr, RegisterList(), object, key,
+ nullptr, nullptr);
+}
+// static
+BytecodeGenerator::AssignmentLhsData
+BytecodeGenerator::AssignmentLhsData::NamedSuperProperty(
+ RegisterList super_property_args) {
+ return AssignmentLhsData(NAMED_SUPER_PROPERTY, nullptr, super_property_args,
+ Register(), Register(), nullptr, nullptr);
+}
+// static
+BytecodeGenerator::AssignmentLhsData
+BytecodeGenerator::AssignmentLhsData::KeyedSuperProperty(
+ RegisterList super_property_args) {
+ return AssignmentLhsData(KEYED_SUPER_PROPERTY, nullptr, super_property_args,
+ Register(), Register(), nullptr, nullptr);
+}
+
+BytecodeGenerator::AssignmentLhsData BytecodeGenerator::PrepareAssignmentLhs(
+ Expression* lhs, AccumulatorPreservingMode accumulator_preserving_mode) {
// Left-hand side can only be a property, a global or a variable slot.
- Property* property = expr->target()->AsProperty();
- LhsKind assign_type = Property::GetAssignType(property);
+ Property* property = lhs->AsProperty();
+ AssignType assign_type = Property::GetAssignType(property);
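+ // (Illustrative mapping: `x = v` and patterns are NON_PROPERTY,
+ // `o.x = v` is NAMED_PROPERTY, `o[k] = v` is KEYED_PROPERTY, and
+ // `super.x = v` / `super[k] = v` are the *_SUPER_PROPERTY cases.)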
// Evaluate LHS expression.
switch (assign_type) {
- case VARIABLE:
- // Nothing to do to evaluate variable assignment LHS.
- break;
+ case NON_PROPERTY:
+ return AssignmentLhsData::NonProperty(lhs);
case NAMED_PROPERTY: {
- object = VisitForRegisterValue(property->obj());
- name = property->key()->AsLiteral()->AsRawPropertyName();
- break;
+ AccumulatorPreservingScope scope(this, accumulator_preserving_mode);
+ Register object = VisitForRegisterValue(property->obj());
+ const AstRawString* name =
+ property->key()->AsLiteral()->AsRawPropertyName();
+ return AssignmentLhsData::NamedProperty(property->obj(), object, name);
}
case KEYED_PROPERTY: {
- object = VisitForRegisterValue(property->obj());
- key = VisitForRegisterValue(property->key());
- break;
+ AccumulatorPreservingScope scope(this, accumulator_preserving_mode);
+ Register object = VisitForRegisterValue(property->obj());
+ Register key = VisitForRegisterValue(property->key());
+ return AssignmentLhsData::KeyedProperty(object, key);
}
case NAMED_SUPER_PROPERTY: {
- super_property_args = register_allocator()->NewRegisterList(4);
+ AccumulatorPreservingScope scope(this, accumulator_preserving_mode);
+ RegisterList super_property_args =
+ register_allocator()->NewRegisterList(4);
SuperPropertyReference* super_property =
property->obj()->AsSuperPropertyReference();
VisitForRegisterValue(super_property->this_var(), super_property_args[0]);
@@ -2933,81 +3112,514 @@ void BytecodeGenerator::VisitAssignment(Assignment* expr) {
builder()
->LoadLiteral(property->key()->AsLiteral()->AsRawPropertyName())
.StoreAccumulatorInRegister(super_property_args[2]);
- break;
+ return AssignmentLhsData::NamedSuperProperty(super_property_args);
}
case KEYED_SUPER_PROPERTY: {
- super_property_args = register_allocator()->NewRegisterList(4);
+ AccumulatorPreservingScope scope(this, accumulator_preserving_mode);
+ RegisterList super_property_args =
+ register_allocator()->NewRegisterList(4);
SuperPropertyReference* super_property =
property->obj()->AsSuperPropertyReference();
VisitForRegisterValue(super_property->this_var(), super_property_args[0]);
VisitForRegisterValue(super_property->home_object(),
super_property_args[1]);
VisitForRegisterValue(property->key(), super_property_args[2]);
- break;
+ return AssignmentLhsData::KeyedSuperProperty(super_property_args);
}
}
+ UNREACHABLE();
+}
- // Evaluate the value and potentially handle compound assignments by loading
- // the left-hand side value and performing a binary operation.
- if (expr->IsCompoundAssignment()) {
- switch (assign_type) {
- case VARIABLE: {
- VariableProxy* proxy = expr->target()->AsVariableProxy();
- BuildVariableLoad(proxy->var(), proxy->hole_check_mode());
- break;
- }
- case NAMED_PROPERTY: {
- BuildLoadNamedProperty(property, object, name);
- break;
- }
- case KEYED_PROPERTY: {
- // Key is already in accumulator at this point due to evaluating the
- // LHS above.
- FeedbackSlot slot = feedback_spec()->AddKeyedLoadICSlot();
- builder()->LoadKeyedProperty(object, feedback_index(slot));
- break;
- }
- case NAMED_SUPER_PROPERTY: {
- builder()->CallRuntime(Runtime::kLoadFromSuper,
- super_property_args.Truncate(3));
- break;
+// Build the iteration finalizer called in the finally block of an iteration
+// protocol execution. This closes the iterator if needed, and suppresses any
+// exception it throws if necessary.
+//
+// In pseudo-code, this builds:
+//
+// if (!done) {
+// let method = iterator.return
+// if (method !== null && method !== undefined) {
+// if (typeof(method) !== "function") throw TypeError
+// try {
+// let return_val = method.call(iterator)
+// if (!%IsObject(return_val)) throw TypeError
+// } catch (e) {
+// if (iteration_continuation != RETHROW)
+// rethrow e
+// }
+// }
+// }
+//
+// For async iterators, the result of the iterator.return() call is
+// additionally awaited.
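+//
+// Note that if the finally block was entered because the loop body threw
+// (iteration_continuation == RETHROW), an exception thrown by
+// iterator.return() is suppressed and the body's exception is rethrown.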
+void BytecodeGenerator::BuildFinalizeIteration(
+ IteratorRecord iterator, Register done,
+ Register iteration_continuation_token) {
+ RegisterAllocationScope register_scope(this);
+ BytecodeLabels iterator_is_done(zone());
+
+ // if (!done) {
+ builder()->LoadAccumulatorWithRegister(done).JumpIfTrue(
+ ToBooleanMode::kConvertToBoolean, iterator_is_done.New());
+
+ // method = iterator.return
+ // if (method !== null && method !== undefined) {
+ Register method = register_allocator()->NewRegister();
+ builder()
+ ->LoadNamedProperty(iterator.object(),
+ ast_string_constants()->return_string(),
+ feedback_index(feedback_spec()->AddLoadICSlot()))
+ .StoreAccumulatorInRegister(method)
+ .JumpIfUndefined(iterator_is_done.New())
+ .JumpIfNull(iterator_is_done.New());
+
+ // if (typeof(method) !== "function") throw TypeError
+ BytecodeLabel if_callable;
+ builder()
+ ->CompareTypeOf(TestTypeOfFlags::LiteralFlag::kFunction)
+ .JumpIfTrue(ToBooleanMode::kAlreadyBoolean, &if_callable);
+ {
+ // throw %NewTypeError(kReturnMethodNotCallable)
+ RegisterAllocationScope register_scope(this);
+ RegisterList new_type_error_args = register_allocator()->NewRegisterList(2);
+ builder()
+ ->LoadLiteral(Smi::FromEnum(MessageTemplate::kReturnMethodNotCallable))
+ .StoreAccumulatorInRegister(new_type_error_args[0])
+ .LoadLiteral(ast_string_constants()->empty_string())
+ .StoreAccumulatorInRegister(new_type_error_args[1])
+ .CallRuntime(Runtime::kNewTypeError, new_type_error_args)
+ .Throw();
+ }
+ builder()->Bind(&if_callable);
+
+ {
+ RegisterAllocationScope register_scope(this);
+ BuildTryCatch(
+ // try {
+ // let return_val = method.call(iterator)
+ // if (!%IsObject(return_val)) throw TypeError
+ // }
+ [&]() {
+ RegisterList args(iterator.object());
+ builder()->CallProperty(
+ method, args, feedback_index(feedback_spec()->AddCallICSlot()));
+ if (iterator.type() == IteratorType::kAsync) {
+ BuildAwait();
+ }
+ builder()->JumpIfJSReceiver(iterator_is_done.New());
+ {
+ // Throw this exception inside the try block so that it is
+ // suppressed by the iteration continuation if necessary.
+ RegisterAllocationScope register_scope(this);
+ Register return_result = register_allocator()->NewRegister();
+ builder()
+ ->StoreAccumulatorInRegister(return_result)
+ .CallRuntime(Runtime::kThrowIteratorResultNotAnObject,
+ return_result);
+ }
+ },
+
+ // catch (e) {
+ // if (iteration_continuation != RETHROW)
+ // rethrow e
+ // }
+ [&](Register context) {
+ // Reuse context register to store the exception.
+ Register close_exception = context;
+ builder()->StoreAccumulatorInRegister(close_exception);
+
+ BytecodeLabel suppress_close_exception;
+ builder()
+ ->LoadLiteral(
+ Smi::FromInt(ControlScope::DeferredCommands::kRethrowToken))
+ .CompareReference(iteration_continuation_token)
+ .JumpIfTrue(ToBooleanMode::kAlreadyBoolean,
+ &suppress_close_exception)
+ .LoadAccumulatorWithRegister(close_exception)
+ .ReThrow()
+ .Bind(&suppress_close_exception);
+ },
+ HandlerTable::UNCAUGHT);
+ }
+
+ iterator_is_done.Bind(builder());
+}
+
+// Get the default value of a destructuring target. Will mutate the
+// destructuring target expression if there is a default value.
+//
+// For
+// a = b
+// in
+// let {a = b} = c
+// this returns b and mutates *target into a.
+Expression* BytecodeGenerator::GetDestructuringDefaultValue(
+ Expression** target) {
+ Expression* default_value = nullptr;
+ if ((*target)->IsAssignment()) {
+ Assignment* default_init = (*target)->AsAssignment();
+ DCHECK_EQ(default_init->op(), Token::ASSIGN);
+ default_value = default_init->value();
+ *target = default_init->target();
+ DCHECK((*target)->IsValidReferenceExpression() || (*target)->IsPattern());
+ }
+ return default_value;
+}
+
+// Convert a destructuring assignment to an array literal into a sequence of
+// iterator accesses into the value being assigned (in the accumulator).
+//
+// [a().x, ...b] = accumulator
+//
+// becomes
+//
+// iterator = %GetIterator(accumulator)
+// try {
+//
+// // Individual assignments read off the value from iterator.next(). This gets
+// // repeated per destructuring element.
+// if (!done) {
+// // Make sure we are considered 'done' if .next(), .done or .value fail.
+// done = true
+// var next_result = iterator.next()
+// var tmp_done = next_result.done
+// if (!tmp_done) {
+// value = next_result.value
+// done = false
+// }
+// }
+// if (done)
+// value = undefined
+// a().x = value
+//
+// // A spread receives the remaining items in the iterator.
+// var array = []
+// var index = 0
+// %FillArrayWithIterator(iterator, array, index, done)
+// done = true
+// b = array
+//
+// } catch(e) {
+// iteration_continuation = RETHROW
+// } finally {
+// %FinalizeIteration(iterator, done, iteration_continuation)
+// }
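+//
+// For example (illustrative), `[a, , ...rest] = xs` advances the iterator
+// once for `a`, once more for the elided hole (which performs no assignment),
+// and then drains the rest of the iterator into `rest` using the array-fill
+// loop above.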
+void BytecodeGenerator::BuildDestructuringArrayAssignment(
+ ArrayLiteral* pattern, Token::Value op,
+ LookupHoistingMode lookup_hoisting_mode) {
+ RegisterAllocationScope scope(this);
+
+ Register value = register_allocator()->NewRegister();
+ builder()->StoreAccumulatorInRegister(value);
+
+ // Store the iterator in a dedicated register so that it can be closed on
+ // exit, and the 'done' value in a dedicated register so that it can be
+ // changed and accessed independently of the iteration result.
+ IteratorRecord iterator = BuildGetIteratorRecord(IteratorType::kNormal);
+ Register done = register_allocator()->NewRegister();
+ builder()->LoadFalse();
+ builder()->StoreAccumulatorInRegister(done);
+
+ BuildTryFinally(
+ // Try block.
+ [&]() {
+ Register next_result = register_allocator()->NewRegister();
+ FeedbackSlot next_value_load_slot = feedback_spec()->AddLoadICSlot();
+ FeedbackSlot next_done_load_slot = feedback_spec()->AddLoadICSlot();
+
+ Spread* spread = nullptr;
+ for (Expression* target : *pattern->values()) {
+ if (target->IsSpread()) {
+ spread = target->AsSpread();
+ break;
+ }
+
+ Expression* default_value = GetDestructuringDefaultValue(&target);
+ if (!target->IsPattern()) {
+ builder()->SetExpressionAsStatementPosition(target);
+ }
+
+ AssignmentLhsData lhs_data = PrepareAssignmentLhs(target);
+
+ // if (!done) {
+ // // Make sure we are considered done if .next(), .done or .value
+ // // fail.
+ // done = true
+ // var next_result = iterator.next()
+ // var tmp_done = next_result.done
+ // if (!tmp_done) {
+ // value = next_result.value
+ // done = false
+ // }
+ // }
+ // if (done)
+ // value = undefined
+ BytecodeLabels is_done(zone());
+
+ builder()->LoadAccumulatorWithRegister(done);
+ builder()->JumpIfTrue(ToBooleanMode::kConvertToBoolean,
+ is_done.New());
+
+ builder()->LoadTrue().StoreAccumulatorInRegister(done);
+ BuildIteratorNext(iterator, next_result);
+ builder()
+ ->LoadNamedProperty(next_result,
+ ast_string_constants()->done_string(),
+ feedback_index(next_done_load_slot))
+ .JumpIfTrue(ToBooleanMode::kConvertToBoolean, is_done.New())
+ .LoadNamedProperty(next_result,
+ ast_string_constants()->value_string(),
+ feedback_index(next_value_load_slot))
+ .StoreAccumulatorInRegister(next_result)
+ .LoadFalse()
+ .StoreAccumulatorInRegister(done)
+ .LoadAccumulatorWithRegister(next_result);
+
+ // Only do the assignment if this is not a hole (i.e. 'elided').
+ if (!target->IsTheHoleLiteral()) {
+ // [<pattern> = <init>] = <value>
+ // becomes (roughly)
+ // temp = iterator.next().value;
+ // <pattern> = temp === undefined ? <init> : temp;
+ BytecodeLabel do_assignment;
+ if (default_value) {
+ builder()->JumpIfNotUndefined(&do_assignment);
+ // Since done == true => temp == undefined, jump directly to using
+ // the default value for that case.
+ is_done.Bind(builder());
+ VisitForAccumulatorValue(default_value);
+ } else {
+ builder()->Jump(&do_assignment);
+ is_done.Bind(builder());
+ builder()->LoadUndefined();
+ }
+ builder()->Bind(&do_assignment);
+
+ BuildAssignment(lhs_data, op, lookup_hoisting_mode);
+ } else {
+ DCHECK_EQ(lhs_data.assign_type(), NON_PROPERTY);
+ is_done.Bind(builder());
+ }
+ }
+
+ if (spread) {
+ RegisterAllocationScope scope(this);
+
+ // A spread is turned into a loop over the remainder of the iterator.
+ Expression* target = spread->expression();
+
+ if (!target->IsPattern()) {
+ builder()->SetExpressionAsStatementPosition(spread);
+ }
+
+ AssignmentLhsData lhs_data = PrepareAssignmentLhs(target);
+
+ // var array = [];
+ Register array = register_allocator()->NewRegister();
+ builder()->CreateEmptyArrayLiteral(
+ feedback_index(feedback_spec()->AddLiteralSlot()));
+ builder()->StoreAccumulatorInRegister(array);
+
+ // var index = 0;
+ Register index = register_allocator()->NewRegister();
+ builder()->LoadLiteral(Smi::zero());
+ builder()->StoreAccumulatorInRegister(index);
+
+ // Set done to true, since it's guaranteed to be true by the time the
+ // array fill completes.
+ builder()->LoadTrue().StoreAccumulatorInRegister(done);
+
+ // Fill the array with the iterator.
+ FeedbackSlot element_slot =
+ feedback_spec()->AddStoreInArrayLiteralICSlot();
+ FeedbackSlot index_slot = feedback_spec()->AddBinaryOpICSlot();
+ BuildFillArrayWithIterator(iterator, array, index, next_result,
+ next_value_load_slot, next_done_load_slot,
+ index_slot, element_slot);
+
+ // Assign the array to the LHS.
+ builder()->LoadAccumulatorWithRegister(array);
+ BuildAssignment(lhs_data, op, lookup_hoisting_mode);
+ }
+ },
+ // Finally block.
+ [&](Register iteration_continuation_token) {
+ // Finish the iteration in the finally block.
+ BuildFinalizeIteration(iterator, done, iteration_continuation_token);
+ },
+ HandlerTable::UNCAUGHT);
+
+ if (!execution_result()->IsEffect()) {
+ builder()->LoadAccumulatorWithRegister(value);
+ }
+}
+
+// Convert a destructuring assignment to an object literal into a sequence of
+// property accesses into the value being assigned (in the accumulator).
+//
+// { y, [x++]: a(), ...b.c } = value
+//
+// becomes
+//
+// var rest_runtime_callargs = new Array(3);
+// rest_runtime_callargs[0] = value;
+//
+// rest_runtime_callargs[1] = value;
+// y = value.y;
+//
+// var temp1 = %ToName(x++);
+// rest_runtime_callargs[2] = temp1;
+// a() = value[temp1];
+//
+// b.c = %CopyDataPropertiesWithExcludedProperties.call(rest_runtime_callargs);
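+//
+// The first element of rest_runtime_callargs is the RHS value itself; the
+// remaining elements are the keys already read off it, which the runtime call
+// excludes from the rest object.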
+void BytecodeGenerator::BuildDestructuringObjectAssignment(
+ ObjectLiteral* pattern, Token::Value op,
+ LookupHoistingMode lookup_hoisting_mode) {
+ RegisterAllocationScope scope(this);
+
+ // if (value === null || value === undefined)
+ // throw new TypeError(kNonCoercible);
+ //
+ // TODO(leszeks): Eliminate check if value is known to be non-null (e.g.
+ // an object literal).
+ BytecodeLabel is_null_or_undefined, not_null_or_undefined;
+ builder()
+ ->JumpIfNull(&is_null_or_undefined)
+ .JumpIfNotUndefined(&not_null_or_undefined);
+
+ {
+ builder()->Bind(&is_null_or_undefined);
+ builder()->SetExpressionPosition(pattern);
+ builder()->CallRuntime(Runtime::kThrowPatternAssignmentNonCoercible);
+ }
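+ // (Illustrative: `({ a } = null)` takes this branch and throws a
+ // TypeError before any property of the RHS is touched.)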
+
+ // Store the assignment value in a register.
+ Register value;
+ RegisterList rest_runtime_callargs;
+ if (pattern->has_rest_property()) {
+ rest_runtime_callargs =
+ register_allocator()->NewRegisterList(pattern->properties()->length());
+ value = rest_runtime_callargs[0];
+ } else {
+ value = register_allocator()->NewRegister();
+ }
+ builder()->Bind(&not_null_or_undefined).StoreAccumulatorInRegister(value);
+
+ int i = 0;
+ for (ObjectLiteralProperty* pattern_property : *pattern->properties()) {
+ RegisterAllocationScope scope(this);
+
+ // The key of the pattern becomes the key into the RHS value, and the value
+ // of the pattern becomes the target of the assignment.
+ //
+ // e.g. { a: b } = o becomes b = o.a
+ Expression* pattern_key = pattern_property->key();
+ Expression* target = pattern_property->value();
+ Expression* default_value = GetDestructuringDefaultValue(&target);
+
+ if (!target->IsPattern()) {
+ builder()->SetExpressionAsStatementPosition(target);
+ }
+
+ // Calculate the key used to read this property off the assignment RHS
+ // value, additionally storing it in rest_runtime_callargs if needed.
+ //
+ // The RHS is accessed using the key either by LoadNamedProperty (if
+ // value_name is valid) or by LoadKeyedProperty (otherwise).
+ const AstRawString* value_name = nullptr;
+ Register value_key;
+
+ if (pattern_property->kind() != ObjectLiteralProperty::Kind::SPREAD) {
+ if (pattern_key->IsPropertyName()) {
+ value_name = pattern_key->AsLiteral()->AsRawPropertyName();
}
- case KEYED_SUPER_PROPERTY: {
- builder()->CallRuntime(Runtime::kLoadKeyedFromSuper,
- super_property_args.Truncate(3));
- break;
+ if (pattern->has_rest_property() || !value_name) {
+ if (pattern->has_rest_property()) {
+ value_key = rest_runtime_callargs[i + 1];
+ } else {
+ value_key = register_allocator()->NewRegister();
+ }
+ if (pattern_property->is_computed_name()) {
+ // { [a()]: b().x } = c
+ // becomes
+ // var tmp = a()
+ // b().x = c[tmp]
+ DCHECK(!pattern_key->IsPropertyName() ||
+ !pattern_key->IsNumberLiteral());
+ VisitForAccumulatorValue(pattern_key);
+ builder()->ToName(value_key);
+ } else {
+ // We only need the key for non-computed properties when it is numeric
+ // or is being saved for the rest_runtime_callargs.
+ DCHECK(
+ pattern_key->IsNumberLiteral() ||
+ (pattern->has_rest_property() && pattern_key->IsPropertyName()));
+ VisitForRegisterValue(pattern_key, value_key);
+ }
}
}
- BinaryOperation* binop = expr->AsCompoundAssignment()->binary_operation();
- FeedbackSlot slot = feedback_spec()->AddBinaryOpICSlot();
- if (expr->value()->IsSmiLiteral()) {
- builder()->BinaryOperationSmiLiteral(
- binop->op(), expr->value()->AsLiteral()->AsSmiLiteral(),
- feedback_index(slot));
+
+ AssignmentLhsData lhs_data = PrepareAssignmentLhs(target);
+
+ // Get the value from the RHS.
+ if (pattern_property->kind() == ObjectLiteralProperty::Kind::SPREAD) {
+ DCHECK_EQ(i, pattern->properties()->length() - 1);
+ DCHECK(!value_key.is_valid());
+ DCHECK_NULL(value_name);
+ builder()->CallRuntime(Runtime::kCopyDataPropertiesWithExcludedProperties,
+ rest_runtime_callargs);
+ } else if (value_name) {
+ builder()->LoadNamedProperty(
+ value, value_name, feedback_index(feedback_spec()->AddLoadICSlot()));
} else {
- Register old_value = register_allocator()->NewRegister();
- builder()->StoreAccumulatorInRegister(old_value);
- VisitForAccumulatorValue(expr->value());
- builder()->BinaryOperation(binop->op(), old_value, feedback_index(slot));
+ DCHECK(value_key.is_valid());
+ builder()->LoadAccumulatorWithRegister(value_key).LoadKeyedProperty(
+ value, feedback_index(feedback_spec()->AddKeyedLoadICSlot()));
}
- } else {
- VisitForAccumulatorValue(expr->value());
+
+ // {<pattern> = <init>} = <value>
+ // becomes
+ // temp = <value>;
+ // <pattern> = temp === undefined ? <init> : temp;
+ if (default_value) {
+ BytecodeLabel value_not_undefined;
+ builder()->JumpIfNotUndefined(&value_not_undefined);
+ VisitForAccumulatorValue(default_value);
+ builder()->Bind(&value_not_undefined);
+ }
+
+ BuildAssignment(lhs_data, op, lookup_hoisting_mode);
+
+ i++;
}
- // Store the value.
- builder()->SetExpressionPosition(expr);
- switch (assign_type) {
- case VARIABLE: {
- // TODO(oth): The BuildVariableAssignment() call is hard to reason about.
- // Is the value in the accumulator safe? Yes, but scary.
- VariableProxy* proxy = expr->target()->AsVariableProxy();
- BuildVariableAssignment(proxy->var(), expr->op(),
- proxy->hole_check_mode(),
- expr->lookup_hoisting_mode());
+ if (!execution_result()->IsEffect()) {
+ builder()->LoadAccumulatorWithRegister(value);
+ }
+}
+
+void BytecodeGenerator::BuildAssignment(
+ const AssignmentLhsData& lhs_data, Token::Value op,
+ LookupHoistingMode lookup_hoisting_mode) {
+ // Assign the value to the LHS.
+ switch (lhs_data.assign_type()) {
+ case NON_PROPERTY: {
+ if (ObjectLiteral* pattern = lhs_data.expr()->AsObjectLiteral()) {
+ // Split object literals into destructuring.
+ BuildDestructuringObjectAssignment(pattern, op, lookup_hoisting_mode);
+ } else if (ArrayLiteral* pattern = lhs_data.expr()->AsArrayLiteral()) {
+ // Split array literals into destructuring.
+ BuildDestructuringArrayAssignment(pattern, op, lookup_hoisting_mode);
+ } else {
+ DCHECK(lhs_data.expr()->IsVariableProxy());
+ VariableProxy* proxy = lhs_data.expr()->AsVariableProxy();
+ BuildVariableAssignment(proxy->var(), op, proxy->hole_check_mode(),
+ lookup_hoisting_mode);
+ }
break;
}
case NAMED_PROPERTY: {
- BuildStoreNamedProperty(property, object, name);
+ BuildStoreNamedProperty(lhs_data.object_expr(), lhs_data.object(),
+ lhs_data.name());
break;
}
case KEYED_PROPERTY: {
@@ -3017,8 +3629,8 @@ void BytecodeGenerator::VisitAssignment(Assignment* expr) {
value = register_allocator()->NewRegister();
builder()->StoreAccumulatorInRegister(value);
}
- builder()->StoreKeyedProperty(object, key, feedback_index(slot),
- language_mode());
+ builder()->StoreKeyedProperty(lhs_data.object(), lhs_data.key(),
+ feedback_index(slot), language_mode());
if (!execution_result()->IsEffect()) {
builder()->LoadAccumulatorWithRegister(value);
}
@@ -3026,34 +3638,91 @@ void BytecodeGenerator::VisitAssignment(Assignment* expr) {
}
case NAMED_SUPER_PROPERTY: {
builder()
- ->StoreAccumulatorInRegister(super_property_args[3])
- .CallRuntime(StoreToSuperRuntimeId(), super_property_args);
+ ->StoreAccumulatorInRegister(lhs_data.super_property_args()[3])
+ .CallRuntime(StoreToSuperRuntimeId(), lhs_data.super_property_args());
break;
}
case KEYED_SUPER_PROPERTY: {
builder()
- ->StoreAccumulatorInRegister(super_property_args[3])
- .CallRuntime(StoreKeyedToSuperRuntimeId(), super_property_args);
+ ->StoreAccumulatorInRegister(lhs_data.super_property_args()[3])
+ .CallRuntime(StoreKeyedToSuperRuntimeId(),
+ lhs_data.super_property_args());
break;
}
}
}
+void BytecodeGenerator::VisitAssignment(Assignment* expr) {
+ AssignmentLhsData lhs_data = PrepareAssignmentLhs(expr->target());
+
+ VisitForAccumulatorValue(expr->value());
+
+ builder()->SetExpressionPosition(expr);
+ BuildAssignment(lhs_data, expr->op(), expr->lookup_hoisting_mode());
+}
+
void BytecodeGenerator::VisitCompoundAssignment(CompoundAssignment* expr) {
- VisitAssignment(expr);
+ AssignmentLhsData lhs_data = PrepareAssignmentLhs(expr->target());
+
+ // Evaluate the value and potentially handle compound assignments by loading
+ // the left-hand side value and performing a binary operation.
+ switch (lhs_data.assign_type()) {
+ case NON_PROPERTY: {
+ VariableProxy* proxy = expr->target()->AsVariableProxy();
+ BuildVariableLoad(proxy->var(), proxy->hole_check_mode());
+ break;
+ }
+ case NAMED_PROPERTY: {
+ BuildLoadNamedProperty(lhs_data.object_expr(), lhs_data.object(),
+ lhs_data.name());
+ break;
+ }
+ case KEYED_PROPERTY: {
+ FeedbackSlot slot = feedback_spec()->AddKeyedLoadICSlot();
+ builder()
+ ->LoadAccumulatorWithRegister(lhs_data.key())
+ .LoadKeyedProperty(lhs_data.object(), feedback_index(slot));
+ break;
+ }
+ case NAMED_SUPER_PROPERTY: {
+ builder()->CallRuntime(Runtime::kLoadFromSuper,
+ lhs_data.super_property_args().Truncate(3));
+ break;
+ }
+ case KEYED_SUPER_PROPERTY: {
+ builder()->CallRuntime(Runtime::kLoadKeyedFromSuper,
+ lhs_data.super_property_args().Truncate(3));
+ break;
+ }
+ }
+ BinaryOperation* binop = expr->AsCompoundAssignment()->binary_operation();
+ FeedbackSlot slot = feedback_spec()->AddBinaryOpICSlot();
+ if (expr->value()->IsSmiLiteral()) {
+ builder()->BinaryOperationSmiLiteral(
+ binop->op(), expr->value()->AsLiteral()->AsSmiLiteral(),
+ feedback_index(slot));
+ } else {
+ Register old_value = register_allocator()->NewRegister();
+ builder()->StoreAccumulatorInRegister(old_value);
+ VisitForAccumulatorValue(expr->value());
+ builder()->BinaryOperation(binop->op(), old_value, feedback_index(slot));
+ }
+
+ builder()->SetExpressionPosition(expr);
+ BuildAssignment(lhs_data, expr->op(), expr->lookup_hoisting_mode());
}
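+// (Illustrative: for `o.x += 1`, PrepareAssignmentLhs evaluates `o` once,
+// the old value is loaded via the NAMED_PROPERTY case above, combined with
+// the Smi literal 1, and the result is stored back by BuildAssignment.)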
// Suspends the generator to resume at the next suspend_id, with output stored
// in the accumulator. When the generator is resumed, the sent value is loaded
// in the accumulator.
-void BytecodeGenerator::BuildSuspendPoint(Expression* suspend_expr) {
+void BytecodeGenerator::BuildSuspendPoint(int position) {
const int suspend_id = suspend_count_++;
RegisterList registers = register_allocator()->AllLiveRegisters();
// Save context, registers, and state. This bytecode then returns the value
// in the accumulator.
- builder()->SetExpressionPosition(suspend_expr);
+ builder()->SetExpressionPosition(position);
builder()->SuspendGenerator(generator_object(), registers, suspend_id);
// Upon resume, we continue here.
@@ -3090,12 +3759,12 @@ void BytecodeGenerator::VisitYield(Yield* expr) {
builder()
->StoreAccumulatorInRegister(args[0]) // value
.LoadFalse()
- .StoreAccumulatorInRegister(args[1]) // done
+ .StoreAccumulatorInRegister(args[1]) // done
.CallRuntime(Runtime::kInlineCreateIterResultObject, args);
}
}
- BuildSuspendPoint(expr);
+ BuildSuspendPoint(expr->position());
// At this point, the generator has been resumed, with the received value in
// the accumulator.
@@ -3218,8 +3887,8 @@ void BytecodeGenerator::VisitYieldStar(YieldStar* expr) {
{
RegisterAllocationScope register_scope(this);
RegisterList iterator_and_input = register_allocator()->NewRegisterList(2);
+ VisitForAccumulatorValue(expr->expression());
IteratorRecord iterator = BuildGetIteratorRecord(
- expr->expression(),
register_allocator()->NewRegister() /* next method */,
iterator_and_input[0], iterator_type);
@@ -3306,7 +3975,7 @@ void BytecodeGenerator::VisitYieldStar(YieldStar* expr) {
if (iterator_type == IteratorType::kAsync) {
// Await the result of the method invocation.
- BuildAwait(expr);
+ BuildAwait(expr->position());
}
// Check that output is an object.
@@ -3346,7 +4015,7 @@ void BytecodeGenerator::VisitYieldStar(YieldStar* expr) {
.CallRuntime(Runtime::kInlineAsyncGeneratorYield, args);
}
- BuildSuspendPoint(expr);
+ BuildSuspendPoint(expr->position());
builder()->StoreAccumulatorInRegister(input);
builder()
->CallRuntime(Runtime::kInlineGeneratorGetResumeMode,
@@ -3382,7 +4051,7 @@ void BytecodeGenerator::VisitYieldStar(YieldStar* expr) {
builder()->LoadAccumulatorWithRegister(output_value);
}
-void BytecodeGenerator::BuildAwait(Expression* await_expr) {
+void BytecodeGenerator::BuildAwait(int position) {
// Rather than HandlerTable::UNCAUGHT, async functions use
// HandlerTable::ASYNC_AWAIT to communicate that top-level exceptions are
// transformed into promise rejections. This is necessary to prevent emitting
@@ -3395,38 +4064,24 @@ void BytecodeGenerator::BuildAwait(Expression* await_expr) {
// Await(operand) and suspend.
RegisterAllocationScope register_scope(this);
- int await_builtin_context_index;
- RegisterList args;
+ Runtime::FunctionId await_intrinsic_id;
if (IsAsyncGeneratorFunction(function_kind())) {
- await_builtin_context_index =
- catch_prediction() == HandlerTable::ASYNC_AWAIT
- ? Context::ASYNC_GENERATOR_AWAIT_UNCAUGHT
- : Context::ASYNC_GENERATOR_AWAIT_CAUGHT;
- args = register_allocator()->NewRegisterList(2);
- builder()
- ->MoveRegister(generator_object(), args[0])
- .StoreAccumulatorInRegister(args[1]);
+ await_intrinsic_id = catch_prediction() == HandlerTable::ASYNC_AWAIT
+ ? Runtime::kInlineAsyncGeneratorAwaitUncaught
+ : Runtime::kInlineAsyncGeneratorAwaitCaught;
} else {
- await_builtin_context_index =
- catch_prediction() == HandlerTable::ASYNC_AWAIT
- ? Context::ASYNC_FUNCTION_AWAIT_UNCAUGHT_INDEX
- : Context::ASYNC_FUNCTION_AWAIT_CAUGHT_INDEX;
- args = register_allocator()->NewRegisterList(3);
- builder()
- ->MoveRegister(generator_object(), args[0])
- .StoreAccumulatorInRegister(args[1]);
-
- // AsyncFunction Await builtins require a 3rd parameter to hold the outer
- // promise.
- Variable* var_promise = closure_scope()->promise_var();
- BuildVariableLoadForAccumulatorValue(var_promise, HoleCheckMode::kElided);
- builder()->StoreAccumulatorInRegister(args[2]);
+ await_intrinsic_id = catch_prediction() == HandlerTable::ASYNC_AWAIT
+ ? Runtime::kInlineAsyncFunctionAwaitUncaught
+ : Runtime::kInlineAsyncFunctionAwaitCaught;
}
-
- builder()->CallJSRuntime(await_builtin_context_index, args);
+ RegisterList args = register_allocator()->NewRegisterList(2);
+ builder()
+ ->MoveRegister(generator_object(), args[0])
+ .StoreAccumulatorInRegister(args[1])
+ .CallRuntime(await_intrinsic_id, args);
}
- BuildSuspendPoint(await_expr);
+ BuildSuspendPoint(position);
Register input = register_allocator()->NewRegister();
Register resume_mode = register_allocator()->NewRegister();
@@ -3454,7 +4109,7 @@ void BytecodeGenerator::BuildAwait(Expression* await_expr) {
void BytecodeGenerator::VisitAwait(Await* expr) {
builder()->SetExpressionPosition(expr);
VisitForAccumulatorValue(expr->expression());
- BuildAwait(expr);
+ BuildAwait(expr->position());
BuildIncrementBlockCoverageCounterIfEnabled(expr,
SourceRangeKind::kContinuation);
}
@@ -3467,15 +4122,15 @@ void BytecodeGenerator::VisitThrow(Throw* expr) {
}
void BytecodeGenerator::VisitPropertyLoad(Register obj, Property* property) {
- LhsKind property_kind = Property::GetAssignType(property);
+ AssignType property_kind = Property::GetAssignType(property);
switch (property_kind) {
- case VARIABLE:
+ case NON_PROPERTY:
UNREACHABLE();
case NAMED_PROPERTY: {
builder()->SetExpressionPosition(property);
const AstRawString* name =
property->key()->AsLiteral()->AsRawPropertyName();
- BuildLoadNamedProperty(property, obj, name);
+ BuildLoadNamedProperty(property->obj(), obj, name);
break;
}
case KEYED_PROPERTY: {
@@ -3541,7 +4196,7 @@ void BytecodeGenerator::VisitKeyedSuperPropertyLoad(Property* property,
}
void BytecodeGenerator::VisitProperty(Property* expr) {
- LhsKind property_kind = Property::GetAssignType(expr);
+ AssignType property_kind = Property::GetAssignType(expr);
if (property_kind != NAMED_SUPER_PROPERTY &&
property_kind != KEYED_SUPER_PROPERTY) {
Register obj = VisitForRegisterValue(expr->obj());
@@ -3556,7 +4211,7 @@ void BytecodeGenerator::VisitResolvedProperty(ResolvedProperty* expr) {
UNREACHABLE();
}
-void BytecodeGenerator::VisitArguments(ZonePtrList<Expression>* args,
+void BytecodeGenerator::VisitArguments(const ZonePtrList<Expression>* args,
RegisterList* arg_regs) {
// Visit arguments.
for (int i = 0; i < static_cast<int>(args->length()); i++) {
@@ -3733,7 +4388,7 @@ void BytecodeGenerator::VisitCall(Call* expr) {
void BytecodeGenerator::VisitCallSuper(Call* expr) {
RegisterAllocationScope register_scope(this);
SuperCallReference* super = expr->expression()->AsSuperCallReference();
- ZonePtrList<Expression>* args = expr->arguments();
+ const ZonePtrList<Expression>* args = expr->arguments();
int first_spread_index = 0;
for (; first_spread_index < args->length(); first_spread_index++) {
@@ -3810,11 +4465,11 @@ void BytecodeGenerator::VisitCallSuper(Call* expr) {
// TODO(gsathya): In the future, we could tag nested arrow functions
// or eval with the correct bit so that we do the load conditionally
// if required.
- if (info()->literal()->requires_instance_fields_initializer() ||
+ if (info()->literal()->requires_instance_members_initializer() ||
!IsDerivedConstructor(info()->literal()->kind())) {
Register instance = register_allocator()->NewRegister();
builder()->StoreAccumulatorInRegister(instance);
- BuildInstanceFieldInitialization(this_function, instance);
+ BuildInstanceMemberInitialization(this_function, instance);
builder()->LoadAccumulatorWithRegister(instance);
}
}
@@ -3918,56 +4573,51 @@ void BytecodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
}
}
-void BytecodeGenerator::VisitDelete(UnaryOperation* expr) {
- if (expr->expression()->IsProperty()) {
+void BytecodeGenerator::VisitDelete(UnaryOperation* unary) {
+ Expression* expr = unary->expression();
+ if (expr->IsProperty()) {
// Delete of an object property is allowed both in sloppy
// and strict modes.
- Property* property = expr->expression()->AsProperty();
+ Property* property = expr->AsProperty();
Register object = VisitForRegisterValue(property->obj());
VisitForAccumulatorValue(property->key());
builder()->Delete(object, language_mode());
- } else if (expr->expression()->IsVariableProxy()) {
+ } else if (expr->IsVariableProxy() && !expr->AsVariableProxy()->is_this() &&
+ !expr->AsVariableProxy()->is_new_target()) {
// Delete of an unqualified identifier is allowed in sloppy mode but is
- // not allowed in strict mode. Deleting 'this' and 'new.target' is allowed
- // in both modes.
- VariableProxy* proxy = expr->expression()->AsVariableProxy();
- DCHECK(is_sloppy(language_mode()) || proxy->is_this() ||
- proxy->is_new_target());
- if (proxy->is_this() || proxy->is_new_target()) {
- builder()->LoadTrue();
- } else {
- Variable* variable = proxy->var();
- switch (variable->location()) {
- case VariableLocation::PARAMETER:
- case VariableLocation::LOCAL:
- case VariableLocation::CONTEXT: {
- // Deleting local var/let/const, context variables, and arguments
- // does not have any effect.
- builder()->LoadFalse();
- break;
- }
- case VariableLocation::UNALLOCATED:
- // TODO(adamk): Falling through to the runtime results in correct
- // behavior, but does unnecessary context-walking (since scope
- // analysis has already proven that the variable doesn't exist in
- // any non-global scope). Consider adding a DeleteGlobal bytecode
- // that knows how to deal with ScriptContexts as well as global
- // object properties.
- case VariableLocation::LOOKUP: {
- Register name_reg = register_allocator()->NewRegister();
- builder()
- ->LoadLiteral(variable->raw_name())
- .StoreAccumulatorInRegister(name_reg)
- .CallRuntime(Runtime::kDeleteLookupSlot, name_reg);
- break;
- }
- default:
- UNREACHABLE();
+ // not allowed in strict mode.
+ DCHECK(is_sloppy(language_mode()));
+ Variable* variable = expr->AsVariableProxy()->var();
+ switch (variable->location()) {
+ case VariableLocation::PARAMETER:
+ case VariableLocation::LOCAL:
+ case VariableLocation::CONTEXT: {
+ // Deleting local var/let/const, context variables, and arguments
+ // does not have any effect.
+ builder()->LoadFalse();
+ break;
}
+ case VariableLocation::UNALLOCATED:
+ // TODO(adamk): Falling through to the runtime results in correct
+ // behavior, but does unnecessary context-walking (since scope
+ // analysis has already proven that the variable doesn't exist in
+ // any non-global scope). Consider adding a DeleteGlobal bytecode
+ // that knows how to deal with ScriptContexts as well as global
+ // object properties.
+ case VariableLocation::LOOKUP: {
+ Register name_reg = register_allocator()->NewRegister();
+ builder()
+ ->LoadLiteral(variable->raw_name())
+ .StoreAccumulatorInRegister(name_reg)
+ .CallRuntime(Runtime::kDeleteLookupSlot, name_reg);
+ break;
+ }
+ default:
+ UNREACHABLE();
}
} else {
- // Delete of an unresolvable reference returns true.
- VisitForEffect(expr->expression());
+ // Delete of an unresolvable reference, new.target, and this returns true.
+ VisitForEffect(expr);
builder()->LoadTrue();
}
}
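
A standalone sketch (not V8 code) of the VariableLocation dispatch that the rewritten VisitDelete performs for an unqualified identifier in sloppy mode; the enum members mirror the cases above.

    #include <cassert>

    // Hypothetical mirror of the VariableLocation switch in VisitDelete.
    enum class VariableLocation { PARAMETER, LOCAL, CONTEXT, UNALLOCATED, LOOKUP };
    enum class DeleteResult { kLoadFalse, kRuntimeLookup };

    DeleteResult DeleteOfUnqualifiedName(VariableLocation location) {
      switch (location) {
        case VariableLocation::PARAMETER:
        case VariableLocation::LOCAL:
        case VariableLocation::CONTEXT:
          // Deleting parameters, locals, and context slots has no effect,
          // so the generator can emit LoadFalse without a runtime call.
          return DeleteResult::kLoadFalse;
        case VariableLocation::UNALLOCATED:
        case VariableLocation::LOOKUP:
          // Globals and dynamic lookups fall through to kDeleteLookupSlot.
          return DeleteResult::kRuntimeLookup;
      }
      return DeleteResult::kRuntimeLookup;  // unreachable with valid input
    }

    int main() {
      assert(DeleteOfUnqualifiedName(VariableLocation::LOCAL) ==
             DeleteResult::kLoadFalse);
      assert(DeleteOfUnqualifiedName(VariableLocation::UNALLOCATED) ==
             DeleteResult::kRuntimeLookup);
      return 0;
    }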
@@ -3977,7 +4627,7 @@ void BytecodeGenerator::VisitCountOperation(CountOperation* expr) {
// Left-hand side can only be a property, a global or a variable slot.
Property* property = expr->expression()->AsProperty();
- LhsKind assign_type = Property::GetAssignType(property);
+ AssignType assign_type = Property::GetAssignType(property);
bool is_postfix = expr->is_postfix() && !execution_result()->IsEffect();
@@ -3986,7 +4636,7 @@ void BytecodeGenerator::VisitCountOperation(CountOperation* expr) {
RegisterList super_property_args;
const AstRawString* name;
switch (assign_type) {
- case VARIABLE: {
+ case NON_PROPERTY: {
VariableProxy* proxy = expr->expression()->AsVariableProxy();
BuildVariableLoadForAccumulatorValue(proxy->var(),
proxy->hole_check_mode());
@@ -4054,7 +4704,7 @@ void BytecodeGenerator::VisitCountOperation(CountOperation* expr) {
// Store the value.
builder()->SetExpressionPosition(expr);
switch (assign_type) {
- case VARIABLE: {
+ case NON_PROPERTY: {
VariableProxy* proxy = expr->expression()->AsVariableProxy();
BuildVariableAssignment(proxy->var(), expr->op(),
proxy->hole_check_mode());
@@ -4208,7 +4858,7 @@ void BytecodeGenerator::VisitCompareOperation(CompareOperation* expr) {
void BytecodeGenerator::VisitArithmeticExpression(BinaryOperation* expr) {
FeedbackSlot slot = feedback_spec()->AddBinaryOpICSlot();
Expression* subexpr;
- Smi* literal;
+ Smi literal;
if (expr->IsSmiLiteralOperation(&subexpr, &literal)) {
TypeHint type_hint = VisitForAccumulatorValue(subexpr);
builder()->SetExpressionPosition(expr);
@@ -4277,14 +4927,11 @@ void BytecodeGenerator::VisitImportCallExpression(ImportCallExpression* expr) {
.CallRuntime(Runtime::kDynamicImportCall, args);
}
-void BytecodeGenerator::BuildGetIterator(Expression* iterable,
- IteratorType hint) {
+void BytecodeGenerator::BuildGetIterator(IteratorType hint) {
RegisterList args = register_allocator()->NewRegisterList(1);
Register method = register_allocator()->NewRegister();
Register obj = args[0];
- VisitForAccumulatorValue(iterable);
-
if (hint == IteratorType::kAsync) {
// Set method to GetMethod(obj, @@asyncIterator)
builder()->StoreAccumulatorInRegister(obj).LoadAsyncIteratorProperty(
@@ -4346,9 +4993,9 @@ void BytecodeGenerator::BuildGetIterator(Expression* iterable,
// Returns an IteratorRecord which is valid for the lifetime of the current
// register_allocation_scope.
BytecodeGenerator::IteratorRecord BytecodeGenerator::BuildGetIteratorRecord(
- Expression* iterable, Register next, Register object, IteratorType hint) {
+ Register next, Register object, IteratorType hint) {
DCHECK(next.is_valid() && object.is_valid());
- BuildGetIterator(iterable, hint);
+ BuildGetIterator(hint);
builder()
->StoreAccumulatorInRegister(object)
@@ -4359,10 +5006,10 @@ BytecodeGenerator::IteratorRecord BytecodeGenerator::BuildGetIteratorRecord(
}
BytecodeGenerator::IteratorRecord BytecodeGenerator::BuildGetIteratorRecord(
- Expression* iterable, IteratorType hint) {
+ IteratorType hint) {
Register next = register_allocator()->NewRegister();
Register object = register_allocator()->NewRegister();
- return BuildGetIteratorRecord(iterable, next, object, hint);
+ return BuildGetIteratorRecord(next, object, hint);
}
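
A minimal sketch of the delegation between the two overloads above, assuming a toy Register and allocator in place of the interpreter's: the register-less variant allocates fresh registers and forwards to the explicit one.

    #include <cassert>

    // Hypothetical stand-ins for interpreter registers and the record they form.
    struct Register {
      int index = -1;
      bool is_valid() const { return index >= 0; }
    };

    struct IteratorRecord {
      Register object;  // the iterator object
      Register next;    // its cached next() method
    };

    struct RegisterAllocator {
      int next_index = 0;
      Register NewRegister() { return Register{next_index++}; }
    };

    IteratorRecord BuildGetIteratorRecord(Register next, Register object) {
      assert(next.is_valid() && object.is_valid());
      return IteratorRecord{object, next};
    }

    // Mirrors the second overload: allocate, then delegate.
    IteratorRecord BuildGetIteratorRecord(RegisterAllocator* allocator) {
      Register next = allocator->NewRegister();
      Register object = allocator->NewRegister();
      return BuildGetIteratorRecord(next, object);
    }

    int main() {
      RegisterAllocator allocator;
      IteratorRecord record = BuildGetIteratorRecord(&allocator);
      assert(record.object.is_valid() && record.next.is_valid());
      return 0;
    }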
void BytecodeGenerator::BuildIteratorNext(const IteratorRecord& iterator,
@@ -4371,7 +5018,9 @@ void BytecodeGenerator::BuildIteratorNext(const IteratorRecord& iterator,
builder()->CallProperty(iterator.next(), RegisterList(iterator.object()),
feedback_index(feedback_spec()->AddCallICSlot()));
- // TODO(caitp): support async IteratorNext here.
+ if (iterator.type() == IteratorType::kAsync) {
+ BuildAwait();
+ }
BytecodeLabel is_object;
builder()
@@ -4413,7 +5062,7 @@ void BytecodeGenerator::BuildIteratorClose(const IteratorRecord& iterator,
if (iterator.type() == IteratorType::kAsync) {
DCHECK_NOT_NULL(expr);
- BuildAwait(expr);
+ BuildAwait(expr->position());
}
builder()->JumpIfJSReceiver(done.New());
@@ -4428,11 +5077,6 @@ void BytecodeGenerator::BuildIteratorClose(const IteratorRecord& iterator,
done.Bind(builder());
}
-void BytecodeGenerator::VisitGetIterator(GetIterator* expr) {
- builder()->SetExpressionPosition(expr);
- BuildGetIterator(expr->iterable(), expr->hint());
-}
-
void BytecodeGenerator::VisitGetTemplateObject(GetTemplateObject* expr) {
builder()->SetExpressionPosition(expr);
size_t entry = builder()->AllocateDeferredConstantPoolEntry();
@@ -4730,10 +5374,6 @@ void BytecodeGenerator::VisitNaryLogicalAndExpression(NaryOperation* expr) {
}
}
-void BytecodeGenerator::VisitRewritableExpression(RewritableExpression* expr) {
- Visit(expr->expression());
-}
-
void BytecodeGenerator::BuildNewLocalActivationContext() {
ValueResultScope value_execution_result(this);
Scope* scope = closure_scope();
@@ -4888,7 +5528,7 @@ void BytecodeGenerator::VisitNewTargetVariable(Variable* variable) {
// to pass in the generator object. In ordinary calls, new.target is always
// undefined because generator functions are non-constructible, so don't
// assign anything to the new.target variable.
- if (info()->literal()->CanSuspend()) return;
+ if (IsResumableFunction(info()->literal()->kind())) return;
if (variable->location() == VariableLocation::LOCAL) {
// The new.target register was already assigned by entry trampoline.
@@ -4908,10 +5548,15 @@ void BytecodeGenerator::BuildGeneratorObjectVariableInitialization() {
Variable* generator_object_var = closure_scope()->generator_object_var();
RegisterAllocationScope register_scope(this);
RegisterList args = register_allocator()->NewRegisterList(2);
+ Runtime::FunctionId function_id =
+ (IsAsyncFunction(info()->literal()->kind()) &&
+ !IsAsyncGeneratorFunction(info()->literal()->kind()))
+ ? Runtime::kInlineAsyncFunctionEnter
+ : Runtime::kInlineCreateJSGeneratorObject;
builder()
->MoveRegister(Register::function_closure(), args[0])
.MoveRegister(builder()->Receiver(), args[1])
- .CallRuntime(Runtime::kInlineCreateJSGeneratorObject, args)
+ .CallRuntime(function_id, args)
.StoreAccumulatorInRegister(generator_object());
if (generator_object_var->location() == VariableLocation::LOCAL) {
@@ -4934,7 +5579,9 @@ void BytecodeGenerator::BuildPushUndefinedIntoRegisterList(
void BytecodeGenerator::BuildLoadPropertyKey(LiteralProperty* property,
Register out_reg) {
if (property->key()->IsStringLiteral()) {
- VisitForRegisterValue(property->key(), out_reg);
+ builder()
+ ->LoadLiteral(property->key()->AsLiteral()->AsRawString())
+ .StoreAccumulatorInRegister(out_reg);
} else {
VisitForAccumulatorValue(property->key());
builder()->ToName(out_reg);
@@ -5107,7 +5754,7 @@ LanguageMode BytecodeGenerator::language_mode() const {
}
Register BytecodeGenerator::generator_object() const {
- DCHECK(info()->literal()->CanSuspend());
+ DCHECK(IsResumableFunction(info()->literal()->kind()));
return incoming_new_target_or_generator_;
}
diff --git a/deps/v8/src/interpreter/bytecode-generator.h b/deps/v8/src/interpreter/bytecode-generator.h
index 3150245b0b..a5c573f7ff 100644
--- a/deps/v8/src/interpreter/bytecode-generator.h
+++ b/deps/v8/src/interpreter/bytecode-generator.h
@@ -32,7 +32,7 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
explicit BytecodeGenerator(
UnoptimizedCompilationInfo* info,
const AstStringConstants* ast_string_constants,
- ZoneVector<FunctionLiteral*>* eager_inner_literals);
+ std::vector<FunctionLiteral*>* eager_inner_literals);
void GenerateBytecode(uintptr_t stack_limit);
Handle<BytecodeArray> FinalizeBytecode(Isolate* isolate,
@@ -44,7 +44,7 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
// Visiting function for declarations list and statements are overridden.
void VisitDeclarations(Declaration::List* declarations);
- void VisitStatements(ZonePtrList<Statement>* statments);
+ void VisitStatements(const ZonePtrList<Statement>* statments);
private:
class ContextScope;
@@ -62,6 +62,7 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
class IteratorRecord;
class NaryCodeCoverageSlots;
class RegisterAllocationScope;
+ class AccumulatorPreservingScope;
class TestResultScope;
class ValueResultScope;
@@ -69,6 +70,80 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
enum class TestFallthrough { kThen, kElse, kNone };
enum class TypeHint { kAny, kBoolean, kString };
+ enum class AccumulatorPreservingMode { kNone, kPreserve };
+
+ // An assignment has to evaluate its LHS before its RHS, but has to assign to
+ // the LHS after both evaluations are done. This class stores the data
+ // computed in the LHS evaluation that has to live across the RHS evaluation,
+ // and is used in the actual LHS assignment.
+ class AssignmentLhsData {
+ public:
+ static AssignmentLhsData NonProperty(Expression* expr);
+ static AssignmentLhsData NamedProperty(Expression* object_expr,
+ Register object,
+ const AstRawString* name);
+ static AssignmentLhsData KeyedProperty(Register object, Register key);
+ static AssignmentLhsData NamedSuperProperty(
+ RegisterList super_property_args);
+ static AssignmentLhsData KeyedSuperProperty(
+ RegisterList super_property_args);
+
+ AssignType assign_type() const { return assign_type_; }
+ Expression* expr() const {
+ DCHECK_EQ(assign_type_, NON_PROPERTY);
+ return expr_;
+ }
+ Expression* object_expr() const {
+ DCHECK_EQ(assign_type_, NAMED_PROPERTY);
+ return object_expr_;
+ }
+ Register object() const {
+ DCHECK(assign_type_ == NAMED_PROPERTY || assign_type_ == KEYED_PROPERTY);
+ return object_;
+ }
+ Register key() const {
+ DCHECK_EQ(assign_type_, KEYED_PROPERTY);
+ return key_;
+ }
+ const AstRawString* name() const {
+ DCHECK_EQ(assign_type_, NAMED_PROPERTY);
+ return name_;
+ }
+ RegisterList super_property_args() const {
+ DCHECK(assign_type_ == NAMED_SUPER_PROPERTY ||
+ assign_type_ == KEYED_SUPER_PROPERTY);
+ return super_property_args_;
+ }
+
+ private:
+ AssignmentLhsData(AssignType assign_type, Expression* expr,
+ RegisterList super_property_args, Register object,
+ Register key, Expression* object_expr,
+ const AstRawString* name)
+ : assign_type_(assign_type),
+ expr_(expr),
+ super_property_args_(super_property_args),
+ object_(object),
+ key_(key),
+ object_expr_(object_expr),
+ name_(name) {}
+
+ AssignType assign_type_;
+
+ // Different assignment types use different fields:
+ //
+ // NON_PROPERTY: expr
+ // NAMED_PROPERTY: object_expr, object, name
+ // KEYED_PROPERTY: object, key
+ // NAMED_SUPER_PROPERTY: super_property_args
+ // KEYED_SUPER_PROPERTY: super_property_args
+ Expression* expr_;
+ RegisterList super_property_args_;
+ Register object_;
+ Register key_;
+ Expression* object_expr_;
+ const AstRawString* name_;
+ };
void GenerateBytecodeBody();
void AllocateDeferredConstants(Isolate* isolate, Handle<Script> script);
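
AssignmentLhsData above is essentially a tagged record: each factory fills only the fields its assign type needs, and each accessor asserts the tag. A compilable miniature of the same pattern, with ints standing in for Expression* and Register (types and values hypothetical):

    #include <cassert>

    enum AssignType { NON_PROPERTY, NAMED_PROPERTY, KEYED_PROPERTY };

    class LhsData {
     public:
      static LhsData NonProperty(int expr) {
        return LhsData(NON_PROPERTY, expr, -1, -1);
      }
      static LhsData KeyedProperty(int object, int key) {
        return LhsData(KEYED_PROPERTY, -1, object, key);
      }

      // Each accessor asserts that its field is meaningful for the stored
      // tag, just like the DCHECKs above.
      int expr() const { assert(type_ == NON_PROPERTY); return expr_; }
      int object() const { assert(type_ == KEYED_PROPERTY); return object_; }
      int key() const { assert(type_ == KEYED_PROPERTY); return key_; }

     private:
      LhsData(AssignType type, int expr, int object, int key)
          : type_(type), expr_(expr), object_(object), key_(key) {}

      AssignType type_;
      int expr_, object_, key_;
    };

    int main() {
      LhsData lhs = LhsData::KeyedProperty(/*object=*/0, /*key=*/1);
      assert(lhs.object() == 0 && lhs.key() == 1);
      return 0;
    }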
@@ -101,7 +176,8 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
// Visit the arguments expressions in |args| and store them in |args_regs|,
// growing |args_regs| for each argument visited.
- void VisitArguments(ZonePtrList<Expression>* args, RegisterList* arg_regs);
+ void VisitArguments(const ZonePtrList<Expression>* args,
+ RegisterList* arg_regs);
// Visit a keyed super property load. The optional
// |opt_receiver_out| register will have the receiver stored to it
@@ -121,9 +197,23 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
void VisitPropertyLoadForRegister(Register obj, Property* expr,
Register destination);
- void BuildLoadNamedProperty(Property* property, Register object,
+ AssignmentLhsData PrepareAssignmentLhs(
+ Expression* lhs, AccumulatorPreservingMode accumulator_preserving_mode =
+ AccumulatorPreservingMode::kNone);
+ void BuildAssignment(const AssignmentLhsData& data, Token::Value op,
+ LookupHoistingMode lookup_hoisting_mode);
+
+ Expression* GetDestructuringDefaultValue(Expression** target);
+ void BuildDestructuringArrayAssignment(
+ ArrayLiteral* pattern, Token::Value op,
+ LookupHoistingMode lookup_hoisting_mode);
+ void BuildDestructuringObjectAssignment(
+ ObjectLiteral* pattern, Token::Value op,
+ LookupHoistingMode lookup_hoisting_mode);
+
+ void BuildLoadNamedProperty(const Expression* object_expr, Register object,
const AstRawString* name);
- void BuildStoreNamedProperty(Property* property, Register object,
+ void BuildStoreNamedProperty(const Expression* object_expr, Register object,
const AstRawString* name);
void BuildVariableLoad(Variable* variable, HoleCheckMode hole_check_mode,
@@ -155,23 +245,25 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
void BuildNewLocalWithContext(Scope* scope);
void BuildGeneratorPrologue();
- void BuildSuspendPoint(Expression* suspend_expr);
+ void BuildSuspendPoint(int position);
+ void BuildAwait(int position = kNoSourcePosition);
void BuildAwait(Expression* await_expr);
- void BuildGetIterator(Expression* iterable, IteratorType hint);
+ void BuildFinalizeIteration(IteratorRecord iterator, Register done,
+ Register iteration_continuation_token);
+
+ void BuildGetIterator(IteratorType hint);
// Create an IteratorRecord with pre-allocated registers holding the next
// method and iterator object.
- IteratorRecord BuildGetIteratorRecord(Expression* iterable,
- Register iterator_next,
+ IteratorRecord BuildGetIteratorRecord(Register iterator_next,
Register iterator_object,
IteratorType hint);
// Create an IteratorRecord allocating new registers to hold the next method
// and iterator object.
- IteratorRecord BuildGetIteratorRecord(Expression* iterable,
- IteratorType hint);
+ IteratorRecord BuildGetIteratorRecord(IteratorType hint);
void BuildIteratorNext(const IteratorRecord& iterator, Register next_result);
void BuildIteratorClose(const IteratorRecord& iterator,
Expression* expr = nullptr);
@@ -180,24 +272,28 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
BytecodeLabel* if_called,
BytecodeLabels* if_notcalled);
- void BuildArrayLiteralSpread(Spread* spread, Register array, Register index,
- FeedbackSlot index_slot,
- FeedbackSlot element_slot);
+ void BuildFillArrayWithIterator(IteratorRecord iterator, Register array,
+ Register index, Register value,
+ FeedbackSlot next_value_slot,
+ FeedbackSlot next_done_slot,
+ FeedbackSlot index_slot,
+ FeedbackSlot element_slot);
// Create Array literals. |expr| can be nullptr, but if provided,
// a boilerplate will be used to create an initial array for elements
// before the first spread.
- void BuildCreateArrayLiteral(ZonePtrList<Expression>* elements,
+ void BuildCreateArrayLiteral(const ZonePtrList<Expression>* elements,
ArrayLiteral* expr);
void BuildCreateObjectLiteral(Register literal, uint8_t flags, size_t entry);
void AllocateTopLevelRegisters();
void VisitArgumentsObject(Variable* variable);
void VisitRestArgumentsArray(Variable* rest);
void VisitCallSuper(Call* call);
- void BuildClassLiteral(ClassLiteral* expr);
+ void BuildClassLiteral(ClassLiteral* expr, Register name);
+ void VisitClassLiteral(ClassLiteral* expr, Register name);
void VisitNewTargetVariable(Variable* variable);
void VisitThisFunctionVariable(Variable* variable);
- void BuildInstanceFieldInitialization(Register constructor,
- Register instance);
+ void BuildInstanceMemberInitialization(Register constructor,
+ Register instance);
void BuildGeneratorObjectVariableInitialization();
void VisitBlockDeclarationsAndStatements(Block* stmt);
void VisitSetHomeObject(Register value, Register home_object,
@@ -248,6 +344,16 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
void BuildTest(ToBooleanMode mode, BytecodeLabels* then_labels,
BytecodeLabels* else_labels, TestFallthrough fallthrough);
+ template <typename TryBodyFunc, typename CatchBodyFunc>
+ void BuildTryCatch(TryBodyFunc try_body_func, CatchBodyFunc catch_body_func,
+ HandlerTable::CatchPrediction catch_prediction,
+ TryCatchStatement* stmt_for_coverage = nullptr);
+ template <typename TryBodyFunc, typename FinallyBodyFunc>
+ void BuildTryFinally(TryBodyFunc try_body_func,
+ FinallyBodyFunc finally_body_func,
+ HandlerTable::CatchPrediction catch_prediction,
+ TryFinallyStatement* stmt_for_coverage = nullptr);
+
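
A minimal sketch of the template shape introduced above, under the assumption that only the control-flow scaffolding matters here: the try/catch plumbing is written once and the bodies arrive as callables. Output strings stand in for emitted bytecode.

    #include <cstdio>

    template <typename TryBodyFunc, typename CatchBodyFunc>
    void BuildTryCatch(TryBodyFunc try_body_func, CatchBodyFunc catch_body_func) {
      std::puts("emit: enter try region");
      try_body_func();
      std::puts("emit: leave try region, jump over handler");
      catch_body_func();
      std::puts("emit: bind handler exit");
    }

    int main() {
      BuildTryCatch([] { std::puts("emit: try body"); },
                    [] { std::puts("emit: catch body"); });
      return 0;
    }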
// Visitors for obtaining expression result in the accumulator, in a
// register, or just getting the effect. Some visitors return a TypeHint which
// specifies the type of the result of the visited expression.
@@ -351,7 +457,7 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
Scope* current_scope_;
// External vector of literals to be eagerly compiled.
- ZoneVector<FunctionLiteral*>* eager_inner_literals_;
+ std::vector<FunctionLiteral*>* eager_inner_literals_;
FeedbackSlotCache* feedback_slot_cache_;
diff --git a/deps/v8/src/interpreter/bytecode-register.cc b/deps/v8/src/interpreter/bytecode-register.cc
index d79bf9a9d0..56f6297016 100644
--- a/deps/v8/src/interpreter/bytecode-register.cc
+++ b/deps/v8/src/interpreter/bytecode-register.cc
@@ -11,27 +11,27 @@ namespace interpreter {
static const int kLastParamRegisterIndex =
(InterpreterFrameConstants::kRegisterFileFromFp -
InterpreterFrameConstants::kLastParamFromFp) /
- kPointerSize;
+ kSystemPointerSize;
static const int kFunctionClosureRegisterIndex =
(InterpreterFrameConstants::kRegisterFileFromFp -
StandardFrameConstants::kFunctionOffset) /
- kPointerSize;
+ kSystemPointerSize;
static const int kCurrentContextRegisterIndex =
(InterpreterFrameConstants::kRegisterFileFromFp -
StandardFrameConstants::kContextOffset) /
- kPointerSize;
+ kSystemPointerSize;
static const int kBytecodeArrayRegisterIndex =
(InterpreterFrameConstants::kRegisterFileFromFp -
InterpreterFrameConstants::kBytecodeArrayFromFp) /
- kPointerSize;
+ kSystemPointerSize;
static const int kBytecodeOffsetRegisterIndex =
(InterpreterFrameConstants::kRegisterFileFromFp -
InterpreterFrameConstants::kBytecodeOffsetFromFp) /
- kPointerSize;
+ kSystemPointerSize;
static const int kCallerPCOffsetRegisterIndex =
(InterpreterFrameConstants::kRegisterFileFromFp -
InterpreterFrameConstants::kCallerPCOffsetFromFp) /
- kPointerSize;
+ kSystemPointerSize;
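
A standalone check of the arithmetic above, with made-up frame offsets in place of the real InterpreterFrameConstants: offsets are byte distances from the frame pointer, so dividing the distance from the register file by the pointer size yields a register index.

    // Offsets below are hypothetical; only the arithmetic mirrors the source.
    constexpr int kSystemPointerSize = sizeof(void*);
    constexpr int kRegisterFileFromFp = -3 * kSystemPointerSize;
    constexpr int kBytecodeOffsetFromFp = -5 * kSystemPointerSize;

    constexpr int kBytecodeOffsetRegisterIndex =
        (kRegisterFileFromFp - kBytecodeOffsetFromFp) / kSystemPointerSize;

    static_assert(kBytecodeOffsetRegisterIndex == 2,
                  "the slot sits two registers below the register file");

    int main() { return 0; }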
Register Register::FromParameterIndex(int index, int parameter_count) {
DCHECK_GE(index, 0);
diff --git a/deps/v8/src/interpreter/bytecode-register.h b/deps/v8/src/interpreter/bytecode-register.h
index b5420f7e72..ae8bbe4275 100644
--- a/deps/v8/src/interpreter/bytecode-register.h
+++ b/deps/v8/src/interpreter/bytecode-register.h
@@ -91,7 +91,7 @@ class V8_EXPORT_PRIVATE Register final {
static const int kInvalidIndex = kMaxInt;
static const int kRegisterFileStartOffset =
- InterpreterFrameConstants::kRegisterFileFromFp / kPointerSize;
+ InterpreterFrameConstants::kRegisterFileFromFp / kSystemPointerSize;
int index_;
};
diff --git a/deps/v8/src/interpreter/bytecodes.h b/deps/v8/src/interpreter/bytecodes.h
index 39f61eb9bd..7efcd1ae62 100644
--- a/deps/v8/src/interpreter/bytecodes.h
+++ b/deps/v8/src/interpreter/bytecodes.h
@@ -255,8 +255,8 @@ namespace interpreter {
OperandType::kIdx, OperandType::kFlag8) \
V(CreateArrayFromIterable, AccumulatorUse::kReadWrite) \
V(CreateEmptyArrayLiteral, AccumulatorUse::kWrite, OperandType::kIdx) \
- V(CreateObjectLiteral, AccumulatorUse::kNone, OperandType::kIdx, \
- OperandType::kIdx, OperandType::kFlag8, OperandType::kRegOut) \
+ V(CreateObjectLiteral, AccumulatorUse::kWrite, OperandType::kIdx, \
+ OperandType::kIdx, OperandType::kFlag8) \
V(CreateEmptyObjectLiteral, AccumulatorUse::kWrite) \
V(CloneObject, AccumulatorUse::kWrite, OperandType::kReg, \
OperandType::kFlag8, OperandType::kIdx) \
@@ -687,6 +687,15 @@ class V8_EXPORT_PRIVATE Bytecodes final : public AllStatic {
bytecode == Bytecode::kInvokeIntrinsic;
}
+ // Returns true if the bytecode is a one-shot bytecode. One-shot bytecodes
+ // don't collect feedback and are intended for code that runs only once and
+ // shouldn't be optimized.
+ static constexpr bool IsOneShotBytecode(Bytecode bytecode) {
+ return bytecode == Bytecode::kCallNoFeedback ||
+ bytecode == Bytecode::kLdaNamedPropertyNoFeedback ||
+ bytecode == Bytecode::kStaNamedPropertyNoFeedback;
+ }
+
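
A hedged usage sketch for the new predicate, with the Bytecode enum trimmed to three hypothetical members: a pass that allocates feedback slots can skip bytecodes that intentionally collect none.

    enum class Bytecode { kCallNoFeedback, kLdaNamedPropertyNoFeedback, kAdd };

    constexpr bool IsOneShotBytecode(Bytecode b) {
      return b == Bytecode::kCallNoFeedback ||
             b == Bytecode::kLdaNamedPropertyNoFeedback;
    }

    constexpr bool WantsFeedbackSlot(Bytecode b) { return !IsOneShotBytecode(b); }

    static_assert(!WantsFeedbackSlot(Bytecode::kCallNoFeedback),
                  "one-shot bytecodes run once and keep no feedback");
    static_assert(WantsFeedbackSlot(Bytecode::kAdd),
                  "regular bytecodes keep feedback");

    int main() { return 0; }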
// Returns true if the bytecode is a scaling prefix bytecode.
static constexpr bool IsPrefixScalingBytecode(Bytecode bytecode) {
return bytecode == Bytecode::kExtraWide || bytecode == Bytecode::kWide ||
diff --git a/deps/v8/src/interpreter/constant-array-builder.cc b/deps/v8/src/interpreter/constant-array-builder.cc
index 47bb955374..d77960f7a1 100644
--- a/deps/v8/src/interpreter/constant-array-builder.cc
+++ b/deps/v8/src/interpreter/constant-array-builder.cc
@@ -66,12 +66,12 @@ const ConstantArrayBuilder::Entry& ConstantArrayBuilder::ConstantArraySlice::At(
#if DEBUG
void ConstantArrayBuilder::ConstantArraySlice::CheckAllElementsAreUnique(
Isolate* isolate) const {
- std::set<Smi*> smis;
+ std::set<Smi> smis;
std::set<double> heap_numbers;
std::set<const AstRawString*> strings;
std::set<const char*> bigints;
std::set<const Scope*> scopes;
- std::set<Object*> deferred_objects;
+ std::set<Object, Object::Comparer> deferred_objects;
for (const Entry& entry : constants_) {
bool duplicate = false;
switch (entry.tag_) {
@@ -207,7 +207,7 @@ Handle<FixedArray> ConstantArrayBuilder::ToFixedArray(Isolate* isolate) {
return fixed_array;
}
-size_t ConstantArrayBuilder::Insert(Smi* smi) {
+size_t ConstantArrayBuilder::Insert(Smi smi) {
auto entry = smi_map_.find(smi);
if (entry == smi_map_.end()) {
return AllocateReservedEntry(smi);
@@ -312,7 +312,7 @@ void ConstantArrayBuilder::SetDeferredAt(size_t index, Handle<Object> object) {
return slice->At(index).SetDeferred(object);
}
-void ConstantArrayBuilder::SetJumpTableSmi(size_t index, Smi* smi) {
+void ConstantArrayBuilder::SetJumpTableSmi(size_t index, Smi smi) {
ConstantArraySlice* slice = IndexToSlice(index);
// Allow others to reuse these Smis, but insert using emplace to avoid
// overwriting existing values in the Smi map (which may have a smaller
@@ -332,14 +332,14 @@ OperandSize ConstantArrayBuilder::CreateReservedEntry() {
}
ConstantArrayBuilder::index_t ConstantArrayBuilder::AllocateReservedEntry(
- Smi* value) {
+ Smi value) {
index_t index = static_cast<index_t>(AllocateIndex(Entry(value)));
smi_map_[value] = index;
return index;
}
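
A sketch of why the Smi* to Smi migration in this file works, assuming an illustrative tagging scheme rather than V8's exact one: a small integer carried as one tagged word by value can serve directly as a container key, with no pointer identity involved.

    #include <cassert>
    #include <cstdint>
    #include <set>

    class Smi {
     public:
      explicit constexpr Smi(intptr_t value) : tagged_(value << 1) {}
      constexpr intptr_t value() const { return tagged_ >> 1; }
      friend bool operator<(Smi a, Smi b) { return a.tagged_ < b.tagged_; }

     private:
      intptr_t tagged_;  // one tagged word, passed by value
    };

    int main() {
      std::set<Smi> smis;    // mirrors std::set<Smi> / ZoneMap<Smi, ...> above
      smis.insert(Smi(42));
      smis.insert(Smi(42));  // equal values collapse, unlike distinct pointers
      assert(smis.size() == 1);
      assert(Smi(42).value() == 42);
      return 0;
    }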
size_t ConstantArrayBuilder::CommitReservedEntry(OperandSize operand_size,
- Smi* value) {
+ Smi value) {
DiscardReservedEntry(operand_size);
size_t index;
auto entry = smi_map_.find(value);
diff --git a/deps/v8/src/interpreter/constant-array-builder.h b/deps/v8/src/interpreter/constant-array-builder.h
index f06983abfa..d736996d5e 100644
--- a/deps/v8/src/interpreter/constant-array-builder.h
+++ b/deps/v8/src/interpreter/constant-array-builder.h
@@ -9,6 +9,7 @@
#include "src/globals.h"
#include "src/identity-map.h"
#include "src/interpreter/bytecodes.h"
+#include "src/objects/smi.h"
#include "src/zone/zone-containers.h"
namespace v8 {
@@ -48,7 +49,7 @@ class V8_EXPORT_PRIVATE ConstantArrayBuilder final {
static const size_t k32BitCapacity =
kMaxUInt32 - k16BitCapacity - k8BitCapacity + 1;
- ConstantArrayBuilder(Zone* zone);
+ explicit ConstantArrayBuilder(Zone* zone);
// Generate a fixed array of constant handles based on inserted objects.
Handle<FixedArray> ToFixedArray(Isolate* isolate);
@@ -63,7 +64,7 @@ class V8_EXPORT_PRIVATE ConstantArrayBuilder final {
// Insert an object into the constants array if it is not already present.
// Returns the array index associated with the object.
- size_t Insert(Smi* smi);
+ size_t Insert(Smi smi);
size_t Insert(double number);
size_t Insert(const AstRawString* raw_string);
size_t Insert(AstBigInt bigint);
@@ -87,7 +88,7 @@ class V8_EXPORT_PRIVATE ConstantArrayBuilder final {
// Sets the jump table entry at |index| to |smi|. Note that |index| is the
// constant pool index, not the switch case value.
- void SetJumpTableSmi(size_t index, Smi* smi);
+ void SetJumpTableSmi(size_t index, Smi smi);
// Creates a reserved entry in the constant pool and returns
// the size of the operand that'll be required to hold the entry
@@ -96,7 +97,7 @@ class V8_EXPORT_PRIVATE ConstantArrayBuilder final {
// Commit reserved entry and returns the constant pool index for the
// SMI value.
- size_t CommitReservedEntry(OperandSize operand_size, Smi* value);
+ size_t CommitReservedEntry(OperandSize operand_size, Smi value);
// Discards constant pool reservation.
void DiscardReservedEntry(OperandSize operand_size);
@@ -111,7 +112,7 @@ class V8_EXPORT_PRIVATE ConstantArrayBuilder final {
enum class Tag : uint8_t;
public:
- explicit Entry(Smi* smi) : smi_(smi), tag_(Tag::kSmi) {}
+ explicit Entry(Smi smi) : smi_(smi), tag_(Tag::kSmi) {}
explicit Entry(double heap_number)
: heap_number_(heap_number), tag_(Tag::kHeapNumber) {}
explicit Entry(const AstRawString* raw_string)
@@ -143,7 +144,7 @@ class V8_EXPORT_PRIVATE ConstantArrayBuilder final {
handle_ = handle;
}
- void SetJumpTableSmi(Smi* smi) {
+ void SetJumpTableSmi(Smi smi) {
DCHECK_EQ(tag_, Tag::kUninitializedJumpTableSmi);
tag_ = Tag::kJumpTableSmi;
smi_ = smi;
@@ -156,7 +157,7 @@ class V8_EXPORT_PRIVATE ConstantArrayBuilder final {
union {
Handle<Object> handle_;
- Smi* smi_;
+ Smi smi_;
double heap_number_;
const AstRawString* raw_string_;
AstBigInt bigint_;
@@ -186,7 +187,7 @@ class V8_EXPORT_PRIVATE ConstantArrayBuilder final {
index_t AllocateIndex(Entry constant_entry);
index_t AllocateIndexArray(Entry constant_entry, size_t size);
- index_t AllocateReservedEntry(Smi* value);
+ index_t AllocateReservedEntry(Smi value);
struct ConstantArraySlice final : public ZoneObject {
ConstantArraySlice(Zone* zone, size_t start_index, size_t capacity,
@@ -227,8 +228,8 @@ class V8_EXPORT_PRIVATE ConstantArrayBuilder final {
base::KeyEqualityMatcher<intptr_t>,
ZoneAllocationPolicy>
constants_map_;
- ZoneMap<Smi*, index_t> smi_map_;
- ZoneVector<std::pair<Smi*, index_t>> smi_pairs_;
+ ZoneMap<Smi, index_t> smi_map_;
+ ZoneVector<std::pair<Smi, index_t>> smi_pairs_;
ZoneMap<double, index_t> heap_number_map_;
#define SINGLETON_ENTRY_FIELD(NAME, LOWER_NAME) int LOWER_NAME##_;
diff --git a/deps/v8/src/interpreter/interpreter-assembler.cc b/deps/v8/src/interpreter/interpreter-assembler.cc
index cc8dfb1a30..dadfaa8783 100644
--- a/deps/v8/src/interpreter/interpreter-assembler.cc
+++ b/deps/v8/src/interpreter/interpreter-assembler.cc
@@ -13,7 +13,6 @@
#include "src/interpreter/bytecodes.h"
#include "src/interpreter/interpreter.h"
#include "src/machine-type.h"
-#include "src/macro-assembler.h"
#include "src/objects-inl.h"
#include "src/zone/zone.h"
@@ -233,7 +232,7 @@ Node* InterpreterAssembler::RegisterLocation(Register reg) {
}
Node* InterpreterAssembler::RegisterFrameOffset(Node* index) {
- return TimesPointerSize(index);
+ return TimesSystemPointerSize(index);
}
Node* InterpreterAssembler::LoadRegister(Node* reg_index) {
@@ -243,12 +242,12 @@ Node* InterpreterAssembler::LoadRegister(Node* reg_index) {
Node* InterpreterAssembler::LoadRegister(Register reg) {
return Load(MachineType::AnyTagged(), GetInterpretedFramePointer(),
- IntPtrConstant(reg.ToOperand() << kPointerSizeLog2));
+ IntPtrConstant(reg.ToOperand() << kSystemPointerSizeLog2));
}
Node* InterpreterAssembler::LoadAndUntagRegister(Register reg) {
- return LoadAndUntagSmi(GetInterpretedFramePointer(), reg.ToOperand()
- << kPointerSizeLog2);
+ return LoadAndUntagSmi(GetInterpretedFramePointer(),
+ reg.ToOperand() << kSystemPointerSizeLog2);
}
Node* InterpreterAssembler::LoadRegisterAtOperandIndex(int operand_index) {
@@ -299,7 +298,7 @@ Node* InterpreterAssembler::RegisterLocationInRegisterList(
void InterpreterAssembler::StoreRegister(Node* value, Register reg) {
StoreNoWriteBarrier(
MachineRepresentation::kTagged, GetInterpretedFramePointer(),
- IntPtrConstant(reg.ToOperand() << kPointerSizeLog2), value);
+ IntPtrConstant(reg.ToOperand() << kSystemPointerSizeLog2), value);
}
void InterpreterAssembler::StoreRegister(Node* value, Node* reg_index) {
@@ -309,7 +308,7 @@ void InterpreterAssembler::StoreRegister(Node* value, Node* reg_index) {
}
void InterpreterAssembler::StoreAndTagRegister(Node* value, Register reg) {
- int offset = reg.ToOperand() << kPointerSizeLog2;
+ int offset = reg.ToOperand() << kSystemPointerSizeLog2;
StoreAndTagSmi(GetInterpretedFramePointer(), offset, value);
}
@@ -674,6 +673,11 @@ TNode<FeedbackVector> InterpreterAssembler::LoadFeedbackVector() {
return CodeStubAssembler::LoadFeedbackVector(function);
}
+Node* InterpreterAssembler::LoadFeedbackVectorUnchecked() {
+ TNode<JSFunction> function = CAST(LoadRegister(Register::function_closure()));
+ return CodeStubAssembler::LoadFeedbackVectorUnchecked(function);
+}
+
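
A hedged model of the convention this helper introduces, using std::optional to stand in for the undefined-or-FeedbackVector node: the unchecked load may produce no vector, and every consumer branches before touching it.

    #include <cstdio>
    #include <optional>

    struct FeedbackVector { int slot_count; };

    std::optional<FeedbackVector> LoadFeedbackVectorUnchecked(bool allocated) {
      if (!allocated) return std::nullopt;  // plays the role of undefined
      return FeedbackVector{8};
    }

    void CollectCallFeedback(const std::optional<FeedbackVector>& maybe_vector) {
      // Mirrors GotoIf(IsUndefined(maybe_feedback_vector), &feedback_done).
      if (!maybe_vector) return;
      std::printf("update feedback, %d slots\n", maybe_vector->slot_count);
    }

    int main() {
      CollectCallFeedback(LoadFeedbackVectorUnchecked(false));  // skips silently
      CollectCallFeedback(LoadFeedbackVectorUnchecked(true));
      return 0;
    }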
void InterpreterAssembler::CallPrologue() {
if (!Bytecodes::MakesCallAlongCriticalPath(bytecode_)) {
// Bytecodes that make a call along the critical path save the bytecode
@@ -706,7 +710,7 @@ void InterpreterAssembler::IncrementCallCount(Node* feedback_vector,
Node* slot_id) {
Comment("increment call count");
TNode<Smi> call_count =
- CAST(LoadFeedbackVectorSlot(feedback_vector, slot_id, kPointerSize));
+ CAST(LoadFeedbackVectorSlot(feedback_vector, slot_id, kTaggedSize));
// The lowest {FeedbackNexus::CallCountField::kShift} bits of the call
// count are used as flags. To increment the call count by 1 we hence
// have to increment by 1 << {FeedbackNexus::CallCountField::kShift}.
@@ -714,7 +718,7 @@ void InterpreterAssembler::IncrementCallCount(Node* feedback_vector,
call_count, SmiConstant(1 << FeedbackNexus::CallCountField::kShift));
// Count is Smi, so we don't need a write barrier.
StoreFeedbackVectorSlot(feedback_vector, slot_id, new_count,
- SKIP_WRITE_BARRIER, kPointerSize);
+ SKIP_WRITE_BARRIER, kTaggedSize);
}
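
A standalone arithmetic check for the comment above, with a hypothetical shift value: when the low bits of the call count word are reserved as flags, incrementing the count by one means adding 1 << kShift, which leaves the flag bits untouched.

    #include <cassert>
    #include <cstdint>

    constexpr int kCallCountFieldShift = 1;  // hypothetical flag width

    constexpr uint32_t IncrementCallCount(uint32_t packed) {
      return packed + (1u << kCallCountFieldShift);
    }

    int main() {
      uint32_t packed = (3u << kCallCountFieldShift) | 1u;  // count=3, flag set
      packed = IncrementCallCount(packed);
      assert((packed >> kCallCountFieldShift) == 4);  // count incremented
      assert((packed & 1u) == 1u);                    // flag bit untouched
      return 0;
    }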
void InterpreterAssembler::CollectCallableFeedback(Node* target, Node* context,
@@ -803,7 +807,7 @@ void InterpreterAssembler::CollectCallableFeedback(Node* target, Node* context,
// MegamorphicSentinel is an immortal immovable object so
// write-barrier is not needed.
Comment("transition to megamorphic");
- DCHECK(Heap::RootIsImmortalImmovable(RootIndex::kmegamorphic_symbol));
+ DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kmegamorphic_symbol));
StoreFeedbackVectorSlot(
feedback_vector, slot_id,
HeapConstant(FeedbackVector::MegamorphicSentinel(isolate())),
@@ -818,13 +822,22 @@ void InterpreterAssembler::CollectCallableFeedback(Node* target, Node* context,
}
void InterpreterAssembler::CollectCallFeedback(Node* target, Node* context,
- Node* feedback_vector,
+ Node* maybe_feedback_vector,
Node* slot_id) {
+ Label feedback_done(this);
+ // If the feedback vector is not valid, there is nothing to do.
+ GotoIf(IsUndefined(maybe_feedback_vector), &feedback_done);
+
+ CSA_SLOW_ASSERT(this, IsFeedbackVector(maybe_feedback_vector));
+
// Increment the call count.
- IncrementCallCount(feedback_vector, slot_id);
+ IncrementCallCount(maybe_feedback_vector, slot_id);
// Collect the callable {target} feedback.
- CollectCallableFeedback(target, context, feedback_vector, slot_id);
+ CollectCallableFeedback(target, context, maybe_feedback_vector, slot_id);
+ Goto(&feedback_done);
+
+ BIND(&feedback_done);
}
void InterpreterAssembler::CallJSAndDispatch(
@@ -898,10 +911,10 @@ template V8_EXPORT_PRIVATE void InterpreterAssembler::CallJSAndDispatch(
void InterpreterAssembler::CallJSWithSpreadAndDispatch(
Node* function, Node* context, const RegListNodePair& args, Node* slot_id,
- Node* feedback_vector) {
+ Node* maybe_feedback_vector) {
DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_));
DCHECK_EQ(Bytecodes::GetReceiverMode(bytecode_), ConvertReceiverMode::kAny);
- CollectCallFeedback(function, context, feedback_vector, slot_id);
+ CollectCallFeedback(function, context, maybe_feedback_vector, slot_id);
Comment("call using CallWithSpread builtin");
Callable callable = CodeFactory::InterpreterPushArgsThenCall(
isolate(), ConvertReceiverMode::kAny,
@@ -926,6 +939,7 @@ Node* InterpreterAssembler::Construct(Node* target, Node* context,
VARIABLE(var_site, MachineRepresentation::kTagged);
Label extra_checks(this, Label::kDeferred), return_result(this, &var_result),
construct(this), construct_array(this, &var_site);
+ GotoIf(IsUndefined(feedback_vector), &construct);
// Increment the call count.
IncrementCallCount(feedback_vector, slot_id);
@@ -1054,7 +1068,7 @@ Node* InterpreterAssembler::Construct(Node* target, Node* context,
// MegamorphicSentinel is an immortal immovable object so
// write-barrier is not needed.
Comment("transition to megamorphic");
- DCHECK(Heap::RootIsImmortalImmovable(RootIndex::kmegamorphic_symbol));
+ DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kmegamorphic_symbol));
StoreFeedbackVectorSlot(
feedback_vector, slot_id,
HeapConstant(FeedbackVector::MegamorphicSentinel(isolate())),
@@ -1106,6 +1120,7 @@ Node* InterpreterAssembler::ConstructWithSpread(Node* target, Node* context,
// constructor _and_ spread the last argument at the same time.
DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_));
Label extra_checks(this, Label::kDeferred), construct(this);
+ GotoIf(IsUndefined(feedback_vector), &construct);
// Increment the call count.
IncrementCallCount(feedback_vector, slot_id);
@@ -1195,7 +1210,7 @@ Node* InterpreterAssembler::ConstructWithSpread(Node* target, Node* context,
// MegamorphicSentinel is an immortal immovable object so
// write-barrier is not needed.
Comment("transition to megamorphic");
- DCHECK(Heap::RootIsImmortalImmovable(RootIndex::kmegamorphic_symbol));
+ DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kmegamorphic_symbol));
StoreFeedbackVectorSlot(
feedback_vector, slot_id,
HeapConstant(FeedbackVector::MegamorphicSentinel(isolate())),
@@ -1235,8 +1250,9 @@ Node* InterpreterAssembler::CallRuntimeN(Node* function_id, Node* context,
Load(MachineType::Pointer(), function,
IntPtrConstant(offsetof(Runtime::Function, entry)));
- return CallStubR(callable.descriptor(), result_size, code_target, context,
- args.reg_count(), args.base_reg_location(), function_entry);
+ return CallStubR(StubCallMode::kCallCodeObject, callable.descriptor(),
+ result_size, code_target, context, args.reg_count(),
+ args.base_reg_location(), function_entry);
}
void InterpreterAssembler::UpdateInterruptBudget(Node* weight, bool backward) {
@@ -1403,7 +1419,7 @@ Node* InterpreterAssembler::DispatchToBytecode(Node* target_bytecode,
Node* target_code_entry =
Load(MachineType::Pointer(), DispatchTableRawPointer(),
- TimesPointerSize(target_bytecode));
+ TimesSystemPointerSize(target_bytecode));
return DispatchToBytecodeHandlerEntry(target_code_entry, new_bytecode_offset,
target_bytecode);
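
A miniature of the dispatch step above, assuming plain functions in place of bytecode handlers: handler entry points live in a table of code addresses, which is why the load offset is the bytecode index scaled by the pointer size (TimesSystemPointerSize).

    #include <cstdint>
    #include <cstdio>

    using Handler = void (*)();

    void HandleLdar() { std::puts("Ldar"); }
    void HandleStar() { std::puts("Star"); }

    Handler dispatch_table[] = {HandleLdar, HandleStar};

    void DispatchToBytecode(uint8_t target_bytecode) {
      // Analogous to Load(table_base, TimesSystemPointerSize(bytecode)).
      Handler target = dispatch_table[target_bytecode];
      target();
    }

    int main() {
      DispatchToBytecode(1);
      return 0;
    }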
@@ -1460,7 +1476,7 @@ void InterpreterAssembler::DispatchWide(OperandScale operand_scale) {
Node* target_index = IntPtrAdd(base_index, next_bytecode);
Node* target_code_entry =
Load(MachineType::Pointer(), DispatchTableRawPointer(),
- TimesPointerSize(target_index));
+ TimesSystemPointerSize(target_index));
DispatchToBytecodeHandlerEntry(target_code_entry, next_bytecode_offset,
next_bytecode);
@@ -1547,8 +1563,8 @@ void InterpreterAssembler::TraceBytecodeDispatch(Node* target_bytecode) {
Node* source_bytecode_table_index = IntPtrConstant(
static_cast<int>(bytecode_) * (static_cast<int>(Bytecode::kLast) + 1));
- Node* counter_offset =
- TimesPointerSize(IntPtrAdd(source_bytecode_table_index, target_bytecode));
+ Node* counter_offset = TimesSystemPointerSize(
+ IntPtrAdd(source_bytecode_table_index, target_bytecode));
Node* old_counter =
Load(MachineType::IntPtr(), counters_table, counter_offset);
@@ -1771,24 +1787,14 @@ void InterpreterAssembler::ToNumberOrNumeric(Object::Conversion mode) {
// Record the type feedback collected for {object}.
Node* slot_index = BytecodeOperandIdx(0);
- Node* feedback_vector = LoadFeedbackVector();
- UpdateFeedback(var_type_feedback.value(), feedback_vector, slot_index);
+ Node* maybe_feedback_vector = LoadFeedbackVectorUnchecked();
+
+ UpdateFeedback(var_type_feedback.value(), maybe_feedback_vector, slot_index);
SetAccumulator(var_result.value());
Dispatch();
}
-void InterpreterAssembler::DeserializeLazyAndDispatch() {
- Node* context = GetContext();
- Node* bytecode_offset = BytecodeOffset();
- Node* bytecode = LoadBytecode(bytecode_offset);
-
- Node* target_handler =
- CallRuntime(Runtime::kInterpreterDeserializeLazy, context,
- SmiTag(bytecode), SmiConstant(operand_scale()));
- DispatchToBytecodeHandler(target_handler, bytecode_offset, bytecode);
-}
-
} // namespace interpreter
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/interpreter/interpreter-assembler.h b/deps/v8/src/interpreter/interpreter-assembler.h
index 036e920837..20922bc8b4 100644
--- a/deps/v8/src/interpreter/interpreter-assembler.h
+++ b/deps/v8/src/interpreter/interpreter-assembler.h
@@ -147,6 +147,10 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
// Load the FeedbackVector for the current function.
compiler::TNode<FeedbackVector> LoadFeedbackVector();
+ // Load the FeedbackVector for the current function. The returned node
+ // may be undefined.
+ compiler::Node* LoadFeedbackVectorUnchecked();
+
// Increment the call count for a CALL_IC or construct call.
// The call count is located at feedback_vector[slot_id + 1].
void IncrementCallCount(compiler::Node* feedback_vector,
@@ -162,7 +166,7 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
// |feedback_vector| at |slot_id|, and the call counts in
// the |feedback_vector| at |slot_id+1|.
void CollectCallFeedback(compiler::Node* target, compiler::Node* context,
- compiler::Node* feedback_vector,
+ compiler::Node* maybe_feedback_vector,
compiler::Node* slot_id);
// Call JSFunction or Callable |function| with |args| arguments, possibly
@@ -270,9 +274,6 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
void ToNumberOrNumeric(Object::Conversion mode);
- // Lazily deserializes the current bytecode's handler and tail-calls into it.
- void DeserializeLazyAndDispatch();
-
private:
// Returns a tagged pointer to the current function's BytecodeArray object.
compiler::Node* BytecodeArrayTaggedPointer();
diff --git a/deps/v8/src/interpreter/interpreter-generator.cc b/deps/v8/src/interpreter/interpreter-generator.cc
index d2dab6d8d8..0ac2146731 100644
--- a/deps/v8/src/interpreter/interpreter-generator.cc
+++ b/deps/v8/src/interpreter/interpreter-generator.cc
@@ -15,13 +15,17 @@
#include "src/debug/debug.h"
#include "src/ic/accessor-assembler.h"
#include "src/ic/binary-op-assembler.h"
+#include "src/ic/ic.h"
#include "src/interpreter/bytecode-flags.h"
#include "src/interpreter/bytecodes.h"
#include "src/interpreter/interpreter-assembler.h"
#include "src/interpreter/interpreter-intrinsics-generator.h"
#include "src/objects-inl.h"
+#include "src/objects/cell.h"
#include "src/objects/js-generator.h"
#include "src/objects/module.h"
+#include "src/objects/oddball.h"
+#include "src/ostreams.h"
namespace v8 {
namespace internal {
@@ -156,7 +160,7 @@ class InterpreterLoadGlobalAssembler : public InterpreterAssembler {
void LdaGlobal(int slot_operand_index, int name_operand_index,
TypeofMode typeof_mode) {
- TNode<FeedbackVector> feedback_vector = LoadFeedbackVector();
+ Node* maybe_feedback_vector = LoadFeedbackVectorUnchecked();
Node* feedback_slot = BytecodeOperandIdx(slot_operand_index);
AccessorAssembler accessor_asm(state());
@@ -172,9 +176,20 @@ class InterpreterLoadGlobalAssembler : public InterpreterAssembler {
return CAST(name);
};
- accessor_asm.LoadGlobalIC(feedback_vector, feedback_slot, lazy_context,
- lazy_name, typeof_mode, &exit_point,
- CodeStubAssembler::INTPTR_PARAMETERS);
+ Label miss(this, Label::kDeferred);
+ ParameterMode slot_mode = CodeStubAssembler::INTPTR_PARAMETERS;
+ GotoIf(IsUndefined(maybe_feedback_vector), &miss);
+ accessor_asm.LoadGlobalIC(CAST(maybe_feedback_vector), feedback_slot,
+ lazy_context, lazy_name, typeof_mode, &exit_point,
+ slot_mode);
+
+ BIND(&miss);
+ {
+ exit_point.ReturnCallRuntime(
+ Runtime::kLoadGlobalIC_Miss, lazy_context(), lazy_name(),
+ ParameterToTagged(feedback_slot, slot_mode), maybe_feedback_vector,
+ SmiConstant(typeof_mode));
+ }
}
};
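
A sketch of the fast/slow split used by this and the following handlers, with stand-in callees rather than real V8 entry points: take the IC fast path when a feedback vector exists, fall back to a runtime miss call otherwise, and merge on a single result.

    #include <cstdio>

    int CallLoadIC() { return 1; }       // fast path, feedback-driven
    int CallRuntimeMiss() { return 2; }  // slow path, no feedback

    int LdaGlobalResult(bool has_feedback_vector) {
      int result;
      if (has_feedback_vector) {
        result = CallLoadIC();       // accessor_asm.LoadGlobalIC(...)
      } else {
        result = CallRuntimeMiss();  // Runtime::kLoadGlobalIC_Miss
      }
      return result;                 // merge point; SetAccumulator(result)
    }

    int main() {
      std::printf("%d\n", LdaGlobalResult(false));
      return 0;
    }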
@@ -212,9 +227,23 @@ IGNITION_HANDLER(StaGlobal, InterpreterAssembler) {
Node* value = GetAccumulator();
Node* raw_slot = BytecodeOperandIdx(1);
Node* smi_slot = SmiTag(raw_slot);
- Node* feedback_vector = LoadFeedbackVector();
+ Node* maybe_vector = LoadFeedbackVectorUnchecked();
+
+ Label no_feedback(this, Label::kDeferred), end(this);
+ GotoIf(IsUndefined(maybe_vector), &no_feedback);
+
CallBuiltin(Builtins::kStoreGlobalIC, context, name, value, smi_slot,
- feedback_vector);
+ maybe_vector);
+ Goto(&end);
+
+ Bind(&no_feedback);
+ TNode<JSFunction> closure = CAST(LoadRegister(Register::function_closure()));
+ Node* language_mode = GetLanguageMode(closure, context);
+ CallRuntime(Runtime::kStoreGlobalICNoFeedback_Miss, context, value, name,
+ language_mode);
+ Goto(&end);
+
+ Bind(&end);
Dispatch();
}
@@ -490,7 +519,7 @@ IGNITION_HANDLER(StaLookupSlot, InterpreterAssembler) {
// Calls the LoadIC at FeedBackVector slot <slot> for <object> and the name at
// constant pool entry <name_index>.
IGNITION_HANDLER(LdaNamedProperty, InterpreterAssembler) {
- Node* feedback_vector = LoadFeedbackVector();
+ Node* feedback_vector = LoadFeedbackVectorUnchecked();
Node* feedback_slot = BytecodeOperandIdx(2);
Node* smi_slot = SmiTag(feedback_slot);
@@ -539,11 +568,26 @@ IGNITION_HANDLER(LdaKeyedProperty, InterpreterAssembler) {
Node* name = GetAccumulator();
Node* raw_slot = BytecodeOperandIdx(1);
Node* smi_slot = SmiTag(raw_slot);
- Node* feedback_vector = LoadFeedbackVector();
+ Node* feedback_vector = LoadFeedbackVectorUnchecked();
Node* context = GetContext();
- Node* result = CallBuiltin(Builtins::kKeyedLoadIC, context, object, name,
- smi_slot, feedback_vector);
- SetAccumulator(result);
+
+ Label no_feedback(this, Label::kDeferred), end(this);
+ VARIABLE(var_result, MachineRepresentation::kTagged);
+ GotoIf(IsUndefined(feedback_vector), &no_feedback);
+ var_result.Bind(CallBuiltin(Builtins::kKeyedLoadIC, context, object, name,
+ smi_slot, feedback_vector));
+ Goto(&end);
+
+ BIND(&no_feedback);
+ {
+ Comment("KeyedLoadIC_no_feedback");
+ var_result.Bind(CallRuntime(Runtime::kKeyedLoadIC_Miss, context, object,
+ name, smi_slot, feedback_vector));
+ Goto(&end);
+ }
+
+ BIND(&end);
+ SetAccumulator(var_result.value());
Dispatch();
}
@@ -554,23 +598,39 @@ class InterpreterStoreNamedPropertyAssembler : public InterpreterAssembler {
OperandScale operand_scale)
: InterpreterAssembler(state, bytecode, operand_scale) {}
- void StaNamedProperty(Callable ic) {
+ void StaNamedProperty(Callable ic, NamedPropertyType property_type) {
Node* code_target = HeapConstant(ic.code());
Node* object = LoadRegisterAtOperandIndex(0);
Node* name = LoadConstantPoolEntryAtOperandIndex(1);
Node* value = GetAccumulator();
Node* raw_slot = BytecodeOperandIdx(2);
Node* smi_slot = SmiTag(raw_slot);
- Node* feedback_vector = LoadFeedbackVector();
+ Node* maybe_vector = LoadFeedbackVectorUnchecked();
Node* context = GetContext();
- Node* result = CallStub(ic.descriptor(), code_target, context, object, name,
- value, smi_slot, feedback_vector);
+
+ VARIABLE(var_result, MachineRepresentation::kTagged);
+ Label no_feedback(this, Label::kDeferred), end(this);
+ GotoIf(IsUndefined(maybe_vector), &no_feedback);
+ var_result.Bind(CallStub(ic.descriptor(), code_target, context, object,
+ name, value, smi_slot, maybe_vector));
+ Goto(&end);
+
+ Bind(&no_feedback);
+ TNode<JSFunction> closure =
+ CAST(LoadRegister(Register::function_closure()));
+ Node* language_mode = GetLanguageMode(closure, context);
+ var_result.Bind(CallRuntime(Runtime::kStoreICNoFeedback_Miss, context,
+ value, object, name, language_mode,
+ SmiConstant(property_type)));
+ Goto(&end);
+
+ Bind(&end);
// To avoid special logic in the deoptimizer to re-materialize the value in
// the accumulator, we overwrite the accumulator after the IC call. It
// doesn't really matter what we write to the accumulator here, since we
// restore to the correct value on the outside. Storing the result means we
// don't need to keep unnecessary state alive across the callstub.
- SetAccumulator(result);
+ SetAccumulator(var_result.value());
Dispatch();
}
};
@@ -582,7 +642,7 @@ class InterpreterStoreNamedPropertyAssembler : public InterpreterAssembler {
// accumulator.
IGNITION_HANDLER(StaNamedProperty, InterpreterStoreNamedPropertyAssembler) {
Callable ic = Builtins::CallableFor(isolate(), Builtins::kStoreIC);
- StaNamedProperty(ic);
+ StaNamedProperty(ic, NamedPropertyType::kNotOwn);
}
// StaNamedOwnProperty <object> <name_index> <slot>
@@ -592,7 +652,7 @@ IGNITION_HANDLER(StaNamedProperty, InterpreterStoreNamedPropertyAssembler) {
// accumulator.
IGNITION_HANDLER(StaNamedOwnProperty, InterpreterStoreNamedPropertyAssembler) {
Callable ic = CodeFactory::StoreOwnICInOptimizedCode(isolate());
- StaNamedProperty(ic);
+ StaNamedProperty(ic, NamedPropertyType::kOwn);
}
// StaNamedPropertyNoFeedback <object> <name_index>
@@ -623,16 +683,31 @@ IGNITION_HANDLER(StaKeyedProperty, InterpreterAssembler) {
Node* value = GetAccumulator();
Node* raw_slot = BytecodeOperandIdx(2);
Node* smi_slot = SmiTag(raw_slot);
- Node* feedback_vector = LoadFeedbackVector();
+ Node* maybe_vector = LoadFeedbackVectorUnchecked();
Node* context = GetContext();
- Node* result = CallBuiltin(Builtins::kKeyedStoreIC, context, object, name,
- value, smi_slot, feedback_vector);
+
+ Label no_feedback(this, Label::kDeferred), end(this);
+ VARIABLE(var_result, MachineRepresentation::kTagged);
+ GotoIf(IsUndefined(maybe_vector), &no_feedback);
+
+ var_result.Bind(CallBuiltin(Builtins::kKeyedStoreIC, context, object, name,
+ value, smi_slot, maybe_vector));
+ Goto(&end);
+
+ Bind(&no_feedback);
+ TNode<JSFunction> closure = CAST(LoadRegister(Register::function_closure()));
+ Node* language_mode = GetLanguageMode(closure, context);
+ var_result.Bind(CallRuntime(Runtime::kKeyedStoreICNoFeedback_Miss, context,
+ value, object, name, language_mode));
+ Goto(&end);
+
+ Bind(&end);
// To avoid special logic in the deoptimizer to re-materialize the value in
// the accumulator, we overwrite the accumulator after the IC call. It
// doesn't really matter what we write to the accumulator here, since we
// restore to the correct value on the outside. Storing the result means we
// don't need to keep unnecessary state alive across the callstub.
- SetAccumulator(result);
+ SetAccumulator(var_result.value());
Dispatch();
}
@@ -646,16 +721,29 @@ IGNITION_HANDLER(StaInArrayLiteral, InterpreterAssembler) {
Node* value = GetAccumulator();
Node* raw_slot = BytecodeOperandIdx(2);
Node* smi_slot = SmiTag(raw_slot);
- Node* feedback_vector = LoadFeedbackVector();
+ Node* feedback_vector = LoadFeedbackVectorUnchecked();
Node* context = GetContext();
- Node* result = CallBuiltin(Builtins::kStoreInArrayLiteralIC, context, array,
- index, value, smi_slot, feedback_vector);
+
+ VARIABLE(var_result, MachineRepresentation::kTagged);
+ Label no_feedback(this, Label::kDeferred), end(this);
+ GotoIf(IsUndefined(feedback_vector), &no_feedback);
+
+ var_result.Bind(CallBuiltin(Builtins::kStoreInArrayLiteralIC, context, array,
+ index, value, smi_slot, feedback_vector));
+ Goto(&end);
+
+ BIND(&no_feedback);
+ var_result.Bind(CallRuntime(Runtime::kStoreInArrayLiteralIC_Miss, context,
+ value, smi_slot, feedback_vector, array, index));
+ Goto(&end);
+
+ BIND(&end);
// To avoid special logic in the deoptimizer to re-materialize the value in
// the accumulator, we overwrite the accumulator after the IC call. It
// doesn't really matter what we write to the accumulator here, since we
// restore to the correct value on the outside. Storing the result means we
// don't need to keep unnecessary state alive across the callstub.
- SetAccumulator(result);
+ SetAccumulator(var_result.value());
Dispatch();
}
@@ -674,7 +762,7 @@ IGNITION_HANDLER(StaDataPropertyInLiteral, InterpreterAssembler) {
Node* flags = SmiFromInt32(BytecodeOperandFlag(2));
Node* vector_index = SmiTag(BytecodeOperandIdx(3));
- Node* feedback_vector = LoadFeedbackVector();
+ Node* feedback_vector = LoadFeedbackVectorUnchecked();
Node* context = GetContext();
CallRuntime(Runtime::kDefineDataPropertyInLiteral, context, object, name,
@@ -812,11 +900,11 @@ class InterpreterBinaryOpAssembler : public InterpreterAssembler {
Node* rhs = GetAccumulator();
Node* context = GetContext();
Node* slot_index = BytecodeOperandIdx(1);
- Node* feedback_vector = LoadFeedbackVector();
+ Node* maybe_feedback_vector = LoadFeedbackVectorUnchecked();
BinaryOpAssembler binop_asm(state());
Node* result = (binop_asm.*generator)(context, lhs, rhs, slot_index,
- feedback_vector, false);
+ maybe_feedback_vector, false);
SetAccumulator(result);
Dispatch();
}
@@ -826,11 +914,11 @@ class InterpreterBinaryOpAssembler : public InterpreterAssembler {
Node* rhs = BytecodeOperandImmSmi(0);
Node* context = GetContext();
Node* slot_index = BytecodeOperandIdx(1);
- Node* feedback_vector = LoadFeedbackVector();
+ Node* maybe_feedback_vector = LoadFeedbackVectorUnchecked();
BinaryOpAssembler binop_asm(state());
Node* result = (binop_asm.*generator)(context, lhs, rhs, slot_index,
- feedback_vector, true);
+ maybe_feedback_vector, true);
SetAccumulator(result);
Dispatch();
}
@@ -933,7 +1021,7 @@ class InterpreterBitwiseBinaryOpAssembler : public InterpreterAssembler {
Node* right = GetAccumulator();
Node* context = GetContext();
Node* slot_index = BytecodeOperandIdx(1);
- Node* feedback_vector = LoadFeedbackVector();
+ Node* maybe_feedback_vector = LoadFeedbackVectorUnchecked();
TVARIABLE(Smi, var_left_feedback);
TVARIABLE(Smi, var_right_feedback);
@@ -959,7 +1047,7 @@ class InterpreterBitwiseBinaryOpAssembler : public InterpreterAssembler {
BinaryOperationFeedback::kNumber);
TNode<Smi> input_feedback =
SmiOr(var_left_feedback.value(), var_right_feedback.value());
- UpdateFeedback(SmiOr(result_type, input_feedback), feedback_vector,
+ UpdateFeedback(SmiOr(result_type, input_feedback), maybe_feedback_vector,
slot_index);
SetAccumulator(result);
Dispatch();
@@ -974,7 +1062,7 @@ class InterpreterBitwiseBinaryOpAssembler : public InterpreterAssembler {
CallRuntime(Runtime::kBigIntBinaryOp, context, var_left_bigint.value(),
var_right_bigint.value(), SmiConstant(bitwise_op)));
UpdateFeedback(SmiOr(var_left_feedback.value(), var_right_feedback.value()),
- feedback_vector, slot_index);
+ maybe_feedback_vector, slot_index);
Dispatch();
}
@@ -982,7 +1070,7 @@ class InterpreterBitwiseBinaryOpAssembler : public InterpreterAssembler {
Node* left = GetAccumulator();
Node* right = BytecodeOperandImmSmi(0);
Node* slot_index = BytecodeOperandIdx(1);
- Node* feedback_vector = LoadFeedbackVector();
+ Node* maybe_feedback_vector = LoadFeedbackVectorUnchecked();
Node* context = GetContext();
TVARIABLE(Smi, var_left_feedback);
@@ -1000,12 +1088,13 @@ class InterpreterBitwiseBinaryOpAssembler : public InterpreterAssembler {
TaggedIsSmi(result), BinaryOperationFeedback::kSignedSmall,
BinaryOperationFeedback::kNumber);
UpdateFeedback(SmiOr(result_type, var_left_feedback.value()),
- feedback_vector, slot_index);
+ maybe_feedback_vector, slot_index);
SetAccumulator(result);
Dispatch();
BIND(&if_bigint_mix);
- UpdateFeedback(var_left_feedback.value(), feedback_vector, slot_index);
+ UpdateFeedback(var_left_feedback.value(), maybe_feedback_vector,
+ slot_index);
ThrowTypeError(context, MessageTemplate::kBigIntMixedTypes);
}
};
@@ -1088,7 +1177,7 @@ IGNITION_HANDLER(BitwiseAndSmi, InterpreterBitwiseBinaryOpAssembler) {
IGNITION_HANDLER(BitwiseNot, InterpreterAssembler) {
Node* operand = GetAccumulator();
Node* slot_index = BytecodeOperandIdx(0);
- Node* feedback_vector = LoadFeedbackVector();
+ Node* maybe_feedback_vector = LoadFeedbackVectorUnchecked();
Node* context = GetContext();
VARIABLE(var_word32, MachineRepresentation::kWord32);
@@ -1105,15 +1194,15 @@ IGNITION_HANDLER(BitwiseNot, InterpreterAssembler) {
TNode<Smi> result_type = SelectSmiConstant(
TaggedIsSmi(result), BinaryOperationFeedback::kSignedSmall,
BinaryOperationFeedback::kNumber);
- UpdateFeedback(SmiOr(result_type, var_feedback.value()), feedback_vector,
- slot_index);
+ UpdateFeedback(SmiOr(result_type, var_feedback.value()),
+ maybe_feedback_vector, slot_index);
SetAccumulator(result);
Dispatch();
// BigInt case.
BIND(&if_bigint);
- UpdateFeedback(SmiConstant(BinaryOperationFeedback::kBigInt), feedback_vector,
- slot_index);
+ UpdateFeedback(SmiConstant(BinaryOperationFeedback::kBigInt),
+ maybe_feedback_vector, slot_index);
SetAccumulator(CallRuntime(Runtime::kBigIntUnaryOp, context,
var_bigint.value(),
SmiConstant(Operation::kBitwiseNot)));
@@ -1166,7 +1255,7 @@ class UnaryNumericOpAssembler : public InterpreterAssembler {
void UnaryOpWithFeedback() {
VARIABLE(var_value, MachineRepresentation::kTagged, GetAccumulator());
Node* slot_index = BytecodeOperandIdx(0);
- Node* feedback_vector = LoadFeedbackVector();
+ Node* maybe_feedback_vector = LoadFeedbackVectorUnchecked();
VARIABLE(var_result, MachineRepresentation::kTagged);
VARIABLE(var_float_value, MachineRepresentation::kFloat64);
@@ -1245,7 +1334,7 @@ class UnaryNumericOpAssembler : public InterpreterAssembler {
}
BIND(&end);
- UpdateFeedback(var_feedback.value(), feedback_vector, slot_index);
+ UpdateFeedback(var_feedback.value(), maybe_feedback_vector, slot_index);
SetAccumulator(var_result.value());
Dispatch();
}
@@ -1522,11 +1611,11 @@ class InterpreterJSCallAssembler : public InterpreterAssembler {
Node* function = LoadRegisterAtOperandIndex(0);
RegListNodePair args = GetRegisterListAtOperandIndex(1);
Node* slot_id = BytecodeOperandIdx(3);
- Node* feedback_vector = LoadFeedbackVector();
+ Node* maybe_feedback_vector = LoadFeedbackVectorUnchecked();
Node* context = GetContext();
// Collect the {function} feedback.
- CollectCallFeedback(function, context, feedback_vector, slot_id);
+ CollectCallFeedback(function, context, maybe_feedback_vector, slot_id);
// Call the function and dispatch to the next handler.
CallJSAndDispatch(function, context, args, receiver_mode);
@@ -1555,11 +1644,11 @@ class InterpreterJSCallAssembler : public InterpreterAssembler {
Node* function = LoadRegisterAtOperandIndex(0);
Node* slot_id = BytecodeOperandIdx(kSlotOperandIndex);
- Node* feedback_vector = LoadFeedbackVector();
+ Node* maybe_feedback_vector = LoadFeedbackVectorUnchecked();
Node* context = GetContext();
// Collect the {function} feedback.
- CollectCallFeedback(function, context, feedback_vector, slot_id);
+ CollectCallFeedback(function, context, maybe_feedback_vector, slot_id);
switch (kRecieverAndArgOperandCount) {
case 0:
@@ -1710,12 +1799,12 @@ IGNITION_HANDLER(CallWithSpread, InterpreterAssembler) {
Node* callable = LoadRegisterAtOperandIndex(0);
RegListNodePair args = GetRegisterListAtOperandIndex(1);
Node* slot_id = BytecodeOperandIdx(3);
- Node* feedback_vector = LoadFeedbackVector();
+ Node* maybe_feedback_vector = LoadFeedbackVectorUnchecked();
Node* context = GetContext();
// Call into Runtime function CallWithSpread which does everything.
CallJSWithSpreadAndDispatch(callable, context, args, slot_id,
- feedback_vector);
+ maybe_feedback_vector);
}
// ConstructWithSpread <first_arg> <arg_count>
@@ -1729,7 +1818,7 @@ IGNITION_HANDLER(ConstructWithSpread, InterpreterAssembler) {
Node* constructor = LoadRegisterAtOperandIndex(0);
RegListNodePair args = GetRegisterListAtOperandIndex(1);
Node* slot_id = BytecodeOperandIdx(3);
- Node* feedback_vector = LoadFeedbackVector();
+ Node* feedback_vector = LoadFeedbackVectorUnchecked();
Node* context = GetContext();
Node* result = ConstructWithSpread(constructor, context, new_target, args,
slot_id, feedback_vector);
@@ -1748,7 +1837,7 @@ IGNITION_HANDLER(Construct, InterpreterAssembler) {
Node* constructor = LoadRegisterAtOperandIndex(0);
RegListNodePair args = GetRegisterListAtOperandIndex(1);
Node* slot_id = BytecodeOperandIdx(3);
- Node* feedback_vector = LoadFeedbackVector();
+ Node* feedback_vector = LoadFeedbackVectorUnchecked();
Node* context = GetContext();
Node* result = Construct(constructor, context, new_target, args, slot_id,
feedback_vector);
@@ -1788,8 +1877,9 @@ class InterpreterCompareOpAssembler : public InterpreterAssembler {
}
Node* slot_index = BytecodeOperandIdx(1);
- Node* feedback_vector = LoadFeedbackVector();
- UpdateFeedback(var_type_feedback.value(), feedback_vector, slot_index);
+ Node* maybe_feedback_vector = LoadFeedbackVectorUnchecked();
+ UpdateFeedback(var_type_feedback.value(), maybe_feedback_vector,
+ slot_index);
SetAccumulator(result);
Dispatch();
}
@@ -1872,12 +1962,17 @@ IGNITION_HANDLER(TestInstanceOf, InterpreterAssembler) {
Node* object = LoadRegisterAtOperandIndex(0);
Node* callable = GetAccumulator();
Node* slot_id = BytecodeOperandIdx(1);
- Node* feedback_vector = LoadFeedbackVector();
+ Node* feedback_vector = LoadFeedbackVectorUnchecked();
Node* context = GetContext();
+ Label feedback_done(this);
+ GotoIf(IsUndefined(feedback_vector), &feedback_done);
+
// Record feedback for the {callable} in the {feedback_vector}.
CollectCallableFeedback(callable, context, feedback_vector, slot_id);
+ Goto(&feedback_done);
+ BIND(&feedback_done);
// Perform the actual instanceof operation.
SetAccumulator(InstanceOf(object, callable, context));
Dispatch();
@@ -2362,14 +2457,29 @@ IGNITION_HANDLER(SwitchOnSmiNoFeedback, InterpreterAssembler) {
// <flags> and the pattern in <pattern_idx>.
IGNITION_HANDLER(CreateRegExpLiteral, InterpreterAssembler) {
Node* pattern = LoadConstantPoolEntryAtOperandIndex(0);
- Node* feedback_vector = LoadFeedbackVector();
+ Node* feedback_vector = LoadFeedbackVectorUnchecked();
Node* slot_id = BytecodeOperandIdx(1);
Node* flags = SmiFromInt32(BytecodeOperandFlag(2));
Node* context = GetContext();
+
+ VARIABLE(result, MachineRepresentation::kTagged);
+ Label no_feedback(this, Label::kDeferred), end(this);
+ GotoIf(IsUndefined(feedback_vector), &no_feedback);
+
ConstructorBuiltinsAssembler constructor_assembler(state());
- Node* result = constructor_assembler.EmitCreateRegExpLiteral(
- feedback_vector, slot_id, pattern, flags, context);
- SetAccumulator(result);
+ result.Bind(constructor_assembler.EmitCreateRegExpLiteral(
+ feedback_vector, slot_id, pattern, flags, context));
+ Goto(&end);
+
+ BIND(&no_feedback);
+ {
+ result.Bind(CallRuntime(Runtime::kCreateRegExpLiteral, context,
+ feedback_vector, SmiTag(slot_id), pattern, flags));
+ Goto(&end);
+ }
+
+ BIND(&end);
+ SetAccumulator(result.value());
Dispatch();
}
@@ -2378,12 +2488,15 @@ IGNITION_HANDLER(CreateRegExpLiteral, InterpreterAssembler) {
// Creates an array literal for literal index <literal_idx> with
// CreateArrayLiteral flags <flags> and constant elements in <element_idx>.
IGNITION_HANDLER(CreateArrayLiteral, InterpreterAssembler) {
- Node* feedback_vector = LoadFeedbackVector();
+ Node* feedback_vector = LoadFeedbackVectorUnchecked();
Node* slot_id = BytecodeOperandIdx(1);
Node* context = GetContext();
Node* bytecode_flags = BytecodeOperandFlag(2);
Label fast_shallow_clone(this), call_runtime(this, Label::kDeferred);
+ // No feedback, so handle it as a slow case.
+ GotoIf(IsUndefined(feedback_vector), &call_runtime);
+
Branch(IsSetWord32<CreateArrayLiteralFlags::FastCloneSupportedBit>(
bytecode_flags),
&fast_shallow_clone, &call_runtime);
@@ -2416,13 +2529,31 @@ IGNITION_HANDLER(CreateArrayLiteral, InterpreterAssembler) {
//
// Creates an empty JSArray literal for literal index <literal_idx>.
IGNITION_HANDLER(CreateEmptyArrayLiteral, InterpreterAssembler) {
- Node* feedback_vector = LoadFeedbackVector();
+ Node* feedback_vector = LoadFeedbackVectorUnchecked();
Node* slot_id = BytecodeOperandIdx(0);
Node* context = GetContext();
+
+ Label no_feedback(this, Label::kDeferred), end(this);
+ VARIABLE(result, MachineRepresentation::kTagged);
+ GotoIf(IsUndefined(feedback_vector), &no_feedback);
+
ConstructorBuiltinsAssembler constructor_assembler(state());
- Node* result = constructor_assembler.EmitCreateEmptyArrayLiteral(
- feedback_vector, slot_id, context);
- SetAccumulator(result);
+ result.Bind(constructor_assembler.EmitCreateEmptyArrayLiteral(
+ feedback_vector, slot_id, context));
+ Goto(&end);
+
+ BIND(&no_feedback);
+ {
+ TNode<Map> array_map = LoadJSArrayElementsMap(GetInitialFastElementsKind(),
+ LoadNativeContext(context));
+ result.Bind(AllocateJSArray(GetInitialFastElementsKind(), array_map,
+ SmiConstant(0), SmiConstant(0), nullptr,
+ ParameterMode::SMI_PARAMETERS));
+ Goto(&end);
+ }
+
+ BIND(&end);
+ SetAccumulator(result.value());
Dispatch();
}
@@ -2443,12 +2574,15 @@ IGNITION_HANDLER(CreateArrayFromIterable, InterpreterAssembler) {
// Creates an object literal for literal index <literal_idx> with
// CreateObjectLiteralFlags <flags> and constant elements in <element_idx>.
IGNITION_HANDLER(CreateObjectLiteral, InterpreterAssembler) {
- Node* feedback_vector = LoadFeedbackVector();
+ Node* feedback_vector = LoadFeedbackVectorUnchecked();
Node* slot_id = BytecodeOperandIdx(1);
Node* bytecode_flags = BytecodeOperandFlag(2);
- // Check if we can do a fast clone or have to call the runtime.
Label if_fast_clone(this), if_not_fast_clone(this, Label::kDeferred);
+ // No feedback, so handle it as a slow case.
+ GotoIf(IsUndefined(feedback_vector), &if_not_fast_clone);
+
+ // Check if we can do a fast clone or have to call the runtime.
Branch(IsSetWord32<CreateObjectLiteralFlags::FastCloneSupportedBit>(
bytecode_flags),
&if_fast_clone, &if_not_fast_clone);
@@ -2459,7 +2593,7 @@ IGNITION_HANDLER(CreateObjectLiteral, InterpreterAssembler) {
ConstructorBuiltinsAssembler constructor_assembler(state());
Node* result = constructor_assembler.EmitCreateShallowObjectLiteral(
feedback_vector, slot_id, &if_not_fast_clone);
- StoreRegisterAtOperandIndex(result, 3);
+ SetAccumulator(result);
Dispatch();
}
@@ -2477,7 +2611,7 @@ IGNITION_HANDLER(CreateObjectLiteral, InterpreterAssembler) {
Node* result =
CallRuntime(Runtime::kCreateObjectLiteral, context, feedback_vector,
SmiTag(slot_id), object_boilerplate_description, flags);
- StoreRegisterAtOperandIndex(result, 3);
+ SetAccumulator(result);
// TODO(klaasb) build a single dispatch once the call is inlined
Dispatch();
}
@@ -2506,11 +2640,23 @@ IGNITION_HANDLER(CloneObject, InterpreterAssembler) {
Node* smi_flags = SmiTag(raw_flags);
Node* raw_slot = BytecodeOperandIdx(2);
Node* smi_slot = SmiTag(raw_slot);
- Node* feedback_vector = LoadFeedbackVector();
+ Node* maybe_feedback_vector = LoadFeedbackVectorUnchecked();
Node* context = GetContext();
- Node* result = CallBuiltin(Builtins::kCloneObjectIC, context, source,
- smi_flags, smi_slot, feedback_vector);
- SetAccumulator(result);
+
+ Variable var_result(this, MachineRepresentation::kTagged);
+ Label no_feedback(this), end(this);
+ GotoIf(IsUndefined(maybe_feedback_vector), &no_feedback);
+ var_result.Bind(CallBuiltin(Builtins::kCloneObjectIC, context, source,
+ smi_flags, smi_slot, maybe_feedback_vector));
+ Goto(&end);
+
+ BIND(&no_feedback);
+ var_result.Bind(CallRuntime(Runtime::kCloneObjectIC_Miss, context, source,
+ smi_flags, smi_slot, maybe_feedback_vector));
+ Goto(&end);
+
+ BIND(&end);
+ SetAccumulator(var_result.value());
Dispatch();
}
@@ -2520,12 +2666,15 @@ IGNITION_HANDLER(CloneObject, InterpreterAssembler) {
// accumulator, creating and caching the site object on-demand as per the
// specification.
IGNITION_HANDLER(GetTemplateObject, InterpreterAssembler) {
- Node* feedback_vector = LoadFeedbackVector();
+ Node* feedback_vector = LoadFeedbackVectorUnchecked();
Node* slot = BytecodeOperandIdx(1);
+
+ Label call_runtime(this, Label::kDeferred);
+ GotoIf(IsUndefined(feedback_vector), &call_runtime);
+
TNode<Object> cached_value =
CAST(LoadFeedbackVectorSlot(feedback_vector, slot, 0, INTPTR_PARAMETERS));
- Label call_runtime(this, Label::kDeferred);
GotoIf(WordEqual(cached_value, SmiConstant(0)), &call_runtime);
SetAccumulator(cached_value);
@@ -2537,7 +2686,13 @@ IGNITION_HANDLER(GetTemplateObject, InterpreterAssembler) {
Node* context = GetContext();
Node* result =
CallRuntime(Runtime::kCreateTemplateObject, context, description);
+
+ Label end(this);
+ GotoIf(IsUndefined(feedback_vector), &end);
StoreFeedbackVectorSlot(feedback_vector, slot, result);
+ Goto(&end);
+
+ Bind(&end);
SetAccumulator(result);
Dispatch();
}
@@ -2552,18 +2707,30 @@ IGNITION_HANDLER(CreateClosure, InterpreterAssembler) {
Node* flags = BytecodeOperandFlag(2);
Node* context = GetContext();
Node* slot = BytecodeOperandIdx(1);
- Node* feedback_vector = LoadFeedbackVector();
- TNode<Object> feedback_cell =
- CAST(LoadFeedbackVectorSlot(feedback_vector, slot));
+ Label if_undefined(this), load_feedback_done(this);
+ Variable feedback_cell(this, MachineRepresentation::kTagged);
+ Node* feedback_vector = LoadFeedbackVectorUnchecked();
+
+ GotoIf(IsUndefined(feedback_vector), &if_undefined);
+ feedback_cell.Bind(LoadFeedbackVectorSlot(feedback_vector, slot));
+ Goto(&load_feedback_done);
+
+ BIND(&if_undefined);
+ {
+ feedback_cell.Bind(LoadRoot(RootIndex::kNoFeedbackCell));
+ Goto(&load_feedback_done);
+ }
+
+ BIND(&load_feedback_done);
Label if_fast(this), if_slow(this, Label::kDeferred);
Branch(IsSetWord32<CreateClosureFlags::FastNewClosureBit>(flags), &if_fast,
&if_slow);
BIND(&if_fast);
{
- Node* result =
- CallBuiltin(Builtins::kFastNewClosure, context, shared, feedback_cell);
+ Node* result = CallBuiltin(Builtins::kFastNewClosure, context, shared,
+ feedback_cell.value());
SetAccumulator(result);
Dispatch();
}
@@ -2576,8 +2743,8 @@ IGNITION_HANDLER(CreateClosure, InterpreterAssembler) {
BIND(&if_newspace);
{
- Node* result =
- CallRuntime(Runtime::kNewClosure, context, shared, feedback_cell);
+ Node* result = CallRuntime(Runtime::kNewClosure, context, shared,
+ feedback_cell.value());
SetAccumulator(result);
Dispatch();
}
@@ -2585,7 +2752,7 @@ IGNITION_HANDLER(CreateClosure, InterpreterAssembler) {
BIND(&if_oldspace);
{
Node* result = CallRuntime(Runtime::kNewClosure_Tenured, context, shared,
- feedback_cell);
+ feedback_cell.value());
SetAccumulator(result);
Dispatch();
}
@@ -2754,6 +2921,7 @@ IGNITION_HANDLER(Throw, InterpreterAssembler) {
CallRuntime(Runtime::kThrow, context, exception);
// We shouldn't ever return from a throw.
Abort(AbortReason::kUnexpectedReturnFromThrow);
+ Unreachable();
}
// ReThrow
@@ -2765,6 +2933,7 @@ IGNITION_HANDLER(ReThrow, InterpreterAssembler) {
CallRuntime(Runtime::kReThrow, context, exception);
// We shouldn't ever return from a throw.
Abort(AbortReason::kUnexpectedReturnFromThrow);
+ Unreachable();
}
// Abort <abort_reason>
@@ -2801,6 +2970,7 @@ IGNITION_HANDLER(ThrowReferenceErrorIfHole, InterpreterAssembler) {
CallRuntime(Runtime::kThrowReferenceError, GetContext(), name);
// We shouldn't ever return from a throw.
Abort(AbortReason::kUnexpectedReturnFromThrow);
+ Unreachable();
}
}
@@ -2819,6 +2989,7 @@ IGNITION_HANDLER(ThrowSuperNotCalledIfHole, InterpreterAssembler) {
CallRuntime(Runtime::kThrowSuperNotCalled, GetContext());
// We shouldn't ever return from a throw.
Abort(AbortReason::kUnexpectedReturnFromThrow);
+ Unreachable();
}
}
@@ -2838,6 +3009,7 @@ IGNITION_HANDLER(ThrowSuperAlreadyCalledIfNotHole, InterpreterAssembler) {
CallRuntime(Runtime::kThrowSuperAlreadyCalledError, GetContext());
// We shouldn't ever return from a throw.
Abort(AbortReason::kUnexpectedReturnFromThrow);
+ Unreachable();
}
}
@@ -2922,7 +3094,7 @@ IGNITION_HANDLER(ForInEnumerate, InterpreterAssembler) {
IGNITION_HANDLER(ForInPrepare, InterpreterAssembler) {
Node* enumerator = GetAccumulator();
Node* vector_index = BytecodeOperandIdx(1);
- Node* feedback_vector = LoadFeedbackVector();
+ Node* maybe_feedback_vector = LoadFeedbackVectorUnchecked();
// The {enumerator} is either a Map or a FixedArray.
CSA_ASSERT(this, TaggedIsNotSmi(enumerator));
@@ -2948,7 +3120,7 @@ IGNITION_HANDLER(ForInPrepare, InterpreterAssembler) {
Node* feedback = SelectSmiConstant(
IntPtrLessThanOrEqual(enum_length, enum_indices_length),
ForInFeedback::kEnumCacheKeysAndIndices, ForInFeedback::kEnumCacheKeys);
- UpdateFeedback(feedback, feedback_vector, vector_index);
+ UpdateFeedback(feedback, maybe_feedback_vector, vector_index);
// Construct the cache info triple.
Node* cache_type = enumerator;
@@ -2964,7 +3136,7 @@ IGNITION_HANDLER(ForInPrepare, InterpreterAssembler) {
CSA_ASSERT(this, IsFixedArray(enumerator));
// Record the fact that we hit the for-in slow-path.
- UpdateFeedback(SmiConstant(ForInFeedback::kAny), feedback_vector,
+ UpdateFeedback(SmiConstant(ForInFeedback::kAny), maybe_feedback_vector,
vector_index);
// Construct the cache info triple.
@@ -2986,7 +3158,7 @@ IGNITION_HANDLER(ForInNext, InterpreterAssembler) {
Node* cache_array;
std::tie(cache_type, cache_array) = LoadRegisterPairAtOperandIndex(2);
Node* vector_index = BytecodeOperandIdx(3);
- Node* feedback_vector = LoadFeedbackVector();
+ Node* maybe_feedback_vector = LoadFeedbackVectorUnchecked();
// Load the next key from the enumeration array.
Node* key = LoadFixedArrayElement(CAST(cache_array), index, 0,
@@ -3005,7 +3177,7 @@ IGNITION_HANDLER(ForInNext, InterpreterAssembler) {
BIND(&if_slow);
{
// Record the fact that we hit the for-in slow-path.
- UpdateFeedback(SmiConstant(ForInFeedback::kAny), feedback_vector,
+ UpdateFeedback(SmiConstant(ForInFeedback::kAny), maybe_feedback_vector,
vector_index);
// Need to filter the {key} for the {receiver}.
@@ -3071,6 +3243,7 @@ IGNITION_HANDLER(ExtraWide, InterpreterAssembler) {
// An invalid bytecode aborting execution if dispatched.
IGNITION_HANDLER(Illegal, InterpreterAssembler) {
Abort(AbortReason::kInvalidBytecode);
+ Unreachable();
}
// SuspendGenerator <generator> <first input register> <register count>
@@ -3194,7 +3367,7 @@ Handle<Code> GenerateBytecodeHandler(Isolate* isolate, Bytecode bytecode,
FLAG_untrusted_code_mitigations
? PoisoningMitigationLevel::kPoisonCriticalOnly
: PoisoningMitigationLevel::kDontPoison,
- 0, builtin_index);
+ builtin_index);
switch (bytecode) {
#define CALL_GENERATOR(Name, ...) \
@@ -3220,75 +3393,6 @@ Handle<Code> GenerateBytecodeHandler(Isolate* isolate, Bytecode bytecode,
return code;
}
-namespace {
-
-// DeserializeLazy
-//
-// Deserialize the bytecode handler, store it in the dispatch table, and
-// finally jump there (preserving existing args).
-// We manually create a custom assembler instead of using the helper macros
-// above since no corresponding bytecode exists.
-class DeserializeLazyAssembler : public InterpreterAssembler {
- public:
- static const Bytecode kFakeBytecode = Bytecode::kIllegal;
-
- explicit DeserializeLazyAssembler(compiler::CodeAssemblerState* state,
- OperandScale operand_scale)
- : InterpreterAssembler(state, kFakeBytecode, operand_scale) {}
-
- static void Generate(compiler::CodeAssemblerState* state,
- OperandScale operand_scale) {
- DeserializeLazyAssembler assembler(state, operand_scale);
- state->SetInitialDebugInformation("DeserializeLazy", __FILE__, __LINE__);
- assembler.GenerateImpl();
- }
-
- private:
- void GenerateImpl() { DeserializeLazyAndDispatch(); }
-
- DISALLOW_COPY_AND_ASSIGN(DeserializeLazyAssembler);
-};
-
-} // namespace
-
-Handle<Code> GenerateDeserializeLazyHandler(Isolate* isolate,
- OperandScale operand_scale,
- int builtin_index,
- const AssemblerOptions& options) {
- Zone zone(isolate->allocator(), ZONE_NAME);
-
- std::string debug_name = std::string("DeserializeLazy");
- if (operand_scale > OperandScale::kSingle) {
- Bytecode prefix_bytecode =
- Bytecodes::OperandScaleToPrefixBytecode(operand_scale);
- debug_name = debug_name.append(Bytecodes::ToString(prefix_bytecode));
- }
-
- compiler::CodeAssemblerState state(
- isolate, &zone, InterpreterDispatchDescriptor{}, Code::BYTECODE_HANDLER,
- debug_name.c_str(),
- FLAG_untrusted_code_mitigations
- ? PoisoningMitigationLevel::kPoisonCriticalOnly
- : PoisoningMitigationLevel::kDontPoison,
- 0, builtin_index);
-
- DeserializeLazyAssembler::Generate(&state, operand_scale);
- Handle<Code> code = compiler::CodeAssembler::GenerateCode(&state, options);
- PROFILE(isolate,
- CodeCreateEvent(CodeEventListener::BYTECODE_HANDLER_TAG,
- AbstractCode::cast(*code), debug_name.c_str()));
-
-#ifdef ENABLE_DISASSEMBLER
- if (FLAG_trace_ignition_codegen) {
- StdoutStream os;
- code->Disassemble(debug_name.c_str(), os);
- os << std::flush;
- }
-#endif // ENABLE_DISASSEMBLER
-
- return code;
-}
-
} // namespace interpreter
} // namespace internal
} // namespace v8
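
The common thread in the interpreter-generator.cc changes above is that handlers now call LoadFeedbackVectorUnchecked() and must tolerate an undefined feedback vector, since feedback allocation became lazy. A minimal standalone sketch of that control-flow shape in ordinary C++ (this models the idea only; it is not the CodeStubAssembler API, and all names are illustrative):

    #include <cstdio>
    #include <optional>

    // Stand-in for a FeedbackVector; the real handlers branch with
    // GotoIf(IsUndefined(feedback_vector), &no_feedback) instead.
    struct FeedbackVector {
      int slots[8] = {0};
    };

    // Hypothetical handler: records feedback only when a vector is present;
    // the operation itself runs either way.
    int HandleCall(std::optional<FeedbackVector>& maybe_vector, int slot, int arg) {
      if (maybe_vector.has_value()) {
        maybe_vector->slots[slot] |= 1;  // record "called" feedback
      }
      return arg * 2;
    }

    int main() {
      std::optional<FeedbackVector> none;  // feedback not allocated yet
      std::optional<FeedbackVector> some{FeedbackVector{}};
      std::printf("%d %d\n", HandleCall(none, 0, 21), HandleCall(some, 0, 21));
    }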
diff --git a/deps/v8/src/interpreter/interpreter-intrinsics-generator.cc b/deps/v8/src/interpreter/interpreter-intrinsics-generator.cc
index 3e261bea9f..cbb41a7af0 100644
--- a/deps/v8/src/interpreter/interpreter-intrinsics-generator.cc
+++ b/deps/v8/src/interpreter/interpreter-intrinsics-generator.cc
@@ -183,7 +183,8 @@ Node* IntrinsicsGenerator::IntrinsicAsStubCall(
stub_args[index++] = __ LoadRegisterFromRegisterList(args, i);
}
stub_args[index++] = context;
- return __ CallStubN(callable.descriptor(), 1, input_count, stub_args);
+ return __ CallStubN(StubCallMode::kCallCodeObject, callable.descriptor(), 1,
+ input_count, stub_args);
}
Node* IntrinsicsGenerator::IntrinsicAsBuiltinCall(
@@ -206,20 +207,6 @@ Node* IntrinsicsGenerator::HasProperty(
args, context, Builtins::CallableFor(isolate(), Builtins::kHasProperty));
}
-Node* IntrinsicsGenerator::RejectPromise(
- const InterpreterAssembler::RegListNodePair& args, Node* context) {
- return IntrinsicAsStubCall(
- args, context,
- Builtins::CallableFor(isolate(), Builtins::kRejectPromise));
-}
-
-Node* IntrinsicsGenerator::ResolvePromise(
- const InterpreterAssembler::RegListNodePair& args, Node* context) {
- return IntrinsicAsStubCall(
- args, context,
- Builtins::CallableFor(isolate(), Builtins::kResolvePromise));
-}
-
Node* IntrinsicsGenerator::ToString(
const InterpreterAssembler::RegListNodePair& args, Node* context) {
return IntrinsicAsStubCall(
@@ -352,6 +339,45 @@ Node* IntrinsicsGenerator::GetImportMetaObject(
return return_value.value();
}
+Node* IntrinsicsGenerator::AsyncFunctionAwaitCaught(
+ const InterpreterAssembler::RegListNodePair& args, Node* context) {
+ return IntrinsicAsBuiltinCall(args, context,
+ Builtins::kAsyncFunctionAwaitCaught);
+}
+
+Node* IntrinsicsGenerator::AsyncFunctionAwaitUncaught(
+ const InterpreterAssembler::RegListNodePair& args, Node* context) {
+ return IntrinsicAsBuiltinCall(args, context,
+ Builtins::kAsyncFunctionAwaitUncaught);
+}
+
+Node* IntrinsicsGenerator::AsyncFunctionEnter(
+ const InterpreterAssembler::RegListNodePair& args, Node* context) {
+ return IntrinsicAsBuiltinCall(args, context, Builtins::kAsyncFunctionEnter);
+}
+
+Node* IntrinsicsGenerator::AsyncFunctionReject(
+ const InterpreterAssembler::RegListNodePair& args, Node* context) {
+ return IntrinsicAsBuiltinCall(args, context, Builtins::kAsyncFunctionReject);
+}
+
+Node* IntrinsicsGenerator::AsyncFunctionResolve(
+ const InterpreterAssembler::RegListNodePair& args, Node* context) {
+ return IntrinsicAsBuiltinCall(args, context, Builtins::kAsyncFunctionResolve);
+}
+
+Node* IntrinsicsGenerator::AsyncGeneratorAwaitCaught(
+ const InterpreterAssembler::RegListNodePair& args, Node* context) {
+ return IntrinsicAsBuiltinCall(args, context,
+ Builtins::kAsyncGeneratorAwaitCaught);
+}
+
+Node* IntrinsicsGenerator::AsyncGeneratorAwaitUncaught(
+ const InterpreterAssembler::RegListNodePair& args, Node* context) {
+ return IntrinsicAsBuiltinCall(args, context,
+ Builtins::kAsyncGeneratorAwaitUncaught);
+}
+
Node* IntrinsicsGenerator::AsyncGeneratorReject(
const InterpreterAssembler::RegListNodePair& args, Node* context) {
return IntrinsicAsBuiltinCall(args, context, Builtins::kAsyncGeneratorReject);
diff --git a/deps/v8/src/interpreter/interpreter-intrinsics.h b/deps/v8/src/interpreter/interpreter-intrinsics.h
index 608b0afcac..c89f0c01c7 100644
--- a/deps/v8/src/interpreter/interpreter-intrinsics.h
+++ b/deps/v8/src/interpreter/interpreter-intrinsics.h
@@ -13,26 +13,31 @@ namespace interpreter {
// List of supported intrinsics, with upper-case name, lower-case name and
// expected number of arguments (-1 denoting a variable argument count).
-#define INTRINSICS_LIST(V) \
- V(AsyncGeneratorReject, async_generator_reject, 2) \
- V(AsyncGeneratorResolve, async_generator_resolve, 3) \
- V(AsyncGeneratorYield, async_generator_yield, 3) \
- V(CreateJSGeneratorObject, create_js_generator_object, 2) \
- V(GeneratorGetResumeMode, generator_get_resume_mode, 1) \
- V(GeneratorClose, generator_close, 1) \
- V(GetImportMetaObject, get_import_meta_object, 0) \
- V(Call, call, -1) \
- V(CreateIterResultObject, create_iter_result_object, 2) \
- V(CreateAsyncFromSyncIterator, create_async_from_sync_iterator, 1) \
- V(HasProperty, has_property, 2) \
- V(IsArray, is_array, 1) \
- V(IsJSReceiver, is_js_receiver, 1) \
- V(IsSmi, is_smi, 1) \
- V(IsTypedArray, is_typed_array, 1) \
- V(RejectPromise, reject_promise, 3) \
- V(ResolvePromise, resolve_promise, 2) \
- V(ToString, to_string, 1) \
- V(ToLength, to_length, 1) \
+#define INTRINSICS_LIST(V) \
+ V(AsyncFunctionAwaitCaught, async_function_await_caught, 2) \
+ V(AsyncFunctionAwaitUncaught, async_function_await_uncaught, 2) \
+ V(AsyncFunctionEnter, async_function_enter, 2) \
+ V(AsyncFunctionReject, async_function_reject, 3) \
+ V(AsyncFunctionResolve, async_function_resolve, 3) \
+ V(AsyncGeneratorAwaitCaught, async_generator_await_caught, 2) \
+ V(AsyncGeneratorAwaitUncaught, async_generator_await_uncaught, 2) \
+ V(AsyncGeneratorReject, async_generator_reject, 2) \
+ V(AsyncGeneratorResolve, async_generator_resolve, 3) \
+ V(AsyncGeneratorYield, async_generator_yield, 3) \
+ V(CreateJSGeneratorObject, create_js_generator_object, 2) \
+ V(GeneratorGetResumeMode, generator_get_resume_mode, 1) \
+ V(GeneratorClose, generator_close, 1) \
+ V(GetImportMetaObject, get_import_meta_object, 0) \
+ V(Call, call, -1) \
+ V(CreateIterResultObject, create_iter_result_object, 2) \
+ V(CreateAsyncFromSyncIterator, create_async_from_sync_iterator, 1) \
+ V(HasProperty, has_property, 2) \
+ V(IsArray, is_array, 1) \
+ V(IsJSReceiver, is_js_receiver, 1) \
+ V(IsSmi, is_smi, 1) \
+ V(IsTypedArray, is_typed_array, 1) \
+ V(ToString, to_string, 1) \
+ V(ToLength, to_length, 1) \
V(ToObject, to_object, 1)
class IntrinsicsHelper {
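
INTRINSICS_LIST above is an X-macro: each consumer supplies its own V to stamp out declarations, enum entries or switch cases from the single list, which is why adding the async-function intrinsics is one line per intrinsic. A self-contained sketch of the pattern (toy list and consumers, not V8's actual ones):

    #include <cstdio>

    #define MY_INTRINSICS_LIST(V)      \
      V(IsSmi, is_smi, 1)              \
      V(HasProperty, has_property, 2)  \
      V(Call, call, -1)

    // Consumer 1: an enum of intrinsic ids.
    enum class IntrinsicId {
    #define DECLARE_ID(Name, name, arity) k##Name,
      MY_INTRINSICS_LIST(DECLARE_ID)
    #undef DECLARE_ID
    };

    // Consumer 2: a switch mapping ids back to lower-case names.
    const char* IntrinsicName(IntrinsicId id) {
      switch (id) {
    #define NAME_CASE(Name, name, arity) \
        case IntrinsicId::k##Name: return #name;
        MY_INTRINSICS_LIST(NAME_CASE)
    #undef NAME_CASE
      }
      return "unknown";
    }

    int main() {
      std::printf("%s\n", IntrinsicName(IntrinsicId::kHasProperty));
    }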
diff --git a/deps/v8/src/interpreter/interpreter.cc b/deps/v8/src/interpreter/interpreter.cc
index ca53fa674c..4298003ce2 100644
--- a/deps/v8/src/interpreter/interpreter.cc
+++ b/deps/v8/src/interpreter/interpreter.cc
@@ -14,9 +14,10 @@
#include "src/counters-inl.h"
#include "src/interpreter/bytecode-generator.h"
#include "src/interpreter/bytecodes.h"
-#include "src/log.h"
#include "src/objects-inl.h"
#include "src/objects/shared-function-info.h"
+#include "src/objects/slots.h"
+#include "src/ostreams.h"
#include "src/parsing/parse-info.h"
#include "src/setup-isolate.h"
#include "src/snapshot/snapshot.h"
@@ -29,9 +30,10 @@ namespace interpreter {
class InterpreterCompilationJob final : public UnoptimizedCompilationJob {
public:
- InterpreterCompilationJob(ParseInfo* parse_info, FunctionLiteral* literal,
- AccountingAllocator* allocator,
- ZoneVector<FunctionLiteral*>* eager_inner_literals);
+ InterpreterCompilationJob(
+ ParseInfo* parse_info, FunctionLiteral* literal,
+ AccountingAllocator* allocator,
+ std::vector<FunctionLiteral*>* eager_inner_literals);
protected:
Status ExecuteJobImpl() final;
@@ -48,7 +50,9 @@ class InterpreterCompilationJob final : public UnoptimizedCompilationJob {
DISALLOW_COPY_AND_ASSIGN(InterpreterCompilationJob);
};
-Interpreter::Interpreter(Isolate* isolate) : isolate_(isolate) {
+Interpreter::Interpreter(Isolate* isolate)
+ : isolate_(isolate),
+ interpreter_entry_trampoline_instruction_start_(kNullAddress) {
memset(dispatch_table_, 0, sizeof(dispatch_table_));
if (FLAG_trace_ignition_dispatches) {
@@ -73,31 +77,15 @@ int BuiltinIndexFromBytecode(Bytecode bytecode, OperandScale operand_scale) {
} // namespace
-Code* Interpreter::GetAndMaybeDeserializeBytecodeHandler(
- Bytecode bytecode, OperandScale operand_scale) {
+Code Interpreter::GetBytecodeHandler(Bytecode bytecode,
+ OperandScale operand_scale) {
int builtin_index = BuiltinIndexFromBytecode(bytecode, operand_scale);
Builtins* builtins = isolate_->builtins();
- Code* code = builtins->builtin(builtin_index);
-
- // Already deserialized? Then just return the handler.
- if (!Builtins::IsLazyDeserializer(code)) return code;
-
- DCHECK(FLAG_lazy_deserialization);
- DCHECK(Bytecodes::BytecodeHasHandler(bytecode, operand_scale));
- code = Snapshot::DeserializeBuiltin(isolate_, builtin_index);
-
- DCHECK(code->IsCode());
- DCHECK_EQ(code->kind(), Code::BYTECODE_HANDLER);
- DCHECK(!Builtins::IsLazyDeserializer(code));
-
- SetBytecodeHandler(bytecode, operand_scale, code);
-
- return code;
+ return builtins->builtin(builtin_index);
}
void Interpreter::SetBytecodeHandler(Bytecode bytecode,
- OperandScale operand_scale,
- Code* handler) {
+ OperandScale operand_scale, Code handler) {
DCHECK(handler->kind() == Code::BYTECODE_HANDLER);
size_t index = GetDispatchTableIndex(bytecode, operand_scale);
dispatch_table_[index] = handler->InstructionStart();
@@ -113,19 +101,36 @@ size_t Interpreter::GetDispatchTableIndex(Bytecode bytecode,
}
void Interpreter::IterateDispatchTable(RootVisitor* v) {
+ if (FLAG_embedded_builtins && !isolate_->serializer_enabled() &&
+ isolate_->embedded_blob() != nullptr) {
+// If builtins are embedded (and we're not generating a snapshot), then
+// every bytecode handler will be off-heap, so there's no point iterating
+// over them.
+#ifdef DEBUG
+ for (int i = 0; i < kDispatchTableSize; i++) {
+ Address code_entry = dispatch_table_[i];
+ CHECK(code_entry == kNullAddress ||
+ InstructionStream::PcIsOffHeap(isolate_, code_entry));
+ }
+#endif  // DEBUG
+ return;
+ }
+
for (int i = 0; i < kDispatchTableSize; i++) {
Address code_entry = dispatch_table_[i];
-
- // If the handler is embedded, it is immovable.
+ // Skip over off-heap bytecode handlers since they will never move.
if (InstructionStream::PcIsOffHeap(isolate_, code_entry)) continue;
- Object* code = code_entry == kNullAddress
- ? nullptr
- : Code::GetCodeFromTargetAddress(code_entry);
- Object* old_code = code;
- v->VisitRootPointer(Root::kDispatchTable, nullptr, &code);
+ // TODO(jkummerow): Would it hurt to simply do:
+ // if (code_entry == kNullAddress) continue;
+ Code code;
+ if (code_entry != kNullAddress) {
+ code = Code::GetCodeFromTargetAddress(code_entry);
+ }
+ Code old_code = code;
+ v->VisitRootPointer(Root::kDispatchTable, nullptr, FullObjectSlot(&code));
if (code != old_code) {
- dispatch_table_[i] = reinterpret_cast<Code*>(code)->entry();
+ dispatch_table_[i] = code->entry();
}
}
}
@@ -168,7 +173,7 @@ bool ShouldPrintBytecode(Handle<SharedFunctionInfo> shared) {
InterpreterCompilationJob::InterpreterCompilationJob(
ParseInfo* parse_info, FunctionLiteral* literal,
AccountingAllocator* allocator,
- ZoneVector<FunctionLiteral*>* eager_inner_literals)
+ std::vector<FunctionLiteral*>* eager_inner_literals)
: UnoptimizedCompilationJob(parse_info->stack_limit(), parse_info,
&compilation_info_),
zone_(allocator, ZONE_NAME),
@@ -228,7 +233,7 @@ InterpreterCompilationJob::Status InterpreterCompilationJob::FinalizeJobImpl(
UnoptimizedCompilationJob* Interpreter::NewCompilationJob(
ParseInfo* parse_info, FunctionLiteral* literal,
AccountingAllocator* allocator,
- ZoneVector<FunctionLiteral*>* eager_inner_literals) {
+ std::vector<FunctionLiteral*>* eager_inner_literals) {
return new InterpreterCompilationJob(parse_info, literal, allocator,
eager_inner_literals);
}
@@ -248,13 +253,23 @@ void Interpreter::ForEachBytecode(
}
}
-void Interpreter::InitializeDispatchTable() {
+void Interpreter::Initialize() {
Builtins* builtins = isolate_->builtins();
- Code* illegal = builtins->builtin(Builtins::kIllegalHandler);
+
+ // Set the interpreter entry trampoline entry point now that builtins are
+ // initialized.
+ Handle<Code> code = BUILTIN_CODE(isolate_, InterpreterEntryTrampoline);
+ DCHECK(builtins->is_initialized());
+ DCHECK(code->is_off_heap_trampoline() ||
+ isolate_->heap()->IsImmovable(*code));
+ interpreter_entry_trampoline_instruction_start_ = code->InstructionStart();
+
+ // Initialize the dispatch table.
+ Code illegal = builtins->builtin(Builtins::kIllegalHandler);
int builtin_id = Builtins::kFirstBytecodeHandler;
ForEachBytecode([=, &builtin_id](Bytecode bytecode,
OperandScale operand_scale) {
- Code* handler = illegal;
+ Code handler = illegal;
if (Bytecodes::BytecodeHasHandler(bytecode, operand_scale)) {
#ifdef DEBUG
std::string builtin_name(Builtins::name(builtin_id));
@@ -268,22 +283,13 @@ void Interpreter::InitializeDispatchTable() {
});
DCHECK(builtin_id == Builtins::builtin_count);
DCHECK(IsDispatchTableInitialized());
-
-#if defined(V8_USE_SNAPSHOT) && !defined(V8_USE_SNAPSHOT_WITH_UNWINDING_INFO)
- if (!isolate_->serializer_enabled() && FLAG_perf_prof_unwinding_info) {
- StdoutStream{}
- << "Warning: The --perf-prof-unwinding-info flag can be passed at "
- "mksnapshot time to get better results."
- << std::endl;
- }
-#endif
}
bool Interpreter::IsDispatchTableInitialized() const {
return dispatch_table_[0] != kNullAddress;
}
-const char* Interpreter::LookupNameOfBytecodeHandler(const Code* code) {
+const char* Interpreter::LookupNameOfBytecodeHandler(const Code code) {
#ifdef ENABLE_DISASSEMBLER
#define RETURN_NAME(Name, ...) \
if (dispatch_table_[Bytecodes::ToByte(Bytecode::k##Name)] == \
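
SetBytecodeHandler and LookupNameOfBytecodeHandler both index dispatch_table_ through GetDispatchTableIndex(bytecode, operand_scale). A toy model of such a flat (bytecode, operand scale) table follows; the geometry and indexing formula here are assumptions for illustration, not V8's exact layout:

    #include <cinttypes>
    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    using Address = uintptr_t;

    // Assumed geometry: one row of handler entries per operand scale.
    constexpr int kBytecodeCount = 256;
    constexpr int kOperandScales = 3;  // single, double, quadruple widths
    constexpr int kDispatchTableSize = kBytecodeCount * kOperandScales;

    Address dispatch_table[kDispatchTableSize] = {};

    // Illustrative stand-in for GetDispatchTableIndex().
    std::size_t DispatchIndex(int bytecode, int scale_index) {
      return static_cast<std::size_t>(scale_index) * kBytecodeCount + bytecode;
    }

    int main() {
      dispatch_table[DispatchIndex(7, 1)] = 0xDEADBEEF;  // "install" a handler
      std::printf("%" PRIxPTR "\n", dispatch_table[DispatchIndex(7, 1)]);
    }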
diff --git a/deps/v8/src/interpreter/interpreter.h b/deps/v8/src/interpreter/interpreter.h
index 5023b0ef00..c0dece5aae 100644
--- a/deps/v8/src/interpreter/interpreter.h
+++ b/deps/v8/src/interpreter/interpreter.h
@@ -19,7 +19,6 @@ namespace v8 {
namespace internal {
class Isolate;
-class BuiltinDeserializerAllocator;
class Callable;
class UnoptimizedCompilationJob;
class FunctionLiteral;
@@ -47,28 +46,27 @@ class Interpreter {
static UnoptimizedCompilationJob* NewCompilationJob(
ParseInfo* parse_info, FunctionLiteral* literal,
AccountingAllocator* allocator,
- ZoneVector<FunctionLiteral*>* eager_inner_literals);
+ std::vector<FunctionLiteral*>* eager_inner_literals);
// Returns the bytecode handler for |bytecode| and |operand_scale|.
- Code* GetAndMaybeDeserializeBytecodeHandler(Bytecode bytecode,
- OperandScale operand_scale);
+ Code GetBytecodeHandler(Bytecode bytecode, OperandScale operand_scale);
// Set the bytecode handler for |bytecode| and |operand_scale|.
void SetBytecodeHandler(Bytecode bytecode, OperandScale operand_scale,
- Code* handler);
+ Code handler);
// GC support.
void IterateDispatchTable(RootVisitor* v);
// Disassembler support (only useful with ENABLE_DISASSEMBLER defined).
- const char* LookupNameOfBytecodeHandler(const Code* code);
+ const char* LookupNameOfBytecodeHandler(const Code code);
V8_EXPORT_PRIVATE Local<v8::Object> GetDispatchCountersObject();
void ForEachBytecode(const std::function<void(Bytecode, OperandScale)>& f);
- void InitializeDispatchTable();
+ void Initialize();
bool IsDispatchTableInitialized() const;
@@ -80,10 +78,14 @@ class Interpreter {
return reinterpret_cast<Address>(bytecode_dispatch_counters_table_.get());
}
+ Address address_of_interpreter_entry_trampoline_instruction_start() const {
+ return reinterpret_cast<Address>(
+ &interpreter_entry_trampoline_instruction_start_);
+ }
+
private:
friend class SetupInterpreter;
friend class v8::internal::SetupIsolateDelegate;
- friend class v8::internal::BuiltinDeserializerAllocator;
uintptr_t GetDispatchCounter(Bytecode from, Bytecode to) const;
@@ -98,6 +100,7 @@ class Interpreter {
Isolate* isolate_;
Address dispatch_table_[kDispatchTableSize];
std::unique_ptr<uintptr_t[]> bytecode_dispatch_counters_table_;
+ Address interpreter_entry_trampoline_instruction_start_;
DISALLOW_COPY_AND_ASSIGN(Interpreter);
};
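
The new interpreter_entry_trampoline_instruction_start_ field caches the trampoline's entry point once at Initialize() time, and address_of_interpreter_entry_trampoline_instruction_start() exposes the field's location so other components can read the cached value through a stable address. The caching pattern in miniature (names hypothetical; assumes the underlying code object is immovable, as the DCHECK in Initialize() asserts):

    #include <cstdint>
    #include <cstdio>

    using Address = uintptr_t;

    struct Codeish {
      Address instruction_start;  // stand-in for Code::InstructionStart()
    };

    class Interpreterish {
     public:
      void Initialize(const Codeish& trampoline) {
        // Cache once after builtins exist; valid only if the code is immovable.
        entry_trampoline_start_ = trampoline.instruction_start;
      }
      Address address_of_entry_trampoline_start() const {
        return reinterpret_cast<Address>(&entry_trampoline_start_);
      }

     private:
      Address entry_trampoline_start_ = 0;  // kNullAddress stand-in
    };

    int main() {
      Interpreterish interp;
      interp.Initialize(Codeish{0x4000});
      const Address* slot = reinterpret_cast<const Address*>(
          interp.address_of_entry_trampoline_start());
      std::printf("%llx\n", static_cast<unsigned long long>(*slot));
    }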
diff --git a/deps/v8/src/intl.cc b/deps/v8/src/intl.cc
deleted file mode 100644
index c8548b6d48..0000000000
--- a/deps/v8/src/intl.cc
+++ /dev/null
@@ -1,420 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_INTL_SUPPORT
-#error Internationalization is expected to be enabled.
-#endif // V8_INTL_SUPPORT
-
-#include "src/intl.h"
-
-#include <memory>
-
-#include "src/heap/factory.h"
-#include "src/isolate.h"
-#include "src/objects-inl.h"
-#include "src/string-case.h"
-#include "unicode/basictz.h"
-#include "unicode/calendar.h"
-#include "unicode/gregocal.h"
-#include "unicode/timezone.h"
-#include "unicode/ustring.h"
-#include "unicode/uvernum.h"
-#include "unicode/uversion.h"
-
-namespace v8 {
-namespace internal {
-
-namespace {
-inline bool IsASCIIUpper(uint16_t ch) { return ch >= 'A' && ch <= 'Z'; }
-
-const uint8_t kToLower[256] = {
- 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B,
- 0x0C, 0x0D, 0x0E, 0x0F, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
- 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F, 0x20, 0x21, 0x22, 0x23,
- 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2A, 0x2B, 0x2C, 0x2D, 0x2E, 0x2F,
- 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3A, 0x3B,
- 0x3C, 0x3D, 0x3E, 0x3F, 0x40, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
- 0x68, 0x69, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x6F, 0x70, 0x71, 0x72, 0x73,
- 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7A, 0x5B, 0x5C, 0x5D, 0x5E, 0x5F,
- 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6A, 0x6B,
- 0x6C, 0x6D, 0x6E, 0x6F, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77,
- 0x78, 0x79, 0x7A, 0x7B, 0x7C, 0x7D, 0x7E, 0x7F, 0x80, 0x81, 0x82, 0x83,
- 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8A, 0x8B, 0x8C, 0x8D, 0x8E, 0x8F,
- 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9A, 0x9B,
- 0x9C, 0x9D, 0x9E, 0x9F, 0xA0, 0xA1, 0xA2, 0xA3, 0xA4, 0xA5, 0xA6, 0xA7,
- 0xA8, 0xA9, 0xAA, 0xAB, 0xAC, 0xAD, 0xAE, 0xAF, 0xB0, 0xB1, 0xB2, 0xB3,
- 0xB4, 0xB5, 0xB6, 0xB7, 0xB8, 0xB9, 0xBA, 0xBB, 0xBC, 0xBD, 0xBE, 0xBF,
- 0xE0, 0xE1, 0xE2, 0xE3, 0xE4, 0xE5, 0xE6, 0xE7, 0xE8, 0xE9, 0xEA, 0xEB,
- 0xEC, 0xED, 0xEE, 0xEF, 0xF0, 0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xD7,
- 0xF8, 0xF9, 0xFA, 0xFB, 0xFC, 0xFD, 0xFE, 0xDF, 0xE0, 0xE1, 0xE2, 0xE3,
- 0xE4, 0xE5, 0xE6, 0xE7, 0xE8, 0xE9, 0xEA, 0xEB, 0xEC, 0xED, 0xEE, 0xEF,
- 0xF0, 0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7, 0xF8, 0xF9, 0xFA, 0xFB,
- 0xFC, 0xFD, 0xFE, 0xFF,
-};
-
-inline uint16_t ToLatin1Lower(uint16_t ch) {
- return static_cast<uint16_t>(kToLower[ch]);
-}
-
-inline uint16_t ToASCIIUpper(uint16_t ch) {
- return ch & ~((ch >= 'a' && ch <= 'z') << 5);
-}
-
-// Does not work for U+00DF (sharp-s), U+00B5 (micron), U+00FF.
-inline uint16_t ToLatin1Upper(uint16_t ch) {
- DCHECK(ch != 0xDF && ch != 0xB5 && ch != 0xFF);
- return ch &
- ~(((ch >= 'a' && ch <= 'z') || (((ch & 0xE0) == 0xE0) && ch != 0xF7))
- << 5);
-}
-
-template <typename Char>
-bool ToUpperFastASCII(const Vector<const Char>& src,
- Handle<SeqOneByteString> result) {
- // Do a faster loop for the case where all the characters are ASCII.
- uint16_t ored = 0;
- int32_t index = 0;
- for (auto it = src.begin(); it != src.end(); ++it) {
- uint16_t ch = static_cast<uint16_t>(*it);
- ored |= ch;
- result->SeqOneByteStringSet(index++, ToASCIIUpper(ch));
- }
- return !(ored & ~0x7F);
-}
-
-const uint16_t sharp_s = 0xDF;
-
-template <typename Char>
-bool ToUpperOneByte(const Vector<const Char>& src, uint8_t* dest,
- int* sharp_s_count) {
- // Still pretty-fast path for the input with non-ASCII Latin-1 characters.
-
- // There are two special cases.
- // 1. U+00B5 and U+00FF are mapped to a character beyond U+00FF.
- // 2. Lower case sharp-S converts to "SS" (two characters)
- *sharp_s_count = 0;
- for (auto it = src.begin(); it != src.end(); ++it) {
- uint16_t ch = static_cast<uint16_t>(*it);
- if (V8_UNLIKELY(ch == sharp_s)) {
- ++(*sharp_s_count);
- continue;
- }
- if (V8_UNLIKELY(ch == 0xB5 || ch == 0xFF)) {
- // Since this upper-cased character does not fit in an 8-bit string, we
- // need to take the 16-bit path.
- return false;
- }
- *dest++ = ToLatin1Upper(ch);
- }
-
- return true;
-}
-
-template <typename Char>
-void ToUpperWithSharpS(const Vector<const Char>& src,
- Handle<SeqOneByteString> result) {
- int32_t dest_index = 0;
- for (auto it = src.begin(); it != src.end(); ++it) {
- uint16_t ch = static_cast<uint16_t>(*it);
- if (ch == sharp_s) {
- result->SeqOneByteStringSet(dest_index++, 'S');
- result->SeqOneByteStringSet(dest_index++, 'S');
- } else {
- result->SeqOneByteStringSet(dest_index++, ToLatin1Upper(ch));
- }
- }
-}
-
-inline int FindFirstUpperOrNonAscii(String* s, int length) {
- for (int index = 0; index < length; ++index) {
- uint16_t ch = s->Get(index);
- if (V8_UNLIKELY(IsASCIIUpper(ch) || ch & ~0x7F)) {
- return index;
- }
- }
- return length;
-}
-
-} // namespace
-
-const uint8_t* ToLatin1LowerTable() { return &kToLower[0]; }
-
-const UChar* GetUCharBufferFromFlat(const String::FlatContent& flat,
- std::unique_ptr<uc16[]>* dest,
- int32_t length) {
- DCHECK(flat.IsFlat());
- if (flat.IsOneByte()) {
- if (!*dest) {
- dest->reset(NewArray<uc16>(length));
- CopyChars(dest->get(), flat.ToOneByteVector().start(), length);
- }
- return reinterpret_cast<const UChar*>(dest->get());
- } else {
- return reinterpret_cast<const UChar*>(flat.ToUC16Vector().start());
- }
-}
-
-MaybeHandle<String> LocaleConvertCase(Handle<String> s, Isolate* isolate,
- bool is_to_upper, const char* lang) {
- auto case_converter = is_to_upper ? u_strToUpper : u_strToLower;
- int32_t src_length = s->length();
- int32_t dest_length = src_length;
- UErrorCode status;
- Handle<SeqTwoByteString> result;
- std::unique_ptr<uc16[]> sap;
-
- if (dest_length == 0) return ReadOnlyRoots(isolate).empty_string_handle();
-
- // This is not a real loop. It'll be executed only once (no overflow) or
- // twice (overflow).
- for (int i = 0; i < 2; ++i) {
- // Case conversion can increase the string length (e.g. sharp-S => SS) so
- // that we have to handle RangeError exceptions here.
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, result, isolate->factory()->NewRawTwoByteString(dest_length),
- String);
- DisallowHeapAllocation no_gc;
- DCHECK(s->IsFlat());
- String::FlatContent flat = s->GetFlatContent();
- const UChar* src = GetUCharBufferFromFlat(flat, &sap, src_length);
- status = U_ZERO_ERROR;
- dest_length = case_converter(reinterpret_cast<UChar*>(result->GetChars()),
- dest_length, src, src_length, lang, &status);
- if (status != U_BUFFER_OVERFLOW_ERROR) break;
- }
-
- // In most cases, the output will fill the destination buffer completely
- // leading to an unterminated string (U_STRING_NOT_TERMINATED_WARNING).
- // Only in rare cases, it'll be shorter than the destination buffer and
- // |result| has to be truncated.
- DCHECK(U_SUCCESS(status));
- if (V8_LIKELY(status == U_STRING_NOT_TERMINATED_WARNING)) {
- DCHECK(dest_length == result->length());
- return result;
- }
- DCHECK(dest_length < result->length());
- return SeqString::Truncate(result, dest_length);
-}
-
-// A stripped-down version of ConvertToLower that can only handle flat one-byte
-// strings and does not allocate. Note that {src} could still be, e.g., a
-// one-byte sliced string with a two-byte parent string.
-// Called from TF builtins.
-V8_WARN_UNUSED_RESULT String* ConvertOneByteToLower(String* src, String* dst) {
- DCHECK_EQ(src->length(), dst->length());
- DCHECK(src->HasOnlyOneByteChars());
- DCHECK(src->IsFlat());
- DCHECK(dst->IsSeqOneByteString());
-
- DisallowHeapAllocation no_gc;
-
- const int length = src->length();
- String::FlatContent src_flat = src->GetFlatContent();
- uint8_t* dst_data = SeqOneByteString::cast(dst)->GetChars();
-
- if (src_flat.IsOneByte()) {
- const uint8_t* src_data = src_flat.ToOneByteVector().start();
-
- bool has_changed_character = false;
- int index_to_first_unprocessed =
- FastAsciiConvert<true>(reinterpret_cast<char*>(dst_data),
- reinterpret_cast<const char*>(src_data), length,
- &has_changed_character);
-
- if (index_to_first_unprocessed == length) {
- return has_changed_character ? dst : src;
- }
-
- // If not ASCII, we keep the result up to index_to_first_unprocessed and
- // process the rest.
- for (int index = index_to_first_unprocessed; index < length; ++index) {
- dst_data[index] = ToLatin1Lower(static_cast<uint16_t>(src_data[index]));
- }
- } else {
- DCHECK(src_flat.IsTwoByte());
- int index_to_first_unprocessed = FindFirstUpperOrNonAscii(src, length);
- if (index_to_first_unprocessed == length) return src;
-
- const uint16_t* src_data = src_flat.ToUC16Vector().start();
- CopyChars(dst_data, src_data, index_to_first_unprocessed);
- for (int index = index_to_first_unprocessed; index < length; ++index) {
- dst_data[index] = ToLatin1Lower(static_cast<uint16_t>(src_data[index]));
- }
- }
-
- return dst;
-}
-
-MaybeHandle<String> ConvertToLower(Handle<String> s, Isolate* isolate) {
- if (!s->HasOnlyOneByteChars()) {
- // Use a slower implementation for strings with characters beyond U+00FF.
- return LocaleConvertCase(s, isolate, false, "");
- }
-
- int length = s->length();
-
- // We depend here on the invariant that the length of a Latin1
- // string is invariant under ToLowerCase, and the result always
- // fits in the Latin1 range in the *root locale*. It does not hold
- // for ToUpperCase even in the root locale.
-
- // Scan the string for uppercase and non-ASCII characters for strings
- // shorter than a machine-word without any memory allocation overhead.
- // TODO(jshin): Apply this to a longer input by breaking FastAsciiConvert()
- // to two parts, one for scanning the prefix with no change and the other for
- // handling ASCII-only characters.
-
- bool is_short = length < static_cast<int>(sizeof(uintptr_t));
- if (is_short) {
- bool is_lower_ascii = FindFirstUpperOrNonAscii(*s, length) == length;
- if (is_lower_ascii) return s;
- }
-
- Handle<SeqOneByteString> result =
- isolate->factory()->NewRawOneByteString(length).ToHandleChecked();
-
- return Handle<String>(ConvertOneByteToLower(*s, *result), isolate);
-}
-
-MaybeHandle<String> ConvertToUpper(Handle<String> s, Isolate* isolate) {
- int32_t length = s->length();
- if (s->HasOnlyOneByteChars() && length > 0) {
- Handle<SeqOneByteString> result =
- isolate->factory()->NewRawOneByteString(length).ToHandleChecked();
-
- DCHECK(s->IsFlat());
- int sharp_s_count;
- bool is_result_single_byte;
- {
- DisallowHeapAllocation no_gc;
- String::FlatContent flat = s->GetFlatContent();
- uint8_t* dest = result->GetChars();
- if (flat.IsOneByte()) {
- Vector<const uint8_t> src = flat.ToOneByteVector();
- bool has_changed_character = false;
- int index_to_first_unprocessed =
- FastAsciiConvert<false>(reinterpret_cast<char*>(result->GetChars()),
- reinterpret_cast<const char*>(src.start()),
- length, &has_changed_character);
- if (index_to_first_unprocessed == length) {
- return has_changed_character ? result : s;
- }
- // If not ASCII, we keep the result up to index_to_first_unprocessed and
- // process the rest.
- is_result_single_byte =
- ToUpperOneByte(src.SubVector(index_to_first_unprocessed, length),
- dest + index_to_first_unprocessed, &sharp_s_count);
- } else {
- DCHECK(flat.IsTwoByte());
- Vector<const uint16_t> src = flat.ToUC16Vector();
- if (ToUpperFastASCII(src, result)) return result;
- is_result_single_byte = ToUpperOneByte(src, dest, &sharp_s_count);
- }
- }
-
- // Go to the full Unicode path if there are characters whose uppercase
- // is beyond the Latin-1 range (cannot be represented in OneByteString).
- if (V8_UNLIKELY(!is_result_single_byte)) {
- return LocaleConvertCase(s, isolate, true, "");
- }
-
- if (sharp_s_count == 0) return result;
-
- // We have sharp_s_count sharp-s characters, but the result is still
- // in the Latin-1 range.
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, result,
- isolate->factory()->NewRawOneByteString(length + sharp_s_count),
- String);
- DisallowHeapAllocation no_gc;
- String::FlatContent flat = s->GetFlatContent();
- if (flat.IsOneByte()) {
- ToUpperWithSharpS(flat.ToOneByteVector(), result);
- } else {
- ToUpperWithSharpS(flat.ToUC16Vector(), result);
- }
-
- return result;
- }
-
- return LocaleConvertCase(s, isolate, true, "");
-}
-
-MaybeHandle<String> ConvertCase(Handle<String> s, bool is_upper,
- Isolate* isolate) {
- return is_upper ? ConvertToUpper(s, isolate) : ConvertToLower(s, isolate);
-}
-
-ICUTimezoneCache::ICUTimezoneCache() : timezone_(nullptr) { Clear(); }
-
-ICUTimezoneCache::~ICUTimezoneCache() { Clear(); }
-
-const char* ICUTimezoneCache::LocalTimezone(double time_ms) {
- bool is_dst = DaylightSavingsOffset(time_ms) != 0;
- std::string* name = is_dst ? &dst_timezone_name_ : &timezone_name_;
- if (name->empty()) {
- icu::UnicodeString result;
- GetTimeZone()->getDisplayName(is_dst, icu::TimeZone::LONG, result);
- result += '\0';
-
- icu::StringByteSink<std::string> byte_sink(name);
- result.toUTF8(byte_sink);
- }
- DCHECK(!name->empty());
- return name->c_str();
-}
-
-icu::TimeZone* ICUTimezoneCache::GetTimeZone() {
- if (timezone_ == nullptr) {
- timezone_ = icu::TimeZone::createDefault();
- }
- return timezone_;
-}
-
-bool ICUTimezoneCache::GetOffsets(double time_ms, bool is_utc,
- int32_t* raw_offset, int32_t* dst_offset) {
- UErrorCode status = U_ZERO_ERROR;
- // TODO(jshin): ICU TimeZone class handles skipped time differently from
- // Ecma 262 (https://github.com/tc39/ecma262/pull/778) and icu::TimeZone
- // class does not expose the necessary API. Fixing
- // http://bugs.icu-project.org/trac/ticket/13268 would make it easy to
- // implement the proposed spec change. A proposed fix for ICU is
- // https://chromium-review.googlesource.com/851265 .
- // In the meantime, use an internal (still public) API of icu::BasicTimeZone.
- // Once it's accepted by the upstream, get rid of cast. Note that casting
- // TimeZone to BasicTimeZone is safe because we know that icu::TimeZone used
- // here is a BasicTimeZone.
- if (is_utc) {
- GetTimeZone()->getOffset(time_ms, false, *raw_offset, *dst_offset, status);
- } else {
- static_cast<const icu::BasicTimeZone*>(GetTimeZone())
- ->getOffsetFromLocal(time_ms, icu::BasicTimeZone::kFormer,
- icu::BasicTimeZone::kFormer, *raw_offset,
- *dst_offset, status);
- }
-
- return U_SUCCESS(status);
-}
-
-double ICUTimezoneCache::DaylightSavingsOffset(double time_ms) {
- int32_t raw_offset, dst_offset;
- if (!GetOffsets(time_ms, true, &raw_offset, &dst_offset)) return 0;
- return dst_offset;
-}
-
-double ICUTimezoneCache::LocalTimeOffset(double time_ms, bool is_utc) {
- int32_t raw_offset, dst_offset;
- if (!GetOffsets(time_ms, is_utc, &raw_offset, &dst_offset)) return 0;
- return raw_offset + dst_offset;
-}
-
-void ICUTimezoneCache::Clear() {
- delete timezone_;
- timezone_ = nullptr;
- timezone_name_.clear();
- dst_timezone_name_.clear();
-}
-
-} // namespace internal
-} // namespace v8
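
Much of the deleted intl.cc leans on the fact that ASCII upper- and lower-case letters differ only in bit 0x20; the removed ToASCIIUpper clears that bit exactly when the input is a lowercase letter, with no branch per character. A standalone demonstration of the same trick:

    #include <cstdint>
    #include <cstdio>

    // Mirrors the deleted ToASCIIUpper: clear bit 0x20 only for 'a'..'z'.
    // ((ch >= 'a' && ch <= 'z') << 5) is 0x20 for lowercase letters and 0
    // otherwise, so the complement either masks out the case bit or is all ones.
    inline uint16_t ToASCIIUpper(uint16_t ch) {
      return ch & ~((ch >= 'a' && ch <= 'z') << 5);
    }

    int main() {
      const char* s = "v8 Ignition!";
      for (const char* p = s; *p; ++p)
        std::putchar(static_cast<char>(ToASCIIUpper(static_cast<uint8_t>(*p))));
      std::putchar('\n');  // prints "V8 IGNITION!"
    }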
diff --git a/deps/v8/src/intl.h b/deps/v8/src/intl.h
deleted file mode 100644
index a2b393bdaa..0000000000
--- a/deps/v8/src/intl.h
+++ /dev/null
@@ -1,82 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_INTL_SUPPORT
-#error Internationalization is expected to be enabled.
-#endif // V8_INTL_SUPPORT
-
-#ifndef V8_INTL_H_
-#define V8_INTL_H_
-
-#include <string>
-
-#include "src/base/timezone-cache.h"
-#include "src/objects.h"
-#include "src/objects/string.h"
-#include "unicode/uversion.h"
-
-namespace U_ICU_NAMESPACE {
-class TimeZone;
-}
-
-namespace v8 {
-namespace internal {
-
-enum class ICUService {
- kBreakIterator,
- kCollator,
- kDateFormat,
- kNumberFormat,
- kPluralRules,
- kRelativeDateTimeFormatter,
- kListFormatter,
- kSegmenter
-};
-
-const UChar* GetUCharBufferFromFlat(const String::FlatContent& flat,
- std::unique_ptr<uc16[]>* dest,
- int32_t length);
-MaybeHandle<String> LocaleConvertCase(Handle<String> s, Isolate* isolate,
- bool is_to_upper, const char* lang);
-MaybeHandle<String> ConvertToLower(Handle<String> s, Isolate* isolate);
-MaybeHandle<String> ConvertToUpper(Handle<String> s, Isolate* isolate);
-MaybeHandle<String> ConvertCase(Handle<String> s, bool is_upper,
- Isolate* isolate);
-
-V8_WARN_UNUSED_RESULT String* ConvertOneByteToLower(String* src, String* dst);
-
-const uint8_t* ToLatin1LowerTable();
-
-// ICUTimezoneCache calls out to ICU for TimezoneCache
-// functionality in a straightforward way.
-class ICUTimezoneCache : public base::TimezoneCache {
- public:
- ICUTimezoneCache();
-
- ~ICUTimezoneCache() override;
-
- const char* LocalTimezone(double time_ms) override;
-
- double DaylightSavingsOffset(double time_ms) override;
-
- double LocalTimeOffset(double time_ms, bool is_utc) override;
-
- void Clear() override;
-
- private:
- icu::TimeZone* GetTimeZone();
-
- bool GetOffsets(double time_ms, bool is_utc, int32_t* raw_offset,
- int32_t* dst_offset);
-
- icu::TimeZone* timezone_;
-
- std::string timezone_name_;
- std::string dst_timezone_name_;
-};
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_INTL_H_
diff --git a/deps/v8/src/isolate-allocator.cc b/deps/v8/src/isolate-allocator.cc
new file mode 100644
index 0000000000..939fc1e5fc
--- /dev/null
+++ b/deps/v8/src/isolate-allocator.cc
@@ -0,0 +1,160 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/isolate-allocator.h"
+#include "src/base/bounded-page-allocator.h"
+#include "src/isolate.h"
+#include "src/ptr-compr.h"
+#include "src/utils.h"
+
+namespace v8 {
+namespace internal {
+
+IsolateAllocator::IsolateAllocator(IsolateAllocationMode mode) {
+#if V8_TARGET_ARCH_64_BIT
+ if (mode == IsolateAllocationMode::kInV8Heap) {
+ Address heap_base = InitReservation();
+ CommitPagesForIsolate(heap_base);
+ return;
+ }
+#endif // V8_TARGET_ARCH_64_BIT
+
+ // Allocate Isolate in C++ heap.
+ CHECK_EQ(mode, IsolateAllocationMode::kInCppHeap);
+ page_allocator_ = GetPlatformPageAllocator();
+ isolate_memory_ = ::operator new(sizeof(Isolate));
+ DCHECK(!reservation_.IsReserved());
+}
+
+IsolateAllocator::~IsolateAllocator() {
+ if (reservation_.IsReserved()) {
+ // The actual memory will be freed when the |reservation_| dies.
+ return;
+ }
+
+ // The memory was allocated in C++ heap.
+ ::operator delete(isolate_memory_);
+}
+
+#if V8_TARGET_ARCH_64_BIT
+Address IsolateAllocator::InitReservation() {
+ v8::PageAllocator* platform_page_allocator = GetPlatformPageAllocator();
+
+ // Reserve a 4 GB region so that the middle is 4 GB-aligned.
+ // The VirtualMemory API does not support such a constraint, so we have to
+ // implement it manually here.
+ size_t reservation_size = kPtrComprHeapReservationSize;
+ size_t base_alignment = kPtrComprIsolateRootAlignment;
+
+ const int kMaxAttempts = 3;
+ for (int attempt = 0; attempt < kMaxAttempts; ++attempt) {
+ Address hint = RoundDown(reinterpret_cast<Address>(
+ platform_page_allocator->GetRandomMmapAddr()),
+ base_alignment) +
+ kPtrComprIsolateRootBias;
+
+ // Within this reservation there will be a sub-region with proper alignment.
+ VirtualMemory padded_reservation(platform_page_allocator,
+ reservation_size * 2,
+ reinterpret_cast<void*>(hint));
+ if (!padded_reservation.IsReserved()) break;
+
+ // Find a sub-region inside the reservation whose middle is
+ // |base_alignment|-aligned.
+ Address address =
+ RoundUp(padded_reservation.address() + kPtrComprIsolateRootBias,
+ base_alignment) -
+ kPtrComprIsolateRootBias;
+ CHECK(padded_reservation.InVM(address, reservation_size));
+
+ // Now free the padded reservation and immediately try to reserve an exact
+ // region at the aligned address. We have to do this dance because the
+ // reservation address requirement is more complex than a simple alignment,
+ // and not all operating systems support freeing parts of reserved address
+ // space regions.
+ padded_reservation.Free();
+
+ VirtualMemory reservation(platform_page_allocator, reservation_size,
+ reinterpret_cast<void*>(address));
+ if (!reservation.IsReserved()) break;
+
+ // The reservation could still end up somewhere else, but we can accept it
+ // as long as it has the required alignment.
+ Address aligned_address =
+ RoundUp(reservation.address() + kPtrComprIsolateRootBias,
+ base_alignment) -
+ kPtrComprIsolateRootBias;
+
+ if (reservation.address() == aligned_address) {
+ reservation_ = std::move(reservation);
+ break;
+ }
+ }
+ if (!reservation_.IsReserved()) {
+ V8::FatalProcessOutOfMemory(nullptr,
+ "Failed to reserve memory for new V8 Isolate");
+ }
+
+ CHECK_EQ(reservation_.size(), reservation_size);
+
+ Address heap_base = reservation_.address() + kPtrComprIsolateRootBias;
+ CHECK(IsAligned(heap_base, base_alignment));
+
+ return heap_base;
+}
+
+void IsolateAllocator::CommitPagesForIsolate(Address heap_base) {
+ v8::PageAllocator* platform_page_allocator = GetPlatformPageAllocator();
+
+ // Simplify BoundedPageAllocator's life by configuring it to use the same
+ // page size as the Heap will use (MemoryChunk::kPageSize).
+ size_t page_size = RoundUp(size_t{1} << kPageSizeBits,
+ platform_page_allocator->AllocatePageSize());
+
+ page_allocator_instance_ = base::make_unique<base::BoundedPageAllocator>(
+ platform_page_allocator, reservation_.address(), reservation_.size(),
+ page_size);
+ page_allocator_ = page_allocator_instance_.get();
+
+ Address isolate_address = heap_base - Isolate::isolate_root_bias();
+ Address isolate_end = isolate_address + sizeof(Isolate);
+
+ // Inform the bounded page allocator about reserved pages.
+ {
+ Address reserved_region_address = RoundDown(isolate_address, page_size);
+ size_t reserved_region_size =
+ RoundUp(isolate_end, page_size) - reserved_region_address;
+
+ CHECK(page_allocator_instance_->AllocatePagesAt(
+ reserved_region_address, reserved_region_size,
+ PageAllocator::Permission::kNoAccess));
+ }
+
+ // Commit pages where the Isolate will be stored.
+ {
+ size_t commit_page_size = platform_page_allocator->CommitPageSize();
+ Address committed_region_address =
+ RoundDown(isolate_address, commit_page_size);
+ size_t committed_region_size =
+ RoundUp(isolate_end, commit_page_size) - committed_region_address;
+
+ // We are using |reservation_| directly here because |page_allocator_| has
+ // bigger commit page size than we actually need.
+ CHECK(reservation_.SetPermissions(committed_region_address,
+ committed_region_size,
+ PageAllocator::kReadWrite));
+
+ if (Heap::ShouldZapGarbage()) {
+ for (Address address = committed_region_address;
+ address < committed_region_address + committed_region_size;
+ address += kSystemPointerSize) {
+ Memory<Address>(address) = static_cast<Address>(kZapValue);
+ }
+ }
+ }
+ isolate_memory_ = reinterpret_cast<void*>(isolate_address);
+}
+#endif // V8_TARGET_ARCH_64_BIT
+
+} // namespace internal
+} // namespace v8
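
The heart of InitReservation() is pure address arithmetic: over-reserve, round the midpoint up to the required alignment, then re-reserve exactly there. That arithmetic can be checked in isolation; the constants below are stand-ins for kPtrComprHeapReservationSize, kPtrComprIsolateRootAlignment and kPtrComprIsolateRootBias, and RoundUp/RoundDown are written out for self-containment:

    #include <cstdint>
    #include <cstdio>

    using Address = uint64_t;  // wide enough even on 32-bit hosts

    // Power-of-two-only versions of V8's RoundDown/RoundUp helpers.
    constexpr Address RoundDown(Address a, Address align) { return a & ~(align - 1); }
    constexpr Address RoundUp(Address a, Address align) {
      return RoundDown(a + align - 1, align);
    }

    int main() {
      // Stand-ins for the kPtrCompr* constants.
      const Address kReservationSize = Address{4} << 30;  // 4 GB
      const Address kRootAlignment = Address{4} << 30;    // middle is 4 GB-aligned
      const Address kRootBias = kReservationSize / 2;

      Address padded_base = 0x123450000;  // pretend the 2x reservation landed here
      // Same computation as InitReservation(): choose the base so that
      // (base + kRootBias) is kRootAlignment-aligned.
      Address base = RoundUp(padded_base + kRootBias, kRootAlignment) - kRootBias;
      std::printf("base=%llx middle=%llx aligned=%d\n",
                  static_cast<unsigned long long>(base),
                  static_cast<unsigned long long>(base + kRootBias),
                  (base + kRootBias) % kRootAlignment == 0);
    }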
diff --git a/deps/v8/src/isolate-allocator.h b/deps/v8/src/isolate-allocator.h
new file mode 100644
index 0000000000..ded3080866
--- /dev/null
+++ b/deps/v8/src/isolate-allocator.h
@@ -0,0 +1,63 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_ISOLATE_ALLOCATOR_H_
+#define V8_ISOLATE_ALLOCATOR_H_
+
+#include "src/allocation.h"
+#include "src/base/bounded-page-allocator.h"
+#include "src/base/page-allocator.h"
+#include "src/globals.h"
+
+namespace v8 {
+
+// Forward declarations.
+namespace base {
+class BoundedPageAllocator;
+}
+
+namespace internal {
+
+// An IsolateAllocator object is responsible for allocating memory for exactly
+// one Isolate object. Depending on the allocation mode, the memory is allocated
+// 1) in the C++ heap (when pointer compression is disabled), or
+// 2) at a suitable spot within a properly aligned region of reserved address
+// space (when pointer compression is enabled).
+//
+// Isolate::New() first creates an IsolateAllocator object, which allocates the
+// memory, and then constructs the Isolate object in that memory. Once done, the
+// Isolate object takes ownership of the IsolateAllocator object to keep the
+// memory alive.
+// Isolate::Delete() takes care of destroying the objects in the proper order.
+class V8_EXPORT_PRIVATE IsolateAllocator final {
+ public:
+ explicit IsolateAllocator(IsolateAllocationMode mode);
+ ~IsolateAllocator();
+
+ void* isolate_memory() const { return isolate_memory_; }
+
+ v8::PageAllocator* page_allocator() const { return page_allocator_; }
+
+ IsolateAllocationMode mode() {
+ return reservation_.IsReserved() ? IsolateAllocationMode::kInV8Heap
+ : IsolateAllocationMode::kInCppHeap;
+ }
+
+ private:
+ Address InitReservation();
+ void CommitPagesForIsolate(Address heap_base);
+
+ // The allocated memory for the Isolate instance.
+ void* isolate_memory_ = nullptr;
+ v8::PageAllocator* page_allocator_ = nullptr;
+ std::unique_ptr<base::BoundedPageAllocator> page_allocator_instance_;
+ VirtualMemory reservation_;
+
+ DISALLOW_COPY_AND_ASSIGN(IsolateAllocator);
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_ISOLATE_ALLOCATOR_H_
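
The class comment describes a two-phase construction protocol: the allocator produces raw storage, the object is placement-new'ed into it, and the finished object takes ownership of its allocator so the storage is released last. A minimal sketch of that idiom (simplified types, no page reservations; not V8's actual Isolate code):

    #include <cstddef>
    #include <cstdio>
    #include <memory>
    #include <new>

    class StorageAllocator {
     public:
      explicit StorageAllocator(std::size_t size) : memory_(::operator new(size)) {}
      ~StorageAllocator() { ::operator delete(memory_); }
      void* memory() const { return memory_; }

     private:
      void* memory_;
    };

    class Isolateish {
     public:
      // Mirrors Isolate::New(): allocate first, construct in place, then hand
      // the allocator to the object so the storage outlives the object's use.
      static Isolateish* New() {
        auto allocator = std::make_unique<StorageAllocator>(sizeof(Isolateish));
        void* mem = allocator->memory();
        return new (mem) Isolateish(std::move(allocator));
      }
      // Mirrors Isolate::Delete(): destroy the object first, free storage last.
      static void Delete(Isolateish* i) {
        auto allocator = std::move(i->allocator_);
        i->~Isolateish();
        // |allocator| goes out of scope here, releasing the memory last.
      }

     private:
      explicit Isolateish(std::unique_ptr<StorageAllocator> a)
          : allocator_(std::move(a)) {}
      std::unique_ptr<StorageAllocator> allocator_;
    };

    int main() {
      Isolateish* i = Isolateish::New();
      Isolateish::Delete(i);
      std::printf("ok\n");
    }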
diff --git a/deps/v8/src/isolate-data.h b/deps/v8/src/isolate-data.h
new file mode 100644
index 0000000000..269d0883f9
--- /dev/null
+++ b/deps/v8/src/isolate-data.h
@@ -0,0 +1,217 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_ISOLATE_DATA_H_
+#define V8_ISOLATE_DATA_H_
+
+#include "src/builtins/builtins.h"
+#include "src/constants-arch.h"
+#include "src/external-reference-table.h"
+#include "src/roots.h"
+#include "src/utils.h"
+
+namespace v8 {
+namespace internal {
+
+class Isolate;
+
+// This class contains a collection of data accessible from both the C++
+// runtime and compiled code (including assembly stubs, builtins, interpreter
+// bytecode handlers and optimized code).
+// In particular, it contains the V8 heap roots table, the external reference
+// table and the builtins array.
+// Compiled code accesses the isolate data fields indirectly, via the root
+// register.
+class IsolateData final {
+ public:
+ IsolateData() = default;
+
+ static constexpr intptr_t kIsolateRootBias = kRootRegisterBias;
+
+ // The value of the kRootRegister.
+ Address isolate_root() const {
+ return reinterpret_cast<Address>(this) + kIsolateRootBias;
+ }
+
+ // Root-register-relative offset of the roots table.
+ static constexpr int roots_table_offset() {
+ return kRootsTableOffset - kIsolateRootBias;
+ }
+
+ // Root-register-relative offset of the given root table entry.
+ static constexpr int root_slot_offset(RootIndex root_index) {
+ return roots_table_offset() + RootsTable::offset_of(root_index);
+ }
+
+ // Root-register-relative offset of the external reference table.
+ static constexpr int external_reference_table_offset() {
+ return kExternalReferenceTableOffset - kIsolateRootBias;
+ }
+
+ // Root-register-relative offset of the builtin entry table.
+ static constexpr int builtin_entry_table_offset() {
+ return kBuiltinEntryTableOffset - kIsolateRootBias;
+ }
+
+ // Root-register-relative offset of the builtins table.
+ static constexpr int builtins_table_offset() {
+ return kBuiltinsTableOffset - kIsolateRootBias;
+ }
+
+ // Root-register-relative offset of the given builtin table entry.
+ // TODO(ishell): remove in favour of typified id version.
+ static int builtin_slot_offset(int builtin_index) {
+ DCHECK(Builtins::IsBuiltinId(builtin_index));
+ return builtins_table_offset() + builtin_index * kSystemPointerSize;
+ }
+
+ // Root-register-relative offset of the builtin table entry.
+ static int builtin_slot_offset(Builtins::Name id) {
+ return builtins_table_offset() + id * kSystemPointerSize;
+ }
+
+ // Root-register-relative offset of the virtual call target register value.
+ static constexpr int virtual_call_target_register_offset() {
+ return kVirtualCallTargetRegisterOffset - kIsolateRootBias;
+ }
+
+ // The FP and PC that are saved right before TurboAssembler::CallCFunction.
+ Address* fast_c_call_caller_fp_address() { return &fast_c_call_caller_fp_; }
+ Address* fast_c_call_caller_pc_address() { return &fast_c_call_caller_pc_; }
+ Address fast_c_call_caller_fp() { return fast_c_call_caller_fp_; }
+ Address fast_c_call_caller_pc() { return fast_c_call_caller_pc_; }
+
+ // Returns true if this address points to data stored in this instance.
+  // If so, the value can be accessed indirectly through the root
+  // register.
+ bool contains(Address address) const {
+ STATIC_ASSERT(std::is_unsigned<Address>::value);
+ Address start = reinterpret_cast<Address>(this);
+ return (address - start) < sizeof(*this);
+ }
+
+ RootsTable& roots() { return roots_; }
+ const RootsTable& roots() const { return roots_; }
+
+ ExternalReferenceTable* external_reference_table() {
+ return &external_reference_table_;
+ }
+
+ Address* builtin_entry_table() { return builtin_entry_table_; }
+ Address* builtins() { return builtins_; }
+
+ private:
+// Static layout definition.
+#define FIELDS(V) \
+ V(kEmbedderDataOffset, Internals::kNumIsolateDataSlots* kSystemPointerSize) \
+ V(kExternalMemoryOffset, kInt64Size) \
+  V(kExternalMemoryLimitOffset, kInt64Size)                                  \
+ V(kExternalMemoryAtLastMarkCompactOffset, kInt64Size) \
+ V(kRootsTableOffset, RootsTable::kEntriesCount* kSystemPointerSize) \
+ V(kExternalReferenceTableOffset, ExternalReferenceTable::kSizeInBytes) \
+ V(kBuiltinEntryTableOffset, Builtins::builtin_count* kSystemPointerSize) \
+ V(kBuiltinsTableOffset, Builtins::builtin_count* kSystemPointerSize) \
+ V(kVirtualCallTargetRegisterOffset, kSystemPointerSize) \
+ V(kFastCCallCallerFPOffset, kSystemPointerSize) \
+ V(kFastCCallCallerPCOffset, kSystemPointerSize) \
+  /* This padding aligns IsolateData size to a multiple of 8 bytes. */       \
+ V(kPaddingOffset, \
+ 8 + RoundUp<8>(static_cast<int>(kPaddingOffset)) - kPaddingOffset) \
+ /* Total size. */ \
+ V(kSize, 0)
+
+ DEFINE_FIELD_OFFSET_CONSTANTS(0, FIELDS)
+#undef FIELDS
+
+  // These fields are accessed through the API; their offsets must be kept in
+  // sync with the v8::internal::Internals constants (in include/v8-internal.h).
+  // The layout consistency is verified in Isolate::CheckIsolateLayout() using
+  // runtime checks.
+ void* embedder_data_[Internals::kNumIsolateDataSlots] = {};
+
+ // TODO(ishell): Move these external memory counters back to Heap once the
+ // Node JS bot issue is solved.
+ // The amount of external memory registered through the API.
+ int64_t external_memory_ = 0;
+
+ // The limit when to trigger memory pressure from the API.
+ int64_t external_memory_limit_ = kExternalAllocationSoftLimit;
+
+ // Caches the amount of external memory registered at the last MC.
+ int64_t external_memory_at_last_mark_compact_ = 0;
+
+ RootsTable roots_;
+
+ ExternalReferenceTable external_reference_table_;
+
+ // The entry points for all builtins. This corresponds to
+ // Code::InstructionStart() for each Code object in the builtins table below.
+ // The entry table is in IsolateData for easy access through kRootRegister.
+ Address builtin_entry_table_[Builtins::builtin_count] = {};
+
+ // The entries in this array are tagged pointers to Code objects.
+ Address builtins_[Builtins::builtin_count] = {};
+
+ // For isolate-independent calls on ia32.
+ // TODO(v8:6666): Remove once wasm supports pc-relative jumps to builtins on
+ // ia32 (otherwise the arguments adaptor call runs out of registers).
+ void* virtual_call_target_register_ = nullptr;
+
+ // Stores the state of the caller for TurboAssembler::CallCFunction so that
+ // the sampling CPU profiler can iterate the stack during such calls. These
+ // are stored on IsolateData so that they can be stored to with only one move
+ // instruction in compiled code.
+ Address fast_c_call_caller_fp_ = kNullAddress;
+ Address fast_c_call_caller_pc_ = kNullAddress;
+
+ // Ensure the size is 8-byte aligned in order to make alignment of the field
+ // following the IsolateData field predictable. This solves the issue with
+ // C++ compilers for 32-bit platforms which are not consistent at aligning
+ // int64_t fields.
+  // In order to avoid dealing with zero-size arrays, the padding size is
+  // always in the range [8, 16).
+ STATIC_ASSERT(kPaddingOffsetEnd + 1 - kPaddingOffset >= 8);
+ char padding_[kPaddingOffsetEnd + 1 - kPaddingOffset];
+
+ V8_INLINE static void AssertPredictableLayout();
+
+ friend class Isolate;
+ friend class Heap;
+ FRIEND_TEST(HeapTest, ExternalLimitDefault);
+ FRIEND_TEST(HeapTest, ExternalLimitStaysAboveDefaultForExplicitHandling);
+
+ DISALLOW_COPY_AND_ASSIGN(IsolateData);
+};
+
+// The IsolateData object must have a "predictable" layout which does not
+// change when cross-compiling to another platform. Otherwise there may be
+// compatibility issues because of the different compilers used for the
+// snapshot generator and the actual V8 code.
+void IsolateData::AssertPredictableLayout() {
+ STATIC_ASSERT(std::is_standard_layout<RootsTable>::value);
+ STATIC_ASSERT(std::is_standard_layout<ExternalReferenceTable>::value);
+ STATIC_ASSERT(std::is_standard_layout<IsolateData>::value);
+ STATIC_ASSERT(offsetof(IsolateData, roots_) == kRootsTableOffset);
+ STATIC_ASSERT(offsetof(IsolateData, external_reference_table_) ==
+ kExternalReferenceTableOffset);
+ STATIC_ASSERT(offsetof(IsolateData, builtins_) == kBuiltinsTableOffset);
+ STATIC_ASSERT(offsetof(IsolateData, virtual_call_target_register_) ==
+ kVirtualCallTargetRegisterOffset);
+ STATIC_ASSERT(offsetof(IsolateData, external_memory_) ==
+ kExternalMemoryOffset);
+ STATIC_ASSERT(offsetof(IsolateData, external_memory_limit_) ==
+                kExternalMemoryLimitOffset);
+ STATIC_ASSERT(offsetof(IsolateData, external_memory_at_last_mark_compact_) ==
+ kExternalMemoryAtLastMarkCompactOffset);
+ STATIC_ASSERT(offsetof(IsolateData, fast_c_call_caller_fp_) ==
+ kFastCCallCallerFPOffset);
+ STATIC_ASSERT(offsetof(IsolateData, fast_c_call_caller_pc_) ==
+ kFastCCallCallerPCOffset);
+ STATIC_ASSERT(sizeof(IsolateData) == IsolateData::kSize);
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_ISOLATE_DATA_H_
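
To make the bias arithmetic above concrete, a small sketch with made-up constants; the real values come from the FIELDS layout and the per-architecture kRootRegisterBias, and the sketch assumes pointer-sized roots-table entries:

#include <cstddef>
#include <cstdint>
#include <cstdio>

int main() {
  // Illustrative constants only.
  const intptr_t kIsolateRootBias = 128;
  const intptr_t kRootsTableOffset = 48;
  const intptr_t kSystemPointerSize = 8;

  // What IsolateData::root_slot_offset() computes, assuming pointer-sized
  // roots-table entries:
  auto root_slot_offset = [&](int root_index) {
    return (kRootsTableOffset - kIsolateRootBias) +
           root_index * kSystemPointerSize;
  };

  // Compiled code can then reach any root with one register-relative load,
  // conceptually: mov dst, [kRootRegister + root_slot_offset(index)]
  std::printf("slot offset of root #5: %td\n",
              static_cast<std::ptrdiff_t>(root_slot_offset(5)));
  return 0;
}
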
diff --git a/deps/v8/src/isolate-inl.h b/deps/v8/src/isolate-inl.h
index dcbe5bea23..bc70d3dead 100644
--- a/deps/v8/src/isolate-inl.h
+++ b/deps/v8/src/isolate-inl.h
@@ -5,19 +5,20 @@
#ifndef V8_ISOLATE_INL_H_
#define V8_ISOLATE_INL_H_
+#include "src/heap/heap-inl.h" // Need MemoryChunk from heap/spaces.h
#include "src/isolate.h"
#include "src/objects-inl.h"
+#include "src/objects/cell-inl.h"
+#include "src/objects/regexp-match-info.h"
namespace v8 {
namespace internal {
-base::AddressRegion Isolate::root_register_addressable_region() {
- Address start = reinterpret_cast<Address>(this);
- Address end = heap_.root_register_addressable_end();
- return base::AddressRegion(start, end - start);
+IsolateAllocationMode Isolate::isolate_allocation_mode() {
+ return isolate_allocator_->mode();
}
-bool Isolate::FromWritableHeapObject(HeapObject* obj, Isolate** isolate) {
+bool Isolate::FromWritableHeapObject(HeapObject obj, Isolate** isolate) {
i::MemoryChunk* chunk = i::MemoryChunk::FromHeapObject(obj);
if (chunk->owner()->identity() == i::RO_SPACE) {
*isolate = nullptr;
@@ -27,8 +28,8 @@ bool Isolate::FromWritableHeapObject(HeapObject* obj, Isolate** isolate) {
return true;
}
-void Isolate::set_context(Context* context) {
- DCHECK(context == nullptr || context->IsContext());
+void Isolate::set_context(Context context) {
+ DCHECK(context.is_null() || context->IsContext());
thread_local_top_.context_ = context;
}
@@ -36,23 +37,21 @@ Handle<NativeContext> Isolate::native_context() {
return handle(context()->native_context(), this);
}
-NativeContext* Isolate::raw_native_context() {
+NativeContext Isolate::raw_native_context() {
return context()->native_context();
}
-Object* Isolate::pending_exception() {
+Object Isolate::pending_exception() {
DCHECK(has_pending_exception());
DCHECK(!thread_local_top_.pending_exception_->IsException(this));
return thread_local_top_.pending_exception_;
}
-
-void Isolate::set_pending_exception(Object* exception_obj) {
+void Isolate::set_pending_exception(Object exception_obj) {
DCHECK(!exception_obj->IsException(this));
thread_local_top_.pending_exception_ = exception_obj;
}
-
void Isolate::clear_pending_exception() {
DCHECK(!thread_local_top_.pending_exception_->IsException(this));
thread_local_top_.pending_exception_ = ReadOnlyRoots(this).the_hole_value();
@@ -69,14 +68,12 @@ void Isolate::clear_pending_message() {
thread_local_top_.pending_message_obj_ = ReadOnlyRoots(this).the_hole_value();
}
-
-Object* Isolate::scheduled_exception() {
+Object Isolate::scheduled_exception() {
DCHECK(has_scheduled_exception());
DCHECK(!thread_local_top_.scheduled_exception_->IsException(this));
return thread_local_top_.scheduled_exception_;
}
-
bool Isolate::has_scheduled_exception() {
DCHECK(!thread_local_top_.scheduled_exception_->IsException(this));
return thread_local_top_.scheduled_exception_ !=
@@ -89,7 +86,7 @@ void Isolate::clear_scheduled_exception() {
thread_local_top_.scheduled_exception_ = ReadOnlyRoots(this).the_hole_value();
}
-bool Isolate::is_catchable_by_javascript(Object* exception) {
+bool Isolate::is_catchable_by_javascript(Object exception) {
return exception != ReadOnlyRoots(heap()).termination_exception();
}
@@ -99,14 +96,6 @@ void Isolate::FireBeforeCallEnteredCallback() {
}
}
-void Isolate::FireMicrotasksCompletedCallback() {
- std::vector<MicrotasksCompletedCallback> callbacks(
- microtasks_completed_callbacks_);
- for (auto& callback : callbacks) {
- callback(reinterpret_cast<v8::Isolate*>(this));
- }
-}
-
Handle<JSGlobalObject> Isolate::global_object() {
return handle(context()->global_object(), this);
}
@@ -129,14 +118,14 @@ Isolate::ExceptionScope::~ExceptionScope() {
Handle<type> Isolate::name() { \
return Handle<type>(raw_native_context()->name(), this); \
} \
- bool Isolate::is_##name(type* value) { \
+ bool Isolate::is_##name(type value) { \
return raw_native_context()->is_##name(value); \
}
NATIVE_CONTEXT_FIELDS(NATIVE_CONTEXT_FIELD_ACCESSOR)
#undef NATIVE_CONTEXT_FIELD_ACCESSOR
bool Isolate::IsArrayConstructorIntact() {
- Cell* array_constructor_cell = heap()->array_constructor_protector();
+ Cell array_constructor_cell = heap()->array_constructor_protector();
return array_constructor_cell->value() == Smi::FromInt(kProtectorValid);
}
@@ -153,40 +142,56 @@ bool Isolate::IsArraySpeciesLookupChainIntact() {
// done here. In place, there are mjsunit tests harmony/array-species* which
// ensure that behavior is correct in various invalid protector cases.
- PropertyCell* species_cell = heap()->array_species_protector();
+ PropertyCell species_cell = heap()->array_species_protector();
return species_cell->value()->IsSmi() &&
Smi::ToInt(species_cell->value()) == kProtectorValid;
}
bool Isolate::IsTypedArraySpeciesLookupChainIntact() {
- PropertyCell* species_cell = heap()->typed_array_species_protector();
+ PropertyCell species_cell = heap()->typed_array_species_protector();
+ return species_cell->value()->IsSmi() &&
+ Smi::ToInt(species_cell->value()) == kProtectorValid;
+}
+
+bool Isolate::IsRegExpSpeciesLookupChainIntact() {
+ PropertyCell species_cell = heap()->regexp_species_protector();
return species_cell->value()->IsSmi() &&
Smi::ToInt(species_cell->value()) == kProtectorValid;
}
bool Isolate::IsPromiseSpeciesLookupChainIntact() {
- PropertyCell* species_cell = heap()->promise_species_protector();
+ PropertyCell species_cell = heap()->promise_species_protector();
return species_cell->value()->IsSmi() &&
Smi::ToInt(species_cell->value()) == kProtectorValid;
}
bool Isolate::IsStringLengthOverflowIntact() {
- Cell* string_length_cell = heap()->string_length_protector();
+ Cell string_length_cell = heap()->string_length_protector();
return string_length_cell->value() == Smi::FromInt(kProtectorValid);
}
-bool Isolate::IsArrayBufferNeuteringIntact() {
- PropertyCell* buffer_neutering = heap()->array_buffer_neutering_protector();
- return buffer_neutering->value() == Smi::FromInt(kProtectorValid);
+bool Isolate::IsArrayBufferDetachingIntact() {
+ PropertyCell buffer_detaching = heap()->array_buffer_detaching_protector();
+ return buffer_detaching->value() == Smi::FromInt(kProtectorValid);
}
bool Isolate::IsArrayIteratorLookupChainIntact() {
- PropertyCell* array_iterator_cell = heap()->array_iterator_protector();
+ PropertyCell array_iterator_cell = heap()->array_iterator_protector();
return array_iterator_cell->value() == Smi::FromInt(kProtectorValid);
}
+bool Isolate::IsMapIteratorLookupChainIntact() {
+ PropertyCell map_iterator_cell = heap()->map_iterator_protector();
+ return map_iterator_cell->value() == Smi::FromInt(kProtectorValid);
+}
+
+bool Isolate::IsSetIteratorLookupChainIntact() {
+ PropertyCell set_iterator_cell = heap()->set_iterator_protector();
+ return set_iterator_cell->value() == Smi::FromInt(kProtectorValid);
+}
+
bool Isolate::IsStringIteratorLookupChainIntact() {
- PropertyCell* string_iterator_cell = heap()->string_iterator_protector();
+ PropertyCell string_iterator_cell = heap()->string_iterator_protector();
return string_iterator_cell->value() == Smi::FromInt(kProtectorValid);
}
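
Every Is*Intact() accessor in this hunk follows the same pattern: load a cell and compare its value against kProtectorValid. A simplified standalone sketch, with Smi and PropertyCell as stand-ins rather than the real V8 classes:

#include <cstdint>

constexpr int kProtectorValid = 1;

// Simplified stand-ins for the V8 types used by the checks above.
struct Smi {
  intptr_t value;
  static Smi FromInt(int i) { return Smi{i}; }
  bool operator==(const Smi& other) const { return value == other.value; }
};

struct PropertyCell {
  Smi value_;
  Smi value() const { return value_; }
};

// The protector is "intact" while its cell still holds kProtectorValid;
// any code path that invalidates the fast path overwrites the cell.
bool IsProtectorIntact(const PropertyCell& cell) {
  return cell.value() == Smi::FromInt(kProtectorValid);
}
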
diff --git a/deps/v8/src/isolate.cc b/deps/v8/src/isolate.cc
index e6a9e95a2f..8549c08b0b 100644
--- a/deps/v8/src/isolate.cc
+++ b/deps/v8/src/isolate.cc
@@ -8,13 +8,13 @@
#include <atomic>
#include <fstream> // NOLINT(readability/streams)
+#include <memory>
#include <sstream>
#include <unordered_map>
#include "src/api-inl.h"
#include "src/assembler-inl.h"
#include "src/ast/ast-value-factory.h"
-#include "src/ast/context-slot-cache.h"
#include "src/ast/scopes.h"
#include "src/base/adapters.h"
#include "src/base/hashmap.h"
@@ -22,38 +22,45 @@
#include "src/base/sys-info.h"
#include "src/base/utils/random-number-generator.h"
#include "src/bootstrapper.h"
+#include "src/builtins/builtins-promise.h"
#include "src/builtins/constants-table-builder.h"
#include "src/cancelable-task.h"
-#include "src/code-stubs.h"
#include "src/compilation-cache.h"
#include "src/compilation-statistics.h"
#include "src/compiler-dispatcher/compiler-dispatcher.h"
#include "src/compiler-dispatcher/optimizing-compile-dispatcher.h"
+#include "src/date.h"
#include "src/debug/debug-frames.h"
#include "src/debug/debug.h"
#include "src/deoptimizer.h"
#include "src/elements.h"
#include "src/frames-inl.h"
#include "src/ic/stub-cache.h"
-#include "src/instruction-stream.h"
#include "src/interpreter/interpreter.h"
#include "src/isolate-inl.h"
#include "src/libsampler/sampler.h"
#include "src/log.h"
#include "src/messages.h"
+#include "src/microtask-queue.h"
#include "src/objects/frame-array-inl.h"
#include "src/objects/hash-table-inl.h"
#include "src/objects/js-array-inl.h"
#include "src/objects/js-generator-inl.h"
#include "src/objects/module-inl.h"
#include "src/objects/promise-inl.h"
+#include "src/objects/slots.h"
+#include "src/objects/smi.h"
#include "src/objects/stack-frame-info-inl.h"
+#include "src/ostreams.h"
#include "src/profiler/tracing-cpu-profiler.h"
#include "src/prototype.h"
+#include "src/ptr-compr.h"
#include "src/regexp/regexp-stack.h"
#include "src/runtime-profiler.h"
#include "src/setup-isolate.h"
#include "src/simulator.h"
+#include "src/snapshot/embedded-data.h"
+#include "src/snapshot/embedded-file-writer.h"
#include "src/snapshot/startup-deserializer.h"
#include "src/tracing/tracing-category-observer.h"
#include "src/trap-handler/trap-handler.h"
@@ -67,9 +74,12 @@
#include "src/wasm/wasm-objects.h"
#include "src/zone/accounting-allocator.h"
#ifdef V8_INTL_SUPPORT
-#include "unicode/regex.h"
+#include "unicode/uobject.h"
#endif // V8_INTL_SUPPORT
+extern "C" const uint8_t* v8_Default_embedded_blob_;
+extern "C" uint32_t v8_Default_embedded_blob_size_;
+
namespace v8 {
namespace internal {
@@ -85,14 +95,15 @@ namespace internal {
#define TRACE_ISOLATE(tag)
#endif
-base::Atomic32 ThreadId::highest_thread_id_ = 0;
-
-extern const uint8_t* DefaultEmbeddedBlob();
-extern uint32_t DefaultEmbeddedBlobSize();
+const uint8_t* DefaultEmbeddedBlob() { return v8_Default_embedded_blob_; }
+uint32_t DefaultEmbeddedBlobSize() { return v8_Default_embedded_blob_size_; }
#ifdef V8_MULTI_SNAPSHOTS
-extern const uint8_t* TrustedEmbeddedBlob();
-extern uint32_t TrustedEmbeddedBlobSize();
+extern "C" const uint8_t* v8_Trusted_embedded_blob_;
+extern "C" uint32_t v8_Trusted_embedded_blob_size_;
+
+const uint8_t* TrustedEmbeddedBlob() { return v8_Trusted_embedded_blob_; }
+uint32_t TrustedEmbeddedBlobSize() { return v8_Trusted_embedded_blob_size_; }
#endif
namespace {
@@ -106,24 +117,119 @@ namespace {
std::atomic<const uint8_t*> current_embedded_blob_(nullptr);
std::atomic<uint32_t> current_embedded_blob_size_(0);
+
+// The various workflows around embedded snapshots are fairly complex. We need
+// to support plain old snapshot builds, nosnap builds, and the requirements of
+// subtly different serialization tests. There are two related knobs to twiddle:
+//
+// - The default embedded blob may be overridden by setting the sticky embedded
+// blob. This is set automatically whenever we create a new embedded blob.
+//
+// - Lifecycle management can be either manual or set to refcounting.
+//
+// A few situations to demonstrate their use:
+//
+// - A plain old snapshot build neither overrides the default blob nor
+// refcounts.
+//
+// - mksnapshot sets the sticky blob and manually frees the embedded
+// blob once done.
+//
+// - Most serializer tests do the same.
+//
+// - Nosnapshot builds set the sticky blob and enable refcounting.
+
+// This mutex protects access to the following variables:
+// - sticky_embedded_blob_
+// - sticky_embedded_blob_size_
+// - enable_embedded_blob_refcounting_
+// - current_embedded_blob_refs_
+base::LazyMutex current_embedded_blob_refcount_mutex_ = LAZY_MUTEX_INITIALIZER;
+
+const uint8_t* sticky_embedded_blob_ = nullptr;
+uint32_t sticky_embedded_blob_size_ = 0;
+
+bool enable_embedded_blob_refcounting_ = true;
+int current_embedded_blob_refs_ = 0;
+
+const uint8_t* StickyEmbeddedBlob() { return sticky_embedded_blob_; }
+uint32_t StickyEmbeddedBlobSize() { return sticky_embedded_blob_size_; }
+
+void SetStickyEmbeddedBlob(const uint8_t* blob, uint32_t blob_size) {
+ sticky_embedded_blob_ = blob;
+ sticky_embedded_blob_size_ = blob_size;
+}
+
} // namespace
+void DisableEmbeddedBlobRefcounting() {
+ base::MutexGuard guard(current_embedded_blob_refcount_mutex_.Pointer());
+ enable_embedded_blob_refcounting_ = false;
+}
+
+void FreeCurrentEmbeddedBlob() {
+ CHECK(!enable_embedded_blob_refcounting_);
+ base::MutexGuard guard(current_embedded_blob_refcount_mutex_.Pointer());
+
+ if (StickyEmbeddedBlob() == nullptr) return;
+
+ CHECK_EQ(StickyEmbeddedBlob(), Isolate::CurrentEmbeddedBlob());
+
+ InstructionStream::FreeOffHeapInstructionStream(
+ const_cast<uint8_t*>(Isolate::CurrentEmbeddedBlob()),
+ Isolate::CurrentEmbeddedBlobSize());
+
+ current_embedded_blob_.store(nullptr, std::memory_order_relaxed);
+ current_embedded_blob_size_.store(0, std::memory_order_relaxed);
+ sticky_embedded_blob_ = nullptr;
+ sticky_embedded_blob_size_ = 0;
+}
+
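A hedged model of the lifecycle rules spelled out in the comment block above; the globals mirror the guarded variables, but the two functions are illustrative simplifications rather than actual V8 code:

#include <mutex>

static std::mutex g_blob_mutex;
static const unsigned char* g_sticky_blob = nullptr;
static bool g_refcounting_enabled = true;
static int g_blob_refs = 0;

static void FreeBlob(const unsigned char*) { /* release off-heap code */ }

// An isolate attaches to the current blob.
void AddBlobRef() {
  std::lock_guard<std::mutex> lock(g_blob_mutex);
  if (g_refcounting_enabled) ++g_blob_refs;
}

// An isolate detaches. In refcounting mode (nosnap builds) the last
// reference frees a sticky blob; in manual mode (mksnapshot, most
// serializer tests) the creator calls FreeCurrentEmbeddedBlob() itself.
void ReleaseBlobRef(const unsigned char* blob) {
  std::lock_guard<std::mutex> lock(g_blob_mutex);
  if (!g_refcounting_enabled) return;
  if (--g_blob_refs == 0 && blob != nullptr && blob == g_sticky_blob) {
    FreeBlob(blob);
    g_sticky_blob = nullptr;
  }
}
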
+// static
+bool Isolate::CurrentEmbeddedBlobIsBinaryEmbedded() {
+ // In some situations, we must be able to rely on the embedded blob being
+ // immortal immovable. This is the case if the blob is binary-embedded.
+ // See blob lifecycle controls above for descriptions of when the current
+ // embedded blob may change (e.g. in tests or mksnapshot). If the blob is
+ // binary-embedded, it is immortal immovable.
+ const uint8_t* blob =
+ current_embedded_blob_.load(std::memory_order::memory_order_relaxed);
+ if (blob == nullptr) return false;
+#ifdef V8_MULTI_SNAPSHOTS
+ if (blob == TrustedEmbeddedBlob()) return true;
+#endif
+ return blob == DefaultEmbeddedBlob();
+}
+
void Isolate::SetEmbeddedBlob(const uint8_t* blob, uint32_t blob_size) {
+ CHECK_NOT_NULL(blob);
+
embedded_blob_ = blob;
embedded_blob_size_ = blob_size;
current_embedded_blob_.store(blob, std::memory_order_relaxed);
current_embedded_blob_size_.store(blob_size, std::memory_order_relaxed);
#ifdef DEBUG
- if (blob != nullptr) {
- // Verify that the contents of the embedded blob are unchanged from
- // serialization-time, just to ensure the compiler isn't messing with us.
- EmbeddedData d = EmbeddedData::FromBlob();
- CHECK_EQ(d.Hash(), d.CreateHash());
- }
+ // Verify that the contents of the embedded blob are unchanged from
+ // serialization-time, just to ensure the compiler isn't messing with us.
+ EmbeddedData d = EmbeddedData::FromBlob();
+ CHECK_EQ(d.Hash(), d.CreateHash());
#endif // DEBUG
}
+void Isolate::ClearEmbeddedBlob() {
+ CHECK(enable_embedded_blob_refcounting_);
+ CHECK_EQ(embedded_blob_, CurrentEmbeddedBlob());
+ CHECK_EQ(embedded_blob_, StickyEmbeddedBlob());
+
+ embedded_blob_ = nullptr;
+ embedded_blob_size_ = 0;
+ current_embedded_blob_.store(nullptr, std::memory_order_relaxed);
+ current_embedded_blob_size_.store(0, std::memory_order_relaxed);
+ sticky_embedded_blob_ = nullptr;
+ sticky_embedded_blob_size_ = 0;
+}
+
const uint8_t* Isolate::embedded_blob() const { return embedded_blob_; }
uint32_t Isolate::embedded_blob_size() const { return embedded_blob_size_; }
@@ -138,21 +244,6 @@ uint32_t Isolate::CurrentEmbeddedBlobSize() {
std::memory_order::memory_order_relaxed);
}
-int ThreadId::AllocateThreadId() {
- int new_id = base::Relaxed_AtomicIncrement(&highest_thread_id_, 1);
- return new_id;
-}
-
-
-int ThreadId::GetCurrentThreadId() {
- int thread_id = base::Thread::GetThreadLocalInt(Isolate::thread_id_key_);
- if (thread_id == 0) {
- thread_id = AllocateThreadId();
- base::Thread::SetThreadLocalInt(Isolate::thread_id_key_, thread_id);
- }
- return thread_id;
-}
-
void ThreadLocalTop::Initialize(Isolate* isolate) {
*this = ThreadLocalTop();
isolate_ = isolate;
@@ -171,7 +262,6 @@ void ThreadLocalTop::Free() {
base::Thread::LocalStorageKey Isolate::isolate_key_;
-base::Thread::LocalStorageKey Isolate::thread_id_key_;
base::Thread::LocalStorageKey Isolate::per_isolate_thread_data_key_;
base::Atomic32 Isolate::isolate_counter_ = 0;
#if DEBUG
@@ -183,7 +273,7 @@ Isolate::PerIsolateThreadData*
ThreadId thread_id = ThreadId::Current();
PerIsolateThreadData* per_thread = nullptr;
{
- base::LockGuard<base::Mutex> lock_guard(&thread_data_table_mutex_);
+ base::MutexGuard lock_guard(&thread_data_table_mutex_);
per_thread = thread_data_table_.Lookup(thread_id);
if (per_thread == nullptr) {
per_thread = new PerIsolateThreadData(this, thread_id);
@@ -196,11 +286,10 @@ Isolate::PerIsolateThreadData*
void Isolate::DiscardPerThreadDataForThisThread() {
- int thread_id_int = base::Thread::GetThreadLocalInt(Isolate::thread_id_key_);
- if (thread_id_int) {
- ThreadId thread_id = ThreadId(thread_id_int);
+ ThreadId thread_id = ThreadId::TryGetCurrent();
+ if (thread_id.IsValid()) {
DCHECK(!thread_manager_->mutex_owner_.Equals(thread_id));
- base::LockGuard<base::Mutex> lock_guard(&thread_data_table_mutex_);
+ base::MutexGuard lock_guard(&thread_data_table_mutex_);
PerIsolateThreadData* per_thread = thread_data_table_.Lookup(thread_id);
if (per_thread) {
DCHECK(!per_thread->thread_state_);
@@ -220,7 +309,7 @@ Isolate::PerIsolateThreadData* Isolate::FindPerThreadDataForThread(
ThreadId thread_id) {
PerIsolateThreadData* per_thread = nullptr;
{
- base::LockGuard<base::Mutex> lock_guard(&thread_data_table_mutex_);
+ base::MutexGuard lock_guard(&thread_data_table_mutex_);
per_thread = thread_data_table_.Lookup(thread_id);
}
return per_thread;
@@ -232,7 +321,6 @@ void Isolate::InitializeOncePerProcess() {
#if DEBUG
base::Relaxed_Store(&isolate_key_created_, 1);
#endif
- thread_id_key_ = base::Thread::CreateThreadLocalKey();
per_isolate_thread_data_key_ = base::Thread::CreateThreadLocalKey();
}
@@ -254,18 +342,23 @@ void Isolate::IterateThread(ThreadVisitor* v, char* t) {
void Isolate::Iterate(RootVisitor* v, ThreadLocalTop* thread) {
// Visit the roots from the top for a given thread.
- v->VisitRootPointer(Root::kTop, nullptr, &thread->pending_exception_);
- v->VisitRootPointer(Root::kTop, nullptr, &thread->pending_message_obj_);
v->VisitRootPointer(Root::kTop, nullptr,
- bit_cast<Object**>(&(thread->context_)));
- v->VisitRootPointer(Root::kTop, nullptr, &thread->scheduled_exception_);
+ FullObjectSlot(&thread->pending_exception_));
+ v->VisitRootPointer(Root::kTop, nullptr,
+ FullObjectSlot(&thread->pending_message_obj_));
+ v->VisitRootPointer(Root::kTop, nullptr, FullObjectSlot(&thread->context_));
+ v->VisitRootPointer(Root::kTop, nullptr,
+ FullObjectSlot(&thread->scheduled_exception_));
for (v8::TryCatch* block = thread->try_catch_handler(); block != nullptr;
block = block->next_) {
- v->VisitRootPointer(Root::kTop, nullptr,
- bit_cast<Object**>(&(block->exception_)));
- v->VisitRootPointer(Root::kTop, nullptr,
- bit_cast<Object**>(&(block->message_obj_)));
+ // TODO(3770): Make TryCatch::exception_ an Address (and message_obj_ too).
+ v->VisitRootPointer(
+ Root::kTop, nullptr,
+ FullObjectSlot(reinterpret_cast<Address>(&(block->exception_))));
+ v->VisitRootPointer(
+ Root::kTop, nullptr,
+ FullObjectSlot(reinterpret_cast<Address>(&(block->message_obj_))));
}
// Iterate over pointers on native execution stack.
@@ -288,7 +381,10 @@ void Isolate::IterateDeferredHandles(RootVisitor* visitor) {
#ifdef DEBUG
-bool Isolate::IsDeferredHandle(Object** handle) {
+bool Isolate::IsDeferredHandle(Address* handle) {
+ // Comparing unrelated pointers (not from the same array) is undefined
+ // behavior, so cast to Address before making arbitrary comparisons.
+ Address handle_as_address = reinterpret_cast<Address>(handle);
// Each DeferredHandles instance keeps the handles to one job in the
// concurrent recompilation queue, containing a list of blocks. Each block
// contains kHandleBlockSize handles except for the first block, which may
@@ -297,11 +393,14 @@ bool Isolate::IsDeferredHandle(Object** handle) {
// belongs to one of the blocks. If so, it is deferred.
for (DeferredHandles* deferred = deferred_handles_head_; deferred != nullptr;
deferred = deferred->next_) {
- std::vector<Object**>* blocks = &deferred->blocks_;
+ std::vector<Address*>* blocks = &deferred->blocks_;
for (size_t i = 0; i < blocks->size(); i++) {
- Object** block_limit = (i == 0) ? deferred->first_block_limit_
+ Address* block_limit = (i == 0) ? deferred->first_block_limit_
: blocks->at(i) + kHandleBlockSize;
- if (blocks->at(i) <= handle && handle < block_limit) return true;
+ if (reinterpret_cast<Address>(blocks->at(i)) <= handle_as_address &&
+ handle_as_address < reinterpret_cast<Address>(block_limit)) {
+ return true;
+ }
}
}
return false;
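
The Address cast above exists because C++ leaves relational comparisons between pointers into unrelated arrays undefined; the well-defined form of such a range check looks roughly like this:

#include <cstdint>

// Relationally comparing raw pointers that do not point into the same
// array is undefined behavior; converting both sides to an integer type
// first makes the range check well-defined.
bool InBlock(const void* p, const void* block_start, const void* block_limit) {
  const uintptr_t addr = reinterpret_cast<uintptr_t>(p);
  return reinterpret_cast<uintptr_t>(block_start) <= addr &&
         addr < reinterpret_cast<uintptr_t>(block_limit);
}
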
@@ -384,7 +483,8 @@ StackTraceFailureMessage::StackTraceFailureMessage(Isolate* isolate, void* ptr1,
StackFrameIterator it(isolate);
for (; !it.done() && i < code_objects_length; it.Advance()) {
if (it.frame()->type() == StackFrame::INTERNAL) continue;
- code_objects_[i++] = it.frame()->unchecked_code();
+ code_objects_[i++] =
+ reinterpret_cast<void*>(it.frame()->unchecked_code().ptr());
}
}
@@ -429,21 +529,29 @@ class FrameArrayBuilder {
offset, flags);
}
- bool AppendJavaScriptFrame(
+ void AppendPromiseAllFrame(Handle<Context> context, int offset) {
+ if (full()) return;
+ int flags = FrameArray::kIsAsync | FrameArray::kIsPromiseAll;
+
+ Handle<Context> native_context(context->native_context(), isolate_);
+ Handle<JSFunction> function(native_context->promise_all(), isolate_);
+ if (!IsVisibleInStackTrace(function)) return;
+
+ Handle<Object> receiver(native_context->promise_function(), isolate_);
+ Handle<AbstractCode> code(AbstractCode::cast(function->code()), isolate_);
+ elements_ = FrameArray::AppendJSFrame(elements_, receiver, function, code,
+ offset, flags);
+ }
+
+ void AppendJavaScriptFrame(
FrameSummary::JavaScriptFrameSummary const& summary) {
// Filter out internal frames that we do not want to show.
- if (!IsVisibleInStackTrace(summary.function())) return false;
+ if (!IsVisibleInStackTrace(summary.function())) return;
Handle<AbstractCode> abstract_code = summary.abstract_code();
const int offset = summary.code_offset();
- bool is_constructor = summary.is_constructor();
- // Help CallSite::IsConstructor correctly detect hand-written
- // construct stubs.
- if (abstract_code->IsCode() &&
- Code::cast(*abstract_code)->is_construct_stub()) {
- is_constructor = true;
- }
+ const bool is_constructor = summary.is_constructor();
int flags = 0;
Handle<JSFunction> function = summary.function();
@@ -453,12 +561,11 @@ class FrameArrayBuilder {
elements_ = FrameArray::AppendJSFrame(
elements_, TheHoleToUndefined(isolate_, summary.receiver()), function,
abstract_code, offset, flags);
- return true;
}
- bool AppendWasmCompiledFrame(
+ void AppendWasmCompiledFrame(
FrameSummary::WasmCompiledFrameSummary const& summary) {
- if (summary.code()->kind() != wasm::WasmCode::kFunction) return false;
+ if (summary.code()->kind() != wasm::WasmCode::kFunction) return;
Handle<WasmInstanceObject> instance = summary.wasm_instance();
int flags = 0;
if (instance->module_object()->is_asm_js()) {
@@ -473,10 +580,9 @@ class FrameArrayBuilder {
elements_ = FrameArray::AppendWasmFrame(
elements_, instance, summary.function_index(), summary.code(),
summary.code_offset(), flags);
- return true;
}
- bool AppendWasmInterpretedFrame(
+ void AppendWasmInterpretedFrame(
FrameSummary::WasmInterpretedFrameSummary const& summary) {
Handle<WasmInstanceObject> instance = summary.wasm_instance();
int flags = FrameArray::kIsWasmInterpretedFrame;
@@ -484,14 +590,13 @@ class FrameArrayBuilder {
elements_ = FrameArray::AppendWasmFrame(elements_, instance,
summary.function_index(), {},
summary.byte_offset(), flags);
- return true;
}
- bool AppendBuiltinExitFrame(BuiltinExitFrame* exit_frame) {
+ void AppendBuiltinExitFrame(BuiltinExitFrame* exit_frame) {
Handle<JSFunction> function = handle(exit_frame->function(), isolate_);
// Filter out internal frames that we do not want to show.
- if (!IsVisibleInStackTrace(function)) return false;
+ if (!IsVisibleInStackTrace(function)) return;
Handle<Object> receiver(exit_frame->receiver(), isolate_);
Handle<Code> code(exit_frame->LookupCode(), isolate_);
@@ -505,8 +610,6 @@ class FrameArrayBuilder {
elements_ = FrameArray::AppendJSFrame(elements_, receiver, function,
Handle<AbstractCode>::cast(code),
offset, flags);
-
- return true;
}
bool full() { return elements_->FrameCount() >= limit_; }
@@ -610,18 +713,19 @@ bool GetStackTraceLimit(Isolate* isolate, int* result) {
bool NoExtension(const v8::FunctionCallbackInfo<v8::Value>&) { return false; }
-bool IsBuiltinFunction(Isolate* isolate, HeapObject* object,
+bool IsBuiltinFunction(Isolate* isolate, HeapObject object,
Builtins::Name builtin_index) {
if (!object->IsJSFunction()) return false;
- JSFunction* const function = JSFunction::cast(object);
+ JSFunction const function = JSFunction::cast(object);
return function->code() == isolate->builtins()->builtin(builtin_index);
}
void CaptureAsyncStackTrace(Isolate* isolate, Handle<JSPromise> promise,
FrameArrayBuilder* builder) {
- CHECK_EQ(Promise::kPending, promise->status());
-
while (!builder->full()) {
+ // Check that the {promise} is not settled.
+ if (promise->status() != Promise::kPending) return;
+
// Check that we have exactly one PromiseReaction on the {promise}.
if (!promise->reactions()->IsPromiseReaction()) return;
Handle<PromiseReaction> reaction(
@@ -648,32 +752,40 @@ void CaptureAsyncStackTrace(Isolate* isolate, Handle<JSPromise> promise,
builder->AppendAsyncFrame(generator_object);
// Try to continue from here.
- Handle<JSFunction> function(generator_object->function(), isolate);
- Handle<SharedFunctionInfo> shared(function->shared(), isolate);
- if (IsAsyncGeneratorFunction(shared->kind())) {
- Handle<Object> dot_generator_object(
- generator_object->parameters_and_registers()->get(
- DeclarationScope::kGeneratorObjectVarIndex +
- shared->scope_info()->ParameterCount()),
- isolate);
- if (!dot_generator_object->IsJSAsyncGeneratorObject()) return;
+ if (generator_object->IsJSAsyncFunctionObject()) {
+ Handle<JSAsyncFunctionObject> async_function_object =
+ Handle<JSAsyncFunctionObject>::cast(generator_object);
+ promise = handle(async_function_object->promise(), isolate);
+ } else {
Handle<JSAsyncGeneratorObject> async_generator_object =
- Handle<JSAsyncGeneratorObject>::cast(dot_generator_object);
+ Handle<JSAsyncGeneratorObject>::cast(generator_object);
+ if (async_generator_object->queue()->IsUndefined(isolate)) return;
Handle<AsyncGeneratorRequest> async_generator_request(
AsyncGeneratorRequest::cast(async_generator_object->queue()),
isolate);
promise = handle(JSPromise::cast(async_generator_request->promise()),
isolate);
- } else {
- CHECK(IsAsyncFunction(shared->kind()));
- Handle<Object> dot_promise(
- generator_object->parameters_and_registers()->get(
- DeclarationScope::kPromiseVarIndex +
- shared->scope_info()->ParameterCount()),
- isolate);
- if (!dot_promise->IsJSPromise()) return;
- promise = Handle<JSPromise>::cast(dot_promise);
}
+ } else if (IsBuiltinFunction(isolate, reaction->fulfill_handler(),
+ Builtins::kPromiseAllResolveElementClosure)) {
+ Handle<JSFunction> function(JSFunction::cast(reaction->fulfill_handler()),
+ isolate);
+ Handle<Context> context(function->context(), isolate);
+
+ // We store the offset of the promise into the {function}'s
+ // hash field for promise resolve element callbacks.
+ int const offset = Smi::ToInt(Smi::cast(function->GetIdentityHash())) - 1;
+ builder->AppendPromiseAllFrame(context, offset);
+
+      // Now peek into the Promise.all() resolve element context to
+ // find the promise capability that's being resolved when all
+ // the concurrent promises resolve.
+ int const index =
+ PromiseBuiltins::kPromiseAllResolveElementCapabilitySlot;
+ Handle<PromiseCapability> capability(
+ PromiseCapability::cast(context->get(index)), isolate);
+ if (!capability->promise()->IsJSPromise()) return;
+ promise = handle(JSPromise::cast(capability->promise()), isolate);
} else {
// We have some generic promise chain here, so try to
// continue with the chained promise on the reaction
@@ -682,11 +794,15 @@ void CaptureAsyncStackTrace(Isolate* isolate, Handle<JSPromise> promise,
reaction->promise_or_capability(), isolate);
if (promise_or_capability->IsJSPromise()) {
promise = Handle<JSPromise>::cast(promise_or_capability);
- } else {
+ } else if (promise_or_capability->IsPromiseCapability()) {
Handle<PromiseCapability> capability =
Handle<PromiseCapability>::cast(promise_or_capability);
if (!capability->promise()->IsJSPromise()) return;
promise = handle(JSPromise::cast(capability->promise()), isolate);
+ } else {
+ // Otherwise the {promise_or_capability} must be undefined here.
+ CHECK(promise_or_capability->IsUndefined(isolate));
+ return;
}
}
}
@@ -707,8 +823,6 @@ Handle<Object> Isolate::CaptureSimpleStackTrace(Handle<JSReceiver> error_object,
// Build the regular stack trace, and remember the last relevant
// frame ID and inlined index (for the async stack trace handling
// below, which starts from this last frame).
- int last_frame_index = 0;
- StackFrame::Id last_frame_id = StackFrame::NO_ID;
for (StackFrameIterator it(this); !it.done() && !builder.full();
it.Advance()) {
StackFrame* const frame = it.frame();
@@ -727,37 +841,23 @@ Handle<Object> Isolate::CaptureSimpleStackTrace(Handle<JSReceiver> error_object,
for (size_t i = frames.size(); i-- != 0 && !builder.full();) {
const auto& summary = frames[i];
if (summary.IsJavaScript()) {
- //====================================================================
+ //=========================================================
// Handle a JavaScript frame.
- //====================================================================
+ //=========================================================
auto const& java_script = summary.AsJavaScript();
- if (builder.AppendJavaScriptFrame(java_script)) {
- if (IsAsyncFunction(java_script.function()->shared()->kind())) {
- last_frame_id = frame->id();
- last_frame_index = static_cast<int>(i);
- } else {
- last_frame_id = StackFrame::NO_ID;
- last_frame_index = 0;
- }
- }
+ builder.AppendJavaScriptFrame(java_script);
} else if (summary.IsWasmCompiled()) {
- //====================================================================
+ //=========================================================
// Handle a WASM compiled frame.
- //====================================================================
+ //=========================================================
auto const& wasm_compiled = summary.AsWasmCompiled();
- if (builder.AppendWasmCompiledFrame(wasm_compiled)) {
- last_frame_id = StackFrame::NO_ID;
- last_frame_index = 0;
- }
+ builder.AppendWasmCompiledFrame(wasm_compiled);
} else if (summary.IsWasmInterpreted()) {
- //====================================================================
+ //=========================================================
// Handle a WASM interpreted frame.
- //====================================================================
+ //=========================================================
auto const& wasm_interpreted = summary.AsWasmInterpreted();
- if (builder.AppendWasmInterpretedFrame(wasm_interpreted)) {
- last_frame_id = StackFrame::NO_ID;
- last_frame_index = 0;
- }
+ builder.AppendWasmInterpretedFrame(wasm_interpreted);
}
}
break;
@@ -766,10 +866,7 @@ Handle<Object> Isolate::CaptureSimpleStackTrace(Handle<JSReceiver> error_object,
case StackFrame::BUILTIN_EXIT:
// BuiltinExitFrames are not standard frames, so they do not have
// Summarize(). However, they may have one JS frame worth showing.
- if (builder.AppendBuiltinExitFrame(BuiltinExitFrame::cast(frame))) {
- last_frame_id = StackFrame::NO_ID;
- last_frame_index = 0;
- }
+ builder.AppendBuiltinExitFrame(BuiltinExitFrame::cast(frame));
break;
default:
@@ -777,52 +874,59 @@ Handle<Object> Isolate::CaptureSimpleStackTrace(Handle<JSReceiver> error_object,
}
}
- // If --async-stack-traces is enabled, and we ended on a regular JavaScript
- // frame above, we can enrich the stack trace with async frames (if this
- // last frame corresponds to an async function).
- if (FLAG_async_stack_traces && last_frame_id != StackFrame::NO_ID) {
- StackFrameIterator it(this);
- while (it.frame()->id() != last_frame_id) it.Advance();
- FrameInspector inspector(StandardFrame::cast(it.frame()), last_frame_index,
- this);
- FunctionKind const kind = inspector.GetFunction()->shared()->kind();
- if (IsAsyncGeneratorFunction(kind)) {
- Handle<Object> const dot_generator_object =
- inspector.GetExpression(DeclarationScope::kGeneratorObjectVarIndex);
- if (dot_generator_object->IsUndefined(this)) {
- // The .generator_object was not yet initialized (i.e. we see a
- // really early exception in the setup of the async generator).
+  // If --async-stack-traces is enabled and the "current microtask" is a
+ // PromiseReactionJobTask, we try to enrich the stack trace with async
+ // frames.
+ if (FLAG_async_stack_traces) {
+ Handle<Object> current_microtask = factory()->current_microtask();
+ if (current_microtask->IsPromiseReactionJobTask()) {
+ Handle<PromiseReactionJobTask> promise_reaction_job_task =
+ Handle<PromiseReactionJobTask>::cast(current_microtask);
+ // Check if the {reaction} has one of the known async function or
+ // async generator continuations as its fulfill handler.
+ if (IsBuiltinFunction(this, promise_reaction_job_task->handler(),
+ Builtins::kAsyncFunctionAwaitResolveClosure) ||
+ IsBuiltinFunction(this, promise_reaction_job_task->handler(),
+ Builtins::kAsyncGeneratorAwaitResolveClosure) ||
+ IsBuiltinFunction(this, promise_reaction_job_task->handler(),
+ Builtins::kAsyncGeneratorYieldResolveClosure)) {
+        // Now peek into the handlers' AwaitContext to get to
+ // the JSGeneratorObject for the async function.
+ Handle<Context> context(
+ JSFunction::cast(promise_reaction_job_task->handler())->context(),
+ this);
+ Handle<JSGeneratorObject> generator_object(
+ JSGeneratorObject::cast(context->extension()), this);
+ if (generator_object->is_executing()) {
+ if (generator_object->IsJSAsyncFunctionObject()) {
+ Handle<JSAsyncFunctionObject> async_function_object =
+ Handle<JSAsyncFunctionObject>::cast(generator_object);
+ Handle<JSPromise> promise(async_function_object->promise(), this);
+ CaptureAsyncStackTrace(this, promise, &builder);
+ } else {
+ Handle<JSAsyncGeneratorObject> async_generator_object =
+ Handle<JSAsyncGeneratorObject>::cast(generator_object);
+ Handle<AsyncGeneratorRequest> async_generator_request(
+ AsyncGeneratorRequest::cast(async_generator_object->queue()),
+ this);
+ Handle<JSPromise> promise(
+ JSPromise::cast(async_generator_request->promise()), this);
+ CaptureAsyncStackTrace(this, promise, &builder);
+ }
+ }
} else {
- // Check if there's a pending async request on the generator object.
- Handle<JSAsyncGeneratorObject> async_generator_object =
- Handle<JSAsyncGeneratorObject>::cast(dot_generator_object);
- if (!async_generator_object->queue()->IsUndefined(this)) {
- // Take the promise from the first async generatot request.
- Handle<AsyncGeneratorRequest> request(
- AsyncGeneratorRequest::cast(async_generator_object->queue()),
- this);
-
- // We can start collecting an async stack trace from the
- // promise on the {request}.
- Handle<JSPromise> promise(JSPromise::cast(request->promise()), this);
+ // The {promise_reaction_job_task} doesn't belong to an await (or
+        // yield inside an async generator), but we might still be able to
+        // find an async frame if we follow the chain of promises on the
+        // {promise_reaction_job_task}.
+ Handle<HeapObject> promise_or_capability(
+ promise_reaction_job_task->promise_or_capability(), this);
+ if (promise_or_capability->IsJSPromise()) {
+ Handle<JSPromise> promise =
+ Handle<JSPromise>::cast(promise_or_capability);
CaptureAsyncStackTrace(this, promise, &builder);
}
}
- } else {
- DCHECK(IsAsyncFunction(kind));
- Handle<Object> const dot_promise =
- inspector.GetExpression(DeclarationScope::kPromiseVarIndex);
- if (dot_promise->IsJSPromise()) {
- // We can start collecting an async stack trace from .promise here.
- CaptureAsyncStackTrace(this, Handle<JSPromise>::cast(dot_promise),
- &builder);
- } else {
- // If .promise was not yet initialized (i.e. we see a really
- // early exception in the setup of the function), it holds
- // the value undefined. Sanity check here to make sure that
- // we're not peaking into the completely wrong stack slot.
- CHECK(dot_promise->IsUndefined(this));
- }
}
}
@@ -838,11 +942,10 @@ MaybeHandle<JSReceiver> Isolate::CaptureAndSetDetailedStackTrace(
Handle<FixedArray> stack_trace = CaptureCurrentStackTrace(
stack_trace_for_uncaught_exceptions_frame_limit_,
stack_trace_for_uncaught_exceptions_options_);
- RETURN_ON_EXCEPTION(
- this,
- JSReceiver::SetProperty(this, error_object, key, stack_trace,
- LanguageMode::kStrict),
- JSReceiver);
+ RETURN_ON_EXCEPTION(this,
+ Object::SetProperty(this, error_object, key,
+ stack_trace, LanguageMode::kStrict),
+ JSReceiver);
}
return error_object;
}
@@ -854,11 +957,10 @@ MaybeHandle<JSReceiver> Isolate::CaptureAndSetSimpleStackTrace(
Handle<Name> key = factory()->stack_trace_symbol();
Handle<Object> stack_trace =
CaptureSimpleStackTrace(error_object, mode, caller);
- RETURN_ON_EXCEPTION(
- this,
- JSReceiver::SetProperty(this, error_object, key, stack_trace,
- LanguageMode::kStrict),
- JSReceiver);
+ RETURN_ON_EXCEPTION(this,
+ Object::SetProperty(this, error_object, key, stack_trace,
+ LanguageMode::kStrict),
+ JSReceiver);
return error_object;
}
@@ -883,7 +985,7 @@ Address Isolate::GetAbstractPC(int* line, int* column) {
DCHECK(!frame->is_builtin());
int position = frame->position();
- Object* maybe_script = frame->function()->shared()->script();
+ Object maybe_script = frame->function()->shared()->script();
if (maybe_script->IsScript()) {
Handle<Script> script(Script::cast(maybe_script), this);
Script::PositionInfo info;
@@ -898,8 +1000,7 @@ Address Isolate::GetAbstractPC(int* line, int* column) {
if (frame->is_interpreted()) {
InterpretedFrame* iframe = static_cast<InterpretedFrame*>(frame);
Address bytecode_start =
- reinterpret_cast<Address>(iframe->GetBytecodeArray()) - kHeapObjectTag +
- BytecodeArray::kHeaderSize;
+ iframe->GetBytecodeArray()->GetFirstBytecodeAddress();
return bytecode_start + iframe->GetBytecodeOffset();
}
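
GetFirstBytecodeAddress() encapsulates the tagged-pointer arithmetic that the removed expression spelled out. A tiny sketch with an illustrative header size (kHeapObjectTag really is 1; the header size varies by configuration):

#include <cstdint>
#include <cstdio>

int main() {
  using Address = uintptr_t;
  const Address kHeapObjectTag = 1;  // V8 tags heap pointers with 1
  const Address kHeaderSize = 16;    // illustrative BytecodeArray header size

  Address tagged_array = 0x2001;  // example tagged BytecodeArray pointer

  // Old form: strip the heap-object tag, then skip past the object header.
  Address first_bytecode = tagged_array - kHeapObjectTag + kHeaderSize;

  std::printf("first bytecode at %#zx\n",  // prints 0x2010
              static_cast<size_t>(first_bytecode));
  return 0;
}
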
@@ -1103,14 +1204,14 @@ void Isolate::ReportFailedAccessCheck(Handle<JSObject> receiver) {
}
DCHECK(receiver->IsAccessCheckNeeded());
- DCHECK(context());
+ DCHECK(!context().is_null());
// Get the data object from access check info.
HandleScope scope(this);
Handle<Object> data;
{ DisallowHeapAllocation no_gc;
- AccessCheckInfo* access_check_info = AccessCheckInfo::Get(this, receiver);
- if (!access_check_info) {
+ AccessCheckInfo access_check_info = AccessCheckInfo::Get(this, receiver);
+ if (access_check_info.is_null()) {
AllowHeapAllocation doesnt_matter_anymore;
return ScheduleThrow(
*factory()->NewTypeError(MessageTemplate::kNoAccess));
@@ -1138,13 +1239,13 @@ bool Isolate::MayAccess(Handle<Context> accessing_context,
DisallowHeapAllocation no_gc;
if (receiver->IsJSGlobalProxy()) {
- Object* receiver_context =
+ Object receiver_context =
JSGlobalProxy::cast(*receiver)->native_context();
if (!receiver_context->IsContext()) return false;
// Get the native context of current top context.
// avoid using Isolate::native_context() because it uses Handle.
- Context* native_context =
+ Context native_context =
accessing_context->global_object()->native_context();
if (receiver_context == native_context) return true;
@@ -1158,9 +1259,9 @@ bool Isolate::MayAccess(Handle<Context> accessing_context,
Handle<Object> data;
v8::AccessCheckCallback callback = nullptr;
{ DisallowHeapAllocation no_gc;
- AccessCheckInfo* access_check_info = AccessCheckInfo::Get(this, receiver);
- if (!access_check_info) return false;
- Object* fun_obj = access_check_info->callback();
+ AccessCheckInfo access_check_info = AccessCheckInfo::Get(this, receiver);
+ if (access_check_info.is_null()) return false;
+ Object fun_obj = access_check_info->callback();
callback = v8::ToCData<v8::AccessCheckCallback>(fun_obj);
data = handle(access_check_info->data(), this);
}
@@ -1175,8 +1276,7 @@ bool Isolate::MayAccess(Handle<Context> accessing_context,
}
}
-
-Object* Isolate::StackOverflow() {
+Object Isolate::StackOverflow() {
if (FLAG_abort_on_stack_or_string_length_overflow) {
FATAL("Aborting on stack overflow");
}
@@ -1186,7 +1286,7 @@ Object* Isolate::StackOverflow() {
Handle<JSFunction> fun = range_error_function();
Handle<Object> msg = factory()->NewStringFromAsciiChecked(
- MessageTemplate::TemplateString(MessageTemplate::kStackOverflow));
+ MessageFormatter::TemplateString(MessageTemplate::kStackOverflow));
Handle<Object> no_caller;
Handle<Object> exception;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
@@ -1205,12 +1305,10 @@ Object* Isolate::StackOverflow() {
return ReadOnlyRoots(heap()).exception();
}
-
-Object* Isolate::TerminateExecution() {
+Object Isolate::TerminateExecution() {
return Throw(ReadOnlyRoots(this).termination_exception(), nullptr);
}
-
void Isolate::CancelTerminateExecution() {
if (try_catch_handler()) {
try_catch_handler()->has_terminated_ = false;
@@ -1309,7 +1407,7 @@ void ReportBootstrappingException(Handle<Object> exception,
#endif
}
-Object* Isolate::Throw(Object* raw_exception, MessageLocation* location) {
+Object Isolate::Throw(Object raw_exception, MessageLocation* location) {
DCHECK(!has_pending_exception());
HandleScope scope(this);
@@ -1417,8 +1515,7 @@ Object* Isolate::Throw(Object* raw_exception, MessageLocation* location) {
return ReadOnlyRoots(heap()).exception();
}
-
-Object* Isolate::ReThrow(Object* exception) {
+Object Isolate::ReThrow(Object exception) {
DCHECK(!has_pending_exception());
// Set the exception being re-thrown.
@@ -1426,11 +1523,10 @@ Object* Isolate::ReThrow(Object* exception) {
return ReadOnlyRoots(heap()).exception();
}
+Object Isolate::UnwindAndFindHandler() {
+ Object exception = pending_exception();
-Object* Isolate::UnwindAndFindHandler() {
- Object* exception = pending_exception();
-
- auto FoundHandler = [&](Context* context, Address instruction_start,
+ auto FoundHandler = [&](Context context, Address instruction_start,
intptr_t handler_offset,
Address constant_pool_address, Address handler_sp,
Address handler_fp) {
@@ -1462,16 +1558,16 @@ Object* Isolate::UnwindAndFindHandler() {
switch (frame->type()) {
case StackFrame::ENTRY:
case StackFrame::CONSTRUCT_ENTRY: {
- // For JSEntryStub frames we always have a handler.
+ // For JSEntry frames we always have a handler.
StackHandler* handler = frame->top_handler();
// Restore the next handler.
- thread_local_top()->handler_ = handler->next()->address();
+ thread_local_top()->handler_ = handler->next_address();
// Gather information from the handler.
- Code* code = frame->LookupCode();
+ Code code = frame->LookupCode();
HandlerTable table(code);
- return FoundHandler(nullptr, code->InstructionStart(),
+ return FoundHandler(Context(), code->InstructionStart(),
table.LookupReturn(0), code->constant_pool(),
handler->address() + StackHandlerConstants::kSize,
0);
@@ -1492,7 +1588,7 @@ Object* Isolate::UnwindAndFindHandler() {
// argument slots on the stack are dropped as returning would.
Address return_sp = frame->fp() +
StandardFrameConstants::kFixedFrameSizeAboveFp -
- stack_slots * kPointerSize;
+ stack_slots * kSystemPointerSize;
// This is going to be handled by Wasm, so we need to set the TLS flag
// again. It was cleared above assuming the frame would be unwound.
@@ -1501,7 +1597,7 @@ Object* Isolate::UnwindAndFindHandler() {
// Gather information from the frame.
wasm::WasmCode* wasm_code =
wasm_engine()->code_manager()->LookupCode(frame->pc());
- return FoundHandler(nullptr, wasm_code->instruction_start(), offset,
+ return FoundHandler(Context(), wasm_code->instruction_start(), offset,
wasm_code->constant_pool(), return_sp, frame->fp());
}
@@ -1517,10 +1613,10 @@ Object* Isolate::UnwindAndFindHandler() {
// that argument slots on the stack are dropped as returning would.
Address return_sp = frame->fp() +
StandardFrameConstants::kFixedFrameSizeAboveFp -
- stack_slots * kPointerSize;
+ stack_slots * kSystemPointerSize;
// Gather information from the frame.
- Code* code = frame->LookupCode();
+ Code code = frame->LookupCode();
// TODO(bmeurer): Turbofanned BUILTIN frames appear as OPTIMIZED,
// but do not have a code kind of OPTIMIZED_FUNCTION.
@@ -1533,7 +1629,7 @@ Object* Isolate::UnwindAndFindHandler() {
set_deoptimizer_lazy_throw(true);
}
- return FoundHandler(nullptr, code->InstructionStart(), offset,
+ return FoundHandler(Context(), code->InstructionStart(), offset,
code->constant_pool(), return_sp, frame->fp());
}
@@ -1541,7 +1637,7 @@ Object* Isolate::UnwindAndFindHandler() {
// Some stubs are able to handle exceptions.
if (!catchable_by_js) break;
StubFrame* stub_frame = static_cast<StubFrame*>(frame);
- Code* code = stub_frame->LookupCode();
+ Code code = stub_frame->LookupCode();
if (!code->IsCode() || code->kind() != Code::BUILTIN ||
!code->handler_table_offset() || !code->is_turbofanned()) {
break;
@@ -1555,9 +1651,9 @@ Object* Isolate::UnwindAndFindHandler() {
// that argument slots on the stack are dropped as returning would.
Address return_sp = frame->fp() +
StandardFrameConstants::kFixedFrameSizeAboveFp -
- stack_slots * kPointerSize;
+ stack_slots * kSystemPointerSize;
- return FoundHandler(nullptr, code->InstructionStart(), offset,
+ return FoundHandler(Context(), code->InstructionStart(), offset,
code->constant_pool(), return_sp, frame->fp());
}
@@ -1578,17 +1674,17 @@ Object* Isolate::UnwindAndFindHandler() {
// in between then {frame->sp()} would already be correct.
Address return_sp = frame->fp() -
InterpreterFrameConstants::kFixedFrameSizeFromFp -
- register_slots * kPointerSize;
+ register_slots * kSystemPointerSize;
// Patch the bytecode offset in the interpreted frame to reflect the
// position of the exception handler. The special builtin below will
// take care of continuing to dispatch at that position. Also restore
// the correct context for the handler from the interpreter register.
- Context* context =
+ Context context =
Context::cast(js_frame->ReadInterpreterRegister(context_reg));
js_frame->PatchBytecodeOffset(static_cast<int>(offset));
- Code* code =
+ Code code =
builtins()->builtin(Builtins::kInterpreterEnterBytecodeDispatch);
return FoundHandler(context, code->InstructionStart(), 0,
code->constant_pool(), return_sp, frame->fp());
@@ -1622,8 +1718,8 @@ Object* Isolate::UnwindAndFindHandler() {
// Reconstruct the stack pointer from the frame pointer.
Address return_sp = js_frame->fp() - js_frame->GetSPToFPDelta();
- Code* code = js_frame->LookupCode();
- return FoundHandler(nullptr, code->InstructionStart(), 0,
+ Code code = js_frame->LookupCode();
+ return FoundHandler(Context(), code->InstructionStart(), 0,
code->constant_pool(), return_sp, frame->fp());
} break;
@@ -1700,7 +1796,7 @@ Isolate::CatchType ToCatchType(HandlerTable::CatchPrediction prediction) {
Isolate::CatchType Isolate::PredictExceptionCatcher() {
Address external_handler = thread_local_top()->try_catch_handler_address();
- if (IsExternalHandlerOnTop(nullptr)) return CAUGHT_BY_EXTERNAL;
+ if (IsExternalHandlerOnTop(Object())) return CAUGHT_BY_EXTERNAL;
// Search for an exception handler by performing a full walk over the stack.
for (StackFrameIterator iter(this); !iter.done(); iter.Advance()) {
@@ -1709,7 +1805,7 @@ Isolate::CatchType Isolate::PredictExceptionCatcher() {
switch (frame->type()) {
case StackFrame::ENTRY:
case StackFrame::CONSTRUCT_ENTRY: {
- Address entry_handler = frame->top_handler()->next()->address();
+ Address entry_handler = frame->top_handler()->next_address();
// The exception has been externally caught if and only if there is an
// external handler which is on top of the top-most JS_ENTRY handler.
if (external_handler != kNullAddress &&
@@ -1758,13 +1854,12 @@ Isolate::CatchType Isolate::PredictExceptionCatcher() {
return NOT_CAUGHT;
}
-Object* Isolate::ThrowIllegalOperation() {
+Object Isolate::ThrowIllegalOperation() {
if (FLAG_stack_trace_on_illegal) PrintStack(stdout);
return Throw(ReadOnlyRoots(heap()).illegal_access_string());
}
-
-void Isolate::ScheduleThrow(Object* exception) {
+void Isolate::ScheduleThrow(Object exception) {
// When scheduling a throw we first throw the exception to get the
// error reporting if it is uncaught before rescheduling it.
Throw(exception);
@@ -1776,13 +1871,12 @@ void Isolate::ScheduleThrow(Object* exception) {
}
}
-
void Isolate::RestorePendingMessageFromTryCatch(v8::TryCatch* handler) {
DCHECK(handler == try_catch_handler());
DCHECK(handler->HasCaught());
DCHECK(handler->rethrow_);
DCHECK(handler->capture_message_);
- Object* message = reinterpret_cast<Object*>(handler->message_obj_);
+ Object message(reinterpret_cast<Address>(handler->message_obj_));
DCHECK(message->IsJSMessageObject() || message->IsTheHole(this));
thread_local_top()->pending_message_obj_ = message;
}
@@ -1790,25 +1884,33 @@ void Isolate::RestorePendingMessageFromTryCatch(v8::TryCatch* handler) {
void Isolate::CancelScheduledExceptionFromTryCatch(v8::TryCatch* handler) {
DCHECK(has_scheduled_exception());
- if (scheduled_exception() == handler->exception_) {
- DCHECK(scheduled_exception() !=
- ReadOnlyRoots(heap()).termination_exception());
+ if (reinterpret_cast<void*>(scheduled_exception().ptr()) ==
+ handler->exception_) {
+ DCHECK_NE(scheduled_exception(),
+ ReadOnlyRoots(heap()).termination_exception());
clear_scheduled_exception();
+ } else {
+ DCHECK_EQ(scheduled_exception(),
+ ReadOnlyRoots(heap()).termination_exception());
+ // Clear termination once we returned from all V8 frames.
+ if (handle_scope_implementer()->CallDepthIsZero()) {
+ thread_local_top()->external_caught_exception_ = false;
+ clear_scheduled_exception();
+ }
}
- if (thread_local_top_.pending_message_obj_ == handler->message_obj_) {
+ if (reinterpret_cast<void*>(thread_local_top_.pending_message_obj_.ptr()) ==
+ handler->message_obj_) {
clear_pending_message();
}
}
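
The reinterpret_cast comparisons above are a consequence of the Object-becomes-a-value-type migration: the public v8::TryCatch keeps only an opaque void*, so identity checks go through the tagged address rather than a C++ pointer. A minimal sketch of that pattern, with TaggedValue and TryCatchSlot as hypothetical stand-ins for the value-type Object and the API-visible handler fields:

#include <cstdint>

struct TaggedValue {
  std::uintptr_t ptr_ = 0;  // The tagged address payload.
  std::uintptr_t ptr() const { return ptr_; }
};

struct TryCatchSlot {
  void* exception = nullptr;  // Opaque storage in the public API.
};

// Identity is decided by comparing the tagged address, not C++ pointers.
bool SlotHolds(const TryCatchSlot& slot, TaggedValue value) {
  return reinterpret_cast<void*>(value.ptr()) == slot.exception;
}

void StoreInSlot(TryCatchSlot* slot, TaggedValue value) {
  slot->exception = reinterpret_cast<void*>(value.ptr());
}
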
-
-Object* Isolate::PromoteScheduledException() {
- Object* thrown = scheduled_exception();
+Object Isolate::PromoteScheduledException() {
+ Object thrown = scheduled_exception();
clear_scheduled_exception();
// Re-throw the exception to avoid getting repeated error reporting.
return ReThrow(thrown);
}
-
void Isolate::PrintCurrentStackTrace(FILE* out) {
for (StackTraceFrameIterator it(this); !it.done(); it.Advance()) {
if (!it.is_javascript()) continue;
@@ -1930,10 +2032,10 @@ bool Isolate::ComputeLocationFromStackTrace(MessageLocation* target,
Handle<JSFunction> fun = handle(elements->Function(i), this);
if (!fun->shared()->IsSubjectToDebugging()) continue;
- Object* script = fun->shared()->script();
+ Object script = fun->shared()->script();
if (script->IsScript() &&
!(Script::cast(script)->source()->IsUndefined(this))) {
- AbstractCode* abstract_code = elements->Code(i);
+ AbstractCode abstract_code = elements->Code(i);
const int code_offset = elements->Offset(i)->value();
const int pos = abstract_code->SourcePosition(code_offset);
@@ -1978,8 +2080,7 @@ Handle<JSMessageObject> Isolate::CreateMessage(Handle<Object> exception,
stack_trace_object);
}
-
-bool Isolate::IsJavaScriptHandlerOnTop(Object* exception) {
+bool Isolate::IsJavaScriptHandlerOnTop(Object exception) {
DCHECK_NE(ReadOnlyRoots(heap()).the_hole_value(), exception);
// For uncatchable exceptions, the JavaScript handler cannot be on top.
@@ -2003,8 +2104,7 @@ bool Isolate::IsJavaScriptHandlerOnTop(Object* exception) {
return (entry_handler < external_handler);
}
-
-bool Isolate::IsExternalHandlerOnTop(Object* exception) {
+bool Isolate::IsExternalHandlerOnTop(Object exception) {
DCHECK_NE(ReadOnlyRoots(heap()).the_hole_value(), exception);
// Get the address of the external handler so we can compare the address to
@@ -2029,10 +2129,10 @@ bool Isolate::IsExternalHandlerOnTop(Object* exception) {
}
void Isolate::ReportPendingMessagesImpl(bool report_externally) {
- Object* exception = pending_exception();
+ Object exception = pending_exception();
// Clear the pending message object early to avoid endless recursion.
- Object* message_obj = thread_local_top_.pending_message_obj_;
+ Object message_obj = thread_local_top_.pending_message_obj_;
clear_pending_message();
// For uncatchable exceptions we do nothing. If needed, the exception and the
@@ -2069,7 +2169,7 @@ void Isolate::ReportPendingMessages() {
// The embedder might run script in response to an exception.
AllowJavascriptExecutionDebugOnly allow_script(this);
- Object* exception = pending_exception();
+ Object exception = pending_exception();
// Try to propagate the exception to an external v8::TryCatch handler. If
// propagation was unsuccessful, then we will get another chance at reporting
@@ -2089,8 +2189,7 @@ void Isolate::ReportPendingMessagesFromJavaScript() {
// Get the top-most JS_ENTRY handler, cannot be on top if it doesn't exist.
Address entry_handler = Isolate::handler(thread_local_top());
DCHECK_NE(entry_handler, kNullAddress);
- entry_handler =
- reinterpret_cast<StackHandler*>(entry_handler)->next()->address();
+ entry_handler = StackHandler::FromAddress(entry_handler)->next_address();
// Get the address of the external handler so we can compare the address to
// determine which one is closer to the top of the stack.
@@ -2107,8 +2206,7 @@ void Isolate::ReportPendingMessagesFromJavaScript() {
// Get the top-most JS_ENTRY handler, cannot be on top if it doesn't exist.
Address entry_handler = Isolate::handler(thread_local_top());
DCHECK_NE(entry_handler, kNullAddress);
- entry_handler =
- reinterpret_cast<StackHandler*>(entry_handler)->next()->address();
+ entry_handler = StackHandler::FromAddress(entry_handler)->next_address();
return (entry_handler > external_handler);
};
@@ -2129,11 +2227,12 @@ void Isolate::ReportPendingMessagesFromJavaScript() {
thread_local_top_.pending_message_obj_->IsTheHole(this));
handler->can_continue_ = true;
handler->has_terminated_ = false;
- handler->exception_ = pending_exception();
+ handler->exception_ = reinterpret_cast<void*>(pending_exception().ptr());
// Propagate to the external try-catch only if we got an actual message.
if (thread_local_top_.pending_message_obj_->IsTheHole(this)) return true;
- handler->message_obj_ = thread_local_top_.pending_message_obj_;
+ handler->message_obj_ =
+ reinterpret_cast<void*>(thread_local_top_.pending_message_obj_.ptr());
return true;
};
@@ -2160,19 +2259,15 @@ MessageLocation Isolate::GetMessageLocation() {
return MessageLocation();
}
-
-bool Isolate::OptionalRescheduleException(bool is_bottom_call) {
+bool Isolate::OptionalRescheduleException(bool clear_exception) {
DCHECK(has_pending_exception());
PropagatePendingExceptionToExternalTryCatch();
bool is_termination_exception =
pending_exception() == ReadOnlyRoots(this).termination_exception();
- // Do not reschedule the exception if this is the bottom call.
- bool clear_exception = is_bottom_call;
-
if (is_termination_exception) {
- if (is_bottom_call) {
+ if (clear_exception) {
thread_local_top()->external_caught_exception_ = false;
clear_pending_exception();
return false;
@@ -2273,21 +2368,23 @@ bool InternalPromiseHasUserDefinedRejectHandler(Isolate* isolate,
Handle<PromiseReaction> reaction = Handle<PromiseReaction>::cast(current);
Handle<HeapObject> promise_or_capability(
reaction->promise_or_capability(), isolate);
- Handle<JSPromise> promise = Handle<JSPromise>::cast(
- promise_or_capability->IsJSPromise()
- ? promise_or_capability
- : handle(Handle<PromiseCapability>::cast(promise_or_capability)
- ->promise(),
- isolate));
- if (reaction->reject_handler()->IsUndefined(isolate)) {
- if (InternalPromiseHasUserDefinedRejectHandler(isolate, promise)) {
- return true;
- }
- } else {
- Handle<JSReceiver> current_handler(
- JSReceiver::cast(reaction->reject_handler()), isolate);
- if (PromiseHandlerCheck(isolate, current_handler, promise)) {
- return true;
+ if (!promise_or_capability->IsUndefined(isolate)) {
+ Handle<JSPromise> promise = Handle<JSPromise>::cast(
+ promise_or_capability->IsJSPromise()
+ ? promise_or_capability
+ : handle(Handle<PromiseCapability>::cast(promise_or_capability)
+ ->promise(),
+ isolate));
+ if (reaction->reject_handler()->IsUndefined(isolate)) {
+ if (InternalPromiseHasUserDefinedRejectHandler(isolate, promise)) {
+ return true;
+ }
+ } else {
+ Handle<JSReceiver> current_handler(
+ JSReceiver::cast(reaction->reject_handler()), isolate);
+ if (PromiseHandlerCheck(isolate, current_handler, promise)) {
+ return true;
+ }
}
}
current = handle(reaction->next(), isolate);
@@ -2322,7 +2419,7 @@ Handle<Object> Isolate::GetPromiseOnStackOnThrow() {
if (frame->is_java_script()) {
catch_prediction = PredictException(JavaScriptFrame::cast(frame));
} else if (frame->type() == StackFrame::STUB) {
- Code* code = frame->LookupCode();
+ Code code = frame->LookupCode();
if (!code->IsCode() || code->kind() != Code::BUILTIN ||
!code->handler_table_offset() || !code->is_turbofanned()) {
continue;
@@ -2402,15 +2499,13 @@ Handle<Context> Isolate::GetIncumbentContext() {
// if it's newer than the last Context::BackupIncumbentScope entry.
//
// NOTE: This code assumes that the stack grows downward.
- // This code doesn't work with ASAN because ASAN seems allocating stack
- // separated for native C++ code and compiled JS code, and the following
- // comparison doesn't make sense in ASAN.
- // TODO(yukishiino): Make the implementation of BackupIncumbentScope more
- // robust.
- if (!it.done() && (!top_backup_incumbent_scope() ||
- it.frame()->sp() < reinterpret_cast<Address>(
- top_backup_incumbent_scope()))) {
- Context* context = Context::cast(it.frame()->context());
+ Address top_backup_incumbent =
+ top_backup_incumbent_scope()
+ ? top_backup_incumbent_scope()->JSStackComparableAddress()
+ : 0;
+ if (!it.done() &&
+ (!top_backup_incumbent || it.frame()->sp() < top_backup_incumbent)) {
+ Context context = Context::cast(it.frame()->context());
return Handle<Context>(context->native_context(), this);
}
@@ -2420,12 +2515,12 @@ Handle<Context> Isolate::GetIncumbentContext() {
*top_backup_incumbent_scope()->backup_incumbent_context_);
}
- // Last candidate: the entered context.
+ // Last candidate: the entered context or microtask context.
   // Given that no other author function is running, there must be no
   // cross-context function running either, so the incumbent realm must match
   // the entry realm.
v8::Local<v8::Context> entered_context =
- reinterpret_cast<v8::Isolate*>(this)->GetEnteredContext();
+ reinterpret_cast<v8::Isolate*>(this)->GetEnteredOrMicrotaskContext();
return Utils::OpenHandle(*entered_context);
}
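
The rewritten comparison above relies on the stack growing downward: a JS frame is considered newer than the last Context::BackupIncumbentScope when its stack pointer is numerically below the scope's comparable address. A hedged sketch of just that check, modeling Address as uintptr_t and treating 0 as "no scope recorded":

#include <cstdint>

using Address = std::uintptr_t;

// Returns true if the JS frame at |frame_sp| was pushed after the scope whose
// comparable stack address is |scope_addr|.
bool FrameIsNewerThanScope(Address frame_sp, Address scope_addr) {
  if (scope_addr == 0) return true;  // No BackupIncumbentScope at all.
  return frame_sp < scope_addr;      // Stack grows toward lower addresses.
}
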
@@ -2448,11 +2543,12 @@ char* Isolate::RestoreThread(char* from) {
#ifdef USE_SIMULATOR
thread_local_top()->simulator_ = Simulator::current(this);
#endif
- DCHECK(context() == nullptr || context()->IsContext());
+ DCHECK(context().is_null() || context()->IsContext());
return from + sizeof(ThreadLocalTop);
}
void Isolate::ReleaseSharedPtrs() {
+ base::MutexGuard lock(&managed_ptr_destructors_mutex_);
while (managed_ptr_destructors_head_) {
ManagedPtrDestructor* l = managed_ptr_destructors_head_;
ManagedPtrDestructor* n = nullptr;
@@ -2466,6 +2562,7 @@ void Isolate::ReleaseSharedPtrs() {
}
void Isolate::RegisterManagedPtrDestructor(ManagedPtrDestructor* destructor) {
+ base::MutexGuard lock(&managed_ptr_destructors_mutex_);
DCHECK_NULL(destructor->prev_);
DCHECK_NULL(destructor->next_);
if (managed_ptr_destructors_head_) {
@@ -2476,6 +2573,7 @@ void Isolate::RegisterManagedPtrDestructor(ManagedPtrDestructor* destructor) {
}
void Isolate::UnregisterManagedPtrDestructor(ManagedPtrDestructor* destructor) {
+ base::MutexGuard lock(&managed_ptr_destructors_mutex_);
if (destructor->prev_) {
destructor->prev_->next_ = destructor->next_;
} else {
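
These three hunks put ReleaseSharedPtrs, RegisterManagedPtrDestructor, and UnregisterManagedPtrDestructor under a shared managed_ptr_destructors_mutex_. A self-contained sketch of the underlying pattern, an intrusive doubly-linked list whose every mutation and final sweep happens under one lock (names are illustrative, not V8's):

#include <mutex>

struct Node {
  Node* prev_ = nullptr;
  Node* next_ = nullptr;
  void (*destroy_)(void*) = nullptr;  // Deleter for the managed object.
  void* object_ = nullptr;
};

class Registry {
 public:
  void Register(Node* n) {
    std::lock_guard<std::mutex> lock(mutex_);
    n->next_ = head_;
    if (head_) head_->prev_ = n;
    head_ = n;
  }
  void Unregister(Node* n) {
    std::lock_guard<std::mutex> lock(mutex_);
    if (n->prev_) n->prev_->next_ = n->next_;
    else head_ = n->next_;
    if (n->next_) n->next_->prev_ = n->prev_;
    n->prev_ = n->next_ = nullptr;
  }
  void ReleaseAll() {
    std::lock_guard<std::mutex> lock(mutex_);
    while (head_) {
      Node* n = head_;
      head_ = n->next_;
      n->destroy_(n->object_);  // Run the stored deleter.
    }
  }

 private:
  std::mutex mutex_;
  Node* head_ = nullptr;
};
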
@@ -2621,96 +2719,80 @@ class VerboseAccountingAllocator : public AccountingAllocator {
std::atomic<size_t> Isolate::non_disposed_isolates_;
#endif // DEBUG
-Isolate::Isolate()
- : embedder_data_(),
- entry_stack_(nullptr),
- stack_trace_nesting_level_(0),
- incomplete_message_(nullptr),
- bootstrapper_(nullptr),
- runtime_profiler_(nullptr),
- compilation_cache_(nullptr),
- logger_(nullptr),
- load_stub_cache_(nullptr),
- store_stub_cache_(nullptr),
- deoptimizer_data_(nullptr),
- deoptimizer_lazy_throw_(false),
- materialized_object_store_(nullptr),
- capture_stack_trace_for_uncaught_exceptions_(false),
- stack_trace_for_uncaught_exceptions_frame_limit_(0),
- stack_trace_for_uncaught_exceptions_options_(StackTrace::kOverview),
- context_slot_cache_(nullptr),
- descriptor_lookup_cache_(nullptr),
- handle_scope_implementer_(nullptr),
- unicode_cache_(nullptr),
+// static
+Isolate* Isolate::New(IsolateAllocationMode mode) {
+ // IsolateAllocator allocates the memory for the Isolate object according to
+ // the given allocation mode.
+ std::unique_ptr<IsolateAllocator> isolate_allocator =
+ base::make_unique<IsolateAllocator>(mode);
+ // Construct Isolate object in the allocated memory.
+ void* isolate_ptr = isolate_allocator->isolate_memory();
+ Isolate* isolate = new (isolate_ptr) Isolate(std::move(isolate_allocator));
+#ifdef V8_TARGET_ARCH_64_BIT
+ DCHECK_IMPLIES(
+ mode == IsolateAllocationMode::kInV8Heap,
+ IsAligned(isolate->isolate_root(), kPtrComprIsolateRootAlignment));
+#endif
+
+#ifdef DEBUG
+ non_disposed_isolates_++;
+#endif // DEBUG
+
+ return isolate;
+}
+
+// static
+void Isolate::Delete(Isolate* isolate) {
+ DCHECK_NOT_NULL(isolate);
+ // Temporarily set this isolate as current so that various parts of
+ // the isolate can access it in their destructors without having a
+ // direct pointer. We don't use Enter/Exit here to avoid
+ // initializing the thread data.
+ PerIsolateThreadData* saved_data = isolate->CurrentPerIsolateThreadData();
+ DCHECK_EQ(base::Relaxed_Load(&isolate_key_created_), 1);
+ Isolate* saved_isolate = reinterpret_cast<Isolate*>(
+ base::Thread::GetThreadLocal(isolate->isolate_key_));
+ SetIsolateThreadLocals(isolate, nullptr);
+
+ isolate->Deinit();
+
+#ifdef DEBUG
+ non_disposed_isolates_--;
+#endif // DEBUG
+
+ // Take ownership of the IsolateAllocator to ensure the Isolate memory will
+  // be available during the Isolate destructor call.
+ std::unique_ptr<IsolateAllocator> isolate_allocator =
+ std::move(isolate->isolate_allocator_);
+ isolate->~Isolate();
+ // Now free the memory owned by the allocator.
+ isolate_allocator.reset();
+
+ // Restore the previous current isolate.
+ SetIsolateThreadLocals(saved_isolate, saved_data);
+}
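
Isolate::New and Isolate::Delete separate allocation from construction: the allocator reserves (possibly specially aligned) memory, the object is built there with placement new, and on deletion the allocator is moved out so the raw memory outlives the destructor. A minimal sketch under those assumptions, with Arena as a hypothetical stand-in for IsolateAllocator:

#include <cstdlib>
#include <memory>
#include <new>
#include <utility>

struct Arena {
  void* memory = nullptr;
  explicit Arena(std::size_t size) : memory(std::malloc(size)) {}
  ~Arena() { std::free(memory); }
};

struct Engine {
  explicit Engine(std::unique_ptr<Arena> arena) : arena_(std::move(arena)) {}
  std::unique_ptr<Arena> arena_;  // The object owns its own backing storage.
};

Engine* NewEngine() {
  auto arena = std::make_unique<Arena>(sizeof(Engine));
  void* memory = arena->memory;
  // Construct the Engine inside the memory the arena just reserved.
  return new (memory) Engine(std::move(arena));
}

void DeleteEngine(Engine* engine) {
  // Keep the arena alive past the destructor (as Isolate::Delete does with
  // its IsolateAllocator), then let it release the raw memory.
  std::unique_ptr<Arena> arena = std::move(engine->arena_);
  engine->~Engine();  // Destroy in place without deallocating.
  arena.reset();      // Now free the memory itself.
}
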
+
+v8::PageAllocator* Isolate::page_allocator() {
+ return isolate_allocator_->page_allocator();
+}
+
+Isolate::Isolate(std::unique_ptr<i::IsolateAllocator> isolate_allocator)
+ : isolate_allocator_(std::move(isolate_allocator)),
+ id_(base::Relaxed_AtomicIncrement(&isolate_counter_, 1)),
+ stack_guard_(this),
allocator_(FLAG_trace_zone_stats ? new VerboseAccountingAllocator(
&heap_, 256 * KB, 128 * KB)
: new AccountingAllocator()),
- inner_pointer_to_code_cache_(nullptr),
- global_handles_(nullptr),
- eternal_handles_(nullptr),
- thread_manager_(nullptr),
builtins_(this),
- setup_delegate_(nullptr),
- regexp_stack_(nullptr),
- date_cache_(nullptr),
- // TODO(bmeurer) Initialized lazily because it depends on flags; can
- // be fixed once the default isolate cleanup is done.
- random_number_generator_(nullptr),
- fuzzer_rng_(nullptr),
rail_mode_(PERFORMANCE_ANIMATION),
- atomics_wait_callback_(nullptr),
- atomics_wait_callback_data_(nullptr),
- promise_hook_(nullptr),
- host_import_module_dynamically_callback_(nullptr),
- host_initialize_import_meta_object_callback_(nullptr),
- load_start_time_ms_(0),
-#ifdef V8_INTL_SUPPORT
-#if USE_CHROMIUM_ICU == 0 && U_ICU_VERSION_MAJOR_NUM < 63
- language_singleton_regexp_matcher_(nullptr),
- language_tag_regexp_matcher_(nullptr),
- language_variant_regexp_matcher_(nullptr),
-#endif // USE_CHROMIUM_ICU == 0 && U_ICU_VERSION_MAJOR_NUM < 63
- default_locale_(""),
-#endif // V8_INTL_SUPPORT
- serializer_enabled_(false),
- has_fatal_error_(false),
- initialized_from_snapshot_(false),
- is_tail_call_elimination_enabled_(true),
- is_isolate_in_background_(false),
- memory_savings_mode_active_(false),
- heap_profiler_(nullptr),
code_event_dispatcher_(new CodeEventDispatcher()),
- function_entry_hook_(nullptr),
- deferred_handles_head_(nullptr),
- optimizing_compile_dispatcher_(nullptr),
- stress_deopt_count_(0),
- force_slow_path_(false),
- next_optimization_id_(0),
-#if V8_SFI_HAS_UNIQUE_ID
- next_unique_sfi_id_(0),
-#endif
- is_running_microtasks_(false),
- use_counter_callback_(nullptr),
- cancelable_task_manager_(new CancelableTaskManager()),
- abort_on_uncaught_exception_callback_(nullptr),
- total_regexp_code_generated_(0) {
- id_ = base::Relaxed_AtomicIncrement(&isolate_counter_, 1);
+ cancelable_task_manager_(new CancelableTaskManager()) {
TRACE_ISOLATE(constructor);
-
- memset(isolate_addresses_, 0,
- sizeof(isolate_addresses_[0]) * (kIsolateAddressCount + 1));
-
- heap_.isolate_ = this;
- stack_guard_.isolate_ = this;
+ CheckIsolateLayout();
// ThreadManager is initialized early to support locking an isolate
// before it is entered.
- thread_manager_ = new ThreadManager();
- thread_manager_->isolate_ = this;
-
-#ifdef DEBUG
- non_disposed_isolates_++;
-#endif // DEBUG
+ thread_manager_ = new ThreadManager(this);
handle_scope_data_.Initialize();
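
The shrunken initializer list above likely reflects a move to default member initializers: fields that previously needed explicit "field_(nullptr)" entries now carry their defaults at the declaration, as the ThreadLocalTop declarations later in this diff do. A tiny sketch of that style:

class Widget {
 public:
  Widget() = default;      // Nothing left to list here.

 private:
  int count_ = 0;          // Defaults live with the declarations,
  void* cache_ = nullptr;  // so every constructor picks them up.
  bool enabled_ = false;
};
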
@@ -2727,58 +2809,30 @@ Isolate::Isolate()
InitializeLoggingAndCounters();
debug_ = new Debug(this);
- init_memcopy_functions();
+ InitializeDefaultEmbeddedBlob();
- if (FLAG_embedded_builtins) {
-#ifdef V8_MULTI_SNAPSHOTS
- if (FLAG_untrusted_code_mitigations) {
- SetEmbeddedBlob(DefaultEmbeddedBlob(), DefaultEmbeddedBlobSize());
- } else {
- SetEmbeddedBlob(TrustedEmbeddedBlob(), TrustedEmbeddedBlobSize());
- }
-#else
- SetEmbeddedBlob(DefaultEmbeddedBlob(), DefaultEmbeddedBlobSize());
-#endif
- }
+ MicrotaskQueue::SetUpDefaultMicrotaskQueue(this);
}
-
-void Isolate::TearDown() {
- TRACE_ISOLATE(tear_down);
-
- tracing_cpu_profiler_.reset();
- if (FLAG_stress_sampling_allocation_profiler > 0) {
- heap_profiler()->StopSamplingHeapProfiler();
- }
-
- // Temporarily set this isolate as current so that various parts of
- // the isolate can access it in their destructors without having a
- // direct pointer. We don't use Enter/Exit here to avoid
- // initializing the thread data.
- PerIsolateThreadData* saved_data = CurrentPerIsolateThreadData();
- DCHECK_EQ(base::Relaxed_Load(&isolate_key_created_), 1);
- Isolate* saved_isolate =
- reinterpret_cast<Isolate*>(base::Thread::GetThreadLocal(isolate_key_));
- SetIsolateThreadLocals(this, nullptr);
-
- Deinit();
-
- {
- base::LockGuard<base::Mutex> lock_guard(&thread_data_table_mutex_);
- thread_data_table_.RemoveAllThreads();
- }
-
-#ifdef DEBUG
- non_disposed_isolates_--;
-#endif // DEBUG
-
- delete this;
-
- // Restore the previous current isolate.
- SetIsolateThreadLocals(saved_isolate, saved_data);
+void Isolate::CheckIsolateLayout() {
+ CHECK_EQ(OFFSET_OF(Isolate, isolate_data_), 0);
+ CHECK_EQ(static_cast<int>(OFFSET_OF(Isolate, isolate_data_.embedder_data_)),
+ Internals::kIsolateEmbedderDataOffset);
+ CHECK_EQ(static_cast<int>(OFFSET_OF(Isolate, isolate_data_.roots_)),
+ Internals::kIsolateRootsOffset);
+ CHECK_EQ(Internals::kExternalMemoryOffset % 8, 0);
+ CHECK_EQ(static_cast<int>(OFFSET_OF(Isolate, isolate_data_.external_memory_)),
+ Internals::kExternalMemoryOffset);
+ CHECK_EQ(Internals::kExternalMemoryLimitOffset % 8, 0);
+ CHECK_EQ(static_cast<int>(
+ OFFSET_OF(Isolate, isolate_data_.external_memory_limit_)),
+ Internals::kExternalMemoryLimitOffset);
+ CHECK_EQ(Internals::kExternalMemoryAtLastMarkCompactOffset % 8, 0);
+ CHECK_EQ(static_cast<int>(OFFSET_OF(
+ Isolate, isolate_data_.external_memory_at_last_mark_compact_)),
+ Internals::kExternalMemoryAtLastMarkCompactOffset);
}
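
CheckIsolateLayout pins down the field offsets that generated code and the public Internals constants depend on. A sketch of the same idea using offsetof and compile-time assertions; the types and offset constants here are hypothetical, and real V8 uses its own OFFSET_OF/CHECK_EQ macros:

#include <cstddef>
#include <cstdint>

struct IsolateData {
  void* embedder_data[4];            // Slots exposed to the embedder.
  std::uint64_t external_memory;     // Read directly by generated code.
  std::uint64_t external_memory_limit;
};

struct Engine {
  IsolateData isolate_data;          // Must stay the first field.
  int unrelated_state;
};

// Offset promised to the outside world (illustrative value only).
constexpr std::size_t kEmbedderDataOffset = 0;

static_assert(offsetof(Engine, isolate_data) == 0,
              "isolate data must sit at offset 0");
static_assert(offsetof(IsolateData, embedder_data) == kEmbedderDataOffset,
              "embedder data slots moved");
static_assert(offsetof(IsolateData, external_memory) % 8 == 0,
              "64-bit counters must stay 8-byte aligned");
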
-
void Isolate::ClearSerializerData() {
delete external_reference_map_;
external_reference_map_ = nullptr;
@@ -2795,6 +2849,11 @@ bool Isolate::LogObjectRelocation() {
void Isolate::Deinit() {
TRACE_ISOLATE(deinit);
+ tracing_cpu_profiler_.reset();
+ if (FLAG_stress_sampling_allocation_profiler > 0) {
+ heap_profiler()->StopSamplingHeapProfiler();
+ }
+
debug()->Unload();
wasm_engine()->DeleteCompileJobsOnIsolate(this);
@@ -2840,7 +2899,7 @@ void Isolate::Deinit() {
delete heap_profiler_;
heap_profiler_ = nullptr;
- compiler_dispatcher_->AbortAll(BlockingBehavior::kBlock);
+ compiler_dispatcher_->AbortAll();
delete compiler_dispatcher_;
compiler_dispatcher_ = nullptr;
@@ -2855,14 +2914,7 @@ void Isolate::Deinit() {
wasm_engine_.reset();
}
- if (FLAG_embedded_builtins) {
- if (DefaultEmbeddedBlob() == nullptr && embedded_blob() != nullptr) {
- // We own the embedded blob. Free it.
- uint8_t* data = const_cast<uint8_t*>(embedded_blob_);
- InstructionStream::FreeOffHeapInstructionStream(data,
- embedded_blob_size_);
- }
- }
+ TearDownEmbeddedBlob();
delete interpreter_;
interpreter_ = nullptr;
@@ -2880,6 +2932,11 @@ void Isolate::Deinit() {
compiler_cache_ = nullptr;
ClearSerializerData();
+
+ {
+ base::MutexGuard lock_guard(&thread_data_table_mutex_);
+ thread_data_table_.RemoveAllThreads();
+ }
}
@@ -2905,26 +2962,11 @@ Isolate::~Isolate() {
delete date_cache_;
date_cache_ = nullptr;
-#ifdef V8_INTL_SUPPORT
-#if USE_CHROMIUM_ICU == 0 && U_ICU_VERSION_MAJOR_NUM < 63
- delete language_singleton_regexp_matcher_;
- language_singleton_regexp_matcher_ = nullptr;
-
- delete language_tag_regexp_matcher_;
- language_tag_regexp_matcher_ = nullptr;
-
- delete language_variant_regexp_matcher_;
- language_variant_regexp_matcher_ = nullptr;
-#endif // USE_CHROMIUM_ICU == 0 && U_ICU_VERSION_MAJOR_NUM < 63
-#endif // V8_INTL_SUPPORT
-
delete regexp_stack_;
regexp_stack_ = nullptr;
delete descriptor_lookup_cache_;
descriptor_lookup_cache_ = nullptr;
- delete context_slot_cache_;
- context_slot_cache_ = nullptr;
delete load_stub_cache_;
load_stub_cache_ = nullptr;
@@ -2975,6 +3017,12 @@ Isolate::~Isolate() {
delete allocator_;
allocator_ = nullptr;
+
+ // Assert that |default_microtask_queue_| is the last MicrotaskQueue instance.
+ DCHECK_IMPLIES(default_microtask_queue_,
+ default_microtask_queue_ == default_microtask_queue_->next());
+ delete default_microtask_queue_;
+ default_microtask_queue_ = nullptr;
}
void Isolate::InitializeThreadLocal() { thread_local_top_.Initialize(this); }
@@ -2983,11 +3031,12 @@ void Isolate::SetTerminationOnExternalTryCatch() {
if (try_catch_handler() == nullptr) return;
try_catch_handler()->can_continue_ = false;
try_catch_handler()->has_terminated_ = true;
- try_catch_handler()->exception_ = ReadOnlyRoots(heap()).null_value();
+ try_catch_handler()->exception_ =
+ reinterpret_cast<void*>(ReadOnlyRoots(heap()).null_value().ptr());
}
bool Isolate::PropagatePendingExceptionToExternalTryCatch() {
- Object* exception = pending_exception();
+ Object exception = pending_exception();
if (IsJavaScriptHandlerOnTop(exception)) {
thread_local_top_.external_caught_exception_ = false;
@@ -3008,11 +3057,12 @@ bool Isolate::PropagatePendingExceptionToExternalTryCatch() {
thread_local_top_.pending_message_obj_->IsTheHole(this));
handler->can_continue_ = true;
handler->has_terminated_ = false;
- handler->exception_ = pending_exception();
+ handler->exception_ = reinterpret_cast<void*>(pending_exception().ptr());
// Propagate to the external try-catch only if we got an actual message.
if (thread_local_top_.pending_message_obj_->IsTheHole(this)) return true;
- handler->message_obj_ = thread_local_top_.pending_message_obj_;
+ handler->message_obj_ =
+ reinterpret_cast<void*>(thread_local_top_.pending_message_obj_.ptr());
}
return true;
}
@@ -3031,18 +3081,8 @@ void Isolate::InitializeLoggingAndCounters() {
}
namespace {
-void PrintBuiltinSizes(Isolate* isolate) {
- Builtins* builtins = isolate->builtins();
- for (int i = 0; i < Builtins::builtin_count; i++) {
- const char* name = builtins->name(i);
- const char* kind = Builtins::KindNameOf(i);
- Code* code = builtins->builtin(i);
- PrintF(stdout, "%s Builtin, %s, %d\n", kind, name, code->InstructionSize());
- }
-}
void CreateOffHeapTrampolines(Isolate* isolate) {
- DCHECK(isolate->serializer_enabled());
DCHECK_NOT_NULL(isolate->embedded_blob());
DCHECK_NE(0, isolate->embedded_blob_size());
@@ -3051,7 +3091,6 @@ void CreateOffHeapTrampolines(Isolate* isolate) {
EmbeddedData d = EmbeddedData::FromBlob();
- CodeSpaceMemoryModificationScope code_allocation(isolate->heap());
for (int i = 0; i < Builtins::builtin_count; i++) {
if (!Builtins::IsIsolateIndependent(i)) continue;
@@ -3061,8 +3100,7 @@ void CreateOffHeapTrampolines(Isolate* isolate) {
// Note that references to the old, on-heap code objects may still exist on
// the heap. This is fine for the sake of serialization, as serialization
- // will replace all of them with a builtin reference which is later
- // deserialized to point to the object within the builtins table.
+ // will canonicalize all builtins in MaybeCanonicalizeBuiltin().
//
// From this point onwards, some builtin code objects may be unreachable and
// thus collected by the GC.
@@ -3075,37 +3113,79 @@ void CreateOffHeapTrampolines(Isolate* isolate) {
}
}
-void PrintEmbeddedBuiltinCandidates(Isolate* isolate) {
- CHECK(FLAG_print_embedded_builtin_candidates);
- bool found_a_candidate = false;
- for (int i = 0; i < Builtins::builtin_count; i++) {
- if (Builtins::IsIsolateIndependent(i)) continue;
- Code* builtin = isolate->heap()->builtin(i);
- if (!builtin->IsIsolateIndependent(isolate)) continue;
- if (!found_a_candidate) PrintF("Found embedded builtin candidates:\n");
- found_a_candidate = true;
- PrintF(" %s\n", Builtins::name(i));
+} // namespace
+
+void Isolate::InitializeDefaultEmbeddedBlob() {
+ const uint8_t* blob = DefaultEmbeddedBlob();
+ uint32_t size = DefaultEmbeddedBlobSize();
+
+#ifdef V8_MULTI_SNAPSHOTS
+ if (!FLAG_untrusted_code_mitigations) {
+ blob = TrustedEmbeddedBlob();
+ size = TrustedEmbeddedBlobSize();
+ }
+#endif
+
+ if (StickyEmbeddedBlob() != nullptr) {
+ base::MutexGuard guard(current_embedded_blob_refcount_mutex_.Pointer());
+ // Check again now that we hold the lock.
+ if (StickyEmbeddedBlob() != nullptr) {
+ blob = StickyEmbeddedBlob();
+ size = StickyEmbeddedBlobSize();
+ current_embedded_blob_refs_++;
+ }
+ }
+
+ if (blob == nullptr) {
+ CHECK_EQ(0, size);
+ } else {
+ SetEmbeddedBlob(blob, size);
}
}
-} // namespace
-void Isolate::PrepareEmbeddedBlobForSerialization() {
- // When preparing the embedded blob, ensure it doesn't exist yet.
- DCHECK_NULL(embedded_blob());
- DCHECK_NULL(DefaultEmbeddedBlob());
- DCHECK(serializer_enabled());
-
- // The isolate takes ownership of this pointer into an executable mmap'd
- // area. We muck around with const-casts because the standard use-case in
- // shipping builds is for embedded_blob_ to point into a read-only
- // .text-embedded section.
- uint8_t* data;
- uint32_t size;
- InstructionStream::CreateOffHeapInstructionStream(this, &data, &size);
- SetEmbeddedBlob(const_cast<const uint8_t*>(data), size);
+void Isolate::CreateAndSetEmbeddedBlob() {
+ base::MutexGuard guard(current_embedded_blob_refcount_mutex_.Pointer());
+
+ PrepareBuiltinSourcePositionMap();
+
+ // If a sticky blob has been set, we reuse it.
+ if (StickyEmbeddedBlob() != nullptr) {
+ CHECK_EQ(embedded_blob(), StickyEmbeddedBlob());
+ CHECK_EQ(CurrentEmbeddedBlob(), StickyEmbeddedBlob());
+ } else {
+ // Create and set a new embedded blob.
+ uint8_t* data;
+ uint32_t size;
+ InstructionStream::CreateOffHeapInstructionStream(this, &data, &size);
+
+ CHECK_EQ(0, current_embedded_blob_refs_);
+ const uint8_t* const_data = const_cast<const uint8_t*>(data);
+ SetEmbeddedBlob(const_data, size);
+ current_embedded_blob_refs_++;
+
+ SetStickyEmbeddedBlob(const_data, size);
+ }
+
CreateOffHeapTrampolines(this);
}
+void Isolate::TearDownEmbeddedBlob() {
+ // Nothing to do in case the blob is embedded into the binary or unset.
+ if (StickyEmbeddedBlob() == nullptr) return;
+
+ CHECK_EQ(embedded_blob(), StickyEmbeddedBlob());
+ CHECK_EQ(CurrentEmbeddedBlob(), StickyEmbeddedBlob());
+
+ base::MutexGuard guard(current_embedded_blob_refcount_mutex_.Pointer());
+ current_embedded_blob_refs_--;
+ if (current_embedded_blob_refs_ == 0 && enable_embedded_blob_refcounting_) {
+ // We own the embedded blob and are the last holder. Free it.
+ InstructionStream::FreeOffHeapInstructionStream(
+ const_cast<uint8_t*>(embedded_blob()), embedded_blob_size());
+ ClearEmbeddedBlob();
+ }
+}
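
InitializeDefaultEmbeddedBlob, CreateAndSetEmbeddedBlob, and TearDownEmbeddedBlob together implement a refcounted, process-wide "sticky" blob: readers check the shared pointer, re-check under the mutex, bump a count, and the last holder frees the mapping only when refcounting is enabled. A condensed sketch of that lifecycle, with FreeStream as a hypothetical stand-in for InstructionStream::FreeOffHeapInstructionStream:

#include <atomic>
#include <cstdint>
#include <mutex>

std::mutex g_blob_mutex;
std::atomic<const uint8_t*> g_sticky_data{nullptr};  // Published blob start.
uint32_t g_sticky_size = 0;  // Guarded by g_blob_mutex.
int g_blob_refs = 0;         // Guarded by g_blob_mutex.

// Hypothetical unmapper standing in for FreeOffHeapInstructionStream.
void FreeStream(uint8_t*, uint32_t) { /* munmap/VirtualFree here */ }

const uint8_t* AcquireBlob(const uint8_t* fallback, uint32_t fallback_size,
                           uint32_t* size_out) {
  if (g_sticky_data.load(std::memory_order_acquire) != nullptr) {
    std::lock_guard<std::mutex> lock(g_blob_mutex);
    // Re-check now that we hold the lock, as the code above does.
    const uint8_t* data = g_sticky_data.load(std::memory_order_relaxed);
    if (data != nullptr) {
      ++g_blob_refs;  // This isolate now shares the sticky blob.
      *size_out = g_sticky_size;
      return data;
    }
  }
  *size_out = fallback_size;
  return fallback;  // Use the blob linked into the binary.
}

void ReleaseBlob(bool refcounting_enabled) {
  std::lock_guard<std::mutex> lock(g_blob_mutex);
  const uint8_t* data = g_sticky_data.load(std::memory_order_relaxed);
  if (data == nullptr) return;  // Blob is embedded in the binary or unset.
  if (--g_blob_refs == 0 && refcounting_enabled) {
    // Last holder: unmap the off-heap instruction stream.
    FreeStream(const_cast<uint8_t*>(data), g_sticky_size);
    g_sticky_data.store(nullptr, std::memory_order_release);
    g_sticky_size = 0;
  }
}
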
+
bool Isolate::Init(StartupDeserializer* des) {
TRACE_ISOLATE(init);
@@ -3119,14 +3199,6 @@ bool Isolate::Init(StartupDeserializer* des) {
has_fatal_error_ = false;
- if (function_entry_hook() != nullptr) {
- // When function entry hooking is in effect, we have to create the code
- // stubs from scratch to get entry hooks, rather than loading the previously
- // generated stubs from disk.
- // If this assert fires, the initialization path has regressed.
- DCHECK_NULL(des);
- }
-
// The initialization process does not handle memory exhaustion.
AlwaysAllocateScope always_allocate(this);
@@ -3140,7 +3212,6 @@ bool Isolate::Init(StartupDeserializer* des) {
#undef ASSIGN_ELEMENT
compilation_cache_ = new CompilationCache(this);
- context_slot_cache_ = new ContextSlotCache();
descriptor_lookup_cache_ = new DescriptorLookupCache();
unicode_cache_ = new UnicodeCache();
inner_pointer_to_code_cache_ = new InnerPointerToCodeCache(this);
@@ -3175,6 +3246,8 @@ bool Isolate::Init(StartupDeserializer* des) {
DCHECK(!heap_.HasBeenSetUp());
heap_.SetUp();
+ isolate_data_.external_reference_table()->Init(this);
+
// Setup the wasm engine.
if (wasm_engine_ == nullptr) {
SetWasmEngine(wasm::WasmEngine::GetWasmEngine());
@@ -3206,22 +3279,37 @@ bool Isolate::Init(StartupDeserializer* des) {
bootstrapper_->Initialize(create_heap_objects);
- if (FLAG_embedded_builtins) {
- if (create_heap_objects && serializer_enabled()) {
- builtins_constants_table_builder_ =
- new BuiltinsConstantsTableBuilder(this);
- }
+ if (FLAG_embedded_builtins && create_heap_objects) {
+ builtins_constants_table_builder_ = new BuiltinsConstantsTableBuilder(this);
}
setup_delegate_->SetupBuiltins(this);
- if (FLAG_embedded_builtins) {
- if (create_heap_objects && serializer_enabled()) {
- builtins_constants_table_builder_->Finalize();
- delete builtins_constants_table_builder_;
- builtins_constants_table_builder_ = nullptr;
- }
+#ifndef V8_TARGET_ARCH_ARM
+ if (create_heap_objects) {
+    // Create a copy of the interpreter entry trampoline and store it
+ // on the root list. It is used as a template for further copies that
+ // may later be created to help profile interpreted code.
+ // We currently cannot do this on arm due to RELATIVE_CODE_TARGETs
+ // assuming that all possible Code targets may be addressed with an int24
+ // offset, effectively limiting code space size to 32MB. We can guarantee
+ // this at mksnapshot-time, but not at runtime.
+ // See also: https://crbug.com/v8/8713.
+ HandleScope handle_scope(this);
+ Handle<Code> code =
+ factory()->CopyCode(BUILTIN_CODE(this, InterpreterEntryTrampoline));
+ heap_.SetInterpreterEntryTrampolineForProfiling(*code);
+ }
+#endif
+ if (FLAG_embedded_builtins && create_heap_objects) {
+ builtins_constants_table_builder_->Finalize();
+ delete builtins_constants_table_builder_;
+ builtins_constants_table_builder_ = nullptr;
+
+ CreateAndSetEmbeddedBlob();
}
- if (create_heap_objects) heap_.CreateFixedStubs();
+ // Initialize custom memcopy and memmove functions (must happen after
+ // embedded blob setup).
+ init_memcopy_functions();
if (FLAG_log_internal_timer_events) {
set_event_logger(Logger::DefaultEventLoggerSentinel);
@@ -3245,16 +3333,17 @@ bool Isolate::Init(StartupDeserializer* des) {
if (!create_heap_objects) des->DeserializeInto(this);
load_stub_cache_->Initialize();
store_stub_cache_->Initialize();
- interpreter_->InitializeDispatchTable();
+ interpreter_->Initialize();
heap_.NotifyDeserializationComplete();
}
delete setup_delegate_;
setup_delegate_ = nullptr;
- if (FLAG_print_builtin_size) PrintBuiltinSizes(this);
- if (FLAG_print_embedded_builtin_candidates) {
- PrintEmbeddedBuiltinCandidates(this);
- }
+ // Initialize the builtin entry table.
+ Builtins::UpdateBuiltinEntryTable(this);
+
+ if (FLAG_print_builtin_code) builtins()->PrintBuiltinCode();
+ if (FLAG_print_builtin_size) builtins()->PrintBuiltinSize();
// Finish initialization of ThreadLocal after deserialization is done.
clear_pending_exception();
@@ -3274,24 +3363,6 @@ bool Isolate::Init(StartupDeserializer* des) {
std::ofstream(GetTurboCfgFileName(this).c_str(), std::ios_base::trunc);
}
- CHECK_EQ(static_cast<int>(OFFSET_OF(Isolate, embedder_data_)),
- Internals::kIsolateEmbedderDataOffset);
- CHECK_EQ(static_cast<int>(OFFSET_OF(Isolate, heap_.roots_)),
- Internals::kIsolateRootsOffset);
- CHECK_EQ(static_cast<int>(OFFSET_OF(Isolate, heap_.external_memory_)),
- Internals::kExternalMemoryOffset);
- CHECK_EQ(static_cast<int>(OFFSET_OF(Isolate, heap_.external_memory_limit_)),
- Internals::kExternalMemoryLimitOffset);
- CHECK_EQ(static_cast<int>(
- OFFSET_OF(Isolate, heap_.external_memory_at_last_mark_compact_)),
- Internals::kExternalMemoryAtLastMarkCompactOffset);
- CHECK_EQ(
- static_cast<int>(OFFSET_OF(Isolate, heap_.external_reference_table_)),
- Internals::kIsolateRootsOffset +
- Heap::kRootsExternalReferenceTableOffset);
- CHECK_EQ(static_cast<int>(OFFSET_OF(Isolate, heap_.builtins_)),
- Internals::kIsolateRootsOffset + Heap::kRootsBuiltinsOffset);
-
{
HandleScope scope(this);
ast_string_constants_ = new AstStringConstants(this, heap()->HashSeed());
@@ -3471,7 +3542,7 @@ bool Isolate::NeedsSourcePositionsForProfiling() const {
debug_->is_active() || logger_->is_logging() || FLAG_trace_maps;
}
-void Isolate::SetFeedbackVectorsForProfilingTools(Object* value) {
+void Isolate::SetFeedbackVectorsForProfilingTools(Object value) {
DCHECK(value->IsUndefined(this) || value->IsArrayList());
heap()->set_feedback_vectors_for_profiling_tools(value);
}
@@ -3488,11 +3559,12 @@ void Isolate::MaybeInitializeVectorListFromHeap() {
{
HeapIterator heap_iterator(heap());
- while (HeapObject* current_obj = heap_iterator.next()) {
+ for (HeapObject current_obj = heap_iterator.next(); !current_obj.is_null();
+ current_obj = heap_iterator.next()) {
if (!current_obj->IsFeedbackVector()) continue;
- FeedbackVector* vector = FeedbackVector::cast(current_obj);
- SharedFunctionInfo* shared = vector->shared_function_info();
+ FeedbackVector vector = FeedbackVector::cast(current_obj);
+ SharedFunctionInfo shared = vector->shared_function_info();
// No need to preserve the feedback vector for non-user-visible functions.
if (!shared->IsSubjectToDebugging()) continue;
@@ -3508,10 +3580,17 @@ void Isolate::MaybeInitializeVectorListFromHeap() {
SetFeedbackVectorsForProfilingTools(*list);
}
-bool Isolate::IsArrayOrObjectOrStringPrototype(Object* object) {
- Object* context = heap()->native_contexts_list();
+void Isolate::set_date_cache(DateCache* date_cache) {
+ if (date_cache != date_cache_) {
+ delete date_cache_;
+ }
+ date_cache_ = date_cache;
+}
+
+bool Isolate::IsArrayOrObjectOrStringPrototype(Object object) {
+ Object context = heap()->native_contexts_list();
while (!context->IsUndefined(this)) {
- Context* current_context = Context::cast(context);
+ Context current_context = Context::cast(context);
if (current_context->initial_object_prototype() == object ||
current_context->initial_array_prototype() == object ||
current_context->initial_string_prototype() == object) {
@@ -3522,11 +3601,11 @@ bool Isolate::IsArrayOrObjectOrStringPrototype(Object* object) {
return false;
}
-bool Isolate::IsInAnyContext(Object* object, uint32_t index) {
+bool Isolate::IsInAnyContext(Object object, uint32_t index) {
DisallowHeapAllocation no_gc;
- Object* context = heap()->native_contexts_list();
+ Object context = heap()->native_contexts_list();
while (!context->IsUndefined(this)) {
- Context* current_context = Context::cast(context);
+ Context current_context = Context::cast(context);
if (current_context->get(index) == object) {
return true;
}
@@ -3535,26 +3614,25 @@ bool Isolate::IsInAnyContext(Object* object, uint32_t index) {
return false;
}
-bool Isolate::IsNoElementsProtectorIntact(Context* context) {
- PropertyCell* no_elements_cell = heap()->no_elements_protector();
+bool Isolate::IsNoElementsProtectorIntact(Context context) {
+ PropertyCell no_elements_cell = heap()->no_elements_protector();
bool cell_reports_intact =
no_elements_cell->value()->IsSmi() &&
Smi::ToInt(no_elements_cell->value()) == kProtectorValid;
#ifdef DEBUG
- Context* native_context = context->native_context();
+ Context native_context = context->native_context();
- Map* root_array_map =
+ Map root_array_map =
native_context->GetInitialJSArrayMap(GetInitialFastElementsKind());
- JSObject* initial_array_proto = JSObject::cast(
+ JSObject initial_array_proto = JSObject::cast(
native_context->get(Context::INITIAL_ARRAY_PROTOTYPE_INDEX));
- JSObject* initial_object_proto = JSObject::cast(
+ JSObject initial_object_proto = JSObject::cast(
native_context->get(Context::INITIAL_OBJECT_PROTOTYPE_INDEX));
- JSObject* initial_string_proto = JSObject::cast(
+ JSObject initial_string_proto = JSObject::cast(
native_context->get(Context::INITIAL_STRING_PROTOTYPE_INDEX));
- if (root_array_map == nullptr ||
- initial_array_proto == initial_object_proto) {
+ if (root_array_map.is_null() || initial_array_proto == initial_object_proto) {
// We are in the bootstrapping process, and the entire check sequence
// shouldn't be performed.
return cell_reports_intact;
@@ -3566,7 +3644,7 @@ bool Isolate::IsNoElementsProtectorIntact(Context* context) {
return cell_reports_intact;
}
- FixedArrayBase* elements = initial_array_proto->elements();
+ FixedArrayBase elements = initial_array_proto->elements();
ReadOnlyRoots roots(heap());
if (elements != roots.empty_fixed_array() &&
elements != roots.empty_slow_element_dictionary()) {
@@ -3622,13 +3700,13 @@ bool Isolate::IsNoElementsProtectorIntact() {
}
bool Isolate::IsIsConcatSpreadableLookupChainIntact() {
- Cell* is_concat_spreadable_cell = heap()->is_concat_spreadable_protector();
+ Cell is_concat_spreadable_cell = heap()->is_concat_spreadable_protector();
bool is_is_concat_spreadable_set =
Smi::ToInt(is_concat_spreadable_cell->value()) == kProtectorInvalid;
#ifdef DEBUG
- Map* root_array_map =
+ Map root_array_map =
raw_native_context()->GetInitialJSArrayMap(GetInitialFastElementsKind());
- if (root_array_map == nullptr) {
+ if (root_array_map.is_null()) {
// Ignore the value of is_concat_spreadable during bootstrap.
return !is_is_concat_spreadable_set;
}
@@ -3648,29 +3726,31 @@ bool Isolate::IsIsConcatSpreadableLookupChainIntact() {
return !is_is_concat_spreadable_set;
}
-bool Isolate::IsIsConcatSpreadableLookupChainIntact(JSReceiver* receiver) {
+bool Isolate::IsIsConcatSpreadableLookupChainIntact(JSReceiver receiver) {
if (!IsIsConcatSpreadableLookupChainIntact()) return false;
return !receiver->HasProxyInPrototype(this);
}
bool Isolate::IsPromiseHookProtectorIntact() {
- PropertyCell* promise_hook_cell = heap()->promise_hook_protector();
+ PropertyCell promise_hook_cell = heap()->promise_hook_protector();
bool is_promise_hook_protector_intact =
Smi::ToInt(promise_hook_cell->value()) == kProtectorValid;
DCHECK_IMPLIES(is_promise_hook_protector_intact,
!promise_hook_or_async_event_delegate_);
+ DCHECK_IMPLIES(is_promise_hook_protector_intact,
+ !promise_hook_or_debug_is_active_or_async_event_delegate_);
return is_promise_hook_protector_intact;
}
bool Isolate::IsPromiseResolveLookupChainIntact() {
- Cell* promise_resolve_cell = heap()->promise_resolve_protector();
+ Cell promise_resolve_cell = heap()->promise_resolve_protector();
bool is_promise_resolve_protector_intact =
Smi::ToInt(promise_resolve_cell->value()) == kProtectorValid;
return is_promise_resolve_protector_intact;
}
bool Isolate::IsPromiseThenLookupChainIntact() {
- PropertyCell* promise_then_cell = heap()->promise_then_protector();
+ PropertyCell promise_then_cell = heap()->promise_then_protector();
bool is_promise_then_protector_intact =
Smi::ToInt(promise_then_cell->value()) == kProtectorValid;
return is_promise_then_protector_intact;
@@ -3730,6 +3810,15 @@ void Isolate::InvalidateTypedArraySpeciesProtector() {
DCHECK(!IsTypedArraySpeciesLookupChainIntact());
}
+void Isolate::InvalidateRegExpSpeciesProtector() {
+ DCHECK(factory()->regexp_species_protector()->value()->IsSmi());
+ DCHECK(IsRegExpSpeciesLookupChainIntact());
+ PropertyCell::SetValueWithInvalidation(
+ this, factory()->regexp_species_protector(),
+ handle(Smi::FromInt(kProtectorInvalid), this));
+ DCHECK(!IsRegExpSpeciesLookupChainIntact());
+}
+
void Isolate::InvalidatePromiseSpeciesProtector() {
DCHECK(factory()->promise_species_protector()->value()->IsSmi());
DCHECK(IsPromiseSpeciesLookupChainIntact());
@@ -3756,6 +3845,24 @@ void Isolate::InvalidateArrayIteratorProtector() {
DCHECK(!IsArrayIteratorLookupChainIntact());
}
+void Isolate::InvalidateMapIteratorProtector() {
+ DCHECK(factory()->map_iterator_protector()->value()->IsSmi());
+ DCHECK(IsMapIteratorLookupChainIntact());
+ PropertyCell::SetValueWithInvalidation(
+ this, factory()->map_iterator_protector(),
+ handle(Smi::FromInt(kProtectorInvalid), this));
+ DCHECK(!IsMapIteratorLookupChainIntact());
+}
+
+void Isolate::InvalidateSetIteratorProtector() {
+ DCHECK(factory()->set_iterator_protector()->value()->IsSmi());
+ DCHECK(IsSetIteratorLookupChainIntact());
+ PropertyCell::SetValueWithInvalidation(
+ this, factory()->set_iterator_protector(),
+ handle(Smi::FromInt(kProtectorInvalid), this));
+ DCHECK(!IsSetIteratorLookupChainIntact());
+}
+
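
The Invalidate...Protector helpers in this stretch all follow one protocol: assert the cell currently holds the "valid" Smi, flip it with invalidation (which discards dependent optimized code), then assert the lookup chain reports broken. A stripped-down sketch of the one-way cell, with constants that mirror kProtectorValid/kProtectorInvalid in spirit only:

#include <cassert>

constexpr int kValid = 1;
constexpr int kInvalid = 0;

struct Protector {
  int value = kValid;
  bool IsIntact() const { return value == kValid; }
  void Invalidate() {
    assert(IsIntact());  // Invalidation is one-way and happens once.
    value = kInvalid;    // Real V8 also deoptimizes dependent code here.
    assert(!IsIntact());
  }
};
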
void Isolate::InvalidateStringIteratorProtector() {
DCHECK(factory()->string_iterator_protector()->value()->IsSmi());
DCHECK(IsStringIteratorLookupChainIntact());
@@ -3765,13 +3872,13 @@ void Isolate::InvalidateStringIteratorProtector() {
DCHECK(!IsStringIteratorLookupChainIntact());
}
-void Isolate::InvalidateArrayBufferNeuteringProtector() {
- DCHECK(factory()->array_buffer_neutering_protector()->value()->IsSmi());
- DCHECK(IsArrayBufferNeuteringIntact());
+void Isolate::InvalidateArrayBufferDetachingProtector() {
+ DCHECK(factory()->array_buffer_detaching_protector()->value()->IsSmi());
+ DCHECK(IsArrayBufferDetachingIntact());
PropertyCell::SetValueWithInvalidation(
- this, factory()->array_buffer_neutering_protector(),
+ this, factory()->array_buffer_detaching_protector(),
handle(Smi::FromInt(kProtectorInvalid), this));
- DCHECK(!IsArrayBufferNeuteringIntact());
+ DCHECK(!IsArrayBufferDetachingIntact());
}
void Isolate::InvalidatePromiseHookProtector() {
@@ -3818,6 +3925,8 @@ static base::RandomNumberGenerator* ensure_rng_exists(
}
base::RandomNumberGenerator* Isolate::random_number_generator() {
+ // TODO(bmeurer) Initialized lazily because it depends on flags; can
+ // be fixed once the default isolate cleanup is done.
return ensure_rng_exists(&random_number_generator_, FLAG_random_seed);
}
@@ -3843,7 +3952,7 @@ int Isolate::GenerateIdentityHash(uint32_t mask) {
return hash != 0 ? hash : 1;
}
-Code* Isolate::FindCodeObject(Address a) {
+Code Isolate::FindCodeObject(Address a) {
return heap()->GcSafeFindCodeForInnerPointer(a);
}
@@ -3860,7 +3969,7 @@ Handle<Symbol> Isolate::SymbolFor(RootIndex dictionary_index,
Handle<String> name, bool private_symbol) {
Handle<String> key = factory()->InternalizeString(name);
Handle<NameDictionary> dictionary =
- Handle<NameDictionary>::cast(heap()->root_handle(dictionary_index));
+ Handle<NameDictionary>::cast(root_handle(dictionary_index));
int entry = dictionary->FindEntry(this, key);
Handle<Symbol> symbol;
if (entry == NameDictionary::kNotFound) {
@@ -3918,32 +4027,24 @@ void Isolate::RemoveCallCompletedCallback(CallCompletedCallback callback) {
call_completed_callbacks_.erase(pos);
}
-void Isolate::AddMicrotasksCompletedCallback(
- MicrotasksCompletedCallback callback) {
- auto pos = std::find(microtasks_completed_callbacks_.begin(),
- microtasks_completed_callbacks_.end(), callback);
- if (pos != microtasks_completed_callbacks_.end()) return;
- microtasks_completed_callbacks_.push_back(callback);
-}
-
-void Isolate::RemoveMicrotasksCompletedCallback(
- MicrotasksCompletedCallback callback) {
- auto pos = std::find(microtasks_completed_callbacks_.begin(),
- microtasks_completed_callbacks_.end(), callback);
- if (pos == microtasks_completed_callbacks_.end()) return;
- microtasks_completed_callbacks_.erase(pos);
-}
-
void Isolate::FireCallCompletedCallback() {
if (!handle_scope_implementer()->CallDepthIsZero()) return;
bool run_microtasks =
- heap()->default_microtask_queue()->pending_microtask_count() &&
- !handle_scope_implementer()->HasMicrotasksSuppressions() &&
+ default_microtask_queue()->size() &&
+ !default_microtask_queue()->HasMicrotasksSuppressions() &&
handle_scope_implementer()->microtasks_policy() ==
v8::MicrotasksPolicy::kAuto;
- if (run_microtasks) RunMicrotasks();
+ if (run_microtasks) {
+ default_microtask_queue()->RunMicrotasks(this);
+ } else {
+ // TODO(marja): (spec) The discussion about when to clear the KeepDuringJob
+ // set is still open (whether to clear it after every microtask or once
+ // during a microtask checkpoint). See also
+ // https://github.com/tc39/proposal-weakrefs/issues/39 .
+ heap()->ClearKeepDuringJobSet();
+ }
if (call_completed_callbacks_.empty()) return;
// Fire callbacks. Increase call depth to prevent recursive callbacks.
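
The rewritten checkpoint above runs microtasks only once control has fully returned to the embedder: call depth zero, a non-empty default queue, no suppressions, and the kAuto policy. A simplified sketch of that decision and the drain loop (names are illustrative; the real queue lives in MicrotaskQueue):

#include <deque>
#include <functional>

enum class MicrotasksPolicy { kExplicit, kScoped, kAuto };

struct MicrotaskQueue {
  std::deque<std::function<void()>> tasks;
  int suppressions = 0;

  void RunMicrotasks() {
    while (!tasks.empty()) {
      auto task = std::move(tasks.front());
      tasks.pop_front();
      task();  // A task may enqueue more microtasks; the loop drains them too.
    }
  }
};

void FireCallCompleted(MicrotaskQueue& queue, int call_depth,
                       MicrotasksPolicy policy) {
  if (call_depth != 0) return;  // Still inside a nested API call.
  bool run = !queue.tasks.empty() && queue.suppressions == 0 &&
             policy == MicrotasksPolicy::kAuto;
  if (run) queue.RunMicrotasks();
}
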
@@ -3956,12 +4057,18 @@ void Isolate::FireCallCompletedCallback() {
}
void Isolate::PromiseHookStateUpdated() {
- bool is_active = promise_hook_ || async_event_delegate_;
- if (is_active && IsPromiseHookProtectorIntact()) {
+ bool promise_hook_or_async_event_delegate =
+ promise_hook_ || async_event_delegate_;
+ bool promise_hook_or_debug_is_active_or_async_event_delegate =
+ promise_hook_or_async_event_delegate || debug()->is_active();
+ if (promise_hook_or_debug_is_active_or_async_event_delegate &&
+ IsPromiseHookProtectorIntact()) {
HandleScope scope(this);
InvalidatePromiseHookProtector();
}
- promise_hook_or_async_event_delegate_ = is_active;
+ promise_hook_or_async_event_delegate_ = promise_hook_or_async_event_delegate;
+ promise_hook_or_debug_is_active_or_async_event_delegate_ =
+ promise_hook_or_debug_is_active_or_async_event_delegate;
}
namespace {
@@ -4043,20 +4150,47 @@ void Isolate::SetHostInitializeImportMetaObjectCallback(
}
MaybeHandle<Object> Isolate::RunPrepareStackTraceCallback(
- Handle<Context> context, Handle<JSObject> error) {
+ Handle<Context> context, Handle<JSObject> error, Handle<JSArray> sites) {
v8::Local<v8::Context> api_context = Utils::ToLocal(context);
- v8::Local<StackTrace> trace =
- Utils::StackTraceToLocal(GetDetailedStackTrace(error));
-
v8::Local<v8::Value> stack;
ASSIGN_RETURN_ON_SCHEDULED_EXCEPTION_VALUE(
this, stack,
- prepare_stack_trace_callback_(api_context, Utils::ToLocal(error), trace),
+ prepare_stack_trace_callback_(api_context, Utils::ToLocal(error),
+ Utils::ToLocal(sites)),
MaybeHandle<Object>());
return Utils::OpenHandle(*stack);
}
+int Isolate::LookupOrAddExternallyCompiledFilename(const char* filename) {
+ if (embedded_file_writer_ != nullptr) {
+ return embedded_file_writer_->LookupOrAddExternallyCompiledFilename(
+ filename);
+ }
+ return 0;
+}
+
+const char* Isolate::GetExternallyCompiledFilename(int index) const {
+ if (embedded_file_writer_ != nullptr) {
+ return embedded_file_writer_->GetExternallyCompiledFilename(index);
+ }
+ return "";
+}
+
+int Isolate::GetExternallyCompiledFilenameCount() const {
+ if (embedded_file_writer_ != nullptr) {
+ return embedded_file_writer_->GetExternallyCompiledFilenameCount();
+ }
+ return 0;
+}
+
+void Isolate::PrepareBuiltinSourcePositionMap() {
+ if (embedded_file_writer_ != nullptr) {
+ return embedded_file_writer_->PrepareBuiltinSourcePositionMap(
+ this->builtins());
+ }
+}
+
void Isolate::SetPrepareStackTraceCallback(PrepareStackTraceCallback callback) {
prepare_stack_trace_callback_ = callback;
}
@@ -4073,7 +4207,7 @@ void Isolate::SetAtomicsWaitCallback(v8::Isolate::AtomicsWaitCallback callback,
void Isolate::RunAtomicsWaitCallback(v8::Isolate::AtomicsWaitEvent event,
Handle<JSArrayBuffer> array_buffer,
- size_t offset_in_bytes, int32_t value,
+ size_t offset_in_bytes, int64_t value,
double timeout_in_ms,
AtomicsWaitWakeHandle* stop_handle) {
DCHECK(array_buffer->is_shared());
@@ -4180,43 +4314,6 @@ void Isolate::ReportPromiseReject(Handle<JSPromise> promise,
v8::Utils::StackTraceToLocal(stack_trace)));
}
-void Isolate::EnqueueMicrotask(Handle<Microtask> microtask) {
- Handle<MicrotaskQueue> microtask_queue(heap()->default_microtask_queue(),
- this);
- MicrotaskQueue::EnqueueMicrotask(this, microtask_queue, microtask);
-}
-
-
-void Isolate::RunMicrotasks() {
- // Increase call depth to prevent recursive callbacks.
- v8::Isolate::SuppressMicrotaskExecutionScope suppress(
- reinterpret_cast<v8::Isolate*>(this));
- HandleScope scope(this);
- Handle<MicrotaskQueue> microtask_queue(heap()->default_microtask_queue(),
- this);
- if (microtask_queue->pending_microtask_count()) {
- is_running_microtasks_ = true;
- TRACE_EVENT0("v8.execute", "RunMicrotasks");
- TRACE_EVENT_CALL_STATS_SCOPED(this, "v8", "V8.RunMicrotasks");
-
- MaybeHandle<Object> maybe_exception;
- MaybeHandle<Object> maybe_result = Execution::RunMicrotasks(
- this, Execution::MessageHandling::kReport, &maybe_exception);
- // If execution is terminating, bail out, clean up, and propagate to
- // TryCatch scope.
- if (maybe_result.is_null() && maybe_exception.is_null()) {
- microtask_queue->set_queue(ReadOnlyRoots(heap()).empty_fixed_array());
- microtask_queue->set_pending_microtask_count(0);
- handle_scope_implementer()->LeaveMicrotaskContext();
- SetTerminationOnExternalTryCatch();
- }
- CHECK_EQ(0, microtask_queue->pending_microtask_count());
- CHECK_EQ(0, microtask_queue->queue()->length());
- is_running_microtasks_ = false;
- }
- FireMicrotasksCompletedCallback();
-}
-
void Isolate::SetUseCounterCallback(v8::Isolate::UseCounterCallback callback) {
DCHECK(!use_counter_callback_);
use_counter_callback_ = callback;
@@ -4273,8 +4370,8 @@ void Isolate::CheckDetachedContextsAfterGC() {
if (length == 0) return;
int new_length = 0;
for (int i = 0; i < length; i += 2) {
- int mark_sweeps = Smi::ToInt(detached_contexts->Get(i)->cast<Smi>());
- MaybeObject* context = detached_contexts->Get(i + 1);
+ int mark_sweeps = detached_contexts->Get(i).ToSmi().value();
+ MaybeObject context = detached_contexts->Get(i + 1);
DCHECK(context->IsWeakOrCleared());
if (!context->IsCleared()) {
detached_contexts->Set(
@@ -4285,7 +4382,7 @@ void Isolate::CheckDetachedContextsAfterGC() {
}
detached_contexts->set_length(new_length);
while (new_length < length) {
- detached_contexts->Set(new_length, MaybeObject::FromSmi(Smi::kZero));
+ detached_contexts->Set(new_length, MaybeObject::FromSmi(Smi::zero()));
++new_length;
}
@@ -4293,26 +4390,26 @@ void Isolate::CheckDetachedContextsAfterGC() {
PrintF("%d detached contexts are collected out of %d\n",
length - new_length, length);
for (int i = 0; i < new_length; i += 2) {
- int mark_sweeps = Smi::ToInt(detached_contexts->Get(i)->cast<Smi>());
- MaybeObject* context = detached_contexts->Get(i + 1);
+ int mark_sweeps = detached_contexts->Get(i).ToSmi().value();
+ MaybeObject context = detached_contexts->Get(i + 1);
DCHECK(context->IsWeakOrCleared());
if (mark_sweeps > 3) {
PrintF("detached context %p\n survived %d GCs (leak?)\n",
- static_cast<void*>(context), mark_sweeps);
+ reinterpret_cast<void*>(context.ptr()), mark_sweeps);
}
}
}
}
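
CheckDetachedContextsAfterGC keeps (GC-survival-count, weak-ref) pairs, drops entries whose context was collected, ages the survivors in place, and flags long-lived ones as possible leaks. A hedged sketch of the compaction, using std::optional in place of a weak MaybeObject reference:

#include <cstddef>
#include <cstdio>
#include <optional>
#include <vector>

struct Entry {
  int mark_sweeps;                // Full GCs survived since detachment.
  std::optional<int> context_id;  // Empty once the context was collected.
};

void CompactDetachedContexts(std::vector<Entry>& entries) {
  std::size_t new_length = 0;
  for (const Entry& entry : entries) {
    if (!entry.context_id.has_value()) continue;  // Cleared by GC; drop it.
    entries[new_length++] = {entry.mark_sweeps + 1, entry.context_id};
  }
  entries.resize(new_length);  // Analogous to zero-filling the dropped tail.
  for (const Entry& entry : entries) {
    if (entry.mark_sweeps > 3) {
      std::printf("detached context %d survived %d GCs (leak?)\n",
                  *entry.context_id, entry.mark_sweeps);
    }
  }
}
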
double Isolate::LoadStartTimeMs() {
- base::LockGuard<base::Mutex> guard(&rail_mutex_);
+ base::MutexGuard guard(&rail_mutex_);
return load_start_time_ms_;
}
void Isolate::SetRAILMode(RAILMode rail_mode) {
RAILMode old_rail_mode = rail_mode_.Value();
if (old_rail_mode != PERFORMANCE_LOAD && rail_mode == PERFORMANCE_LOAD) {
- base::LockGuard<base::Mutex> guard(&rail_mutex_);
+ base::MutexGuard guard(&rail_mutex_);
load_start_time_ms_ = heap()->MonotonicallyIncreasingTimeInMs();
}
rail_mode_.SetValue(rail_mode);
@@ -4355,6 +4452,21 @@ void Isolate::SetIdle(bool is_idle) {
}
}
+#ifdef V8_INTL_SUPPORT
+icu::UObject* Isolate::get_cached_icu_object(ICUObjectCacheType cache_type) {
+ return icu_object_cache_[cache_type].get();
+}
+
+void Isolate::set_icu_object_in_cache(ICUObjectCacheType cache_type,
+ std::shared_ptr<icu::UObject> obj) {
+ icu_object_cache_[cache_type] = obj;
+}
+
+void Isolate::clear_cached_icu_object(ICUObjectCacheType cache_type) {
+ icu_object_cache_.erase(cache_type);
+}
+#endif // V8_INTL_SUPPORT
+
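
The new ICU cache above keys std::shared_ptr-held objects by an enum, so a caller can keep using an object after the cache entry is replaced or erased. A minimal sketch with a generic CachedObject base standing in for icu::UObject:

#include <map>
#include <memory>

enum class CacheType { kDefaultCollator, kDefaultNumberFormat };

struct CachedObject {
  virtual ~CachedObject() = default;
};

class ICUObjectCache {
 public:
  CachedObject* Get(CacheType type) {
    auto it = cache_.find(type);
    return it == cache_.end() ? nullptr : it->second.get();
  }
  void Set(CacheType type, std::shared_ptr<CachedObject> obj) {
    cache_[type] = std::move(obj);
  }
  void Clear(CacheType type) { cache_.erase(type); }

 private:
  std::map<CacheType, std::shared_ptr<CachedObject>> cache_;
};
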
bool StackLimitCheck::JsHasOverflowed(uintptr_t gap) const {
StackGuard* stack_guard = isolate_->stack_guard();
#ifdef USE_SIMULATOR
@@ -4368,7 +4480,7 @@ bool StackLimitCheck::JsHasOverflowed(uintptr_t gap) const {
SaveContext::SaveContext(Isolate* isolate)
: isolate_(isolate), prev_(isolate->save_context()) {
- if (isolate->context() != nullptr) {
+ if (!isolate->context().is_null()) {
context_ = Handle<Context>(isolate->context(), isolate);
}
isolate->set_save_context(this);
@@ -4377,7 +4489,7 @@ SaveContext::SaveContext(Isolate* isolate)
}
SaveContext::~SaveContext() {
- isolate_->set_context(context_.is_null() ? nullptr : *context_);
+ isolate_->set_context(context_.is_null() ? Context() : *context_);
isolate_->set_save_context(prev_);
}
diff --git a/deps/v8/src/isolate.h b/deps/v8/src/isolate.h
index c25f143cf8..3bf6f3aa85 100644
--- a/deps/v8/src/isolate.h
+++ b/deps/v8/src/isolate.h
@@ -6,6 +6,7 @@
#define V8_ISOLATE_H_
#include <cstddef>
+#include <functional>
#include <memory>
#include <queue>
#include <unordered_map>
@@ -19,7 +20,6 @@
#include "src/base/macros.h"
#include "src/builtins/builtins.h"
#include "src/contexts.h"
-#include "src/date.h"
#include "src/debug/debug-interface.h"
#include "src/execution.h"
#include "src/futex-emulation.h"
@@ -27,19 +27,19 @@
#include "src/handles.h"
#include "src/heap/factory.h"
#include "src/heap/heap.h"
+#include "src/isolate-allocator.h"
+#include "src/isolate-data.h"
#include "src/messages.h"
#include "src/objects/code.h"
#include "src/objects/debug-objects.h"
#include "src/runtime/runtime.h"
+#include "src/thread-id.h"
#include "src/unicode.h"
#ifdef V8_INTL_SUPPORT
#include "unicode/uversion.h" // Define U_ICU_NAMESPACE.
-// 'icu' does not work. Use U_ICU_NAMESPACE.
namespace U_ICU_NAMESPACE {
-
-class RegexMatcher;
-
+class UObject;
} // namespace U_ICU_NAMESPACE
#endif // V8_INTL_SUPPORT
@@ -74,6 +74,7 @@ class Counters;
class Debug;
class DeoptimizerData;
class DescriptorLookupCache;
+class EmbeddedFileWriterInterface;
class EternalHandles;
class ExternalCallbackScope;
class HandleScopeImplementer;
@@ -83,6 +84,7 @@ class InnerPointerToCodeCache;
class Logger;
class MaterializedObjectStore;
class Microtask;
+class MicrotaskQueue;
class OptimizingCompileDispatcher;
class PromiseOnStack;
class RegExpStack;
@@ -160,7 +162,7 @@ class WasmEngine;
} while (false)
/**
- * RETURN_RESULT_OR_FAILURE is used in functions with return type Object* (such
+ * RETURN_RESULT_OR_FAILURE is used in functions with return type Object (such
* as "RUNTIME_FUNCTION(...) {...}" or "BUILTIN(...) {...}" ) to return either
* the contents of a MaybeHandle<X>, or the "exception" sentinel value.
* Example usage:
@@ -253,7 +255,7 @@ class WasmEngine;
*
* If inside a function with return type MaybeHandle<X>, use RETURN_ON_EXCEPTION
* instead.
- * If inside a function with return type Object*, use
+ * If inside a function with return type Object, use
* RETURN_FAILURE_ON_EXCEPTION instead.
*/
#define RETURN_ON_EXCEPTION_VALUE(isolate, call, value) \
@@ -267,7 +269,7 @@ class WasmEngine;
/**
* RETURN_FAILURE_ON_EXCEPTION conditionally returns the "exception" sentinel if
* the given MaybeHandle is empty; so it can only be used in functions with
- * return type Object*, such as RUNTIME_FUNCTION(...) {...} or BUILTIN(...)
+ * return type Object, such as RUNTIME_FUNCTION(...) {...} or BUILTIN(...)
* {...}. Example usage:
*
* RUNTIME_FUNCTION(Runtime_Func) {
@@ -306,7 +308,7 @@ class WasmEngine;
* ...
* }
*
- * If inside a function with return type Object*, use
+ * If inside a function with return type Object, use
* RETURN_FAILURE_ON_EXCEPTION instead.
* If inside a function with return type
* Maybe<X> or Handle<X>, use RETURN_ON_EXCEPTION_VALUE instead.
@@ -330,63 +332,15 @@ class WasmEngine;
} \
} while (false)
-// Platform-independent, reliable thread identifier.
-class ThreadId {
- public:
- // Creates an invalid ThreadId.
- ThreadId() { base::Relaxed_Store(&id_, kInvalidId); }
-
- ThreadId& operator=(const ThreadId& other) {
- base::Relaxed_Store(&id_, base::Relaxed_Load(&other.id_));
- return *this;
- }
-
- bool operator==(const ThreadId& other) const { return Equals(other); }
-
- // Returns ThreadId for current thread.
- static ThreadId Current() { return ThreadId(GetCurrentThreadId()); }
-
- // Returns invalid ThreadId (guaranteed not to be equal to any thread).
- static ThreadId Invalid() { return ThreadId(kInvalidId); }
-
- // Compares ThreadIds for equality.
- V8_INLINE bool Equals(const ThreadId& other) const {
- return base::Relaxed_Load(&id_) == base::Relaxed_Load(&other.id_);
- }
-
- // Checks whether this ThreadId refers to any thread.
- V8_INLINE bool IsValid() const {
- return base::Relaxed_Load(&id_) != kInvalidId;
- }
-
- // Converts ThreadId to an integer representation
- // (required for public API: V8::V8::GetCurrentThreadId).
- int ToInteger() const { return static_cast<int>(base::Relaxed_Load(&id_)); }
-
- // Converts ThreadId to an integer representation
- // (required for public API: V8::V8::TerminateExecution).
- static ThreadId FromInteger(int id) { return ThreadId(id); }
-
- private:
- static const int kInvalidId = -1;
-
- explicit ThreadId(int id) { base::Relaxed_Store(&id_, id); }
-
- static int AllocateThreadId();
-
- V8_EXPORT_PRIVATE static int GetCurrentThreadId();
-
- base::Atomic32 id_;
-
- static base::Atomic32 highest_thread_id_;
-
- friend class Isolate;
-};
-
#define FIELD_ACCESSOR(type, name) \
inline void set_##name(type v) { name##_ = v; } \
inline type name() const { return name##_; }
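
For reference, FIELD_ACCESSOR(int, foo) would expand mechanically to the
following pair (a direct expansion of the macro above; "foo" is a placeholder
name):

  inline void set_foo(int v) { foo_ = v; }
  inline int foo() const { return foo_; }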
+// Controls for manual embedded blob lifecycle management, used by tests and
+// mksnapshot.
+V8_EXPORT_PRIVATE void DisableEmbeddedBlobRefcounting();
+V8_EXPORT_PRIVATE void FreeCurrentEmbeddedBlob();
+
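
A plausible usage sketch for these two controls in mksnapshot-style tooling
(the call sites are assumptions for illustration, not taken from this patch):

  DisableEmbeddedBlobRefcounting();  // pin the blob before isolates exist
  // ... create isolates and build the snapshot ...
  FreeCurrentEmbeddedBlob();         // explicit teardown once isolates are gone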
class ThreadLocalTop {
public:
// Does early low-level initialization that does not depend on the
@@ -423,12 +377,17 @@ class ThreadLocalTop {
Isolate* isolate_ = nullptr;
// The context where the current execution method is created and for variable
// lookups.
- Context* context_ = nullptr;
+ // TODO(3770): This field is read/written from generated code, so it would
+ // be cleaner to make it an "Address raw_context_", and construct a Context
+ // object in the getter. Same for {pending_handler_context_} below. In the
+ // meantime, assert that the memory layout is the same.
+ STATIC_ASSERT(sizeof(Context) == kSystemPointerSize);
+ Context context_;
ThreadId thread_id_ = ThreadId::Invalid();
- Object* pending_exception_ = nullptr;
+ Object pending_exception_;
// Communication channel between Isolate::FindHandler and the CEntry.
- Context* pending_handler_context_ = nullptr;
+ Context pending_handler_context_;
Address pending_handler_entrypoint_ = kNullAddress;
Address pending_handler_constant_pool_ = kNullAddress;
Address pending_handler_fp_ = kNullAddress;
@@ -436,12 +395,12 @@ class ThreadLocalTop {
// Communication channel between Isolate::Throw and message consumers.
bool rethrowing_message_ = false;
- Object* pending_message_obj_ = nullptr;
+ Object pending_message_obj_;
// Use a separate value for scheduled exceptions to preserve the
// invariants that hold about pending_exception. We may want to
// unify them later.
- Object* scheduled_exception_ = nullptr;
+ Object scheduled_exception_;
bool external_caught_exception_ = false;
SaveContext* save_context_ = nullptr;
@@ -498,7 +457,7 @@ class ThreadLocalTop {
V(int, suffix_table, (kBMMaxShift + 1)) \
ISOLATE_INIT_DEBUG_ARRAY_LIST(V)
-typedef std::vector<HeapObject*> DebugObjectCache;
+typedef std::vector<HeapObject> DebugObjectCache;
#define ISOLATE_INIT_LIST(V) \
/* Assembler state. */ \
@@ -515,10 +474,11 @@ typedef std::vector<HeapObject*> DebugObjectCache;
/* State for Relocatable. */ \
V(Relocatable*, relocatable_top, nullptr) \
V(DebugObjectCache*, string_stream_debug_object_cache, nullptr) \
- V(Object*, string_stream_current_security_token, nullptr) \
+ V(Object, string_stream_current_security_token, Object()) \
V(const intptr_t*, api_external_references, nullptr) \
V(AddressToIndexHashMap*, external_reference_map, nullptr) \
V(HeapObjectToIndexHashMap*, root_index_map, nullptr) \
+ V(MicrotaskQueue*, default_microtask_queue, nullptr) \
V(CompilationStatistics*, turbo_statistics, nullptr) \
V(CodeTracer*, code_tracer, nullptr) \
V(uint32_t, per_isolate_assert_data, 0xFFFFFFFFu) \
@@ -554,14 +514,12 @@ typedef std::vector<HeapObject*> DebugObjectCache;
// Factory's members available to Isolate directly.
class V8_EXPORT_PRIVATE HiddenFactory : private Factory {};
-class Isolate : private HiddenFactory {
+class Isolate final : private HiddenFactory {
// These forward declarations are required to make the friend declarations in
// PerIsolateThreadData work on some older versions of gcc.
class ThreadDataTable;
class EntryStackItem;
public:
- ~Isolate();
-
// A thread has a PerIsolateThreadData instance for each isolate that it has
// entered. That instance is allocated when the isolate is initially entered
// and reused on subsequent entries.
@@ -615,6 +573,23 @@ class Isolate : private HiddenFactory {
static void InitializeOncePerProcess();
+ // Creates Isolate object. Must be used instead of constructing Isolate with
+ // new operator.
+ static V8_EXPORT_PRIVATE Isolate* New(
+ IsolateAllocationMode mode = IsolateAllocationMode::kDefault);
+
+ // Deletes Isolate object. Must be used instead of delete operator.
+ // Destroys the non-default isolates.
+  // Sets the default isolate into a "has_been_disposed" state rather than
+  // destroying it, for legacy API reasons.
+ static void Delete(Isolate* isolate);
+
+ // Returns allocation mode of this isolate.
+ V8_INLINE IsolateAllocationMode isolate_allocation_mode();
+
+ // Page allocator that must be used for allocating V8 heap pages.
+ v8::PageAllocator* page_allocator();
+
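
For context, the intended lifecycle reads roughly as follows (a sketch built
only from the declarations above; the surrounding code is illustrative):

  Isolate* isolate = Isolate::New(IsolateAllocationMode::kDefault);
  // ... enter the isolate and run work ...
  Isolate::Delete(isolate);  // plain `delete isolate` is not allowed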
// Returns the PerIsolateThreadData for the current thread (or nullptr if one
// is not currently set).
static PerIsolateThreadData* CurrentPerIsolateThreadData() {
@@ -622,11 +597,16 @@ class Isolate : private HiddenFactory {
base::Thread::GetThreadLocal(per_isolate_thread_data_key_));
}
- // Returns the isolate inside which the current thread is running.
- V8_INLINE static Isolate* Current() {
+ // Returns the isolate inside which the current thread is running or nullptr.
+ V8_INLINE static Isolate* TryGetCurrent() {
DCHECK_EQ(base::Relaxed_Load(&isolate_key_created_), 1);
- Isolate* isolate = reinterpret_cast<Isolate*>(
+ return reinterpret_cast<Isolate*>(
base::Thread::GetExistingThreadLocal(isolate_key_));
+ }
+
+ // Returns the isolate inside which the current thread is running.
+ V8_INLINE static Isolate* Current() {
+ Isolate* isolate = TryGetCurrent();
DCHECK_NOT_NULL(isolate);
return isolate;
}
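
A minimal sketch of the distinction between the two accessors above (the
caller code is illustrative):

  Isolate* isolate = Isolate::TryGetCurrent();
  if (isolate == nullptr) {
    // This thread never entered an isolate; Current() would have DCHECK-failed.
  }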
@@ -634,7 +614,7 @@ class Isolate : private HiddenFactory {
// Get the isolate that the given HeapObject lives in, returning true on
// success. If the object is not writable (i.e. lives in read-only space),
// return false.
- inline static bool FromWritableHeapObject(HeapObject* obj, Isolate** isolate);
+ inline static bool FromWritableHeapObject(HeapObject obj, Isolate** isolate);
// Usually called by Init(), but can be called early e.g. to allow
// testing components that require logging but not the whole
@@ -649,17 +629,27 @@ class Isolate : private HiddenFactory {
// True if at least one thread Enter'ed this isolate.
bool IsInUse() { return entry_stack_ != nullptr; }
- // Destroys the non-default isolates.
- // Sets default isolate into "has_been_disposed" state rather then destroying,
- // for legacy API reasons.
- void TearDown();
-
void ReleaseSharedPtrs();
void ClearSerializerData();
bool LogObjectRelocation();
+ // Initializes the current thread to run this Isolate.
+ // Not thread-safe. Multiple threads should not Enter/Exit the same isolate
+  // at the same time; this should be prevented using external locking.
+ void Enter();
+
+  // Exits the current thread. The previously entered Isolate is restored
+  // for the thread.
+  // Not thread-safe. Multiple threads should not Enter/Exit the same isolate
+  // at the same time; this should be prevented using external locking.
+ void Exit();
+
+ // Find the PerThread for this particular (isolate, thread) combination.
+ // If one does not yet exist, allocate a new one.
+ PerIsolateThreadData* FindOrAllocatePerThreadDataForThisThread();
+
// Find the PerThread for this particular (isolate, thread) combination
// If one does not yet exist, return null.
PerIsolateThreadData* FindPerThreadDataForThisThread();
@@ -672,29 +662,15 @@ class Isolate : private HiddenFactory {
// If one does not yet exist, no-op.
void DiscardPerThreadDataForThisThread();
- // Returns the key used to store the pointer to the current isolate.
- // Used internally for V8 threads that do not execute JavaScript but still
- // are part of the domain of an isolate (like the context switcher).
- static base::Thread::LocalStorageKey isolate_key() {
- return isolate_key_;
- }
-
- // Returns the key used to store process-wide thread IDs.
- static base::Thread::LocalStorageKey thread_id_key() {
- return thread_id_key_;
- }
-
- static base::Thread::LocalStorageKey per_isolate_thread_data_key();
-
// Mutex for serializing access to break control structures.
base::RecursiveMutex* break_access() { return &break_access_; }
Address get_address_from_id(IsolateAddressId id);
// Access to top context (where the current function object was created).
- Context* context() { return thread_local_top_.context_; }
- inline void set_context(Context* context);
- Context** context_address() { return &thread_local_top_.context_; }
+ Context context() { return thread_local_top_.context_; }
+ inline void set_context(Context context);
+ Context* context_address() { return &thread_local_top_.context_; }
THREAD_LOCAL_TOP_ACCESSOR(SaveContext*, save_context)
@@ -702,17 +678,17 @@ class Isolate : private HiddenFactory {
THREAD_LOCAL_TOP_ACCESSOR(ThreadId, thread_id)
// Interface to pending exception.
- inline Object* pending_exception();
- inline void set_pending_exception(Object* exception_obj);
+ inline Object pending_exception();
+ inline void set_pending_exception(Object exception_obj);
inline void clear_pending_exception();
bool AreWasmThreadsEnabled(Handle<Context> context);
- THREAD_LOCAL_TOP_ADDRESS(Object*, pending_exception)
+ THREAD_LOCAL_TOP_ADDRESS(Object, pending_exception)
inline bool has_pending_exception();
- THREAD_LOCAL_TOP_ADDRESS(Context*, pending_handler_context)
+ THREAD_LOCAL_TOP_ADDRESS(Context, pending_handler_context)
THREAD_LOCAL_TOP_ADDRESS(Address, pending_handler_entrypoint)
THREAD_LOCAL_TOP_ADDRESS(Address, pending_handler_constant_pool)
THREAD_LOCAL_TOP_ADDRESS(Address, pending_handler_fp)
@@ -727,21 +703,21 @@ class Isolate : private HiddenFactory {
return &thread_local_top_.external_caught_exception_;
}
- THREAD_LOCAL_TOP_ADDRESS(Object*, scheduled_exception)
+ THREAD_LOCAL_TOP_ADDRESS(Object, scheduled_exception)
inline void clear_pending_message();
Address pending_message_obj_address() {
return reinterpret_cast<Address>(&thread_local_top_.pending_message_obj_);
}
- inline Object* scheduled_exception();
+ inline Object scheduled_exception();
inline bool has_scheduled_exception();
inline void clear_scheduled_exception();
- bool IsJavaScriptHandlerOnTop(Object* exception);
- bool IsExternalHandlerOnTop(Object* exception);
+ bool IsJavaScriptHandlerOnTop(Object exception);
+ bool IsExternalHandlerOnTop(Object exception);
- inline bool is_catchable_by_javascript(Object* exception);
+ inline bool is_catchable_by_javascript(Object exception);
// JS execution stack (see frames.h).
static Address c_entry_fp(ThreadLocalTop* thread) {
@@ -780,7 +756,7 @@ class Isolate : private HiddenFactory {
// exceptions. If an exception was thrown and not handled by an external
// handler the exception is scheduled to be rethrown when we return to running
// JavaScript code. If an exception is scheduled true is returned.
- V8_EXPORT_PRIVATE bool OptionalRescheduleException(bool is_bottom_call);
+ V8_EXPORT_PRIVATE bool OptionalRescheduleException(bool clear_exception);
// Push and pop a promise and the current try-catch handler.
void PushPromise(Handle<JSObject> promise);
@@ -850,8 +826,8 @@ class Isolate : private HiddenFactory {
// Exception throwing support. The caller should use the result
// of Throw() as its return value.
- Object* Throw(Object* exception, MessageLocation* location = nullptr);
- Object* ThrowIllegalOperation();
+ Object Throw(Object exception, MessageLocation* location = nullptr);
+ Object ThrowIllegalOperation();
template <typename T>
V8_WARN_UNUSED_RESULT MaybeHandle<T> Throw(
@@ -874,11 +850,11 @@ class Isolate : private HiddenFactory {
// Re-throw an exception. This involves no error reporting since error
// reporting was handled when the exception was thrown originally.
- Object* ReThrow(Object* exception);
+ Object ReThrow(Object exception);
// Find the correct handler for the current pending exception. This also
// clears and returns the current pending exception.
- Object* UnwindAndFindHandler();
+ Object UnwindAndFindHandler();
// Tries to predict whether an exception will be caught. Note that this can
// only produce an estimate, because it is undecidable whether a finally
@@ -893,7 +869,7 @@ class Isolate : private HiddenFactory {
};
CatchType PredictExceptionCatcher();
- V8_EXPORT_PRIVATE void ScheduleThrow(Object* exception);
+ V8_EXPORT_PRIVATE void ScheduleThrow(Object exception);
// Re-set pending message, script and positions reported to the TryCatch
// back to the TLS for re-use when rethrowing.
void RestorePendingMessageFromTryCatch(v8::TryCatch* handler);
@@ -909,7 +885,7 @@ class Isolate : private HiddenFactory {
MessageLocation GetMessageLocation();
// Promote a scheduled exception to pending. Asserts has_scheduled_exception.
- Object* PromoteScheduledException();
+ Object PromoteScheduledException();
// Attempts to compute the current source location, storing the
// result in the target out parameter. The source location is attached to a
@@ -925,8 +901,8 @@ class Isolate : private HiddenFactory {
MessageLocation* location);
// Out of resource exception helpers.
- Object* StackOverflow();
- Object* TerminateExecution();
+ Object StackOverflow();
+ Object TerminateExecution();
void CancelTerminateExecution();
void RequestInterrupt(InterruptCallback callback, void* data);
@@ -940,7 +916,7 @@ class Isolate : private HiddenFactory {
// Returns the current native context.
inline Handle<NativeContext> native_context();
- inline NativeContext* raw_native_context();
+ inline NativeContext raw_native_context();
Handle<Context> GetIncumbentContext();
@@ -976,7 +952,7 @@ class Isolate : private HiddenFactory {
#define NATIVE_CONTEXT_FIELD_ACCESSOR(index, type, name) \
inline Handle<type> name(); \
- inline bool is_##name(type* value);
+ inline bool is_##name(type value);
NATIVE_CONTEXT_FIELDS(NATIVE_CONTEXT_FIELD_ACCESSOR)
#undef NATIVE_CONTEXT_FIELD_ACCESSOR
@@ -1000,10 +976,44 @@ class Isolate : private HiddenFactory {
StackGuard* stack_guard() { return &stack_guard_; }
Heap* heap() { return &heap_; }
- // kRootRegister may be used to address any location that falls into this
- // region. Fields outside this region are not guaranteed to live at a static
- // offset from kRootRegister.
- inline base::AddressRegion root_register_addressable_region();
+ const IsolateData* isolate_data() const { return &isolate_data_; }
+ IsolateData* isolate_data() { return &isolate_data_; }
+
+ // Generated code can embed this address to get access to the isolate-specific
+ // data (for example, roots, external references, builtins, etc.).
+ // The kRootRegister is set to this value.
+ Address isolate_root() const { return isolate_data()->isolate_root(); }
+ static size_t isolate_root_bias() {
+ return OFFSET_OF(Isolate, isolate_data_) + IsolateData::kIsolateRootBias;
+ }
+
+ RootsTable& roots_table() { return isolate_data()->roots(); }
+
+  // A sub-region of the Isolate object with a "predictable" layout that
+  // depends only on the pointer size. This guarantees that there are no
+  // compatibility issues caused by different compilers being used for the
+  // snapshot generator and the actual V8 code.
+ // Thus, kRootRegister may be used to address any location that falls into
+ // this region.
+ // See IsolateData::AssertPredictableLayout() for details.
+ base::AddressRegion root_register_addressable_region() const {
+ return base::AddressRegion(reinterpret_cast<Address>(&isolate_data_),
+ sizeof(IsolateData));
+ }
+
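As an illustration of the comment above, a field is kRootRegister-addressable
exactly when it lies inside this region (a sketch; base::AddressRegion is
assumed to provide contains()):

  base::AddressRegion region = isolate->root_register_addressable_region();
  // IsolateData members (roots, external references, builtins) fall inside:
  bool addressable =
      region.contains(reinterpret_cast<Address>(isolate->isolate_data()));
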
+ Object root(RootIndex index) { return Object(roots_table()[index]); }
+
+ Handle<Object> root_handle(RootIndex index) {
+ return Handle<Object>(&roots_table()[index]);
+ }
+
+ ExternalReferenceTable* external_reference_table() {
+ DCHECK(isolate_data()->external_reference_table()->is_initialized());
+ return isolate_data()->external_reference_table();
+ }
+
+ Address* builtin_entry_table() { return isolate_data_.builtin_entry_table(); }
+ V8_INLINE Address* builtins_table() { return isolate_data_.builtins(); }
StubCache* load_stub_cache() { return load_stub_cache_; }
StubCache* store_stub_cache() { return store_stub_cache_; }
@@ -1013,12 +1023,20 @@ class Isolate : private HiddenFactory {
deoptimizer_lazy_throw_ = value;
}
ThreadLocalTop* thread_local_top() { return &thread_local_top_; }
- MaterializedObjectStore* materialized_object_store() {
- return materialized_object_store_;
+
+ static uint32_t thread_in_wasm_flag_address_offset() {
+    // For WebAssembly trap handlers there is a flag in thread-local storage
+    // which indicates whether the executing thread is running WebAssembly
+    // code. To access this flag directly from generated code, we store a
+    // pointer to the flag in ThreadLocalTop::thread_in_wasm_flag_address_.
+    // This function returns the offset of that member from {isolate_root()}.
+ return static_cast<uint32_t>(
+ OFFSET_OF(Isolate, thread_local_top_.thread_in_wasm_flag_address_) -
+ isolate_root_bias());
}
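
Unfolding the OFFSET_OF arithmetic above: isolate_root() is the address of
isolate_data_ plus IsolateData::kIsolateRootBias, so the bias terms cancel and
the returned offset points exactly at the member (equivalent C++ sketch, using
only names from this header):

  Address slot =
      isolate->isolate_root() + Isolate::thread_in_wasm_flag_address_offset();
  // The slot holds the TLS address of the thread-in-wasm flag itself.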
- ContextSlotCache* context_slot_cache() {
- return context_slot_cache_;
+ MaterializedObjectStore* materialized_object_store() {
+ return materialized_object_store_;
}
DescriptorLookupCache* descriptor_lookup_cache() {
@@ -1104,11 +1122,11 @@ class Isolate : private HiddenFactory {
void SetData(uint32_t slot, void* data) {
DCHECK_LT(slot, Internals::kNumIsolateDataSlots);
- embedder_data_[slot] = data;
+ isolate_data_.embedder_data_[slot] = data;
}
void* GetData(uint32_t slot) {
DCHECK_LT(slot, Internals::kNumIsolateDataSlots);
- return embedder_data_[slot];
+ return isolate_data_.embedder_data_[slot];
}
bool serializer_enabled() const { return serializer_enabled_; }
@@ -1163,7 +1181,7 @@ class Isolate : private HiddenFactory {
// needed anymore. This keeps many feedback vectors alive, but code
// coverage or type profile are used for debugging only and increase in
// memory usage is expected.
- void SetFeedbackVectorsForProfilingTools(Object* value);
+ void SetFeedbackVectorsForProfilingTools(Object value);
void MaybeInitializeVectorListFromHeap();
@@ -1175,27 +1193,9 @@ class Isolate : private HiddenFactory {
return date_cache_;
}
- void set_date_cache(DateCache* date_cache) {
- if (date_cache != date_cache_) {
- delete date_cache_;
- }
- date_cache_ = date_cache;
- }
+ void set_date_cache(DateCache* date_cache);
#ifdef V8_INTL_SUPPORT
-#if USE_CHROMIUM_ICU == 0 && U_ICU_VERSION_MAJOR_NUM < 63
- icu::RegexMatcher* language_singleton_regexp_matcher() {
- return language_singleton_regexp_matcher_;
- }
-
- icu::RegexMatcher* language_tag_regexp_matcher() {
- return language_tag_regexp_matcher_;
- }
-
- icu::RegexMatcher* language_variant_regexp_matcher() {
- return language_variant_regexp_matcher_;
- }
-#endif // USE_CHROMIUM_ICU == 0 && U_ICU_VERSION_MAJOR_NUM < 63
const std::string& default_locale() { return default_locale_; }
@@ -1204,19 +1204,16 @@ class Isolate : private HiddenFactory {
default_locale_ = locale;
}
-#if USE_CHROMIUM_ICU == 0 && U_ICU_VERSION_MAJOR_NUM < 63
- void set_language_tag_regexp_matchers(
- icu::RegexMatcher* language_singleton_regexp_matcher,
- icu::RegexMatcher* language_tag_regexp_matcher,
- icu::RegexMatcher* language_variant_regexp_matcher) {
- DCHECK_NULL(language_singleton_regexp_matcher_);
- DCHECK_NULL(language_tag_regexp_matcher_);
- DCHECK_NULL(language_variant_regexp_matcher_);
- language_singleton_regexp_matcher_ = language_singleton_regexp_matcher;
- language_tag_regexp_matcher_ = language_tag_regexp_matcher;
- language_variant_regexp_matcher_ = language_variant_regexp_matcher;
- }
-#endif // USE_CHROMIUM_ICU == 0 && U_ICU_VERSION_MAJOR_NUM < 63
+  // Enum used to access the ICU object cache.
+  enum class ICUObjectCacheType {
+    kDefaultCollator, kDefaultNumberFormat, kDefaultSimpleDateFormat,
+    kDefaultSimpleDateFormatForTime, kDefaultSimpleDateFormatForDate
+  };
+
+ icu::UObject* get_cached_icu_object(ICUObjectCacheType cache_type);
+ void set_icu_object_in_cache(ICUObjectCacheType cache_type,
+ std::shared_ptr<icu::UObject> obj);
+ void clear_cached_icu_object(ICUObjectCacheType cache_type);
+
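A hedged sketch of how Intl code might consult this cache (CreateDefaultCollator
is a hypothetical helper, not part of this patch):

  icu::UObject* cached = isolate->get_cached_icu_object(
      Isolate::ICUObjectCacheType::kDefaultCollator);
  if (cached == nullptr) {
    std::shared_ptr<icu::UObject> collator = CreateDefaultCollator();  // hypothetical
    isolate->set_icu_object_in_cache(
        Isolate::ICUObjectCacheType::kDefaultCollator, collator);
  }
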
#endif // V8_INTL_SUPPORT
static const int kProtectorValid = 1;
@@ -1227,18 +1224,45 @@ class Isolate : private HiddenFactory {
// The version with an explicit context parameter can be used when
// Isolate::context is not set up, e.g. when calling directly into C++ from
// CSA.
- bool IsNoElementsProtectorIntact(Context* context);
+ bool IsNoElementsProtectorIntact(Context context);
bool IsNoElementsProtectorIntact();
+ bool IsArrayOrObjectOrStringPrototype(Object object);
+
inline bool IsArraySpeciesLookupChainIntact();
inline bool IsTypedArraySpeciesLookupChainIntact();
+ inline bool IsRegExpSpeciesLookupChainIntact();
inline bool IsPromiseSpeciesLookupChainIntact();
bool IsIsConcatSpreadableLookupChainIntact();
- bool IsIsConcatSpreadableLookupChainIntact(JSReceiver* receiver);
+ bool IsIsConcatSpreadableLookupChainIntact(JSReceiver receiver);
inline bool IsStringLengthOverflowIntact();
inline bool IsArrayIteratorLookupChainIntact();
- // The StringIteratorProtector protects the original string iterating behavior
+ // The MapIterator protector protects the original iteration behaviors of
+ // Map.prototype.keys(), Map.prototype.values(), and Set.prototype.entries().
+ // It does not protect the original iteration behavior of
+ // Map.prototype[Symbol.iterator](). The protector is invalidated when:
+ // * The 'next' property is set on an object where the property holder is the
+ // %MapIteratorPrototype% (e.g. because the object is that very prototype).
+ // * The 'Symbol.iterator' property is set on an object where the property
+ // holder is the %IteratorPrototype%. Note that this also invalidates the
+ // SetIterator protector (see below).
+ inline bool IsMapIteratorLookupChainIntact();
+
+ // The SetIterator protector protects the original iteration behavior of
+ // Set.prototype.keys(), Set.prototype.values(), Set.prototype.entries(),
+ // and Set.prototype[Symbol.iterator](). The protector is invalidated when:
+ // * The 'next' property is set on an object where the property holder is the
+ // %SetIteratorPrototype% (e.g. because the object is that very prototype).
+ // * The 'Symbol.iterator' property is set on an object where the property
+ // holder is the %SetPrototype% OR %IteratorPrototype%. This means that
+ // setting Symbol.iterator on a MapIterator object can also invalidate the
+ // SetIterator protector, and vice versa, setting Symbol.iterator on a
+ // SetIterator object can also invalidate the MapIterator. This is an over-
+ // approximation for the sake of simplicity.
+ inline bool IsSetIteratorLookupChainIntact();
+
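To make the protector comments above concrete, a consumer would branch roughly
like this (sketch; the fast/slow path bodies are illustrative):

  if (isolate->IsMapIteratorLookupChainIntact()) {
    // Fast path: the original %MapIteratorPrototype%.next is untouched.
  } else {
    // Generic path: user code patched 'next' or Symbol.iterator.
  }
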
+ // The StringIteratorProtector protects the original string iteration behavior
// for primitive strings. As long as the StringIteratorProtector is valid,
// iterating over a primitive string is guaranteed to be unobservable from
// user code and can thus be cut short. More specifically, the protector gets
@@ -1250,8 +1274,8 @@ class Isolate : private HiddenFactory {
// non-configurable and non-writable.
inline bool IsStringIteratorLookupChainIntact();
- // Make sure we do check for neutered array buffers.
- inline bool IsArrayBufferNeuteringIntact();
+ // Make sure we do check for detached array buffers.
+ inline bool IsArrayBufferDetachingIntact();
// Disable promise optimizations if promise (debug) hooks have ever been
// active.
@@ -1286,12 +1310,15 @@ class Isolate : private HiddenFactory {
void InvalidateArrayConstructorProtector();
void InvalidateArraySpeciesProtector();
void InvalidateTypedArraySpeciesProtector();
+ void InvalidateRegExpSpeciesProtector();
void InvalidatePromiseSpeciesProtector();
void InvalidateIsConcatSpreadableProtector();
void InvalidateStringLengthOverflowProtector();
void InvalidateArrayIteratorProtector();
+ void InvalidateMapIteratorProtector();
+ void InvalidateSetIteratorProtector();
void InvalidateStringIteratorProtector();
- void InvalidateArrayBufferNeuteringProtector();
+ void InvalidateArrayBufferDetachingProtector();
V8_EXPORT_PRIVATE void InvalidatePromiseHookProtector();
void InvalidatePromiseResolveProtector();
void InvalidatePromiseThenProtector();
@@ -1304,7 +1331,7 @@ class Isolate : private HiddenFactory {
void UnlinkDeferredHandles(DeferredHandles* deferred_handles);
#ifdef DEBUG
- bool IsDeferredHandle(Object** location);
+ bool IsDeferredHandle(Address* location);
#endif // DEBUG
bool concurrent_recompilation_enabled() {
@@ -1328,11 +1355,6 @@ class Isolate : private HiddenFactory {
void DumpAndResetStats();
- FunctionEntryHook function_entry_hook() { return function_entry_hook_; }
- void set_function_entry_hook(FunctionEntryHook function_entry_hook) {
- function_entry_hook_ = function_entry_hook;
- }
-
void* stress_deopt_count_address() { return &stress_deopt_count_; }
void set_force_slow_path(bool v) { force_slow_path_ = v; }
@@ -1352,7 +1374,7 @@ class Isolate : private HiddenFactory {
int GenerateIdentityHash(uint32_t mask);
// Given an address occupied by a live code object, return that object.
- Code* FindCodeObject(Address a);
+ Code FindCodeObject(Address a);
int NextOptimizationId() {
int id = next_optimization_id_++;
@@ -1373,17 +1395,11 @@ class Isolate : private HiddenFactory {
void RemoveBeforeCallEnteredCallback(BeforeCallEnteredCallback callback);
inline void FireBeforeCallEnteredCallback();
- void AddMicrotasksCompletedCallback(MicrotasksCompletedCallback callback);
- void RemoveMicrotasksCompletedCallback(MicrotasksCompletedCallback callback);
- inline void FireMicrotasksCompletedCallback();
-
void SetPromiseRejectCallback(PromiseRejectCallback callback);
void ReportPromiseReject(Handle<JSPromise> promise, Handle<Object> value,
v8::PromiseRejectEvent event);
- void EnqueueMicrotask(Handle<Microtask> microtask);
- void RunMicrotasks();
- bool IsRunningMicrotasks() const { return is_running_microtasks_; }
+ void SetTerminationOnExternalTryCatch();
Handle<Symbol> SymbolFor(RootIndex dictionary_index, Handle<String> name,
bool private_symbol);
@@ -1409,6 +1425,11 @@ class Isolate : private HiddenFactory {
return reinterpret_cast<Address>(&promise_hook_or_async_event_delegate_);
}
+ Address promise_hook_or_debug_is_active_or_async_event_delegate_address() {
+ return reinterpret_cast<Address>(
+ &promise_hook_or_debug_is_active_or_async_event_delegate_);
+ }
+
Address handle_scope_implementer_address() {
return reinterpret_cast<Address>(&handle_scope_implementer_);
}
@@ -1417,43 +1438,41 @@ class Isolate : private HiddenFactory {
void* data);
void RunAtomicsWaitCallback(v8::Isolate::AtomicsWaitEvent event,
Handle<JSArrayBuffer> array_buffer,
- size_t offset_in_bytes, int32_t value,
+ size_t offset_in_bytes, int64_t value,
double timeout_in_ms,
AtomicsWaitWakeHandle* stop_handle);
void SetPromiseHook(PromiseHook hook);
void RunPromiseHook(PromiseHookType type, Handle<JSPromise> promise,
Handle<Object> parent);
+ void PromiseHookStateUpdated();
void AddDetachedContext(Handle<Context> context);
void CheckDetachedContextsAfterGC();
- std::vector<Object*>* partial_snapshot_cache() {
+ std::vector<Object>* read_only_object_cache() {
+ return &read_only_object_cache_;
+ }
+
+ std::vector<Object>* partial_snapshot_cache() {
return &partial_snapshot_cache_;
}
// Off-heap builtins cannot embed constants within the code object itself,
// and thus need to load them from the root list.
+ // TODO(jgruber): Rename to IsGeneratingEmbeddedBuiltins().
bool ShouldLoadConstantsFromRootList() const {
- if (FLAG_embedded_builtins) {
- return (serializer_enabled() &&
- builtins_constants_table_builder() != nullptr);
- } else {
- return false;
- }
+ return FLAG_embedded_builtins &&
+ builtins_constants_table_builder() != nullptr;
}
- // Called only prior to serialization.
- // This function copies off-heap-safe builtins off the heap, creates off-heap
- // trampolines, and sets up this isolate's embedded blob.
- void PrepareEmbeddedBlobForSerialization();
-
BuiltinsConstantsTableBuilder* builtins_constants_table_builder() const {
return builtins_constants_table_builder_;
}
static const uint8_t* CurrentEmbeddedBlob();
static uint32_t CurrentEmbeddedBlobSize();
+ static bool CurrentEmbeddedBlobIsBinaryEmbedded();
// These always return the same result as static methods above, but don't
// access the global atomic variable (and thus *might be* slightly faster).
@@ -1494,7 +1513,7 @@ class Isolate : private HiddenFactory {
return compiler_dispatcher_;
}
- bool IsInAnyContext(Object* object, uint32_t index);
+ bool IsInAnyContext(Object object, uint32_t index);
void SetHostImportModuleDynamicallyCallback(
HostImportModuleDynamicallyCallback callback);
@@ -1506,9 +1525,23 @@ class Isolate : private HiddenFactory {
Handle<JSObject> RunHostInitializeImportMetaObjectCallback(
Handle<Module> module);
+ void RegisterEmbeddedFileWriter(EmbeddedFileWriterInterface* writer) {
+ embedded_file_writer_ = writer;
+ }
+
+ int LookupOrAddExternallyCompiledFilename(const char* filename);
+ const char* GetExternallyCompiledFilename(int index) const;
+ int GetExternallyCompiledFilenameCount() const;
+ // PrepareBuiltinSourcePositionMap is necessary in order to preserve the
+ // builtin source positions before the corresponding code objects are
+ // replaced with trampolines. Those source positions are used to
+ // annotate the builtin blob with debugging information.
+ void PrepareBuiltinSourcePositionMap();
+
void SetPrepareStackTraceCallback(PrepareStackTraceCallback callback);
MaybeHandle<Object> RunPrepareStackTraceCallback(Handle<Context>,
- Handle<JSObject> Error);
+ Handle<JSObject> Error,
+ Handle<JSArray> sites);
bool HasPrepareStackTraceCallback() const;
void SetRAILMode(RAILMode rail_mode);
@@ -1558,19 +1591,11 @@ class Isolate : private HiddenFactory {
void SetIdle(bool is_idle);
- protected:
- Isolate();
- bool IsArrayOrObjectOrStringPrototype(Object* object);
-
private:
- friend struct GlobalState;
- friend struct InitializeGlobalState;
+ explicit Isolate(std::unique_ptr<IsolateAllocator> isolate_allocator);
+ ~Isolate();
- // These fields are accessed through the API, offsets must be kept in sync
- // with v8::internal::Internals (in include/v8.h) constants. This is also
- // verified in Isolate::Init() using runtime checks.
- void* embedder_data_[Internals::kNumIsolateDataSlots];
- Heap heap_;
+ void CheckIsolateLayout();
class ThreadDataTable {
public:
@@ -1618,7 +1643,6 @@ class Isolate : private HiddenFactory {
static base::Thread::LocalStorageKey per_isolate_thread_data_key_;
static base::Thread::LocalStorageKey isolate_key_;
- static base::Thread::LocalStorageKey thread_id_key_;
// A global counter for all generated Isolates, might overflow.
static base::Atomic32 isolate_counter_;
@@ -1632,21 +1656,6 @@ class Isolate : private HiddenFactory {
static void SetIsolateThreadLocals(Isolate* isolate,
PerIsolateThreadData* data);
- // Find the PerThread for this particular (isolate, thread) combination.
- // If one does not yet exist, allocate a new one.
- PerIsolateThreadData* FindOrAllocatePerThreadDataForThisThread();
-
- // Initializes the current thread to run this Isolate.
- // Not thread-safe. Multiple threads should not Enter/Exit the same isolate
- // at the same time, this should be prevented using external locking.
- void Enter();
-
- // Exits the current thread. The previosuly entered Isolate is restored
- // for the thread.
- // Not thread-safe. Multiple threads should not Enter/Exit the same isolate
- // at the same time, this should be prevented using external locking.
- void Exit();
-
void InitializeThreadLocal();
void MarkCompactPrologue(bool is_compacting,
@@ -1661,9 +1670,6 @@ class Isolate : private HiddenFactory {
// then return true.
bool PropagatePendingExceptionToExternalTryCatch();
- void SetTerminationOnExternalTryCatch();
-
- void PromiseHookStateUpdated();
void RunPromiseHookForAsyncEventDelegate(PromiseHookType type,
Handle<JSPromise> promise);
@@ -1681,90 +1687,105 @@ class Isolate : private HiddenFactory {
return "";
}
+ // This class contains a collection of data accessible from both C++ runtime
+ // and compiled code (including assembly stubs, builtins, interpreter bytecode
+ // handlers and optimized code).
+ IsolateData isolate_data_;
+
+ std::unique_ptr<IsolateAllocator> isolate_allocator_;
+ Heap heap_;
+
base::Atomic32 id_;
- EntryStackItem* entry_stack_;
- int stack_trace_nesting_level_;
- StringStream* incomplete_message_;
- Address isolate_addresses_[kIsolateAddressCount + 1]; // NOLINT
- Bootstrapper* bootstrapper_;
- RuntimeProfiler* runtime_profiler_;
- CompilationCache* compilation_cache_;
+ EntryStackItem* entry_stack_ = nullptr;
+ int stack_trace_nesting_level_ = 0;
+ StringStream* incomplete_message_ = nullptr;
+ Address isolate_addresses_[kIsolateAddressCount + 1] = {};
+ Bootstrapper* bootstrapper_ = nullptr;
+ RuntimeProfiler* runtime_profiler_ = nullptr;
+ CompilationCache* compilation_cache_ = nullptr;
std::shared_ptr<Counters> async_counters_;
base::RecursiveMutex break_access_;
- Logger* logger_;
+ Logger* logger_ = nullptr;
StackGuard stack_guard_;
- StubCache* load_stub_cache_;
- StubCache* store_stub_cache_;
- DeoptimizerData* deoptimizer_data_;
- bool deoptimizer_lazy_throw_;
- MaterializedObjectStore* materialized_object_store_;
+ StubCache* load_stub_cache_ = nullptr;
+ StubCache* store_stub_cache_ = nullptr;
+ DeoptimizerData* deoptimizer_data_ = nullptr;
+ bool deoptimizer_lazy_throw_ = false;
+ MaterializedObjectStore* materialized_object_store_ = nullptr;
ThreadLocalTop thread_local_top_;
- bool capture_stack_trace_for_uncaught_exceptions_;
- int stack_trace_for_uncaught_exceptions_frame_limit_;
- StackTrace::StackTraceOptions stack_trace_for_uncaught_exceptions_options_;
- ContextSlotCache* context_slot_cache_;
- DescriptorLookupCache* descriptor_lookup_cache_;
+ bool capture_stack_trace_for_uncaught_exceptions_ = false;
+ int stack_trace_for_uncaught_exceptions_frame_limit_ = 0;
+ StackTrace::StackTraceOptions stack_trace_for_uncaught_exceptions_options_ =
+ StackTrace::kOverview;
+ DescriptorLookupCache* descriptor_lookup_cache_ = nullptr;
HandleScopeData handle_scope_data_;
- HandleScopeImplementer* handle_scope_implementer_;
- UnicodeCache* unicode_cache_;
- AccountingAllocator* allocator_;
- InnerPointerToCodeCache* inner_pointer_to_code_cache_;
- GlobalHandles* global_handles_;
- EternalHandles* eternal_handles_;
- ThreadManager* thread_manager_;
+ HandleScopeImplementer* handle_scope_implementer_ = nullptr;
+ UnicodeCache* unicode_cache_ = nullptr;
+ AccountingAllocator* allocator_ = nullptr;
+ InnerPointerToCodeCache* inner_pointer_to_code_cache_ = nullptr;
+ GlobalHandles* global_handles_ = nullptr;
+ EternalHandles* eternal_handles_ = nullptr;
+ ThreadManager* thread_manager_ = nullptr;
RuntimeState runtime_state_;
Builtins builtins_;
- SetupIsolateDelegate* setup_delegate_;
+ SetupIsolateDelegate* setup_delegate_ = nullptr;
unibrow::Mapping<unibrow::Ecma262UnCanonicalize> jsregexp_uncanonicalize_;
unibrow::Mapping<unibrow::CanonicalizationRange> jsregexp_canonrange_;
unibrow::Mapping<unibrow::Ecma262Canonicalize>
regexp_macro_assembler_canonicalize_;
- RegExpStack* regexp_stack_;
+ RegExpStack* regexp_stack_ = nullptr;
std::vector<int> regexp_indices_;
- DateCache* date_cache_;
- base::RandomNumberGenerator* random_number_generator_;
- base::RandomNumberGenerator* fuzzer_rng_;
+ DateCache* date_cache_ = nullptr;
+ base::RandomNumberGenerator* random_number_generator_ = nullptr;
+ base::RandomNumberGenerator* fuzzer_rng_ = nullptr;
base::AtomicValue<RAILMode> rail_mode_;
- v8::Isolate::AtomicsWaitCallback atomics_wait_callback_;
- void* atomics_wait_callback_data_;
- PromiseHook promise_hook_;
- HostImportModuleDynamicallyCallback host_import_module_dynamically_callback_;
+ v8::Isolate::AtomicsWaitCallback atomics_wait_callback_ = nullptr;
+ void* atomics_wait_callback_data_ = nullptr;
+ PromiseHook promise_hook_ = nullptr;
+ HostImportModuleDynamicallyCallback host_import_module_dynamically_callback_ =
+ nullptr;
HostInitializeImportMetaObjectCallback
- host_initialize_import_meta_object_callback_;
+ host_initialize_import_meta_object_callback_ = nullptr;
base::Mutex rail_mutex_;
- double load_start_time_ms_;
+ double load_start_time_ms_ = 0;
#ifdef V8_INTL_SUPPORT
-#if USE_CHROMIUM_ICU == 0 && U_ICU_VERSION_MAJOR_NUM < 63
- icu::RegexMatcher* language_singleton_regexp_matcher_;
- icu::RegexMatcher* language_tag_regexp_matcher_;
- icu::RegexMatcher* language_variant_regexp_matcher_;
-#endif // USE_CHROMIUM_ICU == 0 && U_ICU_VERSION_MAJOR_NUM < 63
std::string default_locale_;
+
+ struct ICUObjectCacheTypeHash {
+ std::size_t operator()(ICUObjectCacheType a) const {
+ return static_cast<std::size_t>(a);
+ }
+ };
+ std::unordered_map<ICUObjectCacheType, std::shared_ptr<icu::UObject>,
+ ICUObjectCacheTypeHash>
+ icu_object_cache_;
+
#endif // V8_INTL_SUPPORT
// Whether the isolate has been created for snapshotting.
- bool serializer_enabled_;
+ bool serializer_enabled_ = false;
// True if fatal error has been signaled for this isolate.
- bool has_fatal_error_;
+ bool has_fatal_error_ = false;
// True if this isolate was initialized from a snapshot.
- bool initialized_from_snapshot_;
+ bool initialized_from_snapshot_ = false;
+ // TODO(ishell): remove
// True if ES2015 tail call elimination feature is enabled.
- bool is_tail_call_elimination_enabled_;
+ bool is_tail_call_elimination_enabled_ = true;
// True if the isolate is in background. This flag is used
// to prioritize between memory usage and latency.
- bool is_isolate_in_background_;
+ bool is_isolate_in_background_ = false;
// True if the isolate is in memory savings mode. This flag is used to
// favor memory over runtime performance.
- bool memory_savings_mode_active_;
+ bool memory_savings_mode_active_ = false;
// Time stamp at initialization.
- double time_millis_at_init_;
+ double time_millis_at_init_ = 0;
#ifdef DEBUG
static std::atomic<size_t> non_disposed_isolates_;
@@ -1772,19 +1793,18 @@ class Isolate : private HiddenFactory {
JSObject::SpillInformation js_spill_information_;
#endif
- Debug* debug_;
- HeapProfiler* heap_profiler_;
+ Debug* debug_ = nullptr;
+ HeapProfiler* heap_profiler_ = nullptr;
std::unique_ptr<CodeEventDispatcher> code_event_dispatcher_;
- FunctionEntryHook function_entry_hook_;
- const AstStringConstants* ast_string_constants_;
+ const AstStringConstants* ast_string_constants_ = nullptr;
- interpreter::Interpreter* interpreter_;
+ interpreter::Interpreter* interpreter_ = nullptr;
compiler::PerIsolateCompilerCache* compiler_cache_ = nullptr;
Zone* compiler_zone_ = nullptr;
- CompilerDispatcher* compiler_dispatcher_;
+ CompilerDispatcher* compiler_dispatcher_ = nullptr;
typedef std::pair<InterruptCallback, void*> InterruptEntry;
std::queue<InterruptEntry> api_interrupts_queue_;
@@ -1803,25 +1823,25 @@ class Isolate : private HiddenFactory {
// This class is huge and has a number of fields controlled by
// preprocessor defines. Make sure the offsets of these fields agree
// between compilation units.
-#define ISOLATE_FIELD_OFFSET(type, name, ignored) \
- static const intptr_t name##_debug_offset_;
+#define ISOLATE_FIELD_OFFSET(type, name, ignored) \
+ V8_EXPORT_PRIVATE static const intptr_t name##_debug_offset_;
ISOLATE_INIT_LIST(ISOLATE_FIELD_OFFSET)
ISOLATE_INIT_ARRAY_LIST(ISOLATE_FIELD_OFFSET)
#undef ISOLATE_FIELD_OFFSET
#endif
- DeferredHandles* deferred_handles_head_;
- OptimizingCompileDispatcher* optimizing_compile_dispatcher_;
+ DeferredHandles* deferred_handles_head_ = nullptr;
+ OptimizingCompileDispatcher* optimizing_compile_dispatcher_ = nullptr;
// Counts deopt points if deopt_every_n_times is enabled.
- unsigned int stress_deopt_count_;
+ unsigned int stress_deopt_count_ = 0;
- bool force_slow_path_;
+ bool force_slow_path_ = false;
- int next_optimization_id_;
+ int next_optimization_id_ = 0;
#if V8_SFI_HAS_UNIQUE_ID
- int next_unique_sfi_id_;
+ int next_unique_sfi_id_ = 0;
#endif
// Vector of callbacks before a Call starts execution.
@@ -1830,43 +1850,47 @@ class Isolate : private HiddenFactory {
// Vector of callbacks when a Call completes.
std::vector<CallCompletedCallback> call_completed_callbacks_;
- // Vector of callbacks after microtasks were run.
- std::vector<MicrotasksCompletedCallback> microtasks_completed_callbacks_;
- bool is_running_microtasks_;
-
- v8::Isolate::UseCounterCallback use_counter_callback_;
+ v8::Isolate::UseCounterCallback use_counter_callback_ = nullptr;
- std::vector<Object*> partial_snapshot_cache_;
+ std::vector<Object> read_only_object_cache_;
+ std::vector<Object> partial_snapshot_cache_;
// Used during builtins compilation to build the builtins constants table,
// which is stored on the root list prior to serialization.
BuiltinsConstantsTableBuilder* builtins_constants_table_builder_ = nullptr;
+ void InitializeDefaultEmbeddedBlob();
+ void CreateAndSetEmbeddedBlob();
+ void TearDownEmbeddedBlob();
+
void SetEmbeddedBlob(const uint8_t* blob, uint32_t blob_size);
+ void ClearEmbeddedBlob();
const uint8_t* embedded_blob_ = nullptr;
uint32_t embedded_blob_size_ = 0;
- v8::ArrayBuffer::Allocator* array_buffer_allocator_;
+ v8::ArrayBuffer::Allocator* array_buffer_allocator_ = nullptr;
FutexWaitListNode futex_wait_list_node_;
- CancelableTaskManager* cancelable_task_manager_;
+ CancelableTaskManager* cancelable_task_manager_ = nullptr;
debug::ConsoleDelegate* console_delegate_ = nullptr;
debug::AsyncEventDelegate* async_event_delegate_ = nullptr;
bool promise_hook_or_async_event_delegate_ = false;
+ bool promise_hook_or_debug_is_active_or_async_event_delegate_ = false;
int async_task_count_ = 0;
v8::Isolate::AbortOnUncaughtExceptionCallback
- abort_on_uncaught_exception_callback_;
+ abort_on_uncaught_exception_callback_ = nullptr;
- bool allow_atomics_wait_;
+ bool allow_atomics_wait_ = true;
+ base::Mutex managed_ptr_destructors_mutex_;
ManagedPtrDestructor* managed_ptr_destructors_head_ = nullptr;
- size_t total_regexp_code_generated_;
+ size_t total_regexp_code_generated_ = 0;
size_t elements_deletion_counter_ = 0;
@@ -1874,6 +1898,8 @@ class Isolate : private HiddenFactory {
std::unique_ptr<TracingCpuProfilerImpl> tracing_cpu_profiler_;
+ EmbeddedFileWriterInterface* embedded_file_writer_ = nullptr;
+
// The top entry of the v8::Context::BackupIncumbentScope stack.
const v8::Context::BackupIncumbentScope* top_backup_incumbent_scope_ =
nullptr;
@@ -1886,20 +1912,14 @@ class Isolate : private HiddenFactory {
base::Mutex thread_data_table_mutex_;
ThreadDataTable thread_data_table_;
- friend class ExecutionAccess;
- friend class HandleScopeImplementer;
+ // Delete new/delete operators to ensure that Isolate::New() and
+ // Isolate::Delete() are used for Isolate creation and deletion.
+ void* operator new(size_t, void* ptr) { return ptr; }
+ void* operator new(size_t) = delete;
+ void operator delete(void*) = delete;
+
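Given the placement operator retained above, Isolate::New() presumably
allocates memory through IsolateAllocator and placement-constructs the object
(a sketch; isolate_memory() is an assumed accessor):

  auto allocator = std::make_unique<IsolateAllocator>(mode);
  void* memory = allocator->isolate_memory();  // assumed accessor
  Isolate* isolate = new (memory) Isolate(std::move(allocator));
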
friend class heap::HeapTester;
- friend class OptimizingCompileDispatcher;
- friend class Simulator;
- friend class StackGuard;
- friend class SweeperThread;
- friend class TestIsolate;
- friend class ThreadId;
- friend class ThreadManager;
- friend class v8::Isolate;
- friend class v8::Locker;
- friend class v8::SnapshotCreator;
- friend class v8::Unlocker;
+ friend class TestSerializer;
DISALLOW_COPY_AND_ASSIGN(Isolate);
};
diff --git a/deps/v8/src/js/OWNERS b/deps/v8/src/js/OWNERS
deleted file mode 100644
index 2ca285742b..0000000000
--- a/deps/v8/src/js/OWNERS
+++ /dev/null
@@ -1,14 +0,0 @@
-set noparent
-
-adamk@chromium.org
-bmeurer@chromium.org
-cbruni@chromium.org
-gsathya@chromium.org
-ishell@chromium.org
-jgruber@chromium.org
-jkummerow@chromium.org
-littledan@chromium.org
-verwaest@chromium.org
-yangguo@chromium.org
-
-# COMPONENT: Blink>JavaScript>Language
diff --git a/deps/v8/src/js/array.js b/deps/v8/src/js/array.js
deleted file mode 100644
index 16b140bb38..0000000000
--- a/deps/v8/src/js/array.js
+++ /dev/null
@@ -1,515 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-(function(global, utils, extrasUtils) {
-
-"use strict";
-
-%CheckIsBootstrapping();
-
-// -------------------------------------------------------------------
-// Imports
-
-var GlobalArray = global.Array;
-var InternalArray = utils.InternalArray;
-var ObjectToString = global.Object.prototype.toString;
-var iteratorSymbol = utils.ImportNow("iterator_symbol");
-var unscopablesSymbol = utils.ImportNow("unscopables_symbol");
-
-// -------------------------------------------------------------------
-
-macro INVERT_NEG_ZERO(arg)
-((arg) + 0)
-endmacro
-
-function ArraySpeciesCreate(array, length) {
- length = INVERT_NEG_ZERO(length);
- var constructor = %ArraySpeciesConstructor(array);
- return new constructor(length);
-}
-
-
-function KeySortCompare(a, b) {
- return a - b;
-}
-
-function GetSortedArrayKeys(array, indices) {
- if (IS_NUMBER(indices)) {
- // It's an interval
- var limit = indices;
- var keys = new InternalArray();
- for (var i = 0; i < limit; ++i) {
- var e = array[i];
- if (!IS_UNDEFINED(e) || i in array) {
- keys.push(i);
- }
- }
- return keys;
- }
- return InnerArraySort(indices, indices.length, KeySortCompare);
-}
-
-
-function SparseJoinWithSeparatorJS(
- array, keys, length, use_locale, separator, locales, options) {
- var keys_length = keys.length;
- var elements = new InternalArray(keys_length * 2);
- for (var i = 0; i < keys_length; i++) {
- var key = keys[i];
- elements[i * 2] = key;
- elements[i * 2 + 1] = ConvertToString(
- use_locale, array[key], locales, options);
- }
- return %SparseJoinWithSeparator(elements, length, separator);
-}
-
-
-// Optimized for sparse arrays if separator is ''.
-function SparseJoin(array, keys, use_locale, locales, options) {
- var keys_length = keys.length;
- var elements = new InternalArray(keys_length);
- for (var i = 0; i < keys_length; i++) {
- elements[i] = ConvertToString(use_locale, array[keys[i]], locales, options);
- }
- return %StringBuilderConcat(elements, keys_length, '');
-}
-
-
-function UseSparseVariant(array, length, is_array, touched) {
- // Only use the sparse variant on arrays that are likely to be sparse and the
- // number of elements touched in the operation is relatively small compared to
- // the overall size of the array.
- if (!is_array || length < 1000 || %HasComplexElements(array)) {
- return false;
- }
- if (!%_IsSmi(length)) {
- return true;
- }
- var elements_threshold = length >> 2; // No more than 75% holes
- var estimated_elements = %EstimateNumberOfElements(array);
- return (estimated_elements < elements_threshold) &&
- (touched > estimated_elements * 4);
-}
-
-function Stack() {
- this.length = 0;
- this.values = new InternalArray();
-}
-
-// Predeclare the instance variables on the prototype. Otherwise setting them in
-// the constructor will leak the instance through settings on Object.prototype.
-Stack.prototype.length = null;
-Stack.prototype.values = null;
-
-function StackPush(stack, value) {
- stack.values[stack.length++] = value;
-}
-
-function StackPop(stack) {
- stack.values[--stack.length] = null
-}
-
-function StackHas(stack, v) {
- var length = stack.length;
- var values = stack.values;
- for (var i = 0; i < length; i++) {
- if (values[i] === v) return true;
- }
- return false;
-}
-
-// Global list of arrays visited during toString, toLocaleString and
-// join invocations.
-var visited_arrays = new Stack();
-
-function DoJoin(
- array, length, is_array, separator, use_locale, locales, options) {
- if (UseSparseVariant(array, length, is_array, length)) {
- %NormalizeElements(array);
- var keys = GetSortedArrayKeys(array, %GetArrayKeys(array, length));
- if (separator === '') {
- if (keys.length === 0) return '';
- return SparseJoin(array, keys, use_locale, locales, options);
- } else {
- return SparseJoinWithSeparatorJS(
- array, keys, length, use_locale, separator, locales, options);
- }
- }
-
- // Fast case for one-element arrays.
- if (length === 1) {
- return ConvertToString(use_locale, array[0], locales, options);
- }
-
- // Construct an array for the elements.
- var elements = new InternalArray(length);
- for (var i = 0; i < length; i++) {
- elements[i] = ConvertToString(use_locale, array[i], locales, options);
- }
-
- if (separator === '') {
- return %StringBuilderConcat(elements, length, '');
- } else {
- return %StringBuilderJoin(elements, length, separator);
- }
-}
-
-function Join(array, length, separator, use_locale, locales, options) {
- if (length === 0) return '';
-
- var is_array = IS_ARRAY(array);
-
- if (is_array) {
- // If the array is cyclic, return the empty string for already
- // visited arrays.
- if (StackHas(visited_arrays, array)) return '';
- StackPush(visited_arrays, array);
- }
-
- // Attempt to convert the elements.
- try {
- return DoJoin(
- array, length, is_array, separator, use_locale, locales, options);
- } finally {
- // Make sure to remove the last element of the visited array no
- // matter what happens.
- if (is_array) StackPop(visited_arrays);
- }
-}
-
-
-function ConvertToString(use_locale, x, locales, options) {
- if (IS_NULL_OR_UNDEFINED(x)) return '';
- if (use_locale) {
- if (IS_NULL_OR_UNDEFINED(locales)) {
- return TO_STRING(x.toLocaleString());
- } else if (IS_NULL_OR_UNDEFINED(options)) {
- return TO_STRING(x.toLocaleString(locales));
- }
- return TO_STRING(x.toLocaleString(locales, options));
- }
-
- return TO_STRING(x);
-}
-
-
-// -------------------------------------------------------------------
-
-var ArrayJoin;
-DEFINE_METHOD(
- GlobalArray.prototype,
- toString() {
- var array;
- var func;
- if (IS_ARRAY(this)) {
- func = this.join;
- if (func === ArrayJoin) {
- return Join(this, this.length, ',', false);
- }
- array = this;
- } else {
- array = TO_OBJECT(this);
- func = array.join;
- }
- if (!IS_CALLABLE(func)) {
- return %_Call(ObjectToString, array);
- }
- return %_Call(func, array);
- }
-);
-
-// ecma402 #sup-array.prototype.tolocalestring
-function InnerArrayToLocaleString(array, length, locales, options) {
- return Join(array, TO_LENGTH(length), ',', true, locales, options);
-}
-
-
-DEFINE_METHOD(
- GlobalArray.prototype,
- // ecma402 #sup-array.prototype.tolocalestring
- toLocaleString() {
- var array = TO_OBJECT(this);
- var arrayLen = array.length;
- var locales = arguments[0];
- var options = arguments[1];
- return InnerArrayToLocaleString(array, arrayLen, locales, options);
- }
-);
-
-
-function InnerArrayJoin(separator, array, length) {
- if (IS_UNDEFINED(separator)) {
- separator = ',';
- } else {
- separator = TO_STRING(separator);
- }
-
- // Fast case for one-element arrays.
- if (length === 1) {
- var e = array[0];
- if (IS_NULL_OR_UNDEFINED(e)) return '';
- return TO_STRING(e);
- }
-
- return Join(array, length, separator, false);
-}
-
-
-DEFINE_METHOD(
- GlobalArray.prototype,
- join(separator) {
- var array = TO_OBJECT(this);
- var length = TO_LENGTH(array.length);
-
- return InnerArrayJoin(separator, array, length);
- }
-);
-
-
-// Oh the humanity... don't remove the following function because js2c for some
-// reason gets symbol minifiation wrong if it's not there. Instead of spending
-// the time fixing js2c (which will go away when all of the internal .js runtime
-// files are gone), just keep this work-around.
-function ArraySliceFallback(start, end) {
- return null;
-}
-
-function InnerArraySort(array, length, comparefn) {
- // In-place QuickSort algorithm.
- // For short (length <= 10) arrays, insertion sort is used for efficiency.
-
- if (!IS_CALLABLE(comparefn)) {
- comparefn = function (x, y) {
- if (x === y) return 0;
- if (%_IsSmi(x) && %_IsSmi(y)) {
- return %SmiLexicographicCompare(x, y);
- }
- x = TO_STRING(x);
- y = TO_STRING(y);
- if (x == y) return 0;
- else return x < y ? -1 : 1;
- };
- }
- function InsertionSort(a, from, to) {
- for (var i = from + 1; i < to; i++) {
- var element = a[i];
- for (var j = i - 1; j >= from; j--) {
- var tmp = a[j];
- var order = comparefn(tmp, element);
- if (order > 0) {
- a[j + 1] = tmp;
- } else {
- break;
- }
- }
- a[j + 1] = element;
- }
- };
-
- function GetThirdIndex(a, from, to) {
- var t_array = new InternalArray();
- // Use both 'from' and 'to' to determine the pivot candidates.
- var increment = 200 + ((to - from) & 15);
- var j = 0;
- from += 1;
- to -= 1;
- for (var i = from; i < to; i += increment) {
- t_array[j] = [i, a[i]];
- j++;
- }
- t_array.sort(function(a, b) {
- return comparefn(a[1], b[1]);
- });
- var third_index = t_array[t_array.length >> 1][0];
- return third_index;
- }
-
- function QuickSort(a, from, to) {
- var third_index = 0;
- while (true) {
- // Insertion sort is faster for short arrays.
- if (to - from <= 10) {
- InsertionSort(a, from, to);
- return;
- }
- if (to - from > 1000) {
- third_index = GetThirdIndex(a, from, to);
- } else {
- third_index = from + ((to - from) >> 1);
- }
- // Find a pivot as the median of first, last and middle element.
- var v0 = a[from];
- var v1 = a[to - 1];
- var v2 = a[third_index];
- var c01 = comparefn(v0, v1);
- if (c01 > 0) {
- // v1 < v0, so swap them.
- var tmp = v0;
- v0 = v1;
- v1 = tmp;
- } // v0 <= v1.
- var c02 = comparefn(v0, v2);
- if (c02 >= 0) {
- // v2 <= v0 <= v1.
- var tmp = v0;
- v0 = v2;
- v2 = v1;
- v1 = tmp;
- } else {
- // v0 <= v1 && v0 < v2
- var c12 = comparefn(v1, v2);
- if (c12 > 0) {
- // v0 <= v2 < v1
- var tmp = v1;
- v1 = v2;
- v2 = tmp;
- }
- }
- // v0 <= v1 <= v2
- a[from] = v0;
- a[to - 1] = v2;
- var pivot = v1;
- var low_end = from + 1; // Upper bound of elements lower than pivot.
- var high_start = to - 1; // Lower bound of elements greater than pivot.
- a[third_index] = a[low_end];
- a[low_end] = pivot;
-
- // From low_end to i are elements equal to pivot.
- // From i to high_start are elements that haven't been compared yet.
- partition: for (var i = low_end + 1; i < high_start; i++) {
- var element = a[i];
- var order = comparefn(element, pivot);
- if (order < 0) {
- a[i] = a[low_end];
- a[low_end] = element;
- low_end++;
- } else if (order > 0) {
- do {
- high_start--;
- if (high_start == i) break partition;
- var top_elem = a[high_start];
- order = comparefn(top_elem, pivot);
- } while (order > 0);
- a[i] = a[high_start];
- a[high_start] = element;
- if (order < 0) {
- element = a[i];
- a[i] = a[low_end];
- a[low_end] = element;
- low_end++;
- }
- }
- }
- if (to - high_start < low_end - from) {
- QuickSort(a, high_start, to);
- to = low_end;
- } else {
- QuickSort(a, from, low_end);
- from = high_start;
- }
- }
- };
-
- if (length < 2) return array;
-
- // For compatibility with JSC, we also sort elements inherited from
- // the prototype chain on non-Array objects.
- // We do this by copying them to this object and sorting only
- // own elements. This is not very efficient, but sorting with
- // inherited elements happens very, very rarely, if at all.
- // The specification allows "implementation dependent" behavior
- // if an element on the prototype chain has an element that
- // might interact with sorting.
- //
- // We also move all non-undefined elements to the front of the
- // array and move the undefineds after that. Holes are removed.
- // This happens for Array as well as non-Array objects.
- var num_non_undefined = %PrepareElementsForSort(array, length);
-
- QuickSort(array, 0, num_non_undefined);
-
- return array;
-}
-
-
-// Set up unscopable properties on the Array.prototype object.
-var unscopables = {
- __proto__: null,
- copyWithin: true,
- entries: true,
- fill: true,
- find: true,
- findIndex: true,
- includes: true,
- keys: true,
-};
-
-%ToFastProperties(unscopables);
-
-%AddNamedProperty(GlobalArray.prototype, unscopablesSymbol, unscopables,
- DONT_ENUM | READ_ONLY);
-
-var ArrayIndexOf = GlobalArray.prototype.indexOf;
-var ArrayJoin = GlobalArray.prototype.join;
-var ArrayPop = GlobalArray.prototype.pop;
-var ArrayPush = GlobalArray.prototype.push;
-var ArraySlice = GlobalArray.prototype.slice;
-var ArrayShift = GlobalArray.prototype.shift;
-var ArraySort = GlobalArray.prototype.sort;
-var ArraySplice = GlobalArray.prototype.splice;
-var ArrayToString = GlobalArray.prototype.toString;
-var ArrayUnshift = GlobalArray.prototype.unshift;
-
-// Array prototype functions that return iterators, plus forEach. They are
-// exposed to the public API via Template::SetIntrinsicDataProperty().
-var ArrayEntries = GlobalArray.prototype.entries;
-var ArrayForEach = GlobalArray.prototype.forEach;
-var ArrayKeys = GlobalArray.prototype.keys;
-var ArrayValues = GlobalArray.prototype[iteratorSymbol];
-
-
-// The internal Array prototype doesn't need to be fancy, since it's never
-// exposed to user code; only the functions that are actually used are added.
-utils.SetUpLockedPrototype(InternalArray, GlobalArray(), [
- "indexOf", ArrayIndexOf,
- "join", ArrayJoin,
- "pop", ArrayPop,
- "push", ArrayPush,
- "shift", ArrayShift,
- "sort", ArraySort,
- "splice", ArraySplice
-]);
-
-// V8 extras get a separate copy of InternalPackedArray. We give them the basic
-// manipulation methods.
-utils.SetUpLockedPrototype(extrasUtils.InternalPackedArray, GlobalArray(), [
- "push", ArrayPush,
- "pop", ArrayPop,
- "shift", ArrayShift,
- "unshift", ArrayUnshift,
- "splice", ArraySplice,
- "slice", ArraySlice
-]);
-
-// -------------------------------------------------------------------
-// Exports
-
-utils.Export(function(to) {
- to.ArrayJoin = ArrayJoin;
- to.ArrayPush = ArrayPush;
- to.ArrayToString = ArrayToString;
- to.ArrayValues = ArrayValues;
- to.InnerArrayJoin = InnerArrayJoin;
- to.InnerArrayToLocaleString = InnerArrayToLocaleString;
-});
-
-%InstallToContext([
- "array_entries_iterator", ArrayEntries,
- "array_for_each_iterator", ArrayForEach,
- "array_keys_iterator", ArrayKeys,
- "array_values_iterator", ArrayValues,
-]);
-
-});
diff --git a/deps/v8/src/js/intl.js b/deps/v8/src/js/intl.js
deleted file mode 100644
index 43119a490d..0000000000
--- a/deps/v8/src/js/intl.js
+++ /dev/null
@@ -1,450 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// ECMAScript 402 API implementation.
-
-/**
- * Intl object is a single object that has some named properties,
- * all of which are constructors.
- */
-(function(global, utils) {
-
-"use strict";
-
-%CheckIsBootstrapping();
-
-// -------------------------------------------------------------------
-// Imports
-
-var ArrayJoin;
-var ArrayPush;
-var GlobalDate = global.Date;
-var GlobalIntl = global.Intl;
-var GlobalIntlDateTimeFormat = GlobalIntl.DateTimeFormat;
-var GlobalIntlNumberFormat = GlobalIntl.NumberFormat;
-var GlobalIntlCollator = GlobalIntl.Collator;
-var GlobalIntlPluralRules = GlobalIntl.PluralRules;
-var GlobalIntlv8BreakIterator = GlobalIntl.v8BreakIterator;
-var GlobalRegExp = global.RegExp;
-var GlobalString = global.String;
-var GlobalArray = global.Array;
-var IntlFallbackSymbol = utils.ImportNow("intl_fallback_symbol");
-var InternalArray = utils.InternalArray;
-var MathMax = global.Math.max;
-var ObjectHasOwnProperty = global.Object.prototype.hasOwnProperty;
-var ObjectKeys = global.Object.keys;
-var resolvedSymbol = utils.ImportNow("intl_resolved_symbol");
-var StringSubstr = GlobalString.prototype.substr;
-var StringSubstring = GlobalString.prototype.substring;
-
-utils.Import(function(from) {
- ArrayJoin = from.ArrayJoin;
- ArrayPush = from.ArrayPush;
-});
-
-// -------------------------------------------------------------------
-
-/**
- * Caches available locales for each service.
- */
-var AVAILABLE_LOCALES = {
- __proto__ : null,
- 'collator': UNDEFINED,
- 'numberformat': UNDEFINED,
- 'dateformat': UNDEFINED,
- 'breakiterator': UNDEFINED,
- 'pluralrules': UNDEFINED,
- 'relativetimeformat': UNDEFINED,
- 'listformat': UNDEFINED,
- 'segmenter': UNDEFINED,
-};
-
-/**
- * Unicode extension regular expression.
- */
-var UNICODE_EXTENSION_RE = UNDEFINED;
-
-function GetUnicodeExtensionRE() {
-  if (IS_UNDEFINED(UNICODE_EXTENSION_RE)) {
- UNICODE_EXTENSION_RE = new GlobalRegExp('-u(-[a-z0-9]{2,8})+', 'g');
- }
- return UNICODE_EXTENSION_RE;
-}
-
-/**
- * Matches any Unicode extension.
- */
-var ANY_EXTENSION_RE = UNDEFINED;
-
-function GetAnyExtensionRE() {
- if (IS_UNDEFINED(ANY_EXTENSION_RE)) {
- ANY_EXTENSION_RE = new GlobalRegExp('-[a-z0-9]{1}-.*', 'g');
- }
- return ANY_EXTENSION_RE;
-}
-
-/**
- * Matches valid service name.
- */
-var SERVICE_RE = UNDEFINED;
-
-function GetServiceRE() {
- if (IS_UNDEFINED(SERVICE_RE)) {
- SERVICE_RE =
- new GlobalRegExp('^(' + %_Call(ArrayJoin, ObjectKeys(AVAILABLE_LOCALES), '|') + ')$');
- }
- return SERVICE_RE;
-}
-
-/**
- * Returns a getOption function that extracts a property value from the
- * given options object. If the property is missing, the fallback argument
- * is returned; if the value is out of range for that property, a RangeError
- * is thrown.
- */
-function getGetOption(options, caller) {
- if (IS_UNDEFINED(options)) throw %make_error(kDefaultOptionsMissing, caller);
-
- // Ecma 402 #sec-getoption
- var getOption = function (property, type, values, fallback) {
- // 1. Let value be ? Get(options, property).
- var value = options[property];
- // 2. If value is not undefined, then
- if (!IS_UNDEFINED(value)) {
- switch (type) {
- // If type is "boolean", then let value be ToBoolean(value).
- case 'boolean':
- value = TO_BOOLEAN(value);
- break;
- // If type is "string", then let value be ToString(value).
- case 'string':
- value = TO_STRING(value);
- break;
- // Assert: type is "boolean" or "string".
- default:
- throw %make_error(kWrongValueType);
- }
-
- // d. If values is not undefined, then
- // If values does not contain an element equal to value, throw a
- // RangeError exception.
- if (!IS_UNDEFINED(values) && %ArrayIndexOf(values, value, 0) === -1) {
- throw %make_range_error(kValueOutOfRange, value, caller, property);
- }
-
- return value;
- }
-
- return fallback;
- }
-
- return getOption;
-}
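A usage sketch of the returned getOption (the option values here are invented; the call shapes mirror the code above):

var getOption = getGetOption({ usage: 'search', numeric: 'yes' }, 'Intl.Collator');
getOption('usage', 'string', ['sort', 'search'], 'sort');  // 'search'
getOption('numeric', 'boolean', UNDEFINED, false);         // true ('yes' is truthy)
getOption('caseFirst', 'string',
          ['upper', 'lower', 'false'], 'false');           // 'false' (missing -> fallback)
getOption('usage', 'string', ['sort'], 'sort');            // throws RangeError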
-
-
-/**
- * Ecma 402 9.2.5
- * TODO(jshin): relevantExtensionKeys and localeData need to be taken into
- * account per spec.
- * Compares a BCP 47 language priority list requestedLocales against the locales
- * in availableLocales and determines the best available language to meet the
- * request. Two algorithms are available to match the locales: the Lookup
- * algorithm described in RFC 4647 section 3.4, and an implementation dependent
- * best-fit algorithm. Independent of the locale matching algorithm, options
- * specified through Unicode locale extension sequences are negotiated
- * separately, taking the caller's relevant extension keys and locale data as
- * well as client-provided options into consideration. Returns an object with
- * a locale property whose value is the language tag of the selected locale,
- * and properties for each key in relevantExtensionKeys providing the selected
- * value for that key.
- */
-function resolveLocale(service, requestedLocales, options) {
- requestedLocales = initializeLocaleList(requestedLocales);
-
- var getOption = getGetOption(options, service);
- var matcher = getOption('localeMatcher', 'string',
- ['lookup', 'best fit'], 'best fit');
- var resolved;
- if (matcher === 'lookup') {
- resolved = lookupMatcher(service, requestedLocales);
- } else {
- resolved = bestFitMatcher(service, requestedLocales);
- }
-
- return resolved;
-}
-
-%InstallToContext([
- "resolve_locale", resolveLocale
-]);
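For orientation, the resolved shape produced here (via lookupMatcher below) looks like this; that 'de-DE' is available is an assumption about the ICU build:

resolveLocale('collator', ['de-DE-u-co-phonebk'], {localeMatcher: 'lookup'});
// => { locale: 'de-DE',
//      extension: '-u-co-phonebk',
//      localeWithExtension: 'de-DE-u-co-phonebk' }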
-
-/**
- * Look up the longest non-empty prefix of |locale| that is an element of
- * |availableLocales|. Returns undefined when the |locale| is completely
- * unsupported by |availableLocales|.
- */
-function bestAvailableLocale(availableLocales, locale) {
- do {
- if (!IS_UNDEFINED(availableLocales[locale])) {
- return locale;
- }
- // Truncate locale if possible.
- var pos = %StringLastIndexOf(locale, '-');
- if (pos === -1) {
- break;
- }
- locale = %_Call(StringSubstring, locale, 0, pos);
- } while (true);
-
- return UNDEFINED;
-}
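A worked example of the truncation loop (the available set is hypothetical; only key presence matters, the values are irrelevant):

var available = { __proto__: null, 'de': 0, 'de-CH': 0 };
bestAvailableLocale(available, 'de-CH-1996');  // 'de-CH'   (one truncation)
bestAvailableLocale(available, 'de-Latn-DE');  // 'de'      (two truncations)
bestAvailableLocale(available, 'fr-FR');       // undefined (no prefix matches)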
-
-
-/**
- * Try to match any mutation of |requestedLocale| against |availableLocales|.
- */
-function attemptSingleLookup(availableLocales, requestedLocale) {
- // Remove all extensions.
- var noExtensionsLocale = %RegExpInternalReplace(
- GetAnyExtensionRE(), requestedLocale, '');
-  var availableLocale = bestAvailableLocale(
-      availableLocales, noExtensionsLocale);
- if (!IS_UNDEFINED(availableLocale)) {
- // Return the resolved locale and extension.
- var extensionMatch = %regexp_internal_match(
- GetUnicodeExtensionRE(), requestedLocale);
- var extension = IS_NULL(extensionMatch) ? '' : extensionMatch[0];
- return {
- __proto__: null,
- locale: availableLocale,
- extension: extension,
- localeWithExtension: availableLocale + extension,
- };
- }
- return UNDEFINED;
-}
-
-
-/**
- * Returns the best-matched supported locale and extension info using the
- * basic lookup algorithm.
- */
-function lookupMatcher(service, requestedLocales) {
- if (IS_NULL(%regexp_internal_match(GetServiceRE(), service))) {
- throw %make_error(kWrongServiceType, service);
- }
-
- var availableLocales = getAvailableLocalesOf(service);
-
- for (var i = 0; i < requestedLocales.length; ++i) {
- var result = attemptSingleLookup(availableLocales, requestedLocales[i]);
- if (!IS_UNDEFINED(result)) {
- return result;
- }
- }
-
- var defLocale = %GetDefaultICULocale();
-
- // While ECMA-402 returns defLocale directly, we have to check if it is
- // supported, as such support is not guaranteed.
- var result = attemptSingleLookup(availableLocales, defLocale);
- if (!IS_UNDEFINED(result)) {
- return result;
- }
-
- // Didn't find a match, return default.
- return {
- __proto__: null,
- locale: 'und',
- extension: '',
- localeWithExtension: 'und',
- };
-}
-
-
-/**
- * Returns the best-matched supported locale and extension info using an
- * implementation-dependent algorithm.
- */
-function bestFitMatcher(service, requestedLocales) {
- // TODO(cira): implement better best fit algorithm.
- return lookupMatcher(service, requestedLocales);
-}
-
-/**
- * Given an array-like, returns an Array with its numbered properties
- * copied over and defined as
- * configurable: false, writable: false, enumerable: true.
- */
-function freezeArray(input) {
- var array = [];
- var l = input.length;
- for (var i = 0; i < l; i++) {
- if (i in input) {
- %object_define_property(array, i, {value: input[i],
- configurable: false,
- writable: false,
- enumerable: true});
- }
- }
-
- %object_define_property(array, 'length', {value: l, writable: false});
- return array;
-}
-
-/* Make a plain JS Array out of an InternalArray. */
-function makeArray(input) {
- var array = [];
- %MoveArrayContents(input, array);
- return array;
-}
-
-/**
- * Returns an Object that contains all of supported locales for a given
- * service.
- * In addition to the supported locales, an xx-ZZ locale is added for each
- * supported xx-Yyyy-ZZ locale, as required by the spec.
- */
-function getAvailableLocalesOf(service) {
- // Cache these, they don't ever change per service.
- if (!IS_UNDEFINED(AVAILABLE_LOCALES[service])) {
- return AVAILABLE_LOCALES[service];
- }
-
- var available = %AvailableLocalesOf(service);
-
- for (var i in available) {
- if (HAS_OWN_PROPERTY(available, i)) {
- var parts = %regexp_internal_match(
- /^([a-z]{2,3})-([A-Z][a-z]{3})-([A-Z]{2})$/, i);
- if (!IS_NULL(parts)) {
-        // Build xx-ZZ. We don't care about the actual value,
-        // as long as it's not undefined.
- available[parts[1] + '-' + parts[3]] = null;
- }
- }
- }
-
- AVAILABLE_LOCALES[service] = available;
-
- return available;
-}
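The regexp pins down exactly which tags get the shortcut: two- or three-letter language, four-letter script, two-letter region. For instance:

var parts = /^([a-z]{2,3})-([A-Z][a-z]{3})-([A-Z]{2})$/.exec('zh-Hans-CN');
parts[1] + '-' + parts[3];  // 'zh-CN' -- added alongside 'zh-Hans-CN'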
-
-/**
- * Returns an InternalArray where all locales are canonicalized and duplicates
- * removed.
- * Throws on locales that are not well formed BCP47 tags.
- * ECMA 402 8.2.1 steps 1 (ECMA 402 9.2.1) and 2.
- */
-function canonicalizeLocaleList(locales) {
- var seen = new InternalArray();
- if (!IS_UNDEFINED(locales)) {
- // We allow single string localeID.
- if (typeof locales === 'string') {
- %_Call(ArrayPush, seen, %CanonicalizeLanguageTag(locales));
- return seen;
- }
-
- var o = TO_OBJECT(locales);
- var len = TO_LENGTH(o.length);
-
- for (var k = 0; k < len; k++) {
- if (k in o) {
- var value = o[k];
-
- var tag = %CanonicalizeLanguageTag(value);
-
- if (%ArrayIndexOf(seen, tag, 0) === -1) {
- %_Call(ArrayPush, seen, tag);
- }
- }
- }
- }
-
- return seen;
-}
-
-// TODO(ftang): remove the %InstallToContext once
-// initializeLocaleList is available in C++
-// https://bugs.chromium.org/p/v8/issues/detail?id=7987
-%InstallToContext([
- "canonicalize_locale_list", canonicalizeLocaleList
-]);
-
-
-function initializeLocaleList(locales) {
- return freezeArray(canonicalizeLocaleList(locales));
-}
-
-// ECMA 402 section 8.2.1
-DEFINE_METHOD(
- GlobalIntl,
- getCanonicalLocales(locales) {
- return makeArray(canonicalizeLocaleList(locales));
- }
-);
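Observable behavior of the pipeline above (canonicalize, dedupe, copy out), as exposed through the standard API:

Intl.getCanonicalLocales('SR-cyrl-rs');        // ['sr-Cyrl-RS'] -- one string is fine
Intl.getCanonicalLocales(['en-US', 'EN-us']);  // ['en-US']      -- duplicates collapse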
-
-// Save references to Intl objects and methods we use, for added security.
-var savedObjects = {
- __proto__: null,
- 'collator': GlobalIntlCollator,
- 'numberformat': GlobalIntlNumberFormat,
- 'dateformatall': GlobalIntlDateTimeFormat,
- 'dateformatdate': GlobalIntlDateTimeFormat,
- 'dateformattime': GlobalIntlDateTimeFormat
-};
-
-
-// Default (created with undefined locales and options parameters) collator,
-// number and date format instances. They'll be created as needed.
-var defaultObjects = {
- __proto__: null,
- 'collator': UNDEFINED,
- 'numberformat': UNDEFINED,
- 'dateformatall': UNDEFINED,
- 'dateformatdate': UNDEFINED,
- 'dateformattime': UNDEFINED,
-};
-
-function clearDefaultObjects() {
- defaultObjects['dateformatall'] = UNDEFINED;
- defaultObjects['dateformatdate'] = UNDEFINED;
- defaultObjects['dateformattime'] = UNDEFINED;
-}
-
-var date_cache_version = 0;
-
-function checkDateCacheCurrent() {
- var new_date_cache_version = %DateCacheVersion();
- if (new_date_cache_version == date_cache_version) {
- return;
- }
- date_cache_version = new_date_cache_version;
-
- clearDefaultObjects();
-}
-
-/**
- * Returns cached or newly created instance of a given service.
- * We cache only default instances (where no locales or options are provided).
- */
-function cachedOrNewService(service, locales, options, defaults) {
- var useOptions = (IS_UNDEFINED(defaults)) ? options : defaults;
- if (IS_UNDEFINED(locales) && IS_UNDEFINED(options)) {
- checkDateCacheCurrent();
- if (IS_UNDEFINED(defaultObjects[service])) {
- defaultObjects[service] = new savedObjects[service](locales, useOptions);
- }
- return defaultObjects[service];
- }
- return new savedObjects[service](locales, useOptions);
-}
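This cache is why repeated default-format calls are cheap. A sketch of the observable effect, assuming Number.prototype.toLocaleString routes through cachedOrNewService (which is what the context installation below is for):

(1234.5).toLocaleString();       // creates and caches the default 'numberformat'
(6789.1).toLocaleString();       // reuses the cached instance
(1234.5).toLocaleString('de');   // explicit locale -> fresh NumberFormat, uncached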
-
-// TODO(ftang) remove the %InstallToContext once
-// cachedOrNewService is available in C++
-%InstallToContext([
- "cached_or_new_service", cachedOrNewService
-]);
-
-})
diff --git a/deps/v8/src/js/macros.py b/deps/v8/src/js/macros.py
deleted file mode 100644
index 4eaf990a58..0000000000
--- a/deps/v8/src/js/macros.py
+++ /dev/null
@@ -1,85 +0,0 @@
-# Copyright 2006-2009 the V8 project authors. All rights reserved.
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following
-# disclaimer in the documentation and/or other materials provided
-# with the distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-# Dictionary that is passed as defines for js2c.py.
-# Used for defines that must be defined for all native JS files.
-
-define NONE = 0;
-define READ_ONLY = 1;
-define DONT_ENUM = 2;
-define DONT_DELETE = 4;
-
-# 2^32 - 1
-define kMaxUint32 = 4294967295;
-
-# Type query macros.
-#
-# Note: We have special support for typeof(foo) === 'bar' in the compiler.
-# It will *not* generate a runtime typeof call for the most important
-# values of 'bar'.
-macro IS_ARRAY(arg) = (%_IsArray(arg));
-macro IS_NULL(arg) = (arg === null);
-macro IS_NULL_OR_UNDEFINED(arg) = (arg == null);
-macro IS_NUMBER(arg) = (typeof(arg) === 'number');
-macro IS_STRING(arg) = (typeof(arg) === 'string');
-macro IS_SYMBOL(arg) = (typeof(arg) === 'symbol');
-macro IS_UNDEFINED(arg) = (arg === (void 0));
-
-# Macro for ES queries of the type: "Type(O) is Object."
-macro IS_RECEIVER(arg) = (%_IsJSReceiver(arg));
-
-# Macro for ES queries of the type: "IsCallable(O)"
-macro IS_CALLABLE(arg) = (typeof(arg) === 'function');
-
-# Macro for ES RequireObjectCoercible
-# https://tc39.github.io/ecma262/#sec-requireobjectcoercible
-# Throws a TypeError of the form "[functionName] called on null or undefined".
-macro REQUIRE_OBJECT_COERCIBLE(arg, functionName) = if (IS_NULL(%IS_VAR(arg)) || IS_UNDEFINED(arg)) throw %make_type_error(kCalledOnNullOrUndefined, functionName);
-
-# Inline macros. Use %IS_VAR to make sure arg is evaluated only once.
-macro TO_BOOLEAN(arg) = (!!(arg));
-macro TO_LENGTH(arg) = (%_ToLength(arg));
-macro TO_STRING(arg) = (%_ToString(arg));
-macro TO_OBJECT(arg) = (%_ToObject(arg));
-macro HAS_OWN_PROPERTY(obj, key) = (%_Call(ObjectHasOwnProperty, obj, key));
-
-macro DEFINE_METHODS_LEN(obj, class_def, len) = %DefineMethodsInternal(obj, class class_def, len);
-macro DEFINE_METHOD_LEN(obj, method_def, len) = %DefineMethodsInternal(obj, class { method_def }, len);
-macro DEFINE_METHODS(obj, class_def) = DEFINE_METHODS_LEN(obj, class_def, -1);
-macro DEFINE_METHOD(obj, method_def) = DEFINE_METHOD_LEN(obj, method_def, -1);
-
-# Constants. The compiler constant folds them.
-define INFINITY = (1/0);
-define UNDEFINED = (void 0);
-
-# This should be kept consistent with Intl::Type.
-define NUMBER_FORMAT_TYPE = 0;
-define COLLATOR_TYPE = 1;
-define DATE_TIME_FORMAT_TYPE = 2;
-define PLURAL_RULES_TYPE = 3;
-define BREAK_ITERATOR_TYPE = 4;
-define LOCALE_TYPE = 5;
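js2c.py applies these as purely textual rewrites before the natives are compiled. A toy model of the substitution step (the regex and function name are ours, for illustration only):

function expandIsNull(src) {
  // IS_NULL(x) -> (x === null), mirroring the macro table above.
  return src.replace(/IS_NULL\(([^)]*)\)/g, '($1 === null)');
}
expandIsNull('if (IS_NULL(handler)) return;');
// => 'if ((handler === null)) return;'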
diff --git a/deps/v8/src/js/prologue.js b/deps/v8/src/js/prologue.js
deleted file mode 100644
index bc2652129d..0000000000
--- a/deps/v8/src/js/prologue.js
+++ /dev/null
@@ -1,168 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-(function(global, utils, extrasUtils) {
-
-"use strict";
-
-%CheckIsBootstrapping();
-
-// -----------------------------------------------------------------------
-// Utils
-
-var imports = UNDEFINED;
-var exports_container = %ExportFromRuntime({});
-
-// Export to other scripts.
-function Export(f) {
- f(exports_container);
-}
-
-
-// Import from other scripts. The actual importing happens in PostNatives so
-// that we can import from scripts executed later. However, that means that
-// the import is not available until the very end. If the import needs to be
-// available immediately, use ImportNow.
-function Import(f) {
- f.next = imports;
- imports = f;
-}
-
-
-// Import immediately from exports of previous scripts. We need this for
-// functions called during bootstrapping. Hooking up imports in PostNatives
-// would be too late.
-function ImportNow(name) {
- return exports_container[name];
-}
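How the three hooks combine across native scripts, sketched with a made-up export name (MaxSimple):

// In an earlier script:
function MaxSimple(a, b) { return a > b ? a : b; }
utils.Export(function(to) { to.MaxSimple = MaxSimple; });

// In any script, even one that runs first -- the binding is filled in
// later, when PostNatives drains the `imports` list:
var MaxSimple;
utils.Import(function(from) { MaxSimple = from.MaxSimple; });

// Needed during bootstrapping itself? Read the container right away:
var fallbackSymbol = utils.ImportNow("intl_fallback_symbol");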
-
-
-function InstallConstants(object, constants) {
- %CheckIsBootstrapping();
- %OptimizeObjectForAddingMultipleProperties(object, constants.length >> 1);
- var attributes = DONT_ENUM | DONT_DELETE | READ_ONLY;
- for (var i = 0; i < constants.length; i += 2) {
- var name = constants[i];
- var k = constants[i + 1];
- %AddNamedProperty(object, name, k, attributes);
- }
- %ToFastProperties(object);
-}
-
-
-// Locks down the prototype of a built-in function: the given fields and
-// methods are installed as non-enumerable, non-deletable (and, for methods,
-// read-only) properties, and the prototype object's own [[Prototype]] is set
-// to null, which also cuts off the inherited __proto__ accessor.
-function SetUpLockedPrototype(
- constructor, fields, methods) {
- %CheckIsBootstrapping();
- var prototype = constructor.prototype;
- // Install functions first, because this function is used to initialize
- // PropertyDescriptor itself.
- var property_count = (methods.length >> 1) + (fields ? fields.length : 0);
- if (property_count >= 4) {
- %OptimizeObjectForAddingMultipleProperties(prototype, property_count);
- }
- if (fields) {
- for (var i = 0; i < fields.length; i++) {
- %AddNamedProperty(prototype, fields[i],
- UNDEFINED, DONT_ENUM | DONT_DELETE);
- }
- }
- for (var i = 0; i < methods.length; i += 2) {
- var key = methods[i];
- var f = methods[i + 1];
- %AddNamedProperty(prototype, key, f, DONT_ENUM | DONT_DELETE | READ_ONLY);
- %SetNativeFlag(f);
- }
- %InternalSetPrototype(prototype, null);
- %ToFastProperties(prototype);
-}
-
-
-// -----------------------------------------------------------------------
-// To be called by bootstrapper
-
-function PostNatives(utils) {
- %CheckIsBootstrapping();
-
- for ( ; !IS_UNDEFINED(imports); imports = imports.next) {
- imports(exports_container);
- }
-
- exports_container = UNDEFINED;
- utils.Export = UNDEFINED;
- utils.Import = UNDEFINED;
- utils.ImportNow = UNDEFINED;
- utils.PostNatives = UNDEFINED;
-}
-
-// -----------------------------------------------------------------------
-
-%OptimizeObjectForAddingMultipleProperties(utils, 14);
-
-utils.Import = Import;
-utils.ImportNow = ImportNow;
-utils.Export = Export;
-utils.InstallConstants = InstallConstants;
-utils.SetUpLockedPrototype = SetUpLockedPrototype;
-utils.PostNatives = PostNatives;
-
-%ToFastProperties(utils);
-
-// -----------------------------------------------------------------------
-
-%OptimizeObjectForAddingMultipleProperties(extrasUtils, 11);
-
-extrasUtils.logStackTrace = function logStackTrace() {
- %DebugTrace();
-};
-
-extrasUtils.log = function log() {
- let message = '';
- for (const arg of arguments) {
- message += arg;
- }
-
- %GlobalPrint(message);
-};
-
-// Extras need the ability to store private state on their objects without
-// exposing it to the outside world.
-
-extrasUtils.createPrivateSymbol = function createPrivateSymbol(name) {
- return %CreatePrivateSymbol(name);
-};
-
-// These functions are key for safe meta-programming:
-// http://wiki.ecmascript.org/doku.php?id=conventions:safe_meta_programming
-//
-// Technically they could all be derived from combinations of
-// Function.prototype.{bind,call,apply} but that introduces lots of layers of
-// indirection.
-
-extrasUtils.uncurryThis = function uncurryThis(func) {
- return function(thisArg, ...args) {
- return %reflect_apply(func, thisArg, args);
- };
-};
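Typical use, per the safe meta-programming notes above: capture a builtin once at startup; later calls survive hostile prototype patching because %reflect_apply was captured at bootstrap. A sketch from an extra's point of view:

var arraySlice = extrasUtils.uncurryThis(Array.prototype.slice);

// Hostile user code may later do this:
Array.prototype.slice = function() { throw new Error('patched'); };
Function.prototype.call = function() { throw new Error('patched'); };

// The captured builtin keeps working:
arraySlice({ length: 2, 0: 'a', 1: 'b' }, 1);  // ['b']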
-
-extrasUtils.markPromiseAsHandled = function markPromiseAsHandled(promise) {
- %PromiseMarkAsHandled(promise);
-};
-
-extrasUtils.promiseState = function promiseState(promise) {
- return %PromiseStatus(promise);
-};
-
-// [[PromiseState]] values (for extrasUtils.promiseState())
-// These values should be kept in sync with PromiseStatus in globals.h
-extrasUtils.kPROMISE_PENDING = 0;
-extrasUtils.kPROMISE_FULFILLED = 1;
-extrasUtils.kPROMISE_REJECTED = 2;
-
-%ToFastProperties(extrasUtils);
-
-})
diff --git a/deps/v8/src/js/typedarray.js b/deps/v8/src/js/typedarray.js
deleted file mode 100644
index 65662c8083..0000000000
--- a/deps/v8/src/js/typedarray.js
+++ /dev/null
@@ -1,93 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-(function(global, utils) {
-
-"use strict";
-
-%CheckIsBootstrapping();
-
-// -------------------------------------------------------------------
-// Imports
-
-// array.js has to come before typedarray.js for this to work
-var ArrayToString = utils.ImportNow("ArrayToString");
-var InnerArrayJoin;
-var InnerArrayToLocaleString;
-
-macro TYPED_ARRAYS(FUNCTION)
-FUNCTION(Uint8Array, 1)
-FUNCTION(Int8Array, 1)
-FUNCTION(Uint16Array, 2)
-FUNCTION(Int16Array, 2)
-FUNCTION(Uint32Array, 4)
-FUNCTION(Int32Array, 4)
-FUNCTION(Float32Array, 4)
-FUNCTION(Float64Array, 8)
-FUNCTION(Uint8ClampedArray, 1)
-FUNCTION(BigUint64Array, 8)
-FUNCTION(BigInt64Array, 8)
-endmacro
-
-macro DECLARE_GLOBALS(NAME, SIZE)
-var GlobalNAME = global.NAME;
-endmacro
-
-TYPED_ARRAYS(DECLARE_GLOBALS)
-
-macro IS_TYPEDARRAY(arg)
-(%_IsTypedArray(arg))
-endmacro
-
-var GlobalTypedArray = %object_get_prototype_of(GlobalUint8Array);
-
-utils.Import(function(from) {
- InnerArrayJoin = from.InnerArrayJoin;
- InnerArrayToLocaleString = from.InnerArrayToLocaleString;
-});
-
-// --------------- Typed Arrays ---------------------
-
-// ES6 section 22.2.3.5.1 ValidateTypedArray ( O )
-function ValidateTypedArray(array, methodName) {
- if (!IS_TYPEDARRAY(array)) throw %make_type_error(kNotTypedArray);
-
- if (%ArrayBufferViewWasNeutered(array))
- throw %make_type_error(kDetachedOperation, methodName);
-}
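What the two checks reject, seen from script (detaching here via structuredClone's transfer list, an API that postdates this code but produces the same neutered state):

var ta = new Uint8Array([1, 2, 3]);
ta.join('-');                                           // '1-2-3'
ta.toLocaleString();                                    // e.g. '1,2,3'
structuredClone(ta.buffer, { transfer: [ta.buffer] });  // detaches the buffer
ta.join('-');  // with this implementation: TypeError (kDetachedOperation)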
-
-
-// ES6 section 22.2.3.27
-// ecma402 #sup-array.prototype.tolocalestring
-DEFINE_METHOD(
- GlobalTypedArray.prototype,
- toLocaleString() {
- ValidateTypedArray(this, "%TypedArray%.prototype.toLocaleString");
-
- var locales = arguments[0];
- var options = arguments[1];
- var length = %TypedArrayGetLength(this);
- return InnerArrayToLocaleString(this, length, locales, options);
- }
-);
-
-
-// ES6 section 22.2.3.14
-DEFINE_METHOD(
- GlobalTypedArray.prototype,
- join(separator) {
- ValidateTypedArray(this, "%TypedArray%.prototype.join");
-
- var length = %TypedArrayGetLength(this);
-
- return InnerArrayJoin(separator, this, length);
- }
-);
-
-// -------------------------------------------------------------------
-
-%AddNamedProperty(GlobalTypedArray.prototype, "toString", ArrayToString,
- DONT_ENUM);
-
-})
diff --git a/deps/v8/src/json-parser.cc b/deps/v8/src/json-parser.cc
index 28819c4cd0..7b0f757b1d 100644
--- a/deps/v8/src/json-parser.cc
+++ b/deps/v8/src/json-parser.cc
@@ -8,13 +8,12 @@
#include "src/conversions.h"
#include "src/debug/debug.h"
#include "src/field-type.h"
-#include "src/messages.h"
+#include "src/message-template.h"
#include "src/objects-inl.h"
#include "src/objects/hash-table-inl.h"
#include "src/property-descriptor.h"
#include "src/string-hasher.h"
#include "src/transitions.h"
-#include "src/unicode-cache.h"
namespace v8 {
namespace internal {
@@ -35,8 +34,7 @@ class VectorSegment {
~VectorSegment() { container_.resize(begin_); }
Vector<const value_type> GetVector() const {
- return Vector<const value_type>(container_.data() + begin_,
- container_.size() - begin_);
+ return VectorOf(container_) + begin_;
}
template <typename T>
@@ -164,7 +162,7 @@ MaybeHandle<Object> JsonParser<seq_one_byte>::ParseJson() {
// Parse failed. Current character is the unexpected token.
Factory* factory = this->factory();
- MessageTemplate::Template message;
+ MessageTemplate message;
Handle<Object> arg1 = Handle<Smi>(Smi::FromInt(position_), isolate());
Handle<Object> arg2;
@@ -258,10 +256,10 @@ bool JsonParser<seq_one_byte>::ParseJsonString(Handle<String> expected) {
int length = expected->length();
if (source_->length() - position_ - 1 > length) {
DisallowHeapAllocation no_gc;
- String::FlatContent content = expected->GetFlatContent();
+ String::FlatContent content = expected->GetFlatContent(no_gc);
if (content.IsOneByte()) {
DCHECK_EQ('"', c0_);
- const uint8_t* input_chars = seq_source_->GetChars() + position_ + 1;
+ const uint8_t* input_chars = seq_source_->GetChars(no_gc) + position_ + 1;
const uint8_t* expected_chars = content.ToOneByteVector().start();
for (int i = 0; i < length; i++) {
uint8_t c0 = input_chars[i];
@@ -522,7 +520,7 @@ void JsonParser<seq_one_byte>::CommitStateToJsonObject(
DCHECK(!json_object->map()->is_dictionary_map());
DisallowHeapAllocation no_gc;
- DescriptorArray* descriptors = json_object->map()->instance_descriptors();
+ DescriptorArray descriptors = json_object->map()->instance_descriptors();
for (int i = 0; i < properties.length(); i++) {
Handle<Object> value = properties[i];
// Initializing store.
@@ -638,17 +636,20 @@ Handle<Object> JsonParser<seq_one_byte>::ParseJsonNumber() {
// a decimal point or exponent.
if (IsDecimalDigit(c0_)) return ReportUnexpectedCharacter();
} else {
- int i = 0;
+ uint32_t i = 0;
int digits = 0;
if (c0_ < '1' || c0_ > '9') return ReportUnexpectedCharacter();
do {
+      // This can overflow. That's OK: the "digits < 10" check below only
+      // keeps results of at most 9 digits (at most 999999999 < 2^32), so a
+      // value that wrapped around is always discarded.
i = i * 10 + c0_ - '0';
digits++;
Advance();
} while (IsDecimalDigit(c0_));
if (c0_ != '.' && c0_ != 'e' && c0_ != 'E' && digits < 10) {
SkipWhitespace();
- return Handle<Smi>(Smi::FromInt((negative ? -i : i)), isolate());
+ return Handle<Smi>(Smi::FromInt((negative ? -static_cast<int>(i) : i)),
+ isolate());
}
}
if (c0_ == '.') {
@@ -669,8 +670,9 @@ Handle<Object> JsonParser<seq_one_byte>::ParseJsonNumber() {
int length = position_ - beg_pos;
double number;
if (seq_one_byte) {
- Vector<const uint8_t> chars(seq_source_->GetChars() + beg_pos, length);
- number = StringToDouble(isolate()->unicode_cache(), chars,
+ DisallowHeapAllocation no_gc;
+ Vector<const uint8_t> chars(seq_source_->GetChars(no_gc) + beg_pos, length);
+ number = StringToDouble(chars,
NO_FLAGS, // Hex, octal or trailing junk.
std::numeric_limits<double>::quiet_NaN());
} else {
@@ -678,7 +680,7 @@ Handle<Object> JsonParser<seq_one_byte>::ParseJsonNumber() {
String::WriteToFlat(*source_, buffer.start(), beg_pos, position_);
Vector<const uint8_t> result =
Vector<const uint8_t>(buffer.start(), length);
- number = StringToDouble(isolate()->unicode_cache(), result,
+ number = StringToDouble(result,
NO_FLAGS, // Hex, octal or trailing junk.
0.0);
buffer.Dispose();
@@ -728,9 +730,13 @@ Handle<String> JsonParser<seq_one_byte>::SlowScanJsonString(
int length = Min(max_length, Max(kInitialSpecialStringLength, 2 * count));
Handle<StringType> seq_string =
NewRawString<StringType>(factory(), length, pretenure_);
- // Copy prefix into seq_str.
- SinkChar* dest = seq_string->GetChars();
- String::WriteToFlat(*prefix, dest, start, end);
+
+ {
+ DisallowHeapAllocation no_gc;
+ // Copy prefix into seq_str.
+ SinkChar* dest = seq_string->GetChars(no_gc);
+ String::WriteToFlat(*prefix, dest, start, end);
+ }
while (c0_ != '"') {
// Check for control character (0x00-0x1F) or unterminated string (<0).
@@ -881,27 +887,29 @@ Handle<String> JsonParser<seq_one_byte>::ScanJsonString() {
} else {
hash = static_cast<uint32_t>(length);
}
- Vector<const uint8_t> string_vector(seq_source_->GetChars() + position_,
- length);
- StringTable* string_table = isolate()->heap()->string_table();
+ StringTable string_table = isolate()->heap()->string_table();
uint32_t capacity = string_table->Capacity();
uint32_t entry = StringTable::FirstProbe(hash, capacity);
uint32_t count = 1;
Handle<String> result;
while (true) {
- Object* element = string_table->KeyAt(entry);
+ Object element = string_table->KeyAt(entry);
if (element->IsUndefined(isolate())) {
// Lookup failure.
result =
factory()->InternalizeOneByteString(seq_source_, position_, length);
break;
}
- if (!element->IsTheHole(isolate()) &&
- String::cast(element)->IsOneByteEqualTo(string_vector)) {
- result = Handle<String>(String::cast(element), isolate());
- DCHECK_EQ(result->Hash(),
- (hash << String::kHashShift) >> String::kHashShift);
- break;
+ if (!element->IsTheHole(isolate())) {
+ DisallowHeapAllocation no_gc;
+ Vector<const uint8_t> string_vector(
+ seq_source_->GetChars(no_gc) + position_, length);
+ if (String::cast(element)->IsOneByteEqualTo(string_vector)) {
+ result = Handle<String>(String::cast(element), isolate());
+ DCHECK_EQ(result->Hash(),
+ (hash << String::kHashShift) >> String::kHashShift);
+ break;
+ }
}
entry = StringTable::NextProbe(entry, count++, capacity);
}
@@ -931,7 +939,8 @@ Handle<String> JsonParser<seq_one_byte>::ScanJsonString() {
int length = position_ - beg_pos;
Handle<String> result =
factory()->NewRawOneByteString(length, pretenure_).ToHandleChecked();
- uint8_t* dest = SeqOneByteString::cast(*result)->GetChars();
+ DisallowHeapAllocation no_gc;
+ uint8_t* dest = SeqOneByteString::cast(*result)->GetChars(no_gc);
String::WriteToFlat(*source_, dest, beg_pos, position_);
DCHECK_EQ('"', c0_);
diff --git a/deps/v8/src/json-stringifier.cc b/deps/v8/src/json-stringifier.cc
index fe9a1468b1..1d3858f351 100644
--- a/deps/v8/src/json-stringifier.cc
+++ b/deps/v8/src/json-stringifier.cc
@@ -5,11 +5,13 @@
#include "src/json-stringifier.h"
#include "src/conversions.h"
-#include "src/heap/heap-inl.h"
#include "src/lookup.h"
-#include "src/messages.h"
+#include "src/message-template.h"
#include "src/objects-inl.h"
+#include "src/objects/heap-number-inl.h"
#include "src/objects/js-array-inl.h"
+#include "src/objects/oddball-inl.h"
+#include "src/objects/smi.h"
#include "src/string-builder-inl.h"
#include "src/utils.h"
@@ -65,19 +67,20 @@ class JsonStringifier {
V8_INLINE void SerializeDeferredKey(bool deferred_comma,
Handle<Object> deferred_key);
- Result SerializeSmi(Smi* object);
+ Result SerializeSmi(Smi object);
Result SerializeDouble(double number);
V8_INLINE Result SerializeHeapNumber(Handle<HeapNumber> object) {
return SerializeDouble(object->value());
}
- Result SerializeJSValue(Handle<JSValue> object);
+ Result SerializeJSValue(Handle<JSValue> object, Handle<Object> key);
- V8_INLINE Result SerializeJSArray(Handle<JSArray> object);
- V8_INLINE Result SerializeJSObject(Handle<JSObject> object);
+ V8_INLINE Result SerializeJSArray(Handle<JSArray> object, Handle<Object> key);
+ V8_INLINE Result SerializeJSObject(Handle<JSObject> object,
+ Handle<Object> key);
- Result SerializeJSProxy(Handle<JSProxy> object);
+ Result SerializeJSProxy(Handle<JSProxy> object, Handle<Object> key);
Result SerializeJSReceiverSlow(Handle<JSReceiver> object);
Result SerializeArrayLikeSlow(Handle<JSReceiver> object, uint32_t start,
uint32_t length);
@@ -103,7 +106,7 @@ class JsonStringifier {
Handle<JSReceiver> CurrentHolder(Handle<Object> value,
Handle<Object> inital_holder);
- Result StackPush(Handle<Object> object);
+ Result StackPush(Handle<Object> object, Handle<Object> key);
void StackPop();
Factory* factory() { return isolate_->factory(); }
@@ -111,12 +114,14 @@ class JsonStringifier {
Isolate* isolate_;
IncrementalStringBuilder builder_;
Handle<String> tojson_string_;
- Handle<JSArray> stack_;
Handle<FixedArray> property_list_;
Handle<JSReceiver> replacer_function_;
uc16* gap_;
int indent_;
+ using KeyObject = std::pair<Handle<Object>, Handle<Object>>;
+ std::vector<KeyObject> stack_;
+
static const int kJsonEscapeTableEntrySize = 8;
static const char* const JsonEscapeTable;
};
@@ -196,9 +201,12 @@ const char* const JsonStringifier::JsonEscapeTable =
"\xFC\0 \xFD\0 \xFE\0 \xFF\0 ";
JsonStringifier::JsonStringifier(Isolate* isolate)
- : isolate_(isolate), builder_(isolate), gap_(nullptr), indent_(0) {
+ : isolate_(isolate),
+ builder_(isolate),
+ gap_(nullptr),
+ indent_(0),
+ stack_() {
tojson_string_ = factory()->toJSON_string();
- stack_ = factory()->NewJSArray(8);
}
MaybeHandle<Object> JsonStringifier::Stringify(Handle<Object> object,
@@ -343,33 +351,30 @@ MaybeHandle<Object> JsonStringifier::ApplyReplacerFunction(
Handle<JSReceiver> JsonStringifier::CurrentHolder(
Handle<Object> value, Handle<Object> initial_holder) {
- int length = Smi::ToInt(stack_->length());
- if (length == 0) {
+ if (stack_.empty()) {
Handle<JSObject> holder =
factory()->NewJSObject(isolate_->object_function());
JSObject::AddProperty(isolate_, holder, factory()->empty_string(),
initial_holder, NONE);
return holder;
} else {
- FixedArray* elements = FixedArray::cast(stack_->elements());
- return Handle<JSReceiver>(JSReceiver::cast(elements->get(length - 1)),
+ return Handle<JSReceiver>(JSReceiver::cast(*stack_.back().second),
isolate_);
}
}
-JsonStringifier::Result JsonStringifier::StackPush(Handle<Object> object) {
+JsonStringifier::Result JsonStringifier::StackPush(Handle<Object> object,
+ Handle<Object> key) {
StackLimitCheck check(isolate_);
if (check.HasOverflowed()) {
isolate_->StackOverflow();
return EXCEPTION;
}
- int length = Smi::ToInt(stack_->length());
{
DisallowHeapAllocation no_allocation;
- FixedArray* elements = FixedArray::cast(stack_->elements());
- for (int i = 0; i < length; i++) {
- if (elements->get(i) == *object) {
+ for (const KeyObject& key_object : stack_) {
+ if (*key_object.second == *object) {
AllowHeapAllocation allow_to_return_error;
Handle<Object> error =
factory()->NewTypeError(MessageTemplate::kCircularStructure);
@@ -378,15 +383,11 @@ JsonStringifier::Result JsonStringifier::StackPush(Handle<Object> object) {
}
}
}
- JSArray::SetLength(stack_, length + 1);
- FixedArray::cast(stack_->elements())->set(length, *object);
+ stack_.emplace_back(key, object);
return SUCCESS;
}
-void JsonStringifier::StackPop() {
- int length = Smi::ToInt(stack_->length());
- stack_->set_length(Smi::FromInt(length - 1));
-}
+void JsonStringifier::StackPop() { stack_.pop_back(); }
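Keeping the key beside each object on the stack is what lets the circular-structure TypeError describe the path to the cycle; from script it looks roughly like this (message wording approximate for this V8 line):

const o = { a: {} };
o.a.self = o.a;
JSON.stringify(o);
// TypeError: Converting circular structure to JSON
//     --> starting at object with constructor 'Object'
//     |     property 'a' -> object with constructor 'Object'
//     --- property 'self' closes the circle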
template <bool deferred_string_key>
JsonStringifier::Result JsonStringifier::Serialize_(Handle<Object> object,
@@ -441,10 +442,10 @@ JsonStringifier::Result JsonStringifier::Serialize_(Handle<Object> object,
}
case JS_ARRAY_TYPE:
if (deferred_string_key) SerializeDeferredKey(comma, key);
- return SerializeJSArray(Handle<JSArray>::cast(object));
+ return SerializeJSArray(Handle<JSArray>::cast(object), key);
case JS_VALUE_TYPE:
if (deferred_string_key) SerializeDeferredKey(comma, key);
- return SerializeJSValue(Handle<JSValue>::cast(object));
+ return SerializeJSValue(Handle<JSValue>::cast(object), key);
case SYMBOL_TYPE:
return UNCHANGED;
default:
@@ -458,9 +459,9 @@ JsonStringifier::Result JsonStringifier::Serialize_(Handle<Object> object,
// Go to slow path for global proxy and objects requiring access checks.
if (deferred_string_key) SerializeDeferredKey(comma, key);
if (object->IsJSProxy()) {
- return SerializeJSProxy(Handle<JSProxy>::cast(object));
+ return SerializeJSProxy(Handle<JSProxy>::cast(object), key);
}
- return SerializeJSObject(Handle<JSObject>::cast(object));
+ return SerializeJSObject(Handle<JSObject>::cast(object), key);
}
}
@@ -468,8 +469,8 @@ JsonStringifier::Result JsonStringifier::Serialize_(Handle<Object> object,
}
JsonStringifier::Result JsonStringifier::SerializeJSValue(
- Handle<JSValue> object) {
- Object* raw = object->value();
+ Handle<JSValue> object, Handle<Object> key) {
+ Object raw = object->value();
if (raw->IsString()) {
Handle<Object> value;
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
@@ -489,12 +490,12 @@ JsonStringifier::Result JsonStringifier::SerializeJSValue(
builder_.AppendCString(raw->IsTrue(isolate_) ? "true" : "false");
} else {
// ES6 24.3.2.1 step 10.c, serialize as an ordinary JSObject.
- return SerializeJSObject(object);
+ return SerializeJSObject(object, key);
}
return SUCCESS;
}
-JsonStringifier::Result JsonStringifier::SerializeSmi(Smi* object) {
+JsonStringifier::Result JsonStringifier::SerializeSmi(Smi object) {
static const int kBufferSize = 100;
char chars[kBufferSize];
Vector<char> buffer(chars, kBufferSize);
@@ -515,9 +516,9 @@ JsonStringifier::Result JsonStringifier::SerializeDouble(double number) {
}
JsonStringifier::Result JsonStringifier::SerializeJSArray(
- Handle<JSArray> object) {
+ Handle<JSArray> object, Handle<Object> key) {
HandleScope handle_scope(isolate_);
- Result stack_push = StackPush(object);
+ Result stack_push = StackPush(object, key);
if (stack_push != SUCCESS) return stack_push;
uint32_t length = 0;
CHECK(object->length()->ToArrayLength(&length));
@@ -630,21 +631,18 @@ JsonStringifier::Result JsonStringifier::SerializeArrayLikeSlow(
}
JsonStringifier::Result JsonStringifier::SerializeJSObject(
- Handle<JSObject> object) {
+ Handle<JSObject> object, Handle<Object> key) {
HandleScope handle_scope(isolate_);
- Result stack_push = StackPush(object);
+ Result stack_push = StackPush(object, key);
if (stack_push != SUCCESS) return stack_push;
if (property_list_.is_null() &&
!object->map()->IsCustomElementsReceiverMap() &&
- object->HasFastProperties() &&
- Handle<JSObject>::cast(object)->elements()->length() == 0) {
- DCHECK(object->IsJSObject());
+ object->HasFastProperties() && object->elements()->length() == 0) {
DCHECK(!object->IsJSGlobalProxy());
- Handle<JSObject> js_obj = Handle<JSObject>::cast(object);
- DCHECK(!js_obj->HasIndexedInterceptor());
- DCHECK(!js_obj->HasNamedInterceptor());
- Handle<Map> map(js_obj->map(), isolate_);
+ DCHECK(!object->HasIndexedInterceptor());
+ DCHECK(!object->HasNamedInterceptor());
+ Handle<Map> map(object->map(), isolate_);
builder_.AppendCharacter('{');
Indent();
bool comma = false;
@@ -656,15 +654,15 @@ JsonStringifier::Result JsonStringifier::SerializeJSObject(
PropertyDetails details = map->instance_descriptors()->GetDetails(i);
if (details.IsDontEnum()) continue;
Handle<Object> property;
- if (details.location() == kField && *map == js_obj->map()) {
+ if (details.location() == kField && *map == object->map()) {
DCHECK_EQ(kData, details.kind());
FieldIndex field_index = FieldIndex::ForDescriptor(*map, i);
- property = JSObject::FastPropertyAt(js_obj, details.representation(),
+ property = JSObject::FastPropertyAt(object, details.representation(),
field_index);
} else {
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
isolate_, property,
- Object::GetPropertyOrElement(isolate_, js_obj, key), EXCEPTION);
+ Object::GetPropertyOrElement(isolate_, object, key), EXCEPTION);
}
Result result = SerializeProperty(property, comma, key);
if (!comma && result == SUCCESS) comma = true;
@@ -712,9 +710,9 @@ JsonStringifier::Result JsonStringifier::SerializeJSReceiverSlow(
}
JsonStringifier::Result JsonStringifier::SerializeJSProxy(
- Handle<JSProxy> object) {
+ Handle<JSProxy> object, Handle<Object> key) {
HandleScope scope(isolate_);
- Result stack_push = StackPush(object);
+ Result stack_push = StackPush(object, key);
if (stack_push != SUCCESS) return stack_push;
Maybe<bool> is_array = Object::IsArray(object);
if (is_array.IsNothing()) return EXCEPTION;
@@ -812,9 +810,9 @@ void JsonStringifier::SerializeString_(Handle<String> string) {
// part, or we might need to allocate.
if (int worst_case_length = builder_.EscapedLengthIfCurrentPartFits(length)) {
DisallowHeapAllocation no_gc;
- Vector<const SrcChar> vector = string->GetCharVector<SrcChar>();
+ Vector<const SrcChar> vector = string->GetCharVector<SrcChar>(no_gc);
IncrementalStringBuilder::NoExtendBuilder<DestChar> no_extend(
- &builder_, worst_case_length);
+ &builder_, worst_case_length, no_gc);
SerializeStringUnchecked_(vector, &no_extend);
} else {
FlatStringReader reader(isolate_, string);
@@ -904,14 +902,14 @@ void JsonStringifier::SerializeDeferredKey(bool deferred_comma,
void JsonStringifier::SerializeString(Handle<String> object) {
object = String::Flatten(isolate_, object);
if (builder_.CurrentEncoding() == String::ONE_BYTE_ENCODING) {
- if (object->IsOneByteRepresentationUnderneath()) {
+ if (String::IsOneByteRepresentationUnderneath(*object)) {
SerializeString_<uint8_t, uint8_t>(object);
} else {
builder_.ChangeEncoding();
SerializeString(object);
}
} else {
- if (object->IsOneByteRepresentationUnderneath()) {
+ if (String::IsOneByteRepresentationUnderneath(*object)) {
SerializeString_<uint8_t, uc16>(object);
} else {
SerializeString_<uc16, uc16>(object);
diff --git a/deps/v8/src/keys.cc b/deps/v8/src/keys.cc
index ab893d5df9..cc11ac09bf 100644
--- a/deps/v8/src/keys.cc
+++ b/deps/v8/src/keys.cc
@@ -25,7 +25,7 @@ namespace {
static bool ContainsOnlyValidKeys(Handle<FixedArray> array) {
int len = array->length();
for (int i = 0; i < len; i++) {
- Object* e = array->get(i);
+ Object e = array->get(i);
if (!(e->IsName() || e->IsNumber())) return false;
}
return true;
@@ -62,7 +62,7 @@ Handle<OrderedHashSet> KeyAccumulator::keys() {
return Handle<OrderedHashSet>::cast(keys_);
}
-void KeyAccumulator::AddKey(Object* key, AddKeyConversion convert) {
+void KeyAccumulator::AddKey(Object key, AddKeyConversion convert) {
AddKey(handle(key, isolate_), convert);
}
@@ -87,7 +87,7 @@ void KeyAccumulator::AddKey(Handle<Object> key, AddKeyConversion convert) {
// The keys_ Set is converted directly to a FixedArray in GetKeys which can
   // be left-trimmed. Hence the previous Set should not keep a pointer to the
// new one.
- keys_->set(OrderedHashTableBase::kNextTableIndex, Smi::kZero);
+ keys_->set(OrderedHashSet::NextTableIndex(), Smi::kZero);
keys_ = new_set;
}
}
@@ -209,7 +209,7 @@ bool KeyAccumulator::IsShadowed(Handle<Object> key) {
return shadowing_keys_->Has(isolate_, key);
}
-void KeyAccumulator::AddShadowingKey(Object* key) {
+void KeyAccumulator::AddShadowingKey(Object key) {
if (mode_ == KeyCollectionMode::kOwnOnly) return;
AddShadowingKey(handle(key, isolate_));
}
@@ -223,8 +223,8 @@ void KeyAccumulator::AddShadowingKey(Handle<Object> key) {
namespace {
-void TrySettingEmptyEnumCache(JSReceiver* object) {
- Map* map = object->map();
+void TrySettingEmptyEnumCache(JSReceiver object) {
+ Map map = object->map();
DCHECK_EQ(kInvalidEnumCacheSentinel, map->EnumLength());
if (!map->OnlyHasSimpleProperties()) return;
if (map->IsJSProxyMap()) return;
@@ -233,7 +233,7 @@ void TrySettingEmptyEnumCache(JSReceiver* object) {
map->SetEnumLength(0);
}
-bool CheckAndInitalizeEmptyEnumCache(JSReceiver* object) {
+bool CheckAndInitalizeEmptyEnumCache(JSReceiver object) {
if (object->map()->EnumLength() == kInvalidEnumCacheSentinel) {
TrySettingEmptyEnumCache(object);
}
@@ -250,10 +250,10 @@ void FastKeyAccumulator::Prepare() {
// Fully walk the prototype chain and find the last prototype with keys.
is_receiver_simple_enum_ = false;
has_empty_prototype_ = true;
- JSReceiver* last_prototype = nullptr;
+ JSReceiver last_prototype;
for (PrototypeIterator iter(isolate_, *receiver_); !iter.IsAtEnd();
iter.Advance()) {
- JSReceiver* current = iter.GetCurrent<JSReceiver>();
+ JSReceiver current = iter.GetCurrent<JSReceiver>();
bool has_no_properties = CheckAndInitalizeEmptyEnumCache(current);
if (has_no_properties) continue;
last_prototype = current;
@@ -263,7 +263,7 @@ void FastKeyAccumulator::Prepare() {
is_receiver_simple_enum_ =
receiver_->map()->EnumLength() != kInvalidEnumCacheSentinel &&
!JSObject::cast(*receiver_)->HasEnumerableElements();
- } else if (last_prototype != nullptr) {
+ } else if (!last_prototype.is_null()) {
last_non_empty_prototype_ = handle(last_prototype, isolate_);
}
}
@@ -282,7 +282,7 @@ Handle<FixedArray> ReduceFixedArrayTo(Isolate* isolate,
Handle<FixedArray> GetFastEnumPropertyKeys(Isolate* isolate,
Handle<JSObject> object) {
Handle<Map> map(object->map(), isolate);
- Handle<FixedArray> keys(map->instance_descriptors()->GetEnumCache()->keys(),
+ Handle<FixedArray> keys(map->instance_descriptors()->enum_cache()->keys(),
isolate);
// Check if the {map} has a valid enum length, which implies that it
@@ -320,7 +320,7 @@ Handle<FixedArray> GetFastEnumPropertyKeys(Isolate* isolate,
DisallowHeapAllocation no_gc;
PropertyDetails details = descriptors->GetDetails(i);
if (details.IsDontEnum()) continue;
- Object* key = descriptors->GetKey(i);
+ Object key = descriptors->GetKey(i);
if (key->IsSymbol()) continue;
keys->set(index, key);
if (details.location() != kField) fields_only = false;
@@ -337,7 +337,7 @@ Handle<FixedArray> GetFastEnumPropertyKeys(Isolate* isolate,
DisallowHeapAllocation no_gc;
PropertyDetails details = descriptors->GetDetails(i);
if (details.IsDontEnum()) continue;
- Object* key = descriptors->GetKey(i);
+ Object key = descriptors->GetKey(i);
if (key->IsSymbol()) continue;
DCHECK_EQ(kData, details.kind());
DCHECK_EQ(kField, details.location());
@@ -348,7 +348,8 @@ Handle<FixedArray> GetFastEnumPropertyKeys(Isolate* isolate,
DCHECK_EQ(index, indices->length());
}
- DescriptorArray::SetEnumCache(descriptors, isolate, keys, indices);
+ DescriptorArray::InitializeOrChangeEnumCache(descriptors, isolate, keys,
+ indices);
if (map->OnlyHasSimpleProperties()) map->SetEnumLength(enum_length);
return keys;
@@ -401,7 +402,7 @@ MaybeHandle<FixedArray> FastKeyAccumulator::GetKeys(
MaybeHandle<FixedArray> FastKeyAccumulator::GetKeysFast(
GetKeysConversion keys_conversion) {
bool own_only = has_empty_prototype_ || mode_ == KeyCollectionMode::kOwnOnly;
- Map* map = receiver_->map();
+ Map map = receiver_->map();
if (!own_only || map->IsCustomElementsReceiverMap()) {
return MaybeHandle<FixedArray>();
}
@@ -439,7 +440,7 @@ MaybeHandle<FixedArray>
FastKeyAccumulator::GetOwnKeysWithUninitializedEnumCache() {
Handle<JSObject> object = Handle<JSObject>::cast(receiver_);
   // Uninitialized enum cache
- Map* map = object->map();
+ Map map = object->map();
if (object->elements()->length() != 0) {
// Assume that there are elements.
return MaybeHandle<FixedArray>();
@@ -603,12 +604,12 @@ int CollectOwnPropertyNamesInternal(Handle<JSObject> object,
if (filter & ONLY_ALL_CAN_READ) {
if (details.kind() != kAccessor) continue;
- Object* accessors = descs->GetStrongValue(i);
+ Object accessors = descs->GetStrongValue(i);
if (!accessors->IsAccessorInfo()) continue;
if (!AccessorInfo::cast(accessors)->all_can_read()) continue;
}
- Name* key = descs->GetKey(i);
+ Name key = descs->GetKey(i);
if (skip_symbols == key->IsSymbol()) {
if (first_skipped == -1) first_skipped = i;
continue;
@@ -629,7 +630,7 @@ Handle<FixedArray> GetOwnEnumPropertyDictionaryKeys(Isolate* isolate,
KeyCollectionMode mode,
KeyAccumulator* accumulator,
Handle<JSObject> object,
- T* raw_dictionary) {
+ T raw_dictionary) {
Handle<T> dictionary(raw_dictionary, isolate);
if (dictionary->NumberOfElements() == 0) {
return isolate->factory()->empty_fixed_array();
@@ -649,7 +650,7 @@ Maybe<bool> KeyAccumulator::CollectOwnPropertyNames(Handle<JSReceiver> receiver,
enum_keys = KeyAccumulator::GetOwnEnumPropertyKeys(isolate_, object);
// If the number of properties equals the length of enumerable properties
// we do not have to filter out non-enumerable ones
- Map* map = object->map();
+ Map map = object->map();
int nof_descriptors = map->NumberOfOwnDescriptors();
if (enum_keys->length() != nof_descriptors) {
Handle<DescriptorArray> descs =
@@ -657,7 +658,7 @@ Maybe<bool> KeyAccumulator::CollectOwnPropertyNames(Handle<JSReceiver> receiver,
for (int i = 0; i < nof_descriptors; i++) {
PropertyDetails details = descs->GetDetails(i);
if (!details.IsDontEnum()) continue;
- Object* key = descs->GetKey(i);
+ Object key = descs->GetKey(i);
this->AddShadowingKey(key);
}
}
@@ -747,12 +748,14 @@ Maybe<bool> KeyAccumulator::CollectOwnKeys(Handle<JSReceiver> receiver,
Handle<AccessCheckInfo> access_check_info;
{
DisallowHeapAllocation no_gc;
- AccessCheckInfo* maybe_info = AccessCheckInfo::Get(isolate_, object);
- if (maybe_info) access_check_info = handle(maybe_info, isolate_);
+ AccessCheckInfo maybe_info = AccessCheckInfo::Get(isolate_, object);
+ if (!maybe_info.is_null()) {
+ access_check_info = handle(maybe_info, isolate_);
+ }
}
// We always have both kinds of interceptors or none.
if (!access_check_info.is_null() &&
- access_check_info->named_interceptor()) {
+ access_check_info->named_interceptor() != Object()) {
MAYBE_RETURN(CollectAccessCheckInterceptorKeys(access_check_info,
receiver, object),
Nothing<bool>());
@@ -904,7 +907,7 @@ Maybe<bool> KeyAccumulator::CollectOwnJSProxyKeys(Handle<JSReceiver> receiver,
}
// 17. Repeat, for each key that is an element of targetNonconfigurableKeys:
for (int i = 0; i < nonconfigurable_keys_length; ++i) {
- Object* raw_key = target_nonconfigurable_keys->get(i);
+ Object raw_key = target_nonconfigurable_keys->get(i);
Handle<Name> key(Name::cast(raw_key), isolate_);
// 17a. If key is not an element of uncheckedResultKeys, throw a
// TypeError exception.
@@ -924,7 +927,7 @@ Maybe<bool> KeyAccumulator::CollectOwnJSProxyKeys(Handle<JSReceiver> receiver,
}
// 19. Repeat, for each key that is an element of targetConfigurableKeys:
for (int i = 0; i < target_configurable_keys->length(); ++i) {
- Object* raw_key = target_configurable_keys->get(i);
+ Object raw_key = target_configurable_keys->get(i);
if (raw_key->IsSmi()) continue; // Zapped entry, was nonconfigurable.
Handle<Name> key(Name::cast(raw_key), isolate_);
// 19a. If key is not an element of uncheckedResultKeys, throw a
diff --git a/deps/v8/src/keys.h b/deps/v8/src/keys.h
index 7ec7127aa5..b4eaa3101c 100644
--- a/deps/v8/src/keys.h
+++ b/deps/v8/src/keys.h
@@ -63,7 +63,7 @@ class KeyAccumulator final {
static Handle<FixedArray> GetOwnEnumPropertyKeys(Isolate* isolate,
Handle<JSObject> object);
- void AddKey(Object* key, AddKeyConversion convert = DO_NOT_CONVERT);
+ void AddKey(Object key, AddKeyConversion convert = DO_NOT_CONVERT);
void AddKey(Handle<Object> key, AddKeyConversion convert = DO_NOT_CONVERT);
void AddKeys(Handle<FixedArray> array, AddKeyConversion convert);
void AddKeys(Handle<JSObject> array_like, AddKeyConversion convert);
@@ -89,7 +89,7 @@ class KeyAccumulator final {
}
// Shadowing keys are used to filter keys. This happens when non-enumerable
// keys appear again on the prototype chain.
- void AddShadowingKey(Object* key);
+ void AddShadowingKey(Object key);
void AddShadowingKey(Handle<Object> key);
private:
diff --git a/deps/v8/src/layout-descriptor-inl.h b/deps/v8/src/layout-descriptor-inl.h
index ebed59ef2c..90d3b0d3bc 100644
--- a/deps/v8/src/layout-descriptor-inl.h
+++ b/deps/v8/src/layout-descriptor-inl.h
@@ -6,21 +6,32 @@
#define V8_LAYOUT_DESCRIPTOR_INL_H_
#include "src/layout-descriptor.h"
+
+#include "src/handles-inl.h"
#include "src/objects-inl.h"
#include "src/objects/descriptor-array.h"
+#include "src/objects/smi.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
namespace v8 {
namespace internal {
-LayoutDescriptor* LayoutDescriptor::FromSmi(Smi* smi) {
- return LayoutDescriptor::cast(smi);
+LayoutDescriptor::LayoutDescriptor(Address ptr)
+ : ByteArray(ptr, AllowInlineSmiStorage::kAllowBeingASmi) {
+ SLOW_DCHECK(IsLayoutDescriptor());
}
+CAST_ACCESSOR(LayoutDescriptor)
+LayoutDescriptor LayoutDescriptor::FromSmi(Smi smi) {
+ return LayoutDescriptor::cast(smi);
+}
Handle<LayoutDescriptor> LayoutDescriptor::New(Isolate* isolate, int length) {
if (length <= kBitsInSmiLayout) {
// The whole bit vector fits into a smi.
- return handle(LayoutDescriptor::FromSmi(Smi::kZero), isolate);
+ return handle(LayoutDescriptor::FromSmi(Smi::zero()), isolate);
}
int backing_store_length = GetSlowModeBackingStoreLength(length);
Handle<LayoutDescriptor> result = Handle<LayoutDescriptor>::cast(
@@ -40,12 +51,10 @@ bool LayoutDescriptor::InobjectUnboxedField(int inobject_properties,
return details.field_index() < inobject_properties;
}
-
-LayoutDescriptor* LayoutDescriptor::FastPointerLayout() {
- return LayoutDescriptor::FromSmi(Smi::kZero);
+LayoutDescriptor LayoutDescriptor::FastPointerLayout() {
+ return LayoutDescriptor::FromSmi(Smi::zero());
}
-
bool LayoutDescriptor::GetIndexes(int field_index, int* layout_word_index,
int* layout_bit_index) {
if (static_cast<unsigned>(field_index) >= static_cast<unsigned>(capacity())) {
@@ -60,13 +69,11 @@ bool LayoutDescriptor::GetIndexes(int field_index, int* layout_word_index,
return true;
}
-
-LayoutDescriptor* LayoutDescriptor::SetRawData(int field_index) {
+LayoutDescriptor LayoutDescriptor::SetRawData(int field_index) {
return SetTagged(field_index, false);
}
-
-LayoutDescriptor* LayoutDescriptor::SetTagged(int field_index, bool tagged) {
+LayoutDescriptor LayoutDescriptor::SetTagged(int field_index, bool tagged) {
int layout_word_index = 0;
int layout_bit_index = 0;
@@ -81,9 +88,9 @@ LayoutDescriptor* LayoutDescriptor::SetTagged(int field_index, bool tagged) {
value |= layout_mask;
}
set_layout_word(layout_word_index, value);
- return this;
+ return *this;
} else {
- uint32_t value = static_cast<uint32_t>(Smi::ToInt(this));
+ uint32_t value = static_cast<uint32_t>(Smi::ToInt(*this));
if (tagged) {
value &= ~layout_mask;
} else {
@@ -93,7 +100,6 @@ LayoutDescriptor* LayoutDescriptor::SetTagged(int field_index, bool tagged) {
}
}
-
bool LayoutDescriptor::IsTagged(int field_index) {
if (IsFastPointerLayout()) return true;
@@ -110,22 +116,20 @@ bool LayoutDescriptor::IsTagged(int field_index) {
uint32_t value = get_layout_word(layout_word_index);
return (value & layout_mask) == 0;
} else {
- uint32_t value = static_cast<uint32_t>(Smi::ToInt(this));
+ uint32_t value = static_cast<uint32_t>(Smi::ToInt(*this));
return (value & layout_mask) == 0;
}
}
bool LayoutDescriptor::IsFastPointerLayout() {
- return this == FastPointerLayout();
+ return *this == FastPointerLayout();
}
-
-bool LayoutDescriptor::IsFastPointerLayout(Object* layout_descriptor) {
+bool LayoutDescriptor::IsFastPointerLayout(Object layout_descriptor) {
return layout_descriptor == FastPointerLayout();
}
-
bool LayoutDescriptor::IsSlowLayout() { return !IsSmi(); }
@@ -133,25 +137,23 @@ int LayoutDescriptor::capacity() {
return IsSlowLayout() ? (length() * kBitsPerByte) : kBitsInSmiLayout;
}
-
-LayoutDescriptor* LayoutDescriptor::cast_gc_safe(Object* object) {
+LayoutDescriptor LayoutDescriptor::cast_gc_safe(Object object) {
// The map word of the object can be a forwarding pointer during
// the object evacuation phase of GC. Since the layout descriptor methods
// for checking whether a field is tagged or not do not depend on the
// object map, it should be safe.
- return reinterpret_cast<LayoutDescriptor*>(object);
+ return LayoutDescriptor::unchecked_cast(object);
}
int LayoutDescriptor::GetSlowModeBackingStoreLength(int length) {
DCHECK_LT(0, length);
- // We allocate kPointerSize rounded blocks of memory anyway so we increase
+ // We allocate kTaggedSize rounded blocks of memory anyway so we increase
// the length of allocated array to utilize that "lost" space which could
// also help to avoid layout descriptor reallocations.
- return RoundUp(length, kBitsPerByte * kPointerSize) / kBitsPerByte;
+ return RoundUp(length, kBitsPerByte * kTaggedSize) / kBitsPerByte;
}
-
-int LayoutDescriptor::CalculateCapacity(Map* map, DescriptorArray* descriptors,
+int LayoutDescriptor::CalculateCapacity(Map map, DescriptorArray descriptors,
int num_descriptors) {
int inobject_properties = map->GetInObjectProperties();
if (inobject_properties == 0) return 0;
@@ -159,7 +161,7 @@ int LayoutDescriptor::CalculateCapacity(Map* map, DescriptorArray* descriptors,
DCHECK_LE(num_descriptors, descriptors->number_of_descriptors());
int layout_descriptor_length;
- const int kMaxWordsPerField = kDoubleSize / kPointerSize;
+ const int kMaxWordsPerField = kDoubleSize / kTaggedSize;
if (num_descriptors <= kBitsInSmiLayout / kMaxWordsPerField) {
// Even in the "worst" case (all fields are doubles) it would fit into
@@ -182,9 +184,8 @@ int LayoutDescriptor::CalculateCapacity(Map* map, DescriptorArray* descriptors,
return layout_descriptor_length;
}
-
-LayoutDescriptor* LayoutDescriptor::Initialize(
- LayoutDescriptor* layout_descriptor, Map* map, DescriptorArray* descriptors,
+LayoutDescriptor LayoutDescriptor::Initialize(
+ LayoutDescriptor layout_descriptor, Map map, DescriptorArray descriptors,
int num_descriptors) {
DisallowHeapAllocation no_allocation;
int inobject_properties = map->GetInObjectProperties();
@@ -219,7 +220,7 @@ void LayoutDescriptor::set_layout_word(int index, uint32_t value) {
// LayoutDescriptorHelper is a helper class for querying whether inobject
// property at offset is Double or not.
-LayoutDescriptorHelper::LayoutDescriptorHelper(Map* map)
+LayoutDescriptorHelper::LayoutDescriptorHelper(Map map)
: all_fields_tagged_(true),
header_size_(0),
layout_descriptor_(LayoutDescriptor::FastPointerLayout()) {
@@ -230,7 +231,7 @@ LayoutDescriptorHelper::LayoutDescriptorHelper(Map* map)
return;
}
- header_size_ = map->GetInObjectPropertiesStartInWords() * kPointerSize;
+ header_size_ = map->GetInObjectPropertiesStartInWords() * kTaggedSize;
DCHECK_GE(header_size_, 0);
all_fields_tagged_ = false;
@@ -238,11 +239,11 @@ LayoutDescriptorHelper::LayoutDescriptorHelper(Map* map)
bool LayoutDescriptorHelper::IsTagged(int offset_in_bytes) {
- DCHECK(IsAligned(offset_in_bytes, kPointerSize));
+ DCHECK(IsAligned(offset_in_bytes, kTaggedSize));
if (all_fields_tagged_) return true;
// Object headers do not contain non-tagged fields.
if (offset_in_bytes < header_size_) return true;
- int field_index = (offset_in_bytes - header_size_) / kPointerSize;
+ int field_index = (offset_in_bytes - header_size_) / kTaggedSize;
return layout_descriptor_->IsTagged(field_index);
}
@@ -250,4 +251,6 @@ bool LayoutDescriptorHelper::IsTagged(int offset_in_bytes) {
} // namespace internal
} // namespace v8
+#include "src/objects/object-macros-undef.h"
+
#endif // V8_LAYOUT_DESCRIPTOR_INL_H_
diff --git a/deps/v8/src/layout-descriptor.cc b/deps/v8/src/layout-descriptor.cc
index 766b7d81c2..c90d53d06c 100644
--- a/deps/v8/src/layout-descriptor.cc
+++ b/deps/v8/src/layout-descriptor.cc
@@ -31,7 +31,7 @@ Handle<LayoutDescriptor> LayoutDescriptor::New(
Handle<LayoutDescriptor> layout_descriptor_handle =
LayoutDescriptor::New(isolate, layout_descriptor_length);
- LayoutDescriptor* layout_descriptor = Initialize(
+ LayoutDescriptor layout_descriptor = Initialize(
*layout_descriptor_handle, *map, *descriptors, num_descriptors);
return handle(layout_descriptor, isolate);
@@ -53,7 +53,7 @@ Handle<LayoutDescriptor> LayoutDescriptor::ShareAppend(
isolate, layout_descriptor, field_index + details.field_width_in_words());
DisallowHeapAllocation no_allocation;
- LayoutDescriptor* layout_desc = *layout_descriptor;
+ LayoutDescriptor layout_desc = *layout_descriptor;
layout_desc = layout_desc->SetRawData(field_index);
if (details.field_width_in_words() > 1) {
layout_desc = layout_desc->SetRawData(field_index + 1);
@@ -65,7 +65,7 @@ Handle<LayoutDescriptor> LayoutDescriptor::AppendIfFastOrUseFull(
Isolate* isolate, Handle<Map> map, PropertyDetails details,
Handle<LayoutDescriptor> full_layout_descriptor) {
DisallowHeapAllocation no_allocation;
- LayoutDescriptor* layout_descriptor = map->layout_descriptor();
+ LayoutDescriptor layout_descriptor = map->layout_descriptor();
if (layout_descriptor->IsSlowLayout()) {
return full_layout_descriptor;
}
@@ -134,7 +134,7 @@ bool LayoutDescriptor::IsTagged(int field_index, int max_sequence_length,
uint32_t layout_mask = static_cast<uint32_t>(1) << layout_bit_index;
uint32_t value = IsSlowLayout() ? get_layout_word(layout_word_index)
- : static_cast<uint32_t>(Smi::ToInt(this));
+ : static_cast<uint32_t>(Smi::ToInt(*this));
bool is_tagged = (value & layout_mask) == 0;
if (!is_tagged) value = ~value; // Count set bits instead of cleared bits.
@@ -186,26 +186,24 @@ Handle<LayoutDescriptor> LayoutDescriptor::NewForTesting(Isolate* isolate,
return New(isolate, length);
}
-
-LayoutDescriptor* LayoutDescriptor::SetTaggedForTesting(int field_index,
- bool tagged) {
+LayoutDescriptor LayoutDescriptor::SetTaggedForTesting(int field_index,
+ bool tagged) {
return SetTagged(field_index, tagged);
}
-
bool LayoutDescriptorHelper::IsTagged(
int offset_in_bytes, int end_offset,
int* out_end_of_contiguous_region_offset) {
- DCHECK(IsAligned(offset_in_bytes, kPointerSize));
- DCHECK(IsAligned(end_offset, kPointerSize));
+ DCHECK(IsAligned(offset_in_bytes, kTaggedSize));
+ DCHECK(IsAligned(end_offset, kTaggedSize));
DCHECK(offset_in_bytes < end_offset);
if (all_fields_tagged_) {
*out_end_of_contiguous_region_offset = end_offset;
DCHECK(offset_in_bytes < *out_end_of_contiguous_region_offset);
return true;
}
- int max_sequence_length = (end_offset - offset_in_bytes) / kPointerSize;
- int field_index = Max(0, (offset_in_bytes - header_size_) / kPointerSize);
+ int max_sequence_length = (end_offset - offset_in_bytes) / kTaggedSize;
+ int field_index = Max(0, (offset_in_bytes - header_size_) / kTaggedSize);
int sequence_length;
bool tagged = layout_descriptor_->IsTagged(field_index, max_sequence_length,
&sequence_length);
@@ -216,7 +214,7 @@ bool LayoutDescriptorHelper::IsTagged(
if (tagged) {
// First field is tagged, calculate end offset from there.
*out_end_of_contiguous_region_offset =
- header_size_ + sequence_length * kPointerSize;
+ header_size_ + sequence_length * kTaggedSize;
} else {
*out_end_of_contiguous_region_offset = header_size_;
@@ -225,19 +223,18 @@ bool LayoutDescriptorHelper::IsTagged(
return true;
}
*out_end_of_contiguous_region_offset =
- offset_in_bytes + sequence_length * kPointerSize;
+ offset_in_bytes + sequence_length * kTaggedSize;
DCHECK(offset_in_bytes < *out_end_of_contiguous_region_offset);
return tagged;
}
-
-LayoutDescriptor* LayoutDescriptor::Trim(Heap* heap, Map* map,
- DescriptorArray* descriptors,
- int num_descriptors) {
+LayoutDescriptor LayoutDescriptor::Trim(Heap* heap, Map map,
+ DescriptorArray descriptors,
+ int num_descriptors) {
DisallowHeapAllocation no_allocation;
// Fast mode descriptors are never shared and therefore always fully
// correspond to their map.
- if (!IsSlowLayout()) return this;
+ if (!IsSlowLayout()) return *this;
int layout_descriptor_length =
CalculateCapacity(map, descriptors, num_descriptors);
@@ -252,19 +249,18 @@ LayoutDescriptor* LayoutDescriptor::Trim(Heap* heap, Map* map,
if (new_backing_store_length != backing_store_length) {
DCHECK_LT(new_backing_store_length, backing_store_length);
int delta = backing_store_length - new_backing_store_length;
- heap->RightTrimFixedArray(this, delta);
+ heap->RightTrimFixedArray(*this, delta);
}
memset(GetDataStartAddress(), 0, DataSize());
- LayoutDescriptor* layout_descriptor =
- Initialize(this, map, descriptors, num_descriptors);
- DCHECK_EQ(this, layout_descriptor);
+ LayoutDescriptor layout_descriptor =
+ Initialize(*this, map, descriptors, num_descriptors);
+ DCHECK_EQ(*this, layout_descriptor);
return layout_descriptor;
}
-
-bool LayoutDescriptor::IsConsistentWithMap(Map* map, bool check_tail) {
+bool LayoutDescriptor::IsConsistentWithMap(Map map, bool check_tail) {
if (FLAG_unbox_double_fields) {
- DescriptorArray* descriptors = map->instance_descriptors();
+ DescriptorArray descriptors = map->instance_descriptors();
int nof_descriptors = map->NumberOfOwnDescriptors();
int last_field_index = 0;
for (int i = 0; i < nof_descriptors; i++) {
diff --git a/deps/v8/src/layout-descriptor.h b/deps/v8/src/layout-descriptor.h
index 54c8ff09bd..d5ce1f9223 100644
--- a/deps/v8/src/layout-descriptor.h
+++ b/deps/v8/src/layout-descriptor.h
@@ -9,6 +9,9 @@
#include "src/objects/fixed-array.h"
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
namespace v8 {
namespace internal {
@@ -38,15 +41,14 @@ class LayoutDescriptor : public ByteArray {
// Returns true if this is a layout of an object having only tagged fields.
V8_INLINE bool IsFastPointerLayout();
- V8_INLINE static bool IsFastPointerLayout(Object* layout_descriptor);
+ V8_INLINE static bool IsFastPointerLayout(Object layout_descriptor);
// Returns true if the layout descriptor is in non-Smi form.
V8_INLINE bool IsSlowLayout();
- V8_INLINE static LayoutDescriptor* cast(Object* object);
- V8_INLINE static const LayoutDescriptor* cast(const Object* object);
+ DECL_CAST(LayoutDescriptor)
- V8_INLINE static LayoutDescriptor* cast_gc_safe(Object* object);
+ V8_INLINE static LayoutDescriptor cast_gc_safe(Object object);
// Builds layout descriptor optimized for given |map| by |num_descriptors|
// elements of given descriptors array. The |map|'s descriptors could be
@@ -69,17 +71,17 @@ class LayoutDescriptor : public ByteArray {
// Layout descriptor that corresponds to an object all fields of which are
// tagged (FastPointerLayout).
- V8_INLINE static LayoutDescriptor* FastPointerLayout();
+ V8_INLINE static LayoutDescriptor FastPointerLayout();
// Check that this layout descriptor corresponds to given map.
- bool IsConsistentWithMap(Map* map, bool check_tail = false);
+ bool IsConsistentWithMap(Map map, bool check_tail = false);
// Trims this layout descriptor to given number of descriptors. This happens
// only when corresponding descriptors array is trimmed.
// The layout descriptor could be trimmed if it was slow or it could
// become fast.
- LayoutDescriptor* Trim(Heap* heap, Map* map, DescriptorArray* descriptors,
- int num_descriptors);
+ LayoutDescriptor Trim(Heap* heap, Map map, DescriptorArray descriptors,
+ int num_descriptors);
#ifdef OBJECT_PRINT
// For our gdb macros, we should perhaps change these in the future.
@@ -93,7 +95,7 @@ class LayoutDescriptor : public ByteArray {
V8_INLINE int capacity();
static Handle<LayoutDescriptor> NewForTesting(Isolate* isolate, int length);
- LayoutDescriptor* SetTaggedForTesting(int field_index, bool tagged);
+ LayoutDescriptor SetTaggedForTesting(int field_index, bool tagged);
private:
// Exclude sign-bit to simplify encoding.
@@ -107,14 +109,14 @@ class LayoutDescriptor : public ByteArray {
V8_INLINE void set_layout_word(int index, uint32_t value);
V8_INLINE static Handle<LayoutDescriptor> New(Isolate* isolate, int length);
- V8_INLINE static LayoutDescriptor* FromSmi(Smi* smi);
+ V8_INLINE static LayoutDescriptor FromSmi(Smi smi);
V8_INLINE static bool InobjectUnboxedField(int inobject_properties,
PropertyDetails details);
// Calculates minimal layout descriptor capacity required for given
// |map|, |descriptors| and |num_descriptors|.
- V8_INLINE static int CalculateCapacity(Map* map, DescriptorArray* descriptors,
+ V8_INLINE static int CalculateCapacity(Map map, DescriptorArray descriptors,
int num_descriptors);
// Calculates the length of the slow-mode backing store array by given layout
@@ -123,9 +125,9 @@ class LayoutDescriptor : public ByteArray {
// Fills in clean |layout_descriptor| according to given |map|, |descriptors|
// and |num_descriptors|.
- V8_INLINE static LayoutDescriptor* Initialize(
- LayoutDescriptor* layout_descriptor, Map* map,
- DescriptorArray* descriptors, int num_descriptors);
+ V8_INLINE static LayoutDescriptor Initialize(
+ LayoutDescriptor layout_descriptor, Map map, DescriptorArray descriptors,
+ int num_descriptors);
static Handle<LayoutDescriptor> EnsureCapacity(
Isolate* isolate, Handle<LayoutDescriptor> layout_descriptor,
@@ -135,10 +137,12 @@ class LayoutDescriptor : public ByteArray {
V8_INLINE bool GetIndexes(int field_index, int* layout_word_index,
int* layout_bit_index);
- V8_INLINE V8_WARN_UNUSED_RESULT LayoutDescriptor* SetRawData(int field_index);
+ V8_INLINE V8_WARN_UNUSED_RESULT LayoutDescriptor SetRawData(int field_index);
+
+ V8_INLINE V8_WARN_UNUSED_RESULT LayoutDescriptor SetTagged(int field_index,
+ bool tagged);
- V8_INLINE V8_WARN_UNUSED_RESULT LayoutDescriptor* SetTagged(int field_index,
- bool tagged);
+ OBJECT_CONSTRUCTORS(LayoutDescriptor, ByteArray)
};
@@ -146,7 +150,7 @@ class LayoutDescriptor : public ByteArray {
// about whether the field at given offset is tagged or not.
class LayoutDescriptorHelper {
public:
- inline explicit LayoutDescriptorHelper(Map* map);
+ inline explicit LayoutDescriptorHelper(Map map);
bool all_fields_tagged() { return all_fields_tagged_; }
inline bool IsTagged(int offset_in_bytes);
@@ -162,9 +166,11 @@ class LayoutDescriptorHelper {
private:
bool all_fields_tagged_;
int header_size_;
- LayoutDescriptor* layout_descriptor_;
+ LayoutDescriptor layout_descriptor_;
};
} // namespace internal
} // namespace v8
+#include "src/objects/object-macros-undef.h"
+
#endif // V8_LAYOUT_DESCRIPTOR_H_
diff --git a/deps/v8/src/libplatform/default-foreground-task-runner.cc b/deps/v8/src/libplatform/default-foreground-task-runner.cc
index 140dd404ec..0a31024d9a 100644
--- a/deps/v8/src/libplatform/default-foreground-task-runner.cc
+++ b/deps/v8/src/libplatform/default-foreground-task-runner.cc
@@ -15,7 +15,7 @@ DefaultForegroundTaskRunner::DefaultForegroundTaskRunner(
: idle_task_support_(idle_task_support), time_function_(time_function) {}
void DefaultForegroundTaskRunner::Terminate() {
- base::LockGuard<base::Mutex> guard(&lock_);
+ base::MutexGuard guard(&lock_);
terminated_ = true;
// Drain the task queues.
@@ -24,15 +24,15 @@ void DefaultForegroundTaskRunner::Terminate() {
while (!idle_task_queue_.empty()) idle_task_queue_.pop();
}
-void DefaultForegroundTaskRunner::PostTaskLocked(
- std::unique_ptr<Task> task, const base::LockGuard<base::Mutex>&) {
+void DefaultForegroundTaskRunner::PostTaskLocked(std::unique_ptr<Task> task,
+ const base::MutexGuard&) {
if (terminated_) return;
task_queue_.push(std::move(task));
event_loop_control_.NotifyOne();
}
void DefaultForegroundTaskRunner::PostTask(std::unique_ptr<Task> task) {
- base::LockGuard<base::Mutex> guard(&lock_);
+ base::MutexGuard guard(&lock_);
PostTaskLocked(std::move(task), guard);
}
@@ -43,7 +43,7 @@ double DefaultForegroundTaskRunner::MonotonicallyIncreasingTime() {
void DefaultForegroundTaskRunner::PostDelayedTask(std::unique_ptr<Task> task,
double delay_in_seconds) {
DCHECK_GE(delay_in_seconds, 0.0);
- base::LockGuard<base::Mutex> guard(&lock_);
+ base::MutexGuard guard(&lock_);
if (terminated_) return;
double deadline = MonotonicallyIncreasingTime() + delay_in_seconds;
delayed_task_queue_.push(std::make_pair(deadline, std::move(task)));
@@ -51,7 +51,7 @@ void DefaultForegroundTaskRunner::PostDelayedTask(std::unique_ptr<Task> task,
void DefaultForegroundTaskRunner::PostIdleTask(std::unique_ptr<IdleTask> task) {
CHECK_EQ(IdleTaskSupport::kEnabled, idle_task_support_);
- base::LockGuard<base::Mutex> guard(&lock_);
+ base::MutexGuard guard(&lock_);
if (terminated_) return;
idle_task_queue_.push(std::move(task));
}
@@ -62,7 +62,7 @@ bool DefaultForegroundTaskRunner::IdleTasksEnabled() {
std::unique_ptr<Task> DefaultForegroundTaskRunner::PopTaskFromQueue(
MessageLoopBehavior wait_for_work) {
- base::LockGuard<base::Mutex> guard(&lock_);
+ base::MutexGuard guard(&lock_);
// Move delayed tasks that hit their deadline to the main queue.
std::unique_ptr<Task> task = PopTaskFromDelayedQueueLocked(guard);
while (task) {
@@ -83,7 +83,7 @@ std::unique_ptr<Task> DefaultForegroundTaskRunner::PopTaskFromQueue(
std::unique_ptr<Task>
DefaultForegroundTaskRunner::PopTaskFromDelayedQueueLocked(
- const base::LockGuard<base::Mutex>&) {
+ const base::MutexGuard&) {
if (delayed_task_queue_.empty()) return {};
double now = MonotonicallyIncreasingTime();
@@ -102,7 +102,7 @@ DefaultForegroundTaskRunner::PopTaskFromDelayedQueueLocked(
}
std::unique_ptr<IdleTask> DefaultForegroundTaskRunner::PopTaskFromIdleQueue() {
- base::LockGuard<base::Mutex> guard(&lock_);
+ base::MutexGuard guard(&lock_);
if (idle_task_queue_.empty()) return {};
std::unique_ptr<IdleTask> task = std::move(idle_task_queue_.front());
@@ -111,8 +111,7 @@ std::unique_ptr<IdleTask> DefaultForegroundTaskRunner::PopTaskFromIdleQueue() {
return task;
}
-void DefaultForegroundTaskRunner::WaitForTaskLocked(
- const base::LockGuard<base::Mutex>&) {
+void DefaultForegroundTaskRunner::WaitForTaskLocked(const base::MutexGuard&) {
event_loop_control_.Wait(&lock_);
}
diff --git a/deps/v8/src/libplatform/default-foreground-task-runner.h b/deps/v8/src/libplatform/default-foreground-task-runner.h
index a0869d0bc7..78c0f6b660 100644
--- a/deps/v8/src/libplatform/default-foreground-task-runner.h
+++ b/deps/v8/src/libplatform/default-foreground-task-runner.h
@@ -29,7 +29,7 @@ class V8_PLATFORM_EXPORT DefaultForegroundTaskRunner
std::unique_ptr<IdleTask> PopTaskFromIdleQueue();
- void WaitForTaskLocked(const base::LockGuard<base::Mutex>&);
+ void WaitForTaskLocked(const base::MutexGuard&);
double MonotonicallyIncreasingTime();
@@ -46,13 +46,11 @@ class V8_PLATFORM_EXPORT DefaultForegroundTaskRunner
private:
// The same as PostTask, but the lock is already held by the caller. The
// {guard} parameter should make sure that the caller is holding the lock.
- void PostTaskLocked(std::unique_ptr<Task> task,
- const base::LockGuard<base::Mutex>&);
+ void PostTaskLocked(std::unique_ptr<Task> task, const base::MutexGuard&);
// A caller of this function has to hold {lock_}. The {guard} parameter should
// make sure that the caller is holding the lock.
- std::unique_ptr<Task> PopTaskFromDelayedQueueLocked(
- const base::LockGuard<base::Mutex>&);
+ std::unique_ptr<Task> PopTaskFromDelayedQueueLocked(const base::MutexGuard&);
bool terminated_ = false;
base::Mutex lock_;
@@ -68,7 +66,7 @@ class V8_PLATFORM_EXPORT DefaultForegroundTaskRunner
// queue. This is necessary because we have to reset the unique_ptr when we
// remove a DelayedEntry from the priority queue.
struct DelayedEntryCompare {
- bool operator()(DelayedEntry& left, DelayedEntry& right) {
+ bool operator()(const DelayedEntry& left, const DelayedEntry& right) const {
return left.first > right.first;
}
};
diff --git a/deps/v8/src/libplatform/default-platform.cc b/deps/v8/src/libplatform/default-platform.cc
index c23616116e..e33cf07844 100644
--- a/deps/v8/src/libplatform/default-platform.cc
+++ b/deps/v8/src/libplatform/default-platform.cc
@@ -92,7 +92,7 @@ DefaultPlatform::DefaultPlatform(
}
DefaultPlatform::~DefaultPlatform() {
- base::LockGuard<base::Mutex> guard(&lock_);
+ base::MutexGuard guard(&lock_);
if (worker_threads_task_runner_) worker_threads_task_runner_->Terminate();
for (auto it : foreground_task_runner_map_) {
it.second->Terminate();
@@ -100,7 +100,7 @@ DefaultPlatform::~DefaultPlatform() {
}
void DefaultPlatform::SetThreadPoolSize(int thread_pool_size) {
- base::LockGuard<base::Mutex> guard(&lock_);
+ base::MutexGuard guard(&lock_);
DCHECK_GE(thread_pool_size, 0);
if (thread_pool_size < 1) {
thread_pool_size = base::SysInfo::NumberOfProcessors() - 1;
@@ -110,7 +110,7 @@ void DefaultPlatform::SetThreadPoolSize(int thread_pool_size) {
}
void DefaultPlatform::EnsureBackgroundTaskRunnerInitialized() {
- base::LockGuard<base::Mutex> guard(&lock_);
+ base::MutexGuard guard(&lock_);
if (!worker_threads_task_runner_) {
worker_threads_task_runner_ =
std::make_shared<DefaultWorkerThreadsTaskRunner>(thread_pool_size_);
@@ -128,7 +128,7 @@ double DefaultTimeFunction() {
void DefaultPlatform::SetTimeFunctionForTesting(
DefaultPlatform::TimeFunction time_function) {
- base::LockGuard<base::Mutex> guard(&lock_);
+ base::MutexGuard guard(&lock_);
time_function_for_testing_ = time_function;
// The time function has to be set right after the construction of the platform.
DCHECK(foreground_task_runner_map_.empty());
@@ -139,7 +139,7 @@ bool DefaultPlatform::PumpMessageLoop(v8::Isolate* isolate,
bool failed_result = wait_for_work == MessageLoopBehavior::kWaitForWork;
std::shared_ptr<DefaultForegroundTaskRunner> task_runner;
{
- base::LockGuard<base::Mutex> guard(&lock_);
+ base::MutexGuard guard(&lock_);
auto it = foreground_task_runner_map_.find(isolate);
if (it == foreground_task_runner_map_.end()) return failed_result;
task_runner = it->second;
@@ -157,7 +157,7 @@ void DefaultPlatform::RunIdleTasks(v8::Isolate* isolate,
DCHECK_EQ(IdleTaskSupport::kEnabled, idle_task_support_);
std::shared_ptr<DefaultForegroundTaskRunner> task_runner;
{
- base::LockGuard<base::Mutex> guard(&lock_);
+ base::MutexGuard guard(&lock_);
if (foreground_task_runner_map_.find(isolate) ==
foreground_task_runner_map_.end()) {
return;
@@ -176,7 +176,7 @@ void DefaultPlatform::RunIdleTasks(v8::Isolate* isolate,
std::shared_ptr<TaskRunner> DefaultPlatform::GetForegroundTaskRunner(
v8::Isolate* isolate) {
- base::LockGuard<base::Mutex> guard(&lock_);
+ base::MutexGuard guard(&lock_);
if (foreground_task_runner_map_.find(isolate) ==
foreground_task_runner_map_.end()) {
foreground_task_runner_map_.insert(std::make_pair(
diff --git a/deps/v8/src/libplatform/default-worker-threads-task-runner.cc b/deps/v8/src/libplatform/default-worker-threads-task-runner.cc
index f3338acfe3..8832845343 100644
--- a/deps/v8/src/libplatform/default-worker-threads-task-runner.cc
+++ b/deps/v8/src/libplatform/default-worker-threads-task-runner.cc
@@ -24,7 +24,7 @@ DefaultWorkerThreadsTaskRunner::~DefaultWorkerThreadsTaskRunner() {
}
void DefaultWorkerThreadsTaskRunner::Terminate() {
- base::LockGuard<base::Mutex> guard(&lock_);
+ base::MutexGuard guard(&lock_);
terminated_ = true;
queue_.Terminate();
// Clearing the thread pool lets all worker threads join.
@@ -32,14 +32,14 @@ void DefaultWorkerThreadsTaskRunner::Terminate() {
}
void DefaultWorkerThreadsTaskRunner::PostTask(std::unique_ptr<Task> task) {
- base::LockGuard<base::Mutex> guard(&lock_);
+ base::MutexGuard guard(&lock_);
if (terminated_) return;
queue_.Append(std::move(task));
}
void DefaultWorkerThreadsTaskRunner::PostDelayedTask(std::unique_ptr<Task> task,
double delay_in_seconds) {
- base::LockGuard<base::Mutex> guard(&lock_);
+ base::MutexGuard guard(&lock_);
if (terminated_) return;
if (delay_in_seconds == 0) {
queue_.Append(std::move(task));
diff --git a/deps/v8/src/libplatform/task-queue.cc b/deps/v8/src/libplatform/task-queue.cc
index 19d668c095..61edd74434 100644
--- a/deps/v8/src/libplatform/task-queue.cc
+++ b/deps/v8/src/libplatform/task-queue.cc
@@ -16,13 +16,13 @@ TaskQueue::TaskQueue() : process_queue_semaphore_(0), terminated_(false) {}
TaskQueue::~TaskQueue() {
- base::LockGuard<base::Mutex> guard(&lock_);
+ base::MutexGuard guard(&lock_);
DCHECK(terminated_);
DCHECK(task_queue_.empty());
}
void TaskQueue::Append(std::unique_ptr<Task> task) {
- base::LockGuard<base::Mutex> guard(&lock_);
+ base::MutexGuard guard(&lock_);
DCHECK(!terminated_);
task_queue_.push(std::move(task));
process_queue_semaphore_.Signal();
@@ -31,7 +31,7 @@ void TaskQueue::Append(std::unique_ptr<Task> task) {
std::unique_ptr<Task> TaskQueue::GetNext() {
for (;;) {
{
- base::LockGuard<base::Mutex> guard(&lock_);
+ base::MutexGuard guard(&lock_);
if (!task_queue_.empty()) {
std::unique_ptr<Task> result = std::move(task_queue_.front());
task_queue_.pop();
@@ -48,7 +48,7 @@ std::unique_ptr<Task> TaskQueue::GetNext() {
void TaskQueue::Terminate() {
- base::LockGuard<base::Mutex> guard(&lock_);
+ base::MutexGuard guard(&lock_);
DCHECK(!terminated_);
terminated_ = true;
process_queue_semaphore_.Signal();
@@ -57,7 +57,7 @@ void TaskQueue::Terminate() {
void TaskQueue::BlockUntilQueueEmptyForTesting() {
for (;;) {
{
- base::LockGuard<base::Mutex> guard(&lock_);
+ base::MutexGuard guard(&lock_);
if (task_queue_.empty()) return;
}
base::OS::Sleep(base::TimeDelta::FromMilliseconds(5));
diff --git a/deps/v8/src/libplatform/tracing/trace-buffer.cc b/deps/v8/src/libplatform/tracing/trace-buffer.cc
index 8bec153440..22ed13edc1 100644
--- a/deps/v8/src/libplatform/tracing/trace-buffer.cc
+++ b/deps/v8/src/libplatform/tracing/trace-buffer.cc
@@ -16,7 +16,7 @@ TraceBufferRingBuffer::TraceBufferRingBuffer(size_t max_chunks,
}
TraceObject* TraceBufferRingBuffer::AddTraceEvent(uint64_t* handle) {
- base::LockGuard<base::Mutex> guard(&mutex_);
+ base::MutexGuard guard(&mutex_);
if (is_empty_ || chunks_[chunk_index_]->IsFull()) {
chunk_index_ = is_empty_ ? 0 : NextChunkIndex(chunk_index_);
is_empty_ = false;
@@ -35,7 +35,7 @@ TraceObject* TraceBufferRingBuffer::AddTraceEvent(uint64_t* handle) {
}
TraceObject* TraceBufferRingBuffer::GetEventByHandle(uint64_t handle) {
- base::LockGuard<base::Mutex> guard(&mutex_);
+ base::MutexGuard guard(&mutex_);
size_t chunk_index, event_index;
uint32_t chunk_seq;
ExtractHandle(handle, &chunk_index, &chunk_seq, &event_index);
@@ -46,7 +46,7 @@ TraceObject* TraceBufferRingBuffer::GetEventByHandle(uint64_t handle) {
}
bool TraceBufferRingBuffer::Flush() {
- base::LockGuard<base::Mutex> guard(&mutex_);
+ base::MutexGuard guard(&mutex_);
// This flushes all the traces stored in the buffer.
if (!is_empty_) {
for (size_t i = NextChunkIndex(chunk_index_);; i = NextChunkIndex(i)) {
diff --git a/deps/v8/src/libplatform/tracing/tracing-controller.cc b/deps/v8/src/libplatform/tracing/tracing-controller.cc
index 3d02347216..53fe01e42d 100644
--- a/deps/v8/src/libplatform/tracing/tracing-controller.cc
+++ b/deps/v8/src/libplatform/tracing/tracing-controller.cc
@@ -47,7 +47,7 @@ TracingController::~TracingController() {
{
// Free memory for category group names allocated via strdup.
- base::LockGuard<base::Mutex> lock(mutex_.get());
+ base::MutexGuard lock(mutex_.get());
for (size_t i = g_category_index - 1; i >= g_num_builtin_categories; --i) {
const char* group = g_category_groups[i];
g_category_groups[i] = nullptr;
@@ -143,7 +143,7 @@ void TracingController::StartTracing(TraceConfig* trace_config) {
trace_config_.reset(trace_config);
std::unordered_set<v8::TracingController::TraceStateObserver*> observers_copy;
{
- base::LockGuard<base::Mutex> lock(mutex_.get());
+ base::MutexGuard lock(mutex_.get());
mode_ = RECORDING_MODE;
UpdateCategoryGroupEnabledFlags();
observers_copy = observers_;
@@ -162,7 +162,7 @@ void TracingController::StopTracing() {
UpdateCategoryGroupEnabledFlags();
std::unordered_set<v8::TracingController::TraceStateObserver*> observers_copy;
{
- base::LockGuard<base::Mutex> lock(mutex_.get());
+ base::MutexGuard lock(mutex_.get());
observers_copy = observers_;
}
for (auto o : observers_copy) {
@@ -213,7 +213,7 @@ const uint8_t* TracingController::GetCategoryGroupEnabledInternal(
}
// Slow path. Grab the lock.
- base::LockGuard<base::Mutex> lock(mutex_.get());
+ base::MutexGuard lock(mutex_.get());
// Check the list again with lock in hand.
unsigned char* category_group_enabled = nullptr;
@@ -251,7 +251,7 @@ const uint8_t* TracingController::GetCategoryGroupEnabledInternal(
void TracingController::AddTraceStateObserver(
v8::TracingController::TraceStateObserver* observer) {
{
- base::LockGuard<base::Mutex> lock(mutex_.get());
+ base::MutexGuard lock(mutex_.get());
observers_.insert(observer);
if (mode_ != RECORDING_MODE) return;
}
@@ -261,7 +261,7 @@ void TracingController::AddTraceStateObserver(
void TracingController::RemoveTraceStateObserver(
v8::TracingController::TraceStateObserver* observer) {
- base::LockGuard<base::Mutex> lock(mutex_.get());
+ base::MutexGuard lock(mutex_.get());
DCHECK(observers_.find(observer) != observers_.end());
observers_.erase(observer);
}
diff --git a/deps/v8/src/libsampler/sampler.cc b/deps/v8/src/libsampler/sampler.cc
index 464b4de32a..eb804a787a 100644
--- a/deps/v8/src/libsampler/sampler.cc
+++ b/deps/v8/src/libsampler/sampler.cc
@@ -4,14 +4,13 @@
#include "src/libsampler/sampler.h"
-#if V8_OS_POSIX && !V8_OS_CYGWIN && !V8_OS_FUCHSIA
-
-#define USE_SIGNALS
+#ifdef USE_SIGNALS
#include <errno.h>
#include <pthread.h>
#include <signal.h>
#include <sys/time.h>
+#include <atomic>
#if !V8_OS_QNX && !V8_OS_AIX
#include <sys/syscall.h> // NOLINT
@@ -57,10 +56,8 @@ typedef zx_arm64_general_regs_t zx_thread_state_general_regs_t;
#include <algorithm>
#include <vector>
-#include <map>
#include "src/base/atomic-utils.h"
-#include "src/base/hashmap.h"
#include "src/base/platform/platform.h"
#if V8_OS_ANDROID && !defined(__BIONIC_HAVE_UCONTEXT_T)
@@ -170,55 +167,24 @@ enum { REG_RBP = 10, REG_RSP = 15, REG_RIP = 16 };
namespace v8 {
namespace sampler {
-namespace {
-
#if defined(USE_SIGNALS)
-typedef std::vector<Sampler*> SamplerList;
-typedef SamplerList::iterator SamplerListIterator;
-typedef base::AtomicValue<bool> AtomicMutex;
-
-class AtomicGuard {
- public:
- explicit AtomicGuard(AtomicMutex* atomic, bool is_blocking = true)
- : atomic_(atomic), is_success_(false) {
- do {
- // Use Acquire_Load to gain mutual exclusion.
- USE(atomic_->Value());
- is_success_ = atomic_->TrySetValue(false, true);
- } while (is_blocking && !is_success_);
- }
- bool is_success() const { return is_success_; }
-
- ~AtomicGuard() {
- if (!is_success_) return;
- atomic_->SetValue(false);
- }
-
- private:
- AtomicMutex* const atomic_;
- bool is_success_;
-};
-
-// Returns key for hash map.
-void* ThreadKey(pthread_t thread_id) {
- return reinterpret_cast<void*>(thread_id);
+AtomicGuard::AtomicGuard(AtomicMutex* atomic, bool is_blocking)
+ : atomic_(atomic), is_success_(false) {
+ do {
+ bool expected = false;
+ // We have to use the strong version here for the case where is_blocking
+ // is false, since we will only attempt the exchange once.
+ is_success_ = atomic->compare_exchange_strong(expected, true);
+ } while (is_blocking && !is_success_);
}
-// Returns hash value for hash map.
-uint32_t ThreadHash(pthread_t thread_id) {
-#if V8_OS_BSD
- return static_cast<uint32_t>(reinterpret_cast<intptr_t>(thread_id));
-#else
- return static_cast<uint32_t>(thread_id);
-#endif
+AtomicGuard::~AtomicGuard() {
+ if (!is_success_) return;
+ atomic_->store(false);
}
-#endif // USE_SIGNALS
-
-} // namespace
-
-#if defined(USE_SIGNALS)
+bool AtomicGuard::is_success() const { return is_success_; }
class Sampler::PlatformData {
public:
@@ -229,94 +195,57 @@ class Sampler::PlatformData {
pthread_t vm_tid_;
};
-class SamplerManager {
- public:
- SamplerManager() : sampler_map_() {}
-
- void AddSampler(Sampler* sampler) {
- AtomicGuard atomic_guard(&samplers_access_counter_);
- DCHECK(sampler->IsActive() || !sampler->IsRegistered());
- // Add sampler into map if needed.
- pthread_t thread_id = sampler->platform_data()->vm_tid();
- base::HashMap::Entry* entry =
- sampler_map_.LookupOrInsert(ThreadKey(thread_id),
- ThreadHash(thread_id));
- DCHECK_NOT_NULL(entry);
- if (entry->value == nullptr) {
- SamplerList* samplers = new SamplerList();
- samplers->push_back(sampler);
- entry->value = samplers;
- } else {
- SamplerList* samplers = reinterpret_cast<SamplerList*>(entry->value);
- bool exists = false;
- for (SamplerListIterator iter = samplers->begin();
- iter != samplers->end(); ++iter) {
- if (*iter == sampler) {
- exists = true;
- break;
- }
- }
- if (!exists) {
- samplers->push_back(sampler);
- }
- }
+void SamplerManager::AddSampler(Sampler* sampler) {
+ AtomicGuard atomic_guard(&samplers_access_counter_);
+ DCHECK(sampler->IsActive());
+ pthread_t thread_id = sampler->platform_data()->vm_tid();
+ auto it = sampler_map_.find(thread_id);
+ if (it == sampler_map_.end()) {
+ SamplerList samplers;
+ samplers.push_back(sampler);
+ sampler_map_.emplace(thread_id, std::move(samplers));
+ } else {
+ SamplerList& samplers = it->second;
+ auto it = std::find(samplers.begin(), samplers.end(), sampler);
+ if (it == samplers.end()) samplers.push_back(sampler);
}
+}
- void RemoveSampler(Sampler* sampler) {
- AtomicGuard atomic_guard(&samplers_access_counter_);
- DCHECK(sampler->IsActive() || sampler->IsRegistered());
- // Remove sampler from map.
- pthread_t thread_id = sampler->platform_data()->vm_tid();
- void* thread_key = ThreadKey(thread_id);
- uint32_t thread_hash = ThreadHash(thread_id);
- base::HashMap::Entry* entry = sampler_map_.Lookup(thread_key, thread_hash);
- DCHECK_NOT_NULL(entry);
- SamplerList* samplers = reinterpret_cast<SamplerList*>(entry->value);
- for (SamplerListIterator iter = samplers->begin(); iter != samplers->end();
- ++iter) {
- if (*iter == sampler) {
- samplers->erase(iter);
- break;
- }
- }
- if (samplers->empty()) {
- sampler_map_.Remove(thread_key, thread_hash);
- delete samplers;
- }
+void SamplerManager::RemoveSampler(Sampler* sampler) {
+ AtomicGuard atomic_guard(&samplers_access_counter_);
+ DCHECK(sampler->IsActive());
+ pthread_t thread_id = sampler->platform_data()->vm_tid();
+ auto it = sampler_map_.find(thread_id);
+ DCHECK_NE(it, sampler_map_.end());
+ SamplerList& samplers = it->second;
+ samplers.erase(std::remove(samplers.begin(), samplers.end(), sampler),
+ samplers.end());
+ if (samplers.empty()) {
+ sampler_map_.erase(it);
}
+}
-#if defined(USE_SIGNALS)
- void DoSample(const v8::RegisterState& state) {
- AtomicGuard atomic_guard(&SamplerManager::samplers_access_counter_, false);
- if (!atomic_guard.is_success()) return;
- pthread_t thread_id = pthread_self();
- base::HashMap::Entry* entry =
- sampler_map_.Lookup(ThreadKey(thread_id), ThreadHash(thread_id));
- if (!entry) return;
- SamplerList& samplers = *static_cast<SamplerList*>(entry->value);
-
- for (size_t i = 0; i < samplers.size(); ++i) {
- Sampler* sampler = samplers[i];
- Isolate* isolate = sampler->isolate();
- // We require a fully initialized and entered isolate.
- if (isolate == nullptr || !isolate->IsInUse()) continue;
- if (v8::Locker::IsActive() && !Locker::IsLocked(isolate)) continue;
- sampler->SampleStack(state);
- }
+void SamplerManager::DoSample(const v8::RegisterState& state) {
+ AtomicGuard atomic_guard(&samplers_access_counter_, false);
+ if (!atomic_guard.is_success()) return;
+ pthread_t thread_id = pthread_self();
+ auto it = sampler_map_.find(thread_id);
+ if (it == sampler_map_.end()) return;
+ SamplerList& samplers = it->second;
+
+ for (Sampler* sampler : samplers) {
+ Isolate* isolate = sampler->isolate();
+ // We require a fully initialized and entered isolate.
+ if (isolate == nullptr || !isolate->IsInUse()) continue;
+ if (v8::Locker::IsActive() && !Locker::IsLocked(isolate)) continue;
+ sampler->SampleStack(state);
}
-#endif
-
- static SamplerManager* instance() { return instance_.Pointer(); }
-
- private:
- base::HashMap sampler_map_;
- static AtomicMutex samplers_access_counter_;
- static base::LazyInstance<SamplerManager>::type instance_;
-};
+}
-AtomicMutex SamplerManager::samplers_access_counter_;
-base::LazyInstance<SamplerManager>::type SamplerManager::instance_ =
- LAZY_INSTANCE_INITIALIZER;
+SamplerManager* SamplerManager::instance() {
+ static base::LeakyObject<SamplerManager> instance;
+ return instance.get();
+}
#elif V8_OS_WIN || V8_OS_CYGWIN
@@ -378,24 +307,18 @@ class Sampler::PlatformData {
#if defined(USE_SIGNALS)
class SignalHandler {
public:
- static void SetUp() { if (!mutex_) mutex_ = new base::Mutex(); }
- static void TearDown() {
- delete mutex_;
- mutex_ = nullptr;
- }
-
static void IncreaseSamplerCount() {
- base::LockGuard<base::Mutex> lock_guard(mutex_);
+ base::MutexGuard lock_guard(mutex_.Pointer());
if (++client_count_ == 1) Install();
}
static void DecreaseSamplerCount() {
- base::LockGuard<base::Mutex> lock_guard(mutex_);
+ base::MutexGuard lock_guard(mutex_.Pointer());
if (--client_count_ == 0) Restore();
}
static bool Installed() {
- base::LockGuard<base::Mutex> lock_guard(mutex_);
+ base::MutexGuard lock_guard(mutex_.Pointer());
return signal_handler_installed_;
}
@@ -424,13 +347,13 @@ class SignalHandler {
static void HandleProfilerSignal(int signal, siginfo_t* info, void* context);
// Protects the process wide state below.
- static base::Mutex* mutex_;
+ static base::LazyMutex mutex_;
static int client_count_;
static bool signal_handler_installed_;
static struct sigaction old_signal_handler_;
};
-base::Mutex* SignalHandler::mutex_ = nullptr;
+base::LazyMutex SignalHandler::mutex_ = LAZY_MUTEX_INITIALIZER;
int SignalHandler::client_count_ = 0;
struct sigaction SignalHandler::old_signal_handler_;
bool SignalHandler::signal_handler_installed_ = false;
@@ -589,85 +512,36 @@ void SignalHandler::FillRegisterState(void* context, RegisterState* state) {
#endif // USE_SIGNALS
-
-void Sampler::SetUp() {
-#if defined(USE_SIGNALS)
- SignalHandler::SetUp();
-#endif
-}
-
-
-void Sampler::TearDown() {
-#if defined(USE_SIGNALS)
- SignalHandler::TearDown();
-#endif
-}
-
Sampler::Sampler(Isolate* isolate)
- : is_counting_samples_(false),
- js_sample_count_(0),
- external_sample_count_(0),
- isolate_(isolate),
- profiling_(false),
- has_processing_thread_(false),
- active_(false),
- registered_(false) {
- data_ = new PlatformData;
-}
+ : isolate_(isolate), data_(base::make_unique<PlatformData>()) {}
Sampler::~Sampler() {
DCHECK(!IsActive());
-#if defined(USE_SIGNALS)
- if (IsRegistered()) {
- SamplerManager::instance()->RemoveSampler(this);
- }
-#endif
- delete data_;
}
void Sampler::Start() {
DCHECK(!IsActive());
SetActive(true);
#if defined(USE_SIGNALS)
+ SignalHandler::IncreaseSamplerCount();
SamplerManager::instance()->AddSampler(this);
#endif
}
-
void Sampler::Stop() {
#if defined(USE_SIGNALS)
SamplerManager::instance()->RemoveSampler(this);
+ SignalHandler::DecreaseSamplerCount();
#endif
DCHECK(IsActive());
SetActive(false);
- SetRegistered(false);
}
-
-void Sampler::IncreaseProfilingDepth() {
- base::Relaxed_AtomicIncrement(&profiling_, 1);
-#if defined(USE_SIGNALS)
- SignalHandler::IncreaseSamplerCount();
-#endif
-}
-
-
-void Sampler::DecreaseProfilingDepth() {
-#if defined(USE_SIGNALS)
- SignalHandler::DecreaseSamplerCount();
-#endif
- base::Relaxed_AtomicIncrement(&profiling_, -1);
-}
-
-
#if defined(USE_SIGNALS)
void Sampler::DoSample() {
if (!SignalHandler::Installed()) return;
- if (!IsActive() && !IsRegistered()) {
- SamplerManager::instance()->AddSampler(this);
- SetRegistered(true);
- }
+ DCHECK(IsActive());
pthread_kill(platform_data()->vm_tid(), SIGPROF);
}
@@ -690,6 +564,10 @@ void Sampler::DoSample() {
state.pc = reinterpret_cast<void*>(context.Rip);
state.sp = reinterpret_cast<void*>(context.Rsp);
state.fp = reinterpret_cast<void*>(context.Rbp);
+#elif V8_HOST_ARCH_ARM64
+ state.pc = reinterpret_cast<void*>(context.Pc);
+ state.sp = reinterpret_cast<void*>(context.Sp);
+ state.fp = reinterpret_cast<void*>(context.Fp);
#else
state.pc = reinterpret_cast<void*>(context.Eip);
state.sp = reinterpret_cast<void*>(context.Esp);
diff --git a/deps/v8/src/libsampler/sampler.h b/deps/v8/src/libsampler/sampler.h
index 6ce6798a44..8e39a95f58 100644
--- a/deps/v8/src/libsampler/sampler.h
+++ b/deps/v8/src/libsampler/sampler.h
@@ -5,11 +5,17 @@
#ifndef V8_LIBSAMPLER_SAMPLER_H_
#define V8_LIBSAMPLER_SAMPLER_H_
-#include "include/v8.h"
+#include <atomic>
+#include <unordered_map>
-#include "src/base/atomicops.h"
+#include "include/v8.h"
+#include "src/base/lazy-instance.h"
#include "src/base/macros.h"
+#if V8_OS_POSIX && !V8_OS_CYGWIN && !V8_OS_FUCHSIA
+#define USE_SIGNALS
+#endif
+
namespace v8 {
namespace sampler {
@@ -25,10 +31,6 @@ class Sampler {
static const int kMaxFramesCountLog2 = 8;
static const unsigned kMaxFramesCount = (1u << kMaxFramesCountLog2) - 1;
- // Initializes the Sampler support. Called once at VM startup.
- static void SetUp();
- static void TearDown();
-
// Initialize sampler.
explicit Sampler(Isolate* isolate);
virtual ~Sampler();
@@ -44,28 +46,11 @@ class Sampler {
void Start();
void Stop();
- // Whether the sampling thread should use this Sampler for CPU profiling?
- bool IsProfiling() const {
- return base::Relaxed_Load(&profiling_) > 0 &&
- !base::Relaxed_Load(&has_processing_thread_);
- }
- void IncreaseProfilingDepth();
- void DecreaseProfilingDepth();
-
- // Whether the sampler is running (that is, consumes resources).
- bool IsActive() const { return base::Relaxed_Load(&active_) != 0; }
-
- // CpuProfiler collects samples by calling DoSample directly
- // without calling Start. To keep it working, we register the sampler
- // with the CpuProfiler.
- bool IsRegistered() const { return base::Relaxed_Load(&registered_) != 0; }
+ // Whether the sampler is running (Start() has been called).
+ bool IsActive() const { return active_.load(std::memory_order_relaxed); }
void DoSample();
- void SetHasProcessingThread(bool value) {
- base::Relaxed_Store(&has_processing_thread_, value);
- }
-
// Used in tests to make sure that stack sampling is performed.
unsigned js_sample_count() const { return js_sample_count_; }
unsigned external_sample_count() const { return external_sample_count_; }
@@ -76,27 +61,85 @@ class Sampler {
}
class PlatformData;
- PlatformData* platform_data() const { return data_; }
+ PlatformData* platform_data() const { return data_.get(); }
protected:
// Counts stack samples taken in various VM states.
- bool is_counting_samples_;
- unsigned js_sample_count_;
- unsigned external_sample_count_;
+ bool is_counting_samples_ = false;
+ unsigned js_sample_count_ = 0;
+ unsigned external_sample_count_ = 0;
- private:
- void SetActive(bool value) { base::Relaxed_Store(&active_, value); }
- void SetRegistered(bool value) { base::Relaxed_Store(&registered_, value); }
+ void SetActive(bool value) {
+ active_.store(value, std::memory_order_relaxed);
+ }
Isolate* isolate_;
- base::Atomic32 profiling_;
- base::Atomic32 has_processing_thread_;
- base::Atomic32 active_;
- base::Atomic32 registered_;
- PlatformData* data_; // Platform specific data.
+ std::atomic_bool active_{false};
+ std::unique_ptr<PlatformData> data_; // Platform specific data.
DISALLOW_IMPLICIT_CONSTRUCTORS(Sampler);
};
+#ifdef USE_SIGNALS
+
+typedef std::atomic_bool AtomicMutex;
+
+// A helper that uses an std::atomic_bool to create a lock that is obtained on
+// construction and released on destruction.
+class AtomicGuard {
+ public:
+ // Attempt to obtain the lock represented by |atomic|. |is_blocking|
+ // determines whether we will block to obtain the lock, or only make one
+ // attempt to gain the lock and then stop. If we fail to gain the lock,
+ // is_success will be false.
+ explicit AtomicGuard(AtomicMutex* atomic, bool is_blocking = true);
+
+ // Releases the lock represented by atomic, if it is held by this guard.
+ ~AtomicGuard();
+
+ // Whether the lock was successfully obtained in the constructor. This will
+ // always be true if is_blocking was true.
+ bool is_success() const;
+
+ private:
+ AtomicMutex* const atomic_;
+ bool is_success_;
+};
+
+// SamplerManager keeps a list of Samplers per thread, and allows the caller to
+// take a sample for every Sampler on the current thread.
+class SamplerManager {
+ public:
+ typedef std::vector<Sampler*> SamplerList;
+
+ // Add |sampler| to the map if it is not already present.
+ void AddSampler(Sampler* sampler);
+
+ // If |sampler| exists in the map, remove it and delete the SamplerList if
+ // |sampler| was the last sampler in the list.
+ void RemoveSampler(Sampler* sampler);
+
+ // Take a sample for every sampler on the current thread. This function can
+ // return without taking samples if AddSampler or RemoveSampler are being
+ // concurrently called on any thread.
+ void DoSample(const v8::RegisterState& state);
+
+ // Get the lazily instantiated, global SamplerManager instance.
+ static SamplerManager* instance();
+
+ private:
+ SamplerManager() = default;
+ // Must be a friend so that it can access the private constructor for the
+ // global lazy instance.
+ friend class base::LeakyObject<SamplerManager>;
+
+ std::unordered_map<pthread_t, SamplerList> sampler_map_;
+ AtomicMutex samplers_access_counter_{false};
+
+ DISALLOW_COPY_AND_ASSIGN(SamplerManager);
+};
+
+#endif // USE_SIGNALS
+
} // namespace sampler
} // namespace v8
diff --git a/deps/v8/src/locked-queue-inl.h b/deps/v8/src/locked-queue-inl.h
index 65c8736d7a..bbc800c4a9 100644
--- a/deps/v8/src/locked-queue-inl.h
+++ b/deps/v8/src/locked-queue-inl.h
@@ -46,7 +46,7 @@ inline void LockedQueue<Record>::Enqueue(const Record& record) {
CHECK_NOT_NULL(n);
n->value = record;
{
- base::LockGuard<base::Mutex> guard(&tail_mutex_);
+ base::MutexGuard guard(&tail_mutex_);
tail_->next.SetValue(n);
tail_ = n;
}
@@ -57,7 +57,7 @@ template <typename Record>
inline bool LockedQueue<Record>::Dequeue(Record* record) {
Node* old_head = nullptr;
{
- base::LockGuard<base::Mutex> guard(&head_mutex_);
+ base::MutexGuard guard(&head_mutex_);
old_head = head_;
Node* const next_node = head_->next.Value();
if (next_node == nullptr) return false;
@@ -71,14 +71,14 @@ inline bool LockedQueue<Record>::Dequeue(Record* record) {
template <typename Record>
inline bool LockedQueue<Record>::IsEmpty() const {
- base::LockGuard<base::Mutex> guard(&head_mutex_);
+ base::MutexGuard guard(&head_mutex_);
return head_->next.Value() == nullptr;
}
template <typename Record>
inline bool LockedQueue<Record>::Peek(Record* record) const {
- base::LockGuard<base::Mutex> guard(&head_mutex_);
+ base::MutexGuard guard(&head_mutex_);
Node* const next_node = head_->next.Value();
if (next_node == nullptr) return false;
*record = next_node->value;
diff --git a/deps/v8/src/log-inl.h b/deps/v8/src/log-inl.h
index 0eb18e0525..92659c2a9c 100644
--- a/deps/v8/src/log-inl.h
+++ b/deps/v8/src/log-inl.h
@@ -14,7 +14,7 @@ namespace v8 {
namespace internal {
CodeEventListener::LogEventsAndTags Logger::ToNativeByScript(
- CodeEventListener::LogEventsAndTags tag, Script* script) {
+ CodeEventListener::LogEventsAndTags tag, Script script) {
if (script->type() != Script::TYPE_NATIVE) return tag;
switch (tag) {
case CodeEventListener::FUNCTION_TAG:
diff --git a/deps/v8/src/log-utils.cc b/deps/v8/src/log-utils.cc
index 4cdb854731..b017b50a3f 100644
--- a/deps/v8/src/log-utils.cc
+++ b/deps/v8/src/log-utils.cc
@@ -88,9 +88,9 @@ Log::MessageBuilder::MessageBuilder(Log* log)
DCHECK_NOT_NULL(log_->format_buffer_);
}
-void Log::MessageBuilder::AppendString(String* str,
+void Log::MessageBuilder::AppendString(String str,
base::Optional<int> length_limit) {
- if (str == nullptr) return;
+ if (str.is_null()) return;
DisallowHeapAllocation no_gc; // Ensure string stays valid.
int length = str->length();
@@ -155,8 +155,8 @@ void Log::MessageBuilder::AppendCharacter(char c) {
}
}
-void Log::MessageBuilder::AppendSymbolName(Symbol* symbol) {
- DCHECK(symbol);
+void Log::MessageBuilder::AppendSymbolName(Symbol symbol) {
+ DCHECK(!symbol.is_null());
OFStream& os = log_->os_;
os << "symbol(";
if (!symbol->name()->IsUndefined()) {
@@ -167,9 +167,9 @@ void Log::MessageBuilder::AppendSymbolName(Symbol* symbol) {
os << "hash " << std::hex << symbol->Hash() << std::dec << ")";
}
-void Log::MessageBuilder::AppendSymbolNameDetails(String* str,
+void Log::MessageBuilder::AppendSymbolNameDetails(String str,
bool show_impl_info) {
- if (str == nullptr) return;
+ if (str.is_null()) return;
DisallowHeapAllocation no_gc; // Ensure string stays valid.
OFStream& os = log_->os_;
@@ -233,19 +233,19 @@ Log::MessageBuilder& Log::MessageBuilder::operator<<<char>(char c) {
}
template <>
-Log::MessageBuilder& Log::MessageBuilder::operator<<<String*>(String* string) {
+Log::MessageBuilder& Log::MessageBuilder::operator<<<String>(String string) {
this->AppendString(string);
return *this;
}
template <>
-Log::MessageBuilder& Log::MessageBuilder::operator<<<Symbol*>(Symbol* symbol) {
+Log::MessageBuilder& Log::MessageBuilder::operator<<<Symbol>(Symbol symbol) {
this->AppendSymbolName(symbol);
return *this;
}
template <>
-Log::MessageBuilder& Log::MessageBuilder::operator<<<Name*>(Name* name) {
+Log::MessageBuilder& Log::MessageBuilder::operator<<<Name>(Name name) {
if (name->IsString()) {
this->AppendString(String::cast(name));
} else {
diff --git a/deps/v8/src/log-utils.h b/deps/v8/src/log-utils.h
index e30b32b875..de76b44362 100644
--- a/deps/v8/src/log-utils.h
+++ b/deps/v8/src/log-utils.h
@@ -65,14 +65,14 @@ class Log {
explicit MessageBuilder(Log* log);
~MessageBuilder() = default;
- void AppendString(String* str,
+ void AppendString(String str,
base::Optional<int> length_limit = base::nullopt);
void AppendString(Vector<const char> str);
void AppendString(const char* str);
void AppendString(const char* str, size_t length);
void PRINTF_FORMAT(2, 3) AppendFormatString(const char* format, ...);
void AppendCharacter(char c);
- void AppendSymbolName(Symbol* symbol);
+ void AppendSymbolName(Symbol symbol);
// Delegate insertion to the underlying {log_}.
// All appended strings are escaped to maintain one-line log entries.
@@ -91,13 +91,13 @@ class Log {
int PRINTF_FORMAT(2, 0)
FormatStringIntoBuffer(const char* format, va_list args);
- void AppendSymbolNameDetails(String* str, bool show_impl_info);
+ void AppendSymbolNameDetails(String str, bool show_impl_info);
void PRINTF_FORMAT(2, 3) AppendRawFormatString(const char* format, ...);
void AppendRawCharacter(const char character);
Log* log_;
- base::LockGuard<base::Mutex> lock_guard_;
+ base::MutexGuard lock_guard_;
};
private:
@@ -143,11 +143,11 @@ Log::MessageBuilder& Log::MessageBuilder::operator<<<const char*>(
template <>
Log::MessageBuilder& Log::MessageBuilder::operator<<<char>(char c);
template <>
-Log::MessageBuilder& Log::MessageBuilder::operator<<<String*>(String* string);
+Log::MessageBuilder& Log::MessageBuilder::operator<<<String>(String string);
template <>
-Log::MessageBuilder& Log::MessageBuilder::operator<<<Symbol*>(Symbol* symbol);
+Log::MessageBuilder& Log::MessageBuilder::operator<<<Symbol>(Symbol symbol);
template <>
-Log::MessageBuilder& Log::MessageBuilder::operator<<<Name*>(Name* name);
+Log::MessageBuilder& Log::MessageBuilder::operator<<<Name>(Name name);
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/log.cc b/deps/v8/src/log.cc
index d78625a46a..6aecad98fe 100644
--- a/deps/v8/src/log.cc
+++ b/deps/v8/src/log.cc
@@ -12,20 +12,22 @@
#include "src/bailout-reason.h"
#include "src/base/platform/platform.h"
#include "src/bootstrapper.h"
-#include "src/code-stubs.h"
#include "src/counters.h"
#include "src/deoptimizer.h"
#include "src/global-handles.h"
-#include "src/instruction-stream.h"
#include "src/interpreter/bytecodes.h"
#include "src/interpreter/interpreter.h"
+#include "src/isolate.h"
#include "src/libsampler/sampler.h"
#include "src/log-inl.h"
+#include "src/log-utils.h"
#include "src/macro-assembler.h"
+#include "src/memcopy.h"
#include "src/objects/api-callbacks.h"
#include "src/perf-jit.h"
#include "src/profiler/tick-sample.h"
#include "src/runtime-profiler.h"
+#include "src/snapshot/embedded-data.h"
#include "src/source-position-table.h"
#include "src/string-stream.h"
#include "src/tracing/tracing-category-observer.h"
@@ -34,7 +36,6 @@
#include "src/wasm/wasm-code-manager.h"
#include "src/wasm/wasm-objects-inl.h"
-#include "src/utils.h"
#include "src/version.h"
namespace v8 {
@@ -71,8 +72,7 @@ static v8::CodeEventType GetCodeEventTypeForTag(
PROFILE(isolate_, Call); \
}
-static const char* ComputeMarker(SharedFunctionInfo* shared,
- AbstractCode* code) {
+static const char* ComputeMarker(SharedFunctionInfo shared, AbstractCode code) {
switch (code->kind()) {
case AbstractCode::INTERPRETED_FUNCTION:
return shared->optimization_disabled() ? "" : "~";
@@ -108,11 +108,11 @@ class CodeEventLogger::NameBuffer {
AppendByte(':');
}
- void AppendName(Name* name) {
+ void AppendName(Name name) {
if (name->IsString()) {
AppendString(String::cast(name));
} else {
- Symbol* symbol = Symbol::cast(name);
+ Symbol symbol = Symbol::cast(name);
AppendBytes("symbol(");
if (!symbol->name()->IsUndefined()) {
AppendBytes("\"");
@@ -125,8 +125,8 @@ class CodeEventLogger::NameBuffer {
}
}
- void AppendString(String* str) {
- if (str == nullptr) return;
+ void AppendString(String str) {
+ if (str.is_null()) return;
int length = 0;
std::unique_ptr<char[]> c_str =
str->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL, &length);
@@ -185,31 +185,34 @@ CodeEventLogger::CodeEventLogger(Isolate* isolate)
CodeEventLogger::~CodeEventLogger() { delete name_buffer_; }
void CodeEventLogger::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
- AbstractCode* code, const char* comment) {
+ AbstractCode code, const char* comment) {
name_buffer_->Init(tag);
name_buffer_->AppendBytes(comment);
- LogRecordedBuffer(code, nullptr, name_buffer_->get(), name_buffer_->size());
+ LogRecordedBuffer(code, SharedFunctionInfo(), name_buffer_->get(),
+ name_buffer_->size());
}
void CodeEventLogger::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
- AbstractCode* code, Name* name) {
+ AbstractCode code, Name name) {
name_buffer_->Init(tag);
name_buffer_->AppendName(name);
- LogRecordedBuffer(code, nullptr, name_buffer_->get(), name_buffer_->size());
+ LogRecordedBuffer(code, SharedFunctionInfo(), name_buffer_->get(),
+ name_buffer_->size());
}
void CodeEventLogger::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
- AbstractCode* code,
- SharedFunctionInfo* shared, Name* name) {
+ AbstractCode code,
+ SharedFunctionInfo shared, Name name) {
name_buffer_->Init(tag);
name_buffer_->AppendBytes(ComputeMarker(shared, code));
+ name_buffer_->AppendByte(' ');
name_buffer_->AppendName(name);
LogRecordedBuffer(code, shared, name_buffer_->get(), name_buffer_->size());
}
void CodeEventLogger::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
- AbstractCode* code,
- SharedFunctionInfo* shared, Name* source,
+ AbstractCode code,
+ SharedFunctionInfo shared, Name source,
int line, int column) {
name_buffer_->Init(tag);
name_buffer_->AppendBytes(ComputeMarker(shared, code));
@@ -245,11 +248,11 @@ void CodeEventLogger::CodeCreateEvent(LogEventsAndTags tag,
LogRecordedBuffer(code, name_buffer_->get(), name_buffer_->size());
}
-void CodeEventLogger::RegExpCodeCreateEvent(AbstractCode* code,
- String* source) {
+void CodeEventLogger::RegExpCodeCreateEvent(AbstractCode code, String source) {
name_buffer_->Init(CodeEventListener::REG_EXP_TAG);
name_buffer_->AppendString(source);
- LogRecordedBuffer(code, nullptr, name_buffer_->get(), name_buffer_->size());
+ LogRecordedBuffer(code, SharedFunctionInfo(), name_buffer_->get(),
+ name_buffer_->size());
}
// Linux perf tool logging support
@@ -258,12 +261,12 @@ class PerfBasicLogger : public CodeEventLogger {
explicit PerfBasicLogger(Isolate* isolate);
~PerfBasicLogger() override;
- void CodeMoveEvent(AbstractCode* from, AbstractCode* to) override {}
- void CodeDisableOptEvent(AbstractCode* code,
- SharedFunctionInfo* shared) override {}
+ void CodeMoveEvent(AbstractCode from, AbstractCode to) override {}
+ void CodeDisableOptEvent(AbstractCode code,
+ SharedFunctionInfo shared) override {}
private:
- void LogRecordedBuffer(AbstractCode* code, SharedFunctionInfo* shared,
+ void LogRecordedBuffer(AbstractCode code, SharedFunctionInfo shared,
const char* name, int length) override;
void LogRecordedBuffer(const wasm::WasmCode* code, const char* name,
int length) override;
@@ -316,7 +319,7 @@ void PerfBasicLogger::WriteLogRecordedBuffer(uintptr_t address, int size,
size, name_length, name);
}
-void PerfBasicLogger::LogRecordedBuffer(AbstractCode* code, SharedFunctionInfo*,
+void PerfBasicLogger::LogRecordedBuffer(AbstractCode code, SharedFunctionInfo,
const char* name, int length) {
if (FLAG_perf_basic_prof_only_functions &&
(code->kind() != AbstractCode::INTERPRETED_FUNCTION &&
@@ -374,7 +377,7 @@ void ExternalCodeEventListener::StopListening() {
}
void ExternalCodeEventListener::CodeCreateEvent(
- CodeEventListener::LogEventsAndTags tag, AbstractCode* code,
+ CodeEventListener::LogEventsAndTags tag, AbstractCode code,
const char* comment) {
CodeEvent code_event;
code_event.code_start_address =
@@ -391,7 +394,7 @@ void ExternalCodeEventListener::CodeCreateEvent(
}
void ExternalCodeEventListener::CodeCreateEvent(
- CodeEventListener::LogEventsAndTags tag, AbstractCode* code, Name* name) {
+ CodeEventListener::LogEventsAndTags tag, AbstractCode code, Name name) {
Handle<String> name_string =
Name::ToFunctionName(isolate_, Handle<Name>(name, isolate_))
.ToHandleChecked();
@@ -411,8 +414,8 @@ void ExternalCodeEventListener::CodeCreateEvent(
}
void ExternalCodeEventListener::CodeCreateEvent(
- CodeEventListener::LogEventsAndTags tag, AbstractCode* code,
- SharedFunctionInfo* shared, Name* name) {
+ CodeEventListener::LogEventsAndTags tag, AbstractCode code,
+ SharedFunctionInfo shared, Name name) {
Handle<String> name_string =
Name::ToFunctionName(isolate_, Handle<Name>(name, isolate_))
.ToHandleChecked();
@@ -432,8 +435,8 @@ void ExternalCodeEventListener::CodeCreateEvent(
}
void ExternalCodeEventListener::CodeCreateEvent(
- CodeEventListener::LogEventsAndTags tag, AbstractCode* code,
- SharedFunctionInfo* shared, Name* source, int line, int column) {
+ CodeEventListener::LogEventsAndTags tag, AbstractCode code,
+ SharedFunctionInfo shared, Name source, int line, int column) {
Handle<String> name_string =
Name::ToFunctionName(isolate_, Handle<Name>(shared->Name(), isolate_))
.ToHandleChecked();
@@ -461,8 +464,8 @@ void ExternalCodeEventListener::CodeCreateEvent(LogEventsAndTags tag,
// TODO(mmarchini): handle later
}
-void ExternalCodeEventListener::RegExpCodeCreateEvent(AbstractCode* code,
- String* source) {
+void ExternalCodeEventListener::RegExpCodeCreateEvent(AbstractCode code,
+ String source) {
CodeEvent code_event;
code_event.code_start_address =
static_cast<uintptr_t>(code->InstructionStart());
@@ -483,14 +486,14 @@ class LowLevelLogger : public CodeEventLogger {
LowLevelLogger(Isolate* isolate, const char* file_name);
~LowLevelLogger() override;
- void CodeMoveEvent(AbstractCode* from, AbstractCode* to) override;
- void CodeDisableOptEvent(AbstractCode* code,
- SharedFunctionInfo* shared) override {}
- void SnapshotPositionEvent(HeapObject* obj, int pos);
+ void CodeMoveEvent(AbstractCode from, AbstractCode to) override;
+ void CodeDisableOptEvent(AbstractCode code,
+ SharedFunctionInfo shared) override {}
+ void SnapshotPositionEvent(HeapObject obj, int pos);
void CodeMovingGCEvent() override;
private:
- void LogRecordedBuffer(AbstractCode* code, SharedFunctionInfo* shared,
+ void LogRecordedBuffer(AbstractCode code, SharedFunctionInfo shared,
const char* name, int length) override;
void LogRecordedBuffer(const wasm::WasmCode* code, const char* name,
int length) override;
@@ -578,7 +581,7 @@ void LowLevelLogger::LogCodeInfo() {
LogWriteBytes(arch, sizeof(arch));
}
-void LowLevelLogger::LogRecordedBuffer(AbstractCode* code, SharedFunctionInfo*,
+void LowLevelLogger::LogRecordedBuffer(AbstractCode code, SharedFunctionInfo,
const char* name, int length) {
CodeCreateStruct event;
event.name_size = length;
@@ -602,14 +605,13 @@ void LowLevelLogger::LogRecordedBuffer(const wasm::WasmCode* code,
code->instructions().length());
}
-void LowLevelLogger::CodeMoveEvent(AbstractCode* from, AbstractCode* to) {
+void LowLevelLogger::CodeMoveEvent(AbstractCode from, AbstractCode to) {
CodeMoveStruct event;
event.from_address = from->InstructionStart();
event.to_address = to->InstructionStart();
LogWriteStruct(event);
}
-
void LowLevelLogger::LogWriteBytes(const char* bytes, int size) {
size_t rv = fwrite(bytes, 1, size, ll_output_handle_);
DCHECK(static_cast<size_t>(size) == rv);
@@ -627,9 +629,9 @@ class JitLogger : public CodeEventLogger {
public:
JitLogger(Isolate* isolate, JitCodeEventHandler code_event_handler);
- void CodeMoveEvent(AbstractCode* from, AbstractCode* to) override;
- void CodeDisableOptEvent(AbstractCode* code,
- SharedFunctionInfo* shared) override {}
+ void CodeMoveEvent(AbstractCode from, AbstractCode to) override;
+ void CodeDisableOptEvent(AbstractCode code,
+ SharedFunctionInfo shared) override {}
void AddCodeLinePosInfoEvent(void* jit_handler_data, int pc_offset,
int position,
JitCodeEvent::PositionType position_type);
@@ -638,7 +640,7 @@ class JitLogger : public CodeEventLogger {
void EndCodePosInfoEvent(Address start_address, void* jit_handler_data);
private:
- void LogRecordedBuffer(AbstractCode* code, SharedFunctionInfo* shared,
+ void LogRecordedBuffer(AbstractCode code, SharedFunctionInfo shared,
const char* name, int length) override;
void LogRecordedBuffer(const wasm::WasmCode* code, const char* name,
int length) override;
@@ -650,9 +652,8 @@ class JitLogger : public CodeEventLogger {
JitLogger::JitLogger(Isolate* isolate, JitCodeEventHandler code_event_handler)
: CodeEventLogger(isolate), code_event_handler_(code_event_handler) {}
-void JitLogger::LogRecordedBuffer(AbstractCode* code,
- SharedFunctionInfo* shared, const char* name,
- int length) {
+void JitLogger::LogRecordedBuffer(AbstractCode code, SharedFunctionInfo shared,
+ const char* name, int length) {
JitCodeEvent event;
memset(static_cast<void*>(&event), 0, sizeof(event));
event.type = JitCodeEvent::CODE_ADDED;
@@ -661,7 +662,7 @@ void JitLogger::LogRecordedBuffer(AbstractCode* code,
code->IsCode() ? JitCodeEvent::JIT_CODE : JitCodeEvent::BYTE_CODE;
event.code_len = code->InstructionSize();
Handle<SharedFunctionInfo> shared_function_handle;
- if (shared && shared->script()->IsScript()) {
+ if (!shared.is_null() && shared->script()->IsScript()) {
shared_function_handle =
Handle<SharedFunctionInfo>(shared, shared->GetIsolate());
}
@@ -686,8 +687,8 @@ void JitLogger::LogRecordedBuffer(const wasm::WasmCode* code, const char* name,
code_event_handler_(&event);
}
-void JitLogger::CodeMoveEvent(AbstractCode* from, AbstractCode* to) {
- base::LockGuard<base::Mutex> guard(&logger_mutex_);
+void JitLogger::CodeMoveEvent(AbstractCode from, AbstractCode to) {
+ base::MutexGuard guard(&logger_mutex_);
JitCodeEvent event;
event.type = JitCodeEvent::CODE_MOVED;
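base::MutexGuard in the hunk above is the newer shorthand for base::LockGuard<base::Mutex>; both are the RAII scoped-lock idiom. A standard-library sketch of the same shape, with std:: types standing in for V8's base:: ones:

#include <mutex>

std::mutex logger_mutex_;  // stands in for base::Mutex

void CodeMoveEventSketch() {
  // Acquires on construction, releases when the guard leaves scope,
  // including on early return or exception.
  std::lock_guard<std::mutex> guard(logger_mutex_);
  // ... build and dispatch the JitCodeEvent::CODE_MOVED event ...
}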
@@ -755,7 +756,7 @@ class SamplingThread : public base::Thread {
sampler_(sampler),
interval_microseconds_(interval_microseconds) {}
void Run() override {
- while (sampler_->IsProfiling()) {
+ while (sampler_->IsActive()) {
sampler_->DoSample();
base::OS::Sleep(
base::TimeDelta::FromMicroseconds(interval_microseconds_));
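The sampling thread's loop now keys off the sampler's own IsActive() state rather than a separate IsProfiling() query. A reduced sketch of the loop, with std:: types standing in for V8's base:: equivalents:

#include <atomic>
#include <chrono>
#include <thread>

class SamplerSketch {
 public:
  bool IsActive() const { return active_.load(std::memory_order_relaxed); }
  void Stop() { active_.store(false, std::memory_order_relaxed); }
  void DoSample() { /* capture pc/sp of the sampled thread */ }

 private:
  std::atomic<bool> active_{true};
};

void SamplingThreadRun(SamplerSketch* sampler, int interval_us) {
  // Sample until the sampler is stopped, then fall off the loop so
  // the owner can Join() the thread.
  while (sampler->IsActive()) {
    sampler->DoSample();
    std::this_thread::sleep_for(std::chrono::microseconds(interval_us));
  }
}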
@@ -781,9 +782,6 @@ class Profiler: public base::Thread {
// Inserts collected profiling data into buffer.
void Insert(v8::TickSample* sample) {
- if (paused_)
- return;
-
if (Succ(head_) == static_cast<int>(base::Relaxed_Load(&tail_))) {
overflow_ = true;
} else {
@@ -795,10 +793,6 @@ class Profiler: public base::Thread {
void Run() override;
- // Pause and Resume TickSample data collection.
- void Pause() { paused_ = true; }
- void Resume() { paused_ = false; }
-
private:
// Waits for a signal and removes profiling data.
bool Remove(v8::TickSample* sample) {
@@ -825,14 +819,8 @@ class Profiler: public base::Thread {
// Semaphore used for buffer synchronization.
base::Semaphore buffer_semaphore_;
- // Tells whether profiler is engaged, that is, processing thread is stated.
- bool engaged_;
-
// Tells whether worker thread should continue running.
base::Atomic32 running_;
-
- // Tells whether we are currently recording tick samples.
- bool paused_;
};
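With the paused_ and engaged_ flags gone, Insert (shown above) always records into the buffer. The underlying structure is a bounded single-producer/single-consumer ring: head_/tail_ indices, an overflow flag when full, and a counting semaphore that wakes the consumer. A self-contained sketch of that shape — kSize, TickSample, and the std:: types are illustrative stand-ins, not V8's definitions:

#include <array>
#include <atomic>
#include <semaphore>

struct TickSample { /* pc, sp, vm state, ... */ };

class SampleBuffer {
 public:
  // Producer side (the sampling thread): never blocks.
  void Insert(const TickSample& sample) {
    int head = head_.load(std::memory_order_relaxed);
    int next = (head + 1) % kSize;
    if (next == tail_.load(std::memory_order_acquire)) {
      overflow_ = true;  // buffer full: drop the sample
      return;
    }
    buffer_[head] = sample;
    head_.store(next, std::memory_order_release);
    filled_.release();  // wake the processing thread
  }

  // Consumer side (the Profiler thread): blocks until a sample exists.
  TickSample Remove() {
    filled_.acquire();
    int tail = tail_.load(std::memory_order_relaxed);
    TickSample sample = buffer_[tail];
    tail_.store((tail + 1) % kSize, std::memory_order_release);
    return sample;
  }

 private:
  static constexpr int kSize = 128;
  std::array<TickSample, kSize> buffer_;
  std::atomic<int> head_{0};
  std::atomic<int> tail_{0};
  std::atomic<bool> overflow_{false};
  std::counting_semaphore<kSize> filled_{0};
};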
@@ -844,18 +832,16 @@ class Ticker: public sampler::Sampler {
public:
Ticker(Isolate* isolate, int interval_microseconds)
: sampler::Sampler(reinterpret_cast<v8::Isolate*>(isolate)),
- profiler_(nullptr),
- sampling_thread_(new SamplingThread(this, interval_microseconds)) {}
+ sampling_thread_(
+ base::make_unique<SamplingThread>(this, interval_microseconds)) {}
~Ticker() override {
if (IsActive()) Stop();
- delete sampling_thread_;
}
void SetProfiler(Profiler* profiler) {
DCHECK_NULL(profiler_);
profiler_ = profiler;
- IncreaseProfilingDepth();
if (!IsActive()) Start();
sampling_thread_->StartSynchronously();
}
@@ -863,7 +849,6 @@ class Ticker: public sampler::Sampler {
void ClearProfiler() {
profiler_ = nullptr;
if (IsActive()) Stop();
- DecreaseProfilingDepth();
sampling_thread_->Join();
}
@@ -876,8 +861,8 @@ class Ticker: public sampler::Sampler {
}
private:
- Profiler* profiler_;
- SamplingThread* sampling_thread_;
+ Profiler* profiler_ = nullptr;
+ std::unique_ptr<SamplingThread> sampling_thread_;
};
//
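The Ticker now owns its SamplingThread through a unique_ptr and initializes profiler_ with a default member initializer, which is why both the hand-written delete in the destructor and the constructor's nullptr assignment disappear. A minimal sketch of the pattern (std::make_unique here; the patch uses V8's base::make_unique equivalent):

#include <memory>

class SamplingThread { /* ... */ };
class Profiler;

class TickerSketch {
 public:
  TickerSketch() : sampling_thread_(std::make_unique<SamplingThread>()) {}
  // No user-written destructor: unique_ptr frees the thread object.

 private:
  Profiler* profiler_ = nullptr;  // non-owning, default-initialized
  std::unique_ptr<SamplingThread> sampling_thread_;
};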
@@ -888,18 +873,12 @@ Profiler::Profiler(Isolate* isolate)
isolate_(isolate),
head_(0),
overflow_(false),
- buffer_semaphore_(0),
- engaged_(false),
- paused_(false) {
+ buffer_semaphore_(0) {
base::Relaxed_Store(&tail_, 0);
base::Relaxed_Store(&running_, 0);
}
-
void Profiler::Engage() {
- if (engaged_) return;
- engaged_ = true;
-
std::vector<base::OS::SharedLibraryAddress> addresses =
base::OS::GetSharedLibraryAddresses();
for (const auto& address : addresses) {
@@ -920,8 +899,6 @@ void Profiler::Engage() {
void Profiler::Disengage() {
- if (!engaged_) return;
-
// Stop receiving ticks.
isolate_->logger()->ticker_->ClearProfiler();
@@ -930,8 +907,6 @@ void Profiler::Disengage() {
// the thread to terminate.
base::Relaxed_Store(&running_, 0);
v8::TickSample sample;
- // Reset 'paused_' flag, otherwise semaphore may not be signalled.
- Resume();
Insert(&sample);
Join();
@@ -955,15 +930,9 @@ void Profiler::Run() {
Logger::Logger(Isolate* isolate)
: isolate_(isolate),
- ticker_(nullptr),
- profiler_(nullptr),
log_events_(nullptr),
is_logging_(false),
log_(nullptr),
- perf_basic_logger_(nullptr),
- perf_jit_logger_(nullptr),
- ll_logger_(nullptr),
- jit_logger_(nullptr),
is_initialized_(false),
existing_code_logger_(isolate) {}
@@ -971,6 +940,8 @@ Logger::~Logger() {
delete log_;
}
+const LogSeparator Logger::kNext = LogSeparator::kSeparator;
+
void Logger::AddCodeEventListener(CodeEventListener* listener) {
bool result = isolate_->code_event_dispatcher()->AddListener(listener);
CHECK(result);
@@ -1014,11 +985,10 @@ void Logger::UncheckedIntPtrTEvent(const char* name, intptr_t value) {
msg.WriteToLogFile();
}
-
-void Logger::HandleEvent(const char* name, Object** location) {
+void Logger::HandleEvent(const char* name, Address* location) {
if (!log_->IsEnabled() || !FLAG_log_handles) return;
Log::MessageBuilder msg(log_);
- msg << name << kNext << static_cast<void*>(location);
+ msg << name << kNext << reinterpret_cast<void*>(location);
msg.WriteToLogFile();
}
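log.h (further down) documents kNext as writing an unescaped "," into the log, so streaming it between fields, as HandleEvent does above, yields one comma-separated record per WriteToLogFile(). A small stand-alone sketch of the idiom, with std::ostringstream standing in for Log::MessageBuilder:

#include <iostream>
#include <sstream>

enum class LogSeparator { kSeparator };
constexpr LogSeparator kNext = LogSeparator::kSeparator;

// MessageBuilder-style overload: the separator becomes a raw comma.
std::ostream& operator<<(std::ostream& os, LogSeparator) {
  return os << ',';
}

int main() {
  std::ostringstream msg;
  int dummy = 0;
  msg << "handle-event" << kNext << static_cast<const void*>(&dummy);
  std::cout << msg.str() << '\n';  // e.g. handle-event,0x7ffc1a2b3c4d
}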
@@ -1041,7 +1011,7 @@ void Logger::SharedLibraryEvent(const std::string& library_path,
msg.WriteToLogFile();
}
-void Logger::CodeDeoptEvent(Code* code, DeoptimizeKind kind, Address pc,
+void Logger::CodeDeoptEvent(Code code, DeoptimizeKind kind, Address pc,
int fp_to_sp_delta) {
if (!log_->IsEnabled()) return;
Deoptimizer::DeoptInfo info = Deoptimizer::GetDeoptInfo(code, pc);
@@ -1118,8 +1088,8 @@ void Logger::LeaveExternal(Isolate* isolate) {
TIMER_EVENTS_LIST(V)
#undef V
-void Logger::ApiNamedPropertyAccess(const char* tag, JSObject* holder,
- Object* property_name) {
+void Logger::ApiNamedPropertyAccess(const char* tag, JSObject holder,
+ Object property_name) {
DCHECK(property_name->IsName());
if (!log_->IsEnabled() || !FLAG_log_api) return;
Log::MessageBuilder msg(log_);
@@ -1128,8 +1098,7 @@ void Logger::ApiNamedPropertyAccess(const char* tag, JSObject* holder,
msg.WriteToLogFile();
}
-void Logger::ApiIndexedPropertyAccess(const char* tag,
- JSObject* holder,
+void Logger::ApiIndexedPropertyAccess(const char* tag, JSObject holder,
uint32_t index) {
if (!log_->IsEnabled() || !FLAG_log_api) return;
Log::MessageBuilder msg(log_);
@@ -1138,15 +1107,13 @@ void Logger::ApiIndexedPropertyAccess(const char* tag,
msg.WriteToLogFile();
}
-
-void Logger::ApiObjectAccess(const char* tag, JSObject* object) {
+void Logger::ApiObjectAccess(const char* tag, JSObject object) {
if (!log_->IsEnabled() || !FLAG_log_api) return;
Log::MessageBuilder msg(log_);
msg << "api" << kNext << tag << kNext << object->class_name();
msg.WriteToLogFile();
}
-
void Logger::ApiEntryCall(const char* name) {
if (!log_->IsEnabled() || !FLAG_log_api) return;
Log::MessageBuilder msg(log_);
@@ -1171,8 +1138,7 @@ void Logger::DeleteEvent(const char* name, void* object) {
msg.WriteToLogFile();
}
-
-void Logger::CallbackEventInternal(const char* prefix, Name* name,
+void Logger::CallbackEventInternal(const char* prefix, Name name,
Address entry_point) {
if (!FLAG_log_code || !log_->IsEnabled()) return;
Log::MessageBuilder msg(log_);
@@ -1184,18 +1150,15 @@ void Logger::CallbackEventInternal(const char* prefix, Name* name,
msg.WriteToLogFile();
}
-
-void Logger::CallbackEvent(Name* name, Address entry_point) {
+void Logger::CallbackEvent(Name name, Address entry_point) {
CallbackEventInternal("", name, entry_point);
}
-
-void Logger::GetterCallbackEvent(Name* name, Address entry_point) {
+void Logger::GetterCallbackEvent(Name name, Address entry_point) {
CallbackEventInternal("get ", name, entry_point);
}
-
-void Logger::SetterCallbackEvent(Name* name, Address entry_point) {
+void Logger::SetterCallbackEvent(Name name, Address entry_point) {
CallbackEventInternal("set ", name, entry_point);
}
@@ -1214,7 +1177,7 @@ void AppendCodeCreateHeader(Log::MessageBuilder& msg,
void AppendCodeCreateHeader(Log::MessageBuilder& msg,
CodeEventListener::LogEventsAndTags tag,
- AbstractCode* code, base::ElapsedTimer* timer) {
+ AbstractCode code, base::ElapsedTimer* timer) {
AppendCodeCreateHeader(msg, tag, code->kind(),
reinterpret_cast<uint8_t*>(code->InstructionStart()),
code->InstructionSize(), timer);
@@ -1223,7 +1186,7 @@ void AppendCodeCreateHeader(Log::MessageBuilder& msg,
} // namespace
void Logger::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
- AbstractCode* code, const char* comment) {
+ AbstractCode code, const char* comment) {
if (!is_listening_to_code_events()) return;
if (!FLAG_log_code || !log_->IsEnabled()) return;
Log::MessageBuilder msg(log_);
@@ -1233,7 +1196,7 @@ void Logger::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
}
void Logger::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
- AbstractCode* code, Name* name) {
+ AbstractCode code, Name name) {
if (!is_listening_to_code_events()) return;
if (!FLAG_log_code || !log_->IsEnabled()) return;
Log::MessageBuilder msg(log_);
@@ -1243,8 +1206,8 @@ void Logger::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
}
void Logger::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
- AbstractCode* code, SharedFunctionInfo* shared,
- Name* name) {
+ AbstractCode code, SharedFunctionInfo shared,
+ Name name) {
if (!is_listening_to_code_events()) return;
if (!FLAG_log_code || !log_->IsEnabled()) return;
if (code == AbstractCode::cast(
@@ -1287,8 +1250,8 @@ void Logger::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
// the SharedFunctionInfo object, we left it to caller
// to leave logging functions free from heap allocations.
void Logger::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
- AbstractCode* code, SharedFunctionInfo* shared,
- Name* source, int line, int column) {
+ AbstractCode code, SharedFunctionInfo shared,
+ Name source, int line, int column) {
if (!is_listening_to_code_events()) return;
if (!FLAG_log_code || !log_->IsEnabled()) return;
{
@@ -1301,9 +1264,9 @@ void Logger::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
}
if (!FLAG_log_source_code) return;
- Object* script_object = shared->script();
+ Object script_object = shared->script();
if (!script_object->IsScript()) return;
- Script* script = Script::cast(script_object);
+ Script script = Script::cast(script_object);
if (!EnsureLogScriptSource(script)) return;
// We log source code information in the form:
@@ -1346,7 +1309,7 @@ void Logger::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
msg << kNext;
int maxInlinedId = -1;
if (hasInlined) {
- PodArray<InliningPosition>* inlining_positions =
+ PodArray<InliningPosition> inlining_positions =
DeoptimizationData::cast(Code::cast(code)->deoptimization_data())
->InliningPositions();
for (int i = 0; i < inlining_positions->length(); i++) {
@@ -1367,7 +1330,7 @@ void Logger::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
}
msg << kNext;
if (hasInlined) {
- DeoptimizationData* deopt_data =
+ DeoptimizationData deopt_data =
DeoptimizationData::cast(Code::cast(code)->deoptimization_data());
msg << std::hex;
@@ -1381,8 +1344,7 @@ void Logger::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
msg.WriteToLogFile();
}
-void Logger::CodeDisableOptEvent(AbstractCode* code,
- SharedFunctionInfo* shared) {
+void Logger::CodeDisableOptEvent(AbstractCode code, SharedFunctionInfo shared) {
if (!is_listening_to_code_events()) return;
if (!FLAG_log_code || !log_->IsEnabled()) return;
Log::MessageBuilder msg(log_);
@@ -1392,14 +1354,13 @@ void Logger::CodeDisableOptEvent(AbstractCode* code,
msg.WriteToLogFile();
}
-
void Logger::CodeMovingGCEvent() {
if (!is_listening_to_code_events()) return;
if (!log_->IsEnabled() || !FLAG_ll_prof) return;
base::OS::SignalCodeMovingGC();
}
-void Logger::RegExpCodeCreateEvent(AbstractCode* code, String* source) {
+void Logger::RegExpCodeCreateEvent(AbstractCode code, String source) {
if (!is_listening_to_code_events()) return;
if (!FLAG_log_code || !log_->IsEnabled()) return;
Log::MessageBuilder msg(log_);
@@ -1408,7 +1369,7 @@ void Logger::RegExpCodeCreateEvent(AbstractCode* code, String* source) {
msg.WriteToLogFile();
}
-void Logger::CodeMoveEvent(AbstractCode* from, AbstractCode* to) {
+void Logger::CodeMoveEvent(AbstractCode from, AbstractCode to) {
if (!is_listening_to_code_events()) return;
MoveEventInternal(CodeEventListener::CODE_MOVE_EVENT, from->address(),
to->address());
@@ -1438,15 +1399,15 @@ void CodeLinePosEvent(JitLogger* jit_logger, Address code_start,
} // namespace
void Logger::CodeLinePosInfoRecordEvent(Address code_start,
- ByteArray* source_position_table) {
+ ByteArray source_position_table) {
SourcePositionTableIterator iter(source_position_table);
- CodeLinePosEvent(jit_logger_, code_start, iter);
+ CodeLinePosEvent(jit_logger_.get(), code_start, iter);
}
void Logger::CodeLinePosInfoRecordEvent(
Address code_start, Vector<const byte> source_position_table) {
SourcePositionTableIterator iter(source_position_table);
- CodeLinePosEvent(jit_logger_, code_start, iter);
+ CodeLinePosEvent(jit_logger_.get(), code_start, iter);
}
void Logger::CodeNameEvent(Address addr, int pos, const char* code_name) {
@@ -1487,13 +1448,12 @@ void Logger::ResourceEvent(const char* name, const char* tag) {
msg.WriteToLogFile();
}
-
-void Logger::SuspectReadEvent(Name* name, Object* obj) {
+void Logger::SuspectReadEvent(Name name, Object obj) {
if (!log_->IsEnabled() || !FLAG_log_suspect) return;
Log::MessageBuilder msg(log_);
- String* class_name = obj->IsJSObject()
- ? JSObject::cast(obj)->class_name()
- : ReadOnlyRoots(isolate_).empty_string();
+ String class_name = obj->IsJSObject()
+ ? JSObject::cast(obj)->class_name()
+ : ReadOnlyRoots(isolate_).empty_string();
msg << "suspect-read" << kNext << class_name << kNext << name;
msg.WriteToLogFile();
}
@@ -1511,12 +1471,12 @@ void AppendFunctionMessage(Log::MessageBuilder& msg, const char* reason,
void Logger::FunctionEvent(const char* reason, int script_id, double time_delta,
int start_position, int end_position,
- String* function_name) {
+ String function_name) {
if (!log_->IsEnabled() || !FLAG_log_function_events) return;
Log::MessageBuilder msg(log_);
AppendFunctionMessage(msg, reason, script_id, time_delta, start_position,
end_position, &timer_);
- if (function_name) msg << function_name;
+ if (!function_name.is_null()) msg << function_name;
msg.WriteToLogFile();
}
@@ -1535,7 +1495,7 @@ void Logger::FunctionEvent(const char* reason, int script_id, double time_delta,
}
void Logger::CompilationCacheEvent(const char* action, const char* cache_type,
- SharedFunctionInfo* sfi) {
+ SharedFunctionInfo sfi) {
if (!log_->IsEnabled() || !FLAG_log_function_events) return;
Log::MessageBuilder msg(log_);
int script_id = -1;
@@ -1575,7 +1535,7 @@ void Logger::ScriptEvent(ScriptEventType type, int script_id) {
msg.WriteToLogFile();
}
-void Logger::ScriptDetails(Script* script) {
+void Logger::ScriptDetails(Script script) {
if (!log_->IsEnabled() || !FLAG_log_function_events) return;
{
Log::MessageBuilder msg(log_);
@@ -1593,7 +1553,7 @@ void Logger::ScriptDetails(Script* script) {
EnsureLogScriptSource(script);
}
-bool Logger::EnsureLogScriptSource(Script* script) {
+bool Logger::EnsureLogScriptSource(Script script) {
if (!log_->IsEnabled()) return false;
Log::MessageBuilder msg(log_);
// Make sure the script is written to the log file.
@@ -1603,9 +1563,9 @@ bool Logger::EnsureLogScriptSource(Script* script) {
}
// This script has not been logged yet.
logged_source_code_.insert(script_id);
- Object* source_object = script->source();
+ Object source_object = script->source();
if (!source_object->IsString()) return false;
- String* source_code = String::cast(source_object);
+ String source_code = String::cast(source_object);
msg << "script-source" << kNext << script_id << kNext;
// Log the script name.
@@ -1654,7 +1614,7 @@ void Logger::TickEvent(v8::TickSample* sample, bool overflow) {
msg.WriteToLogFile();
}
-void Logger::ICEvent(const char* type, bool keyed, Map* map, Object* key,
+void Logger::ICEvent(const char* type, bool keyed, Map map, Object key,
char old_state, char new_state, const char* modifier,
const char* slow_stub_reason) {
if (!log_->IsEnabled() || !FLAG_trace_ic) return;
@@ -1665,7 +1625,7 @@ void Logger::ICEvent(const char* type, bool keyed, Map* map, Object* key,
Address pc = isolate_->GetAbstractPC(&line, &column);
msg << type << kNext << reinterpret_cast<void*>(pc) << kNext << line << kNext
<< column << kNext << old_state << kNext << new_state << kNext
- << reinterpret_cast<void*>(map) << kNext;
+ << reinterpret_cast<void*>(map.ptr()) << kNext;
if (key->IsSmi()) {
msg << Smi::ToInt(key);
} else if (key->IsNumber()) {
@@ -1680,11 +1640,11 @@ void Logger::ICEvent(const char* type, bool keyed, Map* map, Object* key,
msg.WriteToLogFile();
}
-void Logger::MapEvent(const char* type, Map* from, Map* to, const char* reason,
- HeapObject* name_or_sfi) {
+void Logger::MapEvent(const char* type, Map from, Map to, const char* reason,
+ HeapObject name_or_sfi) {
DisallowHeapAllocation no_gc;
if (!log_->IsEnabled() || !FLAG_trace_maps) return;
- if (to) MapDetails(to);
+ if (!to.is_null()) MapDetails(to);
int line = -1;
int column = -1;
Address pc = 0;
@@ -1694,15 +1654,16 @@ void Logger::MapEvent(const char* type, Map* from, Map* to, const char* reason,
}
Log::MessageBuilder msg(log_);
msg << "map" << kNext << type << kNext << timer_.Elapsed().InMicroseconds()
- << kNext << reinterpret_cast<void*>(from) << kNext
- << reinterpret_cast<void*>(to) << kNext << reinterpret_cast<void*>(pc)
- << kNext << line << kNext << column << kNext << reason << kNext;
+ << kNext << reinterpret_cast<void*>(from.ptr()) << kNext
+ << reinterpret_cast<void*>(to.ptr()) << kNext
+ << reinterpret_cast<void*>(pc) << kNext << line << kNext << column
+ << kNext << reason << kNext;
- if (name_or_sfi) {
+ if (!name_or_sfi.is_null()) {
if (name_or_sfi->IsName()) {
msg << Name::cast(name_or_sfi);
} else if (name_or_sfi->IsSharedFunctionInfo()) {
- SharedFunctionInfo* sfi = SharedFunctionInfo::cast(name_or_sfi);
+ SharedFunctionInfo sfi = SharedFunctionInfo::cast(name_or_sfi);
msg << sfi->DebugName();
#if V8_SFI_HAS_UNIQUE_ID
msg << " " << sfi->unique_id();
@@ -1712,24 +1673,21 @@ void Logger::MapEvent(const char* type, Map* from, Map* to, const char* reason,
msg.WriteToLogFile();
}
-void Logger::MapCreate(Map* map) {
+void Logger::MapCreate(Map map) {
if (!log_->IsEnabled() || !FLAG_trace_maps) return;
DisallowHeapAllocation no_gc;
Log::MessageBuilder msg(log_);
msg << "map-create" << kNext << timer_.Elapsed().InMicroseconds() << kNext
- << reinterpret_cast<void*>(map);
+ << reinterpret_cast<void*>(map.ptr());
msg.WriteToLogFile();
}
-void Logger::MapDetails(Map* map) {
+void Logger::MapDetails(Map map) {
if (!log_->IsEnabled() || !FLAG_trace_maps) return;
- // Disable logging Map details during bootstrapping since we use LogMaps() to
- // log all creating
- if (isolate_->bootstrapper()->IsActive()) return;
DisallowHeapAllocation no_gc;
Log::MessageBuilder msg(log_);
msg << "map-details" << kNext << timer_.Elapsed().InMicroseconds() << kNext
- << reinterpret_cast<void*>(map) << kNext;
+ << reinterpret_cast<void*>(map.ptr()) << kNext;
if (FLAG_trace_maps_details) {
std::ostringstream buffer;
map->PrintMapDetails(buffer);
@@ -1738,23 +1696,7 @@ void Logger::MapDetails(Map* map) {
msg.WriteToLogFile();
}
-void Logger::StopProfiler() {
- if (!log_->IsEnabled()) return;
- if (profiler_ != nullptr) {
- profiler_->Pause();
- is_logging_ = false;
- RemoveCodeEventListener(this);
- }
-}
-
-// This function can be called when Log's mutex is acquired,
-// either from main or Profiler's thread.
-void Logger::LogFailure() {
- StopProfiler();
-}
-
-static void AddFunctionAndCode(SharedFunctionInfo* sfi,
- AbstractCode* code_object,
+static void AddFunctionAndCode(SharedFunctionInfo sfi, AbstractCode code_object,
Handle<SharedFunctionInfo>* sfis,
Handle<AbstractCode>* code_objects, int offset) {
if (sfis != nullptr) {
@@ -1774,10 +1716,10 @@ static int EnumerateCompiledFunctions(Heap* heap,
// Iterate the heap to find shared function info objects and record
// the unoptimized code for them.
- for (HeapObject* obj = iterator.next(); obj != nullptr;
+ for (HeapObject obj = iterator.next(); !obj.is_null();
obj = iterator.next()) {
if (obj->IsSharedFunctionInfo()) {
- SharedFunctionInfo* sfi = SharedFunctionInfo::cast(obj);
+ SharedFunctionInfo sfi = SharedFunctionInfo::cast(obj);
if (sfi->is_compiled() &&
(!sfi->script()->IsScript() ||
Script::cast(sfi->script())->HasValidSource())) {
@@ -1788,9 +1730,9 @@ static int EnumerateCompiledFunctions(Heap* heap,
} else if (obj->IsJSFunction()) {
// Given that we no longer iterate over all optimized JSFunctions, we need
// to take care of this here.
- JSFunction* function = JSFunction::cast(obj);
- SharedFunctionInfo* sfi = SharedFunctionInfo::cast(function->shared());
- Object* maybe_script = sfi->script();
+ JSFunction function = JSFunction::cast(obj);
+ SharedFunctionInfo sfi = SharedFunctionInfo::cast(function->shared());
+ Object maybe_script = sfi->script();
if (maybe_script->IsScript() &&
!Script::cast(maybe_script)->HasValidSource()) {
continue;
@@ -1814,10 +1756,10 @@ static int EnumerateWasmModuleObjects(
DisallowHeapAllocation no_gc;
int module_objects_count = 0;
- for (HeapObject* obj = iterator.next(); obj != nullptr;
+ for (HeapObject obj = iterator.next(); !obj.is_null();
obj = iterator.next()) {
if (obj->IsWasmModuleObject()) {
- WasmModuleObject* module = WasmModuleObject::cast(obj);
+ WasmModuleObject module = WasmModuleObject::cast(obj);
if (module_objects != nullptr) {
module_objects[module_objects_count] = handle(module, heap->isolate());
}
@@ -1827,7 +1769,7 @@ static int EnumerateWasmModuleObjects(
return module_objects_count;
}
-void Logger::LogCodeObject(Object* object) {
+void Logger::LogCodeObject(Object object) {
existing_code_logger_.LogCodeObject(object);
}
@@ -1846,13 +1788,13 @@ void Logger::LogAccessorCallbacks() {
Heap* heap = isolate_->heap();
HeapIterator iterator(heap);
DisallowHeapAllocation no_gc;
- for (HeapObject* obj = iterator.next(); obj != nullptr;
+ for (HeapObject obj = iterator.next(); !obj.is_null();
obj = iterator.next()) {
if (!obj->IsAccessorInfo()) continue;
- AccessorInfo* ai = AccessorInfo::cast(obj);
+ AccessorInfo ai = AccessorInfo::cast(obj);
if (!ai->name()->IsName()) continue;
Address getter_entry = v8::ToCData<Address>(ai->getter());
- Name* name = Name::cast(ai->name());
+ Name name = Name::cast(ai->name());
if (getter_entry != 0) {
#if USES_FUNCTION_DESCRIPTORS
getter_entry = *FUNCTION_ENTRYPOINT_ADDRESS(getter_entry);
@@ -1869,23 +1811,27 @@ void Logger::LogAccessorCallbacks() {
}
}
-void Logger::LogMaps() {
+void Logger::LogAllMaps() {
+ DisallowHeapAllocation no_gc;
Heap* heap = isolate_->heap();
HeapIterator iterator(heap);
- DisallowHeapAllocation no_gc;
- for (HeapObject* obj = iterator.next(); obj != nullptr;
+ for (HeapObject obj = iterator.next(); !obj.is_null();
obj = iterator.next()) {
if (!obj->IsMap()) continue;
- MapDetails(Map::cast(obj));
+ Map map = Map::cast(obj);
+ MapCreate(map);
+ MapDetails(map);
}
}
static void AddIsolateIdIfNeeded(std::ostream& os, // NOLINT
Isolate* isolate) {
- if (FLAG_logfile_per_isolate) os << "isolate-" << isolate << "-";
+ if (FLAG_logfile_per_isolate) {
+ os << "isolate-" << isolate << "-" << base::OS::GetCurrentProcessId()
+ << "-";
+ }
}
-
static void PrepareLogFileName(std::ostream& os, // NOLINT
Isolate* isolate, const char* file_name) {
int dir_separator_count = 0;
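AddIsolateIdIfNeeded (above) now appends the current process id after the isolate address, so two processes pointed at the same log directory no longer contend for one file name. A sketch of the resulting prefix, with an int pid standing in for base::OS::GetCurrentProcessId():

#include <sstream>
#include <string>

std::string IsolatePrefix(const void* isolate, int pid,
                          bool logfile_per_isolate) {
  std::ostringstream os;
  if (logfile_per_isolate) {
    // e.g. "isolate-0x55d1f4a2b000-12345-" + the configured file name
    os << "isolate-" << isolate << "-" << pid << "-";
  }
  return os.str();
}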
@@ -1942,21 +1888,21 @@ bool Logger::SetUp(Isolate* isolate) {
log_ = new Log(this, log_file_name.str().c_str());
if (FLAG_perf_basic_prof) {
- perf_basic_logger_ = new PerfBasicLogger(isolate);
- AddCodeEventListener(perf_basic_logger_);
+ perf_basic_logger_.reset(new PerfBasicLogger(isolate));
+ AddCodeEventListener(perf_basic_logger_.get());
}
if (FLAG_perf_prof) {
- perf_jit_logger_ = new PerfJitLogger(isolate);
- AddCodeEventListener(perf_jit_logger_);
+ perf_jit_logger_.reset(new PerfJitLogger(isolate));
+ AddCodeEventListener(perf_jit_logger_.get());
}
if (FLAG_ll_prof) {
- ll_logger_ = new LowLevelLogger(isolate, log_file_name.str().c_str());
- AddCodeEventListener(ll_logger_);
+ ll_logger_.reset(new LowLevelLogger(isolate, log_file_name.str().c_str()));
+ AddCodeEventListener(ll_logger_.get());
}
- ticker_ = new Ticker(isolate, FLAG_prof_sampling_interval);
+ ticker_.reset(new Ticker(isolate, FLAG_prof_sampling_interval));
if (Log::InitLogAtStart()) {
is_logging_ = true;
@@ -1965,7 +1911,7 @@ bool Logger::SetUp(Isolate* isolate) {
timer_.Start();
if (FLAG_prof_cpp) {
- profiler_ = new Profiler(isolate);
+ profiler_.reset(new Profiler(isolate));
is_logging_ = true;
profiler_->Engage();
}
@@ -1981,14 +1927,13 @@ bool Logger::SetUp(Isolate* isolate) {
void Logger::SetCodeEventHandler(uint32_t options,
JitCodeEventHandler event_handler) {
if (jit_logger_) {
- RemoveCodeEventListener(jit_logger_);
- delete jit_logger_;
- jit_logger_ = nullptr;
+ RemoveCodeEventListener(jit_logger_.get());
+ jit_logger_.reset();
}
if (event_handler) {
- jit_logger_ = new JitLogger(isolate_, event_handler);
- AddCodeEventListener(jit_logger_);
+ jit_logger_.reset(new JitLogger(isolate_, event_handler));
+ AddCodeEventListener(jit_logger_.get());
if (options & kJitCodeEventEnumExisting) {
HandleScope scope(isolate_);
LogCodeObjects();
@@ -1997,15 +1942,12 @@ void Logger::SetCodeEventHandler(uint32_t options,
}
}
-sampler::Sampler* Logger::sampler() {
- return ticker_;
-}
+sampler::Sampler* Logger::sampler() { return ticker_.get(); }
void Logger::StopProfilerThread() {
if (profiler_ != nullptr) {
profiler_->Disengage();
- delete profiler_;
- profiler_ = nullptr;
+ profiler_.reset();
}
}
@@ -2016,38 +1958,33 @@ FILE* Logger::TearDown() {
// Stop the profiler thread before closing the file.
StopProfilerThread();
- delete ticker_;
- ticker_ = nullptr;
+ ticker_.reset();
if (perf_basic_logger_) {
- RemoveCodeEventListener(perf_basic_logger_);
- delete perf_basic_logger_;
- perf_basic_logger_ = nullptr;
+ RemoveCodeEventListener(perf_basic_logger_.get());
+ perf_basic_logger_.reset();
}
if (perf_jit_logger_) {
- RemoveCodeEventListener(perf_jit_logger_);
- delete perf_jit_logger_;
- perf_jit_logger_ = nullptr;
+ RemoveCodeEventListener(perf_jit_logger_.get());
+ perf_jit_logger_.reset();
}
if (ll_logger_) {
- RemoveCodeEventListener(ll_logger_);
- delete ll_logger_;
- ll_logger_ = nullptr;
+ RemoveCodeEventListener(ll_logger_.get());
+ ll_logger_.reset();
}
if (jit_logger_) {
- RemoveCodeEventListener(jit_logger_);
- delete jit_logger_;
- jit_logger_ = nullptr;
+ RemoveCodeEventListener(jit_logger_.get());
+ jit_logger_.reset();
}
return log_->Close();
}
-void ExistingCodeLogger::LogCodeObject(Object* object) {
- AbstractCode* abstract_code = AbstractCode::cast(object);
+void ExistingCodeLogger::LogCodeObject(Object object) {
+ AbstractCode abstract_code = AbstractCode::cast(object);
CodeEventListener::LogEventsAndTags tag = CodeEventListener::STUB_TAG;
const char* description = "Unknown code from before profiling";
switch (abstract_code->kind()) {
@@ -2057,9 +1994,7 @@ void ExistingCodeLogger::LogCodeObject(Object* object) {
case AbstractCode::BYTECODE_HANDLER:
return; // We log it later by walking the dispatch table.
case AbstractCode::STUB:
- description =
- CodeStub::MajorName(CodeStub::GetMajorKey(abstract_code->GetCode()));
- if (description == nullptr) description = "A stub from before profiling";
+ description = "STUB code";
tag = CodeEventListener::STUB_TAG;
break;
case AbstractCode::REGEXP:
@@ -2106,7 +2041,7 @@ void ExistingCodeLogger::LogCodeObjects() {
Heap* heap = isolate_->heap();
HeapIterator iterator(heap);
DisallowHeapAllocation no_gc;
- for (HeapObject* obj = iterator.next(); obj != nullptr;
+ for (HeapObject obj = iterator.next(); !obj.is_null();
obj = iterator.next()) {
if (obj->IsCode()) LogCodeObject(obj);
if (obj->IsBytecodeArray()) LogCodeObject(obj);
@@ -2174,11 +2109,11 @@ void ExistingCodeLogger::LogExistingFunction(
}
} else if (shared->IsApiFunction()) {
// API function.
- FunctionTemplateInfo* fun_data = shared->get_api_func_data();
- Object* raw_call_data = fun_data->call_code();
+ FunctionTemplateInfo fun_data = shared->get_api_func_data();
+ Object raw_call_data = fun_data->call_code();
if (!raw_call_data->IsUndefined(isolate_)) {
- CallHandlerInfo* call_data = CallHandlerInfo::cast(raw_call_data);
- Object* callback_obj = call_data->callback();
+ CallHandlerInfo call_data = CallHandlerInfo::cast(raw_call_data);
+ Object callback_obj = call_data->callback();
Address entry_point = v8::ToCData<Address>(callback_obj);
#if USES_FUNCTION_DESCRIPTORS
entry_point = *FUNCTION_ENTRYPOINT_ADDRESS(entry_point);
diff --git a/deps/v8/src/log.h b/deps/v8/src/log.h
index 8ec63a9b46..5992747a66 100644
--- a/deps/v8/src/log.h
+++ b/deps/v8/src/log.h
@@ -9,13 +9,8 @@
#include <string>
#include "include/v8-profiler.h"
-#include "src/allocation.h"
-#include "src/base/compiler-specific.h"
#include "src/base/platform/elapsed-timer.h"
-#include "src/base/platform/platform.h"
#include "src/code-events.h"
-#include "src/isolate.h"
-#include "src/log-utils.h"
#include "src/objects.h"
namespace v8 {
@@ -64,7 +59,6 @@ namespace internal {
// Forward declarations.
class CodeEventListener;
-class CpuProfiler;
class Isolate;
class JitLogger;
class Log;
@@ -72,7 +66,6 @@ class LowLevelLogger;
class PerfBasicLogger;
class PerfJitLogger;
class Profiler;
-class RuntimeCallTimer;
class Ticker;
#undef LOG
@@ -101,13 +94,15 @@ class ExistingCodeLogger {
Handle<AbstractCode> code,
CodeEventListener::LogEventsAndTags tag =
CodeEventListener::LAZY_COMPILE_TAG);
- void LogCodeObject(Object* object);
+ void LogCodeObject(Object object);
private:
Isolate* isolate_;
CodeEventListener* listener_;
};
+enum class LogSeparator;
+
class Logger : public CodeEventListener {
public:
enum StartEnd { START = 0, END = 1, STAMP = 2 };
@@ -121,7 +116,7 @@ class Logger : public CodeEventListener {
};
// The separator is used to write an unescaped "," into the log.
- static const LogSeparator kNext = LogSeparator::kSeparator;
+ static const LogSeparator kNext;
// Acquires resources for logging if the right flags are set.
bool SetUp(Isolate* isolate);
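The header now declares kNext without its initializer, and log.cc provides the single definition (the `+const LogSeparator Logger::kNext = ...` hunk earlier in this diff). The likely motivation: before C++17 inline variables, a static const member that is odr-used — for example, bound to a reference by MessageBuilder's operator<< — needs exactly one out-of-line definition. A minimal sketch of the rule:

enum class LogSeparator { kSeparator };

struct LoggerSketch {
  static const LogSeparator kNext;  // declared in the header
};

// In exactly one translation unit (log.cc in the patch):
const LogSeparator LoggerSketch::kNext = LogSeparator::kSeparator;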
@@ -146,7 +141,7 @@ class Logger : public CodeEventListener {
void IntPtrTEvent(const char* name, intptr_t value);
// Emits an event with an handle value -> (name, location).
- void HandleEvent(const char* name, Object** location);
+ void HandleEvent(const char* name, Address* location);
// Emits memory management events for C allocated structures.
void NewEvent(const char* name, void* object, size_t size);
@@ -160,29 +155,28 @@ class Logger : public CodeEventListener {
// Emits an event that an undefined property was read from an
// object.
- void SuspectReadEvent(Name* name, Object* obj);
+ void SuspectReadEvent(Name name, Object obj);
// ==== Events logged by --log-function-events ====
void FunctionEvent(const char* reason, int script_id, double time_delta_ms,
- int start_position = -1, int end_position = -1,
- String* function_name = nullptr);
+ int start_position, int end_position,
+ String function_name);
void FunctionEvent(const char* reason, int script_id, double time_delta_ms,
int start_position, int end_position,
const char* function_name = nullptr,
size_t function_name_length = 0);
void CompilationCacheEvent(const char* action, const char* cache_type,
- SharedFunctionInfo* sfi);
+ SharedFunctionInfo sfi);
void ScriptEvent(ScriptEventType type, int script_id);
- void ScriptDetails(Script* script);
+ void ScriptDetails(Script script);
// ==== Events logged by --log-api. ====
void ApiSecurityCheck();
- void ApiNamedPropertyAccess(const char* tag, JSObject* holder, Object* name);
- void ApiIndexedPropertyAccess(const char* tag,
- JSObject* holder,
+ void ApiNamedPropertyAccess(const char* tag, JSObject holder, Object name);
+ void ApiIndexedPropertyAccess(const char* tag, JSObject holder,
uint32_t index);
- void ApiObjectAccess(const char* tag, JSObject* obj);
+ void ApiObjectAccess(const char* tag, JSObject obj);
void ApiEntryCall(const char* name);
// ==== Events logged by --log-code. ====
@@ -190,34 +184,34 @@ class Logger : public CodeEventListener {
void RemoveCodeEventListener(CodeEventListener* listener);
// Emits a code event for a callback function.
- void CallbackEvent(Name* name, Address entry_point) override;
- void GetterCallbackEvent(Name* name, Address entry_point) override;
- void SetterCallbackEvent(Name* name, Address entry_point) override;
+ void CallbackEvent(Name name, Address entry_point) override;
+ void GetterCallbackEvent(Name name, Address entry_point) override;
+ void SetterCallbackEvent(Name name, Address entry_point) override;
// Emits a code create event.
void CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
- AbstractCode* code, const char* source) override;
+ AbstractCode code, const char* source) override;
void CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
- AbstractCode* code, Name* name) override;
+ AbstractCode code, Name name) override;
void CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
- AbstractCode* code, SharedFunctionInfo* shared,
- Name* name) override;
+ AbstractCode code, SharedFunctionInfo shared,
+ Name name) override;
void CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
- AbstractCode* code, SharedFunctionInfo* shared,
- Name* source, int line, int column) override;
+ AbstractCode code, SharedFunctionInfo shared,
+ Name source, int line, int column) override;
void CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
const wasm::WasmCode* code,
wasm::WasmName name) override;
// Emits a code deoptimization event.
- void CodeDisableOptEvent(AbstractCode* code,
- SharedFunctionInfo* shared) override;
+ void CodeDisableOptEvent(AbstractCode code,
+ SharedFunctionInfo shared) override;
void CodeMovingGCEvent() override;
// Emits a code create event for a RegExp.
- void RegExpCodeCreateEvent(AbstractCode* code, String* source) override;
+ void RegExpCodeCreateEvent(AbstractCode code, String source) override;
// Emits a code move event.
- void CodeMoveEvent(AbstractCode* from, AbstractCode* to) override;
+ void CodeMoveEvent(AbstractCode from, AbstractCode to) override;
// Emits a code line info record event.
void CodeLinePosInfoRecordEvent(Address code_start,
- ByteArray* source_position_table);
+ ByteArray source_position_table);
void CodeLinePosInfoRecordEvent(Address code_start,
Vector<const byte> source_position_table);
@@ -225,19 +219,18 @@ class Logger : public CodeEventListener {
void CodeNameEvent(Address addr, int pos, const char* code_name);
- void CodeDeoptEvent(Code* code, DeoptimizeKind kind, Address pc,
+ void CodeDeoptEvent(Code code, DeoptimizeKind kind, Address pc,
int fp_to_sp_delta) override;
- void ICEvent(const char* type, bool keyed, Map* map, Object* key,
+ void ICEvent(const char* type, bool keyed, Map map, Object key,
char old_state, char new_state, const char* modifier,
const char* slow_stub_reason);
- void MapEvent(const char* type, Map* from, Map* to,
+ void MapEvent(const char* type, Map from, Map to,
const char* reason = nullptr,
- HeapObject* name_or_sfi = nullptr);
- void MapCreate(Map* map);
- void MapDetails(Map* map);
-
+ HeapObject name_or_sfi = HeapObject());
+ void MapCreate(Map map);
+ void MapDetails(Map map);
void SharedLibraryEvent(const std::string& library_path, uintptr_t start,
uintptr_t end, intptr_t aslr_slide);
@@ -258,14 +251,13 @@ class Logger : public CodeEventListener {
return is_logging_;
}
+ // Used by CpuProfiler. TODO(petermarshall): Untangle
+ void set_is_logging(bool new_value) { is_logging_ = new_value; }
+
bool is_listening_to_code_events() override {
return is_logging() || jit_logger_ != nullptr;
}
- // Stop collection of profiling data.
- // When data collection is paused, CPU Tick events are discarded.
- void StopProfiler();
-
void LogExistingFunction(Handle<SharedFunctionInfo> shared,
Handle<AbstractCode> code);
// Logs all compiled functions found in the heap.
@@ -274,18 +266,15 @@ class Logger : public CodeEventListener {
void LogAccessorCallbacks();
// Used for logging stubs found in the snapshot.
void LogCodeObjects();
- // Logs all Mpas foind in the heap.
- void LogMaps();
+ // Logs all Maps found on the heap.
+ void LogAllMaps();
// Converts tag to a corresponding NATIVE_... if the script is native.
V8_INLINE static CodeEventListener::LogEventsAndTags ToNativeByScript(
- CodeEventListener::LogEventsAndTags, Script*);
-
- // Callback from Log, stops profiling in case of insufficient resources.
- void LogFailure();
+ CodeEventListener::LogEventsAndTags, Script);
// Used for logging stubs found in the snapshot.
- void LogCodeObject(Object* code_object);
+ void LogCodeObject(Object code_object);
private:
explicit Logger(Isolate* isolate);
@@ -295,8 +284,7 @@ class Logger : public CodeEventListener {
void ProfilerBeginEvent();
// Emits callback event messages.
- void CallbackEventInternal(const char* prefix,
- Name* name,
+ void CallbackEventInternal(const char* prefix, Name name,
Address entry_point);
// Internal configurable move event.
@@ -318,17 +306,17 @@ class Logger : public CodeEventListener {
// Logs a scripts sources. Keeps track of all logged scripts to ensure that
// each script is logged only once.
- bool EnsureLogScriptSource(Script* script);
+ bool EnsureLogScriptSource(Script script);
Isolate* isolate_;
// The sampler used by the profiler and the sliding state window.
- Ticker* ticker_;
+ std::unique_ptr<Ticker> ticker_;
// When the statistical profile is active, profiler_
// points to a Profiler, that handles collection
// of samples.
- Profiler* profiler_;
+ std::unique_ptr<Profiler> profiler_;
// An array of log events names.
const char* const* log_events_;
@@ -344,10 +332,10 @@ class Logger : public CodeEventListener {
bool is_logging_;
Log* log_;
- PerfBasicLogger* perf_basic_logger_;
- PerfJitLogger* perf_jit_logger_;
- LowLevelLogger* ll_logger_;
- JitLogger* jit_logger_;
+ std::unique_ptr<PerfBasicLogger> perf_basic_logger_;
+ std::unique_ptr<PerfJitLogger> perf_jit_logger_;
+ std::unique_ptr<LowLevelLogger> ll_logger_;
+ std::unique_ptr<JitLogger> jit_logger_;
std::set<int> logged_source_code_;
uint32_t next_source_info_id_ = 0;
@@ -358,8 +346,6 @@ class Logger : public CodeEventListener {
ExistingCodeLogger existing_code_logger_;
base::ElapsedTimer timer_;
-
- friend class CpuProfiler;
};
#define TIMER_EVENTS_LIST(V) \
@@ -405,25 +391,25 @@ class CodeEventLogger : public CodeEventListener {
explicit CodeEventLogger(Isolate* isolate);
~CodeEventLogger() override;
- void CodeCreateEvent(LogEventsAndTags tag, AbstractCode* code,
+ void CodeCreateEvent(LogEventsAndTags tag, AbstractCode code,
const char* comment) override;
- void CodeCreateEvent(LogEventsAndTags tag, AbstractCode* code,
- Name* name) override;
- void CodeCreateEvent(LogEventsAndTags tag, AbstractCode* code,
- SharedFunctionInfo* shared, Name* name) override;
- void CodeCreateEvent(LogEventsAndTags tag, AbstractCode* code,
- SharedFunctionInfo* shared, Name* source, int line,
+ void CodeCreateEvent(LogEventsAndTags tag, AbstractCode code,
+ Name name) override;
+ void CodeCreateEvent(LogEventsAndTags tag, AbstractCode code,
+ SharedFunctionInfo shared, Name name) override;
+ void CodeCreateEvent(LogEventsAndTags tag, AbstractCode code,
+ SharedFunctionInfo shared, Name source, int line,
int column) override;
void CodeCreateEvent(LogEventsAndTags tag, const wasm::WasmCode* code,
wasm::WasmName name) override;
- void RegExpCodeCreateEvent(AbstractCode* code, String* source) override;
- void CallbackEvent(Name* name, Address entry_point) override {}
- void GetterCallbackEvent(Name* name, Address entry_point) override {}
- void SetterCallbackEvent(Name* name, Address entry_point) override {}
+ void RegExpCodeCreateEvent(AbstractCode code, String source) override;
+ void CallbackEvent(Name name, Address entry_point) override {}
+ void GetterCallbackEvent(Name name, Address entry_point) override {}
+ void SetterCallbackEvent(Name name, Address entry_point) override {}
void SharedFunctionInfoMoveEvent(Address from, Address to) override {}
void CodeMovingGCEvent() override {}
- void CodeDeoptEvent(Code* code, DeoptimizeKind kind, Address pc,
+ void CodeDeoptEvent(Code code, DeoptimizeKind kind, Address pc,
int fp_to_sp_delta) override {}
protected:
@@ -432,7 +418,7 @@ class CodeEventLogger : public CodeEventListener {
private:
class NameBuffer;
- virtual void LogRecordedBuffer(AbstractCode* code, SharedFunctionInfo* shared,
+ virtual void LogRecordedBuffer(AbstractCode code, SharedFunctionInfo shared,
const char* name, int length) = 0;
virtual void LogRecordedBuffer(const wasm::WasmCode* code, const char* name,
int length) = 0;
@@ -457,31 +443,31 @@ class ExternalCodeEventListener : public CodeEventListener {
explicit ExternalCodeEventListener(Isolate* isolate);
~ExternalCodeEventListener() override;
- void CodeCreateEvent(LogEventsAndTags tag, AbstractCode* code,
+ void CodeCreateEvent(LogEventsAndTags tag, AbstractCode code,
const char* comment) override;
- void CodeCreateEvent(LogEventsAndTags tag, AbstractCode* code,
- Name* name) override;
- void CodeCreateEvent(LogEventsAndTags tag, AbstractCode* code,
- SharedFunctionInfo* shared, Name* name) override;
- void CodeCreateEvent(LogEventsAndTags tag, AbstractCode* code,
- SharedFunctionInfo* shared, Name* source, int line,
+ void CodeCreateEvent(LogEventsAndTags tag, AbstractCode code,
+ Name name) override;
+ void CodeCreateEvent(LogEventsAndTags tag, AbstractCode code,
+ SharedFunctionInfo shared, Name name) override;
+ void CodeCreateEvent(LogEventsAndTags tag, AbstractCode code,
+ SharedFunctionInfo shared, Name source, int line,
int column) override;
void CodeCreateEvent(LogEventsAndTags tag, const wasm::WasmCode* code,
wasm::WasmName name) override;
- void RegExpCodeCreateEvent(AbstractCode* code, String* source) override;
- void CallbackEvent(Name* name, Address entry_point) override {}
- void GetterCallbackEvent(Name* name, Address entry_point) override {}
- void SetterCallbackEvent(Name* name, Address entry_point) override {}
+ void RegExpCodeCreateEvent(AbstractCode code, String source) override;
+ void CallbackEvent(Name name, Address entry_point) override {}
+ void GetterCallbackEvent(Name name, Address entry_point) override {}
+ void SetterCallbackEvent(Name name, Address entry_point) override {}
void SharedFunctionInfoMoveEvent(Address from, Address to) override {}
- void CodeMoveEvent(AbstractCode* from, AbstractCode* to) override {}
- void CodeDisableOptEvent(AbstractCode* code,
- SharedFunctionInfo* shared) override {}
+ void CodeMoveEvent(AbstractCode from, AbstractCode to) override {}
+ void CodeDisableOptEvent(AbstractCode code,
+ SharedFunctionInfo shared) override {}
void CodeMovingGCEvent() override {}
- void CodeDeoptEvent(Code* code, DeoptimizeKind kind, Address pc,
+ void CodeDeoptEvent(Code code, DeoptimizeKind kind, Address pc,
int fp_to_sp_delta) override {}
- void StartListening(CodeEventHandler* code_event_handler);
+ void StartListening(v8::CodeEventHandler* code_event_handler);
void StopListening();
bool is_listening_to_code_events() override { return true; }
diff --git a/deps/v8/src/lookup-cache-inl.h b/deps/v8/src/lookup-cache-inl.h
index 34435c0e8e..e980deb7da 100644
--- a/deps/v8/src/lookup-cache-inl.h
+++ b/deps/v8/src/lookup-cache-inl.h
@@ -13,24 +13,22 @@ namespace v8 {
namespace internal {
// static
-int DescriptorLookupCache::Hash(Object* source, Name* name) {
+int DescriptorLookupCache::Hash(Map source, Name name) {
DCHECK(name->IsUniqueName());
// Uses only lower 32 bits if pointers are larger.
- uint32_t source_hash =
- static_cast<uint32_t>(reinterpret_cast<uintptr_t>(source)) >>
- kPointerSizeLog2;
+ uint32_t source_hash = static_cast<uint32_t>(source.ptr()) >> kTaggedSizeLog2;
uint32_t name_hash = name->hash_field();
return (source_hash ^ name_hash) % kLength;
}
-int DescriptorLookupCache::Lookup(Map* source, Name* name) {
+int DescriptorLookupCache::Lookup(Map source, Name name) {
int index = Hash(source, name);
Key& key = keys_[index];
if ((key.source == source) && (key.name == name)) return results_[index];
return kAbsent;
}
-void DescriptorLookupCache::Update(Map* source, Name* name, int result) {
+void DescriptorLookupCache::Update(Map source, Name name, int result) {
DCHECK_NE(result, kAbsent);
int index = Hash(source, name);
Key& key = keys_[index];
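With Map now a value type, the cache key is hashed from the map's tagged word rather than from a C++ pointer: shift out the tag/alignment bits, XOR with the name hash, and reduce modulo the table size. A stand-alone restatement — kTaggedSizeLog2 = 3 assumes 8-byte tagged words, i.e. 64-bit without pointer compression:

#include <cstdint>

constexpr int kLength = 64;         // cache entries, as in the header
constexpr int kTaggedSizeLog2 = 3;  // assumption: 8-byte tagged words

int Hash(uintptr_t map_word, uint32_t name_hash) {
  // Low bits of a tagged address are tag/alignment bits, so shift
  // them out before mixing; only the lower 32 bits participate.
  uint32_t source_hash = static_cast<uint32_t>(map_word) >> kTaggedSizeLog2;
  return (source_hash ^ name_hash) % kLength;
}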
diff --git a/deps/v8/src/lookup-cache.cc b/deps/v8/src/lookup-cache.cc
index 2ec0230889..60491ff535 100644
--- a/deps/v8/src/lookup-cache.cc
+++ b/deps/v8/src/lookup-cache.cc
@@ -4,13 +4,11 @@
#include "src/lookup-cache.h"
-#include "src/objects-inl.h"
-
namespace v8 {
namespace internal {
void DescriptorLookupCache::Clear() {
- for (int index = 0; index < kLength; index++) keys_[index].source = nullptr;
+ for (int index = 0; index < kLength; index++) keys_[index].source = Map();
}
} // namespace internal
diff --git a/deps/v8/src/lookup-cache.h b/deps/v8/src/lookup-cache.h
index b8a59c9c1a..8904127266 100644
--- a/deps/v8/src/lookup-cache.h
+++ b/deps/v8/src/lookup-cache.h
@@ -6,6 +6,8 @@
#define V8_LOOKUP_CACHE_H_
#include "src/objects.h"
+#include "src/objects/map.h"
+#include "src/objects/name.h"
namespace v8 {
namespace internal {
@@ -18,10 +20,10 @@ class DescriptorLookupCache {
public:
// Lookup descriptor index for (map, name).
// If absent, kAbsent is returned.
- inline int Lookup(Map* source, Name* name);
+ inline int Lookup(Map source, Name name);
// Update an element in the cache.
- inline void Update(Map* source, Name* name, int result);
+ inline void Update(Map source, Name name, int result);
// Clear the cache.
void Clear();
@@ -31,18 +33,18 @@ class DescriptorLookupCache {
private:
DescriptorLookupCache() {
for (int i = 0; i < kLength; ++i) {
- keys_[i].source = nullptr;
- keys_[i].name = nullptr;
+ keys_[i].source = Map();
+ keys_[i].name = Name();
results_[i] = kAbsent;
}
}
- static inline int Hash(Object* source, Name* name);
+ static inline int Hash(Map source, Name name);
static const int kLength = 64;
struct Key {
- Map* source;
- Name* name;
+ Map source;
+ Name name;
};
Key keys_[kLength];
diff --git a/deps/v8/src/lookup-inl.h b/deps/v8/src/lookup-inl.h
index 6e95d06b0f..abe865f69c 100644
--- a/deps/v8/src/lookup-inl.h
+++ b/deps/v8/src/lookup-inl.h
@@ -17,6 +17,11 @@
namespace v8 {
namespace internal {
+LookupIterator::LookupIterator(Isolate* isolate, Handle<Object> receiver,
+ Handle<Name> name, Configuration configuration)
+ : LookupIterator(isolate, receiver, name, GetRoot(isolate, receiver),
+ configuration) {}
+
LookupIterator::LookupIterator(Handle<Object> receiver, Handle<Name> name,
Handle<JSReceiver> holder,
Configuration configuration)
@@ -43,6 +48,11 @@ LookupIterator::LookupIterator(Isolate* isolate, Handle<Object> receiver,
Start<false>();
}
+LookupIterator::LookupIterator(Isolate* isolate, Handle<Object> receiver,
+ uint32_t index, Configuration configuration)
+ : LookupIterator(isolate, receiver, index,
+ GetRoot(isolate, receiver, index), configuration) {}
+
LookupIterator LookupIterator::PropertyOrElement(
Isolate* isolate, Handle<Object> receiver, Handle<Name> name,
Handle<JSReceiver> holder, Configuration configuration) {
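The two constructors added above are pure delegations: the receiver-only overloads compute the root holder via GetRoot and forward to the existing constructors. The shape, reduced to a stand-alone sketch with std::string standing in for the handle types:

#include <string>
#include <utility>

class LookupSketch {
 public:
  LookupSketch(std::string receiver, std::string holder)
      : receiver_(std::move(receiver)), holder_(std::move(holder)) {}

  // Convenience overload: derive the root holder, then delegate.
  explicit LookupSketch(std::string receiver)
      : LookupSketch(receiver, GetRoot(receiver)) {}

 private:
  static std::string GetRoot(const std::string& receiver) {
    return receiver;  // stand-in for the prototype-chain root lookup
  }
  std::string receiver_, holder_;
};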
@@ -80,6 +90,22 @@ bool LookupIterator::is_dictionary_holder() const {
return !holder_->HasFastProperties();
}
+Handle<Map> LookupIterator::transition_map() const {
+ DCHECK_EQ(TRANSITION, state_);
+ return Handle<Map>::cast(transition_);
+}
+
+Handle<PropertyCell> LookupIterator::transition_cell() const {
+ DCHECK_EQ(TRANSITION, state_);
+ return Handle<PropertyCell>::cast(transition_);
+}
+
+template <class T>
+Handle<T> LookupIterator::GetHolder() const {
+ DCHECK(IsFound());
+ return Handle<T>::cast(holder_);
+}
+
bool LookupIterator::ExtendingNonExtensible(Handle<JSReceiver> receiver) {
DCHECK(receiver.is_identical_to(GetStoreTarget<JSReceiver>()));
return !receiver->map()->is_extensible() &&
@@ -107,6 +133,20 @@ void LookupIterator::UpdateProtector() {
}
}
+int LookupIterator::descriptor_number() const {
+ DCHECK(!IsElement());
+ DCHECK(has_property_);
+ DCHECK(holder_->HasFastProperties());
+ return number_;
+}
+
+int LookupIterator::dictionary_entry() const {
+ DCHECK(!IsElement());
+ DCHECK(has_property_);
+ DCHECK(!holder_->HasFastProperties());
+ return number_;
+}
+
LookupIterator::Configuration LookupIterator::ComputeConfiguration(
Configuration configuration, Handle<Name> name) {
return name->IsPrivate() ? OWN_SKIP_INTERCEPTOR : configuration;
@@ -123,16 +163,23 @@ template <class T>
Handle<T> LookupIterator::GetStoreTarget() const {
DCHECK(receiver_->IsJSReceiver());
if (receiver_->IsJSGlobalProxy()) {
- Map* map = JSGlobalProxy::cast(*receiver_)->map();
+ Map map = JSGlobalProxy::cast(*receiver_)->map();
if (map->has_hidden_prototype()) {
return handle(JSGlobalObject::cast(map->prototype()), isolate_);
}
}
return Handle<T>::cast(receiver_);
}
+
+template <bool is_element>
+InterceptorInfo LookupIterator::GetInterceptor(JSObject holder) {
+ return is_element ? holder->GetIndexedInterceptor()
+ : holder->GetNamedInterceptor();
+}
+
inline Handle<InterceptorInfo> LookupIterator::GetInterceptor() const {
DCHECK_EQ(INTERCEPTOR, state_);
- InterceptorInfo* result =
+ InterceptorInfo result =
IsElement() ? GetInterceptor<true>(JSObject::cast(*holder_))
: GetInterceptor<false>(JSObject::cast(*holder_));
return handle(result, isolate_);
diff --git a/deps/v8/src/lookup.cc b/deps/v8/src/lookup.cc
index c6cc06eeae..3608aa763d 100644
--- a/deps/v8/src/lookup.cc
+++ b/deps/v8/src/lookup.cc
@@ -5,11 +5,14 @@
#include "src/lookup.h"
#include "src/bootstrapper.h"
+#include "src/counters.h"
#include "src/deoptimizer.h"
#include "src/elements.h"
#include "src/field-type.h"
#include "src/isolate-inl.h"
#include "src/objects/hash-table-inl.h"
+#include "src/objects/heap-number-inl.h"
+#include "src/objects/struct-inl.h"
namespace v8 {
namespace internal {
@@ -147,8 +150,8 @@ void LookupIterator::Start() {
state_ = NOT_FOUND;
holder_ = initial_holder_;
- JSReceiver* holder = *holder_;
- Map* map = holder->map();
+ JSReceiver holder = *holder_;
+ Map map = holder->map();
state_ = LookupInHolder<is_element>(map, holder);
if (IsFound()) return;
@@ -165,8 +168,8 @@ void LookupIterator::Next() {
DisallowHeapAllocation no_gc;
has_property_ = false;
- JSReceiver* holder = *holder_;
- Map* map = holder->map();
+ JSReceiver holder = *holder_;
+ Map map = holder->map();
if (map->IsSpecialReceiverMap()) {
state_ = IsElement() ? LookupInSpecialHolder<true>(map, holder)
@@ -179,10 +182,10 @@ void LookupIterator::Next() {
}
template <bool is_element>
-void LookupIterator::NextInternal(Map* map, JSReceiver* holder) {
+void LookupIterator::NextInternal(Map map, JSReceiver holder) {
do {
- JSReceiver* maybe_holder = NextHolder(map);
- if (maybe_holder == nullptr) {
+ JSReceiver maybe_holder = NextHolder(map);
+ if (maybe_holder.is_null()) {
if (interceptor_state_ == InterceptorState::kSkipNonMasking) {
RestartLookupForNonMaskingInterceptors<is_element>();
return;
@@ -227,7 +230,7 @@ Handle<JSReceiver> LookupIterator::GetRootForNonJSReceiver(
auto root =
handle(receiver->GetPrototypeChainRootMap(isolate)->prototype(), isolate);
if (root->IsNull(isolate)) {
- isolate->PushStackTraceAndDie(*receiver);
+ isolate->PushStackTraceAndDie(reinterpret_cast<void*>(receiver->ptr()));
}
return Handle<JSReceiver>::cast(root);
}
@@ -254,7 +257,7 @@ void LookupIterator::ReloadPropertyInformation() {
namespace {
-bool IsTypedArrayFunctionInAnyContext(Isolate* isolate, JSReceiver* holder) {
+bool IsTypedArrayFunctionInAnyContext(Isolate* isolate, JSReceiver holder) {
static uint32_t context_slots[] = {
#define TYPED_ARRAY_CONTEXT_SLOTS(Type, type, TYPE, ctype) \
Context::TYPE##_ARRAY_FUN_INDEX,
@@ -278,9 +281,11 @@ void LookupIterator::InternalUpdateProtector() {
ReadOnlyRoots roots(heap());
if (*name_ == roots.constructor_string()) {
if (!isolate_->IsArraySpeciesLookupChainIntact() &&
- !isolate_->IsTypedArraySpeciesLookupChainIntact() &&
- !isolate_->IsPromiseSpeciesLookupChainIntact())
+ !isolate_->IsPromiseSpeciesLookupChainIntact() &&
+ !isolate_->IsRegExpSpeciesLookupChainIntact() &&
+ !isolate_->IsTypedArraySpeciesLookupChainIntact()) {
return;
+ }
// Setting the constructor property could change an instance's @@species
if (holder_->IsJSArray()) {
if (!isolate_->IsArraySpeciesLookupChainIntact()) return;
@@ -292,6 +297,10 @@ void LookupIterator::InternalUpdateProtector() {
if (!isolate_->IsPromiseSpeciesLookupChainIntact()) return;
isolate_->InvalidatePromiseSpeciesProtector();
return;
+ } else if (holder_->IsJSRegExp()) {
+ if (!isolate_->IsRegExpSpeciesLookupChainIntact()) return;
+ isolate_->InvalidateRegExpSpeciesProtector();
+ return;
} else if (holder_->IsJSTypedArray()) {
if (!isolate_->IsTypedArraySpeciesLookupChainIntact()) return;
isolate_->InvalidateTypedArraySpeciesProtector();
@@ -299,9 +308,8 @@ void LookupIterator::InternalUpdateProtector() {
}
if (holder_->map()->is_prototype_map()) {
DisallowHeapAllocation no_gc;
- // Setting the constructor of Array.prototype, Promise.prototype or
- // %TypedArray%.prototype of any realm also needs to invalidate the
- // @@species protector.
+ // Setting the constructor of any prototype with the @@species protector
+ // (of any realm) also needs to invalidate the protector.
    // For typed arrays, we check a prototype of this holder since TypedArrays
    // have different prototypes for each type, and their parent prototype
    // points to the same TYPED_ARRAY_PROTOTYPE.
@@ -315,6 +323,10 @@ void LookupIterator::InternalUpdateProtector() {
Context::PROMISE_PROTOTYPE_INDEX)) {
if (!isolate_->IsPromiseSpeciesLookupChainIntact()) return;
isolate_->InvalidatePromiseSpeciesProtector();
+ } else if (isolate_->IsInAnyContext(*holder_,
+ Context::REGEXP_PROTOTYPE_INDEX)) {
+ if (!isolate_->IsRegExpSpeciesLookupChainIntact()) return;
+ isolate_->InvalidateRegExpSpeciesProtector();
} else if (isolate_->IsInAnyContext(
holder_->map()->prototype(),
Context::TYPED_ARRAY_PROTOTYPE_INDEX)) {
@@ -330,6 +342,14 @@ void LookupIterator::InternalUpdateProtector() {
if (!isolate_->IsArrayIteratorLookupChainIntact()) return;
isolate_->InvalidateArrayIteratorProtector();
} else if (isolate_->IsInAnyContext(
+ *holder_, Context::INITIAL_MAP_ITERATOR_PROTOTYPE_INDEX)) {
+ if (!isolate_->IsMapIteratorLookupChainIntact()) return;
+ isolate_->InvalidateMapIteratorProtector();
+ } else if (isolate_->IsInAnyContext(
+ *holder_, Context::INITIAL_SET_ITERATOR_PROTOTYPE_INDEX)) {
+ if (!isolate_->IsSetIteratorLookupChainIntact()) return;
+ isolate_->InvalidateSetIteratorProtector();
+ } else if (isolate_->IsInAnyContext(
*receiver_,
Context::INITIAL_STRING_ITERATOR_PROTOTYPE_INDEX)) {
// Setting the next property of %StringIteratorPrototype% invalidates the
@@ -339,9 +359,11 @@ void LookupIterator::InternalUpdateProtector() {
}
} else if (*name_ == roots.species_symbol()) {
if (!isolate_->IsArraySpeciesLookupChainIntact() &&
- !isolate_->IsTypedArraySpeciesLookupChainIntact() &&
- !isolate_->IsPromiseSpeciesLookupChainIntact())
+ !isolate_->IsPromiseSpeciesLookupChainIntact() &&
+ !isolate_->IsRegExpSpeciesLookupChainIntact() &&
+ !isolate_->IsTypedArraySpeciesLookupChainIntact()) {
return;
+ }
    // Setting the Symbol.species property of any Array, Promise, RegExp or
    // TypedArray constructor invalidates the @@species protector.
if (isolate_->IsInAnyContext(*holder_, Context::ARRAY_FUNCTION_INDEX)) {
@@ -353,6 +375,10 @@ void LookupIterator::InternalUpdateProtector() {
Context::PROMISE_FUNCTION_INDEX)) {
if (!isolate_->IsPromiseSpeciesLookupChainIntact()) return;
isolate_->InvalidatePromiseSpeciesProtector();
+ } else if (isolate_->IsInAnyContext(*holder_,
+ Context::REGEXP_FUNCTION_INDEX)) {
+ if (!isolate_->IsRegExpSpeciesLookupChainIntact()) return;
+ isolate_->InvalidateRegExpSpeciesProtector();
} else if (IsTypedArrayFunctionInAnyContext(isolate_, *holder_)) {
if (!isolate_->IsTypedArraySpeciesLookupChainIntact()) return;
isolate_->InvalidateTypedArraySpeciesProtector();
@@ -365,6 +391,18 @@ void LookupIterator::InternalUpdateProtector() {
if (!isolate_->IsArrayIteratorLookupChainIntact()) return;
isolate_->InvalidateArrayIteratorProtector();
} else if (isolate_->IsInAnyContext(
+ *holder_, Context::INITIAL_ITERATOR_PROTOTYPE_INDEX)) {
+ if (isolate_->IsMapIteratorLookupChainIntact()) {
+ isolate_->InvalidateMapIteratorProtector();
+ }
+ if (isolate_->IsSetIteratorLookupChainIntact()) {
+ isolate_->InvalidateSetIteratorProtector();
+ }
+ } else if (isolate_->IsInAnyContext(*holder_,
+ Context::INITIAL_SET_PROTOTYPE_INDEX)) {
+ if (!isolate_->IsSetIteratorLookupChainIntact()) return;
+ isolate_->InvalidateSetIteratorProtector();
+ } else if (isolate_->IsInAnyContext(
*receiver_, Context::INITIAL_STRING_PROTOTYPE_INDEX)) {
// Setting the Symbol.iterator property of String.prototype invalidates
// the string iterator protector. Symbol.iterator can also be set on a
@@ -477,7 +515,7 @@ void LookupIterator::ReconfigureDataProperty(Handle<Object> value,
Handle<JSReceiver> holder = GetHolder<JSReceiver>();
- // Property details can never change for private fields.
+ // Property details can never change for private properties.
if (holder->IsJSProxy()) {
DCHECK(name()->IsPrivate());
return;
@@ -680,7 +718,7 @@ void LookupIterator::Delete() {
ElementsAccessor* accessor = object->GetElementsAccessor();
accessor->Delete(object, number_);
} else {
- DCHECK(!name()->IsPrivateField());
+ DCHECK(!name()->IsPrivateName());
bool is_prototype_map = holder->map()->is_prototype_map();
RuntimeCallTimerScope stats_scope(
isolate_, is_prototype_map
@@ -789,7 +827,7 @@ void LookupIterator::TransitionToAccessorPair(Handle<Object> pair,
receiver->RequireSlowElements(*dictionary);
if (receiver->HasSlowArgumentsElements()) {
- FixedArray* parameter_map = FixedArray::cast(receiver->elements());
+ FixedArray parameter_map = FixedArray::cast(receiver->elements());
uint32_t length = parameter_map->length() - 2;
if (number_ < length) {
parameter_map->set(number_ + 2, ReadOnlyRoots(heap()).the_hole_value());
@@ -832,8 +870,8 @@ bool LookupIterator::HolderIsReceiverOrHiddenPrototype() const {
DisallowHeapAllocation no_gc;
if (*receiver_ == *holder_) return true;
if (!receiver_->IsJSReceiver()) return false;
- JSReceiver* current = JSReceiver::cast(*receiver_);
- JSReceiver* object = *holder_;
+ JSReceiver current = JSReceiver::cast(*receiver_);
+ JSReceiver object = *holder_;
if (!current->map()->has_hidden_prototype()) return false;
// JSProxy do not occur as hidden prototypes.
if (object->IsJSProxy()) return false;
@@ -848,7 +886,7 @@ bool LookupIterator::HolderIsReceiverOrHiddenPrototype() const {
Handle<Object> LookupIterator::FetchValue() const {
- Object* result = nullptr;
+ Object result;
if (IsElement()) {
Handle<JSObject> holder = GetHolder<JSObject>();
ElementsAccessor* accessor = holder->GetElementsAccessor();
@@ -870,7 +908,7 @@ Handle<Object> LookupIterator::FetchValue() const {
return handle(result, isolate_);
}
-bool LookupIterator::IsConstFieldValueEqualTo(Object* value) const {
+bool LookupIterator::IsConstFieldValueEqualTo(Object value) const {
DCHECK(!IsElement());
DCHECK(holder_->HasFastProperties());
DCHECK_EQ(kField, property_details_.location());
@@ -883,7 +921,7 @@ bool LookupIterator::IsConstFieldValueEqualTo(Object* value) const {
if (holder->IsUnboxedDoubleField(field_index)) {
bits = holder->RawFastDoublePropertyAsBitsAt(field_index);
} else {
- Object* current_value = holder->RawFastPropertyAt(field_index);
+ Object current_value = holder->RawFastPropertyAt(field_index);
DCHECK(current_value->IsMutableHeapNumber());
bits = MutableHeapNumber::cast(current_value)->value_as_bits();
}
@@ -898,7 +936,7 @@ bool LookupIterator::IsConstFieldValueEqualTo(Object* value) const {
}
return bit_cast<double>(bits) == value->Number();
} else {
- Object* current_value = holder->RawFastPropertyAt(field_index);
+ Object current_value = holder->RawFastPropertyAt(field_index);
return current_value->IsUninitialized(isolate()) || current_value == value;
}
}
@@ -935,7 +973,7 @@ Handle<Map> LookupIterator::GetFieldOwnerMap() const {
DCHECK(holder_->HasFastProperties());
DCHECK_EQ(kField, property_details_.location());
DCHECK(!IsElement());
- Map* holder_map = holder_->map();
+ Map holder_map = holder_->map();
return handle(holder_map->FindFieldOwner(isolate(), descriptor_number()),
isolate_);
}
@@ -1000,18 +1038,18 @@ void LookupIterator::WriteDataValue(Handle<Object> value,
DCHECK_EQ(PropertyConstness::kConst, property_details_.constness());
}
} else if (holder->IsJSGlobalObject()) {
- GlobalDictionary* dictionary =
+ GlobalDictionary dictionary =
JSGlobalObject::cast(*holder)->global_dictionary();
dictionary->CellAt(dictionary_entry())->set_value(*value);
} else {
DCHECK_IMPLIES(holder->IsJSProxy(), name()->IsPrivate());
- NameDictionary* dictionary = holder->property_dictionary();
+ NameDictionary dictionary = holder->property_dictionary();
dictionary->ValueAtPut(dictionary_entry(), *value);
}
}
template <bool is_element>
-bool LookupIterator::SkipInterceptor(JSObject* holder) {
+bool LookupIterator::SkipInterceptor(JSObject holder) {
auto info = GetInterceptor<is_element>(holder);
if (!is_element && name_->IsSymbol() && !info->can_intercept_symbols()) {
return true;
@@ -1030,29 +1068,31 @@ bool LookupIterator::SkipInterceptor(JSObject* holder) {
return interceptor_state_ == InterceptorState::kProcessNonMasking;
}
-JSReceiver* LookupIterator::NextHolder(Map* map) {
+JSReceiver LookupIterator::NextHolder(Map map) {
DisallowHeapAllocation no_gc;
- if (map->prototype() == ReadOnlyRoots(heap()).null_value()) return nullptr;
- if (!check_prototype_chain() && !map->has_hidden_prototype()) return nullptr;
+ if (map->prototype() == ReadOnlyRoots(heap()).null_value()) {
+ return JSReceiver();
+ }
+ if (!check_prototype_chain() && !map->has_hidden_prototype()) {
+ return JSReceiver();
+ }
return JSReceiver::cast(map->prototype());
}
-LookupIterator::State LookupIterator::NotFound(JSReceiver* const holder) const {
+LookupIterator::State LookupIterator::NotFound(JSReceiver const holder) const {
DCHECK(!IsElement());
if (!holder->IsJSTypedArray() || !name_->IsString()) return NOT_FOUND;
Handle<String> name_string = Handle<String>::cast(name_);
if (name_string->length() == 0) return NOT_FOUND;
- return IsSpecialIndex(isolate_->unicode_cache(), *name_string)
- ? INTEGER_INDEXED_EXOTIC
- : NOT_FOUND;
+ return IsSpecialIndex(*name_string) ? INTEGER_INDEXED_EXOTIC : NOT_FOUND;
}
namespace {
template <bool is_element>
-bool HasInterceptor(Map* map) {
+bool HasInterceptor(Map map) {
return is_element ? map->has_indexed_interceptor()
: map->has_named_interceptor();
}
@@ -1061,7 +1101,7 @@ bool HasInterceptor(Map* map) {
template <bool is_element>
LookupIterator::State LookupIterator::LookupInSpecialHolder(
- Map* const map, JSReceiver* const holder) {
+ Map const map, JSReceiver const holder) {
STATIC_ASSERT(INTERCEPTOR == BEFORE_PROPERTY);
switch (state_) {
case NOT_FOUND:
@@ -1080,12 +1120,12 @@ LookupIterator::State LookupIterator::LookupInSpecialHolder(
V8_FALLTHROUGH;
case INTERCEPTOR:
if (!is_element && map->IsJSGlobalObjectMap()) {
- GlobalDictionary* dict =
+ GlobalDictionary dict =
JSGlobalObject::cast(holder)->global_dictionary();
int number = dict->FindEntry(isolate(), name_);
if (number == GlobalDictionary::kNotFound) return NOT_FOUND;
number_ = static_cast<uint32_t>(number);
- PropertyCell* cell = dict->CellAt(number_);
+ PropertyCell cell = dict->CellAt(number_);
if (cell->value()->IsTheHole(isolate_)) return NOT_FOUND;
property_details_ = cell->property_details();
has_property_ = true;
@@ -1110,16 +1150,16 @@ LookupIterator::State LookupIterator::LookupInSpecialHolder(
template <bool is_element>
LookupIterator::State LookupIterator::LookupInRegularHolder(
- Map* const map, JSReceiver* const holder) {
+ Map const map, JSReceiver const holder) {
DisallowHeapAllocation no_gc;
if (interceptor_state_ == InterceptorState::kProcessNonMasking) {
return NOT_FOUND;
}
if (is_element) {
- JSObject* js_object = JSObject::cast(holder);
+ JSObject js_object = JSObject::cast(holder);
ElementsAccessor* accessor = js_object->GetElementsAccessor();
- FixedArrayBase* backing_store = js_object->elements();
+ FixedArrayBase backing_store = js_object->elements();
number_ =
accessor->GetEntryForIndex(isolate_, js_object, backing_store, index_);
if (number_ == kMaxUInt32) {
@@ -1127,14 +1167,14 @@ LookupIterator::State LookupIterator::LookupInRegularHolder(
}
property_details_ = accessor->GetDetails(js_object, number_);
} else if (!map->is_dictionary_map()) {
- DescriptorArray* descriptors = map->instance_descriptors();
+ DescriptorArray descriptors = map->instance_descriptors();
int number = descriptors->SearchWithCache(isolate_, *name_, map);
if (number == DescriptorArray::kNotFound) return NotFound(holder);
number_ = static_cast<uint32_t>(number);
property_details_ = descriptors->GetDetails(number_);
} else {
DCHECK_IMPLIES(holder->IsJSProxy(), name()->IsPrivate());
- NameDictionary* dict = holder->property_dictionary();
+ NameDictionary dict = holder->property_dictionary();
int number = dict->FindEntry(isolate(), name_);
if (number == NameDictionary::kNotFound) return NotFound(holder);
number_ = static_cast<uint32_t>(number);
@@ -1155,12 +1195,12 @@ Handle<InterceptorInfo> LookupIterator::GetInterceptorForFailedAccessCheck()
const {
DCHECK_EQ(ACCESS_CHECK, state_);
DisallowHeapAllocation no_gc;
- AccessCheckInfo* access_check_info =
+ AccessCheckInfo access_check_info =
AccessCheckInfo::Get(isolate_, Handle<JSObject>::cast(holder_));
- if (access_check_info) {
- Object* interceptor = IsElement() ? access_check_info->indexed_interceptor()
- : access_check_info->named_interceptor();
- if (interceptor) {
+ if (!access_check_info.is_null()) {
+ Object interceptor = IsElement() ? access_check_info->indexed_interceptor()
+ : access_check_info->named_interceptor();
+ if (interceptor != Object()) {
return handle(InterceptorInfo::cast(interceptor), isolate_);
}
}
@@ -1176,7 +1216,7 @@ bool LookupIterator::LookupCachedProperty() {
DCHECK_EQ(state(), LookupIterator::ACCESSOR);
DCHECK(GetAccessors()->IsAccessorPair());
- AccessorPair* accessor_pair = AccessorPair::cast(*GetAccessors());
+ AccessorPair accessor_pair = AccessorPair::cast(*GetAccessors());
Handle<Object> getter(accessor_pair->getter(), isolate());
MaybeHandle<Name> maybe_name =
FunctionTemplateInfo::TryGetCachedPropertyName(isolate(), getter);
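
Every branch added to InternalUpdateProtector above follows the same check-then-invalidate shape: bail out if the protector is already invalid, otherwise flip it exactly once. A rough standalone sketch of that one-way-flag pattern (SpeciesProtector and OnConstructorMutated are illustrative names, not V8 API):

    #include <iostream>

    // One-way "protector" flag: fast paths consult it, and any mutation
    // that could break the assumed invariant flips it exactly once. It
    // never transitions back to intact.
    class SpeciesProtector {
     public:
      bool IsIntact() const { return intact_; }
      void Invalidate() { intact_ = false; }  // one-way transition

     private:
      bool intact_ = true;
    };

    void OnConstructorMutated(SpeciesProtector& p) {
      if (!p.IsIntact()) return;  // already invalidated, nothing to do
      p.Invalidate();
    }

    int main() {
      SpeciesProtector p;
      std::cout << p.IsIntact() << "\n";  // 1: fast path allowed
      OnConstructorMutated(p);
      std::cout << p.IsIntact() << "\n";  // 0: fast path must fall back
    }
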
diff --git a/deps/v8/src/lookup.h b/deps/v8/src/lookup.h
index 1c55d2769c..012ba83d14 100644
--- a/deps/v8/src/lookup.h
+++ b/deps/v8/src/lookup.h
@@ -10,6 +10,7 @@
#include "src/isolate.h"
#include "src/objects.h"
#include "src/objects/descriptor-array.h"
+#include "src/objects/js-objects.h"
#include "src/objects/map.h"
namespace v8 {
@@ -44,10 +45,9 @@ class V8_EXPORT_PRIVATE LookupIterator final {
BEFORE_PROPERTY = INTERCEPTOR
};
- LookupIterator(Isolate* isolate, Handle<Object> receiver, Handle<Name> name,
- Configuration configuration = DEFAULT)
- : LookupIterator(isolate, receiver, name, GetRoot(isolate, receiver),
- configuration) {}
+ inline LookupIterator(Isolate* isolate, Handle<Object> receiver,
+ Handle<Name> name,
+ Configuration configuration = DEFAULT);
inline LookupIterator(Handle<Object> receiver, Handle<Name> name,
Handle<JSReceiver> holder,
@@ -57,10 +57,8 @@ class V8_EXPORT_PRIVATE LookupIterator final {
Handle<Name> name, Handle<JSReceiver> holder,
Configuration configuration = DEFAULT);
- LookupIterator(Isolate* isolate, Handle<Object> receiver, uint32_t index,
- Configuration configuration = DEFAULT)
- : LookupIterator(isolate, receiver, index,
- GetRoot(isolate, receiver, index), configuration) {}
+ inline LookupIterator(Isolate* isolate, Handle<Object> receiver,
+ uint32_t index, Configuration configuration = DEFAULT);
LookupIterator(Isolate* isolate, Handle<Object> receiver, uint32_t index,
Handle<JSReceiver> holder,
@@ -130,19 +128,10 @@ class V8_EXPORT_PRIVATE LookupIterator final {
template <class T>
inline Handle<T> GetStoreTarget() const;
inline bool is_dictionary_holder() const;
- Handle<Map> transition_map() const {
- DCHECK_EQ(TRANSITION, state_);
- return Handle<Map>::cast(transition_);
- }
- Handle<PropertyCell> transition_cell() const {
- DCHECK_EQ(TRANSITION, state_);
- return Handle<PropertyCell>::cast(transition_);
- }
+ inline Handle<Map> transition_map() const;
+ inline Handle<PropertyCell> transition_cell() const;
template <class T>
- Handle<T> GetHolder() const {
- DCHECK(IsFound());
- return Handle<T>::cast(holder_);
- }
+ inline Handle<T> GetHolder() const;
bool HolderIsReceiver() const;
bool HolderIsReceiverOrHiddenPrototype() const;
@@ -221,22 +210,22 @@ class V8_EXPORT_PRIVATE LookupIterator final {
Handle<Map> GetReceiverMap() const;
- V8_WARN_UNUSED_RESULT inline JSReceiver* NextHolder(Map* map);
+ V8_WARN_UNUSED_RESULT inline JSReceiver NextHolder(Map map);
template <bool is_element>
V8_EXPORT_PRIVATE void Start();
template <bool is_element>
- void NextInternal(Map* map, JSReceiver* holder);
+ void NextInternal(Map map, JSReceiver holder);
template <bool is_element>
- inline State LookupInHolder(Map* map, JSReceiver* holder) {
+ inline State LookupInHolder(Map map, JSReceiver holder) {
return map->IsSpecialReceiverMap()
? LookupInSpecialHolder<is_element>(map, holder)
: LookupInRegularHolder<is_element>(map, holder);
}
template <bool is_element>
- State LookupInRegularHolder(Map* map, JSReceiver* holder);
+ State LookupInRegularHolder(Map map, JSReceiver holder);
template <bool is_element>
- State LookupInSpecialHolder(Map* map, JSReceiver* holder);
+ State LookupInSpecialHolder(Map map, JSReceiver holder);
template <bool is_element>
void RestartLookupForNonMaskingInterceptors() {
RestartInternal<is_element>(InterceptorState::kProcessNonMasking);
@@ -244,33 +233,20 @@ class V8_EXPORT_PRIVATE LookupIterator final {
template <bool is_element>
void RestartInternal(InterceptorState interceptor_state);
Handle<Object> FetchValue() const;
- bool IsConstFieldValueEqualTo(Object* value) const;
+ bool IsConstFieldValueEqualTo(Object value) const;
template <bool is_element>
void ReloadPropertyInformation();
template <bool is_element>
- bool SkipInterceptor(JSObject* holder);
+ bool SkipInterceptor(JSObject holder);
template <bool is_element>
- inline InterceptorInfo* GetInterceptor(JSObject* holder) const {
- return is_element ? holder->GetIndexedInterceptor()
- : holder->GetNamedInterceptor();
- }
+ static inline InterceptorInfo GetInterceptor(JSObject holder);
bool check_interceptor() const {
return (configuration_ & kInterceptor) != 0;
}
- int descriptor_number() const {
- DCHECK(!IsElement());
- DCHECK(has_property_);
- DCHECK(holder_->HasFastProperties());
- return number_;
- }
- int dictionary_entry() const {
- DCHECK(!IsElement());
- DCHECK(has_property_);
- DCHECK(!holder_->HasFastProperties());
- return number_;
- }
+ inline int descriptor_number() const;
+ inline int dictionary_entry() const;
static inline Configuration ComputeConfiguration(
Configuration configuration, Handle<Name> name);
@@ -281,7 +257,7 @@ class V8_EXPORT_PRIVATE LookupIterator final {
Handle<Object> receiver,
uint32_t index = kMaxUInt32);
- State NotFound(JSReceiver* const holder) const;
+ State NotFound(JSReceiver const holder) const;
// If configuration_ becomes mutable, update
// HolderIsReceiverOrHiddenPrototype.
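
These lookup.h hunks move member bodies (transition_map, GetHolder, descriptor_number, dictionary_entry, and others) out of the class and declare them inline, with the definitions landing in the lookup-inl.h changes shown earlier; the header itself then no longer needs the definitions' dependencies. A toy illustration of that declaration/definition split, written as one translation unit with widget.h and widget-inl.h as hypothetical file names:

    // Conceptually widget.h: the class declares size() inline but gives
    // no body, so including this header stays cheap.
    class Widget {
     public:
      inline int size() const;  // defined below, as if in widget-inl.h

     private:
      int size_ = 0;
    };

    // Conceptually widget-inl.h: the definition, included only by files
    // that actually call size().
    inline int Widget::size() const { return size_; }

    int main() {
      Widget w;
      return w.size();  // 0
    }
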
diff --git a/deps/v8/src/lsan.h b/deps/v8/src/lsan.h
deleted file mode 100644
index 6e087e3a8f..0000000000
--- a/deps/v8/src/lsan.h
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright 2018 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// LeakSanitizer support.
-
-#ifndef V8_LSAN_H_
-#define V8_LSAN_H_
-
-#include "src/base/macros.h"
-#include "src/globals.h"
-
-// There is no compile time flag for LSan, to enable this whenever ASan is
-// enabled. Note that LSan can be used as part of ASan with 'detect_leaks=1'.
-// On windows, LSan is not implemented yet, so disable it there.
-#if defined(V8_USE_ADDRESS_SANITIZER) && !defined(V8_OS_WIN)
-
-#include <sanitizer/lsan_interface.h>
-
-#define LSAN_IGNORE_OBJECT(ptr) __lsan_ignore_object(ptr)
-
-#else // defined(V8_USE_ADDRESS_SANITIZER) && !defined(V8_OS_WIN)
-
-#define LSAN_IGNORE_OBJECT(ptr) \
- static_assert(std::is_pointer<decltype(ptr)>::value || \
- std::is_same<v8::internal::Address, decltype(ptr)>::value, \
- "static type violation")
-
-#endif // defined(V8_USE_ADDRESS_SANITIZER) && !defined(V8_OS_WIN)
-
-#endif // V8_LSAN_H_
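
The deleted header's fallback branch is worth noting: when LeakSanitizer is unavailable, LSAN_IGNORE_OBJECT still expands to a static_assert that type-checks its argument, so misuse fails to compile even in builds where the macro does nothing. A simplified sketch of that trick (IGNORE_OBJECT is a made-up name, and the real macro also accepted v8::internal::Address):

    #include <type_traits>

    // No-op that still type-checks: the argument is inspected via
    // decltype, so a non-pointer argument is rejected at compile time
    // even though nothing happens at run time.
    #define IGNORE_OBJECT(ptr)                                \
      static_assert(std::is_pointer<decltype(ptr)>::value,    \
                    "IGNORE_OBJECT expects a pointer")

    int main() {
      int x = 0;
      int* p = &x;
      IGNORE_OBJECT(p);  // compiles: p is a pointer
      // IGNORE_OBJECT(x);  // would fail to compile: x is not a pointer
      (void)p;
      return 0;
    }
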
diff --git a/deps/v8/src/machine-type.h b/deps/v8/src/machine-type.h
index 37c2623f89..e70de2e89f 100644
--- a/deps/v8/src/machine-type.h
+++ b/deps/v8/src/machine-type.h
@@ -94,15 +94,21 @@ class MachineType {
representation() == MachineRepresentation::kTaggedSigned ||
representation() == MachineRepresentation::kTagged;
}
+ constexpr bool IsTaggedSigned() const {
+ return representation() == MachineRepresentation::kTaggedSigned;
+ }
+ constexpr bool IsTaggedPointer() const {
+ return representation() == MachineRepresentation::kTaggedPointer;
+ }
constexpr static MachineRepresentation PointerRepresentation() {
- return (kPointerSize == 4) ? MachineRepresentation::kWord32
- : MachineRepresentation::kWord64;
+ return (kSystemPointerSize == 4) ? MachineRepresentation::kWord32
+ : MachineRepresentation::kWord64;
}
constexpr static MachineType UintPtr() {
- return (kPointerSize == 4) ? Uint32() : Uint64();
+ return (kSystemPointerSize == 4) ? Uint32() : Uint64();
}
constexpr static MachineType IntPtr() {
- return (kPointerSize == 4) ? Int32() : Int64();
+ return (kSystemPointerSize == 4) ? Int32() : Int64();
}
constexpr static MachineType Int8() {
return MachineType(MachineRepresentation::kWord8, MachineSemantic::kInt32);
@@ -228,7 +234,7 @@ class MachineType {
}
bool LessThanOrEqualPointerSize() {
- return ElementSizeLog2Of(this->representation()) <= kPointerSizeLog2;
+ return ElementSizeLog2Of(this->representation()) <= kSystemPointerSizeLog2;
}
private:
@@ -287,7 +293,7 @@ V8_EXPORT_PRIVATE inline int ElementSizeLog2Of(MachineRepresentation rep) {
case MachineRepresentation::kTaggedSigned:
case MachineRepresentation::kTaggedPointer:
case MachineRepresentation::kTagged:
- return kPointerSizeLog2;
+ return kTaggedSizeLog2;
default:
break;
}
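
The machine-type.h changes consistently replace kPointerSize with kSystemPointerSize for pointer-width decisions, while tagged representations switch to kTaggedSizeLog2, separating "size of a C++ pointer" from "size of a tagged value". A self-contained sketch of the compile-time width dispatch (kSystemPointerSize is derived locally here; V8 defines its own constant):

    #include <iostream>

    // Select a representation from the system pointer width at compile
    // time, in the style of MachineType::PointerRepresentation().
    constexpr int kSystemPointerSize = sizeof(void*);

    enum class Rep { kWord32, kWord64 };

    constexpr Rep PointerRepresentation() {
      return kSystemPointerSize == 4 ? Rep::kWord32 : Rep::kWord64;
    }

    int main() {
      static_assert(PointerRepresentation() == Rep::kWord32 ||
                    PointerRepresentation() == Rep::kWord64, "exhaustive");
      std::cout << (PointerRepresentation() == Rep::kWord64 ? 64 : 32)
                << "-bit pointers\n";
    }
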
diff --git a/deps/v8/src/macro-assembler-inl.h b/deps/v8/src/macro-assembler-inl.h
index 55d5cf7ed4..a9e9ee7a9f 100644
--- a/deps/v8/src/macro-assembler-inl.h
+++ b/deps/v8/src/macro-assembler-inl.h
@@ -5,6 +5,7 @@
#ifndef V8_MACRO_ASSEMBLER_INL_H_
#define V8_MACRO_ASSEMBLER_INL_H_
+#include "src/assembler-inl.h"
#include "src/macro-assembler.h"
#if V8_TARGET_ARCH_ARM64
diff --git a/deps/v8/src/macro-assembler.h b/deps/v8/src/macro-assembler.h
index 33ca113da0..440fb4a27e 100644
--- a/deps/v8/src/macro-assembler.h
+++ b/deps/v8/src/macro-assembler.h
@@ -5,9 +5,9 @@
#ifndef V8_MACRO_ASSEMBLER_H_
#define V8_MACRO_ASSEMBLER_H_
-#include "src/assembler.h"
#include "src/frames.h"
#include "src/heap/heap.h"
+#include "src/turbo-assembler.h"
// Helper types to make boolean flag easier to read at call-site.
enum InvokeFlag {
@@ -32,6 +32,8 @@ enum AllocationFlags {
PRETENURE = 1 << 3,
};
+// This is the only place allowed to include the platform-specific headers.
+#define INCLUDED_FROM_MACRO_ASSEMBLER_H
#if V8_TARGET_ARCH_IA32
#include "src/ia32/macro-assembler-ia32.h"
#elif V8_TARGET_ARCH_X64
@@ -57,6 +59,7 @@ enum AllocationFlags {
#else
#error Unsupported target architecture.
#endif
+#undef INCLUDED_FROM_MACRO_ASSEMBLER_H
namespace v8 {
namespace internal {
@@ -172,28 +175,11 @@ class AllowExternalCallThatCantCauseGC: public FrameScope {
: FrameScope(masm, StackFrame::NONE) { }
};
-
-class NoCurrentFrameScope {
- public:
- explicit NoCurrentFrameScope(MacroAssembler* masm)
- : masm_(masm), saved_(masm->has_frame()) {
- masm->set_has_frame(false);
- }
-
- ~NoCurrentFrameScope() {
- masm_->set_has_frame(saved_);
- }
-
- private:
- MacroAssembler* masm_;
- bool saved_;
-};
-
// Prevent the use of the RootArray during the lifetime of this
// scope object.
class NoRootArrayScope {
public:
- explicit NoRootArrayScope(MacroAssembler* masm)
+ explicit NoRootArrayScope(TurboAssembler* masm)
: masm_(masm), old_value_(masm->root_array_available()) {
masm->set_root_array_available(false);
}
@@ -201,7 +187,7 @@ class NoRootArrayScope {
~NoRootArrayScope() { masm_->set_root_array_available(old_value_); }
private:
- MacroAssembler* masm_;
+ TurboAssembler* masm_;
bool old_value_;
};
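
NoRootArrayScope above is a plain RAII guard: it records the current flag, clears it for the scope's lifetime, and restores the saved value on destruction so scopes nest correctly. A standalone sketch of the same pattern, with Assembler as a hypothetical stand-in for TurboAssembler:

    #include <iostream>

    class Assembler {
     public:
      bool root_array_available() const { return root_array_available_; }
      void set_root_array_available(bool v) { root_array_available_ = v; }

     private:
      bool root_array_available_ = true;
    };

    // RAII guard: disables the flag for its lifetime, restores the
    // previous value on destruction so nested scopes compose.
    class NoRootArrayScope {
     public:
      explicit NoRootArrayScope(Assembler* masm)
          : masm_(masm), old_value_(masm->root_array_available()) {
        masm->set_root_array_available(false);
      }
      ~NoRootArrayScope() { masm_->set_root_array_available(old_value_); }

     private:
      Assembler* masm_;
      bool old_value_;
    };

    int main() {
      Assembler masm;
      {
        NoRootArrayScope scope(&masm);
        std::cout << masm.root_array_available() << "\n";  // 0 inside
      }
      std::cout << masm.root_array_available() << "\n";  // 1 restored
    }
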
diff --git a/deps/v8/src/map-updater.cc b/deps/v8/src/map-updater.cc
index 470215a310..6825f1ac83 100644
--- a/deps/v8/src/map-updater.cc
+++ b/deps/v8/src/map-updater.cc
@@ -16,7 +16,7 @@ namespace internal {
namespace {
-inline bool EqualImmutableValues(Object* obj1, Object* obj2) {
+inline bool EqualImmutableValues(Object obj1, Object obj2) {
if (obj1 == obj2) return true; // Valid for both kData and kAccessor kinds.
// TODO(ishell): compare AccessorPairs.
return false;
@@ -38,7 +38,7 @@ MapUpdater::MapUpdater(Isolate* isolate, Handle<Map> old_map)
->IsFunctionTemplateInfo());
}
-Name* MapUpdater::GetKey(int descriptor) const {
+Name MapUpdater::GetKey(int descriptor) const {
return old_descriptors_->GetKey(descriptor);
}
@@ -51,7 +51,7 @@ PropertyDetails MapUpdater::GetDetails(int descriptor) const {
return old_descriptors_->GetDetails(descriptor);
}
-Object* MapUpdater::GetValue(int descriptor) const {
+Object MapUpdater::GetValue(int descriptor) const {
DCHECK_LE(0, descriptor);
if (descriptor == modified_descriptor_) {
DCHECK_EQ(kDescriptor, new_location_);
@@ -61,7 +61,7 @@ Object* MapUpdater::GetValue(int descriptor) const {
return old_descriptors_->GetStrongValue(descriptor);
}
-FieldType* MapUpdater::GetFieldType(int descriptor) const {
+FieldType MapUpdater::GetFieldType(int descriptor) const {
DCHECK_LE(0, descriptor);
if (descriptor == modified_descriptor_) {
DCHECK_EQ(kField, new_location_);
@@ -170,6 +170,9 @@ Handle<Map> MapUpdater::Update() {
if (FindTargetMap() == kEnd) return result_map_;
ConstructNewMap();
DCHECK_EQ(kEnd, state_);
+ if (FLAG_fast_map_update) {
+ TransitionsAccessor(isolate_, old_map_).SetMigrationTarget(*result_map_);
+ }
return result_map_;
}
@@ -284,7 +287,7 @@ MapUpdater::State MapUpdater::FindRootMap() {
DCHECK_EQ(kData, old_details.kind());
DCHECK_EQ(kData, new_kind_);
DCHECK_EQ(kField, new_location_);
- FieldType* old_field_type =
+ FieldType old_field_type =
old_descriptors_->GetFieldType(modified_descriptor_);
if (!new_field_type_->NowIs(old_field_type)) {
return CopyGeneralizeAllFields("GenAll_RootModification5");
@@ -317,10 +320,10 @@ MapUpdater::State MapUpdater::FindTargetMap() {
int root_nof = root_map_->NumberOfOwnDescriptors();
for (int i = root_nof; i < old_nof_; ++i) {
PropertyDetails old_details = GetDetails(i);
- Map* transition = TransitionsAccessor(isolate_, target_map_)
- .SearchTransition(GetKey(i), old_details.kind(),
- old_details.attributes());
- if (transition == nullptr) break;
+ Map transition = TransitionsAccessor(isolate_, target_map_)
+ .SearchTransition(GetKey(i), old_details.kind(),
+ old_details.attributes());
+ if (transition.is_null()) break;
Handle<Map> tmp_map(transition, isolate_);
Handle<DescriptorArray> tmp_descriptors(tmp_map->instance_descriptors(),
@@ -372,7 +375,7 @@ MapUpdater::State MapUpdater::FindTargetMap() {
if (target_nof == old_nof_) {
#ifdef DEBUG
if (modified_descriptor_ >= 0) {
- DescriptorArray* target_descriptors = target_map_->instance_descriptors();
+ DescriptorArray target_descriptors = target_map_->instance_descriptors();
PropertyDetails details =
target_descriptors->GetDetails(modified_descriptor_);
DCHECK_EQ(new_kind_, details.kind());
@@ -403,10 +406,10 @@ MapUpdater::State MapUpdater::FindTargetMap() {
// Find the last compatible target map in the transition tree.
for (int i = target_nof; i < old_nof_; ++i) {
PropertyDetails old_details = GetDetails(i);
- Map* transition = TransitionsAccessor(isolate_, target_map_)
- .SearchTransition(GetKey(i), old_details.kind(),
- old_details.attributes());
- if (transition == nullptr) break;
+ Map transition = TransitionsAccessor(isolate_, target_map_)
+ .SearchTransition(GetKey(i), old_details.kind(),
+ old_details.attributes());
+ if (transition.is_null()) break;
Handle<Map> tmp_map(transition, isolate_);
Handle<DescriptorArray> tmp_descriptors(tmp_map->instance_descriptors(),
isolate_);
@@ -439,11 +442,13 @@ Handle<DescriptorArray> MapUpdater::BuildDescriptorArray() {
// descriptors, with minimally the exact same size as the old descriptor
// array.
int new_slack =
- Max(old_nof_, old_descriptors_->number_of_descriptors()) - old_nof_;
+ std::max<int>(old_nof_, old_descriptors_->number_of_descriptors()) -
+ old_nof_;
Handle<DescriptorArray> new_descriptors =
DescriptorArray::Allocate(isolate_, old_nof_, new_slack);
- DCHECK(new_descriptors->length() > target_descriptors->length() ||
- new_descriptors->NumberOfSlackDescriptors() > 0 ||
+ DCHECK(new_descriptors->number_of_all_descriptors() >
+ target_descriptors->number_of_all_descriptors() ||
+ new_descriptors->number_of_slack_descriptors() > 0 ||
new_descriptors->number_of_descriptors() ==
old_descriptors_->number_of_descriptors());
DCHECK(new_descriptors->number_of_descriptors() == old_nof_);
@@ -610,15 +615,15 @@ Handle<Map> MapUpdater::FindSplitMap(Handle<DescriptorArray> descriptors) {
DisallowHeapAllocation no_allocation;
int root_nof = root_map_->NumberOfOwnDescriptors();
- Map* current = *root_map_;
+ Map current = *root_map_;
for (int i = root_nof; i < old_nof_; i++) {
- Name* name = descriptors->GetKey(i);
+ Name name = descriptors->GetKey(i);
PropertyDetails details = descriptors->GetDetails(i);
- Map* next =
+ Map next =
TransitionsAccessor(isolate_, current, &no_allocation)
.SearchTransition(name, details.kind(), details.attributes());
- if (next == nullptr) break;
- DescriptorArray* next_descriptors = next->instance_descriptors();
+ if (next.is_null()) break;
+ DescriptorArray next_descriptors = next->instance_descriptors();
PropertyDetails next_details = next_descriptors->GetDetails(i);
DCHECK_EQ(details.kind(), next_details.kind());
@@ -628,7 +633,7 @@ Handle<Map> MapUpdater::FindSplitMap(Handle<DescriptorArray> descriptors) {
if (!details.representation().Equals(next_details.representation())) break;
if (next_details.location() == kField) {
- FieldType* next_field_type = next_descriptors->GetFieldType(i);
+ FieldType next_field_type = next_descriptors->GetFieldType(i);
if (!descriptors->GetFieldType(i)->NowIs(next_field_type)) {
break;
}
@@ -654,16 +659,16 @@ MapUpdater::State MapUpdater::ConstructNewMap() {
TransitionsAccessor transitions(isolate_, split_map);
// Invalidate a transition target at |key|.
- Map* maybe_transition = transitions.SearchTransition(
+ Map maybe_transition = transitions.SearchTransition(
GetKey(split_nof), split_details.kind(), split_details.attributes());
- if (maybe_transition != nullptr) {
+ if (!maybe_transition.is_null()) {
maybe_transition->DeprecateTransitionTree(isolate_);
}
  // If |maybe_transition| is not null, then the transition array already
  // contains an entry for the given descriptor. This means that the
  // transition could be inserted regardless of whether the transition array
  // is full.
- if (maybe_transition == nullptr && !transitions.CanHaveMoreTransitions()) {
+ if (maybe_transition.is_null() && !transitions.CanHaveMoreTransitions()) {
return CopyGeneralizeAllFields("GenAll_CantHaveMoreTransitions");
}
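
The map-updater.cc hunks track the broader migration in this commit from Object* pointers to value-type object handles: a default-constructed value is the null sentinel, and `== nullptr` checks become is_null(). A standalone sketch of that null-object convention (MiniHandle and Search are illustrative, not V8 types):

    #include <iostream>

    // Value-type wrapper where the default-constructed instance plays
    // the role that nullptr played for raw pointers.
    class MiniHandle {
     public:
      MiniHandle() = default;                       // null handle
      explicit MiniHandle(const int* p) : ptr_(p) {}
      bool is_null() const { return ptr_ == nullptr; }
      int value() const { return *ptr_; }

     private:
      const int* ptr_ = nullptr;
    };

    MiniHandle Search(bool found, const int* storage) {
      return found ? MiniHandle(storage) : MiniHandle();  // was: nullptr
    }

    int main() {
      int x = 42;
      MiniHandle m = Search(true, &x);
      if (!m.is_null()) std::cout << m.value() << "\n";  // 42
      std::cout << Search(false, &x).is_null() << "\n";  // 1
    }
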
diff --git a/deps/v8/src/map-updater.h b/deps/v8/src/map-updater.h
index 52be931bdf..511541e882 100644
--- a/deps/v8/src/map-updater.h
+++ b/deps/v8/src/map-updater.h
@@ -108,7 +108,7 @@ class MapUpdater {
State CopyGeneralizeAllFields(const char* reason);
  // Returns the name of a |descriptor| property.
- inline Name* GetKey(int descriptor) const;
+ inline Name GetKey(int descriptor) const;
  // Returns property details of a |descriptor| in the "updated"
  // |old_descriptors_| array.
@@ -116,11 +116,11 @@ class MapUpdater {
  // Returns the value of a |descriptor| with kDescriptor location in the
  // "updated" |old_descriptors_| array.
- inline Object* GetValue(int descriptor) const;
+ inline Object GetValue(int descriptor) const;
  // Returns the field type for a |descriptor| with kField location in the
  // "updated" |old_descriptors_| array.
- inline FieldType* GetFieldType(int descriptor) const;
+ inline FieldType GetFieldType(int descriptor) const;
  // If a |descriptor| property in the "updated" |old_descriptors_| has kField
  // location then returns its field type; otherwise computes the optimal field
diff --git a/deps/v8/src/math-random.cc b/deps/v8/src/math-random.cc
index 932d4b9d2a..8200af9f8e 100644
--- a/deps/v8/src/math-random.cc
+++ b/deps/v8/src/math-random.cc
@@ -9,6 +9,7 @@
#include "src/contexts-inl.h"
#include "src/isolate.h"
#include "src/objects/fixed-array.h"
+#include "src/objects/smi.h"
namespace v8 {
namespace internal {
@@ -24,15 +25,16 @@ void MathRandom::InitializeContext(Isolate* isolate,
ResetContext(*native_context);
}
-void MathRandom::ResetContext(Context* native_context) {
- native_context->set_math_random_index(Smi::kZero);
+void MathRandom::ResetContext(Context native_context) {
+ native_context->set_math_random_index(Smi::zero());
State state = {0, 0};
PodArray<State>::cast(native_context->math_random_state())->set(0, state);
}
-Smi* MathRandom::RefillCache(Isolate* isolate, Context* native_context) {
+Address MathRandom::RefillCache(Isolate* isolate, Address raw_native_context) {
+ Context native_context = Context::cast(Object(raw_native_context));
DisallowHeapAllocation no_gc;
- PodArray<State>* pod =
+ PodArray<State> pod =
PodArray<State>::cast(native_context->math_random_state());
State state = pod->get(0);
// Initialize state if not yet initialized. If a fixed random seed was
@@ -51,7 +53,7 @@ Smi* MathRandom::RefillCache(Isolate* isolate, Context* native_context) {
CHECK(state.s0 != 0 || state.s1 != 0);
}
- FixedDoubleArray* cache =
+ FixedDoubleArray cache =
FixedDoubleArray::cast(native_context->math_random_cache());
// Create random numbers.
for (int i = 0; i < kCacheSize; i++) {
@@ -61,9 +63,9 @@ Smi* MathRandom::RefillCache(Isolate* isolate, Context* native_context) {
}
pod->set(0, state);
- Smi* new_index = Smi::FromInt(kCacheSize);
+ Smi new_index = Smi::FromInt(kCacheSize);
native_context->set_math_random_index(new_index);
- return new_index;
+ return new_index.ptr();
}
} // namespace internal
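
RefillCache now crosses the C++/generated-code boundary as raw Address values: the caller passes the native context as an untyped address, the function reconstructs the typed view internally, and the new index goes back out as a raw tagged value. A simplified sketch of that raw-address calling convention (Context here is a local stand-in, not V8's Context):

    #include <cstdint>
    #include <iostream>

    using Address = uintptr_t;

    struct Context {
      int math_random_index = 0;
    };

    // Entry point reachable through an address-based ABI: plain Address
    // in, plain Address out, typed view only inside the function body.
    Address RefillCache(Address raw_context) {
      Context* context = reinterpret_cast<Context*>(raw_context);
      context->math_random_index = 64;  // pretend we refilled 64 entries
      return static_cast<Address>(context->math_random_index);
    }

    int main() {
      Context ctx;
      Address result = RefillCache(reinterpret_cast<Address>(&ctx));
      std::cout << result << "\n";  // 64
    }
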
diff --git a/deps/v8/src/math-random.h b/deps/v8/src/math-random.h
index a720c75757..481a245750 100644
--- a/deps/v8/src/math-random.h
+++ b/deps/v8/src/math-random.h
@@ -16,8 +16,10 @@ class MathRandom : public AllStatic {
static void InitializeContext(Isolate* isolate,
Handle<Context> native_context);
- static void ResetContext(Context* native_context);
- static Smi* RefillCache(Isolate* isolate, Context* native_context);
+ static void ResetContext(Context native_context);
+ // Takes native context as a raw Address for ExternalReference usage.
+ // Returns a tagged Smi as a raw Address.
+ static Address RefillCache(Isolate* isolate, Address raw_native_context);
static const int kCacheSize = 64;
static const int kStateSize = 2 * kInt64Size;
diff --git a/deps/v8/src/maybe-handles-inl.h b/deps/v8/src/maybe-handles-inl.h
index 1743af41a4..8e0e7e3a38 100644
--- a/deps/v8/src/maybe-handles-inl.h
+++ b/deps/v8/src/maybe-handles-inl.h
@@ -13,15 +13,15 @@
namespace v8 {
namespace internal {
template <typename T>
-MaybeHandle<T>::MaybeHandle(T* object, Isolate* isolate)
+MaybeHandle<T>::MaybeHandle(T object, Isolate* isolate)
: MaybeHandle(handle(object, isolate)) {}
MaybeObjectHandle::MaybeObjectHandle()
: reference_type_(HeapObjectReferenceType::STRONG),
handle_(Handle<Object>::null()) {}
-MaybeObjectHandle::MaybeObjectHandle(MaybeObject* object, Isolate* isolate) {
- HeapObject* heap_object;
+MaybeObjectHandle::MaybeObjectHandle(MaybeObject object, Isolate* isolate) {
+ HeapObject heap_object;
DCHECK(!object->IsCleared());
if (object->GetHeapObjectIfWeak(&heap_object)) {
handle_ = handle(heap_object, isolate);
@@ -35,11 +35,11 @@ MaybeObjectHandle::MaybeObjectHandle(MaybeObject* object, Isolate* isolate) {
MaybeObjectHandle::MaybeObjectHandle(Handle<Object> object)
: reference_type_(HeapObjectReferenceType::STRONG), handle_(object) {}
-MaybeObjectHandle::MaybeObjectHandle(Object* object, Isolate* isolate)
+MaybeObjectHandle::MaybeObjectHandle(Object object, Isolate* isolate)
: reference_type_(HeapObjectReferenceType::STRONG),
handle_(object, isolate) {}
-MaybeObjectHandle::MaybeObjectHandle(Object* object,
+MaybeObjectHandle::MaybeObjectHandle(Object object,
HeapObjectReferenceType reference_type,
Isolate* isolate)
: reference_type_(reference_type), handle_(handle(object, isolate)) {}
@@ -52,11 +52,11 @@ MaybeObjectHandle MaybeObjectHandle::Weak(Handle<Object> object) {
return MaybeObjectHandle(object, HeapObjectReferenceType::WEAK);
}
-MaybeObjectHandle MaybeObjectHandle::Weak(Object* object, Isolate* isolate) {
+MaybeObjectHandle MaybeObjectHandle::Weak(Object object, Isolate* isolate) {
return MaybeObjectHandle(object, HeapObjectReferenceType::WEAK, isolate);
}
-MaybeObject* MaybeObjectHandle::operator*() const {
+MaybeObject MaybeObjectHandle::operator*() const {
if (reference_type_ == HeapObjectReferenceType::WEAK) {
return HeapObjectReference::Weak(*handle_.ToHandleChecked());
} else {
@@ -64,7 +64,7 @@ MaybeObject* MaybeObjectHandle::operator*() const {
}
}
-MaybeObject* MaybeObjectHandle::operator->() const {
+MaybeObject MaybeObjectHandle::operator->() const {
if (reference_type_ == HeapObjectReferenceType::WEAK) {
return HeapObjectReference::Weak(*handle_.ToHandleChecked());
} else {
@@ -76,7 +76,7 @@ Handle<Object> MaybeObjectHandle::object() const {
return handle_.ToHandleChecked();
}
-inline MaybeObjectHandle handle(MaybeObject* object, Isolate* isolate) {
+inline MaybeObjectHandle handle(MaybeObject object, Isolate* isolate) {
return MaybeObjectHandle(object, isolate);
}
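
MaybeObjectHandle, as revised above, pairs an ordinary handle with a strong/weak tag and re-applies the tag whenever the value is read back through operator*. A rough sketch of that shape (all names here are illustrative, not the V8 API):

    #include <iostream>

    enum class RefType { kStrong, kWeak };

    struct TaggedRef {
      int value;
      RefType type;
    };

    // Handle plus reference-type tag; the tag is re-applied on every
    // read, mirroring MaybeObjectHandle::operator*.
    class MaybeRefHandle {
     public:
      static MaybeRefHandle Weak(int v) { return {v, RefType::kWeak}; }
      static MaybeRefHandle Strong(int v) { return {v, RefType::kStrong}; }

      TaggedRef operator*() const { return {value_, type_}; }

     private:
      MaybeRefHandle(int v, RefType t) : value_(v), type_(t) {}
      int value_;
      RefType type_;
    };

    int main() {
      TaggedRef r = *MaybeRefHandle::Weak(7);
      std::cout << r.value << " " << (r.type == RefType::kWeak) << "\n";
    }
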
diff --git a/deps/v8/src/maybe-handles.h b/deps/v8/src/maybe-handles.h
index 231e4d78fb..8a68c85f48 100644
--- a/deps/v8/src/maybe-handles.h
+++ b/deps/v8/src/maybe-handles.h
@@ -29,17 +29,16 @@ class MaybeHandle final {
// Ex. Handle<JSArray> can be passed when MaybeHandle<Object> is expected.
template <typename S, typename = typename std::enable_if<
std::is_convertible<S*, T*>::value>::type>
- V8_INLINE MaybeHandle(Handle<S> handle)
- : location_(reinterpret_cast<T**>(handle.location_)) {}
+ V8_INLINE MaybeHandle(Handle<S> handle) : location_(handle.location_) {}
// Constructor for handling automatic up casting.
// Ex. MaybeHandle<JSArray> can be passed when Handle<Object> is expected.
template <typename S, typename = typename std::enable_if<
std::is_convertible<S*, T*>::value>::type>
V8_INLINE MaybeHandle(MaybeHandle<S> maybe_handle)
- : location_(reinterpret_cast<T**>(maybe_handle.location_)) {}
+ : location_(maybe_handle.location_) {}
- V8_INLINE MaybeHandle(T* object, Isolate* isolate);
+ V8_INLINE MaybeHandle(T object, Isolate* isolate);
V8_INLINE void Assert() const { DCHECK_NOT_NULL(location_); }
V8_INLINE void Check() const { CHECK_NOT_NULL(location_); }
@@ -63,12 +62,14 @@ class MaybeHandle final {
// Returns the raw address where this handle is stored. This should only be
// used for hashing handles; do not ever try to dereference it.
- V8_INLINE Address address() const { return bit_cast<Address>(location_); }
+ V8_INLINE Address address() const {
+ return reinterpret_cast<Address>(location_);
+ }
bool is_null() const { return location_ == nullptr; }
protected:
- T** location_ = nullptr;
+ Address* location_ = nullptr;
// MaybeHandles of different classes are allowed to access each
// other's location_.
@@ -81,15 +82,15 @@ class MaybeHandle final {
class MaybeObjectHandle {
public:
inline MaybeObjectHandle();
- inline MaybeObjectHandle(MaybeObject* object, Isolate* isolate);
- inline MaybeObjectHandle(Object* object, Isolate* isolate);
+ inline MaybeObjectHandle(MaybeObject object, Isolate* isolate);
+ inline MaybeObjectHandle(Object object, Isolate* isolate);
inline explicit MaybeObjectHandle(Handle<Object> object);
- static inline MaybeObjectHandle Weak(Object* object, Isolate* isolate);
+ static inline MaybeObjectHandle Weak(Object object, Isolate* isolate);
static inline MaybeObjectHandle Weak(Handle<Object> object);
- inline MaybeObject* operator*() const;
- inline MaybeObject* operator->() const;
+ inline MaybeObject operator*() const;
+ inline MaybeObject operator->() const;
inline Handle<Object> object() const;
bool is_identical_to(const MaybeObjectHandle& other) const {
@@ -104,7 +105,7 @@ class MaybeObjectHandle {
bool is_null() const { return handle_.is_null(); }
private:
- inline MaybeObjectHandle(Object* object,
+ inline MaybeObjectHandle(Object object,
HeapObjectReferenceType reference_type,
Isolate* isolate);
inline MaybeObjectHandle(Handle<Object> object,
diff --git a/deps/v8/src/memcopy.cc b/deps/v8/src/memcopy.cc
new file mode 100644
index 0000000000..2185faea29
--- /dev/null
+++ b/deps/v8/src/memcopy.cc
@@ -0,0 +1,71 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/memcopy.h"
+
+#include "src/snapshot/embedded-data.h"
+
+namespace v8 {
+namespace internal {
+
+#if V8_TARGET_ARCH_IA32
+static void MemMoveWrapper(void* dest, const void* src, size_t size) {
+ memmove(dest, src, size);
+}
+
+// Initialize to library version so we can call this at any time during startup.
+static MemMoveFunction memmove_function = &MemMoveWrapper;
+
+// Copy memory area to disjoint memory area.
+V8_EXPORT_PRIVATE void MemMove(void* dest, const void* src, size_t size) {
+ if (size == 0) return;
+ // Note: here we rely on dependent reads being ordered. This is true
+ // on all architectures we currently support.
+ (*memmove_function)(dest, src, size);
+}
+#elif V8_OS_POSIX && V8_HOST_ARCH_ARM
+void MemCopyUint16Uint8Wrapper(uint16_t* dest, const uint8_t* src,
+ size_t chars) {
+ uint16_t* limit = dest + chars;
+ while (dest < limit) {
+ *dest++ = static_cast<uint16_t>(*src++);
+ }
+}
+
+V8_EXPORT_PRIVATE MemCopyUint8Function memcopy_uint8_function =
+ &MemCopyUint8Wrapper;
+MemCopyUint16Uint8Function memcopy_uint16_uint8_function =
+ &MemCopyUint16Uint8Wrapper;
+#elif V8_OS_POSIX && V8_HOST_ARCH_MIPS
+V8_EXPORT_PRIVATE MemCopyUint8Function memcopy_uint8_function =
+ &MemCopyUint8Wrapper;
+#endif
+
+void init_memcopy_functions() {
+#if V8_TARGET_ARCH_IA32
+ if (Isolate::CurrentEmbeddedBlobIsBinaryEmbedded()) {
+ EmbeddedData d = EmbeddedData::FromBlob();
+ memmove_function = reinterpret_cast<MemMoveFunction>(
+ d.InstructionStartOfBuiltin(Builtins::kMemMove));
+ }
+#elif V8_OS_POSIX && V8_HOST_ARCH_ARM
+ if (Isolate::CurrentEmbeddedBlobIsBinaryEmbedded()) {
+ EmbeddedData d = EmbeddedData::FromBlob();
+ memcopy_uint8_function = reinterpret_cast<MemCopyUint8Function>(
+ d.InstructionStartOfBuiltin(Builtins::kMemCopyUint8Uint8));
+ memcopy_uint16_uint8_function =
+ reinterpret_cast<MemCopyUint16Uint8Function>(
+ d.InstructionStartOfBuiltin(Builtins::kMemCopyUint16Uint8));
+ }
+#elif V8_OS_POSIX && V8_HOST_ARCH_MIPS
+ if (Isolate::CurrentEmbeddedBlobIsBinaryEmbedded()) {
+ EmbeddedData d = EmbeddedData::FromBlob();
+ memcopy_uint8_function = reinterpret_cast<MemCopyUint8Function>(
+ d.InstructionStartOfBuiltin(Builtins::kMemCopyUint8Uint8));
+ }
+#endif
+}
+
+} // namespace internal
+} // namespace v8
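
memcopy.cc routes MemMove through a function pointer that starts at a portable memmove wrapper and may be retargeted once at startup to an embedded builtin, so calls are safe at any point during initialization. A standalone sketch of that late-bound indirection (the retargeting step is omitted; only the fallback path is shown):

    #include <cstring>
    #include <iostream>

    using MemMoveFunction = void (*)(void* dest, const void* src, size_t size);

    // Portable fallback; valid before any initialization has run.
    static void MemMoveWrapper(void* dest, const void* src, size_t size) {
      memmove(dest, src, size);
    }

    // Initialized to the library version; a startup step could swap in a
    // faster implementation here, as init_memcopy_functions() does above.
    static MemMoveFunction memmove_function = &MemMoveWrapper;

    void MemMove(void* dest, const void* src, size_t size) {
      if (size == 0) return;
      (*memmove_function)(dest, src, size);
    }

    int main() {
      char buf[] = "abcdef";
      MemMove(buf + 1, buf, 3);  // overlapping move, handled by memmove
      std::cout << buf << "\n";  // "aabcef"
    }
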
diff --git a/deps/v8/src/memcopy.h b/deps/v8/src/memcopy.h
new file mode 100644
index 0000000000..e0469a024e
--- /dev/null
+++ b/deps/v8/src/memcopy.h
@@ -0,0 +1,492 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_MEMCOPY_H_
+#define V8_MEMCOPY_H_
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "src/base/logging.h"
+#include "src/base/macros.h"
+
+namespace v8 {
+namespace internal {
+
+typedef uintptr_t Address;
+
+// ----------------------------------------------------------------------------
+// Generated memcpy/memmove for ia32, arm, and mips.
+
+void init_memcopy_functions();
+
+#if defined(V8_TARGET_ARCH_IA32)
+// Limit below which the extra overhead of the MemCopy function is likely
+// to outweigh the benefits of faster copying.
+const int kMinComplexMemCopy = 64;
+
+// Copy memory area. No restrictions.
+V8_EXPORT_PRIVATE void MemMove(void* dest, const void* src, size_t size);
+typedef void (*MemMoveFunction)(void* dest, const void* src, size_t size);
+
+// Keep the distinction of "move" vs. "copy" for the benefit of other
+// architectures.
+V8_INLINE void MemCopy(void* dest, const void* src, size_t size) {
+ MemMove(dest, src, size);
+}
+#elif defined(V8_HOST_ARCH_ARM)
+typedef void (*MemCopyUint8Function)(uint8_t* dest, const uint8_t* src,
+ size_t size);
+V8_EXPORT_PRIVATE extern MemCopyUint8Function memcopy_uint8_function;
+V8_INLINE void MemCopyUint8Wrapper(uint8_t* dest, const uint8_t* src,
+ size_t chars) {
+ memcpy(dest, src, chars);
+}
+// For values < 16, the assembler function is slower than the inlined C code.
+const int kMinComplexMemCopy = 16;
+V8_INLINE void MemCopy(void* dest, const void* src, size_t size) {
+ (*memcopy_uint8_function)(reinterpret_cast<uint8_t*>(dest),
+ reinterpret_cast<const uint8_t*>(src), size);
+}
+V8_EXPORT_PRIVATE V8_INLINE void MemMove(void* dest, const void* src,
+ size_t size) {
+ memmove(dest, src, size);
+}
+
+typedef void (*MemCopyUint16Uint8Function)(uint16_t* dest, const uint8_t* src,
+ size_t size);
+extern MemCopyUint16Uint8Function memcopy_uint16_uint8_function;
+void MemCopyUint16Uint8Wrapper(uint16_t* dest, const uint8_t* src,
+ size_t chars);
+// For values < 12, the assembler function is slower than the inlined C code.
+const int kMinComplexConvertMemCopy = 12;
+V8_INLINE void MemCopyUint16Uint8(uint16_t* dest, const uint8_t* src,
+ size_t size) {
+ (*memcopy_uint16_uint8_function)(dest, src, size);
+}
+#elif defined(V8_HOST_ARCH_MIPS)
+typedef void (*MemCopyUint8Function)(uint8_t* dest, const uint8_t* src,
+ size_t size);
+V8_EXPORT_PRIVATE extern MemCopyUint8Function memcopy_uint8_function;
+V8_INLINE void MemCopyUint8Wrapper(uint8_t* dest, const uint8_t* src,
+ size_t chars) {
+ memcpy(dest, src, chars);
+}
+// For values < 16, the assembler function is slower than the inlined C code.
+const int kMinComplexMemCopy = 16;
+V8_INLINE void MemCopy(void* dest, const void* src, size_t size) {
+ (*memcopy_uint8_function)(reinterpret_cast<uint8_t*>(dest),
+ reinterpret_cast<const uint8_t*>(src), size);
+}
+V8_EXPORT_PRIVATE V8_INLINE void MemMove(void* dest, const void* src,
+ size_t size) {
+ memmove(dest, src, size);
+}
+#else
+// Copy memory area to disjoint memory area.
+V8_INLINE void MemCopy(void* dest, const void* src, size_t size) {
+ memcpy(dest, src, size);
+}
+V8_EXPORT_PRIVATE V8_INLINE void MemMove(void* dest, const void* src,
+ size_t size) {
+ memmove(dest, src, size);
+}
+const int kMinComplexMemCopy = 8;
+#endif // V8_TARGET_ARCH_IA32
+
+// Copies words from |src| to |dst|. The data spans must not overlap.
+// |src| and |dst| must be kSystemPointerSize-aligned.
+inline void CopyWords(Address dst, const Address src, size_t num_words) {
+ constexpr int kSystemPointerSize = sizeof(void*); // to avoid src/globals.h
+ DCHECK(IsAligned(dst, kSystemPointerSize));
+ DCHECK(IsAligned(src, kSystemPointerSize));
+ DCHECK(((src <= dst) && ((src + num_words * kSystemPointerSize) <= dst)) ||
+ ((dst <= src) && ((dst + num_words * kSystemPointerSize) <= src)));
+
+ // Use block copying MemCopy if the segment we're copying is big
+ // enough to justify the extra call/setup overhead.
+ static const size_t kBlockCopyLimit = 16;
+
+ Address* dst_ptr = reinterpret_cast<Address*>(dst);
+ Address* src_ptr = reinterpret_cast<Address*>(src);
+ if (num_words < kBlockCopyLimit) {
+ do {
+ num_words--;
+ *dst_ptr++ = *src_ptr++;
+ } while (num_words > 0);
+ } else {
+ MemCopy(dst_ptr, src_ptr, num_words * kSystemPointerSize);
+ }
+}
+
+// Copies data from |src| to |dst|. The data spans must not overlap.
+template <typename T>
+inline void CopyBytes(T* dst, const T* src, size_t num_bytes) {
+ STATIC_ASSERT(sizeof(T) == 1);
+ DCHECK(((src <= dst) && ((src + num_bytes) <= dst)) ||
+ ((dst <= src) && ((dst + num_bytes) <= src)));
+ if (num_bytes == 0) return;
+
+ // Use block copying MemCopy if the segment we're copying is big
+ // enough to justify the extra call/setup overhead.
+ static const int kBlockCopyLimit = kMinComplexMemCopy;
+
+ if (num_bytes < static_cast<size_t>(kBlockCopyLimit)) {
+ do {
+ num_bytes--;
+ *dst++ = *src++;
+ } while (num_bytes > 0);
+ } else {
+ MemCopy(dst, src, num_bytes);
+ }
+}
+
+inline void MemsetPointer(Address* dest, Address value, size_t counter) {
+#if V8_HOST_ARCH_IA32
+#define STOS "stosl"
+#elif V8_HOST_ARCH_X64
+#define STOS "stosq"
+#endif
+
+#if defined(MEMORY_SANITIZER)
+ // MemorySanitizer does not understand inline assembly.
+#undef STOS
+#endif
+
+#if defined(__GNUC__) && defined(STOS)
+ asm volatile(
+ "cld;"
+ "rep ; " STOS
+ : "+&c"(counter), "+&D"(dest)
+ : "a"(value)
+ : "memory", "cc");
+#else
+ for (size_t i = 0; i < counter; i++) {
+ dest[i] = value;
+ }
+#endif
+
+#undef STOS
+}
+
+template <typename T, typename U>
+inline void MemsetPointer(T** dest, U* value, size_t counter) {
+#ifdef DEBUG
+ T* a = nullptr;
+ U* b = nullptr;
+ a = b; // Fake assignment to check assignability.
+ USE(a);
+#endif // DEBUG
+ MemsetPointer(reinterpret_cast<Address*>(dest),
+ reinterpret_cast<Address>(value), counter);
+}
+
+template <typename sourcechar, typename sinkchar>
+V8_INLINE static void CopyCharsUnsigned(sinkchar* dest, const sourcechar* src,
+ size_t chars);
+#if defined(V8_HOST_ARCH_ARM)
+V8_INLINE void CopyCharsUnsigned(uint8_t* dest, const uint8_t* src,
+ size_t chars);
+V8_INLINE void CopyCharsUnsigned(uint16_t* dest, const uint8_t* src,
+ size_t chars);
+V8_INLINE void CopyCharsUnsigned(uint16_t* dest, const uint16_t* src,
+ size_t chars);
+#elif defined(V8_HOST_ARCH_MIPS)
+V8_INLINE void CopyCharsUnsigned(uint8_t* dest, const uint8_t* src,
+ size_t chars);
+V8_INLINE void CopyCharsUnsigned(uint16_t* dest, const uint16_t* src,
+ size_t chars);
+#elif defined(V8_HOST_ARCH_PPC) || defined(V8_HOST_ARCH_S390)
+V8_INLINE void CopyCharsUnsigned(uint8_t* dest, const uint8_t* src,
+ size_t chars);
+V8_INLINE void CopyCharsUnsigned(uint16_t* dest, const uint16_t* src,
+ size_t chars);
+#endif
+
+// Copy from 8bit/16bit chars to 8bit/16bit chars.
+template <typename sourcechar, typename sinkchar>
+V8_INLINE void CopyChars(sinkchar* dest, const sourcechar* src, size_t chars);
+
+template <typename sourcechar, typename sinkchar>
+void CopyChars(sinkchar* dest, const sourcechar* src, size_t chars) {
+ DCHECK_LE(sizeof(sourcechar), 2);
+ DCHECK_LE(sizeof(sinkchar), 2);
+ if (sizeof(sinkchar) == 1) {
+ if (sizeof(sourcechar) == 1) {
+ CopyCharsUnsigned(reinterpret_cast<uint8_t*>(dest),
+ reinterpret_cast<const uint8_t*>(src), chars);
+ } else {
+ CopyCharsUnsigned(reinterpret_cast<uint8_t*>(dest),
+ reinterpret_cast<const uint16_t*>(src), chars);
+ }
+ } else {
+ if (sizeof(sourcechar) == 1) {
+ CopyCharsUnsigned(reinterpret_cast<uint16_t*>(dest),
+ reinterpret_cast<const uint8_t*>(src), chars);
+ } else {
+ CopyCharsUnsigned(reinterpret_cast<uint16_t*>(dest),
+ reinterpret_cast<const uint16_t*>(src), chars);
+ }
+ }
+}
+
+template <typename sourcechar, typename sinkchar>
+void CopyCharsUnsigned(sinkchar* dest, const sourcechar* src, size_t chars) {
+ sinkchar* limit = dest + chars;
+ if ((sizeof(*dest) == sizeof(*src)) &&
+ (chars >= static_cast<int>(kMinComplexMemCopy / sizeof(*dest)))) {
+ MemCopy(dest, src, chars * sizeof(*dest));
+ } else {
+ while (dest < limit) *dest++ = static_cast<sinkchar>(*src++);
+ }
+}
+
+#if defined(V8_HOST_ARCH_ARM)
+void CopyCharsUnsigned(uint8_t* dest, const uint8_t* src, size_t chars) {
+ switch (static_cast<unsigned>(chars)) {
+ case 0:
+ break;
+ case 1:
+ *dest = *src;
+ break;
+ case 2:
+ memcpy(dest, src, 2);
+ break;
+ case 3:
+ memcpy(dest, src, 3);
+ break;
+ case 4:
+ memcpy(dest, src, 4);
+ break;
+ case 5:
+ memcpy(dest, src, 5);
+ break;
+ case 6:
+ memcpy(dest, src, 6);
+ break;
+ case 7:
+ memcpy(dest, src, 7);
+ break;
+ case 8:
+ memcpy(dest, src, 8);
+ break;
+ case 9:
+ memcpy(dest, src, 9);
+ break;
+ case 10:
+ memcpy(dest, src, 10);
+ break;
+ case 11:
+ memcpy(dest, src, 11);
+ break;
+ case 12:
+ memcpy(dest, src, 12);
+ break;
+ case 13:
+ memcpy(dest, src, 13);
+ break;
+ case 14:
+ memcpy(dest, src, 14);
+ break;
+ case 15:
+ memcpy(dest, src, 15);
+ break;
+ default:
+ MemCopy(dest, src, chars);
+ break;
+ }
+}
+
+void CopyCharsUnsigned(uint16_t* dest, const uint8_t* src, size_t chars) {
+ if (chars >= static_cast<size_t>(kMinComplexConvertMemCopy)) {
+ MemCopyUint16Uint8(dest, src, chars);
+ } else {
+ MemCopyUint16Uint8Wrapper(dest, src, chars);
+ }
+}
+
+void CopyCharsUnsigned(uint16_t* dest, const uint16_t* src, size_t chars) {
+ switch (static_cast<unsigned>(chars)) {
+ case 0:
+ break;
+ case 1:
+ *dest = *src;
+ break;
+ case 2:
+ memcpy(dest, src, 4);
+ break;
+ case 3:
+ memcpy(dest, src, 6);
+ break;
+ case 4:
+ memcpy(dest, src, 8);
+ break;
+ case 5:
+ memcpy(dest, src, 10);
+ break;
+ case 6:
+ memcpy(dest, src, 12);
+ break;
+ case 7:
+ memcpy(dest, src, 14);
+ break;
+ default:
+ MemCopy(dest, src, chars * sizeof(*dest));
+ break;
+ }
+}
+
+#elif defined(V8_HOST_ARCH_MIPS)
+void CopyCharsUnsigned(uint8_t* dest, const uint8_t* src, size_t chars) {
+ if (chars < kMinComplexMemCopy) {
+ memcpy(dest, src, chars);
+ } else {
+ MemCopy(dest, src, chars);
+ }
+}
+
+void CopyCharsUnsigned(uint16_t* dest, const uint16_t* src, size_t chars) {
+ if (chars < kMinComplexMemCopy) {
+ memcpy(dest, src, chars * sizeof(*dest));
+ } else {
+ MemCopy(dest, src, chars * sizeof(*dest));
+ }
+}
+#elif defined(V8_HOST_ARCH_PPC) || defined(V8_HOST_ARCH_S390)
+#define CASE(n) \
+ case n: \
+ memcpy(dest, src, n); \
+ break
+void CopyCharsUnsigned(uint8_t* dest, const uint8_t* src, size_t chars) {
+ switch (static_cast<unsigned>(chars)) {
+ case 0:
+ break;
+ case 1:
+ *dest = *src;
+ break;
+ CASE(2);
+ CASE(3);
+ CASE(4);
+ CASE(5);
+ CASE(6);
+ CASE(7);
+ CASE(8);
+ CASE(9);
+ CASE(10);
+ CASE(11);
+ CASE(12);
+ CASE(13);
+ CASE(14);
+ CASE(15);
+ CASE(16);
+ CASE(17);
+ CASE(18);
+ CASE(19);
+ CASE(20);
+ CASE(21);
+ CASE(22);
+ CASE(23);
+ CASE(24);
+ CASE(25);
+ CASE(26);
+ CASE(27);
+ CASE(28);
+ CASE(29);
+ CASE(30);
+ CASE(31);
+ CASE(32);
+ CASE(33);
+ CASE(34);
+ CASE(35);
+ CASE(36);
+ CASE(37);
+ CASE(38);
+ CASE(39);
+ CASE(40);
+ CASE(41);
+ CASE(42);
+ CASE(43);
+ CASE(44);
+ CASE(45);
+ CASE(46);
+ CASE(47);
+ CASE(48);
+ CASE(49);
+ CASE(50);
+ CASE(51);
+ CASE(52);
+ CASE(53);
+ CASE(54);
+ CASE(55);
+ CASE(56);
+ CASE(57);
+ CASE(58);
+ CASE(59);
+ CASE(60);
+ CASE(61);
+ CASE(62);
+ CASE(63);
+ CASE(64);
+ default:
+ memcpy(dest, src, chars);
+ break;
+ }
+}
+#undef CASE
+
+#define CASE(n) \
+ case n: \
+ memcpy(dest, src, n * 2); \
+ break
+void CopyCharsUnsigned(uint16_t* dest, const uint16_t* src, size_t chars) {
+ switch (static_cast<unsigned>(chars)) {
+ case 0:
+ break;
+ case 1:
+ *dest = *src;
+ break;
+ CASE(2);
+ CASE(3);
+ CASE(4);
+ CASE(5);
+ CASE(6);
+ CASE(7);
+ CASE(8);
+ CASE(9);
+ CASE(10);
+ CASE(11);
+ CASE(12);
+ CASE(13);
+ CASE(14);
+ CASE(15);
+ CASE(16);
+ CASE(17);
+ CASE(18);
+ CASE(19);
+ CASE(20);
+ CASE(21);
+ CASE(22);
+ CASE(23);
+ CASE(24);
+ CASE(25);
+ CASE(26);
+ CASE(27);
+ CASE(28);
+ CASE(29);
+ CASE(30);
+ CASE(31);
+ CASE(32);
+ default:
+ memcpy(dest, src, chars * 2);
+ break;
+ }
+}
+#undef CASE
+#endif
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_MEMCOPY_H_
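
A pattern repeated throughout memcopy.h: below a per-architecture threshold (kMinComplexMemCopy), a simple inlinable loop beats the call/setup overhead of the optimized routine, so copies dispatch on size. A self-contained sketch of that threshold dispatch, with an illustrative threshold value:

    #include <cstddef>
    #include <cstring>
    #include <iostream>

    const size_t kMinComplexMemCopy = 16;  // illustrative threshold

    void CopyBytes(char* dst, const char* src, size_t num_bytes) {
      if (num_bytes == 0) return;
      if (num_bytes < kMinComplexMemCopy) {
        do {
          num_bytes--;
          *dst++ = *src++;  // short copies: plain loop wins
        } while (num_bytes > 0);
      } else {
        memcpy(dst, src, num_bytes);  // long copies: bulk routine wins
      }
    }

    int main() {
      const char src[] = "hello, memcopy";
      char dst[sizeof(src)] = {};
      CopyBytes(dst, src, sizeof(src));
      std::cout << dst << "\n";
    }
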
diff --git a/deps/v8/src/message-template.h b/deps/v8/src/message-template.h
new file mode 100644
index 0000000000..05caf14ac8
--- /dev/null
+++ b/deps/v8/src/message-template.h
@@ -0,0 +1,576 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_MESSAGE_TEMPLATE_H_
+#define V8_MESSAGE_TEMPLATE_H_
+
+#include "src/base/logging.h"
+
+namespace v8 {
+namespace internal {
+
+// TODO(913887): fix the use of 'neuter' in these error messages.
+#define MESSAGE_TEMPLATES(T) \
+ /* Error */ \
+ T(None, "") \
+ T(CyclicProto, "Cyclic __proto__ value") \
+ T(Debugger, "Debugger: %") \
+ T(DebuggerLoading, "Error loading debugger") \
+ T(DefaultOptionsMissing, "Internal % error. Default options are missing.") \
+  T(DeletePrivateField, "Private fields cannot be deleted")                   \
+ T(UncaughtException, "Uncaught %") \
+ T(Unsupported, "Not supported") \
+ T(WrongServiceType, "Internal error, wrong service type: %") \
+ T(WrongValueType, "Internal error. Wrong value type.") \
+ T(IcuError, "Internal error. Icu error.") \
+ /* TypeError */ \
+ T(ApplyNonFunction, \
+ "Function.prototype.apply was called on %, which is a % and not a " \
+ "function") \
+ T(ArgumentsDisallowedInInitializer, \
+ "'arguments' is not allowed in class field initializer") \
+ T(ArrayBufferTooShort, \
+ "Derived ArrayBuffer constructor created a buffer which was too small") \
+ T(ArrayBufferSpeciesThis, \
+ "ArrayBuffer subclass returned this from species constructor") \
+ T(ArrayItemNotType, "array %[%] is not type %") \
+ T(AwaitNotInAsyncFunction, "await is only valid in async function") \
+ T(AtomicsWaitNotAllowed, "Atomics.wait cannot be called in this context") \
+ T(BadSortComparisonFunction, \
+ "The comparison function must be either a function or undefined") \
+ T(BigIntFromNumber, \
+ "The number % cannot be converted to a BigInt because it is not an " \
+ "integer") \
+ T(BigIntFromObject, "Cannot convert % to a BigInt") \
+ T(BigIntMixedTypes, \
+ "Cannot mix BigInt and other types, use explicit conversions") \
+ T(BigIntSerializeJSON, "Do not know how to serialize a BigInt") \
+ T(BigIntShr, "BigInts have no unsigned right shift, use >> instead") \
+ T(BigIntToNumber, "Cannot convert a BigInt value to a number") \
+ T(CalledNonCallable, "% is not a function") \
+ T(CalledOnNonObject, "% called on non-object") \
+ T(CalledOnNullOrUndefined, "% called on null or undefined") \
+ T(CallSiteExpectsFunction, \
+ "CallSite expects wasm object as first or function as second argument, " \
+ "got <%, %>") \
+ T(CallSiteMethod, "CallSite method % expects CallSite as receiver") \
+ T(CannotConvertToPrimitive, "Cannot convert object to primitive value") \
+ T(CannotPreventExt, "Cannot prevent extensions") \
+ T(CannotFreeze, "Cannot freeze") \
+ T(CannotFreezeArrayBufferView, \
+ "Cannot freeze array buffer views with elements") \
+ T(CannotSeal, "Cannot seal") \
+ T(CircularStructure, "Converting circular structure to JSON") \
+ T(ConstructAbstractClass, "Abstract class % not directly constructable") \
+ T(ConstAssign, "Assignment to constant variable.") \
+ T(ConstructorClassField, "Classes may not have a field named 'constructor'") \
+ T(ConstructorNonCallable, \
+ "Class constructor % cannot be invoked without 'new'") \
+ T(ConstructorNotFunction, "Constructor % requires 'new'") \
+ T(ConstructorNotReceiver, "The .constructor property is not an object") \
+ T(CurrencyCode, "Currency code is required with currency style.") \
+ T(CyclicModuleDependency, "Detected cycle while resolving name '%' in '%'") \
+ T(DataViewNotArrayBuffer, \
+ "First argument to DataView constructor must be an ArrayBuffer") \
+ T(DateType, "this is not a Date object.") \
+ T(DebuggerFrame, "Debugger: Invalid frame index.") \
+ T(DebuggerType, "Debugger: Parameters have wrong types.") \
+ T(DeclarationMissingInitializer, "Missing initializer in % declaration") \
+ T(DefineDisallowed, "Cannot define property %, object is not extensible") \
+ T(DetachedOperation, "Cannot perform % on a neutered ArrayBuffer") \
+ T(DuplicateTemplateProperty, "Object template has duplicate property '%'") \
+ T(ExtendsValueNotConstructor, \
+ "Class extends value % is not a constructor or null") \
+ T(FirstArgumentNotRegExp, \
+ "First argument to % must not be a regular expression") \
+ T(FunctionBind, "Bind must be called on a function") \
+ T(GeneratorRunning, "Generator is already running") \
+ T(IllegalInvocation, "Illegal invocation") \
+ T(ImmutablePrototypeSet, \
+ "Immutable prototype object '%' cannot have their prototype set") \
+ T(ImportCallNotNewExpression, "Cannot use new with import") \
+ T(ImportMetaOutsideModule, "Cannot use 'import.meta' outside a module") \
+ T(ImportMissingSpecifier, "import() requires a specifier") \
+ T(IncompatibleMethodReceiver, "Method % called on incompatible receiver %") \
+ T(InstanceofNonobjectProto, \
+ "Function has non-object prototype '%' in instanceof check") \
+ T(InvalidArgument, "invalid_argument") \
+ T(InvalidInOperatorUse, "Cannot use 'in' operator to search for '%' in %") \
+ T(InvalidRegExpExecResult, \
+ "RegExp exec method returned something other than an Object or null") \
+ T(InvalidUnit, "Invalid unit argument for %() '%'") \
+ T(IteratorResultNotAnObject, "Iterator result % is not an object") \
+ T(IteratorSymbolNonCallable, "Found non-callable @@iterator") \
+ T(IteratorValueNotAnObject, "Iterator value % is not an entry object") \
+ T(LanguageID, "Language ID should be string or object.") \
+ T(LocaleNotEmpty, \
+ "First argument to Intl.Locale constructor can't be empty or missing") \
+ T(LocaleBadParameters, "Incorrect locale information provided") \
+ T(ListFormatBadParameters, "Incorrect ListFormat information provided") \
+ T(MapperFunctionNonCallable, "flatMap mapper function is not callable") \
+ T(MethodCalledOnWrongObject, \
+ "Method % called on a non-object or on a wrong type of object.") \
+ T(MethodInvokedOnNullOrUndefined, \
+ "Method invoked on undefined or null value.") \
+ T(MethodInvokedOnWrongType, "Method invoked on an object that is not %.") \
+ T(NoAccess, "no access") \
+ T(NonCallableInInstanceOfCheck, \
+ "Right-hand side of 'instanceof' is not callable") \
+ T(NonCoercible, "Cannot destructure 'undefined' or 'null'.") \
+ T(NonCoercibleWithProperty, \
+ "Cannot destructure property `%` of 'undefined' or 'null'.") \
+ T(NonExtensibleProto, "% is not extensible") \
+ T(NonObjectInInstanceOfCheck, \
+ "Right-hand side of 'instanceof' is not an object") \
+ T(NonObjectPropertyLoad, "Cannot read property '%' of %") \
+ T(NonObjectPropertyStore, "Cannot set property '%' of %") \
+ T(NoSetterInCallback, "Cannot set property % of % which has only a getter") \
+ T(NotAnIterator, "% is not an iterator") \
+ T(NotAPromise, "% is not a promise") \
+ T(NotConstructor, "% is not a constructor") \
+ T(NotDateObject, "this is not a Date object.") \
+ T(NotGeneric, "% requires that 'this' be a %") \
+ T(NotCallableOrIterable, \
+ "% is not a function or its return value is not iterable") \
+ T(NotCallableOrAsyncIterable, \
+ "% is not a function or its return value is not async iterable") \
+  T(NotFiniteNumber, "Value needs to be a finite number for %()")             \
+ T(NotIterable, "% is not iterable") \
+ T(NotIterableNoSymbolLoad, "% is not iterable (cannot read property %)") \
+ T(NotAsyncIterable, "% is not async iterable") \
+ T(NotPropertyName, "% is not a valid property name") \
+ T(NotTypedArray, "this is not a typed array.") \
+ T(NotSuperConstructor, "Super constructor % of % is not a constructor") \
+ T(NotSuperConstructorAnonymousClass, \
+ "Super constructor % of anonymous class is not a constructor") \
+ T(NotIntegerSharedTypedArray, "% is not an integer shared typed array.") \
+ T(NotInt32SharedTypedArray, "% is not an int32 shared typed array.") \
+ T(ObjectGetterExpectingFunction, \
+ "Object.prototype.__defineGetter__: Expecting function") \
+ T(ObjectGetterCallable, "Getter must be a function: %") \
+ T(ObjectNotExtensible, "Cannot add property %, object is not extensible") \
+ T(ObjectSetterExpectingFunction, \
+ "Object.prototype.__defineSetter__: Expecting function") \
+ T(ObjectSetterCallable, "Setter must be a function: %") \
+ T(OrdinaryFunctionCalledAsConstructor, \
+ "Function object that's not a constructor was created with new") \
+ T(PromiseCyclic, "Chaining cycle detected for promise %") \
+ T(PromiseExecutorAlreadyInvoked, \
+ "Promise executor has already been invoked with non-undefined arguments") \
+ T(PromiseNonCallable, "Promise resolve or reject function is not callable") \
+ T(PropertyDescObject, "Property description must be an object: %") \
+ T(PropertyNotFunction, \
+ "'%' returned for property '%' of object '%' is not a function") \
+ T(ProtoObjectOrNull, "Object prototype may only be an Object or null: %") \
+ T(PrototypeParentNotAnObject, \
+ "Class extends value does not have valid prototype property %") \
+ T(ProxyConstructNonObject, \
+ "'construct' on proxy: trap returned non-object ('%')") \
+ T(ProxyDefinePropertyNonConfigurable, \
+ "'defineProperty' on proxy: trap returned truish for defining " \
+ "non-configurable property '%' which is either non-existent or " \
+ "configurable in the proxy target") \
+ T(ProxyDefinePropertyNonExtensible, \
+ "'defineProperty' on proxy: trap returned truish for adding property '%' " \
+ " to the non-extensible proxy target") \
+ T(ProxyDefinePropertyIncompatible, \
+ "'defineProperty' on proxy: trap returned truish for adding property '%' " \
+ " that is incompatible with the existing property in the proxy target") \
+ T(ProxyDeletePropertyNonConfigurable, \
+ "'deleteProperty' on proxy: trap returned truish for property '%' which " \
+ "is non-configurable in the proxy target") \
+ T(ProxyGetNonConfigurableData, \
+ "'get' on proxy: property '%' is a read-only and " \
+ "non-configurable data property on the proxy target but the proxy " \
+ "did not return its actual value (expected '%' but got '%')") \
+ T(ProxyGetNonConfigurableAccessor, \
+ "'get' on proxy: property '%' is a non-configurable accessor " \
+ "property on the proxy target and does not have a getter function, but " \
+ "the trap did not return 'undefined' (got '%')") \
+ T(ProxyGetOwnPropertyDescriptorIncompatible, \
+ "'getOwnPropertyDescriptor' on proxy: trap returned descriptor for " \
+ "property '%' that is incompatible with the existing property in the " \
+ "proxy target") \
+ T(ProxyGetOwnPropertyDescriptorInvalid, \
+ "'getOwnPropertyDescriptor' on proxy: trap returned neither object nor " \
+ "undefined for property '%'") \
+ T(ProxyGetOwnPropertyDescriptorNonConfigurable, \
+ "'getOwnPropertyDescriptor' on proxy: trap reported non-configurability " \
+ "for property '%' which is either non-existent or configurable in the " \
+ "proxy target") \
+ T(ProxyGetOwnPropertyDescriptorNonExtensible, \
+ "'getOwnPropertyDescriptor' on proxy: trap returned undefined for " \
+ "property '%' which exists in the non-extensible proxy target") \
+ T(ProxyGetOwnPropertyDescriptorUndefined, \
+ "'getOwnPropertyDescriptor' on proxy: trap returned undefined for " \
+ "property '%' which is non-configurable in the proxy target") \
+ T(ProxyGetPrototypeOfInvalid, \
+ "'getPrototypeOf' on proxy: trap returned neither object nor null") \
+ T(ProxyGetPrototypeOfNonExtensible, \
+ "'getPrototypeOf' on proxy: proxy target is non-extensible but the " \
+ "trap did not return its actual prototype") \
+ T(ProxyHandlerOrTargetRevoked, \
+ "Cannot create proxy with a revoked proxy as target or handler") \
+ T(ProxyHasNonConfigurable, \
+ "'has' on proxy: trap returned falsish for property '%' which exists in " \
+ "the proxy target as non-configurable") \
+ T(ProxyHasNonExtensible, \
+ "'has' on proxy: trap returned falsish for property '%' but the proxy " \
+ "target is not extensible") \
+ T(ProxyIsExtensibleInconsistent, \
+ "'isExtensible' on proxy: trap result does not reflect extensibility of " \
+ "proxy target (which is '%')") \
+ T(ProxyNonObject, \
+ "Cannot create proxy with a non-object as target or handler") \
+ T(ProxyOwnKeysMissing, \
+ "'ownKeys' on proxy: trap result did not include '%'") \
+ T(ProxyOwnKeysNonExtensible, \
+ "'ownKeys' on proxy: trap returned extra keys but proxy target is " \
+ "non-extensible") \
+ T(ProxyPreventExtensionsExtensible, \
+ "'preventExtensions' on proxy: trap returned truish but the proxy target " \
+ "is extensible") \
+ T(ProxyPrivate, "Cannot pass private property name to proxy trap") \
+ T(ProxyRevoked, "Cannot perform '%' on a proxy that has been revoked") \
+ T(ProxySetFrozenData, \
+ "'set' on proxy: trap returned truish for property '%' which exists in " \
+ "the proxy target as a non-configurable and non-writable data property " \
+ "with a different value") \
+ T(ProxySetFrozenAccessor, \
+ "'set' on proxy: trap returned truish for property '%' which exists in " \
+ "the proxy target as a non-configurable and non-writable accessor " \
+ "property without a setter") \
+ T(ProxySetPrototypeOfNonExtensible, \
+ "'setPrototypeOf' on proxy: trap returned truish for setting a new " \
+ "prototype on the non-extensible proxy target") \
+ T(ProxyTrapReturnedFalsish, "'%' on proxy: trap returned falsish") \
+ T(ProxyTrapReturnedFalsishFor, \
+ "'%' on proxy: trap returned falsish for property '%'") \
+ T(RedefineDisallowed, "Cannot redefine property: %") \
+ T(RedefineExternalArray, \
+ "Cannot redefine a property of an object with external array elements") \
+ T(ReduceNoInitial, "Reduce of empty array with no initial value") \
+ T(RegExpFlags, \
+ "Cannot supply flags when constructing one RegExp from another") \
+ T(RegExpNonObject, "% getter called on non-object %") \
+ T(RegExpNonRegExp, "% getter called on non-RegExp object") \
+ T(RelativeDateTimeFormatterBadParameters, \
+ "Incorrect RelativeDateTimeFormatter provided") \
+ T(ResolverNotAFunction, "Promise resolver % is not a function") \
+ T(ReturnMethodNotCallable, "The iterator's 'return' method is not callable") \
+ T(SharedArrayBufferTooShort, \
+ "Derived SharedArrayBuffer constructor created a buffer which was too " \
+ "small") \
+ T(SharedArrayBufferSpeciesThis, \
+ "SharedArrayBuffer subclass returned this from species constructor") \
+ T(StaticPrototype, \
+ "Classes may not have a static property named 'prototype'") \
+ T(StrictDeleteProperty, "Cannot delete property '%' of %") \
+ T(StrictPoisonPill, \
+ "'caller', 'callee', and 'arguments' properties may not be accessed on " \
+ "strict mode functions or the arguments objects for calls to them") \
+ T(StrictReadOnlyProperty, \
+ "Cannot assign to read only property '%' of % '%'") \
+ T(StrictCannotCreateProperty, "Cannot create property '%' on % '%'") \
+ T(SymbolIteratorInvalid, \
+ "Result of the Symbol.iterator method is not an object") \
+ T(SymbolAsyncIteratorInvalid, \
+ "Result of the Symbol.asyncIterator method is not an object") \
+ T(SymbolKeyFor, "% is not a symbol") \
+ T(SymbolToNumber, "Cannot convert a Symbol value to a number") \
+ T(SymbolToString, "Cannot convert a Symbol value to a string") \
+ T(ThrowMethodMissing, "The iterator does not provide a 'throw' method.") \
+ T(UndefinedOrNullToObject, "Cannot convert undefined or null to object") \
+ T(ValueAndAccessor, \
+ "Invalid property descriptor. Cannot both specify accessors and a value " \
+ "or writable attribute, %") \
+ T(VarRedeclaration, "Identifier '%' has already been declared") \
+ T(WrongArgs, "%: Arguments list has wrong type") \
+ /* ReferenceError */ \
+ T(NotDefined, "% is not defined") \
+ T(SuperAlreadyCalled, "Super constructor may only be called once") \
+ T(UnsupportedSuper, "Unsupported reference to 'super'") \
+ /* RangeError */ \
+ T(BigIntDivZero, "Division by zero") \
+ T(BigIntNegativeExponent, "Exponent must be positive") \
+ T(BigIntTooBig, "Maximum BigInt size exceeded") \
+ T(DateRange, "Provided date is not in valid range.") \
+ T(ExpectedLocation, \
+ "Expected letters optionally connected with underscores or hyphens for " \
+ "a location, got %") \
+ T(InvalidArrayBufferLength, "Invalid array buffer length") \
+ T(ArrayBufferAllocationFailed, "Array buffer allocation failed") \
+ T(InvalidArrayLength, "Invalid array length") \
+ T(InvalidAtomicAccessIndex, "Invalid atomic access index") \
+ T(InvalidCodePoint, "Invalid code point %") \
+ T(InvalidCountValue, "Invalid count value") \
+ T(InvalidCurrencyCode, "Invalid currency code: %") \
+ T(InvalidDataViewAccessorOffset, \
+ "Offset is outside the bounds of the DataView") \
+ T(InvalidDataViewLength, "Invalid DataView length %") \
+ T(InvalidOffset, "Start offset % is outside the bounds of the buffer") \
+ T(InvalidHint, "Invalid hint: %") \
+ T(InvalidIndex, "Invalid value: not (convertible to) a safe integer") \
+ T(InvalidLanguageTag, "Invalid language tag: %") \
+ T(InvalidWeakMapKey, "Invalid value used as weak map key") \
+ T(InvalidWeakSetValue, "Invalid value used in weak set") \
+ T(InvalidStringLength, "Invalid string length") \
+ T(InvalidTimeValue, "Invalid time value") \
+ T(InvalidTimeZone, "Invalid time zone specified: %") \
+ T(InvalidTypedArrayAlignment, "% of % should be a multiple of %") \
+ T(InvalidTypedArrayIndex, "Invalid typed array index") \
+ T(InvalidTypedArrayLength, "Invalid typed array length: %") \
+ T(IllegalTypeWhileStyleNarrow, \
+ "When style is 'narrow', 'unit' is the only allowed value for the type " \
+ "option.") \
+ T(LetInLexicalBinding, "let is disallowed as a lexically bound name") \
+  T(LocaleMatcher, "Illegal value for localeMatcher: %")                      \
+ T(NormalizationForm, "The normalization form should be one of %.") \
+ T(ParameterOfFunctionOutOfRange, \
+ "Paramenter % of function %() is % and out of range") \
+ T(ZeroDigitNumericSeparator, \
+ "Numeric separator can not be used after leading 0.") \
+ T(NumberFormatRange, "% argument must be between 0 and 100") \
+ T(TrailingNumericSeparator, \
+ "Numeric separators are not allowed at the end of numeric literals") \
+ T(ContinuousNumericSeparator, \
+ "Only one underscore is allowed as numeric separator") \
+ T(PropertyValueOutOfRange, "% value is out of range.") \
+ T(StackOverflow, "Maximum call stack size exceeded") \
+ T(ToPrecisionFormatRange, \
+ "toPrecision() argument must be between 1 and 100") \
+ T(ToRadixFormatRange, "toString() radix argument must be between 2 and 36") \
+ T(TypedArraySetOffsetOutOfBounds, "offset is out of bounds") \
+ T(TypedArraySetSourceTooLarge, "Source is too large") \
+ T(ValueOutOfRange, "Value % out of range for % options property %") \
+ /* SyntaxError */ \
+ T(AmbiguousExport, \
+ "The requested module '%' contains conflicting star exports for name '%'") \
+ T(BadGetterArity, "Getter must not have any formal parameters.") \
+ T(BadSetterArity, "Setter must have exactly one formal parameter.") \
+ T(BigIntInvalidString, "Invalid BigInt string") \
+ T(ConstructorIsAccessor, "Class constructor may not be an accessor") \
+ T(ConstructorIsGenerator, "Class constructor may not be a generator") \
+ T(ConstructorIsAsync, "Class constructor may not be an async method") \
+ T(ConstructorIsPrivate, "Class constructor may not be a private method") \
+ T(DerivedConstructorReturnedNonObject, \
+ "Derived constructors may only return object or undefined") \
+ T(DuplicateConstructor, "A class may only have one constructor") \
+ T(DuplicateExport, "Duplicate export of '%'") \
+ T(DuplicateProto, \
+ "Duplicate __proto__ fields are not allowed in object literals") \
+ T(ForInOfLoopInitializer, \
+ "% loop variable declaration may not have an initializer.") \
+ T(ForInOfLoopMultiBindings, \
+ "Invalid left-hand side in % loop: Must have a single binding.") \
+ T(GeneratorInSingleStatementContext, \
+ "Generators can only be declared at the top level or inside a block.") \
+ T(AsyncFunctionInSingleStatementContext, \
+ "Async functions can only be declared at the top level or inside a " \
+ "block.") \
+ T(IllegalBreak, "Illegal break statement") \
+ T(NoIterationStatement, \
+ "Illegal continue statement: no surrounding iteration statement") \
+ T(IllegalContinue, \
+ "Illegal continue statement: '%' does not denote an iteration statement") \
+ T(IllegalLanguageModeDirective, \
+ "Illegal '%' directive in function with non-simple parameter list") \
+ T(IllegalReturn, "Illegal return statement") \
+ T(IntrinsicWithSpread, "Intrinsic calls do not support spread arguments") \
+ T(InvalidRestBindingPattern, \
+ "`...` must be followed by an identifier in declaration contexts") \
+ T(InvalidPropertyBindingPattern, "Illegal property in declaration context") \
+ T(InvalidRestAssignmentPattern, \
+ "`...` must be followed by an assignable reference in assignment " \
+ "contexts") \
+ T(InvalidEscapedReservedWord, "Keyword must not contain escaped characters") \
+ T(InvalidEscapedMetaProperty, "'%' must not contain escaped characters") \
+ T(InvalidLhsInAssignment, "Invalid left-hand side in assignment") \
+ T(InvalidCoverInitializedName, "Invalid shorthand property initializer") \
+ T(InvalidDestructuringTarget, "Invalid destructuring assignment target") \
+ T(InvalidLhsInFor, "Invalid left-hand side in for-loop") \
+ T(InvalidLhsInPostfixOp, \
+ "Invalid left-hand side expression in postfix operation") \
+ T(InvalidLhsInPrefixOp, \
+ "Invalid left-hand side expression in prefix operation") \
+ T(InvalidRegExpFlags, "Invalid flags supplied to RegExp constructor '%'") \
+ T(InvalidOrUnexpectedToken, "Invalid or unexpected token") \
+ T(InvalidPrivateFieldResolution, \
+ "Undefined private field %: must be declared in an enclosing class") \
+ T(InvalidPrivateFieldRead, \
+ "Read of private field % from an object which did not contain the field") \
+ T(InvalidPrivateFieldWrite, \
+ "Write of private field % to an object which did not contain the field") \
+ T(JsonParseUnexpectedEOS, "Unexpected end of JSON input") \
+ T(JsonParseUnexpectedToken, "Unexpected token % in JSON at position %") \
+ T(JsonParseUnexpectedTokenNumber, "Unexpected number in JSON at position %") \
+ T(JsonParseUnexpectedTokenString, "Unexpected string in JSON at position %") \
+ T(LabelRedeclaration, "Label '%' has already been declared") \
+ T(LabelledFunctionDeclaration, \
+ "Labelled function declaration not allowed as the body of a control flow " \
+ "structure") \
+ T(MalformedArrowFunParamList, "Malformed arrow function parameter list") \
+ T(MalformedRegExp, "Invalid regular expression: /%/: %") \
+ T(MalformedRegExpFlags, "Invalid regular expression flags") \
+ T(ModuleExportUndefined, "Export '%' is not defined in module") \
+ T(HtmlCommentInModule, "HTML comments are not allowed in modules") \
+ T(MultipleDefaultsInSwitch, \
+ "More than one default clause in switch statement") \
+ T(NewlineAfterThrow, "Illegal newline after throw") \
+ T(NoCatchOrFinally, "Missing catch or finally after try") \
+ T(ParamAfterRest, "Rest parameter must be last formal parameter") \
+ T(FlattenPastSafeLength, \
+ "Flattening % elements on an array-like of length % " \
+ "is disallowed, as the total surpasses 2**53-1") \
+ T(PushPastSafeLength, \
+ "Pushing % elements on an array-like of length % " \
+ "is disallowed, as the total surpasses 2**53-1") \
+ T(ElementAfterRest, "Rest element must be last element") \
+ T(BadSetterRestParameter, \
+ "Setter function argument must not be a rest parameter") \
+ T(ParamDupe, "Duplicate parameter name not allowed in this context") \
+  T(ParenthesisInArgString, "Function arg string contains parentheses")      \
+ T(ArgStringTerminatesParametersEarly, \
+ "Arg string terminates parameters early") \
+ T(UnexpectedEndOfArgString, "Unexpected end of arg string") \
+ T(RestDefaultInitializer, \
+ "Rest parameter may not have a default initializer") \
+ T(RuntimeWrongNumArgs, "Runtime function given wrong number of arguments") \
+ T(SuperNotCalled, \
+ "Must call super constructor in derived class before accessing 'this' or " \
+ "returning from derived constructor") \
+ T(SingleFunctionLiteral, "Single function literal required") \
+ T(SloppyFunction, \
+ "In non-strict mode code, functions can only be declared at top level, " \
+ "inside a block, or as the body of an if statement.") \
+ T(SpeciesNotConstructor, \
+ "object.constructor[Symbol.species] is not a constructor") \
+ T(StrictDelete, "Delete of an unqualified identifier in strict mode.") \
+ T(StrictEvalArguments, "Unexpected eval or arguments in strict mode") \
+ T(StrictFunction, \
+ "In strict mode code, functions can only be declared at top level or " \
+ "inside a block.") \
+ T(StrictOctalLiteral, "Octal literals are not allowed in strict mode.") \
+ T(StrictDecimalWithLeadingZero, \
+ "Decimals with leading zeros are not allowed in strict mode.") \
+ T(StrictOctalEscape, \
+ "Octal escape sequences are not allowed in strict mode.") \
+ T(StrictWith, "Strict mode code may not include a with statement") \
+ T(TemplateOctalLiteral, \
+ "Octal escape sequences are not allowed in template strings.") \
+ T(ThisFormalParameter, "'this' is not a valid formal parameter name") \
+ T(AwaitBindingIdentifier, \
+ "'await' is not a valid identifier name in an async function") \
+ T(AwaitExpressionFormalParameter, \
+ "Illegal await-expression in formal parameters of async function") \
+ T(TooManyArguments, \
+ "Too many arguments in function call (only 65535 allowed)") \
+ T(TooManyParameters, \
+ "Too many parameters in function definition (only 65534 allowed)") \
+ T(TooManySpreads, \
+ "Literal containing too many nested spreads (up to 65534 allowed)") \
+ T(TooManyVariables, "Too many variables declared (only 4194303 allowed)") \
+ T(TooManyElementsInPromiseAll, "Too many elements passed to Promise.all") \
+ T(TypedArrayTooShort, \
+ "Derived TypedArray constructor created an array which was too small") \
+ T(UnexpectedEOS, "Unexpected end of input") \
+ T(UnexpectedReserved, "Unexpected reserved word") \
+ T(UnexpectedStrictReserved, "Unexpected strict mode reserved word") \
+ T(UnexpectedSuper, "'super' keyword unexpected here") \
+ T(UnexpectedNewTarget, "new.target expression is not allowed here") \
+ T(UnexpectedTemplateString, "Unexpected template string") \
+ T(UnexpectedToken, "Unexpected token %") \
+ T(UnexpectedTokenUnaryExponentiation, \
+ "Unary operator used immediately before exponentiation expression. " \
+ "Parenthesis must be used to disambiguate operator precedence") \
+ T(UnexpectedTokenIdentifier, "Unexpected identifier") \
+ T(UnexpectedTokenNumber, "Unexpected number") \
+ T(UnexpectedTokenString, "Unexpected string") \
+ T(UnexpectedTokenRegExp, "Unexpected regular expression") \
+ T(UnexpectedLexicalDeclaration, \
+ "Lexical declaration cannot appear in a single-statement context") \
+ T(UnknownLabel, "Undefined label '%'") \
+ T(UnresolvableExport, \
+ "The requested module '%' does not provide an export named '%'") \
+ T(UnterminatedArgList, "missing ) after argument list") \
+ T(UnterminatedRegExp, "Invalid regular expression: missing /") \
+ T(UnterminatedTemplate, "Unterminated template literal") \
+ T(UnterminatedTemplateExpr, "Missing } in template expression") \
+ T(FoundNonCallableHasInstance, "Found non-callable @@hasInstance") \
+ T(InvalidHexEscapeSequence, "Invalid hexadecimal escape sequence") \
+ T(InvalidUnicodeEscapeSequence, "Invalid Unicode escape sequence") \
+ T(UndefinedUnicodeCodePoint, "Undefined Unicode code-point") \
+ T(YieldInParameter, "Yield expression not allowed in formal parameter") \
+ /* EvalError */ \
+ T(CodeGenFromStrings, "%") \
+ T(NoSideEffectDebugEvaluate, "Possible side-effect in debug-evaluate") \
+ /* URIError */ \
+ T(URIMalformed, "URI malformed") \
+ /* Wasm errors (currently Error) */ \
+ T(WasmTrapUnreachable, "unreachable") \
+ T(WasmTrapMemOutOfBounds, "memory access out of bounds") \
+ T(WasmTrapUnalignedAccess, "operation does not support unaligned accesses") \
+ T(WasmTrapDivByZero, "divide by zero") \
+ T(WasmTrapDivUnrepresentable, "divide result unrepresentable") \
+ T(WasmTrapRemByZero, "remainder by zero") \
+ T(WasmTrapFloatUnrepresentable, "float unrepresentable in integer range") \
+ T(WasmTrapFuncInvalid, "invalid index into function table") \
+ T(WasmTrapFuncSigMismatch, "function signature mismatch") \
+ T(WasmTrapTypeError, "wasm function signature contains illegal type") \
+ T(WasmTrapDataSegmentDropped, "data segment has been dropped") \
+ T(WasmTrapElemSegmentDropped, "element segment has been dropped") \
+ T(WasmTrapTableOutOfBounds, "table access out of bounds") \
+ T(WasmExceptionError, "wasm exception") \
+ /* Asm.js validation related */ \
+ T(AsmJsInvalid, "Invalid asm.js: %") \
+ T(AsmJsCompiled, "Converted asm.js to WebAssembly: %") \
+ T(AsmJsInstantiated, "Instantiated asm.js: %") \
+ T(AsmJsLinkingFailed, "Linking failure in asm.js: %") \
+ /* DataCloneError messages */ \
+ T(DataCloneError, "% could not be cloned.") \
+ T(DataCloneErrorOutOfMemory, "Data cannot be cloned, out of memory.") \
+ T(DataCloneErrorDetachedArrayBuffer, \
+ "An ArrayBuffer is neutered and could not be cloned.") \
+ T(DataCloneErrorSharedArrayBufferTransferred, \
+ "A SharedArrayBuffer could not be cloned. SharedArrayBuffer must not be " \
+ "transferred.") \
+ T(DataCloneDeserializationError, "Unable to deserialize cloned data.") \
+ T(DataCloneDeserializationVersionError, \
+ "Unable to deserialize cloned data due to invalid or unsupported " \
+ "version.") \
+ /* Builtins-Trace Errors */ \
+ T(TraceEventCategoryError, "Trace event category must be a string.") \
+ T(TraceEventNameError, "Trace event name must be a string.") \
+ T(TraceEventNameLengthError, \
+ "Trace event name must not be an empty string.") \
+ T(TraceEventPhaseError, "Trace event phase must be a number.") \
+ T(TraceEventIDError, "Trace event id must be a number.") \
+ /* Weak refs */ \
+ T(WeakRefsCleanupMustBeCallable, "WeakFactory: cleanup must be callable") \
+ T(WeakRefsMakeCellTargetMustBeObject, \
+ "WeakFactory.prototype.makeCell: target must be an object") \
+ T(WeakRefsMakeCellTargetAndHoldingsMustNotBeSame, \
+ "WeakFactory.prototype.makeCell: target and holdings must not be same") \
+ T(WeakRefsWeakRefConstructorTargetMustBeObject, \
+ "WeakRef: target must be an object") \
+ T(WeakRefsMakeRefTargetAndHoldingsMustNotBeSame, \
+ "WeakFactory.prototype.makeRef: target and holdings must not be same")
+
+enum class MessageTemplate {
+#define TEMPLATE(NAME, STRING) k##NAME,
+ MESSAGE_TEMPLATES(TEMPLATE)
+#undef TEMPLATE
+ kLastMessage
+};
+
+inline MessageTemplate MessageTemplateFromInt(int message_id) {
+ DCHECK_LE(0, message_id);
+ DCHECK_LT(message_id, static_cast<int>(MessageTemplate::kLastMessage));
+ return static_cast<MessageTemplate>(message_id);
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_MESSAGE_TEMPLATE_H_
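
message-template.h is a classic X-macro: the MESSAGE_TEMPLATES(T) list is expanded once with a T that emits enumerators (the MessageTemplate enum above) and again, in messages.cc below, with a T that emits the matching format strings, so the two can never drift out of sync. A reduced sketch of the pattern using two entries from the real list (MY_TEMPLATES, MyTemplate, and TemplateText are illustrative names, not V8's):

#include <stdio.h>

#define MY_TEMPLATES(T) \
  T(None, "")           \
  T(CyclicProto, "Cyclic __proto__ value")

enum class MyTemplate {
#define TEMPLATE(NAME, STRING) k##NAME,
  MY_TEMPLATES(TEMPLATE)
#undef TEMPLATE
  kLastMessage
};

// Expanding the same list a second time yields the text for each enumerator.
inline const char* TemplateText(MyTemplate index) {
  switch (index) {
#define CASE(NAME, STRING)  \
  case MyTemplate::k##NAME: \
    return STRING;
    MY_TEMPLATES(CASE)
#undef CASE
    default:
      return nullptr;
  }
}

int main() {
  printf("%s\n", TemplateText(MyTemplate::kCyclicProto));  // Cyclic __proto__ value
  return 0;
}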
diff --git a/deps/v8/src/messages.cc b/deps/v8/src/messages.cc
index 3d98da4e63..f89d3a5746 100644
--- a/deps/v8/src/messages.cc
+++ b/deps/v8/src/messages.cc
@@ -7,11 +7,14 @@
#include <memory>
#include "src/api-inl.h"
+#include "src/counters.h"
#include "src/execution.h"
#include "src/isolate-inl.h"
#include "src/keys.h"
+#include "src/objects/foreign-inl.h"
#include "src/objects/frame-array-inl.h"
#include "src/objects/js-array-inl.h"
+#include "src/objects/struct-inl.h"
#include "src/string-builder-inl.h"
#include "src/wasm/wasm-code-manager.h"
#include "src/wasm/wasm-objects.h"
@@ -50,9 +53,8 @@ void MessageHandler::DefaultMessageReport(Isolate* isolate,
}
Handle<JSMessageObject> MessageHandler::MakeMessageObject(
- Isolate* isolate, MessageTemplate::Template message,
- const MessageLocation* location, Handle<Object> argument,
- Handle<FixedArray> stack_frames) {
+ Isolate* isolate, MessageTemplate message, const MessageLocation* location,
+ Handle<Object> argument, Handle<FixedArray> stack_frames) {
Factory* factory = isolate->factory();
int start = -1;
@@ -84,7 +86,7 @@ void MessageHandler::ReportMessage(Isolate* isolate, const MessageLocation* loc,
// and ignore scheduled exceptions callbacks can throw.
// We pass the exception object into the message handler callback though.
- Object* exception_object = ReadOnlyRoots(isolate).undefined_value();
+ Object exception_object = ReadOnlyRoots(isolate).undefined_value();
if (isolate->has_pending_exception()) {
exception_object = isolate->pending_exception();
}
@@ -147,8 +149,8 @@ void MessageHandler::ReportMessageNoExceptions(
for (int i = 0; i < global_length; i++) {
HandleScope scope(isolate);
if (global_listeners->get(i)->IsUndefined(isolate)) continue;
- FixedArray* listener = FixedArray::cast(global_listeners->get(i));
- Foreign* callback_obj = Foreign::cast(listener->get(0));
+ FixedArray listener = FixedArray::cast(global_listeners->get(i));
+ Foreign callback_obj = Foreign::cast(listener->get(0));
int32_t message_levels =
static_cast<int32_t>(Smi::ToInt(listener->get(2)));
if (!(message_levels & error_level)) {
@@ -158,6 +160,8 @@ void MessageHandler::ReportMessageNoExceptions(
FUNCTION_CAST<v8::MessageCallback>(callback_obj->foreign_address());
Handle<Object> callback_data(listener->get(1), isolate);
{
+ RuntimeCallTimerScope timer(
+ isolate, RuntimeCallCounterId::kMessageListenerCallback);
// Do not allow exceptions to propagate.
v8::TryCatch try_catch(reinterpret_cast<v8::Isolate*>(isolate));
callback(api_message_obj, callback_data->IsUndefined(isolate)
@@ -176,7 +180,7 @@ Handle<String> MessageHandler::GetMessage(Isolate* isolate,
Handle<Object> data) {
Handle<JSMessageObject> message = Handle<JSMessageObject>::cast(data);
Handle<Object> arg = Handle<Object>(message->argument(), isolate);
- return MessageTemplate::FormatMessage(isolate, message->type(), arg);
+ return MessageFormatter::FormatMessage(isolate, message->type(), arg);
}
std::unique_ptr<char[]> MessageHandler::GetLocalizedMessage(
@@ -187,9 +191,10 @@ std::unique_ptr<char[]> MessageHandler::GetLocalizedMessage(
namespace {
-Object* EvalFromFunctionName(Isolate* isolate, Handle<Script> script) {
- if (!script->has_eval_from_shared())
+Object EvalFromFunctionName(Isolate* isolate, Handle<Script> script) {
+ if (!script->has_eval_from_shared()) {
return ReadOnlyRoots(isolate).undefined_value();
+ }
Handle<SharedFunctionInfo> shared(script->eval_from_shared(), isolate);
// Find the name of the function calling eval.
@@ -200,9 +205,10 @@ Object* EvalFromFunctionName(Isolate* isolate, Handle<Script> script) {
return shared->inferred_name();
}
-Object* EvalFromScript(Isolate* isolate, Handle<Script> script) {
- if (!script->has_eval_from_shared())
+Object EvalFromScript(Isolate* isolate, Handle<Script> script) {
+ if (!script->has_eval_from_shared()) {
return ReadOnlyRoots(isolate).undefined_value();
+ }
Handle<SharedFunctionInfo> eval_from_shared(script->eval_from_shared(),
isolate);
@@ -306,6 +312,7 @@ void JSStackFrame::FromFrameArray(Isolate* isolate, Handle<FrameArray> array,
is_constructor_ = (flags & FrameArray::kIsConstructor) != 0;
is_strict_ = (flags & FrameArray::kIsStrict) != 0;
is_async_ = (flags & FrameArray::kIsAsync) != 0;
+ is_promise_all_ = (flags & FrameArray::kIsPromiseAll) != 0;
}
JSStackFrame::JSStackFrame(Isolate* isolate, Handle<Object> receiver,
@@ -360,7 +367,7 @@ bool CheckMethodName(Isolate* isolate, Handle<JSReceiver> receiver,
}
Handle<Object> ScriptNameOrSourceUrl(Handle<Script> script, Isolate* isolate) {
- Object* name_or_url = script->source_url();
+ Object name_or_url = script->source_url();
if (!name_or_url->IsString()) name_or_url = script->name();
return handle(name_or_url, isolate);
}
@@ -466,6 +473,10 @@ int JSStackFrame::GetColumnNumber() {
return -1;
}
+int JSStackFrame::GetPromiseIndex() const {
+ return is_promise_all_ ? offset_ : -1;
+}
+
bool JSStackFrame::IsNative() {
return HasScript() && GetScript()->type() == Script::TYPE_NATIVE;
}
@@ -607,12 +618,23 @@ MaybeHandle<String> JSStackFrame::ToString() {
const bool is_toplevel = IsToplevel();
const bool is_async = IsAsync();
+ const bool is_promise_all = IsPromiseAll();
const bool is_constructor = IsConstructor();
const bool is_method_call = !(is_toplevel || is_constructor);
if (is_async) {
builder.AppendCString("async ");
}
+ if (is_promise_all) {
+ // For `Promise.all(iterable)` frames we interpret the {offset_}
+ // as the element index into `iterable` where the error occurred.
+ builder.AppendCString("Promise.all (index ");
+ Handle<String> index_string = isolate_->factory()->NumberToString(
+ handle(Smi::FromInt(offset_), isolate_), isolate_);
+ builder.AppendString(index_string);
+ builder.AppendCString(")");
+ return builder.Finish();
+ }
if (is_method_call) {
AppendMethodCall(isolate_, this, &builder);
} else if (is_constructor) {
@@ -962,10 +984,15 @@ MaybeHandle<Object> ErrorUtils::FormatStackTrace(Isolate* isolate,
DCHECK(!error_context.is_null() && error_context->IsNativeContext());
PrepareStackTraceScope scope(isolate);
+ Handle<JSArray> sites;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, sites, GetStackFrames(isolate, elems),
+ Object);
+
Handle<Object> result;
ASSIGN_RETURN_ON_EXCEPTION(
isolate, result,
- isolate->RunPrepareStackTraceCallback(error_context, error), Object);
+ isolate->RunPrepareStackTraceCallback(error_context, error, sites),
+ Object);
return result;
} else {
Handle<JSFunction> global_error = isolate->error_function();
@@ -1048,18 +1075,18 @@ MaybeHandle<Object> ErrorUtils::FormatStackTrace(Isolate* isolate,
return builder.Finish();
}
-Handle<String> MessageTemplate::FormatMessage(Isolate* isolate,
- int template_index,
- Handle<Object> arg) {
+Handle<String> MessageFormatter::FormatMessage(Isolate* isolate,
+ MessageTemplate index,
+ Handle<Object> arg) {
Factory* factory = isolate->factory();
Handle<String> result_string = Object::NoSideEffectsToString(isolate, arg);
- MaybeHandle<String> maybe_result_string = MessageTemplate::FormatMessage(
- isolate, template_index, result_string, factory->empty_string(),
+ MaybeHandle<String> maybe_result_string = MessageFormatter::FormatMessage(
+ isolate, index, result_string, factory->empty_string(),
factory->empty_string());
if (!maybe_result_string.ToHandle(&result_string)) {
DCHECK(isolate->has_pending_exception());
isolate->clear_pending_exception();
- return factory->InternalizeOneByteString(STATIC_CHAR_VECTOR("<error>"));
+ return factory->InternalizeOneByteString(StaticCharVector("<error>"));
}
// A string that has been obtained from JS code in this way is
// likely to be a complicated ConsString of some sort. We flatten it
@@ -1069,26 +1096,25 @@ Handle<String> MessageTemplate::FormatMessage(Isolate* isolate,
return String::Flatten(isolate, result_string);
}
-
-const char* MessageTemplate::TemplateString(int template_index) {
- switch (template_index) {
-#define CASE(NAME, STRING) \
- case k##NAME: \
+const char* MessageFormatter::TemplateString(MessageTemplate index) {
+ switch (index) {
+#define CASE(NAME, STRING) \
+ case MessageTemplate::k##NAME: \
return STRING;
MESSAGE_TEMPLATES(CASE)
#undef CASE
- case kLastMessage:
+ case MessageTemplate::kLastMessage:
default:
return nullptr;
}
}
-MaybeHandle<String> MessageTemplate::FormatMessage(Isolate* isolate,
- int template_index,
- Handle<String> arg0,
- Handle<String> arg1,
- Handle<String> arg2) {
- const char* template_string = TemplateString(template_index);
+MaybeHandle<String> MessageFormatter::FormatMessage(Isolate* isolate,
+ MessageTemplate index,
+ Handle<String> arg0,
+ Handle<String> arg1,
+ Handle<String> arg2) {
+ const char* template_string = TemplateString(index);
if (template_string == nullptr) {
isolate->ThrowIllegalOperation();
return MaybeHandle<String>();
@@ -1244,7 +1270,7 @@ MaybeHandle<String> ErrorUtils::ToString(Isolate* isolate,
namespace {
-Handle<String> FormatMessage(Isolate* isolate, int template_index,
+Handle<String> FormatMessage(Isolate* isolate, MessageTemplate index,
Handle<Object> arg0, Handle<Object> arg1,
Handle<Object> arg2) {
Handle<String> arg0_str = Object::NoSideEffectsToString(isolate, arg0);
@@ -1254,8 +1280,8 @@ Handle<String> FormatMessage(Isolate* isolate, int template_index,
isolate->native_context()->IncrementErrorsThrown();
Handle<String> msg;
- if (!MessageTemplate::FormatMessage(isolate, template_index, arg0_str,
- arg1_str, arg2_str)
+ if (!MessageFormatter::FormatMessage(isolate, index, arg0_str, arg1_str,
+ arg2_str)
.ToHandle(&msg)) {
DCHECK(isolate->has_pending_exception());
isolate->clear_pending_exception();
@@ -1270,21 +1296,20 @@ Handle<String> FormatMessage(Isolate* isolate, int template_index,
// static
MaybeHandle<Object> ErrorUtils::MakeGenericError(
- Isolate* isolate, Handle<JSFunction> constructor, int template_index,
+ Isolate* isolate, Handle<JSFunction> constructor, MessageTemplate index,
Handle<Object> arg0, Handle<Object> arg1, Handle<Object> arg2,
FrameSkipMode mode) {
if (FLAG_clear_exceptions_on_js_entry) {
- // This function used to be implemented in JavaScript, and JSEntryStub
- // clears
- // any pending exceptions - so whenever we'd call this from C++, pending
- // exceptions would be cleared. Preserve this behavior.
+ // This function used to be implemented in JavaScript, and JSEntry
+ // clears any pending exceptions - so whenever we'd call this from C++,
+ // pending exceptions would be cleared. Preserve this behavior.
isolate->clear_pending_exception();
}
DCHECK(mode != SKIP_UNTIL_SEEN);
Handle<Object> no_caller;
- Handle<String> msg = FormatMessage(isolate, template_index, arg0, arg1, arg2);
+ Handle<String> msg = FormatMessage(isolate, index, arg0, arg1, arg2);
return ErrorUtils::Construct(isolate, constructor, constructor, msg, mode,
no_caller, false);
}
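
MessageFormatter::FormatMessage walks the selected template string and splices the argument strings into the '%' placeholders in order. A simplified stand-in using std::string instead of V8's Handle<String>/IncrementalStringBuilder machinery (FormatTemplate is an illustrative name, and argument-count checking and escaping are ignored):

#include <string>

// Replace each '%' in tmpl with the next of up to three argument strings.
std::string FormatTemplate(const char* tmpl, const std::string args[3]) {
  std::string result;
  int next_arg = 0;
  for (const char* p = tmpl; *p != '\0'; ++p) {
    if (*p == '%' && next_arg < 3) {
      result += args[next_arg++];  // each '%' consumes the next argument
    } else {
      result += *p;
    }
  }
  return result;
}

// Example:
//   std::string args[3] = {"x", "undefined", ""};
//   FormatTemplate("Cannot read property '%' of %", args)
//     -> "Cannot read property 'x' of undefined"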
diff --git a/deps/v8/src/messages.h b/deps/v8/src/messages.h
index 05d287faae..f030190aa8 100644
--- a/deps/v8/src/messages.h
+++ b/deps/v8/src/messages.h
@@ -13,6 +13,7 @@
#include <memory>
#include "src/handles.h"
+#include "src/message-template.h"
namespace v8 {
namespace internal {
@@ -68,10 +69,14 @@ class StackFrameBase {
// Return 1-based column number, including column offset if first line.
virtual int GetColumnNumber() = 0;
+ // Returns index for Promise.all() async frames, or -1 for other frames.
+ virtual int GetPromiseIndex() const = 0;
+
virtual bool IsNative() = 0;
virtual bool IsToplevel() = 0;
virtual bool IsEval();
virtual bool IsAsync() const = 0;
+ virtual bool IsPromiseAll() const = 0;
virtual bool IsConstructor() = 0;
virtual bool IsStrict() const = 0;
@@ -107,9 +112,12 @@ class JSStackFrame : public StackFrameBase {
int GetLineNumber() override;
int GetColumnNumber() override;
+ int GetPromiseIndex() const override;
+
bool IsNative() override;
bool IsToplevel() override;
bool IsAsync() const override { return is_async_; }
+ bool IsPromiseAll() const override { return is_promise_all_; }
bool IsConstructor() override { return is_constructor_; }
bool IsStrict() const override { return is_strict_; }
@@ -129,6 +137,7 @@ class JSStackFrame : public StackFrameBase {
bool is_async_ : 1;
bool is_constructor_ : 1;
+ bool is_promise_all_ : 1;
bool is_strict_ : 1;
friend class FrameArrayIterator;
@@ -151,9 +160,12 @@ class WasmStackFrame : public StackFrameBase {
int GetLineNumber() override { return wasm_func_index_; }
int GetColumnNumber() override { return -1; }
+ int GetPromiseIndex() const override { return -1; }
+
bool IsNative() override { return false; }
bool IsToplevel() override { return false; }
bool IsAsync() const override { return false; }
+ bool IsPromiseAll() const override { return false; }
bool IsConstructor() override { return false; }
bool IsStrict() const override { return false; }
bool IsInterpreted() const { return code_ == nullptr; }
@@ -244,7 +256,7 @@ class ErrorUtils : public AllStatic {
static MaybeHandle<String> ToString(Isolate* isolate, Handle<Object> recv);
static MaybeHandle<Object> MakeGenericError(
- Isolate* isolate, Handle<JSFunction> constructor, int template_index,
+ Isolate* isolate, Handle<JSFunction> constructor, MessageTemplate index,
Handle<Object> arg0, Handle<Object> arg1, Handle<Object> arg2,
FrameSkipMode mode);
@@ -255,542 +267,17 @@ class ErrorUtils : public AllStatic {
Handle<Object> stack_trace);
};
-#define MESSAGE_TEMPLATES(T) \
- /* Error */ \
- T(None, "") \
- T(CyclicProto, "Cyclic __proto__ value") \
- T(Debugger, "Debugger: %") \
- T(DebuggerLoading, "Error loading debugger") \
- T(DefaultOptionsMissing, "Internal % error. Default options are missing.") \
- T(DeletePrivateField, "Private fields can not be deleted") \
- T(UncaughtException, "Uncaught %") \
- T(Unsupported, "Not supported") \
- T(WrongServiceType, "Internal error, wrong service type: %") \
- T(WrongValueType, "Internal error. Wrong value type.") \
- T(IcuError, "Internal error. Icu error.") \
- /* TypeError */ \
- T(ApplyNonFunction, \
- "Function.prototype.apply was called on %, which is a % and not a " \
- "function") \
- T(ArgumentsDisallowedInInitializer, \
- "'arguments' is not allowed in class field initializer") \
- T(ArrayBufferTooShort, \
- "Derived ArrayBuffer constructor created a buffer which was too small") \
- T(ArrayBufferSpeciesThis, \
- "ArrayBuffer subclass returned this from species constructor") \
- T(ArrayItemNotType, "array %[%] is not type %") \
- T(AwaitNotInAsyncFunction, "await is only valid in async function") \
- T(AtomicsWaitNotAllowed, "Atomics.wait cannot be called in this context") \
- T(BadSortComparisonFunction, \
- "The comparison function must be either a function or undefined") \
- T(BigIntFromNumber, \
- "The number % cannot be converted to a BigInt because it is not an " \
- "integer") \
- T(BigIntFromObject, "Cannot convert % to a BigInt") \
- T(BigIntMixedTypes, \
- "Cannot mix BigInt and other types, use explicit conversions") \
- T(BigIntSerializeJSON, "Do not know how to serialize a BigInt") \
- T(BigIntShr, "BigInts have no unsigned right shift, use >> instead") \
- T(BigIntToNumber, "Cannot convert a BigInt value to a number") \
- T(CalledNonCallable, "% is not a function") \
- T(CalledOnNonObject, "% called on non-object") \
- T(CalledOnNullOrUndefined, "% called on null or undefined") \
- T(CallSiteExpectsFunction, \
- "CallSite expects wasm object as first or function as second argument, " \
- "got <%, %>") \
- T(CallSiteMethod, "CallSite method % expects CallSite as receiver") \
- T(CannotConvertToPrimitive, "Cannot convert object to primitive value") \
- T(CannotPreventExt, "Cannot prevent extensions") \
- T(CannotFreeze, "Cannot freeze") \
- T(CannotFreezeArrayBufferView, \
- "Cannot freeze array buffer views with elements") \
- T(CannotSeal, "Cannot seal") \
- T(CircularStructure, "Converting circular structure to JSON") \
- T(ConstructAbstractClass, "Abstract class % not directly constructable") \
- T(ConstAssign, "Assignment to constant variable.") \
- T(ConstructorClassField, "Classes may not have a field named 'constructor'") \
- T(ConstructorNonCallable, \
- "Class constructor % cannot be invoked without 'new'") \
- T(ConstructorNotFunction, "Constructor % requires 'new'") \
- T(ConstructorNotReceiver, "The .constructor property is not an object") \
- T(CurrencyCode, "Currency code is required with currency style.") \
- T(CyclicModuleDependency, "Detected cycle while resolving name '%' in '%'") \
- T(DataViewNotArrayBuffer, \
- "First argument to DataView constructor must be an ArrayBuffer") \
- T(DateType, "this is not a Date object.") \
- T(DebuggerFrame, "Debugger: Invalid frame index.") \
- T(DebuggerType, "Debugger: Parameters have wrong types.") \
- T(DeclarationMissingInitializer, "Missing initializer in % declaration") \
- T(DefineDisallowed, "Cannot define property %, object is not extensible") \
- T(DetachedOperation, "Cannot perform % on a detached ArrayBuffer") \
- T(DuplicateTemplateProperty, "Object template has duplicate property '%'") \
- T(ExtendsValueNotConstructor, \
- "Class extends value % is not a constructor or null") \
- T(FirstArgumentNotRegExp, \
- "First argument to % must not be a regular expression") \
- T(FunctionBind, "Bind must be called on a function") \
- T(GeneratorRunning, "Generator is already running") \
- T(IllegalInvocation, "Illegal invocation") \
- T(ImmutablePrototypeSet, \
- "Immutable prototype object '%' cannot have their prototype set") \
- T(ImportCallNotNewExpression, "Cannot use new with import") \
- T(ImportMetaOutsideModule, "Cannot use 'import.meta' outside a module") \
- T(ImportMissingSpecifier, "import() requires a specifier") \
- T(IncompatibleMethodReceiver, "Method % called on incompatible receiver %") \
- T(InstanceofNonobjectProto, \
- "Function has non-object prototype '%' in instanceof check") \
- T(InvalidArgument, "invalid_argument") \
- T(InvalidInOperatorUse, "Cannot use 'in' operator to search for '%' in %") \
- T(InvalidRegExpExecResult, \
- "RegExp exec method returned something other than an Object or null") \
- T(InvalidUnit, "Invalid unit argument for %() '%'") \
- T(IteratorResultNotAnObject, "Iterator result % is not an object") \
- T(IteratorSymbolNonCallable, "Found non-callable @@iterator") \
- T(IteratorValueNotAnObject, "Iterator value % is not an entry object") \
- T(LanguageID, "Language ID should be string or object.") \
- T(LocaleNotEmpty, \
- "First argument to Intl.Locale constructor can't be empty or missing") \
- T(LocaleBadParameters, "Incorrect locale information provided") \
- T(ListFormatBadParameters, "Incorrect ListFormat information provided") \
- T(MapperFunctionNonCallable, "flatMap mapper function is not callable") \
- T(MethodCalledOnWrongObject, \
- "Method % called on a non-object or on a wrong type of object.") \
- T(MethodInvokedOnNullOrUndefined, \
- "Method invoked on undefined or null value.") \
- T(MethodInvokedOnWrongType, "Method invoked on an object that is not %.") \
- T(NoAccess, "no access") \
- T(NonCallableInInstanceOfCheck, \
- "Right-hand side of 'instanceof' is not callable") \
- T(NonCoercible, "Cannot destructure 'undefined' or 'null'.") \
- T(NonCoercibleWithProperty, \
- "Cannot destructure property `%` of 'undefined' or 'null'.") \
- T(NonExtensibleProto, "% is not extensible") \
- T(NonObjectInInstanceOfCheck, \
- "Right-hand side of 'instanceof' is not an object") \
- T(NonObjectPropertyLoad, "Cannot read property '%' of %") \
- T(NonObjectPropertyStore, "Cannot set property '%' of %") \
- T(NoSetterInCallback, "Cannot set property % of % which has only a getter") \
- T(NotAnIterator, "% is not an iterator") \
- T(NotAPromise, "% is not a promise") \
- T(NotConstructor, "% is not a constructor") \
- T(NotDateObject, "this is not a Date object.") \
- T(NotGeneric, "% requires that 'this' be a %") \
- T(NotCallableOrIterable, \
- "% is not a function or its return value is not iterable") \
- T(NotCallableOrAsyncIterable, \
- "% is not a function or its return value is not async iterable") \
- T(NotFiniteNumber, "Value need to be finite number for %()") \
- T(NotIterable, "% is not iterable") \
- T(NotIterableNoSymbolLoad, "% is not iterable (cannot read property %)") \
- T(NotAsyncIterable, "% is not async iterable") \
- T(NotPropertyName, "% is not a valid property name") \
- T(NotTypedArray, "this is not a typed array.") \
- T(NotSuperConstructor, "Super constructor % of % is not a constructor") \
- T(NotSuperConstructorAnonymousClass, \
- "Super constructor % of anonymous class is not a constructor") \
- T(NotIntegerSharedTypedArray, "% is not an integer shared typed array.") \
- T(NotInt32SharedTypedArray, "% is not an int32 shared typed array.") \
- T(ObjectGetterExpectingFunction, \
- "Object.prototype.__defineGetter__: Expecting function") \
- T(ObjectGetterCallable, "Getter must be a function: %") \
- T(ObjectNotExtensible, "Cannot add property %, object is not extensible") \
- T(ObjectSetterExpectingFunction, \
- "Object.prototype.__defineSetter__: Expecting function") \
- T(ObjectSetterCallable, "Setter must be a function: %") \
- T(OrdinaryFunctionCalledAsConstructor, \
- "Function object that's not a constructor was created with new") \
- T(PromiseCyclic, "Chaining cycle detected for promise %") \
- T(PromiseExecutorAlreadyInvoked, \
- "Promise executor has already been invoked with non-undefined arguments") \
- T(PromiseNonCallable, "Promise resolve or reject function is not callable") \
- T(PropertyDescObject, "Property description must be an object: %") \
- T(PropertyNotFunction, \
- "'%' returned for property '%' of object '%' is not a function") \
- T(ProtoObjectOrNull, "Object prototype may only be an Object or null: %") \
- T(PrototypeParentNotAnObject, \
- "Class extends value does not have valid prototype property %") \
- T(ProxyConstructNonObject, \
- "'construct' on proxy: trap returned non-object ('%')") \
- T(ProxyDefinePropertyNonConfigurable, \
- "'defineProperty' on proxy: trap returned truish for defining " \
- "non-configurable property '%' which is either non-existant or " \
- "configurable in the proxy target") \
- T(ProxyDefinePropertyNonExtensible, \
- "'defineProperty' on proxy: trap returned truish for adding property '%' " \
- " to the non-extensible proxy target") \
- T(ProxyDefinePropertyIncompatible, \
- "'defineProperty' on proxy: trap returned truish for adding property '%' " \
- " that is incompatible with the existing property in the proxy target") \
- T(ProxyDeletePropertyNonConfigurable, \
- "'deleteProperty' on proxy: trap returned truish for property '%' which " \
- "is non-configurable in the proxy target") \
- T(ProxyGetNonConfigurableData, \
- "'get' on proxy: property '%' is a read-only and " \
- "non-configurable data property on the proxy target but the proxy " \
- "did not return its actual value (expected '%' but got '%')") \
- T(ProxyGetNonConfigurableAccessor, \
- "'get' on proxy: property '%' is a non-configurable accessor " \
- "property on the proxy target and does not have a getter function, but " \
- "the trap did not return 'undefined' (got '%')") \
- T(ProxyGetOwnPropertyDescriptorIncompatible, \
- "'getOwnPropertyDescriptor' on proxy: trap returned descriptor for " \
- "property '%' that is incompatible with the existing property in the " \
- "proxy target") \
- T(ProxyGetOwnPropertyDescriptorInvalid, \
- "'getOwnPropertyDescriptor' on proxy: trap returned neither object nor " \
- "undefined for property '%'") \
- T(ProxyGetOwnPropertyDescriptorNonConfigurable, \
- "'getOwnPropertyDescriptor' on proxy: trap reported non-configurability " \
- "for property '%' which is either non-existant or configurable in the " \
- "proxy target") \
- T(ProxyGetOwnPropertyDescriptorNonExtensible, \
- "'getOwnPropertyDescriptor' on proxy: trap returned undefined for " \
- "property '%' which exists in the non-extensible proxy target") \
- T(ProxyGetOwnPropertyDescriptorUndefined, \
- "'getOwnPropertyDescriptor' on proxy: trap returned undefined for " \
- "property '%' which is non-configurable in the proxy target") \
- T(ProxyGetPrototypeOfInvalid, \
- "'getPrototypeOf' on proxy: trap returned neither object nor null") \
- T(ProxyGetPrototypeOfNonExtensible, \
- "'getPrototypeOf' on proxy: proxy target is non-extensible but the " \
- "trap did not return its actual prototype") \
- T(ProxyHandlerOrTargetRevoked, \
- "Cannot create proxy with a revoked proxy as target or handler") \
- T(ProxyHasNonConfigurable, \
- "'has' on proxy: trap returned falsish for property '%' which exists in " \
- "the proxy target as non-configurable") \
- T(ProxyHasNonExtensible, \
- "'has' on proxy: trap returned falsish for property '%' but the proxy " \
- "target is not extensible") \
- T(ProxyIsExtensibleInconsistent, \
- "'isExtensible' on proxy: trap result does not reflect extensibility of " \
- "proxy target (which is '%')") \
- T(ProxyNonObject, \
- "Cannot create proxy with a non-object as target or handler") \
- T(ProxyOwnKeysMissing, \
- "'ownKeys' on proxy: trap result did not include '%'") \
- T(ProxyOwnKeysNonExtensible, \
- "'ownKeys' on proxy: trap returned extra keys but proxy target is " \
- "non-extensible") \
- T(ProxyPreventExtensionsExtensible, \
- "'preventExtensions' on proxy: trap returned truish but the proxy target " \
- "is extensible") \
- T(ProxyPrivate, "Cannot pass private property name to proxy trap") \
- T(ProxyRevoked, "Cannot perform '%' on a proxy that has been revoked") \
- T(ProxySetFrozenData, \
- "'set' on proxy: trap returned truish for property '%' which exists in " \
- "the proxy target as a non-configurable and non-writable data property " \
- "with a different value") \
- T(ProxySetFrozenAccessor, \
- "'set' on proxy: trap returned truish for property '%' which exists in " \
- "the proxy target as a non-configurable and non-writable accessor " \
- "property without a setter") \
- T(ProxySetPrototypeOfNonExtensible, \
- "'setPrototypeOf' on proxy: trap returned truish for setting a new " \
- "prototype on the non-extensible proxy target") \
- T(ProxyTrapReturnedFalsish, "'%' on proxy: trap returned falsish") \
- T(ProxyTrapReturnedFalsishFor, \
- "'%' on proxy: trap returned falsish for property '%'") \
- T(RedefineDisallowed, "Cannot redefine property: %") \
- T(RedefineExternalArray, \
- "Cannot redefine a property of an object with external array elements") \
- T(ReduceNoInitial, "Reduce of empty array with no initial value") \
- T(RegExpFlags, \
- "Cannot supply flags when constructing one RegExp from another") \
- T(RegExpNonObject, "% getter called on non-object %") \
- T(RegExpNonRegExp, "% getter called on non-RegExp object") \
- T(RelativeDateTimeFormatterBadParameters, \
- "Incorrect RelativeDateTimeFormatter provided") \
- T(ResolverNotAFunction, "Promise resolver % is not a function") \
- T(ReturnMethodNotCallable, "The iterator's 'return' method is not callable") \
- T(SharedArrayBufferTooShort, \
- "Derived SharedArrayBuffer constructor created a buffer which was too " \
- "small") \
- T(SharedArrayBufferSpeciesThis, \
- "SharedArrayBuffer subclass returned this from species constructor") \
- T(StaticPrototype, \
- "Classes may not have a static property named 'prototype'") \
- T(StrictDeleteProperty, "Cannot delete property '%' of %") \
- T(StrictPoisonPill, \
- "'caller', 'callee', and 'arguments' properties may not be accessed on " \
- "strict mode functions or the arguments objects for calls to them") \
- T(StrictReadOnlyProperty, \
- "Cannot assign to read only property '%' of % '%'") \
- T(StrictCannotCreateProperty, "Cannot create property '%' on % '%'") \
- T(SymbolIteratorInvalid, \
- "Result of the Symbol.iterator method is not an object") \
- T(SymbolAsyncIteratorInvalid, \
- "Result of the Symbol.asyncIterator method is not an object") \
- T(SymbolKeyFor, "% is not a symbol") \
- T(SymbolToNumber, "Cannot convert a Symbol value to a number") \
- T(SymbolToString, "Cannot convert a Symbol value to a string") \
- T(ThrowMethodMissing, "The iterator does not provide a 'throw' method.") \
- T(UndefinedOrNullToObject, "Cannot convert undefined or null to object") \
- T(ValueAndAccessor, \
- "Invalid property descriptor. Cannot both specify accessors and a value " \
- "or writable attribute, %") \
- T(VarRedeclaration, "Identifier '%' has already been declared") \
- T(WrongArgs, "%: Arguments list has wrong type") \
- /* ReferenceError */ \
- T(NotDefined, "% is not defined") \
- T(SuperAlreadyCalled, "Super constructor may only be called once") \
- T(UnsupportedSuper, "Unsupported reference to 'super'") \
- /* RangeError */ \
- T(BigIntDivZero, "Division by zero") \
- T(BigIntNegativeExponent, "Exponent must be positive") \
- T(BigIntTooBig, "Maximum BigInt size exceeded") \
-  T(DateRange, "Provided date is not in a valid range.")                      \
- T(ExpectedLocation, \
- "Expected letters optionally connected with underscores or hyphens for " \
- "a location, got %") \
- T(InvalidArrayBufferLength, "Invalid array buffer length") \
- T(ArrayBufferAllocationFailed, "Array buffer allocation failed") \
- T(InvalidArrayLength, "Invalid array length") \
- T(InvalidAtomicAccessIndex, "Invalid atomic access index") \
- T(InvalidCodePoint, "Invalid code point %") \
- T(InvalidCountValue, "Invalid count value") \
- T(InvalidCurrencyCode, "Invalid currency code: %") \
- T(InvalidDataViewAccessorOffset, \
- "Offset is outside the bounds of the DataView") \
- T(InvalidDataViewLength, "Invalid DataView length %") \
- T(InvalidOffset, "Start offset % is outside the bounds of the buffer") \
- T(InvalidHint, "Invalid hint: %") \
- T(InvalidIndex, "Invalid value: not (convertible to) a safe integer") \
- T(InvalidLanguageTag, "Invalid language tag: %") \
- T(InvalidWeakMapKey, "Invalid value used as weak map key") \
- T(InvalidWeakSetValue, "Invalid value used in weak set") \
- T(InvalidStringLength, "Invalid string length") \
- T(InvalidTimeValue, "Invalid time value") \
- T(InvalidTimeZone, "Invalid time zone specified: %") \
- T(InvalidTypedArrayAlignment, "% of % should be a multiple of %") \
- T(InvalidTypedArrayIndex, "Invalid typed array index") \
- T(InvalidTypedArrayLength, "Invalid typed array length: %") \
- T(LetInLexicalBinding, "let is disallowed as a lexically bound name") \
-  T(LocaleMatcher, "Illegal value for localeMatcher: %")                      \
- T(NormalizationForm, "The normalization form should be one of %.") \
- T(ZeroDigitNumericSeparator, \
-    "Numeric separator cannot be used after a leading 0.")                    \
- T(NumberFormatRange, "% argument must be between 0 and 100") \
- T(TrailingNumericSeparator, \
- "Numeric separators are not allowed at the end of numeric literals") \
- T(ContinuousNumericSeparator, \
- "Only one underscore is allowed as numeric separator") \
- T(PropertyValueOutOfRange, "% value is out of range.") \
- T(StackOverflow, "Maximum call stack size exceeded") \
- T(ToPrecisionFormatRange, \
- "toPrecision() argument must be between 1 and 100") \
- T(ToRadixFormatRange, "toString() radix argument must be between 2 and 36") \
- T(TypedArraySetOffsetOutOfBounds, "offset is out of bounds") \
- T(TypedArraySetSourceTooLarge, "Source is too large") \
- T(ValueOutOfRange, "Value % out of range for % options property %") \
- /* SyntaxError */ \
- T(AmbiguousExport, \
- "The requested module '%' contains conflicting star exports for name '%'") \
- T(BadGetterArity, "Getter must not have any formal parameters.") \
- T(BadSetterArity, "Setter must have exactly one formal parameter.") \
- T(BigIntInvalidString, "Invalid BigInt string") \
- T(ConstructorIsAccessor, "Class constructor may not be an accessor") \
- T(ConstructorIsGenerator, "Class constructor may not be a generator") \
- T(ConstructorIsAsync, "Class constructor may not be an async method") \
- T(DerivedConstructorReturnedNonObject, \
- "Derived constructors may only return object or undefined") \
- T(DuplicateConstructor, "A class may only have one constructor") \
- T(DuplicateExport, "Duplicate export of '%'") \
- T(DuplicateProto, \
- "Duplicate __proto__ fields are not allowed in object literals") \
- T(ForInOfLoopInitializer, \
- "% loop variable declaration may not have an initializer.") \
- T(ForInOfLoopMultiBindings, \
- "Invalid left-hand side in % loop: Must have a single binding.") \
- T(GeneratorInSingleStatementContext, \
- "Generators can only be declared at the top level or inside a block.") \
- T(AsyncFunctionInSingleStatementContext, \
- "Async functions can only be declared at the top level or inside a " \
- "block.") \
- T(IllegalBreak, "Illegal break statement") \
- T(NoIterationStatement, \
- "Illegal continue statement: no surrounding iteration statement") \
- T(IllegalContinue, \
- "Illegal continue statement: '%' does not denote an iteration statement") \
- T(IllegalLanguageModeDirective, \
- "Illegal '%' directive in function with non-simple parameter list") \
- T(IllegalReturn, "Illegal return statement") \
- T(IntrinsicWithSpread, "Intrinsic calls do not support spread arguments") \
- T(InvalidRestBindingPattern, \
- "`...` must be followed by an identifier in declaration contexts") \
- T(InvalidRestAssignmentPattern, \
- "`...` must be followed by an assignable reference in assignment " \
- "contexts") \
- T(InvalidEscapedReservedWord, "Keyword must not contain escaped characters") \
- T(InvalidEscapedMetaProperty, "'%' must not contain escaped characters") \
- T(InvalidLhsInAssignment, "Invalid left-hand side in assignment") \
- T(InvalidCoverInitializedName, "Invalid shorthand property initializer") \
- T(InvalidDestructuringTarget, "Invalid destructuring assignment target") \
- T(InvalidLhsInFor, "Invalid left-hand side in for-loop") \
- T(InvalidLhsInPostfixOp, \
- "Invalid left-hand side expression in postfix operation") \
- T(InvalidLhsInPrefixOp, \
- "Invalid left-hand side expression in prefix operation") \
- T(InvalidRegExpFlags, "Invalid flags supplied to RegExp constructor '%'") \
- T(InvalidOrUnexpectedToken, "Invalid or unexpected token") \
- T(InvalidPrivateFieldAccess, "Invalid private field '%'") \
- T(JsonParseUnexpectedEOS, "Unexpected end of JSON input") \
- T(JsonParseUnexpectedToken, "Unexpected token % in JSON at position %") \
- T(JsonParseUnexpectedTokenNumber, "Unexpected number in JSON at position %") \
- T(JsonParseUnexpectedTokenString, "Unexpected string in JSON at position %") \
- T(LabelRedeclaration, "Label '%' has already been declared") \
- T(LabelledFunctionDeclaration, \
- "Labelled function declaration not allowed as the body of a control flow " \
- "structure") \
- T(MalformedArrowFunParamList, "Malformed arrow function parameter list") \
- T(MalformedRegExp, "Invalid regular expression: /%/: %") \
- T(MalformedRegExpFlags, "Invalid regular expression flags") \
- T(ModuleExportUndefined, "Export '%' is not defined in module") \
- T(HtmlCommentInModule, "HTML comments are not allowed in modules") \
- T(MultipleDefaultsInSwitch, \
- "More than one default clause in switch statement") \
- T(NewlineAfterThrow, "Illegal newline after throw") \
- T(NoCatchOrFinally, "Missing catch or finally after try") \
- T(NotIsvar, "builtin %%IS_VAR: not a variable") \
- T(ParamAfterRest, "Rest parameter must be last formal parameter") \
- T(FlattenPastSafeLength, \
- "Flattening % elements on an array-like of length % " \
- "is disallowed, as the total surpasses 2**53-1") \
- T(PushPastSafeLength, \
- "Pushing % elements on an array-like of length % " \
- "is disallowed, as the total surpasses 2**53-1") \
- T(ElementAfterRest, "Rest element must be last element") \
- T(BadSetterRestParameter, \
- "Setter function argument must not be a rest parameter") \
- T(ParamDupe, "Duplicate parameter name not allowed in this context") \
-  T(ParenthesisInArgString, "Function arg string contains parentheses")       \
- T(ArgStringTerminatesParametersEarly, \
- "Arg string terminates parameters early") \
- T(UnexpectedEndOfArgString, "Unexpected end of arg string") \
- T(RestDefaultInitializer, \
- "Rest parameter may not have a default initializer") \
- T(RuntimeWrongNumArgs, "Runtime function given wrong number of arguments") \
- T(SuperNotCalled, \
- "Must call super constructor in derived class before accessing 'this' or " \
- "returning from derived constructor") \
- T(SingleFunctionLiteral, "Single function literal required") \
- T(SloppyFunction, \
- "In non-strict mode code, functions can only be declared at top level, " \
- "inside a block, or as the body of an if statement.") \
- T(SpeciesNotConstructor, \
- "object.constructor[Symbol.species] is not a constructor") \
- T(StrictDelete, "Delete of an unqualified identifier in strict mode.") \
- T(StrictEvalArguments, "Unexpected eval or arguments in strict mode") \
- T(StrictFunction, \
- "In strict mode code, functions can only be declared at top level or " \
- "inside a block.") \
- T(StrictOctalLiteral, "Octal literals are not allowed in strict mode.") \
- T(StrictDecimalWithLeadingZero, \
- "Decimals with leading zeros are not allowed in strict mode.") \
- T(StrictOctalEscape, \
- "Octal escape sequences are not allowed in strict mode.") \
- T(StrictWith, "Strict mode code may not include a with statement") \
- T(TemplateOctalLiteral, \
- "Octal escape sequences are not allowed in template strings.") \
- T(ThisFormalParameter, "'this' is not a valid formal parameter name") \
- T(AwaitBindingIdentifier, \
- "'await' is not a valid identifier name in an async function") \
- T(AwaitExpressionFormalParameter, \
- "Illegal await-expression in formal parameters of async function") \
- T(TooManyArguments, \
- "Too many arguments in function call (only 65535 allowed)") \
- T(TooManyParameters, \
- "Too many parameters in function definition (only 65535 allowed)") \
- T(TooManySpreads, \
- "Literal containing too many nested spreads (up to 65534 allowed)") \
- T(TooManyVariables, "Too many variables declared (only 4194303 allowed)") \
- T(TooManyElementsInPromiseAll, "Too many elements passed to Promise.all") \
- T(TypedArrayTooShort, \
- "Derived TypedArray constructor created an array which was too small") \
- T(UnexpectedEOS, "Unexpected end of input") \
- T(UnexpectedReserved, "Unexpected reserved word") \
- T(UnexpectedStrictReserved, "Unexpected strict mode reserved word") \
- T(UnexpectedSuper, "'super' keyword unexpected here") \
- T(UnexpectedNewTarget, "new.target expression is not allowed here") \
- T(UnexpectedTemplateString, "Unexpected template string") \
- T(UnexpectedToken, "Unexpected token %") \
- T(UnexpectedTokenIdentifier, "Unexpected identifier") \
- T(UnexpectedTokenNumber, "Unexpected number") \
- T(UnexpectedTokenString, "Unexpected string") \
- T(UnexpectedTokenRegExp, "Unexpected regular expression") \
- T(UnexpectedLexicalDeclaration, \
- "Lexical declaration cannot appear in a single-statement context") \
- T(UnknownLabel, "Undefined label '%'") \
- T(UnresolvableExport, \
- "The requested module '%' does not provide an export named '%'") \
- T(UnterminatedArgList, "missing ) after argument list") \
- T(UnterminatedRegExp, "Invalid regular expression: missing /") \
- T(UnterminatedTemplate, "Unterminated template literal") \
- T(UnterminatedTemplateExpr, "Missing } in template expression") \
- T(FoundNonCallableHasInstance, "Found non-callable @@hasInstance") \
- T(InvalidHexEscapeSequence, "Invalid hexadecimal escape sequence") \
- T(InvalidUnicodeEscapeSequence, "Invalid Unicode escape sequence") \
- T(UndefinedUnicodeCodePoint, "Undefined Unicode code-point") \
- T(YieldInParameter, "Yield expression not allowed in formal parameter") \
- /* EvalError */ \
- T(CodeGenFromStrings, "%") \
- T(NoSideEffectDebugEvaluate, "Possible side-effect in debug-evaluate") \
- /* URIError */ \
- T(URIMalformed, "URI malformed") \
- /* Wasm errors (currently Error) */ \
- T(WasmTrapUnreachable, "unreachable") \
- T(WasmTrapMemOutOfBounds, "memory access out of bounds") \
- T(WasmTrapUnalignedAccess, "operation does not support unaligned accesses") \
- T(WasmTrapDivByZero, "divide by zero") \
- T(WasmTrapDivUnrepresentable, "divide result unrepresentable") \
- T(WasmTrapRemByZero, "remainder by zero") \
- T(WasmTrapFloatUnrepresentable, "float unrepresentable in integer range") \
- T(WasmTrapFuncInvalid, "invalid index into function table") \
- T(WasmTrapFuncSigMismatch, "function signature mismatch") \
- T(WasmTrapTypeError, "wasm function signature contains illegal type") \
- T(WasmExceptionError, "wasm exception") \
- /* Asm.js validation related */ \
- T(AsmJsInvalid, "Invalid asm.js: %") \
- T(AsmJsCompiled, "Converted asm.js to WebAssembly: %") \
- T(AsmJsInstantiated, "Instantiated asm.js: %") \
- T(AsmJsLinkingFailed, "Linking failure in asm.js: %") \
- /* DataCloneError messages */ \
- T(DataCloneError, "% could not be cloned.") \
- T(DataCloneErrorOutOfMemory, "Data cannot be cloned, out of memory.") \
- T(DataCloneErrorNeuteredArrayBuffer, \
- "An ArrayBuffer is neutered and could not be cloned.") \
- T(DataCloneErrorSharedArrayBufferTransferred, \
- "A SharedArrayBuffer could not be cloned. SharedArrayBuffer must not be " \
- "transferred.") \
- T(DataCloneDeserializationError, "Unable to deserialize cloned data.") \
- T(DataCloneDeserializationVersionError, \
- "Unable to deserialize cloned data due to invalid or unsupported " \
- "version.") \
- /* Builtins-Trace Errors */ \
- T(TraceEventCategoryError, "Trace event category must be a string.") \
- T(TraceEventNameError, "Trace event name must be a string.") \
- T(TraceEventNameLengthError, \
- "Trace event name must not be an empty string.") \
- T(TraceEventPhaseError, "Trace event phase must be a number.") \
- T(TraceEventIDError, "Trace event id must be a number.")
-
-class MessageTemplate {
+class MessageFormatter {
public:
- enum Template {
-#define TEMPLATE(NAME, STRING) k##NAME,
- MESSAGE_TEMPLATES(TEMPLATE)
-#undef TEMPLATE
- kLastMessage
- };
-
- static const char* TemplateString(int template_index);
+ static const char* TemplateString(MessageTemplate index);
- static MaybeHandle<String> FormatMessage(Isolate* isolate, int template_index,
+ static MaybeHandle<String> FormatMessage(Isolate* isolate,
+ MessageTemplate index,
Handle<String> arg0,
Handle<String> arg1,
Handle<String> arg2);
- static Handle<String> FormatMessage(Isolate* isolate, int template_index,
+ static Handle<String> FormatMessage(Isolate* isolate, MessageTemplate index,
Handle<Object> arg);
};
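
[Editor's note: illustration, not part of the diff.] The hunk above retires the
nested MessageTemplate::Template enum (generated from the removed
MESSAGE_TEMPLATES list) in favor of a standalone MessageTemplate type,
presumably defined elsewhere in this commit, and moves the formatting helpers
into MessageFormatter, which takes the enum instead of a raw int index. A
minimal sketch of the resulting call shape; |isolate| and |arg| are
placeholders, and kNotDefined is one of the templates listed above:

  // Sketch only, not a real V8 call site.
  const char* raw =
      MessageFormatter::TemplateString(MessageTemplate::kNotDefined);
  Handle<String> formatted = MessageFormatter::FormatMessage(
      isolate, MessageTemplate::kNotDefined, arg);
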
@@ -801,9 +288,8 @@ class MessageHandler {
public:
// Returns a message object for the API to use.
static Handle<JSMessageObject> MakeMessageObject(
- Isolate* isolate, MessageTemplate::Template type,
- const MessageLocation* location, Handle<Object> argument,
- Handle<FixedArray> stack_frames);
+ Isolate* isolate, MessageTemplate type, const MessageLocation* location,
+ Handle<Object> argument, Handle<FixedArray> stack_frames);
// Report a formatted message (needs JS allocation).
static void ReportMessage(Isolate* isolate, const MessageLocation* loc,
diff --git a/deps/v8/src/microtask-queue.cc b/deps/v8/src/microtask-queue.cc
new file mode 100644
index 0000000000..5010b0bc25
--- /dev/null
+++ b/deps/v8/src/microtask-queue.cc
@@ -0,0 +1,226 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/microtask-queue.h"
+
+#include <stddef.h>
+#include <algorithm>
+
+#include "src/api.h"
+#include "src/base/logging.h"
+#include "src/handles-inl.h"
+#include "src/isolate.h"
+#include "src/objects/microtask-inl.h"
+#include "src/roots-inl.h"
+#include "src/tracing/trace-event.h"
+#include "src/visitors.h"
+
+namespace v8 {
+namespace internal {
+
+const size_t MicrotaskQueue::kRingBufferOffset =
+ OFFSET_OF(MicrotaskQueue, ring_buffer_);
+const size_t MicrotaskQueue::kCapacityOffset =
+ OFFSET_OF(MicrotaskQueue, capacity_);
+const size_t MicrotaskQueue::kSizeOffset = OFFSET_OF(MicrotaskQueue, size_);
+const size_t MicrotaskQueue::kStartOffset = OFFSET_OF(MicrotaskQueue, start_);
+
+const intptr_t MicrotaskQueue::kMinimumCapacity = 8;
+
+// static
+void MicrotaskQueue::SetUpDefaultMicrotaskQueue(Isolate* isolate) {
+ DCHECK_NULL(isolate->default_microtask_queue());
+
+ MicrotaskQueue* microtask_queue = new MicrotaskQueue;
+ microtask_queue->next_ = microtask_queue;
+ microtask_queue->prev_ = microtask_queue;
+ isolate->set_default_microtask_queue(microtask_queue);
+}
+
+// static
+std::unique_ptr<MicrotaskQueue> MicrotaskQueue::New(Isolate* isolate) {
+ DCHECK_NOT_NULL(isolate->default_microtask_queue());
+
+ std::unique_ptr<MicrotaskQueue> microtask_queue(new MicrotaskQueue);
+
+  // Insert the new instance right after the last MicrotaskQueue instance.
+ MicrotaskQueue* last = isolate->default_microtask_queue()->prev_;
+ microtask_queue->next_ = last->next_;
+ microtask_queue->prev_ = last;
+ last->next_->prev_ = microtask_queue.get();
+ last->next_ = microtask_queue.get();
+
+ return microtask_queue;
+}
+
+MicrotaskQueue::MicrotaskQueue() = default;
+
+MicrotaskQueue::~MicrotaskQueue() {
+ if (next_ != this) {
+ DCHECK_NE(prev_, this);
+ next_->prev_ = prev_;
+ prev_->next_ = next_;
+ }
+ delete[] ring_buffer_;
+}
+
+// static
+Address MicrotaskQueue::CallEnqueueMicrotask(Isolate* isolate,
+ intptr_t microtask_queue_pointer,
+ Address raw_microtask) {
+ Microtask microtask = Microtask::cast(Object(raw_microtask));
+ reinterpret_cast<MicrotaskQueue*>(microtask_queue_pointer)
+ ->EnqueueMicrotask(microtask);
+ return ReadOnlyRoots(isolate).undefined_value().ptr();
+}
+
+void MicrotaskQueue::EnqueueMicrotask(Microtask microtask) {
+ if (size_ == capacity_) {
+    // Keep the capacity of |ring_buffer_| a power of two, so that the JIT
+    // implementation can reduce the modulo to a cheap bitwise AND.
+ intptr_t new_capacity = std::max(kMinimumCapacity, capacity_ << 1);
+ ResizeBuffer(new_capacity);
+ }
+
+ DCHECK_LT(size_, capacity_);
+ ring_buffer_[(start_ + size_) % capacity_] = microtask.ptr();
+ ++size_;
+}
+
+namespace {
+
+class SetIsRunningMicrotasks {
+ public:
+ explicit SetIsRunningMicrotasks(bool* flag) : flag_(flag) {
+ DCHECK(!*flag_);
+ *flag_ = true;
+ }
+
+ ~SetIsRunningMicrotasks() {
+ DCHECK(*flag_);
+ *flag_ = false;
+ }
+
+ private:
+ bool* flag_;
+};
+
+} // namespace
+
+int MicrotaskQueue::RunMicrotasks(Isolate* isolate) {
+ if (!size()) {
+ OnCompleted(isolate);
+ return 0;
+ }
+
+ HandleScope handle_scope(isolate);
+ MaybeHandle<Object> maybe_exception;
+
+ MaybeHandle<Object> maybe_result;
+
+ {
+ SetIsRunningMicrotasks scope(&is_running_microtasks_);
+ v8::Isolate::SuppressMicrotaskExecutionScope suppress(
+ reinterpret_cast<v8::Isolate*>(isolate));
+ HandleScopeImplementer::EnteredContextRewindScope rewind_scope(
+ isolate->handle_scope_implementer());
+ TRACE_EVENT0("v8.execute", "RunMicrotasks");
+ TRACE_EVENT_CALL_STATS_SCOPED(isolate, "v8", "V8.RunMicrotasks");
+ maybe_result = Execution::TryRunMicrotasks(isolate, this, &maybe_exception);
+ }
+
+  // If execution is terminating, clean up and propagate that to the TryCatch scope.
+ if (maybe_result.is_null() && maybe_exception.is_null()) {
+ delete[] ring_buffer_;
+ ring_buffer_ = nullptr;
+ capacity_ = 0;
+ size_ = 0;
+ start_ = 0;
+ isolate->SetTerminationOnExternalTryCatch();
+ OnCompleted(isolate);
+ return -1;
+ }
+ DCHECK_EQ(0, size());
+ OnCompleted(isolate);
+
+ // TODO(tzik): Return the number of microtasks run in this round.
+ return 0;
+}
+
+void MicrotaskQueue::IterateMicrotasks(RootVisitor* visitor) {
+ if (size_) {
+    // Iterate pending Microtasks as root objects to avoid a write barrier on
+    // each individual Microtask. If this hurts GC performance, use a FixedArray.
+ visitor->VisitRootPointers(
+ Root::kStrongRoots, nullptr, FullObjectSlot(ring_buffer_ + start_),
+ FullObjectSlot(ring_buffer_ + std::min(start_ + size_, capacity_)));
+ visitor->VisitRootPointers(
+ Root::kStrongRoots, nullptr, FullObjectSlot(ring_buffer_),
+ FullObjectSlot(ring_buffer_ + std::max(start_ + size_ - capacity_,
+ static_cast<intptr_t>(0))));
+ }
+
+ if (capacity_ <= kMinimumCapacity) {
+ return;
+ }
+
+ intptr_t new_capacity = capacity_;
+ while (new_capacity > 2 * size_) {
+ new_capacity >>= 1;
+ }
+ new_capacity = std::max(new_capacity, kMinimumCapacity);
+ if (new_capacity < capacity_) {
+ ResizeBuffer(new_capacity);
+ }
+}
+
+void MicrotaskQueue::AddMicrotasksCompletedCallback(
+ MicrotasksCompletedCallback callback) {
+ auto pos = std::find(microtasks_completed_callbacks_.begin(),
+ microtasks_completed_callbacks_.end(), callback);
+ if (pos != microtasks_completed_callbacks_.end()) return;
+ microtasks_completed_callbacks_.push_back(callback);
+}
+
+void MicrotaskQueue::RemoveMicrotasksCompletedCallback(
+ MicrotasksCompletedCallback callback) {
+ auto pos = std::find(microtasks_completed_callbacks_.begin(),
+ microtasks_completed_callbacks_.end(), callback);
+ if (pos == microtasks_completed_callbacks_.end()) return;
+ microtasks_completed_callbacks_.erase(pos);
+}
+
+void MicrotaskQueue::FireMicrotasksCompletedCallback(Isolate* isolate) const {
+ std::vector<MicrotasksCompletedCallback> callbacks(
+ microtasks_completed_callbacks_);
+ for (auto& callback : callbacks) {
+ callback(reinterpret_cast<v8::Isolate*>(isolate));
+ }
+}
+
+void MicrotaskQueue::OnCompleted(Isolate* isolate) {
+ // TODO(marja): (spec) The discussion about when to clear the KeepDuringJob
+ // set is still open (whether to clear it after every microtask or once
+ // during a microtask checkpoint). See also
+ // https://github.com/tc39/proposal-weakrefs/issues/39 .
+ isolate->heap()->ClearKeepDuringJobSet();
+
+ FireMicrotasksCompletedCallback(isolate);
+}
+
+void MicrotaskQueue::ResizeBuffer(intptr_t new_capacity) {
+ DCHECK_LE(size_, new_capacity);
+ Address* new_ring_buffer = new Address[new_capacity];
+ for (intptr_t i = 0; i < size_; ++i) {
+ new_ring_buffer[i] = ring_buffer_[(start_ + i) % capacity_];
+ }
+
+ delete[] ring_buffer_;
+ ring_buffer_ = new_ring_buffer;
+ capacity_ = new_capacity;
+ start_ = 0;
+}
+
+} // namespace internal
+} // namespace v8
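
[Editor's note: illustration, not part of the diff.] EnqueueMicrotask() and
ResizeBuffer() above implement a growable ring buffer whose capacity is kept a
power of two (so generated code can reduce the modulo to a mask); resizing
copies the pending tasks out in logical order and resets start_ to zero. The
same arithmetic as a self-contained sketch, with hypothetical names:

  #include <cstdint>
  #include <utility>
  #include <vector>

  // Sketch of the MicrotaskQueue ring-buffer arithmetic (not V8 code).
  class RingBuffer {
   public:
    void Push(intptr_t task) {
      if (size_ == Capacity()) Grow();
      // Power-of-two capacity lets '% Capacity()' lower to a bitwise AND.
      buf_[(start_ + size_) % Capacity()] = task;
      ++size_;
    }

   private:
    intptr_t Capacity() const { return static_cast<intptr_t>(buf_.size()); }
    void Grow() {
      // Mirrors ResizeBuffer(): copy in logical order, then reset start_.
      std::vector<intptr_t> bigger(buf_.size() * 2);
      for (intptr_t i = 0; i < size_; ++i)
        bigger[i] = buf_[(start_ + i) % Capacity()];
      buf_ = std::move(bigger);
      start_ = 0;
    }
    std::vector<intptr_t> buf_ = std::vector<intptr_t>(8);  // kMinimumCapacity
    intptr_t size_ = 0;
    intptr_t start_ = 0;
  };
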
diff --git a/deps/v8/src/microtask-queue.h b/deps/v8/src/microtask-queue.h
new file mode 100644
index 0000000000..c4db47ad46
--- /dev/null
+++ b/deps/v8/src/microtask-queue.h
@@ -0,0 +1,124 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_MICROTASK_QUEUE_H_
+#define V8_MICROTASK_QUEUE_H_
+
+#include <stdint.h>
+#include <memory>
+#include <vector>
+
+#include "include/v8-internal.h" // For Address.
+#include "include/v8.h"
+#include "src/base/macros.h"
+
+namespace v8 {
+namespace internal {
+
+class Isolate;
+class Microtask;
+class Object;
+class RootVisitor;
+
+class V8_EXPORT_PRIVATE MicrotaskQueue {
+ public:
+ static void SetUpDefaultMicrotaskQueue(Isolate* isolate);
+ static std::unique_ptr<MicrotaskQueue> New(Isolate* isolate);
+
+ ~MicrotaskQueue();
+
+ // Uses raw Address values because it's called via ExternalReference.
+ // {raw_microtask} is a tagged Microtask pointer.
+ // Returns a tagged Object pointer.
+ static Address CallEnqueueMicrotask(Isolate* isolate,
+ intptr_t microtask_queue_pointer,
+ Address raw_microtask);
+
+ void EnqueueMicrotask(Microtask microtask);
+
+  // Returns -1 if the execution is terminating; otherwise returns 0.
+ // TODO(tzik): Update the implementation to return the number of processed
+ // microtasks.
+ int RunMicrotasks(Isolate* isolate);
+
+ // Iterate all pending Microtasks in this queue as strong roots, so that
+ // builtins can update the queue directly without the write barrier.
+ void IterateMicrotasks(RootVisitor* visitor);
+
+  // The microtasks scope depth tracks nested scopes that control microtask
+  // invocation; invocation happens when the depth reaches zero.
+ void IncrementMicrotasksScopeDepth() { ++microtasks_depth_; }
+ void DecrementMicrotasksScopeDepth() { --microtasks_depth_; }
+ int GetMicrotasksScopeDepth() const { return microtasks_depth_; }
+
+ // Possibly nested microtasks suppression scopes prevent microtasks
+ // from running.
+ void IncrementMicrotasksSuppressions() { ++microtasks_suppressions_; }
+ void DecrementMicrotasksSuppressions() { --microtasks_suppressions_; }
+ bool HasMicrotasksSuppressions() const {
+ return microtasks_suppressions_ != 0;
+ }
+
+#ifdef DEBUG
+  // In debug builds we check that calls not intended to invoke microtasks
+  // are still correctly wrapped in microtask scopes.
+ void IncrementDebugMicrotasksScopeDepth() { ++debug_microtasks_depth_; }
+ void DecrementDebugMicrotasksScopeDepth() { --debug_microtasks_depth_; }
+ bool DebugMicrotasksScopeDepthIsZero() const {
+ return debug_microtasks_depth_ == 0;
+ }
+#endif
+
+ void AddMicrotasksCompletedCallback(MicrotasksCompletedCallback callback);
+ void RemoveMicrotasksCompletedCallback(MicrotasksCompletedCallback callback);
+ void FireMicrotasksCompletedCallback(Isolate* isolate) const;
+ bool IsRunningMicrotasks() const { return is_running_microtasks_; }
+
+ intptr_t capacity() const { return capacity_; }
+ intptr_t size() const { return size_; }
+ intptr_t start() const { return start_; }
+
+ MicrotaskQueue* next() const { return next_; }
+ MicrotaskQueue* prev() const { return prev_; }
+
+ static const size_t kRingBufferOffset;
+ static const size_t kCapacityOffset;
+ static const size_t kSizeOffset;
+ static const size_t kStartOffset;
+
+ static const intptr_t kMinimumCapacity;
+
+ private:
+ void OnCompleted(Isolate* isolate);
+
+ MicrotaskQueue();
+ void ResizeBuffer(intptr_t new_capacity);
+
+ // A ring buffer to hold Microtask instances.
+  // ring_buffer_[(start_ + i) % capacity_] contains the |i|th Microtask for
+  // each |i| in [0, size_).
+ intptr_t size_ = 0;
+ intptr_t capacity_ = 0;
+ intptr_t start_ = 0;
+ Address* ring_buffer_ = nullptr;
+
+  // MicrotaskQueue instances form a circular doubly linked list, so that all
+  // instances are reachable through |next_|.
+ MicrotaskQueue* next_ = nullptr;
+ MicrotaskQueue* prev_ = nullptr;
+
+ int microtasks_depth_ = 0;
+ int microtasks_suppressions_ = 0;
+#ifdef DEBUG
+ int debug_microtasks_depth_ = 0;
+#endif
+
+ bool is_running_microtasks_ = false;
+ std::vector<MicrotasksCompletedCallback> microtasks_completed_callbacks_;
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_MICROTASK_QUEUE_H_
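
[Editor's note: illustration, not part of the diff.] The next_/prev_ pointers
declared above chain every MicrotaskQueue into a single circular doubly linked
list rooted at the isolate's default queue; that is what makes the unlink in
~MicrotaskQueue() O(1) and keeps every queue reachable through next_. The
pointer discipline of New() and the destructor, reduced to a generic sketch:

  // Sketch of the circular-list maintenance in MicrotaskQueue (not V8 code).
  struct Node {
    Node* next = this;  // a lone node forms a one-element cycle
    Node* prev = this;

    // Insert |n| right after |this|. MicrotaskQueue::New() does this with
    // |this| == default_queue->prev_, i.e. it appends at the tail.
    void InsertAfter(Node* n) {
      n->next = next;
      n->prev = this;
      next->prev = n;
      next = n;
    }

    // Unlink |this|, mirroring ~MicrotaskQueue(); a one-element cycle
    // needs no pointer updates.
    void Unlink() {
      if (next == this) return;
      next->prev = prev;
      prev->next = next;
    }
  };
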
diff --git a/deps/v8/src/mips/OWNERS b/deps/v8/src/mips/OWNERS
index 8bbcab4c2d..b455d9ef29 100644
--- a/deps/v8/src/mips/OWNERS
+++ b/deps/v8/src/mips/OWNERS
@@ -1,2 +1,3 @@
-ibogosavljevic@wavecomp.com
-skovacevic@wavecomp.com
\ No newline at end of file
+arikalo@wavecomp.com
+prudic@wavecomp.com
+skovacevic@wavecomp.com
diff --git a/deps/v8/src/mips/assembler-mips-inl.h b/deps/v8/src/mips/assembler-mips-inl.h
index a0ca03f3cf..5cdd8808a6 100644
--- a/deps/v8/src/mips/assembler-mips-inl.h
+++ b/deps/v8/src/mips/assembler-mips-inl.h
@@ -70,19 +70,20 @@ void RelocInfo::apply(intptr_t delta) {
if (IsInternalReference(rmode_) || IsInternalReferenceEncoded(rmode_)) {
// Absolute code pointer inside code object moves with the code object.
Assembler::RelocateInternalReference(rmode_, pc_, delta);
+ } else if (IsRelativeCodeTarget(rmode_)) {
+ Assembler::RelocateRelativeReference(rmode_, pc_, delta);
}
}
Address RelocInfo::target_address() {
- DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_) || IsWasmCall(rmode_));
+ DCHECK(IsCodeTargetMode(rmode_) || IsRuntimeEntry(rmode_) ||
+ IsWasmCall(rmode_));
return Assembler::target_address_at(pc_, constant_pool_);
}
Address RelocInfo::target_address_address() {
- DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_) || IsWasmCall(rmode_) ||
- IsEmbeddedObject(rmode_) || IsExternalReference(rmode_) ||
- IsOffHeapTarget(rmode_));
+ DCHECK(HasTargetAddressAddress());
// Read the address of the word containing the target_address in an
// instruction stream.
// The only architecture-independent user of this function is the serializer.
@@ -121,16 +122,16 @@ Address Assembler::target_address_from_return_address(Address pc) {
}
void Assembler::deserialization_set_special_target_at(
- Address instruction_payload, Code* code, Address target) {
+ Address instruction_payload, Code code, Address target) {
if (IsMipsArchVariant(kMips32r6)) {
// On R6 the address location is shifted by one instruction
set_target_address_at(
instruction_payload - (kInstructionsFor32BitConstant - 1) * kInstrSize,
- code ? code->constant_pool() : kNullAddress, target);
+ !code.is_null() ? code->constant_pool() : kNullAddress, target);
} else {
set_target_address_at(
instruction_payload - kInstructionsFor32BitConstant * kInstrSize,
- code ? code->constant_pool() : kNullAddress, target);
+ !code.is_null() ? code->constant_pool() : kNullAddress, target);
}
}
@@ -158,9 +159,8 @@ void Assembler::set_target_internal_reference_encoded_at(Address pc,
Assembler::instr_at_put(pc + 1 * kInstrSize, instr2 | jic_offset_u);
} else {
// Encoded internal references are lui/ori load of 32-bit absolute address.
- Assembler::instr_at_put(pc + 0 * kInstrSize,
- instr1 | ((imm >> kLuiShift) & kImm16Mask));
- Assembler::instr_at_put(pc + 1 * kInstrSize, instr2 | (imm & kImm16Mask));
+ PatchLuiOriImmediate(pc, imm, instr1, 0 * kInstrSize, instr2,
+ 1 * kInstrSize);
}
// Currently used only by deserializer, and all code will be flushed
@@ -169,59 +169,60 @@ void Assembler::set_target_internal_reference_encoded_at(Address pc,
void Assembler::deserialization_set_target_internal_reference_at(
Address pc, Address target, RelocInfo::Mode mode) {
- if (mode == RelocInfo::INTERNAL_REFERENCE_ENCODED) {
+ if (RelocInfo::IsInternalReferenceEncoded(mode)) {
DCHECK(IsLui(instr_at(pc)));
set_target_internal_reference_encoded_at(pc, target);
} else {
- DCHECK(mode == RelocInfo::INTERNAL_REFERENCE);
+ DCHECK(RelocInfo::IsInternalReference(mode));
Memory<Address>(pc) = target;
}
}
-HeapObject* RelocInfo::target_object() {
- DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- return HeapObject::cast(reinterpret_cast<Object*>(
- Assembler::target_address_at(pc_, constant_pool_)));
+HeapObject RelocInfo::target_object() {
+ DCHECK(IsCodeTarget(rmode_) || IsEmbeddedObject(rmode_));
+ return HeapObject::cast(
+ Object(Assembler::target_address_at(pc_, constant_pool_)));
}
Handle<HeapObject> RelocInfo::target_object_handle(Assembler* origin) {
- DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- return Handle<HeapObject>(reinterpret_cast<HeapObject**>(
- Assembler::target_address_at(pc_, constant_pool_)));
+ if (IsCodeTarget(rmode_) || IsEmbeddedObject(rmode_)) {
+ return Handle<HeapObject>(reinterpret_cast<Address*>(
+ Assembler::target_address_at(pc_, constant_pool_)));
+ }
+ DCHECK(IsRelativeCodeTarget(rmode_));
+ return origin->relative_code_target_object_handle_at(pc_);
}
-void RelocInfo::set_target_object(Heap* heap, HeapObject* target,
+void RelocInfo::set_target_object(Heap* heap, HeapObject target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
- DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- Assembler::set_target_address_at(pc_, constant_pool_,
- reinterpret_cast<Address>(target),
+ DCHECK(IsCodeTarget(rmode_) || IsEmbeddedObject(rmode_));
+ Assembler::set_target_address_at(pc_, constant_pool_, target->ptr(),
icache_flush_mode);
- if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != nullptr) {
+ if (write_barrier_mode == UPDATE_WRITE_BARRIER && !host().is_null()) {
WriteBarrierForCode(host(), this, target);
}
}
-
Address RelocInfo::target_external_reference() {
- DCHECK(rmode_ == EXTERNAL_REFERENCE);
+ DCHECK(IsExternalReference(rmode_));
return Assembler::target_address_at(pc_, constant_pool_);
}
void RelocInfo::set_target_external_reference(
Address target, ICacheFlushMode icache_flush_mode) {
- DCHECK(rmode_ == RelocInfo::EXTERNAL_REFERENCE);
+ DCHECK(IsExternalReference(rmode_));
Assembler::set_target_address_at(pc_, constant_pool_, target,
icache_flush_mode);
}
Address RelocInfo::target_internal_reference() {
- if (rmode_ == INTERNAL_REFERENCE) {
+ if (IsInternalReference(rmode_)) {
return Memory<Address>(pc_);
} else {
// Encoded internal references are lui/ori or lui/jic load of 32-bit
// absolute address.
- DCHECK(rmode_ == INTERNAL_REFERENCE_ENCODED);
+ DCHECK(IsInternalReferenceEncoded(rmode_));
Instr instr1 = Assembler::instr_at(pc_ + 0 * kInstrSize);
Instr instr2 = Assembler::instr_at(pc_ + 1 * kInstrSize);
DCHECK(Assembler::IsLui(instr1));
@@ -230,15 +231,13 @@ Address RelocInfo::target_internal_reference() {
return static_cast<Address>(
Assembler::CreateTargetAddress(instr1, instr2));
}
- int32_t imm = (instr1 & static_cast<int32_t>(kImm16Mask)) << kLuiShift;
- imm |= (instr2 & static_cast<int32_t>(kImm16Mask));
- return static_cast<Address>(imm);
+ return static_cast<Address>(Assembler::GetLuiOriImmediate(instr1, instr2));
}
}
Address RelocInfo::target_internal_reference_address() {
- DCHECK(rmode_ == INTERNAL_REFERENCE || rmode_ == INTERNAL_REFERENCE_ENCODED);
+ DCHECK(IsInternalReference(rmode_) || IsInternalReferenceEncoded(rmode_));
return pc_;
}
@@ -274,21 +273,35 @@ void RelocInfo::WipeOut() {
}
}
+Handle<Code> Assembler::relative_code_target_object_handle_at(
+ Address pc) const {
+ Instr instr1 = instr_at(pc);
+ Instr instr2 = instr_at(pc + kInstrSize);
+ DCHECK(IsLui(instr1));
+ DCHECK(IsOri(instr2) || IsNal(instr2));
+ DCHECK(IsNal(instr2) || IsNal(instr_at(pc - kInstrSize)));
+ if (IsNal(instr2)) {
+ instr2 = instr_at(pc + 2 * kInstrSize);
+ }
+ // Interpret 2 instructions generated by li (lui/ori).
+ int code_target_index = GetLuiOriImmediate(instr1, instr2);
+ return GetCodeTarget(code_target_index);
+}
+
template <typename ObjectVisitor>
void RelocInfo::Visit(ObjectVisitor* visitor) {
- RelocInfo::Mode mode = rmode();
- if (mode == RelocInfo::EMBEDDED_OBJECT) {
+ Mode mode = rmode();
+ if (IsEmbeddedObject(mode)) {
visitor->VisitEmbeddedPointer(host(), this);
- } else if (RelocInfo::IsCodeTargetMode(mode)) {
+ } else if (IsCodeTargetMode(mode)) {
visitor->VisitCodeTarget(host(), this);
- } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
+ } else if (IsExternalReference(mode)) {
visitor->VisitExternalReference(host(), this);
- } else if (mode == RelocInfo::INTERNAL_REFERENCE ||
- mode == RelocInfo::INTERNAL_REFERENCE_ENCODED) {
+ } else if (IsInternalReference(mode) || IsInternalReferenceEncoded(mode)) {
visitor->VisitInternalReference(host(), this);
- } else if (RelocInfo::IsRuntimeEntry(mode)) {
+ } else if (IsRuntimeEntry(mode)) {
visitor->VisitRuntimeEntry(host(), this);
- } else if (RelocInfo::IsOffHeapTarget(mode)) {
+ } else if (IsOffHeapTarget(mode)) {
visitor->VisitOffHeapTarget(host(), this);
}
}
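
[Editor's note: illustration, not part of the diff.] Several hunks in this file
and in assembler-mips.cc below replace open-coded immediate splicing with the
new GetLuiOriImmediate()/PatchLuiOriImmediate() helpers, which operate on the
MIPS idiom of splitting a 32-bit value across the 16-bit immediate fields of a
lui/ori pair. The round-trip as a standalone sketch:

  #include <cstdint>

  constexpr uint32_t kImm16Mask = 0xFFFF;
  constexpr int kLuiShift = 16;

  // Splice the high/low halves of |imm| into the two immediate fields,
  // as PatchLuiOriImmediate() does.
  void PatchPair(uint32_t* instr_lui, uint32_t* instr_ori, int32_t imm) {
    *instr_lui = (*instr_lui & ~kImm16Mask) | ((imm >> kLuiShift) & kImm16Mask);
    *instr_ori = (*instr_ori & ~kImm16Mask) | (imm & kImm16Mask);
  }

  // Recombine the two halves, as GetLuiOriImmediate() does; decoding a
  // freshly patched pair returns the original |imm|.
  int32_t DecodePair(uint32_t instr_lui, uint32_t instr_ori) {
    int32_t imm = static_cast<int32_t>((instr_lui & kImm16Mask) << kLuiShift);
    return imm | static_cast<int32_t>(instr_ori & kImm16Mask);
  }
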
diff --git a/deps/v8/src/mips/assembler-mips.cc b/deps/v8/src/mips/assembler-mips.cc
index ee39c524f6..631ba8f9cb 100644
--- a/deps/v8/src/mips/assembler-mips.cc
+++ b/deps/v8/src/mips/assembler-mips.cc
@@ -38,9 +38,9 @@
#include "src/base/bits.h"
#include "src/base/cpu.h"
-#include "src/code-stubs.h"
#include "src/deoptimizer.h"
#include "src/mips/assembler-mips-inl.h"
+#include "src/objects/heap-number-inl.h"
#include "src/string-constants.h"
namespace v8 {
@@ -187,7 +187,8 @@ Register ToRegister(int num) {
const int RelocInfo::kApplyMask =
RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE) |
- RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE_ENCODED);
+ RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE_ENCODED) |
+ RelocInfo::ModeMask(RelocInfo::RELATIVE_CODE_TARGET);
bool RelocInfo::IsCodedSpecially() {
// The deserializer needs to know whether a pointer is specially coded. Being
@@ -201,23 +202,6 @@ bool RelocInfo::IsInConstantPool() {
return false;
}
-int RelocInfo::GetDeoptimizationId(Isolate* isolate, DeoptimizeKind kind) {
- DCHECK(IsRuntimeEntry(rmode_));
- return Deoptimizer::GetDeoptimizationId(isolate, target_address(), kind);
-}
-
-void RelocInfo::set_js_to_wasm_address(Address address,
- ICacheFlushMode icache_flush_mode) {
- DCHECK_EQ(rmode_, JS_TO_WASM_CALL);
- Assembler::set_target_address_at(pc_, constant_pool_, address,
- icache_flush_mode);
-}
-
-Address RelocInfo::js_to_wasm_address() const {
- DCHECK_EQ(rmode_, JS_TO_WASM_CALL);
- return Assembler::target_address_at(pc_, constant_pool_);
-}
-
uint32_t RelocInfo::wasm_call_tag() const {
DCHECK(rmode_ == WASM_CALL || rmode_ == WASM_STUB_CALL);
return static_cast<uint32_t>(
@@ -242,13 +226,6 @@ Operand Operand::EmbeddedNumber(double value) {
return result;
}
-Operand Operand::EmbeddedCode(CodeStub* stub) {
- Operand result(0, RelocInfo::CODE_TARGET);
- result.is_heap_object_request_ = true;
- result.value_.heap_object_request = HeapObjectRequest(stub);
- return result;
-}
-
Operand Operand::EmbeddedStringConstant(const StringConstantBase* str) {
Operand result(0, RelocInfo::EMBEDDED_OBJECT);
result.is_heap_object_request_ = true;
@@ -275,17 +252,13 @@ void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
object =
isolate->factory()->NewHeapNumber(request.heap_number(), TENURED);
break;
- case HeapObjectRequest::kCodeStub:
- request.code_stub()->set_isolate(isolate);
- object = request.code_stub()->GetCode();
- break;
case HeapObjectRequest::kStringConstant:
const StringConstantBase* str = request.string();
CHECK_NOT_NULL(str);
object = str->AllocateStringConstant(isolate);
break;
}
- Address pc = reinterpret_cast<Address>(buffer_) + request.offset();
+ Address pc = reinterpret_cast<Address>(buffer_start_) + request.offset();
set_target_value_at(pc, reinterpret_cast<uint32_t>(object.location()));
}
}
@@ -327,11 +300,11 @@ const Instr kLwSwInstrTypeMask = 0xFFE00000;
const Instr kLwSwInstrArgumentMask = ~kLwSwInstrTypeMask;
const Instr kLwSwOffsetMask = kImm16Mask;
-Assembler::Assembler(const AssemblerOptions& options, void* buffer,
- int buffer_size)
- : AssemblerBase(options, buffer, buffer_size),
+Assembler::Assembler(const AssemblerOptions& options,
+ std::unique_ptr<AssemblerBuffer> buffer)
+ : AssemblerBase(options, std::move(buffer)),
scratch_register_list_(at.bit()) {
- reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);
+ reloc_info_writer.Reposition(buffer_start_ + buffer_->size(), pc_);
last_trampoline_pool_end_ = 0;
no_trampoline_pool_before_ = 0;
@@ -350,19 +323,24 @@ Assembler::Assembler(const AssemblerOptions& options, void* buffer,
void Assembler::GetCode(Isolate* isolate, CodeDesc* desc) {
EmitForbiddenSlotInstruction();
+
+ int code_comments_size = WriteCodeComments();
+
DCHECK(pc_ <= reloc_info_writer.pos()); // No overlap.
AllocateAndInstallRequestedHeapObjects(isolate);
// Set up code descriptor.
- desc->buffer = buffer_;
- desc->buffer_size = buffer_size_;
+ desc->buffer = buffer_start_;
+ desc->buffer_size = buffer_->size();
desc->instr_size = pc_offset();
- desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
+ desc->reloc_size =
+ (buffer_start_ + desc->buffer_size) - reloc_info_writer.pos();
desc->origin = this;
desc->constant_pool_size = 0;
desc->unwinding_info_size = 0;
desc->unwinding_info = nullptr;
+ desc->code_comments_size = code_comments_size;
}
@@ -684,6 +662,20 @@ bool Assembler::IsOri(Instr instr) {
return opcode == ORI;
}
+bool Assembler::IsAddu(Instr instr, Register rd, Register rs, Register rt) {
+ uint32_t opcode = GetOpcodeField(instr);
+ uint32_t rd_field = GetRd(instr);
+ uint32_t rs_field = GetRs(instr);
+ uint32_t rt_field = GetRt(instr);
+ uint32_t sa_field = GetSaField(instr);
+ uint32_t rd_reg = static_cast<uint32_t>(rd.code());
+ uint32_t rs_reg = static_cast<uint32_t>(rs.code());
+ uint32_t rt_reg = static_cast<uint32_t>(rt.code());
+ uint32_t function_field = GetFunction(instr);
+ return opcode == SPECIAL && sa_field == 0 && function_field == ADDU &&
+ rd_reg == rd_field && rs_reg == rs_field && rt_reg == rt_field;
+}
+
bool Assembler::IsMov(Instr instr, Register rd, Register rs) {
uint32_t opcode = GetOpcodeField(instr);
uint32_t rd_field = GetRd(instr);
@@ -848,13 +840,42 @@ void Assembler::UnpackTargetAddressUnsigned(uint32_t address,
jic_offset = static_cast<uint32_t>(jic_offset16) & kImm16Mask;
}
+void Assembler::PatchLuiOriImmediate(int pc, int32_t imm, Instr instr_lui,
+ Address offset_lui, Instr instr_ori,
+ Address offset_ori) {
+ DCHECK(IsLui(instr_lui));
+ DCHECK(IsOri(instr_ori));
+ instr_at_put(static_cast<int>(pc + offset_lui),
+ instr_lui | ((imm >> kLuiShift) & kImm16Mask));
+ instr_at_put(static_cast<int>(pc + offset_ori),
+ instr_ori | (imm & kImm16Mask));
+}
+
+void Assembler::PatchLuiOriImmediate(Address pc, int32_t imm, Instr instr_lui,
+ Address offset_lui, Instr instr_ori,
+ Address offset_ori) {
+ DCHECK(IsLui(instr_lui));
+ DCHECK(IsOri(instr_ori));
+ instr_at_put(pc + offset_lui, instr_lui | ((imm >> kLuiShift) & kImm16Mask));
+ instr_at_put(pc + offset_ori, instr_ori | (imm & kImm16Mask));
+}
+
+int32_t Assembler::GetLuiOriImmediate(Instr instr_lui, Instr instr_ori) {
+ DCHECK(IsLui(instr_lui));
+ DCHECK(IsOri(instr_ori));
+ int32_t imm;
+ imm = (instr_lui & static_cast<int32_t>(kImm16Mask)) << kLuiShift;
+ imm |= (instr_ori & static_cast<int32_t>(kImm16Mask));
+ return imm;
+}
+
int Assembler::target_at(int pos, bool is_internal) {
Instr instr = instr_at(pos);
if (is_internal) {
if (instr == 0) {
return kEndOfChain;
} else {
- int32_t instr_address = reinterpret_cast<int32_t>(buffer_ + pos);
+ int32_t instr_address = reinterpret_cast<int32_t>(buffer_start_ + pos);
int delta = static_cast<int>(instr_address - instr);
DCHECK(pos > delta);
return pos - delta;
@@ -877,10 +898,7 @@ int Assembler::target_at(int pos, bool is_internal) {
int32_t imm32;
Instr instr_lui = instr_at(pos + 2 * kInstrSize);
Instr instr_ori = instr_at(pos + 3 * kInstrSize);
- DCHECK(IsLui(instr_lui));
- DCHECK(IsOri(instr_ori));
- imm32 = (instr_lui & static_cast<int32_t>(kImm16Mask)) << kLuiShift;
- imm32 |= (instr_ori & static_cast<int32_t>(kImm16Mask));
+ imm32 = GetLuiOriImmediate(instr_lui, instr_ori);
if (imm32 == kEndOfJumpChain) {
// EndOfChain sentinel is returned directly, not relative to pc or pos.
return kEndOfChain;
@@ -892,10 +910,7 @@ int Assembler::target_at(int pos, bool is_internal) {
int32_t imm32;
Instr instr_lui = instr_at(pos + 0 * kInstrSize);
Instr instr_ori = instr_at(pos + 2 * kInstrSize);
- DCHECK(IsLui(instr_lui));
- DCHECK(IsOri(instr_ori));
- imm32 = (instr_lui & static_cast<int32_t>(kImm16Mask)) << kLuiShift;
- imm32 |= (instr_ori & static_cast<int32_t>(kImm16Mask));
+ imm32 = GetLuiOriImmediate(instr_lui, instr_ori);
if (imm32 == kEndOfJumpChain) {
// EndOfChain sentinel is returned directly, not relative to pc or pos.
return kEndOfChain;
@@ -909,15 +924,14 @@ int Assembler::target_at(int pos, bool is_internal) {
if (IsJicOrJialc(instr2)) {
imm = CreateTargetAddress(instr1, instr2);
} else {
- imm = (instr1 & static_cast<int32_t>(kImm16Mask)) << kLuiShift;
- imm |= (instr2 & static_cast<int32_t>(kImm16Mask));
+ imm = GetLuiOriImmediate(instr1, instr2);
}
if (imm == kEndOfJumpChain) {
// EndOfChain sentinel is returned directly, not relative to pc or pos.
return kEndOfChain;
} else {
- uint32_t instr_address = reinterpret_cast<int32_t>(buffer_ + pos);
+ uint32_t instr_address = reinterpret_cast<int32_t>(buffer_start_ + pos);
int32_t delta = instr_address - imm;
DCHECK(pos > delta);
return pos - delta;
@@ -948,14 +962,14 @@ void Assembler::target_at_put(int32_t pos, int32_t target_pos,
Instr instr = instr_at(pos);
if (is_internal) {
- uint32_t imm = reinterpret_cast<uint32_t>(buffer_) + target_pos;
+ uint32_t imm = reinterpret_cast<uint32_t>(buffer_start_) + target_pos;
instr_at_put(pos, imm);
return;
}
if ((instr & ~kImm16Mask) == 0) {
DCHECK(target_pos == kEndOfChain || target_pos >= 0);
// Emitted label constant, not part of a branch.
- // Make label relative to Code* of generated Code object.
+ // Make label relative to Code pointer of generated Code object.
instr_at_put(pos, target_pos + (Code::kHeaderSize - kHeapObjectTag));
return;
}
@@ -982,8 +996,10 @@ void Assembler::target_at_put(int32_t pos, int32_t target_pos,
Instr instr_branch_delay;
if (IsJump(instr_j)) {
- instr_branch_delay = instr_at(pos + 6 * kInstrSize);
+ // Case when branch delay slot is protected.
+ instr_branch_delay = nopInstr;
} else {
+ // Case when branch delay slot is used.
instr_branch_delay = instr_at(pos + 7 * kInstrSize);
}
instr_at_put(pos + 0 * kInstrSize, instr_b);
@@ -995,9 +1011,8 @@ void Assembler::target_at_put(int32_t pos, int32_t target_pos,
instr_lui &= ~kImm16Mask;
instr_ori &= ~kImm16Mask;
- instr_at_put(pos + 2 * kInstrSize,
- instr_lui | ((imm >> kLuiShift) & kImm16Mask));
- instr_at_put(pos + 3 * kInstrSize, instr_ori | (imm & kImm16Mask));
+ PatchLuiOriImmediate(pos, imm, instr_lui, 2 * kInstrSize, instr_ori,
+ 3 * kInstrSize);
}
} else {
DCHECK(IsLui(instr));
@@ -1024,16 +1039,14 @@ void Assembler::target_at_put(int32_t pos, int32_t target_pos,
} else {
instr_lui &= ~kImm16Mask;
instr_ori &= ~kImm16Mask;
-
- instr_at_put(pos + 0 * kInstrSize,
- instr_lui | ((imm >> kLuiShift) & kImm16Mask));
- instr_at_put(pos + 2 * kInstrSize, instr_ori | (imm & kImm16Mask));
+ PatchLuiOriImmediate(pos, imm, instr_lui, 0 * kInstrSize, instr_ori,
+ 2 * kInstrSize);
}
} else {
Instr instr1 = instr_at(pos + 0 * kInstrSize);
Instr instr2 = instr_at(pos + 1 * kInstrSize);
DCHECK(IsOri(instr2) || IsJicOrJialc(instr2));
- uint32_t imm = reinterpret_cast<uint32_t>(buffer_) + target_pos;
+ uint32_t imm = reinterpret_cast<uint32_t>(buffer_start_) + target_pos;
DCHECK_EQ(imm & 3, 0);
DCHECK(IsLui(instr1) && (IsJicOrJialc(instr2) || IsOri(instr2)));
instr1 &= ~kImm16Mask;
@@ -1045,9 +1058,8 @@ void Assembler::target_at_put(int32_t pos, int32_t target_pos,
instr_at_put(pos + 0 * kInstrSize, instr1 | lui_offset_u);
instr_at_put(pos + 1 * kInstrSize, instr2 | jic_offset_u);
} else {
- instr_at_put(pos + 0 * kInstrSize,
- instr1 | ((imm & kHiMask) >> kLuiShift));
- instr_at_put(pos + 1 * kInstrSize, instr2 | (imm & kImm16Mask));
+ PatchLuiOriImmediate(pos, imm, instr1, 0 * kInstrSize, instr2,
+ 1 * kInstrSize);
}
}
}
@@ -1501,7 +1513,7 @@ uint32_t Assembler::jump_address(Label* L) {
}
}
- uint32_t imm = reinterpret_cast<uint32_t>(buffer_) + target_pos;
+ uint32_t imm = reinterpret_cast<uint32_t>(buffer_start_) + target_pos;
DCHECK_EQ(imm & 3, 0);
return imm;
@@ -2335,14 +2347,16 @@ void Assembler::sc(Register rd, const MemOperand& rs) {
}
}
-void Assembler::llwp(Register rd, Register rt, Register base) {
+void Assembler::llx(Register rd, const MemOperand& rs) {
DCHECK(IsMipsArchVariant(kMips32r6));
- GenInstrRegister(SPECIAL3, base, rt, rd, 1, LL_R6);
+ DCHECK(is_int9(rs.offset_));
+ GenInstrImmediate(SPECIAL3, rs.rm(), rd, rs.offset_, 1, LL_R6);
}
-void Assembler::scwp(Register rd, Register rt, Register base) {
+void Assembler::scx(Register rd, const MemOperand& rs) {
DCHECK(IsMipsArchVariant(kMips32r6));
- GenInstrRegister(SPECIAL3, base, rt, rd, 1, SC_R6);
+ DCHECK(is_int9(rs.offset_));
+ GenInstrImmediate(SPECIAL3, rs.rm(), rd, rs.offset_, 1, SC_R6);
}
void Assembler::lui(Register rd, int32_t j) {
@@ -3782,8 +3796,7 @@ int Assembler::RelocateInternalReference(RelocInfo::Mode rmode, Address pc,
if (IsJicOrJialc(instr2)) {
imm = CreateTargetAddress(instr1, instr2);
} else {
- imm = (instr1 & static_cast<int32_t>(kImm16Mask)) << kLuiShift;
- imm |= (instr2 & static_cast<int32_t>(kImm16Mask));
+ imm = GetLuiOriImmediate(instr1, instr2);
}
if (imm == kEndOfJumpChain) {
@@ -3800,9 +3813,8 @@ int Assembler::RelocateInternalReference(RelocInfo::Mode rmode, Address pc,
instr_at_put(pc + 0 * kInstrSize, instr1 | lui_offset_u);
instr_at_put(pc + 1 * kInstrSize, instr2 | jic_offset_u);
} else {
- instr_at_put(pc + 0 * kInstrSize,
- instr1 | ((imm >> kLuiShift) & kImm16Mask));
- instr_at_put(pc + 1 * kInstrSize, instr2 | (imm & kImm16Mask));
+ PatchLuiOriImmediate(pc, imm, instr1, 0 * kInstrSize, instr2,
+ 1 * kInstrSize);
}
return 2; // Number of instructions patched.
} else {
@@ -3811,48 +3823,75 @@ int Assembler::RelocateInternalReference(RelocInfo::Mode rmode, Address pc,
}
}
+void Assembler::RelocateRelativeReference(RelocInfo::Mode rmode, Address pc,
+ intptr_t pc_delta) {
+ Instr instr = instr_at(pc);
-void Assembler::GrowBuffer() {
- if (!own_buffer_) FATAL("external code buffer is too small");
+ DCHECK(RelocInfo::IsRelativeCodeTarget(rmode));
+ if (IsLui(instr)) {
+ Instr instr1 = instr_at(pc + 0 * kInstrSize);
+ Instr instr2 = instr_at(pc + 1 * kInstrSize);
+ Instr instr3 = instr_at(pc + 2 * kInstrSize);
+ int32_t imm;
+ Address ori_offset;
+ if (IsNal(instr2)) {
+ instr2 = instr3;
+ ori_offset = 2 * kInstrSize;
+ } else {
+ ori_offset = 1 * kInstrSize;
+ }
+ DCHECK(IsOri(instr2));
+ imm = GetLuiOriImmediate(instr1, instr2);
+ instr1 &= ~kImm16Mask;
+ instr2 &= ~kImm16Mask;
- // Compute new buffer size.
- CodeDesc desc; // the new buffer
- if (buffer_size_ < 1 * MB) {
- desc.buffer_size = 2*buffer_size_;
+ if (imm == kEndOfJumpChain) {
+ return;
+ }
+ imm += pc_delta;
+ DCHECK_EQ(imm & 3, 0);
+ PatchLuiOriImmediate(pc, imm, instr1, 0 * kInstrSize, instr2, ori_offset);
+ return;
} else {
- desc.buffer_size = buffer_size_ + 1*MB;
+ UNREACHABLE();
}
+}
+
+void Assembler::GrowBuffer() {
+ // Compute new buffer size.
+ int old_size = buffer_->size();
+ int new_size = std::min(2 * old_size, old_size + 1 * MB);
   // Some internal data structures overflow for very large buffers,
   // so they must ensure that kMaximalBufferSize is not too large.
- if (desc.buffer_size > kMaximalBufferSize) {
+ if (new_size > kMaximalBufferSize) {
V8::FatalProcessOutOfMemory(nullptr, "Assembler::GrowBuffer");
}
// Set up new buffer.
- desc.buffer = NewArray<byte>(desc.buffer_size);
- desc.origin = this;
-
- desc.instr_size = pc_offset();
- desc.reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
+ std::unique_ptr<AssemblerBuffer> new_buffer = buffer_->Grow(new_size);
+ DCHECK_EQ(new_size, new_buffer->size());
+ byte* new_start = new_buffer->start();
// Copy the data.
- int pc_delta = desc.buffer - buffer_;
- int rc_delta = (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_);
- MemMove(desc.buffer, buffer_, desc.instr_size);
+ int pc_delta = new_start - buffer_start_;
+ int rc_delta = (new_start + new_size) - (buffer_start_ + old_size);
+ size_t reloc_size = (buffer_start_ + old_size) - reloc_info_writer.pos();
+ MemMove(new_start, buffer_start_, pc_offset());
MemMove(reloc_info_writer.pos() + rc_delta, reloc_info_writer.pos(),
- desc.reloc_size);
+ reloc_size);
// Switch buffers.
- DeleteArray(buffer_);
- buffer_ = desc.buffer;
- buffer_size_ = desc.buffer_size;
+ buffer_ = std::move(new_buffer);
+ buffer_start_ = new_start;
pc_ += pc_delta;
reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
reloc_info_writer.last_pc() + pc_delta);
// Relocate runtime entries.
- for (RelocIterator it(desc); !it.done(); it.next()) {
+ Vector<byte> instructions{buffer_start_, pc_offset()};
+ Vector<const byte> reloc_info{reloc_info_writer.pos(), reloc_size};
+ for (RelocIterator it(instructions, reloc_info, 0); !it.done(); it.next()) {
RelocInfo::Mode rmode = it.rinfo()->rmode();
if (rmode == RelocInfo::INTERNAL_REFERENCE_ENCODED ||
rmode == RelocInfo::INTERNAL_REFERENCE) {
@@ -3885,7 +3924,7 @@ void Assembler::dd(Label* label) {
uint32_t data;
CheckForEmitInForbiddenSlot();
if (label->is_bound()) {
- data = reinterpret_cast<uint32_t>(buffer_ + label->pos());
+ data = reinterpret_cast<uint32_t>(buffer_start_ + label->pos());
} else {
data = jump_address(label);
unbound_labels_count_++;
@@ -3899,7 +3938,7 @@ void Assembler::dd(Label* label) {
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
if (!ShouldRecordRelocInfo(rmode)) return;
// We do not try to reuse pool constants.
- RelocInfo rinfo(reinterpret_cast<Address>(pc_), rmode, data, nullptr);
+ RelocInfo rinfo(reinterpret_cast<Address>(pc_), rmode, data, Code());
DCHECK_GE(buffer_space(), kMaxRelocSize); // Too late to grow buffer here.
reloc_info_writer.Write(&rinfo);
}
@@ -3948,20 +3987,11 @@ void Assembler::CheckTrampolinePool() {
bc(&after_pool);
nop();
} else {
- or_(t8, ra, zero_reg);
- nal(); // Read PC into ra register.
- lui(t9, 0); // Branch delay slot.
- ori(t9, t9, 0);
- addu(t9, ra, t9);
- // Instruction jr will take or_ from the next trampoline.
- // in its branch delay slot. This is the expected behavior
- // in order to decrease size of trampoline pool.
- or_(ra, t8, zero_reg);
- jr(t9);
+ GenPCRelativeJump(t8, t9, 0, RelocInfo::NONE,
+ BranchDelaySlot::PROTECT);
}
}
}
- nop();
bind(&after_pool);
trampoline_ = Trampoline(pool_start, unbound_labels_count_);
@@ -3983,16 +4013,26 @@ void Assembler::CheckTrampolinePool() {
Address Assembler::target_address_at(Address pc) {
Instr instr1 = instr_at(pc);
Instr instr2 = instr_at(pc + kInstrSize);
+ Instr instr3 = instr_at(pc + 2 * kInstrSize);
// Interpret 2 instructions generated by li (lui/ori) or optimized pairs
// lui/jic, aui/jic or lui/jialc.
if (IsLui(instr1)) {
if (IsOri(instr2)) {
+ Address target_address;
// Assemble the 32 bit value.
- return static_cast<Address>((GetImmediate16(instr1) << kLuiShift) |
- GetImmediate16(instr2));
+ target_address = GetLuiOriImmediate(instr1, instr2);
+ if (IsAddu(instr3, t9, ra, t9)) {
+ target_address += pc + kRelativeJumpForBuiltinsOffset;
+ }
+ return target_address;
} else if (IsJicOrJialc(instr2)) {
// Assemble the 32 bit value.
return static_cast<Address>(CreateTargetAddress(instr1, instr2));
+ } else if (IsNal(instr2)) {
+ DCHECK(IsOri(instr3));
+ Address target_address;
+ target_address = GetLuiOriImmediate(instr1, instr3);
+ return target_address + pc + kRelativeCallForBuiltinsOffset;
}
}
@@ -4005,30 +4045,24 @@ Address Assembler::target_address_at(Address pc) {
// qNaN is a MIPS sNaN, and ia32 sNaN is MIPS qNaN. If running from a heap
// snapshot generated on ia32, the resulting MIPS sNaN must be quieted.
// OS::nan_value() returns a qNaN.
-void Assembler::QuietNaN(HeapObject* object) {
+void Assembler::QuietNaN(HeapObject object) {
HeapNumber::cast(object)->set_value(std::numeric_limits<double>::quiet_NaN());
}
-
 // On MIPS, a target address is stored in a lui/ori instruction pair, each
 // of which loads 16 bits of the 32-bit address into a register.
 // Patching the address must replace both instructions and flush the i-cache.
 // On r6, the target address is stored in a lui/jic pair, and both
 // instructions have to be patched.
-//
-// There is an optimization below, which emits a nop when the address
-// fits in just 16 bits. This is unlikely to help, and should be benchmarked,
-// and possibly removed.
void Assembler::set_target_value_at(Address pc, uint32_t target,
ICacheFlushMode icache_flush_mode) {
+ Instr instr1 = instr_at(pc);
Instr instr2 = instr_at(pc + kInstrSize);
- uint32_t rt_code = GetRtField(instr2);
- uint32_t* p = reinterpret_cast<uint32_t*>(pc);
#ifdef DEBUG
// Check we have the result from a li macro-instruction, using instr pair.
- Instr instr1 = instr_at(pc);
- DCHECK(IsLui(instr1) && (IsOri(instr2) || IsJicOrJialc(instr2)));
+ DCHECK(IsLui(instr1) &&
+ (IsOri(instr2) || IsJicOrJialc(instr2) || IsNal(instr2)));
#endif
if (IsJicOrJialc(instr2)) {
@@ -4036,18 +4070,37 @@ void Assembler::set_target_value_at(Address pc, uint32_t target,
uint32_t lui_offset, jic_offset;
Assembler::UnpackTargetAddressUnsigned(target, lui_offset, jic_offset);
- *p &= ~kImm16Mask;
- *(p + 1) &= ~kImm16Mask;
+ instr1 &= ~kImm16Mask;
+ instr2 &= ~kImm16Mask;
- *p |= lui_offset;
- *(p + 1) |= jic_offset;
+ instr1 |= lui_offset;
+ instr2 |= jic_offset;
+ instr_at_put(pc, instr1);
+ instr_at_put(pc + kInstrSize, instr2);
} else {
+ Instr instr3 = instr_at(pc + 2 * kInstrSize);
+    // Convert the absolute target to a PC-relative offset when relative
+    // calls/jumps are used for builtins.
+ if (IsNal(instr2)) {
+ target -= pc + kRelativeCallForBuiltinsOffset;
+ }
+ if (IsAddu(instr3, t9, ra, t9)) {
+ target -= pc + kRelativeJumpForBuiltinsOffset;
+ }
// Must use 2 instructions to ensure patchable code => just use lui and ori.
// lui rt, upper-16.
// ori rt rt, lower-16.
- *p = LUI | rt_code | ((target & kHiMask) >> kLuiShift);
- *(p + 1) = ORI | rt_code | (rt_code << 5) | (target & kImm16Mask);
+ if (IsNal(instr2)) {
+ instr1 &= ~kImm16Mask;
+ instr3 &= ~kImm16Mask;
+ PatchLuiOriImmediate(pc, target, instr1, 0 * kInstrSize, instr3,
+ 2 * kInstrSize);
+ } else {
+ instr1 &= ~kImm16Mask;
+ instr2 &= ~kImm16Mask;
+ PatchLuiOriImmediate(pc, target, instr1, 0 * kInstrSize, instr2,
+ 1 * kInstrSize);
+ }
}
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
@@ -4055,6 +4108,44 @@ void Assembler::set_target_value_at(Address pc, uint32_t target,
}
}
+void Assembler::GenPCRelativeJump(Register tf, Register ts, int32_t imm32,
+ RelocInfo::Mode rmode,
+ BranchDelaySlot bdslot) {
+  // The order of these instructions is relied upon when patching them or
+  // when changing the imm32 that the lui/ori pair loads.
+ or_(tf, ra, zero_reg);
+  nal();  // The relative position of this nal determines kLongBranchPCOffset.
+ if (!RelocInfo::IsNone(rmode)) {
+ RecordRelocInfo(rmode);
+ }
+ lui(ts, (imm32 & kHiMask) >> kLuiShift);
+ ori(ts, ts, (imm32 & kImm16Mask));
+ addu(ts, ra, ts);
+ if (bdslot == USE_DELAY_SLOT) {
+ or_(ra, tf, zero_reg);
+ }
+ jr(ts);
+ if (bdslot == PROTECT) {
+ or_(ra, tf, zero_reg);
+ }
+}
+
+void Assembler::GenPCRelativeJumpAndLink(Register t, int32_t imm32,
+ RelocInfo::Mode rmode,
+ BranchDelaySlot bdslot) {
+ if (!RelocInfo::IsNone(rmode)) {
+ RecordRelocInfo(rmode);
+ }
+  // The order of these instructions is relied upon when patching them or
+  // when changing the imm32 that the lui/ori pair loads.
+ lui(t, (imm32 & kHiMask) >> kLuiShift);
+  nal();  // The relative position of this nal determines kLongBranchPCOffset.
+ ori(t, t, (imm32 & kImm16Mask));
+ addu(t, ra, t);
+ jalr(t);
+ if (bdslot == PROTECT) nop();
+}
+
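
A worked layout of the two sequences above, assuming the usual MIPS semantics
of nal (ra receives the address two instructions past the nal, i.e.
nal_pc + 8) and a sequence starting at address P:

    // GenPCRelativeJump, PROTECT variant:
    //   P+0   or_(tf, ra, zero)       save ra
    //   P+4   nal()                   ra <- P+12
    //   P+8   lui(ts, hi(imm32))      reloc info recorded here
    //   P+12  ori(ts, ts, lo(imm32))
    //   P+16  addu(ts, ra, ts)        ts <- P+12 + imm32
    //   P+20  jr(ts)
    //   P+24  or_(ra, tf, zero)       restore ra in the delay slot
    //
    // GenPCRelativeJumpAndLink, PROTECT variant:
    //   P+0   lui(t, hi(imm32))       reloc info recorded here
    //   P+4   nal()                   ra <- P+12
    //   P+8   ori(t, t, lo(imm32))
    //   P+12  addu(t, ra, t)          t <- P+12 + imm32
    //   P+16  jalr(t)
    //   P+20  nop()                   delay slot

In both cases the target is ra + imm32 = P + 12 + imm32. The reloc info is
recorded at the lui, so ra lies 1 * kInstrSize past the lui in the jump
sequence and 3 * kInstrSize past it in the call sequence, which is where
kRelativeJumpForBuiltinsOffset and kRelativeCallForBuiltinsOffset come from.
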
UseScratchRegisterScope::UseScratchRegisterScope(Assembler* assembler)
: available_(assembler->GetScratchRegisterList()),
old_available_(*available_) {}
diff --git a/deps/v8/src/mips/assembler-mips.h b/deps/v8/src/mips/assembler-mips.h
index d535f1e923..21409f9bf4 100644
--- a/deps/v8/src/mips/assembler-mips.h
+++ b/deps/v8/src/mips/assembler-mips.h
@@ -41,344 +41,17 @@
#include <set>
#include "src/assembler.h"
+#include "src/external-reference.h"
+#include "src/label.h"
#include "src/mips/constants-mips.h"
+#include "src/mips/register-mips.h"
+#include "src/objects/smi.h"
namespace v8 {
namespace internal {
-// clang-format off
-#define GENERAL_REGISTERS(V) \
- V(zero_reg) V(at) V(v0) V(v1) V(a0) V(a1) V(a2) V(a3) \
- V(t0) V(t1) V(t2) V(t3) V(t4) V(t5) V(t6) V(t7) \
- V(s0) V(s1) V(s2) V(s3) V(s4) V(s5) V(s6) V(s7) V(t8) V(t9) \
- V(k0) V(k1) V(gp) V(sp) V(fp) V(ra)
-
-#define ALLOCATABLE_GENERAL_REGISTERS(V) \
- V(a0) V(a1) V(a2) V(a3) \
- V(t0) V(t1) V(t2) V(t3) V(t4) V(t5) V(t6) V(s7) \
- V(v0) V(v1)
-
-#define DOUBLE_REGISTERS(V) \
- V(f0) V(f1) V(f2) V(f3) V(f4) V(f5) V(f6) V(f7) \
- V(f8) V(f9) V(f10) V(f11) V(f12) V(f13) V(f14) V(f15) \
- V(f16) V(f17) V(f18) V(f19) V(f20) V(f21) V(f22) V(f23) \
- V(f24) V(f25) V(f26) V(f27) V(f28) V(f29) V(f30) V(f31)
-
-#define FLOAT_REGISTERS DOUBLE_REGISTERS
-#define SIMD128_REGISTERS(V) \
- V(w0) V(w1) V(w2) V(w3) V(w4) V(w5) V(w6) V(w7) \
- V(w8) V(w9) V(w10) V(w11) V(w12) V(w13) V(w14) V(w15) \
- V(w16) V(w17) V(w18) V(w19) V(w20) V(w21) V(w22) V(w23) \
- V(w24) V(w25) V(w26) V(w27) V(w28) V(w29) V(w30) V(w31)
-
-#define ALLOCATABLE_DOUBLE_REGISTERS(V) \
- V(f0) V(f2) V(f4) V(f6) V(f8) V(f10) V(f12) V(f14) \
- V(f16) V(f18) V(f20) V(f22) V(f24)
-// clang-format on
-
-// Register lists.
-// Note that the bit values must match those used in actual instruction
-// encoding.
-const int kNumRegs = 32;
-
-const RegList kJSCallerSaved = 1 << 2 | // v0
- 1 << 3 | // v1
- 1 << 4 | // a0
- 1 << 5 | // a1
- 1 << 6 | // a2
- 1 << 7 | // a3
- 1 << 8 | // t0
- 1 << 9 | // t1
- 1 << 10 | // t2
- 1 << 11 | // t3
- 1 << 12 | // t4
- 1 << 13 | // t5
- 1 << 14 | // t6
- 1 << 15; // t7
-
-const int kNumJSCallerSaved = 14;
-
-// Callee-saved registers preserved when switching from C to JavaScript.
-const RegList kCalleeSaved = 1 << 16 | // s0
- 1 << 17 | // s1
- 1 << 18 | // s2
- 1 << 19 | // s3
- 1 << 20 | // s4
- 1 << 21 | // s5
- 1 << 22 | // s6 (roots in Javascript code)
- 1 << 23 | // s7 (cp in Javascript code)
- 1 << 30; // fp/s8
-
-const int kNumCalleeSaved = 9;
-
-const RegList kCalleeSavedFPU = 1 << 20 | // f20
- 1 << 22 | // f22
- 1 << 24 | // f24
- 1 << 26 | // f26
- 1 << 28 | // f28
- 1 << 30; // f30
-
-const int kNumCalleeSavedFPU = 6;
-
-const RegList kCallerSavedFPU = 1 << 0 | // f0
- 1 << 2 | // f2
- 1 << 4 | // f4
- 1 << 6 | // f6
- 1 << 8 | // f8
- 1 << 10 | // f10
- 1 << 12 | // f12
- 1 << 14 | // f14
- 1 << 16 | // f16
- 1 << 18; // f18
-
-// Number of registers for which space is reserved in safepoints. Must be a
-// multiple of 8.
-const int kNumSafepointRegisters = 24;
-
-// Define the list of registers actually saved at safepoints.
-// Note that the number of saved registers may be smaller than the reserved
-// space, i.e. kNumSafepointSavedRegisters <= kNumSafepointRegisters.
-const RegList kSafepointSavedRegisters = kJSCallerSaved | kCalleeSaved;
-const int kNumSafepointSavedRegisters = kNumJSCallerSaved + kNumCalleeSaved;
-
-const int kUndefIndex = -1;
-// Map with indexes on stack that corresponds to codes of saved registers.
-const int kSafepointRegisterStackIndexMap[kNumRegs] = {kUndefIndex, // zero_reg
- kUndefIndex, // at
- 0, // v0
- 1, // v1
- 2, // a0
- 3, // a1
- 4, // a2
- 5, // a3
- 6, // t0
- 7, // t1
- 8, // t2
- 9, // t3
- 10, // t4
- 11, // t5
- 12, // t6
- 13, // t7
- 14, // s0
- 15, // s1
- 16, // s2
- 17, // s3
- 18, // s4
- 19, // s5
- 20, // s6
- 21, // s7
- kUndefIndex, // t8
- kUndefIndex, // t9
- kUndefIndex, // k0
- kUndefIndex, // k1
- kUndefIndex, // gp
- kUndefIndex, // sp
- 22, // fp
- kUndefIndex};
-
-// CPU Registers.
-//
-// 1) We would prefer to use an enum, but enum values are assignment-
-// compatible with int, which has caused code-generation bugs.
-//
-// 2) We would prefer to use a class instead of a struct but we don't like
-// the register initialization to depend on the particular initialization
-// order (which appears to be different on OS X, Linux, and Windows for the
-// installed versions of C++ we tried). Using a struct permits C-style
-// "initialization". Also, the Register objects cannot be const as this
-// forces initialization stubs in MSVC, making us dependent on initialization
-// order.
-//
-// 3) By not using an enum, we are possibly preventing the compiler from
-// doing certain constant folds, which may significantly reduce the
-// code generated for some assembly instructions (because they boil down
-// to a few constants). If this is a problem, we could change the code
-// such that we use an enum in optimized mode, and the struct in debug
-// mode. This way we get the compile-time error checking in debug mode
-// and best performance in optimized code.
-
-
-// -----------------------------------------------------------------------------
-// Implementation of Register and FPURegister.
-
-enum RegisterCode {
-#define REGISTER_CODE(R) kRegCode_##R,
- GENERAL_REGISTERS(REGISTER_CODE)
-#undef REGISTER_CODE
- kRegAfterLast
-};
-
-class Register : public RegisterBase<Register, kRegAfterLast> {
- public:
-#if defined(V8_TARGET_LITTLE_ENDIAN)
- static constexpr int kMantissaOffset = 0;
- static constexpr int kExponentOffset = 4;
-#elif defined(V8_TARGET_BIG_ENDIAN)
- static constexpr int kMantissaOffset = 4;
- static constexpr int kExponentOffset = 0;
-#else
-#error Unknown endianness
-#endif
-
- private:
- friend class RegisterBase;
- explicit constexpr Register(int code) : RegisterBase(code) {}
-};
-
-// s7: context register
-// s3: scratch register
-// s4: scratch register 2
-#define DECLARE_REGISTER(R) \
- constexpr Register R = Register::from_code<kRegCode_##R>();
-GENERAL_REGISTERS(DECLARE_REGISTER)
-#undef DECLARE_REGISTER
-constexpr Register no_reg = Register::no_reg();
-
-int ToNumber(Register reg);
-
-Register ToRegister(int num);
-
-constexpr bool kPadArguments = false;
-constexpr bool kSimpleFPAliasing = true;
-constexpr bool kSimdMaskRegisters = false;
-
-enum DoubleRegisterCode {
-#define REGISTER_CODE(R) kDoubleCode_##R,
- DOUBLE_REGISTERS(REGISTER_CODE)
-#undef REGISTER_CODE
- kDoubleAfterLast
-};
-
-// Coprocessor register.
-class FPURegister : public RegisterBase<FPURegister, kDoubleAfterLast> {
- public:
- FPURegister low() const {
- // Find low reg of a Double-reg pair, which is the reg itself.
- DCHECK_EQ(code() % 2, 0); // Specified Double reg must be even.
- return FPURegister::from_code(code());
- }
- FPURegister high() const {
- // Find high reg of a Doubel-reg pair, which is reg + 1.
- DCHECK_EQ(code() % 2, 0); // Specified Double reg must be even.
- return FPURegister::from_code(code() + 1);
- }
-
- private:
- friend class RegisterBase;
- explicit constexpr FPURegister(int code) : RegisterBase(code) {}
-};
-
-enum MSARegisterCode {
-#define REGISTER_CODE(R) kMsaCode_##R,
- SIMD128_REGISTERS(REGISTER_CODE)
-#undef REGISTER_CODE
- kMsaAfterLast
-};
-
-// MIPS SIMD (MSA) register
-class MSARegister : public RegisterBase<MSARegister, kMsaAfterLast> {
- friend class RegisterBase;
- explicit constexpr MSARegister(int code) : RegisterBase(code) {}
-};
-
-// A few double registers are reserved: one as a scratch register and one to
-// hold 0.0.
-// f28: 0.0
-// f30: scratch register.
-
-// V8 now supports the O32 ABI, and the FPU Registers are organized as 32
-// 32-bit registers, f0 through f31. When used as 'double' they are used
-// in pairs, starting with the even numbered register. So a double operation
-// on f0 really uses f0 and f1.
-// (Modern mips hardware also supports 32 64-bit registers, via setting
-// (priviledged) Status Register FR bit to 1. This is used by the N32 ABI,
-// but it is not in common use. Someday we will want to support this in v8.)
-
-// For O32 ABI, Floats and Doubles refer to same set of 32 32-bit registers.
-typedef FPURegister FloatRegister;
-
-typedef FPURegister DoubleRegister;
-
-#define DECLARE_DOUBLE_REGISTER(R) \
- constexpr DoubleRegister R = DoubleRegister::from_code<kDoubleCode_##R>();
-DOUBLE_REGISTERS(DECLARE_DOUBLE_REGISTER)
-#undef DECLARE_DOUBLE_REGISTER
-
-constexpr DoubleRegister no_dreg = DoubleRegister::no_reg();
-
-// SIMD registers.
-typedef MSARegister Simd128Register;
-
-#define DECLARE_SIMD128_REGISTER(R) \
- constexpr Simd128Register R = Simd128Register::from_code<kMsaCode_##R>();
-SIMD128_REGISTERS(DECLARE_SIMD128_REGISTER)
-#undef DECLARE_SIMD128_REGISTER
-
-const Simd128Register no_msareg = Simd128Register::no_reg();
-
-// Register aliases.
-// cp is assumed to be a callee saved register.
-constexpr Register kRootRegister = s6;
-constexpr Register cp = s7;
-constexpr Register kScratchReg = s3;
-constexpr Register kScratchReg2 = s4;
-constexpr DoubleRegister kScratchDoubleReg = f30;
-constexpr DoubleRegister kDoubleRegZero = f28;
-// Used on mips32r6 for compare operations.
-constexpr DoubleRegister kDoubleCompareReg = f26;
-// MSA zero and scratch regs must have the same numbers as FPU zero and scratch
-constexpr Simd128Register kSimd128RegZero = w28;
-constexpr Simd128Register kSimd128ScratchReg = w30;
-
-// FPU (coprocessor 1) control registers.
-// Currently only FCSR (#31) is implemented.
-struct FPUControlRegister {
- bool is_valid() const { return reg_code == kFCSRRegister; }
- bool is(FPUControlRegister creg) const { return reg_code == creg.reg_code; }
- int code() const {
- DCHECK(is_valid());
- return reg_code;
- }
- int bit() const {
- DCHECK(is_valid());
- return 1 << reg_code;
- }
- void setcode(int f) {
- reg_code = f;
- DCHECK(is_valid());
- }
- // Unfortunately we can't make this private in a struct.
- int reg_code;
-};
-
-constexpr FPUControlRegister no_fpucreg = {kInvalidFPUControlRegister};
-constexpr FPUControlRegister FCSR = {kFCSRRegister};
-
-// MSA control registers
-struct MSAControlRegister {
- bool is_valid() const {
- return (reg_code == kMSAIRRegister) || (reg_code == kMSACSRRegister);
- }
- bool is(MSAControlRegister creg) const { return reg_code == creg.reg_code; }
- int code() const {
- DCHECK(is_valid());
- return reg_code;
- }
- int bit() const {
- DCHECK(is_valid());
- return 1 << reg_code;
- }
- void setcode(int f) {
- reg_code = f;
- DCHECK(is_valid());
- }
- // Unfortunately we can't make this private in a struct.
- int reg_code;
-};
-
-constexpr MSAControlRegister no_msacreg = {kInvalidMSAControlRegister};
-constexpr MSAControlRegister MSAIR = {kMSAIRRegister};
-constexpr MSAControlRegister MSACSR = {kMSACSRRegister};
+// Allow the programmer to use the branch delay slot of branches, jumps, and calls.
+enum BranchDelaySlot { USE_DELAY_SLOT, PROTECT };
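
A small usage sketch of the two modes (the Branch overload taking a
BranchDelaySlot already exists in the MIPS assemblers; registers and labels
here are illustrative):

    // PROTECT: the assembler fills the branch delay slot itself (e.g. with a
    // nop), so the caller can ignore delay-slot semantics.
    // USE_DELAY_SLOT: the next instruction the caller emits lands in the
    // delay slot and executes whether or not the branch is taken.
    __ Branch(USE_DELAY_SLOT, &done, eq, v0, Operand(zero_reg));
    __ mov(a0, v0);  // In the delay slot; must be safe on both paths.
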
// -----------------------------------------------------------------------------
// Machine instruction Operands.
@@ -397,16 +70,12 @@ class Operand {
value_.immediate = static_cast<int32_t>(f.address());
}
V8_INLINE explicit Operand(const char* s);
- V8_INLINE explicit Operand(Object** opp);
- V8_INLINE explicit Operand(Context** cpp);
explicit Operand(Handle<HeapObject> handle);
- V8_INLINE explicit Operand(Smi* value)
- : rm_(no_reg), rmode_(RelocInfo::NONE) {
- value_.immediate = reinterpret_cast<intptr_t>(value);
+ V8_INLINE explicit Operand(Smi value) : rm_(no_reg), rmode_(RelocInfo::NONE) {
+ value_.immediate = static_cast<intptr_t>(value.ptr());
}
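
Smi is now a tagged value type rather than a pointer, so the stored immediate
is the raw tagged word. A small illustration, assuming mips32's one-bit Smi
tagging (payload << 1, tag bit 0):

    Smi forty_two = Smi::FromInt(42);
    Operand op(forty_two);
    // The stored immediate is static_cast<intptr_t>(forty_two.ptr()),
    // i.e. (42 << 1) | 0 == 84.
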
static Operand EmbeddedNumber(double number); // Smi or HeapNumber.
- static Operand EmbeddedCode(CodeStub* stub);
static Operand EmbeddedStringConstant(const StringConstantBase* str);
// Register.
@@ -484,15 +153,10 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// for a detailed comment on the layout (globals.h).
//
// If the provided buffer is nullptr, the assembler allocates and grows its
- // own buffer, and buffer_size determines the initial buffer size. The buffer
- // is owned by the assembler and deallocated upon destruction of the
- // assembler.
- //
- // If the provided buffer is not nullptr, the assembler uses the provided
- // buffer for code generation and assumes its size to be buffer_size. If the
- // buffer is too small, a fatal error occurs. No deallocation of the buffer is
- // done upon destruction of the assembler.
- Assembler(const AssemblerOptions& options, void* buffer, int buffer_size);
+ // own buffer. Otherwise it takes ownership of the provided buffer.
+ explicit Assembler(const AssemblerOptions&,
+ std::unique_ptr<AssemblerBuffer> = {});
+
virtual ~Assembler() { }
// GetCode emits any pending (non-emitted) code and fills the descriptor
@@ -590,13 +254,13 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// of that call in the instruction stream.
inline static Address target_address_from_return_address(Address pc);
- static void QuietNaN(HeapObject* nan);
+ static void QuietNaN(HeapObject nan);
// This sets the branch destination (which gets loaded at the call address).
// This is for calls and branches within generated code. The serializer
// has already deserialized the lui/ori instructions etc.
inline static void deserialization_set_special_target_at(
- Address instruction_payload, Code* code, Address target);
+ Address instruction_payload, Code code, Address target);
// Get the size of the special target encoded at 'instruction_payload'.
inline static int deserialization_special_target_size(
@@ -612,14 +276,22 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// Difference between address of current opcode and target address offset,
// when we are generating a sequence of instructions for long relative PC
- // branches
+  // branches. It is the distance between the address of the first
+  // instruction in the jump sequence and the value that ra gets after
+  // calling nal().
static constexpr int kLongBranchPCOffset = 3 * kInstrSize;
- // Adjust ra register in branch delay slot of bal instruction so to skip
+  // Adjust the ra register in the bal instruction's branch delay slot to skip
// instructions not needed after optimization of PIC in
// TurboAssembler::BranchAndLink method.
+ static constexpr int kOptimizedBranchAndLinkLongReturnOffset = 3 * kInstrSize;
- static constexpr int kOptimizedBranchAndLinkLongReturnOffset = 4 * kInstrSize;
+  // Offset of the target's relative address in calls/jumps for builtins. It
+  // is the distance between the instruction placed just after calling
+  // RecordRelocInfo and the value that ra gets after calling nal().
+ static constexpr int kRelativeJumpForBuiltinsOffset = 1 * kInstrSize;
+  // Relative target address of jumps for builtins when we use the lui, ori,
+  // dsll, ori sequence to load an address that cannot fit into 32 bits.
+ static constexpr int kRelativeCallForBuiltinsOffset = 3 * kInstrSize;
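
A quick consistency check of these two constants against the emitted
sequences (nal places nal_pc + 8 into ra):

    // jump:  or_, nal, [reloc] lui, ori, addu, jr  ->  ra = lui_pc + 1 * kInstrSize
    // call:  [reloc] lui, nal, ori, addu, jalr     ->  ra = lui_pc + 3 * kInstrSize
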
// Here we are patching the address in the LUI/ORI instruction pair.
// These values are used in the serialization process and must be zero for
@@ -644,10 +316,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
static constexpr int kCallTargetAddressOffset = 4 * kInstrSize;
#endif
- // Difference between address of current opcode and value read from pc
- // register.
- static constexpr int kPcLoadDelta = 4;
-
// Max offset for instructions with 16-bit offset field
static constexpr int kMaxBranchOffset = (1 << (18 - 1)) - 1;
@@ -883,8 +551,8 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void ll(Register rd, const MemOperand& rs);
void sc(Register rd, const MemOperand& rs);
- void llwp(Register rd, Register rt, Register base);
- void scwp(Register rd, Register rt, Register base);
+ void llx(Register rd, const MemOperand& rs);
+ void scx(Register rd, const MemOperand& rs);
// ---------PC-Relative-instructions-----------
@@ -1721,10 +1389,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
DISALLOW_IMPLICIT_CONSTRUCTORS(BlockGrowBufferScope);
};
- // Record a comment relocation entry that can be used by a disassembler.
- // Use --code-comments to enable.
- void RecordComment(const char* msg);
-
// Record a deoptimization reason that can be used by a log or cpu profiler.
// Use --trace-deopt to enable.
void RecordDeoptReason(DeoptimizeReason reason, SourcePosition position,
@@ -1733,6 +1397,9 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
static int RelocateInternalReference(RelocInfo::Mode rmode, Address pc,
intptr_t pc_delta);
+ static void RelocateRelativeReference(RelocInfo::Mode rmode, Address pc,
+ intptr_t pc_delta);
+
// Writes a single byte or word of data in the code stream. Used for
// inline tables, e.g., jump-tables.
void db(uint8_t data);
@@ -1758,9 +1425,11 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
static void instr_at_put(Address pc, Instr instr) {
*reinterpret_cast<Instr*>(pc) = instr;
}
- Instr instr_at(int pos) { return *reinterpret_cast<Instr*>(buffer_ + pos); }
+ Instr instr_at(int pos) {
+ return *reinterpret_cast<Instr*>(buffer_start_ + pos);
+ }
void instr_at_put(int pos, Instr instr) {
- *reinterpret_cast<Instr*>(buffer_ + pos) = instr;
+ *reinterpret_cast<Instr*>(buffer_start_ + pos) = instr;
}
// Check if an instruction is a branch of some kind.
@@ -1782,6 +1451,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
static bool IsJ(Instr instr);
static bool IsLui(Instr instr);
static bool IsOri(Instr instr);
+ static bool IsAddu(Instr instr, Register rd, Register rs, Register rt);
static bool IsJal(Instr instr);
static bool IsJr(Instr instr);
@@ -1836,18 +1506,15 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void CheckTrampolinePool();
- void PatchConstantPoolAccessInstruction(int pc_offset, int offset,
- ConstantPoolEntry::Access access,
- ConstantPoolEntry::Type type) {
- // No embedded constant pool support.
- UNREACHABLE();
- }
-
bool IsPrevInstrCompactBranch() { return prev_instr_compact_branch_; }
static bool IsCompactBranchSupported() {
return IsMipsArchVariant(kMips32r6);
}
+ // Get the code target object for a pc-relative call or jump.
+ V8_INLINE Handle<Code> relative_code_target_object_handle_at(
+ Address pc_) const;
+
inline int UnboundLabelsCount() { return unbound_labels_count_; }
protected:
@@ -1881,6 +1548,9 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// Record reloc info for current pc_.
void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);
+ // Read 32-bit immediate from lui, ori pair that is used to load immediate.
+ static int32_t GetLuiOriImmediate(Instr instr1, Instr instr2);
+
// Block the emission of the trampoline pool before pc_offset.
void BlockTrampolinePoolBefore(int pc_offset) {
if (no_trampoline_pool_before_ < pc_offset)
@@ -1941,6 +1611,12 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
RegList scratch_register_list_;
+ // Generate common instruction sequence.
+ void GenPCRelativeJump(Register tf, Register ts, int32_t imm32,
+ RelocInfo::Mode rmode, BranchDelaySlot bdslot);
+ void GenPCRelativeJumpAndLink(Register t, int32_t imm32,
+ RelocInfo::Mode rmode, BranchDelaySlot bdslot);
+
private:
// Avoid overflows for displacements etc.
static const int kMaximalBufferSize = 512 * MB;
@@ -2129,6 +1805,14 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void bind_to(Label* L, int pos);
void next(Label* L, bool is_internal);
+ // Patching lui/ori pair which is commonly used for loading constants.
+ static void PatchLuiOriImmediate(Address pc, int32_t imm, Instr instr1,
+ Address offset_lui, Instr instr2,
+ Address offset_ori);
+ void PatchLuiOriImmediate(int pc, int32_t imm, Instr instr1,
+ Address offset_lui, Instr instr2,
+ Address offset_ori);
+
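
A minimal sketch of the static overload's likely shape, assuming it mirrors
GetLuiOriImmediate in reverse; callers pass the two instructions with their
immediate bits already cleared, plus their byte offsets relative to pc:

    // Write imm's high 16 bits into the lui and its low 16 bits into the ori.
    static void PatchLuiOriImmediateSketch(Address pc, int32_t imm,
                                           Instr lui_cleared, Address lui_off,
                                           Instr ori_cleared, Address ori_off) {
      uint32_t uimm = static_cast<uint32_t>(imm);
      instr_at_put(pc + lui_off, lui_cleared | ((uimm & kHiMask) >> kLuiShift));
      instr_at_put(pc + ori_off, ori_cleared | (uimm & kImm16Mask));
    }
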
// One trampoline consists of:
// - space for trampoline slots,
// - space for labels.
@@ -2208,6 +1892,8 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
private:
void AllocateAndInstallRequestedHeapObjects(Isolate* isolate);
+ int WriteCodeComments();
+
friend class RegExpMacroAssemblerMIPS;
friend class RelocInfo;
friend class BlockTrampolinePoolScope;
@@ -2232,6 +1918,7 @@ class UseScratchRegisterScope {
RegList old_available_;
};
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/mips/code-stubs-mips.cc b/deps/v8/src/mips/code-stubs-mips.cc
deleted file mode 100644
index 1650458d19..0000000000
--- a/deps/v8/src/mips/code-stubs-mips.cc
+++ /dev/null
@@ -1,596 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#if V8_TARGET_ARCH_MIPS
-
-#include "src/api-arguments-inl.h"
-#include "src/base/bits.h"
-#include "src/bootstrapper.h"
-#include "src/code-stubs.h"
-#include "src/frame-constants.h"
-#include "src/frames.h"
-#include "src/heap/heap-inl.h"
-#include "src/ic/ic.h"
-#include "src/ic/stub-cache.h"
-#include "src/isolate.h"
-#include "src/objects/api-callbacks.h"
-#include "src/regexp/jsregexp.h"
-#include "src/regexp/regexp-macro-assembler.h"
-#include "src/runtime/runtime.h"
-
-#include "src/mips/code-stubs-mips.h" // Cannot be the first include.
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm)
-
-void JSEntryStub::Generate(MacroAssembler* masm) {
- Label invoke, handler_entry, exit;
- Isolate* isolate = masm->isolate();
-
- {
- NoRootArrayScope no_root_array(masm);
-
- // Registers:
- // a0: entry address
- // a1: function
- // a2: receiver
- // a3: argc
- //
- // Stack:
- // 4 args slots
- // args
-
- ProfileEntryHookStub::MaybeCallEntryHook(masm);
-
- // Save callee saved registers on the stack.
- __ MultiPush(kCalleeSaved | ra.bit());
-
- // Save callee-saved FPU registers.
- __ MultiPushFPU(kCalleeSavedFPU);
- // Set up the reserved register for 0.0.
- __ Move(kDoubleRegZero, 0.0);
-
- __ InitializeRootRegister();
- }
-
- // Load argv in s0 register.
- int offset_to_argv = (kNumCalleeSaved + 1) * kPointerSize;
- offset_to_argv += kNumCalleeSavedFPU * kDoubleSize;
-
- __ lw(s0, MemOperand(sp, offset_to_argv + kCArgsSlotsSize));
-
- // We build an EntryFrame.
- __ li(t3, Operand(-1)); // Push a bad frame pointer to fail if it is used.
- StackFrame::Type marker = type();
- __ li(t2, Operand(StackFrame::TypeToMarker(marker)));
- __ li(t1, Operand(StackFrame::TypeToMarker(marker)));
- __ li(t0,
- ExternalReference::Create(IsolateAddressId::kCEntryFPAddress, isolate));
- __ lw(t0, MemOperand(t0));
- __ Push(t3, t2, t1, t0);
- // Set up frame pointer for the frame to be pushed.
- __ addiu(fp, sp, -EntryFrameConstants::kCallerFPOffset);
-
- // Registers:
- // a0: entry_address
- // a1: function
- // a2: receiver_pointer
- // a3: argc
- // s0: argv
- //
- // Stack:
- // caller fp |
- // function slot | entry frame
- // context slot |
- // bad fp (0xFF...F) |
- // callee saved registers + ra
- // 4 args slots
- // args
-
- // If this is the outermost JS call, set js_entry_sp value.
- Label non_outermost_js;
- ExternalReference js_entry_sp =
- ExternalReference::Create(IsolateAddressId::kJSEntrySPAddress, isolate);
- __ li(t1, js_entry_sp);
- __ lw(t2, MemOperand(t1));
- __ Branch(&non_outermost_js, ne, t2, Operand(zero_reg));
- __ sw(fp, MemOperand(t1));
- __ li(t0, Operand(StackFrame::OUTERMOST_JSENTRY_FRAME));
- Label cont;
- __ b(&cont);
- __ nop(); // Branch delay slot nop.
- __ bind(&non_outermost_js);
- __ li(t0, Operand(StackFrame::INNER_JSENTRY_FRAME));
- __ bind(&cont);
- __ push(t0);
-
- // Jump to a faked try block that does the invoke, with a faked catch
- // block that sets the pending exception.
- __ jmp(&invoke);
- __ bind(&handler_entry);
- handler_offset_ = handler_entry.pos();
- // Caught exception: Store result (exception) in the pending exception
- // field in the JSEnv and return a failure sentinel. Coming in here the
- // fp will be invalid because the PushStackHandler below sets it to 0 to
- // signal the existence of the JSEntry frame.
- __ li(t0, ExternalReference::Create(
- IsolateAddressId::kPendingExceptionAddress, isolate));
- __ sw(v0, MemOperand(t0)); // We come back from 'invoke'. result is in v0.
- __ LoadRoot(v0, RootIndex::kException);
- __ b(&exit); // b exposes branch delay slot.
- __ nop(); // Branch delay slot nop.
-
- // Invoke: Link this frame into the handler chain.
- __ bind(&invoke);
- __ PushStackHandler();
- // If an exception not caught by another handler occurs, this handler
- // returns control to the code after the bal(&invoke) above, which
- // restores all kCalleeSaved registers (including cp and fp) to their
- // saved values before returning a failure to C.
-
- // Invoke the function by calling through JS entry trampoline builtin.
- // Notice that we cannot store a reference to the trampoline code directly in
- // this stub, because runtime stubs are not traversed when doing GC.
-
- // Registers:
- // a0: entry_address
- // a1: function
- // a2: receiver_pointer
- // a3: argc
- // s0: argv
- //
- // Stack:
- // handler frame
- // entry frame
- // callee saved registers + ra
- // 4 args slots
- // args
- __ Call(EntryTrampoline(), RelocInfo::CODE_TARGET);
-
- // Unlink this frame from the handler chain.
- __ PopStackHandler();
-
- __ bind(&exit); // v0 holds result
- // Check if the current stack frame is marked as the outermost JS frame.
- Label non_outermost_js_2;
- __ pop(t1);
- __ Branch(&non_outermost_js_2, ne, t1,
- Operand(StackFrame::OUTERMOST_JSENTRY_FRAME));
- __ li(t1, ExternalReference(js_entry_sp));
- __ sw(zero_reg, MemOperand(t1));
- __ bind(&non_outermost_js_2);
-
- // Restore the top frame descriptors from the stack.
- __ pop(t1);
- __ li(t0,
- ExternalReference::Create(IsolateAddressId::kCEntryFPAddress, isolate));
- __ sw(t1, MemOperand(t0));
-
- // Reset the stack to the callee saved registers.
- __ addiu(sp, sp, -EntryFrameConstants::kCallerFPOffset);
-
- // Restore callee-saved fpu registers.
- __ MultiPopFPU(kCalleeSavedFPU);
-
- // Restore callee saved registers from the stack.
- __ MultiPop(kCalleeSaved | ra.bit());
- // Return.
- __ Jump(ra);
-}
-
-void DirectCEntryStub::Generate(MacroAssembler* masm) {
- // Make place for arguments to fit C calling convention. Most of the callers
- // of DirectCEntryStub::GenerateCall are using EnterExitFrame/LeaveExitFrame
- // so they handle stack restoring and we don't have to do that here.
- // Any caller of DirectCEntryStub::GenerateCall must take care of dropping
- // kCArgsSlotsSize stack space after the call.
- __ Subu(sp, sp, Operand(kCArgsSlotsSize));
- // Place the return address on the stack, making the call
- // GC safe. The RegExp backend also relies on this.
- __ sw(ra, MemOperand(sp, kCArgsSlotsSize));
- __ Call(t9); // Call the C++ function.
- __ lw(t9, MemOperand(sp, kCArgsSlotsSize));
-
- if (FLAG_debug_code && FLAG_enable_slow_asserts) {
- // In case of an error the return address may point to a memory area
- // filled with kZapValue by the GC.
- // Dereference the address and check for this.
- __ lw(t0, MemOperand(t9));
- __ Assert(ne, AbortReason::kReceivedInvalidReturnAddress, t0,
- Operand(reinterpret_cast<uint32_t>(kZapValue)));
- }
- __ Jump(t9);
-}
-
-
-void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
- Register target) {
- if (FLAG_embedded_builtins) {
- if (masm->root_array_available() &&
- isolate()->ShouldLoadConstantsFromRootList()) {
- // This is basically an inlined version of Call(Handle<Code>) that loads
- // the code object into kScratchReg instead of t9.
- __ Move(t9, target);
- __ IndirectLoadConstant(kScratchReg, GetCode());
- __ Call(kScratchReg, Code::kHeaderSize - kHeapObjectTag);
- return;
- }
- }
- intptr_t loc =
- reinterpret_cast<intptr_t>(GetCode().location());
- __ Move(t9, target);
- __ li(kScratchReg, Operand(loc, RelocInfo::CODE_TARGET), CONSTANT_SIZE);
- __ Call(kScratchReg);
-}
-
-
-void ProfileEntryHookStub::MaybeCallEntryHookDelayed(TurboAssembler* tasm,
- Zone* zone) {
- if (tasm->isolate()->function_entry_hook() != nullptr) {
- tasm->push(ra);
- tasm->CallStubDelayed(new (zone) ProfileEntryHookStub(nullptr));
- tasm->pop(ra);
- }
-}
-
-void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
- if (masm->isolate()->function_entry_hook() != nullptr) {
- ProfileEntryHookStub stub(masm->isolate());
- __ push(ra);
- __ CallStub(&stub);
- __ pop(ra);
- }
-}
-
-
-void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
- // The entry hook is a "push ra" instruction, followed by a call.
- // Note: on MIPS "push" is 2 instruction
- const int32_t kReturnAddressDistanceFromFunctionStart =
- Assembler::kCallTargetAddressOffset + (2 * kInstrSize);
-
- // This should contain all kJSCallerSaved registers.
- const RegList kSavedRegs =
- kJSCallerSaved | // Caller saved registers.
- s5.bit(); // Saved stack pointer.
-
- // We also save ra, so the count here is one higher than the mask indicates.
- const int32_t kNumSavedRegs = kNumJSCallerSaved + 2;
-
- // Save all caller-save registers as this may be called from anywhere.
- __ MultiPush(kSavedRegs | ra.bit());
-
- // Compute the function's address for the first argument.
- __ Subu(a0, ra, Operand(kReturnAddressDistanceFromFunctionStart));
-
- // The caller's return address is above the saved temporaries.
- // Grab that for the second argument to the hook.
- __ Addu(a1, sp, Operand(kNumSavedRegs * kPointerSize));
-
- // Align the stack if necessary.
- int frame_alignment = masm->ActivationFrameAlignment();
- if (frame_alignment > kPointerSize) {
- __ mov(s5, sp);
- DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
- __ And(sp, sp, Operand(-frame_alignment));
- }
- __ Subu(sp, sp, kCArgsSlotsSize);
-#if defined(V8_HOST_ARCH_MIPS)
- int32_t entry_hook =
- reinterpret_cast<int32_t>(isolate()->function_entry_hook());
- __ li(t9, Operand(entry_hook));
-#else
- // Under the simulator we need to indirect the entry hook through a
- // trampoline function at a known address.
- // It additionally takes an isolate as a third parameter.
- __ li(a2, ExternalReference::isolate_address(isolate()));
-
- ApiFunction dispatcher(FUNCTION_ADDR(EntryHookTrampoline));
- __ li(t9, ExternalReference::Create(&dispatcher,
- ExternalReference::BUILTIN_CALL));
-#endif
- // Call C function through t9 to conform ABI for PIC.
- __ Call(t9);
-
- // Restore the stack pointer if needed.
- if (frame_alignment > kPointerSize) {
- __ mov(sp, s5);
- } else {
- __ Addu(sp, sp, kCArgsSlotsSize);
- }
-
- // Also pop ra to get Ret(0).
- __ MultiPop(kSavedRegs | ra.bit());
- __ Ret();
-}
-
-static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
- return ref0.address() - ref1.address();
-}
-
-
-// Calls an API function. Allocates HandleScope, extracts returned value
-// from handle and propagates exceptions. Restores context. stack_space
-// - space to be unwound on exit (includes the call JS arguments space and
-// the additional space allocated for the fast call).
-static void CallApiFunctionAndReturn(MacroAssembler* masm,
- Register function_address,
- ExternalReference thunk_ref,
- int stack_space,
- int32_t stack_space_offset,
- MemOperand return_value_operand) {
- Isolate* isolate = masm->isolate();
- ExternalReference next_address =
- ExternalReference::handle_scope_next_address(isolate);
- const int kNextOffset = 0;
- const int kLimitOffset = AddressOffset(
- ExternalReference::handle_scope_limit_address(isolate), next_address);
- const int kLevelOffset = AddressOffset(
- ExternalReference::handle_scope_level_address(isolate), next_address);
-
- DCHECK(function_address == a1 || function_address == a2);
-
- Label profiler_disabled;
- Label end_profiler_check;
- __ li(t9, ExternalReference::is_profiling_address(isolate));
- __ lb(t9, MemOperand(t9, 0));
- __ Branch(&profiler_disabled, eq, t9, Operand(zero_reg));
-
- // Additional parameter is the address of the actual callback.
- __ li(t9, thunk_ref);
- __ jmp(&end_profiler_check);
-
- __ bind(&profiler_disabled);
- __ mov(t9, function_address);
- __ bind(&end_profiler_check);
-
- // Allocate HandleScope in callee-save registers.
- __ li(s5, next_address);
- __ lw(s0, MemOperand(s5, kNextOffset));
- __ lw(s1, MemOperand(s5, kLimitOffset));
- __ lw(s2, MemOperand(s5, kLevelOffset));
- __ Addu(s2, s2, Operand(1));
- __ sw(s2, MemOperand(s5, kLevelOffset));
-
- if (FLAG_log_timer_events) {
- FrameScope frame(masm, StackFrame::MANUAL);
- __ PushSafepointRegisters();
- __ PrepareCallCFunction(1, a0);
- __ li(a0, ExternalReference::isolate_address(isolate));
- __ CallCFunction(ExternalReference::log_enter_external_function(), 1);
- __ PopSafepointRegisters();
- }
-
- // Native call returns to the DirectCEntry stub which redirects to the
- // return address pushed on stack (could have moved after GC).
- // DirectCEntry stub itself is generated early and never moves.
- DirectCEntryStub stub(isolate);
- stub.GenerateCall(masm, t9);
-
- if (FLAG_log_timer_events) {
- FrameScope frame(masm, StackFrame::MANUAL);
- __ PushSafepointRegisters();
- __ PrepareCallCFunction(1, a0);
- __ li(a0, ExternalReference::isolate_address(isolate));
- __ CallCFunction(ExternalReference::log_leave_external_function(), 1);
- __ PopSafepointRegisters();
- }
-
- Label promote_scheduled_exception;
- Label delete_allocated_handles;
- Label leave_exit_frame;
- Label return_value_loaded;
-
- // Load value from ReturnValue.
- __ lw(v0, return_value_operand);
- __ bind(&return_value_loaded);
-
- // No more valid handles (the result handle was the last one). Restore
- // previous handle scope.
- __ sw(s0, MemOperand(s5, kNextOffset));
- if (__ emit_debug_code()) {
- __ lw(a1, MemOperand(s5, kLevelOffset));
- __ Check(eq, AbortReason::kUnexpectedLevelAfterReturnFromApiCall, a1,
- Operand(s2));
- }
- __ Subu(s2, s2, Operand(1));
- __ sw(s2, MemOperand(s5, kLevelOffset));
- __ lw(kScratchReg, MemOperand(s5, kLimitOffset));
- __ Branch(&delete_allocated_handles, ne, s1, Operand(kScratchReg));
-
- // Leave the API exit frame.
- __ bind(&leave_exit_frame);
-
- if (stack_space_offset != kInvalidStackOffset) {
- // ExitFrame contains four MIPS argument slots after DirectCEntryStub call
- // so this must be accounted for.
- __ lw(s0, MemOperand(sp, stack_space_offset + kCArgsSlotsSize));
- } else {
- __ li(s0, Operand(stack_space));
- }
- __ LeaveExitFrame(false, s0, NO_EMIT_RETURN,
- stack_space_offset != kInvalidStackOffset);
-
- // Check if the function scheduled an exception.
- __ LoadRoot(t0, RootIndex::kTheHoleValue);
- __ li(kScratchReg, ExternalReference::scheduled_exception_address(isolate));
- __ lw(t1, MemOperand(kScratchReg));
- __ Branch(&promote_scheduled_exception, ne, t0, Operand(t1));
-
- __ Ret();
-
- // Re-throw by promoting a scheduled exception.
- __ bind(&promote_scheduled_exception);
- __ TailCallRuntime(Runtime::kPromoteScheduledException);
-
- // HandleScope limit has changed. Delete allocated extensions.
- __ bind(&delete_allocated_handles);
- __ sw(s1, MemOperand(s5, kLimitOffset));
- __ mov(s0, v0);
- __ mov(a0, v0);
- __ PrepareCallCFunction(1, s1);
- __ li(a0, ExternalReference::isolate_address(isolate));
- __ CallCFunction(ExternalReference::delete_handle_scope_extensions(), 1);
- __ mov(v0, s0);
- __ jmp(&leave_exit_frame);
-}
-
-void CallApiCallbackStub::Generate(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- t0 : call_data
- // -- a2 : holder
- // -- a1 : api_function_address
- // -- cp : context
- // --
- // -- sp[0] : last argument
- // -- ...
- // -- sp[(argc - 1)* 4] : first argument
- // -- sp[argc * 4] : receiver
- // -----------------------------------
-
- Register call_data = t0;
- Register holder = a2;
- Register api_function_address = a1;
-
- typedef FunctionCallbackArguments FCA;
-
- STATIC_ASSERT(FCA::kArgsLength == 6);
- STATIC_ASSERT(FCA::kNewTargetIndex == 5);
- STATIC_ASSERT(FCA::kDataIndex == 4);
- STATIC_ASSERT(FCA::kReturnValueOffset == 3);
- STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
- STATIC_ASSERT(FCA::kIsolateIndex == 1);
- STATIC_ASSERT(FCA::kHolderIndex == 0);
-
- // new target
- __ PushRoot(RootIndex::kUndefinedValue);
-
- // call data.
- __ Push(call_data);
-
- Register scratch = call_data;
- __ LoadRoot(scratch, RootIndex::kUndefinedValue);
- // Push return value and default return value.
- __ Push(scratch, scratch);
- __ li(scratch, ExternalReference::isolate_address(masm->isolate()));
- // Push isolate and holder.
- __ Push(scratch, holder);
-
- // Prepare arguments.
- __ mov(scratch, sp);
-
- // Allocate the v8::Arguments structure in the arguments' space since
- // it's not controlled by GC.
- const int kApiStackSpace = 3;
-
- FrameScope frame_scope(masm, StackFrame::MANUAL);
- __ EnterExitFrame(false, kApiStackSpace);
-
- DCHECK(api_function_address != a0 && scratch != a0);
- // a0 = FunctionCallbackInfo&
- // Arguments is after the return address.
- __ Addu(a0, sp, Operand(1 * kPointerSize));
- // FunctionCallbackInfo::implicit_args_
- __ sw(scratch, MemOperand(a0, 0 * kPointerSize));
- // FunctionCallbackInfo::values_
- __ Addu(kScratchReg, scratch,
- Operand((FCA::kArgsLength - 1 + argc()) * kPointerSize));
- __ sw(kScratchReg, MemOperand(a0, 1 * kPointerSize));
- // FunctionCallbackInfo::length_ = argc
- __ li(kScratchReg, Operand(argc()));
- __ sw(kScratchReg, MemOperand(a0, 2 * kPointerSize));
-
- ExternalReference thunk_ref = ExternalReference::invoke_function_callback();
-
- AllowExternalCallThatCantCauseGC scope(masm);
- // Stores return the first js argument.
- int return_value_offset = 2 + FCA::kReturnValueOffset;
- MemOperand return_value_operand(fp, return_value_offset * kPointerSize);
- const int stack_space = argc() + FCA::kArgsLength + 1;
- // TODO(adamk): Why are we clobbering this immediately?
- const int32_t stack_space_offset = kInvalidStackOffset;
- CallApiFunctionAndReturn(masm, api_function_address, thunk_ref, stack_space,
- stack_space_offset, return_value_operand);
-}
-
-
-void CallApiGetterStub::Generate(MacroAssembler* masm) {
- // Build v8::PropertyCallbackInfo::args_ array on the stack and push property
- // name below the exit frame to make GC aware of them.
- STATIC_ASSERT(PropertyCallbackArguments::kShouldThrowOnErrorIndex == 0);
- STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 1);
- STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 2);
- STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 3);
- STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 4);
- STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 5);
- STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 6);
- STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 7);
-
- Register receiver = ApiGetterDescriptor::ReceiverRegister();
- Register holder = ApiGetterDescriptor::HolderRegister();
- Register callback = ApiGetterDescriptor::CallbackRegister();
- Register scratch = t0;
- DCHECK(!AreAliased(receiver, holder, callback, scratch));
-
- Register api_function_address = a2;
-
- // Here and below +1 is for name() pushed after the args_ array.
- typedef PropertyCallbackArguments PCA;
- __ Subu(sp, sp, (PCA::kArgsLength + 1) * kPointerSize);
- __ sw(receiver, MemOperand(sp, (PCA::kThisIndex + 1) * kPointerSize));
- __ lw(scratch, FieldMemOperand(callback, AccessorInfo::kDataOffset));
- __ sw(scratch, MemOperand(sp, (PCA::kDataIndex + 1) * kPointerSize));
- __ LoadRoot(scratch, RootIndex::kUndefinedValue);
- __ sw(scratch, MemOperand(sp, (PCA::kReturnValueOffset + 1) * kPointerSize));
- __ sw(scratch, MemOperand(sp, (PCA::kReturnValueDefaultValueIndex + 1) *
- kPointerSize));
- __ li(scratch, ExternalReference::isolate_address(isolate()));
- __ sw(scratch, MemOperand(sp, (PCA::kIsolateIndex + 1) * kPointerSize));
- __ sw(holder, MemOperand(sp, (PCA::kHolderIndex + 1) * kPointerSize));
- // should_throw_on_error -> false
- DCHECK_NULL(Smi::kZero);
- __ sw(zero_reg,
- MemOperand(sp, (PCA::kShouldThrowOnErrorIndex + 1) * kPointerSize));
- __ lw(scratch, FieldMemOperand(callback, AccessorInfo::kNameOffset));
- __ sw(scratch, MemOperand(sp, 0 * kPointerSize));
-
- // v8::PropertyCallbackInfo::args_ array and name handle.
- const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
-
- // Load address of v8::PropertyAccessorInfo::args_ array and name handle.
- __ mov(a0, sp); // a0 = Handle<Name>
- __ Addu(a1, a0, Operand(1 * kPointerSize)); // a1 = v8::PCI::args_
-
- const int kApiStackSpace = 1;
- FrameScope frame_scope(masm, StackFrame::MANUAL);
- __ EnterExitFrame(false, kApiStackSpace);
-
- // Create v8::PropertyCallbackInfo object on the stack and initialize
- // it's args_ field.
- __ sw(a1, MemOperand(sp, 1 * kPointerSize));
- __ Addu(a1, sp, Operand(1 * kPointerSize)); // a1 = v8::PropertyCallbackInfo&
-
- ExternalReference thunk_ref =
- ExternalReference::invoke_accessor_getter_callback();
-
- __ lw(scratch, FieldMemOperand(callback, AccessorInfo::kJsGetterOffset));
- __ lw(api_function_address,
- FieldMemOperand(scratch, Foreign::kForeignAddressOffset));
-
- // +3 is to skip prolog, return address and name handle.
- MemOperand return_value_operand(
- fp, (PropertyCallbackArguments::kReturnValueOffset + 3) * kPointerSize);
- CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
- kStackUnwindSpace, kInvalidStackOffset,
- return_value_operand);
-}
-
-#undef __
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_TARGET_ARCH_MIPS
diff --git a/deps/v8/src/mips/code-stubs-mips.h b/deps/v8/src/mips/code-stubs-mips.h
deleted file mode 100644
index 95a253c20e..0000000000
--- a/deps/v8/src/mips/code-stubs-mips.h
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_MIPS_CODE_STUBS_MIPS_H_
-#define V8_MIPS_CODE_STUBS_MIPS_H_
-
-namespace v8 {
-namespace internal {
-
-// Trampoline stub to call into native code. To call safely into native code
-// in the presence of compacting GC (which can move code objects) we need to
-// keep the code which called into native pinned in the memory. Currently the
-// simplest approach is to generate such stub early enough so it can never be
-// moved by GC
-class DirectCEntryStub: public PlatformCodeStub {
- public:
- explicit DirectCEntryStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
- void GenerateCall(MacroAssembler* masm, Register target);
-
- private:
- Movability NeedsImmovableCode() override { return kImmovable; }
-
- DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
- DEFINE_PLATFORM_CODE_STUB(DirectCEntry, PlatformCodeStub);
-};
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_MIPS_CODE_STUBS_MIPS_H_
diff --git a/deps/v8/src/mips/codegen-mips.cc b/deps/v8/src/mips/codegen-mips.cc
deleted file mode 100644
index 86546668db..0000000000
--- a/deps/v8/src/mips/codegen-mips.cc
+++ /dev/null
@@ -1,587 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#if V8_TARGET_ARCH_MIPS
-
-#include <memory>
-
-#include "src/codegen.h"
-#include "src/macro-assembler.h"
-#include "src/mips/simulator-mips.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ masm.
-
-#if defined(V8_HOST_ARCH_MIPS)
-
-MemCopyUint8Function CreateMemCopyUint8Function(MemCopyUint8Function stub) {
-#if defined(USE_SIMULATOR) || defined(_MIPS_ARCH_MIPS32R6) || \
- defined(_MIPS_ARCH_MIPS32RX)
- return stub;
-#else
- v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
- size_t allocated = 0;
- byte* buffer = AllocatePage(page_allocator,
- page_allocator->GetRandomMmapAddr(), &allocated);
- if (buffer == nullptr) return nullptr;
-
- MacroAssembler masm(AssemblerOptions{}, buffer, static_cast<int>(allocated));
-
- // This code assumes that cache lines are 32 bytes and if the cache line is
- // larger it will not work correctly.
- {
- Label lastb, unaligned, aligned, chkw,
- loop16w, chk1w, wordCopy_loop, skip_pref, lastbloop,
- leave, ua_chk16w, ua_loop16w, ua_skip_pref, ua_chkw,
- ua_chk1w, ua_wordCopy_loop, ua_smallCopy, ua_smallCopy_loop;
-
- // The size of each prefetch.
- uint32_t pref_chunk = 32;
- // The maximum size of a prefetch, it must not be less than pref_chunk.
- // If the real size of a prefetch is greater than max_pref_size and
- // the kPrefHintPrepareForStore hint is used, the code will not work
- // correctly.
- uint32_t max_pref_size = 128;
- DCHECK(pref_chunk < max_pref_size);
-
- // pref_limit is set based on the fact that we never use an offset
- // greater then 5 on a store pref and that a single pref can
- // never be larger then max_pref_size.
- uint32_t pref_limit = (5 * pref_chunk) + max_pref_size;
- int32_t pref_hint_load = kPrefHintLoadStreamed;
- int32_t pref_hint_store = kPrefHintPrepareForStore;
- uint32_t loadstore_chunk = 4;
-
- // The initial prefetches may fetch bytes that are before the buffer being
- // copied. Start copies with an offset of 4 so avoid this situation when
- // using kPrefHintPrepareForStore.
- DCHECK(pref_hint_store != kPrefHintPrepareForStore ||
- pref_chunk * 4 >= max_pref_size);
-
- // If the size is less than 8, go to lastb. Regardless of size,
- // copy dst pointer to v0 for the retuen value.
- __ slti(t2, a2, 2 * loadstore_chunk);
- __ bne(t2, zero_reg, &lastb);
- __ mov(v0, a0); // In delay slot.
-
- // If src and dst have different alignments, go to unaligned, if they
- // have the same alignment (but are not actually aligned) do a partial
- // load/store to make them aligned. If they are both already aligned
- // we can start copying at aligned.
- __ xor_(t8, a1, a0);
- __ andi(t8, t8, loadstore_chunk - 1); // t8 is a0/a1 word-displacement.
- __ bne(t8, zero_reg, &unaligned);
- __ subu(a3, zero_reg, a0); // In delay slot.
-
- __ andi(a3, a3, loadstore_chunk - 1); // Copy a3 bytes to align a0/a1.
- __ beq(a3, zero_reg, &aligned); // Already aligned.
- __ subu(a2, a2, a3); // In delay slot. a2 is the remining bytes count.
-
- if (kArchEndian == kLittle) {
- __ lwr(t8, MemOperand(a1));
- __ addu(a1, a1, a3);
- __ swr(t8, MemOperand(a0));
- __ addu(a0, a0, a3);
- } else {
- __ lwl(t8, MemOperand(a1));
- __ addu(a1, a1, a3);
- __ swl(t8, MemOperand(a0));
- __ addu(a0, a0, a3);
- }
- // Now dst/src are both aligned to (word) aligned addresses. Set a2 to
- // count how many bytes we have to copy after all the 64 byte chunks are
- // copied and a3 to the dst pointer after all the 64 byte chunks have been
- // copied. We will loop, incrementing a0 and a1 until a0 equals a3.
- __ bind(&aligned);
- __ andi(t8, a2, 0x3F);
- __ beq(a2, t8, &chkw); // Less than 64?
- __ subu(a3, a2, t8); // In delay slot.
- __ addu(a3, a0, a3); // Now a3 is the final dst after loop.
-
- // When in the loop we prefetch with kPrefHintPrepareForStore hint,
- // in this case the a0+x should be past the "t0-32" address. This means:
- // for x=128 the last "safe" a0 address is "t0-160". Alternatively, for
- // x=64 the last "safe" a0 address is "t0-96". In the current version we
- // will use "pref hint, 128(a0)", so "t0-160" is the limit.
- if (pref_hint_store == kPrefHintPrepareForStore) {
- __ addu(t0, a0, a2); // t0 is the "past the end" address.
- __ Subu(t9, t0, pref_limit); // t9 is the "last safe pref" address.
- }
-
- __ Pref(pref_hint_load, MemOperand(a1, 0 * pref_chunk));
- __ Pref(pref_hint_load, MemOperand(a1, 1 * pref_chunk));
- __ Pref(pref_hint_load, MemOperand(a1, 2 * pref_chunk));
- __ Pref(pref_hint_load, MemOperand(a1, 3 * pref_chunk));
-
- if (pref_hint_store != kPrefHintPrepareForStore) {
- __ Pref(pref_hint_store, MemOperand(a0, 1 * pref_chunk));
- __ Pref(pref_hint_store, MemOperand(a0, 2 * pref_chunk));
- __ Pref(pref_hint_store, MemOperand(a0, 3 * pref_chunk));
- }
- __ bind(&loop16w);
- __ lw(t0, MemOperand(a1));
-
- if (pref_hint_store == kPrefHintPrepareForStore) {
- __ sltu(v1, t9, a0); // If a0 > t9, don't use next prefetch.
- __ Branch(USE_DELAY_SLOT, &skip_pref, gt, v1, Operand(zero_reg));
- }
- __ lw(t1, MemOperand(a1, 1, loadstore_chunk)); // Maybe in delay slot.
-
- __ Pref(pref_hint_store, MemOperand(a0, 4 * pref_chunk));
- __ Pref(pref_hint_store, MemOperand(a0, 5 * pref_chunk));
-
- __ bind(&skip_pref);
- __ lw(t2, MemOperand(a1, 2, loadstore_chunk));
- __ lw(t3, MemOperand(a1, 3, loadstore_chunk));
- __ lw(t4, MemOperand(a1, 4, loadstore_chunk));
- __ lw(t5, MemOperand(a1, 5, loadstore_chunk));
- __ lw(t6, MemOperand(a1, 6, loadstore_chunk));
- __ lw(t7, MemOperand(a1, 7, loadstore_chunk));
- __ Pref(pref_hint_load, MemOperand(a1, 4 * pref_chunk));
-
- __ sw(t0, MemOperand(a0));
- __ sw(t1, MemOperand(a0, 1, loadstore_chunk));
- __ sw(t2, MemOperand(a0, 2, loadstore_chunk));
- __ sw(t3, MemOperand(a0, 3, loadstore_chunk));
- __ sw(t4, MemOperand(a0, 4, loadstore_chunk));
- __ sw(t5, MemOperand(a0, 5, loadstore_chunk));
- __ sw(t6, MemOperand(a0, 6, loadstore_chunk));
- __ sw(t7, MemOperand(a0, 7, loadstore_chunk));
-
- __ lw(t0, MemOperand(a1, 8, loadstore_chunk));
- __ lw(t1, MemOperand(a1, 9, loadstore_chunk));
- __ lw(t2, MemOperand(a1, 10, loadstore_chunk));
- __ lw(t3, MemOperand(a1, 11, loadstore_chunk));
- __ lw(t4, MemOperand(a1, 12, loadstore_chunk));
- __ lw(t5, MemOperand(a1, 13, loadstore_chunk));
- __ lw(t6, MemOperand(a1, 14, loadstore_chunk));
- __ lw(t7, MemOperand(a1, 15, loadstore_chunk));
- __ Pref(pref_hint_load, MemOperand(a1, 5 * pref_chunk));
-
- __ sw(t0, MemOperand(a0, 8, loadstore_chunk));
- __ sw(t1, MemOperand(a0, 9, loadstore_chunk));
- __ sw(t2, MemOperand(a0, 10, loadstore_chunk));
- __ sw(t3, MemOperand(a0, 11, loadstore_chunk));
- __ sw(t4, MemOperand(a0, 12, loadstore_chunk));
- __ sw(t5, MemOperand(a0, 13, loadstore_chunk));
- __ sw(t6, MemOperand(a0, 14, loadstore_chunk));
- __ sw(t7, MemOperand(a0, 15, loadstore_chunk));
- __ addiu(a0, a0, 16 * loadstore_chunk);
- __ bne(a0, a3, &loop16w);
- __ addiu(a1, a1, 16 * loadstore_chunk); // In delay slot.
- __ mov(a2, t8);
-
- // Here we have src and dest word-aligned but less than 64-bytes to go.
- // Check for a 32 bytes chunk and copy if there is one. Otherwise jump
- // down to chk1w to handle the tail end of the copy.
- __ bind(&chkw);
- __ Pref(pref_hint_load, MemOperand(a1, 0 * pref_chunk));
- __ andi(t8, a2, 0x1F);
- __ beq(a2, t8, &chk1w); // Less than 32?
- __ nop(); // In delay slot.
- __ lw(t0, MemOperand(a1));
- __ lw(t1, MemOperand(a1, 1, loadstore_chunk));
- __ lw(t2, MemOperand(a1, 2, loadstore_chunk));
- __ lw(t3, MemOperand(a1, 3, loadstore_chunk));
- __ lw(t4, MemOperand(a1, 4, loadstore_chunk));
- __ lw(t5, MemOperand(a1, 5, loadstore_chunk));
- __ lw(t6, MemOperand(a1, 6, loadstore_chunk));
- __ lw(t7, MemOperand(a1, 7, loadstore_chunk));
- __ addiu(a1, a1, 8 * loadstore_chunk);
- __ sw(t0, MemOperand(a0));
- __ sw(t1, MemOperand(a0, 1, loadstore_chunk));
- __ sw(t2, MemOperand(a0, 2, loadstore_chunk));
- __ sw(t3, MemOperand(a0, 3, loadstore_chunk));
- __ sw(t4, MemOperand(a0, 4, loadstore_chunk));
- __ sw(t5, MemOperand(a0, 5, loadstore_chunk));
- __ sw(t6, MemOperand(a0, 6, loadstore_chunk));
- __ sw(t7, MemOperand(a0, 7, loadstore_chunk));
- __ addiu(a0, a0, 8 * loadstore_chunk);
-
- // Here we have less than 32 bytes to copy. Set up for a loop to copy
- // one word at a time. Set a2 to count how many bytes we have to copy
- // after all the word chunks are copied and a3 to the dst pointer after
- // all the word chunks have been copied. We will loop, incrementing a0
- // and a1 until a0 equals a3.
- __ bind(&chk1w);
- __ andi(a2, t8, loadstore_chunk - 1);
- __ beq(a2, t8, &lastb);
- __ subu(a3, t8, a2); // In delay slot.
- __ addu(a3, a0, a3);
-
- __ bind(&wordCopy_loop);
- __ lw(t3, MemOperand(a1));
- __ addiu(a0, a0, loadstore_chunk);
- __ addiu(a1, a1, loadstore_chunk);
- __ bne(a0, a3, &wordCopy_loop);
- __ sw(t3, MemOperand(a0, -1, loadstore_chunk)); // In delay slot.
-
- __ bind(&lastb);
- __ Branch(&leave, le, a2, Operand(zero_reg));
- __ addu(a3, a0, a2);
-
- __ bind(&lastbloop);
- __ lb(v1, MemOperand(a1));
- __ addiu(a0, a0, 1);
- __ addiu(a1, a1, 1);
- __ bne(a0, a3, &lastbloop);
- __ sb(v1, MemOperand(a0, -1)); // In delay slot.
-
- __ bind(&leave);
- __ jr(ra);
- __ nop();
-
- // Unaligned case. Only the dst gets aligned so we need to do partial
- // loads of the source followed by normal stores to the dst (once we
- // have aligned the destination).
- __ bind(&unaligned);
- __ andi(a3, a3, loadstore_chunk - 1); // Copy a3 bytes to align a0/a1.
- __ beq(a3, zero_reg, &ua_chk16w);
- __ subu(a2, a2, a3); // In delay slot.
-
- if (kArchEndian == kLittle) {
- __ lwr(v1, MemOperand(a1));
- __ lwl(v1,
- MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
- __ addu(a1, a1, a3);
- __ swr(v1, MemOperand(a0));
- __ addu(a0, a0, a3);
- } else {
- __ lwl(v1, MemOperand(a1));
- __ lwr(v1,
- MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
- __ addu(a1, a1, a3);
- __ swl(v1, MemOperand(a0));
- __ addu(a0, a0, a3);
- }
-
- // Now the dst (but not the source) is aligned. Set a2 to count how many
- // bytes we have to copy after all the 64 byte chunks are copied and a3 to
- // the dst pointer after all the 64 byte chunks have been copied. We will
- // loop, incrementing a0 and a1 until a0 equals a3.
- __ bind(&ua_chk16w);
- __ andi(t8, a2, 0x3F);
- __ beq(a2, t8, &ua_chkw);
- __ subu(a3, a2, t8); // In delay slot.
- __ addu(a3, a0, a3);
-
- if (pref_hint_store == kPrefHintPrepareForStore) {
- __ addu(t0, a0, a2);
- __ Subu(t9, t0, pref_limit);
- }
-
- __ Pref(pref_hint_load, MemOperand(a1, 0 * pref_chunk));
- __ Pref(pref_hint_load, MemOperand(a1, 1 * pref_chunk));
- __ Pref(pref_hint_load, MemOperand(a1, 2 * pref_chunk));
-
- if (pref_hint_store != kPrefHintPrepareForStore) {
- __ Pref(pref_hint_store, MemOperand(a0, 1 * pref_chunk));
- __ Pref(pref_hint_store, MemOperand(a0, 2 * pref_chunk));
- __ Pref(pref_hint_store, MemOperand(a0, 3 * pref_chunk));
- }
-
- __ bind(&ua_loop16w);
- __ Pref(pref_hint_load, MemOperand(a1, 3 * pref_chunk));
- if (kArchEndian == kLittle) {
- __ lwr(t0, MemOperand(a1));
- __ lwr(t1, MemOperand(a1, 1, loadstore_chunk));
- __ lwr(t2, MemOperand(a1, 2, loadstore_chunk));
-
- if (pref_hint_store == kPrefHintPrepareForStore) {
- __ sltu(v1, t9, a0);
- __ Branch(USE_DELAY_SLOT, &ua_skip_pref, gt, v1, Operand(zero_reg));
- }
- __ lwr(t3, MemOperand(a1, 3, loadstore_chunk)); // Maybe in delay slot.
-
- __ Pref(pref_hint_store, MemOperand(a0, 4 * pref_chunk));
- __ Pref(pref_hint_store, MemOperand(a0, 5 * pref_chunk));
-
- __ bind(&ua_skip_pref);
- __ lwr(t4, MemOperand(a1, 4, loadstore_chunk));
- __ lwr(t5, MemOperand(a1, 5, loadstore_chunk));
- __ lwr(t6, MemOperand(a1, 6, loadstore_chunk));
- __ lwr(t7, MemOperand(a1, 7, loadstore_chunk));
- __ lwl(t0,
- MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
- __ lwl(t1,
- MemOperand(a1, 2, loadstore_chunk, MemOperand::offset_minus_one));
- __ lwl(t2,
- MemOperand(a1, 3, loadstore_chunk, MemOperand::offset_minus_one));
- __ lwl(t3,
- MemOperand(a1, 4, loadstore_chunk, MemOperand::offset_minus_one));
- __ lwl(t4,
- MemOperand(a1, 5, loadstore_chunk, MemOperand::offset_minus_one));
- __ lwl(t5,
- MemOperand(a1, 6, loadstore_chunk, MemOperand::offset_minus_one));
- __ lwl(t6,
- MemOperand(a1, 7, loadstore_chunk, MemOperand::offset_minus_one));
- __ lwl(t7,
- MemOperand(a1, 8, loadstore_chunk, MemOperand::offset_minus_one));
- } else {
- __ lwl(t0, MemOperand(a1));
- __ lwl(t1, MemOperand(a1, 1, loadstore_chunk));
- __ lwl(t2, MemOperand(a1, 2, loadstore_chunk));
-
- if (pref_hint_store == kPrefHintPrepareForStore) {
- __ sltu(v1, t9, a0);
- __ Branch(USE_DELAY_SLOT, &ua_skip_pref, gt, v1, Operand(zero_reg));
- }
- __ lwl(t3, MemOperand(a1, 3, loadstore_chunk)); // Maybe in delay slot.
-
- __ Pref(pref_hint_store, MemOperand(a0, 4 * pref_chunk));
- __ Pref(pref_hint_store, MemOperand(a0, 5 * pref_chunk));
-
- __ bind(&ua_skip_pref);
- __ lwl(t4, MemOperand(a1, 4, loadstore_chunk));
- __ lwl(t5, MemOperand(a1, 5, loadstore_chunk));
- __ lwl(t6, MemOperand(a1, 6, loadstore_chunk));
- __ lwl(t7, MemOperand(a1, 7, loadstore_chunk));
- __ lwr(t0,
- MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
- __ lwr(t1,
- MemOperand(a1, 2, loadstore_chunk, MemOperand::offset_minus_one));
- __ lwr(t2,
- MemOperand(a1, 3, loadstore_chunk, MemOperand::offset_minus_one));
- __ lwr(t3,
- MemOperand(a1, 4, loadstore_chunk, MemOperand::offset_minus_one));
- __ lwr(t4,
- MemOperand(a1, 5, loadstore_chunk, MemOperand::offset_minus_one));
- __ lwr(t5,
- MemOperand(a1, 6, loadstore_chunk, MemOperand::offset_minus_one));
- __ lwr(t6,
- MemOperand(a1, 7, loadstore_chunk, MemOperand::offset_minus_one));
- __ lwr(t7,
- MemOperand(a1, 8, loadstore_chunk, MemOperand::offset_minus_one));
- }
- __ Pref(pref_hint_load, MemOperand(a1, 4 * pref_chunk));
- __ sw(t0, MemOperand(a0));
- __ sw(t1, MemOperand(a0, 1, loadstore_chunk));
- __ sw(t2, MemOperand(a0, 2, loadstore_chunk));
- __ sw(t3, MemOperand(a0, 3, loadstore_chunk));
- __ sw(t4, MemOperand(a0, 4, loadstore_chunk));
- __ sw(t5, MemOperand(a0, 5, loadstore_chunk));
- __ sw(t6, MemOperand(a0, 6, loadstore_chunk));
- __ sw(t7, MemOperand(a0, 7, loadstore_chunk));
- if (kArchEndian == kLittle) {
- __ lwr(t0, MemOperand(a1, 8, loadstore_chunk));
- __ lwr(t1, MemOperand(a1, 9, loadstore_chunk));
- __ lwr(t2, MemOperand(a1, 10, loadstore_chunk));
- __ lwr(t3, MemOperand(a1, 11, loadstore_chunk));
- __ lwr(t4, MemOperand(a1, 12, loadstore_chunk));
- __ lwr(t5, MemOperand(a1, 13, loadstore_chunk));
- __ lwr(t6, MemOperand(a1, 14, loadstore_chunk));
- __ lwr(t7, MemOperand(a1, 15, loadstore_chunk));
- __ lwl(t0,
- MemOperand(a1, 9, loadstore_chunk, MemOperand::offset_minus_one));
- __ lwl(t1,
- MemOperand(a1, 10, loadstore_chunk, MemOperand::offset_minus_one));
- __ lwl(t2,
- MemOperand(a1, 11, loadstore_chunk, MemOperand::offset_minus_one));
- __ lwl(t3,
- MemOperand(a1, 12, loadstore_chunk, MemOperand::offset_minus_one));
- __ lwl(t4,
- MemOperand(a1, 13, loadstore_chunk, MemOperand::offset_minus_one));
- __ lwl(t5,
- MemOperand(a1, 14, loadstore_chunk, MemOperand::offset_minus_one));
- __ lwl(t6,
- MemOperand(a1, 15, loadstore_chunk, MemOperand::offset_minus_one));
- __ lwl(t7,
- MemOperand(a1, 16, loadstore_chunk, MemOperand::offset_minus_one));
- } else {
- __ lwl(t0, MemOperand(a1, 8, loadstore_chunk));
- __ lwl(t1, MemOperand(a1, 9, loadstore_chunk));
- __ lwl(t2, MemOperand(a1, 10, loadstore_chunk));
- __ lwl(t3, MemOperand(a1, 11, loadstore_chunk));
- __ lwl(t4, MemOperand(a1, 12, loadstore_chunk));
- __ lwl(t5, MemOperand(a1, 13, loadstore_chunk));
- __ lwl(t6, MemOperand(a1, 14, loadstore_chunk));
- __ lwl(t7, MemOperand(a1, 15, loadstore_chunk));
- __ lwr(t0,
- MemOperand(a1, 9, loadstore_chunk, MemOperand::offset_minus_one));
- __ lwr(t1,
- MemOperand(a1, 10, loadstore_chunk, MemOperand::offset_minus_one));
- __ lwr(t2,
- MemOperand(a1, 11, loadstore_chunk, MemOperand::offset_minus_one));
- __ lwr(t3,
- MemOperand(a1, 12, loadstore_chunk, MemOperand::offset_minus_one));
- __ lwr(t4,
- MemOperand(a1, 13, loadstore_chunk, MemOperand::offset_minus_one));
- __ lwr(t5,
- MemOperand(a1, 14, loadstore_chunk, MemOperand::offset_minus_one));
- __ lwr(t6,
- MemOperand(a1, 15, loadstore_chunk, MemOperand::offset_minus_one));
- __ lwr(t7,
- MemOperand(a1, 16, loadstore_chunk, MemOperand::offset_minus_one));
- }
- __ Pref(pref_hint_load, MemOperand(a1, 5 * pref_chunk));
- __ sw(t0, MemOperand(a0, 8, loadstore_chunk));
- __ sw(t1, MemOperand(a0, 9, loadstore_chunk));
- __ sw(t2, MemOperand(a0, 10, loadstore_chunk));
- __ sw(t3, MemOperand(a0, 11, loadstore_chunk));
- __ sw(t4, MemOperand(a0, 12, loadstore_chunk));
- __ sw(t5, MemOperand(a0, 13, loadstore_chunk));
- __ sw(t6, MemOperand(a0, 14, loadstore_chunk));
- __ sw(t7, MemOperand(a0, 15, loadstore_chunk));
- __ addiu(a0, a0, 16 * loadstore_chunk);
- __ bne(a0, a3, &ua_loop16w);
- __ addiu(a1, a1, 16 * loadstore_chunk); // In delay slot.
- __ mov(a2, t8);
-
- // Here we have less than 64 bytes to copy. Check for a 32 byte chunk and
- // copy it if there is one. Otherwise jump down to ua_chk1w to handle the
- // tail end of the copy.
- __ bind(&ua_chkw);
- __ Pref(pref_hint_load, MemOperand(a1));
- __ andi(t8, a2, 0x1F);
-
- __ beq(a2, t8, &ua_chk1w);
- __ nop(); // In delay slot.
- if (kArchEndian == kLittle) {
- __ lwr(t0, MemOperand(a1));
- __ lwr(t1, MemOperand(a1, 1, loadstore_chunk));
- __ lwr(t2, MemOperand(a1, 2, loadstore_chunk));
- __ lwr(t3, MemOperand(a1, 3, loadstore_chunk));
- __ lwr(t4, MemOperand(a1, 4, loadstore_chunk));
- __ lwr(t5, MemOperand(a1, 5, loadstore_chunk));
- __ lwr(t6, MemOperand(a1, 6, loadstore_chunk));
- __ lwr(t7, MemOperand(a1, 7, loadstore_chunk));
- __ lwl(t0,
- MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
- __ lwl(t1,
- MemOperand(a1, 2, loadstore_chunk, MemOperand::offset_minus_one));
- __ lwl(t2,
- MemOperand(a1, 3, loadstore_chunk, MemOperand::offset_minus_one));
- __ lwl(t3,
- MemOperand(a1, 4, loadstore_chunk, MemOperand::offset_minus_one));
- __ lwl(t4,
- MemOperand(a1, 5, loadstore_chunk, MemOperand::offset_minus_one));
- __ lwl(t5,
- MemOperand(a1, 6, loadstore_chunk, MemOperand::offset_minus_one));
- __ lwl(t6,
- MemOperand(a1, 7, loadstore_chunk, MemOperand::offset_minus_one));
- __ lwl(t7,
- MemOperand(a1, 8, loadstore_chunk, MemOperand::offset_minus_one));
- } else {
- __ lwl(t0, MemOperand(a1));
- __ lwl(t1, MemOperand(a1, 1, loadstore_chunk));
- __ lwl(t2, MemOperand(a1, 2, loadstore_chunk));
- __ lwl(t3, MemOperand(a1, 3, loadstore_chunk));
- __ lwl(t4, MemOperand(a1, 4, loadstore_chunk));
- __ lwl(t5, MemOperand(a1, 5, loadstore_chunk));
- __ lwl(t6, MemOperand(a1, 6, loadstore_chunk));
- __ lwl(t7, MemOperand(a1, 7, loadstore_chunk));
- __ lwr(t0,
- MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
- __ lwr(t1,
- MemOperand(a1, 2, loadstore_chunk, MemOperand::offset_minus_one));
- __ lwr(t2,
- MemOperand(a1, 3, loadstore_chunk, MemOperand::offset_minus_one));
- __ lwr(t3,
- MemOperand(a1, 4, loadstore_chunk, MemOperand::offset_minus_one));
- __ lwr(t4,
- MemOperand(a1, 5, loadstore_chunk, MemOperand::offset_minus_one));
- __ lwr(t5,
- MemOperand(a1, 6, loadstore_chunk, MemOperand::offset_minus_one));
- __ lwr(t6,
- MemOperand(a1, 7, loadstore_chunk, MemOperand::offset_minus_one));
- __ lwr(t7,
- MemOperand(a1, 8, loadstore_chunk, MemOperand::offset_minus_one));
- }
- __ addiu(a1, a1, 8 * loadstore_chunk);
- __ sw(t0, MemOperand(a0));
- __ sw(t1, MemOperand(a0, 1, loadstore_chunk));
- __ sw(t2, MemOperand(a0, 2, loadstore_chunk));
- __ sw(t3, MemOperand(a0, 3, loadstore_chunk));
- __ sw(t4, MemOperand(a0, 4, loadstore_chunk));
- __ sw(t5, MemOperand(a0, 5, loadstore_chunk));
- __ sw(t6, MemOperand(a0, 6, loadstore_chunk));
- __ sw(t7, MemOperand(a0, 7, loadstore_chunk));
- __ addiu(a0, a0, 8 * loadstore_chunk);
-
- // Less than 32 bytes to copy. Set up for a loop to
- // copy one word at a time.
- __ bind(&ua_chk1w);
- __ andi(a2, t8, loadstore_chunk - 1);
- __ beq(a2, t8, &ua_smallCopy);
- __ subu(a3, t8, a2); // In delay slot.
- __ addu(a3, a0, a3);
-
- __ bind(&ua_wordCopy_loop);
- if (kArchEndian == kLittle) {
- __ lwr(v1, MemOperand(a1));
- __ lwl(v1,
- MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
- } else {
- __ lwl(v1, MemOperand(a1));
- __ lwr(v1,
- MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
- }
- __ addiu(a0, a0, loadstore_chunk);
- __ addiu(a1, a1, loadstore_chunk);
- __ bne(a0, a3, &ua_wordCopy_loop);
- __ sw(v1, MemOperand(a0, -1, loadstore_chunk)); // In delay slot.
-
- // Copy the remaining bytes (fewer than one word) one at a time.
- __ bind(&ua_smallCopy);
- __ beq(a2, zero_reg, &leave);
- __ addu(a3, a0, a2); // In delay slot.
-
- __ bind(&ua_smallCopy_loop);
- __ lb(v1, MemOperand(a1));
- __ addiu(a0, a0, 1);
- __ addiu(a1, a1, 1);
- __ bne(a0, a3, &ua_smallCopy_loop);
- __ sb(v1, MemOperand(a0, -1)); // In delay slot.
-
- __ jr(ra);
- __ nop();
- }
- CodeDesc desc;
- masm.GetCode(nullptr, &desc);
- DCHECK(!RelocInfo::RequiresRelocationAfterCodegen(desc));
-
- Assembler::FlushICache(buffer, allocated);
- CHECK(SetPermissions(page_allocator, buffer, allocated,
- PageAllocator::kReadExecute));
- return FUNCTION_CAST<MemCopyUint8Function>(buffer);
-#endif
-}
-#endif
-
-UnaryMathFunction CreateSqrtFunction() {
-#if defined(USE_SIMULATOR)
- return nullptr;
-#else
- v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
- size_t allocated = 0;
- byte* buffer = AllocatePage(page_allocator,
- page_allocator->GetRandomMmapAddr(), &allocated);
- if (buffer == nullptr) return nullptr;
-
- MacroAssembler masm(AssemblerOptions{}, buffer, static_cast<int>(allocated));
-
- __ MovFromFloatParameter(f12);
- __ sqrt_d(f0, f12);
- __ MovToFloatResult(f0);
- __ Ret();
-
- CodeDesc desc;
- masm.GetCode(nullptr, &desc);
- DCHECK(!RelocInfo::RequiresRelocationAfterCodegen(desc));
-
- Assembler::FlushICache(buffer, allocated);
- CHECK(SetPermissions(page_allocator, buffer, allocated,
- PageAllocator::kReadExecute));
- return FUNCTION_CAST<UnaryMathFunction>(buffer);
-#endif
-}
-
-#undef __
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_TARGET_ARCH_MIPS
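Editor's note: the deleted stub above is a classic MIPS memcpy shape: align the
destination, move 64- and 32-byte chunks (using lwr/lwl pairs when the source is
unaligned), then drain a word loop and a byte loop. A minimal portable C++
sketch of that structure follows; it is illustrative only (the function name is
hypothetical, and std::memcpy on a local stands in for the lwr/lwl partial loads).

    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    void CopyUint8Sketch(uint8_t* dst, const uint8_t* src, size_t n) {
      // Align the destination first, as the stub aligns a0 before entering
      // the chunked loops; the source may stay unaligned.
      while (n > 0 &&
             (reinterpret_cast<uintptr_t>(dst) & (sizeof(uint32_t) - 1))) {
        *dst++ = *src++;
        n--;
      }
      // Word-at-a-time copy; memcpy into a local expresses the lwr/lwl pair
      // portably for a possibly unaligned source.
      while (n >= sizeof(uint32_t)) {
        uint32_t w;
        std::memcpy(&w, src, sizeof(w));
        std::memcpy(dst, &w, sizeof(w));
        dst += sizeof(w);
        src += sizeof(w);
        n -= sizeof(w);
      }
      // Byte tail, the equivalent of the lastb/lastbloop labels above.
      while (n-- > 0) *dst++ = *src++;
    }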
diff --git a/deps/v8/src/mips/constants-mips.h b/deps/v8/src/mips/constants-mips.h
index 54eb0a6eb0..e1a3f2bb38 100644
--- a/deps/v8/src/mips/constants-mips.h
+++ b/deps/v8/src/mips/constants-mips.h
@@ -4,7 +4,7 @@
#ifndef V8_MIPS_CONSTANTS_MIPS_H_
#define V8_MIPS_CONSTANTS_MIPS_H_
-#include "src/globals.h"
+#include "src/cpu-features.h"
// UNIMPLEMENTED_ macro for MIPS.
#ifdef DEBUG
#define UNIMPLEMENTED_MIPS() \
@@ -145,8 +145,7 @@ const uint32_t kLeastSignificantByteInInt32Offset = 3;
namespace v8 {
namespace internal {
-// TODO(sigurds): Change this value once we use relative jumps.
-constexpr size_t kMaxPCRelativeCodeRangeInMB = 0;
+constexpr size_t kMaxPCRelativeCodeRangeInMB = 4096;
// -----------------------------------------------------------------------------
// Registers and FPURegisters.
@@ -1685,9 +1684,10 @@ class Instruction : public InstructionGetters<InstructionBase> {
// C/C++ argument slots size.
const int kCArgSlotCount = 4;
const int kCArgsSlotsSize = kCArgSlotCount * kInstrSize;
-const int kInvalidStackOffset = -1;
+
// JS argument slots size.
const int kJSArgsSlotsSize = 0 * kInstrSize;
+
// Assembly builtins argument slots size.
const int kBArgsSlotsSize = 0 * kInstrSize;
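Editor's note: raising kMaxPCRelativeCodeRangeInMB from 0 to 4096 is what lets
the rest of this patch emit PC-relative jumps and calls; the lui/ori sequence
used on MIPS32 materializes a full 32-bit offset, which covers a 4096 MB code
range. A hedged sketch of how such a constant would gate the feature (the
function name is illustrative, not V8 API):

    #include <cstdint>

    constexpr uint64_t kMaxPCRelativeCodeRangeInMB = 4096;

    // Illustrative gate: pc-relative jumps are only usable when the whole
    // code range fits inside the reachable window; 0 disabled them entirely.
    bool CanUsePCRelativeCallsAndJumps(uint64_t code_range_size_in_mb) {
      return kMaxPCRelativeCodeRangeInMB != 0 &&
             code_range_size_in_mb <= kMaxPCRelativeCodeRangeInMB;
    }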
diff --git a/deps/v8/src/mips/cpu-mips.cc b/deps/v8/src/mips/cpu-mips.cc
index 2e71817bd8..a8feba60db 100644
--- a/deps/v8/src/mips/cpu-mips.cc
+++ b/deps/v8/src/mips/cpu-mips.cc
@@ -13,10 +13,7 @@
#if V8_TARGET_ARCH_MIPS
-#include "src/assembler.h"
-#include "src/macro-assembler.h"
-
-#include "src/simulator.h" // For cache flushing.
+#include "src/cpu-features.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/mips/deoptimizer-mips.cc b/deps/v8/src/mips/deoptimizer-mips.cc
index 58fd212f78..33e517a21d 100644
--- a/deps/v8/src/mips/deoptimizer-mips.cc
+++ b/deps/v8/src/mips/deoptimizer-mips.cc
@@ -2,22 +2,22 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/assembler-inl.h"
#include "src/deoptimizer.h"
+#include "src/macro-assembler.h"
#include "src/register-configuration.h"
#include "src/safepoint-table.h"
namespace v8 {
namespace internal {
-
-#define __ masm()->
-
+#define __ masm->
// This code tries to be close to ia32 code so that any changes can be
// easily ported.
-void Deoptimizer::TableEntryGenerator::Generate() {
- GeneratePrologue();
+void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
+ Isolate* isolate,
+ DeoptimizeKind deopt_kind) {
+ NoRootArrayScope no_root_array(masm);
// Unlike on ARM we don't save all the registers, just the useful ones.
// For the rest, there are gaps on the stack, so the offsets remain the same.
@@ -57,22 +57,20 @@ void Deoptimizer::TableEntryGenerator::Generate() {
}
__ li(a2, Operand(ExternalReference::Create(
- IsolateAddressId::kCEntryFPAddress, isolate())));
+ IsolateAddressId::kCEntryFPAddress, isolate)));
__ sw(fp, MemOperand(a2));
const int kSavedRegistersAreaSize =
(kNumberOfRegisters * kPointerSize) + kDoubleRegsSize + kFloatRegsSize;
- // Get the bailout id from the stack.
- __ lw(a2, MemOperand(sp, kSavedRegistersAreaSize));
+ // The bailout id is passed in kRootRegister by the caller.
+ __ mov(a2, kRootRegister);
// Get the address of the location in the code object (a3) (return
// address for lazy deoptimization) and compute the fp-to-sp delta in
// register t0.
__ mov(a3, ra);
- // Correct one word for bailout id.
- __ Addu(t0, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
-
+ __ Addu(t0, sp, Operand(kSavedRegistersAreaSize));
__ Subu(t0, fp, t0);
// Allocate a new deoptimizer object.
@@ -84,15 +82,15 @@ void Deoptimizer::TableEntryGenerator::Generate() {
__ JumpIfSmi(a1, &context_check);
__ lw(a0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ bind(&context_check);
- __ li(a1, Operand(static_cast<int>(deopt_kind())));
+ __ li(a1, Operand(static_cast<int>(deopt_kind)));
// a2: bailout id already loaded.
// a3: code address or 0 already loaded.
__ sw(t0, CFunctionArgumentOperand(5)); // Fp-to-sp delta.
- __ li(t1, Operand(ExternalReference::isolate_address(isolate())));
+ __ li(t1, Operand(ExternalReference::isolate_address(isolate)));
__ sw(t1, CFunctionArgumentOperand(6)); // Isolate.
// Call Deoptimizer::New().
{
- AllowExternalCallThatCantCauseGC scope(masm());
+ AllowExternalCallThatCantCauseGC scope(masm);
__ CallCFunction(ExternalReference::new_deoptimizer_function(), 6);
}
@@ -138,8 +136,8 @@ void Deoptimizer::TableEntryGenerator::Generate() {
__ swc1(f0, MemOperand(a1, dst_offset));
}
- // Remove the bailout id and the saved registers from the stack.
- __ Addu(sp, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
+ // Remove the saved registers from the stack.
+ __ Addu(sp, sp, Operand(kSavedRegistersAreaSize));
// Compute a pointer to the unwinding limit in register a2; that is
// the first stack slot not part of the input frame.
@@ -166,7 +164,7 @@ void Deoptimizer::TableEntryGenerator::Generate() {
__ PrepareCallCFunction(1, a1);
// Call Deoptimizer::ComputeOutputFrames().
{
- AllowExternalCallThatCantCauseGC scope(masm());
+ AllowExternalCallThatCantCauseGC scope(masm);
__ CallCFunction(ExternalReference::compute_output_frames_function(), 1);
}
__ pop(a0); // Restore deoptimizer object (class Deoptimizer).
@@ -226,15 +224,12 @@ void Deoptimizer::TableEntryGenerator::Generate() {
}
}
- __ InitializeRootRegister();
-
__ pop(at); // Get continuation, leave pc on stack.
__ pop(ra);
__ Jump(at);
__ stop("Unreachable.");
}
-
// Maximum size of a table entry generated below.
#ifdef _MIPS_ARCH_MIPS32R6
const int Deoptimizer::table_entry_size_ = 2 * kInstrSize;
@@ -242,72 +237,6 @@ const int Deoptimizer::table_entry_size_ = 2 * kInstrSize;
const int Deoptimizer::table_entry_size_ = 3 * kInstrSize;
#endif
-void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
- Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm());
-
- // Create a sequence of deoptimization entries.
- // Note that registers are still live when jumping to an entry.
- Label table_start, done, trampoline_jump;
- __ bind(&table_start);
-
-#ifdef _MIPS_ARCH_MIPS32R6
- int kMaxEntriesBranchReach =
- (1 << (kImm26Bits - 2)) / (table_entry_size_ / kInstrSize);
-#else
- int kMaxEntriesBranchReach =
- (1 << (kImm16Bits - 2)) / (table_entry_size_ / kInstrSize);
-#endif
-
- if (count() <= kMaxEntriesBranchReach) {
- // Common case.
- for (int i = 0; i < count(); i++) {
- Label start;
- __ bind(&start);
- DCHECK(is_int16(i));
- if (IsMipsArchVariant(kMips32r6)) {
- __ li(kScratchReg, i);
- __ BranchShort(PROTECT, &done);
- } else {
- __ BranchShort(USE_DELAY_SLOT, &done); // Expose delay slot.
- __ li(kScratchReg, i); // In the delay slot.
- __ nop();
- }
- DCHECK_EQ(table_entry_size_, masm()->SizeOfCodeGeneratedSince(&start));
- }
-
- DCHECK_EQ(masm()->SizeOfCodeGeneratedSince(&table_start),
- count() * table_entry_size_);
- __ bind(&done);
- __ Push(kScratchReg);
- } else {
- DCHECK(!IsMipsArchVariant(kMips32r6));
- // Uncommon case, the branch cannot reach.
- // Create mini trampoline to reach the end of the table
- for (int i = 0, j = 0; i < count(); i++, j++) {
- Label start;
- __ bind(&start);
- DCHECK(is_int16(i));
- if (j >= kMaxEntriesBranchReach) {
- j = 0;
- __ li(kScratchReg, i);
- __ bind(&trampoline_jump);
- trampoline_jump = Label();
- __ BranchShort(USE_DELAY_SLOT, &trampoline_jump);
- __ nop();
- } else {
- __ BranchShort(USE_DELAY_SLOT, &trampoline_jump); // Expose delay slot.
- __ li(kScratchReg, i); // In the delay slot.
- __ nop();
- }
- DCHECK_EQ(table_entry_size_, masm()->SizeOfCodeGeneratedSince(&start));
- }
-
- DCHECK_EQ(masm()->SizeOfCodeGeneratedSince(&table_start),
- count() * table_entry_size_);
- __ bind(&trampoline_jump);
- __ Push(kScratchReg);
- }
-}
bool Deoptimizer::PadTopOfStackRegister() { return false; }
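Editor's note: the net effect of this file's changes is that the per-entry
deopt table (one tiny stub per bailout id, each pushing its id) is gone; the
caller now parks the id in kRootRegister and a single shared entry reads it
back (the mov(a2, kRootRegister) above, paired with CallForDeoptimization
added in the macro-assembler below). A toy C++ simulation of that handoff,
with hypothetical names only:

    #include <cstdint>
    #include <cstdio>

    static uint32_t root_register;  // stands in for kRootRegister

    void CallForDeoptimizationSketch(uint32_t deopt_id) {
      root_register = deopt_id;  // the id travels in a register, not on the stack
    }

    uint32_t SharedDeoptEntrySketch() {
      return root_register;  // the entry's "mov(a2, kRootRegister)"
    }

    int main() {
      CallForDeoptimizationSketch(42);
      std::printf("bailout id = %u\n", SharedDeoptEntrySketch());
      return 0;
    }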
diff --git a/deps/v8/src/mips/disasm-mips.cc b/deps/v8/src/mips/disasm-mips.cc
index e7ec95b7ac..e83d56aa5b 100644
--- a/deps/v8/src/mips/disasm-mips.cc
+++ b/deps/v8/src/mips/disasm-mips.cc
@@ -1686,7 +1686,11 @@ void Decoder::DecodeTypeImmediateSPECIAL3(Instruction* instr) {
switch (instr->FunctionFieldRaw()) {
case LL_R6: {
if (IsMipsArchVariant(kMips32r6)) {
- Format(instr, "ll 'rt, 'imm9s('rs)");
+ if (instr->Bit(6)) {
+ Format(instr, "llx 'rt, 'imm9s('rs)");
+ } else {
+ Format(instr, "ll 'rt, 'imm9s('rs)");
+ }
} else {
Unknown(instr);
}
@@ -1694,7 +1698,11 @@ void Decoder::DecodeTypeImmediateSPECIAL3(Instruction* instr) {
}
case SC_R6: {
if (IsMipsArchVariant(kMips32r6)) {
- Format(instr, "sc 'rt, 'imm9s('rs)");
+ if (instr->Bit(6)) {
+ Format(instr, "scx 'rt, 'imm9s('rs)");
+ } else {
+ Format(instr, "sc 'rt, 'imm9s('rs)");
+ }
} else {
Unknown(instr);
}
@@ -1748,7 +1756,11 @@ void Decoder::DecodeTypeImmediate(Instruction* instr) {
Format(instr, "bltz 'rs, 'imm16u -> 'imm16p4s2");
break;
case BLTZAL:
- Format(instr, "bltzal 'rs, 'imm16u -> 'imm16p4s2");
+ if (instr->RsValue() == 0) {
+ Format(instr, "nal");
+ } else {
+ Format(instr, "bltzal 'rs, 'imm16u -> 'imm16p4s2");
+ }
break;
case BGEZ:
Format(instr, "bgez 'rs, 'imm16u -> 'imm16p4s2");
@@ -1958,14 +1970,14 @@ void Decoder::DecodeTypeImmediate(Instruction* instr) {
if (IsMipsArchVariant(kMips32r6)) {
Unknown(instr);
} else {
- Format(instr, "ll 'rt, 'imm16s('rs)");
+ Format(instr, "ll 'rt, 'imm16s('rs)");
}
break;
case SC:
if (IsMipsArchVariant(kMips32r6)) {
Unknown(instr);
} else {
- Format(instr, "sc 'rt, 'imm16s('rs)");
+ Format(instr, "sc 'rt, 'imm16s('rs)");
}
break;
case LWC1:
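Editor's note: the r6 LL/SC fix above keys on instruction bit 6, which selects
the extended llx/scx encodings. A minimal sketch of that decode step, assuming
the opcode and function field have already been matched (as they are in
Decoder::DecodeTypeImmediateSPECIAL3); the helper name is hypothetical:

    #include <cstdint>
    #include <cstdio>

    // Pick the mnemonic once LL_R6/SC_R6 has been matched.
    const char* LLFamilyMnemonic(uint32_t instr, bool is_store) {
      bool extended = (instr >> 6) & 1;  // instr->Bit(6) in the decoder
      if (is_store) return extended ? "scx" : "sc";
      return extended ? "llx" : "ll";
    }

    int main() {
      std::printf("%s\n", LLFamilyMnemonic(1u << 6, false));  // prints "llx"
      return 0;
    }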
diff --git a/deps/v8/src/mips/frame-constants-mips.h b/deps/v8/src/mips/frame-constants-mips.h
index 243ad6cdc2..eab0e9be16 100644
--- a/deps/v8/src/mips/frame-constants-mips.h
+++ b/deps/v8/src/mips/frame-constants-mips.h
@@ -13,8 +13,14 @@ namespace internal {
class EntryFrameConstants : public AllStatic {
public:
+ // This is the offset to where JSEntry pushes the current value of
+ // Isolate::c_entry_fp onto the stack.
static constexpr int kCallerFPOffset =
-(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize);
+
+ // Stack offsets for arguments passed to JSEntry.
+ static constexpr int kArgcOffset = +0 * kSystemPointerSize;
+ static constexpr int kArgvOffset = +1 * kSystemPointerSize;
};
class ExitFrameConstants : public TypedFrameConstants {
@@ -39,7 +45,7 @@ class ExitFrameConstants : public TypedFrameConstants {
class WasmCompileLazyFrameConstants : public TypedFrameConstants {
public:
- static constexpr int kNumberOfSavedGpParamRegs = 4;
+ static constexpr int kNumberOfSavedGpParamRegs = 3;
static constexpr int kNumberOfSavedFpParamRegs = 7;
// FP-relative.
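Editor's note: the new kArgcOffset/kArgvOffset constants describe where JSEntry
finds its two stack-passed arguments relative to sp on entry. A hedged sketch of
the addressing they imply; the loader functions are illustrative, not V8 code:

    #include <cstdint>

    constexpr int kSystemPointerSize = 4;  // MIPS32
    constexpr int kArgcOffset = 0 * kSystemPointerSize;
    constexpr int kArgvOffset = 1 * kSystemPointerSize;

    int LoadArgc(const uint8_t* sp) {
      return *reinterpret_cast<const int*>(sp + kArgcOffset);
    }

    const uint8_t* const* LoadArgv(const uint8_t* sp) {
      return reinterpret_cast<const uint8_t* const*>(sp + kArgvOffset);
    }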
diff --git a/deps/v8/src/mips/interface-descriptors-mips.cc b/deps/v8/src/mips/interface-descriptors-mips.cc
index 1ece4812a3..0eda758193 100644
--- a/deps/v8/src/mips/interface-descriptors-mips.cc
+++ b/deps/v8/src/mips/interface-descriptors-mips.cc
@@ -6,6 +6,8 @@
#include "src/interface-descriptors.h"
+#include "src/frames.h"
+
namespace v8 {
namespace internal {
@@ -20,6 +22,33 @@ void CallInterfaceDescriptor::DefaultInitializePlatformSpecific(
default_stub_registers);
}
+// On MIPS it is not allowed to use odd-numbered floating point registers
+// (e.g. f1, f3, etc.) for parameters. This can happen if we use
+// DefaultInitializePlatformSpecific to assign float registers to parameters.
+// E.g. if the fourth parameter goes to a float register, f7 would be assigned
+// to it (a3 cast to int is 7).
+bool CallInterfaceDescriptor::IsValidFloatParameterRegister(Register reg) {
+ return reg.code() % 2 == 0;
+}
+
+void WasmI32AtomicWaitDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ /* Register t4 corresponds to the f12 FPU register. */
+ const Register default_stub_registers[] = {a0, a1, t4};
+ CHECK_EQ(static_cast<size_t>(kParameterCount),
+ arraysize(default_stub_registers));
+ data->InitializePlatformSpecific(kParameterCount, default_stub_registers);
+}
+
+void WasmI64AtomicWaitDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ /* Register t4 corresponds to the f12 FPU register. */
+ const Register default_stub_registers[] = {a0, a1, a2, t4};
+ CHECK_EQ(static_cast<size_t>(kParameterCount),
+ arraysize(default_stub_registers));
+ data->InitializePlatformSpecific(kParameterCount, default_stub_registers);
+}
+
void RecordWriteDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
const Register default_stub_registers[] = {a0, a1, a2, a3, kReturnRegister0};
@@ -70,12 +99,6 @@ void TypeofDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void CallFunctionDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {a1};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
void CallTrampolineDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// a1: target
@@ -208,10 +231,9 @@ void ArgumentsAdaptorDescriptor::InitializePlatformSpecific(
void ApiCallbackDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
- JavaScriptFrame::context_register(), // callee context
- t0, // call_data
- a2, // holder
- a1, // api_function_address
+ JavaScriptFrame::context_register(), // kTargetContext
+ a1, // kApiFunctionAddress
+ a2, // kArgc
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
@@ -263,6 +285,12 @@ void FrameDropperTrampolineDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+void RunMicrotasksEntryDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {a0, a1};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
} // namespace internal
} // namespace v8
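Editor's note: the even-register rule added above presumably reflects the O32
FPU convention, where odd-numbered single-precision registers serve as the high
halves of even/odd 64-bit pairs, so only even codes (f0, f2, f4, ...) can carry
float parameters; the t4 entries in the two Wasm atomic-wait descriptors are the
GP registers whose parameter slots map onto f12, per the comments in the diff.
A tiny self-contained restatement of the check:

    #include <cassert>

    bool IsValidFloatParameterRegisterCode(int code) {
      return code % 2 == 0;  // odd codes are the high halves of 64-bit pairs
    }

    int main() {
      assert(IsValidFloatParameterRegisterCode(12));   // f12 is usable
      assert(!IsValidFloatParameterRegisterCode(7));   // f7 is rejected
      return 0;
    }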
diff --git a/deps/v8/src/mips/macro-assembler-mips.cc b/deps/v8/src/mips/macro-assembler-mips.cc
index c10602df48..e0de62e1da 100644
--- a/deps/v8/src/mips/macro-assembler-mips.cc
+++ b/deps/v8/src/mips/macro-assembler-mips.cc
@@ -6,41 +6,33 @@
#if V8_TARGET_ARCH_MIPS
+#include "src/assembler-inl.h"
#include "src/base/bits.h"
#include "src/base/division-by-constant.h"
#include "src/bootstrapper.h"
#include "src/callable.h"
#include "src/code-factory.h"
-#include "src/code-stubs.h"
+#include "src/counters.h"
#include "src/debug/debug.h"
#include "src/external-reference-table.h"
#include "src/frames-inl.h"
-#include "src/instruction-stream.h"
-#include "src/mips/assembler-mips-inl.h"
-#include "src/mips/macro-assembler-mips.h"
+#include "src/macro-assembler.h"
+#include "src/objects/heap-number.h"
#include "src/register-configuration.h"
#include "src/runtime/runtime.h"
+#include "src/snapshot/embedded-data.h"
#include "src/snapshot/snapshot.h"
#include "src/wasm/wasm-code-manager.h"
+// Satisfy cpplint check, but don't include platform-specific header. It is
+// included recursively via macro-assembler.h.
+#if 0
+#include "src/mips/macro-assembler-mips.h"
+#endif
+
namespace v8 {
namespace internal {
-MacroAssembler::MacroAssembler(Isolate* isolate,
- const AssemblerOptions& options, void* buffer,
- int size, CodeObjectRequired create_code_object)
- : TurboAssembler(isolate, options, buffer, size, create_code_object) {
- if (create_code_object == CodeObjectRequired::kYes) {
- // Unlike TurboAssembler, which can be used off the main thread and may not
- // allocate, macro assembler creates its own copy of the self-reference
- // marker in order to disambiguate between self-references during nested
- // code generation (e.g.: codegen of the current object triggers stub
- // compilation through CodeStub::GetCode()).
- code_object_ = Handle<HeapObject>::New(
- *isolate->factory()->NewSelfReferenceMarker(), isolate);
- }
-}
-
static inline bool IsZero(const Operand& rt) {
if (rt.is_reg()) {
return rt.rm() == zero_reg;
@@ -128,14 +120,16 @@ int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
}
void TurboAssembler::LoadRoot(Register destination, RootIndex index) {
- lw(destination, MemOperand(kRootRegister, RootRegisterOffset(index)));
+ lw(destination,
+ MemOperand(kRootRegister, RootRegisterOffsetForRootIndex(index)));
}
void TurboAssembler::LoadRoot(Register destination, RootIndex index,
Condition cond, Register src1,
const Operand& src2) {
Branch(2, NegateCondition(cond), src1, src2);
- lw(destination, MemOperand(kRootRegister, RootRegisterOffset(index)));
+ lw(destination,
+ MemOperand(kRootRegister, RootRegisterOffsetForRootIndex(index)));
}
@@ -259,24 +253,42 @@ void TurboAssembler::RestoreRegisters(RegList registers) {
void TurboAssembler::CallRecordWriteStub(
Register object, Register address,
RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode) {
+ CallRecordWriteStub(
+ object, address, remembered_set_action, fp_mode,
+ isolate()->builtins()->builtin_handle(Builtins::kRecordWrite),
+ kNullAddress);
+}
+
+void TurboAssembler::CallRecordWriteStub(
+ Register object, Register address,
+ RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode,
+ Address wasm_target) {
+ CallRecordWriteStub(object, address, remembered_set_action, fp_mode,
+ Handle<Code>::null(), wasm_target);
+}
+
+void TurboAssembler::CallRecordWriteStub(
+ Register object, Register address,
+ RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode,
+ Handle<Code> code_target, Address wasm_target) {
+ DCHECK_NE(code_target.is_null(), wasm_target == kNullAddress);
// TODO(albertnetymk): For now we ignore remembered_set_action and fp_mode,
// i.e. always emit remember set and save FP registers in RecordWriteStub. If
// large performance regression is observed, we should use these values to
// avoid unnecessary work.
- Callable const callable =
- Builtins::CallableFor(isolate(), Builtins::kRecordWrite);
- RegList registers = callable.descriptor().allocatable_registers();
+ RecordWriteDescriptor descriptor;
+ RegList registers = descriptor.allocatable_registers();
SaveRegisters(registers);
- Register object_parameter(callable.descriptor().GetRegisterParameter(
- RecordWriteDescriptor::kObject));
+ Register object_parameter(
+ descriptor.GetRegisterParameter(RecordWriteDescriptor::kObject));
Register slot_parameter(
- callable.descriptor().GetRegisterParameter(RecordWriteDescriptor::kSlot));
- Register remembered_set_parameter(callable.descriptor().GetRegisterParameter(
- RecordWriteDescriptor::kRememberedSet));
- Register fp_mode_parameter(callable.descriptor().GetRegisterParameter(
- RecordWriteDescriptor::kFPMode));
+ descriptor.GetRegisterParameter(RecordWriteDescriptor::kSlot));
+ Register remembered_set_parameter(
+ descriptor.GetRegisterParameter(RecordWriteDescriptor::kRememberedSet));
+ Register fp_mode_parameter(
+ descriptor.GetRegisterParameter(RecordWriteDescriptor::kFPMode));
Push(object);
Push(address);
@@ -286,7 +298,11 @@ void TurboAssembler::CallRecordWriteStub(
Move(remembered_set_parameter, Smi::FromEnum(remembered_set_action));
Move(fp_mode_parameter, Smi::FromEnum(fp_mode));
- Call(callable.code(), RelocInfo::CODE_TARGET);
+ if (code_target.is_null()) {
+ Call(wasm_target, RelocInfo::WASM_STUB_CALL);
+ } else {
+ Call(code_target, RelocInfo::CODE_TARGET);
+ }
RestoreRegisters(registers);
}
@@ -1724,8 +1740,7 @@ void TurboAssembler::InsertBits(Register dest, Register source, Register pos,
{
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
- Subu(scratch, pos, Operand(32));
- Neg(scratch, Operand(scratch));
+ Subu(scratch, zero_reg, pos);
Ror(dest, dest, scratch);
}
}
@@ -3644,8 +3659,7 @@ bool TurboAssembler::BranchAndLinkShortCheck(int32_t offset, Label* L,
void TurboAssembler::LoadFromConstantsTable(Register destination,
int constant_index) {
- DCHECK(isolate()->heap()->RootCanBeTreatedAsConstant(
- RootIndex::kBuiltinsConstantsTable));
+ DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kBuiltinsConstantsTable));
LoadRoot(destination, RootIndex::kBuiltinsConstantsTable);
lw(destination,
FieldMemOperand(destination,
@@ -3795,23 +3809,38 @@ void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
DCHECK(RelocInfo::IsCodeTarget(rmode));
BlockTrampolinePoolScope block_trampoline_pool(this);
if (FLAG_embedded_builtins) {
- if (root_array_available_ && options().isolate_independent_code) {
+ int builtin_index = Builtins::kNoBuiltinId;
+ bool target_is_isolate_independent_builtin =
+ isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
+ Builtins::IsIsolateIndependent(builtin_index);
+ if (target_is_isolate_independent_builtin &&
+ options().use_pc_relative_calls_and_jumps) {
+ int32_t code_target_index = AddCodeTarget(code);
+ Label skip;
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ if (cond != cc_always) {
+ // By using delay slot, we always execute first instruction of
+ // GenPcRelativeJump (which is or_(t8, ra, zero_reg)).
+ Branch(USE_DELAY_SLOT, &skip, NegateCondition(cond), rs, rt);
+ }
+ GenPCRelativeJump(t8, t9, code_target_index,
+ RelocInfo::RELATIVE_CODE_TARGET, bd);
+ bind(&skip);
+ return;
+ } else if (root_array_available_ && options().isolate_independent_code) {
IndirectLoadConstant(t9, code);
Jump(t9, Code::kHeaderSize - kHeapObjectTag, cond, rs, rt, bd);
return;
- } else if (options().inline_offheap_trampolines) {
- int builtin_index = Builtins::kNoBuiltinId;
- if (isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
- Builtins::IsIsolateIndependent(builtin_index)) {
- // Inline the trampoline.
- RecordCommentForOffHeapTrampoline(builtin_index);
- CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
- EmbeddedData d = EmbeddedData::FromBlob();
- Address entry = d.InstructionStartOfBuiltin(builtin_index);
- li(t9, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
- Jump(t9, 0, cond, rs, rt, bd);
- return;
- }
+ } else if (target_is_isolate_independent_builtin &&
+ options().inline_offheap_trampolines) {
+ // Inline the trampoline.
+ RecordCommentForOffHeapTrampoline(builtin_index);
+ CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
+ EmbeddedData d = EmbeddedData::FromBlob();
+ Address entry = d.InstructionStartOfBuiltin(builtin_index);
+ li(t9, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
+ Jump(t9, 0, cond, rs, rt, bd);
+ return;
}
}
Jump(static_cast<intptr_t>(code.address()), rmode, cond, rs, rt, bd);
@@ -3902,23 +3931,36 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
BranchDelaySlot bd) {
BlockTrampolinePoolScope block_trampoline_pool(this);
if (FLAG_embedded_builtins) {
- if (root_array_available_ && options().isolate_independent_code) {
+ int builtin_index = Builtins::kNoBuiltinId;
+ bool target_is_isolate_independent_builtin =
+ isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
+ Builtins::IsIsolateIndependent(builtin_index);
+ if (target_is_isolate_independent_builtin &&
+ options().use_pc_relative_calls_and_jumps) {
+ int32_t code_target_index = AddCodeTarget(code);
+ Label skip;
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ if (cond != cc_always) {
+ Branch(PROTECT, &skip, NegateCondition(cond), rs, rt);
+ }
+ GenPCRelativeJumpAndLink(t8, code_target_index,
+ RelocInfo::RELATIVE_CODE_TARGET, bd);
+ bind(&skip);
+ return;
+ } else if (root_array_available_ && options().isolate_independent_code) {
IndirectLoadConstant(t9, code);
Call(t9, Code::kHeaderSize - kHeapObjectTag, cond, rs, rt, bd);
return;
- } else if (options().inline_offheap_trampolines) {
- int builtin_index = Builtins::kNoBuiltinId;
- if (isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
- Builtins::IsIsolateIndependent(builtin_index)) {
- // Inline the trampoline.
- RecordCommentForOffHeapTrampoline(builtin_index);
- CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
- EmbeddedData d = EmbeddedData::FromBlob();
- Address entry = d.InstructionStartOfBuiltin(builtin_index);
- li(t9, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
- Call(t9, 0, cond, rs, rt, bd);
- return;
- }
+ } else if (target_is_isolate_independent_builtin &&
+ options().inline_offheap_trampolines) {
+ // Inline the trampoline.
+ RecordCommentForOffHeapTrampoline(builtin_index);
+ CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
+ EmbeddedData d = EmbeddedData::FromBlob();
+ Address entry = d.InstructionStartOfBuiltin(builtin_index);
+ li(t9, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
+ Call(t9, 0, cond, rs, rt, bd);
+ return;
}
}
DCHECK(RelocInfo::IsCodeTarget(rmode));
@@ -3926,6 +3968,57 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
Call(code.address(), rmode, cond, rs, rt, bd);
}
+void TurboAssembler::CallBuiltinPointer(Register builtin_pointer) {
+ STATIC_ASSERT(kSystemPointerSize == 4);
+ STATIC_ASSERT(kSmiShiftSize == 0);
+ STATIC_ASSERT(kSmiTagSize == 1);
+ STATIC_ASSERT(kSmiTag == 0);
+
+ // The builtin_pointer register contains the builtin index as a Smi.
+ SmiUntag(builtin_pointer, builtin_pointer);
+ Lsa(builtin_pointer, kRootRegister, builtin_pointer, kSystemPointerSizeLog2);
+ lw(builtin_pointer,
+ MemOperand(builtin_pointer, IsolateData::builtin_entry_table_offset()));
+ Call(builtin_pointer);
+}
+
+void TurboAssembler::StoreReturnAddressAndCall(Register target) {
+ // This generates the final instruction sequence for calls to C functions
+ // once an exit frame has been constructed.
+ //
+ // Note that this assumes the caller code (i.e. the Code object currently
+ // being generated) is immovable or that the callee function cannot trigger
+ // GC, since the callee function will return to it.
+
+ Assembler::BlockTrampolinePoolScope block_trampoline_pool(this);
+ static constexpr int kNumInstructionsToJump = 4;
+ Label find_ra;
+ // Adjust the value in ra to point to the correct return location, 2nd
+ // instruction past the real call into C code (the jalr(t9)), and push it.
+ // This is the return address of the exit frame.
+ if (kArchVariant >= kMips32r6) {
+ addiupc(ra, kNumInstructionsToJump + 1);
+ } else {
+ // This no-op-and-link sequence saves PC + 8 in the ra register on pre-r6 MIPS.
+ nal(); // nal has branch delay slot.
+ Addu(ra, ra, kNumInstructionsToJump * kInstrSize);
+ }
+ bind(&find_ra);
+
+ // This spot was reserved in EnterExitFrame.
+ sw(ra, MemOperand(sp));
+ // Stack space reservation moved to the branch delay slot below.
+ // Stack is still aligned.
+
+ // Call the C routine.
+ mov(t9, target); // Function pointer to t9 to conform to ABI for PIC.
+ jalr(t9);
+ // Set up sp in the delay slot.
+ addiu(sp, sp, -kCArgsSlotsSize);
+ // Make sure the stored 'ra' points to this position.
+ DCHECK_EQ(kNumInstructionsToJump, InstructionsGeneratedSince(&find_ra));
+}
+
void TurboAssembler::Ret(Condition cond, Register rs, const Operand& rt,
BranchDelaySlot bd) {
Jump(ra, 0, cond, rs, rt, bd);
@@ -3940,17 +4033,7 @@ void TurboAssembler::BranchLong(Label* L, BranchDelaySlot bdslot) {
BlockTrampolinePoolScope block_trampoline_pool(this);
int32_t imm32;
imm32 = branch_long_offset(L);
- or_(t8, ra, zero_reg);
- nal(); // Read PC into ra register.
- lui(t9, (imm32 & kHiMask) >> kLuiShift); // Branch delay slot.
- ori(t9, t9, (imm32 & kImm16Mask));
- addu(t9, ra, t9);
- if (bdslot == USE_DELAY_SLOT) {
- or_(ra, t8, zero_reg);
- }
- jr(t9);
- // Emit a or_ in the branch delay slot if it's protected.
- if (bdslot == PROTECT) or_(ra, t8, zero_reg);
+ GenPCRelativeJump(t8, t9, imm32, RelocInfo::NONE, bdslot);
}
}
@@ -3963,13 +4046,7 @@ void TurboAssembler::BranchAndLinkLong(Label* L, BranchDelaySlot bdslot) {
BlockTrampolinePoolScope block_trampoline_pool(this);
int32_t imm32;
imm32 = branch_long_offset(L);
- lui(t8, (imm32 & kHiMask) >> kLuiShift);
- nal(); // Read PC into ra register.
- ori(t8, t8, (imm32 & kImm16Mask)); // Branch delay slot.
- addu(t8, ra, t8);
- jalr(t8);
- // Emit a nop in the branch delay slot if required.
- if (bdslot == PROTECT) nop();
+ GenPCRelativeJumpAndLink(t8, imm32, RelocInfo::NONE, bdslot);
}
}
@@ -4039,7 +4116,7 @@ void TurboAssembler::Push(Handle<HeapObject> handle) {
push(scratch);
}
-void TurboAssembler::Push(Smi* smi) {
+void TurboAssembler::Push(Smi smi) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
li(scratch, Operand(smi));
@@ -4062,7 +4139,7 @@ void MacroAssembler::PushStackHandler() {
STATIC_ASSERT(StackHandlerConstants::kSize == 2 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
- Push(Smi::kZero); // Padding.
+ Push(Smi::zero()); // Padding.
// Link the current handler as the next handler.
li(t2,
@@ -4426,39 +4503,6 @@ void MacroAssembler::GetObjectType(Register object,
// -----------------------------------------------------------------------------
// Runtime calls.
-void MacroAssembler::CallStub(CodeStub* stub,
- Condition cond,
- Register r1,
- const Operand& r2,
- BranchDelaySlot bd) {
- DCHECK(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
- Call(stub->GetCode(), RelocInfo::CODE_TARGET, cond, r1, r2, bd);
-}
-
-void TurboAssembler::CallStubDelayed(CodeStub* stub, Condition cond,
- Register r1, const Operand& r2,
- BranchDelaySlot bd) {
- DCHECK(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
-
- BlockTrampolinePoolScope block_trampoline_pool(this);
- UseScratchRegisterScope temps(this);
- Register scratch = temps.Acquire();
- li(scratch, Operand::EmbeddedCode(stub));
- Call(scratch);
-}
-
-void MacroAssembler::TailCallStub(CodeStub* stub,
- Condition cond,
- Register r1,
- const Operand& r2,
- BranchDelaySlot bd) {
- Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond, r1, r2, bd);
-}
-
-bool TurboAssembler::AllowThisStubCall(CodeStub* stub) {
- return has_frame() || !stub->SometimesSetsUpAFrame();
-}
-
void TurboAssembler::AddOverflow(Register dst, Register left,
const Operand& right, Register overflow) {
BlockTrampolinePoolScope block_trampoline_pool(this);
@@ -4606,7 +4650,7 @@ void MacroAssembler::JumpToInstructionStream(Address entry) {
void MacroAssembler::LoadWeakValue(Register out, Register in,
Label* target_if_cleared) {
- Branch(target_if_cleared, eq, in, Operand(kClearedWeakHeapObject));
+ Branch(target_if_cleared, eq, in, Operand(kClearedWeakHeapObjectLower32));
And(out, in, Operand(~kWeakHeapObjectMask));
}
@@ -4741,19 +4785,6 @@ void TurboAssembler::LeaveFrame(StackFrame::Type type) {
lw(fp, MemOperand(fp, 0 * kPointerSize));
}
-void MacroAssembler::EnterBuiltinFrame(Register context, Register target,
- Register argc) {
- Push(ra, fp);
- Move(fp, sp);
- Push(context, target, argc);
-}
-
-void MacroAssembler::LeaveBuiltinFrame(Register context, Register target,
- Register argc) {
- Pop(context, target, argc);
- Pop(ra, fp);
-}
-
void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
StackFrame::Type frame_type) {
BlockTrampolinePoolScope block_trampoline_pool(this);
@@ -4822,7 +4853,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
}
// Reserve place for the return address, stack space and an optional slot
- // (used by the DirectCEntryStub to hold the return value if a struct is
+ // (used by DirectCEntry to hold the return value if a struct is
// returned) and align the frame preparing for calling the runtime function.
DCHECK_GE(stack_space, 0);
Subu(sp, sp, Operand((stack_space + 2) * kPointerSize));
@@ -5041,6 +5072,9 @@ void MacroAssembler::AssertGeneratorObject(Register object) {
// Check if JSGeneratorObject
Branch(&done, eq, t8, Operand(JS_GENERATOR_OBJECT_TYPE));
+ // Check if JSAsyncFunctionObject (See MacroAssembler::CompareInstanceType)
+ Branch(&done, eq, t8, Operand(JS_ASYNC_FUNCTION_OBJECT_TYPE));
+
// Check if JSAsyncGeneratorObject
Branch(&done, eq, t8, Operand(JS_ASYNC_GENERATOR_OBJECT_TYPE));
@@ -5379,7 +5413,36 @@ void TurboAssembler::CallCFunctionHelper(Register function_base,
function_offset = 0;
}
+ // Save the frame pointer and PC so that the stack layout remains iterable,
+ // even without an ExitFrame, which normally exists between JS and C frames.
+ if (isolate() != nullptr) {
+ // 't' registers are caller-saved so this is safe as a scratch register.
+ Register scratch1 = t4;
+ Register scratch2 = t5;
+ DCHECK(!AreAliased(scratch1, scratch2, function_base));
+
+ Label get_pc;
+ mov(scratch1, ra);
+ Call(&get_pc);
+
+ bind(&get_pc);
+ mov(scratch2, ra);
+ mov(ra, scratch1);
+
+ li(scratch1, ExternalReference::fast_c_call_caller_pc_address(isolate()));
+ sw(scratch2, MemOperand(scratch1));
+ li(scratch1, ExternalReference::fast_c_call_caller_fp_address(isolate()));
+ sw(fp, MemOperand(scratch1));
+ }
+
Call(function_base, function_offset);
+
+ if (isolate() != nullptr) {
+ // We don't unset the PC; the FP is the source of truth.
+ Register scratch = t4;
+ li(scratch, ExternalReference::fast_c_call_caller_fp_address(isolate()));
+ sw(zero_reg, MemOperand(scratch));
+ }
}
int stack_passed_arguments = CalculateStackPassedWords(
@@ -5451,6 +5514,17 @@ void TurboAssembler::ResetSpeculationPoisonRegister() {
li(kSpeculationPoisonRegister, -1);
}
+void TurboAssembler::CallForDeoptimization(Address target, int deopt_id) {
+ NoRootArrayScope no_root_array(this);
+
+ // Save the deopt id in kRootRegister (we don't need the roots array from now
+ // on).
+ DCHECK_LE(deopt_id, 0xFFFF);
+ li(kRootRegister, deopt_id);
+
+ Call(target, RelocInfo::RUNTIME_ENTRY);
+}
+
} // namespace internal
} // namespace v8
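Editor's note: both BranchLong and BranchAndLinkLong now funnel into the
GenPCRelativeJump/GenPCRelativeJumpAndLink helpers which, judging from the
deleted inline sequences, keep the old two-instruction offset materialization:
lui writes the upper 16 bits and ori ORs in the zero-extended lower 16, so no
sign compensation is needed. A tiny verifiable C++ restatement of that split
(function name is illustrative):

    #include <cassert>
    #include <cstdint>

    int32_t ReassembleLuiOri(int32_t imm32) {
      uint32_t hi = (static_cast<uint32_t>(imm32) >> 16) & 0xFFFFu;  // lui
      uint32_t lo = static_cast<uint32_t>(imm32) & 0xFFFFu;          // ori
      return static_cast<int32_t>((hi << 16) | lo);  // round-trips imm32
    }

    int main() {
      assert(ReassembleLuiOri(-0x12346) == -0x12346);
      assert(ReassembleLuiOri(0x7FFF8000) == 0x7FFF8000);
      return 0;
    }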
diff --git a/deps/v8/src/mips/macro-assembler-mips.h b/deps/v8/src/mips/macro-assembler-mips.h
index ae3138f85f..303fbb76b2 100644
--- a/deps/v8/src/mips/macro-assembler-mips.h
+++ b/deps/v8/src/mips/macro-assembler-mips.h
@@ -2,42 +2,21 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#ifndef INCLUDED_FROM_MACRO_ASSEMBLER_H
+#error This header must be included via macro-assembler.h
+#endif
+
#ifndef V8_MIPS_MACRO_ASSEMBLER_MIPS_H_
#define V8_MIPS_MACRO_ASSEMBLER_MIPS_H_
#include "src/assembler.h"
+#include "src/contexts.h"
#include "src/globals.h"
#include "src/mips/assembler-mips.h"
-#include "src/turbo-assembler.h"
namespace v8 {
namespace internal {
-// Give alias names to registers for calling conventions.
-constexpr Register kReturnRegister0 = v0;
-constexpr Register kReturnRegister1 = v1;
-constexpr Register kReturnRegister2 = a0;
-constexpr Register kJSFunctionRegister = a1;
-constexpr Register kContextRegister = s7;
-constexpr Register kAllocateSizeRegister = a0;
-constexpr Register kSpeculationPoisonRegister = t3;
-constexpr Register kInterpreterAccumulatorRegister = v0;
-constexpr Register kInterpreterBytecodeOffsetRegister = t4;
-constexpr Register kInterpreterBytecodeArrayRegister = t5;
-constexpr Register kInterpreterDispatchTableRegister = t6;
-
-constexpr Register kJavaScriptCallArgCountRegister = a0;
-constexpr Register kJavaScriptCallCodeStartRegister = a2;
-constexpr Register kJavaScriptCallTargetRegister = kJSFunctionRegister;
-constexpr Register kJavaScriptCallNewTargetRegister = a3;
-constexpr Register kJavaScriptCallExtraArg1Register = a2;
-
-constexpr Register kOffHeapTrampolineRegister = at;
-constexpr Register kRuntimeCallFunctionRegister = a1;
-constexpr Register kRuntimeCallArgCountRegister = a0;
-constexpr Register kRuntimeCallArgvRegister = a2;
-constexpr Register kWasmInstanceRegister = a0;
-
// Forward declarations
enum class AbortReason : uint8_t;
@@ -60,12 +39,6 @@ enum LeaveExitFrameMode {
NO_EMIT_RETURN = false
};
-// Allow programmer to use Branch Delay Slot of Branches, Jumps, Calls.
-enum BranchDelaySlot {
- USE_DELAY_SLOT,
- PROTECT
-};
-
// Flags used for the li macro-assembler function.
enum LiFlags {
// If the constant value can be represented in just 16 bits, then
@@ -118,14 +91,9 @@ inline MemOperand CFunctionArgumentOperand(int index) {
class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
public:
- TurboAssembler(const AssemblerOptions& options, void* buffer, int buffer_size)
- : TurboAssemblerBase(options, buffer, buffer_size) {}
-
- TurboAssembler(Isolate* isolate, const AssemblerOptions& options,
- void* buffer, int buffer_size,
- CodeObjectRequired create_code_object)
- : TurboAssemblerBase(isolate, options, buffer, buffer_size,
- create_code_object) {}
+ template <typename... Args>
+ explicit TurboAssembler(Args&&... args)
+ : TurboAssemblerBase(std::forward<Args>(args)...) {}
// Activation support.
void EnterFrame(StackFrame::Type type);
@@ -140,10 +108,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void Prologue();
void InitializeRootRegister() {
- ExternalReference roots_array_start =
- ExternalReference::roots_array_start(isolate());
- li(kRootRegister, Operand(roots_array_start));
- Addu(kRootRegister, kRootRegister, kRootRegisterBias);
+ ExternalReference isolate_root = ExternalReference::isolate_root(isolate());
+ li(kRootRegister, Operand(isolate_root));
}
// Jump unconditionally to given label.
@@ -166,8 +132,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// Print a message to stdout and abort execution.
void Abort(AbortReason msg);
- inline bool AllowThisStubCall(CodeStub* stub);
-
// Arguments macros.
#define COND_TYPED_ARGS Condition cond, Register r1, const Operand& r2
#define COND_ARGS cond, r1, r2
@@ -262,11 +226,28 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
COND_ARGS);
void Call(Label* target);
- void CallForDeoptimization(Address target, int deopt_id,
- RelocInfo::Mode rmode) {
- USE(deopt_id);
- Call(target, rmode);
+ void CallBuiltinPointer(Register builtin_pointer) override;
+
+ void LoadCodeObjectEntry(Register destination,
+ Register code_object) override {
+ // TODO(mips): Implement.
+ UNIMPLEMENTED();
+ }
+ void CallCodeObject(Register code_object) override {
+ // TODO(mips): Implement.
+ UNIMPLEMENTED();
}
+ void JumpCodeObject(Register code_object) override {
+ // TODO(mips): Implement.
+ UNIMPLEMENTED();
+ }
+
+ // Generates an instruction sequence such that the return address points to
+ // the instruction following the call.
+ // The return address on the stack is used by frame iteration.
+ void StoreReturnAddressAndCall(Register target);
+
+ void CallForDeoptimization(Address target, int deopt_id);
void Ret(COND_ARGS);
inline void Ret(BranchDelaySlot bd, Condition cond = al,
@@ -297,7 +278,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void Push(Register src) { push(src); }
void Push(Handle<HeapObject> handle);
- void Push(Smi* smi);
+ void Push(Smi smi);
// Push two registers. Pushes leftmost register first (to highest address).
void Push(Register src1, Register src2) {
@@ -347,6 +328,9 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void CallRecordWriteStub(Register object, Register address,
RememberedSetAction remembered_set_action,
SaveFPRegsMode fp_mode);
+ void CallRecordWriteStub(Register object, Register address,
+ RememberedSetAction remembered_set_action,
+ SaveFPRegsMode fp_mode, Address wasm_target);
// Push multiple registers on the stack.
// Registers are saved in numerical order, with higher numbered registers
@@ -532,8 +516,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void CheckPageFlag(Register object, Register scratch, int mask, Condition cc,
Label* condition_met);
-
- void CallStubDelayed(CodeStub* stub, COND_ARGS);
#undef COND_ARGS
// Call a runtime routine. This expects {centry} to contain a fitting CEntry
@@ -541,15 +523,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void CallRuntimeWithCEntry(Runtime::FunctionId fid, Register centry);
// Performs a truncating conversion of a floating point number as used by
- // the JS bitwise operations. See ECMA-262 9.5: ToInt32. Goes to 'done' if it
- // succeeds, otherwise falls through if result is saturated. On return
- // 'result' either holds answer, or is clobbered on fall through.
- //
- // Only public for the test code in test-code-stubs-arm.cc.
- void TryInlineTruncateDoubleToI(Register result, DoubleRegister input,
- Label* done);
-
- // Performs a truncating conversion of a floating point number as used by
// the JS bitwise operations. See ECMA-262 9.5: ToInt32.
// Exits with 'result' holding the answer.
void TruncateDoubleToI(Isolate* isolate, Zone* zone, Register result,
@@ -728,7 +701,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void mov(Register rd, Register rt) { or_(rd, rt, zero_reg); }
inline void Move(Register dst, Handle<HeapObject> handle) { li(dst, handle); }
- inline void Move(Register dst, Smi* smi) { li(dst, Operand(smi)); }
+ inline void Move(Register dst, Smi smi) { li(dst, Operand(smi)); }
inline void Move(Register dst, Register src) {
if (dst != src) {
@@ -860,6 +833,13 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
private:
bool has_double_zero_reg_set_ = false;
+ // Performs a truncating conversion of a floating point number as used by
+ // the JS bitwise operations. See ECMA-262 9.5: ToInt32. Goes to 'done' if it
+ // succeeds, otherwise falls through if result is saturated. On return
+ // 'result' either holds answer, or is clobbered on fall through.
+ void TryInlineTruncateDoubleToI(Register result, DoubleRegister input,
+ Label* done);
+
void CallCFunctionHelper(Register function_base, int16_t function_offset,
int num_reg_arguments, int num_double_arguments);
@@ -910,21 +890,19 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// Push a fixed frame, consisting of ra, fp.
void PushCommonFrame(Register marker_reg = no_reg);
+
+ void CallRecordWriteStub(Register object, Register address,
+ RememberedSetAction remembered_set_action,
+ SaveFPRegsMode fp_mode, Handle<Code> code_target,
+ Address wasm_target);
};
// MacroAssembler implements a collection of frequently used macros.
-class MacroAssembler : public TurboAssembler {
+class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
public:
- MacroAssembler(const AssemblerOptions& options, void* buffer, int size)
- : TurboAssembler(options, buffer, size) {}
-
- MacroAssembler(Isolate* isolate, void* buffer, int size,
- CodeObjectRequired create_code_object)
- : MacroAssembler(isolate, AssemblerOptions::Default(isolate), buffer,
- size, create_code_object) {}
-
- MacroAssembler(Isolate* isolate, const AssemblerOptions& options,
- void* buffer, int size, CodeObjectRequired create_code_object);
+ template <typename... Args>
+ explicit MacroAssembler(Args&&... args)
+ : TurboAssembler(std::forward<Args>(args)...) {}
// Swap two registers. If the scratch register is omitted then a slightly
// less efficient form using xor instead of mov is emitted.
@@ -1059,18 +1037,6 @@ class MacroAssembler : public TurboAssembler {
// -------------------------------------------------------------------------
// Runtime calls.
-#define COND_ARGS Condition cond = al, Register rs = zero_reg, \
-const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
-
- // Call a code stub.
- void CallStub(CodeStub* stub,
- COND_ARGS);
-
- // Tail call a code stub (jump).
- void TailCallStub(CodeStub* stub, COND_ARGS);
-
-#undef COND_ARGS
-
// Call a runtime routine.
void CallRuntime(const Runtime::Function* f, int num_arguments,
SaveFPRegsMode save_doubles = kDontSaveFPRegs);
@@ -1170,10 +1136,6 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
DecodeField<Field>(reg, reg);
}
- void EnterBuiltinFrame(Register context, Register target, Register argc);
- void LeaveBuiltinFrame(Register context, Register target, Register argc);
-
-
private:
// Helper functions for generating invokes.
void InvokePrologue(const ParameterCount& expected,
@@ -1186,6 +1148,8 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
// Needs access to SafepointRegisterStackIndex for compiled frame
// traversal.
friend class StandardFrame;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(MacroAssembler);
};
template <typename Func>
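Editor's note: both TurboAssembler and MacroAssembler now replace their
enumerated constructors with a perfect-forwarding template, so every base-class
constructor stays reachable without restating its signature in this header. A
minimal standalone illustration of the pattern (types are placeholders):

    #include <utility>

    struct Base {
      Base(int, void*, int) {}
    };

    struct Derived : Base {
      template <typename... Args>
      explicit Derived(Args&&... args) : Base(std::forward<Args>(args)...) {}
    };

    int main() {
      Derived d(0, nullptr, 256);  // forwards straight to Base's constructor
      return 0;
    }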
diff --git a/deps/v8/src/mips/register-mips.h b/deps/v8/src/mips/register-mips.h
new file mode 100644
index 0000000000..24ddd588a9
--- /dev/null
+++ b/deps/v8/src/mips/register-mips.h
@@ -0,0 +1,382 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_MIPS_REGISTER_MIPS_H_
+#define V8_MIPS_REGISTER_MIPS_H_
+
+#include "src/mips/constants-mips.h"
+#include "src/register.h"
+#include "src/reglist.h"
+
+namespace v8 {
+namespace internal {
+
+// clang-format off
+#define GENERAL_REGISTERS(V) \
+ V(zero_reg) V(at) V(v0) V(v1) V(a0) V(a1) V(a2) V(a3) \
+ V(t0) V(t1) V(t2) V(t3) V(t4) V(t5) V(t6) V(t7) \
+ V(s0) V(s1) V(s2) V(s3) V(s4) V(s5) V(s6) V(s7) V(t8) V(t9) \
+ V(k0) V(k1) V(gp) V(sp) V(fp) V(ra)
+
+#define ALLOCATABLE_GENERAL_REGISTERS(V) \
+ V(a0) V(a1) V(a2) V(a3) \
+ V(t0) V(t1) V(t2) V(t3) V(t4) V(t5) V(t6) V(s7) \
+ V(v0) V(v1)
+
+#define DOUBLE_REGISTERS(V) \
+ V(f0) V(f1) V(f2) V(f3) V(f4) V(f5) V(f6) V(f7) \
+ V(f8) V(f9) V(f10) V(f11) V(f12) V(f13) V(f14) V(f15) \
+ V(f16) V(f17) V(f18) V(f19) V(f20) V(f21) V(f22) V(f23) \
+ V(f24) V(f25) V(f26) V(f27) V(f28) V(f29) V(f30) V(f31)
+
+#define FLOAT_REGISTERS DOUBLE_REGISTERS
+#define SIMD128_REGISTERS(V) \
+ V(w0) V(w1) V(w2) V(w3) V(w4) V(w5) V(w6) V(w7) \
+ V(w8) V(w9) V(w10) V(w11) V(w12) V(w13) V(w14) V(w15) \
+ V(w16) V(w17) V(w18) V(w19) V(w20) V(w21) V(w22) V(w23) \
+ V(w24) V(w25) V(w26) V(w27) V(w28) V(w29) V(w30) V(w31)
+
+#define ALLOCATABLE_DOUBLE_REGISTERS(V) \
+ V(f0) V(f2) V(f4) V(f6) V(f8) V(f10) V(f12) V(f14) \
+ V(f16) V(f18) V(f20) V(f22) V(f24)
+// clang-format on
+
+// Register lists.
+// Note that the bit values must match those used in actual instruction
+// encoding.
+const int kNumRegs = 32;
+
+const RegList kJSCallerSaved = 1 << 2 | // v0
+ 1 << 3 | // v1
+ 1 << 4 | // a0
+ 1 << 5 | // a1
+ 1 << 6 | // a2
+ 1 << 7 | // a3
+ 1 << 8 | // t0
+ 1 << 9 | // t1
+ 1 << 10 | // t2
+ 1 << 11 | // t3
+ 1 << 12 | // t4
+ 1 << 13 | // t5
+ 1 << 14 | // t6
+ 1 << 15; // t7
+
+const int kNumJSCallerSaved = 14;
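A RegList is a plain bitmask indexed by register code, so membership tests and counts reduce to bit operations. A self-contained sketch, assuming RegList is a 32-bit unsigned integer as on MIPS32:

    #include <bitset>
    #include <cassert>
    #include <cstdint>

    using RegList = uint32_t;  // assumption: one bit per register code

    // Bits 2..15: v0, v1, a0-a3, t0-t7 (the same values as the list above).
    const RegList kJSCallerSaved = 0xFFFC;

    bool IsJSCallerSaved(int reg_code) {
      return (kJSCallerSaved & (RegList{1} << reg_code)) != 0;
    }

    int main() {
      assert(IsJSCallerSaved(2));    // v0
      assert(!IsJSCallerSaved(16));  // s0 is callee-saved
      // Matches kNumJSCallerSaved above.
      assert(std::bitset<32>(kJSCallerSaved).count() == 14);
    }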
+
+// Callee-saved registers preserved when switching from C to JavaScript.
+const RegList kCalleeSaved = 1 << 16 | // s0
+ 1 << 17 | // s1
+ 1 << 18 | // s2
+ 1 << 19 | // s3
+ 1 << 20 | // s4
+ 1 << 21 | // s5
+                            1 << 22 |  // s6 (roots in JavaScript code)
+                            1 << 23 |  // s7 (cp in JavaScript code)
+ 1 << 30; // fp/s8
+
+const int kNumCalleeSaved = 9;
+
+const RegList kCalleeSavedFPU = 1 << 20 | // f20
+ 1 << 22 | // f22
+ 1 << 24 | // f24
+ 1 << 26 | // f26
+ 1 << 28 | // f28
+ 1 << 30; // f30
+
+const int kNumCalleeSavedFPU = 6;
+
+const RegList kCallerSavedFPU = 1 << 0 | // f0
+ 1 << 2 | // f2
+ 1 << 4 | // f4
+ 1 << 6 | // f6
+ 1 << 8 | // f8
+ 1 << 10 | // f10
+ 1 << 12 | // f12
+ 1 << 14 | // f14
+ 1 << 16 | // f16
+ 1 << 18; // f18
+
+// Number of registers for which space is reserved in safepoints. Must be a
+// multiple of 8.
+const int kNumSafepointRegisters = 24;
+
+// Define the list of registers actually saved at safepoints.
+// Note that the number of saved registers may be smaller than the reserved
+// space, i.e. kNumSafepointSavedRegisters <= kNumSafepointRegisters.
+const RegList kSafepointSavedRegisters = kJSCallerSaved | kCalleeSaved;
+const int kNumSafepointSavedRegisters = kNumJSCallerSaved + kNumCalleeSaved;
+
+const int kUndefIndex = -1;
+// Maps register codes to their indexes on the stack of saved registers.
+const int kSafepointRegisterStackIndexMap[kNumRegs] = {kUndefIndex, // zero_reg
+ kUndefIndex, // at
+ 0, // v0
+ 1, // v1
+ 2, // a0
+ 3, // a1
+ 4, // a2
+ 5, // a3
+ 6, // t0
+ 7, // t1
+ 8, // t2
+ 9, // t3
+ 10, // t4
+ 11, // t5
+ 12, // t6
+ 13, // t7
+ 14, // s0
+ 15, // s1
+ 16, // s2
+ 17, // s3
+ 18, // s4
+ 19, // s5
+ 20, // s6
+ 21, // s7
+ kUndefIndex, // t8
+ kUndefIndex, // t9
+ kUndefIndex, // k0
+ kUndefIndex, // k1
+ kUndefIndex, // gp
+ kUndefIndex, // sp
+ 22, // fp
+ kUndefIndex};
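The table maps a register's code to its slot in the safepoint save area; kUndefIndex marks registers that are never saved. A hypothetical lookup helper over an abbreviated copy of the table:

    #include <cassert>

    const int kUndefIndex = -1;
    // Abbreviated copy of the table above; only the first six codes shown.
    const int kStackIndexMap[] = {kUndefIndex,  // zero_reg
                                  kUndefIndex,  // at
                                  0,            // v0
                                  1,            // v1
                                  2,            // a0
                                  3};           // a1

    // Hypothetical helper: returns the safepoint slot of a saved register.
    int SafepointRegisterStackIndex(int reg_code) {
      int index = kStackIndexMap[reg_code];
      assert(index != kUndefIndex);  // only saved registers have a slot
      return index;
    }

    int main() { assert(SafepointRegisterStackIndex(2) == 0); }  // v0 -> slot 0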
+
+// CPU Registers.
+//
+// 1) We would prefer to use an enum, but enum values are assignment-
+// compatible with int, which has caused code-generation bugs.
+//
+// 2) We would prefer to use a class instead of a struct but we don't like
+// the register initialization to depend on the particular initialization
+// order (which appears to be different on OS X, Linux, and Windows for the
+// installed versions of C++ we tried). Using a struct permits C-style
+// "initialization". Also, the Register objects cannot be const as this
+// forces initialization stubs in MSVC, making us dependent on initialization
+// order.
+//
+// 3) By not using an enum, we are possibly preventing the compiler from
+// doing certain constant folds, which may significantly reduce the
+// code generated for some assembly instructions (because they boil down
+// to a few constants). If this is a problem, we could change the code
+// such that we use an enum in optimized mode, and the struct in debug
+// mode. This way we get the compile-time error checking in debug mode
+// and best performance in optimized code.
+
+// -----------------------------------------------------------------------------
+// Implementation of Register and FPURegister.
+
+enum RegisterCode {
+#define REGISTER_CODE(R) kRegCode_##R,
+ GENERAL_REGISTERS(REGISTER_CODE)
+#undef REGISTER_CODE
+ kRegAfterLast
+};
+
+class Register : public RegisterBase<Register, kRegAfterLast> {
+ public:
+#if defined(V8_TARGET_LITTLE_ENDIAN)
+ static constexpr int kMantissaOffset = 0;
+ static constexpr int kExponentOffset = 4;
+#elif defined(V8_TARGET_BIG_ENDIAN)
+ static constexpr int kMantissaOffset = 4;
+ static constexpr int kExponentOffset = 0;
+#else
+#error Unknown endianness
+#endif
+
+ private:
+ friend class RegisterBase;
+ explicit constexpr Register(int code) : RegisterBase(code) {}
+};
+
+// s7: context register
+// s3: scratch register
+// s4: scratch register 2
+#define DECLARE_REGISTER(R) \
+ constexpr Register R = Register::from_code<kRegCode_##R>();
+GENERAL_REGISTERS(DECLARE_REGISTER)
+#undef DECLARE_REGISTER
+constexpr Register no_reg = Register::no_reg();
+
+int ToNumber(Register reg);
+
+Register ToRegister(int num);
+
+constexpr bool kPadArguments = false;
+constexpr bool kSimpleFPAliasing = true;
+constexpr bool kSimdMaskRegisters = false;
+
+enum DoubleRegisterCode {
+#define REGISTER_CODE(R) kDoubleCode_##R,
+ DOUBLE_REGISTERS(REGISTER_CODE)
+#undef REGISTER_CODE
+ kDoubleAfterLast
+};
+
+// Coprocessor register.
+class FPURegister : public RegisterBase<FPURegister, kDoubleAfterLast> {
+ public:
+ FPURegister low() const {
+ // Find low reg of a Double-reg pair, which is the reg itself.
+ DCHECK_EQ(code() % 2, 0); // Specified Double reg must be even.
+ return FPURegister::from_code(code());
+ }
+ FPURegister high() const {
+    // Find high reg of a Double-reg pair, which is reg + 1.
+ DCHECK_EQ(code() % 2, 0); // Specified Double reg must be even.
+ return FPURegister::from_code(code() + 1);
+ }
+
+ private:
+ friend class RegisterBase;
+ explicit constexpr FPURegister(int code) : RegisterBase(code) {}
+};
+
+enum MSARegisterCode {
+#define REGISTER_CODE(R) kMsaCode_##R,
+ SIMD128_REGISTERS(REGISTER_CODE)
+#undef REGISTER_CODE
+ kMsaAfterLast
+};
+
+// MIPS SIMD (MSA) register
+class MSARegister : public RegisterBase<MSARegister, kMsaAfterLast> {
+ friend class RegisterBase;
+ explicit constexpr MSARegister(int code) : RegisterBase(code) {}
+};
+
+// A few double registers are reserved: one as a scratch register and one to
+// hold 0.0.
+// f28: 0.0
+// f30: scratch register.
+
+// V8 now supports the O32 ABI, and the FPU Registers are organized as 32
+// 32-bit registers, f0 through f31. When used as 'double' they are used
+// in pairs, starting with the even numbered register. So a double operation
+// on f0 really uses f0 and f1.
+// (Modern mips hardware also supports 32 64-bit registers, via setting
+// (privileged) Status Register FR bit to 1. This is used by the N32 ABI,
+// but it is not in common use. Someday we will want to support this in v8.)
+
+// For O32 ABI, Floats and Doubles refer to same set of 32 32-bit registers.
+typedef FPURegister FloatRegister;
+
+typedef FPURegister DoubleRegister;
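Under O32 a double occupies an even/odd pair of 32-bit FPU registers, which is exactly what low() and high() above compute. An illustrative model of the pairing:

    #include <cassert>

    // Even register code = low word of the double; code + 1 = high word.
    int LowCode(int double_code) {
      assert(double_code % 2 == 0);  // doubles start on even codes
      return double_code;
    }
    int HighCode(int double_code) {
      assert(double_code % 2 == 0);
      return double_code + 1;
    }

    int main() {
      assert(LowCode(12) == 12);   // double f12 -> f12 holds the low word
      assert(HighCode(12) == 13);  // double f12 -> f13 holds the high word
    }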
+
+#define DECLARE_DOUBLE_REGISTER(R) \
+ constexpr DoubleRegister R = DoubleRegister::from_code<kDoubleCode_##R>();
+DOUBLE_REGISTERS(DECLARE_DOUBLE_REGISTER)
+#undef DECLARE_DOUBLE_REGISTER
+
+constexpr DoubleRegister no_dreg = DoubleRegister::no_reg();
+
+// SIMD registers.
+typedef MSARegister Simd128Register;
+
+#define DECLARE_SIMD128_REGISTER(R) \
+ constexpr Simd128Register R = Simd128Register::from_code<kMsaCode_##R>();
+SIMD128_REGISTERS(DECLARE_SIMD128_REGISTER)
+#undef DECLARE_SIMD128_REGISTER
+
+const Simd128Register no_msareg = Simd128Register::no_reg();
+
+// Register aliases.
+// cp is assumed to be a callee saved register.
+constexpr Register kRootRegister = s6;
+constexpr Register cp = s7;
+constexpr Register kScratchReg = s3;
+constexpr Register kScratchReg2 = s4;
+constexpr DoubleRegister kScratchDoubleReg = f30;
+constexpr DoubleRegister kDoubleRegZero = f28;
+// Used on mips32r6 for compare operations.
+constexpr DoubleRegister kDoubleCompareReg = f26;
+// MSA zero and scratch regs must have the same numbers as the FPU zero and
+// scratch regs.
+constexpr Simd128Register kSimd128RegZero = w28;
+constexpr Simd128Register kSimd128ScratchReg = w30;
+
+// FPU (coprocessor 1) control registers.
+// Currently only FCSR (#31) is implemented.
+struct FPUControlRegister {
+ bool is_valid() const { return reg_code == kFCSRRegister; }
+ bool is(FPUControlRegister creg) const { return reg_code == creg.reg_code; }
+ int code() const {
+ DCHECK(is_valid());
+ return reg_code;
+ }
+ int bit() const {
+ DCHECK(is_valid());
+ return 1 << reg_code;
+ }
+ void setcode(int f) {
+ reg_code = f;
+ DCHECK(is_valid());
+ }
+ // Unfortunately we can't make this private in a struct.
+ int reg_code;
+};
+
+constexpr FPUControlRegister no_fpucreg = {kInvalidFPUControlRegister};
+constexpr FPUControlRegister FCSR = {kFCSRRegister};
+
+// MSA control registers
+struct MSAControlRegister {
+ bool is_valid() const {
+ return (reg_code == kMSAIRRegister) || (reg_code == kMSACSRRegister);
+ }
+ bool is(MSAControlRegister creg) const { return reg_code == creg.reg_code; }
+ int code() const {
+ DCHECK(is_valid());
+ return reg_code;
+ }
+ int bit() const {
+ DCHECK(is_valid());
+ return 1 << reg_code;
+ }
+ void setcode(int f) {
+ reg_code = f;
+ DCHECK(is_valid());
+ }
+ // Unfortunately we can't make this private in a struct.
+ int reg_code;
+};
+
+constexpr MSAControlRegister no_msacreg = {kInvalidMSAControlRegister};
+constexpr MSAControlRegister MSAIR = {kMSAIRRegister};
+constexpr MSAControlRegister MSACSR = {kMSACSRRegister};
+
+// Define {RegisterName} methods for the register types.
+DEFINE_REGISTER_NAMES(Register, GENERAL_REGISTERS)
+DEFINE_REGISTER_NAMES(FPURegister, DOUBLE_REGISTERS)
+DEFINE_REGISTER_NAMES(MSARegister, SIMD128_REGISTERS)
+
+// Give alias names to registers for calling conventions.
+constexpr Register kReturnRegister0 = v0;
+constexpr Register kReturnRegister1 = v1;
+constexpr Register kReturnRegister2 = a0;
+constexpr Register kJSFunctionRegister = a1;
+constexpr Register kContextRegister = s7;
+constexpr Register kAllocateSizeRegister = a0;
+constexpr Register kSpeculationPoisonRegister = t3;
+constexpr Register kInterpreterAccumulatorRegister = v0;
+constexpr Register kInterpreterBytecodeOffsetRegister = t4;
+constexpr Register kInterpreterBytecodeArrayRegister = t5;
+constexpr Register kInterpreterDispatchTableRegister = t6;
+
+constexpr Register kJavaScriptCallArgCountRegister = a0;
+constexpr Register kJavaScriptCallCodeStartRegister = a2;
+constexpr Register kJavaScriptCallTargetRegister = kJSFunctionRegister;
+constexpr Register kJavaScriptCallNewTargetRegister = a3;
+constexpr Register kJavaScriptCallExtraArg1Register = a2;
+
+constexpr Register kOffHeapTrampolineRegister = at;
+constexpr Register kRuntimeCallFunctionRegister = a1;
+constexpr Register kRuntimeCallArgCountRegister = a0;
+constexpr Register kRuntimeCallArgvRegister = a2;
+constexpr Register kWasmInstanceRegister = a0;
+constexpr Register kWasmCompileLazyFuncIndexRegister = t0;
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_MIPS_REGISTER_MIPS_H_
diff --git a/deps/v8/src/mips/simulator-mips.cc b/deps/v8/src/mips/simulator-mips.cc
index b759176db3..f4d8f354d8 100644
--- a/deps/v8/src/mips/simulator-mips.cc
+++ b/deps/v8/src/mips/simulator-mips.cc
@@ -2,30 +2,31 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/mips/simulator-mips.h"
+
+// Only build the simulator if not compiling for real MIPS hardware.
+#if defined(USE_SIMULATOR)
+
#include <limits.h>
#include <stdarg.h>
#include <stdlib.h>
#include <cmath>
-#if V8_TARGET_ARCH_MIPS
-
#include "src/assembler-inl.h"
#include "src/base/bits.h"
-#include "src/codegen.h"
+#include "src/base/lazy-instance.h"
#include "src/disasm.h"
#include "src/macro-assembler.h"
#include "src/mips/constants-mips.h"
-#include "src/mips/simulator-mips.h"
#include "src/ostreams.h"
#include "src/runtime/runtime-utils.h"
-
-// Only build the simulator if not compiling for real MIPS hardware.
-#if defined(USE_SIMULATOR)
-
namespace v8 {
namespace internal {
+DEFINE_LAZY_LEAKY_OBJECT_GETTER(Simulator::GlobalMonitor,
+ Simulator::GlobalMonitor::Get);
+
// Utility functions.
bool HaveSameSign(int32_t a, int32_t b) {
return ((a ^ b) >= 0);
@@ -475,7 +476,7 @@ void MipsDebugger::Debug() {
int32_t value;
StdoutStream os;
if (GetValue(arg1, &value)) {
- Object* obj = reinterpret_cast<Object*>(value);
+ Object obj(value);
os << arg1 << ": \n";
#ifdef DEBUG
obj->Print(os);
@@ -532,14 +533,12 @@ void MipsDebugger::Debug() {
while (cur < end) {
PrintF(" 0x%08" PRIxPTR ": 0x%08x %10d",
reinterpret_cast<intptr_t>(cur), *cur, *cur);
- HeapObject* obj = reinterpret_cast<HeapObject*>(*cur);
- int value = *cur;
+ Object obj(*cur);
Heap* current_heap = sim_->isolate_->heap();
- if (((value & 1) == 0) ||
- current_heap->ContainsSlow(obj->address())) {
+ if (obj.IsSmi() || current_heap->Contains(HeapObject::cast(obj))) {
PrintF(" (");
- if ((value & 1) == 0) {
- PrintF("smi %d", value / 2);
+ if (obj.IsSmi()) {
+ PrintF("smi %d", Smi::ToInt(obj));
} else {
obj->ShortPrint();
}
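The rewritten checks rely on V8's pointer tagging: a clear low bit marks a Smi, and on 32-bit targets the integer payload sits in the upper 31 bits, which is why the old code printed value / 2. A sketch of that encoding:

    #include <cassert>
    #include <cstdint>

    // 32-bit Smi tagging as used on MIPS32: tag bit 0 is clear for Smis.
    bool IsSmi(intptr_t tagged) { return (tagged & 1) == 0; }
    int32_t SmiToInt(intptr_t tagged) {
      return static_cast<int32_t>(tagged >> 1);  // drop the tag bit
    }

    int main() {
      intptr_t tagged = intptr_t{42} << 1;  // tag 42 as a Smi
      assert(IsSmi(tagged));
      assert(SmiToInt(tagged) == 42);
    }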
@@ -916,9 +915,10 @@ Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
last_debugger_input_ = nullptr;
}
-
-Simulator::~Simulator() { free(stack_); }
-
+Simulator::~Simulator() {
+ GlobalMonitor::Get()->RemoveLinkedAddress(&global_monitor_thread_);
+ free(stack_);
+}
// Get the active Simulator for the current thread.
Simulator* Simulator::current(Isolate* isolate) {
@@ -1967,6 +1967,7 @@ int Simulator::ReadW(int32_t addr, Instruction* instr, TraceType t) {
dbg.Debug();
}
if ((addr & kPointerAlignmentMask) == 0 || IsMipsArchVariant(kMips32r6)) {
+ local_monitor_.NotifyLoad();
intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
switch (t) {
case WORD:
@@ -1997,6 +1998,9 @@ void Simulator::WriteW(int32_t addr, int value, Instruction* instr) {
dbg.Debug();
}
if ((addr & kPointerAlignmentMask) == 0 || IsMipsArchVariant(kMips32r6)) {
+ local_monitor_.NotifyStore();
+ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
+ GlobalMonitor::Get()->NotifyStore_Locked(&global_monitor_thread_);
intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
TraceMemWr(addr, value, WORD);
*ptr = value;
@@ -2009,8 +2013,40 @@ void Simulator::WriteW(int32_t addr, int value, Instruction* instr) {
dbg.Debug();
}
+void Simulator::WriteConditionalW(int32_t addr, int32_t value,
+ Instruction* instr, int32_t rt_reg) {
+ if (addr >= 0 && addr < 0x400) {
+ // This has to be a nullptr-dereference, drop into debugger.
+ PrintF("Memory write to bad address: 0x%08x, pc=0x%08" PRIxPTR "\n", addr,
+ reinterpret_cast<intptr_t>(instr));
+ MipsDebugger dbg(this);
+ dbg.Debug();
+ }
+ if ((addr & kPointerAlignmentMask) == 0 || IsMipsArchVariant(kMips32r6)) {
+ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
+ if (local_monitor_.NotifyStoreConditional(addr, TransactionSize::Word) &&
+ GlobalMonitor::Get()->NotifyStoreConditional_Locked(
+ addr, &global_monitor_thread_)) {
+ local_monitor_.NotifyStore();
+ GlobalMonitor::Get()->NotifyStore_Locked(&global_monitor_thread_);
+ TraceMemWr(addr, value, WORD);
+ int* ptr = reinterpret_cast<int*>(addr);
+ *ptr = value;
+ set_register(rt_reg, 1);
+ } else {
+ set_register(rt_reg, 0);
+ }
+ return;
+ }
+ PrintF("Unaligned write at 0x%08x, pc=0x%08" V8PRIxPTR "\n", addr,
+ reinterpret_cast<intptr_t>(instr));
+ MipsDebugger dbg(this);
+ dbg.Debug();
+}
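WriteConditionalW gives sc its architectural meaning: the store lands, and rt is set to 1, only if both the local and global monitors still hold the reservation; otherwise rt becomes 0 and generated code retries. The contract is the same as a weak compare-exchange, sketched here in portable C++:

    #include <atomic>
    #include <cassert>

    // ll ~ load the value and arm a reservation; sc ~ store only if the
    // reservation survived, reporting success in rt. compare_exchange_weak
    // has the same "may fail spuriously, so loop" contract.
    int AtomicAdd(std::atomic<int>& cell, int delta) {
      int old = cell.load();
      while (!cell.compare_exchange_weak(old, old + delta)) {
        // sc reported 0: reservation lost (or injected failure), retry.
      }
      return old;
    }

    int main() {
      std::atomic<int> cell{40};
      AtomicAdd(cell, 2);
      assert(cell.load() == 42);
    }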
+
double Simulator::ReadD(int32_t addr, Instruction* instr) {
if ((addr & kDoubleAlignmentMask) == 0 || IsMipsArchVariant(kMips32r6)) {
+ local_monitor_.NotifyLoad();
double* ptr = reinterpret_cast<double*>(addr);
return *ptr;
}
@@ -2024,6 +2060,9 @@ double Simulator::ReadD(int32_t addr, Instruction* instr) {
void Simulator::WriteD(int32_t addr, double value, Instruction* instr) {
if ((addr & kDoubleAlignmentMask) == 0 || IsMipsArchVariant(kMips32r6)) {
+ local_monitor_.NotifyStore();
+ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
+ GlobalMonitor::Get()->NotifyStore_Locked(&global_monitor_thread_);
double* ptr = reinterpret_cast<double*>(addr);
*ptr = value;
return;
@@ -2037,6 +2076,7 @@ void Simulator::WriteD(int32_t addr, double value, Instruction* instr) {
uint16_t Simulator::ReadHU(int32_t addr, Instruction* instr) {
if ((addr & 1) == 0 || IsMipsArchVariant(kMips32r6)) {
+ local_monitor_.NotifyLoad();
uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
TraceMemRd(addr, static_cast<int32_t>(*ptr));
return *ptr;
@@ -2051,6 +2091,7 @@ uint16_t Simulator::ReadHU(int32_t addr, Instruction* instr) {
int16_t Simulator::ReadH(int32_t addr, Instruction* instr) {
if ((addr & 1) == 0 || IsMipsArchVariant(kMips32r6)) {
+ local_monitor_.NotifyLoad();
int16_t* ptr = reinterpret_cast<int16_t*>(addr);
TraceMemRd(addr, static_cast<int32_t>(*ptr));
return *ptr;
@@ -2065,6 +2106,9 @@ int16_t Simulator::ReadH(int32_t addr, Instruction* instr) {
void Simulator::WriteH(int32_t addr, uint16_t value, Instruction* instr) {
if ((addr & 1) == 0 || IsMipsArchVariant(kMips32r6)) {
+ local_monitor_.NotifyStore();
+ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
+ GlobalMonitor::Get()->NotifyStore_Locked(&global_monitor_thread_);
uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
TraceMemWr(addr, value, HALF);
*ptr = value;
@@ -2079,6 +2123,9 @@ void Simulator::WriteH(int32_t addr, uint16_t value, Instruction* instr) {
void Simulator::WriteH(int32_t addr, int16_t value, Instruction* instr) {
if ((addr & 1) == 0 || IsMipsArchVariant(kMips32r6)) {
+ local_monitor_.NotifyStore();
+ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
+ GlobalMonitor::Get()->NotifyStore_Locked(&global_monitor_thread_);
int16_t* ptr = reinterpret_cast<int16_t*>(addr);
TraceMemWr(addr, value, HALF);
*ptr = value;
@@ -2092,6 +2139,7 @@ void Simulator::WriteH(int32_t addr, int16_t value, Instruction* instr) {
uint32_t Simulator::ReadBU(int32_t addr) {
+ local_monitor_.NotifyLoad();
uint8_t* ptr = reinterpret_cast<uint8_t*>(addr);
TraceMemRd(addr, static_cast<int32_t>(*ptr));
return *ptr & 0xFF;
@@ -2099,6 +2147,7 @@ uint32_t Simulator::ReadBU(int32_t addr) {
int32_t Simulator::ReadB(int32_t addr) {
+ local_monitor_.NotifyLoad();
int8_t* ptr = reinterpret_cast<int8_t*>(addr);
TraceMemRd(addr, static_cast<int32_t>(*ptr));
return *ptr;
@@ -2106,6 +2155,9 @@ int32_t Simulator::ReadB(int32_t addr) {
void Simulator::WriteB(int32_t addr, uint8_t value) {
+ local_monitor_.NotifyStore();
+ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
+ GlobalMonitor::Get()->NotifyStore_Locked(&global_monitor_thread_);
uint8_t* ptr = reinterpret_cast<uint8_t*>(addr);
TraceMemWr(addr, value, BYTE);
*ptr = value;
@@ -2113,6 +2165,9 @@ void Simulator::WriteB(int32_t addr, uint8_t value) {
void Simulator::WriteB(int32_t addr, int8_t value) {
+ local_monitor_.NotifyStore();
+ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
+ GlobalMonitor::Get()->NotifyStore_Locked(&global_monitor_thread_);
int8_t* ptr = reinterpret_cast<int8_t*>(addr);
TraceMemWr(addr, value, BYTE);
*ptr = value;
@@ -2122,6 +2177,7 @@ template <typename T>
T Simulator::ReadMem(int32_t addr, Instruction* instr) {
int alignment_mask = (1 << sizeof(T)) - 1;
if ((addr & alignment_mask) == 0 || IsMipsArchVariant(kMips32r6)) {
+ local_monitor_.NotifyLoad();
T* ptr = reinterpret_cast<T*>(addr);
TraceMemRd(addr, *ptr);
return *ptr;
@@ -2134,6 +2190,9 @@ T Simulator::ReadMem(int32_t addr, Instruction* instr) {
template <typename T>
void Simulator::WriteMem(int32_t addr, T value, Instruction* instr) {
+ local_monitor_.NotifyStore();
+ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
+ GlobalMonitor::Get()->NotifyStore_Locked(&global_monitor_thread_);
int alignment_mask = (1 << sizeof(T)) - 1;
if ((addr & alignment_mask) == 0 || IsMipsArchVariant(kMips32r6)) {
T* ptr = reinterpret_cast<T*>(addr);
@@ -4240,21 +4299,6 @@ void Simulator::DecodeTypeRegisterSPECIAL3() {
SetResult(rd_reg(), alu_out);
break;
}
- case LL_R6: {
- // LLWP/SCWP sequence cannot be simulated properly
- DCHECK(IsMipsArchVariant(kMips32r6));
- set_register(rd_reg(), ReadW(rs() + 4, instr_.instr()));
- set_register(rt(), ReadW(rs(), instr_.instr()));
- break;
- }
- case SC_R6: {
- // LLWP/SCWP sequence cannot be simulated properly
- DCHECK(IsMipsArchVariant(kMips32r6));
- WriteW(rs() + 4, rd_reg(), instr_.instr());
- WriteW(rs(), rt(), instr_.instr());
- set_register(rt(), 1);
- break;
- }
default:
UNREACHABLE();
}
@@ -6742,16 +6786,19 @@ void Simulator::DecodeTypeImmediate() {
break;
}
case LL: {
- // LL/SC sequence cannot be simulated properly
DCHECK(!IsMipsArchVariant(kMips32r6));
- set_register(rt_reg, ReadW(rs + se_imm16, instr_.instr()));
+ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
+ addr = rs + se_imm16;
+ set_register(rt_reg, ReadW(addr, instr_.instr()));
+ local_monitor_.NotifyLoadLinked(addr, TransactionSize::Word);
+ GlobalMonitor::Get()->NotifyLoadLinked_Locked(addr,
+ &global_monitor_thread_);
break;
}
case SC: {
- // LL/SC sequence cannot be simulated properly
DCHECK(!IsMipsArchVariant(kMips32r6));
- WriteW(rs + se_imm16, rt, instr_.instr());
- set_register(rt_reg, 1);
+ addr = rs + se_imm16;
+ WriteConditionalW(addr, rt, instr_.instr(), rt_reg);
break;
}
case LWC1:
@@ -6821,20 +6868,25 @@ void Simulator::DecodeTypeImmediate() {
case SPECIAL3: {
switch (instr_.FunctionFieldRaw()) {
case LL_R6: {
- // LL/SC sequence cannot be simulated properly
DCHECK(IsMipsArchVariant(kMips32r6));
+ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
int32_t base = get_register(instr_.BaseValue());
int32_t offset9 = instr_.Imm9Value();
+ addr = base + offset9;
+ DCHECK_EQ(addr & kPointerAlignmentMask, 0);
set_register(rt_reg, ReadW(base + offset9, instr_.instr()));
+ local_monitor_.NotifyLoadLinked(addr, TransactionSize::Word);
+ GlobalMonitor::Get()->NotifyLoadLinked_Locked(
+ addr, &global_monitor_thread_);
break;
}
case SC_R6: {
- // LL/SC sequence cannot be simulated properly
DCHECK(IsMipsArchVariant(kMips32r6));
int32_t base = get_register(instr_.BaseValue());
int32_t offset9 = instr_.Imm9Value();
- WriteW(base + offset9, rt, instr_.instr());
- set_register(rt_reg, 1);
+ addr = base + offset9;
+ DCHECK_EQ(addr & kPointerAlignmentMask, 0);
+ WriteConditionalW(addr, rt, instr_.instr(), rt_reg);
break;
}
default:
@@ -7124,6 +7176,177 @@ uintptr_t Simulator::PopAddress() {
return address;
}
+Simulator::LocalMonitor::LocalMonitor()
+ : access_state_(MonitorAccess::Open),
+ tagged_addr_(0),
+ size_(TransactionSize::None) {}
+
+void Simulator::LocalMonitor::Clear() {
+ access_state_ = MonitorAccess::Open;
+ tagged_addr_ = 0;
+ size_ = TransactionSize::None;
+}
+
+void Simulator::LocalMonitor::NotifyLoad() {
+ if (access_state_ == MonitorAccess::RMW) {
+    // A non-linked load is allowed to clear the local monitor, so the
+    // strictest simulation clears it unconditionally on every load.
+ Clear();
+ }
+}
+
+void Simulator::LocalMonitor::NotifyLoadLinked(uintptr_t addr,
+ TransactionSize size) {
+ access_state_ = MonitorAccess::RMW;
+ tagged_addr_ = addr;
+ size_ = size;
+}
+
+void Simulator::LocalMonitor::NotifyStore() {
+ if (access_state_ == MonitorAccess::RMW) {
+    // A non-exclusive store is allowed to clear the local monitor, so the
+    // strictest simulation clears it unconditionally on every store.
+ Clear();
+ }
+}
+
+bool Simulator::LocalMonitor::NotifyStoreConditional(uintptr_t addr,
+ TransactionSize size) {
+ if (access_state_ == MonitorAccess::RMW) {
+ if (addr == tagged_addr_ && size_ == size) {
+ Clear();
+ return true;
+ } else {
+ return false;
+ }
+ } else {
+ DCHECK(access_state_ == MonitorAccess::Open);
+ return false;
+ }
+}
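The local monitor is a two-state machine: ll arms a reservation for one address, any plain store (or a mismatched sc) drops it, and sc succeeds only while the reservation is intact. A stripped-down model with a tiny self-test (transaction sizes omitted):

    #include <cassert>
    #include <cstdint>

    enum class MonitorAccess { Open, RMW };

    // Simplified model of the LocalMonitor implemented above.
    struct LocalMonitor {
      MonitorAccess state = MonitorAccess::Open;
      uintptr_t tagged_addr = 0;

      void NotifyLoadLinked(uintptr_t addr) {  // ll: arm the reservation
        state = MonitorAccess::RMW;
        tagged_addr = addr;
      }
      void NotifyStore() { state = MonitorAccess::Open; }  // plain store clears
      bool NotifyStoreConditional(uintptr_t addr) {        // sc
        if (state == MonitorAccess::RMW && tagged_addr == addr) {
          state = MonitorAccess::Open;  // reservation consumed
          return true;
        }
        return false;
      }
    };

    int main() {
      LocalMonitor m;
      m.NotifyLoadLinked(0x1000);
      assert(m.NotifyStoreConditional(0x1000));   // ll ... sc succeeds
      m.NotifyLoadLinked(0x1000);
      m.NotifyStore();                            // intervening store
      assert(!m.NotifyStoreConditional(0x1000));  // sc now fails
    }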
+
+Simulator::GlobalMonitor::LinkedAddress::LinkedAddress()
+ : access_state_(MonitorAccess::Open),
+ tagged_addr_(0),
+ next_(nullptr),
+ prev_(nullptr),
+ failure_counter_(0) {}
+
+void Simulator::GlobalMonitor::LinkedAddress::Clear_Locked() {
+ access_state_ = MonitorAccess::Open;
+ tagged_addr_ = 0;
+}
+
+void Simulator::GlobalMonitor::LinkedAddress::NotifyLoadLinked_Locked(
+ uintptr_t addr) {
+ access_state_ = MonitorAccess::RMW;
+ tagged_addr_ = addr;
+}
+
+void Simulator::GlobalMonitor::LinkedAddress::NotifyStore_Locked() {
+ if (access_state_ == MonitorAccess::RMW) {
+    // A non-exclusive store is allowed to clear the global monitor, so the
+    // strictest simulation clears global monitors unconditionally on store.
+ Clear_Locked();
+ }
+}
+
+bool Simulator::GlobalMonitor::LinkedAddress::NotifyStoreConditional_Locked(
+ uintptr_t addr, bool is_requesting_processor) {
+ if (access_state_ == MonitorAccess::RMW) {
+ if (is_requesting_processor) {
+ if (addr == tagged_addr_) {
+ Clear_Locked();
+ // Introduce occasional sc/scd failures. This is to simulate the
+ // behavior of hardware, which can randomly fail due to background
+ // cache evictions.
+ if (failure_counter_++ >= kMaxFailureCounter) {
+ failure_counter_ = 0;
+ return false;
+ } else {
+ return true;
+ }
+ }
+ } else if ((addr & kExclusiveTaggedAddrMask) ==
+ (tagged_addr_ & kExclusiveTaggedAddrMask)) {
+      // Check the masked addresses when responding to a successful lock by
+      // another thread, so the implementation is more conservative (i.e. the
+      // granularity of locking is as large as possible).
+ Clear_Locked();
+ return false;
+ }
+ }
+ return false;
+}
+
+void Simulator::GlobalMonitor::NotifyLoadLinked_Locked(
+ uintptr_t addr, LinkedAddress* linked_address) {
+ linked_address->NotifyLoadLinked_Locked(addr);
+ PrependProcessor_Locked(linked_address);
+}
+
+void Simulator::GlobalMonitor::NotifyStore_Locked(
+ LinkedAddress* linked_address) {
+ // Notify each thread of the store operation.
+ for (LinkedAddress* iter = head_; iter; iter = iter->next_) {
+ iter->NotifyStore_Locked();
+ }
+}
+
+bool Simulator::GlobalMonitor::NotifyStoreConditional_Locked(
+ uintptr_t addr, LinkedAddress* linked_address) {
+ DCHECK(IsProcessorInLinkedList_Locked(linked_address));
+ if (linked_address->NotifyStoreConditional_Locked(addr, true)) {
+ // Notify the other processors that this StoreConditional succeeded.
+ for (LinkedAddress* iter = head_; iter; iter = iter->next_) {
+ if (iter != linked_address) {
+ iter->NotifyStoreConditional_Locked(addr, false);
+ }
+ }
+ return true;
+ } else {
+ return false;
+ }
+}
+
+bool Simulator::GlobalMonitor::IsProcessorInLinkedList_Locked(
+ LinkedAddress* linked_address) const {
+ return head_ == linked_address || linked_address->next_ ||
+ linked_address->prev_;
+}
+
+void Simulator::GlobalMonitor::PrependProcessor_Locked(
+ LinkedAddress* linked_address) {
+ if (IsProcessorInLinkedList_Locked(linked_address)) {
+ return;
+ }
+
+ if (head_) {
+ head_->prev_ = linked_address;
+ }
+ linked_address->prev_ = nullptr;
+ linked_address->next_ = head_;
+ head_ = linked_address;
+}
+
+void Simulator::GlobalMonitor::RemoveLinkedAddress(
+ LinkedAddress* linked_address) {
+ base::MutexGuard lock_guard(&mutex);
+ if (!IsProcessorInLinkedList_Locked(linked_address)) {
+ return;
+ }
+
+ if (linked_address->prev_) {
+ linked_address->prev_->next_ = linked_address->next_;
+ } else {
+ head_ = linked_address->next_;
+ }
+ if (linked_address->next_) {
+ linked_address->next_->prev_ = linked_address->prev_;
+ }
+ linked_address->prev_ = nullptr;
+ linked_address->next_ = nullptr;
+}
#undef UNSUPPORTED
@@ -7131,5 +7354,3 @@ uintptr_t Simulator::PopAddress() {
} // namespace v8
#endif // USE_SIMULATOR
-
-#endif // V8_TARGET_ARCH_MIPS
diff --git a/deps/v8/src/mips/simulator-mips.h b/deps/v8/src/mips/simulator-mips.h
index 59ec3af7e3..88e45605f2 100644
--- a/deps/v8/src/mips/simulator-mips.h
+++ b/deps/v8/src/mips/simulator-mips.h
@@ -12,14 +12,16 @@
#ifndef V8_MIPS_SIMULATOR_MIPS_H_
#define V8_MIPS_SIMULATOR_MIPS_H_
-#include "src/allocation.h"
-#include "src/mips/constants-mips.h"
+// globals.h defines USE_SIMULATOR.
+#include "src/globals.h"
#if defined(USE_SIMULATOR)
// Running with a simulator.
+#include "src/allocation.h"
#include "src/assembler.h"
#include "src/base/hashmap.h"
+#include "src/mips/constants-mips.h"
#include "src/simulator-base.h"
namespace v8 {
@@ -314,6 +316,8 @@ class Simulator : public SimulatorBase {
inline int ReadW(int32_t addr, Instruction* instr, TraceType t = WORD);
inline void WriteW(int32_t addr, int value, Instruction* instr);
+ void WriteConditionalW(int32_t addr, int32_t value, Instruction* instr,
+ int32_t rt_reg);
inline double ReadD(int32_t addr, Instruction* instr);
inline void WriteD(int32_t addr, double value, Instruction* instr);
@@ -554,6 +558,97 @@ class Simulator : public SimulatorBase {
char* desc;
};
StopCountAndDesc watched_stops_[kMaxStopCode + 1];
+
+ // Synchronization primitives.
+ enum class MonitorAccess {
+ Open,
+ RMW,
+ };
+
+ enum class TransactionSize {
+ None = 0,
+ Word = 4,
+ };
+
+  // The least-significant bits of the address are ignored. The number of
+  // ignored bits is implementation-defined, between 3 and log2 of the
+  // minimum page size.
+ static const uintptr_t kExclusiveTaggedAddrMask = ~((1 << 3) - 1);
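With the low 3 bits masked off, reservations are tracked at (at least) 8-byte granularity, so two addresses in the same granule share a reservation. A quick check under that assumption:

    #include <cassert>
    #include <cstdint>

    const uintptr_t kExclusiveTaggedAddrMask = ~((uintptr_t{1} << 3) - 1);

    int main() {
      // Same 8-byte granule: the reservation covers both addresses.
      assert((0x1004 & kExclusiveTaggedAddrMask) ==
             (0x1000 & kExclusiveTaggedAddrMask));
      // Different granule: a store here does not hit the reservation.
      assert((0x1008 & kExclusiveTaggedAddrMask) !=
             (0x1000 & kExclusiveTaggedAddrMask));
    }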
+
+ class LocalMonitor {
+ public:
+ LocalMonitor();
+
+ // These functions manage the state machine for the local monitor, but do
+ // not actually perform loads and stores. NotifyStoreConditional only
+ // returns true if the store conditional is allowed; the global monitor will
+ // still have to be checked to see whether the memory should be updated.
+ void NotifyLoad();
+ void NotifyLoadLinked(uintptr_t addr, TransactionSize size);
+ void NotifyStore();
+ bool NotifyStoreConditional(uintptr_t addr, TransactionSize size);
+
+ private:
+ void Clear();
+
+ MonitorAccess access_state_;
+ uintptr_t tagged_addr_;
+ TransactionSize size_;
+ };
+
+ class GlobalMonitor {
+ public:
+ class LinkedAddress {
+ public:
+ LinkedAddress();
+
+ private:
+ friend class GlobalMonitor;
+ // These functions manage the state machine for the global monitor, but do
+ // not actually perform loads and stores.
+ void Clear_Locked();
+ void NotifyLoadLinked_Locked(uintptr_t addr);
+ void NotifyStore_Locked();
+ bool NotifyStoreConditional_Locked(uintptr_t addr,
+ bool is_requesting_thread);
+
+ MonitorAccess access_state_;
+ uintptr_t tagged_addr_;
+ LinkedAddress* next_;
+ LinkedAddress* prev_;
+      // An sc can fail due to background cache evictions. Rather than
+      // simulating this, we'll just occasionally introduce cases where a
+      // store conditional fails. This will happen once after every
+      // kMaxFailureCounter exclusive stores.
+ static const int kMaxFailureCounter = 5;
+ int failure_counter_;
+ };
+
+ // Exposed so it can be accessed by Simulator::{Read,Write}Ex*.
+ base::Mutex mutex;
+
+ void NotifyLoadLinked_Locked(uintptr_t addr, LinkedAddress* linked_address);
+ void NotifyStore_Locked(LinkedAddress* linked_address);
+ bool NotifyStoreConditional_Locked(uintptr_t addr,
+ LinkedAddress* linked_address);
+
+ // Called when the simulator is destroyed.
+ void RemoveLinkedAddress(LinkedAddress* linked_address);
+
+ static GlobalMonitor* Get();
+
+ private:
+ // Private constructor. Call {GlobalMonitor::Get()} to get the singleton.
+ GlobalMonitor() = default;
+ friend class base::LeakyObject<GlobalMonitor>;
+
+ bool IsProcessorInLinkedList_Locked(LinkedAddress* linked_address) const;
+ void PrependProcessor_Locked(LinkedAddress* linked_address);
+
+ LinkedAddress* head_ = nullptr;
+ };
+
+ LocalMonitor local_monitor_;
+ GlobalMonitor::LinkedAddress global_monitor_thread_;
};
} // namespace internal
diff --git a/deps/v8/src/mips64/OWNERS b/deps/v8/src/mips64/OWNERS
index 8bbcab4c2d..b455d9ef29 100644
--- a/deps/v8/src/mips64/OWNERS
+++ b/deps/v8/src/mips64/OWNERS
@@ -1,2 +1,3 @@
-ibogosavljevic@wavecomp.com
-skovacevic@wavecomp.com
\ No newline at end of file
+arikalo@wavecomp.com
+prudic@wavecomp.com
+skovacevic@wavecomp.com
diff --git a/deps/v8/src/mips64/assembler-mips64-inl.h b/deps/v8/src/mips64/assembler-mips64-inl.h
index 24abf0249e..37652d0690 100644
--- a/deps/v8/src/mips64/assembler-mips64-inl.h
+++ b/deps/v8/src/mips64/assembler-mips64-inl.h
@@ -79,9 +79,7 @@ Address RelocInfo::target_address() {
}
Address RelocInfo::target_address_address() {
- DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_) || IsWasmCall(rmode_) ||
- IsEmbeddedObject(rmode_) || IsExternalReference(rmode_) ||
- IsOffHeapTarget(rmode_));
+ DCHECK(HasTargetAddressAddress());
// Read the address of the word containing the target_address in an
// instruction stream.
// The only architecture-independent user of this function is the serializer.
@@ -114,10 +112,10 @@ Address Assembler::target_address_from_return_address(Address pc) {
}
void Assembler::deserialization_set_special_target_at(
- Address instruction_payload, Code* code, Address target) {
+ Address instruction_payload, Code code, Address target) {
set_target_address_at(
instruction_payload - kInstructionsFor64BitConstant * kInstrSize,
- code ? code->constant_pool() : kNullAddress, target);
+ !code.is_null() ? code->constant_pool() : kNullAddress, target);
}
int Assembler::deserialization_special_target_size(
@@ -152,31 +150,29 @@ void Assembler::deserialization_set_target_internal_reference_at(
}
}
-HeapObject* RelocInfo::target_object() {
+HeapObject RelocInfo::target_object() {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- return HeapObject::cast(reinterpret_cast<Object*>(
- Assembler::target_address_at(pc_, constant_pool_)));
+ return HeapObject::cast(
+ Object(Assembler::target_address_at(pc_, constant_pool_)));
}
Handle<HeapObject> RelocInfo::target_object_handle(Assembler* origin) {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- return Handle<HeapObject>(reinterpret_cast<HeapObject**>(
+ return Handle<HeapObject>(reinterpret_cast<Address*>(
Assembler::target_address_at(pc_, constant_pool_)));
}
-void RelocInfo::set_target_object(Heap* heap, HeapObject* target,
+void RelocInfo::set_target_object(Heap* heap, HeapObject target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- Assembler::set_target_address_at(pc_, constant_pool_,
- reinterpret_cast<Address>(target),
+ Assembler::set_target_address_at(pc_, constant_pool_, target->ptr(),
icache_flush_mode);
- if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != nullptr) {
+ if (write_barrier_mode == UPDATE_WRITE_BARRIER && !host().is_null()) {
WriteBarrierForCode(host(), this, target);
}
}
-
Address RelocInfo::target_external_reference() {
DCHECK(rmode_ == EXTERNAL_REFERENCE);
return Assembler::target_address_at(pc_, constant_pool_);
diff --git a/deps/v8/src/mips64/assembler-mips64.cc b/deps/v8/src/mips64/assembler-mips64.cc
index 3449537626..10e4806337 100644
--- a/deps/v8/src/mips64/assembler-mips64.cc
+++ b/deps/v8/src/mips64/assembler-mips64.cc
@@ -37,9 +37,9 @@
#if V8_TARGET_ARCH_MIPS64
#include "src/base/cpu.h"
-#include "src/code-stubs.h"
#include "src/deoptimizer.h"
#include "src/mips64/assembler-mips64-inl.h"
+#include "src/objects/heap-number-inl.h"
#include "src/string-constants.h"
namespace v8 {
@@ -179,23 +179,6 @@ bool RelocInfo::IsInConstantPool() {
return false;
}
-int RelocInfo::GetDeoptimizationId(Isolate* isolate, DeoptimizeKind kind) {
- DCHECK(IsRuntimeEntry(rmode_));
- return Deoptimizer::GetDeoptimizationId(isolate, target_address(), kind);
-}
-
-void RelocInfo::set_js_to_wasm_address(Address address,
- ICacheFlushMode icache_flush_mode) {
- DCHECK_EQ(rmode_, JS_TO_WASM_CALL);
- Assembler::set_target_address_at(pc_, constant_pool_, address,
- icache_flush_mode);
-}
-
-Address RelocInfo::js_to_wasm_address() const {
- DCHECK_EQ(rmode_, JS_TO_WASM_CALL);
- return Assembler::target_address_at(pc_, constant_pool_);
-}
-
uint32_t RelocInfo::wasm_call_tag() const {
DCHECK(rmode_ == WASM_CALL || rmode_ == WASM_STUB_CALL);
return static_cast<uint32_t>(
@@ -220,13 +203,6 @@ Operand Operand::EmbeddedNumber(double value) {
return result;
}
-Operand Operand::EmbeddedCode(CodeStub* stub) {
- Operand result(0, RelocInfo::CODE_TARGET);
- result.is_heap_object_request_ = true;
- result.value_.heap_object_request = HeapObjectRequest(stub);
- return result;
-}
-
Operand Operand::EmbeddedStringConstant(const StringConstantBase* str) {
Operand result(0, RelocInfo::EMBEDDED_OBJECT);
result.is_heap_object_request_ = true;
@@ -254,17 +230,13 @@ void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
object =
isolate->factory()->NewHeapNumber(request.heap_number(), TENURED);
break;
- case HeapObjectRequest::kCodeStub:
- request.code_stub()->set_isolate(isolate);
- object = request.code_stub()->GetCode();
- break;
case HeapObjectRequest::kStringConstant:
const StringConstantBase* str = request.string();
CHECK_NOT_NULL(str);
object = str->AllocateStringConstant(isolate);
break;
}
- Address pc = reinterpret_cast<Address>(buffer_) + request.offset();
+ Address pc = reinterpret_cast<Address>(buffer_start_) + request.offset();
set_target_value_at(pc, reinterpret_cast<uint64_t>(object.location()));
}
}
@@ -305,11 +277,11 @@ const Instr kLwSwInstrTypeMask = 0xFFE00000;
const Instr kLwSwInstrArgumentMask = ~kLwSwInstrTypeMask;
const Instr kLwSwOffsetMask = kImm16Mask;
-Assembler::Assembler(const AssemblerOptions& options, void* buffer,
- int buffer_size)
- : AssemblerBase(options, buffer, buffer_size),
+Assembler::Assembler(const AssemblerOptions& options,
+ std::unique_ptr<AssemblerBuffer> buffer)
+ : AssemblerBase(options, std::move(buffer)),
scratch_register_list_(at.bit()) {
- reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);
+ reloc_info_writer.Reposition(buffer_start_ + buffer_->size(), pc_);
last_trampoline_pool_end_ = 0;
no_trampoline_pool_before_ = 0;
@@ -328,20 +300,24 @@ Assembler::Assembler(const AssemblerOptions& options, void* buffer,
void Assembler::GetCode(Isolate* isolate, CodeDesc* desc) {
EmitForbiddenSlotInstruction();
+
+ int code_comments_size = WriteCodeComments();
+
DCHECK(pc_ <= reloc_info_writer.pos()); // No overlap.
AllocateAndInstallRequestedHeapObjects(isolate);
// Set up code descriptor.
- desc->buffer = buffer_;
- desc->buffer_size = buffer_size_;
+ desc->buffer = buffer_start_;
+ desc->buffer_size = buffer_->size();
desc->instr_size = pc_offset();
- desc->reloc_size =
- static_cast<int>((buffer_ + buffer_size_) - reloc_info_writer.pos());
+ desc->reloc_size = static_cast<int>((buffer_start_ + desc->buffer_size) -
+ reloc_info_writer.pos());
desc->origin = this;
desc->constant_pool_size = 0;
desc->unwinding_info_size = 0;
desc->unwinding_info = nullptr;
+ desc->code_comments_size = code_comments_size;
}
@@ -778,7 +754,7 @@ static inline int32_t AddBranchOffset(int pos, Instr instr) {
int Assembler::target_at(int pos, bool is_internal) {
if (is_internal) {
- int64_t* p = reinterpret_cast<int64_t*>(buffer_ + pos);
+ int64_t* p = reinterpret_cast<int64_t*>(buffer_start_ + pos);
int64_t address = *p;
if (address == kEndOfJumpChain) {
return kEndOfChain;
@@ -852,7 +828,7 @@ int Assembler::target_at(int pos, bool is_internal) {
// EndOfChain sentinel is returned directly, not relative to pc or pos.
return kEndOfChain;
} else {
- uint64_t instr_address = reinterpret_cast<int64_t>(buffer_ + pos);
+ uint64_t instr_address = reinterpret_cast<int64_t>(buffer_start_ + pos);
DCHECK(instr_address - imm < INT_MAX);
int delta = static_cast<int>(instr_address - imm);
DCHECK(pos > delta);
@@ -891,15 +867,15 @@ static inline Instr SetBranchOffset(int32_t pos, int32_t target_pos,
void Assembler::target_at_put(int pos, int target_pos, bool is_internal) {
if (is_internal) {
- uint64_t imm = reinterpret_cast<uint64_t>(buffer_) + target_pos;
- *reinterpret_cast<uint64_t*>(buffer_ + pos) = imm;
+ uint64_t imm = reinterpret_cast<uint64_t>(buffer_start_) + target_pos;
+ *reinterpret_cast<uint64_t*>(buffer_start_ + pos) = imm;
return;
}
Instr instr = instr_at(pos);
if ((instr & ~kImm16Mask) == 0) {
DCHECK(target_pos == kEndOfChain || target_pos >= 0);
// Emitted label constant, not part of a branch.
- // Make label relative to Code* of generated Code object.
+ // Make label relative to Code pointer of generated Code object.
instr_at_put(pos, target_pos + (Code::kHeaderSize - kHeapObjectTag));
return;
}
@@ -943,7 +919,7 @@ void Assembler::target_at_put(int pos, int target_pos, bool is_internal) {
DCHECK(IsOri(instr_ori));
DCHECK(IsOri(instr_ori2));
- uint64_t imm = reinterpret_cast<uint64_t>(buffer_) + target_pos;
+ uint64_t imm = reinterpret_cast<uint64_t>(buffer_start_) + target_pos;
DCHECK_EQ(imm & 3, 0);
instr_lui &= ~kImm16Mask;
@@ -1462,7 +1438,7 @@ uint64_t Assembler::jump_address(Label* L) {
return kEndOfJumpChain;
}
}
- uint64_t imm = reinterpret_cast<uint64_t>(buffer_) + target_pos;
+ uint64_t imm = reinterpret_cast<uint64_t>(buffer_start_) + target_pos;
DCHECK_EQ(imm & 3, 0);
return imm;
@@ -4130,48 +4106,40 @@ int Assembler::RelocateInternalReference(RelocInfo::Mode rmode, Address pc,
void Assembler::GrowBuffer() {
- if (!own_buffer_) FATAL("external code buffer is too small");
-
// Compute new buffer size.
- CodeDesc desc; // the new buffer
- if (buffer_size_ < 1 * MB) {
- desc.buffer_size = 2*buffer_size_;
- } else {
- desc.buffer_size = buffer_size_ + 1*MB;
- }
+ int old_size = buffer_->size();
+ int new_size = std::min(2 * old_size, old_size + 1 * MB);
// Some internal data structures overflow for very large buffers,
// they must ensure that kMaximalBufferSize is not too large.
- if (desc.buffer_size > kMaximalBufferSize) {
+ if (new_size > kMaximalBufferSize) {
V8::FatalProcessOutOfMemory(nullptr, "Assembler::GrowBuffer");
}
// Set up new buffer.
- desc.buffer = NewArray<byte>(desc.buffer_size);
- desc.origin = this;
-
- desc.instr_size = pc_offset();
- desc.reloc_size =
- static_cast<int>((buffer_ + buffer_size_) - reloc_info_writer.pos());
+ std::unique_ptr<AssemblerBuffer> new_buffer = buffer_->Grow(new_size);
+ DCHECK_EQ(new_size, new_buffer->size());
+ byte* new_start = new_buffer->start();
// Copy the data.
- intptr_t pc_delta = desc.buffer - buffer_;
- intptr_t rc_delta = (desc.buffer + desc.buffer_size) -
- (buffer_ + buffer_size_);
- MemMove(desc.buffer, buffer_, desc.instr_size);
- MemMove(reloc_info_writer.pos() + rc_delta,
- reloc_info_writer.pos(), desc.reloc_size);
+ intptr_t pc_delta = new_start - buffer_start_;
+ intptr_t rc_delta = (new_start + new_size) - (buffer_start_ + old_size);
+ size_t reloc_size = (buffer_start_ + old_size) - reloc_info_writer.pos();
+ MemMove(new_start, buffer_start_, pc_offset());
+ MemMove(reloc_info_writer.pos() + rc_delta, reloc_info_writer.pos(),
+ reloc_size);
// Switch buffers.
- DeleteArray(buffer_);
- buffer_ = desc.buffer;
- buffer_size_ = desc.buffer_size;
+ buffer_ = std::move(new_buffer);
+ buffer_start_ = new_start;
pc_ += pc_delta;
reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
reloc_info_writer.last_pc() + pc_delta);
// Relocate runtime entries.
- for (RelocIterator it(desc); !it.done(); it.next()) {
+ Vector<byte> instructions{buffer_start_, pc_offset()};
+ Vector<const byte> reloc_info{reloc_info_writer.pos(), reloc_size};
+ for (RelocIterator it(instructions, reloc_info, 0); !it.done(); it.next()) {
RelocInfo::Mode rmode = it.rinfo()->rmode();
if (rmode == RelocInfo::INTERNAL_REFERENCE) {
RelocateInternalReference(rmode, it.rinfo()->pc(), pc_delta);
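The new sizing rule doubles small buffers and switches to linear 1 MB steps once the buffer reaches 1 MB, bounding both the number of reallocations and the worst-case over-allocation. A small driver showing the progression:

    #include <algorithm>
    #include <iostream>

    const int MB = 1 << 20;

    // Same policy as GrowBuffer above: min(2 * old, old + 1 MB).
    int NextSize(int old_size) {
      return std::min(2 * old_size, old_size + 1 * MB);
    }

    int main() {
      // Prints 262144, 524288, 1048576, 2097152, 3145728, 4194304:
      // doubling up to 1 MB, then +1 MB per step.
      for (int size = 256 * 1024; size <= 4 * MB; size = NextSize(size))
        std::cout << size << '\n';
    }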
@@ -4203,7 +4171,7 @@ void Assembler::dd(Label* label) {
uint64_t data;
CheckForEmitInForbiddenSlot();
if (label->is_bound()) {
- data = reinterpret_cast<uint64_t>(buffer_ + label->pos());
+ data = reinterpret_cast<uint64_t>(buffer_start_ + label->pos());
} else {
data = jump_address(label);
unbound_labels_count_++;
@@ -4217,7 +4185,7 @@ void Assembler::dd(Label* label) {
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
if (!ShouldRecordRelocInfo(rmode)) return;
// We do not try to reuse pool constants.
- RelocInfo rinfo(reinterpret_cast<Address>(pc_), rmode, data, nullptr);
+ RelocInfo rinfo(reinterpret_cast<Address>(pc_), rmode, data, Code());
DCHECK_GE(buffer_space(), kMaxRelocSize); // Too late to grow buffer here.
reloc_info_writer.Write(&rinfo);
}
@@ -4327,11 +4295,10 @@ Address Assembler::target_address_at(Address pc) {
// qNaN is a MIPS sNaN, and ia32 sNaN is MIPS qNaN. If running from a heap
// snapshot generated on ia32, the resulting MIPS sNaN must be quieted.
// OS::nan_value() returns a qNaN.
-void Assembler::QuietNaN(HeapObject* object) {
+void Assembler::QuietNaN(HeapObject object) {
HeapNumber::cast(object)->set_value(std::numeric_limits<double>::quiet_NaN());
}
-
// On Mips64, a target address is stored in a 4-instruction sequence:
// 0: lui(rd, (j.imm64_ >> 32) & kImm16Mask);
// 1: ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask);
diff --git a/deps/v8/src/mips64/assembler-mips64.h b/deps/v8/src/mips64/assembler-mips64.h
index 814f3eacba..09ad7522d6 100644
--- a/deps/v8/src/mips64/assembler-mips64.h
+++ b/deps/v8/src/mips64/assembler-mips64.h
@@ -40,352 +40,16 @@
#include <set>
#include "src/assembler.h"
+#include "src/contexts.h"
+#include "src/external-reference.h"
+#include "src/label.h"
#include "src/mips64/constants-mips64.h"
+#include "src/mips64/register-mips64.h"
+#include "src/objects/smi.h"
namespace v8 {
namespace internal {
-// clang-format off
-#define GENERAL_REGISTERS(V) \
- V(zero_reg) V(at) V(v0) V(v1) V(a0) V(a1) V(a2) V(a3) \
- V(a4) V(a5) V(a6) V(a7) V(t0) V(t1) V(t2) V(t3) \
- V(s0) V(s1) V(s2) V(s3) V(s4) V(s5) V(s6) V(s7) V(t8) V(t9) \
- V(k0) V(k1) V(gp) V(sp) V(fp) V(ra)
-
-#define ALLOCATABLE_GENERAL_REGISTERS(V) \
- V(a0) V(a1) V(a2) V(a3) \
- V(a4) V(a5) V(a6) V(a7) V(t0) V(t1) V(t2) V(s7) \
- V(v0) V(v1)
-
-#define DOUBLE_REGISTERS(V) \
- V(f0) V(f1) V(f2) V(f3) V(f4) V(f5) V(f6) V(f7) \
- V(f8) V(f9) V(f10) V(f11) V(f12) V(f13) V(f14) V(f15) \
- V(f16) V(f17) V(f18) V(f19) V(f20) V(f21) V(f22) V(f23) \
- V(f24) V(f25) V(f26) V(f27) V(f28) V(f29) V(f30) V(f31)
-
-#define FLOAT_REGISTERS DOUBLE_REGISTERS
-#define SIMD128_REGISTERS(V) \
- V(w0) V(w1) V(w2) V(w3) V(w4) V(w5) V(w6) V(w7) \
- V(w8) V(w9) V(w10) V(w11) V(w12) V(w13) V(w14) V(w15) \
- V(w16) V(w17) V(w18) V(w19) V(w20) V(w21) V(w22) V(w23) \
- V(w24) V(w25) V(w26) V(w27) V(w28) V(w29) V(w30) V(w31)
-
-#define ALLOCATABLE_DOUBLE_REGISTERS(V) \
- V(f0) V(f2) V(f4) V(f6) V(f8) V(f10) V(f12) V(f14) \
- V(f16) V(f18) V(f20) V(f22) V(f24) V(f26)
-// clang-format on
-
-// Note that the bit values must match those used in actual instruction
-// encoding.
-const int kNumRegs = 32;
-
-const RegList kJSCallerSaved = 1 << 2 | // v0
- 1 << 3 | // v1
- 1 << 4 | // a0
- 1 << 5 | // a1
- 1 << 6 | // a2
- 1 << 7 | // a3
- 1 << 8 | // a4
- 1 << 9 | // a5
- 1 << 10 | // a6
- 1 << 11 | // a7
- 1 << 12 | // t0
- 1 << 13 | // t1
- 1 << 14 | // t2
- 1 << 15; // t3
-
-const int kNumJSCallerSaved = 14;
-
-// Callee-saved registers preserved when switching from C to JavaScript.
-const RegList kCalleeSaved = 1 << 16 | // s0
- 1 << 17 | // s1
- 1 << 18 | // s2
- 1 << 19 | // s3
- 1 << 20 | // s4
- 1 << 21 | // s5
- 1 << 22 | // s6 (roots in Javascript code)
- 1 << 23 | // s7 (cp in Javascript code)
- 1 << 30; // fp/s8
-
-const int kNumCalleeSaved = 9;
-
-const RegList kCalleeSavedFPU = 1 << 20 | // f20
- 1 << 22 | // f22
- 1 << 24 | // f24
- 1 << 26 | // f26
- 1 << 28 | // f28
- 1 << 30; // f30
-
-const int kNumCalleeSavedFPU = 6;
-
-const RegList kCallerSavedFPU = 1 << 0 | // f0
- 1 << 2 | // f2
- 1 << 4 | // f4
- 1 << 6 | // f6
- 1 << 8 | // f8
- 1 << 10 | // f10
- 1 << 12 | // f12
- 1 << 14 | // f14
- 1 << 16 | // f16
- 1 << 18; // f18
-
-// Number of registers for which space is reserved in safepoints. Must be a
-// multiple of 8.
-const int kNumSafepointRegisters = 24;
-
-// Define the list of registers actually saved at safepoints.
-// Note that the number of saved registers may be smaller than the reserved
-// space, i.e. kNumSafepointSavedRegisters <= kNumSafepointRegisters.
-const RegList kSafepointSavedRegisters = kJSCallerSaved | kCalleeSaved;
-const int kNumSafepointSavedRegisters = kNumJSCallerSaved + kNumCalleeSaved;
-
-const int kUndefIndex = -1;
-// Map with indexes on stack that corresponds to codes of saved registers.
-const int kSafepointRegisterStackIndexMap[kNumRegs] = {kUndefIndex, // zero_reg
- kUndefIndex, // at
- 0, // v0
- 1, // v1
- 2, // a0
- 3, // a1
- 4, // a2
- 5, // a3
- 6, // a4
- 7, // a5
- 8, // a6
- 9, // a7
- 10, // t0
- 11, // t1
- 12, // t2
- 13, // t3
- 14, // s0
- 15, // s1
- 16, // s2
- 17, // s3
- 18, // s4
- 19, // s5
- 20, // s6
- 21, // s7
- kUndefIndex, // t8
- kUndefIndex, // t9
- kUndefIndex, // k0
- kUndefIndex, // k1
- kUndefIndex, // gp
- kUndefIndex, // sp
- 22, // fp
- kUndefIndex};
-
-// CPU Registers.
-//
-// 1) We would prefer to use an enum, but enum values are assignment-
-// compatible with int, which has caused code-generation bugs.
-//
-// 2) We would prefer to use a class instead of a struct but we don't like
-// the register initialization to depend on the particular initialization
-// order (which appears to be different on OS X, Linux, and Windows for the
-// installed versions of C++ we tried). Using a struct permits C-style
-// "initialization". Also, the Register objects cannot be const as this
-// forces initialization stubs in MSVC, making us dependent on initialization
-// order.
-//
-// 3) By not using an enum, we are possibly preventing the compiler from
-// doing certain constant folds, which may significantly reduce the
-// code generated for some assembly instructions (because they boil down
-// to a few constants). If this is a problem, we could change the code
-// such that we use an enum in optimized mode, and the struct in debug
-// mode. This way we get the compile-time error checking in debug mode
-// and best performance in optimized code.
-
-
-// -----------------------------------------------------------------------------
-// Implementation of Register and FPURegister.
-
-enum RegisterCode {
-#define REGISTER_CODE(R) kRegCode_##R,
- GENERAL_REGISTERS(REGISTER_CODE)
-#undef REGISTER_CODE
- kRegAfterLast
-};
-
-class Register : public RegisterBase<Register, kRegAfterLast> {
- public:
-#if defined(V8_TARGET_LITTLE_ENDIAN)
- static constexpr int kMantissaOffset = 0;
- static constexpr int kExponentOffset = 4;
-#elif defined(V8_TARGET_BIG_ENDIAN)
- static constexpr int kMantissaOffset = 4;
- static constexpr int kExponentOffset = 0;
-#else
-#error Unknown endianness
-#endif
-
- private:
- friend class RegisterBase;
- explicit constexpr Register(int code) : RegisterBase(code) {}
-};
-
-// s7: context register
-// s3: scratch register
-// s4: scratch register 2
-#define DECLARE_REGISTER(R) \
- constexpr Register R = Register::from_code<kRegCode_##R>();
-GENERAL_REGISTERS(DECLARE_REGISTER)
-#undef DECLARE_REGISTER
-
-constexpr Register no_reg = Register::no_reg();
-
-int ToNumber(Register reg);
-
-Register ToRegister(int num);
-
-constexpr bool kPadArguments = false;
-constexpr bool kSimpleFPAliasing = true;
-constexpr bool kSimdMaskRegisters = false;
-
-enum DoubleRegisterCode {
-#define REGISTER_CODE(R) kDoubleCode_##R,
- DOUBLE_REGISTERS(REGISTER_CODE)
-#undef REGISTER_CODE
- kDoubleAfterLast
-};
-
-// Coprocessor register.
-class FPURegister : public RegisterBase<FPURegister, kDoubleAfterLast> {
- public:
- // TODO(plind): Warning, inconsistent numbering here. kNumFPURegisters refers
- // to number of 32-bit FPU regs, but kNumAllocatableRegisters refers to
- // number of Double regs (64-bit regs, or FPU-reg-pairs).
-
- FPURegister low() const {
- // TODO(plind): Create DCHECK for FR=0 mode. This usage suspect for FR=1.
- // Find low reg of a Double-reg pair, which is the reg itself.
- DCHECK_EQ(code() % 2, 0); // Specified Double reg must be even.
- return FPURegister::from_code(code());
- }
- FPURegister high() const {
- // TODO(plind): Create DCHECK for FR=0 mode. This usage illegal in FR=1.
- // Find high reg of a Doubel-reg pair, which is reg + 1.
- DCHECK_EQ(code() % 2, 0); // Specified Double reg must be even.
- return FPURegister::from_code(code() + 1);
- }
-
- private:
- friend class RegisterBase;
- explicit constexpr FPURegister(int code) : RegisterBase(code) {}
-};
-
-enum MSARegisterCode {
-#define REGISTER_CODE(R) kMsaCode_##R,
- SIMD128_REGISTERS(REGISTER_CODE)
-#undef REGISTER_CODE
- kMsaAfterLast
-};
-
-// MIPS SIMD (MSA) register
-class MSARegister : public RegisterBase<MSARegister, kMsaAfterLast> {
- friend class RegisterBase;
- explicit constexpr MSARegister(int code) : RegisterBase(code) {}
-};
-
-// A few double registers are reserved: one as a scratch register and one to
-// hold 0.0.
-// f28: 0.0
-// f30: scratch register.
-
-// V8 now supports the O32 ABI, and the FPU Registers are organized as 32
-// 32-bit registers, f0 through f31. When used as 'double' they are used
-// in pairs, starting with the even numbered register. So a double operation
-// on f0 really uses f0 and f1.
-// (Modern mips hardware also supports 32 64-bit registers, via setting
-// (privileged) Status Register FR bit to 1. This is used by the N32 ABI,
-// but it is not in common use. Someday we will want to support this in v8.)
-
-// For O32 ABI, Floats and Doubles refer to same set of 32 32-bit registers.
-typedef FPURegister FloatRegister;
-
-typedef FPURegister DoubleRegister;
-
-#define DECLARE_DOUBLE_REGISTER(R) \
- constexpr DoubleRegister R = DoubleRegister::from_code<kDoubleCode_##R>();
-DOUBLE_REGISTERS(DECLARE_DOUBLE_REGISTER)
-#undef DECLARE_DOUBLE_REGISTER
-
-constexpr DoubleRegister no_dreg = DoubleRegister::no_reg();
-
-// SIMD registers.
-typedef MSARegister Simd128Register;
-
-#define DECLARE_SIMD128_REGISTER(R) \
- constexpr Simd128Register R = Simd128Register::from_code<kMsaCode_##R>();
-SIMD128_REGISTERS(DECLARE_SIMD128_REGISTER)
-#undef DECLARE_SIMD128_REGISTER
-
-const Simd128Register no_msareg = Simd128Register::no_reg();
-
-// Register aliases.
-// cp is assumed to be a callee saved register.
-constexpr Register kRootRegister = s6;
-constexpr Register cp = s7;
-constexpr Register kScratchReg = s3;
-constexpr Register kScratchReg2 = s4;
-constexpr DoubleRegister kScratchDoubleReg = f30;
-constexpr DoubleRegister kDoubleRegZero = f28;
-// Used on mips64r6 for compare operations.
-// We use the last non-callee saved odd register for N64 ABI
-constexpr DoubleRegister kDoubleCompareReg = f23;
-// MSA zero and scratch regs must have the same numbers as FPU zero and scratch
-constexpr Simd128Register kSimd128RegZero = w28;
-constexpr Simd128Register kSimd128ScratchReg = w30;
-
-// FPU (coprocessor 1) control registers.
-// Currently only FCSR (#31) is implemented.
-struct FPUControlRegister {
- bool is_valid() const { return reg_code == kFCSRRegister; }
- bool is(FPUControlRegister creg) const { return reg_code == creg.reg_code; }
- int code() const {
- DCHECK(is_valid());
- return reg_code;
- }
- int bit() const {
- DCHECK(is_valid());
- return 1 << reg_code;
- }
- void setcode(int f) {
- reg_code = f;
- DCHECK(is_valid());
- }
- // Unfortunately we can't make this private in a struct.
- int reg_code;
-};
-
-constexpr FPUControlRegister no_fpucreg = {kInvalidFPUControlRegister};
-constexpr FPUControlRegister FCSR = {kFCSRRegister};
-
-// MSA control registers
-struct MSAControlRegister {
- bool is_valid() const {
- return (reg_code == kMSAIRRegister) || (reg_code == kMSACSRRegister);
- }
- bool is(MSAControlRegister creg) const { return reg_code == creg.reg_code; }
- int code() const {
- DCHECK(is_valid());
- return reg_code;
- }
- int bit() const {
- DCHECK(is_valid());
- return 1 << reg_code;
- }
- void setcode(int f) {
- reg_code = f;
- DCHECK(is_valid());
- }
- // Unfortunately we can't make this private in a struct.
- int reg_code;
-};
-
-constexpr MSAControlRegister no_msacreg = {kInvalidMSAControlRegister};
-constexpr MSAControlRegister MSAIR = {kMSAIRRegister};
-constexpr MSAControlRegister MSACSR = {kMSACSRRegister};
-
// -----------------------------------------------------------------------------
// Machine instruction Operands.
constexpr int kSmiShift = kSmiTagSize + kSmiShiftSize;
@@ -404,16 +68,12 @@ class Operand {
value_.immediate = static_cast<int64_t>(f.address());
}
V8_INLINE explicit Operand(const char* s);
- V8_INLINE explicit Operand(Object** opp);
- V8_INLINE explicit Operand(Context** cpp);
explicit Operand(Handle<HeapObject> handle);
- V8_INLINE explicit Operand(Smi* value)
- : rm_(no_reg), rmode_(RelocInfo::NONE) {
- value_.immediate = reinterpret_cast<intptr_t>(value);
+ V8_INLINE explicit Operand(Smi value) : rm_(no_reg), rmode_(RelocInfo::NONE) {
+ value_.immediate = static_cast<intptr_t>(value.ptr());
}
static Operand EmbeddedNumber(double number); // Smi or HeapNumber.
- static Operand EmbeddedCode(CodeStub* stub);
static Operand EmbeddedStringConstant(const StringConstantBase* str);
// Register.
@@ -491,15 +151,10 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// for a detailed comment on the layout (globals.h).
//
// If the provided buffer is nullptr, the assembler allocates and grows its
- // own buffer, and buffer_size determines the initial buffer size. The buffer
- // is owned by the assembler and deallocated upon destruction of the
- // assembler.
- //
- // If the provided buffer is not nullptr, the assembler uses the provided
- // buffer for code generation and assumes its size to be buffer_size. If the
- // buffer is too small, a fatal error occurs. No deallocation of the buffer is
- // done upon destruction of the assembler.
- Assembler(const AssemblerOptions& options, void* buffer, int buffer_size);
+ // own buffer. Otherwise it takes ownership of the provided buffer.
+ explicit Assembler(const AssemblerOptions&,
+ std::unique_ptr<AssemblerBuffer> = {});
+
virtual ~Assembler() { }
// GetCode emits any pending (non-emitted) code and fills the descriptor
@@ -600,13 +255,13 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
static void JumpLabelToJumpRegister(Address pc);
- static void QuietNaN(HeapObject* nan);
+ static void QuietNaN(HeapObject nan);
// This sets the branch destination (which gets loaded at the call address).
// This is for calls and branches within generated code. The serializer
// has already deserialized the lui/ori instructions etc.
inline static void deserialization_set_special_target_at(
- Address instruction_payload, Code* code, Address target);
+ Address instruction_payload, Code code, Address target);
// Get the size of the special target encoded at 'instruction_payload'.
inline static int deserialization_special_target_size(
@@ -1795,10 +1450,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
DISALLOW_IMPLICIT_CONSTRUCTORS(BlockGrowBufferScope);
};
- // Record a comment relocation entry that can be used by a disassembler.
- // Use --code-comments to enable.
- void RecordComment(const char* msg);
-
// Record a deoptimization reason that can be used by a log or cpu profiler.
// Use --trace-deopt to enable.
void RecordDeoptReason(DeoptimizeReason reason, SourcePosition position,
@@ -1834,9 +1485,11 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
static void instr_at_put(Address pc, Instr instr) {
*reinterpret_cast<Instr*>(pc) = instr;
}
- Instr instr_at(int pos) { return *reinterpret_cast<Instr*>(buffer_ + pos); }
+ Instr instr_at(int pos) {
+ return *reinterpret_cast<Instr*>(buffer_start_ + pos);
+ }
void instr_at_put(int pos, Instr instr) {
- *reinterpret_cast<Instr*>(buffer_ + pos) = instr;
+ *reinterpret_cast<Instr*>(buffer_start_ + pos) = instr;
}
// Check if an instruction is a branch of some kind.
@@ -1905,13 +1558,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void CheckTrampolinePool();
- void PatchConstantPoolAccessInstruction(int pc_offset, int offset,
- ConstantPoolEntry::Access access,
- ConstantPoolEntry::Type type) {
- // No embedded constant pool support.
- UNREACHABLE();
- }
-
bool IsPrevInstrCompactBranch() { return prev_instr_compact_branch_; }
static bool IsCompactBranchSupported() { return kArchVariant == kMips64r6; }
@@ -2274,6 +1920,8 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
private:
void AllocateAndInstallRequestedHeapObjects(Isolate* isolate);
+ int WriteCodeComments();
+
friend class RegExpMacroAssemblerMIPS;
friend class RelocInfo;
friend class BlockTrampolinePoolScope;
diff --git a/deps/v8/src/mips64/code-stubs-mips64.cc b/deps/v8/src/mips64/code-stubs-mips64.cc
deleted file mode 100644
index bb51ac7cf3..0000000000
--- a/deps/v8/src/mips64/code-stubs-mips64.cc
+++ /dev/null
@@ -1,602 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#if V8_TARGET_ARCH_MIPS64
-
-#include "src/api-arguments.h"
-#include "src/bootstrapper.h"
-#include "src/code-stubs.h"
-#include "src/frame-constants.h"
-#include "src/frames.h"
-#include "src/heap/heap-inl.h"
-#include "src/ic/ic.h"
-#include "src/ic/stub-cache.h"
-#include "src/isolate.h"
-#include "src/objects/api-callbacks.h"
-#include "src/regexp/jsregexp.h"
-#include "src/regexp/regexp-macro-assembler.h"
-#include "src/runtime/runtime.h"
-
-#include "src/mips64/code-stubs-mips64.h" // Cannot be the first include.
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm)
-
-void JSEntryStub::Generate(MacroAssembler* masm) {
- Label invoke, handler_entry, exit;
- Isolate* isolate = masm->isolate();
-
- {
- NoRootArrayScope no_root_array(masm);
-
- // TODO(plind): unify the ABI description here.
- // Registers:
- // a0: entry address
- // a1: function
- // a2: receiver
- // a3: argc
-    // a4: argv (passed in a register on mips64)
-
-    // Stack:
-    // 0 arg slots on mips64 (4 args slots on mips)
-    // args -- in a4 on mips64, on the stack on mips
-
- ProfileEntryHookStub::MaybeCallEntryHook(masm);
-
- // Save callee saved registers on the stack.
- __ MultiPush(kCalleeSaved | ra.bit());
-
- // Save callee-saved FPU registers.
- __ MultiPushFPU(kCalleeSavedFPU);
- // Set up the reserved register for 0.0.
- __ Move(kDoubleRegZero, 0.0);
-
- // Load argv in s0 register.
-    __ mov(s0, a4);  // The 5th parameter (argv) arrives in the a4 register on mips64.
-
- __ InitializeRootRegister();
- }
-
- // We build an EntryFrame.
- __ li(a7, Operand(-1)); // Push a bad frame pointer to fail if it is used.
- StackFrame::Type marker = type();
- __ li(a6, Operand(StackFrame::TypeToMarker(marker)));
- __ li(a5, Operand(StackFrame::TypeToMarker(marker)));
- ExternalReference c_entry_fp =
- ExternalReference::Create(IsolateAddressId::kCEntryFPAddress, isolate);
- __ li(a4, Operand(c_entry_fp));
- __ Ld(a4, MemOperand(a4));
- __ Push(a7, a6, a5, a4);
- // Set up frame pointer for the frame to be pushed.
- __ daddiu(fp, sp, -EntryFrameConstants::kCallerFPOffset);
-
- // Registers:
- // a0: entry_address
- // a1: function
- // a2: receiver_pointer
- // a3: argc
- // s0: argv
- //
- // Stack:
- // caller fp |
- // function slot | entry frame
- // context slot |
- // bad fp (0xFF...F) |
- // callee saved registers + ra
- // [ O32: 4 args slots]
- // args
-
- // If this is the outermost JS call, set js_entry_sp value.
- Label non_outermost_js;
- ExternalReference js_entry_sp =
- ExternalReference::Create(IsolateAddressId::kJSEntrySPAddress, isolate);
- __ li(a5, js_entry_sp);
- __ Ld(a6, MemOperand(a5));
- __ Branch(&non_outermost_js, ne, a6, Operand(zero_reg));
- __ Sd(fp, MemOperand(a5));
- __ li(a4, Operand(StackFrame::OUTERMOST_JSENTRY_FRAME));
- Label cont;
- __ b(&cont);
- __ nop(); // Branch delay slot nop.
- __ bind(&non_outermost_js);
- __ li(a4, Operand(StackFrame::INNER_JSENTRY_FRAME));
- __ bind(&cont);
- __ push(a4);
-
- // Jump to a faked try block that does the invoke, with a faked catch
- // block that sets the pending exception.
- __ jmp(&invoke);
- __ bind(&handler_entry);
- handler_offset_ = handler_entry.pos();
- // Caught exception: Store result (exception) in the pending exception
- // field in the JSEnv and return a failure sentinel. Coming in here the
- // fp will be invalid because the PushStackHandler below sets it to 0 to
- // signal the existence of the JSEntry frame.
- __ li(a4, ExternalReference::Create(
- IsolateAddressId::kPendingExceptionAddress, isolate));
- __ Sd(v0, MemOperand(a4)); // We come back from 'invoke'. result is in v0.
- __ LoadRoot(v0, RootIndex::kException);
- __ b(&exit); // b exposes branch delay slot.
- __ nop(); // Branch delay slot nop.
-
- // Invoke: Link this frame into the handler chain.
- __ bind(&invoke);
- __ PushStackHandler();
- // If an exception not caught by another handler occurs, this handler
- // returns control to the code after the bal(&invoke) above, which
- // restores all kCalleeSaved registers (including cp and fp) to their
- // saved values before returning a failure to C.
-
- // Invoke the function by calling through JS entry trampoline builtin.
- // Notice that we cannot store a reference to the trampoline code directly in
- // this stub, because runtime stubs are not traversed when doing GC.
-
- // Registers:
- // a0: entry_address
- // a1: function
- // a2: receiver_pointer
- // a3: argc
- // s0: argv
- //
- // Stack:
- // handler frame
- // entry frame
- // callee saved registers + ra
- // [ O32: 4 args slots]
- // args
- __ Call(EntryTrampoline(), RelocInfo::CODE_TARGET);
-
- // Unlink this frame from the handler chain.
- __ PopStackHandler();
-
- __ bind(&exit); // v0 holds result
- // Check if the current stack frame is marked as the outermost JS frame.
- Label non_outermost_js_2;
- __ pop(a5);
- __ Branch(&non_outermost_js_2, ne, a5,
- Operand(StackFrame::OUTERMOST_JSENTRY_FRAME));
- __ li(a5, ExternalReference(js_entry_sp));
- __ Sd(zero_reg, MemOperand(a5));
- __ bind(&non_outermost_js_2);
-
- // Restore the top frame descriptors from the stack.
- __ pop(a5);
- __ li(a4,
- ExternalReference::Create(IsolateAddressId::kCEntryFPAddress, isolate));
- __ Sd(a5, MemOperand(a4));
-
- // Reset the stack to the callee saved registers.
- __ daddiu(sp, sp, -EntryFrameConstants::kCallerFPOffset);
-
- // Restore callee-saved fpu registers.
- __ MultiPopFPU(kCalleeSavedFPU);
-
- // Restore callee saved registers from the stack.
- __ MultiPop(kCalleeSaved | ra.bit());
- // Return.
- __ Jump(ra);
-}
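-// A hedged recap of the stub's contract, reconstructed from the register
-// comments above (the exact typedef is an assumption): the generated code
-// behaves like
-//   Object* JSEntry(Address entry, Object* function, Object* receiver,
-//                   int argc, Object*** argv);  // result comes back in v0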
-
-void DirectCEntryStub::Generate(MacroAssembler* masm) {
-  // Make room for arguments to fit the C calling convention. Most callers
-  // of DirectCEntryStub::GenerateCall use EnterExitFrame/LeaveExitFrame,
-  // so they handle stack restoring and we don't have to do that here.
-  // Any caller of DirectCEntryStub::GenerateCall must take care of dropping
-  // kCArgsSlotsSize stack space after the call.
- __ daddiu(sp, sp, -kCArgsSlotsSize);
- // Place the return address on the stack, making the call
- // GC safe. The RegExp backend also relies on this.
- __ Sd(ra, MemOperand(sp, kCArgsSlotsSize));
- __ Call(t9); // Call the C++ function.
- __ Ld(t9, MemOperand(sp, kCArgsSlotsSize));
-
- if (FLAG_debug_code && FLAG_enable_slow_asserts) {
- // In case of an error the return address may point to a memory area
- // filled with kZapValue by the GC.
- // Dereference the address and check for this.
- __ Uld(a4, MemOperand(t9));
- __ Assert(ne, AbortReason::kReceivedInvalidReturnAddress, a4,
- Operand(reinterpret_cast<uint64_t>(kZapValue)));
- }
- __ Jump(t9);
-}
-
-
-void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
- Register target) {
- if (FLAG_embedded_builtins) {
- if (masm->root_array_available() &&
- isolate()->ShouldLoadConstantsFromRootList()) {
- // This is basically an inlined version of Call(Handle<Code>) that loads
- // the code object into kScratchReg instead of t9.
- __ Move(t9, target);
- __ IndirectLoadConstant(kScratchReg, GetCode());
- __ Daddu(kScratchReg, kScratchReg,
- Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Call(kScratchReg);
- return;
- }
- }
- intptr_t loc =
- reinterpret_cast<intptr_t>(GetCode().location());
- __ Move(t9, target);
- __ li(kScratchReg, Operand(loc, RelocInfo::CODE_TARGET), CONSTANT_SIZE);
- __ Call(kScratchReg);
-}
-
-
-void ProfileEntryHookStub::MaybeCallEntryHookDelayed(TurboAssembler* tasm,
- Zone* zone) {
- if (tasm->isolate()->function_entry_hook() != nullptr) {
- tasm->push(ra);
- tasm->CallStubDelayed(new (zone) ProfileEntryHookStub(nullptr));
- tasm->pop(ra);
- }
-}
-
-void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
- if (masm->isolate()->function_entry_hook() != nullptr) {
- ProfileEntryHookStub stub(masm->isolate());
- __ push(ra);
- __ CallStub(&stub);
- __ pop(ra);
- }
-}
-
-
-void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
-  // The entry hook is a "push ra" instruction, followed by a call.
-  // Note: on MIPS a "push" is 2 instructions.
- const int32_t kReturnAddressDistanceFromFunctionStart =
- Assembler::kCallTargetAddressOffset + (2 * kInstrSize);
-
- // This should contain all kJSCallerSaved registers.
- const RegList kSavedRegs =
- kJSCallerSaved | // Caller saved registers.
- s5.bit(); // Saved stack pointer.
-
- // We also save ra, so the count here is one higher than the mask indicates.
- const int32_t kNumSavedRegs = kNumJSCallerSaved + 2;
-
- // Save all caller-save registers as this may be called from anywhere.
- __ MultiPush(kSavedRegs | ra.bit());
-
- // Compute the function's address for the first argument.
- __ Dsubu(a0, ra, Operand(kReturnAddressDistanceFromFunctionStart));
-
- // The caller's return address is above the saved temporaries.
- // Grab that for the second argument to the hook.
- __ Daddu(a1, sp, Operand(kNumSavedRegs * kPointerSize));
-
- // Align the stack if necessary.
- int frame_alignment = masm->ActivationFrameAlignment();
- if (frame_alignment > kPointerSize) {
- __ mov(s5, sp);
- DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
- __ And(sp, sp, Operand(-frame_alignment));
- }
-
- __ Dsubu(sp, sp, kCArgsSlotsSize);
-#if defined(V8_HOST_ARCH_MIPS) || defined(V8_HOST_ARCH_MIPS64)
- int64_t entry_hook =
- reinterpret_cast<int64_t>(isolate()->function_entry_hook());
- __ li(t9, Operand(entry_hook));
-#else
- // Under the simulator we need to indirect the entry hook through a
- // trampoline function at a known address.
- // It additionally takes an isolate as a third parameter.
- __ li(a2, ExternalReference::isolate_address(isolate()));
-
- ApiFunction dispatcher(FUNCTION_ADDR(EntryHookTrampoline));
- __ li(t9, ExternalReference::Create(&dispatcher,
- ExternalReference::BUILTIN_CALL));
-#endif
- // Call C function through t9 to conform ABI for PIC.
- __ Call(t9);
-
- // Restore the stack pointer if needed.
- if (frame_alignment > kPointerSize) {
- __ mov(sp, s5);
- } else {
- __ Daddu(sp, sp, kCArgsSlotsSize);
- }
-
- // Also pop ra to get Ret(0).
- __ MultiPop(kSavedRegs | ra.bit());
- __ Ret();
-}
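-// A hedged note: the hook invoked above is the public v8.h entry hook,
-// receiving the function start and the location of the caller's ra:
-//   typedef void (*FunctionEntryHook)(uintptr_t function,
-//                                     uintptr_t return_addr_location);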
-
-static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
- int64_t offset = (ref0.address() - ref1.address());
- DCHECK(static_cast<int>(offset) == offset);
- return static_cast<int>(offset);
-}
-
-
-// Calls an API function. Allocates a HandleScope, extracts the returned
-// value from the handle, and propagates exceptions. Restores the context.
-// stack_space - space to be unwound on exit (includes the call's JS
-// arguments space and the additional space allocated for the fast call).
-static void CallApiFunctionAndReturn(MacroAssembler* masm,
- Register function_address,
- ExternalReference thunk_ref,
- int stack_space,
- int32_t stack_space_offset,
- MemOperand return_value_operand) {
- Isolate* isolate = masm->isolate();
- ExternalReference next_address =
- ExternalReference::handle_scope_next_address(isolate);
- const int kNextOffset = 0;
- const int kLimitOffset = AddressOffset(
- ExternalReference::handle_scope_limit_address(isolate), next_address);
- const int kLevelOffset = AddressOffset(
- ExternalReference::handle_scope_level_address(isolate), next_address);
-
- DCHECK(function_address == a1 || function_address == a2);
-
- Label profiler_disabled;
- Label end_profiler_check;
- __ li(t9, ExternalReference::is_profiling_address(isolate));
- __ Lb(t9, MemOperand(t9, 0));
- __ Branch(&profiler_disabled, eq, t9, Operand(zero_reg));
-
- // Additional parameter is the address of the actual callback.
- __ li(t9, thunk_ref);
- __ jmp(&end_profiler_check);
-
- __ bind(&profiler_disabled);
- __ mov(t9, function_address);
- __ bind(&end_profiler_check);
-
- // Allocate HandleScope in callee-save registers.
- __ li(s5, next_address);
- __ Ld(s0, MemOperand(s5, kNextOffset));
- __ Ld(s1, MemOperand(s5, kLimitOffset));
- __ Lw(s2, MemOperand(s5, kLevelOffset));
- __ Addu(s2, s2, Operand(1));
- __ Sw(s2, MemOperand(s5, kLevelOffset));
-
- if (FLAG_log_timer_events) {
- FrameScope frame(masm, StackFrame::MANUAL);
- __ PushSafepointRegisters();
- __ PrepareCallCFunction(1, a0);
- __ li(a0, ExternalReference::isolate_address(isolate));
- __ CallCFunction(ExternalReference::log_enter_external_function(), 1);
- __ PopSafepointRegisters();
- }
-
- // Native call returns to the DirectCEntry stub which redirects to the
- // return address pushed on stack (could have moved after GC).
- // DirectCEntry stub itself is generated early and never moves.
- DirectCEntryStub stub(isolate);
- stub.GenerateCall(masm, t9);
-
- if (FLAG_log_timer_events) {
- FrameScope frame(masm, StackFrame::MANUAL);
- __ PushSafepointRegisters();
- __ PrepareCallCFunction(1, a0);
- __ li(a0, ExternalReference::isolate_address(isolate));
- __ CallCFunction(ExternalReference::log_leave_external_function(), 1);
- __ PopSafepointRegisters();
- }
-
- Label promote_scheduled_exception;
- Label delete_allocated_handles;
- Label leave_exit_frame;
- Label return_value_loaded;
-
- // Load value from ReturnValue.
- __ Ld(v0, return_value_operand);
- __ bind(&return_value_loaded);
-
- // No more valid handles (the result handle was the last one). Restore
- // previous handle scope.
- __ Sd(s0, MemOperand(s5, kNextOffset));
- if (__ emit_debug_code()) {
- __ Lw(a1, MemOperand(s5, kLevelOffset));
- __ Check(eq, AbortReason::kUnexpectedLevelAfterReturnFromApiCall, a1,
- Operand(s2));
- }
- __ Subu(s2, s2, Operand(1));
- __ Sw(s2, MemOperand(s5, kLevelOffset));
- __ Ld(kScratchReg, MemOperand(s5, kLimitOffset));
- __ Branch(&delete_allocated_handles, ne, s1, Operand(kScratchReg));
-
- // Leave the API exit frame.
- __ bind(&leave_exit_frame);
-
- if (stack_space_offset != kInvalidStackOffset) {
- DCHECK_EQ(kCArgsSlotsSize, 0);
- __ Ld(s0, MemOperand(sp, stack_space_offset));
- } else {
- __ li(s0, Operand(stack_space));
- }
- __ LeaveExitFrame(false, s0, NO_EMIT_RETURN,
- stack_space_offset != kInvalidStackOffset);
-
- // Check if the function scheduled an exception.
- __ LoadRoot(a4, RootIndex::kTheHoleValue);
- __ li(kScratchReg, ExternalReference::scheduled_exception_address(isolate));
- __ Ld(a5, MemOperand(kScratchReg));
- __ Branch(&promote_scheduled_exception, ne, a4, Operand(a5));
-
- __ Ret();
-
- // Re-throw by promoting a scheduled exception.
- __ bind(&promote_scheduled_exception);
- __ TailCallRuntime(Runtime::kPromoteScheduledException);
-
- // HandleScope limit has changed. Delete allocated extensions.
- __ bind(&delete_allocated_handles);
- __ Sd(s1, MemOperand(s5, kLimitOffset));
- __ mov(s0, v0);
- __ mov(a0, v0);
- __ PrepareCallCFunction(1, s1);
- __ li(a0, ExternalReference::isolate_address(isolate));
- __ CallCFunction(ExternalReference::delete_handle_scope_extensions(), 1);
- __ mov(v0, s0);
- __ jmp(&leave_exit_frame);
-}
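-// A hedged C++ analogy of the HandleScope bookkeeping emitted above,
-// assuming the usual HandleScopeData fields (next/limit/level):
-//   HandleScopeData* d = isolate->handle_scope_data();
-//   Address* prev_next = d->next;
-//   Address* prev_limit = d->limit;
-//   d->level++;
-//   /* ... call the API function ... */
-//   d->next = prev_next;
-//   d->level--;
-//   if (d->limit != prev_limit) HandleScope::DeleteExtensions(isolate);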
-
-void CallApiCallbackStub::Generate(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- a4 : call_data
- // -- a2 : holder
- // -- a1 : api_function_address
- // -- cp : context
- // --
- // -- sp[0] : last argument
- // -- ...
- // -- sp[(argc - 1) * 8] : first argument
- // -- sp[argc * 8] : receiver
- // -----------------------------------
-
- Register call_data = a4;
- Register holder = a2;
- Register api_function_address = a1;
-
- typedef FunctionCallbackArguments FCA;
-
- STATIC_ASSERT(FCA::kArgsLength == 6);
- STATIC_ASSERT(FCA::kNewTargetIndex == 5);
- STATIC_ASSERT(FCA::kDataIndex == 4);
- STATIC_ASSERT(FCA::kReturnValueOffset == 3);
- STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
- STATIC_ASSERT(FCA::kIsolateIndex == 1);
- STATIC_ASSERT(FCA::kHolderIndex == 0);
-
- // new target
- __ PushRoot(RootIndex::kUndefinedValue);
-
- // call data.
- __ Push(call_data);
-
- Register scratch = call_data;
- __ LoadRoot(scratch, RootIndex::kUndefinedValue);
- // Push return value and default return value.
- __ Push(scratch, scratch);
- __ li(scratch, ExternalReference::isolate_address(masm->isolate()));
- // Push isolate and holder.
- __ Push(scratch, holder);
-
- // Prepare arguments.
- __ mov(scratch, sp);
-
- // Allocate the v8::Arguments structure in the arguments' space since
- // it's not controlled by GC.
- const int kApiStackSpace = 3;
-
- FrameScope frame_scope(masm, StackFrame::MANUAL);
- __ EnterExitFrame(false, kApiStackSpace);
-
- DCHECK(api_function_address != a0 && scratch != a0);
- // a0 = FunctionCallbackInfo&
-  // The arguments are after the return address.
- __ Daddu(a0, sp, Operand(1 * kPointerSize));
- // FunctionCallbackInfo::implicit_args_
- __ Sd(scratch, MemOperand(a0, 0 * kPointerSize));
- // FunctionCallbackInfo::values_
- __ Daddu(kScratchReg, scratch,
- Operand((FCA::kArgsLength - 1 + argc()) * kPointerSize));
- __ Sd(kScratchReg, MemOperand(a0, 1 * kPointerSize));
- // FunctionCallbackInfo::length_ = argc
-  // Stored as an int field; 32-bit integers within a struct on the stack are
-  // always left-justified by the n64 ABI.
- __ li(kScratchReg, Operand(argc()));
- __ Sw(kScratchReg, MemOperand(a0, 2 * kPointerSize));
-
- ExternalReference thunk_ref = ExternalReference::invoke_function_callback();
-
- AllowExternalCallThatCantCauseGC scope(masm);
- // Stores return the first js argument.
- int return_value_offset = 2 + FCA::kReturnValueOffset;
- MemOperand return_value_operand(fp, return_value_offset * kPointerSize);
- const int stack_space = argc() + FCA::kArgsLength + 1;
- // TODO(adamk): Why are we clobbering this immediately?
- const int32_t stack_space_offset = kInvalidStackOffset;
- CallApiFunctionAndReturn(masm, api_function_address, thunk_ref, stack_space,
- stack_space_offset, return_value_operand);
-}
-
-
-void CallApiGetterStub::Generate(MacroAssembler* masm) {
- // Build v8::PropertyCallbackInfo::args_ array on the stack and push property
- // name below the exit frame to make GC aware of them.
- STATIC_ASSERT(PropertyCallbackArguments::kShouldThrowOnErrorIndex == 0);
- STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 1);
- STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 2);
- STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 3);
- STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 4);
- STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 5);
- STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 6);
- STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 7);
-
- Register receiver = ApiGetterDescriptor::ReceiverRegister();
- Register holder = ApiGetterDescriptor::HolderRegister();
- Register callback = ApiGetterDescriptor::CallbackRegister();
- Register scratch = a4;
- DCHECK(!AreAliased(receiver, holder, callback, scratch));
-
- Register api_function_address = a2;
-
- // Here and below +1 is for name() pushed after the args_ array.
- typedef PropertyCallbackArguments PCA;
- __ Dsubu(sp, sp, (PCA::kArgsLength + 1) * kPointerSize);
- __ Sd(receiver, MemOperand(sp, (PCA::kThisIndex + 1) * kPointerSize));
- __ Ld(scratch, FieldMemOperand(callback, AccessorInfo::kDataOffset));
- __ Sd(scratch, MemOperand(sp, (PCA::kDataIndex + 1) * kPointerSize));
- __ LoadRoot(scratch, RootIndex::kUndefinedValue);
- __ Sd(scratch, MemOperand(sp, (PCA::kReturnValueOffset + 1) * kPointerSize));
- __ Sd(scratch, MemOperand(sp, (PCA::kReturnValueDefaultValueIndex + 1) *
- kPointerSize));
- __ li(scratch, ExternalReference::isolate_address(isolate()));
- __ Sd(scratch, MemOperand(sp, (PCA::kIsolateIndex + 1) * kPointerSize));
- __ Sd(holder, MemOperand(sp, (PCA::kHolderIndex + 1) * kPointerSize));
- // should_throw_on_error -> false
- DCHECK_NULL(Smi::kZero);
- __ Sd(zero_reg,
- MemOperand(sp, (PCA::kShouldThrowOnErrorIndex + 1) * kPointerSize));
- __ Ld(scratch, FieldMemOperand(callback, AccessorInfo::kNameOffset));
- __ Sd(scratch, MemOperand(sp, 0 * kPointerSize));
-
- // v8::PropertyCallbackInfo::args_ array and name handle.
- const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
-
- // Load address of v8::PropertyAccessorInfo::args_ array and name handle.
- __ mov(a0, sp); // a0 = Handle<Name>
- __ Daddu(a1, a0, Operand(1 * kPointerSize)); // a1 = v8::PCI::args_
-
- const int kApiStackSpace = 1;
- FrameScope frame_scope(masm, StackFrame::MANUAL);
- __ EnterExitFrame(false, kApiStackSpace);
-
-  // Create a v8::PropertyCallbackInfo object on the stack and initialize
-  // its args_ field.
- __ Sd(a1, MemOperand(sp, 1 * kPointerSize));
- __ Daddu(a1, sp, Operand(1 * kPointerSize));
- // a1 = v8::PropertyCallbackInfo&
-
- ExternalReference thunk_ref =
- ExternalReference::invoke_accessor_getter_callback();
-
- __ Ld(scratch, FieldMemOperand(callback, AccessorInfo::kJsGetterOffset));
- __ Ld(api_function_address,
- FieldMemOperand(scratch, Foreign::kForeignAddressOffset));
-
- // +3 is to skip prolog, return address and name handle.
- MemOperand return_value_operand(
- fp, (PropertyCallbackArguments::kReturnValueOffset + 3) * kPointerSize);
- CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
- kStackUnwindSpace, kInvalidStackOffset,
- return_value_operand);
-}
-
-#undef __
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_TARGET_ARCH_MIPS64
diff --git a/deps/v8/src/mips64/code-stubs-mips64.h b/deps/v8/src/mips64/code-stubs-mips64.h
deleted file mode 100644
index f5d20d8c2b..0000000000
--- a/deps/v8/src/mips64/code-stubs-mips64.h
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_MIPS64_CODE_STUBS_MIPS64_H_
-#define V8_MIPS64_CODE_STUBS_MIPS64_H_
-
-namespace v8 {
-namespace internal {
-
-// Trampoline stub to call into native code. To call safely into native code
-// in the presence of compacting GC (which can move code objects) we need to
-// keep the code which called into native pinned in the memory. Currently the
-// simplest approach is to generate such stub early enough so it can never be
-// moved by GC.
-class DirectCEntryStub : public PlatformCodeStub {
- public:
- explicit DirectCEntryStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
- void GenerateCall(MacroAssembler* masm, Register target);
-
- private:
- Movability NeedsImmovableCode() override { return kImmovable; }
-
- DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
- DEFINE_PLATFORM_CODE_STUB(DirectCEntry, PlatformCodeStub);
-};
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_MIPS64_CODE_STUBS_MIPS64_H_
diff --git a/deps/v8/src/mips64/codegen-mips64.cc b/deps/v8/src/mips64/codegen-mips64.cc
deleted file mode 100644
index ac143dd3e5..0000000000
--- a/deps/v8/src/mips64/codegen-mips64.cc
+++ /dev/null
@@ -1,587 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#if V8_TARGET_ARCH_MIPS64
-
-#include <memory>
-
-#include "src/codegen.h"
-#include "src/macro-assembler.h"
-#include "src/mips64/simulator-mips64.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ masm.
-
-#if defined(V8_HOST_ARCH_MIPS)
-
-MemCopyUint8Function CreateMemCopyUint8Function(MemCopyUint8Function stub) {
-#if defined(USE_SIMULATOR)
- return stub;
-#else
- v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
- size_t allocated = 0;
- byte* buffer = AllocatePage(page_allocator,
- page_allocator->GetRandomMmapAddr(), &allocated);
- if (buffer == nullptr) return stub;
-
- MacroAssembler masm(AssemblerOptions{}, buffer, static_cast<int>(allocated));
-
-  // This code assumes that cache lines are 32 bytes; if the cache line is
-  // larger, it will not work correctly.
- {
- Label lastb, unaligned, aligned, chkw,
- loop16w, chk1w, wordCopy_loop, skip_pref, lastbloop,
- leave, ua_chk16w, ua_loop16w, ua_skip_pref, ua_chkw,
- ua_chk1w, ua_wordCopy_loop, ua_smallCopy, ua_smallCopy_loop;
-
- // The size of each prefetch.
- uint32_t pref_chunk = 32;
- // The maximum size of a prefetch, it must not be less than pref_chunk.
- // If the real size of a prefetch is greater than max_pref_size and
- // the kPrefHintPrepareForStore hint is used, the code will not work
- // correctly.
- uint32_t max_pref_size = 128;
- DCHECK(pref_chunk < max_pref_size);
-
-    // pref_limit is set based on the fact that we never use an offset
-    // greater than 5 on a store pref and that a single pref can
-    // never be larger than max_pref_size.
- uint32_t pref_limit = (5 * pref_chunk) + max_pref_size;
- int32_t pref_hint_load = kPrefHintLoadStreamed;
- int32_t pref_hint_store = kPrefHintPrepareForStore;
- uint32_t loadstore_chunk = 4;
-
- // The initial prefetches may fetch bytes that are before the buffer being
-    // copied. Start copies with an offset of 4 to avoid this situation when
- // using kPrefHintPrepareForStore.
- DCHECK(pref_hint_store != kPrefHintPrepareForStore ||
- pref_chunk * 4 >= max_pref_size);
-    // If the size is less than 8, go to lastb. Regardless of size,
-    // copy the dst pointer to v0 for the return value.
- __ slti(a6, a2, 2 * loadstore_chunk);
- __ bne(a6, zero_reg, &lastb);
- __ mov(v0, a0); // In delay slot.
-
- // If src and dst have different alignments, go to unaligned, if they
- // have the same alignment (but are not actually aligned) do a partial
- // load/store to make them aligned. If they are both already aligned
- // we can start copying at aligned.
- __ xor_(t8, a1, a0);
- __ andi(t8, t8, loadstore_chunk - 1); // t8 is a0/a1 word-displacement.
- __ bne(t8, zero_reg, &unaligned);
- __ subu(a3, zero_reg, a0); // In delay slot.
-
- __ andi(a3, a3, loadstore_chunk - 1); // Copy a3 bytes to align a0/a1.
- __ beq(a3, zero_reg, &aligned); // Already aligned.
-    __ subu(a2, a2, a3);  // In delay slot. a2 is the remaining bytes count.
-
- if (kArchEndian == kLittle) {
- __ lwr(t8, MemOperand(a1));
- __ addu(a1, a1, a3);
- __ swr(t8, MemOperand(a0));
- __ addu(a0, a0, a3);
- } else {
- __ lwl(t8, MemOperand(a1));
- __ addu(a1, a1, a3);
- __ swl(t8, MemOperand(a0));
- __ addu(a0, a0, a3);
- }
-
-    // Now dst and src are both word-aligned. Set a2 to
- // count how many bytes we have to copy after all the 64 byte chunks are
- // copied and a3 to the dst pointer after all the 64 byte chunks have been
- // copied. We will loop, incrementing a0 and a1 until a0 equals a3.
- __ bind(&aligned);
- __ andi(t8, a2, 0x3F);
- __ beq(a2, t8, &chkw); // Less than 64?
- __ subu(a3, a2, t8); // In delay slot.
- __ addu(a3, a0, a3); // Now a3 is the final dst after loop.
-
-    // When we prefetch with the kPrefHintPrepareForStore hint inside the
-    // loop, a0+x should not be past the "a4-32" address. This means:
- // for x=128 the last "safe" a0 address is "a4-160". Alternatively, for
- // x=64 the last "safe" a0 address is "a4-96". In the current version we
- // will use "pref hint, 128(a0)", so "a4-160" is the limit.
- if (pref_hint_store == kPrefHintPrepareForStore) {
- __ addu(a4, a0, a2); // a4 is the "past the end" address.
- __ Subu(t9, a4, pref_limit); // t9 is the "last safe pref" address.
- }
-
- __ Pref(pref_hint_load, MemOperand(a1, 0 * pref_chunk));
- __ Pref(pref_hint_load, MemOperand(a1, 1 * pref_chunk));
- __ Pref(pref_hint_load, MemOperand(a1, 2 * pref_chunk));
- __ Pref(pref_hint_load, MemOperand(a1, 3 * pref_chunk));
-
- if (pref_hint_store != kPrefHintPrepareForStore) {
- __ Pref(pref_hint_store, MemOperand(a0, 1 * pref_chunk));
- __ Pref(pref_hint_store, MemOperand(a0, 2 * pref_chunk));
- __ Pref(pref_hint_store, MemOperand(a0, 3 * pref_chunk));
- }
- __ bind(&loop16w);
- __ Lw(a4, MemOperand(a1));
-
- if (pref_hint_store == kPrefHintPrepareForStore) {
- __ sltu(v1, t9, a0); // If a0 > t9, don't use next prefetch.
- __ Branch(USE_DELAY_SLOT, &skip_pref, gt, v1, Operand(zero_reg));
- }
- __ Lw(a5, MemOperand(a1, 1, loadstore_chunk)); // Maybe in delay slot.
-
- __ Pref(pref_hint_store, MemOperand(a0, 4 * pref_chunk));
- __ Pref(pref_hint_store, MemOperand(a0, 5 * pref_chunk));
-
- __ bind(&skip_pref);
- __ Lw(a6, MemOperand(a1, 2, loadstore_chunk));
- __ Lw(a7, MemOperand(a1, 3, loadstore_chunk));
- __ Lw(t0, MemOperand(a1, 4, loadstore_chunk));
- __ Lw(t1, MemOperand(a1, 5, loadstore_chunk));
- __ Lw(t2, MemOperand(a1, 6, loadstore_chunk));
- __ Lw(t3, MemOperand(a1, 7, loadstore_chunk));
- __ Pref(pref_hint_load, MemOperand(a1, 4 * pref_chunk));
-
- __ Sw(a4, MemOperand(a0));
- __ Sw(a5, MemOperand(a0, 1, loadstore_chunk));
- __ Sw(a6, MemOperand(a0, 2, loadstore_chunk));
- __ Sw(a7, MemOperand(a0, 3, loadstore_chunk));
- __ Sw(t0, MemOperand(a0, 4, loadstore_chunk));
- __ Sw(t1, MemOperand(a0, 5, loadstore_chunk));
- __ Sw(t2, MemOperand(a0, 6, loadstore_chunk));
- __ Sw(t3, MemOperand(a0, 7, loadstore_chunk));
-
- __ Lw(a4, MemOperand(a1, 8, loadstore_chunk));
- __ Lw(a5, MemOperand(a1, 9, loadstore_chunk));
- __ Lw(a6, MemOperand(a1, 10, loadstore_chunk));
- __ Lw(a7, MemOperand(a1, 11, loadstore_chunk));
- __ Lw(t0, MemOperand(a1, 12, loadstore_chunk));
- __ Lw(t1, MemOperand(a1, 13, loadstore_chunk));
- __ Lw(t2, MemOperand(a1, 14, loadstore_chunk));
- __ Lw(t3, MemOperand(a1, 15, loadstore_chunk));
- __ Pref(pref_hint_load, MemOperand(a1, 5 * pref_chunk));
-
- __ Sw(a4, MemOperand(a0, 8, loadstore_chunk));
- __ Sw(a5, MemOperand(a0, 9, loadstore_chunk));
- __ Sw(a6, MemOperand(a0, 10, loadstore_chunk));
- __ Sw(a7, MemOperand(a0, 11, loadstore_chunk));
- __ Sw(t0, MemOperand(a0, 12, loadstore_chunk));
- __ Sw(t1, MemOperand(a0, 13, loadstore_chunk));
- __ Sw(t2, MemOperand(a0, 14, loadstore_chunk));
- __ Sw(t3, MemOperand(a0, 15, loadstore_chunk));
- __ addiu(a0, a0, 16 * loadstore_chunk);
- __ bne(a0, a3, &loop16w);
- __ addiu(a1, a1, 16 * loadstore_chunk); // In delay slot.
- __ mov(a2, t8);
-
-    // Here we have src and dest word-aligned but less than 64 bytes to go.
-    // Check for a 32-byte chunk and copy it if there is one. Otherwise jump
- // down to chk1w to handle the tail end of the copy.
- __ bind(&chkw);
- __ Pref(pref_hint_load, MemOperand(a1, 0 * pref_chunk));
- __ andi(t8, a2, 0x1F);
- __ beq(a2, t8, &chk1w); // Less than 32?
- __ nop(); // In delay slot.
- __ Lw(a4, MemOperand(a1));
- __ Lw(a5, MemOperand(a1, 1, loadstore_chunk));
- __ Lw(a6, MemOperand(a1, 2, loadstore_chunk));
- __ Lw(a7, MemOperand(a1, 3, loadstore_chunk));
- __ Lw(t0, MemOperand(a1, 4, loadstore_chunk));
- __ Lw(t1, MemOperand(a1, 5, loadstore_chunk));
- __ Lw(t2, MemOperand(a1, 6, loadstore_chunk));
- __ Lw(t3, MemOperand(a1, 7, loadstore_chunk));
- __ addiu(a1, a1, 8 * loadstore_chunk);
- __ Sw(a4, MemOperand(a0));
- __ Sw(a5, MemOperand(a0, 1, loadstore_chunk));
- __ Sw(a6, MemOperand(a0, 2, loadstore_chunk));
- __ Sw(a7, MemOperand(a0, 3, loadstore_chunk));
- __ Sw(t0, MemOperand(a0, 4, loadstore_chunk));
- __ Sw(t1, MemOperand(a0, 5, loadstore_chunk));
- __ Sw(t2, MemOperand(a0, 6, loadstore_chunk));
- __ Sw(t3, MemOperand(a0, 7, loadstore_chunk));
- __ addiu(a0, a0, 8 * loadstore_chunk);
-
- // Here we have less than 32 bytes to copy. Set up for a loop to copy
- // one word at a time. Set a2 to count how many bytes we have to copy
- // after all the word chunks are copied and a3 to the dst pointer after
- // all the word chunks have been copied. We will loop, incrementing a0
- // and a1 until a0 equals a3.
- __ bind(&chk1w);
- __ andi(a2, t8, loadstore_chunk - 1);
- __ beq(a2, t8, &lastb);
- __ subu(a3, t8, a2); // In delay slot.
- __ addu(a3, a0, a3);
-
- __ bind(&wordCopy_loop);
- __ Lw(a7, MemOperand(a1));
- __ addiu(a0, a0, loadstore_chunk);
- __ addiu(a1, a1, loadstore_chunk);
- __ bne(a0, a3, &wordCopy_loop);
- __ Sw(a7, MemOperand(a0, -1, loadstore_chunk)); // In delay slot.
-
- __ bind(&lastb);
- __ Branch(&leave, le, a2, Operand(zero_reg));
- __ addu(a3, a0, a2);
-
- __ bind(&lastbloop);
- __ Lb(v1, MemOperand(a1));
- __ addiu(a0, a0, 1);
- __ addiu(a1, a1, 1);
- __ bne(a0, a3, &lastbloop);
- __ Sb(v1, MemOperand(a0, -1)); // In delay slot.
-
- __ bind(&leave);
- __ jr(ra);
- __ nop();
-
- // Unaligned case. Only the dst gets aligned so we need to do partial
- // loads of the source followed by normal stores to the dst (once we
- // have aligned the destination).
- __ bind(&unaligned);
- __ andi(a3, a3, loadstore_chunk - 1); // Copy a3 bytes to align a0/a1.
- __ beq(a3, zero_reg, &ua_chk16w);
- __ subu(a2, a2, a3); // In delay slot.
-
- if (kArchEndian == kLittle) {
- __ lwr(v1, MemOperand(a1));
- __ lwl(v1,
- MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
- __ addu(a1, a1, a3);
- __ swr(v1, MemOperand(a0));
- __ addu(a0, a0, a3);
- } else {
- __ lwl(v1, MemOperand(a1));
- __ lwr(v1,
- MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
- __ addu(a1, a1, a3);
- __ swl(v1, MemOperand(a0));
- __ addu(a0, a0, a3);
- }
-
- // Now the dst (but not the source) is aligned. Set a2 to count how many
- // bytes we have to copy after all the 64 byte chunks are copied and a3 to
- // the dst pointer after all the 64 byte chunks have been copied. We will
- // loop, incrementing a0 and a1 until a0 equals a3.
- __ bind(&ua_chk16w);
- __ andi(t8, a2, 0x3F);
- __ beq(a2, t8, &ua_chkw);
- __ subu(a3, a2, t8); // In delay slot.
- __ addu(a3, a0, a3);
-
- if (pref_hint_store == kPrefHintPrepareForStore) {
- __ addu(a4, a0, a2);
- __ Subu(t9, a4, pref_limit);
- }
-
- __ Pref(pref_hint_load, MemOperand(a1, 0 * pref_chunk));
- __ Pref(pref_hint_load, MemOperand(a1, 1 * pref_chunk));
- __ Pref(pref_hint_load, MemOperand(a1, 2 * pref_chunk));
-
- if (pref_hint_store != kPrefHintPrepareForStore) {
- __ Pref(pref_hint_store, MemOperand(a0, 1 * pref_chunk));
- __ Pref(pref_hint_store, MemOperand(a0, 2 * pref_chunk));
- __ Pref(pref_hint_store, MemOperand(a0, 3 * pref_chunk));
- }
-
- __ bind(&ua_loop16w);
- if (kArchEndian == kLittle) {
- __ Pref(pref_hint_load, MemOperand(a1, 3 * pref_chunk));
- __ lwr(a4, MemOperand(a1));
- __ lwr(a5, MemOperand(a1, 1, loadstore_chunk));
- __ lwr(a6, MemOperand(a1, 2, loadstore_chunk));
-
- if (pref_hint_store == kPrefHintPrepareForStore) {
- __ sltu(v1, t9, a0);
- __ Branch(USE_DELAY_SLOT, &ua_skip_pref, gt, v1, Operand(zero_reg));
- }
- __ lwr(a7, MemOperand(a1, 3, loadstore_chunk)); // Maybe in delay slot.
-
- __ Pref(pref_hint_store, MemOperand(a0, 4 * pref_chunk));
- __ Pref(pref_hint_store, MemOperand(a0, 5 * pref_chunk));
-
- __ bind(&ua_skip_pref);
- __ lwr(t0, MemOperand(a1, 4, loadstore_chunk));
- __ lwr(t1, MemOperand(a1, 5, loadstore_chunk));
- __ lwr(t2, MemOperand(a1, 6, loadstore_chunk));
- __ lwr(t3, MemOperand(a1, 7, loadstore_chunk));
- __ lwl(a4,
- MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
- __ lwl(a5,
- MemOperand(a1, 2, loadstore_chunk, MemOperand::offset_minus_one));
- __ lwl(a6,
- MemOperand(a1, 3, loadstore_chunk, MemOperand::offset_minus_one));
- __ lwl(a7,
- MemOperand(a1, 4, loadstore_chunk, MemOperand::offset_minus_one));
- __ lwl(t0,
- MemOperand(a1, 5, loadstore_chunk, MemOperand::offset_minus_one));
- __ lwl(t1,
- MemOperand(a1, 6, loadstore_chunk, MemOperand::offset_minus_one));
- __ lwl(t2,
- MemOperand(a1, 7, loadstore_chunk, MemOperand::offset_minus_one));
- __ lwl(t3,
- MemOperand(a1, 8, loadstore_chunk, MemOperand::offset_minus_one));
- } else {
- __ Pref(pref_hint_load, MemOperand(a1, 3 * pref_chunk));
- __ lwl(a4, MemOperand(a1));
- __ lwl(a5, MemOperand(a1, 1, loadstore_chunk));
- __ lwl(a6, MemOperand(a1, 2, loadstore_chunk));
-
- if (pref_hint_store == kPrefHintPrepareForStore) {
- __ sltu(v1, t9, a0);
- __ Branch(USE_DELAY_SLOT, &ua_skip_pref, gt, v1, Operand(zero_reg));
- }
- __ lwl(a7, MemOperand(a1, 3, loadstore_chunk)); // Maybe in delay slot.
-
- __ Pref(pref_hint_store, MemOperand(a0, 4 * pref_chunk));
- __ Pref(pref_hint_store, MemOperand(a0, 5 * pref_chunk));
-
- __ bind(&ua_skip_pref);
- __ lwl(t0, MemOperand(a1, 4, loadstore_chunk));
- __ lwl(t1, MemOperand(a1, 5, loadstore_chunk));
- __ lwl(t2, MemOperand(a1, 6, loadstore_chunk));
- __ lwl(t3, MemOperand(a1, 7, loadstore_chunk));
- __ lwr(a4,
- MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
- __ lwr(a5,
- MemOperand(a1, 2, loadstore_chunk, MemOperand::offset_minus_one));
- __ lwr(a6,
- MemOperand(a1, 3, loadstore_chunk, MemOperand::offset_minus_one));
- __ lwr(a7,
- MemOperand(a1, 4, loadstore_chunk, MemOperand::offset_minus_one));
- __ lwr(t0,
- MemOperand(a1, 5, loadstore_chunk, MemOperand::offset_minus_one));
- __ lwr(t1,
- MemOperand(a1, 6, loadstore_chunk, MemOperand::offset_minus_one));
- __ lwr(t2,
- MemOperand(a1, 7, loadstore_chunk, MemOperand::offset_minus_one));
- __ lwr(t3,
- MemOperand(a1, 8, loadstore_chunk, MemOperand::offset_minus_one));
- }
- __ Pref(pref_hint_load, MemOperand(a1, 4 * pref_chunk));
- __ Sw(a4, MemOperand(a0));
- __ Sw(a5, MemOperand(a0, 1, loadstore_chunk));
- __ Sw(a6, MemOperand(a0, 2, loadstore_chunk));
- __ Sw(a7, MemOperand(a0, 3, loadstore_chunk));
- __ Sw(t0, MemOperand(a0, 4, loadstore_chunk));
- __ Sw(t1, MemOperand(a0, 5, loadstore_chunk));
- __ Sw(t2, MemOperand(a0, 6, loadstore_chunk));
- __ Sw(t3, MemOperand(a0, 7, loadstore_chunk));
- if (kArchEndian == kLittle) {
- __ lwr(a4, MemOperand(a1, 8, loadstore_chunk));
- __ lwr(a5, MemOperand(a1, 9, loadstore_chunk));
- __ lwr(a6, MemOperand(a1, 10, loadstore_chunk));
- __ lwr(a7, MemOperand(a1, 11, loadstore_chunk));
- __ lwr(t0, MemOperand(a1, 12, loadstore_chunk));
- __ lwr(t1, MemOperand(a1, 13, loadstore_chunk));
- __ lwr(t2, MemOperand(a1, 14, loadstore_chunk));
- __ lwr(t3, MemOperand(a1, 15, loadstore_chunk));
- __ lwl(a4,
- MemOperand(a1, 9, loadstore_chunk, MemOperand::offset_minus_one));
- __ lwl(a5,
- MemOperand(a1, 10, loadstore_chunk, MemOperand::offset_minus_one));
- __ lwl(a6,
- MemOperand(a1, 11, loadstore_chunk, MemOperand::offset_minus_one));
- __ lwl(a7,
- MemOperand(a1, 12, loadstore_chunk, MemOperand::offset_minus_one));
- __ lwl(t0,
- MemOperand(a1, 13, loadstore_chunk, MemOperand::offset_minus_one));
- __ lwl(t1,
- MemOperand(a1, 14, loadstore_chunk, MemOperand::offset_minus_one));
- __ lwl(t2,
- MemOperand(a1, 15, loadstore_chunk, MemOperand::offset_minus_one));
- __ lwl(t3,
- MemOperand(a1, 16, loadstore_chunk, MemOperand::offset_minus_one));
- } else {
- __ lwl(a4, MemOperand(a1, 8, loadstore_chunk));
- __ lwl(a5, MemOperand(a1, 9, loadstore_chunk));
- __ lwl(a6, MemOperand(a1, 10, loadstore_chunk));
- __ lwl(a7, MemOperand(a1, 11, loadstore_chunk));
- __ lwl(t0, MemOperand(a1, 12, loadstore_chunk));
- __ lwl(t1, MemOperand(a1, 13, loadstore_chunk));
- __ lwl(t2, MemOperand(a1, 14, loadstore_chunk));
- __ lwl(t3, MemOperand(a1, 15, loadstore_chunk));
- __ lwr(a4,
- MemOperand(a1, 9, loadstore_chunk, MemOperand::offset_minus_one));
- __ lwr(a5,
- MemOperand(a1, 10, loadstore_chunk, MemOperand::offset_minus_one));
- __ lwr(a6,
- MemOperand(a1, 11, loadstore_chunk, MemOperand::offset_minus_one));
- __ lwr(a7,
- MemOperand(a1, 12, loadstore_chunk, MemOperand::offset_minus_one));
- __ lwr(t0,
- MemOperand(a1, 13, loadstore_chunk, MemOperand::offset_minus_one));
- __ lwr(t1,
- MemOperand(a1, 14, loadstore_chunk, MemOperand::offset_minus_one));
- __ lwr(t2,
- MemOperand(a1, 15, loadstore_chunk, MemOperand::offset_minus_one));
- __ lwr(t3,
- MemOperand(a1, 16, loadstore_chunk, MemOperand::offset_minus_one));
- }
- __ Pref(pref_hint_load, MemOperand(a1, 5 * pref_chunk));
- __ Sw(a4, MemOperand(a0, 8, loadstore_chunk));
- __ Sw(a5, MemOperand(a0, 9, loadstore_chunk));
- __ Sw(a6, MemOperand(a0, 10, loadstore_chunk));
- __ Sw(a7, MemOperand(a0, 11, loadstore_chunk));
- __ Sw(t0, MemOperand(a0, 12, loadstore_chunk));
- __ Sw(t1, MemOperand(a0, 13, loadstore_chunk));
- __ Sw(t2, MemOperand(a0, 14, loadstore_chunk));
- __ Sw(t3, MemOperand(a0, 15, loadstore_chunk));
- __ addiu(a0, a0, 16 * loadstore_chunk);
- __ bne(a0, a3, &ua_loop16w);
- __ addiu(a1, a1, 16 * loadstore_chunk); // In delay slot.
- __ mov(a2, t8);
-
-    // Here we have less than 64 bytes to go. Check for
-    // a 32-byte chunk and copy it if there is one. Otherwise jump down to
-    // ua_chk1w to handle the tail end of the copy.
- __ bind(&ua_chkw);
- __ Pref(pref_hint_load, MemOperand(a1));
- __ andi(t8, a2, 0x1F);
-
- __ beq(a2, t8, &ua_chk1w);
- __ nop(); // In delay slot.
- if (kArchEndian == kLittle) {
- __ lwr(a4, MemOperand(a1));
- __ lwr(a5, MemOperand(a1, 1, loadstore_chunk));
- __ lwr(a6, MemOperand(a1, 2, loadstore_chunk));
- __ lwr(a7, MemOperand(a1, 3, loadstore_chunk));
- __ lwr(t0, MemOperand(a1, 4, loadstore_chunk));
- __ lwr(t1, MemOperand(a1, 5, loadstore_chunk));
- __ lwr(t2, MemOperand(a1, 6, loadstore_chunk));
- __ lwr(t3, MemOperand(a1, 7, loadstore_chunk));
- __ lwl(a4,
- MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
- __ lwl(a5,
- MemOperand(a1, 2, loadstore_chunk, MemOperand::offset_minus_one));
- __ lwl(a6,
- MemOperand(a1, 3, loadstore_chunk, MemOperand::offset_minus_one));
- __ lwl(a7,
- MemOperand(a1, 4, loadstore_chunk, MemOperand::offset_minus_one));
- __ lwl(t0,
- MemOperand(a1, 5, loadstore_chunk, MemOperand::offset_minus_one));
- __ lwl(t1,
- MemOperand(a1, 6, loadstore_chunk, MemOperand::offset_minus_one));
- __ lwl(t2,
- MemOperand(a1, 7, loadstore_chunk, MemOperand::offset_minus_one));
- __ lwl(t3,
- MemOperand(a1, 8, loadstore_chunk, MemOperand::offset_minus_one));
- } else {
- __ lwl(a4, MemOperand(a1));
- __ lwl(a5, MemOperand(a1, 1, loadstore_chunk));
- __ lwl(a6, MemOperand(a1, 2, loadstore_chunk));
- __ lwl(a7, MemOperand(a1, 3, loadstore_chunk));
- __ lwl(t0, MemOperand(a1, 4, loadstore_chunk));
- __ lwl(t1, MemOperand(a1, 5, loadstore_chunk));
- __ lwl(t2, MemOperand(a1, 6, loadstore_chunk));
- __ lwl(t3, MemOperand(a1, 7, loadstore_chunk));
- __ lwr(a4,
- MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
- __ lwr(a5,
- MemOperand(a1, 2, loadstore_chunk, MemOperand::offset_minus_one));
- __ lwr(a6,
- MemOperand(a1, 3, loadstore_chunk, MemOperand::offset_minus_one));
- __ lwr(a7,
- MemOperand(a1, 4, loadstore_chunk, MemOperand::offset_minus_one));
- __ lwr(t0,
- MemOperand(a1, 5, loadstore_chunk, MemOperand::offset_minus_one));
- __ lwr(t1,
- MemOperand(a1, 6, loadstore_chunk, MemOperand::offset_minus_one));
- __ lwr(t2,
- MemOperand(a1, 7, loadstore_chunk, MemOperand::offset_minus_one));
- __ lwr(t3,
- MemOperand(a1, 8, loadstore_chunk, MemOperand::offset_minus_one));
- }
- __ addiu(a1, a1, 8 * loadstore_chunk);
- __ Sw(a4, MemOperand(a0));
- __ Sw(a5, MemOperand(a0, 1, loadstore_chunk));
- __ Sw(a6, MemOperand(a0, 2, loadstore_chunk));
- __ Sw(a7, MemOperand(a0, 3, loadstore_chunk));
- __ Sw(t0, MemOperand(a0, 4, loadstore_chunk));
- __ Sw(t1, MemOperand(a0, 5, loadstore_chunk));
- __ Sw(t2, MemOperand(a0, 6, loadstore_chunk));
- __ Sw(t3, MemOperand(a0, 7, loadstore_chunk));
- __ addiu(a0, a0, 8 * loadstore_chunk);
-
- // Less than 32 bytes to copy. Set up for a loop to
- // copy one word at a time.
- __ bind(&ua_chk1w);
- __ andi(a2, t8, loadstore_chunk - 1);
- __ beq(a2, t8, &ua_smallCopy);
- __ subu(a3, t8, a2); // In delay slot.
- __ addu(a3, a0, a3);
-
- __ bind(&ua_wordCopy_loop);
- if (kArchEndian == kLittle) {
- __ lwr(v1, MemOperand(a1));
- __ lwl(v1,
- MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
- } else {
- __ lwl(v1, MemOperand(a1));
- __ lwr(v1,
- MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
- }
- __ addiu(a0, a0, loadstore_chunk);
- __ addiu(a1, a1, loadstore_chunk);
- __ bne(a0, a3, &ua_wordCopy_loop);
- __ Sw(v1, MemOperand(a0, -1, loadstore_chunk)); // In delay slot.
-
- // Copy the last 8 bytes.
- __ bind(&ua_smallCopy);
- __ beq(a2, zero_reg, &leave);
- __ addu(a3, a0, a2); // In delay slot.
-
- __ bind(&ua_smallCopy_loop);
- __ Lb(v1, MemOperand(a1));
- __ addiu(a0, a0, 1);
- __ addiu(a1, a1, 1);
- __ bne(a0, a3, &ua_smallCopy_loop);
- __ Sb(v1, MemOperand(a0, -1)); // In delay slot.
-
- __ jr(ra);
- __ nop();
- }
- CodeDesc desc;
- masm.GetCode(nullptr, &desc);
- DCHECK(!RelocInfo::RequiresRelocationAfterCodegen(desc));
-
- Assembler::FlushICache(buffer, allocated);
- CHECK(SetPermissions(page_allocator, buffer, allocated,
- PageAllocator::kReadExecute));
- return FUNCTION_CAST<MemCopyUint8Function>(buffer);
-#endif
-}
-#endif
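-// A hedged C sketch of the copy strategy implemented above; prefetching and
-// the lwr/lwl partial loads are elided, and memcpy stands in for the
-// unaligned word accesses:
-//   void MemCopyUint8(uint8_t* dst, const uint8_t* src, size_t n) {
-//     while (n > 0 && ((uintptr_t)dst & 3) != 0) { *dst++ = *src++; n--; }
-//     while (n >= 4) {                        // word loop ("wordCopy_loop")
-//       uint32_t w;
-//       memcpy(&w, src, 4);                   // tolerates unaligned src
-//       memcpy(dst, &w, 4);
-//       dst += 4; src += 4; n -= 4;
-//     }
-//     while (n-- > 0) *dst++ = *src++;        // byte tail ("lastb")
-//   }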
-
-UnaryMathFunction CreateSqrtFunction() {
-#if defined(USE_SIMULATOR)
- return nullptr;
-#else
- v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
- size_t allocated = 0;
- byte* buffer = AllocatePage(page_allocator,
- page_allocator->GetRandomMmapAddr(), &allocated);
- if (buffer == nullptr) return nullptr;
-
- MacroAssembler masm(AssemblerOptions{}, buffer, static_cast<int>(allocated));
-
- __ MovFromFloatParameter(f12);
- __ sqrt_d(f0, f12);
- __ MovToFloatResult(f0);
- __ Ret();
-
- CodeDesc desc;
- masm.GetCode(nullptr, &desc);
- DCHECK(!RelocInfo::RequiresRelocationAfterCodegen(desc));
-
- Assembler::FlushICache(buffer, allocated);
- CHECK(SetPermissions(page_allocator, buffer, allocated,
- PageAllocator::kReadExecute));
- return FUNCTION_CAST<UnaryMathFunction>(buffer);
-#endif
-}
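-// A hedged usage note: callers invoke the returned thunk directly, falling
-// back to the library routine when code generation is unavailable:
-//   UnaryMathFunction fast_sqrt = CreateSqrtFunction();
-//   double r = fast_sqrt != nullptr ? fast_sqrt(2.0) : std::sqrt(2.0);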
-
-#undef __
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_TARGET_ARCH_MIPS64
diff --git a/deps/v8/src/mips64/cpu-mips64.cc b/deps/v8/src/mips64/cpu-mips64.cc
index cc8ecdbd5a..db2002d5ed 100644
--- a/deps/v8/src/mips64/cpu-mips64.cc
+++ b/deps/v8/src/mips64/cpu-mips64.cc
@@ -13,10 +13,7 @@
#if V8_TARGET_ARCH_MIPS64
-#include "src/assembler.h"
-#include "src/macro-assembler.h"
-
-#include "src/simulator.h" // For cache flushing.
+#include "src/cpu-features.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/mips64/deoptimizer-mips64.cc b/deps/v8/src/mips64/deoptimizer-mips64.cc
index 62d25e53b9..69bb895e58 100644
--- a/deps/v8/src/mips64/deoptimizer-mips64.cc
+++ b/deps/v8/src/mips64/deoptimizer-mips64.cc
@@ -2,21 +2,22 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/assembler-inl.h"
#include "src/deoptimizer.h"
+#include "src/macro-assembler.h"
#include "src/register-configuration.h"
#include "src/safepoint-table.h"
namespace v8 {
namespace internal {
-#define __ masm()->
-
+#define __ masm->
// This code tries to be close to ia32 code so that any changes can be
// easily ported.
-void Deoptimizer::TableEntryGenerator::Generate() {
- GeneratePrologue();
+void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
+ Isolate* isolate,
+ DeoptimizeKind deopt_kind) {
+ NoRootArrayScope no_root_array(masm);
// Unlike on ARM we don't save all the registers, just the useful ones.
// For the rest, there are gaps on the stack, so the offsets remain the same.
@@ -57,21 +58,20 @@ void Deoptimizer::TableEntryGenerator::Generate() {
}
__ li(a2, Operand(ExternalReference::Create(
- IsolateAddressId::kCEntryFPAddress, isolate())));
+ IsolateAddressId::kCEntryFPAddress, isolate)));
__ Sd(fp, MemOperand(a2));
const int kSavedRegistersAreaSize =
(kNumberOfRegisters * kPointerSize) + kDoubleRegsSize + kFloatRegsSize;
- // Get the bailout id from the stack.
- __ Ld(a2, MemOperand(sp, kSavedRegistersAreaSize));
+  // The bailout id is passed in kRootRegister by the caller.
+ __ mov(a2, kRootRegister);
// Get the address of the location in the code object (a3) (return
// address for lazy deoptimization) and compute the fp-to-sp delta in
// register a4.
__ mov(a3, ra);
- // Correct one word for bailout id.
- __ Daddu(a4, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
+ __ Daddu(a4, sp, Operand(kSavedRegistersAreaSize));
__ Dsubu(a4, fp, a4);
@@ -84,15 +84,15 @@ void Deoptimizer::TableEntryGenerator::Generate() {
__ JumpIfSmi(a1, &context_check);
__ Ld(a0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ bind(&context_check);
- __ li(a1, Operand(static_cast<int>(deopt_kind())));
+ __ li(a1, Operand(static_cast<int>(deopt_kind)));
// a2: bailout id already loaded.
// a3: code address or 0 already loaded.
// a4: already has fp-to-sp delta.
- __ li(a5, Operand(ExternalReference::isolate_address(isolate())));
+ __ li(a5, Operand(ExternalReference::isolate_address(isolate)));
// Call Deoptimizer::New().
{
- AllowExternalCallThatCantCauseGC scope(masm());
+ AllowExternalCallThatCantCauseGC scope(masm);
__ CallCFunction(ExternalReference::new_deoptimizer_function(), 6);
}
@@ -138,8 +138,8 @@ void Deoptimizer::TableEntryGenerator::Generate() {
__ Swc1(f0, MemOperand(a1, dst_offset));
}
- // Remove the bailout id and the saved registers from the stack.
- __ Daddu(sp, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
+ // Remove the saved registers from the stack.
+ __ Daddu(sp, sp, Operand(kSavedRegistersAreaSize));
// Compute a pointer to the unwinding limit in register a2; that is
// the first stack slot not part of the input frame.
@@ -165,7 +165,7 @@ void Deoptimizer::TableEntryGenerator::Generate() {
__ PrepareCallCFunction(1, a1);
// Call Deoptimizer::ComputeOutputFrames().
{
- AllowExternalCallThatCantCauseGC scope(masm());
+ AllowExternalCallThatCantCauseGC scope(masm);
__ CallCFunction(ExternalReference::compute_output_frames_function(), 1);
}
__ pop(a0); // Restore deoptimizer object (class Deoptimizer).
@@ -224,15 +224,12 @@ void Deoptimizer::TableEntryGenerator::Generate() {
}
}
- __ InitializeRootRegister();
-
__ pop(at); // Get continuation, leave pc on stack.
__ pop(ra);
__ Jump(at);
__ stop("Unreachable.");
}
-
// Maximum size of a table entry generated below.
#ifdef _MIPS_ARCH_MIPS64R6
const int Deoptimizer::table_entry_size_ = 2 * kInstrSize;
@@ -240,73 +237,6 @@ const int Deoptimizer::table_entry_size_ = 2 * kInstrSize;
const int Deoptimizer::table_entry_size_ = 3 * kInstrSize;
#endif
-void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
- Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm());
-
- // Create a sequence of deoptimization entries.
- // Note that registers are still live when jumping to an entry.
- Label table_start, done, trampoline_jump;
- __ bind(&table_start);
-#ifdef _MIPS_ARCH_MIPS64R6
- int kMaxEntriesBranchReach =
- (1 << (kImm26Bits - 2)) / (table_entry_size_ / kInstrSize);
-#else
- int kMaxEntriesBranchReach =
- (1 << (kImm16Bits - 2)) / (table_entry_size_ / kInstrSize);
-#endif
-
- if (count() <= kMaxEntriesBranchReach) {
- // Common case.
- for (int i = 0; i < count(); i++) {
- Label start;
- __ bind(&start);
- DCHECK(is_int16(i));
- if (kArchVariant == kMips64r6) {
- __ li(kScratchReg, i);
- __ BranchShort(PROTECT, &done);
- } else {
- __ BranchShort(USE_DELAY_SLOT, &done); // Expose delay slot.
- __ li(kScratchReg, i); // In the delay slot.
- __ nop();
- }
-
- DCHECK_EQ(table_entry_size_, masm()->SizeOfCodeGeneratedSince(&start));
- }
-
- DCHECK_EQ(masm()->SizeOfCodeGeneratedSince(&table_start),
- count() * table_entry_size_);
- __ bind(&done);
- __ Push(kScratchReg);
- } else {
- DCHECK_NE(kArchVariant, kMips64r6);
- // Uncommon case, the branch cannot reach.
- // Create mini trampoline to reach the end of the table
- for (int i = 0, j = 0; i < count(); i++, j++) {
- Label start;
- __ bind(&start);
- DCHECK(is_int16(i));
- if (j >= kMaxEntriesBranchReach) {
- j = 0;
- __ li(kScratchReg, i);
- __ bind(&trampoline_jump);
- trampoline_jump = Label();
- __ BranchShort(USE_DELAY_SLOT, &trampoline_jump);
- __ nop();
- } else {
- __ BranchShort(USE_DELAY_SLOT, &trampoline_jump); // Expose delay slot.
- __ li(kScratchReg, i); // In the delay slot.
- __ nop();
- }
- DCHECK_EQ(table_entry_size_, masm()->SizeOfCodeGeneratedSince(&start));
- }
-
- DCHECK_EQ(masm()->SizeOfCodeGeneratedSince(&table_start),
- count() * table_entry_size_);
- __ bind(&trampoline_jump);
- __ Push(kScratchReg);
- }
-}
-
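// A hedged note on the replacement scheme: the deleted per-id entry table is
// superseded by a convention where the caller materializes the bailout id in
// kRootRegister before jumping to the single deoptimization entry, roughly:
//   __ li(kRootRegister, Operand(bailout_id));  // hypothetical caller side
//   __ Jump(deopt_entry, RelocInfo::RUNTIME_ENTRY);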
bool Deoptimizer::PadTopOfStackRegister() { return false; }
void FrameDescription::SetCallerPc(unsigned offset, intptr_t value) {
diff --git a/deps/v8/src/mips64/frame-constants-mips64.h b/deps/v8/src/mips64/frame-constants-mips64.h
index e91ccf9480..731eb7c99e 100644
--- a/deps/v8/src/mips64/frame-constants-mips64.h
+++ b/deps/v8/src/mips64/frame-constants-mips64.h
@@ -13,6 +13,8 @@ namespace internal {
class EntryFrameConstants : public AllStatic {
public:
+ // This is the offset to where JSEntry pushes the current value of
+ // Isolate::c_entry_fp onto the stack.
static constexpr int kCallerFPOffset =
-(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize);
};
@@ -39,7 +41,7 @@ class ExitFrameConstants : public TypedFrameConstants {
class WasmCompileLazyFrameConstants : public TypedFrameConstants {
public:
- static constexpr int kNumberOfSavedGpParamRegs = 8;
+ static constexpr int kNumberOfSavedGpParamRegs = 7;
static constexpr int kNumberOfSavedFpParamRegs = 7;
// FP-relative.
diff --git a/deps/v8/src/mips64/interface-descriptors-mips64.cc b/deps/v8/src/mips64/interface-descriptors-mips64.cc
index a6c7bfa4ba..4446732ea1 100644
--- a/deps/v8/src/mips64/interface-descriptors-mips64.cc
+++ b/deps/v8/src/mips64/interface-descriptors-mips64.cc
@@ -6,6 +6,8 @@
#include "src/interface-descriptors.h"
+#include "src/frames.h"
+
namespace v8 {
namespace internal {
@@ -20,6 +22,33 @@ void CallInterfaceDescriptor::DefaultInitializePlatformSpecific(
default_stub_registers);
}
+// On MIPS it is not allowed to use odd-numbered floating point registers
+// (e.g. f1, f3, etc.) for parameters. This can happen if we use
+// DefaultInitializePlatformSpecific to assign float registers for parameters.
+// E.g. if the fourth parameter went to a float register, f7 would be assigned
+// for that parameter (a3 cast to int is 7).
+bool CallInterfaceDescriptor::IsValidFloatParameterRegister(Register reg) {
+ return reg.code() % 2 == 0;
+}
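+// A hedged example of the rule: even FPU codes pass and odd codes fail, so
+// f12 (code 12) is accepted while f7 (code 7) is rejected:
+//   DCHECK(CallInterfaceDescriptor::IsValidFloatParameterRegister(
+//       Register::from_code(12)));   // f12
+//   DCHECK(!CallInterfaceDescriptor::IsValidFloatParameterRegister(
+//       Register::from_code(7)));    // f7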
+
+void WasmI32AtomicWaitDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+  /* Register t0 corresponds to the f12 FPU register. */
+ const Register default_stub_registers[] = {a0, a1, t0};
+ CHECK_EQ(static_cast<size_t>(kParameterCount),
+ arraysize(default_stub_registers));
+ data->InitializePlatformSpecific(kParameterCount, default_stub_registers);
+}
+
+void WasmI64AtomicWaitDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+  /* Register t0 corresponds to the f12 FPU register. */
+ const Register default_stub_registers[] = {a0, a1, a2, t0};
+ CHECK_EQ(static_cast<size_t>(kParameterCount),
+ arraysize(default_stub_registers));
+ data->InitializePlatformSpecific(kParameterCount, default_stub_registers);
+}
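Note on the even-register rule above: a minimal standalone sketch (not V8 code; the Reg type and register codes are hypothetical) of the predicate that rejects odd FPU codes, so a fourth parameter landing on a3 (code 7) cannot silently map to f7.

    #include <cassert>

    struct Reg { int code; };  // stand-in for V8's Register

    bool IsValidFloatParameterRegister(Reg reg) {
      return reg.code % 2 == 0;  // only even FPU codes may carry parameters
    }

    int main() {
      assert(IsValidFloatParameterRegister(Reg{12}));  // f12: even, accepted
      assert(!IsValidFloatParameterRegister(Reg{7}));  // f7: odd, rejected
    }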
+
void RecordWriteDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
const Register default_stub_registers[] = {a0, a1, a2, a3, kReturnRegister0};
@@ -70,12 +99,6 @@ void TypeofDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void CallFunctionDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {a1};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
void CallTrampolineDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// a1: target
@@ -208,10 +231,9 @@ void ArgumentsAdaptorDescriptor::InitializePlatformSpecific(
void ApiCallbackDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
- JavaScriptFrame::context_register(), // callee context
- a4, // call_data
- a2, // holder
- a1, // api_function_address
+ JavaScriptFrame::context_register(), // kTargetContext
+ a1, // kApiFunctionAddress
+ a2, // kArgc
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
@@ -263,6 +285,12 @@ void FrameDropperTrampolineDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+void RunMicrotasksEntryDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {a0, a1};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/mips64/macro-assembler-mips64.cc b/deps/v8/src/mips64/macro-assembler-mips64.cc
index dd3b51eba5..748aa18dda 100644
--- a/deps/v8/src/mips64/macro-assembler-mips64.cc
+++ b/deps/v8/src/mips64/macro-assembler-mips64.cc
@@ -6,41 +6,33 @@
#if V8_TARGET_ARCH_MIPS64
+#include "src/assembler-inl.h"
#include "src/base/bits.h"
#include "src/base/division-by-constant.h"
#include "src/bootstrapper.h"
#include "src/callable.h"
#include "src/code-factory.h"
-#include "src/code-stubs.h"
+#include "src/counters.h"
#include "src/debug/debug.h"
#include "src/external-reference-table.h"
#include "src/frames-inl.h"
-#include "src/instruction-stream.h"
-#include "src/mips64/assembler-mips64-inl.h"
-#include "src/mips64/macro-assembler-mips64.h"
+#include "src/macro-assembler.h"
+#include "src/objects/heap-number.h"
#include "src/register-configuration.h"
#include "src/runtime/runtime.h"
+#include "src/snapshot/embedded-data.h"
#include "src/snapshot/snapshot.h"
#include "src/wasm/wasm-code-manager.h"
+// Satisfy the cpplint check, but don't include the platform-specific header.
+// It is included recursively via macro-assembler.h.
+#if 0
+#include "src/mips64/macro-assembler-mips64.h"
+#endif
+
namespace v8 {
namespace internal {
-MacroAssembler::MacroAssembler(Isolate* isolate,
- const AssemblerOptions& options, void* buffer,
- int size, CodeObjectRequired create_code_object)
- : TurboAssembler(isolate, options, buffer, size, create_code_object) {
- if (create_code_object == CodeObjectRequired::kYes) {
- // Unlike TurboAssembler, which can be used off the main thread and may not
- // allocate, macro assembler creates its own copy of the self-reference
- // marker in order to disambiguate between self-references during nested
- // code generation (e.g.: codegen of the current object triggers stub
- // compilation through CodeStub::GetCode()).
- code_object_ = Handle<HeapObject>::New(
- *isolate->factory()->NewSelfReferenceMarker(), isolate);
- }
-}
-
static inline bool IsZero(const Operand& rt) {
if (rt.is_reg()) {
return rt.rm() == zero_reg;
@@ -128,14 +120,14 @@ int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
}
void TurboAssembler::LoadRoot(Register destination, RootIndex index) {
- Ld(destination, MemOperand(s6, RootRegisterOffset(index)));
+ Ld(destination, MemOperand(s6, RootRegisterOffsetForRootIndex(index)));
}
void TurboAssembler::LoadRoot(Register destination, RootIndex index,
Condition cond, Register src1,
const Operand& src2) {
Branch(2, NegateCondition(cond), src1, src2);
- Ld(destination, MemOperand(s6, RootRegisterOffset(index)));
+ Ld(destination, MemOperand(s6, RootRegisterOffsetForRootIndex(index)));
}
@@ -259,24 +251,42 @@ void TurboAssembler::RestoreRegisters(RegList registers) {
void TurboAssembler::CallRecordWriteStub(
Register object, Register address,
RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode) {
+ CallRecordWriteStub(
+ object, address, remembered_set_action, fp_mode,
+ isolate()->builtins()->builtin_handle(Builtins::kRecordWrite),
+ kNullAddress);
+}
+
+void TurboAssembler::CallRecordWriteStub(
+ Register object, Register address,
+ RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode,
+ Address wasm_target) {
+ CallRecordWriteStub(object, address, remembered_set_action, fp_mode,
+ Handle<Code>::null(), wasm_target);
+}
+
+void TurboAssembler::CallRecordWriteStub(
+ Register object, Register address,
+ RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode,
+ Handle<Code> code_target, Address wasm_target) {
+ DCHECK_NE(code_target.is_null(), wasm_target == kNullAddress);
  // TODO(albertnetymk): For now we ignore remembered_set_action and fp_mode,
  // i.e. we always emit the remembered set and save FP registers in
  // RecordWriteStub. If a large performance regression is observed, we should
  // use these values to avoid unnecessary work.
- Callable const callable =
- Builtins::CallableFor(isolate(), Builtins::kRecordWrite);
- RegList registers = callable.descriptor().allocatable_registers();
+ RecordWriteDescriptor descriptor;
+ RegList registers = descriptor.allocatable_registers();
SaveRegisters(registers);
- Register object_parameter(callable.descriptor().GetRegisterParameter(
- RecordWriteDescriptor::kObject));
+ Register object_parameter(
+ descriptor.GetRegisterParameter(RecordWriteDescriptor::kObject));
Register slot_parameter(
- callable.descriptor().GetRegisterParameter(RecordWriteDescriptor::kSlot));
- Register remembered_set_parameter(callable.descriptor().GetRegisterParameter(
- RecordWriteDescriptor::kRememberedSet));
- Register fp_mode_parameter(callable.descriptor().GetRegisterParameter(
- RecordWriteDescriptor::kFPMode));
+ descriptor.GetRegisterParameter(RecordWriteDescriptor::kSlot));
+ Register remembered_set_parameter(
+ descriptor.GetRegisterParameter(RecordWriteDescriptor::kRememberedSet));
+ Register fp_mode_parameter(
+ descriptor.GetRegisterParameter(RecordWriteDescriptor::kFPMode));
Push(object);
Push(address);
@@ -286,7 +296,11 @@ void TurboAssembler::CallRecordWriteStub(
Move(remembered_set_parameter, Smi::FromEnum(remembered_set_action));
Move(fp_mode_parameter, Smi::FromEnum(fp_mode));
- Call(callable.code(), RelocInfo::CODE_TARGET);
+ if (code_target.is_null()) {
+ Call(wasm_target, RelocInfo::WASM_STUB_CALL);
+ } else {
+ Call(code_target, RelocInfo::CODE_TARGET);
+ }
RestoreRegisters(registers);
}
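The three CallRecordWriteStub overloads above funnel into a single body that must receive exactly one live target: either a code object or a wasm stub address, never both and never neither. A minimal sketch of that exclusive-or contract, with hypothetical CodeHandle/Address stand-ins for V8's Handle<Code> and Address:

    #include <cassert>
    #include <cstdint>

    using Address = uintptr_t;
    constexpr Address kNullAddress = 0;

    struct CodeHandle {
      bool null;
      bool is_null() const { return null; }
    };

    void CallStub(CodeHandle code_target, Address wasm_target) {
      // Mirrors DCHECK_NE(code_target.is_null(), wasm_target == kNullAddress).
      assert(code_target.is_null() != (wasm_target == kNullAddress));
      if (code_target.is_null()) {
        // Call(wasm_target, RelocInfo::WASM_STUB_CALL) in the real code.
      } else {
        // Call(code_target, RelocInfo::CODE_TARGET) in the real code.
      }
    }

    int main() {
      CallStub(CodeHandle{true}, Address{0x1234});  // wasm path
      CallStub(CodeHandle{false}, kNullAddress);    // code-object path
    }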
@@ -2040,7 +2054,7 @@ void TurboAssembler::Dins(Register rt, Register rs, uint16_t pos,
void TurboAssembler::ExtractBits(Register dest, Register source, Register pos,
int size, bool sign_extend) {
- srav(dest, source, pos);
+ dsrav(dest, source, pos);
Dext(dest, dest, 0, size);
if (sign_extend) {
switch (size) {
@@ -2062,14 +2076,13 @@ void TurboAssembler::ExtractBits(Register dest, Register source, Register pos,
void TurboAssembler::InsertBits(Register dest, Register source, Register pos,
int size) {
- Ror(dest, dest, pos);
+ Dror(dest, dest, pos);
Dins(dest, source, 0, size);
{
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
- Dsubu(scratch, pos, Operand(64));
- Neg(scratch, Operand(scratch));
- Ror(dest, dest, scratch);
+ Dsubu(scratch, zero_reg, pos);
+ Dror(dest, dest, scratch);
}
}
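The InsertBits change above relies on 64-bit rotate amounts being taken modulo 64: rotating by 0 - pos is the same as rotating by 64 - pos, which is why a single Dsubu from zero_reg replaces the old subtract-and-negate pair. A small self-contained check of that identity (plain C++, not V8 code):

    #include <cassert>
    #include <cstdint>

    uint64_t Rotr64(uint64_t x, unsigned n) {
      n &= 63;  // the hardware masks the rotate amount to 6 bits
      return n == 0 ? x : (x >> n) | (x << (64 - n));
    }

    int main() {
      const uint64_t v = 0x0123456789ABCDEFull;
      for (unsigned pos = 0; pos < 64; ++pos) {
        assert(Rotr64(v, 64 - pos) == Rotr64(v, 0u - pos));  // -pos mod 64
      }
    }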
@@ -4125,8 +4138,7 @@ bool TurboAssembler::BranchAndLinkShortCheck(int32_t offset, Label* L,
void TurboAssembler::LoadFromConstantsTable(Register destination,
int constant_index) {
- DCHECK(isolate()->heap()->RootCanBeTreatedAsConstant(
- RootIndex::kBuiltinsConstantsTable));
+ DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kBuiltinsConstantsTable));
LoadRoot(destination, RootIndex::kBuiltinsConstantsTable);
Ld(destination,
FieldMemOperand(destination,
@@ -4283,6 +4295,61 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
Call(code.address(), rmode, cond, rs, rt, bd);
}
+void TurboAssembler::CallBuiltinPointer(Register builtin_pointer) {
+ STATIC_ASSERT(kSystemPointerSize == 8);
+ STATIC_ASSERT(kSmiShiftSize == 31);
+ STATIC_ASSERT(kSmiTagSize == 1);
+ STATIC_ASSERT(kSmiTag == 0);
+
+ // The builtin_pointer register contains the builtin index as a Smi.
+ SmiUntag(builtin_pointer, builtin_pointer);
+ Dlsa(builtin_pointer, kRootRegister, builtin_pointer, kSystemPointerSizeLog2);
+ Ld(builtin_pointer,
+ MemOperand(builtin_pointer, IsolateData::builtin_entry_table_offset()));
+ Call(builtin_pointer);
+}
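For CallBuiltinPointer above: with kSmiShiftSize == 31 plus a one-bit tag, the builtin index sits in the upper 32 bits of the tagged word, so untagging is an arithmetic shift by 32, and the Dlsa scales the untagged index by the 8-byte entry size before the load from the builtin entry table. A minimal arithmetic sketch (the concrete index is made up for illustration):

    #include <cassert>
    #include <cstdint>

    int64_t SmiTag(int64_t value) { return value << 32; }  // 31-bit shift + tag
    int64_t SmiUntag(int64_t tagged) { return tagged >> 32; }

    int main() {
      constexpr int kSystemPointerSizeLog2 = 3;  // 8-byte entries
      int64_t tagged_index = SmiTag(42);         // builtin #42 as a Smi
      int64_t index = SmiUntag(tagged_index);    // back to 42
      int64_t table_offset = index << kSystemPointerSizeLog2;  // 42 * 8
      assert(table_offset == 336);  // added to the entry table base by Dlsa/Ld
    }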
+
+void TurboAssembler::StoreReturnAddressAndCall(Register target) {
+ // This generates the final instruction sequence for calls to C functions
+ // once an exit frame has been constructed.
+ //
+ // Note that this assumes the caller code (i.e. the Code object currently
+ // being generated) is immovable or that the callee function cannot trigger
+ // GC, since the callee function will return to it.
+
+  // Compute the return address in ra to return to after the jump below. The pc
+  // is already at '+ 8' from the current instruction, but the return is after
+  // three instructions, so add another 4 to pc to get the return address.
+
+ Assembler::BlockTrampolinePoolScope block_trampoline_pool(this);
+ static constexpr int kNumInstructionsToJump = 4;
+ Label find_ra;
+  // Adjust the value in ra to point to the correct return location, the 2nd
+  // instruction past the real call into C code (the jalr(t9)), and push it.
+  // This is the return address of the exit frame.
+ if (kArchVariant >= kMips64r6) {
+ addiupc(ra, kNumInstructionsToJump + 1);
+ } else {
+    // This no-op-and-link sequence saves PC + 8 in the ra register on pre-r6
+    // MIPS.
+ nal(); // nal has branch delay slot.
+ Daddu(ra, ra, kNumInstructionsToJump * kInstrSize);
+ }
+ bind(&find_ra);
+
+ // This spot was reserved in EnterExitFrame.
+ Sd(ra, MemOperand(sp));
+ // Stack space reservation moved to the branch delay slot below.
+ // Stack is still aligned.
+
+ // Call the C routine.
+ mov(t9, target); // Function pointer to t9 to conform to ABI for PIC.
+ jalr(t9);
+ // Set up sp in the delay slot.
+ daddiu(sp, sp, -kCArgsSlotsSize);
+ // Make sure the stored 'ra' points to this position.
+ DCHECK_EQ(kNumInstructionsToJump, InstructionsGeneratedSince(&find_ra));
+}
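The return-address arithmetic in StoreReturnAddressAndCall above, as plain numbers (the starting pc is hypothetical): on pre-r6 MIPS, nal deposits pc + 8 into ra, and adding kNumInstructionsToJump * kInstrSize lands ra just past the jalr's delay slot, which is exactly what the trailing DCHECK verifies.

    #include <cassert>
    #include <cstdint>

    int main() {
      constexpr int kInstrSize = 4;
      constexpr int kNumInstructionsToJump = 4;
      const uint64_t pc_of_nal = 0x1000;          // hypothetical address
      uint64_t ra = pc_of_nal + 8;                // effect of nal: &find_ra
      ra += kNumInstructionsToJump * kInstrSize;  // the Daddu above
      assert(ra == pc_of_nal + 24);               // past Sd/mov/jalr/daddiu
    }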
+
void TurboAssembler::Ret(Condition cond, Register rs, const Operand& rt,
BranchDelaySlot bd) {
Jump(ra, cond, rs, rt, bd);
@@ -4391,7 +4458,7 @@ void MacroAssembler::Swap(Register reg1,
void TurboAssembler::Call(Label* target) { BranchAndLink(target); }
-void TurboAssembler::Push(Smi* smi) {
+void TurboAssembler::Push(Smi smi) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
li(scratch, Operand(smi));
@@ -4421,16 +4488,16 @@ void MacroAssembler::PushStackHandler() {
STATIC_ASSERT(StackHandlerConstants::kSize == 2 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
- Push(Smi::kZero); // Padding.
+ Push(Smi::zero()); // Padding.
// Link the current handler as the next handler.
- li(a6,
+ li(t2,
ExternalReference::Create(IsolateAddressId::kHandlerAddress, isolate()));
- Ld(a5, MemOperand(a6));
- push(a5);
+ Ld(t1, MemOperand(t2));
+ push(t1);
// Set this new handler as the current one.
- Sd(sp, MemOperand(a6));
+ Sd(sp, MemOperand(t2));
}
@@ -4787,40 +4854,6 @@ void MacroAssembler::GetObjectType(Register object,
// -----------------------------------------------------------------------------
// Runtime calls.
-void MacroAssembler::CallStub(CodeStub* stub,
- Condition cond,
- Register r1,
- const Operand& r2,
- BranchDelaySlot bd) {
- DCHECK(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
- Call(stub->GetCode(), RelocInfo::CODE_TARGET, cond, r1, r2, bd);
-}
-
-void TurboAssembler::CallStubDelayed(CodeStub* stub, Condition cond,
- Register r1, const Operand& r2,
- BranchDelaySlot bd) {
- DCHECK(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
-
- BlockTrampolinePoolScope block_trampoline_pool(this);
-
- UseScratchRegisterScope temps(this);
- Register scratch = temps.Acquire();
- li(scratch, Operand::EmbeddedCode(stub));
- Call(scratch);
-}
-
-void MacroAssembler::TailCallStub(CodeStub* stub,
- Condition cond,
- Register r1,
- const Operand& r2,
- BranchDelaySlot bd) {
- Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond, r1, r2, bd);
-}
-
-bool TurboAssembler::AllowThisStubCall(CodeStub* stub) {
- return has_frame() || !stub->SometimesSetsUpAFrame();
-}
-
void TurboAssembler::DaddOverflow(Register dst, Register left,
const Operand& right, Register overflow) {
BlockTrampolinePoolScope block_trampoline_pool(this);
@@ -4969,7 +5002,7 @@ void MacroAssembler::JumpToInstructionStream(Address entry) {
void MacroAssembler::LoadWeakValue(Register out, Register in,
Label* target_if_cleared) {
- Branch(target_if_cleared, eq, in, Operand(kClearedWeakHeapObject));
+ Branch(target_if_cleared, eq, in, Operand(kClearedWeakHeapObjectLower32));
And(out, in, Operand(~kWeakHeapObjectMask));
}
@@ -5105,19 +5138,6 @@ void TurboAssembler::LeaveFrame(StackFrame::Type type) {
Ld(fp, MemOperand(fp, 0 * kPointerSize));
}
-void MacroAssembler::EnterBuiltinFrame(Register context, Register target,
- Register argc) {
- Push(ra, fp);
- Move(fp, sp);
- Push(context, target, argc);
-}
-
-void MacroAssembler::LeaveBuiltinFrame(Register context, Register target,
- Register argc) {
- Pop(context, target, argc);
- Pop(ra, fp);
-}
-
void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
StackFrame::Type frame_type) {
DCHECK(frame_type == StackFrame::EXIT ||
@@ -5184,7 +5204,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
}
// Reserve place for the return address, stack space and an optional slot
- // (used by the DirectCEntryStub to hold the return value if a struct is
+ // (used by DirectCEntry to hold the return value if a struct is
// returned) and align the frame preparing for calling the runtime function.
DCHECK_GE(stack_space, 0);
Dsubu(sp, sp, Operand((stack_space + 2) * kPointerSize));
@@ -5423,6 +5443,9 @@ void MacroAssembler::AssertGeneratorObject(Register object) {
// Check if JSGeneratorObject
Branch(&done, eq, t8, Operand(JS_GENERATOR_OBJECT_TYPE));
+ // Check if JSAsyncFunctionObject (See MacroAssembler::CompareInstanceType)
+ Branch(&done, eq, t8, Operand(JS_ASYNC_FUNCTION_OBJECT_TYPE));
+
// Check if JSAsyncGeneratorObject
Branch(&done, eq, t8, Operand(JS_ASYNC_GENERATOR_OBJECT_TYPE));
@@ -5756,7 +5779,36 @@ void TurboAssembler::CallCFunctionHelper(Register function,
function = t9;
}
+  // Save the frame pointer and PC so that the stack layout remains iterable,
+  // even without an ExitFrame, which normally exists between JS and C frames.
+ if (isolate() != nullptr) {
+    // The 't' registers are caller-saved, so they are safe to use as scratch
+    // registers here.
+ Register scratch1 = t1;
+ Register scratch2 = t2;
+ DCHECK(!AreAliased(scratch1, scratch2, function));
+
+ Label get_pc;
+ mov(scratch1, ra);
+ Call(&get_pc);
+
+ bind(&get_pc);
+ mov(scratch2, ra);
+ mov(ra, scratch1);
+
+ li(scratch1, ExternalReference::fast_c_call_caller_pc_address(isolate()));
+ Sd(scratch2, MemOperand(scratch1));
+ li(scratch1, ExternalReference::fast_c_call_caller_fp_address(isolate()));
+ Sd(fp, MemOperand(scratch1));
+ }
+
Call(function);
+
+ if (isolate() != nullptr) {
+ // We don't unset the PC; the FP is the source of truth.
+ Register scratch = t1;
+ li(scratch, ExternalReference::fast_c_call_caller_fp_address(isolate()));
+ Sd(zero_reg, MemOperand(scratch));
+ }
}
int stack_passed_arguments = CalculateStackPassedWords(
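The PC/FP bookkeeping added to CallCFunctionHelper above amounts to a publish-call-clear protocol around the C call. A minimal sketch, with a hypothetical slots struct standing in for the isolate's fast_c_call_caller_pc/fp addresses:

    #include <cstdint>

    struct FastCCallSlots {  // hypothetical stand-in for the isolate slots
      uintptr_t caller_pc = 0;
      uintptr_t caller_fp = 0;
    };

    template <typename Fn>
    void CallFastC(FastCCallSlots* slots, uintptr_t pc, uintptr_t fp, Fn fn) {
      slots->caller_pc = pc;  // Sd(scratch2, fast_c_call_caller_pc_address)
      slots->caller_fp = fp;  // Sd(fp, fast_c_call_caller_fp_address)
      fn();                   // Call(function)
      slots->caller_fp = 0;   // only the FP is cleared afterwards; the stale
                              // PC is harmless since FP is the source of truth
    }

    int main() {
      FastCCallSlots slots;
      CallFastC(&slots, 0x4000, 0x7000, [] { /* the C routine */ });
      return slots.caller_fp == 0 ? 0 : 1;
    }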
@@ -5829,6 +5881,16 @@ void TurboAssembler::ResetSpeculationPoisonRegister() {
li(kSpeculationPoisonRegister, -1);
}
+void TurboAssembler::CallForDeoptimization(Address target, int deopt_id) {
+ NoRootArrayScope no_root_array(this);
+
+ // Save the deopt id in kRootRegister (we don't need the roots array from now
+ // on).
+ DCHECK_LE(deopt_id, 0xFFFF);
+ li(kRootRegister, deopt_id);
+ Call(target, RelocInfo::RUNTIME_ENTRY);
+}
+
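CallForDeoptimization above repurposes kRootRegister because the roots array is dead once deoptimization starts, and the id must fit in 16 bits. A trivial sketch of that bound and handoff (values hypothetical):

    #include <cassert>
    #include <cstdint>

    int main() {
      uint64_t root_register = 0xDEADBEEF;  // old contents, now expendable
      const int deopt_id = 257;
      assert(deopt_id <= 0xFFFF);           // mirrors the DCHECK_LE above
      root_register = static_cast<uint64_t>(deopt_id);  // li(kRootRegister, ...)
      return root_register == 257 ? 0 : 1;
    }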
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/mips64/macro-assembler-mips64.h b/deps/v8/src/mips64/macro-assembler-mips64.h
index 9160b26e01..2e6991c1ba 100644
--- a/deps/v8/src/mips64/macro-assembler-mips64.h
+++ b/deps/v8/src/mips64/macro-assembler-mips64.h
@@ -2,42 +2,20 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#ifndef INCLUDED_FROM_MACRO_ASSEMBLER_H
+#error This header must be included via macro-assembler.h
+#endif
+
#ifndef V8_MIPS64_MACRO_ASSEMBLER_MIPS64_H_
#define V8_MIPS64_MACRO_ASSEMBLER_MIPS64_H_
#include "src/assembler.h"
#include "src/globals.h"
#include "src/mips64/assembler-mips64.h"
-#include "src/turbo-assembler.h"
namespace v8 {
namespace internal {
-// Give alias names to registers for calling conventions.
-constexpr Register kReturnRegister0 = v0;
-constexpr Register kReturnRegister1 = v1;
-constexpr Register kReturnRegister2 = a0;
-constexpr Register kJSFunctionRegister = a1;
-constexpr Register kContextRegister = s7;
-constexpr Register kAllocateSizeRegister = a0;
-constexpr Register kSpeculationPoisonRegister = a7;
-constexpr Register kInterpreterAccumulatorRegister = v0;
-constexpr Register kInterpreterBytecodeOffsetRegister = t0;
-constexpr Register kInterpreterBytecodeArrayRegister = t1;
-constexpr Register kInterpreterDispatchTableRegister = t2;
-
-constexpr Register kJavaScriptCallArgCountRegister = a0;
-constexpr Register kJavaScriptCallCodeStartRegister = a2;
-constexpr Register kJavaScriptCallTargetRegister = kJSFunctionRegister;
-constexpr Register kJavaScriptCallNewTargetRegister = a3;
-constexpr Register kJavaScriptCallExtraArg1Register = a2;
-
-constexpr Register kOffHeapTrampolineRegister = at;
-constexpr Register kRuntimeCallFunctionRegister = a1;
-constexpr Register kRuntimeCallArgCountRegister = a0;
-constexpr Register kRuntimeCallArgvRegister = a2;
-constexpr Register kWasmInstanceRegister = a0;
-
// Forward declarations.
enum class AbortReason : uint8_t;
@@ -135,14 +113,9 @@ inline MemOperand CFunctionArgumentOperand(int index) {
class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
public:
- TurboAssembler(const AssemblerOptions& options, void* buffer, int buffer_size)
- : TurboAssemblerBase(options, buffer, buffer_size) {}
-
- TurboAssembler(Isolate* isolate, const AssemblerOptions& options,
- void* buffer, int buffer_size,
- CodeObjectRequired create_code_object)
- : TurboAssemblerBase(isolate, options, buffer, buffer_size,
- create_code_object) {}
+ template <typename... Args>
+ explicit TurboAssembler(Args&&... args)
+ : TurboAssemblerBase(std::forward<Args>(args)...) {}
// Activation support.
void EnterFrame(StackFrame::Type type);
@@ -157,10 +130,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void Prologue();
void InitializeRootRegister() {
- ExternalReference roots_array_start =
- ExternalReference::roots_array_start(isolate());
- li(kRootRegister, Operand(roots_array_start));
- daddiu(kRootRegister, kRootRegister, kRootRegisterBias);
+ ExternalReference isolate_root = ExternalReference::isolate_root(isolate());
+ li(kRootRegister, Operand(isolate_root));
}
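With the InitializeRootRegister change above, the root register holds the isolate root directly and every root access becomes a fixed displacement from it, matching the RootRegisterOffsetForRootIndex() loads earlier in this diff. A minimal address-arithmetic sketch (the base address and slot offset are made up):

    #include <cassert>
    #include <cstdint>

    int main() {
      const uint64_t isolate_root = 0x7f0000000000;  // hypothetical
      const uint64_t root_register = isolate_root;   // li(kRootRegister, ...)
      const int64_t slot_offset = 0x98;              // hypothetical root slot
      assert(root_register + slot_offset == isolate_root + 0x98);  // Ld(...)
    }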
// Jump unconditionally to given label.
@@ -183,8 +154,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// Print a message to stdout and abort execution.
void Abort(AbortReason msg);
- inline bool AllowThisStubCall(CodeStub* stub);
-
// Arguments macros.
#define COND_TYPED_ARGS Condition cond, Register r1, const Operand& r2
#define COND_ARGS cond, r1, r2
@@ -282,11 +251,28 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
COND_ARGS);
void Call(Label* target);
- void CallForDeoptimization(Address target, int deopt_id,
- RelocInfo::Mode rmode) {
- USE(deopt_id);
- Call(target, rmode);
+ void CallBuiltinPointer(Register builtin_pointer) override;
+
+ void LoadCodeObjectEntry(Register destination,
+ Register code_object) override {
+ // TODO(mips): Implement.
+ UNIMPLEMENTED();
}
+ void CallCodeObject(Register code_object) override {
+ // TODO(mips): Implement.
+ UNIMPLEMENTED();
+ }
+ void JumpCodeObject(Register code_object) override {
+ // TODO(mips): Implement.
+ UNIMPLEMENTED();
+ }
+
+  // Generates an instruction sequence such that the return address points to
+  // the instruction following the call.
+  // The return address on the stack is used by frame iteration.
+ void StoreReturnAddressAndCall(Register target);
+
+ void CallForDeoptimization(Address target, int deopt_id);
void Ret(COND_ARGS);
inline void Ret(BranchDelaySlot bd, Condition cond = al,
@@ -319,7 +305,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
}
void Push(Register src) { push(src); }
void Push(Handle<HeapObject> handle);
- void Push(Smi* smi);
+ void Push(Smi smi);
// Push two registers. Pushes leftmost register first (to highest address).
void Push(Register src1, Register src2) {
@@ -369,6 +355,9 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void CallRecordWriteStub(Register object, Register address,
RememberedSetAction remembered_set_action,
SaveFPRegsMode fp_mode);
+ void CallRecordWriteStub(Register object, Register address,
+ RememberedSetAction remembered_set_action,
+ SaveFPRegsMode fp_mode, Address wasm_target);
// Push multiple registers on the stack.
// Registers are saved in numerical order, with higher numbered registers
@@ -555,8 +544,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void CheckPageFlag(Register object, Register scratch, int mask, Condition cc,
Label* condition_met);
-
- void CallStubDelayed(CodeStub* stub, COND_ARGS);
#undef COND_ARGS
// Call a runtime routine. This expects {centry} to contain a fitting CEntry
@@ -564,15 +551,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void CallRuntimeWithCEntry(Runtime::FunctionId fid, Register centry);
// Performs a truncating conversion of a floating point number as used by
- // the JS bitwise operations. See ECMA-262 9.5: ToInt32. Goes to 'done' if it
- // succeeds, otherwise falls through if result is saturated. On return
- // 'result' either holds answer, or is clobbered on fall through.
- //
- // Only public for the test code in test-code-stubs-arm.cc.
- void TryInlineTruncateDoubleToI(Register result, DoubleRegister input,
- Label* done);
-
- // Performs a truncating conversion of a floating point number as used by
// the JS bitwise operations. See ECMA-262 9.5: ToInt32.
// Exits with 'result' holding the answer.
void TruncateDoubleToI(Isolate* isolate, Zone* zone, Register result,
@@ -690,7 +668,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void mov(Register rd, Register rt) { or_(rd, rt, zero_reg); }
inline void Move(Register dst, Handle<HeapObject> handle) { li(dst, handle); }
- inline void Move(Register dst, Smi* smi) { li(dst, Operand(smi)); }
+ inline void Move(Register dst, Smi smi) { li(dst, Operand(smi)); }
inline void Move(Register dst, Register src) {
if (dst != src) {
@@ -866,6 +844,13 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
private:
bool has_double_zero_reg_set_ = false;
+ // Performs a truncating conversion of a floating point number as used by
+ // the JS bitwise operations. See ECMA-262 9.5: ToInt32. Goes to 'done' if it
+ // succeeds, otherwise falls through if result is saturated. On return
+ // 'result' either holds answer, or is clobbered on fall through.
+ void TryInlineTruncateDoubleToI(Register result, DoubleRegister input,
+ Label* done);
+
void CompareF(SecondaryField sizeField, FPUCondition cc, FPURegister cmp1,
FPURegister cmp2);
@@ -917,21 +902,19 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// Push a fixed frame, consisting of ra, fp.
void PushCommonFrame(Register marker_reg = no_reg);
+
+ void CallRecordWriteStub(Register object, Register address,
+ RememberedSetAction remembered_set_action,
+ SaveFPRegsMode fp_mode, Handle<Code> code_target,
+ Address wasm_target);
};
// MacroAssembler implements a collection of frequently used macros.
-class MacroAssembler : public TurboAssembler {
+class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
public:
- MacroAssembler(const AssemblerOptions& options, void* buffer, int size)
- : TurboAssembler(options, buffer, size) {}
-
- MacroAssembler(Isolate* isolate, void* buffer, int size,
- CodeObjectRequired create_code_object)
- : MacroAssembler(isolate, AssemblerOptions::Default(isolate), buffer,
- size, create_code_object) {}
-
- MacroAssembler(Isolate* isolate, const AssemblerOptions& options,
- void* buffer, int size, CodeObjectRequired create_code_object);
+ template <typename... Args>
+ explicit MacroAssembler(Args&&... args)
+ : TurboAssembler(std::forward<Args>(args)...) {}
bool IsNear(Label* L, Condition cond, int rs_reg);
@@ -1105,17 +1088,6 @@ class MacroAssembler : public TurboAssembler {
// -------------------------------------------------------------------------
// Runtime calls.
-#define COND_ARGS Condition cond = al, Register rs = zero_reg, \
-const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
-
- // Call a code stub.
- void CallStub(CodeStub* stub, COND_ARGS);
-
- // Tail call a code stub (jump).
- void TailCallStub(CodeStub* stub, COND_ARGS);
-
-#undef COND_ARGS
-
// Call a runtime routine.
void CallRuntime(const Runtime::Function* f, int num_arguments,
SaveFPRegsMode save_doubles = kDontSaveFPRegs);
@@ -1234,9 +1206,6 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
DecodeField<Field>(reg, reg);
}
- void EnterBuiltinFrame(Register context, Register target, Register argc);
- void LeaveBuiltinFrame(Register context, Register target, Register argc);
-
private:
// Helper functions for generating invokes.
void InvokePrologue(const ParameterCount& expected,
@@ -1249,6 +1218,8 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
// Needs access to SafepointRegisterStackIndex for compiled frame
// traversal.
friend class StandardFrame;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(MacroAssembler);
};
template <typename Func>
diff --git a/deps/v8/src/mips64/register-mips64.h b/deps/v8/src/mips64/register-mips64.h
new file mode 100644
index 0000000000..5da1b7a087
--- /dev/null
+++ b/deps/v8/src/mips64/register-mips64.h
@@ -0,0 +1,389 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_MIPS64_REGISTER_MIPS64_H_
+#define V8_MIPS64_REGISTER_MIPS64_H_
+
+#include "src/mips64/constants-mips64.h"
+#include "src/register.h"
+#include "src/reglist.h"
+
+namespace v8 {
+namespace internal {
+
+// clang-format off
+#define GENERAL_REGISTERS(V) \
+ V(zero_reg) V(at) V(v0) V(v1) V(a0) V(a1) V(a2) V(a3) \
+ V(a4) V(a5) V(a6) V(a7) V(t0) V(t1) V(t2) V(t3) \
+ V(s0) V(s1) V(s2) V(s3) V(s4) V(s5) V(s6) V(s7) V(t8) V(t9) \
+ V(k0) V(k1) V(gp) V(sp) V(fp) V(ra)
+
+#define ALLOCATABLE_GENERAL_REGISTERS(V) \
+ V(a0) V(a1) V(a2) V(a3) \
+ V(a4) V(a5) V(a6) V(a7) V(t0) V(t1) V(t2) V(s7) \
+ V(v0) V(v1)
+
+#define DOUBLE_REGISTERS(V) \
+ V(f0) V(f1) V(f2) V(f3) V(f4) V(f5) V(f6) V(f7) \
+ V(f8) V(f9) V(f10) V(f11) V(f12) V(f13) V(f14) V(f15) \
+ V(f16) V(f17) V(f18) V(f19) V(f20) V(f21) V(f22) V(f23) \
+ V(f24) V(f25) V(f26) V(f27) V(f28) V(f29) V(f30) V(f31)
+
+#define FLOAT_REGISTERS DOUBLE_REGISTERS
+#define SIMD128_REGISTERS(V) \
+ V(w0) V(w1) V(w2) V(w3) V(w4) V(w5) V(w6) V(w7) \
+ V(w8) V(w9) V(w10) V(w11) V(w12) V(w13) V(w14) V(w15) \
+ V(w16) V(w17) V(w18) V(w19) V(w20) V(w21) V(w22) V(w23) \
+ V(w24) V(w25) V(w26) V(w27) V(w28) V(w29) V(w30) V(w31)
+
+#define ALLOCATABLE_DOUBLE_REGISTERS(V) \
+ V(f0) V(f2) V(f4) V(f6) V(f8) V(f10) V(f12) V(f14) \
+ V(f16) V(f18) V(f20) V(f22) V(f24) V(f26)
+// clang-format on
+
+// Note that the bit values must match those used in actual instruction
+// encoding.
+const int kNumRegs = 32;
+
+const RegList kJSCallerSaved = 1 << 2 | // v0
+ 1 << 3 | // v1
+ 1 << 4 | // a0
+ 1 << 5 | // a1
+ 1 << 6 | // a2
+ 1 << 7 | // a3
+ 1 << 8 | // a4
+ 1 << 9 | // a5
+ 1 << 10 | // a6
+ 1 << 11 | // a7
+ 1 << 12 | // t0
+ 1 << 13 | // t1
+ 1 << 14 | // t2
+ 1 << 15; // t3
+
+const int kNumJSCallerSaved = 14;
+
+// Callee-saved registers preserved when switching from C to JavaScript.
+const RegList kCalleeSaved = 1 << 16 | // s0
+ 1 << 17 | // s1
+ 1 << 18 | // s2
+ 1 << 19 | // s3
+ 1 << 20 | // s4
+ 1 << 21 | // s5
+                             1 << 22 |  // s6 (roots in JavaScript code)
+                             1 << 23 |  // s7 (cp in JavaScript code)
+ 1 << 30; // fp/s8
+
+const int kNumCalleeSaved = 9;
+
+const RegList kCalleeSavedFPU = 1 << 20 | // f20
+ 1 << 22 | // f22
+ 1 << 24 | // f24
+ 1 << 26 | // f26
+ 1 << 28 | // f28
+ 1 << 30; // f30
+
+const int kNumCalleeSavedFPU = 6;
+
+const RegList kCallerSavedFPU = 1 << 0 | // f0
+ 1 << 2 | // f2
+ 1 << 4 | // f4
+ 1 << 6 | // f6
+ 1 << 8 | // f8
+ 1 << 10 | // f10
+ 1 << 12 | // f12
+ 1 << 14 | // f14
+ 1 << 16 | // f16
+ 1 << 18; // f18
+
+// Number of registers for which space is reserved in safepoints. Must be a
+// multiple of 8.
+const int kNumSafepointRegisters = 24;
+
+// Define the list of registers actually saved at safepoints.
+// Note that the number of saved registers may be smaller than the reserved
+// space, i.e. kNumSafepointSavedRegisters <= kNumSafepointRegisters.
+const RegList kSafepointSavedRegisters = kJSCallerSaved | kCalleeSaved;
+const int kNumSafepointSavedRegisters = kNumJSCallerSaved + kNumCalleeSaved;
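A RegList such as kJSCallerSaved or kCalleeSaved above is a plain bitmask keyed by register code, so membership tests and counts reduce to bit operations. A small sketch with made-up codes (kDemoList is illustrative, not one of the real lists):

    #include <bitset>
    #include <cassert>
    #include <cstdint>

    using RegList = uint32_t;

    int main() {
      constexpr int kCodeV0 = 2, kCodeA0 = 4, kCodeS0 = 16;
      constexpr RegList kDemoList = (1u << kCodeV0) | (1u << kCodeA0);
      assert(kDemoList & (1u << kCodeV0));     // v0 is in the list
      assert(!(kDemoList & (1u << kCodeS0)));  // s0 is not
      assert(std::bitset<32>(kDemoList).count() == 2);
    }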
+
+const int kUndefIndex = -1;
+// Map of stack indexes that correspond to the codes of saved registers.
+const int kSafepointRegisterStackIndexMap[kNumRegs] = {kUndefIndex, // zero_reg
+ kUndefIndex, // at
+ 0, // v0
+ 1, // v1
+ 2, // a0
+ 3, // a1
+ 4, // a2
+ 5, // a3
+ 6, // a4
+ 7, // a5
+ 8, // a6
+ 9, // a7
+ 10, // t0
+ 11, // t1
+ 12, // t2
+ 13, // t3
+ 14, // s0
+ 15, // s1
+ 16, // s2
+ 17, // s3
+ 18, // s4
+ 19, // s5
+ 20, // s6
+ 21, // s7
+ kUndefIndex, // t8
+ kUndefIndex, // t9
+ kUndefIndex, // k0
+ kUndefIndex, // k1
+ kUndefIndex, // gp
+ kUndefIndex, // sp
+ 22, // fp
+ kUndefIndex};
+
+// CPU Registers.
+//
+// 1) We would prefer to use an enum, but enum values are assignment-
+// compatible with int, which has caused code-generation bugs.
+//
+// 2) We would prefer to use a class instead of a struct but we don't like
+// the register initialization to depend on the particular initialization
+// order (which appears to be different on OS X, Linux, and Windows for the
+// installed versions of C++ we tried). Using a struct permits C-style
+// "initialization". Also, the Register objects cannot be const as this
+// forces initialization stubs in MSVC, making us dependent on initialization
+// order.
+//
+// 3) By not using an enum, we are possibly preventing the compiler from
+// doing certain constant folds, which may significantly reduce the
+// code generated for some assembly instructions (because they boil down
+// to a few constants). If this is a problem, we could change the code
+// such that we use an enum in optimized mode, and the struct in debug
+// mode. This way we get the compile-time error checking in debug mode
+// and best performance in optimized code.
+
+// -----------------------------------------------------------------------------
+// Implementation of Register and FPURegister.
+
+enum RegisterCode {
+#define REGISTER_CODE(R) kRegCode_##R,
+ GENERAL_REGISTERS(REGISTER_CODE)
+#undef REGISTER_CODE
+ kRegAfterLast
+};
+
+class Register : public RegisterBase<Register, kRegAfterLast> {
+ public:
+#if defined(V8_TARGET_LITTLE_ENDIAN)
+ static constexpr int kMantissaOffset = 0;
+ static constexpr int kExponentOffset = 4;
+#elif defined(V8_TARGET_BIG_ENDIAN)
+ static constexpr int kMantissaOffset = 4;
+ static constexpr int kExponentOffset = 0;
+#else
+#error Unknown endianness
+#endif
+
+ private:
+ friend class RegisterBase;
+ explicit constexpr Register(int code) : RegisterBase(code) {}
+};
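kMantissaOffset and kExponentOffset above record which 4-byte half of a stored double holds the mantissa word. A quick host check (assumes a little-endian machine, i.e. the V8_TARGET_LITTLE_ENDIAN branch):

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    int main() {
      const double d = 1.0;  // bit pattern 0x3FF0000000000000
      uint32_t words[2];
      std::memcpy(words, &d, sizeof(d));
      assert(words[0] == 0x00000000u);  // offset 0: mantissa word
      assert(words[1] == 0x3FF00000u);  // offset 4: sign/exponent word
    }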
+
+// s7: context register
+// s3: scratch register
+// s4: scratch register 2
+#define DECLARE_REGISTER(R) \
+ constexpr Register R = Register::from_code<kRegCode_##R>();
+GENERAL_REGISTERS(DECLARE_REGISTER)
+#undef DECLARE_REGISTER
+
+constexpr Register no_reg = Register::no_reg();
+
+int ToNumber(Register reg);
+
+Register ToRegister(int num);
+
+constexpr bool kPadArguments = false;
+constexpr bool kSimpleFPAliasing = true;
+constexpr bool kSimdMaskRegisters = false;
+
+enum DoubleRegisterCode {
+#define REGISTER_CODE(R) kDoubleCode_##R,
+ DOUBLE_REGISTERS(REGISTER_CODE)
+#undef REGISTER_CODE
+ kDoubleAfterLast
+};
+
+// Coprocessor register.
+class FPURegister : public RegisterBase<FPURegister, kDoubleAfterLast> {
+ public:
+ // TODO(plind): Warning, inconsistent numbering here. kNumFPURegisters refers
+ // to number of 32-bit FPU regs, but kNumAllocatableRegisters refers to
+ // number of Double regs (64-bit regs, or FPU-reg-pairs).
+
+ FPURegister low() const {
+ // TODO(plind): Create DCHECK for FR=0 mode. This usage suspect for FR=1.
+ // Find low reg of a Double-reg pair, which is the reg itself.
+ DCHECK_EQ(code() % 2, 0); // Specified Double reg must be even.
+ return FPURegister::from_code(code());
+ }
+ FPURegister high() const {
+ // TODO(plind): Create DCHECK for FR=0 mode. This usage illegal in FR=1.
+    // Find the high reg of a Double-reg pair, which is reg + 1.
+ DCHECK_EQ(code() % 2, 0); // Specified Double reg must be even.
+ return FPURegister::from_code(code() + 1);
+ }
+
+ private:
+ friend class RegisterBase;
+ explicit constexpr FPURegister(int code) : RegisterBase(code) {}
+};
+
+enum MSARegisterCode {
+#define REGISTER_CODE(R) kMsaCode_##R,
+ SIMD128_REGISTERS(REGISTER_CODE)
+#undef REGISTER_CODE
+ kMsaAfterLast
+};
+
+// MIPS SIMD (MSA) register
+class MSARegister : public RegisterBase<MSARegister, kMsaAfterLast> {
+ friend class RegisterBase;
+ explicit constexpr MSARegister(int code) : RegisterBase(code) {}
+};
+
+// A few double registers are reserved: one as a scratch register and one to
+// hold 0.0.
+// f28: 0.0
+// f30: scratch register.
+
+// V8 now supports the O32 ABI, and the FPU Registers are organized as 32
+// 32-bit registers, f0 through f31. When used as 'double' they are used
+// in pairs, starting with the even numbered register. So a double operation
+// on f0 really uses f0 and f1.
+// (Modern mips hardware also supports 32 64-bit registers, via setting
+// (privileged) Status Register FR bit to 1. This is used by the N32 ABI,
+// but it is not in common use. Someday we will want to support this in v8.)
+
+// For O32 ABI, Floats and Doubles refer to same set of 32 32-bit registers.
+typedef FPURegister FloatRegister;
+
+typedef FPURegister DoubleRegister;
+
+#define DECLARE_DOUBLE_REGISTER(R) \
+ constexpr DoubleRegister R = DoubleRegister::from_code<kDoubleCode_##R>();
+DOUBLE_REGISTERS(DECLARE_DOUBLE_REGISTER)
+#undef DECLARE_DOUBLE_REGISTER
+
+constexpr DoubleRegister no_dreg = DoubleRegister::no_reg();
+
+// SIMD registers.
+typedef MSARegister Simd128Register;
+
+#define DECLARE_SIMD128_REGISTER(R) \
+ constexpr Simd128Register R = Simd128Register::from_code<kMsaCode_##R>();
+SIMD128_REGISTERS(DECLARE_SIMD128_REGISTER)
+#undef DECLARE_SIMD128_REGISTER
+
+const Simd128Register no_msareg = Simd128Register::no_reg();
+
+// Register aliases.
+// cp is assumed to be a callee saved register.
+constexpr Register kRootRegister = s6;
+constexpr Register cp = s7;
+constexpr Register kScratchReg = s3;
+constexpr Register kScratchReg2 = s4;
+constexpr DoubleRegister kScratchDoubleReg = f30;
+constexpr DoubleRegister kDoubleRegZero = f28;
+// Used on mips64r6 for compare operations.
+// We use the last non-callee-saved odd register for the N64 ABI.
+constexpr DoubleRegister kDoubleCompareReg = f23;
+// MSA zero and scratch regs must have the same numbers as FPU zero and
+// scratch regs.
+constexpr Simd128Register kSimd128RegZero = w28;
+constexpr Simd128Register kSimd128ScratchReg = w30;
+
+// FPU (coprocessor 1) control registers.
+// Currently only FCSR (#31) is implemented.
+struct FPUControlRegister {
+ bool is_valid() const { return reg_code == kFCSRRegister; }
+ bool is(FPUControlRegister creg) const { return reg_code == creg.reg_code; }
+ int code() const {
+ DCHECK(is_valid());
+ return reg_code;
+ }
+ int bit() const {
+ DCHECK(is_valid());
+ return 1 << reg_code;
+ }
+ void setcode(int f) {
+ reg_code = f;
+ DCHECK(is_valid());
+ }
+ // Unfortunately we can't make this private in a struct.
+ int reg_code;
+};
+
+constexpr FPUControlRegister no_fpucreg = {kInvalidFPUControlRegister};
+constexpr FPUControlRegister FCSR = {kFCSRRegister};
+
+// MSA control registers
+struct MSAControlRegister {
+ bool is_valid() const {
+ return (reg_code == kMSAIRRegister) || (reg_code == kMSACSRRegister);
+ }
+ bool is(MSAControlRegister creg) const { return reg_code == creg.reg_code; }
+ int code() const {
+ DCHECK(is_valid());
+ return reg_code;
+ }
+ int bit() const {
+ DCHECK(is_valid());
+ return 1 << reg_code;
+ }
+ void setcode(int f) {
+ reg_code = f;
+ DCHECK(is_valid());
+ }
+ // Unfortunately we can't make this private in a struct.
+ int reg_code;
+};
+
+constexpr MSAControlRegister no_msacreg = {kInvalidMSAControlRegister};
+constexpr MSAControlRegister MSAIR = {kMSAIRRegister};
+constexpr MSAControlRegister MSACSR = {kMSACSRRegister};
+
+// Define {RegisterName} methods for the register types.
+DEFINE_REGISTER_NAMES(Register, GENERAL_REGISTERS)
+DEFINE_REGISTER_NAMES(FPURegister, DOUBLE_REGISTERS)
+DEFINE_REGISTER_NAMES(MSARegister, SIMD128_REGISTERS)
+
+// Give alias names to registers for calling conventions.
+constexpr Register kReturnRegister0 = v0;
+constexpr Register kReturnRegister1 = v1;
+constexpr Register kReturnRegister2 = a0;
+constexpr Register kJSFunctionRegister = a1;
+constexpr Register kContextRegister = s7;
+constexpr Register kAllocateSizeRegister = a0;
+constexpr Register kSpeculationPoisonRegister = a7;
+constexpr Register kInterpreterAccumulatorRegister = v0;
+constexpr Register kInterpreterBytecodeOffsetRegister = t0;
+constexpr Register kInterpreterBytecodeArrayRegister = t1;
+constexpr Register kInterpreterDispatchTableRegister = t2;
+
+constexpr Register kJavaScriptCallArgCountRegister = a0;
+constexpr Register kJavaScriptCallCodeStartRegister = a2;
+constexpr Register kJavaScriptCallTargetRegister = kJSFunctionRegister;
+constexpr Register kJavaScriptCallNewTargetRegister = a3;
+constexpr Register kJavaScriptCallExtraArg1Register = a2;
+
+constexpr Register kOffHeapTrampolineRegister = at;
+constexpr Register kRuntimeCallFunctionRegister = a1;
+constexpr Register kRuntimeCallArgCountRegister = a0;
+constexpr Register kRuntimeCallArgvRegister = a2;
+constexpr Register kWasmInstanceRegister = a0;
+constexpr Register kWasmCompileLazyFuncIndexRegister = t0;
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_MIPS64_REGISTER_MIPS64_H_
diff --git a/deps/v8/src/mips64/simulator-mips64.cc b/deps/v8/src/mips64/simulator-mips64.cc
index 7f57b4da28..fad35304cd 100644
--- a/deps/v8/src/mips64/simulator-mips64.cc
+++ b/deps/v8/src/mips64/simulator-mips64.cc
@@ -2,29 +2,30 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/mips64/simulator-mips64.h"
+
+// Only build the simulator if not compiling for real MIPS hardware.
+#if defined(USE_SIMULATOR)
+
#include <limits.h>
#include <stdarg.h>
#include <stdlib.h>
#include <cmath>
-#if V8_TARGET_ARCH_MIPS64
-
#include "src/assembler-inl.h"
#include "src/base/bits.h"
-#include "src/codegen.h"
#include "src/disasm.h"
#include "src/macro-assembler.h"
#include "src/mips64/constants-mips64.h"
-#include "src/mips64/simulator-mips64.h"
#include "src/ostreams.h"
#include "src/runtime/runtime-utils.h"
-// Only build the simulator if not compiling for real MIPS hardware.
-#if defined(USE_SIMULATOR)
-
namespace v8 {
namespace internal {
+DEFINE_LAZY_LEAKY_OBJECT_GETTER(Simulator::GlobalMonitor,
+ Simulator::GlobalMonitor::Get);
+
// Util functions.
inline bool HaveSameSign(int64_t a, int64_t b) { return ((a ^ b) >= 0); }
@@ -246,7 +247,6 @@ void MipsDebugger::PrintAllRegs() {
REG_INFO(31), REG_INFO(34));
#undef REG_INFO
-#undef FPU_REG_INFO
}
@@ -293,7 +293,6 @@ void MipsDebugger::PrintAllRegsIncludingFPU() {
PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(30));
PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(31));
-#undef REG_INFO
#undef FPU_REG_INFO
}
@@ -420,7 +419,7 @@ void MipsDebugger::Debug() {
int64_t value;
StdoutStream os;
if (GetValue(arg1, &value)) {
- Object* obj = reinterpret_cast<Object*>(value);
+ Object obj(value);
os << arg1 << ": \n";
#ifdef DEBUG
obj->Print(os);
@@ -464,14 +463,12 @@ void MipsDebugger::Debug() {
while (cur < end) {
PrintF(" 0x%012" PRIxPTR " : 0x%016" PRIx64 " %14" PRId64 " ",
reinterpret_cast<intptr_t>(cur), *cur, *cur);
- HeapObject* obj = reinterpret_cast<HeapObject*>(*cur);
- int64_t value = *cur;
+ Object obj(*cur);
Heap* current_heap = sim_->isolate_->heap();
- if (((value & 1) == 0) ||
- current_heap->ContainsSlow(obj->address())) {
+ if (obj.IsSmi() || current_heap->Contains(HeapObject::cast(obj))) {
PrintF(" (");
- if ((value & 1) == 0) {
- PrintF("smi %d", static_cast<int>(value >> 32));
+ if (obj.IsSmi()) {
+ PrintF("smi %d", Smi::ToInt(obj));
} else {
obj->ShortPrint();
}
@@ -849,9 +846,10 @@ Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
last_debugger_input_ = nullptr;
}
-
-Simulator::~Simulator() { free(stack_); }
-
+Simulator::~Simulator() {
+ GlobalMonitor::Get()->RemoveLinkedAddress(&global_monitor_thread_);
+ free(stack_);
+}
// Get the active Simulator for the current thread.
Simulator* Simulator::current(Isolate* isolate) {
@@ -1872,6 +1870,7 @@ int32_t Simulator::ReadW(int64_t addr, Instruction* instr, TraceType t) {
DieOrDebug();
}
if ((addr & 0x3) == 0 || kArchVariant == kMips64r6) {
+ local_monitor_.NotifyLoad();
int32_t* ptr = reinterpret_cast<int32_t*>(addr);
TraceMemRd(addr, static_cast<int64_t>(*ptr), t);
return *ptr;
@@ -1892,6 +1891,7 @@ uint32_t Simulator::ReadWU(int64_t addr, Instruction* instr) {
DieOrDebug();
}
if ((addr & 0x3) == 0 || kArchVariant == kMips64r6) {
+ local_monitor_.NotifyLoad();
uint32_t* ptr = reinterpret_cast<uint32_t*>(addr);
TraceMemRd(addr, static_cast<int64_t>(*ptr), WORD);
return *ptr;
@@ -1912,6 +1912,9 @@ void Simulator::WriteW(int64_t addr, int32_t value, Instruction* instr) {
DieOrDebug();
}
if ((addr & 0x3) == 0 || kArchVariant == kMips64r6) {
+ local_monitor_.NotifyStore();
+ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
+ GlobalMonitor::Get()->NotifyStore_Locked(&global_monitor_thread_);
TraceMemWr(addr, value, WORD);
int* ptr = reinterpret_cast<int*>(addr);
*ptr = value;
@@ -1922,6 +1925,35 @@ void Simulator::WriteW(int64_t addr, int32_t value, Instruction* instr) {
DieOrDebug();
}
+void Simulator::WriteConditionalW(int64_t addr, int32_t value,
+ Instruction* instr, int32_t rt_reg) {
+ if (addr >= 0 && addr < 0x400) {
+ // This has to be a nullptr-dereference, drop into debugger.
+ PrintF("Memory write to bad address: 0x%08" PRIx64 " , pc=0x%08" PRIxPTR
+ " \n",
+ addr, reinterpret_cast<intptr_t>(instr));
+ DieOrDebug();
+ }
+ if ((addr & 0x3) == 0 || kArchVariant == kMips64r6) {
+ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
+ if (local_monitor_.NotifyStoreConditional(addr, TransactionSize::Word) &&
+ GlobalMonitor::Get()->NotifyStoreConditional_Locked(
+ addr, &global_monitor_thread_)) {
+ local_monitor_.NotifyStore();
+ GlobalMonitor::Get()->NotifyStore_Locked(&global_monitor_thread_);
+ TraceMemWr(addr, value, WORD);
+ int* ptr = reinterpret_cast<int*>(addr);
+ *ptr = value;
+ set_register(rt_reg, 1);
+ } else {
+ set_register(rt_reg, 0);
+ }
+ return;
+ }
+ PrintF("Unaligned write at 0x%08" PRIx64 " , pc=0x%08" V8PRIxPTR "\n", addr,
+ reinterpret_cast<intptr_t>(instr));
+ DieOrDebug();
+}
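WriteConditionalW above writes 1 or 0 into rt_reg depending on whether the monitors are still armed, which is why every LL/SC sequence must be a retry loop. A self-contained, single-threaded sketch of that contract (the one-entry monitor below is a toy stand-in for the simulator's local and global monitors):

    #include <cstdint>

    namespace {
    int64_t g_linked_addr = -1;  // -1 means the monitor is open

    int32_t LoadLinked(int32_t* addr) {  // like LL
      g_linked_addr = reinterpret_cast<int64_t>(addr);  // arm the monitor
      return *addr;
    }

    bool StoreConditional(int32_t* addr, int32_t value) {  // like SC
      if (g_linked_addr != reinterpret_cast<int64_t>(addr)) return false;
      g_linked_addr = -1;  // cleared, as in NotifyStoreConditional
      *addr = value;
      return true;         // rt_reg = 1 in the code above
    }
    }  // namespace

    int32_t AtomicAdd(int32_t* addr, int32_t delta) {
      int32_t old;
      do {
        old = LoadLinked(addr);
      } while (!StoreConditional(addr, old + delta));  // retry on failure
      return old;
    }

    int main() {
      int32_t cell = 40;
      AtomicAdd(&cell, 2);
      return cell == 42 ? 0 : 1;
    }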
int64_t Simulator::Read2W(int64_t addr, Instruction* instr) {
  if (addr >= 0 && addr < 0x400) {
@@ -1932,6 +1964,7 @@ int64_t Simulator::Read2W(int64_t addr, Instruction* instr) {
DieOrDebug();
}
if ((addr & kPointerAlignmentMask) == 0 || kArchVariant == kMips64r6) {
+ local_monitor_.NotifyLoad();
int64_t* ptr = reinterpret_cast<int64_t*>(addr);
TraceMemRd(addr, *ptr);
return *ptr;
@@ -1952,6 +1985,9 @@ void Simulator::Write2W(int64_t addr, int64_t value, Instruction* instr) {
DieOrDebug();
}
if ((addr & kPointerAlignmentMask) == 0 || kArchVariant == kMips64r6) {
+ local_monitor_.NotifyStore();
+ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
+ GlobalMonitor::Get()->NotifyStore_Locked(&global_monitor_thread_);
TraceMemWr(addr, value, DWORD);
int64_t* ptr = reinterpret_cast<int64_t*>(addr);
*ptr = value;
@@ -1962,9 +1998,40 @@ void Simulator::Write2W(int64_t addr, int64_t value, Instruction* instr) {
DieOrDebug();
}
+void Simulator::WriteConditional2W(int64_t addr, int64_t value,
+ Instruction* instr, int32_t rt_reg) {
+ if (addr >= 0 && addr < 0x400) {
+ // This has to be a nullptr-dereference, drop into debugger.
+ PrintF("Memory write to bad address: 0x%08" PRIx64 " , pc=0x%08" PRIxPTR
+ "\n",
+ addr, reinterpret_cast<intptr_t>(instr));
+ DieOrDebug();
+ }
+ if ((addr & kPointerAlignmentMask) == 0 || kArchVariant == kMips64r6) {
+ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
+ if (local_monitor_.NotifyStoreConditional(addr,
+ TransactionSize::DoubleWord) &&
+ GlobalMonitor::Get()->NotifyStoreConditional_Locked(
+ addr, &global_monitor_thread_)) {
+ local_monitor_.NotifyStore();
+ GlobalMonitor::Get()->NotifyStore_Locked(&global_monitor_thread_);
+ TraceMemWr(addr, value, DWORD);
+ int64_t* ptr = reinterpret_cast<int64_t*>(addr);
+ *ptr = value;
+ set_register(rt_reg, 1);
+ } else {
+ set_register(rt_reg, 0);
+ }
+ return;
+ }
+ PrintF("Unaligned write at 0x%08" PRIx64 " , pc=0x%08" V8PRIxPTR "\n", addr,
+ reinterpret_cast<intptr_t>(instr));
+ DieOrDebug();
+}
double Simulator::ReadD(int64_t addr, Instruction* instr) {
if ((addr & kDoubleAlignmentMask) == 0 || kArchVariant == kMips64r6) {
+ local_monitor_.NotifyLoad();
double* ptr = reinterpret_cast<double*>(addr);
return *ptr;
}
@@ -1977,6 +2044,9 @@ double Simulator::ReadD(int64_t addr, Instruction* instr) {
void Simulator::WriteD(int64_t addr, double value, Instruction* instr) {
if ((addr & kDoubleAlignmentMask) == 0 || kArchVariant == kMips64r6) {
+ local_monitor_.NotifyStore();
+ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
+ GlobalMonitor::Get()->NotifyStore_Locked(&global_monitor_thread_);
double* ptr = reinterpret_cast<double*>(addr);
*ptr = value;
return;
@@ -1990,6 +2060,7 @@ void Simulator::WriteD(int64_t addr, double value, Instruction* instr) {
uint16_t Simulator::ReadHU(int64_t addr, Instruction* instr) {
if ((addr & 1) == 0 || kArchVariant == kMips64r6) {
+ local_monitor_.NotifyLoad();
uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
TraceMemRd(addr, static_cast<int64_t>(*ptr));
return *ptr;
@@ -2004,6 +2075,7 @@ uint16_t Simulator::ReadHU(int64_t addr, Instruction* instr) {
int16_t Simulator::ReadH(int64_t addr, Instruction* instr) {
if ((addr & 1) == 0 || kArchVariant == kMips64r6) {
+ local_monitor_.NotifyLoad();
int16_t* ptr = reinterpret_cast<int16_t*>(addr);
TraceMemRd(addr, static_cast<int64_t>(*ptr));
return *ptr;
@@ -2018,6 +2090,9 @@ int16_t Simulator::ReadH(int64_t addr, Instruction* instr) {
void Simulator::WriteH(int64_t addr, uint16_t value, Instruction* instr) {
if ((addr & 1) == 0 || kArchVariant == kMips64r6) {
+ local_monitor_.NotifyStore();
+ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
+ GlobalMonitor::Get()->NotifyStore_Locked(&global_monitor_thread_);
TraceMemWr(addr, value, HALF);
uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
*ptr = value;
@@ -2032,6 +2107,9 @@ void Simulator::WriteH(int64_t addr, uint16_t value, Instruction* instr) {
void Simulator::WriteH(int64_t addr, int16_t value, Instruction* instr) {
if ((addr & 1) == 0 || kArchVariant == kMips64r6) {
+ local_monitor_.NotifyStore();
+ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
+ GlobalMonitor::Get()->NotifyStore_Locked(&global_monitor_thread_);
TraceMemWr(addr, value, HALF);
int16_t* ptr = reinterpret_cast<int16_t*>(addr);
*ptr = value;
@@ -2045,6 +2123,7 @@ void Simulator::WriteH(int64_t addr, int16_t value, Instruction* instr) {
uint32_t Simulator::ReadBU(int64_t addr) {
+ local_monitor_.NotifyLoad();
uint8_t* ptr = reinterpret_cast<uint8_t*>(addr);
TraceMemRd(addr, static_cast<int64_t>(*ptr));
return *ptr & 0xFF;
@@ -2052,6 +2131,7 @@ uint32_t Simulator::ReadBU(int64_t addr) {
int32_t Simulator::ReadB(int64_t addr) {
+ local_monitor_.NotifyLoad();
int8_t* ptr = reinterpret_cast<int8_t*>(addr);
TraceMemRd(addr, static_cast<int64_t>(*ptr));
return *ptr;
@@ -2059,6 +2139,9 @@ int32_t Simulator::ReadB(int64_t addr) {
void Simulator::WriteB(int64_t addr, uint8_t value) {
+ local_monitor_.NotifyStore();
+ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
+ GlobalMonitor::Get()->NotifyStore_Locked(&global_monitor_thread_);
TraceMemWr(addr, value, BYTE);
uint8_t* ptr = reinterpret_cast<uint8_t*>(addr);
*ptr = value;
@@ -2066,6 +2149,9 @@ void Simulator::WriteB(int64_t addr, uint8_t value) {
void Simulator::WriteB(int64_t addr, int8_t value) {
+ local_monitor_.NotifyStore();
+ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
+ GlobalMonitor::Get()->NotifyStore_Locked(&global_monitor_thread_);
TraceMemWr(addr, value, BYTE);
int8_t* ptr = reinterpret_cast<int8_t*>(addr);
*ptr = value;
@@ -2075,6 +2161,7 @@ template <typename T>
T Simulator::ReadMem(int64_t addr, Instruction* instr) {
int alignment_mask = (1 << sizeof(T)) - 1;
if ((addr & alignment_mask) == 0 || kArchVariant == kMips64r6) {
+ local_monitor_.NotifyLoad();
T* ptr = reinterpret_cast<T*>(addr);
TraceMemRd(addr, *ptr);
return *ptr;
@@ -2090,6 +2177,9 @@ template <typename T>
void Simulator::WriteMem(int64_t addr, T value, Instruction* instr) {
int alignment_mask = (1 << sizeof(T)) - 1;
if ((addr & alignment_mask) == 0 || kArchVariant == kMips64r6) {
+ local_monitor_.NotifyStore();
+ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
+ GlobalMonitor::Get()->NotifyStore_Locked(&global_monitor_thread_);
T* ptr = reinterpret_cast<T*>(addr);
*ptr = value;
TraceMemWr(addr, value);
@@ -6913,6 +7003,7 @@ void Simulator::DecodeTypeImmediate() {
set_register(rt_reg, ReadH(rs + se_imm16, instr_.instr()));
break;
case LWL: {
+ local_monitor_.NotifyLoad();
// al_offset is offset of the effective address within an aligned word.
uint8_t al_offset = (rs + se_imm16) & kInt32AlignmentMask;
uint8_t byte_shift = kInt32AlignmentMask - al_offset;
@@ -7026,29 +7117,35 @@ void Simulator::DecodeTypeImmediate() {
break;
}
case LL: {
- // LL/SC sequence cannot be simulated properly
- DCHECK_EQ(kArchVariant, kMips64r2);
- set_register(rt_reg, ReadW(rs + se_imm16, instr_.instr()));
+ DCHECK(kArchVariant != kMips64r6);
+ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
+ addr = rs + se_imm16;
+ set_register(rt_reg, ReadW(addr, instr_.instr()));
+ local_monitor_.NotifyLoadLinked(addr, TransactionSize::Word);
+ GlobalMonitor::Get()->NotifyLoadLinked_Locked(addr,
+ &global_monitor_thread_);
break;
}
case SC: {
- // LL/SC sequence cannot be simulated properly
- DCHECK_EQ(kArchVariant, kMips64r2);
- WriteW(rs + se_imm16, static_cast<int32_t>(rt), instr_.instr());
- set_register(rt_reg, 1);
+ DCHECK(kArchVariant != kMips64r6);
+ addr = rs + se_imm16;
+ WriteConditionalW(addr, static_cast<int32_t>(rt), instr_.instr(), rt_reg);
break;
}
case LLD: {
- // LL/SC sequence cannot be simulated properly
- DCHECK_EQ(kArchVariant, kMips64r2);
- set_register(rt_reg, ReadD(rs + se_imm16, instr_.instr()));
+ DCHECK(kArchVariant != kMips64r6);
+ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
+ addr = rs + se_imm16;
+ set_register(rt_reg, Read2W(addr, instr_.instr()));
+ local_monitor_.NotifyLoadLinked(addr, TransactionSize::DoubleWord);
+ GlobalMonitor::Get()->NotifyLoadLinked_Locked(addr,
+ &global_monitor_thread_);
break;
}
case SCD: {
- // LL/SC sequence cannot be simulated properly
- DCHECK_EQ(kArchVariant, kMips64r2);
- WriteD(rs + se_imm16, rt, instr_.instr());
- set_register(rt_reg, 1);
+ DCHECK(kArchVariant != kMips64r6);
+ addr = rs + se_imm16;
+ WriteConditional2W(addr, rt, instr_.instr(), rt_reg);
break;
}
case LWC1:
@@ -7102,8 +7199,7 @@ void Simulator::DecodeTypeImmediate() {
imm19 <<= (kOpcodeBits + kRsBits + 2);
imm19 >>= (kOpcodeBits + kRsBits + 2);
addr = current_pc + (imm19 << 2);
- uint32_t* ptr = reinterpret_cast<uint32_t*>(addr);
- alu_out = *ptr;
+ alu_out = ReadWU(addr, instr_.instr());
break;
}
case LWPC: {
@@ -7111,8 +7207,7 @@ void Simulator::DecodeTypeImmediate() {
imm19 <<= (kOpcodeBits + kRsBits + 2);
imm19 >>= (kOpcodeBits + kRsBits + 2);
addr = current_pc + (imm19 << 2);
- int32_t* ptr = reinterpret_cast<int32_t*>(addr);
- alu_out = *ptr;
+ alu_out = ReadW(addr, instr_.instr());
break;
}
case ADDIUPC: {
@@ -7137,37 +7232,48 @@ void Simulator::DecodeTypeImmediate() {
case SPECIAL3: {
switch (instr_.FunctionFieldRaw()) {
case LL_R6: {
- // LL/SC sequence cannot be simulated properly
DCHECK_EQ(kArchVariant, kMips64r6);
+ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
int64_t base = get_register(instr_.BaseValue());
int32_t offset9 = instr_.Imm9Value();
- set_register(rt_reg, ReadW(base + offset9, instr_.instr()));
+ addr = base + offset9;
+ DCHECK_EQ(addr & 0x3, 0);
+ set_register(rt_reg, ReadW(addr, instr_.instr()));
+ local_monitor_.NotifyLoadLinked(addr, TransactionSize::Word);
+ GlobalMonitor::Get()->NotifyLoadLinked_Locked(
+ addr, &global_monitor_thread_);
break;
}
case LLD_R6: {
- // LL/SC sequence cannot be simulated properly
DCHECK_EQ(kArchVariant, kMips64r6);
+ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
int64_t base = get_register(instr_.BaseValue());
int32_t offset9 = instr_.Imm9Value();
- set_register(rt_reg, ReadD(base + offset9, instr_.instr()));
+ addr = base + offset9;
+ DCHECK_EQ(addr & kPointerAlignmentMask, 0);
+ set_register(rt_reg, Read2W(addr, instr_.instr()));
+ local_monitor_.NotifyLoadLinked(addr, TransactionSize::DoubleWord);
+ GlobalMonitor::Get()->NotifyLoadLinked_Locked(
+ addr, &global_monitor_thread_);
break;
}
case SC_R6: {
- // LL/SC sequence cannot be simulated properly
DCHECK_EQ(kArchVariant, kMips64r6);
int64_t base = get_register(instr_.BaseValue());
int32_t offset9 = instr_.Imm9Value();
- WriteW(base + offset9, static_cast<int32_t>(rt), instr_.instr());
- set_register(rt_reg, 1);
+ addr = base + offset9;
+ DCHECK_EQ(addr & 0x3, 0);
+ WriteConditionalW(addr, static_cast<int32_t>(rt), instr_.instr(),
+ rt_reg);
break;
}
case SCD_R6: {
- // LL/SC sequence cannot be simulated properly
DCHECK_EQ(kArchVariant, kMips64r6);
int64_t base = get_register(instr_.BaseValue());
int32_t offset9 = instr_.Imm9Value();
- WriteD(base + offset9, rt, instr_.instr());
- set_register(rt_reg, 1);
+ addr = base + offset9;
+ DCHECK_EQ(addr & kPointerAlignmentMask, 0);
+ WriteConditional2W(addr, rt, instr_.instr(), rt_reg);
break;
}
default:
@@ -7402,7 +7508,7 @@ intptr_t Simulator::CallImpl(Address entry, int argument_count,
if (reg_arg_count > 0) set_register(a0, arguments[0]);
if (reg_arg_count > 1) set_register(a1, arguments[1]);
if (reg_arg_count > 2) set_register(a2, arguments[2]);
- if (reg_arg_count > 2) set_register(a3, arguments[3]);
+ if (reg_arg_count > 3) set_register(a3, arguments[3]);
// Up to eight arguments passed in registers in N64 ABI.
// TODO(plind): N64 ABI calls these regs a4 - a7. Clarify this.
@@ -7475,11 +7581,181 @@ uintptr_t Simulator::PopAddress() {
return address;
}
+Simulator::LocalMonitor::LocalMonitor()
+ : access_state_(MonitorAccess::Open),
+ tagged_addr_(0),
+ size_(TransactionSize::None) {}
+
+void Simulator::LocalMonitor::Clear() {
+ access_state_ = MonitorAccess::Open;
+ tagged_addr_ = 0;
+ size_ = TransactionSize::None;
+}
+
+void Simulator::LocalMonitor::NotifyLoad() {
+ if (access_state_ == MonitorAccess::RMW) {
+    // A non-linked load could clear the local monitor. As a result, the
+    // strictest behavior is to unconditionally clear the local monitor on
+    // load.
+ Clear();
+ }
+}
+
+void Simulator::LocalMonitor::NotifyLoadLinked(uintptr_t addr,
+ TransactionSize size) {
+ access_state_ = MonitorAccess::RMW;
+ tagged_addr_ = addr;
+ size_ = size;
+}
+
+void Simulator::LocalMonitor::NotifyStore() {
+ if (access_state_ == MonitorAccess::RMW) {
+ // A non-exclusive store could clear the local monitor. It is therefore
+ // safest to unconditionally clear the local monitor on any store.
+ Clear();
+ }
+}
+
+bool Simulator::LocalMonitor::NotifyStoreConditional(uintptr_t addr,
+ TransactionSize size) {
+ if (access_state_ == MonitorAccess::RMW) {
+ if (addr == tagged_addr_ && size_ == size) {
+ Clear();
+ return true;
+ } else {
+ return false;
+ }
+ } else {
+ DCHECK(access_state_ == MonitorAccess::Open);
+ return false;
+ }
+}
+
+Simulator::GlobalMonitor::LinkedAddress::LinkedAddress()
+ : access_state_(MonitorAccess::Open),
+ tagged_addr_(0),
+ next_(nullptr),
+ prev_(nullptr),
+ failure_counter_(0) {}
+
+void Simulator::GlobalMonitor::LinkedAddress::Clear_Locked() {
+ access_state_ = MonitorAccess::Open;
+ tagged_addr_ = 0;
+}
+
+void Simulator::GlobalMonitor::LinkedAddress::NotifyLoadLinked_Locked(
+ uintptr_t addr) {
+ access_state_ = MonitorAccess::RMW;
+ tagged_addr_ = addr;
+}
+
+void Simulator::GlobalMonitor::LinkedAddress::NotifyStore_Locked() {
+ if (access_state_ == MonitorAccess::RMW) {
+ // A non-exclusive store could clear the global monitor. It is therefore
+ // safest to unconditionally clear the global monitors on any store.
+ Clear_Locked();
+ }
+}
+
+bool Simulator::GlobalMonitor::LinkedAddress::NotifyStoreConditional_Locked(
+ uintptr_t addr, bool is_requesting_thread) {
+ if (access_state_ == MonitorAccess::RMW) {
+ if (is_requesting_thread) {
+ if (addr == tagged_addr_) {
+ Clear_Locked();
+ // Introduce occasional sc/scd failures. This is to simulate the
+ // behavior of hardware, which can randomly fail due to background
+ // cache evictions.
+ if (failure_counter_++ >= kMaxFailureCounter) {
+ failure_counter_ = 0;
+ return false;
+ } else {
+ return true;
+ }
+ }
+ } else if ((addr & kExclusiveTaggedAddrMask) ==
+ (tagged_addr_ & kExclusiveTaggedAddrMask)) {
+ // Check the masked addresses when responding to a successful lock by
+ // another thread so that the implementation is more conservative (i.e.,
+ // the granularity of locking is as large as possible).
+ Clear_Locked();
+ return false;
+ }
+ }
+ return false;
+}
+
+void Simulator::GlobalMonitor::NotifyLoadLinked_Locked(
+ uintptr_t addr, LinkedAddress* linked_address) {
+ linked_address->NotifyLoadLinked_Locked(addr);
+ PrependProcessor_Locked(linked_address);
+}
+
+void Simulator::GlobalMonitor::NotifyStore_Locked(
+ LinkedAddress* linked_address) {
+ // Notify each thread of the store operation.
+ for (LinkedAddress* iter = head_; iter; iter = iter->next_) {
+ iter->NotifyStore_Locked();
+ }
+}
+
+bool Simulator::GlobalMonitor::NotifyStoreConditional_Locked(
+ uintptr_t addr, LinkedAddress* linked_address) {
+ DCHECK(IsProcessorInLinkedList_Locked(linked_address));
+ if (linked_address->NotifyStoreConditional_Locked(addr, true)) {
+ // Notify the other processors that this StoreConditional succeeded.
+ for (LinkedAddress* iter = head_; iter; iter = iter->next_) {
+ if (iter != linked_address) {
+ iter->NotifyStoreConditional_Locked(addr, false);
+ }
+ }
+ return true;
+ } else {
+ return false;
+ }
+}
+
+bool Simulator::GlobalMonitor::IsProcessorInLinkedList_Locked(
+ LinkedAddress* linked_address) const {
+ return head_ == linked_address || linked_address->next_ ||
+ linked_address->prev_;
+}
+
+void Simulator::GlobalMonitor::PrependProcessor_Locked(
+ LinkedAddress* linked_address) {
+ if (IsProcessorInLinkedList_Locked(linked_address)) {
+ return;
+ }
+
+ if (head_) {
+ head_->prev_ = linked_address;
+ }
+ linked_address->prev_ = nullptr;
+ linked_address->next_ = head_;
+ head_ = linked_address;
+}
+
+void Simulator::GlobalMonitor::RemoveLinkedAddress(
+ LinkedAddress* linked_address) {
+ base::MutexGuard lock_guard(&mutex);
+ if (!IsProcessorInLinkedList_Locked(linked_address)) {
+ return;
+ }
+
+ if (linked_address->prev_) {
+ linked_address->prev_->next_ = linked_address->next_;
+ } else {
+ head_ = linked_address->next_;
+ }
+ if (linked_address->next_) {
+ linked_address->next_->prev_ = linked_address->prev_;
+ }
+ linked_address->prev_ = nullptr;
+ linked_address->next_ = nullptr;
+}
+
+#undef SScanF
-#undef UNSUPPORTED
} // namespace internal
} // namespace v8
#endif // USE_SIMULATOR
-
-#endif // V8_TARGET_ARCH_MIPS64
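The hunks above replace the previously unsimulated LL/SC sequences with real
monitor bookkeeping: LL/LLD tag the address in a per-thread local monitor and
in a process-wide global monitor, and SC/SCD only write when both monitors
still hold that tag. A minimal sketch of the local-monitor state machine
(plain C++, not V8's classes; the names and the clear-on-failure policy here
are illustrative assumptions):

#include <cstdint>
#include <iostream>

enum class Access { Open, RMW };

struct LocalMonitor {
  Access state = Access::Open;
  uintptr_t tagged = 0;

  void NotifyLoadLinked(uintptr_t addr) {  // LL/LLD: open a transaction
    state = Access::RMW;
    tagged = addr;
  }
  void NotifyStore() { state = Access::Open; }  // any plain store clears it
  bool NotifyStoreConditional(uintptr_t addr) {  // SC/SCD: consume the tag
    bool ok = (state == Access::RMW && tagged == addr);
    state = Access::Open;
    return ok;
  }
};

int main() {
  LocalMonitor m;
  const uintptr_t a = 0x1000;
  m.NotifyLoadLinked(a);
  std::cout << m.NotifyStoreConditional(a) << "\n";  // 1: undisturbed pair
  m.NotifyLoadLinked(a);
  m.NotifyStore();  // an intervening store invalidates the reservation
  std::cout << m.NotifyStoreConditional(a) << "\n";  // 0: monitor was cleared
}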
diff --git a/deps/v8/src/mips64/simulator-mips64.h b/deps/v8/src/mips64/simulator-mips64.h
index 0ed51c21e1..9691bedd75 100644
--- a/deps/v8/src/mips64/simulator-mips64.h
+++ b/deps/v8/src/mips64/simulator-mips64.h
@@ -12,14 +12,16 @@
#ifndef V8_MIPS64_SIMULATOR_MIPS64_H_
#define V8_MIPS64_SIMULATOR_MIPS64_H_
-#include "src/allocation.h"
-#include "src/mips64/constants-mips64.h"
+// globals.h defines USE_SIMULATOR.
+#include "src/globals.h"
#if defined(USE_SIMULATOR)
// Running with a simulator.
+#include "src/allocation.h"
#include "src/assembler.h"
#include "src/base/hashmap.h"
+#include "src/mips64/constants-mips64.h"
#include "src/simulator-base.h"
namespace v8 {
@@ -326,8 +328,12 @@ class Simulator : public SimulatorBase {
inline uint32_t ReadWU(int64_t addr, Instruction* instr);
inline int32_t ReadW(int64_t addr, Instruction* instr, TraceType t = WORD);
inline void WriteW(int64_t addr, int32_t value, Instruction* instr);
+ void WriteConditionalW(int64_t addr, int32_t value, Instruction* instr,
+ int32_t rt_reg);
inline int64_t Read2W(int64_t addr, Instruction* instr);
inline void Write2W(int64_t addr, int64_t value, Instruction* instr);
+ inline void WriteConditional2W(int64_t addr, int64_t value,
+ Instruction* instr, int32_t rt_reg);
inline double ReadD(int64_t addr, Instruction* instr);
inline void WriteD(int64_t addr, double value, Instruction* instr);
@@ -575,6 +581,98 @@ class Simulator : public SimulatorBase {
char* desc;
};
StopCountAndDesc watched_stops_[kMaxStopCode + 1];
+
+ // Synchronization primitives.
+ enum class MonitorAccess {
+ Open,
+ RMW,
+ };
+
+ enum class TransactionSize {
+ None = 0,
+ Word = 4,
+ DoubleWord = 8,
+ };
+
+ // The least-significant bits of the address are ignored. The number of
+ // ignored bits is implementation-defined, between 3 and log2(minimum page size).
+ static const uintptr_t kExclusiveTaggedAddrMask = ~((1 << 3) - 1);
+
+ class LocalMonitor {
+ public:
+ LocalMonitor();
+
+ // These functions manage the state machine for the local monitor, but do
+ // not actually perform loads and stores. NotifyStoreConditional only
+ // returns true if the store conditional is allowed; the global monitor will
+ // still have to be checked to see whether the memory should be updated.
+ void NotifyLoad();
+ void NotifyLoadLinked(uintptr_t addr, TransactionSize size);
+ void NotifyStore();
+ bool NotifyStoreConditional(uintptr_t addr, TransactionSize size);
+
+ private:
+ void Clear();
+
+ MonitorAccess access_state_;
+ uintptr_t tagged_addr_;
+ TransactionSize size_;
+ };
+
+ class GlobalMonitor {
+ public:
+ class LinkedAddress {
+ public:
+ LinkedAddress();
+
+ private:
+ friend class GlobalMonitor;
+ // These functions manage the state machine for the global monitor, but do
+ // not actually perform loads and stores.
+ void Clear_Locked();
+ void NotifyLoadLinked_Locked(uintptr_t addr);
+ void NotifyStore_Locked();
+ bool NotifyStoreConditional_Locked(uintptr_t addr,
+ bool is_requesting_thread);
+
+ MonitorAccess access_state_;
+ uintptr_t tagged_addr_;
+ LinkedAddress* next_;
+ LinkedAddress* prev_;
+ // An scd can fail due to background cache evictions. Rather than
+ // simulating this, we'll just occasionally introduce cases where a
+ // store conditional fails. This will happen once after every
+ // kMaxFailureCounter exclusive stores.
+ static const int kMaxFailureCounter = 5;
+ int failure_counter_;
+ };
+
+ // Exposed so it can be accessed by Simulator::{Read,Write}Ex*.
+ base::Mutex mutex;
+
+ void NotifyLoadLinked_Locked(uintptr_t addr, LinkedAddress* linked_address);
+ void NotifyStore_Locked(LinkedAddress* linked_address);
+ bool NotifyStoreConditional_Locked(uintptr_t addr,
+ LinkedAddress* linked_address);
+
+ // Called when the simulator is destroyed.
+ void RemoveLinkedAddress(LinkedAddress* linked_address);
+
+ static GlobalMonitor* Get();
+
+ private:
+ // Private constructor. Call {GlobalMonitor::Get()} to get the singleton.
+ GlobalMonitor() = default;
+ friend class base::LeakyObject<GlobalMonitor>;
+
+ bool IsProcessorInLinkedList_Locked(LinkedAddress* linked_address) const;
+ void PrependProcessor_Locked(LinkedAddress* linked_address);
+
+ LinkedAddress* head_ = nullptr;
+ };
+
+ LocalMonitor local_monitor_;
+ GlobalMonitor::LinkedAddress global_monitor_thread_;
};
} // namespace internal
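Because LinkedAddress deliberately fails one in every kMaxFailureCounter
otherwise-successful store-conditionals, code running under the simulator must
treat SC/SCD like any weak compare-and-swap and retry. A hedged sketch of that
retry discipline, using std::atomic's compare_exchange_weak (which may also
fail spuriously) as a stand-in for the LL/SC pair:

#include <atomic>
#include <cstdint>
#include <iostream>

// Increment via a weak CAS loop; a spurious failure just causes one more
// iteration, exactly as a forced scd failure would in the simulator.
uint64_t AtomicIncrement(std::atomic<uint64_t>& cell) {
  uint64_t old = cell.load(std::memory_order_relaxed);
  while (!cell.compare_exchange_weak(old, old + 1,
                                     std::memory_order_acq_rel)) {
    // `old` has been reloaded with the current value; retry.
  }
  return old;
}

int main() {
  std::atomic<uint64_t> counter{41};
  AtomicIncrement(counter);
  std::cout << counter.load() << "\n";  // 42
}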
diff --git a/deps/v8/src/objects-body-descriptors-inl.h b/deps/v8/src/objects-body-descriptors-inl.h
index e91de2bac3..20742892cf 100644
--- a/deps/v8/src/objects-body-descriptors-inl.h
+++ b/deps/v8/src/objects-body-descriptors-inl.h
@@ -5,11 +5,16 @@
#ifndef V8_OBJECTS_BODY_DESCRIPTORS_INL_H_
#define V8_OBJECTS_BODY_DESCRIPTORS_INL_H_
-#include "src/assembler-inl.h"
#include "src/feedback-vector.h"
#include "src/objects-body-descriptors.h"
+#include "src/objects/cell.h"
+#include "src/objects/foreign-inl.h"
#include "src/objects/hash-table.h"
#include "src/objects/js-collection.h"
+#include "src/objects/js-weak-refs.h"
+#include "src/objects/oddball.h"
+#include "src/objects/slots.h"
+#include "src/reloc-info.h"
#include "src/transitions.h"
#include "src/wasm/wasm-objects-inl.h"
@@ -17,17 +22,42 @@ namespace v8 {
namespace internal {
template <int start_offset>
-int FlexibleBodyDescriptor<start_offset>::SizeOf(Map* map, HeapObject* object) {
+int FlexibleBodyDescriptor<start_offset>::SizeOf(Map map, HeapObject object) {
return object->SizeFromMap(map);
}
-bool BodyDescriptorBase::IsValidSlotImpl(Map* map, HeapObject* obj,
- int offset) {
+template <int start_offset>
+int FlexibleWeakBodyDescriptor<start_offset>::SizeOf(Map map,
+ HeapObject object) {
+ return object->SizeFromMap(map);
+}
+
+bool BodyDescriptorBase::IsValidJSObjectSlotImpl(Map map, HeapObject obj,
+ int offset) {
+#ifdef V8_COMPRESS_POINTERS
+ STATIC_ASSERT(kEmbedderDataSlotSize == 2 * kTaggedSize);
+ int embedder_fields_offset = JSObject::GetEmbedderFieldsStartOffset(map);
+ int inobject_fields_offset = map->GetInObjectPropertyOffset(0);
+ // |embedder_fields_offset| may be greater than |inobject_fields_offset|
+ // if the object does not have embedder fields, but the check handles
+ // this case properly.
+ if (embedder_fields_offset <= offset && offset < inobject_fields_offset) {
+ // offset points to embedder fields area:
+ // [embedder_fields_offset, inobject_fields_offset).
+ STATIC_ASSERT(base::bits::IsPowerOfTwo(kEmbedderDataSlotSize));
+ return ((offset - embedder_fields_offset) & (kEmbedderDataSlotSize - 1)) ==
+ EmbedderDataSlot::kTaggedPayloadOffset;
+ }
+#else
+ // We store raw aligned pointers as Smis, so it's safe to treat the whole
+ // embedder field area as tagged slots.
+ STATIC_ASSERT(kEmbedderDataSlotSize == kTaggedSize);
+#endif
if (!FLAG_unbox_double_fields || map->HasFastPointerLayout()) {
return true;
} else {
DCHECK(FLAG_unbox_double_fields);
- DCHECK(IsAligned(offset, kPointerSize));
+ DCHECK(IsAligned(offset, kSystemPointerSize));
LayoutDescriptorHelper helper(map);
DCHECK(!helper.all_fields_tagged());
@@ -36,15 +66,44 @@ bool BodyDescriptorBase::IsValidSlotImpl(Map* map, HeapObject* obj,
}
template <typename ObjectVisitor>
-void BodyDescriptorBase::IterateBodyImpl(Map* map, HeapObject* obj,
- int start_offset, int end_offset,
- ObjectVisitor* v) {
+void BodyDescriptorBase::IterateJSObjectBodyImpl(Map map, HeapObject obj,
+ int start_offset,
+ int end_offset,
+ ObjectVisitor* v) {
+#ifdef V8_COMPRESS_POINTERS
+ STATIC_ASSERT(kEmbedderDataSlotSize == 2 * kTaggedSize);
+ int header_size = JSObject::GetHeaderSize(map);
+ int inobject_fields_offset = map->GetInObjectPropertyOffset(0);
+ // We are always requested to process header and embedder fields.
+ DCHECK_LE(inobject_fields_offset, end_offset);
+ // Embedder fields are located between the header (rounded up to the
+ // system pointer size) and the inobject properties.
+ if (header_size < inobject_fields_offset) {
+ // There are embedder fields.
+ IteratePointers(obj, start_offset, header_size, v);
+ // Iterate only the tagged payload of the embedder slots; skip the raw payload.
+ int embedder_fields_offset = RoundUp(header_size, kSystemPointerSize);
+ DCHECK_EQ(embedder_fields_offset,
+ JSObject::GetEmbedderFieldsStartOffset(map));
+ for (int offset =
+ embedder_fields_offset + EmbedderDataSlot::kTaggedPayloadOffset;
+ offset < inobject_fields_offset; offset += kEmbedderDataSlotSize) {
+ IteratePointer(obj, offset, v);
+ }
+ // Proceed with processing the inobject properties.
+ start_offset = inobject_fields_offset;
+ }
+#else
+ // We store raw aligned pointers as Smis, so it's safe to iterate the whole
+ // embedder field area as tagged slots.
+ STATIC_ASSERT(kEmbedderDataSlotSize == kTaggedSize);
+#endif
if (!FLAG_unbox_double_fields || map->HasFastPointerLayout()) {
IteratePointers(obj, start_offset, end_offset, v);
} else {
DCHECK(FLAG_unbox_double_fields);
- DCHECK(IsAligned(start_offset, kPointerSize) &&
- IsAligned(end_offset, kPointerSize));
+ DCHECK(IsAligned(start_offset, kSystemPointerSize) &&
+ IsAligned(end_offset, kSystemPointerSize));
LayoutDescriptorHelper helper(map);
DCHECK(!helper.all_fields_tagged());
@@ -59,7 +118,7 @@ void BodyDescriptorBase::IterateBodyImpl(Map* map, HeapObject* obj,
}
template <typename ObjectVisitor>
-DISABLE_CFI_PERF void BodyDescriptorBase::IteratePointers(HeapObject* obj,
+DISABLE_CFI_PERF void BodyDescriptorBase::IteratePointers(HeapObject obj,
int start_offset,
int end_offset,
ObjectVisitor* v) {
@@ -68,33 +127,33 @@ DISABLE_CFI_PERF void BodyDescriptorBase::IteratePointers(HeapObject* obj,
}
template <typename ObjectVisitor>
-void BodyDescriptorBase::IteratePointer(HeapObject* obj, int offset,
+void BodyDescriptorBase::IteratePointer(HeapObject obj, int offset,
ObjectVisitor* v) {
v->VisitPointer(obj, HeapObject::RawField(obj, offset));
}
template <typename ObjectVisitor>
DISABLE_CFI_PERF void BodyDescriptorBase::IterateMaybeWeakPointers(
- HeapObject* obj, int start_offset, int end_offset, ObjectVisitor* v) {
+ HeapObject obj, int start_offset, int end_offset, ObjectVisitor* v) {
v->VisitPointers(obj, HeapObject::RawMaybeWeakField(obj, start_offset),
HeapObject::RawMaybeWeakField(obj, end_offset));
}
template <typename ObjectVisitor>
-void BodyDescriptorBase::IterateMaybeWeakPointer(HeapObject* obj, int offset,
+void BodyDescriptorBase::IterateMaybeWeakPointer(HeapObject obj, int offset,
ObjectVisitor* v) {
v->VisitPointer(obj, HeapObject::RawMaybeWeakField(obj, offset));
}
template <typename ObjectVisitor>
DISABLE_CFI_PERF void BodyDescriptorBase::IterateCustomWeakPointers(
- HeapObject* obj, int start_offset, int end_offset, ObjectVisitor* v) {
+ HeapObject obj, int start_offset, int end_offset, ObjectVisitor* v) {
v->VisitCustomWeakPointers(obj, HeapObject::RawField(obj, start_offset),
HeapObject::RawField(obj, end_offset));
}
template <typename ObjectVisitor>
-void BodyDescriptorBase::IterateCustomWeakPointer(HeapObject* obj, int offset,
+void BodyDescriptorBase::IterateCustomWeakPointer(HeapObject obj, int offset,
ObjectVisitor* v) {
v->VisitCustomWeakPointer(obj, HeapObject::RawField(obj, offset));
}
@@ -103,18 +162,18 @@ class JSObject::BodyDescriptor final : public BodyDescriptorBase {
public:
static const int kStartOffset = JSReceiver::kPropertiesOrHashOffset;
- static bool IsValidSlot(Map* map, HeapObject* obj, int offset) {
+ static bool IsValidSlot(Map map, HeapObject obj, int offset) {
if (offset < kStartOffset) return false;
- return IsValidSlotImpl(map, obj, offset);
+ return IsValidJSObjectSlotImpl(map, obj, offset);
}
template <typename ObjectVisitor>
- static inline void IterateBody(Map* map, HeapObject* obj, int object_size,
+ static inline void IterateBody(Map map, HeapObject obj, int object_size,
ObjectVisitor* v) {
- IterateBodyImpl(map, obj, kStartOffset, object_size, v);
+ IterateJSObjectBodyImpl(map, obj, kStartOffset, object_size, v);
}
- static inline int SizeOf(Map* map, HeapObject* object) {
+ static inline int SizeOf(Map map, HeapObject object) {
return map->instance_size();
}
};
@@ -123,41 +182,78 @@ class JSObject::FastBodyDescriptor final : public BodyDescriptorBase {
public:
static const int kStartOffset = JSReceiver::kPropertiesOrHashOffset;
- static bool IsValidSlot(Map* map, HeapObject* obj, int offset) {
+ static bool IsValidSlot(Map map, HeapObject obj, int offset) {
return offset >= kStartOffset;
}
template <typename ObjectVisitor>
- static inline void IterateBody(Map* map, HeapObject* obj, int object_size,
+ static inline void IterateBody(Map map, HeapObject obj, int object_size,
ObjectVisitor* v) {
IteratePointers(obj, kStartOffset, object_size, v);
}
- static inline int SizeOf(Map* map, HeapObject* object) {
+ static inline int SizeOf(Map map, HeapObject object) {
return map->instance_size();
}
};
-class JSFunction::BodyDescriptor final : public BodyDescriptorBase {
+class JSWeakCell::BodyDescriptor final : public BodyDescriptorBase {
public:
- static bool IsValidSlot(Map* map, HeapObject* obj, int offset) {
- if (offset < kSizeWithoutPrototype) return true;
- if (offset < kSizeWithPrototype && map->has_prototype_slot()) {
- return true;
- }
- return IsValidSlotImpl(map, obj, offset);
+ static bool IsValidSlot(Map map, HeapObject obj, int offset) {
+ return IsValidJSObjectSlotImpl(map, obj, offset);
}
template <typename ObjectVisitor>
- static inline void IterateBody(Map* map, HeapObject* obj, int object_size,
+ static inline void IterateBody(Map map, HeapObject obj, int object_size,
ObjectVisitor* v) {
- int header_size = JSFunction::GetHeaderSize(map->has_prototype_slot());
- DCHECK_EQ(header_size, JSObject::GetHeaderSize(map));
- IteratePointers(obj, kPropertiesOrHashOffset, header_size, v);
- IterateBodyImpl(map, obj, header_size, object_size, v);
+ IteratePointers(obj, JSReceiver::kPropertiesOrHashOffset, kTargetOffset, v);
+ IterateCustomWeakPointer(obj, kTargetOffset, v);
+ IterateJSObjectBodyImpl(map, obj, kTargetOffset + kTaggedSize, object_size,
+ v);
}
- static inline int SizeOf(Map* map, HeapObject* object) {
+ static inline int SizeOf(Map map, HeapObject object) {
+ return map->instance_size();
+ }
+};
+
+class JSWeakRef::BodyDescriptor final : public BodyDescriptorBase {
+ public:
+ static bool IsValidSlot(Map map, HeapObject obj, int offset) {
+ return JSObject::BodyDescriptor::IsValidSlot(map, obj, offset);
+ }
+
+ template <typename ObjectVisitor>
+ static inline void IterateBody(Map map, HeapObject obj, int object_size,
+ ObjectVisitor* v) {
+ IteratePointers(obj, JSReceiver::kPropertiesOrHashOffset, kTargetOffset, v);
+ IterateCustomWeakPointer(obj, kTargetOffset, v);
+ IteratePointers(obj, kTargetOffset + kPointerSize, object_size, v);
+ }
+
+ static inline int SizeOf(Map map, HeapObject object) {
+ return map->instance_size();
+ }
+};
+
+class SharedFunctionInfo::BodyDescriptor final : public BodyDescriptorBase {
+ public:
+ static bool IsValidSlot(Map map, HeapObject obj, int offset) {
+ return FixedBodyDescriptor<kStartOfPointerFieldsOffset,
+ kEndOfTaggedFieldsOffset,
+ kAlignedSize>::IsValidSlot(map, obj, offset);
+ }
+
+ template <typename ObjectVisitor>
+ static inline void IterateBody(Map map, HeapObject obj, int object_size,
+ ObjectVisitor* v) {
+ IterateCustomWeakPointer(obj, kFunctionDataOffset, v);
+ IteratePointers(obj,
+ SharedFunctionInfo::kStartOfAlwaysStrongPointerFieldsOffset,
+ SharedFunctionInfo::kEndOfTaggedFieldsOffset, v);
+ }
+
+ static inline int SizeOf(Map map, HeapObject object) {
return map->instance_size();
}
};
@@ -171,7 +267,7 @@ class AllocationSite::BodyDescriptor final : public BodyDescriptorBase {
STATIC_ASSERT(AllocationSite::kPretenureCreateCountOffset + kInt32Size ==
AllocationSite::kWeakNextOffset);
- static bool IsValidSlot(Map* map, HeapObject* obj, int offset) {
+ static bool IsValidSlot(Map map, HeapObject obj, int offset) {
if (offset >= AllocationSite::kStartOffset &&
offset < AllocationSite::kCommonPointerFieldEndOffset) {
return true;
@@ -185,7 +281,7 @@ class AllocationSite::BodyDescriptor final : public BodyDescriptorBase {
}
template <typename ObjectVisitor>
- static inline void IterateBody(Map* map, HeapObject* obj, int object_size,
+ static inline void IterateBody(Map map, HeapObject obj, int object_size,
ObjectVisitor* v) {
// Iterate over all the common pointer fields
IteratePointers(obj, AllocationSite::kStartOffset,
@@ -198,57 +294,49 @@ class AllocationSite::BodyDescriptor final : public BodyDescriptorBase {
}
}
- static inline int SizeOf(Map* map, HeapObject* object) {
+ static inline int SizeOf(Map map, HeapObject object) {
return map->instance_size();
}
};
class JSArrayBuffer::BodyDescriptor final : public BodyDescriptorBase {
public:
- STATIC_ASSERT(kByteLengthOffset + kPointerSize == kBackingStoreOffset);
- STATIC_ASSERT(kBackingStoreOffset + kPointerSize == kBitFieldSlot);
- STATIC_ASSERT(kBitFieldSlot + kPointerSize == kSize);
-
- static bool IsValidSlot(Map* map, HeapObject* obj, int offset) {
- if (offset < kBitFieldSlot) return true;
- if (offset < kSize) return false;
- return IsValidSlotImpl(map, obj, offset);
+ static bool IsValidSlot(Map map, HeapObject obj, int offset) {
+ if (offset < kEndOfTaggedFieldsOffset) return true;
+ if (offset < kHeaderSize) return false;
+ return IsValidJSObjectSlotImpl(map, obj, offset);
}
template <typename ObjectVisitor>
- static inline void IterateBody(Map* map, HeapObject* obj, int object_size,
+ static inline void IterateBody(Map map, HeapObject obj, int object_size,
ObjectVisitor* v) {
// JSArrayBuffer instances contain raw data that the GC does not know about.
- IteratePointers(obj, kPropertiesOrHashOffset, kByteLengthOffset, v);
- IterateBodyImpl(map, obj, kSize, object_size, v);
+ IteratePointers(obj, kPropertiesOrHashOffset, kEndOfTaggedFieldsOffset, v);
+ IterateJSObjectBodyImpl(map, obj, kHeaderSize, object_size, v);
}
- static inline int SizeOf(Map* map, HeapObject* object) {
+ static inline int SizeOf(Map map, HeapObject object) {
return map->instance_size();
}
};
class JSArrayBufferView::BodyDescriptor final : public BodyDescriptorBase {
public:
- STATIC_ASSERT(kBufferOffset + kPointerSize == kByteOffsetOffset);
- STATIC_ASSERT(kByteOffsetOffset + kUIntptrSize == kByteLengthOffset);
- STATIC_ASSERT(kByteLengthOffset + kUIntptrSize == kHeaderSize);
-
- static bool IsValidSlot(Map* map, HeapObject* obj, int offset) {
- if (offset < kByteOffsetOffset) return true;
+ static bool IsValidSlot(Map map, HeapObject obj, int offset) {
+ if (offset < kEndOfTaggedFieldsOffset) return true;
if (offset < kHeaderSize) return false;
- return IsValidSlotImpl(map, obj, offset);
+ return IsValidJSObjectSlotImpl(map, obj, offset);
}
template <typename ObjectVisitor>
- static inline void IterateBody(Map* map, HeapObject* obj, int object_size,
+ static inline void IterateBody(Map map, HeapObject obj, int object_size,
ObjectVisitor* v) {
// JSArrayBufferView contains raw data that the GC does not know about.
- IteratePointers(obj, kPropertiesOrHashOffset, kByteOffsetOffset, v);
- IterateBodyImpl(map, obj, kHeaderSize, object_size, v);
+ IteratePointers(obj, kPropertiesOrHashOffset, kEndOfTaggedFieldsOffset, v);
+ IterateJSObjectBodyImpl(map, obj, kHeaderSize, object_size, v);
}
- static inline int SizeOf(Map* map, HeapObject* object) {
+ static inline int SizeOf(Map map, HeapObject object) {
return map->instance_size();
}
};
@@ -257,65 +345,57 @@ template <typename Derived>
class SmallOrderedHashTable<Derived>::BodyDescriptor final
: public BodyDescriptorBase {
public:
- static bool IsValidSlot(Map* map, HeapObject* obj, int offset) {
- Derived* table = reinterpret_cast<Derived*>(obj);
- if (offset < kDataTableStartOffset) return false;
- if (offset >= table->GetBucketsStartOffset()) return false;
- return IsValidSlotImpl(map, obj, offset);
+ static bool IsValidSlot(Map map, HeapObject obj, int offset) {
+ Derived table = Derived::cast(obj);
+ // Only the data table part contains tagged values.
+ return (offset >= DataTableStartOffset()) &&
+ (offset < table->GetBucketsStartOffset());
}
template <typename ObjectVisitor>
- static inline void IterateBody(Map* map, HeapObject* obj, int object_size,
+ static inline void IterateBody(Map map, HeapObject obj, int object_size,
ObjectVisitor* v) {
- Derived* table = reinterpret_cast<Derived*>(obj);
-
- int offset = kDataTableStartOffset;
- int entry = 0;
- for (int i = 0; i < table->Capacity(); i++) {
- for (int j = 0; j < Derived::kEntrySize; j++) {
- IteratePointer(obj, offset + (entry * kPointerSize), v);
- entry++;
- }
- }
+ Derived table = Derived::cast(obj);
+ int start_offset = DataTableStartOffset();
+ int end_offset = table->GetBucketsStartOffset();
+ IteratePointers(obj, start_offset, end_offset, v);
}
- static inline int SizeOf(Map* map, HeapObject* obj) {
- Derived* table = reinterpret_cast<Derived*>(obj);
+ static inline int SizeOf(Map map, HeapObject obj) {
+ Derived table = Derived::cast(obj);
return table->SizeFor(table->Capacity());
}
};
class ByteArray::BodyDescriptor final : public BodyDescriptorBase {
public:
- static bool IsValidSlot(Map* map, HeapObject* obj, int offset) {
- return false;
- }
+ static bool IsValidSlot(Map map, HeapObject obj, int offset) { return false; }
template <typename ObjectVisitor>
- static inline void IterateBody(Map* map, HeapObject* obj, int object_size,
+ static inline void IterateBody(Map map, HeapObject obj, int object_size,
ObjectVisitor* v) {}
- static inline int SizeOf(Map* map, HeapObject* obj) {
+ static inline int SizeOf(Map map, HeapObject obj) {
return ByteArray::SizeFor(ByteArray::cast(obj)->synchronized_length());
}
};
class BytecodeArray::BodyDescriptor final : public BodyDescriptorBase {
public:
- static bool IsValidSlot(Map* map, HeapObject* obj, int offset) {
+ static bool IsValidSlot(Map map, HeapObject obj, int offset) {
return offset >= kConstantPoolOffset &&
offset <= kSourcePositionTableOffset;
}
template <typename ObjectVisitor>
- static inline void IterateBody(Map* map, HeapObject* obj, int object_size,
+ static inline void IterateBody(Map map, HeapObject obj, int object_size,
ObjectVisitor* v) {
IteratePointer(obj, kConstantPoolOffset, v);
IteratePointer(obj, kHandlerTableOffset, v);
IteratePointer(obj, kSourcePositionTableOffset, v);
}
- static inline int SizeOf(Map* map, HeapObject* obj) {
+ static inline int SizeOf(Map map, HeapObject obj) {
return BytecodeArray::SizeFor(
BytecodeArray::cast(obj)->synchronized_length());
}
@@ -323,30 +403,26 @@ class BytecodeArray::BodyDescriptor final : public BodyDescriptorBase {
class BigInt::BodyDescriptor final : public BodyDescriptorBase {
public:
- static bool IsValidSlot(Map* map, HeapObject* obj, int offset) {
- return false;
- }
+ static bool IsValidSlot(Map map, HeapObject obj, int offset) { return false; }
template <typename ObjectVisitor>
- static inline void IterateBody(Map* map, HeapObject* obj, int object_size,
+ static inline void IterateBody(Map map, HeapObject obj, int object_size,
ObjectVisitor* v) {}
- static inline int SizeOf(Map* map, HeapObject* obj) {
- return BigInt::SizeFor(BigInt::cast(obj)->length());
+ static inline int SizeOf(Map map, HeapObject obj) {
+ return BigInt::SizeFor(BigInt::cast(obj)->synchronized_length());
}
};
class FixedDoubleArray::BodyDescriptor final : public BodyDescriptorBase {
public:
- static bool IsValidSlot(Map* map, HeapObject* obj, int offset) {
- return false;
- }
+ static bool IsValidSlot(Map map, HeapObject obj, int offset) { return false; }
template <typename ObjectVisitor>
- static inline void IterateBody(Map* map, HeapObject* obj, int object_size,
+ static inline void IterateBody(Map map, HeapObject obj, int object_size,
ObjectVisitor* v) {}
- static inline int SizeOf(Map* map, HeapObject* obj) {
+ static inline int SizeOf(Map map, HeapObject obj) {
return FixedDoubleArray::SizeFor(
FixedDoubleArray::cast(obj)->synchronized_length());
}
@@ -354,49 +430,30 @@ class FixedDoubleArray::BodyDescriptor final : public BodyDescriptorBase {
class FixedTypedArrayBase::BodyDescriptor final : public BodyDescriptorBase {
public:
- static bool IsValidSlot(Map* map, HeapObject* obj, int offset) {
+ static bool IsValidSlot(Map map, HeapObject obj, int offset) {
return offset == kBasePointerOffset;
}
template <typename ObjectVisitor>
- static inline void IterateBody(Map* map, HeapObject* obj, int object_size,
+ static inline void IterateBody(Map map, HeapObject obj, int object_size,
ObjectVisitor* v) {
IteratePointer(obj, kBasePointerOffset, v);
}
- static inline int SizeOf(Map* map, HeapObject* object) {
+ static inline int SizeOf(Map map, HeapObject object) {
return FixedTypedArrayBase::cast(object)->size();
}
};
-class WeakArrayBodyDescriptor final : public BodyDescriptorBase {
- public:
- static bool IsValidSlot(Map* map, HeapObject* obj, int offset) {
- return true;
- }
-
- template <typename ObjectVisitor>
- static inline void IterateBody(Map* map, HeapObject* obj, int object_size,
- ObjectVisitor* v) {
- IterateMaybeWeakPointers(obj, HeapObject::kHeaderSize, object_size, v);
- }
-
- static inline int SizeOf(Map* map, HeapObject* object) {
- return object->SizeFromMap(map);
- }
-};
-
class FeedbackMetadata::BodyDescriptor final : public BodyDescriptorBase {
public:
- static bool IsValidSlot(Map* map, HeapObject* obj, int offset) {
- return false;
- }
+ static bool IsValidSlot(Map map, HeapObject obj, int offset) { return false; }
template <typename ObjectVisitor>
- static inline void IterateBody(Map* map, HeapObject* obj, int object_size,
+ static inline void IterateBody(Map map, HeapObject obj, int object_size,
ObjectVisitor* v) {}
- static inline int SizeOf(Map* map, HeapObject* obj) {
+ static inline int SizeOf(Map map, HeapObject obj) {
return FeedbackMetadata::SizeFor(
FeedbackMetadata::cast(obj)->synchronized_slot_count());
}
@@ -404,246 +461,242 @@ class FeedbackMetadata::BodyDescriptor final : public BodyDescriptorBase {
class FeedbackVector::BodyDescriptor final : public BodyDescriptorBase {
public:
- static bool IsValidSlot(Map* map, HeapObject* obj, int offset) {
+ static bool IsValidSlot(Map map, HeapObject obj, int offset) {
return offset == kSharedFunctionInfoOffset ||
offset == kOptimizedCodeOffset || offset >= kFeedbackSlotsOffset;
}
template <typename ObjectVisitor>
- static inline void IterateBody(Map* map, HeapObject* obj, int object_size,
+ static inline void IterateBody(Map map, HeapObject obj, int object_size,
ObjectVisitor* v) {
IteratePointer(obj, kSharedFunctionInfoOffset, v);
IterateMaybeWeakPointer(obj, kOptimizedCodeOffset, v);
IterateMaybeWeakPointers(obj, kFeedbackSlotsOffset, object_size, v);
}
- static inline int SizeOf(Map* map, HeapObject* obj) {
+ static inline int SizeOf(Map map, HeapObject obj) {
return FeedbackVector::SizeFor(FeedbackVector::cast(obj)->length());
}
};
-class PreParsedScopeData::BodyDescriptor final : public BodyDescriptorBase {
+class PreparseData::BodyDescriptor final : public BodyDescriptorBase {
public:
- static bool IsValidSlot(Map* map, HeapObject* obj, int offset) {
- return offset == kScopeDataOffset || offset >= kChildDataStartOffset;
+ static bool IsValidSlot(Map map, HeapObject obj, int offset) {
+ return offset >= PreparseData::cast(obj)->inner_start_offset();
}
template <typename ObjectVisitor>
- static inline void IterateBody(Map* map, HeapObject* obj, int object_size,
+ static inline void IterateBody(Map map, HeapObject obj, int object_size,
ObjectVisitor* v) {
- IteratePointer(obj, kScopeDataOffset, v);
- IteratePointers(obj, kChildDataStartOffset, object_size, v);
+ PreparseData data = PreparseData::cast(obj);
+ int start_offset = data->inner_start_offset();
+ int end_offset = start_offset + data->children_length() * kTaggedSize;
+ IteratePointers(obj, start_offset, end_offset, v);
}
- static inline int SizeOf(Map* map, HeapObject* obj) {
- return PreParsedScopeData::SizeFor(PreParsedScopeData::cast(obj)->length());
+ static inline int SizeOf(Map map, HeapObject obj) {
+ PreparseData data = PreparseData::cast(obj);
+ return PreparseData::SizeFor(data->data_length(), data->children_length());
}
};
class PrototypeInfo::BodyDescriptor final : public BodyDescriptorBase {
public:
- static bool IsValidSlot(Map* map, HeapObject* obj, int offset) {
+ static bool IsValidSlot(Map map, HeapObject obj, int offset) {
return offset >= HeapObject::kHeaderSize;
}
template <typename ObjectVisitor>
- static inline void IterateBody(Map* map, HeapObject* obj, int object_size,
+ static inline void IterateBody(Map map, HeapObject obj, int object_size,
ObjectVisitor* v) {
IteratePointers(obj, HeapObject::kHeaderSize, kObjectCreateMapOffset, v);
IterateMaybeWeakPointer(obj, kObjectCreateMapOffset, v);
- IteratePointers(obj, kObjectCreateMapOffset + kPointerSize, object_size, v);
+ IteratePointers(obj, kObjectCreateMapOffset + kTaggedSize, object_size, v);
}
- static inline int SizeOf(Map* map, HeapObject* obj) {
+ static inline int SizeOf(Map map, HeapObject obj) {
return obj->SizeFromMap(map);
}
};
class JSWeakCollection::BodyDescriptorImpl final : public BodyDescriptorBase {
public:
- STATIC_ASSERT(kTableOffset + kPointerSize == kSize);
+ STATIC_ASSERT(kTableOffset + kTaggedSize == kSize);
- static bool IsValidSlot(Map* map, HeapObject* obj, int offset) {
- return IsValidSlotImpl(map, obj, offset);
+ static bool IsValidSlot(Map map, HeapObject obj, int offset) {
+ return IsValidJSObjectSlotImpl(map, obj, offset);
}
template <typename ObjectVisitor>
- static inline void IterateBody(Map* map, HeapObject* obj, int object_size,
+ static inline void IterateBody(Map map, HeapObject obj, int object_size,
ObjectVisitor* v) {
- IterateBodyImpl(map, obj, kPropertiesOrHashOffset, object_size, v);
+ IterateJSObjectBodyImpl(map, obj, kPropertiesOrHashOffset, object_size, v);
}
- static inline int SizeOf(Map* map, HeapObject* object) {
+ static inline int SizeOf(Map map, HeapObject object) {
return map->instance_size();
}
};
class Foreign::BodyDescriptor final : public BodyDescriptorBase {
public:
- static bool IsValidSlot(Map* map, HeapObject* obj, int offset) {
- return false;
- }
+ static bool IsValidSlot(Map map, HeapObject obj, int offset) { return false; }
template <typename ObjectVisitor>
- static inline void IterateBody(Map* map, HeapObject* obj, int object_size,
+ static inline void IterateBody(Map map, HeapObject obj, int object_size,
ObjectVisitor* v) {
- v->VisitExternalReference(Foreign::cast(obj),
- reinterpret_cast<Address*>(HeapObject::RawField(
- obj, kForeignAddressOffset)));
+ v->VisitExternalReference(
+ Foreign::cast(obj),
+ reinterpret_cast<Address*>(
+ HeapObject::RawField(obj, kForeignAddressOffset).address()));
}
- static inline int SizeOf(Map* map, HeapObject* object) { return kSize; }
+ static inline int SizeOf(Map map, HeapObject object) { return kSize; }
};
class ExternalOneByteString::BodyDescriptor final : public BodyDescriptorBase {
public:
- static bool IsValidSlot(Map* map, HeapObject* obj, int offset) {
- return false;
- }
+ static bool IsValidSlot(Map map, HeapObject obj, int offset) { return false; }
template <typename ObjectVisitor>
- static inline void IterateBody(Map* map, HeapObject* obj, int object_size,
+ static inline void IterateBody(Map map, HeapObject obj, int object_size,
ObjectVisitor* v) {}
- static inline int SizeOf(Map* map, HeapObject* object) { return kSize; }
+ static inline int SizeOf(Map map, HeapObject object) { return kSize; }
};
class ExternalTwoByteString::BodyDescriptor final : public BodyDescriptorBase {
public:
- static bool IsValidSlot(Map* map, HeapObject* obj, int offset) {
- return false;
- }
+ static bool IsValidSlot(Map map, HeapObject obj, int offset) { return false; }
template <typename ObjectVisitor>
- static inline void IterateBody(Map* map, HeapObject* obj, int object_size,
+ static inline void IterateBody(Map map, HeapObject obj, int object_size,
ObjectVisitor* v) {}
- static inline int SizeOf(Map* map, HeapObject* object) { return kSize; }
+ static inline int SizeOf(Map map, HeapObject object) { return kSize; }
};
class Code::BodyDescriptor final : public BodyDescriptorBase {
public:
- STATIC_ASSERT(kRelocationInfoOffset + kPointerSize ==
+ STATIC_ASSERT(kRelocationInfoOffset + kTaggedSize ==
kDeoptimizationDataOffset);
- STATIC_ASSERT(kDeoptimizationDataOffset + kPointerSize ==
+ STATIC_ASSERT(kDeoptimizationDataOffset + kTaggedSize ==
kSourcePositionTableOffset);
- STATIC_ASSERT(kSourcePositionTableOffset + kPointerSize ==
+ STATIC_ASSERT(kSourcePositionTableOffset + kTaggedSize ==
kCodeDataContainerOffset);
- STATIC_ASSERT(kCodeDataContainerOffset + kPointerSize == kDataStart);
+ STATIC_ASSERT(kCodeDataContainerOffset + kTaggedSize == kDataStart);
- static bool IsValidSlot(Map* map, HeapObject* obj, int offset) {
+ static bool IsValidSlot(Map map, HeapObject obj, int offset) {
// Slots in code can't be invalid because we never trim code objects.
return true;
}
template <typename ObjectVisitor>
- static inline void IterateBody(Map* map, HeapObject* obj, ObjectVisitor* v) {
- int mode_mask = RelocInfo::ModeMask(RelocInfo::CODE_TARGET) |
- RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
- RelocInfo::ModeMask(RelocInfo::EXTERNAL_REFERENCE) |
- RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE) |
- RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE_ENCODED) |
- RelocInfo::ModeMask(RelocInfo::OFF_HEAP_TARGET) |
- RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY);
+ static inline void IterateBody(Map map, HeapObject obj, ObjectVisitor* v) {
+ static constexpr int kModeMask =
+ RelocInfo::ModeMask(RelocInfo::CODE_TARGET) |
+ RelocInfo::ModeMask(RelocInfo::RELATIVE_CODE_TARGET) |
+ RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
+ RelocInfo::ModeMask(RelocInfo::EXTERNAL_REFERENCE) |
+ RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE) |
+ RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE_ENCODED) |
+ RelocInfo::ModeMask(RelocInfo::OFF_HEAP_TARGET) |
+ RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY);
// GC does not visit data/code in the header and in the body directly.
IteratePointers(obj, kRelocationInfoOffset, kDataStart, v);
- RelocIterator it(Code::cast(obj), mode_mask);
+ RelocIterator it(Code::cast(obj), kModeMask);
v->VisitRelocInfo(&it);
}
template <typename ObjectVisitor>
- static inline void IterateBody(Map* map, HeapObject* obj, int object_size,
+ static inline void IterateBody(Map map, HeapObject obj, int object_size,
ObjectVisitor* v) {
IterateBody(map, obj, v);
}
- static inline int SizeOf(Map* map, HeapObject* object) {
- return reinterpret_cast<Code*>(object)->CodeSize();
+ static inline int SizeOf(Map map, HeapObject object) {
+ return Code::unchecked_cast(object)->CodeSize();
}
};
class SeqOneByteString::BodyDescriptor final : public BodyDescriptorBase {
public:
- static bool IsValidSlot(Map* map, HeapObject* obj, int offset) {
- return false;
- }
+ static bool IsValidSlot(Map map, HeapObject obj, int offset) { return false; }
template <typename ObjectVisitor>
- static inline void IterateBody(Map* map, HeapObject* obj, int object_size,
+ static inline void IterateBody(Map map, HeapObject obj, int object_size,
ObjectVisitor* v) {}
- static inline int SizeOf(Map* map, HeapObject* obj) {
- SeqOneByteString* string = SeqOneByteString::cast(obj);
+ static inline int SizeOf(Map map, HeapObject obj) {
+ SeqOneByteString string = SeqOneByteString::cast(obj);
return string->SizeFor(string->synchronized_length());
}
};
class SeqTwoByteString::BodyDescriptor final : public BodyDescriptorBase {
public:
- static bool IsValidSlot(Map* map, HeapObject* obj, int offset) {
- return false;
- }
+ static bool IsValidSlot(Map map, HeapObject obj, int offset) { return false; }
template <typename ObjectVisitor>
- static inline void IterateBody(Map* map, HeapObject* obj, int object_size,
+ static inline void IterateBody(Map map, HeapObject obj, int object_size,
ObjectVisitor* v) {}
- static inline int SizeOf(Map* map, HeapObject* obj) {
- SeqTwoByteString* string = SeqTwoByteString::cast(obj);
+ static inline int SizeOf(Map map, HeapObject obj) {
+ SeqTwoByteString string = SeqTwoByteString::cast(obj);
return string->SizeFor(string->synchronized_length());
}
};
class WasmInstanceObject::BodyDescriptor final : public BodyDescriptorBase {
public:
- static bool IsValidSlot(Map* map, HeapObject* obj, int offset) {
+ static bool IsValidSlot(Map map, HeapObject obj, int offset) {
if (offset < kMemoryStartOffset) return true;
if (offset < kModuleObjectOffset) return false;
- return IsValidSlotImpl(map, obj, offset);
+ return IsValidJSObjectSlotImpl(map, obj, offset);
}
template <typename ObjectVisitor>
- static inline void IterateBody(Map* map, HeapObject* obj, int object_size,
+ static inline void IterateBody(Map map, HeapObject obj, int object_size,
ObjectVisitor* v) {
- IteratePointers(obj, kPropertiesOrHashOffset, kFirstUntaggedOffset, v);
- IterateBodyImpl(map, obj, kSize, object_size, v);
+ IteratePointers(obj, kPropertiesOrHashOffset, kEndOfTaggedFieldsOffset, v);
+ IterateJSObjectBodyImpl(map, obj, kSize, object_size, v);
}
- static inline int SizeOf(Map* map, HeapObject* object) {
+ static inline int SizeOf(Map map, HeapObject object) {
return map->instance_size();
}
};
class Map::BodyDescriptor final : public BodyDescriptorBase {
public:
- static bool IsValidSlot(Map* map, HeapObject* obj, int offset) {
+ static bool IsValidSlot(Map map, HeapObject obj, int offset) {
return offset >= Map::kPointerFieldsBeginOffset &&
offset < Map::kPointerFieldsEndOffset;
}
template <typename ObjectVisitor>
- static inline void IterateBody(Map* map, HeapObject* obj, int object_size,
+ static inline void IterateBody(Map map, HeapObject obj, int object_size,
ObjectVisitor* v) {
IteratePointers(obj, Map::kPointerFieldsBeginOffset,
Map::kTransitionsOrPrototypeInfoOffset, v);
IterateMaybeWeakPointer(obj, kTransitionsOrPrototypeInfoOffset, v);
- IteratePointers(obj, Map::kTransitionsOrPrototypeInfoOffset + kPointerSize,
+ IteratePointers(obj, Map::kTransitionsOrPrototypeInfoOffset + kTaggedSize,
Map::kPointerFieldsEndOffset, v);
}
- static inline int SizeOf(Map* map, HeapObject* obj) { return Map::kSize; }
+ static inline int SizeOf(Map map, HeapObject obj) { return Map::kSize; }
};
class DataHandler::BodyDescriptor final : public BodyDescriptorBase {
public:
- static bool IsValidSlot(Map* map, HeapObject* obj, int offset) {
+ static bool IsValidSlot(Map map, HeapObject obj, int offset) {
return offset >= HeapObject::kHeaderSize;
}
template <typename ObjectVisitor>
- static inline void IterateBody(Map* map, HeapObject* obj, int object_size,
+ static inline void IterateBody(Map map, HeapObject obj, int object_size,
ObjectVisitor* v) {
static_assert(kSmiHandlerOffset < kData1Offset,
"Field order must be in sync with this iteration code");
@@ -653,41 +706,40 @@ class DataHandler::BodyDescriptor final : public BodyDescriptorBase {
IterateMaybeWeakPointers(obj, kData1Offset, object_size, v);
}
- static inline int SizeOf(Map* map, HeapObject* object) {
+ static inline int SizeOf(Map map, HeapObject object) {
return object->SizeFromMap(map);
}
};
-class Context::BodyDescriptor final : public BodyDescriptorBase {
+class NativeContext::BodyDescriptor final : public BodyDescriptorBase {
public:
- static bool IsValidSlot(Map* map, HeapObject* obj, int offset) {
- return offset >= Context::kHeaderSize && offset < Context::kSize;
+ static bool IsValidSlot(Map map, HeapObject obj, int offset) {
+ return offset < NativeContext::kEndOfTaggedFieldsOffset;
}
template <typename ObjectVisitor>
- static inline void IterateBody(Map* map, HeapObject* obj, int object_size,
+ static inline void IterateBody(Map map, HeapObject obj, int object_size,
ObjectVisitor* v) {
- IteratePointers(obj, Context::kHeaderSize,
- Context::kHeaderSize + FIRST_WEAK_SLOT * kPointerSize, v);
- IterateCustomWeakPointers(
- obj, Context::kHeaderSize + FIRST_WEAK_SLOT * kPointerSize,
- Context::kSize, v);
+ IteratePointers(obj, NativeContext::kStartOfStrongFieldsOffset,
+ NativeContext::kEndOfStrongFieldsOffset, v);
+ IterateCustomWeakPointers(obj, NativeContext::kStartOfWeakFieldsOffset,
+ NativeContext::kEndOfWeakFieldsOffset, v);
}
- static inline int SizeOf(Map* map, HeapObject* object) {
- return Context::kSize;
+ static inline int SizeOf(Map map, HeapObject object) {
+ return NativeContext::kSize;
}
};
class CodeDataContainer::BodyDescriptor final : public BodyDescriptorBase {
public:
- static bool IsValidSlot(Map* map, HeapObject* obj, int offset) {
+ static bool IsValidSlot(Map map, HeapObject obj, int offset) {
return offset >= CodeDataContainer::kHeaderSize &&
offset < CodeDataContainer::kSize;
}
template <typename ObjectVisitor>
- static inline void IterateBody(Map* map, HeapObject* obj, int object_size,
+ static inline void IterateBody(Map map, HeapObject obj, int object_size,
ObjectVisitor* v) {
IteratePointers(obj, CodeDataContainer::kHeaderSize,
CodeDataContainer::kPointerFieldsStrongEndOffset, v);
@@ -696,11 +748,53 @@ class CodeDataContainer::BodyDescriptor final : public BodyDescriptorBase {
CodeDataContainer::kPointerFieldsWeakEndOffset, v);
}
- static inline int SizeOf(Map* map, HeapObject* object) {
+ static inline int SizeOf(Map map, HeapObject object) {
return CodeDataContainer::kSize;
}
};
+class EmbedderDataArray::BodyDescriptor final : public BodyDescriptorBase {
+ public:
+ static bool IsValidSlot(Map map, HeapObject obj, int offset) {
+#ifdef V8_COMPRESS_POINTERS
+ STATIC_ASSERT(kEmbedderDataSlotSize == 2 * kSystemPointerSize);
+ STATIC_ASSERT(base::bits::IsPowerOfTwo(kEmbedderDataSlotSize));
+ return (offset < EmbedderDataArray::kHeaderSize) ||
+ (((offset - EmbedderDataArray::kHeaderSize) &
+ (kEmbedderDataSlotSize - 1)) ==
+ EmbedderDataSlot::kTaggedPayloadOffset);
+#else
+ STATIC_ASSERT(kEmbedderDataSlotSize == kTaggedSize);
+ // We store raw aligned pointers as Smis, so it's safe to iterate the whole
+ // array.
+ return true;
+#endif
+ }
+
+ template <typename ObjectVisitor>
+ static inline void IterateBody(Map map, HeapObject obj, int object_size,
+ ObjectVisitor* v) {
+#ifdef V8_COMPRESS_POINTERS
+ STATIC_ASSERT(kEmbedderDataSlotSize == 2 * kSystemPointerSize);
+ // Iterate only the tagged payload of the embedder slots; skip the raw payload.
+ for (int offset = EmbedderDataArray::OffsetOfElementAt(0) +
+ EmbedderDataSlot::kTaggedPayloadOffset;
+ offset < object_size; offset += kEmbedderDataSlotSize) {
+ IteratePointer(obj, offset, v);
+ }
+#else
+ // We store raw aligned pointers as Smis, so it's safe to iterate the whole
+ // array.
+ STATIC_ASSERT(kEmbedderDataSlotSize == kTaggedSize);
+ IteratePointers(obj, EmbedderDataArray::kHeaderSize, object_size, v);
+#endif
+ }
+
+ static inline int SizeOf(Map map, HeapObject object) {
+ return object->SizeFromMap(map);
+ }
+};
+
template <typename Op, typename ReturnType, typename T1, typename T2,
typename T3, typename T4>
ReturnType BodyDescriptorApply(InstanceType type, T1 p1, T2 p2, T3 p3, T4 p4) {
@@ -727,11 +821,15 @@ ReturnType BodyDescriptorApply(InstanceType type, T1 p1, T2 p2, T3 p3, T4 p4) {
}
switch (type) {
+ case EMBEDDER_DATA_ARRAY_TYPE:
+ return Op::template apply<EmbedderDataArray::BodyDescriptor>(p1, p2, p3,
+ p4);
case FIXED_ARRAY_TYPE:
case OBJECT_BOILERPLATE_DESCRIPTION_TYPE:
case HASH_TABLE_TYPE:
case ORDERED_HASH_MAP_TYPE:
case ORDERED_HASH_SET_TYPE:
+ case ORDERED_NAME_DICTIONARY_TYPE:
case NAME_DICTIONARY_TYPE:
case GLOBAL_DICTIONARY_TYPE:
case NUMBER_DICTIONARY_TYPE:
@@ -740,6 +838,7 @@ ReturnType BodyDescriptorApply(InstanceType type, T1 p1, T2 p2, T3 p3, T4 p4) {
case EPHEMERON_HASH_TABLE_TYPE:
case SCOPE_INFO_TYPE:
case SCRIPT_CONTEXT_TABLE_TYPE:
+ return Op::template apply<FixedArray::BodyDescriptor>(p1, p2, p3, p4);
case AWAIT_CONTEXT_TYPE:
case BLOCK_CONTEXT_TYPE:
case CATCH_CONTEXT_TYPE:
@@ -747,10 +846,11 @@ ReturnType BodyDescriptorApply(InstanceType type, T1 p1, T2 p2, T3 p3, T4 p4) {
case EVAL_CONTEXT_TYPE:
case FUNCTION_CONTEXT_TYPE:
case MODULE_CONTEXT_TYPE:
- case NATIVE_CONTEXT_TYPE:
case SCRIPT_CONTEXT_TYPE:
case WITH_CONTEXT_TYPE:
- return Op::template apply<FixedArray::BodyDescriptor>(p1, p2, p3, p4);
+ return Op::template apply<Context::BodyDescriptor>(p1, p2, p3, p4);
+ case NATIVE_CONTEXT_TYPE:
+ return Op::template apply<NativeContext::BodyDescriptor>(p1, p2, p3, p4);
case WEAK_FIXED_ARRAY_TYPE:
return Op::template apply<WeakFixedArray::BodyDescriptor>(p1, p2, p3, p4);
case WEAK_ARRAY_LIST_TYPE:
@@ -779,6 +879,7 @@ ReturnType BodyDescriptorApply(InstanceType type, T1 p1, T2 p2, T3 p3, T4 p4) {
case JS_PROMISE_TYPE:
case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
case JS_GENERATOR_OBJECT_TYPE:
+ case JS_ASYNC_FUNCTION_OBJECT_TYPE:
case JS_ASYNC_GENERATOR_OBJECT_TYPE:
case JS_VALUE_TYPE:
case JS_DATE_TYPE:
@@ -801,6 +902,8 @@ ReturnType BodyDescriptorApply(InstanceType type, T1 p1, T2 p2, T3 p3, T4 p4) {
case JS_SPECIAL_API_OBJECT_TYPE:
case JS_MESSAGE_OBJECT_TYPE:
case JS_BOUND_FUNCTION_TYPE:
+ case JS_WEAK_FACTORY_CLEANUP_ITERATOR_TYPE:
+ case JS_WEAK_FACTORY_TYPE:
#ifdef V8_INTL_SUPPORT
case JS_INTL_V8_BREAK_ITERATOR_TYPE:
case JS_INTL_COLLATOR_TYPE:
@@ -810,6 +913,7 @@ ReturnType BodyDescriptorApply(InstanceType type, T1 p1, T2 p2, T3 p3, T4 p4) {
case JS_INTL_NUMBER_FORMAT_TYPE:
case JS_INTL_PLURAL_RULES_TYPE:
case JS_INTL_RELATIVE_TIME_FORMAT_TYPE:
+ case JS_INTL_SEGMENT_ITERATOR_TYPE:
case JS_INTL_SEGMENTER_TYPE:
#endif // V8_INTL_SUPPORT
case WASM_EXCEPTION_TYPE:
@@ -833,6 +937,10 @@ ReturnType BodyDescriptorApply(InstanceType type, T1 p1, T2 p2, T3 p3, T4 p4) {
return Op::template apply<JSTypedArray::BodyDescriptor>(p1, p2, p3, p4);
case JS_FUNCTION_TYPE:
return Op::template apply<JSFunction::BodyDescriptor>(p1, p2, p3, p4);
+ case JS_WEAK_CELL_TYPE:
+ return Op::template apply<JSWeakCell::BodyDescriptor>(p1, p2, p3, p4);
+ case JS_WEAK_REF_TYPE:
+ return Op::template apply<JSWeakRef::BodyDescriptor>(p1, p2, p3, p4);
case ODDBALL_TYPE:
return Op::template apply<Oddball::BodyDescriptor>(p1, p2, p3, p4);
case JS_PROXY_TYPE:
@@ -859,18 +967,21 @@ ReturnType BodyDescriptorApply(InstanceType type, T1 p1, T2 p2, T3 p3, T4 p4) {
return Op::template apply<
SmallOrderedHashTable<SmallOrderedHashMap>::BodyDescriptor>(p1, p2,
p3, p4);
+ case SMALL_ORDERED_NAME_DICTIONARY_TYPE:
+ return Op::template apply<
+ SmallOrderedHashTable<SmallOrderedNameDictionary>::BodyDescriptor>(
+ p1, p2, p3, p4);
case CODE_DATA_CONTAINER_TYPE:
return Op::template apply<CodeDataContainer::BodyDescriptor>(p1, p2, p3,
p4);
- case PRE_PARSED_SCOPE_DATA_TYPE:
- return Op::template apply<PreParsedScopeData::BodyDescriptor>(p1, p2, p3,
- p4);
- case UNCOMPILED_DATA_WITHOUT_PRE_PARSED_SCOPE_TYPE:
- return Op::template apply<
- UncompiledDataWithoutPreParsedScope::BodyDescriptor>(p1, p2, p3, p4);
- case UNCOMPILED_DATA_WITH_PRE_PARSED_SCOPE_TYPE:
+ case PREPARSE_DATA_TYPE:
+ return Op::template apply<PreparseData::BodyDescriptor>(p1, p2, p3, p4);
+ case UNCOMPILED_DATA_WITHOUT_PREPARSE_DATA_TYPE:
return Op::template apply<
- UncompiledDataWithPreParsedScope::BodyDescriptor>(p1, p2, p3, p4);
+ UncompiledDataWithoutPreparseData::BodyDescriptor>(p1, p2, p3, p4);
+ case UNCOMPILED_DATA_WITH_PREPARSE_DATA_TYPE:
+ return Op::template apply<UncompiledDataWithPreparseData::BodyDescriptor>(
+ p1, p2, p3, p4);
case HEAP_NUMBER_TYPE:
case MUTABLE_HEAP_NUMBER_TYPE:
case FILLER_TYPE:
@@ -916,29 +1027,29 @@ ReturnType BodyDescriptorApply(InstanceType type, T1 p1, T2 p2, T3 p3, T4 p4) {
template <typename ObjectVisitor>
void HeapObject::IterateFast(ObjectVisitor* v) {
- BodyDescriptorBase::IteratePointer(this, kMapOffset, v);
+ BodyDescriptorBase::IteratePointer(*this, kMapOffset, v);
IterateBodyFast(v);
}
template <typename ObjectVisitor>
void HeapObject::IterateBodyFast(ObjectVisitor* v) {
- Map* m = map();
+ Map m = map();
IterateBodyFast(m, SizeFromMap(m), v);
}
struct CallIterateBody {
template <typename BodyDescriptor, typename ObjectVisitor>
- static void apply(Map* map, HeapObject* obj, int object_size,
+ static void apply(Map map, HeapObject obj, int object_size,
ObjectVisitor* v) {
BodyDescriptor::IterateBody(map, obj, object_size, v);
}
};
template <typename ObjectVisitor>
-void HeapObject::IterateBodyFast(Map* map, int object_size, ObjectVisitor* v) {
- BodyDescriptorApply<CallIterateBody, void>(map->instance_type(), map, this,
+void HeapObject::IterateBodyFast(Map map, int object_size, ObjectVisitor* v) {
+ BodyDescriptorApply<CallIterateBody, void>(map->instance_type(), map, *this,
object_size, v);
}
} // namespace internal
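The V8_COMPRESS_POINTERS paths above all follow one rule: an embedder data
slot is a {tagged payload, raw payload} pair of size kEmbedderDataSlotSize,
and the visitor may only touch the tagged half. A small sketch of that stride
pattern (the constants and the payload offset are illustrative assumptions,
not V8's actual values):

#include <iostream>
#include <vector>

constexpr int kTaggedSize = 4;  // compressed tagged pointer
constexpr int kEmbedderDataSlotSize = 2 * kTaggedSize;
constexpr int kTaggedPayloadOffset = 0;  // assumed for this demo

// Collect the offsets a body descriptor would pass to IteratePointer().
std::vector<int> TaggedSlotOffsets(int fields_start, int fields_end) {
  std::vector<int> offsets;
  for (int offset = fields_start + kTaggedPayloadOffset;
       offset < fields_end; offset += kEmbedderDataSlotSize) {
    offsets.push_back(offset);  // tagged half only; raw half is skipped
  }
  return offsets;
}

int main() {
  for (int off : TaggedSlotOffsets(/*fields_start=*/16, /*fields_end=*/32)) {
    std::cout << off << " ";  // prints "16 24": one visit per 8-byte slot
  }
  std::cout << "\n";
}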
diff --git a/deps/v8/src/objects-body-descriptors.h b/deps/v8/src/objects-body-descriptors.h
index a1dc0f7ffa..0d381a40a6 100644
--- a/deps/v8/src/objects-body-descriptors.h
+++ b/deps/v8/src/objects-body-descriptors.h
@@ -6,6 +6,7 @@
#define V8_OBJECTS_BODY_DESCRIPTORS_H_
#include "src/objects.h"
+#include "src/objects/map.h"
namespace v8 {
namespace internal {
@@ -18,50 +19,55 @@ namespace internal {
// It is used for invalid slots filtering. If the offset points outside
// of the object or to the map word, the result is UNDEFINED (!!!).
//
-// static bool IsValidSlot(Map* map, HeapObject* obj, int offset);
+// static bool IsValidSlot(Map map, HeapObject obj, int offset);
//
//
// 2) Iterate object's body using stateful object visitor.
//
// template <typename ObjectVisitor>
-// static inline void IterateBody(Map* map, HeapObject* obj, int object_size,
+// static inline void IterateBody(Map map, HeapObject obj, int object_size,
// ObjectVisitor* v);
class BodyDescriptorBase {
public:
template <typename ObjectVisitor>
- static inline void IteratePointers(HeapObject* obj, int start_offset,
+ static inline void IteratePointers(HeapObject obj, int start_offset,
int end_offset, ObjectVisitor* v);
template <typename ObjectVisitor>
- static inline void IteratePointer(HeapObject* obj, int offset,
+ static inline void IteratePointer(HeapObject obj, int offset,
ObjectVisitor* v);
template <typename ObjectVisitor>
- static inline void IterateCustomWeakPointers(HeapObject* obj,
- int start_offset, int end_offset,
+ static inline void IterateCustomWeakPointers(HeapObject obj, int start_offset,
+ int end_offset,
ObjectVisitor* v);
template <typename ObjectVisitor>
- static inline void IterateCustomWeakPointer(HeapObject* obj, int offset,
+ static inline void IterateCustomWeakPointer(HeapObject obj, int offset,
ObjectVisitor* v);
template <typename ObjectVisitor>
- static inline void IterateMaybeWeakPointers(HeapObject* obj, int start_offset,
+ static inline void IterateMaybeWeakPointers(HeapObject obj, int start_offset,
int end_offset, ObjectVisitor* v);
template <typename ObjectVisitor>
- static inline void IterateMaybeWeakPointer(HeapObject* obj, int offset,
+ static inline void IterateMaybeWeakPointer(HeapObject obj, int offset,
ObjectVisitor* v);
protected:
// Returns true for all header and embedder fields.
- static inline bool IsValidSlotImpl(Map* map, HeapObject* obj, int offset);
+ static inline bool IsValidJSObjectSlotImpl(Map map, HeapObject obj,
+ int offset);
+
+ // Returns true for all header and embedder fields.
+ static inline bool IsValidEmbedderJSObjectSlotImpl(Map map, HeapObject obj,
+ int offset);
// Treats all header and embedder fields in the range as tagged.
template <typename ObjectVisitor>
- static inline void IterateBodyImpl(Map* map, HeapObject* obj,
- int start_offset, int end_offset,
- ObjectVisitor* v);
+ static inline void IterateJSObjectBodyImpl(Map map, HeapObject obj,
+ int start_offset, int end_offset,
+ ObjectVisitor* v);
};
@@ -75,22 +81,22 @@ class FixedBodyDescriptor final : public BodyDescriptorBase {
static const int kEndOffset = end_offset;
static const int kSize = size;
- static bool IsValidSlot(Map* map, HeapObject* obj, int offset) {
+ static bool IsValidSlot(Map map, HeapObject obj, int offset) {
return offset >= kStartOffset && offset < kEndOffset;
}
template <typename ObjectVisitor>
- static inline void IterateBody(Map* map, HeapObject* obj, ObjectVisitor* v) {
+ static inline void IterateBody(Map map, HeapObject obj, ObjectVisitor* v) {
IteratePointers(obj, start_offset, end_offset, v);
}
template <typename ObjectVisitor>
- static inline void IterateBody(Map* map, HeapObject* obj, int object_size,
+ static inline void IterateBody(Map map, HeapObject obj, int object_size,
ObjectVisitor* v) {
IterateBody(map, obj, v);
}
- static inline int SizeOf(Map* map, HeapObject* object) { return kSize; }
+ static inline int SizeOf(Map map, HeapObject object) { return kSize; }
};
@@ -102,22 +108,40 @@ class FlexibleBodyDescriptor final : public BodyDescriptorBase {
public:
static const int kStartOffset = start_offset;
- static bool IsValidSlot(Map* map, HeapObject* obj, int offset) {
+ static bool IsValidSlot(Map map, HeapObject obj, int offset) {
return (offset >= kStartOffset);
}
template <typename ObjectVisitor>
- static inline void IterateBody(Map* map, HeapObject* obj, int object_size,
+ static inline void IterateBody(Map map, HeapObject obj, int object_size,
ObjectVisitor* v) {
IteratePointers(obj, start_offset, object_size, v);
}
- static inline int SizeOf(Map* map, HeapObject* object);
+ static inline int SizeOf(Map map, HeapObject object);
};
typedef FlexibleBodyDescriptor<HeapObject::kHeaderSize> StructBodyDescriptor;
+template <int start_offset>
+class FlexibleWeakBodyDescriptor final : public BodyDescriptorBase {
+ public:
+ static const int kStartOffset = start_offset;
+
+ static bool IsValidSlot(Map map, HeapObject obj, int offset) {
+ return (offset >= kStartOffset);
+ }
+
+ template <typename ObjectVisitor>
+ static inline void IterateBody(Map map, HeapObject obj, int object_size,
+ ObjectVisitor* v) {
+ IterateMaybeWeakPointers(obj, start_offset, object_size, v);
+ }
+
+ static inline int SizeOf(Map map, HeapObject object);
+};
+
// This class describes a body of an object which has a parent class that also
// has a body descriptor. This represents a union of the parent's body
// descriptor, and a new descriptor for the child -- so, both parent and child's
@@ -131,25 +155,25 @@ class SubclassBodyDescriptor final : public BodyDescriptorBase {
STATIC_ASSERT(ParentBodyDescriptor::kSize <=
ChildBodyDescriptor::kStartOffset);
- static bool IsValidSlot(Map* map, HeapObject* obj, int offset) {
+ static bool IsValidSlot(Map map, HeapObject obj, int offset) {
return ParentBodyDescriptor::IsValidSlot(map, obj, offset) ||
ChildBodyDescriptor::IsValidSlot(map, obj, offset);
}
template <typename ObjectVisitor>
- static inline void IterateBody(Map* map, HeapObject* obj, ObjectVisitor* v) {
+ static inline void IterateBody(Map map, HeapObject obj, ObjectVisitor* v) {
ParentBodyDescriptor::IterateBody(map, obj, v);
ChildBodyDescriptor::IterateBody(map, obj, v);
}
template <typename ObjectVisitor>
- static inline void IterateBody(Map* map, HeapObject* obj, int object_size,
+ static inline void IterateBody(Map map, HeapObject obj, int object_size,
ObjectVisitor* v) {
ParentBodyDescriptor::IterateBody(map, obj, object_size, v);
ChildBodyDescriptor::IterateBody(map, obj, object_size, v);
}
- static inline int SizeOf(Map* map, HeapObject* object) {
+ static inline int SizeOf(Map map, HeapObject object) {
// The child should know its full size.
return ChildBodyDescriptor::SizeOf(map, object);
}
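As a usage sketch with hypothetical names: a child type that appends two tagged fields to a parent body covering [HeapObject::kHeaderSize, kParentSize) could compose the two descriptors like this, satisfying the STATIC_ASSERT above:

    // Illustrative composition; kParentSize and the field count are assumptions.
    using HypotheticalChildDescriptor = SubclassBodyDescriptor<
        FixedBodyDescriptor<HeapObject::kHeaderSize, kParentSize, kParentSize>,
        FixedBodyDescriptor<kParentSize, kParentSize + 2 * kTaggedSize,
                            kParentSize + 2 * kTaggedSize>>;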
diff --git a/deps/v8/src/objects-debug.cc b/deps/v8/src/objects-debug.cc
index b4e50843a1..cdf392448b 100644
--- a/deps/v8/src/objects-debug.cc
+++ b/deps/v8/src/objects-debug.cc
@@ -6,17 +6,24 @@
#include "src/assembler-inl.h"
#include "src/bootstrapper.h"
+#include "src/counters.h"
+#include "src/date.h"
#include "src/disasm.h"
#include "src/disassembler.h"
#include "src/elements.h"
#include "src/field-type.h"
#include "src/layout-descriptor.h"
-#include "src/macro-assembler.h"
#include "src/objects-inl.h"
#include "src/objects/arguments-inl.h"
#include "src/objects/bigint.h"
+#include "src/objects/cell-inl.h"
#include "src/objects/data-handler-inl.h"
#include "src/objects/debug-objects-inl.h"
+#include "src/objects/embedder-data-array-inl.h"
+#include "src/objects/embedder-data-slot-inl.h"
+#include "src/objects/feedback-cell-inl.h"
+#include "src/objects/foreign-inl.h"
+#include "src/objects/free-space-inl.h"
#include "src/objects/hash-table-inl.h"
#include "src/objects/js-array-inl.h"
#ifdef V8_INTL_SUPPORT
@@ -38,15 +45,18 @@
#include "src/objects/js-regexp-string-iterator-inl.h"
#ifdef V8_INTL_SUPPORT
#include "src/objects/js-relative-time-format-inl.h"
+#include "src/objects/js-segment-iterator-inl.h"
#include "src/objects/js-segmenter-inl.h"
#endif // V8_INTL_SUPPORT
+#include "src/objects/js-weak-refs-inl.h"
#include "src/objects/literal-objects-inl.h"
#include "src/objects/maybe-object.h"
#include "src/objects/microtask-inl.h"
-#include "src/objects/microtask-queue-inl.h"
#include "src/objects/module-inl.h"
+#include "src/objects/oddball-inl.h"
#include "src/objects/promise-inl.h"
#include "src/objects/stack-frame-info-inl.h"
+#include "src/objects/struct-inl.h"
#include "src/ostreams.h"
#include "src/regexp/jsregexp.h"
#include "src/transitions.h"
@@ -80,15 +90,16 @@ namespace internal {
#ifdef VERIFY_HEAP
void Object::ObjectVerify(Isolate* isolate) {
+ RuntimeCallTimerScope timer(isolate, RuntimeCallCounterId::kObjectVerify);
if (IsSmi()) {
- Smi::cast(this)->SmiVerify(isolate);
+ Smi::cast(*this)->SmiVerify(isolate);
} else {
- HeapObject::cast(this)->HeapObjectVerify(isolate);
+ HeapObject::cast(*this)->HeapObjectVerify(isolate);
}
CHECK(!IsConstructor() || IsCallable());
}
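The cast(this) to cast(*this) churn throughout this file follows from Object and its subclasses becoming single-word value types rather than raw C++ pointers, so verifiers now pass a copied tagged word instead of `this`. A simplified sketch of that representation (not the real class definition):

    // Simplified sketch; kSmiTag/kSmiTagMask are the usual V8 tag constants.
    class Object {
     public:
      explicit Object(Address ptr) : ptr_(ptr) {}
      bool IsSmi() const { return (ptr_ & kSmiTagMask) == kSmiTag; }
      Address ptr() const { return ptr_; }

     private:
      Address ptr_;  // one tagged word; copying an Object copies this word
    };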
-void Object::VerifyPointer(Isolate* isolate, Object* p) {
+void Object::VerifyPointer(Isolate* isolate, Object p) {
if (p->IsHeapObject()) {
HeapObject::VerifyHeapPointer(isolate, p);
} else {
@@ -96,8 +107,8 @@ void Object::VerifyPointer(Isolate* isolate, Object* p) {
}
}
-void MaybeObject::VerifyMaybeObjectPointer(Isolate* isolate, MaybeObject* p) {
- HeapObject* heap_object;
+void MaybeObject::VerifyMaybeObjectPointer(Isolate* isolate, MaybeObject p) {
+ HeapObject heap_object;
if (p->GetHeapObject(&heap_object)) {
HeapObject::VerifyHeapPointer(isolate, heap_object);
} else {
@@ -106,7 +117,7 @@ void MaybeObject::VerifyMaybeObjectPointer(Isolate* isolate, MaybeObject* p) {
}
namespace {
-void VerifyForeignPointer(Isolate* isolate, HeapObject* host, Object* foreign) {
+void VerifyForeignPointer(Isolate* isolate, HeapObject host, Object foreign) {
host->VerifyPointer(isolate, foreign);
CHECK(foreign->IsUndefined(isolate) || Foreign::IsNormalized(foreign));
}
@@ -126,13 +137,13 @@ void HeapObject::HeapObjectVerify(Isolate* isolate) {
#define STRING_TYPE_CASE(TYPE, size, name, CamelName) case TYPE:
STRING_TYPE_LIST(STRING_TYPE_CASE)
#undef STRING_TYPE_CASE
- String::cast(this)->StringVerify(isolate);
+ String::cast(*this)->StringVerify(isolate);
break;
case SYMBOL_TYPE:
- Symbol::cast(this)->SymbolVerify(isolate);
+ Symbol::cast(*this)->SymbolVerify(isolate);
break;
case MAP_TYPE:
- Map::cast(this)->MapVerify(isolate);
+ Map::cast(*this)->MapVerify(isolate);
break;
case HEAP_NUMBER_TYPE:
CHECK(IsHeapNumber());
@@ -141,19 +152,23 @@ void HeapObject::HeapObjectVerify(Isolate* isolate) {
CHECK(IsMutableHeapNumber());
break;
case BIGINT_TYPE:
- BigInt::cast(this)->BigIntVerify(isolate);
+ BigInt::cast(*this)->BigIntVerify(isolate);
break;
case CALL_HANDLER_INFO_TYPE:
- CallHandlerInfo::cast(this)->CallHandlerInfoVerify(isolate);
+ CallHandlerInfo::cast(*this)->CallHandlerInfoVerify(isolate);
break;
case OBJECT_BOILERPLATE_DESCRIPTION_TYPE:
- ObjectBoilerplateDescription::cast(this)
+ ObjectBoilerplateDescription::cast(*this)
->ObjectBoilerplateDescriptionVerify(isolate);
break;
+ case EMBEDDER_DATA_ARRAY_TYPE:
+ EmbedderDataArray::cast(*this)->EmbedderDataArrayVerify(isolate);
+ break;
// FixedArray types
case HASH_TABLE_TYPE:
case ORDERED_HASH_MAP_TYPE:
case ORDERED_HASH_SET_TYPE:
+ case ORDERED_NAME_DICTIONARY_TYPE:
case NAME_DICTIONARY_TYPE:
case GLOBAL_DICTIONARY_TYPE:
case NUMBER_DICTIONARY_TYPE:
@@ -163,6 +178,8 @@ void HeapObject::HeapObjectVerify(Isolate* isolate) {
case FIXED_ARRAY_TYPE:
case SCOPE_INFO_TYPE:
case SCRIPT_CONTEXT_TABLE_TYPE:
+ FixedArray::cast(*this)->FixedArrayVerify(isolate);
+ break;
case AWAIT_CONTEXT_TYPE:
case BLOCK_CONTEXT_TYPE:
case CATCH_CONTEXT_TYPE:
@@ -170,61 +187,63 @@ void HeapObject::HeapObjectVerify(Isolate* isolate) {
case EVAL_CONTEXT_TYPE:
case FUNCTION_CONTEXT_TYPE:
case MODULE_CONTEXT_TYPE:
- case NATIVE_CONTEXT_TYPE:
case SCRIPT_CONTEXT_TYPE:
case WITH_CONTEXT_TYPE:
- FixedArray::cast(this)->FixedArrayVerify(isolate);
+ Context::cast(*this)->ContextVerify(isolate);
+ break;
+ case NATIVE_CONTEXT_TYPE:
+ NativeContext::cast(*this)->NativeContextVerify(isolate);
break;
case WEAK_FIXED_ARRAY_TYPE:
- WeakFixedArray::cast(this)->WeakFixedArrayVerify(isolate);
+ WeakFixedArray::cast(*this)->WeakFixedArrayVerify(isolate);
break;
case WEAK_ARRAY_LIST_TYPE:
- WeakArrayList::cast(this)->WeakArrayListVerify(isolate);
+ WeakArrayList::cast(*this)->WeakArrayListVerify(isolate);
break;
case FIXED_DOUBLE_ARRAY_TYPE:
- FixedDoubleArray::cast(this)->FixedDoubleArrayVerify(isolate);
+ FixedDoubleArray::cast(*this)->FixedDoubleArrayVerify(isolate);
break;
case FEEDBACK_METADATA_TYPE:
- FeedbackMetadata::cast(this)->FeedbackMetadataVerify(isolate);
+ FeedbackMetadata::cast(*this)->FeedbackMetadataVerify(isolate);
break;
case BYTE_ARRAY_TYPE:
- ByteArray::cast(this)->ByteArrayVerify(isolate);
+ ByteArray::cast(*this)->ByteArrayVerify(isolate);
break;
case BYTECODE_ARRAY_TYPE:
- BytecodeArray::cast(this)->BytecodeArrayVerify(isolate);
+ BytecodeArray::cast(*this)->BytecodeArrayVerify(isolate);
break;
case DESCRIPTOR_ARRAY_TYPE:
- DescriptorArray::cast(this)->DescriptorArrayVerify(isolate);
+ DescriptorArray::cast(*this)->DescriptorArrayVerify(isolate);
break;
case TRANSITION_ARRAY_TYPE:
- TransitionArray::cast(this)->TransitionArrayVerify(isolate);
+ TransitionArray::cast(*this)->TransitionArrayVerify(isolate);
break;
case PROPERTY_ARRAY_TYPE:
- PropertyArray::cast(this)->PropertyArrayVerify(isolate);
+ PropertyArray::cast(*this)->PropertyArrayVerify(isolate);
break;
case FREE_SPACE_TYPE:
- FreeSpace::cast(this)->FreeSpaceVerify(isolate);
+ FreeSpace::cast(*this)->FreeSpaceVerify(isolate);
break;
case FEEDBACK_CELL_TYPE:
- FeedbackCell::cast(this)->FeedbackCellVerify(isolate);
+ FeedbackCell::cast(*this)->FeedbackCellVerify(isolate);
break;
case FEEDBACK_VECTOR_TYPE:
- FeedbackVector::cast(this)->FeedbackVectorVerify(isolate);
+ FeedbackVector::cast(*this)->FeedbackVectorVerify(isolate);
break;
-#define VERIFY_TYPED_ARRAY(Type, type, TYPE, ctype) \
- case FIXED_##TYPE##_ARRAY_TYPE: \
- Fixed##Type##Array::cast(this)->FixedTypedArrayVerify(isolate); \
+#define VERIFY_TYPED_ARRAY(Type, type, TYPE, ctype) \
+ case FIXED_##TYPE##_ARRAY_TYPE: \
+ Fixed##Type##Array::cast(*this)->FixedTypedArrayVerify(isolate); \
break;
TYPED_ARRAYS(VERIFY_TYPED_ARRAY)
#undef VERIFY_TYPED_ARRAY
case CODE_TYPE:
- Code::cast(this)->CodeVerify(isolate);
+ Code::cast(*this)->CodeVerify(isolate);
break;
case ODDBALL_TYPE:
- Oddball::cast(this)->OddballVerify(isolate);
+ Oddball::cast(*this)->OddballVerify(isolate);
break;
case JS_OBJECT_TYPE:
case JS_ERROR_TYPE:
@@ -235,190 +254,215 @@ void HeapObject::HeapObjectVerify(Isolate* isolate) {
case WASM_GLOBAL_TYPE:
case WASM_MEMORY_TYPE:
case WASM_TABLE_TYPE:
- JSObject::cast(this)->JSObjectVerify(isolate);
+ JSObject::cast(*this)->JSObjectVerify(isolate);
break;
case WASM_MODULE_TYPE:
- WasmModuleObject::cast(this)->WasmModuleObjectVerify(isolate);
+ WasmModuleObject::cast(*this)->WasmModuleObjectVerify(isolate);
break;
case WASM_INSTANCE_TYPE:
- WasmInstanceObject::cast(this)->WasmInstanceObjectVerify(isolate);
+ WasmInstanceObject::cast(*this)->WasmInstanceObjectVerify(isolate);
break;
case JS_ARGUMENTS_TYPE:
- JSArgumentsObject::cast(this)->JSArgumentsObjectVerify(isolate);
+ JSArgumentsObject::cast(*this)->JSArgumentsObjectVerify(isolate);
break;
case JS_GENERATOR_OBJECT_TYPE:
- JSGeneratorObject::cast(this)->JSGeneratorObjectVerify(isolate);
+ JSGeneratorObject::cast(*this)->JSGeneratorObjectVerify(isolate);
+ break;
+ case JS_ASYNC_FUNCTION_OBJECT_TYPE:
+ JSAsyncFunctionObject::cast(*this)->JSAsyncFunctionObjectVerify(isolate);
break;
case JS_ASYNC_GENERATOR_OBJECT_TYPE:
- JSAsyncGeneratorObject::cast(this)->JSAsyncGeneratorObjectVerify(isolate);
+ JSAsyncGeneratorObject::cast(*this)->JSAsyncGeneratorObjectVerify(
+ isolate);
break;
case JS_VALUE_TYPE:
- JSValue::cast(this)->JSValueVerify(isolate);
+ JSValue::cast(*this)->JSValueVerify(isolate);
break;
case JS_DATE_TYPE:
- JSDate::cast(this)->JSDateVerify(isolate);
+ JSDate::cast(*this)->JSDateVerify(isolate);
break;
case JS_BOUND_FUNCTION_TYPE:
- JSBoundFunction::cast(this)->JSBoundFunctionVerify(isolate);
+ JSBoundFunction::cast(*this)->JSBoundFunctionVerify(isolate);
break;
case JS_FUNCTION_TYPE:
- JSFunction::cast(this)->JSFunctionVerify(isolate);
+ JSFunction::cast(*this)->JSFunctionVerify(isolate);
break;
case JS_GLOBAL_PROXY_TYPE:
- JSGlobalProxy::cast(this)->JSGlobalProxyVerify(isolate);
+ JSGlobalProxy::cast(*this)->JSGlobalProxyVerify(isolate);
break;
case JS_GLOBAL_OBJECT_TYPE:
- JSGlobalObject::cast(this)->JSGlobalObjectVerify(isolate);
+ JSGlobalObject::cast(*this)->JSGlobalObjectVerify(isolate);
break;
case CELL_TYPE:
- Cell::cast(this)->CellVerify(isolate);
+ Cell::cast(*this)->CellVerify(isolate);
break;
case PROPERTY_CELL_TYPE:
- PropertyCell::cast(this)->PropertyCellVerify(isolate);
+ PropertyCell::cast(*this)->PropertyCellVerify(isolate);
break;
case JS_ARRAY_TYPE:
- JSArray::cast(this)->JSArrayVerify(isolate);
+ JSArray::cast(*this)->JSArrayVerify(isolate);
break;
case JS_MODULE_NAMESPACE_TYPE:
- JSModuleNamespace::cast(this)->JSModuleNamespaceVerify(isolate);
+ JSModuleNamespace::cast(*this)->JSModuleNamespaceVerify(isolate);
break;
case JS_SET_TYPE:
- JSSet::cast(this)->JSSetVerify(isolate);
+ JSSet::cast(*this)->JSSetVerify(isolate);
break;
case JS_MAP_TYPE:
- JSMap::cast(this)->JSMapVerify(isolate);
+ JSMap::cast(*this)->JSMapVerify(isolate);
break;
case JS_SET_KEY_VALUE_ITERATOR_TYPE:
case JS_SET_VALUE_ITERATOR_TYPE:
- JSSetIterator::cast(this)->JSSetIteratorVerify(isolate);
+ JSSetIterator::cast(*this)->JSSetIteratorVerify(isolate);
break;
case JS_MAP_KEY_ITERATOR_TYPE:
case JS_MAP_KEY_VALUE_ITERATOR_TYPE:
case JS_MAP_VALUE_ITERATOR_TYPE:
- JSMapIterator::cast(this)->JSMapIteratorVerify(isolate);
+ JSMapIterator::cast(*this)->JSMapIteratorVerify(isolate);
break;
case JS_ARRAY_ITERATOR_TYPE:
- JSArrayIterator::cast(this)->JSArrayIteratorVerify(isolate);
+ JSArrayIterator::cast(*this)->JSArrayIteratorVerify(isolate);
break;
case JS_STRING_ITERATOR_TYPE:
- JSStringIterator::cast(this)->JSStringIteratorVerify(isolate);
+ JSStringIterator::cast(*this)->JSStringIteratorVerify(isolate);
break;
case JS_ASYNC_FROM_SYNC_ITERATOR_TYPE:
- JSAsyncFromSyncIterator::cast(this)->JSAsyncFromSyncIteratorVerify(
+ JSAsyncFromSyncIterator::cast(*this)->JSAsyncFromSyncIteratorVerify(
isolate);
break;
+ case JS_WEAK_CELL_TYPE:
+ JSWeakCell::cast(*this)->JSWeakCellVerify(isolate);
+ break;
+ case JS_WEAK_REF_TYPE:
+ JSWeakRef::cast(*this)->JSWeakRefVerify(isolate);
+ break;
+ case JS_WEAK_FACTORY_TYPE:
+ JSWeakFactory::cast(*this)->JSWeakFactoryVerify(isolate);
+ break;
+ case JS_WEAK_FACTORY_CLEANUP_ITERATOR_TYPE:
+ JSWeakFactoryCleanupIterator::cast(*this)
+ ->JSWeakFactoryCleanupIteratorVerify(isolate);
+ break;
case JS_WEAK_MAP_TYPE:
- JSWeakMap::cast(this)->JSWeakMapVerify(isolate);
+ JSWeakMap::cast(*this)->JSWeakMapVerify(isolate);
break;
case JS_WEAK_SET_TYPE:
- JSWeakSet::cast(this)->JSWeakSetVerify(isolate);
+ JSWeakSet::cast(*this)->JSWeakSetVerify(isolate);
break;
case JS_PROMISE_TYPE:
- JSPromise::cast(this)->JSPromiseVerify(isolate);
+ JSPromise::cast(*this)->JSPromiseVerify(isolate);
break;
case JS_REGEXP_TYPE:
- JSRegExp::cast(this)->JSRegExpVerify(isolate);
+ JSRegExp::cast(*this)->JSRegExpVerify(isolate);
break;
case JS_REGEXP_STRING_ITERATOR_TYPE:
- JSRegExpStringIterator::cast(this)->JSRegExpStringIteratorVerify(isolate);
+ JSRegExpStringIterator::cast(*this)->JSRegExpStringIteratorVerify(
+ isolate);
break;
case FILLER_TYPE:
break;
case JS_PROXY_TYPE:
- JSProxy::cast(this)->JSProxyVerify(isolate);
+ JSProxy::cast(*this)->JSProxyVerify(isolate);
break;
case FOREIGN_TYPE:
- Foreign::cast(this)->ForeignVerify(isolate);
+ Foreign::cast(*this)->ForeignVerify(isolate);
break;
- case PRE_PARSED_SCOPE_DATA_TYPE:
- PreParsedScopeData::cast(this)->PreParsedScopeDataVerify(isolate);
+ case PREPARSE_DATA_TYPE:
+ PreparseData::cast(*this)->PreparseDataVerify(isolate);
break;
- case UNCOMPILED_DATA_WITHOUT_PRE_PARSED_SCOPE_TYPE:
- UncompiledDataWithoutPreParsedScope::cast(this)
- ->UncompiledDataWithoutPreParsedScopeVerify(isolate);
+ case UNCOMPILED_DATA_WITHOUT_PREPARSE_DATA_TYPE:
+ UncompiledDataWithoutPreparseData::cast(*this)
+ ->UncompiledDataWithoutPreparseDataVerify(isolate);
break;
- case UNCOMPILED_DATA_WITH_PRE_PARSED_SCOPE_TYPE:
- UncompiledDataWithPreParsedScope::cast(this)
- ->UncompiledDataWithPreParsedScopeVerify(isolate);
+ case UNCOMPILED_DATA_WITH_PREPARSE_DATA_TYPE:
+ UncompiledDataWithPreparseData::cast(*this)
+ ->UncompiledDataWithPreparseDataVerify(isolate);
break;
case SHARED_FUNCTION_INFO_TYPE:
- SharedFunctionInfo::cast(this)->SharedFunctionInfoVerify(isolate);
+ SharedFunctionInfo::cast(*this)->SharedFunctionInfoVerify(isolate);
break;
case JS_MESSAGE_OBJECT_TYPE:
- JSMessageObject::cast(this)->JSMessageObjectVerify(isolate);
+ JSMessageObject::cast(*this)->JSMessageObjectVerify(isolate);
break;
case JS_ARRAY_BUFFER_TYPE:
- JSArrayBuffer::cast(this)->JSArrayBufferVerify(isolate);
+ JSArrayBuffer::cast(*this)->JSArrayBufferVerify(isolate);
break;
case JS_TYPED_ARRAY_TYPE:
- JSTypedArray::cast(this)->JSTypedArrayVerify(isolate);
+ JSTypedArray::cast(*this)->JSTypedArrayVerify(isolate);
break;
case JS_DATA_VIEW_TYPE:
- JSDataView::cast(this)->JSDataViewVerify(isolate);
+ JSDataView::cast(*this)->JSDataViewVerify(isolate);
break;
case SMALL_ORDERED_HASH_SET_TYPE:
- SmallOrderedHashSet::cast(this)->SmallOrderedHashTableVerify(isolate);
+ SmallOrderedHashSet::cast(*this)->SmallOrderedHashSetVerify(isolate);
break;
case SMALL_ORDERED_HASH_MAP_TYPE:
- SmallOrderedHashMap::cast(this)->SmallOrderedHashTableVerify(isolate);
+ SmallOrderedHashMap::cast(*this)->SmallOrderedHashMapVerify(isolate);
+ break;
+ case SMALL_ORDERED_NAME_DICTIONARY_TYPE:
+ SmallOrderedNameDictionary::cast(*this)->SmallOrderedNameDictionaryVerify(
+ isolate);
break;
case CODE_DATA_CONTAINER_TYPE:
- CodeDataContainer::cast(this)->CodeDataContainerVerify(isolate);
+ CodeDataContainer::cast(*this)->CodeDataContainerVerify(isolate);
break;
#ifdef V8_INTL_SUPPORT
case JS_INTL_V8_BREAK_ITERATOR_TYPE:
- JSV8BreakIterator::cast(this)->JSV8BreakIteratorVerify(isolate);
+ JSV8BreakIterator::cast(*this)->JSV8BreakIteratorVerify(isolate);
break;
case JS_INTL_COLLATOR_TYPE:
- JSCollator::cast(this)->JSCollatorVerify(isolate);
+ JSCollator::cast(*this)->JSCollatorVerify(isolate);
break;
case JS_INTL_DATE_TIME_FORMAT_TYPE:
- JSDateTimeFormat::cast(this)->JSDateTimeFormatVerify(isolate);
+ JSDateTimeFormat::cast(*this)->JSDateTimeFormatVerify(isolate);
break;
case JS_INTL_LIST_FORMAT_TYPE:
- JSListFormat::cast(this)->JSListFormatVerify(isolate);
+ JSListFormat::cast(*this)->JSListFormatVerify(isolate);
break;
case JS_INTL_LOCALE_TYPE:
- JSLocale::cast(this)->JSLocaleVerify(isolate);
+ JSLocale::cast(*this)->JSLocaleVerify(isolate);
break;
case JS_INTL_NUMBER_FORMAT_TYPE:
- JSNumberFormat::cast(this)->JSNumberFormatVerify(isolate);
+ JSNumberFormat::cast(*this)->JSNumberFormatVerify(isolate);
break;
case JS_INTL_PLURAL_RULES_TYPE:
- JSPluralRules::cast(this)->JSPluralRulesVerify(isolate);
+ JSPluralRules::cast(*this)->JSPluralRulesVerify(isolate);
break;
case JS_INTL_RELATIVE_TIME_FORMAT_TYPE:
- JSRelativeTimeFormat::cast(this)->JSRelativeTimeFormatVerify(isolate);
+ JSRelativeTimeFormat::cast(*this)->JSRelativeTimeFormatVerify(isolate);
+ break;
+ case JS_INTL_SEGMENT_ITERATOR_TYPE:
+ JSSegmentIterator::cast(*this)->JSSegmentIteratorVerify(isolate);
break;
case JS_INTL_SEGMENTER_TYPE:
- JSSegmenter::cast(this)->JSSegmenterVerify(isolate);
+ JSSegmenter::cast(*this)->JSSegmenterVerify(isolate);
break;
#endif // V8_INTL_SUPPORT
-#define MAKE_STRUCT_CASE(TYPE, Name, name) \
- case TYPE: \
- Name::cast(this)->Name##Verify(isolate); \
+#define MAKE_STRUCT_CASE(TYPE, Name, name) \
+ case TYPE: \
+ Name::cast(*this)->Name##Verify(isolate); \
break;
STRUCT_LIST(MAKE_STRUCT_CASE)
#undef MAKE_STRUCT_CASE
case ALLOCATION_SITE_TYPE:
- AllocationSite::cast(this)->AllocationSiteVerify(isolate);
+ AllocationSite::cast(*this)->AllocationSiteVerify(isolate);
break;
case LOAD_HANDLER_TYPE:
- LoadHandler::cast(this)->LoadHandlerVerify(isolate);
+ LoadHandler::cast(*this)->LoadHandlerVerify(isolate);
break;
case STORE_HANDLER_TYPE:
- StoreHandler::cast(this)->StoreHandlerVerify(isolate);
+ StoreHandler::cast(*this)->StoreHandlerVerify(isolate);
break;
}
}
-void HeapObject::VerifyHeapPointer(Isolate* isolate, Object* p) {
+void HeapObject::VerifyHeapPointer(Isolate* isolate, Object p) {
CHECK(p->IsHeapObject());
- HeapObject* ho = HeapObject::cast(p);
+ HeapObject ho = HeapObject::cast(p);
CHECK(isolate->heap()->Contains(ho));
}
@@ -427,7 +471,7 @@ void Symbol::SymbolVerify(Isolate* isolate) {
CHECK(HasHashCode());
CHECK_GT(Hash(), 0);
CHECK(name()->IsUndefined(isolate) || name()->IsString());
- CHECK_IMPLIES(IsPrivateField(), IsPrivate());
+ CHECK_IMPLIES(IsPrivateName(), IsPrivate());
}
void ByteArray::ByteArrayVerify(Isolate* isolate) { CHECK(IsByteArray()); }
@@ -454,33 +498,30 @@ void FeedbackCell::FeedbackCellVerify(Isolate* isolate) {
void FeedbackVector::FeedbackVectorVerify(Isolate* isolate) {
CHECK(IsFeedbackVector());
- MaybeObject* code = optimized_code_weak_or_smi();
+ MaybeObject code = optimized_code_weak_or_smi();
MaybeObject::VerifyMaybeObjectPointer(isolate, code);
CHECK(code->IsSmi() || code->IsWeakOrCleared());
}
template <class Traits>
void FixedTypedArray<Traits>::FixedTypedArrayVerify(Isolate* isolate) {
- CHECK(IsHeapObject() &&
- HeapObject::cast(this)->map()->instance_type() ==
- Traits::kInstanceType);
- if (base_pointer() == this) {
+ CHECK(IsHeapObject() && map()->instance_type() == Traits::kInstanceType);
+ if (base_pointer()->ptr() == ptr()) {
CHECK(reinterpret_cast<Address>(external_pointer()) ==
ExternalReference::fixed_typed_array_base_data_offset().address());
} else {
- CHECK_NULL(base_pointer());
+ CHECK_EQ(base_pointer(), Smi::kZero);
}
}
bool JSObject::ElementsAreSafeToExamine() const {
// If a GC was caused while constructing this object, the elements
// pointer may point to a one pointer filler map.
- return reinterpret_cast<Map*>(elements()) !=
- GetReadOnlyRoots().one_pointer_filler_map();
+ return elements() != GetReadOnlyRoots().one_pointer_filler_map();
}
namespace {
-void VerifyJSObjectElements(Isolate* isolate, JSObject* object) {
+void VerifyJSObjectElements(Isolate* isolate, JSObject object) {
// Only TypedArrays can have these specialized elements.
if (object->IsJSTypedArray()) {
// TODO(cbruni): Fix CreateTypedArray to either not instantiate the object
@@ -499,17 +540,17 @@ void VerifyJSObjectElements(Isolate* isolate, JSObject* object) {
return;
}
- FixedArray* elements = FixedArray::cast(object->elements());
+ FixedArray elements = FixedArray::cast(object->elements());
if (object->HasSmiElements()) {
// We might have a partially initialized backing store, in which case we
// allow the hole + smi values.
for (int i = 0; i < elements->length(); i++) {
- Object* value = elements->get(i);
+ Object value = elements->get(i);
CHECK(value->IsSmi() || value->IsTheHole(isolate));
}
} else if (object->HasObjectElements()) {
for (int i = 0; i < elements->length(); i++) {
- Object* element = elements->get(i);
+ Object element = elements->get(i);
CHECK_IMPLIES(!element->IsSmi(), !HasWeakHeapObjectTag(element));
}
}
@@ -536,7 +577,7 @@ void JSObject::JSObjectVerify(Isolate* isolate) {
int delta = actual_unused_property_fields - map()->UnusedPropertyFields();
CHECK_EQ(0, delta % JSObject::kFieldsAdded);
}
- DescriptorArray* descriptors = map()->instance_descriptors();
+ DescriptorArray descriptors = map()->instance_descriptors();
bool is_transitionable_fast_elements_kind =
IsTransitionableFastElementsKind(map()->elements_kind());
@@ -550,12 +591,12 @@ void JSObject::JSObjectVerify(Isolate* isolate) {
DCHECK(r.IsDouble());
continue;
}
- Object* value = RawFastPropertyAt(index);
+ Object value = RawFastPropertyAt(index);
if (r.IsDouble()) DCHECK(value->IsMutableHeapNumber());
if (value->IsUninitialized(isolate)) continue;
if (r.IsSmi()) DCHECK(value->IsSmi());
if (r.IsHeapObject()) DCHECK(value->IsHeapObject());
- FieldType* field_type = descriptors->GetFieldType(i);
+ FieldType field_type = descriptors->GetFieldType(i);
bool type_is_none = field_type->IsNone();
bool type_is_any = field_type->IsAny();
if (r.IsNone()) {
@@ -570,9 +611,9 @@ void JSObject::JSObjectVerify(Isolate* isolate) {
}
if (map()->EnumLength() != kInvalidEnumCacheSentinel) {
- EnumCache* enum_cache = descriptors->GetEnumCache();
- FixedArray* keys = enum_cache->keys();
- FixedArray* indices = enum_cache->indices();
+ EnumCache enum_cache = descriptors->enum_cache();
+ FixedArray keys = enum_cache->keys();
+ FixedArray indices = enum_cache->indices();
CHECK_LE(map()->EnumLength(), keys->length());
CHECK_IMPLIES(indices != ReadOnlyRoots(isolate).empty_fixed_array(),
keys->length() == indices->length());
@@ -588,34 +629,34 @@ void JSObject::JSObjectVerify(Isolate* isolate) {
(elements()->map() == GetReadOnlyRoots().fixed_array_map() ||
elements()->map() == GetReadOnlyRoots().fixed_cow_array_map()));
CHECK_EQ(map()->has_fast_object_elements(), HasObjectElements());
- VerifyJSObjectElements(isolate, this);
+ VerifyJSObjectElements(isolate, *this);
}
}
void Map::MapVerify(Isolate* isolate) {
Heap* heap = isolate->heap();
- CHECK(!Heap::InNewSpace(this));
+ CHECK(!Heap::InNewSpace(*this));
CHECK(FIRST_TYPE <= instance_type() && instance_type() <= LAST_TYPE);
CHECK(instance_size() == kVariableSizeSentinel ||
- (kPointerSize <= instance_size() &&
+ (kTaggedSize <= instance_size() &&
static_cast<size_t>(instance_size()) < heap->Capacity()));
CHECK(GetBackPointer()->IsUndefined(heap->isolate()) ||
!Map::cast(GetBackPointer())->is_stable());
- VerifyHeapPointer(isolate, prototype());
- VerifyHeapPointer(isolate, instance_descriptors());
+ HeapObject::VerifyHeapPointer(isolate, prototype());
+ HeapObject::VerifyHeapPointer(isolate, instance_descriptors());
SLOW_DCHECK(instance_descriptors()->IsSortedNoDuplicates());
DisallowHeapAllocation no_gc;
SLOW_DCHECK(
- TransitionsAccessor(isolate, this, &no_gc).IsSortedNoDuplicates());
- SLOW_DCHECK(TransitionsAccessor(isolate, this, &no_gc)
+ TransitionsAccessor(isolate, *this, &no_gc).IsSortedNoDuplicates());
+ SLOW_DCHECK(TransitionsAccessor(isolate, *this, &no_gc)
.IsConsistentWithBackPointers());
SLOW_DCHECK(!FLAG_unbox_double_fields ||
- layout_descriptor()->IsConsistentWithMap(this));
+ layout_descriptor()->IsConsistentWithMap(*this));
if (!may_have_interesting_symbols()) {
CHECK(!has_named_interceptor());
CHECK(!is_dictionary_map());
CHECK(!is_access_check_needed());
- DescriptorArray* const descriptors = instance_descriptors();
+ DescriptorArray const descriptors = instance_descriptors();
for (int i = 0; i < NumberOfOwnDescriptors(); ++i) {
CHECK(!descriptors->GetKey(i)->IsInterestingSymbol());
}
@@ -641,16 +682,25 @@ void Map::DictionaryMapVerify(Isolate* isolate) {
CHECK_EQ(ReadOnlyRoots(isolate).empty_descriptor_array(),
instance_descriptors());
CHECK_EQ(0, UnusedPropertyFields());
- CHECK_EQ(Map::GetVisitorId(this), visitor_id());
+ CHECK_EQ(Map::GetVisitorId(*this), visitor_id());
}
void AliasedArgumentsEntry::AliasedArgumentsEntryVerify(Isolate* isolate) {
VerifySmiField(kAliasedContextSlot);
}
+void EmbedderDataArray::EmbedderDataArrayVerify(Isolate* isolate) {
+ EmbedderDataSlot start(*this, 0);
+ EmbedderDataSlot end(*this, length());
+ for (EmbedderDataSlot slot = start; slot < end; ++slot) {
+ Object e = slot.load_tagged();
+ Object::VerifyPointer(isolate, e);
+ }
+}
+
void FixedArray::FixedArrayVerify(Isolate* isolate) {
for (int i = 0; i < length(); i++) {
- Object* e = get(i);
+ Object e = get(i);
VerifyPointer(isolate, e);
}
}
@@ -669,14 +719,14 @@ void WeakArrayList::WeakArrayListVerify(Isolate* isolate) {
void PropertyArray::PropertyArrayVerify(Isolate* isolate) {
if (length() == 0) {
- CHECK_EQ(this, ReadOnlyRoots(isolate).empty_property_array());
+ CHECK_EQ(*this, ReadOnlyRoots(isolate).empty_property_array());
return;
}
// There are no empty PropertyArrays.
CHECK_LT(0, length());
for (int i = 0; i < length(); i++) {
- Object* e = get(i);
- VerifyPointer(isolate, e);
+ Object e = get(i);
+ Object::VerifyPointer(isolate, e);
}
}
@@ -695,11 +745,28 @@ void FixedDoubleArray::FixedDoubleArrayVerify(Isolate* isolate) {
}
}
+void Context::ContextVerify(Isolate* isolate) {
+ VerifySmiField(kLengthOffset);
+ VerifyObjectField(isolate, kScopeInfoOffset);
+ VerifyObjectField(isolate, kPreviousOffset);
+ VerifyObjectField(isolate, kExtensionOffset);
+ VerifyObjectField(isolate, kNativeContextOffset);
+ for (int i = 0; i < length(); i++) {
+ VerifyObjectField(isolate, OffsetOfElementAt(i));
+ }
+}
+
+void NativeContext::NativeContextVerify(Isolate* isolate) {
+ ContextVerify(isolate);
+ CHECK_EQ(length(), NativeContext::NATIVE_CONTEXT_SLOTS);
+ CHECK_EQ(kSize, map()->instance_size());
+}
+
void FeedbackMetadata::FeedbackMetadataVerify(Isolate* isolate) {
if (slot_count() == 0) {
- CHECK_EQ(ReadOnlyRoots(isolate).empty_feedback_metadata(), this);
+ CHECK_EQ(ReadOnlyRoots(isolate).empty_feedback_metadata(), *this);
} else {
- FeedbackMetadataIterator iter(this);
+ FeedbackMetadataIterator iter(*this);
while (iter.HasNext()) {
iter.Next();
FeedbackSlotKind kind = iter.kind();
@@ -710,21 +777,25 @@ void FeedbackMetadata::FeedbackMetadataVerify(Isolate* isolate) {
}
void DescriptorArray::DescriptorArrayVerify(Isolate* isolate) {
- WeakFixedArrayVerify(isolate);
- int nof_descriptors = number_of_descriptors();
- if (number_of_descriptors_storage() == 0) {
+ for (int i = 0; i < number_of_all_descriptors(); i++) {
+ MaybeObject::VerifyMaybeObjectPointer(isolate, get(ToKeyIndex(i)));
+ MaybeObject::VerifyMaybeObjectPointer(isolate, get(ToDetailsIndex(i)));
+ MaybeObject::VerifyMaybeObjectPointer(isolate, get(ToValueIndex(i)));
+ }
+ if (number_of_all_descriptors() == 0) {
Heap* heap = isolate->heap();
- CHECK_EQ(ReadOnlyRoots(heap).empty_descriptor_array(), this);
- CHECK_EQ(2, length());
- CHECK_EQ(0, nof_descriptors);
- CHECK_EQ(ReadOnlyRoots(heap).empty_enum_cache(), GetEnumCache());
+ CHECK_EQ(ReadOnlyRoots(heap).empty_descriptor_array(), *this);
+ CHECK_EQ(0, number_of_all_descriptors());
+ CHECK_EQ(0, number_of_descriptors());
+ CHECK_EQ(ReadOnlyRoots(heap).empty_enum_cache(), enum_cache());
} else {
- CHECK_LT(2, length());
- CHECK_LE(LengthFor(nof_descriptors), length());
+ CHECK_LT(0, number_of_all_descriptors());
+ CHECK_LE(number_of_descriptors(), number_of_all_descriptors());
// Check that properties with private symbols names are non-enumerable.
- for (int descriptor = 0; descriptor < nof_descriptors; descriptor++) {
- Object* key = get(ToKeyIndex(descriptor))->cast<Object>();
+ for (int descriptor = 0; descriptor < number_of_descriptors();
+ descriptor++) {
+ Object key = get(ToKeyIndex(descriptor))->cast<Object>();
// number_of_descriptors() may be out of sync with the actual descriptors
// written during descriptor array construction.
if (key->IsUndefined(isolate)) continue;
@@ -732,8 +803,8 @@ void DescriptorArray::DescriptorArrayVerify(Isolate* isolate) {
if (Name::cast(key)->IsPrivate()) {
CHECK_NE(details.attributes() & DONT_ENUM, 0);
}
- MaybeObject* value = get(ToValueIndex(descriptor));
- HeapObject* heap_object;
+ MaybeObject value = get(ToValueIndex(descriptor));
+ HeapObject heap_object;
if (details.location() == kField) {
CHECK(
value == MaybeObject::FromObject(FieldType::None()) ||
@@ -756,7 +827,7 @@ void TransitionArray::TransitionArrayVerify(Isolate* isolate) {
void JSArgumentsObject::JSArgumentsObjectVerify(Isolate* isolate) {
if (IsSloppyArgumentsElementsKind(GetElementsKind())) {
SloppyArgumentsElements::cast(elements())
- ->SloppyArgumentsElementsVerify(isolate, this);
+ ->SloppyArgumentsElementsVerify(isolate, *this);
}
if (isolate->IsInAnyContext(map(), Context::SLOPPY_ARGUMENTS_MAP_INDEX) ||
isolate->IsInAnyContext(map(),
@@ -773,7 +844,7 @@ void JSArgumentsObject::JSArgumentsObjectVerify(Isolate* isolate) {
}
void SloppyArgumentsElements::SloppyArgumentsElementsVerify(Isolate* isolate,
- JSObject* holder) {
+ JSObject holder) {
FixedArrayVerify(isolate);
// Abort verification if only partially initialized (can't use arguments()
// getter because it does FixedArray::cast()).
@@ -784,8 +855,8 @@ void SloppyArgumentsElements::SloppyArgumentsElementsVerify(Isolate* isolate,
CHECK(IsFixedArray());
CHECK_GE(length(), 2);
CHECK_EQ(map(), ReadOnlyRoots(isolate).sloppy_arguments_elements_map());
- Context* context_object = Context::cast(context());
- FixedArray* arg_elements = FixedArray::cast(arguments());
+ Context context_object = context();
+ FixedArray arg_elements = FixedArray::cast(arguments());
if (arg_elements->length() == 0) {
CHECK(arg_elements == ReadOnlyRoots(isolate).empty_fixed_array());
return;
@@ -801,7 +872,7 @@ void SloppyArgumentsElements::SloppyArgumentsElementsVerify(Isolate* isolate,
for (int i = 0; i < nofMappedParameters; i++) {
// Verify that each context-mapped argument is either the hole or a valid
// Smi within context length range.
- Object* mapped = get_mapped_entry(i);
+ Object mapped = get_mapped_entry(i);
if (mapped->IsTheHole(isolate)) {
// Slow sloppy arguments can be holey.
if (!is_fast) continue;
@@ -814,7 +885,7 @@ void SloppyArgumentsElements::SloppyArgumentsElementsVerify(Isolate* isolate,
nofMappedParameters++;
CHECK_LE(maxMappedIndex, mappedIndex);
maxMappedIndex = mappedIndex;
- Object* value = context_object->get(mappedIndex);
+ Object value = context_object->get(mappedIndex);
CHECK(value->IsObject());
// None of the context-mapped entries should exist in the arguments
// elements.
@@ -837,6 +908,13 @@ void JSGeneratorObject::JSGeneratorObjectVerify(Isolate* isolate) {
VerifyObjectField(isolate, kContinuationOffset);
}
+void JSAsyncFunctionObject::JSAsyncFunctionObjectVerify(Isolate* isolate) {
+ // Check inherited fields
+ JSGeneratorObjectVerify(isolate);
+ VerifyObjectField(isolate, kPromiseOffset);
+ promise()->HeapObjectVerify(isolate);
+}
+
void JSAsyncGeneratorObject::JSAsyncGeneratorObjectVerify(Isolate* isolate) {
// Check inherited fields
JSGeneratorObjectVerify(isolate);
@@ -845,7 +923,7 @@ void JSAsyncGeneratorObject::JSAsyncGeneratorObjectVerify(Isolate* isolate) {
}
void JSValue::JSValueVerify(Isolate* isolate) {
- Object* v = value();
+ Object v = value();
if (v->IsHeapObject()) {
VerifyHeapPointer(isolate, v);
}
@@ -910,16 +988,16 @@ void JSMessageObject::JSMessageObjectVerify(Isolate* isolate) {
void String::StringVerify(Isolate* isolate) {
CHECK(IsString());
CHECK(length() >= 0 && length() <= Smi::kMaxValue);
- CHECK_IMPLIES(length() == 0, this == ReadOnlyRoots(isolate).empty_string());
+ CHECK_IMPLIES(length() == 0, *this == ReadOnlyRoots(isolate).empty_string());
if (IsInternalizedString()) {
- CHECK(!Heap::InNewSpace(this));
+ CHECK(!Heap::InNewSpace(*this));
}
if (IsConsString()) {
- ConsString::cast(this)->ConsStringVerify(isolate);
+ ConsString::cast(*this)->ConsStringVerify(isolate);
} else if (IsSlicedString()) {
- SlicedString::cast(this)->SlicedStringVerify(isolate);
+ SlicedString::cast(*this)->SlicedStringVerify(isolate);
} else if (IsThinString()) {
- ThinString::cast(this)->ThinStringVerify(isolate);
+ ThinString::cast(*this)->ThinStringVerify(isolate);
}
}
@@ -965,11 +1043,11 @@ void JSBoundFunction::JSBoundFunctionVerify(Isolate* isolate) {
void JSFunction::JSFunctionVerify(Isolate* isolate) {
CHECK(IsJSFunction());
JSObjectVerify(isolate);
- VerifyHeapPointer(isolate, feedback_cell());
- CHECK(feedback_cell()->IsFeedbackCell());
+ VerifyHeapPointer(isolate, raw_feedback_cell());
+ CHECK(raw_feedback_cell()->IsFeedbackCell());
CHECK(code()->IsCode());
CHECK(map()->is_callable());
- Handle<JSFunction> function(this, isolate);
+ Handle<JSFunction> function(*this, isolate);
LookupIterator it(isolate, function, isolate->factory()->prototype_string(),
LookupIterator::OWN_SKIP_INTERCEPTOR);
if (has_prototype_slot()) {
@@ -994,7 +1072,7 @@ void SharedFunctionInfo::SharedFunctionInfoVerify(Isolate* isolate) {
VerifyObjectField(isolate, kScriptOrDebugInfoOffset);
VerifyObjectField(isolate, kNameOrScopeInfoOffset);
- Object* value = name_or_scope_info();
+ Object value = name_or_scope_info();
CHECK(value == kNoSharedNameSentinel || value->IsString() ||
value->IsScopeInfo());
if (value->IsScopeInfo()) {
@@ -1004,8 +1082,8 @@ void SharedFunctionInfo::SharedFunctionInfoVerify(Isolate* isolate) {
CHECK(HasWasmExportedFunctionData() || IsApiFunction() ||
HasBytecodeArray() || HasAsmWasmData() || HasBuiltinId() ||
- HasUncompiledDataWithPreParsedScope() ||
- HasUncompiledDataWithoutPreParsedScope());
+ HasUncompiledDataWithPreparseData() ||
+ HasUncompiledDataWithoutPreparseData());
CHECK(script_or_debug_info()->IsUndefined(isolate) ||
script_or_debug_info()->IsScript() || HasDebugInfo());
@@ -1014,8 +1092,7 @@ void SharedFunctionInfo::SharedFunctionInfoVerify(Isolate* isolate) {
CHECK(!HasFeedbackMetadata());
CHECK(outer_scope_info()->IsScopeInfo() ||
outer_scope_info()->IsTheHole(isolate));
- } else if (HasBytecodeArray()) {
- CHECK(HasFeedbackMetadata());
+ } else if (HasBytecodeArray() && HasFeedbackMetadata()) {
CHECK(feedback_metadata()->IsFeedbackMetadata());
}
@@ -1024,7 +1101,7 @@ void SharedFunctionInfo::SharedFunctionInfoVerify(Isolate* isolate) {
CHECK_EQ(expected_map_index, function_map_index());
if (scope_info()->length() > 0) {
- ScopeInfo* info = scope_info();
+ ScopeInfo info = scope_info();
CHECK(kind() == info->function_kind());
CHECK_EQ(kind() == kModule, info->scope_type() == MODULE_SCOPE);
}
@@ -1066,7 +1143,7 @@ void Oddball::OddballVerify(Isolate* isolate) {
CHECK(IsOddball());
Heap* heap = isolate->heap();
VerifyHeapPointer(isolate, to_string());
- Object* number = to_number();
+ Object number = to_number();
if (number->IsHeapObject()) {
CHECK(number == ReadOnlyRoots(heap).nan_value() ||
number == ReadOnlyRoots(heap).hole_nan_value());
@@ -1081,25 +1158,25 @@ void Oddball::OddballVerify(Isolate* isolate) {
ReadOnlyRoots roots(heap);
if (map() == roots.undefined_map()) {
- CHECK(this == roots.undefined_value());
+ CHECK(*this == roots.undefined_value());
} else if (map() == roots.the_hole_map()) {
- CHECK(this == roots.the_hole_value());
+ CHECK(*this == roots.the_hole_value());
} else if (map() == roots.null_map()) {
- CHECK(this == roots.null_value());
+ CHECK(*this == roots.null_value());
} else if (map() == roots.boolean_map()) {
- CHECK(this == roots.true_value() || this == roots.false_value());
+ CHECK(*this == roots.true_value() || *this == roots.false_value());
} else if (map() == roots.uninitialized_map()) {
- CHECK(this == roots.uninitialized_value());
+ CHECK(*this == roots.uninitialized_value());
} else if (map() == roots.arguments_marker_map()) {
- CHECK(this == roots.arguments_marker());
+ CHECK(*this == roots.arguments_marker());
} else if (map() == roots.termination_exception_map()) {
- CHECK(this == roots.termination_exception());
+ CHECK(*this == roots.termination_exception());
} else if (map() == roots.exception_map()) {
- CHECK(this == roots.exception());
+ CHECK(*this == roots.exception());
} else if (map() == roots.optimized_out_map()) {
- CHECK(this == roots.optimized_out());
+ CHECK(*this == roots.optimized_out());
} else if (map() == roots.stale_register_map()) {
- CHECK(this == roots.stale_register());
+ CHECK(*this == roots.stale_register());
} else if (map() == roots.self_reference_marker_map()) {
// Multiple instances of this oddball may exist at once.
CHECK_EQ(kind(), Oddball::kSelfReferenceMarker);
@@ -1125,12 +1202,15 @@ void CodeDataContainer::CodeDataContainerVerify(Isolate* isolate) {
}
void Code::CodeVerify(Isolate* isolate) {
- CHECK_LE(constant_pool_offset(), InstructionSize());
- CHECK(IsAligned(InstructionStart(), kCodeAlignment));
+ CHECK_LE(constant_pool_offset(), code_comments_offset());
+ CHECK_LE(code_comments_offset(), InstructionSize());
+ CHECK(IsAligned(raw_instruction_start(), kCodeAlignment));
relocation_info()->ObjectVerify(isolate);
+ CHECK(Code::SizeFor(body_size()) <= kMaxRegularHeapObjectSize ||
+ isolate->heap()->InSpace(*this, CODE_LO_SPACE));
Address last_gc_pc = kNullAddress;
- for (RelocIterator it(this); !it.done(); it.next()) {
+ for (RelocIterator it(*this); !it.done(); it.next()) {
it.rinfo()->Verify(isolate);
// Ensure that GC will not iterate twice over the same pointer.
if (RelocInfo::IsGCRelocMode(it.rinfo()->rmode())) {
@@ -1170,7 +1250,7 @@ void JSArray::JSArrayVerify(Isolate* isolate) {
CHECK(length()->ToArrayLength(&array_length));
}
if (array_length != 0) {
- NumberDictionary* dict = NumberDictionary::cast(elements());
+ NumberDictionary dict = NumberDictionary::cast(elements());
// The dictionary can never have more elements than the array length + 1.
// If the backing store grows the verification might be triggered with
// the old length in place.
@@ -1213,6 +1293,56 @@ void JSMapIterator::JSMapIteratorVerify(Isolate* isolate) {
CHECK(index()->IsSmi());
}
+void JSWeakCell::JSWeakCellVerify(Isolate* isolate) {
+ CHECK(IsJSWeakCell());
+ JSObjectVerify(isolate);
+
+ CHECK(next()->IsJSWeakCell() || next()->IsUndefined(isolate));
+ if (next()->IsJSWeakCell()) {
+ CHECK_EQ(JSWeakCell::cast(next())->prev(), *this);
+ }
+ CHECK(prev()->IsJSWeakCell() || prev()->IsUndefined(isolate));
+ if (prev()->IsJSWeakCell()) {
+ CHECK_EQ(JSWeakCell::cast(prev())->next(), *this);
+ }
+
+ CHECK(factory()->IsUndefined(isolate) || factory()->IsJSWeakFactory());
+}
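The next()/prev() checks above encode the standard doubly-linked-list invariant; restated generically (illustrative, not V8 code):

    // For any node n: a forward link implies the matching back link.
    template <typename Node>
    bool LinksConsistent(const Node* n) {
      if (n->next != nullptr && n->next->prev != n) return false;
      if (n->prev != nullptr && n->prev->next != n) return false;
      return true;
    }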
+
+void JSWeakRef::JSWeakRefVerify(Isolate* isolate) {
+ CHECK(IsJSWeakRef());
+ JSObjectVerify(isolate);
+ CHECK(target()->IsUndefined(isolate) || target()->IsJSReceiver());
+}
+
+void JSWeakFactory::JSWeakFactoryVerify(Isolate* isolate) {
+ CHECK(IsJSWeakFactory());
+ JSObjectVerify(isolate);
+ VerifyHeapPointer(isolate, cleanup());
+ CHECK(active_cells()->IsUndefined(isolate) || active_cells()->IsJSWeakCell());
+ if (active_cells()->IsJSWeakCell()) {
+ CHECK(JSWeakCell::cast(active_cells())->prev()->IsUndefined(isolate));
+ }
+ CHECK(cleared_cells()->IsUndefined(isolate) ||
+ cleared_cells()->IsJSWeakCell());
+ if (cleared_cells()->IsJSWeakCell()) {
+ CHECK(JSWeakCell::cast(cleared_cells())->prev()->IsUndefined(isolate));
+ }
+}
+
+void JSWeakFactoryCleanupIterator::JSWeakFactoryCleanupIteratorVerify(
+ Isolate* isolate) {
+ CHECK(IsJSWeakFactoryCleanupIterator());
+ JSObjectVerify(isolate);
+ VerifyHeapPointer(isolate, factory());
+}
+
+void WeakFactoryCleanupJobTask::WeakFactoryCleanupJobTaskVerify(
+ Isolate* isolate) {
+ CHECK(IsWeakFactoryCleanupJobTask());
+ CHECK(factory()->IsJSWeakFactory());
+}
+
void JSWeakMap::JSWeakMapVerify(Isolate* isolate) {
CHECK(IsJSWeakMap());
JSObjectVerify(isolate);
@@ -1288,14 +1418,8 @@ void PromiseReactionJobTask::PromiseReactionJobTaskVerify(Isolate* isolate) {
CHECK(handler()->IsUndefined(isolate) || handler()->IsCallable());
VerifyHeapPointer(isolate, promise_or_capability());
CHECK(promise_or_capability()->IsJSPromise() ||
- promise_or_capability()->IsPromiseCapability());
-}
-
-void MicrotaskQueue::MicrotaskQueueVerify(Isolate* isolate) {
- CHECK(IsMicrotaskQueue());
- VerifyHeapPointer(isolate, queue());
- VerifySmiField(kPendingMicrotaskCountOffset);
- CHECK_LE(pending_microtask_count(), queue()->length());
+ promise_or_capability()->IsPromiseCapability() ||
+ promise_or_capability()->IsUndefined(isolate));
}
void PromiseFulfillReactionJobTask::PromiseFulfillReactionJobTaskVerify(
@@ -1347,7 +1471,8 @@ void PromiseReaction::PromiseReactionVerify(Isolate* isolate) {
fulfill_handler()->IsCallable());
VerifyHeapPointer(isolate, promise_or_capability());
CHECK(promise_or_capability()->IsJSPromise() ||
- promise_or_capability()->IsPromiseCapability());
+ promise_or_capability()->IsPromiseCapability() ||
+ promise_or_capability()->IsUndefined(isolate));
}
void JSPromise::JSPromiseVerify(Isolate* isolate) {
@@ -1385,54 +1510,78 @@ void SmallOrderedHashTable<Derived>::SmallOrderedHashTableVerify(
for (int entry = 0; entry < NumberOfElements(); entry++) {
for (int offset = 0; offset < Derived::kEntrySize; offset++) {
- Object* val = GetDataEntry(entry, offset);
+ Object val = GetDataEntry(entry, offset);
VerifyPointer(isolate, val);
}
}
+ for (int entry = NumberOfElements() + NumberOfDeletedElements();
+ entry < Capacity(); entry++) {
+ for (int offset = 0; offset < Derived::kEntrySize; offset++) {
+ Object val = GetDataEntry(entry, offset);
+ CHECK(val->IsTheHole(isolate));
+ }
+ }
+}
+
+void SmallOrderedHashMap::SmallOrderedHashMapVerify(Isolate* isolate) {
+ SmallOrderedHashTable<SmallOrderedHashMap>::SmallOrderedHashTableVerify(
+ isolate);
for (int entry = NumberOfElements(); entry < NumberOfDeletedElements();
entry++) {
- for (int offset = 0; offset < Derived::kEntrySize; offset++) {
- Object* val = GetDataEntry(entry, offset);
+ for (int offset = 0; offset < kEntrySize; offset++) {
+ Object val = GetDataEntry(entry, offset);
CHECK(val->IsTheHole(isolate));
}
}
+}
- for (int entry = NumberOfElements() + NumberOfDeletedElements();
- entry < Capacity(); entry++) {
- for (int offset = 0; offset < Derived::kEntrySize; offset++) {
- Object* val = GetDataEntry(entry, offset);
+void SmallOrderedHashSet::SmallOrderedHashSetVerify(Isolate* isolate) {
+ SmallOrderedHashTable<SmallOrderedHashSet>::SmallOrderedHashTableVerify(
+ isolate);
+ for (int entry = NumberOfElements(); entry < NumberOfDeletedElements();
+ entry++) {
+ for (int offset = 0; offset < kEntrySize; offset++) {
+ Object val = GetDataEntry(entry, offset);
CHECK(val->IsTheHole(isolate));
}
}
}
-template void SmallOrderedHashTable<
- SmallOrderedHashMap>::SmallOrderedHashTableVerify(Isolate* isolate);
-template void SmallOrderedHashTable<
- SmallOrderedHashSet>::SmallOrderedHashTableVerify(Isolate* isolate);
+void SmallOrderedNameDictionary::SmallOrderedNameDictionaryVerify(
+ Isolate* isolate) {
+ SmallOrderedHashTable<
+ SmallOrderedNameDictionary>::SmallOrderedHashTableVerify(isolate);
+ for (int entry = NumberOfElements(); entry < NumberOfDeletedElements();
+ entry++) {
+ for (int offset = 0; offset < kEntrySize; offset++) {
+ Object val = GetDataEntry(entry, offset);
+ CHECK(val->IsTheHole(isolate) ||
+ (PropertyDetails::Empty().AsSmi() == Smi::cast(val)));
+ }
+ }
+}
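Taken together, these verifiers treat each small table's data region as consecutive ranges; schematically (bounds simplified for illustration):

    // Data-table regions implied by the loops above:
    //   [0, NumberOfElements())               live entries -> VerifyPointer
    //   [NumberOfElements(), live + deleted)  deleted      -> the-hole (or an
    //                                         empty PropertyDetails Smi in the
    //                                         name dictionary case)
    //   [live + deleted, Capacity())          never used   -> the-hole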
void JSRegExp::JSRegExpVerify(Isolate* isolate) {
JSObjectVerify(isolate);
CHECK(data()->IsUndefined(isolate) || data()->IsFixedArray());
switch (TypeTag()) {
case JSRegExp::ATOM: {
- FixedArray* arr = FixedArray::cast(data());
+ FixedArray arr = FixedArray::cast(data());
CHECK(arr->get(JSRegExp::kAtomPatternIndex)->IsString());
break;
}
case JSRegExp::IRREGEXP: {
bool is_native = RegExpImpl::UsesNativeRegExp();
- FixedArray* arr = FixedArray::cast(data());
- Object* one_byte_data = arr->get(JSRegExp::kIrregexpLatin1CodeIndex);
+ FixedArray arr = FixedArray::cast(data());
+ Object one_byte_data = arr->get(JSRegExp::kIrregexpLatin1CodeIndex);
// Smi : Not compiled yet (-1).
// Code/ByteArray: Compiled code.
CHECK(
(one_byte_data->IsSmi() &&
Smi::ToInt(one_byte_data) == JSRegExp::kUninitializedValue) ||
(is_native ? one_byte_data->IsCode() : one_byte_data->IsByteArray()));
- Object* uc16_data = arr->get(JSRegExp::kIrregexpUC16CodeIndex);
+ Object uc16_data = arr->get(JSRegExp::kIrregexpUC16CodeIndex);
CHECK((uc16_data->IsSmi() &&
Smi::ToInt(uc16_data) == JSRegExp::kUninitializedValue) ||
(is_native ? uc16_data->IsCode() : uc16_data->IsByteArray()));
@@ -1472,6 +1621,11 @@ void JSProxy::JSProxyVerify(Isolate* isolate) {
void JSArrayBuffer::JSArrayBufferVerify(Isolate* isolate) {
CHECK(IsJSArrayBuffer());
+ if (FIELD_SIZE(kOptionalPaddingOffset) != 0) {
+ CHECK_EQ(4, FIELD_SIZE(kOptionalPaddingOffset));
+ CHECK_EQ(0,
+ *reinterpret_cast<uint32_t*>(address() + kOptionalPaddingOffset));
+ }
JSObjectVerify(isolate);
}
@@ -1563,7 +1717,7 @@ void Module::ModuleVerify(Isolate* isolate) {
module_namespace()->IsJSModuleNamespace());
if (module_namespace()->IsJSModuleNamespace()) {
CHECK_LE(kInstantiating, status());
- CHECK_EQ(JSModuleNamespace::cast(module_namespace())->module(), this);
+ CHECK_EQ(JSModuleNamespace::cast(module_namespace())->module(), *this);
}
CHECK_EQ(requested_modules()->length(), info()->module_requests()->length());
@@ -1575,7 +1729,7 @@ void Module::ModuleVerify(Isolate* isolate) {
void PrototypeInfo::PrototypeInfoVerify(Isolate* isolate) {
CHECK(IsPrototypeInfo());
- Object* module_ns = module_namespace();
+ Object module_ns = module_namespace();
CHECK(module_ns->IsJSModuleNamespace() || module_ns->IsUndefined(isolate));
if (prototype_users()->IsWeakArrayList()) {
PrototypeUsers::Verify(WeakArrayList::cast(prototype_users()));
@@ -1584,7 +1738,7 @@ void PrototypeInfo::PrototypeInfoVerify(Isolate* isolate) {
}
}
-void PrototypeUsers::Verify(WeakArrayList* array) {
+void PrototypeUsers::Verify(WeakArrayList array) {
if (array->length() == 0) {
// Allow empty & uninitialized lists.
return;
@@ -1595,7 +1749,7 @@ void PrototypeUsers::Verify(WeakArrayList* array) {
while (empty_slot != kNoEmptySlotsMarker) {
CHECK_GT(empty_slot, 0);
CHECK_LT(empty_slot, array->length());
- empty_slot = Smi::ToInt(array->Get(empty_slot)->cast<Smi>());
+ empty_slot = array->Get(empty_slot).ToSmi().value();
++empty_slots_count;
}
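The loop above walks a free list stored inside the array itself: each empty slot holds, as a Smi, the index of the next empty slot, and kNoEmptySlotsMarker terminates the chain. A plain-array analogue (illustrative only):

    // a[i] is the index of the next empty slot; end_marker ends the chain.
    int CountEmptySlots(const int* a, int head, int end_marker) {
      int count = 0;
      for (int i = head; i != end_marker; i = a[i]) ++count;
      return count;
    }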
@@ -1603,8 +1757,8 @@ void PrototypeUsers::Verify(WeakArrayList* array) {
// slots.
int weak_maps_count = 0;
for (int i = kFirstIndex; i < array->length(); ++i) {
- HeapObject* heap_object;
- MaybeObject* object = array->Get(i);
+ HeapObject heap_object;
+ MaybeObject object = array->Get(i);
if ((object->GetHeapObjectIfWeak(&heap_object) && heap_object->IsMap()) ||
object->IsCleared()) {
++weak_maps_count;
@@ -1619,11 +1773,11 @@ void PrototypeUsers::Verify(WeakArrayList* array) {
void Tuple2::Tuple2Verify(Isolate* isolate) {
CHECK(IsTuple2());
Heap* heap = isolate->heap();
- if (this == ReadOnlyRoots(heap).empty_enum_cache()) {
+ if (*this == ReadOnlyRoots(heap).empty_enum_cache()) {
CHECK_EQ(ReadOnlyRoots(heap).empty_fixed_array(),
- EnumCache::cast(this)->keys());
+ EnumCache::cast(*this)->keys());
CHECK_EQ(ReadOnlyRoots(heap).empty_fixed_array(),
- EnumCache::cast(this)->indices());
+ EnumCache::cast(*this)->indices());
} else {
VerifyObjectField(isolate, kValue1Offset);
VerifyObjectField(isolate, kValue2Offset);
@@ -1652,6 +1806,15 @@ void ArrayBoilerplateDescription::ArrayBoilerplateDescriptionVerify(
VerifyObjectField(isolate, kConstantElementsOffset);
}
+void AsmWasmData::AsmWasmDataVerify(Isolate* isolate) {
+ CHECK(IsAsmWasmData());
+ VerifyObjectField(isolate, kManagedNativeModuleOffset);
+ VerifyObjectField(isolate, kExportWrappersOffset);
+ VerifyObjectField(isolate, kAsmJsOffsetTableOffset);
+ CHECK(uses_bitset()->IsHeapNumber());
+ VerifyObjectField(isolate, kUsesBitsetOffset);
+}
+
void WasmDebugInfo::WasmDebugInfoVerify(Isolate* isolate) {
CHECK(IsWasmDebugInfo());
VerifyObjectField(isolate, kInstanceOffset);
@@ -1665,6 +1828,11 @@ void WasmDebugInfo::WasmDebugInfoVerify(Isolate* isolate) {
VerifyObjectField(isolate, kCWasmEntryMapOffset);
}
+void WasmExceptionTag::WasmExceptionTagVerify(Isolate* isolate) {
+ CHECK(IsWasmExceptionTag());
+ VerifySmiField(kIndexOffset);
+}
+
void WasmInstanceObject::WasmInstanceObjectVerify(Isolate* isolate) {
JSObjectVerify(isolate);
CHECK(IsWasmInstanceObject());
@@ -1672,8 +1840,8 @@ void WasmInstanceObject::WasmInstanceObjectVerify(Isolate* isolate) {
// Just generically check all tagged fields. Don't check the untagged fields,
// as some of them might still contain the "undefined" value if the
// WasmInstanceObject is not fully set up yet.
- for (int offset = kHeaderSize; offset < kFirstUntaggedOffset;
- offset += kPointerSize) {
+ for (int offset = kHeaderSize; offset < kEndOfTaggedFieldsOffset;
+ offset += kTaggedSize) {
VerifyObjectField(isolate, offset);
}
}
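The loop deliberately stops at the tagged/untagged boundary; the layout assumed here is:

    //   [kHeaderSize, kEndOfTaggedFieldsOffset)  tagged fields, stepped by
    //                                            kTaggedSize, safe to verify
    //   [kEndOfTaggedFieldsOffset, size)         raw (untagged) fields, skipped:
    //                                            verifying them as object
    //                                            pointers would misread raw words
    //                                            on a half-initialized instance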
@@ -1731,9 +1899,9 @@ void AccessorInfo::AccessorInfoVerify(Isolate* isolate) {
CHECK(IsAccessorInfo());
VerifyPointer(isolate, name());
VerifyPointer(isolate, expected_receiver_type());
- VerifyForeignPointer(isolate, this, getter());
- VerifyForeignPointer(isolate, this, setter());
- VerifyForeignPointer(isolate, this, js_getter());
+ VerifyForeignPointer(isolate, *this, getter());
+ VerifyForeignPointer(isolate, *this, setter());
+ VerifyForeignPointer(isolate, *this, js_getter());
VerifyPointer(isolate, data());
}
@@ -1765,11 +1933,11 @@ void CallHandlerInfo::CallHandlerInfoVerify(Isolate* isolate) {
void InterceptorInfo::InterceptorInfoVerify(Isolate* isolate) {
CHECK(IsInterceptorInfo());
- VerifyForeignPointer(isolate, this, getter());
- VerifyForeignPointer(isolate, this, setter());
- VerifyForeignPointer(isolate, this, query());
- VerifyForeignPointer(isolate, this, deleter());
- VerifyForeignPointer(isolate, this, enumerator());
+ VerifyForeignPointer(isolate, *this, getter());
+ VerifyForeignPointer(isolate, *this, setter());
+ VerifyForeignPointer(isolate, *this, query());
+ VerifyForeignPointer(isolate, *this, deleter());
+ VerifyForeignPointer(isolate, *this, enumerator());
VerifyPointer(isolate, data());
VerifySmiField(kFlagsOffset);
}
@@ -1785,14 +1953,20 @@ void FunctionTemplateInfo::FunctionTemplateInfoVerify(Isolate* isolate) {
TemplateInfoVerify(isolate);
VerifyPointer(isolate, serial_number());
VerifyPointer(isolate, call_code());
+ VerifyPointer(isolate, signature());
+ VerifyPointer(isolate, cached_property_name());
+ VerifyPointer(isolate, rare_data());
+}
+
+void FunctionTemplateRareData::FunctionTemplateRareDataVerify(
+ Isolate* isolate) {
+ CHECK(IsFunctionTemplateRareData());
VerifyPointer(isolate, prototype_template());
VerifyPointer(isolate, parent_template());
VerifyPointer(isolate, named_property_handler());
VerifyPointer(isolate, indexed_property_handler());
VerifyPointer(isolate, instance_template());
- VerifyPointer(isolate, signature());
VerifyPointer(isolate, access_check_info());
- VerifyPointer(isolate, cached_property_name());
}
void ObjectTemplateInfo::ObjectTemplateInfoVerify(Isolate* isolate) {
@@ -1818,8 +1992,8 @@ void Script::ScriptVerify(Isolate* isolate) {
VerifyPointer(isolate, name());
VerifyPointer(isolate, line_ends());
for (int i = 0; i < shared_function_infos()->length(); ++i) {
- MaybeObject* maybe_object = shared_function_infos()->Get(i);
- HeapObject* heap_object;
+ MaybeObject maybe_object = shared_function_infos()->Get(i);
+ HeapObject heap_object;
CHECK(maybe_object->IsWeak() || maybe_object->IsCleared() ||
(maybe_object->GetHeapObjectIfStrong(&heap_object) &&
heap_object->IsUndefined(isolate)));
@@ -1827,11 +2001,11 @@ void Script::ScriptVerify(Isolate* isolate) {
}
void NormalizedMapCache::NormalizedMapCacheVerify(Isolate* isolate) {
- WeakFixedArray::cast(this)->WeakFixedArrayVerify(isolate);
+ WeakFixedArray::cast(*this)->WeakFixedArrayVerify(isolate);
if (FLAG_enable_slow_asserts) {
for (int i = 0; i < length(); i++) {
- MaybeObject* e = WeakFixedArray::Get(i);
- HeapObject* heap_object;
+ MaybeObject e = WeakFixedArray::Get(i);
+ HeapObject heap_object;
if (e->GetHeapObjectIfWeak(&heap_object)) {
Map::cast(heap_object)->DictionaryMapVerify(isolate);
} else {
@@ -1857,28 +2031,28 @@ void StackFrameInfo::StackFrameInfoVerify(Isolate* isolate) {
VerifyPointer(isolate, function_name());
}
-void PreParsedScopeData::PreParsedScopeDataVerify(Isolate* isolate) {
- CHECK(IsPreParsedScopeData());
- CHECK(scope_data()->IsByteArray());
- CHECK_GE(length(), 0);
+void PreparseData::PreparseDataVerify(Isolate* isolate) {
+ CHECK(IsPreparseData());
+ CHECK_LE(0, data_length());
+ CHECK_LE(0, children_length());
- for (int i = 0; i < length(); ++i) {
- Object* child = child_data(i);
- CHECK(child->IsPreParsedScopeData() || child->IsNull());
+ for (int i = 0; i < children_length(); ++i) {
+ Object child = get_child_raw(i);
+ CHECK(child->IsNull() || child->IsPreparseData());
VerifyPointer(isolate, child);
}
}
-void UncompiledDataWithPreParsedScope::UncompiledDataWithPreParsedScopeVerify(
+void UncompiledDataWithPreparseData::UncompiledDataWithPreparseDataVerify(
Isolate* isolate) {
- CHECK(IsUncompiledDataWithPreParsedScope());
+ CHECK(IsUncompiledDataWithPreparseData());
VerifyPointer(isolate, inferred_name());
- VerifyPointer(isolate, pre_parsed_scope_data());
+ VerifyPointer(isolate, preparse_data());
}
-void UncompiledDataWithoutPreParsedScope::
- UncompiledDataWithoutPreParsedScopeVerify(Isolate* isolate) {
- CHECK(IsUncompiledDataWithoutPreParsedScope());
+void UncompiledDataWithoutPreparseData::UncompiledDataWithoutPreparseDataVerify(
+ Isolate* isolate) {
+ CHECK(IsUncompiledDataWithoutPreparseData());
VerifyPointer(isolate, inferred_name());
}
@@ -1914,6 +2088,7 @@ void JSDateTimeFormat::JSDateTimeFormatVerify(Isolate* isolate) {
VerifyObjectField(isolate, kICULocaleOffset);
VerifyObjectField(isolate, kICUSimpleDateFormatOffset);
VerifyObjectField(isolate, kBoundFormatOffset);
+ VerifyObjectField(isolate, kFlagsOffset);
}
void JSListFormat::JSListFormatVerify(Isolate* isolate) {
@@ -1925,16 +2100,7 @@ void JSListFormat::JSListFormatVerify(Isolate* isolate) {
void JSLocale::JSLocaleVerify(Isolate* isolate) {
JSObjectVerify(isolate);
- VerifyObjectField(isolate, kLanguageOffset);
- VerifyObjectField(isolate, kScriptOffset);
- VerifyObjectField(isolate, kRegionOffset);
- VerifyObjectField(isolate, kBaseNameOffset);
- VerifyObjectField(isolate, kLocaleOffset);
- // Unicode extension fields.
- VerifyObjectField(isolate, kFlagsOffset);
- VerifyObjectField(isolate, kCalendarOffset);
- VerifyObjectField(isolate, kCollationOffset);
- VerifyObjectField(isolate, kNumberingSystemOffset);
+ VerifyObjectField(isolate, kICULocaleOffset);
}
void JSNumberFormat::JSNumberFormatVerify(Isolate* isolate) {
@@ -1950,7 +2116,7 @@ void JSPluralRules::JSPluralRulesVerify(Isolate* isolate) {
CHECK(IsJSPluralRules());
JSObjectVerify(isolate);
VerifyObjectField(isolate, kLocaleOffset);
- VerifyObjectField(isolate, kTypeOffset);
+ VerifyObjectField(isolate, kFlagsOffset);
VerifyObjectField(isolate, kICUPluralRulesOffset);
VerifyObjectField(isolate, kICUDecimalFormatOffset);
}
@@ -1962,6 +2128,13 @@ void JSRelativeTimeFormat::JSRelativeTimeFormatVerify(Isolate* isolate) {
VerifyObjectField(isolate, kFlagsOffset);
}
+void JSSegmentIterator::JSSegmentIteratorVerify(Isolate* isolate) {
+ JSObjectVerify(isolate);
+ VerifyObjectField(isolate, kICUBreakIteratorOffset);
+ VerifyObjectField(isolate, kUnicodeStringOffset);
+ VerifyObjectField(isolate, kFlagsOffset);
+}
+
void JSSegmenter::JSSegmenterVerify(Isolate* isolate) {
JSObjectVerify(isolate);
VerifyObjectField(isolate, kLocaleOffset);
@@ -1983,12 +2156,12 @@ void JSObject::IncrementSpillStatistics(Isolate* isolate,
info->number_of_fast_used_fields_ += map()->NextFreePropertyIndex();
info->number_of_fast_unused_fields_ += map()->UnusedPropertyFields();
} else if (IsJSGlobalObject()) {
- GlobalDictionary* dict = JSGlobalObject::cast(this)->global_dictionary();
+ GlobalDictionary dict = JSGlobalObject::cast(*this)->global_dictionary();
info->number_of_slow_used_properties_ += dict->NumberOfElements();
info->number_of_slow_unused_properties_ +=
dict->Capacity() - dict->NumberOfElements();
} else {
- NameDictionary* dict = property_dictionary();
+ NameDictionary dict = property_dictionary();
info->number_of_slow_used_properties_ += dict->NumberOfElements();
info->number_of_slow_unused_properties_ +=
dict->Capacity() - dict->NumberOfElements();
@@ -2004,7 +2177,7 @@ void JSObject::IncrementSpillStatistics(Isolate* isolate,
case FAST_STRING_WRAPPER_ELEMENTS: {
info->number_of_objects_with_fast_elements_++;
int holes = 0;
- FixedArray* e = FixedArray::cast(elements());
+ FixedArray e = FixedArray::cast(elements());
int len = e->length();
for (int i = 0; i < len; i++) {
if (e->get(i)->IsTheHole(isolate)) holes++;
@@ -2020,13 +2193,13 @@ void JSObject::IncrementSpillStatistics(Isolate* isolate,
#undef TYPED_ARRAY_CASE
{
info->number_of_objects_with_fast_elements_++;
- FixedArrayBase* e = FixedArrayBase::cast(elements());
+ FixedArrayBase e = FixedArrayBase::cast(elements());
info->number_of_fast_used_elements_ += e->length();
break;
}
case DICTIONARY_ELEMENTS:
case SLOW_STRING_WRAPPER_ELEMENTS: {
- NumberDictionary* dict = element_dictionary();
+ NumberDictionary dict = element_dictionary();
info->number_of_slow_used_elements_ += dict->NumberOfElements();
info->number_of_slow_unused_elements_ +=
dict->Capacity() - dict->NumberOfElements();
@@ -2079,10 +2252,10 @@ void JSObject::SpillInformation::Print() {
bool DescriptorArray::IsSortedNoDuplicates(int valid_entries) {
if (valid_entries == -1) valid_entries = number_of_descriptors();
- Name* current_key = nullptr;
+ Name current_key;
uint32_t current = 0;
for (int i = 0; i < number_of_descriptors(); i++) {
- Name* key = GetSortedKey(i);
+ Name key = GetSortedKey(i);
if (key == current_key) {
Print();
return false;
@@ -2100,19 +2273,19 @@ bool DescriptorArray::IsSortedNoDuplicates(int valid_entries) {
bool TransitionArray::IsSortedNoDuplicates(int valid_entries) {
DCHECK_EQ(valid_entries, -1);
- Name* prev_key = nullptr;
+ Name prev_key;
PropertyKind prev_kind = kData;
PropertyAttributes prev_attributes = NONE;
uint32_t prev_hash = 0;
for (int i = 0; i < number_of_transitions(); i++) {
- Name* key = GetSortedKey(i);
+ Name key = GetSortedKey(i);
uint32_t hash = key->Hash();
PropertyKind kind = kData;
PropertyAttributes attributes = NONE;
if (!TransitionsAccessor::IsSpecialTransition(key->GetReadOnlyRoots(),
key)) {
- Map* target = GetTarget(i);
+ Map target = GetTarget(i);
PropertyDetails details =
TransitionsAccessor::GetTargetDetails(key, target);
kind = details.kind();
@@ -2142,53 +2315,19 @@ bool TransitionsAccessor::IsSortedNoDuplicates() {
return transitions()->IsSortedNoDuplicates();
}
-
-static bool CheckOneBackPointer(Map* current_map, Object* target) {
+static bool CheckOneBackPointer(Map current_map, Object target) {
return !target->IsMap() || Map::cast(target)->GetBackPointer() == current_map;
}
bool TransitionsAccessor::IsConsistentWithBackPointers() {
int num_transitions = NumberOfTransitions();
for (int i = 0; i < num_transitions; i++) {
- Map* target = GetTarget(i);
+ Map target = GetTarget(i);
if (!CheckOneBackPointer(map_, target)) return false;
}
return true;
}
-// Estimates if there is a path from the object to a context.
-// This function is not precise, and can return false even if
-// there is a path to a context.
-bool CanLeak(Object* obj, Heap* heap) {
- if (!obj->IsHeapObject()) return false;
- if (obj->IsCell()) {
- return CanLeak(Cell::cast(obj)->value(), heap);
- }
- if (obj->IsPropertyCell()) {
- return CanLeak(PropertyCell::cast(obj)->value(), heap);
- }
- if (obj->IsContext()) return true;
- if (obj->IsMap()) {
- Map* map = Map::cast(obj);
- for (RootIndex root_index = RootIndex::kFirstStrongRoot;
- root_index <= RootIndex::kLastStrongRoot; ++root_index) {
- if (map == heap->root(root_index)) return false;
- }
- return true;
- }
- return CanLeak(HeapObject::cast(obj)->map(), heap);
-}
-
-void Code::VerifyEmbeddedObjects(Isolate* isolate, VerifyMode mode) {
- if (kind() == OPTIMIZED_FUNCTION) return;
- Heap* heap = isolate->heap();
- int mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
- for (RelocIterator it(this, mask); !it.done(); it.next()) {
- Object* target = it.rinfo()->target_object();
- DCHECK(!CanLeak(target, heap));
- }
-}
-
#endif // DEBUG
} // namespace internal
diff --git a/deps/v8/src/objects-definitions.h b/deps/v8/src/objects-definitions.h
index 8b8d50f2a7..08f3ce8c76 100644
--- a/deps/v8/src/objects-definitions.h
+++ b/deps/v8/src/objects-definitions.h
@@ -96,9 +96,11 @@ namespace internal {
V(ACCESSOR_PAIR_TYPE) \
V(ALIASED_ARGUMENTS_ENTRY_TYPE) \
V(ALLOCATION_MEMENTO_TYPE) \
+ V(ASM_WASM_DATA_TYPE) \
V(ASYNC_GENERATOR_REQUEST_TYPE) \
V(DEBUG_INFO_TYPE) \
V(FUNCTION_TEMPLATE_INFO_TYPE) \
+ V(FUNCTION_TEMPLATE_RARE_DATA_TYPE) \
V(INTERCEPTOR_INFO_TYPE) \
V(INTERPRETER_DATA_TYPE) \
V(MODULE_INFO_ENTRY_TYPE) \
@@ -113,6 +115,7 @@ namespace internal {
V(TUPLE3_TYPE) \
V(ARRAY_BOILERPLATE_DESCRIPTION_TYPE) \
V(WASM_DEBUG_INFO_TYPE) \
+ V(WASM_EXCEPTION_TAG_TYPE) \
V(WASM_EXPORTED_FUNCTION_DATA_TYPE) \
\
V(CALLABLE_TASK_TYPE) \
@@ -120,16 +123,17 @@ namespace internal {
V(PROMISE_FULFILL_REACTION_JOB_TASK_TYPE) \
V(PROMISE_REJECT_REACTION_JOB_TASK_TYPE) \
V(PROMISE_RESOLVE_THENABLE_JOB_TASK_TYPE) \
- \
- V(MICROTASK_QUEUE_TYPE) \
+ V(WEAK_FACTORY_CLEANUP_JOB_TASK_TYPE) \
\
V(ALLOCATION_SITE_TYPE) \
+ V(EMBEDDER_DATA_ARRAY_TYPE) \
\
V(FIXED_ARRAY_TYPE) \
V(OBJECT_BOILERPLATE_DESCRIPTION_TYPE) \
V(HASH_TABLE_TYPE) \
V(ORDERED_HASH_MAP_TYPE) \
V(ORDERED_HASH_SET_TYPE) \
+ V(ORDERED_NAME_DICTIONARY_TYPE) \
V(NAME_DICTIONARY_TYPE) \
V(GLOBAL_DICTIONARY_TYPE) \
V(NUMBER_DICTIONARY_TYPE) \
@@ -151,24 +155,25 @@ namespace internal {
V(WITH_CONTEXT_TYPE) \
\
V(WEAK_FIXED_ARRAY_TYPE) \
- V(DESCRIPTOR_ARRAY_TYPE) \
V(TRANSITION_ARRAY_TYPE) \
\
V(CALL_HANDLER_INFO_TYPE) \
V(CELL_TYPE) \
V(CODE_DATA_CONTAINER_TYPE) \
+ V(DESCRIPTOR_ARRAY_TYPE) \
V(FEEDBACK_CELL_TYPE) \
V(FEEDBACK_VECTOR_TYPE) \
V(LOAD_HANDLER_TYPE) \
- V(PRE_PARSED_SCOPE_DATA_TYPE) \
+ V(PREPARSE_DATA_TYPE) \
V(PROPERTY_ARRAY_TYPE) \
V(PROPERTY_CELL_TYPE) \
V(SHARED_FUNCTION_INFO_TYPE) \
V(SMALL_ORDERED_HASH_MAP_TYPE) \
V(SMALL_ORDERED_HASH_SET_TYPE) \
+ V(SMALL_ORDERED_NAME_DICTIONARY_TYPE) \
V(STORE_HANDLER_TYPE) \
- V(UNCOMPILED_DATA_WITHOUT_PRE_PARSED_SCOPE_TYPE) \
- V(UNCOMPILED_DATA_WITH_PRE_PARSED_SCOPE_TYPE) \
+ V(UNCOMPILED_DATA_WITHOUT_PREPARSE_DATA_TYPE) \
+ V(UNCOMPILED_DATA_WITH_PREPARSE_DATA_TYPE) \
V(WEAK_ARRAY_LIST_TYPE) \
\
V(JS_PROXY_TYPE) \
@@ -185,6 +190,7 @@ namespace internal {
V(JS_ARRAY_ITERATOR_TYPE) \
V(JS_ARRAY_TYPE) \
V(JS_ASYNC_FROM_SYNC_ITERATOR_TYPE) \
+ V(JS_ASYNC_FUNCTION_OBJECT_TYPE) \
V(JS_ASYNC_GENERATOR_OBJECT_TYPE) \
V(JS_CONTEXT_EXTENSION_OBJECT_TYPE) \
V(JS_DATE_TYPE) \
@@ -202,6 +208,10 @@ namespace internal {
V(JS_SET_KEY_VALUE_ITERATOR_TYPE) \
V(JS_SET_VALUE_ITERATOR_TYPE) \
V(JS_STRING_ITERATOR_TYPE) \
+ V(JS_WEAK_CELL_TYPE) \
+ V(JS_WEAK_REF_TYPE) \
+ V(JS_WEAK_FACTORY_CLEANUP_ITERATOR_TYPE) \
+ V(JS_WEAK_FACTORY_TYPE) \
V(JS_WEAK_MAP_TYPE) \
V(JS_WEAK_SET_TYPE) \
V(JS_TYPED_ARRAY_TYPE) \
@@ -228,6 +238,7 @@ namespace internal {
V(JS_INTL_NUMBER_FORMAT_TYPE) \
V(JS_INTL_PLURAL_RULES_TYPE) \
V(JS_INTL_RELATIVE_TIME_FORMAT_TYPE) \
+ V(JS_INTL_SEGMENT_ITERATOR_TYPE) \
V(JS_INTL_SEGMENTER_TYPE) \
INSTANCE_TYPE_LIST_AFTER_INTL(V)
#else
@@ -307,11 +318,14 @@ namespace internal {
V(_, ALIASED_ARGUMENTS_ENTRY_TYPE, AliasedArgumentsEntry, \
aliased_arguments_entry) \
V(_, ALLOCATION_MEMENTO_TYPE, AllocationMemento, allocation_memento) \
+ V(_, ASM_WASM_DATA_TYPE, AsmWasmData, asm_wasm_data) \
V(_, ASYNC_GENERATOR_REQUEST_TYPE, AsyncGeneratorRequest, \
async_generator_request) \
V(_, DEBUG_INFO_TYPE, DebugInfo, debug_info) \
V(_, FUNCTION_TEMPLATE_INFO_TYPE, FunctionTemplateInfo, \
function_template_info) \
+ V(_, FUNCTION_TEMPLATE_RARE_DATA_TYPE, FunctionTemplateRareData, \
+ function_template_rare_data) \
V(_, INTERCEPTOR_INFO_TYPE, InterceptorInfo, interceptor_info) \
V(_, INTERPRETER_DATA_TYPE, InterpreterData, interpreter_data) \
V(_, MODULE_INFO_ENTRY_TYPE, ModuleInfoEntry, module_info_entry) \
@@ -327,6 +341,7 @@ namespace internal {
V(_, ARRAY_BOILERPLATE_DESCRIPTION_TYPE, ArrayBoilerplateDescription, \
array_boilerplate_description) \
V(_, WASM_DEBUG_INFO_TYPE, WasmDebugInfo, wasm_debug_info) \
+ V(_, WASM_EXCEPTION_TAG_TYPE, WasmExceptionTag, wasm_exception_tag) \
V(_, WASM_EXPORTED_FUNCTION_DATA_TYPE, WasmExportedFunctionData, \
wasm_exported_function_data) \
V(_, CALLABLE_TASK_TYPE, CallableTask, callable_task) \
@@ -337,7 +352,8 @@ namespace internal {
promise_reject_reaction_job_task) \
V(_, PROMISE_RESOLVE_THENABLE_JOB_TASK_TYPE, PromiseResolveThenableJobTask, \
promise_resolve_thenable_job_task) \
- V(_, MICROTASK_QUEUE_TYPE, MicrotaskQueue, microtask_queue)
+ V(_, WEAK_FACTORY_CLEANUP_JOB_TASK_TYPE, WeakFactoryCleanupJobTask, \
+ weak_factory_cleanup_job_task)
// Adapts one STRUCT_LIST_GENERATOR entry to the STRUCT_LIST entry
#define STRUCT_LIST_ADAPTER(V, NAME, Name, name) V(NAME, Name, name)
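For anyone tracing the macro plumbing: entries in the generator list carry the adapter as their first parameter, and the adapter above simply drops its own leading slot. A minimal sketch of the wiring, using a hypothetical one-entry generator in the same shape as the list above:

#define MY_STRUCT_LIST_GENERATOR(V, _) \
  V(_, DEBUG_INFO_TYPE, DebugInfo, debug_info)
#define MY_STRUCT_LIST(V) MY_STRUCT_LIST_GENERATOR(STRUCT_LIST_ADAPTER, V)
// MY_STRUCT_LIST(F) now expands to F(DEBUG_INFO_TYPE, DebugInfo, debug_info),
// i.e. the (NAME, Name, name) triple with the generator-only slot removed.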
diff --git a/deps/v8/src/objects-inl.h b/deps/v8/src/objects-inl.h
index 5c66d5f60a..0bad18efa8 100644
--- a/deps/v8/src/objects-inl.h
+++ b/deps/v8/src/objects-inl.h
@@ -14,7 +14,6 @@
#include "src/objects.h"
-#include "src/base/atomicops.h"
#include "src/base/bits.h"
#include "src/base/tsan.h"
#include "src/builtins/builtins.h"
@@ -24,7 +23,7 @@
#include "src/field-index-inl.h"
#include "src/handles-inl.h"
#include "src/heap/factory.h"
-#include "src/heap/heap-inl.h"
+#include "src/heap/heap-inl.h" // crbug.com/v8/8499
#include "src/isolate-inl.h"
#include "src/keys.h"
#include "src/layout-descriptor-inl.h"
@@ -32,12 +31,20 @@
#include "src/lookup-inl.h"
#include "src/maybe-handles-inl.h"
#include "src/objects/bigint.h"
-#include "src/objects/descriptor-array.h"
+#include "src/objects/descriptor-array-inl.h"
+#include "src/objects/embedder-data-array-inl.h"
+#include "src/objects/free-space-inl.h"
+#include "src/objects/heap-number-inl.h"
+#include "src/objects/heap-object.h" // TODO(jkummerow): See below [1].
#include "src/objects/js-proxy-inl.h"
#include "src/objects/literal-objects.h"
#include "src/objects/maybe-object-inl.h"
+#include "src/objects/oddball-inl.h"
+#include "src/objects/ordered-hash-table-inl.h"
#include "src/objects/regexp-match-info.h"
#include "src/objects/scope-info.h"
+#include "src/objects/slots-inl.h"
+#include "src/objects/smi-inl.h"
#include "src/objects/template-objects.h"
#include "src/objects/templates.h"
#include "src/property-details.h"
@@ -47,18 +54,22 @@
#include "src/transitions-inl.h"
#include "src/v8memory.h"
+// [1] This file currently contains the definitions of many
+// HeapObject::IsFoo() predicates, which in turn require #including
+// many other -inl.h files. Find a way to avoid this. Idea:
+// Since e.g. HeapObject::IsSeqString requires things from string-inl.h,
+// and presumably is mostly used from places that require/include string-inl.h
+// anyway, maybe that's where it should be defined?
+
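Concretely, the relocation floated in the note above would just move each predicate next to the definitions it needs; for example (hypothetical placement in string-inl.h, body identical to the one that appears later in this file):

bool HeapObject::IsSeqString() const {
  if (!IsString()) return false;
  return StringShape(String::cast(*this)).IsSequential();
}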
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
namespace v8 {
namespace internal {
-PropertyDetails::PropertyDetails(Smi* smi) {
- value_ = smi->value();
-}
-
+PropertyDetails::PropertyDetails(Smi smi) { value_ = smi->value(); }
-Smi* PropertyDetails::AsSmi() const {
+Smi PropertyDetails::AsSmi() const {
// Ensure the upper 2 bits have the same value by sign extending it. This is
// necessary to be able to use the 31st bit of the property details.
int value = value_ << 1;
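A worked example of the sign-extension trick (illustrative values): with bit 30 of value_ set, say value_ == 0x40000000, the shift-left moves that bit into the sign position and the arithmetic shift-right copies it back down, so bits 31 and 30 agree:

//   value_             == 0x40000000
//   value_ << 1        == 0x80000000  (bit 30 now in the sign bit)
//   (value_ << 1) >> 1 == 0xC0000000  (sign bit replicated; bits 31/30 equal)
// Smi::FromInt can then tag the result without losing bit 30 of the details.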
@@ -69,73 +80,13 @@ Smi* PropertyDetails::AsSmi() const {
int PropertyDetails::field_width_in_words() const {
DCHECK_EQ(location(), kField);
if (!FLAG_unbox_double_fields) return 1;
- if (kDoubleSize == kPointerSize) return 1;
- return representation().IsDouble() ? kDoubleSize / kPointerSize : 1;
-}
-
-namespace InstanceTypeChecker {
-
-// Define type checkers for classes with single instance type.
-INSTANCE_TYPE_CHECKERS_SINGLE(INSTANCE_TYPE_CHECKER);
-
-#define TYPED_ARRAY_INSTANCE_TYPE_CHECKER(Type, type, TYPE, ctype) \
- INSTANCE_TYPE_CHECKER(Fixed##Type##Array, FIXED_##TYPE##_ARRAY_TYPE)
-TYPED_ARRAYS(TYPED_ARRAY_INSTANCE_TYPE_CHECKER)
-#undef TYPED_ARRAY_INSTANCE_TYPE_CHECKER
-
-#define STRUCT_INSTANCE_TYPE_CHECKER(TYPE, Name, name) \
- INSTANCE_TYPE_CHECKER(Name, TYPE)
-STRUCT_LIST(STRUCT_INSTANCE_TYPE_CHECKER)
-#undef STRUCT_INSTANCE_TYPE_CHECKER
-
-// Define type checkers for classes with ranges of instance types.
-#define INSTANCE_TYPE_CHECKER_RANGE(type, first_instance_type, \
- last_instance_type) \
- V8_INLINE bool Is##type(InstanceType instance_type) { \
- return instance_type >= first_instance_type && \
- instance_type <= last_instance_type; \
- }
-INSTANCE_TYPE_CHECKERS_RANGE(INSTANCE_TYPE_CHECKER_RANGE);
-#undef INSTANCE_TYPE_CHECKER_RANGE
-
-V8_INLINE bool IsFixedArrayBase(InstanceType instance_type) {
- return IsFixedArray(instance_type) || IsFixedDoubleArray(instance_type) ||
- IsFixedTypedArrayBase(instance_type);
+ if (kDoubleSize == kTaggedSize) return 1;
+ return representation().IsDouble() ? kDoubleSize / kTaggedSize : 1;
}
-V8_INLINE bool IsHeapObject(InstanceType instance_type) { return true; }
-
-V8_INLINE bool IsInternalizedString(InstanceType instance_type) {
- STATIC_ASSERT(kNotInternalizedTag != 0);
- return (instance_type & (kIsNotStringMask | kIsNotInternalizedMask)) ==
- (kStringTag | kInternalizedTag);
-}
-
-V8_INLINE bool IsJSObject(InstanceType instance_type) {
- STATIC_ASSERT(LAST_TYPE == LAST_JS_OBJECT_TYPE);
- return instance_type >= FIRST_JS_OBJECT_TYPE;
-}
-
-V8_INLINE bool IsJSReceiver(InstanceType instance_type) {
- STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
- return instance_type >= FIRST_JS_RECEIVER_TYPE;
-}
-
-} // namespace InstanceTypeChecker
-
-// TODO(v8:7786): For instance types that have a single map instance on the
-// roots, and when that map is embedded in the binary, compare against the map
-// pointer rather than looking up the instance type.
-INSTANCE_TYPE_CHECKERS(TYPE_CHECKER);
-
-#define TYPED_ARRAY_TYPE_CHECKER(Type, type, TYPE, ctype) \
- TYPE_CHECKER(Fixed##Type##Array)
-TYPED_ARRAYS(TYPED_ARRAY_TYPE_CHECKER)
-#undef TYPED_ARRAY_TYPE_CHECKER
-
bool HeapObject::IsUncompiledData() const {
- return IsUncompiledDataWithoutPreParsedScope() ||
- IsUncompiledDataWithPreParsedScope();
+ return IsUncompiledDataWithoutPreparseData() ||
+ IsUncompiledDataWithPreparseData();
}
bool HeapObject::IsSloppyArgumentsElements() const {
@@ -148,7 +99,7 @@ bool HeapObject::IsJSSloppyArgumentsObject() const {
bool HeapObject::IsJSGeneratorObject() const {
return map()->instance_type() == JS_GENERATOR_OBJECT_TYPE ||
- IsJSAsyncGeneratorObject();
+ IsJSAsyncFunctionObject() || IsJSAsyncGeneratorObject();
}
bool HeapObject::IsDataHandler() const {
@@ -161,29 +112,29 @@ bool HeapObject::IsExternal(Isolate* isolate) const {
return map()->FindRootMap(isolate) == isolate->heap()->external_map();
}
-#define IS_TYPE_FUNCTION_DEF(type_) \
- bool Object::Is##type_() const { \
- return IsHeapObject() && HeapObject::cast(this)->Is##type_(); \
+#define IS_TYPE_FUNCTION_DEF(type_) \
+ bool Object::Is##type_() const { \
+ return IsHeapObject() && HeapObject::cast(*this)->Is##type_(); \
}
HEAP_OBJECT_TYPE_LIST(IS_TYPE_FUNCTION_DEF)
#undef IS_TYPE_FUNCTION_DEF
-#define IS_TYPE_FUNCTION_DEF(Type, Value) \
- bool Object::Is##Type(Isolate* isolate) const { \
- return Is##Type(ReadOnlyRoots(isolate->heap())); \
- } \
- bool Object::Is##Type(ReadOnlyRoots roots) const { \
- return this == roots.Value(); \
- } \
- bool Object::Is##Type() const { \
- return IsHeapObject() && HeapObject::cast(this)->Is##Type(); \
- } \
- bool HeapObject::Is##Type(Isolate* isolate) const { \
- return Object::Is##Type(isolate); \
- } \
- bool HeapObject::Is##Type(ReadOnlyRoots roots) const { \
- return Object::Is##Type(roots); \
- } \
+#define IS_TYPE_FUNCTION_DEF(Type, Value) \
+ bool Object::Is##Type(Isolate* isolate) const { \
+ return Is##Type(ReadOnlyRoots(isolate->heap())); \
+ } \
+ bool Object::Is##Type(ReadOnlyRoots roots) const { \
+ return *this == roots.Value(); \
+ } \
+ bool Object::Is##Type() const { \
+ return IsHeapObject() && HeapObject::cast(*this)->Is##Type(); \
+ } \
+ bool HeapObject::Is##Type(Isolate* isolate) const { \
+ return Object::Is##Type(isolate); \
+ } \
+ bool HeapObject::Is##Type(ReadOnlyRoots roots) const { \
+ return Object::Is##Type(roots); \
+ } \
bool HeapObject::Is##Type() const { return Is##Type(GetReadOnlyRoots()); }
ODDBALL_LIST(IS_TYPE_FUNCTION_DEF)
#undef IS_TYPE_FUNCTION_DEF
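As a worked expansion of the macro above — assuming ODDBALL_LIST contains an entry of the form V(Null, null_value) — the Object-side overloads come out as:

bool Object::IsNull(Isolate* isolate) const {
  return IsNull(ReadOnlyRoots(isolate->heap()));
}
bool Object::IsNull(ReadOnlyRoots roots) const {
  return *this == roots.null_value();
}
bool Object::IsNull() const {
  return IsHeapObject() && HeapObject::cast(*this)->IsNull();
}
// ...plus the three forwarding HeapObject::IsNull overloads generated by the
// same entry.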
@@ -197,7 +148,7 @@ bool Object::IsNullOrUndefined(ReadOnlyRoots roots) const {
}
bool Object::IsNullOrUndefined() const {
- return IsHeapObject() && HeapObject::cast(this)->IsNullOrUndefined();
+ return IsHeapObject() && HeapObject::cast(*this)->IsNullOrUndefined();
}
bool HeapObject::IsNullOrUndefined(Isolate* isolate) const {
@@ -235,51 +186,51 @@ bool HeapObject::IsTemplateInfo() const {
bool HeapObject::IsConsString() const {
if (!IsString()) return false;
- return StringShape(String::cast(this)).IsCons();
+ return StringShape(String::cast(*this)).IsCons();
}
bool HeapObject::IsThinString() const {
if (!IsString()) return false;
- return StringShape(String::cast(this)).IsThin();
+ return StringShape(String::cast(*this)).IsThin();
}
bool HeapObject::IsSlicedString() const {
if (!IsString()) return false;
- return StringShape(String::cast(this)).IsSliced();
+ return StringShape(String::cast(*this)).IsSliced();
}
bool HeapObject::IsSeqString() const {
if (!IsString()) return false;
- return StringShape(String::cast(this)).IsSequential();
+ return StringShape(String::cast(*this)).IsSequential();
}
bool HeapObject::IsSeqOneByteString() const {
if (!IsString()) return false;
- return StringShape(String::cast(this)).IsSequential() &&
- String::cast(this)->IsOneByteRepresentation();
+ return StringShape(String::cast(*this)).IsSequential() &&
+ String::cast(*this)->IsOneByteRepresentation();
}
bool HeapObject::IsSeqTwoByteString() const {
if (!IsString()) return false;
- return StringShape(String::cast(this)).IsSequential() &&
- String::cast(this)->IsTwoByteRepresentation();
+ return StringShape(String::cast(*this)).IsSequential() &&
+ String::cast(*this)->IsTwoByteRepresentation();
}
bool HeapObject::IsExternalString() const {
if (!IsString()) return false;
- return StringShape(String::cast(this)).IsExternal();
+ return StringShape(String::cast(*this)).IsExternal();
}
bool HeapObject::IsExternalOneByteString() const {
if (!IsString()) return false;
- return StringShape(String::cast(this)).IsExternal() &&
- String::cast(this)->IsOneByteRepresentation();
+ return StringShape(String::cast(*this)).IsExternal() &&
+ String::cast(*this)->IsOneByteRepresentation();
}
bool HeapObject::IsExternalTwoByteString() const {
if (!IsString()) return false;
- return StringShape(String::cast(this)).IsExternal() &&
- String::cast(this)->IsTwoByteRepresentation();
+ return StringShape(String::cast(*this)).IsExternal() &&
+ String::cast(*this)->IsTwoByteRepresentation();
}
bool Object::IsNumber() const { return IsSmi() || IsHeapNumber(); }
@@ -307,7 +258,7 @@ bool HeapObject::IsFrameArray() const { return IsFixedArrayExact(); }
bool HeapObject::IsArrayList() const {
return map() == GetReadOnlyRoots().array_list_map() ||
- this == GetReadOnlyRoots().empty_fixed_array();
+ *this == GetReadOnlyRoots().empty_fixed_array();
}
bool HeapObject::IsRegExpMatchInfo() const { return IsFixedArrayExact(); }
@@ -322,7 +273,7 @@ bool HeapObject::IsDeoptimizationData() const {
// a deoptimization data array. Since this is used for asserts we can
// check that the length is zero or else the fixed size plus a multiple of
// the entry size.
- int length = FixedArray::cast(this)->length();
+ int length = FixedArray::cast(*this)->length();
if (length == 0) return true;
length -= DeoptimizationData::kFirstDeoptEntryIndex;
@@ -340,7 +291,7 @@ bool HeapObject::IsTemplateList() const {
if (!IsFixedArrayExact()) return false;
// There's actually no way to see the difference between a fixed array and
// a template list.
- if (FixedArray::cast(this)->length() < 1) return false;
+ if (FixedArray::cast(*this)->length() < 1) return false;
return true;
}
@@ -356,32 +307,32 @@ bool HeapObject::IsAbstractCode() const {
}
bool HeapObject::IsStringWrapper() const {
- return IsJSValue() && JSValue::cast(this)->value()->IsString();
+ return IsJSValue() && JSValue::cast(*this)->value()->IsString();
}
bool HeapObject::IsBooleanWrapper() const {
- return IsJSValue() && JSValue::cast(this)->value()->IsBoolean();
+ return IsJSValue() && JSValue::cast(*this)->value()->IsBoolean();
}
bool HeapObject::IsScriptWrapper() const {
- return IsJSValue() && JSValue::cast(this)->value()->IsScript();
+ return IsJSValue() && JSValue::cast(*this)->value()->IsScript();
}
bool HeapObject::IsNumberWrapper() const {
- return IsJSValue() && JSValue::cast(this)->value()->IsNumber();
+ return IsJSValue() && JSValue::cast(*this)->value()->IsNumber();
}
bool HeapObject::IsBigIntWrapper() const {
- return IsJSValue() && JSValue::cast(this)->value()->IsBigInt();
+ return IsJSValue() && JSValue::cast(*this)->value()->IsBigInt();
}
bool HeapObject::IsSymbolWrapper() const {
- return IsJSValue() && JSValue::cast(this)->value()->IsSymbol();
+ return IsJSValue() && JSValue::cast(*this)->value()->IsSymbol();
}
bool HeapObject::IsBoolean() const {
return IsOddball() &&
- ((Oddball::cast(this)->kind() & Oddball::kNotBooleanMask) == 0);
+ ((Oddball::cast(*this)->kind() & Oddball::kNotBooleanMask) == 0);
}
bool HeapObject::IsJSArrayBufferView() const {
@@ -393,7 +344,7 @@ bool HeapObject::IsStringSet() const { return IsHashTable(); }
bool HeapObject::IsObjectHashSet() const { return IsHashTable(); }
bool HeapObject::IsNormalizedMapCache() const {
- return NormalizedMapCache::IsNormalizedMapCache(this);
+ return NormalizedMapCache::IsNormalizedMapCache(*this);
}
bool HeapObject::IsCompilationCacheTable() const { return IsHashTable(); }
@@ -402,12 +353,15 @@ bool HeapObject::IsMapCache() const { return IsHashTable(); }
bool HeapObject::IsObjectHashTable() const { return IsHashTable(); }
+bool Object::IsHashTableBase() const { return IsHashTable(); }
+
bool Object::IsSmallOrderedHashTable() const {
- return IsSmallOrderedHashSet() || IsSmallOrderedHashMap();
+ return IsSmallOrderedHashSet() || IsSmallOrderedHashMap() ||
+ IsSmallOrderedNameDictionary();
}
bool Object::IsPrimitive() const {
- return IsSmi() || HeapObject::cast(this)->map()->IsPrimitiveMap();
+ return IsSmi() || HeapObject::cast(*this)->map()->IsPrimitiveMap();
}
// static
@@ -423,8 +377,8 @@ bool HeapObject::IsUndetectable() const { return map()->is_undetectable(); }
bool HeapObject::IsAccessCheckNeeded() const {
if (IsJSGlobalProxy()) {
- const JSGlobalProxy* proxy = JSGlobalProxy::cast(this);
- JSGlobalObject* global = proxy->GetIsolate()->context()->global_object();
+ const JSGlobalProxy proxy = JSGlobalProxy::cast(*this);
+ JSGlobalObject global = proxy->GetIsolate()->context()->global_object();
return proxy->IsDetachedFrom(global);
}
return map()->is_access_check_needed();
@@ -437,88 +391,112 @@ bool HeapObject::IsStruct() const {
return true;
STRUCT_LIST(MAKE_STRUCT_CASE)
#undef MAKE_STRUCT_CASE
+ // It is hard to include ALLOCATION_SITE_TYPE in STRUCT_LIST because
+ // that macro is used for many things and AllocationSite needs a few
+ // special cases.
+ case ALLOCATION_SITE_TYPE:
+ return true;
+ case LOAD_HANDLER_TYPE:
+ case STORE_HANDLER_TYPE:
+ return true;
+ case FEEDBACK_CELL_TYPE:
+ return true;
+ case CALL_HANDLER_INFO_TYPE:
+ return true;
default:
return false;
}
}
-#define MAKE_STRUCT_PREDICATE(NAME, Name, name) \
- bool Object::Is##Name() const { \
- return IsHeapObject() && HeapObject::cast(this)->Is##Name(); \
- } \
+#define MAKE_STRUCT_PREDICATE(NAME, Name, name) \
+ bool Object::Is##Name() const { \
+ return IsHeapObject() && HeapObject::cast(*this)->Is##Name(); \
+ } \
TYPE_CHECKER(Name)
STRUCT_LIST(MAKE_STRUCT_PREDICATE)
#undef MAKE_STRUCT_PREDICATE
double Object::Number() const {
DCHECK(IsNumber());
- return IsSmi()
- ? static_cast<double>(reinterpret_cast<const Smi*>(this)->value())
- : reinterpret_cast<const HeapNumber*>(this)->value();
+ return IsSmi() ? static_cast<double>(Smi(this->ptr())->value())
+ : HeapNumber::unchecked_cast(*this)->value();
}
bool Object::IsNaN() const {
- return this->IsHeapNumber() && std::isnan(HeapNumber::cast(this)->value());
+ return this->IsHeapNumber() && std::isnan(HeapNumber::cast(*this)->value());
}
bool Object::IsMinusZero() const {
return this->IsHeapNumber() &&
- i::IsMinusZero(HeapNumber::cast(this)->value());
+ i::IsMinusZero(HeapNumber::cast(*this)->value());
+}
+
+OBJECT_CONSTRUCTORS_IMPL(HeapObject, Object)
+OBJECT_CONSTRUCTORS_IMPL(HashTableBase, FixedArray)
+
+template <typename Derived, typename Shape>
+HashTable<Derived, Shape>::HashTable(Address ptr) : HashTableBase(ptr) {
+ SLOW_DCHECK(IsHashTable());
+}
+
+template <typename Derived, typename Shape>
+ObjectHashTableBase<Derived, Shape>::ObjectHashTableBase(Address ptr)
+ : HashTable<Derived, Shape>(ptr) {}
+
+ObjectHashTable::ObjectHashTable(Address ptr)
+ : ObjectHashTableBase<ObjectHashTable, ObjectHashTableShape>(ptr) {
+ SLOW_DCHECK(IsObjectHashTable());
}
+EphemeronHashTable::EphemeronHashTable(Address ptr)
+ : ObjectHashTableBase<EphemeronHashTable, EphemeronHashTableShape>(ptr) {
+ SLOW_DCHECK(IsEphemeronHashTable());
+}
+
+ObjectHashSet::ObjectHashSet(Address ptr)
+ : HashTable<ObjectHashSet, ObjectHashSetShape>(ptr) {
+ SLOW_DCHECK(IsObjectHashSet());
+}
+
+OBJECT_CONSTRUCTORS_IMPL(RegExpMatchInfo, FixedArray)
+OBJECT_CONSTRUCTORS_IMPL(ScopeInfo, FixedArray)
+
+NormalizedMapCache::NormalizedMapCache(Address ptr) : WeakFixedArray(ptr) {
+ // TODO(jkummerow): Introduce IsNormalizedMapCache() and use
+ // OBJECT_CONSTRUCTORS_IMPL macro?
+}
+
+OBJECT_CONSTRUCTORS_IMPL(BigIntBase, HeapObject)
+OBJECT_CONSTRUCTORS_IMPL(BigInt, BigIntBase)
+OBJECT_CONSTRUCTORS_IMPL(FreshlyAllocatedBigInt, BigIntBase)
+
+OBJECT_CONSTRUCTORS_IMPL(TemplateObjectDescription, Tuple2)
+
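All of the constructors above follow one pattern: a typed value wrapper is rebuilt from a raw tagged Address, optionally with a slow type check. A sketch of what OBJECT_CONSTRUCTORS_IMPL plausibly expands to (the exact definition lives in object-macros.h and may differ in detail):

#define OBJECT_CONSTRUCTORS_IMPL_SKETCH(Type, Super) \
  inline Type::Type(Address ptr) : Super(ptr) { SLOW_DCHECK(Is##Type()); }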
// ------------------------------------
// Cast operations
-CAST_ACCESSOR(AccessorPair)
-CAST_ACCESSOR(AsyncGeneratorRequest)
CAST_ACCESSOR(BigInt)
CAST_ACCESSOR(ObjectBoilerplateDescription)
-CAST_ACCESSOR(Cell)
-CAST_ACCESSOR(ArrayBoilerplateDescription)
-CAST_ACCESSOR(DataHandler)
-CAST_ACCESSOR(DescriptorArray)
CAST_ACCESSOR(EphemeronHashTable)
-CAST_ACCESSOR(EnumCache)
-CAST_ACCESSOR(FeedbackCell)
-CAST_ACCESSOR(Foreign)
-CAST_ACCESSOR(GlobalDictionary)
CAST_ACCESSOR(HeapObject)
-CAST_ACCESSOR(HeapNumber)
-CAST_ACCESSOR(LayoutDescriptor)
-CAST_ACCESSOR(MutableHeapNumber)
-CAST_ACCESSOR(NameDictionary)
CAST_ACCESSOR(NormalizedMapCache)
-CAST_ACCESSOR(NumberDictionary)
CAST_ACCESSOR(Object)
CAST_ACCESSOR(ObjectHashSet)
CAST_ACCESSOR(ObjectHashTable)
-CAST_ACCESSOR(Oddball)
-CAST_ACCESSOR(OrderedHashMap)
-CAST_ACCESSOR(OrderedHashSet)
-CAST_ACCESSOR(PropertyCell)
CAST_ACCESSOR(RegExpMatchInfo)
CAST_ACCESSOR(ScopeInfo)
-CAST_ACCESSOR(SimpleNumberDictionary)
-CAST_ACCESSOR(SmallOrderedHashMap)
-CAST_ACCESSOR(SmallOrderedHashSet)
-CAST_ACCESSOR(Smi)
-CAST_ACCESSOR(StringSet)
-CAST_ACCESSOR(StringTable)
-CAST_ACCESSOR(Struct)
CAST_ACCESSOR(TemplateObjectDescription)
-CAST_ACCESSOR(Tuple2)
-CAST_ACCESSOR(Tuple3)
bool Object::HasValidElements() {
// Dictionary is covered under FixedArray.
return IsFixedArray() || IsFixedDoubleArray() || IsFixedTypedArrayBase();
}
-bool Object::KeyEquals(Object* second) {
- Object* first = this;
+bool Object::KeyEquals(Object second) {
+ Object first = *this;
if (second->IsNumber()) {
if (first->IsNumber()) return first->Number() == second->Number();
- Object* temp = first;
+ Object temp = first;
first = second;
second = temp;
}
@@ -535,7 +513,7 @@ bool Object::FilterKey(PropertyFilter filter) {
DCHECK(!IsPropertyCell());
if (IsSymbol()) {
if (filter & SKIP_SYMBOLS) return true;
- if (Symbol::cast(this)->is_private()) return true;
+ if (Symbol::cast(*this)->is_private()) return true;
} else {
if (filter & SKIP_STRINGS) return true;
}
@@ -608,13 +586,13 @@ bool Object::FitsRepresentation(Representation representation) {
bool Object::ToUint32(uint32_t* value) const {
if (IsSmi()) {
- int num = Smi::ToInt(this);
+ int num = Smi::ToInt(*this);
if (num < 0) return false;
*value = static_cast<uint32_t>(num);
return true;
}
if (IsHeapNumber()) {
- double num = HeapNumber::cast(this)->value();
+ double num = HeapNumber::cast(*this)->value();
return DoubleToUint32IfEqualToSelf(num, value);
}
return false;
@@ -696,7 +674,7 @@ MaybeHandle<Object> Object::ToLength(Isolate* isolate, Handle<Object> input) {
// static
MaybeHandle<Object> Object::ToIndex(Isolate* isolate, Handle<Object> input,
- MessageTemplate::Template error_index) {
+ MessageTemplate error_index) {
if (input->IsSmi() && Smi::ToInt(*input) >= 0) return input;
return ConvertToIndex(isolate, input, error_index);
}
@@ -724,39 +702,37 @@ MaybeHandle<Object> Object::SetElement(Isolate* isolate, Handle<Object> object,
return value;
}
-Object** HeapObject::RawField(const HeapObject* obj, int byte_offset) {
- return reinterpret_cast<Object**>(FIELD_ADDR(obj, byte_offset));
+ObjectSlot HeapObject::RawField(int byte_offset) const {
+ return ObjectSlot(FIELD_ADDR(this, byte_offset));
}
-MaybeObject** HeapObject::RawMaybeWeakField(HeapObject* obj, int byte_offset) {
- return reinterpret_cast<MaybeObject**>(FIELD_ADDR(obj, byte_offset));
+ObjectSlot HeapObject::RawField(const HeapObject obj, int byte_offset) {
+ return ObjectSlot(FIELD_ADDR(obj, byte_offset));
}
-int Smi::ToInt(const Object* object) { return Smi::cast(object)->value(); }
+MaybeObjectSlot HeapObject::RawMaybeWeakField(int byte_offset) const {
+ return MaybeObjectSlot(FIELD_ADDR(this, byte_offset));
+}
-MapWord MapWord::FromMap(const Map* map) {
- return MapWord(reinterpret_cast<uintptr_t>(map));
+MaybeObjectSlot HeapObject::RawMaybeWeakField(HeapObject obj, int byte_offset) {
+ return MaybeObjectSlot(FIELD_ADDR(obj, byte_offset));
}
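RawField and RawMaybeWeakField now hand out typed slots instead of raw Object** / MaybeObject** pointers. Conceptually a slot is a thin wrapper over the field's address with explicit load/store; a rough sketch under that assumption (the real classes, in slots-inl.h, also supply the Relaxed_/Acquire_/Release_ variants used for the map word below):

class ObjectSlotSketch {
 public:
  explicit ObjectSlotSketch(Address ptr) : ptr_(ptr) {}
  Object load() const { return Object(*reinterpret_cast<Address*>(ptr_)); }
  void store(Object value) const {
    *reinterpret_cast<Address*>(ptr_) = value->ptr();
  }
 private:
  Address ptr_;  // address of the field itself, not of the holding object
};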
-Map* MapWord::ToMap() const { return reinterpret_cast<Map*>(value_); }
+MapWord MapWord::FromMap(const Map map) { return MapWord(map.ptr()); }
-bool MapWord::IsForwardingAddress() const {
- return HAS_SMI_TAG(reinterpret_cast<Object*>(value_));
-}
+Map MapWord::ToMap() const { return Map::unchecked_cast(Object(value_)); }
+bool MapWord::IsForwardingAddress() const { return HAS_SMI_TAG(value_); }
-MapWord MapWord::FromForwardingAddress(HeapObject* object) {
- Address raw = reinterpret_cast<Address>(object) - kHeapObjectTag;
- return MapWord(static_cast<uintptr_t>(raw));
+MapWord MapWord::FromForwardingAddress(HeapObject object) {
+ return MapWord(object->ptr() - kHeapObjectTag);
}
-
-HeapObject* MapWord::ToForwardingAddress() {
+HeapObject MapWord::ToForwardingAddress() {
DCHECK(IsForwardingAddress());
- return HeapObject::FromAddress(static_cast<Address>(value_));
+ return HeapObject::FromAddress(value_);
}
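The Smi-tag test works because forwarding pointers are stored untagged: subtracting kHeapObjectTag (== 1) clears the low bit, which is exactly the Smi tag, so a forwarded map word can never be confused with a properly tagged Map pointer. An illustrative round trip:

//   HeapObject at address 0x5008 has ptr() == 0x5009 (kHeapObjectTag set).
//   FromForwardingAddress stores 0x5009 - 1 == 0x5008 (low bit clear, so
//   HAS_SMI_TAG holds and IsForwardingAddress() is true).
//   ToForwardingAddress() re-tags it via HeapObject::FromAddress(0x5008).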
-
#ifdef VERIFY_HEAP
void HeapObject::VerifyObjectField(Isolate* isolate, int offset) {
VerifyPointer(isolate, READ_FIELD(this, offset));
@@ -769,186 +745,93 @@ void HeapObject::VerifyMaybeObjectField(Isolate* isolate, int offset) {
void HeapObject::VerifySmiField(int offset) {
CHECK(READ_FIELD(this, offset)->IsSmi());
}
+
#endif
ReadOnlyRoots HeapObject::GetReadOnlyRoots() const {
// TODO(v8:7464): When RO_SPACE is embedded, this will access a global
// variable instead.
- return ReadOnlyRoots(MemoryChunk::FromHeapObject(this)->heap());
+ return ReadOnlyRoots(MemoryChunk::FromHeapObject(*this)->heap());
}
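The MemoryChunk lookup is pure address arithmetic: chunks are allocated at aligned boundaries, so masking the object's address down recovers the chunk header (and with it the Heap). A sketch, assuming an alignment mask named kAlignmentMask:

MemoryChunk* FromAddressSketch(Address a) {
  // The chunk header sits at the start of the aligned region containing `a`.
  return reinterpret_cast<MemoryChunk*>(a & ~kAlignmentMask);
}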
-Heap* NeverReadOnlySpaceObject::GetHeap() const {
- MemoryChunk* chunk =
- MemoryChunk::FromAddress(reinterpret_cast<Address>(this));
- // Make sure we are not accessing an object in RO space.
- SLOW_DCHECK(chunk->owner()->identity() != RO_SPACE);
- Heap* heap = chunk->heap();
- SLOW_DCHECK(heap != nullptr);
- return heap;
-}
-
-Isolate* NeverReadOnlySpaceObject::GetIsolate() const {
- return GetHeap()->isolate();
-}
+Map HeapObject::map() const { return map_word().ToMap(); }
-Map* HeapObject::map() const {
- return map_word().ToMap();
-}
-
-
-void HeapObject::set_map(Map* value) {
- if (value != nullptr) {
+void HeapObject::set_map(Map value) {
+ if (!value.is_null()) {
#ifdef VERIFY_HEAP
- Heap::FromWritableHeapObject(this)->VerifyObjectLayoutChange(this, value);
+ Heap::FromWritableHeapObject(*this)->VerifyObjectLayoutChange(*this, value);
#endif
}
set_map_word(MapWord::FromMap(value));
- if (value != nullptr) {
- // TODO(1600) We are passing nullptr as a slot because maps can never be on
- // evacuation candidate.
- MarkingBarrier(this, nullptr, value);
+ if (!value.is_null()) {
+ // TODO(1600) We are passing kNullAddress as a slot because maps can never
+ // be on an evacuation candidate.
+ MarkingBarrier(*this, ObjectSlot(kNullAddress), value);
}
}
-Map* HeapObject::synchronized_map() const {
+Map HeapObject::synchronized_map() const {
return synchronized_map_word().ToMap();
}
-
-void HeapObject::synchronized_set_map(Map* value) {
- if (value != nullptr) {
+void HeapObject::synchronized_set_map(Map value) {
+ if (!value.is_null()) {
#ifdef VERIFY_HEAP
- Heap::FromWritableHeapObject(this)->VerifyObjectLayoutChange(this, value);
+ Heap::FromWritableHeapObject(*this)->VerifyObjectLayoutChange(*this, value);
#endif
}
synchronized_set_map_word(MapWord::FromMap(value));
- if (value != nullptr) {
- // TODO(1600) We are passing nullptr as a slot because maps can never be on
- // evacuation candidate.
- MarkingBarrier(this, nullptr, value);
+ if (!value.is_null()) {
+ // TODO(1600) We are passing kNullAddress as a slot because maps can never
+ // be on an evacuation candidate.
+ MarkingBarrier(*this, ObjectSlot(kNullAddress), value);
}
}
// Unsafe accessor omitting write barrier.
-void HeapObject::set_map_no_write_barrier(Map* value) {
- if (value != nullptr) {
+void HeapObject::set_map_no_write_barrier(Map value) {
+ if (!value.is_null()) {
#ifdef VERIFY_HEAP
- Heap::FromWritableHeapObject(this)->VerifyObjectLayoutChange(this, value);
+ Heap::FromWritableHeapObject(*this)->VerifyObjectLayoutChange(*this, value);
#endif
}
set_map_word(MapWord::FromMap(value));
}
-void HeapObject::set_map_after_allocation(Map* value, WriteBarrierMode mode) {
+void HeapObject::set_map_after_allocation(Map value, WriteBarrierMode mode) {
set_map_word(MapWord::FromMap(value));
if (mode != SKIP_WRITE_BARRIER) {
- DCHECK_NOT_NULL(value);
- // TODO(1600) We are passing nullptr as a slot because maps can never be on
- // evacuation candidate.
- MarkingBarrier(this, nullptr, value);
+ DCHECK(!value.is_null());
+ // TODO(1600) We are passing kNullAddress as a slot because maps can never
+ // be on an evacuation candidate.
+ MarkingBarrier(*this, ObjectSlot(kNullAddress), value);
}
}
-HeapObject** HeapObject::map_slot() {
- return reinterpret_cast<HeapObject**>(FIELD_ADDR(this, kMapOffset));
+MapWordSlot HeapObject::map_slot() const {
+ return MapWordSlot(FIELD_ADDR(*this, kMapOffset));
}
MapWord HeapObject::map_word() const {
- return MapWord(
- reinterpret_cast<uintptr_t>(RELAXED_READ_FIELD(this, kMapOffset)));
+ return MapWord(map_slot().Relaxed_Load().ptr());
}
-
void HeapObject::set_map_word(MapWord map_word) {
- RELAXED_WRITE_FIELD(this, kMapOffset,
- reinterpret_cast<Object*>(map_word.value_));
+ map_slot().Relaxed_Store(Object(map_word.value_));
}
MapWord HeapObject::synchronized_map_word() const {
- return MapWord(
- reinterpret_cast<uintptr_t>(ACQUIRE_READ_FIELD(this, kMapOffset)));
+ return MapWord(map_slot().Acquire_Load().ptr());
}
-
void HeapObject::synchronized_set_map_word(MapWord map_word) {
- RELEASE_WRITE_FIELD(
- this, kMapOffset, reinterpret_cast<Object*>(map_word.value_));
+ map_slot().Release_Store(Object(map_word.value_));
}
int HeapObject::Size() const { return SizeFromMap(map()); }
-double HeapNumberBase::value() const {
- return READ_DOUBLE_FIELD(this, kValueOffset);
-}
-
-void HeapNumberBase::set_value(double value) {
- WRITE_DOUBLE_FIELD(this, kValueOffset, value);
-}
-
-uint64_t HeapNumberBase::value_as_bits() const {
- return READ_UINT64_FIELD(this, kValueOffset);
-}
-
-void HeapNumberBase::set_value_as_bits(uint64_t bits) {
- WRITE_UINT64_FIELD(this, kValueOffset, bits);
-}
-
-int HeapNumberBase::get_exponent() {
- return ((READ_INT_FIELD(this, kExponentOffset) & kExponentMask) >>
- kExponentShift) - kExponentBias;
-}
-
-int HeapNumberBase::get_sign() {
- return READ_INT_FIELD(this, kExponentOffset) & kSignMask;
-}
-
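A worked example for the two accessors being moved out, assuming the standard IEEE-754 double layout with kExponentBias == 1023:

// 8.0 == 1.0 * 2^3, so the stored biased exponent is 1026 and
// get_exponent() returns 1026 - 1023 == 3; the sign bit is clear, so
// get_sign() returns 0.  For -8.0 only the kSignMask bit flips.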
-double Oddball::to_number_raw() const {
- return READ_DOUBLE_FIELD(this, kToNumberRawOffset);
-}
-
-void Oddball::set_to_number_raw(double value) {
- WRITE_DOUBLE_FIELD(this, kToNumberRawOffset, value);
-}
-
-void Oddball::set_to_number_raw_as_bits(uint64_t bits) {
- WRITE_UINT64_FIELD(this, kToNumberRawOffset, bits);
-}
-
-ACCESSORS(Oddball, to_string, String, kToStringOffset)
-ACCESSORS(Oddball, to_number, Object, kToNumberOffset)
-ACCESSORS(Oddball, type_of, String, kTypeOfOffset)
-
-byte Oddball::kind() const { return Smi::ToInt(READ_FIELD(this, kKindOffset)); }
-
-void Oddball::set_kind(byte value) {
- WRITE_FIELD(this, kKindOffset, Smi::FromInt(value));
-}
-
-
-// static
-Handle<Object> Oddball::ToNumber(Isolate* isolate, Handle<Oddball> input) {
- return handle(input->to_number(), isolate);
-}
-
-
-ACCESSORS(Cell, value, Object, kValueOffset)
-ACCESSORS(FeedbackCell, value, HeapObject, kValueOffset)
-ACCESSORS(PropertyCell, dependent_code, DependentCode, kDependentCodeOffset)
-ACCESSORS(PropertyCell, name, Name, kNameOffset)
-ACCESSORS(PropertyCell, value, Object, kValueOffset)
-ACCESSORS(PropertyCell, property_details_raw, Object, kDetailsOffset)
-
-PropertyDetails PropertyCell::property_details() const {
- return PropertyDetails(Smi::cast(property_details_raw()));
-}
-
-
-void PropertyCell::set_property_details(PropertyDetails details) {
- set_property_details_raw(details.AsSmi());
-}
-
inline bool IsSpecialReceiverInstanceType(InstanceType instance_type) {
return instance_type <= LAST_SPECIAL_RECEIVER_TYPE;
}
@@ -972,13 +855,6 @@ bool Map::IsCustomElementsReceiverMap() const {
return IsCustomElementsReceiverInstanceType(instance_type());
}
-void Struct::InitializeBody(int object_size) {
- Object* value = GetReadOnlyRoots().undefined_value();
- for (int offset = kHeaderSize; offset < object_size; offset += kPointerSize) {
- WRITE_FIELD(this, offset, value);
- }
-}
-
bool Object::ToArrayLength(uint32_t* index) const {
return Object::ToUint32(index);
}
@@ -987,6 +863,20 @@ bool Object::ToArrayIndex(uint32_t* index) const {
return Object::ToUint32(index) && *index != kMaxUInt32;
}
+bool Object::GetHeapObjectIfStrong(HeapObject* result) const {
+ return GetHeapObject(result);
+}
+
+bool Object::GetHeapObject(HeapObject* result) const {
+ if (!IsHeapObject()) return false;
+ *result = HeapObject::cast(*this);
+ return true;
+}
+
+HeapObject Object::GetHeapObject() const {
+ DCHECK(IsHeapObject());
+ return HeapObject::cast(*this);
+}
void Object::VerifyApiCallResultType() {
#if DEBUG
@@ -1001,7 +891,7 @@ void Object::VerifyApiCallResultType() {
int RegExpMatchInfo::NumberOfCaptureRegisters() {
DCHECK_GE(length(), kLastMatchOverhead);
- Object* obj = get(kNumberOfCapturesIndex);
+ Object obj = get(kNumberOfCapturesIndex);
return Smi::ToInt(obj);
}
@@ -1010,30 +900,29 @@ void RegExpMatchInfo::SetNumberOfCaptureRegisters(int value) {
set(kNumberOfCapturesIndex, Smi::FromInt(value));
}
-String* RegExpMatchInfo::LastSubject() {
+String RegExpMatchInfo::LastSubject() {
DCHECK_GE(length(), kLastMatchOverhead);
- Object* obj = get(kLastSubjectIndex);
- return String::cast(obj);
+ return String::cast(get(kLastSubjectIndex));
}
-void RegExpMatchInfo::SetLastSubject(String* value) {
+void RegExpMatchInfo::SetLastSubject(String value) {
DCHECK_GE(length(), kLastMatchOverhead);
set(kLastSubjectIndex, value);
}
-Object* RegExpMatchInfo::LastInput() {
+Object RegExpMatchInfo::LastInput() {
DCHECK_GE(length(), kLastMatchOverhead);
return get(kLastInputIndex);
}
-void RegExpMatchInfo::SetLastInput(Object* value) {
+void RegExpMatchInfo::SetLastInput(Object value) {
DCHECK_GE(length(), kLastMatchOverhead);
set(kLastInputIndex, value);
}
int RegExpMatchInfo::Capture(int i) {
DCHECK_LT(i, NumberOfCaptureRegisters());
- Object* obj = get(kFirstCaptureIndex + i);
+ Object obj = get(kFirstCaptureIndex + i);
return Smi::ToInt(obj);
}
@@ -1044,13 +933,13 @@ void RegExpMatchInfo::SetCapture(int i, int value) {
WriteBarrierMode HeapObject::GetWriteBarrierMode(
const DisallowHeapAllocation& promise) {
- Heap* heap = Heap::FromWritableHeapObject(this);
+ Heap* heap = Heap::FromWritableHeapObject(*this);
if (heap->incremental_marking()->IsMarking()) return UPDATE_WRITE_BARRIER;
- if (Heap::InNewSpace(this)) return SKIP_WRITE_BARRIER;
+ if (Heap::InNewSpace(*this)) return SKIP_WRITE_BARRIER;
return UPDATE_WRITE_BARRIER;
}
-AllocationAlignment HeapObject::RequiredAlignment(Map* map) {
+AllocationAlignment HeapObject::RequiredAlignment(Map map) {
#ifdef V8_HOST_ARCH_32_BIT
int instance_type = map->instance_type();
if (instance_type == FIXED_FLOAT64_ARRAY_TYPE ||
@@ -1065,13 +954,13 @@ AllocationAlignment HeapObject::RequiredAlignment(Map* map) {
bool HeapObject::NeedsRehashing() const {
switch (map()->instance_type()) {
case DESCRIPTOR_ARRAY_TYPE:
- return DescriptorArray::cast(this)->number_of_descriptors() > 1;
+ return DescriptorArray::cast(*this)->number_of_descriptors() > 1;
case TRANSITION_ARRAY_TYPE:
- return TransitionArray::cast(this)->number_of_entries() > 1;
+ return TransitionArray::cast(*this)->number_of_entries() > 1;
case ORDERED_HASH_MAP_TYPE:
- return OrderedHashMap::cast(this)->NumberOfElements() > 0;
+ return OrderedHashMap::cast(*this)->NumberOfElements() > 0;
case ORDERED_HASH_SET_TYPE:
- return OrderedHashSet::cast(this)->NumberOfElements() > 0;
+ return OrderedHashSet::cast(*this)->NumberOfElements() > 0;
case NAME_DICTIONARY_TYPE:
case GLOBAL_DICTIONARY_TYPE:
case NUMBER_DICTIONARY_TYPE:
@@ -1080,6 +969,7 @@ bool HeapObject::NeedsRehashing() const {
case HASH_TABLE_TYPE:
case SMALL_ORDERED_HASH_MAP_TYPE:
case SMALL_ORDERED_HASH_SET_TYPE:
+ case SMALL_ORDERED_NAME_DICTIONARY_TYPE:
return true;
default:
return false;
@@ -1093,353 +983,6 @@ Address HeapObject::GetFieldAddress(int field_offset) const {
ACCESSORS(EnumCache, keys, FixedArray, kKeysOffset)
ACCESSORS(EnumCache, indices, FixedArray, kIndicesOffset)
-int DescriptorArray::number_of_descriptors() const {
- return Smi::ToInt(get(kDescriptorLengthIndex)->cast<Smi>());
-}
-
-int DescriptorArray::number_of_descriptors_storage() const {
- return (length() - kFirstIndex) / kEntrySize;
-}
-
-int DescriptorArray::NumberOfSlackDescriptors() const {
- return number_of_descriptors_storage() - number_of_descriptors();
-}
-
-
-void DescriptorArray::SetNumberOfDescriptors(int number_of_descriptors) {
- set(kDescriptorLengthIndex,
- MaybeObject::FromObject(Smi::FromInt(number_of_descriptors)));
-}
-
-inline int DescriptorArray::number_of_entries() const {
- return number_of_descriptors();
-}
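A quick check of the storage accounting above, assuming the key/details/value triple layout (kEntrySize == 3):

// length() == kFirstIndex + 9  =>  number_of_descriptors_storage() == 3.
// With number_of_descriptors() == 2, NumberOfSlackDescriptors() == 1:
// one preallocated-but-unused (key, details, value) triple at the tail.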
-
-void DescriptorArray::CopyEnumCacheFrom(DescriptorArray* array) {
- set(kEnumCacheIndex, array->get(kEnumCacheIndex));
-}
-
-EnumCache* DescriptorArray::GetEnumCache() {
- return EnumCache::cast(get(kEnumCacheIndex)->GetHeapObjectAssumeStrong());
-}
-
-// Perform a binary search in a fixed array.
-template <SearchMode search_mode, typename T>
-int BinarySearch(T* array, Name* name, int valid_entries,
- int* out_insertion_index) {
- DCHECK(search_mode == ALL_ENTRIES || out_insertion_index == nullptr);
- int low = 0;
- int high = array->number_of_entries() - 1;
- uint32_t hash = name->hash_field();
- int limit = high;
-
- DCHECK(low <= high);
-
- while (low != high) {
- int mid = low + (high - low) / 2;
- Name* mid_name = array->GetSortedKey(mid);
- uint32_t mid_hash = mid_name->hash_field();
-
- if (mid_hash >= hash) {
- high = mid;
- } else {
- low = mid + 1;
- }
- }
-
- for (; low <= limit; ++low) {
- int sort_index = array->GetSortedKeyIndex(low);
- Name* entry = array->GetKey(sort_index);
- uint32_t current_hash = entry->hash_field();
- if (current_hash != hash) {
- if (search_mode == ALL_ENTRIES && out_insertion_index != nullptr) {
- *out_insertion_index = sort_index + (current_hash > hash ? 0 : 1);
- }
- return T::kNotFound;
- }
- if (entry == name) {
- if (search_mode == ALL_ENTRIES || sort_index < valid_entries) {
- return sort_index;
- }
- return T::kNotFound;
- }
- }
-
- if (search_mode == ALL_ENTRIES && out_insertion_index != nullptr) {
- *out_insertion_index = limit + 1;
- }
- return T::kNotFound;
-}
-
-
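The search is two-phase because hash values are not unique: the while-loop only narrows to the first entry whose hash is >= the target, and the trailing loop resolves collisions by identity. An illustrative trace:

// Sorted hash fields: [10, 20, 20, 20, 35], target hash 20.
// The while-loop converges on index 1 (the first hash >= 20); the for-loop
// then walks indices 1..3 comparing `entry == name`, since distinct names
// can share a hash.  If no entry matches, T::kNotFound is returned (and in
// ALL_ENTRIES mode *out_insertion_index is filled in).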
-// Perform a linear search in this fixed array. len is the number of entry
-// indices that are valid.
-template <SearchMode search_mode, typename T>
-int LinearSearch(T* array, Name* name, int valid_entries,
- int* out_insertion_index) {
- if (search_mode == ALL_ENTRIES && out_insertion_index != nullptr) {
- uint32_t hash = name->hash_field();
- int len = array->number_of_entries();
- for (int number = 0; number < len; number++) {
- int sorted_index = array->GetSortedKeyIndex(number);
- Name* entry = array->GetKey(sorted_index);
- uint32_t current_hash = entry->hash_field();
- if (current_hash > hash) {
- *out_insertion_index = sorted_index;
- return T::kNotFound;
- }
- if (entry == name) return sorted_index;
- }
- *out_insertion_index = len;
- return T::kNotFound;
- } else {
- DCHECK_LE(valid_entries, array->number_of_entries());
- DCHECK_NULL(out_insertion_index); // Not supported here.
- for (int number = 0; number < valid_entries; number++) {
- if (array->GetKey(number) == name) return number;
- }
- return T::kNotFound;
- }
-}
-
-template <SearchMode search_mode, typename T>
-int Search(T* array, Name* name, int valid_entries, int* out_insertion_index) {
- SLOW_DCHECK(array->IsSortedNoDuplicates());
-
- if (valid_entries == 0) {
- if (search_mode == ALL_ENTRIES && out_insertion_index != nullptr) {
- *out_insertion_index = 0;
- }
- return T::kNotFound;
- }
-
- // Fast case: do linear search for small arrays.
- const int kMaxElementsForLinearSearch = 8;
- if (valid_entries <= kMaxElementsForLinearSearch) {
- return LinearSearch<search_mode>(array, name, valid_entries,
- out_insertion_index);
- }
-
- // Slow case: perform binary search.
- return BinarySearch<search_mode>(array, name, valid_entries,
- out_insertion_index);
-}
-
-
-int DescriptorArray::Search(Name* name, int valid_descriptors) {
- DCHECK(name->IsUniqueName());
- return internal::Search<VALID_ENTRIES>(this, name, valid_descriptors,
- nullptr);
-}
-
-int DescriptorArray::Search(Name* name, Map* map) {
- DCHECK(name->IsUniqueName());
- int number_of_own_descriptors = map->NumberOfOwnDescriptors();
- if (number_of_own_descriptors == 0) return kNotFound;
- return Search(name, number_of_own_descriptors);
-}
-
-int DescriptorArray::SearchWithCache(Isolate* isolate, Name* name, Map* map) {
- DCHECK(name->IsUniqueName());
- int number_of_own_descriptors = map->NumberOfOwnDescriptors();
- if (number_of_own_descriptors == 0) return kNotFound;
-
- DescriptorLookupCache* cache = isolate->descriptor_lookup_cache();
- int number = cache->Lookup(map, name);
-
- if (number == DescriptorLookupCache::kAbsent) {
- number = Search(name, number_of_own_descriptors);
- cache->Update(map, name, number);
- }
-
- return number;
-}
-
-
-Object** DescriptorArray::GetKeySlot(int descriptor_number) {
- DCHECK(descriptor_number < number_of_descriptors());
- DCHECK((*RawFieldOfElementAt(ToKeyIndex(descriptor_number)))->IsObject());
- return reinterpret_cast<Object**>(
- RawFieldOfElementAt(ToKeyIndex(descriptor_number)));
-}
-
-MaybeObject** DescriptorArray::GetDescriptorStartSlot(int descriptor_number) {
- return reinterpret_cast<MaybeObject**>(GetKeySlot(descriptor_number));
-}
-
-MaybeObject** DescriptorArray::GetDescriptorEndSlot(int descriptor_number) {
- return GetValueSlot(descriptor_number - 1) + 1;
-}
-
-
-Name* DescriptorArray::GetKey(int descriptor_number) {
- DCHECK(descriptor_number < number_of_descriptors());
- return Name::cast(
- get(ToKeyIndex(descriptor_number))->GetHeapObjectAssumeStrong());
-}
-
-
-int DescriptorArray::GetSortedKeyIndex(int descriptor_number) {
- return GetDetails(descriptor_number).pointer();
-}
-
-
-Name* DescriptorArray::GetSortedKey(int descriptor_number) {
- return GetKey(GetSortedKeyIndex(descriptor_number));
-}
-
-
-void DescriptorArray::SetSortedKey(int descriptor_index, int pointer) {
- PropertyDetails details = GetDetails(descriptor_index);
- set(ToDetailsIndex(descriptor_index),
- MaybeObject::FromObject(details.set_pointer(pointer).AsSmi()));
-}
-
-MaybeObject** DescriptorArray::GetValueSlot(int descriptor_number) {
- DCHECK(descriptor_number < number_of_descriptors());
- return RawFieldOfElementAt(ToValueIndex(descriptor_number));
-}
-
-
-int DescriptorArray::GetValueOffset(int descriptor_number) {
- return OffsetOfElementAt(ToValueIndex(descriptor_number));
-}
-
-Object* DescriptorArray::GetStrongValue(int descriptor_number) {
- DCHECK(descriptor_number < number_of_descriptors());
- return get(ToValueIndex(descriptor_number))->cast<Object>();
-}
-
-
-void DescriptorArray::SetValue(int descriptor_index, Object* value) {
- set(ToValueIndex(descriptor_index), MaybeObject::FromObject(value));
-}
-
-MaybeObject* DescriptorArray::GetValue(int descriptor_number) {
- DCHECK_LT(descriptor_number, number_of_descriptors());
- return get(ToValueIndex(descriptor_number));
-}
-
-PropertyDetails DescriptorArray::GetDetails(int descriptor_number) {
- DCHECK(descriptor_number < number_of_descriptors());
- MaybeObject* details = get(ToDetailsIndex(descriptor_number));
- return PropertyDetails(details->cast<Smi>());
-}
-
-int DescriptorArray::GetFieldIndex(int descriptor_number) {
- DCHECK_EQ(GetDetails(descriptor_number).location(), kField);
- return GetDetails(descriptor_number).field_index();
-}
-
-FieldType* DescriptorArray::GetFieldType(int descriptor_number) {
- DCHECK_EQ(GetDetails(descriptor_number).location(), kField);
- MaybeObject* wrapped_type = GetValue(descriptor_number);
- return Map::UnwrapFieldType(wrapped_type);
-}
-
-void DescriptorArray::Set(int descriptor_number, Name* key, MaybeObject* value,
- PropertyDetails details) {
- // Range check.
- DCHECK(descriptor_number < number_of_descriptors());
- set(ToKeyIndex(descriptor_number), MaybeObject::FromObject(key));
- set(ToValueIndex(descriptor_number), value);
- set(ToDetailsIndex(descriptor_number),
- MaybeObject::FromObject(details.AsSmi()));
-}
-
-void DescriptorArray::Set(int descriptor_number, Descriptor* desc) {
- Name* key = *desc->GetKey();
- MaybeObject* value = *desc->GetValue();
- Set(descriptor_number, key, value, desc->GetDetails());
-}
-
-
-void DescriptorArray::Append(Descriptor* desc) {
- DisallowHeapAllocation no_gc;
- int descriptor_number = number_of_descriptors();
- SetNumberOfDescriptors(descriptor_number + 1);
- Set(descriptor_number, desc);
-
- uint32_t hash = desc->GetKey()->Hash();
-
- int insertion;
-
- for (insertion = descriptor_number; insertion > 0; --insertion) {
- Name* key = GetSortedKey(insertion - 1);
- if (key->Hash() <= hash) break;
- SetSortedKey(insertion, GetSortedKeyIndex(insertion - 1));
- }
-
- SetSortedKey(insertion, descriptor_number);
-}
-
-
-void DescriptorArray::SwapSortedKeys(int first, int second) {
- int first_key = GetSortedKeyIndex(first);
- SetSortedKey(first, GetSortedKeyIndex(second));
- SetSortedKey(second, first_key);
-}
-
-MaybeObject* DescriptorArray::get(int index) const {
- return WeakFixedArray::Get(index);
-}
-
-void DescriptorArray::set(int index, MaybeObject* value) {
- WeakFixedArray::Set(index, value);
-}
-
-bool StringSetShape::IsMatch(String* key, Object* value) {
- DCHECK(value->IsString());
- return key->Equals(String::cast(value));
-}
-
-uint32_t StringSetShape::Hash(Isolate* isolate, String* key) {
- return key->Hash();
-}
-
-uint32_t StringSetShape::HashForObject(Isolate* isolate, Object* object) {
- return String::cast(object)->Hash();
-}
-
-StringTableKey::StringTableKey(uint32_t hash_field)
- : HashTableKey(hash_field >> Name::kHashShift), hash_field_(hash_field) {}
-
-void StringTableKey::set_hash_field(uint32_t hash_field) {
- hash_field_ = hash_field;
- set_hash(hash_field >> Name::kHashShift);
-}
-
-Handle<Object> StringTableShape::AsHandle(Isolate* isolate,
- StringTableKey* key) {
- return key->AsHandle(isolate);
-}
-
-uint32_t StringTableShape::HashForObject(Isolate* isolate, Object* object) {
- return String::cast(object)->Hash();
-}
-
-RootIndex StringTableShape::GetMapRootIndex() {
- return RootIndex::kStringTableMap;
-}
-
-bool NumberDictionary::requires_slow_elements() {
- Object* max_index_object = get(kMaxNumberKeyIndex);
- if (!max_index_object->IsSmi()) return false;
- return 0 != (Smi::ToInt(max_index_object) & kRequiresSlowElementsMask);
-}
-
-uint32_t NumberDictionary::max_number_key() {
- DCHECK(!requires_slow_elements());
- Object* max_index_object = get(kMaxNumberKeyIndex);
- if (!max_index_object->IsSmi()) return 0;
- uint32_t value = static_cast<uint32_t>(Smi::ToInt(max_index_object));
- return value >> kRequiresSlowElementsTagSize;
-}
-
-void NumberDictionary::set_requires_slow_elements() {
- set(kMaxNumberKeyIndex, Smi::FromInt(kRequiresSlowElementsMask));
-}
-
DEFINE_DEOPT_ELEMENT_ACCESSORS(TranslationByteArray, ByteArray)
DEFINE_DEOPT_ELEMENT_ACCESSORS(InlinedFunctionCount, Smi)
DEFINE_DEOPT_ELEMENT_ACCESSORS(LiteralArray, FixedArray)
@@ -1452,165 +995,110 @@ DEFINE_DEOPT_ENTRY_ACCESSORS(BytecodeOffsetRaw, Smi)
DEFINE_DEOPT_ENTRY_ACCESSORS(TranslationIndex, Smi)
DEFINE_DEOPT_ENTRY_ACCESSORS(Pc, Smi)
-SMI_ACCESSORS(FreeSpace, size, kSizeOffset)
-RELAXED_SMI_ACCESSORS(FreeSpace, size, kSizeOffset)
-
-
-int FreeSpace::Size() { return size(); }
-
-
-FreeSpace* FreeSpace::next() {
- DCHECK(map() == Heap::FromWritableHeapObject(this)->root(
- RootIndex::kFreeSpaceMap) ||
- (!Heap::FromWritableHeapObject(this)->deserialization_complete() &&
- map() == nullptr));
- DCHECK_LE(kNextOffset + kPointerSize, relaxed_read_size());
- return reinterpret_cast<FreeSpace*>(Memory<Address>(address() + kNextOffset));
-}
-
-
-void FreeSpace::set_next(FreeSpace* next) {
- DCHECK(map() == Heap::FromWritableHeapObject(this)->root(
- RootIndex::kFreeSpaceMap) ||
- (!Heap::FromWritableHeapObject(this)->deserialization_complete() &&
- map() == nullptr));
- DCHECK_LE(kNextOffset + kPointerSize, relaxed_read_size());
- base::Relaxed_Store(
- reinterpret_cast<base::AtomicWord*>(address() + kNextOffset),
- reinterpret_cast<base::AtomicWord>(next));
-}
-
-
-FreeSpace* FreeSpace::cast(HeapObject* o) {
- SLOW_DCHECK(!Heap::FromWritableHeapObject(o)->deserialization_complete() ||
- o->IsFreeSpace());
- return reinterpret_cast<FreeSpace*>(o);
-}
-
-int HeapObject::SizeFromMap(Map* map) const {
+int HeapObject::SizeFromMap(Map map) const {
int instance_size = map->instance_size();
if (instance_size != kVariableSizeSentinel) return instance_size;
// Only inline the most frequent cases.
InstanceType instance_type = map->instance_type();
- if (instance_type >= FIRST_FIXED_ARRAY_TYPE &&
- instance_type <= LAST_FIXED_ARRAY_TYPE) {
+ if (IsInRange(instance_type, FIRST_FIXED_ARRAY_TYPE, LAST_FIXED_ARRAY_TYPE)) {
return FixedArray::SizeFor(
- reinterpret_cast<const FixedArray*>(this)->synchronized_length());
+ FixedArray::unchecked_cast(*this)->synchronized_length());
+ }
+ if (IsInRange(instance_type, FIRST_CONTEXT_TYPE, LAST_CONTEXT_TYPE)) {
+    // Native contexts have a fixed size.
+ DCHECK_NE(instance_type, NATIVE_CONTEXT_TYPE);
+ return Context::SizeFor(Context::unchecked_cast(*this)->length());
}
if (instance_type == ONE_BYTE_STRING_TYPE ||
instance_type == ONE_BYTE_INTERNALIZED_STRING_TYPE) {
    // Strings may get concurrently truncated, hence we have to access their
    // length in a synchronized way.
return SeqOneByteString::SizeFor(
- reinterpret_cast<const SeqOneByteString*>(this)->synchronized_length());
+ SeqOneByteString::unchecked_cast(*this)->synchronized_length());
}
if (instance_type == BYTE_ARRAY_TYPE) {
return ByteArray::SizeFor(
- reinterpret_cast<const ByteArray*>(this)->synchronized_length());
+ ByteArray::unchecked_cast(*this)->synchronized_length());
}
if (instance_type == BYTECODE_ARRAY_TYPE) {
return BytecodeArray::SizeFor(
- reinterpret_cast<const BytecodeArray*>(this)->synchronized_length());
+ BytecodeArray::unchecked_cast(*this)->synchronized_length());
}
if (instance_type == FREE_SPACE_TYPE) {
- return reinterpret_cast<const FreeSpace*>(this)->relaxed_read_size();
+ return FreeSpace::unchecked_cast(*this)->relaxed_read_size();
}
if (instance_type == STRING_TYPE ||
instance_type == INTERNALIZED_STRING_TYPE) {
    // Strings may get concurrently truncated, hence we have to access their
    // length in a synchronized way.
return SeqTwoByteString::SizeFor(
- reinterpret_cast<const SeqTwoByteString*>(this)->synchronized_length());
+ SeqTwoByteString::unchecked_cast(*this)->synchronized_length());
}
if (instance_type == FIXED_DOUBLE_ARRAY_TYPE) {
return FixedDoubleArray::SizeFor(
- reinterpret_cast<const FixedDoubleArray*>(this)->synchronized_length());
+ FixedDoubleArray::unchecked_cast(*this)->synchronized_length());
}
if (instance_type == FEEDBACK_METADATA_TYPE) {
return FeedbackMetadata::SizeFor(
- reinterpret_cast<const FeedbackMetadata*>(this)
- ->synchronized_slot_count());
+ FeedbackMetadata::unchecked_cast(*this)->synchronized_slot_count());
+ }
+ if (instance_type == DESCRIPTOR_ARRAY_TYPE) {
+ return DescriptorArray::SizeFor(
+ DescriptorArray::unchecked_cast(*this)->number_of_all_descriptors());
}
- if (instance_type >= FIRST_WEAK_FIXED_ARRAY_TYPE &&
- instance_type <= LAST_WEAK_FIXED_ARRAY_TYPE) {
+ if (IsInRange(instance_type, FIRST_WEAK_FIXED_ARRAY_TYPE,
+ LAST_WEAK_FIXED_ARRAY_TYPE)) {
return WeakFixedArray::SizeFor(
- reinterpret_cast<const WeakFixedArray*>(this)->synchronized_length());
+ WeakFixedArray::unchecked_cast(*this)->synchronized_length());
}
if (instance_type == WEAK_ARRAY_LIST_TYPE) {
return WeakArrayList::SizeForCapacity(
- reinterpret_cast<const WeakArrayList*>(this)->synchronized_capacity());
+ WeakArrayList::unchecked_cast(*this)->synchronized_capacity());
}
- if (instance_type >= FIRST_FIXED_TYPED_ARRAY_TYPE &&
- instance_type <= LAST_FIXED_TYPED_ARRAY_TYPE) {
- return reinterpret_cast<const FixedTypedArrayBase*>(this)->TypedArraySize(
+ if (IsInRange(instance_type, FIRST_FIXED_TYPED_ARRAY_TYPE,
+ LAST_FIXED_TYPED_ARRAY_TYPE)) {
+ return FixedTypedArrayBase::unchecked_cast(*this)->TypedArraySize(
instance_type);
}
if (instance_type == SMALL_ORDERED_HASH_SET_TYPE) {
return SmallOrderedHashSet::SizeFor(
- reinterpret_cast<const SmallOrderedHashSet*>(this)->Capacity());
- }
- if (instance_type == PROPERTY_ARRAY_TYPE) {
- return PropertyArray::SizeFor(
- reinterpret_cast<const PropertyArray*>(this)->synchronized_length());
+ SmallOrderedHashSet::unchecked_cast(*this)->Capacity());
}
if (instance_type == SMALL_ORDERED_HASH_MAP_TYPE) {
return SmallOrderedHashMap::SizeFor(
- reinterpret_cast<const SmallOrderedHashMap*>(this)->Capacity());
+ SmallOrderedHashMap::unchecked_cast(*this)->Capacity());
+ }
+ if (instance_type == SMALL_ORDERED_NAME_DICTIONARY_TYPE) {
+ return SmallOrderedNameDictionary::SizeFor(
+ SmallOrderedNameDictionary::unchecked_cast(*this)->Capacity());
+ }
+ if (instance_type == PROPERTY_ARRAY_TYPE) {
+ return PropertyArray::SizeFor(
+ PropertyArray::cast(*this)->synchronized_length());
}
if (instance_type == FEEDBACK_VECTOR_TYPE) {
return FeedbackVector::SizeFor(
- reinterpret_cast<const FeedbackVector*>(this)->length());
+ FeedbackVector::unchecked_cast(*this)->length());
}
if (instance_type == BIGINT_TYPE) {
- return BigInt::SizeFor(reinterpret_cast<const BigInt*>(this)->length());
+ return BigInt::SizeFor(BigInt::unchecked_cast(*this)->length());
+ }
+ if (instance_type == PREPARSE_DATA_TYPE) {
+ PreparseData data = PreparseData::unchecked_cast(*this);
+ return PreparseData::SizeFor(data->data_length(), data->children_length());
}
- if (instance_type == PRE_PARSED_SCOPE_DATA_TYPE) {
- return PreParsedScopeData::SizeFor(
- reinterpret_cast<const PreParsedScopeData*>(this)->length());
+ if (instance_type == CODE_TYPE) {
+ return Code::unchecked_cast(*this)->CodeSize();
}
- DCHECK(instance_type == CODE_TYPE);
- return reinterpret_cast<const Code*>(this)->CodeSize();
+ DCHECK_EQ(instance_type, EMBEDDER_DATA_ARRAY_TYPE);
+ return EmbedderDataArray::SizeFor(
+ EmbedderDataArray::unchecked_cast(*this)->length());
}
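// A minimal sketch of the range-dispatch pattern SizeFromMap now uses:
// IsInRange replaces the old pairs of chained comparisons. The standalone
// definition below is an illustrative assumption, not the exact utils.h one.
template <typename T>
constexpr bool IsInRange(T value, T lower_limit, T higher_limit) {
  // Holds exactly when lower_limit <= value <= higher_limit.
  return lower_limit <= value && value <= higher_limit;
}
// Usage, mirroring the dispatch above:
//   if (IsInRange(instance_type, FIRST_FIXED_ARRAY_TYPE,
//                 LAST_FIXED_ARRAY_TYPE)) { ... }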
-
-ACCESSORS(AsyncGeneratorRequest, next, Object, kNextOffset)
-SMI_ACCESSORS(AsyncGeneratorRequest, resume_mode, kResumeModeOffset)
-ACCESSORS(AsyncGeneratorRequest, value, Object, kValueOffset)
-ACCESSORS(AsyncGeneratorRequest, promise, Object, kPromiseOffset)
-
-ACCESSORS(Tuple2, value1, Object, kValue1Offset)
-ACCESSORS(Tuple2, value2, Object, kValue2Offset)
-ACCESSORS(Tuple3, value3, Object, kValue3Offset)
-
ACCESSORS(TemplateObjectDescription, raw_strings, FixedArray, kRawStringsOffset)
ACCESSORS(TemplateObjectDescription, cooked_strings, FixedArray,
kCookedStringsOffset)
-ACCESSORS(AccessorPair, getter, Object, kGetterOffset)
-ACCESSORS(AccessorPair, setter, Object, kSetterOffset)
-
-// static
-bool Foreign::IsNormalized(Object* value) {
- if (value == Smi::kZero) return true;
- return Foreign::cast(value)->foreign_address() != kNullAddress;
-}
-
-Address Foreign::foreign_address() {
- return READ_UINTPTR_FIELD(this, kForeignAddressOffset);
-}
-
-void Foreign::set_foreign_address(Address value) {
- WRITE_UINTPTR_FIELD(this, kForeignAddressOffset, value);
-}
-
-template <class Derived>
-void SmallOrderedHashTable<Derived>::SetDataEntry(int entry, int relative_index,
- Object* value) {
- Address entry_offset = GetDataEntryOffset(entry, relative_index);
- RELAXED_WRITE_FIELD(this, entry_offset, value);
- WRITE_BARRIER(this, static_cast<int>(entry_offset), value);
-}
-
// static
Maybe<bool> Object::GreaterThan(Isolate* isolate, Handle<Object> x,
Handle<Object> y) {
@@ -1710,198 +1198,8 @@ MaybeHandle<Object> Object::GetPropertyOrElement(Handle<Object> receiver,
-Object* AccessorPair::get(AccessorComponent component) {
- return component == ACCESSOR_GETTER ? getter() : setter();
-}
-
-
-void AccessorPair::set(AccessorComponent component, Object* value) {
- if (component == ACCESSOR_GETTER) {
- set_getter(value);
- } else {
- set_setter(value);
- }
-}
-
-
-void AccessorPair::SetComponents(Object* getter, Object* setter) {
- if (!getter->IsNull()) set_getter(getter);
- if (!setter->IsNull()) set_setter(setter);
-}
-
-bool AccessorPair::Equals(AccessorPair* pair) {
- return (this == pair) || pair->Equals(getter(), setter());
-}
-
-
-bool AccessorPair::Equals(Object* getter_value, Object* setter_value) {
- return (getter() == getter_value) && (setter() == setter_value);
-}
-
-
-bool AccessorPair::ContainsAccessor() {
- return IsJSAccessor(getter()) || IsJSAccessor(setter());
-}
-
-
-bool AccessorPair::IsJSAccessor(Object* obj) {
- return obj->IsCallable() || obj->IsUndefined();
-}
-
-template <typename Derived, typename Shape>
-void Dictionary<Derived, Shape>::ClearEntry(Isolate* isolate, int entry) {
- Object* the_hole = this->GetReadOnlyRoots().the_hole_value();
- PropertyDetails details = PropertyDetails::Empty();
- Derived::cast(this)->SetEntry(isolate, entry, the_hole, the_hole, details);
-}
-
-template <typename Derived, typename Shape>
-void Dictionary<Derived, Shape>::SetEntry(Isolate* isolate, int entry,
- Object* key, Object* value,
- PropertyDetails details) {
- DCHECK(Dictionary::kEntrySize == 2 || Dictionary::kEntrySize == 3);
- DCHECK(!key->IsName() || details.dictionary_index() > 0);
- int index = DerivedHashTable::EntryToIndex(entry);
- DisallowHeapAllocation no_gc;
- WriteBarrierMode mode = this->GetWriteBarrierMode(no_gc);
- this->set(index + Derived::kEntryKeyIndex, key, mode);
- this->set(index + Derived::kEntryValueIndex, value, mode);
- if (Shape::kHasDetails) DetailsAtPut(isolate, entry, details);
-}
-
-Object* GlobalDictionaryShape::Unwrap(Object* object) {
- return PropertyCell::cast(object)->name();
-}
-
-RootIndex GlobalDictionaryShape::GetMapRootIndex() {
- return RootIndex::kGlobalDictionaryMap;
-}
-
-Name* NameDictionary::NameAt(int entry) { return Name::cast(KeyAt(entry)); }
-
-RootIndex NameDictionaryShape::GetMapRootIndex() {
- return RootIndex::kNameDictionaryMap;
-}
-
-PropertyCell* GlobalDictionary::CellAt(int entry) {
- DCHECK(KeyAt(entry)->IsPropertyCell());
- return PropertyCell::cast(KeyAt(entry));
-}
-
-bool GlobalDictionaryShape::IsLive(ReadOnlyRoots roots, Object* k) {
- DCHECK_NE(roots.the_hole_value(), k);
- return k != roots.undefined_value();
-}
-
-bool GlobalDictionaryShape::IsKey(ReadOnlyRoots roots, Object* k) {
- return IsLive(roots, k) && !PropertyCell::cast(k)->value()->IsTheHole(roots);
-}
-
-Name* GlobalDictionary::NameAt(int entry) { return CellAt(entry)->name(); }
-Object* GlobalDictionary::ValueAt(int entry) { return CellAt(entry)->value(); }
-
-void GlobalDictionary::SetEntry(Isolate* isolate, int entry, Object* key,
- Object* value, PropertyDetails details) {
- DCHECK_EQ(key, PropertyCell::cast(value)->name());
- set(EntryToIndex(entry) + kEntryKeyIndex, value);
- DetailsAtPut(isolate, entry, details);
-}
-
-void GlobalDictionary::ValueAtPut(int entry, Object* value) {
- set(EntryToIndex(entry), value);
-}
-
-bool NumberDictionaryBaseShape::IsMatch(uint32_t key, Object* other) {
- DCHECK(other->IsNumber());
- return key == static_cast<uint32_t>(other->Number());
-}
-
-uint32_t NumberDictionaryBaseShape::Hash(Isolate* isolate, uint32_t key) {
- return ComputeSeededHash(key, isolate->heap()->HashSeed());
-}
-
-uint32_t NumberDictionaryBaseShape::HashForObject(Isolate* isolate,
- Object* other) {
- DCHECK(other->IsNumber());
- return ComputeSeededHash(static_cast<uint32_t>(other->Number()),
- isolate->heap()->HashSeed());
-}
-
-Handle<Object> NumberDictionaryBaseShape::AsHandle(Isolate* isolate,
- uint32_t key) {
- return isolate->factory()->NewNumberFromUint(key);
-}
-
-RootIndex NumberDictionaryShape::GetMapRootIndex() {
- return RootIndex::kNumberDictionaryMap;
-}
-
-RootIndex SimpleNumberDictionaryShape::GetMapRootIndex() {
- return RootIndex::kSimpleNumberDictionaryMap;
-}
-
-bool NameDictionaryShape::IsMatch(Handle<Name> key, Object* other) {
- DCHECK(other->IsTheHole() || Name::cast(other)->IsUniqueName());
- DCHECK(key->IsUniqueName());
- return *key == other;
-}
-
-uint32_t NameDictionaryShape::Hash(Isolate* isolate, Handle<Name> key) {
- return key->Hash();
-}
-
-uint32_t NameDictionaryShape::HashForObject(Isolate* isolate, Object* other) {
- return Name::cast(other)->Hash();
-}
-
-bool GlobalDictionaryShape::IsMatch(Handle<Name> key, Object* other) {
- DCHECK(PropertyCell::cast(other)->name()->IsUniqueName());
- return *key == PropertyCell::cast(other)->name();
-}
-
-uint32_t GlobalDictionaryShape::HashForObject(Isolate* isolate, Object* other) {
- return PropertyCell::cast(other)->name()->Hash();
-}
-
-Handle<Object> NameDictionaryShape::AsHandle(Isolate* isolate,
- Handle<Name> key) {
- DCHECK(key->IsUniqueName());
- return key;
-}
-
-
-template <typename Dictionary>
-PropertyDetails GlobalDictionaryShape::DetailsAt(Dictionary* dict, int entry) {
- DCHECK_LE(0, entry); // Not found is -1, which is not caught by get().
- return dict->CellAt(entry)->property_details();
-}
-
-template <typename Dictionary>
-void GlobalDictionaryShape::DetailsAtPut(Isolate* isolate, Dictionary* dict,
- int entry, PropertyDetails value) {
- DCHECK_LE(0, entry); // Not found is -1, which is not caught by get().
- PropertyCell* cell = dict->CellAt(entry);
- if (cell->property_details().IsReadOnly() != value.IsReadOnly()) {
- cell->dependent_code()->DeoptimizeDependentCodeGroup(
- isolate, DependentCode::kPropertyCellChangedGroup);
- }
- cell->set_property_details(value);
-}
-
-bool ObjectHashTableShape::IsMatch(Handle<Object> key, Object* other) {
- return key->SameValue(other);
-}
-
-uint32_t ObjectHashTableShape::Hash(Isolate* isolate, Handle<Object> key) {
- return Smi::ToInt(key->GetHash());
-}
-
-uint32_t ObjectHashTableShape::HashForObject(Isolate* isolate, Object* other) {
- return Smi::ToInt(other->GetHash());
-}
-
// static
-Object* Object::GetSimpleHash(Object* object) {
+Object Object::GetSimpleHash(Object object) {
DisallowHeapAllocation no_gc;
if (object->IsSmi()) {
uint32_t hash = ComputeUnseededHash(Smi::ToInt(object));
@@ -1937,15 +1235,14 @@ Object* Object::GetSimpleHash(Object* object) {
return object;
}
-Object* Object::GetHash() {
+Object Object::GetHash() {
DisallowHeapAllocation no_gc;
- Object* hash = GetSimpleHash(this);
+ Object hash = GetSimpleHash(*this);
if (hash->IsSmi()) return hash;
DCHECK(IsJSReceiver());
- JSReceiver* receiver = JSReceiver::cast(this);
- Isolate* isolate = receiver->GetIsolate();
- return receiver->GetIdentityHash(isolate);
+ JSReceiver receiver = JSReceiver::cast(*this);
+ return receiver->GetIdentityHash();
}
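// A sketch of the two-tier hashing GetHash implements: primitives hash by
// value through GetSimpleHash, while JSReceivers fall back to a lazily
// allocated identity hash stored on the object. For Smis the value is mixed
// with an integer finalizer along these lines (constants illustrative):
inline uint32_t ComputeUnseededHash(uint32_t key) {
  uint32_t hash = key;
  hash = ~hash + (hash << 15);  // hash = (hash << 15) - hash - 1
  hash = hash ^ (hash >> 12);
  hash = hash + (hash << 2);
  hash = hash ^ (hash >> 4);
  hash = hash * 2057;  // hash = hash + (hash << 3) + (hash << 11)
  hash = hash ^ (hash >> 16);
  return hash & 0x3fffffff;  // keep the result in Smi range
}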
Handle<Object> ObjectHashTableShape::AsHandle(Handle<Object> key) {
@@ -1964,20 +1261,10 @@ Relocatable::~Relocatable() {
isolate_->set_relocatable_top(prev_);
}
-
-template<class Derived, class TableType>
-Object* OrderedHashTableIterator<Derived, TableType>::CurrentKey() {
- TableType* table(TableType::cast(this->table()));
- int index = Smi::ToInt(this->index());
- Object* key = table->KeyAt(index);
- DCHECK(!key->IsTheHole());
- return key;
-}
-
-// Predictably converts HeapObject* or Address to uint32 by calculating
+// Predictably converts HeapObject or Address to uint32 by calculating
 // the offset of the address in the respective MemoryChunk.
-static inline uint32_t ObjectAddressForHashing(void* object) {
- uint32_t value = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(object));
+static inline uint32_t ObjectAddressForHashing(Address object) {
+ uint32_t value = static_cast<uint32_t>(object);
return value & MemoryChunk::kAlignmentMask;
}
@@ -2006,8 +1293,9 @@ static inline Handle<Object> MakeEntryPair(Isolate* isolate, Handle<Object> key,
PACKED_ELEMENTS, 2);
}
-
-bool ScopeInfo::IsAsmModule() const { return AsmModuleField::decode(Flags()); }
+bool ScopeInfo::IsAsmModule() const {
+ return IsAsmModuleField::decode(Flags());
+}
bool ScopeInfo::HasSimpleParameters() const {
return HasSimpleParametersField::decode(Flags());
@@ -2025,9 +1313,9 @@ bool ScopeInfo::HasSimpleParameters() const {
FOR_EACH_SCOPE_INFO_NUMERIC_FIELD(FIELD_ACCESSORS)
#undef FIELD_ACCESSORS
-FreshlyAllocatedBigInt* FreshlyAllocatedBigInt::cast(Object* object) {
+FreshlyAllocatedBigInt FreshlyAllocatedBigInt::cast(Object object) {
SLOW_DCHECK(object->IsBigInt());
- return reinterpret_cast<FreshlyAllocatedBigInt*>(object);
+ return FreshlyAllocatedBigInt(object->ptr());
}
} // namespace internal
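// A hedged sketch of the pointer-to-value migration visible throughout this
// file: Object is no longer addressed through Object*, but is a one-word
// value class wrapping the tagged Address, so casts take and return values
// ("*this" instead of "this"). operator-> returns the object itself purely
// so that existing o->Foo() call sites keep compiling. The simplified names
// and tagging scheme below are assumptions.
#include <cstdint>

class Object {
 public:
  constexpr Object() : ptr_(0) {}
  explicit constexpr Object(uintptr_t ptr) : ptr_(ptr) {}
  constexpr uintptr_t ptr() const { return ptr_; }
  bool IsSmi() const { return (ptr_ & 1) == 0; }  // low tag bit, simplified
  Object* operator->() { return this; }
  const Object* operator->() const { return this; }

 private:
  uintptr_t ptr_;  // the tagged word; copying an Object copies one word
};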
diff --git a/deps/v8/src/objects-printer.cc b/deps/v8/src/objects-printer.cc
index 38dd9d1c52..60931395a9 100644
--- a/deps/v8/src/objects-printer.cc
+++ b/deps/v8/src/objects-printer.cc
@@ -10,15 +10,22 @@
#include "src/bootstrapper.h"
#include "src/disasm.h"
#include "src/disassembler.h"
-#include "src/instruction-stream.h"
#include "src/interpreter/bytecodes.h"
#include "src/objects-inl.h"
#include "src/objects/arguments-inl.h"
+#include "src/objects/cell-inl.h"
#include "src/objects/data-handler-inl.h"
#include "src/objects/debug-objects-inl.h"
+#include "src/objects/embedder-data-array-inl.h"
+#include "src/objects/embedder-data-slot-inl.h"
+#include "src/objects/feedback-cell-inl.h"
+#include "src/objects/foreign-inl.h"
+#include "src/objects/free-space-inl.h"
#include "src/objects/hash-table-inl.h"
+#include "src/objects/heap-number-inl.h"
#include "src/objects/js-array-buffer-inl.h"
#include "src/objects/js-array-inl.h"
+#include "src/snapshot/embedded-data.h"
#ifdef V8_INTL_SUPPORT
#include "src/objects/js-break-iterator-inl.h"
#include "src/objects/js-collator-inl.h"
@@ -38,14 +45,17 @@
#include "src/objects/js-regexp-string-iterator-inl.h"
#ifdef V8_INTL_SUPPORT
#include "src/objects/js-relative-time-format-inl.h"
+#include "src/objects/js-segment-iterator-inl.h"
#include "src/objects/js-segmenter-inl.h"
#endif // V8_INTL_SUPPORT
+#include "src/objects/js-weak-refs-inl.h"
#include "src/objects/literal-objects-inl.h"
#include "src/objects/microtask-inl.h"
-#include "src/objects/microtask-queue-inl.h"
#include "src/objects/module-inl.h"
+#include "src/objects/oddball-inl.h"
#include "src/objects/promise-inl.h"
#include "src/objects/stack-frame-info-inl.h"
+#include "src/objects/struct-inl.h"
#include "src/ostreams.h"
#include "src/regexp/jsregexp.h"
#include "src/transitions-inl.h"
@@ -58,31 +68,30 @@ namespace internal {
#ifdef OBJECT_PRINT
-void Object::Print() {
+void Object::Print() const {
StdoutStream os;
this->Print(os);
os << std::flush;
}
-void Object::Print(std::ostream& os) { // NOLINT
+void Object::Print(std::ostream& os) const { // NOLINT
if (IsSmi()) {
- os << "Smi: " << std::hex << "0x" << Smi::ToInt(this);
- os << std::dec << " (" << Smi::ToInt(this) << ")\n";
+ os << "Smi: " << std::hex << "0x" << Smi::ToInt(*this);
+ os << std::dec << " (" << Smi::ToInt(*this) << ")\n";
} else {
- HeapObject::cast(this)->HeapObjectPrint(os);
+ HeapObject::cast(*this)->HeapObjectPrint(os);
}
}
void HeapObject::PrintHeader(std::ostream& os, const char* id) { // NOLINT
- os << reinterpret_cast<void*>(this) << ": [";
+ os << reinterpret_cast<void*>(ptr()) << ": [";
if (id != nullptr) {
os << id;
} else {
os << map()->instance_type();
}
os << "]";
- MemoryChunk* chunk = MemoryChunk::FromAddress(
- reinterpret_cast<Address>(const_cast<HeapObject*>(this)));
+ MemoryChunk* chunk = MemoryChunk::FromAddress(ptr());
if (chunk->owner()->identity() == OLD_SPACE) os << " in OldSpace";
if (!IsMap()) os << "\n - map: " << Brief(map());
}
@@ -91,35 +100,40 @@ void HeapObject::HeapObjectPrint(std::ostream& os) { // NOLINT
InstanceType instance_type = map()->instance_type();
if (instance_type < FIRST_NONSTRING_TYPE) {
- String::cast(this)->StringPrint(os);
+ String::cast(*this)->StringPrint(os);
os << "\n";
return;
}
switch (instance_type) {
case SYMBOL_TYPE:
- Symbol::cast(this)->SymbolPrint(os);
+ Symbol::cast(*this)->SymbolPrint(os);
break;
case MAP_TYPE:
- Map::cast(this)->MapPrint(os);
+ Map::cast(*this)->MapPrint(os);
break;
case HEAP_NUMBER_TYPE:
- HeapNumber::cast(this)->HeapNumberPrint(os);
+ HeapNumber::cast(*this)->HeapNumberPrint(os);
os << "\n";
break;
case MUTABLE_HEAP_NUMBER_TYPE:
os << "<mutable ";
- MutableHeapNumber::cast(this)->MutableHeapNumberPrint(os);
+ MutableHeapNumber::cast(*this)->MutableHeapNumberPrint(os);
os << ">\n";
break;
case BIGINT_TYPE:
- BigInt::cast(this)->BigIntPrint(os);
+ BigInt::cast(*this)->BigIntPrint(os);
os << "\n";
break;
+ case EMBEDDER_DATA_ARRAY_TYPE:
+ EmbedderDataArray::cast(*this)->EmbedderDataArrayPrint(os);
+ break;
case FIXED_DOUBLE_ARRAY_TYPE:
- FixedDoubleArray::cast(this)->FixedDoubleArrayPrint(os);
+ FixedDoubleArray::cast(*this)->FixedDoubleArrayPrint(os);
break;
case FIXED_ARRAY_TYPE:
+ FixedArray::cast(*this)->FixedArrayPrint(os);
+ break;
case AWAIT_CONTEXT_TYPE:
case BLOCK_CONTEXT_TYPE:
case CATCH_CONTEXT_TYPE:
@@ -127,59 +141,64 @@ void HeapObject::HeapObjectPrint(std::ostream& os) { // NOLINT
case EVAL_CONTEXT_TYPE:
case FUNCTION_CONTEXT_TYPE:
case MODULE_CONTEXT_TYPE:
- case NATIVE_CONTEXT_TYPE:
case SCRIPT_CONTEXT_TYPE:
case WITH_CONTEXT_TYPE:
case SCRIPT_CONTEXT_TABLE_TYPE:
- FixedArray::cast(this)->FixedArrayPrint(os);
+ Context::cast(*this)->ContextPrint(os);
+ break;
+ case NATIVE_CONTEXT_TYPE:
+ NativeContext::cast(*this)->NativeContextPrint(os);
break;
case HASH_TABLE_TYPE:
case ORDERED_HASH_MAP_TYPE:
case ORDERED_HASH_SET_TYPE:
+ case ORDERED_NAME_DICTIONARY_TYPE:
case NAME_DICTIONARY_TYPE:
case GLOBAL_DICTIONARY_TYPE:
case SIMPLE_NUMBER_DICTIONARY_TYPE:
+ FixedArray::cast(*this)->FixedArrayPrint(os);
+ break;
case STRING_TABLE_TYPE:
- ObjectHashTable::cast(this)->ObjectHashTablePrint(os);
+ ObjectHashTable::cast(*this)->ObjectHashTablePrint(os);
break;
case NUMBER_DICTIONARY_TYPE:
- NumberDictionary::cast(this)->NumberDictionaryPrint(os);
+ NumberDictionary::cast(*this)->NumberDictionaryPrint(os);
break;
case EPHEMERON_HASH_TABLE_TYPE:
- EphemeronHashTable::cast(this)->EphemeronHashTablePrint(os);
+ EphemeronHashTable::cast(*this)->EphemeronHashTablePrint(os);
break;
case OBJECT_BOILERPLATE_DESCRIPTION_TYPE:
- ObjectBoilerplateDescription::cast(this)
+ ObjectBoilerplateDescription::cast(*this)
->ObjectBoilerplateDescriptionPrint(os);
break;
case PROPERTY_ARRAY_TYPE:
- PropertyArray::cast(this)->PropertyArrayPrint(os);
+ PropertyArray::cast(*this)->PropertyArrayPrint(os);
break;
case BYTE_ARRAY_TYPE:
- ByteArray::cast(this)->ByteArrayPrint(os);
+ ByteArray::cast(*this)->ByteArrayPrint(os);
break;
case BYTECODE_ARRAY_TYPE:
- BytecodeArray::cast(this)->BytecodeArrayPrint(os);
+ BytecodeArray::cast(*this)->BytecodeArrayPrint(os);
break;
case DESCRIPTOR_ARRAY_TYPE:
- DescriptorArray::cast(this)->DescriptorArrayPrint(os);
+ DescriptorArray::cast(*this)->DescriptorArrayPrint(os);
break;
case TRANSITION_ARRAY_TYPE:
- TransitionArray::cast(this)->TransitionArrayPrint(os);
+ TransitionArray::cast(*this)->TransitionArrayPrint(os);
break;
case FEEDBACK_CELL_TYPE:
- FeedbackCell::cast(this)->FeedbackCellPrint(os);
+ FeedbackCell::cast(*this)->FeedbackCellPrint(os);
break;
case FEEDBACK_VECTOR_TYPE:
- FeedbackVector::cast(this)->FeedbackVectorPrint(os);
+ FeedbackVector::cast(*this)->FeedbackVectorPrint(os);
break;
case FREE_SPACE_TYPE:
- FreeSpace::cast(this)->FreeSpacePrint(os);
+ FreeSpace::cast(*this)->FreeSpacePrint(os);
break;
-#define PRINT_FIXED_TYPED_ARRAY(Type, type, TYPE, ctype) \
- case Fixed##Type##Array::kInstanceType: \
- Fixed##Type##Array::cast(this)->FixedTypedArrayPrint(os); \
+#define PRINT_FIXED_TYPED_ARRAY(Type, type, TYPE, ctype) \
+ case Fixed##Type##Array::kInstanceType: \
+ Fixed##Type##Array::cast(*this)->FixedTypedArrayPrint(os); \
break;
TYPED_ARRAYS(PRINT_FIXED_TYPED_ARRAY)
@@ -192,6 +211,7 @@ void HeapObject::HeapObjectPrint(std::ostream& os) { // NOLINT
case JS_API_OBJECT_TYPE:
case JS_SPECIAL_API_OBJECT_TYPE:
case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
+ case JS_ASYNC_FUNCTION_OBJECT_TYPE:
case JS_ASYNC_GENERATOR_OBJECT_TYPE:
case JS_ARGUMENTS_TYPE:
case JS_ERROR_TYPE:
@@ -200,180 +220,196 @@ void HeapObject::HeapObjectPrint(std::ostream& os) { // NOLINT
case WASM_GLOBAL_TYPE:
case WASM_MEMORY_TYPE:
case WASM_TABLE_TYPE:
- JSObject::cast(this)->JSObjectPrint(os);
+ JSObject::cast(*this)->JSObjectPrint(os);
break;
case WASM_MODULE_TYPE:
- WasmModuleObject::cast(this)->WasmModuleObjectPrint(os);
+ WasmModuleObject::cast(*this)->WasmModuleObjectPrint(os);
break;
case WASM_INSTANCE_TYPE:
- WasmInstanceObject::cast(this)->WasmInstanceObjectPrint(os);
+ WasmInstanceObject::cast(*this)->WasmInstanceObjectPrint(os);
break;
case JS_GENERATOR_OBJECT_TYPE:
- JSGeneratorObject::cast(this)->JSGeneratorObjectPrint(os);
+ JSGeneratorObject::cast(*this)->JSGeneratorObjectPrint(os);
break;
case JS_PROMISE_TYPE:
- JSPromise::cast(this)->JSPromisePrint(os);
+ JSPromise::cast(*this)->JSPromisePrint(os);
break;
case JS_ARRAY_TYPE:
- JSArray::cast(this)->JSArrayPrint(os);
+ JSArray::cast(*this)->JSArrayPrint(os);
break;
case JS_REGEXP_TYPE:
- JSRegExp::cast(this)->JSRegExpPrint(os);
+ JSRegExp::cast(*this)->JSRegExpPrint(os);
break;
case JS_REGEXP_STRING_ITERATOR_TYPE:
- JSRegExpStringIterator::cast(this)->JSRegExpStringIteratorPrint(os);
+ JSRegExpStringIterator::cast(*this)->JSRegExpStringIteratorPrint(os);
break;
case ODDBALL_TYPE:
- Oddball::cast(this)->to_string()->Print(os);
+ Oddball::cast(*this)->to_string()->Print(os);
break;
case JS_BOUND_FUNCTION_TYPE:
- JSBoundFunction::cast(this)->JSBoundFunctionPrint(os);
+ JSBoundFunction::cast(*this)->JSBoundFunctionPrint(os);
break;
case JS_FUNCTION_TYPE:
- JSFunction::cast(this)->JSFunctionPrint(os);
+ JSFunction::cast(*this)->JSFunctionPrint(os);
break;
case JS_GLOBAL_PROXY_TYPE:
- JSGlobalProxy::cast(this)->JSGlobalProxyPrint(os);
+ JSGlobalProxy::cast(*this)->JSGlobalProxyPrint(os);
break;
case JS_GLOBAL_OBJECT_TYPE:
- JSGlobalObject::cast(this)->JSGlobalObjectPrint(os);
+ JSGlobalObject::cast(*this)->JSGlobalObjectPrint(os);
break;
case JS_VALUE_TYPE:
- JSValue::cast(this)->JSValuePrint(os);
+ JSValue::cast(*this)->JSValuePrint(os);
break;
case JS_DATE_TYPE:
- JSDate::cast(this)->JSDatePrint(os);
+ JSDate::cast(*this)->JSDatePrint(os);
break;
case CODE_TYPE:
- Code::cast(this)->CodePrint(os);
+ Code::cast(*this)->CodePrint(os);
break;
case CODE_DATA_CONTAINER_TYPE:
- CodeDataContainer::cast(this)->CodeDataContainerPrint(os);
+ CodeDataContainer::cast(*this)->CodeDataContainerPrint(os);
break;
case JS_PROXY_TYPE:
- JSProxy::cast(this)->JSProxyPrint(os);
+ JSProxy::cast(*this)->JSProxyPrint(os);
break;
case JS_SET_TYPE:
- JSSet::cast(this)->JSSetPrint(os);
+ JSSet::cast(*this)->JSSetPrint(os);
break;
case JS_MAP_TYPE:
- JSMap::cast(this)->JSMapPrint(os);
+ JSMap::cast(*this)->JSMapPrint(os);
break;
case JS_SET_KEY_VALUE_ITERATOR_TYPE:
case JS_SET_VALUE_ITERATOR_TYPE:
- JSSetIterator::cast(this)->JSSetIteratorPrint(os);
+ JSSetIterator::cast(*this)->JSSetIteratorPrint(os);
break;
case JS_MAP_KEY_ITERATOR_TYPE:
case JS_MAP_KEY_VALUE_ITERATOR_TYPE:
case JS_MAP_VALUE_ITERATOR_TYPE:
- JSMapIterator::cast(this)->JSMapIteratorPrint(os);
+ JSMapIterator::cast(*this)->JSMapIteratorPrint(os);
+ break;
+ case JS_WEAK_CELL_TYPE:
+ JSWeakCell::cast(*this)->JSWeakCellPrint(os);
+ break;
+ case JS_WEAK_REF_TYPE:
+ JSWeakRef::cast(*this)->JSWeakRefPrint(os);
+ break;
+ case JS_WEAK_FACTORY_TYPE:
+ JSWeakFactory::cast(*this)->JSWeakFactoryPrint(os);
+ break;
+ case JS_WEAK_FACTORY_CLEANUP_ITERATOR_TYPE:
+ JSWeakFactoryCleanupIterator::cast(*this)
+ ->JSWeakFactoryCleanupIteratorPrint(os);
break;
case JS_WEAK_MAP_TYPE:
- JSWeakMap::cast(this)->JSWeakMapPrint(os);
+ JSWeakMap::cast(*this)->JSWeakMapPrint(os);
break;
case JS_WEAK_SET_TYPE:
- JSWeakSet::cast(this)->JSWeakSetPrint(os);
+ JSWeakSet::cast(*this)->JSWeakSetPrint(os);
break;
case JS_MODULE_NAMESPACE_TYPE:
- JSModuleNamespace::cast(this)->JSModuleNamespacePrint(os);
+ JSModuleNamespace::cast(*this)->JSModuleNamespacePrint(os);
break;
case FOREIGN_TYPE:
- Foreign::cast(this)->ForeignPrint(os);
+ Foreign::cast(*this)->ForeignPrint(os);
break;
case CALL_HANDLER_INFO_TYPE:
- CallHandlerInfo::cast(this)->CallHandlerInfoPrint(os);
+ CallHandlerInfo::cast(*this)->CallHandlerInfoPrint(os);
break;
- case PRE_PARSED_SCOPE_DATA_TYPE:
- PreParsedScopeData::cast(this)->PreParsedScopeDataPrint(os);
+ case PREPARSE_DATA_TYPE:
+ PreparseData::cast(*this)->PreparseDataPrint(os);
break;
- case UNCOMPILED_DATA_WITHOUT_PRE_PARSED_SCOPE_TYPE:
- UncompiledDataWithoutPreParsedScope::cast(this)
- ->UncompiledDataWithoutPreParsedScopePrint(os);
+ case UNCOMPILED_DATA_WITHOUT_PREPARSE_DATA_TYPE:
+ UncompiledDataWithoutPreparseData::cast(*this)
+ ->UncompiledDataWithoutPreparseDataPrint(os);
break;
- case UNCOMPILED_DATA_WITH_PRE_PARSED_SCOPE_TYPE:
- UncompiledDataWithPreParsedScope::cast(this)
- ->UncompiledDataWithPreParsedScopePrint(os);
+ case UNCOMPILED_DATA_WITH_PREPARSE_DATA_TYPE:
+ UncompiledDataWithPreparseData::cast(*this)
+ ->UncompiledDataWithPreparseDataPrint(os);
break;
case SHARED_FUNCTION_INFO_TYPE:
- SharedFunctionInfo::cast(this)->SharedFunctionInfoPrint(os);
+ SharedFunctionInfo::cast(*this)->SharedFunctionInfoPrint(os);
break;
case JS_MESSAGE_OBJECT_TYPE:
- JSMessageObject::cast(this)->JSMessageObjectPrint(os);
+ JSMessageObject::cast(*this)->JSMessageObjectPrint(os);
break;
case CELL_TYPE:
- Cell::cast(this)->CellPrint(os);
+ Cell::cast(*this)->CellPrint(os);
break;
case PROPERTY_CELL_TYPE:
- PropertyCell::cast(this)->PropertyCellPrint(os);
+ PropertyCell::cast(*this)->PropertyCellPrint(os);
break;
case JS_ARRAY_BUFFER_TYPE:
- JSArrayBuffer::cast(this)->JSArrayBufferPrint(os);
+ JSArrayBuffer::cast(*this)->JSArrayBufferPrint(os);
break;
case JS_ARRAY_ITERATOR_TYPE:
- JSArrayIterator::cast(this)->JSArrayIteratorPrint(os);
+ JSArrayIterator::cast(*this)->JSArrayIteratorPrint(os);
break;
case JS_TYPED_ARRAY_TYPE:
- JSTypedArray::cast(this)->JSTypedArrayPrint(os);
+ JSTypedArray::cast(*this)->JSTypedArrayPrint(os);
break;
case JS_DATA_VIEW_TYPE:
- JSDataView::cast(this)->JSDataViewPrint(os);
+ JSDataView::cast(*this)->JSDataViewPrint(os);
break;
#ifdef V8_INTL_SUPPORT
case JS_INTL_V8_BREAK_ITERATOR_TYPE:
- JSV8BreakIterator::cast(this)->JSV8BreakIteratorPrint(os);
+ JSV8BreakIterator::cast(*this)->JSV8BreakIteratorPrint(os);
break;
case JS_INTL_COLLATOR_TYPE:
- JSCollator::cast(this)->JSCollatorPrint(os);
+ JSCollator::cast(*this)->JSCollatorPrint(os);
break;
case JS_INTL_DATE_TIME_FORMAT_TYPE:
- JSDateTimeFormat::cast(this)->JSDateTimeFormatPrint(os);
+ JSDateTimeFormat::cast(*this)->JSDateTimeFormatPrint(os);
break;
case JS_INTL_LIST_FORMAT_TYPE:
- JSListFormat::cast(this)->JSListFormatPrint(os);
+ JSListFormat::cast(*this)->JSListFormatPrint(os);
break;
case JS_INTL_LOCALE_TYPE:
- JSLocale::cast(this)->JSLocalePrint(os);
+ JSLocale::cast(*this)->JSLocalePrint(os);
break;
case JS_INTL_NUMBER_FORMAT_TYPE:
- JSNumberFormat::cast(this)->JSNumberFormatPrint(os);
+ JSNumberFormat::cast(*this)->JSNumberFormatPrint(os);
break;
case JS_INTL_PLURAL_RULES_TYPE:
- JSPluralRules::cast(this)->JSPluralRulesPrint(os);
+ JSPluralRules::cast(*this)->JSPluralRulesPrint(os);
break;
case JS_INTL_RELATIVE_TIME_FORMAT_TYPE:
- JSRelativeTimeFormat::cast(this)->JSRelativeTimeFormatPrint(os);
+ JSRelativeTimeFormat::cast(*this)->JSRelativeTimeFormatPrint(os);
+ break;
+ case JS_INTL_SEGMENT_ITERATOR_TYPE:
+ JSSegmentIterator::cast(*this)->JSSegmentIteratorPrint(os);
break;
case JS_INTL_SEGMENTER_TYPE:
- JSSegmenter::cast(this)->JSSegmenterPrint(os);
+ JSSegmenter::cast(*this)->JSSegmenterPrint(os);
break;
#endif // V8_INTL_SUPPORT
#define MAKE_STRUCT_CASE(TYPE, Name, name) \
case TYPE: \
- Name::cast(this)->Name##Print(os); \
+ Name::cast(*this)->Name##Print(os); \
break;
STRUCT_LIST(MAKE_STRUCT_CASE)
#undef MAKE_STRUCT_CASE
case ALLOCATION_SITE_TYPE:
- AllocationSite::cast(this)->AllocationSitePrint(os);
+ AllocationSite::cast(*this)->AllocationSitePrint(os);
break;
case LOAD_HANDLER_TYPE:
- LoadHandler::cast(this)->LoadHandlerPrint(os);
+ LoadHandler::cast(*this)->LoadHandlerPrint(os);
break;
case STORE_HANDLER_TYPE:
- StoreHandler::cast(this)->StoreHandlerPrint(os);
+ StoreHandler::cast(*this)->StoreHandlerPrint(os);
break;
case SCOPE_INFO_TYPE:
- ScopeInfo::cast(this)->ScopeInfoPrint(os);
+ ScopeInfo::cast(*this)->ScopeInfoPrint(os);
break;
case FEEDBACK_METADATA_TYPE:
- FeedbackMetadata::cast(this)->FeedbackMetadataPrint(os);
+ FeedbackMetadata::cast(*this)->FeedbackMetadataPrint(os);
break;
case WEAK_FIXED_ARRAY_TYPE:
- WeakFixedArray::cast(this)->WeakFixedArrayPrint(os);
+ WeakFixedArray::cast(*this)->WeakFixedArrayPrint(os);
break;
case WEAK_ARRAY_LIST_TYPE:
- WeakArrayList::cast(this)->WeakArrayListPrint(os);
+ WeakArrayList::cast(*this)->WeakArrayListPrint(os);
break;
case INTERNALIZED_STRING_TYPE:
case EXTERNAL_INTERNALIZED_STRING_TYPE:
@@ -399,6 +435,7 @@ void HeapObject::HeapObjectPrint(std::ostream& os) { // NOLINT
case UNCACHED_EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE:
case SMALL_ORDERED_HASH_MAP_TYPE:
case SMALL_ORDERED_HASH_SET_TYPE:
+ case SMALL_ORDERED_NAME_DICTIONARY_TYPE:
case JS_ASYNC_FROM_SYNC_ITERATOR_TYPE:
case JS_STRING_ITERATOR_TYPE:
// TODO(all): Handle these types too.
@@ -409,11 +446,14 @@ void HeapObject::HeapObjectPrint(std::ostream& os) { // NOLINT
}
void ByteArray::ByteArrayPrint(std::ostream& os) { // NOLINT
- os << "byte array, data starts at " << GetDataStartAddress();
+ PrintHeader(os, "ByteArray");
+ os << "\n - length: " << length()
+ << "\n - data-start: " << static_cast<void*>(GetDataStartAddress())
+ << "\n";
}
void BytecodeArray::BytecodeArrayPrint(std::ostream& os) { // NOLINT
- HeapObject::PrintHeader(os, "BytecodeArray");
+ PrintHeader(os, "BytecodeArray");
Disassemble(os);
}
@@ -431,7 +471,7 @@ void FixedTypedArray<Traits>::FixedTypedArrayPrint(
bool JSObject::PrintProperties(std::ostream& os) { // NOLINT
if (HasFastProperties()) {
- DescriptorArray* descs = map()->instance_descriptors();
+ DescriptorArray descs = map()->instance_descriptors();
int nof_inobject_properties = map()->GetInObjectProperties();
int i = 0;
for (; i < map()->NumberOfOwnDescriptors(); i++) {
@@ -464,7 +504,7 @@ bool JSObject::PrintProperties(std::ostream& os) { // NOLINT
}
return i > 0;
} else if (IsJSGlobalObject()) {
- JSGlobalObject::cast(this)->global_dictionary()->Print(os);
+ JSGlobalObject::cast(*this)->global_dictionary()->Print(os);
} else {
property_dictionary()->Print(os);
}
@@ -474,17 +514,17 @@ bool JSObject::PrintProperties(std::ostream& os) { // NOLINT
namespace {
template <class T>
-bool IsTheHoleAt(T* array, int index) {
+bool IsTheHoleAt(T array, int index) {
return false;
}
template <>
-bool IsTheHoleAt(FixedDoubleArray* array, int index) {
+bool IsTheHoleAt(FixedDoubleArray array, int index) {
return array->is_the_hole(index);
}
template <class T>
-double GetScalarElement(T* array, int index) {
+double GetScalarElement(T array, int index) {
if (IsTheHoleAt(array, index)) {
return std::numeric_limits<double>::quiet_NaN();
}
@@ -492,9 +532,9 @@ double GetScalarElement(T* array, int index) {
}
template <class T>
-void DoPrintElements(std::ostream& os, Object* object) { // NOLINT
+void DoPrintElements(std::ostream& os, Object object) { // NOLINT
const bool print_the_hole = std::is_same<T, FixedDoubleArray>::value;
- T* array = T::cast(object);
+ T array = T::cast(object);
if (array->length() == 0) return;
int previous_index = 0;
double previous_value = GetScalarElement(array, 0);
@@ -525,10 +565,10 @@ void DoPrintElements(std::ostream& os, Object* object) { // NOLINT
}
template <typename T>
-void PrintFixedArrayElements(std::ostream& os, T* array) {
+void PrintFixedArrayElements(std::ostream& os, T array) {
// Print in array notation for non-sparse arrays.
- Object* previous_value = array->length() > 0 ? array->get(0) : nullptr;
- Object* value = nullptr;
+ Object previous_value = array->length() > 0 ? array->get(0) : Object();
+ Object value;
int previous_index = 0;
int i;
for (i = 1; i <= array->length(); i++) {
@@ -548,9 +588,9 @@ void PrintFixedArrayElements(std::ostream& os, T* array) {
}
}
-void PrintDictionaryElements(std::ostream& os, FixedArrayBase* elements) {
+void PrintDictionaryElements(std::ostream& os, FixedArrayBase elements) {
// Print some internal fields
- NumberDictionary* dict = NumberDictionary::cast(elements);
+ NumberDictionary dict = NumberDictionary::cast(elements);
if (dict->requires_slow_elements()) {
os << "\n - requires_slow_elements";
} else {
@@ -560,14 +600,14 @@ void PrintDictionaryElements(std::ostream& os, FixedArrayBase* elements) {
}
void PrintSloppyArgumentElements(std::ostream& os, ElementsKind kind,
- SloppyArgumentsElements* elements) {
- FixedArray* arguments_store = elements->arguments();
+ SloppyArgumentsElements elements) {
+ FixedArray arguments_store = elements->arguments();
os << "\n 0: context: " << Brief(elements->context())
<< "\n 1: arguments_store: " << Brief(arguments_store)
<< "\n parameter to context slot map:";
for (uint32_t i = 0; i < elements->parameter_map_length(); i++) {
uint32_t raw_index = i + SloppyArgumentsElements::kParameterMapStart;
- Object* mapped_entry = elements->get_mapped_entry(i);
+ Object mapped_entry = elements->get_mapped_entry(i);
os << "\n " << raw_index << ": param(" << i
<< "): " << Brief(mapped_entry);
if (mapped_entry->IsTheHole()) {
@@ -588,6 +628,16 @@ void PrintSloppyArgumentElements(std::ostream& os, ElementsKind kind,
}
}
+void PrintEmbedderData(std::ostream& os, EmbedderDataSlot slot) {
+ DisallowHeapAllocation no_gc;
+ Object value = slot.load_tagged();
+ os << Brief(value);
+ void* raw_pointer;
+ if (slot.ToAlignedPointer(&raw_pointer)) {
+ os << ", aligned pointer: " << raw_pointer;
+ }
+}
+
} // namespace
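// A sketch of the one-word duality PrintEmbedderData relies on: an embedder
// data slot holds either a tagged V8 object or an embedder-supplied raw
// pointer, and only suitably aligned words can be handed back as pointers.
// The alignment test below is a simplifying assumption, not the exact
// EmbedderDataSlot logic.
#include <cstdint>

inline bool ToAlignedPointerSketch(uintptr_t raw_word, void** out) {
  constexpr uintptr_t kAlignmentMask = sizeof(void*) - 1;
  if ((raw_word & kAlignmentMask) != 0) return false;  // looks tagged
  *out = reinterpret_cast<void*>(raw_word);
  return true;
}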
void JSObject::PrintElements(std::ostream& os) { // NOLINT
@@ -636,7 +686,7 @@ void JSObject::PrintElements(std::ostream& os) { // NOLINT
os << "\n }\n";
}
-static void JSObjectPrintHeader(std::ostream& os, JSObject* obj,
+static void JSObjectPrintHeader(std::ostream& os, JSObject obj,
const char* id) { // NOLINT
Isolate* isolate = obj->GetIsolate();
obj->PrintHeader(os, id);
@@ -654,7 +704,7 @@ static void JSObjectPrintHeader(std::ostream& os, JSObject* obj,
<< ElementsKindToString(obj->map()->elements_kind());
if (obj->elements()->IsCowArray()) os << " (COW)";
os << "]";
- Object* hash = obj->GetHash();
+ Object hash = obj->GetHash();
if (hash->IsSmi()) {
os << "\n - hash: " << Brief(hash);
}
@@ -664,10 +714,10 @@ static void JSObjectPrintHeader(std::ostream& os, JSObject* obj,
}
static void JSObjectPrintBody(std::ostream& os,
- JSObject* obj, // NOLINT
+ JSObject obj, // NOLINT
bool print_elements = true) {
os << "\n - properties: ";
- Object* properties_or_hash = obj->raw_properties_or_hash();
+ Object properties_or_hash = obj->raw_properties_or_hash();
if (!properties_or_hash->IsSmi()) {
os << Brief(properties_or_hash);
}
@@ -681,19 +731,20 @@ static void JSObjectPrintBody(std::ostream& os,
if (embedder_fields > 0) {
os << " - embedder fields = {";
for (int i = 0; i < embedder_fields; i++) {
- os << "\n " << obj->GetEmbedderField(i);
+ os << "\n ";
+ PrintEmbedderData(os, EmbedderDataSlot(obj, i));
}
os << "\n }\n";
}
}
void JSObject::JSObjectPrint(std::ostream& os) { // NOLINT
- JSObjectPrintHeader(os, this, nullptr);
- JSObjectPrintBody(os, this);
+ JSObjectPrintHeader(os, *this, nullptr);
+ JSObjectPrintBody(os, *this);
}
void JSGeneratorObject::JSGeneratorObjectPrint(std::ostream& os) { // NOLINT
- JSObjectPrintHeader(os, this, "JSGeneratorObject");
+ JSObjectPrintHeader(os, *this, "JSGeneratorObject");
os << "\n - function: " << Brief(function());
os << "\n - context: " << Brief(context());
os << "\n - receiver: " << Brief(receiver());
@@ -722,14 +773,14 @@ void JSGeneratorObject::JSGeneratorObjectPrint(std::ostream& os) { // NOLINT
if (is_suspended()) os << " (suspended)";
if (is_suspended()) {
DisallowHeapAllocation no_gc;
- SharedFunctionInfo* fun_info = function()->shared();
+ SharedFunctionInfo fun_info = function()->shared();
if (fun_info->HasSourceCode()) {
- Script* script = Script::cast(fun_info->script());
+ Script script = Script::cast(fun_info->script());
int lin = script->GetLineNumber(source_position()) + 1;
int col = script->GetColumnNumber(source_position()) + 1;
- String* script_name = script->name()->IsString()
- ? String::cast(script->name())
- : GetReadOnlyRoots().empty_string();
+ String script_name = script->name()->IsString()
+ ? String::cast(script->name())
+ : GetReadOnlyRoots().empty_string();
os << "\n - source position: " << source_position();
os << " (";
script_name->PrintUC16(os);
@@ -739,17 +790,17 @@ void JSGeneratorObject::JSGeneratorObjectPrint(std::ostream& os) { // NOLINT
}
}
os << "\n - register file: " << Brief(parameters_and_registers());
- os << "\n";
+ JSObjectPrintBody(os, *this);
}
void JSArray::JSArrayPrint(std::ostream& os) { // NOLINT
- JSObjectPrintHeader(os, this, "JSArray");
+ JSObjectPrintHeader(os, *this, "JSArray");
os << "\n - length: " << Brief(this->length());
- JSObjectPrintBody(os, this);
+ JSObjectPrintBody(os, *this);
}
void JSPromise::JSPromisePrint(std::ostream& os) { // NOLINT
- JSObjectPrintHeader(os, this, "JSPromise");
+ JSObjectPrintHeader(os, *this, "JSPromise");
os << "\n - status: " << JSPromise::Status(status());
if (status() == Promise::kPending) {
os << "\n - reactions: " << Brief(reactions());
@@ -757,140 +808,65 @@ void JSPromise::JSPromisePrint(std::ostream& os) { // NOLINT
os << "\n - result: " << Brief(result());
}
os << "\n - has_handler: " << has_handler();
- os << "\n ";
+ JSObjectPrintBody(os, *this);
}
void JSRegExp::JSRegExpPrint(std::ostream& os) { // NOLINT
- JSObjectPrintHeader(os, this, "JSRegExp");
+ JSObjectPrintHeader(os, *this, "JSRegExp");
os << "\n - data: " << Brief(data());
os << "\n - source: " << Brief(source());
- JSObjectPrintBody(os, this);
+ JSObjectPrintBody(os, *this);
}
void JSRegExpStringIterator::JSRegExpStringIteratorPrint(
std::ostream& os) { // NOLINT
- JSObjectPrintHeader(os, this, "JSRegExpStringIterator");
+ JSObjectPrintHeader(os, *this, "JSRegExpStringIterator");
os << "\n - regex: " << Brief(iterating_regexp());
os << "\n - string: " << Brief(iterating_string());
os << "\n - done: " << done();
os << "\n - global: " << global();
os << "\n - unicode: " << unicode();
- JSObjectPrintBody(os, this);
+ JSObjectPrintBody(os, *this);
}
void Symbol::SymbolPrint(std::ostream& os) { // NOLINT
- HeapObject::PrintHeader(os, "Symbol");
+ PrintHeader(os, "Symbol");
os << "\n - hash: " << Hash();
os << "\n - name: " << Brief(name());
if (name()->IsUndefined()) {
os << " (" << PrivateSymbolToName() << ")";
}
os << "\n - private: " << is_private();
- os << "\n";
}
-void Map::MapPrint(std::ostream& os) { // NOLINT
- HeapObject::PrintHeader(os, "Map");
- os << "\n - type: " << instance_type();
- os << "\n - instance size: ";
- if (instance_size() == kVariableSizeSentinel) {
- os << "variable";
- } else {
- os << instance_size();
- }
- if (IsJSObjectMap()) {
- os << "\n - inobject properties: " << GetInObjectProperties();
- }
- os << "\n - elements kind: " << ElementsKindToString(elements_kind());
- os << "\n - unused property fields: " << UnusedPropertyFields();
- os << "\n - enum length: ";
- if (EnumLength() == kInvalidEnumCacheSentinel) {
- os << "invalid";
- } else {
- os << EnumLength();
- }
- if (is_deprecated()) os << "\n - deprecated_map";
- if (is_stable()) os << "\n - stable_map";
- if (is_migration_target()) os << "\n - migration_target";
- if (is_dictionary_map()) os << "\n - dictionary_map";
- if (has_hidden_prototype()) os << "\n - has_hidden_prototype";
- if (has_named_interceptor()) os << "\n - named_interceptor";
- if (has_indexed_interceptor()) os << "\n - indexed_interceptor";
- if (may_have_interesting_symbols()) os << "\n - may_have_interesting_symbols";
- if (is_undetectable()) os << "\n - undetectable";
- if (is_callable()) os << "\n - callable";
- if (is_constructor()) os << "\n - constructor";
- if (has_prototype_slot()) {
- os << "\n - has_prototype_slot";
- if (has_non_instance_prototype()) os << " (non-instance prototype)";
- }
- if (is_access_check_needed()) os << "\n - access_check_needed";
- if (!is_extensible()) os << "\n - non-extensible";
- if (is_prototype_map()) {
- os << "\n - prototype_map";
- os << "\n - prototype info: " << Brief(prototype_info());
- } else {
- os << "\n - back pointer: " << Brief(GetBackPointer());
- }
- os << "\n - prototype_validity cell: " << Brief(prototype_validity_cell());
- os << "\n - instance descriptors " << (owns_descriptors() ? "(own) " : "")
- << "#" << NumberOfOwnDescriptors() << ": "
- << Brief(instance_descriptors());
- if (FLAG_unbox_double_fields) {
- os << "\n - layout descriptor: ";
- layout_descriptor()->ShortPrint(os);
- }
-
- Isolate* isolate;
- // Read-only maps can't have transitions, which is fortunate because we need
- // the isolate to iterate over the transitions.
- if (Isolate::FromWritableHeapObject(this, &isolate)) {
- DisallowHeapAllocation no_gc;
- TransitionsAccessor transitions(isolate, this, &no_gc);
- int nof_transitions = transitions.NumberOfTransitions();
- if (nof_transitions > 0) {
- os << "\n - transitions #" << nof_transitions << ": ";
- HeapObject* heap_object;
- Smi* smi;
- if (raw_transitions()->ToSmi(&smi)) {
- os << Brief(smi);
- } else if (raw_transitions()->GetHeapObject(&heap_object)) {
- os << Brief(heap_object);
- }
- transitions.PrintTransitions(os);
- }
- }
- os << "\n - prototype: " << Brief(prototype());
- os << "\n - constructor: " << Brief(GetConstructor());
- os << "\n - dependent code: " << Brief(dependent_code());
- os << "\n - construction counter: " << construction_counter();
- os << "\n";
-}
void DescriptorArray::DescriptorArrayPrint(std::ostream& os) {
- HeapObject::PrintHeader(os, "DescriptorArray");
- os << "\n - capacity: " << length();
- EnumCache* enum_cache = GetEnumCache();
+ PrintHeader(os, "DescriptorArray");
os << "\n - enum_cache: ";
- if (enum_cache->keys()->length() == 0) {
+ if (enum_cache()->keys()->length() == 0) {
os << "empty";
} else {
- os << enum_cache->keys()->length();
- os << "\n - keys: " << Brief(enum_cache->keys());
- os << "\n - indices: " << Brief(enum_cache->indices());
+ os << enum_cache()->keys()->length();
+ os << "\n - keys: " << Brief(enum_cache()->keys());
+ os << "\n - indices: " << Brief(enum_cache()->indices());
}
+ os << "\n - nof slack descriptors: " << number_of_slack_descriptors();
os << "\n - nof descriptors: " << number_of_descriptors();
+ int16_t raw_marked = raw_number_of_marked_descriptors();
+ os << "\n - raw marked descriptors: mc epoch "
+ << NumberOfMarkedDescriptors::Epoch::decode(raw_marked) << ", marked "
+ << NumberOfMarkedDescriptors::Marked::decode(raw_marked);
PrintDescriptors(os);
}
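// The raw marked-descriptors line above decodes two packed fields out of one
// int16_t. A generic sketch of the BitField helper pattern behind
// NumberOfMarkedDescriptors::Epoch/Marked (the shift and size values shown
// are hypothetical):
template <class T, int shift, int size, class U = uint32_t>
class BitField {
 public:
  static constexpr U kMask = ((U{1} << size) - 1) << shift;
  static constexpr U encode(T value) { return static_cast<U>(value) << shift; }
  static constexpr T decode(U value) {
    return static_cast<T>((value & kMask) >> shift);
  }
};
// e.g. (illustrative): using Epoch = BitField<unsigned, 0, 2, uint16_t>;
//                      using Marked = BitField<unsigned, 2, 14, uint16_t>;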
void AliasedArgumentsEntry::AliasedArgumentsEntryPrint(
std::ostream& os) { // NOLINT
- HeapObject::PrintHeader(os, "AliasedArgumentsEntry");
+ PrintHeader(os, "AliasedArgumentsEntry");
os << "\n - aliased_context_slot: " << aliased_context_slot();
}
namespace {
-void PrintFixedArrayWithHeader(std::ostream& os, FixedArray* array,
+void PrintFixedArrayWithHeader(std::ostream& os, FixedArray array,
const char* type) {
array->PrintHeader(os, type);
os << "\n - length: " << array->length();
@@ -899,7 +875,7 @@ void PrintFixedArrayWithHeader(std::ostream& os, FixedArray* array,
}
template <typename T>
-void PrintHashTableWithHeader(std::ostream& os, T* table, const char* type) {
+void PrintHashTableWithHeader(std::ostream& os, T table, const char* type) {
table->PrintHeader(os, type);
os << "\n - length: " << table->length();
os << "\n - elements: " << table->NumberOfElements();
@@ -918,8 +894,9 @@ void PrintHashTableWithHeader(std::ostream& os, T* table, const char* type) {
template <typename T>
void PrintWeakArrayElements(std::ostream& os, T* array) {
// Print in array notation for non-sparse arrays.
- MaybeObject* previous_value = array->length() > 0 ? array->Get(0) : nullptr;
- MaybeObject* value = nullptr;
+ MaybeObject previous_value =
+ array->length() > 0 ? array->Get(0) : MaybeObject(kNullAddress);
+ MaybeObject value;
int previous_index = 0;
int i;
for (i = 1; i <= array->length(); i++) {
@@ -941,39 +918,74 @@ void PrintWeakArrayElements(std::ostream& os, T* array) {
} // namespace
-void FixedArray::FixedArrayPrint(std::ostream& os) { // NOLINT
- PrintFixedArrayWithHeader(os, this, "FixedArray");
+void EmbedderDataArray::EmbedderDataArrayPrint(std::ostream& os) {
+ PrintHeader(os, "EmbedderDataArray");
+ os << "\n - length: " << length();
+ EmbedderDataSlot start(*this, 0);
+ EmbedderDataSlot end(*this, length());
+ for (EmbedderDataSlot slot = start; slot < end; ++slot) {
+ os << "\n ";
+ PrintEmbedderData(os, slot);
+ }
+ os << "\n";
+}
+
+void FixedArray::FixedArrayPrint(std::ostream& os) {
+ PrintFixedArrayWithHeader(os, *this, "FixedArray");
+}
+
+namespace {
+void PrintContextWithHeader(std::ostream& os, Context context,
+ const char* type) {
+ context->PrintHeader(os, type);
+ os << "\n - length: " << context->length();
+ os << "\n - scope_info: " << Brief(context->scope_info());
+ os << "\n - previous: " << Brief(context->previous());
+ os << "\n - extension_object: " << Brief(context->extension_object());
+ os << "\n - native_context: " << Brief(context->native_context());
+ PrintFixedArrayElements(os, context);
+ os << "\n";
+}
+} // namespace
+
+void Context::ContextPrint(std::ostream& os) {
+ PrintContextWithHeader(os, *this, "Context");
+}
+
+void NativeContext::NativeContextPrint(std::ostream& os) {
+ PrintContextWithHeader(os, *this, "NativeContext");
+ os << " - microtask_queue: " << microtask_queue() << "\n";
}
void ObjectHashTable::ObjectHashTablePrint(std::ostream& os) {
- PrintHashTableWithHeader(os, this, "ObjectHashTable");
+ PrintHashTableWithHeader(os, *this, "ObjectHashTable");
}
void NumberDictionary::NumberDictionaryPrint(std::ostream& os) {
- PrintHashTableWithHeader(os, this, "NumberDictionary");
+ PrintHashTableWithHeader(os, *this, "NumberDictionary");
}
void EphemeronHashTable::EphemeronHashTablePrint(std::ostream& os) {
- PrintHashTableWithHeader(os, this, "EphemeronHashTable");
+ PrintHashTableWithHeader(os, *this, "EphemeronHashTable");
}
void ObjectBoilerplateDescription::ObjectBoilerplateDescriptionPrint(
std::ostream& os) {
- PrintFixedArrayWithHeader(os, this, "ObjectBoilerplateDescription");
+ PrintFixedArrayWithHeader(os, *this, "ObjectBoilerplateDescription");
}
void PropertyArray::PropertyArrayPrint(std::ostream& os) { // NOLINT
- HeapObject::PrintHeader(os, "PropertyArray");
+ PrintHeader(os, "PropertyArray");
os << "\n - length: " << length();
os << "\n - hash: " << Hash();
- PrintFixedArrayElements(os, this);
+ PrintFixedArrayElements(os, *this);
os << "\n";
}
void FixedDoubleArray::FixedDoubleArrayPrint(std::ostream& os) { // NOLINT
- HeapObject::PrintHeader(os, "FixedDoubleArray");
+ PrintHeader(os, "FixedDoubleArray");
os << "\n - length: " << length();
- DoPrintElements<FixedDoubleArray>(os, this);
+ DoPrintElements<FixedDoubleArray>(os, *this);
os << "\n";
}
@@ -993,12 +1005,12 @@ void WeakArrayList::WeakArrayListPrint(std::ostream& os) {
}
void TransitionArray::TransitionArrayPrint(std::ostream& os) { // NOLINT
- HeapObject::PrintHeader(os, "TransitionArray");
+ PrintHeader(os, "TransitionArray");
PrintInternal(os);
}
void FeedbackCell::FeedbackCellPrint(std::ostream& os) { // NOLINT
- HeapObject::PrintHeader(os, "FeedbackCell");
+ PrintHeader(os, "FeedbackCell");
ReadOnlyRoots roots = GetReadOnlyRoots();
if (map() == roots.no_closures_cell_map()) {
os << "\n - no closures";
@@ -1040,10 +1052,10 @@ void FeedbackVectorSpec::FeedbackVectorSpecPrint(std::ostream& os) { // NOLINT
}
void FeedbackMetadata::FeedbackMetadataPrint(std::ostream& os) {
- HeapObject::PrintHeader(os, "FeedbackMetadata");
+ PrintHeader(os, "FeedbackMetadata");
os << "\n - slot_count: " << slot_count();
- FeedbackMetadataIterator iter(this);
+ FeedbackMetadataIterator iter(*this);
while (iter.HasNext()) {
FeedbackSlot slot = iter.Next();
FeedbackSlotKind kind = iter.kind();
@@ -1053,7 +1065,7 @@ void FeedbackMetadata::FeedbackMetadataPrint(std::ostream& os) {
}
void FeedbackVector::FeedbackVectorPrint(std::ostream& os) { // NOLINT
- HeapObject::PrintHeader(os, "FeedbackVector");
+ PrintHeader(os, "FeedbackVector");
os << "\n - length: " << length();
if (length() == 0) {
os << " (empty)\n";
@@ -1091,7 +1103,7 @@ void FeedbackVector::FeedbackVectorPrint(std::ostream& os) { // NOLINT
void FeedbackVector::FeedbackSlotPrint(std::ostream& os,
FeedbackSlot slot) { // NOLINT
- FeedbackNexus nexus(this, slot);
+ FeedbackNexus nexus(*this, slot);
nexus.Print(os);
}
@@ -1140,20 +1152,20 @@ void FeedbackNexus::Print(std::ostream& os) { // NOLINT
}
void JSValue::JSValuePrint(std::ostream& os) { // NOLINT
- JSObjectPrintHeader(os, this, "JSValue");
+ JSObjectPrintHeader(os, *this, "JSValue");
os << "\n - value: " << Brief(value());
- JSObjectPrintBody(os, this);
+ JSObjectPrintBody(os, *this);
}
void JSMessageObject::JSMessageObjectPrint(std::ostream& os) { // NOLINT
- JSObjectPrintHeader(os, this, "JSMessageObject");
- os << "\n - type: " << type();
+ JSObjectPrintHeader(os, *this, "JSMessageObject");
+ os << "\n - type: " << static_cast<int>(type());
os << "\n - arguments: " << Brief(argument());
os << "\n - start_position: " << start_position();
os << "\n - end_position: " << end_position();
os << "\n - script: " << Brief(script());
os << "\n - stack_frames: " << Brief(stack_frames());
- JSObjectPrintBody(os, this);
+ JSObjectPrintBody(os, *this);
}
@@ -1161,11 +1173,11 @@ void String::StringPrint(std::ostream& os) { // NOLINT
if (!HasOnlyOneByteChars()) {
os << "u";
}
- if (StringShape(this).IsInternalized()) {
+ if (StringShape(*this).IsInternalized()) {
os << "#";
- } else if (StringShape(this).IsCons()) {
+ } else if (StringShape(*this).IsCons()) {
os << "c\"";
- } else if (StringShape(this).IsThin()) {
+ } else if (StringShape(*this).IsThin()) {
os << ">\"";
} else {
os << "\"";
@@ -1185,15 +1197,15 @@ void String::StringPrint(std::ostream& os) { // NOLINT
os << truncated_epilogue;
}
- if (!StringShape(this).IsInternalized()) os << "\"";
+ if (!StringShape(*this).IsInternalized()) os << "\"";
}
void Name::NamePrint(std::ostream& os) { // NOLINT
if (IsString()) {
- String::cast(this)->StringPrint(os);
+ String::cast(*this)->StringPrint(os);
} else {
- os << Brief(this);
+ os << Brief(*this);
}
}
@@ -1203,7 +1215,7 @@ static const char* const weekdays[] = {
};
void JSDate::JSDatePrint(std::ostream& os) { // NOLINT
- JSObjectPrintHeader(os, this, "JSDate");
+ JSObjectPrintHeader(os, *this, "JSDate");
os << "\n - value: " << Brief(value());
if (!year()->IsSmi()) {
os << "\n - time = NaN\n";
@@ -1220,12 +1232,12 @@ void JSDate::JSDatePrint(std::ostream& os) { // NOLINT
sec()->IsSmi() ? Smi::ToInt(sec()) : -1);
os << buf.start();
}
- JSObjectPrintBody(os, this);
+ JSObjectPrintBody(os, *this);
}
void JSProxy::JSProxyPrint(std::ostream& os) { // NOLINT
- HeapObject::PrintHeader(os, "JSProxy");
+ PrintHeader(os, "JSProxy");
os << "\n - target: ";
target()->ShortPrint(os);
os << "\n - handler: ";
@@ -1234,97 +1246,134 @@ void JSProxy::JSProxyPrint(std::ostream& os) { // NOLINT
}
void JSSet::JSSetPrint(std::ostream& os) { // NOLINT
- JSObjectPrintHeader(os, this, "JSSet");
+ JSObjectPrintHeader(os, *this, "JSSet");
os << " - table: " << Brief(table());
- JSObjectPrintBody(os, this);
+ JSObjectPrintBody(os, *this);
}
void JSMap::JSMapPrint(std::ostream& os) { // NOLINT
- JSObjectPrintHeader(os, this, "JSMap");
+ JSObjectPrintHeader(os, *this, "JSMap");
os << " - table: " << Brief(table());
- JSObjectPrintBody(os, this);
+ JSObjectPrintBody(os, *this);
}
void JSCollectionIterator::JSCollectionIteratorPrint(
- std::ostream& os) { // NOLINT
+ std::ostream& os, const char* name) { // NOLINT
+ JSObjectPrintHeader(os, *this, name);
os << "\n - table: " << Brief(table());
os << "\n - index: " << Brief(index());
- os << "\n";
+ JSObjectPrintBody(os, *this);
}
void JSSetIterator::JSSetIteratorPrint(std::ostream& os) { // NOLINT
- JSObjectPrintHeader(os, this, "JSSetIterator");
- JSCollectionIteratorPrint(os);
+ JSCollectionIteratorPrint(os, "JSSetIterator");
}
void JSMapIterator::JSMapIteratorPrint(std::ostream& os) { // NOLINT
- JSObjectPrintHeader(os, this, "JSMapIterator");
- JSCollectionIteratorPrint(os);
+ JSCollectionIteratorPrint(os, "JSMapIterator");
+}
+
+void JSWeakCell::JSWeakCellPrint(std::ostream& os) {
+ JSObjectPrintHeader(os, *this, "JSWeakCell");
+ os << "\n - factory: " << Brief(factory());
+ os << "\n - target: " << Brief(target());
+ os << "\n - holdings: " << Brief(holdings());
+ os << "\n - prev: " << Brief(prev());
+ os << "\n - next: " << Brief(next());
+ JSObjectPrintBody(os, *this);
+}
+
+void JSWeakRef::JSWeakRefPrint(std::ostream& os) {
+ JSObjectPrintHeader(os, *this, "JSWeakRef");
+ os << "\n - target: " << Brief(target());
+ JSObjectPrintBody(os, *this);
+}
+
+void JSWeakFactory::JSWeakFactoryPrint(std::ostream& os) {
+ JSObjectPrintHeader(os, *this, "JSWeakFactory");
+ os << "\n - native_context: " << Brief(native_context());
+ os << "\n - cleanup: " << Brief(cleanup());
+ os << "\n - active_cells: " << Brief(active_cells());
+ os << "\n - cleared_cells: " << Brief(cleared_cells());
+ JSObjectPrintBody(os, *this);
+}
+
+void JSWeakFactoryCleanupIterator::JSWeakFactoryCleanupIteratorPrint(
+ std::ostream& os) {
+ JSObjectPrintHeader(os, *this, "JSWeakFactoryCleanupIterator");
+ os << "\n - factory: " << Brief(factory());
+ JSObjectPrintBody(os, *this);
+}
+
+void WeakFactoryCleanupJobTask::WeakFactoryCleanupJobTaskPrint(
+ std::ostream& os) {
+ PrintHeader(os, "WeakFactoryCleanupJobTask");
+ os << "\n - factory: " << Brief(factory());
}
void JSWeakMap::JSWeakMapPrint(std::ostream& os) { // NOLINT
- JSObjectPrintHeader(os, this, "JSWeakMap");
+ JSObjectPrintHeader(os, *this, "JSWeakMap");
os << "\n - table: " << Brief(table());
- JSObjectPrintBody(os, this);
+ JSObjectPrintBody(os, *this);
}
void JSWeakSet::JSWeakSetPrint(std::ostream& os) { // NOLINT
- JSObjectPrintHeader(os, this, "JSWeakSet");
+ JSObjectPrintHeader(os, *this, "JSWeakSet");
os << "\n - table: " << Brief(table());
- JSObjectPrintBody(os, this);
+ JSObjectPrintBody(os, *this);
}
void JSArrayBuffer::JSArrayBufferPrint(std::ostream& os) { // NOLINT
- JSObjectPrintHeader(os, this, "JSArrayBuffer");
+ JSObjectPrintHeader(os, *this, "JSArrayBuffer");
os << "\n - backing_store: " << backing_store();
os << "\n - byte_length: " << byte_length();
if (is_external()) os << "\n - external";
- if (is_neuterable()) os << "\n - neuterable";
- if (was_neutered()) os << "\n - neutered";
+ if (is_detachable()) os << "\n - detachable";
+ if (was_detached()) os << "\n - detached";
if (is_shared()) os << "\n - shared";
if (is_wasm_memory()) os << "\n - is_wasm_memory";
if (is_growable()) os << "\n - growable";
- JSObjectPrintBody(os, this, !was_neutered());
+ JSObjectPrintBody(os, *this, !was_detached());
}
void JSTypedArray::JSTypedArrayPrint(std::ostream& os) { // NOLINT
- JSObjectPrintHeader(os, this, "JSTypedArray");
+ JSObjectPrintHeader(os, *this, "JSTypedArray");
os << "\n - buffer: " << Brief(buffer());
os << "\n - byte_offset: " << byte_offset();
os << "\n - byte_length: " << byte_length();
os << "\n - length: " << Brief(length());
- if (WasNeutered()) os << "\n - neutered";
- JSObjectPrintBody(os, this, !WasNeutered());
+ if (WasDetached()) os << "\n - detached";
+ JSObjectPrintBody(os, *this, !WasDetached());
}
void JSArrayIterator::JSArrayIteratorPrint(std::ostream& os) { // NOLINT
- JSObjectPrintHeader(os, this, "JSArrayIterator");
+ JSObjectPrintHeader(os, *this, "JSArrayIterator");
os << "\n - iterated_object: " << Brief(iterated_object());
os << "\n - next_index: " << Brief(next_index());
os << "\n - kind: " << kind();
- JSObjectPrintBody(os, this);
+ JSObjectPrintBody(os, *this);
}
void JSDataView::JSDataViewPrint(std::ostream& os) { // NOLINT
- JSObjectPrintHeader(os, this, "JSDataView");
+ JSObjectPrintHeader(os, *this, "JSDataView");
os << "\n - buffer =" << Brief(buffer());
os << "\n - byte_offset: " << byte_offset();
os << "\n - byte_length: " << byte_length();
- if (WasNeutered()) os << "\n - neutered";
- JSObjectPrintBody(os, this, !WasNeutered());
+ if (WasDetached()) os << "\n - detached";
+ JSObjectPrintBody(os, *this, !WasDetached());
}
void JSBoundFunction::JSBoundFunctionPrint(std::ostream& os) { // NOLINT
- JSObjectPrintHeader(os, this, "JSBoundFunction");
+ JSObjectPrintHeader(os, *this, "JSBoundFunction");
os << "\n - bound_target_function: " << Brief(bound_target_function());
os << "\n - bound_this: " << Brief(bound_this());
os << "\n - bound_arguments: " << Brief(bound_arguments());
- JSObjectPrintBody(os, this);
+ JSObjectPrintBody(os, *this);
}
void JSFunction::JSFunctionPrint(std::ostream& os) { // NOLINT
Isolate* isolate = GetIsolate();
- JSObjectPrintHeader(os, this, "Function");
+ JSObjectPrintHeader(os, *this, "Function");
os << "\n - function prototype: ";
if (has_prototype_slot()) {
if (has_prototype()) {
@@ -1343,16 +1392,8 @@ void JSFunction::JSFunctionPrint(std::ostream& os) { // NOLINT
// Print Builtin name for builtin functions
int builtin_index = code()->builtin_index();
- if (builtin_index != -1 && !IsInterpreted()) {
- if (builtin_index == Builtins::kDeserializeLazy) {
- if (shared()->HasBuiltinId()) {
- builtin_index = shared()->builtin_id();
- os << "\n - builtin: " << isolate->builtins()->name(builtin_index)
- << "(lazy)";
- }
- } else {
- os << "\n - builtin: " << isolate->builtins()->name(builtin_index);
- }
+ if (Builtins::IsBuiltinId(builtin_index) && !IsInterpreted()) {
+ os << "\n - builtin: " << isolate->builtins()->name(builtin_index);
}
os << "\n - formal_parameter_count: "
@@ -1366,14 +1407,14 @@ void JSFunction::JSFunctionPrint(std::ostream& os) { // NOLINT
os << "\n - bytecode: " << shared()->GetBytecodeArray();
}
}
- if (WasmExportedFunction::IsWasmExportedFunction(this)) {
- WasmExportedFunction* function = WasmExportedFunction::cast(this);
+ if (WasmExportedFunction::IsWasmExportedFunction(*this)) {
+ WasmExportedFunction function = WasmExportedFunction::cast(*this);
os << "\n - WASM instance "
- << reinterpret_cast<void*>(function->instance());
+ << reinterpret_cast<void*>(function->instance()->ptr());
os << "\n - WASM function index " << function->function_index();
}
shared()->PrintSourceCode(os);
- JSObjectPrintBody(os, this);
+ JSObjectPrintBody(os, *this);
os << "\n - feedback vector: ";
if (!shared()->HasFeedbackMetadata()) {
os << "feedback metadata is not available in SFI\n";
@@ -1387,7 +1428,7 @@ void JSFunction::JSFunctionPrint(std::ostream& os) { // NOLINT
void SharedFunctionInfo::PrintSourceCode(std::ostream& os) {
if (HasSourceCode()) {
os << "\n - source code: ";
- String* source = String::cast(Script::cast(script())->source());
+ String source = String::cast(Script::cast(script())->source());
int start = StartPosition();
int length = EndPosition() - start;
std::unique_ptr<char[]> source_string = source->ToCString(
@@ -1397,7 +1438,7 @@ void SharedFunctionInfo::PrintSourceCode(std::ostream& os) {
}
void SharedFunctionInfo::SharedFunctionInfoPrint(std::ostream& os) { // NOLINT
- HeapObject::PrintHeader(os, "SharedFunctionInfo");
+ PrintHeader(os, "SharedFunctionInfo");
os << "\n - name: ";
if (HasSharedName()) {
os << Brief(Name());
@@ -1451,30 +1492,30 @@ void SharedFunctionInfo::SharedFunctionInfoPrint(std::ostream& os) { // NOLINT
}
void JSGlobalProxy::JSGlobalProxyPrint(std::ostream& os) { // NOLINT
- JSObjectPrintHeader(os, this, "JSGlobalProxy");
+ JSObjectPrintHeader(os, *this, "JSGlobalProxy");
if (!GetIsolate()->bootstrapper()->IsActive()) {
os << "\n - native context: " << Brief(native_context());
}
- JSObjectPrintBody(os, this);
+ JSObjectPrintBody(os, *this);
}
void JSGlobalObject::JSGlobalObjectPrint(std::ostream& os) { // NOLINT
- JSObjectPrintHeader(os, this, "JSGlobalObject");
+ JSObjectPrintHeader(os, *this, "JSGlobalObject");
if (!GetIsolate()->bootstrapper()->IsActive()) {
os << "\n - native context: " << Brief(native_context());
}
os << "\n - global proxy: " << Brief(global_proxy());
- JSObjectPrintBody(os, this);
+ JSObjectPrintBody(os, *this);
}
void Cell::CellPrint(std::ostream& os) { // NOLINT
- HeapObject::PrintHeader(os, "Cell");
+ PrintHeader(os, "Cell");
os << "\n - value: " << Brief(value());
os << "\n";
}
void PropertyCell::PropertyCellPrint(std::ostream& os) { // NOLINT
- HeapObject::PrintHeader(os, "PropertyCell");
+ PrintHeader(os, "PropertyCell");
os << "\n - name: ";
name()->NamePrint(os);
os << "\n - value: " << Brief(value());
@@ -1524,7 +1565,7 @@ void PropertyCell::PropertyCellPrint(std::ostream& os) { // NOLINT
}
void Code::CodePrint(std::ostream& os) { // NOLINT
- HeapObject::PrintHeader(os, "Code");
+ PrintHeader(os, "Code");
os << "\n";
#ifdef ENABLE_DISASSEMBLER
if (FLAG_use_verbose_printer) {
@@ -1534,7 +1575,7 @@ void Code::CodePrint(std::ostream& os) { // NOLINT
}
void CodeDataContainer::CodeDataContainerPrint(std::ostream& os) { // NOLINT
- HeapObject::PrintHeader(os, "CodeDataContainer");
+ PrintHeader(os, "CodeDataContainer");
os << "\n - kind_specific_flags: " << kind_specific_flags();
os << "\n";
}
@@ -1546,7 +1587,7 @@ void Foreign::ForeignPrint(std::ostream& os) { // NOLINT
void AccessorInfo::AccessorInfoPrint(std::ostream& os) { // NOLINT
- HeapObject::PrintHeader(os, "AccessorInfo");
+ PrintHeader(os, "AccessorInfo");
os << "\n - name: " << Brief(name());
os << "\n - flags: " << flags();
os << "\n - getter: " << Brief(getter());
@@ -1557,14 +1598,14 @@ void AccessorInfo::AccessorInfoPrint(std::ostream& os) { // NOLINT
}
void CallbackTask::CallbackTaskPrint(std::ostream& os) { // NOLINT
- HeapObject::PrintHeader(os, "CallbackTask");
+ PrintHeader(os, "CallbackTask");
os << "\n - callback: " << Brief(callback());
os << "\n - data: " << Brief(data());
os << "\n";
}
void CallableTask::CallableTaskPrint(std::ostream& os) { // NOLINT
- HeapObject::PrintHeader(os, "CallableTask");
+ PrintHeader(os, "CallableTask");
os << "\n - context: " << Brief(context());
os << "\n - callable: " << Brief(callable());
os << "\n";
@@ -1572,7 +1613,7 @@ void CallableTask::CallableTaskPrint(std::ostream& os) { // NOLINT
void PromiseFulfillReactionJobTask::PromiseFulfillReactionJobTaskPrint(
std::ostream& os) { // NOLINT
- HeapObject::PrintHeader(os, "PromiseFulfillReactionJobTask");
+ PrintHeader(os, "PromiseFulfillReactionJobTask");
os << "\n - argument: " << Brief(argument());
os << "\n - context: " << Brief(context());
os << "\n - handler: " << Brief(handler());
@@ -1582,7 +1623,7 @@ void PromiseFulfillReactionJobTask::PromiseFulfillReactionJobTaskPrint(
void PromiseRejectReactionJobTask::PromiseRejectReactionJobTaskPrint(
std::ostream& os) { // NOLINT
- HeapObject::PrintHeader(os, "PromiseRejectReactionJobTask");
+ PrintHeader(os, "PromiseRejectReactionJobTask");
os << "\n - argument: " << Brief(argument());
os << "\n - context: " << Brief(context());
os << "\n - handler: " << Brief(handler());
@@ -1592,7 +1633,7 @@ void PromiseRejectReactionJobTask::PromiseRejectReactionJobTaskPrint(
void PromiseResolveThenableJobTask::PromiseResolveThenableJobTaskPrint(
std::ostream& os) { // NOLINT
- HeapObject::PrintHeader(os, "PromiseResolveThenableJobTask");
+ PrintHeader(os, "PromiseResolveThenableJobTask");
os << "\n - context: " << Brief(context());
os << "\n - promise_to_resolve: " << Brief(promise_to_resolve());
os << "\n - then: " << Brief(then());
@@ -1601,7 +1642,7 @@ void PromiseResolveThenableJobTask::PromiseResolveThenableJobTaskPrint(
}
void PromiseCapability::PromiseCapabilityPrint(std::ostream& os) { // NOLINT
- HeapObject::PrintHeader(os, "PromiseCapability");
+ PrintHeader(os, "PromiseCapability");
os << "\n - promise: " << Brief(promise());
os << "\n - resolve: " << Brief(resolve());
os << "\n - reject: " << Brief(reject());
@@ -1609,7 +1650,7 @@ void PromiseCapability::PromiseCapabilityPrint(std::ostream& os) { // NOLINT
}
void PromiseReaction::PromiseReactionPrint(std::ostream& os) { // NOLINT
- HeapObject::PrintHeader(os, "PromiseReaction");
+ PrintHeader(os, "PromiseReaction");
os << "\n - next: " << Brief(next());
os << "\n - reject_handler: " << Brief(reject_handler());
os << "\n - fulfill_handler: " << Brief(fulfill_handler());
@@ -1619,7 +1660,7 @@ void PromiseReaction::PromiseReactionPrint(std::ostream& os) { // NOLINT
void AsyncGeneratorRequest::AsyncGeneratorRequestPrint(
std::ostream& os) { // NOLINT
- HeapObject::PrintHeader(os, "AsyncGeneratorRequest");
+ PrintHeader(os, "AsyncGeneratorRequest");
const char* mode = "Invalid!";
switch (resume_mode()) {
case JSGeneratorObject::kNext:
@@ -1639,7 +1680,7 @@ void AsyncGeneratorRequest::AsyncGeneratorRequestPrint(
}
void ModuleInfoEntry::ModuleInfoEntryPrint(std::ostream& os) { // NOLINT
- HeapObject::PrintHeader(os, "ModuleInfoEntry");
+ PrintHeader(os, "ModuleInfoEntry");
os << "\n - export_name: " << Brief(export_name());
os << "\n - local_name: " << Brief(local_name());
os << "\n - import_name: " << Brief(import_name());
@@ -1651,7 +1692,7 @@ void ModuleInfoEntry::ModuleInfoEntryPrint(std::ostream& os) { // NOLINT
}
void Module::ModulePrint(std::ostream& os) { // NOLINT
- HeapObject::PrintHeader(os, "Module");
+ PrintHeader(os, "Module");
os << "\n - origin: " << Brief(script()->GetNameOrSourceURL());
os << "\n - code: " << Brief(code());
os << "\n - exports: " << Brief(exports());
@@ -1664,13 +1705,13 @@ void Module::ModulePrint(std::ostream& os) { // NOLINT
}
void JSModuleNamespace::JSModuleNamespacePrint(std::ostream& os) { // NOLINT
- JSObjectPrintHeader(os, this, "JSModuleNamespace");
+ JSObjectPrintHeader(os, *this, "JSModuleNamespace");
os << "\n - module: " << Brief(module());
- JSObjectPrintBody(os, this);
+ JSObjectPrintBody(os, *this);
}
void PrototypeInfo::PrototypeInfoPrint(std::ostream& os) { // NOLINT
- HeapObject::PrintHeader(os, "PrototypeInfo");
+ PrintHeader(os, "PrototypeInfo");
os << "\n - module namespace: " << Brief(module_namespace());
os << "\n - prototype users: " << Brief(prototype_users());
os << "\n - registry slot: " << registry_slot();
@@ -1680,14 +1721,14 @@ void PrototypeInfo::PrototypeInfoPrint(std::ostream& os) { // NOLINT
}
void Tuple2::Tuple2Print(std::ostream& os) { // NOLINT
- HeapObject::PrintHeader(os, "Tuple2");
+ PrintHeader(os, "Tuple2");
os << "\n - value1: " << Brief(value1());
os << "\n - value2: " << Brief(value2());
os << "\n";
}
void Tuple3::Tuple3Print(std::ostream& os) { // NOLINT
- HeapObject::PrintHeader(os, "Tuple3");
+ PrintHeader(os, "Tuple3");
os << "\n - value1: " << Brief(value1());
os << "\n - value2: " << Brief(value2());
os << "\n - value3: " << Brief(value3());
@@ -1696,28 +1737,46 @@ void Tuple3::Tuple3Print(std::ostream& os) { // NOLINT
void ArrayBoilerplateDescription::ArrayBoilerplateDescriptionPrint(
std::ostream& os) { // NOLINT
- HeapObject::PrintHeader(os, "ArrayBoilerplateDescription");
+ PrintHeader(os, "ArrayBoilerplateDescription");
os << "\n - elements kind: " << elements_kind();
os << "\n - constant elements: " << Brief(constant_elements());
os << "\n";
}
+void AsmWasmData::AsmWasmDataPrint(std::ostream& os) { // NOLINT
+ PrintHeader(os, "AsmWasmData");
+ os << "\n - native module: " << Brief(managed_native_module());
+ os << "\n - export_wrappers: " << Brief(export_wrappers());
+ os << "\n - offset table: " << Brief(asm_js_offset_table());
+ os << "\n - uses bitset: " << uses_bitset()->value();
+ os << "\n";
+}
+
void WasmDebugInfo::WasmDebugInfoPrint(std::ostream& os) { // NOLINT
- HeapObject::PrintHeader(os, "WasmDebugInfo");
+ PrintHeader(os, "WasmDebugInfo");
os << "\n - wasm_instance: " << Brief(wasm_instance());
os << "\n";
}
+void WasmExceptionTag::WasmExceptionTagPrint(std::ostream& os) { // NOLINT
+ PrintHeader(os, "WasmExceptionTag");
+ os << "\n - index: " << index();
+ os << "\n";
+}
+
void WasmInstanceObject::WasmInstanceObjectPrint(std::ostream& os) { // NOLINT
- HeapObject::PrintHeader(os, "WasmInstanceObject");
+ PrintHeader(os, "WasmInstanceObject");
os << "\n - module_object: " << Brief(module_object());
os << "\n - exports_object: " << Brief(exports_object());
os << "\n - native_context: " << Brief(native_context());
if (has_memory_object()) {
os << "\n - memory_object: " << Brief(memory_object());
}
- if (has_globals_buffer()) {
- os << "\n - globals_buffer: " << Brief(globals_buffer());
+ if (has_untagged_globals_buffer()) {
+ os << "\n - untagged_globals_buffer: " << Brief(untagged_globals_buffer());
+ }
+ if (has_tagged_globals_buffer()) {
+ os << "\n - tagged_globals_buffer: " << Brief(tagged_globals_buffer());
}
if (has_imported_mutable_globals_buffers()) {
os << "\n - imported_mutable_globals_buffers: "
@@ -1729,13 +1788,10 @@ void WasmInstanceObject::WasmInstanceObjectPrint(std::ostream& os) { // NOLINT
if (has_table_object()) {
os << "\n - table_object: " << Brief(table_object());
}
- os << "\n - imported_function_instances: "
- << Brief(imported_function_instances());
- os << "\n - imported_function_callables: "
- << Brief(imported_function_callables());
- if (has_indirect_function_table_instances()) {
- os << "\n - indirect_function_table_instances: "
- << Brief(indirect_function_table_instances());
+ os << "\n - imported_function_refs: " << Brief(imported_function_refs());
+ if (has_indirect_function_table_refs()) {
+ os << "\n - indirect_function_table_refs: "
+ << Brief(indirect_function_table_refs());
}
if (has_managed_native_allocations()) {
os << "\n - managed_native_allocations: "
@@ -1759,7 +1815,7 @@ void WasmInstanceObject::WasmInstanceObjectPrint(std::ostream& os) { // NOLINT
void WasmExportedFunctionData::WasmExportedFunctionDataPrint(
std::ostream& os) { // NOLINT
- HeapObject::PrintHeader(os, "WasmExportedFunctionData");
+ PrintHeader(os, "WasmExportedFunctionData");
os << "\n - wrapper_code: " << Brief(wrapper_code());
os << "\n - instance: " << Brief(instance());
os << "\n - function_index: " << function_index();
@@ -1767,7 +1823,7 @@ void WasmExportedFunctionData::WasmExportedFunctionDataPrint(
}
void WasmModuleObject::WasmModuleObjectPrint(std::ostream& os) { // NOLINT
- HeapObject::PrintHeader(os, "WasmModuleObject");
+ PrintHeader(os, "WasmModuleObject");
os << "\n - module: " << module();
os << "\n - native module: " << native_module();
os << "\n - export wrappers: " << Brief(export_wrappers());
@@ -1782,7 +1838,7 @@ void WasmModuleObject::WasmModuleObjectPrint(std::ostream& os) { // NOLINT
}
void LoadHandler::LoadHandlerPrint(std::ostream& os) { // NOLINT
- HeapObject::PrintHeader(os, "LoadHandler");
+ PrintHeader(os, "LoadHandler");
// TODO(ishell): implement printing based on handler kind
os << "\n - handler: " << Brief(smi_handler());
os << "\n - validity_cell: " << Brief(validity_cell());
@@ -1800,7 +1856,7 @@ void LoadHandler::LoadHandlerPrint(std::ostream& os) { // NOLINT
}
void StoreHandler::StoreHandlerPrint(std::ostream& os) { // NOLINT
- HeapObject::PrintHeader(os, "StoreHandler");
+ PrintHeader(os, "StoreHandler");
// TODO(ishell): implement printing based on handler kind
os << "\n - handler: " << Brief(smi_handler());
os << "\n - validity_cell: " << Brief(validity_cell());
@@ -1818,7 +1874,7 @@ void StoreHandler::StoreHandlerPrint(std::ostream& os) { // NOLINT
}
void AccessorPair::AccessorPairPrint(std::ostream& os) { // NOLINT
- HeapObject::PrintHeader(os, "AccessorPair");
+ PrintHeader(os, "AccessorPair");
os << "\n - getter: " << Brief(getter());
os << "\n - setter: " << Brief(setter());
os << "\n";
@@ -1826,7 +1882,7 @@ void AccessorPair::AccessorPairPrint(std::ostream& os) { // NOLINT
void AccessCheckInfo::AccessCheckInfoPrint(std::ostream& os) { // NOLINT
- HeapObject::PrintHeader(os, "AccessCheckInfo");
+ PrintHeader(os, "AccessCheckInfo");
os << "\n - callback: " << Brief(callback());
os << "\n - named_interceptor: " << Brief(named_interceptor());
os << "\n - indexed_interceptor: " << Brief(indexed_interceptor());
@@ -1835,7 +1891,7 @@ void AccessCheckInfo::AccessCheckInfoPrint(std::ostream& os) { // NOLINT
}
void CallHandlerInfo::CallHandlerInfoPrint(std::ostream& os) { // NOLINT
- HeapObject::PrintHeader(os, "CallHandlerInfo");
+ PrintHeader(os, "CallHandlerInfo");
os << "\n - callback: " << Brief(callback());
os << "\n - js_callback: " << Brief(js_callback());
os << "\n - data: " << Brief(data());
@@ -1845,7 +1901,7 @@ void CallHandlerInfo::CallHandlerInfoPrint(std::ostream& os) { // NOLINT
}
void InterceptorInfo::InterceptorInfoPrint(std::ostream& os) { // NOLINT
- HeapObject::PrintHeader(os, "InterceptorInfo");
+ PrintHeader(os, "InterceptorInfo");
os << "\n - getter: " << Brief(getter());
os << "\n - setter: " << Brief(setter());
os << "\n - query: " << Brief(query());
@@ -1858,31 +1914,40 @@ void InterceptorInfo::InterceptorInfoPrint(std::ostream& os) { // NOLINT
void FunctionTemplateInfo::FunctionTemplateInfoPrint(
std::ostream& os) { // NOLINT
- HeapObject::PrintHeader(os, "FunctionTemplateInfo");
+ PrintHeader(os, "FunctionTemplateInfo");
os << "\n - class name: " << Brief(class_name());
os << "\n - tag: " << Brief(tag());
os << "\n - serial_number: " << Brief(serial_number());
os << "\n - property_list: " << Brief(property_list());
os << "\n - call_code: " << Brief(call_code());
os << "\n - property_accessors: " << Brief(property_accessors());
- os << "\n - prototype_template: " << Brief(prototype_template());
- os << "\n - parent_template: " << Brief(parent_template());
- os << "\n - named_property_handler: " << Brief(named_property_handler());
- os << "\n - indexed_property_handler: " << Brief(indexed_property_handler());
- os << "\n - instance_template: " << Brief(instance_template());
os << "\n - signature: " << Brief(signature());
- os << "\n - access_check_info: " << Brief(access_check_info());
os << "\n - cached_property_name: " << Brief(cached_property_name());
os << "\n - hidden_prototype: " << (hidden_prototype() ? "true" : "false");
os << "\n - undetectable: " << (undetectable() ? "true" : "false");
os << "\n - need_access_check: " << (needs_access_check() ? "true" : "false");
os << "\n - instantiated: " << (instantiated() ? "true" : "false");
+ os << "\n - rare_data: " << Brief(rare_data());
os << "\n";
}
+void FunctionTemplateRareData::FunctionTemplateRareDataPrint(
+ std::ostream& os) { // NOLINT
+ PrintHeader(os, "FunctionTemplateRareData");
+ os << "\n - prototype_template: " << Brief(prototype_template());
+ os << "\n - prototype_provider_template: "
+ << Brief(prototype_provider_template());
+ os << "\n - parent_template: " << Brief(parent_template());
+ os << "\n - named_property_handler: " << Brief(named_property_handler());
+ os << "\n - indexed_property_handler: " << Brief(indexed_property_handler());
+ os << "\n - instance_template: " << Brief(instance_template());
+ os << "\n - instance_call_handler: " << Brief(instance_call_handler());
+ os << "\n - access_check_info: " << Brief(access_check_info());
+ os << "\n";
+}
void ObjectTemplateInfo::ObjectTemplateInfoPrint(std::ostream& os) { // NOLINT
- HeapObject::PrintHeader(os, "ObjectTemplateInfo");
+ PrintHeader(os, "ObjectTemplateInfo");
os << "\n - tag: " << Brief(tag());
os << "\n - serial_number: " << Brief(serial_number());
os << "\n - property_list: " << Brief(property_list());
@@ -1895,7 +1960,7 @@ void ObjectTemplateInfo::ObjectTemplateInfoPrint(std::ostream& os) { // NOLINT
void AllocationSite::AllocationSitePrint(std::ostream& os) { // NOLINT
- HeapObject::PrintHeader(os, "AllocationSite");
+ PrintHeader(os, "AllocationSite");
if (this->HasWeakNext()) os << "\n - weak_next: " << Brief(weak_next());
os << "\n - dependent code: " << Brief(dependent_code());
os << "\n - nested site: " << Brief(nested_site());
@@ -1919,7 +1984,7 @@ void AllocationSite::AllocationSitePrint(std::ostream& os) { // NOLINT
void AllocationMemento::AllocationMementoPrint(std::ostream& os) { // NOLINT
- HeapObject::PrintHeader(os, "AllocationMemento");
+ PrintHeader(os, "AllocationMemento");
os << "\n - allocation site: ";
if (IsValid()) {
GetAllocationSite()->AllocationSitePrint(os);
@@ -1930,7 +1995,7 @@ void AllocationMemento::AllocationMementoPrint(std::ostream& os) { // NOLINT
void Script::ScriptPrint(std::ostream& os) { // NOLINT
- HeapObject::PrintHeader(os, "Script");
+ PrintHeader(os, "Script");
os << "\n - source: " << Brief(source());
os << "\n - name: " << Brief(name());
os << "\n - line_offset: " << line_offset();
@@ -1953,7 +2018,7 @@ void Script::ScriptPrint(std::ostream& os) { // NOLINT
#ifdef V8_INTL_SUPPORT
void JSV8BreakIterator::JSV8BreakIteratorPrint(std::ostream& os) { // NOLINT
- JSObjectPrintHeader(os, this, "JSV8BreakIterator");
+ JSObjectPrintHeader(os, *this, "JSV8BreakIterator");
os << "\n - locale: " << Brief(locale());
os << "\n - type: " << TypeAsString();
os << "\n - break iterator: " << Brief(break_iterator());
@@ -1967,68 +2032,58 @@ void JSV8BreakIterator::JSV8BreakIteratorPrint(std::ostream& os) { // NOLINT
}
void JSCollator::JSCollatorPrint(std::ostream& os) { // NOLINT
- JSObjectPrintHeader(os, this, "JSCollator");
+ JSObjectPrintHeader(os, *this, "JSCollator");
os << "\n - icu collator: " << Brief(icu_collator());
os << "\n - bound compare: " << Brief(bound_compare());
- os << "\n";
+ JSObjectPrintBody(os, *this);
}
void JSDateTimeFormat::JSDateTimeFormatPrint(std::ostream& os) { // NOLINT
- JSObjectPrintHeader(os, this, "JSDateTimeFormat");
+ JSObjectPrintHeader(os, *this, "JSDateTimeFormat");
os << "\n - icu locale: " << Brief(icu_locale());
os << "\n - icu simple date format: " << Brief(icu_simple_date_format());
os << "\n - bound format: " << Brief(bound_format());
- os << "\n";
+ os << "\n - hour cycle: " << HourCycleAsString();
+ JSObjectPrintBody(os, *this);
}
void JSListFormat::JSListFormatPrint(std::ostream& os) { // NOLINT
- JSObjectPrintHeader(os, this, "JSListFormat");
+ JSObjectPrintHeader(os, *this, "JSListFormat");
os << "\n - locale: " << Brief(locale());
os << "\n - style: " << StyleAsString();
os << "\n - type: " << TypeAsString();
os << "\n - icu formatter: " << Brief(icu_formatter());
- os << "\n";
+ JSObjectPrintBody(os, *this);
}
void JSLocale::JSLocalePrint(std::ostream& os) { // NOLINT
- HeapObject::PrintHeader(os, "JSLocale");
- os << "\n - language: " << Brief(language());
- os << "\n - script: " << Brief(script());
- os << "\n - region: " << Brief(region());
- os << "\n - baseName: " << Brief(base_name());
- os << "\n - locale: " << Brief(locale());
- os << "\n - calendar: " << Brief(calendar());
- os << "\n - caseFirst: " << CaseFirstAsString();
- os << "\n - collation: " << Brief(collation());
- os << "\n - hourCycle: " << HourCycleAsString();
- os << "\n - numeric: " << NumericAsString();
- os << "\n - numberingSystem: " << Brief(numbering_system());
- os << "\n";
+ JSObjectPrintHeader(os, *this, "JSLocale");
+ os << "\n - icu locale: " << Brief(icu_locale());
+ JSObjectPrintBody(os, *this);
}
void JSNumberFormat::JSNumberFormatPrint(std::ostream& os) { // NOLINT
- JSObjectPrintHeader(os, this, "JSNumberFormat");
+ JSObjectPrintHeader(os, *this, "JSNumberFormat");
os << "\n - locale: " << Brief(locale());
os << "\n - icu_number_format: " << Brief(icu_number_format());
os << "\n - bound_format: " << Brief(bound_format());
os << "\n - style: " << StyleAsString();
os << "\n - currency_display: " << CurrencyDisplayAsString();
- os << "\n";
+ JSObjectPrintBody(os, *this);
}
void JSPluralRules::JSPluralRulesPrint(std::ostream& os) { // NOLINT
- HeapObject::PrintHeader(os, "JSPluralRules");
- JSObjectPrint(os);
+ JSObjectPrintHeader(os, *this, "JSPluralRules");
os << "\n - locale: " << Brief(locale());
- os << "\n - type: " << Brief(type());
+ os << "\n - type: " << TypeAsString();
os << "\n - icu plural rules: " << Brief(icu_plural_rules());
os << "\n - icu decimal format: " << Brief(icu_decimal_format());
- os << "\n";
+ JSObjectPrintBody(os, *this);
}
void JSRelativeTimeFormat::JSRelativeTimeFormatPrint(
std::ostream& os) { // NOLINT
- JSObjectPrintHeader(os, this, "JSRelativeTimeFormat");
+ JSObjectPrintHeader(os, *this, "JSRelativeTimeFormat");
os << "\n - locale: " << Brief(locale());
os << "\n - style: " << StyleAsString();
os << "\n - numeric: " << NumericAsString();
@@ -2036,18 +2091,25 @@ void JSRelativeTimeFormat::JSRelativeTimeFormatPrint(
os << "\n";
}
+void JSSegmentIterator::JSSegmentIteratorPrint(std::ostream& os) { // NOLINT
+ JSObjectPrintHeader(os, *this, "JSSegmentIterator");
+ os << "\n - icu break iterator: " << Brief(icu_break_iterator());
+ os << "\n - unicode string: " << Brief(unicode_string());
+ os << "\n - granularity: " << GranularityAsString();
+ os << "\n";
+}
+
void JSSegmenter::JSSegmenterPrint(std::ostream& os) { // NOLINT
- JSObjectPrintHeader(os, this, "JSSegmenter");
+ JSObjectPrintHeader(os, *this, "JSSegmenter");
os << "\n - locale: " << Brief(locale());
os << "\n - granularity: " << GranularityAsString();
- os << "\n - lineBreakStyle: " << LineBreakStyleAsString();
- os << "\n - icubreak iterator: " << Brief(icu_break_iterator());
- os << "\n";
+ os << "\n - icu break iterator: " << Brief(icu_break_iterator());
+ JSObjectPrintBody(os, *this);
}
#endif // V8_INTL_SUPPORT
namespace {
-void PrintScopeInfoList(ScopeInfo* scope_info, std::ostream& os,
+void PrintScopeInfoList(ScopeInfo scope_info, std::ostream& os,
const char* list_name, int nof_internal_slots,
int start, int length) {
if (length <= 0) return;
@@ -2067,7 +2129,7 @@ void PrintScopeInfoList(ScopeInfo* scope_info, std::ostream& os,
} // namespace
void ScopeInfo::ScopeInfoPrint(std::ostream& os) { // NOLINT
- HeapObject::PrintHeader(os, "ScopeInfo");
+ PrintHeader(os, "ScopeInfo");
if (length() == 0) {
os << "\n - length = 0\n";
return;
@@ -2109,7 +2171,7 @@ void ScopeInfo::ScopeInfoPrint(std::ostream& os) { // NOLINT
}
os << "\n - length: " << length();
if (length() > 0) {
- PrintScopeInfoList(this, os, "context slots", Context::MIN_CONTEXT_SLOTS,
+ PrintScopeInfoList(*this, os, "context slots", Context::MIN_CONTEXT_SLOTS,
ContextLocalNamesIndex(), ContextLocalCount());
// TODO(neis): Print module stuff if present.
}
@@ -2117,12 +2179,13 @@ void ScopeInfo::ScopeInfoPrint(std::ostream& os) { // NOLINT
}
void DebugInfo::DebugInfoPrint(std::ostream& os) { // NOLINT
- HeapObject::PrintHeader(os, "DebugInfo");
+ PrintHeader(os, "DebugInfo");
os << "\n - flags: " << flags();
os << "\n - debugger_hints: " << debugger_hints();
os << "\n - shared: " << Brief(shared());
os << "\n - script: " << Brief(script());
os << "\n - original bytecode array: " << Brief(original_bytecode_array());
+ os << "\n - debug bytecode array: " << Brief(debug_bytecode_array());
os << "\n - break_points: ";
break_points()->FixedArrayPrint(os);
os << "\n - coverage_info: " << Brief(coverage_info());
@@ -2130,7 +2193,7 @@ void DebugInfo::DebugInfoPrint(std::ostream& os) { // NOLINT
void StackFrameInfo::StackFrameInfoPrint(std::ostream& os) { // NOLINT
- HeapObject::PrintHeader(os, "StackFrame");
+ PrintHeader(os, "StackFrame");
os << "\n - line_number: " << line_number();
os << "\n - column_number: " << column_number();
os << "\n - script_id: " << script_id();
@@ -2159,9 +2222,10 @@ void LayoutDescriptor::Print() {
void LayoutDescriptor::ShortPrint(std::ostream& os) {
if (IsSmi()) {
- os << this; // Print tagged value for easy use with "jld" gdb macro.
+ // Print tagged value for easy use with "jld" gdb macro.
+ os << reinterpret_cast<void*>(ptr());
} else {
- os << Brief(this);
+ os << Brief(*this);
}
}
@@ -2171,7 +2235,7 @@ void LayoutDescriptor::Print(std::ostream& os) { // NOLINT
os << "<all tagged>";
} else if (IsSmi()) {
os << "fast";
- PrintBitMask(os, static_cast<uint32_t>(Smi::ToInt(this)));
+ PrintBitMask(os, static_cast<uint32_t>(Smi::ToInt(*this)));
} else if (IsOddball() && IsUninitialized()) {
os << "<uninitialized>";
} else {
@@ -2185,42 +2249,41 @@ void LayoutDescriptor::Print(std::ostream& os) { // NOLINT
os << "\n";
}
-void PreParsedScopeData::PreParsedScopeDataPrint(std::ostream& os) { // NOLINT
- HeapObject::PrintHeader(os, "PreParsedScopeData");
- os << "\n - scope_data: " << Brief(scope_data());
- os << "\n - length: " << length();
- for (int i = 0; i < length(); ++i) {
- os << "\n - [" << i << "]: " << Brief(child_data(i));
+void PreparseData::PreparseDataPrint(std::ostream& os) { // NOLINT
+ PrintHeader(os, "PreparseData");
+ os << "\n - data_length: " << data_length();
+ os << "\n - children_length: " << children_length();
+ if (data_length() > 0) {
+ os << "\n - data-start: " << (address() + kDataStartOffset);
+ }
+ if (children_length() > 0) {
+ os << "\n - children-start: " << inner_start_offset();
+ }
+ for (int i = 0; i < children_length(); ++i) {
+ os << "\n - [" << i << "]: " << Brief(get_child(i));
}
os << "\n";
}
-void UncompiledDataWithoutPreParsedScope::
- UncompiledDataWithoutPreParsedScopePrint(std::ostream& os) { // NOLINT
- HeapObject::PrintHeader(os, "UncompiledDataWithoutPreParsedScope");
+void UncompiledDataWithoutPreparseData::UncompiledDataWithoutPreparseDataPrint(
+ std::ostream& os) { // NOLINT
+ PrintHeader(os, "UncompiledDataWithoutPreparseData");
os << "\n - start position: " << start_position();
os << "\n - end position: " << end_position();
os << "\n";
}
-void UncompiledDataWithPreParsedScope::UncompiledDataWithPreParsedScopePrint(
+void UncompiledDataWithPreparseData::UncompiledDataWithPreparseDataPrint(
std::ostream& os) { // NOLINT
- HeapObject::PrintHeader(os, "UncompiledDataWithPreParsedScope");
+ PrintHeader(os, "UncompiledDataWithPreparseData");
os << "\n - start position: " << start_position();
os << "\n - end position: " << end_position();
- os << "\n - pre_parsed_scope_data: " << Brief(pre_parsed_scope_data());
- os << "\n";
-}
-
-void MicrotaskQueue::MicrotaskQueuePrint(std::ostream& os) { // NOLINT
- HeapObject::PrintHeader(os, "MicrotaskQueue");
- os << "\n - pending_microtask_count: " << pending_microtask_count();
- os << "\n - queue: " << Brief(queue());
+ os << "\n - preparse_data: " << Brief(preparse_data());
os << "\n";
}
void InterpreterData::InterpreterDataPrint(std::ostream& os) { // NOLINT
- HeapObject::PrintHeader(os, "InterpreterData");
+ PrintHeader(os, "InterpreterData");
os << "\n - bytecode_array: " << Brief(bytecode_array());
os << "\n - interpreter_trampoline: " << Brief(interpreter_trampoline());
os << "\n";
@@ -2233,8 +2296,8 @@ void MaybeObject::Print() {
}
void MaybeObject::Print(std::ostream& os) {
- Smi* smi;
- HeapObject* heap_object;
+ Smi smi;
+ HeapObject heap_object;
if (ToSmi(&smi)) {
smi->SmiPrint(os);
} else if (IsCleared()) {
@@ -2260,10 +2323,10 @@ void MutableHeapNumber::MutableHeapNumberPrint(std::ostream& os) {
// TODO(cbruni): remove once the new maptracer is in place.
void Name::NameShortPrint() {
if (this->IsString()) {
- PrintF("%s", String::cast(this)->ToCString().get());
+ PrintF("%s", String::cast(*this)->ToCString().get());
} else {
DCHECK(this->IsSymbol());
- Symbol* s = Symbol::cast(this);
+ Symbol s = Symbol::cast(*this);
if (s->name()->IsUndefined()) {
PrintF("#<%s>", s->PrivateSymbolToName());
} else {
@@ -2275,10 +2338,10 @@ void Name::NameShortPrint() {
// TODO(cbruni): remove once the new maptracer is in place.
int Name::NameShortPrint(Vector<char> str) {
if (this->IsString()) {
- return SNPrintF(str, "%s", String::cast(this)->ToCString().get());
+ return SNPrintF(str, "%s", String::cast(*this)->ToCString().get());
} else {
DCHECK(this->IsSymbol());
- Symbol* s = Symbol::cast(this);
+ Symbol s = Symbol::cast(*this);
if (s->name()->IsUndefined()) {
return SNPrintF(str, "#<%s>", s->PrivateSymbolToName());
} else {
@@ -2289,18 +2352,97 @@ int Name::NameShortPrint(Vector<char> str) {
void Map::PrintMapDetails(std::ostream& os) {
DisallowHeapAllocation no_gc;
-#ifdef OBJECT_PRINT
this->MapPrint(os);
+ instance_descriptors()->PrintDescriptors(os);
+}
+
+void Map::MapPrint(std::ostream& os) { // NOLINT
+#ifdef OBJECT_PRINT
+ PrintHeader(os, "Map");
#else
- os << "Map=" << reinterpret_cast<void*>(this);
+ os << "Map=" << reinterpret_cast<void*>(ptr());
#endif
+ os << "\n - type: " << instance_type();
+ os << "\n - instance size: ";
+ if (instance_size() == kVariableSizeSentinel) {
+ os << "variable";
+ } else {
+ os << instance_size();
+ }
+ if (IsJSObjectMap()) {
+ os << "\n - inobject properties: " << GetInObjectProperties();
+ }
+ os << "\n - elements kind: " << ElementsKindToString(elements_kind());
+ os << "\n - unused property fields: " << UnusedPropertyFields();
+ os << "\n - enum length: ";
+ if (EnumLength() == kInvalidEnumCacheSentinel) {
+ os << "invalid";
+ } else {
+ os << EnumLength();
+ }
+ if (is_deprecated()) os << "\n - deprecated_map";
+ if (is_stable()) os << "\n - stable_map";
+ if (is_migration_target()) os << "\n - migration_target";
+ if (is_dictionary_map()) os << "\n - dictionary_map";
+ if (has_hidden_prototype()) os << "\n - has_hidden_prototype";
+ if (has_named_interceptor()) os << "\n - named_interceptor";
+ if (has_indexed_interceptor()) os << "\n - indexed_interceptor";
+ if (may_have_interesting_symbols()) os << "\n - may_have_interesting_symbols";
+ if (is_undetectable()) os << "\n - undetectable";
+ if (is_callable()) os << "\n - callable";
+ if (is_constructor()) os << "\n - constructor";
+ if (has_prototype_slot()) {
+ os << "\n - has_prototype_slot";
+ if (has_non_instance_prototype()) os << " (non-instance prototype)";
+ }
+ if (is_access_check_needed()) os << "\n - access_check_needed";
+ if (!is_extensible()) os << "\n - non-extensible";
+ if (is_prototype_map()) {
+ os << "\n - prototype_map";
+ os << "\n - prototype info: " << Brief(prototype_info());
+ } else {
+ os << "\n - back pointer: " << Brief(GetBackPointer());
+ }
+ os << "\n - prototype_validity cell: " << Brief(prototype_validity_cell());
+ os << "\n - instance descriptors " << (owns_descriptors() ? "(own) " : "")
+ << "#" << NumberOfOwnDescriptors() << ": "
+ << Brief(instance_descriptors());
+ if (FLAG_unbox_double_fields) {
+ os << "\n - layout descriptor: ";
+ layout_descriptor()->ShortPrint(os);
+ }
+
+ Isolate* isolate;
+ // Read-only maps can't have transitions, which is fortunate because we need
+ // the isolate to iterate over the transitions.
+ if (Isolate::FromWritableHeapObject(*this, &isolate)) {
+ DisallowHeapAllocation no_gc;
+ TransitionsAccessor transitions(isolate, *this, &no_gc);
+ int nof_transitions = transitions.NumberOfTransitions();
+ if (nof_transitions > 0) {
+ os << "\n - transitions #" << nof_transitions << ": ";
+ HeapObject heap_object;
+ Smi smi;
+ if (raw_transitions()->ToSmi(&smi)) {
+ os << Brief(smi);
+ } else if (raw_transitions()->GetHeapObject(&heap_object)) {
+ os << Brief(heap_object);
+ }
+#ifdef OBJECT_PRINT
+ transitions.PrintTransitions(os);
+#endif // OBJECT_PRINT
+ }
+ }
+ os << "\n - prototype: " << Brief(prototype());
+ os << "\n - constructor: " << Brief(GetConstructor());
+ os << "\n - dependent code: " << Brief(dependent_code());
+ os << "\n - construction counter: " << construction_counter();
os << "\n";
- instance_descriptors()->PrintDescriptors(os);
}
void DescriptorArray::PrintDescriptors(std::ostream& os) {
for (int i = 0; i < number_of_descriptors(); i++) {
- Name* key = GetKey(i);
+ Name key = GetKey(i);
os << "\n [" << i << "]: ";
#ifdef OBJECT_PRINT
key->NamePrint(os);
@@ -2320,15 +2462,15 @@ void DescriptorArray::PrintDescriptorDetails(std::ostream& os, int descriptor,
os << " @ ";
switch (details.location()) {
case kField: {
- FieldType* field_type = GetFieldType(descriptor);
+ FieldType field_type = GetFieldType(descriptor);
field_type->PrintTo(os);
break;
}
case kDescriptor:
- Object* value = GetStrongValue(descriptor);
+ Object value = GetStrongValue(descriptor);
os << Brief(value);
if (value->IsAccessorPair()) {
- AccessorPair* pair = AccessorPair::cast(value);
+ AccessorPair pair = AccessorPair::cast(value);
os << "(get: " << Brief(pair->getter())
<< ", set: " << Brief(pair->setter()) << ")";
}
@@ -2346,14 +2488,14 @@ char* String::ToAsciiArray() {
static char* buffer = nullptr;
if (buffer != nullptr) delete[] buffer;
buffer = new char[length() + 1];
- WriteToFlat(this, reinterpret_cast<uint8_t*>(buffer), 0, length());
+ WriteToFlat(*this, reinterpret_cast<uint8_t*>(buffer), 0, length());
buffer[length()] = 0;
return buffer;
}
// static
-void TransitionsAccessor::PrintOneTransition(std::ostream& os, Name* key,
- Map* target) {
+void TransitionsAccessor::PrintOneTransition(std::ostream& os, Name key,
+ Map target) {
os << "\n ";
#ifdef OBJECT_PRINT
key->NamePrint(os);
@@ -2377,7 +2519,7 @@ void TransitionsAccessor::PrintOneTransition(std::ostream& os, Name* key,
DCHECK(!IsSpecialTransition(roots, key));
os << "(transition to ";
int descriptor = target->LastAdded();
- DescriptorArray* descriptors = target->instance_descriptors();
+ DescriptorArray descriptors = target->instance_descriptors();
descriptors->PrintDescriptorDetails(os, descriptor,
PropertyDetails::kForTransitions);
os << ")";
@@ -2389,8 +2531,8 @@ void TransitionArray::PrintInternal(std::ostream& os) {
int num_transitions = number_of_transitions();
os << "Transition array #" << num_transitions << ":";
for (int i = 0; i < num_transitions; i++) {
- Name* key = GetKey(i);
- Map* target = GetTarget(i);
+ Name key = GetKey(i);
+ Map target = GetTarget(i);
TransitionsAccessor::PrintOneTransition(os, key, target);
}
os << "\n" << std::flush;
@@ -2400,10 +2542,11 @@ void TransitionsAccessor::PrintTransitions(std::ostream& os) { // NOLINT
switch (encoding()) {
case kPrototypeInfo:
case kUninitialized:
+ case kMigrationTarget:
return;
case kWeakRef: {
- Map* target = Map::cast(raw_transitions_->GetHeapObjectAssumeWeak());
- Name* key = GetSimpleTransitionKey(target);
+ Map target = Map::cast(raw_transitions_->GetHeapObjectAssumeWeak());
+ Name key = GetSimpleTransitionKey(target);
PrintOneTransition(os, key, target);
break;
}
@@ -2426,8 +2569,8 @@ void TransitionsAccessor::PrintTransitionTree(std::ostream& os, int level,
int num_transitions = NumberOfTransitions();
if (num_transitions == 0) return;
for (int i = 0; i < num_transitions; i++) {
- Name* key = GetKey(i);
- Map* target = GetTarget(i);
+ Name key = GetKey(i);
+ Map target = GetTarget(i);
os << std::endl
<< " " << level << "/" << i << ":" << std::setw(level * 2 + 2) << " ";
std::stringstream ss;
@@ -2454,7 +2597,7 @@ void TransitionsAccessor::PrintTransitionTree(std::ostream& os, int level,
DCHECK(!IsSpecialTransition(ReadOnlyRoots(isolate_), key));
os << "to ";
int descriptor = target->LastAdded();
- DescriptorArray* descriptors = target->instance_descriptors();
+ DescriptorArray descriptors = target->instance_descriptors();
descriptors->PrintDescriptorDetails(os, descriptor,
PropertyDetails::kForTransitions);
}
@@ -2479,7 +2622,7 @@ void JSObject::PrintTransitions(std::ostream& os) { // NOLINT
// The following functions are used by our gdb macros.
//
V8_EXPORT_PRIVATE extern void _v8_internal_Print_Object(void* object) {
- reinterpret_cast<i::Object*>(object)->Print();
+ i::Object(reinterpret_cast<i::Address>(object))->Print();
}
V8_EXPORT_PRIVATE extern void _v8_internal_Print_Code(void* object) {
@@ -2504,7 +2647,7 @@ V8_EXPORT_PRIVATE extern void _v8_internal_Print_Code(void* object) {
return;
}
- i::Code* code = isolate->FindCodeObject(address);
+ i::Code code = isolate->FindCodeObject(address);
if (!code->IsCode()) {
i::PrintF("No code object found containing %p\n", object);
return;
@@ -2519,11 +2662,11 @@ V8_EXPORT_PRIVATE extern void _v8_internal_Print_Code(void* object) {
V8_EXPORT_PRIVATE extern void _v8_internal_Print_LayoutDescriptor(
void* object) {
- i::Object* o = reinterpret_cast<i::Object*>(object);
+ i::Object o(reinterpret_cast<i::Address>(object));
if (!o->IsLayoutDescriptor()) {
printf("Please provide a layout descriptor\n");
} else {
- reinterpret_cast<i::LayoutDescriptor*>(object)->Print();
+ i::LayoutDescriptor::cast(o)->Print();
}
}
@@ -2533,13 +2676,13 @@ V8_EXPORT_PRIVATE extern void _v8_internal_Print_StackTrace() {
}
V8_EXPORT_PRIVATE extern void _v8_internal_Print_TransitionTree(void* object) {
- i::Object* o = reinterpret_cast<i::Object*>(object);
+ i::Object o(reinterpret_cast<i::Address>(object));
if (!o->IsMap()) {
printf("Please provide a valid Map\n");
} else {
#if defined(DEBUG) || defined(OBJECT_PRINT)
i::DisallowHeapAllocation no_gc;
- i::Map* map = reinterpret_cast<i::Map*>(object);
+ i::Map map = i::Map::unchecked_cast(o);
i::TransitionsAccessor transitions(i::Isolate::Current(), map, &no_gc);
transitions.PrintTransitionTree();
#endif
diff --git a/deps/v8/src/objects.cc b/deps/v8/src/objects.cc
index 6ccdbf4e34..119c6aed72 100644
--- a/deps/v8/src/objects.cc
+++ b/deps/v8/src/objects.cc
@@ -4,6 +4,7 @@
#include "src/objects.h"
+#include <algorithm>
#include <cmath>
#include <iomanip>
#include <memory>
@@ -18,13 +19,13 @@
#include "src/api-natives.h"
#include "src/api.h"
#include "src/arguments.h"
+#include "src/assembler-inl.h"
#include "src/ast/ast.h"
#include "src/ast/scopes.h"
#include "src/base/bits.h"
#include "src/base/utils/random-number-generator.h"
#include "src/bootstrapper.h"
#include "src/builtins/builtins.h"
-#include "src/code-stubs.h"
#include "src/compiler.h"
#include "src/counters-inl.h"
#include "src/counters.h"
@@ -47,16 +48,18 @@
#include "src/keys.h"
#include "src/log.h"
#include "src/lookup-inl.h"
-#include "src/macro-assembler.h"
#include "src/map-updater.h"
-#include "src/messages.h"
+#include "src/message-template.h"
+#include "src/microtask-queue.h"
#include "src/objects-body-descriptors-inl.h"
#include "src/objects/api-callbacks.h"
#include "src/objects/arguments-inl.h"
#include "src/objects/bigint.h"
+#include "src/objects/cell-inl.h"
#include "src/objects/code-inl.h"
#include "src/objects/compilation-cache-inl.h"
#include "src/objects/debug-objects-inl.h"
+#include "src/objects/foreign.h"
#include "src/objects/frame-array-inl.h"
#include "src/objects/hash-table-inl.h"
#include "src/objects/js-array-inl.h"
@@ -79,16 +82,21 @@
#include "src/objects/js-regexp-string-iterator.h"
#ifdef V8_INTL_SUPPORT
#include "src/objects/js-relative-time-format.h"
+#include "src/objects/js-segment-iterator.h"
#include "src/objects/js-segmenter.h"
#endif // V8_INTL_SUPPORT
+#include "src/code-comments.h"
+#include "src/objects/js-weak-refs-inl.h"
#include "src/objects/literal-objects-inl.h"
#include "src/objects/map.h"
#include "src/objects/microtask-inl.h"
-#include "src/objects/microtask-queue-inl.h"
#include "src/objects/module-inl.h"
#include "src/objects/promise-inl.h"
+#include "src/objects/slots-atomic-inl.h"
#include "src/objects/stack-frame-info-inl.h"
-#include "src/parsing/preparsed-scope-data.h"
+#include "src/objects/struct-inl.h"
+#include "src/ostreams.h"
+#include "src/parsing/preparse-data.h"
#include "src/property-descriptor.h"
#include "src/prototype.h"
#include "src/regexp/jsregexp.h"
@@ -99,8 +107,8 @@
#include "src/string-builder-inl.h"
#include "src/string-search.h"
#include "src/string-stream.h"
-#include "src/unicode-cache-inl.h"
#include "src/unicode-decoder.h"
+#include "src/unicode-inl.h"
#include "src/utils-inl.h"
#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-objects.h"
@@ -150,7 +158,7 @@ Handle<FieldType> Object::OptimalType(Isolate* isolate,
if (FLAG_track_field_types) {
if (representation.IsHeapObject() && IsHeapObject()) {
// We can track only JavaScript objects with stable maps.
- Handle<Map> map(HeapObject::cast(this)->map(), isolate);
+ Handle<Map> map(HeapObject::cast(*this)->map(), isolate);
if (map->is_stable() && map->IsJSReceiverMap()) {
return FieldType::Class(map, isolate);
}
@@ -504,9 +512,9 @@ MaybeHandle<Object> Object::ConvertToLength(Isolate* isolate,
}
// static
-MaybeHandle<Object> Object::ConvertToIndex(
- Isolate* isolate, Handle<Object> input,
- MessageTemplate::Template error_index) {
+MaybeHandle<Object> Object::ConvertToIndex(Isolate* isolate,
+ Handle<Object> input,
+ MessageTemplate error_index) {
if (input->IsUndefined(isolate)) return handle(Smi::kZero, isolate);
ASSIGN_RETURN_ON_EXCEPTION(isolate, input, ToNumber(isolate, input), Object);
if (input->IsSmi() && Smi::ToInt(*input) >= 0) return input;
@@ -519,18 +527,17 @@ MaybeHandle<Object> Object::ConvertToIndex(
}
bool Object::BooleanValue(Isolate* isolate) {
- if (IsSmi()) return Smi::ToInt(this) != 0;
+ if (IsSmi()) return Smi::ToInt(*this) != 0;
DCHECK(IsHeapObject());
if (IsBoolean()) return IsTrue(isolate);
if (IsNullOrUndefined(isolate)) return false;
if (IsUndetectable()) return false; // Undetectable object is false.
- if (IsString()) return String::cast(this)->length() != 0;
- if (IsHeapNumber()) return DoubleToBoolean(HeapNumber::cast(this)->value());
- if (IsBigInt()) return BigInt::cast(this)->ToBoolean();
+ if (IsString()) return String::cast(*this)->length() != 0;
+ if (IsHeapNumber()) return DoubleToBoolean(HeapNumber::cast(*this)->value());
+ if (IsBigInt()) return BigInt::cast(*this)->ToBoolean();
return true;
}
-
namespace {
// TODO(bmeurer): Maybe we should introduce a marker interface Number,
@@ -554,7 +561,7 @@ bool NumberEquals(double x, double y) {
return x == y;
}
-bool NumberEquals(const Object* x, const Object* y) {
+bool NumberEquals(const Object x, const Object y) {
return NumberEquals(x->Number(), y->Number());
}
@@ -714,22 +721,20 @@ Maybe<bool> Object::Equals(Isolate* isolate, Handle<Object> x,
}
}
-
-bool Object::StrictEquals(Object* that) {
+bool Object::StrictEquals(Object that) {
if (this->IsNumber()) {
if (!that->IsNumber()) return false;
- return NumberEquals(this, that);
+ return NumberEquals(*this, that);
} else if (this->IsString()) {
if (!that->IsString()) return false;
- return String::cast(this)->Equals(String::cast(that));
+ return String::cast(*this)->Equals(String::cast(that));
} else if (this->IsBigInt()) {
if (!that->IsBigInt()) return false;
- return BigInt::EqualToBigInt(BigInt::cast(this), BigInt::cast(that));
+ return BigInt::EqualToBigInt(BigInt::cast(*this), BigInt::cast(that));
}
- return this == that;
+ return *this == that;
}
-
// static
Handle<String> Object::TypeOf(Isolate* isolate, Handle<Object> object) {
if (object->IsNumber()) return isolate->factory()->number_string();
@@ -827,8 +832,8 @@ MaybeHandle<Object> Object::InstanceOf(Isolate* isolate, Handle<Object> object,
Handle<Object> inst_of_handler;
ASSIGN_RETURN_ON_EXCEPTION(
isolate, inst_of_handler,
- JSReceiver::GetMethod(Handle<JSReceiver>::cast(callable),
- isolate->factory()->has_instance_symbol()),
+ Object::GetMethod(Handle<JSReceiver>::cast(callable),
+ isolate->factory()->has_instance_symbol()),
Object);
if (!inst_of_handler->IsUndefined(isolate)) {
// Call the {inst_of_handler} on the {callable}.
@@ -850,8 +855,8 @@ MaybeHandle<Object> Object::InstanceOf(Isolate* isolate, Handle<Object> object,
// Fall back to OrdinaryHasInstance with {callable} and {object}.
Handle<Object> result;
ASSIGN_RETURN_ON_EXCEPTION(
- isolate, result,
- JSReceiver::OrdinaryHasInstance(isolate, callable, object), Object);
+ isolate, result, Object::OrdinaryHasInstance(isolate, callable, object),
+ Object);
return result;
}
@@ -891,7 +896,7 @@ MaybeHandle<FixedArray> CreateListFromArrayLikeFastPath(
} else if (object->IsJSTypedArray()) {
Handle<JSTypedArray> array = Handle<JSTypedArray>::cast(object);
size_t length = array->length_value();
- if (array->WasNeutered() ||
+ if (array->WasDetached() ||
length > static_cast<size_t>(FixedArray::kMaxLength)) {
return MaybeHandle<FixedArray>();
}
@@ -1048,9 +1053,16 @@ MaybeHandle<Object> Object::GetProperty(LookupIterator* it,
UNREACHABLE();
case LookupIterator::JSPROXY: {
bool was_found;
+ Handle<Object> receiver = it->GetReceiver();
+ // In case of global IC, the receiver is the global object. Replace by
+ // the global proxy.
+ if (receiver->IsJSGlobalObject()) {
+ receiver = handle(JSGlobalObject::cast(*receiver)->global_proxy(),
+ it->isolate());
+ }
MaybeHandle<Object> result =
JSProxy::GetProperty(it->isolate(), it->GetHolder<JSProxy>(),
- it->GetName(), it->GetReceiver(), &was_found);
+ it->GetName(), receiver, &was_found);
if (!was_found) it->NotFound();
return result;
}
@@ -1213,7 +1225,7 @@ Handle<Object> JSReceiver::GetDataProperty(LookupIterator* it) {
case LookupIterator::ACCESS_CHECK:
// Support calling this method without an active context, but refuse
// access to access-checked objects in that case.
- if (it->isolate()->context() != nullptr && it->HasAccess()) continue;
+ if (!it->isolate()->context().is_null() && it->HasAccess()) continue;
V8_FALLTHROUGH;
case LookupIterator::JSPROXY:
it->NotFound();
@@ -1235,11 +1247,11 @@ Handle<Object> JSReceiver::GetDataProperty(LookupIterator* it) {
bool Object::ToInt32(int32_t* value) {
if (IsSmi()) {
- *value = Smi::ToInt(this);
+ *value = Smi::ToInt(*this);
return true;
}
if (IsHeapNumber()) {
- double num = HeapNumber::cast(this)->value();
+ double num = HeapNumber::cast(*this)->value();
// Check range before conversion to avoid undefined behavior.
if (num >= kMinInt && num <= kMaxInt && FastI2D(FastD2I(num)) == num) {
*value = FastD2I(num);
@@ -1249,10 +1261,15 @@ bool Object::ToInt32(int32_t* value) {
return false;
}
+// static constexpr object declarations need a definition to make the
+// compiler happy.
+constexpr Object Smi::kZero;
+constexpr Object SharedFunctionInfo::kNoSharedNameSentinel;
+
Handle<SharedFunctionInfo> FunctionTemplateInfo::GetOrCreateSharedFunctionInfo(
Isolate* isolate, Handle<FunctionTemplateInfo> info,
MaybeHandle<Name> maybe_name) {
- Object* current_info = info->shared_function_info();
+ Object current_info = info->shared_function_info();
if (current_info->IsSharedFunctionInfo()) {
return handle(SharedFunctionInfo::cast(current_info), isolate);
}
@@ -1283,14 +1300,14 @@ Handle<SharedFunctionInfo> FunctionTemplateInfo::GetOrCreateSharedFunctionInfo(
return result;
}
-bool FunctionTemplateInfo::IsTemplateFor(Map* map) {
+bool FunctionTemplateInfo::IsTemplateFor(Map map) {
// There is a constraint on the object; check.
if (!map->IsJSObjectMap()) return false;
// Fetch the constructor function of the object.
- Object* cons_obj = map->GetConstructor();
- Object* type;
+ Object cons_obj = map->GetConstructor();
+ Object type;
if (cons_obj->IsJSFunction()) {
- JSFunction* fun = JSFunction::cast(cons_obj);
+ JSFunction fun = JSFunction::cast(cons_obj);
type = fun->shared()->function_data();
} else if (cons_obj->IsFunctionTemplateInfo()) {
type = FunctionTemplateInfo::cast(cons_obj);
@@ -1300,13 +1317,24 @@ bool FunctionTemplateInfo::IsTemplateFor(Map* map) {
// Iterate through the chain of inheriting function templates to
// see if the required one occurs.
while (type->IsFunctionTemplateInfo()) {
- if (type == this) return true;
- type = FunctionTemplateInfo::cast(type)->parent_template();
+ if (type == *this) return true;
+ type = FunctionTemplateInfo::cast(type)->GetParentTemplate();
}
// Didn't find the required type in the inheritance chain.
return false;
}
+// static
+FunctionTemplateRareData FunctionTemplateInfo::AllocateFunctionTemplateRareData(
+ Isolate* isolate, Handle<FunctionTemplateInfo> function_template_info) {
+ DCHECK(function_template_info->rare_data()->IsUndefined(isolate));
+ Handle<Struct> struct_obj =
+ isolate->factory()->NewStruct(FUNCTION_TEMPLATE_RARE_DATA_TYPE, TENURED);
+ Handle<FunctionTemplateRareData> rare_data =
+ i::Handle<FunctionTemplateRareData>::cast(struct_obj);
+ function_template_info->set_rare_data(*rare_data);
+ return *rare_data;
+}
// static
Handle<TemplateList> TemplateList::New(Isolate* isolate, int size) {
@@ -1383,7 +1411,7 @@ MaybeHandle<JSObject> JSObject::ObjectCreate(Isolate* isolate,
void JSObject::EnsureWritableFastElements(Handle<JSObject> object) {
DCHECK(object->HasSmiOrObjectElements() ||
object->HasFastStringWrapperElements());
- FixedArray* raw_elems = FixedArray::cast(object->elements());
+ FixedArray raw_elems = FixedArray::cast(object->elements());
Heap* heap = object->GetHeap();
if (raw_elems->map() != ReadOnlyRoots(heap).fixed_cow_array_map()) return;
Isolate* isolate = heap->isolate();
@@ -1403,8 +1431,12 @@ int JSObject::GetHeaderSize(InstanceType type,
return JSObject::kHeaderSize;
case JS_GENERATOR_OBJECT_TYPE:
return JSGeneratorObject::kSize;
+ case JS_ASYNC_FUNCTION_OBJECT_TYPE:
+ return JSAsyncFunctionObject::kSize;
case JS_ASYNC_GENERATOR_OBJECT_TYPE:
return JSAsyncGeneratorObject::kSize;
+ case JS_ASYNC_FROM_SYNC_ITERATOR_TYPE:
+ return JSAsyncFromSyncIterator::kSize;
case JS_GLOBAL_PROXY_TYPE:
return JSGlobalProxy::kSize;
case JS_GLOBAL_OBJECT_TYPE:
@@ -1420,13 +1452,13 @@ int JSObject::GetHeaderSize(InstanceType type,
case JS_ARRAY_TYPE:
return JSArray::kSize;
case JS_ARRAY_BUFFER_TYPE:
- return JSArrayBuffer::kSize;
+ return JSArrayBuffer::kHeaderSize;
case JS_ARRAY_ITERATOR_TYPE:
return JSArrayIterator::kSize;
case JS_TYPED_ARRAY_TYPE:
- return JSTypedArray::kSize;
+ return JSTypedArray::kHeaderSize;
case JS_DATA_VIEW_TYPE:
- return JSDataView::kSize;
+ return JSDataView::kHeaderSize;
case JS_SET_TYPE:
return JSSet::kSize;
case JS_MAP_TYPE:
@@ -1438,6 +1470,14 @@ int JSObject::GetHeaderSize(InstanceType type,
case JS_MAP_KEY_VALUE_ITERATOR_TYPE:
case JS_MAP_VALUE_ITERATOR_TYPE:
return JSMapIterator::kSize;
+ case JS_WEAK_CELL_TYPE:
+ return JSWeakCell::kSize;
+ case JS_WEAK_REF_TYPE:
+ return JSWeakRef::kSize;
+ case JS_WEAK_FACTORY_TYPE:
+ return JSWeakFactory::kSize;
+ case JS_WEAK_FACTORY_CLEANUP_ITERATOR_TYPE:
+ return JSWeakFactoryCleanupIterator::kSize;
case JS_WEAK_MAP_TYPE:
return JSWeakMap::kSize;
case JS_WEAK_SET_TYPE:
@@ -1477,6 +1517,8 @@ int JSObject::GetHeaderSize(InstanceType type,
return JSPluralRules::kSize;
case JS_INTL_RELATIVE_TIME_FORMAT_TYPE:
return JSRelativeTimeFormat::kSize;
+ case JS_INTL_SEGMENT_ITERATOR_TYPE:
+ return JSSegmentIterator::kSize;
case JS_INTL_SEGMENTER_TYPE:
return JSSegmenter::kSize;
#endif // V8_INTL_SUPPORT
@@ -1490,6 +1532,8 @@ int JSObject::GetHeaderSize(InstanceType type,
return WasmModuleObject::kSize;
case WASM_TABLE_TYPE:
return WasmTableObject::kSize;
+ case WASM_EXCEPTION_TYPE:
+ return WasmExceptionObject::kSize;
default:
UNREACHABLE();
}
@@ -1517,8 +1561,8 @@ MaybeHandle<Object> JSProxy::GetPrototype(Handle<JSProxy> proxy) {
// 5. Let trap be ? GetMethod(handler, "getPrototypeOf").
Handle<Object> trap;
- ASSIGN_RETURN_ON_EXCEPTION(isolate, trap, GetMethod(handler, trap_name),
- Object);
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, trap,
+ Object::GetMethod(handler, trap_name), Object);
// 6. If trap is undefined, then return target.[[GetPrototypeOf]]().
if (trap->IsUndefined(isolate)) {
return JSReceiver::GetPrototype(isolate, target);
@@ -1979,7 +2023,7 @@ MaybeHandle<Object> JSObject::GetPropertyWithFailedAccessCheck(
if (interceptor.is_null()) {
while (AllCanRead(it)) {
if (it->state() == LookupIterator::ACCESSOR) {
- return GetPropertyWithAccessor(it);
+ return Object::GetPropertyWithAccessor(it);
}
DCHECK_EQ(LookupIterator::INTERCEPTOR, it->state());
bool done;
@@ -2061,7 +2105,7 @@ Maybe<bool> JSObject::SetPropertyWithFailedAccessCheck(
it->GetInterceptorForFailedAccessCheck();
if (interceptor.is_null()) {
if (AllCanWrite(it)) {
- return SetPropertyWithAccessor(it, value, should_throw);
+ return Object::SetPropertyWithAccessor(it, value, should_throw);
}
} else {
Maybe<bool> result = SetPropertyWithInterceptorInternal(
@@ -2314,50 +2358,43 @@ Maybe<bool> JSReceiver::SetOrCopyDataProperties(
return Just(true);
}
-Map* Object::GetPrototypeChainRootMap(Isolate* isolate) const {
+Map Object::GetPrototypeChainRootMap(Isolate* isolate) const {
DisallowHeapAllocation no_alloc;
if (IsSmi()) {
- Context* native_context = isolate->context()->native_context();
+ Context native_context = isolate->context()->native_context();
return native_context->number_function()->initial_map();
}
- const HeapObject* heap_object = HeapObject::cast(this);
+ const HeapObject heap_object = HeapObject::cast(*this);
return heap_object->map()->GetPrototypeChainRootMap(isolate);
}
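
This hunk shows the change that dominates the whole diff: Object, Map, HeapObject and friends stop being raw C++ pointer types and become one-word value types wrapping a tagged Address, which is why `this` becomes `*this`, casts return by value, and the const_cast below disappears. A minimal standalone sketch of that handle pattern follows; ObjectRef and MapRef are invented names, not the real V8 classes.

```cpp
#include <cassert>
#include <cstdint>

using Address = std::uintptr_t;

// Illustrative stand-in for the new value-type object handles: one word
// wide, trivially copyable, compared by tagged address rather than by
// C++ pointer identity.
class ObjectRef {
 public:
  explicit ObjectRef(Address ptr) : ptr_(ptr) {}
  Address ptr() const { return ptr_; }
  bool operator==(ObjectRef other) const { return ptr_ == other.ptr_; }

 private:
  Address ptr_;  // the only member: the handle *is* the tagged word
};

// "Casts" build a new value instead of reinterpreting a pointer, which
// is why the diff rewrites Map::cast(this) as Map::cast(*this).
class MapRef : public ObjectRef {
 public:
  static MapRef cast(ObjectRef o) { return MapRef(o.ptr()); }

 private:
  explicit MapRef(Address ptr) : ObjectRef(ptr) {}
};

int main() {
  ObjectRef a(0x1000), b(0x1000);
  assert(a == b);              // address equality, not pointer identity
  MapRef m = MapRef::cast(a);  // a copy, no reinterpret_cast involved
  return m.ptr() == a.ptr() ? 0 : 1;
}
```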
-Map* Map::GetPrototypeChainRootMap(Isolate* isolate) const {
+Map Map::GetPrototypeChainRootMap(Isolate* isolate) const {
DisallowHeapAllocation no_alloc;
if (IsJSReceiverMap()) {
- return const_cast<Map*>(this);
+ return *this;
}
int constructor_function_index = GetConstructorFunctionIndex();
if (constructor_function_index != Map::kNoConstructorFunctionIndex) {
- Context* native_context = isolate->context()->native_context();
- JSFunction* constructor_function =
+ Context native_context = isolate->context()->native_context();
+ JSFunction constructor_function =
JSFunction::cast(native_context->get(constructor_function_index));
return constructor_function->initial_map();
}
return ReadOnlyRoots(isolate).null_value()->map();
}
-// static
-Smi* Object::GetOrCreateHash(Isolate* isolate, Object* key) {
- DisallowHeapAllocation no_gc;
- return key->GetOrCreateHash(isolate);
-}
-
-Smi* Object::GetOrCreateHash(Isolate* isolate) {
+Smi Object::GetOrCreateHash(Isolate* isolate) {
DisallowHeapAllocation no_gc;
- Object* hash = Object::GetSimpleHash(this);
+ Object hash = Object::GetSimpleHash(*this);
if (hash->IsSmi()) return Smi::cast(hash);
DCHECK(IsJSReceiver());
- return JSReceiver::cast(this)->GetOrCreateIdentityHash(isolate);
+ return JSReceiver::cast(*this)->GetOrCreateIdentityHash(isolate);
}
-
-bool Object::SameValue(Object* other) {
- if (other == this) return true;
+bool Object::SameValue(Object other) {
+ if (other == *this) return true;
if (IsNumber() && other->IsNumber()) {
double this_value = Number();
@@ -2370,17 +2407,16 @@ bool Object::SameValue(Object* other) {
return (std::signbit(this_value) == std::signbit(other_value));
}
if (IsString() && other->IsString()) {
- return String::cast(this)->Equals(String::cast(other));
+ return String::cast(*this)->Equals(String::cast(other));
}
if (IsBigInt() && other->IsBigInt()) {
- return BigInt::EqualToBigInt(BigInt::cast(this), BigInt::cast(other));
+ return BigInt::EqualToBigInt(BigInt::cast(*this), BigInt::cast(other));
}
return false;
}
-
-bool Object::SameValueZero(Object* other) {
- if (other == this) return true;
+bool Object::SameValueZero(Object other) {
+ if (other == *this) return true;
if (IsNumber() && other->IsNumber()) {
double this_value = Number();
@@ -2390,15 +2426,14 @@ bool Object::SameValueZero(Object* other) {
(std::isnan(this_value) && std::isnan(other_value));
}
if (IsString() && other->IsString()) {
- return String::cast(this)->Equals(String::cast(other));
+ return String::cast(*this)->Equals(String::cast(other));
}
if (IsBigInt() && other->IsBigInt()) {
- return BigInt::EqualToBigInt(BigInt::cast(this), BigInt::cast(other));
+ return BigInt::EqualToBigInt(BigInt::cast(*this), BigInt::cast(other));
}
return false;
}
-
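
Both predicates above special-case numbers because IEEE `==` gets NaN and signed zeros wrong for these semantics: SameValue treats NaN as equal to itself while keeping -0 and +0 distinct, and SameValueZero does the same for NaN but collapses the two zeros. A self-contained sketch of just the numeric cases:

```cpp
#include <cassert>
#include <cmath>

// Numeric core of Object::SameValue: NaN equals NaN, but -0 != +0.
bool SameValueDouble(double a, double b) {
  if (std::isnan(a) && std::isnan(b)) return true;
  if (a == b) return std::signbit(a) == std::signbit(b);
  return false;
}

// Numeric core of Object::SameValueZero: like SameValue, except the
// signbit check is dropped, so -0 and +0 compare equal.
bool SameValueZeroDouble(double a, double b) {
  if (std::isnan(a) && std::isnan(b)) return true;
  return a == b;
}

int main() {
  double nan = std::nan("");
  assert(SameValueDouble(nan, nan));        // unlike C++ ==
  assert(!SameValueDouble(-0.0, +0.0));     // zeros kept distinct
  assert(SameValueZeroDouble(-0.0, +0.0));  // zeros collapsed
  return 0;
}
```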
MaybeHandle<Object> Object::ArraySpeciesConstructor(
Isolate* isolate, Handle<Object> original_array) {
Handle<Object> default_species = isolate->array_function();
@@ -2492,7 +2527,7 @@ V8_WARN_UNUSED_RESULT MaybeHandle<Object> Object::SpeciesConstructor(
bool Object::IterationHasObservableEffects() {
// Check that this object is an array.
if (!IsJSArray()) return true;
- JSArray* array = JSArray::cast(this);
+ JSArray array = JSArray::cast(*this);
Isolate* isolate = array->GetIsolate();
#ifdef V8_ENABLE_FORCE_SLOW_PATH
@@ -2501,7 +2536,7 @@ bool Object::IterationHasObservableEffects() {
// Check that we have the original ArrayPrototype.
if (!array->map()->prototype()->IsJSObject()) return true;
- JSObject* array_proto = JSObject::cast(array->map()->prototype());
+ JSObject array_proto = JSObject::cast(array->map()->prototype());
if (!isolate->is_initial_array_prototype(array_proto)) return true;
// Check that the ArrayPrototype hasn't been modified in a way that would
@@ -2523,42 +2558,44 @@ bool Object::IterationHasObservableEffects() {
return true;
}
-void Object::ShortPrint(FILE* out) {
+void Object::ShortPrint(FILE* out) const {
OFStream os(out);
- os << Brief(this);
+ os << Brief(*this);
}
-
-void Object::ShortPrint(StringStream* accumulator) {
+void Object::ShortPrint(StringStream* accumulator) const {
std::ostringstream os;
- os << Brief(this);
+ os << Brief(*this);
accumulator->Add(os.str().c_str());
}
+void Object::ShortPrint(std::ostream& os) const { os << Brief(*this); }
-void Object::ShortPrint(std::ostream& os) { os << Brief(this); }
+std::ostream& operator<<(std::ostream& os, const Object& obj) {
+ obj.ShortPrint(os);
+ return os;
+}
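
The new `operator<<(std::ostream&, const Object&)` overload is only possible because Object is now a one-word value type; it simply forwards to ShortPrint so every entry point shares one printer. A tiny illustration of the same wiring, with names that are illustrative rather than V8's:

```cpp
#include <cstdint>
#include <iostream>

// Minimal value handle with a ShortPrint hook, mirroring how the diff
// routes operator<< through Object::ShortPrint(std::ostream&).
class Value {
 public:
  explicit Value(std::uintptr_t ptr) : ptr_(ptr) {}
  void ShortPrint(std::ostream& os) const {
    os << "<Value 0x" << std::hex << ptr_ << std::dec << ">";
  }

 private:
  std::uintptr_t ptr_;
};

std::ostream& operator<<(std::ostream& os, const Value& v) {
  v.ShortPrint(os);  // one canonical printer, many entry points
  return os;
}

int main() {
  Value v(0x2a);
  std::cout << v << "\n";  // prints <Value 0x2a>
  return 0;
}
```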
void MaybeObject::ShortPrint(FILE* out) {
OFStream os(out);
- os << Brief(this);
+ os << Brief(*this);
}
void MaybeObject::ShortPrint(StringStream* accumulator) {
std::ostringstream os;
- os << Brief(this);
+ os << Brief(*this);
accumulator->Add(os.str().c_str());
}
-void MaybeObject::ShortPrint(std::ostream& os) { os << Brief(this); }
+void MaybeObject::ShortPrint(std::ostream& os) { os << Brief(*this); }
-Brief::Brief(const Object* v)
- : value(MaybeObject::FromObject(const_cast<Object*>(v))) {}
+Brief::Brief(const Object v) : value(v->ptr()) {}
+Brief::Brief(const MaybeObject v) : value(v.ptr()) {}
std::ostream& operator<<(std::ostream& os, const Brief& v) {
- // TODO(marja): const-correct HeapObjectShortPrint.
- MaybeObject* maybe_object = const_cast<MaybeObject*>(v.value);
- Smi* smi;
- HeapObject* heap_object;
+ MaybeObject maybe_object(v.value);
+ Smi smi;
+ HeapObject heap_object;
if (maybe_object->ToSmi(&smi)) {
smi->SmiPrint(os);
} else if (maybe_object->IsCleared()) {
@@ -2602,13 +2639,13 @@ Handle<String> String::SlowFlatten(Isolate* isolate, Handle<ConsString> cons,
Handle<SeqOneByteString> flat = isolate->factory()->NewRawOneByteString(
length, tenure).ToHandleChecked();
DisallowHeapAllocation no_gc;
- WriteToFlat(*cons, flat->GetChars(), 0, length);
+ WriteToFlat(*cons, flat->GetChars(no_gc), 0, length);
result = flat;
} else {
Handle<SeqTwoByteString> flat = isolate->factory()->NewRawTwoByteString(
length, tenure).ToHandleChecked();
DisallowHeapAllocation no_gc;
- WriteToFlat(*cons, flat->GetChars(), 0, length);
+ WriteToFlat(*cons, flat->GetChars(no_gc), 0, length);
result = flat;
}
cons->set_first(isolate, *result);
@@ -2630,7 +2667,7 @@ bool String::MakeExternal(v8::String::ExternalStringResource* resource) {
// Assert that the resource and the string are equivalent.
DCHECK(static_cast<size_t>(this->length()) == resource->length());
ScopedVector<uc16> smart_chars(this->length());
- String::WriteToFlat(this, smart_chars.start(), 0, this->length());
+ String::WriteToFlat(*this, smart_chars.start(), 0, this->length());
DCHECK_EQ(0, memcmp(smart_chars.start(), resource->data(),
resource->length() * sizeof(smart_chars[0])));
}
@@ -2641,13 +2678,13 @@ bool String::MakeExternal(v8::String::ExternalStringResource* resource) {
Isolate* isolate;
// Read-only strings cannot be made external, since that would mutate the
// string.
- if (!Isolate::FromWritableHeapObject(this, &isolate)) return false;
+ if (!Isolate::FromWritableHeapObject(*this, &isolate)) return false;
Heap* heap = isolate->heap();
bool is_one_byte = this->IsOneByteRepresentation();
bool is_internalized = this->IsInternalizedString();
- bool has_pointers = StringShape(this).IsIndirect();
+ bool has_pointers = StringShape(*this).IsIndirect();
if (has_pointers) {
- heap->NotifyObjectLayoutChange(this, size, no_allocation);
+ heap->NotifyObjectLayoutChange(*this, size, no_allocation);
}
// Morph the string to an external string by replacing the map and
// reinitializing the fields. This won't work if the space the existing
@@ -2655,7 +2692,7 @@ bool String::MakeExternal(v8::String::ExternalStringResource* resource) {
// resort to an uncached external string instead, omitting the field caching
// the address of the backing store. When we encounter uncached external
 // strings in generated code, we need to bail out to the runtime.
- Map* new_map;
+ Map new_map;
ReadOnlyRoots roots(heap);
if (size < ExternalString::kSize) {
if (is_internalized) {
@@ -2693,9 +2730,9 @@ bool String::MakeExternal(v8::String::ExternalStringResource* resource) {
// the left-over space to avoid races with the sweeper thread.
this->synchronized_set_map(new_map);
- ExternalTwoByteString* self = ExternalTwoByteString::cast(this);
+ ExternalTwoByteString self = ExternalTwoByteString::cast(*this);
self->SetResource(isolate, resource);
- heap->RegisterExternalString(this);
+ heap->RegisterExternalString(*this);
if (is_internalized) self->Hash(); // Force regeneration of the hash value.
return true;
}
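
From the embedder's side, this in-place externalization is driven through the public v8::String::ExternalStringResource interface: V8 keeps the map-swapped string but reads its characters from memory the embedder owns. A rough sketch of a two-byte resource; the usage outline in the trailing comment is hypothetical and not verified against this exact V8 version.

```cpp
#include <v8.h>

#include <vector>

// Sketch of a two-byte external resource; after MakeExternal succeeds,
// V8 calls data()/length() instead of keeping the characters in its own
// heap. The resource must outlive the string; on success V8 takes
// ownership and disposes it when the string dies.
class TwoByteResource : public v8::String::ExternalStringResource {
 public:
  explicit TwoByteResource(std::vector<uint16_t> chars)
      : chars_(std::move(chars)) {}
  const uint16_t* data() const override { return chars_.data(); }
  size_t length() const override { return chars_.size(); }

 private:
  std::vector<uint16_t> chars_;
};

// Hypothetical embedder usage, assuming `str` is a v8::Local<v8::String>
// for a writable (non read-only) heap string:
//
//   auto* resource = new TwoByteResource(CopyCharsOf(str));
//   if (!str->MakeExternal(resource)) {
//     delete resource;  // string was not externalizable; we keep ownership
//   }
```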
@@ -2713,11 +2750,11 @@ bool String::MakeExternal(v8::String::ExternalOneByteStringResource* resource) {
DCHECK(static_cast<size_t>(this->length()) == resource->length());
if (this->IsTwoByteRepresentation()) {
ScopedVector<uint16_t> smart_chars(this->length());
- String::WriteToFlat(this, smart_chars.start(), 0, this->length());
+ String::WriteToFlat(*this, smart_chars.start(), 0, this->length());
DCHECK(String::IsOneByte(smart_chars.start(), this->length()));
}
ScopedVector<char> smart_chars(this->length());
- String::WriteToFlat(this, smart_chars.start(), 0, this->length());
+ String::WriteToFlat(*this, smart_chars.start(), 0, this->length());
DCHECK_EQ(0, memcmp(smart_chars.start(), resource->data(),
resource->length() * sizeof(smart_chars[0])));
}
@@ -2728,13 +2765,13 @@ bool String::MakeExternal(v8::String::ExternalOneByteStringResource* resource) {
Isolate* isolate;
// Read-only strings cannot be made external, since that would mutate the
// string.
- if (!Isolate::FromWritableHeapObject(this, &isolate)) return false;
+ if (!Isolate::FromWritableHeapObject(*this, &isolate)) return false;
Heap* heap = isolate->heap();
bool is_internalized = this->IsInternalizedString();
- bool has_pointers = StringShape(this).IsIndirect();
+ bool has_pointers = StringShape(*this).IsIndirect();
if (has_pointers) {
- heap->NotifyObjectLayoutChange(this, size, no_allocation);
+ heap->NotifyObjectLayoutChange(*this, size, no_allocation);
}
// Morph the string to an external string by replacing the map and
@@ -2743,7 +2780,7 @@ bool String::MakeExternal(v8::String::ExternalOneByteStringResource* resource) {
// resort to an uncached external string instead, omitting the field caching
// the address of the backing store. When we encounter uncached external
 // strings in generated code, we need to bail out to the runtime.
- Map* new_map;
+ Map new_map;
ReadOnlyRoots roots(heap);
if (size < ExternalString::kSize) {
new_map = is_internalized
@@ -2767,26 +2804,26 @@ bool String::MakeExternal(v8::String::ExternalOneByteStringResource* resource) {
// the left-over space to avoid races with the sweeper thread.
this->synchronized_set_map(new_map);
- ExternalOneByteString* self = ExternalOneByteString::cast(this);
+ ExternalOneByteString self = ExternalOneByteString::cast(*this);
self->SetResource(isolate, resource);
- heap->RegisterExternalString(this);
+ heap->RegisterExternalString(*this);
if (is_internalized) self->Hash(); // Force regeneration of the hash value.
return true;
}
bool String::SupportsExternalization() {
if (this->IsThinString()) {
- return i::ThinString::cast(this)->actual()->SupportsExternalization();
+ return i::ThinString::cast(*this)->actual()->SupportsExternalization();
}
Isolate* isolate;
// RO_SPACE strings cannot be externalized.
- if (!Isolate::FromWritableHeapObject(this, &isolate)) {
+ if (!Isolate::FromWritableHeapObject(*this, &isolate)) {
return false;
}
// Already an external string.
- if (StringShape(this).IsExternal()) {
+ if (StringShape(*this).IsExternal()) {
return false;
}
@@ -2794,9 +2831,11 @@ bool String::SupportsExternalization() {
}
void String::StringShortPrint(StringStream* accumulator, bool show_details) {
+ const char* internalized_marker = this->IsInternalizedString() ? "#" : "";
+
int len = length();
if (len > kMaxShortPrintLength) {
- accumulator->Add("<Very long string[%u]>", len);
+ accumulator->Add("<Very long string[%s%u]>", internalized_marker, len);
return;
}
@@ -2805,7 +2844,7 @@ void String::StringShortPrint(StringStream* accumulator, bool show_details) {
return;
}
- StringCharacterStream stream(this);
+ StringCharacterStream stream(*this);
bool truncated = false;
if (len > kMaxShortPrintLength) {
@@ -2820,9 +2859,10 @@ void String::StringShortPrint(StringStream* accumulator, bool show_details) {
one_byte = false;
}
}
- stream.Reset(this);
+ stream.Reset(*this);
if (one_byte) {
- if (show_details) accumulator->Add("<String[%u]: ", length());
+ if (show_details)
+ accumulator->Add("<String[%s%u]: ", internalized_marker, length());
for (int i = 0; i < len; i++) {
accumulator->Put(static_cast<char>(stream.GetNext()));
}
@@ -2830,7 +2870,8 @@ void String::StringShortPrint(StringStream* accumulator, bool show_details) {
} else {
// Backslash indicates that the string contains control
// characters and that backslashes are therefore escaped.
- if (show_details) accumulator->Add("<String[%u]\\: ", length());
+ if (show_details)
+ accumulator->Add("<String[%s%u]\\: ", internalized_marker, length());
for (int i = 0; i < len; i++) {
uint16_t c = stream.GetNext();
if (c == '\n') {
@@ -2858,7 +2899,7 @@ void String::StringShortPrint(StringStream* accumulator, bool show_details) {
void String::PrintUC16(std::ostream& os, int start, int end) { // NOLINT
if (end < 0) end = length();
- StringCharacterStream stream(this, start);
+ StringCharacterStream stream(*this, start);
for (int i = start; i < end && stream.HasMore(); i++) {
os << AsUC16(stream.GetNext());
}
@@ -2868,18 +2909,18 @@ void String::PrintUC16(std::ostream& os, int start, int end) { // NOLINT
void JSObject::JSObjectShortPrint(StringStream* accumulator) {
switch (map()->instance_type()) {
case JS_ARRAY_TYPE: {
- double length = JSArray::cast(this)->length()->IsUndefined()
+ double length = JSArray::cast(*this)->length()->IsUndefined()
? 0
- : JSArray::cast(this)->length()->Number();
+ : JSArray::cast(*this)->length()->Number();
accumulator->Add("<JSArray[%u]>", static_cast<uint32_t>(length));
break;
}
case JS_BOUND_FUNCTION_TYPE: {
- JSBoundFunction* bound_function = JSBoundFunction::cast(this);
+ JSBoundFunction bound_function = JSBoundFunction::cast(*this);
accumulator->Add("<JSBoundFunction");
- accumulator->Add(
- " (BoundTargetFunction %p)>",
- reinterpret_cast<void*>(bound_function->bound_target_function()));
+ accumulator->Add(" (BoundTargetFunction %p)>",
+ reinterpret_cast<void*>(
+ bound_function->bound_target_function().ptr()));
break;
}
case JS_WEAK_MAP_TYPE: {
@@ -2892,7 +2933,7 @@ void JSObject::JSObjectShortPrint(StringStream* accumulator) {
}
case JS_REGEXP_TYPE: {
accumulator->Add("<JSRegExp");
- JSRegExp* regexp = JSRegExp::cast(this);
+ JSRegExp regexp = JSRegExp::cast(*this);
if (regexp->source()->IsString()) {
accumulator->Add(" ");
String::cast(regexp->source())->StringShortPrint(accumulator);
@@ -2902,11 +2943,11 @@ void JSObject::JSObjectShortPrint(StringStream* accumulator) {
break;
}
case JS_FUNCTION_TYPE: {
- JSFunction* function = JSFunction::cast(this);
- Object* fun_name = function->shared()->DebugName();
+ JSFunction function = JSFunction::cast(*this);
+ Object fun_name = function->shared()->DebugName();
bool printed = false;
if (fun_name->IsString()) {
- String* str = String::cast(fun_name);
+ String str = String::cast(fun_name);
if (str->length() > 0) {
accumulator->Add("<JSFunction ");
accumulator->Put(str);
@@ -2917,10 +2958,9 @@ void JSObject::JSObjectShortPrint(StringStream* accumulator) {
accumulator->Add("<JSFunction");
}
if (FLAG_trace_file_names) {
- Object* source_name =
- Script::cast(function->shared()->script())->name();
+ Object source_name = Script::cast(function->shared()->script())->name();
if (source_name->IsString()) {
- String* str = String::cast(source_name);
+ String str = String::cast(source_name);
if (str->length() > 0) {
accumulator->Add(" <");
accumulator->Put(str);
@@ -2929,7 +2969,7 @@ void JSObject::JSObjectShortPrint(StringStream* accumulator) {
}
}
accumulator->Add(" (sfi = %p)",
- reinterpret_cast<void*>(function->shared()));
+ reinterpret_cast<void*>(function->shared().ptr()));
accumulator->Put('>');
break;
}
@@ -2937,6 +2977,10 @@ void JSObject::JSObjectShortPrint(StringStream* accumulator) {
accumulator->Add("<JSGenerator>");
break;
}
+ case JS_ASYNC_FUNCTION_OBJECT_TYPE: {
+ accumulator->Add("<JSAsyncFunctionObject>");
+ break;
+ }
case JS_ASYNC_GENERATOR_OBJECT_TYPE: {
accumulator->Add("<JS AsyncGenerator>");
break;
@@ -2945,9 +2989,9 @@ void JSObject::JSObjectShortPrint(StringStream* accumulator) {
// All other JSObjects are rather similar to each other (JSObject,
// JSGlobalProxy, JSGlobalObject, JSUndetectable, JSValue).
default: {
- Map* map_of_this = map();
+ Map map_of_this = map();
Heap* heap = GetHeap();
- Object* constructor = map_of_this->GetConstructor();
+ Object constructor = map_of_this->GetConstructor();
bool printed = false;
if (constructor->IsHeapObject() &&
!heap->Contains(HeapObject::cast(constructor))) {
@@ -2958,7 +3002,7 @@ void JSObject::JSObjectShortPrint(StringStream* accumulator) {
if (!heap->Contains(JSFunction::cast(constructor)->shared())) {
accumulator->Add("!!!INVALID SHARED ON CONSTRUCTOR!!!");
} else {
- String* constructor_name =
+ String constructor_name =
JSFunction::cast(constructor)->shared()->Name();
if (constructor_name->length() > 0) {
accumulator->Add(global_object ? "<GlobalObject " : "<");
@@ -2980,7 +3024,7 @@ void JSObject::JSObjectShortPrint(StringStream* accumulator) {
}
if (IsJSValue()) {
accumulator->Add(" value = ");
- JSValue::cast(this)->value()->ShortPrint(accumulator);
+ JSValue::cast(*this)->value()->ShortPrint(accumulator);
}
accumulator->Put('>');
break;
@@ -3028,11 +3072,11 @@ void Map::PrintReconfiguration(Isolate* isolate, FILE* file, int modify_index,
PropertyAttributes attributes) {
OFStream os(file);
os << "[reconfiguring]";
- Name* name = instance_descriptors()->GetKey(modify_index);
+ Name name = instance_descriptors()->GetKey(modify_index);
if (name->IsString()) {
String::cast(name)->PrintOn(file);
} else {
- os << "{symbol " << static_cast<void*>(name) << "}";
+ os << "{symbol " << reinterpret_cast<void*>(name.ptr()) << "}";
}
os << ": " << (kind == kData ? "kData" : "ACCESSORS") << ", attrs: ";
os << attributes << " [";
@@ -3040,12 +3084,11 @@ void Map::PrintReconfiguration(Isolate* isolate, FILE* file, int modify_index,
os << "]\n";
}
-VisitorId Map::GetVisitorId(Map* map) {
+VisitorId Map::GetVisitorId(Map map) {
STATIC_ASSERT(kVisitorIdCount <= 256);
const int instance_type = map->instance_type();
- const bool has_unboxed_fields =
- FLAG_unbox_double_fields && !map->HasFastPointerLayout();
+
if (instance_type < FIRST_NONSTRING_TYPE) {
switch (instance_type & kStringRepresentationMask) {
case kSeqStringTag:
@@ -3084,11 +3127,15 @@ VisitorId Map::GetVisitorId(Map* map) {
case FREE_SPACE_TYPE:
return kVisitFreeSpace;
+ case EMBEDDER_DATA_ARRAY_TYPE:
+ return kVisitEmbedderDataArray;
+
case FIXED_ARRAY_TYPE:
case OBJECT_BOILERPLATE_DESCRIPTION_TYPE:
case HASH_TABLE_TYPE:
case ORDERED_HASH_MAP_TYPE:
case ORDERED_HASH_SET_TYPE:
+ case ORDERED_NAME_DICTIONARY_TYPE:
case NAME_DICTIONARY_TYPE:
case GLOBAL_DICTIONARY_TYPE:
case NUMBER_DICTIONARY_TYPE:
@@ -3096,6 +3143,8 @@ VisitorId Map::GetVisitorId(Map* map) {
case STRING_TABLE_TYPE:
case SCOPE_INFO_TYPE:
case SCRIPT_CONTEXT_TABLE_TYPE:
+ return kVisitFixedArray;
+
case AWAIT_CONTEXT_TYPE:
case BLOCK_CONTEXT_TYPE:
case CATCH_CONTEXT_TYPE:
@@ -3103,17 +3152,18 @@ VisitorId Map::GetVisitorId(Map* map) {
case EVAL_CONTEXT_TYPE:
case FUNCTION_CONTEXT_TYPE:
case MODULE_CONTEXT_TYPE:
- case NATIVE_CONTEXT_TYPE:
case SCRIPT_CONTEXT_TYPE:
case WITH_CONTEXT_TYPE:
- return kVisitFixedArray;
+ return kVisitContext;
+
+ case NATIVE_CONTEXT_TYPE:
+ return kVisitNativeContext;
case EPHEMERON_HASH_TABLE_TYPE:
return kVisitEphemeronHashTable;
case WEAK_FIXED_ARRAY_TYPE:
case WEAK_ARRAY_LIST_TYPE:
- case DESCRIPTOR_ARRAY_TYPE:
return kVisitWeakArray;
case FIXED_DOUBLE_ARRAY_TYPE:
@@ -3143,6 +3193,9 @@ VisitorId Map::GetVisitorId(Map* map) {
case PROPERTY_CELL_TYPE:
return kVisitPropertyCell;
+ case DESCRIPTOR_ARRAY_TYPE:
+ return kVisitDescriptorArray;
+
case TRANSITION_ARRAY_TYPE:
return kVisitTransitionArray;
@@ -3168,6 +3221,9 @@ VisitorId Map::GetVisitorId(Map* map) {
case JS_DATA_VIEW_TYPE:
return kVisitJSDataView;
+ case JS_FUNCTION_TYPE:
+ return kVisitJSFunction;
+
case JS_TYPED_ARRAY_TYPE:
return kVisitJSTypedArray;
@@ -3177,20 +3233,23 @@ VisitorId Map::GetVisitorId(Map* map) {
case SMALL_ORDERED_HASH_SET_TYPE:
return kVisitSmallOrderedHashSet;
+ case SMALL_ORDERED_NAME_DICTIONARY_TYPE:
+ return kVisitSmallOrderedNameDictionary;
+
case CODE_DATA_CONTAINER_TYPE:
return kVisitCodeDataContainer;
case WASM_INSTANCE_TYPE:
return kVisitWasmInstanceObject;
- case PRE_PARSED_SCOPE_DATA_TYPE:
- return kVisitPreParsedScopeData;
+ case PREPARSE_DATA_TYPE:
+ return kVisitPreparseData;
- case UNCOMPILED_DATA_WITHOUT_PRE_PARSED_SCOPE_TYPE:
- return kVisitUncompiledDataWithoutPreParsedScope;
+ case UNCOMPILED_DATA_WITHOUT_PREPARSE_DATA_TYPE:
+ return kVisitUncompiledDataWithoutPreparseData;
- case UNCOMPILED_DATA_WITH_PRE_PARSED_SCOPE_TYPE:
- return kVisitUncompiledDataWithPreParsedScope;
+ case UNCOMPILED_DATA_WITH_PREPARSE_DATA_TYPE:
+ return kVisitUncompiledDataWithPreparseData;
case JS_OBJECT_TYPE:
case JS_ERROR_TYPE:
@@ -3198,13 +3257,13 @@ VisitorId Map::GetVisitorId(Map* map) {
case JS_ASYNC_FROM_SYNC_ITERATOR_TYPE:
case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
case JS_GENERATOR_OBJECT_TYPE:
+ case JS_ASYNC_FUNCTION_OBJECT_TYPE:
case JS_ASYNC_GENERATOR_OBJECT_TYPE:
case JS_MODULE_NAMESPACE_TYPE:
case JS_VALUE_TYPE:
case JS_DATE_TYPE:
case JS_ARRAY_ITERATOR_TYPE:
case JS_ARRAY_TYPE:
- case JS_FUNCTION_TYPE:
case JS_GLOBAL_PROXY_TYPE:
case JS_GLOBAL_OBJECT_TYPE:
case JS_MESSAGE_OBJECT_TYPE:
@@ -3219,6 +3278,8 @@ VisitorId Map::GetVisitorId(Map* map) {
case JS_PROMISE_TYPE:
case JS_REGEXP_TYPE:
case JS_REGEXP_STRING_ITERATOR_TYPE:
+ case JS_WEAK_FACTORY_CLEANUP_ITERATOR_TYPE:
+ case JS_WEAK_FACTORY_TYPE:
#ifdef V8_INTL_SUPPORT
case JS_INTL_V8_BREAK_ITERATOR_TYPE:
case JS_INTL_COLLATOR_TYPE:
@@ -3228,6 +3289,7 @@ VisitorId Map::GetVisitorId(Map* map) {
case JS_INTL_NUMBER_FORMAT_TYPE:
case JS_INTL_PLURAL_RULES_TYPE:
case JS_INTL_RELATIVE_TIME_FORMAT_TYPE:
+ case JS_INTL_SEGMENT_ITERATOR_TYPE:
case JS_INTL_SEGMENTER_TYPE:
#endif // V8_INTL_SUPPORT
case WASM_EXCEPTION_TYPE:
@@ -3235,12 +3297,22 @@ VisitorId Map::GetVisitorId(Map* map) {
case WASM_MEMORY_TYPE:
case WASM_MODULE_TYPE:
case WASM_TABLE_TYPE:
- case JS_BOUND_FUNCTION_TYPE:
- return has_unboxed_fields ? kVisitJSObject : kVisitJSObjectFast;
+ case JS_BOUND_FUNCTION_TYPE: {
+ const bool has_raw_data_fields =
+ (FLAG_unbox_double_fields && !map->HasFastPointerLayout()) ||
+ (COMPRESS_POINTERS_BOOL && JSObject::GetEmbedderFieldCount(map) > 0);
+ return has_raw_data_fields ? kVisitJSObject : kVisitJSObjectFast;
+ }
case JS_API_OBJECT_TYPE:
case JS_SPECIAL_API_OBJECT_TYPE:
return kVisitJSApiObject;
+ case JS_WEAK_REF_TYPE:
+ return kVisitJSWeakRef;
+
+ case JS_WEAK_CELL_TYPE:
+ return kVisitJSWeakCell;
+
case FILLER_TYPE:
case FOREIGN_TYPE:
case HEAP_NUMBER_TYPE:
@@ -3294,11 +3366,11 @@ void Map::PrintGeneralization(
MaybeHandle<FieldType> new_field_type, MaybeHandle<Object> new_value) {
OFStream os(file);
os << "[generalizing]";
- Name* name = instance_descriptors()->GetKey(modify_index);
+ Name name = instance_descriptors()->GetKey(modify_index);
if (name->IsString()) {
String::cast(name)->PrintOn(file);
} else {
- os << "{symbol " << static_cast<void*>(name) << "}";
+ os << "{symbol " << reinterpret_cast<void*>(name.ptr()) << "}";
}
os << ":";
if (descriptor_to_field) {
@@ -3329,17 +3401,15 @@ void Map::PrintGeneralization(
os << "]\n";
}
-
-void JSObject::PrintInstanceMigration(FILE* file,
- Map* original_map,
- Map* new_map) {
+void JSObject::PrintInstanceMigration(FILE* file, Map original_map,
+ Map new_map) {
if (new_map->is_dictionary_map()) {
PrintF(file, "[migrating to slow]\n");
return;
}
PrintF(file, "[migrating]");
- DescriptorArray* o = original_map->instance_descriptors();
- DescriptorArray* n = new_map->instance_descriptors();
+ DescriptorArray o = original_map->instance_descriptors();
+ DescriptorArray n = new_map->instance_descriptors();
for (int i = 0; i < original_map->NumberOfOwnDescriptors(); i++) {
Representation o_r = o->GetDetails(i).representation();
Representation n_r = n->GetDetails(i).representation();
@@ -3348,11 +3418,11 @@ void JSObject::PrintInstanceMigration(FILE* file,
PrintF(file, ":%s->%s ", o_r.Mnemonic(), n_r.Mnemonic());
} else if (o->GetDetails(i).location() == kDescriptor &&
n->GetDetails(i).location() == kField) {
- Name* name = o->GetKey(i);
+ Name name = o->GetKey(i);
if (name->IsString()) {
String::cast(name)->PrintOn(file);
} else {
- PrintF(file, "{symbol %p}", static_cast<void*>(name));
+ PrintF(file, "{symbol %p}", reinterpret_cast<void*>(name.ptr()));
}
PrintF(file, " ");
}
@@ -3364,42 +3434,42 @@ void JSObject::PrintInstanceMigration(FILE* file,
PrintF(file, "\n");
}
-bool JSObject::IsUnmodifiedApiObject(Object** o) {
- Object* object = *o;
+bool JSObject::IsUnmodifiedApiObject(FullObjectSlot o) {
+ Object object = *o;
if (object->IsSmi()) return false;
- HeapObject* heap_object = HeapObject::cast(object);
+ HeapObject heap_object = HeapObject::cast(object);
if (!object->IsJSObject()) return false;
- JSObject* js_object = JSObject::cast(object);
+ JSObject js_object = JSObject::cast(object);
if (!js_object->IsDroppableApiWrapper()) return false;
- Object* maybe_constructor = js_object->map()->GetConstructor();
+ Object maybe_constructor = js_object->map()->GetConstructor();
if (!maybe_constructor->IsJSFunction()) return false;
- JSFunction* constructor = JSFunction::cast(maybe_constructor);
+ JSFunction constructor = JSFunction::cast(maybe_constructor);
if (js_object->elements()->length() != 0) return false;
return constructor->initial_map() == heap_object->map();
}
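
FullObjectSlot replaces `Object**` as the type for "address of a tagged word": dereferencing a slot yields an Object value rather than a raw pointer, which is the slot-side counterpart of the handle change seen throughout this file. A standalone sketch of the slot idea, reusing the illustrative ObjectRef shape from earlier; none of these names are the real classes.

```cpp
#include <cassert>
#include <cstdint>

using Address = std::uintptr_t;

// Value handle as before: one tagged word.
class ObjectRef {
 public:
  explicit ObjectRef(Address ptr) : ptr_(ptr) {}
  Address ptr() const { return ptr_; }

 private:
  Address ptr_;
};

// Illustrative FullObjectSlot: the *location* of a tagged word. Unlike
// Object**, dereferencing yields a value handle, not a raw pointer.
class FullSlot {
 public:
  explicit FullSlot(Address* location) : location_(location) {}
  ObjectRef operator*() const { return ObjectRef(*location_); }
  void store(ObjectRef value) const { *location_ = value.ptr(); }

 private:
  Address* location_;
};

int main() {
  Address word = 0x1234;
  FullSlot slot(&word);
  ObjectRef obj = *slot;           // read through the slot
  slot.store(ObjectRef(0x5678));   // write through the slot
  assert(obj.ptr() == 0x1234 && word == 0x5678);
  return 0;
}
```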
void HeapObject::HeapObjectShortPrint(std::ostream& os) { // NOLINT
- os << AsHex(reinterpret_cast<Address>(this), kPointerHexDigits, true) << " ";
+ os << AsHex(this->ptr(), kSystemPointerHexDigits, true) << " ";
if (IsString()) {
HeapStringAllocator allocator;
StringStream accumulator(&allocator);
- String::cast(this)->StringShortPrint(&accumulator);
+ String::cast(*this)->StringShortPrint(&accumulator);
os << accumulator.ToCString().get();
return;
}
if (IsJSObject()) {
HeapStringAllocator allocator;
StringStream accumulator(&allocator);
- JSObject::cast(this)->JSObjectShortPrint(&accumulator);
+ JSObject::cast(*this)->JSObjectShortPrint(&accumulator);
os << accumulator.ToCString().get();
return;
}
switch (map()->instance_type()) {
case MAP_TYPE: {
os << "<Map";
- Map* mapInstance = Map::cast(this);
+ Map mapInstance = Map::cast(*this);
if (mapInstance->IsJSObjectMap()) {
os << "(" << ElementsKindToString(mapInstance->elements_kind()) << ")";
} else if (mapInstance->instance_size() != kVariableSizeSentinel) {
@@ -3411,100 +3481,105 @@ void HeapObject::HeapObjectShortPrint(std::ostream& os) { // NOLINT
os << "<AwaitContext generator= ";
HeapStringAllocator allocator;
StringStream accumulator(&allocator);
- Context::cast(this)->extension()->ShortPrint(&accumulator);
+ Context::cast(*this)->extension()->ShortPrint(&accumulator);
os << accumulator.ToCString().get();
os << '>';
break;
}
case BLOCK_CONTEXT_TYPE:
- os << "<BlockContext[" << FixedArray::cast(this)->length() << "]>";
+ os << "<BlockContext[" << Context::cast(*this)->length() << "]>";
break;
case CATCH_CONTEXT_TYPE:
- os << "<CatchContext[" << FixedArray::cast(this)->length() << "]>";
+ os << "<CatchContext[" << Context::cast(*this)->length() << "]>";
break;
case DEBUG_EVALUATE_CONTEXT_TYPE:
- os << "<DebugEvaluateContext[" << FixedArray::cast(this)->length()
- << "]>";
+ os << "<DebugEvaluateContext[" << Context::cast(*this)->length() << "]>";
break;
case EVAL_CONTEXT_TYPE:
- os << "<EvalContext[" << FixedArray::cast(this)->length() << "]>";
+ os << "<EvalContext[" << Context::cast(*this)->length() << "]>";
break;
case FUNCTION_CONTEXT_TYPE:
- os << "<FunctionContext[" << FixedArray::cast(this)->length() << "]>";
+ os << "<FunctionContext[" << Context::cast(*this)->length() << "]>";
break;
case MODULE_CONTEXT_TYPE:
- os << "<ModuleContext[" << FixedArray::cast(this)->length() << "]>";
+ os << "<ModuleContext[" << Context::cast(*this)->length() << "]>";
break;
case NATIVE_CONTEXT_TYPE:
- os << "<NativeContext[" << FixedArray::cast(this)->length() << "]>";
+ os << "<NativeContext[" << Context::cast(*this)->length() << "]>";
break;
case SCRIPT_CONTEXT_TYPE:
- os << "<ScriptContext[" << FixedArray::cast(this)->length() << "]>";
+ os << "<ScriptContext[" << Context::cast(*this)->length() << "]>";
break;
case WITH_CONTEXT_TYPE:
- os << "<WithContext[" << FixedArray::cast(this)->length() << "]>";
+ os << "<WithContext[" << Context::cast(*this)->length() << "]>";
break;
case SCRIPT_CONTEXT_TABLE_TYPE:
- os << "<ScriptContextTable[" << FixedArray::cast(this)->length() << "]>";
+ os << "<ScriptContextTable[" << FixedArray::cast(*this)->length() << "]>";
break;
case HASH_TABLE_TYPE:
- os << "<HashTable[" << FixedArray::cast(this)->length() << "]>";
+ os << "<HashTable[" << FixedArray::cast(*this)->length() << "]>";
break;
case ORDERED_HASH_MAP_TYPE:
- os << "<OrderedHashMap[" << FixedArray::cast(this)->length() << "]>";
+ os << "<OrderedHashMap[" << FixedArray::cast(*this)->length() << "]>";
break;
case ORDERED_HASH_SET_TYPE:
- os << "<OrderedHashSet[" << FixedArray::cast(this)->length() << "]>";
+ os << "<OrderedHashSet[" << FixedArray::cast(*this)->length() << "]>";
+ break;
+ case ORDERED_NAME_DICTIONARY_TYPE:
+ os << "<OrderedNameDictionary[" << FixedArray::cast(*this)->length()
+ << "]>";
break;
case NAME_DICTIONARY_TYPE:
- os << "<NameDictionary[" << FixedArray::cast(this)->length() << "]>";
+ os << "<NameDictionary[" << FixedArray::cast(*this)->length() << "]>";
break;
case GLOBAL_DICTIONARY_TYPE:
- os << "<GlobalDictionary[" << FixedArray::cast(this)->length() << "]>";
+ os << "<GlobalDictionary[" << FixedArray::cast(*this)->length() << "]>";
break;
case NUMBER_DICTIONARY_TYPE:
- os << "<NumberDictionary[" << FixedArray::cast(this)->length() << "]>";
+ os << "<NumberDictionary[" << FixedArray::cast(*this)->length() << "]>";
break;
case SIMPLE_NUMBER_DICTIONARY_TYPE:
- os << "<SimpleNumberDictionary[" << FixedArray::cast(this)->length()
+ os << "<SimpleNumberDictionary[" << FixedArray::cast(*this)->length()
<< "]>";
break;
case STRING_TABLE_TYPE:
- os << "<StringTable[" << FixedArray::cast(this)->length() << "]>";
+ os << "<StringTable[" << FixedArray::cast(*this)->length() << "]>";
break;
case FIXED_ARRAY_TYPE:
- os << "<FixedArray[" << FixedArray::cast(this)->length() << "]>";
+ os << "<FixedArray[" << FixedArray::cast(*this)->length() << "]>";
break;
case OBJECT_BOILERPLATE_DESCRIPTION_TYPE:
- os << "<ObjectBoilerplateDescription[" << FixedArray::cast(this)->length()
- << "]>";
+ os << "<ObjectBoilerplateDescription["
+ << FixedArray::cast(*this)->length() << "]>";
break;
case FIXED_DOUBLE_ARRAY_TYPE:
- os << "<FixedDoubleArray[" << FixedDoubleArray::cast(this)->length()
+ os << "<FixedDoubleArray[" << FixedDoubleArray::cast(*this)->length()
<< "]>";
break;
case BYTE_ARRAY_TYPE:
- os << "<ByteArray[" << ByteArray::cast(this)->length() << "]>";
+ os << "<ByteArray[" << ByteArray::cast(*this)->length() << "]>";
break;
case BYTECODE_ARRAY_TYPE:
- os << "<BytecodeArray[" << BytecodeArray::cast(this)->length() << "]>";
+ os << "<BytecodeArray[" << BytecodeArray::cast(*this)->length() << "]>";
break;
case DESCRIPTOR_ARRAY_TYPE:
- os << "<DescriptorArray[" << DescriptorArray::cast(this)->length()
- << "]>";
+ os << "<DescriptorArray["
+ << DescriptorArray::cast(*this)->number_of_descriptors() << "]>";
break;
case TRANSITION_ARRAY_TYPE:
- os << "<TransitionArray[" << TransitionArray::cast(this)->length()
+ os << "<TransitionArray[" << TransitionArray::cast(*this)->length()
<< "]>";
break;
case PROPERTY_ARRAY_TYPE:
- os << "<PropertyArray[" << PropertyArray::cast(this)->length() << "]>";
+ os << "<PropertyArray[" << PropertyArray::cast(*this)->length() << "]>";
break;
case FEEDBACK_CELL_TYPE: {
{
ReadOnlyRoots roots = GetReadOnlyRoots();
os << "<FeedbackCell[";
-      if (map() == roots.no_closures_cell_map()) {
+      if (map() == roots.no_feedback_cell_map()) {
+        os << "no feedback";
+      } else if (map() == roots.no_closures_cell_map()) {
os << "no closures";
} else if (map() == roots.one_closure_cell_map()) {
os << "one closure";
@@ -3518,45 +3593,46 @@ void HeapObject::HeapObjectShortPrint(std::ostream& os) { // NOLINT
break;
}
case FEEDBACK_VECTOR_TYPE:
- os << "<FeedbackVector[" << FeedbackVector::cast(this)->length() << "]>";
+ os << "<FeedbackVector[" << FeedbackVector::cast(*this)->length() << "]>";
break;
case FREE_SPACE_TYPE:
- os << "<FreeSpace[" << FreeSpace::cast(this)->size() << "]>";
+ os << "<FreeSpace[" << FreeSpace::cast(*this)->size() << "]>";
break;
-#define TYPED_ARRAY_SHORT_PRINT(Type, type, TYPE, ctype) \
- case FIXED_##TYPE##_ARRAY_TYPE: \
- os << "<Fixed" #Type "Array[" << Fixed##Type##Array::cast(this)->length() \
- << "]>"; \
+#define TYPED_ARRAY_SHORT_PRINT(Type, type, TYPE, ctype) \
+ case FIXED_##TYPE##_ARRAY_TYPE: \
+ os << "<Fixed" #Type "Array[" << Fixed##Type##Array::cast(*this)->length() \
+ << "]>"; \
break;
TYPED_ARRAYS(TYPED_ARRAY_SHORT_PRINT)
#undef TYPED_ARRAY_SHORT_PRINT
- case PRE_PARSED_SCOPE_DATA_TYPE: {
- PreParsedScopeData* data = PreParsedScopeData::cast(this);
- os << "<PreParsedScopeData[" << data->length() << "]>";
+ case PREPARSE_DATA_TYPE: {
+ PreparseData data = PreparseData::cast(*this);
+ os << "<PreparseData[data=" << data->data_length()
+ << " children=" << data->children_length() << "]>";
break;
}
- case UNCOMPILED_DATA_WITHOUT_PRE_PARSED_SCOPE_TYPE: {
- UncompiledDataWithoutPreParsedScope* data =
- UncompiledDataWithoutPreParsedScope::cast(this);
- os << "<UncompiledDataWithoutPreParsedScope (" << data->start_position()
+ case UNCOMPILED_DATA_WITHOUT_PREPARSE_DATA_TYPE: {
+ UncompiledDataWithoutPreparseData data =
+ UncompiledDataWithoutPreparseData::cast(*this);
+ os << "<UncompiledDataWithoutPreparseData (" << data->start_position()
<< ", " << data->end_position() << ")]>";
break;
}
- case UNCOMPILED_DATA_WITH_PRE_PARSED_SCOPE_TYPE: {
- UncompiledDataWithPreParsedScope* data =
- UncompiledDataWithPreParsedScope::cast(this);
- os << "<UncompiledDataWithPreParsedScope (" << data->start_position()
+ case UNCOMPILED_DATA_WITH_PREPARSE_DATA_TYPE: {
+ UncompiledDataWithPreparseData data =
+ UncompiledDataWithPreparseData::cast(*this);
+ os << "<UncompiledDataWithPreparseData (" << data->start_position()
<< ", " << data->end_position()
- << ") preparsed=" << Brief(data->pre_parsed_scope_data()) << ">";
+ << ") preparsed=" << Brief(data->preparse_data()) << ">";
break;
}
case SHARED_FUNCTION_INFO_TYPE: {
- SharedFunctionInfo* shared = SharedFunctionInfo::cast(this);
+ SharedFunctionInfo shared = SharedFunctionInfo::cast(*this);
std::unique_ptr<char[]> debug_name = shared->DebugName()->ToCString();
if (debug_name[0] != 0) {
os << "<SharedFunctionInfo " << debug_name.get() << ">";
@@ -3568,33 +3644,31 @@ void HeapObject::HeapObjectShortPrint(std::ostream& os) { // NOLINT
case JS_MESSAGE_OBJECT_TYPE:
os << "<JSMessageObject>";
break;
-#define MAKE_STRUCT_CASE(TYPE, Name, name) \
- case TYPE: \
- os << "<" #Name; \
- Name::cast(this)->BriefPrintDetails(os); \
- os << ">"; \
+#define MAKE_STRUCT_CASE(TYPE, Name, name) \
+ case TYPE: \
+ os << "<" #Name; \
+ Name::cast(*this)->BriefPrintDetails(os); \
+ os << ">"; \
break;
STRUCT_LIST(MAKE_STRUCT_CASE)
#undef MAKE_STRUCT_CASE
case ALLOCATION_SITE_TYPE: {
os << "<AllocationSite";
- AllocationSite::cast(this)->BriefPrintDetails(os);
+ AllocationSite::cast(*this)->BriefPrintDetails(os);
os << ">";
break;
}
case SCOPE_INFO_TYPE: {
- ScopeInfo* scope = ScopeInfo::cast(this);
+ ScopeInfo scope = ScopeInfo::cast(*this);
os << "<ScopeInfo";
if (scope->length()) os << " " << scope->scope_type() << " ";
os << "[" << scope->length() << "]>";
break;
}
case CODE_TYPE: {
- Code* code = Code::cast(this);
+ Code code = Code::cast(*this);
os << "<Code " << Code::Kind2String(code->kind());
- if (code->is_stub()) {
- os << " " << CodeStub::MajorName(CodeStub::GetMajorKey(code));
- } else if (code->is_builtin()) {
+ if (code->is_builtin()) {
os << " " << Builtins::name(code->builtin_index());
}
os << ">";
@@ -3613,31 +3687,31 @@ void HeapObject::HeapObjectShortPrint(std::ostream& os) { // NOLINT
os << "<false>";
} else {
os << "<Odd Oddball: ";
- os << Oddball::cast(this)->to_string()->ToCString().get();
+ os << Oddball::cast(*this)->to_string()->ToCString().get();
os << ">";
}
break;
}
case SYMBOL_TYPE: {
- Symbol* symbol = Symbol::cast(this);
+ Symbol symbol = Symbol::cast(*this);
symbol->SymbolShortPrint(os);
break;
}
case HEAP_NUMBER_TYPE: {
os << "<HeapNumber ";
- HeapNumber::cast(this)->HeapNumberPrint(os);
+ HeapNumber::cast(*this)->HeapNumberPrint(os);
os << ">";
break;
}
case MUTABLE_HEAP_NUMBER_TYPE: {
os << "<MutableHeapNumber ";
- MutableHeapNumber::cast(this)->MutableHeapNumberPrint(os);
+ MutableHeapNumber::cast(*this)->MutableHeapNumberPrint(os);
os << '>';
break;
}
case BIGINT_TYPE: {
os << "<BigInt ";
- BigInt::cast(this)->BigIntShortPrint(os);
+ BigInt::cast(*this)->BigIntShortPrint(os);
os << ">";
break;
}
@@ -3651,13 +3725,13 @@ void HeapObject::HeapObjectShortPrint(std::ostream& os) { // NOLINT
os << "<Cell value= ";
HeapStringAllocator allocator;
StringStream accumulator(&allocator);
- Cell::cast(this)->value()->ShortPrint(&accumulator);
+ Cell::cast(*this)->value()->ShortPrint(&accumulator);
os << accumulator.ToCString().get();
os << '>';
break;
}
case PROPERTY_CELL_TYPE: {
- PropertyCell* cell = PropertyCell::cast(this);
+ PropertyCell cell = PropertyCell::cast(*this);
os << "<PropertyCell name=";
cell->name()->ShortPrint(os);
os << " value=";
@@ -3669,7 +3743,7 @@ void HeapObject::HeapObjectShortPrint(std::ostream& os) { // NOLINT
break;
}
case CALL_HANDLER_INFO_TYPE: {
- CallHandlerInfo* info = CallHandlerInfo::cast(this);
+ CallHandlerInfo info = CallHandlerInfo::cast(*this);
os << "<CallHandlerInfo ";
os << "callback= " << Brief(info->callback());
os << ", js_callback= " << Brief(info->js_callback());
@@ -3710,35 +3784,35 @@ void HeapObject::Iterate(ObjectVisitor* v) { IterateFast<ObjectVisitor>(v); }
void HeapObject::IterateBody(ObjectVisitor* v) {
- Map* m = map();
+ Map m = map();
IterateBodyFast<ObjectVisitor>(m, SizeFromMap(m), v);
}
-void HeapObject::IterateBody(Map* map, int object_size, ObjectVisitor* v) {
+void HeapObject::IterateBody(Map map, int object_size, ObjectVisitor* v) {
IterateBodyFast<ObjectVisitor>(map, object_size, v);
}
struct CallIsValidSlot {
template <typename BodyDescriptor>
- static bool apply(Map* map, HeapObject* obj, int offset, int) {
+ static bool apply(Map map, HeapObject obj, int offset, int) {
return BodyDescriptor::IsValidSlot(map, obj, offset);
}
};
-bool HeapObject::IsValidSlot(Map* map, int offset) {
+bool HeapObject::IsValidSlot(Map map, int offset) {
DCHECK_NE(0, offset);
return BodyDescriptorApply<CallIsValidSlot, bool>(map->instance_type(), map,
- this, offset, 0);
+ *this, offset, 0);
}
-String* JSReceiver::class_name() {
+String JSReceiver::class_name() {
ReadOnlyRoots roots = GetReadOnlyRoots();
if (IsFunction()) return roots.Function_string();
if (IsJSArgumentsObject()) return roots.Arguments_string();
if (IsJSArray()) return roots.Array_string();
if (IsJSArrayBuffer()) {
- if (JSArrayBuffer::cast(this)->is_shared()) {
+ if (JSArrayBuffer::cast(*this)->is_shared()) {
return roots.SharedArrayBuffer_string();
}
return roots.ArrayBuffer_string();
@@ -3765,7 +3839,7 @@ String* JSReceiver::class_name() {
#undef SWITCH_KIND
}
if (IsJSValue()) {
- Object* value = JSValue::cast(this)->value();
+ Object value = JSValue::cast(*this)->value();
if (value->IsBoolean()) return roots.Boolean_string();
if (value->IsString()) return roots.String_string();
if (value->IsNumber()) return roots.Number_string();
@@ -3778,16 +3852,16 @@ String* JSReceiver::class_name() {
if (IsJSWeakSet()) return roots.WeakSet_string();
if (IsJSGlobalProxy()) return roots.global_string();
- Object* maybe_constructor = map()->GetConstructor();
+ Object maybe_constructor = map()->GetConstructor();
if (maybe_constructor->IsJSFunction()) {
- JSFunction* constructor = JSFunction::cast(maybe_constructor);
+ JSFunction constructor = JSFunction::cast(maybe_constructor);
if (constructor->shared()->IsApiFunction()) {
maybe_constructor = constructor->shared()->get_api_func_data();
}
}
if (maybe_constructor->IsFunctionTemplateInfo()) {
- FunctionTemplateInfo* info = FunctionTemplateInfo::cast(maybe_constructor);
+ FunctionTemplateInfo info = FunctionTemplateInfo::cast(maybe_constructor);
if (info->class_name()->IsString()) return String::cast(info->class_name());
}
@@ -3799,6 +3873,7 @@ bool HeapObject::CanBeRehashed() const {
switch (map()->instance_type()) {
case ORDERED_HASH_MAP_TYPE:
case ORDERED_HASH_SET_TYPE:
+ case ORDERED_NAME_DICTIONARY_TYPE:
// TODO(yangguo): actually support rehashing OrderedHash{Map,Set}.
return false;
case NAME_DICTIONARY_TYPE:
@@ -3812,9 +3887,11 @@ bool HeapObject::CanBeRehashed() const {
case TRANSITION_ARRAY_TYPE:
return true;
case SMALL_ORDERED_HASH_MAP_TYPE:
- return SmallOrderedHashMap::cast(this)->NumberOfElements() == 0;
+ return SmallOrderedHashMap::cast(*this)->NumberOfElements() == 0;
case SMALL_ORDERED_HASH_SET_TYPE:
- return SmallOrderedHashMap::cast(this)->NumberOfElements() == 0;
+ return SmallOrderedHashMap::cast(*this)->NumberOfElements() == 0;
+ case SMALL_ORDERED_NAME_DICTIONARY_TYPE:
+ return SmallOrderedNameDictionary::cast(*this)->NumberOfElements() == 0;
default:
return false;
}
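
The rehash support exists because a deserialized snapshot runs under a fresh hash seed: any container whose layout depends on the seed must be rebuilt, and the small ordered tables only qualify while empty, since an empty table is trivially valid under any seed. A toy model of seed-dependent rehashing, with all types invented for illustration:

```cpp
#include <cassert>
#include <cstdint>
#include <vector>

// Toy seed-dependent hash: layouts built under one seed are invalid
// under another, which is the situation after snapshot deserialization.
size_t SeededHash(uint64_t seed, int key) {
  return static_cast<size_t>((key * 2654435761u) ^ seed);
}

struct Table {
  std::vector<int> slots;  // -1 means an empty slot
  explicit Table(size_t n) : slots(n, -1) {}

  void Insert(uint64_t seed, int key) {
    size_t i = SeededHash(seed, key) % slots.size();
    while (slots[i] != -1) i = (i + 1) % slots.size();  // linear probing
    slots[i] = key;
  }

  // Rebuild every entry under the new seed, like RehashBasedOnMap.
  void Rehash(uint64_t new_seed) {
    std::vector<int> old = std::move(slots);
    slots.assign(old.size(), -1);
    for (int key : old)
      if (key != -1) Insert(new_seed, key);
  }

  bool Lookup(uint64_t seed, int key) const {
    size_t i = SeededHash(seed, key) % slots.size();
    while (slots[i] != -1) {
      if (slots[i] == key) return true;
      i = (i + 1) % slots.size();
    }
    return false;
  }
};

int main() {
  Table t(8);
  t.Insert(/*seed=*/1, 42);
  t.Rehash(/*new_seed=*/2);  // deserialization got a new seed
  assert(t.Lookup(2, 42));   // found again under the new layout
  return 0;
}
```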
@@ -3827,32 +3904,35 @@ void HeapObject::RehashBasedOnMap(Isolate* isolate) {
UNREACHABLE();
break;
case NAME_DICTIONARY_TYPE:
- NameDictionary::cast(this)->Rehash(isolate);
+ NameDictionary::cast(*this)->Rehash(isolate);
break;
case GLOBAL_DICTIONARY_TYPE:
- GlobalDictionary::cast(this)->Rehash(isolate);
+ GlobalDictionary::cast(*this)->Rehash(isolate);
break;
case NUMBER_DICTIONARY_TYPE:
- NumberDictionary::cast(this)->Rehash(isolate);
+ NumberDictionary::cast(*this)->Rehash(isolate);
break;
case SIMPLE_NUMBER_DICTIONARY_TYPE:
- SimpleNumberDictionary::cast(this)->Rehash(isolate);
+ SimpleNumberDictionary::cast(*this)->Rehash(isolate);
break;
case STRING_TABLE_TYPE:
- StringTable::cast(this)->Rehash(isolate);
+ StringTable::cast(*this)->Rehash(isolate);
break;
case DESCRIPTOR_ARRAY_TYPE:
- DCHECK_LE(1, DescriptorArray::cast(this)->number_of_descriptors());
- DescriptorArray::cast(this)->Sort();
+ DCHECK_LE(1, DescriptorArray::cast(*this)->number_of_descriptors());
+ DescriptorArray::cast(*this)->Sort();
break;
case TRANSITION_ARRAY_TYPE:
- TransitionArray::cast(this)->Sort();
+ TransitionArray::cast(*this)->Sort();
break;
case SMALL_ORDERED_HASH_MAP_TYPE:
- DCHECK_EQ(0, SmallOrderedHashMap::cast(this)->NumberOfElements());
+ DCHECK_EQ(0, SmallOrderedHashMap::cast(*this)->NumberOfElements());
break;
case SMALL_ORDERED_HASH_SET_TYPE:
- DCHECK_EQ(0, SmallOrderedHashSet::cast(this)->NumberOfElements());
+ DCHECK_EQ(0, SmallOrderedHashSet::cast(*this)->NumberOfElements());
+ break;
+ case SMALL_ORDERED_NAME_DICTIONARY_TYPE:
+ DCHECK_EQ(0, SmallOrderedNameDictionary::cast(*this)->NumberOfElements());
break;
default:
break;
@@ -3870,18 +3950,17 @@ std::pair<MaybeHandle<JSFunction>, Handle<String>> GetConstructorHelper(
// reclaimed and replaced by Object in OptimizeAsPrototype.
if (!receiver->IsJSProxy() && receiver->map()->new_target_is_base() &&
!receiver->map()->is_prototype_map()) {
- Object* maybe_constructor = receiver->map()->GetConstructor();
+ Object maybe_constructor = receiver->map()->GetConstructor();
if (maybe_constructor->IsJSFunction()) {
- JSFunction* constructor = JSFunction::cast(maybe_constructor);
- String* name = constructor->shared()->DebugName();
+ JSFunction constructor = JSFunction::cast(maybe_constructor);
+ String name = constructor->shared()->DebugName();
if (name->length() != 0 &&
!name->Equals(ReadOnlyRoots(isolate).Object_string())) {
return std::make_pair(handle(constructor, isolate),
handle(name, isolate));
}
} else if (maybe_constructor->IsFunctionTemplateInfo()) {
- FunctionTemplateInfo* info =
- FunctionTemplateInfo::cast(maybe_constructor);
+ FunctionTemplateInfo info = FunctionTemplateInfo::cast(maybe_constructor);
if (info->class_name()->IsString()) {
return std::make_pair(
MaybeHandle<JSFunction>(),
@@ -3907,8 +3986,8 @@ std::pair<MaybeHandle<JSFunction>, Handle<String>> GetConstructorHelper(
LookupIterator::PROTOTYPE_CHAIN_SKIP_INTERCEPTOR);
Handle<Object> maybe_constructor = JSReceiver::GetDataProperty(&it);
if (maybe_constructor->IsJSFunction()) {
- JSFunction* constructor = JSFunction::cast(*maybe_constructor);
- String* name = constructor->shared()->DebugName();
+ JSFunction constructor = JSFunction::cast(*maybe_constructor);
+ String name = constructor->shared()->DebugName();
if (name->length() != 0 &&
!name->Equals(ReadOnlyRoots(isolate).Object_string())) {
@@ -3934,16 +4013,18 @@ Handle<String> JSReceiver::GetConstructorName(Handle<JSReceiver> receiver) {
}
Handle<Context> JSReceiver::GetCreationContext() {
- JSReceiver* receiver = this;
+ JSReceiver receiver = *this;
// Externals are JSObjects with null as a constructor.
DCHECK(!receiver->IsExternal(GetIsolate()));
- Object* constructor = receiver->map()->GetConstructor();
- JSFunction* function;
+ Object constructor = receiver->map()->GetConstructor();
+ JSFunction function;
if (constructor->IsJSFunction()) {
function = JSFunction::cast(constructor);
} else if (constructor->IsFunctionTemplateInfo()) {
// Remote objects don't have a creation context.
return Handle<Context>::null();
+ } else if (receiver->IsJSGeneratorObject()) {
+ function = JSGeneratorObject::cast(receiver)->function();
} else {
// Functions have null as a constructor,
// but any JSFunction knows its context immediately.
@@ -3966,11 +4047,11 @@ MaybeObjectHandle Map::WrapFieldType(Isolate* isolate, Handle<FieldType> type) {
}
// static
-FieldType* Map::UnwrapFieldType(MaybeObject* wrapped_type) {
+FieldType Map::UnwrapFieldType(MaybeObject wrapped_type) {
if (wrapped_type->IsCleared()) {
return FieldType::None();
}
- HeapObject* heap_object;
+ HeapObject heap_object;
if (wrapped_type->GetHeapObjectIfWeak(&heap_object)) {
return FieldType::cast(heap_object);
}
@@ -4053,17 +4134,17 @@ const char* Representation::Mnemonic() const {
}
}
-bool Map::TransitionRemovesTaggedField(Map* target) const {
+bool Map::TransitionRemovesTaggedField(Map target) const {
int inobject = NumberOfFields();
int target_inobject = target->NumberOfFields();
for (int i = target_inobject; i < inobject; i++) {
- FieldIndex index = FieldIndex::ForPropertyIndex(this, i);
+ FieldIndex index = FieldIndex::ForPropertyIndex(*this, i);
if (!IsUnboxedDoubleField(index)) return true;
}
return false;
}
-bool Map::TransitionChangesTaggedFieldToUntaggedField(Map* target) const {
+bool Map::TransitionChangesTaggedFieldToUntaggedField(Map target) const {
int inobject = NumberOfFields();
int target_inobject = target->NumberOfFields();
int limit = Min(inobject, target_inobject);
@@ -4076,12 +4157,12 @@ bool Map::TransitionChangesTaggedFieldToUntaggedField(Map* target) const {
return false;
}
-bool Map::TransitionRequiresSynchronizationWithGC(Map* target) const {
+bool Map::TransitionRequiresSynchronizationWithGC(Map target) const {
return TransitionRemovesTaggedField(target) ||
TransitionChangesTaggedFieldToUntaggedField(target);
}
-bool Map::InstancesNeedRewriting(Map* target) const {
+bool Map::InstancesNeedRewriting(Map target) const {
int target_number_of_fields = target->NumberOfFields();
int target_inobject = target->GetInObjectProperties();
int target_unused = target->UnusedPropertyFields();
@@ -4092,7 +4173,7 @@ bool Map::InstancesNeedRewriting(Map* target) const {
&old_number_of_fields);
}
-bool Map::InstancesNeedRewriting(Map* target, int target_number_of_fields,
+bool Map::InstancesNeedRewriting(Map target, int target_number_of_fields,
int target_inobject, int target_unused,
int* old_number_of_fields) const {
// If fields were added (or removed), rewrite the instance.
@@ -4101,8 +4182,8 @@ bool Map::InstancesNeedRewriting(Map* target, int target_number_of_fields,
if (target_number_of_fields != *old_number_of_fields) return true;
// If smi descriptors were replaced by double descriptors, rewrite.
- DescriptorArray* old_desc = instance_descriptors();
- DescriptorArray* new_desc = target->instance_descriptors();
+ DescriptorArray old_desc = instance_descriptors();
+ DescriptorArray new_desc = target->instance_descriptors();
int limit = NumberOfOwnDescriptors();
for (int i = 0; i < limit; i++) {
if (new_desc->GetDetails(i).representation().IsDouble() !=
@@ -4138,9 +4219,9 @@ void JSObject::UpdatePrototypeUserRegistration(Handle<Map> old_map,
old_map->set_prototype_info(Smi::kZero);
if (FLAG_trace_prototype_users) {
PrintF("Moving prototype_info %p from map %p to map %p.\n",
- reinterpret_cast<void*>(new_map->prototype_info()),
- reinterpret_cast<void*>(*old_map),
- reinterpret_cast<void*>(*new_map));
+ reinterpret_cast<void*>(new_map->prototype_info()->ptr()),
+ reinterpret_cast<void*>(old_map->ptr()),
+ reinterpret_cast<void*>(new_map->ptr()));
}
if (was_registered) {
if (new_map->prototype_info()->IsPrototypeInfo()) {
@@ -4356,7 +4437,7 @@ void MigrateFastToFast(Handle<JSObject> object, Handle<Map> new_map) {
int limit = Min(inobject, number_of_fields);
for (int i = 0; i < limit; i++) {
FieldIndex index = FieldIndex::ForPropertyIndex(*new_map, i);
- Object* value = inobject_props->get(i);
+ Object value = inobject_props->get(i);
// Can't use JSObject::FastPropertyAtPut() because proper map was not set
// yet.
if (new_map->IsUnboxedDoubleField(index)) {
@@ -4580,7 +4661,7 @@ void JSObject::ForceSetPrototype(Handle<JSObject> object,
}
int Map::NumberOfFields() const {
- DescriptorArray* descriptors = instance_descriptors();
+ DescriptorArray descriptors = instance_descriptors();
int result = 0;
for (int i = 0; i < NumberOfOwnDescriptors(); i++) {
if (descriptors->GetDetails(i).location() == kField) result++;
@@ -4588,6 +4669,26 @@ int Map::NumberOfFields() const {
return result;
}
+Map::FieldCounts Map::GetFieldCounts() const {
+ DescriptorArray descriptors = instance_descriptors();
+ int mutable_count = 0;
+ int const_count = 0;
+ for (int i = 0; i < NumberOfOwnDescriptors(); i++) {
+ PropertyDetails details = descriptors->GetDetails(i);
+ if (details.location() == kField) {
+ switch (details.constness()) {
+ case PropertyConstness::kMutable:
+ mutable_count++;
+ break;
+ case PropertyConstness::kConst:
+ const_count++;
+ break;
+ }
+ }
+ }
+ return FieldCounts(mutable_count, const_count);
+}
+
bool Map::HasOutOfObjectProperties() const {
return GetInObjectProperties() < NumberOfFields();
}
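
GetFieldCounts, added just above, is new bookkeeping that splits a map's own fields by PropertyConstness, one counter per constness. The shape is a plain filtered fold over the descriptor array; a self-contained sketch with stand-in types:

```cpp
#include <cassert>
#include <vector>

enum class Constness { kMutable, kConst };
enum class Location { kField, kDescriptor };

struct Details {
  Location location;
  Constness constness;
};

// Same shape as Map::GetFieldCounts: walk the own descriptors once and
// bucket the in-field properties by constness.
struct FieldCounts {
  int mutable_count = 0;
  int const_count = 0;
  int GetTotal() const { return mutable_count + const_count; }
};

FieldCounts CountFields(const std::vector<Details>& descriptors) {
  FieldCounts counts;
  for (const Details& d : descriptors) {
    if (d.location != Location::kField) continue;  // skip non-field entries
    if (d.constness == Constness::kMutable) {
      counts.mutable_count++;
    } else {
      counts.const_count++;
    }
  }
  return counts;
}

int main() {
  std::vector<Details> descs = {
      {Location::kField, Constness::kConst},
      {Location::kField, Constness::kMutable},
      {Location::kDescriptor, Constness::kConst},  // not a field
  };
  FieldCounts c = CountFields(descs);
  assert(c.mutable_count == 1 && c.const_count == 1 && c.GetTotal() == 2);
  return 0;
}
```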
@@ -4663,7 +4764,7 @@ Handle<Map> Map::CopyGeneralizeAllFields(Isolate* isolate, Handle<Map> map,
void Map::DeprecateTransitionTree(Isolate* isolate) {
if (is_deprecated()) return;
DisallowHeapAllocation no_gc;
- TransitionsAccessor transitions(isolate, this, &no_gc);
+ TransitionsAccessor transitions(isolate, *this, &no_gc);
int num_transitions = transitions.NumberOfTransitions();
for (int i = 0; i < num_transitions; ++i) {
transitions.GetTarget(i)->DeprecateTransitionTree(isolate);
@@ -4671,7 +4772,7 @@ void Map::DeprecateTransitionTree(Isolate* isolate) {
DCHECK(!constructor_or_backpointer()->IsFunctionTemplateInfo());
set_is_deprecated(true);
if (FLAG_trace_maps) {
- LOG(isolate, MapEvent("Deprecate", this, nullptr));
+ LOG(isolate, MapEvent("Deprecate", *this, Map()));
}
dependent_code()->DeoptimizeDependentCodeGroup(
isolate, DependentCode::kTransitionGroup);
@@ -4681,57 +4782,59 @@ void Map::DeprecateTransitionTree(Isolate* isolate) {
// Installs |new_descriptors| over the current instance_descriptors to ensure
// proper sharing of descriptor arrays.
-void Map::ReplaceDescriptors(Isolate* isolate, DescriptorArray* new_descriptors,
- LayoutDescriptor* new_layout_descriptor) {
+void Map::ReplaceDescriptors(Isolate* isolate, DescriptorArray new_descriptors,
+ LayoutDescriptor new_layout_descriptor) {
// Don't overwrite the empty descriptor array or initial map's descriptors.
if (NumberOfOwnDescriptors() == 0 || GetBackPointer()->IsUndefined(isolate)) {
return;
}
- DescriptorArray* to_replace = instance_descriptors();
+ DescriptorArray to_replace = instance_descriptors();
// Replace descriptors by new_descriptors in all maps that share it. The old
// descriptors will not be trimmed in the mark-compactor, we need to mark
// all its elements.
- MarkingBarrierForElements(isolate->heap(), to_replace);
- Map* current = this;
+ Map current = *this;
+ MarkingBarrierForDescriptorArray(isolate->heap(), current, to_replace,
+ to_replace->number_of_descriptors());
while (current->instance_descriptors() == to_replace) {
- Object* next = current->GetBackPointer();
+ Object next = current->GetBackPointer();
if (next->IsUndefined(isolate)) break; // Stop overwriting at initial map.
current->SetEnumLength(kInvalidEnumCacheSentinel);
- current->UpdateDescriptors(new_descriptors, new_layout_descriptor);
+ current->UpdateDescriptors(isolate, new_descriptors, new_layout_descriptor,
+ current->NumberOfOwnDescriptors());
current = Map::cast(next);
}
set_owns_descriptors(false);
}
-Map* Map::FindRootMap(Isolate* isolate) const {
- const Map* result = this;
+Map Map::FindRootMap(Isolate* isolate) const {
+ Map result = *this;
while (true) {
- Object* back = result->GetBackPointer();
+ Object back = result->GetBackPointer();
if (back->IsUndefined(isolate)) {
// Initial map always owns descriptors and doesn't have unused entries
// in the descriptor array.
DCHECK(result->owns_descriptors());
DCHECK_EQ(result->NumberOfOwnDescriptors(),
result->instance_descriptors()->number_of_descriptors());
- return const_cast<Map*>(result);
+ return result;
}
result = Map::cast(back);
}
}
-Map* Map::FindFieldOwner(Isolate* isolate, int descriptor) const {
+Map Map::FindFieldOwner(Isolate* isolate, int descriptor) const {
DisallowHeapAllocation no_allocation;
DCHECK_EQ(kField, instance_descriptors()->GetDetails(descriptor).location());
- const Map* result = this;
+ Map result = *this;
while (true) {
- Object* back = result->GetBackPointer();
+ Object back = result->GetBackPointer();
if (back->IsUndefined(isolate)) break;
- const Map* parent = Map::cast(back);
+ const Map parent = Map::cast(back);
if (parent->NumberOfOwnDescriptors() <= descriptor) break;
result = parent;
}
- return const_cast<Map*>(result);
+ return result;
}
void Map::UpdateFieldType(Isolate* isolate, int descriptor, Handle<Name> name,
@@ -4746,20 +4849,20 @@ void Map::UpdateFieldType(Isolate* isolate, int descriptor, Handle<Name> name,
DCHECK_EQ(kData, details.kind());
Zone zone(isolate->allocator(), ZONE_NAME);
- ZoneQueue<Map*> backlog(&zone);
- backlog.push(this);
+ ZoneQueue<Map> backlog(&zone);
+ backlog.push(*this);
while (!backlog.empty()) {
- Map* current = backlog.front();
+ Map current = backlog.front();
backlog.pop();
TransitionsAccessor transitions(isolate, current, &no_allocation);
int num_transitions = transitions.NumberOfTransitions();
for (int i = 0; i < num_transitions; ++i) {
- Map* target = transitions.GetTarget(i);
+ Map target = transitions.GetTarget(i);
backlog.push(target);
}
- DescriptorArray* descriptors = current->instance_descriptors();
+ DescriptorArray descriptors = current->instance_descriptors();
PropertyDetails details = descriptors->GetDetails(descriptor);
// Currently, a constness change implies a map change.
@@ -4783,7 +4886,7 @@ void Map::UpdateFieldType(Isolate* isolate, int descriptor, Handle<Name> name,
}
}
-bool FieldTypeIsCleared(Representation rep, FieldType* type) {
+bool FieldTypeIsCleared(Representation rep, FieldType type) {
return type->IsNone() && rep.IsHeapObject();
}
@@ -4887,6 +4990,45 @@ Handle<Map> Map::ReconfigureElementsKind(Isolate* isolate, Handle<Map> map,
return mu.ReconfigureElementsKind(new_elements_kind);
}
+namespace {
+
+Map SearchMigrationTarget(Isolate* isolate, Map old_map) {
+ DisallowHeapAllocation no_allocation;
+ DisallowDeoptimization no_deoptimization(isolate);
+
+ Map target = old_map;
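+ // Follow cached migration targets until we reach a live (non-deprecated)
+ // map or run out of cached targets.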
+ do {
+ target = TransitionsAccessor(isolate, target, &no_allocation)
+ .GetMigrationTarget();
+ } while (!target.is_null() && target->is_deprecated());
+ if (target.is_null()) return Map();
+
+ // TODO(ishell): if this validation ever becomes a bottleneck, consider adding
+ // a bit to the Map telling whether it contains fields whose field types may
+ // be cleared.
+ // TODO(ishell): revisit handling of cleared field types in
+ // TryReplayPropertyTransitions() and consider checking the target map's field
+ // types instead of old_map's types.
+ // Go to slow map updating if the old_map has fast properties with cleared
+ // field types.
+ int old_nof = old_map->NumberOfOwnDescriptors();
+ DescriptorArray old_descriptors = old_map->instance_descriptors();
+ for (int i = 0; i < old_nof; i++) {
+ PropertyDetails old_details = old_descriptors->GetDetails(i);
+ if (old_details.location() == kField && old_details.kind() == kData) {
+ FieldType old_type = old_descriptors->GetFieldType(i);
+ if (FieldTypeIsCleared(old_details.representation(), old_type)) {
+ return Map();
+ }
+ }
+ }
+
+ SLOW_DCHECK(Map::TryUpdateSlow(isolate, old_map) == target);
+ return target;
+}
+} // namespace
+
+// TODO(ishell): Move TryUpdate() and friends to MapUpdater
// static
MaybeHandle<Map> Map::TryUpdate(Isolate* isolate, Handle<Map> old_map) {
DisallowHeapAllocation no_allocation;
@@ -4894,91 +5036,109 @@ MaybeHandle<Map> Map::TryUpdate(Isolate* isolate, Handle<Map> old_map) {
if (!old_map->is_deprecated()) return old_map;
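+ // Fast path: reuse a migration target cached on the transition tree, if any.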
+ if (FLAG_fast_map_update) {
+ Map target_map = SearchMigrationTarget(isolate, *old_map);
+ if (!target_map.is_null()) {
+ return handle(target_map, isolate);
+ }
+ }
+
+ Map new_map = TryUpdateSlow(isolate, *old_map);
+ if (new_map.is_null()) return MaybeHandle<Map>();
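+ // Cache the slow-path result so that later updates of this deprecated map
+ // can take the fast path above.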
+ if (FLAG_fast_map_update) {
+ TransitionsAccessor(isolate, *old_map, &no_allocation)
+ .SetMigrationTarget(new_map);
+ }
+ return handle(new_map, isolate);
+}
+
+Map Map::TryUpdateSlow(Isolate* isolate, Map old_map) {
+ DisallowHeapAllocation no_allocation;
+ DisallowDeoptimization no_deoptimization(isolate);
+
// Check the state of the root map.
- Map* root_map = old_map->FindRootMap(isolate);
+ Map root_map = old_map->FindRootMap(isolate);
if (root_map->is_deprecated()) {
- JSFunction* constructor = JSFunction::cast(root_map->GetConstructor());
+ JSFunction constructor = JSFunction::cast(root_map->GetConstructor());
DCHECK(constructor->has_initial_map());
DCHECK(constructor->initial_map()->is_dictionary_map());
if (constructor->initial_map()->elements_kind() !=
old_map->elements_kind()) {
- return MaybeHandle<Map>();
+ return Map();
}
- return handle(constructor->initial_map(), constructor->GetIsolate());
+ return constructor->initial_map();
}
- if (!old_map->EquivalentToForTransition(root_map)) return MaybeHandle<Map>();
+ if (!old_map->EquivalentToForTransition(root_map)) return Map();
ElementsKind from_kind = root_map->elements_kind();
ElementsKind to_kind = old_map->elements_kind();
if (from_kind != to_kind) {
// Try to follow existing elements kind transitions.
root_map = root_map->LookupElementsTransitionMap(isolate, to_kind);
- if (root_map == nullptr) return MaybeHandle<Map>();
+ if (root_map.is_null()) return Map();
// From here on, use the map with correct elements kind as root map.
}
- Map* new_map = root_map->TryReplayPropertyTransitions(isolate, *old_map);
- if (new_map == nullptr) return MaybeHandle<Map>();
- return handle(new_map, isolate);
+ return root_map->TryReplayPropertyTransitions(isolate, old_map);
}
-Map* Map::TryReplayPropertyTransitions(Isolate* isolate, Map* old_map) {
+Map Map::TryReplayPropertyTransitions(Isolate* isolate, Map old_map) {
DisallowHeapAllocation no_allocation;
DisallowDeoptimization no_deoptimization(isolate);
int root_nof = NumberOfOwnDescriptors();
int old_nof = old_map->NumberOfOwnDescriptors();
- DescriptorArray* old_descriptors = old_map->instance_descriptors();
+ DescriptorArray old_descriptors = old_map->instance_descriptors();
- Map* new_map = this;
+ Map new_map = *this;
for (int i = root_nof; i < old_nof; ++i) {
PropertyDetails old_details = old_descriptors->GetDetails(i);
- Map* transition =
+ Map transition =
TransitionsAccessor(isolate, new_map, &no_allocation)
.SearchTransition(old_descriptors->GetKey(i), old_details.kind(),
old_details.attributes());
- if (transition == nullptr) return nullptr;
+ if (transition.is_null()) return Map();
new_map = transition;
- DescriptorArray* new_descriptors = new_map->instance_descriptors();
+ DescriptorArray new_descriptors = new_map->instance_descriptors();
PropertyDetails new_details = new_descriptors->GetDetails(i);
DCHECK_EQ(old_details.kind(), new_details.kind());
DCHECK_EQ(old_details.attributes(), new_details.attributes());
if (!IsGeneralizableTo(old_details.constness(), new_details.constness())) {
- return nullptr;
+ return Map();
}
DCHECK(IsGeneralizableTo(old_details.location(), new_details.location()));
if (!old_details.representation().fits_into(new_details.representation())) {
- return nullptr;
+ return Map();
}
if (new_details.location() == kField) {
if (new_details.kind() == kData) {
- FieldType* new_type = new_descriptors->GetFieldType(i);
+ FieldType new_type = new_descriptors->GetFieldType(i);
// Cleared field types need special treatment. They represent lost
// knowledge, so we must first generalize the new_type to "Any".
if (FieldTypeIsCleared(new_details.representation(), new_type)) {
- return nullptr;
+ return Map();
}
DCHECK_EQ(kData, old_details.kind());
if (old_details.location() == kField) {
- FieldType* old_type = old_descriptors->GetFieldType(i);
+ FieldType old_type = old_descriptors->GetFieldType(i);
if (FieldTypeIsCleared(old_details.representation(), old_type) ||
!old_type->NowIs(new_type)) {
- return nullptr;
+ return Map();
}
} else {
DCHECK_EQ(kDescriptor, old_details.location());
DCHECK(!FLAG_track_constant_fields);
- Object* old_value = old_descriptors->GetStrongValue(i);
+ Object old_value = old_descriptors->GetStrongValue(i);
if (!new_type->NowContains(old_value)) {
- return nullptr;
+ return Map();
}
}
} else {
DCHECK_EQ(kAccessor, new_details.kind());
#ifdef DEBUG
- FieldType* new_type = new_descriptors->GetFieldType(i);
+ FieldType new_type = new_descriptors->GetFieldType(i);
DCHECK(new_type->IsAny());
#endif
UNREACHABLE();
@@ -4988,11 +5148,11 @@ Map* Map::TryReplayPropertyTransitions(Isolate* isolate, Map* old_map) {
if (old_details.location() == kField ||
old_descriptors->GetStrongValue(i) !=
new_descriptors->GetStrongValue(i)) {
- return nullptr;
+ return Map();
}
}
}
- if (new_map->NumberOfOwnDescriptors() != old_nof) return nullptr;
+ if (new_map->NumberOfOwnDescriptors() != old_nof) return Map();
return new_map;
}
@@ -5000,6 +5160,12 @@ Map* Map::TryReplayPropertyTransitions(Isolate* isolate, Map* old_map) {
// static
Handle<Map> Map::Update(Isolate* isolate, Handle<Map> map) {
if (!map->is_deprecated()) return map;
+ if (FLAG_fast_map_update) {
+ Map target_map = SearchMigrationTarget(isolate, *map);
+ if (!target_map.is_null()) {
+ return handle(target_map, isolate);
+ }
+ }
MapUpdater mu(isolate, map);
return mu.Update();
}
@@ -5046,9 +5212,17 @@ Maybe<bool> Object::SetPropertyInternal(LookupIterator* it,
return JSObject::SetPropertyWithFailedAccessCheck(it, value,
should_throw);
- case LookupIterator::JSPROXY:
+ case LookupIterator::JSPROXY: {
+ Handle<Object> receiver = it->GetReceiver();
+ // In the case of a global IC, the receiver is the global object. Replace
+ // it with the global proxy.
+ if (receiver->IsJSGlobalObject()) {
+ receiver = handle(JSGlobalObject::cast(*receiver)->global_proxy(),
+ it->isolate());
+ }
return JSProxy::SetProperty(it->GetHolder<JSProxy>(), it->GetName(),
- value, it->GetReceiver(), language_mode);
+ value, receiver, language_mode);
+ }
case LookupIterator::INTERCEPTOR: {
if (it->HolderIsReceiverOrHiddenPrototype()) {
@@ -5100,7 +5274,7 @@ Maybe<bool> Object::SetPropertyInternal(LookupIterator* it,
Object::ToNumber(it->isolate(), value), Nothing<bool>());
}
- // FIXME: Throw a TypeError if the holder is neutered here
+ // FIXME: Throw a TypeError if the holder is detached here
// (IntegerIndexedElementSpec step 5).
// TODO(verwaest): Per spec, we should return false here (steps 6-9
@@ -5197,8 +5371,8 @@ Maybe<bool> Object::SetSuperProperty(LookupIterator* it, Handle<Object> value,
if (own_lookup.IsReadOnly()) {
return WriteToReadOnlyProperty(&own_lookup, value, should_throw);
}
- return JSObject::SetPropertyWithAccessor(&own_lookup, value,
- should_throw);
+ return Object::SetPropertyWithAccessor(&own_lookup, value,
+ should_throw);
}
V8_FALLTHROUGH;
case LookupIterator::INTEGER_INDEXED_EXOTIC:
@@ -5285,8 +5459,8 @@ Maybe<bool> Object::RedefineIncompatibleProperty(Isolate* isolate,
Maybe<bool> Object::SetDataProperty(LookupIterator* it, Handle<Object> value) {
DCHECK_IMPLIES(it->GetReceiver()->IsJSProxy(),
- it->GetName()->IsPrivateField());
- DCHECK_IMPLIES(!it->IsElement() && it->GetName()->IsPrivateField(),
+ it->GetName()->IsPrivateName());
+ DCHECK_IMPLIES(!it->IsElement() && it->GetName()->IsPrivateName(),
it->state() == LookupIterator::DATA);
Handle<JSReceiver> receiver = Handle<JSReceiver>::cast(it->GetReceiver());
@@ -5304,8 +5478,8 @@ Maybe<bool> Object::SetDataProperty(LookupIterator* it, Handle<Object> value) {
BigInt::FromObject(it->isolate(), value),
Nothing<bool>());
// We have to recheck the length. However, it can only change if the
- // underlying buffer was neutered, so just check that.
- if (Handle<JSArrayBufferView>::cast(receiver)->WasNeutered()) {
+ // underlying buffer was detached, so just check that.
+ if (Handle<JSArrayBufferView>::cast(receiver)->WasDetached()) {
return Just(true);
// TODO(neis): According to the spec, this should throw a TypeError.
}
@@ -5314,8 +5488,8 @@ Maybe<bool> Object::SetDataProperty(LookupIterator* it, Handle<Object> value) {
Object::ToNumber(it->isolate(), value),
Nothing<bool>());
// We have to recheck the length. However, it can only change if the
- // underlying buffer was neutered, so just check that.
- if (Handle<JSArrayBufferView>::cast(receiver)->WasNeutered()) {
+ // underlying buffer was detached, so just check that.
+ if (Handle<JSArrayBufferView>::cast(receiver)->WasDetached()) {
return Just(true);
// TODO(neis): According to the spec, this should throw a TypeError.
}
@@ -5349,7 +5523,7 @@ Maybe<bool> Object::AddDataProperty(LookupIterator* it, Handle<Object> value,
// Private symbols should be installed on JSProxy using
// JSProxy::SetPrivateSymbol.
if (it->GetReceiver()->IsJSProxy() && it->GetName()->IsPrivate() &&
- !it->GetName()->IsPrivateField()) {
+ !it->GetName()->IsPrivateName()) {
RETURN_FAILURE(it->isolate(), should_throw,
NewTypeError(MessageTemplate::kProxyPrivate));
}
@@ -5357,7 +5531,7 @@ Maybe<bool> Object::AddDataProperty(LookupIterator* it, Handle<Object> value,
DCHECK_NE(LookupIterator::INTEGER_INDEXED_EXOTIC, it->state());
Handle<JSReceiver> receiver = it->GetStoreTarget<JSReceiver>();
- DCHECK_IMPLIES(receiver->IsJSProxy(), it->GetName()->IsPrivateField());
+ DCHECK_IMPLIES(receiver->IsJSProxy(), it->GetName()->IsPrivateName());
DCHECK_IMPLIES(receiver->IsJSProxy(),
it->state() == LookupIterator::NOT_FOUND);
@@ -5425,17 +5599,18 @@ void Map::EnsureDescriptorSlack(Isolate* isolate, Handle<Map> map, int slack) {
Handle<DescriptorArray> descriptors(map->instance_descriptors(), isolate);
int old_size = map->NumberOfOwnDescriptors();
- if (slack <= descriptors->NumberOfSlackDescriptors()) return;
+ if (slack <= descriptors->number_of_slack_descriptors()) return;
Handle<DescriptorArray> new_descriptors =
DescriptorArray::CopyUpTo(isolate, descriptors, old_size, slack);
DisallowHeapAllocation no_allocation;
// The descriptors are still the same, so keep the layout descriptor.
- LayoutDescriptor* layout_descriptor = map->GetLayoutDescriptor();
+ LayoutDescriptor layout_descriptor = map->GetLayoutDescriptor();
if (old_size == 0) {
- map->UpdateDescriptors(*new_descriptors, layout_descriptor);
+ map->UpdateDescriptors(isolate, *new_descriptors, layout_descriptor,
+ map->NumberOfOwnDescriptors());
return;
}
@@ -5449,16 +5624,19 @@ void Map::EnsureDescriptorSlack(Isolate* isolate, Handle<Map> map, int slack) {
// Replace the descriptors with new_descriptors in all maps that share them.
// The old descriptors will not be trimmed by the mark-compactor, so we need
// to mark all of their elements.
- MarkingBarrierForElements(isolate->heap(), *descriptors);
+ MarkingBarrierForDescriptorArray(isolate->heap(), *map, *descriptors,
+ descriptors->number_of_descriptors());
- Map* current = *map;
+ Map current = *map;
while (current->instance_descriptors() == *descriptors) {
- Object* next = current->GetBackPointer();
+ Object next = current->GetBackPointer();
if (next->IsUndefined(isolate)) break; // Stop overwriting at initial map.
- current->UpdateDescriptors(*new_descriptors, layout_descriptor);
+ current->UpdateDescriptors(isolate, *new_descriptors, layout_descriptor,
+ current->NumberOfOwnDescriptors());
current = Map::cast(next);
}
- map->UpdateDescriptors(*new_descriptors, layout_descriptor);
+ map->UpdateDescriptors(isolate, *new_descriptors, layout_descriptor,
+ map->NumberOfOwnDescriptors());
}
// static
@@ -5562,39 +5740,39 @@ int AccessorInfo::AppendUnique(Isolate* isolate, Handle<Object> descriptors,
valid_descriptors);
}
-static bool ContainsMap(MapHandles const& maps, Map* map) {
- DCHECK_NOT_NULL(map);
+static bool ContainsMap(MapHandles const& maps, Map map) {
+ DCHECK(!map.is_null());
for (Handle<Map> current : maps) {
if (!current.is_null() && *current == map) return true;
}
return false;
}
-Map* Map::FindElementsKindTransitionedMap(Isolate* isolate,
- MapHandles const& candidates) {
+Map Map::FindElementsKindTransitionedMap(Isolate* isolate,
+ MapHandles const& candidates) {
DisallowHeapAllocation no_allocation;
DisallowDeoptimization no_deoptimization(isolate);
- if (is_prototype_map()) return nullptr;
+ if (is_prototype_map()) return Map();
ElementsKind kind = elements_kind();
bool packed = IsFastPackedElementsKind(kind);
- Map* transition = nullptr;
+ Map transition;
if (IsTransitionableFastElementsKind(kind)) {
// Check the state of the root map.
- Map* root_map = FindRootMap(isolate);
- if (!EquivalentToForElementsKindTransition(root_map)) return nullptr;
+ Map root_map = FindRootMap(isolate);
+ if (!EquivalentToForElementsKindTransition(root_map)) return Map();
root_map = root_map->LookupElementsTransitionMap(isolate, kind);
- DCHECK_NOT_NULL(root_map);
+ DCHECK(!root_map.is_null());
// Starting from the next existing elements kind transition, try to
// replay the property transitions that do not involve instance rewriting
// (ElementsTransitionAndStoreStub does not support that).
for (root_map = root_map->ElementsTransitionMap();
- root_map != nullptr && root_map->has_fast_elements();
+ !root_map.is_null() && root_map->has_fast_elements();
root_map = root_map->ElementsTransitionMap()) {
- Map* current = root_map->TryReplayPropertyTransitions(isolate, this);
- if (current == nullptr) continue;
+ Map current = root_map->TryReplayPropertyTransitions(isolate, *this);
+ if (current.is_null()) continue;
if (InstancesNeedRewriting(current)) continue;
if (ContainsMap(candidates, current) &&
@@ -5607,17 +5785,17 @@ Map* Map::FindElementsKindTransitionedMap(Isolate* isolate,
return transition;
}
-static Map* FindClosestElementsTransition(Isolate* isolate, Map* map,
- ElementsKind to_kind) {
+static Map FindClosestElementsTransition(Isolate* isolate, Map map,
+ ElementsKind to_kind) {
// Ensure we are asked to search for an elements kind transition "near the root".
DCHECK_EQ(map->FindRootMap(isolate)->NumberOfOwnDescriptors(),
map->NumberOfOwnDescriptors());
- Map* current_map = map;
+ Map current_map = map;
ElementsKind kind = map->elements_kind();
while (kind != to_kind) {
- Map* next_map = current_map->ElementsTransitionMap();
- if (next_map == nullptr) return current_map;
+ Map next_map = current_map->ElementsTransitionMap();
+ if (next_map.is_null()) return current_map;
kind = next_map->elements_kind();
current_map = next_map;
}
@@ -5626,18 +5804,18 @@ static Map* FindClosestElementsTransition(Isolate* isolate, Map* map,
return current_map;
}
-Map* Map::LookupElementsTransitionMap(Isolate* isolate, ElementsKind to_kind) {
- Map* to_map = FindClosestElementsTransition(isolate, this, to_kind);
+Map Map::LookupElementsTransitionMap(Isolate* isolate, ElementsKind to_kind) {
+ Map to_map = FindClosestElementsTransition(isolate, *this, to_kind);
if (to_map->elements_kind() == to_kind) return to_map;
- return nullptr;
+ return Map();
}
bool Map::IsMapInArrayPrototypeChain(Isolate* isolate) const {
- if (isolate->initial_array_prototype()->map() == this) {
+ if (isolate->initial_array_prototype()->map() == *this) {
return true;
}
- if (isolate->initial_object_prototype()->map() == this) {
+ if (isolate->initial_object_prototype()->map() == *this) {
return true;
}
@@ -5680,7 +5858,7 @@ Handle<Map> Map::TransitionElementsTo(Isolate* isolate, Handle<Map> map,
ElementsKind from_kind = map->elements_kind();
if (from_kind == to_kind) return map;
- Context* native_context = isolate->context()->native_context();
+ Context native_context = isolate->context()->native_context();
if (from_kind == FAST_SLOPPY_ARGUMENTS_ELEMENTS) {
if (*map == native_context->fast_aliased_arguments_map()) {
DCHECK_EQ(SLOW_SLOPPY_ARGUMENTS_ELEMENTS, to_kind);
@@ -5695,7 +5873,7 @@ Handle<Map> Map::TransitionElementsTo(Isolate* isolate, Handle<Map> map,
// Reuse map transitions for JSArrays.
DisallowHeapAllocation no_gc;
if (native_context->GetInitialJSArrayMap(from_kind) == *map) {
- Object* maybe_transitioned_map =
+ Object maybe_transitioned_map =
native_context->get(Context::ArrayMapIndex(to_kind));
if (maybe_transitioned_map->IsMap()) {
return handle(Map::cast(maybe_transitioned_map), isolate);
@@ -6071,12 +6249,14 @@ Handle<Object> JSFunction::GetName(Isolate* isolate,
Maybe<int> JSFunction::GetLength(Isolate* isolate,
Handle<JSFunction> function) {
int length = 0;
- if (function->shared()->is_compiled()) {
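+ // The IsCompiledScope keeps the function's bytecode alive (it blocks
+ // bytecode flushing) while the length is read below.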
+ IsCompiledScope is_compiled_scope(function->shared()->is_compiled_scope());
+ if (is_compiled_scope.is_compiled()) {
length = function->shared()->GetLength();
} else {
// If the function isn't compiled yet, its length has not been computed.
// Compile it now and return the correct length.
- if (Compiler::Compile(function, Compiler::KEEP_EXCEPTION)) {
+ if (Compiler::Compile(function, Compiler::KEEP_EXCEPTION,
+ &is_compiled_scope)) {
length = function->shared()->GetLength();
}
if (isolate->has_pending_exception()) return Nothing<int>();
@@ -6185,7 +6365,7 @@ void JSObject::AllocateStorageForMap(Handle<JSObject> object, Handle<Map> map) {
if (!FLAG_unbox_double_fields) {
for (int i = 0; i < inobject; i++) {
FieldIndex index = FieldIndex::ForPropertyIndex(*map, i);
- Object* value = storage->get(i);
+ Object value = storage->get(i);
object->RawFastPropertyAtPut(index, value);
}
}
@@ -6246,11 +6426,18 @@ void JSObject::AddProperty(Isolate* isolate, Handle<JSObject> object,
DCHECK(!it.IsFound());
DCHECK(object->map()->is_extensible() || name->IsPrivate());
#endif
- CHECK(AddDataProperty(&it, value, attributes, kThrowOnError,
- StoreOrigin::kNamed)
+ CHECK(Object::AddDataProperty(&it, value, attributes, kThrowOnError,
+ StoreOrigin::kNamed)
.IsJust());
}
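+// Convenience overload: internalizes a UTF-8 C string and forwards to the
+// Handle<Name> version above.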
+void JSObject::AddProperty(Isolate* isolate, Handle<JSObject> object,
+ const char* name, Handle<Object> value,
+ PropertyAttributes attributes) {
+ JSObject::AddProperty(isolate, object,
+ isolate->factory()->InternalizeUtf8String(name), value,
+ attributes);
+}
// Reconfigures a property to a data property with attributes, even if it is not
// reconfigurable.
@@ -6318,27 +6505,27 @@ Maybe<bool> JSObject::DefineOwnPropertyIgnoreAttributes(
it->TransitionToAccessorPair(accessors, attributes);
}
- return JSObject::SetPropertyWithAccessor(it, value, should_throw);
+ return Object::SetPropertyWithAccessor(it, value, should_throw);
}
it->ReconfigureDataProperty(value, attributes);
return Just(true);
}
case LookupIterator::INTEGER_INDEXED_EXOTIC:
- return RedefineIncompatibleProperty(it->isolate(), it->GetName(), value,
- should_throw);
+ return Object::RedefineIncompatibleProperty(
+ it->isolate(), it->GetName(), value, should_throw);
case LookupIterator::DATA: {
// Regular property update if the attributes match.
if (it->property_attributes() == attributes) {
- return SetDataProperty(it, value);
+ return Object::SetDataProperty(it, value);
}
// Special case: properties of typed arrays cannot be reconfigured to be
// non-writable or non-enumerable.
if (it->IsElement() && object->HasFixedTypedArrayElements()) {
- return RedefineIncompatibleProperty(it->isolate(), it->GetName(),
- value, should_throw);
+ return Object::RedefineIncompatibleProperty(
+ it->isolate(), it->GetName(), value, should_throw);
}
// Reconfigure the data property if the attributes mismatch.
@@ -6349,8 +6536,8 @@ Maybe<bool> JSObject::DefineOwnPropertyIgnoreAttributes(
}
}
- return AddDataProperty(it, value, attributes, should_throw,
- StoreOrigin::kNamed);
+ return Object::AddDataProperty(it, value, attributes, should_throw,
+ StoreOrigin::kNamed);
}
MaybeHandle<Object> JSObject::SetOwnPropertyIgnoreAttributes(
@@ -6428,13 +6615,13 @@ Handle<NormalizedMapCache> NormalizedMapCache::New(Isolate* isolate) {
MaybeHandle<Map> NormalizedMapCache::Get(Handle<Map> fast_map,
PropertyNormalizationMode mode) {
DisallowHeapAllocation no_gc;
- MaybeObject* value = WeakFixedArray::Get(GetIndex(fast_map));
- HeapObject* heap_object;
+ MaybeObject value = WeakFixedArray::Get(GetIndex(fast_map));
+ HeapObject heap_object;
if (!value->GetHeapObjectIfWeak(&heap_object)) {
return MaybeHandle<Map>();
}
- Map* normalized_map = Map::cast(heap_object);
+ Map normalized_map = Map::cast(heap_object);
if (!normalized_map->EquivalentToForNormalization(*fast_map, mode)) {
return MaybeHandle<Map>();
}
@@ -6492,7 +6679,7 @@ void JSObject::MigrateSlowToFast(Handle<JSObject> object,
if (FLAG_track_constant_fields) {
number_of_fields += 1;
} else {
- Object* value = dictionary->ValueAt(index);
+ Object value = dictionary->ValueAt(index);
if (!value->IsJSFunction()) {
number_of_fields += 1;
}
@@ -6506,18 +6693,14 @@ void JSObject::MigrateSlowToFast(Handle<JSObject> object,
// Allocate new map.
Handle<Map> new_map = Map::CopyDropDescriptors(isolate, old_map);
- if (new_map->has_named_interceptor() || new_map->is_access_check_needed()) {
- // Force certain slow paths when API interceptors are used, or if an access
- // check is required.
- new_map->set_may_have_interesting_symbols(true);
- }
+ // Recompute this bit from scratch rather than retaining the old value:
+ // turning a map into a dictionary map always sets it, so the old bit may
+ // be stale.
+ new_map->set_may_have_interesting_symbols(new_map->has_named_interceptor() ||
+ new_map->is_access_check_needed());
new_map->set_is_dictionary_map(false);
NotifyMapChange(old_map, new_map, isolate);
- if (FLAG_trace_maps) {
- LOG(isolate, MapEvent("SlowToFast", *old_map, *new_map, reason));
- }
if (instance_descriptor_length == 0) {
DisallowHeapAllocation no_gc;
@@ -6528,6 +6711,9 @@ void JSObject::MigrateSlowToFast(Handle<JSObject> object,
object->SetProperties(ReadOnlyRoots(isolate).empty_fixed_array());
// Check that it really works.
DCHECK(object->HasFastProperties());
+ if (FLAG_trace_maps) {
+ LOG(isolate, MapEvent("SlowToFast", *old_map, *new_map, reason));
+ }
return;
}
@@ -6554,7 +6740,7 @@ void JSObject::MigrateSlowToFast(Handle<JSObject> object,
int current_offset = 0;
for (int i = 0; i < instance_descriptor_length; i++) {
int index = Smi::ToInt(iteration_order->get(i));
- Name* k = dictionary->NameAt(index);
+ Name k = dictionary->NameAt(index);
// Dictionary keys are internalized upon insertion.
// TODO(jkummerow): Turn this into a DCHECK if it's not hit in the wild.
CHECK(k->IsUniqueName());
@@ -6565,7 +6751,7 @@ void JSObject::MigrateSlowToFast(Handle<JSObject> object,
new_map->set_may_have_interesting_symbols(true);
}
- Object* value = dictionary->ValueAt(index);
+ Object value = dictionary->ValueAt(index);
PropertyDetails details = dictionary->DetailsAt(index);
DCHECK_EQ(kField, details.location());
@@ -6615,13 +6801,16 @@ void JSObject::MigrateSlowToFast(Handle<JSObject> object,
isolate, new_map, descriptors, descriptors->number_of_descriptors());
DisallowHeapAllocation no_gc;
- new_map->InitializeDescriptors(*descriptors, *layout_descriptor);
+ new_map->InitializeDescriptors(isolate, *descriptors, *layout_descriptor);
if (number_of_allocated_fields == 0) {
new_map->SetInObjectUnusedPropertyFields(unused_property_fields);
} else {
new_map->SetOutOfObjectUnusedPropertyFields(unused_property_fields);
}
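+ // Log the SlowToFast event only once the new map is fully initialized.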
+ if (FLAG_trace_maps) {
+ LOG(isolate, MapEvent("SlowToFast", *old_map, *new_map, reason));
+ }
// Transform the object.
object->synchronized_set_map(*new_map);
@@ -6632,7 +6821,7 @@ void JSObject::MigrateSlowToFast(Handle<JSObject> object,
DCHECK(object->HasFastProperties());
}
-void JSObject::RequireSlowElements(NumberDictionary* dictionary) {
+void JSObject::RequireSlowElements(NumberDictionary dictionary) {
if (dictionary->requires_slow_elements()) return;
dictionary->set_requires_slow_elements();
if (map()->is_prototype_map()) {
@@ -6648,7 +6837,7 @@ Handle<NumberDictionary> JSObject::NormalizeElements(Handle<JSObject> object) {
bool is_sloppy_arguments = object->HasSloppyArgumentsElements();
{
DisallowHeapAllocation no_gc;
- FixedArrayBase* elements = object->elements();
+ FixedArrayBase elements = object->elements();
if (is_sloppy_arguments) {
elements = SloppyArgumentsElements::cast(elements)->arguments();
@@ -6701,16 +6890,14 @@ Handle<NumberDictionary> JSObject::NormalizeElements(Handle<JSObject> object) {
namespace {
-Object* SetHashAndUpdateProperties(Isolate* isolate, HeapObject* properties,
- int hash) {
+Object SetHashAndUpdateProperties(HeapObject properties, int hash) {
DCHECK_NE(PropertyArray::kNoHashSentinel, hash);
DCHECK(PropertyArray::HashField::is_valid(hash));
- Heap* heap = isolate->heap();
- ReadOnlyRoots roots(heap);
+ ReadOnlyRoots roots = properties->GetReadOnlyRoots();
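+ // While the object still uses one of the canonical empty backing stores,
+ // the hash can be stored directly as a Smi in the properties slot.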
if (properties == roots.empty_fixed_array() ||
properties == roots.empty_property_array() ||
- properties == heap->empty_property_dictionary()) {
+ properties == roots.empty_property_dictionary()) {
return Smi::FromInt(hash);
}
@@ -6730,9 +6917,9 @@ Object* SetHashAndUpdateProperties(Isolate* isolate, HeapObject* properties,
return properties;
}
-int GetIdentityHashHelper(Isolate* isolate, JSReceiver* object) {
+int GetIdentityHashHelper(JSReceiver object) {
DisallowHeapAllocation no_gc;
- Object* properties = object->raw_properties_or_hash();
+ Object properties = object->raw_properties_or_hash();
if (properties->IsSmi()) {
return Smi::ToInt(properties);
}
@@ -6750,11 +6937,9 @@ int GetIdentityHashHelper(Isolate* isolate, JSReceiver* object) {
}
#ifdef DEBUG
- FixedArray* empty_fixed_array = ReadOnlyRoots(isolate).empty_fixed_array();
- FixedArray* empty_property_dictionary =
- isolate->heap()->empty_property_dictionary();
- DCHECK(properties == empty_fixed_array ||
- properties == empty_property_dictionary);
+ ReadOnlyRoots roots = object->GetReadOnlyRoots();
+ DCHECK(properties == roots.empty_fixed_array() ||
+ properties == roots.empty_property_dictionary());
#endif
return PropertyArray::kNoHashSentinel;
@@ -6766,43 +6951,41 @@ void JSReceiver::SetIdentityHash(int hash) {
DCHECK_NE(PropertyArray::kNoHashSentinel, hash);
DCHECK(PropertyArray::HashField::is_valid(hash));
- HeapObject* existing_properties = HeapObject::cast(raw_properties_or_hash());
- Object* new_properties =
- SetHashAndUpdateProperties(GetIsolate(), existing_properties, hash);
+ HeapObject existing_properties = HeapObject::cast(raw_properties_or_hash());
+ Object new_properties = SetHashAndUpdateProperties(existing_properties, hash);
set_raw_properties_or_hash(new_properties);
}
-void JSReceiver::SetProperties(HeapObject* properties) {
+void JSReceiver::SetProperties(HeapObject properties) {
DCHECK_IMPLIES(properties->IsPropertyArray() &&
PropertyArray::cast(properties)->length() == 0,
properties == GetReadOnlyRoots().empty_property_array());
DisallowHeapAllocation no_gc;
- Isolate* isolate = GetIsolate();
- int hash = GetIdentityHashHelper(isolate, this);
- Object* new_properties = properties;
+ int hash = GetIdentityHashHelper(*this);
+ Object new_properties = properties;
// TODO(cbruni): Make GetIdentityHashHelper return a bool so that we
// don't have to manually compare against kNoHashSentinel.
if (hash != PropertyArray::kNoHashSentinel) {
- new_properties = SetHashAndUpdateProperties(isolate, properties, hash);
+ new_properties = SetHashAndUpdateProperties(properties, hash);
}
set_raw_properties_or_hash(new_properties);
}
-Object* JSReceiver::GetIdentityHash(Isolate* isolate) {
+Object JSReceiver::GetIdentityHash() {
DisallowHeapAllocation no_gc;
- int hash = GetIdentityHashHelper(isolate, this);
+ int hash = GetIdentityHashHelper(*this);
if (hash == PropertyArray::kNoHashSentinel) {
- return ReadOnlyRoots(isolate).undefined_value();
+ return GetReadOnlyRoots().undefined_value();
}
return Smi::FromInt(hash);
}
// static
-Smi* JSReceiver::CreateIdentityHash(Isolate* isolate, JSReceiver* key) {
+Smi JSReceiver::CreateIdentityHash(Isolate* isolate, JSReceiver key) {
DisallowHeapAllocation no_gc;
int hash = isolate->GenerateIdentityHash(PropertyArray::HashField::kMax);
DCHECK_NE(PropertyArray::kNoHashSentinel, hash);
@@ -6811,15 +6994,15 @@ Smi* JSReceiver::CreateIdentityHash(Isolate* isolate, JSReceiver* key) {
return Smi::FromInt(hash);
}
-Smi* JSReceiver::GetOrCreateIdentityHash(Isolate* isolate) {
+Smi JSReceiver::GetOrCreateIdentityHash(Isolate* isolate) {
DisallowHeapAllocation no_gc;
- Object* hash_obj = GetIdentityHash(isolate);
- if (!hash_obj->IsUndefined(isolate)) {
- return Smi::cast(hash_obj);
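+ // Probe the raw hash directly; create a fresh one only when the sentinel
+ // says none has been assigned yet.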
+ int hash = GetIdentityHashHelper(*this);
+ if (hash != PropertyArray::kNoHashSentinel) {
+ return Smi::FromInt(hash);
}
- return JSReceiver::CreateIdentityHash(isolate, this);
+ return JSReceiver::CreateIdentityHash(isolate, *this);
}
Maybe<bool> JSObject::DeletePropertyWithInterceptor(LookupIterator* it,
@@ -6985,9 +7168,9 @@ Maybe<bool> JSReceiver::DeletePropertyOrElement(Handle<JSReceiver> object,
// ES6 19.1.2.4
// static
-Object* JSReceiver::DefineProperty(Isolate* isolate, Handle<Object> object,
- Handle<Object> key,
- Handle<Object> attributes) {
+Object JSReceiver::DefineProperty(Isolate* isolate, Handle<Object> object,
+ Handle<Object> key,
+ Handle<Object> attributes) {
// 1. If Type(O) is not Object, throw a TypeError exception.
if (!object->IsJSReceiver()) {
Handle<String> fun_name =
@@ -6997,7 +7180,8 @@ Object* JSReceiver::DefineProperty(Isolate* isolate, Handle<Object> object,
}
// 2. Let key be ToPropertyKey(P).
// 3. ReturnIfAbrupt(key).
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, key, ToPropertyKey(isolate, key));
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, key,
+ Object::ToPropertyKey(isolate, key));
// 4. Let desc be ToPropertyDescriptor(Attributes).
// 5. ReturnIfAbrupt(desc).
PropertyDescriptor desc;
@@ -7014,7 +7198,6 @@ Object* JSReceiver::DefineProperty(Isolate* isolate, Handle<Object> object,
return *object;
}
-
// ES6 19.1.2.3.1
// static
MaybeHandle<Object> JSReceiver::DefineProperties(Isolate* isolate,
@@ -7732,7 +7915,7 @@ Maybe<bool> JSProxy::DefineOwnProperty(Isolate* isolate, Handle<JSProxy> proxy,
ShouldThrow should_throw) {
STACK_CHECK(isolate, Nothing<bool>());
if (key->IsSymbol() && Handle<Symbol>::cast(key)->IsPrivate()) {
- DCHECK(!Handle<Symbol>::cast(key)->IsPrivateField());
+ DCHECK(!Handle<Symbol>::cast(key)->IsPrivateName());
return JSProxy::SetPrivateSymbol(isolate, proxy, Handle<Symbol>::cast(key),
desc, should_throw);
}
@@ -7842,7 +8025,7 @@ Maybe<bool> JSProxy::SetPrivateSymbol(Isolate* isolate, Handle<JSProxy> proxy,
Handle<Symbol> private_name,
PropertyDescriptor* desc,
ShouldThrow should_throw) {
- DCHECK(!private_name->IsPrivateField());
+ DCHECK(!private_name->IsPrivateName());
// Despite the generic name, this can only add private data properties.
if (!PropertyDescriptor::IsDataDescriptor(desc) ||
desc->ToAttributes() != DONT_ENUM) {
@@ -8196,14 +8379,14 @@ Maybe<bool> JSReceiver::SetIntegrityLevel(Handle<JSReceiver> receiver,
namespace {
template <typename Dictionary>
-bool TestDictionaryPropertiesIntegrityLevel(Dictionary* dict,
+bool TestDictionaryPropertiesIntegrityLevel(Dictionary dict,
ReadOnlyRoots roots,
PropertyAttributes level) {
DCHECK(level == SEALED || level == FROZEN);
uint32_t capacity = dict->Capacity();
for (uint32_t i = 0; i < capacity; i++) {
- Object* key;
+ Object key;
if (!dict->ToKey(roots, i, &key)) continue;
if (key->FilterKey(ALL_PROPERTIES)) continue;
PropertyDetails details = dict->DetailsAt(i);
@@ -8215,12 +8398,12 @@ bool TestDictionaryPropertiesIntegrityLevel(Dictionary* dict,
return true;
}
-bool TestFastPropertiesIntegrityLevel(Map* map, PropertyAttributes level) {
+bool TestFastPropertiesIntegrityLevel(Map map, PropertyAttributes level) {
DCHECK(level == SEALED || level == FROZEN);
DCHECK(!map->IsCustomElementsReceiverMap());
DCHECK(!map->is_dictionary_map());
- DescriptorArray* descriptors = map->instance_descriptors();
+ DescriptorArray descriptors = map->instance_descriptors();
int number_of_own_descriptors = map->NumberOfOwnDescriptors();
for (int i = 0; i < number_of_own_descriptors; i++) {
if (descriptors->GetKey(i)->IsPrivate()) continue;
@@ -8233,7 +8416,7 @@ bool TestFastPropertiesIntegrityLevel(Map* map, PropertyAttributes level) {
return true;
}
-bool TestPropertiesIntegrityLevel(JSObject* object, PropertyAttributes level) {
+bool TestPropertiesIntegrityLevel(JSObject object, PropertyAttributes level) {
DCHECK(!object->map()->IsCustomElementsReceiverMap());
if (object->HasFastProperties()) {
@@ -8244,7 +8427,7 @@ bool TestPropertiesIntegrityLevel(JSObject* object, PropertyAttributes level) {
object->property_dictionary(), object->GetReadOnlyRoots(), level);
}
-bool TestElementsIntegrityLevel(JSObject* object, PropertyAttributes level) {
+bool TestElementsIntegrityLevel(JSObject object, PropertyAttributes level) {
DCHECK(!object->HasSloppyArgumentsElements());
ElementsKind kind = object->GetElementsKind();
@@ -8254,6 +8437,9 @@ bool TestElementsIntegrityLevel(JSObject* object, PropertyAttributes level) {
NumberDictionary::cast(object->elements()), object->GetReadOnlyRoots(),
level);
}
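+ // Fixed typed arrays: fall back to the named-property integrity check.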
+ if (IsFixedTypedArrayElementsKind(kind)) {
+ return TestPropertiesIntegrityLevel(object, level);
+ }
ElementsAccessor* accessor = ElementsAccessor::ForKind(kind);
// Only DICTIONARY_ELEMENTS and SLOW_SLOPPY_ARGUMENTS_ELEMENTS have
@@ -8261,7 +8447,7 @@ bool TestElementsIntegrityLevel(JSObject* object, PropertyAttributes level) {
return accessor->NumberOfElements(object) == 0;
}
-bool FastTestIntegrityLevel(JSObject* object, PropertyAttributes level) {
+bool FastTestIntegrityLevel(JSObject object, PropertyAttributes level) {
DCHECK(!object->map()->IsCustomElementsReceiverMap());
return !object->map()->is_extensible() &&
@@ -8507,14 +8693,14 @@ void ApplyAttributesToDictionary(Isolate* isolate, ReadOnlyRoots roots,
const PropertyAttributes attributes) {
int capacity = dictionary->Capacity();
for (int i = 0; i < capacity; i++) {
- Object* k;
+ Object k;
if (!dictionary->ToKey(roots, i, &k)) continue;
if (k->FilterKey(ALL_PROPERTIES)) continue;
PropertyDetails details = dictionary->DetailsAt(i);
int attrs = attributes;
// READ_ONLY is an invalid attribute for JS setters/getters.
if ((attributes & READ_ONLY) && details.kind() == kAccessor) {
- Object* v = dictionary->ValueAt(i);
+ Object v = dictionary->ValueAt(i);
if (v->IsAccessorPair()) attrs &= ~READ_ONLY;
}
details = details.CopyAddAttributes(static_cast<PropertyAttributes>(attrs));
@@ -8555,7 +8741,7 @@ Maybe<bool> JSObject::PreventExtensionsWithTransition(
if (object->map()->has_named_interceptor() ||
object->map()->has_indexed_interceptor()) {
- MessageTemplate::Template message = MessageTemplate::kNone;
+ MessageTemplate message = MessageTemplate::kNone;
switch (attrs) {
case NONE:
message = MessageTemplate::kCannotPreventExt;
@@ -8596,8 +8782,8 @@ Maybe<bool> JSObject::PreventExtensionsWithTransition(
Handle<Map> old_map(object->map(), isolate);
TransitionsAccessor transitions(isolate, old_map);
- Map* transition = transitions.SearchSpecial(*transition_marker);
- if (transition != nullptr) {
+ Map transition = transitions.SearchSpecial(*transition_marker);
+ if (!transition.is_null()) {
Handle<Map> transition_map(transition, isolate);
DCHECK(transition_map->has_dictionary_elements() ||
transition_map->has_fixed_typed_array_elements() ||
@@ -8695,7 +8881,8 @@ MaybeHandle<Object> JSReceiver::ToPrimitive(Handle<JSReceiver> receiver,
Handle<Object> exotic_to_prim;
ASSIGN_RETURN_ON_EXCEPTION(
isolate, exotic_to_prim,
- GetMethod(receiver, isolate->factory()->to_primitive_symbol()), Object);
+ Object::GetMethod(receiver, isolate->factory()->to_primitive_symbol()),
+ Object);
if (!exotic_to_prim->IsUndefined(isolate)) {
Handle<Object> hint_string =
isolate->factory()->ToPrimitiveHintString(hint);
@@ -8752,7 +8939,7 @@ MaybeHandle<Object> JSReceiver::OrdinaryToPrimitive(
// TODO(cbruni/jkummerow): Consider moving this into elements.cc.
bool JSObject::HasEnumerableElements() {
// TODO(cbruni): cleanup
- JSObject* object = this;
+ JSObject object = *this;
switch (object->GetElementsKind()) {
case PACKED_SMI_ELEMENTS:
case PACKED_ELEMENTS:
@@ -8764,7 +8951,7 @@ bool JSObject::HasEnumerableElements() {
}
case HOLEY_SMI_ELEMENTS:
case HOLEY_ELEMENTS: {
- FixedArray* elements = FixedArray::cast(object->elements());
+ FixedArray elements = FixedArray::cast(object->elements());
int length = object->IsJSArray()
? Smi::ToInt(JSArray::cast(object)->length())
: elements->length();
@@ -8781,7 +8968,7 @@ bool JSObject::HasEnumerableElements() {
// Zero-length arrays would use the empty FixedArray...
if (length == 0) return false;
// ...so only cast to FixedDoubleArray otherwise.
- FixedDoubleArray* elements = FixedDoubleArray::cast(object->elements());
+ FixedDoubleArray elements = FixedDoubleArray::cast(object->elements());
for (int i = 0; i < length; i++) {
if (!elements->is_the_hole(i)) return true;
}
@@ -8796,7 +8983,7 @@ bool JSObject::HasEnumerableElements() {
return length > 0;
}
case DICTIONARY_ELEMENTS: {
- NumberDictionary* elements = NumberDictionary::cast(object->elements());
+ NumberDictionary elements = NumberDictionary::cast(object->elements());
return elements->NumberOfEnumerableProperties() > 0;
}
case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
@@ -8817,7 +9004,7 @@ bool JSObject::HasEnumerableElements() {
int Map::NumberOfEnumerableProperties() const {
int result = 0;
- DescriptorArray* descs = instance_descriptors();
+ DescriptorArray descs = instance_descriptors();
int limit = NumberOfOwnDescriptors();
for (int i = 0; i < limit; i++) {
if ((descs->GetDetails(i).attributes() & ONLY_ENUMERABLE) == 0 &&
@@ -8831,7 +9018,7 @@ int Map::NumberOfEnumerableProperties() const {
int Map::NextFreePropertyIndex() const {
int free_index = 0;
int number_of_own_descriptors = NumberOfOwnDescriptors();
- DescriptorArray* descs = instance_descriptors();
+ DescriptorArray descs = instance_descriptors();
for (int i = 0; i < number_of_own_descriptors; i++) {
PropertyDetails details = descs->GetDetails(i);
if (details.location() == kField) {
@@ -8967,7 +9154,7 @@ MaybeHandle<FixedArray> GetOwnValuesOrEntries(Isolate* isolate,
Handle<Object> value;
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
- isolate, value, JSReceiver::GetPropertyOrElement(isolate, object, key),
+ isolate, value, Object::GetPropertyOrElement(isolate, object, key),
MaybeHandle<FixedArray>());
if (get_entries) {
@@ -9017,12 +9204,13 @@ bool Map::DictionaryElementsInPrototypeChainOnly(Isolate* isolate) {
return false;
}
- for (PrototypeIterator iter(isolate, this); !iter.IsAtEnd(); iter.Advance()) {
+ for (PrototypeIterator iter(isolate, *this); !iter.IsAtEnd();
+ iter.Advance()) {
// Be conservative, don't walk into proxies.
if (iter.GetCurrent()->IsJSProxy()) return true;
// String wrappers have non-configurable, non-writable elements.
if (iter.GetCurrent()->IsStringWrapper()) return true;
- JSObject* current = iter.GetCurrent<JSObject>();
+ JSObject current = iter.GetCurrent<JSObject>();
if (current->HasDictionaryElements() &&
current->element_dictionary()->requires_slow_elements()) {
@@ -9030,8 +9218,8 @@ bool Map::DictionaryElementsInPrototypeChainOnly(Isolate* isolate) {
}
if (current->HasSlowArgumentsElements()) {
- FixedArray* parameter_map = FixedArray::cast(current->elements());
- Object* arguments = parameter_map->get(1);
+ FixedArray parameter_map = FixedArray::cast(current->elements());
+ Object arguments = parameter_map->get(1);
if (NumberDictionary::cast(arguments)->requires_slow_elements()) {
return true;
}
@@ -9128,10 +9316,10 @@ MaybeHandle<Object> JSObject::SetAccessor(Handle<JSObject> object,
return object;
}
-Object* JSObject::SlowReverseLookup(Object* value) {
+Object JSObject::SlowReverseLookup(Object value) {
if (HasFastProperties()) {
int number_of_own_descriptors = map()->NumberOfOwnDescriptors();
- DescriptorArray* descs = map()->instance_descriptors();
+ DescriptorArray descs = map()->instance_descriptors();
bool value_is_number = value->IsNumber();
for (int i = 0; i < number_of_own_descriptors; i++) {
PropertyDetails details = descs->GetDetails(i);
@@ -9146,7 +9334,7 @@ Object* JSObject::SlowReverseLookup(Object* value) {
}
}
} else {
- Object* property = RawFastPropertyAt(field_index);
+ Object property = RawFastPropertyAt(field_index);
if (field_index.is_double()) {
DCHECK(property->IsMutableHeapNumber());
if (value_is_number && property->Number() == value->Number()) {
@@ -9167,7 +9355,7 @@ Object* JSObject::SlowReverseLookup(Object* value) {
}
return GetReadOnlyRoots().undefined_value();
} else if (IsJSGlobalObject()) {
- return JSGlobalObject::cast(this)->global_dictionary()->SlowReverseLookup(
+ return JSGlobalObject::cast(*this)->global_dictionary()->SlowReverseLookup(
value);
} else {
return property_dictionary()->SlowReverseLookup(value);
@@ -9228,18 +9416,21 @@ Handle<Map> Map::Normalize(Isolate* isolate, Handle<Map> fast_map,
DCHECK_EQ(fresh->raw_transitions(),
MaybeObject::FromObject(Smi::kZero));
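+ // kTaggedSize is the size of a tagged slot; it currently matches the
+ // pointer size but is meant to diverge under pointer compression.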
STATIC_ASSERT(kDescriptorsOffset ==
- kTransitionsOrPrototypeInfoOffset + kPointerSize);
- DCHECK_EQ(0, memcmp(HeapObject::RawField(*fresh, kDescriptorsOffset),
- HeapObject::RawField(*new_map, kDescriptorsOffset),
- kDependentCodeOffset - kDescriptorsOffset));
+ kTransitionsOrPrototypeInfoOffset + kTaggedSize);
+ DCHECK_EQ(
+ 0,
+ memcmp(
+ HeapObject::RawField(*fresh, kDescriptorsOffset).ToVoidPtr(),
+ HeapObject::RawField(*new_map, kDescriptorsOffset).ToVoidPtr(),
+ kDependentCodeOffset - kDescriptorsOffset));
} else {
DCHECK_EQ(0, memcmp(reinterpret_cast<void*>(fresh->address()),
reinterpret_cast<void*>(new_map->address()),
Map::kDependentCodeOffset));
}
STATIC_ASSERT(Map::kPrototypeValidityCellOffset ==
- Map::kDependentCodeOffset + kPointerSize);
- int offset = Map::kPrototypeValidityCellOffset + kPointerSize;
+ Map::kDependentCodeOffset + kTaggedSize);
+ int offset = Map::kPrototypeValidityCellOffset + kTaggedSize;
DCHECK_EQ(0, memcmp(reinterpret_cast<void*>(fresh->address() + offset),
reinterpret_cast<void*>(new_map->address() + offset),
Map::kSize - offset));
@@ -9263,7 +9454,7 @@ Handle<Map> Map::CopyNormalized(Isolate* isolate, Handle<Map> map,
PropertyNormalizationMode mode) {
int new_instance_size = map->instance_size();
if (mode == CLEAR_INOBJECT_PROPERTIES) {
- new_instance_size -= map->GetInObjectProperties() * kPointerSize;
+ new_instance_size -= map->GetInObjectProperties() * kTaggedSize;
}
Handle<Map> result = RawCopy(
@@ -9302,7 +9493,7 @@ void EnsureInitialMap(Isolate* isolate, Handle<Map> map) {
// Strict function maps have Function as a constructor but the
// Function's initial map is a sloppy function map. Same holds for
// GeneratorFunction / AsyncFunction and its initial map.
- Object* constructor = map->GetConstructor();
+ Object constructor = map->GetConstructor();
DCHECK(constructor->IsJSFunction());
DCHECK(*map == JSFunction::cast(constructor)->initial_map() ||
*map == *isolate->strict_function_map() ||
@@ -9345,9 +9536,9 @@ Handle<Map> Map::CopyInitialMap(Isolate* isolate, Handle<Map> map,
int number_of_own_descriptors = map->NumberOfOwnDescriptors();
if (number_of_own_descriptors > 0) {
// The copy will use the same descriptors array.
- result->UpdateDescriptors(map->instance_descriptors(),
- map->GetLayoutDescriptor());
- result->SetNumberOfOwnDescriptors(number_of_own_descriptors);
+ result->UpdateDescriptors(isolate, map->instance_descriptors(),
+ map->GetLayoutDescriptor(),
+ number_of_own_descriptors);
DCHECK_EQ(result->NumberOfFields(),
result->GetInObjectProperties() - result->UnusedPropertyFields());
@@ -9387,7 +9578,7 @@ Handle<Map> Map::ShareDescriptor(Isolate* isolate, Handle<Map> map,
}
// Ensure there's space for the new descriptor in the shared descriptor array.
- if (descriptors->NumberOfSlackDescriptors() == 0) {
+ if (descriptors->number_of_slack_descriptors() == 0) {
int old_size = descriptors->number_of_descriptors();
if (old_size == 0) {
descriptors = DescriptorArray::Allocate(isolate, 0, 1);
@@ -9407,7 +9598,7 @@ Handle<Map> Map::ShareDescriptor(Isolate* isolate, Handle<Map> map,
{
DisallowHeapAllocation no_gc;
descriptors->Append(descriptor);
- result->InitializeDescriptors(*descriptors, *layout_descriptor);
+ result->InitializeDescriptors(isolate, *descriptors, *layout_descriptor);
}
DCHECK(result->NumberOfOwnDescriptors() == map->NumberOfOwnDescriptors() + 1);
@@ -9473,17 +9664,17 @@ Handle<Map> Map::CopyReplaceDescriptors(
if (!map->is_prototype_map()) {
if (flag == INSERT_TRANSITION &&
TransitionsAccessor(isolate, map).CanHaveMoreTransitions()) {
- result->InitializeDescriptors(*descriptors, *layout_descriptor);
+ result->InitializeDescriptors(isolate, *descriptors, *layout_descriptor);
DCHECK(!maybe_name.is_null());
ConnectTransition(isolate, map, result, name, simple_flag);
} else {
descriptors->GeneralizeAllFields();
- result->InitializeDescriptors(*descriptors,
+ result->InitializeDescriptors(isolate, *descriptors,
LayoutDescriptor::FastPointerLayout());
}
} else {
- result->InitializeDescriptors(*descriptors, *layout_descriptor);
+ result->InitializeDescriptors(isolate, *descriptors, *layout_descriptor);
}
if (FLAG_trace_maps &&
// Mirror conditions above that did not call ConnectTransition().
@@ -9491,7 +9682,7 @@ Handle<Map> Map::CopyReplaceDescriptors(
!(flag == INSERT_TRANSITION &&
TransitionsAccessor(isolate, map).CanHaveMoreTransitions()))) {
LOG(isolate, MapEvent("ReplaceDescriptors", *map, *result, reason,
- maybe_name.is_null() ? nullptr : *name));
+ maybe_name.is_null() ? Name() : *name));
}
return result;
}
@@ -9520,7 +9711,8 @@ Handle<Map> Map::AddMissingTransitions(
// the flag and clear it right before the descriptors are installed. This
// makes heap verification happy and ensures the flag ends up accurate.
Handle<Map> last_map = CopyDropDescriptors(isolate, split_map);
- last_map->InitializeDescriptors(*descriptors, *full_layout_descriptor);
+ last_map->InitializeDescriptors(isolate, *descriptors,
+ *full_layout_descriptor);
last_map->SetInObjectUnusedPropertyFields(0);
last_map->set_may_have_interesting_symbols(true);
@@ -9534,6 +9726,7 @@ Handle<Map> Map::AddMissingTransitions(
Handle<Map> new_map = CopyDropDescriptors(isolate, map);
InstallDescriptors(isolate, map, new_map, i, descriptors,
full_layout_descriptor);
+
map = new_map;
}
map->NotifyLeafMapLayoutChange(isolate);
@@ -9552,8 +9745,7 @@ void Map::InstallDescriptors(Isolate* isolate, Handle<Map> parent,
Handle<LayoutDescriptor> full_layout_descriptor) {
DCHECK(descriptors->IsSortedNoDuplicates());
- child->set_instance_descriptors(*descriptors);
- child->SetNumberOfOwnDescriptors(new_descriptor + 1);
+ child->SetInstanceDescriptors(isolate, *descriptors, new_descriptor + 1);
child->CopyUnusedPropertyFields(*parent);
PropertyDetails details = descriptors->GetDetails(new_descriptor);
if (details.location() == kField) {
@@ -9592,14 +9784,14 @@ Handle<Map> Map::CopyAsElementsKind(Isolate* isolate, Handle<Map> map,
!map->CanHaveFastTransitionableElementsKind(),
IsDictionaryElementsKind(kind) || IsTerminalElementsKind(kind));
- Map* maybe_elements_transition_map = nullptr;
+ Map maybe_elements_transition_map;
if (flag == INSERT_TRANSITION) {
// Ensure we are asked to add an elements kind transition "near the root".
DCHECK_EQ(map->FindRootMap(isolate)->NumberOfOwnDescriptors(),
map->NumberOfOwnDescriptors());
maybe_elements_transition_map = map->ElementsTransitionMap();
- DCHECK(maybe_elements_transition_map == nullptr ||
+ DCHECK(maybe_elements_transition_map.is_null() ||
(maybe_elements_transition_map->elements_kind() ==
DICTIONARY_ELEMENTS &&
kind == DICTIONARY_ELEMENTS));
@@ -9611,10 +9803,10 @@ Handle<Map> Map::CopyAsElementsKind(Isolate* isolate, Handle<Map> map,
bool insert_transition =
flag == INSERT_TRANSITION &&
TransitionsAccessor(isolate, map).CanHaveMoreTransitions() &&
- maybe_elements_transition_map == nullptr;
+ maybe_elements_transition_map.is_null();
if (insert_transition) {
- Handle<Map> new_map = CopyForTransition(isolate, map, "CopyAsElementsKind");
+ Handle<Map> new_map = CopyForElementsTransition(isolate, map);
new_map->set_elements_kind(kind);
Handle<Name> name = isolate->factory()->elements_transition_symbol();
@@ -9644,9 +9836,9 @@ Handle<Map> Map::AsLanguageMode(Isolate* isolate, Handle<Map> initial_map,
DCHECK_EQ(LanguageMode::kStrict, shared_info->language_mode());
Handle<Symbol> transition_symbol =
isolate->factory()->strict_function_transition_symbol();
- Map* maybe_transition = TransitionsAccessor(isolate, initial_map)
- .SearchSpecial(*transition_symbol);
- if (maybe_transition != nullptr) {
+ Map maybe_transition = TransitionsAccessor(isolate, initial_map)
+ .SearchSpecial(*transition_symbol);
+ if (!maybe_transition.is_null()) {
return handle(maybe_transition, isolate);
}
initial_map->NotifyLeafMapLayoutChange(isolate);
@@ -9668,8 +9860,7 @@ Handle<Map> Map::AsLanguageMode(Isolate* isolate, Handle<Map> initial_map,
return map;
}
-Handle<Map> Map::CopyForTransition(Isolate* isolate, Handle<Map> map,
- const char* reason) {
+Handle<Map> Map::CopyForElementsTransition(Isolate* isolate, Handle<Map> map) {
DCHECK(!map->is_prototype_map());
Handle<Map> new_map = CopyDropDescriptors(isolate, map);
@@ -9677,7 +9868,7 @@ Handle<Map> Map::CopyForTransition(Isolate* isolate, Handle<Map> map,
// In case the map owned its own descriptors, share the descriptors and
// transfer ownership to the new map.
// The properties did not change, so reuse descriptors.
- new_map->InitializeDescriptors(map->instance_descriptors(),
+ new_map->InitializeDescriptors(isolate, map->instance_descriptors(),
map->GetLayoutDescriptor());
} else {
// In case the map did not own its own descriptors, a split is forced by
@@ -9688,11 +9879,8 @@ Handle<Map> Map::CopyForTransition(Isolate* isolate, Handle<Map> map,
isolate, descriptors, number_of_own_descriptors);
Handle<LayoutDescriptor> new_layout_descriptor(map->GetLayoutDescriptor(),
isolate);
- new_map->InitializeDescriptors(*new_descriptors, *new_layout_descriptor);
- }
-
- if (FLAG_trace_maps) {
- LOG(isolate, MapEvent("CopyForTransition", *map, *new_map, reason));
+ new_map->InitializeDescriptors(isolate, *new_descriptors,
+ *new_layout_descriptor);
}
return new_map;
}
@@ -9723,11 +9911,11 @@ Handle<Map> Map::Create(Isolate* isolate, int inobject_properties) {
}
int new_instance_size =
- JSObject::kHeaderSize + kPointerSize * inobject_properties;
+ JSObject::kHeaderSize + kTaggedSize * inobject_properties;
// Adjust the map with the extra inobject properties.
copy->set_instance_size(new_instance_size);
- copy->SetInObjectPropertiesStartInWords(JSObject::kHeaderSize / kPointerSize);
+ copy->SetInObjectPropertiesStartInWords(JSObject::kHeaderSize / kTaggedSize);
DCHECK_EQ(copy->GetInObjectProperties(), inobject_properties);
copy->SetInObjectUnusedPropertyFields(inobject_properties);
copy->set_visitor_id(Map::GetVisitorId(*copy));
@@ -9759,8 +9947,8 @@ Handle<Map> Map::CopyForPreventExtensions(Isolate* isolate, Handle<Map> map,
namespace {
-bool CanHoldValue(DescriptorArray* descriptors, int descriptor,
- PropertyConstness constness, Object* value) {
+bool CanHoldValue(DescriptorArray descriptors, int descriptor,
+ PropertyConstness constness, Object value) {
PropertyDetails details = descriptors->GetDetails(descriptor);
if (details.location() == kField) {
if (details.kind() == kData) {
@@ -9839,9 +10027,9 @@ Handle<Map> Map::TransitionToDataProperty(Isolate* isolate, Handle<Map> map,
// Migrate to the newest map before storing the property.
map = Update(isolate, map);
- Map* maybe_transition = TransitionsAccessor(isolate, map)
- .SearchTransition(*name, kData, attributes);
- if (maybe_transition != nullptr) {
+ Map maybe_transition = TransitionsAccessor(isolate, map)
+ .SearchTransition(*name, kData, attributes);
+ if (!maybe_transition.is_null()) {
Handle<Map> transition(maybe_transition, isolate);
int descriptor = transition->LastAdded();
@@ -9962,11 +10150,11 @@ Handle<Map> Map::TransitionToAccessorProperty(Isolate* isolate, Handle<Map> map,
? KEEP_INOBJECT_PROPERTIES
: CLEAR_INOBJECT_PROPERTIES;
- Map* maybe_transition = TransitionsAccessor(isolate, map)
- .SearchTransition(*name, kAccessor, attributes);
- if (maybe_transition != nullptr) {
+ Map maybe_transition = TransitionsAccessor(isolate, map)
+ .SearchTransition(*name, kAccessor, attributes);
+ if (!maybe_transition.is_null()) {
Handle<Map> transition(maybe_transition, isolate);
- DescriptorArray* descriptors = transition->instance_descriptors();
+ DescriptorArray descriptors = transition->instance_descriptors();
int descriptor = transition->LastAdded();
DCHECK(descriptors->GetKey(descriptor)->Equals(*name));
@@ -9989,7 +10177,7 @@ Handle<Map> Map::TransitionToAccessorProperty(Isolate* isolate, Handle<Map> map,
}
Handle<AccessorPair> pair;
- DescriptorArray* old_descriptors = map->instance_descriptors();
+ DescriptorArray old_descriptors = map->instance_descriptors();
if (descriptor != DescriptorArray::kNotFound) {
if (descriptor != map->LastAdded()) {
return Map::Normalize(isolate, map, mode, "AccessorsOverwritingNonLast");
@@ -10109,14 +10297,14 @@ Handle<DescriptorArray> DescriptorArray::CopyUpToAddAttributes(
if (attributes != NONE) {
for (int i = 0; i < size; ++i) {
- MaybeObject* value_or_field_type = desc->GetValue(i);
- Name* key = desc->GetKey(i);
+ MaybeObject value_or_field_type = desc->GetValue(i);
+ Name key = desc->GetKey(i);
PropertyDetails details = desc->GetDetails(i);
// Bulk attribute changes never affect private properties.
if (!key->IsPrivate()) {
int mask = DONT_DELETE | DONT_ENUM;
// READ_ONLY is an invalid attribute for JS setters/getters.
- HeapObject* heap_object;
+ HeapObject heap_object;
if (details.kind() != kAccessor ||
!(value_or_field_type->GetHeapObjectIfStrong(&heap_object) &&
heap_object->IsAccessorPair())) {
@@ -10152,10 +10340,10 @@ Handle<DescriptorArray> DescriptorArray::CopyForFastObjectClone(
DescriptorArray::Allocate(isolate, size, slack);
for (int i = 0; i < size; ++i) {
- Name* key = src->GetKey(i);
+ Name key = src->GetKey(i);
PropertyDetails details = src->GetDetails(i);
- DCHECK(!key->IsPrivateField());
+ DCHECK(!key->IsPrivateName());
DCHECK(details.IsEnumerable());
DCHECK_EQ(details.kind(), kData);
@@ -10166,7 +10354,7 @@ Handle<DescriptorArray> DescriptorArray::CopyForFastObjectClone(
details.field_index());
// Do not propagate the field type of normal object fields from the
// original descriptors since FieldType changes don't create new maps.
- MaybeObject* type = src->GetValue(i);
+ MaybeObject type = src->GetValue(i);
if (details.location() == PropertyLocation::kField) {
type = MaybeObject::FromObject(FieldType::Any());
}
@@ -10178,7 +10366,7 @@ Handle<DescriptorArray> DescriptorArray::CopyForFastObjectClone(
return descriptors;
}
-bool DescriptorArray::IsEqualUpTo(DescriptorArray* desc, int nof_descriptors) {
+bool DescriptorArray::IsEqualUpTo(DescriptorArray desc, int nof_descriptors) {
for (int i = 0; i < nof_descriptors; i++) {
if (GetKey(i) != desc->GetKey(i) || GetValue(i) != desc->GetValue(i)) {
return false;
@@ -10244,8 +10432,8 @@ Handle<FixedArray> FixedArray::SetAndGrow(Isolate* isolate,
bool FixedArray::ContainsSortedNumbers() {
for (int i = 1; i < length(); ++i) {
- Object* a_obj = get(i - 1);
- Object* b_obj = get(i);
+ Object a_obj = get(i - 1);
+ Object b_obj = get(i);
if (!a_obj->IsNumber() || !b_obj->IsNumber()) return false;
uint32_t a = NumberToUint32(a_obj);
@@ -10270,39 +10458,28 @@ Handle<FixedArray> FixedArray::ShrinkOrEmpty(Isolate* isolate,
void FixedArray::Shrink(Isolate* isolate, int new_length) {
DCHECK(0 < new_length && new_length <= length());
if (new_length < length()) {
- isolate->heap()->RightTrimFixedArray(this, length() - new_length);
+ isolate->heap()->RightTrimFixedArray(*this, length() - new_length);
}
}
-void FixedArray::CopyTo(int pos, FixedArray* dest, int dest_pos,
- int len) const {
+void FixedArray::CopyTo(int pos, FixedArray dest, int dest_pos, int len) const {
DisallowHeapAllocation no_gc;
// Return early if len == 0 so that we don't try to read the write-barrier
// mode off a canonical read-only empty fixed array.
if (len == 0) return;
WriteBarrierMode mode = dest->GetWriteBarrierMode(no_gc);
for (int index = 0; index < len; index++) {
- dest->set(dest_pos+index, get(pos+index), mode);
- }
-}
-
-#ifdef DEBUG
-bool FixedArray::IsEqualTo(FixedArray* other) {
- if (length() != other->length()) return false;
- for (int i = 0 ; i < length(); ++i) {
- if (get(i) != other->get(i)) return false;
+ dest->set(dest_pos + index, get(pos + index), mode);
}
- return true;
}
-#endif
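// ---------------------------------------------------------------------------
// Sketch (illustration only, not part of the patch) of the CopyTo pattern
// above: the destination's write-barrier mode is queried once, while
// allocation is disallowed, and then reused for every element store instead
// of being re-derived per element. BarrierMode and Array are hypothetical
// stand-ins for the V8 types.
#include <vector>

enum class BarrierMode { kSkipWriteBarrier, kUpdateWriteBarrier };

struct Array {
  std::vector<int> slots;
  bool in_young_generation = false;  // assumption: young objects need no barrier
  BarrierMode barrier_mode() const {
    return in_young_generation ? BarrierMode::kSkipWriteBarrier
                               : BarrierMode::kUpdateWriteBarrier;
  }
  void set(int index, int value, BarrierMode /*mode*/) { slots[index] = value; }
};

void CopyTo(const Array& src, int pos, Array* dest, int dest_pos, int len) {
  if (len == 0) return;  // as above: never touch metadata of an empty array
  BarrierMode mode = dest->barrier_mode();  // computed once, not per element
  for (int i = 0; i < len; i++) {
    dest->set(dest_pos + i, src.slots[pos + i], mode);
  }
}
// ---------------------------------------------------------------------------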
-void JSObject::PrototypeRegistryCompactionCallback(HeapObject* value,
+void JSObject::PrototypeRegistryCompactionCallback(HeapObject value,
int old_index,
int new_index) {
DCHECK(value->IsMap() && Map::cast(value)->is_prototype_map());
- Map* map = Map::cast(value);
+ Map map = Map::cast(value);
DCHECK(map->prototype_info()->IsPrototypeInfo());
- PrototypeInfo* proto_info = PrototypeInfo::cast(map->prototype_info());
+ PrototypeInfo proto_info = PrototypeInfo::cast(map->prototype_info());
DCHECK_EQ(old_index, proto_info->registry_slot());
proto_info->set_registry_slot(new_index);
}
@@ -10373,7 +10550,8 @@ Handle<FixedArray> EnsureSpaceInFixedArray(Isolate* isolate,
Handle<ArrayList> ArrayList::EnsureSpace(Isolate* isolate,
Handle<ArrayList> array, int length) {
const bool empty = (array->length() == 0);
- auto ret = EnsureSpaceInFixedArray(isolate, array, kFirstIndex + length);
+ Handle<FixedArray> ret =
+ EnsureSpaceInFixedArray(isolate, array, kFirstIndex + length);
if (empty) {
ret->set_map_no_write_barrier(array->GetReadOnlyRoots().array_list_map());
@@ -10426,13 +10604,15 @@ int WeakArrayList::CountLiveWeakReferences() const {
bool WeakArrayList::RemoveOne(const MaybeObjectHandle& value) {
if (length() == 0) return false;
// Optimize for the most recently added element to be removed again.
+ MaybeObject cleared_weak_ref =
+ HeapObjectReference::ClearedValue(GetIsolate());
int last_index = length() - 1;
for (int i = last_index; i >= 0; --i) {
if (Get(i) == *value) {
// Move the last element into the this slot (or no-op, if this is the
// last slot).
Set(i, Get(last_index));
- Set(last_index, HeapObjectReference::ClearedValue());
+ Set(last_index, cleared_weak_ref);
set_length(last_index);
return true;
}
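// ---------------------------------------------------------------------------
// Sketch (not from the patch) of the RemoveOne strategy above: scan backwards,
// since the most recently added element is the likeliest removal; overwrite
// the hit with the last live element; park a cleared sentinel in the vacated
// tail slot so no stale reference survives; shrink the logical length.
// kClearedSentinel stands in for HeapObjectReference::ClearedValue().
#include <vector>

constexpr int kClearedSentinel = -1;

bool RemoveOne(std::vector<int>& list, int value) {
  if (list.empty()) return false;
  const int last = static_cast<int>(list.size()) - 1;
  for (int i = last; i >= 0; --i) {
    if (list[i] == value) {
      list[i] = list[last];           // move the last element into this slot
      list[last] = kClearedSentinel;  // leave no stale reference in the tail
      list.pop_back();                // logical length shrinks by one
      return true;
    }
  }
  return false;
}
// ---------------------------------------------------------------------------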
@@ -10469,7 +10649,7 @@ Handle<WeakArrayList> PrototypeUsers::Add(Isolate* isolate,
if (empty_slot != kNoEmptySlotsMarker) {
DCHECK_GE(empty_slot, kFirstIndex);
CHECK_LT(empty_slot, array->length());
- int next_empty_slot = Smi::ToInt(array->Get(empty_slot)->cast<Smi>());
+ int next_empty_slot = array->Get(empty_slot).ToSmi().value();
array->Set(empty_slot, HeapObjectReference::Weak(*value));
if (assigned_index != nullptr) *assigned_index = empty_slot;
@@ -10488,9 +10668,9 @@ Handle<WeakArrayList> PrototypeUsers::Add(Isolate* isolate,
return array;
}
-WeakArrayList* PrototypeUsers::Compact(Handle<WeakArrayList> array, Heap* heap,
- CompactionCallback callback,
- PretenureFlag pretenure) {
+WeakArrayList PrototypeUsers::Compact(Handle<WeakArrayList> array, Heap* heap,
+ CompactionCallback callback,
+ PretenureFlag pretenure) {
if (array->length() == 0) {
return *array;
}
@@ -10507,8 +10687,8 @@ WeakArrayList* PrototypeUsers::Compact(Handle<WeakArrayList> array, Heap* heap,
// cleared weak heap objects. Count the number of live objects again.
int copy_to = kFirstIndex;
for (int i = kFirstIndex; i < array->length(); i++) {
- MaybeObject* element = array->Get(i);
- HeapObject* value;
+ MaybeObject element = array->Get(i);
+ HeapObject value;
if (element->GetHeapObjectIfWeak(&value)) {
callback(value, i, copy_to);
new_array->Set(copy_to++, element);
@@ -10525,9 +10705,8 @@ Handle<RegExpMatchInfo> RegExpMatchInfo::ReserveCaptures(
Isolate* isolate, Handle<RegExpMatchInfo> match_info, int capture_count) {
DCHECK_GE(match_info->length(), kLastMatchOverhead);
const int required_length = kFirstCaptureIndex + capture_count;
- Handle<FixedArray> result =
- EnsureSpaceInFixedArray(isolate, match_info, required_length);
- return Handle<RegExpMatchInfo>::cast(result);
+ return Handle<RegExpMatchInfo>::cast(
+ EnsureSpaceInFixedArray(isolate, match_info, required_length));
}
// static
@@ -10582,28 +10761,32 @@ Handle<FrameArray> FrameArray::EnsureSpace(Isolate* isolate,
}
Handle<DescriptorArray> DescriptorArray::Allocate(Isolate* isolate,
- int number_of_descriptors,
+ int nof_descriptors,
int slack,
PretenureFlag pretenure) {
- DCHECK_LE(0, number_of_descriptors);
- Factory* factory = isolate->factory();
- // Do not use DescriptorArray::cast on incomplete object.
- int size = number_of_descriptors + slack;
- if (size == 0) return factory->empty_descriptor_array();
- // Allocate the array of keys.
- Handle<WeakFixedArray> result =
- factory->NewWeakFixedArrayWithMap<DescriptorArray>(
- RootIndex::kDescriptorArrayMap, LengthFor(size), pretenure);
- result->Set(kDescriptorLengthIndex,
- MaybeObject::FromObject(Smi::FromInt(number_of_descriptors)));
- result->Set(kEnumCacheIndex, MaybeObject::FromObject(
- ReadOnlyRoots(isolate).empty_enum_cache()));
- return Handle<DescriptorArray>::cast(result);
+ return nof_descriptors + slack == 0
+ ? isolate->factory()->empty_descriptor_array()
+ : isolate->factory()->NewDescriptorArray(nof_descriptors, slack,
+ pretenure);
+}
+
+void DescriptorArray::Initialize(EnumCache enum_cache,
+ HeapObject undefined_value,
+ int nof_descriptors, int slack) {
+ DCHECK_GE(nof_descriptors, 0);
+ DCHECK_GE(slack, 0);
+ DCHECK_LE(nof_descriptors + slack, kMaxNumberOfDescriptors);
+ set_number_of_all_descriptors(nof_descriptors + slack);
+ set_number_of_descriptors(nof_descriptors);
+ set_raw_number_of_marked_descriptors(0);
+ set_filler16bits(0);
+ set_enum_cache(enum_cache);
+ MemsetTagged(GetDescriptorSlot(0), undefined_value,
+ number_of_all_descriptors() * kEntrySize);
}
void DescriptorArray::ClearEnumCache() {
- set(kEnumCacheIndex,
- MaybeObject::FromObject(GetReadOnlyRoots().empty_enum_cache()));
+ set_enum_cache(GetReadOnlyRoots().empty_enum_cache());
}
void DescriptorArray::Replace(int index, Descriptor* descriptor) {
@@ -10612,20 +10795,20 @@ void DescriptorArray::Replace(int index, Descriptor* descriptor) {
}
// static
-void DescriptorArray::SetEnumCache(Handle<DescriptorArray> descriptors,
- Isolate* isolate, Handle<FixedArray> keys,
- Handle<FixedArray> indices) {
- EnumCache* enum_cache = descriptors->GetEnumCache();
+void DescriptorArray::InitializeOrChangeEnumCache(
+ Handle<DescriptorArray> descriptors, Isolate* isolate,
+ Handle<FixedArray> keys, Handle<FixedArray> indices) {
+ EnumCache enum_cache = descriptors->enum_cache();
if (enum_cache == ReadOnlyRoots(isolate).empty_enum_cache()) {
enum_cache = *isolate->factory()->NewEnumCache(keys, indices);
- descriptors->set(kEnumCacheIndex, MaybeObject::FromObject(enum_cache));
+ descriptors->set_enum_cache(enum_cache);
} else {
enum_cache->set_keys(*keys);
enum_cache->set_indices(*indices);
}
}
-void DescriptorArray::CopyFrom(int index, DescriptorArray* src) {
+void DescriptorArray::CopyFrom(int index, DescriptorArray src) {
PropertyDetails details = src->GetDetails(index);
Set(index, src->GetKey(index), src->GetValue(index), details);
}
@@ -10684,6 +10867,28 @@ void DescriptorArray::Sort() {
DCHECK(IsSortedNoDuplicates());
}
+int16_t DescriptorArray::UpdateNumberOfMarkedDescriptors(
+ unsigned mark_compact_epoch, int16_t new_marked) {
+ STATIC_ASSERT(kMaxNumberOfDescriptors <=
+ NumberOfMarkedDescriptors::kMaxNumberOfMarkedDescriptors);
+ int16_t old_raw_marked = raw_number_of_marked_descriptors();
+ int16_t old_marked =
+ NumberOfMarkedDescriptors::decode(mark_compact_epoch, old_raw_marked);
+ int16_t new_raw_marked =
+ NumberOfMarkedDescriptors::encode(mark_compact_epoch, new_marked);
+ while (old_marked < new_marked) {
+ int16_t actual_raw_marked = CompareAndSwapRawNumberOfMarkedDescriptors(
+ old_raw_marked, new_raw_marked);
+ if (actual_raw_marked == old_raw_marked) {
+ break;
+ }
+ old_raw_marked = actual_raw_marked;
+ old_marked =
+ NumberOfMarkedDescriptors::decode(mark_compact_epoch, old_raw_marked);
+ }
+ return old_marked;
+}
+
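// ---------------------------------------------------------------------------
// Standalone sketch (not from this patch) of UpdateNumberOfMarkedDescriptors
// above: the raw 16-bit field packs a GC-epoch tag with a counter, and the
// loop monotonically raises the counter for the current epoch via
// compare-and-swap. The layout here (2 epoch bits, 14 count bits) is an
// assumption for illustration; Encode/Decode mirror NumberOfMarkedDescriptors.
#include <atomic>
#include <cstdint>

constexpr unsigned kEpochBits = 2;
constexpr uint16_t kCountMask = (1u << (16 - kEpochBits)) - 1;

uint16_t Encode(unsigned epoch, uint16_t count) {
  return static_cast<uint16_t>((epoch << (16 - kEpochBits)) |
                               (count & kCountMask));
}
uint16_t Decode(unsigned epoch, uint16_t raw) {
  // A value stamped with a stale epoch decodes as 0 for the current epoch.
  return (raw >> (16 - kEpochBits)) == (epoch & ((1u << kEpochBits) - 1))
             ? static_cast<uint16_t>(raw & kCountMask)
             : 0;
}

uint16_t UpdateMarked(std::atomic<uint16_t>& field, unsigned epoch,
                      uint16_t new_marked) {
  uint16_t old_raw = field.load(std::memory_order_relaxed);
  uint16_t old_marked = Decode(epoch, old_raw);
  while (old_marked < new_marked &&
         !field.compare_exchange_weak(old_raw, Encode(epoch, new_marked))) {
    // CAS failure refreshed old_raw with the competing value; re-decode it.
    old_marked = Decode(epoch, old_raw);
  }
  return old_marked;  // the last marked count observed before our update
}
// ---------------------------------------------------------------------------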
Handle<AccessorPair> AccessorPair::Copy(Isolate* isolate,
Handle<AccessorPair> pair) {
Handle<AccessorPair> copy = isolate->factory()->NewAccessorPair();
@@ -10695,7 +10900,7 @@ Handle<AccessorPair> AccessorPair::Copy(Isolate* isolate,
Handle<Object> AccessorPair::GetComponent(Isolate* isolate,
Handle<AccessorPair> accessor_pair,
AccessorComponent component) {
- Object* accessor = accessor_pair->get(component);
+ Object accessor = accessor_pair->get(component);
if (accessor->IsFunctionTemplateInfo()) {
return ApiNatives::InstantiateFunction(
handle(FunctionTemplateInfo::cast(accessor), isolate))
@@ -10719,7 +10924,7 @@ Handle<DeoptimizationData> DeoptimizationData::Empty(Isolate* isolate) {
isolate->factory()->empty_fixed_array());
}
-SharedFunctionInfo* DeoptimizationData::GetInlinedFunction(int index) {
+SharedFunctionInfo DeoptimizationData::GetInlinedFunction(int index) {
if (index == -1) {
return SharedFunctionInfo::cast(SharedFunctionInfo());
} else {
@@ -10728,9 +10933,11 @@ SharedFunctionInfo* DeoptimizationData::GetInlinedFunction(int index) {
}
#ifdef DEBUG
-bool DescriptorArray::IsEqualTo(DescriptorArray* other) {
- if (length() != other->length()) return false;
- for (int i = 0; i < length(); ++i) {
+bool DescriptorArray::IsEqualTo(DescriptorArray other) {
+ if (number_of_all_descriptors() != other->number_of_all_descriptors()) {
+ return false;
+ }
+ for (int i = 0; i < number_of_all_descriptors(); ++i) {
if (get(i) != other->get(i)) return false;
}
return true;
@@ -10745,10 +10952,8 @@ Handle<String> String::Trim(Isolate* isolate, Handle<String> string,
// Perform left trimming if requested.
int left = 0;
- UnicodeCache* unicode_cache = isolate->unicode_cache();
if (mode == kTrim || mode == kTrimStart) {
- while (left < length &&
- unicode_cache->IsWhiteSpaceOrLineTerminator(string->Get(left))) {
+ while (left < length && IsWhiteSpaceOrLineTerminator(string->Get(left))) {
left++;
}
}
@@ -10756,9 +10961,8 @@ Handle<String> String::Trim(Isolate* isolate, Handle<String> string,
// Perform right trimming if requested.
int right = length;
if (mode == kTrim || mode == kTrimEnd) {
- while (
- right > left &&
- unicode_cache->IsWhiteSpaceOrLineTerminator(string->Get(right - 1))) {
+ while (right > left &&
+ IsWhiteSpaceOrLineTerminator(string->Get(right - 1))) {
right--;
}
}
@@ -10769,11 +10973,11 @@ Handle<String> String::Trim(Isolate* isolate, Handle<String> string,
bool String::LooksValid() {
// TODO(leszeks): Maybe remove this check entirely, Heap::Contains uses
// basically the same logic as the way we access the heap in the first place.
- MemoryChunk* chunk = MemoryChunk::FromHeapObject(this);
+ MemoryChunk* chunk = MemoryChunk::FromHeapObject(*this);
// RO_SPACE objects should always be valid.
if (chunk->owner()->identity() == RO_SPACE) return true;
if (chunk->heap() == nullptr) return false;
- return chunk->heap()->Contains(this);
+ return chunk->heap()->Contains(*this);
}
// static
@@ -10846,7 +11050,8 @@ Handle<Object> String::ToNumber(Isolate* isolate, Handle<String> subject) {
if (len == 0) return handle(Smi::kZero, isolate);
DisallowHeapAllocation no_gc;
- uint8_t const* data = Handle<SeqOneByteString>::cast(subject)->GetChars();
+ uint8_t const* data =
+ Handle<SeqOneByteString>::cast(subject)->GetChars(no_gc);
bool minus = (data[0] == '-');
int start_pos = (minus ? 1 : 0);
@@ -10885,26 +11090,25 @@ Handle<Object> String::ToNumber(Isolate* isolate, Handle<String> subject) {
// Slower case.
int flags = ALLOW_HEX | ALLOW_OCTAL | ALLOW_BINARY;
- return isolate->factory()->NewNumber(
- StringToDouble(isolate, isolate->unicode_cache(), subject, flags));
+ return isolate->factory()->NewNumber(StringToDouble(isolate, subject, flags));
}
-
-String::FlatContent String::GetFlatContent() {
- DCHECK(!AllowHeapAllocation::IsAllowed());
+String::FlatContent String::GetFlatContent(
+ const DisallowHeapAllocation& no_gc) {
+ USE(no_gc);
int length = this->length();
- StringShape shape(this);
- String* string = this;
+ StringShape shape(*this);
+ String string = *this;
int offset = 0;
if (shape.representation_tag() == kConsStringTag) {
- ConsString* cons = ConsString::cast(string);
+ ConsString cons = ConsString::cast(string);
if (cons->second()->length() != 0) {
return FlatContent();
}
string = cons->first();
shape = StringShape(string);
} else if (shape.representation_tag() == kSlicedStringTag) {
- SlicedString* slice = SlicedString::cast(string);
+ SlicedString slice = SlicedString::cast(string);
offset = slice->offset();
string = slice->parent();
shape = StringShape(string);
@@ -10912,7 +11116,7 @@ String::FlatContent String::GetFlatContent() {
shape.representation_tag() != kSlicedStringTag);
}
if (shape.representation_tag() == kThinStringTag) {
- ThinString* thin = ThinString::cast(string);
+ ThinString thin = ThinString::cast(string);
string = thin->actual();
shape = StringShape(string);
DCHECK(!shape.IsCons());
@@ -10921,7 +11125,7 @@ String::FlatContent String::GetFlatContent() {
if (shape.encoding_tag() == kOneByteStringTag) {
const uint8_t* start;
if (shape.representation_tag() == kSeqStringTag) {
- start = SeqOneByteString::cast(string)->GetChars();
+ start = SeqOneByteString::cast(string)->GetChars(no_gc);
} else {
start = ExternalOneByteString::cast(string)->GetChars();
}
@@ -10930,7 +11134,7 @@ String::FlatContent String::GetFlatContent() {
DCHECK_EQ(shape.encoding_tag(), kTwoByteStringTag);
const uc16* start;
if (shape.representation_tag() == kSeqStringTag) {
- start = SeqTwoByteString::cast(string)->GetChars();
+ start = SeqTwoByteString::cast(string)->GetChars(no_gc);
} else {
start = ExternalTwoByteString::cast(string)->GetChars();
}
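// ---------------------------------------------------------------------------
// Sketch (illustration only) of the GetFlatContent(no_gc) signature change
// above: rather than DCHECK-ing a global "no allocation" flag at run time,
// the API now demands the scope object by const reference, so forgetting the
// guard becomes a compile error. DisallowHeapAllocation below is an empty
// hypothetical stand-in for V8's RAII scope; the raw-pointer view it returns
// is only safe while that scope is alive.
#include <cstddef>
#include <string>

class DisallowHeapAllocation {};

struct FlatContent {
  const char* data;
  size_t length;
};

class FlatString {
 public:
  explicit FlatString(std::string s) : storage_(std::move(s)) {}
  // The witness parameter is unused at run time; it exists purely so every
  // caller must already hold a no-GC scope before taking a raw-pointer view.
  FlatContent GetFlatContent(const DisallowHeapAllocation&) const {
    return {storage_.data(), storage_.size()};
  }

 private:
  std::string storage_;
};

size_t FlatLength(const FlatString& s) {
  DisallowHeapAllocation no_gc;  // must be in scope at the call site
  return s.GetFlatContent(no_gc).length;
}
// ---------------------------------------------------------------------------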
@@ -10949,7 +11153,7 @@ std::unique_ptr<char[]> String::ToCString(AllowNullsFlag allow_nulls,
if (length < 0) length = kMaxInt - offset;
// Compute the size of the UTF-8 string. Start at the specified offset.
- StringCharacterStream stream(this, offset);
+ StringCharacterStream stream(*this, offset);
int character_position = offset;
int utf8_bytes = 0;
int last = unibrow::Utf16::kNoPreviousCharacter;
@@ -10966,7 +11170,7 @@ std::unique_ptr<char[]> String::ToCString(AllowNullsFlag allow_nulls,
char* result = NewArray<char>(utf8_bytes + 1);
// Convert the UTF-16 string to a UTF-8 buffer. Start at the specified offset.
- stream.Reset(this, offset);
+ stream.Reset(*this, offset);
character_position = offset;
int utf8_byte_position = 0;
last = unibrow::Utf16::kNoPreviousCharacter;
@@ -11058,7 +11262,7 @@ void FlatStringReader::PostGarbageCollection() {
DCHECK(str->IsFlat());
DisallowHeapAllocation no_gc;
// This does not actually prevent the vector from being relocated later.
- String::FlatContent content = str->GetFlatContent();
+ String::FlatContent content = str->GetFlatContent(no_gc);
DCHECK(content.IsFlat());
is_one_byte_ = content.IsOneByte();
if (is_one_byte_) {
@@ -11068,9 +11272,8 @@ void FlatStringReader::PostGarbageCollection() {
}
}
-
-void ConsStringIterator::Initialize(ConsString* cons_string, int offset) {
- DCHECK_NOT_NULL(cons_string);
+void ConsStringIterator::Initialize(ConsString cons_string, int offset) {
+ DCHECK(!cons_string.is_null());
root_ = cons_string;
consumed_ = offset;
// Force stack blown condition to trigger restart.
@@ -11079,27 +11282,25 @@ void ConsStringIterator::Initialize(ConsString* cons_string, int offset) {
DCHECK(StackBlown());
}
-
-String* ConsStringIterator::Continue(int* offset_out) {
+String ConsStringIterator::Continue(int* offset_out) {
DCHECK_NE(depth_, 0);
DCHECK_EQ(0, *offset_out);
bool blew_stack = StackBlown();
- String* string = nullptr;
+ String string;
// Get the next leaf if there is one.
if (!blew_stack) string = NextLeaf(&blew_stack);
// Restart search from root.
if (blew_stack) {
- DCHECK_NULL(string);
+ DCHECK(string.is_null());
string = Search(offset_out);
}
// Ensure future calls return null immediately.
- if (string == nullptr) Reset(nullptr);
+ if (string.is_null()) Reset(ConsString());
return string;
}
-
-String* ConsStringIterator::Search(int* offset_out) {
- ConsString* cons_string = root_;
+String ConsStringIterator::Search(int* offset_out) {
+ ConsString cons_string = root_;
// Reset the stack, pushing the root string.
depth_ = 1;
maximum_depth_ = 1;
@@ -11108,7 +11309,7 @@ String* ConsStringIterator::Search(int* offset_out) {
int offset = 0;
while (true) {
// Loop until the string is found which contains the target offset.
- String* string = cons_string->first();
+ String string = cons_string->first();
int length = string->length();
int32_t type;
if (consumed < offset + length) {
@@ -11140,8 +11341,8 @@ String* ConsStringIterator::Search(int* offset_out) {
// This happens only if we have asked for an offset outside the string.
if (length == 0) {
// Reset so future operations will return null immediately.
- Reset(nullptr);
- return nullptr;
+ Reset(ConsString());
+ return String();
}
// Tell the stack we're done descending.
AdjustMaximumDepth();
@@ -11157,22 +11358,21 @@ String* ConsStringIterator::Search(int* offset_out) {
UNREACHABLE();
}
-
-String* ConsStringIterator::NextLeaf(bool* blew_stack) {
+String ConsStringIterator::NextLeaf(bool* blew_stack) {
while (true) {
// Tree traversal complete.
if (depth_ == 0) {
*blew_stack = false;
- return nullptr;
+ return String();
}
// We've lost track of higher nodes.
if (StackBlown()) {
*blew_stack = true;
- return nullptr;
+ return String();
}
// Go right.
- ConsString* cons_string = frames_[OffsetForDepth(depth_ - 1)];
- String* string = cons_string->second();
+ ConsString cons_string = frames_[OffsetForDepth(depth_ - 1)];
+ String string = cons_string->second();
int32_t type = string->map()->instance_type();
if ((type & kStringRepresentationMask) != kConsStringTag) {
// Pop stack so next iteration is in correct place.
@@ -11204,22 +11404,21 @@ String* ConsStringIterator::NextLeaf(bool* blew_stack) {
UNREACHABLE();
}
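// ---------------------------------------------------------------------------
// Illustrative model (not V8's code) of the ConsStringIterator logic above: a
// rope is a binary tree of concatenations, and leaves are visited left to
// right with an explicit stack of pending right children (the iterator's
// frames_). The real iterator additionally restarts from the root when its
// fixed-size stack overflows (StackBlown); this sketch uses an unbounded
// stack instead, and Node is a hypothetical stand-in for ConsString/SeqString.
#include <memory>
#include <string>
#include <vector>

struct Node {
  std::string leaf;                     // used iff this is a leaf
  std::unique_ptr<Node> first, second;  // set iff this is a cons node
};

void VisitLeaves(const Node* root, std::vector<std::string>* out) {
  std::vector<const Node*> stack;  // pending right subtrees
  const Node* node = root;
  while (node != nullptr) {
    if (node->first) {                  // cons node: remember the right side
      stack.push_back(node->second.get());
      node = node->first.get();         // descend left first
    } else {
      out->push_back(node->leaf);       // leaf: emit, then resume a right arm
      if (stack.empty()) return;
      node = stack.back();
      stack.pop_back();
    }
  }
}
// ---------------------------------------------------------------------------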
-
uint16_t ConsString::ConsStringGet(int index) {
DCHECK(index >= 0 && index < this->length());
// Check for a flattened cons string
if (second()->length() == 0) {
- String* left = first();
+ String left = first();
return left->Get(index);
}
- String* string = String::cast(this);
+ String string = String::cast(*this);
while (true) {
if (StringShape(string).IsCons()) {
- ConsString* cons_string = ConsString::cast(string);
- String* left = cons_string->first();
+ ConsString cons_string = ConsString::cast(string);
+ String left = cons_string->first();
if (left->length() > index) {
string = left;
} else {
@@ -11240,13 +11439,10 @@ uint16_t SlicedString::SlicedStringGet(int index) {
return parent()->Get(offset() + index);
}
-
template <typename sinkchar>
-void String::WriteToFlat(String* src,
- sinkchar* sink,
- int f,
- int t) {
- String* source = src;
+void String::WriteToFlat(String src, sinkchar* sink, int f, int t) {
+ DisallowHeapAllocation no_gc;
+ String source = src;
int from = f;
int to = t;
while (true) {
@@ -11266,21 +11462,19 @@ void String::WriteToFlat(String* src,
return;
}
case kOneByteStringTag | kSeqStringTag: {
- CopyChars(sink,
- SeqOneByteString::cast(source)->GetChars() + from,
+ CopyChars(sink, SeqOneByteString::cast(source)->GetChars(no_gc) + from,
to - from);
return;
}
case kTwoByteStringTag | kSeqStringTag: {
- CopyChars(sink,
- SeqTwoByteString::cast(source)->GetChars() + from,
+ CopyChars(sink, SeqTwoByteString::cast(source)->GetChars(no_gc) + from,
to - from);
return;
}
case kOneByteStringTag | kConsStringTag:
case kTwoByteStringTag | kConsStringTag: {
- ConsString* cons_string = ConsString::cast(source);
- String* first = cons_string->first();
+ ConsString cons_string = ConsString::cast(source);
+ String first = cons_string->first();
int boundary = first->length();
if (to - boundary >= boundary - from) {
// Right hand side is longer. Recurse over left.
@@ -11300,7 +11494,7 @@ void String::WriteToFlat(String* src,
} else {
// Left hand side is longer. Recurse over right.
if (to > boundary) {
- String* second = cons_string->second();
+ String second = cons_string->second();
// When repeatedly appending to a string, we get a cons string that
// is unbalanced to the left -- essentially a linked list. We inline the
// common case of a sequential one-byte right child.
@@ -11308,7 +11502,7 @@ void String::WriteToFlat(String* src,
sink[boundary - from] = static_cast<sinkchar>(second->Get(0));
} else if (second->IsSeqOneByteString()) {
CopyChars(sink + boundary - from,
- SeqOneByteString::cast(second)->GetChars(),
+ SeqOneByteString::cast(second)->GetChars(no_gc),
to - boundary);
} else {
WriteToFlat(second,
@@ -11324,7 +11518,7 @@ void String::WriteToFlat(String* src,
}
case kOneByteStringTag | kSlicedStringTag:
case kTwoByteStringTag | kSlicedStringTag: {
- SlicedString* slice = SlicedString::cast(source);
+ SlicedString slice = SlicedString::cast(source);
unsigned offset = slice->offset();
WriteToFlat(slice->parent(), sink, from + offset, to + offset);
return;
@@ -11342,14 +11536,13 @@ static void CalculateLineEndsImpl(Isolate* isolate, std::vector<int>* line_ends,
Vector<const SourceChar> src,
bool include_ending_line) {
const int src_len = src.length();
- UnicodeCache* cache = isolate->unicode_cache();
for (int i = 0; i < src_len - 1; i++) {
SourceChar current = src[i];
SourceChar next = src[i + 1];
- if (cache->IsLineTerminatorSequence(current, next)) line_ends->push_back(i);
+ if (IsLineTerminatorSequence(current, next)) line_ends->push_back(i);
}
- if (src_len > 0 && cache->IsLineTerminatorSequence(src[src_len - 1], 0)) {
+ if (src_len > 0 && IsLineTerminatorSequence(src[src_len - 1], 0)) {
line_ends->push_back(src_len - 1);
}
if (include_ending_line) {
@@ -11370,7 +11563,7 @@ Handle<FixedArray> String::CalculateLineEnds(Isolate* isolate,
line_ends.reserve(line_count_estimate);
{ DisallowHeapAllocation no_allocation; // ensure vectors stay valid.
// Dispatch on type of strings.
- String::FlatContent content = src->GetFlatContent();
+ String::FlatContent content = src->GetFlatContent(no_allocation);
DCHECK(content.IsFlat());
if (content.IsOneByte()) {
CalculateLineEndsImpl(isolate,
@@ -11392,6 +11585,115 @@ Handle<FixedArray> String::CalculateLineEnds(Isolate* isolate,
return array;
}
+namespace {
+
+template <typename sinkchar>
+void WriteFixedArrayToFlat(FixedArray fixed_array, int length, String separator,
+ sinkchar* sink, int sink_length) {
+ DisallowHeapAllocation no_allocation;
+ CHECK_GT(length, 0);
+ CHECK_LE(length, fixed_array->length());
+#ifdef DEBUG
+ sinkchar* sink_end = sink + sink_length;
+#endif
+
+ const int separator_length = separator->length();
+ const bool use_one_byte_separator_fast_path =
+ separator_length == 1 && sizeof(sinkchar) == 1 &&
+ StringShape(separator).IsSequentialOneByte();
+ uint8_t separator_one_char;
+ if (use_one_byte_separator_fast_path) {
+ CHECK(StringShape(separator).IsSequentialOneByte());
+ CHECK_EQ(separator->length(), 1);
+ separator_one_char =
+ SeqOneByteString::cast(separator)->GetChars(no_allocation)[0];
+ }
+
+ uint32_t num_separators = 0;
+ for (int i = 0; i < length; i++) {
+ Object element = fixed_array->get(i);
+ const bool element_is_separator_sequence = element->IsSmi();
+
+ // If element is a Smi, it represents the number of separators to write.
+ if (V8_UNLIKELY(element_is_separator_sequence)) {
+ CHECK(element->ToUint32(&num_separators));
+ // Verify that Smis (number of separators) only occur when necessary:
+ // 1) at the beginning
+ // 2) at the end
+ // 3) when the number of separators > 1
+ // - It is assumed that consecutive Strings will have one separator,
+ // so there is no need for a Smi.
+ DCHECK(i == 0 || i == length - 1 || num_separators > 1);
+ }
+
+ // Write separator(s) if necessary.
+ if (num_separators > 0 && separator_length > 0) {
+      // TODO(pwong): Consider the doubling strategy employed by
+      // WriteRepeatToFlat() in runtime-strings.cc.
+ // Fast path for single character, single byte separators.
+ if (use_one_byte_separator_fast_path) {
+ DCHECK_LE(sink + num_separators, sink_end);
+ memset(sink, separator_one_char, num_separators);
+ DCHECK_EQ(separator_length, 1);
+ sink += num_separators;
+ } else {
+ for (uint32_t j = 0; j < num_separators; j++) {
+ DCHECK_LE(sink + separator_length, sink_end);
+ String::WriteToFlat(separator, sink, 0, separator_length);
+ sink += separator_length;
+ }
+ }
+ }
+
+ if (V8_UNLIKELY(element_is_separator_sequence)) {
+ num_separators = 0;
+ } else {
+ DCHECK(element->IsString());
+ String string = String::cast(element);
+ const int string_length = string->length();
+
+ DCHECK(string_length == 0 || sink < sink_end);
+ String::WriteToFlat(string, sink, 0, string_length);
+ sink += string_length;
+
+      // The next string element needs at least one separator preceding it.
+ num_separators = 1;
+ }
+ }
+
+ // Verify we have written to the end of the sink.
+ DCHECK_EQ(sink, sink_end);
+}
+
+} // namespace
+
+// static
+Address JSArray::ArrayJoinConcatToSequentialString(Isolate* isolate,
+ Address raw_fixed_array,
+ intptr_t length,
+ Address raw_separator,
+ Address raw_dest) {
+ DisallowHeapAllocation no_allocation;
+ DisallowJavascriptExecution no_js(isolate);
+ FixedArray fixed_array = FixedArray::cast(Object(raw_fixed_array));
+ String separator = String::cast(Object(raw_separator));
+ String dest = String::cast(Object(raw_dest));
+ DCHECK(fixed_array->IsFixedArray());
+ DCHECK(StringShape(dest).IsSequentialOneByte() ||
+ StringShape(dest).IsSequentialTwoByte());
+
+ if (StringShape(dest).IsSequentialOneByte()) {
+ WriteFixedArrayToFlat(fixed_array, static_cast<int>(length), separator,
+ SeqOneByteString::cast(dest)->GetChars(no_allocation),
+ dest->length());
+ } else {
+ DCHECK(StringShape(dest).IsSequentialTwoByte());
+ WriteFixedArrayToFlat(fixed_array, static_cast<int>(length), separator,
+ SeqTwoByteString::cast(dest)->GetChars(no_allocation),
+ dest->length());
+ }
+ return dest->ptr();
+}
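// ---------------------------------------------------------------------------
// Behavioral sketch (not the flat-buffer code above) of WriteFixedArrayToFlat:
// the pre-processed join input mixes strings with integers, where an integer
// is a run-length of separators (used at the ends and for holes), and
// consecutive strings get exactly one implicit separator. Element and
// JoinPreprocessed are hypothetical names.
#include <cstdint>
#include <string>
#include <variant>
#include <vector>

using Element = std::variant<std::string, uint32_t>;

std::string JoinPreprocessed(const std::vector<Element>& elements,
                             const std::string& separator) {
  std::string sink;
  uint32_t num_separators = 0;
  for (const Element& element : elements) {
    if (const uint32_t* run = std::get_if<uint32_t>(&element)) {
      num_separators = *run;  // an integer element is a separator run-length
    }
    for (uint32_t j = 0; j < num_separators; j++) sink += separator;
    if (std::holds_alternative<uint32_t>(element)) {
      num_separators = 0;  // the explicit run has been written out
    } else {
      sink += std::get<std::string>(element);
      num_separators = 1;  // consecutive strings get one separator
    }
  }
  return sink;
}

// e.g. JoinPreprocessed({2u, "a", "b", 1u}, "-") yields "--a-b-".
// ---------------------------------------------------------------------------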
// Compares the contents of two strings by reading and comparing
// int-sized blocks of characters.
@@ -11441,10 +11743,10 @@ class StringComparator {
public:
State() : is_one_byte_(true), length_(0), buffer8_(nullptr) {}
- void Init(String* string) {
- ConsString* cons_string = String::VisitFlat(this, string);
+ void Init(String string) {
+ ConsString cons_string = String::VisitFlat(this, string);
iter_.Reset(cons_string);
- if (cons_string != nullptr) {
+ if (!cons_string.is_null()) {
int offset;
string = iter_.Next(&offset);
String::VisitFlat(this, string, offset);
@@ -11477,9 +11779,9 @@ class StringComparator {
}
// Advance state.
int offset;
- String* next = iter_.Next(&offset);
+ String next = iter_.Next(&offset);
DCHECK_EQ(0, offset);
- DCHECK_NOT_NULL(next);
+ DCHECK(!next.is_null());
String::VisitFlat(this, next);
}
@@ -11505,7 +11807,7 @@ class StringComparator {
return RawStringComparator<Chars1, Chars2>::compare(a, b, to_check);
}
- bool Equals(String* string_1, String* string_2) {
+ bool Equals(String string_1, String string_2) {
int length = string_1->length();
state_1_.Init(string_1);
state_2_.Init(string_2);
@@ -11543,8 +11845,7 @@ class StringComparator {
DISALLOW_COPY_AND_ASSIGN(StringComparator);
};
-
-bool String::SlowEquals(String* other) {
+bool String::SlowEquals(String other) {
DisallowHeapAllocation no_gc;
// Fast check: negative check with lengths.
int len = length();
@@ -11556,7 +11857,7 @@ bool String::SlowEquals(String* other) {
if (this->IsThinString() || other->IsThinString()) {
if (other->IsThinString()) other = ThinString::cast(other)->actual();
if (this->IsThinString()) {
- return ThinString::cast(this)->actual()->Equals(other);
+ return ThinString::cast(*this)->actual()->Equals(other);
} else {
return this->Equals(other);
}
@@ -11587,13 +11888,13 @@ bool String::SlowEquals(String* other) {
if (this->Get(0) != other->Get(0)) return false;
if (IsSeqOneByteString() && other->IsSeqOneByteString()) {
- const uint8_t* str1 = SeqOneByteString::cast(this)->GetChars();
- const uint8_t* str2 = SeqOneByteString::cast(other)->GetChars();
+ const uint8_t* str1 = SeqOneByteString::cast(*this)->GetChars(no_gc);
+ const uint8_t* str2 = SeqOneByteString::cast(other)->GetChars(no_gc);
return CompareRawStringContents(str1, str2, len);
}
StringComparator comparator;
- return comparator.Equals(this, other);
+ return comparator.Equals(*this, other);
}
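// ---------------------------------------------------------------------------
// Sketch (illustration only) of the SlowEquals fast paths above: equal
// lengths are required first, a mismatch of already-computed hashes proves
// inequality without touching characters, and a first-character probe cheaply
// rejects most remaining unequal pairs before the full comparison.
// CachedString is a hypothetical container.
#include <cstring>
#include <string>

struct CachedString {
  std::string chars;
  uint32_t hash = 0;
  bool has_hash = false;
};

bool SlowEquals(const CachedString& a, const CachedString& b) {
  if (a.chars.size() != b.chars.size()) return false;
  if (a.chars.empty()) return true;
  if (a.has_hash && b.has_hash && a.hash != b.hash) return false;
  if (a.chars[0] != b.chars[0]) return false;  // cheap early-out probe
  return std::memcmp(a.chars.data(), b.chars.data(), a.chars.size()) == 0;
}
// ---------------------------------------------------------------------------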
bool String::SlowEquals(Isolate* isolate, Handle<String> one,
@@ -11641,8 +11942,8 @@ bool String::SlowEquals(Isolate* isolate, Handle<String> one,
two = String::Flatten(isolate, two);
DisallowHeapAllocation no_gc;
- String::FlatContent flat1 = one->GetFlatContent();
- String::FlatContent flat2 = two->GetFlatContent();
+ String::FlatContent flat1 = one->GetFlatContent(no_gc);
+ String::FlatContent flat2 = two->GetFlatContent(no_gc);
if (flat1.IsOneByte() && flat2.IsOneByte()) {
return CompareRawStringContents(flat1.ToOneByteVector().start(),
@@ -11691,8 +11992,8 @@ ComparisonResult String::Compare(Isolate* isolate, Handle<String> x,
result = ComparisonResult::kLessThan;
}
int r;
- String::FlatContent x_content = x->GetFlatContent();
- String::FlatContent y_content = y->GetFlatContent();
+ String::FlatContent x_content = x->GetFlatContent(no_gc);
+ String::FlatContent y_content = y->GetFlatContent(no_gc);
if (x_content.IsOneByte()) {
Vector<const uint8_t> x_chars = x_content.ToOneByteVector();
if (y_content.IsOneByte()) {
@@ -11720,8 +12021,8 @@ ComparisonResult String::Compare(Isolate* isolate, Handle<String> x,
return result;
}
-Object* String::IndexOf(Isolate* isolate, Handle<Object> receiver,
- Handle<Object> search, Handle<Object> position) {
+Object String::IndexOf(Isolate* isolate, Handle<Object> receiver,
+ Handle<Object> search, Handle<Object> position) {
if (receiver->IsNullOrUndefined(isolate)) {
THROW_NEW_ERROR_RETURN_FAILURE(
isolate, NewTypeError(MessageTemplate::kCalledOnNullOrUndefined,
@@ -11775,8 +12076,8 @@ int String::IndexOf(Isolate* isolate, Handle<String> receiver,
DisallowHeapAllocation no_gc; // ensure vectors stay valid
// Extract flattened substrings of cons strings before getting encoding.
- String::FlatContent receiver_content = receiver->GetFlatContent();
- String::FlatContent search_content = search->GetFlatContent();
+ String::FlatContent receiver_content = receiver->GetFlatContent(no_gc);
+ String::FlatContent search_content = search->GetFlatContent(no_gc);
// dispatch on type of strings
if (search_content.IsOneByte()) {
@@ -11991,8 +12292,8 @@ int StringMatchBackwards(Vector<const schar> subject,
} // namespace
-Object* String::LastIndexOf(Isolate* isolate, Handle<Object> receiver,
- Handle<Object> search, Handle<Object> position) {
+Object String::LastIndexOf(Isolate* isolate, Handle<Object> receiver,
+ Handle<Object> search, Handle<Object> position) {
if (receiver->IsNullOrUndefined(isolate)) {
THROW_NEW_ERROR_RETURN_FAILURE(
isolate, NewTypeError(MessageTemplate::kCalledOnNullOrUndefined,
@@ -12037,8 +12338,8 @@ Object* String::LastIndexOf(Isolate* isolate, Handle<Object> receiver,
int last_index = -1;
DisallowHeapAllocation no_gc; // ensure vectors stay valid
- String::FlatContent receiver_content = receiver_string->GetFlatContent();
- String::FlatContent search_content = search_string->GetFlatContent();
+ String::FlatContent receiver_content = receiver_string->GetFlatContent(no_gc);
+ String::FlatContent search_content = search_string->GetFlatContent(no_gc);
if (search_content.IsOneByte()) {
Vector<const uint8_t> pat_vector = search_content.ToOneByteVector();
@@ -12096,15 +12397,12 @@ bool String::IsOneByteEqualTo(Vector<const uint8_t> str) {
int slen = length();
if (str.length() != slen) return false;
DisallowHeapAllocation no_gc;
- FlatContent content = GetFlatContent();
+ FlatContent content = GetFlatContent(no_gc);
if (content.IsOneByte()) {
return CompareChars(content.ToOneByteVector().start(),
str.start(), slen) == 0;
}
- for (int i = 0; i < slen; i++) {
- if (Get(i) != static_cast<uint16_t>(str[i])) return false;
- }
- return true;
+ return CompareChars(content.ToUC16Vector().start(), str.start(), slen) == 0;
}
@@ -12112,23 +12410,22 @@ bool String::IsTwoByteEqualTo(Vector<const uc16> str) {
int slen = length();
if (str.length() != slen) return false;
DisallowHeapAllocation no_gc;
- FlatContent content = GetFlatContent();
- if (content.IsTwoByte()) {
- return CompareChars(content.ToUC16Vector().start(), str.start(), slen) == 0;
- }
- for (int i = 0; i < slen; i++) {
- if (Get(i) != str[i]) return false;
+ FlatContent content = GetFlatContent(no_gc);
+ if (content.IsOneByte()) {
+ return CompareChars(content.ToOneByteVector().start(), str.start(), slen) ==
+ 0;
}
- return true;
+ return CompareChars(content.ToUC16Vector().start(), str.start(), slen) == 0;
}
uint32_t String::ComputeAndSetHash(Isolate* isolate) {
+ DisallowHeapAllocation no_gc;
// Should only be called if hash code has not yet been computed.
DCHECK(!HasHashCode());
// Store the hash code in the object.
uint32_t field =
- IteratingStringHasher::Hash(this, isolate->heap()->HashSeed());
+ IteratingStringHasher::Hash(*this, isolate->heap()->HashSeed());
set_hash_field(field);
// Check the hash code is there.
@@ -12142,12 +12439,13 @@ uint32_t String::ComputeAndSetHash(Isolate* isolate) {
bool String::ComputeArrayIndex(uint32_t* index) {
int length = this->length();
if (length == 0 || length > kMaxArrayIndexSize) return false;
- StringCharacterStream stream(this);
+ StringCharacterStream stream(*this);
return StringToArrayIndex(&stream, index);
}
bool String::SlowAsArrayIndex(uint32_t* index) {
+ DisallowHeapAllocation no_gc;
if (length() <= kMaxCachedArrayIndexLength) {
Hash(); // force computation of hash code
uint32_t field = hash_field();
@@ -12283,14 +12581,13 @@ uint32_t StringHasher::ComputeUtf8Hash(Vector<const char> chars, uint64_t seed,
return hasher.GetHashField();
}
-
-void IteratingStringHasher::VisitConsString(ConsString* cons_string) {
+void IteratingStringHasher::VisitConsString(ConsString cons_string) {
// Run small ConsStrings through ConsStringIterator.
if (cons_string->length() < 64) {
ConsStringIterator iter(cons_string);
int offset;
- String* string;
- while (nullptr != (string = iter.Next(&offset))) {
+ for (String string = iter.Next(&offset); !string.is_null();
+ string = iter.Next(&offset)) {
DCHECK_EQ(0, offset);
String::VisitFlat(this, string, 0);
}
@@ -12312,7 +12609,6 @@ void IteratingStringHasher::VisitConsString(ConsString* cons_string) {
}
}
-
void String::PrintOn(FILE* file) {
int length = this->length();
for (int i = 0; i < length; i++) {
@@ -12328,12 +12624,12 @@ int Map::Hash() {
// addresses.
// Shift away the tag.
- int hash = ObjectAddressForHashing(GetConstructor()) >> 2;
+ int hash = ObjectAddressForHashing(GetConstructor().ptr()) >> 2;
// XOR-ing the prototype and constructor directly yields too many zero bits
// when the two pointers are close (which is fairly common).
// To avoid this we shift the prototype bits relatively to the constructor.
- hash ^= ObjectAddressForHashing(prototype()) << (32 - kPageSizeBits);
+ hash ^= ObjectAddressForHashing(prototype().ptr()) << (32 - kPageSizeBits);
return hash ^ (hash >> 16) ^ bit_field2();
}
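// ---------------------------------------------------------------------------
// Sketch (not from the patch) of the Map::Hash mixing above. XOR-ing two
// related heap addresses directly cancels most entropy when they sit close
// together, so the second address is shifted before mixing. kPageSizeBits =
// 18 here is an assumption for illustration.
#include <cstdint>

constexpr int kPageSizeBits = 18;

uint32_t HashTwoAddresses(uintptr_t constructor, uintptr_t prototype,
                          uint32_t extra_bits) {
  uint32_t hash = static_cast<uint32_t>(constructor) >> 2;  // drop tag bits
  // Shift the prototype bits relative to the constructor so nearby pointers
  // do not cancel each other out under XOR.
  hash ^= static_cast<uint32_t>(prototype) << (32 - kPageSizeBits);
  return hash ^ (hash >> 16) ^ extra_bits;  // final mixing step
}
// ---------------------------------------------------------------------------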
@@ -12341,7 +12637,7 @@ int Map::Hash() {
namespace {
-bool CheckEquivalent(const Map* first, const Map* second) {
+bool CheckEquivalent(const Map first, const Map second) {
return first->GetConstructor() == second->GetConstructor() &&
first->prototype() == second->prototype() &&
first->instance_type() == second->instance_type() &&
@@ -12353,8 +12649,8 @@ bool CheckEquivalent(const Map* first, const Map* second) {
} // namespace
-bool Map::EquivalentToForTransition(const Map* other) const {
- if (!CheckEquivalent(this, other)) return false;
+bool Map::EquivalentToForTransition(const Map other) const {
+ if (!CheckEquivalent(*this, other)) return false;
if (instance_type() == JS_FUNCTION_TYPE) {
// JSFunctions require more checks to ensure that sloppy function is
// not equivalent to strict function.
@@ -12365,13 +12661,13 @@ bool Map::EquivalentToForTransition(const Map* other) const {
return true;
}
-bool Map::EquivalentToForElementsKindTransition(const Map* other) const {
+bool Map::EquivalentToForElementsKindTransition(const Map other) const {
if (!EquivalentToForTransition(other)) return false;
#ifdef DEBUG
// Ensure that we don't try to generate elements kind transitions from maps
// with fields that may be generalized in-place. This must already be handled
// during addition of a new field.
- DescriptorArray* descriptors = instance_descriptors();
+ DescriptorArray descriptors = instance_descriptors();
int nof = NumberOfOwnDescriptors();
for (int i = 0; i < nof; i++) {
PropertyDetails details = descriptors->GetDetails(i);
@@ -12385,13 +12681,13 @@ bool Map::EquivalentToForElementsKindTransition(const Map* other) const {
return true;
}
-bool Map::EquivalentToForNormalization(const Map* other,
+bool Map::EquivalentToForNormalization(const Map other,
PropertyNormalizationMode mode) const {
int properties =
mode == CLEAR_INOBJECT_PROPERTIES ? 0 : other->GetInObjectProperties();
- return CheckEquivalent(this, other) && bit_field2() == other->bit_field2() &&
+ return CheckEquivalent(*this, other) && bit_field2() == other->bit_field2() &&
GetInObjectProperties() == properties &&
- JSObject::GetEmbedderFieldCount(this) ==
+ JSObject::GetEmbedderFieldCount(*this) ==
JSObject::GetEmbedderFieldCount(other);
}
@@ -12434,23 +12730,28 @@ void JSFunction::MarkForOptimization(ConcurrencyMode mode) {
// static
void JSFunction::EnsureFeedbackVector(Handle<JSFunction> function) {
Isolate* const isolate = function->GetIsolate();
- if (function->feedback_cell()->value()->IsUndefined(isolate)) {
+ DCHECK(function->shared()->is_compiled());
+ DCHECK(FLAG_lite_mode || function->shared()->HasFeedbackMetadata());
+ if (!function->has_feedback_vector() &&
+ function->shared()->HasFeedbackMetadata()) {
Handle<SharedFunctionInfo> shared(function->shared(), isolate);
if (!shared->HasAsmWasmData()) {
+ DCHECK(function->shared()->HasBytecodeArray());
Handle<FeedbackVector> feedback_vector =
FeedbackVector::New(isolate, shared);
- if (function->feedback_cell() == isolate->heap()->many_closures_cell()) {
+ if (function->raw_feedback_cell() ==
+ isolate->heap()->many_closures_cell()) {
Handle<FeedbackCell> feedback_cell =
isolate->factory()->NewOneClosureCell(feedback_vector);
- function->set_feedback_cell(*feedback_cell);
+ function->set_raw_feedback_cell(*feedback_cell);
} else {
- function->feedback_cell()->set_value(*feedback_vector);
+ function->raw_feedback_cell()->set_value(*feedback_vector);
}
}
}
}
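// ---------------------------------------------------------------------------
// Rough model (illustration only, all types hypothetical) of the
// EnsureFeedbackVector change above: closures start out pointing at one
// shared placeholder cell, so the first closure to allocate a feedback vector
// must first give itself a private cell; otherwise the vector would leak into
// every unrelated closure sharing the placeholder.
#include <memory>

struct FeedbackVector { /* per-closure type feedback would live here */ };

struct FeedbackCell {
  std::shared_ptr<FeedbackVector> value;  // empty until first allocation
};

struct Closure {
  std::shared_ptr<FeedbackCell> cell;
  bool has_feedback_vector() const { return cell && cell->value != nullptr; }
};

void EnsureFeedbackVector(Closure& fn,
                          const std::shared_ptr<FeedbackCell>& shared_cell) {
  if (fn.has_feedback_vector()) return;
  auto vector = std::make_shared<FeedbackVector>();
  if (fn.cell == shared_cell) {
    // Shared placeholder: allocate a one-closure cell rather than mutate it.
    fn.cell = std::make_shared<FeedbackCell>(FeedbackCell{vector});
  } else {
    fn.cell->value = vector;  // already-private cell: fill it in place
  }
}
// ---------------------------------------------------------------------------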
-static void GetMinInobjectSlack(Map* map, void* data) {
+static void GetMinInobjectSlack(Map map, void* data) {
int slack = map->UnusedPropertyFields();
if (*reinterpret_cast<int*>(data) > slack) {
*reinterpret_cast<int*>(data) = slack;
@@ -12458,10 +12759,10 @@ static void GetMinInobjectSlack(Map* map, void* data) {
}
int Map::InstanceSizeFromSlack(int slack) const {
- return instance_size() - slack * kPointerSize;
+ return instance_size() - slack * kTaggedSize;
}
-static void ShrinkInstanceSize(Map* map, void* data) {
+static void ShrinkInstanceSize(Map map, void* data) {
int slack = *reinterpret_cast<int*>(data);
DCHECK_GE(slack, 0);
#ifdef DEBUG
@@ -12474,7 +12775,7 @@ static void ShrinkInstanceSize(Map* map, void* data) {
DCHECK_EQ(new_unused, map->UnusedPropertyFields());
}
-static void StopSlackTracking(Map* map, void* data) {
+static void StopSlackTracking(Map map, void* data) {
map->set_construction_counter(Map::kNoSlackTracking);
}
@@ -12484,7 +12785,7 @@ int Map::ComputeMinObjectSlack(Isolate* isolate) {
DCHECK(GetBackPointer()->IsUndefined(isolate));
int slack = UnusedPropertyFields();
- TransitionsAccessor transitions(isolate, this, &no_gc);
+ TransitionsAccessor transitions(isolate, *this, &no_gc);
transitions.TraverseTransitionTree(&GetMinInobjectSlack, &slack);
return slack;
}
@@ -12495,7 +12796,7 @@ void Map::CompleteInobjectSlackTracking(Isolate* isolate) {
DCHECK(GetBackPointer()->IsUndefined(isolate));
int slack = ComputeMinObjectSlack(isolate);
- TransitionsAccessor transitions(isolate, this, &no_gc);
+ TransitionsAccessor transitions(isolate, *this, &no_gc);
if (slack != 0) {
// Resize the initial map and all maps in its transition tree.
transitions.TraverseTransitionTree(&ShrinkInstanceSize, &slack);
@@ -12504,6 +12805,13 @@ void Map::CompleteInobjectSlackTracking(Isolate* isolate) {
}
}
+void Map::SetInstanceDescriptors(Isolate* isolate, DescriptorArray descriptors,
+ int number_of_own_descriptors) {
+ set_synchronized_instance_descriptors(descriptors);
+ SetNumberOfOwnDescriptors(number_of_own_descriptors);
+ MarkingBarrierForDescriptorArray(isolate->heap(), *this, descriptors,
+ number_of_own_descriptors);
+}
static bool PrototypeBenefitsFromNormalization(Handle<JSObject> object) {
DisallowHeapAllocation no_gc;
@@ -12525,7 +12833,7 @@ void JSObject::MakePrototypesFast(Handle<Object> receiver,
Handle<Object> current = PrototypeIterator::GetCurrent(iter);
if (!current->IsJSObject()) return;
Handle<JSObject> current_obj = Handle<JSObject>::cast(current);
- Map* current_map = current_obj->map();
+ Map current_map = current_obj->map();
if (current_map->is_prototype_map()) {
// If the map is already marked as should be fast, we're done. Its
// prototypes will have been marked already as well.
@@ -12561,12 +12869,12 @@ void JSObject::OptimizeAsPrototype(Handle<JSObject> object,
// Replace the pointer to the exact constructor with the Object function
// from the same context if undetectable from JS. This is to avoid keeping
// memory alive unnecessarily.
- Object* maybe_constructor = object->map()->GetConstructor();
+ Object maybe_constructor = object->map()->GetConstructor();
if (maybe_constructor->IsJSFunction()) {
- JSFunction* constructor = JSFunction::cast(maybe_constructor);
+ JSFunction constructor = JSFunction::cast(maybe_constructor);
if (!constructor->shared()->IsApiFunction()) {
- Context* context = constructor->context()->native_context();
- JSFunction* object_function = context->object_function();
+ Context context = constructor->context()->native_context();
+ JSFunction object_function = context->object_function();
object->map()->SetConstructor(object_function);
}
}
@@ -12618,9 +12926,9 @@ void JSObject::LazyRegisterPrototypeUser(Handle<Map> user, Isolate* isolate) {
}
if (FLAG_trace_prototype_users) {
PrintF("Registering %p as a user of prototype %p (map=%p).\n",
- reinterpret_cast<void*>(*current_user),
- reinterpret_cast<void*>(*proto),
- reinterpret_cast<void*>(proto->map()));
+ reinterpret_cast<void*>(current_user->ptr()),
+ reinterpret_cast<void*>(proto->ptr()),
+ reinterpret_cast<void*>(proto->map()->ptr()));
}
current_user = handle(proto->map(), isolate);
@@ -12639,7 +12947,7 @@ bool JSObject::UnregisterPrototypeUser(Handle<Map> user, Isolate* isolate) {
// If it had no prototype before, see if it had users that might expect
// registration.
if (!user->prototype()->IsJSObject()) {
- Object* users =
+ Object users =
PrototypeInfo::cast(user->prototype_info())->prototype_users();
return users->IsWeakArrayList();
}
@@ -12649,7 +12957,7 @@ bool JSObject::UnregisterPrototypeUser(Handle<Map> user, Isolate* isolate) {
int slot = user_info->registry_slot();
if (slot == PrototypeInfo::UNREGISTERED) return false;
DCHECK(prototype->map()->is_prototype_map());
- Object* maybe_proto_info = prototype->map()->prototype_info();
+ Object maybe_proto_info = prototype->map()->prototype_info();
// User knows its registry slot, prototype info and user registry must exist.
DCHECK(maybe_proto_info->IsPrototypeInfo());
Handle<PrototypeInfo> proto_info(PrototypeInfo::cast(maybe_proto_info),
@@ -12660,7 +12968,8 @@ bool JSObject::UnregisterPrototypeUser(Handle<Map> user, Isolate* isolate) {
PrototypeUsers::MarkSlotEmpty(*prototype_users, slot);
if (FLAG_trace_prototype_users) {
PrintF("Unregistering %p as a user of prototype %p.\n",
- reinterpret_cast<void*>(*user), reinterpret_cast<void*>(*prototype));
+ reinterpret_cast<void*>(user->ptr()),
+ reinterpret_cast<void*>(prototype->ptr()));
}
return true;
}
@@ -12670,32 +12979,35 @@ namespace {
// This function must be kept in sync with
// AccessorAssembler::InvalidateValidityCellIfPrototype() which does pre-checks
// before jumping here.
-void InvalidateOnePrototypeValidityCellInternal(Map* map) {
+void InvalidateOnePrototypeValidityCellInternal(Map map) {
DCHECK(map->is_prototype_map());
if (FLAG_trace_prototype_users) {
PrintF("Invalidating prototype map %p 's cell\n",
- reinterpret_cast<void*>(map));
+ reinterpret_cast<void*>(map.ptr()));
}
- Object* maybe_cell = map->prototype_validity_cell();
+ Object maybe_cell = map->prototype_validity_cell();
if (maybe_cell->IsCell()) {
// Just set the value; the cell will be replaced lazily.
- Cell* cell = Cell::cast(maybe_cell);
+ Cell cell = Cell::cast(maybe_cell);
cell->set_value(Smi::FromInt(Map::kPrototypeChainInvalid));
}
}
-void InvalidatePrototypeChainsInternal(Map* map) {
+void InvalidatePrototypeChainsInternal(Map map) {
InvalidateOnePrototypeValidityCellInternal(map);
- Object* maybe_proto_info = map->prototype_info();
+ Object maybe_proto_info = map->prototype_info();
if (!maybe_proto_info->IsPrototypeInfo()) return;
- PrototypeInfo* proto_info = PrototypeInfo::cast(maybe_proto_info);
- WeakArrayList* prototype_users =
+ PrototypeInfo proto_info = PrototypeInfo::cast(maybe_proto_info);
+ if (!proto_info->prototype_users()->IsWeakArrayList()) {
+ return;
+ }
+ WeakArrayList prototype_users =
WeakArrayList::cast(proto_info->prototype_users());
// For now, only maps register themselves as users.
for (int i = PrototypeUsers::kFirstIndex; i < prototype_users->length();
++i) {
- HeapObject* heap_object;
+ HeapObject heap_object;
if (prototype_users->Get(i)->GetHeapObjectIfWeak(&heap_object) &&
heap_object->IsMap()) {
// Walk the prototype chain (backwards, towards leaf objects) if
@@ -12708,7 +13020,7 @@ void InvalidatePrototypeChainsInternal(Map* map) {
} // namespace
// static
-Map* JSObject::InvalidatePrototypeChains(Map* map) {
+Map JSObject::InvalidatePrototypeChains(Map map) {
DisallowHeapAllocation no_gc;
InvalidatePrototypeChainsInternal(map);
return map;
@@ -12722,7 +13034,7 @@ Map* JSObject::InvalidatePrototypeChains(Map* map) {
// in the prototype chain are not affected by appearance of a new lexical
// variable and therefore we don't propagate invalidation down.
// static
-void JSObject::InvalidatePrototypeValidityCell(JSGlobalObject* global) {
+void JSObject::InvalidatePrototypeValidityCell(JSGlobalObject global) {
DisallowHeapAllocation no_gc;
InvalidateOnePrototypeValidityCellInternal(global->map());
}
@@ -12730,7 +13042,7 @@ void JSObject::InvalidatePrototypeValidityCell(JSGlobalObject* global) {
// static
Handle<PrototypeInfo> Map::GetOrCreatePrototypeInfo(Handle<JSObject> prototype,
Isolate* isolate) {
- Object* maybe_proto_info = prototype->map()->prototype_info();
+ Object maybe_proto_info = prototype->map()->prototype_info();
if (maybe_proto_info->IsPrototypeInfo()) {
return handle(PrototypeInfo::cast(maybe_proto_info), isolate);
}
@@ -12743,7 +13055,7 @@ Handle<PrototypeInfo> Map::GetOrCreatePrototypeInfo(Handle<JSObject> prototype,
// static
Handle<PrototypeInfo> Map::GetOrCreatePrototypeInfo(Handle<Map> prototype_map,
Isolate* isolate) {
- Object* maybe_proto_info = prototype_map->prototype_info();
+ Object maybe_proto_info = prototype_map->prototype_info();
if (maybe_proto_info->IsPrototypeInfo()) {
return handle(PrototypeInfo::cast(maybe_proto_info), isolate);
}
@@ -12784,7 +13096,7 @@ Handle<Object> Map::GetOrCreatePrototypeChainValidityCell(Handle<Map> map,
JSObject::LazyRegisterPrototypeUser(handle(prototype->map(), isolate),
isolate);
- Object* maybe_cell = prototype->map()->prototype_validity_cell();
+ Object maybe_cell = prototype->map()->prototype_validity_cell();
// Return existing cell if it's still valid.
if (maybe_cell->IsCell()) {
Handle<Cell> cell(Cell::cast(maybe_cell), isolate);
@@ -12800,11 +13112,11 @@ Handle<Object> Map::GetOrCreatePrototypeChainValidityCell(Handle<Map> map,
}
// static
-bool Map::IsPrototypeChainInvalidated(Map* map) {
+bool Map::IsPrototypeChainInvalidated(Map map) {
DCHECK(map->is_prototype_map());
- Object* maybe_cell = map->prototype_validity_cell();
+ Object maybe_cell = map->prototype_validity_cell();
if (maybe_cell->IsCell()) {
- Cell* cell = Cell::cast(maybe_cell);
+ Cell cell = Cell::cast(maybe_cell);
return cell->value() != Smi::FromInt(Map::kPrototypeChainValid);
}
return true;
@@ -12822,10 +13134,10 @@ void Map::SetPrototype(Isolate* isolate, Handle<Map> map,
Handle<JSObject> prototype_jsobj = Handle<JSObject>::cast(prototype);
JSObject::OptimizeAsPrototype(prototype_jsobj, enable_prototype_setup_mode);
- Object* maybe_constructor = prototype_jsobj->map()->GetConstructor();
+ Object maybe_constructor = prototype_jsobj->map()->GetConstructor();
if (maybe_constructor->IsJSFunction()) {
- JSFunction* constructor = JSFunction::cast(maybe_constructor);
- Object* data = constructor->shared()->function_data();
+ JSFunction constructor = JSFunction::cast(maybe_constructor);
+ Object data = constructor->shared()->function_data();
is_hidden = (data->IsFunctionTemplateInfo() &&
FunctionTemplateInfo::cast(data)->hidden_prototype()) ||
prototype->IsJSGlobalObject();
@@ -12854,7 +13166,8 @@ Handle<Object> CacheInitialJSArrayMaps(Handle<Context> native_context,
i < kFastElementsKindCount; ++i) {
Handle<Map> new_map;
ElementsKind next_kind = GetFastElementsKindFromSequenceIndex(i);
- if (Map* maybe_elements_transition = current_map->ElementsTransitionMap()) {
+ Map maybe_elements_transition = current_map->ElementsTransitionMap();
+ if (!maybe_elements_transition.is_null()) {
new_map = handle(maybe_elements_transition, native_context->GetIsolate());
} else {
new_map =
@@ -12971,7 +13284,7 @@ void JSFunction::SetInitialMap(Handle<JSFunction> function, Handle<Map> map,
function->set_prototype_or_initial_map(*map);
map->SetConstructor(*function);
if (FLAG_trace_maps) {
- LOG(function->GetIsolate(), MapEvent("InitialMap", nullptr, *map, "",
+ LOG(function->GetIsolate(), MapEvent("InitialMap", Map(), *map, "",
function->shared()->DebugName()));
}
}
@@ -12999,9 +13312,11 @@ bool CanSubclassHaveInobjectProperties(InstanceType instance_type) {
case JS_INTL_NUMBER_FORMAT_TYPE:
case JS_INTL_PLURAL_RULES_TYPE:
case JS_INTL_RELATIVE_TIME_FORMAT_TYPE:
+ case JS_INTL_SEGMENT_ITERATOR_TYPE:
case JS_INTL_SEGMENTER_TYPE:
case JS_INTL_V8_BREAK_ITERATOR_TYPE:
#endif
+ case JS_ASYNC_FUNCTION_OBJECT_TYPE:
case JS_ASYNC_GENERATOR_OBJECT_TYPE:
case JS_MAP_TYPE:
case JS_MESSAGE_OBJECT_TYPE:
@@ -13039,6 +13354,7 @@ bool CanSubclassHaveInobjectProperties(InstanceType instance_type) {
case HASH_TABLE_TYPE:
case ORDERED_HASH_MAP_TYPE:
case ORDERED_HASH_SET_TYPE:
+ case ORDERED_NAME_DICTIONARY_TYPE:
case NAME_DICTIONARY_TYPE:
case GLOBAL_DICTIONARY_TYPE:
case NUMBER_DICTIONARY_TYPE:
@@ -13097,8 +13413,10 @@ void JSFunction::EnsureHasInitialMap(Handle<JSFunction> function) {
// The constructor should be compiled for the optimization hints to be
// available.
int expected_nof_properties = 0;
- if (function->shared()->is_compiled() ||
- Compiler::Compile(function, Compiler::CLEAR_EXCEPTION)) {
+ IsCompiledScope is_compiled_scope(function->shared()->is_compiled_scope());
+ if (is_compiled_scope.is_compiled() ||
+ Compiler::Compile(function, Compiler::CLEAR_EXCEPTION,
+ &is_compiled_scope)) {
DCHECK(function->shared()->is_compiled());
expected_nof_properties = function->shared()->expected_nof_properties();
}
@@ -13344,7 +13662,7 @@ Handle<String> JSFunction::ToString(Handle<JSFunction> function) {
Handle<Object> maybe_class_positions = JSReceiver::GetDataProperty(
function, isolate->factory()->class_positions_symbol());
if (maybe_class_positions->IsTuple2()) {
- Tuple2* class_positions = Tuple2::cast(*maybe_class_positions);
+ Tuple2 class_positions = Tuple2::cast(*maybe_class_positions);
int start_position = Smi::ToInt(class_positions->value1());
int end_position = Smi::ToInt(class_positions->value2());
Handle<String> script_source(
@@ -13370,6 +13688,8 @@ Handle<String> JSFunction::ToString(Handle<JSFunction> function) {
SharedFunctionInfo::GetSourceCodeHarmony(shared_info));
}
+STATIC_ASSERT(Oddball::kToNumberRawOffset == HeapNumber::kValueOffset);
+
void Oddball::Initialize(Isolate* isolate, Handle<Oddball> oddball,
const char* to_string, Handle<Object> to_number,
const char* type_of, byte kind) {
@@ -13400,7 +13720,7 @@ int Script::GetEvalPosition() {
if (!has_eval_from_shared()) {
position = 0;
} else {
- SharedFunctionInfo* shared = eval_from_shared();
+ SharedFunctionInfo shared = eval_from_shared();
position = shared->abstract_code()->SourcePosition(-position);
}
DCHECK_GE(position, 0);
@@ -13415,7 +13735,7 @@ void Script::InitLineEnds(Handle<Script> script) {
DCHECK(script->type() != Script::TYPE_WASM ||
script->source_mapping_url()->IsString());
- Object* src_obj = script->source();
+ Object src_obj = script->source();
if (!src_obj->IsString()) {
DCHECK(src_obj->IsUndefined(isolate));
script->set_line_ends(ReadOnlyRoots(isolate).empty_fixed_array());
@@ -13441,20 +13761,21 @@ bool Script::IsUserJavaScript() { return type() == Script::TYPE_NORMAL; }
bool Script::ContainsAsmModule() {
DisallowHeapAllocation no_gc;
- SharedFunctionInfo::ScriptIterator iter(this->GetIsolate(), this);
- while (SharedFunctionInfo* info = iter.Next()) {
+ SharedFunctionInfo::ScriptIterator iter(this->GetIsolate(), *this);
+ for (SharedFunctionInfo info = iter.Next(); !info.is_null();
+ info = iter.Next()) {
if (info->HasAsmWasmData()) return true;
}
return false;
}
namespace {
-bool GetPositionInfoSlow(const Script* script, int position,
+bool GetPositionInfoSlow(const Script script, int position,
Script::PositionInfo* info) {
if (!script->source()->IsString()) return false;
if (position < 0) position = 0;
- String* source_string = String::cast(script->source());
+ String source_string = String::cast(script->source());
int line = 0;
int line_start = 0;
int len = source_string->length();
@@ -13490,10 +13811,10 @@ bool Script::GetPositionInfo(int position, PositionInfo* info,
if (line_ends()->IsUndefined()) {
// Slow mode: we do not have line_ends. We have to iterate through source.
- if (!GetPositionInfoSlow(this, position, info)) return false;
+ if (!GetPositionInfoSlow(*this, position, info)) return false;
} else {
DCHECK(line_ends()->IsFixedArray());
- FixedArray* ends = FixedArray::cast(line_ends());
+ FixedArray ends = FixedArray::cast(line_ends());
const int ends_len = ends->length();
if (ends_len == 0) return false;
@@ -13537,7 +13858,7 @@ bool Script::GetPositionInfo(int position, PositionInfo* info,
info->line_end = SMI_VALUE(ends->get(info->line));
if (info->line_end > 0) {
DCHECK(source()->IsString());
- String* src = String::cast(source());
+ String src = String::cast(source());
if (src->length() >= info->line_end &&
src->Get(info->line_end - 1) == '\r') {
info->line_end--;
@@ -13581,7 +13902,7 @@ int Script::GetLineNumber(int code_pos) const {
return info.line;
}
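// ---------------------------------------------------------------------------
// Sketch (not V8's code) of the precomputed-line-ends lookup used by
// GetPositionInfo/GetLineNumber above: line_ends[i] holds the offset of the
// last character of line i, so the line for a position is the first entry
// >= position, found by binary search.
#include <algorithm>
#include <vector>

struct PositionInfo {
  int line, column, line_start, line_end;
};

bool GetPositionInfo(const std::vector<int>& line_ends, int position,
                     PositionInfo* info) {
  if (line_ends.empty() || position > line_ends.back()) return false;
  auto it = std::lower_bound(line_ends.begin(), line_ends.end(), position);
  info->line = static_cast<int>(it - line_ends.begin());
  info->line_start = info->line == 0 ? 0 : line_ends[info->line - 1] + 1;
  info->line_end = line_ends[info->line];
  info->column = position - info->line_start;
  return true;
}
// ---------------------------------------------------------------------------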
-Object* Script::GetNameOrSourceURL() {
+Object Script::GetNameOrSourceURL() {
// Keep in sync with ScriptNameOrSourceURL in messages.js.
if (!source_url()->IsUndefined()) return source_url();
return name();
@@ -13595,9 +13916,8 @@ MaybeHandle<SharedFunctionInfo> Script::FindSharedFunctionInfo(
// AstTraversalVisitor doesn't recurse properly in the construct which
// triggers the mismatch.
CHECK_LT(fun->function_literal_id(), shared_function_infos()->length());
- MaybeObject* shared =
- shared_function_infos()->Get(fun->function_literal_id());
- HeapObject* heap_object;
+ MaybeObject shared = shared_function_infos()->Get(fun->function_literal_id());
+ HeapObject heap_object;
if (!shared->GetHeapObject(&heap_object) ||
heap_object->IsUndefined(isolate)) {
return MaybeHandle<SharedFunctionInfo>();
@@ -13608,22 +13928,22 @@ MaybeHandle<SharedFunctionInfo> Script::FindSharedFunctionInfo(
Script::Iterator::Iterator(Isolate* isolate)
: iterator_(isolate->heap()->script_list()) {}
-Script* Script::Iterator::Next() {
- Object* o = iterator_.Next();
- if (o != nullptr) {
+Script Script::Iterator::Next() {
+ Object o = iterator_.Next();
+ if (o != Object()) {
return Script::cast(o);
}
- return nullptr;
+ return Script();
}
-Code* SharedFunctionInfo::GetCode() const {
+Code SharedFunctionInfo::GetCode() const {
// ======
// NOTE: This chain of checks MUST be kept in sync with the equivalent CSA
// GetSharedFunctionInfoCode method in code-stub-assembler.cc.
// ======
Isolate* isolate = GetIsolate();
- Object* data = function_data();
+ Object data = function_data();
if (data->IsSmi()) {
// Holding a Smi means we are a builtin.
DCHECK(HasBuiltinId());
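
Throughout these hunks, `Object*`, `Script*`, and friends become plain value types: a one-word wrapper around a tagged address, where a default-constructed instance replaces the old `nullptr` sentinel and `is_null()` replaces pointer comparison (hence `return Script();` instead of `return nullptr;` above). A self-contained sketch of the idiom, with assumed names (`Tagged`, `Iterator`) rather than V8's real classes:

    #include <cstddef>
    #include <cstdint>
    #include <utility>
    #include <vector>

    class Tagged {
     public:
      Tagged() : ptr_(0) {}  // the default instance is the "null" sentinel
      explicit Tagged(uintptr_t ptr) : ptr_(ptr) {}
      bool is_null() const { return ptr_ == 0; }
      uintptr_t ptr() const { return ptr_; }

     private:
      uintptr_t ptr_;  // one machine word, copied by value like a raw pointer
    };

    class Iterator {
     public:
      explicit Iterator(std::vector<uintptr_t> items)
          : items_(std::move(items)) {}

      // Returns a null Tagged when exhausted, like Script::Iterator::Next.
      Tagged Next() {
        if (index_ >= items_.size()) return Tagged();
        return Tagged(items_[index_++]);
      }

     private:
      std::vector<uintptr_t> items_;
      std::size_t index_ = 0;
    };

    // The loop shape the diff switches to once nullptr is no longer usable:
    void Drain(Iterator& it) {
      for (Tagged t = it.Next(); !t.is_null(); t = it.Next()) {
        (void)t.ptr();  // use the value
      }
    }
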
@@ -13632,8 +13952,8 @@ Code* SharedFunctionInfo::GetCode() const {
// Having a bytecode array means we are a compiled, interpreted function.
DCHECK(HasBytecodeArray());
return isolate->builtins()->builtin(Builtins::kInterpreterEntryTrampoline);
- } else if (data->IsFixedArray()) {
- // Having a fixed array means we are an asm.js/wasm function.
+ } else if (data->IsAsmWasmData()) {
+ // Having AsmWasmData means we are an asm.js/wasm function.
DCHECK(HasAsmWasmData());
return isolate->builtins()->builtin(Builtins::kInstantiateAsmJs);
} else if (data->IsUncompiledData()) {
@@ -13649,7 +13969,7 @@ Code* SharedFunctionInfo::GetCode() const {
DCHECK(HasWasmExportedFunctionData());
return wasm_exported_function_data()->wrapper_code();
} else if (data->IsInterpreterData()) {
- Code* code = InterpreterTrampoline();
+ Code code = InterpreterTrampoline();
DCHECK(code->IsCode());
DCHECK(code->is_interpreter_trampoline_builtin());
return code;
@@ -13657,14 +13977,14 @@ Code* SharedFunctionInfo::GetCode() const {
UNREACHABLE();
}
-WasmExportedFunctionData* SharedFunctionInfo::wasm_exported_function_data()
+WasmExportedFunctionData SharedFunctionInfo::wasm_exported_function_data()
const {
DCHECK(HasWasmExportedFunctionData());
return WasmExportedFunctionData::cast(function_data());
}
SharedFunctionInfo::ScriptIterator::ScriptIterator(Isolate* isolate,
- Script* script)
+ Script script)
: ScriptIterator(isolate,
handle(script->shared_function_infos(), isolate)) {}
@@ -13674,20 +13994,20 @@ SharedFunctionInfo::ScriptIterator::ScriptIterator(
shared_function_infos_(shared_function_infos),
index_(0) {}
-SharedFunctionInfo* SharedFunctionInfo::ScriptIterator::Next() {
+SharedFunctionInfo SharedFunctionInfo::ScriptIterator::Next() {
while (index_ < shared_function_infos_->length()) {
- MaybeObject* raw = shared_function_infos_->Get(index_++);
- HeapObject* heap_object;
+ MaybeObject raw = shared_function_infos_->Get(index_++);
+ HeapObject heap_object;
if (!raw->GetHeapObject(&heap_object) ||
heap_object->IsUndefined(isolate_)) {
continue;
}
return SharedFunctionInfo::cast(heap_object);
}
- return nullptr;
+ return SharedFunctionInfo();
}
-void SharedFunctionInfo::ScriptIterator::Reset(Script* script) {
+void SharedFunctionInfo::ScriptIterator::Reset(Script script) {
shared_function_infos_ = handle(script->shared_function_infos(), isolate_);
index_ = 0;
}
@@ -13697,14 +14017,14 @@ SharedFunctionInfo::GlobalIterator::GlobalIterator(Isolate* isolate)
noscript_sfi_iterator_(isolate->heap()->noscript_shared_function_infos()),
sfi_iterator_(isolate, script_iterator_.Next()) {}
-SharedFunctionInfo* SharedFunctionInfo::GlobalIterator::Next() {
- HeapObject* next = noscript_sfi_iterator_.Next();
- if (next != nullptr) return SharedFunctionInfo::cast(next);
+SharedFunctionInfo SharedFunctionInfo::GlobalIterator::Next() {
+ HeapObject next = noscript_sfi_iterator_.Next();
+ if (!next.is_null()) return SharedFunctionInfo::cast(next);
for (;;) {
next = sfi_iterator_.Next();
- if (next != nullptr) return SharedFunctionInfo::cast(next);
- Script* next_script = script_iterator_.Next();
- if (next_script == nullptr) return nullptr;
+ if (!next.is_null()) return SharedFunctionInfo::cast(next);
+ Script next_script = script_iterator_.Next();
+ if (next_script.is_null()) return SharedFunctionInfo();
sfi_iterator_.Reset(next_script);
}
}
@@ -13717,8 +14037,8 @@ void SharedFunctionInfo::SetScript(Handle<SharedFunctionInfo> shared,
Isolate* isolate = shared->GetIsolate();
if (reset_preparsed_scope_data &&
- shared->HasUncompiledDataWithPreParsedScope()) {
- shared->ClearPreParsedScopeData();
+ shared->HasUncompiledDataWithPreparseData()) {
+ shared->ClearPreparseData();
}
// Add shared function info to new script's list. If a collection occurs,
@@ -13732,8 +14052,8 @@ void SharedFunctionInfo::SetScript(Handle<SharedFunctionInfo> shared,
handle(script->shared_function_infos(), isolate);
#ifdef DEBUG
DCHECK_LT(function_literal_id, list->length());
- MaybeObject* maybe_object = list->Get(function_literal_id);
- HeapObject* heap_object;
+ MaybeObject maybe_object = list->Get(function_literal_id);
+ HeapObject heap_object;
if (maybe_object->GetHeapObjectIfWeak(&heap_object)) {
DCHECK_EQ(heap_object, *shared);
}
@@ -13741,7 +14061,7 @@ void SharedFunctionInfo::SetScript(Handle<SharedFunctionInfo> shared,
list->Set(function_literal_id, HeapObjectReference::Weak(*shared));
// Remove shared function info from root array.
- WeakArrayList* noscript_list =
+ WeakArrayList noscript_list =
isolate->heap()->noscript_shared_function_infos();
CHECK(noscript_list->RemoveOne(MaybeObjectHandle::Weak(shared)));
} else {
@@ -13752,8 +14072,8 @@ void SharedFunctionInfo::SetScript(Handle<SharedFunctionInfo> shared,
#ifdef DEBUG
if (FLAG_enable_slow_asserts) {
WeakArrayList::Iterator iterator(*list);
- HeapObject* next;
- while ((next = iterator.Next()) != nullptr) {
+ for (HeapObject next = iterator.Next(); !next.is_null();
+ next = iterator.Next()) {
DCHECK_NE(next, *shared);
}
}
@@ -13765,15 +14085,15 @@ void SharedFunctionInfo::SetScript(Handle<SharedFunctionInfo> shared,
isolate->heap()->SetRootNoScriptSharedFunctionInfos(*list);
// Remove shared function info from old script's list.
- Script* old_script = Script::cast(shared->script());
+ Script old_script = Script::cast(shared->script());
// Due to liveedit, it might happen that the old_script doesn't know
// about the SharedFunctionInfo, so we have to guard against that.
Handle<WeakFixedArray> infos(old_script->shared_function_infos(), isolate);
if (function_literal_id < infos->length()) {
- MaybeObject* raw =
+ MaybeObject raw =
old_script->shared_function_infos()->Get(function_literal_id);
- HeapObject* heap_object;
+ HeapObject heap_object;
if (raw->GetHeapObjectIfWeak(&heap_object) && heap_object == *shared) {
old_script->shared_function_infos()->Set(
function_literal_id, HeapObjectReference::Strong(
@@ -13788,33 +14108,33 @@ void SharedFunctionInfo::SetScript(Handle<SharedFunctionInfo> shared,
bool SharedFunctionInfo::HasBreakInfo() const {
if (!HasDebugInfo()) return false;
- DebugInfo* info = DebugInfo::cast(GetDebugInfo());
+ DebugInfo info = GetDebugInfo();
bool has_break_info = info->HasBreakInfo();
return has_break_info;
}
bool SharedFunctionInfo::BreakAtEntry() const {
if (!HasDebugInfo()) return false;
- DebugInfo* info = DebugInfo::cast(GetDebugInfo());
+ DebugInfo info = GetDebugInfo();
bool break_at_entry = info->BreakAtEntry();
return break_at_entry;
}
bool SharedFunctionInfo::HasCoverageInfo() const {
if (!HasDebugInfo()) return false;
- DebugInfo* info = DebugInfo::cast(GetDebugInfo());
+ DebugInfo info = GetDebugInfo();
bool has_coverage_info = info->HasCoverageInfo();
return has_coverage_info;
}
-CoverageInfo* SharedFunctionInfo::GetCoverageInfo() const {
+CoverageInfo SharedFunctionInfo::GetCoverageInfo() const {
DCHECK(HasCoverageInfo());
return CoverageInfo::cast(GetDebugInfo()->coverage_info());
}
-String* SharedFunctionInfo::DebugName() {
+String SharedFunctionInfo::DebugName() {
DisallowHeapAllocation no_gc;
- String* function_name = Name();
+ String function_name = Name();
if (function_name->length() > 0) return function_name;
return inferred_name();
}
@@ -13828,7 +14148,65 @@ bool SharedFunctionInfo::PassesFilter(const char* raw_filter) {
bool SharedFunctionInfo::HasSourceCode() const {
Isolate* isolate = GetIsolate();
return !script()->IsUndefined(isolate) &&
- !reinterpret_cast<Script*>(script())->source()->IsUndefined(isolate);
+ !Script::cast(script())->source()->IsUndefined(isolate);
+}
+
+void SharedFunctionInfo::DiscardCompiledMetadata(
+ Isolate* isolate,
+ std::function<void(HeapObject object, ObjectSlot slot, HeapObject target)>
+ gc_notify_updated_slot) {
+ DisallowHeapAllocation no_gc;
+ if (is_compiled()) {
+ HeapObject outer_scope_info;
+ if (scope_info()->HasOuterScopeInfo()) {
+ outer_scope_info = scope_info()->OuterScopeInfo();
+ } else {
+ outer_scope_info = ReadOnlyRoots(isolate).the_hole_value();
+ }
+
+ // Raw setter to avoid validity checks, since we're performing the unusual
+ // task of decompiling.
+ set_raw_outer_scope_info_or_feedback_metadata(outer_scope_info);
+ gc_notify_updated_slot(
+ *this,
+ RawField(SharedFunctionInfo::kOuterScopeInfoOrFeedbackMetadataOffset),
+ outer_scope_info);
+ } else {
+ DCHECK(outer_scope_info()->IsScopeInfo() ||
+ outer_scope_info()->IsTheHole());
+ }
+
+ // TODO(rmcilroy): Possibly discard ScopeInfo here as well.
+}
+
+// static
+void SharedFunctionInfo::DiscardCompiled(
+ Isolate* isolate, Handle<SharedFunctionInfo> shared_info) {
+ DCHECK(shared_info->CanDiscardCompiled());
+
+ Handle<String> inferred_name_val =
+ handle(shared_info->inferred_name(), isolate);
+ int start_position = shared_info->StartPosition();
+ int end_position = shared_info->EndPosition();
+ int function_literal_id = shared_info->FunctionLiteralId(isolate);
+
+ shared_info->DiscardCompiledMetadata(isolate);
+
+ // Replace compiled data with a new UncompiledData object.
+ if (shared_info->HasUncompiledDataWithPreparseData()) {
+    // If this is uncompiled data with pre-parsed scope data, we can just
+ // clear out the scope data and keep the uncompiled data.
+ shared_info->ClearPreparseData();
+ } else {
+ // Create a new UncompiledData, without pre-parsed scope, and update the
+ // function data to point to it. Use the raw function data setter to avoid
+ // validity checks, since we're performing the unusual task of decompiling.
+ Handle<UncompiledData> data =
+ isolate->factory()->NewUncompiledDataWithoutPreparseData(
+ inferred_name_val, start_position, end_position,
+ function_literal_id);
+ shared_info->set_function_data(*data);
+ }
}
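
DiscardCompiledMetadata threads a `gc_notify_updated_slot` callback through the raw field write so a concurrent collector can be told about the rewritten slot. A sketch of that callback shape in standard C++; `Obj`, `Slot`, and the no-op default are assumptions for illustration, not V8's signatures:

    #include <functional>

    struct Obj {};   // stand-in for a heap object reference
    struct Slot {};  // stand-in for a field location inside an object

    using NotifySlotUpdated =
        std::function<void(Obj host, Slot slot, Obj target)>;

    // The writer performs the raw store, then reports it; callers that don't
    // care (e.g. no GC in progress) pass a no-op.
    void RewriteField(Obj host, Slot slot, Obj new_target,
                      const NotifySlotUpdated& notify = [](Obj, Slot, Obj) {}) {
      // ... raw store of new_target into slot, skipping validity checks ...
      notify(host, slot, new_target);
    }
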
// static
@@ -13876,12 +14254,32 @@ Handle<Object> SharedFunctionInfo::GetSourceCodeHarmony(
bool SharedFunctionInfo::IsInlineable() {
// Check that the function has a script associated with it.
if (!script()->IsScript()) return false;
+
if (GetIsolate()->is_precise_binary_code_coverage() &&
!has_reported_binary_coverage()) {
// We may miss invocations if this function is inlined.
return false;
}
- return !optimization_disabled();
+
+ if (optimization_disabled()) return false;
+
+ // Built-in functions are handled by the JSCallReducer.
+ if (HasBuiltinFunctionId()) return false;
+
+ // Only choose user code for inlining.
+ if (!IsUserJavaScript()) return false;
+
+ // If there is no bytecode array, it is either not compiled or it is compiled
+ // with WebAssembly for the asm.js pipeline. In either case we don't want to
+ // inline.
+ if (!HasBytecodeArray()) return false;
+
+ // Quick check on the size of the bytecode to avoid inlining large functions.
+ if (GetBytecodeArray()->length() > FLAG_max_inlined_bytecode_size) {
+ return false;
+ }
+
+ return true;
}
int SharedFunctionInfo::SourceSize() { return EndPosition() - StartPosition(); }
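
The expanded IsInlineable is a chain of early-outs ordered roughly from cheapest to most decisive, ending with a bytecode-size cap. The same shape, sketched over an invented CandidateInfo record rather than V8's accessors:

    struct CandidateInfo {
      bool has_script;
      bool coverage_would_lose_counts;
      bool optimization_disabled;
      bool is_builtin;
      bool is_user_code;
      bool has_bytecode;
      int bytecode_length;
    };

    bool IsInlineable(const CandidateInfo& c, int max_inlined_bytecode_size) {
      if (!c.has_script) return false;                 // need source attribution
      if (c.coverage_would_lose_counts) return false;  // keep coverage precise
      if (c.optimization_disabled) return false;
      if (c.is_builtin) return false;      // builtins are reduced elsewhere
      if (!c.is_user_code) return false;   // only inline user JavaScript
      if (!c.has_bytecode) return false;   // not compiled, or asm.js/wasm
      return c.bytecode_length <= max_inlined_bytecode_size;
    }
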
@@ -13889,17 +14287,18 @@ int SharedFunctionInfo::SourceSize() { return EndPosition() - StartPosition(); }
int SharedFunctionInfo::FindIndexInScript(Isolate* isolate) const {
DisallowHeapAllocation no_gc;
- Object* script_obj = script();
+ Object script_obj = script();
if (!script_obj->IsScript()) return FunctionLiteral::kIdTypeInvalid;
- WeakFixedArray* shared_info_list =
+ WeakFixedArray shared_info_list =
Script::cast(script_obj)->shared_function_infos();
SharedFunctionInfo::ScriptIterator iterator(
- isolate, Handle<WeakFixedArray>(&shared_info_list));
+ isolate,
+ Handle<WeakFixedArray>(reinterpret_cast<Address*>(&shared_info_list)));
- for (SharedFunctionInfo* shared = iterator.Next(); shared != nullptr;
+ for (SharedFunctionInfo shared = iterator.Next(); !shared.is_null();
shared = iterator.Next()) {
- if (shared == this) {
+ if (shared == *this) {
return iterator.CurrentIndex();
}
}
@@ -13916,8 +14315,15 @@ void JSFunction::CalculateInstanceSizeHelper(InstanceType instance_type,
DCHECK_LE(static_cast<unsigned>(requested_embedder_fields),
JSObject::kMaxEmbedderFields);
int header_size = JSObject::GetHeaderSize(instance_type, has_prototype_slot);
+ if (requested_embedder_fields) {
+ // If there are embedder fields, then the embedder fields start offset must
+ // be properly aligned (embedder fields are located between object header
+ // and inobject fields).
+ header_size = RoundUp<kSystemPointerSize>(header_size);
+ requested_embedder_fields *= kEmbedderDataSlotSizeInTaggedSlots;
+ }
int max_nof_fields =
- (JSObject::kMaxInstanceSize - header_size) >> kPointerSizeLog2;
+ (JSObject::kMaxInstanceSize - header_size) >> kTaggedSizeLog2;
CHECK_LE(max_nof_fields, JSObject::kMaxInObjectProperties);
CHECK_LE(static_cast<unsigned>(requested_embedder_fields),
static_cast<unsigned>(max_nof_fields));
@@ -13925,9 +14331,9 @@ void JSFunction::CalculateInstanceSizeHelper(InstanceType instance_type,
max_nof_fields - requested_embedder_fields);
*instance_size =
header_size +
- ((requested_embedder_fields + *in_object_properties) << kPointerSizeLog2);
+ ((requested_embedder_fields + *in_object_properties) << kTaggedSizeLog2);
CHECK_EQ(*in_object_properties,
- ((*instance_size - header_size) >> kPointerSizeLog2) -
+ ((*instance_size - header_size) >> kTaggedSizeLog2) -
requested_embedder_fields);
CHECK_LE(static_cast<unsigned>(*instance_size),
static_cast<unsigned>(JSObject::kMaxInstanceSize));
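
The kPointerSizeLog2 to kTaggedSizeLog2 switch and the new RoundUp reflect pointer compression: in-object property slots are tagged-sized (possibly 4 bytes) while embedder fields stay pointer-sized, so the header is rounded up to pointer alignment before embedder fields are laid out. A compile-time sketch of the arithmetic under assumed sizes (4-byte tagged slots on a 64-bit host):

    constexpr int kTaggedSize = 4;         // assumed: compressed tagged slot
    constexpr int kSystemPointerSize = 8;  // assumed: 64-bit host
    constexpr int kEmbedderSlotsPerField = kSystemPointerSize / kTaggedSize;

    // Requires a power-of-two alignment.
    constexpr int RoundUp(int value, int alignment) {
      return (value + alignment - 1) & ~(alignment - 1);
    }

    constexpr int InstanceSize(int header_size, int embedder_fields,
                               int in_object_properties) {
      // Embedder fields sit between the header and in-object properties.
      int h = embedder_fields ? RoundUp(header_size, kSystemPointerSize)
                              : header_size;
      int tagged_slots =
          embedder_fields * kEmbedderSlotsPerField + in_object_properties;
      return h + tagged_slots * kTaggedSize;
    }

    static_assert(InstanceSize(/*header=*/12, /*embedder=*/1, /*props=*/2) ==
                      16 + 2 * 4 + 2 * 4,
                  "header rounds 12 -> 16; 1 embedder field = 2 tagged slots");
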
@@ -13949,8 +14355,10 @@ bool JSFunction::CalculateInstanceSizeForDerivedClass(
// The super constructor should be compiled for the number of expected
// properties to be available.
Handle<SharedFunctionInfo> shared(func->shared(), isolate);
- if (shared->is_compiled() ||
- Compiler::Compile(func, Compiler::CLEAR_EXCEPTION)) {
+ IsCompiledScope is_compiled_scope(shared->is_compiled_scope());
+ if (is_compiled_scope.is_compiled() ||
+ Compiler::Compile(func, Compiler::CLEAR_EXCEPTION,
+ &is_compiled_scope)) {
DCHECK(shared->is_compiled());
int count = shared->expected_nof_properties();
// Check that the estimate is sane.
@@ -13959,7 +14367,7 @@ bool JSFunction::CalculateInstanceSizeForDerivedClass(
} else {
expected_nof_properties = JSObject::kMaxInObjectProperties;
}
- } else if (!shared->is_compiled()) {
+ } else {
// In case there was a compilation error for the constructor we will
// throw an error during instantiation. Hence we directly return 0;
return false;
@@ -13975,21 +14383,21 @@ bool JSFunction::CalculateInstanceSizeForDerivedClass(
// Output the source code without any allocation in the heap.
std::ostream& operator<<(std::ostream& os, const SourceCodeOf& v) {
- const SharedFunctionInfo* s = v.value;
+ const SharedFunctionInfo s = v.value;
// For some native functions there is no source.
if (!s->HasSourceCode()) return os << "<No Source>";
// Get the source for the script which this function came from.
// Don't use String::cast because we don't want more assertion errors while
// we are already creating a stack dump.
- String* script_source =
- reinterpret_cast<String*>(Script::cast(s->script())->source());
+ String script_source =
+ String::unchecked_cast(Script::cast(s->script())->source());
if (!script_source->LooksValid()) return os << "<Invalid Source>";
if (!s->is_toplevel()) {
os << "function ";
- String* name = s->Name();
+ String name = s->Name();
if (name->length() > 0) {
name->PrintUC16(os);
}
@@ -14014,7 +14422,7 @@ void SharedFunctionInfo::DisableOptimization(BailoutReason reason) {
// Code should be the lazy compilation stub or else interpreted.
DCHECK(abstract_code()->kind() == AbstractCode::INTERPRETED_FUNCTION ||
abstract_code()->kind() == AbstractCode::BUILTIN);
- PROFILE(GetIsolate(), CodeDisableOptEvent(abstract_code(), this));
+ PROFILE(GetIsolate(), CodeDisableOptEvent(abstract_code(), *this));
if (FLAG_trace_opt) {
PrintF("[disabled optimization for ");
ShortPrint();
@@ -14048,10 +14456,10 @@ void SharedFunctionInfo::InitFromFunctionLiteral(
// FunctionKind must have already been set.
DCHECK(lit->kind() == shared_info->kind());
shared_info->set_needs_home_object(lit->scope()->NeedsHomeObject());
- DCHECK_IMPLIES(lit->requires_instance_fields_initializer(),
+ DCHECK_IMPLIES(lit->requires_instance_members_initializer(),
IsClassConstructor(lit->kind()));
- shared_info->set_requires_instance_fields_initializer(
- lit->requires_instance_fields_initializer());
+ shared_info->set_requires_instance_members_initializer(
+ lit->requires_instance_members_initializer());
shared_info->set_is_toplevel(is_toplevel);
DCHECK(shared_info->outer_scope_info()->IsTheHole());
@@ -14066,45 +14474,36 @@ void SharedFunctionInfo::InitFromFunctionLiteral(
// don't have the information yet. They're set later in
// SetSharedFunctionFlagsFromLiteral (compiler.cc), when the function is
// really parsed and compiled.
- if (lit->body() != nullptr) {
+ if (lit->ShouldEagerCompile()) {
shared_info->set_length(lit->function_length());
shared_info->set_has_duplicate_parameters(lit->has_duplicate_parameters());
shared_info->SetExpectedNofPropertiesFromEstimate(lit);
- DCHECK_NULL(lit->produced_preparsed_scope_data());
- if (lit->ShouldEagerCompile()) {
- // If we're about to eager compile, we'll have the function literal
- // available, so there's no need to wastefully allocate an uncompiled
- // data.
- // TODO(leszeks): This should be explicitly passed as a parameter, rather
- // than relying on a property of the literal.
- needs_position_info = false;
- }
+ DCHECK_NULL(lit->produced_preparse_data());
+ // If we're about to eager compile, we'll have the function literal
+  // available, so there's no need to wastefully allocate uncompiled data.
+ // TODO(leszeks): This should be explicitly passed as a parameter, rather
+ // than relying on a property of the literal.
+ needs_position_info = false;
} else {
// Set an invalid length for lazy functions. This way we can set the correct
// value after compiling, but avoid overwriting values set manually by the
// bootstrapper.
shared_info->set_length(SharedFunctionInfo::kInvalidLength);
- if (FLAG_preparser_scope_analysis) {
- ProducedPreParsedScopeData* scope_data =
- lit->produced_preparsed_scope_data();
- if (scope_data != nullptr) {
- Handle<PreParsedScopeData> pre_parsed_scope_data;
- if (scope_data->Serialize(shared_info->GetIsolate())
- .ToHandle(&pre_parsed_scope_data)) {
- Handle<UncompiledData> data =
- isolate->factory()->NewUncompiledDataWithPreParsedScope(
- lit->inferred_name(), lit->start_position(),
- lit->end_position(), lit->function_literal_id(),
- pre_parsed_scope_data);
- shared_info->set_uncompiled_data(*data);
- needs_position_info = false;
- }
- }
+ ProducedPreparseData* scope_data = lit->produced_preparse_data();
+ if (scope_data != nullptr) {
+ Handle<PreparseData> preparse_data =
+ scope_data->Serialize(shared_info->GetIsolate());
+ Handle<UncompiledData> data =
+ isolate->factory()->NewUncompiledDataWithPreparseData(
+ lit->inferred_name(), lit->start_position(), lit->end_position(),
+ lit->function_literal_id(), preparse_data);
+ shared_info->set_uncompiled_data(*data);
+ needs_position_info = false;
}
}
if (needs_position_info) {
Handle<UncompiledData> data =
- isolate->factory()->NewUncompiledDataWithoutPreParsedScope(
+ isolate->factory()->NewUncompiledDataWithoutPreparseData(
lit->inferred_name(), lit->start_position(), lit->end_position(),
lit->function_literal_id());
shared_info->set_uncompiled_data(*data);
@@ -14147,9 +14546,9 @@ void SharedFunctionInfo::SetFunctionTokenPosition(int function_token_position,
}
int SharedFunctionInfo::StartPosition() const {
- Object* maybe_scope_info = name_or_scope_info();
+ Object maybe_scope_info = name_or_scope_info();
if (maybe_scope_info->IsScopeInfo()) {
- ScopeInfo* info = ScopeInfo::cast(maybe_scope_info);
+ ScopeInfo info = ScopeInfo::cast(maybe_scope_info);
if (info->HasPositionInfo()) {
return info->StartPosition();
}
@@ -14164,9 +14563,9 @@ int SharedFunctionInfo::StartPosition() const {
}
int SharedFunctionInfo::EndPosition() const {
- Object* maybe_scope_info = name_or_scope_info();
+ Object maybe_scope_info = name_or_scope_info();
if (maybe_scope_info->IsScopeInfo()) {
- ScopeInfo* info = ScopeInfo::cast(maybe_scope_info);
+ ScopeInfo info = ScopeInfo::cast(maybe_scope_info);
if (info->HasPositionInfo()) {
return info->EndPosition();
}
@@ -14196,17 +14595,17 @@ int SharedFunctionInfo::FunctionLiteralId(Isolate* isolate) const {
}
void SharedFunctionInfo::SetPosition(int start_position, int end_position) {
- Object* maybe_scope_info = name_or_scope_info();
+ Object maybe_scope_info = name_or_scope_info();
if (maybe_scope_info->IsScopeInfo()) {
- ScopeInfo* info = ScopeInfo::cast(maybe_scope_info);
+ ScopeInfo info = ScopeInfo::cast(maybe_scope_info);
if (info->HasPositionInfo()) {
info->SetPositionInfo(start_position, end_position);
}
} else if (HasUncompiledData()) {
- if (HasUncompiledDataWithPreParsedScope()) {
+ if (HasUncompiledDataWithPreparseData()) {
// Clear out preparsed scope data, since the position setter invalidates
// any scope data.
- ClearPreParsedScopeData();
+ ClearPreparseData();
}
uncompiled_data()->set_start_position(start_position);
uncompiled_data()->set_end_position(end_position);
@@ -14221,45 +14620,30 @@ void Map::StartInobjectSlackTracking() {
set_construction_counter(Map::kSlackTrackingCounterStart);
}
-void ObjectVisitor::VisitCodeTarget(Code* host, RelocInfo* rinfo) {
- DCHECK(RelocInfo::IsCodeTargetMode(rinfo->rmode()));
- Object* old_pointer = Code::GetCodeFromTargetAddress(rinfo->target_address());
- Object* new_pointer = old_pointer;
- VisitPointer(host, &new_pointer);
- DCHECK_EQ(old_pointer, new_pointer);
-}
-
-void ObjectVisitor::VisitEmbeddedPointer(Code* host, RelocInfo* rinfo) {
- DCHECK(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
- Object* old_pointer = rinfo->target_object();
- Object* new_pointer = old_pointer;
- VisitPointer(host, &new_pointer);
- DCHECK_EQ(old_pointer, new_pointer);
-}
-
void ObjectVisitor::VisitRelocInfo(RelocIterator* it) {
for (; !it->done(); it->next()) {
it->rinfo()->Visit(this);
}
}
-void Code::InvalidateEmbeddedObjects(Heap* heap) {
- HeapObject* undefined = ReadOnlyRoots(heap).undefined_value();
+void Code::ClearEmbeddedObjects(Heap* heap) {
+ HeapObject undefined = ReadOnlyRoots(heap).undefined_value();
int mode_mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
- for (RelocIterator it(this, mode_mask); !it.done(); it.next()) {
+ for (RelocIterator it(*this, mode_mask); !it.done(); it.next()) {
RelocInfo::Mode mode = it.rinfo()->rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
it.rinfo()->set_target_object(heap, undefined, SKIP_WRITE_BARRIER);
}
}
+ set_embedded_objects_cleared(true);
}
void Code::Relocate(intptr_t delta) {
- for (RelocIterator it(this, RelocInfo::kApplyMask); !it.done(); it.next()) {
+ for (RelocIterator it(*this, RelocInfo::kApplyMask); !it.done(); it.next()) {
it.rinfo()->apply(delta);
}
- Assembler::FlushICache(raw_instruction_start(), raw_instruction_size());
+ FlushICache();
}
void Code::FlushICache() const {
@@ -14281,15 +14665,13 @@ void Code::CopyFromNoFlush(Heap* heap, const CodeDesc& desc) {
}
// Copy reloc info.
- CopyBytes(relocation_start(),
- desc.buffer + desc.buffer_size - desc.reloc_size,
- static_cast<size_t>(desc.reloc_size));
+ CopyRelocInfoToByteArray(unchecked_relocation_info(), desc);
// Unbox handles and relocate.
Assembler* origin = desc.origin;
AllowDeferredHandleDereference embedding_raw_address;
const int mode_mask = RelocInfo::PostCodegenRelocationMask();
- for (RelocIterator it(this, mode_mask); !it.done(); it.next()) {
+ for (RelocIterator it(*this, mode_mask); !it.done(); it.next()) {
RelocInfo::Mode mode = it.rinfo()->rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
Handle<HeapObject> p = it.rinfo()->target_object_handle(origin);
@@ -14299,7 +14681,7 @@ void Code::CopyFromNoFlush(Heap* heap, const CodeDesc& desc) {
// Rewrite code handles to direct pointers to the first instruction in the
// code object.
Handle<Object> p = it.rinfo()->target_object_handle(origin);
- Code* code = Code::cast(*p);
+ Code code = Code::cast(*p);
it.rinfo()->set_target_address(code->raw_instruction_start(),
UPDATE_WRITE_BARRIER, SKIP_ICACHE_FLUSH);
} else if (RelocInfo::IsRuntimeEntry(mode)) {
@@ -14316,7 +14698,7 @@ void Code::CopyFromNoFlush(Heap* heap, const CodeDesc& desc) {
SafepointEntry Code::GetSafepointEntry(Address pc) {
- SafepointTable table(this);
+ SafepointTable table(*this);
return table.FindEntry(pc);
}
@@ -14377,8 +14759,8 @@ void AbstractCode::SetStackFrameCache(Handle<AbstractCode> abstract_code,
namespace {
template <typename Code>
-void DropStackFrameCacheCommon(Code* code) {
- i::Object* maybe_table = code->source_position_table();
+void DropStackFrameCacheCommon(Code code) {
+ i::Object maybe_table = code->source_position_table();
if (maybe_table->IsByteArray()) return;
DCHECK(maybe_table->IsSourcePositionTableWithFrameCache());
code->set_source_position_table(
@@ -14425,30 +14807,31 @@ int AbstractCode::SourceStatementPosition(int offset) {
}
void JSFunction::ClearTypeFeedbackInfo() {
- if (feedback_cell()->value()->IsFeedbackVector()) {
- FeedbackVector* vector = feedback_vector();
+ ResetIfBytecodeFlushed();
+ if (has_feedback_vector()) {
+ FeedbackVector vector = feedback_vector();
Isolate* isolate = GetIsolate();
if (vector->ClearSlots(isolate)) {
- IC::OnFeedbackChanged(isolate, vector, FeedbackSlot::Invalid(), this,
+ IC::OnFeedbackChanged(isolate, vector, FeedbackSlot::Invalid(), *this,
"ClearTypeFeedbackInfo");
}
}
}
void Code::PrintDeoptLocation(FILE* out, const char* str, Address pc) {
- Deoptimizer::DeoptInfo info = Deoptimizer::GetDeoptInfo(this, pc);
+ Deoptimizer::DeoptInfo info = Deoptimizer::GetDeoptInfo(*this, pc);
class SourcePosition pos = info.position;
if (info.deopt_reason != DeoptimizeReason::kUnknown || pos.IsKnown()) {
PrintF(out, "%s", str);
OFStream outstr(out);
- pos.Print(outstr, this);
+ pos.Print(outstr, *this);
PrintF(out, ", %s\n", DeoptimizeReasonToString(info.deopt_reason));
}
}
bool Code::CanDeoptAt(Address pc) {
- DeoptimizationData* deopt_data =
+ DeoptimizationData deopt_data =
DeoptimizationData::cast(deoptimization_data());
Address code_start_address = InstructionStart();
for (int i = 0; i < deopt_data->DeoptCount(); i++) {
@@ -14485,13 +14868,10 @@ bool Code::IsIsolateIndependent(Isolate* isolate) {
constexpr int all_real_modes_mask =
(1 << (RelocInfo::LAST_REAL_RELOC_MODE + 1)) - 1;
constexpr int mode_mask = all_real_modes_mask &
- ~RelocInfo::ModeMask(RelocInfo::COMMENT) &
~RelocInfo::ModeMask(RelocInfo::CONST_POOL) &
~RelocInfo::ModeMask(RelocInfo::OFF_HEAP_TARGET) &
~RelocInfo::ModeMask(RelocInfo::VENEER_POOL);
STATIC_ASSERT(RelocInfo::LAST_REAL_RELOC_MODE == RelocInfo::VENEER_POOL);
- STATIC_ASSERT(RelocInfo::ModeMask(RelocInfo::COMMENT) ==
- (1 << RelocInfo::COMMENT));
STATIC_ASSERT(mode_mask ==
(RelocInfo::ModeMask(RelocInfo::CODE_TARGET) |
RelocInfo::ModeMask(RelocInfo::RELATIVE_CODE_TARGET) |
@@ -14499,25 +14879,24 @@ bool Code::IsIsolateIndependent(Isolate* isolate) {
RelocInfo::ModeMask(RelocInfo::EXTERNAL_REFERENCE) |
RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE) |
RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE_ENCODED) |
- RelocInfo::ModeMask(RelocInfo::JS_TO_WASM_CALL) |
RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY) |
RelocInfo::ModeMask(RelocInfo::WASM_CALL) |
RelocInfo::ModeMask(RelocInfo::WASM_STUB_CALL)));
bool is_process_independent = true;
- for (RelocIterator it(this, mode_mask); !it.done(); it.next()) {
+ for (RelocIterator it(*this, mode_mask); !it.done(); it.next()) {
#if defined(V8_TARGET_ARCH_X64) || defined(V8_TARGET_ARCH_ARM64) || \
- defined(V8_TARGET_ARCH_ARM)
- // On X64, ARM, ARM64 we emit relative builtin-to-builtin jumps for isolate
- // independent builtins in the snapshot. They are later rewritten as
- // pc-relative jumps to the off-heap instruction stream and are thus
- // process-independent.
- // See also: FinalizeEmbeddedCodeTargets.
+ defined(V8_TARGET_ARCH_ARM) || defined(V8_TARGET_ARCH_MIPS) || \
+ defined(V8_TARGET_ARCH_S390) || defined(V8_TARGET_ARCH_IA32)
+ // On these platforms we emit relative builtin-to-builtin
+ // jumps for isolate independent builtins in the snapshot. They are later
+ // rewritten as pc-relative jumps to the off-heap instruction stream and are
+ // thus process-independent. See also: FinalizeEmbeddedCodeTargets.
if (RelocInfo::IsCodeTargetMode(it.rinfo()->rmode())) {
Address target_address = it.rinfo()->target_address();
if (InstructionStream::PcIsOffHeap(isolate, target_address)) continue;
- Code* target = Code::GetCodeFromTargetAddress(target_address);
+ Code target = Code::GetCodeFromTargetAddress(target_address);
CHECK(target->IsCode());
if (Builtins::IsIsolateIndependentBuiltin(target)) continue;
}
@@ -14528,15 +14907,15 @@ bool Code::IsIsolateIndependent(Isolate* isolate) {
return is_process_independent;
}
-bool Code::Inlines(SharedFunctionInfo* sfi) {
+bool Code::Inlines(SharedFunctionInfo sfi) {
// We can only check for inlining for optimized code.
DCHECK(is_optimized_code());
DisallowHeapAllocation no_gc;
- DeoptimizationData* const data =
+ DeoptimizationData const data =
DeoptimizationData::cast(deoptimization_data());
if (data->length() == 0) return false;
if (data->SharedFunctionInfo() == sfi) return true;
- FixedArray* const literals = data->LiteralArray();
+ FixedArray const literals = data->LiteralArray();
int const inlined_count = data->InlinedFunctionCount()->value();
for (int i = 0; i < inlined_count; ++i) {
if (SharedFunctionInfo::cast(literals->get(i)) == sfi) return true;
@@ -14546,33 +14925,31 @@ bool Code::Inlines(SharedFunctionInfo* sfi) {
Code::OptimizedCodeIterator::OptimizedCodeIterator(Isolate* isolate) {
isolate_ = isolate;
- Object* list = isolate->heap()->native_contexts_list();
- next_context_ = list->IsUndefined(isolate_) ? nullptr : Context::cast(list);
- current_code_ = nullptr;
+ Object list = isolate->heap()->native_contexts_list();
+ next_context_ = list->IsUndefined(isolate_) ? Context() : Context::cast(list);
}
-Code* Code::OptimizedCodeIterator::Next() {
+Code Code::OptimizedCodeIterator::Next() {
do {
- Object* next;
- if (current_code_ != nullptr) {
+ Object next;
+ if (!current_code_.is_null()) {
// Get next code in the linked list.
- next = Code::cast(current_code_)->next_code_link();
- } else if (next_context_ != nullptr) {
+ next = current_code_->next_code_link();
+ } else if (!next_context_.is_null()) {
// Linked list of code exhausted. Get list of next context.
next = next_context_->OptimizedCodeListHead();
- Object* next_context = next_context_->next_context_link();
+ Object next_context = next_context_->next_context_link();
next_context_ = next_context->IsUndefined(isolate_)
- ? nullptr
+ ? Context()
: Context::cast(next_context);
} else {
// Exhausted contexts.
- return nullptr;
+ return Code();
}
- current_code_ = next->IsUndefined(isolate_) ? nullptr : Code::cast(next);
- } while (current_code_ == nullptr);
- Code* code = Code::cast(current_code_);
- DCHECK_EQ(Code::OPTIMIZED_FUNCTION, code->kind());
- return code;
+ current_code_ = next->IsUndefined(isolate_) ? Code() : Code::cast(next);
+ } while (current_code_.is_null());
+ DCHECK_EQ(Code::OPTIMIZED_FUNCTION, current_code_->kind());
+ return current_code_;
}
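
OptimizedCodeIterator::Next flattens a two-level structure, the list of native contexts where each context holds its own list of optimized Code objects, behind a single cursor, skipping contexts whose lists are empty. The same control flow with plain pointers (node types assumed; the diff's version walks tagged values terminated by undefined):

    struct CodeNode {
      CodeNode* next;  // next code object in this context's list
    };
    struct ContextNode {
      ContextNode* next;    // next native context
      CodeNode* code_head;  // head of this context's optimized-code list
    };

    class OptimizedCodeIterator {
     public:
      explicit OptimizedCodeIterator(ContextNode* contexts)
          : next_context_(contexts) {}

      CodeNode* Next() {
        do {
          CodeNode* next = nullptr;
          if (current_code_ != nullptr) {
            next = current_code_->next;       // advance within one list
          } else if (next_context_ != nullptr) {
            next = next_context_->code_head;  // hop to the next list
            next_context_ = next_context_->next;
          } else {
            return nullptr;                   // both levels exhausted
          }
          current_code_ = next;
        } while (current_code_ == nullptr);   // skip empty contexts
        return current_code_;
      }

     private:
      ContextNode* next_context_;
      CodeNode* current_code_ = nullptr;
    };
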
#ifdef ENABLE_DISASSEMBLER
@@ -14597,7 +14974,7 @@ void DeoptimizationData::DeoptimizationDataPrint(std::ostream& os) { // NOLINT
int const inlined_function_count = InlinedFunctionCount()->value();
os << "Inlined functions (count = " << inlined_function_count << ")\n";
for (int id = 0; id < inlined_function_count; ++id) {
- Object* info = LiteralArray()->get(id);
+ Object info = LiteralArray()->get(id);
os << " " << Brief(SharedFunctionInfo::cast(info)) << "\n";
}
os << "\n";
@@ -14647,17 +15024,20 @@ void DeoptimizationData::DeoptimizationDataPrint(std::ostream& os) { // NOLINT
int bytecode_offset = iterator.Next();
int shared_info_id = iterator.Next();
unsigned height = iterator.Next();
- Object* shared_info = LiteralArray()->get(shared_info_id);
+ int return_value_offset = iterator.Next();
+ int return_value_count = iterator.Next();
+ Object shared_info = LiteralArray()->get(shared_info_id);
os << "{bytecode_offset=" << bytecode_offset << ", function="
<< Brief(SharedFunctionInfo::cast(shared_info)->DebugName())
- << ", height=" << height << "}";
+ << ", height=" << height << ", retval=@" << return_value_offset
+ << "(#" << return_value_count << ")}";
break;
}
case Translation::CONSTRUCT_STUB_FRAME: {
int bailout_id = iterator.Next();
int shared_info_id = iterator.Next();
- Object* shared_info = LiteralArray()->get(shared_info_id);
+ Object shared_info = LiteralArray()->get(shared_info_id);
unsigned height = iterator.Next();
os << "{bailout_id=" << bailout_id << ", function="
<< Brief(SharedFunctionInfo::cast(shared_info)->DebugName())
@@ -14670,7 +15050,7 @@ void DeoptimizationData::DeoptimizationDataPrint(std::ostream& os) { // NOLINT
case Translation::JAVA_SCRIPT_BUILTIN_CONTINUATION_WITH_CATCH_FRAME: {
int bailout_id = iterator.Next();
int shared_info_id = iterator.Next();
- Object* shared_info = LiteralArray()->get(shared_info_id);
+ Object shared_info = LiteralArray()->get(shared_info_id);
unsigned height = iterator.Next();
os << "{bailout_id=" << bailout_id << ", function="
<< Brief(SharedFunctionInfo::cast(shared_info)->DebugName())
@@ -14680,7 +15060,7 @@ void DeoptimizationData::DeoptimizationDataPrint(std::ostream& os) { // NOLINT
case Translation::ARGUMENTS_ADAPTOR_FRAME: {
int shared_info_id = iterator.Next();
- Object* shared_info = LiteralArray()->get(shared_info_id);
+ Object shared_info = LiteralArray()->get(shared_info_id);
unsigned height = iterator.Next();
os << "{function="
<< Brief(SharedFunctionInfo::cast(shared_info)->DebugName())
@@ -14724,18 +15104,13 @@ void DeoptimizationData::DeoptimizationDataPrint(std::ostream& os) { // NOLINT
case Translation::FLOAT_REGISTER: {
int reg_code = iterator.Next();
- os << "{input="
- << RegisterConfiguration::Default()->GetFloatRegisterName(reg_code)
- << "}";
+ os << "{input=" << FloatRegister::from_code(reg_code) << "}";
break;
}
case Translation::DOUBLE_REGISTER: {
int reg_code = iterator.Next();
- os << "{input="
- << RegisterConfiguration::Default()->GetDoubleRegisterName(
- reg_code)
- << "}";
+ os << "{input=" << DoubleRegister::from_code(reg_code) << "}";
break;
}
@@ -14778,7 +15153,7 @@ void DeoptimizationData::DeoptimizationDataPrint(std::ostream& os) { // NOLINT
case Translation::LITERAL: {
int literal_index = iterator.Next();
- Object* literal_value = LiteralArray()->get(literal_index);
+ Object literal_value = LiteralArray()->get(literal_index);
os << "{literal_id=" << literal_index << " (" << Brief(literal_value)
<< ")}";
break;
@@ -14818,10 +15193,8 @@ void DeoptimizationData::DeoptimizationDataPrint(std::ostream& os) { // NOLINT
}
const char* Code::GetName(Isolate* isolate) const {
- if (is_stub()) {
- return CodeStub::MajorName(CodeStub::GetMajorKey(this));
- } else if (kind() == BYTECODE_HANDLER) {
- return isolate->interpreter()->LookupNameOfBytecodeHandler(this);
+ if (kind() == BYTECODE_HANDLER) {
+ return isolate->interpreter()->LookupNameOfBytecodeHandler(*this);
} else {
// There are some handlers and ICs that we can also find names for with
// Builtins::Lookup.
@@ -14829,24 +15202,9 @@ const char* Code::GetName(Isolate* isolate) const {
}
}
-void Code::PrintBuiltinCode(Isolate* isolate, const char* name) {
- DCHECK(FLAG_print_builtin_code);
- if (name == nullptr) {
- name = GetName(isolate);
- }
- if (name != nullptr &&
- PassesFilter(CStrVector(name),
- CStrVector(FLAG_print_builtin_code_filter))) {
- CodeTracer::Scope trace_scope(isolate->GetCodeTracer());
- OFStream os(trace_scope.file());
- Disassemble(name, os);
- os << "\n";
- }
-}
-
namespace {
-inline void DisassembleCodeRange(Isolate* isolate, std::ostream& os, Code* code,
+inline void DisassembleCodeRange(Isolate* isolate, std::ostream& os, Code code,
Address begin, size_t size,
Address current_pc) {
Address end = begin + size;
@@ -14865,11 +15223,6 @@ inline void DisassembleCodeRange(Isolate* isolate, std::ostream& os, Code* code,
void Code::Disassemble(const char* name, std::ostream& os, Address current_pc) {
Isolate* isolate = GetIsolate();
os << "kind = " << Kind2String(kind()) << "\n";
- if (is_stub()) {
- const char* n = CodeStub::MajorName(CodeStub::GetMajorKey(this));
- os << "major_key = " << (n == nullptr ? "null" : n) << "\n";
- os << "minor_key = " << CodeStub::MinorKeyFromKey(this->stub_key()) << "\n";
- }
if (name == nullptr) {
name = GetName(isolate);
}
@@ -14885,7 +15238,7 @@ void Code::Disassemble(const char* name, std::ostream& os, Address current_pc) {
if (is_off_heap_trampoline()) {
int trampoline_size = raw_instruction_size();
os << "Trampoline (size = " << trampoline_size << ")\n";
- DisassembleCodeRange(isolate, os, this, raw_instruction_start(),
+ DisassembleCodeRange(isolate, os, *this, raw_instruction_start(),
trampoline_size, current_pc);
os << "\n";
}
@@ -14894,24 +15247,24 @@ void Code::Disassemble(const char* name, std::ostream& os, Address current_pc) {
int size = InstructionSize();
int safepoint_offset =
has_safepoint_info() ? safepoint_table_offset() : size;
- int constant_pool_offset = this->constant_pool_offset();
+ int const_pool_offset = constant_pool_offset();
int handler_offset = handler_table_offset() ? handler_table_offset() : size;
+ int comments_offset = code_comments_offset();
// Stop before reaching any embedded tables
- int code_size =
- Min(handler_offset, Min(safepoint_offset, constant_pool_offset));
+ int code_size = std::min(
+ {handler_offset, safepoint_offset, const_pool_offset, comments_offset});
os << "Instructions (size = " << code_size << ")\n";
- DisassembleCodeRange(isolate, os, this, InstructionStart(), code_size,
+ DisassembleCodeRange(isolate, os, *this, InstructionStart(), code_size,
current_pc);
- if (constant_pool_offset < size) {
- int constant_pool_size = safepoint_offset - constant_pool_offset;
- DCHECK_EQ(constant_pool_size & kPointerAlignmentMask, 0);
- os << "\nConstant Pool (size = " << constant_pool_size << ")\n";
+ if (int pool_size = constant_pool_size()) {
+ DCHECK_EQ(pool_size & kPointerAlignmentMask, 0);
+ os << "\nConstant Pool (size = " << pool_size << ")\n";
Vector<char> buf = Vector<char>::New(50);
- intptr_t* ptr = reinterpret_cast<intptr_t*>(InstructionStart() +
- constant_pool_offset);
- for (int i = 0; i < constant_pool_size; i += kPointerSize, ptr++) {
+ intptr_t* ptr =
+ reinterpret_cast<intptr_t*>(InstructionStart() + const_pool_offset);
+ for (int i = 0; i < pool_size; i += kSystemPointerSize, ptr++) {
SNPrintF(buf, "%4d %08" V8PRIxPTR, i, *ptr);
os << static_cast<const void*>(ptr) << " " << buf.start() << "\n";
}
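
The instruction-area size is now the minimum over every embedded-table offset, using std::min with a braced list in place of the nested Min calls; an absent table is represented by an offset equal to the full size, so it never wins. A tiny sketch of the same computation:

    #include <algorithm>

    // All arguments share one type, as the initializer-list overload requires.
    int InstructionAreaSize(int total_size, int safepoint_offset,
                            int handler_offset, int const_pool_offset,
                            int comments_offset) {
      return std::min({handler_offset, safepoint_offset, const_pool_offset,
                       comments_offset, total_size});
    }
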
@@ -14919,26 +15272,44 @@ void Code::Disassemble(const char* name, std::ostream& os, Address current_pc) {
}
os << "\n";
- SourcePositionTableIterator it(SourcePositionTable());
- if (!it.done()) {
- os << "Source positions:\n pc offset position\n";
- for (; !it.done(); it.Advance()) {
- os << std::setw(10) << std::hex << it.code_offset() << std::dec
- << std::setw(10) << it.source_position().ScriptOffset()
- << (it.is_statement() ? " statement" : "") << "\n";
+ {
+ SourcePositionTableIterator it(
+ SourcePositionTable(), SourcePositionTableIterator::kJavaScriptOnly);
+ if (!it.done()) {
+ os << "Source positions:\n pc offset position\n";
+ for (; !it.done(); it.Advance()) {
+ os << std::setw(10) << std::hex << it.code_offset() << std::dec
+ << std::setw(10) << it.source_position().ScriptOffset()
+ << (it.is_statement() ? " statement" : "") << "\n";
+ }
+ os << "\n";
+ }
+ }
+
+ {
+ SourcePositionTableIterator it(SourcePositionTable(),
+ SourcePositionTableIterator::kExternalOnly);
+ if (!it.done()) {
+ os << "External Source positions:\n pc offset fileid line\n";
+ for (; !it.done(); it.Advance()) {
+ DCHECK(it.source_position().IsExternal());
+ os << std::setw(10) << std::hex << it.code_offset() << std::dec
+ << std::setw(10) << it.source_position().ExternalFileId()
+ << std::setw(10) << it.source_position().ExternalLine() << "\n";
+ }
+ os << "\n";
}
- os << "\n";
}
if (kind() == OPTIMIZED_FUNCTION) {
- DeoptimizationData* data =
+ DeoptimizationData data =
DeoptimizationData::cast(this->deoptimization_data());
data->DeoptimizationDataPrint(os);
}
os << "\n";
if (has_safepoint_info()) {
- SafepointTable table(this);
+ SafepointTable table(*this);
os << "Safepoints (size = " << table.size() << ")\n";
for (unsigned i = 0; i < table.length(); i++) {
unsigned pc_offset = table.GetPcOffset(i);
@@ -14951,21 +15322,18 @@ void Code::Disassemble(const char* name, std::ostream& os, Address current_pc) {
table.PrintEntry(i, os);
os << " (sp -> fp) ";
SafepointEntry entry = table.GetEntry(i);
- if (entry.deoptimization_index() != Safepoint::kNoDeoptimizationIndex) {
+ if (entry.has_deoptimization_index()) {
os << std::setw(6) << entry.deoptimization_index();
} else {
os << "<none>";
}
- if (entry.argument_count() > 0) {
- os << " argc: " << entry.argument_count();
- }
os << "\n";
}
os << "\n";
}
if (handler_table_offset() > 0) {
- HandlerTable table(this);
+ HandlerTable table(*this);
os << "Handler Table (size = " << table.NumberOfReturnEntries() << ")\n";
if (kind() == OPTIMIZED_FUNCTION) {
table.HandlerTableReturnPrint(os);
@@ -14974,7 +15342,7 @@ void Code::Disassemble(const char* name, std::ostream& os, Address current_pc) {
}
os << "RelocInfo (size = " << relocation_size() << ")\n";
- for (RelocIterator it(this); !it.done(); it.next()) {
+ for (RelocIterator it(*this); !it.done(); it.next()) {
it.rinfo()->Print(isolate, os);
}
os << "\n";
@@ -14987,6 +15355,10 @@ void Code::Disassemble(const char* name, std::ostream& os, Address current_pc) {
eh_frame_disassembler.DisassembleToStream(os);
os << "\n";
}
+
+ if (code_comments_offset() < InstructionSize()) {
+ PrintCodeCommentsSection(os, code_comments());
+ }
}
#endif // ENABLE_DISASSEMBLER
@@ -15001,8 +15373,8 @@ void BytecodeArray::Disassemble(std::ostream& os) {
// Storage for backing the handle passed to the iterator. This handle won't be
// updated by the gc, but that's ok because we've disallowed GCs anyway.
- BytecodeArray* handle_storage = this;
- Handle<BytecodeArray> handle(&handle_storage);
+ BytecodeArray handle_storage = *this;
+ Handle<BytecodeArray> handle(reinterpret_cast<Address*>(&handle_storage));
interpreter::BytecodeArrayIterator iterator(handle);
while (!iterator.done()) {
if (!source_positions.done() &&
@@ -15050,14 +15422,14 @@ void BytecodeArray::Disassemble(std::ostream& os) {
os << "Handler Table (size = " << handler_table()->length() << ")\n";
#ifdef ENABLE_DISASSEMBLER
if (handler_table()->length() > 0) {
- HandlerTable table(this);
+ HandlerTable table(*this);
table.HandlerTableRangePrint(os);
}
#endif
}
-void BytecodeArray::CopyBytecodesTo(BytecodeArray* to) {
- BytecodeArray* from = this;
+void BytecodeArray::CopyBytecodesTo(BytecodeArray to) {
+ BytecodeArray from = *this;
DCHECK_EQ(from->length(), to->length());
CopyBytes(reinterpret_cast<byte*>(to->GetFirstBytecodeAddress()),
reinterpret_cast<byte*>(from->GetFirstBytecodeAddress()),
@@ -15068,7 +15440,7 @@ void BytecodeArray::MakeOlder() {
// BytecodeArray is aged in concurrent marker.
// The word must be completely within the byte code array.
Address age_addr = address() + kBytecodeAgeOffset;
- DCHECK_LE((age_addr & ~kPointerAlignmentMask) + kPointerSize,
+ DCHECK_LE(RoundDown(age_addr, kSystemPointerSize) + kSystemPointerSize,
address() + Size());
Age age = bytecode_age();
if (age < kLastBytecodeAge) {
@@ -15100,7 +15472,7 @@ void JSArray::SetLength(Handle<JSArray> array, uint32_t new_length) {
array->GetElementsAccessor()->SetLength(array, new_length);
}
-DependentCode* DependentCode::GetDependentCode(Handle<HeapObject> object) {
+DependentCode DependentCode::GetDependentCode(Handle<HeapObject> object) {
if (object->IsMap()) {
return Handle<Map>::cast(object)->dependent_code();
} else if (object->IsPropertyCell()) {
@@ -15196,7 +15568,7 @@ bool DependentCode::Compact() {
int old_count = count();
int new_count = 0;
for (int i = 0; i < old_count; i++) {
- MaybeObject* obj = object_at(i);
+ MaybeObject obj = object_at(i);
if (!obj->IsCleared()) {
if (i != new_count) {
copy(i, new_count);
@@ -15228,9 +15600,9 @@ bool DependentCode::MarkCodeForDeoptimization(
bool marked = false;
int count = this->count();
for (int i = 0; i < count; i++) {
- MaybeObject* obj = object_at(i);
+ MaybeObject obj = object_at(i);
if (obj->IsCleared()) continue;
- Code* code = Code::cast(obj->GetHeapObjectAssumeWeak());
+ Code code = Code::cast(obj->GetHeapObjectAssumeWeak());
if (!code->marked_for_deoptimization()) {
code->SetMarkedForDeoptimization(DependencyGroupName(group));
marked = true;
@@ -15259,14 +15631,13 @@ void Code::SetMarkedForDeoptimization(const char* reason) {
set_marked_for_deoptimization(true);
if (FLAG_trace_deopt &&
(deoptimization_data() != GetReadOnlyRoots().empty_fixed_array())) {
- DeoptimizationData* deopt_data =
+ DeoptimizationData deopt_data =
DeoptimizationData::cast(deoptimization_data());
CodeTracer::Scope scope(GetHeap()->isolate()->GetCodeTracer());
PrintF(scope.file(),
"[marking dependent code " V8PRIxPTR_FMT
" (opt #%d) for deoptimization, reason: %s]\n",
- reinterpret_cast<intptr_t>(this),
- deopt_data->OptimizationId()->value(), reason);
+ ptr(), deopt_data->OptimizationId()->value(), reason);
}
}
@@ -15503,7 +15874,7 @@ void JSObject::EnsureCanContainElements(Handle<JSObject> object,
// stack), but the method that's called here iterates over them in forward
// direction.
return EnsureCanContainElements(
- object, args->arguments() - first_arg - (arg_count - 1), arg_count, mode);
+ object, args->slot_at(first_arg + arg_count - 1), arg_count, mode);
}
@@ -15511,7 +15882,7 @@ ElementsAccessor* JSObject::GetElementsAccessor() {
return ElementsAccessor::ForKind(GetElementsKind());
}
-void JSObject::ValidateElements(JSObject* object) {
+void JSObject::ValidateElements(JSObject object) {
#ifdef ENABLE_SLOW_DCHECKS
if (FLAG_enable_slow_asserts) {
object->GetElementsAccessor()->Validate(object);
@@ -15519,8 +15890,7 @@ void JSObject::ValidateElements(JSObject* object) {
#endif
}
-
-static bool ShouldConvertToSlowElements(JSObject* object, uint32_t capacity,
+static bool ShouldConvertToSlowElements(JSObject object, uint32_t capacity,
uint32_t index,
uint32_t* new_capacity) {
STATIC_ASSERT(JSObject::kMaxUncheckedOldFastElementsLength <=
@@ -15546,16 +15916,14 @@ static bool ShouldConvertToSlowElements(JSObject* object, uint32_t capacity,
return size_threshold <= *new_capacity;
}
-
bool JSObject::WouldConvertToSlowElements(uint32_t index) {
if (!HasFastElements()) return false;
uint32_t capacity = static_cast<uint32_t>(elements()->length());
uint32_t new_capacity;
- return ShouldConvertToSlowElements(this, capacity, index, &new_capacity);
+ return ShouldConvertToSlowElements(*this, capacity, index, &new_capacity);
}
-
-static ElementsKind BestFittingFastElementsKind(JSObject* object) {
+static ElementsKind BestFittingFastElementsKind(JSObject object) {
if (!object->map()->CanHaveFastTransitionableElementsKind()) {
return HOLEY_ELEMENTS;
}
@@ -15566,12 +15934,12 @@ static ElementsKind BestFittingFastElementsKind(JSObject* object) {
return FAST_STRING_WRAPPER_ELEMENTS;
}
DCHECK(object->HasDictionaryElements());
- NumberDictionary* dictionary = object->element_dictionary();
+ NumberDictionary dictionary = object->element_dictionary();
ElementsKind kind = HOLEY_SMI_ELEMENTS;
for (int i = 0; i < dictionary->Capacity(); i++) {
- Object* key = dictionary->KeyAt(i);
+ Object key = dictionary->KeyAt(i);
if (key->IsNumber()) {
- Object* value = dictionary->ValueAt(i);
+ Object value = dictionary->ValueAt(i);
if (!value->IsNumber()) return HOLEY_ELEMENTS;
if (!value->IsSmi()) {
if (!FLAG_unbox_double_arrays) return HOLEY_ELEMENTS;
@@ -15582,8 +15950,8 @@ static ElementsKind BestFittingFastElementsKind(JSObject* object) {
return kind;
}
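
BestFittingFastElementsKind scans a slow-mode dictionary and picks the most specific dense representation its values still allow: start at Smi, widen to double when a value doesn't fit, and fall back to the generic kind on any non-number. A sketch over an assumed Value record (the real code also consults FLAG_unbox_double_arrays and string-wrapper kinds):

    #include <vector>

    enum class Kind { kSmi, kDouble, kGeneric };

    struct Value {
      bool is_number;
      bool is_smi;
    };

    Kind BestFittingKind(const std::vector<Value>& values) {
      Kind kind = Kind::kSmi;
      for (const Value& v : values) {
        if (!v.is_number) return Kind::kGeneric;  // any non-number ends the scan
        if (!v.is_smi) kind = Kind::kDouble;      // widen, but keep scanning
      }
      return kind;
    }
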
-static bool ShouldConvertToFastElements(JSObject* object,
- NumberDictionary* dictionary,
+static bool ShouldConvertToFastElements(JSObject object,
+ NumberDictionary dictionary,
uint32_t index,
uint32_t* new_capacity) {
// If properties with non-standard attributes or accessors were added, we
@@ -15594,7 +15962,7 @@ static bool ShouldConvertToFastElements(JSObject* object,
if (index >= static_cast<uint32_t>(Smi::kMaxValue)) return false;
if (object->IsJSArray()) {
- Object* length = JSArray::cast(object)->length();
+ Object length = JSArray::cast(object)->length();
if (!length->IsSmi()) return false;
*new_capacity = static_cast<uint32_t>(Smi::ToInt(length));
} else if (object->IsJSSloppyArgumentsObject()) {
@@ -15627,7 +15995,7 @@ void JSObject::AddDataElement(Handle<JSObject> object, uint32_t index,
}
ElementsKind kind = object->GetElementsKind();
- FixedArrayBase* elements = object->elements();
+ FixedArrayBase elements = object->elements();
ElementsKind dictionary_kind = DICTIONARY_ELEMENTS;
if (IsSloppyArgumentsElementsKind(kind)) {
elements = SloppyArgumentsElements::cast(elements)->arguments();
@@ -15671,7 +16039,7 @@ bool JSArray::SetLengthWouldNormalize(uint32_t new_length) {
uint32_t capacity = static_cast<uint32_t>(elements()->length());
uint32_t new_capacity;
return JSArray::SetLengthWouldNormalize(GetHeap(), new_length) &&
- ShouldConvertToSlowElements(this, capacity, new_length - 1,
+ ShouldConvertToSlowElements(*this, capacity, new_length - 1,
&new_capacity);
}
@@ -15693,10 +16061,10 @@ PretenureFlag AllocationSite::GetPretenureMode() const {
bool AllocationSite::IsNested() {
DCHECK(FLAG_trace_track_allocation_sites);
- Object* current = boilerplate()->GetHeap()->allocation_sites_list();
+ Object current = boilerplate()->GetHeap()->allocation_sites_list();
while (current->IsAllocationSite()) {
- AllocationSite* current_site = AllocationSite::cast(current);
- if (current_site->nested_site() == this) {
+ AllocationSite current_site = AllocationSite::cast(current);
+ if (current_site->nested_site() == *this) {
return true;
}
current = current_site->weak_next();
@@ -15729,8 +16097,9 @@ bool AllocationSite::DigestTransitionFeedback(Handle<AllocationSite> site,
if (FLAG_trace_track_allocation_sites) {
bool is_nested = site->IsNested();
PrintF("AllocationSite: JSArray %p boilerplate %supdated %s->%s\n",
- reinterpret_cast<void*>(*site), is_nested ? "(nested)" : " ",
- ElementsKindToString(kind), ElementsKindToString(to_kind));
+ reinterpret_cast<void*>(site->ptr()),
+ is_nested ? "(nested)" : " ", ElementsKindToString(kind),
+ ElementsKindToString(to_kind));
}
JSObject::TransitionElementsKind(boilerplate, to_kind);
site->dependent_code()->DeoptimizeDependentCodeGroup(
@@ -15749,8 +16118,7 @@ bool AllocationSite::DigestTransitionFeedback(Handle<AllocationSite> site,
if (update_or_check == AllocationSiteUpdateMode::kCheckOnly) return true;
if (FLAG_trace_track_allocation_sites) {
PrintF("AllocationSite: JSArray %p site updated %s->%s\n",
- reinterpret_cast<void*>(*site),
- ElementsKindToString(kind),
+ reinterpret_cast<void*>(site->ptr()), ElementsKindToString(kind),
ElementsKindToString(to_kind));
}
site->SetElementsKind(to_kind);
@@ -15791,9 +16159,9 @@ bool JSObject::UpdateAllocationSite(Handle<JSObject> object,
DisallowHeapAllocation no_allocation;
Heap* heap = object->GetHeap();
- AllocationMemento* memento =
+ AllocationMemento memento =
heap->FindAllocationMemento<Heap::kForRuntime>(object->map(), *object);
- if (memento == nullptr) return false;
+ if (memento.is_null()) return false;
// Walk through to the Allocation Site
site = handle(memento->GetAllocationSite(), heap->isolate());
@@ -15845,7 +16213,7 @@ void JSObject::TransitionElementsKind(Handle<JSObject> object,
bool JSArray::HasReadOnlyLength(Handle<JSArray> array) {
- Map* map = array->map();
+ Map map = array->map();
// Fast path: "length" is the first fast property of arrays. Since it's not
// configurable, it's guaranteed to be the first in the descriptor array.
if (!map->is_dictionary_map()) {
@@ -15871,7 +16239,7 @@ bool JSArray::WouldChangeReadOnlyLength(Handle<JSArray> array,
}
template <typename BackingStore>
-static int HoleyElementsUsage(JSObject* object, BackingStore* store) {
+static int HoleyElementsUsage(JSObject object, BackingStore store) {
Isolate* isolate = object->GetIsolate();
int limit = object->IsJSArray() ? Smi::ToInt(JSArray::cast(object)->length())
: store->length();
@@ -15883,12 +16251,12 @@ static int HoleyElementsUsage(JSObject* object, BackingStore* store) {
}
int JSObject::GetFastElementsUsage() {
- FixedArrayBase* store = elements();
+ FixedArrayBase store = elements();
switch (GetElementsKind()) {
case PACKED_SMI_ELEMENTS:
case PACKED_DOUBLE_ELEMENTS:
case PACKED_ELEMENTS:
- return IsJSArray() ? Smi::ToInt(JSArray::cast(this)->length())
+ return IsJSArray() ? Smi::ToInt(JSArray::cast(*this)->length())
: store->length();
case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
store = SloppyArgumentsElements::cast(store)->arguments();
@@ -15896,10 +16264,10 @@ int JSObject::GetFastElementsUsage() {
case HOLEY_SMI_ELEMENTS:
case HOLEY_ELEMENTS:
case FAST_STRING_WRAPPER_ELEMENTS:
- return HoleyElementsUsage(this, FixedArray::cast(store));
+ return HoleyElementsUsage(*this, FixedArray::cast(store));
case HOLEY_DOUBLE_ELEMENTS:
if (elements()->length() == 0) return 0;
- return HoleyElementsUsage(this, FixedDoubleArray::cast(store));
+ return HoleyElementsUsage(*this, FixedDoubleArray::cast(store));
case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
case SLOW_STRING_WRAPPER_ELEMENTS:
@@ -15925,10 +16293,10 @@ template <typename Derived, typename Shape>
void Dictionary<Derived, Shape>::Print(std::ostream& os) {
DisallowHeapAllocation no_gc;
ReadOnlyRoots roots = this->GetReadOnlyRoots();
- Derived* dictionary = Derived::cast(this);
+ Derived dictionary = Derived::cast(*this);
int capacity = dictionary->Capacity();
for (int i = 0; i < capacity; i++) {
- Object* k = dictionary->KeyAt(i);
+ Object k = dictionary->KeyAt(i);
if (!dictionary->ToKey(roots, i, &k)) continue;
os << "\n ";
if (k->IsString()) {
@@ -16011,7 +16379,7 @@ bool JSObject::IsDroppableApiWrapper() {
const char* Symbol::PrivateSymbolToName() const {
ReadOnlyRoots roots = GetReadOnlyRoots();
#define SYMBOL_CHECK_AND_PRINT(_, name) \
- if (this == roots.name()) return #name;
+ if (*this == roots.name()) return #name;
PRIVATE_SYMBOL_LIST_GENERATOR(SYMBOL_CHECK_AND_PRINT, /* not used */)
#undef SYMBOL_CHECK_AND_PRINT
return "UNKNOWN";
@@ -16055,15 +16423,15 @@ class StringSharedKey : public HashTableKey {
language_mode_(language_mode),
position_(position) {}
- bool IsMatch(Object* other) override {
+ bool IsMatch(Object other) override {
DisallowHeapAllocation no_allocation;
if (!other->IsFixedArray()) {
DCHECK(other->IsNumber());
uint32_t other_hash = static_cast<uint32_t>(other->Number());
return Hash() == other_hash;
}
- FixedArray* other_array = FixedArray::cast(other);
- SharedFunctionInfo* shared = SharedFunctionInfo::cast(other_array->get(0));
+ FixedArray other_array = FixedArray::cast(other);
+ SharedFunctionInfo shared = SharedFunctionInfo::cast(other_array->get(0));
if (shared != *shared_) return false;
int language_unchecked = Smi::ToInt(other_array->get(2));
DCHECK(is_valid_language_mode(language_unchecked));
@@ -16071,7 +16439,7 @@ class StringSharedKey : public HashTableKey {
if (language_mode != language_mode_) return false;
int position = Smi::ToInt(other_array->get(3));
if (position != position_) return false;
- String* source = String::cast(other_array->get(1));
+ String source = String::cast(other_array->get(1));
return source->Equals(*source_);
}
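
StringSharedKey::IsMatch compares a compound compilation-cache key in two phases: a stored entry that is only a number is matched by hash alone, otherwise the cheap identity and scalar fields are checked before the expensive string equality. Sketched with an assumed entry layout, not V8's FixedArray encoding:

    #include <cstdint>
    #include <string>

    struct StoredEntry {
      bool is_placeholder;  // entry carries only a hash, not the full tuple
      uint32_t hash;
      const void* shared;   // compared by identity
      int language_mode;
      int position;
      std::string source;
    };

    struct CompileCacheKey {
      uint32_t hash;
      const void* shared;
      int language_mode;
      int position;
      std::string source;

      bool IsMatch(const StoredEntry& other) const {
        if (other.is_placeholder) return hash == other.hash;
        if (shared != other.shared) return false;  // identity first
        if (language_mode != other.language_mode) return false;
        if (position != other.position) return false;
        return source == other.source;  // most expensive comparison last
      }
    };
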
@@ -16249,7 +16617,7 @@ MaybeHandle<Object> JSPromise::Resolve(Handle<JSPromise> promise,
promise, LanguageMode::kStrict)
.Check();
}
- isolate->EnqueueMicrotask(task);
+ isolate->native_context()->microtask_queue()->EnqueueMicrotask(*task);
// 13. Return undefined.
return isolate->factory()->undefined_value();
@@ -16266,10 +16634,10 @@ Handle<Object> JSPromise::TriggerPromiseReactions(Isolate* isolate,
// on the JSPromise in the reverse order.
{
DisallowHeapAllocation no_gc;
- Object* current = *reactions;
- Object* reversed = Smi::kZero;
+ Object current = *reactions;
+ Object reversed = Smi::kZero;
while (!current->IsSmi()) {
- Object* next = PromiseReaction::cast(current)->next();
+ Object next = PromiseReaction::cast(current)->next();
PromiseReaction::cast(current)->set_next(reversed);
reversed = current;
current = next;
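
// Aside: the loop above is the classic in-place reversal of a singly
// linked list; a minimal stand-alone sketch, with nullptr standing in for
// the Smi::kZero terminator used by the PromiseReaction chain:
struct Node { Node* next; };
Node* Reverse(Node* current) {
  Node* reversed = nullptr;
  while (current != nullptr) {
    Node* next = current->next;  // save the tail
    current->next = reversed;    // point this node backwards
    reversed = current;
    current = next;
  }
  return reversed;
}
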
@@ -16284,7 +16652,8 @@ Handle<Object> JSPromise::TriggerPromiseReactions(Isolate* isolate,
Handle<PromiseReaction> reaction = Handle<PromiseReaction>::cast(task);
reactions = handle(reaction->next(), isolate);
- STATIC_ASSERT(PromiseReaction::kSize == PromiseReactionJobTask::kSize);
+ STATIC_ASSERT(static_cast<int>(PromiseReaction::kSize) ==
+ static_cast<int>(PromiseReactionJobTask::kSize));
if (type == PromiseReaction::kFulfill) {
task->synchronized_set_map(
ReadOnlyRoots(isolate).promise_fulfill_reaction_job_task_map());
@@ -16292,24 +16661,30 @@ Handle<Object> JSPromise::TriggerPromiseReactions(Isolate* isolate,
*argument);
Handle<PromiseFulfillReactionJobTask>::cast(task)->set_context(
*isolate->native_context());
- STATIC_ASSERT(PromiseReaction::kFulfillHandlerOffset ==
- PromiseFulfillReactionJobTask::kHandlerOffset);
- STATIC_ASSERT(PromiseReaction::kPromiseOrCapabilityOffset ==
- PromiseFulfillReactionJobTask::kPromiseOrCapabilityOffset);
+ STATIC_ASSERT(
+ static_cast<int>(PromiseReaction::kFulfillHandlerOffset) ==
+ static_cast<int>(PromiseFulfillReactionJobTask::kHandlerOffset));
+ STATIC_ASSERT(
+ static_cast<int>(PromiseReaction::kPromiseOrCapabilityOffset) ==
+ static_cast<int>(
+ PromiseFulfillReactionJobTask::kPromiseOrCapabilityOffset));
} else {
DisallowHeapAllocation no_gc;
- HeapObject* handler = reaction->reject_handler();
+ HeapObject handler = reaction->reject_handler();
task->synchronized_set_map(
ReadOnlyRoots(isolate).promise_reject_reaction_job_task_map());
Handle<PromiseRejectReactionJobTask>::cast(task)->set_argument(*argument);
Handle<PromiseRejectReactionJobTask>::cast(task)->set_context(
*isolate->native_context());
Handle<PromiseRejectReactionJobTask>::cast(task)->set_handler(handler);
- STATIC_ASSERT(PromiseReaction::kPromiseOrCapabilityOffset ==
- PromiseRejectReactionJobTask::kPromiseOrCapabilityOffset);
+ STATIC_ASSERT(
+ static_cast<int>(PromiseReaction::kPromiseOrCapabilityOffset) ==
+ static_cast<int>(
+ PromiseRejectReactionJobTask::kPromiseOrCapabilityOffset));
}
- isolate->EnqueueMicrotask(Handle<PromiseReactionJobTask>::cast(task));
+ isolate->native_context()->microtask_queue()->EnqueueMicrotask(
+ *Handle<PromiseReactionJobTask>::cast(task));
}
return isolate->factory()->undefined_value();
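
// Aside: the STATIC_ASSERTs above are what make the in-place morph safe:
// a PromiseReaction may be turned into a job task just by swapping its map
// because the sizes and the shared field offsets match. A sketch of that
// invariant with hypothetical standard-layout structs (names illustrative):
#include <cstddef>
struct Reaction     { void* map; void* next;     void* handler; void* promise; };
struct ReactionTask { void* map; void* argument; void* handler; void* promise; };
static_assert(sizeof(Reaction) == sizeof(ReactionTask), "same size");
static_assert(offsetof(Reaction, handler) == offsetof(ReactionTask, handler),
              "handler stays at the same offset across the morph");
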
@@ -16317,40 +16692,75 @@ Handle<Object> JSPromise::TriggerPromiseReactions(Isolate* isolate,
namespace {
-JSRegExp::Flags RegExpFlagsFromString(Handle<String> flags, bool* success) {
- JSRegExp::Flags value = JSRegExp::kNone;
+constexpr JSRegExp::Flag kCharFlagValues[] = {
+ JSRegExp::kGlobal, // g
+ JSRegExp::kInvalid, // h
+ JSRegExp::kIgnoreCase, // i
+ JSRegExp::kInvalid, // j
+ JSRegExp::kInvalid, // k
+ JSRegExp::kInvalid, // l
+ JSRegExp::kMultiline, // m
+ JSRegExp::kInvalid, // n
+ JSRegExp::kInvalid, // o
+ JSRegExp::kInvalid, // p
+ JSRegExp::kInvalid, // q
+ JSRegExp::kInvalid, // r
+ JSRegExp::kDotAll, // s
+ JSRegExp::kInvalid, // t
+ JSRegExp::kUnicode, // u
+ JSRegExp::kInvalid, // v
+ JSRegExp::kInvalid, // w
+ JSRegExp::kInvalid, // x
+ JSRegExp::kSticky, // y
+};
+
+constexpr JSRegExp::Flag CharToFlag(uc16 flag_char) {
+ return (flag_char < 'g' || flag_char > 'y')
+ ? JSRegExp::kInvalid
+ : kCharFlagValues[flag_char - 'g'];
+}
+
+JSRegExp::Flags RegExpFlagsFromString(Isolate* isolate, Handle<String> flags,
+ bool* success) {
+ STATIC_ASSERT(CharToFlag('g') == JSRegExp::kGlobal);
+ STATIC_ASSERT(CharToFlag('i') == JSRegExp::kIgnoreCase);
+ STATIC_ASSERT(CharToFlag('m') == JSRegExp::kMultiline);
+ STATIC_ASSERT(CharToFlag('s') == JSRegExp::kDotAll);
+ STATIC_ASSERT(CharToFlag('u') == JSRegExp::kUnicode);
+ STATIC_ASSERT(CharToFlag('y') == JSRegExp::kSticky);
+
int length = flags->length();
+ if (length == 0) {
+ *success = true;
+ return JSRegExp::kNone;
+ }
// A longer flags string cannot be valid.
if (length > JSRegExp::FlagCount()) return JSRegExp::Flags(0);
- for (int i = 0; i < length; i++) {
- JSRegExp::Flag flag = JSRegExp::kNone;
- switch (flags->Get(i)) {
- case 'g':
- flag = JSRegExp::kGlobal;
- break;
- case 'i':
- flag = JSRegExp::kIgnoreCase;
- break;
- case 'm':
- flag = JSRegExp::kMultiline;
- break;
- case 's':
- flag = JSRegExp::kDotAll;
- break;
- case 'u':
- flag = JSRegExp::kUnicode;
- break;
- case 'y':
- flag = JSRegExp::kSticky;
- break;
- default:
- return JSRegExp::Flags(0);
+ // Initialize {value} to {kInvalid} to allow 2-in-1 duplicate/invalid check.
+ JSRegExp::Flags value = JSRegExp::kInvalid;
+ if (flags->IsSeqOneByteString()) {
+ DisallowHeapAllocation no_gc;
+ SeqOneByteString seq_flags = SeqOneByteString::cast(*flags);
+ for (int i = 0; i < length; i++) {
+ JSRegExp::Flag flag = CharToFlag(seq_flags.SeqOneByteStringGet(i));
+ // Duplicate or invalid flag.
+ if (value & flag) return JSRegExp::Flags(0);
+ value |= flag;
+ }
+ } else {
+ flags = String::Flatten(isolate, flags);
+ DisallowHeapAllocation no_gc;
+ String::FlatContent flags_content = flags->GetFlatContent(no_gc);
+ for (int i = 0; i < length; i++) {
+ JSRegExp::Flag flag = CharToFlag(flags_content.Get(i));
+ // Duplicate or invalid flag.
+ if (value & flag) return JSRegExp::Flags(0);
+ value |= flag;
}
- // Duplicate flag.
- if (value & flag) return JSRegExp::Flags(0);
- value |= flag;
}
*success = true;
+ // Drop the initially set {kInvalid} bit.
+ value ^= JSRegExp::kInvalid;
return value;
}
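
// Aside: a self-contained sketch of the table-driven parse above, with toy
// flag values; seeding the accumulator with kInvalid lets a single
// (value & flag) test reject both unknown characters and duplicate flags:
enum Flag : int { kGlobal = 1, kIgnoreCase = 2, kMultiline = 4,
                  kInvalid = 1 << 30 };
constexpr int CharToFlag(char c) {
  return c == 'g' ? kGlobal : c == 'i' ? kIgnoreCase
                            : c == 'm' ? kMultiline : kInvalid;
}
bool ParseFlags(const char* s, int* out) {
  int value = kInvalid;              // 2-in-1 duplicate/invalid sentinel
  for (; *s != '\0'; s++) {
    int flag = CharToFlag(*s);
    if (value & flag) return false;  // duplicate or invalid flag
    value |= flag;
  }
  *out = value ^ kInvalid;           // drop the initially set sentinel bit
  return true;
}
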
@@ -16374,33 +16784,52 @@ Handle<JSRegExp> JSRegExp::Copy(Handle<JSRegExp> regexp) {
return Handle<JSRegExp>::cast(isolate->factory()->CopyJSObject(regexp));
}
+namespace {
template <typename Char>
-inline int CountRequiredEscapes(Handle<String> source) {
+int CountRequiredEscapes(Handle<String> source) {
DisallowHeapAllocation no_gc;
int escapes = 0;
- Vector<const Char> src = source->GetCharVector<Char>();
+ Vector<const Char> src = source->GetCharVector<Char>(no_gc);
for (int i = 0; i < src.length(); i++) {
- if (src[i] == '\\') {
+ const Char c = src[i];
+ if (c == '\\') {
// Escape. Skip next character.
i++;
- } else if (src[i] == '/') {
+ } else if (c == '/') {
// An unescaped forward slash needs escaping.
escapes++;
+ } else if (c == '\n') {
+ escapes++;
+ } else if (c == '\r') {
+ escapes++;
+ } else if (static_cast<int>(c) == 0x2028) {
+ escapes += std::strlen("\\u2028") - 1;
+ } else if (static_cast<int>(c) == 0x2029) {
+ escapes += std::strlen("\\u2029") - 1;
+ } else {
+ DCHECK(!unibrow::IsLineTerminator(static_cast<unibrow::uchar>(c)));
}
}
return escapes;
}
+template <typename Char>
+void WriteStringToCharVector(Vector<Char> v, int* d, const char* string) {
+ int s = 0;
+ while (string[s] != '\0') v[(*d)++] = string[s++];
+}
template <typename Char, typename StringType>
-inline Handle<StringType> WriteEscapedRegExpSource(Handle<String> source,
- Handle<StringType> result) {
+Handle<StringType> WriteEscapedRegExpSource(Handle<String> source,
+ Handle<StringType> result) {
DisallowHeapAllocation no_gc;
- Vector<const Char> src = source->GetCharVector<Char>();
- Vector<Char> dst(result->GetChars(), result->length());
+ Vector<const Char> src = source->GetCharVector<Char>(no_gc);
+ Vector<Char> dst(result->GetChars(no_gc), result->length());
int s = 0;
int d = 0;
+ // TODO(v8:1982): Fully implement
+ // https://tc39.github.io/ecma262/#sec-escaperegexppattern
while (s < src.length()) {
if (src[s] == '\\') {
// Escape. Copy this and next character.
@@ -16409,6 +16838,22 @@ inline Handle<StringType> WriteEscapedRegExpSource(Handle<String> source,
} else if (src[s] == '/') {
// An unescaped forward slash needs escaping.
dst[d++] = '\\';
+ } else if (src[s] == '\n') {
+ WriteStringToCharVector(dst, &d, "\\n");
+ s++;
+ continue;
+ } else if (src[s] == '\r') {
+ WriteStringToCharVector(dst, &d, "\\r");
+ s++;
+ continue;
+ } else if (static_cast<int>(src[s]) == 0x2028) {
+ WriteStringToCharVector(dst, &d, "\\u2028");
+ s++;
+ continue;
+ } else if (static_cast<int>(src[s]) == 0x2029) {
+ WriteStringToCharVector(dst, &d, "\\u2029");
+ s++;
+ continue;
}
dst[d++] = src[s++];
}
@@ -16416,12 +16861,11 @@ inline Handle<StringType> WriteEscapedRegExpSource(Handle<String> source,
return result;
}
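
// Aside: CountRequiredEscapes and WriteEscapedRegExpSource form a two-pass
// scheme: pass one sizes the output exactly, pass two fills it with no
// reallocation. The same shape on std::string, handling only '\n' for
// brevity:
#include <string>
std::string EscapeNewlines(const std::string& src) {
  size_t extra = 0;
  for (char c : src) {
    if (c == '\n') extra++;          // '\n' becomes the two chars "\\n"
  }
  std::string out;
  out.reserve(src.size() + extra);   // exact final size, no regrowth
  for (char c : src) {
    if (c == '\n') {
      out += "\\n";
    } else {
      out += c;
    }
  }
  return out;
}
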
-
MaybeHandle<String> EscapeRegExpSource(Isolate* isolate,
Handle<String> source) {
DCHECK(source->IsFlat());
if (source->length() == 0) return isolate->factory()->query_colon_string();
- bool one_byte = source->IsOneByteRepresentationUnderneath();
+ bool one_byte = String::IsOneByteRepresentationUnderneath(*source);
int escapes = one_byte ? CountRequiredEscapes<uint8_t>(source)
: CountRequiredEscapes<uc16>(source);
if (escapes == 0) return source;
@@ -16441,6 +16885,7 @@ MaybeHandle<String> EscapeRegExpSource(Isolate* isolate,
}
}
+} // namespace
// static
MaybeHandle<JSRegExp> JSRegExp::Initialize(Handle<JSRegExp> regexp,
@@ -16448,7 +16893,7 @@ MaybeHandle<JSRegExp> JSRegExp::Initialize(Handle<JSRegExp> regexp,
Handle<String> flags_string) {
Isolate* isolate = regexp->GetIsolate();
bool success = false;
- Flags flags = RegExpFlagsFromString(flags_string, &success);
+ Flags flags = RegExpFlagsFromString(isolate, flags_string, &success);
if (!success) {
THROW_NEW_ERROR(
isolate,
@@ -16480,8 +16925,8 @@ MaybeHandle<JSRegExp> JSRegExp::Initialize(Handle<JSRegExp> regexp,
regexp->set_source(*escaped_source);
regexp->set_flags(Smi::FromInt(flags));
- Map* map = regexp->map();
- Object* constructor = map->GetConstructor();
+ Map map = regexp->map();
+ Object constructor = map->GetConstructor();
if (constructor->IsJSFunction() &&
JSFunction::cast(constructor)->initial_map() == map) {
// If we still have the original map, set in-object properties directly.
@@ -16491,9 +16936,9 @@ MaybeHandle<JSRegExp> JSRegExp::Initialize(Handle<JSRegExp> regexp,
// Map has changed, so use generic, but slower, method.
RETURN_ON_EXCEPTION(
isolate,
- JSReceiver::SetProperty(isolate, regexp, factory->lastIndex_string(),
- Handle<Smi>(Smi::kZero, isolate),
- LanguageMode::kStrict),
+ Object::SetProperty(isolate, regexp, factory->lastIndex_string(),
+ Handle<Smi>(Smi::zero(), isolate),
+ LanguageMode::kStrict),
JSRegExp);
}
@@ -16514,14 +16959,14 @@ class RegExpKey : public HashTableKey {
// stored value is stored where the key should be. IsMatch then
// compares the search key to the found object, rather than comparing
// a key to a key.
- bool IsMatch(Object* obj) override {
- FixedArray* val = FixedArray::cast(obj);
+ bool IsMatch(Object obj) override {
+ FixedArray val = FixedArray::cast(obj);
return string_->Equals(String::cast(val->get(JSRegExp::kSourceIndex)))
&& (flags_ == val->get(JSRegExp::kFlagsIndex));
}
Handle<String> string_;
- Smi* flags_;
+ Smi flags_;
};
Handle<String> OneByteStringKey::AsHandle(Isolate* isolate) {
@@ -16537,13 +16982,12 @@ Handle<String> SeqOneByteSubStringKey::AsHandle(Isolate* isolate) {
string_, from_, length_, HashField());
}
-
-bool SeqOneByteSubStringKey::IsMatch(Object* string) {
- Vector<const uint8_t> chars(string_->GetChars() + from_, length_);
+bool SeqOneByteSubStringKey::IsMatch(Object string) {
+ DisallowHeapAllocation no_gc;
+ Vector<const uint8_t> chars(string_->GetChars(no_gc) + from_, length_);
return String::cast(string)->IsOneByteEqualTo(chars);
}
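
// Aside: the key match above compares a (string, offset, length) slice
// against a candidate without copying; the raw-buffer equivalent is just a
// length check plus memcmp:
#include <cstdint>
#include <cstring>
bool SliceMatches(const uint8_t* key_chars, int from, int length,
                  const uint8_t* candidate, int candidate_length) {
  return candidate_length == length &&
         std::memcmp(key_chars + from, candidate, length) == 0;
}
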
-
// InternalizedStringKey carries a string/internalized-string object as key.
class InternalizedStringKey : public StringTableKey {
public:
@@ -16556,7 +17000,7 @@ class InternalizedStringKey : public StringTableKey {
set_hash_field(string->hash_field());
}
- bool IsMatch(Object* string) override {
+ bool IsMatch(Object string) override {
return string_->SlowEquals(String::cast(string));
}
@@ -16592,13 +17036,13 @@ class InternalizedStringKey : public StringTableKey {
template <typename Derived, typename Shape>
void HashTable<Derived, Shape>::IteratePrefix(ObjectVisitor* v) {
- BodyDescriptorBase::IteratePointers(this, 0, kElementsStartOffset, v);
+ BodyDescriptorBase::IteratePointers(*this, 0, kElementsStartOffset, v);
}
template <typename Derived, typename Shape>
void HashTable<Derived, Shape>::IterateElements(ObjectVisitor* v) {
- BodyDescriptorBase::IteratePointers(this, kElementsStartOffset,
- kHeaderSize + length() * kPointerSize, v);
+ BodyDescriptorBase::IteratePointers(*this, kElementsStartOffset,
+ SizeFor(length()), v);
}
template <typename Derived, typename Shape>
@@ -16635,7 +17079,7 @@ Handle<Derived> HashTable<Derived, Shape>::NewInternal(
}
template <typename Derived, typename Shape>
-void HashTable<Derived, Shape>::Rehash(Isolate* isolate, Derived* new_table) {
+void HashTable<Derived, Shape>::Rehash(Isolate* isolate, Derived new_table) {
DisallowHeapAllocation no_gc;
WriteBarrierMode mode = new_table->GetWriteBarrierMode(no_gc);
@@ -16651,7 +17095,7 @@ void HashTable<Derived, Shape>::Rehash(Isolate* isolate, Derived* new_table) {
ReadOnlyRoots roots(isolate);
for (int i = 0; i < capacity; i++) {
uint32_t from_index = EntryToIndex(i);
- Object* k = this->get(from_index);
+ Object k = this->get(from_index);
if (!Shape::IsLive(roots, k)) continue;
uint32_t hash = Shape::HashForObject(isolate, k);
uint32_t insertion_index =
@@ -16665,7 +17109,7 @@ void HashTable<Derived, Shape>::Rehash(Isolate* isolate, Derived* new_table) {
}
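
// Aside: the copying Rehash above walks every old slot, skips dead keys,
// and re-probes each live key into the new table. A toy linear-probing
// sketch (0 marks an empty slot; capacities are powers of two and the new
// table is assumed to have free slots):
#include <cstdint>
#include <vector>
uint32_t ToyHash(uint32_t key) { return key * 2654435761u; }
uint32_t FindInsertionSlot(uint32_t hash, const std::vector<uint32_t>& keys) {
  uint32_t mask = static_cast<uint32_t>(keys.size()) - 1;
  uint32_t slot = hash & mask;
  while (keys[slot] != 0) slot = (slot + 1) & mask;
  return slot;
}
std::vector<uint32_t> RehashInto(const std::vector<uint32_t>& old_keys,
                                 uint32_t new_capacity) {
  std::vector<uint32_t> new_keys(new_capacity, 0);
  for (uint32_t key : old_keys) {
    if (key == 0) continue;  // hole or deleted entry
    new_keys[FindInsertionSlot(ToyHash(key), new_keys)] = key;
  }
  return new_keys;
}
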
template <typename Derived, typename Shape>
-uint32_t HashTable<Derived, Shape>::EntryForProbe(Isolate* isolate, Object* k,
+uint32_t HashTable<Derived, Shape>::EntryForProbe(Isolate* isolate, Object k,
int probe,
uint32_t expected) {
uint32_t hash = Shape::HashForObject(isolate, k);
@@ -16683,7 +17127,7 @@ void HashTable<Derived, Shape>::Swap(uint32_t entry1, uint32_t entry2,
WriteBarrierMode mode) {
int index1 = EntryToIndex(entry1);
int index2 = EntryToIndex(entry2);
- Object* temp[Shape::kEntrySize];
+ Object temp[Shape::kEntrySize];
for (int j = 0; j < Shape::kEntrySize; j++) {
temp[j] = get(index1 + j);
}
@@ -16707,11 +17151,11 @@ void HashTable<Derived, Shape>::Rehash(Isolate* isolate) {
// are placed correctly. Other elements might need to be moved.
done = true;
for (uint32_t current = 0; current < capacity; current++) {
- Object* current_key = KeyAt(current);
+ Object current_key = KeyAt(current);
if (!Shape::IsLive(roots, current_key)) continue;
uint32_t target = EntryForProbe(isolate, current_key, probe, current);
if (current == target) continue;
- Object* target_key = KeyAt(target);
+ Object target_key = KeyAt(target);
if (!Shape::IsLive(roots, target_key) ||
EntryForProbe(isolate, target_key, probe, target) != target) {
// Put the current element into the correct position.
@@ -16726,8 +17170,8 @@ void HashTable<Derived, Shape>::Rehash(Isolate* isolate) {
}
}
// Wipe deleted entries.
- Object* the_hole = roots.the_hole_value();
- Object* undefined = roots.undefined_value();
+ Object the_hole = roots.the_hole_value();
+ Object undefined = roots.undefined_value();
for (uint32_t current = 0; current < capacity; current++) {
if (KeyAt(current) == the_hole) {
set(EntryToIndex(current) + kEntryKeyIndex, undefined);
@@ -16873,8 +17317,8 @@ class TwoCharHashTableKey : public StringTableKey {
TwoCharHashTableKey(uint16_t c1, uint16_t c2, uint64_t seed)
: StringTableKey(ComputeHashField(c1, c2, seed)), c1_(c1), c2_(c2) {}
- bool IsMatch(Object* o) override {
- String* other = String::cast(o);
+ bool IsMatch(Object o) override {
+ String other = String::cast(o);
if (other->length() != 2) return false;
if (other->Get(0) != c1_) return false;
return other->Get(1) == c2_;
@@ -16945,9 +17389,9 @@ void StringTable::EnsureCapacityForDeserialization(Isolate* isolate,
namespace {
template <class StringClass>
-void MigrateExternalStringResource(Isolate* isolate, String* from, String* to) {
- StringClass* cast_from = StringClass::cast(from);
- StringClass* cast_to = StringClass::cast(to);
+void MigrateExternalStringResource(Isolate* isolate, String from, String to) {
+ StringClass cast_from = StringClass::cast(from);
+ StringClass cast_to = StringClass::cast(to);
const typename StringClass::Resource* to_resource = cast_to->resource();
if (to_resource == nullptr) {
// |to| is a just-created internalized copy of |from|. Migrate the resource.
@@ -16963,7 +17407,7 @@ void MigrateExternalStringResource(Isolate* isolate, String* from, String* to) {
}
}
-void MakeStringThin(String* string, String* internalized, Isolate* isolate) {
+void MakeStringThin(String string, String internalized, Isolate* isolate) {
DCHECK_NE(string, internalized);
DCHECK(internalized->IsInternalizedString());
@@ -16990,7 +17434,7 @@ void MakeStringThin(String* string, String* internalized, Isolate* isolate) {
: isolate->factory()->thin_string_map();
DCHECK_GE(old_size, ThinString::kSize);
string->synchronized_set_map(*map);
- ThinString* thin = ThinString::cast(string);
+ ThinString thin = ThinString::cast(string);
thin->set_actual(internalized);
Address thin_end = thin->address() + ThinString::kSize;
int size_delta = old_size - ThinString::kSize;
@@ -17021,7 +17465,8 @@ Handle<String> StringTable::LookupString(Isolate* isolate,
cons->set_first(isolate, *result);
cons->set_second(isolate, ReadOnlyRoots(isolate).empty_string());
} else if (string->IsSlicedString()) {
- STATIC_ASSERT(ConsString::kSize == SlicedString::kSize);
+ STATIC_ASSERT(static_cast<int>(ConsString::kSize) ==
+ static_cast<int>(SlicedString::kSize));
DisallowHeapAllocation no_gc;
bool one_byte = result->IsOneByteRepresentation();
Handle<Map> map = one_byte
@@ -17090,7 +17535,7 @@ namespace {
class StringTableNoAllocateKey : public StringTableKey {
public:
- StringTableNoAllocateKey(String* string, uint64_t seed)
+ StringTableNoAllocateKey(String string, uint64_t seed)
: StringTableKey(0), string_(string) {
StringShape shape(string);
one_byte_ = shape.HasOnlyOneByteChars();
@@ -17140,14 +17585,15 @@ class StringTableNoAllocateKey : public StringTableKey {
}
}
- bool IsMatch(Object* otherstring) override {
- String* other = String::cast(otherstring);
+ bool IsMatch(Object otherstring) override {
+ String other = String::cast(otherstring);
DCHECK(other->IsInternalizedString());
DCHECK(other->IsFlat());
if (Hash() != other->Hash()) return false;
int len = string_->length();
if (len != other->length()) return false;
+ DisallowHeapAllocation no_gc;
if (!special_flattening_) {
if (string_->Get(0) != other->Get(0)) return false;
if (string_->IsFlat()) {
@@ -17155,15 +17601,15 @@ class StringTableNoAllocateKey : public StringTableKey {
StringShape shape2(other);
if (shape1.encoding_tag() == kOneByteStringTag &&
shape2.encoding_tag() == kOneByteStringTag) {
- String::FlatContent flat1 = string_->GetFlatContent();
- String::FlatContent flat2 = other->GetFlatContent();
+ String::FlatContent flat1 = string_->GetFlatContent(no_gc);
+ String::FlatContent flat2 = other->GetFlatContent(no_gc);
return CompareRawStringContents(flat1.ToOneByteVector().start(),
flat2.ToOneByteVector().start(), len);
}
if (shape1.encoding_tag() == kTwoByteStringTag &&
shape2.encoding_tag() == kTwoByteStringTag) {
- String::FlatContent flat1 = string_->GetFlatContent();
- String::FlatContent flat2 = other->GetFlatContent();
+ String::FlatContent flat1 = string_->GetFlatContent(no_gc);
+ String::FlatContent flat2 = other->GetFlatContent(no_gc);
return CompareRawStringContents(flat1.ToUC16Vector().start(),
flat2.ToUC16Vector().start(), len);
}
@@ -17172,7 +17618,7 @@ class StringTableNoAllocateKey : public StringTableKey {
return comparator.Equals(string_, other);
}
- String::FlatContent flat_content = other->GetFlatContent();
+ String::FlatContent flat_content = other->GetFlatContent(no_gc);
if (one_byte_) {
if (flat_content.IsOneByte()) {
return CompareRawStringContents(
@@ -17203,7 +17649,7 @@ class StringTableNoAllocateKey : public StringTableKey {
}
private:
- String* string_;
+ String string_;
bool one_byte_;
bool special_flattening_;
union {
@@ -17219,11 +17665,12 @@ class StringTableNoAllocateKey : public StringTableKey {
} // namespace
// static
-Object* StringTable::LookupStringIfExists_NoAllocate(Isolate* isolate,
- String* string) {
+Address StringTable::LookupStringIfExists_NoAllocate(Isolate* isolate,
+ Address raw_string) {
DisallowHeapAllocation no_gc;
+ String string = String::cast(Object(raw_string));
Heap* heap = isolate->heap();
- StringTable* table = heap->string_table();
+ StringTable table = heap->string_table();
StringTableNoAllocateKey key(string, heap->HashSeed());
@@ -17238,35 +17685,34 @@ Object* StringTable::LookupStringIfExists_NoAllocate(Isolate* isolate,
!String::ArrayIndexValueBits::is_valid(ResultSentinel::kNotFound));
if (Name::ContainsCachedArrayIndex(hash)) {
- return Smi::FromInt(String::ArrayIndexValueBits::decode(hash));
+ return Smi::FromInt(String::ArrayIndexValueBits::decode(hash)).ptr();
}
if ((hash & Name::kIsNotArrayIndexMask) == 0) {
// It is an index, but it's not cached.
- return Smi::FromInt(ResultSentinel::kUnsupported);
+ return Smi::FromInt(ResultSentinel::kUnsupported).ptr();
}
DCHECK(!string->IsInternalizedString());
int entry = table->FindEntry(ReadOnlyRoots(isolate), &key, key.Hash());
if (entry != kNotFound) {
- String* internalized = String::cast(table->KeyAt(entry));
+ String internalized = String::cast(table->KeyAt(entry));
if (FLAG_thin_strings) {
MakeStringThin(string, internalized, isolate);
}
- return internalized;
+ return internalized.ptr();
}
// A string that's not an array index, and not in the string table,
// cannot have been used as a property name before.
- return Smi::FromInt(ResultSentinel::kNotFound);
+ return Smi::FromInt(ResultSentinel::kNotFound).ptr();
}
-String* StringTable::ForwardStringIfExists(Isolate* isolate,
- StringTableKey* key,
- String* string) {
+String StringTable::ForwardStringIfExists(Isolate* isolate, StringTableKey* key,
+ String string) {
Handle<StringTable> table = isolate->factory()->string_table();
int entry = table->FindEntry(isolate, key);
- if (entry == kNotFound) return nullptr;
+ if (entry == kNotFound) return String();
- String* canonical = String::cast(table->KeyAt(entry));
+ String canonical = String::cast(table->KeyAt(entry));
if (canonical != string) MakeStringThin(string, canonical, isolate);
return canonical;
}
@@ -17311,17 +17757,17 @@ const int kLiteralInitialLength = 2;
const int kLiteralContextOffset = 0;
const int kLiteralLiteralsOffset = 1;
-int SearchLiteralsMapEntry(CompilationCacheTable* cache, int cache_entry,
- Context* native_context) {
+int SearchLiteralsMapEntry(CompilationCacheTable cache, int cache_entry,
+ Context native_context) {
DisallowHeapAllocation no_gc;
DCHECK(native_context->IsNativeContext());
- Object* obj = cache->get(cache_entry);
+ Object obj = cache->get(cache_entry);
// Check that there's no confusion between FixedArray and WeakFixedArray (the
// object used to be a FixedArray here).
DCHECK(!obj->IsFixedArray());
if (obj->IsWeakFixedArray()) {
- WeakFixedArray* literals_map = WeakFixedArray::cast(obj);
+ WeakFixedArray literals_map = WeakFixedArray::cast(obj);
int length = literals_map->length();
for (int i = 0; i < length; i += kLiteralEntryLength) {
DCHECK(literals_map->Get(i + kLiteralContextOffset)->IsWeakOrCleared());
@@ -17343,7 +17789,7 @@ void AddToFeedbackCellsMap(Handle<CompilationCacheTable> cache, int cache_entry,
Handle<WeakFixedArray> new_literals_map;
int entry;
- Object* obj = cache->get(cache_entry);
+ Object obj = cache->get(cache_entry);
// Check that there's no confusion between FixedArray and WeakFixedArray (the
// object used to be a FixedArray here).
@@ -17388,7 +17834,7 @@ void AddToFeedbackCellsMap(Handle<CompilationCacheTable> cache, int cache_entry,
#ifdef DEBUG
for (int i = 0; i < new_literals_map->length(); i += kLiteralEntryLength) {
- MaybeObject* object = new_literals_map->Get(i + kLiteralContextOffset);
+ MaybeObject object = new_literals_map->Get(i + kLiteralContextOffset);
DCHECK(object->IsCleared() ||
object->GetHeapObjectAssumeWeak()->IsNativeContext());
object = new_literals_map->Get(i + kLiteralLiteralsOffset);
@@ -17397,47 +17843,50 @@ void AddToFeedbackCellsMap(Handle<CompilationCacheTable> cache, int cache_entry,
}
#endif
- Object* old_literals_map = cache->get(cache_entry);
+ Object old_literals_map = cache->get(cache_entry);
if (old_literals_map != *new_literals_map) {
cache->set(cache_entry, *new_literals_map);
}
}
-FeedbackCell* SearchLiteralsMap(CompilationCacheTable* cache, int cache_entry,
- Context* native_context) {
- FeedbackCell* result = nullptr;
+FeedbackCell SearchLiteralsMap(CompilationCacheTable cache, int cache_entry,
+ Context native_context) {
+ FeedbackCell result;
int entry = SearchLiteralsMapEntry(cache, cache_entry, native_context);
if (entry >= 0) {
- WeakFixedArray* literals_map =
- WeakFixedArray::cast(cache->get(cache_entry));
+ WeakFixedArray literals_map = WeakFixedArray::cast(cache->get(cache_entry));
DCHECK_LE(entry + kLiteralEntryLength, literals_map->length());
- MaybeObject* object = literals_map->Get(entry + kLiteralLiteralsOffset);
+ MaybeObject object = literals_map->Get(entry + kLiteralLiteralsOffset);
- result = object->IsCleared()
- ? nullptr
- : FeedbackCell::cast(object->GetHeapObjectAssumeWeak());
+ if (!object->IsCleared()) {
+ result = FeedbackCell::cast(object->GetHeapObjectAssumeWeak());
+ }
}
- DCHECK(result == nullptr || result->IsFeedbackCell());
+ DCHECK(result.is_null() || result->IsFeedbackCell());
return result;
}
} // namespace
MaybeHandle<SharedFunctionInfo> CompilationCacheTable::LookupScript(
- Handle<String> src, Handle<Context> native_context,
- LanguageMode language_mode) {
+ Handle<CompilationCacheTable> table, Handle<String> src,
+ Handle<Context> native_context, LanguageMode language_mode) {
// We use the empty function SFI as part of the key. Although the
// empty_function is native context dependent, the SFI is de-duped on
// snapshot builds by the PartialSnapshotCache, and so this does not prevent
// reuse of scripts in the compilation cache across native contexts.
Handle<SharedFunctionInfo> shared(native_context->empty_function()->shared(),
native_context->GetIsolate());
+ Isolate* isolate = native_context->GetIsolate();
+ src = String::Flatten(isolate, src);
StringSharedKey key(src, shared, language_mode, kNoSourcePosition);
- int entry = FindEntry(GetIsolate(), &key);
+ int entry = table->FindEntry(isolate, &key);
if (entry == kNotFound) return MaybeHandle<SharedFunctionInfo>();
int index = EntryToIndex(entry);
- if (!get(index)->IsFixedArray()) return MaybeHandle<SharedFunctionInfo>();
- Object* obj = get(index + 1);
+ if (!table->get(index)->IsFixedArray()) {
+ return MaybeHandle<SharedFunctionInfo>();
+ }
+ Object obj = table->get(index + 1);
if (obj->IsSharedFunctionInfo()) {
return handle(SharedFunctionInfo::cast(obj), native_context->GetIsolate());
}
@@ -17445,18 +17894,21 @@ MaybeHandle<SharedFunctionInfo> CompilationCacheTable::LookupScript(
}
InfoCellPair CompilationCacheTable::LookupEval(
- Handle<String> src, Handle<SharedFunctionInfo> outer_info,
- Handle<Context> native_context, LanguageMode language_mode, int position) {
+ Handle<CompilationCacheTable> table, Handle<String> src,
+ Handle<SharedFunctionInfo> outer_info, Handle<Context> native_context,
+ LanguageMode language_mode, int position) {
InfoCellPair empty_result;
+ Isolate* isolate = native_context->GetIsolate();
+ src = String::Flatten(isolate, src);
StringSharedKey key(src, outer_info, language_mode, position);
- int entry = FindEntry(GetIsolate(), &key);
+ int entry = table->FindEntry(isolate, &key);
if (entry == kNotFound) return empty_result;
int index = EntryToIndex(entry);
- if (!get(index)->IsFixedArray()) return empty_result;
- Object* obj = get(EntryToIndex(entry) + 1);
+ if (!table->get(index)->IsFixedArray()) return empty_result;
+ Object obj = table->get(EntryToIndex(entry) + 1);
if (obj->IsSharedFunctionInfo()) {
- FeedbackCell* feedback_cell =
- SearchLiteralsMap(this, EntryToIndex(entry) + 2, *native_context);
+ FeedbackCell feedback_cell =
+ SearchLiteralsMap(*table, EntryToIndex(entry) + 2, *native_context);
return InfoCellPair(SharedFunctionInfo::cast(obj), feedback_cell);
}
return empty_result;
@@ -17483,6 +17935,7 @@ Handle<CompilationCacheTable> CompilationCacheTable::PutScript(
// reuse of scripts in the compilation cache across native contexts.
Handle<SharedFunctionInfo> shared(native_context->empty_function()->shared(),
isolate);
+ src = String::Flatten(isolate, src);
StringSharedKey key(src, shared, language_mode, kNoSourcePosition);
Handle<Object> k = key.AsHandle(isolate);
cache = EnsureCapacity(isolate, cache, 1);
@@ -17499,6 +17952,7 @@ Handle<CompilationCacheTable> CompilationCacheTable::PutEval(
Handle<Context> native_context, Handle<FeedbackCell> feedback_cell,
int position) {
Isolate* isolate = native_context->GetIsolate();
+ src = String::Flatten(isolate, src);
StringSharedKey key(src, outer_info, value->language_mode(), position);
{
Handle<Object> k = key.AsHandle(isolate);
@@ -17542,26 +17996,26 @@ Handle<CompilationCacheTable> CompilationCacheTable::PutRegExp(
void CompilationCacheTable::Age() {
DisallowHeapAllocation no_allocation;
- Object* the_hole_value = GetReadOnlyRoots().the_hole_value();
+ Object the_hole_value = GetReadOnlyRoots().the_hole_value();
for (int entry = 0, size = Capacity(); entry < size; entry++) {
int entry_index = EntryToIndex(entry);
int value_index = entry_index + 1;
if (get(entry_index)->IsNumber()) {
- Smi* count = Smi::cast(get(value_index));
+ Smi count = Smi::cast(get(value_index));
count = Smi::FromInt(count->value() - 1);
if (count->value() == 0) {
- NoWriteBarrierSet(this, entry_index, the_hole_value);
- NoWriteBarrierSet(this, value_index, the_hole_value);
+ NoWriteBarrierSet(*this, entry_index, the_hole_value);
+ NoWriteBarrierSet(*this, value_index, the_hole_value);
ElementRemoved();
} else {
- NoWriteBarrierSet(this, value_index, count);
+ NoWriteBarrierSet(*this, value_index, count);
}
} else if (get(entry_index)->IsFixedArray()) {
- SharedFunctionInfo* info = SharedFunctionInfo::cast(get(value_index));
+ SharedFunctionInfo info = SharedFunctionInfo::cast(get(value_index));
if (info->IsInterpreted() && info->GetBytecodeArray()->IsOld()) {
for (int i = 0; i < kEntrySize; i++) {
- NoWriteBarrierSet(this, entry_index + i, the_hole_value);
+ NoWriteBarrierSet(*this, entry_index + i, the_hole_value);
}
ElementRemoved();
}
@@ -17569,16 +18023,15 @@ void CompilationCacheTable::Age() {
}
}
-
-void CompilationCacheTable::Remove(Object* value) {
+void CompilationCacheTable::Remove(Object value) {
DisallowHeapAllocation no_allocation;
- Object* the_hole_value = GetReadOnlyRoots().the_hole_value();
+ Object the_hole_value = GetReadOnlyRoots().the_hole_value();
for (int entry = 0, size = Capacity(); entry < size; entry++) {
int entry_index = EntryToIndex(entry);
int value_index = entry_index + 1;
if (get(value_index) == value) {
for (int i = 0; i < kEntrySize; i++) {
- NoWriteBarrierSet(this, entry_index + i, the_hole_value);
+ NoWriteBarrierSet(*this, entry_index + i, the_hole_value);
}
ElementRemoved();
}
@@ -17686,9 +18139,12 @@ Handle<Derived> BaseNameDictionary<Derived, Shape>::Add(
// SetNextEnumerationIndex.
int index = dictionary->NextEnumerationIndex();
details = details.set_index(index);
+ dictionary = AddNoUpdateNextEnumerationIndex(isolate, dictionary, key, value,
+ details, entry_out);
+ // Update enumeration index here in order to avoid potential modification of
+ // the canonical empty dictionary which lives in read only space.
dictionary->SetNextEnumerationIndex(index + 1);
- return AddNoUpdateNextEnumerationIndex(isolate, dictionary, key, value,
- details, entry_out);
+ return dictionary;
}
template <typename Derived, typename Shape>
@@ -17727,7 +18183,7 @@ bool NumberDictionary::HasComplexElements() {
ReadOnlyRoots roots = GetReadOnlyRoots();
int capacity = this->Capacity();
for (int i = 0; i < capacity; i++) {
- Object* k;
+ Object k;
if (!this->ToKey(roots, i, &k)) continue;
PropertyDetails details = this->DetailsAt(i);
if (details.kind() == kAccessor) return true;
@@ -17747,13 +18203,13 @@ void NumberDictionary::UpdateMaxNumberKey(uint32_t key,
// elements.
if (key > kRequiresSlowElementsLimit) {
if (!dictionary_holder.is_null()) {
- dictionary_holder->RequireSlowElements(this);
+ dictionary_holder->RequireSlowElements(*this);
}
set_requires_slow_elements();
return;
}
// Update max key value.
- Object* max_index_object = get(kMaxNumberKeyIndex);
+ Object max_index_object = get(kMaxNumberKeyIndex);
if (!max_index_object->IsSmi() || max_number_key() < key) {
FixedArray::set(kMaxNumberKeyIndex,
Smi::FromInt(key << kRequiresSlowElementsTagSize));
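
// Aside: the shift above packs the maximum key and the "requires slow
// elements" flag into one Smi. A sketch, assuming a one-bit tag as the
// constant names suggest:
#include <cstdint>
constexpr int kTagSize = 1;      // stand-in for kRequiresSlowElementsTagSize
constexpr uint32_t kTagBit = 1;  // stand-in for the flag bit
uint32_t Pack(uint32_t max_key, bool requires_slow) {
  return (max_key << kTagSize) | (requires_slow ? kTagBit : 0);
}
uint32_t MaxKey(uint32_t packed) { return packed >> kTagSize; }
bool RequiresSlow(uint32_t packed) { return (packed & kTagBit) != 0; }
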
@@ -17768,14 +18224,14 @@ Handle<NumberDictionary> NumberDictionary::Set(
return AtPut(isolate, dictionary, key, value, details);
}
-void NumberDictionary::CopyValuesTo(FixedArray* elements) {
+void NumberDictionary::CopyValuesTo(FixedArray elements) {
ReadOnlyRoots roots = GetReadOnlyRoots();
int pos = 0;
int capacity = this->Capacity();
DisallowHeapAllocation no_gc;
WriteBarrierMode mode = elements->GetWriteBarrierMode(no_gc);
for (int i = 0; i < capacity; i++) {
- Object* k;
+ Object k;
if (this->ToKey(roots, i, &k)) {
elements->set(pos++, this->ValueAt(i), mode);
}
@@ -17789,7 +18245,7 @@ int Dictionary<Derived, Shape>::NumberOfEnumerableProperties() {
int capacity = this->Capacity();
int result = 0;
for (int i = 0; i < capacity; i++) {
- Object* k;
+ Object k;
if (!this->ToKey(roots, i, &k)) continue;
if (k->FilterKey(ENUMERABLE_STRINGS)) continue;
PropertyDetails details = this->DetailsAt(i);
@@ -17802,14 +18258,15 @@ int Dictionary<Derived, Shape>::NumberOfEnumerableProperties() {
template <typename Dictionary>
struct EnumIndexComparator {
- explicit EnumIndexComparator(Dictionary* dict) : dict(dict) {}
- bool operator()(const base::AtomicElement<Smi*>& a,
- const base::AtomicElement<Smi*>& b) {
- PropertyDetails da(dict->DetailsAt(a.value()->value()));
- PropertyDetails db(dict->DetailsAt(b.value()->value()));
+ explicit EnumIndexComparator(Dictionary dict) : dict(dict) {}
+ bool operator()(Tagged_t a, Tagged_t b) {
+ // TODO(ishell): revisit the code below
+ STATIC_ASSERT(kTaggedSize == kSystemPointerSize);
+ PropertyDetails da(dict->DetailsAt(Smi(a).value()));
+ PropertyDetails db(dict->DetailsAt(Smi(b).value()));
return da.dictionary_index() < db.dictionary_index();
}
- Dictionary* dict;
+ Dictionary dict;
};
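
// Aside: the comparator sorts an array of entry indices by the enumeration
// index recorded in the dictionary, leaving the entries themselves in
// place; AtomicSlot only changes how the sorted words are loaded and stored
// so concurrent marking sees atomic accesses. A plain-data equivalent:
#include <algorithm>
#include <vector>
struct Details { int enum_index; };
void SortByEnumIndex(std::vector<int>& indices,
                     const std::vector<Details>& details) {
  std::sort(indices.begin(), indices.end(),
            [&](int a, int b) {
              return details[a].enum_index < details[b].enum_index;
            });
}
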
template <typename Derived, typename Shape>
@@ -17822,7 +18279,7 @@ void BaseNameDictionary<Derived, Shape>::CopyEnumKeysTo(
int properties = 0;
ReadOnlyRoots roots(isolate);
for (int i = 0; i < capacity; i++) {
- Object* key;
+ Object key;
if (!dictionary->ToKey(roots, i, &key)) continue;
bool is_shadowing_key = false;
if (key->IsSymbol()) continue;
@@ -17846,14 +18303,12 @@ void BaseNameDictionary<Derived, Shape>::CopyEnumKeysTo(
CHECK_EQ(length, properties);
DisallowHeapAllocation no_gc;
- Derived* raw_dictionary = *dictionary;
- FixedArray* raw_storage = *storage;
+ Derived raw_dictionary = *dictionary;
+ FixedArray raw_storage = *storage;
EnumIndexComparator<Derived> cmp(raw_dictionary);
- // Use AtomicElement wrapper to ensure that std::sort uses atomic load and
+ // Use AtomicSlot wrapper to ensure that std::sort uses atomic load and
// store operations that are safe for concurrent marking.
- base::AtomicElement<Smi*>* start =
- reinterpret_cast<base::AtomicElement<Smi*>*>(
- storage->GetFirstElementAddress());
+ AtomicSlot start(storage->GetFirstElementAddress());
std::sort(start, start + length, cmp);
for (int i = 0; i < length; i++) {
int index = Smi::ToInt(raw_storage->get(i));
@@ -17871,9 +18326,9 @@ Handle<FixedArray> BaseNameDictionary<Derived, Shape>::IterationIndices(
int array_size = 0;
{
DisallowHeapAllocation no_gc;
- Derived* raw_dictionary = *dictionary;
+ Derived raw_dictionary = *dictionary;
for (int i = 0; i < capacity; i++) {
- Object* k;
+ Object k;
if (!raw_dictionary->ToKey(roots, i, &k)) continue;
array->set(array_size++, Smi::FromInt(i));
}
@@ -17881,11 +18336,9 @@ Handle<FixedArray> BaseNameDictionary<Derived, Shape>::IterationIndices(
DCHECK_EQ(array_size, length);
EnumIndexComparator<Derived> cmp(raw_dictionary);
- // Use AtomicElement wrapper to ensure that std::sort uses atomic load and
+ // Use AtomicSlot wrapper to ensure that std::sort uses atomic load and
// store operations that are safe for concurrent marking.
- base::AtomicElement<Smi*>* start =
- reinterpret_cast<base::AtomicElement<Smi*>*>(
- array->GetFirstElementAddress());
+ AtomicSlot start(array->GetFirstElementAddress());
std::sort(start, start + array_size, cmp);
}
return FixedArray::ShrinkOrEmpty(isolate, array, array_size);
@@ -17903,9 +18356,9 @@ void BaseNameDictionary<Derived, Shape>::CollectKeysTo(
PropertyFilter filter = keys->filter();
{
DisallowHeapAllocation no_gc;
- Derived* raw_dictionary = *dictionary;
+ Derived raw_dictionary = *dictionary;
for (int i = 0; i < capacity; i++) {
- Object* k;
+ Object k;
if (!raw_dictionary->ToKey(roots, i, &k)) continue;
if (k->FilterKey(filter)) continue;
PropertyDetails details = raw_dictionary->DetailsAt(i);
@@ -17915,7 +18368,7 @@ void BaseNameDictionary<Derived, Shape>::CollectKeysTo(
}
if (filter & ONLY_ALL_CAN_READ) {
if (details.kind() != kAccessor) continue;
- Object* accessors = raw_dictionary->ValueAt(i);
+ Object accessors = raw_dictionary->ValueAt(i);
if (!accessors->IsAccessorInfo()) continue;
if (!AccessorInfo::cast(accessors)->all_can_read()) continue;
}
@@ -17923,18 +18376,16 @@ void BaseNameDictionary<Derived, Shape>::CollectKeysTo(
}
EnumIndexComparator<Derived> cmp(raw_dictionary);
- // Use AtomicElement wrapper to ensure that std::sort uses atomic load and
+ // Use AtomicSlot wrapper to ensure that std::sort uses atomic load and
// store operations that are safe for concurrent marking.
- base::AtomicElement<Smi*>* start =
- reinterpret_cast<base::AtomicElement<Smi*>*>(
- array->GetFirstElementAddress());
+ AtomicSlot start(array->GetFirstElementAddress());
std::sort(start, start + array_size, cmp);
}
bool has_seen_symbol = false;
for (int i = 0; i < array_size; i++) {
int index = Smi::ToInt(array->get(i));
- Object* key = dictionary->NameAt(index);
+ Object key = dictionary->NameAt(index);
if (key->IsSymbol()) {
has_seen_symbol = true;
continue;
@@ -17944,7 +18395,7 @@ void BaseNameDictionary<Derived, Shape>::CollectKeysTo(
if (has_seen_symbol) {
for (int i = 0; i < array_size; i++) {
int index = Smi::ToInt(array->get(i));
- Object* key = dictionary->NameAt(index);
+ Object key = dictionary->NameAt(index);
if (!key->IsSymbol()) continue;
keys->AddKey(key, DO_NOT_CONVERT);
}
@@ -17953,14 +18404,14 @@ void BaseNameDictionary<Derived, Shape>::CollectKeysTo(
// Backwards lookup (slow).
template <typename Derived, typename Shape>
-Object* Dictionary<Derived, Shape>::SlowReverseLookup(Object* value) {
- Derived* dictionary = Derived::cast(this);
+Object Dictionary<Derived, Shape>::SlowReverseLookup(Object value) {
+ Derived dictionary = Derived::cast(*this);
ReadOnlyRoots roots = dictionary->GetReadOnlyRoots();
int capacity = dictionary->Capacity();
for (int i = 0; i < capacity; i++) {
- Object* k;
+ Object k;
if (!dictionary->ToKey(roots, i, &k)) continue;
- Object* e = dictionary->ValueAt(i);
+ Object e = dictionary->ValueAt(i);
if (e == value) return k;
}
return roots.undefined_value();
@@ -17976,9 +18427,9 @@ void ObjectHashTableBase<Derived, Shape>::FillEntriesWithHoles(
}
template <typename Derived, typename Shape>
-Object* ObjectHashTableBase<Derived, Shape>::Lookup(ReadOnlyRoots roots,
- Handle<Object> key,
- int32_t hash) {
+Object ObjectHashTableBase<Derived, Shape>::Lookup(ReadOnlyRoots roots,
+ Handle<Object> key,
+ int32_t hash) {
DisallowHeapAllocation no_gc;
DCHECK(this->IsKey(roots, *key));
@@ -17988,14 +18439,14 @@ Object* ObjectHashTableBase<Derived, Shape>::Lookup(ReadOnlyRoots roots,
}
template <typename Derived, typename Shape>
-Object* ObjectHashTableBase<Derived, Shape>::Lookup(Handle<Object> key) {
+Object ObjectHashTableBase<Derived, Shape>::Lookup(Handle<Object> key) {
DisallowHeapAllocation no_gc;
ReadOnlyRoots roots = this->GetReadOnlyRoots();
DCHECK(this->IsKey(roots, *key));
// If the object does not have an identity hash, it was never used as a key.
- Object* hash = key->GetHash();
+ Object hash = key->GetHash();
if (hash->IsUndefined(roots)) {
return roots.the_hole_value();
}
@@ -18003,13 +18454,13 @@ Object* ObjectHashTableBase<Derived, Shape>::Lookup(Handle<Object> key) {
}
template <typename Derived, typename Shape>
-Object* ObjectHashTableBase<Derived, Shape>::Lookup(Handle<Object> key,
- int32_t hash) {
+Object ObjectHashTableBase<Derived, Shape>::Lookup(Handle<Object> key,
+ int32_t hash) {
return Lookup(this->GetReadOnlyRoots(), key, hash);
}
template <typename Derived, typename Shape>
-Object* ObjectHashTableBase<Derived, Shape>::ValueAt(int entry) {
+Object ObjectHashTableBase<Derived, Shape>::ValueAt(int entry) {
return this->get(EntryToValueIndex(entry));
}
@@ -18077,7 +18528,7 @@ Handle<Derived> ObjectHashTableBase<Derived, Shape>::Remove(
bool* was_present) {
DCHECK(table->IsKey(table->GetReadOnlyRoots(), *key));
- Object* hash = key->GetHash();
+ Object hash = key->GetHash();
if (hash->IsUndefined()) {
*was_present = false;
return table;
@@ -18105,8 +18556,8 @@ Handle<Derived> ObjectHashTableBase<Derived, Shape>::Remove(
}
template <typename Derived, typename Shape>
-void ObjectHashTableBase<Derived, Shape>::AddEntry(int entry, Object* key,
- Object* value) {
+void ObjectHashTableBase<Derived, Shape>::AddEntry(int entry, Object key,
+ Object value) {
this->set(Derived::EntryToIndex(entry), key);
this->set(Derived::EntryToIndex(entry) + 1, value);
this->ElementAdded();
@@ -18209,11 +18660,11 @@ Handle<JSArray> JSWeakCollection::GetEntries(Handle<JSWeakCollection> holder,
int count = 0;
for (int i = 0;
count / values_per_entry < max_entries && i < table->Capacity(); i++) {
- Object* key;
+ Object key;
if (table->ToKey(roots, i, &key)) {
entries->set(count++, key);
if (values_per_entry > 1) {
- Object* value = table->Lookup(handle(key, isolate));
+ Object value = table->Lookup(handle(key, isolate));
entries->set(count++, value);
}
}
@@ -18256,19 +18707,21 @@ double JSDate::CurrentTimeValue(Isolate* isolate) {
// static
-Object* JSDate::GetField(Object* object, Smi* index) {
- return JSDate::cast(object)->DoGetField(
- static_cast<FieldIndex>(index->value()));
+Address JSDate::GetField(Address raw_object, Address smi_index) {
+ Object object(raw_object);
+ Smi index(smi_index);
+ return JSDate::cast(object)
+ ->DoGetField(static_cast<FieldIndex>(index->value()))
+ ->ptr();
}
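
// Aside: GetField now follows the raw-Address shape seen elsewhere in this
// patch (Smi::LexicographicCompare, StringTable::LookupStringIfExists_
// NoAllocate): take tagged words in, rebuild the typed wrappers inside, and
// return a tagged word. A sketch with a hypothetical wrapper type:
#include <cstdint>
using Address = uintptr_t;
struct Tagged {
  explicit Tagged(Address ptr) : ptr_(ptr) {}
  Address ptr() const { return ptr_; }
 private:
  Address ptr_;
};
Address EntryPoint(Address raw_object) {
  Tagged object(raw_object);  // wrap the raw word at the boundary
  // ... operate on |object| ...
  return object.ptr();        // unwrap on the way out
}
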
-
-Object* JSDate::DoGetField(FieldIndex index) {
+Object JSDate::DoGetField(FieldIndex index) {
DCHECK_NE(index, kDateValue);
DateCache* date_cache = GetIsolate()->date_cache();
if (index < kFirstUncachedField) {
- Object* stamp = cache_stamp();
+ Object stamp = cache_stamp();
if (stamp != date_cache->stamp() && stamp->IsSmi()) {
// Since the stamp is not NaN, the value is also not NaN.
int64_t local_time_ms =
@@ -18305,10 +18758,8 @@ Object* JSDate::DoGetField(FieldIndex index) {
return Smi::FromInt(time_in_day_ms);
}
-
-Object* JSDate::GetUTCField(FieldIndex index,
- double value,
- DateCache* date_cache) {
+Object JSDate::GetUTCField(FieldIndex index, double value,
+ DateCache* date_cache) {
DCHECK_GE(index, kFirstUTCField);
if (std::isnan(value)) return GetReadOnlyRoots().nan_value();
@@ -18316,6 +18767,7 @@ Object* JSDate::GetUTCField(FieldIndex index,
int64_t time_ms = static_cast<int64_t>(value);
if (index == kTimezoneOffset) {
+ GetIsolate()->CountUsage(v8::Isolate::kDateGetTimezoneOffset);
return Smi::FromInt(date_cache->TimezoneOffset(time_ms));
}
@@ -18346,7 +18798,6 @@ Object* JSDate::GetUTCField(FieldIndex index,
UNREACHABLE();
}
-
// static
Handle<Object> JSDate::SetValue(Handle<JSDate> date, double v) {
Isolate* const isolate = date->GetIsolate();
@@ -18356,11 +18807,10 @@ Handle<Object> JSDate::SetValue(Handle<JSDate> date, double v) {
return value;
}
-
-void JSDate::SetValue(Object* value, bool is_value_nan) {
+void JSDate::SetValue(Object value, bool is_value_nan) {
set_value(value);
if (is_value_nan) {
- HeapNumber* nan = GetReadOnlyRoots().nan_value();
+ HeapNumber nan = GetReadOnlyRoots().nan_value();
set_cache_stamp(nan, SKIP_WRITE_BARRIER);
set_year(nan, SKIP_WRITE_BARRIER);
set_month(nan, SKIP_WRITE_BARRIER);
@@ -18374,7 +18824,6 @@ void JSDate::SetValue(Object* value, bool is_value_nan) {
}
}
-
void JSDate::SetCachedFields(int64_t local_time_ms, DateCache* date_cache) {
int days = DateCache::DaysFromTime(local_time_ms);
int time_in_day_ms = DateCache::TimeInDay(local_time_ms, days);
@@ -18596,48 +19045,48 @@ int JSGeneratorObject::source_position() const {
// The stored bytecode offset is relative to a different base than what
// is used in the source position table, hence the subtraction.
code_offset -= BytecodeArray::kHeaderSize - kHeapObjectTag;
- AbstractCode* code =
+ AbstractCode code =
AbstractCode::cast(function()->shared()->GetBytecodeArray());
return code->SourcePosition(code_offset);
}
// static
-AccessCheckInfo* AccessCheckInfo::Get(Isolate* isolate,
- Handle<JSObject> receiver) {
+AccessCheckInfo AccessCheckInfo::Get(Isolate* isolate,
+ Handle<JSObject> receiver) {
DisallowHeapAllocation no_gc;
DCHECK(receiver->map()->is_access_check_needed());
- Object* maybe_constructor = receiver->map()->GetConstructor();
+ Object maybe_constructor = receiver->map()->GetConstructor();
if (maybe_constructor->IsFunctionTemplateInfo()) {
- Object* data_obj =
- FunctionTemplateInfo::cast(maybe_constructor)->access_check_info();
- if (data_obj->IsUndefined(isolate)) return nullptr;
+ Object data_obj =
+ FunctionTemplateInfo::cast(maybe_constructor)->GetAccessCheckInfo();
+ if (data_obj->IsUndefined(isolate)) return AccessCheckInfo();
return AccessCheckInfo::cast(data_obj);
}
// Might happen for a detached context.
- if (!maybe_constructor->IsJSFunction()) return nullptr;
- JSFunction* constructor = JSFunction::cast(maybe_constructor);
+ if (!maybe_constructor->IsJSFunction()) return AccessCheckInfo();
+ JSFunction constructor = JSFunction::cast(maybe_constructor);
// Might happen for the debug context.
- if (!constructor->shared()->IsApiFunction()) return nullptr;
+ if (!constructor->shared()->IsApiFunction()) return AccessCheckInfo();
- Object* data_obj =
- constructor->shared()->get_api_func_data()->access_check_info();
- if (data_obj->IsUndefined(isolate)) return nullptr;
+ Object data_obj =
+ constructor->shared()->get_api_func_data()->GetAccessCheckInfo();
+ if (data_obj->IsUndefined(isolate)) return AccessCheckInfo();
return AccessCheckInfo::cast(data_obj);
}
bool JSReceiver::HasProxyInPrototype(Isolate* isolate) {
- for (PrototypeIterator iter(isolate, this, kStartAtReceiver,
+ for (PrototypeIterator iter(isolate, *this, kStartAtReceiver,
PrototypeIterator::END_AT_NULL);
!iter.IsAtEnd(); iter.AdvanceIgnoringProxies()) {
- if (iter.GetCurrent<Object>()->IsJSProxy()) return true;
+ if (iter.GetCurrent()->IsJSProxy()) return true;
}
return false;
}
bool JSReceiver::HasComplexElements() {
if (IsJSProxy()) return true;
- JSObject* this_object = JSObject::cast(this);
+ JSObject this_object = JSObject::cast(*this);
if (this_object->HasIndexedInterceptor()) {
return true;
}
@@ -18658,7 +19107,7 @@ MaybeHandle<Name> FunctionTemplateInfo::TryGetCachedPropertyName(
return MaybeHandle<Name>();
}
-Smi* Smi::LexicographicCompare(Isolate* isolate, Smi* x, Smi* y) {
+Address Smi::LexicographicCompare(Isolate* isolate, Smi x, Smi y) {
DisallowHeapAllocation no_allocation;
DisallowJavascriptExecution no_js(isolate);
@@ -18666,12 +19115,13 @@ Smi* Smi::LexicographicCompare(Isolate* isolate, Smi* x, Smi* y) {
int y_value = Smi::ToInt(y);
// If the integers are equal so are the string representations.
- if (x_value == y_value) return Smi::FromInt(0);
+ if (x_value == y_value) return Smi::FromInt(0).ptr();
// If one of the integers is zero the normal integer order is the
// same as the lexicographic order of the string representations.
- if (x_value == 0 || y_value == 0)
- return Smi::FromInt(x_value < y_value ? -1 : 1);
+ if (x_value == 0 || y_value == 0) {
+ return Smi::FromInt(x_value < y_value ? -1 : 1).ptr();
+ }
// If only one of the integers is negative the negative number is
// smallest because the char code of '-' is less than the char code
@@ -18682,8 +19132,8 @@ Smi* Smi::LexicographicCompare(Isolate* isolate, Smi* x, Smi* y) {
uint32_t x_scaled = x_value;
uint32_t y_scaled = y_value;
if (x_value < 0 || y_value < 0) {
- if (y_value >= 0) return Smi::FromInt(-1);
- if (x_value >= 0) return Smi::FromInt(1);
+ if (y_value >= 0) return Smi::FromInt(-1).ptr();
+ if (x_value >= 0) return Smi::FromInt(1).ptr();
x_scaled = -x_value;
y_scaled = -y_value;
}
@@ -18729,9 +19179,9 @@ Smi* Smi::LexicographicCompare(Isolate* isolate, Smi* x, Smi* y) {
tie = 1;
}
- if (x_scaled < y_scaled) return Smi::FromInt(-1);
- if (x_scaled > y_scaled) return Smi::FromInt(1);
- return Smi::FromInt(tie);
+ if (x_scaled < y_scaled) return Smi::FromInt(-1).ptr();
+ if (x_scaled > y_scaled) return Smi::FromInt(1).ptr();
+ return Smi::FromInt(tie).ptr();
}
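
// Aside: the digit-scaling above avoids allocating, but its observable
// result matches comparing the decimal string forms. A reference sketch
// (so e.g. 10 sorts before 9, because "10" < "9"):
#include <string>
int LexicographicCompareReference(int x, int y) {
  std::string sx = std::to_string(x);
  std::string sy = std::to_string(y);
  return sx < sy ? -1 : (sx > sy ? 1 : 0);
}
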
// Force instantiation of template instances class.
@@ -18832,5 +19282,43 @@ template void
BaseNameDictionary<NameDictionary, NameDictionaryShape>::CollectKeysTo(
Handle<NameDictionary> dictionary, KeyAccumulator* keys);
+void JSWeakFactory::Cleanup(Handle<JSWeakFactory> weak_factory,
+ Isolate* isolate) {
+ // It's possible that the cleared_cells list is empty, since
+ // WeakCell.clear() was called on all its elements before this task ran. In
+ // that case, don't call the cleanup function.
+ if (!weak_factory->cleared_cells()->IsUndefined(isolate)) {
+ // Construct the iterator.
+ Handle<JSWeakFactoryCleanupIterator> iterator;
+ {
+ Handle<Map> cleanup_iterator_map(
+ isolate->native_context()->js_weak_factory_cleanup_iterator_map(),
+ isolate);
+ iterator = Handle<JSWeakFactoryCleanupIterator>::cast(
+ isolate->factory()->NewJSObjectFromMap(
+ cleanup_iterator_map, NOT_TENURED,
+ Handle<AllocationSite>::null()));
+ iterator->set_factory(*weak_factory);
+ }
+ Handle<Object> cleanup(weak_factory->cleanup(), isolate);
+
+ v8::TryCatch try_catch(reinterpret_cast<v8::Isolate*>(isolate));
+ v8::Local<v8::Value> result;
+ MaybeHandle<Object> exception;
+ Handle<Object> args[] = {iterator};
+ bool has_pending_exception = !ToLocal<Value>(
+ Execution::TryCall(
+ isolate, cleanup,
+ handle(ReadOnlyRoots(isolate).undefined_value(), isolate), 1, args,
+ Execution::MessageHandling::kReport, &exception),
+ &result);
+ // TODO(marja): (spec): What if there's an exception?
+ USE(has_pending_exception);
+
+ // TODO(marja): (spec): Should the iterator be invalidated after the
+ // function returns?
+ }
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/objects.h b/deps/v8/src/objects.h
index cd64199982..72d3511c6f 100644
--- a/deps/v8/src/objects.h
+++ b/deps/v8/src/objects.h
@@ -17,27 +17,17 @@
#include "src/base/flags.h"
#include "src/base/logging.h"
#include "src/checks.h"
+#include "src/constants-arch.h"
#include "src/elements-kind.h"
#include "src/field-index.h"
#include "src/flags.h"
-#include "src/messages.h"
+#include "src/message-template.h"
#include "src/objects-definitions.h"
#include "src/property-details.h"
-#include "src/roots.h"
#include "src/utils.h"
-#if V8_TARGET_ARCH_ARM
-#include "src/arm/constants-arm.h" // NOLINT
-#elif V8_TARGET_ARCH_ARM64
-#include "src/arm64/constants-arm64.h" // NOLINT
-#elif V8_TARGET_ARCH_MIPS
-#include "src/mips/constants-mips.h" // NOLINT
-#elif V8_TARGET_ARCH_MIPS64
-#include "src/mips64/constants-mips64.h" // NOLINT
-#elif V8_TARGET_ARCH_PPC
-#include "src/ppc/constants-ppc.h" // NOLINT
-#elif V8_TARGET_ARCH_S390
-#include "src/s390/constants-s390.h" // NOLINT
+#ifdef V8_COMPRESS_POINTERS
+#include "src/ptr-compr.h"
#endif
// Has to be the last include (doesn't have include guards):
@@ -84,6 +74,7 @@
// - JSNumberFormat // If V8_INTL_SUPPORT enabled.
// - JSPluralRules // If V8_INTL_SUPPORT enabled.
// - JSRelativeTimeFormat // If V8_INTL_SUPPORT enabled.
+// - JSSegmentIterator // If V8_INTL_SUPPORT enabled.
// - JSSegmenter // If V8_INTL_SUPPORT enabled.
// - WasmExceptionObject
// - WasmGlobalObject
@@ -96,7 +87,6 @@
// - ByteArray
// - BytecodeArray
// - FixedArray
-// - DescriptorArray
// - FrameArray
// - HashTable
// - Dictionary
@@ -107,7 +97,6 @@
// - OrderedHashTable
// - OrderedHashSet
// - OrderedHashMap
-// - Context
// - FeedbackMetadata
// - TemplateList
// - TransitionArray
@@ -135,9 +124,12 @@
// - ExternalOneByteInternalizedString
// - ExternalTwoByteInternalizedString
// - Symbol
+// - Context
+// - NativeContext
// - HeapNumber
// - BigInt
// - Cell
+// - DescriptorArray
// - PropertyCell
// - PropertyArray
// - Code
@@ -151,6 +143,7 @@
// - SharedFunctionInfo
// - Struct
// - AccessorInfo
+// - AsmWasmData
// - PromiseReaction
// - PromiseCapability
// - AccessorPair
@@ -176,17 +169,16 @@
// - PromiseFulfillReactionJobTask
// - PromiseRejectReactionJobTask
// - PromiseResolveThenableJobTask
-// - MicrotaskQueue
// - Module
// - ModuleInfoEntry
// - FeedbackCell
// - FeedbackVector
-// - PreParsedScopeData
+// - PreparseData
// - UncompiledData
-// - UncompiledDataWithoutPreParsedScope
-// - UncompiledDataWithPreParsedScope
+// - UncompiledDataWithoutPreparseData
+// - UncompiledDataWithPreparseData
//
-// Formats of Object*:
+// Formats of Object::ptr_:
// Smi: [31 bit signed int] 0
// HeapObject: [32 bit direct pointer] (4 byte aligned) | 01
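
// Aside: per the 32-bit layout documented above, the low bit is the tag:
// 0 for a Smi (payload in the upper 31 bits), 1 for a heap object pointer.
// The corresponding checks, as a sketch:
#include <cstdint>
constexpr uintptr_t kSmiTagMask = 1;
bool IsSmiWord(uintptr_t word) { return (word & kSmiTagMask) == 0; }
int32_t SmiPayload(uintptr_t word) {
  // Arithmetic shift recovers the signed 31-bit payload (on the usual
  // arithmetic-shift platforms).
  return static_cast<int32_t>(word) >> 1;
}
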
@@ -246,375 +238,6 @@ const int kVariableSizeSentinel = 0;
const int kStubMajorKeyBits = 8;
const int kStubMinorKeyBits = kSmiValueSize - kStubMajorKeyBits - 1;
-// We use the full 16 bits of the instance_type field to encode heap object
-// instance types. All the high-order bits (bit 7-15) are cleared if the object
-// is a string, and contain set bits if it is not a string.
-const uint32_t kIsNotStringMask = 0xff80;
-const uint32_t kStringTag = 0x0;
-
-// Bit 6 indicates whether the object is an internalized string (bit clear)
-// or not (bit set). Bit 7 has to be clear as well.
-const uint32_t kIsNotInternalizedMask = 0x40;
-const uint32_t kNotInternalizedTag = 0x40;
-const uint32_t kInternalizedTag = 0x0;
-
-// If bit 7 is clear then bit 3 indicates whether the string consists of
-// two-byte characters or one-byte characters.
-const uint32_t kStringEncodingMask = 0x8;
-const uint32_t kTwoByteStringTag = 0x0;
-const uint32_t kOneByteStringTag = 0x8;
-
-// If bit 7 is clear, the low-order 3 bits indicate the representation
-// of the string.
-const uint32_t kStringRepresentationMask = 0x07;
-enum StringRepresentationTag {
- kSeqStringTag = 0x0,
- kConsStringTag = 0x1,
- kExternalStringTag = 0x2,
- kSlicedStringTag = 0x3,
- kThinStringTag = 0x5
-};
-const uint32_t kIsIndirectStringMask = 0x1;
-const uint32_t kIsIndirectStringTag = 0x1;
-STATIC_ASSERT((kSeqStringTag & kIsIndirectStringMask) == 0); // NOLINT
-STATIC_ASSERT((kExternalStringTag & kIsIndirectStringMask) == 0); // NOLINT
-STATIC_ASSERT((kConsStringTag &
- kIsIndirectStringMask) == kIsIndirectStringTag); // NOLINT
-STATIC_ASSERT((kSlicedStringTag &
- kIsIndirectStringMask) == kIsIndirectStringTag); // NOLINT
-STATIC_ASSERT((kThinStringTag & kIsIndirectStringMask) == kIsIndirectStringTag);
-
-// If bit 7 is clear, then bit 4 indicates whether this two-byte
-// string actually contains one byte data.
-const uint32_t kOneByteDataHintMask = 0x10;
-const uint32_t kOneByteDataHintTag = 0x10;
-
-// If bit 7 is clear and string representation indicates an external string,
-// then bit 5 indicates whether the data pointer is cached.
-const uint32_t kUncachedExternalStringMask = 0x20;
-const uint32_t kUncachedExternalStringTag = 0x20;
-
-// A ConsString with an empty string as the right side is a candidate
-// for being shortcut by the garbage collector. We don't allocate any
-// non-flat internalized strings, so we do not shortcut them thereby
-// avoiding turning internalized strings into strings. The bit-masks
-// below contain the internalized bit as additional safety.
-// See heap.cc, mark-compact.cc and objects-visiting.cc.
-const uint32_t kShortcutTypeMask =
- kIsNotStringMask |
- kIsNotInternalizedMask |
- kStringRepresentationMask;
-const uint32_t kShortcutTypeTag = kConsStringTag | kNotInternalizedTag;
-
-static inline bool IsShortcutCandidate(int type) {
- return ((type & kShortcutTypeMask) == kShortcutTypeTag);
-}
-
-enum InstanceType : uint16_t {
- // String types.
- INTERNALIZED_STRING_TYPE = kTwoByteStringTag | kSeqStringTag |
- kInternalizedTag, // FIRST_PRIMITIVE_TYPE
- ONE_BYTE_INTERNALIZED_STRING_TYPE =
- kOneByteStringTag | kSeqStringTag | kInternalizedTag,
- EXTERNAL_INTERNALIZED_STRING_TYPE =
- kTwoByteStringTag | kExternalStringTag | kInternalizedTag,
- EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE =
- kOneByteStringTag | kExternalStringTag | kInternalizedTag,
- EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE =
- EXTERNAL_INTERNALIZED_STRING_TYPE | kOneByteDataHintTag |
- kInternalizedTag,
- UNCACHED_EXTERNAL_INTERNALIZED_STRING_TYPE =
- EXTERNAL_INTERNALIZED_STRING_TYPE | kUncachedExternalStringTag |
- kInternalizedTag,
- UNCACHED_EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE =
- EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE | kUncachedExternalStringTag |
- kInternalizedTag,
- UNCACHED_EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE =
- EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE |
- kUncachedExternalStringTag | kInternalizedTag,
- STRING_TYPE = INTERNALIZED_STRING_TYPE | kNotInternalizedTag,
- ONE_BYTE_STRING_TYPE =
- ONE_BYTE_INTERNALIZED_STRING_TYPE | kNotInternalizedTag,
- CONS_STRING_TYPE = kTwoByteStringTag | kConsStringTag | kNotInternalizedTag,
- CONS_ONE_BYTE_STRING_TYPE =
- kOneByteStringTag | kConsStringTag | kNotInternalizedTag,
- SLICED_STRING_TYPE =
- kTwoByteStringTag | kSlicedStringTag | kNotInternalizedTag,
- SLICED_ONE_BYTE_STRING_TYPE =
- kOneByteStringTag | kSlicedStringTag | kNotInternalizedTag,
- EXTERNAL_STRING_TYPE =
- EXTERNAL_INTERNALIZED_STRING_TYPE | kNotInternalizedTag,
- EXTERNAL_ONE_BYTE_STRING_TYPE =
- EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE | kNotInternalizedTag,
- EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE =
- EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE |
- kNotInternalizedTag,
- UNCACHED_EXTERNAL_STRING_TYPE =
- UNCACHED_EXTERNAL_INTERNALIZED_STRING_TYPE | kNotInternalizedTag,
- UNCACHED_EXTERNAL_ONE_BYTE_STRING_TYPE =
- UNCACHED_EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE | kNotInternalizedTag,
- UNCACHED_EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE =
- UNCACHED_EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE |
- kNotInternalizedTag,
- THIN_STRING_TYPE = kTwoByteStringTag | kThinStringTag | kNotInternalizedTag,
- THIN_ONE_BYTE_STRING_TYPE =
- kOneByteStringTag | kThinStringTag | kNotInternalizedTag,
-
- // Non-string names
- SYMBOL_TYPE =
- 1 + (kIsNotInternalizedMask | kUncachedExternalStringMask |
- kOneByteDataHintMask | kStringEncodingMask |
- kStringRepresentationMask), // FIRST_NONSTRING_TYPE, LAST_NAME_TYPE
-
- // Other primitives (cannot contain non-map-word pointers to heap objects).
- HEAP_NUMBER_TYPE,
- BIGINT_TYPE,
- ODDBALL_TYPE, // LAST_PRIMITIVE_TYPE
-
- // Objects allocated in their own spaces (never in new space).
- MAP_TYPE,
- CODE_TYPE,
-
- // "Data", objects that cannot contain non-map-word pointers to heap
- // objects.
- MUTABLE_HEAP_NUMBER_TYPE,
- FOREIGN_TYPE,
- BYTE_ARRAY_TYPE,
- BYTECODE_ARRAY_TYPE,
- FREE_SPACE_TYPE,
- FIXED_INT8_ARRAY_TYPE, // FIRST_FIXED_TYPED_ARRAY_TYPE
- FIXED_UINT8_ARRAY_TYPE,
- FIXED_INT16_ARRAY_TYPE,
- FIXED_UINT16_ARRAY_TYPE,
- FIXED_INT32_ARRAY_TYPE,
- FIXED_UINT32_ARRAY_TYPE,
- FIXED_FLOAT32_ARRAY_TYPE,
- FIXED_FLOAT64_ARRAY_TYPE,
- FIXED_UINT8_CLAMPED_ARRAY_TYPE,
- FIXED_BIGINT64_ARRAY_TYPE,
- FIXED_BIGUINT64_ARRAY_TYPE, // LAST_FIXED_TYPED_ARRAY_TYPE
- FIXED_DOUBLE_ARRAY_TYPE,
- FEEDBACK_METADATA_TYPE,
- FILLER_TYPE, // LAST_DATA_TYPE
-
- // Structs.
- ACCESS_CHECK_INFO_TYPE,
- ACCESSOR_INFO_TYPE,
- ACCESSOR_PAIR_TYPE,
- ALIASED_ARGUMENTS_ENTRY_TYPE,
- ALLOCATION_MEMENTO_TYPE,
- ASYNC_GENERATOR_REQUEST_TYPE,
- DEBUG_INFO_TYPE,
- FUNCTION_TEMPLATE_INFO_TYPE,
- INTERCEPTOR_INFO_TYPE,
- INTERPRETER_DATA_TYPE,
- MODULE_INFO_ENTRY_TYPE,
- MODULE_TYPE,
- OBJECT_TEMPLATE_INFO_TYPE,
- PROMISE_CAPABILITY_TYPE,
- PROMISE_REACTION_TYPE,
- PROTOTYPE_INFO_TYPE,
- SCRIPT_TYPE,
- STACK_FRAME_INFO_TYPE,
- TUPLE2_TYPE,
- TUPLE3_TYPE,
- ARRAY_BOILERPLATE_DESCRIPTION_TYPE,
- WASM_DEBUG_INFO_TYPE,
- WASM_EXPORTED_FUNCTION_DATA_TYPE,
-
- CALLABLE_TASK_TYPE, // FIRST_MICROTASK_TYPE
- CALLBACK_TASK_TYPE,
- PROMISE_FULFILL_REACTION_JOB_TASK_TYPE,
- PROMISE_REJECT_REACTION_JOB_TASK_TYPE,
- PROMISE_RESOLVE_THENABLE_JOB_TASK_TYPE, // LAST_MICROTASK_TYPE
-
- MICROTASK_QUEUE_TYPE,
-
- ALLOCATION_SITE_TYPE,
- // FixedArrays.
- FIXED_ARRAY_TYPE, // FIRST_FIXED_ARRAY_TYPE
- OBJECT_BOILERPLATE_DESCRIPTION_TYPE,
- HASH_TABLE_TYPE, // FIRST_HASH_TABLE_TYPE
- ORDERED_HASH_MAP_TYPE, // FIRST_DICTIONARY_TYPE
- ORDERED_HASH_SET_TYPE,
- NAME_DICTIONARY_TYPE,
- GLOBAL_DICTIONARY_TYPE,
- NUMBER_DICTIONARY_TYPE,
- SIMPLE_NUMBER_DICTIONARY_TYPE, // LAST_DICTIONARY_TYPE
- STRING_TABLE_TYPE, // LAST_HASH_TABLE_TYPE
- EPHEMERON_HASH_TABLE_TYPE,
- SCOPE_INFO_TYPE,
- SCRIPT_CONTEXT_TABLE_TYPE,
- AWAIT_CONTEXT_TYPE, // FIRST_CONTEXT_TYPE
- BLOCK_CONTEXT_TYPE,
- CATCH_CONTEXT_TYPE,
- DEBUG_EVALUATE_CONTEXT_TYPE,
- EVAL_CONTEXT_TYPE,
- FUNCTION_CONTEXT_TYPE,
- MODULE_CONTEXT_TYPE,
- NATIVE_CONTEXT_TYPE,
- SCRIPT_CONTEXT_TYPE,
- WITH_CONTEXT_TYPE, // LAST_FIXED_ARRAY_TYPE, LAST_CONTEXT_TYPE
-
- WEAK_FIXED_ARRAY_TYPE, // FIRST_WEAK_FIXED_ARRAY_TYPE
- DESCRIPTOR_ARRAY_TYPE,
- TRANSITION_ARRAY_TYPE, // LAST_WEAK_FIXED_ARRAY_TYPE
-
- // Misc.
- CALL_HANDLER_INFO_TYPE,
- CELL_TYPE,
- CODE_DATA_CONTAINER_TYPE,
- FEEDBACK_CELL_TYPE,
- FEEDBACK_VECTOR_TYPE,
- LOAD_HANDLER_TYPE,
- PRE_PARSED_SCOPE_DATA_TYPE,
- PROPERTY_ARRAY_TYPE,
- PROPERTY_CELL_TYPE,
- SHARED_FUNCTION_INFO_TYPE,
- SMALL_ORDERED_HASH_MAP_TYPE,
- SMALL_ORDERED_HASH_SET_TYPE,
- STORE_HANDLER_TYPE,
- UNCOMPILED_DATA_WITHOUT_PRE_PARSED_SCOPE_TYPE,
- UNCOMPILED_DATA_WITH_PRE_PARSED_SCOPE_TYPE,
- WEAK_ARRAY_LIST_TYPE,
-
- // All the following types are subtypes of JSReceiver, which corresponds to
- // objects in the JS sense. The first and the last type in this range are
- // the two forms of function. This organization enables using the same
- // compares for checking the JS_RECEIVER and the NONCALLABLE_JS_OBJECT range.
- // Some of the following instance types are exposed in v8.h, so to not
- // unnecessarily change the ABI when we introduce new instance types in the
- // future, we leave some space between instance types.
- JS_PROXY_TYPE = 0x0400, // FIRST_JS_RECEIVER_TYPE
- JS_GLOBAL_OBJECT_TYPE, // FIRST_JS_OBJECT_TYPE
- JS_GLOBAL_PROXY_TYPE,
- JS_MODULE_NAMESPACE_TYPE,
- // Like JS_API_OBJECT_TYPE, but requires access checks and/or has
- // interceptors.
- JS_SPECIAL_API_OBJECT_TYPE = 0x0410, // LAST_SPECIAL_RECEIVER_TYPE
- JS_VALUE_TYPE, // LAST_CUSTOM_ELEMENTS_RECEIVER
- // Like JS_OBJECT_TYPE, but created from API function.
- JS_API_OBJECT_TYPE = 0x0420,
- JS_OBJECT_TYPE,
- JS_ARGUMENTS_TYPE,
- JS_ARRAY_BUFFER_TYPE,
- JS_ARRAY_ITERATOR_TYPE,
- JS_ARRAY_TYPE,
- JS_ASYNC_FROM_SYNC_ITERATOR_TYPE,
- JS_ASYNC_GENERATOR_OBJECT_TYPE,
- JS_CONTEXT_EXTENSION_OBJECT_TYPE,
- JS_DATE_TYPE,
- JS_ERROR_TYPE,
- JS_GENERATOR_OBJECT_TYPE,
- JS_MAP_TYPE,
- JS_MAP_KEY_ITERATOR_TYPE,
- JS_MAP_KEY_VALUE_ITERATOR_TYPE,
- JS_MAP_VALUE_ITERATOR_TYPE,
- JS_MESSAGE_OBJECT_TYPE,
- JS_PROMISE_TYPE,
- JS_REGEXP_TYPE,
- JS_REGEXP_STRING_ITERATOR_TYPE,
- JS_SET_TYPE,
- JS_SET_KEY_VALUE_ITERATOR_TYPE,
- JS_SET_VALUE_ITERATOR_TYPE,
- JS_STRING_ITERATOR_TYPE,
- JS_WEAK_MAP_TYPE,
- JS_WEAK_SET_TYPE,
-
- JS_TYPED_ARRAY_TYPE,
- JS_DATA_VIEW_TYPE,
-
-#ifdef V8_INTL_SUPPORT
- JS_INTL_V8_BREAK_ITERATOR_TYPE,
- JS_INTL_COLLATOR_TYPE,
- JS_INTL_DATE_TIME_FORMAT_TYPE,
- JS_INTL_LIST_FORMAT_TYPE,
- JS_INTL_LOCALE_TYPE,
- JS_INTL_NUMBER_FORMAT_TYPE,
- JS_INTL_PLURAL_RULES_TYPE,
- JS_INTL_RELATIVE_TIME_FORMAT_TYPE,
- JS_INTL_SEGMENTER_TYPE,
-#endif // V8_INTL_SUPPORT
-
- WASM_EXCEPTION_TYPE,
- WASM_GLOBAL_TYPE,
- WASM_INSTANCE_TYPE,
- WASM_MEMORY_TYPE,
- WASM_MODULE_TYPE,
- WASM_TABLE_TYPE,
- JS_BOUND_FUNCTION_TYPE,
- JS_FUNCTION_TYPE, // LAST_JS_OBJECT_TYPE, LAST_JS_RECEIVER_TYPE
-
- // Pseudo-types
- FIRST_TYPE = 0x0,
- LAST_TYPE = JS_FUNCTION_TYPE,
- FIRST_NAME_TYPE = FIRST_TYPE,
- LAST_NAME_TYPE = SYMBOL_TYPE,
- FIRST_UNIQUE_NAME_TYPE = INTERNALIZED_STRING_TYPE,
- LAST_UNIQUE_NAME_TYPE = SYMBOL_TYPE,
- FIRST_NONSTRING_TYPE = SYMBOL_TYPE,
- FIRST_PRIMITIVE_TYPE = FIRST_NAME_TYPE,
- LAST_PRIMITIVE_TYPE = ODDBALL_TYPE,
- FIRST_FUNCTION_TYPE = JS_BOUND_FUNCTION_TYPE,
- LAST_FUNCTION_TYPE = JS_FUNCTION_TYPE,
- // Boundaries for testing if given HeapObject is a subclass of FixedArray.
- FIRST_FIXED_ARRAY_TYPE = FIXED_ARRAY_TYPE,
- LAST_FIXED_ARRAY_TYPE = WITH_CONTEXT_TYPE,
- // Boundaries for testing if given HeapObject is a subclass of HashTable
- FIRST_HASH_TABLE_TYPE = HASH_TABLE_TYPE,
- LAST_HASH_TABLE_TYPE = STRING_TABLE_TYPE,
- // Boundaries for testing if given HeapObject is a subclass of Dictionary
- FIRST_DICTIONARY_TYPE = ORDERED_HASH_MAP_TYPE,
- LAST_DICTIONARY_TYPE = SIMPLE_NUMBER_DICTIONARY_TYPE,
- // Boundaries for testing if given HeapObject is a subclass of WeakFixedArray.
- FIRST_WEAK_FIXED_ARRAY_TYPE = WEAK_FIXED_ARRAY_TYPE,
- LAST_WEAK_FIXED_ARRAY_TYPE = TRANSITION_ARRAY_TYPE,
- // Boundaries for testing if given HeapObject is a Context
- FIRST_CONTEXT_TYPE = AWAIT_CONTEXT_TYPE,
- LAST_CONTEXT_TYPE = WITH_CONTEXT_TYPE,
- // Boundaries for testing if given HeapObject is a subclass of Microtask.
- FIRST_MICROTASK_TYPE = CALLABLE_TASK_TYPE,
- LAST_MICROTASK_TYPE = PROMISE_RESOLVE_THENABLE_JOB_TASK_TYPE,
- // Boundaries for testing for a fixed typed array.
- FIRST_FIXED_TYPED_ARRAY_TYPE = FIXED_INT8_ARRAY_TYPE,
- LAST_FIXED_TYPED_ARRAY_TYPE = FIXED_BIGUINT64_ARRAY_TYPE,
- // Boundary for promotion to old space.
- LAST_DATA_TYPE = FILLER_TYPE,
- // Boundary for objects represented as JSReceiver (i.e. JSObject or JSProxy).
- // Note that there is no range for JSObject or JSProxy, since their subtypes
- // are not continuous in this enum! The enum ranges instead reflect the
- // external class names, where proxies are treated as either ordinary objects,
- // or functions.
- FIRST_JS_RECEIVER_TYPE = JS_PROXY_TYPE,
- LAST_JS_RECEIVER_TYPE = LAST_TYPE,
- // Boundaries for testing the types represented as JSObject
- FIRST_JS_OBJECT_TYPE = JS_GLOBAL_OBJECT_TYPE,
- LAST_JS_OBJECT_TYPE = LAST_TYPE,
- // Boundary for testing JSReceivers that need special property lookup handling
- LAST_SPECIAL_RECEIVER_TYPE = JS_SPECIAL_API_OBJECT_TYPE,
- // Boundary case for testing JSReceivers that may have elements while having
- // an empty fixed array as elements backing store. This is true for string
- // wrappers.
- LAST_CUSTOM_ELEMENTS_RECEIVER = JS_VALUE_TYPE,
-
- FIRST_SET_ITERATOR_TYPE = JS_SET_KEY_VALUE_ITERATOR_TYPE,
- LAST_SET_ITERATOR_TYPE = JS_SET_VALUE_ITERATOR_TYPE,
-
- FIRST_MAP_ITERATOR_TYPE = JS_MAP_KEY_ITERATOR_TYPE,
- LAST_MAP_ITERATOR_TYPE = JS_MAP_VALUE_ITERATOR_TYPE,
-};
-
-STATIC_ASSERT((FIRST_NONSTRING_TYPE & kIsNotStringMask) != kStringTag);
-STATIC_ASSERT(JS_OBJECT_TYPE == Internals::kJSObjectType);
-STATIC_ASSERT(JS_API_OBJECT_TYPE == Internals::kJSApiObjectType);
-STATIC_ASSERT(JS_SPECIAL_API_OBJECT_TYPE == Internals::kJSSpecialApiObjectType);
-STATIC_ASSERT(FIRST_NONSTRING_TYPE == Internals::kFirstNonstringType);
-STATIC_ASSERT(ODDBALL_TYPE == Internals::kOddballType);
-STATIC_ASSERT(FOREIGN_TYPE == Internals::kForeignType);
-
-V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
- InstanceType instance_type);
-
// Result of an abstract relational comparison of x and y, implemented according
// to ES6 section 7.2.11 Abstract Relational Comparison.
enum class ComparisonResult {
@@ -640,26 +263,37 @@ class DependentCode;
class ElementsAccessor;
class EnumCache;
class FixedArrayBase;
-class PropertyArray;
+class FixedDoubleArray;
+class FreeSpace;
class FunctionLiteral;
class FunctionTemplateInfo;
+class JSAsyncGeneratorObject;
+class JSGlobalProxy;
+class JSPromise;
+class JSProxy;
class KeyAccumulator;
class LayoutDescriptor;
class LookupIterator;
class FieldType;
-class MicrotaskQueue;
class Module;
class ModuleInfoEntry;
+class MutableHeapNumber;
class ObjectHashTable;
class ObjectTemplateInfo;
class ObjectVisitor;
-class PreParsedScopeData;
+class PreparseData;
+class PropertyArray;
class PropertyCell;
class PropertyDescriptor;
+class PrototypeInfo;
+class ReadOnlyRoots;
+class RegExpMatchInfo;
class RootVisitor;
class SafepointEntry;
+class ScriptContextTable;
class SharedFunctionInfo;
class StringStream;
+class Symbol;
class FeedbackCell;
class FeedbackMetadata;
class FeedbackVector;
@@ -667,6 +301,8 @@ class UncompiledData;
class TemplateInfo;
class TransitionArray;
class TemplateList;
+class WasmInstanceObject;
+class WasmMemoryObject;
template <typename T>
class ZoneForwardList;
@@ -713,6 +349,7 @@ class ZoneForwardList;
V(DeoptimizationData) \
V(DependentCode) \
V(DescriptorArray) \
+ V(EmbedderDataArray) \
V(EphemeronHashTable) \
V(EnumCache) \
V(ExternalOneByteString) \
@@ -747,11 +384,13 @@ class ZoneForwardList;
V(HeapNumber) \
V(InternalizedString) \
V(JSArgumentsObject) \
+ V(JSArgumentsObjectWithLength) \
V(JSArray) \
V(JSArrayBuffer) \
V(JSArrayBufferView) \
V(JSArrayIterator) \
V(JSAsyncFromSyncIterator) \
+ V(JSAsyncFunctionObject) \
V(JSAsyncGeneratorObject) \
V(JSBoundFunction) \
V(JSCollection) \
@@ -780,7 +419,11 @@ class ZoneForwardList;
V(JSStringIterator) \
V(JSTypedArray) \
V(JSValue) \
+ V(JSWeakCell) \
+ V(JSWeakRef) \
V(JSWeakCollection) \
+ V(JSWeakFactory) \
+ V(JSWeakFactoryCleanupIterator) \
V(JSWeakMap) \
V(JSWeakSet) \
V(LoadHandler) \
@@ -800,7 +443,8 @@ class ZoneForwardList;
V(Oddball) \
V(OrderedHashMap) \
V(OrderedHashSet) \
- V(PreParsedScopeData) \
+ V(OrderedNameDictionary) \
+ V(PreparseData) \
V(PromiseReactionJobTask) \
V(PropertyArray) \
V(PropertyCell) \
@@ -818,6 +462,7 @@ class ZoneForwardList;
V(SloppyArgumentsElements) \
V(SmallOrderedHashMap) \
V(SmallOrderedHashSet) \
+ V(SmallOrderedNameDictionary) \
V(SourcePositionTableWithFrameCache) \
V(StoreHandler) \
V(String) \
@@ -833,8 +478,8 @@ class ZoneForwardList;
V(ThinString) \
V(TransitionArray) \
V(UncompiledData) \
- V(UncompiledDataWithPreParsedScope) \
- V(UncompiledDataWithoutPreParsedScope) \
+ V(UncompiledDataWithPreparseData) \
+ V(UncompiledDataWithoutPreparseData) \
V(Undetectable) \
V(UniqueName) \
V(WasmExceptionObject) \
@@ -857,6 +502,7 @@ class ZoneForwardList;
V(JSNumberFormat) \
V(JSPluralRules) \
V(JSRelativeTimeFormat) \
+ V(JSSegmentIterator) \
V(JSSegmenter)
#else
#define HEAP_OBJECT_ORDINARY_TYPE_LIST(V) HEAP_OBJECT_ORDINARY_TYPE_LIST_BASE(V)
@@ -882,159 +528,6 @@ class ZoneForwardList;
V(OptimizedOut, optimized_out) \
V(StaleRegister, stale_register)
-// List of object types that have a single unique instance type.
-#define INSTANCE_TYPE_CHECKERS_SINGLE_BASE(V) \
- V(AllocationSite, ALLOCATION_SITE_TYPE) \
- V(BigInt, BIGINT_TYPE) \
- V(ObjectBoilerplateDescription, OBJECT_BOILERPLATE_DESCRIPTION_TYPE) \
- V(BreakPoint, TUPLE2_TYPE) \
- V(BreakPointInfo, TUPLE2_TYPE) \
- V(ByteArray, BYTE_ARRAY_TYPE) \
- V(BytecodeArray, BYTECODE_ARRAY_TYPE) \
- V(CallHandlerInfo, CALL_HANDLER_INFO_TYPE) \
- V(Cell, CELL_TYPE) \
- V(Code, CODE_TYPE) \
- V(CodeDataContainer, CODE_DATA_CONTAINER_TYPE) \
- V(CoverageInfo, FIXED_ARRAY_TYPE) \
- V(DescriptorArray, DESCRIPTOR_ARRAY_TYPE) \
- V(EphemeronHashTable, EPHEMERON_HASH_TABLE_TYPE) \
- V(FeedbackCell, FEEDBACK_CELL_TYPE) \
- V(FeedbackMetadata, FEEDBACK_METADATA_TYPE) \
- V(FeedbackVector, FEEDBACK_VECTOR_TYPE) \
- V(FixedArrayExact, FIXED_ARRAY_TYPE) \
- V(FixedDoubleArray, FIXED_DOUBLE_ARRAY_TYPE) \
- V(Foreign, FOREIGN_TYPE) \
- V(FreeSpace, FREE_SPACE_TYPE) \
- V(GlobalDictionary, GLOBAL_DICTIONARY_TYPE) \
- V(HeapNumber, HEAP_NUMBER_TYPE) \
- V(JSArgumentsObject, JS_ARGUMENTS_TYPE) \
- V(JSArray, JS_ARRAY_TYPE) \
- V(JSArrayBuffer, JS_ARRAY_BUFFER_TYPE) \
- V(JSArrayIterator, JS_ARRAY_ITERATOR_TYPE) \
- V(JSAsyncFromSyncIterator, JS_ASYNC_FROM_SYNC_ITERATOR_TYPE) \
- V(JSAsyncGeneratorObject, JS_ASYNC_GENERATOR_OBJECT_TYPE) \
- V(JSBoundFunction, JS_BOUND_FUNCTION_TYPE) \
- V(JSContextExtensionObject, JS_CONTEXT_EXTENSION_OBJECT_TYPE) \
- V(JSDataView, JS_DATA_VIEW_TYPE) \
- V(JSDate, JS_DATE_TYPE) \
- V(JSError, JS_ERROR_TYPE) \
- V(JSFunction, JS_FUNCTION_TYPE) \
- V(JSGlobalObject, JS_GLOBAL_OBJECT_TYPE) \
- V(JSGlobalProxy, JS_GLOBAL_PROXY_TYPE) \
- V(JSMap, JS_MAP_TYPE) \
- V(JSMessageObject, JS_MESSAGE_OBJECT_TYPE) \
- V(JSModuleNamespace, JS_MODULE_NAMESPACE_TYPE) \
- V(JSPromise, JS_PROMISE_TYPE) \
- V(JSProxy, JS_PROXY_TYPE) \
- V(JSRegExp, JS_REGEXP_TYPE) \
- V(JSRegExpResult, JS_ARRAY_TYPE) \
- V(JSRegExpStringIterator, JS_REGEXP_STRING_ITERATOR_TYPE) \
- V(JSSet, JS_SET_TYPE) \
- V(JSStringIterator, JS_STRING_ITERATOR_TYPE) \
- V(JSTypedArray, JS_TYPED_ARRAY_TYPE) \
- V(JSValue, JS_VALUE_TYPE) \
- V(JSWeakMap, JS_WEAK_MAP_TYPE) \
- V(JSWeakSet, JS_WEAK_SET_TYPE) \
- V(LoadHandler, LOAD_HANDLER_TYPE) \
- V(Map, MAP_TYPE) \
- V(MutableHeapNumber, MUTABLE_HEAP_NUMBER_TYPE) \
- V(NameDictionary, NAME_DICTIONARY_TYPE) \
- V(NativeContext, NATIVE_CONTEXT_TYPE) \
- V(NumberDictionary, NUMBER_DICTIONARY_TYPE) \
- V(Oddball, ODDBALL_TYPE) \
- V(OrderedHashMap, ORDERED_HASH_MAP_TYPE) \
- V(OrderedHashSet, ORDERED_HASH_SET_TYPE) \
- V(PreParsedScopeData, PRE_PARSED_SCOPE_DATA_TYPE) \
- V(PropertyArray, PROPERTY_ARRAY_TYPE) \
- V(PropertyCell, PROPERTY_CELL_TYPE) \
- V(PropertyDescriptorObject, FIXED_ARRAY_TYPE) \
- V(ScopeInfo, SCOPE_INFO_TYPE) \
- V(ScriptContextTable, SCRIPT_CONTEXT_TABLE_TYPE) \
- V(SharedFunctionInfo, SHARED_FUNCTION_INFO_TYPE) \
- V(SimpleNumberDictionary, SIMPLE_NUMBER_DICTIONARY_TYPE) \
- V(SmallOrderedHashMap, SMALL_ORDERED_HASH_MAP_TYPE) \
- V(SmallOrderedHashSet, SMALL_ORDERED_HASH_SET_TYPE) \
- V(SourcePositionTableWithFrameCache, TUPLE2_TYPE) \
- V(StoreHandler, STORE_HANDLER_TYPE) \
- V(StringTable, STRING_TABLE_TYPE) \
- V(Symbol, SYMBOL_TYPE) \
- V(TemplateObjectDescription, TUPLE2_TYPE) \
- V(TransitionArray, TRANSITION_ARRAY_TYPE) \
- V(UncompiledDataWithoutPreParsedScope, \
- UNCOMPILED_DATA_WITHOUT_PRE_PARSED_SCOPE_TYPE) \
- V(UncompiledDataWithPreParsedScope, \
- UNCOMPILED_DATA_WITH_PRE_PARSED_SCOPE_TYPE) \
- V(WasmExceptionObject, WASM_EXCEPTION_TYPE) \
- V(WasmGlobalObject, WASM_GLOBAL_TYPE) \
- V(WasmInstanceObject, WASM_INSTANCE_TYPE) \
- V(WasmMemoryObject, WASM_MEMORY_TYPE) \
- V(WasmModuleObject, WASM_MODULE_TYPE) \
- V(WasmTableObject, WASM_TABLE_TYPE) \
- V(WeakArrayList, WEAK_ARRAY_LIST_TYPE)
-#ifdef V8_INTL_SUPPORT
-
-#define INSTANCE_TYPE_CHECKERS_SINGLE(V) \
- INSTANCE_TYPE_CHECKERS_SINGLE_BASE(V) \
- V(JSV8BreakIterator, JS_INTL_V8_BREAK_ITERATOR_TYPE) \
- V(JSCollator, JS_INTL_COLLATOR_TYPE) \
- V(JSDateTimeFormat, JS_INTL_DATE_TIME_FORMAT_TYPE) \
- V(JSListFormat, JS_INTL_LIST_FORMAT_TYPE) \
- V(JSLocale, JS_INTL_LOCALE_TYPE) \
- V(JSNumberFormat, JS_INTL_NUMBER_FORMAT_TYPE) \
- V(JSPluralRules, JS_INTL_PLURAL_RULES_TYPE) \
- V(JSRelativeTimeFormat, JS_INTL_RELATIVE_TIME_FORMAT_TYPE) \
- V(JSSegmenter, JS_INTL_SEGMENTER_TYPE)
-
-#else
-
-#define INSTANCE_TYPE_CHECKERS_SINGLE(V) INSTANCE_TYPE_CHECKERS_SINGLE_BASE(V)
-
-#endif // V8_INTL_SUPPORT
-
-#define INSTANCE_TYPE_CHECKERS_RANGE(V) \
- V(Context, FIRST_CONTEXT_TYPE, LAST_CONTEXT_TYPE) \
- V(Dictionary, FIRST_DICTIONARY_TYPE, LAST_DICTIONARY_TYPE) \
- V(FixedArray, FIRST_FIXED_ARRAY_TYPE, LAST_FIXED_ARRAY_TYPE) \
- V(FixedTypedArrayBase, FIRST_FIXED_TYPED_ARRAY_TYPE, \
- LAST_FIXED_TYPED_ARRAY_TYPE) \
- V(HashTable, FIRST_HASH_TABLE_TYPE, LAST_HASH_TABLE_TYPE) \
- V(JSMapIterator, FIRST_MAP_ITERATOR_TYPE, LAST_MAP_ITERATOR_TYPE) \
- V(JSSetIterator, FIRST_SET_ITERATOR_TYPE, LAST_SET_ITERATOR_TYPE) \
- V(Microtask, FIRST_MICROTASK_TYPE, LAST_MICROTASK_TYPE) \
- V(Name, FIRST_TYPE, LAST_NAME_TYPE) \
- V(String, FIRST_TYPE, FIRST_NONSTRING_TYPE - 1) \
- V(WeakFixedArray, FIRST_WEAK_FIXED_ARRAY_TYPE, LAST_WEAK_FIXED_ARRAY_TYPE)
-
-#define INSTANCE_TYPE_CHECKERS_CUSTOM(V) \
- V(FixedArrayBase) \
- V(InternalizedString) \
- V(JSObject) \
- V(JSReceiver)
-
-#define INSTANCE_TYPE_CHECKERS(V) \
- INSTANCE_TYPE_CHECKERS_SINGLE(V) \
- INSTANCE_TYPE_CHECKERS_RANGE(V) \
- INSTANCE_TYPE_CHECKERS_CUSTOM(V)
-
-namespace InstanceTypeChecker {
-#define IS_TYPE_FUNCTION_DECL(Type, ...) \
- V8_INLINE bool Is##Type(InstanceType instance_type);
-
-INSTANCE_TYPE_CHECKERS(IS_TYPE_FUNCTION_DECL)
-
-#define TYPED_ARRAY_IS_TYPE_FUNCTION_DECL(Type, ...) \
- IS_TYPE_FUNCTION_DECL(Fixed##Type##Array)
-TYPED_ARRAYS(TYPED_ARRAY_IS_TYPE_FUNCTION_DECL)
-#undef TYPED_ARRAY_IS_TYPE_FUNCTION_DECL
-
-#define STRUCT_IS_TYPE_FUNCTION_DECL(NAME, Name, name) \
- IS_TYPE_FUNCTION_DECL(Name)
-STRUCT_LIST(STRUCT_IS_TYPE_FUNCTION_DECL)
-#undef STRUCT_IS_TYPE_FUNCTION_DECL
-
-#undef IS_TYPE_FUNCTION_DECL
-} // namespace InstanceTypeChecker
-
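For orientation (editorial note): IS_TYPE_FUNCTION_DECL takes (Type, ...) and ignores any range bounds, so each X-macro entry above becomes one predicate declaration, roughly:

// Illustrative expansion for two sample entries (not literal diff content):
// V8_INLINE bool IsCode(InstanceType instance_type);
// V8_INLINE bool IsContext(InstanceType instance_type);
// Call sites reach these as InstanceTypeChecker::IsCode(map->instance_type()).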
// The element types selection for CreateListFromArrayLike.
enum class ElementTypes { kAll, kStringAndSymbol };
@@ -1042,10 +535,29 @@ enum class ElementTypes { kAll, kStringAndSymbol };
// object hierarchy.
// Object does not use any virtual functions to avoid the
// allocation of the C++ vtable.
-// Since both Smi and HeapObject are subclasses of Object no
-// data members can be present in Object.
+// There must only be a single data member in Object: the Address ptr,
+// containing the tagged heap pointer that this Object instance refers to.
+// For a design overview, see https://goo.gl/Ph4CGz.
class Object {
public:
+ constexpr Object() : ptr_(kNullAddress) {}
+ explicit constexpr Object(Address ptr) : ptr_(ptr) {}
+
+ // Make clang on Linux catch what MSVC complains about on Windows:
+ operator bool() const = delete;
+
+ bool operator==(const Object that) const { return this->ptr() == that.ptr(); }
+ bool operator!=(const Object that) const { return this->ptr() != that.ptr(); }
+ // Usage in std::set requires operator<.
+ bool operator<(const Object that) const { return this->ptr() < that.ptr(); }
+
+ // Returns the tagged "(heap) object pointer" representation of this object.
+ constexpr Address ptr() const { return ptr_; }
+
+ // These operator->() overloads are required for handlified code.
+ Object* operator->() { return this; }
+ const Object* operator->() const { return this; }
+
// Type testing.
bool IsObject() const { return true; }
@@ -1054,8 +566,6 @@ class Object {
HEAP_OBJECT_TYPE_LIST(IS_TYPE_FUNCTION_DECL)
#undef IS_TYPE_FUNCTION_DECL
- V8_INLINE bool IsExternal(Isolate* isolate) const;
-
// Oddball checks are faster when they are raw pointer comparisons, so the
// isolate/read-only roots overloads should be preferred where possible.
#define IS_TYPE_FUNCTION_DECL(Type, Value) \
@@ -1105,6 +615,7 @@ class Object {
V8_INLINE
V8_WARN_UNUSED_RESULT static Maybe<bool> IsArray(Handle<Object> object);
+ V8_INLINE bool IsHashTableBase() const;
V8_INLINE bool IsSmallOrderedHashTable() const;
// Extract the number.
@@ -1123,7 +634,7 @@ class Object {
// Checks whether two valid primitive encodings of a property name resolve to
// the same logical property. E.g., the smi 1, the string "1" and the double
// 1 all refer to the same property, so this helper will return true.
- inline bool KeyEquals(Object* other);
+ inline bool KeyEquals(Object other);
inline bool FilterKey(PropertyFilter filter);
@@ -1155,7 +666,7 @@ class Object {
Handle<Object> y);
// ES6 section 7.2.13 Strict Equality Comparison
- bool StrictEquals(Object* that);
+ bool StrictEquals(Object that);
// ES6 section 7.1.13 ToObject
// Convert to a JSObject if needed.
@@ -1219,8 +730,7 @@ class Object {
// ES6 section 7.1.17 ToIndex
V8_WARN_UNUSED_RESULT static inline MaybeHandle<Object> ToIndex(
- Isolate* isolate, Handle<Object> input,
- MessageTemplate::Template error_index);
+ Isolate* isolate, Handle<Object> input, MessageTemplate error_index);
// ES6 section 7.3.9 GetMethod
V8_WARN_UNUSED_RESULT static MaybeHandle<Object> GetMethod(
@@ -1332,24 +842,23 @@ class Object {
// Returns the permanent hash code associated with this object. May return
// undefined if not yet created.
- inline Object* GetHash();
+ inline Object GetHash();
// Returns the permanent hash code associated with this object depending on
// the actual object type. May create and store a hash code if needed and none
// exists.
- Smi* GetOrCreateHash(Isolate* isolate);
- static Smi* GetOrCreateHash(Isolate* isolate, Object* key);
+ Smi GetOrCreateHash(Isolate* isolate);
// Checks whether this object has the same value as the given one. This
// function is implemented according to ES5, section 9.12 and can be used
- // to implement the Harmony "egal" function.
- V8_EXPORT_PRIVATE bool SameValue(Object* other);
+ // to implement the Object.is function.
+ V8_EXPORT_PRIVATE bool SameValue(Object other);
// Checks whether this object has the same value as the given one.
// +0 and -0 are treated equal. Everything else is the same as SameValue.
// This function is implemented according to ES6, section 7.2.4 and is used
// by ES6 Map and Set.
- bool SameValueZero(Object* other);
+ bool SameValueZero(Object other);
// ES6 section 9.4.2.3 ArraySpeciesCreate (part of it)
V8_WARN_UNUSED_RESULT static MaybeHandle<Object> ArraySpeciesConstructor(
@@ -1367,28 +876,57 @@ class Object {
// Tries to convert an object to an array index. Returns true and sets the
// output parameter if it succeeds. Equivalent to ToArrayLength, but does not
// allow kMaxUInt32.
- inline bool ToArrayIndex(uint32_t* index) const;
+ V8_WARN_UNUSED_RESULT inline bool ToArrayIndex(uint32_t* index) const;
// Returns true if the result of iterating over the object is the same
// (including observable effects) as simply accessing the properties between 0
// and length.
bool IterationHasObservableEffects();
+ //
+ // The following GetHeapObjectXX methods mimic corresponding functionality
+ // in MaybeObject. Having them here allows us to unify code that processes
+ // ObjectSlots and MaybeObjectSlots.
+ //
+
+ // If this Object is a strong pointer to a HeapObject, returns true and
+ // sets *result. Otherwise returns false.
+ inline bool GetHeapObjectIfStrong(HeapObject* result) const;
+
+ // If this Object is a strong pointer to a HeapObject (weak pointers are not
+ // expected), returns true and sets *result. Otherwise returns false.
+ inline bool GetHeapObject(HeapObject* result) const;
+
+ // DCHECKs that this Object is a strong pointer to a HeapObject and returns
+ // the HeapObject.
+ inline HeapObject GetHeapObject() const;
+
+ // Always returns false because Object is not expected to be a weak pointer
+ // to a HeapObject.
+ inline bool GetHeapObjectIfWeak(HeapObject* result) const {
+ DCHECK(!HasWeakHeapObjectTag(ptr()));
+ return false;
+ }
+ // Always returns false because Object is not expected to be a weak pointer
+ // to a HeapObject.
+ inline bool IsCleared() const { return false; }
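The payoff of mirroring MaybeObject's API (editorial sketch; the helper below is hypothetical, not part of V8): generic slot-processing code can be written once against either type, and for Object the weak branch is statically dead:

// Works for Object and MaybeObject alike; with Object, GetHeapObjectIfWeak()
// above is hardwired to false, so weak-handling branches compile away.
template <typename TSlotValue>
bool TryGetStrongTarget(TSlotValue value, HeapObject* out) {
  return value.GetHeapObjectIfStrong(out);
}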
+
DECL_VERIFIER(Object)
+
#ifdef VERIFY_HEAP
// Verify a pointer is a valid object pointer.
- static void VerifyPointer(Isolate* isolate, Object* p);
+ static void VerifyPointer(Isolate* isolate, Object p);
#endif
inline void VerifyApiCallResultType();
// Prints this object without details.
- void ShortPrint(FILE* out = stdout);
+ void ShortPrint(FILE* out = stdout) const;
// Prints this object without details to a message accumulator.
- void ShortPrint(StringStream* accumulator);
+ void ShortPrint(StringStream* accumulator) const;
- void ShortPrint(std::ostream& os); // NOLINT
+ void ShortPrint(std::ostream& os) const; // NOLINT
DECL_CAST(Object)
@@ -1397,21 +935,37 @@ class Object {
#ifdef OBJECT_PRINT
// For our gdb macros, we should perhaps change these in the future.
- void Print();
+ void Print() const;
// Prints this object with details.
- void Print(std::ostream& os); // NOLINT
+ void Print(std::ostream& os) const; // NOLINT
#else
- void Print() { ShortPrint(); }
- void Print(std::ostream& os) { ShortPrint(os); } // NOLINT
+ void Print() const { ShortPrint(); }
+ void Print(std::ostream& os) const { ShortPrint(os); } // NOLINT
#endif
+ // For use with std::unordered_set.
+ struct Hasher {
+ size_t operator()(const Object o) const {
+ return std::hash<v8::internal::Address>{}(o.ptr());
+ }
+ };
+
+ // For use with std::map.
+ struct Comparer {
+ bool operator()(const Object a, const Object b) const {
+ return a.ptr() < b.ptr();
+ }
+ };
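Usage sketch for the two functors above (editorial note; assumes the usual <map>/<unordered_set> includes):

// Hash-based and ordered containers keyed by tagged Object values.
std::unordered_set<Object, Object::Hasher> visited;
std::map<Object, int, Object::Comparer> use_counts;

Both hash and order derive from ptr(), i.e. from the tagged address, which stays valid only as long as the GC does not move the referenced objects.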
+
private:
+ friend class CompressedObjectSlot;
+ friend class FullObjectSlot;
friend class LookupIterator;
friend class StringStream;
// Return the map of the root of object's prototype chain.
- Map* GetPrototypeChainRootMap(Isolate* isolate) const;
+ Map GetPrototypeChainRootMap(Isolate* isolate) const;
// Returns a non-SMI for JSReceivers, but returns the hash code for
// simple objects. This avoids a double lookup in the cases where
@@ -1420,7 +974,7 @@ class Object {
//
// Despite its size, this needs to be inlined for performance
// reasons.
- static inline Object* GetSimpleHash(Object* object);
+ static inline Object GetSimpleHash(Object object);
// Helper for SetProperty and SetSuperProperty.
// Return value is only meaningful if [found] is set to true on return.
@@ -1445,93 +999,36 @@ class Object {
V8_WARN_UNUSED_RESULT static MaybeHandle<Object> ConvertToLength(
Isolate* isolate, Handle<Object> input);
V8_WARN_UNUSED_RESULT static MaybeHandle<Object> ConvertToIndex(
- Isolate* isolate, Handle<Object> input,
- MessageTemplate::Template error_index);
+ Isolate* isolate, Handle<Object> input, MessageTemplate error_index);
- DISALLOW_IMPLICIT_CONSTRUCTORS(Object);
+ Address ptr_;
};
+V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os, const Object& obj);
// In objects.h to be usable without objects-inl.h inclusion.
-bool Object::IsSmi() const { return HAS_SMI_TAG(this); }
+bool Object::IsSmi() const { return HAS_SMI_TAG(ptr()); }
bool Object::IsHeapObject() const {
- DCHECK_EQ(!IsSmi(), Internals::HasHeapObjectTag(this));
+ DCHECK_EQ(!IsSmi(), Internals::HasHeapObjectTag(ptr()));
return !IsSmi();
}
struct Brief {
- V8_EXPORT_PRIVATE explicit Brief(const Object* v);
- explicit Brief(const MaybeObject* v) : value(v) {}
- const MaybeObject* value;
+ V8_EXPORT_PRIVATE explicit Brief(const Object v);
+ explicit Brief(const MaybeObject v);
+ // {value} is a tagged heap object reference (weak or strong), equivalent to
+ // a MaybeObject's payload. It has a plain Address type to keep #includes
+ // lightweight.
+ const Address value;
};
V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os, const Brief& v);
-// Smi represents integer Numbers that can be stored in 31 bits.
-// Smis are immediate, which means they are NOT allocated in the heap.
-// The this pointer has the following format: [31 bit signed int] 0
-// For long smis it has the following format:
-// [32 bit signed int] [31 bits zero padding] 0
-// Smi stands for small integer.
-class Smi: public Object {
- public:
- // Returns the integer value.
- inline int value() const { return Internals::SmiValue(this); }
- inline Smi* ToUint32Smi() {
- if (value() <= 0) return Smi::kZero;
- return Smi::FromInt(static_cast<uint32_t>(value()));
- }
-
- // Convert a Smi object to an int.
- static inline int ToInt(const Object* object);
-
- // Convert a value to a Smi object.
- static inline Smi* FromInt(int value) {
- DCHECK(Smi::IsValid(value));
- return reinterpret_cast<Smi*>(Internals::IntToSmi(value));
- }
-
- static inline Smi* FromIntptr(intptr_t value) {
- DCHECK(Smi::IsValid(value));
- int smi_shift_bits = kSmiTagSize + kSmiShiftSize;
- return reinterpret_cast<Smi*>((value << smi_shift_bits) | kSmiTag);
- }
-
- template <typename E,
- typename = typename std::enable_if<std::is_enum<E>::value>::type>
- static inline Smi* FromEnum(E value) {
- STATIC_ASSERT(sizeof(E) <= sizeof(int));
- return FromInt(static_cast<int>(value));
- }
-
- // Returns whether value can be represented in a Smi.
- static inline bool IsValid(intptr_t value) {
- bool result = Internals::IsValidSmi(value);
- DCHECK_EQ(result, value >= kMinValue && value <= kMaxValue);
- return result;
- }
-
- // Compare two Smis x, y as if they were converted to strings and then
- // compared lexicographically. Returns:
- // -1 if x < y.
- // 0 if x == y.
- // 1 if x > y.
- static Smi* LexicographicCompare(Isolate* isolate, Smi* x, Smi* y);
-
- DECL_CAST(Smi)
-
- // Dispatched behavior.
- V8_EXPORT_PRIVATE void SmiPrint(std::ostream& os) const; // NOLINT
- DECL_VERIFIER(Smi)
-
- static constexpr Smi* const kZero = nullptr;
- static const int kMinValue = kSmiMinValue;
- static const int kMaxValue = kSmiMaxValue;
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(Smi);
-};
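A worked example of the Smi encoding described above (editorial note, not diff content):

// 32-bit: Smi::FromInt(42) yields (42 << 1) | 0 == 0x54; value() shifts back.
static_assert(((42 << 1) | 0) == 0x54, "32-bit smi payload");
// 64-bit "long smi": 42 is stored as 42 << 32 == 0x2A00000000, i.e.
// [32-bit payload][31 zero bits][tag bit 0].
static_assert((42LL << 32) == 0x2A00000000LL, "64-bit smi payload");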
-
+// Objects should never have the weak tag; this variant is for overzealous
+// checking.
+V8_INLINE static bool HasWeakHeapObjectTag(const Object value) {
+ return ((value->ptr() & kHeapObjectTagMask) == kWeakHeapObjectTag);
+}
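Tag recap for the check above (editorial note; the low two bits of a tagged value follow V8's usual scheme):

//   x0  Smi
//   01  strong HeapObject reference
//   11  weak HeapObject reference (only legal inside a MaybeObject)
// HasWeakHeapObjectTag(Object) should therefore never return true; it exists
// to catch a MaybeObject payload that has leaked into a plain Object.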
// Heap objects typically have a map pointer in their first word. However,
// during GC other data (e.g. mark bits, forwarding addresses) is sometimes
@@ -1542,10 +1039,10 @@ class MapWord {
// Normal state: the map word contains a map pointer.
// Create a map word from a map pointer.
- static inline MapWord FromMap(const Map* map);
+ static inline MapWord FromMap(const Map map);
// View this map word as a map pointer.
- inline Map* ToMap() const;
+ inline Map ToMap() const;
// Scavenge collection: the map word of live objects in the from space
// contains a forwarding address (a heap object pointer in the to space).
@@ -1556,10 +1053,10 @@ class MapWord {
inline bool IsForwardingAddress() const;
// Create a map word from a forwarding address.
- static inline MapWord FromForwardingAddress(HeapObject* object);
+ static inline MapWord FromForwardingAddress(HeapObject object);
// View this map word as a forwarding address.
- inline HeapObject* ToForwardingAddress();
+ inline HeapObject ToForwardingAddress();
static inline MapWord FromRawValue(uintptr_t value) {
return MapWord(value);
@@ -1573,195 +1070,9 @@ class MapWord {
// HeapObject calls the private constructor and directly reads the value.
friend class HeapObject;
- explicit MapWord(uintptr_t value) : value_(value) {}
+ explicit MapWord(Address value) : value_(value) {}
- uintptr_t value_;
-};
-
-
-// HeapObject is the superclass for all classes describing heap allocated
-// objects.
-class HeapObject: public Object {
- public:
- // [map]: Contains a map which contains the object's reflective
- // information.
- inline Map* map() const;
- inline void set_map(Map* value);
-
- inline HeapObject** map_slot();
-
- // The no-write-barrier version. This is OK if the object is white and in
- // new space, or if the value is an immortal immutable object, like the maps
- // of primitive (non-JS) objects like strings, heap numbers etc.
- inline void set_map_no_write_barrier(Map* value);
-
- // Get the map using acquire load.
- inline Map* synchronized_map() const;
- inline MapWord synchronized_map_word() const;
-
- // Set the map using release store
- inline void synchronized_set_map(Map* value);
- inline void synchronized_set_map_word(MapWord map_word);
-
- // Initialize the map immediately after the object is allocated.
- // Do not use this outside Heap.
- inline void set_map_after_allocation(
- Map* value, WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
-
- // During garbage collection, the map word of a heap object does not
- // necessarily contain a map pointer.
- inline MapWord map_word() const;
- inline void set_map_word(MapWord map_word);
-
- // TODO(v8:7464): Once RO_SPACE is shared between isolates, this method can be
- // removed as ReadOnlyRoots will be accessible from a global variable. For now
- // this method exists to help remove GetIsolate/GetHeap from HeapObject, in a
- // way that doesn't require passing Isolate/Heap down huge call chains or to
- // places where it might not be safe to access it.
- inline ReadOnlyRoots GetReadOnlyRoots() const;
-
-#define IS_TYPE_FUNCTION_DECL(Type) V8_INLINE bool Is##Type() const;
- HEAP_OBJECT_TYPE_LIST(IS_TYPE_FUNCTION_DECL)
-#undef IS_TYPE_FUNCTION_DECL
-
- V8_INLINE bool IsExternal(Isolate* isolate) const;
-
-// Oddball checks are faster when they are raw pointer comparisons, so the
-// isolate/read-only roots overloads should be preferred where possible.
-#define IS_TYPE_FUNCTION_DECL(Type, Value) \
- V8_INLINE bool Is##Type(Isolate* isolate) const; \
- V8_INLINE bool Is##Type(ReadOnlyRoots roots) const; \
- V8_INLINE bool Is##Type() const;
- ODDBALL_LIST(IS_TYPE_FUNCTION_DECL)
-#undef IS_TYPE_FUNCTION_DECL
-
- V8_INLINE bool IsNullOrUndefined(Isolate* isolate) const;
- V8_INLINE bool IsNullOrUndefined(ReadOnlyRoots roots) const;
- V8_INLINE bool IsNullOrUndefined() const;
-
-#define DECL_STRUCT_PREDICATE(NAME, Name, name) V8_INLINE bool Is##Name() const;
- STRUCT_LIST(DECL_STRUCT_PREDICATE)
-#undef DECL_STRUCT_PREDICATE
-
- // Converts an address to a HeapObject pointer.
- static inline HeapObject* FromAddress(Address address) {
- DCHECK_TAG_ALIGNED(address);
- return reinterpret_cast<HeapObject*>(address + kHeapObjectTag);
- }
-
- // Returns the address of this HeapObject.
- inline Address address() const {
- return reinterpret_cast<Address>(this) - kHeapObjectTag;
- }
-
- // Iterates over pointers contained in the object (including the Map).
- // If it's not performance critical iteration use the non-templatized
- // version.
- void Iterate(ObjectVisitor* v);
-
- template <typename ObjectVisitor>
- inline void IterateFast(ObjectVisitor* v);
-
- // Iterates over all pointers contained in the object except the
- // first map pointer. The object type is given in the first
- // parameter. This function does not access the map pointer in the
- // object, and so is safe to call while the map pointer is modified.
- // If it's not performance critical iteration use the non-templatized
- // version.
- void IterateBody(ObjectVisitor* v);
- void IterateBody(Map* map, int object_size, ObjectVisitor* v);
-
- template <typename ObjectVisitor>
- inline void IterateBodyFast(ObjectVisitor* v);
-
- template <typename ObjectVisitor>
- inline void IterateBodyFast(Map* map, int object_size, ObjectVisitor* v);
-
- // Returns true if the object contains a tagged value at given offset.
- // It is used for invalid slots filtering. If the offset points outside
- // of the object or to the map word, the result is UNDEFINED (!!!).
- bool IsValidSlot(Map* map, int offset);
-
- // Returns the heap object's size in bytes
- inline int Size() const;
-
- // Given a heap object's map pointer, returns the heap size in bytes
- // Useful when the map pointer field is used for other purposes.
- // GC internal.
- inline int SizeFromMap(Map* map) const;
-
- // Returns the field at offset in obj, as a read/write Object* reference.
- // Does no checking, and is safe to use during GC, while maps are invalid.
- // Does not invoke write barrier, so should only be assigned to
- // during marking GC.
- static inline Object** RawField(const HeapObject* obj, int offset);
- static inline MaybeObject** RawMaybeWeakField(HeapObject* obj, int offset);
-
- DECL_CAST(HeapObject)
-
- // Return the write barrier mode for this. Callers of this function
- // must be able to present a reference to a DisallowHeapAllocation
- // object as a sign that they are not going to use this function
- // from code that allocates and thus invalidates the returned write
- // barrier mode.
- inline WriteBarrierMode GetWriteBarrierMode(
- const DisallowHeapAllocation& promise);
-
- // Dispatched behavior.
- void HeapObjectShortPrint(std::ostream& os); // NOLINT
-#ifdef OBJECT_PRINT
- void PrintHeader(std::ostream& os, const char* id); // NOLINT
-#endif
- DECL_PRINTER(HeapObject)
- DECL_VERIFIER(HeapObject)
-#ifdef VERIFY_HEAP
- inline void VerifyObjectField(Isolate* isolate, int offset);
- inline void VerifySmiField(int offset);
- inline void VerifyMaybeObjectField(Isolate* isolate, int offset);
-
- // Verify a pointer is a valid HeapObject pointer that points to object
- // areas in the heap.
- static void VerifyHeapPointer(Isolate* isolate, Object* p);
-#endif
-
- static inline AllocationAlignment RequiredAlignment(Map* map);
-
- // Whether the object needs rehashing. That is the case if the object's
- // content depends on FLAG_hash_seed. When the object is deserialized into
- // a heap with a different hash seed, it needs to adapt.
- inline bool NeedsRehashing() const;
-
- // Rehashing support is not implemented for all objects that need rehashing.
- // With objects that need rehashing but cannot be rehashed, rehashing has to
- // be disabled.
- bool CanBeRehashed() const;
-
- // Rehash the object based on the layout inferred from its map.
- void RehashBasedOnMap(Isolate* isolate);
-
- // Layout description.
- // First field in a heap object is map.
- static const int kMapOffset = Object::kHeaderSize;
- static const int kHeaderSize = kMapOffset + kPointerSize;
-
- STATIC_ASSERT(kMapOffset == Internals::kHeapObjectMapOffset);
-
- inline Address GetFieldAddress(int field_offset) const;
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(HeapObject);
-};
-
-// Mixin class for objects that can never be in RO space.
-// TODO(leszeks): Add checks in the factory that we never allocate these objects
-// in RO space.
-class NeverReadOnlySpaceObject {
- public:
- // The Heap the object was allocated in. Used also to access Isolate.
- inline Heap* GetHeap() const;
-
- // Convenience method to get current isolate.
- inline Isolate* GetIsolate() const;
+ Address value_;
};
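Illustrative use of the MapWord API above during a scavenge (editorial sketch, not code from this diff; the copy/slot-update steps are left as comments because they are hypothetical here):

void ScavengeObject(HeapObject object) {
  MapWord word = object->map_word();
  if (word.IsForwardingAddress()) {
    // Already evacuated: follow the forwarding pointer left behind.
    HeapObject copy = word.ToForwardingAddress();
    // ... update the visiting slot to point at 'copy' ...
  } else {
    // ... copy the object to to-space, then leave a forwarding address:
    //   object->set_map_word(MapWord::FromForwardingAddress(copy));
  }
}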
template <int start_offset, int end_offset, int size>
@@ -1770,76 +1081,12 @@ class FixedBodyDescriptor;
template <int start_offset>
class FlexibleBodyDescriptor;
+template <int start_offset>
+class FlexibleWeakBodyDescriptor;
+
template <class ParentBodyDescriptor, class ChildBodyDescriptor>
class SubclassBodyDescriptor;
-// The HeapNumber class describes heap allocated numbers that cannot be
-// represented in a Smi (small integer). MutableHeapNumber is the same, but its
-// number value can change over time (it is used only as property storage).
-// HeapNumberBase merely exists to avoid code duplication.
-class HeapNumberBase : public HeapObject {
- public:
- // [value]: number value.
- inline double value() const;
- inline void set_value(double value);
-
- inline uint64_t value_as_bits() const;
- inline void set_value_as_bits(uint64_t bits);
-
- inline int get_exponent();
- inline int get_sign();
-
- // Layout description.
- static const int kValueOffset = HeapObject::kHeaderSize;
- // IEEE doubles are two 32 bit words. The first is just mantissa, the second
- // is a mixture of sign, exponent and mantissa. The offsets of two 32 bit
- // words within double numbers are endian dependent and they are set
- // accordingly.
-#if defined(V8_TARGET_LITTLE_ENDIAN)
- static const int kMantissaOffset = kValueOffset;
- static const int kExponentOffset = kValueOffset + 4;
-#elif defined(V8_TARGET_BIG_ENDIAN)
- static const int kMantissaOffset = kValueOffset + 4;
- static const int kExponentOffset = kValueOffset;
-#else
-#error Unknown byte ordering
-#endif
-
- static const int kSize = kValueOffset + kDoubleSize;
- static const uint32_t kSignMask = 0x80000000u;
- static const uint32_t kExponentMask = 0x7ff00000u;
- static const uint32_t kMantissaMask = 0xfffffu;
- static const int kMantissaBits = 52;
- static const int kExponentBits = 11;
- static const int kExponentBias = 1023;
- static const int kExponentShift = 20;
- static const int kInfinityOrNanExponent =
- (kExponentMask >> kExponentShift) - kExponentBias;
- static const int kMantissaBitsInTopWord = 20;
- static const int kNonMantissaBitsInTopWord = 12;
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(HeapNumberBase)
-};
-
-class HeapNumber : public HeapNumberBase {
- public:
- DECL_CAST(HeapNumber)
- V8_EXPORT_PRIVATE void HeapNumberPrint(std::ostream& os);
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(HeapNumber)
-};
-
-class MutableHeapNumber : public HeapNumberBase {
- public:
- DECL_CAST(MutableHeapNumber)
- V8_EXPORT_PRIVATE void MutableHeapNumberPrint(std::ostream& os);
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(MutableHeapNumber)
-};
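A worked example for the HeapNumberBase constants above (editorial note): the double 1.0 has bit pattern 0x3FF0000000000000, so its exponent word is 0x3FF00000 and the decoded exponent is zero:

// ((exponent word & kExponentMask) >> kExponentShift) - kExponentBias
static_assert(static_cast<int>((0x3FF00000u & 0x7ff00000u) >> 20) - 1023 == 0,
              "get_exponent() of 1.0 is 0");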
-
enum EnsureElementsMode {
DONT_ALLOW_DOUBLE_ELEMENTS,
ALLOW_COPIED_DOUBLE_ELEMENTS,
@@ -1864,114 +1111,6 @@ enum class KeyCollectionMode {
static_cast<int>(v8::KeyCollectionMode::kIncludePrototypes)
};
-// FreeSpace are fixed-size free memory blocks used by the heap and GC.
-// They look like heap objects (are heap object tagged and have a map) so that
-// the heap remains iterable. They have a size and a next pointer.
-// The next pointer is the raw address of the next FreeSpace object (or NULL)
-// in the free list.
-class FreeSpace: public HeapObject {
- public:
- // [size]: size of the free space including the header.
- inline int size() const;
- inline void set_size(int value);
-
- inline int relaxed_read_size() const;
- inline void relaxed_write_size(int value);
-
- inline int Size();
-
- // Accessors for the next field.
- inline FreeSpace* next();
- inline void set_next(FreeSpace* next);
-
- inline static FreeSpace* cast(HeapObject* obj);
-
- // Dispatched behavior.
- DECL_PRINTER(FreeSpace)
- DECL_VERIFIER(FreeSpace)
-
- // Layout description.
- // Size is smi tagged when it is stored.
- static const int kSizeOffset = HeapObject::kHeaderSize;
- static const int kNextOffset = POINTER_SIZE_ALIGN(kSizeOffset + kPointerSize);
- static const int kSize = kNextOffset + kPointerSize;
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(FreeSpace);
-};
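Sketch of walking the free list via the accessors above (editorial note; 'head' is a hypothetical list head, using the pre-7.3 pointer style that this hunk removes):

int TotalFreeBytes(FreeSpace* head) {
  int total = 0;
  for (FreeSpace* node = head; node != nullptr; node = node->next()) {
    total += node->Size();
  }
  return total;
}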
-
-class PrototypeInfo;
-
-// An abstract superclass, a marker class really, for simple structure classes.
-// It doesn't carry much functionality but allows struct classes to be
-// identified in the type system.
-class Struct: public HeapObject {
- public:
- inline void InitializeBody(int object_size);
- DECL_CAST(Struct)
- void BriefPrintDetails(std::ostream& os);
-};
-
-class Tuple2 : public Struct {
- public:
- DECL_ACCESSORS(value1, Object)
- DECL_ACCESSORS(value2, Object)
-
- DECL_CAST(Tuple2)
-
- // Dispatched behavior.
- DECL_PRINTER(Tuple2)
- DECL_VERIFIER(Tuple2)
- void BriefPrintDetails(std::ostream& os);
-
- static const int kValue1Offset = HeapObject::kHeaderSize;
- static const int kValue2Offset = kValue1Offset + kPointerSize;
- static const int kSize = kValue2Offset + kPointerSize;
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(Tuple2);
-};
-
-class Tuple3 : public Tuple2 {
- public:
- DECL_ACCESSORS(value3, Object)
-
- DECL_CAST(Tuple3)
-
- // Dispatched behavior.
- DECL_PRINTER(Tuple3)
- DECL_VERIFIER(Tuple3)
- void BriefPrintDetails(std::ostream& os);
-
- static const int kValue3Offset = Tuple2::kSize;
- static const int kSize = kValue3Offset + kPointerSize;
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(Tuple3);
-};
-
-class AsyncGeneratorRequest : public Struct {
- public:
- // Holds an AsyncGeneratorRequest, or Undefined.
- DECL_ACCESSORS(next, Object)
- DECL_INT_ACCESSORS(resume_mode)
- DECL_ACCESSORS(value, Object)
- DECL_ACCESSORS(promise, Object)
-
- static const int kNextOffset = Struct::kHeaderSize;
- static const int kResumeModeOffset = kNextOffset + kPointerSize;
- static const int kValueOffset = kResumeModeOffset + kPointerSize;
- static const int kPromiseOffset = kValueOffset + kPointerSize;
- static const int kSize = kPromiseOffset + kPointerSize;
-
- DECL_CAST(AsyncGeneratorRequest)
- DECL_PRINTER(AsyncGeneratorRequest)
- DECL_VERIFIER(AsyncGeneratorRequest)
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(AsyncGeneratorRequest);
-};
-
// Utility superclass for stack-allocated objects that must be updated
// on gc. It provides two ways for the gc to update instances, either
// iterating or updating after gc.
@@ -1995,273 +1134,6 @@ class Relocatable {
Relocatable* prev_;
};
-
-// The Oddball describes objects null, undefined, true, and false.
-class Oddball: public HeapObject {
- public:
- // [to_number_raw]: Cached raw to_number computed at startup.
- inline double to_number_raw() const;
- inline void set_to_number_raw(double value);
- inline void set_to_number_raw_as_bits(uint64_t bits);
-
- // [to_string]: Cached to_string computed at startup.
- DECL_ACCESSORS(to_string, String)
-
- // [to_number]: Cached to_number computed at startup.
- DECL_ACCESSORS(to_number, Object)
-
- // [typeof]: Cached type_of computed at startup.
- DECL_ACCESSORS(type_of, String)
-
- inline byte kind() const;
- inline void set_kind(byte kind);
-
- // ES6 section 7.1.3 ToNumber for Boolean, Null, Undefined.
- V8_WARN_UNUSED_RESULT static inline Handle<Object> ToNumber(
- Isolate* isolate, Handle<Oddball> input);
-
- DECL_CAST(Oddball)
-
- // Dispatched behavior.
- DECL_VERIFIER(Oddball)
-
- // Initialize the fields.
- static void Initialize(Isolate* isolate, Handle<Oddball> oddball,
- const char* to_string, Handle<Object> to_number,
- const char* type_of, byte kind);
-
- // Layout description.
- static const int kToNumberRawOffset = HeapObject::kHeaderSize;
- static const int kToStringOffset = kToNumberRawOffset + kDoubleSize;
- static const int kToNumberOffset = kToStringOffset + kPointerSize;
- static const int kTypeOfOffset = kToNumberOffset + kPointerSize;
- static const int kKindOffset = kTypeOfOffset + kPointerSize;
- static const int kSize = kKindOffset + kPointerSize;
-
- static const byte kFalse = 0;
- static const byte kTrue = 1;
- static const byte kNotBooleanMask = static_cast<byte>(~1);
- static const byte kTheHole = 2;
- static const byte kNull = 3;
- static const byte kArgumentsMarker = 4;
- static const byte kUndefined = 5;
- static const byte kUninitialized = 6;
- static const byte kOther = 7;
- static const byte kException = 8;
- static const byte kOptimizedOut = 9;
- static const byte kStaleRegister = 10;
- static const byte kSelfReferenceMarker = 10;
-
- typedef FixedBodyDescriptor<kToStringOffset, kTypeOfOffset + kPointerSize,
- kSize> BodyDescriptor;
-
- STATIC_ASSERT(kToNumberRawOffset == HeapNumber::kValueOffset);
- STATIC_ASSERT(kKindOffset == Internals::kOddballKindOffset);
- STATIC_ASSERT(kNull == Internals::kNullOddballKind);
- STATIC_ASSERT(kUndefined == Internals::kUndefinedOddballKind);
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(Oddball);
-};
-
-
-class Cell: public HeapObject {
- public:
- // [value]: value of the cell.
- DECL_ACCESSORS(value, Object)
-
- DECL_CAST(Cell)
-
- static inline Cell* FromValueAddress(Address value) {
- Object* result = FromAddress(value - kValueOffset);
- return static_cast<Cell*>(result);
- }
-
- inline Address ValueAddress() {
- return address() + kValueOffset;
- }
-
- // Dispatched behavior.
- DECL_PRINTER(Cell)
- DECL_VERIFIER(Cell)
-
- // Layout description.
- static const int kValueOffset = HeapObject::kHeaderSize;
- static const int kSize = kValueOffset + kPointerSize;
-
- typedef FixedBodyDescriptor<kValueOffset,
- kValueOffset + kPointerSize,
- kSize> BodyDescriptor;
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(Cell);
-};
-
-// This is a special cell used to maintain both the link between a
-// closure and its feedback vector, as well as a way to count the
-// number of closures created for a certain function per native
-// context. There's at most one FeedbackCell for each function in
-// a native context.
-class FeedbackCell : public Struct {
- public:
- // [value]: value of the cell.
- DECL_ACCESSORS(value, HeapObject)
-
- DECL_CAST(FeedbackCell)
-
- // Dispatched behavior.
- DECL_PRINTER(FeedbackCell)
- DECL_VERIFIER(FeedbackCell)
-
- static const int kValueOffset = HeapObject::kHeaderSize;
- static const int kSize = kValueOffset + kPointerSize;
-
- typedef FixedBodyDescriptor<kValueOffset, kValueOffset + kPointerSize, kSize>
- BodyDescriptor;
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(FeedbackCell);
-};
-
-class PropertyCell : public HeapObject {
- public:
- // [name]: the name of the global property.
- DECL_ACCESSORS(name, Name)
- // [property_details]: details of the global property.
- DECL_ACCESSORS(property_details_raw, Object)
- // [value]: value of the global property.
- DECL_ACCESSORS(value, Object)
- // [dependent_code]: dependent code that depends on the type of the global
- // property.
- DECL_ACCESSORS(dependent_code, DependentCode)
-
- inline PropertyDetails property_details() const;
- inline void set_property_details(PropertyDetails details);
-
- PropertyCellConstantType GetConstantType();
-
- // Computes the new type of the cell's contents for the given value, but
- // without actually modifying the details.
- static PropertyCellType UpdatedType(Isolate* isolate,
- Handle<PropertyCell> cell,
- Handle<Object> value,
- PropertyDetails details);
- // Prepares property cell at given entry for receiving given value.
- // As a result the old cell could be invalidated and/or dependent code could
- // be deoptimized. Returns the prepared property cell.
- static Handle<PropertyCell> PrepareForValue(
- Isolate* isolate, Handle<GlobalDictionary> dictionary, int entry,
- Handle<Object> value, PropertyDetails details);
-
- static Handle<PropertyCell> InvalidateEntry(
- Isolate* isolate, Handle<GlobalDictionary> dictionary, int entry);
-
- static void SetValueWithInvalidation(Isolate* isolate,
- Handle<PropertyCell> cell,
- Handle<Object> new_value);
-
- DECL_CAST(PropertyCell)
-
- // Dispatched behavior.
- DECL_PRINTER(PropertyCell)
- DECL_VERIFIER(PropertyCell)
-
- // Layout description.
- static const int kDetailsOffset = HeapObject::kHeaderSize;
- static const int kNameOffset = kDetailsOffset + kPointerSize;
- static const int kValueOffset = kNameOffset + kPointerSize;
- static const int kDependentCodeOffset = kValueOffset + kPointerSize;
- static const int kSize = kDependentCodeOffset + kPointerSize;
-
- typedef FixedBodyDescriptor<kNameOffset, kSize, kSize> BodyDescriptor;
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(PropertyCell);
-};
-
-// Foreign describes objects pointing from JavaScript to C structures.
-class Foreign: public HeapObject {
- public:
- // [address]: field containing the address.
- inline Address foreign_address();
-
- static inline bool IsNormalized(Object* object);
-
- DECL_CAST(Foreign)
-
- // Dispatched behavior.
- DECL_PRINTER(Foreign)
- DECL_VERIFIER(Foreign)
-
- // Layout description.
-
- static const int kForeignAddressOffset = HeapObject::kHeaderSize;
- static const int kSize = kForeignAddressOffset + kPointerSize;
-
- STATIC_ASSERT(kForeignAddressOffset == Internals::kForeignAddressOffset);
-
- class BodyDescriptor;
-
- private:
- friend class Factory;
- friend class SerializerDeserializer;
- friend class StartupSerializer;
-
- inline void set_foreign_address(Address value);
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(Foreign);
-};
-
-// Support for JavaScript accessors: A pair of a getter and a setter. Each
-// accessor can either be
-// * a JavaScript function or proxy: a real accessor
-// * a FunctionTemplateInfo: a real (lazy) accessor
-// * undefined: considered an accessor by the spec, too, strangely enough
-// * null: an accessor which has not been set
-class AccessorPair: public Struct {
- public:
- DECL_ACCESSORS(getter, Object)
- DECL_ACCESSORS(setter, Object)
-
- DECL_CAST(AccessorPair)
-
- static Handle<AccessorPair> Copy(Isolate* isolate, Handle<AccessorPair> pair);
-
- inline Object* get(AccessorComponent component);
- inline void set(AccessorComponent component, Object* value);
-
- // Note: Returns undefined if the component is not set.
- static Handle<Object> GetComponent(Isolate* isolate,
- Handle<AccessorPair> accessor_pair,
- AccessorComponent component);
-
- // Set both components, skipping arguments which are a JavaScript null.
- inline void SetComponents(Object* getter, Object* setter);
-
- inline bool Equals(AccessorPair* pair);
- inline bool Equals(Object* getter_value, Object* setter_value);
-
- inline bool ContainsAccessor();
-
- // Dispatched behavior.
- DECL_PRINTER(AccessorPair)
- DECL_VERIFIER(AccessorPair)
-
- static const int kGetterOffset = HeapObject::kHeaderSize;
- static const int kSetterOffset = kGetterOffset + kPointerSize;
- static const int kSize = kSetterOffset + kPointerSize;
-
- private:
- // Strangely enough, in addition to functions and harmony proxies, the spec
- // requires us to consider undefined as a kind of accessor, too:
- // var obj = {};
- // Object.defineProperty(obj, "foo", {get: undefined});
- // assertTrue("foo" in obj);
- inline bool IsJSAccessor(Object* obj);
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(AccessorPair);
-};
-
// BooleanBit is a helper class for setting and getting a bit in an integer.
class BooleanBit : public AllStatic {
public:
diff --git a/deps/v8/src/objects/allocation-site-inl.h b/deps/v8/src/objects/allocation-site-inl.h
index 2ed280c054..8e2dc29d9b 100644
--- a/deps/v8/src/objects/allocation-site-inl.h
+++ b/deps/v8/src/objects/allocation-site-inl.h
@@ -16,6 +16,11 @@
namespace v8 {
namespace internal {
+OBJECT_CONSTRUCTORS_IMPL(AllocationMemento, Struct)
+OBJECT_CONSTRUCTORS_IMPL(AllocationSite, Struct)
+
+NEVER_READ_ONLY_SPACE_IMPL(AllocationSite)
+
CAST_ACCESSOR(AllocationMemento)
CAST_ACCESSOR(AllocationSite)
@@ -30,12 +35,12 @@ ACCESSORS_CHECKED(AllocationSite, weak_next, Object, kWeakNextOffset,
HasWeakNext())
ACCESSORS(AllocationMemento, allocation_site, Object, kAllocationSiteOffset)
-JSObject* AllocationSite::boilerplate() const {
+JSObject AllocationSite::boilerplate() const {
DCHECK(PointsToLiteral());
return JSObject::cast(transition_info_or_boilerplate());
}
-void AllocationSite::set_boilerplate(JSObject* object, WriteBarrierMode mode) {
+void AllocationSite::set_boilerplate(JSObject object, WriteBarrierMode mode) {
set_transition_info_or_boilerplate(object, mode);
}
@@ -99,7 +104,7 @@ void AllocationSite::SetDoNotInlineCall() {
}
bool AllocationSite::PointsToLiteral() const {
- Object* raw_value = transition_info_or_boilerplate();
+ Object raw_value = transition_info_or_boilerplate();
DCHECK_EQ(!raw_value->IsSmi(),
raw_value->IsJSArray() || raw_value->IsJSObject());
return !raw_value->IsSmi();
@@ -147,7 +152,7 @@ inline void AllocationSite::set_memento_found_count(int count) {
// Verify that we can count more mementos than we can possibly find in one
// new space collection.
DCHECK((GetHeap()->MaxSemiSpaceSize() /
- (Heap::kMinObjectSizeInWords * kPointerSize +
+ (Heap::kMinObjectSizeInTaggedWords * kTaggedSize +
AllocationMemento::kSize)) < MementoFoundCountBits::kMax);
DCHECK_LT(count, MementoFoundCountBits::kMax);
set_pretenure_data(MementoFoundCountBits::update(value, count));
@@ -180,13 +185,13 @@ bool AllocationMemento::IsValid() const {
!AllocationSite::cast(allocation_site())->IsZombie();
}
-AllocationSite* AllocationMemento::GetAllocationSite() const {
+AllocationSite AllocationMemento::GetAllocationSite() const {
DCHECK(IsValid());
return AllocationSite::cast(allocation_site());
}
Address AllocationMemento::GetAllocationSiteUnchecked() const {
- return reinterpret_cast<Address>(allocation_site());
+ return allocation_site()->ptr();
}
} // namespace internal
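
// The Object* -> Object and JSObject* -> JSObject signature changes in the
// hunks above recur throughout this patch: object references become one-word
// value types wrapping a tagged Address instead of raw C++ pointers, which
// is why call sites switch from nullptr checks to is_null() and from
// reinterpret_cast to ptr(). A minimal sketch of the idea (the class below
// is a hypothetical stand-in, not V8's actual HeapObject definition):
#include <cassert>
#include <cstdint>

using Address = uintptr_t;

class ObjectSketch {
 public:
  ObjectSketch() : ptr_(kNullAddress) {}           // default-constructed: null
  explicit ObjectSketch(Address ptr) : ptr_(ptr) {}
  Address ptr() const { return ptr_; }             // the tagged address
  bool is_null() const { return ptr_ == kNullAddress; }

 private:
  static const Address kNullAddress = 0;
  Address ptr_;
};

int main() {
  ObjectSketch empty;                  // replaces "Object* o = nullptr"
  assert(empty.is_null());
  ObjectSketch o{Address{0x4242}};     // copying copies one machine word
  assert(!o.is_null() && o.ptr() == 0x4242);
}
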
diff --git a/deps/v8/src/objects/allocation-site.h b/deps/v8/src/objects/allocation-site.h
index d923fd8f23..7b22d34f33 100644
--- a/deps/v8/src/objects/allocation-site.h
+++ b/deps/v8/src/objects/allocation-site.h
@@ -6,6 +6,7 @@
#define V8_OBJECTS_ALLOCATION_SITE_H_
#include "src/objects.h"
+#include "src/objects/struct.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -13,8 +14,11 @@
namespace v8 {
namespace internal {
-class AllocationSite : public Struct, public NeverReadOnlySpaceObject {
+enum InstanceType : uint16_t;
+
+class AllocationSite : public Struct {
public:
+ NEVER_READ_ONLY_SPACE
static const uint32_t kMaximumArrayBytesToPretransition = 8 * 1024;
static const double kPretenureRatio;
static const int kPretenureMinimumCreated = 100;
@@ -134,39 +138,45 @@ class AllocationSite : public Struct, public NeverReadOnlySpaceObject {
// AllocationSite has to start with TransitionInfoOrBoilerplateOffset
// and end with WeakNext field.
#define ALLOCATION_SITE_FIELDS(V) \
- V(kTransitionInfoOrBoilerplateOffset, kPointerSize) \
- V(kNestedSiteOffset, kPointerSize) \
- V(kDependentCodeOffset, kPointerSize) \
+ V(kStartOffset, 0) \
+ V(kTransitionInfoOrBoilerplateOffset, kTaggedSize) \
+ V(kNestedSiteOffset, kTaggedSize) \
+ V(kDependentCodeOffset, kTaggedSize) \
V(kCommonPointerFieldEndOffset, 0) \
V(kPretenureDataOffset, kInt32Size) \
V(kPretenureCreateCountOffset, kInt32Size) \
/* Size of AllocationSite without WeakNext field */ \
V(kSizeWithoutWeakNext, 0) \
- V(kWeakNextOffset, kPointerSize) \
+ V(kWeakNextOffset, kTaggedSize) \
/* Size of AllocationSite with WeakNext field */ \
V(kSizeWithWeakNext, 0)
DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize, ALLOCATION_SITE_FIELDS)
-
- static const int kStartOffset = HeapObject::kHeaderSize;
+#undef ALLOCATION_SITE_FIELDS
class BodyDescriptor;
private:
inline bool PretenuringDecisionMade() const;
- DISALLOW_IMPLICIT_CONSTRUCTORS(AllocationSite);
+ OBJECT_CONSTRUCTORS(AllocationSite, Struct);
};
class AllocationMemento : public Struct {
public:
- static const int kAllocationSiteOffset = HeapObject::kHeaderSize;
- static const int kSize = kAllocationSiteOffset + kPointerSize;
+// Layout description.
+#define ALLOCATION_MEMENTO_FIELDS(V) \
+ V(kAllocationSiteOffset, kTaggedSize) \
+ V(kSize, 0)
+
+ DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize,
+ ALLOCATION_MEMENTO_FIELDS)
+#undef ALLOCATION_MEMENTO_FIELDS
DECL_ACCESSORS(allocation_site, Object)
inline bool IsValid() const;
- inline AllocationSite* GetAllocationSite() const;
+ inline AllocationSite GetAllocationSite() const;
inline Address GetAllocationSiteUnchecked() const;
DECL_PRINTER(AllocationMemento)
@@ -174,8 +184,7 @@ class AllocationMemento : public Struct {
DECL_CAST(AllocationMemento)
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(AllocationMemento);
+ OBJECT_CONSTRUCTORS(AllocationMemento, Struct);
};
} // namespace internal
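
// The ALLOCATION_SITE_FIELDS(V) list above is an X-macro: every
// V(Name, Size) entry names an offset constant and the byte width of the
// field, and DEFINE_FIELD_OFFSET_CONSTANTS folds the widths into running
// offsets. A sketch of how that folding can work (DEMO_-prefixed names are
// illustrative; V8's real macro lives elsewhere in the tree and may differ):
#include <cstdint>

#define DEMO_SITE_FIELDS(V)     \
  V(kTransitionInfoOffset, 8)   \
  V(kNestedSiteOffset, 8)       \
  V(kPretenureDataOffset, 4)    \
  V(kDemoSize, 0)

// Each entry becomes two enumerators, Name and NameEnd, so the enum's
// auto-increment threads the running offset from field to field.
#define DEMO_ONE_FIELD(Name, Size) Name, Name##End = Name + (Size)-1,
#define DEMO_DEFINE_OFFSETS(Start, LIST) \
  enum { LIST##_Start = (Start)-1, LIST(DEMO_ONE_FIELD) };

const int kDemoHeaderSize = 8;  // stand-in for HeapObject::kHeaderSize
DEMO_DEFINE_OFFSETS(kDemoHeaderSize, DEMO_SITE_FIELDS)

static_assert(kTransitionInfoOffset == 8, "first field follows the header");
static_assert(kNestedSiteOffset == 16, "offsets accumulate field widths");
static_assert(kPretenureDataOffset == 24, "int32 field after tagged fields");
static_assert(kDemoSize == 28, "the zero-width terminator is the total size");

int main() {}
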
diff --git a/deps/v8/src/objects/api-callbacks-inl.h b/deps/v8/src/objects/api-callbacks-inl.h
index b8ca8bef20..894f13ffe8 100644
--- a/deps/v8/src/objects/api-callbacks-inl.h
+++ b/deps/v8/src/objects/api-callbacks-inl.h
@@ -8,6 +8,8 @@
#include "src/objects/api-callbacks.h"
#include "src/heap/heap-inl.h"
+#include "src/heap/heap-write-barrier.h"
+#include "src/objects/foreign-inl.h"
#include "src/objects/name.h"
#include "src/objects/templates.h"
@@ -17,6 +19,11 @@
namespace v8 {
namespace internal {
+OBJECT_CONSTRUCTORS_IMPL(AccessCheckInfo, Struct)
+OBJECT_CONSTRUCTORS_IMPL(AccessorInfo, Struct)
+OBJECT_CONSTRUCTORS_IMPL(InterceptorInfo, Struct)
+OBJECT_CONSTRUCTORS_IMPL(CallHandlerInfo, Tuple3)
+
CAST_ACCESSOR(AccessorInfo)
CAST_ACCESSOR(AccessCheckInfo)
CAST_ACCESSOR(InterceptorInfo)
@@ -78,7 +85,7 @@ void AccessorInfo::set_setter_side_effect_type(SideEffectType value) {
BIT_FIELD_ACCESSORS(AccessorInfo, flags, initial_property_attributes,
AccessorInfo::InitialAttributesBits)
-bool AccessorInfo::IsCompatibleReceiver(Object* receiver) {
+bool AccessorInfo::IsCompatibleReceiver(Object receiver) {
if (!HasExpectedReceiverType()) return true;
if (!receiver->IsJSObject()) return false;
return FunctionTemplateInfo::cast(expected_receiver_type())
diff --git a/deps/v8/src/objects/api-callbacks.h b/deps/v8/src/objects/api-callbacks.h
index f7522da8a7..33f68d56fc 100644
--- a/deps/v8/src/objects/api-callbacks.h
+++ b/deps/v8/src/objects/api-callbacks.h
@@ -5,7 +5,7 @@
#ifndef V8_OBJECTS_API_CALLBACKS_H_
#define V8_OBJECTS_API_CALLBACKS_H_
-#include "src/objects.h"
+#include "src/objects/struct.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -64,7 +64,7 @@ class AccessorInfo : public Struct {
// Checks whether the given receiver is compatible with this accessor.
static bool IsCompatibleReceiverMap(Handle<AccessorInfo> info,
Handle<Map> map);
- inline bool IsCompatibleReceiver(Object* receiver);
+ inline bool IsCompatibleReceiver(Object receiver);
DECL_CAST(AccessorInfo)
@@ -77,14 +77,14 @@ class AccessorInfo : public Struct {
Handle<FixedArray> array, int valid_descriptors);
// Layout description.
-#define ACCESSOR_INFO_FIELDS(V) \
- V(kNameOffset, kPointerSize) \
- V(kFlagsOffset, kPointerSize) \
- V(kExpectedReceiverTypeOffset, kPointerSize) \
- V(kSetterOffset, kPointerSize) \
- V(kGetterOffset, kPointerSize) \
- V(kJsGetterOffset, kPointerSize) \
- V(kDataOffset, kPointerSize) \
+#define ACCESSOR_INFO_FIELDS(V) \
+ V(kNameOffset, kTaggedSize) \
+ V(kFlagsOffset, kTaggedSize) \
+ V(kExpectedReceiverTypeOffset, kTaggedSize) \
+ V(kSetterOffset, kTaggedSize) \
+ V(kGetterOffset, kTaggedSize) \
+ V(kJsGetterOffset, kTaggedSize) \
+ V(kDataOffset, kTaggedSize) \
V(kSize, 0)
DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize, ACCESSOR_INFO_FIELDS)
@@ -108,7 +108,7 @@ class AccessorInfo : public Struct {
DEFINE_BIT_FIELDS(ACCESSOR_INFO_FLAGS_BIT_FIELDS)
#undef ACCESSOR_INFO_FLAGS_BIT_FIELDS
- DISALLOW_IMPLICIT_CONSTRUCTORS(AccessorInfo);
+ OBJECT_CONSTRUCTORS(AccessorInfo, Struct);
};
class AccessCheckInfo : public Struct {
@@ -124,17 +124,21 @@ class AccessCheckInfo : public Struct {
DECL_PRINTER(AccessCheckInfo)
DECL_VERIFIER(AccessCheckInfo)
- static AccessCheckInfo* Get(Isolate* isolate, Handle<JSObject> receiver);
+ static AccessCheckInfo Get(Isolate* isolate, Handle<JSObject> receiver);
- static const int kCallbackOffset = HeapObject::kHeaderSize;
- static const int kNamedInterceptorOffset = kCallbackOffset + kPointerSize;
- static const int kIndexedInterceptorOffset =
- kNamedInterceptorOffset + kPointerSize;
- static const int kDataOffset = kIndexedInterceptorOffset + kPointerSize;
- static const int kSize = kDataOffset + kPointerSize;
+// Layout description.
+#define ACCESS_CHECK_INFO_FIELDS(V) \
+ V(kCallbackOffset, kTaggedSize) \
+ V(kNamedInterceptorOffset, kTaggedSize) \
+ V(kIndexedInterceptorOffset, kTaggedSize) \
+ V(kDataOffset, kTaggedSize) \
+ V(kSize, 0)
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(AccessCheckInfo);
+ DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize,
+ ACCESS_CHECK_INFO_FIELDS)
+#undef ACCESS_CHECK_INFO_FIELDS
+
+ OBJECT_CONSTRUCTORS(AccessCheckInfo, Struct);
};
class InterceptorInfo : public Struct {
@@ -162,16 +166,22 @@ class InterceptorInfo : public Struct {
DECL_PRINTER(InterceptorInfo)
DECL_VERIFIER(InterceptorInfo)
- static const int kGetterOffset = HeapObject::kHeaderSize;
- static const int kSetterOffset = kGetterOffset + kPointerSize;
- static const int kQueryOffset = kSetterOffset + kPointerSize;
- static const int kDescriptorOffset = kQueryOffset + kPointerSize;
- static const int kDeleterOffset = kDescriptorOffset + kPointerSize;
- static const int kEnumeratorOffset = kDeleterOffset + kPointerSize;
- static const int kDefinerOffset = kEnumeratorOffset + kPointerSize;
- static const int kDataOffset = kDefinerOffset + kPointerSize;
- static const int kFlagsOffset = kDataOffset + kPointerSize;
- static const int kSize = kFlagsOffset + kPointerSize;
+// Layout description.
+#define INTERCEPTOR_INFO_FIELDS(V) \
+ V(kGetterOffset, kTaggedSize) \
+ V(kSetterOffset, kTaggedSize) \
+ V(kQueryOffset, kTaggedSize) \
+ V(kDescriptorOffset, kTaggedSize) \
+ V(kDeleterOffset, kTaggedSize) \
+ V(kEnumeratorOffset, kTaggedSize) \
+ V(kDefinerOffset, kTaggedSize) \
+ V(kDataOffset, kTaggedSize) \
+ V(kFlagsOffset, kTaggedSize) \
+ V(kSize, 0)
+
+ DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize,
+ INTERCEPTOR_INFO_FIELDS)
+#undef INTERCEPTOR_INFO_FIELDS
static const int kCanInterceptSymbolsBit = 0;
static const int kAllCanReadBit = 1;
@@ -179,8 +189,7 @@ class InterceptorInfo : public Struct {
static const int kNamed = 3;
static const int kHasNoSideEffect = 4;
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(InterceptorInfo);
+ OBJECT_CONSTRUCTORS(InterceptorInfo, Struct);
};
class CallHandlerInfo : public Tuple3 {
@@ -208,8 +217,7 @@ class CallHandlerInfo : public Tuple3 {
static const int kJsCallbackOffset = kValue2Offset;
static const int kDataOffset = kValue3Offset;
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(CallHandlerInfo);
+ OBJECT_CONSTRUCTORS(CallHandlerInfo, Tuple3);
};
} // namespace internal
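
// The IsCompatibleReceiver hunk above preserves a three-step guard: accept
// everything when no receiver type is expected, reject non-JSObject
// receivers outright, and otherwise defer to the template's signature
// check. A sketch of that predicate's shape, with placeholder booleans
// standing in for V8's type and template queries (names are assumptions):
#include <cassert>

struct ReceiverSketch {
  bool is_js_object;               // stands in for receiver->IsJSObject()
  bool matches_expected_template;  // stands in for the signature check
};

bool IsCompatibleReceiverSketch(bool has_expected_receiver_type,
                                const ReceiverSketch& receiver) {
  if (!has_expected_receiver_type) return true;  // nothing to check against
  if (!receiver.is_js_object) return false;      // primitives never match
  return receiver.matches_expected_template;     // defer to the template
}

int main() {
  assert(IsCompatibleReceiverSketch(false, {false, false}));
  assert(!IsCompatibleReceiverSketch(true, {false, true}));
  assert(IsCompatibleReceiverSketch(true, {true, true}));
}
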
diff --git a/deps/v8/src/objects/arguments-inl.h b/deps/v8/src/objects/arguments-inl.h
index 222ca7954e..541d416e5c 100644
--- a/deps/v8/src/objects/arguments-inl.h
+++ b/deps/v8/src/objects/arguments-inl.h
@@ -18,21 +18,25 @@
namespace v8 {
namespace internal {
+OBJECT_CONSTRUCTORS_IMPL(SloppyArgumentsElements, FixedArray)
+OBJECT_CONSTRUCTORS_IMPL(JSArgumentsObject, JSObject)
+OBJECT_CONSTRUCTORS_IMPL(AliasedArgumentsEntry, Struct)
+
CAST_ACCESSOR(AliasedArgumentsEntry)
-CAST_ACCESSOR(JSArgumentsObject)
CAST_ACCESSOR(SloppyArgumentsElements)
+CAST_ACCESSOR(JSArgumentsObject)
SMI_ACCESSORS(AliasedArgumentsEntry, aliased_context_slot, kAliasedContextSlot)
-Context* SloppyArgumentsElements::context() {
+Context SloppyArgumentsElements::context() {
return Context::cast(get(kContextIndex));
}
-FixedArray* SloppyArgumentsElements::arguments() {
+FixedArray SloppyArgumentsElements::arguments() {
return FixedArray::cast(get(kArgumentsIndex));
}
-void SloppyArgumentsElements::set_arguments(FixedArray* arguments) {
+void SloppyArgumentsElements::set_arguments(FixedArray arguments) {
set(kArgumentsIndex, arguments);
}
@@ -40,11 +44,11 @@ uint32_t SloppyArgumentsElements::parameter_map_length() {
return length() - kParameterMapStart;
}
-Object* SloppyArgumentsElements::get_mapped_entry(uint32_t entry) {
+Object SloppyArgumentsElements::get_mapped_entry(uint32_t entry) {
return get(entry + kParameterMapStart);
}
-void SloppyArgumentsElements::set_mapped_entry(uint32_t entry, Object* object) {
+void SloppyArgumentsElements::set_mapped_entry(uint32_t entry, Object object) {
set(entry + kParameterMapStart, object);
}
@@ -55,22 +59,22 @@ void SloppyArgumentsElements::set_mapped_entry(uint32_t entry, Object* object) {
bool JSSloppyArgumentsObject::GetSloppyArgumentsLength(Isolate* isolate,
Handle<JSObject> object,
int* out) {
- Context* context = *isolate->native_context();
- Map* map = object->map();
+ Context context = *isolate->native_context();
+ Map map = object->map();
if (map != context->sloppy_arguments_map() &&
map != context->strict_arguments_map() &&
map != context->fast_aliased_arguments_map()) {
return false;
}
DCHECK(object->HasFastElements() || object->HasFastArgumentsElements());
- Object* len_obj =
+ Object len_obj =
object->InObjectPropertyAt(JSArgumentsObjectWithLength::kLengthIndex);
if (!len_obj->IsSmi()) return false;
*out = Max(0, Smi::ToInt(len_obj));
- FixedArray* parameters = FixedArray::cast(object->elements());
+ FixedArray parameters = FixedArray::cast(object->elements());
if (object->HasSloppyArgumentsElements()) {
- FixedArray* arguments = FixedArray::cast(parameters->get(1));
+ FixedArray arguments = FixedArray::cast(parameters->get(1));
return *out <= arguments->length();
}
return *out <= parameters->length();
diff --git a/deps/v8/src/objects/arguments.h b/deps/v8/src/objects/arguments.h
index 15f3d2a2f5..4d2e18ff53 100644
--- a/deps/v8/src/objects/arguments.h
+++ b/deps/v8/src/objects/arguments.h
@@ -7,6 +7,8 @@
#include "src/objects/fixed-array.h"
#include "src/objects/js-objects.h"
+#include "src/objects/struct.h"
+#include "torque-generated/class-definitions-from-dsl.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -19,9 +21,7 @@ class JSArgumentsObject : public JSObject {
public:
DECL_VERIFIER(JSArgumentsObject)
DECL_CAST(JSArgumentsObject)
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(JSArgumentsObject);
+ OBJECT_CONSTRUCTORS(JSArgumentsObject, JSObject);
};
// Common superclass for JSSloppyArgumentsObject and JSStrictArgumentsObject.
@@ -30,14 +30,19 @@ class JSArgumentsObject : public JSObject {
// mode already. Only use the below layout with the specific initial maps.
class JSArgumentsObjectWithLength : public JSArgumentsObject {
public:
- // Offsets of object fields.
- static const int kLengthOffset = JSObject::kHeaderSize;
- static const int kSize = kLengthOffset + kPointerSize;
+// Layout description.
+#define JS_ARGUMENTS_OBJECT_WITH_LENGTH_FIELDS(V) \
+ V(kLengthOffset, kTaggedSize) \
+ V(kSize, 0)
+
+ DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
+ JS_ARGUMENTS_OBJECT_WITH_LENGTH_FIELDS)
+#undef JS_ARGUMENTS_OBJECT_WITH_LENGTH_FIELDS
+
// Indices of in-object properties.
static const int kLengthIndex = 0;
DECL_VERIFIER(JSArgumentsObjectWithLength)
- DECL_CAST(JSArgumentsObjectWithLength)
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(JSArgumentsObjectWithLength);
@@ -47,9 +52,15 @@ class JSArgumentsObjectWithLength : public JSArgumentsObject {
// This initial map adds in-object properties for "length" and "callee".
class JSSloppyArgumentsObject : public JSArgumentsObjectWithLength {
public:
- // Offsets of object fields.
- static const int kCalleeOffset = JSArgumentsObjectWithLength::kSize;
- static const int kSize = kCalleeOffset + kPointerSize;
+// Layout description.
+#define JS_SLOPPY_ARGUMENTS_OBJECT_FIELDS(V) \
+ V(kCalleeOffset, kTaggedSize) \
+ V(kSize, 0)
+
+ DEFINE_FIELD_OFFSET_CONSTANTS(JSArgumentsObjectWithLength::kSize,
+ JS_SLOPPY_ARGUMENTS_OBJECT_FIELDS)
+#undef JS_SLOPPY_ARGUMENTS_OBJECT_FIELDS
+
// Indices of in-object properties.
static const int kCalleeIndex = kLengthIndex + 1;
@@ -65,7 +76,7 @@ class JSSloppyArgumentsObject : public JSArgumentsObjectWithLength {
// This initial map adds an in-object property for "length".
class JSStrictArgumentsObject : public JSArgumentsObjectWithLength {
public:
- // Offsets of object fields.
+ // Layout description.
static const int kSize = JSArgumentsObjectWithLength::kSize;
private:
@@ -75,13 +86,13 @@ class JSStrictArgumentsObject : public JSArgumentsObjectWithLength {
// Helper class to access FAST_ and SLOW_SLOPPY_ARGUMENTS_ELEMENTS
//
// +---+-----------------------+
-// | 0 | Context* context |
+// | 0 | Context context |
// +---------------------------+
-// | 1 | FixedArray* arguments +----+ HOLEY_ELEMENTS
+// | 1 | FixedArray arguments +----+ HOLEY_ELEMENTS
// +---------------------------+ v-----+-----------+
-// | 2 | Object* param_1_map | | 0 | the_hole |
+// | 2 | Object param_1_map | | 0 | the_hole |
// |...| ... | | ... | ... |
-// |n+1| Object* param_n_map | | n-1 | the_hole |
+// |n+1| Object param_n_map | | n-1 | the_hole |
// +---------------------------+ | n | element_1 |
// | ... | ... |
// |n+m-1| element_m |
@@ -100,20 +111,19 @@ class SloppyArgumentsElements : public FixedArray {
static const int kArgumentsIndex = 1;
static const uint32_t kParameterMapStart = 2;
- inline Context* context();
- inline FixedArray* arguments();
- inline void set_arguments(FixedArray* arguments);
+ inline Context context();
+ inline FixedArray arguments();
+ inline void set_arguments(FixedArray arguments);
inline uint32_t parameter_map_length();
- inline Object* get_mapped_entry(uint32_t entry);
- inline void set_mapped_entry(uint32_t entry, Object* object);
+ inline Object get_mapped_entry(uint32_t entry);
+ inline void set_mapped_entry(uint32_t entry, Object object);
DECL_CAST(SloppyArgumentsElements)
#ifdef VERIFY_HEAP
- void SloppyArgumentsElementsVerify(Isolate* isolate, JSObject* holder);
+ void SloppyArgumentsElementsVerify(Isolate* isolate, JSObject holder);
#endif
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(SloppyArgumentsElements);
+ OBJECT_CONSTRUCTORS(SloppyArgumentsElements, FixedArray);
};
// Representation of a slow alias as part of a sloppy arguments objects.
@@ -135,11 +145,17 @@ class AliasedArgumentsEntry : public Struct {
DECL_PRINTER(AliasedArgumentsEntry)
DECL_VERIFIER(AliasedArgumentsEntry)
- static const int kAliasedContextSlot = HeapObject::kHeaderSize;
- static const int kSize = kAliasedContextSlot + kPointerSize;
+// Layout description.
+#define ALIASED_ARGUMENTS_FIELDS(V) \
+ V(kAliasedContextSlot, kTaggedSize) \
+ /* Total size. */ \
+ V(kSize, 0)
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(AliasedArgumentsEntry);
+ DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize,
+ ALIASED_ARGUMENTS_FIELDS)
+#undef ALIASED_ARGUMENTS_FIELDS
+
+ OBJECT_CONSTRUCTORS(AliasedArgumentsEntry, Struct);
};
} // namespace internal
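
// Following the diagram above: slot 0 of a sloppy-arguments elements store
// holds the context, slot 1 the backing arguments FixedArray, and slots
// 2..n+1 the per-parameter map entries. A sketch of that index arithmetic
// over a plain vector (the storage type and the -1 "hole" sentinel are
// stand-ins; V8 uses a FixedArray and the_hole):
#include <cassert>
#include <cstdint>
#include <vector>

struct SloppyArgumentsElementsSketch {
  static const int kContextIndex = 0;
  static const int kArgumentsIndex = 1;
  static const uint32_t kParameterMapStart = 2;

  std::vector<intptr_t> slots;  // [context, arguments, map_0, ..., map_n-1]

  uint32_t parameter_map_length() const {
    // Mirrors length() - kParameterMapStart in the hunk above.
    return static_cast<uint32_t>(slots.size()) - kParameterMapStart;
  }
  intptr_t get_mapped_entry(uint32_t entry) const {
    return slots[entry + kParameterMapStart];
  }
  void set_mapped_entry(uint32_t entry, intptr_t value) {
    slots[entry + kParameterMapStart] = value;
  }
};

int main() {
  // Two mapped parameters, both initially unmapped (-1 as the hole).
  SloppyArgumentsElementsSketch elements{{0, 0, -1, -1}};
  assert(elements.parameter_map_length() == 2);
  elements.set_mapped_entry(1, 42);
  assert(elements.get_mapped_entry(1) == 42);
}
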
diff --git a/deps/v8/src/objects/bigint.cc b/deps/v8/src/objects/bigint.cc
index dcf99e2f29..ccf48f71b7 100644
--- a/deps/v8/src/objects/bigint.cc
+++ b/deps/v8/src/objects/bigint.cc
@@ -20,7 +20,8 @@
#include "src/objects/bigint.h"
#include "src/double.h"
-#include "src/objects-inl.h"
+#include "src/objects/heap-number-inl.h"
+#include "src/objects/smi.h"
namespace v8 {
namespace internal {
@@ -33,8 +34,7 @@ namespace internal {
// Many of the functions in this class use arguments of type {BigIntBase},
// indicating that they will be used in a read-only capacity, and both
// {BigInt} and {MutableBigInt} objects can be passed in.
-class MutableBigInt : public FreshlyAllocatedBigInt,
- public NeverReadOnlySpaceObject {
+class MutableBigInt : public FreshlyAllocatedBigInt {
public:
// Bottleneck for converting MutableBigInts to BigInts.
static MaybeHandle<BigInt> MakeImmutable(MaybeHandle<MutableBigInt> maybe);
@@ -57,6 +57,9 @@ class MutableBigInt : public FreshlyAllocatedBigInt,
SLOW_DCHECK(bigint->IsBigInt());
return Handle<MutableBigInt>::cast(bigint);
}
+ static MutableBigInt unchecked_cast(Object o) {
+ return MutableBigInt(o.ptr());
+ }
// Internal helpers.
static MaybeHandle<MutableBigInt> BitwiseAnd(Isolate* isolate,
@@ -81,7 +84,7 @@ class MutableBigInt : public FreshlyAllocatedBigInt,
Handle<BigInt> y, bool result_sign);
static MaybeHandle<MutableBigInt> AbsoluteAddOne(
Isolate* isolate, Handle<BigIntBase> x, bool sign,
- MutableBigInt* result_storage = nullptr);
+ MutableBigInt result_storage = MutableBigInt());
static Handle<MutableBigInt> AbsoluteSubOne(Isolate* isolate,
Handle<BigIntBase> x);
static MaybeHandle<MutableBigInt> AbsoluteSubOne(Isolate* isolate,
@@ -92,21 +95,21 @@ class MutableBigInt : public FreshlyAllocatedBigInt,
enum SymmetricOp { kSymmetric, kNotSymmetric };
static inline Handle<MutableBigInt> AbsoluteBitwiseOp(
Isolate* isolate, Handle<BigIntBase> x, Handle<BigIntBase> y,
- MutableBigInt* result_storage, ExtraDigitsHandling extra_digits,
+ MutableBigInt result_storage, ExtraDigitsHandling extra_digits,
SymmetricOp symmetric,
const std::function<digit_t(digit_t, digit_t)>& op);
static Handle<MutableBigInt> AbsoluteAnd(
Isolate* isolate, Handle<BigIntBase> x, Handle<BigIntBase> y,
- MutableBigInt* result_storage = nullptr);
+ MutableBigInt result_storage = MutableBigInt());
static Handle<MutableBigInt> AbsoluteAndNot(
Isolate* isolate, Handle<BigIntBase> x, Handle<BigIntBase> y,
- MutableBigInt* result_storage = nullptr);
+ MutableBigInt result_storage = MutableBigInt());
static Handle<MutableBigInt> AbsoluteOr(
Isolate* isolate, Handle<BigIntBase> x, Handle<BigIntBase> y,
- MutableBigInt* result_storage = nullptr);
+ MutableBigInt result_storage = MutableBigInt());
static Handle<MutableBigInt> AbsoluteXor(
Isolate* isolate, Handle<BigIntBase> x, Handle<BigIntBase> y,
- MutableBigInt* result_storage = nullptr);
+ MutableBigInt result_storage = MutableBigInt());
static int AbsoluteCompare(Handle<BigIntBase> x, Handle<BigIntBase> y);
@@ -114,9 +117,8 @@ class MutableBigInt : public FreshlyAllocatedBigInt,
digit_t multiplier,
Handle<MutableBigInt> accumulator,
int accumulator_index);
- static void InternalMultiplyAdd(BigIntBase* source, digit_t factor,
- digit_t summand, int n,
- MutableBigInt* result);
+ static void InternalMultiplyAdd(BigIntBase source, digit_t factor,
+ digit_t summand, int n, MutableBigInt result);
void InplaceMultiplyAdd(uintptr_t factor, uintptr_t summand);
// Specialized helpers for Divide/Remainder.
@@ -166,7 +168,7 @@ class MutableBigInt : public FreshlyAllocatedBigInt,
// Returns the least significant 64 bits, simulating two's complement
// representation.
- static uint64_t GetRawBits(BigIntBase* x, bool* lossless);
+ static uint64_t GetRawBits(BigIntBase x, bool* lossless);
// Digit arithmetic helpers.
static inline digit_t digit_add(digit_t a, digit_t b, digit_t* carry);
@@ -182,29 +184,39 @@ class MutableBigInt : public FreshlyAllocatedBigInt,
// Internal field setters. Non-mutable BigInts don't have these.
#include "src/objects/object-macros.h"
inline void set_sign(bool new_sign) {
- intptr_t bitfield = READ_INTPTR_FIELD(this, kBitfieldOffset);
- bitfield = SignBits::update(static_cast<uint32_t>(bitfield), new_sign);
- WRITE_INTPTR_FIELD(this, kBitfieldOffset, bitfield);
+ int32_t bitfield = RELAXED_READ_INT32_FIELD(this, kBitfieldOffset);
+ bitfield = SignBits::update(bitfield, new_sign);
+ RELAXED_WRITE_INT32_FIELD(this, kBitfieldOffset, bitfield);
}
- inline void set_length(int new_length) {
- intptr_t bitfield = READ_INTPTR_FIELD(this, kBitfieldOffset);
- bitfield = LengthBits::update(static_cast<uint32_t>(bitfield), new_length);
- WRITE_INTPTR_FIELD(this, kBitfieldOffset, bitfield);
+ inline void synchronized_set_length(int new_length) {
+ int32_t bitfield = RELAXED_READ_INT32_FIELD(this, kBitfieldOffset);
+ bitfield = LengthBits::update(bitfield, new_length);
+ RELEASE_WRITE_INT32_FIELD(this, kBitfieldOffset, bitfield);
}
inline void initialize_bitfield(bool sign, int length) {
- intptr_t bitfield = LengthBits::encode(length) | SignBits::encode(sign);
- WRITE_INTPTR_FIELD(this, kBitfieldOffset, bitfield);
+ int32_t bitfield = LengthBits::encode(length) | SignBits::encode(sign);
+ WRITE_INT32_FIELD(this, kBitfieldOffset, bitfield);
}
inline void set_digit(int n, digit_t value) {
SLOW_DCHECK(0 <= n && n < length());
Address address = FIELD_ADDR(this, kDigitsOffset + n * kDigitSize);
(*reinterpret_cast<digit_t*>(address)) = value;
}
-#include "src/objects/object-macros-undef.h"
void set_64_bits(uint64_t bits);
+
+ bool IsMutableBigInt() const { return IsBigInt(); }
+
+ NEVER_READ_ONLY_SPACE
+
+ OBJECT_CONSTRUCTORS(MutableBigInt, FreshlyAllocatedBigInt)
};
+OBJECT_CONSTRUCTORS_IMPL(MutableBigInt, FreshlyAllocatedBigInt)
+NEVER_READ_ONLY_SPACE_IMPL(MutableBigInt)
+
+#include "src/objects/object-macros-undef.h"
+
MaybeHandle<MutableBigInt> MutableBigInt::New(Isolate* isolate, int length,
PretenureFlag pretenure) {
if (length > BigInt::kMaxLength) {
@@ -317,9 +329,8 @@ Handle<MutableBigInt> MutableBigInt::Copy(Isolate* isolate,
}
void MutableBigInt::InitializeDigits(int length, byte value) {
- memset(reinterpret_cast<void*>(reinterpret_cast<Address>(this) +
- kDigitsOffset - kHeapObjectTag),
- value, length * kDigitSize);
+ memset(reinterpret_cast<void*>(ptr() + kDigitsOffset - kHeapObjectTag), value,
+ length * kDigitSize);
}
MaybeHandle<BigInt> MutableBigInt::MakeImmutable(
@@ -339,8 +350,13 @@ Handle<BigInt> MutableBigInt::MakeImmutable(Handle<MutableBigInt> result) {
int size_delta = to_trim * kDigitSize;
Address new_end = result->address() + BigInt::SizeFor(new_length);
Heap* heap = result->GetHeap();
- heap->CreateFillerObjectAt(new_end, size_delta, ClearRecordedSlots::kNo);
- result->set_length(new_length);
+ if (!heap->IsLargeObject(*result)) {
+ // We do not create a filler for objects in large object space.
+ // TODO(hpayer): We should shrink the large object page if the size
+ // of the object changed significantly.
+ heap->CreateFillerObjectAt(new_end, size_delta, ClearRecordedSlots::kNo);
+ }
+ result->synchronized_set_length(new_length);
// Canonicalize -0n.
if (new_length == 0) {
@@ -350,7 +366,7 @@ Handle<BigInt> MutableBigInt::MakeImmutable(Handle<MutableBigInt> result) {
}
DCHECK_IMPLIES(result->length() > 0,
result->digit(result->length() - 1) != 0); // MSD is non-zero.
- return Handle<BigInt>(reinterpret_cast<BigInt**>(result.location()));
+ return Handle<BigInt>(result.location());
}
Handle<BigInt> BigInt::Zero(Isolate* isolate) {
@@ -617,7 +633,7 @@ ComparisonResult BigInt::CompareToBigInt(Handle<BigInt> x, Handle<BigInt> y) {
return ComparisonResult::kEqual;
}
-bool BigInt::EqualToBigInt(BigInt* x, BigInt* y) {
+bool BigInt::EqualToBigInt(BigInt x, BigInt y) {
if (x->sign() != y->sign()) return false;
if (x->length() != y->length()) return false;
for (int i = 0; i < x->length(); i++) {
@@ -980,7 +996,7 @@ MaybeHandle<BigInt> BigInt::FromObject(Isolate* isolate, Handle<Object> obj) {
}
Handle<Object> BigInt::ToNumber(Isolate* isolate, Handle<BigInt> x) {
- if (x->is_zero()) return Handle<Smi>(Smi::kZero, isolate);
+ if (x->is_zero()) return Handle<Smi>(Smi::zero(), isolate);
if (x->length() == 1 && x->digit(0) < Smi::kMaxValue) {
int value = static_cast<int>(x->digit(0));
if (x->sign()) value = -value;
@@ -1167,7 +1183,7 @@ Handle<BigInt> MutableBigInt::AbsoluteSub(Isolate* isolate, Handle<BigInt> x,
// modification.
MaybeHandle<MutableBigInt> MutableBigInt::AbsoluteAddOne(
Isolate* isolate, Handle<BigIntBase> x, bool sign,
- MutableBigInt* result_storage) {
+ MutableBigInt result_storage) {
int input_length = x->length();
// The addition will overflow into a new digit if all existing digits are
// at maximum.
@@ -1180,7 +1196,7 @@ MaybeHandle<MutableBigInt> MutableBigInt::AbsoluteAddOne(
}
int result_length = input_length + will_overflow;
Handle<MutableBigInt> result(result_storage, isolate);
- if (result_storage == nullptr) {
+ if (result_storage.is_null()) {
if (!New(isolate, result_length).ToHandle(&result)) {
return MaybeHandle<MutableBigInt>();
}
@@ -1255,7 +1271,7 @@ MaybeHandle<MutableBigInt> MutableBigInt::AbsoluteSubOne(Isolate* isolate,
// result_storage: [ 0 ][ x3 ][ r2 ][ r1 ][ r0 ]
inline Handle<MutableBigInt> MutableBigInt::AbsoluteBitwiseOp(
Isolate* isolate, Handle<BigIntBase> x, Handle<BigIntBase> y,
- MutableBigInt* result_storage, ExtraDigitsHandling extra_digits,
+ MutableBigInt result_storage, ExtraDigitsHandling extra_digits,
SymmetricOp symmetric, const std::function<digit_t(digit_t, digit_t)>& op) {
int x_length = x->length();
int y_length = y->length();
@@ -1270,7 +1286,7 @@ inline Handle<MutableBigInt> MutableBigInt::AbsoluteBitwiseOp(
DCHECK(num_pairs == Min(x_length, y_length));
Handle<MutableBigInt> result(result_storage, isolate);
int result_length = extra_digits == kCopy ? x_length : num_pairs;
- if (result_storage == nullptr) {
+ if (result_storage.is_null()) {
result = New(isolate, result_length).ToHandleChecked();
} else {
DCHECK(result_storage->length() >= result_length);
@@ -1294,9 +1310,10 @@ inline Handle<MutableBigInt> MutableBigInt::AbsoluteBitwiseOp(
// If {result_storage} is non-nullptr, it will be used for the result,
// otherwise a new BigInt of appropriate length will be allocated.
// {result_storage} may alias {x} or {y} for in-place modification.
-Handle<MutableBigInt> MutableBigInt::AbsoluteAnd(
- Isolate* isolate, Handle<BigIntBase> x, Handle<BigIntBase> y,
- MutableBigInt* result_storage) {
+Handle<MutableBigInt> MutableBigInt::AbsoluteAnd(Isolate* isolate,
+ Handle<BigIntBase> x,
+ Handle<BigIntBase> y,
+ MutableBigInt result_storage) {
return AbsoluteBitwiseOp(isolate, x, y, result_storage, kSkip, kSymmetric,
[](digit_t a, digit_t b) { return a & b; });
}
@@ -1306,7 +1323,7 @@ Handle<MutableBigInt> MutableBigInt::AbsoluteAnd(
// {result_storage} may alias {x} or {y} for in-place modification.
Handle<MutableBigInt> MutableBigInt::AbsoluteAndNot(
Isolate* isolate, Handle<BigIntBase> x, Handle<BigIntBase> y,
- MutableBigInt* result_storage) {
+ MutableBigInt result_storage) {
return AbsoluteBitwiseOp(isolate, x, y, result_storage, kCopy, kNotSymmetric,
[](digit_t a, digit_t b) { return a & ~b; });
}
@@ -1317,7 +1334,7 @@ Handle<MutableBigInt> MutableBigInt::AbsoluteAndNot(
Handle<MutableBigInt> MutableBigInt::AbsoluteOr(Isolate* isolate,
Handle<BigIntBase> x,
Handle<BigIntBase> y,
- MutableBigInt* result_storage) {
+ MutableBigInt result_storage) {
return AbsoluteBitwiseOp(isolate, x, y, result_storage, kCopy, kSymmetric,
[](digit_t a, digit_t b) { return a | b; });
}
@@ -1325,9 +1342,10 @@ Handle<MutableBigInt> MutableBigInt::AbsoluteOr(Isolate* isolate,
// If {result_storage} is non-nullptr, it will be used for the result,
// otherwise a new BigInt of appropriate length will be allocated.
// {result_storage} may alias {x} or {y} for in-place modification.
-Handle<MutableBigInt> MutableBigInt::AbsoluteXor(
- Isolate* isolate, Handle<BigIntBase> x, Handle<BigIntBase> y,
- MutableBigInt* result_storage) {
+Handle<MutableBigInt> MutableBigInt::AbsoluteXor(Isolate* isolate,
+ Handle<BigIntBase> x,
+ Handle<BigIntBase> y,
+ MutableBigInt result_storage) {
return AbsoluteBitwiseOp(isolate, x, y, result_storage, kCopy, kSymmetric,
[](digit_t a, digit_t b) { return a ^ b; });
}
@@ -1385,9 +1403,9 @@ void MutableBigInt::MultiplyAccumulate(Handle<BigIntBase> multiplicand,
// Multiplies {source} with {factor} and adds {summand} to the result.
// {result} and {source} may be the same BigInt for inplace modification.
-void MutableBigInt::InternalMultiplyAdd(BigIntBase* source, digit_t factor,
+void MutableBigInt::InternalMultiplyAdd(BigIntBase source, digit_t factor,
digit_t summand, int n,
- MutableBigInt* result) {
+ MutableBigInt result) {
DCHECK(source->length() >= n);
DCHECK(result->length() >= n);
digit_t carry = summand;
@@ -1863,8 +1881,8 @@ int BigInt::DigitsByteLengthForBitfield(uint32_t bitfield) {
// The serialization format MUST NOT CHANGE without updating the format
// version in value-serializer.cc!
void BigInt::SerializeDigits(uint8_t* storage) {
- void* digits = reinterpret_cast<void*>(reinterpret_cast<Address>(this) +
- kDigitsOffset - kHeapObjectTag);
+ void* digits =
+ reinterpret_cast<void*>(ptr() + kDigitsOffset - kHeapObjectTag);
#if defined(V8_TARGET_LITTLE_ENDIAN)
int bytelength = length() * kDigitSize;
memcpy(storage, digits, bytelength);
@@ -1891,8 +1909,8 @@ MaybeHandle<BigInt> BigInt::FromSerializedDigits(
Handle<MutableBigInt> result =
MutableBigInt::Cast(isolate->factory()->NewBigInt(length, pretenure));
result->initialize_bitfield(sign, length);
- void* digits = reinterpret_cast<void*>(reinterpret_cast<Address>(*result) +
- kDigitsOffset - kHeapObjectTag);
+ void* digits =
+ reinterpret_cast<void*>(result->ptr() + kDigitsOffset - kHeapObjectTag);
#if defined(V8_TARGET_LITTLE_ENDIAN)
memcpy(digits, digits_storage.start(), bytelength);
void* padding_start =
@@ -1958,7 +1976,7 @@ MaybeHandle<String> MutableBigInt::ToStringBasePowerOfTwo(
->NewRawOneByteString(static_cast<int>(chars_required))
.ToHandleChecked();
DisallowHeapAllocation no_gc;
- uint8_t* buffer = result->GetChars();
+ uint8_t* buffer = result->GetChars(no_gc);
// Print the number into the string, starting from the last position.
int pos = static_cast<int>(chars_required - 1);
digit_t digit = 0;
@@ -2035,7 +2053,7 @@ MaybeHandle<String> MutableBigInt::ToStringGeneric(Isolate* isolate,
// Zap the string first.
{
DisallowHeapAllocation no_gc;
- uint8_t* chars = result->GetChars();
+ uint8_t* chars = result->GetChars(no_gc);
for (int i = 0; i < static_cast<int>(chars_required); i++) chars[i] = '?';
}
#endif
@@ -2068,7 +2086,7 @@ MaybeHandle<String> MutableBigInt::ToStringGeneric(Isolate* isolate,
DCHECK(!rest.is_null());
dividend = reinterpret_cast<Handle<BigIntBase>*>(&rest);
DisallowHeapAllocation no_gc;
- uint8_t* chars = result->GetChars();
+ uint8_t* chars = result->GetChars(no_gc);
for (int i = 0; i < chunk_chars; i++) {
chars[pos++] = kConversionChars[chunk % radix];
chunk /= radix;
@@ -2082,7 +2100,7 @@ MaybeHandle<String> MutableBigInt::ToStringGeneric(Isolate* isolate,
last_digit = rest->digit(0);
}
DisallowHeapAllocation no_gc;
- uint8_t* chars = result->GetChars();
+ uint8_t* chars = result->GetChars(no_gc);
do {
chars[pos++] = kConversionChars[last_digit % radix];
last_digit /= radix;
@@ -2154,6 +2172,8 @@ Handle<BigInt> BigInt::AsIntN(Isolate* isolate, uint64_t n, Handle<BigInt> x) {
false);
}
}
+ // Truncation is a no-op if x == -2^(n-1).
+ if (x_length == needed_length && top_digit == compare_digit) return x;
return MutableBigInt::TruncateToNBits(isolate, N, x);
}
return MutableBigInt::TruncateAndSubFromPowerOfTwo(isolate, N, x, false);
@@ -2372,7 +2392,7 @@ void BigInt::ToWordsArray64(int* sign_bit, int* words64_count,
}
}
-uint64_t MutableBigInt::GetRawBits(BigIntBase* x, bool* lossless) {
+uint64_t MutableBigInt::GetRawBits(BigIntBase x, bool* lossless) {
if (lossless != nullptr) *lossless = true;
if (x->is_zero()) return 0;
int len = x->length();
@@ -2387,14 +2407,14 @@ uint64_t MutableBigInt::GetRawBits(BigIntBase* x, bool* lossless) {
}
int64_t BigInt::AsInt64(bool* lossless) {
- uint64_t raw = MutableBigInt::GetRawBits(this, lossless);
+ uint64_t raw = MutableBigInt::GetRawBits(*this, lossless);
int64_t result = static_cast<int64_t>(raw);
if (lossless != nullptr && (result < 0) != sign()) *lossless = false;
return result;
}
uint64_t BigInt::AsUint64(bool* lossless) {
- uint64_t result = MutableBigInt::GetRawBits(this, lossless);
+ uint64_t result = MutableBigInt::GetRawBits(*this, lossless);
if (lossless != nullptr && sign()) *lossless = false;
return result;
}
@@ -2577,7 +2597,7 @@ void MutableBigInt::set_64_bits(uint64_t bits) {
#ifdef OBJECT_PRINT
void BigInt::BigIntPrint(std::ostream& os) {
DisallowHeapAllocation no_gc;
- HeapObject::PrintHeader(os, "BigInt");
+ PrintHeader(os, "BigInt");
int len = length();
os << "\n- length: " << len;
os << "\n- sign: " << sign();
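
// The AbsoluteAddOne signatures above change, but the underlying digit
// arithmetic does not: walk the digits propagating a carry, and spill into
// one extra digit on overflow. A portable sketch of the digit_add contract
// and that carry loop (simplified; V8's version operates on heap digits in
// place):
#include <cassert>
#include <cstdint>
#include <vector>

typedef uintptr_t digit_t;

// Returns a + b and accumulates the carry-out in *carry; unsigned
// wraparound (result < a) signals that a carry occurred.
digit_t digit_add(digit_t a, digit_t b, digit_t* carry) {
  digit_t result = a + b;
  *carry += (result < a);
  return result;
}

// Adds one to a little-endian digit vector, mirroring AbsoluteAddOne's
// carry loop; the push_back is the "will_overflow" extra-digit case.
void AddOne(std::vector<digit_t>* digits) {
  digit_t carry = 1;
  for (size_t i = 0; i < digits->size() && carry != 0; i++) {
    digit_t new_carry = 0;
    (*digits)[i] = digit_add((*digits)[i], carry, &new_carry);
    carry = new_carry;
  }
  if (carry != 0) digits->push_back(carry);
}

int main() {
  std::vector<digit_t> x = {~digit_t{0}, ~digit_t{0}};  // all digits maxed
  AddOne(&x);
  assert(x.size() == 3 && x[0] == 0 && x[1] == 0 && x[2] == 1);
}
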
diff --git a/deps/v8/src/objects/bigint.h b/deps/v8/src/objects/bigint.h
index 6081c5e3f8..096c4d3f40 100644
--- a/deps/v8/src/objects/bigint.h
+++ b/deps/v8/src/objects/bigint.h
@@ -7,6 +7,7 @@
#include "src/globals.h"
#include "src/objects.h"
+#include "src/objects/heap-object.h"
#include "src/utils.h"
// Has to be the last include (doesn't have include guards):
@@ -24,23 +25,46 @@ class ValueSerializer;
class BigIntBase : public HeapObject {
public:
inline int length() const {
- intptr_t bitfield = READ_INTPTR_FIELD(this, kBitfieldOffset);
+ int32_t bitfield = RELAXED_READ_INT32_FIELD(this, kBitfieldOffset);
return LengthBits::decode(static_cast<uint32_t>(bitfield));
}
- // Increasing kMaxLength will require code changes.
- static const int kMaxLengthBits = kMaxInt - kPointerSize * kBitsPerByte - 1;
- static const int kMaxLength = kMaxLengthBits / (kPointerSize * kBitsPerByte);
+ // For use by the GC.
+ inline int synchronized_length() const {
+ int32_t bitfield = ACQUIRE_READ_INT32_FIELD(this, kBitfieldOffset);
+ return LengthBits::decode(static_cast<uint32_t>(bitfield));
+ }
+
+ static inline BigIntBase unchecked_cast(Object o) {
+ return bit_cast<BigIntBase>(o);
+ }
+
+ // The maximum kMaxLengthBits that the current implementation supports
+ // would be kMaxInt - kSystemPointerSize * kBitsPerByte - 1.
+ // Since we want a platform independent limit, choose a nice round number
+ // somewhere below that maximum.
+ static const int kMaxLengthBits = 1 << 30; // ~1 billion.
+ static const int kMaxLength =
+ kMaxLengthBits / (kSystemPointerSize * kBitsPerByte);
+ // Sign and length are stored in the same bitfield. Since the GC needs to be
+ // able to read the length concurrently, the getters and setters are atomic.
static const int kLengthFieldBits = 30;
STATIC_ASSERT(kMaxLength <= ((1 << kLengthFieldBits) - 1));
class SignBits : public BitField<bool, 0, 1> {};
class LengthBits : public BitField<int, SignBits::kNext, kLengthFieldBits> {};
STATIC_ASSERT(LengthBits::kNext <= 32);
- static const int kBitfieldOffset = HeapObject::kHeaderSize;
- static const int kDigitsOffset = kBitfieldOffset + kPointerSize;
- static const int kHeaderSize = kDigitsOffset;
+ // Layout description.
+#define BIGINT_FIELDS(V) \
+ V(kBitfieldOffset, kInt32Size) \
+ V(kOptionalPaddingOffset, POINTER_SIZE_PADDING(kOptionalPaddingOffset)) \
+ /* Header size. */ \
+ V(kHeaderSize, 0) \
+ V(kDigitsOffset, 0)
+
+ DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize, BIGINT_FIELDS)
+#undef BIGINT_FIELDS
private:
friend class ::v8::internal::BigInt; // MSVC wants full namespace.
@@ -49,7 +73,7 @@ class BigIntBase : public HeapObject {
typedef uintptr_t digit_t;
static const int kDigitSize = sizeof(digit_t);
// kMaxLength definition assumes this:
- STATIC_ASSERT(kDigitSize == kPointerSize);
+ STATIC_ASSERT(kDigitSize == kSystemPointerSize);
static const int kDigitBits = kDigitSize * kBitsPerByte;
static const int kHalfDigitBits = kDigitBits / 2;
@@ -57,7 +81,7 @@ class BigIntBase : public HeapObject {
// sign() == true means negative.
inline bool sign() const {
- intptr_t bitfield = READ_INTPTR_FIELD(this, kBitfieldOffset);
+ int32_t bitfield = RELAXED_READ_INT32_FIELD(this, kBitfieldOffset);
return SignBits::decode(static_cast<uint32_t>(bitfield));
}
@@ -69,7 +93,10 @@ class BigIntBase : public HeapObject {
bool is_zero() const { return length() == 0; }
- DISALLOW_IMPLICIT_CONSTRUCTORS(BigIntBase);
+ // Only serves to make macros happy; other code should use IsBigInt.
+ bool IsBigIntBase() const { return true; }
+
+ OBJECT_CONSTRUCTORS(BigIntBase, HeapObject);
};
class FreshlyAllocatedBigInt : public BigIntBase {
@@ -85,13 +112,27 @@ class FreshlyAllocatedBigInt : public BigIntBase {
// (and no explicit operator is provided either).
public:
- inline static FreshlyAllocatedBigInt* cast(Object* object);
+ inline static FreshlyAllocatedBigInt cast(Object object);
+ inline static FreshlyAllocatedBigInt unchecked_cast(Object o) {
+ return bit_cast<FreshlyAllocatedBigInt>(o);
+ }
+
+ // Clear uninitialized padding space.
+ inline void clear_padding() {
+ if (FIELD_SIZE(kOptionalPaddingOffset) != 0) {
+ DCHECK_EQ(4, FIELD_SIZE(kOptionalPaddingOffset));
+ memset(reinterpret_cast<void*>(address() + kOptionalPaddingOffset), 0,
+ FIELD_SIZE(kOptionalPaddingOffset));
+ }
+ }
private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(FreshlyAllocatedBigInt);
+ // Only serves to make macros happy; other code should use IsBigInt.
+ bool IsFreshlyAllocatedBigInt() const { return true; }
+
+ OBJECT_CONSTRUCTORS(FreshlyAllocatedBigInt, BigIntBase);
};
-// UNDER CONSTRUCTION!
// Arbitrary precision integers in JavaScript.
class V8_EXPORT_PRIVATE BigInt : public BigIntBase {
public:
@@ -122,7 +163,7 @@ class V8_EXPORT_PRIVATE BigInt : public BigIntBase {
Handle<BigInt> y);
// More convenient version of "bool LessThan(x, y)".
static ComparisonResult CompareToBigInt(Handle<BigInt> x, Handle<BigInt> y);
- static bool EqualToBigInt(BigInt* x, BigInt* y);
+ static bool EqualToBigInt(BigInt x, BigInt y);
static MaybeHandle<BigInt> BitwiseAnd(Isolate* isolate, Handle<BigInt> x,
Handle<BigInt> y);
static MaybeHandle<BigInt> BitwiseXor(Isolate* isolate, Handle<BigInt> x,
@@ -213,7 +254,7 @@ class V8_EXPORT_PRIVATE BigInt : public BigIntBase {
Isolate* isolate, uint32_t bitfield, Vector<const uint8_t> digits_storage,
PretenureFlag pretenure);
- DISALLOW_IMPLICIT_CONSTRUCTORS(BigInt);
+ OBJECT_CONSTRUCTORS(BigInt, BigIntBase);
};
} // namespace internal
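
// Per the comment above, sign and length share one 32-bit bitfield and the
// GC reads the length concurrently, so the accessors are atomic: relaxed
// loads on the mutator side, a release store when the length changes, and
// an acquire load for synchronized_length(). A sketch of that protocol with
// std::atomic standing in for V8's field-access macros (layout constants
// below are assumptions chosen to match SignBits/LengthBits):
#include <atomic>
#include <cassert>
#include <cstdint>

class BigIntHeaderSketch {
 public:
  static const uint32_t kSignMask = 1;  // bit 0: sign
  static const int kLengthShift = 1;    // bits 1..30: length

  void initialize_bitfield(bool sign, int length) {
    bitfield_.store((static_cast<uint32_t>(length) << kLengthShift) |
                        (sign ? kSignMask : 0u),
                    std::memory_order_relaxed);
  }
  // GC-side reader: the acquire pairs with the release store below.
  int synchronized_length() const {
    return static_cast<int>(bitfield_.load(std::memory_order_acquire) >>
                            kLengthShift);
  }
  // Mutator-side writer, e.g. when MakeImmutable trims trailing digits.
  void synchronized_set_length(int new_length) {
    uint32_t bits = bitfield_.load(std::memory_order_relaxed);
    bits = (bits & kSignMask) |
           (static_cast<uint32_t>(new_length) << kLengthShift);
    bitfield_.store(bits, std::memory_order_release);
  }

 private:
  std::atomic<uint32_t> bitfield_;
};

int main() {
  BigIntHeaderSketch header;
  header.initialize_bitfield(/*sign=*/true, /*length=*/4);
  header.synchronized_set_length(3);
  assert(header.synchronized_length() == 3);
}
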
diff --git a/deps/v8/src/objects/builtin-function-id.h b/deps/v8/src/objects/builtin-function-id.h
index ed54811a2b..5d1dd445ea 100644
--- a/deps/v8/src/objects/builtin-function-id.h
+++ b/deps/v8/src/objects/builtin-function-id.h
@@ -192,6 +192,13 @@ enum class BuiltinFunctionId : uint8_t {
kGlobalIsFinite,
kGlobalIsNaN,
kNumberConstructor,
+ kPromiseAll,
+ kPromisePrototypeCatch,
+ kPromisePrototypeFinally,
+ kPromisePrototypeThen,
+ kPromiseRace,
+ kPromiseReject,
+ kPromiseResolve,
kSymbolConstructor,
kSymbolPrototypeToString,
kSymbolPrototypeValueOf,
diff --git a/deps/v8/src/objects/microtask-queue-inl.h b/deps/v8/src/objects/cell-inl.h
index 8d93ee5226..7e32059fee 100644
--- a/deps/v8/src/objects/microtask-queue-inl.h
+++ b/deps/v8/src/objects/cell-inl.h
@@ -2,12 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_OBJECTS_MICROTASK_QUEUE_INL_H_
-#define V8_OBJECTS_MICROTASK_QUEUE_INL_H_
+#ifndef V8_OBJECTS_CELL_INL_H_
+#define V8_OBJECTS_CELL_INL_H_
-#include "src/objects/microtask-queue.h"
+#include "src/objects/cell.h"
-#include "src/objects-inl.h"
+#include "src/heap/heap-inl.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -15,14 +15,19 @@
namespace v8 {
namespace internal {
-CAST_ACCESSOR(MicrotaskQueue)
-ACCESSORS(MicrotaskQueue, queue, FixedArray, kQueueOffset)
-SMI_ACCESSORS(MicrotaskQueue, pending_microtask_count,
- kPendingMicrotaskCountOffset)
+OBJECT_CONSTRUCTORS_IMPL(Cell, HeapObject)
+
+CAST_ACCESSOR(Cell)
+
+ACCESSORS(Cell, value, Object, kValueOffset)
+
+Cell Cell::FromValueAddress(Address value) {
+ return Cell::cast(HeapObject::FromAddress(value - kValueOffset));
+}
} // namespace internal
} // namespace v8
#include "src/objects/object-macros-undef.h"
-#endif // V8_OBJECTS_MICROTASK_QUEUE_INL_H_
+#endif // V8_OBJECTS_CELL_INL_H_
diff --git a/deps/v8/src/objects/cell.h b/deps/v8/src/objects/cell.h
new file mode 100644
index 0000000000..0792bc5d12
--- /dev/null
+++ b/deps/v8/src/objects/cell.h
@@ -0,0 +1,50 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_CELL_H_
+#define V8_OBJECTS_CELL_H_
+
+#include "src/objects/heap-object.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+class Cell : public HeapObject {
+ public:
+ // [value]: value of the cell.
+ DECL_ACCESSORS(value, Object)
+
+ DECL_CAST(Cell)
+
+ static inline Cell FromValueAddress(Address value);
+
+ inline Address ValueAddress() { return address() + kValueOffset; }
+
+ // Dispatched behavior.
+ DECL_PRINTER(Cell)
+ DECL_VERIFIER(Cell)
+
+ // Layout description.
+#define CELL_FIELDS(V) \
+ V(kValueOffset, kTaggedSize) \
+ /* Total size. */ \
+ V(kSize, 0)
+
+ DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize, CELL_FIELDS)
+#undef CELL_FIELDS
+
+ typedef FixedBodyDescriptor<kValueOffset, kSize, kSize> BodyDescriptor;
+
+ OBJECT_CONSTRUCTORS(Cell, HeapObject);
+};
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_CELL_H_
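
// FromValueAddress above inverts ValueAddress(): given the address of a
// Cell's value slot, subtracting kValueOffset recovers the object's base
// address. The arithmetic in isolation (addresses are plain integers here,
// and the 8-byte header is an assumed 64-bit layout, not a V8 guarantee):
#include <cassert>
#include <cstdint>

typedef uintptr_t Address;

const int kHeaderSize = 8;             // assumed one-word map header
const int kValueOffset = kHeaderSize;  // the value slot follows the header

Address ValueAddress(Address cell_base) { return cell_base + kValueOffset; }

// The inverse mapping used by Cell::FromValueAddress.
Address FromValueAddress(Address value_address) {
  return value_address - kValueOffset;
}

int main() {
  Address cell = 0x1000;
  assert(FromValueAddress(ValueAddress(cell)) == cell);  // exact round-trip
}
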
diff --git a/deps/v8/src/objects/code-inl.h b/deps/v8/src/objects/code-inl.h
index 34c9f2fc28..476e7c5ce4 100644
--- a/deps/v8/src/objects/code-inl.h
+++ b/deps/v8/src/objects/code-inl.h
@@ -10,8 +10,10 @@
#include "src/interpreter/bytecode-register.h"
#include "src/isolate.h"
#include "src/objects/dictionary.h"
+#include "src/objects/instance-type-inl.h"
#include "src/objects/map-inl.h"
#include "src/objects/maybe-object-inl.h"
+#include "src/objects/smi-inl.h"
#include "src/v8memory.h"
// Has to be the last include (doesn't have include guards):
@@ -20,6 +22,15 @@
namespace v8 {
namespace internal {
+OBJECT_CONSTRUCTORS_IMPL(DeoptimizationData, FixedArray)
+OBJECT_CONSTRUCTORS_IMPL(BytecodeArray, FixedArrayBase)
+OBJECT_CONSTRUCTORS_IMPL(AbstractCode, HeapObject)
+OBJECT_CONSTRUCTORS_IMPL(DependentCode, WeakFixedArray)
+OBJECT_CONSTRUCTORS_IMPL(CodeDataContainer, HeapObject)
+OBJECT_CONSTRUCTORS_IMPL(SourcePositionTableWithFrameCache, Tuple2)
+
+NEVER_READ_ONLY_SPACE_IMPL(AbstractCode)
+
CAST_ACCESSOR(AbstractCode)
CAST_ACCESSOR(BytecodeArray)
CAST_ACCESSOR(Code)
@@ -49,7 +60,7 @@ int AbstractCode::InstructionSize() {
}
}
-ByteArray* AbstractCode::source_position_table() {
+ByteArray AbstractCode::source_position_table() {
if (IsCode()) {
return GetCode()->SourcePositionTable();
} else {
@@ -57,8 +68,8 @@ ByteArray* AbstractCode::source_position_table() {
}
}
-Object* AbstractCode::stack_frame_cache() {
- Object* maybe_table;
+Object AbstractCode::stack_frame_cache() {
+ Object maybe_table;
if (IsCode()) {
maybe_table = GetCode()->source_position_table();
} else {
@@ -132,21 +143,21 @@ AbstractCode::Kind AbstractCode::kind() {
}
}
-Code* AbstractCode::GetCode() { return Code::cast(this); }
+Code AbstractCode::GetCode() { return Code::cast(*this); }
-BytecodeArray* AbstractCode::GetBytecodeArray() {
- return BytecodeArray::cast(this);
+BytecodeArray AbstractCode::GetBytecodeArray() {
+ return BytecodeArray::cast(*this);
}
-DependentCode* DependentCode::next_link() {
+DependentCode DependentCode::next_link() {
return DependentCode::cast(Get(kNextLinkIndex)->GetHeapObjectAssumeStrong());
}
-void DependentCode::set_next_link(DependentCode* next) {
+void DependentCode::set_next_link(DependentCode next) {
Set(kNextLinkIndex, HeapObjectReference::Strong(next));
}
-int DependentCode::flags() { return Smi::ToInt(Get(kFlagsIndex)->cast<Smi>()); }
+int DependentCode::flags() { return Smi::ToInt(Get(kFlagsIndex)->ToSmi()); }
void DependentCode::set_flags(int flags) {
Set(kFlagsIndex, MaybeObject::FromObject(Smi::FromInt(flags)));
@@ -162,11 +173,11 @@ DependentCode::DependencyGroup DependentCode::group() {
return static_cast<DependencyGroup>(GroupField::decode(flags()));
}
-void DependentCode::set_object_at(int i, MaybeObject* object) {
+void DependentCode::set_object_at(int i, MaybeObject object) {
Set(kCodesStartIndex + i, object);
}
-MaybeObject* DependentCode::object_at(int i) {
+MaybeObject DependentCode::object_at(int i) {
return Get(kCodesStartIndex + i);
}
@@ -179,21 +190,31 @@ void DependentCode::copy(int from, int to) {
Set(kCodesStartIndex + to, Get(kCodesStartIndex + from));
}
+OBJECT_CONSTRUCTORS_IMPL(Code, HeapObject)
+NEVER_READ_ONLY_SPACE_IMPL(Code)
+
INT_ACCESSORS(Code, raw_instruction_size, kInstructionSizeOffset)
INT_ACCESSORS(Code, handler_table_offset, kHandlerTableOffsetOffset)
#define CODE_ACCESSORS(name, type, offset) \
ACCESSORS_CHECKED2(Code, name, type, offset, true, !Heap::InNewSpace(value))
+#define SYNCHRONIZED_CODE_ACCESSORS(name, type, offset) \
+ SYNCHRONIZED_ACCESSORS_CHECKED2(Code, name, type, offset, true, \
+ !Heap::InNewSpace(value))
+
CODE_ACCESSORS(relocation_info, ByteArray, kRelocationInfoOffset)
CODE_ACCESSORS(deoptimization_data, FixedArray, kDeoptimizationDataOffset)
CODE_ACCESSORS(source_position_table, Object, kSourcePositionTableOffset)
-CODE_ACCESSORS(code_data_container, CodeDataContainer, kCodeDataContainerOffset)
+// Concurrent marker needs to access kind-specific flags in code data container.
+SYNCHRONIZED_CODE_ACCESSORS(code_data_container, CodeDataContainer,
+ kCodeDataContainerOffset)
#undef CODE_ACCESSORS
+#undef SYNCHRONIZED_CODE_ACCESSORS
void Code::WipeOutHeader() {
- WRITE_FIELD(this, kRelocationInfoOffset, nullptr);
- WRITE_FIELD(this, kDeoptimizationDataOffset, nullptr);
- WRITE_FIELD(this, kSourcePositionTableOffset, nullptr);
- WRITE_FIELD(this, kCodeDataContainerOffset, nullptr);
+ WRITE_FIELD(this, kRelocationInfoOffset, Smi::FromInt(0));
+ WRITE_FIELD(this, kDeoptimizationDataOffset, Smi::FromInt(0));
+ WRITE_FIELD(this, kSourcePositionTableOffset, Smi::FromInt(0));
+ WRITE_FIELD(this, kCodeDataContainerOffset, Smi::FromInt(0));
}
void Code::clear_padding() {
@@ -205,29 +226,19 @@ void Code::clear_padding() {
CodeSize() - (data_end - address()));
}
-ByteArray* Code::SourcePositionTable() const {
- Object* maybe_table = source_position_table();
+ByteArray Code::SourcePositionTable() const {
+ Object maybe_table = source_position_table();
if (maybe_table->IsByteArray()) return ByteArray::cast(maybe_table);
DCHECK(maybe_table->IsSourcePositionTableWithFrameCache());
return SourcePositionTableWithFrameCache::cast(maybe_table)
->source_position_table();
}
-uint32_t Code::stub_key() const {
- DCHECK(is_stub());
- return READ_UINT32_FIELD(this, kStubKeyOffset);
-}
-
-void Code::set_stub_key(uint32_t key) {
- DCHECK(is_stub() || key == 0); // Allow zero initialization.
- WRITE_UINT32_FIELD(this, kStubKeyOffset, key);
-}
-
-Object* Code::next_code_link() const {
+Object Code::next_code_link() const {
return code_data_container()->next_code_link();
}
-void Code::set_next_code_link(Object* value) {
+void Code::set_next_code_link(Object value) {
code_data_container()->set_next_code_link(value);
}
@@ -304,8 +315,8 @@ int Code::SizeIncludingMetadata() const {
return size;
}
-ByteArray* Code::unchecked_relocation_info() const {
- return reinterpret_cast<ByteArray*>(READ_FIELD(this, kRelocationInfoOffset));
+ByteArray Code::unchecked_relocation_info() const {
+ return ByteArray::unchecked_cast(READ_FIELD(this, kRelocationInfoOffset));
}
byte* Code::relocation_start() const {
@@ -313,8 +324,7 @@ byte* Code::relocation_start() const {
}
byte* Code::relocation_end() const {
- return unchecked_relocation_info()->GetDataStartAddress() +
- unchecked_relocation_info()->length();
+ return unchecked_relocation_info()->GetDataEndAddress();
}
int Code::relocation_size() const {
@@ -341,6 +351,14 @@ int Code::ExecutableSize() const {
return raw_instruction_size() + Code::kHeaderSize;
}
+// static
+void Code::CopyRelocInfoToByteArray(ByteArray dest, const CodeDesc& desc) {
+ DCHECK_EQ(dest->length(), desc.reloc_size);
+ CopyBytes(dest->GetDataStartAddress(),
+ desc.buffer + desc.buffer_size - desc.reloc_size,
+ static_cast<size_t>(desc.reloc_size));
+}
+
int Code::CodeSize() const { return SizeFor(body_size()); }
Code::Kind Code::kind() const {
@@ -362,25 +380,17 @@ void Code::initialize_flags(Kind kind, bool has_unwinding_info,
}
inline bool Code::is_interpreter_trampoline_builtin() const {
- Builtins* builtins = GetIsolate()->builtins();
- Code* interpreter_entry_trampoline =
- builtins->builtin(Builtins::kInterpreterEntryTrampoline);
bool is_interpreter_trampoline =
- (builtin_index() == interpreter_entry_trampoline->builtin_index() ||
- this == builtins->builtin(Builtins::kInterpreterEnterBytecodeAdvance) ||
- this == builtins->builtin(Builtins::kInterpreterEnterBytecodeDispatch));
- DCHECK_IMPLIES(is_interpreter_trampoline, !Builtins::IsLazy(builtin_index()));
+ (builtin_index() == Builtins::kInterpreterEntryTrampoline ||
+ builtin_index() == Builtins::kInterpreterEnterBytecodeAdvance ||
+ builtin_index() == Builtins::kInterpreterEnterBytecodeDispatch);
return is_interpreter_trampoline;
}
inline bool Code::checks_optimization_marker() const {
- Builtins* builtins = GetIsolate()->builtins();
- Code* interpreter_entry_trampoline =
- builtins->builtin(Builtins::kInterpreterEntryTrampoline);
bool checks_marker =
- (this == builtins->builtin(Builtins::kCompileLazy) ||
- builtin_index() == interpreter_entry_trampoline->builtin_index());
- DCHECK_IMPLIES(checks_marker, !Builtins::IsLazy(builtin_index()));
+ (builtin_index() == Builtins::kCompileLazy ||
+ builtin_index() == Builtins::kInterpreterEntryTrampoline);
return checks_marker ||
(kind() == OPTIMIZED_FUNCTION && marked_for_deoptimization());
}
@@ -400,53 +410,40 @@ inline bool Code::is_turbofanned() const {
inline bool Code::can_have_weak_objects() const {
DCHECK(kind() == OPTIMIZED_FUNCTION);
- int flags = code_data_container()->kind_specific_flags();
+ int32_t flags = code_data_container()->kind_specific_flags();
return CanHaveWeakObjectsField::decode(flags);
}
inline void Code::set_can_have_weak_objects(bool value) {
DCHECK(kind() == OPTIMIZED_FUNCTION);
- int previous = code_data_container()->kind_specific_flags();
- int updated = CanHaveWeakObjectsField::update(previous, value);
- code_data_container()->set_kind_specific_flags(updated);
-}
-
-inline bool Code::is_construct_stub() const {
- DCHECK(kind() == BUILTIN);
- int flags = code_data_container()->kind_specific_flags();
- return IsConstructStubField::decode(flags);
-}
-
-inline void Code::set_is_construct_stub(bool value) {
- DCHECK(kind() == BUILTIN);
- int previous = code_data_container()->kind_specific_flags();
- int updated = IsConstructStubField::update(previous, value);
+ int32_t previous = code_data_container()->kind_specific_flags();
+ int32_t updated = CanHaveWeakObjectsField::update(previous, value);
code_data_container()->set_kind_specific_flags(updated);
}
inline bool Code::is_promise_rejection() const {
DCHECK(kind() == BUILTIN);
- int flags = code_data_container()->kind_specific_flags();
+ int32_t flags = code_data_container()->kind_specific_flags();
return IsPromiseRejectionField::decode(flags);
}
inline void Code::set_is_promise_rejection(bool value) {
DCHECK(kind() == BUILTIN);
- int previous = code_data_container()->kind_specific_flags();
- int updated = IsPromiseRejectionField::update(previous, value);
+ int32_t previous = code_data_container()->kind_specific_flags();
+ int32_t updated = IsPromiseRejectionField::update(previous, value);
code_data_container()->set_kind_specific_flags(updated);
}
inline bool Code::is_exception_caught() const {
DCHECK(kind() == BUILTIN);
- int flags = code_data_container()->kind_specific_flags();
+ int32_t flags = code_data_container()->kind_specific_flags();
return IsExceptionCaughtField::decode(flags);
}
inline void Code::set_is_exception_caught(bool value) {
DCHECK(kind() == BUILTIN);
- int previous = code_data_container()->kind_specific_flags();
- int updated = IsExceptionCaughtField::update(previous, value);
+ int32_t previous = code_data_container()->kind_specific_flags();
+ int32_t updated = IsExceptionCaughtField::update(previous, value);
code_data_container()->set_kind_specific_flags(updated);
}
@@ -496,57 +493,96 @@ void Code::set_safepoint_table_offset(int offset) {
bool Code::marked_for_deoptimization() const {
DCHECK(kind() == OPTIMIZED_FUNCTION);
- int flags = code_data_container()->kind_specific_flags();
+ int32_t flags = code_data_container()->kind_specific_flags();
return MarkedForDeoptimizationField::decode(flags);
}
void Code::set_marked_for_deoptimization(bool flag) {
DCHECK(kind() == OPTIMIZED_FUNCTION);
DCHECK_IMPLIES(flag, AllowDeoptimization::IsAllowed(GetIsolate()));
- int previous = code_data_container()->kind_specific_flags();
- int updated = MarkedForDeoptimizationField::update(previous, flag);
+ int32_t previous = code_data_container()->kind_specific_flags();
+ int32_t updated = MarkedForDeoptimizationField::update(previous, flag);
+ code_data_container()->set_kind_specific_flags(updated);
+}
+
+bool Code::embedded_objects_cleared() const {
+ DCHECK(kind() == OPTIMIZED_FUNCTION);
+ int32_t flags = code_data_container()->kind_specific_flags();
+ return EmbeddedObjectsClearedField::decode(flags);
+}
+
+void Code::set_embedded_objects_cleared(bool flag) {
+ DCHECK(kind() == OPTIMIZED_FUNCTION);
+ DCHECK_IMPLIES(flag, marked_for_deoptimization());
+ int32_t previous = code_data_container()->kind_specific_flags();
+ int32_t updated = EmbeddedObjectsClearedField::update(previous, flag);
code_data_container()->set_kind_specific_flags(updated);
}
bool Code::deopt_already_counted() const {
DCHECK(kind() == OPTIMIZED_FUNCTION);
- int flags = code_data_container()->kind_specific_flags();
+ int32_t flags = code_data_container()->kind_specific_flags();
return DeoptAlreadyCountedField::decode(flags);
}
void Code::set_deopt_already_counted(bool flag) {
DCHECK(kind() == OPTIMIZED_FUNCTION);
DCHECK_IMPLIES(flag, AllowDeoptimization::IsAllowed(GetIsolate()));
- int previous = code_data_container()->kind_specific_flags();
- int updated = DeoptAlreadyCountedField::update(previous, flag);
+ int32_t previous = code_data_container()->kind_specific_flags();
+ int32_t updated = DeoptAlreadyCountedField::update(previous, flag);
code_data_container()->set_kind_specific_flags(updated);
}
-bool Code::is_stub() const { return kind() == STUB; }
bool Code::is_optimized_code() const { return kind() == OPTIMIZED_FUNCTION; }
bool Code::is_wasm_code() const { return kind() == WASM_FUNCTION; }
int Code::constant_pool_offset() const {
- if (!FLAG_enable_embedded_constant_pool) return InstructionSize();
+ if (!FLAG_enable_embedded_constant_pool) return code_comments_offset();
return READ_INT_FIELD(this, kConstantPoolOffset);
}
void Code::set_constant_pool_offset(int value) {
if (!FLAG_enable_embedded_constant_pool) return;
+ DCHECK_LE(value, InstructionSize());
WRITE_INT_FIELD(this, kConstantPoolOffset, value);
}
+int Code::constant_pool_size() const {
+ if (!FLAG_enable_embedded_constant_pool) return 0;
+ return code_comments_offset() - constant_pool_offset();
+}
Address Code::constant_pool() const {
if (FLAG_enable_embedded_constant_pool) {
int offset = constant_pool_offset();
- if (offset < InstructionSize()) {
+ if (offset < code_comments_offset()) {
return InstructionStart() + offset;
}
}
return kNullAddress;
}
-Code* Code::GetCodeFromTargetAddress(Address address) {
+int Code::code_comments_offset() const {
+ int offset = READ_INT_FIELD(this, kCodeCommentsOffset);
+ DCHECK_LE(0, offset);
+ DCHECK_LE(offset, InstructionSize());
+ return offset;
+}
+
+void Code::set_code_comments_offset(int offset) {
+ DCHECK_LE(0, offset);
+ DCHECK_LE(offset, InstructionSize());
+ WRITE_INT_FIELD(this, kCodeCommentsOffset, offset);
+}
+
+Address Code::code_comments() const {
+ int offset = code_comments_offset();
+ if (offset < InstructionSize()) {
+ return InstructionStart() + offset;
+ }
+ return kNullAddress;
+}
+
+Code Code::GetCodeFromTargetAddress(Address address) {
{
// TODO(jgruber,v8:6666): Support embedded builtins here. We'd need to pass
// in the current isolate.
@@ -555,47 +591,43 @@ Code* Code::GetCodeFromTargetAddress(Address address) {
CHECK(address < start || address >= end);
}
- HeapObject* code = HeapObject::FromAddress(address - Code::kHeaderSize);
- // GetCodeFromTargetAddress might be called when marking objects during mark
- // sweep. reinterpret_cast is therefore used instead of the more appropriate
- // Code::cast. Code::cast does not work when the object's map is
- // marked.
- Code* result = reinterpret_cast<Code*>(code);
- return result;
+ HeapObject code = HeapObject::FromAddress(address - Code::kHeaderSize);
+ // Unchecked cast because we can't rely on the map currently
+ // not being a forwarding pointer.
+ return Code::unchecked_cast(code);
}
-Object* Code::GetObjectFromCodeEntry(Address code_entry) {
- return HeapObject::FromAddress(code_entry - Code::kHeaderSize);
-}
-
-Object* Code::GetObjectFromEntryAddress(Address location_of_address) {
- return GetObjectFromCodeEntry(Memory<Address>(location_of_address));
+Code Code::GetObjectFromEntryAddress(Address location_of_address) {
+ Address code_entry = Memory<Address>(location_of_address);
+ HeapObject code = HeapObject::FromAddress(code_entry - Code::kHeaderSize);
+ // Unchecked cast because we can't rely on the map currently
+ // not being a forwarding pointer.
+ return Code::unchecked_cast(code);
}
bool Code::CanContainWeakObjects() {
return is_optimized_code() && can_have_weak_objects();
}
-bool Code::IsWeakObject(Object* object) {
+bool Code::IsWeakObject(HeapObject object) {
return (CanContainWeakObjects() && IsWeakObjectInOptimizedCode(object));
}
-bool Code::IsWeakObjectInOptimizedCode(Object* object) {
- if (object->IsMap()) {
+bool Code::IsWeakObjectInOptimizedCode(HeapObject object) {
+ Map map = object->synchronized_map();
+ InstanceType instance_type = map->instance_type();
+ if (InstanceTypeChecker::IsMap(instance_type)) {
return Map::cast(object)->CanTransition();
}
- if (object->IsCell()) {
- object = Cell::cast(object)->value();
- } else if (object->IsPropertyCell()) {
- object = PropertyCell::cast(object)->value();
- }
- if (object->IsJSReceiver() || object->IsContext()) {
- return true;
- }
- return false;
+ return InstanceTypeChecker::IsPropertyCell(instance_type) ||
+ InstanceTypeChecker::IsJSReceiver(instance_type) ||
+ InstanceTypeChecker::IsContext(instance_type);
}
-INT_ACCESSORS(CodeDataContainer, kind_specific_flags, kKindSpecificFlagsOffset)
+// This field has to have relaxed atomic accessors because it is accessed in the
+// concurrent marker.
+RELAXED_INT32_ACCESSORS(CodeDataContainer, kind_specific_flags,
+ kKindSpecificFlagsOffset)
ACCESSORS(CodeDataContainer, next_code_link, Object, kNextCodeLinkOffset)
void CodeDataContainer::clear_padding() {
@@ -615,7 +647,7 @@ void BytecodeArray::set(int index, byte value) {
void BytecodeArray::set_frame_size(int frame_size) {
DCHECK_GE(frame_size, 0);
- DCHECK(IsAligned(frame_size, static_cast<unsigned>(kPointerSize)));
+ DCHECK(IsAligned(frame_size, kSystemPointerSize));
WRITE_INT_FIELD(this, kFrameSizeOffset, frame_size);
}
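// --- Editor's note: illustrative sketch, not part of the upstream diff. ---
// frame_size is stored as a pointer-aligned byte count; register_count()
// below recovers the interpreter register file size by division. A minimal
// model, assuming an 8-byte system pointer:
constexpr int kSystemPointerSizeSketch = 8;
constexpr int RegisterCountSketch(int frame_size_bytes) {
  return frame_size_bytes / kSystemPointerSizeSketch;  // e.g. 40 bytes -> 5
}
static_assert(RegisterCountSketch(40) == 5, "a 40-byte frame holds 5 slots");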
@@ -624,7 +656,7 @@ int BytecodeArray::frame_size() const {
  return READ_INT_FIELD(this, kFrameSizeOffset);
}
int BytecodeArray::register_count() const {
- return frame_size() / kPointerSize;
+ return frame_size() / kSystemPointerSize;
}
void BytecodeArray::set_parameter_count(int number_of_parameters) {
@@ -632,7 +664,7 @@ void BytecodeArray::set_parameter_count(int number_of_parameters) {
// Parameter count is stored as the size on stack of the parameters to allow
// it to be used directly by generated code.
WRITE_INT_FIELD(this, kParameterSizeOffset,
- (number_of_parameters << kPointerSizeLog2));
+ (number_of_parameters << kSystemPointerSizeLog2));
}
interpreter::Register BytecodeArray::incoming_new_target_or_generator_register()
@@ -694,7 +726,7 @@ void BytecodeArray::set_bytecode_age(BytecodeArray::Age age) {
int BytecodeArray::parameter_count() const {
// Parameter count is stored as the size on stack of the parameters to allow
// it to be used directly by generated code.
- return READ_INT_FIELD(this, kParameterSizeOffset) >> kPointerSizeLog2;
+ return READ_INT_FIELD(this, kParameterSizeOffset) >> kSystemPointerSizeLog2;
}
ACCESSORS(BytecodeArray, constant_pool, FixedArray, kConstantPoolOffset)
@@ -709,11 +741,11 @@ void BytecodeArray::clear_padding() {
}
Address BytecodeArray::GetFirstBytecodeAddress() {
- return reinterpret_cast<Address>(this) - kHeapObjectTag + kHeaderSize;
+ return ptr() - kHeapObjectTag + kHeaderSize;
}
-ByteArray* BytecodeArray::SourcePositionTable() {
- Object* maybe_table = source_position_table();
+ByteArray BytecodeArray::SourcePositionTable() {
+ Object maybe_table = source_position_table();
if (maybe_table->IsByteArray()) return ByteArray::cast(maybe_table);
DCHECK(maybe_table->IsSourcePositionTableWithFrameCache());
return SourcePositionTableWithFrameCache::cast(maybe_table)
@@ -721,7 +753,7 @@ ByteArray* BytecodeArray::SourcePositionTable() {
      ->source_position_table();
}
void BytecodeArray::ClearFrameCacheFromSourcePositionTable() {
- Object* maybe_table = source_position_table();
+ Object maybe_table = source_position_table();
if (maybe_table->IsByteArray()) return;
DCHECK(maybe_table->IsSourcePositionTableWithFrameCache());
set_source_position_table(SourcePositionTableWithFrameCache::cast(maybe_table)
    ->source_position_table());
}
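// --- Editor's note: illustrative sketch, not part of the upstream diff. ---
// SourcePositionTable() above dispatches on the dynamic type of the
// source_position_table slot: either a bare ByteArray, or a
// SourcePositionTableWithFrameCache wrapper holding the ByteArray. The same
// shape in self-contained C++ (these types are stand-ins, not V8's):
#include <variant>

struct ByteArraySketch {};
struct TableWithFrameCacheSketch {
  ByteArraySketch source_position_table;  // the wrapped table
};
using SlotSketch = std::variant<ByteArraySketch, TableWithFrameCacheSketch>;

ByteArraySketch SourcePositionTableSketch(const SlotSketch& slot) {
  // Fast path: the slot already holds the table itself.
  if (auto* direct = std::get_if<ByteArraySketch>(&slot)) return *direct;
  // Otherwise unwrap the frame-cache pair, as the cast above does.
  return std::get<TableWithFrameCacheSketch>(slot).source_position_table;
}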
diff --git a/deps/v8/src/objects/code.h b/deps/v8/src/objects/code.h
index 1f1d4b71d6..6239ef9a0b 100644
--- a/deps/v8/src/objects/code.h
+++ b/deps/v8/src/objects/code.h
@@ -5,9 +5,12 @@
#ifndef V8_OBJECTS_CODE_H_
#define V8_OBJECTS_CODE_H_
+#include "src/contexts.h"
#include "src/handler-table.h"
#include "src/objects.h"
#include "src/objects/fixed-array.h"
+#include "src/objects/heap-object.h"
+#include "src/objects/struct.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -25,8 +28,9 @@ class Register;
}
// Code describes objects with on-the-fly generated machine code.
-class Code : public HeapObject, public NeverReadOnlySpaceObject {
+class Code : public HeapObject {
public:
+ NEVER_READ_ONLY_SPACE
// Opaque data type for encapsulating code flags like kind, inline
// cache state, and arguments count.
typedef uint32_t Flags;
@@ -54,7 +58,6 @@ class Code : public HeapObject, public NeverReadOnlySpaceObject {
#ifdef ENABLE_DISASSEMBLER
const char* GetName(Isolate* isolate) const;
- void PrintBuiltinCode(Isolate* isolate, const char* name);
void Disassemble(const char* name, std::ostream& os,
Address current_pc = kNullAddress);
#endif
@@ -74,7 +77,9 @@ class Code : public HeapObject, public NeverReadOnlySpaceObject {
// [relocation_info]: Code relocation information
DECL_ACCESSORS(relocation_info, ByteArray)
- void InvalidateEmbeddedObjects(Heap* heap);
+
+ // This function should be called only from GC.
+ void ClearEmbeddedObjects(Heap* heap);
// [deoptimization_data]: Array containing data for deopt.
DECL_ACCESSORS(deoptimization_data, FixedArray)
@@ -82,34 +87,35 @@ class Code : public HeapObject, public NeverReadOnlySpaceObject {
// [source_position_table]: ByteArray for the source positions table or
// SourcePositionTableWithFrameCache.
DECL_ACCESSORS(source_position_table, Object)
- inline ByteArray* SourcePositionTable() const;
+ inline ByteArray SourcePositionTable() const;
// [code_data_container]: A container indirection for all mutable fields.
DECL_ACCESSORS(code_data_container, CodeDataContainer)
- // [stub_key]: The major/minor key of a code stub.
- inline uint32_t stub_key() const;
- inline void set_stub_key(uint32_t key);
-
// [next_code_link]: Link for lists of optimized or deoptimized code.
// Note that this field is stored in the {CodeDataContainer} to be mutable.
- inline Object* next_code_link() const;
- inline void set_next_code_link(Object* value);
+ inline Object next_code_link() const;
+ inline void set_next_code_link(Object value);
// [constant_pool offset]: Offset of the constant pool.
// Valid for FLAG_enable_embedded_constant_pool only
inline int constant_pool_offset() const;
inline void set_constant_pool_offset(int offset);
+ inline int constant_pool_size() const;
+
+ // [code_comments_offset]: Offset of the code comment section.
+ inline int code_comments_offset() const;
+ inline void set_code_comments_offset(int offset);
+ inline Address code_comments() const;
// Unchecked accessors to be used during GC.
- inline ByteArray* unchecked_relocation_info() const;
+ inline ByteArray unchecked_relocation_info() const;
inline int relocation_size() const;
// [kind]: Access to specific code kind.
inline Kind kind() const;
- inline bool is_stub() const;
inline bool is_optimized_code() const;
inline bool is_wasm_code() const;
@@ -132,12 +138,6 @@ class Code : public HeapObject, public NeverReadOnlySpaceObject {
inline bool can_have_weak_objects() const;
inline void set_can_have_weak_objects(bool value);
- // [is_construct_stub]: For kind BUILTIN, tells whether the code object
- // represents a hand-written construct stub
- // (e.g., NumberConstructor_ConstructStub).
- inline bool is_construct_stub() const;
- inline void set_is_construct_stub(bool value);
-
// [builtin_index]: For builtins, tells which builtin index the code object
// has. The builtin index is a non-negative integer for builtins, and -1
// otherwise.
@@ -162,10 +162,16 @@ class Code : public HeapObject, public NeverReadOnlySpaceObject {
inline void set_handler_table_offset(int offset);
// [marked_for_deoptimization]: For kind OPTIMIZED_FUNCTION tells whether
- // the code is going to be deoptimized because of dead embedded maps.
+ // the code is going to be deoptimized.
inline bool marked_for_deoptimization() const;
inline void set_marked_for_deoptimization(bool flag);
+ // [embedded_objects_cleared]: For kind OPTIMIZED_FUNCTION tells whether
+ // the embedded objects in the code marked for deoptimization were cleared.
+ // Note that embedded_objects_cleared() implies marked_for_deoptimization().
+ inline bool embedded_objects_cleared() const;
+ inline void set_embedded_objects_cleared(bool flag);
+
// [deopt_already_counted]: For kind OPTIMIZED_FUNCTION tells whether
// the code was already deoptimized.
inline bool deopt_already_counted() const;
@@ -212,13 +218,10 @@ class Code : public HeapObject, public NeverReadOnlySpaceObject {
bool is_off_heap_trampoline);
// Convert a target address into a code object.
- static inline Code* GetCodeFromTargetAddress(Address address);
+ static inline Code GetCodeFromTargetAddress(Address address);
// Convert an entry address into an object.
- static inline Object* GetObjectFromEntryAddress(Address location_of_address);
-
- // Convert a code entry into an object.
- static inline Object* GetObjectFromCodeEntry(Address code_entry);
+ static inline Code GetObjectFromEntryAddress(Address location_of_address);
// Returns the address of the first instruction.
inline Address raw_instruction_start() const;
@@ -308,8 +311,13 @@ class Code : public HeapObject, public NeverReadOnlySpaceObject {
// Migrate code from desc without flushing the instruction cache.
void CopyFromNoFlush(Heap* heap, const CodeDesc& desc);
+ // Copy the RelocInfo portion of |desc| to |dest|. The ByteArray must be
+ // exactly the same size as the RelocInfo in |desc|.
+ static inline void CopyRelocInfoToByteArray(ByteArray dest,
+ const CodeDesc& desc);
+
// Flushes the instruction cache for the executable instructions of this code
- // object.
+ // object. Make sure to call this while the code is still writable.
void FlushICache() const;
// Returns the object size for a given body (used for allocation).
@@ -337,69 +345,73 @@ class Code : public HeapObject, public NeverReadOnlySpaceObject {
inline HandlerTable::CatchPrediction GetBuiltinCatchPrediction();
-#ifdef DEBUG
- enum VerifyMode { kNoContextSpecificPointers, kNoContextRetainingPointers };
- void VerifyEmbeddedObjects(Isolate* isolate,
- VerifyMode mode = kNoContextRetainingPointers);
-#endif // DEBUG
-
bool IsIsolateIndependent(Isolate* isolate);
inline bool CanContainWeakObjects();
- inline bool IsWeakObject(Object* object);
+ inline bool IsWeakObject(HeapObject object);
- static inline bool IsWeakObjectInOptimizedCode(Object* object);
+ static inline bool IsWeakObjectInOptimizedCode(HeapObject object);
// Return true if the function is inlined in the code.
- bool Inlines(SharedFunctionInfo* sfi);
+ bool Inlines(SharedFunctionInfo sfi);
- class OptimizedCodeIterator {
- public:
- explicit OptimizedCodeIterator(Isolate* isolate);
- Code* Next();
-
- private:
- Context* next_context_;
- Code* current_code_;
- Isolate* isolate_;
-
- DisallowHeapAllocation no_gc;
- DISALLOW_COPY_AND_ASSIGN(OptimizedCodeIterator)
- };
-
- static const int kConstantPoolSize =
- FLAG_enable_embedded_constant_pool ? kIntSize : 0;
+ class OptimizedCodeIterator;
// Layout description.
- static const int kRelocationInfoOffset = HeapObject::kHeaderSize;
- static const int kDeoptimizationDataOffset =
- kRelocationInfoOffset + kPointerSize;
- static const int kSourcePositionTableOffset =
- kDeoptimizationDataOffset + kPointerSize;
- static const int kCodeDataContainerOffset =
- kSourcePositionTableOffset + kPointerSize;
- static const int kInstructionSizeOffset =
- kCodeDataContainerOffset + kPointerSize;
- static const int kFlagsOffset = kInstructionSizeOffset + kIntSize;
- static const int kSafepointTableOffsetOffset = kFlagsOffset + kIntSize;
- static const int kHandlerTableOffsetOffset =
- kSafepointTableOffsetOffset + kIntSize;
- static const int kStubKeyOffset = kHandlerTableOffsetOffset + kIntSize;
- static const int kConstantPoolOffset = kStubKeyOffset + kIntSize;
- static const int kBuiltinIndexOffset =
- kConstantPoolOffset + kConstantPoolSize;
- static const int kHeaderPaddingStart = kBuiltinIndexOffset + kIntSize;
-
- // Add padding to align the instruction start following right after
- // the Code object header.
- static const int kHeaderSize =
- (kHeaderPaddingStart + kCodeAlignmentMask) & ~kCodeAlignmentMask;
-
- // Data or code not directly visited by GC directly starts here.
- // The serializer needs to copy bytes starting from here verbatim.
- // Objects embedded into code is visited via reloc info.
- static const int kDataStart = kInstructionSizeOffset;
+#define CODE_FIELDS(V) \
+ V(kRelocationInfoOffset, kTaggedSize) \
+ V(kDeoptimizationDataOffset, kTaggedSize) \
+ V(kSourcePositionTableOffset, kTaggedSize) \
+ V(kCodeDataContainerOffset, kTaggedSize) \
+ /* Data or code not directly visited by GC starts here. */ \
+ /* The serializer needs to copy bytes starting from here verbatim. */ \
+ /* Objects embedded into code are visited via reloc info. */ \
+ V(kDataStart, 0) \
+ V(kInstructionSizeOffset, kIntSize) \
+ V(kFlagsOffset, kIntSize) \
+ V(kSafepointTableOffsetOffset, kIntSize) \
+ V(kHandlerTableOffsetOffset, kIntSize) \
+ V(kConstantPoolOffset, FLAG_enable_embedded_constant_pool ? kIntSize : 0) \
+ V(kBuiltinIndexOffset, kIntSize) \
+ V(kCodeCommentsOffset, kIntSize) \
+ /* Add padding to align the instruction start following right after */ \
+ /* the Code object header. */ \
+ V(kHeaderPaddingStart, CODE_POINTER_PADDING(kHeaderPaddingStart)) \
+ V(kHeaderSize, 0)
+
+ DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize, CODE_FIELDS)
+#undef CODE_FIELDS
+
+ // This documents the amount of free space we have in each Code object header
+ // due to padding for code alignment.
+#if V8_TARGET_ARCH_ARM64
+ static constexpr int kHeaderPaddingSize = 0;
+ STATIC_ASSERT(kHeaderSize - kHeaderPaddingStart == kHeaderPaddingSize);
+#elif V8_TARGET_ARCH_MIPS64
+ static constexpr int kHeaderPaddingSize = 0;
+ STATIC_ASSERT(kHeaderSize - kHeaderPaddingStart == kHeaderPaddingSize);
+#elif V8_TARGET_ARCH_X64
+ static constexpr int kHeaderPaddingSize = 0;
+ STATIC_ASSERT(kHeaderSize - kHeaderPaddingStart == kHeaderPaddingSize);
+#elif V8_TARGET_ARCH_ARM
+ static constexpr int kHeaderPaddingSize = 20;
+ STATIC_ASSERT(kHeaderSize - kHeaderPaddingStart == kHeaderPaddingSize);
+#elif V8_TARGET_ARCH_IA32
+ static constexpr int kHeaderPaddingSize = 20;
+ STATIC_ASSERT(kHeaderSize - kHeaderPaddingStart == kHeaderPaddingSize);
+#elif V8_TARGET_ARCH_MIPS
+ static constexpr int kHeaderPaddingSize = 20;
+ STATIC_ASSERT(kHeaderSize - kHeaderPaddingStart == kHeaderPaddingSize);
+#elif V8_TARGET_ARCH_PPC64
+ // No static assert possible since padding size depends on the
+ // FLAG_enable_embedded_constant_pool runtime flag.
+#elif V8_TARGET_ARCH_S390X
+ static constexpr int kHeaderPaddingSize = 0;
+ STATIC_ASSERT(kHeaderSize - kHeaderPaddingStart == kHeaderPaddingSize);
+#else
+#error Unknown architecture.
+#endif
inline int GetUnwindingInfoSizeOffset() const;
@@ -421,9 +433,9 @@ class Code : public HeapObject, public NeverReadOnlySpaceObject {
// KindSpecificFlags layout (STUB, BUILTIN and OPTIMIZED_FUNCTION)
#define CODE_KIND_SPECIFIC_FLAGS_BIT_FIELDS(V, _) \
V(MarkedForDeoptimizationField, bool, 1, _) \
+ V(EmbeddedObjectsClearedField, bool, 1, _) \
V(DeoptAlreadyCountedField, bool, 1, _) \
V(CanHaveWeakObjectsField, bool, 1, _) \
- V(IsConstructStubField, bool, 1, _) \
V(IsPromiseRejectionField, bool, 1, _) \
V(IsExceptionCaughtField, bool, 1, _)
DEFINE_BIT_FIELDS(CODE_KIND_SPECIFIC_FLAGS_BIT_FIELDS)
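// --- Editor's note: illustrative, not part of the upstream diff. ---
// Assuming DEFINE_BIT_FIELDS assigns bits sequentially from bit 0 in
// declaration order, the packed kind_specific_flags layout after this patch
// would be:
//   bit 0: MarkedForDeoptimizationField
//   bit 1: EmbeddedObjectsClearedField   (new in this patch)
//   bit 2: DeoptAlreadyCountedField
//   bit 3: CanHaveWeakObjectsField
//   bit 4: IsPromiseRejectionField
//   bit 5: IsExceptionCaughtField        (IsConstructStubField is gone)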
@@ -444,7 +456,21 @@ class Code : public HeapObject, public NeverReadOnlySpaceObject {
bool is_promise_rejection() const;
bool is_exception_caught() const;
- DISALLOW_IMPLICIT_CONSTRUCTORS(Code);
+ OBJECT_CONSTRUCTORS(Code, HeapObject);
+};
+
+class Code::OptimizedCodeIterator {
+ public:
+ explicit OptimizedCodeIterator(Isolate* isolate);
+ Code Next();
+
+ private:
+ Context next_context_;
+ Code current_code_;
+ Isolate* isolate_;
+
+ DISALLOW_HEAP_ALLOCATION(no_gc);
+ DISALLOW_COPY_AND_ASSIGN(OptimizedCodeIterator);
};
// CodeDataContainer is a container for all mutable fields associated with its
@@ -452,8 +478,9 @@ class Code : public HeapObject, public NeverReadOnlySpaceObject {
// pages within the heap, its header fields need to be immutable. There always
// is a 1-to-1 relation between {Code} and {CodeDataContainer}, the referencing
// field {Code::code_data_container} itself is immutable.
-class CodeDataContainer : public HeapObject, public NeverReadOnlySpaceObject {
+class CodeDataContainer : public HeapObject {
public:
+ NEVER_READ_ONLY_SPACE
DECL_ACCESSORS(next_code_link, Object)
DECL_INT_ACCESSORS(kind_specific_flags)
@@ -467,24 +494,29 @@ class CodeDataContainer : public HeapObject, public NeverReadOnlySpaceObject {
DECL_PRINTER(CodeDataContainer)
DECL_VERIFIER(CodeDataContainer)
- static const int kNextCodeLinkOffset = HeapObject::kHeaderSize;
- static const int kKindSpecificFlagsOffset =
- kNextCodeLinkOffset + kPointerSize;
- static const int kUnalignedSize = kKindSpecificFlagsOffset + kIntSize;
- static const int kSize = OBJECT_POINTER_ALIGN(kUnalignedSize);
-
- // During mark compact we need to take special care for weak fields.
- static const int kPointerFieldsStrongEndOffset = kNextCodeLinkOffset;
- static const int kPointerFieldsWeakEndOffset = kKindSpecificFlagsOffset;
+// Layout description.
+#define CODE_DATA_FIELDS(V) \
+ /* Weak pointer fields. */ \
+ V(kPointerFieldsStrongEndOffset, 0) \
+ V(kNextCodeLinkOffset, kTaggedSize) \
+ V(kPointerFieldsWeakEndOffset, 0) \
+ /* Raw data fields. */ \
+ V(kKindSpecificFlagsOffset, kIntSize) \
+ V(kUnalignedSize, OBJECT_POINTER_PADDING(kUnalignedSize)) \
+ /* Total size. */ \
+ V(kSize, 0)
+
+ DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize, CODE_DATA_FIELDS)
+#undef CODE_DATA_FIELDS
class BodyDescriptor;
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(CodeDataContainer);
+ OBJECT_CONSTRUCTORS(CodeDataContainer, HeapObject);
};
-class AbstractCode : public HeapObject, public NeverReadOnlySpaceObject {
+class AbstractCode : public HeapObject {
public:
+ NEVER_READ_ONLY_SPACE
// All code kinds and INTERPRETED_FUNCTION.
enum Kind {
#define DEFINE_CODE_KIND_ENUM(name) name,
@@ -526,9 +558,9 @@ class AbstractCode : public HeapObject, public NeverReadOnlySpaceObject {
inline int InstructionSize();
// Return the source position table.
- inline ByteArray* source_position_table();
+ inline ByteArray source_position_table();
- inline Object* stack_frame_cache();
+ inline Object stack_frame_cache();
static void SetStackFrameCache(Handle<AbstractCode> abstract_code,
Handle<SimpleNumberDictionary> cache);
void DropStackFrameCache();
@@ -547,12 +579,14 @@ class AbstractCode : public HeapObject, public NeverReadOnlySpaceObject {
inline int ExecutableSize();
DECL_CAST(AbstractCode)
- inline Code* GetCode();
- inline BytecodeArray* GetBytecodeArray();
+ inline Code GetCode();
+ inline BytecodeArray GetBytecodeArray();
// Max loop nesting marker used to postpone OSR. We don't take loop
// nesting that is deeper than 5 levels into account.
static const int kMaxLoopNestingMarker = 6;
+
+ OBJECT_CONSTRUCTORS(AbstractCode, HeapObject)
};
// Dependent code is a singly linked list of weak fixed arrays. Each array
@@ -615,15 +649,15 @@ class DependentCode : public WeakFixedArray {
// The following low-level accessors are exposed only for tests.
inline DependencyGroup group();
- inline MaybeObject* object_at(int i);
+ inline MaybeObject object_at(int i);
inline int count();
- inline DependentCode* next_link();
+ inline DependentCode next_link();
private:
static const char* DependencyGroupName(DependencyGroup group);
// Get/Set {object}'s {DependentCode}.
- static DependentCode* GetDependentCode(Handle<HeapObject> object);
+ static DependentCode GetDependentCode(Handle<HeapObject> object);
static void SetDependentCode(Handle<HeapObject> object,
Handle<DependentCode> dep);
@@ -651,9 +685,9 @@ class DependentCode : public WeakFixedArray {
static const int kFlagsIndex = 1;
static const int kCodesStartIndex = 2;
- inline void set_next_link(DependentCode* next);
+ inline void set_next_link(DependentCode next);
inline void set_count(int value);
- inline void set_object_at(int i, MaybeObject* object);
+ inline void set_object_at(int i, MaybeObject object);
inline void clear_at(int i);
inline void copy(int from, int to);
@@ -662,6 +696,8 @@ class DependentCode : public WeakFixedArray {
class GroupField : public BitField<int, 0, 3> {};
class CountField : public BitField<int, 3, 27> {};
STATIC_ASSERT(kGroupCount <= GroupField::kMax + 1);
+
+ OBJECT_CONSTRUCTORS(DependentCode, WeakFixedArray)
};
// BytecodeArray represents a sequence of interpreter bytecodes.
@@ -681,7 +717,7 @@ class BytecodeArray : public FixedArrayBase {
kIsOldBytecodeAge = kSexagenarianBytecodeAge
};
- static int SizeFor(int length) {
+ static constexpr int SizeFor(int length) {
return OBJECT_POINTER_ALIGN(kHeaderSize + length);
}
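// --- Editor's note: illustrative sketch, not part of the upstream diff. ---
// Making SizeFor() constexpr lets allocation sizes fold at compile time. A
// self-contained model, assuming 8-byte object alignment and a hypothetical
// 40-byte BytecodeArray header:
#include <cstddef>

constexpr std::size_t AlignSketch(std::size_t v) {
  return (v + 7) & ~std::size_t{7};  // round up to an 8-byte boundary
}
constexpr std::size_t kHeaderSizeSketch = 40;
constexpr std::size_t SizeForSketch(std::size_t length) {
  return AlignSketch(kHeaderSizeSketch + length);  // header + raw bytecodes
}
static_assert(SizeForSketch(3) == 48, "43 bytes round up to 48");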
@@ -732,7 +768,7 @@ class BytecodeArray : public FixedArrayBase {
// offset and source position or SourcePositionTableWithFrameCache.
DECL_ACCESSORS(source_position_table, Object)
- inline ByteArray* SourcePositionTable();
+ inline ByteArray SourcePositionTable();
inline void ClearFrameCacheFromSourcePositionTable();
DECL_CAST(BytecodeArray)
@@ -754,7 +790,7 @@ class BytecodeArray : public FixedArrayBase {
void Disassemble(std::ostream& os);
- void CopyBytecodesTo(BytecodeArray* to);
+ void CopyBytecodesTo(BytecodeArray to);
// Bytecode aging
bool IsOld() const;
@@ -767,9 +803,9 @@ class BytecodeArray : public FixedArrayBase {
// Layout description.
#define BYTECODE_ARRAY_FIELDS(V) \
/* Pointer fields. */ \
- V(kConstantPoolOffset, kPointerSize) \
- V(kHandlerTableOffset, kPointerSize) \
- V(kSourcePositionTableOffset, kPointerSize) \
+ V(kConstantPoolOffset, kTaggedSize) \
+ V(kHandlerTableOffset, kTaggedSize) \
+ V(kSourcePositionTableOffset, kTaggedSize) \
V(kFrameSizeOffset, kIntSize) \
V(kParameterSizeOffset, kIntSize) \
V(kIncomingNewTargetOrGeneratorRegisterOffset, kIntSize) \
@@ -790,8 +826,7 @@ class BytecodeArray : public FixedArrayBase {
class BodyDescriptor;
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(BytecodeArray);
+ OBJECT_CONSTRUCTORS(BytecodeArray, FixedArrayBase);
};
// DeoptimizationData is a fixed array used to hold the deoptimization data for
@@ -821,8 +856,8 @@ class DeoptimizationData : public FixedArray {
// Simple element accessors.
#define DECL_ELEMENT_ACCESSORS(name, type) \
- inline type* name(); \
- inline void Set##name(type* value);
+ inline type name() const; \
+ inline void Set##name(type value);
DECL_ELEMENT_ACCESSORS(TranslationByteArray, ByteArray)
DECL_ELEMENT_ACCESSORS(InlinedFunctionCount, Smi)
@@ -837,8 +872,8 @@ class DeoptimizationData : public FixedArray {
// Accessors for elements of the ith deoptimization entry.
#define DECL_ENTRY_ACCESSORS(name, type) \
- inline type* name(int i); \
- inline void Set##name(int i, type* value);
+ inline type name(int i) const; \
+ inline void Set##name(int i, type value);
DECL_ENTRY_ACCESSORS(BytecodeOffsetRaw, Smi)
DECL_ENTRY_ACCESSORS(TranslationIndex, Smi)
@@ -856,7 +891,7 @@ class DeoptimizationData : public FixedArray {
// Returns the inlined function at the given position in LiteralArray, or the
// outer function if index == kNotInlinedIndex.
- class SharedFunctionInfo* GetInlinedFunction(int index);
+ class SharedFunctionInfo GetInlinedFunction(int index);
// Allocates a DeoptimizationData.
static Handle<DeoptimizationData> New(Isolate* isolate, int deopt_entry_count,
@@ -877,6 +912,8 @@ class DeoptimizationData : public FixedArray {
}
static int LengthFor(int entry_count) { return IndexForEntry(entry_count); }
+
+ OBJECT_CONSTRUCTORS(DeoptimizationData, FixedArray)
};
class SourcePositionTableWithFrameCache : public Tuple2 {
@@ -886,13 +923,18 @@ class SourcePositionTableWithFrameCache : public Tuple2 {
DECL_CAST(SourcePositionTableWithFrameCache)
- static const int kSourcePositionTableIndex = Struct::kHeaderSize;
- static const int kStackFrameCacheIndex =
- kSourcePositionTableIndex + kPointerSize;
- static const int kSize = kStackFrameCacheIndex + kPointerSize;
+// Layout description.
+#define SOURCE_POSITION_TABLE_WITH_FRAME_FIELDS(V) \
+ V(kSourcePositionTableIndex, kTaggedSize) \
+ V(kStackFrameCacheIndex, kTaggedSize) \
+ /* Total size. */ \
+ V(kSize, 0)
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(SourcePositionTableWithFrameCache);
+ DEFINE_FIELD_OFFSET_CONSTANTS(Struct::kHeaderSize,
+ SOURCE_POSITION_TABLE_WITH_FRAME_FIELDS)
+#undef SOURCE_POSITION_TABLE_WITH_FRAME_FIELDS
+
+ OBJECT_CONSTRUCTORS(SourcePositionTableWithFrameCache, Tuple2);
};
} // namespace internal
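// --- Editor's note: illustrative sketch, not part of the upstream diff. ---
// DEFINE_FIELD_OFFSET_CONSTANTS expands each V(kName, size) pair into a
// constant holding the running byte offset, then advances by `size`. A
// hedged hand-expansion of the first CODE_FIELDS entries, assuming
// kTaggedSize == 8 and writing 0 for HeapObject::kHeaderSize:
constexpr int kBaseSketch = 0;  // stands in for HeapObject::kHeaderSize
constexpr int kRelocationInfoOffsetSketch = kBaseSketch;
constexpr int kDeoptimizationDataOffsetSketch =
    kRelocationInfoOffsetSketch + 8;
constexpr int kSourcePositionTableOffsetSketch =
    kDeoptimizationDataOffsetSketch + 8;
constexpr int kCodeDataContainerOffsetSketch =
    kSourcePositionTableOffsetSketch + 8;
// Zero-size entries such as V(kDataStart, 0) just name the current offset
// without consuming space; that is how the GC/serializer boundary marker
// aliases kInstructionSizeOffset in the real macro.
constexpr int kDataStartSketch = kCodeDataContainerOffsetSketch + 8;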
diff --git a/deps/v8/src/objects/compilation-cache-inl.h b/deps/v8/src/objects/compilation-cache-inl.h
index 1637e64d6d..07af9a6029 100644
--- a/deps/v8/src/objects/compilation-cache-inl.h
+++ b/deps/v8/src/objects/compilation-cache-inl.h
@@ -10,6 +10,7 @@
#include "src/objects/name-inl.h"
#include "src/objects/script-inl.h"
#include "src/objects/shared-function-info.h"
+#include "src/objects/smi.h"
#include "src/objects/string.h"
// Has to be the last include (doesn't have include guards):
@@ -18,14 +19,20 @@
namespace v8 {
namespace internal {
+CompilationCacheTable::CompilationCacheTable(Address ptr)
+ : HashTable<CompilationCacheTable, CompilationCacheShape>(ptr) {
+ SLOW_DCHECK(IsCompilationCacheTable());
+}
+
+NEVER_READ_ONLY_SPACE_IMPL(CompilationCacheTable)
CAST_ACCESSOR(CompilationCacheTable)
-uint32_t CompilationCacheShape::RegExpHash(String* string, Smi* flags) {
+uint32_t CompilationCacheShape::RegExpHash(String string, Smi flags) {
return string->Hash() + flags->value();
}
-uint32_t CompilationCacheShape::StringSharedHash(String* source,
- SharedFunctionInfo* shared,
+uint32_t CompilationCacheShape::StringSharedHash(String source,
+ SharedFunctionInfo shared,
LanguageMode language_mode,
int position) {
uint32_t hash = source->Hash();
@@ -35,7 +42,7 @@ uint32_t CompilationCacheShape::StringSharedHash(String* source,
// script source code and the start position of the calling scope.
// We do this to ensure that the cache entries can survive garbage
// collection.
- Script* script(Script::cast(shared->script()));
+ Script script(Script::cast(shared->script()));
hash ^= String::cast(script->source())->Hash();
STATIC_ASSERT(LanguageModeSize == 2);
if (is_strict(language_mode)) hash ^= 0x8000;
@@ -44,15 +51,14 @@ uint32_t CompilationCacheShape::StringSharedHash(String* source,
return hash;
}
-uint32_t CompilationCacheShape::HashForObject(Isolate* isolate,
- Object* object) {
+uint32_t CompilationCacheShape::HashForObject(Isolate* isolate, Object object) {
if (object->IsNumber()) return static_cast<uint32_t>(object->Number());
- FixedArray* val = FixedArray::cast(object);
+ FixedArray val = FixedArray::cast(object);
if (val->map() == val->GetReadOnlyRoots().fixed_cow_array_map()) {
DCHECK_EQ(4, val->length());
- SharedFunctionInfo* shared = SharedFunctionInfo::cast(val->get(0));
- String* source = String::cast(val->get(1));
+ SharedFunctionInfo shared = SharedFunctionInfo::cast(val->get(0));
+ String source = String::cast(val->get(1));
int language_unchecked = Smi::ToInt(val->get(2));
DCHECK(is_valid_language_mode(language_unchecked));
LanguageMode language_mode = static_cast<LanguageMode>(language_unchecked);
@@ -64,6 +70,13 @@ uint32_t CompilationCacheShape::HashForObject(Isolate* isolate,
Smi::cast(val->get(JSRegExp::kFlagsIndex)));
}
+InfoCellPair::InfoCellPair(SharedFunctionInfo shared,
+ FeedbackCell feedback_cell)
+ : is_compiled_scope_(!shared.is_null() ? shared->is_compiled_scope()
+ : IsCompiledScope()),
+ shared_(shared),
+ feedback_cell_(feedback_cell) {}
+
} // namespace internal
} // namespace v8
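// --- Editor's note: illustrative sketch, not part of the upstream diff. ---
// StringSharedHash above mixes four inputs so that eval-cache entries stay
// distinct per script, per language mode, and per call position. The same
// mixing on plain integers:
#include <cstdint>

uint32_t StringSharedHashSketch(uint32_t source_hash, uint32_t script_hash,
                                bool is_strict_mode, int position) {
  uint32_t hash = source_hash;
  hash ^= script_hash;                 // tie the entry to the enclosing script
  if (is_strict_mode) hash ^= 0x8000;  // keep sloppy/strict entries apart
  hash += static_cast<uint32_t>(position);  // distinguish call sites
  return hash;
}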
diff --git a/deps/v8/src/objects/compilation-cache.h b/deps/v8/src/objects/compilation-cache.h
index 8e118a0e60..d5dd8ddddf 100644
--- a/deps/v8/src/objects/compilation-cache.h
+++ b/deps/v8/src/objects/compilation-cache.h
@@ -5,8 +5,10 @@
#ifndef V8_OBJECTS_COMPILATION_CACHE_H_
#define V8_OBJECTS_COMPILATION_CACHE_H_
+#include "src/objects/feedback-cell.h"
#include "src/objects/hash-table.h"
#include "src/objects/js-regexp.h"
+#include "src/objects/shared-function-info.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -16,7 +18,7 @@ namespace internal {
class CompilationCacheShape : public BaseShape<HashTableKey*> {
public:
- static inline bool IsMatch(HashTableKey* key, Object* value) {
+ static inline bool IsMatch(HashTableKey* key, Object value) {
return key->IsMatch(value);
}
@@ -24,14 +26,14 @@ class CompilationCacheShape : public BaseShape<HashTableKey*> {
return key->Hash();
}
- static inline uint32_t RegExpHash(String* string, Smi* flags);
+ static inline uint32_t RegExpHash(String string, Smi flags);
- static inline uint32_t StringSharedHash(String* source,
- SharedFunctionInfo* shared,
+ static inline uint32_t StringSharedHash(String source,
+ SharedFunctionInfo shared,
LanguageMode language_mode,
int position);
- static inline uint32_t HashForObject(Isolate* isolate, Object* object);
+ static inline uint32_t HashForObject(Isolate* isolate, Object object);
static const int kPrefixSize = 0;
static const int kEntrySize = 3;
@@ -39,19 +41,32 @@ class CompilationCacheShape : public BaseShape<HashTableKey*> {
class InfoCellPair {
public:
- InfoCellPair() : shared_(nullptr), feedback_cell_(nullptr) {}
- InfoCellPair(SharedFunctionInfo* shared, FeedbackCell* feedback_cell)
- : shared_(shared), feedback_cell_(feedback_cell) {}
+ InfoCellPair() {}
+ inline InfoCellPair(SharedFunctionInfo shared, FeedbackCell feedback_cell);
- FeedbackCell* feedback_cell() const { return feedback_cell_; }
- SharedFunctionInfo* shared() const { return shared_; }
+ FeedbackCell feedback_cell() const {
+ DCHECK(is_compiled_scope_.is_compiled());
+ return feedback_cell_;
+ }
+ SharedFunctionInfo shared() const {
+ DCHECK(is_compiled_scope_.is_compiled());
+ return shared_;
+ }
- bool has_feedback_cell() const { return feedback_cell_ != nullptr; }
- bool has_shared() const { return shared_ != nullptr; }
+ bool has_feedback_cell() const {
+ return !feedback_cell_.is_null() && is_compiled_scope_.is_compiled();
+ }
+ bool has_shared() const {
+ // Only return true if the SFI is compiled - the bytecode could have been
+ // flushed while it sits in the compilation cache, without yet having been
+ // removed from the compilation cache.
+ return !shared_.is_null() && is_compiled_scope_.is_compiled();
+ }
private:
- SharedFunctionInfo* shared_;
- FeedbackCell* feedback_cell_;
+ IsCompiledScope is_compiled_scope_;
+ SharedFunctionInfo shared_;
+ FeedbackCell feedback_cell_;
};
// This cache is used in two different variants. For regexp caching, it simply
@@ -66,15 +81,17 @@ class InfoCellPair {
// recompilation stub, or to "old" code. This avoids memory leaks due to
// premature caching of scripts and eval strings that are never needed later.
class CompilationCacheTable
- : public HashTable<CompilationCacheTable, CompilationCacheShape>,
- public NeverReadOnlySpaceObject {
+ : public HashTable<CompilationCacheTable, CompilationCacheShape> {
public:
- MaybeHandle<SharedFunctionInfo> LookupScript(Handle<String> src,
- Handle<Context> native_context,
- LanguageMode language_mode);
- InfoCellPair LookupEval(Handle<String> src, Handle<SharedFunctionInfo> shared,
- Handle<Context> native_context,
- LanguageMode language_mode, int position);
+ NEVER_READ_ONLY_SPACE
+ static MaybeHandle<SharedFunctionInfo> LookupScript(
+ Handle<CompilationCacheTable> table, Handle<String> src,
+ Handle<Context> native_context, LanguageMode language_mode);
+ static InfoCellPair LookupEval(Handle<CompilationCacheTable> table,
+ Handle<String> src,
+ Handle<SharedFunctionInfo> shared,
+ Handle<Context> native_context,
+ LanguageMode language_mode, int position);
Handle<Object> LookupRegExp(Handle<String> source, JSRegExp::Flags flags);
static Handle<CompilationCacheTable> PutScript(
Handle<CompilationCacheTable> cache, Handle<String> src,
@@ -88,14 +105,15 @@ class CompilationCacheTable
static Handle<CompilationCacheTable> PutRegExp(
Isolate* isolate, Handle<CompilationCacheTable> cache, Handle<String> src,
JSRegExp::Flags flags, Handle<FixedArray> value);
- void Remove(Object* value);
+ void Remove(Object value);
void Age();
static const int kHashGenerations = 10;
DECL_CAST(CompilationCacheTable)
private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(CompilationCacheTable);
+ OBJECT_CONSTRUCTORS(CompilationCacheTable,
+ HashTable<CompilationCacheTable, CompilationCacheShape>);
};
} // namespace internal
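// --- Editor's note: hedged usage sketch, not part of the upstream diff. ---
// The guarded accessors mean callers must treat an InfoCellPair as a cache
// miss once bytecode flushing has discarded the compiled code, e.g.:
//
//   InfoCellPair pair = CompilationCacheTable::LookupEval(
//       table, src, outer_shared, native_context, language_mode, position);
//   if (pair.has_shared()) {
//     // Safe: the embedded IsCompiledScope keeps the bytecode alive while
//     // this pair is in use.
//     Use(pair.shared(), pair.feedback_cell());
//   } else {
//     // The SFI may still exist but its bytecode was flushed: recompile.
//   }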
diff --git a/deps/v8/src/objects/data-handler-inl.h b/deps/v8/src/objects/data-handler-inl.h
index e754e6e68c..1be71ce8fa 100644
--- a/deps/v8/src/objects/data-handler-inl.h
+++ b/deps/v8/src/objects/data-handler-inl.h
@@ -14,11 +14,15 @@
namespace v8 {
namespace internal {
+OBJECT_CONSTRUCTORS_IMPL(DataHandler, Struct)
+
+CAST_ACCESSOR(DataHandler)
+
ACCESSORS(DataHandler, smi_handler, Object, kSmiHandlerOffset)
ACCESSORS(DataHandler, validity_cell, Object, kValidityCellOffset)
int DataHandler::data_field_count() const {
- return (map()->instance_size() - kSizeWithData0) / kPointerSize;
+ return (map()->instance_size() - kSizeWithData0) / kTaggedSize;
}
WEAK_ACCESSORS_CHECKED(DataHandler, data1, kData1Offset,
diff --git a/deps/v8/src/objects/data-handler.h b/deps/v8/src/objects/data-handler.h
index 0d5065b25c..dd4c5d8b12 100644
--- a/deps/v8/src/objects/data-handler.h
+++ b/deps/v8/src/objects/data-handler.h
@@ -5,7 +5,7 @@
#ifndef V8_OBJECTS_DATA_HANDLER_H_
#define V8_OBJECTS_DATA_HANDLER_H_
-#include "src/objects.h"
+#include "src/objects/struct.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -36,15 +36,15 @@ class DataHandler : public Struct {
DECL_ACCESSORS(data3, MaybeObject)
// Layout description.
-#define DATA_HANDLER_FIELDS(V) \
- V(kSmiHandlerOffset, kPointerSize) \
- V(kValidityCellOffset, kPointerSize) \
- V(kSizeWithData0, 0) \
- V(kData1Offset, kPointerSize) \
- V(kSizeWithData1, 0) \
- V(kData2Offset, kPointerSize) \
- V(kSizeWithData2, 0) \
- V(kData3Offset, kPointerSize) \
+#define DATA_HANDLER_FIELDS(V) \
+ V(kSmiHandlerOffset, kTaggedSize) \
+ V(kValidityCellOffset, kTaggedSize) \
+ V(kSizeWithData0, 0) \
+ V(kData1Offset, kTaggedSize) \
+ V(kSizeWithData1, 0) \
+ V(kData2Offset, kTaggedSize) \
+ V(kSizeWithData2, 0) \
+ V(kData3Offset, kTaggedSize) \
V(kSizeWithData3, 0)
DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize, DATA_HANDLER_FIELDS)
@@ -55,6 +55,8 @@ class DataHandler : public Struct {
DECL_VERIFIER(DataHandler)
class BodyDescriptor;
+
+ OBJECT_CONSTRUCTORS(DataHandler, Struct)
};
} // namespace internal
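// --- Editor's note: illustrative sketch, not part of the upstream diff. ---
// The kSizeWithData0..3 markers give DataHandler a variable-length tail: a
// concrete map picks one of the four sizes, and data_field_count() recovers
// the slot count by dividing the remaining bytes by kTaggedSize. A model
// assuming 8-byte tagged slots and a 16-byte fixed prefix:
constexpr int kTaggedSizeSketch = 8;
constexpr int kSizeWithData0Sketch = 16;  // smi_handler + validity_cell

constexpr int DataFieldCountSketch(int instance_size) {
  return (instance_size - kSizeWithData0Sketch) / kTaggedSizeSketch;
}
static_assert(DataFieldCountSketch(kSizeWithData0Sketch + 2 * 8) == 2,
              "an instance sized for data1 and data2 reports two slots");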
diff --git a/deps/v8/src/objects/debug-objects-inl.h b/deps/v8/src/objects/debug-objects-inl.h
index ff3c58a82f..06709f037a 100644
--- a/deps/v8/src/objects/debug-objects-inl.h
+++ b/deps/v8/src/objects/debug-objects-inl.h
@@ -6,9 +6,11 @@
#define V8_OBJECTS_DEBUG_OBJECTS_INL_H_
#include "src/objects/debug-objects.h"
-#include "src/objects/shared-function-info.h"
#include "src/heap/heap-inl.h"
+#include "src/heap/heap-write-barrier.h"
+#include "src/objects-inl.h"
+#include "src/objects/shared-function-info.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -16,6 +18,13 @@
namespace v8 {
namespace internal {
+OBJECT_CONSTRUCTORS_IMPL(BreakPoint, Tuple2)
+OBJECT_CONSTRUCTORS_IMPL(BreakPointInfo, Tuple2)
+OBJECT_CONSTRUCTORS_IMPL(CoverageInfo, FixedArray)
+OBJECT_CONSTRUCTORS_IMPL(DebugInfo, Struct)
+
+NEVER_READ_ONLY_SPACE_IMPL(DebugInfo)
+
CAST_ACCESSOR(BreakPointInfo)
CAST_ACCESSOR(DebugInfo)
CAST_ACCESSOR(CoverageInfo)
@@ -27,6 +36,7 @@ SMI_ACCESSORS(DebugInfo, debugger_hints, kDebuggerHintsOffset)
ACCESSORS(DebugInfo, script, Object, kScriptOffset)
ACCESSORS(DebugInfo, original_bytecode_array, Object,
kOriginalBytecodeArrayOffset)
+ACCESSORS(DebugInfo, debug_bytecode_array, Object, kDebugBytecodeArrayOffset)
ACCESSORS(DebugInfo, break_points, FixedArray, kBreakPointsStateOffset)
ACCESSORS(DebugInfo, coverage_info, Object, kCoverageInfoOffset)
@@ -46,17 +56,20 @@ SMI_ACCESSORS(BreakPoint, id, kIdOffset)
ACCESSORS(BreakPoint, condition, String, kConditionOffset)
bool DebugInfo::HasInstrumentedBytecodeArray() {
- return original_bytecode_array()->IsBytecodeArray();
+ DCHECK_EQ(debug_bytecode_array()->IsBytecodeArray(),
+ original_bytecode_array()->IsBytecodeArray());
+ return debug_bytecode_array()->IsBytecodeArray();
}
-BytecodeArray* DebugInfo::OriginalBytecodeArray() {
+BytecodeArray DebugInfo::OriginalBytecodeArray() {
DCHECK(HasInstrumentedBytecodeArray());
return BytecodeArray::cast(original_bytecode_array());
}
-BytecodeArray* DebugInfo::DebugBytecodeArray() {
+BytecodeArray DebugInfo::DebugBytecodeArray() {
DCHECK(HasInstrumentedBytecodeArray());
- return shared()->GetDebugBytecodeArray();
+ DCHECK_EQ(shared()->GetDebugBytecodeArray(), debug_bytecode_array());
+ return BytecodeArray::cast(debug_bytecode_array());
}
} // namespace internal
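// --- Editor's note: illustrative, not part of the upstream diff. ---
// The new debug_bytecode_array slot plus the DCHECKs above pin an invariant:
// either both bytecode slots hold BytecodeArrays (the function is
// instrumented) or neither does, and while instrumented the copy reachable
// via shared()->GetDebugBytecodeArray() must agree with the slot here:
//
//   HasInstrumentedBytecodeArray()
//       <=>  debug_bytecode_array()->IsBytecodeArray()
//       <=>  original_bytecode_array()->IsBytecodeArray()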
diff --git a/deps/v8/src/objects/debug-objects.cc b/deps/v8/src/objects/debug-objects.cc
index b77b6e136e..1ae360dbbe 100644
--- a/deps/v8/src/objects/debug-objects.cc
+++ b/deps/v8/src/objects/debug-objects.cc
@@ -3,8 +3,11 @@
// found in the LICENSE file.
#include "src/objects/debug-objects.h"
+
#include "src/debug/debug-evaluate.h"
+#include "src/handles-inl.h"
#include "src/objects/debug-objects-inl.h"
+#include "src/ostreams.h"
namespace v8 {
namespace internal {
@@ -30,6 +33,7 @@ void DebugInfo::ClearBreakInfo(Isolate* isolate) {
// array.
shared()->SetDebugBytecodeArray(OriginalBytecodeArray());
set_original_bytecode_array(ReadOnlyRoots(isolate).undefined_value());
+ set_debug_bytecode_array(ReadOnlyRoots(isolate).undefined_value());
}
set_break_points(ReadOnlyRoots(isolate).empty_fixed_array());
@@ -60,7 +64,7 @@ bool DebugInfo::CanBreakAtEntry() const {
bool DebugInfo::HasBreakPoint(Isolate* isolate, int source_position) {
DCHECK(HasBreakInfo());
// Get the break point info object for this code offset.
- Object* break_point_info = GetBreakPointInfo(isolate, source_position);
+ Object break_point_info = GetBreakPointInfo(isolate, source_position);
// If there is no break point info object or no break points in the break
// point info object there is no break point at this code offset.
@@ -70,11 +74,11 @@ bool DebugInfo::HasBreakPoint(Isolate* isolate, int source_position) {
}
// Get the break point info object for this source position.
-Object* DebugInfo::GetBreakPointInfo(Isolate* isolate, int source_position) {
+Object DebugInfo::GetBreakPointInfo(Isolate* isolate, int source_position) {
DCHECK(HasBreakInfo());
for (int i = 0; i < break_points()->length(); i++) {
if (!break_points()->get(i)->IsUndefined(isolate)) {
- BreakPointInfo* break_point_info =
+ BreakPointInfo break_point_info =
BreakPointInfo::cast(break_points()->get(i));
if (break_point_info->source_position() == source_position) {
return break_point_info;
@@ -148,7 +152,7 @@ void DebugInfo::SetBreakPoint(Isolate* isolate, Handle<DebugInfo> debug_info,
Handle<Object> DebugInfo::GetBreakPoints(Isolate* isolate,
int source_position) {
DCHECK(HasBreakInfo());
- Object* break_point_info = GetBreakPointInfo(isolate, source_position);
+ Object break_point_info = GetBreakPointInfo(isolate, source_position);
if (break_point_info->IsUndefined(isolate)) {
return isolate->factory()->undefined_value();
}
@@ -162,7 +166,7 @@ int DebugInfo::GetBreakPointCount(Isolate* isolate) {
int count = 0;
for (int i = 0; i < break_points()->length(); i++) {
if (!break_points()->get(i)->IsUndefined(isolate)) {
- BreakPointInfo* break_point_info =
+ BreakPointInfo break_point_info =
BreakPointInfo::cast(break_points()->get(i));
count += break_point_info->GetBreakPointCount(isolate);
}
@@ -211,7 +215,7 @@ DebugInfo::SideEffectState DebugInfo::GetSideEffectState(Isolate* isolate) {
}
namespace {
-bool IsEqual(BreakPoint* break_point1, BreakPoint* break_point2) {
+bool IsEqual(BreakPoint break_point1, BreakPoint break_point2) {
return break_point1->id() == break_point2->id();
}
} // namespace
@@ -297,7 +301,7 @@ bool BreakPointInfo::HasBreakPoint(Isolate* isolate,
*break_point);
}
// Multiple break points.
- FixedArray* array = FixedArray::cast(break_point_info->break_points());
+ FixedArray array = FixedArray::cast(break_point_info->break_points());
for (int i = 0; i < array->length(); i++) {
if (IsEqual(BreakPoint::cast(array->get(i)), *break_point)) {
return true;
diff --git a/deps/v8/src/objects/debug-objects.h b/deps/v8/src/objects/debug-objects.h
index 84f244c758..7901c995d5 100644
--- a/deps/v8/src/objects/debug-objects.h
+++ b/deps/v8/src/objects/debug-objects.h
@@ -7,6 +7,7 @@
#include "src/objects.h"
#include "src/objects/fixed-array.h"
+#include "src/objects/struct.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -19,8 +20,9 @@ class BytecodeArray;
// The DebugInfo class holds additional information for a function being
// debugged.
-class DebugInfo : public Struct, public NeverReadOnlySpaceObject {
+class DebugInfo : public Struct {
public:
+ NEVER_READ_ONLY_SPACE
enum Flag {
kNone = 0,
kHasBreakInfo = 1 << 0,
@@ -64,8 +66,8 @@ class DebugInfo : public Struct, public NeverReadOnlySpaceObject {
// and DebugBytecodeArray returns the instrumented bytecode.
inline bool HasInstrumentedBytecodeArray();
- inline BytecodeArray* OriginalBytecodeArray();
- inline BytecodeArray* DebugBytecodeArray();
+ inline BytecodeArray OriginalBytecodeArray();
+ inline BytecodeArray DebugBytecodeArray();
// --- Break points ---
// --------------------
@@ -85,6 +87,10 @@ class DebugInfo : public Struct, public NeverReadOnlySpaceObject {
// points - the instrumented bytecode is held in the shared function info.
DECL_ACCESSORS(original_bytecode_array, Object)
+ // The debug instrumented bytecode array for functions with break points
+ // - also pointed to by the shared function info.
+ DECL_ACCESSORS(debug_bytecode_array, Object)
+
// Fixed array holding status information for each active break point.
DECL_ACCESSORS(break_points, FixedArray)
@@ -162,24 +168,29 @@ class DebugInfo : public Struct, public NeverReadOnlySpaceObject {
DECL_PRINTER(DebugInfo)
DECL_VERIFIER(DebugInfo)
- static const int kSharedFunctionInfoOffset = Struct::kHeaderSize;
- static const int kDebuggerHintsOffset =
- kSharedFunctionInfoOffset + kPointerSize;
- static const int kScriptOffset = kDebuggerHintsOffset + kPointerSize;
- static const int kOriginalBytecodeArrayOffset = kScriptOffset + kPointerSize;
- static const int kBreakPointsStateOffset =
- kOriginalBytecodeArrayOffset + kPointerSize;
- static const int kFlagsOffset = kBreakPointsStateOffset + kPointerSize;
- static const int kCoverageInfoOffset = kFlagsOffset + kPointerSize;
- static const int kSize = kCoverageInfoOffset + kPointerSize;
+// Layout description.
+#define DEBUG_INFO_FIELDS(V) \
+ V(kSharedFunctionInfoOffset, kTaggedSize) \
+ V(kDebuggerHintsOffset, kTaggedSize) \
+ V(kScriptOffset, kTaggedSize) \
+ V(kOriginalBytecodeArrayOffset, kTaggedSize) \
+ V(kDebugBytecodeArrayOffset, kTaggedSize) \
+ V(kBreakPointsStateOffset, kTaggedSize) \
+ V(kFlagsOffset, kTaggedSize) \
+ V(kCoverageInfoOffset, kTaggedSize) \
+ /* Total size. */ \
+ V(kSize, 0)
+
+ DEFINE_FIELD_OFFSET_CONSTANTS(Struct::kHeaderSize, DEBUG_INFO_FIELDS)
+#undef DEBUG_INFO_FIELDS
static const int kEstimatedNofBreakPointsInFunction = 4;
private:
// Get the break point info object for a source position.
- Object* GetBreakPointInfo(Isolate* isolate, int source_position);
+ Object GetBreakPointInfo(Isolate* isolate, int source_position);
- DISALLOW_IMPLICIT_CONSTRUCTORS(DebugInfo);
+ OBJECT_CONSTRUCTORS(DebugInfo, Struct);
};
// The BreakPointInfo class holds information for break points set in a
@@ -211,8 +222,7 @@ class BreakPointInfo : public Tuple2 {
static const int kSourcePositionOffset = kValue1Offset;
static const int kBreakPointsOffset = kValue2Offset;
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(BreakPointInfo);
+ OBJECT_CONSTRUCTORS(BreakPointInfo, Tuple2);
};
// Holds information related to block code coverage.
@@ -251,7 +261,7 @@ class CoverageInfo : public FixedArray {
static const int kSlotBlockCountIndex = 2;
static const int kSlotIndexCount = 3;
- DISALLOW_IMPLICIT_CONSTRUCTORS(CoverageInfo);
+ OBJECT_CONSTRUCTORS(CoverageInfo, FixedArray);
};
// Holds breakpoint related information. This object is used by inspector.
@@ -265,8 +275,7 @@ class BreakPoint : public Tuple2 {
static const int kIdOffset = kValue1Offset;
static const int kConditionOffset = kValue2Offset;
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(BreakPoint);
+ OBJECT_CONSTRUCTORS(BreakPoint, Tuple2);
};
} // namespace internal
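// --- Editor's note: illustrative, not part of the upstream diff. ---
// With kTaggedSize == 8 (an assumption of this sketch) DEBUG_INFO_FIELDS
// lays the eight tagged fields out back to back after Struct::kHeaderSize,
// the new debug bytecode slot landing fifth:
//   kSharedFunctionInfoOffset    = base + 0
//   kDebuggerHintsOffset         = base + 8
//   kScriptOffset                = base + 16
//   kOriginalBytecodeArrayOffset = base + 24
//   kDebugBytecodeArrayOffset    = base + 32   (new in this patch)
//   kBreakPointsStateOffset      = base + 40
//   kFlagsOffset                 = base + 48
//   kCoverageInfoOffset          = base + 56
//   kSize                        = base + 64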
diff --git a/deps/v8/src/objects/descriptor-array-inl.h b/deps/v8/src/objects/descriptor-array-inl.h
new file mode 100644
index 0000000000..4cc54ee050
--- /dev/null
+++ b/deps/v8/src/objects/descriptor-array-inl.h
@@ -0,0 +1,226 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_DESCRIPTOR_ARRAY_INL_H_
+#define V8_OBJECTS_DESCRIPTOR_ARRAY_INL_H_
+
+#include "src/objects/descriptor-array.h"
+
+#include "src/field-type.h"
+#include "src/heap/heap-write-barrier.h"
+#include "src/heap/heap.h"
+#include "src/isolate.h"
+#include "src/lookup-cache.h"
+#include "src/objects/heap-object-inl.h"
+#include "src/objects/maybe-object.h"
+#include "src/objects/struct-inl.h"
+#include "src/property.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+OBJECT_CONSTRUCTORS_IMPL(DescriptorArray, HeapObject)
+OBJECT_CONSTRUCTORS_IMPL(EnumCache, Tuple2)
+
+CAST_ACCESSOR(DescriptorArray)
+CAST_ACCESSOR(EnumCache)
+
+ACCESSORS(DescriptorArray, enum_cache, EnumCache, kEnumCacheOffset)
+RELAXED_INT16_ACCESSORS(DescriptorArray, number_of_all_descriptors,
+ kNumberOfAllDescriptorsOffset)
+RELAXED_INT16_ACCESSORS(DescriptorArray, number_of_descriptors,
+ kNumberOfDescriptorsOffset)
+RELAXED_INT16_ACCESSORS(DescriptorArray, raw_number_of_marked_descriptors,
+ kRawNumberOfMarkedDescriptorsOffset)
+RELAXED_INT16_ACCESSORS(DescriptorArray, filler16bits, kFiller16BitsOffset)
+
+inline int16_t DescriptorArray::number_of_slack_descriptors() const {
+ return number_of_all_descriptors() - number_of_descriptors();
+}
+
+inline int DescriptorArray::number_of_entries() const {
+ return number_of_descriptors();
+}
+
+inline int16_t DescriptorArray::CompareAndSwapRawNumberOfMarkedDescriptors(
+ int16_t expected, int16_t value) {
+ return base::Relaxed_CompareAndSwap(
+ reinterpret_cast<base::Atomic16*>(
+ FIELD_ADDR(this, kRawNumberOfMarkedDescriptorsOffset)),
+ expected, value);
+}
+
+void DescriptorArray::CopyEnumCacheFrom(DescriptorArray array) {
+ set_enum_cache(array->enum_cache());
+}
+
+int DescriptorArray::Search(Name name, int valid_descriptors) {
+ DCHECK(name->IsUniqueName());
+ return internal::Search<VALID_ENTRIES>(this, name, valid_descriptors,
+ nullptr);
+}
+
+int DescriptorArray::Search(Name name, Map map) {
+ DCHECK(name->IsUniqueName());
+ int number_of_own_descriptors = map->NumberOfOwnDescriptors();
+ if (number_of_own_descriptors == 0) return kNotFound;
+ return Search(name, number_of_own_descriptors);
+}
+
+int DescriptorArray::SearchWithCache(Isolate* isolate, Name name, Map map) {
+ DCHECK(name->IsUniqueName());
+ int number_of_own_descriptors = map->NumberOfOwnDescriptors();
+ if (number_of_own_descriptors == 0) return kNotFound;
+
+ DescriptorLookupCache* cache = isolate->descriptor_lookup_cache();
+ int number = cache->Lookup(map, name);
+
+ if (number == DescriptorLookupCache::kAbsent) {
+ number = Search(name, number_of_own_descriptors);
+ cache->Update(map, name, number);
+ }
+
+ return number;
+}
+
+ObjectSlot DescriptorArray::GetFirstPointerSlot() {
+ return RawField(DescriptorArray::kPointersStartOffset);
+}
+
+ObjectSlot DescriptorArray::GetDescriptorSlot(int descriptor) {
+ // Allow descriptor == number_of_all_descriptors() for computing the slot
+ // address that comes after the last descriptor (for iterating).
+ DCHECK_LE(descriptor, number_of_all_descriptors());
+ return RawField(OffsetOfDescriptorAt(descriptor));
+}
+
+ObjectSlot DescriptorArray::GetKeySlot(int descriptor) {
+ DCHECK_LE(descriptor, number_of_all_descriptors());
+ ObjectSlot slot = GetDescriptorSlot(descriptor) + kEntryKeyIndex;
+ DCHECK((*slot)->IsObject());
+ return slot;
+}
+
+Name DescriptorArray::GetKey(int descriptor_number) const {
+ DCHECK(descriptor_number < number_of_descriptors());
+ return Name::cast(
+ get(ToKeyIndex(descriptor_number))->GetHeapObjectAssumeStrong());
+}
+
+int DescriptorArray::GetSortedKeyIndex(int descriptor_number) {
+ return GetDetails(descriptor_number).pointer();
+}
+
+Name DescriptorArray::GetSortedKey(int descriptor_number) {
+ return GetKey(GetSortedKeyIndex(descriptor_number));
+}
+
+void DescriptorArray::SetSortedKey(int descriptor_index, int pointer) {
+ PropertyDetails details = GetDetails(descriptor_index);
+ set(ToDetailsIndex(descriptor_index),
+ MaybeObject::FromObject(details.set_pointer(pointer).AsSmi()));
+}
+
+MaybeObjectSlot DescriptorArray::GetValueSlot(int descriptor) {
+ DCHECK_LT(descriptor, number_of_descriptors());
+ return MaybeObjectSlot(GetDescriptorSlot(descriptor) + kEntryValueIndex);
+}
+
+Object DescriptorArray::GetStrongValue(int descriptor_number) {
+ DCHECK(descriptor_number < number_of_descriptors());
+ return get(ToValueIndex(descriptor_number))->cast<Object>();
+}
+
+void DescriptorArray::SetValue(int descriptor_index, Object value) {
+ set(ToValueIndex(descriptor_index), MaybeObject::FromObject(value));
+}
+
+MaybeObject DescriptorArray::GetValue(int descriptor_number) {
+ DCHECK_LT(descriptor_number, number_of_descriptors());
+ return get(ToValueIndex(descriptor_number));
+}
+
+PropertyDetails DescriptorArray::GetDetails(int descriptor_number) {
+ DCHECK(descriptor_number < number_of_descriptors());
+ MaybeObject details = get(ToDetailsIndex(descriptor_number));
+ return PropertyDetails(details->ToSmi());
+}
+
+int DescriptorArray::GetFieldIndex(int descriptor_number) {
+ DCHECK_EQ(GetDetails(descriptor_number).location(), kField);
+ return GetDetails(descriptor_number).field_index();
+}
+
+FieldType DescriptorArray::GetFieldType(int descriptor_number) {
+ DCHECK_EQ(GetDetails(descriptor_number).location(), kField);
+ MaybeObject wrapped_type = GetValue(descriptor_number);
+ return Map::UnwrapFieldType(wrapped_type);
+}
+
+void DescriptorArray::Set(int descriptor_number, Name key, MaybeObject value,
+ PropertyDetails details) {
+ // Range check.
+ DCHECK(descriptor_number < number_of_descriptors());
+ set(ToKeyIndex(descriptor_number), MaybeObject::FromObject(key));
+ set(ToValueIndex(descriptor_number), value);
+ set(ToDetailsIndex(descriptor_number),
+ MaybeObject::FromObject(details.AsSmi()));
+}
+
+void DescriptorArray::Set(int descriptor_number, Descriptor* desc) {
+ Name key = *desc->GetKey();
+ MaybeObject value = *desc->GetValue();
+ Set(descriptor_number, key, value, desc->GetDetails());
+}
+
+void DescriptorArray::Append(Descriptor* desc) {
+ DisallowHeapAllocation no_gc;
+ int descriptor_number = number_of_descriptors();
+ DCHECK_LE(descriptor_number + 1, number_of_all_descriptors());
+ set_number_of_descriptors(descriptor_number + 1);
+ Set(descriptor_number, desc);
+
+ uint32_t hash = desc->GetKey()->Hash();
+
+ int insertion;
+
+ for (insertion = descriptor_number; insertion > 0; --insertion) {
+ Name key = GetSortedKey(insertion - 1);
+ if (key->Hash() <= hash) break;
+ SetSortedKey(insertion, GetSortedKeyIndex(insertion - 1));
+ }
+
+ SetSortedKey(insertion, descriptor_number);
+}
+
+void DescriptorArray::SwapSortedKeys(int first, int second) {
+ int first_key = GetSortedKeyIndex(first);
+ SetSortedKey(first, GetSortedKeyIndex(second));
+ SetSortedKey(second, first_key);
+}
+
+int DescriptorArray::length() const {
+ return number_of_all_descriptors() * kEntrySize;
+}
+
+MaybeObject DescriptorArray::get(int index) const {
+ DCHECK(index >= 0 && index < this->length());
+ return RELAXED_READ_WEAK_FIELD(*this, offset(index));
+}
+
+void DescriptorArray::set(int index, MaybeObject value) {
+ DCHECK(index >= 0 && index < this->length());
+ RELAXED_WRITE_WEAK_FIELD(*this, offset(index), value);
+ WEAK_WRITE_BARRIER(*this, offset(index), value);
+}
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_DESCRIPTOR_ARRAY_INL_H_
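// --- Editor's note: illustrative sketch, not part of the upstream diff. ---
// CompareAndSwapRawNumberOfMarkedDescriptors above lets the concurrent
// marker publish progress without a lock. The same contract expressed with
// std::atomic instead of base::Relaxed_CompareAndSwap:
#include <atomic>
#include <cstdint>

int16_t CompareAndSwapSketch(std::atomic<int16_t>& field, int16_t expected,
                             int16_t value) {
  // On failure compare_exchange_strong stores the observed value back into
  // `expected`, so returning it matches the "returns the previous value"
  // behaviour of the accessor above in both the success and failure cases.
  field.compare_exchange_strong(expected, value, std::memory_order_relaxed);
  return expected;
}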
diff --git a/deps/v8/src/objects/descriptor-array.h b/deps/v8/src/objects/descriptor-array.h
index c24deb68ad..3e67e94bf1 100644
--- a/deps/v8/src/objects/descriptor-array.h
+++ b/deps/v8/src/objects/descriptor-array.h
@@ -7,6 +7,8 @@
#include "src/objects.h"
#include "src/objects/fixed-array.h"
+#include "src/objects/struct.h"
+#include "src/utils.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -31,62 +33,57 @@ class EnumCache : public Tuple2 {
static const int kKeysOffset = kValue1Offset;
static const int kIndicesOffset = kValue2Offset;
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(EnumCache);
+ OBJECT_CONSTRUCTORS(EnumCache, Tuple2);
};
-// A DescriptorArray is a fixed array used to hold instance descriptors.
-// The format of these objects is:
-// [0]: Number of descriptors
-// [1]: Enum cache.
-// [2]: first key (and internalized String)
-// [3]: first descriptor details (see PropertyDetails)
-// [4]: first value for constants | Smi(1) when not used
-//
-// [2 + number of descriptors * 3]: start of slack
+// A DescriptorArray is a custom array that holds instance descriptors.
+// It has the following layout:
+// Header:
+// [16:0 bits]: number_of_all_descriptors (including slack)
+// [32:16 bits]: number_of_descriptors
+// [48:32 bits]: raw_number_of_marked_descriptors (used by GC)
+// [64:48 bits]: alignment filler
+// [kEnumCacheOffset]: enum cache
+// Elements:
+// [kHeaderSize + 0]: first key (and internalized String)
+// [kHeaderSize + 1]: first descriptor details (see PropertyDetails)
+// [kHeaderSize + 2]: first value for constants / Smi(1) when not used
+// Slack:
+// [kHeaderSize + number of descriptors * 3]: start of slack
// The "value" fields store either values or field types. A field type is either
// FieldType::None(), FieldType::Any() or a weak reference to a Map. All other
// references are strong.
-class DescriptorArray : public WeakFixedArray {
+class DescriptorArray : public HeapObject {
public:
- // Returns the number of descriptors in the array.
- inline int number_of_descriptors() const;
- inline int number_of_descriptors_storage() const;
- inline int NumberOfSlackDescriptors() const;
-
- inline void SetNumberOfDescriptors(int number_of_descriptors);
+ DECL_INT16_ACCESSORS(number_of_all_descriptors)
+ DECL_INT16_ACCESSORS(number_of_descriptors)
+ inline int16_t number_of_slack_descriptors() const;
inline int number_of_entries() const;
-
- inline EnumCache* GetEnumCache();
+ DECL_ACCESSORS(enum_cache, EnumCache)
void ClearEnumCache();
- inline void CopyEnumCacheFrom(DescriptorArray* array);
- // Initialize or change the enum cache,
- static void SetEnumCache(Handle<DescriptorArray> descriptors,
- Isolate* isolate, Handle<FixedArray> keys,
- Handle<FixedArray> indices);
+ inline void CopyEnumCacheFrom(DescriptorArray array);
+ static void InitializeOrChangeEnumCache(Handle<DescriptorArray> descriptors,
+ Isolate* isolate,
+ Handle<FixedArray> keys,
+ Handle<FixedArray> indices);
// Accessors for fetching instance descriptor at descriptor number.
- inline Name* GetKey(int descriptor_number);
- inline Object** GetKeySlot(int descriptor_number);
- inline Object* GetStrongValue(int descriptor_number);
- inline void SetValue(int descriptor_number, Object* value);
- inline MaybeObject* GetValue(int descriptor_number);
- inline MaybeObject** GetValueSlot(int descriptor_number);
- static inline int GetValueOffset(int descriptor_number);
- inline MaybeObject** GetDescriptorStartSlot(int descriptor_number);
- inline MaybeObject** GetDescriptorEndSlot(int descriptor_number);
+ inline Name GetKey(int descriptor_number) const;
+ inline Object GetStrongValue(int descriptor_number);
+ inline void SetValue(int descriptor_number, Object value);
+ inline MaybeObject GetValue(int descriptor_number);
inline PropertyDetails GetDetails(int descriptor_number);
inline int GetFieldIndex(int descriptor_number);
- inline FieldType* GetFieldType(int descriptor_number);
+ inline FieldType GetFieldType(int descriptor_number);
- inline Name* GetSortedKey(int descriptor_number);
+ inline Name GetSortedKey(int descriptor_number);
inline int GetSortedKeyIndex(int descriptor_number);
inline void SetSortedKey(int pointer, int descriptor_number);
// Accessor for complete descriptor.
inline void Set(int descriptor_number, Descriptor* desc);
- inline void Set(int descriptor_number, Name* key, MaybeObject* value,
+ inline void Set(int descriptor_number, Name key, MaybeObject value,
PropertyDetails details);
void Replace(int descriptor_number, Descriptor* descriptor);
@@ -115,34 +112,66 @@ class DescriptorArray : public WeakFixedArray {
void Sort();
// Search the instance descriptors for given name.
- V8_INLINE int Search(Name* name, int number_of_own_descriptors);
- V8_INLINE int Search(Name* name, Map* map);
+ V8_INLINE int Search(Name name, int number_of_own_descriptors);
+ V8_INLINE int Search(Name name, Map map);
// As the above, but uses DescriptorLookupCache and updates it when
// necessary.
- V8_INLINE int SearchWithCache(Isolate* isolate, Name* name, Map* map);
+ V8_INLINE int SearchWithCache(Isolate* isolate, Name name, Map map);
- bool IsEqualUpTo(DescriptorArray* desc, int nof_descriptors);
+ bool IsEqualUpTo(DescriptorArray desc, int nof_descriptors);
// Allocates a DescriptorArray, but returns the singleton
// empty descriptor array object if number_of_descriptors is 0.
static Handle<DescriptorArray> Allocate(
- Isolate* isolate, int number_of_descriptors, int slack,
+ Isolate* isolate, int nof_descriptors, int slack,
PretenureFlag pretenure = NOT_TENURED);
+ void Initialize(EnumCache enum_cache, HeapObject undefined_value,
+ int nof_descriptors, int slack);
+
DECL_CAST(DescriptorArray)
// Constant for denoting key was not found.
static const int kNotFound = -1;
- static const int kDescriptorLengthIndex = 0;
- static const int kEnumCacheIndex = 1;
- static const int kFirstIndex = 2;
-
// Layout description.
- static const int kDescriptorLengthOffset = FixedArray::kHeaderSize;
- static const int kEnumCacheOffset = kDescriptorLengthOffset + kPointerSize;
- static const int kFirstOffset = kEnumCacheOffset + kPointerSize;
+#define DESCRIPTOR_ARRAY_FIELDS(V) \
+ V(kNumberOfAllDescriptorsOffset, kUInt16Size) \
+ V(kNumberOfDescriptorsOffset, kUInt16Size) \
+ V(kRawNumberOfMarkedDescriptorsOffset, kUInt16Size) \
+ V(kFiller16BitsOffset, kUInt16Size) \
+ V(kPointersStartOffset, 0) \
+ V(kEnumCacheOffset, kTaggedSize) \
+ V(kHeaderSize, 0)
+
+ DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize,
+ DESCRIPTOR_ARRAY_FIELDS)
+#undef DESCRIPTOR_ARRAY_FIELDS
+
+ STATIC_ASSERT(IsAligned(kPointersStartOffset, kTaggedSize));
+ STATIC_ASSERT(IsAligned(kHeaderSize, kTaggedSize));
+
+ // Garbage collection support.
+ DECL_INT16_ACCESSORS(raw_number_of_marked_descriptors)
+ // Atomic compare-and-swap operation on the raw_number_of_marked_descriptors.
+ int16_t CompareAndSwapRawNumberOfMarkedDescriptors(int16_t expected,
+ int16_t value);
+ int16_t UpdateNumberOfMarkedDescriptors(unsigned mark_compact_epoch,
+ int16_t number_of_marked_descriptors);
+
+ static constexpr int SizeFor(int number_of_all_descriptors) {
+ return offset(number_of_all_descriptors * kEntrySize);
+ }
+ static constexpr int OffsetOfDescriptorAt(int descriptor) {
+ return offset(descriptor * kEntrySize);
+ }
+ inline ObjectSlot GetFirstPointerSlot();
+ inline ObjectSlot GetDescriptorSlot(int descriptor);
+ inline ObjectSlot GetKeySlot(int descriptor);
+ inline MaybeObjectSlot GetValueSlot(int descriptor);
+
+ typedef FlexibleWeakBodyDescriptor<kPointersStartOffset> BodyDescriptor;
// Layout of descriptor.
// Naming is consistent with Dictionary classes for easy templating.
@@ -164,40 +193,77 @@ class DescriptorArray : public WeakFixedArray {
bool IsSortedNoDuplicates(int valid_descriptors = -1);
// Are two DescriptorArrays equal?
- bool IsEqualTo(DescriptorArray* other);
+ bool IsEqualTo(DescriptorArray other);
#endif
- // Returns the fixed array length required to hold number_of_descriptors
- // descriptors.
- static constexpr int LengthFor(int number_of_descriptors) {
- return ToKeyIndex(number_of_descriptors);
- }
-
static constexpr int ToDetailsIndex(int descriptor_number) {
- return kFirstIndex + (descriptor_number * kEntrySize) + kEntryDetailsIndex;
+ return (descriptor_number * kEntrySize) + kEntryDetailsIndex;
}
// Conversion from descriptor number to array indices.
static constexpr int ToKeyIndex(int descriptor_number) {
- return kFirstIndex + (descriptor_number * kEntrySize) + kEntryKeyIndex;
+ return (descriptor_number * kEntrySize) + kEntryKeyIndex;
}
static constexpr int ToValueIndex(int descriptor_number) {
- return kFirstIndex + (descriptor_number * kEntrySize) + kEntryValueIndex;
+ return (descriptor_number * kEntrySize) + kEntryValueIndex;
}
private:
- inline MaybeObject* get(int index) const;
- inline void set(int index, MaybeObject* value);
+ DECL_INT16_ACCESSORS(filler16bits)
+ // Low-level per-element accessors.
+ static constexpr int offset(int index) {
+ return kHeaderSize + index * kTaggedSize;
+ }
+ inline int length() const;
+ inline MaybeObject get(int index) const;
+ inline void set(int index, MaybeObject value);
// Transfer a complete descriptor from the src descriptor array to this
// descriptor array.
- void CopyFrom(int index, DescriptorArray* src);
+ void CopyFrom(int index, DescriptorArray src);
// Swap first and second descriptor.
inline void SwapSortedKeys(int first, int second);
- DISALLOW_IMPLICIT_CONSTRUCTORS(DescriptorArray);
+ OBJECT_CONSTRUCTORS(DescriptorArray, HeapObject);
+};
+
+class NumberOfMarkedDescriptors {
+ public:
+// Bit positions for |bit_field|.
+#define BIT_FIELD_FIELDS(V, _) \
+ V(Epoch, unsigned, 2, _) \
+ V(Marked, int16_t, 14, _)
+ DEFINE_BIT_FIELDS(BIT_FIELD_FIELDS)
+#undef BIT_FIELD_FIELDS
+ static const int kMaxNumberOfMarkedDescriptors = Marked::kMax;
+ // Decodes the raw value of the number of marked descriptors for the
+ // given mark compact garbage collection epoch.
+ static inline int16_t decode(unsigned mark_compact_epoch, int16_t raw_value) {
+ unsigned epoch_from_value = Epoch::decode(static_cast<uint16_t>(raw_value));
+ int16_t marked_from_value =
+ Marked::decode(static_cast<uint16_t>(raw_value));
+ unsigned actual_epoch = mark_compact_epoch & Epoch::kMask;
+ if (actual_epoch == epoch_from_value) return marked_from_value;
+ // If the epochs do not match, then either the raw_value is zero (freshly
+ // allocated descriptor array) or the epoch from value lags by 1.
+ DCHECK_IMPLIES(raw_value != 0,
+ Epoch::decode(epoch_from_value + 1) == actual_epoch);
+ // Non-matching epochs mean that no descriptors were marked in the
+ // current epoch.
+ return 0;
+ }
+
+ // Encodes the number of marked descriptors for the given mark compact
+ // garbage collection epoch.
+ static inline int16_t encode(unsigned mark_compact_epoch, int16_t value) {
+ // TODO(ulan): avoid casting to int16_t by adding support for uint16_t
+ // atomics.
+ return static_cast<int16_t>(
+ Epoch::encode(mark_compact_epoch & Epoch::kMask) |
+ Marked::encode(value));
+ }
};
} // namespace internal
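
NumberOfMarkedDescriptors packs a 2-bit GC epoch and a 14-bit counter into one 16-bit value so that a stale count from a previous mark-compact cycle reads as zero. A sketch of the same packing with plain bit operations instead of V8's bit-field macros:

#include <cstdint>

constexpr unsigned kEpochBits = 2;
constexpr uint16_t kEpochMask = (1u << kEpochBits) - 1;  // epoch in bits 0-1

inline uint16_t Encode(unsigned epoch, uint16_t marked) {
  return static_cast<uint16_t>((marked << kEpochBits) | (epoch & kEpochMask));
}

inline uint16_t Decode(unsigned epoch, uint16_t raw) {
  // A stale epoch means no descriptors were marked in the current cycle.
  if ((raw & kEpochMask) != (epoch & kEpochMask)) return 0;
  return static_cast<uint16_t>(raw >> kEpochBits);
}
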
diff --git a/deps/v8/src/objects/dictionary-inl.h b/deps/v8/src/objects/dictionary-inl.h
new file mode 100644
index 0000000000..39cc1c61b8
--- /dev/null
+++ b/deps/v8/src/objects/dictionary-inl.h
@@ -0,0 +1,214 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_DICTIONARY_INL_H_
+#define V8_OBJECTS_DICTIONARY_INL_H_
+
+#include "src/objects/dictionary.h"
+
+#include "src/objects/oddball.h"
+#include "src/objects/property-cell-inl.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+CAST_ACCESSOR(GlobalDictionary)
+CAST_ACCESSOR(NameDictionary)
+CAST_ACCESSOR(NumberDictionary)
+CAST_ACCESSOR(SimpleNumberDictionary)
+
+template <typename Derived, typename Shape>
+Dictionary<Derived, Shape>::Dictionary(Address ptr)
+ : HashTable<Derived, Shape>(ptr) {}
+
+template <typename Derived, typename Shape>
+BaseNameDictionary<Derived, Shape>::BaseNameDictionary(Address ptr)
+ : Dictionary<Derived, Shape>(ptr) {}
+
+GlobalDictionary::GlobalDictionary(Address ptr)
+ : BaseNameDictionary<GlobalDictionary, GlobalDictionaryShape>(ptr) {
+ SLOW_DCHECK(IsGlobalDictionary());
+}
+
+NameDictionary::NameDictionary(Address ptr)
+ : BaseNameDictionary<NameDictionary, NameDictionaryShape>(ptr) {
+ SLOW_DCHECK(IsNameDictionary());
+}
+
+NumberDictionary::NumberDictionary(Address ptr)
+ : Dictionary<NumberDictionary, NumberDictionaryShape>(ptr) {
+ SLOW_DCHECK(IsNumberDictionary());
+}
+
+SimpleNumberDictionary::SimpleNumberDictionary(Address ptr)
+ : Dictionary<SimpleNumberDictionary, SimpleNumberDictionaryShape>(ptr) {
+ SLOW_DCHECK(IsSimpleNumberDictionary());
+}
+
+bool NumberDictionary::requires_slow_elements() {
+ Object max_index_object = get(kMaxNumberKeyIndex);
+ if (!max_index_object->IsSmi()) return false;
+ return 0 != (Smi::ToInt(max_index_object) & kRequiresSlowElementsMask);
+}
+
+uint32_t NumberDictionary::max_number_key() {
+ DCHECK(!requires_slow_elements());
+ Object max_index_object = get(kMaxNumberKeyIndex);
+ if (!max_index_object->IsSmi()) return 0;
+ uint32_t value = static_cast<uint32_t>(Smi::ToInt(max_index_object));
+ return value >> kRequiresSlowElementsTagSize;
+}
+
+void NumberDictionary::set_requires_slow_elements() {
+ set(kMaxNumberKeyIndex, Smi::FromInt(kRequiresSlowElementsMask));
+}
+
+template <typename Derived, typename Shape>
+void Dictionary<Derived, Shape>::ClearEntry(Isolate* isolate, int entry) {
+ Object the_hole = this->GetReadOnlyRoots().the_hole_value();
+ PropertyDetails details = PropertyDetails::Empty();
+ Derived::cast(*this)->SetEntry(isolate, entry, the_hole, the_hole, details);
+}
+
+template <typename Derived, typename Shape>
+void Dictionary<Derived, Shape>::SetEntry(Isolate* isolate, int entry,
+ Object key, Object value,
+ PropertyDetails details) {
+ DCHECK(Dictionary::kEntrySize == 2 || Dictionary::kEntrySize == 3);
+ DCHECK(!key->IsName() || details.dictionary_index() > 0);
+ int index = DerivedHashTable::EntryToIndex(entry);
+ DisallowHeapAllocation no_gc;
+ WriteBarrierMode mode = this->GetWriteBarrierMode(no_gc);
+ this->set(index + Derived::kEntryKeyIndex, key, mode);
+ this->set(index + Derived::kEntryValueIndex, value, mode);
+ if (Shape::kHasDetails) DetailsAtPut(isolate, entry, details);
+}
+
+Object GlobalDictionaryShape::Unwrap(Object object) {
+ return PropertyCell::cast(object)->name();
+}
+
+RootIndex GlobalDictionaryShape::GetMapRootIndex() {
+ return RootIndex::kGlobalDictionaryMap;
+}
+
+Name NameDictionary::NameAt(int entry) { return Name::cast(KeyAt(entry)); }
+
+RootIndex NameDictionaryShape::GetMapRootIndex() {
+ return RootIndex::kNameDictionaryMap;
+}
+
+PropertyCell GlobalDictionary::CellAt(int entry) {
+ DCHECK(KeyAt(entry)->IsPropertyCell());
+ return PropertyCell::cast(KeyAt(entry));
+}
+
+bool GlobalDictionaryShape::IsLive(ReadOnlyRoots roots, Object k) {
+ DCHECK_NE(roots.the_hole_value(), k);
+ return k != roots.undefined_value();
+}
+
+bool GlobalDictionaryShape::IsKey(ReadOnlyRoots roots, Object k) {
+ return IsLive(roots, k) && !PropertyCell::cast(k)->value()->IsTheHole(roots);
+}
+
+Name GlobalDictionary::NameAt(int entry) { return CellAt(entry)->name(); }
+Object GlobalDictionary::ValueAt(int entry) { return CellAt(entry)->value(); }
+
+void GlobalDictionary::SetEntry(Isolate* isolate, int entry, Object key,
+ Object value, PropertyDetails details) {
+ DCHECK_EQ(key, PropertyCell::cast(value)->name());
+ set(EntryToIndex(entry) + kEntryKeyIndex, value);
+ DetailsAtPut(isolate, entry, details);
+}
+
+void GlobalDictionary::ValueAtPut(int entry, Object value) {
+ set(EntryToIndex(entry), value);
+}
+
+bool NumberDictionaryBaseShape::IsMatch(uint32_t key, Object other) {
+ DCHECK(other->IsNumber());
+ return key == static_cast<uint32_t>(other->Number());
+}
+
+uint32_t NumberDictionaryBaseShape::Hash(Isolate* isolate, uint32_t key) {
+ return ComputeSeededHash(key, isolate->heap()->HashSeed());
+}
+
+uint32_t NumberDictionaryBaseShape::HashForObject(Isolate* isolate,
+ Object other) {
+ DCHECK(other->IsNumber());
+ return ComputeSeededHash(static_cast<uint32_t>(other->Number()),
+ isolate->heap()->HashSeed());
+}
+
+Handle<Object> NumberDictionaryBaseShape::AsHandle(Isolate* isolate,
+ uint32_t key) {
+ return isolate->factory()->NewNumberFromUint(key);
+}
+
+RootIndex NumberDictionaryShape::GetMapRootIndex() {
+ return RootIndex::kNumberDictionaryMap;
+}
+
+RootIndex SimpleNumberDictionaryShape::GetMapRootIndex() {
+ return RootIndex::kSimpleNumberDictionaryMap;
+}
+
+bool NameDictionaryShape::IsMatch(Handle<Name> key, Object other) {
+ DCHECK(other->IsTheHole() || Name::cast(other)->IsUniqueName());
+ DCHECK(key->IsUniqueName());
+ return *key == other;
+}
+
+uint32_t NameDictionaryShape::Hash(Isolate* isolate, Handle<Name> key) {
+ return key->Hash();
+}
+
+uint32_t NameDictionaryShape::HashForObject(Isolate* isolate, Object other) {
+ return Name::cast(other)->Hash();
+}
+
+bool GlobalDictionaryShape::IsMatch(Handle<Name> key, Object other) {
+ DCHECK(PropertyCell::cast(other)->name()->IsUniqueName());
+ return *key == PropertyCell::cast(other)->name();
+}
+
+uint32_t GlobalDictionaryShape::HashForObject(Isolate* isolate, Object other) {
+ return PropertyCell::cast(other)->name()->Hash();
+}
+
+Handle<Object> NameDictionaryShape::AsHandle(Isolate* isolate,
+ Handle<Name> key) {
+ DCHECK(key->IsUniqueName());
+ return key;
+}
+
+template <typename Dictionary>
+PropertyDetails GlobalDictionaryShape::DetailsAt(Dictionary dict, int entry) {
+ DCHECK_LE(0, entry); // Not found is -1, which is not caught by get().
+ return dict->CellAt(entry)->property_details();
+}
+
+template <typename Dictionary>
+void GlobalDictionaryShape::DetailsAtPut(Isolate* isolate, Dictionary dict,
+ int entry, PropertyDetails value) {
+ DCHECK_LE(0, entry); // Not found is -1, which is not caught by get().
+ PropertyCell cell = dict->CellAt(entry);
+ if (cell->property_details().IsReadOnly() != value.IsReadOnly()) {
+ cell->dependent_code()->DeoptimizeDependentCodeGroup(
+ isolate, DependentCode::kPropertyCellChangedGroup);
+ }
+ cell->set_property_details(value);
+}
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_DICTIONARY_INL_H_
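
In the GlobalDictionary accessors above, the hash-table key slot holds the PropertyCell itself; the name, value, and property details are all read through that cell. A simplified sketch of the indirection, using hypothetical stand-in types:

struct Name { unsigned hash; };
struct PropertyDetails { int raw; };

struct PropertyCell {
  Name name;
  void* value;
  PropertyDetails details;
};

// The entry stores only the cell (see SetEntry() above); everything else
// is derived from it.
struct GlobalEntry {
  PropertyCell* cell;
  Name NameAt() const { return cell->name; }
  void* ValueAt() const { return cell->value; }
  PropertyDetails DetailsAt() const { return cell->details; }
};
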
diff --git a/deps/v8/src/objects/dictionary.h b/deps/v8/src/objects/dictionary.h
index 6d7ee42eec..1b0c57ec7d 100644
--- a/deps/v8/src/objects/dictionary.h
+++ b/deps/v8/src/objects/dictionary.h
@@ -9,6 +9,7 @@
#include "src/globals.h"
#include "src/objects/hash-table.h"
#include "src/objects/property-array.h"
+#include "src/objects/smi.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -28,23 +29,23 @@ class Dictionary : public HashTable<Derived, Shape> {
public:
typedef typename Shape::Key Key;
// Returns the value at entry.
- Object* ValueAt(int entry) {
+ Object ValueAt(int entry) {
return this->get(DerivedHashTable::EntryToIndex(entry) + 1);
}
// Set the value for entry.
- void ValueAtPut(int entry, Object* value) {
+ void ValueAtPut(int entry, Object value) {
this->set(DerivedHashTable::EntryToIndex(entry) + 1, value);
}
// Returns the property details for the property at entry.
PropertyDetails DetailsAt(int entry) {
- return Shape::DetailsAt(static_cast<Derived*>(this), entry);
+ return Shape::DetailsAt(Derived::cast(*this), entry);
}
// Set the details for entry.
void DetailsAtPut(Isolate* isolate, int entry, PropertyDetails value) {
- Shape::DetailsAtPut(isolate, static_cast<Derived*>(this), entry, value);
+ Shape::DetailsAtPut(isolate, Derived::cast(*this), entry, value);
}
// Delete a property from the dictionary.
@@ -66,11 +67,11 @@ class Dictionary : public HashTable<Derived, Shape> {
void Print(std::ostream& os); // NOLINT
#endif
// Returns the key (slow).
- Object* SlowReverseLookup(Object* value);
+ Object SlowReverseLookup(Object value);
// Sets the entry to (key, value) pair.
inline void ClearEntry(Isolate* isolate, int entry);
- inline void SetEntry(Isolate* isolate, int entry, Object* key, Object* value,
+ inline void SetEntry(Isolate* isolate, int entry, Object key, Object value,
PropertyDetails details);
V8_WARN_UNUSED_RESULT static Handle<Derived> Add(
@@ -84,6 +85,8 @@ class Dictionary : public HashTable<Derived, Shape> {
Key key,
Handle<Object> value,
PropertyDetails details);
+
+ OBJECT_CONSTRUCTORS(Dictionary, HashTable<Derived, Shape>)
};
template <typename Key>
@@ -91,7 +94,7 @@ class BaseDictionaryShape : public BaseShape<Key> {
public:
static const bool kHasDetails = true;
template <typename Dictionary>
- static inline PropertyDetails DetailsAt(Dictionary* dict, int entry) {
+ static inline PropertyDetails DetailsAt(Dictionary dict, int entry) {
STATIC_ASSERT(Dictionary::kEntrySize == 3);
DCHECK_GE(entry, 0); // Not found is -1, which is not caught by get().
return PropertyDetails(Smi::cast(dict->get(
@@ -99,7 +102,7 @@ class BaseDictionaryShape : public BaseShape<Key> {
}
template <typename Dictionary>
- static inline void DetailsAtPut(Isolate* isolate, Dictionary* dict, int entry,
+ static inline void DetailsAtPut(Isolate* isolate, Dictionary dict, int entry,
PropertyDetails value) {
STATIC_ASSERT(Dictionary::kEntrySize == 3);
dict->set(Dictionary::EntryToIndex(entry) + Dictionary::kEntryDetailsIndex,
@@ -109,9 +112,9 @@ class BaseDictionaryShape : public BaseShape<Key> {
class NameDictionaryShape : public BaseDictionaryShape<Handle<Name>> {
public:
- static inline bool IsMatch(Handle<Name> key, Object* other);
+ static inline bool IsMatch(Handle<Name> key, Object other);
static inline uint32_t Hash(Isolate* isolate, Handle<Name> key);
- static inline uint32_t HashForObject(Isolate* isolate, Object* object);
+ static inline uint32_t HashForObject(Isolate* isolate, Object object);
static inline Handle<Object> AsHandle(Isolate* isolate, Handle<Name> key);
static inline RootIndex GetMapRootIndex();
static const int kPrefixSize = 2;
@@ -146,7 +149,7 @@ class BaseNameDictionary : public Dictionary<Derived, Shape> {
}
int Hash() const {
- Object* hash_obj = this->get(kObjectHashIndex);
+ Object hash_obj = this->get(kObjectHashIndex);
int hash = Smi::ToInt(hash_obj);
DCHECK(PropertyArray::HashField::is_valid(hash));
return hash;
@@ -184,6 +187,8 @@ class BaseNameDictionary : public Dictionary<Derived, Shape> {
V8_WARN_UNUSED_RESULT static Handle<Derived> Add(
Isolate* isolate, Handle<Derived> dictionary, Key key,
Handle<Object> value, PropertyDetails details, int* entry_out = nullptr);
+
+ OBJECT_CONSTRUCTORS(BaseNameDictionary, Dictionary<Derived, Shape>)
};
class NameDictionary
@@ -194,28 +199,31 @@ class NameDictionary
static const int kEntryDetailsIndex = 2;
static const int kInitialCapacity = 2;
- inline Name* NameAt(int entry);
+ inline Name NameAt(int entry);
inline void set_hash(int hash);
inline int hash() const;
+
+ OBJECT_CONSTRUCTORS(NameDictionary,
+ BaseNameDictionary<NameDictionary, NameDictionaryShape>)
};
class GlobalDictionaryShape : public NameDictionaryShape {
public:
- static inline bool IsMatch(Handle<Name> key, Object* other);
- static inline uint32_t HashForObject(Isolate* isolate, Object* object);
+ static inline bool IsMatch(Handle<Name> key, Object other);
+ static inline uint32_t HashForObject(Isolate* isolate, Object object);
static const int kEntrySize = 1; // Overrides NameDictionaryShape::kEntrySize
template <typename Dictionary>
- static inline PropertyDetails DetailsAt(Dictionary* dict, int entry);
+ static inline PropertyDetails DetailsAt(Dictionary dict, int entry);
template <typename Dictionary>
- static inline void DetailsAtPut(Isolate* isolate, Dictionary* dict, int entry,
+ static inline void DetailsAtPut(Isolate* isolate, Dictionary dict, int entry,
PropertyDetails value);
- static inline Object* Unwrap(Object* key);
- static inline bool IsKey(ReadOnlyRoots roots, Object* k);
- static inline bool IsLive(ReadOnlyRoots roots, Object* key);
+ static inline Object Unwrap(Object key);
+ static inline bool IsKey(ReadOnlyRoots roots, Object k);
+ static inline bool IsLive(ReadOnlyRoots roots, Object key);
static inline RootIndex GetMapRootIndex();
};
@@ -224,21 +232,25 @@ class GlobalDictionary
public:
DECL_CAST(GlobalDictionary)
- inline Object* ValueAt(int entry);
- inline PropertyCell* CellAt(int entry);
- inline void SetEntry(Isolate* isolate, int entry, Object* key, Object* value,
+ inline Object ValueAt(int entry);
+ inline PropertyCell CellAt(int entry);
+ inline void SetEntry(Isolate* isolate, int entry, Object key, Object value,
PropertyDetails details);
- inline Name* NameAt(int entry);
- inline void ValueAtPut(int entry, Object* value);
+ inline Name NameAt(int entry);
+ inline void ValueAtPut(int entry, Object value);
+
+ OBJECT_CONSTRUCTORS(
+ GlobalDictionary,
+ BaseNameDictionary<GlobalDictionary, GlobalDictionaryShape>)
};
class NumberDictionaryBaseShape : public BaseDictionaryShape<uint32_t> {
public:
- static inline bool IsMatch(uint32_t key, Object* other);
+ static inline bool IsMatch(uint32_t key, Object other);
static inline Handle<Object> AsHandle(Isolate* isolate, uint32_t key);
static inline uint32_t Hash(Isolate* isolate, uint32_t key);
- static inline uint32_t HashForObject(Isolate* isolate, Object* object);
+ static inline uint32_t HashForObject(Isolate* isolate, Object object);
};
class NumberDictionaryShape : public NumberDictionaryBaseShape {
@@ -256,12 +268,12 @@ class SimpleNumberDictionaryShape : public NumberDictionaryBaseShape {
static const int kEntrySize = 2;
template <typename Dictionary>
- static inline PropertyDetails DetailsAt(Dictionary* dict, int entry) {
+ static inline PropertyDetails DetailsAt(Dictionary dict, int entry) {
UNREACHABLE();
}
template <typename Dictionary>
- static inline void DetailsAtPut(Isolate* isolate, Dictionary* dict, int entry,
+ static inline void DetailsAtPut(Isolate* isolate, Dictionary dict, int entry,
PropertyDetails value) {
UNREACHABLE();
}
@@ -286,6 +298,10 @@ class SimpleNumberDictionary
Handle<Object> value);
static const int kEntryValueIndex = 1;
+
+ OBJECT_CONSTRUCTORS(
+ SimpleNumberDictionary,
+ Dictionary<SimpleNumberDictionary, SimpleNumberDictionaryShape>)
};
extern template class EXPORT_TEMPLATE_DECLARE(
@@ -317,7 +333,7 @@ class NumberDictionary
bool HasComplexElements();
// Sorting support
- void CopyValuesTo(FixedArray* elements);
+ void CopyValuesTo(FixedArray elements);
// If slow elements are required we will never go back to fast-case
// for the elements kept in this dictionary. We require slow
@@ -343,6 +359,9 @@ class NumberDictionary
// JSObjects prefer dictionary elements if the dictionary saves this much
// memory compared to a fast elements backing store.
static const uint32_t kPreferFastElementsSizeFactor = 3;
+
+ OBJECT_CONSTRUCTORS(NumberDictionary,
+ Dictionary<NumberDictionary, NumberDictionaryShape>)
};
} // namespace internal
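
The Shape template parameter used throughout this header is a static policy: Dictionary<Derived, Shape> forwards per-entry concerns such as details storage to Shape hooks, so specializations like GlobalDictionaryShape can relocate the details without virtual dispatch. A minimal sketch of the pattern, not V8's actual classes:

template <typename Derived, typename Shape>
struct DictionaryBase {
  int DetailsAt(int entry) {
    // Static dispatch through the shape, as in Dictionary::DetailsAt above.
    return Shape::DetailsAt(static_cast<Derived*>(this), entry);
  }
};

struct InlineDetailsShape {
  template <typename Dictionary>
  static int DetailsAt(Dictionary* dict, int entry) {
    return dict->details_storage[entry];  // details live in the entry itself
  }
};

struct ExampleDictionary
    : DictionaryBase<ExampleDictionary, InlineDetailsShape> {
  int details_storage[16] = {};
};
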
diff --git a/deps/v8/src/objects/embedder-data-array-inl.h b/deps/v8/src/objects/embedder-data-array-inl.h
new file mode 100644
index 0000000000..475945f1f1
--- /dev/null
+++ b/deps/v8/src/objects/embedder-data-array-inl.h
@@ -0,0 +1,39 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_EMBEDDER_DATA_ARRAY_INL_H_
+#define V8_OBJECTS_EMBEDDER_DATA_ARRAY_INL_H_
+
+#include "src/objects/embedder-data-array.h"
+
+// #include "src/objects-inl.h"  // Needed for write barriers
+#include "src/objects/maybe-object-inl.h"
+#include "src/objects/slots.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+CAST_ACCESSOR(EmbedderDataArray)
+
+SMI_ACCESSORS(EmbedderDataArray, length, kLengthOffset)
+
+OBJECT_CONSTRUCTORS_IMPL(EmbedderDataArray, HeapObject)
+
+Address EmbedderDataArray::slots_start() {
+ return FIELD_ADDR(this, OffsetOfElementAt(0));
+}
+
+Address EmbedderDataArray::slots_end() {
+ return FIELD_ADDR(this, OffsetOfElementAt(length()));
+}
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_EMBEDDER_DATA_ARRAY_INL_H_
diff --git a/deps/v8/src/objects/embedder-data-array.cc b/deps/v8/src/objects/embedder-data-array.cc
new file mode 100644
index 0000000000..665a1fa2f4
--- /dev/null
+++ b/deps/v8/src/objects/embedder-data-array.cc
@@ -0,0 +1,27 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/objects/embedder-data-array.h"
+#include "src/objects/embedder-data-array-inl.h"
+
+namespace v8 {
+namespace internal {
+
+// static
+Handle<EmbedderDataArray> EmbedderDataArray::EnsureCapacity(
+ Isolate* isolate, Handle<EmbedderDataArray> array, int index) {
+ if (index < array->length()) return array;
+ DCHECK_LT(index, kMaxLength);
+ Handle<EmbedderDataArray> new_array =
+ isolate->factory()->NewEmbedderDataArray(index + 1);
+ DisallowHeapAllocation no_gc;
+ // The new array was just allocated in new space, so copying into it does
+ // not require any write barriers.
+ size_t size = array->length() * kEmbedderDataSlotSize;
+ MemCopy(reinterpret_cast<void*>(new_array->slots_start()),
+ reinterpret_cast<void*>(array->slots_start()), size);
+ return new_array;
+}
+
+} // namespace internal
+} // namespace v8
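
EnsureCapacity() implements a simple grow-on-demand contract: the array is returned unchanged when the index fits, and otherwise a copy with index + 1 slots is produced. A sketch of that contract with a plain vector standing in for the heap object:

#include <algorithm>
#include <vector>

std::vector<void*> EnsureCapacity(const std::vector<void*>& array, int index) {
  if (index < static_cast<int>(array.size())) return array;  // already fits
  std::vector<void*> grown(index + 1, nullptr);  // grow and zero-fill
  std::copy(array.begin(), array.end(), grown.begin());  // keep old slots
  return grown;
}
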
diff --git a/deps/v8/src/objects/embedder-data-array.h b/deps/v8/src/objects/embedder-data-array.h
new file mode 100644
index 0000000000..751e4a94a5
--- /dev/null
+++ b/deps/v8/src/objects/embedder-data-array.h
@@ -0,0 +1,78 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_EMBEDDER_DATA_ARRAY_H_
+#define V8_OBJECTS_EMBEDDER_DATA_ARRAY_H_
+
+#include "src/globals.h"
+#include "src/maybe-handles.h"
+#include "src/objects/heap-object.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+// This is a storage array for embedder data fields stored in a native context.
+// It is essentially an "array of EmbedderDataSlots".
+// Note: if pointer compression is enabled, each embedder data slot also
+// contains a raw data part in addition to the tagged part.
+class EmbedderDataArray : public HeapObject {
+ public:
+ // [length]: length of the array in embedder data slots.
+ V8_INLINE int length() const;
+ V8_INLINE void set_length(int value);
+
+ DECL_CAST(EmbedderDataArray)
+
+// Layout description.
+#define EMBEDDER_DATA_ARRAY_FIELDS(V) \
+ V(kLengthOffset, kTaggedSize) \
+ V(kHeaderSize, 0)
+
+ DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize,
+ EMBEDDER_DATA_ARRAY_FIELDS)
+#undef EMBEDDER_DATA_ARRAY_FIELDS
+
+ // Garbage collection support.
+ static constexpr int SizeFor(int length) {
+ return kHeaderSize + length * kEmbedderDataSlotSize;
+ }
+
+ // Returns a grown copy if the index is bigger than the array's length.
+ static Handle<EmbedderDataArray> EnsureCapacity(
+ Isolate* isolate, Handle<EmbedderDataArray> array, int index);
+
+ // Code Generation support.
+ static constexpr int OffsetOfElementAt(int index) { return SizeFor(index); }
+
+ // Address of the first slot.
+ V8_INLINE Address slots_start();
+
+ // Address one past the last slot.
+ V8_INLINE Address slots_end();
+
+ // Dispatched behavior.
+ DECL_PRINTER(EmbedderDataArray)
+ DECL_VERIFIER(EmbedderDataArray)
+
+ class BodyDescriptor;
+
+ static const int kMaxSize = kMaxRegularHeapObjectSize;
+ static constexpr int kMaxLength =
+ (kMaxSize - kHeaderSize) / kEmbedderDataSlotSize;
+
+ private:
+ STATIC_ASSERT(kHeaderSize == Internals::kFixedArrayHeaderSize);
+
+ OBJECT_CONSTRUCTORS(EmbedderDataArray, HeapObject);
+};
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_EMBEDDER_DATA_ARRAY_H_
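
The layout arithmetic above is linear in the slot index, so SizeFor() and OffsetOfElementAt() coincide. A compile-time sketch under assumed 64-bit, no-compression constants (the real values depend on the build configuration):

constexpr int kTaggedSize = 8;                 // assumption for this sketch
constexpr int kEmbedderDataSlotSize = kTaggedSize;
constexpr int kHeaderSize = 16;                // assumed: map word + length word

constexpr int SizeFor(int length) {
  return kHeaderSize + length * kEmbedderDataSlotSize;
}
constexpr int OffsetOfElementAt(int index) { return SizeFor(index); }

static_assert(SizeFor(0) == 16, "an empty array is just the header");
static_assert(OffsetOfElementAt(3) == 16 + 3 * 8, "slots are laid out linearly");
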
diff --git a/deps/v8/src/objects/embedder-data-slot-inl.h b/deps/v8/src/objects/embedder-data-slot-inl.h
new file mode 100644
index 0000000000..b136fd288f
--- /dev/null
+++ b/deps/v8/src/objects/embedder-data-slot-inl.h
@@ -0,0 +1,127 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_EMBEDDER_DATA_SLOT_INL_H_
+#define V8_OBJECTS_EMBEDDER_DATA_SLOT_INL_H_
+
+#include "src/heap/heap-write-barrier-inl.h"
+#include "src/objects/embedder-data-array.h"
+#include "src/objects/embedder-data-slot.h"
+#include "src/objects/js-objects.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+EmbedderDataSlot::EmbedderDataSlot(EmbedderDataArray array, int entry_index)
+ : SlotBase(FIELD_ADDR(array,
+ EmbedderDataArray::OffsetOfElementAt(entry_index))) {}
+
+EmbedderDataSlot::EmbedderDataSlot(JSObject object, int embedder_field_index)
+ : SlotBase(FIELD_ADDR(
+ object, object->GetEmbedderFieldOffset(embedder_field_index))) {}
+
+Object EmbedderDataSlot::load_tagged() const {
+ return ObjectSlot(address() + kTaggedPayloadOffset).Relaxed_Load();
+}
+
+void EmbedderDataSlot::store_smi(Smi value) {
+ ObjectSlot(address() + kTaggedPayloadOffset).Relaxed_Store(value);
+#ifdef V8_COMPRESS_POINTERS
+ ObjectSlot(address() + kRawPayloadOffset).Relaxed_Store(Smi::kZero);
+#endif
+}
+
+// static
+void EmbedderDataSlot::store_tagged(EmbedderDataArray array, int entry_index,
+ Object value) {
+ int slot_offset = EmbedderDataArray::OffsetOfElementAt(entry_index);
+ ObjectSlot(FIELD_ADDR(array, slot_offset + kTaggedPayloadOffset))
+ .Relaxed_Store(value);
+ WRITE_BARRIER(array, slot_offset, value);
+#ifdef V8_COMPRESS_POINTERS
+ ObjectSlot(FIELD_ADDR(array, slot_offset + kRawPayloadOffset))
+ .Relaxed_Store(Smi::kZero);
+#endif
+}
+
+// static
+void EmbedderDataSlot::store_tagged(JSObject object, int embedder_field_index,
+ Object value) {
+ int slot_offset = object->GetEmbedderFieldOffset(embedder_field_index);
+ ObjectSlot(FIELD_ADDR(object, slot_offset + kTaggedPayloadOffset))
+ .Relaxed_Store(value);
+ WRITE_BARRIER(object, slot_offset, value);
+#ifdef V8_COMPRESS_POINTERS
+ ObjectSlot(FIELD_ADDR(object, slot_offset + kRawPayloadOffset))
+ .Relaxed_Store(Smi::kZero);
+#endif
+}
+
+bool EmbedderDataSlot::ToAlignedPointer(void** out_pointer) const {
+ Object tagged_value =
+ ObjectSlot(address() + kTaggedPayloadOffset).Relaxed_Load();
+ if (!tagged_value->IsSmi()) return false;
+#ifdef V8_COMPRESS_POINTERS
+ STATIC_ASSERT(kSmiShiftSize == 0);
+ STATIC_ASSERT(SmiValuesAre31Bits());
+ Address value_lo = static_cast<uint32_t>(tagged_value->ptr());
+ STATIC_ASSERT(kTaggedSize == kSystemPointerSize);
+ Address value_hi =
+ FullObjectSlot(address() + kRawPayloadOffset).Relaxed_Load()->ptr();
+ Address value = value_lo | (value_hi << 32);
+ *out_pointer = reinterpret_cast<void*>(value);
+#else
+ *out_pointer = reinterpret_cast<void*>(tagged_value->ptr());
+#endif
+ return true;
+}
+
+bool EmbedderDataSlot::store_aligned_pointer(void* ptr) {
+ Address value = reinterpret_cast<Address>(ptr);
+ if (!HAS_SMI_TAG(value)) return false;
+#ifdef V8_COMPRESS_POINTERS
+ STATIC_ASSERT(kSmiShiftSize == 0);
+ STATIC_ASSERT(SmiValuesAre31Bits());
+ // Sign-extend lower 32-bits in order to form a proper Smi value.
+ STATIC_ASSERT(kTaggedSize == kSystemPointerSize);
+ Address lo = static_cast<intptr_t>(static_cast<int32_t>(value));
+ ObjectSlot(address() + kTaggedPayloadOffset).Relaxed_Store(Smi(lo));
+ Address hi = value >> 32;
+ ObjectSlot(address() + kRawPayloadOffset).Relaxed_Store(Object(hi));
+#else
+ ObjectSlot(address() + kTaggedPayloadOffset).Relaxed_Store(Smi(value));
+#endif
+ return true;
+}
+
+EmbedderDataSlot::RawData EmbedderDataSlot::load_raw(
+ const DisallowHeapAllocation& no_gc) const {
+ STATIC_ASSERT(kTaggedSize == kSystemPointerSize);
+ return RawData{
+ ObjectSlot(address() + kTaggedPayloadOffset).Relaxed_Load()->ptr(),
+#ifdef V8_COMPRESS_POINTERS
+ FullObjectSlot(address() + kRawPayloadOffset).Relaxed_Load()->ptr()
+#endif
+ };
+}
+
+void EmbedderDataSlot::store_raw(const EmbedderDataSlot::RawData& data,
+ const DisallowHeapAllocation& no_gc) {
+ ObjectSlot(address() + kTaggedPayloadOffset)
+ .Relaxed_Store(Object(data.data_[0]));
+#ifdef V8_COMPRESS_POINTERS
+ ObjectSlot(address() + kRawPayloadOffset)
+ .Relaxed_Store(Object(data.data_[1]));
+#endif
+}
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_EMBEDDER_DATA_SLOT_INL_H_
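
Under pointer compression, the code above splits an aligned 64-bit pointer across the tagged payload (sign-extended low half, which must look like a Smi) and the raw payload (high half), then reassembles it on load. A standalone sketch of the round-trip:

#include <cstdint>

constexpr uint64_t kSmiTagMask = 1;  // low bit must be clear, as in HAS_SMI_TAG

inline bool SplitPointer(uint64_t value, uint64_t* lo, uint64_t* hi) {
  if (value & kSmiTagMask) return false;  // misaligned pointers are rejected
  // Sign-extend the low 32 bits so the tagged half is a valid Smi pattern.
  *lo = static_cast<uint64_t>(static_cast<int64_t>(static_cast<int32_t>(value)));
  *hi = value >> 32;
  return true;
}

inline uint64_t JoinPointer(uint64_t lo, uint64_t hi) {
  return static_cast<uint32_t>(lo) | (hi << 32);
}
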
diff --git a/deps/v8/src/objects/embedder-data-slot.h b/deps/v8/src/objects/embedder-data-slot.h
new file mode 100644
index 0000000000..e10c9dc9fb
--- /dev/null
+++ b/deps/v8/src/objects/embedder-data-slot.h
@@ -0,0 +1,82 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_EMBEDDER_DATA_SLOT_H_
+#define V8_OBJECTS_EMBEDDER_DATA_SLOT_H_
+
+#include <utility>
+
+#include "src/assert-scope.h"
+#include "src/globals.h"
+#include "src/objects/slots.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+class EmbedderDataArray;
+class JSObject;
+class Object;
+
+// An EmbedderDataSlot instance describes a kEmbedderDataSlotSize field ("slot")
+// holding embedder data, which may contain a raw aligned pointer or a tagged
+// pointer (Smi or heap object).
+// Its address() is the address of the slot.
+// The slot's contents can be read and written using the respective load_XX()
+// and store_XX() methods.
+// Storing a heap object through this slot may require triggering write
+// barriers, so this operation must be done via the static store_tagged()
+// methods.
+class EmbedderDataSlot
+ : public SlotBase<EmbedderDataSlot, Address, kEmbedderDataSlotSize> {
+ public:
+ EmbedderDataSlot() : SlotBase(kNullAddress) {}
+ V8_INLINE EmbedderDataSlot(EmbedderDataArray array, int entry_index);
+ V8_INLINE EmbedderDataSlot(JSObject object, int embedder_field_index);
+
+ // TODO(ishell): these offsets are currently little-endian specific.
+#ifdef V8_COMPRESS_POINTERS
+ static constexpr int kRawPayloadOffset = kTaggedSize;
+#endif
+ static constexpr int kTaggedPayloadOffset = 0;
+ static constexpr int kRequiredPtrAlignment = kSmiTagSize;
+
+ // Opaque type used for storing raw embedder data.
+ struct RawData {
+ const Address data_[kEmbedderDataSlotSizeInTaggedSlots];
+ };
+
+ V8_INLINE Object load_tagged() const;
+ V8_INLINE void store_smi(Smi value);
+
+ // Setting an arbitrary tagged value requires triggering a write barrier,
+ // which requires separate object and offset values; therefore these static
+ // functions also take the holder object and the field/entry index.
+ static V8_INLINE void store_tagged(EmbedderDataArray array, int entry_index,
+ Object value);
+ static V8_INLINE void store_tagged(JSObject object, int embedder_field_index,
+ Object value);
+
+ // Tries to reinterpret the value as an aligned pointer and, on success,
+ // sets *out_result to the pointer-like value and returns true. Note that
+ // some Smis can still look like aligned pointers.
+ // Returns false otherwise.
+ V8_INLINE bool ToAlignedPointer(void** out_result) const;
+
+ // Returns true if the pointer was successfully stored, or false if the
+ // pointer was improperly aligned.
+ V8_INLINE V8_WARN_UNUSED_RESULT bool store_aligned_pointer(void* ptr);
+
+ V8_INLINE RawData load_raw(const DisallowHeapAllocation& no_gc) const;
+ V8_INLINE void store_raw(const RawData& data,
+ const DisallowHeapAllocation& no_gc);
+};
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_EMBEDDER_DATA_SLOT_H_
diff --git a/deps/v8/src/objects/feedback-cell-inl.h b/deps/v8/src/objects/feedback-cell-inl.h
new file mode 100644
index 0000000000..5d8a5a5780
--- /dev/null
+++ b/deps/v8/src/objects/feedback-cell-inl.h
@@ -0,0 +1,29 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_FEEDBACK_CELL_INL_H_
+#define V8_OBJECTS_FEEDBACK_CELL_INL_H_
+
+#include "src/objects/feedback-cell.h"
+
+#include "src/heap/heap-write-barrier-inl.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+OBJECT_CONSTRUCTORS_IMPL(FeedbackCell, Struct)
+
+CAST_ACCESSOR(FeedbackCell)
+
+ACCESSORS(FeedbackCell, value, HeapObject, kValueOffset)
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_FEEDBACK_CELL_INL_H_
diff --git a/deps/v8/src/objects/feedback-cell.h b/deps/v8/src/objects/feedback-cell.h
new file mode 100644
index 0000000000..b8864ef4a2
--- /dev/null
+++ b/deps/v8/src/objects/feedback-cell.h
@@ -0,0 +1,51 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_FEEDBACK_CELL_H_
+#define V8_OBJECTS_FEEDBACK_CELL_H_
+
+#include "src/objects/struct.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+// This is a special cell used to maintain both the link between a
+// closure and its feedback vector, and a way to count the number of
+// closures created for a certain function per native context. There
+// is at most one FeedbackCell for each function in a native context.
+class FeedbackCell : public Struct {
+ public:
+ // [value]: value of the cell.
+ DECL_ACCESSORS(value, HeapObject)
+
+ DECL_CAST(FeedbackCell)
+
+ // Dispatched behavior.
+ DECL_PRINTER(FeedbackCell)
+ DECL_VERIFIER(FeedbackCell)
+
+// Layout description.
+#define FEEDBACK_CELL_FIELDS(V) \
+ V(kValueOffset, kTaggedSize) \
+ /* Total size. */ \
+ V(kSize, 0)
+
+ DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize, FEEDBACK_CELL_FIELDS)
+#undef FEEDBACK_CELL_FIELDS
+
+ typedef FixedBodyDescriptor<kValueOffset, kSize, kSize> BodyDescriptor;
+
+ OBJECT_CONSTRUCTORS(FeedbackCell, Struct);
+};
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_FEEDBACK_CELL_H_
diff --git a/deps/v8/src/objects/fixed-array-inl.h b/deps/v8/src/objects/fixed-array-inl.h
index 8c24ee80be..3bd36afd03 100644
--- a/deps/v8/src/objects/fixed-array-inl.h
+++ b/deps/v8/src/objects/fixed-array-inl.h
@@ -7,9 +7,15 @@
#include "src/objects/fixed-array.h"
-#include "src/objects-inl.h" // Needed for write barriers
+#include "src/conversions.h"
+#include "src/handles-inl.h"
+#include "src/heap/heap-write-barrier-inl.h"
#include "src/objects/bigint.h"
+#include "src/objects/heap-number-inl.h"
+#include "src/objects/map.h"
#include "src/objects/maybe-object-inl.h"
+#include "src/objects/oddball.h"
+#include "src/objects/slots.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -17,6 +23,32 @@
namespace v8 {
namespace internal {
+OBJECT_CONSTRUCTORS_IMPL(FixedArrayBase, HeapObject)
+OBJECT_CONSTRUCTORS_IMPL(FixedArray, FixedArrayBase)
+OBJECT_CONSTRUCTORS_IMPL(FixedDoubleArray, FixedArrayBase)
+OBJECT_CONSTRUCTORS_IMPL(FixedTypedArrayBase, FixedArrayBase)
+OBJECT_CONSTRUCTORS_IMPL(ArrayList, FixedArray)
+OBJECT_CONSTRUCTORS_IMPL(ByteArray, FixedArrayBase)
+OBJECT_CONSTRUCTORS_IMPL(TemplateList, FixedArray)
+OBJECT_CONSTRUCTORS_IMPL(WeakFixedArray, HeapObject)
+OBJECT_CONSTRUCTORS_IMPL(WeakArrayList, HeapObject)
+
+FixedArrayBase::FixedArrayBase(Address ptr, AllowInlineSmiStorage allow_smi)
+ : HeapObject(ptr, allow_smi) {
+ SLOW_DCHECK(
+ (allow_smi == AllowInlineSmiStorage::kAllowBeingASmi && IsSmi()) ||
+ IsFixedArrayBase());
+}
+
+ByteArray::ByteArray(Address ptr, AllowInlineSmiStorage allow_smi)
+ : FixedArrayBase(ptr, allow_smi) {
+ SLOW_DCHECK(
+ (allow_smi == AllowInlineSmiStorage::kAllowBeingASmi && IsSmi()) ||
+ IsByteArray());
+}
+
+NEVER_READ_ONLY_SPACE_IMPL(WeakArrayList)
+
CAST_ACCESSOR(ArrayList)
CAST_ACCESSOR(ByteArray)
CAST_ACCESSOR(FixedArray)
@@ -36,45 +68,45 @@ SMI_ACCESSORS(WeakArrayList, capacity, kCapacityOffset)
SYNCHRONIZED_SMI_ACCESSORS(WeakArrayList, capacity, kCapacityOffset)
SMI_ACCESSORS(WeakArrayList, length, kLengthOffset)
-Object* FixedArrayBase::unchecked_synchronized_length() const {
+Object FixedArrayBase::unchecked_synchronized_length() const {
return ACQUIRE_READ_FIELD(this, kLengthOffset);
}
ACCESSORS(FixedTypedArrayBase, base_pointer, Object, kBasePointerOffset)
-Object** FixedArray::GetFirstElementAddress() {
- return reinterpret_cast<Object**>(FIELD_ADDR(this, OffsetOfElementAt(0)));
+ObjectSlot FixedArray::GetFirstElementAddress() {
+ return RawField(OffsetOfElementAt(0));
}
bool FixedArray::ContainsOnlySmisOrHoles() {
- Object* the_hole = GetReadOnlyRoots().the_hole_value();
- Object** current = GetFirstElementAddress();
- for (int i = 0; i < length(); ++i) {
- Object* candidate = *current++;
+ Object the_hole = GetReadOnlyRoots().the_hole_value();
+ ObjectSlot current = GetFirstElementAddress();
+ for (int i = 0; i < length(); ++i, ++current) {
+ Object candidate = *current;
if (!candidate->IsSmi() && candidate != the_hole) return false;
}
return true;
}
-Object* FixedArray::get(int index) const {
+Object FixedArray::get(int index) const {
DCHECK(index >= 0 && index < this->length());
- return RELAXED_READ_FIELD(this, kHeaderSize + index * kPointerSize);
+ return RELAXED_READ_FIELD(this, kHeaderSize + index * kTaggedSize);
}
-Handle<Object> FixedArray::get(FixedArray* array, int index, Isolate* isolate) {
+Handle<Object> FixedArray::get(FixedArray array, int index, Isolate* isolate) {
return handle(array->get(index), isolate);
}
template <class T>
MaybeHandle<T> FixedArray::GetValue(Isolate* isolate, int index) const {
- Object* obj = get(index);
+ Object obj = get(index);
if (obj->IsUndefined(isolate)) return MaybeHandle<T>();
return Handle<T>(T::cast(obj), isolate);
}
template <class T>
Handle<T> FixedArray::GetValueChecked(Isolate* isolate, int index) const {
- Object* obj = get(index);
+ Object obj = get(index);
CHECK(!obj->IsUndefined(isolate));
return Handle<T>(T::cast(obj), isolate);
}
@@ -83,40 +115,39 @@ bool FixedArray::is_the_hole(Isolate* isolate, int index) {
return get(index)->IsTheHole(isolate);
}
-void FixedArray::set(int index, Smi* value) {
+void FixedArray::set(int index, Smi value) {
DCHECK_NE(map(), GetReadOnlyRoots().fixed_cow_array_map());
DCHECK_LT(index, this->length());
- DCHECK(reinterpret_cast<Object*>(value)->IsSmi());
- int offset = kHeaderSize + index * kPointerSize;
+ DCHECK(Object(value).IsSmi());
+ int offset = kHeaderSize + index * kTaggedSize;
RELAXED_WRITE_FIELD(this, offset, value);
}
-void FixedArray::set(int index, Object* value) {
+void FixedArray::set(int index, Object value) {
DCHECK_NE(GetReadOnlyRoots().fixed_cow_array_map(), map());
DCHECK(IsFixedArray());
DCHECK_GE(index, 0);
DCHECK_LT(index, this->length());
- int offset = kHeaderSize + index * kPointerSize;
- RELAXED_WRITE_FIELD(this, offset, value);
- WRITE_BARRIER(this, offset, value);
+ int offset = kHeaderSize + index * kTaggedSize;
+ RELAXED_WRITE_FIELD(*this, offset, value);
+ WRITE_BARRIER(*this, offset, value);
}
-void FixedArray::set(int index, Object* value, WriteBarrierMode mode) {
+void FixedArray::set(int index, Object value, WriteBarrierMode mode) {
DCHECK_NE(map(), GetReadOnlyRoots().fixed_cow_array_map());
DCHECK_GE(index, 0);
DCHECK_LT(index, this->length());
- int offset = kHeaderSize + index * kPointerSize;
- RELAXED_WRITE_FIELD(this, offset, value);
- CONDITIONAL_WRITE_BARRIER(this, offset, value, mode);
+ int offset = kHeaderSize + index * kTaggedSize;
+ RELAXED_WRITE_FIELD(*this, offset, value);
+ CONDITIONAL_WRITE_BARRIER(*this, offset, value, mode);
}
-void FixedArray::NoWriteBarrierSet(FixedArray* array, int index,
- Object* value) {
+void FixedArray::NoWriteBarrierSet(FixedArray array, int index, Object value) {
DCHECK_NE(array->map(), array->GetReadOnlyRoots().fixed_cow_array_map());
DCHECK_GE(index, 0);
DCHECK_LT(index, array->length());
DCHECK(!Heap::InNewSpace(value));
- RELAXED_WRITE_FIELD(array, kHeaderSize + index * kPointerSize, value);
+ RELAXED_WRITE_FIELD(array, kHeaderSize + index * kTaggedSize, value);
}
void FixedArray::set_undefined(int index) {
@@ -128,7 +159,7 @@ void FixedArray::set_undefined(Isolate* isolate, int index) {
}
void FixedArray::set_undefined(ReadOnlyRoots ro_roots, int index) {
- FixedArray::NoWriteBarrierSet(this, index, ro_roots.undefined_value());
+ FixedArray::NoWriteBarrierSet(*this, index, ro_roots.undefined_value());
}
void FixedArray::set_null(int index) { set_null(GetReadOnlyRoots(), index); }
@@ -138,7 +169,7 @@ void FixedArray::set_null(Isolate* isolate, int index) {
}
void FixedArray::set_null(ReadOnlyRoots ro_roots, int index) {
- FixedArray::NoWriteBarrierSet(this, index, ro_roots.null_value());
+ FixedArray::NoWriteBarrierSet(*this, index, ro_roots.null_value());
}
void FixedArray::set_the_hole(int index) {
@@ -150,7 +181,7 @@ void FixedArray::set_the_hole(Isolate* isolate, int index) {
}
void FixedArray::set_the_hole(ReadOnlyRoots ro_roots, int index) {
- FixedArray::NoWriteBarrierSet(this, index, ro_roots.the_hole_value());
+ FixedArray::NoWriteBarrierSet(*this, index, ro_roots.the_hole_value());
}
void FixedArray::FillWithHoles(int from, int to) {
@@ -159,12 +190,119 @@ void FixedArray::FillWithHoles(int from, int to) {
}
}
-Object** FixedArray::data_start() {
- return HeapObject::RawField(this, OffsetOfElementAt(0));
+ObjectSlot FixedArray::data_start() {
+ return RawField(OffsetOfElementAt(0));
+}
+
+ObjectSlot FixedArray::RawFieldOfElementAt(int index) {
+ return RawField(OffsetOfElementAt(index));
+}
+
+void FixedArray::MoveElements(Heap* heap, int dst_index, int src_index, int len,
+ WriteBarrierMode mode) {
+ DisallowHeapAllocation no_gc;
+ heap->MoveElements(*this, dst_index, src_index, len, mode);
+}
+
+// Perform a binary search in a fixed array.
+template <SearchMode search_mode, typename T>
+int BinarySearch(T* array, Name name, int valid_entries,
+ int* out_insertion_index) {
+ DCHECK(search_mode == ALL_ENTRIES || out_insertion_index == nullptr);
+ int low = 0;
+ int high = array->number_of_entries() - 1;
+ uint32_t hash = name->hash_field();
+ int limit = high;
+
+ DCHECK(low <= high);
+
+ while (low != high) {
+ int mid = low + (high - low) / 2;
+ Name mid_name = array->GetSortedKey(mid);
+ uint32_t mid_hash = mid_name->hash_field();
+
+ if (mid_hash >= hash) {
+ high = mid;
+ } else {
+ low = mid + 1;
+ }
+ }
+
+ for (; low <= limit; ++low) {
+ int sort_index = array->GetSortedKeyIndex(low);
+ Name entry = array->GetKey(sort_index);
+ uint32_t current_hash = entry->hash_field();
+ if (current_hash != hash) {
+ if (search_mode == ALL_ENTRIES && out_insertion_index != nullptr) {
+ *out_insertion_index = sort_index + (current_hash > hash ? 0 : 1);
+ }
+ return T::kNotFound;
+ }
+ if (entry == name) {
+ if (search_mode == ALL_ENTRIES || sort_index < valid_entries) {
+ return sort_index;
+ }
+ return T::kNotFound;
+ }
+ }
+
+ if (search_mode == ALL_ENTRIES && out_insertion_index != nullptr) {
+ *out_insertion_index = limit + 1;
+ }
+ return T::kNotFound;
+}
+
+// Perform a linear search in this fixed array. valid_entries is the number
+// of entry indices that are valid.
+template <SearchMode search_mode, typename T>
+int LinearSearch(T* array, Name name, int valid_entries,
+ int* out_insertion_index) {
+ if (search_mode == ALL_ENTRIES && out_insertion_index != nullptr) {
+ uint32_t hash = name->hash_field();
+ int len = array->number_of_entries();
+ for (int number = 0; number < len; number++) {
+ int sorted_index = array->GetSortedKeyIndex(number);
+ Name entry = array->GetKey(sorted_index);
+ uint32_t current_hash = entry->hash_field();
+ if (current_hash > hash) {
+ *out_insertion_index = sorted_index;
+ return T::kNotFound;
+ }
+ if (entry == name) return sorted_index;
+ }
+ *out_insertion_index = len;
+ return T::kNotFound;
+ } else {
+ DCHECK_LE(valid_entries, array->number_of_entries());
+ DCHECK_NULL(out_insertion_index); // Not supported here.
+ for (int number = 0; number < valid_entries; number++) {
+ if (array->GetKey(number) == name) return number;
+ }
+ return T::kNotFound;
+ }
}
-Object** FixedArray::RawFieldOfElementAt(int index) {
- return HeapObject::RawField(this, OffsetOfElementAt(index));
+template <SearchMode search_mode, typename T>
+int Search(T* array, Name name, int valid_entries, int* out_insertion_index) {
+ SLOW_DCHECK(array->IsSortedNoDuplicates());
+
+ if (valid_entries == 0) {
+ if (search_mode == ALL_ENTRIES && out_insertion_index != nullptr) {
+ *out_insertion_index = 0;
+ }
+ return T::kNotFound;
+ }
+
+ // Fast case: do linear search for small arrays.
+ const int kMaxElementsForLinearSearch = 8;
+ if (valid_entries <= kMaxElementsForLinearSearch) {
+ return LinearSearch<search_mode>(array, name, valid_entries,
+ out_insertion_index);
+ }
+
+ // Slow case: perform binary search.
+ return BinarySearch<search_mode>(array, name, valid_entries,
+ out_insertion_index);
}
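
Search() above picks its strategy by size: a linear scan for at most eight valid entries, otherwise the hash-ordered binary search. The same dispatch over a plain sorted array of hashes, for illustration only:

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <vector>

// hashes must be sorted ascending; returns an index or -1 when not found.
int Search(const std::vector<uint32_t>& hashes, uint32_t hash) {
  constexpr std::size_t kMaxElementsForLinearSearch = 8;
  if (hashes.size() <= kMaxElementsForLinearSearch) {
    for (std::size_t i = 0; i < hashes.size(); ++i)
      if (hashes[i] == hash) return static_cast<int>(i);
    return -1;
  }
  auto it = std::lower_bound(hashes.begin(), hashes.end(), hash);
  if (it != hashes.end() && *it == hash)
    return static_cast<int>(it - hashes.begin());
  return -1;
}
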
double FixedDoubleArray::get_scalar(int index) {
@@ -183,7 +321,7 @@ uint64_t FixedDoubleArray::get_representation(int index) {
return READ_UINT64_FIELD(this, offset);
}
-Handle<Object> FixedDoubleArray::get(FixedDoubleArray* array, int index,
+Handle<Object> FixedDoubleArray::get(FixedDoubleArray array, int index,
Isolate* isolate) {
if (array->is_the_hole(index)) {
return isolate->factory()->the_hole_value();
@@ -223,8 +361,12 @@ bool FixedDoubleArray::is_the_hole(int index) {
return get_representation(index) == kHoleNanInt64;
}
-double* FixedDoubleArray::data_start() {
- return reinterpret_cast<double*>(FIELD_ADDR(this, kHeaderSize));
+void FixedDoubleArray::MoveElements(Heap* heap, int dst_index, int src_index,
+ int len, WriteBarrierMode mode) {
+ DCHECK_EQ(SKIP_WRITE_BARRIER, mode);
+ double* data_start =
+ reinterpret_cast<double*>(FIELD_ADDR(*this, kHeaderSize));
+ MemMove(data_start + dst_index, data_start + src_index, len * kDoubleSize);
}
void FixedDoubleArray::FillWithHoles(int from, int to) {
@@ -233,97 +375,92 @@ void FixedDoubleArray::FillWithHoles(int from, int to) {
}
}
-MaybeObject* WeakFixedArray::Get(int index) const {
+MaybeObject WeakFixedArray::Get(int index) const {
DCHECK(index >= 0 && index < this->length());
- return RELAXED_READ_WEAK_FIELD(this, OffsetOfElementAt(index));
+ return RELAXED_READ_WEAK_FIELD(*this, OffsetOfElementAt(index));
}
-void WeakFixedArray::Set(int index, MaybeObject* value) {
+void WeakFixedArray::Set(int index, MaybeObject value) {
DCHECK_GE(index, 0);
DCHECK_LT(index, length());
int offset = OffsetOfElementAt(index);
- RELAXED_WRITE_FIELD(this, offset, value);
- WEAK_WRITE_BARRIER(this, offset, value);
+ RELAXED_WRITE_WEAK_FIELD(*this, offset, value);
+ WEAK_WRITE_BARRIER(*this, offset, value);
}
-void WeakFixedArray::Set(int index, MaybeObject* value, WriteBarrierMode mode) {
+void WeakFixedArray::Set(int index, MaybeObject value, WriteBarrierMode mode) {
DCHECK_GE(index, 0);
DCHECK_LT(index, length());
int offset = OffsetOfElementAt(index);
- RELAXED_WRITE_FIELD(this, offset, value);
- CONDITIONAL_WEAK_WRITE_BARRIER(this, offset, value, mode);
-}
-
-MaybeObject** WeakFixedArray::data_start() {
- return HeapObject::RawMaybeWeakField(this, kHeaderSize);
+ RELAXED_WRITE_WEAK_FIELD(*this, offset, value);
+ CONDITIONAL_WEAK_WRITE_BARRIER(*this, offset, value, mode);
}
-MaybeObject** WeakFixedArray::RawFieldOfElementAt(int index) {
- return HeapObject::RawMaybeWeakField(this, OffsetOfElementAt(index));
+MaybeObjectSlot WeakFixedArray::data_start() {
+ return RawMaybeWeakField(kHeaderSize);
}
-MaybeObject** WeakFixedArray::GetFirstElementAddress() {
- return reinterpret_cast<MaybeObject**>(
- FIELD_ADDR(this, OffsetOfElementAt(0)));
+MaybeObjectSlot WeakFixedArray::RawFieldOfElementAt(int index) {
+ return RawMaybeWeakField(OffsetOfElementAt(index));
}
-MaybeObject* WeakArrayList::Get(int index) const {
+MaybeObject WeakArrayList::Get(int index) const {
DCHECK(index >= 0 && index < this->capacity());
- return RELAXED_READ_WEAK_FIELD(this, OffsetOfElementAt(index));
+ return RELAXED_READ_WEAK_FIELD(*this, OffsetOfElementAt(index));
}
-void WeakArrayList::Set(int index, MaybeObject* value, WriteBarrierMode mode) {
+void WeakArrayList::Set(int index, MaybeObject value, WriteBarrierMode mode) {
DCHECK_GE(index, 0);
DCHECK_LT(index, this->capacity());
int offset = OffsetOfElementAt(index);
- RELAXED_WRITE_FIELD(this, offset, value);
- CONDITIONAL_WEAK_WRITE_BARRIER(this, offset, value, mode);
+ RELAXED_WRITE_WEAK_FIELD(*this, offset, value);
+ CONDITIONAL_WEAK_WRITE_BARRIER(*this, offset, value, mode);
}
-MaybeObject** WeakArrayList::data_start() {
- return HeapObject::RawMaybeWeakField(this, kHeaderSize);
+MaybeObjectSlot WeakArrayList::data_start() {
+ return RawMaybeWeakField(kHeaderSize);
}
-HeapObject* WeakArrayList::Iterator::Next() {
- if (array_ != nullptr) {
+HeapObject WeakArrayList::Iterator::Next() {
+ if (!array_.is_null()) {
while (index_ < array_->length()) {
- MaybeObject* item = array_->Get(index_++);
+ MaybeObject item = array_->Get(index_++);
DCHECK(item->IsWeakOrCleared());
if (!item->IsCleared()) return item->GetHeapObjectAssumeWeak();
}
- array_ = nullptr;
+ array_ = WeakArrayList();
}
- return nullptr;
+ return HeapObject();
}
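
Throughout this hunk raw pointers become value types, so "absent" can no longer be spelled nullptr: Next() returns a default-constructed HeapObject and the iterator parks array_ as an empty WeakArrayList. A self-contained analogue of that null-object convention (Handleish is a made-up name):

    #include <cstdint>

    // Value-type handle wrapping a tagged word; the null state is
    // ptr() == 0 and is tested with is_null(), not a nullptr compare.
    class Handleish {
     public:
      Handleish() : ptr_(0) {}  // the "null" object
      explicit Handleish(uintptr_t ptr) : ptr_(ptr) {}
      bool is_null() const { return ptr_ == 0; }
      uintptr_t ptr() const { return ptr_; }

     private:
      uintptr_t ptr_;
    };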
int ArrayList::Length() const {
- if (FixedArray::cast(this)->length() == 0) return 0;
- return Smi::ToInt(FixedArray::cast(this)->get(kLengthIndex));
+ if (FixedArray::cast(*this)->length() == 0) return 0;
+ return Smi::ToInt(FixedArray::cast(*this)->get(kLengthIndex));
}
void ArrayList::SetLength(int length) {
- return FixedArray::cast(this)->set(kLengthIndex, Smi::FromInt(length));
+ return FixedArray::cast(*this)->set(kLengthIndex, Smi::FromInt(length));
}
-Object* ArrayList::Get(int index) const {
- return FixedArray::cast(this)->get(kFirstIndex + index);
+Object ArrayList::Get(int index) const {
+ return FixedArray::cast(*this)->get(kFirstIndex + index);
}
-Object** ArrayList::Slot(int index) {
- return data_start() + kFirstIndex + index;
+ObjectSlot ArrayList::Slot(int index) {
+ return RawField(OffsetOfElementAt(kFirstIndex + index));
}
-void ArrayList::Set(int index, Object* obj, WriteBarrierMode mode) {
- FixedArray::cast(this)->set(kFirstIndex + index, obj, mode);
+void ArrayList::Set(int index, Object obj, WriteBarrierMode mode) {
+ FixedArray::cast(*this)->set(kFirstIndex + index, obj, mode);
}
-void ArrayList::Clear(int index, Object* undefined) {
+void ArrayList::Clear(int index, Object undefined) {
DCHECK(undefined->IsUndefined());
- FixedArray::cast(this)->set(kFirstIndex + index, undefined,
- SKIP_WRITE_BARRIER);
+ FixedArray::cast(*this)->set(kFirstIndex + index, undefined,
+ SKIP_WRITE_BARRIER);
}
-int ByteArray::Size() { return RoundUp(length() + kHeaderSize, kPointerSize); }
+int ByteArray::Size() { return RoundUp(length() + kHeaderSize, kTaggedSize); }
byte ByteArray::get(int index) const {
DCHECK(index >= 0 && index < this->length());
@@ -374,12 +511,12 @@ void ByteArray::clear_padding() {
memset(reinterpret_cast<void*>(address() + data_size), 0, Size() - data_size);
}
-ByteArray* ByteArray::FromDataStartAddress(Address address) {
+ByteArray ByteArray::FromDataStartAddress(Address address) {
DCHECK_TAG_ALIGNED(address);
- return reinterpret_cast<ByteArray*>(address - kHeaderSize + kHeapObjectTag);
+ return ByteArray::cast(Object(address - kHeaderSize + kHeapObjectTag));
}
-int ByteArray::DataSize() const { return RoundUp(length(), kPointerSize); }
+int ByteArray::DataSize() const { return RoundUp(length(), kTaggedSize); }
int ByteArray::ByteArraySize() { return SizeFor(this->length()); }
@@ -387,15 +524,16 @@ byte* ByteArray::GetDataStartAddress() {
return reinterpret_cast<byte*>(address() + kHeaderSize);
}
-template <class T>
-PodArray<T>* PodArray<T>::cast(Object* object) {
- DCHECK(object->IsByteArray());
- return reinterpret_cast<PodArray<T>*>(object);
+byte* ByteArray::GetDataEndAddress() {
+ return GetDataStartAddress() + length();
}
+
+template <class T>
+PodArray<T>::PodArray(Address ptr) : ByteArray(ptr) {}
+
template <class T>
-const PodArray<T>* PodArray<T>::cast(const Object* object) {
- DCHECK(object->IsByteArray());
- return reinterpret_cast<const PodArray<T>*>(object);
+PodArray<T> PodArray<T>::cast(Object object) {
+ return PodArray<T>(object.ptr());
}
// static
@@ -407,7 +545,7 @@ Handle<PodArray<T>> PodArray<T>::New(Isolate* isolate, int length,
}
template <class T>
-int PodArray<T>::length() {
+int PodArray<T>::length() const {
return ByteArray::length() / sizeof(T);
}
@@ -424,8 +562,7 @@ void FixedTypedArrayBase::set_external_pointer(void* value,
void* FixedTypedArrayBase::DataPtr() {
return reinterpret_cast<void*>(
- reinterpret_cast<intptr_t>(base_pointer()) +
- reinterpret_cast<intptr_t>(external_pointer()));
+ base_pointer()->ptr() + reinterpret_cast<intptr_t>(external_pointer()));
}
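
DataPtr adds base_pointer()->ptr() to external_pointer(): on-heap typed arrays store the object itself as base plus a small offset, while off-heap arrays store a null base so the "offset" is the raw backing-store address. A sketch of that encoding (names illustrative):

    #include <cstdint>

    // base == 0 -> off-heap: external holds the real address.
    // base != 0 -> on-heap:  external holds an offset from base.
    void* BackingStore(uintptr_t base, intptr_t external) {
      return reinterpret_cast<void*>(base + external);
    }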
int FixedTypedArrayBase::ElementSize(InstanceType type) {
@@ -662,13 +799,13 @@ inline uint64_t FixedTypedArray<BigUint64ArrayTraits>::FromHandle(
template <class Traits>
Handle<Object> FixedTypedArray<Traits>::get(Isolate* isolate,
- FixedTypedArray<Traits>* array,
+ FixedTypedArray<Traits> array,
int index) {
return Traits::ToHandle(isolate, array->get_scalar(index));
}
template <class Traits>
-void FixedTypedArray<Traits>::SetValue(uint32_t index, Object* value) {
+void FixedTypedArray<Traits>::SetValue(uint32_t index, Object value) {
ElementType cast_value = Traits::defaultValue();
if (value->IsSmi()) {
int int_value = Smi::ToInt(value);
@@ -686,14 +823,14 @@ void FixedTypedArray<Traits>::SetValue(uint32_t index, Object* value) {
template <>
inline void FixedTypedArray<BigInt64ArrayTraits>::SetValue(uint32_t index,
- Object* value) {
+ Object value) {
DCHECK(value->IsBigInt());
set(index, BigInt::cast(value)->AsInt64());
}
template <>
inline void FixedTypedArray<BigUint64ArrayTraits>::SetValue(uint32_t index,
- Object* value) {
+ Object value) {
DCHECK(value->IsBigInt());
set(index, BigInt::cast(value)->AsUint64());
}
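
SetValue converts Smi and HeapNumber inputs through the element type's cast and stores the Traits default for anything else, with the Big(U)int64 specializations insisting on BigInt input instead. A hedged sketch of that convert-or-default shape:

    #include <optional>

    // nullopt models a non-number input; numbers narrow via static_cast,
    // everything else stores the element default, as SetValue does above.
    template <class Elem>
    Elem ToElement(std::optional<double> number) {
      return number ? static_cast<Elem>(*number) : Elem{0};
    }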
@@ -750,32 +887,26 @@ STATIC_CONST_MEMBER_DEFINITION const InstanceType
FixedTypedArray<Traits>::kInstanceType;
template <class Traits>
-FixedTypedArray<Traits>* FixedTypedArray<Traits>::cast(Object* object) {
- DCHECK(object->IsHeapObject() &&
- HeapObject::cast(object)->map()->instance_type() ==
- Traits::kInstanceType);
- return reinterpret_cast<FixedTypedArray<Traits>*>(object);
+FixedTypedArray<Traits>::FixedTypedArray(Address ptr)
+ : FixedTypedArrayBase(ptr) {
+ DCHECK(IsHeapObject() && map()->instance_type() == Traits::kInstanceType);
}
template <class Traits>
-const FixedTypedArray<Traits>* FixedTypedArray<Traits>::cast(
- const Object* object) {
- DCHECK(object->IsHeapObject() &&
- HeapObject::cast(object)->map()->instance_type() ==
- Traits::kInstanceType);
- return reinterpret_cast<FixedTypedArray<Traits>*>(object);
+FixedTypedArray<Traits> FixedTypedArray<Traits>::cast(Object object) {
+ return FixedTypedArray<Traits>(object.ptr());
}
int TemplateList::length() const {
- return Smi::ToInt(FixedArray::cast(this)->get(kLengthIndex));
+ return Smi::ToInt(FixedArray::cast(*this)->get(kLengthIndex));
}
-Object* TemplateList::get(int index) const {
- return FixedArray::cast(this)->get(kFirstElementIndex + index);
+Object TemplateList::get(int index) const {
+ return FixedArray::cast(*this)->get(kFirstElementIndex + index);
}
-void TemplateList::set(int index, Object* value) {
- FixedArray::cast(this)->set(kFirstElementIndex + index, value);
+void TemplateList::set(int index, Object value) {
+ FixedArray::cast(*this)->set(kFirstElementIndex + index, value);
}
} // namespace internal
diff --git a/deps/v8/src/objects/fixed-array.h b/deps/v8/src/objects/fixed-array.h
index 867d04e638..efb80a8ce4 100644
--- a/deps/v8/src/objects/fixed-array.h
+++ b/deps/v8/src/objects/fixed-array.h
@@ -6,15 +6,17 @@
#define V8_OBJECTS_FIXED_ARRAY_H_
#include "src/maybe-handles.h"
-#include "src/objects.h"
+#include "src/objects/instance-type.h"
+#include "src/objects/slots.h"
+#include "src/objects/smi.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
namespace v8 {
namespace internal {
-
-class WeakArrayBodyDescriptor;
+typedef FlexibleWeakBodyDescriptor<HeapObject::kHeaderSize>
+ WeakArrayBodyDescriptor;
#define FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(V) \
V(BYTECODE_ARRAY_CONSTANT_POOL_SUB_TYPE) \
@@ -77,7 +79,7 @@ class FixedArrayBase : public HeapObject {
inline int synchronized_length() const;
inline void synchronized_set_length(int value);
- inline Object* unchecked_synchronized_length() const;
+ inline Object unchecked_synchronized_length() const;
DECL_CAST(FixedArrayBase)
@@ -95,17 +97,29 @@ class FixedArrayBase : public HeapObject {
#endif // V8_HOST_ARCH_32_BIT
// Layout description.
- // Length is smi tagged when it is stored.
- static const int kLengthOffset = HeapObject::kHeaderSize;
- static const int kHeaderSize = kLengthOffset + kPointerSize;
+#define FIXED_ARRAY_BASE_FIELDS(V) \
+ V(kLengthOffset, kTaggedSize) \
+ /* Header size. */ \
+ V(kHeaderSize, 0)
+
+ DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize,
+ FIXED_ARRAY_BASE_FIELDS)
+#undef FIXED_ARRAY_BASE_FIELDS
+
+ protected:
+ // Special-purpose constructor for subclasses that have fast paths where
+ // their ptr() is a Smi.
+ inline FixedArrayBase(Address ptr, AllowInlineSmiStorage allow_smi);
+
+ OBJECT_CONSTRUCTORS(FixedArrayBase, HeapObject)
};
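
The hand-chained kLengthOffset/kHeaderSize constants become a field list fed to DEFINE_FIELD_OFFSET_CONSTANTS. The idiom expands (name, size) pairs into an enum whose values accumulate automatically; a hedged standalone sketch (the exact V8 expansion may differ):

    #define EXAMPLE_FIELDS(V) \
      V(kLengthOffset, 8)     \
      V(kHeaderSize, 0)

    // Each field yields Name and NameEnd = Name + Size - 1, so the next
    // Name starts right after the previous field ends.
    #define DEFINE_ONE(Name, Size) Name, Name##End = Name + (Size)-1,
    enum : int { kStart = 16 - 1, EXAMPLE_FIELDS(DEFINE_ONE) };
    #undef DEFINE_ONE
    #undef EXAMPLE_FIELDS

    static_assert(kLengthOffset == 16, "first field begins at the base");
    static_assert(kHeaderSize == 24, "header size lands after all fields");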
-// FixedArray describes fixed-sized arrays with element type Object*.
+// FixedArray describes fixed-sized arrays with element type Object.
class FixedArray : public FixedArrayBase {
public:
// Setter and getter for elements.
- inline Object* get(int index) const;
- static inline Handle<Object> get(FixedArray* array, int index,
+ inline Object get(int index) const;
+ static inline Handle<Object> get(FixedArray array, int index,
Isolate* isolate);
template <class T>
MaybeHandle<T> GetValue(Isolate* isolate, int index) const;
@@ -120,13 +134,13 @@ class FixedArray : public FixedArrayBase {
PretenureFlag pretenure = NOT_TENURED);
// Setter that uses write barrier.
- inline void set(int index, Object* value);
+ inline void set(int index, Object value);
inline bool is_the_hole(Isolate* isolate, int index);
// Setter that doesn't need write barrier.
- inline void set(int index, Smi* value);
+ inline void set(int index, Smi value);
// Setter with explicit barrier mode.
- inline void set(int index, Object* value, WriteBarrierMode mode);
+ inline void set(int index, Object value, WriteBarrierMode mode);
// Setters for frequently used oddballs located in old space.
inline void set_undefined(int index);
@@ -136,13 +150,16 @@ class FixedArray : public FixedArrayBase {
inline void set_the_hole(int index);
inline void set_the_hole(Isolate* isolate, int index);
- inline Object** GetFirstElementAddress();
+ inline ObjectSlot GetFirstElementAddress();
inline bool ContainsOnlySmisOrHoles();
// Returns true iff the elements are Numbers and sorted ascending.
bool ContainsSortedNumbers();
// Gives access to raw memory which stores the array's data.
- inline Object** data_start();
+ inline ObjectSlot data_start();
+
+ inline void MoveElements(Heap* heap, int dst_index, int src_index, int len,
+ WriteBarrierMode mode);
inline void FillWithHoles(int from, int to);
@@ -155,45 +172,41 @@ class FixedArray : public FixedArrayBase {
int new_length);
// Copy a sub array from the receiver to dest.
- void CopyTo(int pos, FixedArray* dest, int dest_pos, int len) const;
+ void CopyTo(int pos, FixedArray dest, int dest_pos, int len) const;
// Garbage collection support.
static constexpr int SizeFor(int length) {
- return kHeaderSize + length * kPointerSize;
+ return kHeaderSize + length * kTaggedSize;
}
// Code Generation support.
static constexpr int OffsetOfElementAt(int index) { return SizeFor(index); }
// Garbage collection support.
- inline Object** RawFieldOfElementAt(int index);
+ inline ObjectSlot RawFieldOfElementAt(int index);
DECL_CAST(FixedArray)
// Maximally allowed length of a FixedArray.
- static const int kMaxLength = (kMaxSize - kHeaderSize) / kPointerSize;
+ static const int kMaxLength = (kMaxSize - kHeaderSize) / kTaggedSize;
static_assert(Internals::IsValidSmi(kMaxLength),
"FixedArray maxLength not a Smi");
// Maximally allowed length for regular (non large object space) object.
STATIC_ASSERT(kMaxRegularHeapObjectSize < kMaxSize);
static const int kMaxRegularLength =
- (kMaxRegularHeapObjectSize - kHeaderSize) / kPointerSize;
+ (kMaxRegularHeapObjectSize - kHeaderSize) / kTaggedSize;
// Dispatched behavior.
DECL_PRINTER(FixedArray)
DECL_VERIFIER(FixedArray)
-#ifdef DEBUG
- // Checks if two FixedArrays have identical contents.
- bool IsEqualTo(FixedArray* other);
-#endif
typedef FlexibleBodyDescriptor<kHeaderSize> BodyDescriptor;
protected:
// Set operation on FixedArray without using write barriers. Can
// only be used for storing old space objects or smis.
- static inline void NoWriteBarrierSet(FixedArray* array, int index,
- Object* value);
+ static inline void NoWriteBarrierSet(FixedArray array, int index,
+ Object value);
private:
STATIC_ASSERT(kHeaderSize == Internals::kFixedArrayHeaderSize);
@@ -202,16 +215,13 @@ class FixedArray : public FixedArrayBase {
inline void set_null(ReadOnlyRoots ro_roots, int index);
inline void set_the_hole(ReadOnlyRoots ro_roots, int index);
- DISALLOW_IMPLICIT_CONSTRUCTORS(FixedArray);
+ OBJECT_CONSTRUCTORS(FixedArray, FixedArrayBase);
};
// FixedArray alias added only because of IsFixedArrayExact() predicate, which
// checks for the exact instance type FIXED_ARRAY_TYPE instead of a range
// check: [FIRST_FIXED_ARRAY_TYPE, LAST_FIXED_ARRAY_TYPE].
-class FixedArrayExact final : public FixedArray {
- public:
- DECL_CAST(FixedArrayExact)
-};
+class FixedArrayExact final : public FixedArray {};
// FixedDoubleArray describes fixed-sized arrays with element type double.
class FixedDoubleArray : public FixedArrayBase {
@@ -219,7 +229,7 @@ class FixedDoubleArray : public FixedArrayBase {
// Setter and getter for elements.
inline double get_scalar(int index);
inline uint64_t get_representation(int index);
- static inline Handle<Object> get(FixedDoubleArray* array, int index,
+ static inline Handle<Object> get(FixedDoubleArray array, int index,
Isolate* isolate);
inline void set(int index, double value);
inline void set_the_hole(Isolate* isolate, int index);
@@ -234,8 +244,8 @@ class FixedDoubleArray : public FixedArrayBase {
return kHeaderSize + length * kDoubleSize;
}
- // Gives access to raw memory which stores the array's data.
- inline double* data_start();
+ inline void MoveElements(Heap* heap, int dst_index, int src_index, int len,
+ WriteBarrierMode mode);
inline void FillWithHoles(int from, int to);
@@ -255,26 +265,25 @@ class FixedDoubleArray : public FixedArrayBase {
class BodyDescriptor;
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(FixedDoubleArray);
+ OBJECT_CONSTRUCTORS(FixedDoubleArray, FixedArrayBase);
};
// WeakFixedArray describes fixed-sized arrays with element type
-// MaybeObject*.
+// MaybeObject.
class WeakFixedArray : public HeapObject {
public:
DECL_CAST(WeakFixedArray)
- inline MaybeObject* Get(int index) const;
+ inline MaybeObject Get(int index) const;
// Setter that uses write barrier.
- inline void Set(int index, MaybeObject* value);
+ inline void Set(int index, MaybeObject value);
// Setter with explicit barrier mode.
- inline void Set(int index, MaybeObject* value, WriteBarrierMode mode);
+ inline void Set(int index, MaybeObject value, WriteBarrierMode mode);
static constexpr int SizeFor(int length) {
- return kHeaderSize + length * kPointerSize;
+ return kHeaderSize + length * kTaggedSize;
}
DECL_INT_ACCESSORS(length)
@@ -284,28 +293,33 @@ class WeakFixedArray : public HeapObject {
inline void synchronized_set_length(int value);
// Gives access to raw memory which stores the array's data.
- inline MaybeObject** data_start();
-
- inline MaybeObject** RawFieldOfElementAt(int index);
+ inline MaybeObjectSlot data_start();
- inline MaybeObject** GetFirstElementAddress();
+ inline MaybeObjectSlot RawFieldOfElementAt(int index);
DECL_PRINTER(WeakFixedArray)
DECL_VERIFIER(WeakFixedArray)
typedef WeakArrayBodyDescriptor BodyDescriptor;
- static const int kLengthOffset = HeapObject::kHeaderSize;
- static const int kHeaderSize = kLengthOffset + kPointerSize;
+ // Layout description.
+#define WEAK_FIXED_ARRAY_FIELDS(V) \
+ V(kLengthOffset, kTaggedSize) \
+ /* Header size. */ \
+ V(kHeaderSize, 0)
+
+ DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize,
+ WEAK_FIXED_ARRAY_FIELDS)
+#undef WEAK_FIXED_ARRAY_FIELDS
static const int kMaxLength =
- (FixedArray::kMaxSize - kHeaderSize) / kPointerSize;
+ (FixedArray::kMaxSize - kHeaderSize) / kTaggedSize;
static_assert(Internals::IsValidSmi(kMaxLength),
"WeakFixedArray maxLength not a Smi");
protected:
static int OffsetOfElementAt(int index) {
- return kHeaderSize + index * kPointerSize;
+ return kHeaderSize + index * kTaggedSize;
}
private:
@@ -313,7 +327,7 @@ class WeakFixedArray : public HeapObject {
static const int kFirstIndex = 1;
- DISALLOW_IMPLICIT_CONSTRUCTORS(WeakFixedArray);
+ OBJECT_CONSTRUCTORS(WeakFixedArray, HeapObject);
};
// WeakArrayList is like a WeakFixedArray with static convenience methods for
@@ -323,6 +337,7 @@ class WeakFixedArray : public HeapObject {
// dynamically with O(1) amortized insertion.
class WeakArrayList : public HeapObject {
public:
+ NEVER_READ_ONLY_SPACE
DECL_CAST(WeakArrayList)
DECL_VERIFIER(WeakArrayList)
DECL_PRINTER(WeakArrayList)
@@ -331,20 +346,20 @@ class WeakArrayList : public HeapObject {
Handle<WeakArrayList> array,
const MaybeObjectHandle& value);
- inline MaybeObject* Get(int index) const;
+ inline MaybeObject Get(int index) const;
// Set the element at index to obj. The underlying array must be large enough.
// If you need to grow the WeakArrayList, use the static AddToEnd() method
// instead.
- inline void Set(int index, MaybeObject* value,
+ inline void Set(int index, MaybeObject value,
WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
static constexpr int SizeForCapacity(int capacity) {
- return kHeaderSize + capacity * kPointerSize;
+ return kHeaderSize + capacity * kTaggedSize;
}
// Gives access to raw memory which stores the array's data.
- inline MaybeObject** data_start();
+ inline MaybeObjectSlot data_start();
bool IsFull();
@@ -355,14 +370,21 @@ class WeakArrayList : public HeapObject {
inline int synchronized_capacity() const;
inline void synchronized_set_capacity(int value);
- typedef WeakArrayBodyDescriptor BodyDescriptor;
- static const int kCapacityOffset = HeapObject::kHeaderSize;
- static const int kLengthOffset = kCapacityOffset + kPointerSize;
- static const int kHeaderSize = kLengthOffset + kPointerSize;
+ // Layout description.
+#define WEAK_ARRAY_LIST_FIELDS(V) \
+ V(kCapacityOffset, kTaggedSize) \
+ V(kLengthOffset, kTaggedSize) \
+ /* Header size. */ \
+ V(kHeaderSize, 0)
+
+ DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize, WEAK_ARRAY_LIST_FIELDS)
+#undef WEAK_ARRAY_LIST_FIELDS
+
+ typedef WeakArrayBodyDescriptor BodyDescriptor;
static const int kMaxCapacity =
- (FixedArray::kMaxSize - kHeaderSize) / kPointerSize;
+ (FixedArray::kMaxSize - kHeaderSize) / kTaggedSize;
static Handle<WeakArrayList> EnsureSpace(
Isolate* isolate, Handle<WeakArrayList> array, int length,
@@ -377,27 +399,29 @@ class WeakArrayList : public HeapObject {
// duplicates.
bool RemoveOne(const MaybeObjectHandle& value);
- class Iterator {
- public:
- explicit Iterator(WeakArrayList* array) : index_(0), array_(array) {}
-
- inline HeapObject* Next();
-
- private:
- int index_;
- WeakArrayList* array_;
-#ifdef DEBUG
- DisallowHeapAllocation no_gc_;
-#endif // DEBUG
- DISALLOW_COPY_AND_ASSIGN(Iterator);
- };
+ class Iterator;
private:
static int OffsetOfElementAt(int index) {
- return kHeaderSize + index * kPointerSize;
+ return kHeaderSize + index * kTaggedSize;
}
- DISALLOW_IMPLICIT_CONSTRUCTORS(WeakArrayList);
+ OBJECT_CONSTRUCTORS(WeakArrayList, HeapObject);
+};
+
+class WeakArrayList::Iterator {
+ public:
+ explicit Iterator(WeakArrayList array) : index_(0), array_(array) {}
+
+ inline HeapObject Next();
+
+ private:
+ int index_;
+ WeakArrayList array_;
+#ifdef DEBUG
+ DisallowHeapAllocation no_gc_;
+#endif // DEBUG
+ DISALLOW_COPY_AND_ASSIGN(Iterator);
};
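
The now out-of-line Iterator keeps its DEBUG-only DisallowHeapAllocation member: holding one documents, and in debug builds enforces, that nothing allocates (and hence no GC moves the array) while iteration is in flight. A toy scope guard in the same spirit (illustrative, not V8's mechanism):

    #include <cassert>
    #include <cstddef>
    #include <new>

    struct NoAllocScope {           // counts live no-allocation scopes
      static inline int depth = 0;  // C++17 inline static member
      NoAllocScope() { ++depth; }
      ~NoAllocScope() { --depth; }
    };

    void* AllocateChecked(std::size_t n) {
      assert(NoAllocScope::depth == 0 && "allocation inside no-alloc scope");
      return ::operator new(n);
    }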
// Generic array grows dynamically with O(1) amortized insertion.
@@ -422,16 +446,16 @@ class ArrayList : public FixedArray {
// Sets the Length() as used by Elements(). Does not change the underlying
// storage capacity, i.e., length().
inline void SetLength(int length);
- inline Object* Get(int index) const;
- inline Object** Slot(int index);
+ inline Object Get(int index) const;
+ inline ObjectSlot Slot(int index);
// Set the element at index to obj. The underlying array must be large enough.
// If you need to grow the ArrayList, use the static Add() methods instead.
- inline void Set(int index, Object* obj,
+ inline void Set(int index, Object obj,
WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
// Set the element at index to undefined. This does not change the Length().
- inline void Clear(int index, Object* undefined);
+ inline void Clear(int index, Object undefined);
// Return a copy of the list of size Length() without the first entry. The
// number returned by Length() is stored in the first entry.
@@ -443,13 +467,13 @@ class ArrayList : public FixedArray {
Handle<ArrayList> array, int length);
static const int kLengthIndex = 0;
static const int kFirstIndex = 1;
- DISALLOW_IMPLICIT_CONSTRUCTORS(ArrayList);
+ OBJECT_CONSTRUCTORS(ArrayList, FixedArray);
};
enum SearchMode { ALL_ENTRIES, VALID_ENTRIES };
template <SearchMode search_mode, typename T>
-inline int Search(T* array, Name* name, int valid_entries = 0,
+inline int Search(T* array, Name name, int valid_entries = 0,
int* out_insertion_index = nullptr);
// ByteArray represents fixed sized byte arrays. Used for the relocation info
@@ -485,18 +509,20 @@ class ByteArray : public FixedArrayBase {
// array, this function returns the number of elements a byte array should
// have.
static int LengthFor(int size_in_bytes) {
- DCHECK(IsAligned(size_in_bytes, kPointerSize));
+ DCHECK(IsAligned(size_in_bytes, kTaggedSize));
DCHECK_GE(size_in_bytes, kHeaderSize);
return size_in_bytes - kHeaderSize;
}
// Returns data start address.
inline byte* GetDataStartAddress();
+ // Returns address of the past-the-end element.
+ inline byte* GetDataEndAddress();
inline int DataSize() const;
// Returns a pointer to the ByteArray object for a given data start address.
- static inline ByteArray* FromDataStartAddress(Address address);
+ static inline ByteArray FromDataStartAddress(Address address);
DECL_CAST(ByteArray)
@@ -515,8 +541,12 @@ class ByteArray : public FixedArrayBase {
class BodyDescriptor;
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(ByteArray);
+ protected:
+ // Special-purpose constructor for subclasses that have fast paths where
+ // their ptr() is a Smi.
+ inline ByteArray(Address ptr, AllowInlineSmiStorage allow_smi);
+
+ OBJECT_CONSTRUCTORS(ByteArray, FixedArrayBase);
};
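
The new GetDataEndAddress pairs with GetDataStartAddress to form the usual half-open [begin, end) byte range, so standard iteration applies directly; for instance:

    #include <cstdint>
    #include <numeric>

    // Sum a half-open byte range, as the start/end address pair enables.
    uint64_t SumBytes(const uint8_t* begin, const uint8_t* end) {
      return std::accumulate(begin, end, uint64_t{0});
    }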
// Wrapper class for ByteArray which can store arbitrary C++ classes, as long
@@ -530,39 +560,50 @@ class PodArray : public ByteArray {
ByteArray::copy_out(index * sizeof(T), reinterpret_cast<byte*>(result),
sizeof(T));
}
+
+ void copy_in(int index, const T* buffer, int length) {
+ ByteArray::copy_in(index * sizeof(T), reinterpret_cast<const byte*>(buffer),
+ length * sizeof(T));
+ }
+
T get(int index) {
T result;
copy_out(index, &result);
return result;
}
- void set(int index, const T& value) {
- copy_in(index * sizeof(T), reinterpret_cast<const byte*>(&value),
- sizeof(T));
- }
- inline int length();
+
+ void set(int index, const T& value) { copy_in(index, &value, 1); }
+
+ inline int length() const;
DECL_CAST(PodArray<T>)
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(PodArray<T>);
+ OBJECT_CONSTRUCTORS(PodArray<T>, ByteArray);
};
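
PodArray's get/set reduce to byte copies through copy_out/copy_in, which is only sound for trivially copyable element types. A standalone sketch of the round-trip (helper names are made up):

    #include <cstring>
    #include <type_traits>

    template <class T>
    void CopyIn(unsigned char* bytes, int index, const T& value) {
      static_assert(std::is_trivially_copyable<T>::value, "POD only");
      std::memcpy(bytes + index * sizeof(T), &value, sizeof(T));
    }

    template <class T>
    T CopyOut(const unsigned char* bytes, int index) {
      static_assert(std::is_trivially_copyable<T>::value, "POD only");
      T out;
      std::memcpy(&out, bytes + index * sizeof(T), sizeof(T));
      return out;
    }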
class FixedTypedArrayBase : public FixedArrayBase {
public:
// [base_pointer]: Either points to the FixedTypedArrayBase itself or nullptr.
- DECL_ACCESSORS(base_pointer, Object)
+ DECL_ACCESSORS(base_pointer, Object);
// [external_pointer]: Contains the offset between base_pointer and the start
// of the data. If the base_pointer is a nullptr, the external_pointer
// therefore points to the actual backing store.
- DECL_ACCESSORS(external_pointer, void)
+ DECL_ACCESSORS(external_pointer, void*)
// Dispatched behavior.
DECL_CAST(FixedTypedArrayBase)
- static const int kBasePointerOffset = FixedArrayBase::kHeaderSize;
- static const int kExternalPointerOffset = kBasePointerOffset + kPointerSize;
- static const int kHeaderSize =
- DOUBLE_POINTER_ALIGN(kExternalPointerOffset + kPointerSize);
+#define FIXED_TYPED_ARRAY_BASE_FIELDS(V) \
+ V(kBasePointerOffset, kTaggedSize) \
+ V(kExternalPointerOffset, kSystemPointerSize) \
+ /* Header size. */ \
+ V(kHeaderSize, 0)
+
+ DEFINE_FIELD_OFFSET_CONSTANTS(FixedArrayBase::kHeaderSize,
+ FIXED_TYPED_ARRAY_BASE_FIELDS)
+#undef FIXED_TYPED_ARRAY_BASE_FIELDS
+
+ STATIC_ASSERT(IsAligned(kHeaderSize, kDoubleAlignment));
static const int kDataOffset = kHeaderSize;
@@ -596,7 +637,7 @@ class FixedTypedArrayBase : public FixedArrayBase {
inline int DataSize(InstanceType type) const;
- DISALLOW_IMPLICIT_CONSTRUCTORS(FixedTypedArrayBase);
+ OBJECT_CONSTRUCTORS(FixedTypedArrayBase, FixedArrayBase);
};
template <class Traits>
@@ -609,7 +650,7 @@ class FixedTypedArray : public FixedTypedArrayBase {
static inline ElementType get_scalar_from_data_ptr(void* data_ptr, int index);
inline ElementType get_scalar(int index);
- static inline Handle<Object> get(Isolate* isolate, FixedTypedArray* array,
+ static inline Handle<Object> get(Isolate* isolate, FixedTypedArray array,
int index);
inline void set(int index, ElementType value);
@@ -624,13 +665,13 @@ class FixedTypedArray : public FixedTypedArrayBase {
// This accessor applies the correct conversion from Smi, HeapNumber
// and undefined.
- inline void SetValue(uint32_t index, Object* value);
+ inline void SetValue(uint32_t index, Object value);
DECL_PRINTER(FixedTypedArray)
DECL_VERIFIER(FixedTypedArray)
private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(FixedTypedArray);
+ OBJECT_CONSTRUCTORS(FixedTypedArray, FixedTypedArrayBase);
};
#define FIXED_TYPED_ARRAY_TRAITS(Type, type, TYPE, elementType) \
@@ -655,15 +696,16 @@ class TemplateList : public FixedArray {
public:
static Handle<TemplateList> New(Isolate* isolate, int size);
inline int length() const;
- inline Object* get(int index) const;
- inline void set(int index, Object* value);
+ inline Object get(int index) const;
+ inline void set(int index, Object value);
static Handle<TemplateList> Add(Isolate* isolate, Handle<TemplateList> list,
Handle<Object> value);
DECL_CAST(TemplateList)
private:
static const int kLengthIndex = 0;
static const int kFirstElementIndex = kLengthIndex + 1;
- DISALLOW_IMPLICIT_CONSTRUCTORS(TemplateList);
+
+ OBJECT_CONSTRUCTORS(TemplateList, FixedArray);
};
} // namespace internal
diff --git a/deps/v8/src/objects/foreign-inl.h b/deps/v8/src/objects/foreign-inl.h
new file mode 100644
index 0000000000..f8a0488988
--- /dev/null
+++ b/deps/v8/src/objects/foreign-inl.h
@@ -0,0 +1,41 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_FOREIGN_INL_H_
+#define V8_OBJECTS_FOREIGN_INL_H_
+
+#include "src/objects/foreign.h"
+
+#include "src/heap/heap-write-barrier-inl.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+OBJECT_CONSTRUCTORS_IMPL(Foreign, HeapObject)
+
+CAST_ACCESSOR(Foreign)
+
+// static
+bool Foreign::IsNormalized(Object value) {
+ if (value == Smi::kZero) return true;
+ return Foreign::cast(value)->foreign_address() != kNullAddress;
+}
+
+Address Foreign::foreign_address() {
+ return READ_UINTPTR_FIELD(this, kForeignAddressOffset);
+}
+
+void Foreign::set_foreign_address(Address value) {
+ WRITE_UINTPTR_FIELD(this, kForeignAddressOffset, value);
+}
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_FOREIGN_INL_H_
diff --git a/deps/v8/src/objects/foreign.h b/deps/v8/src/objects/foreign.h
new file mode 100644
index 0000000000..c2b96c41ca
--- /dev/null
+++ b/deps/v8/src/objects/foreign.h
@@ -0,0 +1,59 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_FOREIGN_H_
+#define V8_OBJECTS_FOREIGN_H_
+
+#include "src/objects/heap-object.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+// Foreign describes objects pointing from JavaScript to C structures.
+class Foreign : public HeapObject {
+ public:
+ // [address]: field containing the address.
+ inline Address foreign_address();
+
+ static inline bool IsNormalized(Object object);
+
+ DECL_CAST(Foreign)
+
+ // Dispatched behavior.
+ DECL_PRINTER(Foreign)
+ DECL_VERIFIER(Foreign)
+
+ // Layout description.
+#define FOREIGN_FIELDS(V) \
+ V(kForeignAddressOffset, kSystemPointerSize) \
+ /* Total size. */ \
+ V(kSize, 0)
+
+ DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize, FOREIGN_FIELDS)
+#undef FOREIGN_FIELDS
+
+ STATIC_ASSERT(IsAligned(kForeignAddressOffset, kSystemPointerSize));
+ STATIC_ASSERT(kForeignAddressOffset == Internals::kForeignAddressOffset);
+
+ class BodyDescriptor;
+
+ private:
+ friend class Factory;
+ friend class SerializerDeserializer;
+ friend class StartupSerializer;
+
+ inline void set_foreign_address(Address value);
+
+ OBJECT_CONSTRUCTORS(Foreign, HeapObject);
+};
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_FOREIGN_H_
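
The extracted Foreign header boxes exactly one raw machine address behind a GC-visible object, with Smi zero or a non-null stored address counting as normalized. A minimal analogue of the idea (illustrative, not V8's layout):

    #include <cstdint>

    struct ForeignBox {               // one raw address in a managed box
      uintptr_t foreign_address = 0;  // 0 plays the role of kNullAddress
      bool has_target() const { return foreign_address != 0; }
    };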
diff --git a/deps/v8/src/objects/frame-array-inl.h b/deps/v8/src/objects/frame-array-inl.h
index 1e9ac1002e..bd76214464 100644
--- a/deps/v8/src/objects/frame-array-inl.h
+++ b/deps/v8/src/objects/frame-array-inl.h
@@ -6,6 +6,8 @@
#define V8_OBJECTS_FRAME_ARRAY_INL_H_
#include "src/objects/frame-array.h"
+
+#include "src/objects/foreign-inl.h"
#include "src/wasm/wasm-objects-inl.h"
// Has to be the last include (doesn't have include guards):
@@ -14,16 +16,17 @@
namespace v8 {
namespace internal {
+OBJECT_CONSTRUCTORS_IMPL(FrameArray, FixedArray)
CAST_ACCESSOR(FrameArray)
#define DEFINE_FRAME_ARRAY_ACCESSORS(name, type) \
- type* FrameArray::name(int frame_ix) const { \
- Object* obj = \
+ type FrameArray::name(int frame_ix) const { \
+ Object obj = \
get(kFirstIndex + frame_ix * kElementsPerFrame + k##name##Offset); \
return type::cast(obj); \
} \
\
- void FrameArray::Set##name(int frame_ix, type* value) { \
+ void FrameArray::Set##name(int frame_ix, type value) { \
set(kFirstIndex + frame_ix * kElementsPerFrame + k##name##Offset, value); \
}
FRAME_ARRAY_FIELD_LIST(DEFINE_FRAME_ARRAY_ACCESSORS)
diff --git a/deps/v8/src/objects/frame-array.h b/deps/v8/src/objects/frame-array.h
index 5bccfb5807..e25ccd6542 100644
--- a/deps/v8/src/objects/frame-array.h
+++ b/deps/v8/src/objects/frame-array.h
@@ -31,8 +31,8 @@ class Handle;
class FrameArray : public FixedArray {
public:
#define DECL_FRAME_ARRAY_ACCESSORS(name, type) \
- inline type* name(int frame_ix) const; \
- inline void Set##name(int frame_ix, type* value);
+ inline type name(int frame_ix) const; \
+ inline void Set##name(int frame_ix, type value);
FRAME_ARRAY_FIELD_LIST(DECL_FRAME_ARRAY_ACCESSORS)
#undef DECL_FRAME_ARRAY_ACCESSORS
@@ -51,7 +51,8 @@ class FrameArray : public FixedArray {
kIsStrict = 1 << 3,
kIsConstructor = 1 << 4,
kAsmJsAtNumberConversion = 1 << 5,
- kIsAsync = 1 << 6
+ kIsAsync = 1 << 6,
+ kIsPromiseAll = 1 << 7
};
static Handle<FrameArray> AppendJSFrame(Handle<FrameArray> in,
@@ -100,7 +101,7 @@ class FrameArray : public FixedArray {
Handle<FrameArray> array, int length);
friend class Factory;
- DISALLOW_IMPLICIT_CONSTRUCTORS(FrameArray);
+ OBJECT_CONSTRUCTORS(FrameArray, FixedArray);
};
} // namespace internal
diff --git a/deps/v8/src/objects/free-space-inl.h b/deps/v8/src/objects/free-space-inl.h
new file mode 100644
index 0000000000..b71a469505
--- /dev/null
+++ b/deps/v8/src/objects/free-space-inl.h
@@ -0,0 +1,64 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_FREE_SPACE_INL_H_
+#define V8_OBJECTS_FREE_SPACE_INL_H_
+
+#include "src/objects/free-space.h"
+
+#include "src/heap/heap-inl.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+OBJECT_CONSTRUCTORS_IMPL(FreeSpace, HeapObject)
+
+SMI_ACCESSORS(FreeSpace, size, kSizeOffset)
+RELAXED_SMI_ACCESSORS(FreeSpace, size, kSizeOffset)
+
+int FreeSpace::Size() { return size(); }
+
+FreeSpace FreeSpace::next() {
+#ifdef DEBUG
+ Heap* heap = Heap::FromWritableHeapObject(*this);
+ Object free_space_map = heap->isolate()->root(RootIndex::kFreeSpaceMap);
+ DCHECK_IMPLIES(!map_slot().contains_value(free_space_map->ptr()),
+ !heap->deserialization_complete() &&
+ map_slot().contains_value(kNullAddress));
+#endif
+ DCHECK_LE(kNextOffset + kPointerSize, relaxed_read_size());
+ return FreeSpace::unchecked_cast(*ObjectSlot(address() + kNextOffset));
+}
+
+void FreeSpace::set_next(FreeSpace next) {
+#ifdef DEBUG
+ Heap* heap = Heap::FromWritableHeapObject(*this);
+ Object free_space_map = heap->isolate()->root(RootIndex::kFreeSpaceMap);
+ DCHECK_IMPLIES(!map_slot().contains_value(free_space_map->ptr()),
+ !heap->deserialization_complete() &&
+ map_slot().contains_value(kNullAddress));
+#endif
+ DCHECK_LE(kNextOffset + kPointerSize, relaxed_read_size());
+ ObjectSlot(address() + kNextOffset).Relaxed_Store(next);
+}
+
+FreeSpace FreeSpace::cast(HeapObject o) {
+ SLOW_DCHECK(!Heap::FromWritableHeapObject(o)->deserialization_complete() ||
+ o->IsFreeSpace());
+ return bit_cast<FreeSpace>(o);
+}
+
+FreeSpace FreeSpace::unchecked_cast(const Object o) {
+ return bit_cast<FreeSpace>(o);
+}
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_FREE_SPACE_INL_H_
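
FreeSpace blocks chain through a raw next slot (read and written with relaxed stores above), forming the heap's free list; traversal is plain linked-list walking. A standalone sketch:

    // Each block records its own size and the next block (or nullptr).
    struct Block {
      int size;
      Block* next;
    };

    int TotalFreeBytes(const Block* b) {
      int total = 0;
      for (; b != nullptr; b = b->next) total += b->size;
      return total;
    }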
diff --git a/deps/v8/src/objects/free-space.h b/deps/v8/src/objects/free-space.h
new file mode 100644
index 0000000000..bb69ba389e
--- /dev/null
+++ b/deps/v8/src/objects/free-space.h
@@ -0,0 +1,61 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_FREE_SPACE_H_
+#define V8_OBJECTS_FREE_SPACE_H_
+
+#include "src/objects/heap-object.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+// FreeSpace are fixed-size free memory blocks used by the heap and GC.
+// They look like heap objects (are heap object tagged and have a map) so that
+// the heap remains iterable. They have a size and a next pointer.
+// The next pointer is the raw address of the next FreeSpace object (or NULL)
+// in the free list.
+class FreeSpace : public HeapObject {
+ public:
+ // [size]: size of the free space including the header.
+ inline int size() const;
+ inline void set_size(int value);
+
+ inline int relaxed_read_size() const;
+ inline void relaxed_write_size(int value);
+
+ inline int Size();
+
+ // Accessors for the next field.
+ inline FreeSpace next();
+ inline void set_next(FreeSpace next);
+
+ inline static FreeSpace cast(HeapObject obj);
+ inline static FreeSpace unchecked_cast(const Object obj);
+
+ // Dispatched behavior.
+ DECL_PRINTER(FreeSpace)
+ DECL_VERIFIER(FreeSpace)
+
+ // Layout description.
+#define FREE_SPACE_FIELDS(V) \
+ V(kSizeOffset, kTaggedSize) \
+ V(kNextOffset, kTaggedSize) \
+ /* Header size. */ \
+ V(kSize, 0)
+
+ DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize, FREE_SPACE_FIELDS)
+#undef FREE_SPACE_FIELDS
+
+ OBJECT_CONSTRUCTORS(FreeSpace, HeapObject);
+};
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_FREE_SPACE_H_
diff --git a/deps/v8/src/objects/hash-table-inl.h b/deps/v8/src/objects/hash-table-inl.h
index bc391fede6..24eb22e018 100644
--- a/deps/v8/src/objects/hash-table-inl.h
+++ b/deps/v8/src/objects/hash-table-inl.h
@@ -8,10 +8,12 @@
#include "src/objects/hash-table.h"
#include "src/heap/heap.h"
-#include "src/objects-inl.h"
#include "src/objects/fixed-array-inl.h"
#include "src/roots-inl.h"
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
namespace v8 {
namespace internal {
@@ -79,11 +81,11 @@ int HashTable<Derived, Shape>::FindEntry(ReadOnlyRoots roots, Key key,
uint32_t entry = FirstProbe(hash, capacity);
uint32_t count = 1;
// EnsureCapacity will guarantee the hash table is never full.
- Object* undefined = roots.undefined_value();
- Object* the_hole = roots.the_hole_value();
+ Object undefined = roots.undefined_value();
+ Object the_hole = roots.the_hole_value();
USE(the_hole);
while (true) {
- Object* element = KeyAt(entry);
+ Object element = KeyAt(entry);
// Empty entry. Uses raw unchecked accessors because it is called by the
// string table during bootstrapping.
if (element == undefined) break;
@@ -96,53 +98,54 @@ int HashTable<Derived, Shape>::FindEntry(ReadOnlyRoots roots, Key key,
}
template <typename Derived, typename Shape>
-bool HashTable<Derived, Shape>::IsKey(ReadOnlyRoots roots, Object* k) {
+bool HashTable<Derived, Shape>::IsKey(ReadOnlyRoots roots, Object k) {
return Shape::IsKey(roots, k);
}
template <typename Derived, typename Shape>
bool HashTable<Derived, Shape>::ToKey(ReadOnlyRoots roots, int entry,
- Object** out_k) {
- Object* k = KeyAt(entry);
+ Object* out_k) {
+ Object k = KeyAt(entry);
if (!IsKey(roots, k)) return false;
*out_k = Shape::Unwrap(k);
return true;
}
template <typename KeyT>
-bool BaseShape<KeyT>::IsKey(ReadOnlyRoots roots, Object* key) {
+bool BaseShape<KeyT>::IsKey(ReadOnlyRoots roots, Object key) {
return IsLive(roots, key);
}
template <typename KeyT>
-bool BaseShape<KeyT>::IsLive(ReadOnlyRoots roots, Object* k) {
+bool BaseShape<KeyT>::IsLive(ReadOnlyRoots roots, Object k) {
return k != roots.the_hole_value() && k != roots.undefined_value();
}
-template <typename Derived, typename Shape>
-HashTable<Derived, Shape>* HashTable<Derived, Shape>::cast(Object* obj) {
- SLOW_DCHECK(obj->IsHashTable());
- return reinterpret_cast<HashTable*>(obj);
-}
-
-template <typename Derived, typename Shape>
-const HashTable<Derived, Shape>* HashTable<Derived, Shape>::cast(
- const Object* obj) {
- SLOW_DCHECK(obj->IsHashTable());
- return reinterpret_cast<const HashTable*>(obj);
-}
-
bool ObjectHashSet::Has(Isolate* isolate, Handle<Object> key, int32_t hash) {
return FindEntry(ReadOnlyRoots(isolate), key, hash) != kNotFound;
}
bool ObjectHashSet::Has(Isolate* isolate, Handle<Object> key) {
- Object* hash = key->GetHash();
+ Object hash = key->GetHash();
if (!hash->IsSmi()) return false;
return FindEntry(ReadOnlyRoots(isolate), key, Smi::ToInt(hash)) != kNotFound;
}
+bool ObjectHashTableShape::IsMatch(Handle<Object> key, Object other) {
+ return key->SameValue(other);
+}
+
+uint32_t ObjectHashTableShape::Hash(Isolate* isolate, Handle<Object> key) {
+ return Smi::ToInt(key->GetHash());
+}
+
+uint32_t ObjectHashTableShape::HashForObject(Isolate* isolate, Object other) {
+ return Smi::ToInt(other->GetHash());
+}
+
} // namespace internal
} // namespace v8
+#include "src/objects/object-macros-undef.h"
+
#endif // V8_OBJECTS_HASH_TABLE_INL_H_
diff --git a/deps/v8/src/objects/hash-table.h b/deps/v8/src/objects/hash-table.h
index 66d3f6dfb2..5bb529a121 100644
--- a/deps/v8/src/objects/hash-table.h
+++ b/deps/v8/src/objects/hash-table.h
@@ -8,6 +8,8 @@
#include "src/base/compiler-specific.h"
#include "src/globals.h"
#include "src/objects/fixed-array.h"
+#include "src/objects/smi.h"
+#include "src/roots.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -31,11 +33,11 @@ namespace internal {
// class ExampleShape {
// public:
// // Tells whether key matches other.
-// static bool IsMatch(Key key, Object* other);
+// static bool IsMatch(Key key, Object other);
// // Returns the hash value for key.
// static uint32_t Hash(Isolate* isolate, Key key);
// // Returns the hash value for object.
-// static uint32_t HashForObject(Isolate* isolate, Object* object);
+// static uint32_t HashForObject(Isolate* isolate, Object object);
// // Convert key to an object.
// static inline Handle<Object> AsHandle(Isolate* isolate, Key key);
// // The prefix size indicates number of elements in the beginning
@@ -57,9 +59,9 @@ class BaseShape {
typedef KeyT Key;
static inline RootIndex GetMapRootIndex();
static const bool kNeedsHoleCheck = true;
- static Object* Unwrap(Object* key) { return key; }
- static inline bool IsKey(ReadOnlyRoots roots, Object* key);
- static inline bool IsLive(ReadOnlyRoots roots, Object* key);
+ static Object Unwrap(Object key) { return key; }
+ static inline bool IsKey(ReadOnlyRoots roots, Object key);
+ static inline bool IsLive(ReadOnlyRoots roots, Object key);
};
class V8_EXPORT_PRIVATE HashTableBase : public NON_EXPORTED_BASE(FixedArray) {
@@ -123,6 +125,8 @@ class V8_EXPORT_PRIVATE HashTableBase : public NON_EXPORTED_BASE(FixedArray) {
uint32_t size) {
return (last + number) & (size - 1);
}
+
+ OBJECT_CONSTRUCTORS(HashTableBase, FixedArray)
};
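
NextProbe masks (last + number) against size - 1, i.e. triangular probing on a power-of-two table: with steps 1, 2, 3, ... the sequence visits every slot once before repeating, so FindEntry cannot loop forever on a non-full table. The slot arithmetic in isolation:

    #include <cstdint>

    // size must be a power of two for the mask to act as a modulus.
    uint32_t NextSlot(uint32_t last, uint32_t step, uint32_t size) {
      return (last + step) & (size - 1);
    }
    // Usage: slot = hash & (size - 1); then slot = NextSlot(slot, ++n, size);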
template <typename Derived, typename Shape>
@@ -137,8 +141,6 @@ class HashTable : public HashTableBase {
PretenureFlag pretenure = NOT_TENURED,
MinimumCapacity capacity_option = USE_DEFAULT_MINIMUM_CAPACITY);
- DECL_CAST(HashTable)
-
// Garbage collection support.
void IteratePrefix(ObjectVisitor* visitor);
void IterateElements(ObjectVisitor* visitor);
@@ -152,19 +154,19 @@ class HashTable : public HashTableBase {
// Tells whether k is a real key. The hole and undefined are not allowed
// as keys and can be used to indicate missing or deleted elements.
- static bool IsKey(ReadOnlyRoots roots, Object* k);
+ static bool IsKey(ReadOnlyRoots roots, Object k);
- inline bool ToKey(ReadOnlyRoots roots, int entry, Object** out_k);
+ inline bool ToKey(ReadOnlyRoots roots, int entry, Object* out_k);
// Returns the key at entry.
- Object* KeyAt(int entry) { return get(EntryToIndex(entry) + kEntryKeyIndex); }
+ Object KeyAt(int entry) { return get(EntryToIndex(entry) + kEntryKeyIndex); }
static const int kElementsStartIndex = kPrefixStartIndex + Shape::kPrefixSize;
static const int kEntrySize = Shape::kEntrySize;
STATIC_ASSERT(kEntrySize > 0);
static const int kEntryKeyIndex = 0;
static const int kElementsStartOffset =
- kHeaderSize + kElementsStartIndex * kPointerSize;
+ kHeaderSize + kElementsStartIndex * kTaggedSize;
// Maximal capacity of HashTable. Based on maximal length of underlying
// FixedArray. Staying below kMaxCapacity also ensures that EntryToIndex
// cannot overflow.
@@ -175,7 +177,12 @@ class HashTable : public HashTableBase {
static const int kMinShrinkCapacity = 16;
// Maximum length to create a regular HashTable (aka. non large object).
+#if V8_HOST_ARCH_PPC
+ // Reduced kMaxRegularCapacity due to reduced kMaxRegularHeapObjectSize
+ static const int kMaxRegularCapacity = 16384 / 2;
+#else
static const int kMaxRegularCapacity = 16384;
+#endif
// Returns the index for an entry (of the key)
static constexpr inline int EntryToIndex(int entry) {
@@ -226,13 +233,15 @@ class HashTable : public HashTableBase {
// Returns _expected_ if one of entries given by the first _probe_ probes is
// equal to _expected_. Otherwise, returns the entry given by the probe
// number _probe_.
- uint32_t EntryForProbe(Isolate* isolate, Object* k, int probe,
+ uint32_t EntryForProbe(Isolate* isolate, Object k, int probe,
uint32_t expected);
void Swap(uint32_t entry1, uint32_t entry2, WriteBarrierMode mode);
// Rehashes this hash-table into the new table.
- void Rehash(Isolate* isolate, Derived* new_table);
+ void Rehash(Isolate* isolate, Derived new_table);
+
+ OBJECT_CONSTRUCTORS(HashTable, HashTableBase)
};
// HashTableKey is an abstract superclass for virtual key behavior.
@@ -241,7 +250,7 @@ class HashTableKey {
explicit HashTableKey(uint32_t hash) : hash_(hash) {}
// Returns whether the other object matches this key.
- virtual bool IsMatch(Object* other) = 0;
+ virtual bool IsMatch(Object other) = 0;
// Returns the hash value for this key.
// Required.
virtual ~HashTableKey() = default;
@@ -260,9 +269,9 @@ class HashTableKey {
class ObjectHashTableShape : public BaseShape<Handle<Object>> {
public:
- static inline bool IsMatch(Handle<Object> key, Object* other);
+ static inline bool IsMatch(Handle<Object> key, Object other);
static inline uint32_t Hash(Isolate* isolate, Handle<Object> key);
- static inline uint32_t HashForObject(Isolate* isolate, Object* object);
+ static inline uint32_t HashForObject(Isolate* isolate, Object object);
static inline Handle<Object> AsHandle(Handle<Object> key);
static const int kPrefixSize = 0;
static const int kEntryValueIndex = 1;
@@ -275,12 +284,12 @@ class ObjectHashTableBase : public HashTable<Derived, Shape> {
public:
// Looks up the value associated with the given key. The hole value is
// returned in case the key is not present.
- Object* Lookup(Handle<Object> key);
- Object* Lookup(Handle<Object> key, int32_t hash);
- Object* Lookup(ReadOnlyRoots roots, Handle<Object> key, int32_t hash);
+ Object Lookup(Handle<Object> key);
+ Object Lookup(Handle<Object> key, int32_t hash);
+ Object Lookup(ReadOnlyRoots roots, Handle<Object> key, int32_t hash);
// Returns the value at entry.
- Object* ValueAt(int entry);
+ Object ValueAt(int entry);
// Overwrite all keys and values with the hole value.
static void FillEntriesWithHoles(Handle<Derived>);
@@ -306,8 +315,10 @@ class ObjectHashTableBase : public HashTable<Derived, Shape> {
}
protected:
- void AddEntry(int entry, Object* key, Object* value);
+ void AddEntry(int entry, Object key, Object value);
void RemoveEntry(int entry);
+
+ OBJECT_CONSTRUCTORS(ObjectHashTableBase, HashTable<Derived, Shape>)
};
// ObjectHashTable maps keys that are arbitrary objects to object values by
@@ -317,6 +328,10 @@ class ObjectHashTable
public:
DECL_CAST(ObjectHashTable)
DECL_PRINTER(ObjectHashTable)
+
+ OBJECT_CONSTRUCTORS(
+ ObjectHashTable,
+ ObjectHashTableBase<ObjectHashTable, ObjectHashTableShape>)
};
class EphemeronHashTableShape : public ObjectHashTableShape {
@@ -336,6 +351,10 @@ class EphemeronHashTable
protected:
friend class MarkCompactCollector;
+
+ OBJECT_CONSTRUCTORS(
+ EphemeronHashTable,
+ ObjectHashTableBase<EphemeronHashTable, EphemeronHashTableShape>)
};
class ObjectHashSetShape : public ObjectHashTableShape {
@@ -353,6 +372,9 @@ class ObjectHashSet : public HashTable<ObjectHashSet, ObjectHashSetShape> {
inline bool Has(Isolate* isolate, Handle<Object> key);
DECL_CAST(ObjectHashSet)
+
+ OBJECT_CONSTRUCTORS(ObjectHashSet,
+ HashTable<ObjectHashSet, ObjectHashSetShape>)
};
} // namespace internal
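
HashTable<Derived, Shape> stays a CRTP base: parameterizing on the concrete table type lets shared code such as Rehash produce and return Derived with no virtual dispatch, which is why each concrete table above re-declares its constructors via OBJECT_CONSTRUCTORS. The pattern in miniature:

    // CRTP: the base knows the derived type statically.
    template <class Derived>
    struct TableBase {
      Derived& self() { return static_cast<Derived&>(*this); }
    };

    struct MyTable : TableBase<MyTable> {
      int entries = 0;
    };
    // MyTable t; t.self() is a MyTable&, no casts at the call site.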
diff --git a/deps/v8/src/objects/heap-number-inl.h b/deps/v8/src/objects/heap-number-inl.h
new file mode 100644
index 0000000000..80a49d0e1d
--- /dev/null
+++ b/deps/v8/src/objects/heap-number-inl.h
@@ -0,0 +1,56 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_HEAP_NUMBER_INL_H_
+#define V8_OBJECTS_HEAP_NUMBER_INL_H_
+
+#include "src/objects/heap-number.h"
+
+#include "src/heap/heap-write-barrier-inl.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+OBJECT_CONSTRUCTORS_IMPL(HeapNumberBase, HeapObject)
+OBJECT_CONSTRUCTORS_IMPL(HeapNumber, HeapNumberBase)
+OBJECT_CONSTRUCTORS_IMPL(MutableHeapNumber, HeapNumberBase)
+
+CAST_ACCESSOR(HeapNumber)
+CAST_ACCESSOR(MutableHeapNumber)
+
+double HeapNumberBase::value() const {
+ return READ_DOUBLE_FIELD(this, kValueOffset);
+}
+
+void HeapNumberBase::set_value(double value) {
+ WRITE_DOUBLE_FIELD(this, kValueOffset, value);
+}
+
+uint64_t HeapNumberBase::value_as_bits() const {
+ return READ_UINT64_FIELD(this, kValueOffset);
+}
+
+void HeapNumberBase::set_value_as_bits(uint64_t bits) {
+ WRITE_UINT64_FIELD(this, kValueOffset, bits);
+}
+
+int HeapNumberBase::get_exponent() {
+ return ((READ_INT_FIELD(this, kExponentOffset) & kExponentMask) >>
+ kExponentShift) -
+ kExponentBias;
+}
+
+int HeapNumberBase::get_sign() {
+ return READ_INT_FIELD(this, kExponentOffset) & kSignMask;
+}
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_HEAP_NUMBER_INL_H_
diff --git a/deps/v8/src/objects/heap-number.h b/deps/v8/src/objects/heap-number.h
new file mode 100644
index 0000000000..15e821e966
--- /dev/null
+++ b/deps/v8/src/objects/heap-number.h
@@ -0,0 +1,89 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_HEAP_NUMBER_H_
+#define V8_OBJECTS_HEAP_NUMBER_H_
+
+#include "src/objects/heap-object.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+// The HeapNumber class describes heap allocated numbers that cannot be
+// represented in a Smi (small integer). MutableHeapNumber is the same, but its
+// number value can change over time (it is used only as property storage).
+// HeapNumberBase merely exists to avoid code duplication.
+class HeapNumberBase : public HeapObject {
+ public:
+ // [value]: number value.
+ inline double value() const;
+ inline void set_value(double value);
+
+ inline uint64_t value_as_bits() const;
+ inline void set_value_as_bits(uint64_t bits);
+
+ inline int get_exponent();
+ inline int get_sign();
+
+ // Layout description.
+ static const int kValueOffset = HeapObject::kHeaderSize;
+ // IEEE doubles are two 32 bit words. The first is just mantissa, the second
+ // is a mixture of sign, exponent and mantissa. The offsets of two 32 bit
+ // words within double numbers are endian dependent and they are set
+ // accordingly.
+#if defined(V8_TARGET_LITTLE_ENDIAN)
+ static const int kMantissaOffset = kValueOffset;
+ static const int kExponentOffset = kValueOffset + 4;
+#elif defined(V8_TARGET_BIG_ENDIAN)
+ static const int kMantissaOffset = kValueOffset + 4;
+ static const int kExponentOffset = kValueOffset;
+#else
+#error Unknown byte ordering
+#endif
+
+ static const int kSize = kValueOffset + kDoubleSize;
+ static const uint32_t kSignMask = 0x80000000u;
+ static const uint32_t kExponentMask = 0x7ff00000u;
+ static const uint32_t kMantissaMask = 0xfffffu;
+ static const int kMantissaBits = 52;
+ static const int kExponentBits = 11;
+ static const int kExponentBias = 1023;
+ static const int kExponentShift = 20;
+ static const int kInfinityOrNanExponent =
+ (kExponentMask >> kExponentShift) - kExponentBias;
+ static const int kMantissaBitsInTopWord = 20;
+ static const int kNonMantissaBitsInTopWord = 12;
+
+ // Just to make the macro-generated constructor happy. Subclasses should
+ // perform their own proper type checking.
+ inline bool IsHeapNumberBase() const { return true; }
+
+ OBJECT_CONSTRUCTORS(HeapNumberBase, HeapObject);
+};
+
+class HeapNumber : public HeapNumberBase {
+ public:
+ DECL_CAST(HeapNumber)
+ V8_EXPORT_PRIVATE void HeapNumberPrint(std::ostream& os);
+
+ OBJECT_CONSTRUCTORS(HeapNumber, HeapNumberBase);
+};
+
+class MutableHeapNumber : public HeapNumberBase {
+ public:
+ DECL_CAST(MutableHeapNumber)
+ V8_EXPORT_PRIVATE void MutableHeapNumberPrint(std::ostream& os);
+
+ OBJECT_CONSTRUCTORS(MutableHeapNumber, HeapNumberBase);
+};
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_HEAP_NUMBER_H_
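
get_exponent reads only the high 32-bit word of the IEEE-754 double, masks the 11 exponent bits, and subtracts the 1023 bias; get_sign tests the top bit of the same word. The same arithmetic on a plain double, for reference:

    #include <cstdint>
    #include <cstring>

    int ExponentOf(double d) {
      uint64_t bits;
      std::memcpy(&bits, &d, sizeof bits);  // well-defined type pun
      uint32_t hi = static_cast<uint32_t>(bits >> 32);
      return static_cast<int>((hi & 0x7ff00000u) >> 20) - 1023;
    }
    // ExponentOf(1.0) == 0, ExponentOf(8.0) == 3.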
diff --git a/deps/v8/src/objects/heap-object-inl.h b/deps/v8/src/objects/heap-object-inl.h
new file mode 100644
index 0000000000..169b1acd87
--- /dev/null
+++ b/deps/v8/src/objects/heap-object-inl.h
@@ -0,0 +1,43 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_HEAP_OBJECT_INL_H_
+#define V8_OBJECTS_HEAP_OBJECT_INL_H_
+
+#include "src/objects/heap-object.h"
+
+#include "src/heap/heap-write-barrier-inl.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+HeapObject::HeapObject(Address ptr, AllowInlineSmiStorage allow_smi)
+ : Object(ptr) {
+ SLOW_DCHECK(
+ (allow_smi == AllowInlineSmiStorage::kAllowBeingASmi && IsSmi()) ||
+ IsHeapObject());
+}
+
+HeapObject HeapObject::FromAddress(Address address) {
+ DCHECK_TAG_ALIGNED(address);
+ return HeapObject(address + kHeapObjectTag);
+}
+
+Heap* NeverReadOnlySpaceObject::GetHeap(const HeapObject object) {
+ return GetHeapFromWritableObject(object);
+}
+
+Isolate* NeverReadOnlySpaceObject::GetIsolate(const HeapObject object) {
+ return GetHeap(object)->isolate();
+}
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_HEAP_OBJECT_INL_H_
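
FromAddress and address() are inverse tag adjustments: heap references carry kHeapObjectTag in their low bits, so converting between raw addresses and tagged pointers is a single add or subtract. In isolation:

    #include <cstdint>

    constexpr uintptr_t kTag = 1;  // stands in for kHeapObjectTag
    constexpr uintptr_t Tagged(uintptr_t address) { return address + kTag; }
    constexpr uintptr_t Untagged(uintptr_t tagged) { return tagged - kTag; }
    static_assert(Untagged(Tagged(0x1000)) == 0x1000, "round trip");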
diff --git a/deps/v8/src/objects/heap-object.h b/deps/v8/src/objects/heap-object.h
new file mode 100644
index 0000000000..61817c2d61
--- /dev/null
+++ b/deps/v8/src/objects/heap-object.h
@@ -0,0 +1,214 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_HEAP_OBJECT_H_
+#define V8_OBJECTS_HEAP_OBJECT_H_
+
+#include "src/globals.h"
+
+#include "src/objects.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+// HeapObject is the superclass for all classes describing heap allocated
+// objects.
+class HeapObject : public Object {
+ public:
+ bool is_null() const { return ptr() == kNullAddress; }
+
+ // [map]: Contains a map which contains the object's reflective
+ // information.
+ inline Map map() const;
+ inline void set_map(Map value);
+
+ inline MapWordSlot map_slot() const;
+
+ // The no-write-barrier version. This is OK if the object is white and in
+ // new space, or if the value is an immortal immutable object, like the maps
+ // of primitive (non-JS) objects like strings, heap numbers etc.
+ inline void set_map_no_write_barrier(Map value);
+
+ // Get the map using acquire load.
+ inline Map synchronized_map() const;
+ inline MapWord synchronized_map_word() const;
+
+ // Set the map using release store
+ inline void synchronized_set_map(Map value);
+ inline void synchronized_set_map_word(MapWord map_word);
+
+ // Initialize the map immediately after the object is allocated.
+ // Do not use this outside Heap.
+ inline void set_map_after_allocation(
+ Map value, WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
+
+ // During garbage collection, the map word of a heap object does not
+ // necessarily contain a map pointer.
+ inline MapWord map_word() const;
+ inline void set_map_word(MapWord map_word);
+
+ // TODO(v8:7464): Once RO_SPACE is shared between isolates, this method can be
+ // removed as ReadOnlyRoots will be accessible from a global variable. For now
+ // this method exists to help remove GetIsolate/GetHeap from HeapObject, in a
+ // way that doesn't require passing Isolate/Heap down huge call chains or to
+ // places where it might not be safe to access it.
+ inline ReadOnlyRoots GetReadOnlyRoots() const;
+
+#define IS_TYPE_FUNCTION_DECL(Type) V8_INLINE bool Is##Type() const;
+ HEAP_OBJECT_TYPE_LIST(IS_TYPE_FUNCTION_DECL)
+#undef IS_TYPE_FUNCTION_DECL
+
+ V8_INLINE bool IsExternal(Isolate* isolate) const;
+
+// Oddball checks are faster when they are raw pointer comparisons, so the
+// isolate/read-only roots overloads should be preferred where possible.
+#define IS_TYPE_FUNCTION_DECL(Type, Value) \
+ V8_INLINE bool Is##Type(Isolate* isolate) const; \
+ V8_INLINE bool Is##Type(ReadOnlyRoots roots) const; \
+ V8_INLINE bool Is##Type() const;
+ ODDBALL_LIST(IS_TYPE_FUNCTION_DECL)
+#undef IS_TYPE_FUNCTION_DECL
+
+ V8_INLINE bool IsNullOrUndefined(Isolate* isolate) const;
+ V8_INLINE bool IsNullOrUndefined(ReadOnlyRoots roots) const;
+ V8_INLINE bool IsNullOrUndefined() const;
+
+#define DECL_STRUCT_PREDICATE(NAME, Name, name) V8_INLINE bool Is##Name() const;
+ STRUCT_LIST(DECL_STRUCT_PREDICATE)
+#undef DECL_STRUCT_PREDICATE
+
+ // Converts an address to a HeapObject pointer.
+ static inline HeapObject FromAddress(Address address);
+
+ // Returns the address of this HeapObject.
+ inline Address address() const { return ptr() - kHeapObjectTag; }
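+
+ // Worked example (a sketch, with kHeapObjectTag == 1): a HeapObject whose
+ // tagged pointer ptr() is 0x1001 lives at address 0x1000, and
+ // FromAddress(0x1000) reconstructs the tagged pointer 0x1001. The two
+ // functions above are inverses:
+ //
+ //   HeapObject h = HeapObject::FromAddress(addr);
+ //   DCHECK_EQ(addr, h.address());  // ptr() == addr + kHeapObjectTag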
+
+ // Iterates over pointers contained in the object (including the Map).
+ // If the iteration is not performance-critical, use the non-templatized
+ // version.
+ void Iterate(ObjectVisitor* v);
+
+ template <typename ObjectVisitor>
+ inline void IterateFast(ObjectVisitor* v);
+
+ // Iterates over all pointers contained in the object except the
+ // first map pointer. The object type is given in the first
+ // parameter. This function does not access the map pointer in the
+ // object, and so is safe to call while the map pointer is being modified.
+ // If the iteration is not performance-critical, use the non-templatized
+ // version.
+ void IterateBody(ObjectVisitor* v);
+ void IterateBody(Map map, int object_size, ObjectVisitor* v);
+
+ template <typename ObjectVisitor>
+ inline void IterateBodyFast(ObjectVisitor* v);
+
+ template <typename ObjectVisitor>
+ inline void IterateBodyFast(Map map, int object_size, ObjectVisitor* v);
+
+ // Returns true if the object contains a tagged value at the given offset.
+ // It is used for filtering out invalid slots. If the offset points outside
+ // of the object or to the map word, the result is UNDEFINED (!!!).
+ bool IsValidSlot(Map map, int offset);
+
+ // Returns the heap object's size in bytes.
+ inline int Size() const;
+
+ // Given a heap object's map pointer, returns the object's size in bytes.
+ // Useful when the map pointer field is used for other purposes.
+ // GC internal.
+ inline int SizeFromMap(Map map) const;
+
+ // Returns the field at the given offset, as a read/write Object reference.
+ // Does no checking, and is safe to use during GC while maps are invalid.
+ // Does not invoke the write barrier, so it should only be assigned to
+ // during a marking GC.
+ inline ObjectSlot RawField(int byte_offset) const;
+ static inline ObjectSlot RawField(const HeapObject obj, int offset);
+ inline MaybeObjectSlot RawMaybeWeakField(int byte_offset) const;
+ static inline MaybeObjectSlot RawMaybeWeakField(HeapObject obj, int offset);
+
+ DECL_CAST(HeapObject)
+
+ // Return the write barrier mode for this. Callers of this function
+ // must be able to present a reference to a DisallowHeapAllocation
+ // object as a sign that they are not going to use this function
+ // from code that allocates and thus invalidates the returned write
+ // barrier mode.
+ inline WriteBarrierMode GetWriteBarrierMode(
+ const DisallowHeapAllocation& promise);
+
+ // Dispatched behavior.
+ void HeapObjectShortPrint(std::ostream& os); // NOLINT
+#ifdef OBJECT_PRINT
+ void PrintHeader(std::ostream& os, const char* id); // NOLINT
+#endif
+ DECL_PRINTER(HeapObject)
+ DECL_VERIFIER(HeapObject)
+#ifdef VERIFY_HEAP
+ inline void VerifyObjectField(Isolate* isolate, int offset);
+ inline void VerifySmiField(int offset);
+ inline void VerifyMaybeObjectField(Isolate* isolate, int offset);
+
+ // Verify a pointer is a valid HeapObject pointer that points to object
+ // areas in the heap.
+ static void VerifyHeapPointer(Isolate* isolate, Object p);
+#endif
+
+ static inline AllocationAlignment RequiredAlignment(Map map);
+
+ // Whether the object needs rehashing. That is the case if the object's
+ // content depends on FLAG_hash_seed. When the object is deserialized into
+ // a heap with a different hash seed, it needs to be rehashed.
+ inline bool NeedsRehashing() const;
+
+ // Rehashing support is not implemented for all objects that need rehashing.
+ // With objects that need rehashing but cannot be rehashed, rehashing has to
+ // be disabled.
+ bool CanBeRehashed() const;
+
+ // Rehash the object based on the layout inferred from its map.
+ void RehashBasedOnMap(Isolate* isolate);
+
+ // Layout description.
+#define HEAP_OBJECT_FIELDS(V) \
+ V(kMapOffset, kTaggedSize) \
+ /* Header size. */ \
+ V(kHeaderSize, 0)
+
+ DEFINE_FIELD_OFFSET_CONSTANTS(Object::kHeaderSize, HEAP_OBJECT_FIELDS)
+#undef HEAP_OBJECT_FIELDS
+
+ STATIC_ASSERT(kMapOffset == Internals::kHeapObjectMapOffset);
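+
+ // For illustration, the HEAP_OBJECT_FIELDS/DEFINE_FIELD_OFFSET_CONSTANTS
+ // pair above roughly expands to (a sketch, assuming Object::kHeaderSize ==
+ // 0 and kTaggedSize == 8, i.e. a 64-bit build without pointer compression):
+ //
+ //   static const int kMapOffset = 0;
+ //   static const int kHeaderSize = kMapOffset + kTaggedSize;  // == 8
+ //
+ // i.e. a HeapObject is just a tagged map word; subclasses lay out their
+ // own fields starting at kHeaderSize.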
+
+ inline Address GetFieldAddress(int field_offset) const;
+
+ protected:
+ // Special-purpose constructor for subclasses that have fast paths where
+ // their ptr() is a Smi.
+ enum class AllowInlineSmiStorage { kRequireHeapObjectTag, kAllowBeingASmi };
+ inline HeapObject(Address ptr, AllowInlineSmiStorage allow_smi);
+
+ OBJECT_CONSTRUCTORS(HeapObject, Object);
+};
+
+// Helper class for objects that can never be in RO space.
+class NeverReadOnlySpaceObject {
+ public:
+ // The Heap the object was allocated in. Also used to access the Isolate.
+ static inline Heap* GetHeap(const HeapObject object);
+
+ // Convenience method to get current isolate.
+ static inline Isolate* GetIsolate(const HeapObject object);
+};
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_HEAP_OBJECT_H_
diff --git a/deps/v8/src/objects/instance-type-inl.h b/deps/v8/src/objects/instance-type-inl.h
new file mode 100644
index 0000000000..3fb21ed841
--- /dev/null
+++ b/deps/v8/src/objects/instance-type-inl.h
@@ -0,0 +1,82 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_INSTANCE_TYPE_INL_H_
+#define V8_OBJECTS_INSTANCE_TYPE_INL_H_
+
+#include "src/objects/map-inl.h"
+#include "src/utils.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+namespace InstanceTypeChecker {
+
+// Define type checkers for classes with single instance type.
+INSTANCE_TYPE_CHECKERS_SINGLE(INSTANCE_TYPE_CHECKER);
+
+#define TYPED_ARRAY_INSTANCE_TYPE_CHECKER(Type, type, TYPE, ctype) \
+ INSTANCE_TYPE_CHECKER(Fixed##Type##Array, FIXED_##TYPE##_ARRAY_TYPE)
+TYPED_ARRAYS(TYPED_ARRAY_INSTANCE_TYPE_CHECKER)
+#undef TYPED_ARRAY_INSTANCE_TYPE_CHECKER
+
+#define STRUCT_INSTANCE_TYPE_CHECKER(TYPE, Name, name) \
+ INSTANCE_TYPE_CHECKER(Name, TYPE)
+STRUCT_LIST(STRUCT_INSTANCE_TYPE_CHECKER)
+#undef STRUCT_INSTANCE_TYPE_CHECKER
+
+// Define type checkers for classes with ranges of instance types.
+#define INSTANCE_TYPE_CHECKER_RANGE(type, first_instance_type, \
+ last_instance_type) \
+ V8_INLINE bool Is##type(InstanceType instance_type) { \
+ return IsInRange(instance_type, first_instance_type, last_instance_type); \
+ }
+INSTANCE_TYPE_CHECKERS_RANGE(INSTANCE_TYPE_CHECKER_RANGE);
+#undef INSTANCE_TYPE_CHECKER_RANGE
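+
+// For example, the V(Context, FIRST_CONTEXT_TYPE, LAST_CONTEXT_TYPE) entry
+// of INSTANCE_TYPE_CHECKERS_RANGE expands through the macro above to:
+//
+//   V8_INLINE bool IsContext(InstanceType instance_type) {
+//     return IsInRange(instance_type, FIRST_CONTEXT_TYPE, LAST_CONTEXT_TYPE);
+//   }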
+
+V8_INLINE bool IsFixedArrayBase(InstanceType instance_type) {
+ return IsFixedArray(instance_type) || IsFixedDoubleArray(instance_type) ||
+ IsFixedTypedArrayBase(instance_type) || IsByteArray(instance_type) ||
+ IsBytecodeArray(instance_type);
+}
+
+V8_INLINE bool IsHeapObject(InstanceType instance_type) { return true; }
+
+V8_INLINE bool IsInternalizedString(InstanceType instance_type) {
+ STATIC_ASSERT(kNotInternalizedTag != 0);
+ return (instance_type & (kIsNotStringMask | kIsNotInternalizedMask)) ==
+ (kStringTag | kInternalizedTag);
+}
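+
+// Worked example for the check above: the mask is
+// kIsNotStringMask | kIsNotInternalizedMask == 0xff80 | 0x40 == 0xffc0 and
+// the expected tag is kStringTag | kInternalizedTag == 0x0, so
+// INTERNALIZED_STRING_TYPE (0x0000) passes while STRING_TYPE (0x0040) does
+// not. The STATIC_ASSERT guards exactly this: the check can only tell the
+// two apart if kNotInternalizedTag is a non-zero bit.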
+
+V8_INLINE bool IsJSObject(InstanceType instance_type) {
+ STATIC_ASSERT(LAST_TYPE == LAST_JS_OBJECT_TYPE);
+ return instance_type >= FIRST_JS_OBJECT_TYPE;
+}
+
+V8_INLINE bool IsJSReceiver(InstanceType instance_type) {
+ STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
+ return instance_type >= FIRST_JS_RECEIVER_TYPE;
+}
+
+} // namespace InstanceTypeChecker
+
+// TODO(v8:7786): For instance types that have a single map instance on the
+// roots, and when that map is embedded in the binary, compare against the map
+// pointer rather than looking up the instance type.
+INSTANCE_TYPE_CHECKERS(TYPE_CHECKER);
+
+#define TYPED_ARRAY_TYPE_CHECKER(Type, type, TYPE, ctype) \
+ TYPE_CHECKER(Fixed##Type##Array)
+TYPED_ARRAYS(TYPED_ARRAY_TYPE_CHECKER)
+#undef TYPED_ARRAY_TYPE_CHECKER
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_INSTANCE_TYPE_INL_H_
diff --git a/deps/v8/src/objects/instance-type.h b/deps/v8/src/objects/instance-type.h
new file mode 100644
index 0000000000..3aebb934ee
--- /dev/null
+++ b/deps/v8/src/objects/instance-type.h
@@ -0,0 +1,571 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_INSTANCE_TYPE_H_
+#define V8_OBJECTS_INSTANCE_TYPE_H_
+
+#include "src/elements-kind.h"
+#include "src/objects-definitions.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+// We use the full 16 bits of the instance_type field to encode heap object
+// instance types. All the high-order bits (bits 7-15) are cleared if the
+// object is a string, and at least one of them is set if it is not a string.
+const uint32_t kIsNotStringMask = 0xff80;
+const uint32_t kStringTag = 0x0;
+
+// Bit 6 is clear if the object is an internalized string, and set otherwise.
+// Bit 7 has to be clear as well.
+const uint32_t kIsNotInternalizedMask = 0x40;
+const uint32_t kNotInternalizedTag = 0x40;
+const uint32_t kInternalizedTag = 0x0;
+
+// If bit 7 is clear, then bit 3 indicates whether the string consists of
+// two-byte characters or one-byte characters.
+const uint32_t kStringEncodingMask = 0x8;
+const uint32_t kTwoByteStringTag = 0x0;
+const uint32_t kOneByteStringTag = 0x8;
+
+// If bit 7 is clear, the low-order 3 bits indicate the representation
+// of the string.
+const uint32_t kStringRepresentationMask = 0x07;
+enum StringRepresentationTag {
+ kSeqStringTag = 0x0,
+ kConsStringTag = 0x1,
+ kExternalStringTag = 0x2,
+ kSlicedStringTag = 0x3,
+ kThinStringTag = 0x5
+};
+const uint32_t kIsIndirectStringMask = 0x1;
+const uint32_t kIsIndirectStringTag = 0x1;
+STATIC_ASSERT((kSeqStringTag & kIsIndirectStringMask) == 0); // NOLINT
+STATIC_ASSERT((kExternalStringTag & kIsIndirectStringMask) == 0); // NOLINT
+STATIC_ASSERT((kConsStringTag & kIsIndirectStringMask) ==
+ kIsIndirectStringTag); // NOLINT
+STATIC_ASSERT((kSlicedStringTag & kIsIndirectStringMask) ==
+ kIsIndirectStringTag); // NOLINT
+STATIC_ASSERT((kThinStringTag & kIsIndirectStringMask) == kIsIndirectStringTag);
+
+// If bit 7 is clear, then bit 4 indicates whether this two-byte
+// string actually contains one-byte data.
+const uint32_t kOneByteDataHintMask = 0x10;
+const uint32_t kOneByteDataHintTag = 0x10;
+
+// If bit 7 is clear and string representation indicates an external string,
+// then bit 5 indicates whether the data pointer is cached.
+const uint32_t kUncachedExternalStringMask = 0x20;
+const uint32_t kUncachedExternalStringTag = 0x20;
+
+// A ConsString with an empty string as the right side is a candidate
+// for being shortcut by the garbage collector. We don't allocate any
+// non-flat internalized strings, so we do not shortcut them, thereby
+// avoiding turning internalized strings into (non-internalized) strings.
+// The bit-masks below contain the internalized bit as additional safety.
+// See heap.cc, mark-compact.cc and objects-visiting.cc.
+const uint32_t kShortcutTypeMask =
+ kIsNotStringMask | kIsNotInternalizedMask | kStringRepresentationMask;
+const uint32_t kShortcutTypeTag = kConsStringTag | kNotInternalizedTag;
+
+static inline bool IsShortcutCandidate(int type) {
+ return ((type & kShortcutTypeMask) == kShortcutTypeTag);
+}
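+
+// Worked example: kShortcutTypeMask == 0xff80 | 0x40 | 0x07 == 0xffc7 and
+// kShortcutTypeTag == kConsStringTag | kNotInternalizedTag == 0x41, so
+// CONS_STRING_TYPE (0x41) and CONS_ONE_BYTE_STRING_TYPE (0x49) are shortcut
+// candidates (the encoding bit 0x08 is masked out), while e.g.
+// SLICED_STRING_TYPE (0x43) is not.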
+
+enum InstanceType : uint16_t {
+ // String types.
+ INTERNALIZED_STRING_TYPE = kTwoByteStringTag | kSeqStringTag |
+ kInternalizedTag, // FIRST_PRIMITIVE_TYPE
+ ONE_BYTE_INTERNALIZED_STRING_TYPE =
+ kOneByteStringTag | kSeqStringTag | kInternalizedTag,
+ EXTERNAL_INTERNALIZED_STRING_TYPE =
+ kTwoByteStringTag | kExternalStringTag | kInternalizedTag,
+ EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE =
+ kOneByteStringTag | kExternalStringTag | kInternalizedTag,
+ EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE =
+ EXTERNAL_INTERNALIZED_STRING_TYPE | kOneByteDataHintTag |
+ kInternalizedTag,
+ UNCACHED_EXTERNAL_INTERNALIZED_STRING_TYPE =
+ EXTERNAL_INTERNALIZED_STRING_TYPE | kUncachedExternalStringTag |
+ kInternalizedTag,
+ UNCACHED_EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE =
+ EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE | kUncachedExternalStringTag |
+ kInternalizedTag,
+ UNCACHED_EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE =
+ EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE |
+ kUncachedExternalStringTag | kInternalizedTag,
+ STRING_TYPE = INTERNALIZED_STRING_TYPE | kNotInternalizedTag,
+ ONE_BYTE_STRING_TYPE =
+ ONE_BYTE_INTERNALIZED_STRING_TYPE | kNotInternalizedTag,
+ CONS_STRING_TYPE = kTwoByteStringTag | kConsStringTag | kNotInternalizedTag,
+ CONS_ONE_BYTE_STRING_TYPE =
+ kOneByteStringTag | kConsStringTag | kNotInternalizedTag,
+ SLICED_STRING_TYPE =
+ kTwoByteStringTag | kSlicedStringTag | kNotInternalizedTag,
+ SLICED_ONE_BYTE_STRING_TYPE =
+ kOneByteStringTag | kSlicedStringTag | kNotInternalizedTag,
+ EXTERNAL_STRING_TYPE =
+ EXTERNAL_INTERNALIZED_STRING_TYPE | kNotInternalizedTag,
+ EXTERNAL_ONE_BYTE_STRING_TYPE =
+ EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE | kNotInternalizedTag,
+ EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE =
+ EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE |
+ kNotInternalizedTag,
+ UNCACHED_EXTERNAL_STRING_TYPE =
+ UNCACHED_EXTERNAL_INTERNALIZED_STRING_TYPE | kNotInternalizedTag,
+ UNCACHED_EXTERNAL_ONE_BYTE_STRING_TYPE =
+ UNCACHED_EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE | kNotInternalizedTag,
+ UNCACHED_EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE =
+ UNCACHED_EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE |
+ kNotInternalizedTag,
+ THIN_STRING_TYPE = kTwoByteStringTag | kThinStringTag | kNotInternalizedTag,
+ THIN_ONE_BYTE_STRING_TYPE =
+ kOneByteStringTag | kThinStringTag | kNotInternalizedTag,
+
+ // Non-string names
+ SYMBOL_TYPE =
+ 1 + (kIsNotInternalizedMask | kUncachedExternalStringMask |
+ kOneByteDataHintMask | kStringEncodingMask |
+ kStringRepresentationMask), // FIRST_NONSTRING_TYPE, LAST_NAME_TYPE
+
+ // Other primitives (cannot contain non-map-word pointers to heap objects).
+ HEAP_NUMBER_TYPE,
+ BIGINT_TYPE,
+ ODDBALL_TYPE, // LAST_PRIMITIVE_TYPE
+
+ // Objects allocated in their own spaces (never in new space).
+ MAP_TYPE,
+ CODE_TYPE,
+
+ // "Data", objects that cannot contain non-map-word pointers to heap
+ // objects.
+ MUTABLE_HEAP_NUMBER_TYPE,
+ FOREIGN_TYPE,
+ BYTE_ARRAY_TYPE,
+ BYTECODE_ARRAY_TYPE,
+ FREE_SPACE_TYPE,
+ FIXED_INT8_ARRAY_TYPE, // FIRST_FIXED_TYPED_ARRAY_TYPE
+ FIXED_UINT8_ARRAY_TYPE,
+ FIXED_INT16_ARRAY_TYPE,
+ FIXED_UINT16_ARRAY_TYPE,
+ FIXED_INT32_ARRAY_TYPE,
+ FIXED_UINT32_ARRAY_TYPE,
+ FIXED_FLOAT32_ARRAY_TYPE,
+ FIXED_FLOAT64_ARRAY_TYPE,
+ FIXED_UINT8_CLAMPED_ARRAY_TYPE,
+ FIXED_BIGINT64_ARRAY_TYPE,
+ FIXED_BIGUINT64_ARRAY_TYPE, // LAST_FIXED_TYPED_ARRAY_TYPE
+ FIXED_DOUBLE_ARRAY_TYPE,
+ FEEDBACK_METADATA_TYPE,
+ FILLER_TYPE, // LAST_DATA_TYPE
+
+ // Structs.
+ ACCESS_CHECK_INFO_TYPE,
+ ACCESSOR_INFO_TYPE,
+ ACCESSOR_PAIR_TYPE,
+ ALIASED_ARGUMENTS_ENTRY_TYPE,
+ ALLOCATION_MEMENTO_TYPE,
+ ASM_WASM_DATA_TYPE,
+ ASYNC_GENERATOR_REQUEST_TYPE,
+ DEBUG_INFO_TYPE,
+ FUNCTION_TEMPLATE_INFO_TYPE,
+ FUNCTION_TEMPLATE_RARE_DATA_TYPE,
+ INTERCEPTOR_INFO_TYPE,
+ INTERPRETER_DATA_TYPE,
+ MODULE_INFO_ENTRY_TYPE,
+ MODULE_TYPE,
+ OBJECT_TEMPLATE_INFO_TYPE,
+ PROMISE_CAPABILITY_TYPE,
+ PROMISE_REACTION_TYPE,
+ PROTOTYPE_INFO_TYPE,
+ SCRIPT_TYPE,
+ STACK_FRAME_INFO_TYPE,
+ TUPLE2_TYPE,
+ TUPLE3_TYPE,
+ ARRAY_BOILERPLATE_DESCRIPTION_TYPE,
+ WASM_DEBUG_INFO_TYPE,
+ WASM_EXCEPTION_TAG_TYPE,
+ WASM_EXPORTED_FUNCTION_DATA_TYPE,
+
+ CALLABLE_TASK_TYPE, // FIRST_MICROTASK_TYPE
+ CALLBACK_TASK_TYPE,
+ PROMISE_FULFILL_REACTION_JOB_TASK_TYPE,
+ PROMISE_REJECT_REACTION_JOB_TASK_TYPE,
+ PROMISE_RESOLVE_THENABLE_JOB_TASK_TYPE,
+ WEAK_FACTORY_CLEANUP_JOB_TASK_TYPE, // LAST_MICROTASK_TYPE
+
+ ALLOCATION_SITE_TYPE,
+ EMBEDDER_DATA_ARRAY_TYPE,
+ // FixedArrays.
+ FIXED_ARRAY_TYPE, // FIRST_FIXED_ARRAY_TYPE
+ OBJECT_BOILERPLATE_DESCRIPTION_TYPE,
+ HASH_TABLE_TYPE, // FIRST_HASH_TABLE_TYPE
+ ORDERED_HASH_MAP_TYPE, // FIRST_DICTIONARY_TYPE
+ ORDERED_HASH_SET_TYPE,
+ ORDERED_NAME_DICTIONARY_TYPE,
+ NAME_DICTIONARY_TYPE,
+ GLOBAL_DICTIONARY_TYPE,
+ NUMBER_DICTIONARY_TYPE,
+ SIMPLE_NUMBER_DICTIONARY_TYPE, // LAST_DICTIONARY_TYPE
+ STRING_TABLE_TYPE,
+ EPHEMERON_HASH_TABLE_TYPE, // LAST_HASH_TABLE_TYPE
+ SCOPE_INFO_TYPE,
+ SCRIPT_CONTEXT_TABLE_TYPE, // LAST_FIXED_ARRAY_TYPE,
+
+ // Contexts.
+ AWAIT_CONTEXT_TYPE, // FIRST_CONTEXT_TYPE
+ BLOCK_CONTEXT_TYPE,
+ CATCH_CONTEXT_TYPE,
+ DEBUG_EVALUATE_CONTEXT_TYPE,
+ EVAL_CONTEXT_TYPE,
+ FUNCTION_CONTEXT_TYPE,
+ MODULE_CONTEXT_TYPE,
+ NATIVE_CONTEXT_TYPE,
+ SCRIPT_CONTEXT_TYPE,
+ WITH_CONTEXT_TYPE, // LAST_CONTEXT_TYPE
+
+ WEAK_FIXED_ARRAY_TYPE, // FIRST_WEAK_FIXED_ARRAY_TYPE
+ TRANSITION_ARRAY_TYPE, // LAST_WEAK_FIXED_ARRAY_TYPE
+
+ // Misc.
+ CALL_HANDLER_INFO_TYPE,
+ CELL_TYPE,
+ CODE_DATA_CONTAINER_TYPE,
+ DESCRIPTOR_ARRAY_TYPE,
+ FEEDBACK_CELL_TYPE,
+ FEEDBACK_VECTOR_TYPE,
+ LOAD_HANDLER_TYPE,
+ PREPARSE_DATA_TYPE,
+ PROPERTY_ARRAY_TYPE,
+ PROPERTY_CELL_TYPE,
+ SHARED_FUNCTION_INFO_TYPE,
+ SMALL_ORDERED_HASH_MAP_TYPE,
+ SMALL_ORDERED_HASH_SET_TYPE,
+ SMALL_ORDERED_NAME_DICTIONARY_TYPE,
+ STORE_HANDLER_TYPE,
+ UNCOMPILED_DATA_WITHOUT_PREPARSE_DATA_TYPE,
+ UNCOMPILED_DATA_WITH_PREPARSE_DATA_TYPE,
+ WEAK_ARRAY_LIST_TYPE,
+
+ // All the following types are subtypes of JSReceiver, which corresponds to
+ // objects in the JS sense. The last two types in this range are the two
+ // forms of function. This organization enables using the same compares for
+ // checking the JS_RECEIVER and the NONCALLABLE_JS_OBJECT range.
+ // Some of the following instance types are exposed in v8.h, so as not to
+ // unnecessarily change the ABI when we introduce new instance types in the
+ // future, we leave some space between instance types.
+ JS_PROXY_TYPE = 0x0400, // FIRST_JS_RECEIVER_TYPE
+ JS_GLOBAL_OBJECT_TYPE, // FIRST_JS_OBJECT_TYPE
+ JS_GLOBAL_PROXY_TYPE,
+ JS_MODULE_NAMESPACE_TYPE,
+ // Like JS_API_OBJECT_TYPE, but requires access checks and/or has
+ // interceptors.
+ JS_SPECIAL_API_OBJECT_TYPE = 0x0410, // LAST_SPECIAL_RECEIVER_TYPE
+ JS_VALUE_TYPE, // LAST_CUSTOM_ELEMENTS_RECEIVER
+ // Like JS_OBJECT_TYPE, but created from API function.
+ JS_API_OBJECT_TYPE = 0x0420,
+ JS_OBJECT_TYPE,
+ JS_ARGUMENTS_TYPE,
+ JS_ARRAY_BUFFER_TYPE,
+ JS_ARRAY_ITERATOR_TYPE,
+ JS_ARRAY_TYPE,
+ JS_ASYNC_FROM_SYNC_ITERATOR_TYPE,
+ JS_ASYNC_FUNCTION_OBJECT_TYPE,
+ JS_ASYNC_GENERATOR_OBJECT_TYPE,
+ JS_CONTEXT_EXTENSION_OBJECT_TYPE,
+ JS_DATE_TYPE,
+ JS_ERROR_TYPE,
+ JS_GENERATOR_OBJECT_TYPE,
+ JS_MAP_TYPE,
+ JS_MAP_KEY_ITERATOR_TYPE,
+ JS_MAP_KEY_VALUE_ITERATOR_TYPE,
+ JS_MAP_VALUE_ITERATOR_TYPE,
+ JS_MESSAGE_OBJECT_TYPE,
+ JS_PROMISE_TYPE,
+ JS_REGEXP_TYPE,
+ JS_REGEXP_STRING_ITERATOR_TYPE,
+ JS_SET_TYPE,
+ JS_SET_KEY_VALUE_ITERATOR_TYPE,
+ JS_SET_VALUE_ITERATOR_TYPE,
+ JS_STRING_ITERATOR_TYPE,
+ JS_WEAK_CELL_TYPE,
+ JS_WEAK_REF_TYPE,
+ JS_WEAK_FACTORY_CLEANUP_ITERATOR_TYPE,
+ JS_WEAK_FACTORY_TYPE,
+ JS_WEAK_MAP_TYPE,
+ JS_WEAK_SET_TYPE,
+
+ JS_TYPED_ARRAY_TYPE,
+ JS_DATA_VIEW_TYPE,
+
+#ifdef V8_INTL_SUPPORT
+ JS_INTL_V8_BREAK_ITERATOR_TYPE,
+ JS_INTL_COLLATOR_TYPE,
+ JS_INTL_DATE_TIME_FORMAT_TYPE,
+ JS_INTL_LIST_FORMAT_TYPE,
+ JS_INTL_LOCALE_TYPE,
+ JS_INTL_NUMBER_FORMAT_TYPE,
+ JS_INTL_PLURAL_RULES_TYPE,
+ JS_INTL_RELATIVE_TIME_FORMAT_TYPE,
+ JS_INTL_SEGMENT_ITERATOR_TYPE,
+ JS_INTL_SEGMENTER_TYPE,
+#endif // V8_INTL_SUPPORT
+
+ WASM_EXCEPTION_TYPE,
+ WASM_GLOBAL_TYPE,
+ WASM_INSTANCE_TYPE,
+ WASM_MEMORY_TYPE,
+ WASM_MODULE_TYPE,
+ WASM_TABLE_TYPE,
+ JS_BOUND_FUNCTION_TYPE,
+ JS_FUNCTION_TYPE, // LAST_JS_OBJECT_TYPE, LAST_JS_RECEIVER_TYPE
+
+ // Pseudo-types
+ FIRST_TYPE = 0x0,
+ LAST_TYPE = JS_FUNCTION_TYPE,
+ FIRST_STRING_TYPE = FIRST_TYPE,
+ FIRST_NAME_TYPE = FIRST_STRING_TYPE,
+ LAST_NAME_TYPE = SYMBOL_TYPE,
+ FIRST_UNIQUE_NAME_TYPE = INTERNALIZED_STRING_TYPE,
+ LAST_UNIQUE_NAME_TYPE = SYMBOL_TYPE,
+ FIRST_NONSTRING_TYPE = SYMBOL_TYPE,
+ FIRST_PRIMITIVE_TYPE = FIRST_NAME_TYPE,
+ LAST_PRIMITIVE_TYPE = ODDBALL_TYPE,
+ FIRST_FUNCTION_TYPE = JS_BOUND_FUNCTION_TYPE,
+ LAST_FUNCTION_TYPE = JS_FUNCTION_TYPE,
+ // Boundaries for testing if a given HeapObject is a subclass of FixedArray.
+ FIRST_FIXED_ARRAY_TYPE = FIXED_ARRAY_TYPE,
+ LAST_FIXED_ARRAY_TYPE = SCRIPT_CONTEXT_TABLE_TYPE,
+ // Boundaries for testing if a given HeapObject is a subclass of HashTable.
+ FIRST_HASH_TABLE_TYPE = HASH_TABLE_TYPE,
+ LAST_HASH_TABLE_TYPE = EPHEMERON_HASH_TABLE_TYPE,
+ // Boundaries for testing if a given HeapObject is a subclass of Dictionary.
+ FIRST_DICTIONARY_TYPE = ORDERED_HASH_MAP_TYPE,
+ LAST_DICTIONARY_TYPE = SIMPLE_NUMBER_DICTIONARY_TYPE,
+ // Boundaries for testing if a given HeapObject is a subclass of
+ // WeakFixedArray.
+ FIRST_WEAK_FIXED_ARRAY_TYPE = WEAK_FIXED_ARRAY_TYPE,
+ LAST_WEAK_FIXED_ARRAY_TYPE = TRANSITION_ARRAY_TYPE,
+ // Boundaries for testing if a given HeapObject is a Context.
+ FIRST_CONTEXT_TYPE = AWAIT_CONTEXT_TYPE,
+ LAST_CONTEXT_TYPE = WITH_CONTEXT_TYPE,
+ // Boundaries for testing if a given HeapObject is a subclass of Microtask.
+ FIRST_MICROTASK_TYPE = CALLABLE_TASK_TYPE,
+ LAST_MICROTASK_TYPE = WEAK_FACTORY_CLEANUP_JOB_TASK_TYPE,
+ // Boundaries for testing for a fixed typed array.
+ FIRST_FIXED_TYPED_ARRAY_TYPE = FIXED_INT8_ARRAY_TYPE,
+ LAST_FIXED_TYPED_ARRAY_TYPE = FIXED_BIGUINT64_ARRAY_TYPE,
+ // Boundary for promotion to old space.
+ LAST_DATA_TYPE = FILLER_TYPE,
+ // Boundary for objects represented as JSReceiver (i.e. JSObject or JSProxy).
+ // Note that there is no range for JSObject or JSProxy, since their subtypes
+ // are not contiguous in this enum! The enum ranges instead reflect the
+ // external class names, where proxies are treated as either ordinary objects
+ // or functions.
+ FIRST_JS_RECEIVER_TYPE = JS_PROXY_TYPE,
+ LAST_JS_RECEIVER_TYPE = LAST_TYPE,
+ // Boundaries for testing the types represented as JSObject.
+ FIRST_JS_OBJECT_TYPE = JS_GLOBAL_OBJECT_TYPE,
+ LAST_JS_OBJECT_TYPE = LAST_TYPE,
+ // Boundary for testing JSReceivers that need special property lookup
+ // handling.
+ LAST_SPECIAL_RECEIVER_TYPE = JS_SPECIAL_API_OBJECT_TYPE,
+ // Boundary case for testing JSReceivers that may have elements while having
+ // an empty fixed array as elements backing store. This is true for string
+ // wrappers.
+ LAST_CUSTOM_ELEMENTS_RECEIVER = JS_VALUE_TYPE,
+
+ FIRST_SET_ITERATOR_TYPE = JS_SET_KEY_VALUE_ITERATOR_TYPE,
+ LAST_SET_ITERATOR_TYPE = JS_SET_VALUE_ITERATOR_TYPE,
+
+ FIRST_MAP_ITERATOR_TYPE = JS_MAP_KEY_ITERATOR_TYPE,
+ LAST_MAP_ITERATOR_TYPE = JS_MAP_VALUE_ITERATOR_TYPE,
+};
+
+// This constant is defined outside of the InstanceType enum because the
+// string instance types are sparse and no string actually has this instance
+// type. But it's still useful for range checks to have such a value.
+constexpr InstanceType LAST_STRING_TYPE =
+ static_cast<InstanceType>(FIRST_NONSTRING_TYPE - 1);
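+
+// This keeps string testing a pure range check; the String entry in
+// INSTANCE_TYPE_CHECKERS_RANGE below generates, in effect:
+//
+//   V8_INLINE bool IsString(InstanceType instance_type) {
+//     return IsInRange(instance_type, FIRST_STRING_TYPE, LAST_STRING_TYPE);
+//   }
+//
+// which, because FIRST_STRING_TYPE == 0, amounts to
+// instance_type < FIRST_NONSTRING_TYPE.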
+
+STATIC_ASSERT((FIRST_NONSTRING_TYPE & kIsNotStringMask) != kStringTag);
+STATIC_ASSERT(JS_OBJECT_TYPE == Internals::kJSObjectType);
+STATIC_ASSERT(JS_API_OBJECT_TYPE == Internals::kJSApiObjectType);
+STATIC_ASSERT(JS_SPECIAL_API_OBJECT_TYPE == Internals::kJSSpecialApiObjectType);
+STATIC_ASSERT(FIRST_NONSTRING_TYPE == Internals::kFirstNonstringType);
+STATIC_ASSERT(ODDBALL_TYPE == Internals::kOddballType);
+STATIC_ASSERT(FOREIGN_TYPE == Internals::kForeignType);
+
+V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
+ InstanceType instance_type);
+
+// List of object types that have a single unique instance type.
+#define INSTANCE_TYPE_CHECKERS_SINGLE_BASE(V) \
+ V(AllocationSite, ALLOCATION_SITE_TYPE) \
+ V(BigInt, BIGINT_TYPE) \
+ V(ObjectBoilerplateDescription, OBJECT_BOILERPLATE_DESCRIPTION_TYPE) \
+ V(BreakPoint, TUPLE2_TYPE) \
+ V(BreakPointInfo, TUPLE2_TYPE) \
+ V(ByteArray, BYTE_ARRAY_TYPE) \
+ V(BytecodeArray, BYTECODE_ARRAY_TYPE) \
+ V(CallHandlerInfo, CALL_HANDLER_INFO_TYPE) \
+ V(Cell, CELL_TYPE) \
+ V(Code, CODE_TYPE) \
+ V(CodeDataContainer, CODE_DATA_CONTAINER_TYPE) \
+ V(CoverageInfo, FIXED_ARRAY_TYPE) \
+ V(DescriptorArray, DESCRIPTOR_ARRAY_TYPE) \
+ V(EmbedderDataArray, EMBEDDER_DATA_ARRAY_TYPE) \
+ V(EphemeronHashTable, EPHEMERON_HASH_TABLE_TYPE) \
+ V(FeedbackCell, FEEDBACK_CELL_TYPE) \
+ V(FeedbackMetadata, FEEDBACK_METADATA_TYPE) \
+ V(FeedbackVector, FEEDBACK_VECTOR_TYPE) \
+ V(FixedArrayExact, FIXED_ARRAY_TYPE) \
+ V(FixedDoubleArray, FIXED_DOUBLE_ARRAY_TYPE) \
+ V(Foreign, FOREIGN_TYPE) \
+ V(FreeSpace, FREE_SPACE_TYPE) \
+ V(GlobalDictionary, GLOBAL_DICTIONARY_TYPE) \
+ V(HeapNumber, HEAP_NUMBER_TYPE) \
+ V(JSArgumentsObject, JS_ARGUMENTS_TYPE) \
+ V(JSArgumentsObjectWithLength, JS_ARGUMENTS_TYPE) \
+ V(JSArray, JS_ARRAY_TYPE) \
+ V(JSArrayBuffer, JS_ARRAY_BUFFER_TYPE) \
+ V(JSArrayIterator, JS_ARRAY_ITERATOR_TYPE) \
+ V(JSAsyncFromSyncIterator, JS_ASYNC_FROM_SYNC_ITERATOR_TYPE) \
+ V(JSAsyncFunctionObject, JS_ASYNC_FUNCTION_OBJECT_TYPE) \
+ V(JSAsyncGeneratorObject, JS_ASYNC_GENERATOR_OBJECT_TYPE) \
+ V(JSBoundFunction, JS_BOUND_FUNCTION_TYPE) \
+ V(JSContextExtensionObject, JS_CONTEXT_EXTENSION_OBJECT_TYPE) \
+ V(JSDataView, JS_DATA_VIEW_TYPE) \
+ V(JSDate, JS_DATE_TYPE) \
+ V(JSError, JS_ERROR_TYPE) \
+ V(JSFunction, JS_FUNCTION_TYPE) \
+ V(JSGlobalObject, JS_GLOBAL_OBJECT_TYPE) \
+ V(JSGlobalProxy, JS_GLOBAL_PROXY_TYPE) \
+ V(JSMap, JS_MAP_TYPE) \
+ V(JSMessageObject, JS_MESSAGE_OBJECT_TYPE) \
+ V(JSModuleNamespace, JS_MODULE_NAMESPACE_TYPE) \
+ V(JSPromise, JS_PROMISE_TYPE) \
+ V(JSProxy, JS_PROXY_TYPE) \
+ V(JSRegExp, JS_REGEXP_TYPE) \
+ V(JSRegExpResult, JS_ARRAY_TYPE) \
+ V(JSRegExpStringIterator, JS_REGEXP_STRING_ITERATOR_TYPE) \
+ V(JSSet, JS_SET_TYPE) \
+ V(JSStringIterator, JS_STRING_ITERATOR_TYPE) \
+ V(JSTypedArray, JS_TYPED_ARRAY_TYPE) \
+ V(JSValue, JS_VALUE_TYPE) \
+ V(JSWeakFactory, JS_WEAK_FACTORY_TYPE) \
+ V(JSWeakFactoryCleanupIterator, JS_WEAK_FACTORY_CLEANUP_ITERATOR_TYPE) \
+ V(JSWeakCell, JS_WEAK_CELL_TYPE) \
+ V(JSWeakMap, JS_WEAK_MAP_TYPE) \
+ V(JSWeakRef, JS_WEAK_REF_TYPE) \
+ V(JSWeakSet, JS_WEAK_SET_TYPE) \
+ V(LoadHandler, LOAD_HANDLER_TYPE) \
+ V(Map, MAP_TYPE) \
+ V(MutableHeapNumber, MUTABLE_HEAP_NUMBER_TYPE) \
+ V(NameDictionary, NAME_DICTIONARY_TYPE) \
+ V(NativeContext, NATIVE_CONTEXT_TYPE) \
+ V(NumberDictionary, NUMBER_DICTIONARY_TYPE) \
+ V(Oddball, ODDBALL_TYPE) \
+ V(OrderedHashMap, ORDERED_HASH_MAP_TYPE) \
+ V(OrderedHashSet, ORDERED_HASH_SET_TYPE) \
+ V(OrderedNameDictionary, ORDERED_NAME_DICTIONARY_TYPE) \
+ V(PreparseData, PREPARSE_DATA_TYPE) \
+ V(PropertyArray, PROPERTY_ARRAY_TYPE) \
+ V(PropertyCell, PROPERTY_CELL_TYPE) \
+ V(PropertyDescriptorObject, FIXED_ARRAY_TYPE) \
+ V(ScopeInfo, SCOPE_INFO_TYPE) \
+ V(ScriptContextTable, SCRIPT_CONTEXT_TABLE_TYPE) \
+ V(SharedFunctionInfo, SHARED_FUNCTION_INFO_TYPE) \
+ V(SimpleNumberDictionary, SIMPLE_NUMBER_DICTIONARY_TYPE) \
+ V(SmallOrderedHashMap, SMALL_ORDERED_HASH_MAP_TYPE) \
+ V(SmallOrderedHashSet, SMALL_ORDERED_HASH_SET_TYPE) \
+ V(SmallOrderedNameDictionary, SMALL_ORDERED_NAME_DICTIONARY_TYPE) \
+ V(SourcePositionTableWithFrameCache, TUPLE2_TYPE) \
+ V(StoreHandler, STORE_HANDLER_TYPE) \
+ V(StringTable, STRING_TABLE_TYPE) \
+ V(Symbol, SYMBOL_TYPE) \
+ V(TemplateObjectDescription, TUPLE2_TYPE) \
+ V(TransitionArray, TRANSITION_ARRAY_TYPE) \
+ V(UncompiledDataWithoutPreparseData, \
+ UNCOMPILED_DATA_WITHOUT_PREPARSE_DATA_TYPE) \
+ V(UncompiledDataWithPreparseData, UNCOMPILED_DATA_WITH_PREPARSE_DATA_TYPE) \
+ V(WasmExceptionObject, WASM_EXCEPTION_TYPE) \
+ V(WasmGlobalObject, WASM_GLOBAL_TYPE) \
+ V(WasmInstanceObject, WASM_INSTANCE_TYPE) \
+ V(WasmMemoryObject, WASM_MEMORY_TYPE) \
+ V(WasmModuleObject, WASM_MODULE_TYPE) \
+ V(WasmTableObject, WASM_TABLE_TYPE) \
+ V(WeakArrayList, WEAK_ARRAY_LIST_TYPE)
+#ifdef V8_INTL_SUPPORT
+
+#define INSTANCE_TYPE_CHECKERS_SINGLE(V) \
+ INSTANCE_TYPE_CHECKERS_SINGLE_BASE(V) \
+ V(JSV8BreakIterator, JS_INTL_V8_BREAK_ITERATOR_TYPE) \
+ V(JSCollator, JS_INTL_COLLATOR_TYPE) \
+ V(JSDateTimeFormat, JS_INTL_DATE_TIME_FORMAT_TYPE) \
+ V(JSListFormat, JS_INTL_LIST_FORMAT_TYPE) \
+ V(JSLocale, JS_INTL_LOCALE_TYPE) \
+ V(JSNumberFormat, JS_INTL_NUMBER_FORMAT_TYPE) \
+ V(JSPluralRules, JS_INTL_PLURAL_RULES_TYPE) \
+ V(JSRelativeTimeFormat, JS_INTL_RELATIVE_TIME_FORMAT_TYPE) \
+ V(JSSegmentIterator, JS_INTL_SEGMENT_ITERATOR_TYPE) \
+ V(JSSegmenter, JS_INTL_SEGMENTER_TYPE)
+
+#else
+
+#define INSTANCE_TYPE_CHECKERS_SINGLE(V) INSTANCE_TYPE_CHECKERS_SINGLE_BASE(V)
+
+#endif // V8_INTL_SUPPORT
+
+#define INSTANCE_TYPE_CHECKERS_RANGE(V) \
+ V(Context, FIRST_CONTEXT_TYPE, LAST_CONTEXT_TYPE) \
+ V(Dictionary, FIRST_DICTIONARY_TYPE, LAST_DICTIONARY_TYPE) \
+ V(FixedArray, FIRST_FIXED_ARRAY_TYPE, LAST_FIXED_ARRAY_TYPE) \
+ V(FixedTypedArrayBase, FIRST_FIXED_TYPED_ARRAY_TYPE, \
+ LAST_FIXED_TYPED_ARRAY_TYPE) \
+ V(HashTable, FIRST_HASH_TABLE_TYPE, LAST_HASH_TABLE_TYPE) \
+ V(JSMapIterator, FIRST_MAP_ITERATOR_TYPE, LAST_MAP_ITERATOR_TYPE) \
+ V(JSSetIterator, FIRST_SET_ITERATOR_TYPE, LAST_SET_ITERATOR_TYPE) \
+ V(Microtask, FIRST_MICROTASK_TYPE, LAST_MICROTASK_TYPE) \
+ V(Name, FIRST_NAME_TYPE, LAST_NAME_TYPE) \
+ V(String, FIRST_STRING_TYPE, LAST_STRING_TYPE) \
+ V(WeakFixedArray, FIRST_WEAK_FIXED_ARRAY_TYPE, LAST_WEAK_FIXED_ARRAY_TYPE)
+
+#define INSTANCE_TYPE_CHECKERS_CUSTOM(V) \
+ V(FixedArrayBase) \
+ V(InternalizedString) \
+ V(JSObject) \
+ V(JSReceiver)
+
+#define INSTANCE_TYPE_CHECKERS(V) \
+ INSTANCE_TYPE_CHECKERS_SINGLE(V) \
+ INSTANCE_TYPE_CHECKERS_RANGE(V) \
+ INSTANCE_TYPE_CHECKERS_CUSTOM(V)
+
+namespace InstanceTypeChecker {
+#define IS_TYPE_FUNCTION_DECL(Type, ...) \
+ V8_INLINE bool Is##Type(InstanceType instance_type);
+
+INSTANCE_TYPE_CHECKERS(IS_TYPE_FUNCTION_DECL)
+
+#define TYPED_ARRAY_IS_TYPE_FUNCTION_DECL(Type, ...) \
+ IS_TYPE_FUNCTION_DECL(Fixed##Type##Array)
+TYPED_ARRAYS(TYPED_ARRAY_IS_TYPE_FUNCTION_DECL)
+#undef TYPED_ARRAY_IS_TYPE_FUNCTION_DECL
+
+#define STRUCT_IS_TYPE_FUNCTION_DECL(NAME, Name, name) \
+ IS_TYPE_FUNCTION_DECL(Name)
+STRUCT_LIST(STRUCT_IS_TYPE_FUNCTION_DECL)
+#undef STRUCT_IS_TYPE_FUNCTION_DECL
+
+#undef IS_TYPE_FUNCTION_DECL
+} // namespace InstanceTypeChecker
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_INSTANCE_TYPE_H_
diff --git a/deps/v8/src/objects/intl-objects-inl.h b/deps/v8/src/objects/intl-objects-inl.h
deleted file mode 100644
index 62b059ea3c..0000000000
--- a/deps/v8/src/objects/intl-objects-inl.h
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_OBJECTS_INTL_OBJECTS_INL_H_
-#define V8_OBJECTS_INTL_OBJECTS_INL_H_
-
-#include "src/objects/intl-objects.h"
-
-#include "src/objects-inl.h"
-
-namespace v8 {
-namespace internal {
-
-inline Intl::Type Intl::TypeFromInt(int type_int) {
- STATIC_ASSERT(Intl::Type::kNumberFormat == 0);
- DCHECK_LE(Intl::Type::kNumberFormat, type_int);
- DCHECK_GT(Intl::Type::kTypeCount, type_int);
- return static_cast<Intl::Type>(type_int);
-}
-
-inline Intl::Type Intl::TypeFromSmi(Smi* type) {
- return TypeFromInt(Smi::ToInt(type));
-}
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_OBJECTS_INTL_OBJECTS_INL_H_
diff --git a/deps/v8/src/objects/intl-objects.cc b/deps/v8/src/objects/intl-objects.cc
index dcacb4dd2f..dfabb29af9 100644
--- a/deps/v8/src/objects/intl-objects.cc
+++ b/deps/v8/src/objects/intl-objects.cc
@@ -7,7 +7,6 @@
#endif // V8_INTL_SUPPORT
#include "src/objects/intl-objects.h"
-#include "src/objects/intl-objects-inl.h"
#include <algorithm>
#include <memory>
@@ -17,31 +16,382 @@
#include "src/api-inl.h"
#include "src/global-handles.h"
#include "src/heap/factory.h"
-#include "src/intl.h"
#include "src/isolate.h"
#include "src/objects-inl.h"
#include "src/objects/js-collator-inl.h"
#include "src/objects/js-date-time-format-inl.h"
+#include "src/objects/js-locale-inl.h"
#include "src/objects/js-number-format-inl.h"
#include "src/objects/string.h"
#include "src/property-descriptor.h"
+#include "src/string-case.h"
+#include "unicode/basictz.h"
#include "unicode/brkiter.h"
+#include "unicode/calendar.h"
#include "unicode/coll.h"
#include "unicode/decimfmt.h"
#include "unicode/locid.h"
+#include "unicode/normalizer2.h"
#include "unicode/numfmt.h"
#include "unicode/numsys.h"
-#include "unicode/regex.h"
-#include "unicode/smpdtfmt.h"
#include "unicode/timezone.h"
-#include "unicode/ucol.h"
-#include "unicode/ures.h"
-#include "unicode/uvernum.h"
-#include "unicode/uversion.h"
+#include "unicode/ustring.h"
+#include "unicode/uvernum.h" // U_ICU_VERSION_MAJOR_NUM
+
+#define XSTR(s) STR(s)
+#define STR(s) #s
+static_assert(
+ V8_MINIMUM_ICU_VERSION <= U_ICU_VERSION_MAJOR_NUM,
+ "v8 is required to build with ICU " XSTR(V8_MINIMUM_ICU_VERSION) " and up");
+#undef STR
+#undef XSTR
namespace v8 {
namespace internal {
+namespace {
+inline bool IsASCIIUpper(uint16_t ch) { return ch >= 'A' && ch <= 'Z'; }
+
+const uint8_t kToLower[256] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B,
+ 0x0C, 0x0D, 0x0E, 0x0F, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+ 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F, 0x20, 0x21, 0x22, 0x23,
+ 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2A, 0x2B, 0x2C, 0x2D, 0x2E, 0x2F,
+ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3A, 0x3B,
+ 0x3C, 0x3D, 0x3E, 0x3F, 0x40, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
+ 0x68, 0x69, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x6F, 0x70, 0x71, 0x72, 0x73,
+ 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7A, 0x5B, 0x5C, 0x5D, 0x5E, 0x5F,
+ 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6A, 0x6B,
+ 0x6C, 0x6D, 0x6E, 0x6F, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77,
+ 0x78, 0x79, 0x7A, 0x7B, 0x7C, 0x7D, 0x7E, 0x7F, 0x80, 0x81, 0x82, 0x83,
+ 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8A, 0x8B, 0x8C, 0x8D, 0x8E, 0x8F,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9A, 0x9B,
+ 0x9C, 0x9D, 0x9E, 0x9F, 0xA0, 0xA1, 0xA2, 0xA3, 0xA4, 0xA5, 0xA6, 0xA7,
+ 0xA8, 0xA9, 0xAA, 0xAB, 0xAC, 0xAD, 0xAE, 0xAF, 0xB0, 0xB1, 0xB2, 0xB3,
+ 0xB4, 0xB5, 0xB6, 0xB7, 0xB8, 0xB9, 0xBA, 0xBB, 0xBC, 0xBD, 0xBE, 0xBF,
+ 0xE0, 0xE1, 0xE2, 0xE3, 0xE4, 0xE5, 0xE6, 0xE7, 0xE8, 0xE9, 0xEA, 0xEB,
+ 0xEC, 0xED, 0xEE, 0xEF, 0xF0, 0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xD7,
+ 0xF8, 0xF9, 0xFA, 0xFB, 0xFC, 0xFD, 0xFE, 0xDF, 0xE0, 0xE1, 0xE2, 0xE3,
+ 0xE4, 0xE5, 0xE6, 0xE7, 0xE8, 0xE9, 0xEA, 0xEB, 0xEC, 0xED, 0xEE, 0xEF,
+ 0xF0, 0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7, 0xF8, 0xF9, 0xFA, 0xFB,
+ 0xFC, 0xFD, 0xFE, 0xFF,
+};
+
+inline uint16_t ToLatin1Lower(uint16_t ch) {
+ return static_cast<uint16_t>(kToLower[ch]);
+}
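+
+// A few worked entries from the table above: kToLower[0x41] ('A') == 0x61
+// ('a') and kToLower[0xC9] ('É') == 0xE9 ('é'), while 0xD7 (multiplication
+// sign) and 0xDF (sharp-s) map to themselves: the former has no case and the
+// latter is already lower case.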
+
+inline uint16_t ToASCIIUpper(uint16_t ch) {
+ return ch & ~((ch >= 'a' && ch <= 'z') << 5);
+}
+
+// Does not work for U+00DF (sharp-s), U+00B5 (micro sign), or U+00FF (ÿ).
+inline uint16_t ToLatin1Upper(uint16_t ch) {
+ DCHECK(ch != 0xDF && ch != 0xB5 && ch != 0xFF);
+ return ch &
+ ~(((ch >= 'a' && ch <= 'z') || (((ch & 0xE0) == 0xE0) && ch != 0xF7))
+ << 5);
+}
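+
+// The bit trick in the two helpers above: for Latin-1 characters with a
+// one-byte upper-case form, the lower-case code point is the upper-case one
+// with bit 5 (0x20) set, so conditionally clearing that bit upper-cases,
+// e.g. 'a' (0x61) -> 'A' (0x41) and 0xE9 (é) -> 0xC9 (É). 0xF7 (÷) is
+// excluded because it has no case, and the DCHECK rules out the three
+// characters whose upper-case form leaves Latin-1 or grows (ß, µ, ÿ).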
+
+template <typename Char>
+bool ToUpperFastASCII(const Vector<const Char>& src,
+ Handle<SeqOneByteString> result) {
+ // Do a faster loop for the case where all the characters are ASCII.
+ uint16_t ored = 0;
+ int32_t index = 0;
+ for (auto it = src.begin(); it != src.end(); ++it) {
+ uint16_t ch = static_cast<uint16_t>(*it);
+ ored |= ch;
+ result->SeqOneByteStringSet(index++, ToASCIIUpper(ch));
+ }
+ return !(ored & ~0x7F);
+}
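+
+// The |ored| accumulator above is a branch-free way to detect non-ASCII
+// input: OR-ing every character and testing (ored & ~0x7F) once at the end
+// tells us whether any character had a bit above 0x7F. E.g. for "ab\xFF" the
+// OR picks up the high bits of 0xFF, the function returns false, and the
+// caller discards the partially converted buffer and takes a slower path.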
+
+const uint16_t sharp_s = 0xDF;
+
+template <typename Char>
+bool ToUpperOneByte(const Vector<const Char>& src, uint8_t* dest,
+ int* sharp_s_count) {
+ // Still a pretty fast path for input with non-ASCII Latin-1 characters.
+
+ // There are two special cases.
+ // 1. U+00B5 and U+00FF are mapped to characters beyond U+00FF.
+ // 2. Lower-case sharp-S converts to "SS" (two characters).
+ *sharp_s_count = 0;
+ for (auto it = src.begin(); it != src.end(); ++it) {
+ uint16_t ch = static_cast<uint16_t>(*it);
+ if (V8_UNLIKELY(ch == sharp_s)) {
+ ++(*sharp_s_count);
+ continue;
+ }
+ if (V8_UNLIKELY(ch == 0xB5 || ch == 0xFF)) {
+ // Since this upper-cased character does not fit in an 8-bit string, we
+ // need to take the 16-bit path.
+ return false;
+ }
+ *dest++ = ToLatin1Upper(ch);
+ }
+
+ return true;
+}
+
+template <typename Char>
+void ToUpperWithSharpS(const Vector<const Char>& src,
+ Handle<SeqOneByteString> result) {
+ int32_t dest_index = 0;
+ for (auto it = src.begin(); it != src.end(); ++it) {
+ uint16_t ch = static_cast<uint16_t>(*it);
+ if (ch == sharp_s) {
+ result->SeqOneByteStringSet(dest_index++, 'S');
+ result->SeqOneByteStringSet(dest_index++, 'S');
+ } else {
+ result->SeqOneByteStringSet(dest_index++, ToLatin1Upper(ch));
+ }
+ }
+}
+
+inline int FindFirstUpperOrNonAscii(String s, int length) {
+ for (int index = 0; index < length; ++index) {
+ uint16_t ch = s->Get(index);
+ if (V8_UNLIKELY(IsASCIIUpper(ch) || ch & ~0x7F)) {
+ return index;
+ }
+ }
+ return length;
+}
+
+const UChar* GetUCharBufferFromFlat(const String::FlatContent& flat,
+ std::unique_ptr<uc16[]>* dest,
+ int32_t length) {
+ DCHECK(flat.IsFlat());
+ if (flat.IsOneByte()) {
+ if (!*dest) {
+ dest->reset(NewArray<uc16>(length));
+ CopyChars(dest->get(), flat.ToOneByteVector().start(), length);
+ }
+ return reinterpret_cast<const UChar*>(dest->get());
+ } else {
+ return reinterpret_cast<const UChar*>(flat.ToUC16Vector().start());
+ }
+}
+
+template <typename T>
+MaybeHandle<T> New(Isolate* isolate, Handle<JSFunction> constructor,
+ Handle<Object> locales, Handle<Object> options) {
+ Handle<JSObject> result;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, result,
+ JSObject::New(constructor, constructor, Handle<AllocationSite>::null()),
+ T);
+ return T::Initialize(isolate, Handle<T>::cast(result), locales, options);
+}
+} // namespace
+
+const uint8_t* Intl::ToLatin1LowerTable() { return &kToLower[0]; }
+
+icu::UnicodeString Intl::ToICUUnicodeString(Isolate* isolate,
+ Handle<String> string) {
+ string = String::Flatten(isolate, string);
+ {
+ DisallowHeapAllocation no_gc;
+ std::unique_ptr<uc16[]> sap;
+ return icu::UnicodeString(
+ GetUCharBufferFromFlat(string->GetFlatContent(no_gc), &sap,
+ string->length()),
+ string->length());
+ }
+}
+
+namespace {
+MaybeHandle<String> LocaleConvertCase(Isolate* isolate, Handle<String> s,
+ bool is_to_upper, const char* lang) {
+ auto case_converter = is_to_upper ? u_strToUpper : u_strToLower;
+ int32_t src_length = s->length();
+ int32_t dest_length = src_length;
+ UErrorCode status;
+ Handle<SeqTwoByteString> result;
+ std::unique_ptr<uc16[]> sap;
+
+ if (dest_length == 0) return ReadOnlyRoots(isolate).empty_string_handle();
+
+ // This is not a real loop. It'll be executed only once (no overflow) or
+ // twice (overflow).
+ for (int i = 0; i < 2; ++i) {
+ // Case conversion can increase the string length (e.g. sharp-S => SS), so
+ // we have to handle RangeError exceptions here.
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, result, isolate->factory()->NewRawTwoByteString(dest_length),
+ String);
+ DisallowHeapAllocation no_gc;
+ DCHECK(s->IsFlat());
+ String::FlatContent flat = s->GetFlatContent(no_gc);
+ const UChar* src = GetUCharBufferFromFlat(flat, &sap, src_length);
+ status = U_ZERO_ERROR;
+ dest_length =
+ case_converter(reinterpret_cast<UChar*>(result->GetChars(no_gc)),
+ dest_length, src, src_length, lang, &status);
+ if (status != U_BUFFER_OVERFLOW_ERROR) break;
+ }
+
+ // In most cases, the output will fill the destination buffer completely,
+ // leading to an unterminated string (U_STRING_NOT_TERMINATED_WARNING).
+ // Only in rare cases will it be shorter than the destination buffer, in
+ // which case |result| has to be truncated.
+ DCHECK(U_SUCCESS(status));
+ if (V8_LIKELY(status == U_STRING_NOT_TERMINATED_WARNING)) {
+ DCHECK(dest_length == result->length());
+ return result;
+ }
+ DCHECK(dest_length < result->length());
+ return SeqString::Truncate(result, dest_length);
+}
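+
+// The two-pass loop above is the standard ICU preflighting idiom: on
+// overflow the converter sets U_BUFFER_OVERFLOW_ERROR and returns the length
+// it actually needs, so the second pass allocates exactly that much and must
+// succeed. A minimal sketch of the same pattern against a raw buffer:
+//
+//   UErrorCode status = U_ZERO_ERROR;
+//   int32_t needed =
+//       u_strToUpper(nullptr, 0, src, src_length, lang, &status);
+//   if (status == U_BUFFER_OVERFLOW_ERROR) {
+//     status = U_ZERO_ERROR;
+//     // Allocate |needed| UChars and convert for real.
+//   }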
+
+} // namespace
+
+// A stripped-down version of ConvertToLower that can only handle flat one-byte
+// strings and does not allocate. Note that {src} could still be, e.g., a
+// one-byte sliced string with a two-byte parent string.
+// Called from TF builtins.
+String Intl::ConvertOneByteToLower(String src, String dst) {
+ DCHECK_EQ(src->length(), dst->length());
+ DCHECK(src->HasOnlyOneByteChars());
+ DCHECK(src->IsFlat());
+ DCHECK(dst->IsSeqOneByteString());
+
+ DisallowHeapAllocation no_gc;
+
+ const int length = src->length();
+ String::FlatContent src_flat = src->GetFlatContent(no_gc);
+ uint8_t* dst_data = SeqOneByteString::cast(dst)->GetChars(no_gc);
+
+ if (src_flat.IsOneByte()) {
+ const uint8_t* src_data = src_flat.ToOneByteVector().start();
+
+ bool has_changed_character = false;
+ int index_to_first_unprocessed =
+ FastAsciiConvert<true>(reinterpret_cast<char*>(dst_data),
+ reinterpret_cast<const char*>(src_data), length,
+ &has_changed_character);
+
+ if (index_to_first_unprocessed == length) {
+ return has_changed_character ? dst : src;
+ }
+
+ // If not ASCII, we keep the result up to index_to_first_unprocessed and
+ // process the rest.
+ for (int index = index_to_first_unprocessed; index < length; ++index) {
+ dst_data[index] = ToLatin1Lower(static_cast<uint16_t>(src_data[index]));
+ }
+ } else {
+ DCHECK(src_flat.IsTwoByte());
+ int index_to_first_unprocessed = FindFirstUpperOrNonAscii(src, length);
+ if (index_to_first_unprocessed == length) return src;
+
+ const uint16_t* src_data = src_flat.ToUC16Vector().start();
+ CopyChars(dst_data, src_data, index_to_first_unprocessed);
+ for (int index = index_to_first_unprocessed; index < length; ++index) {
+ dst_data[index] = ToLatin1Lower(static_cast<uint16_t>(src_data[index]));
+ }
+ }
+
+ return dst;
+}
+
+MaybeHandle<String> Intl::ConvertToLower(Isolate* isolate, Handle<String> s) {
+ if (!s->HasOnlyOneByteChars()) {
+ // Use a slower implementation for strings with characters beyond U+00FF.
+ return LocaleConvertCase(isolate, s, false, "");
+ }
+
+ int length = s->length();
+
+ // We depend here on the invariant that the length of a Latin1
+ // string is invariant under ToLowerCase, and the result always
+ // fits in the Latin1 range in the *root locale*. It does not hold
+ // for ToUpperCase even in the root locale.
+
+ // For strings shorter than a machine word, scan for uppercase and
+ // non-ASCII characters without any memory allocation overhead.
+ // TODO(jshin): Apply this to longer input by breaking FastAsciiConvert()
+ // into two parts, one for scanning the prefix with no change and the other
+ // for handling ASCII-only characters.
+
+ bool is_short = length < static_cast<int>(sizeof(uintptr_t));
+ if (is_short) {
+ bool is_lower_ascii = FindFirstUpperOrNonAscii(*s, length) == length;
+ if (is_lower_ascii) return s;
+ }
+
+ Handle<SeqOneByteString> result =
+ isolate->factory()->NewRawOneByteString(length).ToHandleChecked();
+
+ return Handle<String>(Intl::ConvertOneByteToLower(*s, *result), isolate);
+}
+
+MaybeHandle<String> Intl::ConvertToUpper(Isolate* isolate, Handle<String> s) {
+ int32_t length = s->length();
+ if (s->HasOnlyOneByteChars() && length > 0) {
+ Handle<SeqOneByteString> result =
+ isolate->factory()->NewRawOneByteString(length).ToHandleChecked();
+
+ DCHECK(s->IsFlat());
+ int sharp_s_count;
+ bool is_result_single_byte;
+ {
+ DisallowHeapAllocation no_gc;
+ String::FlatContent flat = s->GetFlatContent(no_gc);
+ uint8_t* dest = result->GetChars(no_gc);
+ if (flat.IsOneByte()) {
+ Vector<const uint8_t> src = flat.ToOneByteVector();
+ bool has_changed_character = false;
+ int index_to_first_unprocessed = FastAsciiConvert<false>(
+ reinterpret_cast<char*>(result->GetChars(no_gc)),
+ reinterpret_cast<const char*>(src.start()), length,
+ &has_changed_character);
+ if (index_to_first_unprocessed == length) {
+ return has_changed_character ? result : s;
+ }
+ // If not ASCII, we keep the result up to index_to_first_unprocessed and
+ // process the rest.
+ is_result_single_byte =
+ ToUpperOneByte(src.SubVector(index_to_first_unprocessed, length),
+ dest + index_to_first_unprocessed, &sharp_s_count);
+ } else {
+ DCHECK(flat.IsTwoByte());
+ Vector<const uint16_t> src = flat.ToUC16Vector();
+ if (ToUpperFastASCII(src, result)) return result;
+ is_result_single_byte = ToUpperOneByte(src, dest, &sharp_s_count);
+ }
+ }
+
+ // Go to the full Unicode path if there are characters whose upper-case
+ // form is beyond the Latin-1 range (cannot be represented in a
+ // OneByteString).
+ if (V8_UNLIKELY(!is_result_single_byte)) {
+ return LocaleConvertCase(isolate, s, true, "");
+ }
+
+ if (sharp_s_count == 0) return result;
+
+ // We have sharp_s_count sharp-s characters, but the result is still
+ // in the Latin-1 range.
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, result,
+ isolate->factory()->NewRawOneByteString(length + sharp_s_count),
+ String);
+ DisallowHeapAllocation no_gc;
+ String::FlatContent flat = s->GetFlatContent(no_gc);
+ if (flat.IsOneByte()) {
+ ToUpperWithSharpS(flat.ToOneByteVector(), result);
+ } else {
+ ToUpperWithSharpS(flat.ToUC16Vector(), result);
+ }
+
+ return result;
+ }
+
+ return LocaleConvertCase(isolate, s, true, "");
+}
+
std::string Intl::GetNumberingSystem(const icu::Locale& icu_locale) {
// Ugly hack. ICU doesn't expose the numbering system in any way, so we have
// to assume that for a given locale the NumberingSystem constructor produces the
@@ -49,54 +399,18 @@ std::string Intl::GetNumberingSystem(const icu::Locale& icu_locale) {
UErrorCode status = U_ZERO_ERROR;
std::unique_ptr<icu::NumberingSystem> numbering_system(
icu::NumberingSystem::createInstance(icu_locale, status));
- std::string value;
- if (U_SUCCESS(status)) {
- value = numbering_system->getName();
- }
- return value;
+ if (U_SUCCESS(status)) return numbering_system->getName();
+ return "latn";
}
-MaybeHandle<JSObject> Intl::CachedOrNewService(
- Isolate* isolate, Handle<String> service, Handle<Object> locales,
- Handle<Object> options, Handle<Object> internal_options) {
- Handle<Object> result;
- Handle<Object> undefined_value(ReadOnlyRoots(isolate).undefined_value(),
- isolate);
- Handle<Object> args[] = {service, locales, options, internal_options};
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, result,
- Execution::Call(isolate, isolate->cached_or_new_service(),
- undefined_value, arraysize(args), args),
- JSArray);
- return Handle<JSObject>::cast(result);
-}
-
-icu::Locale Intl::CreateICULocale(Isolate* isolate,
- Handle<String> bcp47_locale_str) {
- v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate);
- v8::String::Utf8Value bcp47_locale(v8_isolate,
- v8::Utils::ToLocal(bcp47_locale_str));
- CHECK_NOT_NULL(*bcp47_locale);
-
+icu::Locale Intl::CreateICULocale(const std::string& bcp47_locale) {
DisallowHeapAllocation no_gc;
// Convert BCP47 into ICU locale format.
UErrorCode status = U_ZERO_ERROR;
- char icu_result[ULOC_FULLNAME_CAPACITY];
- int parsed_length = 0;
- // bcp47_locale_str should be a canonicalized language tag, which
- // means this shouldn't fail.
- uloc_forLanguageTag(*bcp47_locale, icu_result, ULOC_FULLNAME_CAPACITY,
- &parsed_length, &status);
+ icu::Locale icu_locale = icu::Locale::forLanguageTag(bcp47_locale, status);
CHECK(U_SUCCESS(status));
-
- // bcp47_locale is already checked for its structural validity
- // so that it should be parsed completely.
- int bcp47length = bcp47_locale.length();
- CHECK_EQ(bcp47length, parsed_length);
-
- icu::Locale icu_locale(icu_result);
if (icu_locale.isBogus()) {
FATAL("Failed to create ICU locale, are ICU data files missing?");
}
@@ -184,62 +498,16 @@ bool RemoveLocaleScriptTag(const std::string& icu_locale,
} // namespace
-std::set<std::string> Intl::GetAvailableLocales(const ICUService service) {
- const icu::Locale* icu_available_locales = nullptr;
- int32_t count = 0;
+std::set<std::string> Intl::BuildLocaleSet(
+ const icu::Locale* icu_available_locales, int32_t count) {
std::set<std::string> locales;
-
- switch (service) {
- case ICUService::kBreakIterator:
- case ICUService::kSegmenter:
- icu_available_locales = icu::BreakIterator::getAvailableLocales(count);
- break;
- case ICUService::kCollator:
- icu_available_locales = icu::Collator::getAvailableLocales(count);
- break;
- case ICUService::kRelativeDateTimeFormatter:
- case ICUService::kDateFormat:
- icu_available_locales = icu::DateFormat::getAvailableLocales(count);
- break;
- case ICUService::kNumberFormat:
- icu_available_locales = icu::NumberFormat::getAvailableLocales(count);
- break;
- case ICUService::kPluralRules:
- // TODO(littledan): For PluralRules, filter out locales that
- // don't support PluralRules.
- // PluralRules is missing an appropriate getAvailableLocales method,
- // so we should filter from all locales, but it's not clear how; see
- // https://ssl.icu-project.org/trac/ticket/12756
- icu_available_locales = icu::Locale::getAvailableLocales(count);
- break;
- case ICUService::kListFormatter: {
- // TODO(ftang): for now just use
- // icu::Locale::getAvailableLocales(count) until we migrate to
- // Intl::GetAvailableLocales().
- // ICU FR at https://unicode-org.atlassian.net/browse/ICU-20015
- icu_available_locales = icu::Locale::getAvailableLocales(count);
- break;
- }
- }
-
- UErrorCode error = U_ZERO_ERROR;
- char result[ULOC_FULLNAME_CAPACITY];
-
for (int32_t i = 0; i < count; ++i) {
- const char* icu_name = icu_available_locales[i].getName();
-
- error = U_ZERO_ERROR;
- // No need to force strict BCP47 rules.
- uloc_toLanguageTag(icu_name, result, ULOC_FULLNAME_CAPACITY, FALSE, &error);
- if (U_FAILURE(error) || error == U_STRING_NOT_TERMINATED_WARNING) {
- // This shouldn't happen, but lets not break the user.
- continue;
- }
- std::string locale(result);
+ std::string locale =
+ Intl::ToLanguageTag(icu_available_locales[i]).FromJust();
locales.insert(locale);
std::string shortened_locale;
- if (RemoveLocaleScriptTag(icu_name, &shortened_locale)) {
+ if (RemoveLocaleScriptTag(locale, &shortened_locale)) {
std::replace(shortened_locale.begin(), shortened_locale.end(), '_', '-');
locales.insert(shortened_locale);
}
@@ -248,75 +516,39 @@ std::set<std::string> Intl::GetAvailableLocales(const ICUService service) {
return locales;
}
-namespace {
-
-// TODO(gsathya): Remove this once we port ResolveLocale to C++.
-ICUService StringToICUService(Handle<String> service) {
- std::unique_ptr<char[]> service_cstr = service->ToCString();
- if (strcmp(service_cstr.get(), "collator") == 0) {
- return ICUService::kCollator;
- } else if (strcmp(service_cstr.get(), "numberformat") == 0) {
- return ICUService::kNumberFormat;
- } else if (strcmp(service_cstr.get(), "dateformat") == 0) {
- return ICUService::kDateFormat;
- } else if (strcmp(service_cstr.get(), "breakiterator") == 0) {
- return ICUService::kBreakIterator;
- } else if (strcmp(service_cstr.get(), "pluralrules") == 0) {
- return ICUService::kPluralRules;
- } else if (strcmp(service_cstr.get(), "relativetimeformat") == 0) {
- return ICUService::kRelativeDateTimeFormatter;
- } else if (strcmp(service_cstr.get(), "listformat") == 0) {
- return ICUService::kListFormatter;
- } else if (service->IsUtf8EqualTo(CStrVector("segmenter"))) {
- return ICUService::kSegmenter;
- }
- UNREACHABLE();
-}
-
-const char* ICUServiceToString(ICUService service) {
- switch (service) {
- case ICUService::kCollator:
- return "Intl.Collator";
- case ICUService::kNumberFormat:
- return "Intl.NumberFormat";
- case ICUService::kDateFormat:
- return "Intl.DateFormat";
- case ICUService::kBreakIterator:
- return "Intl.v8BreakIterator";
- case ICUService::kPluralRules:
- return "Intl.PluralRules";
- case ICUService::kRelativeDateTimeFormatter:
- return "Intl.RelativeTimeFormat";
- case ICUService::kListFormatter:
- return "Intl.kListFormat";
- case ICUService::kSegmenter:
- return "Intl.kSegmenter";
- }
- UNREACHABLE();
-}
-
-} // namespace
-
-V8_WARN_UNUSED_RESULT MaybeHandle<JSObject> Intl::AvailableLocalesOf(
- Isolate* isolate, Handle<String> service) {
- Factory* factory = isolate->factory();
- std::set<std::string> results =
- Intl::GetAvailableLocales(StringToICUService(service));
- Handle<JSObject> locales = factory->NewJSObjectWithNullProto();
+Maybe<std::string> Intl::ToLanguageTag(const icu::Locale& locale) {
+ UErrorCode status = U_ZERO_ERROR;
+ std::string res = locale.toLanguageTag<std::string>(status);
+ if (U_FAILURE(status)) {
+ return Nothing<std::string>();
+ }
+ CHECK(U_SUCCESS(status));
- int32_t i = 0;
- for (auto iter = results.begin(); iter != results.end(); ++iter) {
- RETURN_ON_EXCEPTION(
- isolate,
- JSObject::SetOwnPropertyIgnoreAttributes(
- locales, factory->NewStringFromAsciiChecked(iter->c_str()),
- factory->NewNumber(i++), NONE),
- JSObject);
+ // Hack to remove -true and -yes from Unicode extensions.
+ // Addresses https://crbug.com/v8/8565.
+ // TODO(ftang): Move the following "remove true" logic into ICU toLanguageTag
+ // by fixing ICU-20310.
+ size_t u_ext_start = res.find("-u-");
+ if (u_ext_start != std::string::npos) {
+ // Remove "-true" and "-yes" after "-u-".
+ const std::vector<std::string> remove_items({"-true", "-yes"});
+ for (auto item = remove_items.begin(); item != remove_items.end(); item++) {
+ for (size_t sep_remove =
+ res.find(*item, u_ext_start + 5 /* strlen("-u-xx") == 5 */);
+ sep_remove != std::string::npos; sep_remove = res.find(*item)) {
+ size_t end_of_sep_remove = sep_remove + item->length();
+ if (res.length() == end_of_sep_remove ||
+ res.at(end_of_sep_remove) == '-') {
+ res.erase(sep_remove, item->length());
+ }
+ }
+ }
}
- return locales;
+ return Just(res);
}
-std::string Intl::DefaultLocale(Isolate* isolate) {
+namespace {
+std::string DefaultLocale(Isolate* isolate) {
if (isolate->default_locale().empty()) {
icu::Locale default_locale;
// Translate ICU's fallback locale to a well-known locale.
@@ -324,32 +556,16 @@ std::string Intl::DefaultLocale(Isolate* isolate) {
isolate->set_default_locale("en-US");
} else {
// Set the locale
- char result[ULOC_FULLNAME_CAPACITY];
- UErrorCode status = U_ZERO_ERROR;
- int32_t length =
- uloc_toLanguageTag(default_locale.getName(), result,
- ULOC_FULLNAME_CAPACITY, FALSE, &status);
isolate->set_default_locale(
- U_SUCCESS(status) ? std::string(result, length) : "und");
+ default_locale.isBogus()
+ ? "und"
+ : Intl::ToLanguageTag(default_locale).FromJust());
}
DCHECK(!isolate->default_locale().empty());
}
return isolate->default_locale();
}
-
-bool Intl::IsObjectOfType(Isolate* isolate, Handle<Object> input,
- Intl::Type expected_type) {
- if (!input->IsJSObject()) return false;
- Handle<JSObject> obj = Handle<JSObject>::cast(input);
-
- Handle<Symbol> marker = isolate->factory()->intl_initialized_marker_symbol();
- Handle<Object> tag = JSReceiver::GetDataProperty(obj, marker);
-
- if (!tag->IsSmi()) return false;
-
- Intl::Type type = Intl::TypeFromSmi(Smi::cast(*tag));
- return type == expected_type;
-}
+} // namespace
// See ecma402/#legacy-constructor.
MaybeHandle<Object> Intl::LegacyUnwrapReceiver(Isolate* isolate,
@@ -378,161 +594,6 @@ MaybeHandle<Object> Intl::LegacyUnwrapReceiver(Isolate* isolate,
return receiver;
}
-namespace {
-
-#if USE_CHROMIUM_ICU == 0 && U_ICU_VERSION_MAJOR_NUM < 63
-// Define general regexp macros.
-// Note "(?:" means the regexp group a non-capture group.
-#define REGEX_ALPHA "[a-z]"
-#define REGEX_DIGIT "[0-9]"
-#define REGEX_ALPHANUM "(?:" REGEX_ALPHA "|" REGEX_DIGIT ")"
-
-void BuildLanguageTagRegexps(Isolate* isolate) {
-// Define the language tag regexp macros.
-// For info on BCP 47 see https://tools.ietf.org/html/bcp47 .
-// Because language tags are case insensitive per BCP 47 2.1.1 and regexp's
-// defined below will always be used after lowercasing the input, uppercase
-// ranges in BCP 47 2.1 are dropped and grandfathered tags are all lowercased.
-// clang-format off
-#define BCP47_REGULAR \
- "(?:art-lojban|cel-gaulish|no-bok|no-nyn|zh-guoyu|zh-hakka|" \
- "zh-min|zh-min-nan|zh-xiang)"
-#define BCP47_IRREGULAR \
- "(?:en-gb-oed|i-ami|i-bnn|i-default|i-enochian|i-hak|" \
- "i-klingon|i-lux|i-mingo|i-navajo|i-pwn|i-tao|i-tay|" \
- "i-tsu|sgn-be-fr|sgn-be-nl|sgn-ch-de)"
-#define BCP47_GRANDFATHERED "(?:" BCP47_IRREGULAR "|" BCP47_REGULAR ")"
-#define BCP47_PRIVATE_USE "(?:x(?:-" REGEX_ALPHANUM "{1,8})+)"
-
-#define BCP47_SINGLETON "(?:" REGEX_DIGIT "|" "[a-wy-z])"
-
-#define BCP47_EXTENSION "(?:" BCP47_SINGLETON "(?:-" REGEX_ALPHANUM "{2,8})+)"
-#define BCP47_VARIANT \
- "(?:" REGEX_ALPHANUM "{5,8}" "|" "(?:" REGEX_DIGIT REGEX_ALPHANUM "{3}))"
-
-#define BCP47_REGION "(?:" REGEX_ALPHA "{2}" "|" REGEX_DIGIT "{3})"
-#define BCP47_SCRIPT "(?:" REGEX_ALPHA "{4})"
-#define BCP47_EXT_LANG "(?:" REGEX_ALPHA "{3}(?:-" REGEX_ALPHA "{3}){0,2})"
-#define BCP47_LANGUAGE "(?:" REGEX_ALPHA "{2,3}(?:-" BCP47_EXT_LANG ")?" \
- "|" REGEX_ALPHA "{4}" "|" REGEX_ALPHA "{5,8})"
-#define BCP47_LANG_TAG \
- BCP47_LANGUAGE \
- "(?:-" BCP47_SCRIPT ")?" \
- "(?:-" BCP47_REGION ")?" \
- "(?:-" BCP47_VARIANT ")*" \
- "(?:-" BCP47_EXTENSION ")*" \
- "(?:-" BCP47_PRIVATE_USE ")?"
- // clang-format on
-
- constexpr char kLanguageTagSingletonRegexp[] = "^" BCP47_SINGLETON "$";
- constexpr char kLanguageTagVariantRegexp[] = "^" BCP47_VARIANT "$";
- constexpr char kLanguageTagRegexp[] =
- "^(?:" BCP47_LANG_TAG "|" BCP47_PRIVATE_USE "|" BCP47_GRANDFATHERED ")$";
-
- UErrorCode status = U_ZERO_ERROR;
- icu::RegexMatcher* language_singleton_regexp_matcher = new icu::RegexMatcher(
- icu::UnicodeString(kLanguageTagSingletonRegexp, -1, US_INV), 0, status);
- icu::RegexMatcher* language_tag_regexp_matcher = new icu::RegexMatcher(
- icu::UnicodeString(kLanguageTagRegexp, -1, US_INV), 0, status);
- icu::RegexMatcher* language_variant_regexp_matcher = new icu::RegexMatcher(
- icu::UnicodeString(kLanguageTagVariantRegexp, -1, US_INV), 0, status);
- CHECK(U_SUCCESS(status));
-
- isolate->set_language_tag_regexp_matchers(language_singleton_regexp_matcher,
- language_tag_regexp_matcher,
- language_variant_regexp_matcher);
-// Undefine the language tag regexp macros.
-#undef BCP47_EXTENSION
-#undef BCP47_EXT_LANG
-#undef BCP47_GRANDFATHERED
-#undef BCP47_IRREGULAR
-#undef BCP47_LANG_TAG
-#undef BCP47_LANGUAGE
-#undef BCP47_PRIVATE_USE
-#undef BCP47_REGION
-#undef BCP47_REGULAR
-#undef BCP47_SCRIPT
-#undef BCP47_SINGLETON
-#undef BCP47_VARIANT
-}
-
-// Undefine the general regexp macros.
-#undef REGEX_ALPHA
-#undef REGEX_DIGIT
-#undef REGEX_ALPHANUM
-
-icu::RegexMatcher* GetLanguageSingletonRegexMatcher(Isolate* isolate) {
- icu::RegexMatcher* language_singleton_regexp_matcher =
- isolate->language_singleton_regexp_matcher();
- if (language_singleton_regexp_matcher == nullptr) {
- BuildLanguageTagRegexps(isolate);
- language_singleton_regexp_matcher =
- isolate->language_singleton_regexp_matcher();
- }
- return language_singleton_regexp_matcher;
-}
-
-icu::RegexMatcher* GetLanguageTagRegexMatcher(Isolate* isolate) {
- icu::RegexMatcher* language_tag_regexp_matcher =
- isolate->language_tag_regexp_matcher();
- if (language_tag_regexp_matcher == nullptr) {
- BuildLanguageTagRegexps(isolate);
- language_tag_regexp_matcher = isolate->language_tag_regexp_matcher();
- }
- return language_tag_regexp_matcher;
-}
-
-icu::RegexMatcher* GetLanguageVariantRegexMatcher(Isolate* isolate) {
- icu::RegexMatcher* language_variant_regexp_matcher =
- isolate->language_variant_regexp_matcher();
- if (language_variant_regexp_matcher == nullptr) {
- BuildLanguageTagRegexps(isolate);
- language_variant_regexp_matcher =
- isolate->language_variant_regexp_matcher();
- }
- return language_variant_regexp_matcher;
-}
-#endif // USE_CHROMIUM_ICU == 0 && U_ICU_VERSION_MAJOR_NUM < 63
-
-} // anonymous namespace
-
-MaybeHandle<JSObject> Intl::ResolveLocale(Isolate* isolate, const char* service,
- Handle<Object> requestedLocales,
- Handle<Object> options) {
- Handle<String> service_str =
- isolate->factory()->NewStringFromAsciiChecked(service);
-
- Handle<JSFunction> resolve_locale_function = isolate->resolve_locale();
-
- Handle<Object> result;
- Handle<Object> undefined_value = isolate->factory()->undefined_value();
- Handle<Object> args[] = {service_str, requestedLocales, options};
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, result,
- Execution::Call(isolate, resolve_locale_function, undefined_value,
- arraysize(args), args),
- JSObject);
-
- return Handle<JSObject>::cast(result);
-}
-
-MaybeHandle<JSObject> Intl::CanonicalizeLocaleListJS(Isolate* isolate,
- Handle<Object> locales) {
- Handle<JSFunction> canonicalize_locale_list_function =
- isolate->canonicalize_locale_list();
-
- Handle<Object> result;
- Handle<Object> undefined_value = isolate->factory()->undefined_value();
- Handle<Object> args[] = {locales};
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, result,
- Execution::Call(isolate, canonicalize_locale_list_function,
- undefined_value, arraysize(args), args),
- JSObject);
-
- return Handle<JSObject>::cast(result);
-}
-
Maybe<bool> Intl::GetStringOption(Isolate* isolate, Handle<JSReceiver> options,
const char* property,
std::vector<const char*> values,
@@ -618,111 +679,6 @@ char AsciiToLower(char c) {
return c | (1 << 5);
}
-#if USE_CHROMIUM_ICU == 0 && U_ICU_VERSION_MAJOR_NUM < 63
-/**
- * Check the structural Validity of the language tag per ECMA 402 6.2.2:
- * - Well-formed per RFC 5646 2.1
- * - There are no duplicate variant subtags
- * - There are no duplicate singleton (extension) subtags
- *
- * One extra-check is done (from RFC 5646 2.2.9): the tag is compared
- * against the list of grandfathered tags. However, subtags for
- * primary/extended language, script, region, variant are not checked
- * against the IANA language subtag registry.
- *
- * ICU 62 or earlier is too permissible and lets invalid tags, like
- * hant-cmn-cn, through.
- *
- * Returns false if the language tag is invalid.
- */
-bool IsStructurallyValidLanguageTag(Isolate* isolate,
- const std::string& locale_in) {
- if (!String::IsAscii(locale_in.c_str(),
- static_cast<int>(locale_in.length()))) {
- return false;
- }
- std::string locale(locale_in);
- icu::RegexMatcher* language_tag_regexp_matcher =
- GetLanguageTagRegexMatcher(isolate);
-
- // Check if it's well-formed, including grandfathered tags.
- icu::UnicodeString locale_uni(locale.c_str(), -1, US_INV);
- // Note: icu::RegexMatcher::reset does not make a copy of the input string
- // so cannot use a temp value; ie: cannot create it as a call parameter.
- language_tag_regexp_matcher->reset(locale_uni);
- UErrorCode status = U_ZERO_ERROR;
- bool is_valid_lang_tag = language_tag_regexp_matcher->matches(status);
- if (!is_valid_lang_tag || V8_UNLIKELY(U_FAILURE(status))) {
- return false;
- }
-
- // Just return if it's a x- form. It's all private.
- if (locale.find("x-") == 0) {
- return true;
- }
-
- // Check if there are any duplicate variants or singletons (extensions).
-
- // Remove private use section.
- locale = locale.substr(0, locale.find("-x-"));
-
- // Skip language since it can match variant regex, so we start from 1.
- // We are matching i-klingon here, but that's ok, since i-klingon-klingon
- // is not valid and would fail LANGUAGE_TAG_RE test.
- size_t pos = 0;
- std::vector<std::string> parts;
- while ((pos = locale.find('-')) != std::string::npos) {
- std::string token = locale.substr(0, pos);
- parts.push_back(token);
- locale = locale.substr(pos + 1);
- }
- if (locale.length() != 0) {
- parts.push_back(locale);
- }
-
- icu::RegexMatcher* language_variant_regexp_matcher =
- GetLanguageVariantRegexMatcher(isolate);
-
- icu::RegexMatcher* language_singleton_regexp_matcher =
- GetLanguageSingletonRegexMatcher(isolate);
-
- std::vector<std::string> variants;
- std::vector<std::string> extensions;
- for (auto it = parts.begin() + 1; it != parts.end(); it++) {
- icu::UnicodeString part(it->data(), -1, US_INV);
- language_variant_regexp_matcher->reset(part);
- bool is_language_variant = language_variant_regexp_matcher->matches(status);
- if (V8_UNLIKELY(U_FAILURE(status))) {
- return false;
- }
- if (is_language_variant && extensions.size() == 0) {
- if (std::find(variants.begin(), variants.end(), *it) == variants.end()) {
- variants.push_back(*it);
- } else {
- return false;
- }
- }
-
- language_singleton_regexp_matcher->reset(part);
- bool is_language_singleton =
- language_singleton_regexp_matcher->matches(status);
- if (V8_UNLIKELY(U_FAILURE(status))) {
- return false;
- }
- if (is_language_singleton) {
- if (std::find(extensions.begin(), extensions.end(), *it) ==
- extensions.end()) {
- extensions.push_back(*it);
- } else {
- return false;
- }
- }
- }
-
- return true;
-}
-#endif // USE_CHROMIUM_ICU == 0 || U_ICU_VERSION_MAJOR_NUM < 63
-
bool IsLowerAscii(char c) { return c >= 'a' && c <= 'z'; }
bool IsTwoLetterLanguage(const std::string& locale) {
@@ -795,15 +751,6 @@ Maybe<std::string> Intl::CanonicalizeLanguageTag(Isolate* isolate,
// the input before any further checks.
std::transform(locale.begin(), locale.end(), locale.begin(), AsciiToLower);
-#if USE_CHROMIUM_ICU == 0 && U_ICU_VERSION_MAJOR_NUM < 63
- if (!IsStructurallyValidLanguageTag(isolate, locale)) {
- THROW_NEW_ERROR_RETURN_VALUE(
- isolate,
- NewRangeError(MessageTemplate::kInvalidLanguageTag, locale_str),
- Nothing<std::string>());
- }
-#endif
-
// ICU maps a few grandfathered tags to what looks like a regular language
// tag even though the IANA language tag registry does not have a preferred
// mapping for them. Return them as they are, with lowercasing.
@@ -819,38 +766,26 @@ Maybe<std::string> Intl::CanonicalizeLanguageTag(Isolate* isolate,
// handle long locale names better. See
// https://unicode-org.atlassian.net/browse/ICU-13417
UErrorCode error = U_ZERO_ERROR;
- char icu_result[ULOC_FULLNAME_CAPACITY];
// uloc_forLanguageTag checks the structural validity. If the input BCP47
// language tag is parsed all the way to the end, it indicates that the input
// is structurally valid. Due to a couple of bugs, it couldn't be used
// without Chromium patches on ICU 62 or earlier.
- int parsed_length;
- uloc_forLanguageTag(locale.c_str(), icu_result, ULOC_FULLNAME_CAPACITY,
- &parsed_length, &error);
- if (U_FAILURE(error) ||
-#if USE_CHROMIUM_ICU == 1 || U_ICU_VERSION_MAJOR_NUM >= 63
- static_cast<size_t>(parsed_length) < locale.length() ||
-#endif
- error == U_STRING_NOT_TERMINATED_WARNING) {
+ icu::Locale icu_locale = icu::Locale::forLanguageTag(locale.c_str(), error);
+ if (U_FAILURE(error) || icu_locale.isBogus()) {
THROW_NEW_ERROR_RETURN_VALUE(
isolate,
NewRangeError(MessageTemplate::kInvalidLanguageTag, locale_str),
Nothing<std::string>());
}
-
- // Force strict BCP47 rules.
- char result[ULOC_FULLNAME_CAPACITY];
- int32_t result_len = uloc_toLanguageTag(icu_result, result,
- ULOC_FULLNAME_CAPACITY, TRUE, &error);
-
- if (U_FAILURE(error)) {
+ Maybe<std::string> maybe_to_language_tag = Intl::ToLanguageTag(icu_locale);
+ if (maybe_to_language_tag.IsNothing()) {
THROW_NEW_ERROR_RETURN_VALUE(
isolate,
NewRangeError(MessageTemplate::kInvalidLanguageTag, locale_str),
Nothing<std::string>());
}
- return Just(std::string(result, result_len));
+ return maybe_to_language_tag;
}
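
The forLanguageTag/toLanguageTag round trip above replaces the old uloc_-based buffer handling. A minimal sketch of the same round trip, assuming ICU 63+ (CanonicalizeTag is a hypothetical standalone helper):

    #include <string>
    #include <unicode/locid.h>
    #include <unicode/utypes.h>

    // Returns the canonical BCP 47 tag, or an empty string for invalid input.
    std::string CanonicalizeTag(const std::string& input) {
      UErrorCode status = U_ZERO_ERROR;
      icu::Locale locale = icu::Locale::forLanguageTag(input.c_str(), status);
      if (U_FAILURE(status) || locale.isBogus()) return std::string();
      std::string result = locale.toLanguageTag<std::string>(status);
      return U_SUCCESS(status) ? result : std::string();
    }
    // CanonicalizeTag("en-us") == "en-US"; invalid input yields "".
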
Maybe<std::vector<std::string>> Intl::CanonicalizeLocaleList(
@@ -862,7 +797,15 @@ Maybe<std::vector<std::string>> Intl::CanonicalizeLocaleList(
}
// 2. Let seen be a new empty List.
std::vector<std::string> seen;
- // 3. If Type(locales) is String, then
+ // 3. If Type(locales) is String or locales has an [[InitializedLocale]]
+ // internal slot, then
+ if (locales->IsJSLocale()) {
+ // Since this value came from JSLocale, it already went through the
+ // CanonicalizeLanguageTag process once, so there is no need to call
+ // CanonicalizeLanguageTag again.
+ seen.push_back(JSLocale::ToString(Handle<JSLocale>::cast(locales)));
+ return Just(seen);
+ }
if (locales->IsString()) {
// 3a. Let O be CreateArrayFromList(« locales »).
// Instead of creating a one-element array and then iterating over it,
@@ -898,21 +841,31 @@ Maybe<std::vector<std::string>> Intl::CanonicalizeLocaleList(
// 7a. Let Pk be ToString(k).
// 7b. Let kPresent be ? HasProperty(O, Pk).
LookupIterator it(isolate, o, k);
+ Maybe<bool> maybe_found = JSReceiver::HasProperty(&it);
+ MAYBE_RETURN(maybe_found, Nothing<std::vector<std::string>>());
// 7c. If kPresent is true, then
- if (!it.IsFound()) continue;
+ if (!maybe_found.FromJust()) continue;
// 7c i. Let kValue be ? Get(O, Pk).
Handle<Object> k_value;
ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, k_value, Object::GetProperty(&it),
Nothing<std::vector<std::string>>());
// 7c ii. If Type(kValue) is not String or Object, throw a TypeError
// exception.
- // 7c iii. Let tag be ? ToString(kValue).
- // 7c iv. If IsStructurallyValidLanguageTag(tag) is false, throw a
- // RangeError exception.
- // 7c v. Let canonicalizedTag be CanonicalizeLanguageTag(tag).
+ // 7c iii. If Type(kValue) is Object and kValue has an [[InitializedLocale]]
+ // internal slot, then
std::string canonicalized_tag;
- if (!CanonicalizeLanguageTag(isolate, k_value).To(&canonicalized_tag)) {
- return Nothing<std::vector<std::string>>();
+ if (k_value->IsJSLocale()) {
+ // 7c iii. 1. Let tag be kValue.[[Locale]].
+ canonicalized_tag = JSLocale::ToString(Handle<JSLocale>::cast(k_value));
+ // 7c iv. Else,
+ } else {
+ // 7c iv 1. Let tag be ? ToString(kValue).
+ // 7c v. If IsStructurallyValidLanguageTag(tag) is false, throw a
+ // RangeError exception.
+ // 7c vi. Let canonicalizedTag be CanonicalizeLanguageTag(tag).
+ if (!CanonicalizeLanguageTag(isolate, k_value).To(&canonicalized_tag)) {
+ return Nothing<std::vector<std::string>>();
+ }
}
// 7c vi. If canonicalizedTag is not an element of seen, append
// canonicalizedTag as the last element of seen.
@@ -938,7 +891,7 @@ MaybeHandle<String> Intl::StringLocaleConvertCase(Isolate* isolate,
return MaybeHandle<String>();
}
std::string requested_locale = requested_locales.size() == 0
- ? Intl::DefaultLocale(isolate)
+ ? DefaultLocale(isolate)
: requested_locales[0];
size_t dash = requested_locale.find('-');
if (dash != std::string::npos) {
@@ -955,7 +908,10 @@ MaybeHandle<String> Intl::StringLocaleConvertCase(Isolate* isolate,
// tags (x-foo) or grandfathered irregular tags (e.g. i-enochian) would have
// only 'x' or 'i' when they get here.
if (V8_UNLIKELY(requested_locale.length() != 2)) {
- return ConvertCase(s, to_upper, isolate);
+ if (to_upper) {
+ return ConvertToUpper(isolate, s);
+ }
+ return ConvertToLower(isolate, s);
}
// TODO(jshin): Consider adding a fast path for ASCII or Latin-1. The fastpath
// in the root locale needs to be adjusted for az, lt and tr because even case
@@ -963,9 +919,12 @@ MaybeHandle<String> Intl::StringLocaleConvertCase(Isolate* isolate,
// Greek (el) does not require any adjustment.
if (V8_UNLIKELY((requested_locale == "tr") || (requested_locale == "el") ||
(requested_locale == "lt") || (requested_locale == "az"))) {
- return LocaleConvertCase(s, isolate, to_upper, requested_locale.c_str());
+ return LocaleConvertCase(isolate, s, to_upper, requested_locale.c_str());
} else {
- return ConvertCase(s, to_upper, isolate);
+ if (to_upper) {
+ return ConvertToUpper(isolate, s);
+ }
+ return ConvertToLower(isolate, s);
}
}
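
The tr/az/lt (and el) special cases exist because ICU's locale-sensitive case mapping diverges from the root locale for those languages. A standalone sketch of the divergence for Turkish (not V8 code):

    #include <iostream>
    #include <string>
    #include <unicode/locid.h>
    #include <unicode/unistr.h>

    int main() {
      icu::UnicodeString s = icu::UnicodeString::fromUTF8("i");
      icu::UnicodeString root_upper(s), turkish_upper(s);
      root_upper.toUpper(icu::Locale::getRoot());  // "I"
      turkish_upper.toUpper(icu::Locale("tr"));    // U+0130, dotted capital I
      std::string a, b;
      root_upper.toUTF8String(a);
      turkish_upper.toUTF8String(b);
      std::cout << a << " vs " << b << std::endl;
    }
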
@@ -974,46 +933,57 @@ MaybeHandle<Object> Intl::StringLocaleCompare(Isolate* isolate,
Handle<String> string2,
Handle<Object> locales,
Handle<Object> options) {
- Factory* factory = isolate->factory();
- Handle<JSObject> collator;
+ // We only cache the instance when both locales and options are undefined,
+ // as that is the only case when the specified side-effects of examining
+ // those arguments are unobservable.
+ bool can_cache =
+ locales->IsUndefined(isolate) && options->IsUndefined(isolate);
+ if (can_cache) {
+ // Both locales and options are undefined, check the cache.
+ icu::Collator* cached_icu_collator =
+ static_cast<icu::Collator*>(isolate->get_cached_icu_object(
+ Isolate::ICUObjectCacheType::kDefaultCollator));
+ // We may use the cached icu::Collator for a fast path.
+ if (cached_icu_collator != nullptr) {
+ return Intl::CompareStrings(isolate, *cached_icu_collator, string1,
+ string2);
+ }
+ }
+
+ Handle<JSFunction> constructor = Handle<JSFunction>(
+ JSFunction::cast(
+ isolate->context()->native_context()->intl_collator_function()),
+ isolate);
+
+ Handle<JSCollator> collator;
ASSIGN_RETURN_ON_EXCEPTION(
isolate, collator,
- CachedOrNewService(isolate, factory->NewStringFromStaticChars("collator"),
- locales, options, factory->undefined_value()),
- Object);
- CHECK(collator->IsJSCollator());
- return Intl::CompareStrings(isolate, Handle<JSCollator>::cast(collator),
- string1, string2);
+ New<JSCollator>(isolate, constructor, locales, options), Object);
+ if (can_cache) {
+ isolate->set_icu_object_in_cache(
+ Isolate::ICUObjectCacheType::kDefaultCollator,
+ std::static_pointer_cast<icu::UObject>(
+ collator->icu_collator()->get()));
+ }
+ icu::Collator* icu_collator = collator->icu_collator()->raw();
+ return Intl::CompareStrings(isolate, *icu_collator, string1, string2);
}
// ecma402/#sec-collator-comparestrings
Handle<Object> Intl::CompareStrings(Isolate* isolate,
- Handle<JSCollator> collator,
+ const icu::Collator& icu_collator,
Handle<String> string1,
Handle<String> string2) {
Factory* factory = isolate->factory();
- icu::Collator* icu_collator = collator->icu_collator()->raw();
- CHECK_NOT_NULL(icu_collator);
string1 = String::Flatten(isolate, string1);
string2 = String::Flatten(isolate, string2);
UCollationResult result;
UErrorCode status = U_ZERO_ERROR;
- {
- DisallowHeapAllocation no_gc;
- int32_t length1 = string1->length();
- int32_t length2 = string2->length();
- String::FlatContent flat1 = string1->GetFlatContent();
- String::FlatContent flat2 = string2->GetFlatContent();
- std::unique_ptr<uc16[]> sap1;
- std::unique_ptr<uc16[]> sap2;
- icu::UnicodeString string_val1(
- FALSE, GetUCharBufferFromFlat(flat1, &sap1, length1), length1);
- icu::UnicodeString string_val2(
- FALSE, GetUCharBufferFromFlat(flat2, &sap2, length2), length2);
- result = icu_collator->compare(string_val1, string_val2, status);
- }
+ icu::UnicodeString string_val1 = Intl::ToICUUnicodeString(isolate, string1);
+ icu::UnicodeString string_val2 = Intl::ToICUUnicodeString(isolate, string2);
+ result = icu_collator.compare(string_val1, string_val2, status);
DCHECK(U_SUCCESS(status));
return factory->NewNumberFromInt(result);
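
With CompareStrings now taking a raw icu::Collator, the core comparison is a single icu::Collator::compare call. A standalone sketch (Compare is a hypothetical wrapper, not part of this patch):

    #include <iostream>
    #include <memory>
    #include <unicode/coll.h>
    #include <unicode/locid.h>
    #include <unicode/unistr.h>
    #include <unicode/utypes.h>

    // Returns -1, 0 or 1, like Intl.Collator.prototype.compare.
    int Compare(const icu::Collator& collator, const icu::UnicodeString& a,
                const icu::UnicodeString& b) {
      UErrorCode status = U_ZERO_ERROR;
      UCollationResult result = collator.compare(a, b, status);
      return U_SUCCESS(status) ? static_cast<int>(result) : 0;
    }

    int main() {
      UErrorCode status = U_ZERO_ERROR;
      std::unique_ptr<icu::Collator> collator(
          icu::Collator::createInstance(icu::Locale("de"), status));
      if (U_FAILURE(status)) return 1;
      // Prints -1: "ä" sorts before "z" under German collation.
      std::cout << Compare(*collator, icu::UnicodeString::fromUTF8("ä"),
                           icu::UnicodeString::fromUTF8("z"))
                << std::endl;
    }
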
@@ -1024,28 +994,49 @@ MaybeHandle<String> Intl::NumberToLocaleString(Isolate* isolate,
Handle<Object> num,
Handle<Object> locales,
Handle<Object> options) {
- Factory* factory = isolate->factory();
- Handle<JSObject> number_format_holder;
- // 2. Let numberFormat be ? Construct(%NumberFormat%, « locales, options »).
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, number_format_holder,
- CachedOrNewService(isolate,
- factory->NewStringFromStaticChars("numberformat"),
- locales, options, factory->undefined_value()),
- String);
- DCHECK(number_format_holder->IsJSNumberFormat());
- Handle<JSNumberFormat> number_format = Handle<JSNumberFormat>(
- JSNumberFormat::cast(*number_format_holder), isolate);
-
Handle<Object> number_obj;
ASSIGN_RETURN_ON_EXCEPTION(isolate, number_obj,
Object::ToNumber(isolate, num), String);
- // Spec treats -0 and +0 as 0.
- double number = number_obj->Number() + 0;
+ double number = number_obj->Number();
+
+ // We only cache the instance when both locales and options are undefined,
+ // as that is the only case when the specified side-effects of examining
+ // those arguments are unobservable.
+ bool can_cache =
+ locales->IsUndefined(isolate) && options->IsUndefined(isolate);
+ if (can_cache) {
+ icu::NumberFormat* cached_number_format =
+ static_cast<icu::NumberFormat*>(isolate->get_cached_icu_object(
+ Isolate::ICUObjectCacheType::kDefaultNumberFormat));
+ // We may use the cached icu::NumberFormat for a fast path.
+ if (cached_number_format != nullptr) {
+ return JSNumberFormat::FormatNumber(isolate, *cached_number_format,
+ number);
+ }
+ }
+
+ Handle<JSFunction> constructor = Handle<JSFunction>(
+ JSFunction::cast(
+ isolate->context()->native_context()->intl_number_format_function()),
+ isolate);
+ Handle<JSNumberFormat> number_format;
+ // 2. Let numberFormat be ? Construct(%NumberFormat%, « locales, options »).
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, number_format,
+ New<JSNumberFormat>(isolate, constructor, locales, options), String);
+
+ if (can_cache) {
+ isolate->set_icu_object_in_cache(
+ Isolate::ICUObjectCacheType::kDefaultNumberFormat,
+ std::static_pointer_cast<icu::UObject>(
+ number_format->icu_number_format()->get()));
+ }
// Return FormatNumber(numberFormat, x).
- return JSNumberFormat::FormatNumber(isolate, number_format, number);
+ icu::NumberFormat* icu_number_format =
+ number_format->icu_number_format()->raw();
+ return JSNumberFormat::FormatNumber(isolate, *icu_number_format, number);
}
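
The cache avoids rebuilding an icu::NumberFormat on every Number.prototype.toLocaleString call made with default arguments. A standalone sketch of the reuse it enables (not V8's actual cache):

    #include <iostream>
    #include <memory>
    #include <string>
    #include <unicode/locid.h>
    #include <unicode/numfmt.h>
    #include <unicode/unistr.h>
    #include <unicode/utypes.h>

    int main() {
      UErrorCode status = U_ZERO_ERROR;
      // Create once, then reuse for every subsequent format call.
      std::unique_ptr<icu::NumberFormat> fmt(
          icu::NumberFormat::createInstance(icu::Locale("en", "US"), status));
      if (U_FAILURE(status)) return 1;
      icu::UnicodeString out;
      fmt->format(1234567.89, out);  // "1,234,567.89"
      std::string utf8;
      out.toUTF8String(utf8);
      std::cout << utf8 << std::endl;
    }
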
namespace {
@@ -1232,10 +1223,19 @@ std::string BestAvailableLocale(const std::set<std::string>& available_locales,
}
}
-// Removes unicode extensions from a given bcp47 language tag.
-// For example, converts 'en-US-u-co-emoji' to 'en-US'.
-std::string RemoveUnicodeExtensions(const std::string& locale) {
+struct ParsedLocale {
+ std::string no_extensions_locale;
+ std::string extension;
+};
+
+// Returns a struct containing a bcp47 tag without unicode extensions
+// and the removed unicode extensions.
+//
+// For example, given 'en-US-u-co-emoji' returns 'en-US' and
+// '-u-co-emoji' (the extension keeps its leading '-').
+ParsedLocale ParseBCP47Locale(const std::string& locale) {
size_t length = locale.length();
+ ParsedLocale parsed_locale;
// Privateuse or grandfathered locales have no extension sequences.
if ((length > 1) && (locale[1] == '-')) {
@@ -1243,20 +1243,25 @@ std::string RemoveUnicodeExtensions(const std::string& locale) {
// privateuse extension. ICU can sometimes mess up the
// canonicalization.
CHECK(locale[0] == 'x' || locale[0] == 'i');
- return locale;
+ parsed_locale.no_extensions_locale = locale;
+ return parsed_locale;
}
size_t unicode_extension_start = locale.find("-u-");
// No unicode extensions found.
- if (unicode_extension_start == std::string::npos) return locale;
+ if (unicode_extension_start == std::string::npos) {
+ parsed_locale.no_extensions_locale = locale;
+ return parsed_locale;
+ }
size_t private_extension_start = locale.find("-x-");
// Unicode extensions found within privateuse subtags don't count.
if (private_extension_start != std::string::npos &&
private_extension_start < unicode_extension_start) {
- return locale;
+ parsed_locale.no_extensions_locale = locale;
+ return parsed_locale;
}
const std::string beginning = locale.substr(0, unicode_extension_start);
@@ -1277,7 +1282,10 @@ std::string RemoveUnicodeExtensions(const std::string& locale) {
}
const std::string end = locale.substr(unicode_extension_end);
- return beginning + end;
+ parsed_locale.no_extensions_locale = beginning + end;
+ parsed_locale.extension = locale.substr(
+ unicode_extension_start, unicode_extension_end - unicode_extension_start);
+ return parsed_locale;
}
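
A simplified standalone sketch of the split for the common case (SplitUnicodeExtension is hypothetical and skips the privateuse and extension-end scanning done above):

    #include <cassert>
    #include <string>

    struct Parsed {
      std::string no_extensions_locale;
      std::string extension;
    };

    Parsed SplitUnicodeExtension(const std::string& locale) {
      size_t u = locale.find("-u-");
      if (u == std::string::npos) return {locale, ""};
      // Keep the leading '-' in the extension so that, as in LookupMatcher
      // further down, available_locale + extension reassembles a valid tag.
      return {locale.substr(0, u), locale.substr(u)};
    }

    int main() {
      Parsed p = SplitUnicodeExtension("en-US-u-co-emoji");
      assert(p.no_extensions_locale == "en-US");
      assert(p.extension == "-u-co-emoji");
    }
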
// ecma402/#sec-lookupsupportedlocales
@@ -1291,7 +1299,8 @@ std::vector<std::string> LookupSupportedLocales(
for (const std::string& locale : requested_locales) {
// 2. a. Let noExtensionsLocale be the String value that is locale
// with all Unicode locale extension sequences removed.
- std::string no_extension_locale = RemoveUnicodeExtensions(locale);
+ std::string no_extension_locale =
+ ParseBCP47Locale(locale).no_extensions_locale;
// 2. b. Let availableLocale be
// BestAvailableLocale(availableLocales, noExtensionsLocale).
@@ -1317,56 +1326,38 @@ std::vector<std::string> BestFitSupportedLocales(
return LookupSupportedLocales(available_locales, requested_locales);
}
-enum MatcherOption { kBestFit, kLookup };
-
-// TODO(bstell): should this be moved somewhere where it is reusable?
-// Implement steps 5, 6, 7 for ECMA 402 9.2.9 SupportedLocales
-// https://tc39.github.io/ecma402/#sec-supportedlocales
-MaybeHandle<JSObject> CreateReadOnlyArray(Isolate* isolate,
- std::vector<std::string> elements) {
+// ecma262 #sec-createarrayfromlist
+Handle<JSArray> CreateArrayFromList(Isolate* isolate,
+ std::vector<std::string> elements,
+ PropertyAttributes attr) {
Factory* factory = isolate->factory();
- if (elements.size() >= kMaxUInt32) {
- THROW_NEW_ERROR(
- isolate, NewRangeError(MessageTemplate::kInvalidArrayLength), JSObject);
- }
-
- PropertyAttributes attr =
- static_cast<PropertyAttributes>(READ_ONLY | DONT_DELETE);
-
- // 5. Let subset be CreateArrayFromList(elements).
- // 6. Let keys be subset.[[OwnPropertyKeys]]().
- Handle<JSArray> subset = factory->NewJSArray(0);
+ // Let array be ! ArrayCreate(0).
+ Handle<JSArray> array = factory->NewJSArray(0);
- // 7. For each element P of keys in List order, do
uint32_t length = static_cast<uint32_t>(elements.size());
+ // 3. Let n be 0.
+ // 4. For each element e of elements, do
for (uint32_t i = 0; i < length; i++) {
+ // a. Let status be CreateDataProperty(array, ! ToString(n), e).
const std::string& part = elements[i];
Handle<String> value =
factory->NewStringFromUtf8(CStrVector(part.c_str())).ToHandleChecked();
- JSObject::AddDataElement(subset, i, value, attr);
+ JSObject::AddDataElement(array, i, value, attr);
}
-
- // 7.a. Let desc be PropertyDescriptor { [[Configurable]]: false,
- // [[Writable]]: false }.
- PropertyDescriptor desc;
- desc.set_writable(false);
- desc.set_configurable(false);
-
- // 7.b. Perform ! DefinePropertyOrThrow(subset, P, desc).
- JSArray::ArraySetLength(isolate, subset, &desc, kThrowOnError).ToChecked();
- return subset;
+ // 5. Return array.
+ return array;
}
// ECMA 402 9.2.9 SupportedLocales(availableLocales, requestedLocales, options)
// https://tc39.github.io/ecma402/#sec-supportedlocales
MaybeHandle<JSObject> SupportedLocales(
- Isolate* isolate, ICUService service,
+ Isolate* isolate, const char* method,
const std::set<std::string>& available_locales,
const std::vector<std::string>& requested_locales, Handle<Object> options) {
std::vector<std::string> supported_locales;
// 2. Else, let matcher be "best fit".
- MatcherOption matcher = kBestFit;
+ Intl::MatcherOption matcher = Intl::MatcherOption::kBestFit;
// 1. If options is not undefined, then
if (!options->IsUndefined(isolate)) {
@@ -1377,60 +1368,53 @@ MaybeHandle<JSObject> SupportedLocales(
// 1. b. Let matcher be ? GetOption(options, "localeMatcher", "string",
// « "lookup", "best fit" », "best fit").
- std::unique_ptr<char[]> matcher_str = nullptr;
- std::vector<const char*> matcher_values = {"lookup", "best fit"};
- Maybe<bool> maybe_found_matcher = Intl::GetStringOption(
- isolate, options_obj, "localeMatcher", matcher_values,
- ICUServiceToString(service), &matcher_str);
- MAYBE_RETURN(maybe_found_matcher, MaybeHandle<JSObject>());
- if (maybe_found_matcher.FromJust()) {
- DCHECK_NOT_NULL(matcher_str.get());
- if (strcmp(matcher_str.get(), "lookup") == 0) {
- matcher = kLookup;
- }
- }
+ Maybe<Intl::MatcherOption> maybe_locale_matcher =
+ Intl::GetLocaleMatcher(isolate, options_obj, method);
+ MAYBE_RETURN(maybe_locale_matcher, MaybeHandle<JSObject>());
+ matcher = maybe_locale_matcher.FromJust();
}
// 3. If matcher is "best fit", then
// a. Let supportedLocales be BestFitSupportedLocales(availableLocales,
// requestedLocales).
- if (matcher == kBestFit) {
+ if (matcher == Intl::MatcherOption::kBestFit) {
supported_locales =
BestFitSupportedLocales(available_locales, requested_locales);
} else {
// 4. Else,
// a. Let supportedLocales be LookupSupportedLocales(availableLocales,
// requestedLocales).
- DCHECK_EQ(matcher, kLookup);
+ DCHECK_EQ(matcher, Intl::MatcherOption::kLookup);
supported_locales =
LookupSupportedLocales(available_locales, requested_locales);
}
- // TODO(jkummerow): Possibly revisit why the spec has the individual entries
- // readonly but the array is not frozen.
- // https://github.com/tc39/ecma402/issues/258
+ // 5. Return CreateArrayFromList(supportedLocales).
+ PropertyAttributes attr = static_cast<PropertyAttributes>(NONE);
+ return CreateArrayFromList(isolate, supported_locales, attr);
+}
- // 5. Let subset be CreateArrayFromList(supportedLocales).
- // 6. Let keys be subset.[[OwnPropertyKeys]]().
- // 7. For each element P of keys in List order, do
- // a. Let desc be PropertyDescriptor { [[Configurable]]: false,
- // [[Writable]]: false }.
- // b. Perform ! DefinePropertyOrThrow(subset, P, desc).
- MaybeHandle<JSObject> subset =
- CreateReadOnlyArray(isolate, supported_locales);
+} // namespace
- // 8. Return subset.
- return subset;
+// ecma-402 #sec-intl.getcanonicallocales
+MaybeHandle<JSArray> Intl::GetCanonicalLocales(Isolate* isolate,
+ Handle<Object> locales) {
+ // 1. Let ll be ? CanonicalizeLocaleList(locales).
+ Maybe<std::vector<std::string>> maybe_ll =
+ CanonicalizeLocaleList(isolate, locales, false);
+ MAYBE_RETURN(maybe_ll, MaybeHandle<JSArray>());
+
+ // 2. Return CreateArrayFromList(ll).
+ PropertyAttributes attr = static_cast<PropertyAttributes>(NONE);
+ return CreateArrayFromList(isolate, maybe_ll.FromJust(), attr);
}
-} // namespace
// ECMA 402 Intl.*.supportedLocalesOf
-MaybeHandle<JSObject> Intl::SupportedLocalesOf(Isolate* isolate,
- ICUService service,
- Handle<Object> locales,
- Handle<Object> options) {
+MaybeHandle<JSObject> Intl::SupportedLocalesOf(
+ Isolate* isolate, const char* method,
+ const std::set<std::string>& available_locales, Handle<Object> locales,
+ Handle<Object> options) {
// Let availableLocales be %Collator%.[[AvailableLocales]].
- std::set<std::string> available_locales = GetAvailableLocales(service);
// Let requestedLocales be ? CanonicalizeLocaleList(locales).
Maybe<std::vector<std::string>> requested_locales =
@@ -1438,17 +1422,57 @@ MaybeHandle<JSObject> Intl::SupportedLocalesOf(Isolate* isolate,
MAYBE_RETURN(requested_locales, MaybeHandle<JSObject>());
// Return ? SupportedLocales(availableLocales, requestedLocales, options).
- return SupportedLocales(isolate, service, available_locales,
+ return SupportedLocales(isolate, method, available_locales,
requested_locales.FromJust(), options);
}
-std::map<std::string, std::string> Intl::LookupUnicodeExtensions(
- const icu::Locale& icu_locale, const std::set<std::string>& relevant_keys) {
+namespace {
+template <typename T>
+bool IsValidExtension(const icu::Locale& locale, const char* key,
+ const std::string& value) {
+ UErrorCode status = U_ZERO_ERROR;
+ std::unique_ptr<icu::StringEnumeration> enumeration(
+ T::getKeywordValuesForLocale(key, icu::Locale(locale.getBaseName()),
+ false, status));
+ if (U_SUCCESS(status)) {
+ int32_t length;
+ std::string legacy_type(uloc_toLegacyType(key, value.c_str()));
+ for (const char* item = enumeration->next(&length, status); item != nullptr;
+ item = enumeration->next(&length, status)) {
+ if (U_SUCCESS(status) && legacy_type == item) {
+ return true;
+ }
+ }
+ }
+ return false;
+}
+
+bool IsValidCalendar(const icu::Locale& locale, const std::string& value) {
+ return IsValidExtension<icu::Calendar>(locale, "calendar", value);
+}
+
+bool IsValidCollation(const icu::Locale& locale, const std::string& value) {
+ std::set<std::string> invalid_values = {"standard", "search"};
+ if (invalid_values.find(value) != invalid_values.end()) return false;
+ return IsValidExtension<icu::Collator>(locale, "collation", value);
+}
+
+bool IsValidNumberingSystem(const std::string& value) {
+ std::set<std::string> invalid_values = {"native", "traditio", "finance"};
+ if (invalid_values.find(value) != invalid_values.end()) return false;
+ UErrorCode status = U_ZERO_ERROR;
+ std::unique_ptr<icu::NumberingSystem> numbering_system(
+ icu::NumberingSystem::createInstanceByName(value.c_str(), status));
+ return U_SUCCESS(status) && numbering_system.get() != nullptr;
+}
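
IsValidNumberingSystem rejects the three names ECMA-402 disallows, then defers to ICU for existence. Roughly the same check as a standalone helper (NumberingSystemIsValid is hypothetical):

    #include <memory>
    #include <set>
    #include <string>
    #include <unicode/numsys.h>
    #include <unicode/utypes.h>

    bool NumberingSystemIsValid(const std::string& value) {
      static const std::set<std::string> kDisallowed = {"native", "traditio",
                                                        "finance"};
      if (kDisallowed.count(value) != 0) return false;
      UErrorCode status = U_ZERO_ERROR;
      std::unique_ptr<icu::NumberingSystem> ns(
          icu::NumberingSystem::createInstanceByName(value.c_str(), status));
      return U_SUCCESS(status) && ns != nullptr;
    }
    // NumberingSystemIsValid("arab") is true; "traditio" is rejected up front.
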
+
+std::map<std::string, std::string> LookupAndValidateUnicodeExtensions(
+ icu::Locale* icu_locale, const std::set<std::string>& relevant_keys) {
std::map<std::string, std::string> extensions;
UErrorCode status = U_ZERO_ERROR;
std::unique_ptr<icu::StringEnumeration> keywords(
- icu_locale.createKeywords(status));
+ icu_locale->createKeywords(status));
if (U_FAILURE(status)) return extensions;
if (!keywords) return extensions;
@@ -1466,7 +1490,7 @@ std::map<std::string, std::string> Intl::LookupUnicodeExtensions(
continue;
}
- icu_locale.getKeywordValue(keyword, value, ULOC_FULLNAME_CAPACITY, status);
+ icu_locale->getKeywordValue(keyword, value, ULOC_FULLNAME_CAPACITY, status);
// Ignore failures in ICU and skip to the next keyword.
//
@@ -1478,16 +1502,345 @@ std::map<std::string, std::string> Intl::LookupUnicodeExtensions(
const char* bcp47_key = uloc_toUnicodeLocaleKey(keyword);
- // Ignore keywords that we don't recognize - spec allows that.
if (bcp47_key && (relevant_keys.find(bcp47_key) != relevant_keys.end())) {
const char* bcp47_value = uloc_toUnicodeLocaleType(bcp47_key, value);
- extensions.insert(
- std::pair<std::string, std::string>(bcp47_key, bcp47_value));
+ bool is_valid_value = false;
+ // 8.h.ii.1.a If keyLocaleData contains requestedValue, then
+ if (strcmp("ca", bcp47_key) == 0) {
+ is_valid_value = IsValidCalendar(*icu_locale, bcp47_value);
+ } else if (strcmp("co", bcp47_key) == 0) {
+ is_valid_value = IsValidCollation(*icu_locale, bcp47_value);
+ } else if (strcmp("hc", bcp47_key) == 0) {
+ // https://www.unicode.org/repos/cldr/tags/latest/common/bcp47/calendar.xml
+ std::set<std::string> valid_values = {"h11", "h12", "h23", "h24"};
+ is_valid_value = valid_values.find(bcp47_value) != valid_values.end();
+ } else if (strcmp("lb", bcp47_key) == 0) {
+ // https://www.unicode.org/repos/cldr/tags/latest/common/bcp47/segmentation.xml
+ std::set<std::string> valid_values = {"strict", "normal", "loose"};
+ is_valid_value = valid_values.find(bcp47_value) != valid_values.end();
+ } else if (strcmp("kn", bcp47_key) == 0) {
+ // https://www.unicode.org/repos/cldr/tags/latest/common/bcp47/collation.xml
+ std::set<std::string> valid_values = {"true", "false"};
+ is_valid_value = valid_values.find(bcp47_value) != valid_values.end();
+ } else if (strcmp("kf", bcp47_key) == 0) {
+ // https://www.unicode.org/repos/cldr/tags/latest/common/bcp47/collation.xml
+ std::set<std::string> valid_values = {"upper", "lower", "false"};
+ is_valid_value = valid_values.find(bcp47_value) != valid_values.end();
+ } else if (strcmp("nu", bcp47_key) == 0) {
+ is_valid_value = IsValidNumberingSystem(bcp47_value);
+ }
+ if (is_valid_value) {
+ extensions.insert(
+ std::pair<std::string, std::string>(bcp47_key, bcp47_value));
+ continue;
+ }
}
+ status = U_ZERO_ERROR;
+ icu_locale->setKeywordValue(keyword, nullptr, status);
+ CHECK(U_SUCCESS(status));
}
return extensions;
}
+// ecma402/#sec-lookupmatcher
+std::string LookupMatcher(Isolate* isolate,
+ const std::set<std::string>& available_locales,
+ const std::vector<std::string>& requested_locales) {
+ // 1. Let result be a new Record.
+ std::string result;
+
+ // 2. For each element locale of requestedLocales in List order, do
+ for (const std::string& locale : requested_locales) {
+ // 2. a. Let noExtensionsLocale be the String value that is locale
+ // with all Unicode locale extension sequences removed.
+ ParsedLocale parsed_locale = ParseBCP47Locale(locale);
+ std::string no_extensions_locale = parsed_locale.no_extensions_locale;
+
+ // 2. b. Let availableLocale be
+ // BestAvailableLocale(availableLocales, noExtensionsLocale).
+ std::string available_locale =
+ BestAvailableLocale(available_locales, no_extensions_locale);
+
+ // 2. c. If availableLocale is not undefined, append locale to the
+ // end of subset.
+ if (!available_locale.empty()) {
+ // Note: The following steps are not performed here because we
+ // can use ICU to parse the unicode locale extension sequence
+ // as part of Intl::ResolveLocale.
+ //
+ // There's no need to separate the unicode locale extensions
+ // right here. Instead just return the available locale with the
+ // extensions.
+ //
+ // 2. c. i. Set result.[[locale]] to availableLocale.
+ // 2. c. ii. If locale and noExtensionsLocale are not the same
+ // String value, then
+ // 2. c. ii. 1. Let extension be the String value consisting of
+ // the first substring of locale that is a Unicode locale
+ // extension sequence.
+ // 2. c. ii. 2. Set result.[[extension]] to extension.
+ // 2. c. iii. Return result.
+ return available_locale + parsed_locale.extension;
+ }
+ }
+
+ // 3. Let defLocale be DefaultLocale();
+ // 4. Set result.[[locale]] to defLocale.
+ // 5. Return result.
+ return DefaultLocale(isolate);
+}
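
LookupMatcher leans on the BestAvailableLocale helper defined earlier in this file to fall back from, say, "de-CH-1996" to "de". A standalone sketch of that truncation loop per ecma402 #sec-bestavailablelocale (BestAvailable is a hypothetical stand-in):

    #include <set>
    #include <string>

    std::string BestAvailable(const std::set<std::string>& available,
                              std::string candidate) {
      while (!candidate.empty()) {
        if (available.count(candidate) != 0) return candidate;
        size_t pos = candidate.rfind('-');
        if (pos == std::string::npos) return std::string();
        // Per spec, also strip a preceding single-character subtag, which
        // cannot end a language tag on its own.
        if (pos >= 2 && candidate[pos - 2] == '-') pos -= 2;
        candidate.resize(pos);
      }
      return std::string();
    }
    // With available = {"de", "fr"}: BestAvailable(available, "de-CH-1996")
    // returns "de".
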
+
+} // namespace
+
+// This function doesn't correspond exactly to the spec. Instead
+// we use ICU to do all the string manipulations that the spec
+// performs.
+//
+// The spec uses this function to normalize values for various
+// relevant extension keys (such as disallowing "search" for
+// collation). Instead of doing this here, we let the callers of
+// this method perform such normalization.
+//
+// ecma402/#sec-resolvelocale
+Intl::ResolvedLocale Intl::ResolveLocale(
+ Isolate* isolate, const std::set<std::string>& available_locales,
+ const std::vector<std::string>& requested_locales, MatcherOption matcher,
+ const std::set<std::string>& relevant_extension_keys) {
+ std::string locale;
+ if (matcher == Intl::MatcherOption::kLookup) {
+ locale = LookupMatcher(isolate, available_locales, requested_locales);
+ } else if (matcher == Intl::MatcherOption::kBestFit) {
+ // TODO(intl): Implement better lookup algorithm.
+ locale = LookupMatcher(isolate, available_locales, requested_locales);
+ }
+
+ icu::Locale icu_locale = CreateICULocale(locale);
+ std::map<std::string, std::string> extensions =
+ LookupAndValidateUnicodeExtensions(&icu_locale, relevant_extension_keys);
+
+ std::string canonicalized_locale = Intl::ToLanguageTag(icu_locale).FromJust();
+
+ // TODO(gsathya): Remove privateuse subtags from extensions.
+
+ return Intl::ResolvedLocale{canonicalized_locale, icu_locale, extensions};
+}
+
+Managed<icu::UnicodeString> Intl::SetTextToBreakIterator(
+ Isolate* isolate, Handle<String> text, icu::BreakIterator* break_iterator) {
+ icu::UnicodeString* u_text =
+ (icu::UnicodeString*)(Intl::ToICUUnicodeString(isolate, text).clone());
+
+ Handle<Managed<icu::UnicodeString>> new_u_text =
+ Managed<icu::UnicodeString>::FromRawPtr(isolate, 0, u_text);
+
+ break_iterator->setText(*u_text);
+ return *new_u_text;
+}
+
+// ecma262 #sec-string.prototype.normalize
+MaybeHandle<String> Intl::Normalize(Isolate* isolate, Handle<String> string,
+ Handle<Object> form_input) {
+ const char* form_name;
+ UNormalization2Mode form_mode;
+ if (form_input->IsUndefined(isolate)) {
+ // default is NFC
+ form_name = "nfc";
+ form_mode = UNORM2_COMPOSE;
+ } else {
+ Handle<String> form;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, form,
+ Object::ToString(isolate, form_input), String);
+
+ if (String::Equals(isolate, form, isolate->factory()->NFC_string())) {
+ form_name = "nfc";
+ form_mode = UNORM2_COMPOSE;
+ } else if (String::Equals(isolate, form,
+ isolate->factory()->NFD_string())) {
+ form_name = "nfc";
+ form_mode = UNORM2_DECOMPOSE;
+ } else if (String::Equals(isolate, form,
+ isolate->factory()->NFKC_string())) {
+ form_name = "nfkc";
+ form_mode = UNORM2_COMPOSE;
+ } else if (String::Equals(isolate, form,
+ isolate->factory()->NFKD_string())) {
+ form_name = "nfkc";
+ form_mode = UNORM2_DECOMPOSE;
+ } else {
+ Handle<String> valid_forms =
+ isolate->factory()->NewStringFromStaticChars("NFC, NFD, NFKC, NFKD");
+ THROW_NEW_ERROR(
+ isolate,
+ NewRangeError(MessageTemplate::kNormalizationForm, valid_forms),
+ String);
+ }
+ }
+
+ int length = string->length();
+ string = String::Flatten(isolate, string);
+ icu::UnicodeString result;
+ std::unique_ptr<uc16[]> sap;
+ UErrorCode status = U_ZERO_ERROR;
+ icu::UnicodeString input = ToICUUnicodeString(isolate, string);
+ // Getting a singleton. Should not free it.
+ const icu::Normalizer2* normalizer =
+ icu::Normalizer2::getInstance(nullptr, form_name, form_mode, status);
+ DCHECK(U_SUCCESS(status));
+ CHECK_NOT_NULL(normalizer);
+ int32_t normalized_prefix_length =
+ normalizer->spanQuickCheckYes(input, status);
+ // Quick return if the input is already normalized.
+ if (length == normalized_prefix_length) return string;
+ icu::UnicodeString unnormalized =
+ input.tempSubString(normalized_prefix_length);
+ // Read-only alias of the normalized prefix.
+ result.setTo(false, input.getBuffer(), normalized_prefix_length);
+ // copy-on-write; normalize the suffix and append to |result|.
+ normalizer->normalizeSecondAndAppend(result, unnormalized, status);
+
+ if (U_FAILURE(status)) {
+ THROW_NEW_ERROR(isolate, NewTypeError(MessageTemplate::kIcuError), String);
+ }
+
+ return Intl::ToString(isolate, result);
+}
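
The spanQuickCheckYes call above lets Normalize() skip copying an already-normalized prefix. The same strategy as a standalone NFC helper (NormalizeNfc is hypothetical; error handling is simplified):

    #include <unicode/normalizer2.h>
    #include <unicode/unistr.h>
    #include <unicode/utypes.h>

    icu::UnicodeString NormalizeNfc(const icu::UnicodeString& input,
                                    UErrorCode& status) {
      const icu::Normalizer2* nfc = icu::Normalizer2::getNFCInstance(status);
      if (U_FAILURE(status)) return input;
      int32_t ok_prefix = nfc->spanQuickCheckYes(input, status);
      if (U_FAILURE(status) || ok_prefix == input.length()) return input;
      // Read-only alias of the normalized prefix; copy-on-write kicks in
      // when the normalized remainder is appended.
      icu::UnicodeString result;
      result.setTo(false, input.getBuffer(), ok_prefix);
      nfc->normalizeSecondAndAppend(result, input.tempSubString(ok_prefix),
                                    status);
      return result;
    }
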
+
+// ICUTimezoneCache calls out to ICU for TimezoneCache
+// functionality in a straightforward way.
+class ICUTimezoneCache : public base::TimezoneCache {
+ public:
+ ICUTimezoneCache() : timezone_(nullptr) { Clear(); }
+
+ ~ICUTimezoneCache() override { Clear(); }
+
+ const char* LocalTimezone(double time_ms) override;
+
+ double DaylightSavingsOffset(double time_ms) override;
+
+ double LocalTimeOffset(double time_ms, bool is_utc) override;
+
+ void Clear() override;
+
+ private:
+ icu::TimeZone* GetTimeZone();
+
+ bool GetOffsets(double time_ms, bool is_utc, int32_t* raw_offset,
+ int32_t* dst_offset);
+
+ icu::TimeZone* timezone_;
+
+ std::string timezone_name_;
+ std::string dst_timezone_name_;
+};
+
+const char* ICUTimezoneCache::LocalTimezone(double time_ms) {
+ bool is_dst = DaylightSavingsOffset(time_ms) != 0;
+ std::string* name = is_dst ? &dst_timezone_name_ : &timezone_name_;
+ if (name->empty()) {
+ icu::UnicodeString result;
+ GetTimeZone()->getDisplayName(is_dst, icu::TimeZone::LONG, result);
+ result += '\0';
+
+ icu::StringByteSink<std::string> byte_sink(name);
+ result.toUTF8(byte_sink);
+ }
+ DCHECK(!name->empty());
+ return name->c_str();
+}
+
+icu::TimeZone* ICUTimezoneCache::GetTimeZone() {
+ if (timezone_ == nullptr) {
+ timezone_ = icu::TimeZone::createDefault();
+ }
+ return timezone_;
+}
+
+bool ICUTimezoneCache::GetOffsets(double time_ms, bool is_utc,
+ int32_t* raw_offset, int32_t* dst_offset) {
+ UErrorCode status = U_ZERO_ERROR;
+ // TODO(jshin): ICU TimeZone class handles skipped time differently from
+ // Ecma 262 (https://github.com/tc39/ecma262/pull/778) and icu::TimeZone
+ // class does not expose the necessary API. Fixing
+ // http://bugs.icu-project.org/trac/ticket/13268 would make it easy to
+ // implement the proposed spec change. A proposed fix for ICU is
+ // https://chromium-review.googlesource.com/851265 .
+ // In the meantime, use an internal (still public) API of icu::BasicTimeZone.
+ // Once it's accepted upstream, get rid of the cast. Note that casting
+ // TimeZone to BasicTimeZone is safe because we know that icu::TimeZone used
+ // here is a BasicTimeZone.
+ if (is_utc) {
+ GetTimeZone()->getOffset(time_ms, false, *raw_offset, *dst_offset, status);
+ } else {
+ static_cast<const icu::BasicTimeZone*>(GetTimeZone())
+ ->getOffsetFromLocal(time_ms, icu::BasicTimeZone::kFormer,
+ icu::BasicTimeZone::kFormer, *raw_offset,
+ *dst_offset, status);
+ }
+
+ return U_SUCCESS(status);
+}
+
+double ICUTimezoneCache::DaylightSavingsOffset(double time_ms) {
+ int32_t raw_offset, dst_offset;
+ if (!GetOffsets(time_ms, true, &raw_offset, &dst_offset)) return 0;
+ return dst_offset;
+}
+
+double ICUTimezoneCache::LocalTimeOffset(double time_ms, bool is_utc) {
+ int32_t raw_offset, dst_offset;
+ if (!GetOffsets(time_ms, is_utc, &raw_offset, &dst_offset)) return 0;
+ return raw_offset + dst_offset;
+}
+
+void ICUTimezoneCache::Clear() {
+ delete timezone_;
+ timezone_ = nullptr;
+ timezone_name_.clear();
+ dst_timezone_name_.clear();
+}
+
+base::TimezoneCache* Intl::CreateTimeZoneCache() {
+ return FLAG_icu_timezone_data ? new ICUTimezoneCache()
+ : base::OS::CreateTimezoneCache();
+}
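
GetOffsets splits a timestamp's offset into a raw (standard time) part and a DST part; LocalTimeOffset returns their sum. Querying the same two values straight from ICU's default time zone, as a standalone sketch:

    #include <iostream>
    #include <memory>
    #include <unicode/timezone.h>
    #include <unicode/utypes.h>

    int main() {
      std::unique_ptr<icu::TimeZone> tz(icu::TimeZone::createDefault());
      UErrorCode status = U_ZERO_ERROR;
      int32_t raw_offset = 0, dst_offset = 0;
      double now_ms = 1552608000000.0;  // An arbitrary UTC timestamp (ms).
      // local=false: interpret now_ms as UTC, matching GetOffsets(is_utc).
      tz->getOffset(now_ms, false, raw_offset, dst_offset, status);
      if (U_FAILURE(status)) return 1;
      std::cout << "raw: " << raw_offset << " ms, dst: " << dst_offset
                << " ms" << std::endl;
    }
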
+
+Maybe<Intl::CaseFirst> Intl::GetCaseFirst(Isolate* isolate,
+ Handle<JSReceiver> options,
+ const char* method) {
+ return Intl::GetStringOption<Intl::CaseFirst>(
+ isolate, options, "caseFirst", method, {"upper", "lower", "false"},
+ {Intl::CaseFirst::kUpper, Intl::CaseFirst::kLower,
+ Intl::CaseFirst::kFalse},
+ Intl::CaseFirst::kUndefined);
+}
+
+Maybe<Intl::HourCycle> Intl::GetHourCycle(Isolate* isolate,
+ Handle<JSReceiver> options,
+ const char* method) {
+ return Intl::GetStringOption<Intl::HourCycle>(
+ isolate, options, "hourCycle", method, {"h11", "h12", "h23", "h24"},
+ {Intl::HourCycle::kH11, Intl::HourCycle::kH12, Intl::HourCycle::kH23,
+ Intl::HourCycle::kH24},
+ Intl::HourCycle::kUndefined);
+}
+
+Maybe<Intl::MatcherOption> Intl::GetLocaleMatcher(Isolate* isolate,
+ Handle<JSReceiver> options,
+ const char* method) {
+ return Intl::GetStringOption<Intl::MatcherOption>(
+ isolate, options, "localeMatcher", method, {"best fit", "lookup"},
+ {Intl::MatcherOption::kLookup, Intl::MatcherOption::kBestFit},
+ Intl::MatcherOption::kLookup);
+}
+
+Intl::HourCycle Intl::ToHourCycle(const std::string& hc) {
+ if (hc == "h11") return Intl::HourCycle::kH11;
+ if (hc == "h12") return Intl::HourCycle::kH12;
+ if (hc == "h23") return Intl::HourCycle::kH23;
+ if (hc == "h24") return Intl::HourCycle::kH24;
+ return Intl::HourCycle::kUndefined;
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/objects/intl-objects.h b/deps/v8/src/objects/intl-objects.h
index fd2842ebbb..4165ec4a77 100644
--- a/deps/v8/src/objects/intl-objects.h
+++ b/deps/v8/src/objects/intl-objects.h
@@ -13,13 +13,18 @@
#include <set>
#include <string>
+#include "src/base/timezone-cache.h"
#include "src/contexts.h"
-#include "src/intl.h"
#include "src/objects.h"
+#include "src/objects/managed.h"
#include "unicode/locid.h"
#include "unicode/uversion.h"
+#define V8_MINIMUM_ICU_VERSION 63
+
namespace U_ICU_NAMESPACE {
+class BreakIterator;
+class Collator;
class DecimalFormat;
class SimpleDateFormat;
class UnicodeString;
@@ -34,38 +39,19 @@ class JSCollator;
class Intl {
public:
- enum Type {
- kNumberFormat = 0,
- kCollator,
- kDateTimeFormat,
- kPluralRules,
- kBreakIterator,
- kLocale,
-
- kTypeCount
- };
-
enum class BoundFunctionContextSlot {
kBoundFunction = Context::MIN_CONTEXT_SLOTS,
kLength
};
- inline static Intl::Type TypeFromInt(int type);
- inline static Intl::Type TypeFromSmi(Smi* type);
-
- // Checks if the given object has the expected_type based by looking
- // up a private symbol on the object.
- //
- // TODO(gsathya): This should just be an instance type check once we
- // move all the Intl objects to C++.
- static bool IsObjectOfType(Isolate* isolate, Handle<Object> object,
- Intl::Type expected_type);
+ // Build a set of ICU locales from a list of Locales. If there is a locale
+ // with a script tag then the locales also include a locale without the
+ // script; e.g., pa_Guru_IN (language=Panjabi, script=Gurmukhi, country=India)
+ // would include pa_IN.
+ static std::set<std::string> BuildLocaleSet(
+ const icu::Locale* icu_available_locales, int32_t count);
- // Gets the ICU locales for a given service. If there is a locale with a
- // script tag then the locales also include a locale without the script; eg,
- // pa_Guru_IN (language=Panjabi, script=Gurmukhi, country-India) would include
- // pa_IN.
- static std::set<std::string> GetAvailableLocales(ICUService service);
+ static Maybe<std::string> ToLanguageTag(const icu::Locale& locale);
// Get the name of the numbering system from locale.
// ICU doesn't expose numbering system in any way, so we have to assume that
@@ -73,43 +59,11 @@ class Intl {
// NumberFormat/Calendar would.
static std::string GetNumberingSystem(const icu::Locale& icu_locale);
- static V8_WARN_UNUSED_RESULT MaybeHandle<JSObject> AvailableLocalesOf(
- Isolate* isolate, Handle<String> service);
-
static V8_WARN_UNUSED_RESULT MaybeHandle<JSObject> SupportedLocalesOf(
- Isolate* isolate, ICUService service, Handle<Object> locales_in,
+ Isolate* isolate, const char* method,
+ const std::set<std::string>& available_locales, Handle<Object> locales_in,
Handle<Object> options_in);
- static std::string DefaultLocale(Isolate* isolate);
-
- // The ResolveLocale abstract operation compares a BCP 47 language
- // priority list requestedLocales against the locales in
- // availableLocales and determines the best available language to
- // meet the request. availableLocales, requestedLocales, and
- // relevantExtensionKeys must be provided as List values, options
- // and localeData as Records.
- //
- // #ecma402/sec-partitiondatetimepattern
- //
- // Returns a JSObject with two properties:
- // (1) locale
- // (2) extension
- //
- // To access either, use JSObject::GetDataProperty.
- V8_WARN_UNUSED_RESULT static MaybeHandle<JSObject> ResolveLocale(
- Isolate* isolate, const char* service, Handle<Object> requestedLocales,
- Handle<Object> options);
-
- // This currently calls out to the JavaScript implementation of
- // CanonicalizeLocaleList.
- // Note: This is deprecated glue code, required only as long as ResolveLocale
- // still calls a JS implementation. The C++ successor is the overloaded
- // version below that returns a Maybe<std::vector<std::string>>.
- //
- // ecma402/#sec-canonicalizelocalelist
- V8_WARN_UNUSED_RESULT static MaybeHandle<JSObject> CanonicalizeLocaleListJS(
- Isolate* isolate, Handle<Object> locales);
-
// ECMA402 9.2.10. GetOption( options, property, type, values, fallback)
// ecma402/#sec-getoption
//
@@ -130,6 +84,32 @@ class Intl {
std::vector<const char*> values, const char* service,
std::unique_ptr<char[]>* result);
+ // A helper template to get a string option and map it onto an enum.
+ // Each entry in enum_values is the value corresponding to the string at
+ // the same index in str_values. If the option does not contain name,
+ // default_value will be returned.
+ template <typename T>
+ V8_WARN_UNUSED_RESULT static Maybe<T> GetStringOption(
+ Isolate* isolate, Handle<JSReceiver> options, const char* name,
+ const char* method, const std::vector<const char*>& str_values,
+ const std::vector<T>& enum_values, T default_value) {
+ DCHECK_EQ(str_values.size(), enum_values.size());
+ std::unique_ptr<char[]> cstr;
+ Maybe<bool> found = Intl::GetStringOption(isolate, options, name,
+ str_values, method, &cstr);
+ MAYBE_RETURN(found, Nothing<T>());
+ if (found.FromJust()) {
+ DCHECK_NOT_NULL(cstr.get());
+ for (size_t i = 0; i < str_values.size(); i++) {
+ if (strcmp(cstr.get(), str_values[i]) == 0) {
+ return Just(enum_values[i]);
+ }
+ }
+ UNREACHABLE();
+ }
+ return Just(default_value);
+ }
+
// ECMA402 9.2.10. GetOption( options, property, type, values, fallback)
// ecma402/#sec-getoption
//
@@ -159,21 +139,27 @@ class Intl {
Isolate* isolate, Handle<Object> locales,
bool only_return_one_result = false);
- V8_WARN_UNUSED_RESULT static MaybeHandle<JSObject> CreateNumberFormat(
- Isolate* isolate, Handle<String> locale, Handle<JSObject> options,
- Handle<JSObject> resolved);
+ // ecma-402 #sec-intl.getcanonicallocales
+ V8_WARN_UNUSED_RESULT static MaybeHandle<JSArray> GetCanonicalLocales(
+ Isolate* isolate, Handle<Object> locales);
// For locale sensitive functions
V8_WARN_UNUSED_RESULT static MaybeHandle<String> StringLocaleConvertCase(
Isolate* isolate, Handle<String> s, bool is_upper,
Handle<Object> locales);
+ V8_WARN_UNUSED_RESULT static MaybeHandle<String> ConvertToUpper(
+ Isolate* isolate, Handle<String> s);
+
+ V8_WARN_UNUSED_RESULT static MaybeHandle<String> ConvertToLower(
+ Isolate* isolate, Handle<String> s);
+
V8_WARN_UNUSED_RESULT static MaybeHandle<Object> StringLocaleCompare(
Isolate* isolate, Handle<String> s1, Handle<String> s2,
Handle<Object> locales, Handle<Object> options);
V8_WARN_UNUSED_RESULT static Handle<Object> CompareStrings(
- Isolate* isolate, Handle<JSCollator> collator, Handle<String> s1,
+ Isolate* isolate, const icu::Collator& collator, Handle<String> s1,
Handle<String> s2);
// ecma402/#sup-properties-of-the-number-prototype-object
@@ -186,8 +172,7 @@ class Intl {
Isolate* isolate, icu::DecimalFormat* number_format,
Handle<JSReceiver> options, int mnfd_default, int mxfd_default);
- icu::Locale static CreateICULocale(Isolate* isolate,
- Handle<String> bcp47_locale_str);
+ static icu::Locale CreateICULocale(const std::string& bcp47_locale);
// Helper function to convert a UnicodeString to a Handle<String>
V8_WARN_UNUSED_RESULT static MaybeHandle<String> ToString(
@@ -214,10 +199,6 @@ class Intl {
Handle<String> additional_property_name,
Handle<String> additional_property_value);
- // A helper function to help handle Unicode Extensions in locale.
- static std::map<std::string, std::string> LookupUnicodeExtensions(
- const icu::Locale& icu_locale, const std::set<std::string>& relevant_keys);
-
// In ECMA 402 v1, Intl constructors supported a mode of operation
// where calling them with an existing object as a receiver would
// transform the receiver into the relevant Intl instance with all
@@ -232,10 +213,57 @@ class Intl {
Isolate* isolate, Handle<JSReceiver> receiver,
Handle<JSFunction> constructor, bool has_initialized_slot);
- // A factory method to got cached objects.
- V8_WARN_UNUSED_RESULT static MaybeHandle<JSObject> CachedOrNewService(
- Isolate* isolate, Handle<String> service, Handle<Object> locales,
- Handle<Object> options, Handle<Object> internal_options);
+ // enum for "caseFirst" option: shared by Intl.Locale and Intl.Collator.
+ enum class CaseFirst { kUpper, kLower, kFalse, kUndefined };
+
+ // Shared function to read the "caseFirst" option.
+ V8_WARN_UNUSED_RESULT static Maybe<CaseFirst> GetCaseFirst(
+ Isolate* isolate, Handle<JSReceiver> options, const char* method);
+
+ // enum for "hourCycle" option: shared by Intl.Locale and Intl.DateTimeFormat.
+ enum class HourCycle { kH11, kH12, kH23, kH24, kUndefined };
+
+ static HourCycle ToHourCycle(const std::string& str);
+
+ // Shared function to read the "hourCycle" option.
+ V8_WARN_UNUSED_RESULT static Maybe<HourCycle> GetHourCycle(
+ Isolate* isolate, Handle<JSReceiver> options, const char* method);
+
+ // enum for "localeMatcher" option: shared by many Intl objects.
+ enum class MatcherOption { kBestFit, kLookup };
+
+ // Shared function to read the "localeMatcher" option.
+ V8_WARN_UNUSED_RESULT static Maybe<MatcherOption> GetLocaleMatcher(
+ Isolate* isolate, Handle<JSReceiver> options, const char* method);
+
+ struct ResolvedLocale {
+ std::string locale;
+ icu::Locale icu_locale;
+ std::map<std::string, std::string> extensions;
+ };
+
+ static ResolvedLocale ResolveLocale(
+ Isolate* isolate, const std::set<std::string>& available_locales,
+ const std::vector<std::string>& requested_locales, MatcherOption options,
+ const std::set<std::string>& relevant_extension_keys);
+
+ // Utility function to set text to BreakIterator.
+ static Managed<icu::UnicodeString> SetTextToBreakIterator(
+ Isolate* isolate, Handle<String> text,
+ icu::BreakIterator* break_iterator);
+
+ // ecma262 #sec-string.prototype.normalize
+ V8_WARN_UNUSED_RESULT static MaybeHandle<String> Normalize(
+ Isolate* isolate, Handle<String> string, Handle<Object> form_input);
+ static base::TimezoneCache* CreateTimeZoneCache();
+
+ // Convert a Handle<String> to icu::UnicodeString
+ static icu::UnicodeString ToICUUnicodeString(Isolate* isolate,
+ Handle<String> string);
+
+ static const uint8_t* ToLatin1LowerTable();
+
+ static String ConvertOneByteToLower(String src, String dst);
};
} // namespace internal
diff --git a/deps/v8/src/objects/js-array-buffer-inl.h b/deps/v8/src/objects/js-array-buffer-inl.h
index 7bc01a8b8a..9ac72ae111 100644
--- a/deps/v8/src/objects/js-array-buffer-inl.h
+++ b/deps/v8/src/objects/js-array-buffer-inl.h
@@ -16,9 +16,15 @@
namespace v8 {
namespace internal {
+OBJECT_CONSTRUCTORS_IMPL(JSArrayBuffer, JSObject)
+OBJECT_CONSTRUCTORS_IMPL(JSArrayBufferView, JSObject)
+OBJECT_CONSTRUCTORS_IMPL(JSTypedArray, JSArrayBufferView)
+OBJECT_CONSTRUCTORS_IMPL(JSDataView, JSArrayBufferView)
+
CAST_ACCESSOR(JSArrayBuffer)
CAST_ACCESSOR(JSArrayBufferView)
CAST_ACCESSOR(JSTypedArray)
+CAST_ACCESSOR(JSDataView)
size_t JSArrayBuffer::byte_length() const {
return READ_UINTPTR_FIELD(this, kByteLengthOffset);
@@ -80,14 +86,15 @@ void JSArrayBuffer::set_is_wasm_memory(bool is_wasm_memory) {
set_bit_field(IsWasmMemoryBit::update(bit_field(), is_wasm_memory));
}
-void JSArrayBuffer::set_bit_field(uint32_t bits) {
- if (kInt32Size != kPointerSize) {
-#if V8_TARGET_LITTLE_ENDIAN
- WRITE_UINT32_FIELD(this, kBitFieldSlot + kInt32Size, 0);
-#else
- WRITE_UINT32_FIELD(this, kBitFieldSlot, 0);
-#endif
+void JSArrayBuffer::clear_padding() {
+ if (FIELD_SIZE(kOptionalPaddingOffset) != 0) {
+ DCHECK_EQ(4, FIELD_SIZE(kOptionalPaddingOffset));
+ memset(reinterpret_cast<void*>(address() + kOptionalPaddingOffset), 0,
+ FIELD_SIZE(kOptionalPaddingOffset));
}
+}
+
+void JSArrayBuffer::set_bit_field(uint32_t bits) {
WRITE_UINT32_FIELD(this, kBitFieldOffset, bits);
}
@@ -98,10 +105,10 @@ uint32_t JSArrayBuffer::bit_field() const {
// |bit_field| fields.
BIT_FIELD_ACCESSORS(JSArrayBuffer, bit_field, is_external,
JSArrayBuffer::IsExternalBit)
-BIT_FIELD_ACCESSORS(JSArrayBuffer, bit_field, is_neuterable,
- JSArrayBuffer::IsNeuterableBit)
-BIT_FIELD_ACCESSORS(JSArrayBuffer, bit_field, was_neutered,
- JSArrayBuffer::WasNeuteredBit)
+BIT_FIELD_ACCESSORS(JSArrayBuffer, bit_field, is_detachable,
+ JSArrayBuffer::IsDetachableBit)
+BIT_FIELD_ACCESSORS(JSArrayBuffer, bit_field, was_detached,
+ JSArrayBuffer::WasDetachedBit)
BIT_FIELD_ACCESSORS(JSArrayBuffer, bit_field, is_shared,
JSArrayBuffer::IsSharedBit)
BIT_FIELD_ACCESSORS(JSArrayBuffer, bit_field, is_growable,
@@ -125,13 +132,11 @@ void JSArrayBufferView::set_byte_length(size_t value) {
ACCESSORS(JSArrayBufferView, buffer, Object, kBufferOffset)
-bool JSArrayBufferView::WasNeutered() const {
- return JSArrayBuffer::cast(buffer())->was_neutered();
+bool JSArrayBufferView::WasDetached() const {
+ return JSArrayBuffer::cast(buffer())->was_detached();
}
-Object* JSTypedArray::length() const {
- return Object::cast(READ_FIELD(this, kLengthOffset));
-}
+Object JSTypedArray::length() const { return READ_FIELD(this, kLengthOffset); }
size_t JSTypedArray::length_value() const {
double val = length()->Number();
@@ -142,17 +147,17 @@ size_t JSTypedArray::length_value() const {
return static_cast<size_t>(val);
}
-void JSTypedArray::set_length(Object* value, WriteBarrierMode mode) {
+void JSTypedArray::set_length(Object value, WriteBarrierMode mode) {
WRITE_FIELD(this, kLengthOffset, value);
- CONDITIONAL_WRITE_BARRIER(this, kLengthOffset, value, mode);
+ CONDITIONAL_WRITE_BARRIER(*this, kLengthOffset, value, mode);
}
bool JSTypedArray::is_on_heap() const {
DisallowHeapAllocation no_gc;
// Checking that buffer()->backing_store() is not nullptr is not sufficient;
// it will be nullptr when byte_length is 0 as well.
- FixedTypedArrayBase* fta(FixedTypedArrayBase::cast(elements()));
- return fta->base_pointer() == fta;
+ FixedTypedArrayBase fta = FixedTypedArrayBase::cast(elements());
+ return fta->base_pointer()->ptr() == fta.ptr();
}
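The is_on_heap() test relies on a self-pointing sentinel rather than a null check, since an external backing store is legitimately null for zero-length buffers. Roughly, in miniature (illustrative types, not FixedTypedArrayBase's layout):

    // An on-heap element store marks itself by pointing its base pointer at
    // itself; an off-heap store keeps an external (or null) base pointer.
    // Comparing the base pointer against the object's own address is thus a
    // reliable on-heap test even when backing_store is null.
    struct ElementStore {
      ElementStore* base_pointer;
      bool is_on_heap() const { return base_pointer == this; }
    };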
// static
@@ -160,14 +165,13 @@ MaybeHandle<JSTypedArray> JSTypedArray::Validate(Isolate* isolate,
Handle<Object> receiver,
const char* method_name) {
if (V8_UNLIKELY(!receiver->IsJSTypedArray())) {
- const MessageTemplate::Template message = MessageTemplate::kNotTypedArray;
+ const MessageTemplate message = MessageTemplate::kNotTypedArray;
THROW_NEW_ERROR(isolate, NewTypeError(message), JSTypedArray);
}
Handle<JSTypedArray> array = Handle<JSTypedArray>::cast(receiver);
- if (V8_UNLIKELY(array->WasNeutered())) {
- const MessageTemplate::Template message =
- MessageTemplate::kDetachedOperation;
+ if (V8_UNLIKELY(array->WasDetached())) {
+ const MessageTemplate message = MessageTemplate::kDetachedOperation;
Handle<String> operation =
isolate->factory()->NewStringFromAsciiChecked(method_name);
THROW_NEW_ERROR(isolate, NewTypeError(message, operation), JSTypedArray);
diff --git a/deps/v8/src/objects/js-array-buffer.cc b/deps/v8/src/objects/js-array-buffer.cc
index 36950f9de6..e8c0c33a64 100644
--- a/deps/v8/src/objects/js-array-buffer.cc
+++ b/deps/v8/src/objects/js-array-buffer.cc
@@ -4,6 +4,8 @@
#include "src/objects/js-array-buffer.h"
#include "src/objects/js-array-buffer-inl.h"
+
+#include "src/counters.h"
#include "src/property-descriptor.h"
namespace v8 {
@@ -36,18 +38,18 @@ inline int ConvertToMb(size_t size) {
} // anonymous namespace
-void JSArrayBuffer::Neuter() {
- CHECK(is_neuterable());
- CHECK(!was_neutered());
+void JSArrayBuffer::Detach() {
+ CHECK(is_detachable());
+ CHECK(!was_detached());
CHECK(is_external());
set_backing_store(nullptr);
set_byte_length(0);
- set_was_neutered(true);
- set_is_neuterable(false);
- // Invalidate the neutering protector.
+ set_was_detached(true);
+ set_is_detachable(false);
+ // Invalidate the detaching protector.
Isolate* const isolate = GetIsolate();
- if (isolate->IsArrayBufferNeuteringIntact()) {
- isolate->InvalidateArrayBufferNeuteringProtector();
+ if (isolate->IsArrayBufferDetachingIntact()) {
+ isolate->InvalidateArrayBufferDetachingProtector();
}
}
@@ -89,8 +91,9 @@ void JSArrayBuffer::Setup(Handle<JSArrayBuffer> array_buffer, Isolate* isolate,
}
array_buffer->set_byte_length(byte_length);
array_buffer->set_bit_field(0);
+ array_buffer->clear_padding();
array_buffer->set_is_external(is_external);
- array_buffer->set_is_neuterable(shared_flag == SharedFlag::kNotShared);
+ array_buffer->set_is_detachable(shared_flag == SharedFlag::kNotShared);
array_buffer->set_is_shared(shared_flag == SharedFlag::kShared);
array_buffer->set_is_wasm_memory(is_wasm_memory);
// Initialize backing store at last to avoid handling of |JSArrayBuffers| that
@@ -104,6 +107,11 @@ void JSArrayBuffer::Setup(Handle<JSArrayBuffer> array_buffer, Isolate* isolate,
}
}
+void JSArrayBuffer::SetupAsEmpty(Handle<JSArrayBuffer> array_buffer,
+ Isolate* isolate) {
+ Setup(array_buffer, isolate, false, nullptr, 0, SharedFlag::kNotShared);
+}
+
bool JSArrayBuffer::SetupAllocatingData(Handle<JSArrayBuffer> array_buffer,
Isolate* isolate,
size_t allocated_length,
@@ -127,6 +135,7 @@ bool JSArrayBuffer::SetupAllocatingData(Handle<JSArrayBuffer> array_buffer,
if (data == nullptr) {
isolate->counters()->array_buffer_new_size_failures()->AddSample(
ConvertToMb(allocated_length));
+ SetupAsEmpty(array_buffer, isolate);
return false;
}
} else {
@@ -191,7 +200,7 @@ Handle<JSArrayBuffer> JSTypedArray::GetBuffer() {
GetIsolate());
return array_buffer;
}
- Handle<JSTypedArray> self(this, GetIsolate());
+ Handle<JSTypedArray> self(*this, GetIsolate());
return MaterializeArrayBuffer(self);
}
@@ -223,7 +232,7 @@ Maybe<bool> JSTypedArray::DefineOwnProperty(Isolate* isolate,
// 3b iv. Let length be O.[[ArrayLength]].
size_t length = o->length_value();
// 3b v. If numericIndex ≥ length, return false.
- if (o->WasNeutered() || index >= length) {
+ if (o->WasDetached() || index >= length) {
RETURN_FAILURE(isolate, should_throw,
NewTypeError(MessageTemplate::kInvalidTypedArrayIndex));
}
diff --git a/deps/v8/src/objects/js-array-buffer.h b/deps/v8/src/objects/js-array-buffer.h
index 3f0dd064fa..5628b6acba 100644
--- a/deps/v8/src/objects/js-array-buffer.h
+++ b/deps/v8/src/objects/js-array-buffer.h
@@ -32,7 +32,7 @@ class JSArrayBuffer : public JSObject {
DECL_PRIMITIVE_ACCESSORS(byte_length, size_t)
// [backing_store]: backing memory for this array
- DECL_ACCESSORS(backing_store, void)
+ DECL_ACCESSORS(backing_store, void*)
// For non-wasm, allocation_length and allocation_base are byte_length and
// backing_store, respectively.
@@ -42,11 +42,15 @@ class JSArrayBuffer : public JSObject {
// [bit_field]: boolean flags
DECL_PRIMITIVE_ACCESSORS(bit_field, uint32_t)
+  // Clear uninitialized padding space. This ensures that the snapshot content
+  // is deterministic. Depending on the V8 build mode, there may be no padding.
+ V8_INLINE void clear_padding();
+
// Bit positions for [bit_field].
#define JS_ARRAY_BUFFER_BIT_FIELD_FIELDS(V, _) \
V(IsExternalBit, bool, 1, _) \
- V(IsNeuterableBit, bool, 1, _) \
- V(WasNeuteredBit, bool, 1, _) \
+ V(IsDetachableBit, bool, 1, _) \
+ V(WasDetachedBit, bool, 1, _) \
V(IsSharedBit, bool, 1, _) \
V(IsGrowableBit, bool, 1, _) \
V(IsWasmMemoryBit, bool, 1, _)
@@ -58,11 +62,11 @@ class JSArrayBuffer : public JSObject {
// memory block once all ArrayBuffers referencing it are collected by the GC.
DECL_BOOLEAN_ACCESSORS(is_external)
- // [is_neuterable]: false indicates that this buffer cannot be detached.
- DECL_BOOLEAN_ACCESSORS(is_neuterable)
+ // [is_detachable]: false indicates that this buffer cannot be detached.
+ DECL_BOOLEAN_ACCESSORS(is_detachable)
- // [was_neutered]: true if the buffer was previously detached.
- DECL_BOOLEAN_ACCESSORS(was_neutered)
+ // [was_detached]: true if the buffer was previously detached.
+ DECL_BOOLEAN_ACCESSORS(was_detached)
// [is_shared]: tells whether this is an ArrayBuffer or a SharedArrayBuffer.
DECL_BOOLEAN_ACCESSORS(is_shared)
@@ -75,7 +79,7 @@ class JSArrayBuffer : public JSObject {
DECL_CAST(JSArrayBuffer)
- void Neuter();
+ void Detach();
struct Allocation {
Allocation(void* allocation_base, size_t length, void* backing_store,
@@ -100,6 +104,11 @@ class JSArrayBuffer : public JSObject {
SharedFlag shared_flag = SharedFlag::kNotShared,
bool is_wasm_memory = false);
+  // Initialize the object as an empty one to avoid confusing the heap
+  // verifier if a failure happens in the middle of JSArrayBuffer construction.
+ V8_EXPORT_PRIVATE static void SetupAsEmpty(Handle<JSArrayBuffer> array_buffer,
+ Isolate* isolate);
+
// Returns false if array buffer contents could not be allocated.
// In this case, |array_buffer| will not be set up.
static bool SetupAllocatingData(
@@ -111,27 +120,28 @@ class JSArrayBuffer : public JSObject {
DECL_PRINTER(JSArrayBuffer)
DECL_VERIFIER(JSArrayBuffer)
- // The fields are not pointers into our heap, so they are not iterated over in
- // objects-body-descriptors-inl.h.
- static const int kByteLengthOffset = JSObject::kHeaderSize;
- static const int kBackingStoreOffset = kByteLengthOffset + kUIntptrSize;
- static const int kBitFieldSlot = kBackingStoreOffset + kPointerSize;
-#if V8_TARGET_LITTLE_ENDIAN || !V8_HOST_ARCH_64_BIT
- static const int kBitFieldOffset = kBitFieldSlot;
-#else
- static const int kBitFieldOffset = kBitFieldSlot + kInt32Size;
-#endif
- static const int kSize = kBitFieldSlot + kPointerSize;
+// Layout description.
+#define JS_ARRAY_BUFFER_FIELDS(V) \
+ V(kEndOfTaggedFieldsOffset, 0) \
+ /* Raw data fields. */ \
+ V(kByteLengthOffset, kUIntptrSize) \
+ V(kBackingStoreOffset, kSystemPointerSize) \
+ V(kBitFieldOffset, kInt32Size) \
+ /* Pads header size to be a multiple of kTaggedSize. */ \
+ V(kOptionalPaddingOffset, OBJECT_POINTER_PADDING(kOptionalPaddingOffset)) \
+ /* Header size. */ \
+ V(kHeaderSize, 0)
+
+ DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize, JS_ARRAY_BUFFER_FIELDS)
+#undef JS_ARRAY_BUFFER_FIELDS
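DEFINE_FIELD_OFFSET_CONSTANTS turns each (name, size) pair into a running offset starting at the given header size. A simplified stand-in showing the enum trick, with made-up macro names and a pretend 16-byte JSObject header:

    #include <iostream>

    // Each V(name, size) entry contributes an enum constant at the running
    // offset; the final zero-sized entry lands on the total header size.
    #define EXAMPLE_FIELDS(V)      \
      V(kByteLengthOffset, 8)      \
      V(kBackingStoreOffset, 8)    \
      V(kBitFieldOffset, 4)        \
      V(kOptionalPaddingOffset, 4) \
      V(kHeaderSize, 0)

    #define DEFINE_OFFSET(Name, Size) Name, Name##End = Name + (Size) - 1,

    enum {
      kStartMinusOne = 16 - 1,  // pretend JSObject::kHeaderSize == 16
      EXAMPLE_FIELDS(DEFINE_OFFSET)
    };

    int main() {
      std::cout << kByteLengthOffset << " " << kBackingStoreOffset << " "
                << kBitFieldOffset << " " << kHeaderSize << "\n";  // 16 24 32 40
    }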
static const int kSizeWithEmbedderFields =
- kSize + v8::ArrayBuffer::kEmbedderFieldCount * kPointerSize;
+ kHeaderSize +
+ v8::ArrayBuffer::kEmbedderFieldCount * kEmbedderDataSlotSize;
- // Iterates all fields in the object including internal ones except
- // kBackingStoreOffset and kBitFieldSlot.
class BodyDescriptor;
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(JSArrayBuffer);
+ OBJECT_CONSTRUCTORS(JSArrayBuffer, JSObject);
};
class JSArrayBufferView : public JSObject {
@@ -149,19 +159,25 @@ class JSArrayBufferView : public JSObject {
DECL_VERIFIER(JSArrayBufferView)
- inline bool WasNeutered() const;
+ inline bool WasDetached() const;
- static const int kBufferOffset = JSObject::kHeaderSize;
- static const int kByteOffsetOffset = kBufferOffset + kPointerSize;
- static const int kByteLengthOffset = kByteOffsetOffset + kUIntptrSize;
- static const int kHeaderSize = kByteLengthOffset + kUIntptrSize;
+// Layout description.
+#define JS_ARRAY_BUFFER_VIEW_FIELDS(V) \
+ V(kBufferOffset, kTaggedSize) \
+ V(kEndOfTaggedFieldsOffset, 0) \
+ /* Raw data fields. */ \
+ V(kByteOffsetOffset, kUIntptrSize) \
+ V(kByteLengthOffset, kUIntptrSize) \
+ /* Header size. */ \
+ V(kHeaderSize, 0)
+
+ DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
+ JS_ARRAY_BUFFER_VIEW_FIELDS)
+#undef JS_ARRAY_BUFFER_VIEW_FIELDS
- // Iterates all fields in the object including internal ones except
- // kByteOffset and kByteLengthOffset.
class BodyDescriptor;
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(JSArrayBufferView);
+ OBJECT_CONSTRUCTORS(JSArrayBufferView, JSObject);
};
class JSTypedArray : public JSArrayBufferView {
@@ -193,10 +209,20 @@ class JSTypedArray : public JSArrayBufferView {
DECL_PRINTER(JSTypedArray)
DECL_VERIFIER(JSTypedArray)
- static const int kLengthOffset = JSArrayBufferView::kHeaderSize;
- static const int kSize = kLengthOffset + kPointerSize;
+// Layout description.
+#define JS_TYPED_ARRAY_FIELDS(V) \
+ /* Raw data fields. */ \
+ V(kLengthOffset, kSystemPointerSize) \
+ /* Header size. */ \
+ V(kHeaderSize, 0)
+
+ DEFINE_FIELD_OFFSET_CONSTANTS(JSArrayBufferView::kHeaderSize,
+ JS_TYPED_ARRAY_FIELDS)
+#undef JS_TYPED_ARRAY_FIELDS
+
static const int kSizeWithEmbedderFields =
- kSize + v8::ArrayBufferView::kEmbedderFieldCount * kPointerSize;
+ kHeaderSize +
+ v8::ArrayBufferView::kEmbedderFieldCount * kEmbedderDataSlotSize;
private:
static Handle<JSArrayBuffer> MaterializeArrayBuffer(
@@ -205,7 +231,7 @@ class JSTypedArray : public JSArrayBufferView {
DECL_ACCESSORS(raw_length, Object)
#endif
- DISALLOW_IMPLICIT_CONSTRUCTORS(JSTypedArray);
+ OBJECT_CONSTRUCTORS(JSTypedArray, JSArrayBufferView);
};
class JSDataView : public JSArrayBufferView {
@@ -216,12 +242,12 @@ class JSDataView : public JSArrayBufferView {
DECL_PRINTER(JSDataView)
DECL_VERIFIER(JSDataView)
- static const int kSize = JSArrayBufferView::kHeaderSize;
+ // Layout description.
static const int kSizeWithEmbedderFields =
- kSize + v8::ArrayBufferView::kEmbedderFieldCount * kPointerSize;
+ kHeaderSize +
+ v8::ArrayBufferView::kEmbedderFieldCount * kEmbedderDataSlotSize;
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(JSDataView);
+ OBJECT_CONSTRUCTORS(JSDataView, JSArrayBufferView);
};
} // namespace internal
diff --git a/deps/v8/src/objects/js-array-inl.h b/deps/v8/src/objects/js-array-inl.h
index 7ec69285ed..7f9710915d 100644
--- a/deps/v8/src/objects/js-array-inl.h
+++ b/deps/v8/src/objects/js-array-inl.h
@@ -15,14 +15,17 @@
namespace v8 {
namespace internal {
+OBJECT_CONSTRUCTORS_IMPL(JSArray, JSObject)
+OBJECT_CONSTRUCTORS_IMPL(JSArrayIterator, JSObject)
+
CAST_ACCESSOR(JSArray)
CAST_ACCESSOR(JSArrayIterator)
ACCESSORS(JSArray, length, Object, kLengthOffset)
-void JSArray::set_length(Smi* length) {
+void JSArray::set_length(Smi length) {
// Don't need a write barrier for a Smi.
- set_length(static_cast<Object*>(length), SKIP_WRITE_BARRIER);
+ set_length(Object(length.ptr()), SKIP_WRITE_BARRIER);
}
bool JSArray::SetLengthWouldNormalize(Heap* heap, uint32_t new_length) {
diff --git a/deps/v8/src/objects/js-array.h b/deps/v8/src/objects/js-array.h
index 3a9fe48d24..9554253bad 100644
--- a/deps/v8/src/objects/js-array.h
+++ b/deps/v8/src/objects/js-array.h
@@ -8,6 +8,7 @@
#include "src/objects/allocation-site.h"
#include "src/objects/fixed-array.h"
#include "src/objects/js-objects.h"
+#include "torque-generated/class-definitions-from-dsl.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -27,7 +28,7 @@ class JSArray : public JSObject {
// Overload the length setter to skip write barrier when the length
// is set to a smi. This matches the set function on FixedArray.
- inline void set_length(Smi* length);
+ inline void set_length(Smi length);
static bool HasReadOnlyLength(Handle<JSArray> array);
static bool WouldChangeReadOnlyLength(Handle<JSArray> array, uint32_t index);
@@ -63,6 +64,32 @@ class JSArray : public JSObject {
Isolate* isolate, Handle<JSArray> a, PropertyDescriptor* desc,
ShouldThrow should_throw);
+ // Support for Array.prototype.join().
+ // Writes a fixed array of strings and separators to a single destination
+  // string. This helper assumes the fixed array encodes separators in two
+  // ways:
+  //   1) Explicitly with a smi, whose value represents the number of repeated
+  //      separators.
+  //   2) Implicitly: a single separator between two consecutive strings.
+  //
+  // Here are some input/output examples, given the separator string ',':
+ //
+ // [1, 'hello', 2, 'world', 1] => ',hello,,world,'
+ // ['hello', 'world'] => 'hello,world'
+ //
+  // To avoid any allocations, this helper assumes the destination string has
+  // exactly the length necessary to write the strings and separators from the
+  // fixed array.
+ // Since this is called via ExternalReferences, it uses raw Address values:
+ // - {raw_fixed_array} is a tagged FixedArray pointer.
+ // - {raw_separator} and {raw_dest} are tagged String pointers.
+ // - Returns a tagged String pointer.
+ static Address ArrayJoinConcatToSequentialString(Isolate* isolate,
+ Address raw_fixed_array,
+ intptr_t length,
+ Address raw_separator,
+ Address raw_dest);
+
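A standalone sketch of the separator encoding described above, using std:: types in place of V8's FixedArray and Smi (the names and types are illustrative, not V8 API):

    #include <cstdint>
    #include <iostream>
    #include <string>
    #include <variant>
    #include <vector>

    // An entry is either a string element or a count of repeated separators,
    // mirroring the smi/string encoding in the fixed array.
    using Entry = std::variant<std::string, int64_t>;

    std::string JoinEncoded(const std::vector<Entry>& entries,
                            const std::string& separator) {
      std::string dest;
      bool previous_was_string = false;
      for (const Entry& entry : entries) {
        if (std::holds_alternative<int64_t>(entry)) {
          // Explicit encoding: a number means that many repeated separators.
          for (int64_t i = std::get<int64_t>(entry); i > 0; --i)
            dest += separator;
          previous_was_string = false;
        } else {
          // Implicit encoding: one separator between consecutive strings.
          if (previous_was_string) dest += separator;
          dest += std::get<std::string>(entry);
          previous_was_string = true;
        }
      }
      return dest;
    }

    int main() {
      // [1, 'hello', 2, 'world', 1] => ',hello,,world,'
      std::cout << JoinEncoded({int64_t{1}, std::string("hello"), int64_t{2},
                                std::string("world"), int64_t{1}},
                               ",")
                << "\n";
      // ['hello', 'world'] => 'hello,world'
      std::cout << JoinEncoded({std::string("hello"), std::string("world")},
                               ",")
                << "\n";
    }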
// Checks whether the Array has the current realm's Array.prototype as its
// prototype. This function is best-effort and only gives a conservative
// approximation, erring on the side of false, in particular with respect
@@ -78,9 +105,8 @@ class JSArray : public JSObject {
// Number of element slots to pre-allocate for an empty array.
static const int kPreallocatedArrayElements = 4;
- // Layout description.
- static const int kLengthOffset = JSObject::kHeaderSize;
- static const int kSize = kLengthOffset + kPointerSize;
+ DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize, JSARRAY_FIELDS)
+#undef JS_ARRAY_FIELDS
static const int kLengthDescriptorIndex = 0;
@@ -90,6 +116,9 @@ class JSArray : public JSObject {
// This constant is somewhat arbitrary. Any large enough value would work.
static const uint32_t kMaxFastArrayLength = 32 * 1024 * 1024;
+ // Min. stack size for detecting an Array.prototype.join() call cycle.
+ static const uint32_t kMinJoinStackSize = 2;
+
static const int kInitialMaxFastElementArray =
(kMaxRegularHeapObjectSize - FixedArray::kHeaderSize - kSize -
AllocationMemento::kSize) >>
@@ -98,8 +127,7 @@ class JSArray : public JSObject {
// Valid array indices range from +0 <= i < 2^32 - 1 (kMaxUInt32).
static const uint32_t kMaxArrayIndex = kMaxUInt32 - 1;
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(JSArray);
+ OBJECT_CONSTRUCTORS(JSArray, JSObject);
};
Handle<Object> CacheInitialJSArrayMaps(Handle<Context> native_context,
@@ -146,13 +174,18 @@ class JSArrayIterator : public JSObject {
inline IterationKind kind() const;
inline void set_kind(IterationKind kind);
- static const int kIteratedObjectOffset = JSObject::kHeaderSize;
- static const int kNextIndexOffset = kIteratedObjectOffset + kPointerSize;
- static const int kKindOffset = kNextIndexOffset + kPointerSize;
- static const int kSize = kKindOffset + kPointerSize;
+ // Layout description.
+#define JS_ARRAY_ITERATOR_FIELDS(V) \
+ V(kIteratedObjectOffset, kTaggedSize) \
+ V(kNextIndexOffset, kTaggedSize) \
+ V(kKindOffset, kTaggedSize) \
+ /* Header size. */ \
+ V(kSize, 0)
+
+ DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize, JS_ARRAY_ITERATOR_FIELDS)
+#undef JS_ARRAY_ITERATOR_FIELDS
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(JSArrayIterator);
+ OBJECT_CONSTRUCTORS(JSArrayIterator, JSObject);
};
} // namespace internal
diff --git a/deps/v8/src/objects/js-break-iterator-inl.h b/deps/v8/src/objects/js-break-iterator-inl.h
index 16f8111953..294c893065 100644
--- a/deps/v8/src/objects/js-break-iterator-inl.h
+++ b/deps/v8/src/objects/js-break-iterator-inl.h
@@ -18,13 +18,15 @@
namespace v8 {
namespace internal {
+OBJECT_CONSTRUCTORS_IMPL(JSV8BreakIterator, JSObject)
+
inline void JSV8BreakIterator::set_type(Type type) {
DCHECK_GT(JSV8BreakIterator::Type::COUNT, type);
WRITE_FIELD(this, kTypeOffset, Smi::FromInt(static_cast<int>(type)));
}
inline JSV8BreakIterator::Type JSV8BreakIterator::type() const {
- Object* value = READ_FIELD(this, kTypeOffset);
+ Object value = READ_FIELD(this, kTypeOffset);
return static_cast<JSV8BreakIterator::Type>(Smi::ToInt(value));
}
diff --git a/deps/v8/src/objects/js-break-iterator.cc b/deps/v8/src/objects/js-break-iterator.cc
index 2031c2cc5b..505934f8e9 100644
--- a/deps/v8/src/objects/js-break-iterator.cc
+++ b/deps/v8/src/objects/js-break-iterator.cc
@@ -8,7 +8,6 @@
#include "src/objects/js-break-iterator.h"
-#include "src/objects/intl-objects-inl.h"
#include "src/objects/intl-objects.h"
#include "src/objects/js-break-iterator-inl.h"
#include "unicode/brkiter.h"
@@ -16,19 +15,18 @@
namespace v8 {
namespace internal {
-JSV8BreakIterator::Type JSV8BreakIterator::getType(const char* str) {
- if (strcmp(str, "character") == 0) return Type::CHARACTER;
- if (strcmp(str, "word") == 0) return Type::WORD;
- if (strcmp(str, "sentence") == 0) return Type::SENTENCE;
- if (strcmp(str, "line") == 0) return Type::LINE;
- UNREACHABLE();
-}
-
MaybeHandle<JSV8BreakIterator> JSV8BreakIterator::Initialize(
Isolate* isolate, Handle<JSV8BreakIterator> break_iterator_holder,
Handle<Object> locales, Handle<Object> options_obj) {
Factory* factory = isolate->factory();
+ // 1. Let requestedLocales be ? CanonicalizeLocaleList(locales).
+ Maybe<std::vector<std::string>> maybe_requested_locales =
+ Intl::CanonicalizeLocaleList(isolate, locales);
+ MAYBE_RETURN(maybe_requested_locales, MaybeHandle<JSV8BreakIterator>());
+ std::vector<std::string> requested_locales =
+ maybe_requested_locales.FromJust();
+
Handle<JSReceiver> options;
if (options_obj->IsUndefined(isolate)) {
options = factory->NewJSObjectWithNullProto();
@@ -40,31 +38,24 @@ MaybeHandle<JSV8BreakIterator> JSV8BreakIterator::Initialize(
}
// Extract locale string
- Handle<JSObject> r;
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, r,
- Intl::ResolveLocale(isolate, "breakiterator", locales, options),
- JSV8BreakIterator);
- Handle<Object> locale_obj =
- JSObject::GetDataProperty(r, factory->locale_string());
- CHECK(locale_obj->IsString());
- Handle<String> locale = Handle<String>::cast(locale_obj);
+ Maybe<Intl::MatcherOption> maybe_locale_matcher =
+ Intl::GetLocaleMatcher(isolate, options, "Intl.JSV8BreakIterator");
+ MAYBE_RETURN(maybe_locale_matcher, MaybeHandle<JSV8BreakIterator>());
+ Intl::MatcherOption matcher = maybe_locale_matcher.FromJust();
- // Extract type from options
- std::unique_ptr<char[]> type_str = nullptr;
- std::vector<const char*> type_values = {"character", "word", "sentence",
- "line"};
- Maybe<bool> maybe_found_type = Intl::GetStringOption(
- isolate, options, "type", type_values, "Intl.v8BreakIterator", &type_str);
- Type type_enum = Type::WORD;
- MAYBE_RETURN(maybe_found_type, MaybeHandle<JSV8BreakIterator>());
- if (maybe_found_type.FromJust()) {
- DCHECK_NOT_NULL(type_str.get());
- type_enum = getType(type_str.get());
- }
+ Intl::ResolvedLocale r =
+ Intl::ResolveLocale(isolate, JSV8BreakIterator::GetAvailableLocales(),
+ requested_locales, matcher, {});
- // Construct icu_locale using the locale string
- icu::Locale icu_locale = Intl::CreateICULocale(isolate, locale);
+ // Extract type from options
+ Maybe<Type> maybe_type = Intl::GetStringOption<Type>(
+ isolate, options, "type", "Intl.v8BreakIterator",
+ {"word", "character", "sentence", "line"},
+ {Type::WORD, Type::CHARACTER, Type::SENTENCE, Type::LINE}, Type::WORD);
+ MAYBE_RETURN(maybe_type, MaybeHandle<JSV8BreakIterator>());
+ Type type_enum = maybe_type.FromJust();
+
+ icu::Locale icu_locale = r.icu_locale;
DCHECK(!icu_locale.isBogus());
// Construct break_iterator using icu_locale and type
@@ -103,8 +94,10 @@ MaybeHandle<JSV8BreakIterator> JSV8BreakIterator::Initialize(
Handle<Managed<icu::UnicodeString>> managed_unicode_string =
Managed<icu::UnicodeString>::FromRawPtr(isolate, 0, nullptr);
- // Setting fields
- break_iterator_holder->set_locale(*locale);
+ Handle<String> locale_str =
+ isolate->factory()->NewStringFromAsciiChecked(r.locale.c_str());
+ break_iterator_holder->set_locale(*locale_str);
+
break_iterator_holder->set_type(type_enum);
break_iterator_holder->set_break_iterator(*managed_break_iterator);
break_iterator_holder->set_unicode_string(*managed_unicode_string);
@@ -130,25 +123,12 @@ Handle<JSObject> JSV8BreakIterator::ResolvedOptions(
void JSV8BreakIterator::AdoptText(
Isolate* isolate, Handle<JSV8BreakIterator> break_iterator_holder,
Handle<String> text) {
- icu::UnicodeString* u_text;
- int length = text->length();
- text = String::Flatten(isolate, text);
- {
- DisallowHeapAllocation no_gc;
- String::FlatContent flat = text->GetFlatContent();
- std::unique_ptr<uc16[]> sap;
- const UChar* text_value = GetUCharBufferFromFlat(flat, &sap, length);
- u_text = new icu::UnicodeString(text_value, length);
- }
-
- Handle<Managed<icu::UnicodeString>> new_u_text =
- Managed<icu::UnicodeString>::FromRawPtr(isolate, 0, u_text);
- break_iterator_holder->set_unicode_string(*new_u_text);
-
icu::BreakIterator* break_iterator =
break_iterator_holder->break_iterator()->raw();
CHECK_NOT_NULL(break_iterator);
- break_iterator->setText(*u_text);
+ Managed<icu::UnicodeString> unicode_string =
+ Intl::SetTextToBreakIterator(isolate, text, break_iterator);
+ break_iterator_holder->set_unicode_string(unicode_string);
}
Handle<String> JSV8BreakIterator::TypeAsString() const {
@@ -166,5 +146,52 @@ Handle<String> JSV8BreakIterator::TypeAsString() const {
}
}
+Handle<Object> JSV8BreakIterator::Current(
+ Isolate* isolate, Handle<JSV8BreakIterator> break_iterator) {
+ return isolate->factory()->NewNumberFromInt(
+ break_iterator->break_iterator()->raw()->current());
+}
+
+Handle<Object> JSV8BreakIterator::First(
+ Isolate* isolate, Handle<JSV8BreakIterator> break_iterator) {
+ return isolate->factory()->NewNumberFromInt(
+ break_iterator->break_iterator()->raw()->first());
+}
+
+Handle<Object> JSV8BreakIterator::Next(
+ Isolate* isolate, Handle<JSV8BreakIterator> break_iterator) {
+ return isolate->factory()->NewNumberFromInt(
+ break_iterator->break_iterator()->raw()->next());
+}
+
+String JSV8BreakIterator::BreakType(Isolate* isolate,
+ Handle<JSV8BreakIterator> break_iterator) {
+ int32_t status = break_iterator->break_iterator()->raw()->getRuleStatus();
+ // Keep return values in sync with JavaScript BreakType enum.
+ if (status >= UBRK_WORD_NONE && status < UBRK_WORD_NONE_LIMIT) {
+ return ReadOnlyRoots(isolate).none_string();
+ }
+ if (status >= UBRK_WORD_NUMBER && status < UBRK_WORD_NUMBER_LIMIT) {
+ return ReadOnlyRoots(isolate).number_string();
+ }
+ if (status >= UBRK_WORD_LETTER && status < UBRK_WORD_LETTER_LIMIT) {
+ return ReadOnlyRoots(isolate).letter_string();
+ }
+ if (status >= UBRK_WORD_KANA && status < UBRK_WORD_KANA_LIMIT) {
+ return ReadOnlyRoots(isolate).kana_string();
+ }
+ if (status >= UBRK_WORD_IDEO && status < UBRK_WORD_IDEO_LIMIT) {
+ return ReadOnlyRoots(isolate).ideo_string();
+ }
+ return ReadOnlyRoots(isolate).unknown_string();
+}
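The UBRK_WORD_* ranges used here are plain ICU rule statuses. A standalone ICU4C sketch, assuming ICU headers and libraries are available, reproduces the same classification outside V8:

    #include <iostream>
    #include <memory>

    #include "unicode/brkiter.h"
    #include "unicode/ubrk.h"
    #include "unicode/unistr.h"

    int main() {
      UErrorCode status = U_ZERO_ERROR;
      std::unique_ptr<icu::BreakIterator> it(
          icu::BreakIterator::createWordInstance(icu::Locale::getUS(),
                                                 status));
      if (U_FAILURE(status)) return 1;

      icu::UnicodeString text = icu::UnicodeString::fromUTF8("word 42");
      it->setText(text);

      // getRuleStatus() classifies the segment ending at the current
      // boundary, using the same UBRK_WORD_* ranges as BreakType above.
      for (int32_t end = it->next(); end != icu::BreakIterator::DONE;
           end = it->next()) {
        int32_t rule_status = it->getRuleStatus();
        const char* kind = "unknown";
        if (rule_status >= UBRK_WORD_NONE && rule_status < UBRK_WORD_NONE_LIMIT)
          kind = "none";
        else if (rule_status >= UBRK_WORD_NUMBER &&
                 rule_status < UBRK_WORD_NUMBER_LIMIT)
          kind = "number";
        else if (rule_status >= UBRK_WORD_LETTER &&
                 rule_status < UBRK_WORD_LETTER_LIMIT)
          kind = "letter";
        std::cout << "boundary at " << end << ": " << kind << "\n";
      }
    }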
+
+std::set<std::string> JSV8BreakIterator::GetAvailableLocales() {
+ int32_t num_locales = 0;
+ const icu::Locale* icu_available_locales =
+ icu::BreakIterator::getAvailableLocales(num_locales);
+ return Intl::BuildLocaleSet(icu_available_locales, num_locales);
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/objects/js-break-iterator.h b/deps/v8/src/objects/js-break-iterator.h
index d5847bdaf6..edba90df7a 100644
--- a/deps/v8/src/objects/js-break-iterator.h
+++ b/deps/v8/src/objects/js-break-iterator.h
@@ -9,6 +9,9 @@
#ifndef V8_OBJECTS_JS_BREAK_ITERATOR_H_
#define V8_OBJECTS_JS_BREAK_ITERATOR_H_
+#include <set>
+#include <string>
+
#include "src/objects.h"
#include "src/objects/intl-objects.h"
#include "src/objects/managed.h"
@@ -26,16 +29,27 @@ namespace internal {
class JSV8BreakIterator : public JSObject {
public:
V8_WARN_UNUSED_RESULT static MaybeHandle<JSV8BreakIterator> Initialize(
- Isolate* isolate, Handle<JSV8BreakIterator> break_iterator_holder,
+ Isolate* isolate, Handle<JSV8BreakIterator> break_iterator,
Handle<Object> input_locales, Handle<Object> input_options);
static Handle<JSObject> ResolvedOptions(
Isolate* isolate, Handle<JSV8BreakIterator> break_iterator);
+ static std::set<std::string> GetAvailableLocales();
+
static void AdoptText(Isolate* isolate,
- Handle<JSV8BreakIterator> break_iterator_holder,
+ Handle<JSV8BreakIterator> break_iterator,
Handle<String> text);
+ static Handle<Object> Current(Isolate* isolate,
+ Handle<JSV8BreakIterator> break_iterator);
+ static Handle<Object> First(Isolate* isolate,
+ Handle<JSV8BreakIterator> break_iterator);
+ static Handle<Object> Next(Isolate* isolate,
+ Handle<JSV8BreakIterator> break_iterator);
+ static String BreakType(Isolate* isolate,
+ Handle<JSV8BreakIterator> break_iterator);
+
enum class Type { CHARACTER, WORD, SENTENCE, LINE, COUNT };
inline void set_type(Type type);
inline Type type() const;
@@ -56,27 +70,24 @@ class JSV8BreakIterator : public JSObject {
DECL_ACCESSORS(bound_break_type, Object)
// Layout description.
-#define BREAK_ITERATOR_FIELDS(V) \
- /* Pointer fields. */ \
- V(kLocaleOffset, kPointerSize) \
- V(kTypeOffset, kPointerSize) \
- V(kBreakIteratorOffset, kPointerSize) \
- V(kUnicodeStringOffset, kPointerSize) \
- V(kBoundAdoptTextOffset, kPointerSize) \
- V(kBoundFirstOffset, kPointerSize) \
- V(kBoundNextOffset, kPointerSize) \
- V(kBoundCurrentOffset, kPointerSize) \
- V(kBoundBreakTypeOffset, kPointerSize) \
- /* Total Size */ \
+#define BREAK_ITERATOR_FIELDS(V) \
+ /* Pointer fields. */ \
+ V(kLocaleOffset, kTaggedSize) \
+ V(kTypeOffset, kTaggedSize) \
+ V(kBreakIteratorOffset, kTaggedSize) \
+ V(kUnicodeStringOffset, kTaggedSize) \
+ V(kBoundAdoptTextOffset, kTaggedSize) \
+ V(kBoundFirstOffset, kTaggedSize) \
+ V(kBoundNextOffset, kTaggedSize) \
+ V(kBoundCurrentOffset, kTaggedSize) \
+ V(kBoundBreakTypeOffset, kTaggedSize) \
+ /* Total Size */ \
V(kSize, 0)
DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize, BREAK_ITERATOR_FIELDS)
#undef BREAK_ITERATOR_FIELDS
- private:
- static Type getType(const char* str);
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(JSV8BreakIterator)
+ OBJECT_CONSTRUCTORS(JSV8BreakIterator, JSObject)
};
} // namespace internal
diff --git a/deps/v8/src/objects/js-collator-inl.h b/deps/v8/src/objects/js-collator-inl.h
index 1a94ac805c..693021e26a 100644
--- a/deps/v8/src/objects/js-collator-inl.h
+++ b/deps/v8/src/objects/js-collator-inl.h
@@ -18,6 +18,8 @@
namespace v8 {
namespace internal {
+OBJECT_CONSTRUCTORS_IMPL(JSCollator, JSObject)
+
ACCESSORS(JSCollator, icu_collator, Managed<icu::Collator>, kICUCollatorOffset)
ACCESSORS(JSCollator, bound_compare, Object, kBoundCompareOffset);
diff --git a/deps/v8/src/objects/js-collator.cc b/deps/v8/src/objects/js-collator.cc
index f62177b875..3c2efa93db 100644
--- a/deps/v8/src/objects/js-collator.cc
+++ b/deps/v8/src/objects/js-collator.cc
@@ -27,6 +27,14 @@ enum class Usage {
SEARCH,
};
+enum class Sensitivity {
+ kBase,
+ kAccent,
+ kCase,
+ kVariant,
+ kUndefined,
+};
+
// TODO(gsathya): Consider internalizing the value strings.
void CreateDataPropertyForOptions(Isolate* isolate, Handle<JSObject> options,
Handle<String> key, const char* value) {
@@ -52,13 +60,6 @@ void CreateDataPropertyForOptions(Isolate* isolate, Handle<JSObject> options,
.FromJust());
}
-void toLanguageTag(const icu::Locale& locale, char* tag) {
- UErrorCode status = U_ZERO_ERROR;
- uloc_toLanguageTag(locale.getName(), tag, ULOC_FULLNAME_CAPACITY, FALSE,
- &status);
- CHECK(U_SUCCESS(status));
-}
-
} // anonymous namespace
// static
@@ -74,8 +75,6 @@ Handle<JSObject> JSCollator::ResolvedOptions(Isolate* isolate,
bool numeric =
icu_collator->getAttribute(UCOL_NUMERIC_COLLATION, status) == UCOL_ON;
CHECK(U_SUCCESS(status));
- CreateDataPropertyForOptions(isolate, options,
- isolate->factory()->numeric_string(), numeric);
const char* case_first = nullptr;
status = U_ZERO_ERROR;
@@ -90,8 +89,6 @@ Handle<JSObject> JSCollator::ResolvedOptions(Isolate* isolate,
case_first = "false";
}
CHECK(U_SUCCESS(status));
- CreateDataPropertyForOptions(
- isolate, options, isolate->factory()->caseFirst_string(), case_first);
const char* sensitivity = nullptr;
status = U_ZERO_ERROR;
@@ -123,16 +120,11 @@ Handle<JSObject> JSCollator::ResolvedOptions(Isolate* isolate,
sensitivity = "variant";
}
CHECK(U_SUCCESS(status));
- CreateDataPropertyForOptions(
- isolate, options, isolate->factory()->sensitivity_string(), sensitivity);
status = U_ZERO_ERROR;
bool ignore_punctuation = icu_collator->getAttribute(UCOL_ALTERNATE_HANDLING,
status) == UCOL_SHIFTED;
CHECK(U_SUCCESS(status));
- CreateDataPropertyForOptions(isolate, options,
- isolate->factory()->ignorePunctuation_string(),
- ignore_punctuation);
status = U_ZERO_ERROR;
@@ -145,13 +137,13 @@ Handle<JSObject> JSCollator::ResolvedOptions(Isolate* isolate,
const char* legacy_collation_key = uloc_toLegacyKey(collation_key);
DCHECK_NOT_NULL(legacy_collation_key);
- char bcp47_locale_tag[ULOC_FULLNAME_CAPACITY];
char legacy_collation_value[ULOC_FULLNAME_CAPACITY];
status = U_ZERO_ERROR;
int32_t length =
icu_locale.getKeywordValue(legacy_collation_key, legacy_collation_value,
ULOC_FULLNAME_CAPACITY, status);
+ std::string locale;
if (length > 0 && U_SUCCESS(status)) {
const char* collation_value =
uloc_toUnicodeLocaleType(collation_key, legacy_collation_value);
@@ -177,40 +169,79 @@ Handle<JSObject> JSCollator::ResolvedOptions(Isolate* isolate,
new_icu_locale.setKeywordValue(legacy_collation_key, nullptr, status);
CHECK(U_SUCCESS(status));
- toLanguageTag(new_icu_locale, bcp47_locale_tag);
+ locale = Intl::ToLanguageTag(new_icu_locale).FromJust();
} else {
collation = collation_value;
- toLanguageTag(icu_locale, bcp47_locale_tag);
+ locale = Intl::ToLanguageTag(icu_locale).FromJust();
}
} else {
- toLanguageTag(icu_locale, bcp47_locale_tag);
+ locale = Intl::ToLanguageTag(icu_locale).FromJust();
}
+ // 5. For each row of Table 2, except the header row, in table order, do
+ // ...
+ // Table 2: Resolved Options of Collator Instances
+ // Internal Slot Property Extension Key
+  //   [[Locale]]              "locale"
+  //   [[Usage]]               "usage"
+ // [[Sensitivity]] "sensitivity"
+ // [[IgnorePunctuation]] "ignorePunctuation"
+ // [[Collation]] "collation"
+ // [[Numeric]] "numeric" kn
+ // [[CaseFirst]] "caseFirst" kf
CreateDataPropertyForOptions(
- isolate, options, isolate->factory()->collation_string(), collation);
-
+ isolate, options, isolate->factory()->locale_string(), locale.c_str());
CreateDataPropertyForOptions(isolate, options,
isolate->factory()->usage_string(), usage);
-
CreateDataPropertyForOptions(
- isolate, options, isolate->factory()->locale_string(), bcp47_locale_tag);
-
+ isolate, options, isolate->factory()->sensitivity_string(), sensitivity);
+ CreateDataPropertyForOptions(isolate, options,
+ isolate->factory()->ignorePunctuation_string(),
+ ignore_punctuation);
+ CreateDataPropertyForOptions(
+ isolate, options, isolate->factory()->collation_string(), collation);
+ CreateDataPropertyForOptions(isolate, options,
+ isolate->factory()->numeric_string(), numeric);
+ CreateDataPropertyForOptions(
+ isolate, options, isolate->factory()->caseFirst_string(), case_first);
return options;
}
namespace {
-void SetCaseFirstOption(icu::Collator* icu_collator, const char* value) {
+Intl::CaseFirst ToCaseFirst(const char* str) {
+ if (strcmp(str, "upper") == 0) return Intl::CaseFirst::kUpper;
+ if (strcmp(str, "lower") == 0) return Intl::CaseFirst::kLower;
+ if (strcmp(str, "false") == 0) return Intl::CaseFirst::kFalse;
+ return Intl::CaseFirst::kUndefined;
+}
+
+UColAttributeValue ToUColAttributeValue(Intl::CaseFirst case_first) {
+ switch (case_first) {
+ case Intl::CaseFirst::kUpper:
+ return UCOL_UPPER_FIRST;
+ case Intl::CaseFirst::kLower:
+ return UCOL_LOWER_FIRST;
+ case Intl::CaseFirst::kFalse:
+ case Intl::CaseFirst::kUndefined:
+ return UCOL_OFF;
+ }
+}
+
+void SetNumericOption(icu::Collator* icu_collator, bool numeric) {
CHECK_NOT_NULL(icu_collator);
- CHECK_NOT_NULL(value);
UErrorCode status = U_ZERO_ERROR;
- if (strcmp(value, "upper") == 0) {
- icu_collator->setAttribute(UCOL_CASE_FIRST, UCOL_UPPER_FIRST, status);
- } else if (strcmp(value, "lower") == 0) {
- icu_collator->setAttribute(UCOL_CASE_FIRST, UCOL_LOWER_FIRST, status);
- } else {
- icu_collator->setAttribute(UCOL_CASE_FIRST, UCOL_OFF, status);
- }
+ icu_collator->setAttribute(UCOL_NUMERIC_COLLATION,
+ numeric ? UCOL_ON : UCOL_OFF, status);
+ CHECK(U_SUCCESS(status));
+}
+
+void SetCaseFirstOption(icu::Collator* icu_collator,
+ Intl::CaseFirst case_first) {
+ CHECK_NOT_NULL(icu_collator);
+ UErrorCode status = U_ZERO_ERROR;
+ icu_collator->setAttribute(UCOL_CASE_FIRST, ToUColAttributeValue(case_first),
+ status);
CHECK(U_SUCCESS(status));
}
@@ -222,10 +253,11 @@ MaybeHandle<JSCollator> JSCollator::Initialize(Isolate* isolate,
Handle<Object> locales,
Handle<Object> options_obj) {
// 1. Let requestedLocales be ? CanonicalizeLocaleList(locales).
- Handle<JSObject> requested_locales;
- ASSIGN_RETURN_ON_EXCEPTION(isolate, requested_locales,
- Intl::CanonicalizeLocaleListJS(isolate, locales),
- JSCollator);
+ Maybe<std::vector<std::string>> maybe_requested_locales =
+ Intl::CanonicalizeLocaleList(isolate, locales);
+ MAYBE_RETURN(maybe_requested_locales, Handle<JSCollator>());
+ std::vector<std::string> requested_locales =
+ maybe_requested_locales.FromJust();
// 2. If options is undefined, then
if (options_obj->IsUndefined(isolate)) {
@@ -244,27 +276,19 @@ MaybeHandle<JSCollator> JSCollator::Initialize(Isolate* isolate,
// 4. Let usage be ? GetOption(options, "usage", "string", « "sort",
// "search" », "sort").
- std::vector<const char*> values = {"sort", "search"};
- std::unique_ptr<char[]> usage_str = nullptr;
- Usage usage = Usage::SORT;
- Maybe<bool> found_usage = Intl::GetStringOption(
- isolate, options, "usage", values, "Intl.Collator", &usage_str);
- MAYBE_RETURN(found_usage, MaybeHandle<JSCollator>());
-
- if (found_usage.FromJust()) {
- DCHECK_NOT_NULL(usage_str.get());
- if (strcmp(usage_str.get(), "search") == 0) {
- usage = Usage::SEARCH;
- }
- }
+ Maybe<Usage> maybe_usage = Intl::GetStringOption<Usage>(
+ isolate, options, "usage", "Intl.Collator", {"sort", "search"},
+ {Usage::SORT, Usage::SEARCH}, Usage::SORT);
+ MAYBE_RETURN(maybe_usage, MaybeHandle<JSCollator>());
+ Usage usage = maybe_usage.FromJust();
- // TODO(gsathya): This is currently done as part of the
- // Intl::ResolveLocale call below. Fix this once resolveLocale is
- // changed to not do the lookup.
- //
// 9. Let matcher be ? GetOption(options, "localeMatcher", "string",
// « "lookup", "best fit" », "best fit").
// 10. Set opt.[[localeMatcher]] to matcher.
+ Maybe<Intl::MatcherOption> maybe_locale_matcher =
+ Intl::GetLocaleMatcher(isolate, options, "Intl.Collator");
+ MAYBE_RETURN(maybe_locale_matcher, MaybeHandle<JSCollator>());
+ Intl::MatcherOption matcher = maybe_locale_matcher.FromJust();
// 11. Let numeric be ? GetOption(options, "numeric", "boolean",
// undefined, undefined).
@@ -283,12 +307,10 @@ MaybeHandle<JSCollator> JSCollator::Initialize(Isolate* isolate,
// 14. Let caseFirst be ? GetOption(options, "caseFirst", "string",
// « "upper", "lower", "false" », undefined).
- // 15. Set opt.[[kf]] to caseFirst.
- values = {"upper", "lower", "false"};
- std::unique_ptr<char[]> case_first_str = nullptr;
- Maybe<bool> found_case_first = Intl::GetStringOption(
- isolate, options, "caseFirst", values, "Intl.Collator", &case_first_str);
- MAYBE_RETURN(found_case_first, MaybeHandle<JSCollator>());
+ Maybe<Intl::CaseFirst> maybe_case_first =
+ Intl::GetCaseFirst(isolate, options, "Intl.Collator");
+ MAYBE_RETURN(maybe_case_first, MaybeHandle<JSCollator>());
+ Intl::CaseFirst case_first = maybe_case_first.FromJust();
// The relevant unicode extensions accepted by Collator as specified here:
// https://tc39.github.io/ecma402/#sec-intl-collator-internal-slots
@@ -296,72 +318,18 @@ MaybeHandle<JSCollator> JSCollator::Initialize(Isolate* isolate,
// 16. Let relevantExtensionKeys be %Collator%.[[RelevantExtensionKeys]].
std::set<std::string> relevant_extension_keys{"co", "kn", "kf"};
- // We don't pass the relevant_extension_keys to ResolveLocale here
- // as per the spec.
- //
- // In ResolveLocale, the spec makes sure we only pick and use the
- // relevant extension keys and ignore any other keys. Also, in
- // ResolveLocale, the spec makes sure that if a given key has both a
- // value in the options object and an unicode extension value, then
- // we pick the value provided in the options object.
- // For example: in the case of `new Intl.Collator('en-u-kn-true', {
- // numeric: false })` the value `false` is used for the `numeric`
- // key.
- //
- // Instead of performing all this validation in ResolveLocale, we
- // just perform it inline below. In the future when we port
- // ResolveLocale to C++, we can make all these validations generic
- // and move it ResolveLocale.
- //
// 17. Let r be ResolveLocale(%Collator%.[[AvailableLocales]],
// requestedLocales, opt, %Collator%.[[RelevantExtensionKeys]],
// localeData).
+ Intl::ResolvedLocale r =
+ Intl::ResolveLocale(isolate, JSCollator::GetAvailableLocales(),
+ requested_locales, matcher, relevant_extension_keys);
+
// 18. Set collator.[[Locale]] to r.[[locale]].
- Handle<JSObject> r;
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, r,
- Intl::ResolveLocale(isolate, "collator", requested_locales, options),
- JSCollator);
-
- Handle<String> locale_with_extension_str =
- isolate->factory()->NewStringFromStaticChars("localeWithExtension");
- Handle<Object> locale_with_extension_obj =
- JSObject::GetDataProperty(r, locale_with_extension_str);
-
- // The locale_with_extension has to be a string. Either a user
- // provided canonicalized string or the default locale.
- CHECK(locale_with_extension_obj->IsString());
- Handle<String> locale_with_extension =
- Handle<String>::cast(locale_with_extension_obj);
-
- icu::Locale icu_locale =
- Intl::CreateICULocale(isolate, locale_with_extension);
+ icu::Locale icu_locale = r.icu_locale;
DCHECK(!icu_locale.isBogus());
- std::map<std::string, std::string> extensions =
- Intl::LookupUnicodeExtensions(icu_locale, relevant_extension_keys);
-
// 19. Let collation be r.[[co]].
- //
- // r.[[co]] is already set as part of the icu::Locale creation as
- // icu parses unicode extensions and sets the keywords.
- //
- // We need to sanitize the keywords based on certain ECMAScript rules.
- //
- // As per https://tc39.github.io/ecma402/#sec-intl-collator-internal-slots:
- // The values "standard" and "search" must not be used as elements
- // in any [[SortLocaleData]][locale].co and
- // [[SearchLocaleData]][locale].co list.
- auto co_extension_it = extensions.find("co");
- if (co_extension_it != extensions.end()) {
- const std::string& value = co_extension_it->second;
- if ((value == "search") || (value == "standard")) {
- UErrorCode status = U_ZERO_ERROR;
- const char* key = uloc_toLegacyKey("co");
- icu_locale.setKeywordValue(key, nullptr, status);
- CHECK(U_SUCCESS(status));
- }
- }
// 5. Set collator.[[Usage]] to usage.
//
@@ -421,19 +389,11 @@ MaybeHandle<JSCollator> JSCollator::Initialize(Isolate* isolate,
// passed in through the unicode extensions.
status = U_ZERO_ERROR;
if (found_numeric.FromJust()) {
- icu_collator->setAttribute(UCOL_NUMERIC_COLLATION,
- numeric ? UCOL_ON : UCOL_OFF, status);
- CHECK(U_SUCCESS(status));
+ SetNumericOption(icu_collator.get(), numeric);
} else {
- auto kn_extension_it = extensions.find("kn");
- if (kn_extension_it != extensions.end()) {
- const std::string& value = kn_extension_it->second;
-
- numeric = (value == "true");
-
- icu_collator->setAttribute(UCOL_NUMERIC_COLLATION,
- numeric ? UCOL_ON : UCOL_OFF, status);
- CHECK(U_SUCCESS(status));
+ auto kn_extension_it = r.extensions.find("kn");
+ if (kn_extension_it != r.extensions.end()) {
+ SetNumericOption(icu_collator.get(), (kn_extension_it->second == "true"));
}
}
@@ -443,14 +403,13 @@ MaybeHandle<JSCollator> JSCollator::Initialize(Isolate* isolate,
// If the caseFirst value is passed in through the options object,
// then we use it. Otherwise, we check if the caseFirst value is
// passed in through the unicode extensions.
- if (found_case_first.FromJust()) {
- const char* case_first_cstr = case_first_str.get();
- SetCaseFirstOption(icu_collator.get(), case_first_cstr);
+ if (case_first != Intl::CaseFirst::kUndefined) {
+ SetCaseFirstOption(icu_collator.get(), case_first);
} else {
- auto kf_extension_it = extensions.find("kf");
- if (kf_extension_it != extensions.end()) {
- const std::string& value = kf_extension_it->second;
- SetCaseFirstOption(icu_collator.get(), value.c_str());
+ auto kf_extension_it = r.extensions.find("kf");
+ if (kf_extension_it != r.extensions.end()) {
+ SetCaseFirstOption(icu_collator.get(),
+ ToCaseFirst(kf_extension_it->second.c_str()));
}
}
@@ -463,40 +422,42 @@ MaybeHandle<JSCollator> JSCollator::Initialize(Isolate* isolate,
// 24. Let sensitivity be ? GetOption(options, "sensitivity",
// "string", « "base", "accent", "case", "variant" », undefined).
- values = {"base", "accent", "case", "variant"};
- std::unique_ptr<char[]> sensitivity_str = nullptr;
- Maybe<bool> found_sensitivity =
- Intl::GetStringOption(isolate, options, "sensitivity", values,
- "Intl.Collator", &sensitivity_str);
- MAYBE_RETURN(found_sensitivity, MaybeHandle<JSCollator>());
+ Maybe<Sensitivity> maybe_sensitivity = Intl::GetStringOption<Sensitivity>(
+ isolate, options, "sensitivity", "Intl.Collator",
+ {"base", "accent", "case", "variant"},
+ {Sensitivity::kBase, Sensitivity::kAccent, Sensitivity::kCase,
+ Sensitivity::kVariant},
+ Sensitivity::kUndefined);
+ MAYBE_RETURN(maybe_sensitivity, MaybeHandle<JSCollator>());
+ Sensitivity sensitivity = maybe_sensitivity.FromJust();
// 25. If sensitivity is undefined, then
- if (!found_sensitivity.FromJust()) {
+ if (sensitivity == Sensitivity::kUndefined) {
// 25. a. If usage is "sort", then
if (usage == Usage::SORT) {
// 25. a. i. Let sensitivity be "variant".
- // 26. Set collator.[[Sensitivity]] to sensitivity.
- icu_collator->setStrength(icu::Collator::TERTIARY);
+ sensitivity = Sensitivity::kVariant;
}
- } else {
- DCHECK(found_sensitivity.FromJust());
- const char* sensitivity_cstr = sensitivity_str.get();
- DCHECK_NOT_NULL(sensitivity_cstr);
-
- // 26. Set collator.[[Sensitivity]] to sensitivity.
- if (strcmp(sensitivity_cstr, "base") == 0) {
+ }
+ // 26. Set collator.[[Sensitivity]] to sensitivity.
+ switch (sensitivity) {
+ case Sensitivity::kBase:
icu_collator->setStrength(icu::Collator::PRIMARY);
- } else if (strcmp(sensitivity_cstr, "accent") == 0) {
+ break;
+ case Sensitivity::kAccent:
icu_collator->setStrength(icu::Collator::SECONDARY);
- } else if (strcmp(sensitivity_cstr, "case") == 0) {
+ break;
+ case Sensitivity::kCase:
icu_collator->setStrength(icu::Collator::PRIMARY);
status = U_ZERO_ERROR;
icu_collator->setAttribute(UCOL_CASE_LEVEL, UCOL_ON, status);
CHECK(U_SUCCESS(status));
- } else {
- DCHECK_EQ(0, strcmp(sensitivity_cstr, "variant"));
+ break;
+ case Sensitivity::kVariant:
icu_collator->setStrength(icu::Collator::TERTIARY);
- }
+ break;
+ case Sensitivity::kUndefined:
+ break;
}
// 27.Let ignorePunctuation be ? GetOption(options,
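The strength mapping corresponds directly to observable ICU comparison behavior. A standalone ICU4C sketch, assuming ICU is available, shows PRIMARY strength (sensitivity "base") treating accent and case variants as equal, while TERTIARY (sensitivity "variant") distinguishes them:

    #include <iostream>
    #include <memory>

    #include "unicode/coll.h"
    #include "unicode/unistr.h"

    int main() {
      UErrorCode status = U_ZERO_ERROR;
      std::unique_ptr<icu::Collator> coll(
          icu::Collator::createInstance(icu::Locale::getUS(), status));
      if (U_FAILURE(status)) return 1;

      icu::UnicodeString a = icu::UnicodeString::fromUTF8("a");
      icu::UnicodeString a_acute = icu::UnicodeString::fromUTF8("\xC3\xA1");
      icu::UnicodeString upper_a = icu::UnicodeString::fromUTF8("A");

      // PRIMARY ~ sensitivity "base": accents and case are ignored.
      coll->setStrength(icu::Collator::PRIMARY);
      std::cout << (coll->compare(a, a_acute, status) == UCOL_EQUAL) << " "
                << (coll->compare(a, upper_a, status) == UCOL_EQUAL) << "\n";

      // TERTIARY ~ sensitivity "variant": both differences matter.
      coll->setStrength(icu::Collator::TERTIARY);
      std::cout << (coll->compare(a, a_acute, status) == UCOL_EQUAL) << " "
                << (coll->compare(a, upper_a, status) == UCOL_EQUAL) << "\n";
    }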
@@ -523,5 +484,12 @@ MaybeHandle<JSCollator> JSCollator::Initialize(Isolate* isolate,
return collator;
}
+std::set<std::string> JSCollator::GetAvailableLocales() {
+ int32_t num_locales = 0;
+ const icu::Locale* icu_available_locales =
+ icu::Collator::getAvailableLocales(num_locales);
+ return Intl::BuildLocaleSet(icu_available_locales, num_locales);
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/objects/js-collator.h b/deps/v8/src/objects/js-collator.h
index f857df95b1..0e8cec5f7b 100644
--- a/deps/v8/src/objects/js-collator.h
+++ b/deps/v8/src/objects/js-collator.h
@@ -9,10 +9,14 @@
#ifndef V8_OBJECTS_JS_COLLATOR_H_
#define V8_OBJECTS_JS_COLLATOR_H_
+#include <set>
+#include <string>
+
#include "src/heap/factory.h"
#include "src/isolate.h"
#include "src/objects.h"
#include "src/objects/intl-objects.h"
+#include "src/objects/js-objects.h"
#include "src/objects/managed.h"
// Has to be the last include (doesn't have include guards):
@@ -36,15 +40,17 @@ class JSCollator : public JSObject {
static Handle<JSObject> ResolvedOptions(Isolate* isolate,
Handle<JSCollator> collator);
+ static std::set<std::string> GetAvailableLocales();
+
DECL_CAST(JSCollator)
DECL_PRINTER(JSCollator)
DECL_VERIFIER(JSCollator)
// Layout description.
-#define JS_COLLATOR_FIELDS(V) \
- V(kICUCollatorOffset, kPointerSize) \
- V(kBoundCompareOffset, kPointerSize) \
- /* Total size. */ \
+#define JS_COLLATOR_FIELDS(V) \
+ V(kICUCollatorOffset, kTaggedSize) \
+ V(kBoundCompareOffset, kTaggedSize) \
+ /* Total size. */ \
V(kSize, 0)
DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize, JS_COLLATOR_FIELDS)
@@ -53,8 +59,7 @@ class JSCollator : public JSObject {
DECL_ACCESSORS(icu_collator, Managed<icu::Collator>)
DECL_ACCESSORS(bound_compare, Object);
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(JSCollator);
+ OBJECT_CONSTRUCTORS(JSCollator, JSObject);
};
} // namespace internal
diff --git a/deps/v8/src/objects/js-collection-inl.h b/deps/v8/src/objects/js-collection-inl.h
index c50e803429..c216b3daff 100644
--- a/deps/v8/src/objects/js-collection-inl.h
+++ b/deps/v8/src/objects/js-collection-inl.h
@@ -15,6 +15,32 @@
namespace v8 {
namespace internal {
+OBJECT_CONSTRUCTORS_IMPL(JSCollection, JSObject)
+OBJECT_CONSTRUCTORS_IMPL(JSMap, JSCollection)
+OBJECT_CONSTRUCTORS_IMPL(JSSet, JSCollection)
+OBJECT_CONSTRUCTORS_IMPL(JSWeakCollection, JSObject)
+OBJECT_CONSTRUCTORS_IMPL(JSWeakMap, JSWeakCollection)
+OBJECT_CONSTRUCTORS_IMPL(JSWeakSet, JSWeakCollection)
+
+// TODO(jkummerow): Move JSCollectionIterator to js-collection.h?
+// TODO(jkummerow): Introduce IsJSCollectionIterator() check? Or unchecked
+// version of OBJECT_CONSTRUCTORS_IMPL macro?
+JSCollectionIterator::JSCollectionIterator(Address ptr) : JSObject(ptr) {}
+template <class Derived, class TableType>
+OrderedHashTableIterator<Derived, TableType>::OrderedHashTableIterator(
+ Address ptr)
+ : JSCollectionIterator(ptr) {}
+
+JSMapIterator::JSMapIterator(Address ptr)
+ : OrderedHashTableIterator<JSMapIterator, OrderedHashMap>(ptr) {
+ SLOW_DCHECK(IsJSMapIterator());
+}
+
+JSSetIterator::JSSetIterator(Address ptr)
+ : OrderedHashTableIterator<JSSetIterator, OrderedHashSet>(ptr) {
+ SLOW_DCHECK(IsJSSetIterator());
+}
+
ACCESSORS(JSCollection, table, Object, kTableOffset)
ACCESSORS(JSCollectionIterator, table, Object, kTableOffset)
ACCESSORS(JSCollectionIterator, index, Object, kIndexOffset)
@@ -29,10 +55,10 @@ CAST_ACCESSOR(JSWeakCollection)
CAST_ACCESSOR(JSWeakMap)
CAST_ACCESSOR(JSWeakSet)
-Object* JSMapIterator::CurrentValue() {
- OrderedHashMap* table(OrderedHashMap::cast(this->table()));
+Object JSMapIterator::CurrentValue() {
+ OrderedHashMap table = OrderedHashMap::cast(this->table());
int index = Smi::ToInt(this->index());
- Object* value = table->ValueAt(index);
+ Object value = table->ValueAt(index);
DCHECK(!value->IsTheHole());
return value;
}
diff --git a/deps/v8/src/objects/js-collection.h b/deps/v8/src/objects/js-collection.h
index 7b5e38e7d8..7c2f265ae7 100644
--- a/deps/v8/src/objects/js-collection.h
+++ b/deps/v8/src/objects/js-collection.h
@@ -19,11 +19,18 @@ class JSCollection : public JSObject {
// [table]: the backing hash table
DECL_ACCESSORS(table, Object)
- static const int kTableOffset = JSObject::kHeaderSize;
- static const int kSize = kTableOffset + kPointerSize;
+// Layout description.
+#define JS_COLLECTION_FIELDS(V) \
+ V(kTableOffset, kTaggedSize) \
+ /* Header size. */ \
+ V(kSize, 0)
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(JSCollection);
+ DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize, JS_COLLECTION_FIELDS)
+#undef JS_COLLECTION_FIELDS
+
+ static const int kAddFunctionDescriptorIndex = 3;
+
+ OBJECT_CONSTRUCTORS(JSCollection, JSObject);
};
// The JSSet describes EcmaScript Harmony sets
@@ -38,8 +45,7 @@ class JSSet : public JSCollection {
DECL_PRINTER(JSSet)
DECL_VERIFIER(JSSet)
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(JSSet);
+ OBJECT_CONSTRUCTORS(JSSet, JSCollection);
};
class JSSetIterator
@@ -51,8 +57,8 @@ class JSSetIterator
DECL_CAST(JSSetIterator)
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(JSSetIterator);
+ OBJECT_CONSTRUCTORS(JSSetIterator,
+ OrderedHashTableIterator<JSSetIterator, OrderedHashSet>);
};
// The JSMap describes EcmaScript Harmony maps
@@ -67,8 +73,7 @@ class JSMap : public JSCollection {
DECL_PRINTER(JSMap)
DECL_VERIFIER(JSMap)
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(JSMap);
+ OBJECT_CONSTRUCTORS(JSMap, JSCollection);
};
class JSMapIterator
@@ -82,10 +87,10 @@ class JSMapIterator
// Returns the current value of the iterator. This should only be called when
// |HasMore| returns true.
- inline Object* CurrentValue();
+ inline Object CurrentValue();
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(JSMapIterator);
+ OBJECT_CONSTRUCTORS(JSMapIterator,
+ OrderedHashTableIterator<JSMapIterator, OrderedHashMap>);
};
// Base class for both JSWeakMap and JSWeakSet
@@ -104,8 +109,17 @@ class JSWeakCollection : public JSObject {
static Handle<JSArray> GetEntries(Handle<JSWeakCollection> holder,
int max_entries);
- static const int kTableOffset = JSObject::kHeaderSize;
- static const int kSize = kTableOffset + kPointerSize;
+// Layout description.
+#define JS_WEAK_COLLECTION_FIELDS(V) \
+ V(kTableOffset, kTaggedSize) \
+ /* Header size. */ \
+ V(kSize, 0)
+
+ DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
+ JS_WEAK_COLLECTION_FIELDS)
+#undef JS_WEAK_COLLECTION_FIELDS
+
+ static const int kAddFunctionDescriptorIndex = 3;
// Iterates the function object according to the visiting policy.
class BodyDescriptorImpl;
@@ -113,8 +127,7 @@ class JSWeakCollection : public JSObject {
// Visit the whole object.
typedef BodyDescriptorImpl BodyDescriptor;
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(JSWeakCollection);
+ OBJECT_CONSTRUCTORS(JSWeakCollection, JSObject);
};
// The JSWeakMap describes EcmaScript Harmony weak maps
@@ -126,8 +139,7 @@ class JSWeakMap : public JSWeakCollection {
DECL_PRINTER(JSWeakMap)
DECL_VERIFIER(JSWeakMap)
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(JSWeakMap);
+ OBJECT_CONSTRUCTORS(JSWeakMap, JSWeakCollection);
};
// The JSWeakSet describes EcmaScript Harmony weak sets
@@ -139,8 +151,7 @@ class JSWeakSet : public JSWeakCollection {
DECL_PRINTER(JSWeakSet)
DECL_VERIFIER(JSWeakSet)
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(JSWeakSet);
+ OBJECT_CONSTRUCTORS(JSWeakSet, JSWeakCollection);
};
} // namespace internal
diff --git a/deps/v8/src/objects/js-date-time-format-inl.h b/deps/v8/src/objects/js-date-time-format-inl.h
index 0ad7f363c5..a2ae92b112 100644
--- a/deps/v8/src/objects/js-date-time-format-inl.h
+++ b/deps/v8/src/objects/js-date-time-format-inl.h
@@ -18,10 +18,23 @@
namespace v8 {
namespace internal {
+OBJECT_CONSTRUCTORS_IMPL(JSDateTimeFormat, JSObject)
+
ACCESSORS(JSDateTimeFormat, icu_locale, Managed<icu::Locale>, kICULocaleOffset);
ACCESSORS(JSDateTimeFormat, icu_simple_date_format,
Managed<icu::SimpleDateFormat>, kICUSimpleDateFormatOffset)
ACCESSORS(JSDateTimeFormat, bound_format, Object, kBoundFormatOffset);
+SMI_ACCESSORS(JSDateTimeFormat, flags, kFlagsOffset)
+
+inline void JSDateTimeFormat::set_hour_cycle(Intl::HourCycle hour_cycle) {
+ int hints = flags();
+ hints = HourCycleBits::update(hints, hour_cycle);
+ set_flags(hints);
+}
+
+inline Intl::HourCycle JSDateTimeFormat::hour_cycle() const {
+ return HourCycleBits::decode(flags());
+}
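HourCycleBits follows V8's usual bit-field-in-a-Smi pattern: decode masks and shifts the flags word, update clears the field and ORs in the new value. A rough standalone sketch of that shape (names and widths here are illustrative, not V8's exact utility):

    #include <cassert>
    #include <cstdint>

    // A value of enum type T stored in kBits bits at kShift of a flags word.
    template <typename T, int kShift, int kBits>
    struct BitFieldSketch {
      static constexpr uint32_t kMask =
          ((uint32_t{1} << kBits) - 1) << kShift;
      static uint32_t update(uint32_t previous, T value) {
        return (previous & ~kMask) | (static_cast<uint32_t>(value) << kShift);
      }
      static T decode(uint32_t flags) {
        return static_cast<T>((flags & kMask) >> kShift);
      }
    };

    enum class HourCycle : uint32_t { kUndefined, kH11, kH12, kH23, kH24 };
    using HourCycleBitsSketch = BitFieldSketch<HourCycle, 0, 3>;

    int main() {
      uint32_t flags = 0;
      flags = HourCycleBitsSketch::update(flags, HourCycle::kH23);
      assert(HourCycleBitsSketch::decode(flags) == HourCycle::kH23);
    }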
CAST_ACCESSOR(JSDateTimeFormat);
diff --git a/deps/v8/src/objects/js-date-time-format.cc b/deps/v8/src/objects/js-date-time-format.cc
index 6285b74b04..b3b1d11253 100644
--- a/deps/v8/src/objects/js-date-time-format.cc
+++ b/deps/v8/src/objects/js-date-time-format.cc
@@ -12,6 +12,7 @@
#include <string>
#include <vector>
+#include "src/date.h"
#include "src/heap/factory.h"
#include "src/isolate.h"
#include "src/objects/intl-objects.h"
@@ -20,7 +21,6 @@
#include "unicode/calendar.h"
#include "unicode/dtptngen.h"
#include "unicode/gregocal.h"
-#include "unicode/numsys.h"
#include "unicode/smpdtfmt.h"
#include "unicode/unistr.h"
@@ -62,7 +62,12 @@ const std::vector<PatternItem> GetPatternItems() {
"narrow", "long", "short", "2-digit", "numeric"};
const std::vector<PatternItem> kPatternItems = {
PatternItem("weekday",
- {{"EEEEE", "narrow"}, {"EEEE", "long"}, {"EEE", "short"}},
+ {{"EEEEE", "narrow"},
+ {"EEEE", "long"},
+ {"EEE", "short"},
+ {"ccccc", "narrow"},
+ {"cccc", "long"},
+ {"ccc", "short"}},
kNarrowLongShort),
PatternItem("era",
{{"GGGGG", "narrow"}, {"GGGG", "long"}, {"GGG", "short"}},
@@ -87,7 +92,11 @@ const std::vector<PatternItem> GetPatternItems() {
{{"HH", "2-digit"},
{"H", "numeric"},
{"hh", "2-digit"},
- {"h", "numeric"}},
+ {"h", "numeric"},
+ {"kk", "2-digit"},
+ {"k", "numeric"},
+ {"KK", "2-digit"},
+ {"K", "numeric"}},
k2DigitNumeric),
PatternItem("minute", {{"mm", "2-digit"}, {"m", "numeric"}},
k2DigitNumeric),
@@ -114,12 +123,6 @@ class PatternData {
std::vector<const char*> allowed_values;
};
-enum HourOption {
- H_UNKNOWN,
- H_12,
- H_24,
-};
-
const std::vector<PatternData> CreateCommonData(const PatternData& hour_data) {
std::vector<PatternData> build;
for (const PatternItem& item : GetPatternItems()) {
@@ -135,60 +138,41 @@ const std::vector<PatternData> CreateCommonData(const PatternData& hour_data) {
const std::vector<PatternData> CreateData(const char* digit2,
const char* numeric) {
- static std::vector<const char*> k2DigitNumeric = {"2-digit", "numeric"};
- return CreateCommonData(PatternData(
- "hour", {{digit2, "2-digit"}, {numeric, "numeric"}}, k2DigitNumeric));
+ return CreateCommonData(
+ PatternData("hour", {{digit2, "2-digit"}, {numeric, "numeric"}},
+ {"2-digit", "numeric"}));
}
-const std::vector<PatternData> GetPatternData(HourOption option) {
+// According to "Date Field Symbol Table" in
+// http://userguide.icu-project.org/formatparse/datetime
+// Symbol | Meaning | Example(s)
+// h hour in am/pm (1~12) h 7
+// hh 07
+// H hour in day (0~23) H 0
+// HH 00
+// k hour in day (1~24) k 24
+// kk 24
+// K hour in am/pm (0~11) K 0
+// KK 00
+const std::vector<PatternData> GetPatternData(Intl::HourCycle hour_cycle) {
const std::vector<PatternData> data = CreateData("jj", "j");
+ const std::vector<PatternData> data_h11 = CreateData("KK", "K");
const std::vector<PatternData> data_h12 = CreateData("hh", "h");
- const std::vector<PatternData> data_h24 = CreateData("HH", "H");
- switch (option) {
- case HourOption::H_12:
+ const std::vector<PatternData> data_h23 = CreateData("HH", "H");
+ const std::vector<PatternData> data_h24 = CreateData("kk", "k");
+ switch (hour_cycle) {
+ case Intl::HourCycle::kH11:
+ return data_h11;
+ case Intl::HourCycle::kH12:
return data_h12;
- case HourOption::H_24:
+ case Intl::HourCycle::kH23:
+ return data_h23;
+ case Intl::HourCycle::kH24:
return data_h24;
- case HourOption::H_UNKNOWN:
+ case Intl::HourCycle::kUndefined:
return data;
- }
-}
-
-void SetPropertyFromPattern(Isolate* isolate, const std::string& pattern,
- Handle<JSObject> options) {
- Factory* factory = isolate->factory();
- const std::vector<PatternItem> items = GetPatternItems();
- for (const auto& item : items) {
- for (const auto& pair : item.pairs) {
- if (pattern.find(pair.pattern) != std::string::npos) {
- // After we find the first pair in the item which matching the pattern,
- // we set the property and look for the next item in kPatternItems.
- CHECK(JSReceiver::CreateDataProperty(
- isolate, options,
- factory->NewStringFromAsciiChecked(item.property.c_str()),
- factory->NewStringFromAsciiChecked(pair.value.c_str()),
- kDontThrow)
- .FromJust());
- break;
- }
- }
- }
- // hour12
- // b. If p is "hour12", then
- // i. Let hc be dtf.[[HourCycle]].
- // ii. If hc is "h11" or "h12", let v be true.
- // iii. Else if, hc is "h23" or "h24", let v be false.
- // iv. Else, let v be undefined.
- if (pattern.find('h') != std::string::npos) {
- CHECK(JSReceiver::CreateDataProperty(
- isolate, options, factory->NewStringFromStaticChars("hour12"),
- factory->true_value(), kDontThrow)
- .FromJust());
- } else if (pattern.find('H') != std::string::npos) {
- CHECK(JSReceiver::CreateDataProperty(
- isolate, options, factory->NewStringFromStaticChars("hour12"),
- factory->false_value(), kDontThrow)
- .FromJust());
+ default:
+ UNREACHABLE();
}
}
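
For a concrete feel for the symbols above, the same instant renders differently
under each hour pattern. A minimal standalone ICU sketch, assuming en_US data
and a GMT time zone (expected outputs are in the comment, per the table above):

    #include <iostream>
    #include <string>
    #include "unicode/locid.h"
    #include "unicode/smpdtfmt.h"
    #include "unicode/timezone.h"

    int main() {
      // Midnight, January 1, 1970 UTC (UDate counts milliseconds since the epoch).
      const UDate midnight = 0.0;
      for (const char* pattern : {"h a", "H", "k", "K a"}) {
        UErrorCode status = U_ZERO_ERROR;
        icu::SimpleDateFormat fmt(icu::UnicodeString(pattern, -1, US_INV),
                                  icu::Locale("en_US"), status);
        if (U_FAILURE(status)) return 1;
        fmt.setTimeZone(*icu::TimeZone::getGMT());
        icu::UnicodeString formatted;
        fmt.format(midnight, formatted);
        std::string out;
        formatted.toUTF8String(out);
        // Expected: "h a" -> "12 AM", "H" -> "0", "k" -> "24", "K a" -> "0 AM".
        std::cout << pattern << " => " << out << std::endl;
      }
      return 0;
    }
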
@@ -289,26 +273,23 @@ std::string JSDateTimeFormat::CanonicalizeTimeZoneID(Isolate* isolate,
return ToTitleCaseTimezoneLocation(isolate, input);
}
+// ecma402 #sec-intl.datetimeformat.prototype.resolvedoptions
MaybeHandle<JSObject> JSDateTimeFormat::ResolvedOptions(
Isolate* isolate, Handle<JSDateTimeFormat> date_time_format) {
Factory* factory = isolate->factory();
// 4. Let options be ! ObjectCreate(%ObjectPrototype%).
Handle<JSObject> options = factory->NewJSObject(isolate->object_function());
- // 5. For each row of Table 6, except the header row, in any order, do
- // a. Let p be the Property value of the current row.
Handle<Object> resolved_obj;
- // locale
- UErrorCode status = U_ZERO_ERROR;
- char language[ULOC_FULLNAME_CAPACITY];
- uloc_toLanguageTag(date_time_format->icu_locale()->raw()->getName(), language,
- ULOC_FULLNAME_CAPACITY, FALSE, &status);
- CHECK(U_SUCCESS(status));
- Handle<String> locale = factory->NewStringFromAsciiChecked(language);
- CHECK(JSReceiver::CreateDataProperty(
- isolate, options, factory->locale_string(), locale, kDontThrow)
- .FromJust());
+ CHECK(!date_time_format->icu_locale().is_null());
+ CHECK_NOT_NULL(date_time_format->icu_locale()->raw());
+ icu::Locale* icu_locale = date_time_format->icu_locale()->raw();
+ Maybe<std::string> maybe_locale_str = Intl::ToLanguageTag(*icu_locale);
+ MAYBE_RETURN(maybe_locale_str, MaybeHandle<JSObject>());
+ std::string locale_str = maybe_locale_str.FromJust();
+ Handle<String> locale =
+ factory->NewStringFromAsciiChecked(locale_str.c_str());
icu::SimpleDateFormat* icu_simple_date_format =
date_time_format->icu_simple_date_format()->raw();
@@ -328,36 +309,15 @@ MaybeHandle<JSObject> JSDateTimeFormat::ResolvedOptions(
} else if (calendar_str == "ethiopic-amete-alem") {
calendar_str = "ethioaa";
}
- CHECK(JSReceiver::CreateDataProperty(
- isolate, options, factory->NewStringFromStaticChars("calendar"),
- factory->NewStringFromAsciiChecked(calendar_str.c_str()),
- kDontThrow)
- .FromJust());
- // Ugly hack. ICU doesn't expose numbering system in any way, so we have
- // to assume that for given locale NumberingSystem constructor produces the
- // same digits as NumberFormat/Calendar would.
- // Tracked by https://unicode-org.atlassian.net/browse/ICU-13431
- std::unique_ptr<icu::NumberingSystem> numbering_system(
- icu::NumberingSystem::createInstance(
- *(date_time_format->icu_locale()->raw()), status));
- if (U_SUCCESS(status)) {
- CHECK(JSReceiver::CreateDataProperty(
- isolate, options, factory->numberingSystem_string(),
- factory->NewStringFromAsciiChecked(numbering_system->getName()),
- kDontThrow)
- .FromJust());
- }
-
- // timezone
const icu::TimeZone& tz = calendar->getTimeZone();
icu::UnicodeString time_zone;
tz.getID(time_zone);
- status = U_ZERO_ERROR;
+ UErrorCode status = U_ZERO_ERROR;
icu::UnicodeString canonical_time_zone;
icu::TimeZone::getCanonicalID(time_zone, canonical_time_zone, status);
+ Handle<Object> timezone_value;
if (U_SUCCESS(status)) {
- Handle<String> timezone_value;
// In CLDR (http://unicode.org/cldr/trac/ticket/9943), Etc/UTC is made
// a separate timezone ID from Etc/GMT even though they're still the same
// timezone. We have Etc/UTC because 'UTC', 'Etc/Universal',
@@ -366,29 +326,111 @@ MaybeHandle<JSObject> JSDateTimeFormat::ResolvedOptions(
// ecma402#sec-canonicalizetimezonename step 3
if (canonical_time_zone == UNICODE_STRING_SIMPLE("Etc/UTC") ||
canonical_time_zone == UNICODE_STRING_SIMPLE("Etc/GMT")) {
- timezone_value = factory->NewStringFromStaticChars("UTC");
+ timezone_value = factory->UTC_string();
} else {
ASSIGN_RETURN_ON_EXCEPTION(isolate, timezone_value,
Intl::ToString(isolate, canonical_time_zone),
JSObject);
}
- CHECK(JSReceiver::CreateDataProperty(
- isolate, options, factory->NewStringFromStaticChars("timeZone"),
- timezone_value, kDontThrow)
- .FromJust());
} else {
// Somehow on Windows we will reach here.
- CHECK(JSReceiver::CreateDataProperty(
- isolate, options, factory->NewStringFromStaticChars("timeZone"),
- factory->undefined_value(), kDontThrow)
- .FromJust());
+ timezone_value = factory->undefined_value();
}
+ // Ugly hack. ICU doesn't expose the numbering system in any way, so we
+ // have to assume that for a given locale the NumberingSystem constructor
+ // produces the same digits as NumberFormat/Calendar would.
+ // Tracked by https://unicode-org.atlassian.net/browse/ICU-13431
+ std::string numbering_system = Intl::GetNumberingSystem(*icu_locale);
+
icu::UnicodeString pattern_unicode;
icu_simple_date_format->toPattern(pattern_unicode);
std::string pattern;
pattern_unicode.toUTF8String(pattern);
- SetPropertyFromPattern(isolate, pattern, options);
+
+ // 5. For each row of Table 6, except the header row, in table order, do
+ // Table 6: Resolved Options of DateTimeFormat Instances
+ // Internal Slot Property
+ // [[Locale]] "locale"
+ // [[Calendar]] "calendar"
+ // [[NumberingSystem]] "numberingSystem"
+ // [[TimeZone]] "timeZone"
+ // [[HourCycle]] "hourCycle"
+ // "hour12"
+ // [[Weekday]] "weekday"
+ // [[Era]] "era"
+ // [[Year]] "year"
+ // [[Month]] "month"
+ // [[Day]] "day"
+ // [[Hour]] "hour"
+ // [[Minute]] "minute"
+ // [[Second]] "second"
+ // [[TimeZoneName]] "timeZoneName"
+ CHECK(JSReceiver::CreateDataProperty(
+ isolate, options, factory->locale_string(), locale, kDontThrow)
+ .FromJust());
+ CHECK(JSReceiver::CreateDataProperty(
+ isolate, options, factory->calendar_string(),
+ factory->NewStringFromAsciiChecked(calendar_str.c_str()),
+ kDontThrow)
+ .FromJust());
+ if (!numbering_system.empty()) {
+ CHECK(JSReceiver::CreateDataProperty(
+ isolate, options, factory->numberingSystem_string(),
+ factory->NewStringFromAsciiChecked(numbering_system.c_str()),
+ kDontThrow)
+ .FromJust());
+ }
+ CHECK(JSReceiver::CreateDataProperty(isolate, options,
+ factory->timeZone_string(),
+ timezone_value, kDontThrow)
+ .FromJust());
+
+ // 5.b.i. Let hc be dtf.[[HourCycle]].
+ Intl::HourCycle hc = date_time_format->hour_cycle();
+
+ if (hc != Intl::HourCycle::kUndefined) {
+ CHECK(JSReceiver::CreateDataProperty(
+ isolate, options, factory->hourCycle_string(),
+ date_time_format->HourCycleAsString(), kDontThrow)
+ .FromJust());
+ switch (hc) {
+ // ii. If hc is "h11" or "h12", let v be true.
+ case Intl::HourCycle::kH11:
+ case Intl::HourCycle::kH12:
+ CHECK(JSReceiver::CreateDataProperty(isolate, options,
+ factory->hour12_string(),
+ factory->true_value(), kDontThrow)
+ .FromJust());
+ break;
+ // iii. Else if, hc is "h23" or "h24", let v be false.
+ case Intl::HourCycle::kH23:
+ case Intl::HourCycle::kH24:
+ CHECK(JSReceiver::CreateDataProperty(isolate, options,
+ factory->hour12_string(),
+ factory->false_value(), kDontThrow)
+ .FromJust());
+ break;
+ // iv. Else, let v be undefined.
+ case Intl::HourCycle::kUndefined:
+ break;
+ }
+ }
+
+ for (const auto& item : GetPatternItems()) {
+ for (const auto& pair : item.pairs) {
+ if (pattern.find(pair.pattern) != std::string::npos) {
+ CHECK(JSReceiver::CreateDataProperty(
+ isolate, options,
+ factory->NewStringFromAsciiChecked(item.property.c_str()),
+ factory->NewStringFromAsciiChecked(pair.value.c_str()),
+ kDontThrow)
+ .FromJust());
+ break;
+ }
+ }
+ }
+
return options;
}
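
Note how hour12 is derived from the resolved hour cycle rather than stored as
its own slot: h11/h12 imply true, h23/h24 imply false, and no resolved hour
cycle means the property is omitted. A standalone sketch of that mapping
(hypothetical helper, mirroring step 5.b above):

    #include <optional>

    enum class HourCycle { kUndefined, kH11, kH12, kH23, kH24 };

    // Returns the "hour12" value implied by an hour cycle, or no value when
    // the hour cycle itself is undefined.
    std::optional<bool> Hour12FromHourCycle(HourCycle hc) {
      switch (hc) {
        case HourCycle::kH11:
        case HourCycle::kH12:
          return true;   // ii. If hc is "h11" or "h12", let v be true.
        case HourCycle::kH23:
        case HourCycle::kH24:
          return false;  // iii. Else if hc is "h23" or "h24", let v be false.
        case HourCycle::kUndefined:
          return std::nullopt;  // iv. Else, let v be undefined.
      }
      return std::nullopt;  // unreachable; keeps -Wreturn-type quiet
    }
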
@@ -397,7 +439,7 @@ namespace {
// ecma402/#sec-formatdatetime
// FormatDateTime( dateTimeFormat, x )
MaybeHandle<String> FormatDateTime(Isolate* isolate,
- Handle<JSDateTimeFormat> date_time_format,
+ const icu::SimpleDateFormat& date_format,
double x) {
double date_value = DateCache::TimeClip(x);
if (std::isnan(date_value)) {
@@ -405,12 +447,8 @@ MaybeHandle<String> FormatDateTime(Isolate* isolate,
String);
}
- icu::SimpleDateFormat* date_format =
- date_time_format->icu_simple_date_format()->raw();
- CHECK_NOT_NULL(date_format);
-
icu::UnicodeString result;
- date_format->format(date_value, result);
+ date_format.format(date_value, result);
return Intl::ToString(isolate, result);
}
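
Passing the icu::SimpleDateFormat by reference lets the cached-formatter fast
path below reuse this function without materializing a JSDateTimeFormat. The
TimeClip guard rejects out-of-range times; a hedged standalone sketch of the
spec operation DateCache::TimeClip is assumed to implement (es #sec-timeclip):

    #include <cmath>
    #include <limits>

    // Times more than 8.64e15 ms from the epoch (about +/-285,616 years) are
    // invalid and become NaN; finite in-range values are truncated toward
    // zero to a whole number of milliseconds.
    double TimeClip(double time) {
      if (!std::isfinite(time) || std::abs(time) > 8.64e15) {
        return std::numeric_limits<double>::quiet_NaN();
      }
      return std::trunc(time);
    }
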
@@ -439,28 +477,59 @@ MaybeHandle<String> JSDateTimeFormat::DateTimeFormat(
x = date->Number();
}
// 5. Return FormatDateTime(dtf, x).
- return FormatDateTime(isolate, date_time_format, x);
+ icu::SimpleDateFormat* format =
+ date_time_format->icu_simple_date_format()->raw();
+ return FormatDateTime(isolate, *format, x);
}
+namespace {
+Isolate::ICUObjectCacheType ConvertToCacheType(
+ JSDateTimeFormat::DefaultsOption type) {
+ switch (type) {
+ case JSDateTimeFormat::DefaultsOption::kDate:
+ return Isolate::ICUObjectCacheType::kDefaultSimpleDateFormatForDate;
+ case JSDateTimeFormat::DefaultsOption::kTime:
+ return Isolate::ICUObjectCacheType::kDefaultSimpleDateFormatForTime;
+ case JSDateTimeFormat::DefaultsOption::kAll:
+ return Isolate::ICUObjectCacheType::kDefaultSimpleDateFormat;
+ }
+}
+} // namespace
+
MaybeHandle<String> JSDateTimeFormat::ToLocaleDateTime(
Isolate* isolate, Handle<Object> date, Handle<Object> locales,
- Handle<Object> options, RequiredOption required, DefaultsOption defaults,
- const char* service) {
+ Handle<Object> options, RequiredOption required, DefaultsOption defaults) {
+ Isolate::ICUObjectCacheType cache_type = ConvertToCacheType(defaults);
+
Factory* factory = isolate->factory();
// 1. Let x be ? thisTimeValue(this value);
if (!date->IsJSDate()) {
THROW_NEW_ERROR(isolate,
NewTypeError(MessageTemplate::kMethodInvokedOnWrongType,
- factory->NewStringFromStaticChars("Date")),
+ factory->Date_string()),
String);
}
double const x = Handle<JSDate>::cast(date)->value()->Number();
// 2. If x is NaN, return "Invalid Date"
if (std::isnan(x)) {
- return factory->NewStringFromStaticChars("Invalid Date");
+ return factory->Invalid_Date_string();
}
+ // We only cache the instance when both locales and options are undefined,
+ // as that is the only case when the specified side-effects of examining
+ // those arguments are unobservable.
+ bool can_cache =
+ locales->IsUndefined(isolate) && options->IsUndefined(isolate);
+ if (can_cache) {
+ // Both locales and options are undefined, check the cache.
+ icu::SimpleDateFormat* cached_icu_simple_date_format =
+ static_cast<icu::SimpleDateFormat*>(
+ isolate->get_cached_icu_object(cache_type));
+ if (cached_icu_simple_date_format != nullptr) {
+ return FormatDateTime(isolate, *cached_icu_simple_date_format, x);
+ }
+ }
// 3. Let options be ? ToDateTimeOptions(options, required, defaults).
Handle<JSObject> internal_options;
ASSIGN_RETURN_ON_EXCEPTION(
@@ -468,19 +537,32 @@ MaybeHandle<String> JSDateTimeFormat::ToLocaleDateTime(
ToDateTimeOptions(isolate, options, required, defaults), String);
// 4. Let dateFormat be ? Construct(%DateTimeFormat%, « locales, options »).
- Handle<JSObject> object;
+ Handle<JSFunction> constructor = Handle<JSFunction>(
+ JSFunction::cast(isolate->context()
+ ->native_context()
+ ->intl_date_time_format_function()),
+ isolate);
+ Handle<JSObject> obj;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, obj,
+ JSObject::New(constructor, constructor, Handle<AllocationSite>::null()),
+ String);
+ Handle<JSDateTimeFormat> date_time_format;
ASSIGN_RETURN_ON_EXCEPTION(
- isolate, object,
- Intl::CachedOrNewService(isolate,
- factory->NewStringFromAsciiChecked(service),
- locales, options, internal_options),
+ isolate, date_time_format,
+ JSDateTimeFormat::Initialize(isolate, Handle<JSDateTimeFormat>::cast(obj),
+ locales, internal_options),
String);
- CHECK(object->IsJSDateTimeFormat());
- Handle<JSDateTimeFormat> date_time_format =
- Handle<JSDateTimeFormat>::cast(object);
+ if (can_cache) {
+ isolate->set_icu_object_in_cache(
+ cache_type, std::static_pointer_cast<icu::UObject>(
+ date_time_format->icu_simple_date_format()->get()));
+ }
// 5. Return FormatDateTime(dateFormat, x).
- return FormatDateTime(isolate, date_time_format, x);
+ icu::SimpleDateFormat* format =
+ date_time_format->icu_simple_date_format()->raw();
+ return FormatDateTime(isolate, *format, x);
}
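
The cache fast path only fires when locales and options are both undefined,
because that is the only call shape whose spec-mandated option lookups are
unobservable; otherwise a user-visible getter could be skipped. A simplified
standalone sketch of the memoization pattern (hypothetical names, not the
Isolate cache API):

    #include <map>
    #include <memory>
    #include <string>

    enum class CacheType { kDateFormat, kTimeFormat, kDateTimeFormat };

    struct Formatter {  // stand-in for icu::SimpleDateFormat
      std::string Format(double time_ms) const { return "<formatted>"; }
    };

    // Hypothetical per-isolate cache: one shared formatter per defaults kind.
    std::map<CacheType, std::shared_ptr<Formatter>> g_cache;

    std::string ToLocaleDateTime(double time_ms, bool args_undefined,
                                 CacheType type) {
      if (args_undefined) {  // no observable side effects were skipped
        auto it = g_cache.find(type);
        if (it != g_cache.end()) return it->second->Format(time_ms);
      }
      auto formatter = std::make_shared<Formatter>();  // expensive to build
      if (args_undefined) g_cache[type] = formatter;   // seed the fast path
      return formatter->Format(time_ms);
    }
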
namespace {
@@ -713,6 +795,33 @@ std::unique_ptr<icu::SimpleDateFormat> CreateICUDateFormat(
return date_format;
}
+Intl::HourCycle HourCycleDefault(icu::SimpleDateFormat* date_format) {
+ icu::UnicodeString pattern;
+ date_format->toPattern(pattern);
+ bool in_quote = false;
+ for (int32_t i = 0; i < pattern.length(); i++) {
+ char16_t ch = pattern[i];
+ switch (ch) {
+ case '\'':
+ in_quote = !in_quote;
+ break;
+ case 'K':
+ if (!in_quote) return Intl::HourCycle::kH11;
+ break;
+ case 'h':
+ if (!in_quote) return Intl::HourCycle::kH12;
+ break;
+ case 'H':
+ if (!in_quote) return Intl::HourCycle::kH23;
+ break;
+ case 'k':
+ if (!in_quote) return Intl::HourCycle::kH24;
+ break;
+ }
+ }
+ return Intl::HourCycle::kUndefined;
+}
+
} // namespace
enum FormatMatcherOption { kBestFit, kBasic };
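
HourCycleDefault keys off the first unquoted hour symbol because ICU patterns
can embed quoted literal text. A standalone restatement of the scan over a
plain UTF-16 string, with a couple of checks (same symbol table as above; not
V8 code):

    #include <cassert>
    #include <string>

    enum class HourCycle { kUndefined, kH11, kH12, kH23, kH24 };

    // Single quotes toggle literal mode; only unquoted K/h/H/k symbols
    // decide the hour cycle.
    HourCycle ScanPattern(const std::u16string& pattern) {
      bool in_quote = false;
      for (char16_t ch : pattern) {
        switch (ch) {
          case u'\'': in_quote = !in_quote; break;
          case u'K': if (!in_quote) return HourCycle::kH11; break;
          case u'h': if (!in_quote) return HourCycle::kH12; break;
          case u'H': if (!in_quote) return HourCycle::kH23; break;
          case u'k': if (!in_quote) return HourCycle::kH24; break;
        }
      }
      return HourCycle::kUndefined;
    }

    int main() {
      assert(ScanPattern(u"h:mm a") == HourCycle::kH12);
      // The quoted 'h' is literal text, so the unquoted 'H' decides: h23.
      assert(ScanPattern(u"'h'H:mm") == HourCycle::kH23);
      assert(ScanPattern(u"mm:ss") == HourCycle::kUndefined);
    }
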
@@ -720,7 +829,14 @@ enum FormatMatcherOption { kBestFit, kBasic };
// ecma402/#sec-initializedatetimeformat
MaybeHandle<JSDateTimeFormat> JSDateTimeFormat::Initialize(
Isolate* isolate, Handle<JSDateTimeFormat> date_time_format,
- Handle<Object> requested_locales, Handle<Object> input_options) {
+ Handle<Object> locales, Handle<Object> input_options) {
+ date_time_format->set_flags(0);
+ // 1. Let requestedLocales be ? CanonicalizeLocaleList(locales).
+ Maybe<std::vector<std::string>> maybe_requested_locales =
+ Intl::CanonicalizeLocaleList(isolate, locales);
+ MAYBE_RETURN(maybe_requested_locales, Handle<JSDateTimeFormat>());
+ std::vector<std::string> requested_locales =
+ maybe_requested_locales.FromJust();
// 2. Let options be ? ToDateTimeOptions(options, "any", "date").
Handle<JSObject> options;
ASSIGN_RETURN_ON_EXCEPTION(
@@ -729,17 +845,13 @@ MaybeHandle<JSDateTimeFormat> JSDateTimeFormat::Initialize(
isolate, input_options, RequiredOption::kAny, DefaultsOption::kDate),
JSDateTimeFormat);
- // ResolveLocale currently get option of localeMatcher so we have to call
- // ResolveLocale before "hour12" and "hourCycle".
- // TODO(ftang): fix this once ResolveLocale is ported to C++
- // 11. Let r be ResolveLocale( %DateTimeFormat%.[[AvailableLocales]],
- // requestedLocales, opt, %DateTimeFormat%.[[RelevantExtensionKeys]],
- // localeData).
- Handle<JSObject> r;
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, r,
- Intl::ResolveLocale(isolate, "dateformat", requested_locales, options),
- JSDateTimeFormat);
+ // 4. Let matcher be ? GetOption(options, "localeMatcher", "string",
+ // « "lookup", "best fit" », "best fit").
+ // 5. Set opt.[[localeMatcher]] to matcher.
+ Maybe<Intl::MatcherOption> maybe_locale_matcher =
+ Intl::GetLocaleMatcher(isolate, options, "Intl.DateTimeFormat");
+ MAYBE_RETURN(maybe_locale_matcher, MaybeHandle<JSDateTimeFormat>());
+ Intl::MatcherOption locale_matcher = maybe_locale_matcher.FromJust();
// 6. Let hour12 be ? GetOption(options, "hour12", "boolean", undefined,
// undefined).
@@ -747,54 +859,84 @@ MaybeHandle<JSDateTimeFormat> JSDateTimeFormat::Initialize(
Maybe<bool> maybe_get_hour12 = Intl::GetBoolOption(
isolate, options, "hour12", "Intl.DateTimeFormat", &hour12);
MAYBE_RETURN(maybe_get_hour12, Handle<JSDateTimeFormat>());
- HourOption hour_option = HourOption::H_UNKNOWN;
- if (maybe_get_hour12.FromJust()) {
- hour_option = hour12 ? HourOption::H_12 : HourOption::H_24;
- }
// 7. Let hourCycle be ? GetOption(options, "hourCycle", "string", « "h11",
// "h12", "h23", "h24" », undefined).
- static std::vector<const char*> hour_cycle_values = {"h11", "h12", "h23",
- "h24"};
- std::unique_ptr<char[]> hour_cycle = nullptr;
- Maybe<bool> maybe_hour_cycle =
- Intl::GetStringOption(isolate, options, "hourCycle", hour_cycle_values,
- "Intl.DateTimeFormat", &hour_cycle);
- MAYBE_RETURN(maybe_hour_cycle, Handle<JSDateTimeFormat>());
+ Maybe<Intl::HourCycle> maybe_hour_cycle =
+ Intl::GetHourCycle(isolate, options, "Intl.DateTimeFormat");
+ MAYBE_RETURN(maybe_hour_cycle, MaybeHandle<JSDateTimeFormat>());
+ Intl::HourCycle hour_cycle = maybe_hour_cycle.FromJust();
+
// 8. If hour12 is not undefined, then
if (maybe_get_hour12.FromJust()) {
// a. Let hourCycle be null.
- hour_cycle = nullptr;
+ hour_cycle = Intl::HourCycle::kUndefined;
}
// 9. Set opt.[[hc]] to hourCycle.
- // TODO(ftang): change behavior based on hour_cycle.
- Handle<String> locale_with_extension_str =
- isolate->factory()->NewStringFromStaticChars("localeWithExtension");
- Handle<Object> locale_with_extension_obj =
- JSObject::GetDataProperty(r, locale_with_extension_str);
+ // ecma402/#sec-intl.datetimeformat-internal-slots
+ // The value of the [[RelevantExtensionKeys]] internal slot is
+ // « "ca", "nu", "hc" ».
+ std::set<std::string> relevant_extension_keys = {"nu", "ca", "hc"};
- // The locale_with_extension has to be a string. Either a user
- // provided canonicalized string or the default locale.
- CHECK(locale_with_extension_obj->IsString());
- Handle<String> locale_with_extension =
- Handle<String>::cast(locale_with_extension_obj);
+ // 10. Let localeData be %DateTimeFormat%.[[LocaleData]].
+ // 11. Let r be ResolveLocale( %DateTimeFormat%.[[AvailableLocales]],
+ // requestedLocales, opt, %DateTimeFormat%.[[RelevantExtensionKeys]],
+ // localeData).
+ //
+ Intl::ResolvedLocale r = Intl::ResolveLocale(
+ isolate, JSDateTimeFormat::GetAvailableLocales(), requested_locales,
+ locale_matcher, relevant_extension_keys);
- icu::Locale icu_locale =
- Intl::CreateICULocale(isolate, locale_with_extension);
+ icu::Locale icu_locale = r.icu_locale;
DCHECK(!icu_locale.isBogus());
+ if (!maybe_get_hour12.FromJust() &&
+ hour_cycle == Intl::HourCycle::kUndefined) {
+ auto hc_extension_it = r.extensions.find("hc");
+ if (hc_extension_it != r.extensions.end()) {
+ hour_cycle = Intl::ToHourCycle(hc_extension_it->second.c_str());
+ }
+ }
+
// 17. Let timeZone be ? Get(options, "timeZone").
- static std::vector<const char*> empty_values = {};
+ const std::vector<const char*> empty_values;
std::unique_ptr<char[]> timezone = nullptr;
Maybe<bool> maybe_timezone =
Intl::GetStringOption(isolate, options, "timeZone", empty_values,
"Intl.DateTimeFormat", &timezone);
MAYBE_RETURN(maybe_timezone, Handle<JSDateTimeFormat>());
+ std::unique_ptr<icu::Calendar> calendar(
+ CreateCalendar(isolate, icu_locale, timezone.get()));
+
+ // 18.b If the result of IsValidTimeZoneName(timeZone) is false, then
+ // i. Throw a RangeError exception.
+ if (calendar.get() == nullptr) {
+ THROW_NEW_ERROR(isolate,
+ NewRangeError(MessageTemplate::kInvalidTimeZone,
+ isolate->factory()->NewStringFromAsciiChecked(
+ timezone.get())),
+ JSDateTimeFormat);
+ }
+
+ // 29. If dateTimeFormat.[[Hour]] is not undefined, then
+ if (hour_cycle == Intl::HourCycle::kUndefined) {
+ // d. If hour12 is not undefined, then
+ if (maybe_get_hour12.FromJust()) {
+ // i. If hour12 is true, then
+ if (hour12) {
+ hour_cycle = Intl::HourCycle::kH12;
+ } else { // ii. Else,
+ hour_cycle = Intl::HourCycle::kH23;
+ }
+ }
+ }
+
+ bool has_hour_option = false;
// 22. For each row of Table 5, except the header row, do
std::string skeleton;
- for (const auto& item : GetPatternData(hour_option)) {
+ for (const PatternData& item : GetPatternData(hour_cycle)) {
std::unique_ptr<char[]> input;
// a. Let prop be the name given in the Property column of the row.
// b. Let value be ? GetOption(options, prop, "string", « the strings given
@@ -804,26 +946,29 @@ MaybeHandle<JSDateTimeFormat> JSDateTimeFormat::Initialize(
"Intl.DateTimeFormat", &input);
MAYBE_RETURN(maybe_get_option, Handle<JSDateTimeFormat>());
if (maybe_get_option.FromJust()) {
+ if (item.property == "hour") {
+ has_hour_option = true;
+ }
DCHECK_NOT_NULL(input.get());
// c. Set opt.[[<prop>]] to value.
skeleton += item.map.find(input.get())->second;
}
}
+ enum FormatMatcherOption { kBestFit, kBasic };
// We implement only best fit algorithm, but still need to check
// if the formatMatcher values are in range.
// 25. Let matcher be ? GetOption(options, "formatMatcher", "string",
// « "basic", "best fit" », "best fit").
- Handle<JSReceiver> options_obj;
- ASSIGN_RETURN_ON_EXCEPTION(isolate, options_obj,
- Object::ToObject(isolate, options),
- JSDateTimeFormat);
- std::unique_ptr<char[]> matcher_str = nullptr;
- std::vector<const char*> matcher_values = {"basic", "best fit"};
- Maybe<bool> maybe_found_matcher = Intl::GetStringOption(
- isolate, options_obj, "formatMatcher", matcher_values,
- "Intl.DateTimeFormat", &matcher_str);
- MAYBE_RETURN(maybe_found_matcher, Handle<JSDateTimeFormat>());
+ Maybe<FormatMatcherOption> maybe_format_matcher =
+ Intl::GetStringOption<FormatMatcherOption>(
+ isolate, options, "formatMatcher", "Intl.DateTimeFormat",
+ {"best fit", "basic"},
+ {FormatMatcherOption::kBestFit, FormatMatcherOption::kBasic},
+ FormatMatcherOption::kBestFit);
+ MAYBE_RETURN(maybe_format_matcher, MaybeHandle<JSDateTimeFormat>());
+ // TODO(ftang): uncomment the following line and handle format_matcher.
+ // FormatMatcherOption format_matcher = maybe_format_matcher.FromJust();
std::unique_ptr<icu::SimpleDateFormat> date_format(
CreateICUDateFormat(isolate, icu_locale, skeleton));
@@ -836,33 +981,38 @@ MaybeHandle<JSDateTimeFormat> JSDateTimeFormat::Initialize(
}
}
- // Set the locale
- // 12. Set dateTimeFormat.[[Locale]] to r.[[locale]].
- icu::Locale* cloned_locale = icu_locale.clone();
- CHECK_NOT_NULL(cloned_locale);
- Handle<Managed<icu::Locale>> managed_locale =
- Managed<icu::Locale>::FromRawPtr(isolate, 0, cloned_locale);
- date_time_format->set_icu_locale(*managed_locale);
-
+ // The creation of the Calendar depends on timeZone, so step 13 has to run
+ // after step 17. Also, date_format is not created until here.
// 13. Set dateTimeFormat.[[Calendar]] to r.[[ca]].
- std::unique_ptr<icu::Calendar> calendar(
- CreateCalendar(isolate, icu_locale, timezone.get()));
+ date_format->adoptCalendar(calendar.release());
- // 18.b If the result of IsValidTimeZoneName(timeZone) is false, then
- // i. Throw a RangeError exception.
- if (calendar.get() == nullptr) {
- THROW_NEW_ERROR(isolate,
- NewRangeError(MessageTemplate::kInvalidTimeZone,
- isolate->factory()->NewStringFromAsciiChecked(
- timezone.get())),
- JSDateTimeFormat);
+ // 29. If dateTimeFormat.[[Hour]] is not undefined, then
+ if (has_hour_option) {
+ // a. Let hcDefault be dataLocaleData.[[hourCycle]].
+ Intl::HourCycle hc_default = HourCycleDefault(date_format.get());
+ // b. Let hc be dateTimeFormat.[[HourCycle]].
+ Intl::HourCycle hc = hour_cycle;
+ // c. If hc is null, then
+ if (hc == Intl::HourCycle::kUndefined) {
+ // i. Set hc to hcDefault.
+ hc = hc_default;
+ }
+ // e. Set dateTimeFormat.[[HourCycle]] to hc.
+ date_time_format->set_hour_cycle(hc);
+ // 30. Else
+ } else {
+ // a. Set dateTimeFormat.[[HourCycle]] to undefined.
+ date_time_format->set_hour_cycle(Intl::HourCycle::kUndefined);
}
- date_format->adoptCalendar(calendar.release());
+ Handle<Managed<icu::Locale>> managed_locale =
+ Managed<icu::Locale>::FromRawPtr(isolate, 0, icu_locale.clone());
+ date_time_format->set_icu_locale(*managed_locale);
Handle<Managed<icu::SimpleDateFormat>> managed_format =
Managed<icu::SimpleDateFormat>::FromUniquePtr(isolate, 0,
std::move(date_format));
date_time_format->set_icu_simple_date_format(*managed_format);
+
return date_time_format;
}
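
The option loop above concatenates ICU skeleton symbols, so, for instance,
{ year: "numeric", month: "long", hour: "numeric" } under an h12 hour cycle
yields the skeleton "yMMMMh". CreateICUDateFormat itself is outside this hunk;
a plausible standalone sketch of the skeleton-to-pattern step it is assumed to
perform, via ICU's DateTimePatternGenerator (the real helper also handles
fallback on failure):

    #include <memory>
    #include "unicode/dtptngen.h"
    #include "unicode/locid.h"
    #include "unicode/smpdtfmt.h"

    std::unique_ptr<icu::SimpleDateFormat> FromSkeleton(
        const icu::Locale& locale, const icu::UnicodeString& skeleton) {
      UErrorCode status = U_ZERO_ERROR;
      std::unique_ptr<icu::DateTimePatternGenerator> generator(
          icu::DateTimePatternGenerator::createInstance(locale, status));
      if (U_FAILURE(status)) return nullptr;
      // Expand the skeleton to the locale's best-fit pattern; "yMMMMh"
      // might become "MMMM y, h a" under en_US data.
      icu::UnicodeString pattern = generator->getBestPattern(skeleton, status);
      if (U_FAILURE(status)) return nullptr;
      auto result =
          std::make_unique<icu::SimpleDateFormat>(pattern, locale, status);
      if (U_FAILURE(status)) return nullptr;
      return result;
    }
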
@@ -976,5 +1126,30 @@ MaybeHandle<Object> JSDateTimeFormat::FormatToParts(
JSObject::ValidateElements(*result);
return result;
}
+
+std::set<std::string> JSDateTimeFormat::GetAvailableLocales() {
+ int32_t num_locales = 0;
+ const icu::Locale* icu_available_locales =
+ icu::DateFormat::getAvailableLocales(num_locales);
+ return Intl::BuildLocaleSet(icu_available_locales, num_locales);
+}
+
+Handle<String> JSDateTimeFormat::HourCycleAsString() const {
+ switch (hour_cycle()) {
+ case Intl::HourCycle::kUndefined:
+ return GetReadOnlyRoots().undefined_string_handle();
+ case Intl::HourCycle::kH11:
+ return GetReadOnlyRoots().h11_string_handle();
+ case Intl::HourCycle::kH12:
+ return GetReadOnlyRoots().h12_string_handle();
+ case Intl::HourCycle::kH23:
+ return GetReadOnlyRoots().h23_string_handle();
+ case Intl::HourCycle::kH24:
+ return GetReadOnlyRoots().h24_string_handle();
+ default:
+ UNREACHABLE();
+ }
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/objects/js-date-time-format.h b/deps/v8/src/objects/js-date-time-format.h
index ae2aa36a97..1e28b38add 100644
--- a/deps/v8/src/objects/js-date-time-format.h
+++ b/deps/v8/src/objects/js-date-time-format.h
@@ -9,8 +9,13 @@
#ifndef V8_OBJECTS_JS_DATE_TIME_FORMAT_H_
#define V8_OBJECTS_JS_DATE_TIME_FORMAT_H_
+#include <set>
+#include <string>
+
#include "src/isolate.h"
+#include "src/objects/intl-objects.h"
#include "src/objects/managed.h"
+#include "unicode/uversion.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -18,7 +23,7 @@
namespace U_ICU_NAMESPACE {
class Locale;
class SimpleDateFormat;
-}
+} // namespace U_ICU_NAMESPACE
namespace v8 {
namespace internal {
@@ -64,32 +69,50 @@ class JSDateTimeFormat : public JSObject {
V8_WARN_UNUSED_RESULT static MaybeHandle<String> ToLocaleDateTime(
Isolate* isolate, Handle<Object> date, Handle<Object> locales,
- Handle<Object> options, RequiredOption required, DefaultsOption defaults,
- const char* service);
+ Handle<Object> options, RequiredOption required, DefaultsOption defaults);
+
+ static std::set<std::string> GetAvailableLocales();
+ Handle<String> HourCycleAsString() const;
DECL_CAST(JSDateTimeFormat)
// Layout description.
-#define JS_DATE_TIME_FORMAT_FIELDS(V) \
- V(kICULocaleOffset, kPointerSize) \
- V(kICUSimpleDateFormatOffset, kPointerSize) \
- V(kBoundFormatOffset, kPointerSize) \
- /* Total size. */ \
+#define JS_DATE_TIME_FORMAT_FIELDS(V) \
+ V(kICULocaleOffset, kTaggedSize) \
+ V(kICUSimpleDateFormatOffset, kTaggedSize) \
+ V(kBoundFormatOffset, kTaggedSize) \
+ V(kFlagsOffset, kTaggedSize) \
+ /* Total size. */ \
V(kSize, 0)
DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
JS_DATE_TIME_FORMAT_FIELDS)
#undef JS_DATE_TIME_FORMAT_FIELDS
+ inline void set_hour_cycle(Intl::HourCycle hour_cycle);
+ inline Intl::HourCycle hour_cycle() const;
+
+// Bit positions in |flags|.
+#define FLAGS_BIT_FIELDS(V, _) V(HourCycleBits, Intl::HourCycle, 3, _)
+
+ DEFINE_BIT_FIELDS(FLAGS_BIT_FIELDS)
+#undef FLAGS_BIT_FIELDS
+
+ STATIC_ASSERT(Intl::HourCycle::kUndefined <= HourCycleBits::kMax);
+ STATIC_ASSERT(Intl::HourCycle::kH11 <= HourCycleBits::kMax);
+ STATIC_ASSERT(Intl::HourCycle::kH12 <= HourCycleBits::kMax);
+ STATIC_ASSERT(Intl::HourCycle::kH23 <= HourCycleBits::kMax);
+ STATIC_ASSERT(Intl::HourCycle::kH24 <= HourCycleBits::kMax);
+
DECL_ACCESSORS(icu_locale, Managed<icu::Locale>)
DECL_ACCESSORS(icu_simple_date_format, Managed<icu::SimpleDateFormat>)
DECL_ACCESSORS(bound_format, Object)
+ DECL_INT_ACCESSORS(flags)
DECL_PRINTER(JSDateTimeFormat)
DECL_VERIFIER(JSDateTimeFormat)
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(JSDateTimeFormat);
+ OBJECT_CONSTRUCTORS(JSDateTimeFormat, JSObject);
};
} // namespace internal
diff --git a/deps/v8/src/objects/js-generator-inl.h b/deps/v8/src/objects/js-generator-inl.h
index 7eb372cb03..c2895e29f9 100644
--- a/deps/v8/src/objects/js-generator-inl.h
+++ b/deps/v8/src/objects/js-generator-inl.h
@@ -6,6 +6,7 @@
#define V8_OBJECTS_JS_GENERATOR_INL_H_
#include "src/objects/js-generator.h"
+#include "src/objects/js-promise-inl.h"
#include "src/objects-inl.h" // Needed for write barriers
@@ -15,8 +16,15 @@
namespace v8 {
namespace internal {
-CAST_ACCESSOR(JSGeneratorObject)
+OBJECT_CONSTRUCTORS_IMPL(JSGeneratorObject, JSObject)
+OBJECT_CONSTRUCTORS_IMPL(JSAsyncFunctionObject, JSGeneratorObject)
+OBJECT_CONSTRUCTORS_IMPL(JSAsyncGeneratorObject, JSGeneratorObject)
+OBJECT_CONSTRUCTORS_IMPL(AsyncGeneratorRequest, Struct)
+
+CAST_ACCESSOR(JSAsyncFunctionObject)
CAST_ACCESSOR(JSAsyncGeneratorObject)
+CAST_ACCESSOR(JSGeneratorObject)
+CAST_ACCESSOR(AsyncGeneratorRequest)
ACCESSORS(JSGeneratorObject, function, JSFunction, kFunctionOffset)
ACCESSORS(JSGeneratorObject, context, Context, kContextOffset)
@@ -27,6 +35,11 @@ SMI_ACCESSORS(JSGeneratorObject, continuation, kContinuationOffset)
ACCESSORS(JSGeneratorObject, parameters_and_registers, FixedArray,
kParametersAndRegistersOffset)
+ACCESSORS(AsyncGeneratorRequest, next, Object, kNextOffset)
+SMI_ACCESSORS(AsyncGeneratorRequest, resume_mode, kResumeModeOffset)
+ACCESSORS(AsyncGeneratorRequest, value, Object, kValueOffset)
+ACCESSORS(AsyncGeneratorRequest, promise, Object, kPromiseOffset)
+
bool JSGeneratorObject::is_suspended() const {
DCHECK_LT(kGeneratorExecuting, 0);
DCHECK_LT(kGeneratorClosed, 0);
@@ -41,6 +54,8 @@ bool JSGeneratorObject::is_executing() const {
return continuation() == kGeneratorExecuting;
}
+ACCESSORS(JSAsyncFunctionObject, promise, JSPromise, kPromiseOffset)
+
ACCESSORS(JSAsyncGeneratorObject, queue, HeapObject, kQueueOffset)
SMI_ACCESSORS(JSAsyncGeneratorObject, is_awaiting, kIsAwaitingOffset)
diff --git a/deps/v8/src/objects/js-generator.h b/deps/v8/src/objects/js-generator.h
index 043b457cf0..53541fc997 100644
--- a/deps/v8/src/objects/js-generator.h
+++ b/deps/v8/src/objects/js-generator.h
@@ -6,6 +6,7 @@
#define V8_OBJECTS_JS_GENERATOR_H_
#include "src/objects/js-objects.h"
+#include "src/objects/struct.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -13,6 +14,9 @@
namespace v8 {
namespace internal {
+// Forward declarations.
+class JSPromise;
+
class JSGeneratorObject : public JSObject {
public:
// [function]: The function corresponding to this generator object.
@@ -64,18 +68,44 @@ class JSGeneratorObject : public JSObject {
static const int kGeneratorClosed = -1;
// Layout description.
- static const int kFunctionOffset = JSObject::kHeaderSize;
- static const int kContextOffset = kFunctionOffset + kPointerSize;
- static const int kReceiverOffset = kContextOffset + kPointerSize;
- static const int kInputOrDebugPosOffset = kReceiverOffset + kPointerSize;
- static const int kResumeModeOffset = kInputOrDebugPosOffset + kPointerSize;
- static const int kContinuationOffset = kResumeModeOffset + kPointerSize;
- static const int kParametersAndRegistersOffset =
- kContinuationOffset + kPointerSize;
- static const int kSize = kParametersAndRegistersOffset + kPointerSize;
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(JSGeneratorObject);
+#define JS_GENERATOR_FIELDS(V) \
+ V(kFunctionOffset, kTaggedSize) \
+ V(kContextOffset, kTaggedSize) \
+ V(kReceiverOffset, kTaggedSize) \
+ V(kInputOrDebugPosOffset, kTaggedSize) \
+ V(kResumeModeOffset, kTaggedSize) \
+ V(kContinuationOffset, kTaggedSize) \
+ V(kParametersAndRegistersOffset, kTaggedSize) \
+ /* Header size. */ \
+ V(kSize, 0)
+
+ DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize, JS_GENERATOR_FIELDS)
+#undef JS_GENERATOR_FIELDS
+
+ OBJECT_CONSTRUCTORS(JSGeneratorObject, JSObject);
+};
+
+class JSAsyncFunctionObject : public JSGeneratorObject {
+ public:
+ DECL_CAST(JSAsyncFunctionObject)
+
+ // Dispatched behavior.
+ DECL_VERIFIER(JSAsyncFunctionObject)
+
+ // [promise]: The promise of the async function.
+ DECL_ACCESSORS(promise, JSPromise)
+
+ // Layout description.
+#define JS_ASYNC_FUNCTION_FIELDS(V) \
+ V(kPromiseOffset, kTaggedSize) \
+ /* Header size. */ \
+ V(kSize, 0)
+
+ DEFINE_FIELD_OFFSET_CONSTANTS(JSGeneratorObject::kSize,
+ JS_ASYNC_FUNCTION_FIELDS)
+#undef JS_ASYNC_FUNCTION_FIELDS
+
+ OBJECT_CONSTRUCTORS(JSAsyncFunctionObject, JSGeneratorObject);
};
class JSAsyncGeneratorObject : public JSGeneratorObject {
@@ -95,12 +125,45 @@ class JSAsyncGeneratorObject : public JSGeneratorObject {
DECL_INT_ACCESSORS(is_awaiting)
// Layout description.
- static const int kQueueOffset = JSGeneratorObject::kSize;
- static const int kIsAwaitingOffset = kQueueOffset + kPointerSize;
- static const int kSize = kIsAwaitingOffset + kPointerSize;
+#define JS_ASYNC_GENERATOR_FIELDS(V) \
+ V(kQueueOffset, kTaggedSize) \
+ V(kIsAwaitingOffset, kTaggedSize) \
+ /* Header size. */ \
+ V(kSize, 0)
+
+ DEFINE_FIELD_OFFSET_CONSTANTS(JSGeneratorObject::kSize,
+ JS_ASYNC_GENERATOR_FIELDS)
+#undef JS_ASYNC_GENERATOR_FIELDS
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(JSAsyncGeneratorObject);
+ OBJECT_CONSTRUCTORS(JSAsyncGeneratorObject, JSGeneratorObject);
+};
+
+class AsyncGeneratorRequest : public Struct {
+ public:
+ // Holds an AsyncGeneratorRequest, or Undefined.
+ DECL_ACCESSORS(next, Object)
+ DECL_INT_ACCESSORS(resume_mode)
+ DECL_ACCESSORS(value, Object)
+ DECL_ACCESSORS(promise, Object)
+
+// Layout description.
+#define ASYNC_GENERATOR_REQUEST_FIELDS(V) \
+ V(kNextOffset, kTaggedSize) \
+ V(kResumeModeOffset, kTaggedSize) \
+ V(kValueOffset, kTaggedSize) \
+ V(kPromiseOffset, kTaggedSize) \
+ /* Total size. */ \
+ V(kSize, 0)
+
+ DEFINE_FIELD_OFFSET_CONSTANTS(Struct::kHeaderSize,
+ ASYNC_GENERATOR_REQUEST_FIELDS)
+#undef ASYNC_GENERATOR_REQUEST_FIELDS
+
+ DECL_CAST(AsyncGeneratorRequest)
+ DECL_PRINTER(AsyncGeneratorRequest)
+ DECL_VERIFIER(AsyncGeneratorRequest)
+
+ OBJECT_CONSTRUCTORS(AsyncGeneratorRequest, Struct);
};
} // namespace internal
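
Replacing hand-summed kPointerSize offsets with V(name, size) lists and
kTaggedSize is what makes these layouts ready for pointer compression: change
one constant and every offset follows. A simplified sketch of how the
DEFINE_FIELD_OFFSET_CONSTANTS expansion is assumed to work, using enum
auto-increment to chain the offsets:

    constexpr int kTaggedSize = 4;  // assumption: compressed tagged pointers

    // Each V(Name, Size) entry becomes two enum constants, Name and NameEnd,
    // so the next field automatically starts where the previous one ended.
    #define DEFINE_ONE_FIELD_OFFSET(Name, Size) Name, Name##End = Name + (Size)-1,

    #define DEFINE_FIELD_OFFSET_CONSTANTS(StartOffset, LIST_MACRO) \
      enum { LIST_MACRO##_StartOffset = StartOffset - 1,           \
             LIST_MACRO(DEFINE_ONE_FIELD_OFFSET) };

    #define DEMO_FIELDS(V)              \
      V(kQueueOffset, kTaggedSize)      \
      V(kIsAwaitingOffset, kTaggedSize) \
      V(kSize, 0)

    DEFINE_FIELD_OFFSET_CONSTANTS(16, DEMO_FIELDS)  // 16 = parent header size

    static_assert(kQueueOffset == 16, "first field starts at the header size");
    static_assert(kIsAwaitingOffset == kQueueOffset + kTaggedSize, "chained");
    static_assert(kSize == kIsAwaitingOffset + kTaggedSize, "total size");
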
diff --git a/deps/v8/src/objects/js-list-format-inl.h b/deps/v8/src/objects/js-list-format-inl.h
index 0f1395719e..fd7417959a 100644
--- a/deps/v8/src/objects/js-list-format-inl.h
+++ b/deps/v8/src/objects/js-list-format-inl.h
@@ -18,6 +18,8 @@
namespace v8 {
namespace internal {
+OBJECT_CONSTRUCTORS_IMPL(JSListFormat, JSObject)
+
// Base list format accessors.
ACCESSORS(JSListFormat, locale, String, kLocaleOffset)
ACCESSORS(JSListFormat, icu_formatter, Managed<icu::ListFormatter>,
diff --git a/deps/v8/src/objects/js-list-format.cc b/deps/v8/src/objects/js-list-format.cc
index d2713d489f..e6f9c76a61 100644
--- a/deps/v8/src/objects/js-list-format.cc
+++ b/deps/v8/src/objects/js-list-format.cc
@@ -11,6 +11,7 @@
#include <memory>
#include <vector>
+#include "src/elements-inl.h"
#include "src/elements.h"
#include "src/heap/factory.h"
#include "src/isolate.h"
@@ -19,7 +20,10 @@
#include "src/objects/js-array-inl.h"
#include "src/objects/js-list-format-inl.h"
#include "src/objects/managed.h"
+#include "unicode/fieldpos.h"
+#include "unicode/fpositer.h"
#include "unicode/listformatter.h"
+#include "unicode/ulistformatter.h"
namespace v8 {
namespace internal {
@@ -41,31 +45,29 @@ const char* GetIcuStyleString(JSListFormat::Style style,
return kStandard;
case JSListFormat::Style::SHORT:
return kStandardShort;
+ // NARROW is allowed only when type is "unit". This case is unreachable
+ // because we've already thrown a RangeError when style is "narrow" and
+ // type is not "unit".
case JSListFormat::Style::NARROW:
- // Currently, ListFormat::createInstance on "standard-narrow" will
- // fail so we use "standard-short" here.
- // See https://unicode.org/cldr/trac/ticket/11254
- // TODO(ftang): change to return kStandardNarrow; after the above
- // issue fixed in CLDR/ICU.
- // CLDR bug: https://unicode.org/cldr/trac/ticket/11254
- // ICU bug: https://unicode-org.atlassian.net/browse/ICU-20014
- return kStandardShort;
case JSListFormat::Style::COUNT:
UNREACHABLE();
}
case JSListFormat::Type::DISJUNCTION:
switch (style) {
- // Currently, ListFormat::createInstance on "or-short" and "or-narrow"
+ // Currently, ListFormat::createInstance on "or-short"
// will fail so we use "or" here.
// See https://unicode.org/cldr/trac/ticket/11254
- // TODO(ftang): change to return kOr, kOrShort or kOrNarrow depend on
+ // TODO(ftang): change to return kOr or kOrShort depend on
// style after the above issue fixed in CLDR/ICU.
// CLDR bug: https://unicode.org/cldr/trac/ticket/11254
// ICU bug: https://unicode-org.atlassian.net/browse/ICU-20014
case JSListFormat::Style::LONG:
case JSListFormat::Style::SHORT:
- case JSListFormat::Style::NARROW:
return kOr;
+ // NARROW is allowed only when type is "unit". This case is unreachable
+ // because we've already thrown a RangeError when style is "narrow" and
+ // type is not "unit".
+ case JSListFormat::Style::NARROW:
case JSListFormat::Style::COUNT:
UNREACHABLE();
}
@@ -120,73 +122,89 @@ JSListFormat::Type get_type(const char* str) {
}
MaybeHandle<JSListFormat> JSListFormat::Initialize(
- Isolate* isolate, Handle<JSListFormat> list_format_holder,
- Handle<Object> input_locales, Handle<Object> input_options) {
- Factory* factory = isolate->factory();
- list_format_holder->set_flags(0);
+ Isolate* isolate, Handle<JSListFormat> list_format, Handle<Object> locales,
+ Handle<Object> input_options) {
+ list_format->set_flags(0);
Handle<JSReceiver> options;
- // 2. If options is undefined, then
+ // 3. Let requestedLocales be ? CanonicalizeLocaleList(locales).
+ Maybe<std::vector<std::string>> maybe_requested_locales =
+ Intl::CanonicalizeLocaleList(isolate, locales);
+ MAYBE_RETURN(maybe_requested_locales, Handle<JSListFormat>());
+ std::vector<std::string> requested_locales =
+ maybe_requested_locales.FromJust();
+
+ // 4. If options is undefined, then
if (input_options->IsUndefined(isolate)) {
- // a. Let options be ObjectCreate(null).
+ // 4. a. Let options be ObjectCreate(null).
options = isolate->factory()->NewJSObjectWithNullProto();
- // 3. Else
+ // 5. Else
} else {
- // a. Let options be ? ToObject(options).
+ // 5. a. Let options be ? ToObject(options).
ASSIGN_RETURN_ON_EXCEPTION(isolate, options,
Object::ToObject(isolate, input_options),
JSListFormat);
}
- // 5. Let t be GetOption(options, "type", "string", «"conjunction",
+ // Note: No need to create a record. It's not observable.
+ // 6. Let opt be a new Record.
+
+ // 7. Let matcher be ? GetOption(options, "localeMatcher", "string", «
+ // "lookup", "best fit" », "best fit").
+ Maybe<Intl::MatcherOption> maybe_locale_matcher =
+ Intl::GetLocaleMatcher(isolate, options, "Intl.ListFormat");
+ MAYBE_RETURN(maybe_locale_matcher, MaybeHandle<JSListFormat>());
+
+ // 8. Set opt.[[localeMatcher]] to matcher.
+ Intl::MatcherOption matcher = maybe_locale_matcher.FromJust();
+
+ // 10. Let r be ResolveLocale(%ListFormat%.[[AvailableLocales]],
+ // requestedLocales, opt, undefined, localeData).
+ Intl::ResolvedLocale r =
+ Intl::ResolveLocale(isolate, JSListFormat::GetAvailableLocales(),
+ requested_locales, matcher, {});
+
+ // 11. Set listFormat.[[Locale]] to r.[[Locale]].
+ Handle<String> locale_str =
+ isolate->factory()->NewStringFromAsciiChecked(r.locale.c_str());
+ list_format->set_locale(*locale_str);
+
+ // 12. Let t be GetOption(options, "type", "string", «"conjunction",
// "disjunction", "unit"», "conjunction").
- std::unique_ptr<char[]> type_str = nullptr;
- std::vector<const char*> type_values = {"conjunction", "disjunction", "unit"};
- Maybe<bool> maybe_found_type = Intl::GetStringOption(
- isolate, options, "type", type_values, "Intl.ListFormat", &type_str);
- Type type_enum = Type::CONJUNCTION;
- MAYBE_RETURN(maybe_found_type, MaybeHandle<JSListFormat>());
- if (maybe_found_type.FromJust()) {
- DCHECK_NOT_NULL(type_str.get());
- type_enum = get_type(type_str.get());
- }
- // 6. Set listFormat.[[Type]] to t.
- list_format_holder->set_type(type_enum);
+ Maybe<Type> maybe_type = Intl::GetStringOption<Type>(
+ isolate, options, "type", "Intl.ListFormat",
+ {"conjunction", "disjunction", "unit"},
+ {Type::CONJUNCTION, Type::DISJUNCTION, Type::UNIT}, Type::CONJUNCTION);
+ MAYBE_RETURN(maybe_type, MaybeHandle<JSListFormat>());
+ Type type_enum = maybe_type.FromJust();
+
+ // 13. Set listFormat.[[Type]] to t.
+ list_format->set_type(type_enum);
+
+ // NOTE: Keep the old way of GetOption on style for now. A disadvantage of
+ // following the latest spec was discovered, so rolling that part back is
+ // proposed in https://github.com/tc39/proposal-intl-list-format/pull/40
- // 7. Let s be ? GetOption(options, "style", "string",
+ // Let s be ? GetOption(options, "style", "string",
// «"long", "short", "narrow"», "long").
- std::unique_ptr<char[]> style_str = nullptr;
- std::vector<const char*> style_values = {"long", "short", "narrow"};
- Maybe<bool> maybe_found_style = Intl::GetStringOption(
- isolate, options, "style", style_values, "Intl.ListFormat", &style_str);
- Style style_enum = Style::LONG;
- MAYBE_RETURN(maybe_found_style, MaybeHandle<JSListFormat>());
- if (maybe_found_style.FromJust()) {
- DCHECK_NOT_NULL(style_str.get());
- style_enum = get_style(style_str.get());
+ Maybe<Style> maybe_style = Intl::GetStringOption<Style>(
+ isolate, options, "style", "Intl.ListFormat", {"long", "short", "narrow"},
+ {Style::LONG, Style::SHORT, Style::NARROW}, Style::LONG);
+ MAYBE_RETURN(maybe_style, MaybeHandle<JSListFormat>());
+ Style style_enum = maybe_style.FromJust();
+
+ // If _style_ is `"narrow"` and _type_ is not `"unit"`, throw a *RangeError*
+ // exception.
+ if (style_enum == Style::NARROW && type_enum != Type::UNIT) {
+ THROW_NEW_ERROR(
+ isolate, NewRangeError(MessageTemplate::kIllegalTypeWhileStyleNarrow),
+ JSListFormat);
}
- // 15. Set listFormat.[[Style]] to s.
- list_format_holder->set_style(style_enum);
- // 10. Let r be ResolveLocale(%ListFormat%.[[AvailableLocales]],
- // requestedLocales, opt, undefined, localeData).
- Handle<JSObject> r;
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, r,
- Intl::ResolveLocale(isolate, "listformat", input_locales, options),
- JSListFormat);
-
- Handle<Object> locale_obj =
- JSObject::GetDataProperty(r, factory->locale_string());
- Handle<String> locale;
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, locale, Object::ToString(isolate, locale_obj), JSListFormat);
-
- // 18. Set listFormat.[[Locale]] to the value of r.[[Locale]].
- list_format_holder->set_locale(*locale);
-
- std::unique_ptr<char[]> locale_name = locale->ToCString();
- icu::Locale icu_locale(locale_name.get());
+ // 17. Set listFormat.[[Style]] to s.
+ list_format->set_style(style_enum);
+
+ icu::Locale icu_locale = r.icu_locale;
UErrorCode status = U_ZERO_ERROR;
icu::ListFormatter* formatter = icu::ListFormatter::createInstance(
icu_locale, GetIcuStyleString(style_enum, type_enum), status);
@@ -199,21 +217,31 @@ MaybeHandle<JSListFormat> JSListFormat::Initialize(
Handle<Managed<icu::ListFormatter>> managed_formatter =
Managed<icu::ListFormatter>::FromRawPtr(isolate, 0, formatter);
- list_format_holder->set_icu_formatter(*managed_formatter);
- return list_format_holder;
+ list_format->set_icu_formatter(*managed_formatter);
+ return list_format;
}
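
The templated Intl::GetStringOption<T> used above replaces the
char-buffer-and-strcmp dance: it validates the option against the allowed
strings and maps it to the enum at the same position, falling back to a
default when the option is absent. A standalone sketch of that shape
(hypothetical signature; the real helper reads the property and throws the
RangeError through the isolate):

    #include <cassert>
    #include <string>
    #include <vector>

    template <typename T>
    bool GetStringOption(const std::string* value,  // nullptr == option absent
                         const std::vector<const char*>& str_values,
                         const std::vector<T>& enum_values, T default_value,
                         T* out) {
      if (value == nullptr) {
        *out = default_value;  // option not provided: use the default
        return true;
      }
      for (size_t i = 0; i < str_values.size(); i++) {
        if (*value == str_values[i]) {
          *out = enum_values[i];  // allowed string: pick the parallel enum
          return true;
        }
      }
      return false;  // invalid value: the caller throws a RangeError
    }

    int main() {
      enum class Type { kConjunction, kDisjunction, kUnit };
      Type type;
      std::string requested = "unit";
      assert(GetStringOption<Type>(
          &requested, {"conjunction", "disjunction", "unit"},
          {Type::kConjunction, Type::kDisjunction, Type::kUnit},
          Type::kConjunction, &type));
      assert(type == Type::kUnit);
    }
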
-Handle<JSObject> JSListFormat::ResolvedOptions(
- Isolate* isolate, Handle<JSListFormat> format_holder) {
+// ecma402 #sec-intl.listformat.prototype.resolvedoptions
+Handle<JSObject> JSListFormat::ResolvedOptions(Isolate* isolate,
+ Handle<JSListFormat> format) {
Factory* factory = isolate->factory();
+ // 4. Let options be ! ObjectCreate(%ObjectPrototype%).
Handle<JSObject> result = factory->NewJSObject(isolate->object_function());
- Handle<String> locale(format_holder->locale(), isolate);
+
+ // 5. For each row of Table 1, except the header row, do
+ // Table 1: Resolved Options of ListFormat Instances
+ // Internal Slot Property
+ // [[Locale]] "locale"
+ // [[Type]] "type"
+ // [[Style]] "style"
+ Handle<String> locale(format->locale(), isolate);
JSObject::AddProperty(isolate, result, factory->locale_string(), locale,
NONE);
- JSObject::AddProperty(isolate, result, factory->style_string(),
- format_holder->StyleAsString(), NONE);
JSObject::AddProperty(isolate, result, factory->type_string(),
- format_holder->TypeAsString(), NONE);
+ format->TypeAsString(), NONE);
+ JSObject::AddProperty(isolate, result, factory->style_string(),
+ format->StyleAsString(), NONE);
+ // 6. Return options.
return result;
}
@@ -245,47 +273,41 @@ Handle<String> JSListFormat::TypeAsString() const {
namespace {
-// TODO(ftang) remove the following hack after icu::ListFormat support
-// FieldPosition.
-// This is a temporary workaround until icu::ListFormat support FieldPosition
-// It is inefficient and won't work correctly on the edge case that the input
-// contains fraction of the list pattern.
-// For example the following under English will mark the "an" incorrectly
-// since the formatted is "a, b, and an".
-// listFormat.formatToParts(["a", "b", "an"])
-// https://ssl.icu-project.org/trac/ticket/13754
MaybeHandle<JSArray> GenerateListFormatParts(
Isolate* isolate, const icu::UnicodeString& formatted,
- const icu::UnicodeString items[], int length) {
+ const std::vector<icu::FieldPosition>& positions) {
Factory* factory = isolate->factory();
- int estimate_size = length * 2 + 1;
- Handle<JSArray> array = factory->NewJSArray(estimate_size);
+ Handle<JSArray> array =
+ factory->NewJSArray(static_cast<int>(positions.size()));
int index = 0;
- int last_pos = 0;
- for (int i = 0; i < length; i++) {
- int found = formatted.indexOf(items[i], last_pos);
- DCHECK_GE(found, 0);
- if (found > last_pos) {
- Handle<String> substring;
+ int prev_item_end_index = 0;
+ Handle<String> substring;
+ for (const icu::FieldPosition pos : positions) {
+ CHECK(pos.getBeginIndex() >= prev_item_end_index);
+ CHECK(pos.getField() == ULISTFMT_ELEMENT_FIELD);
+ if (pos.getBeginIndex() != prev_item_end_index) {
ASSIGN_RETURN_ON_EXCEPTION(
isolate, substring,
- Intl::ToString(isolate, formatted, last_pos, found), JSArray);
+ Intl::ToString(isolate, formatted, prev_item_end_index,
+ pos.getBeginIndex()),
+ JSArray);
Intl::AddElement(isolate, array, index++, factory->literal_string(),
substring);
}
- last_pos = found + items[i].length();
- Handle<String> substring;
ASSIGN_RETURN_ON_EXCEPTION(
- isolate, substring, Intl::ToString(isolate, formatted, found, last_pos),
+ isolate, substring,
+ Intl::ToString(isolate, formatted, pos.getBeginIndex(),
+ pos.getEndIndex()),
JSArray);
Intl::AddElement(isolate, array, index++, factory->element_string(),
substring);
+ prev_item_end_index = pos.getEndIndex();
}
- if (last_pos < formatted.length()) {
- Handle<String> substring;
+ if (prev_item_end_index != formatted.length()) {
ASSIGN_RETURN_ON_EXCEPTION(
isolate, substring,
- Intl::ToString(isolate, formatted, last_pos, formatted.length()),
+ Intl::ToString(isolate, formatted, prev_item_end_index,
+ formatted.length()),
JSArray);
Intl::AddElement(isolate, array, index++, factory->literal_string(),
substring);
@@ -293,103 +315,137 @@ MaybeHandle<JSArray> GenerateListFormatParts(
return array;
}
+// Collect all FieldPositions from the FieldPositionIterator into a vector
+// and return them in formatted-output order.
+std::vector<icu::FieldPosition> GenerateFieldPosition(
+ icu::FieldPositionIterator iter) {
+ std::vector<icu::FieldPosition> positions;
+ icu::FieldPosition pos;
+ while (iter.next(pos)) {
+ // Only take the information of the ULISTFMT_ELEMENT_FIELD field.
+ if (pos.getField() == ULISTFMT_ELEMENT_FIELD) {
+ positions.push_back(pos);
+ }
+ }
+ // Because formatting may reorder the items, ICU's FieldPositionIterator
+ // reports FieldPositions in the order of the input items, while the
+ // formatToParts API in ECMA402 expects them in formatted-output order.
+ // Therefore we have to sort by the beginIndex of each FieldPosition.
+ // An example is the "ur" (Urdu) locale with type "unit": the main text
+ // flows right to left, but the formatted list of units flows left to
+ // right, so under the current CLDR patterns the first input item ends up
+ // last in the result string.
+ // See the 'listPattern' pattern in
+ // third_party/icu/source/data/locales/ur_IN.txt
+ std::sort(positions.begin(), positions.end(),
+ [](icu::FieldPosition a, icu::FieldPosition b) {
+ return a.getBeginIndex() < b.getBeginIndex();
+ });
+ return positions;
+}
+
// Extract String from JSArray into array of UnicodeString
-Maybe<bool> ToUnicodeStringArray(Isolate* isolate, Handle<JSArray> array,
- icu::UnicodeString items[], uint32_t length) {
+Maybe<std::vector<icu::UnicodeString>> ToUnicodeStringArray(
+ Isolate* isolate, Handle<JSArray> array) {
Factory* factory = isolate->factory();
// In general, ElementsAccessor::Get actually isn't guaranteed to give us the
- // elements in order. But given that it was created by a builtin we control,
- // it shouldn't be possible for it to be problematic. Add DCHECK to ensure
- // that.
- DCHECK(array->HasFastPackedElements());
+ // elements in order. But for a holey array, the holes fail the IsString
+ // check below and throw the TypeError required by the spec.
auto* accessor = array->GetElementsAccessor();
- DCHECK(length == accessor->NumberOfElements(*array));
+ uint32_t length = accessor->NumberOfElements(*array);
+
// ecma402 #sec-createpartsfromlist
// 2. If list contains any element value such that Type(value) is not String,
// throw a TypeError exception.
//
// Per spec it looks like we're supposed to throw a TypeError exception if the
- // item isn't already a string, rather than coercing to a string. Moreover,
- // the way the spec's written it looks like we're supposed to run through the
- // whole list to check that they're all strings before going further.
+ // item isn't already a string, rather than coercing to a string.
+ std::vector<icu::UnicodeString> result;
for (uint32_t i = 0; i < length; i++) {
+ DCHECK(accessor->HasElement(*array, i));
Handle<Object> item = accessor->Get(array, i);
DCHECK(!item.is_null());
if (!item->IsString()) {
THROW_NEW_ERROR_RETURN_VALUE(
isolate,
NewTypeError(MessageTemplate::kArrayItemNotType,
- factory->NewStringFromStaticChars("list"),
- factory->NewNumber(i),
- factory->NewStringFromStaticChars("String")),
- Nothing<bool>());
+ factory->list_string(),
+ // TODO(ftang): For dictionary-mode arrays, i isn't
+ // actually the index in the array but the index in the
+ // dictionary.
+ factory->NewNumber(i), factory->String_string()),
+ Nothing<std::vector<icu::UnicodeString>>());
}
+ result.push_back(
+ Intl::ToICUUnicodeString(isolate, Handle<String>::cast(item)));
}
- for (uint32_t i = 0; i < length; i++) {
- Handle<String> string = Handle<String>::cast(accessor->Get(array, i));
- DisallowHeapAllocation no_gc;
- string = String::Flatten(isolate, string);
- std::unique_ptr<uc16[]> sap;
- items[i] =
- icu::UnicodeString(GetUCharBufferFromFlat(string->GetFlatContent(),
- &sap, string->length()),
- string->length());
- }
- return Just(true);
+ DCHECK(!array->HasDictionaryElements());
+ return Just(result);
}
} // namespace
-Maybe<bool> FormatListCommon(Isolate* isolate,
- Handle<JSListFormat> format_holder,
- Handle<JSArray> list,
- icu::UnicodeString& formatted, uint32_t* length,
- std::unique_ptr<icu::UnicodeString[]>& array) {
+// ecma402 #sec-formatlist
+MaybeHandle<String> JSListFormat::FormatList(Isolate* isolate,
+ Handle<JSListFormat> format,
+ Handle<JSArray> list) {
DCHECK(!list->IsUndefined());
-
- icu::ListFormatter* formatter = format_holder->icu_formatter()->raw();
- CHECK_NOT_NULL(formatter);
-
- *length = list->GetElementsAccessor()->NumberOfElements(*list);
- array.reset(new icu::UnicodeString[*length]);
-
// ecma402 #sec-createpartsfromlist
// 2. If list contains any element value such that Type(value) is not String,
// throw a TypeError exception.
- MAYBE_RETURN(ToUnicodeStringArray(isolate, list, array.get(), *length),
- Nothing<bool>());
+ Maybe<std::vector<icu::UnicodeString>> maybe_array =
+ ToUnicodeStringArray(isolate, list);
+ MAYBE_RETURN(maybe_array, Handle<String>());
+ std::vector<icu::UnicodeString> array = maybe_array.FromJust();
+
+ icu::ListFormatter* formatter = format->icu_formatter()->raw();
+ CHECK_NOT_NULL(formatter);
UErrorCode status = U_ZERO_ERROR;
- formatter->format(array.get(), *length, formatted, status);
+ icu::UnicodeString formatted;
+ formatter->format(array.data(), static_cast<int32_t>(array.size()), formatted,
+ status);
DCHECK(U_SUCCESS(status));
- return Just(true);
-}
-// ecma402 #sec-formatlist
-MaybeHandle<String> JSListFormat::FormatList(Isolate* isolate,
- Handle<JSListFormat> format_holder,
- Handle<JSArray> list) {
- icu::UnicodeString formatted;
- uint32_t length;
- std::unique_ptr<icu::UnicodeString[]> array;
- MAYBE_RETURN(
- FormatListCommon(isolate, format_holder, list, formatted, &length, array),
- Handle<String>());
return Intl::ToString(isolate, formatted);
}
+std::set<std::string> JSListFormat::GetAvailableLocales() {
+ int32_t num_locales = 0;
+ // TODO(ftang): for now just use
+ // icu::Locale::getAvailableLocales(count) until we migrate to
+ // Intl::GetAvailableLocales().
+ // ICU FR at https://unicode-org.atlassian.net/browse/ICU-20015
+ const icu::Locale* icu_available_locales =
+ icu::Locale::getAvailableLocales(num_locales);
+ return Intl::BuildLocaleSet(icu_available_locales, num_locales);
+}
+
// ecma402 #sec-formatlisttoparts
MaybeHandle<JSArray> JSListFormat::FormatListToParts(
- Isolate* isolate, Handle<JSListFormat> format_holder,
- Handle<JSArray> list) {
+ Isolate* isolate, Handle<JSListFormat> format, Handle<JSArray> list) {
+ DCHECK(!list->IsUndefined());
+ // ecma402 #sec-createpartsfromlist
+ // 2. If list contains any element value such that Type(value) is not String,
+ // throw a TypeError exception.
+ Maybe<std::vector<icu::UnicodeString>> maybe_array =
+ ToUnicodeStringArray(isolate, list);
+ MAYBE_RETURN(maybe_array, Handle<JSArray>());
+ std::vector<icu::UnicodeString> array = maybe_array.FromJust();
+
+ icu::ListFormatter* formatter = format->icu_formatter()->raw();
+ CHECK_NOT_NULL(formatter);
+
+ UErrorCode status = U_ZERO_ERROR;
icu::UnicodeString formatted;
- uint32_t length;
- std::unique_ptr<icu::UnicodeString[]> array;
- MAYBE_RETURN(
- FormatListCommon(isolate, format_holder, list, formatted, &length, array),
- Handle<JSArray>());
- return GenerateListFormatParts(isolate, formatted, array.get(), length);
-}
+ icu::FieldPositionIterator iter;
+ formatter->format(array.data(), static_cast<int32_t>(array.size()), formatted,
+ &iter, status);
+ DCHECK(U_SUCCESS(status));
+ std::vector<icu::FieldPosition> field_positions = GenerateFieldPosition(iter);
+ return GenerateListFormatParts(isolate, formatted, field_positions);
+}
} // namespace internal
} // namespace v8
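
Taken together, the new FormatListToParts asks ICU for exact element ranges
instead of searching the formatted string for each input. A minimal standalone
sketch of the same ICU calls used above, assuming "en" locale data:

    #include <iostream>
    #include <memory>
    #include <string>
    #include <vector>
    #include "unicode/fieldpos.h"
    #include "unicode/fpositer.h"
    #include "unicode/listformatter.h"
    #include "unicode/ulistformatter.h"

    int main() {
      UErrorCode status = U_ZERO_ERROR;
      std::unique_ptr<icu::ListFormatter> formatter(
          icu::ListFormatter::createInstance(icu::Locale("en"), status));
      if (U_FAILURE(status)) return 1;
      std::vector<icu::UnicodeString> items = {icu::UnicodeString(u"a"),
                                               icu::UnicodeString(u"b"),
                                               icu::UnicodeString(u"c")};
      icu::UnicodeString formatted;  // becomes "a, b, and c" under en data
      icu::FieldPositionIterator iter;
      formatter->format(items.data(), static_cast<int32_t>(items.size()),
                        formatted, &iter, status);
      if (U_FAILURE(status)) return 1;
      icu::FieldPosition pos;
      while (iter.next(pos)) {
        if (pos.getField() != ULISTFMT_ELEMENT_FIELD) continue;
        std::string element;
        formatted.tempSubStringBetween(pos.getBeginIndex(), pos.getEndIndex())
            .toUTF8String(element);
        std::cout << "element: " << element << std::endl;  // a, then b, then c
      }
      return 0;
    }
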
diff --git a/deps/v8/src/objects/js-list-format.h b/deps/v8/src/objects/js-list-format.h
index e9bfec7cc8..1ae6fcdb84 100644
--- a/deps/v8/src/objects/js-list-format.h
+++ b/deps/v8/src/objects/js-list-format.h
@@ -9,6 +9,9 @@
#ifndef V8_OBJECTS_JS_LIST_FORMAT_H_
#define V8_OBJECTS_JS_LIST_FORMAT_H_
+#include <set>
+#include <string>
+
#include "src/heap/factory.h"
#include "src/isolate.h"
#include "src/objects.h"
@@ -46,6 +49,8 @@ class JSListFormat : public JSObject {
Isolate* isolate, Handle<JSListFormat> format_holder,
Handle<JSArray> list);
+ static std::set<std::string> GetAvailableLocales();
+
Handle<String> StyleAsString() const;
Handle<String> TypeAsString() const;
@@ -100,14 +105,18 @@ class JSListFormat : public JSObject {
DECL_VERIFIER(JSListFormat)
// Layout description.
- static const int kJSListFormatOffset = JSObject::kHeaderSize;
- static const int kLocaleOffset = kJSListFormatOffset + kPointerSize;
- static const int kICUFormatterOffset = kLocaleOffset + kPointerSize;
- static const int kFlagsOffset = kICUFormatterOffset + kPointerSize;
- static const int kSize = kFlagsOffset + kPointerSize;
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(JSListFormat);
+#define JS_LIST_FORMAT_FIELDS(V) \
+ V(kJSListFormatOffset, kTaggedSize) \
+ V(kLocaleOffset, kTaggedSize) \
+ V(kICUFormatterOffset, kTaggedSize) \
+ V(kFlagsOffset, kTaggedSize) \
+ /* Header size. */ \
+ V(kSize, 0)
+
+ DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize, JS_LIST_FORMAT_FIELDS)
+#undef JS_LIST_FORMAT_FIELDS
+
+ OBJECT_CONSTRUCTORS(JSListFormat, JSObject);
};
} // namespace internal
diff --git a/deps/v8/src/objects/js-locale-inl.h b/deps/v8/src/objects/js-locale-inl.h
index ac0a7a914f..15a2082a0a 100644
--- a/deps/v8/src/objects/js-locale-inl.h
+++ b/deps/v8/src/objects/js-locale-inl.h
@@ -19,54 +19,12 @@
namespace v8 {
namespace internal {
-// Base locale accessors.
-ACCESSORS(JSLocale, language, Object, kLanguageOffset);
-ACCESSORS(JSLocale, script, Object, kScriptOffset);
-ACCESSORS(JSLocale, region, Object, kRegionOffset);
-ACCESSORS(JSLocale, base_name, Object, kBaseNameOffset);
-ACCESSORS(JSLocale, locale, String, kLocaleOffset);
+OBJECT_CONSTRUCTORS_IMPL(JSLocale, JSObject)
-// Unicode extension accessors.
-ACCESSORS(JSLocale, calendar, Object, kCalendarOffset);
-ACCESSORS(JSLocale, collation, Object, kCollationOffset);
-ACCESSORS(JSLocale, numbering_system, Object, kNumberingSystemOffset);
-SMI_ACCESSORS(JSLocale, flags, kFlagsOffset)
+ACCESSORS(JSLocale, icu_locale, Managed<icu::Locale>, kICULocaleOffset);
CAST_ACCESSOR(JSLocale);
-inline void JSLocale::set_case_first(CaseFirst case_first) {
- DCHECK_GT(CaseFirst::COUNT, case_first);
- int hints = flags();
- hints = CaseFirstBits::update(hints, case_first);
- set_flags(hints);
-}
-
-inline JSLocale::CaseFirst JSLocale::case_first() const {
- return CaseFirstBits::decode(flags());
-}
-
-inline void JSLocale::set_hour_cycle(HourCycle hour_cycle) {
- DCHECK_GT(HourCycle::COUNT, hour_cycle);
- int hints = flags();
- hints = HourCycleBits::update(hints, hour_cycle);
- set_flags(hints);
-}
-
-inline JSLocale::HourCycle JSLocale::hour_cycle() const {
- return HourCycleBits::decode(flags());
-}
-
-inline void JSLocale::set_numeric(Numeric numeric) {
- DCHECK_GT(Numeric::COUNT, numeric);
- int hints = flags();
- hints = NumericBits::update(hints, numeric);
- set_flags(hints);
-}
-
-inline JSLocale::Numeric JSLocale::numeric() const {
- return NumericBits::decode(flags());
-}
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/objects/js-locale.cc b/deps/v8/src/objects/js-locale.cc
index 78fb30fa41..be438a2508 100644
--- a/deps/v8/src/objects/js-locale.cc
+++ b/deps/v8/src/objects/js-locale.cc
@@ -20,39 +20,29 @@
#include "src/objects-inl.h"
#include "src/objects/intl-objects.h"
#include "src/objects/js-locale-inl.h"
+#include "unicode/char16ptr.h"
#include "unicode/locid.h"
#include "unicode/uloc.h"
#include "unicode/unistr.h"
-#include "unicode/uvernum.h"
-#include "unicode/uversion.h"
-
-#if U_ICU_VERSION_MAJOR_NUM >= 59
-#include "unicode/char16ptr.h"
-#endif
namespace v8 {
namespace internal {
namespace {
-JSLocale::CaseFirst GetCaseFirst(const char* str) {
- if (strcmp(str, "upper") == 0) return JSLocale::CaseFirst::UPPER;
- if (strcmp(str, "lower") == 0) return JSLocale::CaseFirst::LOWER;
- if (strcmp(str, "false") == 0) return JSLocale::CaseFirst::FALSE_VALUE;
- UNREACHABLE();
-}
-
-JSLocale::HourCycle GetHourCycle(const char* str) {
- if (strcmp(str, "h11") == 0) return JSLocale::HourCycle::H11;
- if (strcmp(str, "h12") == 0) return JSLocale::HourCycle::H12;
- if (strcmp(str, "h23") == 0) return JSLocale::HourCycle::H23;
- if (strcmp(str, "h24") == 0) return JSLocale::HourCycle::H24;
- UNREACHABLE();
-}
-
-JSLocale::Numeric GetNumeric(const char* str) {
- return strcmp(str, "true") == 0 ? JSLocale::Numeric::TRUE_VALUE
- : JSLocale::Numeric::FALSE_VALUE;
+// Helper function to check whether a locale is valid. It returns false if
+// the lengths of the extension fields are incorrect. For example, en-u-a and
+// en-u-co-b both return false.
+bool IsValidLocale(const icu::Locale& locale) {
+ // icu::Locale::toLanguageTag won't return U_STRING_NOT_TERMINATED_WARNING
+ // for an incorrect locale yet, so we still need the uloc_toLanguageTag call
+ // below.
+ // TODO(ftang): Change to icu::Locale::toLanguageTag once it indicates the
+ // error.
+ char result[ULOC_FULLNAME_CAPACITY];
+ UErrorCode status = U_ZERO_ERROR;
+ uloc_toLanguageTag(locale.getName(), result, ULOC_FULLNAME_CAPACITY, true,
+ &status);
+ return U_SUCCESS(status) && status != U_STRING_NOT_TERMINATED_WARNING;
}
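
Editor's note: for illustration, the strict uloc_toLanguageTag round trip this check relies on (the output value is an assumption about ICU's canonical BCP 47 mapping):

    char result[ULOC_FULLNAME_CAPACITY];
    UErrorCode status = U_ZERO_ERROR;
    uloc_toLanguageTag("en_US@calendar=gregorian", result,
                       ULOC_FULLNAME_CAPACITY, /*strict=*/true, &status);
    // On success, result holds "en-US-u-ca-gregory"; a malformed extension
    // keyword leaves status as a failure code, which IsValidLocale keys off.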
struct OptionData {
@@ -65,9 +55,9 @@ struct OptionData {
// Inserts tags from options into locale string.
Maybe<bool> InsertOptionsIntoLocale(Isolate* isolate,
Handle<JSReceiver> options,
- char* icu_locale) {
+ icu::Locale* icu_locale) {
CHECK(isolate);
- CHECK(icu_locale);
+ CHECK(!icu_locale->isBogus());
const std::vector<const char*> hour_cycle_values = {"h11", "h12", "h23",
"h24"};
@@ -85,6 +75,7 @@ Maybe<bool> InsertOptionsIntoLocale(Isolate* isolate,
// TODO(cira): Pass in values as per the spec to make this
// spec compliant.
+ UErrorCode status = U_ZERO_ERROR;
for (const auto& option_to_bcp47 : kOptionToUnicodeTagMap) {
std::unique_ptr<char[]> value_str = nullptr;
bool value_bool = false;
@@ -114,14 +105,9 @@ Maybe<bool> InsertOptionsIntoLocale(Isolate* isolate,
// Overwrite existing, or insert new key-value to the locale string.
const char* value = uloc_toLegacyType(key, value_str.get());
- UErrorCode status = U_ZERO_ERROR;
if (value) {
- // TODO(cira): ICU puts artificial limit on locale length, while BCP47
- // doesn't. Switch to C++ API when it's ready.
- // Related ICU bug - https://ssl.icu-project.org/trac/ticket/13417.
- uloc_setKeywordValue(key, value, icu_locale, ULOC_FULLNAME_CAPACITY,
- &status);
- if (U_FAILURE(status) || status == U_STRING_NOT_TERMINATED_WARNING) {
+ icu_locale->setKeywordValue(key, value, status);
+ if (U_FAILURE(status)) {
return Just(false);
}
} else {
@@ -129,277 +115,346 @@ Maybe<bool> InsertOptionsIntoLocale(Isolate* isolate,
}
}
+ // Check that all the Unicode extension fields have the right length.
+ if (!IsValidLocale(*icu_locale)) {
+ THROW_NEW_ERROR_RETURN_VALUE(
+ isolate, NewRangeError(MessageTemplate::kLocaleBadParameters),
+ Nothing<bool>());
+ }
+
return Just(true);
}
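
Editor's note: a minimal sketch of the C++ keyword API that replaces uloc_setKeywordValue in this hunk (legacy key/value pair chosen for illustration):

    UErrorCode status = U_ZERO_ERROR;
    icu::Locale loc("en", "US");
    loc.setKeywordValue("calendar", "gregorian", status);
    // loc.getName() is now "en_US@calendar=gregorian"; a failure is
    // reported through |status| rather than via a truncation warning.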
-// Fills in the JSLocale object slots with Unicode tag/values.
-bool PopulateLocaleWithUnicodeTags(Isolate* isolate, const char* icu_locale,
- Handle<JSLocale> locale_holder) {
- CHECK(isolate);
- CHECK(icu_locale);
-
- Factory* factory = isolate->factory();
-
+Handle<Object> UnicodeKeywordValue(Isolate* isolate, Handle<JSLocale> locale,
+ const char* key) {
+ icu::Locale* icu_locale = locale->icu_locale()->raw();
UErrorCode status = U_ZERO_ERROR;
- UEnumeration* keywords = uloc_openKeywords(icu_locale, &status);
- if (!keywords) return true;
-
- char value[ULOC_FULLNAME_CAPACITY];
- while (const char* keyword = uenum_next(keywords, nullptr, &status)) {
- uloc_getKeywordValue(icu_locale, keyword, value, ULOC_FULLNAME_CAPACITY,
- &status);
- if (U_FAILURE(status)) {
- status = U_ZERO_ERROR;
- continue;
- }
-
- // Ignore those we don't recognize - spec allows that.
- const char* bcp47_key = uloc_toUnicodeLocaleKey(keyword);
- if (bcp47_key) {
- const char* bcp47_value = uloc_toUnicodeLocaleType(bcp47_key, value);
- if (bcp47_value) {
- if (strcmp(bcp47_key, "kn") == 0) {
- locale_holder->set_numeric(GetNumeric(bcp47_value));
- } else if (strcmp(bcp47_key, "ca") == 0) {
- Handle<String> bcp47_handle =
- factory->NewStringFromAsciiChecked(bcp47_value);
- locale_holder->set_calendar(*bcp47_handle);
- } else if (strcmp(bcp47_key, "kf") == 0) {
- locale_holder->set_case_first(GetCaseFirst(bcp47_value));
- } else if (strcmp(bcp47_key, "co") == 0) {
- Handle<String> bcp47_handle =
- factory->NewStringFromAsciiChecked(bcp47_value);
- locale_holder->set_collation(*bcp47_handle);
- } else if (strcmp(bcp47_key, "hc") == 0) {
- locale_holder->set_hour_cycle(GetHourCycle(bcp47_value));
- } else if (strcmp(bcp47_key, "nu") == 0) {
- Handle<String> bcp47_handle =
- factory->NewStringFromAsciiChecked(bcp47_value);
- locale_holder->set_numbering_system(*bcp47_handle);
- }
- }
- }
+ std::string value =
+ icu_locale->getUnicodeKeywordValue<std::string>(key, status);
+ if (status == U_ILLEGAL_ARGUMENT_ERROR || value == "") {
+ return isolate->factory()->undefined_value();
}
+ return isolate->factory()->NewStringFromAsciiChecked(value.c_str());
+}
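
Editor's note: the getUnicodeKeywordValue call above behaves roughly as in this sketch (assuming ICU 63+; the helper maps both the error and the empty string to undefined):

    UErrorCode status = U_ZERO_ERROR;
    icu::Locale loc = icu::Locale::forLanguageTag("en-u-ca-buddhist", status);
    std::string ca = loc.getUnicodeKeywordValue<std::string>("ca", status);
    // ca == "buddhist"; querying an absent key yields "" or sets
    // U_ILLEGAL_ARGUMENT_ERROR, both of which become undefined above.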
- uenum_close(keywords);
+bool InRange(size_t value, size_t start, size_t end) {
+ return (start <= value) && (value <= end);
+}
+bool InRange(char value, char start, char end) {
+ return (start <= value) && (value <= end);
+}
+bool IsCheckRange(const std::string& str, size_t min, size_t max,
+ bool(range_check_func)(char)) {
+ if (!InRange(str.length(), min, max)) return false;
+ for (size_t i = 0; i < str.length(); i++) {
+ if (!range_check_func(str[i])) return false;
+ }
return true;
}
-} // namespace
+bool IsAlpha(const std::string& str, size_t min, size_t max) {
+ return IsCheckRange(str, min, max, [](char c) -> bool {
+ return InRange(c, 'a', 'z') || InRange(c, 'A', 'Z');
+ });
+}
-MaybeHandle<JSLocale> JSLocale::Initialize(Isolate* isolate,
- Handle<JSLocale> locale_holder,
- Handle<String> locale,
- Handle<JSReceiver> options) {
- locale_holder->set_flags(0);
- static const char* const kMethod = "Intl.Locale";
- v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate);
- UErrorCode status = U_ZERO_ERROR;
+bool IsDigit(const std::string& str, size_t min, size_t max) {
+ return IsCheckRange(str, min, max,
+ [](char c) -> bool { return InRange(c, '0', '9'); });
+}
+
+bool ValidateLanguageProduction(const std::string& value) {
+ // language = 2*3ALPHA ; shortest ISO 639 code
+ // ["-" extlang] ; sometimes followed by
+ // ; extended language subtags
+ // / 4ALPHA ; or reserved for future use
+ // / 5*8ALPHA ; or registered language subtag
+ //
+ // extlang = 3ALPHA ; selected ISO 639 codes
+ // *2("-" 3ALPHA) ; permanently reserved
+ // TODO(ftang): [extlang] is not handled yet.
+ return IsAlpha(value, 2, 8);
+}
- // Get ICU locale format, and canonicalize it.
- char icu_result[ULOC_FULLNAME_CAPACITY];
+bool ValidateScriptProduction(const std::string& value) {
+ // script = 4ALPHA ; ISO 15924 code
+ return IsAlpha(value, 4, 4);
+}
- if (locale->length() == 0) {
- THROW_NEW_ERROR(isolate, NewRangeError(MessageTemplate::kLocaleNotEmpty),
- JSLocale);
+bool ValidateRegionProduction(const std::string& value) {
+ // region = 2ALPHA ; ISO 3166-1 code
+ // / 3DIGIT ; UN M.49 code
+ return IsAlpha(value, 2, 2) || IsDigit(value, 3, 3);
+}
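
Editor's note: examples of what these BCP 47 productions accept and reject (expected behavior of the helpers above):

    // ValidateLanguageProduction("en")    -> true   (2*3ALPHA)
    // ValidateLanguageProduction("en-us") -> false  ('-' is not ALPHA;
    //                                                extlang is unhandled)
    // ValidateScriptProduction("Latn")    -> true   (4ALPHA)
    // ValidateRegionProduction("419")     -> true   (3DIGIT, UN M.49)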
+
+Maybe<icu::Locale> ApplyOptionsToTag(Isolate* isolate, Handle<String> tag,
+ Handle<JSReceiver> options) {
+ v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate);
+ if (tag->length() == 0) {
+ THROW_NEW_ERROR_RETURN_VALUE(
+ isolate, NewRangeError(MessageTemplate::kLocaleNotEmpty),
+ Nothing<icu::Locale>());
}
- v8::String::Utf8Value bcp47_locale(v8_isolate, v8::Utils::ToLocal(locale));
- CHECK_LT(0, bcp47_locale.length());
- CHECK_NOT_NULL(*bcp47_locale);
-
- int parsed_length = 0;
- int icu_length =
- uloc_forLanguageTag(*bcp47_locale, icu_result, ULOC_FULLNAME_CAPACITY,
- &parsed_length, &status);
-
- if (U_FAILURE(status) ||
- parsed_length < static_cast<int>(bcp47_locale.length()) ||
- status == U_STRING_NOT_TERMINATED_WARNING || icu_length == 0) {
- THROW_NEW_ERROR(
- isolate,
- NewRangeError(MessageTemplate::kLocaleBadParameters,
- isolate->factory()->NewStringFromAsciiChecked(kMethod),
- locale_holder),
- JSLocale);
+ v8::String::Utf8Value bcp47_tag(v8_isolate, v8::Utils::ToLocal(tag));
+ CHECK_LT(0, bcp47_tag.length());
+ CHECK_NOT_NULL(*bcp47_tag);
+ // 2. If IsStructurallyValidLanguageTag(tag) is false, throw a RangeError
+ // exception.
+ UErrorCode status = U_ZERO_ERROR;
+ icu::Locale icu_locale =
+ icu::Locale::forLanguageTag({*bcp47_tag, bcp47_tag.length()}, status);
+ if (U_FAILURE(status)) {
+ THROW_NEW_ERROR_RETURN_VALUE(
+ isolate, NewRangeError(MessageTemplate::kLocaleBadParameters),
+ Nothing<icu::Locale>());
}
- Maybe<bool> error = InsertOptionsIntoLocale(isolate, options, icu_result);
- MAYBE_RETURN(error, MaybeHandle<JSLocale>());
- if (!error.FromJust()) {
- THROW_NEW_ERROR(
- isolate,
- NewRangeError(MessageTemplate::kLocaleBadParameters,
- isolate->factory()->NewStringFromAsciiChecked(kMethod),
- locale_holder),
- JSLocale);
+ // 3. Let language be ? GetOption(options, "language", "string", undefined,
+ // undefined).
+ const std::vector<const char*> empty_values = {};
+ std::unique_ptr<char[]> language_str = nullptr;
+ Maybe<bool> maybe_language =
+ Intl::GetStringOption(isolate, options, "language", empty_values,
+ "ApplyOptionsToTag", &language_str);
+ MAYBE_RETURN(maybe_language, Nothing<icu::Locale>());
+ // 4. If language is not undefined, then
+ if (maybe_language.FromJust()) {
+ // a. If language does not match the language production, throw a RangeError
+ // exception.
+ // b. If language matches the grandfathered production, throw a RangeError
+ // exception.
+ // Currently ValidateLanguageProduction only accepts 2*3ALPHA / 4ALPHA /
+ // 5*8ALPHA and rejects 2*3ALPHA "-" extlang, so none of the grandfathered
+ // tags will match.
+ if (!ValidateLanguageProduction(language_str.get())) {
+ THROW_NEW_ERROR_RETURN_VALUE(
+ isolate, NewRangeError(MessageTemplate::kLocaleBadParameters),
+ Nothing<icu::Locale>());
+ }
}
+ // 5. Let script be ? GetOption(options, "script", "string", undefined,
+ // undefined).
+ std::unique_ptr<char[]> script_str = nullptr;
+ Maybe<bool> maybe_script =
+ Intl::GetStringOption(isolate, options, "script", empty_values,
+ "ApplyOptionsToTag", &script_str);
+ MAYBE_RETURN(maybe_script, Nothing<icu::Locale>());
+ // 6. If script is not undefined, then
+ if (maybe_script.FromJust()) {
+ // a. If script does not match the script production, throw a RangeError
+ // exception.
+ if (!ValidateScriptProduction(script_str.get())) {
+ THROW_NEW_ERROR_RETURN_VALUE(
+ isolate, NewRangeError(MessageTemplate::kLocaleBadParameters),
+ Nothing<icu::Locale>());
+ }
+ }
+ // 7. Let region be ? GetOption(options, "region", "string", undefined,
+ // undefined).
+ std::unique_ptr<char[]> region_str = nullptr;
+ Maybe<bool> maybe_region =
+ Intl::GetStringOption(isolate, options, "region", empty_values,
+ "ApplyOptionsToTag", &region_str);
+ MAYBE_RETURN(maybe_region, Nothing<icu::Locale>());
+ // 8. If region is not undefined, then
+ if (maybe_region.FromJust()) {
+ // a. If region does not match the region production, throw a RangeError
+ // exception.
+ if (!ValidateRegionProduction(region_str.get())) {
+ THROW_NEW_ERROR_RETURN_VALUE(
+ isolate, NewRangeError(MessageTemplate::kLocaleBadParameters),
+ Nothing<icu::Locale>());
+ }
+ }
+ // 9. Set tag to CanonicalizeLanguageTag(tag).
+
+ // 10. If language is not undefined,
+ std::string locale_str;
+ if (maybe_language.FromJust()) {
+ // a. Assert: tag matches the langtag production.
+ // b. Set tag to tag with the substring corresponding to the language
+ // production replaced by the string language.
+ locale_str = language_str.get();
+ } else {
+ locale_str = icu_locale.getLanguage();
+ }
+ // 11. If script is not undefined, then
+ const char* script_ptr = nullptr;
+ if (maybe_script.FromJust()) {
+ // a. If tag does not contain a script production, then
+ // i. Set tag to the concatenation of the language production of tag, "-",
+ // script, and the rest of tag.
+ // b. Else,
+ // i. Set tag to tag with the substring corresponding to the script
+ // production replaced by the string script.
+ script_ptr = script_str.get();
+ } else {
+ script_ptr = icu_locale.getScript();
+ }
+ if (script_ptr != nullptr && strlen(script_ptr) > 0) {
+ locale_str.append("-");
+ locale_str.append(script_ptr);
+ }
+ // 12. If region is not undefined, then
+ const char* region_ptr = nullptr;
+ if (maybe_region.FromJust()) {
+ // a. If tag does not contain a region production, then
+ //
+ // i. Set tag to the concatenation of the language production of tag, the
+ // substring corresponding to the "-" script production if present, "-",
+ // region, and the rest of tag.
+ //
+ // b. Else,
+ //
+ // i. Set tag to tag with the substring corresponding to the region
+ // production replaced by the string region.
+ region_ptr = region_str.get();
+ } else {
+ region_ptr = icu_locale.getCountry();
+ }
+
+ std::string without_options(icu_locale.getName());
- if (!PopulateLocaleWithUnicodeTags(isolate, icu_result, locale_holder)) {
- THROW_NEW_ERROR(
- isolate,
- NewRangeError(MessageTemplate::kLocaleBadParameters,
- isolate->factory()->NewStringFromAsciiChecked(kMethod),
- locale_holder),
- JSLocale);
+ // Replace the subtags with the values computed from options above.
+ icu_locale =
+ icu::Locale(locale_str.c_str(), region_ptr, icu_locale.getVariant());
+ locale_str = icu_locale.getName();
+
+ // Append extensions from tag
+ size_t others = without_options.find("@");
+ if (others != std::string::npos) {
+ locale_str += without_options.substr(others);
}
- // Extract language, script and region parts.
- char icu_language[ULOC_LANG_CAPACITY];
- uloc_getLanguage(icu_result, icu_language, ULOC_LANG_CAPACITY, &status);
+ // 13. Return CanonicalizeLanguageTag(tag).
+ icu_locale = icu::Locale::createCanonical(locale_str.c_str());
+ return Just(icu_locale);
+}
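
Editor's note: steps 9-13 can be pictured end to end with this hedged sketch (subtag values and the resulting locale names are illustrative assumptions):

    UErrorCode status = U_ZERO_ERROR;
    icu::Locale tag =
        icu::Locale::forLanguageTag("sr-Cyrl-RS-u-nu-latn", status);
    // Override the region while keeping language, script and extensions:
    icu::Locale rebuilt("sr-Cyrl", "ME", tag.getVariant());
    std::string name = rebuilt.getName();  // "sr_Cyrl_ME"
    name += "@numbers=latn";               // extensions copied back from tag
    icu::Locale canonical = icu::Locale::createCanonical(name.c_str());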
- char icu_script[ULOC_SCRIPT_CAPACITY];
- uloc_getScript(icu_result, icu_script, ULOC_SCRIPT_CAPACITY, &status);
+} // namespace
- char icu_region[ULOC_COUNTRY_CAPACITY];
- uloc_getCountry(icu_result, icu_region, ULOC_COUNTRY_CAPACITY, &status);
+MaybeHandle<JSLocale> JSLocale::Initialize(Isolate* isolate,
+ Handle<JSLocale> locale,
+ Handle<String> locale_str,
+ Handle<JSReceiver> options) {
+ Maybe<icu::Locale> maybe_locale =
+ ApplyOptionsToTag(isolate, locale_str, options);
+ MAYBE_RETURN(maybe_locale, MaybeHandle<JSLocale>());
+ icu::Locale icu_locale = maybe_locale.FromJust();
- if (U_FAILURE(status) || status == U_STRING_NOT_TERMINATED_WARNING) {
- THROW_NEW_ERROR(
- isolate,
- NewRangeError(MessageTemplate::kLocaleBadParameters,
- isolate->factory()->NewStringFromAsciiChecked(kMethod),
- locale_holder),
- JSLocale);
+ Maybe<bool> error = InsertOptionsIntoLocale(isolate, options, &icu_locale);
+ MAYBE_RETURN(error, MaybeHandle<JSLocale>());
+ if (!error.FromJust()) {
+ THROW_NEW_ERROR(isolate,
+ NewRangeError(MessageTemplate::kLocaleBadParameters),
+ JSLocale);
}
- Factory* factory = isolate->factory();
+ // 31. Set locale.[[Locale]] to r.[[locale]].
+ Handle<Managed<icu::Locale>> managed_locale =
+ Managed<icu::Locale>::FromRawPtr(isolate, 0, icu_locale.clone());
+ locale->set_icu_locale(*managed_locale);
- // NOTE: One shouldn't use temporary handles, because they can go out of
- // scope and be garbage collected before properly assigned.
- // DON'T DO THIS: locale_holder->set_language(*f->NewStringAscii...);
- Handle<String> language = factory->NewStringFromAsciiChecked(icu_language);
- locale_holder->set_language(*language);
+ return locale;
+}
- if (strlen(icu_script) != 0) {
- Handle<String> script = factory->NewStringFromAsciiChecked(icu_script);
- locale_holder->set_script(*script);
- }
+namespace {
+Handle<String> MorphLocale(Isolate* isolate, String locale,
+ void (*morph_func)(icu::Locale*, UErrorCode*)) {
+ UErrorCode status = U_ZERO_ERROR;
+ icu::Locale icu_locale =
+ icu::Locale::forLanguageTag(locale.ToCString().get(), status);
+ CHECK(U_SUCCESS(status));
+ CHECK(!icu_locale.isBogus());
+ (*morph_func)(&icu_locale, &status);
+ CHECK(U_SUCCESS(status));
+ CHECK(!icu_locale.isBogus());
+ std::string locale_str = Intl::ToLanguageTag(icu_locale).FromJust();
+ return isolate->factory()->NewStringFromAsciiChecked(locale_str.c_str());
+}
- if (strlen(icu_region) != 0) {
- Handle<String> region = factory->NewStringFromAsciiChecked(icu_region);
- locale_holder->set_region(*region);
- }
+} // namespace
- char icu_base_name[ULOC_FULLNAME_CAPACITY];
- uloc_getBaseName(icu_result, icu_base_name, ULOC_FULLNAME_CAPACITY, &status);
- // We need to convert it back to BCP47.
- char bcp47_result[ULOC_FULLNAME_CAPACITY];
- uloc_toLanguageTag(icu_base_name, bcp47_result, ULOC_FULLNAME_CAPACITY, true,
- &status);
- if (U_FAILURE(status) || status == U_STRING_NOT_TERMINATED_WARNING) {
- THROW_NEW_ERROR(
- isolate,
- NewRangeError(MessageTemplate::kLocaleBadParameters,
- isolate->factory()->NewStringFromAsciiChecked(kMethod),
- locale_holder),
- JSLocale);
- }
- Handle<String> base_name = factory->NewStringFromAsciiChecked(bcp47_result);
- locale_holder->set_base_name(*base_name);
+Handle<String> JSLocale::Maximize(Isolate* isolate, String locale) {
+ return MorphLocale(isolate, locale,
+ [](icu::Locale* icu_locale, UErrorCode* status) {
+ icu_locale->addLikelySubtags(*status);
+ });
+}
- // Produce final representation of the locale string, for toString().
- uloc_toLanguageTag(icu_result, bcp47_result, ULOC_FULLNAME_CAPACITY, true,
- &status);
- if (U_FAILURE(status) || status == U_STRING_NOT_TERMINATED_WARNING) {
- THROW_NEW_ERROR(
- isolate,
- NewRangeError(MessageTemplate::kLocaleBadParameters,
- isolate->factory()->NewStringFromAsciiChecked(kMethod),
- locale_holder),
- JSLocale);
- }
- Handle<String> locale_handle =
- factory->NewStringFromAsciiChecked(bcp47_result);
- locale_holder->set_locale(*locale_handle);
+Handle<String> JSLocale::Minimize(Isolate* isolate, String locale) {
+ return MorphLocale(isolate, locale,
+ [](icu::Locale* icu_locale, UErrorCode* status) {
+ icu_locale->minimizeSubtags(*status);
+ });
+}
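
Editor's note: both helpers reduce to the corresponding icu::Locale mutators; for example (results depend on the likely-subtags data shipped with ICU):

    UErrorCode status = U_ZERO_ERROR;
    icu::Locale loc("zh");
    loc.addLikelySubtags(status);  // maximize: "zh" -> "zh_Hans_CN"
    loc.minimizeSubtags(status);   // minimize back: "zh_Hans_CN" -> "zh"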
- return locale_holder;
+Handle<Object> JSLocale::Language(Isolate* isolate, Handle<JSLocale> locale) {
+ Factory* factory = isolate->factory();
+ const char* language = locale->icu_locale()->raw()->getLanguage();
+ if (strlen(language) == 0) return factory->undefined_value();
+ return factory->NewStringFromAsciiChecked(language);
}
-namespace {
+Handle<Object> JSLocale::Script(Isolate* isolate, Handle<JSLocale> locale) {
+ Factory* factory = isolate->factory();
+ const char* script = locale->icu_locale()->raw()->getScript();
+ if (strlen(script) == 0) return factory->undefined_value();
+ return factory->NewStringFromAsciiChecked(script);
+}
-Handle<String> MorphLocale(Isolate* isolate, String* language_tag,
- int32_t (*morph_func)(const char*, char*, int32_t,
- UErrorCode*)) {
+Handle<Object> JSLocale::Region(Isolate* isolate, Handle<JSLocale> locale) {
Factory* factory = isolate->factory();
- char localeBuffer[ULOC_FULLNAME_CAPACITY];
- char morphBuffer[ULOC_FULLNAME_CAPACITY];
+ const char* region = locale->icu_locale()->raw()->getCountry();
+ if (strlen(region) == 0) return factory->undefined_value();
+ return factory->NewStringFromAsciiChecked(region);
+}
- UErrorCode status = U_ZERO_ERROR;
- // Convert from language id to locale.
- int32_t parsed_length;
- int32_t length =
- uloc_forLanguageTag(language_tag->ToCString().get(), localeBuffer,
- ULOC_FULLNAME_CAPACITY, &parsed_length, &status);
- CHECK(parsed_length == language_tag->length());
- DCHECK(U_SUCCESS(status));
- DCHECK_GT(length, 0);
- DCHECK_NOT_NULL(morph_func);
- // Add the likely subtags or Minimize the subtags on the locale id
- length =
- (*morph_func)(localeBuffer, morphBuffer, ULOC_FULLNAME_CAPACITY, &status);
- DCHECK(U_SUCCESS(status));
- DCHECK_GT(length, 0);
- // Returns a well-formed language tag
- length = uloc_toLanguageTag(morphBuffer, localeBuffer, ULOC_FULLNAME_CAPACITY,
- false, &status);
- DCHECK(U_SUCCESS(status));
- DCHECK_GT(length, 0);
- std::string lang(localeBuffer, length);
- std::replace(lang.begin(), lang.end(), '_', '-');
-
- return factory->NewStringFromAsciiChecked(lang.c_str());
+Handle<String> JSLocale::BaseName(Isolate* isolate, Handle<JSLocale> locale) {
+ icu::Locale icu_locale =
+ icu::Locale::createFromName(locale->icu_locale()->raw()->getBaseName());
+ std::string base_name = Intl::ToLanguageTag(icu_locale).FromJust();
+ return isolate->factory()->NewStringFromAsciiChecked(base_name.c_str());
}
-} // namespace
+Handle<Object> JSLocale::Calendar(Isolate* isolate, Handle<JSLocale> locale) {
+ return UnicodeKeywordValue(isolate, locale, "ca");
+}
-Handle<String> JSLocale::Maximize(Isolate* isolate, String* locale) {
- return MorphLocale(isolate, locale, uloc_addLikelySubtags);
+Handle<Object> JSLocale::CaseFirst(Isolate* isolate, Handle<JSLocale> locale) {
+ return UnicodeKeywordValue(isolate, locale, "kf");
}
-Handle<String> JSLocale::Minimize(Isolate* isolate, String* locale) {
- return MorphLocale(isolate, locale, uloc_minimizeSubtags);
+Handle<Object> JSLocale::Collation(Isolate* isolate, Handle<JSLocale> locale) {
+ return UnicodeKeywordValue(isolate, locale, "co");
}
-Handle<String> JSLocale::CaseFirstAsString() const {
- switch (case_first()) {
- case CaseFirst::UPPER:
- return GetReadOnlyRoots().upper_string_handle();
- case CaseFirst::LOWER:
- return GetReadOnlyRoots().lower_string_handle();
- case CaseFirst::FALSE_VALUE:
- return GetReadOnlyRoots().false_string_handle();
- case CaseFirst::COUNT:
- UNREACHABLE();
- }
+Handle<Object> JSLocale::HourCycle(Isolate* isolate, Handle<JSLocale> locale) {
+ return UnicodeKeywordValue(isolate, locale, "hc");
}
-Handle<String> JSLocale::HourCycleAsString() const {
- switch (hour_cycle()) {
- case HourCycle::H11:
- return GetReadOnlyRoots().h11_string_handle();
- case HourCycle::H12:
- return GetReadOnlyRoots().h12_string_handle();
- case HourCycle::H23:
- return GetReadOnlyRoots().h23_string_handle();
- case HourCycle::H24:
- return GetReadOnlyRoots().h24_string_handle();
- case HourCycle::COUNT:
- UNREACHABLE();
- }
+Handle<Object> JSLocale::Numeric(Isolate* isolate, Handle<JSLocale> locale) {
+ Factory* factory = isolate->factory();
+ icu::Locale* icu_locale = locale->icu_locale()->raw();
+ UErrorCode status = U_ZERO_ERROR;
+ std::string numeric =
+ icu_locale->getUnicodeKeywordValue<std::string>("kn", status);
+ return (numeric == "true") ? factory->true_value() : factory->false_value();
}
-Handle<String> JSLocale::NumericAsString() const {
- switch (numeric()) {
- case Numeric::NOTSET:
- return GetReadOnlyRoots().undefined_string_handle();
- case Numeric::TRUE_VALUE:
- return GetReadOnlyRoots().true_string_handle();
- case Numeric::FALSE_VALUE:
- return GetReadOnlyRoots().false_string_handle();
- case Numeric::COUNT:
- UNREACHABLE();
- }
+Handle<Object> JSLocale::NumberingSystem(Isolate* isolate,
+ Handle<JSLocale> locale) {
+ return UnicodeKeywordValue(isolate, locale, "nu");
+}
+
+std::string JSLocale::ToString(Handle<JSLocale> locale) {
+ icu::Locale* icu_locale = locale->icu_locale()->raw();
+ return Intl::ToLanguageTag(*icu_locale).FromJust();
+}
+
+Handle<String> JSLocale::ToString(Isolate* isolate, Handle<JSLocale> locale) {
+ std::string locale_str = JSLocale::ToString(locale);
+ return isolate->factory()->NewStringFromAsciiChecked(locale_str.c_str());
}
} // namespace internal
diff --git a/deps/v8/src/objects/js-locale.h b/deps/v8/src/objects/js-locale.h
index f42a4cdaee..120ddeb965 100644
--- a/deps/v8/src/objects/js-locale.h
+++ b/deps/v8/src/objects/js-locale.h
@@ -13,11 +13,15 @@
#include "src/heap/factory.h"
#include "src/isolate.h"
#include "src/objects.h"
-#include "unicode/unistr.h"
+#include "src/objects/managed.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
+namespace U_ICU_NAMESPACE {
+class Locale;
+}
+
namespace v8 {
namespace internal {
@@ -29,103 +33,39 @@ class JSLocale : public JSObject {
Handle<JSLocale> locale_holder,
Handle<String> locale,
Handle<JSReceiver> options);
- static Handle<String> Maximize(Isolate* isolate, String* locale);
- static Handle<String> Minimize(Isolate* isolate, String* locale);
-
- Handle<String> CaseFirstAsString() const;
- Handle<String> NumericAsString() const;
- Handle<String> HourCycleAsString() const;
+ static Handle<String> Maximize(Isolate* isolate, String locale);
+ static Handle<String> Minimize(Isolate* isolate, String locale);
+
+ static Handle<Object> Language(Isolate* isolate, Handle<JSLocale> locale);
+ static Handle<Object> Script(Isolate* isolate, Handle<JSLocale> locale);
+ static Handle<Object> Region(Isolate* isolate, Handle<JSLocale> locale);
+ static Handle<String> BaseName(Isolate* isolate, Handle<JSLocale> locale);
+ static Handle<Object> Calendar(Isolate* isolate, Handle<JSLocale> locale);
+ static Handle<Object> CaseFirst(Isolate* isolate, Handle<JSLocale> locale);
+ static Handle<Object> Collation(Isolate* isolate, Handle<JSLocale> locale);
+ static Handle<Object> HourCycle(Isolate* isolate, Handle<JSLocale> locale);
+ static Handle<Object> Numeric(Isolate* isolate, Handle<JSLocale> locale);
+ static Handle<Object> NumberingSystem(Isolate* isolate,
+ Handle<JSLocale> locale);
+ static Handle<String> ToString(Isolate* isolate, Handle<JSLocale> locale);
+ static std::string ToString(Handle<JSLocale> locale);
DECL_CAST(JSLocale)
- // Locale accessors.
- DECL_ACCESSORS(language, Object)
- DECL_ACCESSORS(script, Object)
- DECL_ACCESSORS(region, Object)
- DECL_ACCESSORS(base_name, Object)
- DECL_ACCESSORS(locale, String)
-
- // Unicode extension accessors.
- DECL_ACCESSORS(calendar, Object)
- DECL_ACCESSORS(collation, Object)
- DECL_ACCESSORS(numbering_system, Object)
-
- // CaseFirst: "kf"
- //
- // ecma402 #sec-Intl.Locale.prototype.caseFirst
- enum class CaseFirst {
- UPPER, // upper case sorts before lower case
- LOWER, // lower case sorts before upper case
- // (compiler does not like FALSE so we have to name it FALSE_VALUE)
- FALSE_VALUE, // Turn the feature off
- COUNT
- };
- inline void set_case_first(CaseFirst case_first);
- inline CaseFirst case_first() const;
-
- // Numeric: 'kn"
- //
- // ecma402 #sec-Intl.Locale.prototype.numeric
- enum class Numeric { NOTSET, TRUE_VALUE, FALSE_VALUE, COUNT };
- inline void set_numeric(Numeric numeric);
- inline Numeric numeric() const;
-
- // CaseFirst: "hc"
- //
- // ecma402 #sec-Intl.Locale.prototype.hourCycle
- enum class HourCycle {
- H11, // 12-hour format start with hour 0 and go up to 11.
- H12, // 12-hour format start with hour 1 and go up to 12.
- H23, // 24-hour format start with hour 0 and go up to 23.
- H24, // 24-hour format start with hour 1 and go up to 24.
- COUNT
- };
- inline void set_hour_cycle(HourCycle hour_cycle);
- inline HourCycle hour_cycle() const;
-
-// Bit positions in |flags|.
-#define FLAGS_BIT_FIELDS(V, _) \
- V(CaseFirstBits, CaseFirst, 2, _) \
- V(NumericBits, Numeric, 2, _) \
- V(HourCycleBits, HourCycle, 2, _)
- DEFINE_BIT_FIELDS(FLAGS_BIT_FIELDS)
-#undef FLAGS_BIT_FIELDS
-
- STATIC_ASSERT(CaseFirst::UPPER <= CaseFirstBits::kMax);
- STATIC_ASSERT(CaseFirst::LOWER <= CaseFirstBits::kMax);
- STATIC_ASSERT(CaseFirst::FALSE_VALUE <= CaseFirstBits::kMax);
- STATIC_ASSERT(Numeric::NOTSET <= NumericBits::kMax);
- STATIC_ASSERT(Numeric::FALSE_VALUE <= NumericBits::kMax);
- STATIC_ASSERT(Numeric::TRUE_VALUE <= NumericBits::kMax);
- STATIC_ASSERT(HourCycle::H11 <= HourCycleBits::kMax);
- STATIC_ASSERT(HourCycle::H12 <= HourCycleBits::kMax);
- STATIC_ASSERT(HourCycle::H23 <= HourCycleBits::kMax);
- STATIC_ASSERT(HourCycle::H24 <= HourCycleBits::kMax);
-
- // [flags] Bit field containing various flags about the function.
- DECL_INT_ACCESSORS(flags)
+ DECL_ACCESSORS(icu_locale, Managed<icu::Locale>)
DECL_PRINTER(JSLocale)
DECL_VERIFIER(JSLocale)
// Layout description.
- static const int kJSLocaleOffset = JSObject::kHeaderSize;
- // Locale fields.
- static const int kLanguageOffset = kJSLocaleOffset + kPointerSize;
- static const int kScriptOffset = kLanguageOffset + kPointerSize;
- static const int kRegionOffset = kScriptOffset + kPointerSize;
- static const int kBaseNameOffset = kRegionOffset + kPointerSize;
- static const int kLocaleOffset = kBaseNameOffset + kPointerSize;
- // Unicode extension fields.
- static const int kFlagsOffset = kLocaleOffset + kPointerSize;
- static const int kCalendarOffset = kFlagsOffset + kPointerSize;
- static const int kCollationOffset = kCalendarOffset + kPointerSize;
- static const int kNumberingSystemOffset = kCollationOffset + kPointerSize;
- // Final size.
- static const int kSize = kNumberingSystemOffset + kPointerSize;
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(JSLocale);
+#define JS_LOCALE_FIELDS(V) \
+ V(kICULocaleOffset, kTaggedSize) \
+ V(kSize, 0)
+
+ DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize, JS_LOCALE_FIELDS)
+#undef JS_LOCALE_FIELDS
+
+ OBJECT_CONSTRUCTORS(JSLocale, JSObject);
};
} // namespace internal
diff --git a/deps/v8/src/objects/js-number-format-inl.h b/deps/v8/src/objects/js-number-format-inl.h
index 880ef9344f..077abda0c9 100644
--- a/deps/v8/src/objects/js-number-format-inl.h
+++ b/deps/v8/src/objects/js-number-format-inl.h
@@ -18,6 +18,8 @@
namespace v8 {
namespace internal {
+OBJECT_CONSTRUCTORS_IMPL(JSNumberFormat, JSObject)
+
ACCESSORS(JSNumberFormat, locale, String, kLocaleOffset)
ACCESSORS(JSNumberFormat, icu_number_format, Managed<icu::NumberFormat>,
kICUNumberFormatOffset)
diff --git a/deps/v8/src/objects/js-number-format.cc b/deps/v8/src/objects/js-number-format.cc
index 9fe7c30a9d..646cbed8e7 100644
--- a/deps/v8/src/objects/js-number-format.cc
+++ b/deps/v8/src/objects/js-number-format.cc
@@ -18,8 +18,6 @@
#include "unicode/decimfmt.h"
#include "unicode/locid.h"
#include "unicode/numfmt.h"
-#include "unicode/strenum.h"
-#include "unicode/ucurr.h"
#include "unicode/uloc.h"
namespace v8 {
@@ -27,6 +25,20 @@ namespace internal {
namespace {
+UNumberFormatStyle ToNumberFormatStyle(
+ JSNumberFormat::CurrencyDisplay currency_display) {
+ switch (currency_display) {
+ case JSNumberFormat::CurrencyDisplay::SYMBOL:
+ return UNUM_CURRENCY;
+ case JSNumberFormat::CurrencyDisplay::CODE:
+ return UNUM_CURRENCY_ISO;
+ case JSNumberFormat::CurrencyDisplay::NAME:
+ return UNUM_CURRENCY_PLURAL;
+ case JSNumberFormat::CurrencyDisplay::COUNT:
+ UNREACHABLE();
+ }
+}
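
Editor's note: the mapped styles feed straight into icu::NumberFormat::createInstance; a small sketch of the "code" display, where the formatted output is an assumption about en-US data:

    UErrorCode status = U_ZERO_ERROR;
    std::unique_ptr<icu::NumberFormat> fmt(icu::NumberFormat::createInstance(
        icu::Locale("en", "US"), UNUM_CURRENCY_ISO, status));  // "code"
    icu::UnicodeString out;
    fmt->format(12.5, out);  // expected "USD 12.50"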
+
// ecma-402/#sec-currencydigits
// The currency is expected to be an all-uppercase string value.
int CurrencyDigits(const icu::UnicodeString& currency) {
@@ -60,14 +72,13 @@ bool IsWellFormedCurrencyCode(const std::string& currency) {
} // anonymous namespace
// static
+// ecma402 #sec-intl.numberformat.prototype.resolvedoptions
Handle<JSObject> JSNumberFormat::ResolvedOptions(
Isolate* isolate, Handle<JSNumberFormat> number_format_holder) {
Factory* factory = isolate->factory();
+
+ // 4. Let options be ! ObjectCreate(%ObjectPrototype%).
Handle<JSObject> options = factory->NewJSObject(isolate->object_function());
- CHECK(JSReceiver::CreateDataProperty(
- isolate, options, factory->style_string(),
- number_format_holder->StyleAsString(), kDontThrow)
- .FromJust());
icu::NumberFormat* number_format =
number_format_holder->icu_number_format()->raw();
@@ -78,14 +89,29 @@ Handle<JSObject> JSNumberFormat::ResolvedOptions(
Handle<String> locale =
Handle<String>(number_format_holder->locale(), isolate);
+
+ std::unique_ptr<char[]> locale_str = locale->ToCString();
+ icu::Locale icu_locale = Intl::CreateICULocale(locale_str.get());
+
+ std::string numbering_system = Intl::GetNumberingSystem(icu_locale);
+
+ // 5. For each row of Table 4, except the header row, in table order, do
+ // Table 4: Resolved Options of NumberFormat Instances
+ // Internal Slot Property
+ // [[Locale]] "locale"
+ // [[NumberingSystem]] "numberingSystem"
+ // [[Style]] "style"
+ // [[Currency]] "currency"
+ // [[CurrencyDisplay]] "currencyDisplay"
+ // [[MinimumIntegerDigits]] "minimumIntegerDigits"
+ // [[MinimumFractionDigits]] "minimumFractionDigits"
+ // [[MaximumFractionDigits]] "maximumFractionDigits"
+ // [[MinimumSignificantDigits]] "minimumSignificantDigits"
+ // [[MaximumSignificantDigits]] "maximumSignificantDigits"
+ // [[UseGrouping]] "useGrouping"
CHECK(JSReceiver::CreateDataProperty(
isolate, options, factory->locale_string(), locale, kDontThrow)
.FromJust());
- UErrorCode error = U_ZERO_ERROR;
- icu::Locale icu_locale = number_format->getLocale(ULOC_VALID_LOCALE, error);
- DCHECK(U_SUCCESS(error));
-
- std::string numbering_system = Intl::GetNumberingSystem(icu_locale);
if (!numbering_system.empty()) {
CHECK(JSReceiver::CreateDataProperty(
isolate, options, factory->numberingSystem_string(),
@@ -93,12 +119,11 @@ Handle<JSObject> JSNumberFormat::ResolvedOptions(
kDontThrow)
.FromJust());
}
-
+ CHECK(JSReceiver::CreateDataProperty(
+ isolate, options, factory->style_string(),
+ number_format_holder->StyleAsString(), kDontThrow)
+ .FromJust());
if (number_format_holder->style() == Style::CURRENCY) {
- CHECK(JSReceiver::CreateDataProperty(
- isolate, options, factory->currencyDisplay_string(),
- number_format_holder->CurrencyDisplayAsString(), kDontThrow)
- .FromJust());
icu::UnicodeString currency(number_format->getCurrency());
DCHECK(!currency.isEmpty());
CHECK(JSReceiver::CreateDataProperty(
@@ -110,8 +135,12 @@ Handle<JSObject> JSNumberFormat::ResolvedOptions(
.ToHandleChecked(),
kDontThrow)
.FromJust());
- }
+ CHECK(JSReceiver::CreateDataProperty(
+ isolate, options, factory->currencyDisplay_string(),
+ number_format_holder->CurrencyDisplayAsString(), kDontThrow)
+ .FromJust());
+ }
CHECK(JSReceiver::CreateDataProperty(
isolate, options, factory->minimumIntegerDigits_string(),
factory->NewNumberFromInt(number_format->getMinimumIntegerDigits()),
@@ -148,7 +177,6 @@ Handle<JSObject> JSNumberFormat::ResolvedOptions(
factory->ToBoolean((number_format->isGroupingUsed() == TRUE)),
kDontThrow)
.FromJust());
-
return options;
}
@@ -188,11 +216,13 @@ MaybeHandle<JSNumberFormat> JSNumberFormat::Initialize(
// set the flags to 0 ASAP.
number_format->set_flags(0);
Factory* factory = isolate->factory();
+
// 1. Let requestedLocales be ? CanonicalizeLocaleList(locales).
- Handle<JSObject> requested_locales;
- ASSIGN_RETURN_ON_EXCEPTION(isolate, requested_locales,
- Intl::CanonicalizeLocaleListJS(isolate, locales),
- JSNumberFormat);
+ Maybe<std::vector<std::string>> maybe_requested_locales =
+ Intl::CanonicalizeLocaleList(isolate, locales);
+ MAYBE_RETURN(maybe_requested_locales, Handle<JSNumberFormat>());
+ std::vector<std::string> requested_locales =
+ maybe_requested_locales.FromJust();
// 2. If options is undefined, then
if (options_obj->IsUndefined(isolate)) {
@@ -211,81 +241,38 @@ MaybeHandle<JSNumberFormat> JSNumberFormat::Initialize(
Handle<JSReceiver> options = Handle<JSReceiver>::cast(options_obj);
// 4. Let opt be a new Record.
- //
// 5. Let matcher be ? GetOption(options, "localeMatcher", "string", «
// "lookup", "best fit" », "best fit").
- //
// 6. Set opt.[[localeMatcher]] to matcher.
- //
+ Maybe<Intl::MatcherOption> maybe_locale_matcher =
+ Intl::GetLocaleMatcher(isolate, options, "Intl.NumberFormat");
+ MAYBE_RETURN(maybe_locale_matcher, MaybeHandle<JSNumberFormat>());
+ Intl::MatcherOption matcher = maybe_locale_matcher.FromJust();
+
// 7. Let localeData be %NumberFormat%.[[LocaleData]].
- //
// 8. Let r be ResolveLocale(%NumberFormat%.[[AvailableLocales]],
// requestedLocales, opt, %NumberFormat%.[[RelevantExtensionKeys]],
// localeData).
- //
- // 9. Set numberFormat.[[Locale]] to r.[[locale]].
-
- Handle<JSObject> r;
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, r,
- Intl::ResolveLocale(isolate, "numberformat", requested_locales, options),
- JSNumberFormat);
-
- Handle<String> locale_with_extension_str =
- isolate->factory()->NewStringFromStaticChars("localeWithExtension");
- Handle<Object> locale_with_extension_obj =
- JSObject::GetDataProperty(r, locale_with_extension_str);
-
- // The locale_with_extension has to be a string. Either a user
- // provided canonicalized string or the default locale.
- CHECK(locale_with_extension_obj->IsString());
- Handle<String> locale_with_extension =
- Handle<String>::cast(locale_with_extension_obj);
-
- icu::Locale icu_locale =
- Intl::CreateICULocale(isolate, locale_with_extension);
- number_format->set_locale(*locale_with_extension);
- DCHECK(!icu_locale.isBogus());
-
std::set<std::string> relevant_extension_keys{"nu"};
- std::map<std::string, std::string> extensions =
- Intl::LookupUnicodeExtensions(icu_locale, relevant_extension_keys);
+ Intl::ResolvedLocale r =
+ Intl::ResolveLocale(isolate, JSNumberFormat::GetAvailableLocales(),
+ requested_locales, matcher, relevant_extension_keys);
- // The list that is the value of the "nu" field of any locale field of
- // [[LocaleData]] must not include the values "native", "traditio", or
- // "finance".
- //
- // See https://tc39.github.io/ecma402/#sec-intl.numberformat-internal-slots
- if (extensions.find("nu") != extensions.end()) {
- const std::string value = extensions.at("nu");
- if (value == "native" || value == "traditio" || value == "finance") {
- // 10. Set numberFormat.[[NumberingSystem]] to r.[[nu]].
- UErrorCode status = U_ZERO_ERROR;
- icu_locale.setKeywordValue("nu", nullptr, status);
- CHECK(U_SUCCESS(status));
- }
- }
+ // 9. Set numberFormat.[[Locale]] to r.[[locale]].
+ Handle<String> locale_str =
+ isolate->factory()->NewStringFromAsciiChecked(r.locale.c_str());
+ number_format->set_locale(*locale_str);
// 11. Let dataLocale be r.[[dataLocale]].
//
// 12. Let style be ? GetOption(options, "style", "string", « "decimal",
// "percent", "currency" », "decimal").
const char* service = "Intl.NumberFormat";
- std::unique_ptr<char[]> style_cstr;
- const std::vector<const char*> style_values = {"decimal", "percent",
- "currency"};
- Maybe<bool> found_style = Intl::GetStringOption(
- isolate, options, "style", style_values, service, &style_cstr);
- MAYBE_RETURN(found_style, MaybeHandle<JSNumberFormat>());
- Style style = Style::DECIMAL;
- if (found_style.FromJust()) {
- DCHECK_NOT_NULL(style_cstr.get());
- if (strcmp(style_cstr.get(), "percent") == 0) {
- style = Style::PERCENT;
- } else if (strcmp(style_cstr.get(), "currency") == 0) {
- style = Style::CURRENCY;
- }
- }
+ Maybe<Style> maybe_style = Intl::GetStringOption<Style>(
+ isolate, options, "style", service, {"decimal", "percent", "currency"},
+ {Style::DECIMAL, Style::PERCENT, Style::CURRENCY}, Style::DECIMAL);
+ MAYBE_RETURN(maybe_style, MaybeHandle<JSNumberFormat>());
+ Style style = maybe_style.FromJust();
// 13. Set numberFormat.[[Style]] to style.
number_format->set_style(style);
@@ -334,45 +321,35 @@ MaybeHandle<JSNumberFormat> JSNumberFormat::Initialize(
// 18. Let currencyDisplay be ? GetOption(options, "currencyDisplay",
// "string", « "code", "symbol", "name" », "symbol").
- std::unique_ptr<char[]> currency_display_cstr;
- const std::vector<const char*> currency_display_values = {"code", "name",
- "symbol"};
- Maybe<bool> found_currency_display = Intl::GetStringOption(
- isolate, options, "currencyDisplay", currency_display_values, service,
- &currency_display_cstr);
- MAYBE_RETURN(found_currency_display, MaybeHandle<JSNumberFormat>());
- CurrencyDisplay currency_display = CurrencyDisplay::SYMBOL;
- UNumberFormatStyle format_style = UNUM_CURRENCY;
-
- if (found_currency_display.FromJust()) {
- DCHECK_NOT_NULL(currency_display_cstr.get());
- if (strcmp(currency_display_cstr.get(), "code") == 0) {
- currency_display = CurrencyDisplay::CODE;
- format_style = UNUM_CURRENCY_ISO;
- } else if (strcmp(currency_display_cstr.get(), "name") == 0) {
- currency_display = CurrencyDisplay::NAME;
- format_style = UNUM_CURRENCY_PLURAL;
- }
- }
+ Maybe<CurrencyDisplay> maybe_currencyDisplay =
+ Intl::GetStringOption<CurrencyDisplay>(
+ isolate, options, "currencyDisplay", service,
+ {"code", "symbol", "name"},
+ {CurrencyDisplay::CODE, CurrencyDisplay::SYMBOL,
+ CurrencyDisplay::NAME},
+ CurrencyDisplay::SYMBOL);
+ MAYBE_RETURN(maybe_currencyDisplay, MaybeHandle<JSNumberFormat>());
+ CurrencyDisplay currency_display = maybe_currencyDisplay.FromJust();
+ UNumberFormatStyle format_style = ToNumberFormatStyle(currency_display);
UErrorCode status = U_ZERO_ERROR;
std::unique_ptr<icu::NumberFormat> icu_number_format;
if (style == Style::DECIMAL) {
icu_number_format.reset(
- icu::NumberFormat::createInstance(icu_locale, status));
+ icu::NumberFormat::createInstance(r.icu_locale, status));
} else if (style == Style::PERCENT) {
icu_number_format.reset(
- icu::NumberFormat::createPercentInstance(icu_locale, status));
+ icu::NumberFormat::createPercentInstance(r.icu_locale, status));
} else {
DCHECK_EQ(style, Style::CURRENCY);
icu_number_format.reset(
- icu::NumberFormat::createInstance(icu_locale, format_style, status));
+ icu::NumberFormat::createInstance(r.icu_locale, format_style, status));
}
if (U_FAILURE(status) || icu_number_format.get() == nullptr) {
status = U_ZERO_ERROR;
// Remove extensions and try again.
- icu::Locale no_extension_locale(icu_locale.getBaseName());
+ icu::Locale no_extension_locale(r.icu_locale.getBaseName());
icu_number_format.reset(
icu::NumberFormat::createInstance(no_extension_locale, status));
@@ -485,14 +462,9 @@ Handle<String> JSNumberFormat::CurrencyDisplayAsString() const {
}
MaybeHandle<String> JSNumberFormat::FormatNumber(
- Isolate* isolate, Handle<JSNumberFormat> number_format_holder,
- double number) {
- icu::NumberFormat* number_format =
- number_format_holder->icu_number_format()->raw();
- CHECK_NOT_NULL(number_format);
-
+ Isolate* isolate, const icu::NumberFormat& number_format, double number) {
icu::UnicodeString result;
- number_format->format(number, result);
+ number_format.format(number, result);
return isolate->factory()->NewStringFromTwoByte(Vector<const uint16_t>(
reinterpret_cast<const uint16_t*>(result.getBuffer()), result.length()));
@@ -650,23 +622,23 @@ std::vector<NumberFormatSpan> FlattenRegionsToParts(
return out_parts;
}
-MaybeHandle<JSArray> JSNumberFormat::FormatToParts(
- Isolate* isolate, Handle<JSNumberFormat> number_format, double number) {
- Factory* factory = isolate->factory();
- icu::NumberFormat* fmt = number_format->icu_number_format()->raw();
- CHECK_NOT_NULL(fmt);
-
+Maybe<int> JSNumberFormat::FormatToParts(Isolate* isolate,
+ Handle<JSArray> result,
+ int start_index,
+ const icu::NumberFormat& fmt,
+ double number, Handle<String> unit) {
icu::UnicodeString formatted;
icu::FieldPositionIterator fp_iter;
UErrorCode status = U_ZERO_ERROR;
- fmt->format(number, formatted, &fp_iter, status);
+ fmt.format(number, formatted, &fp_iter, status);
if (U_FAILURE(status)) {
- THROW_NEW_ERROR(isolate, NewTypeError(MessageTemplate::kIcuError), JSArray);
+ THROW_NEW_ERROR_RETURN_VALUE(
+ isolate, NewTypeError(MessageTemplate::kIcuError), Nothing<int>());
}
- Handle<JSArray> result = factory->NewJSArray(0);
int32_t length = formatted.length();
- if (length == 0) return result;
+ int index = start_index;
+ if (length == 0) return Just(index);
std::vector<NumberFormatSpan> regions;
// Add a "literal" backdrop for the entire string. This will be used if no
@@ -685,7 +657,6 @@ MaybeHandle<JSArray> JSNumberFormat::FormatToParts(
std::vector<NumberFormatSpan> parts = FlattenRegionsToParts(&regions);
- int index = 0;
for (auto it = parts.begin(); it < parts.end(); it++) {
NumberFormatSpan part = *it;
Handle<String> field_type_string =
@@ -693,17 +664,43 @@ MaybeHandle<JSArray> JSNumberFormat::FormatToParts(
? isolate->factory()->literal_string()
: IcuNumberFieldIdToNumberType(part.field_id, number, isolate);
Handle<String> substring;
- ASSIGN_RETURN_ON_EXCEPTION(
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
isolate, substring,
Intl::ToString(isolate, formatted, part.begin_pos, part.end_pos),
- JSArray);
- Intl::AddElement(isolate, result, index, field_type_string, substring);
+ Nothing<int>());
+ if (unit.is_null()) {
+ Intl::AddElement(isolate, result, index, field_type_string, substring);
+ } else {
+ Intl::AddElement(isolate, result, index, field_type_string, substring,
+ isolate->factory()->unit_string(), unit);
+ }
++index;
}
JSObject::ValidateElements(*result);
+ return Just(index);
+}
+
+MaybeHandle<JSArray> JSNumberFormat::FormatToParts(
+ Isolate* isolate, Handle<JSNumberFormat> number_format, double number) {
+ Factory* factory = isolate->factory();
+ icu::NumberFormat* fmt = number_format->icu_number_format()->raw();
+ CHECK_NOT_NULL(fmt);
+
+ Handle<JSArray> result = factory->NewJSArray(0);
+
+ Maybe<int> maybe_format_to_parts = JSNumberFormat::FormatToParts(
+ isolate, result, 0, *fmt, number, Handle<String>());
+ MAYBE_RETURN(maybe_format_to_parts, Handle<JSArray>());
return result;
}
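
Editor's note: the shared helper rests on ICU's attributed formatting; a hedged standalone sketch of that pattern (locale and output are illustrative):

    UErrorCode status = U_ZERO_ERROR;
    std::unique_ptr<icu::NumberFormat> fmt(
        icu::NumberFormat::createInstance(icu::Locale("en", "US"), status));
    icu::UnicodeString formatted;
    icu::FieldPositionIterator fp_iter;
    fmt->format(1234.5, formatted, &fp_iter, status);  // "1,234.5"
    icu::FieldPosition pos;
    while (fp_iter.next(pos)) {
      // pos.getField() is a UNumberFormatFields value (integer part, group
      // separator, decimal separator, fraction, ...); getBeginIndex() and
      // getEndIndex() bound the span that becomes one "part".
    }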
+std::set<std::string> JSNumberFormat::GetAvailableLocales() {
+ int32_t num_locales = 0;
+ const icu::Locale* icu_available_locales =
+ icu::NumberFormat::getAvailableLocales(num_locales);
+ return Intl::BuildLocaleSet(icu_available_locales, num_locales);
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/objects/js-number-format.h b/deps/v8/src/objects/js-number-format.h
index 52443dc3d3..03071a25e4 100644
--- a/deps/v8/src/objects/js-number-format.h
+++ b/deps/v8/src/objects/js-number-format.h
@@ -9,6 +9,9 @@
#ifndef V8_OBJECTS_JS_NUMBER_FORMAT_H_
#define V8_OBJECTS_JS_NUMBER_FORMAT_H_
+#include <set>
+#include <string>
+
#include "src/heap/factory.h"
#include "src/isolate.h"
#include "src/objects.h"
@@ -43,8 +46,22 @@ class JSNumberFormat : public JSObject {
V8_WARN_UNUSED_RESULT static MaybeHandle<JSArray> FormatToParts(
Isolate* isolate, Handle<JSNumberFormat> number_format, double number);
+ // A utility function used by the above JSNumberFormat::FormatToParts()
+ // and JSRelativeTimeFormat::FormatToParts().
+ // Formats the number using the icu::NumberFormat to get the field
+ // information. It adds objects to the result array, starting at
+ // start_index, and returns the total number of elements in the result
+ // array. For each object added as an element, it sets the substring of
+ // the field as "value" and the field type as "type". If unit is not
+ // null, it also sets it as "unit" on each added object.
+ V8_WARN_UNUSED_RESULT static Maybe<int> FormatToParts(
+ Isolate* isolate, Handle<JSArray> result, int start_index,
+ const icu::NumberFormat& fmt, double number, Handle<String> unit);
+
V8_WARN_UNUSED_RESULT static MaybeHandle<String> FormatNumber(
- Isolate* isolate, Handle<JSNumberFormat> number_format, double number);
+ Isolate* isolate, const icu::NumberFormat& number_format, double number);
+
+ static std::set<std::string> GetAvailableLocales();
Handle<String> StyleAsString() const;
Handle<String> CurrencyDisplayAsString() const;
@@ -78,12 +95,12 @@ class JSNumberFormat : public JSObject {
inline CurrencyDisplay currency_display() const;
// Layout description.
-#define JS_NUMBER_FORMAT_FIELDS(V) \
- V(kLocaleOffset, kPointerSize) \
- V(kICUNumberFormatOffset, kPointerSize) \
- V(kBoundFormatOffset, kPointerSize) \
- V(kFlagsOffset, kPointerSize) \
- /* Total size. */ \
+#define JS_NUMBER_FORMAT_FIELDS(V) \
+ V(kLocaleOffset, kTaggedSize) \
+ V(kICUNumberFormatOffset, kTaggedSize) \
+ V(kBoundFormatOffset, kTaggedSize) \
+ V(kFlagsOffset, kTaggedSize) \
+ /* Total size. */ \
V(kSize, 0)
DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize, JS_NUMBER_FORMAT_FIELDS)
@@ -110,8 +127,7 @@ class JSNumberFormat : public JSObject {
DECL_ACCESSORS(bound_format, Object)
DECL_INT_ACCESSORS(flags)
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(JSNumberFormat);
+ OBJECT_CONSTRUCTORS(JSNumberFormat, JSObject);
};
struct NumberFormatSpan {
diff --git a/deps/v8/src/objects/js-objects-inl.h b/deps/v8/src/objects/js-objects-inl.h
index 53483136d8..1de9a10c2a 100644
--- a/deps/v8/src/objects/js-objects-inl.h
+++ b/deps/v8/src/objects/js-objects-inl.h
@@ -11,8 +11,13 @@
#include "src/heap/heap-write-barrier.h"
#include "src/keys.h"
#include "src/lookup-inl.h"
+#include "src/objects/embedder-data-slot-inl.h"
+#include "src/objects/feedback-cell-inl.h"
+#include "src/objects/heap-number-inl.h"
#include "src/objects/property-array-inl.h"
#include "src/objects/shared-function-info.h"
+#include "src/objects/slots.h"
+#include "src/objects/smi-inl.h"
#include "src/prototype.h"
// Has to be the last include (doesn't have include guards):
@@ -21,13 +26,28 @@
namespace v8 {
namespace internal {
+OBJECT_CONSTRUCTORS_IMPL(JSReceiver, HeapObject)
+OBJECT_CONSTRUCTORS_IMPL(JSObject, JSReceiver)
+OBJECT_CONSTRUCTORS_IMPL(JSAsyncFromSyncIterator, JSObject)
+OBJECT_CONSTRUCTORS_IMPL(JSBoundFunction, JSObject)
+OBJECT_CONSTRUCTORS_IMPL(JSDate, JSObject)
+OBJECT_CONSTRUCTORS_IMPL(JSFunction, JSObject)
+OBJECT_CONSTRUCTORS_IMPL(JSGlobalObject, JSObject)
+OBJECT_CONSTRUCTORS_IMPL(JSGlobalProxy, JSObject)
+JSIteratorResult::JSIteratorResult(Address ptr) : JSObject(ptr) {}
+OBJECT_CONSTRUCTORS_IMPL(JSMessageObject, JSObject)
+OBJECT_CONSTRUCTORS_IMPL(JSStringIterator, JSObject)
+OBJECT_CONSTRUCTORS_IMPL(JSValue, JSObject)
+
+NEVER_READ_ONLY_SPACE_IMPL(JSReceiver)
+
CAST_ACCESSOR(JSAsyncFromSyncIterator)
CAST_ACCESSOR(JSBoundFunction)
-CAST_ACCESSOR(JSDataView)
CAST_ACCESSOR(JSDate)
CAST_ACCESSOR(JSFunction)
CAST_ACCESSOR(JSGlobalObject)
CAST_ACCESSOR(JSGlobalProxy)
+CAST_ACCESSOR(JSIteratorResult)
CAST_ACCESSOR(JSMessageObject)
CAST_ACCESSOR(JSObject)
CAST_ACCESSOR(JSReceiver)
@@ -85,18 +105,18 @@ V8_WARN_UNUSED_RESULT MaybeHandle<FixedArray> JSReceiver::OwnPropertyKeys(
GetKeysConversion::kConvertToString);
}
-bool JSObject::PrototypeHasNoElements(Isolate* isolate, JSObject* object) {
+bool JSObject::PrototypeHasNoElements(Isolate* isolate, JSObject object) {
DisallowHeapAllocation no_gc;
- HeapObject* prototype = HeapObject::cast(object->map()->prototype());
+ HeapObject prototype = HeapObject::cast(object->map()->prototype());
ReadOnlyRoots roots(isolate);
- HeapObject* null = roots.null_value();
- HeapObject* empty_fixed_array = roots.empty_fixed_array();
- HeapObject* empty_slow_element_dictionary =
+ HeapObject null = roots.null_value();
+ FixedArrayBase empty_fixed_array = roots.empty_fixed_array();
+ FixedArrayBase empty_slow_element_dictionary =
roots.empty_slow_element_dictionary();
while (prototype != null) {
- Map* map = prototype->map();
+ Map map = prototype->map();
if (map->IsCustomElementsReceiverMap()) return false;
- HeapObject* elements = JSObject::cast(prototype)->elements();
+ FixedArrayBase elements = JSObject::cast(prototype)->elements();
if (elements != empty_fixed_array &&
elements != empty_slow_element_dictionary) {
return false;
@@ -108,9 +128,9 @@ bool JSObject::PrototypeHasNoElements(Isolate* isolate, JSObject* object) {
ACCESSORS(JSReceiver, raw_properties_or_hash, Object, kPropertiesOrHashOffset)
-FixedArrayBase* JSObject::elements() const {
- Object* array = READ_FIELD(this, kElementsOffset);
- return static_cast<FixedArrayBase*>(array);
+FixedArrayBase JSObject::elements() const {
+ Object array = READ_FIELD(this, kElementsOffset);
+ return FixedArrayBase::cast(array);
}
void JSObject::EnsureCanContainHeapObjectElements(Handle<JSObject> object) {
@@ -125,9 +145,13 @@ void JSObject::EnsureCanContainHeapObjectElements(Handle<JSObject> object) {
}
}
-void JSObject::EnsureCanContainElements(Handle<JSObject> object,
- Object** objects, uint32_t count,
+template <typename TSlot>
+void JSObject::EnsureCanContainElements(Handle<JSObject> object, TSlot objects,
+ uint32_t count,
EnsureElementsMode mode) {
+ static_assert(std::is_same<TSlot, FullObjectSlot>::value ||
+ std::is_same<TSlot, ObjectSlot>::value,
+ "Only ObjectSlot and FullObjectSlot are expected here");
ElementsKind current_kind = object->GetElementsKind();
ElementsKind target_kind = current_kind;
{
@@ -135,9 +159,9 @@ void JSObject::EnsureCanContainElements(Handle<JSObject> object,
DCHECK(mode != ALLOW_COPIED_DOUBLE_ELEMENTS);
bool is_holey = IsHoleyElementsKind(current_kind);
if (current_kind == HOLEY_ELEMENTS) return;
- Object* the_hole = object->GetReadOnlyRoots().the_hole_value();
- for (uint32_t i = 0; i < count; ++i) {
- Object* current = *objects++;
+ Object the_hole = object->GetReadOnlyRoots().the_hole_value();
+ for (uint32_t i = 0; i < count; ++i, ++objects) {
+ Object current = *objects;
if (current == the_hole) {
is_holey = true;
target_kind = GetHoleyElementsKind(target_kind);
@@ -175,7 +199,7 @@ void JSObject::EnsureCanContainElements(Handle<JSObject> object,
if (mode == ALLOW_COPIED_DOUBLE_ELEMENTS) {
mode = DONT_ALLOW_DOUBLE_ELEMENTS;
}
- Object** objects =
+ ObjectSlot objects =
Handle<FixedArray>::cast(elements)->GetFirstElementAddress();
EnsureCanContainElements(object, objects, length, mode);
return;
@@ -211,27 +235,27 @@ void JSObject::SetMapAndElements(Handle<JSObject> object, Handle<Map> new_map,
object->set_elements(*value);
}
-void JSObject::set_elements(FixedArrayBase* value, WriteBarrierMode mode) {
- WRITE_FIELD(this, kElementsOffset, value);
- CONDITIONAL_WRITE_BARRIER(this, kElementsOffset, value, mode);
+void JSObject::set_elements(FixedArrayBase value, WriteBarrierMode mode) {
+ WRITE_FIELD(*this, kElementsOffset, value);
+ CONDITIONAL_WRITE_BARRIER(*this, kElementsOffset, value, mode);
}
void JSObject::initialize_elements() {
- FixedArrayBase* elements = map()->GetInitialElements();
- WRITE_FIELD(this, kElementsOffset, elements);
+ FixedArrayBase elements = map()->GetInitialElements();
+ WRITE_FIELD(*this, kElementsOffset, elements);
}
-InterceptorInfo* JSObject::GetIndexedInterceptor() {
+InterceptorInfo JSObject::GetIndexedInterceptor() {
return map()->GetIndexedInterceptor();
}
-InterceptorInfo* JSObject::GetNamedInterceptor() {
+InterceptorInfo JSObject::GetNamedInterceptor() {
return map()->GetNamedInterceptor();
}
int JSObject::GetHeaderSize() const { return GetHeaderSize(map()); }
-int JSObject::GetHeaderSize(const Map* map) {
+int JSObject::GetHeaderSize(const Map map) {
// Check for the most common kind of JavaScript object before
// falling into the generic switch. This speeds up the internal
// field operations considerably on average.
@@ -242,11 +266,35 @@ int JSObject::GetHeaderSize(const Map* map) {
}
// static
-int JSObject::GetEmbedderFieldCount(const Map* map) {
+int JSObject::GetEmbedderFieldsStartOffset(const Map map) {
+ // Embedder fields are located after the header size rounded up to the
+ // kSystemPointerSize, whereas in-object properties are at the end of the
+ // object.
+ int header_size = GetHeaderSize(map);
+ if (kTaggedSize == kSystemPointerSize) {
+ DCHECK(IsAligned(header_size, kSystemPointerSize));
+ return header_size;
+ } else {
+ return RoundUp(header_size, kSystemPointerSize);
+ }
+}
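
Concretely: with pointer compression, kTaggedSize is 4 while kSystemPointerSize is 8, so a header size that is only tagged-aligned gets rounded up before the first embedder field; without compression the two sizes coincide and the DCHECK branch returns the header size unchanged. A worked instance of the rounding, with an illustrative 28-byte header:

    // RoundUp(28, 8) == 32: under compression a 28-byte header is padded to
    // 32 so embedder fields start on a system-pointer boundary.
    static_assert(((28 + 8 - 1) / 8) * 8 == 32, "RoundUp example");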
+
+int JSObject::GetEmbedderFieldsStartOffset() {
+ return GetEmbedderFieldsStartOffset(map());
+}
+
+// static
+int JSObject::GetEmbedderFieldCount(const Map map) {
int instance_size = map->instance_size();
if (instance_size == kVariableSizeSentinel) return 0;
- return ((instance_size - GetHeaderSize(map)) >> kPointerSizeLog2) -
- map->GetInObjectProperties();
+ // Embedder fields are located after the header size rounded up to the
+ // kSystemPointerSize, whereas in-object properties are at the end of the
+ // object. We don't have to round up the header size here because division by
+ // kEmbedderDataSlotSizeInTaggedSlots will swallow potential padding in case
+ // of (kTaggedSize != kSystemPointerSize) anyway.
+ return (((instance_size - GetHeaderSize(map)) >> kTaggedSizeLog2) -
+ map->GetInObjectProperties()) /
+ kEmbedderDataSlotSizeInTaggedSlots;
}
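
A worked instance of the new formula, with illustrative numbers: take instance_size == 64, a 24-byte header, no in-object properties, and pointer compression (kTaggedSizeLog2 == 2, kEmbedderDataSlotSizeInTaggedSlots == 2, since each 8-byte embedder slot spans two 4-byte tagged slots):

    // (64 - 24) >> 2 == 10 tagged slots after the header; 10 / 2 == 5
    // embedder fields. The division absorbs any alignment padding.
    static_assert((((64 - 24) >> 2) - 0) / 2 == 5, "embedder field count");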
int JSObject::GetEmbedderFieldCount() const {
@@ -254,35 +302,21 @@ int JSObject::GetEmbedderFieldCount() const {
}
int JSObject::GetEmbedderFieldOffset(int index) {
- DCHECK(index < GetEmbedderFieldCount() && index >= 0);
- return GetHeaderSize() + (kPointerSize * index);
+ DCHECK_LT(static_cast<unsigned>(index),
+ static_cast<unsigned>(GetEmbedderFieldCount()));
+ return GetEmbedderFieldsStartOffset() + (kEmbedderDataSlotSize * index);
}
-Object* JSObject::GetEmbedderField(int index) {
- DCHECK(index < GetEmbedderFieldCount() && index >= 0);
- // Internal objects do follow immediately after the header, whereas in-object
- // properties are at the end of the object. Therefore there is no need
- // to adjust the index here.
- return READ_FIELD(this, GetHeaderSize() + (kPointerSize * index));
+Object JSObject::GetEmbedderField(int index) {
+ return EmbedderDataSlot(*this, index).load_tagged();
}
-void JSObject::SetEmbedderField(int index, Object* value) {
- DCHECK(index < GetEmbedderFieldCount() && index >= 0);
- // Internal objects do follow immediately after the header, whereas in-object
- // properties are at the end of the object. Therefore there is no need
- // to adjust the index here.
- int offset = GetHeaderSize() + (kPointerSize * index);
- WRITE_FIELD(this, offset, value);
- WRITE_BARRIER(this, offset, value);
+void JSObject::SetEmbedderField(int index, Object value) {
+ EmbedderDataSlot::store_tagged(*this, index, value);
}
-void JSObject::SetEmbedderField(int index, Smi* value) {
- DCHECK(index < GetEmbedderFieldCount() && index >= 0);
- // Internal objects do follow immediately after the header, whereas in-object
- // properties are at the end of the object. Therefore there is no need
- // to adjust the index here.
- int offset = GetHeaderSize() + (kPointerSize * index);
- WRITE_FIELD(this, offset, value);
+void JSObject::SetEmbedderField(int index, Smi value) {
+ EmbedderDataSlot(*this, index).store_smi(value);
}
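
The open-coded offset arithmetic, DCHECKs, and write barriers of the old accessors collapse into EmbedderDataSlot, which resolves (object, index) to a slot location once and funnels every tagged and Smi access through it. A simplified sketch of that centralization, with an assumed 8-byte slot size:

    // Sketch only: compute the slot's field offset in one place so loads and
    // stores cannot disagree about the layout.
    class EmbedderDataSlotSketch {
     public:
      static const int kSlotSize = 8;  // assumed: one system pointer per slot
      EmbedderDataSlotSketch(int fields_start_offset, int index)
          : offset_(fields_start_offset + index * kSlotSize) {}
      int offset() const { return offset_; }
     private:
      const int offset_;  // byte offset of this slot within the object
    };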
bool JSObject::IsUnboxedDoubleField(FieldIndex index) {
@@ -293,7 +327,7 @@ bool JSObject::IsUnboxedDoubleField(FieldIndex index) {
// Access fast-case object properties at index. The use of these routines
// is needed to correctly distinguish between properties stored in-object and
// properties stored in the properties array.
-Object* JSObject::RawFastPropertyAt(FieldIndex index) {
+Object JSObject::RawFastPropertyAt(FieldIndex index) {
DCHECK(!IsUnboxedDoubleField(index));
if (index.is_inobject()) {
return READ_FIELD(this, index.offset());
@@ -312,11 +346,11 @@ uint64_t JSObject::RawFastDoublePropertyAsBitsAt(FieldIndex index) {
return READ_UINT64_FIELD(this, index.offset());
}
-void JSObject::RawFastPropertyAtPut(FieldIndex index, Object* value) {
+void JSObject::RawFastPropertyAtPut(FieldIndex index, Object value) {
if (index.is_inobject()) {
int offset = index.offset();
- WRITE_FIELD(this, offset, value);
- WRITE_BARRIER(this, offset, value);
+ WRITE_FIELD(*this, offset, value);
+ WRITE_BARRIER(*this, offset, value);
} else {
property_array()->set(index.outobject_array_index(), value);
}
@@ -324,14 +358,15 @@ void JSObject::RawFastPropertyAtPut(FieldIndex index, Object* value) {
void JSObject::RawFastDoublePropertyAsBitsAtPut(FieldIndex index,
uint64_t bits) {
- // Double unboxing is enabled only on 64-bit platforms.
- DCHECK_EQ(kDoubleSize, kPointerSize);
- Address field_addr = FIELD_ADDR(this, index.offset());
+ // Double unboxing is enabled only on 64-bit platforms without pointer
+ // compression.
+ DCHECK_EQ(kDoubleSize, kTaggedSize);
+ Address field_addr = FIELD_ADDR(*this, index.offset());
base::Relaxed_Store(reinterpret_cast<base::AtomicWord*>(field_addr),
static_cast<base::AtomicWord>(bits));
}
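
base::Relaxed_Store is V8's wrapper over a relaxed atomic word store: the double's bit pattern is published without any ordering guarantee, which suffices because the field holds raw bits rather than a pointer the GC must trace. Roughly the same effect with the standard library (illustrative only; casting raw memory to std::atomic is not strictly portable):

    #include <atomic>
    #include <cstdint>

    // Sketch: publish 64 raw bits into a field with relaxed memory ordering.
    inline void RelaxedStoreBits(void* field_addr, uint64_t bits) {
      reinterpret_cast<std::atomic<uint64_t>*>(field_addr)
          ->store(bits, std::memory_order_relaxed);
    }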
-void JSObject::FastPropertyAtPut(FieldIndex index, Object* value) {
+void JSObject::FastPropertyAtPut(FieldIndex index, Object value) {
if (IsUnboxedDoubleField(index)) {
DCHECK(value->IsMutableHeapNumber());
// Ensure that all bits of the double value are preserved.
@@ -343,7 +378,7 @@ void JSObject::FastPropertyAtPut(FieldIndex index, Object* value) {
}
void JSObject::WriteToField(int descriptor, PropertyDetails details,
- Object* value) {
+ Object value) {
DCHECK_EQ(kField, details.location());
DCHECK_EQ(kData, details.kind());
DisallowHeapAllocation no_gc;
@@ -379,23 +414,22 @@ int JSObject::GetInObjectPropertyOffset(int index) {
return map()->GetInObjectPropertyOffset(index);
}
-Object* JSObject::InObjectPropertyAt(int index) {
+Object JSObject::InObjectPropertyAt(int index) {
int offset = GetInObjectPropertyOffset(index);
- return READ_FIELD(this, offset);
+ return READ_FIELD(*this, offset);
}
-Object* JSObject::InObjectPropertyAtPut(int index, Object* value,
- WriteBarrierMode mode) {
+Object JSObject::InObjectPropertyAtPut(int index, Object value,
+ WriteBarrierMode mode) {
// Adjust for the number of properties stored in the object.
int offset = GetInObjectPropertyOffset(index);
- WRITE_FIELD(this, offset, value);
- CONDITIONAL_WRITE_BARRIER(this, offset, value, mode);
+ WRITE_FIELD(*this, offset, value);
+ CONDITIONAL_WRITE_BARRIER(*this, offset, value, mode);
return value;
}
-void JSObject::InitializeBody(Map* map, int start_offset,
- Object* pre_allocated_value,
- Object* filler_value) {
+void JSObject::InitializeBody(Map map, int start_offset,
+ Object pre_allocated_value, Object filler_value) {
DCHECK(!filler_value->IsHeapObject() || !Heap::InNewSpace(filler_value));
DCHECK(!pre_allocated_value->IsHeapObject() ||
!Heap::InNewSpace(pre_allocated_value));
@@ -403,20 +437,20 @@ void JSObject::InitializeBody(Map* map, int start_offset,
int offset = start_offset;
if (filler_value != pre_allocated_value) {
int end_of_pre_allocated_offset =
- size - (map->UnusedPropertyFields() * kPointerSize);
+ size - (map->UnusedPropertyFields() * kTaggedSize);
DCHECK_LE(kHeaderSize, end_of_pre_allocated_offset);
while (offset < end_of_pre_allocated_offset) {
- WRITE_FIELD(this, offset, pre_allocated_value);
- offset += kPointerSize;
+ WRITE_FIELD(*this, offset, pre_allocated_value);
+ offset += kTaggedSize;
}
}
while (offset < size) {
- WRITE_FIELD(this, offset, filler_value);
- offset += kPointerSize;
+ WRITE_FIELD(*this, offset, filler_value);
+ offset += kTaggedSize;
}
}
-Object* JSBoundFunction::raw_bound_target_function() const {
+Object JSBoundFunction::raw_bound_target_function() const {
return READ_FIELD(this, kBoundTargetFunctionOffset);
}
@@ -425,17 +459,16 @@ ACCESSORS(JSBoundFunction, bound_target_function, JSReceiver,
ACCESSORS(JSBoundFunction, bound_this, Object, kBoundThisOffset)
ACCESSORS(JSBoundFunction, bound_arguments, FixedArray, kBoundArgumentsOffset)
-ACCESSORS(JSFunction, shared, SharedFunctionInfo, kSharedFunctionInfoOffset)
-ACCESSORS(JSFunction, feedback_cell, FeedbackCell, kFeedbackCellOffset)
+ACCESSORS(JSFunction, raw_feedback_cell, FeedbackCell, kFeedbackCellOffset)
ACCESSORS(JSGlobalObject, native_context, Context, kNativeContextOffset)
ACCESSORS(JSGlobalObject, global_proxy, JSObject, kGlobalProxyOffset)
ACCESSORS(JSGlobalProxy, native_context, Object, kNativeContextOffset)
-FeedbackVector* JSFunction::feedback_vector() const {
+FeedbackVector JSFunction::feedback_vector() const {
DCHECK(has_feedback_vector());
- return FeedbackVector::cast(feedback_cell()->value());
+ return FeedbackVector::cast(raw_feedback_cell()->value());
}
// Code objects that are marked for deoptimization are not considered to be
@@ -445,7 +478,7 @@ FeedbackVector* JSFunction::feedback_vector() const {
// TODO(jupvfranco): rename this function. Maybe RunOptimizedCode,
// or IsValidOptimizedCode.
bool JSFunction::IsOptimized() {
- return code()->kind() == Code::OPTIMIZED_FUNCTION &&
+ return is_compiled() && code()->kind() == Code::OPTIMIZED_FUNCTION &&
!code()->marked_for_deoptimization();
}
@@ -467,9 +500,9 @@ void JSFunction::ClearOptimizationMarker() {
// Optimized code marked for deoptimization will tier back down to running
// interpreted on its next activation, and already doesn't count as IsOptimized.
bool JSFunction::IsInterpreted() {
- return code()->is_interpreter_trampoline_builtin() ||
- (code()->kind() == Code::OPTIMIZED_FUNCTION &&
- code()->marked_for_deoptimization());
+ return is_compiled() && (code()->is_interpreter_trampoline_builtin() ||
+ (code()->kind() == Code::OPTIMIZED_FUNCTION &&
+ code()->marked_for_deoptimization()));
}
bool JSFunction::ChecksOptimizationMarker() {
@@ -499,7 +532,7 @@ void JSFunction::CompleteInobjectSlackTrackingIfActive() {
}
}
-AbstractCode* JSFunction::abstract_code() {
+AbstractCode JSFunction::abstract_code() {
if (IsInterpreted()) {
return AbstractCode::cast(shared()->GetBytecodeArray());
} else {
@@ -507,17 +540,30 @@ AbstractCode* JSFunction::abstract_code() {
}
}
-Code* JSFunction::code() { return Code::cast(READ_FIELD(this, kCodeOffset)); }
+Code JSFunction::code() const {
+ return Code::cast(RELAXED_READ_FIELD(*this, kCodeOffset));
+}
-void JSFunction::set_code(Code* value) {
+void JSFunction::set_code(Code value) {
DCHECK(!Heap::InNewSpace(value));
- WRITE_FIELD(this, kCodeOffset, value);
- MarkingBarrier(this, HeapObject::RawField(this, kCodeOffset), value);
+ RELAXED_WRITE_FIELD(*this, kCodeOffset, value);
+ MarkingBarrier(*this, RawField(kCodeOffset), value);
}
-void JSFunction::set_code_no_write_barrier(Code* value) {
+void JSFunction::set_code_no_write_barrier(Code value) {
DCHECK(!Heap::InNewSpace(value));
- WRITE_FIELD(this, kCodeOffset, value);
+ RELAXED_WRITE_FIELD(*this, kCodeOffset, value);
+}
+
+SharedFunctionInfo JSFunction::shared() const {
+ return SharedFunctionInfo::cast(
+ RELAXED_READ_FIELD(*this, kSharedFunctionInfoOffset));
+}
+
+void JSFunction::set_shared(SharedFunctionInfo value, WriteBarrierMode mode) {
+ // Release semantics to support acquire read in NeedsResetDueToFlushedBytecode
+ RELEASE_WRITE_FIELD(*this, kSharedFunctionInfoOffset, value);
+ CONDITIONAL_WRITE_BARRIER(*this, kSharedFunctionInfoOffset, value, mode);
}
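
The comment documents a classic release/acquire pairing: set_shared publishes the SharedFunctionInfo with release semantics so that the concurrent reader in NeedsResetDueToFlushedBytecode (further down, which loads the field via ACQUIRE_READ_FIELD) can never observe the pointer before the writes that initialized it. The standard-library shape of the same protocol, as a sketch:

    #include <atomic>

    struct SharedInfo { int data; };
    std::atomic<SharedInfo*> shared_slot{nullptr};

    void Publish(SharedInfo* info) {
      info->data = 42;  // initialization...
      // ...happens-before any acquire load that observes this store.
      shared_slot.store(info, std::memory_order_release);
    }

    SharedInfo* TryRead() {
      // Pairs with the release store; a non-null result is fully initialized.
      return shared_slot.load(std::memory_order_acquire);
    }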
void JSFunction::ClearOptimizedCodeSlot(const char* reason) {
@@ -541,25 +587,26 @@ void JSFunction::SetOptimizationMarker(OptimizationMarker marker) {
}
bool JSFunction::has_feedback_vector() const {
- return !feedback_cell()->value()->IsUndefined();
+ return shared()->is_compiled() &&
+ !raw_feedback_cell()->value()->IsUndefined();
}
-Context* JSFunction::context() {
- return Context::cast(READ_FIELD(this, kContextOffset));
+Context JSFunction::context() {
+ return Context::cast(READ_FIELD(*this, kContextOffset));
}
bool JSFunction::has_context() const {
- return READ_FIELD(this, kContextOffset)->IsContext();
+ return READ_FIELD(*this, kContextOffset)->IsContext();
}
-JSGlobalProxy* JSFunction::global_proxy() { return context()->global_proxy(); }
+JSGlobalProxy JSFunction::global_proxy() { return context()->global_proxy(); }
-Context* JSFunction::native_context() { return context()->native_context(); }
+Context JSFunction::native_context() { return context()->native_context(); }
-void JSFunction::set_context(Object* value) {
+void JSFunction::set_context(Object value) {
DCHECK(value->IsUndefined() || value->IsContext());
- WRITE_FIELD(this, kContextOffset, value);
- WRITE_BARRIER(this, kContextOffset, value);
+ WRITE_FIELD(*this, kContextOffset, value);
+ WRITE_BARRIER(*this, kContextOffset, value);
}
ACCESSORS_CHECKED(JSFunction, prototype_or_initial_map, Object,
@@ -569,7 +616,7 @@ bool JSFunction::has_prototype_slot() const {
return map()->has_prototype_slot();
}
-Map* JSFunction::initial_map() { return Map::cast(prototype_or_initial_map()); }
+Map JSFunction::initial_map() { return Map::cast(prototype_or_initial_map()); }
bool JSFunction::has_initial_map() {
DCHECK(has_prototype_slot());
@@ -595,7 +642,7 @@ bool JSFunction::PrototypeRequiresRuntimeLookup() {
return !has_prototype_property() || map()->has_non_instance_prototype();
}
-Object* JSFunction::instance_prototype() {
+Object JSFunction::instance_prototype() {
DCHECK(has_instance_prototype());
if (has_initial_map()) return initial_map()->prototype();
// When there is no initial map and the prototype is a JSReceiver, the
@@ -603,12 +650,12 @@ Object* JSFunction::instance_prototype() {
return prototype_or_initial_map();
}
-Object* JSFunction::prototype() {
+Object JSFunction::prototype() {
DCHECK(has_prototype());
// If the function's prototype property has been set to a non-JSReceiver
// value, that value is stored in the constructor field of the map.
if (map()->has_non_instance_prototype()) {
- Object* prototype = map()->GetConstructor();
+ Object prototype = map()->GetConstructor();
// The map must have a prototype in that field, not a back pointer.
DCHECK(!prototype->IsMap());
DCHECK(!prototype->IsFunctionTemplateInfo());
@@ -617,8 +664,38 @@ Object* JSFunction::prototype() {
return instance_prototype();
}
-bool JSFunction::is_compiled() {
- return code()->builtin_index() != Builtins::kCompileLazy;
+bool JSFunction::is_compiled() const {
+ return code()->builtin_index() != Builtins::kCompileLazy &&
+ shared()->is_compiled();
+}
+
+bool JSFunction::NeedsResetDueToFlushedBytecode() {
+ if (!FLAG_flush_bytecode) return false;
+
+ // Do a raw read for shared and code fields here since this function may be
+ // called on a concurrent thread and the JSFunction might not be fully
+ // initialized yet.
+ Object maybe_shared = ACQUIRE_READ_FIELD(*this, kSharedFunctionInfoOffset);
+ Object maybe_code = RELAXED_READ_FIELD(*this, kCodeOffset);
+
+ if (!maybe_shared->IsSharedFunctionInfo() || !maybe_code->IsCode()) {
+ return false;
+ }
+
+ SharedFunctionInfo shared = SharedFunctionInfo::cast(maybe_shared);
+ Code code = Code::cast(maybe_code);
+ return !shared->is_compiled() &&
+ code->builtin_index() != Builtins::kCompileLazy;
+}
+
+void JSFunction::ResetIfBytecodeFlushed() {
+ if (NeedsResetDueToFlushedBytecode()) {
+ // Bytecode was flushed and function is now uncompiled, reset JSFunction
+ // by setting code to CompileLazy and clearing the feedback vector.
+ set_code(GetIsolate()->builtins()->builtin(i::Builtins::kCompileLazy));
+ raw_feedback_cell()->set_value(
+ ReadOnlyRoots(GetIsolate()).undefined_value());
+ }
}
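
These two helpers exist because bytecode flushing lets the GC discard a SharedFunctionInfo's bytecode while JSFunctions still reference it; such a function must be re-pointed at CompileLazy and have its feedback cleared before its code() is trusted. The decision itself reduces to the predicate below, sketched with plain booleans standing in for the real shared()/code() state:

    // Sketch of NeedsResetDueToFlushedBytecode's logic: reset only when the
    // shared info lost its bytecode but the function still claims real code.
    inline bool NeedsReset(bool flag_flush_bytecode, bool shared_is_compiled,
                           bool code_is_compile_lazy) {
      if (!flag_flush_bytecode) return false;
      return !shared_is_compiled && !code_is_compile_lazy;
    }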
ACCESSORS(JSValue, value, Object, kValueOffset)
@@ -633,7 +710,13 @@ ACCESSORS(JSDate, hour, Object, kHourOffset)
ACCESSORS(JSDate, min, Object, kMinOffset)
ACCESSORS(JSDate, sec, Object, kSecOffset)
-SMI_ACCESSORS(JSMessageObject, type, kTypeOffset)
+MessageTemplate JSMessageObject::type() const {
+ Object value = READ_FIELD(this, kTypeOffset);
+ return MessageTemplateFromInt(Smi::ToInt(value));
+}
+void JSMessageObject::set_type(MessageTemplate value) {
+ WRITE_FIELD(this, kTypeOffset, Smi::FromInt(static_cast<int>(value)));
+}
ACCESSORS(JSMessageObject, argument, Object, kArgumentsOffset)
ACCESSORS(JSMessageObject, script, Script, kScriptOffset)
ACCESSORS(JSMessageObject, stack_frames, Object, kStackFramesOffset)
@@ -644,13 +727,13 @@ SMI_ACCESSORS(JSMessageObject, error_level, kErrorLevelOffset)
ElementsKind JSObject::GetElementsKind() const {
ElementsKind kind = map()->elements_kind();
#if VERIFY_HEAP && DEBUG
- FixedArrayBase* fixed_array =
- reinterpret_cast<FixedArrayBase*>(READ_FIELD(this, kElementsOffset));
+ FixedArrayBase fixed_array =
+ FixedArrayBase::unchecked_cast(READ_FIELD(this, kElementsOffset));
// If a GC was caused while constructing this object, the elements
// pointer may point to a one pointer filler map.
if (ElementsAreSafeToExamine()) {
- Map* map = fixed_array->map();
+ Map map = fixed_array->map();
if (IsSmiOrObjectElementsKind(kind)) {
DCHECK(map == GetReadOnlyRoots().fixed_array_map() ||
map == GetReadOnlyRoots().fixed_cow_array_map());
@@ -725,15 +808,13 @@ bool JSObject::HasSlowStringWrapperElements() {
}
bool JSObject::HasFixedTypedArrayElements() {
- DCHECK_NOT_NULL(elements());
+ DCHECK(!elements().is_null());
return map()->has_fixed_typed_array_elements();
}
#define FIXED_TYPED_ELEMENTS_CHECK(Type, type, TYPE, ctype) \
bool JSObject::HasFixed##Type##Elements() { \
- HeapObject* array = elements(); \
- DCHECK_NOT_NULL(array); \
- if (!array->IsHeapObject()) return false; \
+ FixedArrayBase array = elements(); \
return array->map()->instance_type() == FIXED_##TYPE##_ARRAY_TYPE; \
}
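
With elements() returning a value type, the old null/Smi guards are unnecessary, so each generated predicate shrinks to two lines. For one instantiation the macro expands approximately to:

    // Approximate expansion for (Int32, int32, INT32, int32_t):
    bool JSObject::HasFixedInt32Elements() {
      FixedArrayBase array = elements();
      return array->map()->instance_type() == FIXED_INT32_ARRAY_TYPE;
    }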
@@ -747,30 +828,29 @@ bool JSObject::HasIndexedInterceptor() {
return map()->has_indexed_interceptor();
}
-void JSGlobalObject::set_global_dictionary(GlobalDictionary* dictionary) {
+void JSGlobalObject::set_global_dictionary(GlobalDictionary dictionary) {
DCHECK(IsJSGlobalObject());
set_raw_properties_or_hash(dictionary);
}
-GlobalDictionary* JSGlobalObject::global_dictionary() {
+GlobalDictionary JSGlobalObject::global_dictionary() {
DCHECK(!HasFastProperties());
DCHECK(IsJSGlobalObject());
return GlobalDictionary::cast(raw_properties_or_hash());
}
-NumberDictionary* JSObject::element_dictionary() {
+NumberDictionary JSObject::element_dictionary() {
DCHECK(HasDictionaryElements() || HasSlowStringWrapperElements());
return NumberDictionary::cast(elements());
}
void JSReceiver::initialize_properties() {
- Heap* heap = GetHeap();
- ReadOnlyRoots roots(heap);
+ ReadOnlyRoots roots = GetReadOnlyRoots();
DCHECK(!Heap::InNewSpace(roots.empty_fixed_array()));
- DCHECK(!Heap::InNewSpace(heap->empty_property_dictionary()));
+ DCHECK(!Heap::InNewSpace(roots.empty_property_dictionary()));
if (map()->is_dictionary_map()) {
WRITE_FIELD(this, kPropertiesOrHashOffset,
- heap->empty_property_dictionary());
+ roots.empty_property_dictionary());
} else {
WRITE_FIELD(this, kPropertiesOrHashOffset, roots.empty_fixed_array());
}
@@ -783,13 +863,13 @@ bool JSReceiver::HasFastProperties() const {
return !map()->is_dictionary_map();
}
-NameDictionary* JSReceiver::property_dictionary() const {
+NameDictionary JSReceiver::property_dictionary() const {
DCHECK(!IsJSGlobalObject());
DCHECK(!HasFastProperties());
- Object* prop = raw_properties_or_hash();
+ Object prop = raw_properties_or_hash();
if (prop->IsSmi()) {
- return GetHeap()->empty_property_dictionary();
+ return GetReadOnlyRoots().empty_property_dictionary();
}
return NameDictionary::cast(prop);
@@ -797,10 +877,10 @@ NameDictionary* JSReceiver::property_dictionary() const {
// TODO(gsathya): Pass isolate directly to this function and access
// the heap from this.
-PropertyArray* JSReceiver::property_array() const {
+PropertyArray JSReceiver::property_array() const {
DCHECK(HasFastProperties());
- Object* prop = raw_properties_or_hash();
+ Object prop = raw_properties_or_hash();
if (prop->IsSmi() || prop == GetReadOnlyRoots().empty_fixed_array()) {
return GetReadOnlyRoots().empty_property_array();
}
@@ -872,18 +952,17 @@ Maybe<PropertyAttributes> JSReceiver::GetOwnElementAttributes(
}
bool JSGlobalObject::IsDetached() {
- return JSGlobalProxy::cast(global_proxy())->IsDetachedFrom(this);
+ return JSGlobalProxy::cast(global_proxy())->IsDetachedFrom(*this);
}
-bool JSGlobalProxy::IsDetachedFrom(JSGlobalObject* global) const {
- const PrototypeIterator iter(this->GetIsolate(),
- const_cast<JSGlobalProxy*>(this));
+bool JSGlobalProxy::IsDetachedFrom(JSGlobalObject global) const {
+ const PrototypeIterator iter(this->GetIsolate(), *this);
return iter.GetCurrent() != global;
}
inline int JSGlobalProxy::SizeWithEmbedderFields(int embedder_field_count) {
DCHECK_GE(embedder_field_count, 0);
- return kSize + embedder_field_count * kPointerSize;
+ return kSize + embedder_field_count * kEmbedderDataSlotSize;
}
ACCESSORS(JSIteratorResult, value, Object, kValueOffset)
diff --git a/deps/v8/src/objects/js-objects.h b/deps/v8/src/objects/js-objects.h
index 586fe757db..0eab21c137 100644
--- a/deps/v8/src/objects/js-objects.h
+++ b/deps/v8/src/objects/js-objects.h
@@ -6,7 +6,9 @@
#define V8_OBJECTS_JS_OBJECTS_H_
#include "src/objects.h"
+#include "src/objects/embedder-data-slot.h"
#include "src/objects/property-array.h"
+#include "torque-generated/class-definitions-from-dsl.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -14,13 +16,15 @@
namespace v8 {
namespace internal {
+enum InstanceType : uint16_t;
class JSGlobalObject;
class JSGlobalProxy;
// JSReceiver includes types on which properties can be defined, i.e.,
// JSObject and JSProxy.
-class JSReceiver : public HeapObject, public NeverReadOnlySpaceObject {
+class JSReceiver : public HeapObject {
public:
+ NEVER_READ_ONLY_SPACE
// Returns true if there is no slow (ie, dictionary) backing store.
inline bool HasFastProperties() const;
@@ -28,15 +32,15 @@ class JSReceiver : public HeapObject, public NeverReadOnlySpaceObject {
// exists. Otherwise, returns an empty_property_array when there's a
// Smi (hash code) or an empty_fixed_array for a fast properties
// map.
- inline PropertyArray* property_array() const;
+ inline PropertyArray property_array() const;
// Gets slow properties for non-global objects.
- inline NameDictionary* property_dictionary() const;
+ inline NameDictionary property_dictionary() const;
// Sets the properties backing store and makes sure any existing hash is moved
// to the new properties store. To clear out the properties store, pass in the
// empty_fixed_array(), the hash will be maintained in this case as well.
- void SetProperties(HeapObject* properties);
+ void SetProperties(HeapObject properties);
// There are five possible values for the properties offset.
// 1) EmptyFixedArray/EmptyPropertyDictionary - This is the standard
@@ -123,9 +127,10 @@ class JSReceiver : public HeapObject, public NeverReadOnlySpaceObject {
Handle<JSReceiver> object, uint32_t index,
LanguageMode language_mode = LanguageMode::kSloppy);
- V8_WARN_UNUSED_RESULT static Object* DefineProperty(
- Isolate* isolate, Handle<Object> object, Handle<Object> name,
- Handle<Object> attributes);
+ V8_WARN_UNUSED_RESULT static Object DefineProperty(Isolate* isolate,
+ Handle<Object> object,
+ Handle<Object> name,
+ Handle<Object> attributes);
V8_WARN_UNUSED_RESULT static MaybeHandle<Object> DefineProperties(
Isolate* isolate, Handle<Object> object, Handle<Object> properties);
@@ -186,7 +191,7 @@ class JSReceiver : public HeapObject, public NeverReadOnlySpaceObject {
Handle<JSReceiver> object);
// Returns the class name ([[Class]] property in the specification).
- V8_EXPORT_PRIVATE String* class_name();
+ V8_EXPORT_PRIVATE String class_name();
// Returns the constructor (the function that was used to instantiate the
// object).
@@ -224,12 +229,12 @@ class JSReceiver : public HeapObject, public NeverReadOnlySpaceObject {
// Retrieves a permanent object identity hash code. The undefined value might
// be returned in case no hash was created yet.
- Object* GetIdentityHash(Isolate* isolate);
+ Object GetIdentityHash();
// Retrieves a permanent object identity hash code. May create and store a
// hash code if needed and none exists.
- static Smi* CreateIdentityHash(Isolate* isolate, JSReceiver* key);
- Smi* GetOrCreateIdentityHash(Isolate* isolate);
+ static Smi CreateIdentityHash(Isolate* isolate, JSReceiver key);
+ Smi GetOrCreateIdentityHash(Isolate* isolate);
// Stores the hash code. The hash passed in must be masked with
// JSReceiver::kHashMask.
@@ -252,16 +257,14 @@ class JSReceiver : public HeapObject, public NeverReadOnlySpaceObject {
static const int kHashMask = PropertyArray::HashField::kMask;
- // Layout description.
- static const int kPropertiesOrHashOffset = HeapObject::kHeaderSize;
- static const int kHeaderSize = HeapObject::kHeaderSize + kPointerSize;
+ DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize, JSRECEIVER_FIELDS)
+ static const int kHeaderSize = kSize;
bool HasProxyInPrototype(Isolate* isolate);
bool HasComplexElements();
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(JSReceiver);
+ OBJECT_CONSTRUCTORS(JSReceiver, HeapObject);
};
// The JSObject describes real heap allocated JavaScript objects with
@@ -270,7 +273,7 @@ class JSReceiver : public HeapObject, public NeverReadOnlySpaceObject {
// caching.
class JSObject : public JSReceiver {
public:
- static bool IsUnmodifiedApiObject(Object** o);
+ static bool IsUnmodifiedApiObject(FullObjectSlot o);
static V8_WARN_UNUSED_RESULT MaybeHandle<JSObject> New(
Handle<JSFunction> constructor, Handle<JSReceiver> new_target,
@@ -350,7 +353,7 @@ class JSObject : public JSReceiver {
inline bool HasSlowStringWrapperElements();
bool HasEnumerableElements();
- inline NumberDictionary* element_dictionary(); // Gets slow elements.
+ inline NumberDictionary element_dictionary(); // Gets slow elements.
// Requires: HasFastElements().
static void EnsureWritableFastElements(Handle<JSObject> object);
@@ -401,6 +404,11 @@ class JSObject : public JSReceiver {
Handle<Name> name, Handle<Object> value,
PropertyAttributes attributes);
+ // {name} must be a UTF-8 encoded, null-terminated string.
+ static void AddProperty(Isolate* isolate, Handle<JSObject> object,
+ const char* name, Handle<Object> value,
+ PropertyAttributes attributes);
+
static void AddDataElement(Handle<JSObject> receiver, uint32_t index,
Handle<Object> value,
PropertyAttributes attributes);
@@ -440,8 +448,8 @@ class JSObject : public JSReceiver {
Handle<Map> new_map,
Isolate* isolate);
static bool UnregisterPrototypeUser(Handle<Map> user, Isolate* isolate);
- static Map* InvalidatePrototypeChains(Map* map);
- static void InvalidatePrototypeValidityCell(JSGlobalObject* global);
+ static Map InvalidatePrototypeChains(Map map);
+ static void InvalidatePrototypeValidityCell(JSGlobalObject global);
// Updates prototype chain tracking information when an object changes its
// map from |old_map| to |new_map|.
@@ -449,15 +457,15 @@ class JSObject : public JSReceiver {
Isolate* isolate);
// Utility used by many Array builtins and runtime functions
- static inline bool PrototypeHasNoElements(Isolate* isolate, JSObject* object);
+ static inline bool PrototypeHasNoElements(Isolate* isolate, JSObject object);
// To be passed to PrototypeUsers::Compact.
- static void PrototypeRegistryCompactionCallback(HeapObject* value,
+ static void PrototypeRegistryCompactionCallback(HeapObject value,
int old_index, int new_index);
// Retrieve interceptors.
- inline InterceptorInfo* GetNamedInterceptor();
- inline InterceptorInfo* GetIndexedInterceptor();
+ inline InterceptorInfo GetNamedInterceptor();
+ inline InterceptorInfo GetIndexedInterceptor();
// Used from JSReceiver.
V8_WARN_UNUSED_RESULT static Maybe<PropertyAttributes>
@@ -488,14 +496,16 @@ class JSObject : public JSReceiver {
V8_WARN_UNUSED_RESULT static MaybeHandle<Object> GetPropertyWithInterceptor(
LookupIterator* it, bool* done);
- static void ValidateElements(JSObject* object);
+ static void ValidateElements(JSObject object);
// Makes sure that this object can contain HeapObject as elements.
static inline void EnsureCanContainHeapObjectElements(Handle<JSObject> obj);
// Makes sure that this object can contain the specified elements.
+ // TSlot here is either ObjectSlot or FullObjectSlot.
+ template <typename TSlot>
static inline void EnsureCanContainElements(Handle<JSObject> object,
- Object** elements, uint32_t count,
+ TSlot elements, uint32_t count,
EnsureElementsMode mode);
static inline void EnsureCanContainElements(Handle<JSObject> object,
Handle<FixedArrayBase> elements,
@@ -543,15 +553,18 @@ class JSObject : public JSReceiver {
// JSFunction objects.
static int GetHeaderSize(InstanceType instance_type,
bool function_has_prototype_slot = false);
- static inline int GetHeaderSize(const Map* map);
+ static inline int GetHeaderSize(const Map map);
inline int GetHeaderSize() const;
- static inline int GetEmbedderFieldCount(const Map* map);
+ static inline int GetEmbedderFieldsStartOffset(const Map map);
+ inline int GetEmbedderFieldsStartOffset();
+
+ static inline int GetEmbedderFieldCount(const Map map);
inline int GetEmbedderFieldCount() const;
inline int GetEmbedderFieldOffset(int index);
- inline Object* GetEmbedderField(int index);
- inline void SetEmbedderField(int index, Object* value);
- inline void SetEmbedderField(int index, Smi* value);
+ inline Object GetEmbedderField(int index);
+ inline void SetEmbedderField(int index, Object value);
+ inline void SetEmbedderField(int index, Smi value);
// Returns true when the object is potentially a wrapper that gets special
// garbage collection treatment.
@@ -592,7 +605,7 @@ class JSObject : public JSReceiver {
// NumberDictionary dictionary. Returns the backing after conversion.
static Handle<NumberDictionary> NormalizeElements(Handle<JSObject> object);
- void RequireSlowElements(NumberDictionary* dictionary);
+ void RequireSlowElements(NumberDictionary dictionary);
// Transform slow named properties to fast variants.
static void MigrateSlowToFast(Handle<JSObject> object,
@@ -604,21 +617,21 @@ class JSObject : public JSReceiver {
static Handle<Object> FastPropertyAt(Handle<JSObject> object,
Representation representation,
FieldIndex index);
- inline Object* RawFastPropertyAt(FieldIndex index);
+ inline Object RawFastPropertyAt(FieldIndex index);
inline double RawFastDoublePropertyAt(FieldIndex index);
inline uint64_t RawFastDoublePropertyAsBitsAt(FieldIndex index);
- inline void FastPropertyAtPut(FieldIndex index, Object* value);
- inline void RawFastPropertyAtPut(FieldIndex index, Object* value);
+ inline void FastPropertyAtPut(FieldIndex index, Object value);
+ inline void RawFastPropertyAtPut(FieldIndex index, Object value);
inline void RawFastDoublePropertyAsBitsAtPut(FieldIndex index, uint64_t bits);
inline void WriteToField(int descriptor, PropertyDetails details,
- Object* value);
+ Object value);
// Access to in object properties.
inline int GetInObjectPropertyOffset(int index);
- inline Object* InObjectPropertyAt(int index);
- inline Object* InObjectPropertyAtPut(
- int index, Object* value, WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
+ inline Object InObjectPropertyAt(int index);
+ inline Object InObjectPropertyAtPut(
+ int index, Object value, WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
// Set the object's prototype (only JSReceiver and null are allowed values).
V8_WARN_UNUSED_RESULT static Maybe<bool> SetPrototype(
@@ -634,11 +647,11 @@ class JSObject : public JSReceiver {
// pre_allocated_value and the rest with filler_value.
// Note: this call does not update write barrier, the caller is responsible
// to ensure that |filler_value| can be collected without WB here.
- inline void InitializeBody(Map* map, int start_offset,
- Object* pre_allocated_value, Object* filler_value);
+ inline void InitializeBody(Map map, int start_offset,
+ Object pre_allocated_value, Object filler_value);
// Check whether this object references another object
- bool ReferencesObject(Object* obj);
+ bool ReferencesObject(Object obj);
V8_WARN_UNUSED_RESULT static Maybe<bool> TestIntegrityLevel(
Handle<JSObject> object, IntegrityLevel lvl);
@@ -668,7 +681,7 @@ class JSObject : public JSReceiver {
ElementsKind to_kind,
Handle<FixedArrayBase> to_elements);
- void PrintInstanceMigration(FILE* file, Map* original_map, Map* new_map);
+ void PrintInstanceMigration(FILE* file, Map original_map, Map new_map);
#ifdef DEBUG
// Structure for collecting spill information about JSObjects.
@@ -699,7 +712,7 @@ class JSObject : public JSReceiver {
bool ElementsAreSafeToExamine() const;
#endif
- Object* SlowReverseLookup(Object* value);
+ Object SlowReverseLookup(Object value);
// Maximal number of elements (numbered 0 .. kMaxElementCount - 1).
// Also maximal value of JSArray's length property.
@@ -724,7 +737,7 @@ class JSObject : public JSReceiver {
// not to arbitrary other JSObject maps.
static const int kInitialGlobalObjectUnusedPropertiesCount = 4;
- static const int kMaxInstanceSize = 255 * kPointerSize;
+ static const int kMaxInstanceSize = 255 * kTaggedSize;
// When extending the backing storage for property values, we increase
// its size by more than the 1 entry necessary, so sequentially adding fields
@@ -733,19 +746,34 @@ class JSObject : public JSReceiver {
STATIC_ASSERT(kMaxNumberOfDescriptors + kFieldsAdded <=
PropertyArray::kMaxLength);
- // Layout description.
- static const int kElementsOffset = JSReceiver::kHeaderSize;
- static const int kHeaderSize = kElementsOffset + kPointerSize;
+// Layout description.
+#define JS_OBJECT_FIELDS(V) \
+ V(kElementsOffset, kTaggedSize) \
+ /* Header size. */ \
+ V(kHeaderSize, 0) \
+ V(kOptionalEmbedderFieldPadding, \
+ POINTER_SIZE_PADDING(kOptionalEmbedderFieldPadding)) \
+ /* Header size aligned to kSystemPointerSize. */ \
+ V(kHeaderSizeForEmbedderFields, 0)
+
+ DEFINE_FIELD_OFFSET_CONSTANTS(JSReceiver::kHeaderSize, JS_OBJECT_FIELDS)
+#undef JS_OBJECT_FIELDS
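
DEFINE_FIELD_OFFSET_CONSTANTS replaces the hand-chained "kX = kY + kPointerSize" constants: it expands the V(name, size) list into an enum whose enumerators accumulate the sizes, so each constant is the byte offset where its field starts, and size-0 entries (kHeaderSize, kSize) become named markers at the current offset. The mechanism, in a self-contained sketch:

    // Sketch: each field contributes "Name, NameEnd = Name + Size - 1,"; the
    // next enumerator without an initializer is NameEnd + 1, i.e. Name + Size.
    #define SKETCH_FIELDS(V) \
      V(kElementsOffset, 8)  \
      V(kHeaderSize, 0)

    #define DEFINE_ONE(Name, Size) Name, Name##End = Name + (Size)-1,
    enum : int {
      kSketchStart = 16 - 1,  // fields begin at offset 16 in this example
      SKETCH_FIELDS(DEFINE_ONE)
    };
    #undef DEFINE_ONE
    #undef SKETCH_FIELDS

    static_assert(kElementsOffset == 16 && kHeaderSize == 24, "offsets");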
STATIC_ASSERT(kHeaderSize == Internals::kJSObjectHeaderSize);
static const int kMaxInObjectProperties =
- (kMaxInstanceSize - kHeaderSize) >> kPointerSizeLog2;
+ (kMaxInstanceSize - kHeaderSize) >> kTaggedSizeLog2;
STATIC_ASSERT(kMaxInObjectProperties <= kMaxNumberOfDescriptors);
- // TODO(cbruni): Revisit calculation of the max supported embedder fields.
+
+ STATIC_ASSERT(kHeaderSizeForEmbedderFields ==
+ Internals::kJSObjectHeaderSizeForEmbedderFields);
+ static const int kMaxFirstInobjectPropertyOffset =
+ (1 << kFirstInobjectPropertyOffsetBitCount) - 1;
static const int kMaxEmbedderFields =
- ((1 << kFirstInobjectPropertyOffsetBitCount) - 1 - kHeaderSize) >>
- kPointerSizeLog2;
- STATIC_ASSERT(kMaxEmbedderFields <= kMaxInObjectProperties);
+ (kMaxFirstInobjectPropertyOffset - kHeaderSizeForEmbedderFields) /
+ kEmbedderDataSlotSize;
+ STATIC_ASSERT(kHeaderSizeForEmbedderFields +
+ kMaxEmbedderFields * kEmbedderDataSlotSizeInTaggedSlots <=
+ kMaxInstanceSize);
class BodyDescriptor;
@@ -771,8 +799,8 @@ class JSObject : public JSReceiver {
V8_WARN_UNUSED_RESULT static Maybe<bool> DeletePropertyWithInterceptor(
LookupIterator* it, ShouldThrow should_throw);
- bool ReferencesObjectFromElements(FixedArray* elements, ElementsKind kind,
- Object* object);
+ bool ReferencesObjectFromElements(FixedArray elements, ElementsKind kind,
+ Object object);
// Helper for fast versions of preventExtensions, seal, and freeze.
// attrs is one of NONE, SEALED, or FROZEN (depending on the operation).
@@ -780,7 +808,7 @@ class JSObject : public JSReceiver {
V8_WARN_UNUSED_RESULT static Maybe<bool> PreventExtensionsWithTransition(
Handle<JSObject> object, ShouldThrow should_throw);
- DISALLOW_IMPLICIT_CONSTRUCTORS(JSObject);
+ OBJECT_CONSTRUCTORS(JSObject, JSReceiver);
};
// JSAccessorPropertyDescriptor is just a JSObject with a specific initial
@@ -789,12 +817,19 @@ class JSObject : public JSReceiver {
// FromPropertyDescriptor function for regular accessor properties.
class JSAccessorPropertyDescriptor : public JSObject {
public:
- // Offsets of object fields.
- static const int kGetOffset = JSObject::kHeaderSize;
- static const int kSetOffset = kGetOffset + kPointerSize;
- static const int kEnumerableOffset = kSetOffset + kPointerSize;
- static const int kConfigurableOffset = kEnumerableOffset + kPointerSize;
- static const int kSize = kConfigurableOffset + kPointerSize;
+ // Layout description.
+#define JS_ACCESSOR_PROPERTY_DESCRIPTOR_FIELDS(V) \
+ V(kGetOffset, kTaggedSize) \
+ V(kSetOffset, kTaggedSize) \
+ V(kEnumerableOffset, kTaggedSize) \
+ V(kConfigurableOffset, kTaggedSize) \
+ /* Total size. */ \
+ V(kSize, 0)
+
+ DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
+ JS_ACCESSOR_PROPERTY_DESCRIPTOR_FIELDS)
+#undef JS_ACCESSOR_PROPERTY_DESCRIPTOR_FIELDS
+
// Indices of in-object properties.
static const int kGetIndex = 0;
static const int kSetIndex = 1;
@@ -811,12 +846,19 @@ class JSAccessorPropertyDescriptor : public JSObject {
// FromPropertyDescriptor function for regular data properties.
class JSDataPropertyDescriptor : public JSObject {
public:
- // Offsets of object fields.
- static const int kValueOffset = JSObject::kHeaderSize;
- static const int kWritableOffset = kValueOffset + kPointerSize;
- static const int kEnumerableOffset = kWritableOffset + kPointerSize;
- static const int kConfigurableOffset = kEnumerableOffset + kPointerSize;
- static const int kSize = kConfigurableOffset + kPointerSize;
+ // Layout description.
+#define JS_DATA_PROPERTY_DESCRIPTOR_FIELDS(V) \
+ V(kValueOffset, kTaggedSize) \
+ V(kWritableOffset, kTaggedSize) \
+ V(kEnumerableOffset, kTaggedSize) \
+ V(kConfigurableOffset, kTaggedSize) \
+ /* Total size. */ \
+ V(kSize, 0)
+
+ DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
+ JS_DATA_PROPERTY_DESCRIPTOR_FIELDS)
+#undef JS_DATA_PROPERTY_DESCRIPTOR_FIELDS
+
// Indices of in-object properties.
static const int kValueIndex = 0;
static const int kWritableIndex = 1;
@@ -836,23 +878,31 @@ class JSIteratorResult : public JSObject {
DECL_ACCESSORS(done, Object)
- // Offsets of object fields.
- static const int kValueOffset = JSObject::kHeaderSize;
- static const int kDoneOffset = kValueOffset + kPointerSize;
- static const int kSize = kDoneOffset + kPointerSize;
+ // Layout description.
+#define JS_ITERATOR_RESULT_FIELDS(V) \
+ V(kValueOffset, kTaggedSize) \
+ V(kDoneOffset, kTaggedSize) \
+ /* Total size. */ \
+ V(kSize, 0)
+
+ DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
+ JS_ITERATOR_RESULT_FIELDS)
+#undef JS_ITERATOR_RESULT_FIELDS
+
// Indices of in-object properties.
static const int kValueIndex = 0;
static const int kDoneIndex = 1;
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(JSIteratorResult);
+ DECL_CAST(JSIteratorResult)
+
+ OBJECT_CONSTRUCTORS(JSIteratorResult, JSObject);
};
// JSBoundFunction describes a bound function exotic object.
class JSBoundFunction : public JSObject {
public:
// [bound_target_function]: The wrapped function object.
- inline Object* raw_bound_target_function() const;
+ inline Object raw_bound_target_function() const;
DECL_ACCESSORS(bound_target_function, JSReceiver)
// [bound_this]: The value that is always passed as the this value when
@@ -881,13 +931,9 @@ class JSBoundFunction : public JSObject {
static Handle<String> ToString(Handle<JSBoundFunction> function);
// Layout description.
- static const int kBoundTargetFunctionOffset = JSObject::kHeaderSize;
- static const int kBoundThisOffset = kBoundTargetFunctionOffset + kPointerSize;
- static const int kBoundArgumentsOffset = kBoundThisOffset + kPointerSize;
- static const int kSize = kBoundArgumentsOffset + kPointerSize;
+ DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize, JSBOUND_FUNCTION_FIELDS)
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(JSBoundFunction);
+ OBJECT_CONSTRUCTORS(JSBoundFunction, JSObject);
};
// JSFunction describes JavaScript functions.
@@ -906,11 +952,11 @@ class JSFunction : public JSObject {
static const int kMaybeHomeObjectDescriptorIndex = 2;
// [context]: The context for this function.
- inline Context* context();
+ inline Context context();
inline bool has_context() const;
- inline void set_context(Object* context);
- inline JSGlobalProxy* global_proxy();
- inline Context* native_context();
+ inline void set_context(Object context);
+ inline JSGlobalProxy global_proxy();
+ inline Context native_context();
static Handle<Object> GetName(Isolate* isolate, Handle<JSFunction> function);
static Maybe<int> GetLength(Isolate* isolate, Handle<JSFunction> function);
@@ -920,13 +966,13 @@ class JSFunction : public JSObject {
// when the function is invoked, e.g. foo() or new foo(). See
// [[Call]] and [[Construct]] description in ECMA-262, section
// 8.6.2, page 27.
- inline Code* code();
- inline void set_code(Code* code);
- inline void set_code_no_write_barrier(Code* code);
+ inline Code code() const;
+ inline void set_code(Code code);
+ inline void set_code_no_write_barrier(Code code);
// Get the abstract code associated with the function, which will either be
// a Code object or a BytecodeArray.
- inline AbstractCode* abstract_code();
+ inline AbstractCode abstract_code();
// Tells whether or not this function is interpreted.
//
@@ -981,22 +1027,28 @@ class JSFunction : public JSObject {
// Completes inobject slack tracking on initial map if it is active.
inline void CompleteInobjectSlackTrackingIfActive();
- // [feedback_cell]: The FeedbackCell used to hold the FeedbackVector
- // eventually.
- DECL_ACCESSORS(feedback_cell, FeedbackCell)
+ // [raw_feedback_cell]: Gives raw access to the FeedbackCell used to hold the
+ // FeedbackVector eventually. Generally this shouldn't be used to get the
+ // feedback_vector; instead use feedback_vector(), which correctly deals with
+ // the JSFunction's bytecode being flushed.
+ DECL_ACCESSORS(raw_feedback_cell, FeedbackCell)
// feedback_vector() can be used once the function is compiled.
- inline FeedbackVector* feedback_vector() const;
+ inline FeedbackVector feedback_vector() const;
inline bool has_feedback_vector() const;
static void EnsureFeedbackVector(Handle<JSFunction> function);
// Unconditionally clear the type feedback vector.
void ClearTypeFeedbackInfo();
+ // Resets function to clear compiled data after bytecode has been flushed.
+ inline bool NeedsResetDueToFlushedBytecode();
+ inline void ResetIfBytecodeFlushed();
+
inline bool has_prototype_slot() const;
// The initial map for an object created by this constructor.
- inline Map* initial_map();
+ inline Map initial_map();
static void SetInitialMap(Handle<JSFunction> function, Handle<Map> map,
Handle<Object> prototype);
inline bool has_initial_map();
@@ -1015,14 +1067,14 @@ class JSFunction : public JSObject {
// until an initial map is needed.
inline bool has_prototype();
inline bool has_instance_prototype();
- inline Object* prototype();
- inline Object* instance_prototype();
+ inline Object prototype();
+ inline Object instance_prototype();
inline bool has_prototype_property();
inline bool PrototypeRequiresRuntimeLookup();
static void SetPrototype(Handle<JSFunction> function, Handle<Object> value);
// Returns if this function has been compiled to native code yet.
- inline bool is_compiled();
+ inline bool is_compiled() const;
static int GetHeaderSize(bool function_has_prototype_slot) {
return function_has_prototype_slot ? JSFunction::kSizeWithPrototype
@@ -1046,8 +1098,6 @@ class JSFunction : public JSObject {
int* instance_size,
int* in_object_properties);
- class BodyDescriptor;
-
// Dispatched behavior.
DECL_PRINTER(JSFunction)
DECL_VERIFIER(JSFunction)
@@ -1073,25 +1123,12 @@ class JSFunction : public JSObject {
// ES6 section 19.2.3.5 Function.prototype.toString ( ).
static Handle<String> ToString(Handle<JSFunction> function);
-// Layout description.
-#define JS_FUNCTION_FIELDS(V) \
- /* Pointer fields. */ \
- V(kSharedFunctionInfoOffset, kPointerSize) \
- V(kContextOffset, kPointerSize) \
- V(kFeedbackCellOffset, kPointerSize) \
- V(kEndOfStrongFieldsOffset, 0) \
- V(kCodeOffset, kPointerSize) \
- /* Size of JSFunction object without prototype field. */ \
- V(kSizeWithoutPrototype, 0) \
- V(kPrototypeOrInitialMapOffset, kPointerSize) \
- /* Size of JSFunction object with prototype field. */ \
- V(kSizeWithPrototype, 0)
-
- DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize, JS_FUNCTION_FIELDS)
-#undef JS_FUNCTION_FIELDS
+ DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize, JSFUNCTION_FIELDS)
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(JSFunction);
+ static constexpr int kSizeWithoutPrototype = kPrototypeOrInitialMapOffset;
+ static constexpr int kSizeWithPrototype = kSize;
+
+ OBJECT_CONSTRUCTORS(JSFunction, JSObject);
};
// JSGlobalProxy's prototype must be a JSGlobalObject or null,
@@ -1110,7 +1147,7 @@ class JSGlobalProxy : public JSObject {
DECL_CAST(JSGlobalProxy)
- inline bool IsDetachedFrom(JSGlobalObject* global) const;
+ inline bool IsDetachedFrom(JSGlobalObject global) const;
static int SizeWithEmbedderFields(int embedder_field_count);
@@ -1119,11 +1156,15 @@ class JSGlobalProxy : public JSObject {
DECL_VERIFIER(JSGlobalProxy)
// Layout description.
- static const int kNativeContextOffset = JSObject::kHeaderSize;
- static const int kSize = kNativeContextOffset + kPointerSize;
+#define JS_GLOBAL_PROXY_FIELDS(V) \
+ V(kNativeContextOffset, kTaggedSize) \
+ /* Header size. */ \
+ V(kSize, 0)
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(JSGlobalProxy);
+ DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize, JS_GLOBAL_PROXY_FIELDS)
+#undef JS_GLOBAL_PROXY_FIELDS
+
+ OBJECT_CONSTRUCTORS(JSGlobalProxy, JSObject);
};
// JavaScript global object.
@@ -1136,8 +1177,8 @@ class JSGlobalObject : public JSObject {
DECL_ACCESSORS(global_proxy, JSObject)
// Gets global object properties.
- inline GlobalDictionary* global_dictionary();
- inline void set_global_dictionary(GlobalDictionary* dictionary);
+ inline GlobalDictionary global_dictionary();
+ inline void set_global_dictionary(GlobalDictionary dictionary);
static void InvalidatePropertyCell(Handle<JSGlobalObject> object,
Handle<Name> name);
@@ -1155,13 +1196,17 @@ class JSGlobalObject : public JSObject {
DECL_VERIFIER(JSGlobalObject)
// Layout description.
- static const int kNativeContextOffset = JSObject::kHeaderSize;
- static const int kGlobalProxyOffset = kNativeContextOffset + kPointerSize;
- static const int kHeaderSize = kGlobalProxyOffset + kPointerSize;
- static const int kSize = kHeaderSize;
+#define JS_GLOBAL_OBJECT_FIELDS(V) \
+ V(kNativeContextOffset, kTaggedSize) \
+ V(kGlobalProxyOffset, kTaggedSize) \
+ /* Header size. */ \
+ V(kHeaderSize, 0) \
+ V(kSize, 0)
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(JSGlobalObject);
+ DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize, JS_GLOBAL_OBJECT_FIELDS)
+#undef JS_GLOBAL_OBJECT_FIELDS
+
+ OBJECT_CONSTRUCTORS(JSGlobalObject, JSObject);
};
// Representation for JS Wrapper objects, String, Number, Boolean, etc.
@@ -1177,11 +1222,15 @@ class JSValue : public JSObject {
DECL_VERIFIER(JSValue)
// Layout description.
- static const int kValueOffset = JSObject::kHeaderSize;
- static const int kSize = kValueOffset + kPointerSize;
+#define JS_VALUE_FIELDS(V) \
+ V(kValueOffset, kTaggedSize) \
+ /* Header size. */ \
+ V(kSize, 0)
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(JSValue);
+ DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize, JS_VALUE_FIELDS)
+#undef JS_VALUE_FIELDS
+
+ OBJECT_CONSTRUCTORS(JSValue, JSObject);
};
class DateCache;
@@ -1220,11 +1269,16 @@ class JSDate : public JSObject {
// Returns the date field with the specified index.
// See FieldIndex for the list of date fields.
- static Object* GetField(Object* date, Smi* index);
+ // Arguments and result are raw Address values because this is called
+ // via ExternalReference.
+ // {raw_date} is a tagged Object pointer.
+ // {smi_index} is a tagged Smi.
+ // The return value is a tagged Object pointer.
+ static Address GetField(Address raw_date, Address smi_index);
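
Since this entry point is reached through an ExternalReference (a raw C call from generated code), it can no longer traffic in C++ object types; arguments and result cross the boundary as untyped Address words that each side retags. A sketch of the round-trip, where ComputeDateField and the value class are hypothetical stand-ins:

    #include <cstdint>
    using Address = uintptr_t;

    struct TaggedValue {  // hypothetical: wraps/unwraps one tagged word
      explicit TaggedValue(Address ptr) : ptr_(ptr) {}
      Address ptr() const { return ptr_; }
      Address ptr_;
    };

    TaggedValue ComputeDateField(TaggedValue date,
                                 TaggedValue index);  // hypothetical helper

    Address GetFieldStub(Address raw_date, Address smi_index) {
      TaggedValue date(raw_date);  // retag the incoming words
      TaggedValue index(smi_index);
      return ComputeDateField(date, index).ptr();  // tagged bits back to codegen
    }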
static Handle<Object> SetValue(Handle<JSDate> date, double v);
- void SetValue(Object* value, bool is_value_nan);
+ void SetValue(Object value, bool is_value_nan);
// Dispatched behavior.
DECL_PRINTER(JSDate)
@@ -1260,26 +1314,31 @@ class JSDate : public JSObject {
};
// Layout description.
- static const int kValueOffset = JSObject::kHeaderSize;
- static const int kYearOffset = kValueOffset + kPointerSize;
- static const int kMonthOffset = kYearOffset + kPointerSize;
- static const int kDayOffset = kMonthOffset + kPointerSize;
- static const int kWeekdayOffset = kDayOffset + kPointerSize;
- static const int kHourOffset = kWeekdayOffset + kPointerSize;
- static const int kMinOffset = kHourOffset + kPointerSize;
- static const int kSecOffset = kMinOffset + kPointerSize;
- static const int kCacheStampOffset = kSecOffset + kPointerSize;
- static const int kSize = kCacheStampOffset + kPointerSize;
+#define JS_DATE_FIELDS(V) \
+ V(kValueOffset, kTaggedSize) \
+ V(kYearOffset, kTaggedSize) \
+ V(kMonthOffset, kTaggedSize) \
+ V(kDayOffset, kTaggedSize) \
+ V(kWeekdayOffset, kTaggedSize) \
+ V(kHourOffset, kTaggedSize) \
+ V(kMinOffset, kTaggedSize) \
+ V(kSecOffset, kTaggedSize) \
+ V(kCacheStampOffset, kTaggedSize) \
+ /* Header size. */ \
+ V(kSize, 0)
+
+ DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize, JS_DATE_FIELDS)
+#undef JS_DATE_FIELDS
private:
- inline Object* DoGetField(FieldIndex index);
+ inline Object DoGetField(FieldIndex index);
- Object* GetUTCField(FieldIndex index, double value, DateCache* date_cache);
+ Object GetUTCField(FieldIndex index, double value, DateCache* date_cache);
// Computes and caches the cacheable fields of the date.
inline void SetCachedFields(int64_t local_time_ms, DateCache* date_cache);
- DISALLOW_IMPLICIT_CONSTRUCTORS(JSDate);
+ OBJECT_CONSTRUCTORS(JSDate, JSObject);
};
// Representation of message objects used for error reporting through
@@ -1291,8 +1350,8 @@ class JSDate : public JSObject {
class JSMessageObject : public JSObject {
public:
// [type]: the type of error message.
- inline int type() const;
- inline void set_type(int value);
+ inline MessageTemplate type() const;
+ inline void set_type(MessageTemplate value);
// [arguments]: the arguments for formatting the error message.
DECL_ACCESSORS(argument, Object)
@@ -1332,18 +1391,29 @@ class JSMessageObject : public JSObject {
DECL_VERIFIER(JSMessageObject)
// Layout description.
- static const int kTypeOffset = JSObject::kHeaderSize;
- static const int kArgumentsOffset = kTypeOffset + kPointerSize;
- static const int kScriptOffset = kArgumentsOffset + kPointerSize;
- static const int kStackFramesOffset = kScriptOffset + kPointerSize;
- static const int kStartPositionOffset = kStackFramesOffset + kPointerSize;
- static const int kEndPositionOffset = kStartPositionOffset + kPointerSize;
- static const int kErrorLevelOffset = kEndPositionOffset + kPointerSize;
- static const int kSize = kErrorLevelOffset + kPointerSize;
-
- typedef FixedBodyDescriptor<HeapObject::kMapOffset,
- kStackFramesOffset + kPointerSize, kSize>
+#define JS_MESSAGE_FIELDS(V) \
+ /* Tagged fields. */ \
+ V(kTypeOffset, kTaggedSize) \
+ V(kArgumentsOffset, kTaggedSize) \
+ V(kScriptOffset, kTaggedSize) \
+ V(kStackFramesOffset, kTaggedSize) \
+ V(kPointerFieldsEndOffset, 0) \
+ /* Raw data fields. */ \
+ /* TODO(ishell): store as int32 instead of Smi. */ \
+ V(kStartPositionOffset, kTaggedSize) \
+ V(kEndPositionOffset, kTaggedSize) \
+ V(kErrorLevelOffset, kTaggedSize) \
+ /* Total size. */ \
+ V(kSize, 0)
+
+ DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize, JS_MESSAGE_FIELDS)
+#undef JS_MESSAGE_FIELDS
+
+ typedef FixedBodyDescriptor<HeapObject::kMapOffset, kPointerFieldsEndOffset,
+ kSize>
BodyDescriptor;
+
+ OBJECT_CONSTRUCTORS(JSMessageObject, JSObject);
};
// The [Async-from-Sync Iterator] object
@@ -1368,13 +1438,18 @@ class JSAsyncFromSyncIterator : public JSObject {
// subsequent "next" invocations.
DECL_ACCESSORS(next, Object)
- // Offsets of object fields.
- static const int kSyncIteratorOffset = JSObject::kHeaderSize;
- static const int kNextOffset = kSyncIteratorOffset + kPointerSize;
- static const int kSize = kNextOffset + kPointerSize;
+ // Layout description.
+#define JS_ASYNC_FROM_SYNC_ITERATOR_FIELDS(V) \
+ V(kSyncIteratorOffset, kTaggedSize) \
+ V(kNextOffset, kTaggedSize) \
+ /* Total size. */ \
+ V(kSize, 0)
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(JSAsyncFromSyncIterator);
+ DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
+ JS_ASYNC_FROM_SYNC_ITERATOR_FIELDS)
+#undef JS_ASYNC_FROM_SYNC_ITERATOR_FIELDS
+
+ OBJECT_CONSTRUCTORS(JSAsyncFromSyncIterator, JSObject);
};
class JSStringIterator : public JSObject {
@@ -1392,12 +1467,18 @@ class JSStringIterator : public JSObject {
inline int index() const;
inline void set_index(int value);
- static const int kStringOffset = JSObject::kHeaderSize;
- static const int kNextIndexOffset = kStringOffset + kPointerSize;
- static const int kSize = kNextIndexOffset + kPointerSize;
+ // Layout description.
+#define JS_STRING_ITERATOR_FIELDS(V) \
+ V(kStringOffset, kTaggedSize) \
+ V(kNextIndexOffset, kTaggedSize) \
+ /* Total size. */ \
+ V(kSize, 0)
+
+ DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
+ JS_STRING_ITERATOR_FIELDS)
+#undef JS_STRING_ITERATOR_FIELDS
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(JSStringIterator);
+ OBJECT_CONSTRUCTORS(JSStringIterator, JSObject);
};
} // namespace internal
diff --git a/deps/v8/src/objects/js-plural-rules-inl.h b/deps/v8/src/objects/js-plural-rules-inl.h
index 6cfeb827ca..aa126b0ce1 100644
--- a/deps/v8/src/objects/js-plural-rules-inl.h
+++ b/deps/v8/src/objects/js-plural-rules-inl.h
@@ -19,13 +19,26 @@
namespace v8 {
namespace internal {
+OBJECT_CONSTRUCTORS_IMPL(JSPluralRules, JSObject)
+
ACCESSORS(JSPluralRules, locale, String, kLocaleOffset)
-ACCESSORS(JSPluralRules, type, String, kTypeOffset)
+SMI_ACCESSORS(JSPluralRules, flags, kFlagsOffset)
ACCESSORS(JSPluralRules, icu_plural_rules, Managed<icu::PluralRules>,
kICUPluralRulesOffset)
ACCESSORS(JSPluralRules, icu_decimal_format, Managed<icu::DecimalFormat>,
kICUDecimalFormatOffset)
+inline void JSPluralRules::set_type(Type type) {
+ DCHECK_LT(type, Type::COUNT);
+ int hints = flags();
+ hints = TypeBits::update(hints, type);
+ set_flags(hints);
+}
+
+inline JSPluralRules::Type JSPluralRules::type() const {
+ return TypeBits::decode(flags());
+}
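
The type, previously stored as a string, now lives in a Smi-backed flags word; TypeBits is a BitField that packs the enum into a fixed bit range of flags(). The underlying encode/decode pattern, as a standalone sketch:

    #include <cstdint>

    // Sketch of V8's BitField pattern: the value lives in bits
    // [kShift, kShift + kSize) of a flags word.
    template <typename T, int kShift, int kSize>
    struct BitFieldSketch {
      static const uint32_t kMask = ((uint32_t{1} << kSize) - 1) << kShift;
      static uint32_t update(uint32_t flags, T value) {
        return (flags & ~kMask) | (static_cast<uint32_t>(value) << kShift);
      }
      static T decode(uint32_t flags) {
        return static_cast<T>((flags & kMask) >> kShift);
      }
    };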
+
CAST_ACCESSOR(JSPluralRules);
} // namespace internal
diff --git a/deps/v8/src/objects/js-plural-rules.cc b/deps/v8/src/objects/js-plural-rules.cc
index f76692c501..59b52424ef 100644
--- a/deps/v8/src/objects/js-plural-rules.cc
+++ b/deps/v8/src/objects/js-plural-rules.cc
@@ -15,7 +15,6 @@
#include "unicode/locid.h"
#include "unicode/numfmt.h"
#include "unicode/plurrule.h"
-#include "unicode/strenum.h"
namespace v8 {
namespace internal {
@@ -23,22 +22,22 @@ namespace internal {
namespace {
bool CreateICUPluralRules(Isolate* isolate, const icu::Locale& icu_locale,
- const char* type_string,
+ JSPluralRules::Type type,
std::unique_ptr<icu::PluralRules>* pl,
std::unique_ptr<icu::DecimalFormat>* nf) {
// Make formatter from options. Numbering system is added
// to the locale as Unicode extension (if it was specified at all).
UErrorCode status = U_ZERO_ERROR;
- UPluralType type = UPLURAL_TYPE_CARDINAL;
- if (strcmp(type_string, "ordinal") == 0) {
- type = UPLURAL_TYPE_ORDINAL;
+ UPluralType icu_type = UPLURAL_TYPE_CARDINAL;
+ if (type == JSPluralRules::Type::ORDINAL) {
+ icu_type = UPLURAL_TYPE_ORDINAL;
} else {
- CHECK_EQ(0, strcmp(type_string, "cardinal"));
+ CHECK_EQ(JSPluralRules::Type::CARDINAL, type);
}
std::unique_ptr<icu::PluralRules> plural_rules(
- icu::PluralRules::forLocale(icu_locale, type, status));
+ icu::PluralRules::forLocale(icu_locale, icu_type, status));
if (U_FAILURE(status)) {
return false;
}
@@ -59,12 +58,9 @@ bool CreateICUPluralRules(Isolate* isolate, const icu::Locale& icu_locale,
}
void InitializeICUPluralRules(
- Isolate* isolate, Handle<String> locale, const char* type,
+ Isolate* isolate, const icu::Locale& icu_locale, JSPluralRules::Type type,
std::unique_ptr<icu::PluralRules>* plural_rules,
std::unique_ptr<icu::DecimalFormat>* number_format) {
- icu::Locale icu_locale = Intl::CreateICULocale(isolate, locale);
- DCHECK(!icu_locale.isBogus());
-
bool success = CreateICUPluralRules(isolate, icu_locale, type, plural_rules,
number_format);
if (!success) {
@@ -84,17 +80,28 @@ void InitializeICUPluralRules(
} // namespace
+Handle<String> JSPluralRules::TypeAsString() const {
+ switch (type()) {
+ case Type::CARDINAL:
+ return GetReadOnlyRoots().cardinal_string_handle();
+ case Type::ORDINAL:
+ return GetReadOnlyRoots().ordinal_string_handle();
+ case Type::COUNT:
+ UNREACHABLE();
+ }
+}
+
// static
MaybeHandle<JSPluralRules> JSPluralRules::Initialize(
Isolate* isolate, Handle<JSPluralRules> plural_rules,
Handle<Object> locales, Handle<Object> options_obj) {
+ plural_rules->set_flags(0);
// 1. Let requestedLocales be ? CanonicalizeLocaleList(locales).
- // TODO(jkummerow): Port ResolveLocale, then use the C++ version of
- // CanonicalizeLocaleList here.
- Handle<JSObject> requested_locales;
- ASSIGN_RETURN_ON_EXCEPTION(isolate, requested_locales,
- Intl::CanonicalizeLocaleListJS(isolate, locales),
- JSPluralRules);
+ Maybe<std::vector<std::string>> maybe_requested_locales =
+ Intl::CanonicalizeLocaleList(isolate, locales);
+ MAYBE_RETURN(maybe_requested_locales, Handle<JSPluralRules>());
+ std::vector<std::string> requested_locales =
+ maybe_requested_locales.FromJust();
// 2. If options is undefined, then
if (options_obj->IsUndefined(isolate)) {
@@ -112,30 +119,24 @@ MaybeHandle<JSPluralRules> JSPluralRules::Initialize(
// At this point, options_obj can either be a JSObject or a JSProxy only.
Handle<JSReceiver> options = Handle<JSReceiver>::cast(options_obj);
- // TODO(gsathya): This is currently done as part of the
- // Intl::ResolveLocale call below. Fix this once resolveLocale is
- // changed to not do the lookup.
- //
// 5. Let matcher be ? GetOption(options, "localeMatcher", "string",
// « "lookup", "best fit" », "best fit").
// 6. Set opt.[[localeMatcher]] to matcher.
+ Maybe<Intl::MatcherOption> maybe_locale_matcher =
+ Intl::GetLocaleMatcher(isolate, options, "Intl.PluralRules");
+ MAYBE_RETURN(maybe_locale_matcher, MaybeHandle<JSPluralRules>());
+ Intl::MatcherOption matcher = maybe_locale_matcher.FromJust();
// 7. Let t be ? GetOption(options, "type", "string", « "cardinal",
// "ordinal" », "cardinal").
- std::vector<const char*> values = {"cardinal", "ordinal"};
- std::unique_ptr<char[]> type_str = nullptr;
- const char* type_cstr = "cardinal";
- Maybe<bool> found = Intl::GetStringOption(isolate, options, "type", values,
- "Intl.PluralRules", &type_str);
- MAYBE_RETURN(found, MaybeHandle<JSPluralRules>());
- if (found.FromJust()) {
- type_cstr = type_str.get();
- }
+ Maybe<Type> maybe_type = Intl::GetStringOption<Type>(
+ isolate, options, "type", "Intl.PluralRules", {"cardinal", "ordinal"},
+ {Type::CARDINAL, Type::ORDINAL}, Type::CARDINAL);
+ MAYBE_RETURN(maybe_type, MaybeHandle<JSPluralRules>());
+ Type type = maybe_type.FromJust();
// 8. Set pluralRules.[[Type]] to t.
- Handle<String> type =
- isolate->factory()->NewStringFromAsciiChecked(type_cstr);
- plural_rules->set_type(*type);
+ plural_rules->set_type(type);
// Note: The spec says we should do ResolveLocale after performing
// SetNumberFormatDigitOptions but we need the locale to create all
@@ -146,26 +147,18 @@ MaybeHandle<JSPluralRules> JSPluralRules::Initialize(
// 11. Let r be ResolveLocale(%PluralRules%.[[AvailableLocales]],
// requestedLocales, opt, %PluralRules%.[[RelevantExtensionKeys]],
// localeData).
- Handle<JSObject> r;
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, r,
- Intl::ResolveLocale(isolate, "pluralrules", requested_locales, options),
- JSPluralRules);
-
- Handle<String> locale_str = isolate->factory()->locale_string();
- Handle<Object> locale_obj = JSObject::GetDataProperty(r, locale_str);
-
- // The locale has to be a string. Either a user provided
- // canonicalized string or the default locale.
- CHECK(locale_obj->IsString());
- Handle<String> locale = Handle<String>::cast(locale_obj);
+ Intl::ResolvedLocale r =
+ Intl::ResolveLocale(isolate, JSPluralRules::GetAvailableLocales(),
+ requested_locales, matcher, {});
// 12. Set pluralRules.[[Locale]] to the value of r.[[locale]].
- plural_rules->set_locale(*locale);
+ Handle<String> locale_str =
+ isolate->factory()->NewStringFromAsciiChecked(r.locale.c_str());
+ plural_rules->set_locale(*locale_str);
std::unique_ptr<icu::PluralRules> icu_plural_rules;
std::unique_ptr<icu::DecimalFormat> icu_decimal_format;
- InitializeICUPluralRules(isolate, locale, type_cstr, &icu_plural_rules,
+ InitializeICUPluralRules(isolate, r.icu_locale, type, &icu_plural_rules,
&icu_decimal_format);
CHECK_NOT_NULL(icu_plural_rules.get());
CHECK_NOT_NULL(icu_decimal_format.get());
@@ -250,8 +243,8 @@ Handle<JSObject> JSPluralRules::ResolvedOptions(
Handle<String> locale_value(plural_rules->locale(), isolate);
CreateDataPropertyForOptions(isolate, options, locale_value, "locale");
- Handle<String> type_value(plural_rules->type(), isolate);
- CreateDataPropertyForOptions(isolate, options, type_value, "type");
+ CreateDataPropertyForOptions(isolate, options, plural_rules->TypeAsString(),
+ "type");
icu::DecimalFormat* icu_decimal_format =
plural_rules->icu_decimal_format()->raw();
@@ -321,5 +314,17 @@ Handle<JSObject> JSPluralRules::ResolvedOptions(
return options;
}
+std::set<std::string> JSPluralRules::GetAvailableLocales() {
+ int32_t num_locales = 0;
+ // TODO(ftang): For PluralRules, filter out locales that
+ // don't support PluralRules.
+ // PluralRules is missing an appropriate getAvailableLocales method,
+ // so we should filter from all locales, but it's not clear how; see
+ // https://ssl.icu-project.org/trac/ticket/12756
+ const icu::Locale* icu_available_locales =
+ icu::Locale::getAvailableLocales(num_locales);
+ return Intl::BuildLocaleSet(icu_available_locales, num_locales);
+}
+
} // namespace internal
} // namespace v8
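
The rewritten option handling above replaces hand-rolled strcmp dispatch with a templated Intl::GetStringOption<T> that maps parallel string/enum lists onto an enum, with a default for the absent case. A minimal sketch of that shape in plain C++ — the lookup and error handling here are illustrative, not V8's real signature, which reads the property off a JSReceiver and reports failure as a Maybe<T>:

    #include <cstddef>
    #include <cstring>
    #include <stdexcept>
    #include <vector>

    template <typename T>
    T GetStringOption(const char* value,  // nullptr ~ option not present
                      const std::vector<const char*>& str_values,
                      const std::vector<T>& enum_values, T default_value) {
      if (value == nullptr) return default_value;
      for (std::size_t i = 0; i < str_values.size(); i++) {
        if (std::strcmp(value, str_values[i]) == 0) return enum_values[i];
      }
      throw std::range_error("invalid option value");  // V8: a RangeError
    }

    enum class Type { CARDINAL, ORDINAL };

    int main() {
      Type type = GetStringOption<Type>("ordinal", {"cardinal", "ordinal"},
                                        {Type::CARDINAL, Type::ORDINAL},
                                        Type::CARDINAL);
      return type == Type::ORDINAL ? 0 : 1;
    }
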
diff --git a/deps/v8/src/objects/js-plural-rules.h b/deps/v8/src/objects/js-plural-rules.h
index f262457acb..12b796fa02 100644
--- a/deps/v8/src/objects/js-plural-rules.h
+++ b/deps/v8/src/objects/js-plural-rules.h
@@ -9,6 +9,9 @@
#ifndef V8_OBJECTS_JS_PLURAL_RULES_H_
#define V8_OBJECTS_JS_PLURAL_RULES_H_
+#include <set>
+#include <string>
+
#include "src/heap/factory.h"
#include "src/isolate.h"
#include "src/objects.h"
@@ -37,31 +40,52 @@ class JSPluralRules : public JSObject {
V8_WARN_UNUSED_RESULT static MaybeHandle<String> ResolvePlural(
Isolate* isolate, Handle<JSPluralRules> plural_rules, double number);
+ static std::set<std::string> GetAvailableLocales();
+
+ // [[Type]] is one of the values "cardinal" or "ordinal",
+ // identifying the plural rules used.
+ enum class Type {
+ CARDINAL,
+ ORDINAL,
+
+ COUNT
+ };
+ inline void set_type(Type type);
+ inline Type type() const;
+
+ Handle<String> TypeAsString() const;
+
DECL_CAST(JSPluralRules)
DECL_PRINTER(JSPluralRules)
DECL_VERIFIER(JSPluralRules)
+// Bit positions in |flags|.
+#define FLAGS_BIT_FIELDS(V, _) V(TypeBits, Type, 1, _)
+
+ DEFINE_BIT_FIELDS(FLAGS_BIT_FIELDS)
+#undef FLAGS_BIT_FIELDS
+
+ STATIC_ASSERT(Type::CARDINAL <= TypeBits::kMax);
+ STATIC_ASSERT(Type::ORDINAL <= TypeBits::kMax);
+
// Layout description.
-#define JS_PLURAL_RULES_FIELDS(V) \
- V(kLocaleOffset, kPointerSize) \
- /* In the future, this can be an enum, \
- and not a string. */ \
- V(kTypeOffset, kPointerSize) \
- V(kICUPluralRulesOffset, kPointerSize) \
- V(kICUDecimalFormatOffset, kPointerSize) \
- /* Total size. */ \
+#define JS_PLURAL_RULES_FIELDS(V) \
+ V(kLocaleOffset, kTaggedSize) \
+ V(kFlagsOffset, kTaggedSize) \
+ V(kICUPluralRulesOffset, kTaggedSize) \
+ V(kICUDecimalFormatOffset, kTaggedSize) \
+ /* Total size. */ \
V(kSize, 0)
DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize, JS_PLURAL_RULES_FIELDS)
#undef JS_PLURAL_RULES_FIELDS
DECL_ACCESSORS(locale, String)
- DECL_ACCESSORS(type, String)
+ DECL_INT_ACCESSORS(flags)
DECL_ACCESSORS(icu_plural_rules, Managed<icu::PluralRules>)
DECL_ACCESSORS(icu_decimal_format, Managed<icu::DecimalFormat>)
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(JSPluralRules);
+ OBJECT_CONSTRUCTORS(JSPluralRules, JSObject);
};
} // namespace internal
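
The header above moves JSPluralRules to the list-macro layout style, each field advancing the running offset by kTaggedSize. Roughly, DEFINE_FIELD_OFFSET_CONSTANTS boils the V(...) list down to cumulative offsets along these lines (the sizes below are illustrative placeholders, not authoritative values):

    #include <cstdint>

    constexpr int kTaggedSize = 8;   // illustrative: one tagged slot on x64
    constexpr int kHeaderSize = 24;  // illustrative JSObject header size

    // Each entry starts at the running offset and advances it by its size;
    // V(kSize, 0) marks the end of the object.
    enum JSPluralRulesLayout {
      kLocaleOffset = kHeaderSize,
      kFlagsOffset = kLocaleOffset + kTaggedSize,
      kICUPluralRulesOffset = kFlagsOffset + kTaggedSize,
      kICUDecimalFormatOffset = kICUPluralRulesOffset + kTaggedSize,
      kSize = kICUDecimalFormatOffset + kTaggedSize,
    };

    static_assert(kSize == kHeaderSize + 4 * kTaggedSize,
                  "four tagged fields after the header");

    int main() { return 0; }
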
diff --git a/deps/v8/src/objects/js-promise-inl.h b/deps/v8/src/objects/js-promise-inl.h
index 326ba82473..a423c0281c 100644
--- a/deps/v8/src/objects/js-promise-inl.h
+++ b/deps/v8/src/objects/js-promise-inl.h
@@ -16,6 +16,7 @@
namespace v8 {
namespace internal {
+OBJECT_CONSTRUCTORS_IMPL(JSPromise, JSObject)
CAST_ACCESSOR(JSPromise)
ACCESSORS(JSPromise, reactions_or_result, Object, kReactionsOrResultOffset)
@@ -23,12 +24,12 @@ SMI_ACCESSORS(JSPromise, flags, kFlagsOffset)
BOOL_ACCESSORS(JSPromise, flags, has_handler, kHasHandlerBit)
BOOL_ACCESSORS(JSPromise, flags, handled_hint, kHandledHintBit)
-Object* JSPromise::result() const {
+Object JSPromise::result() const {
DCHECK_NE(Promise::kPending, status());
return reactions_or_result();
}
-Object* JSPromise::reactions() const {
+Object JSPromise::reactions() const {
DCHECK_EQ(Promise::kPending, status());
return reactions_or_result();
}
diff --git a/deps/v8/src/objects/js-promise.h b/deps/v8/src/objects/js-promise.h
index b395ac9b6d..e2e41ec598 100644
--- a/deps/v8/src/objects/js-promise.h
+++ b/deps/v8/src/objects/js-promise.h
@@ -31,10 +31,10 @@ class JSPromise : public JSObject {
DECL_ACCESSORS(reactions_or_result, Object)
// [result]: Checks that the promise is settled and returns the result.
- inline Object* result() const;
+ inline Object result() const;
// [reactions]: Checks that the promise is pending and returns the reactions.
- inline Object* reactions() const;
+ inline Object reactions() const;
DECL_INT_ACCESSORS(flags)
@@ -68,12 +68,17 @@ class JSPromise : public JSObject {
DECL_PRINTER(JSPromise)
DECL_VERIFIER(JSPromise)
- // Layout description.
- static const int kReactionsOrResultOffset = JSObject::kHeaderSize;
- static const int kFlagsOffset = kReactionsOrResultOffset + kPointerSize;
- static const int kSize = kFlagsOffset + kPointerSize;
+#define JS_PROMISE_FIELDS(V) \
+ V(kReactionsOrResultOffset, kTaggedSize) \
+ V(kFlagsOffset, kTaggedSize) \
+ /* Header size. */ \
+ V(kSize, 0)
+
+ DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize, JS_PROMISE_FIELDS)
+#undef JS_PROMISE_FIELDS
+
static const int kSizeWithEmbedderFields =
- kSize + v8::Promise::kEmbedderFieldCount * kPointerSize;
+ kSize + v8::Promise::kEmbedderFieldCount * kEmbedderDataSlotSize;
// Flags layout.
// The first two bits store the v8::Promise::PromiseState.
@@ -94,6 +99,8 @@ class JSPromise : public JSObject {
Handle<Object> reactions,
Handle<Object> argument,
PromiseReaction::Type type);
+
+ OBJECT_CONSTRUCTORS(JSPromise, JSObject)
};
} // namespace internal
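
The `Object*` to `Object` change in the promise accessors above reflects V8's migration from raw object pointers to a small value type wrapping a tagged word, so object references are passed by value at the same cost as a pointer. A toy illustration of the idea — not V8's actual class:

    #include <cstdint>

    using Address = uintptr_t;

    class Object {
     public:
      constexpr Object() : ptr_(0) {}
      explicit constexpr Object(Address ptr) : ptr_(ptr) {}
      constexpr Address ptr() const { return ptr_; }
      constexpr bool operator==(Object other) const {
        return ptr_ == other.ptr_;
      }

     private:
      Address ptr_;  // a tagged pointer or Smi payload, copied by value
    };

    static_assert(sizeof(Object) == sizeof(Address),
                  "an Object costs one word, like the pointer it replaces");

    int main() { return Object() == Object() ? 0 : 1; }
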
diff --git a/deps/v8/src/objects/js-proxy-inl.h b/deps/v8/src/objects/js-proxy-inl.h
index 989d286b40..e0d0835f06 100644
--- a/deps/v8/src/objects/js-proxy-inl.h
+++ b/deps/v8/src/objects/js-proxy-inl.h
@@ -15,6 +15,8 @@
namespace v8 {
namespace internal {
+OBJECT_CONSTRUCTORS_IMPL(JSProxy, JSReceiver)
+
CAST_ACCESSOR(JSProxy)
ACCESSORS(JSProxy, target, Object, kTargetOffset)
diff --git a/deps/v8/src/objects/js-proxy.h b/deps/v8/src/objects/js-proxy.h
index 2a7c518be4..3ba7b7e974 100644
--- a/deps/v8/src/objects/js-proxy.h
+++ b/deps/v8/src/objects/js-proxy.h
@@ -106,15 +106,21 @@ class JSProxy : public JSReceiver {
static const int kMaxIterationLimit = 100 * 1024;
// Layout description.
- static const int kTargetOffset = JSReceiver::kHeaderSize;
- static const int kHandlerOffset = kTargetOffset + kPointerSize;
- static const int kSize = kHandlerOffset + kPointerSize;
+#define JS_PROXY_FIELDS(V) \
+ V(kTargetOffset, kTaggedSize) \
+ V(kHandlerOffset, kTaggedSize) \
+ /* Total size. */ \
+ V(kSize, 0)
+
+ DEFINE_FIELD_OFFSET_CONSTANTS(JSReceiver::kHeaderSize, JS_PROXY_FIELDS)
+#undef JS_PROXY_FIELDS
// kTargetOffset aliases with the elements of JSObject. The fact that
// JSProxy::target is a JavaScript value which cannot be confused with an
// elements backing store is exploited by loading from this offset from an
// unknown JSReceiver.
- STATIC_ASSERT(JSObject::kElementsOffset == JSProxy::kTargetOffset);
+ STATIC_ASSERT(static_cast<int>(JSObject::kElementsOffset) ==
+ static_cast<int>(JSProxy::kTargetOffset));
typedef FixedBodyDescriptor<JSReceiver::kPropertiesOrHashOffset, kSize, kSize>
BodyDescriptor;
@@ -124,8 +130,7 @@ class JSProxy : public JSReceiver {
PropertyDescriptor* desc,
ShouldThrow should_throw);
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(JSProxy);
+ OBJECT_CONSTRUCTORS(JSProxy, JSReceiver);
};
// JSProxyRevocableResult is just a JSObject with a specific initial map.
@@ -133,10 +138,17 @@ class JSProxy : public JSReceiver {
// See https://tc39.github.io/ecma262/#sec-proxy.revocable
class JSProxyRevocableResult : public JSObject {
public:
- // Offsets of object fields.
- static const int kProxyOffset = JSObject::kHeaderSize;
- static const int kRevokeOffset = kProxyOffset + kPointerSize;
- static const int kSize = kRevokeOffset + kPointerSize;
+ // Layout description.
+#define JS_PROXY_REVOCATABLE_RESULT_FIELDS(V) \
+ V(kProxyOffset, kTaggedSize) \
+ V(kRevokeOffset, kTaggedSize) \
+ /* Total size. */ \
+ V(kSize, 0)
+
+ DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
+ JS_PROXY_REVOCATABLE_RESULT_FIELDS)
+#undef JS_PROXY_REVOCATABLE_RESULT_FIELDS
+
// Indices of in-object properties.
static const int kProxyIndex = 0;
static const int kRevokeIndex = 1;
diff --git a/deps/v8/src/objects/js-regexp-inl.h b/deps/v8/src/objects/js-regexp-inl.h
index eeae4fb7ad..23f7c69ade 100644
--- a/deps/v8/src/objects/js-regexp-inl.h
+++ b/deps/v8/src/objects/js-regexp-inl.h
@@ -8,6 +8,7 @@
#include "src/objects/js-regexp.h"
#include "src/objects-inl.h" // Needed for write barriers
+#include "src/objects/smi.h"
#include "src/objects/string.h"
// Has to be the last include (doesn't have include guards):
@@ -16,6 +17,8 @@
namespace v8 {
namespace internal {
+OBJECT_CONSTRUCTORS_IMPL(JSRegExp, JSObject)
+
CAST_ACCESSOR(JSRegExp)
ACCESSORS(JSRegExp, data, Object, kDataOffset)
@@ -23,10 +26,10 @@ ACCESSORS(JSRegExp, flags, Object, kFlagsOffset)
ACCESSORS(JSRegExp, source, Object, kSourceOffset)
ACCESSORS(JSRegExp, last_index, Object, kLastIndexOffset)
-JSRegExp::Type JSRegExp::TypeTag() {
- Object* data = this->data();
+JSRegExp::Type JSRegExp::TypeTag() const {
+ Object data = this->data();
if (data->IsUndefined()) return JSRegExp::NOT_COMPILED;
- Smi* smi = Smi::cast(FixedArray::cast(data)->get(kTagIndex));
+ Smi smi = Smi::cast(FixedArray::cast(data)->get(kTagIndex));
return static_cast<JSRegExp::Type>(smi->value());
}
@@ -43,38 +46,49 @@ int JSRegExp::CaptureCount() {
JSRegExp::Flags JSRegExp::GetFlags() {
DCHECK(this->data()->IsFixedArray());
- Object* data = this->data();
- Smi* smi = Smi::cast(FixedArray::cast(data)->get(kFlagsIndex));
+ Object data = this->data();
+ Smi smi = Smi::cast(FixedArray::cast(data)->get(kFlagsIndex));
return Flags(smi->value());
}
-String* JSRegExp::Pattern() {
+String JSRegExp::Pattern() {
DCHECK(this->data()->IsFixedArray());
- Object* data = this->data();
- String* pattern = String::cast(FixedArray::cast(data)->get(kSourceIndex));
+ Object data = this->data();
+ String pattern = String::cast(FixedArray::cast(data)->get(kSourceIndex));
return pattern;
}
-Object* JSRegExp::CaptureNameMap() {
+Object JSRegExp::CaptureNameMap() {
DCHECK(this->data()->IsFixedArray());
DCHECK_EQ(TypeTag(), IRREGEXP);
- Object* value = DataAt(kIrregexpCaptureNameMapIndex);
+ Object value = DataAt(kIrregexpCaptureNameMapIndex);
DCHECK_NE(value, Smi::FromInt(JSRegExp::kUninitializedValue));
return value;
}
-Object* JSRegExp::DataAt(int index) {
+Object JSRegExp::DataAt(int index) const {
DCHECK(TypeTag() != NOT_COMPILED);
return FixedArray::cast(data())->get(index);
}
-void JSRegExp::SetDataAt(int index, Object* value) {
+void JSRegExp::SetDataAt(int index, Object value) {
DCHECK(TypeTag() != NOT_COMPILED);
DCHECK_GE(index,
kDataIndex); // Only implementation data can be set this way.
FixedArray::cast(data())->set(index, value);
}
+bool JSRegExp::HasCompiledCode() const {
+ return TypeTag() == IRREGEXP && (DataAt(kIrregexpLatin1CodeIndex)->IsCode() ||
+ DataAt(kIrregexpUC16CodeIndex)->IsCode());
+}
+
+void JSRegExp::DiscardCompiledCodeForSerialization() {
+ DCHECK(HasCompiledCode());
+ SetDataAt(kIrregexpLatin1CodeIndex, Smi::FromInt(kUninitializedValue));
+ SetDataAt(kIrregexpUC16CodeIndex, Smi::FromInt(kUninitializedValue));
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/objects/js-regexp-string-iterator-inl.h b/deps/v8/src/objects/js-regexp-string-iterator-inl.h
index 4fc8f88841..82565f0de9 100644
--- a/deps/v8/src/objects/js-regexp-string-iterator-inl.h
+++ b/deps/v8/src/objects/js-regexp-string-iterator-inl.h
@@ -15,6 +15,8 @@
namespace v8 {
namespace internal {
+OBJECT_CONSTRUCTORS_IMPL(JSRegExpStringIterator, JSObject)
+
ACCESSORS(JSRegExpStringIterator, iterating_regexp, Object,
kIteratingRegExpOffset)
ACCESSORS(JSRegExpStringIterator, iterating_string, String,
diff --git a/deps/v8/src/objects/js-regexp-string-iterator.h b/deps/v8/src/objects/js-regexp-string-iterator.h
index 9ad2851c7a..005d10dfc6 100644
--- a/deps/v8/src/objects/js-regexp-string-iterator.h
+++ b/deps/v8/src/objects/js-regexp-string-iterator.h
@@ -36,19 +36,23 @@ class JSRegExpStringIterator : public JSObject {
DECL_PRINTER(JSRegExpStringIterator)
DECL_VERIFIER(JSRegExpStringIterator)
- static const int kIteratingRegExpOffset = JSObject::kHeaderSize;
- static const int kIteratedStringOffset =
- kIteratingRegExpOffset + kPointerSize;
- static const int kFlagsOffset = kIteratedStringOffset + kPointerSize;
-
- static const int kSize = kFlagsOffset + kPointerSize;
+ // Layout description.
+#define JS_REGEXP_STRING_ITERATOR_FIELDS(V) \
+ V(kIteratingRegExpOffset, kTaggedSize) \
+ V(kIteratedStringOffset, kTaggedSize) \
+ V(kFlagsOffset, kTaggedSize) \
+ /* Header size. */ \
+ V(kSize, 0)
+
+ DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
+ JS_REGEXP_STRING_ITERATOR_FIELDS)
+#undef JS_REGEXP_STRING_ITERATOR_FIELDS
static const int kDoneBit = 0;
static const int kGlobalBit = 1;
static const int kUnicodeBit = 2;
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(JSRegExpStringIterator);
+ OBJECT_CONSTRUCTORS(JSRegExpStringIterator, JSObject);
};
} // namespace internal
diff --git a/deps/v8/src/objects/js-regexp.h b/deps/v8/src/objects/js-regexp.h
index 7c8841ee79..5380b79c6c 100644
--- a/deps/v8/src/objects/js-regexp.h
+++ b/deps/v8/src/objects/js-regexp.h
@@ -36,20 +36,49 @@ class JSRegExp : public JSObject {
// ATOM: A simple string to match against using an indexOf operation.
// IRREGEXP: Compiled with Irregexp.
enum Type { NOT_COMPILED, ATOM, IRREGEXP };
- enum Flag {
+ struct FlagShiftBit {
+ static const int kGlobal = 0;
+ static const int kIgnoreCase = 1;
+ static const int kMultiline = 2;
+ static const int kSticky = 3;
+ static const int kUnicode = 4;
+ static const int kDotAll = 5;
+ static const int kInvalid = 7;
+ };
+ enum Flag : uint8_t {
kNone = 0,
- kGlobal = 1 << 0,
- kIgnoreCase = 1 << 1,
- kMultiline = 1 << 2,
- kSticky = 1 << 3,
- kUnicode = 1 << 4,
- kDotAll = 1 << 5,
+ kGlobal = 1 << FlagShiftBit::kGlobal,
+ kIgnoreCase = 1 << FlagShiftBit::kIgnoreCase,
+ kMultiline = 1 << FlagShiftBit::kMultiline,
+ kSticky = 1 << FlagShiftBit::kSticky,
+ kUnicode = 1 << FlagShiftBit::kUnicode,
+ kDotAll = 1 << FlagShiftBit::kDotAll,
// Update FlagCount when adding new flags.
+ kInvalid = 1 << FlagShiftBit::kInvalid, // Not included in FlagCount.
};
typedef base::Flags<Flag> Flags;
-
static constexpr int FlagCount() { return 6; }
+ static int FlagShiftBits(Flag flag) {
+ switch (flag) {
+ case kGlobal:
+ return FlagShiftBit::kGlobal;
+ case kIgnoreCase:
+ return FlagShiftBit::kIgnoreCase;
+ case kMultiline:
+ return FlagShiftBit::kMultiline;
+ case kSticky:
+ return FlagShiftBit::kSticky;
+ case kUnicode:
+ return FlagShiftBit::kUnicode;
+ case kDotAll:
+ return FlagShiftBit::kDotAll;
+ default:
+ STATIC_ASSERT(FlagCount() == 6);
+ UNREACHABLE();
+ }
+ }
+
DECL_ACCESSORS(data, Object)
DECL_ACCESSORS(flags, Object)
DECL_ACCESSORS(last_index, Object)
@@ -66,15 +95,15 @@ class JSRegExp : public JSObject {
Handle<String> source,
Handle<String> flags_string);
- inline Type TypeTag();
+ inline Type TypeTag() const;
// Number of captures (without the match itself).
inline int CaptureCount();
inline Flags GetFlags();
- inline String* Pattern();
- inline Object* CaptureNameMap();
- inline Object* DataAt(int index);
+ inline String Pattern();
+ inline Object CaptureNameMap();
+ inline Object DataAt(int index) const;
// Set implementation data after the object has been prepared.
- inline void SetDataAt(int index, Object* value);
+ inline void SetDataAt(int index, Object value);
static int code_index(bool is_latin1) {
if (is_latin1) {
@@ -84,17 +113,27 @@ class JSRegExp : public JSObject {
}
}
+ inline bool HasCompiledCode() const;
+ inline void DiscardCompiledCodeForSerialization();
+
DECL_CAST(JSRegExp)
// Dispatched behavior.
DECL_PRINTER(JSRegExp)
DECL_VERIFIER(JSRegExp)
- static const int kDataOffset = JSObject::kHeaderSize;
- static const int kSourceOffset = kDataOffset + kPointerSize;
- static const int kFlagsOffset = kSourceOffset + kPointerSize;
- static const int kSize = kFlagsOffset + kPointerSize;
- static const int kLastIndexOffset = kSize; // In-object field.
+// Layout description.
+#define JS_REGEXP_FIELDS(V) \
+ V(kDataOffset, kTaggedSize) \
+ V(kSourceOffset, kTaggedSize) \
+ V(kFlagsOffset, kTaggedSize) \
+ /* Total size. */ \
+ V(kSize, 0) \
+ /* This is already an in-object field. */ \
+ V(kLastIndexOffset, 0)
+
+ DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize, JS_REGEXP_FIELDS)
+#undef JS_REGEXP_FIELDS
// Indices in the data array.
static const int kTagIndex = 0;
@@ -131,8 +170,18 @@ class JSRegExp : public JSObject {
static const int kLastIndexFieldIndex = 0;
static const int kInObjectFieldCount = 1;
+  // Descriptor array indices of important methods in the prototype.
+ static const int kExecFunctionDescriptorIndex = 1;
+ static const int kSymbolMatchFunctionDescriptorIndex = 13;
+ static const int kSymbolReplaceFunctionDescriptorIndex = 14;
+ static const int kSymbolSearchFunctionDescriptorIndex = 15;
+ static const int kSymbolSplitFunctionDescriptorIndex = 16;
+ static const int kSymbolMatchAllFunctionDescriptorIndex = 17;
+
// The uninitialized value for a regexp code object.
static const int kUninitializedValue = -1;
+
+ OBJECT_CONSTRUCTORS(JSRegExp, JSObject)
};
DEFINE_OPERATORS_FOR_FLAGS(JSRegExp::Flags)
@@ -145,10 +194,12 @@ DEFINE_OPERATORS_FOR_FLAGS(JSRegExp::Flags)
// After creation the result must be treated as a JSArray in all regards.
class JSRegExpResult : public JSArray {
public:
+// Layout description.
#define REG_EXP_RESULT_FIELDS(V) \
- V(kIndexOffset, kPointerSize) \
- V(kInputOffset, kPointerSize) \
- V(kGroupsOffset, kPointerSize) \
+ V(kIndexOffset, kTaggedSize) \
+ V(kInputOffset, kTaggedSize) \
+ V(kGroupsOffset, kTaggedSize) \
+ /* Total size. */ \
V(kSize, 0)
DEFINE_FIELD_OFFSET_CONSTANTS(JSArray::kSize, REG_EXP_RESULT_FIELDS)
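
The js-regexp.h hunks above give each regexp flag a named bit position (FlagShiftBit) and a switch that recovers the shift from the mask. A standalone sketch of that flag layout, mirroring the diff's constants:

    #include <cassert>
    #include <cstdint>

    struct FlagShiftBit {
      static const int kGlobal = 0;
      static const int kIgnoreCase = 1;
      static const int kMultiline = 2;
      static const int kSticky = 3;
      static const int kUnicode = 4;
      static const int kDotAll = 5;
    };

    enum Flag : uint8_t {
      kNone = 0,
      kGlobal = 1 << FlagShiftBit::kGlobal,
      kIgnoreCase = 1 << FlagShiftBit::kIgnoreCase,
      kMultiline = 1 << FlagShiftBit::kMultiline,
      kSticky = 1 << FlagShiftBit::kSticky,
      kUnicode = 1 << FlagShiftBit::kUnicode,
      kDotAll = 1 << FlagShiftBit::kDotAll,
    };

    int main() {
      uint8_t flags = kGlobal | kSticky;  // e.g. the flags of /abc/gy
      assert(flags & kGlobal);
      assert(!(flags & kIgnoreCase));
      return 0;
    }
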
diff --git a/deps/v8/src/objects/js-relative-time-format-inl.h b/deps/v8/src/objects/js-relative-time-format-inl.h
index a4ee3ee7f3..1824b4b4ca 100644
--- a/deps/v8/src/objects/js-relative-time-format-inl.h
+++ b/deps/v8/src/objects/js-relative-time-format-inl.h
@@ -18,14 +18,14 @@
namespace v8 {
namespace internal {
+OBJECT_CONSTRUCTORS_IMPL(JSRelativeTimeFormat, JSObject)
+
// Base relative time format accessors.
ACCESSORS(JSRelativeTimeFormat, locale, String, kLocaleOffset)
ACCESSORS(JSRelativeTimeFormat, icu_formatter,
Managed<icu::RelativeDateTimeFormatter>, kICUFormatterOffset)
SMI_ACCESSORS(JSRelativeTimeFormat, flags, kFlagsOffset)
-// TODO(ftang): Use bit field accessor for style and numeric later.
-
inline void JSRelativeTimeFormat::set_style(Style style) {
DCHECK_GT(Style::COUNT, style);
int hints = flags();
diff --git a/deps/v8/src/objects/js-relative-time-format.cc b/deps/v8/src/objects/js-relative-time-format.cc
index b3aa996d64..29896a926e 100644
--- a/deps/v8/src/objects/js-relative-time-format.cc
+++ b/deps/v8/src/objects/js-relative-time-format.cc
@@ -16,10 +16,11 @@
#include "src/isolate.h"
#include "src/objects-inl.h"
#include "src/objects/intl-objects.h"
+#include "src/objects/js-number-format.h"
#include "src/objects/js-relative-time-format-inl.h"
+#include "unicode/datefmt.h"
#include "unicode/numfmt.h"
#include "unicode/reldatefmt.h"
-#include "unicode/uvernum.h" // for U_ICU_VERSION_MAJOR_NUM
namespace v8 {
namespace internal {
@@ -56,81 +57,81 @@ JSRelativeTimeFormat::Numeric JSRelativeTimeFormat::getNumeric(
MaybeHandle<JSRelativeTimeFormat> JSRelativeTimeFormat::Initialize(
Isolate* isolate, Handle<JSRelativeTimeFormat> relative_time_format_holder,
- Handle<Object> input_locales, Handle<Object> input_options) {
- Factory* factory = isolate->factory();
+ Handle<Object> locales, Handle<Object> input_options) {
relative_time_format_holder->set_flags(0);
- // 4. If options is undefined, then
+
+ // 1. Let requestedLocales be ? CanonicalizeLocaleList(locales).
+ Maybe<std::vector<std::string>> maybe_requested_locales =
+ Intl::CanonicalizeLocaleList(isolate, locales);
+ MAYBE_RETURN(maybe_requested_locales, Handle<JSRelativeTimeFormat>());
+ std::vector<std::string> requested_locales =
+ maybe_requested_locales.FromJust();
+
+ // 2. If options is undefined, then
Handle<JSReceiver> options;
if (input_options->IsUndefined(isolate)) {
- // a. Let options be ObjectCreate(null).
+ // 2. a. Let options be ObjectCreate(null).
options = isolate->factory()->NewJSObjectWithNullProto();
- // 5. Else
+ // 3. Else
} else {
- // a. Let options be ? ToObject(options).
+ // 3. a. Let options be ? ToObject(options).
ASSIGN_RETURN_ON_EXCEPTION(isolate, options,
Object::ToObject(isolate, input_options),
JSRelativeTimeFormat);
}
- // 10. Let r be ResolveLocale(%RelativeTimeFormat%.[[AvailableLocales]],
- // requestedLocales, opt,
- // %RelativeTimeFormat%.[[RelevantExtensionKeys]],
- // localeData).
- Handle<JSObject> r;
- ASSIGN_RETURN_ON_EXCEPTION(isolate, r,
- Intl::ResolveLocale(isolate, "relativetimeformat",
- input_locales, options),
- JSRelativeTimeFormat);
- Handle<Object> locale_obj =
- JSObject::GetDataProperty(r, factory->locale_string());
- Handle<String> locale;
- ASSIGN_RETURN_ON_EXCEPTION(isolate, locale,
- Object::ToString(isolate, locale_obj),
- JSRelativeTimeFormat);
-
- // 11. Let locale be r.[[Locale]].
- // 12. Set relativeTimeFormat.[[Locale]] to locale.
- relative_time_format_holder->set_locale(*locale);
-
- // 14. Let s be ? GetOption(options, "style", "string",
+ // 4. Let opt be a new Record.
+ // 5. Let matcher be ? GetOption(options, "localeMatcher", "string", «
+ // "lookup", "best fit" », "best fit").
+ // 6. Set opt.[[localeMatcher]] to matcher.
+ Maybe<Intl::MatcherOption> maybe_locale_matcher =
+ Intl::GetLocaleMatcher(isolate, options, "Intl.RelativeTimeFormat");
+ MAYBE_RETURN(maybe_locale_matcher, MaybeHandle<JSRelativeTimeFormat>());
+ Intl::MatcherOption matcher = maybe_locale_matcher.FromJust();
+
+ // 7. Let localeData be %RelativeTimeFormat%.[[LocaleData]].
+ // 8. Let r be
+ // ResolveLocale(%RelativeTimeFormat%.[[AvailableLocales]],
+ // requestedLocales, opt,
+ // %RelativeTimeFormat%.[[RelevantExtensionKeys]], localeData).
+ Intl::ResolvedLocale r =
+ Intl::ResolveLocale(isolate, JSRelativeTimeFormat::GetAvailableLocales(),
+ requested_locales, matcher, {"nu"});
+
+ // 9. Let locale be r.[[Locale]].
+ // 10. Set relativeTimeFormat.[[Locale]] to locale.
+ // 11. Let dataLocale be r.[[DataLocale]].
+ Handle<String> locale_str =
+ isolate->factory()->NewStringFromAsciiChecked(r.locale.c_str());
+ relative_time_format_holder->set_locale(*locale_str);
+
+ // 12. Let s be ? GetOption(options, "style", "string",
// «"long", "short", "narrow"», "long").
- std::unique_ptr<char[]> style_str = nullptr;
- std::vector<const char*> style_values = {"long", "short", "narrow"};
- Maybe<bool> maybe_found_style =
- Intl::GetStringOption(isolate, options, "style", style_values,
- "Intl.RelativeTimeFormat", &style_str);
- Style style_enum = Style::LONG;
- MAYBE_RETURN(maybe_found_style, MaybeHandle<JSRelativeTimeFormat>());
- if (maybe_found_style.FromJust()) {
- DCHECK_NOT_NULL(style_str.get());
- style_enum = getStyle(style_str.get());
- }
-
- // 15. Set relativeTimeFormat.[[Style]] to s.
+ Maybe<Style> maybe_style = Intl::GetStringOption<Style>(
+ isolate, options, "style", "Intl.RelativeTimeFormat",
+ {"long", "short", "narrow"}, {Style::LONG, Style::SHORT, Style::NARROW},
+ Style::LONG);
+ MAYBE_RETURN(maybe_style, MaybeHandle<JSRelativeTimeFormat>());
+ Style style_enum = maybe_style.FromJust();
+
+ // 13. Set relativeTimeFormat.[[Style]] to s.
relative_time_format_holder->set_style(style_enum);
- // 16. Let numeric be ? GetOption(options, "numeric", "string",
+ // 14. Let numeric be ? GetOption(options, "numeric", "string",
// «"always", "auto"», "always").
- std::unique_ptr<char[]> numeric_str = nullptr;
- std::vector<const char*> numeric_values = {"always", "auto"};
- Maybe<bool> maybe_found_numeric =
- Intl::GetStringOption(isolate, options, "numeric", numeric_values,
- "Intl.RelativeTimeFormat", &numeric_str);
- Numeric numeric_enum = Numeric::ALWAYS;
- MAYBE_RETURN(maybe_found_numeric, MaybeHandle<JSRelativeTimeFormat>());
- if (maybe_found_numeric.FromJust()) {
- DCHECK_NOT_NULL(numeric_str.get());
- numeric_enum = getNumeric(numeric_str.get());
- }
+ Maybe<Numeric> maybe_numeric = Intl::GetStringOption<Numeric>(
+ isolate, options, "numeric", "Intl.RelativeTimeFormat",
+ {"always", "auto"}, {Numeric::ALWAYS, Numeric::AUTO}, Numeric::ALWAYS);
+ MAYBE_RETURN(maybe_numeric, MaybeHandle<JSRelativeTimeFormat>());
+ Numeric numeric_enum = maybe_numeric.FromJust();
- // 17. Set relativeTimeFormat.[[Numeric]] to numeric.
+ // 15. Set relativeTimeFormat.[[Numeric]] to numeric.
relative_time_format_holder->set_numeric(numeric_enum);
- std::unique_ptr<char[]> locale_name = locale->ToCString();
- icu::Locale icu_locale(locale_name.get());
+ icu::Locale icu_locale = r.icu_locale;
UErrorCode status = U_ZERO_ERROR;
- // 25. Let relativeTimeFormat.[[NumberFormat]] be
+ // 19. Let relativeTimeFormat.[[NumberFormat]] be
// ? Construct(%NumberFormat%, « nfLocale, nfOptions »).
icu::NumberFormat* number_format =
icu::NumberFormat::createInstance(icu_locale, UNUM_DECIMAL, status);
@@ -159,9 +160,10 @@ MaybeHandle<JSRelativeTimeFormat> JSRelativeTimeFormat::Initialize(
Managed<icu::RelativeDateTimeFormatter>::FromRawPtr(isolate, 0,
icu_formatter);
- // 30. Set relativeTimeFormat.[[InitializedRelativeTimeFormat]] to true.
+ // 21. Set relativeTimeFormat.[[InitializedRelativeTimeFormat]] to true.
relative_time_format_holder->set_icu_formatter(*managed_formatter);
- // 31. Return relativeTimeFormat.
+
+ // 22. Return relativeTimeFormat.
return relative_time_format_holder;
}
@@ -176,6 +178,12 @@ Handle<JSObject> JSRelativeTimeFormat::ResolvedOptions(
format_holder->StyleAsString(), NONE);
JSObject::AddProperty(isolate, result, factory->numeric_string(),
format_holder->NumericAsString(), NONE);
+ std::string locale_str(format_holder->locale()->ToCString().get());
+ icu::Locale icu_locale = Intl::CreateICULocale(locale_str);
+ std::string numbering_system = Intl::GetNumberingSystem(icu_locale);
+ JSObject::AddProperty(
+ isolate, result, factory->numberingSystem_string(),
+ factory->NewStringFromAsciiChecked(numbering_system.c_str()), NONE);
return result;
}
@@ -231,7 +239,8 @@ Handle<String> UnitAsString(Isolate* isolate, URelativeDateTimeUnit unit_enum) {
MaybeHandle<JSArray> GenerateRelativeTimeFormatParts(
Isolate* isolate, const icu::UnicodeString& formatted,
- const icu::UnicodeString& integer_part, URelativeDateTimeUnit unit_enum) {
+ const icu::UnicodeString& integer_part, URelativeDateTimeUnit unit_enum,
+ double number, const icu::NumberFormat& nf) {
Factory* factory = isolate->factory();
Handle<JSArray> array = factory->NewJSArray(0);
int32_t found = formatted.indexOf(integer_part);
@@ -262,18 +271,12 @@ MaybeHandle<JSArray> GenerateRelativeTimeFormatParts(
substring);
}
- // array.push({
- // 'type': 'integer',
- // 'value': formatted.substring(found, found + integer_part.length),
- // 'unit': unit})
- ASSIGN_RETURN_ON_EXCEPTION(isolate, substring,
- Intl::ToString(isolate, formatted, found,
- found + integer_part.length()),
- JSArray);
Handle<String> unit = UnitAsString(isolate, unit_enum);
- Intl::AddElement(isolate, array, index++,
- factory->integer_string(), // field_type_string
- substring, factory->unit_string(), unit);
+
+ Maybe<int> maybe_format_to_parts =
+ JSNumberFormat::FormatToParts(isolate, array, index, nf, number, unit);
+ MAYBE_RETURN(maybe_format_to_parts, Handle<JSArray>());
+ index = maybe_format_to_parts.FromJust();
// array.push({
// 'type': 'literal',
@@ -370,38 +373,33 @@ MaybeHandle<Object> JSRelativeTimeFormat::Format(
UErrorCode status = U_ZERO_ERROR;
icu::UnicodeString formatted;
-#if USE_CHROMIUM_ICU != 1 && U_ICU_VERSION_MAJOR_NUM < 63
- if (unit_enum != UDAT_REL_UNIT_QUARTER) { // ICU did not implement
- // UDAT_REL_UNIT_QUARTER < 63
-#endif // USE_CHROMIUM_ICU != 1 && U_ICU_VERSION_MAJOR_NUM < 63
- if (format_holder->numeric() == JSRelativeTimeFormat::Numeric::ALWAYS) {
- formatter->formatNumeric(number, unit_enum, formatted, status);
- } else {
- DCHECK_EQ(JSRelativeTimeFormat::Numeric::AUTO, format_holder->numeric());
- formatter->format(number, unit_enum, formatted, status);
- }
-#if USE_CHROMIUM_ICU != 1 && U_ICU_VERSION_MAJOR_NUM < 63
+ if (format_holder->numeric() == JSRelativeTimeFormat::Numeric::ALWAYS) {
+ formatter->formatNumeric(number, unit_enum, formatted, status);
+ } else {
+ DCHECK_EQ(JSRelativeTimeFormat::Numeric::AUTO, format_holder->numeric());
+ formatter->format(number, unit_enum, formatted, status);
}
-#endif // USE_CHROMIUM_ICU != 1 && U_ICU_VERSION_MAJOR_NUM < 63
if (U_FAILURE(status)) {
THROW_NEW_ERROR(isolate, NewTypeError(MessageTemplate::kIcuError), Object);
}
if (to_parts) {
- icu::UnicodeString integer;
+ icu::UnicodeString number_str;
icu::FieldPosition pos;
- formatter->getNumberFormat().format(std::abs(number), integer, pos, status);
+ double abs_number = std::abs(number);
+ formatter->getNumberFormat().format(abs_number, number_str, pos, status);
if (U_FAILURE(status)) {
THROW_NEW_ERROR(isolate, NewTypeError(MessageTemplate::kIcuError),
Object);
}
Handle<JSArray> elements;
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, elements,
- GenerateRelativeTimeFormatParts(isolate, formatted, integer, unit_enum),
- Object);
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, elements,
+ GenerateRelativeTimeFormatParts(
+ isolate, formatted, number_str, unit_enum,
+ abs_number, formatter->getNumberFormat()),
+ Object);
return elements;
}
@@ -410,5 +408,12 @@ MaybeHandle<Object> JSRelativeTimeFormat::Format(
formatted.length()));
}
+std::set<std::string> JSRelativeTimeFormat::GetAvailableLocales() {
+ int32_t num_locales = 0;
+ const icu::Locale* icu_available_locales =
+ icu::DateFormat::getAvailableLocales(num_locales);
+ return Intl::BuildLocaleSet(icu_available_locales, num_locales);
+}
+
} // namespace internal
} // namespace v8
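
GetAvailableLocales above asks ICU for its locale array and folds it into a std::set via Intl::BuildLocaleSet. Assuming BuildLocaleSet essentially collects ICU's canonical locale names (the real helper may normalize the names differently), the pattern looks like:

    #include <cstdio>
    #include <set>
    #include <string>

    #include "unicode/datefmt.h"
    #include "unicode/locid.h"

    // Assumed shape of Intl::BuildLocaleSet: collect ICU's locale names.
    std::set<std::string> BuildLocaleSet(const icu::Locale* locales,
                                         int32_t num_locales) {
      std::set<std::string> result;
      for (int32_t i = 0; i < num_locales; i++) {
        result.insert(locales[i].getName());  // e.g. "en_US"
      }
      return result;
    }

    std::set<std::string> GetAvailableLocales() {
      int32_t num_locales = 0;
      const icu::Locale* available =
          icu::DateFormat::getAvailableLocales(num_locales);
      return BuildLocaleSet(available, num_locales);
    }

    int main() {
      std::printf("%zu available locales\n", GetAvailableLocales().size());
      return 0;
    }
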
diff --git a/deps/v8/src/objects/js-relative-time-format.h b/deps/v8/src/objects/js-relative-time-format.h
index eaaeb0e05f..c90e24118b 100644
--- a/deps/v8/src/objects/js-relative-time-format.h
+++ b/deps/v8/src/objects/js-relative-time-format.h
@@ -9,6 +9,9 @@
#ifndef V8_OBJECTS_JS_RELATIVE_TIME_FORMAT_H_
#define V8_OBJECTS_JS_RELATIVE_TIME_FORMAT_H_
+#include <set>
+#include <string>
+
#include "src/heap/factory.h"
#include "src/isolate.h"
#include "src/objects.h"
@@ -47,6 +50,8 @@ class JSRelativeTimeFormat : public JSObject {
Handle<JSRelativeTimeFormat> format_holder, const char* func_name,
bool to_parts);
+ static std::set<std::string> GetAvailableLocales();
+
DECL_CAST(JSRelativeTimeFormat)
// RelativeTimeFormat accessors.
@@ -101,17 +106,23 @@ class JSRelativeTimeFormat : public JSObject {
DECL_VERIFIER(JSRelativeTimeFormat)
// Layout description.
- static const int kJSRelativeTimeFormatOffset = JSObject::kHeaderSize;
- static const int kLocaleOffset = kJSRelativeTimeFormatOffset + kPointerSize;
- static const int kICUFormatterOffset = kLocaleOffset + kPointerSize;
- static const int kFlagsOffset = kICUFormatterOffset + kPointerSize;
- static const int kSize = kFlagsOffset + kPointerSize;
+#define JS_RELATIVE_TIME_FORMAT_FIELDS(V) \
+ V(kJSRelativeTimeFormatOffset, kTaggedSize) \
+ V(kLocaleOffset, kTaggedSize) \
+ V(kICUFormatterOffset, kTaggedSize) \
+ V(kFlagsOffset, kTaggedSize) \
+ /* Header size. */ \
+ V(kSize, 0)
+
+ DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
+ JS_RELATIVE_TIME_FORMAT_FIELDS)
+#undef JS_RELATIVE_TIME_FORMAT_FIELDS
private:
static Style getStyle(const char* str);
static Numeric getNumeric(const char* str);
- DISALLOW_IMPLICIT_CONSTRUCTORS(JSRelativeTimeFormat);
+ OBJECT_CONSTRUCTORS(JSRelativeTimeFormat, JSObject);
};
} // namespace internal
diff --git a/deps/v8/src/objects/js-segment-iterator-inl.h b/deps/v8/src/objects/js-segment-iterator-inl.h
new file mode 100644
index 0000000000..1ee6087d1e
--- /dev/null
+++ b/deps/v8/src/objects/js-segment-iterator-inl.h
@@ -0,0 +1,53 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INTL_SUPPORT
+#error Internationalization is expected to be enabled.
+#endif // V8_INTL_SUPPORT
+
+#ifndef V8_OBJECTS_JS_SEGMENT_ITERATOR_INL_H_
+#define V8_OBJECTS_JS_SEGMENT_ITERATOR_INL_H_
+
+#include "src/objects-inl.h"
+#include "src/objects/js-segment-iterator.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+OBJECT_CONSTRUCTORS_IMPL(JSSegmentIterator, JSObject)
+
+// Base segment iterator accessors.
+ACCESSORS(JSSegmentIterator, icu_break_iterator, Managed<icu::BreakIterator>,
+ kICUBreakIteratorOffset)
+ACCESSORS(JSSegmentIterator, unicode_string, Managed<icu::UnicodeString>,
+ kUnicodeStringOffset)
+
+BIT_FIELD_ACCESSORS(JSSegmentIterator, flags, is_break_type_set,
+ JSSegmentIterator::BreakTypeSetBits)
+
+SMI_ACCESSORS(JSSegmentIterator, flags, kFlagsOffset)
+
+CAST_ACCESSOR(JSSegmentIterator);
+
+inline void JSSegmentIterator::set_granularity(
+ JSSegmenter::Granularity granularity) {
+ DCHECK_GT(JSSegmenter::Granularity::COUNT, granularity);
+ int hints = flags();
+ hints = GranularityBits::update(hints, granularity);
+ set_flags(hints);
+}
+
+inline JSSegmenter::Granularity JSSegmentIterator::granularity() const {
+ return GranularityBits::decode(flags());
+}
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_JS_SEGMENT_ITERATOR_INL_H_
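
The new iterator stores its icu::BreakIterator and icu::UnicodeString behind Managed<T> wrappers, which tie the lifetime of an external C++ allocation to a GC-managed heap object. A toy, shared_ptr-based sketch of that ownership shape — V8's real Managed<T> is itself a heap object registered with the isolate, so treat this as an analogy only:

    #include <memory>

    template <typename CppType>
    class Managed {
     public:
      static Managed FromRawPtr(CppType* raw) {
        // V8's version also registers a weak finalizer with the isolate,
        // so the shared_ptr is dropped when the wrapper is collected.
        return Managed(std::shared_ptr<CppType>(raw));
      }
      CppType* raw() const { return ptr_.get(); }

     private:
      explicit Managed(std::shared_ptr<CppType> ptr) : ptr_(std::move(ptr)) {}
      std::shared_ptr<CppType> ptr_;
    };

    int main() {
      Managed<int> managed = Managed<int>::FromRawPtr(new int(42));
      return *managed.raw() == 42 ? 0 : 1;
    }
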
diff --git a/deps/v8/src/objects/js-segment-iterator.cc b/deps/v8/src/objects/js-segment-iterator.cc
new file mode 100644
index 0000000000..74b0330719
--- /dev/null
+++ b/deps/v8/src/objects/js-segment-iterator.cc
@@ -0,0 +1,290 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INTL_SUPPORT
+#error Internationalization is expected to be enabled.
+#endif // V8_INTL_SUPPORT
+
+#include "src/objects/js-segment-iterator.h"
+
+#include <map>
+#include <memory>
+#include <string>
+
+#include "src/heap/factory.h"
+#include "src/isolate.h"
+#include "src/objects-inl.h"
+#include "src/objects/intl-objects.h"
+#include "src/objects/js-segment-iterator-inl.h"
+#include "src/objects/managed.h"
+#include "unicode/brkiter.h"
+
+namespace v8 {
+namespace internal {
+
+MaybeHandle<String> JSSegmentIterator::GetSegment(Isolate* isolate,
+ int32_t start,
+ int32_t end) const {
+ return Intl::ToString(isolate, *(unicode_string()->raw()), start, end);
+}
+
+Handle<String> JSSegmentIterator::GranularityAsString() const {
+ switch (granularity()) {
+ case JSSegmenter::Granularity::GRAPHEME:
+ return GetReadOnlyRoots().grapheme_string_handle();
+ case JSSegmenter::Granularity::WORD:
+ return GetReadOnlyRoots().word_string_handle();
+ case JSSegmenter::Granularity::SENTENCE:
+ return GetReadOnlyRoots().sentence_string_handle();
+ case JSSegmenter::Granularity::COUNT:
+ UNREACHABLE();
+ }
+}
+
+MaybeHandle<JSSegmentIterator> JSSegmentIterator::Create(
+ Isolate* isolate, icu::BreakIterator* break_iterator,
+ JSSegmenter::Granularity granularity, Handle<String> text) {
+ CHECK_NOT_NULL(break_iterator);
+ // 1. Let iterator be ObjectCreate(%SegmentIteratorPrototype%).
+ Handle<Map> map = Handle<Map>(
+ isolate->native_context()->intl_segment_iterator_map(), isolate);
+ Handle<JSObject> result = isolate->factory()->NewJSObjectFromMap(map);
+
+ Handle<JSSegmentIterator> segment_iterator =
+ Handle<JSSegmentIterator>::cast(result);
+
+ segment_iterator->set_flags(0);
+ segment_iterator->set_granularity(granularity);
+ // 2. Let iterator.[[SegmentIteratorSegmenter]] be segmenter.
+ Handle<Managed<icu::BreakIterator>> managed_break_iterator =
+ Managed<icu::BreakIterator>::FromRawPtr(isolate, 0, break_iterator);
+ segment_iterator->set_icu_break_iterator(*managed_break_iterator);
+
+ // 3. Let iterator.[[SegmentIteratorString]] be string.
+ Managed<icu::UnicodeString> unicode_string =
+ Intl::SetTextToBreakIterator(isolate, text, break_iterator);
+ segment_iterator->set_unicode_string(unicode_string);
+
+ // 4. Let iterator.[[SegmentIteratorIndex]] be 0.
+  // The index from step 4 is maintained inside break_iterator itself.
+
+ // 5. Let iterator.[[SegmentIteratorBreakType]] be undefined.
+ segment_iterator->set_is_break_type_set(false);
+
+ return segment_iterator;
+}
+
+// ecma402 #sec-segment-iterator-prototype-breakType
+Handle<Object> JSSegmentIterator::BreakType() const {
+ if (!is_break_type_set()) {
+ return GetReadOnlyRoots().undefined_value_handle();
+ }
+ icu::BreakIterator* break_iterator = icu_break_iterator()->raw();
+ int32_t rule_status = break_iterator->getRuleStatus();
+ switch (granularity()) {
+ case JSSegmenter::Granularity::GRAPHEME:
+ return GetReadOnlyRoots().undefined_value_handle();
+ case JSSegmenter::Granularity::WORD:
+ if (rule_status >= UBRK_WORD_NONE && rule_status < UBRK_WORD_NONE_LIMIT) {
+ // "words" that do not fit into any of other categories. Includes spaces
+ // and most punctuation.
+ return GetReadOnlyRoots().none_string_handle();
+ }
+ if ((rule_status >= UBRK_WORD_NUMBER &&
+ rule_status < UBRK_WORD_NUMBER_LIMIT) ||
+ (rule_status >= UBRK_WORD_LETTER &&
+ rule_status < UBRK_WORD_LETTER_LIMIT) ||
+ (rule_status >= UBRK_WORD_KANA &&
+ rule_status < UBRK_WORD_KANA_LIMIT) ||
+ (rule_status >= UBRK_WORD_IDEO &&
+ rule_status < UBRK_WORD_IDEO_LIMIT)) {
+ // words that appear to be numbers, letters, kana characters,
+      // words that appear to be numbers, letters, kana characters,
+      // ideographic characters, etc.
+ return GetReadOnlyRoots().word_string_handle();
+ }
+ return GetReadOnlyRoots().undefined_value_handle();
+ case JSSegmenter::Granularity::SENTENCE:
+ if (rule_status >= UBRK_SENTENCE_TERM &&
+ rule_status < UBRK_SENTENCE_TERM_LIMIT) {
+ // sentences ending with a sentence terminator ('.', '?', '!', etc.)
+ // character, possibly followed by a hard separator (CR, LF, PS, etc.)
+ return GetReadOnlyRoots().term_string_handle();
+ }
+ if ((rule_status >= UBRK_SENTENCE_SEP &&
+ rule_status < UBRK_SENTENCE_SEP_LIMIT)) {
+ // sentences that do not contain an ending sentence terminator ('.',
+ // '?', '!', etc.) character, but are ended only by a hard separator
+        // (CR, LF, PS, etc.), i.e. by hard or mandatory line breaks.
+ return GetReadOnlyRoots().sep_string_handle();
+ }
+ return GetReadOnlyRoots().undefined_value_handle();
+ case JSSegmenter::Granularity::COUNT:
+ UNREACHABLE();
+ }
+}
+
+// ecma402 #sec-segment-iterator-prototype-index
+Handle<Object> JSSegmentIterator::Index(
+ Isolate* isolate, Handle<JSSegmentIterator> segment_iterator) {
+ icu::BreakIterator* icu_break_iterator =
+ segment_iterator->icu_break_iterator()->raw();
+ CHECK_NOT_NULL(icu_break_iterator);
+ return isolate->factory()->NewNumberFromInt(icu_break_iterator->current());
+}
+
+// ecma402 #sec-segment-iterator-prototype-next
+MaybeHandle<JSReceiver> JSSegmentIterator::Next(
+ Isolate* isolate, Handle<JSSegmentIterator> segment_iterator) {
+ Factory* factory = isolate->factory();
+ icu::BreakIterator* icu_break_iterator =
+ segment_iterator->icu_break_iterator()->raw();
+ // 3. Let _previousIndex be iterator.[[SegmentIteratorIndex]].
+ int32_t prev = icu_break_iterator->current();
+ // 4. Let done be AdvanceSegmentIterator(iterator, forwards).
+ int32_t index = icu_break_iterator->next();
+ segment_iterator->set_is_break_type_set(true);
+ if (index == icu::BreakIterator::DONE) {
+ // 5. If done is true, return CreateIterResultObject(undefined, true).
+ return factory->NewJSIteratorResult(isolate->factory()->undefined_value(),
+ true);
+ }
+ // 6. Let newIndex be iterator.[[SegmentIteratorIndex]].
+ Handle<Object> new_index = factory->NewNumberFromInt(index);
+
+ // 8. Let segment be the substring of string from previousIndex to
+ // newIndex, inclusive of previousIndex and exclusive of newIndex.
+ Handle<String> segment;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, segment,
+ segment_iterator->GetSegment(isolate, prev, index),
+ JSReceiver);
+
+ // 9. Let breakType be iterator.[[SegmentIteratorBreakType]].
+ Handle<Object> break_type = segment_iterator->BreakType();
+
+ // 10. Let result be ! ObjectCreate(%ObjectPrototype%).
+ Handle<JSObject> result = factory->NewJSObject(isolate->object_function());
+
+ // 11. Perform ! CreateDataProperty(result "segment", segment).
+ CHECK(JSReceiver::CreateDataProperty(
+ isolate, result, factory->segment_string(), segment, kDontThrow)
+ .FromJust());
+
+ // 12. Perform ! CreateDataProperty(result, "breakType", breakType).
+ CHECK(JSReceiver::CreateDataProperty(isolate, result,
+ factory->breakType_string(), break_type,
+ kDontThrow)
+ .FromJust());
+
+ // 13. Perform ! CreateDataProperty(result, "index", newIndex).
+ CHECK(JSReceiver::CreateDataProperty(isolate, result, factory->index_string(),
+ new_index, kDontThrow)
+ .FromJust());
+
+ // 14. Return CreateIterResultObject(result, false).
+ return factory->NewJSIteratorResult(result, false);
+}
+
+// ecma402 #sec-segment-iterator-prototype-following
+Maybe<bool> JSSegmentIterator::Following(
+ Isolate* isolate, Handle<JSSegmentIterator> segment_iterator,
+ Handle<Object> from_obj) {
+ Factory* factory = isolate->factory();
+ icu::BreakIterator* icu_break_iterator =
+ segment_iterator->icu_break_iterator()->raw();
+ // 3. If from is not undefined,
+ if (!from_obj->IsUndefined()) {
+ // a. Let from be ? ToIndex(from).
+ uint32_t from;
+ Handle<Object> index;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, index,
+ Object::ToIndex(isolate, from_obj, MessageTemplate::kInvalidIndex),
+ Nothing<bool>());
+ if (!index->ToArrayIndex(&from)) {
+ THROW_NEW_ERROR_RETURN_VALUE(
+ isolate,
+ NewRangeError(MessageTemplate::kParameterOfFunctionOutOfRange,
+ factory->NewStringFromStaticChars("from"),
+ factory->NewStringFromStaticChars("following"), index),
+ Nothing<bool>());
+ }
+ // b. Let length be the length of iterator.[[SegmentIteratorString]].
+ uint32_t length =
+ static_cast<uint32_t>(icu_break_iterator->getText().getLength());
+
+ // c. If from ≥ length, throw a RangeError exception.
+ if (from >= length) {
+ THROW_NEW_ERROR_RETURN_VALUE(
+ isolate,
+ NewRangeError(MessageTemplate::kParameterOfFunctionOutOfRange,
+ factory->NewStringFromStaticChars("from"),
+ factory->NewStringFromStaticChars("following"),
+ from_obj),
+ Nothing<bool>());
+ }
+
+ // d. Let iterator.[[SegmentIteratorPosition]] be from.
+ segment_iterator->set_is_break_type_set(true);
+ icu_break_iterator->following(from);
+ return Just(false);
+ }
+  // 4. Return AdvanceSegmentIterator(iterator, forward), which returns true
+  //    when the iterator cannot advance further (direction is forwards and
+  //    position is already the length of the string).
+ segment_iterator->set_is_break_type_set(true);
+ return Just(icu_break_iterator->next() == icu::BreakIterator::DONE);
+}
+
+// ecma402 #sec-segment-iterator-prototype-preceding
+Maybe<bool> JSSegmentIterator::Preceding(
+ Isolate* isolate, Handle<JSSegmentIterator> segment_iterator,
+ Handle<Object> from_obj) {
+ Factory* factory = isolate->factory();
+ icu::BreakIterator* icu_break_iterator =
+ segment_iterator->icu_break_iterator()->raw();
+ // 3. If from is not undefined,
+ if (!from_obj->IsUndefined()) {
+ // a. Let from be ? ToIndex(from).
+ uint32_t from;
+ Handle<Object> index;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, index,
+ Object::ToIndex(isolate, from_obj, MessageTemplate::kInvalidIndex),
+ Nothing<bool>());
+
+ if (!index->ToArrayIndex(&from)) {
+ THROW_NEW_ERROR_RETURN_VALUE(
+ isolate,
+ NewRangeError(MessageTemplate::kParameterOfFunctionOutOfRange,
+ factory->NewStringFromStaticChars("from"),
+ factory->NewStringFromStaticChars("preceding"), index),
+ Nothing<bool>());
+ }
+ // b. Let length be the length of iterator.[[SegmentIteratorString]].
+ uint32_t length =
+ static_cast<uint32_t>(icu_break_iterator->getText().getLength());
+ // c. If from > length or from = 0, throw a RangeError exception.
+ if (from > length || from == 0) {
+ THROW_NEW_ERROR_RETURN_VALUE(
+ isolate,
+ NewRangeError(MessageTemplate::kParameterOfFunctionOutOfRange,
+ factory->NewStringFromStaticChars("from"),
+ factory->NewStringFromStaticChars("preceding"),
+ from_obj),
+ Nothing<bool>());
+ }
+ // d. Let iterator.[[SegmentIteratorIndex]] be from.
+ segment_iterator->set_is_break_type_set(true);
+ icu_break_iterator->preceding(from);
+ return Just(false);
+ }
+  // 4. Return AdvanceSegmentIterator(iterator, backwards), which returns
+  //    true when the iterator cannot move back (position is already 0).
+ segment_iterator->set_is_break_type_set(true);
+ return Just(icu_break_iterator->previous() == icu::BreakIterator::DONE);
+}
+
+} // namespace internal
+} // namespace v8
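
BreakType above classifies ICU's per-boundary rule status into the spec's break types by range-checking constants from unicode/ubrk.h. A self-contained sketch of the word-granularity arm of that classification:

    #include <cstdint>
    #include <cstdio>

    #include "unicode/ubrk.h"

    // Maps a word-granularity rule status to the spec's breakType string.
    const char* ClassifyWordStatus(int32_t status) {
      if (status >= UBRK_WORD_NONE && status < UBRK_WORD_NONE_LIMIT) {
        return "none";  // spaces and most punctuation
      }
      if ((status >= UBRK_WORD_NUMBER && status < UBRK_WORD_NUMBER_LIMIT) ||
          (status >= UBRK_WORD_LETTER && status < UBRK_WORD_LETTER_LIMIT) ||
          (status >= UBRK_WORD_KANA && status < UBRK_WORD_KANA_LIMIT) ||
          (status >= UBRK_WORD_IDEO && status < UBRK_WORD_IDEO_LIMIT)) {
        return "word";  // numbers, letters, kana, ideographs
      }
      return "undefined";
    }

    int main() {
      std::printf("%s\n", ClassifyWordStatus(UBRK_WORD_LETTER));  // "word"
      return 0;
    }
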
diff --git a/deps/v8/src/objects/js-segment-iterator.h b/deps/v8/src/objects/js-segment-iterator.h
new file mode 100644
index 0000000000..0535704a68
--- /dev/null
+++ b/deps/v8/src/objects/js-segment-iterator.h
@@ -0,0 +1,112 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INTL_SUPPORT
+#error Internationalization is expected to be enabled.
+#endif // V8_INTL_SUPPORT
+
+#ifndef V8_OBJECTS_JS_SEGMENT_ITERATOR_H_
+#define V8_OBJECTS_JS_SEGMENT_ITERATOR_H_
+
+#include "src/heap/factory.h"
+#include "src/isolate.h"
+#include "src/objects.h"
+#include "src/objects/js-segmenter.h"
+#include "src/objects/managed.h"
+#include "unicode/uversion.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace U_ICU_NAMESPACE {
+class BreakIterator;
+class UnicodeString;
+} // namespace U_ICU_NAMESPACE
+
+namespace v8 {
+namespace internal {
+
+class JSSegmentIterator : public JSObject {
+ public:
+ // ecma402 #sec-CreateSegmentIterator
+ V8_WARN_UNUSED_RESULT static MaybeHandle<JSSegmentIterator> Create(
+ Isolate* isolate, icu::BreakIterator* icu_break_iterator,
+ JSSegmenter::Granularity granularity, Handle<String> string);
+
+ // ecma402 #sec-segment-iterator-prototype-next
+ V8_WARN_UNUSED_RESULT static MaybeHandle<JSReceiver> Next(
+ Isolate* isolate, Handle<JSSegmentIterator> segment_iterator_holder);
+
+ // ecma402 #sec-segment-iterator-prototype-following
+ static Maybe<bool> Following(
+ Isolate* isolate, Handle<JSSegmentIterator> segment_iterator_holder,
+ Handle<Object> from);
+
+ // ecma402 #sec-segment-iterator-prototype-preceding
+ static Maybe<bool> Preceding(
+ Isolate* isolate, Handle<JSSegmentIterator> segment_iterator_holder,
+ Handle<Object> from);
+
+ // ecma402 #sec-segment-iterator-prototype-index
+ static Handle<Object> Index(
+ Isolate* isolate, Handle<JSSegmentIterator> segment_iterator_holder);
+
+ Handle<String> GranularityAsString() const;
+
+ DECL_BOOLEAN_ACCESSORS(is_break_type_set)
+
+ // ecma402 #sec-segment-iterator-prototype-breakType
+ Handle<Object> BreakType() const;
+
+ V8_WARN_UNUSED_RESULT MaybeHandle<String> GetSegment(Isolate* isolate,
+ int32_t start,
+ int32_t end) const;
+
+ DECL_CAST(JSSegmentIterator)
+
+ // SegmentIterator accessors.
+ DECL_ACCESSORS(icu_break_iterator, Managed<icu::BreakIterator>)
+ DECL_ACCESSORS(unicode_string, Managed<icu::UnicodeString>)
+
+ DECL_PRINTER(JSSegmentIterator)
+ DECL_VERIFIER(JSSegmentIterator)
+
+ inline void set_granularity(JSSegmenter::Granularity granularity);
+ inline JSSegmenter::Granularity granularity() const;
+
+// Bit positions in |flags|.
+#define FLAGS_BIT_FIELDS(V, _) \
+ V(GranularityBits, JSSegmenter::Granularity, 2, _) \
+ V(BreakTypeSetBits, bool, 1, _)
+ DEFINE_BIT_FIELDS(FLAGS_BIT_FIELDS)
+#undef FLAGS_BIT_FIELDS
+
+ STATIC_ASSERT(JSSegmenter::Granularity::GRAPHEME <= GranularityBits::kMax);
+ STATIC_ASSERT(JSSegmenter::Granularity::WORD <= GranularityBits::kMax);
+ STATIC_ASSERT(JSSegmenter::Granularity::SENTENCE <= GranularityBits::kMax);
+
+ // [flags] Bit field containing various flags about the function.
+ DECL_INT_ACCESSORS(flags)
+
+// Layout description.
+#define JS_SEGMENT_ITERATOR_FIELDS(V)     \
+  /* Pointer fields. */                   \
+  V(kICUBreakIteratorOffset, kTaggedSize) \
+  V(kUnicodeStringOffset, kTaggedSize)    \
+  V(kFlagsOffset, kTaggedSize)            \
+  /* Total size. */                       \
+  V(kSize, 0)
+
+  DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize, JS_SEGMENT_ITERATOR_FIELDS)
+#undef JS_SEGMENT_ITERATOR_FIELDS
+
+ OBJECT_CONSTRUCTORS(JSSegmentIterator, JSObject);
+};
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_JS_SEGMENT_ITERATOR_H_
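
js-segment-iterator.h packs two bit fields into one Smi-sized flags word: two bits of granularity plus a one-bit "breakType was set" flag. A sketch of that layout, with shifts inferred from the FLAGS_BIT_FIELDS list above (illustrative, not authoritative):

    #include <cassert>
    #include <cstdint>

    enum class Granularity : uint32_t { GRAPHEME, WORD, SENTENCE, COUNT };

    constexpr int kGranularityShift = 0;  // V(GranularityBits, ..., 2, _)
    constexpr int kGranularityBits = 2;
    constexpr int kBreakTypeSetShift = kGranularityShift + kGranularityBits;

    int main() {
      uint32_t flags = 0;
      flags |= static_cast<uint32_t>(Granularity::SENTENCE)
               << kGranularityShift;
      flags |= uint32_t{1} << kBreakTypeSetShift;  // is_break_type_set
      assert(((flags >> kGranularityShift) & 3u) ==
             static_cast<uint32_t>(Granularity::SENTENCE));
      assert((flags >> kBreakTypeSetShift) & 1u);
      return 0;
    }
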
diff --git a/deps/v8/src/objects/js-segmenter-inl.h b/deps/v8/src/objects/js-segmenter-inl.h
index 1aac2b1d63..327af7f485 100644
--- a/deps/v8/src/objects/js-segmenter-inl.h
+++ b/deps/v8/src/objects/js-segmenter-inl.h
@@ -18,23 +18,14 @@
namespace v8 {
namespace internal {
+OBJECT_CONSTRUCTORS_IMPL(JSSegmenter, JSObject)
+
// Base segmenter accessors.
ACCESSORS(JSSegmenter, locale, String, kLocaleOffset)
ACCESSORS(JSSegmenter, icu_break_iterator, Managed<icu::BreakIterator>,
kICUBreakIteratorOffset)
SMI_ACCESSORS(JSSegmenter, flags, kFlagsOffset)
-inline void JSSegmenter::set_line_break_style(LineBreakStyle line_break_style) {
- DCHECK_GT(LineBreakStyle::COUNT, line_break_style);
- int hints = flags();
- hints = LineBreakStyleBits::update(hints, line_break_style);
- set_flags(hints);
-}
-
-inline JSSegmenter::LineBreakStyle JSSegmenter::line_break_style() const {
- return LineBreakStyleBits::decode(flags());
-}
-
inline void JSSegmenter::set_granularity(Granularity granularity) {
DCHECK_GT(Granularity::COUNT, granularity);
int hints = flags();
diff --git a/deps/v8/src/objects/js-segmenter.cc b/deps/v8/src/objects/js-segmenter.cc
index 62d9bd508a..aba1c7bdf2 100644
--- a/deps/v8/src/objects/js-segmenter.cc
+++ b/deps/v8/src/objects/js-segmenter.cc
@@ -23,108 +23,72 @@
namespace v8 {
namespace internal {
-JSSegmenter::LineBreakStyle JSSegmenter::GetLineBreakStyle(const char* str) {
- if (strcmp(str, "strict") == 0) return JSSegmenter::LineBreakStyle::STRICT;
- if (strcmp(str, "normal") == 0) return JSSegmenter::LineBreakStyle::NORMAL;
- if (strcmp(str, "loose") == 0) return JSSegmenter::LineBreakStyle::LOOSE;
- UNREACHABLE();
-}
-
JSSegmenter::Granularity JSSegmenter::GetGranularity(const char* str) {
if (strcmp(str, "grapheme") == 0) return JSSegmenter::Granularity::GRAPHEME;
if (strcmp(str, "word") == 0) return JSSegmenter::Granularity::WORD;
if (strcmp(str, "sentence") == 0) return JSSegmenter::Granularity::SENTENCE;
- if (strcmp(str, "line") == 0) return JSSegmenter::Granularity::LINE;
UNREACHABLE();
}
MaybeHandle<JSSegmenter> JSSegmenter::Initialize(
Isolate* isolate, Handle<JSSegmenter> segmenter_holder,
- Handle<Object> input_locales, Handle<Object> input_options) {
- Factory* factory = isolate->factory();
+ Handle<Object> locales, Handle<Object> input_options) {
segmenter_holder->set_flags(0);
+
// 3. Let requestedLocales be ? CanonicalizeLocaleList(locales).
- Handle<JSObject> requested_locales;
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, requested_locales,
- Intl::CanonicalizeLocaleListJS(isolate, input_locales), JSSegmenter);
+ Maybe<std::vector<std::string>> maybe_requested_locales =
+ Intl::CanonicalizeLocaleList(isolate, locales);
+ MAYBE_RETURN(maybe_requested_locales, Handle<JSSegmenter>());
+ std::vector<std::string> requested_locales =
+ maybe_requested_locales.FromJust();
// 11. If options is undefined, then
Handle<JSReceiver> options;
if (input_options->IsUndefined(isolate)) {
- // a. Let options be ObjectCreate(null).
+ // 11. a. Let options be ObjectCreate(null).
options = isolate->factory()->NewJSObjectWithNullProto();
// 12. Else
} else {
- // a. Let options be ? ToObject(options).
+    // 12. a. Let options be ? ToObject(options).
ASSIGN_RETURN_ON_EXCEPTION(isolate, options,
Object::ToObject(isolate, input_options),
JSSegmenter);
}
- // 8. Set opt.[[lb]] to lineBreakStyle.
+ // 4. Let opt be a new Record.
+ // 5. Let matcher be ? GetOption(options, "localeMatcher", "string",
+ // « "lookup", "best fit" », "best fit").
+ // 6. Set opt.[[localeMatcher]] to matcher.
+ Maybe<Intl::MatcherOption> maybe_locale_matcher =
+ Intl::GetLocaleMatcher(isolate, options, "Intl.Segmenter");
+ MAYBE_RETURN(maybe_locale_matcher, MaybeHandle<JSSegmenter>());
+ Intl::MatcherOption matcher = maybe_locale_matcher.FromJust();
- // Because currently we access localeMatcher inside ResolveLocale, we have to
- // move ResolveLocale before get lineBreakStyle
// 9. Let r be ResolveLocale(%Segmenter%.[[AvailableLocales]],
// requestedLocales, opt, %Segmenter%.[[RelevantExtensionKeys]]).
- Handle<JSObject> r;
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, r,
- Intl::ResolveLocale(isolate, "segmenter", requested_locales, options),
- JSSegmenter);
- Handle<Object> locale_obj =
- JSObject::GetDataProperty(r, factory->locale_string());
- Handle<String> locale;
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, locale, Object::ToString(isolate, locale_obj), JSSegmenter);
-
- // 7. Let lineBreakStyle be ? GetOption(options, "lineBreakStyle", "string", «
- // "strict", "normal", "loose" », "normal").
- std::unique_ptr<char[]> line_break_style_str = nullptr;
- const std::vector<const char*> line_break_style_values = {"strict", "normal",
- "loose"};
- Maybe<bool> maybe_found_line_break_style = Intl::GetStringOption(
- isolate, options, "lineBreakStyle", line_break_style_values,
- "Intl.Segmenter", &line_break_style_str);
- LineBreakStyle line_break_style_enum = LineBreakStyle::NORMAL;
- MAYBE_RETURN(maybe_found_line_break_style, MaybeHandle<JSSegmenter>());
- if (maybe_found_line_break_style.FromJust()) {
- DCHECK_NOT_NULL(line_break_style_str.get());
- line_break_style_enum = GetLineBreakStyle(line_break_style_str.get());
- }
+ Intl::ResolvedLocale r =
+ Intl::ResolveLocale(isolate, JSSegmenter::GetAvailableLocales(),
+ requested_locales, matcher, {});
// 10. Set segmenter.[[Locale]] to the value of r.[[Locale]].
- segmenter_holder->set_locale(*locale);
+ Handle<String> locale_str =
+ isolate->factory()->NewStringFromAsciiChecked(r.locale.c_str());
+ segmenter_holder->set_locale(*locale_str);
// 13. Let granularity be ? GetOption(options, "granularity", "string", «
- // "grapheme", "word", "sentence", "line" », "grapheme").
-
- std::unique_ptr<char[]> granularity_str = nullptr;
- const std::vector<const char*> granularity_values = {"grapheme", "word",
- "sentence", "line"};
- Maybe<bool> maybe_found_granularity =
- Intl::GetStringOption(isolate, options, "granularity", granularity_values,
- "Intl.Segmenter", &granularity_str);
- Granularity granularity_enum = Granularity::GRAPHEME;
- MAYBE_RETURN(maybe_found_granularity, MaybeHandle<JSSegmenter>());
- if (maybe_found_granularity.FromJust()) {
- DCHECK_NOT_NULL(granularity_str.get());
- granularity_enum = GetGranularity(granularity_str.get());
- }
+ // "grapheme", "word", "sentence" », "grapheme").
+ Maybe<Granularity> maybe_granularity = Intl::GetStringOption<Granularity>(
+ isolate, options, "granularity", "Intl.Segmenter",
+ {"grapheme", "word", "sentence"},
+ {Granularity::GRAPHEME, Granularity::WORD, Granularity::SENTENCE},
+ Granularity::GRAPHEME);
+ MAYBE_RETURN(maybe_granularity, MaybeHandle<JSSegmenter>());
+ Granularity granularity_enum = maybe_granularity.FromJust();
// 14. Set segmenter.[[SegmenterGranularity]] to granularity.
segmenter_holder->set_granularity(granularity_enum);
- // 15. If granularity is "line",
- if (granularity_enum == Granularity::LINE) {
- // a. Set segmenter.[[SegmenterLineBreakStyle]] to r.[[lb]].
- segmenter_holder->set_line_break_style(line_break_style_enum);
- } else {
- segmenter_holder->set_line_break_style(LineBreakStyle::NOTSET);
- }
-
- icu::Locale icu_locale = Intl::CreateICULocale(isolate, locale);
+ icu::Locale icu_locale = r.icu_locale;
DCHECK(!icu_locale.isBogus());
UErrorCode status = U_ZERO_ERROR;
@@ -143,13 +107,6 @@ MaybeHandle<JSSegmenter> JSSegmenter::Initialize(
icu_break_iterator.reset(
icu::BreakIterator::createSentenceInstance(icu_locale, status));
break;
- case Granularity::LINE:
- icu_break_iterator.reset(
- icu::BreakIterator::createLineInstance(icu_locale, status));
- // 15. If granularity is "line",
- // a. Set segmenter.[[SegmenterLineBreakStyle]] to r.[[lb]].
- // TBW
- break;
case Granularity::COUNT:
UNREACHABLE();
}
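
For reference, a hedged example of the ICU API this switch drives; it assumes an installed ICU (link flags such as -licuuc -licui18n are setup-dependent). A word-granularity iterator reports boundary offsets over a string, which is exactly what the segmenter walks:

#include <unicode/brkiter.h>
#include <unicode/unistr.h>
#include <cstdio>
#include <memory>

int main() {
  UErrorCode status = U_ZERO_ERROR;
  std::unique_ptr<icu::BreakIterator> it(
      icu::BreakIterator::createWordInstance(icu::Locale("en"), status));
  if (U_FAILURE(status)) return 1;
  icu::UnicodeString text("Segment this sentence.");
  it->setText(text);
  // Walk every boundary position until the iterator is exhausted.
  for (int32_t pos = it->first(); pos != icu::BreakIterator::DONE;
       pos = it->next()) {
    std::printf("boundary at %d\n", pos);
  }
}
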
@@ -165,36 +122,33 @@ MaybeHandle<JSSegmenter> JSSegmenter::Initialize(
return segmenter_holder;
}
+// ecma402 #sec-Intl.Segmenter.prototype.resolvedOptions
Handle<JSObject> JSSegmenter::ResolvedOptions(
Isolate* isolate, Handle<JSSegmenter> segmenter_holder) {
Factory* factory = isolate->factory();
+ // 3. Let options be ! ObjectCreate(%ObjectPrototype%).
Handle<JSObject> result = factory->NewJSObject(isolate->object_function());
+ // 4. For each row of Table 1, except the header row, do
+ // a. Let p be the Property value of the current row.
+ // b. Let v be the value of pr's internal slot whose name is the Internal Slot
+ // value of the current row.
+ //
+ // c. If v is not undefined, then
+ // i. Perform ! CreateDataPropertyOrThrow(options, p, v).
+ // Table 1: Resolved Options of Segmenter Instances
+ // Internal Slot Property
+ // [[Locale]] "locale"
+ // [[SegmenterGranularity]] "granularity"
+
Handle<String> locale(segmenter_holder->locale(), isolate);
JSObject::AddProperty(isolate, result, factory->locale_string(), locale,
NONE);
- if (segmenter_holder->line_break_style() != LineBreakStyle::NOTSET) {
- JSObject::AddProperty(isolate, result, factory->lineBreakStyle_string(),
- segmenter_holder->LineBreakStyleAsString(), NONE);
- }
JSObject::AddProperty(isolate, result, factory->granularity_string(),
segmenter_holder->GranularityAsString(), NONE);
+ // 5. Return options.
return result;
}
-Handle<String> JSSegmenter::LineBreakStyleAsString() const {
- switch (line_break_style()) {
- case LineBreakStyle::STRICT:
- return GetReadOnlyRoots().strict_string_handle();
- case LineBreakStyle::NORMAL:
- return GetReadOnlyRoots().normal_string_handle();
- case LineBreakStyle::LOOSE:
- return GetReadOnlyRoots().loose_string_handle();
- case LineBreakStyle::COUNT:
- case LineBreakStyle::NOTSET:
- UNREACHABLE();
- }
-}
-
Handle<String> JSSegmenter::GranularityAsString() const {
switch (granularity()) {
case Granularity::GRAPHEME:
@@ -203,12 +157,17 @@ Handle<String> JSSegmenter::GranularityAsString() const {
return GetReadOnlyRoots().word_string_handle();
case Granularity::SENTENCE:
return GetReadOnlyRoots().sentence_string_handle();
- case Granularity::LINE:
- return GetReadOnlyRoots().line_string_handle();
case Granularity::COUNT:
UNREACHABLE();
}
}
+std::set<std::string> JSSegmenter::GetAvailableLocales() {
+ int32_t num_locales = 0;
+ const icu::Locale* icu_available_locales =
+ icu::BreakIterator::getAvailableLocales(num_locales);
+ return Intl::BuildLocaleSet(icu_available_locales, num_locales);
+}
+
} // namespace internal
} // namespace v8
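
The rewritten Initialize replaces the old string-vector + strcmp lookup with Intl::GetStringOption<T>, which maps accepted strings directly to enum values. A standalone sketch of that mapping helper, using std::optional where V8 uses Maybe<T> (names and signatures here are illustrative, not the V8 ones):

#include <cassert>
#include <optional>
#include <string>
#include <vector>

enum class Granularity { GRAPHEME, WORD, SENTENCE };

template <typename T>
std::optional<T> GetStringOption(const std::optional<std::string>& provided,
                                 const std::vector<const char*>& str_values,
                                 const std::vector<T>& enum_values,
                                 T default_value) {
  if (!provided) return default_value;  // option absent: use the default
  for (size_t i = 0; i < str_values.size(); i++) {
    if (*provided == str_values[i]) return enum_values[i];
  }
  return std::nullopt;  // invalid value: the caller throws a RangeError
}

int main() {
  auto g = GetStringOption<Granularity>(
      std::string("word"), {"grapheme", "word", "sentence"},
      {Granularity::GRAPHEME, Granularity::WORD, Granularity::SENTENCE},
      Granularity::GRAPHEME);
  assert(g && *g == Granularity::WORD);
}
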
diff --git a/deps/v8/src/objects/js-segmenter.h b/deps/v8/src/objects/js-segmenter.h
index 167d70c210..64de1d9be7 100644
--- a/deps/v8/src/objects/js-segmenter.h
+++ b/deps/v8/src/objects/js-segmenter.h
@@ -9,6 +9,9 @@
#ifndef V8_OBJECTS_JS_SEGMENTER_H_
#define V8_OBJECTS_JS_SEGMENTER_H_
+#include <set>
+#include <string>
+
#include "src/heap/factory.h"
#include "src/isolate.h"
#include "src/objects.h"
@@ -36,7 +39,8 @@ class JSSegmenter : public JSObject {
V8_WARN_UNUSED_RESULT static Handle<JSObject> ResolvedOptions(
Isolate* isolate, Handle<JSSegmenter> segmenter_holder);
- Handle<String> LineBreakStyleAsString() const;
+ static std::set<std::string> GetAvailableLocales();
+
Handle<String> GranularityAsString() const;
DECL_CAST(JSSegmenter)
@@ -46,21 +50,6 @@ class JSSegmenter : public JSObject {
DECL_ACCESSORS(icu_break_iterator, Managed<icu::BreakIterator>)
- // LineBreakStyle: identifying the style used for line break.
- //
- // ecma402 #sec-segmenter-internal-slots
-
- enum class LineBreakStyle {
- NOTSET, // While the granularity is not LINE
- STRICT, // CSS level 3 line-break=strict, e.g. treat CJ as NS
- NORMAL, // CSS level 3 line-break=normal, e.g. treat CJ as ID, break before
- // hyphens for ja,zh
- LOOSE, // CSS level 3 line-break=loose
- COUNT
- };
- inline void set_line_break_style(LineBreakStyle line_break_style);
- inline LineBreakStyle line_break_style() const;
-
// Granularity: identifying the segmenter used.
//
// ecma402 #sec-segmenter-internal-slots
@@ -68,27 +57,19 @@ class JSSegmenter : public JSObject {
GRAPHEME, // for character-breaks
WORD, // for word-breaks
SENTENCE, // for sentence-breaks
- LINE, // for line-breaks
COUNT
};
inline void set_granularity(Granularity granularity);
inline Granularity granularity() const;
// Bit positions in |flags|.
-#define FLAGS_BIT_FIELDS(V, _) \
- V(LineBreakStyleBits, LineBreakStyle, 3, _) \
- V(GranularityBits, Granularity, 3, _)
+#define FLAGS_BIT_FIELDS(V, _) V(GranularityBits, Granularity, 2, _)
DEFINE_BIT_FIELDS(FLAGS_BIT_FIELDS)
#undef FLAGS_BIT_FIELDS
- STATIC_ASSERT(LineBreakStyle::NOTSET <= LineBreakStyleBits::kMax);
- STATIC_ASSERT(LineBreakStyle::STRICT <= LineBreakStyleBits::kMax);
- STATIC_ASSERT(LineBreakStyle::NORMAL <= LineBreakStyleBits::kMax);
- STATIC_ASSERT(LineBreakStyle::LOOSE <= LineBreakStyleBits::kMax);
STATIC_ASSERT(Granularity::GRAPHEME <= GranularityBits::kMax);
STATIC_ASSERT(Granularity::WORD <= GranularityBits::kMax);
STATIC_ASSERT(Granularity::SENTENCE <= GranularityBits::kMax);
- STATIC_ASSERT(Granularity::LINE <= GranularityBits::kMax);
// [flags] Bit field containing various flags about the function.
DECL_INT_ACCESSORS(flags)
@@ -97,17 +78,21 @@ class JSSegmenter : public JSObject {
DECL_VERIFIER(JSSegmenter)
// Layout description.
- static const int kJSSegmenterOffset = JSObject::kHeaderSize;
- static const int kLocaleOffset = kJSSegmenterOffset + kPointerSize;
- static const int kICUBreakIteratorOffset = kLocaleOffset + kPointerSize;
- static const int kFlagsOffset = kICUBreakIteratorOffset + kPointerSize;
- static const int kSize = kFlagsOffset + kPointerSize;
+#define JS_SEGMENTER_FIELDS(V) \
+ V(kJSSegmenterOffset, kTaggedSize) \
+ V(kLocaleOffset, kTaggedSize) \
+ V(kICUBreakIteratorOffset, kTaggedSize) \
+ V(kFlagsOffset, kTaggedSize) \
+ /* Header size. */ \
+ V(kSize, 0)
+
+ DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize, JS_SEGMENTER_FIELDS)
+#undef JS_SEGMENTER_FIELDS
private:
- static LineBreakStyle GetLineBreakStyle(const char* str);
static Granularity GetGranularity(const char* str);
- DISALLOW_IMPLICIT_CONSTRUCTORS(JSSegmenter);
+ OBJECT_CONSTRUCTORS(JSSegmenter, JSObject);
};
} // namespace internal
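
The JS_SEGMENTER_FIELDS change moves the layout from hand-chained "kFoo + kPointerSize" constants to the DEFINE_FIELD_OFFSET_CONSTANTS list macro. Roughly, the macro turns each (name, size) pair into an enum constant equal to the running offset, so kSize falls out as the total; a self-contained approximation (the header size and macro internals are assumptions, not the exact V8 definitions):

#include <cstdio>

constexpr int kTaggedSize = 8;           // assumption: 64-bit tagged slots
constexpr int kJSObjectHeaderSize = 24;  // assumption: illustrative only

#define JS_SEGMENTER_FIELDS(V)        \
  V(kJSSegmenterOffset, kTaggedSize)  \
  V(kLocaleOffset, kTaggedSize)       \
  V(kICUBreakIteratorOffset, kTaggedSize) \
  V(kFlagsOffset, kTaggedSize)        \
  V(kSize, 0)

// Each entry becomes Name plus a NameEnd marker; the next Name resumes
// one byte past the previous end, so offsets accumulate automatically.
#define FIELD_OFFSET_ENTRY(Name, Size) Name, Name##End = Name + (Size)-1,
enum FieldOffsets : int {
  kStartMinusOne = kJSObjectHeaderSize - 1,
  JS_SEGMENTER_FIELDS(FIELD_OFFSET_ENTRY)
};
#undef FIELD_OFFSET_ENTRY

int main() {
  // kLocaleOffset lands one tagged slot past kJSSegmenterOffset, and so on.
  std::printf("locale offset: %d, total size: %d\n",
              static_cast<int>(kLocaleOffset), static_cast<int>(kSize));
}
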
diff --git a/deps/v8/src/objects/js-weak-refs-inl.h b/deps/v8/src/objects/js-weak-refs-inl.h
new file mode 100644
index 0000000000..12006f2927
--- /dev/null
+++ b/deps/v8/src/objects/js-weak-refs-inl.h
@@ -0,0 +1,178 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_JS_WEAK_REFS_INL_H_
+#define V8_OBJECTS_JS_WEAK_REFS_INL_H_
+
+#include "src/objects/js-weak-refs.h"
+
+#include "src/api-inl.h"
+#include "src/heap/heap-write-barrier-inl.h"
+#include "src/objects/microtask-inl.h"
+#include "src/objects/smi-inl.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+OBJECT_CONSTRUCTORS_IMPL(JSWeakCell, JSObject)
+OBJECT_CONSTRUCTORS_IMPL(JSWeakRef, JSObject)
+OBJECT_CONSTRUCTORS_IMPL(JSWeakFactory, JSObject)
+OBJECT_CONSTRUCTORS_IMPL(JSWeakFactoryCleanupIterator, JSObject)
+OBJECT_CONSTRUCTORS_IMPL(WeakFactoryCleanupJobTask, Microtask)
+
+ACCESSORS(JSWeakFactory, native_context, NativeContext, kNativeContextOffset)
+ACCESSORS(JSWeakFactory, cleanup, Object, kCleanupOffset)
+ACCESSORS(JSWeakFactory, active_cells, Object, kActiveCellsOffset)
+ACCESSORS(JSWeakFactory, cleared_cells, Object, kClearedCellsOffset)
+SMI_ACCESSORS(JSWeakFactory, flags, kFlagsOffset)
+ACCESSORS(JSWeakFactory, next, Object, kNextOffset)
+CAST_ACCESSOR(JSWeakFactory)
+
+ACCESSORS(JSWeakCell, factory, Object, kFactoryOffset)
+ACCESSORS(JSWeakCell, target, Object, kTargetOffset)
+ACCESSORS(JSWeakCell, holdings, Object, kHoldingsOffset)
+ACCESSORS(JSWeakCell, next, Object, kNextOffset)
+ACCESSORS(JSWeakCell, prev, Object, kPrevOffset)
+CAST_ACCESSOR(JSWeakCell)
+
+CAST_ACCESSOR(JSWeakRef)
+ACCESSORS(JSWeakRef, target, Object, kTargetOffset)
+
+ACCESSORS(JSWeakFactoryCleanupIterator, factory, JSWeakFactory, kFactoryOffset)
+CAST_ACCESSOR(JSWeakFactoryCleanupIterator)
+
+ACCESSORS(WeakFactoryCleanupJobTask, factory, JSWeakFactory, kFactoryOffset)
+CAST_ACCESSOR(WeakFactoryCleanupJobTask)
+
+void JSWeakFactory::AddWeakCell(JSWeakCell weak_cell) {
+ weak_cell->set_factory(*this);
+ weak_cell->set_next(active_cells());
+ if (active_cells()->IsJSWeakCell()) {
+ JSWeakCell::cast(active_cells())->set_prev(weak_cell);
+ }
+ set_active_cells(weak_cell);
+}
+
+bool JSWeakFactory::NeedsCleanup() const {
+ return cleared_cells()->IsJSWeakCell();
+}
+
+bool JSWeakFactory::scheduled_for_cleanup() const {
+ return ScheduledForCleanupField::decode(flags());
+}
+
+void JSWeakFactory::set_scheduled_for_cleanup(bool scheduled_for_cleanup) {
+ set_flags(ScheduledForCleanupField::update(flags(), scheduled_for_cleanup));
+}
+
+JSWeakCell JSWeakFactory::PopClearedCell(Isolate* isolate) {
+ JSWeakCell weak_cell = JSWeakCell::cast(cleared_cells());
+ DCHECK(weak_cell->prev()->IsUndefined(isolate));
+ set_cleared_cells(weak_cell->next());
+ weak_cell->set_next(ReadOnlyRoots(isolate).undefined_value());
+
+ if (cleared_cells()->IsJSWeakCell()) {
+ JSWeakCell cleared_cells_head = JSWeakCell::cast(cleared_cells());
+ DCHECK_EQ(cleared_cells_head->prev(), weak_cell);
+ cleared_cells_head->set_prev(ReadOnlyRoots(isolate).undefined_value());
+ } else {
+ DCHECK(cleared_cells()->IsUndefined(isolate));
+ }
+ return weak_cell;
+}
+
+void JSWeakCell::Nullify(
+ Isolate* isolate,
+ std::function<void(HeapObject object, ObjectSlot slot, Object target)>
+ gc_notify_updated_slot) {
+ DCHECK(target()->IsJSReceiver());
+ set_target(ReadOnlyRoots(isolate).undefined_value());
+
+ JSWeakFactory weak_factory = JSWeakFactory::cast(factory());
+ // Remove the JSWeakCell from the "active_cells" list of its
+ // JSWeakFactory and insert it into the "cleared" list.
+ if (prev()->IsJSWeakCell()) {
+ DCHECK_NE(weak_factory->active_cells(), *this);
+ JSWeakCell prev_cell = JSWeakCell::cast(prev());
+ prev_cell->set_next(next());
+ gc_notify_updated_slot(prev_cell,
+ prev_cell.RawField(JSWeakCell::kNextOffset), next());
+ } else {
+ DCHECK_EQ(weak_factory->active_cells(), *this);
+ weak_factory->set_active_cells(next());
+ gc_notify_updated_slot(
+ weak_factory, weak_factory.RawField(JSWeakFactory::kActiveCellsOffset),
+ next());
+ }
+ if (next()->IsJSWeakCell()) {
+ JSWeakCell next_cell = JSWeakCell::cast(next());
+ next_cell->set_prev(prev());
+ gc_notify_updated_slot(next_cell,
+ next_cell.RawField(JSWeakCell::kPrevOffset), prev());
+ }
+
+ set_prev(ReadOnlyRoots(isolate).undefined_value());
+ Object cleared_head = weak_factory->cleared_cells();
+ if (cleared_head->IsJSWeakCell()) {
+ JSWeakCell cleared_head_cell = JSWeakCell::cast(cleared_head);
+ cleared_head_cell->set_prev(*this);
+ gc_notify_updated_slot(cleared_head_cell,
+ cleared_head_cell.RawField(JSWeakCell::kPrevOffset),
+ *this);
+ }
+ set_next(weak_factory->cleared_cells());
+ gc_notify_updated_slot(*this, RawField(JSWeakCell::kNextOffset), next());
+ weak_factory->set_cleared_cells(*this);
+ gc_notify_updated_slot(
+ weak_factory, weak_factory.RawField(JSWeakFactory::kClearedCellsOffset),
+ *this);
+}
+
+void JSWeakCell::Clear(Isolate* isolate) {
+ // Unlink the JSWeakCell from the list it's in (if any). The JSWeakCell can be
+ // in its JSWeakFactory's active_cells list, cleared_cells list or neither (if
+ // it has been already taken out).
+
+ DCHECK(target()->IsUndefined() || target()->IsJSReceiver());
+ set_target(ReadOnlyRoots(isolate).undefined_value());
+
+ if (factory()->IsJSWeakFactory()) {
+ JSWeakFactory weak_factory = JSWeakFactory::cast(factory());
+ if (weak_factory->active_cells() == *this) {
+ DCHECK(!prev()->IsJSWeakCell());
+ weak_factory->set_active_cells(next());
+ } else if (weak_factory->cleared_cells() == *this) {
+ DCHECK(!prev()->IsJSWeakCell());
+ weak_factory->set_cleared_cells(next());
+ } else if (prev()->IsJSWeakCell()) {
+ JSWeakCell prev_cell = JSWeakCell::cast(prev());
+ prev_cell->set_next(next());
+ }
+ if (next()->IsJSWeakCell()) {
+ JSWeakCell next_cell = JSWeakCell::cast(next());
+ next_cell->set_prev(prev());
+ }
+ set_prev(ReadOnlyRoots(isolate).undefined_value());
+ set_next(ReadOnlyRoots(isolate).undefined_value());
+
+ set_holdings(ReadOnlyRoots(isolate).undefined_value());
+ set_factory(ReadOnlyRoots(isolate).undefined_value());
+ } else {
+ // Already cleared.
+ DCHECK(next()->IsUndefined(isolate));
+ DCHECK(prev()->IsUndefined(isolate));
+ DCHECK(holdings()->IsUndefined(isolate));
+ DCHECK(factory()->IsUndefined(isolate));
+ }
+}
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_JS_WEAK_REFS_INL_H_
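
Nullify above is doubly-linked-list surgery: the cell is unlinked from its factory's active_cells list and pushed onto the front of cleared_cells, with every touched slot reported through gc_notify_updated_slot. A plain-pointer sketch of the same list moves, minus the write-barrier bookkeeping:

#include <cassert>

struct WeakCell {
  WeakCell* prev = nullptr;
  WeakCell* next = nullptr;
};

struct WeakFactory {
  WeakCell* active_cells = nullptr;
  WeakCell* cleared_cells = nullptr;

  void Nullify(WeakCell* cell) {
    // Unlink from the active list (head case vs. interior case).
    if (cell->prev) cell->prev->next = cell->next;
    else active_cells = cell->next;
    if (cell->next) cell->next->prev = cell->prev;
    // Push onto the front of the cleared list.
    cell->prev = nullptr;
    cell->next = cleared_cells;
    if (cleared_cells) cleared_cells->prev = cell;
    cleared_cells = cell;
  }
};

int main() {
  WeakFactory f;
  WeakCell a, b;
  f.active_cells = &a; a.next = &b; b.prev = &a;
  f.Nullify(&b);
  assert(f.active_cells == &a && a.next == nullptr);
  assert(f.cleared_cells == &b && b.next == nullptr && b.prev == nullptr);
}
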
diff --git a/deps/v8/src/objects/js-weak-refs.h b/deps/v8/src/objects/js-weak-refs.h
new file mode 100644
index 0000000000..5938c27b2f
--- /dev/null
+++ b/deps/v8/src/objects/js-weak-refs.h
@@ -0,0 +1,182 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_JS_WEAK_REFS_H_
+#define V8_OBJECTS_JS_WEAK_REFS_H_
+
+#include "src/objects/js-objects.h"
+#include "src/objects/microtask.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+class JSWeakCell;
+class NativeContext;
+
+// WeakFactory object from the JS Weak Refs spec proposal:
+// https://github.com/tc39/proposal-weakrefs
+class JSWeakFactory : public JSObject {
+ public:
+ DECL_PRINTER(JSWeakFactory)
+ DECL_VERIFIER(JSWeakFactory)
+ DECL_CAST(JSWeakFactory)
+
+ DECL_ACCESSORS(native_context, NativeContext)
+ DECL_ACCESSORS(cleanup, Object)
+ DECL_ACCESSORS(active_cells, Object)
+ DECL_ACCESSORS(cleared_cells, Object)
+
+ // For storing a list of JSWeakFactory objects in NativeContext.
+ DECL_ACCESSORS(next, Object)
+
+ DECL_INT_ACCESSORS(flags)
+
+ // Adds a newly constructed JSWeakCell object into this JSWeakFactory.
+ inline void AddWeakCell(JSWeakCell weak_cell);
+
+ // Returns true if the cleared_cells list is non-empty.
+ inline bool NeedsCleanup() const;
+
+ inline bool scheduled_for_cleanup() const;
+ inline void set_scheduled_for_cleanup(bool scheduled_for_cleanup);
+
+ // Get and remove the first cleared JSWeakCell from the cleared_cells
+ // list. (Assumes there is one.)
+ inline JSWeakCell PopClearedCell(Isolate* isolate);
+
+ // Constructs an iterator for the WeakCells in the cleared_cells list and
+ // calls the user's cleanup function.
+ static void Cleanup(Handle<JSWeakFactory> weak_factory, Isolate* isolate);
+
+// Layout description.
+#define JS_WEAK_FACTORY_FIELDS(V) \
+ V(kNativeContextOffset, kTaggedSize) \
+ V(kCleanupOffset, kTaggedSize) \
+ V(kActiveCellsOffset, kTaggedSize) \
+ V(kClearedCellsOffset, kTaggedSize) \
+ V(kNextOffset, kTaggedSize) \
+ V(kFlagsOffset, kTaggedSize) \
+ /* Header size. */ \
+ V(kSize, 0)
+
+ DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize, JS_WEAK_FACTORY_FIELDS)
+#undef JS_WEAK_FACTORY_FIELDS
+
+ // Bitfields in flags.
+ class ScheduledForCleanupField : public BitField<bool, 0, 1> {};
+
+ OBJECT_CONSTRUCTORS(JSWeakFactory, JSObject);
+};
+
+// WeakCell object from the JS Weak Refs spec proposal.
+class JSWeakCell : public JSObject {
+ public:
+ DECL_PRINTER(JSWeakCell)
+ DECL_VERIFIER(JSWeakCell)
+ DECL_CAST(JSWeakCell)
+
+ DECL_ACCESSORS(factory, Object)
+ DECL_ACCESSORS(target, Object)
+ DECL_ACCESSORS(holdings, Object)
+
+ // For storing doubly linked lists of JSWeakCells in JSWeakFactory.
+ DECL_ACCESSORS(prev, Object)
+ DECL_ACCESSORS(next, Object)
+
+// Layout description.
+#define JS_WEAK_CELL_FIELDS(V) \
+ V(kFactoryOffset, kTaggedSize) \
+ V(kTargetOffset, kTaggedSize) \
+ V(kHoldingsOffset, kTaggedSize) \
+ V(kPrevOffset, kTaggedSize) \
+ V(kNextOffset, kTaggedSize) \
+ /* Header size. */ \
+ V(kSize, 0)
+
+ DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize, JS_WEAK_CELL_FIELDS)
+#undef JS_WEAK_CELL_FIELDS
+
+ class BodyDescriptor;
+
+ // Nullify is called during GC and it modifies the pointers in JSWeakCell and
+ // JSWeakFactory. Thus we need to tell the GC about the modified slots via the
+ // gc_notify_updated_slot function. The normal write barrier is not enough,
+ // since it's disabled before GC.
+ inline void Nullify(
+ Isolate* isolate,
+ std::function<void(HeapObject object, ObjectSlot slot, Object target)>
+ gc_notify_updated_slot);
+
+ inline void Clear(Isolate* isolate);
+
+ OBJECT_CONSTRUCTORS(JSWeakCell, JSObject);
+};
+
+class JSWeakRef : public JSObject {
+ public:
+ DECL_PRINTER(JSWeakRef)
+ DECL_VERIFIER(JSWeakRef)
+ DECL_CAST(JSWeakRef)
+
+ DECL_ACCESSORS(target, Object)
+
+ static const int kTargetOffset = JSObject::kHeaderSize;
+ static const int kSize = kTargetOffset + kPointerSize;
+
+ class BodyDescriptor;
+
+ OBJECT_CONSTRUCTORS(JSWeakRef, JSObject);
+};
+
+class WeakFactoryCleanupJobTask : public Microtask {
+ public:
+ DECL_ACCESSORS(factory, JSWeakFactory)
+
+ DECL_CAST(WeakFactoryCleanupJobTask)
+ DECL_VERIFIER(WeakFactoryCleanupJobTask)
+ DECL_PRINTER(WeakFactoryCleanupJobTask)
+
+// Layout description.
+#define WEAK_FACTORY_CLEANUP_JOB_TASK_FIELDS(V) \
+ V(kFactoryOffset, kTaggedSize) \
+ /* Total size. */ \
+ V(kSize, 0)
+
+ DEFINE_FIELD_OFFSET_CONSTANTS(Microtask::kHeaderSize,
+ WEAK_FACTORY_CLEANUP_JOB_TASK_FIELDS)
+#undef WEAK_FACTORY_CLEANUP_JOB_TASK_FIELDS
+
+ OBJECT_CONSTRUCTORS(WeakFactoryCleanupJobTask, Microtask)
+};
+
+class JSWeakFactoryCleanupIterator : public JSObject {
+ public:
+ DECL_PRINTER(JSWeakFactoryCleanupIterator)
+ DECL_VERIFIER(JSWeakFactoryCleanupIterator)
+ DECL_CAST(JSWeakFactoryCleanupIterator)
+
+ DECL_ACCESSORS(factory, JSWeakFactory)
+
+// Layout description.
+#define JS_WEAK_FACTORY_CLEANUP_ITERATOR_FIELDS(V) \
+ V(kFactoryOffset, kTaggedSize) \
+ /* Header size. */ \
+ V(kSize, 0)
+
+ DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
+ JS_WEAK_FACTORY_CLEANUP_ITERATOR_FIELDS)
+#undef JS_WEAK_FACTORY_CLEANUP_ITERATOR_FIELDS
+
+ OBJECT_CONSTRUCTORS(JSWeakFactoryCleanupIterator, JSObject);
+};
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_JS_WEAK_REFS_H_
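
Taken together, these classes implement the cleanup half of the proposal: the GC fills cleared_cells, and a WeakFactoryCleanupJobTask microtask later drains it through the user callback. A simplified standalone sketch of that flow (the real callback receives a JSWeakFactoryCleanupIterator rather than one cell at a time):

#include <cstdio>
#include <deque>
#include <functional>
#include <string>

struct ClearedCell { std::string holdings; };

struct WeakFactory {
  std::deque<ClearedCell> cleared_cells;            // filled by the GC
  std::function<void(const ClearedCell&)> cleanup;  // user-supplied callback
  bool scheduled_for_cleanup = false;

  bool NeedsCleanup() const { return !cleared_cells.empty(); }
  ClearedCell PopClearedCell() {
    ClearedCell cell = cleared_cells.front();
    cleared_cells.pop_front();
    return cell;
  }
};

// What the WeakFactoryCleanupJobTask microtask boils down to.
void RunCleanupTask(WeakFactory& factory) {
  factory.scheduled_for_cleanup = false;
  while (factory.NeedsCleanup()) factory.cleanup(factory.PopClearedCell());
}

int main() {
  WeakFactory f;
  f.cleanup = [](const ClearedCell& c) {
    std::printf("finalized: %s\n", c.holdings.c_str());
  };
  f.cleared_cells.push_back({"resource-42"});
  RunCleanupTask(f);
}
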
diff --git a/deps/v8/src/objects/literal-objects-inl.h b/deps/v8/src/objects/literal-objects-inl.h
index 4d247cfd29..dadbd0e363 100644
--- a/deps/v8/src/objects/literal-objects-inl.h
+++ b/deps/v8/src/objects/literal-objects-inl.h
@@ -5,18 +5,22 @@
#ifndef V8_OBJECTS_LITERAL_OBJECTS_INL_H_
#define V8_OBJECTS_LITERAL_OBJECTS_INL_H_
-#include "src/objects-inl.h"
#include "src/objects/literal-objects.h"
+#include "src/objects-inl.h"
+
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
namespace v8 {
namespace internal {
+OBJECT_CONSTRUCTORS_IMPL(ObjectBoilerplateDescription, FixedArray)
+
SMI_ACCESSORS(ObjectBoilerplateDescription, flags,
FixedArray::OffsetOfElementAt(kLiteralTypeOffset));
+OBJECT_CONSTRUCTORS_IMPL(ClassBoilerplate, FixedArray)
CAST_ACCESSOR(ClassBoilerplate)
BIT_FIELD_ACCESSORS(ClassBoilerplate, flags, install_class_name_accessor,
@@ -46,6 +50,10 @@ ACCESSORS(ClassBoilerplate, instance_elements_template, Object,
ACCESSORS(ClassBoilerplate, instance_computed_properties, FixedArray,
FixedArray::OffsetOfElementAt(kPrototypeComputedPropertiesIndex));
+OBJECT_CONSTRUCTORS_IMPL(ArrayBoilerplateDescription, Struct)
+
+CAST_ACCESSOR(ArrayBoilerplateDescription)
+
SMI_ACCESSORS(ArrayBoilerplateDescription, flags, kFlagsOffset);
ACCESSORS(ArrayBoilerplateDescription, constant_elements, FixedArrayBase,
diff --git a/deps/v8/src/objects/literal-objects.cc b/deps/v8/src/objects/literal-objects.cc
index 29d7c42b5d..b868b8c7fc 100644
--- a/deps/v8/src/objects/literal-objects.cc
+++ b/deps/v8/src/objects/literal-objects.cc
@@ -11,23 +11,25 @@
#include "src/objects-inl.h"
#include "src/objects/hash-table-inl.h"
#include "src/objects/literal-objects-inl.h"
+#include "src/objects/smi.h"
+#include "src/objects/struct-inl.h"
namespace v8 {
namespace internal {
-Object* ObjectBoilerplateDescription::name(int index) const {
+Object ObjectBoilerplateDescription::name(int index) const {
// get() already checks for out of bounds access, but we do not want to allow
// access to the last element, if it is the number of properties.
DCHECK_NE(size(), index);
return get(2 * index + kDescriptionStartIndex);
}
-Object* ObjectBoilerplateDescription::value(int index) const {
+Object ObjectBoilerplateDescription::value(int index) const {
return get(2 * index + 1 + kDescriptionStartIndex);
}
-void ObjectBoilerplateDescription::set_key_value(int index, Object* key,
- Object* value) {
+void ObjectBoilerplateDescription::set_key_value(int index, Object key,
+ Object value) {
DCHECK_LT(index, size());
DCHECK_GE(index, 0);
set(2 * index + kDescriptionStartIndex, key);
@@ -109,8 +111,8 @@ void AddToDescriptorArrayTemplate(
} else {
DCHECK(value_kind == ClassBoilerplate::kGetter ||
value_kind == ClassBoilerplate::kSetter);
- Object* raw_accessor = descriptor_array_template->GetStrongValue(entry);
- AccessorPair* pair;
+ Object raw_accessor = descriptor_array_template->GetStrongValue(entry);
+ AccessorPair pair;
if (raw_accessor->IsAccessorPair()) {
pair = AccessorPair::cast(raw_accessor);
} else {
@@ -162,7 +164,7 @@ constexpr int ComputeEnumerationIndex(int value_index) {
ClassBoilerplate::kMinimumPrototypePropertiesCount);
}
-inline int GetExistingValueIndex(Object* value) {
+inline int GetExistingValueIndex(Object value) {
return value->IsSmi() ? Smi::ToInt(value) : -1;
}
@@ -170,7 +172,7 @@ template <typename Dictionary, typename Key>
void AddToDictionaryTemplate(Isolate* isolate, Handle<Dictionary> dictionary,
Key key, int key_index,
ClassBoilerplate::ValueKind value_kind,
- Object* value) {
+ Object value) {
int entry = dictionary->FindEntry(isolate, key);
if (entry == kNotFound) {
@@ -210,38 +212,45 @@ void AddToDictionaryTemplate(Isolate* isolate, Handle<Dictionary> dictionary,
} else {
// Entry found, update it.
int enum_order = dictionary->DetailsAt(entry).dictionary_index();
- Object* existing_value = dictionary->ValueAt(entry);
+ Object existing_value = dictionary->ValueAt(entry);
if (value_kind == ClassBoilerplate::kData) {
// Computed value is a normal method.
if (existing_value->IsAccessorPair()) {
- AccessorPair* current_pair = AccessorPair::cast(existing_value);
+ AccessorPair current_pair = AccessorPair::cast(existing_value);
int existing_getter_index =
GetExistingValueIndex(current_pair->getter());
int existing_setter_index =
GetExistingValueIndex(current_pair->setter());
+ // At least one of the accessors must already be defined.
+ DCHECK(existing_getter_index >= 0 || existing_setter_index >= 0);
if (existing_getter_index < key_index &&
existing_setter_index < key_index) {
- // Both getter and setter were defined before the computed method,
- // so overwrite both.
+ // Either both getter and setter were defined before the computed
+ // method or just one of them was defined before while the other one
+ // was not defined yet, so overwrite the property to kData.
PropertyDetails details(kData, DONT_ENUM, PropertyCellType::kNoCell,
enum_order);
dictionary->DetailsAtPut(isolate, entry, details);
dictionary->ValueAtPut(entry, value);
} else {
+ // The data property was defined "between" accessors so the one that
+ // was overwritten has to be cleared.
if (existing_getter_index < key_index) {
- DCHECK_LT(existing_setter_index, key_index);
- // Getter was defined before the computed method and then it was
- // overwritten by the current computed method which in turn was
- // later overwritten by the setter method. So we clear the getter.
+ DCHECK_LT(key_index, existing_setter_index);
+ // Getter was defined and it was done before the computed method
+ // and then it was overwritten by the current computed method which
+ // in turn was later overwritten by the setter method. So we clear
+ // the getter.
current_pair->set_getter(*isolate->factory()->null_value());
} else if (existing_setter_index < key_index) {
- DCHECK_LT(existing_getter_index, key_index);
- // Setter was defined before the computed method and then it was
- // overwritten by the current computed method which in turn was
- // later overwritten by the getter method. So we clear the setter.
+ DCHECK_LT(key_index, existing_getter_index);
+ // Setter was defined and it was done before the computed method
+ // and then it was overwritten by the current computed method which
+ // in turn was later overwritten by the getter method. So we clear
+ // the setter.
current_pair->set_setter(*isolate->factory()->null_value());
}
}
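
The ordering logic above is subtle enough to deserve a worked example: source-order indices decide whether a computed data property replaces an AccessorPair outright or only clears the half it shadows. A standalone rendering of the same decision:

#include <cstdio>

struct AccessorPair { int getter = -1; int setter = -1; };  // -1 == absent

// Returns true if the whole pair is replaced by the data property.
bool ApplyComputedData(AccessorPair& pair, int key_index) {
  if (pair.getter < key_index && pair.setter < key_index) {
    return true;  // both halves are older: overwrite the pair with kData
  }
  if (pair.getter < key_index) {
    pair.getter = -1;  // getter is older than the data property: clear it
  } else if (pair.setter < key_index) {
    pair.setter = -1;  // setter is older: clear it
  }
  return false;  // the newer accessor half survives
}

int main() {
  AccessorPair pair{/*getter defined at*/ 1, /*setter defined at*/ 5};
  bool replaced = ApplyComputedData(pair, /*computed data at*/ 3);
  // getter(1) < data(3) < setter(5): the getter is cleared, setter kept.
  std::printf("replaced=%d getter=%d setter=%d\n",
              replaced, pair.getter, pair.setter);
}
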
@@ -260,7 +269,7 @@ void AddToDictionaryTemplate(Isolate* isolate, Handle<Dictionary> dictionary,
? ACCESSOR_GETTER
: ACCESSOR_SETTER;
if (existing_value->IsAccessorPair()) {
- AccessorPair* current_pair = AccessorPair::cast(existing_value);
+ AccessorPair current_pair = AccessorPair::cast(existing_value);
int existing_component_index =
GetExistingValueIndex(current_pair->get(component));
@@ -357,13 +366,13 @@ class ObjectDescriptor {
void AddNamedProperty(Isolate* isolate, Handle<Name> name,
ClassBoilerplate::ValueKind value_kind,
int value_index) {
- Smi* value = Smi::FromInt(value_index);
+ Smi value = Smi::FromInt(value_index);
if (HasDictionaryProperties()) {
UpdateNextEnumerationIndex(value_index);
AddToDictionaryTemplate(isolate, properties_dictionary_template_, name,
value_index, value_kind, value);
} else {
- *temp_handle_.location() = value;
+ *temp_handle_.location() = value->ptr();
AddToDescriptorArrayTemplate(isolate, descriptor_array_template_, name,
value_kind, temp_handle_);
}
@@ -372,7 +381,7 @@ class ObjectDescriptor {
void AddIndexedProperty(Isolate* isolate, uint32_t element,
ClassBoilerplate::ValueKind value_kind,
int value_index) {
- Smi* value = Smi::FromInt(value_index);
+ Smi value = Smi::FromInt(value_index);
AddToDictionaryTemplate(isolate, elements_dictionary_template_, element,
value_index, value_kind, value);
}
@@ -419,14 +428,14 @@ class ObjectDescriptor {
void ClassBoilerplate::AddToPropertiesTemplate(
Isolate* isolate, Handle<NameDictionary> dictionary, Handle<Name> name,
- int key_index, ClassBoilerplate::ValueKind value_kind, Object* value) {
+ int key_index, ClassBoilerplate::ValueKind value_kind, Object value) {
AddToDictionaryTemplate(isolate, dictionary, name, key_index, value_kind,
value);
}
void ClassBoilerplate::AddToElementsTemplate(
Isolate* isolate, Handle<NumberDictionary> dictionary, uint32_t key,
- int key_index, ClassBoilerplate::ValueKind value_kind, Object* value) {
+ int key_index, ClassBoilerplate::ValueKind value_kind, Object value) {
AddToDictionaryTemplate(isolate, dictionary, key, key_index, value_kind,
value);
}
@@ -460,9 +469,6 @@ Handle<ClassBoilerplate> ClassBoilerplate::BuildClassBoilerplate(
// Initialize class object template.
//
static_desc.CreateTemplates(isolate, kMinimumClassPropertiesCount);
- Handle<DescriptorArray> class_function_descriptors(
- isolate->native_context()->class_function_map()->instance_descriptors(),
- isolate);
STATIC_ASSERT(JSFunction::kLengthDescriptorIndex == 0);
{
// Add length_accessor.
@@ -525,14 +531,12 @@ Handle<ClassBoilerplate> ClassBoilerplate::BuildClassBoilerplate(
case ClassLiteral::Property::SETTER:
value_kind = ClassBoilerplate::kSetter;
break;
- case ClassLiteral::Property::PUBLIC_FIELD:
+ case ClassLiteral::Property::FIELD:
+ DCHECK_IMPLIES(property->is_computed_name(), !property->is_private());
if (property->is_computed_name()) {
++dynamic_argument_index;
}
continue;
- case ClassLiteral::Property::PRIVATE_FIELD:
- DCHECK(!property->is_computed_name());
- continue;
}
ObjectDescriptor& desc =
diff --git a/deps/v8/src/objects/literal-objects.h b/deps/v8/src/objects/literal-objects.h
index 43a176017d..178306162d 100644
--- a/deps/v8/src/objects/literal-objects.h
+++ b/deps/v8/src/objects/literal-objects.h
@@ -5,8 +5,8 @@
#ifndef V8_OBJECTS_LITERAL_OBJECTS_H_
#define V8_OBJECTS_LITERAL_OBJECTS_H_
-#include "src/objects.h"
#include "src/objects/fixed-array.h"
+#include "src/objects/struct.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -23,10 +23,10 @@ class ClassLiteral;
// in the list.
class ObjectBoilerplateDescription : public FixedArray {
public:
- Object* name(int index) const;
- Object* value(int index) const;
+ Object name(int index) const;
+ Object value(int index) const;
- void set_key_value(int index, Object* key, Object* value);
+ void set_key_value(int index, Object key, Object value);
// The number of boilerplate properties.
int size() const;
@@ -48,6 +48,8 @@ class ObjectBoilerplateDescription : public FixedArray {
private:
bool has_number_of_properties() const;
+
+ OBJECT_CONSTRUCTORS(ObjectBoilerplateDescription, FixedArray)
};
class ArrayBoilerplateDescription : public Struct {
@@ -67,8 +69,9 @@ class ArrayBoilerplateDescription : public Struct {
void BriefPrintDetails(std::ostream& os);
#define ARRAY_BOILERPLATE_DESCRIPTION_FIELDS(V) \
- V(kFlagsOffset, kPointerSize) \
- V(kConstantElementsOffset, kPointerSize) \
+ V(kFlagsOffset, kTaggedSize) \
+ V(kConstantElementsOffset, kTaggedSize) \
+ /* Total size. */ \
V(kSize, 0)
DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize,
@@ -77,7 +80,7 @@ class ArrayBoilerplateDescription : public Struct {
private:
DECL_INT_ACCESSORS(flags)
- DISALLOW_IMPLICIT_CONSTRUCTORS(ArrayBoilerplateDescription);
+ OBJECT_CONSTRUCTORS(ArrayBoilerplateDescription, Struct);
};
class ClassBoilerplate : public FixedArray {
@@ -126,12 +129,12 @@ class ClassBoilerplate : public FixedArray {
static void AddToPropertiesTemplate(Isolate* isolate,
Handle<NameDictionary> dictionary,
Handle<Name> name, int key_index,
- ValueKind value_kind, Object* value);
+ ValueKind value_kind, Object value);
static void AddToElementsTemplate(Isolate* isolate,
Handle<NumberDictionary> dictionary,
uint32_t key, int key_index,
- ValueKind value_kind, Object* value);
+ ValueKind value_kind, Object value);
static Handle<ClassBoilerplate> BuildClassBoilerplate(Isolate* isolate,
ClassLiteral* expr);
@@ -151,6 +154,8 @@ class ClassBoilerplate : public FixedArray {
private:
DECL_INT_ACCESSORS(flags)
+
+ OBJECT_CONSTRUCTORS(ClassBoilerplate, FixedArray)
};
} // namespace internal
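
The signature churn in this file (Object* -> Object, Smi* -> Smi) is part of the patch-wide migration from raw HeapObject pointers to small value classes that wrap the tagged word. A minimal analogue showing why call sites barely change (the tag layout here is an assumption for illustration only):

#include <cassert>
#include <cstdint>

using Address = uintptr_t;

class Object {
 public:
  Object() : ptr_(0) {}
  explicit Object(Address ptr) : ptr_(ptr) {}
  Address ptr() const { return ptr_; }
  bool IsSmi() const { return (ptr_ & 1) == 0; }  // assumption: smi tag = 0
  // operator-> returns the wrapper itself, so obj->IsSmi() still compiles
  // even though Object is now passed by value, not as a pointer.
  Object* operator->() { return this; }
  const Object* operator->() const { return this; }

 private:
  Address ptr_;  // the tagged word itself, copied by value
};

int main() {
  Object o(Address{42 << 1});  // an even word: tagged as a small integer
  assert(o->IsSmi());          // arrow access works without a real pointer
  assert(sizeof(Object) == sizeof(Address));
}
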
diff --git a/deps/v8/src/objects/managed.cc b/deps/v8/src/objects/managed.cc
index 034a2085f9..8376ccb547 100644
--- a/deps/v8/src/objects/managed.cc
+++ b/deps/v8/src/objects/managed.cc
@@ -13,6 +13,8 @@ namespace {
void ManagedObjectFinalizerSecondPass(const v8::WeakCallbackInfo<void>& data) {
auto destructor =
reinterpret_cast<ManagedPtrDestructor*>(data.GetParameter());
+ Isolate* isolate = reinterpret_cast<Isolate*>(data.GetIsolate());
+ isolate->UnregisterManagedPtrDestructor(destructor);
int64_t adjustment = 0 - static_cast<int64_t>(destructor->estimated_size_);
destructor->destructor_(destructor->shared_ptr_ptr_);
delete destructor;
@@ -26,8 +28,6 @@ void ManagedObjectFinalizer(const v8::WeakCallbackInfo<void>& data) {
auto destructor =
reinterpret_cast<ManagedPtrDestructor*>(data.GetParameter());
GlobalHandles::Destroy(destructor->global_handle_location_);
- Isolate* isolate = reinterpret_cast<Isolate*>(data.GetIsolate());
- isolate->UnregisterManagedPtrDestructor(destructor);
// We need to do the main work as a second pass callback because
// it can trigger garbage collection. The first pass callbacks
// are not allowed to invoke V8 API.
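
This change moves UnregisterManagedPtrDestructor from the first-pass weak callback into the second pass. A sketch of the two-pass protocol it relies on: pass one runs inside GC and may only detach the handle; everything that might re-enter V8 is queued for pass two:

#include <cstdio>
#include <functional>
#include <vector>

struct Destructor { std::function<void()> destroy; };

std::vector<std::function<void()>> second_pass_queue;

void FirstPassCallback(Destructor* d) {
  // GC is in progress: only cheap, non-reentrant work is allowed here.
  std::printf("first pass: handle destroyed\n");
  second_pass_queue.push_back([d] {
    // Second pass: safe to unregister bookkeeping and run the destructor.
    std::printf("second pass: unregister + destroy\n");
    d->destroy();
    delete d;
  });
}

int main() {
  FirstPassCallback(new Destructor{[] { std::printf("~Managed payload\n"); }});
  for (auto& task : second_pass_queue) task();  // drained after GC completes
}
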
diff --git a/deps/v8/src/objects/managed.h b/deps/v8/src/objects/managed.h
index 4c3d67bbfc..9842ef7c0d 100644
--- a/deps/v8/src/objects/managed.h
+++ b/deps/v8/src/objects/managed.h
@@ -10,6 +10,7 @@
#include "src/handles.h"
#include "src/heap/factory.h"
#include "src/isolate.h"
+#include "src/objects/foreign.h"
namespace v8 {
namespace internal {
@@ -24,7 +25,7 @@ struct ManagedPtrDestructor {
ManagedPtrDestructor* next_ = nullptr;
void* shared_ptr_ptr_ = nullptr;
void (*destructor_)(void* shared_ptr) = nullptr;
- Object** global_handle_location_ = nullptr;
+ Address* global_handle_location_ = nullptr;
ManagedPtrDestructor(size_t estimated_size, void* shared_ptr_ptr,
void (*destructor)(void*))
@@ -47,16 +48,18 @@ void ManagedObjectFinalizer(const v8::WeakCallbackInfo<void>& data);
template <class CppType>
class Managed : public Foreign {
public:
+ Managed() : Foreign() {}
+ explicit Managed(Address ptr) : Foreign(ptr) {}
+ Managed* operator->() { return this; }
+
// Get a raw pointer to the C++ object.
V8_INLINE CppType* raw() { return GetSharedPtrPtr()->get(); }
// Get a copy of the shared pointer to the C++ object.
V8_INLINE std::shared_ptr<CppType> get() { return *GetSharedPtrPtr(); }
- static Managed<CppType>* cast(Object* obj) {
- SLOW_DCHECK(obj->IsForeign());
- return reinterpret_cast<Managed<CppType>*>(obj);
- }
+ static Managed cast(Object obj) { return Managed(obj->ptr()); }
+ static Managed unchecked_cast(Object obj) { return bit_cast<Managed>(obj); }
// Allocate a new {CppType} and wrap it in a {Managed<CppType>}.
template <typename... Args>
diff --git a/deps/v8/src/objects/map-inl.h b/deps/v8/src/objects/map-inl.h
index 0ec4113d4d..338ac78ec4 100644
--- a/deps/v8/src/objects/map-inl.h
+++ b/deps/v8/src/objects/map-inl.h
@@ -6,31 +6,51 @@
#define V8_OBJECTS_MAP_INL_H_
#include "src/objects/map.h"
+
#include "src/field-type.h"
+#include "src/heap/heap-inl.h"
+#include "src/layout-descriptor-inl.h"
#include "src/objects-inl.h"
#include "src/objects/api-callbacks-inl.h"
+#include "src/objects/cell-inl.h"
#include "src/objects/descriptor-array.h"
+#include "src/objects/instance-type-inl.h"
#include "src/objects/prototype-info-inl.h"
#include "src/objects/shared-function-info.h"
#include "src/objects/templates-inl.h"
#include "src/property.h"
#include "src/transitions.h"
-// For pulling in heap/incremental-marking.h which is needed by
-// ACCESSORS_CHECKED.
-#include "src/heap/heap-inl.h"
-
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
namespace v8 {
namespace internal {
+OBJECT_CONSTRUCTORS_IMPL(Map, HeapObject)
CAST_ACCESSOR(Map)
-ACCESSORS(Map, instance_descriptors, DescriptorArray, kDescriptorsOffset)
-ACCESSORS_CHECKED(Map, layout_descriptor, LayoutDescriptor,
- kLayoutDescriptorOffset, FLAG_unbox_double_fields)
+DescriptorArray Map::instance_descriptors() const {
+ return DescriptorArray::cast(READ_FIELD(*this, kDescriptorsOffset));
+}
+
+DescriptorArray Map::synchronized_instance_descriptors() const {
+ return DescriptorArray::cast(ACQUIRE_READ_FIELD(*this, kDescriptorsOffset));
+}
+
+void Map::set_synchronized_instance_descriptors(DescriptorArray value,
+ WriteBarrierMode mode) {
+ RELEASE_WRITE_FIELD(*this, kDescriptorsOffset, value);
+ CONDITIONAL_WRITE_BARRIER(*this, kDescriptorsOffset, value, mode);
+}
+
+// A freshly allocated layout descriptor can be set on an existing map.
+// We need to use release-store and acquire-load accessor pairs to ensure
+// that the concurrent marking thread observes initializing stores of the
+// layout descriptor.
+SYNCHRONIZED_ACCESSORS_CHECKED(Map, layout_descriptor, LayoutDescriptor,
+ kLayoutDescriptorOffset,
+ FLAG_unbox_double_fields)
WEAK_ACCESSORS(Map, raw_transitions, kTransitionsOrPrototypeInfoOffset)
// |bit_field| fields.
@@ -70,21 +90,21 @@ BIT_FIELD_ACCESSORS(Map, bit_field3, may_have_interesting_symbols,
BIT_FIELD_ACCESSORS(Map, bit_field3, construction_counter,
Map::ConstructionCounterBits)
-InterceptorInfo* Map::GetNamedInterceptor() {
+InterceptorInfo Map::GetNamedInterceptor() {
DCHECK(has_named_interceptor());
- FunctionTemplateInfo* info = GetFunctionTemplateInfo();
- return InterceptorInfo::cast(info->named_property_handler());
+ FunctionTemplateInfo info = GetFunctionTemplateInfo();
+ return InterceptorInfo::cast(info->GetNamedPropertyHandler());
}
-InterceptorInfo* Map::GetIndexedInterceptor() {
+InterceptorInfo Map::GetIndexedInterceptor() {
DCHECK(has_indexed_interceptor());
- FunctionTemplateInfo* info = GetFunctionTemplateInfo();
- return InterceptorInfo::cast(info->indexed_property_handler());
+ FunctionTemplateInfo info = GetFunctionTemplateInfo();
+ return InterceptorInfo::cast(info->GetIndexedPropertyHandler());
}
bool Map::IsInplaceGeneralizableField(PropertyConstness constness,
Representation representation,
- FieldType* field_type) {
+ FieldType field_type) {
if (FLAG_track_constant_fields && FLAG_modify_map_inplace &&
(constness == PropertyConstness::kConst)) {
// VariableMode::kConst -> PropertyConstness::kMutable field generalization
@@ -139,10 +159,19 @@ bool Map::IsUnboxedDoubleField(FieldIndex index) const {
bool Map::TooManyFastProperties(StoreOrigin store_origin) const {
if (UnusedPropertyFields() != 0) return false;
if (is_prototype_map()) return false;
- int minimum = store_origin == StoreOrigin::kNamed ? 128 : 12;
- int limit = Max(minimum, GetInObjectProperties());
- int external = NumberOfFields() - GetInObjectProperties();
- return external > limit;
+ if (store_origin == StoreOrigin::kNamed) {
+ int limit = Max(kMaxFastProperties, GetInObjectProperties());
+ FieldCounts counts = GetFieldCounts();
+ // Only count mutable fields so that objects with large numbers of
+ // constant functions do not go to dictionary mode. That would be bad
+ // because such objects have often been used as modules.
+ int external = counts.mutable_count() - GetInObjectProperties();
+ return external > limit || counts.GetTotal() > kMaxNumberOfDescriptors;
+ } else {
+ int limit = Max(kFastPropertiesSoftLimit, GetInObjectProperties());
+ int external = NumberOfFields() - GetInObjectProperties();
+ return external > limit;
+ }
}
PropertyDetails Map::GetLastDescriptorDetails() const {
@@ -177,14 +206,14 @@ void Map::SetEnumLength(int length) {
set_bit_field3(EnumLengthBits::update(bit_field3(), length));
}
-FixedArrayBase* Map::GetInitialElements() const {
- FixedArrayBase* result = nullptr;
+FixedArrayBase Map::GetInitialElements() const {
+ FixedArrayBase result;
if (has_fast_elements() || has_fast_string_wrapper_elements()) {
result = GetReadOnlyRoots().empty_fixed_array();
} else if (has_fast_sloppy_arguments_elements()) {
result = GetReadOnlyRoots().empty_sloppy_arguments_elements();
} else if (has_fixed_typed_array_elements()) {
- result = GetReadOnlyRoots().EmptyFixedTypedArrayForMap(this);
+ result = GetReadOnlyRoots().EmptyFixedTypedArrayForMap(*this);
} else if (has_dictionary_elements()) {
result = GetReadOnlyRoots().empty_slow_element_dictionary();
} else {
@@ -214,12 +243,12 @@ void Map::set_instance_size_in_words(int value) {
}
int Map::instance_size() const {
- return instance_size_in_words() << kPointerSizeLog2;
+ return instance_size_in_words() << kTaggedSizeLog2;
}
void Map::set_instance_size(int value) {
- CHECK_EQ(0, value & (kPointerSize - 1));
- value >>= kPointerSizeLog2;
+ CHECK(IsAligned(value, kTaggedSize));
+ value >>= kTaggedSizeLog2;
CHECK_LT(static_cast<unsigned>(value), 256);
set_instance_size_in_words(value);
}
@@ -263,7 +292,7 @@ void Map::SetConstructorFunctionIndex(int value) {
}
int Map::GetInObjectPropertyOffset(int index) const {
- return (GetInObjectPropertiesStartInWords() + index) * kPointerSize;
+ return (GetInObjectPropertiesStartInWords() + index) * kTaggedSize;
}
Handle<Map> Map::AddMissingTransitionsForTesting(
@@ -325,11 +354,11 @@ int Map::UsedInstanceSize() const {
// in the property array.
return instance_size();
}
- return words * kPointerSize;
+ return words * kTaggedSize;
}
void Map::SetInObjectUnusedPropertyFields(int value) {
- STATIC_ASSERT(JSObject::kFieldsAdded == JSObject::kHeaderSize / kPointerSize);
+ STATIC_ASSERT(JSObject::kFieldsAdded == JSObject::kHeaderSize / kTaggedSize);
if (!IsJSObjectMap()) {
CHECK_EQ(0, value);
set_used_or_unused_instance_size_in_words(0);
@@ -340,12 +369,12 @@ void Map::SetInObjectUnusedPropertyFields(int value) {
DCHECK_LE(value, GetInObjectProperties());
int used_inobject_properties = GetInObjectProperties() - value;
set_used_or_unused_instance_size_in_words(
- GetInObjectPropertyOffset(used_inobject_properties) / kPointerSize);
+ GetInObjectPropertyOffset(used_inobject_properties) / kTaggedSize);
DCHECK_EQ(value, UnusedPropertyFields());
}
void Map::SetOutOfObjectUnusedPropertyFields(int value) {
- STATIC_ASSERT(JSObject::kFieldsAdded == JSObject::kHeaderSize / kPointerSize);
+ STATIC_ASSERT(JSObject::kFieldsAdded == JSObject::kHeaderSize / kTaggedSize);
CHECK_LT(static_cast<unsigned>(value), JSObject::kFieldsAdded);
// For out of object properties "used_instance_size_in_words" byte encodes
// the slack in the property array.
@@ -353,13 +382,13 @@ void Map::SetOutOfObjectUnusedPropertyFields(int value) {
DCHECK_EQ(value, UnusedPropertyFields());
}
-void Map::CopyUnusedPropertyFields(Map* map) {
+void Map::CopyUnusedPropertyFields(Map map) {
set_used_or_unused_instance_size_in_words(
map->used_or_unused_instance_size_in_words());
DCHECK_EQ(UnusedPropertyFields(), map->UnusedPropertyFields());
}
-void Map::CopyUnusedPropertyFieldsAdjustedForInstanceSize(Map* map) {
+void Map::CopyUnusedPropertyFieldsAdjustedForInstanceSize(Map map) {
int value = map->used_or_unused_instance_size_in_words();
if (value >= JSValue::kFieldsAdded) {
// Unused in-object fields. Adjust the offset from the object’s start
@@ -372,7 +401,7 @@ void Map::CopyUnusedPropertyFieldsAdjustedForInstanceSize(Map* map) {
void Map::AccountAddedPropertyField() {
// Update used instance size and unused property fields number.
- STATIC_ASSERT(JSObject::kFieldsAdded == JSObject::kHeaderSize / kPointerSize);
+ STATIC_ASSERT(JSObject::kFieldsAdded == JSObject::kHeaderSize / kTaggedSize);
#ifdef DEBUG
int new_unused = UnusedPropertyFields() - 1;
if (new_unused < 0) new_unused += JSObject::kFieldsAdded;
@@ -524,41 +553,47 @@ INSTANCE_TYPE_CHECKERS(DEF_TESTER)
#undef DEF_TESTER
bool Map::IsBooleanMap() const {
- return this == GetReadOnlyRoots().boolean_map();
+ return *this == GetReadOnlyRoots().boolean_map();
}
bool Map::IsNullOrUndefinedMap() const {
- return this == GetReadOnlyRoots().null_map() ||
- this == GetReadOnlyRoots().undefined_map();
+ return *this == GetReadOnlyRoots().null_map() ||
+ *this == GetReadOnlyRoots().undefined_map();
}
bool Map::IsPrimitiveMap() const {
return instance_type() <= LAST_PRIMITIVE_TYPE;
}
-Object* Map::prototype() const { return READ_FIELD(this, kPrototypeOffset); }
+Object Map::prototype() const { return READ_FIELD(this, kPrototypeOffset); }
-void Map::set_prototype(Object* value, WriteBarrierMode mode) {
+void Map::set_prototype(Object value, WriteBarrierMode mode) {
DCHECK(value->IsNull() || value->IsJSReceiver());
- WRITE_FIELD(this, kPrototypeOffset, value);
- CONDITIONAL_WRITE_BARRIER(this, kPrototypeOffset, value, mode);
+ WRITE_FIELD(*this, kPrototypeOffset, value);
+ CONDITIONAL_WRITE_BARRIER(*this, kPrototypeOffset, value, mode);
}
-LayoutDescriptor* Map::layout_descriptor_gc_safe() const {
+LayoutDescriptor Map::layout_descriptor_gc_safe() const {
DCHECK(FLAG_unbox_double_fields);
- Object* layout_desc = RELAXED_READ_FIELD(this, kLayoutDescriptorOffset);
+ // The loaded value can be dereferenced on background thread to load the
+ // bitmap. We need acquire load in order to ensure that the bitmap
+ // initializing stores are also visible to the background thread.
+ Object layout_desc = ACQUIRE_READ_FIELD(*this, kLayoutDescriptorOffset);
return LayoutDescriptor::cast_gc_safe(layout_desc);
}
bool Map::HasFastPointerLayout() const {
DCHECK(FLAG_unbox_double_fields);
- Object* layout_desc = RELAXED_READ_FIELD(this, kLayoutDescriptorOffset);
+ // The loaded value is used for SMI check only and is not dereferenced,
+ // so relaxed load is safe.
+ Object layout_desc = RELAXED_READ_FIELD(*this, kLayoutDescriptorOffset);
return LayoutDescriptor::IsFastPointerLayout(layout_desc);
}
-void Map::UpdateDescriptors(DescriptorArray* descriptors,
- LayoutDescriptor* layout_desc) {
- set_instance_descriptors(descriptors);
+void Map::UpdateDescriptors(Isolate* isolate, DescriptorArray descriptors,
+ LayoutDescriptor layout_desc,
+ int number_of_own_descriptors) {
+ SetInstanceDescriptors(isolate, descriptors, number_of_own_descriptors);
if (FLAG_unbox_double_fields) {
if (layout_descriptor()->IsSlowLayout()) {
set_layout_descriptor(layout_desc);
@@ -566,59 +601,63 @@ void Map::UpdateDescriptors(DescriptorArray* descriptors,
#ifdef VERIFY_HEAP
// TODO(ishell): remove these checks from VERIFY_HEAP mode.
if (FLAG_verify_heap) {
- CHECK(layout_descriptor()->IsConsistentWithMap(this));
- CHECK_EQ(Map::GetVisitorId(this), visitor_id());
+ CHECK(layout_descriptor()->IsConsistentWithMap(*this));
+ CHECK_EQ(Map::GetVisitorId(*this), visitor_id());
}
#else
- SLOW_DCHECK(layout_descriptor()->IsConsistentWithMap(this));
- DCHECK(visitor_id() == Map::GetVisitorId(this));
+ SLOW_DCHECK(layout_descriptor()->IsConsistentWithMap(*this));
+ DCHECK(visitor_id() == Map::GetVisitorId(*this));
#endif
}
}
-void Map::InitializeDescriptors(DescriptorArray* descriptors,
- LayoutDescriptor* layout_desc) {
- int len = descriptors->number_of_descriptors();
- set_instance_descriptors(descriptors);
- SetNumberOfOwnDescriptors(len);
+void Map::InitializeDescriptors(Isolate* isolate, DescriptorArray descriptors,
+ LayoutDescriptor layout_desc) {
+ SetInstanceDescriptors(isolate, descriptors,
+ descriptors->number_of_descriptors());
if (FLAG_unbox_double_fields) {
set_layout_descriptor(layout_desc);
#ifdef VERIFY_HEAP
// TODO(ishell): remove these checks from VERIFY_HEAP mode.
if (FLAG_verify_heap) {
- CHECK(layout_descriptor()->IsConsistentWithMap(this));
+ CHECK(layout_descriptor()->IsConsistentWithMap(*this));
}
#else
- SLOW_DCHECK(layout_descriptor()->IsConsistentWithMap(this));
+ SLOW_DCHECK(layout_descriptor()->IsConsistentWithMap(*this));
#endif
- set_visitor_id(Map::GetVisitorId(this));
+ set_visitor_id(Map::GetVisitorId(*this));
}
}
void Map::set_bit_field3(uint32_t bits) {
- if (kInt32Size != kPointerSize) {
- WRITE_UINT32_FIELD(this, kBitField3Offset + kInt32Size, 0);
+ if (kInt32Size != kTaggedSize) {
+ RELAXED_WRITE_UINT32_FIELD(this, kBitField3Offset + kInt32Size, 0);
}
- WRITE_UINT32_FIELD(this, kBitField3Offset, bits);
+ RELAXED_WRITE_UINT32_FIELD(this, kBitField3Offset, bits);
}
uint32_t Map::bit_field3() const {
- return READ_UINT32_FIELD(this, kBitField3Offset);
+ return RELAXED_READ_UINT32_FIELD(this, kBitField3Offset);
}
-LayoutDescriptor* Map::GetLayoutDescriptor() const {
+LayoutDescriptor Map::GetLayoutDescriptor() const {
return FLAG_unbox_double_fields ? layout_descriptor()
: LayoutDescriptor::FastPointerLayout();
}
-void Map::AppendDescriptor(Descriptor* desc) {
- DescriptorArray* descriptors = instance_descriptors();
+void Map::AppendDescriptor(Isolate* isolate, Descriptor* desc) {
+ DescriptorArray descriptors = instance_descriptors();
int number_of_own_descriptors = NumberOfOwnDescriptors();
DCHECK(descriptors->number_of_descriptors() == number_of_own_descriptors);
- descriptors->Append(desc);
- SetNumberOfOwnDescriptors(number_of_own_descriptors + 1);
-
+ {
+ // The following two operations need to happen before the marking write
+ // barrier.
+ descriptors->Append(desc);
+ SetNumberOfOwnDescriptors(number_of_own_descriptors + 1);
+ MarkingBarrierForDescriptorArray(isolate->heap(), *this, descriptors,
+ number_of_own_descriptors + 1);
+ }
// Properly mark the map if the {desc} is an "interesting symbol".
if (desc->GetKey()->IsInterestingSymbol()) {
set_may_have_interesting_symbols(true);
@@ -636,36 +675,36 @@ void Map::AppendDescriptor(Descriptor* desc) {
#endif
}
-Object* Map::GetBackPointer() const {
- Object* object = constructor_or_backpointer();
+Object Map::GetBackPointer() const {
+ Object object = constructor_or_backpointer();
if (object->IsMap()) {
return object;
}
return GetReadOnlyRoots().undefined_value();
}
-Map* Map::ElementsTransitionMap() {
+Map Map::ElementsTransitionMap() {
DisallowHeapAllocation no_gc;
// TODO(delphick): While it's safe to pass nullptr for Isolate* here as
// SearchSpecial doesn't need it, this is really ugly. Perhaps factor out a
// base class for methods not requiring an Isolate?
- return TransitionsAccessor(nullptr, this, &no_gc)
+ return TransitionsAccessor(nullptr, *this, &no_gc)
.SearchSpecial(GetReadOnlyRoots().elements_transition_symbol());
}
-Object* Map::prototype_info() const {
+Object Map::prototype_info() const {
DCHECK(is_prototype_map());
return READ_FIELD(this, Map::kTransitionsOrPrototypeInfoOffset);
}
-void Map::set_prototype_info(Object* value, WriteBarrierMode mode) {
+void Map::set_prototype_info(Object value, WriteBarrierMode mode) {
CHECK(is_prototype_map());
- WRITE_FIELD(this, Map::kTransitionsOrPrototypeInfoOffset, value);
- CONDITIONAL_WRITE_BARRIER(this, Map::kTransitionsOrPrototypeInfoOffset, value,
- mode);
+ WRITE_FIELD(*this, Map::kTransitionsOrPrototypeInfoOffset, value);
+ CONDITIONAL_WRITE_BARRIER(*this, Map::kTransitionsOrPrototypeInfoOffset,
+ value, mode);
}
-void Map::SetBackPointer(Object* value, WriteBarrierMode mode) {
+void Map::SetBackPointer(Object value, WriteBarrierMode mode) {
CHECK_GE(instance_type(), FIRST_JS_RECEIVER_TYPE);
CHECK(value->IsMap());
CHECK(GetBackPointer()->IsUndefined());
@@ -680,14 +719,14 @@ ACCESSORS(Map, constructor_or_backpointer, Object,
kConstructorOrBackPointerOffset)
bool Map::IsPrototypeValidityCellValid() const {
- Object* validity_cell = prototype_validity_cell();
- Object* value = validity_cell->IsSmi() ? Smi::cast(validity_cell)
- : Cell::cast(validity_cell)->value();
+ Object validity_cell = prototype_validity_cell();
+ Object value = validity_cell->IsSmi() ? Smi::cast(validity_cell)
+ : Cell::cast(validity_cell)->value();
return value == Smi::FromInt(Map::kPrototypeChainValid);
}
-Object* Map::GetConstructor() const {
- Object* maybe_constructor = constructor_or_backpointer();
+Object Map::GetConstructor() const {
+ Object maybe_constructor = constructor_or_backpointer();
// Follow any back pointers.
while (maybe_constructor->IsMap()) {
maybe_constructor =
@@ -696,8 +735,8 @@ Object* Map::GetConstructor() const {
return maybe_constructor;
}
-FunctionTemplateInfo* Map::GetFunctionTemplateInfo() const {
- Object* constructor = GetConstructor();
+FunctionTemplateInfo Map::GetFunctionTemplateInfo() const {
+ Object constructor = GetConstructor();
if (constructor->IsJSFunction()) {
DCHECK(JSFunction::cast(constructor)->shared()->IsApiFunction());
return JSFunction::cast(constructor)->shared()->get_api_func_data();
@@ -706,7 +745,7 @@ FunctionTemplateInfo* Map::GetFunctionTemplateInfo() const {
return FunctionTemplateInfo::cast(constructor);
}
-void Map::SetConstructor(Object* constructor, WriteBarrierMode mode) {
+void Map::SetConstructor(Object constructor, WriteBarrierMode mode) {
// Never overwrite a back pointer with a constructor.
CHECK(!constructor_or_backpointer()->IsMap());
set_constructor_or_backpointer(constructor, mode);
@@ -743,19 +782,20 @@ int Map::SlackForArraySize(int old_size, int size_limit) {
return Min(max_slack, old_size / 4);
}
+NEVER_READ_ONLY_SPACE_IMPL(NormalizedMapCache)
+
int NormalizedMapCache::GetIndex(Handle<Map> map) {
return map->Hash() % NormalizedMapCache::kEntries;
}
-bool NormalizedMapCache::IsNormalizedMapCache(const HeapObject* obj) {
+bool NormalizedMapCache::IsNormalizedMapCache(const HeapObject obj) {
if (!obj->IsWeakFixedArray()) return false;
if (WeakFixedArray::cast(obj)->length() != NormalizedMapCache::kEntries) {
return false;
}
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
- NormalizedMapCache* cache =
- reinterpret_cast<NormalizedMapCache*>(const_cast<HeapObject*>(obj));
+ NormalizedMapCache cache = NormalizedMapCache::cast(obj);
cache->NormalizedMapCacheVerify(cache->GetIsolate());
}
#endif
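
The new synchronized accessors in map-inl.h are a textbook release/acquire publication pattern: the mutator release-stores a freshly initialized descriptor or layout-descriptor array, and the concurrent marking thread acquire-loads it, so the initializing stores are guaranteed visible. A standalone demonstration with std::atomic:

#include <atomic>
#include <cassert>
#include <thread>

struct DescriptorArray { int number_of_descriptors = 0; };

std::atomic<DescriptorArray*> slot{nullptr};

void Mutator() {
  auto* d = new DescriptorArray;
  d->number_of_descriptors = 7;              // initializing store
  slot.store(d, std::memory_order_release);  // publish the object
}

void ConcurrentMarker() {
  DescriptorArray* d;
  while ((d = slot.load(std::memory_order_acquire)) == nullptr) {
    std::this_thread::yield();               // spin until published
  }
  assert(d->number_of_descriptors == 7);     // initializing store is visible
}

int main() {
  std::thread marker(ConcurrentMarker);
  Mutator();
  marker.join();
  delete slot.load();
}
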
diff --git a/deps/v8/src/objects/map.h b/deps/v8/src/objects/map.h
index 5f6b173cd3..0880b73b66 100644
--- a/deps/v8/src/objects/map.h
+++ b/deps/v8/src/objects/map.h
@@ -5,10 +5,10 @@
#ifndef V8_OBJECTS_MAP_H_
#define V8_OBJECTS_MAP_H_
+#include "src/globals.h"
#include "src/objects.h"
#include "src/objects/code.h"
-
-#include "src/globals.h"
+#include "src/objects/heap-object.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -16,53 +16,62 @@
namespace v8 {
namespace internal {
-#define VISITOR_ID_LIST(V) \
- V(AllocationSite) \
- V(BigInt) \
- V(ByteArray) \
- V(BytecodeArray) \
- V(Cell) \
- V(Code) \
- V(CodeDataContainer) \
- V(ConsString) \
- V(DataHandler) \
- V(DataObject) \
- V(EphemeronHashTable) \
- V(FeedbackCell) \
- V(FeedbackVector) \
- V(FixedArray) \
- V(FixedDoubleArray) \
- V(FixedFloat64Array) \
- V(FixedTypedArrayBase) \
- V(FreeSpace) \
- V(JSApiObject) \
- V(JSArrayBuffer) \
- V(JSDataView) \
- V(JSObject) \
- V(JSObjectFast) \
- V(JSTypedArray) \
- V(JSWeakCollection) \
- V(Map) \
- V(NativeContext) \
- V(Oddball) \
- V(PreParsedScopeData) \
- V(PropertyArray) \
- V(PropertyCell) \
- V(PrototypeInfo) \
- V(SeqOneByteString) \
- V(SeqTwoByteString) \
- V(SharedFunctionInfo) \
- V(ShortcutCandidate) \
- V(SlicedString) \
- V(SmallOrderedHashMap) \
- V(SmallOrderedHashSet) \
- V(Struct) \
- V(Symbol) \
- V(ThinString) \
- V(TransitionArray) \
- V(UncompiledDataWithoutPreParsedScope) \
- V(UncompiledDataWithPreParsedScope) \
- V(WasmInstanceObject) \
+enum InstanceType : uint16_t;
+
+#define VISITOR_ID_LIST(V) \
+ V(AllocationSite) \
+ V(BigInt) \
+ V(ByteArray) \
+ V(BytecodeArray) \
+ V(Cell) \
+ V(Code) \
+ V(CodeDataContainer) \
+ V(ConsString) \
+ V(Context) \
+ V(DataHandler) \
+ V(DataObject) \
+ V(DescriptorArray) \
+ V(EmbedderDataArray) \
+ V(EphemeronHashTable) \
+ V(FeedbackCell) \
+ V(FeedbackVector) \
+ V(FixedArray) \
+ V(FixedDoubleArray) \
+ V(FixedFloat64Array) \
+ V(FixedTypedArrayBase) \
+ V(FreeSpace) \
+ V(JSApiObject) \
+ V(JSArrayBuffer) \
+ V(JSDataView) \
+ V(JSFunction) \
+ V(JSObject) \
+ V(JSObjectFast) \
+ V(JSTypedArray) \
+ V(JSWeakCell) \
+ V(JSWeakRef) \
+ V(JSWeakCollection) \
+ V(Map) \
+ V(NativeContext) \
+ V(Oddball) \
+ V(PreparseData) \
+ V(PropertyArray) \
+ V(PropertyCell) \
+ V(PrototypeInfo) \
+ V(SeqOneByteString) \
+ V(SeqTwoByteString) \
+ V(SharedFunctionInfo) \
+ V(ShortcutCandidate) \
+ V(SlicedString) \
+ V(SmallOrderedHashMap) \
+ V(SmallOrderedHashSet) \
+ V(SmallOrderedNameDictionary) \
+ V(Struct) \
+ V(Symbol) \
+ V(ThinString) \
+ V(TransitionArray) \
+ V(UncompiledDataWithoutPreparseData) \
+ V(UncompiledDataWithPreparseData) \
+ V(WasmInstanceObject) \
V(WeakArray)
// For data objects, JS objects and structs along with generic visitor which
@@ -198,8 +207,8 @@ class Map : public HeapObject {
Handle<Map> map, Handle<Context> native_context);
// Retrieve interceptors.
- inline InterceptorInfo* GetNamedInterceptor();
- inline InterceptorInfo* GetIndexedInterceptor();
+ inline InterceptorInfo GetNamedInterceptor();
+ inline InterceptorInfo GetIndexedInterceptor();
// Instance type.
DECL_PRIMITIVE_ACCESSORS(instance_type, InstanceType)
@@ -218,8 +227,8 @@ class Map : public HeapObject {
inline void SetInObjectUnusedPropertyFields(int unused_property_fields);
// Updates the counters tracking unused fields in the property array.
inline void SetOutOfObjectUnusedPropertyFields(int unused_property_fields);
- inline void CopyUnusedPropertyFields(Map* map);
- inline void CopyUnusedPropertyFieldsAdjustedForInstanceSize(Map* map);
+ inline void CopyUnusedPropertyFields(Map map);
+ inline void CopyUnusedPropertyFieldsAdjustedForInstanceSize(Map map);
inline void AccountAddedPropertyField();
inline void AccountAddedOutOfObjectPropertyField(
int unused_in_property_array);
@@ -408,9 +417,9 @@ class Map : public HeapObject {
// map with DICTIONARY_ELEMENTS was found in the prototype chain.
bool DictionaryElementsInPrototypeChainOnly(Isolate* isolate);
- inline Map* ElementsTransitionMap();
+ inline Map ElementsTransitionMap();
- inline FixedArrayBase* GetInitialElements() const;
+ inline FixedArrayBase GetInitialElements() const;
// [raw_transitions]: Provides access to the transitions storage field.
// Don't call set_raw_transitions() directly to overwrite transitions, use
@@ -440,33 +449,49 @@ class Map : public HeapObject {
static const int kPrototypeChainValid = 0;
static const int kPrototypeChainInvalid = 1;
- static bool IsPrototypeChainInvalidated(Map* map);
+ static bool IsPrototypeChainInvalidated(Map map);
// Return the map of the root of object's prototype chain.
- Map* GetPrototypeChainRootMap(Isolate* isolate) const;
+ Map GetPrototypeChainRootMap(Isolate* isolate) const;
- Map* FindRootMap(Isolate* isolate) const;
- Map* FindFieldOwner(Isolate* isolate, int descriptor) const;
+ Map FindRootMap(Isolate* isolate) const;
+ Map FindFieldOwner(Isolate* isolate, int descriptor) const;
inline int GetInObjectPropertyOffset(int index) const;
+ class FieldCounts {
+ public:
+ FieldCounts(int mutable_count, int const_count)
+ : mutable_count_(mutable_count), const_count_(const_count) {}
+
+ int GetTotal() const { return mutable_count() + const_count(); }
+
+ int mutable_count() const { return mutable_count_; }
+ int const_count() const { return const_count_; }
+
+ private:
+ int mutable_count_;
+ int const_count_;
+ };
+
+ FieldCounts GetFieldCounts() const;
int NumberOfFields() const;
bool HasOutOfObjectProperties() const;
// Returns true if transition to the given map requires special
// synchronization with the concurrent marker.
- bool TransitionRequiresSynchronizationWithGC(Map* target) const;
+ bool TransitionRequiresSynchronizationWithGC(Map target) const;
// Returns true if transition to the given map removes a tagged in-object
// field.
- bool TransitionRemovesTaggedField(Map* target) const;
+ bool TransitionRemovesTaggedField(Map target) const;
// Returns true if transition to the given map replaces a tagged in-object
// field with an untagged in-object field.
- bool TransitionChangesTaggedFieldToUntaggedField(Map* target) const;
+ bool TransitionChangesTaggedFieldToUntaggedField(Map target) const;
// TODO(ishell): candidate with JSObject::MigrateToMap().
- bool InstancesNeedRewriting(Map* target) const;
- bool InstancesNeedRewriting(Map* target, int target_number_of_fields,
+ bool InstancesNeedRewriting(Map target) const;
+ bool InstancesNeedRewriting(Map target, int target_number_of_fields,
int target_inobject, int target_unused,
int* old_number_of_fields) const;
V8_WARN_UNUSED_RESULT static Handle<FieldType> GeneralizeFieldType(
@@ -480,7 +505,7 @@ class Map : public HeapObject {
// by just updating current map.
static inline bool IsInplaceGeneralizableField(PropertyConstness constness,
Representation representation,
- FieldType* field_type);
+ FieldType field_type);
// Generalizes constness, representation and field_type if objects with given
// instance type can have fast elements that can be transitioned by stubs or
@@ -539,34 +564,39 @@ class Map : public HeapObject {
// Returns null_value if there's neither a constructor function nor a
// FunctionTemplateInfo available.
DECL_ACCESSORS(constructor_or_backpointer, Object)
- inline Object* GetConstructor() const;
- inline FunctionTemplateInfo* GetFunctionTemplateInfo() const;
- inline void SetConstructor(Object* constructor,
+ inline Object GetConstructor() const;
+ inline FunctionTemplateInfo GetFunctionTemplateInfo() const;
+ inline void SetConstructor(Object constructor,
WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
// [back pointer]: points back to the parent map from which a transition
// leads to this map. The field overlaps with the constructor (see above).
- inline Object* GetBackPointer() const;
- inline void SetBackPointer(Object* value,
+ inline Object GetBackPointer() const;
+ inline void SetBackPointer(Object value,
WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
// [instance descriptors]: describes the object.
- DECL_ACCESSORS(instance_descriptors, DescriptorArray)
+ inline DescriptorArray instance_descriptors() const;
+ inline DescriptorArray synchronized_instance_descriptors() const;
+ void SetInstanceDescriptors(Isolate* isolate, DescriptorArray descriptors,
+ int number_of_own_descriptors);
// [layout descriptor]: describes the object layout.
DECL_ACCESSORS(layout_descriptor, LayoutDescriptor)
// |layout descriptor| accessor which can be used from GC.
- inline LayoutDescriptor* layout_descriptor_gc_safe() const;
+ inline LayoutDescriptor layout_descriptor_gc_safe() const;
inline bool HasFastPointerLayout() const;
// |layout descriptor| accessor that is safe to call even when
// FLAG_unbox_double_fields is disabled (in this case Map does not contain
// |layout_descriptor| field at all).
- inline LayoutDescriptor* GetLayoutDescriptor() const;
+ inline LayoutDescriptor GetLayoutDescriptor() const;
- inline void UpdateDescriptors(DescriptorArray* descriptors,
- LayoutDescriptor* layout_descriptor);
- inline void InitializeDescriptors(DescriptorArray* descriptors,
- LayoutDescriptor* layout_descriptor);
+ inline void UpdateDescriptors(Isolate* isolate, DescriptorArray descriptors,
+ LayoutDescriptor layout_descriptor,
+ int number_of_own_descriptors);
+ inline void InitializeDescriptors(Isolate* isolate,
+ DescriptorArray descriptors,
+ LayoutDescriptor layout_descriptor);
// [dependent code]: list of optimized codes that weakly embed this map.
DECL_ACCESSORS(dependent_code, DependentCode)
@@ -596,7 +626,7 @@ class Map : public HeapObject {
inline int NumberOfOwnDescriptors() const;
inline void SetNumberOfOwnDescriptors(int number);
- inline Cell* RetrieveDescriptorsPointer();
+ inline Cell RetrieveDescriptorsPointer();
// Checks whether all properties are stored either in the map or on the object
// (inobject, properties, or elements backing store), requiring no special
@@ -632,6 +662,7 @@ class Map : public HeapObject {
// is found.
static MaybeHandle<Map> TryUpdate(Isolate* isolate,
Handle<Map> map) V8_WARN_UNUSED_RESULT;
+ static Map TryUpdateSlow(Isolate* isolate, Map map) V8_WARN_UNUSED_RESULT;
// Returns a non-deprecated version of the input. This method may deprecate
// existing maps along the way if encodings conflict. Not for use while
@@ -652,7 +683,7 @@ class Map : public HeapObject {
static MaybeObjectHandle WrapFieldType(Isolate* isolate,
Handle<FieldType> type);
- static FieldType* UnwrapFieldType(MaybeObject* wrapped_type);
+ static FieldType UnwrapFieldType(MaybeObject wrapped_type);
V8_WARN_UNUSED_RESULT static MaybeHandle<Map> CopyWithField(
Isolate* isolate, Handle<Map> map, Handle<Name> name,
@@ -706,13 +737,13 @@ class Map : public HeapObject {
PropertyKind kind,
PropertyAttributes attributes);
- inline void AppendDescriptor(Descriptor* desc);
+ inline void AppendDescriptor(Isolate* isolate, Descriptor* desc);
// Returns a copy of the map, prepared for inserting into the transition
// tree (if the |map| owns descriptors then the new one will share
// descriptors with |map|).
- static Handle<Map> CopyForTransition(Isolate* isolate, Handle<Map> map,
- const char* reason);
+ static Handle<Map> CopyForElementsTransition(Isolate* isolate,
+ Handle<Map> map);
// Returns a copy of the map, with all transitions dropped from the
// instance descriptors.
@@ -749,8 +780,8 @@ class Map : public HeapObject {
// Returns the transitioned map for this map with the most generic
// elements_kind that's found in |candidates|, or |nullptr| if no match is
// found at all.
- Map* FindElementsKindTransitionedMap(Isolate* isolate,
- MapHandles const& candidates);
+ Map FindElementsKindTransitionedMap(Isolate* isolate,
+ MapHandles const& candidates);
inline bool CanTransition() const;
@@ -766,7 +797,7 @@ class Map : public HeapObject {
bool IsMapInArrayPrototypeChain(Isolate* isolate) const;
// Dispatched behavior.
- DECL_PRINTER(Map)
+ void MapPrint(std::ostream& os);
DECL_VERIFIER(Map)
#ifdef VERIFY_HEAP
@@ -784,28 +815,29 @@ class Map : public HeapObject {
static const int kMaxPreAllocatedPropertyFields = 255;
// Layout description.
-#define MAP_FIELDS(V) \
- /* Raw data fields. */ \
- V(kInstanceSizeInWordsOffset, kUInt8Size) \
- V(kInObjectPropertiesStartOrConstructorFunctionIndexOffset, kUInt8Size) \
- V(kUsedOrUnusedInstanceSizeInWordsOffset, kUInt8Size) \
- V(kVisitorIdOffset, kUInt8Size) \
- V(kInstanceTypeOffset, kUInt16Size) \
- V(kBitFieldOffset, kUInt8Size) \
- V(kBitField2Offset, kUInt8Size) \
- V(kBitField3Offset, kUInt32Size) \
- V(k64BitArchPaddingOffset, kPointerSize == kUInt32Size ? 0 : kUInt32Size) \
- /* Pointer fields. */ \
- V(kPointerFieldsBeginOffset, 0) \
- V(kPrototypeOffset, kPointerSize) \
- V(kConstructorOrBackPointerOffset, kPointerSize) \
- V(kTransitionsOrPrototypeInfoOffset, kPointerSize) \
- V(kDescriptorsOffset, kPointerSize) \
- V(kLayoutDescriptorOffset, FLAG_unbox_double_fields ? kPointerSize : 0) \
- V(kDependentCodeOffset, kPointerSize) \
- V(kPrototypeValidityCellOffset, kPointerSize) \
- V(kPointerFieldsEndOffset, 0) \
- /* Total size. */ \
+#define MAP_FIELDS(V) \
+ /* Raw data fields. */ \
+ V(kInstanceSizeInWordsOffset, kUInt8Size) \
+ V(kInObjectPropertiesStartOrConstructorFunctionIndexOffset, kUInt8Size) \
+ V(kUsedOrUnusedInstanceSizeInWordsOffset, kUInt8Size) \
+ V(kVisitorIdOffset, kUInt8Size) \
+ V(kInstanceTypeOffset, kUInt16Size) \
+ V(kBitFieldOffset, kUInt8Size) \
+ V(kBitField2Offset, kUInt8Size) \
+ V(kBitField3Offset, kUInt32Size) \
+ V(k64BitArchPaddingOffset, \
+ kSystemPointerSize == kUInt32Size ? 0 : kUInt32Size) \
+ /* Pointer fields. */ \
+ V(kPointerFieldsBeginOffset, 0) \
+ V(kPrototypeOffset, kTaggedSize) \
+ V(kConstructorOrBackPointerOffset, kTaggedSize) \
+ V(kTransitionsOrPrototypeInfoOffset, kTaggedSize) \
+ V(kDescriptorsOffset, kTaggedSize) \
+ V(kLayoutDescriptorOffset, FLAG_unbox_double_fields ? kTaggedSize : 0) \
+ V(kDependentCodeOffset, kTaggedSize) \
+ V(kPrototypeValidityCellOffset, kTaggedSize) \
+ V(kPointerFieldsEndOffset, 0) \
+ /* Total size. */ \
V(kSize, 0)
DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize, MAP_FIELDS)
@@ -819,7 +851,7 @@ class Map : public HeapObject {
// If |mode| is set to CLEAR_INOBJECT_PROPERTIES, |other| is treated as if
// it had exactly zero inobject properties.
// The "shared" flags of both this map and |other| are ignored.
- bool EquivalentToForNormalization(const Map* other,
+ bool EquivalentToForNormalization(const Map other,
PropertyNormalizationMode mode) const;
// Returns true if given field is unboxed double.
@@ -837,7 +869,7 @@ class Map : public HeapObject {
// the descriptor array.
inline void NotifyLeafMapLayoutChange(Isolate* isolate);
- static VisitorId GetVisitorId(Map* map);
+ static VisitorId GetVisitorId(Map map);
// Returns true if objects with given instance type are allowed to have
// fast transitionable elements kinds. This predicate is used to ensure
@@ -852,11 +884,11 @@ class Map : public HeapObject {
private:
// This byte encodes either the instance size without the in-object slack or
// the slack size in properties backing store.
- // Let H be JSObject::kHeaderSize / kPointerSize.
+ // Let H be JSObject::kHeaderSize / kTaggedSize.
// If value >= H then:
// - all field properties are stored in the object.
// - there is no property array.
- // - value * kPointerSize is the actual object size without the slack.
+ // - value * kTaggedSize is the actual object size without the slack.
// Otherwise:
// - there is no slack in the object.
// - the property array has value slack slots.
@@ -865,22 +897,21 @@ class Map : public HeapObject {
// Returns the map that this (root) map transitions to if its elements_kind
// is changed to |elements_kind|, or |nullptr| if no such map is cached yet.
- Map* LookupElementsTransitionMap(Isolate* isolate,
- ElementsKind elements_kind);
+ Map LookupElementsTransitionMap(Isolate* isolate, ElementsKind elements_kind);
// Tries to replay property transitions starting from this (root) map using
// the descriptor array of the |map|. The |root_map| is expected to have
// proper elements kind and therefore elements kinds transitions are not
// taken by this function. Returns |nullptr| if matching transition map is
// not found.
- Map* TryReplayPropertyTransitions(Isolate* isolate, Map* map);
+ Map TryReplayPropertyTransitions(Isolate* isolate, Map map);
static void ConnectTransition(Isolate* isolate, Handle<Map> parent,
Handle<Map> child, Handle<Name> name,
SimpleTransitionFlag flag);
- bool EquivalentToForTransition(const Map* other) const;
- bool EquivalentToForElementsKindTransition(const Map* other) const;
+ bool EquivalentToForTransition(const Map other) const;
+ bool EquivalentToForElementsKindTransition(const Map other) const;
static Handle<Map> RawCopy(Isolate* isolate, Handle<Map> map,
int instance_size, int inobject_properties);
static Handle<Map> ShareDescriptor(Isolate* isolate, Handle<Map> map,
@@ -919,8 +950,8 @@ class Map : public HeapObject {
void DeprecateTransitionTree(Isolate* isolate);
- void ReplaceDescriptors(Isolate* isolate, DescriptorArray* new_descriptors,
- LayoutDescriptor* new_layout_descriptor);
+ void ReplaceDescriptors(Isolate* isolate, DescriptorArray new_descriptors,
+ LayoutDescriptor new_layout_descriptor);
// Update field type of the given descriptor to new representation and new
// type. The type must be prepared for storing in descriptor array:
@@ -940,20 +971,25 @@ class Map : public HeapObject {
Representation old_representation, Representation new_representation,
MaybeHandle<FieldType> old_field_type, MaybeHandle<Object> old_value,
MaybeHandle<FieldType> new_field_type, MaybeHandle<Object> new_value);
+
+ // Use the high-level instance_descriptors/SetInstanceDescriptors instead.
+ inline void set_synchronized_instance_descriptors(
+ DescriptorArray array, WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
+
static const int kFastPropertiesSoftLimit = 12;
static const int kMaxFastProperties = 128;
friend class MapUpdater;
- DISALLOW_IMPLICIT_CONSTRUCTORS(Map);
+ OBJECT_CONSTRUCTORS(Map, HeapObject);
};
// The cache for maps used by normalized (dictionary mode) objects.
// Such maps do not have property descriptors, so a typical program
// needs very limited number of distinct normalized maps.
-class NormalizedMapCache : public WeakFixedArray,
- public NeverReadOnlySpaceObject {
+class NormalizedMapCache : public WeakFixedArray {
public:
+ NEVER_READ_ONLY_SPACE
static Handle<NormalizedMapCache> New(Isolate* isolate);
V8_WARN_UNUSED_RESULT MaybeHandle<Map> Get(Handle<Map> fast_map,
@@ -962,7 +998,7 @@ class NormalizedMapCache : public WeakFixedArray,
DECL_CAST(NormalizedMapCache)
- static inline bool IsNormalizedMapCache(const HeapObject* obj);
+ static inline bool IsNormalizedMapCache(const HeapObject obj);
DECL_VERIFIER(NormalizedMapCache)
@@ -972,8 +1008,10 @@ class NormalizedMapCache : public WeakFixedArray,
static inline int GetIndex(Handle<Map> map);
// The following declarations hide base class methods.
- Object* get(int index);
- void set(int index, Object* value);
+ Object get(int index);
+ void set(int index, Object value);
+
+ OBJECT_CONSTRUCTORS(NormalizedMapCache, WeakFixedArray)
};
} // namespace internal
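
In MAP_FIELDS above, tagged fields switch from kPointerSize to kTaggedSize and the padding check switches to kSystemPointerSize: with pointer compression, a tagged on-heap slot can be 4 bytes even on a 64-bit host while a real C++ pointer stays 8. A hedged sketch of the distinction (values assumed, not taken from V8's headers):

    #include <cstdint>

    constexpr int kSystemPointerSize = sizeof(void*);  // 8 on x64 hosts

    #ifdef V8_COMPRESS_POINTERS
    constexpr int kTaggedSize = 4;                 // compressed on-heap slot
    #else
    constexpr int kTaggedSize = kSystemPointerSize;
    #endif

    // Object layouts must be measured in kTaggedSize units, not in host
    // pointer widths -- that is exactly the MAP_FIELDS change above.
    static_assert(kTaggedSize <= kSystemPointerSize,
                  "a tagged slot never exceeds a host pointer");

    int main() { return 0; }
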
diff --git a/deps/v8/src/objects/maybe-object-inl.h b/deps/v8/src/objects/maybe-object-inl.h
index 6d2bc6a9ab..8c1023665a 100644
--- a/deps/v8/src/objects/maybe-object-inl.h
+++ b/deps/v8/src/objects/maybe-object-inl.h
@@ -7,19 +7,28 @@
#include "src/objects/maybe-object.h"
+#ifdef V8_COMPRESS_POINTERS
+#include "src/isolate.h"
+#endif
#include "src/objects-inl.h"
+#include "src/objects/slots-inl.h"
namespace v8 {
namespace internal {
-bool MaybeObject::ToSmi(Smi** value) {
- if (HAS_SMI_TAG(this)) {
- *value = Smi::cast(reinterpret_cast<Object*>(this));
+bool MaybeObject::ToSmi(Smi* value) {
+ if (HAS_SMI_TAG(ptr_)) {
+ *value = Smi::cast(Object(ptr_));
return true;
}
return false;
}
+Smi MaybeObject::ToSmi() const {
+ DCHECK(HAS_SMI_TAG(ptr_));
+ return Smi::cast(Object(ptr_));
+}
+
bool MaybeObject::IsStrongOrWeak() const {
if (IsSmi() || IsCleared()) {
return false;
@@ -27,7 +36,7 @@ bool MaybeObject::IsStrongOrWeak() const {
return true;
}
-bool MaybeObject::GetHeapObject(HeapObject** result) {
+bool MaybeObject::GetHeapObject(HeapObject* result) const {
if (IsSmi() || IsCleared()) {
return false;
}
@@ -35,12 +44,12 @@ bool MaybeObject::GetHeapObject(HeapObject** result) {
return true;
}
-bool MaybeObject::GetHeapObject(HeapObject** result,
- HeapObjectReferenceType* reference_type) {
+bool MaybeObject::GetHeapObject(HeapObject* result,
+ HeapObjectReferenceType* reference_type) const {
if (IsSmi() || IsCleared()) {
return false;
}
- *reference_type = HasWeakHeapObjectTag(this)
+ *reference_type = HasWeakHeapObjectTag(ptr_)
? HeapObjectReferenceType::WEAK
: HeapObjectReferenceType::STRONG;
*result = GetHeapObject();
@@ -48,29 +57,29 @@ bool MaybeObject::GetHeapObject(HeapObject** result,
}
bool MaybeObject::IsStrong() const {
- return !HasWeakHeapObjectTag(this) && !IsSmi();
+ return !HasWeakHeapObjectTag(ptr_) && !IsSmi();
}
-bool MaybeObject::GetHeapObjectIfStrong(HeapObject** result) {
- if (!HasWeakHeapObjectTag(this) && !IsSmi()) {
- *result = reinterpret_cast<HeapObject*>(this);
+bool MaybeObject::GetHeapObjectIfStrong(HeapObject* result) const {
+ if (!HasWeakHeapObjectTag(ptr_) && !IsSmi()) {
+ *result = HeapObject::cast(Object(ptr_));
return true;
}
return false;
}
-HeapObject* MaybeObject::GetHeapObjectAssumeStrong() {
+HeapObject MaybeObject::GetHeapObjectAssumeStrong() const {
DCHECK(IsStrong());
- return reinterpret_cast<HeapObject*>(this);
+ return HeapObject::cast(Object(ptr_));
}
bool MaybeObject::IsWeak() const {
- return HasWeakHeapObjectTag(this) && !IsCleared();
+ return HasWeakHeapObjectTag(ptr_) && !IsCleared();
}
-bool MaybeObject::IsWeakOrCleared() const { return HasWeakHeapObjectTag(this); }
+bool MaybeObject::IsWeakOrCleared() const { return HasWeakHeapObjectTag(ptr_); }
-bool MaybeObject::GetHeapObjectIfWeak(HeapObject** result) {
+bool MaybeObject::GetHeapObjectIfWeak(HeapObject* result) const {
if (IsWeak()) {
*result = GetHeapObject();
return true;
@@ -78,29 +87,69 @@ bool MaybeObject::GetHeapObjectIfWeak(HeapObject** result) {
return false;
}
-HeapObject* MaybeObject::GetHeapObjectAssumeWeak() {
+HeapObject MaybeObject::GetHeapObjectAssumeWeak() const {
DCHECK(IsWeak());
return GetHeapObject();
}
-HeapObject* MaybeObject::GetHeapObject() {
+HeapObject MaybeObject::GetHeapObject() const {
DCHECK(!IsSmi());
DCHECK(!IsCleared());
- return RemoveWeakHeapObjectMask(reinterpret_cast<HeapObjectReference*>(this));
+ return HeapObject::cast(Object(ptr_ & ~kWeakHeapObjectMask));
}
-Object* MaybeObject::GetHeapObjectOrSmi() {
+Object MaybeObject::GetHeapObjectOrSmi() const {
if (IsSmi()) {
- return reinterpret_cast<Object*>(this);
+ return Object(ptr_);
}
return GetHeapObject();
}
bool MaybeObject::IsObject() const { return IsSmi() || IsStrong(); }
-MaybeObject* MaybeObject::MakeWeak(MaybeObject* object) {
- DCHECK(object->IsStrongOrWeak());
- return AddWeakHeapObjectMask(object);
+MaybeObject MaybeObject::MakeWeak(MaybeObject object) {
+ DCHECK(object.IsStrongOrWeak());
+ return MaybeObject(object.ptr_ | kWeakHeapObjectMask);
+}
+
+// static
+HeapObjectReference HeapObjectReference::ClearedValue(Isolate* isolate) {
+ // Construct cleared weak ref value.
+ Address raw_value = kClearedWeakHeapObjectLower32;
+#ifdef V8_COMPRESS_POINTERS
+ // This is necessary to make pointer decompression computation also
+ // suitable for cleared weak references.
+ Address isolate_root = isolate->isolate_root();
+ raw_value |= isolate_root;
+ DCHECK_EQ(raw_value & (~static_cast<Address>(kClearedWeakHeapObjectLower32)),
+ isolate_root);
+#endif
+ // The rest of the code will check only the lower 32-bits.
+ DCHECK_EQ(kClearedWeakHeapObjectLower32, static_cast<uint32_t>(raw_value));
+ return HeapObjectReference(raw_value);
+}
+
+template <typename THeapObjectSlot>
+void HeapObjectReference::Update(THeapObjectSlot slot, HeapObject value) {
+ static_assert(std::is_same<THeapObjectSlot, FullHeapObjectSlot>::value ||
+ std::is_same<THeapObjectSlot, HeapObjectSlot>::value,
+ "Only FullHeapObjectSlot and HeapObjectSlot are expected here");
+ Address old_value = (*slot).ptr();
+ DCHECK(!HAS_SMI_TAG(old_value));
+ Address new_value = value->ptr();
+ DCHECK(Internals::HasHeapObjectTag(new_value));
+
+#ifdef DEBUG
+ bool weak_before = HasWeakHeapObjectTag(old_value);
+#endif
+
+ slot.store(
+ HeapObjectReference(new_value | (old_value & kWeakHeapObjectMask)));
+
+#ifdef DEBUG
+ bool weak_after = HasWeakHeapObjectTag((*slot).ptr());
+ DCHECK_EQ(weak_before, weak_after);
+#endif
}
} // namespace internal
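
HeapObjectReference::Update above preserves the weakness of a slot: it ORs the old value's weak tag bit into the new pointer before storing, and the DEBUG block asserts the bit did not flip. A standalone sketch of that bit juggling (mask value assumed):

    #include <cassert>
    #include <cstdint>

    using Address = uintptr_t;

    constexpr Address kWeakHeapObjectMask = 2;  // assumed weak tag bit

    // Replace the object a slot points to while keeping the reference's
    // strong/weak flavor, as HeapObjectReference::Update does above.
    Address UpdatePreservingWeakness(Address old_value, Address new_object) {
      return new_object | (old_value & kWeakHeapObjectMask);
    }

    int main() {
      Address weak_old = 0x1000 | kWeakHeapObjectMask;
      Address strong_old = 0x1000;
      assert(UpdatePreservingWeakness(weak_old, 0x2000) & kWeakHeapObjectMask);
      assert(!(UpdatePreservingWeakness(strong_old, 0x2000) & kWeakHeapObjectMask));
    }
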
diff --git a/deps/v8/src/objects/maybe-object.h b/deps/v8/src/objects/maybe-object.h
index 0d55ff859c..c40ae0a5aa 100644
--- a/deps/v8/src/objects/maybe-object.h
+++ b/deps/v8/src/objects/maybe-object.h
@@ -9,12 +9,14 @@
#include "include/v8.h"
#include "src/globals.h"
#include "src/objects.h"
+#include "src/objects/slots.h"
+#include "src/objects/smi.h"
namespace v8 {
namespace internal {
class HeapObject;
-class Smi;
+class Isolate;
class StringStream;
// A MaybeObject is either a SMI, a strong reference to a HeapObject, a weak
@@ -22,11 +24,24 @@ class StringStream;
// implementing in-place weak references (see design doc: goo.gl/j6SdcK )
class MaybeObject {
public:
- bool IsSmi() const { return HAS_SMI_TAG(this); }
- inline bool ToSmi(Smi** value);
+ MaybeObject() : ptr_(kNullAddress) {}
+ explicit MaybeObject(Address ptr) : ptr_(ptr) {}
+
+ bool operator==(const MaybeObject& other) const { return ptr_ == other.ptr_; }
+ bool operator!=(const MaybeObject& other) const { return ptr_ != other.ptr_; }
+
+ Address ptr() const { return ptr_; }
+
+ // Enable incremental transition of client code.
+ MaybeObject* operator->() { return this; }
+ const MaybeObject* operator->() const { return this; }
+
+ bool IsSmi() const { return HAS_SMI_TAG(ptr_); }
+ inline bool ToSmi(Smi* value);
+ inline Smi ToSmi() const;
bool IsCleared() const {
- return ::v8::internal::IsClearedWeakHeapObject(this);
+ return static_cast<uint32_t>(ptr_) == kClearedWeakHeapObjectLower32;
}
inline bool IsStrongOrWeak() const;
@@ -34,58 +49,58 @@ class MaybeObject {
// If this MaybeObject is a strong pointer to a HeapObject, returns true and
// sets *result. Otherwise returns false.
- inline bool GetHeapObjectIfStrong(HeapObject** result);
+ inline bool GetHeapObjectIfStrong(HeapObject* result) const;
// DCHECKs that this MaybeObject is a strong pointer to a HeapObject and
// returns the HeapObject.
- inline HeapObject* GetHeapObjectAssumeStrong();
+ inline HeapObject GetHeapObjectAssumeStrong() const;
inline bool IsWeak() const;
inline bool IsWeakOrCleared() const;
// If this MaybeObject is a weak pointer to a HeapObject, returns true and
// sets *result. Otherwise returns false.
- inline bool GetHeapObjectIfWeak(HeapObject** result);
+ inline bool GetHeapObjectIfWeak(HeapObject* result) const;
// DCHECKs that this MaybeObject is a weak pointer to a HeapObject and
// returns the HeapObject.
- inline HeapObject* GetHeapObjectAssumeWeak();
+ inline HeapObject GetHeapObjectAssumeWeak() const;
// If this MaybeObject is a strong or weak pointer to a HeapObject, returns
// true and sets *result. Otherwise returns false.
- inline bool GetHeapObject(HeapObject** result);
- inline bool GetHeapObject(HeapObject** result,
- HeapObjectReferenceType* reference_type);
+ inline bool GetHeapObject(HeapObject* result) const;
+ inline bool GetHeapObject(HeapObject* result,
+ HeapObjectReferenceType* reference_type) const;
// DCHECKs that this MaybeObject is a strong or a weak pointer to a HeapObject
// and returns the HeapObject.
- inline HeapObject* GetHeapObject();
+ inline HeapObject GetHeapObject() const;
// DCHECKs that this MaybeObject is a strong or a weak pointer to a HeapObject
// or a SMI and returns the HeapObject or SMI.
- inline Object* GetHeapObjectOrSmi();
+ inline Object GetHeapObjectOrSmi() const;
inline bool IsObject() const;
template <typename T>
- T* cast() {
- DCHECK(!HasWeakHeapObjectTag(this));
- return T::cast(reinterpret_cast<Object*>(this));
+ T cast() const {
+ DCHECK(!HasWeakHeapObjectTag(ptr_));
+ return T::cast(Object(ptr_));
}
- static MaybeObject* FromSmi(Smi* smi) {
- DCHECK(HAS_SMI_TAG(smi));
- return reinterpret_cast<MaybeObject*>(smi);
+ static MaybeObject FromSmi(Smi smi) {
+ DCHECK(HAS_SMI_TAG(smi->ptr()));
+ return MaybeObject(smi->ptr());
}
- static MaybeObject* FromObject(Object* object) {
- DCHECK(!HasWeakHeapObjectTag(object));
- return reinterpret_cast<MaybeObject*>(object);
+ static MaybeObject FromObject(Object object) {
+ DCHECK(!HasWeakHeapObjectTag(object.ptr()));
+ return MaybeObject(object.ptr());
}
- static inline MaybeObject* MakeWeak(MaybeObject* object);
+ static inline MaybeObject MakeWeak(MaybeObject object);
#ifdef VERIFY_HEAP
- static void VerifyMaybeObjectPointer(Isolate* isolate, MaybeObject* p);
+ static void VerifyMaybeObjectPointer(Isolate* isolate, MaybeObject p);
#endif
// Prints this object without details.
@@ -105,49 +120,32 @@ class MaybeObject {
#endif
private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(MaybeObject);
+ Address ptr_;
};
// A HeapObjectReference is either a strong reference to a HeapObject, a weak
// reference to a HeapObject, or a cleared weak reference.
class HeapObjectReference : public MaybeObject {
public:
- static HeapObjectReference* Strong(Object* object) {
+ explicit HeapObjectReference(Address address) : MaybeObject(address) {}
+ explicit HeapObjectReference(Object object) : MaybeObject(object->ptr()) {}
+
+ static HeapObjectReference Strong(Object object) {
DCHECK(!object->IsSmi());
DCHECK(!HasWeakHeapObjectTag(object));
- return reinterpret_cast<HeapObjectReference*>(object);
+ return HeapObjectReference(object);
}
- static HeapObjectReference* Weak(Object* object) {
+ static HeapObjectReference Weak(Object object) {
DCHECK(!object->IsSmi());
DCHECK(!HasWeakHeapObjectTag(object));
- return AddWeakHeapObjectMask(object);
- }
-
- static HeapObjectReference* ClearedValue() {
- return reinterpret_cast<HeapObjectReference*>(kClearedWeakHeapObject);
+ return HeapObjectReference(object->ptr() | kWeakHeapObjectMask);
}
- static void Update(HeapObjectReference** slot, HeapObject* value) {
- DCHECK(!HAS_SMI_TAG(*slot));
- DCHECK(Internals::HasHeapObjectTag(value));
+ V8_INLINE static HeapObjectReference ClearedValue(Isolate* isolate);
-#ifdef DEBUG
- bool weak_before = HasWeakHeapObjectTag(*slot);
-#endif
-
- *slot = reinterpret_cast<HeapObjectReference*>(
- reinterpret_cast<intptr_t>(value) |
- (reinterpret_cast<intptr_t>(*slot) & kWeakHeapObjectMask));
-
-#ifdef DEBUG
- bool weak_after = HasWeakHeapObjectTag(*slot);
- DCHECK_EQ(weak_before, weak_after);
-#endif
- }
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(HeapObjectReference);
+ template <typename THeapObjectSlot>
+ V8_INLINE static void Update(THeapObjectSlot slot, HeapObject value);
};
} // namespace internal
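
Because HeapObject is now a value type, the out parameters above shrink from HeapObject** to HeapObject*: the callee copies one word into caller-owned storage instead of rebinding a caller's pointer. A toy illustration of the calling convention (hypothetical Ref type, not V8's API):

    #include <cassert>
    #include <cstdint>

    struct Ref {
      uintptr_t ptr = 0;

      // Mirrors bool GetHeapObjectIfStrong(HeapObject* result): the out
      // parameter points at a value slot, not at a pointer.
      bool TryGet(Ref* out) const {
        if (ptr == 0) return false;
        *out = *this;  // copy one word into the caller's slot
        return true;
      }
    };

    int main() {
      Ref r{0x40};
      Ref result;  // caller owns storage for the result
      if (r.TryGet(&result)) assert(result.ptr == 0x40);
    }
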
diff --git a/deps/v8/src/objects/microtask-inl.h b/deps/v8/src/objects/microtask-inl.h
index 180f55be17..fbd8142ebd 100644
--- a/deps/v8/src/objects/microtask-inl.h
+++ b/deps/v8/src/objects/microtask-inl.h
@@ -7,7 +7,8 @@
#include "src/objects/microtask.h"
-#include "src/objects-inl.h" // Needed for write barriers
+#include "src/heap/heap-write-barrier-inl.h"
+#include "src/objects/foreign-inl.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -15,6 +16,10 @@
namespace v8 {
namespace internal {
+OBJECT_CONSTRUCTORS_IMPL(Microtask, Struct)
+OBJECT_CONSTRUCTORS_IMPL(CallbackTask, Microtask)
+OBJECT_CONSTRUCTORS_IMPL(CallableTask, Microtask)
+
CAST_ACCESSOR(Microtask)
CAST_ACCESSOR(CallbackTask)
CAST_ACCESSOR(CallableTask)
diff --git a/deps/v8/src/objects/microtask-queue.cc b/deps/v8/src/objects/microtask-queue.cc
deleted file mode 100644
index a8905acd36..0000000000
--- a/deps/v8/src/objects/microtask-queue.cc
+++ /dev/null
@@ -1,40 +0,0 @@
-// Copyright 2018 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/objects/microtask-queue.h"
-
-#include "src/objects/microtask-queue-inl.h"
-
-namespace v8 {
-namespace internal {
-
-// DCHECK requires this for taking the reference of it.
-constexpr int MicrotaskQueue::kMinimumQueueCapacity;
-
-// static
-void MicrotaskQueue::EnqueueMicrotask(Isolate* isolate,
- Handle<MicrotaskQueue> microtask_queue,
- Handle<Microtask> microtask) {
- Handle<FixedArray> queue(microtask_queue->queue(), isolate);
- int num_tasks = microtask_queue->pending_microtask_count();
- DCHECK_LE(num_tasks, queue->length());
- if (num_tasks == queue->length()) {
- queue = isolate->factory()->CopyFixedArrayAndGrow(
- queue, std::max(num_tasks, kMinimumQueueCapacity));
- microtask_queue->set_queue(*queue);
- }
- DCHECK_LE(kMinimumQueueCapacity, queue->length());
- DCHECK_LT(num_tasks, queue->length());
- DCHECK(queue->get(num_tasks)->IsUndefined(isolate));
- queue->set(num_tasks, *microtask);
- microtask_queue->set_pending_microtask_count(num_tasks + 1);
-}
-
-// static
-void MicrotaskQueue::RunMicrotasks(Handle<MicrotaskQueue> microtask_queue) {
- UNIMPLEMENTED();
-}
-
-} // namespace internal
-} // namespace v8
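
The deleted EnqueueMicrotask above grew the backing FixedArray whenever the pending count hit its length, growing by max(count, kMinimumQueueCapacity), i.e. roughly doubling (this commit removes the on-heap MicrotaskQueue object entirely; presumably a native replacement lives elsewhere in the tree). The growth policy itself, sketched outside the heap:

    #include <algorithm>
    #include <cassert>
    #include <vector>

    constexpr int kMinimumQueueCapacity = 8;

    // Mirrors the deleted EnqueueMicrotask: when the queue is full, grow it
    // by max(current_count, kMinimumQueueCapacity) for amortized O(1) pushes.
    template <typename T>
    void Enqueue(std::vector<T>& queue, int& pending_count, T task) {
      if (pending_count == static_cast<int>(queue.size())) {
        int grow_by = std::max(pending_count, kMinimumQueueCapacity);
        queue.resize(queue.size() + grow_by);
      }
      queue[pending_count++] = task;
    }

    int main() {
      std::vector<int> queue;
      int pending = 0;
      for (int i = 0; i < 20; ++i) Enqueue(queue, pending, i);
      assert(pending == 20 && static_cast<int>(queue.size()) >= 20);
    }
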
diff --git a/deps/v8/src/objects/microtask-queue.h b/deps/v8/src/objects/microtask-queue.h
deleted file mode 100644
index bb14cfb498..0000000000
--- a/deps/v8/src/objects/microtask-queue.h
+++ /dev/null
@@ -1,55 +0,0 @@
-// Copyright 2018 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_OBJECTS_MICROTASK_QUEUE_H_
-#define V8_OBJECTS_MICROTASK_QUEUE_H_
-
-#include "src/objects.h"
-#include "src/objects/microtask.h"
-
-// Has to be the last include (doesn't have include guards):
-#include "src/objects/object-macros.h"
-
-namespace v8 {
-namespace internal {
-
-class V8_EXPORT_PRIVATE MicrotaskQueue : public Struct {
- public:
- DECL_CAST(MicrotaskQueue)
- DECL_VERIFIER(MicrotaskQueue)
- DECL_PRINTER(MicrotaskQueue)
-
- // A FixedArray that the queued microtasks are stored.
- // The first |pending_microtask_count| slots contains Microtask instance
- // for each, and followings are undefined_value if any.
- DECL_ACCESSORS(queue, FixedArray)
-
- // The number of microtasks queued in |queue|. This must be less or equal to
- // the length of |queue|.
- DECL_INT_ACCESSORS(pending_microtask_count)
-
- // Enqueues |microtask| to |microtask_queue|.
- static void EnqueueMicrotask(Isolate* isolate,
- Handle<MicrotaskQueue> microtask_queue,
- Handle<Microtask> microtask);
-
- // Runs all enqueued microtasks.
- static void RunMicrotasks(Handle<MicrotaskQueue> microtask_queue);
-
- static constexpr int kMinimumQueueCapacity = 8;
-
- static const int kQueueOffset = HeapObject::kHeaderSize;
- static const int kPendingMicrotaskCountOffset = kQueueOffset + kPointerSize;
- static const int kSize = kPendingMicrotaskCountOffset + kPointerSize;
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(MicrotaskQueue);
-};
-
-} // namespace internal
-} // namespace v8
-
-#include "src/objects/object-macros-undef.h"
-
-#endif // V8_OBJECTS_MICROTASK_QUEUE_H_
diff --git a/deps/v8/src/objects/microtask.h b/deps/v8/src/objects/microtask.h
index 33f121aa2c..a19eea500f 100644
--- a/deps/v8/src/objects/microtask.h
+++ b/deps/v8/src/objects/microtask.h
@@ -6,6 +6,7 @@
#define V8_OBJECTS_MICROTASK_H_
#include "src/objects.h"
+#include "src/objects/struct.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -22,8 +23,7 @@ class Microtask : public Struct {
DECL_CAST(Microtask)
DECL_VERIFIER(Microtask)
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(Microtask);
+ OBJECT_CONSTRUCTORS(Microtask, Struct);
};
// A CallbackTask is a special Microtask that allows us to schedule
@@ -34,17 +34,22 @@ class CallbackTask : public Microtask {
DECL_ACCESSORS(callback, Foreign)
DECL_ACCESSORS(data, Foreign)
- static const int kCallbackOffset = Microtask::kHeaderSize;
- static const int kDataOffset = kCallbackOffset + kPointerSize;
- static const int kSize = kDataOffset + kPointerSize;
+// Layout description.
+#define CALLBACK_TASK_FIELDS(V) \
+ V(kCallbackOffset, kTaggedSize) \
+ V(kDataOffset, kTaggedSize) \
+ /* Total size. */ \
+ V(kSize, 0)
+
+ DEFINE_FIELD_OFFSET_CONSTANTS(Microtask::kHeaderSize, CALLBACK_TASK_FIELDS)
+#undef CALLBACK_TASK_FIELDS
// Dispatched behavior.
DECL_CAST(CallbackTask)
DECL_PRINTER(CallbackTask)
DECL_VERIFIER(CallbackTask)
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(CallbackTask)
+ OBJECT_CONSTRUCTORS(CallbackTask, Microtask);
};
// A CallableTask is a special (internal) Microtask that allows us to
@@ -55,9 +60,15 @@ class CallableTask : public Microtask {
DECL_ACCESSORS(callable, JSReceiver)
DECL_ACCESSORS(context, Context)
- static const int kCallableOffset = Microtask::kHeaderSize;
- static const int kContextOffset = kCallableOffset + kPointerSize;
- static const int kSize = kContextOffset + kPointerSize;
+// Layout description.
+#define CALLABLE_TASK_FIELDS(V) \
+ V(kCallableOffset, kTaggedSize) \
+ V(kContextOffset, kTaggedSize) \
+ /* Total size. */ \
+ V(kSize, 0)
+
+ DEFINE_FIELD_OFFSET_CONSTANTS(Microtask::kHeaderSize, CALLABLE_TASK_FIELDS)
+#undef CALLABLE_TASK_FIELDS
// Dispatched behavior.
DECL_CAST(CallableTask)
@@ -65,8 +76,7 @@ class CallableTask : public Microtask {
DECL_VERIFIER(CallableTask)
void BriefPrintDetails(std::ostream& os);
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(CallableTask);
+ OBJECT_CONSTRUCTORS(CallableTask, Microtask);
};
} // namespace internal
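
CALLBACK_TASK_FIELDS and CALLABLE_TASK_FIELDS above replace hand-chained kPointerSize sums with an X-macro that DEFINE_FIELD_OFFSET_CONSTANTS turns into cumulative offsets. A toy expansion modeled on that idea (illustrative; V8's real macro may differ in detail):

    #include <cstdio>

    constexpr int kTaggedSize = 8;  // assumed: uncompressed 64-bit tagged slot
    constexpr int kHeaderSize = 8;  // assumed parent-class header size

    // Each field contributes two enumerators: its offset, and an ...End marker
    // at offset + size - 1, so the next field's offset starts right after it.
    #define DEFINE_ONE_FIELD_OFFSET(Name, Size) Name, Name##End = Name + (Size)-1,

    #define DEFINE_FIELD_OFFSET_CONSTANTS(StartOffset, LIST_MACRO) \
      enum { LIST_MACRO##_StartOffset = (StartOffset)-1,           \
             LIST_MACRO(DEFINE_ONE_FIELD_OFFSET) };

    #define CALLBACK_TASK_FIELDS(V)   \
      V(kCallbackOffset, kTaggedSize) \
      V(kDataOffset, kTaggedSize)     \
      V(kSize, 0)

    DEFINE_FIELD_OFFSET_CONSTANTS(kHeaderSize, CALLBACK_TASK_FIELDS)

    int main() {
      // 8-byte header + two tagged fields = offsets 8, 16 and total size 24.
      printf("callback=%d data=%d size=%d\n", kCallbackOffset, kDataOffset, kSize);
    }
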
diff --git a/deps/v8/src/objects/module-inl.h b/deps/v8/src/objects/module-inl.h
index 1a4f2b3efa..09e19343e1 100644
--- a/deps/v8/src/objects/module-inl.h
+++ b/deps/v8/src/objects/module-inl.h
@@ -16,6 +16,12 @@
namespace v8 {
namespace internal {
+OBJECT_CONSTRUCTORS_IMPL(Module, Struct)
+OBJECT_CONSTRUCTORS_IMPL(ModuleInfoEntry, Struct)
+OBJECT_CONSTRUCTORS_IMPL(JSModuleNamespace, JSObject)
+
+NEVER_READ_ONLY_SPACE_IMPL(Module)
+
CAST_ACCESSOR(Module)
ACCESSORS(Module, code, Object, kCodeOffset)
ACCESSORS(Module, exports, ObjectHashTable, kExportsOffset)
@@ -31,7 +37,7 @@ SMI_ACCESSORS(Module, dfs_index, kDfsIndexOffset)
SMI_ACCESSORS(Module, dfs_ancestor_index, kDfsAncestorIndexOffset)
SMI_ACCESSORS(Module, hash, kHashOffset)
-ModuleInfo* Module::info() const {
+ModuleInfo Module::info() const {
return (status() >= kEvaluating)
? ModuleInfo::cast(code())
: GetSharedFunctionInfo()->scope_info()->ModuleDescriptorInfo();
@@ -49,34 +55,35 @@ SMI_ACCESSORS(ModuleInfoEntry, cell_index, kCellIndexOffset)
SMI_ACCESSORS(ModuleInfoEntry, beg_pos, kBegPosOffset)
SMI_ACCESSORS(ModuleInfoEntry, end_pos, kEndPosOffset)
+OBJECT_CONSTRUCTORS_IMPL(ModuleInfo, FixedArray)
CAST_ACCESSOR(ModuleInfo)
-FixedArray* ModuleInfo::module_requests() const {
+FixedArray ModuleInfo::module_requests() const {
return FixedArray::cast(get(kModuleRequestsIndex));
}
-FixedArray* ModuleInfo::special_exports() const {
+FixedArray ModuleInfo::special_exports() const {
return FixedArray::cast(get(kSpecialExportsIndex));
}
-FixedArray* ModuleInfo::regular_exports() const {
+FixedArray ModuleInfo::regular_exports() const {
return FixedArray::cast(get(kRegularExportsIndex));
}
-FixedArray* ModuleInfo::regular_imports() const {
+FixedArray ModuleInfo::regular_imports() const {
return FixedArray::cast(get(kRegularImportsIndex));
}
-FixedArray* ModuleInfo::namespace_imports() const {
+FixedArray ModuleInfo::namespace_imports() const {
return FixedArray::cast(get(kNamespaceImportsIndex));
}
-FixedArray* ModuleInfo::module_request_positions() const {
+FixedArray ModuleInfo::module_request_positions() const {
return FixedArray::cast(get(kModuleRequestPositionsIndex));
}
#ifdef DEBUG
-bool ModuleInfo::Equals(ModuleInfo* other) const {
+bool ModuleInfo::Equals(ModuleInfo other) const {
return regular_exports() == other->regular_exports() &&
regular_imports() == other->regular_imports() &&
special_exports() == other->special_exports() &&
diff --git a/deps/v8/src/objects/module.cc b/deps/v8/src/objects/module.cc
index c4d2626e60..5ac05478b8 100644
--- a/deps/v8/src/objects/module.cc
+++ b/deps/v8/src/objects/module.cc
@@ -11,9 +11,11 @@
#include "src/api-inl.h"
#include "src/ast/modules.h"
#include "src/objects-inl.h"
+#include "src/objects/cell-inl.h"
#include "src/objects/hash-table-inl.h"
#include "src/objects/js-generator-inl.h"
#include "src/objects/module-inl.h"
+#include "src/ostreams.h"
namespace v8 {
namespace internal {
@@ -139,9 +141,9 @@ void Module::CreateExport(Isolate* isolate, Handle<Module> module,
module->set_exports(*exports);
}
-Cell* Module::GetCell(int cell_index) {
+Cell Module::GetCell(int cell_index) {
DisallowHeapAllocation no_gc;
- Object* cell;
+ Object cell;
switch (ModuleDescriptor::GetCellIndexKind(cell_index)) {
case ModuleDescriptor::kImport:
cell = regular_imports()->get(ImportIndex(cell_index));
@@ -247,7 +249,7 @@ void Module::Reset(Isolate* isolate, Handle<Module> module) {
void Module::RecordError(Isolate* isolate) {
DisallowHeapAllocation no_alloc;
DCHECK(exception()->IsTheHole(isolate));
- Object* the_exception = isolate->pending_exception();
+ Object the_exception = isolate->pending_exception();
DCHECK(!the_exception->IsTheHole(isolate));
set_code(info());
@@ -258,14 +260,14 @@ void Module::RecordError(Isolate* isolate) {
set_exception(the_exception);
}
-Object* Module::GetException() {
+Object Module::GetException() {
DisallowHeapAllocation no_alloc;
DCHECK_EQ(status(), Module::kErrored);
DCHECK(!exception()->IsTheHole());
return exception();
}
-SharedFunctionInfo* Module::GetSharedFunctionInfo() const {
+SharedFunctionInfo Module::GetSharedFunctionInfo() const {
DisallowHeapAllocation no_alloc;
DCHECK_NE(status(), Module::kEvaluating);
DCHECK_NE(status(), Module::kEvaluated);
@@ -745,14 +747,10 @@ MaybeHandle<Object> Module::Evaluate(Isolate* isolate, Handle<Module> module,
ASSIGN_RETURN_ON_EXCEPTION(
isolate, result, Execution::Call(isolate, resume, generator, 0, nullptr),
Object);
- DCHECK(static_cast<JSIteratorResult*>(JSObject::cast(*result))
- ->done()
- ->BooleanValue(isolate));
+ DCHECK(JSIteratorResult::cast(*result)->done()->BooleanValue(isolate));
CHECK(MaybeTransitionComponent(isolate, module, stack, kEvaluated));
- return handle(
- static_cast<JSIteratorResult*>(JSObject::cast(*result))->value(),
- isolate);
+ return handle(JSIteratorResult::cast(*result)->value(), isolate);
}
namespace {
@@ -795,7 +793,7 @@ void FetchStarExports(Isolate* isolate, Handle<Module> module, Zone* zone,
Handle<ObjectHashTable> requested_exports(requested_module->exports(),
isolate);
for (int i = 0, n = requested_exports->Capacity(); i < n; ++i) {
- Object* key;
+ Object key;
if (!requested_exports->ToKey(roots, i, &key)) continue;
Handle<String> name(String::cast(key), isolate);
@@ -856,7 +854,7 @@ Handle<JSModuleNamespace> Module::GetModuleNamespace(Isolate* isolate,
ZoneVector<Handle<String>> names(&zone);
names.reserve(exports->NumberOfElements());
for (int i = 0, n = exports->Capacity(); i < n; ++i) {
- Object* key;
+ Object key;
if (!exports->ToKey(roots, i, &key)) continue;
names.push_back(handle(String::cast(key), isolate));
}
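
Both loops above walk a hash table by capacity and use ToKey to skip empty and deleted slots before touching a key. The shape of that idiom, sketched with a plain array-backed table (hypothetical helpers, not V8's ObjectHashTable):

    #include <cassert>
    #include <vector>

    // Sentinels a V8-style hash table stores in unused key slots.
    enum : int { kEmpty = -1, kDeleted = -2 };

    struct Table {
      std::vector<int> keys;  // key slots only; values live alongside in V8
      int Capacity() const { return static_cast<int>(keys.size()); }

      // Returns true and writes *key_out only for a live entry, mirroring
      // the "skip holes" contract of ToKey in module.cc above.
      bool ToKey(int i, int* key_out) const {
        if (keys[i] == kEmpty || keys[i] == kDeleted) return false;
        *key_out = keys[i];
        return true;
      }
    };

    int main() {
      Table t{{kEmpty, 7, kDeleted, 9}};
      int live = 0;
      for (int i = 0, n = t.Capacity(); i < n; ++i) {
        int key;
        if (!t.ToKey(i, &key)) continue;  // same early-continue as above
        ++live;
      }
      assert(live == 2);
    }
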
diff --git a/deps/v8/src/objects/module.h b/deps/v8/src/objects/module.h
index fd9f9ace80..ec63ddb640 100644
--- a/deps/v8/src/objects/module.h
+++ b/deps/v8/src/objects/module.h
@@ -8,6 +8,7 @@
#include "src/objects.h"
#include "src/objects/fixed-array.h"
#include "src/objects/js-objects.h"
+#include "src/objects/struct.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -26,8 +27,9 @@ class String;
class Zone;
// The runtime representation of an ECMAScript module.
-class Module : public Struct, public NeverReadOnlySpaceObject {
+class Module : public Struct {
public:
+ NEVER_READ_ONLY_SPACE
DECL_CAST(Module)
DECL_VERIFIER(Module)
DECL_PRINTER(Module)
@@ -66,11 +68,11 @@ class Module : public Struct, public NeverReadOnlySpaceObject {
};
// The exception in the case {status} is kErrored.
- Object* GetException();
+ Object GetException();
// The shared function info in case {status} is not kEvaluating, kEvaluated or
// kErrored.
- SharedFunctionInfo* GetSharedFunctionInfo() const;
+ SharedFunctionInfo GetSharedFunctionInfo() const;
// The namespace object (or undefined).
DECL_ACCESSORS(module_namespace, HeapObject)
@@ -89,7 +91,7 @@ class Module : public Struct, public NeverReadOnlySpaceObject {
DECL_ACCESSORS(import_meta, Object)
// Get the ModuleInfo associated with the code.
- inline ModuleInfo* info() const;
+ inline ModuleInfo info() const;
// Implementation of spec operation ModuleDeclarationInstantiation.
// Returns false if an exception occurred during instantiation, true
@@ -103,7 +105,7 @@ class Module : public Struct, public NeverReadOnlySpaceObject {
static V8_WARN_UNUSED_RESULT MaybeHandle<Object> Evaluate(
Isolate* isolate, Handle<Module> module);
- Cell* GetCell(int cell_index);
+ Cell GetCell(int cell_index);
static Handle<Object> LoadVariable(Isolate* isolate, Handle<Module> module,
int cell_index);
static void StoreVariable(Handle<Module> module, int cell_index,
@@ -123,21 +125,26 @@ class Module : public Struct, public NeverReadOnlySpaceObject {
static Handle<JSModuleNamespace> GetModuleNamespace(Isolate* isolate,
Handle<Module> module);
- static const int kCodeOffset = HeapObject::kHeaderSize;
- static const int kExportsOffset = kCodeOffset + kPointerSize;
- static const int kRegularExportsOffset = kExportsOffset + kPointerSize;
- static const int kRegularImportsOffset = kRegularExportsOffset + kPointerSize;
- static const int kHashOffset = kRegularImportsOffset + kPointerSize;
- static const int kModuleNamespaceOffset = kHashOffset + kPointerSize;
- static const int kRequestedModulesOffset =
- kModuleNamespaceOffset + kPointerSize;
- static const int kStatusOffset = kRequestedModulesOffset + kPointerSize;
- static const int kDfsIndexOffset = kStatusOffset + kPointerSize;
- static const int kDfsAncestorIndexOffset = kDfsIndexOffset + kPointerSize;
- static const int kExceptionOffset = kDfsAncestorIndexOffset + kPointerSize;
- static const int kScriptOffset = kExceptionOffset + kPointerSize;
- static const int kImportMetaOffset = kScriptOffset + kPointerSize;
- static const int kSize = kImportMetaOffset + kPointerSize;
+// Layout description.
+#define MODULE_FIELDS(V) \
+ V(kCodeOffset, kTaggedSize) \
+ V(kExportsOffset, kTaggedSize) \
+ V(kRegularExportsOffset, kTaggedSize) \
+ V(kRegularImportsOffset, kTaggedSize) \
+ V(kHashOffset, kTaggedSize) \
+ V(kModuleNamespaceOffset, kTaggedSize) \
+ V(kRequestedModulesOffset, kTaggedSize) \
+ V(kStatusOffset, kTaggedSize) \
+ V(kDfsIndexOffset, kTaggedSize) \
+ V(kDfsAncestorIndexOffset, kTaggedSize) \
+ V(kExceptionOffset, kTaggedSize) \
+ V(kScriptOffset, kTaggedSize) \
+ V(kImportMetaOffset, kTaggedSize) \
+ /* Total size. */ \
+ V(kSize, 0)
+
+ DEFINE_FIELD_OFFSET_CONSTANTS(Struct::kHeaderSize, MODULE_FIELDS)
+#undef MODULE_FIELDS
private:
friend class Factory;
@@ -210,7 +217,7 @@ class Module : public Struct, public NeverReadOnlySpaceObject {
void PrintStatusTransition(Status new_status);
#endif // DEBUG
- DISALLOW_IMPLICIT_CONSTRUCTORS(Module);
+ OBJECT_CONSTRUCTORS(Module, Struct);
};
// When importing a module namespace (import * as foo from "bar"), a
@@ -243,13 +250,20 @@ class JSModuleNamespace : public JSObject {
kInObjectFieldCount,
};
- static const int kModuleOffset = JSObject::kHeaderSize;
- static const int kHeaderSize = kModuleOffset + kPointerSize;
+// Layout description.
+#define JS_MODULE_NAMESPACE_FIELDS(V) \
+ V(kModuleOffset, kTaggedSize) \
+ /* Header size. */ \
+ V(kHeaderSize, 0) \
+ V(kInObjectFieldsOffset, kTaggedSize* kInObjectFieldCount) \
+ /* Total size. */ \
+ V(kSize, 0)
- static const int kSize = kHeaderSize + kPointerSize * kInObjectFieldCount;
+ DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
+ JS_MODULE_NAMESPACE_FIELDS)
+#undef JS_MODULE_NAMESPACE_FIELDS
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(JSModuleNamespace);
+ OBJECT_CONSTRUCTORS(JSModuleNamespace, JSObject);
};
// ModuleInfo is to ModuleDescriptor what ScopeInfo is to Scope.
@@ -260,21 +274,21 @@ class ModuleInfo : public FixedArray {
static Handle<ModuleInfo> New(Isolate* isolate, Zone* zone,
ModuleDescriptor* descr);
- inline FixedArray* module_requests() const;
- inline FixedArray* special_exports() const;
- inline FixedArray* regular_exports() const;
- inline FixedArray* regular_imports() const;
- inline FixedArray* namespace_imports() const;
- inline FixedArray* module_request_positions() const;
+ inline FixedArray module_requests() const;
+ inline FixedArray special_exports() const;
+ inline FixedArray regular_exports() const;
+ inline FixedArray regular_imports() const;
+ inline FixedArray namespace_imports() const;
+ inline FixedArray module_request_positions() const;
// Accessors for [regular_exports].
int RegularExportCount() const;
- String* RegularExportLocalName(int i) const;
+ String RegularExportLocalName(int i) const;
int RegularExportCellIndex(int i) const;
- FixedArray* RegularExportExportNames(int i) const;
+ FixedArray RegularExportExportNames(int i) const;
#ifdef DEBUG
- inline bool Equals(ModuleInfo* other) const;
+ inline bool Equals(ModuleInfo other) const;
#endif
private:
@@ -295,7 +309,7 @@ class ModuleInfo : public FixedArray {
kRegularExportExportNamesOffset,
kRegularExportLength
};
- DISALLOW_IMPLICIT_CONSTRUCTORS(ModuleInfo);
+ OBJECT_CONSTRUCTORS(ModuleInfo, FixedArray);
};
class ModuleInfoEntry : public Struct {
@@ -319,17 +333,22 @@ class ModuleInfoEntry : public Struct {
int module_request, int cell_index,
int beg_pos, int end_pos);
- static const int kExportNameOffset = HeapObject::kHeaderSize;
- static const int kLocalNameOffset = kExportNameOffset + kPointerSize;
- static const int kImportNameOffset = kLocalNameOffset + kPointerSize;
- static const int kModuleRequestOffset = kImportNameOffset + kPointerSize;
- static const int kCellIndexOffset = kModuleRequestOffset + kPointerSize;
- static const int kBegPosOffset = kCellIndexOffset + kPointerSize;
- static const int kEndPosOffset = kBegPosOffset + kPointerSize;
- static const int kSize = kEndPosOffset + kPointerSize;
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(ModuleInfoEntry);
+// Layout description.
+#define MODULE_INFO_FIELDS(V) \
+ V(kExportNameOffset, kTaggedSize) \
+ V(kLocalNameOffset, kTaggedSize) \
+ V(kImportNameOffset, kTaggedSize) \
+ V(kModuleRequestOffset, kTaggedSize) \
+ V(kCellIndexOffset, kTaggedSize) \
+ V(kBegPosOffset, kTaggedSize) \
+ V(kEndPosOffset, kTaggedSize) \
+ /* Total size. */ \
+ V(kSize, 0)
+
+ DEFINE_FIELD_OFFSET_CONSTANTS(Struct::kHeaderSize, MODULE_INFO_FIELDS)
+#undef MODULE_INFO_FIELDS
+
+ OBJECT_CONSTRUCTORS(ModuleInfoEntry, Struct);
};
} // namespace internal
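
Module::GetCell above dispatches on ModuleDescriptor::GetCellIndexKind and recovers an array slot via ImportIndex/ExportIndex. The usual V8 convention, sketched here with the sign encoding assumed rather than quoted from the sources, packs both kinds into one signed index space:

    #include <cassert>

    enum class CellIndexKind { kInvalid, kExport, kImport };

    // Assumed encoding: exports use positive indices, imports negative,
    // zero is invalid -- one signed int names both the kind and the slot.
    CellIndexKind GetCellIndexKind(int cell_index) {
      if (cell_index > 0) return CellIndexKind::kExport;
      if (cell_index < 0) return CellIndexKind::kImport;
      return CellIndexKind::kInvalid;
    }

    int ExportIndex(int cell_index) { return cell_index - 1; }   // 1-based -> slot
    int ImportIndex(int cell_index) { return -cell_index - 1; }  // mirrored

    int main() {
      assert(GetCellIndexKind(3) == CellIndexKind::kExport && ExportIndex(3) == 2);
      assert(GetCellIndexKind(-3) == CellIndexKind::kImport && ImportIndex(-3) == 2);
    }
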
diff --git a/deps/v8/src/objects/name-inl.h b/deps/v8/src/objects/name-inl.h
index 512e47875c..3fda66f2a1 100644
--- a/deps/v8/src/objects/name-inl.h
+++ b/deps/v8/src/objects/name-inl.h
@@ -8,6 +8,8 @@
#include "src/objects/name.h"
#include "src/heap/heap-inl.h"
+#include "src/heap/heap-write-barrier.h"
+#include "src/objects/map-inl.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -15,6 +17,9 @@
namespace v8 {
namespace internal {
+OBJECT_CONSTRUCTORS_IMPL(Name, HeapObject)
+OBJECT_CONSTRUCTORS_IMPL(Symbol, Name)
+
CAST_ACCESSOR(Name)
CAST_ACCESSOR(Symbol)
@@ -27,17 +32,17 @@ BIT_FIELD_ACCESSORS(Symbol, flags, is_public, Symbol::IsPublicBit)
BIT_FIELD_ACCESSORS(Symbol, flags, is_interesting_symbol,
Symbol::IsInterestingSymbolBit)
-bool Symbol::is_private_field() const {
- bool value = Symbol::IsPrivateFieldBit::decode(flags());
+bool Symbol::is_private_name() const {
+ bool value = Symbol::IsPrivateNameBit::decode(flags());
DCHECK_IMPLIES(value, is_private());
return value;
}
-void Symbol::set_is_private_field() {
+void Symbol::set_is_private_name() {
// TODO(gsathya): Re-order the bits to have these next to each other
// and just do the bit shifts once.
set_flags(Symbol::IsPrivateBit::update(flags(), true));
- set_flags(Symbol::IsPrivateFieldBit::update(flags(), true));
+ set_flags(Symbol::IsPrivateNameBit::update(flags(), true));
}
bool Name::IsUniqueName() const {
@@ -54,13 +59,13 @@ void Name::set_hash_field(uint32_t value) {
WRITE_UINT32_FIELD(this, kHashFieldOffset, value);
}
-bool Name::Equals(Name* other) {
- if (other == this) return true;
+bool Name::Equals(Name other) {
+ if (other == *this) return true;
if ((this->IsInternalizedString() && other->IsInternalizedString()) ||
this->IsSymbol() || other->IsSymbol()) {
return false;
}
- return String::cast(this)->SlowEquals(String::cast(other));
+ return String::cast(*this)->SlowEquals(String::cast(other));
}
bool Name::Equals(Isolate* isolate, Handle<Name> one, Handle<Name> two) {
@@ -86,27 +91,27 @@ uint32_t Name::Hash() {
// Slow case: compute hash code and set it. Has to be a string.
// Also the string must be writable, because read-only strings will have their
// hash values precomputed.
- return String::cast(this)->ComputeAndSetHash(
- Heap::FromWritableHeapObject(this)->isolate());
+ return String::cast(*this)->ComputeAndSetHash(
+ Heap::FromWritableHeapObject(*this)->isolate());
}
bool Name::IsInterestingSymbol() const {
- return IsSymbol() && Symbol::cast(this)->is_interesting_symbol();
+ return IsSymbol() && Symbol::cast(*this)->is_interesting_symbol();
}
bool Name::IsPrivate() {
- return this->IsSymbol() && Symbol::cast(this)->is_private();
+ return this->IsSymbol() && Symbol::cast(*this)->is_private();
}
-bool Name::IsPrivateField() {
- bool is_private_field =
- this->IsSymbol() && Symbol::cast(this)->is_private_field();
- DCHECK_IMPLIES(is_private_field, IsPrivate());
- return is_private_field;
+bool Name::IsPrivateName() {
+ bool is_private_name =
+ this->IsSymbol() && Symbol::cast(*this)->is_private_name();
+ DCHECK_IMPLIES(is_private_name, IsPrivate());
+ return is_private_name;
}
bool Name::AsArrayIndex(uint32_t* index) {
- return IsString() && String::cast(this)->AsArrayIndex(index);
+ return IsString() && String::cast(*this)->AsArrayIndex(index);
}
// static
diff --git a/deps/v8/src/objects/name.h b/deps/v8/src/objects/name.h
index bcc1f2c27d..c3c7fd68da 100644
--- a/deps/v8/src/objects/name.h
+++ b/deps/v8/src/objects/name.h
@@ -6,6 +6,7 @@
#define V8_OBJECTS_NAME_H_
#include "src/objects.h"
+#include "src/objects/heap-object.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -28,7 +29,7 @@ class Name : public HeapObject {
inline uint32_t Hash();
// Equality operations.
- inline bool Equals(Name* other);
+ inline bool Equals(Name other);
inline static bool Equals(Isolate* isolate, Handle<Name> one,
Handle<Name> two);
@@ -45,9 +46,9 @@ class Name : public HeapObject {
// If the name is private, it can only name own properties.
inline bool IsPrivate();
- // If the name is a private field, it should behave like a private
+ // If the name is a private name, it should behave like a private
// symbol but also throw on property access miss.
- inline bool IsPrivateField();
+ inline bool IsPrivateName();
inline bool IsUniqueName() const;
@@ -130,8 +131,7 @@ class Name : public HeapObject {
protected:
static inline bool IsHashFieldComputed(uint32_t field);
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(Name);
+ OBJECT_CONSTRUCTORS(Name, HeapObject);
};
// ES6 symbols.
@@ -161,13 +161,13 @@ class Symbol : public Name {
// Symbol.keyFor on such a symbol simply needs to return the attached name.
DECL_BOOLEAN_ACCESSORS(is_public)
- // [is_private_field]: Whether this is a private field. Private fields
+ // [is_private_name]: Whether this is a private name. Private names
// are the same as private symbols except they throw on missing
// property access.
//
// This also sets the is_private bit.
- inline bool is_private_field() const;
- inline void set_is_private_field();
+ inline bool is_private_name() const;
+ inline void set_is_private_name();
DECL_CAST(Symbol)
@@ -176,9 +176,14 @@ class Symbol : public Name {
DECL_VERIFIER(Symbol)
// Layout description.
- static const int kFlagsOffset = Name::kHeaderSize;
- static const int kNameOffset = kFlagsOffset + kInt32Size;
- static const int kSize = kNameOffset + kPointerSize;
+#define SYMBOL_FIELDS(V) \
+ V(kFlagsOffset, kInt32Size) \
+ V(kNameOffset, kTaggedSize) \
+ /* Header size. */ \
+ V(kSize, 0)
+
+ DEFINE_FIELD_OFFSET_CONSTANTS(Name::kHeaderSize, SYMBOL_FIELDS)
+#undef SYMBOL_FIELDS
// Flags layout.
#define FLAGS_BIT_FIELDS(V, _) \
@@ -186,7 +191,7 @@ class Symbol : public Name {
V(IsWellKnownSymbolBit, bool, 1, _) \
V(IsPublicBit, bool, 1, _) \
V(IsInterestingSymbolBit, bool, 1, _) \
- V(IsPrivateFieldBit, bool, 1, _)
+ V(IsPrivateNameBit, bool, 1, _)
DEFINE_BIT_FIELDS(FLAGS_BIT_FIELDS)
#undef FLAGS_BIT_FIELDS
@@ -201,7 +206,7 @@ class Symbol : public Name {
// TODO(cbruni): remove once the new maptracer is in place.
friend class Name; // For PrivateSymbolToName.
- DISALLOW_IMPLICIT_CONSTRUCTORS(Symbol);
+ OBJECT_CONSTRUCTORS(Symbol, Name);
};
} // namespace internal
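
Symbol packs is_private, is_well_known_symbol, is_public, is_interesting_symbol and the renamed is_private_name into one 32-bit flags word via DEFINE_BIT_FIELDS. The underlying BitField utility is a shift-and-mask template along these lines (simplified sketch, not V8's exact class):

    #include <cassert>
    #include <cstdint>

    // Minimal stand-in for V8's BitField: a typed view of <size> bits
    // starting at <shift> inside an integer flags word.
    template <typename T, int shift, int size>
    struct BitField {
      static constexpr uint32_t kMask = ((uint32_t{1} << size) - 1) << shift;
      static constexpr uint32_t encode(T value) {
        return static_cast<uint32_t>(value) << shift;
      }
      static constexpr T decode(uint32_t flags) {
        return static_cast<T>((flags & kMask) >> shift);
      }
      static constexpr uint32_t update(uint32_t flags, T value) {
        return (flags & ~kMask) | encode(value);
      }
    };

    // FLAGS_BIT_FIELDS above assigns each flag one consecutive bit.
    using IsPrivateBit = BitField<bool, 0, 1>;
    using IsPrivateNameBit = BitField<bool, 4, 1>;  // fifth flag in the list

    int main() {
      uint32_t flags = 0;
      flags = IsPrivateBit::update(flags, true);      // set_is_private_name sets
      flags = IsPrivateNameBit::update(flags, true);  // both bits, per name-inl.h
      assert(IsPrivateBit::decode(flags) && IsPrivateNameBit::decode(flags));
    }
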
diff --git a/deps/v8/src/objects/object-macros-undef.h b/deps/v8/src/objects/object-macros-undef.h
index a0c19cab5c..6f243eb19f 100644
--- a/deps/v8/src/objects/object-macros-undef.h
+++ b/deps/v8/src/objects/object-macros-undef.h
@@ -2,20 +2,36 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+// Generate this file using the update-object-macros-undef.py script.
+
// PRESUBMIT_INTENTIONALLY_MISSING_INCLUDE_GUARD
+#undef OBJECT_CONSTRUCTORS
+#undef OBJECT_CONSTRUCTORS_IMPL
+#undef NEVER_READ_ONLY_SPACE
+#undef NEVER_READ_ONLY_SPACE_IMPL
#undef DECL_PRIMITIVE_ACCESSORS
#undef DECL_BOOLEAN_ACCESSORS
#undef DECL_INT_ACCESSORS
+#undef DECL_INT32_ACCESSORS
+#undef DECL_UINT16_ACCESSORS
+#undef DECL_UINT8_ACCESSORS
#undef DECL_ACCESSORS
#undef DECL_CAST
#undef CAST_ACCESSOR
#undef INT_ACCESSORS
+#undef INT32_ACCESSORS
+#undef RELAXED_INT32_ACCESSORS
+#undef UINT16_ACCESSORS
+#undef UINT8_ACCESSORS
#undef ACCESSORS_CHECKED2
#undef ACCESSORS_CHECKED
#undef ACCESSORS
-#undef WEAK_ACCESSORS_CHECKED
+#undef SYNCHRONIZED_ACCESSORS_CHECKED2
+#undef SYNCHRONIZED_ACCESSORS_CHECKED
+#undef SYNCHRONIZED_ACCESSORS
#undef WEAK_ACCESSORS_CHECKED2
+#undef WEAK_ACCESSORS_CHECKED
#undef WEAK_ACCESSORS
#undef SMI_ACCESSORS_CHECKED
#undef SMI_ACCESSORS
@@ -31,26 +47,32 @@
#undef READ_WEAK_FIELD
#undef ACQUIRE_READ_FIELD
#undef RELAXED_READ_FIELD
+#undef RELAXED_READ_WEAK_FIELD
#undef WRITE_FIELD
#undef WRITE_WEAK_FIELD
#undef RELEASE_WRITE_FIELD
#undef RELAXED_WRITE_FIELD
#undef WRITE_BARRIER
+#undef WEAK_WRITE_BARRIER
#undef CONDITIONAL_WRITE_BARRIER
#undef CONDITIONAL_WEAK_WRITE_BARRIER
#undef READ_DOUBLE_FIELD
#undef WRITE_DOUBLE_FIELD
#undef READ_INT_FIELD
#undef WRITE_INT_FIELD
-#undef READ_INTPTR_FIELD
-#undef WRITE_INTPTR_FIELD
+#undef ACQUIRE_READ_INTPTR_FIELD
#undef RELAXED_READ_INTPTR_FIELD
+#undef READ_INTPTR_FIELD
+#undef RELEASE_WRITE_INTPTR_FIELD
#undef RELAXED_WRITE_INTPTR_FIELD
+#undef WRITE_INTPTR_FIELD
#undef READ_UINTPTR_FIELD
#undef WRITE_UINTPTR_FIELD
#undef READ_UINT8_FIELD
#undef WRITE_UINT8_FIELD
+#undef RELAXED_WRITE_INT8_FIELD
#undef READ_INT8_FIELD
+#undef RELAXED_READ_INT8_FIELD
#undef WRITE_INT8_FIELD
#undef READ_UINT16_FIELD
#undef WRITE_UINT16_FIELD
@@ -59,7 +81,9 @@
#undef READ_UINT32_FIELD
#undef WRITE_UINT32_FIELD
#undef READ_INT32_FIELD
+#undef RELAXED_READ_INT32_FIELD
#undef WRITE_INT32_FIELD
+#undef RELAXED_WRITE_INT32_FIELD
#undef READ_FLOAT_FIELD
#undef WRITE_FLOAT_FIELD
#undef READ_UINT64_FIELD
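object-macros-undef.h only mirrors object-macros.h, #undef-ing every helper so the macros cannot leak past the header that expanded them. A self-contained sketch of the convention (the V8 files split the define and undef halves across two headers; here both live in one file):

#include <cassert>

// Define an accessor macro, expand it inside a class, then #undef it
// immediately, mirroring the object-macros.h / object-macros-undef.h pair.
#define DECL_INT_ACCESSORS(name)                  \
  int name() const { return name##_; }            \
  void set_##name(int value) { name##_ = value; }

class Box {
 public:
  DECL_INT_ACCESSORS(size)
 private:
  int size_ = 0;
};

#undef DECL_INT_ACCESSORS  // what the undef header does for every macro

int main() {
  Box b;
  b.set_size(3);
  assert(b.size() == 3);
}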
diff --git a/deps/v8/src/objects/object-macros.h b/deps/v8/src/objects/object-macros.h
index c97f59f9c0..a125251d63 100644
--- a/deps/v8/src/objects/object-macros.h
+++ b/deps/v8/src/objects/object-macros.h
@@ -16,6 +16,34 @@
#include <src/v8memory.h>
+// Since this changes visibility, it should always be last in a class
+// definition.
+#define OBJECT_CONSTRUCTORS(Type, ...) \
+ public: \
+ constexpr Type() : __VA_ARGS__() {} \
+ Type* operator->() { return this; } \
+ const Type* operator->() const { return this; } \
+ \
+ protected: \
+ explicit inline Type(Address ptr);
+
+#define OBJECT_CONSTRUCTORS_IMPL(Type, Super) \
+ inline Type::Type(Address ptr) : Super(ptr) { SLOW_DCHECK(Is##Type()); }
+
+#define NEVER_READ_ONLY_SPACE \
+ inline Heap* GetHeap() const; \
+ inline Isolate* GetIsolate() const;
+
+// TODO(leszeks): Add checks in the factory that we never allocate these
+// objects in RO space.
+#define NEVER_READ_ONLY_SPACE_IMPL(Type) \
+ Heap* Type::GetHeap() const { \
+ return NeverReadOnlySpaceObject::GetHeap(*this); \
+ } \
+ Isolate* Type::GetIsolate() const { \
+ return NeverReadOnlySpaceObject::GetIsolate(*this); \
+ }
+
#define DECL_PRIMITIVE_ACCESSORS(name, type) \
inline type name() const; \
inline void set_##name(type value);
@@ -30,29 +58,28 @@
inline uint16_t name() const; \
inline void set_##name(int value);
+#define DECL_INT16_ACCESSORS(name) \
+ inline int16_t name() const; \
+ inline void set_##name(int16_t value);
+
#define DECL_UINT8_ACCESSORS(name) \
inline uint8_t name() const; \
inline void set_##name(int value);
-#define DECL_ACCESSORS(name, type) \
- inline type* name() const; \
- inline void set_##name(type* value, \
+#define DECL_ACCESSORS(name, type) \
+ inline type name() const; \
+ inline void set_##name(type value, \
WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
-#define DECL_CAST(type) \
- V8_INLINE static type* cast(Object* object); \
- V8_INLINE static const type* cast(const Object* object);
-
-#define CAST_ACCESSOR(type) \
- type* type::cast(Object* object) { \
- SLOW_DCHECK(object->Is##type()); \
- return reinterpret_cast<type*>(object); \
- } \
- const type* type::cast(const Object* object) { \
- SLOW_DCHECK(object->Is##type()); \
- return reinterpret_cast<const type*>(object); \
+#define DECL_CAST(Type) \
+ V8_INLINE static Type cast(Object object); \
+ V8_INLINE static Type unchecked_cast(Object object) { \
+ return bit_cast<Type>(object); \
}
+#define CAST_ACCESSOR(Type) \
+ Type Type::cast(Object object) { return Type(object.ptr()); }
+
#define INT_ACCESSORS(holder, name, offset) \
int holder::name() const { return READ_INT_FIELD(this, offset); } \
void holder::set_##name(int value) { WRITE_INT_FIELD(this, offset, value); }
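The reworked DECL_CAST/CAST_ACCESSOR above rebuilds the value type from the tagged address rather than reinterpret_cast-ing an Object* pointer. A standalone sketch under the same simplified stand-in types as before:

#include <cassert>
#include <cstdint>

using Address = uintptr_t;

class Object {
 public:
  explicit Object(Address ptr) : ptr_(ptr) {}
  Address ptr() const { return ptr_; }

 private:
  Address ptr_;
};

class Smi : public Object {
 public:
  explicit Smi(Address ptr) : Object(ptr) {}
  // As in the new CAST_ACCESSOR: reconstruct the value type from the
  // tagged address instead of reinterpreting a pointer.
  static Smi cast(Object object) { return Smi(object.ptr()); }
};

int main() {
  Object o(42);
  Smi s = Smi::cast(o);
  assert(s.ptr() == 42);
}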
@@ -63,6 +90,14 @@
WRITE_INT32_FIELD(this, offset, value); \
}
+#define RELAXED_INT32_ACCESSORS(holder, name, offset) \
+ int32_t holder::name() const { \
+ return RELAXED_READ_INT32_FIELD(this, offset); \
+ } \
+ void holder::set_##name(int32_t value) { \
+ RELAXED_WRITE_INT32_FIELD(this, offset, value); \
+ }
+
#define UINT16_ACCESSORS(holder, name, offset) \
uint16_t holder::name() const { return READ_UINT16_FIELD(this, offset); } \
void holder::set_##name(int value) { \
@@ -81,33 +116,54 @@
#define ACCESSORS_CHECKED2(holder, name, type, offset, get_condition, \
set_condition) \
- type* holder::name() const { \
- type* value = type::cast(READ_FIELD(this, offset)); \
+ type holder::name() const { \
+ type value = type::cast(READ_FIELD(*this, offset)); \
DCHECK(get_condition); \
return value; \
} \
- void holder::set_##name(type* value, WriteBarrierMode mode) { \
+ void holder::set_##name(type value, WriteBarrierMode mode) { \
DCHECK(set_condition); \
- WRITE_FIELD(this, offset, value); \
- CONDITIONAL_WRITE_BARRIER(this, offset, value, mode); \
+ WRITE_FIELD(*this, offset, value); \
+ CONDITIONAL_WRITE_BARRIER(*this, offset, value, mode); \
}
+
#define ACCESSORS_CHECKED(holder, name, type, offset, condition) \
ACCESSORS_CHECKED2(holder, name, type, offset, condition, condition)
#define ACCESSORS(holder, name, type, offset) \
ACCESSORS_CHECKED(holder, name, type, offset, true)
-#define WEAK_ACCESSORS_CHECKED2(holder, name, offset, get_condition, \
- set_condition) \
- MaybeObject* holder::name() const { \
- MaybeObject* value = READ_WEAK_FIELD(this, offset); \
- DCHECK(get_condition); \
- return value; \
- } \
- void holder::set_##name(MaybeObject* value, WriteBarrierMode mode) { \
- DCHECK(set_condition); \
- WRITE_WEAK_FIELD(this, offset, value); \
- CONDITIONAL_WEAK_WRITE_BARRIER(this, offset, value, mode); \
+#define SYNCHRONIZED_ACCESSORS_CHECKED2(holder, name, type, offset, \
+ get_condition, set_condition) \
+ type holder::name() const { \
+ type value = type::cast(ACQUIRE_READ_FIELD(*this, offset)); \
+ DCHECK(get_condition); \
+ return value; \
+ } \
+ void holder::set_##name(type value, WriteBarrierMode mode) { \
+ DCHECK(set_condition); \
+ RELEASE_WRITE_FIELD(*this, offset, value); \
+ CONDITIONAL_WRITE_BARRIER(*this, offset, value, mode); \
+ }
+
+#define SYNCHRONIZED_ACCESSORS_CHECKED(holder, name, type, offset, condition) \
+ SYNCHRONIZED_ACCESSORS_CHECKED2(holder, name, type, offset, condition, \
+ condition)
+
+#define SYNCHRONIZED_ACCESSORS(holder, name, type, offset) \
+ SYNCHRONIZED_ACCESSORS_CHECKED(holder, name, type, offset, true)
+
+#define WEAK_ACCESSORS_CHECKED2(holder, name, offset, get_condition, \
+ set_condition) \
+ MaybeObject holder::name() const { \
+ MaybeObject value = READ_WEAK_FIELD(*this, offset); \
+ DCHECK(get_condition); \
+ return value; \
+ } \
+ void holder::set_##name(MaybeObject value, WriteBarrierMode mode) { \
+ DCHECK(set_condition); \
+ WRITE_WEAK_FIELD(*this, offset, value); \
+ CONDITIONAL_WEAK_WRITE_BARRIER(*this, offset, value, mode); \
}
#define WEAK_ACCESSORS_CHECKED(holder, name, offset, condition) \
@@ -120,33 +176,33 @@
#define SMI_ACCESSORS_CHECKED(holder, name, offset, condition) \
int holder::name() const { \
DCHECK(condition); \
- Object* value = READ_FIELD(this, offset); \
+ Object value = READ_FIELD(*this, offset); \
return Smi::ToInt(value); \
} \
void holder::set_##name(int value) { \
DCHECK(condition); \
- WRITE_FIELD(this, offset, Smi::FromInt(value)); \
+ WRITE_FIELD(*this, offset, Smi::FromInt(value)); \
}
#define SMI_ACCESSORS(holder, name, offset) \
SMI_ACCESSORS_CHECKED(holder, name, offset, true)
-#define SYNCHRONIZED_SMI_ACCESSORS(holder, name, offset) \
- int holder::synchronized_##name() const { \
- Object* value = ACQUIRE_READ_FIELD(this, offset); \
- return Smi::ToInt(value); \
- } \
- void holder::synchronized_set_##name(int value) { \
- RELEASE_WRITE_FIELD(this, offset, Smi::FromInt(value)); \
+#define SYNCHRONIZED_SMI_ACCESSORS(holder, name, offset) \
+ int holder::synchronized_##name() const { \
+ Object value = ACQUIRE_READ_FIELD(*this, offset); \
+ return Smi::ToInt(value); \
+ } \
+ void holder::synchronized_set_##name(int value) { \
+ RELEASE_WRITE_FIELD(*this, offset, Smi::FromInt(value)); \
}
-#define RELAXED_SMI_ACCESSORS(holder, name, offset) \
- int holder::relaxed_read_##name() const { \
- Object* value = RELAXED_READ_FIELD(this, offset); \
- return Smi::ToInt(value); \
- } \
- void holder::relaxed_write_##name(int value) { \
- RELAXED_WRITE_FIELD(this, offset, Smi::FromInt(value)); \
+#define RELAXED_SMI_ACCESSORS(holder, name, offset) \
+ int holder::relaxed_read_##name() const { \
+ Object value = RELAXED_READ_FIELD(*this, offset); \
+ return Smi::ToInt(value); \
+ } \
+ void holder::relaxed_write_##name(int value) { \
+ RELAXED_WRITE_FIELD(*this, offset, Smi::FromInt(value)); \
}
#define BOOL_GETTER(holder, field, name, offset) \
@@ -176,92 +232,84 @@
return InstanceTypeChecker::Is##type(map()->instance_type()); \
}
-#define FIELD_ADDR(p, offset) \
- (reinterpret_cast<Address>(p) + offset - kHeapObjectTag)
+#define RELAXED_INT16_ACCESSORS(holder, name, offset) \
+ int16_t holder::name() const { \
+ return RELAXED_READ_INT16_FIELD(*this, offset); \
+ } \
+ void holder::set_##name(int16_t value) { \
+ RELAXED_WRITE_INT16_FIELD(*this, offset, value); \
+ }
-#define READ_FIELD(p, offset) \
- (*reinterpret_cast<Object* const*>(FIELD_ADDR(p, offset)))
+#define FIELD_ADDR(p, offset) ((p)->ptr() + offset - kHeapObjectTag)
-#define READ_WEAK_FIELD(p, offset) \
- (*reinterpret_cast<MaybeObject* const*>(FIELD_ADDR(p, offset)))
+#define READ_FIELD(p, offset) (*ObjectSlot(FIELD_ADDR(p, offset)))
-#define ACQUIRE_READ_FIELD(p, offset) \
- reinterpret_cast<Object*>(base::Acquire_Load( \
- reinterpret_cast<const base::AtomicWord*>(FIELD_ADDR(p, offset))))
+#define READ_WEAK_FIELD(p, offset) (*MaybeObjectSlot(FIELD_ADDR(p, offset)))
-#define RELAXED_READ_FIELD(p, offset) \
- reinterpret_cast<Object*>(base::Relaxed_Load( \
- reinterpret_cast<const base::AtomicWord*>(FIELD_ADDR(p, offset))))
+#define ACQUIRE_READ_FIELD(p, offset) \
+ ObjectSlot(FIELD_ADDR(p, offset)).Acquire_Load()
-#define RELAXED_READ_WEAK_FIELD(p, offset) \
- reinterpret_cast<MaybeObject*>(base::Relaxed_Load( \
- reinterpret_cast<const base::AtomicWord*>(FIELD_ADDR(p, offset))))
+#define RELAXED_READ_FIELD(p, offset) \
+ ObjectSlot(FIELD_ADDR(p, offset)).Relaxed_Load()
+
+#define RELAXED_READ_WEAK_FIELD(p, offset) \
+ MaybeObjectSlot(FIELD_ADDR(p, offset)).Relaxed_Load()
#ifdef V8_CONCURRENT_MARKING
-#define WRITE_FIELD(p, offset, value) \
- base::Relaxed_Store( \
- reinterpret_cast<base::AtomicWord*>(FIELD_ADDR(p, offset)), \
- reinterpret_cast<base::AtomicWord>(value));
-#define WRITE_WEAK_FIELD(p, offset, value) \
- base::Relaxed_Store( \
- reinterpret_cast<base::AtomicWord*>(FIELD_ADDR(p, offset)), \
- reinterpret_cast<base::AtomicWord>(value));
+#define WRITE_FIELD(p, offset, value) \
+ ObjectSlot(FIELD_ADDR(p, offset)).Relaxed_Store(value)
+#define WRITE_WEAK_FIELD(p, offset, value) \
+ MaybeObjectSlot(FIELD_ADDR(p, offset)).Relaxed_Store(value)
#else
#define WRITE_FIELD(p, offset, value) \
- (*reinterpret_cast<Object**>(FIELD_ADDR(p, offset)) = value)
+ ObjectSlot(FIELD_ADDR(p, offset)).store(value)
#define WRITE_WEAK_FIELD(p, offset, value) \
- (*reinterpret_cast<MaybeObject**>(FIELD_ADDR(p, offset)) = value)
+ MaybeObjectSlot(FIELD_ADDR(p, offset)).store(value)
#endif
-#define RELEASE_WRITE_FIELD(p, offset, value) \
- base::Release_Store( \
- reinterpret_cast<base::AtomicWord*>(FIELD_ADDR(p, offset)), \
- reinterpret_cast<base::AtomicWord>(value));
+#define RELEASE_WRITE_FIELD(p, offset, value) \
+ ObjectSlot(FIELD_ADDR(p, offset)).Release_Store(value)
-#define RELAXED_WRITE_FIELD(p, offset, value) \
- base::Relaxed_Store( \
- reinterpret_cast<base::AtomicWord*>(FIELD_ADDR(p, offset)), \
- reinterpret_cast<base::AtomicWord>(value));
+#define RELAXED_WRITE_FIELD(p, offset, value) \
+ ObjectSlot(FIELD_ADDR(p, offset)).Relaxed_Store(value)
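The READ_FIELD/WRITE_FIELD family now routes through ObjectSlot/MaybeObjectSlot instead of raw reinterpret_casts. A rough standalone sketch of such a slot, with std::atomic standing in for V8's base:: atomics (the cast from a raw address to std::atomic is a sketch-level simplification, not a portability guarantee):

#include <atomic>
#include <cassert>
#include <cstdint>

using Address = uintptr_t;

// A slot wraps the address of a tagged field and centralizes plain and
// atomic accesses that were previously scattered reinterpret_casts.
class ObjectSlot {
 public:
  explicit ObjectSlot(Address location) : location_(location) {}
  void store(Address value) const {
    *reinterpret_cast<Address*>(location_) = value;
  }
  Address Relaxed_Load() const {
    return reinterpret_cast<std::atomic<Address>*>(location_)
        ->load(std::memory_order_relaxed);
  }
  void Relaxed_Store(Address value) const {
    reinterpret_cast<std::atomic<Address>*>(location_)
        ->store(value, std::memory_order_relaxed);
  }

 private:
  Address location_;
};

int main() {
  Address field = 0;
  ObjectSlot slot(reinterpret_cast<Address>(&field));
  slot.Relaxed_Store(7);
  assert(slot.Relaxed_Load() == 7);
}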
-#define WRITE_BARRIER(object, offset, value) \
- do { \
- DCHECK_NOT_NULL(Heap::FromWritableHeapObject(object)); \
- MarkingBarrier(object, HeapObject::RawField(object, offset), value); \
- GenerationalBarrier(object, HeapObject::RawField(object, offset), value); \
- } while (false)
+#define RELAXED_WRITE_WEAK_FIELD(p, offset, value) \
+ MaybeObjectSlot(FIELD_ADDR(p, offset)).Relaxed_Store(value)
-#define WEAK_WRITE_BARRIER(object, offset, value) \
- do { \
- DCHECK_NOT_NULL(Heap::FromWritableHeapObject(object)); \
- MarkingBarrier(object, HeapObject::RawMaybeWeakField(object, offset), \
- value); \
- GenerationalBarrier(object, HeapObject::RawMaybeWeakField(object, offset), \
- value); \
+#define WRITE_BARRIER(object, offset, value) \
+ do { \
+ DCHECK_NOT_NULL(Heap::FromWritableHeapObject(object)); \
+ MarkingBarrier(object, (object)->RawField(offset), value); \
+ GenerationalBarrier(object, (object)->RawField(offset), value); \
} while (false)
-#define CONDITIONAL_WRITE_BARRIER(object, offset, value, mode) \
+#define WEAK_WRITE_BARRIER(object, offset, value) \
do { \
DCHECK_NOT_NULL(Heap::FromWritableHeapObject(object)); \
- if (mode != SKIP_WRITE_BARRIER) { \
- if (mode == UPDATE_WRITE_BARRIER) { \
- MarkingBarrier(object, HeapObject::RawField(object, offset), value); \
- } \
- GenerationalBarrier(object, HeapObject::RawField(object, offset), \
- value); \
- } \
+ MarkingBarrier(object, (object)->RawMaybeWeakField(offset), value); \
+ GenerationalBarrier(object, (object)->RawMaybeWeakField(offset), value); \
} while (false)
-#define CONDITIONAL_WEAK_WRITE_BARRIER(object, offset, value, mode) \
- do { \
- DCHECK_NOT_NULL(Heap::FromWritableHeapObject(object)); \
- if (mode != SKIP_WRITE_BARRIER) { \
- if (mode == UPDATE_WRITE_BARRIER) { \
- MarkingBarrier(object, HeapObject::RawMaybeWeakField(object, offset), \
- value); \
- } \
- GenerationalBarrier( \
- object, HeapObject::RawMaybeWeakField(object, offset), value); \
- } \
+#define CONDITIONAL_WRITE_BARRIER(object, offset, value, mode) \
+ do { \
+ DCHECK_NOT_NULL(Heap::FromWritableHeapObject(object)); \
+ if (mode != SKIP_WRITE_BARRIER) { \
+ if (mode == UPDATE_WRITE_BARRIER) { \
+ MarkingBarrier(object, (object)->RawField(offset), value); \
+ } \
+ GenerationalBarrier(object, (object)->RawField(offset), value); \
+ } \
+ } while (false)
+
+#define CONDITIONAL_WEAK_WRITE_BARRIER(object, offset, value, mode) \
+ do { \
+ DCHECK_NOT_NULL(Heap::FromWritableHeapObject(object)); \
+ if (mode != SKIP_WRITE_BARRIER) { \
+ if (mode == UPDATE_WRITE_BARRIER) { \
+ MarkingBarrier(object, (object)->RawMaybeWeakField(offset), value); \
+ } \
+ GenerationalBarrier(object, (object)->RawMaybeWeakField(offset), value); \
+ } \
} while (false)
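The barrier macros above gate the marking and generational barriers on the WriteBarrierMode. A standalone sketch of just that control flow, with hypothetical counters standing in for MarkingBarrier/GenerationalBarrier:

#include <cassert>

enum WriteBarrierMode { SKIP_WRITE_BARRIER, UPDATE_WRITE_BARRIER };

// Hypothetical counters in place of the real barrier calls.
struct BarrierStats {
  int marking = 0;
  int generational = 0;
};

void ConditionalWriteBarrier(BarrierStats& stats, WriteBarrierMode mode) {
  if (mode != SKIP_WRITE_BARRIER) {
    if (mode == UPDATE_WRITE_BARRIER) stats.marking++;  // marking barrier
    stats.generational++;  // generational barrier runs unless skipped
  }
}

int main() {
  BarrierStats s;
  ConditionalWriteBarrier(s, UPDATE_WRITE_BARRIER);
  assert(s.marking == 1 && s.generational == 1);
  ConditionalWriteBarrier(s, SKIP_WRITE_BARRIER);
  assert(s.marking == 1 && s.generational == 1);  // skipped entirely
}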
#define READ_DOUBLE_FIELD(p, offset) ReadDoubleValue(FIELD_ADDR(p, offset))
@@ -275,6 +323,14 @@
#define WRITE_INT_FIELD(p, offset, value) \
(*reinterpret_cast<int*>(FIELD_ADDR(p, offset)) = value)
+#define ACQUIRE_READ_INTPTR_FIELD(p, offset) \
+ static_cast<intptr_t>(base::Acquire_Load( \
+ reinterpret_cast<const base::AtomicWord*>(FIELD_ADDR(p, offset))))
+
+#define ACQUIRE_READ_INT32_FIELD(p, offset) \
+ static_cast<int32_t>(base::Acquire_Load( \
+ reinterpret_cast<const base::Atomic32*>(FIELD_ADDR(p, offset))))
+
#define RELAXED_READ_INTPTR_FIELD(p, offset) \
static_cast<intptr_t>(base::Relaxed_Load( \
reinterpret_cast<const base::AtomicWord*>(FIELD_ADDR(p, offset))))
@@ -282,6 +338,11 @@
#define READ_INTPTR_FIELD(p, offset) \
(*reinterpret_cast<const intptr_t*>(FIELD_ADDR(p, offset)))
+#define RELEASE_WRITE_INTPTR_FIELD(p, offset, value) \
+ base::Release_Store( \
+ reinterpret_cast<base::AtomicWord*>(FIELD_ADDR(p, offset)), \
+ static_cast<base::AtomicWord>(value));
+
#define RELAXED_WRITE_INTPTR_FIELD(p, offset, value) \
base::Relaxed_Store( \
reinterpret_cast<base::AtomicWord*>(FIELD_ADDR(p, offset)), \
@@ -328,18 +389,50 @@
#define WRITE_INT16_FIELD(p, offset, value) \
(*reinterpret_cast<int16_t*>(FIELD_ADDR(p, offset)) = value)
+#define RELAXED_READ_INT16_FIELD(p, offset) \
+ static_cast<int16_t>(base::Relaxed_Load( \
+ reinterpret_cast<const base::Atomic16*>(FIELD_ADDR(p, offset))))
+
+#define RELAXED_WRITE_INT16_FIELD(p, offset, value) \
+ base::Relaxed_Store( \
+ reinterpret_cast<base::Atomic16*>(FIELD_ADDR(p, offset)), \
+ static_cast<base::Atomic16>(value));
+
#define READ_UINT32_FIELD(p, offset) \
(*reinterpret_cast<const uint32_t*>(FIELD_ADDR(p, offset)))
+#define RELAXED_READ_UINT32_FIELD(p, offset) \
+ static_cast<uint32_t>(base::Relaxed_Load( \
+ reinterpret_cast<const base::Atomic32*>(FIELD_ADDR(p, offset))))
+
#define WRITE_UINT32_FIELD(p, offset, value) \
(*reinterpret_cast<uint32_t*>(FIELD_ADDR(p, offset)) = value)
+#define RELAXED_WRITE_UINT32_FIELD(p, offset, value) \
+ base::Relaxed_Store( \
+ reinterpret_cast<base::Atomic32*>(FIELD_ADDR(p, offset)), \
+ static_cast<base::Atomic32>(value));
+
#define READ_INT32_FIELD(p, offset) \
(*reinterpret_cast<const int32_t*>(FIELD_ADDR(p, offset)))
+#define RELAXED_READ_INT32_FIELD(p, offset) \
+ static_cast<int32_t>(base::Relaxed_Load( \
+ reinterpret_cast<const base::Atomic32*>(FIELD_ADDR(p, offset))))
+
#define WRITE_INT32_FIELD(p, offset, value) \
(*reinterpret_cast<int32_t*>(FIELD_ADDR(p, offset)) = value)
+#define RELEASE_WRITE_INT32_FIELD(p, offset, value) \
+ base::Release_Store( \
+ reinterpret_cast<base::Atomic32*>(FIELD_ADDR(p, offset)), \
+ static_cast<base::Atomic32>(value))
+
+#define RELAXED_WRITE_INT32_FIELD(p, offset, value) \
+ base::Relaxed_Store( \
+ reinterpret_cast<base::Atomic32*>(FIELD_ADDR(p, offset)), \
+ static_cast<base::Atomic32>(value));
+
#define READ_FLOAT_FIELD(p, offset) \
(*reinterpret_cast<const float*>(FIELD_ADDR(p, offset)))
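The new RELAXED_/RELEASE_/ACQUIRE_ field accessors map directly onto memory-order arguments. A short sketch with std::atomic in place of V8's base:: wrappers:

#include <atomic>
#include <cassert>
#include <cstdint>

int main() {
  std::atomic<int32_t> field{0};
  // RELAXED_WRITE_INT32_FIELD: atomic, but imposes no ordering.
  field.store(1, std::memory_order_relaxed);
  // RELEASE_WRITE_INT32_FIELD: publishes earlier writes to acquire readers.
  field.store(2, std::memory_order_release);
  // ACQUIRE_READ_INT32_FIELD: pairs with the release store above.
  assert(field.load(std::memory_order_acquire) == 2);
}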
@@ -378,16 +471,16 @@
#define DECL_VERIFIER(Name)
#endif
-#define DEFINE_DEOPT_ELEMENT_ACCESSORS(name, type) \
- type* DeoptimizationData::name() { return type::cast(get(k##name##Index)); } \
- void DeoptimizationData::Set##name(type* value) { \
- set(k##name##Index, value); \
- }
+#define DEFINE_DEOPT_ELEMENT_ACCESSORS(name, type) \
+ type DeoptimizationData::name() const { \
+ return type::cast(get(k##name##Index)); \
+ } \
+ void DeoptimizationData::Set##name(type value) { set(k##name##Index, value); }
#define DEFINE_DEOPT_ENTRY_ACCESSORS(name, type) \
- type* DeoptimizationData::name(int i) { \
+ type DeoptimizationData::name(int i) const { \
return type::cast(get(IndexForEntry(i) + k##name##Offset)); \
} \
- void DeoptimizationData::Set##name(int i, type* value) { \
+ void DeoptimizationData::Set##name(int i, type value) { \
set(IndexForEntry(i) + k##name##Offset, value); \
}
diff --git a/deps/v8/src/objects/oddball-inl.h b/deps/v8/src/objects/oddball-inl.h
new file mode 100644
index 0000000000..554686a4b5
--- /dev/null
+++ b/deps/v8/src/objects/oddball-inl.h
@@ -0,0 +1,54 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_ODDBALL_INL_H_
+#define V8_OBJECTS_ODDBALL_INL_H_
+
+#include "src/objects/oddball.h"
+
+#include "src/heap/heap-write-barrier-inl.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+OBJECT_CONSTRUCTORS_IMPL(Oddball, HeapObject)
+
+CAST_ACCESSOR(Oddball)
+
+double Oddball::to_number_raw() const {
+ return READ_DOUBLE_FIELD(this, kToNumberRawOffset);
+}
+
+void Oddball::set_to_number_raw(double value) {
+ WRITE_DOUBLE_FIELD(this, kToNumberRawOffset, value);
+}
+
+void Oddball::set_to_number_raw_as_bits(uint64_t bits) {
+ WRITE_UINT64_FIELD(this, kToNumberRawOffset, bits);
+}
+
+ACCESSORS(Oddball, to_string, String, kToStringOffset)
+ACCESSORS(Oddball, to_number, Object, kToNumberOffset)
+ACCESSORS(Oddball, type_of, String, kTypeOfOffset)
+
+byte Oddball::kind() const { return Smi::ToInt(READ_FIELD(this, kKindOffset)); }
+
+void Oddball::set_kind(byte value) {
+ WRITE_FIELD(this, kKindOffset, Smi::FromInt(value));
+}
+
+// static
+Handle<Object> Oddball::ToNumber(Isolate* isolate, Handle<Oddball> input) {
+ return handle(input->to_number(), isolate);
+}
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_ODDBALL_INL_H_
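Oddball caches its conversions at startup, so to_string/to_number/typeof become plain field loads at runtime. A simplified stand-in illustrating the idea (an ordinary C++ struct, not the heap layout above):

#include <cassert>
#include <cmath>
#include <string>

// Each singleton caches its conversion results once, at startup.
struct OddballLike {
  double to_number_raw;   // cached ToNumber result, as a raw double
  std::string to_string;  // cached ToString result
  std::string type_of;    // cached typeof result
};

int main() {
  OddballLike undefined_oddball{std::nan(""), "undefined", "undefined"};
  assert(undefined_oddball.type_of == "undefined");
  assert(std::isnan(undefined_oddball.to_number_raw));  // ToNumber(undefined)
}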
diff --git a/deps/v8/src/objects/oddball.h b/deps/v8/src/objects/oddball.h
new file mode 100644
index 0000000000..8f6adf9743
--- /dev/null
+++ b/deps/v8/src/objects/oddball.h
@@ -0,0 +1,97 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_ODDBALL_H_
+#define V8_OBJECTS_ODDBALL_H_
+
+#include "src/objects/heap-object.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+// Oddball describes the singleton objects null, undefined, true, and false.
+class Oddball : public HeapObject {
+ public:
+ // [to_number_raw]: Cached raw to_number computed at startup.
+ inline double to_number_raw() const;
+ inline void set_to_number_raw(double value);
+ inline void set_to_number_raw_as_bits(uint64_t bits);
+
+ // [to_string]: Cached to_string computed at startup.
+ DECL_ACCESSORS(to_string, String)
+
+ // [to_number]: Cached to_number computed at startup.
+ DECL_ACCESSORS(to_number, Object)
+
+ // [type_of]: Cached type_of computed at startup.
+ DECL_ACCESSORS(type_of, String)
+
+ inline byte kind() const;
+ inline void set_kind(byte kind);
+
+ // ES6 section 7.1.3 ToNumber for Boolean, Null, Undefined.
+ V8_WARN_UNUSED_RESULT static inline Handle<Object> ToNumber(
+ Isolate* isolate, Handle<Oddball> input);
+
+ DECL_CAST(Oddball)
+
+ // Dispatched behavior.
+ DECL_VERIFIER(Oddball)
+
+ // Initialize the fields.
+ static void Initialize(Isolate* isolate, Handle<Oddball> oddball,
+ const char* to_string, Handle<Object> to_number,
+ const char* type_of, byte kind);
+
+ // Layout description.
+#define ODDBALL_FIELDS(V) \
+ V(kToNumberRawOffset, kDoubleSize) \
+ /* Tagged fields. */ \
+ V(kTaggedFieldsStartOffset, 0) \
+ V(kToStringOffset, kTaggedSize) \
+ V(kToNumberOffset, kTaggedSize) \
+ V(kTypeOfOffset, kTaggedSize) \
+ V(kTaggedFieldsEndOffset, 0) \
+ /* Raw data but still encoded as Smi. */ \
+ V(kKindOffset, kTaggedSize) \
+ /* Total size. */ \
+ V(kSize, 0)
+
+ DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize, ODDBALL_FIELDS)
+#undef ODDBALL_FIELDS
+
+ static const byte kFalse = 0;
+ static const byte kTrue = 1;
+ static const byte kNotBooleanMask = static_cast<byte>(~1);
+ static const byte kTheHole = 2;
+ static const byte kNull = 3;
+ static const byte kArgumentsMarker = 4;
+ static const byte kUndefined = 5;
+ static const byte kUninitialized = 6;
+ static const byte kOther = 7;
+ static const byte kException = 8;
+ static const byte kOptimizedOut = 9;
+ static const byte kStaleRegister = 10;
+ static const byte kSelfReferenceMarker = 10;
+
+ typedef FixedBodyDescriptor<kTaggedFieldsStartOffset, kTaggedFieldsEndOffset,
+ kSize>
+ BodyDescriptor;
+
+ STATIC_ASSERT(kKindOffset == Internals::kOddballKindOffset);
+ STATIC_ASSERT(kNull == Internals::kNullOddballKind);
+ STATIC_ASSERT(kUndefined == Internals::kUndefinedOddballKind);
+
+ OBJECT_CONSTRUCTORS(Oddball, HeapObject);
+};
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_ODDBALL_H_
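SYMBOL_FIELDS and ODDBALL_FIELDS both feed DEFINE_FIELD_OFFSET_CONSTANTS, which turns a (name, size) list into running offsets plus a zero-sized trailing kSize. A hypothetical simplified version of that X-macro idiom:

#include <cassert>

// Simplified sketch: an X-macro list of (name, size) pairs expands into
// an enum of running byte offsets.
#define FIELD(Name, Size) Name, Name##End = Name + (Size)-1,
#define DEFINE_OFFSETS(Start, LIST) enum { kStart = (Start)-1, LIST(FIELD) };

// Mirrors ODDBALL_FIELDS in miniature: one raw double, one tagged field.
#define DEMO_FIELDS(V) \
  V(kDoubleOffset, 8)  \
  V(kTaggedOffset, 8)  \
  V(kSize, 0)

DEFINE_OFFSETS(16, DEMO_FIELDS)

int main() {
  static_assert(kDoubleOffset == 16, "first field starts at header size");
  static_assert(kTaggedOffset == 24, "next field follows the double");
  static_assert(kSize == 32, "zero-sized tail records total size");
}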
diff --git a/deps/v8/src/objects/ordered-hash-table-inl.h b/deps/v8/src/objects/ordered-hash-table-inl.h
index 76343c21ed..277c033994 100644
--- a/deps/v8/src/objects/ordered-hash-table-inl.h
+++ b/deps/v8/src/objects/ordered-hash-table-inl.h
@@ -10,9 +10,49 @@
#include "src/heap/heap.h"
#include "src/objects/fixed-array-inl.h"
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
namespace v8 {
namespace internal {
+CAST_ACCESSOR(OrderedNameDictionary)
+CAST_ACCESSOR(SmallOrderedNameDictionary)
+CAST_ACCESSOR(OrderedHashMap)
+CAST_ACCESSOR(OrderedHashSet)
+CAST_ACCESSOR(SmallOrderedHashMap)
+CAST_ACCESSOR(SmallOrderedHashSet)
+
+template <class Derived, int entrysize>
+OrderedHashTable<Derived, entrysize>::OrderedHashTable(Address ptr)
+ : FixedArray(ptr) {}
+
+OrderedHashSet::OrderedHashSet(Address ptr)
+ : OrderedHashTable<OrderedHashSet, 1>(ptr) {
+ SLOW_DCHECK(IsOrderedHashSet());
+}
+
+OrderedHashMap::OrderedHashMap(Address ptr)
+ : OrderedHashTable<OrderedHashMap, 2>(ptr) {
+ SLOW_DCHECK(IsOrderedHashMap());
+}
+
+OrderedNameDictionary::OrderedNameDictionary(Address ptr)
+ : OrderedHashTable<OrderedNameDictionary, 3>(ptr) {
+ SLOW_DCHECK(IsOrderedNameDictionary());
+}
+
+template <class Derived>
+SmallOrderedHashTable<Derived>::SmallOrderedHashTable(Address ptr)
+ : HeapObject(ptr) {}
+
+OBJECT_CONSTRUCTORS_IMPL(SmallOrderedHashSet,
+ SmallOrderedHashTable<SmallOrderedHashSet>)
+OBJECT_CONSTRUCTORS_IMPL(SmallOrderedHashMap,
+ SmallOrderedHashTable<SmallOrderedHashMap>)
+OBJECT_CONSTRUCTORS_IMPL(SmallOrderedNameDictionary,
+ SmallOrderedHashTable<SmallOrderedNameDictionary>)
+
RootIndex OrderedHashSet::GetMapRootIndex() {
return RootIndex::kOrderedHashSetMap;
}
@@ -21,6 +61,14 @@ RootIndex OrderedHashMap::GetMapRootIndex() {
return RootIndex::kOrderedHashMapMap;
}
+RootIndex OrderedNameDictionary::GetMapRootIndex() {
+ return RootIndex::kOrderedNameDictionaryMap;
+}
+
+RootIndex SmallOrderedNameDictionary::GetMapRootIndex() {
+ return RootIndex::kSmallOrderedNameDictionaryMap;
+}
+
RootIndex SmallOrderedHashMap::GetMapRootIndex() {
return RootIndex::kSmallOrderedHashMapMap;
}
@@ -29,11 +77,65 @@ RootIndex SmallOrderedHashSet::GetMapRootIndex() {
return RootIndex::kSmallOrderedHashSetMap;
}
-inline Object* OrderedHashMap::ValueAt(int entry) {
- DCHECK_LT(entry, this->UsedCapacity());
+inline Object OrderedHashMap::ValueAt(int entry) {
+ DCHECK_NE(entry, kNotFound);
+ DCHECK_LT(entry, UsedCapacity());
+ return get(EntryToIndex(entry) + kValueOffset);
+}
+
+inline Object OrderedNameDictionary::ValueAt(int entry) {
+ DCHECK_NE(entry, kNotFound);
+ DCHECK_LT(entry, UsedCapacity());
return get(EntryToIndex(entry) + kValueOffset);
}
+// Set the value for entry.
+inline void OrderedNameDictionary::ValueAtPut(int entry, Object value) {
+ DCHECK_NE(entry, kNotFound);
+ DCHECK_LT(entry, UsedCapacity());
+ this->set(EntryToIndex(entry) + kValueOffset, value);
+}
+
+// Returns the property details for the property at entry.
+inline PropertyDetails OrderedNameDictionary::DetailsAt(int entry) {
+ DCHECK_NE(entry, kNotFound);
+ DCHECK_LT(entry, this->UsedCapacity());
+ // TODO(gsathya): Optimize the cast away.
+ return PropertyDetails(
+ Smi::cast(get(EntryToIndex(entry) + kPropertyDetailsOffset)));
+}
+
+inline void OrderedNameDictionary::DetailsAtPut(int entry,
+ PropertyDetails value) {
+ DCHECK_NE(entry, kNotFound);
+ DCHECK_LT(entry, this->UsedCapacity());
+ // TODO(gsathya): Optimize the cast away.
+ this->set(EntryToIndex(entry) + kPropertyDetailsOffset, value.AsSmi());
+}
+
+inline Object SmallOrderedNameDictionary::ValueAt(int entry) {
+ return this->GetDataEntry(entry, kValueIndex);
+}
+
+// Set the value for entry.
+inline void SmallOrderedNameDictionary::ValueAtPut(int entry, Object value) {
+ this->SetDataEntry(entry, kValueIndex, value);
+}
+
+// Returns the property details for the property at entry.
+inline PropertyDetails SmallOrderedNameDictionary::DetailsAt(int entry) {
+ // TODO(gsathya): Optimize the cast away. And store this in the data table.
+ return PropertyDetails(
+ Smi::cast(this->GetDataEntry(entry, kPropertyDetailsIndex)));
+}
+
+// Set the details for entry.
+inline void SmallOrderedNameDictionary::DetailsAtPut(int entry,
+ PropertyDetails value) {
+ // TODO(gsathya): Optimize the cast away. And store this in the data table.
+ this->SetDataEntry(entry, kPropertyDetailsIndex, value.AsSmi());
+}
+
inline bool OrderedHashSet::Is(Handle<HeapObject> table) {
return table->IsOrderedHashSet();
}
@@ -49,7 +151,51 @@ inline bool SmallOrderedHashSet::Is(Handle<HeapObject> table) {
inline bool SmallOrderedHashMap::Is(Handle<HeapObject> table) {
return table->IsSmallOrderedHashMap();
}
+
+template <class Derived>
+void SmallOrderedHashTable<Derived>::SetDataEntry(int entry, int relative_index,
+ Object value) {
+ DCHECK_NE(kNotFound, entry);
+ int entry_offset = GetDataEntryOffset(entry, relative_index);
+ RELAXED_WRITE_FIELD(*this, entry_offset, value);
+ WRITE_BARRIER(*this, entry_offset, value);
+}
+
+template <class Derived, class TableType>
+Object OrderedHashTableIterator<Derived, TableType>::CurrentKey() {
+ TableType table = TableType::cast(this->table());
+ int index = Smi::ToInt(this->index());
+ Object key = table->KeyAt(index);
+ DCHECK(!key->IsTheHole());
+ return key;
+}
+
+inline void SmallOrderedNameDictionary::SetHash(int hash) {
+ DCHECK(PropertyArray::HashField::is_valid(hash));
+ WRITE_INT_FIELD(*this, PrefixOffset(), hash);
+}
+
+inline int SmallOrderedNameDictionary::Hash() {
+ int hash = READ_INT_FIELD(*this, PrefixOffset());
+ DCHECK(PropertyArray::HashField::is_valid(hash));
+ return hash;
+}
+
+inline void OrderedNameDictionary::SetHash(int hash) {
+ DCHECK(PropertyArray::HashField::is_valid(hash));
+ this->set(PrefixIndex(), Smi::FromInt(hash));
+}
+
+inline int OrderedNameDictionary::Hash() {
+ Object hash_obj = this->get(PrefixIndex());
+ int hash = Smi::ToInt(hash_obj);
+ DCHECK(PropertyArray::HashField::is_valid(hash));
+ return hash;
+}
+
} // namespace internal
} // namespace v8
+#include "src/objects/object-macros-undef.h"
+
#endif // V8_OBJECTS_ORDERED_HASH_TABLE_INL_H_
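The header above declares insertion-ordered hash tables: bucket heads index into an entry array laid out in insertion order, and each entry chains back to the previous bucket head. A standalone sketch of that layout and the chain walk used by FindEntry (simplified; no deletion or rehashing):

#include <cassert>
#include <functional>
#include <string>
#include <vector>

struct Entry {
  std::string key;
  int value;
  int chain;  // previous head of this bucket, as in kChainOffset
};

class OrderedMap {
 public:
  enum : int { kNotFound = -1 };

  explicit OrderedMap(int buckets) : buckets_(buckets, kNotFound) {}

  void Add(const std::string& key, int value) {
    size_t bucket = Hash(key) % buckets_.size();
    // The new entry goes at the end (preserving insertion order); it
    // chains to the previous bucket head and becomes the new head.
    entries_.push_back({key, value, buckets_[bucket]});
    buckets_[bucket] = static_cast<int>(entries_.size()) - 1;
  }

  int FindEntry(const std::string& key) const {
    int entry = buckets_[Hash(key) % buckets_.size()];
    while (entry != kNotFound) {  // walk the chain in the bucket
      if (entries_[entry].key == key) return entry;
      entry = entries_[entry].chain;
    }
    return kNotFound;
  }

 private:
  static size_t Hash(const std::string& key) {
    return std::hash<std::string>{}(key);
  }
  std::vector<int> buckets_;
  std::vector<Entry> entries_;
};

int main() {
  OrderedMap map(2);
  map.Add("a", 1);
  map.Add("b", 2);
  assert(map.FindEntry("a") == 0);  // entry indices follow insertion order
  assert(map.FindEntry("c") == OrderedMap::kNotFound);
}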
diff --git a/deps/v8/src/objects/ordered-hash-table.cc b/deps/v8/src/objects/ordered-hash-table.cc
index 171e4dfae3..b4120643e3 100644
--- a/deps/v8/src/objects/ordered-hash-table.cc
+++ b/deps/v8/src/objects/ordered-hash-table.cc
@@ -21,16 +21,16 @@ Handle<Derived> OrderedHashTable<Derived, entrysize>::Allocate(
// to something other than 2, capacity should be stored as another
// field of this object.
capacity = base::bits::RoundUpToPowerOfTwo32(Max(kMinCapacity, capacity));
- if (capacity > kMaxCapacity) {
+ if (capacity > MaxCapacity()) {
isolate->heap()->FatalProcessOutOfMemory("invalid table size");
}
int num_buckets = capacity / kLoadFactor;
Handle<FixedArray> backing_store = isolate->factory()->NewFixedArrayWithMap(
Derived::GetMapRootIndex(),
- kHashTableStartIndex + num_buckets + (capacity * kEntrySize), pretenure);
+ HashTableStartIndex() + num_buckets + (capacity * kEntrySize), pretenure);
Handle<Derived> table = Handle<Derived>::cast(backing_store);
for (int i = 0; i < num_buckets; ++i) {
- table->set(kHashTableStartIndex + i, Smi::FromInt(kNotFound));
+ table->set(HashTableStartIndex() + i, Smi::FromInt(kNotFound));
}
table->SetNumberOfBuckets(num_buckets);
table->SetNumberOfElements(0);
@@ -50,8 +50,8 @@ Handle<Derived> OrderedHashTable<Derived, entrysize>::EnsureGrowable(
// Don't need to grow if we can simply clear out deleted entries instead.
// Note that we can't compact in place, though, so we always allocate
// a new table.
- return Rehash(isolate, table,
- (nod < (capacity >> 1)) ? capacity << 1 : capacity);
+ return Derived::Rehash(isolate, table,
+ (nod < (capacity >> 1)) ? capacity << 1 : capacity);
}
template <class Derived, int entrysize>
@@ -62,7 +62,7 @@ Handle<Derived> OrderedHashTable<Derived, entrysize>::Shrink(
int nof = table->NumberOfElements();
int capacity = table->Capacity();
if (nof >= (capacity >> 2)) return table;
- return Rehash(isolate, table, capacity / 2);
+ return Derived::Rehash(isolate, table, capacity / 2);
}
template <class Derived, int entrysize>
@@ -81,14 +81,41 @@ Handle<Derived> OrderedHashTable<Derived, entrysize>::Clear(
template <class Derived, int entrysize>
bool OrderedHashTable<Derived, entrysize>::HasKey(Isolate* isolate,
- Derived* table, Object* key) {
- DCHECK((entrysize == 1 && table->IsOrderedHashSet()) ||
- (entrysize == 2 && table->IsOrderedHashMap()));
+ Derived table, Object key) {
+ DCHECK_IMPLIES(entrysize == 1, table->IsOrderedHashSet());
+ DCHECK_IMPLIES(entrysize == 2, table->IsOrderedHashMap());
DisallowHeapAllocation no_gc;
int entry = table->FindEntry(isolate, key);
return entry != kNotFound;
}
+template <class Derived, int entrysize>
+int OrderedHashTable<Derived, entrysize>::FindEntry(Isolate* isolate,
+ Object key) {
+ int entry;
+ // This special-cases Smi keys so that we avoid the HandleScope
+ // creation below.
+ if (key->IsSmi()) {
+ uint32_t hash = ComputeUnseededHash(Smi::ToInt(key));
+ entry = HashToEntry(hash & Smi::kMaxValue);
+ } else {
+ HandleScope scope(isolate);
+ Object hash = key->GetHash();
+ // If the object does not have an identity hash, it was never used as a key
+ if (hash->IsUndefined(isolate)) return kNotFound;
+ entry = HashToEntry(Smi::ToInt(hash));
+ }
+
+ // Walk the chain in the bucket to find the key.
+ while (entry != kNotFound) {
+ Object candidate_key = KeyAt(entry);
+ if (candidate_key->SameValueZero(key)) break;
+ entry = NextChainEntry(entry);
+ }
+
+ return entry;
+}
+
Handle<OrderedHashSet> OrderedHashSet::Add(Isolate* isolate,
Handle<OrderedHashSet> table,
Handle<Object> key) {
@@ -96,7 +123,7 @@ Handle<OrderedHashSet> OrderedHashSet::Add(Isolate* isolate,
int entry = table->HashToEntry(hash);
// Walk the chain of the bucket and try finding the key.
while (entry != kNotFound) {
- Object* candidate_key = table->KeyAt(entry);
+ Object candidate_key = table->KeyAt(entry);
// Do not add if we have the key already
if (candidate_key->SameValueZero(*key)) return table;
entry = table->NextChainEntry(entry);
@@ -113,7 +140,7 @@ Handle<OrderedHashSet> OrderedHashSet::Add(Isolate* isolate,
table->set(new_index, *key);
table->set(new_index + kChainOffset, Smi::FromInt(previous_entry));
// and point the bucket to the new entry.
- table->set(kHashTableStartIndex + bucket, Smi::FromInt(new_entry));
+ table->set(HashTableStartIndex() + bucket, Smi::FromInt(new_entry));
table->SetNumberOfElements(nof + 1);
return table;
}
@@ -129,8 +156,8 @@ Handle<FixedArray> OrderedHashSet::ConvertToKeysArray(
int const kMaxStringTableEntries =
isolate->heap()->MaxNumberToStringCacheSize();
for (int i = 0; i < length; i++) {
- int index = kHashTableStartIndex + nof_buckets + (i * kEntrySize);
- Object* key = table->get(index);
+ int index = HashTableStartIndex() + nof_buckets + (i * kEntrySize);
+ Object key = table->get(index);
if (convert == GetKeysConversion::kConvertToString) {
uint32_t index_value;
if (key->ToArrayIndex(&index_value)) {
@@ -146,11 +173,11 @@ Handle<FixedArray> OrderedHashSet::ConvertToKeysArray(
return FixedArray::ShrinkOrEmpty(isolate, result, length);
}
-HeapObject* OrderedHashSet::GetEmpty(ReadOnlyRoots ro_roots) {
+HeapObject OrderedHashSet::GetEmpty(ReadOnlyRoots ro_roots) {
return ro_roots.empty_ordered_hash_set();
}
-HeapObject* OrderedHashMap::GetEmpty(ReadOnlyRoots ro_roots) {
+HeapObject OrderedHashMap::GetEmpty(ReadOnlyRoots ro_roots) {
return ro_roots.empty_ordered_hash_map();
}
@@ -159,7 +186,7 @@ Handle<Derived> OrderedHashTable<Derived, entrysize>::Rehash(
Isolate* isolate, Handle<Derived> table, int new_capacity) {
DCHECK(!table->IsObsolete());
- Handle<Derived> new_table = Allocate(
+ Handle<Derived> new_table = Derived::Allocate(
isolate, new_capacity, Heap::InNewSpace(*table) ? NOT_TENURED : TENURED);
int nof = table->NumberOfElements();
int nod = table->NumberOfDeletedElements();
@@ -169,20 +196,20 @@ Handle<Derived> OrderedHashTable<Derived, entrysize>::Rehash(
DisallowHeapAllocation no_gc;
for (int old_entry = 0; old_entry < (nof + nod); ++old_entry) {
- Object* key = table->KeyAt(old_entry);
+ Object key = table->KeyAt(old_entry);
if (key->IsTheHole(isolate)) {
table->SetRemovedIndexAt(removed_holes_index++, old_entry);
continue;
}
- Object* hash = key->GetHash();
+ Object hash = key->GetHash();
int bucket = Smi::ToInt(hash) & (new_buckets - 1);
- Object* chain_entry = new_table->get(kHashTableStartIndex + bucket);
- new_table->set(kHashTableStartIndex + bucket, Smi::FromInt(new_entry));
+ Object chain_entry = new_table->get(HashTableStartIndex() + bucket);
+ new_table->set(HashTableStartIndex() + bucket, Smi::FromInt(new_entry));
int new_index = new_table->EntryToIndex(new_entry);
int old_index = table->EntryToIndex(old_entry);
for (int i = 0; i < entrysize; ++i) {
- Object* value = table->get(old_index + i);
+ Object value = table->get(old_index + i);
new_table->set(new_index + i, value);
}
new_table->set(new_index + kChainOffset, chain_entry);
@@ -197,9 +224,32 @@ Handle<Derived> OrderedHashTable<Derived, entrysize>::Rehash(
return new_table;
}
+Handle<OrderedHashSet> OrderedHashSet::Rehash(Isolate* isolate,
+ Handle<OrderedHashSet> table,
+ int new_capacity) {
+ return OrderedHashTable<OrderedHashSet, 1>::Rehash(isolate, table,
+ new_capacity);
+}
+
+Handle<OrderedHashMap> OrderedHashMap::Rehash(Isolate* isolate,
+ Handle<OrderedHashMap> table,
+ int new_capacity) {
+ return OrderedHashTable<OrderedHashMap, 2>::Rehash(isolate, table,
+ new_capacity);
+}
+
+Handle<OrderedNameDictionary> OrderedNameDictionary::Rehash(
+ Isolate* isolate, Handle<OrderedNameDictionary> table, int new_capacity) {
+ Handle<OrderedNameDictionary> new_table =
+ OrderedHashTable<OrderedNameDictionary, 3>::Rehash(isolate, table,
+ new_capacity);
+ new_table->SetHash(table->Hash());
+ return new_table;
+}
+
template <class Derived, int entrysize>
bool OrderedHashTable<Derived, entrysize>::Delete(Isolate* isolate,
- Derived* table, Object* key) {
+ Derived table, Object key) {
DisallowHeapAllocation no_gc;
int entry = table->FindEntry(isolate, key);
if (entry == kNotFound) return false;
@@ -208,7 +258,7 @@ bool OrderedHashTable<Derived, entrysize>::Delete(Isolate* isolate,
int nod = table->NumberOfDeletedElements();
int index = table->EntryToIndex(entry);
- Object* hole = ReadOnlyRoots(isolate).the_hole_value();
+ Object hole = ReadOnlyRoots(isolate).the_hole_value();
for (int i = 0; i < entrysize; ++i) {
table->set(index + i, hole);
}
@@ -219,15 +269,15 @@ bool OrderedHashTable<Derived, entrysize>::Delete(Isolate* isolate,
return true;
}
-Object* OrderedHashMap::GetHash(Isolate* isolate, Object* key) {
+Address OrderedHashMap::GetHash(Isolate* isolate, Address raw_key) {
DisallowHeapAllocation no_gc;
-
- Object* hash = key->GetHash();
+ Object key(raw_key);
+ Object hash = key->GetHash();
// If the object does not have an identity hash, it was never used as a key
- if (hash->IsUndefined(isolate)) return Smi::FromInt(-1);
+ if (hash->IsUndefined(isolate)) return Smi::FromInt(-1).ptr();
DCHECK(hash->IsSmi());
DCHECK_GE(Smi::cast(hash)->value(), 0);
- return hash;
+ return hash.ptr();
}
Handle<OrderedHashMap> OrderedHashMap::Add(Isolate* isolate,
@@ -239,9 +289,9 @@ Handle<OrderedHashMap> OrderedHashMap::Add(Isolate* isolate,
// Walk the chain of the bucket and try finding the key.
{
DisallowHeapAllocation no_gc;
- Object* raw_key = *key;
+ Object raw_key = *key;
while (entry != kNotFound) {
- Object* candidate_key = table->KeyAt(entry);
+ Object candidate_key = table->KeyAt(entry);
// Do not add if we have the key already
if (candidate_key->SameValueZero(raw_key)) return table;
entry = table->NextChainEntry(entry);
@@ -260,13 +310,115 @@ Handle<OrderedHashMap> OrderedHashMap::Add(Isolate* isolate,
table->set(new_index + kValueOffset, *value);
table->set(new_index + kChainOffset, Smi::FromInt(previous_entry));
// and point the bucket to the new entry.
- table->set(kHashTableStartIndex + bucket, Smi::FromInt(new_entry));
+ table->set(HashTableStartIndex() + bucket, Smi::FromInt(new_entry));
+ table->SetNumberOfElements(nof + 1);
+ return table;
+}
+
+template <>
+int OrderedHashTable<OrderedNameDictionary, 3>::FindEntry(Isolate* isolate,
+ Object key) {
+ DisallowHeapAllocation no_gc;
+
+ DCHECK(key->IsUniqueName());
+ Name raw_key = Name::cast(key);
+
+ int entry = HashToEntry(raw_key->Hash());
+ while (entry != kNotFound) {
+ Object candidate_key = KeyAt(entry);
+ DCHECK(candidate_key->IsTheHole() ||
+ Name::cast(candidate_key)->IsUniqueName());
+ if (candidate_key == raw_key) return entry;
+
+ // TODO(gsathya): This is loading the bucket count from the hash
+ // table for every iteration. This should be peeled out of the
+ // loop.
+ entry = NextChainEntry(entry);
+ }
+
+ return kNotFound;
+}
+
+Handle<OrderedNameDictionary> OrderedNameDictionary::Add(
+ Isolate* isolate, Handle<OrderedNameDictionary> table, Handle<Name> key,
+ Handle<Object> value, PropertyDetails details) {
+ DCHECK_EQ(kNotFound, table->FindEntry(isolate, *key));
+
+ table = OrderedNameDictionary::EnsureGrowable(isolate, table);
+ // Read the existing bucket values.
+ int hash = key->Hash();
+ int bucket = table->HashToBucket(hash);
+ int previous_entry = table->HashToEntry(hash);
+ int nof = table->NumberOfElements();
+ // Insert a new entry at the end,
+ int new_entry = nof + table->NumberOfDeletedElements();
+ int new_index = table->EntryToIndex(new_entry);
+ table->set(new_index, *key);
+ table->set(new_index + kValueOffset, *value);
+
+ // TODO(gsathya): Optimize how PropertyDetails are stored in this
+ // dictionary to save memory (by reusing padding?) and performance
+ // (by not doing the Smi conversion).
+ table->set(new_index + kPropertyDetailsOffset, details.AsSmi());
+
+ table->set(new_index + kChainOffset, Smi::FromInt(previous_entry));
+ // and point the bucket to the new entry.
+ table->set(HashTableStartIndex() + bucket, Smi::FromInt(new_entry));
table->SetNumberOfElements(nof + 1);
return table;
}
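OrderedNameDictionary is the entrysize == 3 instantiation: each entry is three consecutive slots (key, value, property details) addressed from EntryToIndex(entry). A small sketch with illustrative constants, ignoring the bucket prefix that precedes the entries in the real layout:

#include <cassert>
#include <vector>

// Illustrative constants, mirroring the 3-slot entries above.
const int kEntrySize = 3;      // key, value, property details
const int kValueOffset = 1;
const int kDetailsOffset = 2;  // kPropertyDetailsOffset in the real code

int EntryToIndex(int entry) { return entry * kEntrySize; }

int main() {
  std::vector<int> backing(2 * kEntrySize, 0);
  int entry = 1;
  backing[EntryToIndex(entry)] = 42;                  // key slot
  backing[EntryToIndex(entry) + kValueOffset] = 7;    // value slot
  backing[EntryToIndex(entry) + kDetailsOffset] = 3;  // details, Smi-encoded
  assert(backing[EntryToIndex(entry) + kValueOffset] == 7);
}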
-template Handle<OrderedHashSet> OrderedHashTable<OrderedHashSet, 1>::Allocate(
- Isolate* isolate, int capacity, PretenureFlag pretenure);
+void OrderedNameDictionary::SetEntry(Isolate* isolate, int entry, Object key,
+ Object value, PropertyDetails details) {
+ DisallowHeapAllocation no_gc;
+ DCHECK_IMPLIES(!key->IsName(), key->IsTheHole(isolate));
+ int index = EntryToIndex(entry);
+ this->set(index, key);
+ this->set(index + kValueOffset, value);
+
+ // TODO(gsathya): Optimize how PropertyDetails are stored in this
+ // dictionary to save memory (by reusing padding?) and performance
+ // (by not doing the Smi conversion).
+ this->set(index + kPropertyDetailsOffset, details.AsSmi());
+}
+
+Handle<OrderedNameDictionary> OrderedNameDictionary::DeleteEntry(
+ Isolate* isolate, Handle<OrderedNameDictionary> table, int entry) {
+ DCHECK_NE(entry, kNotFound);
+
+ Object hole = ReadOnlyRoots(isolate).the_hole_value();
+ PropertyDetails details = PropertyDetails::Empty();
+ table->SetEntry(isolate, entry, hole, hole, details);
+
+ int nof = table->NumberOfElements();
+ table->SetNumberOfElements(nof - 1);
+ int nod = table->NumberOfDeletedElements();
+ table->SetNumberOfDeletedElements(nod + 1);
+
+ return Shrink(isolate, table);
+}
+
+Handle<OrderedHashSet> OrderedHashSet::Allocate(Isolate* isolate, int capacity,
+ PretenureFlag pretenure) {
+ return OrderedHashTable<OrderedHashSet, 1>::Allocate(isolate, capacity,
+ pretenure);
+}
+
+Handle<OrderedHashMap> OrderedHashMap::Allocate(Isolate* isolate, int capacity,
+ PretenureFlag pretenure) {
+ return OrderedHashTable<OrderedHashMap, 2>::Allocate(isolate, capacity,
+ pretenure);
+}
+
+Handle<OrderedNameDictionary> OrderedNameDictionary::Allocate(
+ Isolate* isolate, int capacity, PretenureFlag pretenure) {
+ Handle<OrderedNameDictionary> table =
+ OrderedHashTable<OrderedNameDictionary, 3>::Allocate(isolate, capacity,
+ pretenure);
+ table->SetHash(PropertyArray::kNoHashSentinel);
+ return table;
+}
template Handle<OrderedHashSet>
OrderedHashTable<OrderedHashSet, 1>::EnsureGrowable(
@@ -279,15 +431,15 @@ template Handle<OrderedHashSet> OrderedHashTable<OrderedHashSet, 1>::Clear(
Isolate* isolate, Handle<OrderedHashSet> table);
template bool OrderedHashTable<OrderedHashSet, 1>::HasKey(Isolate* isolate,
- OrderedHashSet* table,
- Object* key);
+ OrderedHashSet table,
+ Object key);
template bool OrderedHashTable<OrderedHashSet, 1>::Delete(Isolate* isolate,
- OrderedHashSet* table,
- Object* key);
+ OrderedHashSet table,
+ Object key);
-template Handle<OrderedHashMap> OrderedHashTable<OrderedHashMap, 2>::Allocate(
- Isolate* isolate, int capacity, PretenureFlag pretenure);
+template int OrderedHashTable<OrderedHashSet, 1>::FindEntry(Isolate* isolate,
+ Object key);
template Handle<OrderedHashMap>
OrderedHashTable<OrderedHashMap, 2>::EnsureGrowable(
@@ -300,12 +452,23 @@ template Handle<OrderedHashMap> OrderedHashTable<OrderedHashMap, 2>::Clear(
Isolate* isolate, Handle<OrderedHashMap> table);
template bool OrderedHashTable<OrderedHashMap, 2>::HasKey(Isolate* isolate,
- OrderedHashMap* table,
- Object* key);
+ OrderedHashMap table,
+ Object key);
template bool OrderedHashTable<OrderedHashMap, 2>::Delete(Isolate* isolate,
- OrderedHashMap* table,
- Object* key);
+ OrderedHashMap table,
+ Object key);
+
+template int OrderedHashTable<OrderedHashMap, 2>::FindEntry(Isolate* isolate,
+ Object key);
+
+template Handle<OrderedNameDictionary>
+OrderedHashTable<OrderedNameDictionary, 3>::Shrink(
+ Isolate* isolate, Handle<OrderedNameDictionary> table);
+
+template Handle<OrderedNameDictionary>
+OrderedHashTable<OrderedNameDictionary, 3>::EnsureGrowable(
+ Isolate* isolate, Handle<OrderedNameDictionary> table);
template <>
Handle<SmallOrderedHashSet>
@@ -323,6 +486,13 @@ SmallOrderedHashTable<SmallOrderedHashMap>::Allocate(Isolate* isolate,
return isolate->factory()->NewSmallOrderedHashMap(capacity, pretenure);
}
+template <>
+Handle<SmallOrderedNameDictionary>
+SmallOrderedHashTable<SmallOrderedNameDictionary>::Allocate(
+ Isolate* isolate, int capacity, PretenureFlag pretenure) {
+ return isolate->factory()->NewSmallOrderedNameDictionary(capacity, pretenure);
+}
+
template <class Derived>
void SmallOrderedHashTable<Derived>::Initialize(Isolate* isolate,
int capacity) {
@@ -338,10 +508,10 @@ void SmallOrderedHashTable<Derived>::Initialize(Isolate* isolate,
memset(reinterpret_cast<byte*>(hashtable_start), kNotFound,
num_buckets + num_chains);
- if (Heap::InNewSpace(this)) {
- MemsetPointer(RawField(this, kDataTableStartOffset),
- ReadOnlyRoots(isolate).the_hole_value(),
- capacity * Derived::kEntrySize);
+ if (Heap::InNewSpace(*this)) {
+ MemsetTagged(RawField(DataTableStartOffset()),
+ ReadOnlyRoots(isolate).the_hole_value(),
+ capacity * Derived::kEntrySize);
} else {
for (int i = 0; i < capacity; i++) {
for (int j = 0; j < Derived::kEntrySize; j++) {
@@ -433,6 +603,79 @@ MaybeHandle<SmallOrderedHashMap> SmallOrderedHashMap::Add(
return table;
}
+template <>
+int SmallOrderedHashTable<SmallOrderedNameDictionary>::FindEntry(
+ Isolate* isolate, Object key) {
+ DisallowHeapAllocation no_gc;
+ DCHECK(key->IsUniqueName());
+ Name raw_key = Name::cast(key);
+
+ int entry = HashToFirstEntry(raw_key->Hash());
+
+ // Walk the chain in the bucket to find the key.
+ while (entry != kNotFound) {
+ Object candidate_key = KeyAt(entry);
+ if (candidate_key == key) return entry;
+ entry = GetNextEntry(entry);
+ }
+
+ return kNotFound;
+}
+
+MaybeHandle<SmallOrderedNameDictionary> SmallOrderedNameDictionary::Add(
+ Isolate* isolate, Handle<SmallOrderedNameDictionary> table,
+ Handle<Name> key, Handle<Object> value, PropertyDetails details) {
+ DCHECK_EQ(kNotFound, table->FindEntry(isolate, *key));
+
+ if (table->UsedCapacity() >= table->Capacity()) {
+ MaybeHandle<SmallOrderedNameDictionary> new_table =
+ SmallOrderedNameDictionary::Grow(isolate, table);
+ if (!new_table.ToHandle(&table)) {
+ return MaybeHandle<SmallOrderedNameDictionary>();
+ }
+ }
+
+ int nof = table->NumberOfElements();
+
+ // Read the existing bucket values.
+ int hash = key->Hash();
+ int bucket = table->HashToBucket(hash);
+ int previous_entry = table->HashToFirstEntry(hash);
+
+ // Insert a new entry at the end,
+ int new_entry = nof + table->NumberOfDeletedElements();
+
+ table->SetDataEntry(new_entry, SmallOrderedNameDictionary::kValueIndex,
+ *value);
+ table->SetDataEntry(new_entry, SmallOrderedNameDictionary::kKeyIndex, *key);
+
+ // TODO(gsathya): PropertyDetails should be stored as part of the
+ // data table to save more memory.
+ table->SetDataEntry(new_entry,
+ SmallOrderedNameDictionary::kPropertyDetailsIndex,
+ details.AsSmi());
+ table->SetFirstEntry(bucket, new_entry);
+ table->SetNextEntry(new_entry, previous_entry);
+
+ // and update bookkeeping.
+ table->SetNumberOfElements(nof + 1);
+
+ return table;
+}
+
+void SmallOrderedNameDictionary::SetEntry(Isolate* isolate, int entry,
+ Object key, Object value,
+ PropertyDetails details) {
+ DCHECK_IMPLIES(!key->IsName(), key->IsTheHole(isolate));
+ SetDataEntry(entry, SmallOrderedNameDictionary::kValueIndex, value);
+ SetDataEntry(entry, SmallOrderedNameDictionary::kKeyIndex, key);
+
+ // TODO(gsathya): PropertyDetails should be stored as part of the
+ // data table to save more memory.
+ SetDataEntry(entry, SmallOrderedNameDictionary::kPropertyDetailsIndex,
+ details.AsSmi());
+}
+
template <class Derived>
bool SmallOrderedHashTable<Derived>::HasKey(Isolate* isolate,
Handle<Object> key) {
@@ -441,8 +684,8 @@ bool SmallOrderedHashTable<Derived>::HasKey(Isolate* isolate,
}
template <class Derived>
-bool SmallOrderedHashTable<Derived>::Delete(Isolate* isolate, Derived* table,
- Object* key) {
+bool SmallOrderedHashTable<Derived>::Delete(Isolate* isolate, Derived table,
+ Object key) {
DisallowHeapAllocation no_gc;
int entry = table->FindEntry(isolate, key);
if (entry == kNotFound) return false;
@@ -450,7 +693,7 @@ bool SmallOrderedHashTable<Derived>::Delete(Isolate* isolate, Derived* table,
int nof = table->NumberOfElements();
int nod = table->NumberOfDeletedElements();
- Object* hole = ReadOnlyRoots(isolate).the_hole_value();
+ Object hole = ReadOnlyRoots(isolate).the_hole_value();
for (int j = 0; j < Derived::kEntrySize; j++) {
table->SetDataEntry(entry, j, hole);
}
@@ -461,6 +704,23 @@ bool SmallOrderedHashTable<Derived>::Delete(Isolate* isolate, Derived* table,
return true;
}
+Handle<SmallOrderedNameDictionary> SmallOrderedNameDictionary::DeleteEntry(
+ Isolate* isolate, Handle<SmallOrderedNameDictionary> table, int entry) {
+ DCHECK_NE(entry, kNotFound);
+ {
+ DisallowHeapAllocation no_gc;
+ Object hole = ReadOnlyRoots(isolate).the_hole_value();
+ PropertyDetails details = PropertyDetails::Empty();
+ table->SetEntry(isolate, entry, hole, hole, details);
+
+ int nof = table->NumberOfElements();
+ table->SetNumberOfElements(nof - 1);
+ int nod = table->NumberOfDeletedElements();
+ table->SetNumberOfDeletedElements(nod + 1);
+ }
+ return Shrink(isolate, table);
+}
+
template <class Derived>
Handle<Derived> SmallOrderedHashTable<Derived>::Rehash(Isolate* isolate,
Handle<Derived> table,
@@ -476,7 +736,7 @@ Handle<Derived> SmallOrderedHashTable<Derived>::Rehash(Isolate* isolate,
{
DisallowHeapAllocation no_gc;
for (int old_entry = 0; old_entry < (nof + nod); ++old_entry) {
- Object* key = table->KeyAt(old_entry);
+ Object key = table->KeyAt(old_entry);
if (key->IsTheHole(isolate)) continue;
int hash = Smi::ToInt(key->GetHash());
@@ -487,7 +747,7 @@ Handle<Derived> SmallOrderedHashTable<Derived>::Rehash(Isolate* isolate,
new_table->SetNextEntry(new_entry, chain);
for (int i = 0; i < Derived::kEntrySize; ++i) {
- Object* value = table->GetDataEntry(old_entry, i);
+ Object value = table->GetDataEntry(old_entry, i);
new_table->SetDataEntry(new_entry, i, value);
}
@@ -499,6 +759,37 @@ Handle<Derived> SmallOrderedHashTable<Derived>::Rehash(Isolate* isolate,
return new_table;
}
+Handle<SmallOrderedHashSet> SmallOrderedHashSet::Rehash(
+ Isolate* isolate, Handle<SmallOrderedHashSet> table, int new_capacity) {
+ return SmallOrderedHashTable<SmallOrderedHashSet>::Rehash(isolate, table,
+ new_capacity);
+}
+
+Handle<SmallOrderedHashMap> SmallOrderedHashMap::Rehash(
+ Isolate* isolate, Handle<SmallOrderedHashMap> table, int new_capacity) {
+ return SmallOrderedHashTable<SmallOrderedHashMap>::Rehash(isolate, table,
+ new_capacity);
+}
+
+Handle<SmallOrderedNameDictionary> SmallOrderedNameDictionary::Rehash(
+ Isolate* isolate, Handle<SmallOrderedNameDictionary> table,
+ int new_capacity) {
+ Handle<SmallOrderedNameDictionary> new_table =
+ SmallOrderedHashTable<SmallOrderedNameDictionary>::Rehash(isolate, table,
+ new_capacity);
+ new_table->SetHash(table->Hash());
+ return new_table;
+}
+
+template <class Derived>
+Handle<Derived> SmallOrderedHashTable<Derived>::Shrink(Isolate* isolate,
+ Handle<Derived> table) {
+ int nof = table->NumberOfElements();
+ int capacity = table->Capacity();
+ if (nof >= (capacity >> 2)) return table;
+ return Derived::Rehash(isolate, table, capacity / 2);
+}
+
template <class Derived>
MaybeHandle<Derived> SmallOrderedHashTable<Derived>::Grow(
Isolate* isolate, Handle<Derived> table) {
@@ -523,7 +814,24 @@ MaybeHandle<Derived> SmallOrderedHashTable<Derived>::Grow(
}
}
- return Rehash(isolate, table, new_capacity);
+ return Derived::Rehash(isolate, table, new_capacity);
+}
+
+template <class Derived>
+int SmallOrderedHashTable<Derived>::FindEntry(Isolate* isolate, Object key) {
+ DisallowHeapAllocation no_gc;
+ Object hash = key->GetHash();
+
+ if (hash->IsUndefined(isolate)) return kNotFound;
+ int entry = HashToFirstEntry(Smi::ToInt(hash));
+
+ // Walk the chain in the bucket to find the key.
+ while (entry != kNotFound) {
+ Object candidate_key = KeyAt(entry);
+ if (candidate_key->SameValueZero(key)) return entry;
+ entry = GetNextEntry(entry);
+ }
+ return kNotFound;
}
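The resize policy threaded through EnsureGrowable/Shrink/Grow above: growth doubles capacity only when deleted entries cannot simply be compacted away, and shrinking halves only when live entries fall under a quarter of capacity. Restated as a standalone sketch:

#include <cassert>

// EnsureGrowable: if at least half the slots are deleted entries, rehash
// at the same capacity (compacting the holes); otherwise double.
int NewCapacityOnGrow(int nod, int capacity) {
  return (nod < (capacity >> 1)) ? capacity << 1 : capacity;
}

// Shrink: halve only when live entries drop below a quarter of capacity.
bool ShouldShrink(int nof, int capacity) { return nof < (capacity >> 2); }

int main() {
  assert(NewCapacityOnGrow(0, 8) == 16);  // no holes: double
  assert(NewCapacityOnGrow(6, 8) == 8);   // mostly holes: compact in place
  assert(ShouldShrink(1, 8));             // 1 of 8 live: rehash to 4
  assert(!ShouldShrink(2, 8));            // 2 of 8 live: keep capacity
}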
template bool SmallOrderedHashTable<SmallOrderedHashSet>::HasKey(
@@ -531,6 +839,9 @@ template bool SmallOrderedHashTable<SmallOrderedHashSet>::HasKey(
template Handle<SmallOrderedHashSet>
SmallOrderedHashTable<SmallOrderedHashSet>::Rehash(
Isolate* isolate, Handle<SmallOrderedHashSet> table, int new_capacity);
+template Handle<SmallOrderedHashSet>
+SmallOrderedHashTable<SmallOrderedHashSet>::Shrink(
+ Isolate* isolate, Handle<SmallOrderedHashSet> table);
template MaybeHandle<SmallOrderedHashSet>
SmallOrderedHashTable<SmallOrderedHashSet>::Grow(
Isolate* isolate, Handle<SmallOrderedHashSet> table);
@@ -542,6 +853,9 @@ template bool SmallOrderedHashTable<SmallOrderedHashMap>::HasKey(
template Handle<SmallOrderedHashMap>
SmallOrderedHashTable<SmallOrderedHashMap>::Rehash(
Isolate* isolate, Handle<SmallOrderedHashMap> table, int new_capacity);
+template Handle<SmallOrderedHashMap>
+SmallOrderedHashTable<SmallOrderedHashMap>::Shrink(
+ Isolate* isolate, Handle<SmallOrderedHashMap> table);
template MaybeHandle<SmallOrderedHashMap>
SmallOrderedHashTable<SmallOrderedHashMap>::Grow(
Isolate* isolate, Handle<SmallOrderedHashMap> table);
@@ -549,9 +863,15 @@ template void SmallOrderedHashTable<SmallOrderedHashMap>::Initialize(
Isolate* isolate, int capacity);
template bool SmallOrderedHashTable<SmallOrderedHashMap>::Delete(
- Isolate* isolate, SmallOrderedHashMap* table, Object* key);
+ Isolate* isolate, SmallOrderedHashMap table, Object key);
template bool SmallOrderedHashTable<SmallOrderedHashSet>::Delete(
- Isolate* isolate, SmallOrderedHashSet* table, Object* key);
+ Isolate* isolate, SmallOrderedHashSet table, Object key);
+
+template void SmallOrderedHashTable<SmallOrderedNameDictionary>::Initialize(
+ Isolate* isolate, int capacity);
+template Handle<SmallOrderedNameDictionary>
+SmallOrderedHashTable<SmallOrderedNameDictionary>::Shrink(
+ Isolate* isolate, Handle<SmallOrderedNameDictionary> table);
template <class SmallTable, class LargeTable>
Handle<HeapObject> OrderedHashTableHandler<SmallTable, LargeTable>::Allocate(
@@ -569,6 +889,10 @@ OrderedHashTableHandler<SmallOrderedHashSet, OrderedHashSet>::Allocate(
template Handle<HeapObject>
OrderedHashTableHandler<SmallOrderedHashMap, OrderedHashMap>::Allocate(
Isolate* isolate, int capacity);
+template Handle<HeapObject>
+OrderedHashTableHandler<SmallOrderedNameDictionary,
+ OrderedNameDictionary>::Allocate(Isolate* isolate,
+ int capacity);
template <class SmallTable, class LargeTable>
bool OrderedHashTableHandler<SmallTable, LargeTable>::Delete(
@@ -641,6 +965,29 @@ Handle<OrderedHashSet> OrderedHashSetHandler::AdjustRepresentation(
return new_table;
}
+Handle<OrderedNameDictionary>
+OrderedNameDictionaryHandler::AdjustRepresentation(
+ Isolate* isolate, Handle<SmallOrderedNameDictionary> table) {
+ Handle<OrderedNameDictionary> new_table =
+ OrderedNameDictionary::Allocate(isolate, OrderedHashTableMinSize);
+ int nof = table->NumberOfElements();
+ int nod = table->NumberOfDeletedElements();
+
+  // TODO(gsathya): Optimize the lookup to not recalculate offsets. Also,
+ // unhandlify this code as we preallocate the new backing store with
+ // the proper capacity.
+ for (int entry = 0; entry < (nof + nod); ++entry) {
+ Handle<Name> key(Name::cast(table->KeyAt(entry)), isolate);
+ if (key->IsTheHole(isolate)) continue;
+ Handle<Object> value(table->ValueAt(entry), isolate);
+ PropertyDetails details = table->DetailsAt(entry);
+ new_table =
+ OrderedNameDictionary::Add(isolate, new_table, key, value, details);
+ }
+
+ return new_table;
+}
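
The loop above is the whole migration story: walk every used slot of the small table, skip the holes left by deletions, and re-add the live entries to a freshly allocated large table. A self-contained imitation with stand-in types follows; std::optional marks a hole, and unlike the real ordered tables the stand-in map does not preserve insertion order, the loop shape is the point.

#include <cassert>
#include <optional>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>

using SmallTable = std::vector<std::optional<std::pair<std::string, int>>>;

std::unordered_map<std::string, int> AdjustRepresentation(
    const SmallTable& small) {
  std::unordered_map<std::string, int> large;
  for (const auto& entry : small) {
    if (!entry) continue;  // a hole left by a deletion: skip it
    large.emplace(entry->first, entry->second);
  }
  return large;
}

int main() {
  SmallTable small;
  small.push_back(std::pair<std::string, int>{"a", 1});
  small.push_back(std::nullopt);  // a deleted entry
  small.push_back(std::pair<std::string, int>{"b", 2});
  auto large = AdjustRepresentation(small);
  assert(large.size() == 2);
  assert(large.at("b") == 2);
}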
+
Handle<HeapObject> OrderedHashMapHandler::Add(Isolate* isolate,
Handle<HeapObject> table,
Handle<Object> key,
@@ -681,15 +1028,180 @@ Handle<HeapObject> OrderedHashSetHandler::Add(Isolate* isolate,
return OrderedHashSet::Add(isolate, Handle<OrderedHashSet>::cast(table), key);
}
+Handle<HeapObject> OrderedNameDictionaryHandler::Add(Isolate* isolate,
+ Handle<HeapObject> table,
+ Handle<Name> key,
+ Handle<Object> value,
+ PropertyDetails details) {
+ if (table->IsSmallOrderedNameDictionary()) {
+ Handle<SmallOrderedNameDictionary> small_dict =
+ Handle<SmallOrderedNameDictionary>::cast(table);
+ MaybeHandle<SmallOrderedNameDictionary> new_dict =
+ SmallOrderedNameDictionary::Add(isolate, small_dict, key, value,
+ details);
+ if (!new_dict.is_null()) return new_dict.ToHandleChecked();
+
+  // We couldn't add to the small table; let's migrate to the
+ // big table.
+ table =
+ OrderedNameDictionaryHandler::AdjustRepresentation(isolate, small_dict);
+ }
+
+ DCHECK(table->IsOrderedNameDictionary());
+ return OrderedNameDictionary::Add(
+ isolate, Handle<OrderedNameDictionary>::cast(table), key, value, details);
+}
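
The control flow above, try the compact representation first and fall back to a migration only when the small insert reports failure, can be sketched independently of V8's types:

#include <cassert>
#include <optional>

struct Small { int used = 0; static constexpr int kMax = 4; };

// Returns the updated table on success, or nullopt when the small
// table has no room left (the analogue of a null MaybeHandle).
std::optional<Small> SmallAdd(Small t) {
  if (t.used >= Small::kMax) return std::nullopt;
  ++t.used;
  return t;
}

int main() {
  Small t;
  bool migrated = false;
  for (int i = 0; i < 6; ++i) {
    if (auto grown = SmallAdd(t)) {
      t = *grown;
    } else {
      migrated = true;  // here the real code calls AdjustRepresentation
      break;            // and retries the add on the large table
    }
  }
  assert(migrated && t.used == Small::kMax);
}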
+
+void OrderedNameDictionaryHandler::SetEntry(Isolate* isolate, HeapObject table,
+ int entry, Object key, Object value,
+ PropertyDetails details) {
+ DisallowHeapAllocation no_gc;
+ if (table->IsSmallOrderedNameDictionary()) {
+ return SmallOrderedNameDictionary::cast(table)->SetEntry(
+ isolate, entry, key, value, details);
+ }
+
+ DCHECK(table->IsOrderedNameDictionary());
+ return OrderedNameDictionary::cast(table)->SetEntry(isolate, entry, key,
+ value, details);
+}
+
+int OrderedNameDictionaryHandler::FindEntry(Isolate* isolate, HeapObject table,
+ Name key) {
+ DisallowHeapAllocation no_gc;
+ if (table->IsSmallOrderedNameDictionary()) {
+ int entry =
+ SmallOrderedNameDictionary::cast(table)->FindEntry(isolate, key);
+ return entry == SmallOrderedNameDictionary::kNotFound
+ ? OrderedNameDictionaryHandler::kNotFound
+ : entry;
+ }
+
+ DCHECK(table->IsOrderedNameDictionary());
+ int entry = OrderedNameDictionary::cast(table)->FindEntry(isolate, key);
+ return entry == OrderedNameDictionary::kNotFound
+ ? OrderedNameDictionaryHandler::kNotFound
+ : entry;
+}
+
+Object OrderedNameDictionaryHandler::ValueAt(HeapObject table, int entry) {
+ if (table->IsSmallOrderedNameDictionary()) {
+ return SmallOrderedNameDictionary::cast(table)->ValueAt(entry);
+ }
+
+ DCHECK(table->IsOrderedNameDictionary());
+ return OrderedNameDictionary::cast(table)->ValueAt(entry);
+}
+
+void OrderedNameDictionaryHandler::ValueAtPut(HeapObject table, int entry,
+ Object value) {
+ if (table->IsSmallOrderedNameDictionary()) {
+ return SmallOrderedNameDictionary::cast(table)->ValueAtPut(entry, value);
+ }
+
+ DCHECK(table->IsOrderedNameDictionary());
+ OrderedNameDictionary::cast(table)->ValueAtPut(entry, value);
+}
+
+PropertyDetails OrderedNameDictionaryHandler::DetailsAt(HeapObject table,
+ int entry) {
+ if (table->IsSmallOrderedNameDictionary()) {
+ return SmallOrderedNameDictionary::cast(table)->DetailsAt(entry);
+ }
+
+ DCHECK(table->IsOrderedNameDictionary());
+ return OrderedNameDictionary::cast(table)->DetailsAt(entry);
+}
+
+void OrderedNameDictionaryHandler::DetailsAtPut(HeapObject table, int entry,
+ PropertyDetails details) {
+ if (table->IsSmallOrderedNameDictionary()) {
+ return SmallOrderedNameDictionary::cast(table)->DetailsAtPut(entry,
+ details);
+ }
+
+ DCHECK(table->IsOrderedNameDictionary());
+ OrderedNameDictionary::cast(table)->DetailsAtPut(entry, details);
+}
+
+int OrderedNameDictionaryHandler::Hash(HeapObject table) {
+ if (table->IsSmallOrderedNameDictionary()) {
+ return SmallOrderedNameDictionary::cast(table)->Hash();
+ }
+
+ DCHECK(table->IsOrderedNameDictionary());
+ return OrderedNameDictionary::cast(table)->Hash();
+}
+
+void OrderedNameDictionaryHandler::SetHash(HeapObject table, int hash) {
+ if (table->IsSmallOrderedNameDictionary()) {
+ return SmallOrderedNameDictionary::cast(table)->SetHash(hash);
+ }
+
+ DCHECK(table->IsOrderedNameDictionary());
+ OrderedNameDictionary::cast(table)->SetHash(hash);
+}
+
+Name OrderedNameDictionaryHandler::KeyAt(HeapObject table, int entry) {
+ if (table->IsSmallOrderedNameDictionary()) {
+ return Name::cast(SmallOrderedNameDictionary::cast(table)->KeyAt(entry));
+ }
+
+ return Name::cast(OrderedNameDictionary::cast(table)->KeyAt(entry));
+}
+
+int OrderedNameDictionaryHandler::NumberOfElements(HeapObject table) {
+ if (table->IsSmallOrderedNameDictionary()) {
+ return SmallOrderedNameDictionary::cast(table)->NumberOfElements();
+ }
+
+ return OrderedNameDictionary::cast(table)->NumberOfElements();
+}
+
+int OrderedNameDictionaryHandler::Capacity(HeapObject table) {
+ if (table->IsSmallOrderedNameDictionary()) {
+ return SmallOrderedNameDictionary::cast(table)->Capacity();
+ }
+
+ return OrderedNameDictionary::cast(table)->Capacity();
+}
+
+Handle<HeapObject> OrderedNameDictionaryHandler::Shrink(
+ Isolate* isolate, Handle<HeapObject> table) {
+ if (table->IsSmallOrderedNameDictionary()) {
+ Handle<SmallOrderedNameDictionary> small_dict =
+ Handle<SmallOrderedNameDictionary>::cast(table);
+ return SmallOrderedNameDictionary::Shrink(isolate, small_dict);
+ }
+
+ Handle<OrderedNameDictionary> large_dict =
+ Handle<OrderedNameDictionary>::cast(table);
+ return OrderedNameDictionary::Shrink(isolate, large_dict);
+}
+
+Handle<HeapObject> OrderedNameDictionaryHandler::DeleteEntry(
+ Isolate* isolate, Handle<HeapObject> table, int entry) {
+ DisallowHeapAllocation no_gc;
+ if (table->IsSmallOrderedNameDictionary()) {
+ Handle<SmallOrderedNameDictionary> small_dict =
+ Handle<SmallOrderedNameDictionary>::cast(table);
+ return SmallOrderedNameDictionary::DeleteEntry(isolate, small_dict, entry);
+ }
+
+ Handle<OrderedNameDictionary> large_dict =
+ Handle<OrderedNameDictionary>::cast(table);
+ return OrderedNameDictionary::DeleteEntry(isolate, large_dict, entry);
+}
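
Every accessor above repeats the same shape: test the concrete representation, cast, and forward. A compact stand-in using std::variant shows the dispatch pattern; the types and values here are illustrative, not V8's.

#include <cassert>
#include <variant>

struct SmallTable { int value_at(int entry) const { return entry * 2; } };
struct LargeTable { int value_at(int entry) const { return entry * 3; } };
using Table = std::variant<SmallTable, LargeTable>;

int ValueAt(const Table& table, int entry) {
  if (const auto* small = std::get_if<SmallTable>(&table)) {
    return small->value_at(entry);  // compact representation
  }
  return std::get<LargeTable>(table).value_at(entry);  // large representation
}

int main() {
  assert(ValueAt(Table{SmallTable{}}, 4) == 8);
  assert(ValueAt(Table{LargeTable{}}, 4) == 12);
}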
+
template <class Derived, class TableType>
void OrderedHashTableIterator<Derived, TableType>::Transition() {
DisallowHeapAllocation no_allocation;
- TableType* table = TableType::cast(this->table());
+ TableType table = TableType::cast(this->table());
if (!table->IsObsolete()) return;
int index = Smi::ToInt(this->index());
while (table->IsObsolete()) {
- TableType* next_table = table->NextTable();
+ TableType next_table = table->NextTable();
if (index > 0) {
int nod = table->NumberOfDeletedElements();
@@ -720,7 +1232,7 @@ bool OrderedHashTableIterator<Derived, TableType>::HasMore() {
Transition();
- TableType* table = TableType::cast(this->table());
+ TableType table = TableType::cast(this->table());
int index = Smi::ToInt(this->index());
int used_capacity = table->UsedCapacity();
@@ -742,7 +1254,7 @@ OrderedHashTableIterator<JSSetIterator, OrderedHashSet>::HasMore();
template void
OrderedHashTableIterator<JSSetIterator, OrderedHashSet>::MoveNext();
-template Object*
+template Object
OrderedHashTableIterator<JSSetIterator, OrderedHashSet>::CurrentKey();
template void
@@ -754,7 +1266,7 @@ OrderedHashTableIterator<JSMapIterator, OrderedHashMap>::HasMore();
template void
OrderedHashTableIterator<JSMapIterator, OrderedHashMap>::MoveNext();
-template Object*
+template Object
OrderedHashTableIterator<JSMapIterator, OrderedHashMap>::CurrentKey();
template void
diff --git a/deps/v8/src/objects/ordered-hash-table.h b/deps/v8/src/objects/ordered-hash-table.h
index 6c606efc75..6e938d53b2 100644
--- a/deps/v8/src/objects/ordered-hash-table.h
+++ b/deps/v8/src/objects/ordered-hash-table.h
@@ -8,6 +8,8 @@
#include "src/globals.h"
#include "src/objects/fixed-array.h"
#include "src/objects/js-objects.h"
+#include "src/objects/smi.h"
+#include "src/roots.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -15,45 +17,11 @@
namespace v8 {
namespace internal {
-// Non-templatized base class for {OrderedHashTable}s.
-// TODO(hash): Unify this with the HashTableBase above.
-class OrderedHashTableBase : public FixedArray {
- public:
- static const int kNotFound = -1;
- static const int kMinCapacity = 4;
-
- static const int kNumberOfElementsIndex = 0;
- // The next table is stored at the same index as the nof elements.
- static const int kNextTableIndex = kNumberOfElementsIndex;
- static const int kNumberOfDeletedElementsIndex = kNumberOfElementsIndex + 1;
- static const int kNumberOfBucketsIndex = kNumberOfDeletedElementsIndex + 1;
- static const int kHashTableStartIndex = kNumberOfBucketsIndex + 1;
- static const int kRemovedHolesIndex = kHashTableStartIndex;
-
- static constexpr const int kNumberOfElementsOffset =
- FixedArray::OffsetOfElementAt(kNumberOfElementsIndex);
- static constexpr const int kNextTableOffset =
- FixedArray::OffsetOfElementAt(kNextTableIndex);
- static constexpr const int kNumberOfDeletedElementsOffset =
- FixedArray::OffsetOfElementAt(kNumberOfDeletedElementsIndex);
- static constexpr const int kNumberOfBucketsOffset =
- FixedArray::OffsetOfElementAt(kNumberOfBucketsIndex);
- static constexpr const int kHashTableStartOffset =
- FixedArray::OffsetOfElementAt(kHashTableStartIndex);
-
- static const int kLoadFactor = 2;
-
- // NumberOfDeletedElements is set to kClearedTableSentinel when
- // the table is cleared, which allows iterator transitions to
- // optimize that case.
- static const int kClearedTableSentinel = -1;
-};
-
// OrderedHashTable is a HashTable with Object keys that preserves
// insertion order. There are Map and Set interfaces (OrderedHashMap
// and OrderedHashSet, below). It is meant to be used by JSMap/JSSet.
//
-// Only Object* keys are supported, with Object::SameValueZero() used as the
+// Only Object keys are supported, with Object::SameValueZero() used as the
// equality operator and Object::GetHash() for the hash function.
//
// Based on the "Deterministic Hash Table" as described by Jason Orendorff at
@@ -61,37 +29,38 @@ class OrderedHashTableBase : public FixedArray {
// Originally attributed to Tyler Close.
//
// Memory layout:
-// [0]: element count
-// [1]: deleted element count
-// [2]: bucket count
-// [3..(3 + NumberOfBuckets() - 1)]: "hash table", where each item is an
-// offset into the data table (see below) where the
-// first item in this bucket is stored.
-// [3 + NumberOfBuckets()..length]: "data table", an array of length
-// Capacity() * kEntrySize, where the first entrysize
-// items are handled by the derived class and the
-// item at kChainOffset is another entry into the
-// data table indicating the next entry in this hash
-// bucket.
+// [0] : Prefix
+// [kPrefixSize]: element count
+// [kPrefixSize + 1]: deleted element count
+// [kPrefixSize + 2]: bucket count
+// [kPrefixSize + 3..(3 + NumberOfBuckets() - 1)]: "hash table",
+// where each item is an offset into the
+// data table (see below) where the first
+// item in this bucket is stored.
+// [kPrefixSize + 3 + NumberOfBuckets()..length]: "data table", an
+// array of length Capacity() * kEntrySize,
+// where the first entrysize items are
+// handled by the derived class and the
+// item at kChainOffset is another entry
+// into the data table indicating the next
+// entry in this hash bucket.
//
// When we transition the table to a new version we obsolete it and reuse parts
// of the memory to store information how to transition an iterator to the new
// table:
//
// Memory layout for obsolete table:
-// [0]: bucket count
-// [1]: Next newer table
-// [2]: Number of removed holes or -1 when the table was cleared.
-// [3..(3 + NumberOfRemovedHoles() - 1)]: The indexes of the removed holes.
-// [3 + NumberOfRemovedHoles()..length]: Not used
-//
+// [0] : Prefix
+// [kPrefixSize + 0]: bucket count
+// [kPrefixSize + 1]: Next newer table
+// [kPrefixSize + 2]: Number of removed holes or -1 when the table was
+// cleared.
+// [kPrefixSize + 3..(3 + NumberOfRemovedHoles() - 1)]: The indexes
+// of the removed holes.
+// [kPrefixSize + 3 + NumberOfRemovedHoles()..length]: Not used
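
A worked example of the live-table layout above, assuming a hypothetical kPrefixSize of 0, the minimum capacity of 4, and the load factor of 2: three header slots come first, then two bucket heads, then the data table.

#include <cassert>

constexpr int kPrefixSize = 0;  // hypothetical: sets and maps use 0
constexpr int kLoadFactor = 2;
constexpr int NumberOfElementsIndex() { return kPrefixSize; }
constexpr int NumberOfDeletedElementsIndex() { return kPrefixSize + 1; }
constexpr int NumberOfBucketsIndex() { return kPrefixSize + 2; }
constexpr int HashTableStartIndex() { return kPrefixSize + 3; }

int main() {
  int capacity = 4;
  int buckets = capacity / kLoadFactor;  // 2 bucket heads
  int data_table_start = HashTableStartIndex() + buckets;
  assert(NumberOfBucketsIndex() == 2);
  assert(data_table_start == 5);  // entries begin at index 5
}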
template <class Derived, int entrysize>
-class OrderedHashTable : public OrderedHashTableBase {
+class OrderedHashTable : public FixedArray {
public:
- // Returns an OrderedHashTable with a capacity of at least |capacity|.
- static Handle<Derived> Allocate(Isolate* isolate, int capacity,
- PretenureFlag pretenure = NOT_TENURED);
-
// Returns an OrderedHashTable (possibly |table|) with enough space
// to add at least one new element.
static Handle<Derived> EnsureGrowable(Isolate* isolate,
@@ -106,18 +75,20 @@ class OrderedHashTable : public OrderedHashTableBase {
static Handle<Derived> Clear(Isolate* isolate, Handle<Derived> table);
// Returns true if the OrderedHashTable contains the key
- static bool HasKey(Isolate* isolate, Derived* table, Object* key);
+ static bool HasKey(Isolate* isolate, Derived table, Object key);
// Returns a true value if the OrderedHashTable contains the key and
// the key has been deleted. This does not shrink the table.
- static bool Delete(Isolate* isolate, Derived* table, Object* key);
+ static bool Delete(Isolate* isolate, Derived table, Object key);
+
+ int FindEntry(Isolate* isolate, Object key);
int NumberOfElements() const {
- return Smi::ToInt(get(kNumberOfElementsIndex));
+ return Smi::ToInt(get(NumberOfElementsIndex()));
}
int NumberOfDeletedElements() const {
- return Smi::ToInt(get(kNumberOfDeletedElementsIndex));
+ return Smi::ToInt(get(NumberOfDeletedElementsIndex()));
}
// Returns the number of contiguous entries in the data table, starting at 0,
@@ -126,99 +97,135 @@ class OrderedHashTable : public OrderedHashTableBase {
return NumberOfElements() + NumberOfDeletedElements();
}
- int NumberOfBuckets() const { return Smi::ToInt(get(kNumberOfBucketsIndex)); }
+ int NumberOfBuckets() const {
+ return Smi::ToInt(get(NumberOfBucketsIndex()));
+ }
// Returns an index into |this| for the given entry.
int EntryToIndex(int entry) {
- return kHashTableStartIndex + NumberOfBuckets() + (entry * kEntrySize);
+ return HashTableStartIndex() + NumberOfBuckets() + (entry * kEntrySize);
}
int HashToBucket(int hash) { return hash & (NumberOfBuckets() - 1); }
int HashToEntry(int hash) {
int bucket = HashToBucket(hash);
- Object* entry = this->get(kHashTableStartIndex + bucket);
+ Object entry = this->get(HashTableStartIndex() + bucket);
return Smi::ToInt(entry);
}
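
HashToBucket relies on the bucket count being a power of two, so the mask is equivalent to a modulo but cheaper; a quick standalone check of that equivalence:

#include <cassert>

int main() {
  const int number_of_buckets = 8;  // power of two, as the table guarantees
  for (int hash = 0; hash < 1000; ++hash) {
    assert((hash & (number_of_buckets - 1)) == hash % number_of_buckets);
  }
}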
- int KeyToFirstEntry(Isolate* isolate, Object* key) {
- // This special cases for Smi, so that we avoid the HandleScope
- // creation below.
- if (key->IsSmi()) {
- uint32_t hash = ComputeUnseededHash(Smi::ToInt(key));
- return HashToEntry(hash & Smi::kMaxValue);
- }
- HandleScope scope(isolate);
- Object* hash = key->GetHash();
- // If the object does not have an identity hash, it was never used as a key
- if (hash->IsUndefined(isolate)) return kNotFound;
- return HashToEntry(Smi::ToInt(hash));
- }
-
- int FindEntry(Isolate* isolate, Object* key) {
- int entry = KeyToFirstEntry(isolate, key);
- // Walk the chain in the bucket to find the key.
- while (entry != kNotFound) {
- Object* candidate_key = KeyAt(entry);
- if (candidate_key->SameValueZero(key)) break;
- entry = NextChainEntry(entry);
- }
-
- return entry;
- }
-
int NextChainEntry(int entry) {
- Object* next_entry = get(EntryToIndex(entry) + kChainOffset);
+ Object next_entry = get(EntryToIndex(entry) + kChainOffset);
return Smi::ToInt(next_entry);
}
// Use KeyAt(i)->IsTheHole(isolate) to determine if this is a deleted entry.
- Object* KeyAt(int entry) {
+ Object KeyAt(int entry) {
DCHECK_LT(entry, this->UsedCapacity());
return get(EntryToIndex(entry));
}
- bool IsObsolete() { return !get(kNextTableIndex)->IsSmi(); }
+ bool IsObsolete() { return !get(NextTableIndex())->IsSmi(); }
// The next newer table. This is only valid if the table is obsolete.
- Derived* NextTable() { return Derived::cast(get(kNextTableIndex)); }
+ Derived NextTable() { return Derived::cast(get(NextTableIndex())); }
// When the table is obsolete we store the indexes of the removed holes.
int RemovedIndexAt(int index) {
- return Smi::ToInt(get(kRemovedHolesIndex + index));
+ return Smi::ToInt(get(RemovedHolesIndex() + index));
}
+ // The extra +1 is for linking the bucket chains together.
static const int kEntrySize = entrysize + 1;
static const int kChainOffset = entrysize;
- static const int kMaxCapacity =
- (FixedArray::kMaxLength - kHashTableStartIndex) /
- (1 + (kEntrySize * kLoadFactor));
+ static const int kNotFound = -1;
+ static const int kMinCapacity = 4;
+
+ static constexpr int PrefixIndex() { return 0; }
+
+ static constexpr int NumberOfElementsIndex() { return Derived::kPrefixSize; }
+
+ // The next table is stored at the same index as the nof elements.
+ static constexpr int NextTableIndex() { return NumberOfElementsIndex(); }
+
+ static constexpr int NumberOfDeletedElementsIndex() {
+ return NumberOfElementsIndex() + 1;
+ }
+
+ static constexpr int NumberOfBucketsIndex() {
+ return NumberOfDeletedElementsIndex() + 1;
+ }
+
+ static constexpr int HashTableStartIndex() {
+ return NumberOfBucketsIndex() + 1;
+ }
+
+ static constexpr int RemovedHolesIndex() { return HashTableStartIndex(); }
+
+ static constexpr int NumberOfElementsOffset() {
+ return FixedArray::OffsetOfElementAt(NumberOfElementsIndex());
+ }
+
+ static constexpr int NextTableOffset() {
+ return FixedArray::OffsetOfElementAt(NextTableIndex());
+ }
+
+ static constexpr int NumberOfDeletedElementsOffset() {
+ return FixedArray::OffsetOfElementAt(NumberOfDeletedElementsIndex());
+ }
+
+ static constexpr int NumberOfBucketsOffset() {
+ return FixedArray::OffsetOfElementAt(NumberOfBucketsIndex());
+ }
+
+ static constexpr int HashTableStartOffset() {
+ return FixedArray::OffsetOfElementAt(HashTableStartIndex());
+ }
+
+ static const int kLoadFactor = 2;
+
+ // NumberOfDeletedElements is set to kClearedTableSentinel when
+ // the table is cleared, which allows iterator transitions to
+ // optimize that case.
+ static const int kClearedTableSentinel = -1;
+ static constexpr int MaxCapacity() {
+ return (FixedArray::kMaxLength - HashTableStartIndex()) /
+ (1 + (kEntrySize * kLoadFactor));
+ }
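
The MaxCapacity() bound can be sanity-checked in isolation; with a hypothetical stand-in for the array length limit (the real FixedArray::kMaxLength differs), a table allocated at that capacity still fits:

#include <cassert>

constexpr int kMaxLength = 1000;         // hypothetical FixedArray limit
constexpr int kHashTableStartIndex = 3;  // header slots before the buckets
constexpr int kEntrySize = 2;            // key + chain link, for entrysize 1
constexpr int kLoadFactor = 2;

int main() {
  int max_capacity =
      (kMaxLength - kHashTableStartIndex) / (1 + kEntrySize * kLoadFactor);
  int buckets = max_capacity / kLoadFactor;
  int slots_needed =
      kHashTableStartIndex + buckets + max_capacity * kEntrySize;
  assert(slots_needed <= kMaxLength);  // the bound is conservatively safe
}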
protected:
+ // Returns an OrderedHashTable with a capacity of at least |capacity|.
+ static Handle<Derived> Allocate(Isolate* isolate, int capacity,
+ PretenureFlag pretenure = NOT_TENURED);
static Handle<Derived> Rehash(Isolate* isolate, Handle<Derived> table,
int new_capacity);
void SetNumberOfBuckets(int num) {
- set(kNumberOfBucketsIndex, Smi::FromInt(num));
+ set(NumberOfBucketsIndex(), Smi::FromInt(num));
}
void SetNumberOfElements(int num) {
- set(kNumberOfElementsIndex, Smi::FromInt(num));
+ set(NumberOfElementsIndex(), Smi::FromInt(num));
}
void SetNumberOfDeletedElements(int num) {
- set(kNumberOfDeletedElementsIndex, Smi::FromInt(num));
+ set(NumberOfDeletedElementsIndex(), Smi::FromInt(num));
}
// Returns the number of elements that can fit into the allocated buffer.
int Capacity() { return NumberOfBuckets() * kLoadFactor; }
- void SetNextTable(Derived* next_table) { set(kNextTableIndex, next_table); }
+ void SetNextTable(Derived next_table) { set(NextTableIndex(), next_table); }
void SetRemovedIndexAt(int index, int removed_index) {
- return set(kRemovedHolesIndex + index, Smi::FromInt(removed_index));
+ return set(RemovedHolesIndex() + index, Smi::FromInt(removed_index));
}
+
+ OBJECT_CONSTRUCTORS(OrderedHashTable, FixedArray)
+
+ private:
+ friend class OrderedNameDictionaryHandler;
};
class OrderedHashSet : public OrderedHashTable<OrderedHashSet, 1> {
@@ -231,9 +238,17 @@ class OrderedHashSet : public OrderedHashTable<OrderedHashSet, 1> {
static Handle<FixedArray> ConvertToKeysArray(Isolate* isolate,
Handle<OrderedHashSet> table,
GetKeysConversion convert);
- static HeapObject* GetEmpty(ReadOnlyRoots ro_roots);
+ static Handle<OrderedHashSet> Rehash(Isolate* isolate,
+ Handle<OrderedHashSet> table,
+ int new_capacity);
+ static Handle<OrderedHashSet> Allocate(Isolate* isolate, int capacity,
+ PretenureFlag pretenure = NOT_TENURED);
+ static HeapObject GetEmpty(ReadOnlyRoots ro_roots);
static inline RootIndex GetMapRootIndex();
static inline bool Is(Handle<HeapObject> table);
+ static const int kPrefixSize = 0;
+
+ OBJECT_CONSTRUCTORS(OrderedHashSet, OrderedHashTable<OrderedHashSet, 1>)
};
class OrderedHashMap : public OrderedHashTable<OrderedHashMap, 2> {
@@ -245,15 +260,26 @@ class OrderedHashMap : public OrderedHashTable<OrderedHashMap, 2> {
static Handle<OrderedHashMap> Add(Isolate* isolate,
Handle<OrderedHashMap> table,
Handle<Object> key, Handle<Object> value);
- Object* ValueAt(int entry);
- static Object* GetHash(Isolate* isolate, Object* key);
+ static Handle<OrderedHashMap> Allocate(Isolate* isolate, int capacity,
+ PretenureFlag pretenure = NOT_TENURED);
+ static Handle<OrderedHashMap> Rehash(Isolate* isolate,
+ Handle<OrderedHashMap> table,
+ int new_capacity);
+ Object ValueAt(int entry);
+
+ // This takes and returns raw Address values containing tagged Object
+ // pointers because it is called via ExternalReference.
+ static Address GetHash(Isolate* isolate, Address raw_key);
- static HeapObject* GetEmpty(ReadOnlyRoots ro_roots);
+ static HeapObject GetEmpty(ReadOnlyRoots ro_roots);
static inline RootIndex GetMapRootIndex();
static inline bool Is(Handle<HeapObject> table);
static const int kValueOffset = 1;
+ static const int kPrefixSize = 0;
+
+ OBJECT_CONSTRUCTORS(OrderedHashMap, OrderedHashTable<OrderedHashMap, 2>)
};
// This is similar to the OrderedHashTable, except for the memory
@@ -265,11 +291,20 @@ class OrderedHashMap : public OrderedHashTable<OrderedHashMap, 2> {
// that the DataTable entries start aligned. A bucket or chain value
// of 255 is used to denote an unknown entry.
//
-// Memory layout: [ Header ] [ Padding ] [ DataTable ] [ HashTable ] [ Chains ]
+// The prefix size is calculated as kPrefixSize * kTaggedSize.
+//
+// Memory layout: [ Prefix ] [ Header ] [ Padding ] [ DataTable ] [ HashTable ]
+// [ Chains ]
//
// The indexes are represented as bytes; on a 64-bit machine with
// kEntrySize = 1, capacity = 4 and entries = 2:
//
+// [ 0 ] : Prefix
+//
+// Note: For the sake of brevity, the following start at index 0,
+// but they actually start from kPrefixSize * kTaggedSize to
+// account for the prefix.
+//
// [ Header ] :
// [0] : Number of elements
// [1] : Number of deleted elements
@@ -314,15 +349,15 @@ class SmallOrderedHashTable : public HeapObject {
// Returns a true value if the table contains the key and
// the key has been deleted. This does not shrink the table.
- static bool Delete(Isolate* isolate, Derived* table, Object* key);
+ static bool Delete(Isolate* isolate, Derived table, Object key);
// Returns a SmallOrderedHashTable (possibly |table|) with enough
// space to add at least one new element. Returns empty handle if
// we've already reached MaxCapacity.
static MaybeHandle<Derived> Grow(Isolate* isolate, Handle<Derived> table);
- static Handle<Derived> Rehash(Isolate* isolate, Handle<Derived> table,
- int new_capacity);
+ int FindEntry(Isolate* isolate, Object key);
+ static Handle<Derived> Shrink(Isolate* isolate, Handle<Derived> table);
// Iterates only fields in the DataTable.
class BodyDescriptor;
@@ -336,10 +371,10 @@ class SmallOrderedHashTable : public HeapObject {
int data_table_size = DataTableSizeFor(capacity);
int hash_table_size = capacity / kLoadFactor;
int chain_table_size = capacity;
- int total_size = kDataTableStartOffset + data_table_size + hash_table_size +
- chain_table_size;
+ int total_size = DataTableStartOffset() + data_table_size +
+ hash_table_size + chain_table_size;
- return RoundUp(total_size, kPointerSize);
+ return RoundUp(total_size, kTaggedSize);
}
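
SizeFor() above can be replayed with concrete, hypothetical numbers; for a set (kEntrySize of 1) with capacity 4 on a 64-bit target, the data table dominates and the total rounds up to the next tagged-size multiple:

#include <cassert>

constexpr int kTaggedSize = 8;            // 64-bit tagged pointer
constexpr int kEntrySize = 1;             // set: key only
constexpr int kLoadFactor = 2;
constexpr int kDataTableStartOffset = 8;  // hypothetical rounded header size

constexpr int RoundUp(int x, int multiple) {
  return ((x + multiple - 1) / multiple) * multiple;
}

constexpr int SizeFor(int capacity) {
  int data_table_size = capacity * kEntrySize * kTaggedSize;
  int hash_table_size = capacity / kLoadFactor;  // one byte per bucket
  int chain_table_size = capacity;               // one byte per entry
  return RoundUp(kDataTableStartOffset + data_table_size + hash_table_size +
                     chain_table_size,
                 kTaggedSize);
}

int main() {
  assert(SizeFor(4) == 48);  // 8 + 32 + 2 + 4 = 46, rounded up to 48
}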
// Returns the number of elements that can fit into the allocated table.
@@ -353,20 +388,26 @@ class SmallOrderedHashTable : public HeapObject {
// Returns the number of elements that are present in the table.
int NumberOfElements() const {
- int nof_elements = getByte(kNumberOfElementsOffset, 0);
+ int nof_elements = getByte(NumberOfElementsOffset(), 0);
DCHECK_LE(nof_elements, Capacity());
return nof_elements;
}
int NumberOfDeletedElements() const {
- int nof_deleted_elements = getByte(kNumberOfDeletedElementsOffset, 0);
+ int nof_deleted_elements = getByte(NumberOfDeletedElementsOffset(), 0);
DCHECK_LE(nof_deleted_elements, Capacity());
return nof_deleted_elements;
}
- int NumberOfBuckets() const { return getByte(kNumberOfBucketsOffset, 0); }
+ int NumberOfBuckets() const { return getByte(NumberOfBucketsOffset(), 0); }
+
+ Object KeyAt(int entry) const {
+ DCHECK_LT(entry, Capacity());
+ Offset entry_offset = GetDataEntryOffset(entry, Derived::kKeyIndex);
+ return READ_FIELD(this, entry_offset);
+ }
DECL_VERIFIER(SmallOrderedHashTable)
@@ -394,18 +435,22 @@ class SmallOrderedHashTable : public HeapObject {
static const int kGrowthHack = 256;
protected:
- void SetDataEntry(int entry, int relative_index, Object* value);
+ static Handle<Derived> Rehash(Isolate* isolate, Handle<Derived> table,
+ int new_capacity);
+
+ void SetDataEntry(int entry, int relative_index, Object value);
// TODO(gsathya): Calculate all the various possible values for this
// at compile time since capacity can only be 4 different values.
Offset GetBucketsStartOffset() const {
int capacity = Capacity();
int data_table_size = DataTableSizeFor(capacity);
- return kDataTableStartOffset + data_table_size;
+ return DataTableStartOffset() + data_table_size;
}
Address GetHashTableStartAddress(int capacity) const {
- return FIELD_ADDR(this, kDataTableStartOffset + DataTableSizeFor(capacity));
+ return FIELD_ADDR(this,
+ DataTableStartOffset() + DataTableSizeFor(capacity));
}
void SetFirstEntry(int bucket, byte value) {
@@ -427,7 +472,7 @@ class SmallOrderedHashTable : public HeapObject {
int data_table_size = DataTableSizeFor(capacity);
int hash_table_size = nof_buckets;
- return kDataTableStartOffset + data_table_size + hash_table_size;
+ return DataTableStartOffset() + data_table_size + hash_table_size;
}
void SetNextEntry(int entry, int next_entry) {
@@ -442,19 +487,13 @@ class SmallOrderedHashTable : public HeapObject {
return getByte(GetChainTableOffset(), entry);
}
- Object* GetDataEntry(int entry, int relative_index) {
+ Object GetDataEntry(int entry, int relative_index) {
DCHECK_LT(entry, Capacity());
DCHECK_LE(static_cast<unsigned>(relative_index), Derived::kEntrySize);
Offset entry_offset = GetDataEntryOffset(entry, relative_index);
return READ_FIELD(this, entry_offset);
}
- Object* KeyAt(int entry) const {
- DCHECK_LT(entry, Capacity());
- Offset entry_offset = GetDataEntryOffset(entry, Derived::kKeyIndex);
- return READ_FIELD(this, entry_offset);
- }
-
int HashToBucket(int hash) const { return hash & (NumberOfBuckets() - 1); }
int HashToFirstEntry(int hash) const {
@@ -464,63 +503,59 @@ class SmallOrderedHashTable : public HeapObject {
return entry;
}
- void SetNumberOfBuckets(int num) { setByte(kNumberOfBucketsOffset, 0, num); }
+ void SetNumberOfBuckets(int num) { setByte(NumberOfBucketsOffset(), 0, num); }
void SetNumberOfElements(int num) {
DCHECK_LE(static_cast<unsigned>(num), Capacity());
- setByte(kNumberOfElementsOffset, 0, num);
+ setByte(NumberOfElementsOffset(), 0, num);
}
void SetNumberOfDeletedElements(int num) {
DCHECK_LE(static_cast<unsigned>(num), Capacity());
- setByte(kNumberOfDeletedElementsOffset, 0, num);
+ setByte(NumberOfDeletedElementsOffset(), 0, num);
}
- int FindEntry(Isolate* isolate, Object* key) {
- DisallowHeapAllocation no_gc;
- Object* hash = key->GetHash();
+ static constexpr Offset PrefixOffset() { return kHeaderSize; }
- if (hash->IsUndefined(isolate)) return kNotFound;
- int entry = HashToFirstEntry(Smi::ToInt(hash));
+ static constexpr Offset NumberOfElementsOffset() {
+ return PrefixOffset() + (Derived::kPrefixSize * kTaggedSize);
+ }
- // Walk the chain in the bucket to find the key.
- while (entry != kNotFound) {
- Object* candidate_key = KeyAt(entry);
- if (candidate_key->SameValueZero(key)) return entry;
- entry = GetNextEntry(entry);
- }
- return kNotFound;
+ static constexpr Offset NumberOfDeletedElementsOffset() {
+ return NumberOfElementsOffset() + kOneByteSize;
}
- static const Offset kNumberOfElementsOffset = kHeaderSize;
- static const Offset kNumberOfDeletedElementsOffset =
- kNumberOfElementsOffset + kOneByteSize;
- static const Offset kNumberOfBucketsOffset =
- kNumberOfDeletedElementsOffset + kOneByteSize;
- static const constexpr Offset kDataTableStartOffset =
- RoundUp<kPointerSize>(kNumberOfBucketsOffset);
+ static constexpr Offset NumberOfBucketsOffset() {
+ return NumberOfDeletedElementsOffset() + kOneByteSize;
+ }
+
+ static constexpr Offset DataTableStartOffset() {
+ return RoundUp<kTaggedSize>(NumberOfBucketsOffset());
+ }
static constexpr int DataTableSizeFor(int capacity) {
- return capacity * Derived::kEntrySize * kPointerSize;
+ return capacity * Derived::kEntrySize * kTaggedSize;
}
// This is used for accessing the non |DataTable| part of the
// structure.
byte getByte(Offset offset, ByteIndex index) const {
- DCHECK(offset < kDataTableStartOffset || offset >= GetBucketsStartOffset());
+ DCHECK(offset < DataTableStartOffset() ||
+ offset >= GetBucketsStartOffset());
return READ_BYTE_FIELD(this, offset + (index * kOneByteSize));
}
void setByte(Offset offset, ByteIndex index, byte value) {
- DCHECK(offset < kDataTableStartOffset || offset >= GetBucketsStartOffset());
+ DCHECK(offset < DataTableStartOffset() ||
+ offset >= GetBucketsStartOffset());
WRITE_BYTE_FIELD(this, offset + (index * kOneByteSize), value);
}
Offset GetDataEntryOffset(int entry, int relative_index) const {
DCHECK_LT(entry, Capacity());
- int offset_in_datatable = entry * Derived::kEntrySize * kPointerSize;
- int offset_in_entry = relative_index * kPointerSize;
- return kDataTableStartOffset + offset_in_datatable + offset_in_entry;
+ int offset_in_datatable = entry * Derived::kEntrySize * kTaggedSize;
+ int offset_in_entry = relative_index * kTaggedSize;
+ return DataTableStartOffset() + offset_in_datatable + offset_in_entry;
}
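
The offset arithmetic in GetDataEntryOffset() is plain multiplication over tagged-size slots; a one-off check with hypothetical constants (a kEntrySize of 3 matches a key/value/details entry):

#include <cassert>

int main() {
  const int kTaggedSize = 8, kEntrySize = 3, kDataTableStartOffset = 8;
  int entry = 2, relative_index = 1;  // e.g. the value slot of entry 2
  int offset = kDataTableStartOffset + entry * kEntrySize * kTaggedSize +
               relative_index * kTaggedSize;
  assert(offset == 8 + 48 + 8);  // 64 bytes into the object
}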
int UsedCapacity() const {
@@ -533,7 +568,10 @@ class SmallOrderedHashTable : public HeapObject {
private:
friend class OrderedHashMapHandler;
friend class OrderedHashSetHandler;
+ friend class OrderedNameDictionaryHandler;
friend class CodeStubAssembler;
+
+ OBJECT_CONSTRUCTORS(SmallOrderedHashTable, HeapObject)
};
class SmallOrderedHashSet : public SmallOrderedHashTable<SmallOrderedHashSet> {
@@ -541,9 +579,11 @@ class SmallOrderedHashSet : public SmallOrderedHashTable<SmallOrderedHashSet> {
DECL_CAST(SmallOrderedHashSet)
DECL_PRINTER(SmallOrderedHashSet)
+ DECL_VERIFIER(SmallOrderedHashSet)
static const int kKeyIndex = 0;
static const int kEntrySize = 1;
+ static const int kPrefixSize = 0;
// Adds |value| to |table|, if the capacity isn't enough, a new
// table is created. The original |table| is returned if there is
@@ -553,6 +593,11 @@ class SmallOrderedHashSet : public SmallOrderedHashTable<SmallOrderedHashSet> {
Handle<Object> key);
static inline bool Is(Handle<HeapObject> table);
static inline RootIndex GetMapRootIndex();
+ static Handle<SmallOrderedHashSet> Rehash(Isolate* isolate,
+ Handle<SmallOrderedHashSet> table,
+ int new_capacity);
+ OBJECT_CONSTRUCTORS(SmallOrderedHashSet,
+ SmallOrderedHashTable<SmallOrderedHashSet>)
};
class SmallOrderedHashMap : public SmallOrderedHashTable<SmallOrderedHashMap> {
@@ -560,10 +605,12 @@ class SmallOrderedHashMap : public SmallOrderedHashTable<SmallOrderedHashMap> {
DECL_CAST(SmallOrderedHashMap)
DECL_PRINTER(SmallOrderedHashMap)
+ DECL_VERIFIER(SmallOrderedHashMap)
static const int kKeyIndex = 0;
static const int kValueIndex = 1;
static const int kEntrySize = 2;
+ static const int kPrefixSize = 0;
// Adds |value| to |table|, if the capacity isn't enough, a new
// table is created. The original |table| is returned if there is
@@ -574,6 +621,13 @@ class SmallOrderedHashMap : public SmallOrderedHashTable<SmallOrderedHashMap> {
Handle<Object> value);
static inline bool Is(Handle<HeapObject> table);
static inline RootIndex GetMapRootIndex();
+
+ static Handle<SmallOrderedHashMap> Rehash(Isolate* isolate,
+ Handle<SmallOrderedHashMap> table,
+ int new_capacity);
+
+ OBJECT_CONSTRUCTORS(SmallOrderedHashMap,
+ SmallOrderedHashTable<SmallOrderedHashMap>)
};
// TODO(gsathya): Rename this to OrderedHashTable, after we rename
@@ -613,6 +667,149 @@ class OrderedHashSetHandler
Isolate* isolate, Handle<SmallOrderedHashSet> table);
};
+class OrderedNameDictionary
+ : public OrderedHashTable<OrderedNameDictionary, 3> {
+ public:
+ DECL_CAST(OrderedNameDictionary)
+
+ static Handle<OrderedNameDictionary> Add(Isolate* isolate,
+ Handle<OrderedNameDictionary> table,
+ Handle<Name> key,
+ Handle<Object> value,
+ PropertyDetails details);
+
+ void SetEntry(Isolate* isolate, int entry, Object key, Object value,
+ PropertyDetails details);
+
+ static Handle<OrderedNameDictionary> DeleteEntry(
+ Isolate* isolate, Handle<OrderedNameDictionary> table, int entry);
+
+ static Handle<OrderedNameDictionary> Allocate(
+ Isolate* isolate, int capacity, PretenureFlag pretenure = NOT_TENURED);
+
+ static Handle<OrderedNameDictionary> Rehash(
+ Isolate* isolate, Handle<OrderedNameDictionary> table, int new_capacity);
+
+ // Returns the value for entry.
+ inline Object ValueAt(int entry);
+
+ // Set the value for entry.
+ inline void ValueAtPut(int entry, Object value);
+
+ // Returns the property details for the property at entry.
+ inline PropertyDetails DetailsAt(int entry);
+
+ // Set the details for entry.
+ inline void DetailsAtPut(int entry, PropertyDetails value);
+
+ inline void SetHash(int hash);
+ inline int Hash();
+
+ static HeapObject GetEmpty(ReadOnlyRoots ro_roots);
+ static inline RootIndex GetMapRootIndex();
+
+ static const int kValueOffset = 1;
+ static const int kPropertyDetailsOffset = 2;
+ static const int kPrefixSize = 1;
+
+ OBJECT_CONSTRUCTORS(OrderedNameDictionary,
+ OrderedHashTable<OrderedNameDictionary, 3>)
+};
+
+class OrderedNameDictionaryHandler
+ : public OrderedHashTableHandler<SmallOrderedNameDictionary,
+ OrderedNameDictionary> {
+ public:
+ static Handle<HeapObject> Add(Isolate* isolate, Handle<HeapObject> table,
+ Handle<Name> key, Handle<Object> value,
+ PropertyDetails details);
+ static Handle<HeapObject> Shrink(Isolate* isolate, Handle<HeapObject> table);
+
+ static Handle<HeapObject> DeleteEntry(Isolate* isolate,
+ Handle<HeapObject> table, int entry);
+ static int FindEntry(Isolate* isolate, HeapObject table, Name key);
+ static void SetEntry(Isolate* isolate, HeapObject table, int entry,
+ Object key, Object value, PropertyDetails details);
+
+ // Returns the value for entry.
+ static Object ValueAt(HeapObject table, int entry);
+
+ // Set the value for entry.
+ static void ValueAtPut(HeapObject table, int entry, Object value);
+
+ // Returns the property details for the property at entry.
+ static PropertyDetails DetailsAt(HeapObject table, int entry);
+
+ // Set the details for entry.
+ static void DetailsAtPut(HeapObject table, int entry, PropertyDetails value);
+
+ static Name KeyAt(HeapObject table, int entry);
+
+ static void SetHash(HeapObject table, int hash);
+ static int Hash(HeapObject table);
+
+ static int NumberOfElements(HeapObject table);
+ static int Capacity(HeapObject table);
+
+ static const int kNotFound = -1;
+
+ protected:
+ static Handle<OrderedNameDictionary> AdjustRepresentation(
+ Isolate* isolate, Handle<SmallOrderedNameDictionary> table);
+};
+
+class SmallOrderedNameDictionary
+ : public SmallOrderedHashTable<SmallOrderedNameDictionary> {
+ public:
+ DECL_CAST(SmallOrderedNameDictionary)
+
+ DECL_PRINTER(SmallOrderedNameDictionary)
+ DECL_VERIFIER(SmallOrderedNameDictionary)
+
+ // Returns the value for entry.
+ inline Object ValueAt(int entry);
+
+ static Handle<SmallOrderedNameDictionary> Rehash(
+ Isolate* isolate, Handle<SmallOrderedNameDictionary> table,
+ int new_capacity);
+
+ static Handle<SmallOrderedNameDictionary> DeleteEntry(
+ Isolate* isolate, Handle<SmallOrderedNameDictionary> table, int entry);
+
+ // Set the value for entry.
+ inline void ValueAtPut(int entry, Object value);
+
+ // Returns the property details for the property at entry.
+ inline PropertyDetails DetailsAt(int entry);
+
+ // Set the details for entry.
+ inline void DetailsAtPut(int entry, PropertyDetails value);
+
+ inline void SetHash(int hash);
+ inline int Hash();
+
+ static const int kKeyIndex = 0;
+ static const int kValueIndex = 1;
+ static const int kPropertyDetailsIndex = 2;
+ static const int kEntrySize = 3;
+ static const int kPrefixSize = 1;
+
+ // Adds |value| to |table|, if the capacity isn't enough, a new
+ // table is created. The original |table| is returned if there is
+ // capacity to store |value| otherwise the new table is returned.
+ static MaybeHandle<SmallOrderedNameDictionary> Add(
+ Isolate* isolate, Handle<SmallOrderedNameDictionary> table,
+ Handle<Name> key, Handle<Object> value, PropertyDetails details);
+
+ void SetEntry(Isolate* isolate, int entry, Object key, Object value,
+ PropertyDetails details);
+
+ static inline RootIndex GetMapRootIndex();
+
+ OBJECT_CONSTRUCTORS(SmallOrderedNameDictionary,
+ SmallOrderedHashTable<SmallOrderedNameDictionary>)
+};
+
class JSCollectionIterator : public JSObject {
public:
// [table]: the backing hash table mapping keys to values.
@@ -621,15 +818,20 @@ class JSCollectionIterator : public JSObject {
// [index]: The index into the data table.
DECL_ACCESSORS(index, Object)
- // Dispatched behavior.
- DECL_PRINTER(JSCollectionIterator)
+ void JSCollectionIteratorPrint(std::ostream& os, const char* name);
- static const int kTableOffset = JSObject::kHeaderSize;
- static const int kIndexOffset = kTableOffset + kPointerSize;
- static const int kSize = kIndexOffset + kPointerSize;
+// Layout description.
+#define JS_COLLECTION_ITERATOR_FIELDS(V) \
+ V(kTableOffset, kTaggedSize) \
+ V(kIndexOffset, kTaggedSize) \
+ /* Header size. */ \
+ V(kSize, 0)
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(JSCollectionIterator);
+ DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
+ JS_COLLECTION_ITERATOR_FIELDS)
+#undef JS_COLLECTION_ITERATOR_FIELDS
+
+ OBJECT_CONSTRUCTORS(JSCollectionIterator, JSObject);
};
// OrderedHashTableIterator is an iterator that iterates over the keys and
@@ -656,14 +858,14 @@ class OrderedHashTableIterator : public JSCollectionIterator {
// Returns the current key of the iterator. This should only be called when
// |HasMore| returns true.
- inline Object* CurrentKey();
+ inline Object CurrentKey();
private:
// Transitions the iterator to the non obsolete backing store. This is a NOP
// if the [table] is not obsolete.
void Transition();
- DISALLOW_IMPLICIT_CONSTRUCTORS(OrderedHashTableIterator);
+ OBJECT_CONSTRUCTORS(OrderedHashTableIterator, JSCollectionIterator);
};
} // namespace internal
diff --git a/deps/v8/src/objects/promise-inl.h b/deps/v8/src/objects/promise-inl.h
index f33bc42681..abd9fa3e0a 100644
--- a/deps/v8/src/objects/promise-inl.h
+++ b/deps/v8/src/objects/promise-inl.h
@@ -5,15 +5,24 @@
#ifndef V8_OBJECTS_PROMISE_INL_H_
#define V8_OBJECTS_PROMISE_INL_H_
-#include "src/objects/js-promise-inl.h"
#include "src/objects/promise.h"
+#include "src/objects/js-promise-inl.h"
+#include "src/objects/microtask-inl.h"
+
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
namespace v8 {
namespace internal {
+OBJECT_CONSTRUCTORS_IMPL(PromiseReactionJobTask, Microtask)
+OBJECT_CONSTRUCTORS_IMPL(PromiseFulfillReactionJobTask, PromiseReactionJobTask)
+OBJECT_CONSTRUCTORS_IMPL(PromiseRejectReactionJobTask, PromiseReactionJobTask)
+OBJECT_CONSTRUCTORS_IMPL(PromiseResolveThenableJobTask, Microtask)
+OBJECT_CONSTRUCTORS_IMPL(PromiseCapability, Struct)
+OBJECT_CONSTRUCTORS_IMPL(PromiseReaction, Struct)
+
CAST_ACCESSOR(PromiseCapability)
CAST_ACCESSOR(PromiseReaction)
CAST_ACCESSOR(PromiseReactionJobTask)
diff --git a/deps/v8/src/objects/promise.h b/deps/v8/src/objects/promise.h
index 0f7b4f23ce..0504eb0537 100644
--- a/deps/v8/src/objects/promise.h
+++ b/deps/v8/src/objects/promise.h
@@ -29,21 +29,28 @@ class PromiseReactionJobTask : public Microtask {
DECL_ACCESSORS(argument, Object)
DECL_ACCESSORS(context, Context)
DECL_ACCESSORS(handler, HeapObject)
- // [promise_or_capability]: Either a JSPromise or a PromiseCapability.
+ // [promise_or_capability]: Either a JSPromise (in case of native promises),
+ // a PromiseCapability (general case), or undefined (in case of await).
DECL_ACCESSORS(promise_or_capability, HeapObject)
- static const int kArgumentOffset = Microtask::kHeaderSize;
- static const int kContextOffset = kArgumentOffset + kPointerSize;
- static const int kHandlerOffset = kContextOffset + kPointerSize;
- static const int kPromiseOrCapabilityOffset = kHandlerOffset + kPointerSize;
- static const int kSize = kPromiseOrCapabilityOffset + kPointerSize;
+// Layout description.
+#define PROMISE_REACTION_JOB_FIELDS(V) \
+ V(kArgumentOffset, kTaggedSize) \
+ V(kContextOffset, kTaggedSize) \
+ V(kHandlerOffset, kTaggedSize) \
+ V(kPromiseOrCapabilityOffset, kTaggedSize) \
+ /* Total size. */ \
+ V(kSize, 0)
+
+ DEFINE_FIELD_OFFSET_CONSTANTS(Microtask::kHeaderSize,
+ PROMISE_REACTION_JOB_FIELDS)
+#undef PROMISE_REACTION_JOB_FIELDS
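
The field-list macro plus DEFINE_FIELD_OFFSET_CONSTANTS replaces the hand-chained `kXOffset = kYOffset + kPointerSize` constants that the old code used. A simplified stand-in shows how such a list can expand into back-to-back offsets via enum sequencing; this is an illustration, not V8's actual macro, and the header size is hypothetical.

#define DEMO_FIELDS(V)             \
  V(kArgumentOffset, 8)            \
  V(kContextOffset, 8)             \
  V(kHandlerOffset, 8)             \
  V(kPromiseOrCapabilityOffset, 8) \
  V(kSize, 0)

// Expand each (name, size) pair into two enumerators: the field's offset,
// and a hidden end marker that advances the running offset by |size|.
#define DEFINE_ONE_FIELD(Name, Size) Name, Name##End = Name + (Size)-1,

enum : int {
  kHeaderSize = 16,                // hypothetical base-class header size
  kFieldsStart = kHeaderSize - 1,  // so the first field lands at kHeaderSize
  DEMO_FIELDS(DEFINE_ONE_FIELD)
};

static_assert(kArgumentOffset == 16, "first field starts after the header");
static_assert(kContextOffset == kArgumentOffset + 8, "fields are contiguous");
static_assert(kSize == 48, "kSize accumulates to the total object size");

int main() {}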
// Dispatched behavior.
DECL_CAST(PromiseReactionJobTask)
DECL_VERIFIER(PromiseReactionJobTask)
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(PromiseReactionJobTask);
+ OBJECT_CONSTRUCTORS(PromiseReactionJobTask, Microtask)
};
// Struct to hold state required for a PromiseReactionJob of type "Fulfill".
@@ -54,8 +61,7 @@ class PromiseFulfillReactionJobTask : public PromiseReactionJobTask {
DECL_PRINTER(PromiseFulfillReactionJobTask)
DECL_VERIFIER(PromiseFulfillReactionJobTask)
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(PromiseFulfillReactionJobTask);
+ OBJECT_CONSTRUCTORS(PromiseFulfillReactionJobTask, PromiseReactionJobTask)
};
// Struct to hold state required for a PromiseReactionJob of type "Reject".
@@ -66,8 +72,7 @@ class PromiseRejectReactionJobTask : public PromiseReactionJobTask {
DECL_PRINTER(PromiseRejectReactionJobTask)
DECL_VERIFIER(PromiseRejectReactionJobTask)
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(PromiseRejectReactionJobTask);
+ OBJECT_CONSTRUCTORS(PromiseRejectReactionJobTask, PromiseReactionJobTask)
};
// A container struct to hold state required for PromiseResolveThenableJob.
@@ -78,19 +83,25 @@ class PromiseResolveThenableJobTask : public Microtask {
DECL_ACCESSORS(then, JSReceiver)
DECL_ACCESSORS(thenable, JSReceiver)
- static const int kContextOffset = Microtask::kHeaderSize;
- static const int kPromiseToResolveOffset = kContextOffset + kPointerSize;
- static const int kThenOffset = kPromiseToResolveOffset + kPointerSize;
- static const int kThenableOffset = kThenOffset + kPointerSize;
- static const int kSize = kThenableOffset + kPointerSize;
+// Layout description.
+#define PROMISE_RESOLVE_THENABLE_JOB_FIELDS(V) \
+ V(kContextOffset, kTaggedSize) \
+ V(kPromiseToResolveOffset, kTaggedSize) \
+ V(kThenOffset, kTaggedSize) \
+ V(kThenableOffset, kTaggedSize) \
+ /* Total size. */ \
+ V(kSize, 0)
+
+ DEFINE_FIELD_OFFSET_CONSTANTS(Microtask::kHeaderSize,
+ PROMISE_RESOLVE_THENABLE_JOB_FIELDS)
+#undef PROMISE_RESOLVE_THENABLE_JOB_FIELDS
// Dispatched behavior.
DECL_CAST(PromiseResolveThenableJobTask)
DECL_PRINTER(PromiseResolveThenableJobTask)
DECL_VERIFIER(PromiseResolveThenableJobTask)
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(PromiseResolveThenableJobTask);
+ OBJECT_CONSTRUCTORS(PromiseResolveThenableJobTask, Microtask)
};
// Struct to hold the state of a PromiseCapability.
@@ -100,18 +111,23 @@ class PromiseCapability : public Struct {
DECL_ACCESSORS(resolve, Object)
DECL_ACCESSORS(reject, Object)
- static const int kPromiseOffset = Struct::kHeaderSize;
- static const int kResolveOffset = kPromiseOffset + kPointerSize;
- static const int kRejectOffset = kResolveOffset + kPointerSize;
- static const int kSize = kRejectOffset + kPointerSize;
+// Layout description.
+#define PROMISE_CAPABILITY_FIELDS(V) \
+ V(kPromiseOffset, kTaggedSize) \
+ V(kResolveOffset, kTaggedSize) \
+ V(kRejectOffset, kTaggedSize) \
+ /* Total size. */ \
+ V(kSize, 0)
+
+ DEFINE_FIELD_OFFSET_CONSTANTS(Struct::kHeaderSize, PROMISE_CAPABILITY_FIELDS)
+#undef PROMISE_CAPABILITY_FIELDS
// Dispatched behavior.
DECL_CAST(PromiseCapability)
DECL_PRINTER(PromiseCapability)
DECL_VERIFIER(PromiseCapability)
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(PromiseCapability);
+ OBJECT_CONSTRUCTORS(PromiseCapability, Struct);
};
// A representation of promise reaction. This differs from the specification
@@ -124,10 +140,8 @@ class PromiseCapability : public Struct {
//
// The PromiseReaction::promise_or_capability field can either hold a JSPromise
// instance (in the fast case of a native promise) or a PromiseCapability in
-// case of a Promise subclass.
-//
-// We need to keep the context in the PromiseReaction so that we can run
-// the default handlers (in case they are undefined) in the proper context.
+// case of a Promise subclass. In case of await it can also be undefined if
+// PromiseHooks are disabled (see https://github.com/tc39/ecma262/pull/1146).
//
// The PromiseReaction objects form a singly-linked list, terminated by
// Smi 0. On the JSPromise instance they are linked in reverse order,
@@ -140,22 +154,28 @@ class PromiseReaction : public Struct {
DECL_ACCESSORS(next, Object)
DECL_ACCESSORS(reject_handler, HeapObject)
DECL_ACCESSORS(fulfill_handler, HeapObject)
+ // [promise_or_capability]: Either a JSPromise (in case of native promises),
+ // a PromiseCapability (general case), or undefined (in case of await).
DECL_ACCESSORS(promise_or_capability, HeapObject)
- static const int kNextOffset = Struct::kHeaderSize;
- static const int kRejectHandlerOffset = kNextOffset + kPointerSize;
- static const int kFulfillHandlerOffset = kRejectHandlerOffset + kPointerSize;
- static const int kPromiseOrCapabilityOffset =
- kFulfillHandlerOffset + kPointerSize;
- static const int kSize = kPromiseOrCapabilityOffset + kPointerSize;
+// Layout description.
+#define PROMISE_REACTION_FIELDS(V) \
+ V(kNextOffset, kTaggedSize) \
+ V(kRejectHandlerOffset, kTaggedSize) \
+ V(kFulfillHandlerOffset, kTaggedSize) \
+ V(kPromiseOrCapabilityOffset, kTaggedSize) \
+ /* Total size. */ \
+ V(kSize, 0)
+
+ DEFINE_FIELD_OFFSET_CONSTANTS(Struct::kHeaderSize, PROMISE_REACTION_FIELDS)
+#undef PROMISE_REACTION_FIELDS
// Dispatched behavior.
DECL_CAST(PromiseReaction)
DECL_PRINTER(PromiseReaction)
DECL_VERIFIER(PromiseReaction)
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(PromiseReaction);
+ OBJECT_CONSTRUCTORS(PromiseReaction, Struct);
};
} // namespace internal
diff --git a/deps/v8/src/objects/property-array-inl.h b/deps/v8/src/objects/property-array-inl.h
index cb157db5d6..b9785c563f 100644
--- a/deps/v8/src/objects/property-array-inl.h
+++ b/deps/v8/src/objects/property-array-inl.h
@@ -8,6 +8,8 @@
#include "src/objects/property-array.h"
#include "src/heap/heap-write-barrier-inl.h"
+#include "src/objects/heap-object-inl.h"
+#include "src/objects/smi-inl.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -15,64 +17,63 @@
namespace v8 {
namespace internal {
+OBJECT_CONSTRUCTORS_IMPL(PropertyArray, HeapObject)
CAST_ACCESSOR(PropertyArray)
-Object* PropertyArray::get(int index) const {
- DCHECK_GE(index, 0);
- DCHECK_LE(index, this->length());
- return RELAXED_READ_FIELD(this, kHeaderSize + index * kPointerSize);
+Object PropertyArray::get(int index) const {
+ DCHECK_LT(static_cast<unsigned>(index),
+ static_cast<unsigned>(this->length()));
+ return RELAXED_READ_FIELD(*this, OffsetOfElementAt(index));
}
-void PropertyArray::set(int index, Object* value) {
+void PropertyArray::set(int index, Object value) {
DCHECK(IsPropertyArray());
- DCHECK_GE(index, 0);
- DCHECK_LT(index, this->length());
- int offset = kHeaderSize + index * kPointerSize;
- RELAXED_WRITE_FIELD(this, offset, value);
- WRITE_BARRIER(this, offset, value);
+ DCHECK_LT(static_cast<unsigned>(index),
+ static_cast<unsigned>(this->length()));
+ int offset = OffsetOfElementAt(index);
+ RELAXED_WRITE_FIELD(*this, offset, value);
+ WRITE_BARRIER(*this, offset, value);
}
-void PropertyArray::set(int index, Object* value, WriteBarrierMode mode) {
- DCHECK_GE(index, 0);
- DCHECK_LT(index, this->length());
- int offset = kHeaderSize + index * kPointerSize;
- RELAXED_WRITE_FIELD(this, offset, value);
- CONDITIONAL_WRITE_BARRIER(this, offset, value, mode);
+void PropertyArray::set(int index, Object value, WriteBarrierMode mode) {
+ DCHECK_LT(static_cast<unsigned>(index),
+ static_cast<unsigned>(this->length()));
+ int offset = OffsetOfElementAt(index);
+ RELAXED_WRITE_FIELD(*this, offset, value);
+ CONDITIONAL_WRITE_BARRIER(*this, offset, value, mode);
}
-Object** PropertyArray::data_start() {
- return HeapObject::RawField(this, kHeaderSize);
-}
+ObjectSlot PropertyArray::data_start() { return RawField(kHeaderSize); }
int PropertyArray::length() const {
- Object* value_obj = READ_FIELD(this, kLengthAndHashOffset);
+ Object value_obj = READ_FIELD(*this, kLengthAndHashOffset);
int value = Smi::ToInt(value_obj);
return LengthField::decode(value);
}
void PropertyArray::initialize_length(int len) {
- SLOW_DCHECK(len >= 0);
- SLOW_DCHECK(len < LengthField::kMax);
- WRITE_FIELD(this, kLengthAndHashOffset, Smi::FromInt(len));
+ DCHECK_LT(static_cast<unsigned>(len),
+ static_cast<unsigned>(LengthField::kMax));
+ WRITE_FIELD(*this, kLengthAndHashOffset, Smi::FromInt(len));
}
int PropertyArray::synchronized_length() const {
- Object* value_obj = ACQUIRE_READ_FIELD(this, kLengthAndHashOffset);
+ Object value_obj = ACQUIRE_READ_FIELD(*this, kLengthAndHashOffset);
int value = Smi::ToInt(value_obj);
return LengthField::decode(value);
}
int PropertyArray::Hash() const {
- Object* value_obj = READ_FIELD(this, kLengthAndHashOffset);
+ Object value_obj = READ_FIELD(*this, kLengthAndHashOffset);
int value = Smi::ToInt(value_obj);
return HashField::decode(value);
}
void PropertyArray::SetHash(int hash) {
- Object* value_obj = READ_FIELD(this, kLengthAndHashOffset);
+ Object value_obj = READ_FIELD(*this, kLengthAndHashOffset);
int value = Smi::ToInt(value_obj);
value = HashField::update(value, hash);
- WRITE_FIELD(this, kLengthAndHashOffset, Smi::FromInt(value));
+ WRITE_FIELD(*this, kLengthAndHashOffset, Smi::FromInt(value));
}
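
PropertyArray keeps its length and its identity hash packed into the single kLengthAndHashOffset Smi, split apart by LengthField and HashField. A self-contained imitation of that packing; the field widths here are hypothetical, not V8's actual layout:

#include <cassert>
#include <cstdint>

template <int kShift, int kBits>
struct BitField {
  static constexpr uint32_t kMask = ((uint32_t{1} << kBits) - 1) << kShift;
  static constexpr int decode(uint32_t packed) {
    return static_cast<int>((packed & kMask) >> kShift);
  }
  static constexpr uint32_t update(uint32_t packed, int value) {
    return (packed & ~kMask) |
           ((static_cast<uint32_t>(value) << kShift) & kMask);
  }
};

using LengthField = BitField<0, 10>;  // low bits: length (hypothetical width)
using HashField = BitField<10, 20>;   // next bits: hash (hypothetical width)

int main() {
  uint32_t packed = 0;
  packed = LengthField::update(packed, 42);
  packed = HashField::update(packed, 12345);
  assert(LengthField::decode(packed) == 42);  // length survives the hash write
  assert(HashField::decode(packed) == 12345);
}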
} // namespace internal
diff --git a/deps/v8/src/objects/property-array.h b/deps/v8/src/objects/property-array.h
index 70f535a8f0..c1ac27fb72 100644
--- a/deps/v8/src/objects/property-array.h
+++ b/deps/v8/src/objects/property-array.h
@@ -5,7 +5,7 @@
#ifndef V8_OBJECTS_PROPERTY_ARRAY_H_
#define V8_OBJECTS_PROPERTY_ARRAY_H_
-#include "src/objects.h"
+#include "src/objects/heap-object.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -28,27 +28,33 @@ class PropertyArray : public HeapObject {
inline void SetHash(int hash);
inline int Hash() const;
- inline Object* get(int index) const;
+ inline Object get(int index) const;
- inline void set(int index, Object* value);
+ inline void set(int index, Object value);
// Setter with explicit barrier mode.
- inline void set(int index, Object* value, WriteBarrierMode mode);
+ inline void set(int index, Object value, WriteBarrierMode mode);
// Gives access to raw memory which stores the array's data.
- inline Object** data_start();
+ inline ObjectSlot data_start();
// Garbage collection support.
static constexpr int SizeFor(int length) {
- return kHeaderSize + length * kPointerSize;
+ return kHeaderSize + length * kTaggedSize;
}
+ static constexpr int OffsetOfElementAt(int index) { return SizeFor(index); }
DECL_CAST(PropertyArray)
DECL_PRINTER(PropertyArray)
DECL_VERIFIER(PropertyArray)
- // Layout description.
- static const int kLengthAndHashOffset = HeapObject::kHeaderSize;
- static const int kHeaderSize = kLengthAndHashOffset + kPointerSize;
+// Layout description.
+#define PROPERTY_ARRAY_FIELDS(V) \
+ V(kLengthAndHashOffset, kTaggedSize) \
+ /* Header size. */ \
+ V(kHeaderSize, 0)
+
+ DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize, PROPERTY_ARRAY_FIELDS)
+#undef PROPERTY_ARRAY_FIELDS
// Garbage collection support.
typedef FlexibleBodyDescriptor<kHeaderSize> BodyDescriptor;
@@ -61,8 +67,7 @@ class PropertyArray : public HeapObject {
static const int kNoHashSentinel = 0;
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(PropertyArray);
+ OBJECT_CONSTRUCTORS(PropertyArray, HeapObject);
};
} // namespace internal
diff --git a/deps/v8/src/objects/property-cell-inl.h b/deps/v8/src/objects/property-cell-inl.h
new file mode 100644
index 0000000000..6b8e396cd4
--- /dev/null
+++ b/deps/v8/src/objects/property-cell-inl.h
@@ -0,0 +1,40 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_PROPERTY_CELL_INL_H_
+#define V8_OBJECTS_PROPERTY_CELL_INL_H_
+
+#include "src/objects/property-cell.h"
+
+#include "src/heap/heap-inl.h"
+#include "src/objects/code.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+OBJECT_CONSTRUCTORS_IMPL(PropertyCell, HeapObject)
+
+CAST_ACCESSOR(PropertyCell)
+ACCESSORS(PropertyCell, dependent_code, DependentCode, kDependentCodeOffset)
+ACCESSORS(PropertyCell, name, Name, kNameOffset)
+ACCESSORS(PropertyCell, value, Object, kValueOffset)
+ACCESSORS(PropertyCell, property_details_raw, Object, kDetailsOffset)
+
+PropertyDetails PropertyCell::property_details() const {
+ return PropertyDetails(Smi::cast(property_details_raw()));
+}
+
+void PropertyCell::set_property_details(PropertyDetails details) {
+ set_property_details_raw(details.AsSmi());
+}
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_PROPERTY_CELL_INL_H_
diff --git a/deps/v8/src/objects/property-cell.h b/deps/v8/src/objects/property-cell.h
new file mode 100644
index 0000000000..c71a3e787e
--- /dev/null
+++ b/deps/v8/src/objects/property-cell.h
@@ -0,0 +1,81 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_PROPERTY_CELL_H_
+#define V8_OBJECTS_PROPERTY_CELL_H_
+
+#include "src/objects/heap-object.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+class PropertyCell : public HeapObject {
+ public:
+ // [name]: the name of the global property.
+ DECL_ACCESSORS(name, Name)
+ // [property_details]: details of the global property.
+ DECL_ACCESSORS(property_details_raw, Object)
+ // [value]: value of the global property.
+ DECL_ACCESSORS(value, Object)
+ // [dependent_code]: dependent code that depends on the type of the global
+ // property.
+ DECL_ACCESSORS(dependent_code, DependentCode)
+
+ inline PropertyDetails property_details() const;
+ inline void set_property_details(PropertyDetails details);
+
+ PropertyCellConstantType GetConstantType();
+
+ // Computes the new type of the cell's contents for the given value, but
+ // without actually modifying the details.
+ static PropertyCellType UpdatedType(Isolate* isolate,
+ Handle<PropertyCell> cell,
+ Handle<Object> value,
+ PropertyDetails details);
+ // Prepares property cell at given entry for receiving given value.
+ // As a result the old cell could be invalidated and/or dependent code could
+ // be deoptimized. Returns the prepared property cell.
+ static Handle<PropertyCell> PrepareForValue(
+ Isolate* isolate, Handle<GlobalDictionary> dictionary, int entry,
+ Handle<Object> value, PropertyDetails details);
+
+ static Handle<PropertyCell> InvalidateEntry(
+ Isolate* isolate, Handle<GlobalDictionary> dictionary, int entry);
+
+ static void SetValueWithInvalidation(Isolate* isolate,
+ Handle<PropertyCell> cell,
+ Handle<Object> new_value);
+
+ DECL_CAST(PropertyCell)
+
+ // Dispatched behavior.
+ DECL_PRINTER(PropertyCell)
+ DECL_VERIFIER(PropertyCell)
+
+// Layout description.
+#define PROPERTY_CELL_FIELDS(V) \
+ V(kDetailsOffset, kTaggedSize) \
+ V(kNameOffset, kTaggedSize) \
+ V(kValueOffset, kTaggedSize) \
+ V(kDependentCodeOffset, kTaggedSize) \
+ /* Total size. */ \
+ V(kSize, 0)
+
+ DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize, PROPERTY_CELL_FIELDS)
+#undef PROPERTY_CELL_FIELDS
+
+ typedef FixedBodyDescriptor<kNameOffset, kSize, kSize> BodyDescriptor;
+
+ OBJECT_CONSTRUCTORS(PropertyCell, HeapObject);
+};
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_PROPERTY_CELL_H_
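
The PROPERTY_CELL_FIELDS / DEFINE_FIELD_OFFSET_CONSTANTS pair above replaces hand-chained `kXOffset = kYOffset + kPointerSize` constants with an X-macro that lays fields out cumulatively. A freestanding sketch of how such a macro pair can be built — this mirrors the idea, not necessarily V8's exact macro text:

#include <cstdio>

constexpr int kTaggedSize = sizeof(void*);
constexpr int kHeaderSize = kTaggedSize;  // stand-in for HeapObject::kHeaderSize

#define PROPERTY_CELL_FIELDS(V)        \
  V(kDetailsOffset, kTaggedSize)       \
  V(kNameOffset, kTaggedSize)          \
  V(kValueOffset, kTaggedSize)         \
  V(kDependentCodeOffset, kTaggedSize) \
  V(kSize, 0)

// Each V(Name, Size) entry defines Name at the running offset and a
// Name##End marker that advances the cursor by Size.
#define DEFINE_ONE_FIELD_OFFSET(Name, Size) Name, Name##End = Name + (Size)-1,
#define DEFINE_FIELD_OFFSET_CONSTANTS(Start, LIST) \
  enum { LIST##_Start = (Start)-1, LIST(DEFINE_ONE_FIELD_OFFSET) };

DEFINE_FIELD_OFFSET_CONSTANTS(kHeaderSize, PROPERTY_CELL_FIELDS)

int main() {
  // On a 64-bit build this prints: 8 16 24 32 40.
  std::printf("%d %d %d %d %d\n", kDetailsOffset, kNameOffset, kValueOffset,
              kDependentCodeOffset, kSize);
  return 0;
}
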
diff --git a/deps/v8/src/objects/property-descriptor-object-inl.h b/deps/v8/src/objects/property-descriptor-object-inl.h
index a65d5d65e0..66ca48164f 100644
--- a/deps/v8/src/objects/property-descriptor-object-inl.h
+++ b/deps/v8/src/objects/property-descriptor-object-inl.h
@@ -14,6 +14,7 @@
namespace v8 {
namespace internal {
+OBJECT_CONSTRUCTORS_IMPL(PropertyDescriptorObject, FixedArray)
CAST_ACCESSOR(PropertyDescriptorObject)
}  // namespace internal
} // namespace v8
diff --git a/deps/v8/src/objects/property-descriptor-object.h b/deps/v8/src/objects/property-descriptor-object.h
index 758bde56b0..8c2628d131 100644
--- a/deps/v8/src/objects/property-descriptor-object.h
+++ b/deps/v8/src/objects/property-descriptor-object.h
@@ -55,6 +55,8 @@ class PropertyDescriptorObject : public FixedArray {
FixedArray::OffsetOfElementAt(PropertyDescriptorObject::kGetIndex);
static const int kSetOffset =
FixedArray::OffsetOfElementAt(PropertyDescriptorObject::kSetIndex);
+
+ OBJECT_CONSTRUCTORS(PropertyDescriptorObject, FixedArray)
};
} // namespace internal
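
The OBJECT_CONSTRUCTORS / OBJECT_CONSTRUCTORS_IMPL pairs threaded through this patch are part of V8's migration from `Foo*` heap-object pointers to one-word value classes wrapping a tagged address. A freestanding sketch of the pattern; the macro names are real, but the expansion shown is illustrative:

#include <cassert>
#include <cstdint>

using Address = uintptr_t;

// Base wrapper: one word, no virtuals; copies are as cheap as raw pointers.
class HeapObject {
 public:
  constexpr HeapObject() : ptr_(0) {}
  explicit HeapObject(Address ptr) : ptr_(ptr) {}
  Address ptr() const { return ptr_; }

 private:
  Address ptr_;
};

// Roughly what OBJECT_CONSTRUCTORS(PropertyDescriptorObject, FixedArray)
// provides: a default constructor plus an explicit Address constructor
// forwarding to the base, so derived "objects" stay one word wide.
class PropertyDescriptorObject : public HeapObject {
 public:
  constexpr PropertyDescriptorObject() = default;
  explicit PropertyDescriptorObject(Address ptr) : HeapObject(ptr) {}
};

int main() {
  PropertyDescriptorObject desc(0x1234);
  HeapObject obj = desc;  // slicing is fine: same single word
  assert(obj.ptr() == desc.ptr());
  static_assert(sizeof(PropertyDescriptorObject) == sizeof(Address),
                "value classes stay one word wide");
  return 0;
}
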
diff --git a/deps/v8/src/objects/prototype-info-inl.h b/deps/v8/src/objects/prototype-info-inl.h
index 24e219d46c..7bb8ed109c 100644
--- a/deps/v8/src/objects/prototype-info-inl.h
+++ b/deps/v8/src/objects/prototype-info-inl.h
@@ -17,9 +17,11 @@
namespace v8 {
namespace internal {
+OBJECT_CONSTRUCTORS_IMPL(PrototypeInfo, Struct)
+
CAST_ACCESSOR(PrototypeInfo)
-Map* PrototypeInfo::ObjectCreateMap() {
+Map PrototypeInfo::ObjectCreateMap() {
return Map::cast(object_create_map()->GetHeapObjectAssumeWeak());
}
@@ -30,7 +32,7 @@ void PrototypeInfo::SetObjectCreateMap(Handle<PrototypeInfo> info,
}
bool PrototypeInfo::HasObjectCreateMap() {
- MaybeObject* cache = object_create_map();
+ MaybeObject cache = object_create_map();
return cache->IsWeak();
}
@@ -41,7 +43,7 @@ SMI_ACCESSORS(PrototypeInfo, registry_slot, kRegistrySlotOffset)
SMI_ACCESSORS(PrototypeInfo, bit_field, kBitFieldOffset)
BOOL_ACCESSORS(PrototypeInfo, bit_field, should_be_fast_map, kShouldBeFastBit)
-void PrototypeUsers::MarkSlotEmpty(WeakArrayList* array, int index) {
+void PrototypeUsers::MarkSlotEmpty(WeakArrayList array, int index) {
DCHECK_GT(index, 0);
DCHECK_LT(index, array->length());
// Chain the empty slots into a linked list (each empty slot contains the
@@ -50,11 +52,11 @@ void PrototypeUsers::MarkSlotEmpty(WeakArrayList* array, int index) {
set_empty_slot_index(array, index);
}
-Smi* PrototypeUsers::empty_slot_index(WeakArrayList* array) {
- return array->Get(kEmptySlotIndex)->cast<Smi>();
+Smi PrototypeUsers::empty_slot_index(WeakArrayList array) {
+ return array->Get(kEmptySlotIndex).ToSmi();
}
-void PrototypeUsers::set_empty_slot_index(WeakArrayList* array, int index) {
+void PrototypeUsers::set_empty_slot_index(WeakArrayList array, int index) {
array->Set(kEmptySlotIndex, MaybeObject::FromObject(Smi::FromInt(index)));
}
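
MarkSlotEmpty and empty_slot_index above implement an in-place free list: each empty slot stores the index of the next empty slot (as a Smi), and slot kEmptySlotIndex holds the head. A standalone sketch of that scheme over a plain int vector — the TakeEmptySlot helper is hypothetical, added only to show how slots get reused:

#include <cassert>
#include <vector>

// Slot 0 holds the head of the empty-slot list; 0 doubles as the
// "no empty slots" marker since slot 0 itself never holds user data.
constexpr int kEmptySlotIndex = 0;
constexpr int kNoEmptySlotsMarker = 0;

void MarkSlotEmpty(std::vector<int>& array, int index) {
  assert(index > kEmptySlotIndex);
  array[index] = array[kEmptySlotIndex];  // chain old head behind this slot
  array[kEmptySlotIndex] = index;         // this slot becomes the new head
}

int TakeEmptySlot(std::vector<int>& array) {
  int head = array[kEmptySlotIndex];
  if (head == kNoEmptySlotsMarker) return -1;  // caller must grow instead
  array[kEmptySlotIndex] = array[head];        // pop the head off the list
  return head;
}

int main() {
  std::vector<int> array(4, 0);
  MarkSlotEmpty(array, 2);
  MarkSlotEmpty(array, 3);
  assert(TakeEmptySlot(array) == 3);  // LIFO reuse
  assert(TakeEmptySlot(array) == 2);
  assert(TakeEmptySlot(array) == -1);
  return 0;
}
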
diff --git a/deps/v8/src/objects/prototype-info.h b/deps/v8/src/objects/prototype-info.h
index 09c538fd19..5f0be22613 100644
--- a/deps/v8/src/objects/prototype-info.h
+++ b/deps/v8/src/objects/prototype-info.h
@@ -7,6 +7,7 @@
#include "src/objects.h"
#include "src/objects/fixed-array.h"
+#include "src/objects/struct.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -32,7 +33,7 @@ class PrototypeInfo : public Struct {
// [object_create_map]: A field caching the map for Object.create(prototype).
static inline void SetObjectCreateMap(Handle<PrototypeInfo> info,
Handle<Map> map);
- inline Map* ObjectCreateMap();
+ inline Map ObjectCreateMap();
inline bool HasObjectCreateMap();
// [registry_slot]: Slot in prototype's user registry where this user
@@ -52,14 +53,19 @@ class PrototypeInfo : public Struct {
DECL_PRINTER(PrototypeInfo)
DECL_VERIFIER(PrototypeInfo)
- static const int kJSModuleNamespaceOffset = HeapObject::kHeaderSize;
- static const int kPrototypeUsersOffset =
- kJSModuleNamespaceOffset + kPointerSize;
- static const int kRegistrySlotOffset = kPrototypeUsersOffset + kPointerSize;
- static const int kValidityCellOffset = kRegistrySlotOffset + kPointerSize;
- static const int kObjectCreateMapOffset = kValidityCellOffset + kPointerSize;
- static const int kBitFieldOffset = kObjectCreateMapOffset + kPointerSize;
- static const int kSize = kBitFieldOffset + kPointerSize;
+// Layout description.
+#define PROTOTYPE_INFO_FIELDS(V) \
+ V(kJSModuleNamespaceOffset, kTaggedSize) \
+ V(kPrototypeUsersOffset, kTaggedSize) \
+ V(kRegistrySlotOffset, kTaggedSize) \
+ V(kValidityCellOffset, kTaggedSize) \
+ V(kObjectCreateMapOffset, kTaggedSize) \
+ V(kBitFieldOffset, kTaggedSize) \
+ /* Total size. */ \
+ V(kSize, 0)
+
+ DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize, PROTOTYPE_INFO_FIELDS)
+#undef PROTOTYPE_INFO_FIELDS
// Bit field usage.
static const int kShouldBeFastBit = 0;
@@ -69,7 +75,7 @@ class PrototypeInfo : public Struct {
private:
DECL_ACCESSORS(object_create_map, MaybeObject)
- DISALLOW_IMPLICIT_CONSTRUCTORS(PrototypeInfo);
+ OBJECT_CONSTRUCTORS(PrototypeInfo, Struct);
};
// A growing array with an additional API for marking slots "empty". When adding
@@ -80,19 +86,19 @@ class PrototypeUsers : public WeakArrayList {
Handle<WeakArrayList> array,
Handle<Map> value, int* assigned_index);
- static inline void MarkSlotEmpty(WeakArrayList* array, int index);
+ static inline void MarkSlotEmpty(WeakArrayList array, int index);
// The callback is called when a weak pointer to HeapObject "object" is moved
// from index "from_index" to index "to_index" during compaction. The callback
// must not cause GC.
- typedef void (*CompactionCallback)(HeapObject* object, int from_index,
+ typedef void (*CompactionCallback)(HeapObject object, int from_index,
int to_index);
- static WeakArrayList* Compact(Handle<WeakArrayList> array, Heap* heap,
- CompactionCallback callback,
- PretenureFlag pretenure = NOT_TENURED);
+ static WeakArrayList Compact(Handle<WeakArrayList> array, Heap* heap,
+ CompactionCallback callback,
+ PretenureFlag pretenure = NOT_TENURED);
#ifdef VERIFY_HEAP
- static void Verify(WeakArrayList* array);
+ static void Verify(WeakArrayList array);
#endif // VERIFY_HEAP
static const int kEmptySlotIndex = 0;
@@ -101,10 +107,10 @@ class PrototypeUsers : public WeakArrayList {
static const int kNoEmptySlotsMarker = 0;
private:
- static inline Smi* empty_slot_index(WeakArrayList* array);
- static inline void set_empty_slot_index(WeakArrayList* array, int index);
+ static inline Smi empty_slot_index(WeakArrayList array);
+ static inline void set_empty_slot_index(WeakArrayList array, int index);
- static void IsSlotEmpty(WeakArrayList* array, int index);
+ static void IsSlotEmpty(WeakArrayList array, int index);
DISALLOW_IMPLICIT_CONSTRUCTORS(PrototypeUsers);
};
diff --git a/deps/v8/src/objects/regexp-match-info.h b/deps/v8/src/objects/regexp-match-info.h
index 5d8fcfb5ce..d973e62a6b 100644
--- a/deps/v8/src/objects/regexp-match-info.h
+++ b/deps/v8/src/objects/regexp-match-info.h
@@ -33,12 +33,12 @@ class V8_EXPORT_PRIVATE RegExpMatchInfo : NON_EXPORTED_BASE(public FixedArray) {
inline void SetNumberOfCaptureRegisters(int value);
// Returns the subject string of the last match.
- inline String* LastSubject();
- inline void SetLastSubject(String* value);
+ inline String LastSubject();
+ inline void SetLastSubject(String value);
// Like LastSubject, but modifiable by the user.
- inline Object* LastInput();
- inline void SetLastInput(Object* value);
+ inline Object LastInput();
+ inline void SetLastInput(Object value);
// Returns the i'th capture index, 0 <= i < NumberOfCaptures(). Capture(0) and
// Capture(1) determine the start- and endpoint of the match itself.
@@ -57,16 +57,21 @@ class V8_EXPORT_PRIVATE RegExpMatchInfo : NON_EXPORTED_BASE(public FixedArray) {
static const int kFirstCaptureIndex = 3;
static const int kLastMatchOverhead = kFirstCaptureIndex;
- static const int kNumberOfCapturesOffset = FixedArray::kHeaderSize;
- static const int kLastSubjectOffset = kNumberOfCapturesOffset + kPointerSize;
- static const int kLastInputOffset = kLastSubjectOffset + kPointerSize;
- static const int kFirstCaptureOffset = kLastInputOffset + kPointerSize;
+// Layout description.
+#define REG_EXP_MATCH_INFO_FIELDS(V) \
+ V(kNumberOfCapturesOffset, kTaggedSize) \
+ V(kLastSubjectOffset, kTaggedSize) \
+ V(kLastInputOffset, kTaggedSize) \
+ V(kFirstCaptureOffset, 0)
+
+ DEFINE_FIELD_OFFSET_CONSTANTS(FixedArray::kHeaderSize,
+ REG_EXP_MATCH_INFO_FIELDS)
+#undef REG_EXP_MATCH_INFO_FIELDS
// Every match info is guaranteed to have enough space to store two captures.
static const int kInitialCaptureIndices = 2;
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(RegExpMatchInfo);
+ OBJECT_CONSTRUCTORS(RegExpMatchInfo, FixedArray);
};
} // namespace internal
diff --git a/deps/v8/src/objects/scope-info.cc b/deps/v8/src/objects/scope-info.cc
index 0fa5557e8c..7a34830ee7 100644
--- a/deps/v8/src/objects/scope-info.cc
+++ b/deps/v8/src/objects/scope-info.cc
@@ -6,11 +6,10 @@
#include "src/objects/scope-info.h"
-#include "src/ast/context-slot-cache.h"
#include "src/ast/scopes.h"
#include "src/ast/variables.h"
#include "src/bootstrapper.h"
-#include "src/heap/heap-inl.h"
+
#include "src/objects-inl.h"
#include "src/objects/module-inl.h"
@@ -26,11 +25,11 @@ enum ModuleVariableEntryOffset {
};
#ifdef DEBUG
-bool ScopeInfo::Equals(ScopeInfo* other) const {
+bool ScopeInfo::Equals(ScopeInfo other) const {
if (length() != other->length()) return false;
for (int index = 0; index < length(); ++index) {
- Object* entry = get(index);
- Object* other_entry = other->get(index);
+ Object entry = get(index);
+ Object other_entry = other->get(index);
if (entry->IsSmi()) {
if (entry != other_entry) return false;
} else {
@@ -153,12 +152,12 @@ Handle<ScopeInfo> ScopeInfo::Create(Isolate* isolate, Zone* zone, Scope* scope,
Handle<ScopeInfo> scope_info = factory->NewScopeInfo(length);
bool has_simple_parameters = false;
- bool asm_module = false;
+ bool is_asm_module = false;
bool calls_sloppy_eval = false;
if (scope->is_function_scope()) {
DeclarationScope* function_scope = scope->AsDeclarationScope();
has_simple_parameters = function_scope->has_simple_parameters();
- asm_module = function_scope->asm_module();
+ is_asm_module = function_scope->is_asm_module();
}
FunctionKind function_kind = kNormalFunction;
if (scope->is_declaration_scope()) {
@@ -176,11 +175,12 @@ Handle<ScopeInfo> ScopeInfo::Create(Isolate* isolate, Zone* zone, Scope* scope,
HasNewTargetField::encode(has_new_target) |
FunctionVariableField::encode(function_name_info) |
HasInferredFunctionNameField::encode(has_inferred_function_name) |
- AsmModuleField::encode(asm_module) |
+ IsAsmModuleField::encode(is_asm_module) |
HasSimpleParametersField::encode(has_simple_parameters) |
FunctionKindField::encode(function_kind) |
HasOuterScopeInfoField::encode(has_outer_scope_info) |
- IsDebugEvaluateScopeField::encode(scope->is_debug_evaluate_scope());
+ IsDebugEvaluateScopeField::encode(scope->is_debug_evaluate_scope()) |
+ ForceContextAllocationField::encode(scope->ForceContextForLanguageMode());
scope_info->SetFlags(flags);
scope_info->SetParameterCount(parameter_count);
@@ -268,7 +268,7 @@ Handle<ScopeInfo> ScopeInfo::Create(Isolate* isolate, Zone* zone, Scope* scope,
DisallowHeapAllocation no_gc;
Variable* var = scope->AsDeclarationScope()->function_var();
int var_index = -1;
- Object* name = Smi::kZero;
+ Object name = Smi::kZero;
if (var != nullptr) {
var_index = var->index();
name = *var->name();
@@ -331,7 +331,7 @@ Handle<ScopeInfo> ScopeInfo::CreateForWithScope(
LanguageModeField::encode(LanguageMode::kSloppy) |
DeclarationScopeField::encode(false) |
ReceiverVariableField::encode(NONE) | HasNewTargetField::encode(false) |
- FunctionVariableField::encode(NONE) | AsmModuleField::encode(false) |
+ FunctionVariableField::encode(NONE) | IsAsmModuleField::encode(false) |
HasSimpleParametersField::encode(true) |
FunctionKindField::encode(kNormalFunction) |
HasOuterScopeInfoField::encode(has_outer_scope_info) |
@@ -395,7 +395,7 @@ Handle<ScopeInfo> ScopeInfo::CreateForBootstrapping(Isolate* isolate,
HasNewTargetField::encode(false) |
FunctionVariableField::encode(is_empty_function ? UNUSED : NONE) |
HasInferredFunctionNameField::encode(has_inferred_function_name) |
- AsmModuleField::encode(false) | HasSimpleParametersField::encode(true) |
+ IsAsmModuleField::encode(false) | HasSimpleParametersField::encode(true) |
FunctionKindField::encode(FunctionKind::kNormalFunction) |
HasOuterScopeInfoField::encode(false) |
IsDebugEvaluateScopeField::encode(false);
@@ -452,7 +452,7 @@ Handle<ScopeInfo> ScopeInfo::CreateForBootstrapping(Isolate* isolate,
return scope_info;
}
-ScopeInfo* ScopeInfo::Empty(Isolate* isolate) {
+ScopeInfo ScopeInfo::Empty(Isolate* isolate) {
return ReadOnlyRoots(isolate).empty_scope_info();
}
@@ -483,7 +483,9 @@ int ScopeInfo::ContextLength() const {
int context_locals = ContextLocalCount();
bool function_name_context_slot =
FunctionVariableField::decode(Flags()) == CONTEXT;
- bool has_context = context_locals > 0 || function_name_context_slot ||
+ bool force_context = ForceContextAllocationField::decode(Flags());
+ bool has_context = context_locals > 0 || force_context ||
+ function_name_context_slot ||
scope_type() == WITH_SCOPE ||
(scope_type() == BLOCK_SCOPE && CallsSloppyEval() &&
is_declaration_scope()) ||
@@ -539,13 +541,13 @@ bool ScopeInfo::HasSharedFunctionName() const {
return FunctionName() != SharedFunctionInfo::kNoSharedNameSentinel;
}
-void ScopeInfo::SetFunctionName(Object* name) {
+void ScopeInfo::SetFunctionName(Object name) {
DCHECK(HasFunctionName());
DCHECK(name->IsString() || name == SharedFunctionInfo::kNoSharedNameSentinel);
set(FunctionNameInfoIndex(), name);
}
-void ScopeInfo::SetInferredFunctionName(String* name) {
+void ScopeInfo::SetInferredFunctionName(String name) {
DCHECK(HasInferredFunctionName());
set(InferredFunctionNameIndex(), name);
}
@@ -571,18 +573,18 @@ void ScopeInfo::SetIsDebugEvaluateScope() {
bool ScopeInfo::HasContext() const { return ContextLength() > 0; }
-Object* ScopeInfo::FunctionName() const {
+Object ScopeInfo::FunctionName() const {
DCHECK(HasFunctionName());
return get(FunctionNameInfoIndex());
}
-Object* ScopeInfo::InferredFunctionName() const {
+Object ScopeInfo::InferredFunctionName() const {
DCHECK(HasInferredFunctionName());
return get(InferredFunctionNameIndex());
}
-String* ScopeInfo::FunctionDebugName() const {
- Object* name = FunctionName();
+String ScopeInfo::FunctionDebugName() const {
+ Object name = FunctionName();
if (name->IsString() && String::cast(name)->length() > 0) {
return String::cast(name);
}
@@ -610,17 +612,17 @@ void ScopeInfo::SetPositionInfo(int start, int end) {
set(PositionInfoIndex() + 1, Smi::FromInt(end));
}
-ScopeInfo* ScopeInfo::OuterScopeInfo() const {
+ScopeInfo ScopeInfo::OuterScopeInfo() const {
DCHECK(HasOuterScopeInfo());
return ScopeInfo::cast(get(OuterScopeInfoIndex()));
}
-ModuleInfo* ScopeInfo::ModuleDescriptorInfo() const {
+ModuleInfo ScopeInfo::ModuleDescriptorInfo() const {
DCHECK(scope_type() == MODULE_SCOPE);
return ModuleInfo::cast(get(ModuleInfoIndex()));
}
-String* ScopeInfo::ContextLocalName(int var) const {
+String ScopeInfo::ContextLocalName(int var) const {
DCHECK_LE(0, var);
DCHECK_LT(var, ContextLocalCount());
int info_index = ContextLocalNamesIndex() + var;
@@ -667,7 +669,7 @@ MaybeAssignedFlag ScopeInfo::ContextLocalMaybeAssignedFlag(int var) const {
}
// static
-bool ScopeInfo::VariableIsSynthetic(String* name) {
+bool ScopeInfo::VariableIsSynthetic(String name) {
// There's currently no flag stored on the ScopeInfo to indicate that a
// variable is a compiler-introduced temporary. However, to avoid conflict
// with user declarations, the current temporaries like .generator_object and
@@ -688,7 +690,7 @@ int ScopeInfo::ModuleIndex(Handle<String> name, VariableMode* mode,
int module_vars_count = Smi::ToInt(get(ModuleVariableCountIndex()));
int entry = ModuleVariablesIndex();
for (int i = 0; i < module_vars_count; ++i) {
- String* var_name = String::cast(get(entry + kModuleVariableNameOffset));
+ String var_name = String::cast(get(entry + kModuleVariableNameOffset));
if (name->Equals(var_name)) {
int index;
ModuleVariable(i, nullptr, &index, mode, init_flag, maybe_assigned_flag);
@@ -712,26 +714,6 @@ int ScopeInfo::ContextSlotIndex(Handle<ScopeInfo> scope_info,
if (scope_info->length() == 0) return -1;
- // Get the Isolate via the heap.
- //
- // Ideally we'd pass Isolate* through to this function, however this is mostly
- // called from the parser, which is otherwise isolate independent. We can't
- // assume that all scope infos are never RO space (like we can with JSReceiver
- // or Context), but we can assume that *non-empty* scope infos are.
- //
- // So, we take the least-ugly approach of manually getting the isolate to be
- // able to remove GetIsolate from ScopeInfo in the general case, while
- // allowing it in this one particular case.
- Isolate* isolate = Heap::FromWritableHeapObject(*scope_info)->isolate();
-
- ContextSlotCache* context_slot_cache = isolate->context_slot_cache();
- int result = context_slot_cache->Lookup(*scope_info, *name, mode, init_flag,
- maybe_assigned_flag);
- if (result != ContextSlotCache::kNotFound) {
- DCHECK_LT(result, scope_info->ContextLength());
- return result;
- }
-
int start = scope_info->ContextLocalNamesIndex();
int end = start + scope_info->ContextLocalCount();
for (int i = start; i < end; ++i) {
@@ -740,17 +722,12 @@ int ScopeInfo::ContextSlotIndex(Handle<ScopeInfo> scope_info,
*mode = scope_info->ContextLocalMode(var);
*init_flag = scope_info->ContextLocalInitFlag(var);
*maybe_assigned_flag = scope_info->ContextLocalMaybeAssignedFlag(var);
- result = Context::MIN_CONTEXT_SLOTS + var;
+ int result = Context::MIN_CONTEXT_SLOTS + var;
- context_slot_cache->Update(scope_info, name, *mode, *init_flag,
- *maybe_assigned_flag, result);
DCHECK_LT(result, scope_info->ContextLength());
return result;
}
}
- // Cache as not found. Mode, init flag and maybe assigned flag don't matter.
- context_slot_cache->Update(scope_info, name, VariableMode::kTemporary,
- kNeedsInitialization, kNotAssigned, -1);
return -1;
}
@@ -762,7 +739,7 @@ int ScopeInfo::ReceiverContextSlotIndex() const {
return -1;
}
-int ScopeInfo::FunctionContextSlotIndex(String* name) const {
+int ScopeInfo::FunctionContextSlotIndex(String name) const {
DCHECK(name->IsInternalizedString());
if (length() > 0) {
if (FunctionVariableField::decode(Flags()) == CONTEXT &&
@@ -819,7 +796,7 @@ int ScopeInfo::ModuleVariablesIndex() const {
return ModuleVariableCountIndex() + 1;
}
-void ScopeInfo::ModuleVariable(int i, String** name, int* index,
+void ScopeInfo::ModuleVariable(int i, String* name, int* index,
VariableMode* mode,
InitializationFlag* init_flag,
MaybeAssignedFlag* maybe_assigned_flag) {
@@ -947,7 +924,7 @@ int ModuleInfo::RegularExportCount() const {
return regular_exports()->length() / kRegularExportLength;
}
-String* ModuleInfo::RegularExportLocalName(int i) const {
+String ModuleInfo::RegularExportLocalName(int i) const {
return String::cast(regular_exports()->get(i * kRegularExportLength +
kRegularExportLocalNameOffset));
}
@@ -957,7 +934,7 @@ int ModuleInfo::RegularExportCellIndex(int i) const {
kRegularExportCellIndexOffset));
}
-FixedArray* ModuleInfo::RegularExportExportNames(int i) const {
+FixedArray ModuleInfo::RegularExportExportNames(int i) const {
return FixedArray::cast(regular_exports()->get(
i * kRegularExportLength + kRegularExportExportNamesOffset));
}
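
With the ContextSlotCache lookup removed above, ContextSlotIndex reduces to a linear scan over the context-local names, returning the slot index biased by the fixed context header. A simplified standalone sketch of the resulting shape (kMinContextSlots is illustrative, standing in for Context::MIN_CONTEXT_SLOTS):

#include <cassert>
#include <string>
#include <vector>

constexpr int kMinContextSlots = 4;  // stand-in for Context::MIN_CONTEXT_SLOTS

// Linear scan, as in the post-patch ContextSlotIndex: no cache lookup on
// entry and no negative-result caching on a miss.
int ContextSlotIndex(const std::vector<std::string>& local_names,
                     const std::string& name) {
  for (size_t i = 0; i < local_names.size(); ++i) {
    if (local_names[i] == name) return kMinContextSlots + static_cast<int>(i);
  }
  return -1;
}

int main() {
  std::vector<std::string> locals = {"x", "y"};
  assert(ContextSlotIndex(locals, "y") == kMinContextSlots + 1);
  assert(ContextSlotIndex(locals, "z") == -1);
  return 0;
}
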
diff --git a/deps/v8/src/objects/scope-info.h b/deps/v8/src/objects/scope-info.h
index 622c51210b..3f6ae6df88 100644
--- a/deps/v8/src/objects/scope-info.h
+++ b/deps/v8/src/objects/scope-info.h
@@ -76,8 +76,8 @@ class ScopeInfo : public FixedArray {
bool HasInferredFunctionName() const;
- void SetFunctionName(Object* name);
- void SetInferredFunctionName(String* name);
+ void SetFunctionName(Object name);
+ void SetInferredFunctionName(String name);
// Does this scope belong to a function?
bool HasPositionInfo() const;
@@ -91,25 +91,25 @@ class ScopeInfo : public FixedArray {
inline bool HasSimpleParameters() const;
// Return the function_name if present.
- Object* FunctionName() const;
+ Object FunctionName() const;
// The function's name if it is non-empty, otherwise the inferred name or an
// empty string.
- String* FunctionDebugName() const;
+ String FunctionDebugName() const;
// Return the function's inferred name if present.
// See SharedFunctionInfo::function_identifier.
- Object* InferredFunctionName() const;
+ Object InferredFunctionName() const;
// Position information accessors.
int StartPosition() const;
int EndPosition() const;
void SetPositionInfo(int start, int end);
- ModuleInfo* ModuleDescriptorInfo() const;
+ ModuleInfo ModuleDescriptorInfo() const;
// Return the name of the given context local.
- String* ContextLocalName(int var) const;
+ String ContextLocalName(int var) const;
// Return the mode of the given context local.
VariableMode ContextLocalMode(int var) const;
@@ -125,7 +125,7 @@ class ScopeInfo : public FixedArray {
// Return true if this local was introduced by the compiler, and should not be
// exposed to the user in a debugger.
- static bool VariableIsSynthetic(String* name);
+ static bool VariableIsSynthetic(String name);
// Lookup support for serialized scope info. Returns the local context slot
// index for a given slot name if the slot is present; otherwise
@@ -147,7 +147,7 @@ class ScopeInfo : public FixedArray {
// slot index if the function name is present and context-allocated (named
// function expressions, only), otherwise returns a value < 0. The name
// must be an internalized string.
- int FunctionContextSlotIndex(String* name) const;
+ int FunctionContextSlotIndex(String name) const;
// Lookup support for serialized scope info. Returns the receiver context
// slot index if scope has a "this" binding, and the binding is
@@ -167,10 +167,10 @@ class ScopeInfo : public FixedArray {
void SetIsDebugEvaluateScope();
// Return the outer ScopeInfo if present.
- ScopeInfo* OuterScopeInfo() const;
+ ScopeInfo OuterScopeInfo() const;
#ifdef DEBUG
- bool Equals(ScopeInfo* other) const;
+ bool Equals(ScopeInfo other) const;
#endif
static Handle<ScopeInfo> Create(Isolate* isolate, Zone* zone, Scope* scope,
@@ -182,7 +182,7 @@ class ScopeInfo : public FixedArray {
static Handle<ScopeInfo> CreateGlobalThisBinding(Isolate* isolate);
// Serializes empty scope info.
- V8_EXPORT_PRIVATE static ScopeInfo* Empty(Isolate* isolate);
+ V8_EXPORT_PRIVATE static ScopeInfo Empty(Isolate* isolate);
// The layout of the static part of a ScopeInfo is as follows. Each entry is
// numeric and occupies one array slot.
@@ -201,13 +201,52 @@ class ScopeInfo : public FixedArray {
FOR_EACH_SCOPE_INFO_NUMERIC_FIELD(FIELD_ACCESSORS)
#undef FIELD_ACCESSORS
- enum {
+ enum Fields {
#define DECL_INDEX(name) k##name,
FOR_EACH_SCOPE_INFO_NUMERIC_FIELD(DECL_INDEX)
#undef DECL_INDEX
kVariablePartIndex
};
+ // Used for the function name variable for named function expressions, and for
+ // the receiver.
+ enum VariableAllocationInfo { NONE, STACK, CONTEXT, UNUSED };
+
+ // Properties of scopes.
+ class ScopeTypeField : public BitField<ScopeType, 0, 4> {};
+ class CallsSloppyEvalField : public BitField<bool, ScopeTypeField::kNext, 1> {
+ };
+ STATIC_ASSERT(LanguageModeSize == 2);
+ class LanguageModeField
+ : public BitField<LanguageMode, CallsSloppyEvalField::kNext, 1> {};
+ class DeclarationScopeField
+ : public BitField<bool, LanguageModeField::kNext, 1> {};
+ class ReceiverVariableField
+ : public BitField<VariableAllocationInfo, DeclarationScopeField::kNext,
+ 2> {};
+ class HasNewTargetField
+ : public BitField<bool, ReceiverVariableField::kNext, 1> {};
+ class FunctionVariableField
+ : public BitField<VariableAllocationInfo, HasNewTargetField::kNext, 2> {};
+ // TODO(cbruni): Combine with function variable field when only storing the
+ // function name.
+ class HasInferredFunctionNameField
+ : public BitField<bool, FunctionVariableField::kNext, 1> {};
+ class IsAsmModuleField
+ : public BitField<bool, HasInferredFunctionNameField::kNext, 1> {};
+ class HasSimpleParametersField
+ : public BitField<bool, IsAsmModuleField::kNext, 1> {};
+ class FunctionKindField
+ : public BitField<FunctionKind, HasSimpleParametersField::kNext, 5> {};
+ class HasOuterScopeInfoField
+ : public BitField<bool, FunctionKindField::kNext, 1> {};
+ class IsDebugEvaluateScopeField
+ : public BitField<bool, HasOuterScopeInfoField::kNext, 1> {};
+ class ForceContextAllocationField
+ : public BitField<bool, IsDebugEvaluateScopeField::kNext, 1> {};
+
+ STATIC_ASSERT(kLastFunctionKind <= FunctionKindField::kMax);
+
private:
// The layout of the variable part of a ScopeInfo is as follows:
// 1. ContextLocalNames:
@@ -262,51 +301,14 @@ class ScopeInfo : public FixedArray {
// Get metadata of i-th MODULE-allocated variable, where 0 <= i <
// ModuleVariableCount. The metadata is returned via out-arguments, which may
// be nullptr if the corresponding information is not requested
- void ModuleVariable(int i, String** name, int* index,
+ void ModuleVariable(int i, String* name, int* index,
VariableMode* mode = nullptr,
InitializationFlag* init_flag = nullptr,
MaybeAssignedFlag* maybe_assigned_flag = nullptr);
- // Used for the function name variable for named function expressions, and for
- // the receiver.
- enum VariableAllocationInfo { NONE, STACK, CONTEXT, UNUSED };
-
static const int kFunctionNameEntries = 2;
static const int kPositionInfoEntries = 2;
- // Properties of scopes.
- class ScopeTypeField : public BitField<ScopeType, 0, 4> {};
- class CallsSloppyEvalField : public BitField<bool, ScopeTypeField::kNext, 1> {
- };
- STATIC_ASSERT(LanguageModeSize == 2);
- class LanguageModeField
- : public BitField<LanguageMode, CallsSloppyEvalField::kNext, 1> {};
- class DeclarationScopeField
- : public BitField<bool, LanguageModeField::kNext, 1> {};
- class ReceiverVariableField
- : public BitField<VariableAllocationInfo, DeclarationScopeField::kNext,
- 2> {};
- class HasNewTargetField
- : public BitField<bool, ReceiverVariableField::kNext, 1> {};
- class FunctionVariableField
- : public BitField<VariableAllocationInfo, HasNewTargetField::kNext, 2> {};
- // TODO(cbruni): Combine with function variable field when only storing the
- // function name.
- class HasInferredFunctionNameField
- : public BitField<bool, FunctionVariableField::kNext, 1> {};
- class AsmModuleField
- : public BitField<bool, HasInferredFunctionNameField::kNext, 1> {};
- class HasSimpleParametersField
- : public BitField<bool, AsmModuleField::kNext, 1> {};
- class FunctionKindField
- : public BitField<FunctionKind, HasSimpleParametersField::kNext, 5> {};
- class HasOuterScopeInfoField
- : public BitField<bool, FunctionKindField::kNext, 1> {};
- class IsDebugEvaluateScopeField
- : public BitField<bool, HasOuterScopeInfoField::kNext, 1> {};
-
- STATIC_ASSERT(kLastFunctionKind <= FunctionKindField::kMax);
-
// Properties of variables.
class VariableModeField : public BitField<VariableMode, 0, 3> {};
class InitFlagField : public BitField<InitializationFlag, 3, 1> {};
@@ -317,6 +319,8 @@ class ScopeInfo : public FixedArray {
friend class ScopeIterator;
friend std::ostream& operator<<(std::ostream& os,
ScopeInfo::VariableAllocationInfo var);
+
+ OBJECT_CONSTRUCTORS(ScopeInfo, FixedArray)
};
std::ostream& operator<<(std::ostream& os,
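
The chained BitField classes moved into ScopeInfo's public section above pack all of its flags into one integer field; each field derives its shift from the previous field's kNext, so adding ForceContextAllocationField required no manual renumbering. A freestanding sketch of the pattern, simplified from V8's BitField template:

#include <cassert>
#include <cstdint>

// Minimal BitField in the style used for ScopeInfo's flag word: each
// instantiation knows its shift and width, and exports kNext so the
// following field can be declared relative to it.
template <class T, int shift, int size>
struct BitField {
  static constexpr uint32_t kMask = ((uint32_t{1} << size) - 1) << shift;
  static constexpr int kNext = shift + size;
  static constexpr uint32_t encode(T value) {
    return static_cast<uint32_t>(value) << shift;
  }
  static constexpr T decode(uint32_t field) {
    return static_cast<T>((field & kMask) >> shift);
  }
};

using ScopeTypeField = BitField<int, 0, 4>;
using CallsSloppyEvalField = BitField<bool, ScopeTypeField::kNext, 1>;
using LanguageModeField = BitField<int, CallsSloppyEvalField::kNext, 1>;

int main() {
  uint32_t flags = ScopeTypeField::encode(3) |
                   CallsSloppyEvalField::encode(true) |
                   LanguageModeField::encode(1);
  assert(ScopeTypeField::decode(flags) == 3);
  assert(CallsSloppyEvalField::decode(flags));
  assert(LanguageModeField::decode(flags) == 1);
  return 0;
}
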
diff --git a/deps/v8/src/objects/script-inl.h b/deps/v8/src/objects/script-inl.h
index eaea8f78e8..0ab5b2dfc4 100644
--- a/deps/v8/src/objects/script-inl.h
+++ b/deps/v8/src/objects/script-inl.h
@@ -8,6 +8,7 @@
#include "src/objects/script.h"
#include "src/objects/shared-function-info.h"
+#include "src/objects/smi-inl.h"
#include "src/objects/string-inl.h"
// Has to be the last include (doesn't have include guards):
@@ -16,6 +17,10 @@
namespace v8 {
namespace internal {
+OBJECT_CONSTRUCTORS_IMPL(Script, Struct)
+
+NEVER_READ_ONLY_SPACE_IMPL(Script)
+
CAST_ACCESSOR(Script)
ACCESSORS(Script, source, Object, kSourceOffset)
@@ -49,23 +54,23 @@ bool Script::has_eval_from_shared() const {
return eval_from_shared_or_wrapped_arguments()->IsSharedFunctionInfo();
}
-void Script::set_eval_from_shared(SharedFunctionInfo* shared,
+void Script::set_eval_from_shared(SharedFunctionInfo shared,
WriteBarrierMode mode) {
DCHECK(!is_wrapped());
set_eval_from_shared_or_wrapped_arguments(shared, mode);
}
-SharedFunctionInfo* Script::eval_from_shared() const {
+SharedFunctionInfo Script::eval_from_shared() const {
DCHECK(has_eval_from_shared());
return SharedFunctionInfo::cast(eval_from_shared_or_wrapped_arguments());
}
-void Script::set_wrapped_arguments(FixedArray* value, WriteBarrierMode mode) {
+void Script::set_wrapped_arguments(FixedArray value, WriteBarrierMode mode) {
DCHECK(!has_eval_from_shared());
set_eval_from_shared_or_wrapped_arguments(value, mode);
}
-FixedArray* Script::wrapped_arguments() const {
+FixedArray Script::wrapped_arguments() const {
DCHECK(is_wrapped());
return FixedArray::cast(eval_from_shared_or_wrapped_arguments());
}
@@ -98,9 +103,9 @@ void Script::set_origin_options(ScriptOriginOptions origin_options) {
}
bool Script::HasValidSource() {
- Object* src = this->source();
+ Object src = this->source();
if (!src->IsString()) return true;
- String* src_str = String::cast(src);
+ String src_str = String::cast(src);
if (!StringShape(src_str).IsExternal()) return true;
if (src_str->IsOneByteRepresentation()) {
return ExternalOneByteString::cast(src)->resource() != nullptr;
diff --git a/deps/v8/src/objects/script.h b/deps/v8/src/objects/script.h
index bd789ba2ff..12c9a4eeac 100644
--- a/deps/v8/src/objects/script.h
+++ b/deps/v8/src/objects/script.h
@@ -7,6 +7,7 @@
#include "src/objects.h"
#include "src/objects/fixed-array.h"
+#include "src/objects/struct.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -15,8 +16,9 @@ namespace v8 {
namespace internal {
// Script describes a script which has been added to the VM.
-class Script : public Struct, public NeverReadOnlySpaceObject {
+class Script : public Struct {
public:
+ NEVER_READ_ONLY_SPACE
// Script types.
enum Type {
TYPE_NATIVE = 0,
@@ -123,7 +125,7 @@ class Script : public Struct, public NeverReadOnlySpaceObject {
// resource is accessible. Otherwise, always return true.
inline bool HasValidSource();
- Object* GetNameOrSourceURL();
+ Object GetNameOrSourceURL();
// Retrieve source position from where eval was called.
int GetEvalPosition();
@@ -176,7 +178,7 @@ class Script : public Struct, public NeverReadOnlySpaceObject {
class Iterator {
public:
explicit Iterator(Isolate* isolate);
- Script* Next();
+ Script Next();
private:
WeakArrayList::Iterator iterator_;
@@ -187,26 +189,28 @@ class Script : public Struct, public NeverReadOnlySpaceObject {
DECL_PRINTER(Script)
DECL_VERIFIER(Script)
- static const int kSourceOffset = HeapObject::kHeaderSize;
- static const int kNameOffset = kSourceOffset + kPointerSize;
- static const int kLineOffsetOffset = kNameOffset + kPointerSize;
- static const int kColumnOffsetOffset = kLineOffsetOffset + kPointerSize;
- static const int kContextOffset = kColumnOffsetOffset + kPointerSize;
- static const int kTypeOffset = kContextOffset + kPointerSize;
- static const int kLineEndsOffset = kTypeOffset + kPointerSize;
- static const int kIdOffset = kLineEndsOffset + kPointerSize;
- static const int kEvalFromSharedOrWrappedArgumentsOffset =
- kIdOffset + kPointerSize;
- static const int kEvalFromPositionOffset =
- kEvalFromSharedOrWrappedArgumentsOffset + kPointerSize;
- static const int kSharedFunctionInfosOffset =
- kEvalFromPositionOffset + kPointerSize;
- static const int kFlagsOffset = kSharedFunctionInfosOffset + kPointerSize;
- static const int kSourceUrlOffset = kFlagsOffset + kPointerSize;
- static const int kSourceMappingUrlOffset = kSourceUrlOffset + kPointerSize;
- static const int kHostDefinedOptionsOffset =
- kSourceMappingUrlOffset + kPointerSize;
- static const int kSize = kHostDefinedOptionsOffset + kPointerSize;
+// Layout description.
+#define SCRIPTS_FIELDS(V) \
+ V(kSourceOffset, kTaggedSize) \
+ V(kNameOffset, kTaggedSize) \
+ V(kLineOffsetOffset, kTaggedSize) \
+ V(kColumnOffsetOffset, kTaggedSize) \
+ V(kContextOffset, kTaggedSize) \
+ V(kTypeOffset, kTaggedSize) \
+ V(kLineEndsOffset, kTaggedSize) \
+ V(kIdOffset, kTaggedSize) \
+ V(kEvalFromSharedOrWrappedArgumentsOffset, kTaggedSize) \
+ V(kEvalFromPositionOffset, kTaggedSize) \
+ V(kSharedFunctionInfosOffset, kTaggedSize) \
+ V(kFlagsOffset, kTaggedSize) \
+ V(kSourceUrlOffset, kTaggedSize) \
+ V(kSourceMappingUrlOffset, kTaggedSize) \
+ V(kHostDefinedOptionsOffset, kTaggedSize) \
+ /* Total size. */ \
+ V(kSize, 0)
+
+ DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize, SCRIPTS_FIELDS)
+#undef SCRIPTS_FIELDS
private:
// Bit positions in the flags field.
@@ -217,7 +221,7 @@ class Script : public Struct, public NeverReadOnlySpaceObject {
static const int kOriginOptionsMask = ((1 << kOriginOptionsSize) - 1)
<< kOriginOptionsShift;
- DISALLOW_IMPLICIT_CONSTRUCTORS(Script);
+ OBJECT_CONSTRUCTORS(Script, Struct);
};
} // namespace internal
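
Script likewise swaps its NeverReadOnlySpaceObject base for a NEVER_READ_ONLY_SPACE marker, keeping the class a single-inheritance value type. A hypothetical expansion sketch, assuming the macro merely injects the isolate/heap accessor declarations that the old mixin base used to provide (the member list here is a guess, not V8's verified macro body):

class Heap;
class Isolate;

// Hypothetical stand-in: with one-word value classes there is no room for
// a second base class, so per-class capabilities become injected members.
#define NEVER_READ_ONLY_SPACE \
 public:                      \
  Heap* GetHeap() const;      \
  Isolate* GetIsolate() const;

class Script {
  NEVER_READ_ONLY_SPACE
};

int main() { return 0; }
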
diff --git a/deps/v8/src/objects/shared-function-info-inl.h b/deps/v8/src/objects/shared-function-info-inl.h
index cf057e9ca0..5484441030 100644
--- a/deps/v8/src/objects/shared-function-info-inl.h
+++ b/deps/v8/src/objects/shared-function-info-inl.h
@@ -7,11 +7,13 @@
#include "src/objects/shared-function-info.h"
+#include "src/feedback-vector-inl.h"
#include "src/handles-inl.h"
#include "src/heap/heap-inl.h"
#include "src/objects/debug-objects-inl.h"
#include "src/objects/scope-info.h"
#include "src/objects/templates.h"
+#include "src/wasm/wasm-objects-inl.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -19,39 +21,72 @@
namespace v8 {
namespace internal {
-CAST_ACCESSOR(PreParsedScopeData)
-ACCESSORS(PreParsedScopeData, scope_data, PodArray<uint8_t>, kScopeDataOffset)
-INT_ACCESSORS(PreParsedScopeData, length, kLengthOffset)
+OBJECT_CONSTRUCTORS_IMPL(PreparseData, HeapObject)
-Object* PreParsedScopeData::child_data(int index) const {
- DCHECK_GE(index, 0);
- DCHECK_LT(index, this->length());
- int offset = kChildDataStartOffset + index * kPointerSize;
- return RELAXED_READ_FIELD(this, offset);
+CAST_ACCESSOR(PreparseData)
+INT_ACCESSORS(PreparseData, data_length, kDataLengthOffset)
+INT_ACCESSORS(PreparseData, children_length, kInnerLengthOffset)
+
+int PreparseData::inner_start_offset() const {
+ return InnerOffset(data_length());
}
-void PreParsedScopeData::set_child_data(int index, Object* value,
- WriteBarrierMode mode) {
- DCHECK_GE(index, 0);
- DCHECK_LT(index, this->length());
- int offset = kChildDataStartOffset + index * kPointerSize;
- RELAXED_WRITE_FIELD(this, offset, value);
- CONDITIONAL_WRITE_BARRIER(this, offset, value, mode);
+ObjectSlot PreparseData::inner_data_start() const {
+ return RawField(inner_start_offset());
}
-Object** PreParsedScopeData::child_data_start() const {
- return HeapObject::RawField(this, kChildDataStartOffset);
+void PreparseData::clear_padding() {
+ int data_end_offset = kDataStartOffset + data_length();
+ int padding_size = inner_start_offset() - data_end_offset;
+ DCHECK_LE(0, padding_size);
+ if (padding_size == 0) return;
+ memset(reinterpret_cast<void*>(address() + data_end_offset), 0, padding_size);
}
-void PreParsedScopeData::clear_padding() {
- // For archs where kIntSize < kPointerSize, there will be padding between the
- // length field and the start of the child data.
- if (kUnalignedChildDataStartOffset < kChildDataStartOffset) {
- memset(reinterpret_cast<void*>(address() + kUnalignedChildDataStartOffset),
- 0, kChildDataStartOffset - kUnalignedChildDataStartOffset);
- }
+byte PreparseData::get(int index) const {
+ DCHECK_LE(0, index);
+ DCHECK_LT(index, data_length());
+ int offset = kDataStartOffset + index * kByteSize;
+ return READ_BYTE_FIELD(*this, offset);
+}
+
+void PreparseData::set(int index, byte value) {
+ DCHECK_LE(0, index);
+ DCHECK_LT(index, data_length());
+ int offset = kDataStartOffset + index * kByteSize;
+ WRITE_BYTE_FIELD(*this, offset, value);
+}
+
+void PreparseData::copy_in(int index, const byte* buffer, int length) {
+ DCHECK(index >= 0 && length >= 0 && length <= kMaxInt - index &&
+ index + length <= this->data_length());
+ Address dst_addr = FIELD_ADDR(this, kDataStartOffset + index * kByteSize);
+ memcpy(reinterpret_cast<void*>(dst_addr), buffer, length);
+}
+
+PreparseData PreparseData::get_child(int index) const {
+ return PreparseData::cast(get_child_raw(index));
+}
+
+Object PreparseData::get_child_raw(int index) const {
+ DCHECK_LE(0, index);
+ DCHECK_LT(index, this->children_length());
+ int offset = inner_start_offset() + index * kTaggedSize;
+ return RELAXED_READ_FIELD(*this, offset);
}
+void PreparseData::set_child(int index, PreparseData value,
+ WriteBarrierMode mode) {
+ DCHECK_LE(0, index);
+ DCHECK_LT(index, this->children_length());
+ int offset = inner_start_offset() + index * kTaggedSize;
+ RELAXED_WRITE_FIELD(*this, offset, value);
+ CONDITIONAL_WRITE_BARRIER(*this, offset, value, mode);
+}
+
+OBJECT_CONSTRUCTORS_IMPL(UncompiledData, HeapObject)
+OBJECT_CONSTRUCTORS_IMPL(UncompiledDataWithoutPreparseData, UncompiledData)
+OBJECT_CONSTRUCTORS_IMPL(UncompiledDataWithPreparseData, UncompiledData)
CAST_ACCESSOR(UncompiledData)
ACCESSORS(UncompiledData, inferred_name, String, kInferredNameOffset)
INT32_ACCESSORS(UncompiledData, start_position, kStartPositionOffset)
@@ -59,31 +94,32 @@ INT32_ACCESSORS(UncompiledData, end_position, kEndPositionOffset)
INT32_ACCESSORS(UncompiledData, function_literal_id, kFunctionLiteralIdOffset)
void UncompiledData::clear_padding() {
- // For archs where kIntSize < kPointerSize, there will be padding at the end
- // of the data.
- if (kUnalignedSize < kSize) {
- memset(reinterpret_cast<void*>(address() + kUnalignedSize), 0,
- kSize - kUnalignedSize);
- }
+ if (FIELD_SIZE(kOptionalPaddingOffset) == 0) return;
+ DCHECK_EQ(4, FIELD_SIZE(kOptionalPaddingOffset));
+ memset(reinterpret_cast<void*>(address() + kOptionalPaddingOffset), 0,
+ FIELD_SIZE(kOptionalPaddingOffset));
}
-CAST_ACCESSOR(UncompiledDataWithoutPreParsedScope)
+CAST_ACCESSOR(UncompiledDataWithoutPreparseData)
-CAST_ACCESSOR(UncompiledDataWithPreParsedScope)
-ACCESSORS(UncompiledDataWithPreParsedScope, pre_parsed_scope_data,
- PreParsedScopeData, kPreParsedScopeDataOffset)
+CAST_ACCESSOR(UncompiledDataWithPreparseData)
+ACCESSORS(UncompiledDataWithPreparseData, preparse_data, PreparseData,
+ kPreparseDataOffset)
+
+OBJECT_CONSTRUCTORS_IMPL(InterpreterData, Struct)
CAST_ACCESSOR(InterpreterData)
ACCESSORS(InterpreterData, bytecode_array, BytecodeArray, kBytecodeArrayOffset)
ACCESSORS(InterpreterData, interpreter_trampoline, Code,
kInterpreterTrampolineOffset)
+OBJECT_CONSTRUCTORS_IMPL(SharedFunctionInfo, HeapObject)
+NEVER_READ_ONLY_SPACE_IMPL(SharedFunctionInfo)
CAST_ACCESSOR(SharedFunctionInfo)
DEFINE_DEOPT_ELEMENT_ACCESSORS(SharedFunctionInfo, Object)
ACCESSORS(SharedFunctionInfo, name_or_scope_info, Object,
kNameOrScopeInfoOffset)
-ACCESSORS(SharedFunctionInfo, function_data, Object, kFunctionDataOffset)
ACCESSORS(SharedFunctionInfo, script_or_debug_info, Object,
kScriptOrDebugInfoOffset)
@@ -98,19 +134,19 @@ UINT8_ACCESSORS(SharedFunctionInfo, expected_nof_properties,
UINT8_ACCESSORS(SharedFunctionInfo, raw_builtin_function_id, kBuiltinFunctionId)
UINT16_ACCESSORS(SharedFunctionInfo, raw_function_token_offset,
kFunctionTokenOffsetOffset)
-INT_ACCESSORS(SharedFunctionInfo, flags, kFlagsOffset)
+RELAXED_INT32_ACCESSORS(SharedFunctionInfo, flags, kFlagsOffset)
bool SharedFunctionInfo::HasSharedName() const {
- Object* value = name_or_scope_info();
+ Object value = name_or_scope_info();
if (value->IsScopeInfo()) {
return ScopeInfo::cast(value)->HasSharedFunctionName();
}
return value != kNoSharedNameSentinel;
}
-String* SharedFunctionInfo::Name() const {
+String SharedFunctionInfo::Name() const {
if (!HasSharedName()) return GetReadOnlyRoots().empty_string();
- Object* value = name_or_scope_info();
+ Object value = name_or_scope_info();
if (value->IsScopeInfo()) {
if (ScopeInfo::cast(value)->HasFunctionName()) {
return String::cast(ScopeInfo::cast(value)->FunctionName());
@@ -120,8 +156,8 @@ String* SharedFunctionInfo::Name() const {
return String::cast(value);
}
-void SharedFunctionInfo::SetName(String* name) {
- Object* maybe_scope_info = name_or_scope_info();
+void SharedFunctionInfo::SetName(String name) {
+ Object maybe_scope_info = name_or_scope_info();
if (maybe_scope_info->IsScopeInfo()) {
ScopeInfo::cast(maybe_scope_info)->SetFunctionName(name);
} else {
@@ -132,7 +168,7 @@ void SharedFunctionInfo::SetName(String* name) {
UpdateFunctionMapIndex();
}
-AbstractCode* SharedFunctionInfo::abstract_code() {
+AbstractCode SharedFunctionInfo::abstract_code() {
if (HasBytecodeArray()) {
return AbstractCode::cast(GetBytecodeArray());
} else {
@@ -140,6 +176,15 @@ AbstractCode* SharedFunctionInfo::abstract_code() {
}
}
+Object SharedFunctionInfo::function_data() const {
+ return RELAXED_READ_FIELD(*this, kFunctionDataOffset);
+}
+
+void SharedFunctionInfo::set_function_data(Object data, WriteBarrierMode mode) {
+ RELAXED_WRITE_FIELD(*this, kFunctionDataOffset, data);
+ CONDITIONAL_WRITE_BARRIER(*this, kFunctionDataOffset, data, mode);
+}
+
int SharedFunctionInfo::function_token_position() const {
int offset = raw_function_token_offset();
if (offset == kFunctionTokenOutOfRange) {
@@ -163,15 +208,13 @@ BIT_FIELD_ACCESSORS(SharedFunctionInfo, flags, native,
BIT_FIELD_ACCESSORS(SharedFunctionInfo, flags, is_asm_wasm_broken,
SharedFunctionInfo::IsAsmWasmBrokenBit)
BIT_FIELD_ACCESSORS(SharedFunctionInfo, flags,
- requires_instance_fields_initializer,
- SharedFunctionInfo::RequiresInstanceFieldsInitializer)
+ requires_instance_members_initializer,
+ SharedFunctionInfo::RequiresInstanceMembersInitializer)
BIT_FIELD_ACCESSORS(SharedFunctionInfo, flags, name_should_print_as_anonymous,
SharedFunctionInfo::NameShouldPrintAsAnonymousBit)
BIT_FIELD_ACCESSORS(SharedFunctionInfo, flags, is_anonymous_expression,
SharedFunctionInfo::IsAnonymousExpressionBit)
-BIT_FIELD_ACCESSORS(SharedFunctionInfo, flags, deserialized,
- SharedFunctionInfo::IsDeserializedBit)
BIT_FIELD_ACCESSORS(SharedFunctionInfo, flags, has_reported_binary_coverage,
SharedFunctionInfo::HasReportedBinaryCoverageBit)
@@ -179,6 +222,8 @@ BIT_FIELD_ACCESSORS(SharedFunctionInfo, flags, is_named_expression,
SharedFunctionInfo::IsNamedExpressionBit)
BIT_FIELD_ACCESSORS(SharedFunctionInfo, flags, is_toplevel,
SharedFunctionInfo::IsTopLevelBit)
+BIT_FIELD_ACCESSORS(SharedFunctionInfo, flags, is_oneshot_iife,
+ SharedFunctionInfo::IsOneshotIIFEBit)
bool SharedFunctionInfo::optimization_disabled() const {
return disable_optimization_reason() != BailoutReason::kNoReason;
@@ -284,18 +329,18 @@ void SharedFunctionInfo::DontAdaptArguments() {
bool SharedFunctionInfo::IsInterpreted() const { return HasBytecodeArray(); }
-ScopeInfo* SharedFunctionInfo::scope_info() const {
- Object* maybe_scope_info = name_or_scope_info();
+ScopeInfo SharedFunctionInfo::scope_info() const {
+ Object maybe_scope_info = name_or_scope_info();
if (maybe_scope_info->IsScopeInfo()) {
return ScopeInfo::cast(maybe_scope_info);
}
return ScopeInfo::Empty(GetIsolate());
}
-void SharedFunctionInfo::set_scope_info(ScopeInfo* scope_info,
+void SharedFunctionInfo::set_scope_info(ScopeInfo scope_info,
WriteBarrierMode mode) {
// Move the existing name onto the ScopeInfo.
- Object* name = name_or_scope_info();
+ Object name = name_or_scope_info();
if (name->IsScopeInfo()) {
name = ScopeInfo::cast(name)->FunctionName();
}
@@ -305,23 +350,21 @@ void SharedFunctionInfo::set_scope_info(ScopeInfo* scope_info,
if (HasInferredName() && inferred_name()->length() != 0) {
scope_info->SetInferredFunctionName(inferred_name());
}
- WRITE_FIELD(this, kNameOrScopeInfoOffset,
- reinterpret_cast<Object*>(scope_info));
- CONDITIONAL_WRITE_BARRIER(this, kNameOrScopeInfoOffset,
- reinterpret_cast<Object*>(scope_info), mode);
+ WRITE_FIELD(*this, kNameOrScopeInfoOffset, scope_info);
+ CONDITIONAL_WRITE_BARRIER(*this, kNameOrScopeInfoOffset, scope_info, mode);
}
ACCESSORS(SharedFunctionInfo, raw_outer_scope_info_or_feedback_metadata,
HeapObject, kOuterScopeInfoOrFeedbackMetadataOffset)
-HeapObject* SharedFunctionInfo::outer_scope_info() const {
+HeapObject SharedFunctionInfo::outer_scope_info() const {
DCHECK(!is_compiled());
DCHECK(!HasFeedbackMetadata());
return raw_outer_scope_info_or_feedback_metadata();
}
bool SharedFunctionInfo::HasOuterScopeInfo() const {
- ScopeInfo* outer_info = nullptr;
+ ScopeInfo outer_info;
if (!is_compiled()) {
if (!outer_scope_info()->IsScopeInfo()) return false;
outer_info = ScopeInfo::cast(outer_scope_info());
@@ -332,42 +375,55 @@ bool SharedFunctionInfo::HasOuterScopeInfo() const {
return outer_info->length() > 0;
}
-ScopeInfo* SharedFunctionInfo::GetOuterScopeInfo() const {
+ScopeInfo SharedFunctionInfo::GetOuterScopeInfo() const {
DCHECK(HasOuterScopeInfo());
if (!is_compiled()) return ScopeInfo::cast(outer_scope_info());
return scope_info()->OuterScopeInfo();
}
-void SharedFunctionInfo::set_outer_scope_info(HeapObject* value,
+void SharedFunctionInfo::set_outer_scope_info(HeapObject value,
WriteBarrierMode mode) {
DCHECK(!is_compiled());
DCHECK(raw_outer_scope_info_or_feedback_metadata()->IsTheHole());
DCHECK(value->IsScopeInfo() || value->IsTheHole());
- return set_raw_outer_scope_info_or_feedback_metadata(value, mode);
+ set_raw_outer_scope_info_or_feedback_metadata(value, mode);
}
bool SharedFunctionInfo::HasFeedbackMetadata() const {
return raw_outer_scope_info_or_feedback_metadata()->IsFeedbackMetadata();
}
-FeedbackMetadata* SharedFunctionInfo::feedback_metadata() const {
+FeedbackMetadata SharedFunctionInfo::feedback_metadata() const {
DCHECK(HasFeedbackMetadata());
return FeedbackMetadata::cast(raw_outer_scope_info_or_feedback_metadata());
}
-void SharedFunctionInfo::set_feedback_metadata(FeedbackMetadata* value,
+void SharedFunctionInfo::set_feedback_metadata(FeedbackMetadata value,
WriteBarrierMode mode) {
DCHECK(!HasFeedbackMetadata());
DCHECK(value->IsFeedbackMetadata());
- return set_raw_outer_scope_info_or_feedback_metadata(value, mode);
+ set_raw_outer_scope_info_or_feedback_metadata(value, mode);
}
bool SharedFunctionInfo::is_compiled() const {
- Object* data = function_data();
+ Object data = function_data();
return data != Smi::FromEnum(Builtins::kCompileLazy) &&
!data->IsUncompiledData();
}
+IsCompiledScope SharedFunctionInfo::is_compiled_scope() const {
+ return IsCompiledScope(*this, GetIsolate());
+}
+
+IsCompiledScope::IsCompiledScope(const SharedFunctionInfo shared,
+ Isolate* isolate)
+ : retain_bytecode_(shared->HasBytecodeArray()
+ ? handle(shared->GetBytecodeArray(), isolate)
+ : MaybeHandle<BytecodeArray>()),
+ is_compiled_(shared->is_compiled()) {
+ DCHECK_IMPLIES(!retain_bytecode_.is_null(), is_compiled());
+}
+
uint16_t SharedFunctionInfo::GetLength() const {
DCHECK(is_compiled());
DCHECK(HasLength());
@@ -386,7 +442,7 @@ bool SharedFunctionInfo::IsApiFunction() const {
return function_data()->IsFunctionTemplateInfo();
}
-FunctionTemplateInfo* SharedFunctionInfo::get_api_func_data() {
+FunctionTemplateInfo SharedFunctionInfo::get_api_func_data() {
DCHECK(IsApiFunction());
return FunctionTemplateInfo::cast(function_data());
}
@@ -396,7 +452,7 @@ bool SharedFunctionInfo::HasBytecodeArray() const {
function_data()->IsInterpreterData();
}
-BytecodeArray* SharedFunctionInfo::GetBytecodeArray() const {
+BytecodeArray SharedFunctionInfo::GetBytecodeArray() const {
DCHECK(HasBytecodeArray());
if (HasDebugInfo() && GetDebugInfo()->HasInstrumentedBytecodeArray()) {
return GetDebugInfo()->OriginalBytecodeArray();
@@ -408,7 +464,7 @@ BytecodeArray* SharedFunctionInfo::GetBytecodeArray() const {
}
}
-BytecodeArray* SharedFunctionInfo::GetDebugBytecodeArray() const {
+BytecodeArray SharedFunctionInfo::GetDebugBytecodeArray() const {
DCHECK(HasBytecodeArray());
DCHECK(HasDebugInfo() && GetDebugInfo()->HasInstrumentedBytecodeArray());
if (function_data()->IsBytecodeArray()) {
@@ -419,7 +475,7 @@ BytecodeArray* SharedFunctionInfo::GetDebugBytecodeArray() const {
}
}
-void SharedFunctionInfo::SetDebugBytecodeArray(BytecodeArray* bytecode) {
+void SharedFunctionInfo::SetDebugBytecodeArray(BytecodeArray bytecode) {
DCHECK(HasBytecodeArray());
if (function_data()->IsBytecodeArray()) {
set_function_data(bytecode);
@@ -429,13 +485,36 @@ void SharedFunctionInfo::SetDebugBytecodeArray(BytecodeArray* bytecode) {
}
}
-void SharedFunctionInfo::set_bytecode_array(BytecodeArray* bytecode) {
+void SharedFunctionInfo::set_bytecode_array(BytecodeArray bytecode) {
DCHECK(function_data() == Smi::FromEnum(Builtins::kCompileLazy) ||
HasUncompiledData());
set_function_data(bytecode);
}
-Code* SharedFunctionInfo::InterpreterTrampoline() const {
+bool SharedFunctionInfo::ShouldFlushBytecode() {
+ if (!FLAG_flush_bytecode) return false;
+
+  // TODO(rmcilroy): Enable bytecode flushing for resumable functions and class
+ // member initializers.
+ if (IsResumableFunction(kind()) ||
+ IsClassMembersInitializerFunction(kind()) || !allows_lazy_compilation()) {
+ return false;
+ }
+
+  // Take a snapshot of the function data field, and if it is a bytecode
+  // array, check whether it is old. The snapshot is needed because this
+  // function can be called by the concurrent marker.
+ Object data = function_data();
+ if (!data->IsBytecodeArray()) return false;
+
+ if (FLAG_stress_flush_bytecode) return true;
+
+ BytecodeArray bytecode = BytecodeArray::cast(data);
+
+ return bytecode->IsOld();
+}
+
+Code SharedFunctionInfo::InterpreterTrampoline() const {
DCHECK(HasInterpreterData());
return interpreter_data()->interpreter_trampoline();
}
@@ -444,27 +523,27 @@ bool SharedFunctionInfo::HasInterpreterData() const {
return function_data()->IsInterpreterData();
}
-InterpreterData* SharedFunctionInfo::interpreter_data() const {
+InterpreterData SharedFunctionInfo::interpreter_data() const {
DCHECK(HasInterpreterData());
return InterpreterData::cast(function_data());
}
void SharedFunctionInfo::set_interpreter_data(
- InterpreterData* interpreter_data) {
+ InterpreterData interpreter_data) {
DCHECK(FLAG_interpreted_frames_native_stack);
set_function_data(interpreter_data);
}
bool SharedFunctionInfo::HasAsmWasmData() const {
- return function_data()->IsFixedArray();
+ return function_data()->IsAsmWasmData();
}
-FixedArray* SharedFunctionInfo::asm_wasm_data() const {
+AsmWasmData SharedFunctionInfo::asm_wasm_data() const {
DCHECK(HasAsmWasmData());
- return FixedArray::cast(function_data());
+ return AsmWasmData::cast(function_data());
}
-void SharedFunctionInfo::set_asm_wasm_data(FixedArray* data) {
+void SharedFunctionInfo::set_asm_wasm_data(AsmWasmData data) {
DCHECK(function_data() == Smi::FromEnum(Builtins::kCompileLazy) ||
HasUncompiledData() || HasAsmWasmData());
set_function_data(data);
@@ -483,7 +562,6 @@ int SharedFunctionInfo::builtin_id() const {
void SharedFunctionInfo::set_builtin_id(int builtin_id) {
DCHECK(Builtins::IsBuiltinId(builtin_id));
- DCHECK_NE(builtin_id, Builtins::kDeserializeLazy);
set_function_data(Smi::FromInt(builtin_id), SKIP_WRITE_BARRIER);
}
@@ -491,43 +569,42 @@ bool SharedFunctionInfo::HasUncompiledData() const {
return function_data()->IsUncompiledData();
}
-UncompiledData* SharedFunctionInfo::uncompiled_data() const {
+UncompiledData SharedFunctionInfo::uncompiled_data() const {
DCHECK(HasUncompiledData());
return UncompiledData::cast(function_data());
}
-void SharedFunctionInfo::set_uncompiled_data(UncompiledData* uncompiled_data) {
+void SharedFunctionInfo::set_uncompiled_data(UncompiledData uncompiled_data) {
DCHECK(function_data() == Smi::FromEnum(Builtins::kCompileLazy));
DCHECK(uncompiled_data->IsUncompiledData());
set_function_data(uncompiled_data);
}
-bool SharedFunctionInfo::HasUncompiledDataWithPreParsedScope() const {
- return function_data()->IsUncompiledDataWithPreParsedScope();
+bool SharedFunctionInfo::HasUncompiledDataWithPreparseData() const {
+ return function_data()->IsUncompiledDataWithPreparseData();
}
-UncompiledDataWithPreParsedScope*
-SharedFunctionInfo::uncompiled_data_with_pre_parsed_scope() const {
- DCHECK(HasUncompiledDataWithPreParsedScope());
- return UncompiledDataWithPreParsedScope::cast(function_data());
+UncompiledDataWithPreparseData
+SharedFunctionInfo::uncompiled_data_with_preparse_data() const {
+ DCHECK(HasUncompiledDataWithPreparseData());
+ return UncompiledDataWithPreparseData::cast(function_data());
}
-void SharedFunctionInfo::set_uncompiled_data_with_pre_parsed_scope(
- UncompiledDataWithPreParsedScope* uncompiled_data_with_pre_parsed_scope) {
+void SharedFunctionInfo::set_uncompiled_data_with_preparse_data(
+ UncompiledDataWithPreparseData uncompiled_data_with_preparse_data) {
DCHECK(function_data() == Smi::FromEnum(Builtins::kCompileLazy));
- DCHECK(uncompiled_data_with_pre_parsed_scope
- ->IsUncompiledDataWithPreParsedScope());
- set_function_data(uncompiled_data_with_pre_parsed_scope);
+ DCHECK(
+ uncompiled_data_with_preparse_data->IsUncompiledDataWithPreparseData());
+ set_function_data(uncompiled_data_with_preparse_data);
}
-bool SharedFunctionInfo::HasUncompiledDataWithoutPreParsedScope() const {
- return function_data()->IsUncompiledDataWithoutPreParsedScope();
+bool SharedFunctionInfo::HasUncompiledDataWithoutPreparseData() const {
+ return function_data()->IsUncompiledDataWithoutPreparseData();
}
-void SharedFunctionInfo::ClearPreParsedScopeData() {
- DCHECK(HasUncompiledDataWithPreParsedScope());
- UncompiledDataWithPreParsedScope* data =
- uncompiled_data_with_pre_parsed_scope();
+void SharedFunctionInfo::ClearPreparseData() {
+ DCHECK(HasUncompiledDataWithPreparseData());
+ UncompiledDataWithPreparseData data = uncompiled_data_with_preparse_data();
// Trim off the pre-parsed scope data from the uncompiled data by swapping the
// map, leaving only uncompiled data without pre-parsed scope data.
@@ -535,40 +612,69 @@ void SharedFunctionInfo::ClearPreParsedScopeData() {
Heap* heap = Heap::FromWritableHeapObject(data);
// Swap the map.
- heap->NotifyObjectLayoutChange(data, UncompiledDataWithPreParsedScope::kSize,
+ heap->NotifyObjectLayoutChange(data, UncompiledDataWithPreparseData::kSize,
no_gc);
- STATIC_ASSERT(UncompiledDataWithoutPreParsedScope::kSize <
- UncompiledDataWithPreParsedScope::kSize);
- STATIC_ASSERT(UncompiledDataWithoutPreParsedScope::kSize ==
+ STATIC_ASSERT(UncompiledDataWithoutPreparseData::kSize <
+ UncompiledDataWithPreparseData::kSize);
+ STATIC_ASSERT(UncompiledDataWithoutPreparseData::kSize ==
UncompiledData::kSize);
data->synchronized_set_map(
- GetReadOnlyRoots().uncompiled_data_without_pre_parsed_scope_map());
+ GetReadOnlyRoots().uncompiled_data_without_preparse_data_map());
// Fill the remaining space with filler.
heap->CreateFillerObjectAt(
- data->address() + UncompiledDataWithoutPreParsedScope::kSize,
- UncompiledDataWithPreParsedScope::kSize -
- UncompiledDataWithoutPreParsedScope::kSize,
+ data->address() + UncompiledDataWithoutPreparseData::kSize,
+ UncompiledDataWithPreparseData::kSize -
+ UncompiledDataWithoutPreparseData::kSize,
ClearRecordedSlots::kNo);
// Ensure that the clear was successful.
- DCHECK(HasUncompiledDataWithoutPreParsedScope());
+ DCHECK(HasUncompiledDataWithoutPreparseData());
+}
+
+// static
+void UncompiledData::Initialize(
+ UncompiledData data, String inferred_name, int start_position,
+ int end_position, int function_literal_id,
+ std::function<void(HeapObject object, ObjectSlot slot, HeapObject target)>
+ gc_notify_updated_slot) {
+ data->set_inferred_name(inferred_name);
+ gc_notify_updated_slot(
+ data, data->RawField(UncompiledData::kInferredNameOffset), inferred_name);
+ data->set_start_position(start_position);
+ data->set_end_position(end_position);
+ data->set_function_literal_id(function_literal_id);
+ data->clear_padding();
+}
+
+void UncompiledDataWithPreparseData::Initialize(
+ UncompiledDataWithPreparseData data, String inferred_name,
+ int start_position, int end_position, int function_literal_id,
+ PreparseData scope_data,
+ std::function<void(HeapObject object, ObjectSlot slot, HeapObject target)>
+ gc_notify_updated_slot) {
+ UncompiledData::Initialize(data, inferred_name, start_position, end_position,
+ function_literal_id, gc_notify_updated_slot);
+ data->set_preparse_data(scope_data);
+ gc_notify_updated_slot(
+ data, data->RawField(UncompiledDataWithPreparseData::kPreparseDataOffset),
+ scope_data);
}
bool SharedFunctionInfo::HasWasmExportedFunctionData() const {
return function_data()->IsWasmExportedFunctionData();
}
-Object* SharedFunctionInfo::script() const {
- Object* maybe_script = script_or_debug_info();
+Object SharedFunctionInfo::script() const {
+ Object maybe_script = script_or_debug_info();
if (maybe_script->IsDebugInfo()) {
return DebugInfo::cast(maybe_script)->script();
}
return maybe_script;
}
-void SharedFunctionInfo::set_script(Object* script) {
- Object* maybe_debug_info = script_or_debug_info();
+void SharedFunctionInfo::set_script(Object script) {
+ Object maybe_debug_info = script_or_debug_info();
if (maybe_debug_info->IsDebugInfo()) {
DebugInfo::cast(maybe_debug_info)->set_script(script);
} else {
@@ -580,12 +686,12 @@ bool SharedFunctionInfo::HasDebugInfo() const {
return script_or_debug_info()->IsDebugInfo();
}
-DebugInfo* SharedFunctionInfo::GetDebugInfo() const {
+DebugInfo SharedFunctionInfo::GetDebugInfo() const {
DCHECK(HasDebugInfo());
return DebugInfo::cast(script_or_debug_info());
}
-void SharedFunctionInfo::SetDebugInfo(DebugInfo* debug_info) {
+void SharedFunctionInfo::SetDebugInfo(DebugInfo debug_info) {
DCHECK(!HasDebugInfo());
DCHECK_EQ(debug_info->script(), script_or_debug_info());
set_script_or_debug_info(debug_info);
@@ -604,19 +710,19 @@ void SharedFunctionInfo::set_builtin_function_id(BuiltinFunctionId id) {
}
bool SharedFunctionInfo::HasInferredName() {
- Object* scope_info = name_or_scope_info();
+ Object scope_info = name_or_scope_info();
if (scope_info->IsScopeInfo()) {
return ScopeInfo::cast(scope_info)->HasInferredFunctionName();
}
return HasUncompiledData();
}
-String* SharedFunctionInfo::inferred_name() {
- Object* maybe_scope_info = name_or_scope_info();
+String SharedFunctionInfo::inferred_name() {
+ Object maybe_scope_info = name_or_scope_info();
if (maybe_scope_info->IsScopeInfo()) {
- ScopeInfo* scope_info = ScopeInfo::cast(maybe_scope_info);
+ ScopeInfo scope_info = ScopeInfo::cast(maybe_scope_info);
if (scope_info->HasInferredFunctionName()) {
- Object* name = ScopeInfo::cast(maybe_scope_info)->InferredFunctionName();
+ Object name = ScopeInfo::cast(maybe_scope_info)->InferredFunctionName();
if (name->IsString()) return String::cast(name);
}
} else if (HasUncompiledData()) {
@@ -626,9 +732,9 @@ String* SharedFunctionInfo::inferred_name() {
}
bool SharedFunctionInfo::IsUserJavaScript() {
- Object* script_obj = script();
+ Object script_obj = script();
if (script_obj->IsUndefined()) return false;
- Script* script = Script::cast(script_obj);
+ Script script = Script::cast(script_obj);
return script->IsUserJavaScript();
}
@@ -638,53 +744,10 @@ bool SharedFunctionInfo::IsSubjectToDebugging() {
bool SharedFunctionInfo::CanDiscardCompiled() const {
bool can_decompile = (HasBytecodeArray() || HasAsmWasmData() ||
- HasUncompiledDataWithPreParsedScope());
+ HasUncompiledDataWithPreparseData());
return can_decompile;
}
-// static
-void SharedFunctionInfo::DiscardCompiled(
- Isolate* isolate, Handle<SharedFunctionInfo> shared_info) {
- DCHECK(shared_info->CanDiscardCompiled());
-
- int start_position = shared_info->StartPosition();
- int end_position = shared_info->EndPosition();
- int function_literal_id = shared_info->FunctionLiteralId(isolate);
-
- if (shared_info->is_compiled()) {
- DisallowHeapAllocation no_gc;
-
- HeapObject* outer_scope_info;
- if (shared_info->scope_info()->HasOuterScopeInfo()) {
- outer_scope_info = shared_info->scope_info()->OuterScopeInfo();
- } else {
- outer_scope_info = ReadOnlyRoots(isolate).the_hole_value();
- }
- // Raw setter to avoid validity checks, since we're performing the unusual
- // task of decompiling.
- shared_info->set_raw_outer_scope_info_or_feedback_metadata(
- outer_scope_info);
- } else {
- DCHECK(shared_info->outer_scope_info()->IsScopeInfo() ||
- shared_info->outer_scope_info()->IsTheHole());
- }
-
- if (shared_info->HasUncompiledDataWithPreParsedScope()) {
- // If this is uncompiled data with a pre-parsed scope data, we can just
- // clear out the scope data and keep the uncompiled data.
- shared_info->ClearPreParsedScopeData();
- } else {
- // Create a new UncompiledData, without pre-parsed scope, and update the
- // function data to point to it. Use the raw function data setter to avoid
- // validity checks, since we're performing the unusual task of decompiling.
- Handle<UncompiledData> data =
- isolate->factory()->NewUncompiledDataWithoutPreParsedScope(
- handle(shared_info->inferred_name(), isolate), start_position,
- end_position, function_literal_id);
- shared_info->set_function_data(*data);
- }
-}
-
} // namespace internal
} // namespace v8
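
The ClearPreparseData path above shrinks a live object in place: after NotifyObjectLayoutChange, the map is swapped for the smaller variant (whose layout is a strict prefix of the larger one, as the STATIC_ASSERTs check) and the freed tail is overwritten with a filler so linear heap walks stay valid. A standalone toy model of that shrink-in-place pattern, with made-up sizes and a one-byte stand-in for the map word (nothing here is the real V8 API):

#include <cassert>
#include <cstring>

// Toy model: the small layout must be a strict prefix of the large one,
// so retyping the object and filling the tail is enough to "shrink" it.
constexpr size_t kLargeSize = 40;
constexpr size_t kSmallSize = 24;
static_assert(kSmallSize < kLargeSize, "can only trim, never grow");

enum MapKind : unsigned char { kSmallMap = 1, kLargeMap = 2 };

struct ToyObject {
  unsigned char map;                   // stand-in for the map word
  unsigned char body[kLargeSize - 1];
};

void ShrinkInPlace(ToyObject* obj) {
  assert(obj->map == kLargeMap);
  obj->map = kSmallMap;  // "swap the map": object is now the small variant
  // Fill the freed bytes so a linear walk over the heap still sees
  // well-formed data (V8 uses a filler object here).
  std::memset(reinterpret_cast<unsigned char*>(obj) + kSmallSize, 0,
              kLargeSize - kSmallSize);
}

int main() {
  ToyObject obj;
  std::memset(&obj, 0xAB, sizeof obj);
  obj.map = kLargeMap;
  ShrinkInPlace(&obj);
  assert(obj.map == kSmallMap && obj.body[kSmallSize - 1] == 0);
}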
diff --git a/deps/v8/src/objects/shared-function-info.h b/deps/v8/src/objects/shared-function-info.h
index f43fa61b2f..baaacd538c 100644
--- a/deps/v8/src/objects/shared-function-info.h
+++ b/deps/v8/src/objects/shared-function-info.h
@@ -9,6 +9,8 @@
#include "src/objects.h"
#include "src/objects/builtin-function-id.h"
#include "src/objects/script.h"
+#include "src/objects/smi.h"
+#include "src/objects/struct.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -16,51 +18,79 @@
namespace v8 {
namespace internal {
+class AsmWasmData;
class BytecodeArray;
class CoverageInfo;
class DebugInfo;
+class IsCompiledScope;
class WasmExportedFunctionData;
// Data collected by the pre-parser storing information about scopes and inner
// functions.
-class PreParsedScopeData : public HeapObject {
+//
+// PreparseData Layout:
+// +-------------------------------+
+// | data_length | children_length |
+// +-------------------------------+
+// | Scope Byte Data ... |
+// | ... |
+// +-------------------------------+
+// | [Padding] |
+// +-------------------------------+
+// | Inner PreparseData 1 |
+// +-------------------------------+
+// | ... |
+// +-------------------------------+
+// | Inner PreparseData N |
+// +-------------------------------+
+class PreparseData : public HeapObject {
public:
- DECL_ACCESSORS(scope_data, PodArray<uint8_t>)
- DECL_INT_ACCESSORS(length)
+ DECL_INT_ACCESSORS(data_length)
+ DECL_INT_ACCESSORS(children_length)
- inline Object* child_data(int index) const;
- inline void set_child_data(int index, Object* value,
- WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
+ inline int inner_start_offset() const;
+ inline ObjectSlot inner_data_start() const;
- inline Object** child_data_start() const;
+ inline byte get(int index) const;
+ inline void set(int index, byte value);
+ inline void copy_in(int index, const byte* buffer, int length);
+
+ inline PreparseData get_child(int index) const;
+ inline void set_child(int index, PreparseData value,
+ WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
// Clear uninitialized padding space.
inline void clear_padding();
- DECL_CAST(PreParsedScopeData)
- DECL_PRINTER(PreParsedScopeData)
- DECL_VERIFIER(PreParsedScopeData)
-
-#define PRE_PARSED_SCOPE_DATA_FIELDS(V) \
- V(kScopeDataOffset, kPointerSize) \
- V(kLengthOffset, kIntSize) \
- V(kUnalignedChildDataStartOffset, 0)
+ DECL_CAST(PreparseData)
+ DECL_PRINTER(PreparseData)
+ DECL_VERIFIER(PreparseData)
- DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize,
- PRE_PARSED_SCOPE_DATA_FIELDS)
-#undef PRE_PARSED_SCOPE_DATA_FIELDS
+// Layout description.
+#define PREPARSE_DATA_FIELDS(V) \
+ V(kDataLengthOffset, kInt32Size) \
+ V(kInnerLengthOffset, kInt32Size) \
+ /* Header size. */ \
+ V(kDataStartOffset, 0) \
+ V(kHeaderSize, 0)
- static const int kChildDataStartOffset =
- POINTER_SIZE_ALIGN(kUnalignedChildDataStartOffset);
+ DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize, PREPARSE_DATA_FIELDS)
+#undef PREPARSE_DATA_FIELDS
class BodyDescriptor;
- static constexpr int SizeFor(int length) {
- return kChildDataStartOffset + length * kPointerSize;
+ static int InnerOffset(int data_length) {
+ return RoundUp(kDataStartOffset + data_length * kByteSize, kTaggedSize);
}
+ static int SizeFor(int data_length, int children_length) {
+ return InnerOffset(data_length) + children_length * kTaggedSize;
+ }
+
+ OBJECT_CONSTRUCTORS(PreparseData, HeapObject);
+
private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(PreParsedScopeData);
+ inline Object get_child_raw(int index) const;
};
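
InnerOffset and SizeFor above encode the layout drawn in the class comment: raw scope bytes sit right after the header and get padded up to a tagged-size boundary before the inner PreparseData pointers start. A standalone model of that arithmetic (the kDataStartOffset and kTaggedSize values are illustrative assumptions for a 64-bit build, not the real header constants):

#include <cstddef>

constexpr int kTaggedSize = 8;        // assumed 64-bit tagged word
constexpr int kDataStartOffset = 16;  // assumed: map word + two int32 lengths
constexpr int kByteSize = 1;

constexpr int RoundUp(int x, int m) { return (x + m - 1) / m * m; }

constexpr int InnerOffset(int data_length) {
  return RoundUp(kDataStartOffset + data_length * kByteSize, kTaggedSize);
}

constexpr int SizeFor(int data_length, int children_length) {
  return InnerOffset(data_length) + children_length * kTaggedSize;
}

// 5 scope bytes end at offset 21, pad to 24, then two tagged children.
static_assert(InnerOffset(5) == 24, "byte data padded to tagged alignment");
static_assert(SizeFor(5, 2) == 40, "children are one tagged word each");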
// Abstract class representing extra data for an uncompiled function, which is
@@ -74,82 +104,97 @@ class UncompiledData : public HeapObject {
DECL_CAST(UncompiledData)
-#define UNCOMPILED_DATA_FIELDS(V) \
- V(kStartOfPointerFieldsOffset, 0) \
- V(kInferredNameOffset, kPointerSize) \
- V(kEndOfPointerFieldsOffset, 0) \
- V(kStartPositionOffset, kInt32Size) \
- V(kEndPositionOffset, kInt32Size) \
- V(kFunctionLiteralIdOffset, kInt32Size) \
- /* Total size. */ \
- V(kUnalignedSize, 0)
+ inline static void Initialize(
+ UncompiledData data, String inferred_name, int start_position,
+ int end_position, int function_literal_id,
+ std::function<void(HeapObject object, ObjectSlot slot, HeapObject target)>
+ gc_notify_updated_slot =
+ [](HeapObject object, ObjectSlot slot, HeapObject target) {});
+
+ // Layout description.
+#define UNCOMPILED_DATA_FIELDS(V) \
+ V(kStartOfPointerFieldsOffset, 0) \
+ V(kInferredNameOffset, kTaggedSize) \
+ V(kEndOfTaggedFieldsOffset, 0) \
+ /* Raw data fields. */ \
+ V(kStartPositionOffset, kInt32Size) \
+ V(kEndPositionOffset, kInt32Size) \
+ V(kFunctionLiteralIdOffset, kInt32Size) \
+ V(kOptionalPaddingOffset, POINTER_SIZE_PADDING(kOptionalPaddingOffset)) \
+ /* Header size. */ \
+ V(kSize, 0)
DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize, UNCOMPILED_DATA_FIELDS)
#undef UNCOMPILED_DATA_FIELDS
- static const int kSize = POINTER_SIZE_ALIGN(kUnalignedSize);
-
typedef FixedBodyDescriptor<kStartOfPointerFieldsOffset,
- kEndOfPointerFieldsOffset, kSize>
+ kEndOfTaggedFieldsOffset, kSize>
BodyDescriptor;
// Clear uninitialized padding space.
inline void clear_padding();
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(UncompiledData);
+ OBJECT_CONSTRUCTORS(UncompiledData, HeapObject);
};
// Class representing data for an uncompiled function that does not have any
// data from the pre-parser, either because it's a leaf function or because the
// pre-parser bailed out.
-class UncompiledDataWithoutPreParsedScope : public UncompiledData {
+class UncompiledDataWithoutPreparseData : public UncompiledData {
public:
- DECL_CAST(UncompiledDataWithoutPreParsedScope)
- DECL_PRINTER(UncompiledDataWithoutPreParsedScope)
- DECL_VERIFIER(UncompiledDataWithoutPreParsedScope)
+ DECL_CAST(UncompiledDataWithoutPreparseData)
+ DECL_PRINTER(UncompiledDataWithoutPreparseData)
+ DECL_VERIFIER(UncompiledDataWithoutPreparseData)
static const int kSize = UncompiledData::kSize;
// No extra fields compared to UncompiledData.
typedef UncompiledData::BodyDescriptor BodyDescriptor;
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(UncompiledDataWithoutPreParsedScope);
+ OBJECT_CONSTRUCTORS(UncompiledDataWithoutPreparseData, UncompiledData);
};
// Class representing data for an uncompiled function that has pre-parsed scope
// data.
-class UncompiledDataWithPreParsedScope : public UncompiledData {
+class UncompiledDataWithPreparseData : public UncompiledData {
public:
- DECL_ACCESSORS(pre_parsed_scope_data, PreParsedScopeData)
-
- DECL_CAST(UncompiledDataWithPreParsedScope)
- DECL_PRINTER(UncompiledDataWithPreParsedScope)
- DECL_VERIFIER(UncompiledDataWithPreParsedScope)
-
-#define UNCOMPILED_DATA_WITH_PRE_PARSED_SCOPE_FIELDS(V) \
- V(kStartOfPointerFieldsOffset, 0) \
- V(kPreParsedScopeDataOffset, kPointerSize) \
- V(kEndOfPointerFieldsOffset, 0) \
- /* Total size. */ \
+ DECL_ACCESSORS(preparse_data, PreparseData)
+
+ DECL_CAST(UncompiledDataWithPreparseData)
+ DECL_PRINTER(UncompiledDataWithPreparseData)
+ DECL_VERIFIER(UncompiledDataWithPreparseData)
+
+ inline static void Initialize(
+ UncompiledDataWithPreparseData data, String inferred_name,
+ int start_position, int end_position, int function_literal_id,
+ PreparseData scope_data,
+ std::function<void(HeapObject object, ObjectSlot slot, HeapObject target)>
+ gc_notify_updated_slot =
+ [](HeapObject object, ObjectSlot slot, HeapObject target) {});
+
+ // Layout description.
+
+#define UNCOMPILED_DATA_WITH_PREPARSE_DATA_FIELDS(V) \
+ V(kStartOfPointerFieldsOffset, 0) \
+ V(kPreparseDataOffset, kTaggedSize) \
+ V(kEndOfTaggedFieldsOffset, 0) \
+ /* Total size. */ \
V(kSize, 0)
DEFINE_FIELD_OFFSET_CONSTANTS(UncompiledData::kSize,
- UNCOMPILED_DATA_WITH_PRE_PARSED_SCOPE_FIELDS)
-#undef UNCOMPILED_DATA_WITH_PRE_PARSED_SCOPE_FIELDS
+ UNCOMPILED_DATA_WITH_PREPARSE_DATA_FIELDS)
+#undef UNCOMPILED_DATA_WITH_PREPARSE_DATA_FIELDS
// Make sure the size is aligned
STATIC_ASSERT(kSize == POINTER_SIZE_ALIGN(kSize));
typedef SubclassBodyDescriptor<
UncompiledData::BodyDescriptor,
- FixedBodyDescriptor<kStartOfPointerFieldsOffset,
- kEndOfPointerFieldsOffset, kSize>>
+ FixedBodyDescriptor<kStartOfPointerFieldsOffset, kEndOfTaggedFieldsOffset,
+ kSize>>
BodyDescriptor;
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(UncompiledDataWithPreParsedScope);
+ OBJECT_CONSTRUCTORS(UncompiledDataWithPreparseData, UncompiledData);
};
class InterpreterData : public Struct {
@@ -157,35 +202,40 @@ class InterpreterData : public Struct {
DECL_ACCESSORS(bytecode_array, BytecodeArray)
DECL_ACCESSORS(interpreter_trampoline, Code)
- static const int kBytecodeArrayOffset = Struct::kHeaderSize;
- static const int kInterpreterTrampolineOffset =
- kBytecodeArrayOffset + kPointerSize;
- static const int kSize = kInterpreterTrampolineOffset + kPointerSize;
+// Layout description.
+#define INTERPRETER_DATA_FIELDS(V) \
+ V(kBytecodeArrayOffset, kTaggedSize) \
+ V(kInterpreterTrampolineOffset, kTaggedSize) \
+ /* Total size. */ \
+ V(kSize, 0)
+
+ DEFINE_FIELD_OFFSET_CONSTANTS(Struct::kHeaderSize, INTERPRETER_DATA_FIELDS)
+#undef INTERPRETER_DATA_FIELDS
DECL_CAST(InterpreterData)
DECL_PRINTER(InterpreterData)
DECL_VERIFIER(InterpreterData)
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(InterpreterData);
+ OBJECT_CONSTRUCTORS(InterpreterData, Struct);
};
// SharedFunctionInfo describes the JSFunction information that can be
// shared by multiple instances of the function.
-class SharedFunctionInfo : public HeapObject, public NeverReadOnlySpaceObject {
+class SharedFunctionInfo : public HeapObject {
public:
- static constexpr Object* const kNoSharedNameSentinel = Smi::kZero;
+ NEVER_READ_ONLY_SPACE
+ static constexpr Object const kNoSharedNameSentinel = Smi::kZero;
// [name]: Returns shared name if it exists or an empty string otherwise.
- inline String* Name() const;
- inline void SetName(String* name);
+ inline String Name() const;
+ inline void SetName(String name);
// Get the code object which represents the execution of this function.
- Code* GetCode() const;
+ Code GetCode() const;
// Get the abstract code associated with the function, which will either be
// a Code object or a BytecodeArray.
- inline AbstractCode* abstract_code();
+ inline AbstractCode abstract_code();
// Tells whether or not this shared function info is interpreted.
//
@@ -210,14 +260,6 @@ class SharedFunctionInfo : public HeapObject, public NeverReadOnlySpaceObject {
static const int kNotFound = -1;
static const uint16_t kInvalidLength = static_cast<uint16_t>(-1);
- // Helpers for assembly code that does a backwards walk of the optimized code
- // map.
- static const int kOffsetToPreviousContext =
- FixedArray::kHeaderSize + kPointerSize * (kContextOffset - kEntryLength);
- static const int kOffsetToPreviousCachedCode =
- FixedArray::kHeaderSize +
- kPointerSize * (kCachedCodeOffset - kEntryLength);
-
// [scope_info]: Scope info.
DECL_ACCESSORS(scope_info, ScopeInfo)
@@ -237,16 +279,24 @@ class SharedFunctionInfo : public HeapObject, public NeverReadOnlySpaceObject {
// Get the outer scope info whether this function is compiled or not.
inline bool HasOuterScopeInfo() const;
- inline ScopeInfo* GetOuterScopeInfo() const;
+ inline ScopeInfo GetOuterScopeInfo() const;
// [feedback metadata] Metadata template for feedback vectors of instances of
// this function.
inline bool HasFeedbackMetadata() const;
DECL_ACCESSORS(feedback_metadata, FeedbackMetadata)
- // Returns if this function has been compiled to native code yet.
+ // Returns whether this function has been compiled yet. Note: with bytecode
+ // flushing, any GC after this call is made could cause the function
+ // to become uncompiled. If you need to ensure the function remains compiled
+ // for some period of time, use IsCompiledScope instead.
inline bool is_compiled() const;
+ // Returns an IsCompiledScope which reports whether the function is compiled,
+ // and if compiled, will avoid the function becoming uncompiled while it is
+ // held.
+ inline IsCompiledScope is_compiled_scope() const;
+
// [length]: The function length - usually the number of declared parameters.
// Use up to 2^16-2 parameters (16 bits of values, where one is reserved for
// kDontAdaptArgumentsSentinel). The value is only reliable when the function
@@ -280,30 +330,30 @@ class SharedFunctionInfo : public HeapObject, public NeverReadOnlySpaceObject {
// - a BytecodeArray for the interpreter [HasBytecodeArray()].
// - a InterpreterData with the BytecodeArray and a copy of the
// interpreter trampoline [HasInterpreterData()]
- // - a FixedArray with Asm->Wasm conversion [HasAsmWasmData()].
+ // - an AsmWasmData with Asm->Wasm conversion [HasAsmWasmData()].
// - a Smi containing the builtin id [HasBuiltinId()]
- // - a UncompiledDataWithoutPreParsedScope for lazy compilation
- // [HasUncompiledDataWithoutPreParsedScope()]
- // - a UncompiledDataWithPreParsedScope for lazy compilation
- // [HasUncompiledDataWithPreParsedScope()]
+ // - a UncompiledDataWithoutPreparseData for lazy compilation
+ // [HasUncompiledDataWithoutPreparseData()]
+ // - a UncompiledDataWithPreparseData for lazy compilation
+ // [HasUncompiledDataWithPreparseData()]
// - a WasmExportedFunctionData for Wasm [HasWasmExportedFunctionData()]
DECL_ACCESSORS(function_data, Object)
inline bool IsApiFunction() const;
- inline FunctionTemplateInfo* get_api_func_data();
- inline void set_api_func_data(FunctionTemplateInfo* data);
+ inline FunctionTemplateInfo get_api_func_data();
+ inline void set_api_func_data(FunctionTemplateInfo data);
inline bool HasBytecodeArray() const;
- inline BytecodeArray* GetBytecodeArray() const;
- inline void set_bytecode_array(BytecodeArray* bytecode);
- inline Code* InterpreterTrampoline() const;
+ inline BytecodeArray GetBytecodeArray() const;
+ inline void set_bytecode_array(BytecodeArray bytecode);
+ inline Code InterpreterTrampoline() const;
inline bool HasInterpreterData() const;
- inline InterpreterData* interpreter_data() const;
- inline void set_interpreter_data(InterpreterData* interpreter_data);
- inline BytecodeArray* GetDebugBytecodeArray() const;
- inline void SetDebugBytecodeArray(BytecodeArray* bytecode);
+ inline InterpreterData interpreter_data() const;
+ inline void set_interpreter_data(InterpreterData interpreter_data);
+ inline BytecodeArray GetDebugBytecodeArray() const;
+ inline void SetDebugBytecodeArray(BytecodeArray bytecode);
inline bool HasAsmWasmData() const;
- inline FixedArray* asm_wasm_data() const;
- inline void set_asm_wasm_data(FixedArray* data);
+ inline AsmWasmData asm_wasm_data() const;
+ inline void set_asm_wasm_data(AsmWasmData data);
// A brief note to clear up possible confusion:
// builtin_id corresponds to the auto-generated
@@ -314,21 +364,20 @@ class SharedFunctionInfo : public HeapObject, public NeverReadOnlySpaceObject {
inline int builtin_id() const;
inline void set_builtin_id(int builtin_id);
inline bool HasUncompiledData() const;
- inline UncompiledData* uncompiled_data() const;
- inline void set_uncompiled_data(UncompiledData* data);
- inline bool HasUncompiledDataWithPreParsedScope() const;
- inline UncompiledDataWithPreParsedScope*
- uncompiled_data_with_pre_parsed_scope() const;
- inline void set_uncompiled_data_with_pre_parsed_scope(
- UncompiledDataWithPreParsedScope* data);
- inline bool HasUncompiledDataWithoutPreParsedScope() const;
+ inline UncompiledData uncompiled_data() const;
+ inline void set_uncompiled_data(UncompiledData data);
+ inline bool HasUncompiledDataWithPreparseData() const;
+ inline UncompiledDataWithPreparseData uncompiled_data_with_preparse_data()
+ const;
+ inline void set_uncompiled_data_with_preparse_data(
+ UncompiledDataWithPreparseData data);
+ inline bool HasUncompiledDataWithoutPreparseData() const;
inline bool HasWasmExportedFunctionData() const;
- WasmExportedFunctionData* wasm_exported_function_data() const;
- inline void set_wasm_exported_function_data(WasmExportedFunctionData* data);
+ WasmExportedFunctionData wasm_exported_function_data() const;
- // Clear out pre-parsed scope data from UncompiledDataWithPreParsedScope,
- // turning it into UncompiledDataWithoutPreParsedScope.
- inline void ClearPreParsedScopeData();
+ // Clear out pre-parsed scope data from UncompiledDataWithPreparseData,
+ // turning it into UncompiledDataWithoutPreparseData.
+ inline void ClearPreparseData();
// [raw_builtin_function_id]: The id of the built-in function this function
// represents, used during optimization to improve code generation.
@@ -347,7 +396,7 @@ class SharedFunctionInfo : public HeapObject, public NeverReadOnlySpaceObject {
// code written in OO style, where almost all functions are anonymous but are
// assigned to object properties.
inline bool HasInferredName();
- inline String* inferred_name();
+ inline String inferred_name();
// Get the function literal id associated with this function, for parsing.
V8_EXPORT_PRIVATE int FunctionLiteralId(Isolate* isolate) const;
@@ -360,10 +409,10 @@ class SharedFunctionInfo : public HeapObject, public NeverReadOnlySpaceObject {
// Coverage infos are contained in DebugInfo, this is a convenience method
// to simplify access.
bool HasCoverageInfo() const;
- CoverageInfo* GetCoverageInfo() const;
+ CoverageInfo GetCoverageInfo() const;
// The function's name if it is non-empty, otherwise the inferred name.
- String* DebugName();
+ String DebugName();
// Used for flags such as --turbo-filter.
bool PassesFilter(const char* raw_filter);
@@ -373,13 +422,13 @@ class SharedFunctionInfo : public HeapObject, public NeverReadOnlySpaceObject {
// - a DebugInfo which holds the actual script [HasDebugInfo()].
DECL_ACCESSORS(script_or_debug_info, Object)
- inline Object* script() const;
- inline void set_script(Object* script);
+ inline Object script() const;
+ inline void set_script(Object script);
// The function is subject to debugging if a debug info is attached.
inline bool HasDebugInfo() const;
- inline DebugInfo* GetDebugInfo() const;
- inline void SetDebugInfo(DebugInfo* debug_info);
+ inline DebugInfo GetDebugInfo() const;
+ inline void SetDebugInfo(DebugInfo debug_info);
// The offset of the 'function' token in the script source relative to the
// start position. Can return kFunctionTokenOutOfRange if offset doesn't
@@ -395,7 +444,7 @@ class SharedFunctionInfo : public HeapObject, public NeverReadOnlySpaceObject {
inline bool HasSharedName() const;
// [flags] Bit field containing various flags about the function.
- DECL_INT_ACCESSORS(flags)
+ DECL_INT32_ACCESSORS(flags)
// Is this function a named function expression in the source code.
DECL_BOOLEAN_ACCESSORS(is_named_expression)
@@ -439,8 +488,10 @@ class SharedFunctionInfo : public HeapObject, public NeverReadOnlySpaceObject {
// which does not change this flag).
DECL_BOOLEAN_ACCESSORS(is_anonymous_expression)
- // Indicates that the the shared function info is deserialized from cache.
- DECL_BOOLEAN_ACCESSORS(deserialized)
+ // Indicates that the function represented by the shared function info was
+ // classed as an immediately invoked function expression (IIFE) and
+ // is only executed once.
+ DECL_BOOLEAN_ACCESSORS(is_oneshot_iife)
// Indicates that the function has been reported for binary code coverage.
DECL_BOOLEAN_ACCESSORS(has_reported_binary_coverage)
@@ -474,7 +525,7 @@ class SharedFunctionInfo : public HeapObject, public NeverReadOnlySpaceObject {
// initializer. This flag is set when creating the
// SharedFunctionInfo as a reminder to emit the initializer call
// when generating code later.
- DECL_BOOLEAN_ACCESSORS(requires_instance_fields_initializer)
+ DECL_BOOLEAN_ACCESSORS(requires_instance_members_initializer)
// [source code]: Source code for the function.
bool HasSourceCode() const;
@@ -496,9 +547,20 @@ class SharedFunctionInfo : public HeapObject, public NeverReadOnlySpaceObject {
inline bool CanDiscardCompiled() const;
// Flush compiled data from this function, setting it back to CompileLazy and
- // clearing any feedback metadata.
- static inline void DiscardCompiled(Isolate* isolate,
- Handle<SharedFunctionInfo> shared_info);
+ // clearing any compiled metadata.
+ static void DiscardCompiled(Isolate* isolate,
+ Handle<SharedFunctionInfo> shared_info);
+
+ // Discard the compiled metadata. If called during GC then
+ // |gc_notify_updated_slot| should be used to record any slot updates.
+ void DiscardCompiledMetadata(
+ Isolate* isolate,
+ std::function<void(HeapObject object, ObjectSlot slot, HeapObject target)>
+ gc_notify_updated_slot =
+ [](HeapObject object, ObjectSlot slot, HeapObject target) {});
+
+ // Returns true if the function has old bytecode that could be flushed.
+ inline bool ShouldFlushBytecode();
// Check whether or not this function is inlineable.
bool IsInlineable();
@@ -542,14 +604,14 @@ class SharedFunctionInfo : public HeapObject, public NeverReadOnlySpaceObject {
// Iterate over all shared function infos in a given script.
class ScriptIterator {
public:
- ScriptIterator(Isolate* isolate, Script* script);
+ ScriptIterator(Isolate* isolate, Script script);
ScriptIterator(Isolate* isolate,
Handle<WeakFixedArray> shared_function_infos);
- SharedFunctionInfo* Next();
+ SharedFunctionInfo Next();
int CurrentIndex() const { return index_ - 1; }
// Reset the iterator to run on |script|.
- void Reset(Script* script);
+ void Reset(Script script);
private:
Isolate* isolate_;
@@ -562,13 +624,13 @@ class SharedFunctionInfo : public HeapObject, public NeverReadOnlySpaceObject {
class GlobalIterator {
public:
explicit GlobalIterator(Isolate* isolate);
- SharedFunctionInfo* Next();
+ SharedFunctionInfo Next();
private:
Script::Iterator script_iterator_;
WeakArrayList::Iterator noscript_sfi_iterator_;
SharedFunctionInfo::ScriptIterator sfi_iterator_;
- DisallowHeapAllocation no_gc_;
+ DISALLOW_HEAP_ALLOCATION(no_gc_);
DISALLOW_COPY_AND_ASSIGN(GlobalIterator);
};
@@ -589,23 +651,24 @@ class SharedFunctionInfo : public HeapObject, public NeverReadOnlySpaceObject {
#endif
// Layout description.
-#define SHARED_FUNCTION_INFO_FIELDS(V) \
- /* Pointer fields. */ \
- V(kStartOfPointerFieldsOffset, 0) \
- V(kFunctionDataOffset, kPointerSize) \
- V(kNameOrScopeInfoOffset, kPointerSize) \
- V(kOuterScopeInfoOrFeedbackMetadataOffset, kPointerSize) \
- V(kScriptOrDebugInfoOffset, kPointerSize) \
- V(kEndOfPointerFieldsOffset, 0) \
- /* Raw data fields. */ \
- V(kUniqueIdOffset, kUniqueIdFieldSize) \
- V(kLengthOffset, kUInt16Size) \
- V(kFormalParameterCountOffset, kUInt16Size) \
- V(kExpectedNofPropertiesOffset, kUInt8Size) \
- V(kBuiltinFunctionId, kUInt8Size) \
- V(kFunctionTokenOffsetOffset, kUInt16Size) \
- V(kFlagsOffset, kInt32Size) \
- /* Total size. */ \
+#define SHARED_FUNCTION_INFO_FIELDS(V) \
+ /* Pointer fields. */ \
+ V(kStartOfPointerFieldsOffset, 0) \
+ V(kFunctionDataOffset, kTaggedSize) \
+ V(kStartOfAlwaysStrongPointerFieldsOffset, 0) \
+ V(kNameOrScopeInfoOffset, kTaggedSize) \
+ V(kOuterScopeInfoOrFeedbackMetadataOffset, kTaggedSize) \
+ V(kScriptOrDebugInfoOffset, kTaggedSize) \
+ V(kEndOfTaggedFieldsOffset, 0) \
+ /* Raw data fields. */ \
+ V(kUniqueIdOffset, kUniqueIdFieldSize) \
+ V(kLengthOffset, kUInt16Size) \
+ V(kFormalParameterCountOffset, kUInt16Size) \
+ V(kExpectedNofPropertiesOffset, kUInt8Size) \
+ V(kBuiltinFunctionId, kUInt8Size) \
+ V(kFunctionTokenOffsetOffset, kUInt16Size) \
+ V(kFlagsOffset, kInt32Size) \
+ /* Total size. */ \
V(kSize, 0)
DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize,
@@ -614,9 +677,7 @@ class SharedFunctionInfo : public HeapObject, public NeverReadOnlySpaceObject {
static const int kAlignedSize = POINTER_SIZE_ALIGN(kSize);
- typedef FixedBodyDescriptor<kStartOfPointerFieldsOffset,
- kEndOfPointerFieldsOffset, kAlignedSize>
- BodyDescriptor;
+ class BodyDescriptor;
// Bit positions in |flags|.
#define FLAGS_BIT_FIELDS(V, _) \
@@ -633,14 +694,14 @@ class SharedFunctionInfo : public HeapObject, public NeverReadOnlySpaceObject {
V(IsAsmWasmBrokenBit, bool, 1, _) \
V(FunctionMapIndexBits, int, 5, _) \
V(DisabledOptimizationReasonBits, BailoutReason, 4, _) \
- V(RequiresInstanceFieldsInitializer, bool, 1, _) \
+ V(RequiresInstanceMembersInitializer, bool, 1, _) \
V(ConstructAsBuiltinBit, bool, 1, _) \
V(IsAnonymousExpressionBit, bool, 1, _) \
V(NameShouldPrintAsAnonymousBit, bool, 1, _) \
- V(IsDeserializedBit, bool, 1, _) \
V(HasReportedBinaryCoverageBit, bool, 1, _) \
V(IsNamedExpressionBit, bool, 1, _) \
- V(IsTopLevelBit, bool, 1, _)
+ V(IsTopLevelBit, bool, 1, _) \
+ V(IsOneshotIIFEBit, bool, 1, _)
DEFINE_BIT_FIELDS(FLAGS_BIT_FIELDS)
#undef FLAGS_BIT_FIELDS
@@ -678,17 +739,32 @@ class SharedFunctionInfo : public HeapObject, public NeverReadOnlySpaceObject {
// FunctionLiteralId.
int FindIndexInScript(Isolate* isolate) const;
- DISALLOW_IMPLICIT_CONSTRUCTORS(SharedFunctionInfo);
+ OBJECT_CONSTRUCTORS(SharedFunctionInfo, HeapObject);
};
// Printing support.
struct SourceCodeOf {
- explicit SourceCodeOf(SharedFunctionInfo* v, int max = -1)
+ explicit SourceCodeOf(SharedFunctionInfo v, int max = -1)
: value(v), max_length(max) {}
- const SharedFunctionInfo* value;
+ const SharedFunctionInfo value;
int max_length;
};
+// IsCompiledScope enables a caller to check if a function is compiled, and
+// ensure it remains compiled (i.e., doesn't have its bytecode flushed) while
+// the scope is retained.
+class IsCompiledScope {
+ public:
+ inline IsCompiledScope(const SharedFunctionInfo shared, Isolate* isolate);
+ inline IsCompiledScope() : retain_bytecode_(), is_compiled_(false) {}
+
+ inline bool is_compiled() const { return is_compiled_; }
+
+ private:
+ MaybeHandle<BytecodeArray> retain_bytecode_;
+ bool is_compiled_;
+};
+
std::ostream& operator<<(std::ostream& os, const SourceCodeOf& v);
} // namespace internal
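
A hedged usage sketch for IsCompiledScope: the point is that a bare is_compiled() check can be invalidated by the next GC, while holding the scope pins the bytecode via the MaybeHandle. EnsureCompiledFor below is a hypothetical caller, not an API added by this patch, and it assumes the usual V8-internal headers:

// Sketch only; assumes V8-internal types (Isolate, Handle, etc.).
void EnsureCompiledFor(Isolate* isolate, Handle<SharedFunctionInfo> sfi) {
  IsCompiledScope is_compiled_scope = sfi->is_compiled_scope();
  if (!is_compiled_scope.is_compiled()) {
    // Trigger compilation here; afterwards a fresh scope would report
    // is_compiled() == true.
    return;
  }
  // While |is_compiled_scope| is live, the bytecode cannot be flushed,
  // so it is safe to keep relying on sfi's compiled state below this point.
}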
diff --git a/deps/v8/src/objects/slots-atomic-inl.h b/deps/v8/src/objects/slots-atomic-inl.h
new file mode 100644
index 0000000000..e0f4f9dff2
--- /dev/null
+++ b/deps/v8/src/objects/slots-atomic-inl.h
@@ -0,0 +1,100 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_SLOTS_ATOMIC_INL_H_
+#define V8_OBJECTS_SLOTS_ATOMIC_INL_H_
+
+#include "src/base/atomic-utils.h"
+#include "src/objects/slots.h"
+
+namespace v8 {
+namespace internal {
+
+// This class is intended to be used as a wrapper for elements of an array
+// that is passed in to STL functions such as std::sort. It ensures that
+// element accesses are atomic.
+// Usage example:
+// FixedArray array;
+// AtomicSlot start(array->GetFirstElementAddress());
+// std::sort(start, start + given_length,
+// [](Tagged_t a, Tagged_t b) {
+// // Decompress a and b if necessary.
+// return my_comparison(a, b);
+// });
+// Note how the comparator operates on Address values, representing the raw
+// data found at the given heap location, so you probably want to construct
+// an Object from it.
+class AtomicSlot : public SlotBase<AtomicSlot, Tagged_t, kTaggedSize> {
+ public:
+ // This class is a stand-in for "Address&" that uses custom atomic
+ // read/write operations for the actual memory accesses.
+ class Reference {
+ public:
+ explicit Reference(Tagged_t* address) : address_(address) {}
+ Reference(const Reference&) V8_NOEXCEPT = default;
+
+ Reference& operator=(const Reference& other) V8_NOEXCEPT {
+ AsAtomicTagged::Relaxed_Store(
+ address_, AsAtomicTagged::Relaxed_Load(other.address_));
+ return *this;
+ }
+ Reference& operator=(Tagged_t value) {
+ AsAtomicTagged::Relaxed_Store(address_, value);
+ return *this;
+ }
+
+ // Values of type AtomicSlot::reference must be implicitly convertible
+ // to AtomicSlot::value_type.
+ operator Tagged_t() const { return AsAtomicTagged::Relaxed_Load(address_); }
+
+ void swap(Reference& other) {
+ Address tmp = value();
+ AsAtomicTagged::Relaxed_Store(address_, other.value());
+ AsAtomicTagged::Relaxed_Store(other.address_, tmp);
+ }
+
+ bool operator<(const Reference& other) const {
+ return value() < other.value();
+ }
+
+ bool operator==(const Reference& other) const {
+ return value() == other.value();
+ }
+
+ private:
+ Address value() const { return AsAtomicTagged::Relaxed_Load(address_); }
+
+ Tagged_t* address_;
+ };
+
+ // The rest of this class follows C++'s "RandomAccessIterator" requirements.
+ // Most of the heavy lifting is inherited from SlotBase.
+ typedef int difference_type;
+ typedef Tagged_t value_type;
+ typedef Reference reference;
+ typedef void* pointer; // Must be present, but should not be used.
+ typedef std::random_access_iterator_tag iterator_category;
+
+ AtomicSlot() : SlotBase(kNullAddress) {}
+ explicit AtomicSlot(Address address) : SlotBase(address) {}
+ explicit AtomicSlot(ObjectSlot slot) : SlotBase(slot.address()) {}
+
+ Reference operator*() const {
+ return Reference(reinterpret_cast<Tagged_t*>(address()));
+ }
+ Reference operator[](difference_type i) const {
+ return Reference(reinterpret_cast<Tagged_t*>(address() + i * kTaggedSize));
+ }
+
+ friend void swap(Reference lhs, Reference rhs) { lhs.swap(rhs); }
+
+ friend difference_type operator-(AtomicSlot a, AtomicSlot b) {
+ return static_cast<int>(a.address() - b.address()) / kTaggedSize;
+ }
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_OBJECTS_SLOTS_ATOMIC_INL_H_
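
Because AtomicSlot models RandomAccessIterator (the typedefs, operator[], operator-, and the swap overload above are exactly the pieces std::sort needs), the usage example from the class comment expands to something like the following hedged sketch, which assumes the V8-internal FixedArray type and <algorithm>:

// Sketch only; FixedArray and GetFirstElementAddress are V8 internals.
void SortFieldsInPlace(FixedArray array, int length) {
  AtomicSlot start(array->GetFirstElementAddress());
  // All element reads/writes go through AtomicSlot::Reference, so a
  // concurrent marker never observes a torn value while sorting.
  std::sort(start, start + length,
            [](Tagged_t a, Tagged_t b) { return a < b; });
}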
diff --git a/deps/v8/src/objects/slots-inl.h b/deps/v8/src/objects/slots-inl.h
new file mode 100644
index 0000000000..7072c43b44
--- /dev/null
+++ b/deps/v8/src/objects/slots-inl.h
@@ -0,0 +1,126 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_SLOTS_INL_H_
+#define V8_OBJECTS_SLOTS_INL_H_
+
+#include "src/objects/slots.h"
+
+#include "src/base/atomic-utils.h"
+#include "src/memcopy.h"
+#include "src/objects.h"
+#include "src/objects/heap-object-inl.h"
+#include "src/objects/maybe-object.h"
+
+#ifdef V8_COMPRESS_POINTERS
+#include "src/ptr-compr-inl.h"
+#endif
+
+namespace v8 {
+namespace internal {
+
+//
+// FullObjectSlot implementation.
+//
+
+FullObjectSlot::FullObjectSlot(Object* object)
+ : SlotBase(reinterpret_cast<Address>(&object->ptr_)) {}
+
+bool FullObjectSlot::contains_value(Address raw_value) const {
+ return base::AsAtomicPointer::Relaxed_Load(location()) == raw_value;
+}
+
+Object FullObjectSlot::operator*() const { return Object(*location()); }
+
+void FullObjectSlot::store(Object value) const { *location() = value->ptr(); }
+
+Object FullObjectSlot::Acquire_Load() const {
+ return Object(base::AsAtomicPointer::Acquire_Load(location()));
+}
+
+Object FullObjectSlot::Relaxed_Load() const {
+ return Object(base::AsAtomicPointer::Relaxed_Load(location()));
+}
+
+void FullObjectSlot::Relaxed_Store(Object value) const {
+ base::AsAtomicPointer::Relaxed_Store(location(), value->ptr());
+}
+
+void FullObjectSlot::Release_Store(Object value) const {
+ base::AsAtomicPointer::Release_Store(location(), value->ptr());
+}
+
+Object FullObjectSlot::Release_CompareAndSwap(Object old, Object target) const {
+ Address result = base::AsAtomicPointer::Release_CompareAndSwap(
+ location(), old->ptr(), target->ptr());
+ return Object(result);
+}
+
+//
+// FullMaybeObjectSlot implementation.
+//
+
+MaybeObject FullMaybeObjectSlot::operator*() const {
+ return MaybeObject(*location());
+}
+
+void FullMaybeObjectSlot::store(MaybeObject value) const {
+ *location() = value.ptr();
+}
+
+MaybeObject FullMaybeObjectSlot::Relaxed_Load() const {
+ return MaybeObject(AsAtomicTagged::Relaxed_Load(location()));
+}
+
+void FullMaybeObjectSlot::Relaxed_Store(MaybeObject value) const {
+ AsAtomicTagged::Relaxed_Store(location(), value->ptr());
+}
+
+void FullMaybeObjectSlot::Release_CompareAndSwap(MaybeObject old,
+ MaybeObject target) const {
+ AsAtomicTagged::Release_CompareAndSwap(location(), old.ptr(), target.ptr());
+}
+
+//
+// FullHeapObjectSlot implementation.
+//
+
+HeapObjectReference FullHeapObjectSlot::operator*() const {
+ return HeapObjectReference(*location());
+}
+
+void FullHeapObjectSlot::store(HeapObjectReference value) const {
+ *location() = value.ptr();
+}
+
+HeapObject FullHeapObjectSlot::ToHeapObject() const {
+ DCHECK((*location() & kHeapObjectTagMask) == kHeapObjectTag);
+ return HeapObject::cast(Object(*location()));
+}
+
+void FullHeapObjectSlot::StoreHeapObject(HeapObject value) const {
+ *location() = value->ptr();
+}
+
+//
+// Utils.
+//
+
+// Sets |counter| kTaggedSize-sized values starting at the |start| slot.
+inline void MemsetTagged(ObjectSlot start, Object value, size_t counter) {
+ // TODO(ishell): revisit this implementation, maybe use "rep stosl"
+ STATIC_ASSERT(kTaggedSize == kSystemPointerSize);
+ MemsetPointer(start.location(), value.ptr(), counter);
+}
+
+// Sets |counter| kSystemPointerSize-sized values starting at the |start|
+// slot.
+inline void MemsetPointer(FullObjectSlot start, Object value, size_t counter) {
+ MemsetPointer(start.location(), value.ptr(), counter);
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_OBJECTS_SLOTS_INL_H_
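
A hedged sketch of how the MemsetTagged helper above is typically called: bulk-initializing a run of tagged fields to one root value right after allocation. ReadOnlyRoots comes from elsewhere in V8, and the wrapper function name here is made up:

// Sketch only; assumes V8-internal headers.
void FillWithUndefined(ObjectSlot first, size_t count, ReadOnlyRoots roots) {
  // Writes |count| tagged words of the undefined sentinel starting at
  // |first|; the STATIC_ASSERT in MemsetTagged ties tagged and pointer
  // sizes together for this (non-compressed) configuration.
  MemsetTagged(first, roots.undefined_value(), count);
}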
diff --git a/deps/v8/src/objects/slots.h b/deps/v8/src/objects/slots.h
new file mode 100644
index 0000000000..12dc168e8e
--- /dev/null
+++ b/deps/v8/src/objects/slots.h
@@ -0,0 +1,181 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_SLOTS_H_
+#define V8_OBJECTS_SLOTS_H_
+
+#include "src/globals.h"
+
+namespace v8 {
+namespace internal {
+
+class Object;
+
+template <typename Subclass, typename Data, size_t SlotDataSize>
+class SlotBase {
+ public:
+ using TData = Data;
+
+ // TODO(ishell): This should eventually become just sizeof(TData) once
+ // pointer compression is implemented.
+ static constexpr size_t kSlotDataSize = SlotDataSize;
+
+ Subclass& operator++() { // Prefix increment.
+ ptr_ += kSlotDataSize;
+ return *static_cast<Subclass*>(this);
+ }
+ Subclass operator++(int) { // Postfix increment.
+ Subclass result = *static_cast<Subclass*>(this);
+ ptr_ += kSlotDataSize;
+ return result;
+ }
+ Subclass& operator--() { // Prefix decrement.
+ ptr_ -= kSlotDataSize;
+ return *static_cast<Subclass*>(this);
+ }
+ Subclass operator--(int) { // Postfix decrement.
+ Subclass result = *static_cast<Subclass*>(this);
+ ptr_ -= kSlotDataSize;
+ return result;
+ }
+
+ bool operator<(const SlotBase& other) const { return ptr_ < other.ptr_; }
+ bool operator<=(const SlotBase& other) const { return ptr_ <= other.ptr_; }
+ bool operator>(const SlotBase& other) const { return ptr_ > other.ptr_; }
+ bool operator>=(const SlotBase& other) const { return ptr_ >= other.ptr_; }
+ bool operator==(const SlotBase& other) const { return ptr_ == other.ptr_; }
+ bool operator!=(const SlotBase& other) const { return ptr_ != other.ptr_; }
+ size_t operator-(const SlotBase& other) const {
+ DCHECK_GE(ptr_, other.ptr_);
+ return static_cast<size_t>((ptr_ - other.ptr_) / kSlotDataSize);
+ }
+ Subclass operator-(int i) const { return Subclass(ptr_ - i * kSlotDataSize); }
+ Subclass operator+(int i) const { return Subclass(ptr_ + i * kSlotDataSize); }
+ friend Subclass operator+(int i, const Subclass& slot) {
+ return Subclass(slot.ptr_ + i * kSlotDataSize);
+ }
+ Subclass& operator+=(int i) {
+ ptr_ += i * kSlotDataSize;
+ return *static_cast<Subclass*>(this);
+ }
+ Subclass operator-(int i) { return Subclass(ptr_ - i * kSlotDataSize); }
+ Subclass& operator-=(int i) {
+ ptr_ -= i * kSlotDataSize;
+ return *static_cast<Subclass*>(this);
+ }
+
+ void* ToVoidPtr() const { return reinterpret_cast<void*>(address()); }
+
+ Address address() const { return ptr_; }
+ // For symmetry with Handle.
+ TData* location() const { return reinterpret_cast<TData*>(ptr_); }
+
+ protected:
+ STATIC_ASSERT(IsAligned(kSlotDataSize, kTaggedSize));
+ explicit SlotBase(Address ptr) : ptr_(ptr) {
+ DCHECK(IsAligned(ptr, kTaggedSize));
+ }
+
+ private:
+ // This field usually describes an on-heap address (a slot within an object),
+ // so its type should not be a pointer to another C++ wrapper class.
+ // Type safety is provided by well-defined conversion operations.
+ Address ptr_;
+};
+
+// A FullObjectSlot instance describes a kSystemPointerSize-sized field
+// ("slot") holding a tagged pointer (smi or strong heap object).
+// Its address() is the address of the slot.
+// The slot's contents can be read and written using operator* and store().
+class FullObjectSlot
+ : public SlotBase<FullObjectSlot, Address, kSystemPointerSize> {
+ public:
+ using TObject = Object;
+ using THeapObjectSlot = FullHeapObjectSlot;
+
+ // Tagged value stored in this slot is guaranteed to never be a weak pointer.
+ static constexpr bool kCanBeWeak = false;
+
+ FullObjectSlot() : SlotBase(kNullAddress) {}
+ explicit FullObjectSlot(Address ptr) : SlotBase(ptr) {}
+ explicit FullObjectSlot(const Address* ptr)
+ : SlotBase(reinterpret_cast<Address>(ptr)) {}
+ inline explicit FullObjectSlot(Object* object);
+ template <typename T>
+ explicit FullObjectSlot(SlotBase<T, TData, kSlotDataSize> slot)
+ : SlotBase(slot.address()) {}
+
+ // Compares memory representation of a value stored in the slot with given
+ // raw value.
+ inline bool contains_value(Address raw_value) const;
+
+ inline Object operator*() const;
+ inline void store(Object value) const;
+
+ inline Object Acquire_Load() const;
+ inline Object Relaxed_Load() const;
+ inline void Relaxed_Store(Object value) const;
+ inline void Release_Store(Object value) const;
+ inline Object Release_CompareAndSwap(Object old, Object target) const;
+};
+
+// A FullMaybeObjectSlot instance describes a kSystemPointerSize-sized field
+// ("slot") holding a possibly-weak tagged pointer (think: MaybeObject).
+// Its address() is the address of the slot.
+// The slot's contents can be read and written using operator* and store().
+class FullMaybeObjectSlot
+ : public SlotBase<FullMaybeObjectSlot, Address, kSystemPointerSize> {
+ public:
+ using TObject = MaybeObject;
+ using THeapObjectSlot = FullHeapObjectSlot;
+
+ // Tagged value stored in this slot can be a weak pointer.
+ static constexpr bool kCanBeWeak = true;
+
+ FullMaybeObjectSlot() : SlotBase(kNullAddress) {}
+ explicit FullMaybeObjectSlot(Address ptr) : SlotBase(ptr) {}
+ explicit FullMaybeObjectSlot(Object* ptr)
+ : SlotBase(reinterpret_cast<Address>(ptr)) {}
+ template <typename T>
+ explicit FullMaybeObjectSlot(SlotBase<T, TData, kSlotDataSize> slot)
+ : SlotBase(slot.address()) {}
+
+ inline MaybeObject operator*() const;
+ inline void store(MaybeObject value) const;
+
+ inline MaybeObject Relaxed_Load() const;
+ inline void Relaxed_Store(MaybeObject value) const;
+ inline void Release_CompareAndSwap(MaybeObject old, MaybeObject target) const;
+};
+
+// A FullHeapObjectSlot instance describes a kSystemPointerSize-sized field
+// ("slot") holding a weak or strong pointer to a heap object (think:
+// HeapObjectReference).
+// Its address() is the address of the slot.
+// The slot's contents can be read and written using operator* and store().
+// In case it is known that that slot contains a strong heap object pointer,
+// ToHeapObject() can be used to retrieve that heap object.
+class FullHeapObjectSlot
+ : public SlotBase<FullHeapObjectSlot, Address, kSystemPointerSize> {
+ public:
+ FullHeapObjectSlot() : SlotBase(kNullAddress) {}
+ explicit FullHeapObjectSlot(Address ptr) : SlotBase(ptr) {}
+ explicit FullHeapObjectSlot(Object* ptr)
+ : SlotBase(reinterpret_cast<Address>(ptr)) {}
+ template <typename T>
+ explicit FullHeapObjectSlot(SlotBase<T, TData, kSlotDataSize> slot)
+ : SlotBase(slot.address()) {}
+
+ inline HeapObjectReference operator*() const;
+ inline void store(HeapObjectReference value) const;
+
+ inline HeapObject ToHeapObject() const;
+
+ inline void StoreHeapObject(HeapObject value) const;
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_OBJECTS_SLOTS_H_
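
SlotBase gives every slot flavor pointer-style arithmetic in kSlotDataSize steps, so a contiguous range of tagged fields can be walked like an array. A hedged sketch over FullObjectSlot (assumes V8-internal headers; the half-open range is presumed to come from an object's layout constants):

// Sketch only; assumes V8-internal headers.
int CountSmis(FullObjectSlot start, FullObjectSlot end) {
  int smis = 0;
  // operator++ advances by kSystemPointerSize; operator* reads the slot.
  for (FullObjectSlot slot = start; slot < end; ++slot) {
    if ((*slot)->IsSmi()) ++smis;
  }
  return smis;
}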
diff --git a/deps/v8/src/objects/smi-inl.h b/deps/v8/src/objects/smi-inl.h
new file mode 100644
index 0000000000..38d644fbea
--- /dev/null
+++ b/deps/v8/src/objects/smi-inl.h
@@ -0,0 +1,25 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_SMI_INL_H_
+#define V8_OBJECTS_SMI_INL_H_
+
+#include "src/objects/smi.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+CAST_ACCESSOR(Smi)
+
+int Smi::ToInt(const Object object) { return Smi::cast(object)->value(); }
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_SMI_INL_H_
diff --git a/deps/v8/src/objects/smi.h b/deps/v8/src/objects/smi.h
new file mode 100644
index 0000000000..0361ef0a7a
--- /dev/null
+++ b/deps/v8/src/objects/smi.h
@@ -0,0 +1,107 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_SMI_H_
+#define V8_OBJECTS_SMI_H_
+
+#include "src/globals.h"
+#include "src/objects/heap-object.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+// Smi represents integer Numbers that can be stored in 31 bits.
+// Smis are immediate, which means they are NOT allocated in the heap.
+// The ptr_ value has the following format: [31 bit signed int] 0
+// For long smis it has the following format:
+// [32 bit signed int] [31 bits zero padding] 0
+// Smi stands for small integer.
+class Smi : public Object {
+ public:
+ // This replaces the OBJECT_CONSTRUCTORS macro, because Smis are special
+ // in that we want them to be constexprs.
+ constexpr Smi() : Object() {}
+ explicit constexpr Smi(Address ptr) : Object(ptr) {
+#if V8_CAN_HAVE_DCHECK_IN_CONSTEXPR
+ DCHECK(HAS_SMI_TAG(ptr));
+#endif
+ }
+ Smi* operator->() { return this; }
+ const Smi* operator->() const { return this; }
+
+ // Returns the integer value.
+ inline int value() const { return Internals::SmiValue(ptr()); }
+ inline Smi ToUint32Smi() {
+ if (value() <= 0) return Smi::FromInt(0);
+ return Smi::FromInt(static_cast<uint32_t>(value()));
+ }
+
+ // Convert a Smi object to an int.
+ static inline int ToInt(const Object object);
+
+ // Convert a value to a Smi object.
+ static inline constexpr Smi FromInt(int value) {
+#if V8_CAN_HAVE_DCHECK_IN_CONSTEXPR
+ DCHECK(Smi::IsValid(value));
+#endif
+ return Smi(Internals::IntToSmi(value));
+ }
+
+ static inline Smi FromIntptr(intptr_t value) {
+ DCHECK(Smi::IsValid(value));
+ int smi_shift_bits = kSmiTagSize + kSmiShiftSize;
+ return Smi((value << smi_shift_bits) | kSmiTag);
+ }
+
+ template <typename E,
+ typename = typename std::enable_if<std::is_enum<E>::value>::type>
+ static inline Smi FromEnum(E value) {
+ STATIC_ASSERT(sizeof(E) <= sizeof(int));
+ return FromInt(static_cast<int>(value));
+ }
+
+ // Returns whether value can be represented in a Smi.
+ static inline bool constexpr IsValid(intptr_t value) {
+#if V8_CAN_HAVE_DCHECK_IN_CONSTEXPR
+ DCHECK(Internals::IsValidSmi(value) ==
+ (value >= kMinValue && value <= kMaxValue));
+#endif
+ return Internals::IsValidSmi(value);
+ }
+
+ // Compare two Smis x, y as if they were converted to strings and then
+ // compared lexicographically. Returns:
+ // -1 if x < y.
+ // 0 if x == y.
+ // 1 if x > y.
+ // Returns the result (a tagged Smi) as a raw Address for ExternalReference
+ // usage.
+ static Address LexicographicCompare(Isolate* isolate, Smi x, Smi y);
+
+ DECL_CAST(Smi)
+
+ // Dispatched behavior.
+ V8_EXPORT_PRIVATE void SmiPrint(std::ostream& os) const; // NOLINT
+ DECL_VERIFIER(Smi)
+
+ // C++ does not allow us to have an object of type Smi within class Smi,
+ // so the kZero value has type Object. Consider it deprecated; new code
+ // should use zero() instead.
+ V8_EXPORT_PRIVATE static constexpr Object kZero = Object(0);
+ // If you need something with type Smi, call zero() instead. Since it is
+ // a constexpr, "calling" it is just as efficient as reading kZero.
+ static inline constexpr Smi zero() { return Smi::FromInt(0); }
+ static constexpr int kMinValue = kSmiMinValue;
+ static constexpr int kMaxValue = kSmiMaxValue;
+};
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_SMI_H_
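
The format comment above boils down to: a smi is the integer shifted left past a zero tag bit, so tagging and untagging are a multiply and an exact divide. A standalone model of the 32-bit case (one shift bit; the real helpers live in Internals and also handle the 64-bit "long smi" layout):

#include <cstdint>

// Standalone model of 32-bit smi tagging: [31-bit signed int] 0.
constexpr intptr_t IntToSmi(int value) {
  // Multiply instead of '<<' so negative values stay well-defined.
  return static_cast<intptr_t>(value) * 2;
}

constexpr int SmiValue(intptr_t ptr) {
  return static_cast<int>(ptr / 2);  // exact: tagged values are even
}

static_assert(IntToSmi(42) == 84, "low tag bit is zero");
static_assert((IntToSmi(42) & 1) == 0, "HAS_SMI_TAG would pass");
static_assert(SmiValue(IntToSmi(-7)) == -7, "round trip preserves sign");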
diff --git a/deps/v8/src/objects/stack-frame-info-inl.h b/deps/v8/src/objects/stack-frame-info-inl.h
index 8398c7cb5b..a9c4661726 100644
--- a/deps/v8/src/objects/stack-frame-info-inl.h
+++ b/deps/v8/src/objects/stack-frame-info-inl.h
@@ -15,6 +15,10 @@
namespace v8 {
namespace internal {
+OBJECT_CONSTRUCTORS_IMPL(StackFrameInfo, Struct)
+
+NEVER_READ_ONLY_SPACE_IMPL(StackFrameInfo)
+
CAST_ACCESSOR(StackFrameInfo)
SMI_ACCESSORS(StackFrameInfo, line_number, kLineNumberIndex)
diff --git a/deps/v8/src/objects/stack-frame-info.h b/deps/v8/src/objects/stack-frame-info.h
index 4adc37109e..8764547ecc 100644
--- a/deps/v8/src/objects/stack-frame-info.h
+++ b/deps/v8/src/objects/stack-frame-info.h
@@ -5,7 +5,7 @@
#ifndef V8_OBJECTS_STACK_FRAME_INFO_H_
#define V8_OBJECTS_STACK_FRAME_INFO_H_
-#include "src/objects.h"
+#include "src/objects/struct.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -13,8 +13,9 @@
namespace v8 {
namespace internal {
-class StackFrameInfo : public Struct, public NeverReadOnlySpaceObject {
+class StackFrameInfo : public Struct {
public:
+ NEVER_READ_ONLY_SPACE
DECL_INT_ACCESSORS(line_number)
DECL_INT_ACCESSORS(column_number)
DECL_INT_ACCESSORS(script_id)
@@ -33,17 +34,21 @@ class StackFrameInfo : public Struct, public NeverReadOnlySpaceObject {
DECL_PRINTER(StackFrameInfo)
DECL_VERIFIER(StackFrameInfo)
- static const int kLineNumberIndex = Struct::kHeaderSize;
- static const int kColumnNumberIndex = kLineNumberIndex + kPointerSize;
- static const int kScriptIdIndex = kColumnNumberIndex + kPointerSize;
- static const int kScriptNameIndex = kScriptIdIndex + kPointerSize;
- static const int kScriptNameOrSourceUrlIndex =
- kScriptNameIndex + kPointerSize;
- static const int kFunctionNameIndex =
- kScriptNameOrSourceUrlIndex + kPointerSize;
- static const int kFlagIndex = kFunctionNameIndex + kPointerSize;
- static const int kIdIndex = kFlagIndex + kPointerSize;
- static const int kSize = kIdIndex + kPointerSize;
+ // Layout description.
+#define STACK_FRAME_INFO_FIELDS(V) \
+ V(kLineNumberIndex, kTaggedSize) \
+ V(kColumnNumberIndex, kTaggedSize) \
+ V(kScriptIdIndex, kTaggedSize) \
+ V(kScriptNameIndex, kTaggedSize) \
+ V(kScriptNameOrSourceUrlIndex, kTaggedSize) \
+ V(kFunctionNameIndex, kTaggedSize) \
+ V(kFlagIndex, kTaggedSize) \
+ V(kIdIndex, kTaggedSize) \
+ /* Total size. */ \
+ V(kSize, 0)
+
+ DEFINE_FIELD_OFFSET_CONSTANTS(Struct::kHeaderSize, STACK_FRAME_INFO_FIELDS)
+#undef STACK_FRAME_INFO_FIELDS
private:
// Bit position in the flag, from least significant bit position.
@@ -51,7 +56,7 @@ class StackFrameInfo : public Struct, public NeverReadOnlySpaceObject {
static const int kIsConstructorBit = 1;
static const int kIsWasmBit = 2;
- DISALLOW_IMPLICIT_CONSTRUCTORS(StackFrameInfo);
+ OBJECT_CONSTRUCTORS(StackFrameInfo, Struct);
};
} // namespace internal
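
Each V(kName, size) entry in lists like STACK_FRAME_INFO_FIELDS contributes its width to a running offset, which is how the trailing V(kSize, 0) entry ends up naming the total object size. A standalone model of the enum trick behind DEFINE_FIELD_OFFSET_CONSTANTS (two illustrative fields and assumed sizes only; the real macro lives in src/objects/object-macros.h):

constexpr int kTaggedSize = 8;  // assumed 64-bit tagged word
constexpr int kHeaderSize = 8;  // assumed map-only header

#define TOY_FIELDS(V)     \
  V(kFieldA, kTaggedSize) \
  V(kFieldB, kTaggedSize) \
  V(kSize, 0)

// Each enumerator picks up where the previous field ended.
#define DEFINE_ONE(Name, Size) Name, Name##End = Name + (Size)-1,
enum ToyOffsets : int { kToyStart = kHeaderSize - 1, TOY_FIELDS(DEFINE_ONE) };
#undef DEFINE_ONE

static_assert(kFieldA == 8, "first field starts at the header size");
static_assert(kFieldB == 16, "offsets accumulate field widths");
static_assert(kSize == 24, "zero-width terminator names the total size");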
diff --git a/deps/v8/src/objects/string-inl.h b/deps/v8/src/objects/string-inl.h
index 349fa31f9d..f9efd53418 100644
--- a/deps/v8/src/objects/string-inl.h
+++ b/deps/v8/src/objects/string-inl.h
@@ -11,6 +11,8 @@
#include "src/handles-inl.h"
#include "src/heap/factory.h"
#include "src/objects/name-inl.h"
+#include "src/objects/smi-inl.h"
+#include "src/objects/string-table-inl.h"
#include "src/string-hasher-inl.h"
// Has to be the last include (doesn't have include guards):
@@ -31,6 +33,18 @@ void String::synchronized_set_length(int value) {
reinterpret_cast<int32_t*>(FIELD_ADDR(this, kLengthOffset)), value);
}
+OBJECT_CONSTRUCTORS_IMPL(String, Name)
+OBJECT_CONSTRUCTORS_IMPL(SeqString, String)
+OBJECT_CONSTRUCTORS_IMPL(SeqOneByteString, SeqString)
+OBJECT_CONSTRUCTORS_IMPL(SeqTwoByteString, SeqString)
+OBJECT_CONSTRUCTORS_IMPL(InternalizedString, String)
+OBJECT_CONSTRUCTORS_IMPL(ConsString, String)
+OBJECT_CONSTRUCTORS_IMPL(ThinString, String)
+OBJECT_CONSTRUCTORS_IMPL(SlicedString, String)
+OBJECT_CONSTRUCTORS_IMPL(ExternalString, String)
+OBJECT_CONSTRUCTORS_IMPL(ExternalOneByteString, ExternalString)
+OBJECT_CONSTRUCTORS_IMPL(ExternalTwoByteString, ExternalString)
+
CAST_ACCESSOR(ConsString)
CAST_ACCESSOR(ExternalOneByteString)
CAST_ACCESSOR(ExternalString)
@@ -43,13 +57,13 @@ CAST_ACCESSOR(SlicedString)
CAST_ACCESSOR(String)
CAST_ACCESSOR(ThinString)
-StringShape::StringShape(const String* str)
+StringShape::StringShape(const String str)
: type_(str->map()->instance_type()) {
set_valid();
DCHECK_EQ(type_ & kIsNotStringMask, kStringTag);
}
-StringShape::StringShape(Map* map) : type_(map->instance_type()) {
+StringShape::StringShape(Map map) : type_(map->instance_type()) {
set_valid();
DCHECK_EQ(type_ & kIsNotStringMask, kStringTag);
}
@@ -148,33 +162,20 @@ bool String::IsTwoByteRepresentation() const {
return (type & kStringEncodingMask) == kTwoByteStringTag;
}
-bool String::IsOneByteRepresentationUnderneath() {
- uint32_t type = map()->instance_type();
- STATIC_ASSERT(kIsIndirectStringTag != 0);
- STATIC_ASSERT((kIsIndirectStringMask & kStringEncodingMask) == 0);
- DCHECK(IsFlat());
- switch (type & (kIsIndirectStringMask | kStringEncodingMask)) {
- case kOneByteStringTag:
- return true;
- case kTwoByteStringTag:
- return false;
- default: // Cons, sliced, thin, strings need to go deeper.
- return GetUnderlying()->IsOneByteRepresentationUnderneath();
- }
-}
-
-bool String::IsTwoByteRepresentationUnderneath() {
- uint32_t type = map()->instance_type();
- STATIC_ASSERT(kIsIndirectStringTag != 0);
- STATIC_ASSERT((kIsIndirectStringMask & kStringEncodingMask) == 0);
- DCHECK(IsFlat());
- switch (type & (kIsIndirectStringMask | kStringEncodingMask)) {
- case kOneByteStringTag:
- return false;
- case kTwoByteStringTag:
- return true;
- default: // Cons, sliced, thin, strings need to go deeper.
- return GetUnderlying()->IsTwoByteRepresentationUnderneath();
+bool String::IsOneByteRepresentationUnderneath(String string) {
+ while (true) {
+ uint32_t type = string.map()->instance_type();
+ STATIC_ASSERT(kIsIndirectStringTag != 0);
+ STATIC_ASSERT((kIsIndirectStringMask & kStringEncodingMask) == 0);
+ DCHECK(string.IsFlat());
+ switch (type & (kIsIndirectStringMask | kStringEncodingMask)) {
+ case kOneByteStringTag:
+ return true;
+ case kTwoByteStringTag:
+ return false;
+ default: // Cons, sliced, and thin strings need to go deeper.
+ string = string.GetUnderlying();
+ }
}
}
@@ -219,7 +220,7 @@ class OneByteStringKey : public SequentialStringKey<uint8_t> {
OneByteStringKey(Vector<const uint8_t> str, uint64_t seed)
: SequentialStringKey<uint8_t>(str, seed) {}
- bool IsMatch(Object* string) override {
+ bool IsMatch(Object string) override {
return String::cast(string)->IsOneByteEqualTo(string_);
}
@@ -238,11 +239,13 @@ class SeqOneByteSubStringKey : public StringTableKey {
#endif
SeqOneByteSubStringKey(Isolate* isolate, Handle<SeqOneByteString> string,
int from, int length)
- : StringTableKey(StringHasher::HashSequentialString(
- string->GetChars() + from, length, isolate->heap()->HashSeed())),
- string_(string),
- from_(from),
- length_(length) {
+ : StringTableKey(0), string_(string), from_(from), length_(length) {
+ // We have to set the hash later.
+ DisallowHeapAllocation no_gc;
+ uint32_t hash = StringHasher::HashSequentialString(
+ string->GetChars(no_gc) + from, length, isolate->heap()->HashSeed());
+ set_hash_field(hash);
+
DCHECK_LE(0, length_);
DCHECK_LE(from_ + length_, string_->length());
DCHECK(string_->IsSeqOneByteString());
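The constructor change above defers the hash computation so it can run inside a DisallowHeapAllocation scope, since GetChars() now requires a no-GC witness before handing out a raw character pointer. A standalone sketch of that witness idiom, with illustrative DisallowAlloc/Buffer/HashRange names:

// Illustrative stand-in for V8's DisallowHeapAllocation scope object.
struct DisallowAlloc {};

struct Buffer {
  const char* data_;
  // The raw-pointer accessor demands a live no-allocation scope as proof
  // that nothing can move the bytes while the pointer is in use.
  const char* chars(const DisallowAlloc&) const { return data_; }
};

unsigned HashRange(const Buffer& b, int from, int len) {
  DisallowAlloc no_gc;                    // open the scope
  const char* p = b.chars(no_gc) + from;  // raw pointer is valid inside it
  unsigned h = 0;
  for (int i = 0; i < len; i++) h = h * 31 + static_cast<unsigned char>(p[i]);
  return h;
}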
@@ -251,7 +254,7 @@ class SeqOneByteSubStringKey : public StringTableKey {
#pragma warning(pop)
#endif
- bool IsMatch(Object* string) override;
+ bool IsMatch(Object string) override;
Handle<String> AsHandle(Isolate* isolate) override;
private:
@@ -265,7 +268,7 @@ class TwoByteStringKey : public SequentialStringKey<uc16> {
explicit TwoByteStringKey(Vector<const uc16> str, uint64_t seed)
: SequentialStringKey<uc16>(str, seed) {}
- bool IsMatch(Object* string) override {
+ bool IsMatch(Object string) override {
return String::cast(string)->IsTwoByteEqualTo(string_);
}
@@ -279,7 +282,7 @@ class Utf8StringKey : public StringTableKey {
: StringTableKey(StringHasher::ComputeUtf8Hash(string, seed, &chars_)),
string_(string) {}
- bool IsMatch(Object* string) override {
+ bool IsMatch(Object string) override {
return String::cast(string)->IsUtf8EqualTo(string_);
}
@@ -293,8 +296,8 @@ class Utf8StringKey : public StringTableKey {
int chars_; // Caches the number of characters when computing the hash code.
};
-bool String::Equals(String* other) {
- if (other == this) return true;
+bool String::Equals(String other) {
+ if (other == *this) return true;
if (this->IsInternalizedString() && other->IsInternalizedString()) {
return false;
}
@@ -328,24 +331,26 @@ Handle<String> String::Flatten(Isolate* isolate, Handle<String> string,
uint16_t String::Get(int index) {
DCHECK(index >= 0 && index < length());
- switch (StringShape(this).full_representation_tag()) {
+ switch (StringShape(*this).full_representation_tag()) {
case kSeqStringTag | kOneByteStringTag:
- return SeqOneByteString::cast(this)->SeqOneByteStringGet(index);
+ return SeqOneByteString::cast(*this)->SeqOneByteStringGet(index);
case kSeqStringTag | kTwoByteStringTag:
- return SeqTwoByteString::cast(this)->SeqTwoByteStringGet(index);
+ return SeqTwoByteString::cast(*this)->SeqTwoByteStringGet(index);
case kConsStringTag | kOneByteStringTag:
case kConsStringTag | kTwoByteStringTag:
- return ConsString::cast(this)->ConsStringGet(index);
+ return ConsString::cast(*this)->ConsStringGet(index);
case kExternalStringTag | kOneByteStringTag:
- return ExternalOneByteString::cast(this)->ExternalOneByteStringGet(index);
+ return ExternalOneByteString::cast(*this)->ExternalOneByteStringGet(
+ index);
case kExternalStringTag | kTwoByteStringTag:
- return ExternalTwoByteString::cast(this)->ExternalTwoByteStringGet(index);
+ return ExternalTwoByteString::cast(*this)->ExternalTwoByteStringGet(
+ index);
case kSlicedStringTag | kOneByteStringTag:
case kSlicedStringTag | kTwoByteStringTag:
- return SlicedString::cast(this)->SlicedStringGet(index);
+ return SlicedString::cast(*this)->SlicedStringGet(index);
case kThinStringTag | kOneByteStringTag:
case kThinStringTag | kTwoByteStringTag:
- return ThinString::cast(this)->ThinStringGet(index);
+ return ThinString::cast(*this)->ThinStringGet(index);
default:
break;
}
@@ -355,32 +360,35 @@ uint16_t String::Get(int index) {
void String::Set(int index, uint16_t value) {
DCHECK(index >= 0 && index < length());
- DCHECK(StringShape(this).IsSequential());
+ DCHECK(StringShape(*this).IsSequential());
return this->IsOneByteRepresentation()
- ? SeqOneByteString::cast(this)->SeqOneByteStringSet(index, value)
- : SeqTwoByteString::cast(this)->SeqTwoByteStringSet(index, value);
+ ? SeqOneByteString::cast(*this)->SeqOneByteStringSet(index, value)
+ : SeqTwoByteString::cast(*this)->SeqTwoByteStringSet(index, value);
}
bool String::IsFlat() {
- if (!StringShape(this).IsCons()) return true;
- return ConsString::cast(this)->second()->length() == 0;
+ if (!StringShape(*this).IsCons()) return true;
+ return ConsString::cast(*this)->second()->length() == 0;
}
-String* String::GetUnderlying() {
+String String::GetUnderlying() {
// Giving direct access to underlying string only makes sense if the
// wrapping string is already flattened.
DCHECK(this->IsFlat());
- DCHECK(StringShape(this).IsIndirect());
- STATIC_ASSERT(ConsString::kFirstOffset == SlicedString::kParentOffset);
- STATIC_ASSERT(ConsString::kFirstOffset == ThinString::kActualOffset);
+ DCHECK(StringShape(*this).IsIndirect());
+ STATIC_ASSERT(static_cast<int>(ConsString::kFirstOffset) ==
+ static_cast<int>(SlicedString::kParentOffset));
+ STATIC_ASSERT(static_cast<int>(ConsString::kFirstOffset) ==
+ static_cast<int>(ThinString::kActualOffset));
const int kUnderlyingOffset = SlicedString::kParentOffset;
return String::cast(READ_FIELD(this, kUnderlyingOffset));
}
template <class Visitor>
-ConsString* String::VisitFlat(Visitor* visitor, String* string,
- const int offset) {
+ConsString String::VisitFlat(Visitor* visitor, String string,
+ const int offset) {
+ DisallowHeapAllocation no_gc;
int slice_offset = offset;
const int length = string->length();
DCHECK(offset <= length);
@@ -389,31 +397,31 @@ ConsString* String::VisitFlat(Visitor* visitor, String* string,
switch (type & (kStringRepresentationMask | kStringEncodingMask)) {
case kSeqStringTag | kOneByteStringTag:
visitor->VisitOneByteString(
- SeqOneByteString::cast(string)->GetChars() + slice_offset,
+ SeqOneByteString::cast(string)->GetChars(no_gc) + slice_offset,
length - offset);
- return nullptr;
+ return ConsString();
case kSeqStringTag | kTwoByteStringTag:
visitor->VisitTwoByteString(
- SeqTwoByteString::cast(string)->GetChars() + slice_offset,
+ SeqTwoByteString::cast(string)->GetChars(no_gc) + slice_offset,
length - offset);
- return nullptr;
+ return ConsString();
case kExternalStringTag | kOneByteStringTag:
visitor->VisitOneByteString(
ExternalOneByteString::cast(string)->GetChars() + slice_offset,
length - offset);
- return nullptr;
+ return ConsString();
case kExternalStringTag | kTwoByteStringTag:
visitor->VisitTwoByteString(
ExternalTwoByteString::cast(string)->GetChars() + slice_offset,
length - offset);
- return nullptr;
+ return ConsString();
case kSlicedStringTag | kOneByteStringTag:
case kSlicedStringTag | kTwoByteStringTag: {
- SlicedString* slicedString = SlicedString::cast(string);
+ SlicedString slicedString = SlicedString::cast(string);
slice_offset += slicedString->offset();
string = slicedString->parent();
continue;
@@ -435,20 +443,22 @@ ConsString* String::VisitFlat(Visitor* visitor, String* string,
}
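VisitFlat now reports "walk ended at an unflattened cons" by returning a null ConsString value rather than a nullable pointer, so callers switch from nullptr checks to is_null(). A hedged sketch of a visitor satisfying the callback shape used in the switch above (CountingVisitor is illustrative, not from this diff):

#include <cstdint>

// Any type with these two callbacks fits the Visitor parameter; the walk
// above invokes exactly one of them once it reaches a flat segment.
class CountingVisitor {
 public:
  void VisitOneByteString(const uint8_t* chars, int length) {
    for (int i = 0; i < length; i++) count_ += (chars[i] == 'a');
  }
  void VisitTwoByteString(const uint16_t* chars, int length) {
    for (int i = 0; i < length; i++) count_ += (chars[i] == 'a');
  }
  int count() const { return count_; }

 private:
  int count_ = 0;
};

// Assumed usage (setup not shown in this diff):
//   CountingVisitor v;
//   ConsString tail = String::VisitFlat(&v, string);
//   if (!tail.is_null()) { /* hit an unflattened cons; flatten first */ }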
template <>
-inline Vector<const uint8_t> String::GetCharVector() {
- String::FlatContent flat = GetFlatContent();
+inline Vector<const uint8_t> String::GetCharVector(
+ const DisallowHeapAllocation& no_gc) {
+ String::FlatContent flat = GetFlatContent(no_gc);
DCHECK(flat.IsOneByte());
return flat.ToOneByteVector();
}
template <>
-inline Vector<const uc16> String::GetCharVector() {
- String::FlatContent flat = GetFlatContent();
+inline Vector<const uc16> String::GetCharVector(
+ const DisallowHeapAllocation& no_gc) {
+ String::FlatContent flat = GetFlatContent(no_gc);
DCHECK(flat.IsTwoByte());
return flat.ToUC16Vector();
}
-uint32_t String::ToValidIndex(Object* number) {
+uint32_t String::ToValidIndex(Object number) {
uint32_t index = PositiveNumberToUint32(number);
uint32_t length_value = static_cast<uint32_t>(length());
if (index > length_value) return length_value;
@@ -470,7 +480,8 @@ Address SeqOneByteString::GetCharsAddress() {
return FIELD_ADDR(this, kHeaderSize);
}
-uint8_t* SeqOneByteString::GetChars() {
+uint8_t* SeqOneByteString::GetChars(const DisallowHeapAllocation& no_gc) {
+ USE(no_gc);
return reinterpret_cast<uint8_t*>(GetCharsAddress());
}
@@ -478,7 +489,8 @@ Address SeqTwoByteString::GetCharsAddress() {
return FIELD_ADDR(this, kHeaderSize);
}
-uc16* SeqTwoByteString::GetChars() {
+uc16* SeqTwoByteString::GetChars(const DisallowHeapAllocation& no_gc) {
+ USE(no_gc);
return reinterpret_cast<uc16*>(FIELD_ADDR(this, kHeaderSize));
}
@@ -500,49 +512,49 @@ int SeqOneByteString::SeqOneByteStringSize(InstanceType instance_type) {
return SizeFor(length());
}
-String* SlicedString::parent() {
- return String::cast(READ_FIELD(this, kParentOffset));
+String SlicedString::parent() {
+ return String::cast(READ_FIELD(*this, kParentOffset));
}
-void SlicedString::set_parent(Isolate* isolate, String* parent,
+void SlicedString::set_parent(Isolate* isolate, String parent,
WriteBarrierMode mode) {
DCHECK(parent->IsSeqString() || parent->IsExternalString());
- WRITE_FIELD(this, kParentOffset, parent);
- CONDITIONAL_WRITE_BARRIER(this, kParentOffset, parent, mode);
+ WRITE_FIELD(*this, kParentOffset, parent);
+ CONDITIONAL_WRITE_BARRIER(*this, kParentOffset, parent, mode);
}
SMI_ACCESSORS(SlicedString, offset, kOffsetOffset)
-String* ConsString::first() {
+String ConsString::first() {
return String::cast(READ_FIELD(this, kFirstOffset));
}
-Object* ConsString::unchecked_first() { return READ_FIELD(this, kFirstOffset); }
+Object ConsString::unchecked_first() { return READ_FIELD(*this, kFirstOffset); }
-void ConsString::set_first(Isolate* isolate, String* value,
+void ConsString::set_first(Isolate* isolate, String value,
WriteBarrierMode mode) {
- WRITE_FIELD(this, kFirstOffset, value);
- CONDITIONAL_WRITE_BARRIER(this, kFirstOffset, value, mode);
+ WRITE_FIELD(*this, kFirstOffset, value);
+ CONDITIONAL_WRITE_BARRIER(*this, kFirstOffset, value, mode);
}
-String* ConsString::second() {
- return String::cast(READ_FIELD(this, kSecondOffset));
+String ConsString::second() {
+ return String::cast(READ_FIELD(*this, kSecondOffset));
}
-Object* ConsString::unchecked_second() {
- return RELAXED_READ_FIELD(this, kSecondOffset);
+Object ConsString::unchecked_second() {
+ return RELAXED_READ_FIELD(*this, kSecondOffset);
}
-void ConsString::set_second(Isolate* isolate, String* value,
+void ConsString::set_second(Isolate* isolate, String value,
WriteBarrierMode mode) {
- WRITE_FIELD(this, kSecondOffset, value);
- CONDITIONAL_WRITE_BARRIER(this, kSecondOffset, value, mode);
+ WRITE_FIELD(*this, kSecondOffset, value);
+ CONDITIONAL_WRITE_BARRIER(*this, kSecondOffset, value, mode);
}
ACCESSORS(ThinString, actual, String, kActualOffset);
-HeapObject* ThinString::unchecked_actual() const {
- return reinterpret_cast<HeapObject*>(READ_FIELD(this, kActualOffset));
+HeapObject ThinString::unchecked_actual() const {
+ return HeapObject::unchecked_cast(READ_FIELD(*this, kActualOffset));
}
bool ExternalString::is_uncached() const {
@@ -551,40 +563,39 @@ bool ExternalString::is_uncached() const {
}
Address ExternalString::resource_as_address() {
- return *reinterpret_cast<Address*>(FIELD_ADDR(this, kResourceOffset));
+ return *reinterpret_cast<Address*>(FIELD_ADDR(*this, kResourceOffset));
}
void ExternalString::set_address_as_resource(Address address) {
- DCHECK(IsAligned(address, kPointerSize));
- *reinterpret_cast<Address*>(FIELD_ADDR(this, kResourceOffset)) = address;
+ *reinterpret_cast<Address*>(FIELD_ADDR(*this, kResourceOffset)) = address;
if (IsExternalOneByteString()) {
- ExternalOneByteString::cast(this)->update_data_cache();
+ ExternalOneByteString::cast(*this)->update_data_cache();
} else {
- ExternalTwoByteString::cast(this)->update_data_cache();
+ ExternalTwoByteString::cast(*this)->update_data_cache();
}
}
uint32_t ExternalString::resource_as_uint32() {
return static_cast<uint32_t>(
- *reinterpret_cast<uintptr_t*>(FIELD_ADDR(this, kResourceOffset)));
+ *reinterpret_cast<uintptr_t*>(FIELD_ADDR(*this, kResourceOffset)));
}
void ExternalString::set_uint32_as_resource(uint32_t value) {
- *reinterpret_cast<uintptr_t*>(FIELD_ADDR(this, kResourceOffset)) = value;
+ *reinterpret_cast<uintptr_t*>(FIELD_ADDR(*this, kResourceOffset)) = value;
if (is_uncached()) return;
const char** data_field =
- reinterpret_cast<const char**>(FIELD_ADDR(this, kResourceDataOffset));
+ reinterpret_cast<const char**>(FIELD_ADDR(*this, kResourceDataOffset));
*data_field = nullptr;
}
const ExternalOneByteString::Resource* ExternalOneByteString::resource() {
- return *reinterpret_cast<Resource**>(FIELD_ADDR(this, kResourceOffset));
+ return *reinterpret_cast<Resource**>(FIELD_ADDR(*this, kResourceOffset));
}
void ExternalOneByteString::update_data_cache() {
if (is_uncached()) return;
const char** data_field =
- reinterpret_cast<const char**>(FIELD_ADDR(this, kResourceDataOffset));
+ reinterpret_cast<const char**>(FIELD_ADDR(*this, kResourceDataOffset));
*data_field = resource()->data();
}
@@ -593,13 +604,12 @@ void ExternalOneByteString::SetResource(
set_resource(resource);
size_t new_payload = resource == nullptr ? 0 : resource->length();
if (new_payload > 0)
- isolate->heap()->UpdateExternalString(this, 0, new_payload);
+ isolate->heap()->UpdateExternalString(*this, 0, new_payload);
}
void ExternalOneByteString::set_resource(
const ExternalOneByteString::Resource* resource) {
- DCHECK(IsAligned(reinterpret_cast<intptr_t>(resource), kPointerSize));
- *reinterpret_cast<const Resource**>(FIELD_ADDR(this, kResourceOffset)) =
+ *reinterpret_cast<const Resource**>(FIELD_ADDR(*this, kResourceOffset)) =
resource;
if (resource != nullptr) update_data_cache();
}
@@ -614,13 +624,13 @@ uint16_t ExternalOneByteString::ExternalOneByteStringGet(int index) {
}
const ExternalTwoByteString::Resource* ExternalTwoByteString::resource() {
- return *reinterpret_cast<Resource**>(FIELD_ADDR(this, kResourceOffset));
+ return *reinterpret_cast<Resource**>(FIELD_ADDR(*this, kResourceOffset));
}
void ExternalTwoByteString::update_data_cache() {
if (is_uncached()) return;
- const uint16_t** data_field =
- reinterpret_cast<const uint16_t**>(FIELD_ADDR(this, kResourceDataOffset));
+ const uint16_t** data_field = reinterpret_cast<const uint16_t**>(
+ FIELD_ADDR(*this, kResourceDataOffset));
*data_field = resource()->data();
}
@@ -629,12 +639,12 @@ void ExternalTwoByteString::SetResource(
set_resource(resource);
size_t new_payload = resource == nullptr ? 0 : resource->length() * 2;
if (new_payload > 0)
- isolate->heap()->UpdateExternalString(this, 0, new_payload);
+ isolate->heap()->UpdateExternalString(*this, 0, new_payload);
}
void ExternalTwoByteString::set_resource(
const ExternalTwoByteString::Resource* resource) {
- *reinterpret_cast<const Resource**>(FIELD_ADDR(this, kResourceOffset)) =
+ *reinterpret_cast<const Resource**>(FIELD_ADDR(*this, kResourceOffset)) =
resource;
if (resource != nullptr) update_data_cache();
}
@@ -653,11 +663,11 @@ const uint16_t* ExternalTwoByteString::ExternalTwoByteStringGetData(
int ConsStringIterator::OffsetForDepth(int depth) { return depth & kDepthMask; }
-void ConsStringIterator::PushLeft(ConsString* string) {
+void ConsStringIterator::PushLeft(ConsString string) {
frames_[depth_++ & kDepthMask] = string;
}
-void ConsStringIterator::PushRight(ConsString* string) {
+void ConsStringIterator::PushRight(ConsString string) {
  // In-place update.
frames_[(depth_ - 1) & kDepthMask] = string;
}
@@ -680,28 +690,28 @@ uint16_t StringCharacterStream::GetNext() {
return is_one_byte_ ? *buffer8_++ : *buffer16_++;
}
-StringCharacterStream::StringCharacterStream(String* string, int offset)
+StringCharacterStream::StringCharacterStream(String string, int offset)
: is_one_byte_(false) {
Reset(string, offset);
}
-void StringCharacterStream::Reset(String* string, int offset) {
+void StringCharacterStream::Reset(String string, int offset) {
buffer8_ = nullptr;
end_ = nullptr;
- ConsString* cons_string = String::VisitFlat(this, string, offset);
+ ConsString cons_string = String::VisitFlat(this, string, offset);
iter_.Reset(cons_string, offset);
- if (cons_string != nullptr) {
+ if (!cons_string.is_null()) {
string = iter_.Next(&offset);
- if (string != nullptr) String::VisitFlat(this, string, offset);
+ if (!string.is_null()) String::VisitFlat(this, string, offset);
}
}
bool StringCharacterStream::HasMore() {
if (buffer8_ != end_) return true;
int offset;
- String* string = iter_.Next(&offset);
+ String string = iter_.Next(&offset);
DCHECK_EQ(offset, 0);
- if (string == nullptr) return false;
+ if (string.is_null()) return false;
String::VisitFlat(this, string);
DCHECK(buffer8_ != end_);
return true;
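With String passed by value, HasMore() ends iteration when the iterator hands back a null value rather than a null pointer. A standalone analogue of that sentinel style (Chunk and ChunkIter are hypothetical types):

#include <cstdint>
#include <utility>
#include <vector>

struct Chunk {
  const uint16_t* data = nullptr;
  int length = 0;
  bool is_null() const { return data == nullptr; }  // value-type null check
};

class ChunkIter {
 public:
  explicit ChunkIter(std::vector<Chunk> chunks) : chunks_(std::move(chunks)) {}
  Chunk Next() {
    if (pos_ == chunks_.size()) return Chunk();  // null value ends the walk
    return chunks_[pos_++];
  }

 private:
  std::vector<Chunk> chunks_;
  std::size_t pos_ = 0;
};

uint64_t SumAll(ChunkIter it) {
  uint64_t sum = 0;
  for (Chunk c = it.Next(); !c.is_null(); c = it.Next())
    for (int i = 0; i < c.length; i++) sum += c.data[i];
  return sum;
}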
@@ -729,12 +739,15 @@ bool String::AsArrayIndex(uint32_t* index) {
return SlowAsArrayIndex(index);
}
-String::SubStringRange::SubStringRange(String* string, int first, int length)
+SubStringRange::SubStringRange(String string,
+ const DisallowHeapAllocation& no_gc, int first,
+ int length)
: string_(string),
first_(first),
- length_(length == -1 ? string->length() : length) {}
+ length_(length == -1 ? string->length() : length),
+ no_gc_(no_gc) {}
-class String::SubStringRange::iterator final {
+class SubStringRange::iterator final {
public:
typedef std::forward_iterator_tag iterator_category;
typedef int difference_type;
@@ -759,18 +772,19 @@ class String::SubStringRange::iterator final {
private:
friend class String;
- iterator(String* from, int offset)
- : content_(from->GetFlatContent()), offset_(offset) {}
+ friend class SubStringRange;
+ iterator(String from, int offset, const DisallowHeapAllocation& no_gc)
+ : content_(from->GetFlatContent(no_gc)), offset_(offset) {}
String::FlatContent content_;
int offset_;
};
-String::SubStringRange::iterator String::SubStringRange::begin() {
- return String::SubStringRange::iterator(string_, first_);
+SubStringRange::iterator SubStringRange::begin() {
+ return SubStringRange::iterator(string_, first_, no_gc_);
}
-String::SubStringRange::iterator String::SubStringRange::end() {
- return String::SubStringRange::iterator(string_, first_ + length_);
+SubStringRange::iterator SubStringRange::end() {
+ return SubStringRange::iterator(string_, first_ + length_, no_gc_);
}
} // namespace internal
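The SubStringRange rework threads the DisallowHeapAllocation witness through the range and into its iterators, so flat content can only be held while the no-GC scope is alive. A standalone sketch of a range capturing such a witness by reference (NoAlloc and IntRange are illustrative):

#include <vector>

struct NoAlloc {};  // stand-in for DisallowHeapAllocation

// The range stores the witness by reference, so its iterators can only
// exist while the no-GC scope does, mirroring the no_gc_ member above.
class IntRange {
 public:
  IntRange(const std::vector<int>& v, const NoAlloc& no_alloc)
      : v_(v), no_alloc_(no_alloc) {}
  std::vector<int>::const_iterator begin() const { return v_.begin(); }
  std::vector<int>::const_iterator end() const { return v_.end(); }

 private:
  const std::vector<int>& v_;
  const NoAlloc& no_alloc_;  // must outlive the range
};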
diff --git a/deps/v8/src/objects/string-table-inl.h b/deps/v8/src/objects/string-table-inl.h
new file mode 100644
index 0000000000..ce0193d12d
--- /dev/null
+++ b/deps/v8/src/objects/string-table-inl.h
@@ -0,0 +1,69 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_STRING_TABLE_INL_H_
+#define V8_OBJECTS_STRING_TABLE_INL_H_
+
+#include "src/objects/string-table.h"
+
+#include "src/objects/string-inl.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+CAST_ACCESSOR(StringSet)
+CAST_ACCESSOR(StringTable)
+
+StringTable::StringTable(Address ptr)
+ : HashTable<StringTable, StringTableShape>(ptr) {
+ SLOW_DCHECK(IsStringTable());
+}
+
+StringSet::StringSet(Address ptr) : HashTable<StringSet, StringSetShape>(ptr) {
+ SLOW_DCHECK(IsStringSet());
+}
+
+bool StringSetShape::IsMatch(String key, Object value) {
+ DCHECK(value->IsString());
+ return key->Equals(String::cast(value));
+}
+
+uint32_t StringSetShape::Hash(Isolate* isolate, String key) {
+ return key->Hash();
+}
+
+uint32_t StringSetShape::HashForObject(Isolate* isolate, Object object) {
+ return String::cast(object)->Hash();
+}
+
+StringTableKey::StringTableKey(uint32_t hash_field)
+ : HashTableKey(hash_field >> Name::kHashShift), hash_field_(hash_field) {}
+
+void StringTableKey::set_hash_field(uint32_t hash_field) {
+ hash_field_ = hash_field;
+ set_hash(hash_field >> Name::kHashShift);
+}
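set_hash_field() keeps the key's raw hash and its packed hash_field in sync: the field stores flag bits in its low Name::kHashShift bits, so the hash proper is everything above them. A small sketch of that packing (the shift width here is an assumption for illustration; the real constant is Name::kHashShift):

#include <cstdint>

constexpr int kHashShiftSketch = 2;  // assumed width of the low flag bits

// hash_field packs flag bits into the low bits; the raw hash handed to
// the HashTableKey base is everything above them.
uint32_t RawHash(uint32_t hash_field) { return hash_field >> kHashShiftSketch; }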
+
+Handle<Object> StringTableShape::AsHandle(Isolate* isolate,
+ StringTableKey* key) {
+ return key->AsHandle(isolate);
+}
+
+uint32_t StringTableShape::HashForObject(Isolate* isolate, Object object) {
+ return String::cast(object)->Hash();
+}
+
+RootIndex StringTableShape::GetMapRootIndex() {
+ return RootIndex::kStringTableMap;
+}
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_STRING_TABLE_INL_H_
diff --git a/deps/v8/src/objects/string-table.h b/deps/v8/src/objects/string-table.h
index b26e86a381..44b9fd930c 100644
--- a/deps/v8/src/objects/string-table.h
+++ b/deps/v8/src/objects/string-table.h
@@ -6,6 +6,7 @@
#define V8_OBJECTS_STRING_TABLE_H_
#include "src/objects/hash-table.h"
+#include "src/roots.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -32,13 +33,13 @@ class StringTableKey : public HashTableKey {
class StringTableShape : public BaseShape<StringTableKey*> {
public:
- static inline bool IsMatch(Key key, Object* value) {
+ static inline bool IsMatch(Key key, Object value) {
return key->IsMatch(value);
}
static inline uint32_t Hash(Isolate* isolate, Key key) { return key->Hash(); }
- static inline uint32_t HashForObject(Isolate* isolate, Object* object);
+ static inline uint32_t HashForObject(Isolate* isolate, Object object);
static inline Handle<Object> AsHandle(Isolate* isolate, Key key);
@@ -62,8 +63,8 @@ class StringTable : public HashTable<StringTable, StringTableShape> {
Handle<String> key);
static Handle<String> LookupKey(Isolate* isolate, StringTableKey* key);
static Handle<String> AddKeyNoResize(Isolate* isolate, StringTableKey* key);
- static String* ForwardStringIfExists(Isolate* isolate, StringTableKey* key,
- String* string);
+ static String ForwardStringIfExists(Isolate* isolate, StringTableKey* key,
+ String string);
// Shink the StringTable if it's very empty (kMaxEmptyFactor) to avoid the
// performance overhead of re-allocating the StringTable over and over again.
@@ -74,8 +75,11 @@ class StringTable : public HashTable<StringTable, StringTableShape> {
// string handle if it is found, or an empty handle otherwise.
V8_WARN_UNUSED_RESULT static MaybeHandle<String> LookupTwoCharsStringIfExists(
Isolate* isolate, uint16_t c1, uint16_t c2);
- static Object* LookupStringIfExists_NoAllocate(Isolate* isolate,
- String* string);
+ // {raw_string} must be a tagged String pointer.
+ // Returns a tagged pointer: either an internalized string, or a Smi
+ // sentinel.
+ static Address LookupStringIfExists_NoAllocate(Isolate* isolate,
+ Address raw_string);
static void EnsureCapacityForDeserialization(Isolate* isolate, int expected);
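LookupStringIfExists_NoAllocate now traffics in raw Address words so it can be called where handles are unavailable; per the comment above, the result is either a tagged internalized String or a Smi sentinel. A standalone sketch of telling the two apart, assuming V8's usual low-bit Smi tagging (stated as an assumption, not taken from this diff):

#include <cstdint>

using Address = uintptr_t;

// Assumption for this sketch: Smis carry a low tag bit of 0 and heap
// object pointers a low tag bit of 1.
bool IsSmiWord(Address value) { return (value & 1) == 0; }

const char* Classify(Address result) {
  return IsSmiWord(result) ? "Smi sentinel: string not in the table"
                           : "tagged pointer: the internalized string";
}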
@@ -89,14 +93,14 @@ class StringTable : public HashTable<StringTable, StringTableShape> {
template <bool seq_one_byte>
friend class JsonParser;
- DISALLOW_IMPLICIT_CONSTRUCTORS(StringTable);
+ OBJECT_CONSTRUCTORS(StringTable, HashTable<StringTable, StringTableShape>)
};
-class StringSetShape : public BaseShape<String*> {
+class StringSetShape : public BaseShape<String> {
public:
- static inline bool IsMatch(String* key, Object* value);
- static inline uint32_t Hash(Isolate* isolate, String* key);
- static inline uint32_t HashForObject(Isolate* isolate, Object* object);
+ static inline bool IsMatch(String key, Object value);
+ static inline uint32_t Hash(Isolate* isolate, String key);
+ static inline uint32_t HashForObject(Isolate* isolate, Object object);
static const int kPrefixSize = 0;
static const int kEntrySize = 1;
@@ -110,6 +114,7 @@ class StringSet : public HashTable<StringSet, StringSetShape> {
bool Has(Isolate* isolate, Handle<String> name);
DECL_CAST(StringSet)
+ OBJECT_CONSTRUCTORS(StringSet, HashTable<StringSet, StringSetShape>)
};
} // namespace internal
diff --git a/deps/v8/src/objects/string.h b/deps/v8/src/objects/string.h
index 206bed641c..3a1fc21ac4 100644
--- a/deps/v8/src/objects/string.h
+++ b/deps/v8/src/objects/string.h
@@ -6,7 +6,9 @@
#define V8_OBJECTS_STRING_H_
#include "src/base/bits.h"
+#include "src/objects/instance-type.h"
#include "src/objects/name.h"
+#include "src/objects/smi.h"
#include "src/unicode-decoder.h"
// Has to be the last include (doesn't have include guards):
@@ -15,7 +17,7 @@
namespace v8 {
namespace internal {
-class BigInt;
+enum InstanceType : uint16_t;
enum AllowNullsFlag { ALLOW_NULLS, DISALLOW_NULLS };
enum RobustnessFlag { ROBUST_STRING_TRAVERSAL, FAST_STRING_TRAVERSAL };
@@ -33,8 +35,8 @@ enum RobustnessFlag { ROBUST_STRING_TRAVERSAL, FAST_STRING_TRAVERSAL };
// concrete performance benefit at that particular point in the code.
class StringShape {
public:
- inline explicit StringShape(const String* s);
- inline explicit StringShape(Map* s);
+ inline explicit StringShape(const String s);
+ inline explicit StringShape(Map s);
inline explicit StringShape(InstanceType t);
inline bool IsSequential();
inline bool IsExternal();
@@ -81,20 +83,6 @@ class String : public Name {
public:
enum Encoding { ONE_BYTE_ENCODING, TWO_BYTE_ENCODING };
- class SubStringRange {
- public:
- explicit inline SubStringRange(String* string, int first = 0,
- int length = -1);
- class iterator;
- inline iterator begin();
- inline iterator end();
-
- private:
- String* string_;
- int first_;
- int length_;
- };
-
// Representation of the flat content of a String.
// A non-flat string doesn't have flat content.
// A flat string has content that's encoded as a sequence of either
@@ -155,7 +143,8 @@ class String : public Name {
};
template <typename Char>
- V8_INLINE Vector<const Char> GetCharVector();
+ V8_INLINE Vector<const Char> GetCharVector(
+ const DisallowHeapAllocation& no_gc);
// Get and set the length of the string.
inline int length() const;
@@ -175,9 +164,9 @@ class String : public Name {
// Cons and slices have an encoding flag that may not represent the actual
// encoding of the underlying string. This is taken into account here.
- // Requires: this->IsFlat()
- inline bool IsOneByteRepresentationUnderneath();
- inline bool IsTwoByteRepresentationUnderneath();
+ // This function is static because that helps it get inlined.
+ // Requires: string.IsFlat()
+ static inline bool IsOneByteRepresentationUnderneath(String string);
// NOTE: this should be considered only a hint. False negatives are
// possible.
@@ -213,11 +202,11 @@ class String : public Name {
// If the string isn't flat, and therefore doesn't have flat content, the
// returned structure will report so, and can't provide a vector of either
// kind.
- FlatContent GetFlatContent();
+ FlatContent GetFlatContent(const DisallowHeapAllocation& no_gc);
// Returns the parent of a sliced string or first part of a flat cons string.
// Requires: StringShape(this).IsIndirect() && this->IsFlat()
- inline String* GetUnderlying();
+ inline String GetUnderlying();
// String relational comparison, implemented according to ES6 section 7.2.11
// Abstract Relational Comparison (step 5): The comparison of Strings uses a
@@ -235,16 +224,16 @@ class String : public Name {
Handle<String> y);
// Perform ES6 21.1.3.8, including checking arguments.
- static Object* IndexOf(Isolate* isolate, Handle<Object> receiver,
- Handle<Object> search, Handle<Object> position);
+ static Object IndexOf(Isolate* isolate, Handle<Object> receiver,
+ Handle<Object> search, Handle<Object> position);
// Perform string match of pattern on subject, starting at start index.
// Caller must ensure that 0 <= start_index <= sub->length(), as this does not
// check any arguments.
static int IndexOf(Isolate* isolate, Handle<String> receiver,
Handle<String> search, int start_index);
- static Object* LastIndexOf(Isolate* isolate, Handle<Object> receiver,
- Handle<Object> search, Handle<Object> position);
+ static Object LastIndexOf(Isolate* isolate, Handle<Object> receiver,
+ Handle<Object> search, Handle<Object> position);
// Encapsulates logic related to a match and its capture groups as required
// by GetSubstitution.
@@ -278,7 +267,7 @@ class String : public Name {
int start_index = 0);
// String equality operations.
- inline bool Equals(String* other);
+ inline bool Equals(String other);
inline static bool Equals(Isolate* isolate, Handle<String> one,
Handle<String> two);
bool IsUtf8EqualTo(Vector<const char> str, bool allow_prefix_match = false);
@@ -315,7 +304,7 @@ class String : public Name {
// Conversion.
inline bool AsArrayIndex(uint32_t* index);
- uint32_t inline ToValidIndex(Object* number);
+ uint32_t inline ToValidIndex(Object number);
// Trimming.
enum TrimMode { kTrim, kTrimStart, kTrimEnd };
@@ -372,7 +361,7 @@ class String : public Name {
// Helper function for flattening strings.
template <typename sinkchar>
- static void WriteToFlat(String* source, sinkchar* sink, int from, int to);
+ static void WriteToFlat(String source, sinkchar* sink, int from, int to);
// The return value may point to the first aligned word containing the first
// non-one-byte character, rather than directly to the non-one-byte character.
@@ -435,8 +424,8 @@ class String : public Name {
}
template <class Visitor>
- static inline ConsString* VisitFlat(Visitor* visitor, String* string,
- int offset = 0);
+ static inline ConsString VisitFlat(Visitor* visitor, String string,
+ int offset = 0);
static Handle<FixedArray> CalculateLineEnds(Isolate* isolate,
Handle<String> string,
@@ -452,7 +441,7 @@ class String : public Name {
// Slow case of String::Equals. This implementation works on any strings
// but it is most efficient on strings that are almost flat.
- bool SlowEquals(String* other);
+ bool SlowEquals(String other);
static bool SlowEquals(Isolate* isolate, Handle<String> one,
Handle<String> two);
@@ -463,7 +452,22 @@ class String : public Name {
// Compute and set the hash code.
uint32_t ComputeAndSetHash(Isolate* isolate);
- DISALLOW_IMPLICIT_CONSTRUCTORS(String);
+ OBJECT_CONSTRUCTORS(String, Name);
+};
+
+class SubStringRange {
+ public:
+ inline SubStringRange(String string, const DisallowHeapAllocation& no_gc,
+ int first = 0, int length = -1);
+ class iterator;
+ inline iterator begin();
+ inline iterator end();
+
+ private:
+ String string_;
+ int first_;
+ int length_;
+ const DisallowHeapAllocation& no_gc_;
};
// The SeqString abstract class captures sequential string values.
@@ -477,8 +481,7 @@ class SeqString : public String {
V8_WARN_UNUSED_RESULT static Handle<String> Truncate(Handle<SeqString> string,
int new_length);
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(SeqString);
+ OBJECT_CONSTRUCTORS(SeqString, String);
};
class InternalizedString : public String {
@@ -486,8 +489,7 @@ class InternalizedString : public String {
DECL_CAST(InternalizedString)
// TODO(neis): Possibly move some stuff from String here.
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(InternalizedString);
+ OBJECT_CONSTRUCTORS(InternalizedString, String);
};
// The OneByteString class captures sequential one-byte string objects.
@@ -503,7 +505,7 @@ class SeqOneByteString : public SeqString {
// Get the address of the characters in this string.
inline Address GetCharsAddress();
- inline uint8_t* GetChars();
+ inline uint8_t* GetChars(const DisallowHeapAllocation& no_gc);
// Clear uninitialized padding space. This ensures that the snapshot content
// is deterministic.
@@ -528,8 +530,7 @@ class SeqOneByteString : public SeqString {
class BodyDescriptor;
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(SeqOneByteString);
+ OBJECT_CONSTRUCTORS(SeqOneByteString, SeqString);
};
// The TwoByteString class captures sequential unicode string objects.
@@ -545,7 +546,7 @@ class SeqTwoByteString : public SeqString {
// Get the address of the characters in this string.
inline Address GetCharsAddress();
- inline uc16* GetChars();
+ inline uc16* GetChars(const DisallowHeapAllocation& no_gc);
// Clear uninitialized padding space. This ensures that the snapshot content
// is deterministic.
@@ -571,8 +572,7 @@ class SeqTwoByteString : public SeqString {
class BodyDescriptor;
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(SeqTwoByteString);
+ OBJECT_CONSTRUCTORS(SeqTwoByteString, SeqString);
};
// The ConsString class describes string values built by using the
@@ -586,19 +586,19 @@ class SeqTwoByteString : public SeqString {
class ConsString : public String {
public:
// First string of the cons cell.
- inline String* first();
+ inline String first();
// Doesn't check that the result is a string, even in debug mode. This is
// useful during GC where the mark bits confuse the checks.
- inline Object* unchecked_first();
- inline void set_first(Isolate* isolate, String* first,
+ inline Object unchecked_first();
+ inline void set_first(Isolate* isolate, String first,
WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
// Second string of the cons cell.
- inline String* second();
+ inline String second();
// Doesn't check that the result is a string, even in debug mode. This is
// useful during GC where the mark bits confuse the checks.
- inline Object* unchecked_second();
- inline void set_second(Isolate* isolate, String* second,
+ inline Object unchecked_second();
+ inline void set_second(Isolate* isolate, String second,
WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
// Dispatched behavior.
@@ -607,20 +607,23 @@ class ConsString : public String {
DECL_CAST(ConsString)
// Layout description.
- static const int kFirstOffset = String::kHeaderSize;
- static const int kSecondOffset = kFirstOffset + kPointerSize;
- static const int kSize = kSecondOffset + kPointerSize;
+#define CONS_STRING_FIELDS(V) \
+ V(kFirstOffset, kTaggedSize) \
+ V(kSecondOffset, kTaggedSize) \
+ /* Total size. */ \
+ V(kSize, 0)
+
+ DEFINE_FIELD_OFFSET_CONSTANTS(String::kHeaderSize, CONS_STRING_FIELDS)
+#undef CONS_STRING_FIELDS
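Throughout this patch, hand-chained offset constants give way to X-macro field lists fed to DEFINE_FIELD_OFFSET_CONSTANTS. A simplified, hypothetical sketch of how such a macro can turn a field list into packed offsets (not V8's actual expansion):

constexpr int kTaggedSizeSketch = 8;  // assumption: 64-bit tagged field
constexpr int kHeaderSizeSketch = 8;  // assumption: parent header size

#define SKETCH_FIELDS(V)                    \
  V(kFirstOffsetSketch, kTaggedSizeSketch)  \
  V(kSecondOffsetSketch, kTaggedSizeSketch) \
  V(kSizeSketch, 0)

// An enumerator without an initializer is previous + 1, so interleaving
// Name / NameEnd entries makes each offset start where the last field ends.
#define SKETCH_ENTRY(Name, Size) Name, Name##End = Name + Size - 1,
enum FieldOffsetsSketch : int {
  kSketchStartMinusOne = kHeaderSizeSketch - 1,
  SKETCH_FIELDS(SKETCH_ENTRY)
};
#undef SKETCH_ENTRY
#undef SKETCH_FIELDS

static_assert(kFirstOffsetSketch == kHeaderSizeSketch,
              "first field starts right after the header");
static_assert(kSecondOffsetSketch == kHeaderSizeSketch + kTaggedSizeSketch,
              "fields are packed back to back");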
// Minimum length for a cons string.
static const int kMinLength = 13;
- typedef FixedBodyDescriptor<kFirstOffset, kSecondOffset + kPointerSize, kSize>
- BodyDescriptor;
+ typedef FixedBodyDescriptor<kFirstOffset, kSize, kSize> BodyDescriptor;
DECL_VERIFIER(ConsString)
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(ConsString);
+ OBJECT_CONSTRUCTORS(ConsString, String);
};
// The ThinString class describes string objects that are just references
@@ -633,9 +636,9 @@ class ConsString : public String {
class ThinString : public String {
public:
// Actual string that this ThinString refers to.
- inline String* actual() const;
- inline HeapObject* unchecked_actual() const;
- inline void set_actual(String* s,
+ inline String actual() const;
+ inline HeapObject unchecked_actual() const;
+ inline void set_actual(String s,
WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
V8_EXPORT_PRIVATE uint16_t ThinStringGet(int index);
@@ -644,13 +647,17 @@ class ThinString : public String {
DECL_VERIFIER(ThinString)
// Layout description.
- static const int kActualOffset = String::kHeaderSize;
- static const int kSize = kActualOffset + kPointerSize;
+#define THIN_STRING_FIELDS(V) \
+ V(kActualOffset, kTaggedSize) \
+ /* Total size. */ \
+ V(kSize, 0)
+
+ DEFINE_FIELD_OFFSET_CONSTANTS(String::kHeaderSize, THIN_STRING_FIELDS)
+#undef THIN_STRING_FIELDS
typedef FixedBodyDescriptor<kActualOffset, kSize, kSize> BodyDescriptor;
- private:
- DISALLOW_COPY_AND_ASSIGN(ThinString);
+ OBJECT_CONSTRUCTORS(ThinString, String);
};
// The Sliced String class describes strings that are substrings of another
@@ -667,8 +674,8 @@ class ThinString : public String {
// - truncating sliced string to enable otherwise unneeded parent to be GC'ed.
class SlicedString : public String {
public:
- inline String* parent();
- inline void set_parent(Isolate* isolate, String* parent,
+ inline String parent();
+ inline void set_parent(Isolate* isolate, String parent,
WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
inline int offset() const;
inline void set_offset(int offset);
@@ -679,21 +686,23 @@ class SlicedString : public String {
DECL_CAST(SlicedString)
// Layout description.
- static const int kParentOffset = String::kHeaderSize;
- static const int kOffsetOffset = kParentOffset + kPointerSize;
- static const int kSize = kOffsetOffset + kPointerSize;
+#define SLICED_STRING_FIELDS(V) \
+ V(kParentOffset, kTaggedSize) \
+ V(kOffsetOffset, kTaggedSize) \
+ /* Total size. */ \
+ V(kSize, 0)
+
+ DEFINE_FIELD_OFFSET_CONSTANTS(String::kHeaderSize, SLICED_STRING_FIELDS)
+#undef SLICED_STRING_FIELDS
// Minimum length for a sliced string.
static const int kMinLength = 13;
- typedef FixedBodyDescriptor<kParentOffset, kOffsetOffset + kPointerSize,
- kSize>
- BodyDescriptor;
+ typedef FixedBodyDescriptor<kParentOffset, kSize, kSize> BodyDescriptor;
DECL_VERIFIER(SlicedString)
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(SlicedString);
+ OBJECT_CONSTRUCTORS(SlicedString, String);
};
// The ExternalString class describes string values that are backed by
@@ -710,10 +719,16 @@ class ExternalString : public String {
DECL_CAST(ExternalString)
// Layout description.
- static const int kResourceOffset = String::kHeaderSize;
- static const int kUncachedSize = kResourceOffset + kPointerSize;
- static const int kResourceDataOffset = kResourceOffset + kPointerSize;
- static const int kSize = kResourceDataOffset + kPointerSize;
+#define EXTERNAL_STRING_FIELDS(V) \
+ V(kResourceOffset, kSystemPointerSize) \
+ /* Size of uncached external strings. */ \
+ V(kUncachedSize, 0) \
+ V(kResourceDataOffset, kSystemPointerSize) \
+ /* Total size. */ \
+ V(kSize, 0)
+
+ DEFINE_FIELD_OFFSET_CONSTANTS(String::kHeaderSize, EXTERNAL_STRING_FIELDS)
+#undef EXTERNAL_STRING_FIELDS
// Return whether the external string data pointer is not cached.
inline bool is_uncached() const;
@@ -728,8 +743,7 @@ class ExternalString : public String {
STATIC_ASSERT(kResourceOffset == Internals::kStringResourceOffset);
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalString);
+ OBJECT_CONSTRUCTORS(ExternalString, String);
};
// The ExternalOneByteString class is an external string backed by an
@@ -764,8 +778,7 @@ class ExternalOneByteString : public ExternalString {
class BodyDescriptor;
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalOneByteString);
+ OBJECT_CONSTRUCTORS(ExternalOneByteString, ExternalString);
};
// The ExternalTwoByteString class is an external string backed by a UTF-16
@@ -803,8 +816,7 @@ class ExternalTwoByteString : public ExternalString {
class BodyDescriptor;
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalTwoByteString);
+ OBJECT_CONSTRUCTORS(ExternalTwoByteString, ExternalString);
};
// A flat string reader provides random access to the contents of a
@@ -821,7 +833,7 @@ class FlatStringReader : public Relocatable {
int length() { return length_; }
private:
- String** str_;
+ Address* str_;
bool is_one_byte_;
int length_;
const void* start_;
@@ -833,19 +845,19 @@ class FlatStringReader : public Relocatable {
class ConsStringIterator {
public:
inline ConsStringIterator() = default;
- inline explicit ConsStringIterator(ConsString* cons_string, int offset = 0) {
+ inline explicit ConsStringIterator(ConsString cons_string, int offset = 0) {
Reset(cons_string, offset);
}
- inline void Reset(ConsString* cons_string, int offset = 0) {
+ inline void Reset(ConsString cons_string, int offset = 0) {
depth_ = 0;
    // Next will always return a null String.
- if (cons_string == nullptr) return;
+ if (cons_string.is_null()) return;
Initialize(cons_string, offset);
}
  // Returns a null String when complete.
- inline String* Next(int* offset_out) {
+ inline String Next(int* offset_out) {
*offset_out = 0;
- if (depth_ == 0) return nullptr;
+ if (depth_ == 0) return String();
return Continue(offset_out);
}
@@ -857,20 +869,20 @@ class ConsStringIterator {
"kStackSize must be power of two");
static inline int OffsetForDepth(int depth);
- inline void PushLeft(ConsString* string);
- inline void PushRight(ConsString* string);
+ inline void PushLeft(ConsString string);
+ inline void PushRight(ConsString string);
inline void AdjustMaximumDepth();
inline void Pop();
inline bool StackBlown() { return maximum_depth_ - depth_ == kStackSize; }
- void Initialize(ConsString* cons_string, int offset);
- String* Continue(int* offset_out);
- String* NextLeaf(bool* blew_stack);
- String* Search(int* offset_out);
+ void Initialize(ConsString cons_string, int offset);
+ String Continue(int* offset_out);
+ String NextLeaf(bool* blew_stack);
+ String Search(int* offset_out);
// Stack must always contain only frames for which right traversal
// has not yet been performed.
- ConsString* frames_[kStackSize];
- ConsString* root_;
+ ConsString frames_[kStackSize];
+ ConsString root_;
int depth_;
int maximum_depth_;
int consumed_;
@@ -879,10 +891,10 @@ class ConsStringIterator {
class StringCharacterStream {
public:
- inline explicit StringCharacterStream(String* string, int offset = 0);
+ inline explicit StringCharacterStream(String string, int offset = 0);
inline uint16_t GetNext();
inline bool HasMore();
- inline void Reset(String* string, int offset = 0);
+ inline void Reset(String string, int offset = 0);
inline void VisitOneByteString(const uint8_t* chars, int length);
inline void VisitTwoByteString(const uint16_t* chars, int length);
diff --git a/deps/v8/src/objects/struct-inl.h b/deps/v8/src/objects/struct-inl.h
new file mode 100644
index 0000000000..8a5b53efbb
--- /dev/null
+++ b/deps/v8/src/objects/struct-inl.h
@@ -0,0 +1,71 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_STRUCT_INL_H_
+#define V8_OBJECTS_STRUCT_INL_H_
+
+#include "src/objects/struct.h"
+
+#include "src/heap/heap-write-barrier-inl.h"
+#include "src/objects/oddball.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+OBJECT_CONSTRUCTORS_IMPL(Struct, HeapObject)
+// TODO(jkummerow): Fix IsTuple2() and IsTuple3() to be subclassing-aware,
+// or rethink this more generally (see crbug.com/v8/8516).
+Tuple2::Tuple2(Address ptr) : Struct(ptr) {}
+Tuple3::Tuple3(Address ptr) : Tuple2(ptr) {}
+OBJECT_CONSTRUCTORS_IMPL(AccessorPair, Struct)
+
+CAST_ACCESSOR(AccessorPair)
+CAST_ACCESSOR(Struct)
+CAST_ACCESSOR(Tuple2)
+CAST_ACCESSOR(Tuple3)
+
+void Struct::InitializeBody(int object_size) {
+ Object value = GetReadOnlyRoots().undefined_value();
+ for (int offset = kHeaderSize; offset < object_size; offset += kPointerSize) {
+ WRITE_FIELD(this, offset, value);
+ }
+}
+
+ACCESSORS(Tuple2, value1, Object, kValue1Offset)
+ACCESSORS(Tuple2, value2, Object, kValue2Offset)
+ACCESSORS(Tuple3, value3, Object, kValue3Offset)
+
+ACCESSORS(AccessorPair, getter, Object, kGetterOffset)
+ACCESSORS(AccessorPair, setter, Object, kSetterOffset)
+
+Object AccessorPair::get(AccessorComponent component) {
+ return component == ACCESSOR_GETTER ? getter() : setter();
+}
+
+void AccessorPair::set(AccessorComponent component, Object value) {
+ if (component == ACCESSOR_GETTER) {
+ set_getter(value);
+ } else {
+ set_setter(value);
+ }
+}
+
+void AccessorPair::SetComponents(Object getter, Object setter) {
+ if (!getter->IsNull()) set_getter(getter);
+ if (!setter->IsNull()) set_setter(setter);
+}
+
+bool AccessorPair::Equals(Object getter_value, Object setter_value) {
+ return (getter() == getter_value) && (setter() == setter_value);
+}
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_STRUCT_INL_H_
diff --git a/deps/v8/src/objects/struct.h b/deps/v8/src/objects/struct.h
new file mode 100644
index 0000000000..a790ac2320
--- /dev/null
+++ b/deps/v8/src/objects/struct.h
@@ -0,0 +1,127 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_STRUCT_H_
+#define V8_OBJECTS_STRUCT_H_
+
+#include "src/objects.h"
+#include "src/objects/heap-object.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+// An abstract superclass, a marker class really, for simple structure classes.
+// It doesn't carry much functionality but allows struct classes to be
+// identified in the type system.
+class Struct : public HeapObject {
+ public:
+ inline void InitializeBody(int object_size);
+ DECL_CAST(Struct)
+ void BriefPrintDetails(std::ostream& os);
+
+ OBJECT_CONSTRUCTORS(Struct, HeapObject)
+};
+
+class Tuple2 : public Struct {
+ public:
+ DECL_ACCESSORS(value1, Object)
+ DECL_ACCESSORS(value2, Object)
+
+ DECL_CAST(Tuple2)
+
+ // Dispatched behavior.
+ DECL_PRINTER(Tuple2)
+ DECL_VERIFIER(Tuple2)
+ void BriefPrintDetails(std::ostream& os);
+
+// Layout description.
+#define TUPLE2_FIELDS(V) \
+ V(kValue1Offset, kTaggedSize) \
+ V(kValue2Offset, kTaggedSize) \
+ /* Total size. */ \
+ V(kSize, 0)
+
+ DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize, TUPLE2_FIELDS)
+#undef TUPLE2_FIELDS
+
+ OBJECT_CONSTRUCTORS(Tuple2, Struct);
+};
+
+class Tuple3 : public Tuple2 {
+ public:
+ DECL_ACCESSORS(value3, Object)
+
+ DECL_CAST(Tuple3)
+
+ // Dispatched behavior.
+ DECL_PRINTER(Tuple3)
+ DECL_VERIFIER(Tuple3)
+ void BriefPrintDetails(std::ostream& os);
+
+// Layout description.
+#define TUPLE3_FIELDS(V) \
+ V(kValue3Offset, kTaggedSize) \
+ /* Total size. */ \
+ V(kSize, 0)
+
+ DEFINE_FIELD_OFFSET_CONSTANTS(Tuple2::kSize, TUPLE3_FIELDS)
+#undef TUPLE3_FIELDS
+
+ OBJECT_CONSTRUCTORS(Tuple3, Tuple2);
+};
+
+// Support for JavaScript accessors: A pair of a getter and a setter. Each
+// accessor can either be
+// * a JavaScript function or proxy: a real accessor
+// * a FunctionTemplateInfo: a real (lazy) accessor
+// * undefined: considered an accessor by the spec, too, strangely enough
+// * null: an accessor which has not been set
+class AccessorPair : public Struct {
+ public:
+ DECL_ACCESSORS(getter, Object)
+ DECL_ACCESSORS(setter, Object)
+
+ DECL_CAST(AccessorPair)
+
+ static Handle<AccessorPair> Copy(Isolate* isolate, Handle<AccessorPair> pair);
+
+ inline Object get(AccessorComponent component);
+ inline void set(AccessorComponent component, Object value);
+
+ // Note: Returns undefined if the component is not set.
+ static Handle<Object> GetComponent(Isolate* isolate,
+ Handle<AccessorPair> accessor_pair,
+ AccessorComponent component);
+
+ // Set both components, skipping arguments which are a JavaScript null.
+ inline void SetComponents(Object getter, Object setter);
+
+ inline bool Equals(Object getter_value, Object setter_value);
+
+ // Dispatched behavior.
+ DECL_PRINTER(AccessorPair)
+ DECL_VERIFIER(AccessorPair)
+
+// Layout description.
+#define ACCESSOR_PAIR_FIELDS(V) \
+ V(kGetterOffset, kTaggedSize) \
+ V(kSetterOffset, kTaggedSize) \
+ /* Total size. */ \
+ V(kSize, 0)
+
+ DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize, ACCESSOR_PAIR_FIELDS)
+#undef ACCESSOR_PAIR_FIELDS
+
+ OBJECT_CONSTRUCTORS(AccessorPair, Struct);
+};
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_STRUCT_H_
diff --git a/deps/v8/src/objects/template-objects.h b/deps/v8/src/objects/template-objects.h
index ad8ff95950..e24deabd8d 100644
--- a/deps/v8/src/objects/template-objects.h
+++ b/deps/v8/src/objects/template-objects.h
@@ -7,6 +7,7 @@
#include "src/objects.h"
#include "src/objects/hash-table.h"
+#include "src/objects/struct.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -31,8 +32,7 @@ class TemplateObjectDescription final : public Tuple2 {
static constexpr int kRawStringsOffset = kValue1Offset;
static constexpr int kCookedStringsOffset = kValue2Offset;
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(TemplateObjectDescription);
+ OBJECT_CONSTRUCTORS(TemplateObjectDescription, Tuple2);
};
} // namespace internal
diff --git a/deps/v8/src/objects/templates-inl.h b/deps/v8/src/objects/templates-inl.h
index 6473cfd1cf..c42353b249 100644
--- a/deps/v8/src/objects/templates-inl.h
+++ b/deps/v8/src/objects/templates-inl.h
@@ -16,6 +16,13 @@
namespace v8 {
namespace internal {
+OBJECT_CONSTRUCTORS_IMPL(TemplateInfo, Struct)
+OBJECT_CONSTRUCTORS_IMPL(FunctionTemplateInfo, TemplateInfo)
+OBJECT_CONSTRUCTORS_IMPL(ObjectTemplateInfo, TemplateInfo)
+OBJECT_CONSTRUCTORS_IMPL(FunctionTemplateRareData, Struct)
+
+NEVER_READ_ONLY_SPACE_IMPL(TemplateInfo)
+
ACCESSORS(TemplateInfo, tag, Object, kTagOffset)
ACCESSORS(TemplateInfo, serial_number, Object, kSerialNumberOffset)
SMI_ACCESSORS(TemplateInfo, number_of_properties, kNumberOfProperties)
@@ -23,25 +30,12 @@ ACCESSORS(TemplateInfo, property_list, Object, kPropertyListOffset)
ACCESSORS(TemplateInfo, property_accessors, Object, kPropertyAccessorsOffset)
ACCESSORS(FunctionTemplateInfo, call_code, Object, kCallCodeOffset)
-ACCESSORS(FunctionTemplateInfo, prototype_template, Object,
- kPrototypeTemplateOffset)
-ACCESSORS(FunctionTemplateInfo, prototype_provider_template, Object,
- kPrototypeProviderTemplateOffset)
-ACCESSORS(FunctionTemplateInfo, parent_template, Object, kParentTemplateOffset)
-ACCESSORS(FunctionTemplateInfo, named_property_handler, Object,
- kNamedPropertyHandlerOffset)
-ACCESSORS(FunctionTemplateInfo, indexed_property_handler, Object,
- kIndexedPropertyHandlerOffset)
-ACCESSORS(FunctionTemplateInfo, instance_template, Object,
- kInstanceTemplateOffset)
ACCESSORS(FunctionTemplateInfo, class_name, Object, kClassNameOffset)
ACCESSORS(FunctionTemplateInfo, signature, Object, kSignatureOffset)
-ACCESSORS(FunctionTemplateInfo, instance_call_handler, Object,
- kInstanceCallHandlerOffset)
-ACCESSORS(FunctionTemplateInfo, access_check_info, Object,
- kAccessCheckInfoOffset)
ACCESSORS(FunctionTemplateInfo, shared_function_info, Object,
kSharedFunctionInfoOffset)
+ACCESSORS(FunctionTemplateInfo, rare_data, HeapObject,
+ kFunctionTemplateRareDataOffset)
ACCESSORS(FunctionTemplateInfo, cached_property_name, Object,
kCachedPropertyNameOffset)
SMI_ACCESSORS(FunctionTemplateInfo, length, kLengthOffset)
@@ -59,11 +53,65 @@ BOOL_ACCESSORS(FunctionTemplateInfo, flag, accept_any_receiver,
kAcceptAnyReceiver)
SMI_ACCESSORS(FunctionTemplateInfo, flag, kFlagOffset)
+// static
+FunctionTemplateRareData FunctionTemplateInfo::EnsureFunctionTemplateRareData(
+ Isolate* isolate, Handle<FunctionTemplateInfo> function_template_info) {
+ HeapObject extra = function_template_info->rare_data();
+ if (extra->IsUndefined(isolate)) {
+ return AllocateFunctionTemplateRareData(isolate, function_template_info);
+ } else {
+ return FunctionTemplateRareData::cast(extra);
+ }
+}
+
+#define RARE_ACCESSORS(Name, CamelName, Type) \
+ Type FunctionTemplateInfo::Get##CamelName() { \
+ HeapObject extra = rare_data(); \
+ HeapObject undefined = GetReadOnlyRoots().undefined_value(); \
+ return extra == undefined ? undefined \
+ : FunctionTemplateRareData::cast(extra)->Name(); \
+ } \
+ inline void FunctionTemplateInfo::Set##CamelName( \
+ Isolate* isolate, Handle<FunctionTemplateInfo> function_template_info, \
+ Handle<Type> Name) { \
+ FunctionTemplateRareData rare_data = \
+ EnsureFunctionTemplateRareData(isolate, function_template_info); \
+ rare_data->set_##Name(*Name); \
+ }
+
+RARE_ACCESSORS(prototype_template, PrototypeTemplate, Object)
+RARE_ACCESSORS(prototype_provider_template, PrototypeProviderTemplate, Object)
+RARE_ACCESSORS(parent_template, ParentTemplate, Object)
+RARE_ACCESSORS(named_property_handler, NamedPropertyHandler, Object)
+RARE_ACCESSORS(indexed_property_handler, IndexedPropertyHandler, Object)
+RARE_ACCESSORS(instance_template, InstanceTemplate, Object)
+RARE_ACCESSORS(instance_call_handler, InstanceCallHandler, Object)
+RARE_ACCESSORS(access_check_info, AccessCheckInfo, Object)
+#undef RARE_ACCESSORS
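RARE_ACCESSORS moves rarely-set members off FunctionTemplateInfo into a side object that is only allocated on the first write; getters fall back to undefined while it is absent. A standalone sketch of that lazy side-object pattern (RareData and TemplateInfoSketch are hypothetical; V8 allocates on its heap via the isolate and stores undefined rather than a null pointer):

#include <memory>
#include <string>
#include <utility>

struct RareData {
  std::string parent_template;  // a rarely-set field lives out of line
};

class TemplateInfoSketch {
 public:
  // Getter: tolerate the side object being absent.
  std::string GetParentTemplate() const {
    return rare_ ? rare_->parent_template : std::string();  // "undefined"
  }
  // Setter: allocate the side object on first use, then write through.
  void SetParentTemplate(std::string value) {
    if (!rare_) rare_ = std::make_unique<RareData>();
    rare_->parent_template = std::move(value);
  }

 private:
  std::unique_ptr<RareData> rare_;  // empty until a rare field is first set
};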
+
+ACCESSORS(FunctionTemplateRareData, prototype_template, Object,
+ kPrototypeTemplateOffset)
+ACCESSORS(FunctionTemplateRareData, prototype_provider_template, Object,
+ kPrototypeProviderTemplateOffset)
+ACCESSORS(FunctionTemplateRareData, parent_template, Object,
+ kParentTemplateOffset)
+ACCESSORS(FunctionTemplateRareData, named_property_handler, Object,
+ kNamedPropertyHandlerOffset)
+ACCESSORS(FunctionTemplateRareData, indexed_property_handler, Object,
+ kIndexedPropertyHandlerOffset)
+ACCESSORS(FunctionTemplateRareData, instance_template, Object,
+ kInstanceTemplateOffset)
+ACCESSORS(FunctionTemplateRareData, instance_call_handler, Object,
+ kInstanceCallHandlerOffset)
+ACCESSORS(FunctionTemplateRareData, access_check_info, Object,
+ kAccessCheckInfoOffset)
+
ACCESSORS(ObjectTemplateInfo, constructor, Object, kConstructorOffset)
ACCESSORS(ObjectTemplateInfo, data, Object, kDataOffset)
CAST_ACCESSOR(TemplateInfo)
CAST_ACCESSOR(FunctionTemplateInfo)
+CAST_ACCESSOR(FunctionTemplateRareData)
CAST_ACCESSOR(ObjectTemplateInfo)
bool FunctionTemplateInfo::instantiated() {
@@ -71,37 +119,37 @@ bool FunctionTemplateInfo::instantiated() {
}
bool FunctionTemplateInfo::BreakAtEntry() {
- Object* maybe_shared = shared_function_info();
+ Object maybe_shared = shared_function_info();
if (maybe_shared->IsSharedFunctionInfo()) {
- SharedFunctionInfo* shared = SharedFunctionInfo::cast(maybe_shared);
+ SharedFunctionInfo shared = SharedFunctionInfo::cast(maybe_shared);
return shared->BreakAtEntry();
}
return false;
}
-FunctionTemplateInfo* FunctionTemplateInfo::GetParent(Isolate* isolate) {
- Object* parent = parent_template();
- return parent->IsUndefined(isolate) ? nullptr
+FunctionTemplateInfo FunctionTemplateInfo::GetParent(Isolate* isolate) {
+ Object parent = GetParentTemplate();
+ return parent->IsUndefined(isolate) ? FunctionTemplateInfo()
: FunctionTemplateInfo::cast(parent);
}
-ObjectTemplateInfo* ObjectTemplateInfo::GetParent(Isolate* isolate) {
- Object* maybe_ctor = constructor();
- if (maybe_ctor->IsUndefined(isolate)) return nullptr;
- FunctionTemplateInfo* constructor = FunctionTemplateInfo::cast(maybe_ctor);
+ObjectTemplateInfo ObjectTemplateInfo::GetParent(Isolate* isolate) {
+ Object maybe_ctor = constructor();
+ if (maybe_ctor->IsUndefined(isolate)) return ObjectTemplateInfo();
+ FunctionTemplateInfo constructor = FunctionTemplateInfo::cast(maybe_ctor);
while (true) {
constructor = constructor->GetParent(isolate);
- if (constructor == nullptr) return nullptr;
- Object* maybe_obj = constructor->instance_template();
+ if (constructor.is_null()) return ObjectTemplateInfo();
+ Object maybe_obj = constructor->GetInstanceTemplate();
if (!maybe_obj->IsUndefined(isolate)) {
return ObjectTemplateInfo::cast(maybe_obj);
}
}
- return nullptr;
+ return ObjectTemplateInfo();
}
int ObjectTemplateInfo::embedder_field_count() const {
- Object* value = data();
+ Object value = data();
DCHECK(value->IsSmi());
return EmbedderFieldCount::decode(Smi::ToInt(value));
}
@@ -113,7 +161,7 @@ void ObjectTemplateInfo::set_embedder_field_count(int count) {
}
bool ObjectTemplateInfo::immutable_proto() const {
- Object* value = data();
+ Object value = data();
DCHECK(value->IsSmi());
return IsImmutablePrototype::decode(Smi::ToInt(value));
}
@@ -123,7 +171,7 @@ void ObjectTemplateInfo::set_immutable_proto(bool immutable) {
IsImmutablePrototype::update(Smi::ToInt(data()), immutable)));
}
-bool FunctionTemplateInfo::IsTemplateFor(JSObject* object) {
+bool FunctionTemplateInfo::IsTemplateFor(JSObject object) {
return IsTemplateFor(object->map());
}
diff --git a/deps/v8/src/objects/templates.h b/deps/v8/src/objects/templates.h
index 24cbd18bd2..de75f5de80 100644
--- a/deps/v8/src/objects/templates.h
+++ b/deps/v8/src/objects/templates.h
@@ -5,7 +5,7 @@
#ifndef V8_OBJECTS_TEMPLATES_H_
#define V8_OBJECTS_TEMPLATES_H_
-#include "src/objects.h"
+#include "src/objects/struct.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -13,8 +13,9 @@
namespace v8 {
namespace internal {
-class TemplateInfo : public Struct, public NeverReadOnlySpaceObject {
+class TemplateInfo : public Struct {
public:
+ NEVER_READ_ONLY_SPACE
DECL_ACCESSORS(tag, Object)
DECL_ACCESSORS(serial_number, Object)
DECL_INT_ACCESSORS(number_of_properties)
@@ -25,13 +26,18 @@ class TemplateInfo : public Struct, public NeverReadOnlySpaceObject {
DECL_CAST(TemplateInfo)
- static const int kTagOffset = HeapObject::kHeaderSize;
- static const int kSerialNumberOffset = kTagOffset + kPointerSize;
- static const int kNumberOfProperties = kSerialNumberOffset + kPointerSize;
- static const int kPropertyListOffset = kNumberOfProperties + kPointerSize;
- static const int kPropertyAccessorsOffset =
- kPropertyListOffset + kPointerSize;
- static const int kHeaderSize = kPropertyAccessorsOffset + kPointerSize;
+ // Layout description.
+#define TEMPLATE_INFO_FIELDS(V) \
+ V(kTagOffset, kTaggedSize) \
+ V(kSerialNumberOffset, kTaggedSize) \
+ V(kNumberOfProperties, kTaggedSize) \
+ V(kPropertyListOffset, kTaggedSize) \
+ V(kPropertyAccessorsOffset, kTaggedSize) \
+ /* Header size. */ \
+ V(kHeaderSize, 0)
+
+ DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize, TEMPLATE_INFO_FIELDS)
+#undef TEMPLATE_INFO_FIELDS
static const int kFastTemplateInstantiationsCacheSize = 1 * KB;
@@ -40,8 +46,45 @@ class TemplateInfo : public Struct, public NeverReadOnlySpaceObject {
// instead of caching them.
static const int kSlowTemplateInstantiationsCacheSize = 1 * MB;
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(TemplateInfo);
+ OBJECT_CONSTRUCTORS(TemplateInfo, Struct);
+};
+
+// Contains data members that are rarely set on a FunctionTemplateInfo.
+class FunctionTemplateRareData : public Struct {
+ public:
+ // See DECL_RARE_ACCESSORS in FunctionTemplateInfo.
+ DECL_ACCESSORS(prototype_template, Object)
+ DECL_ACCESSORS(prototype_provider_template, Object)
+ DECL_ACCESSORS(parent_template, Object)
+ DECL_ACCESSORS(named_property_handler, Object)
+ DECL_ACCESSORS(indexed_property_handler, Object)
+ DECL_ACCESSORS(instance_template, Object)
+ DECL_ACCESSORS(instance_call_handler, Object)
+ DECL_ACCESSORS(access_check_info, Object)
+
+ DECL_CAST(FunctionTemplateRareData)
+
+ // Dispatched behavior.
+ DECL_PRINTER(FunctionTemplateRareData)
+ DECL_VERIFIER(FunctionTemplateRareData)
+
+ // Layout description.
+#define SYMBOL_FIELDS(V) \
+ V(kPrototypeTemplateOffset, kTaggedSize) \
+ V(kPrototypeProviderTemplateOffset, kTaggedSize) \
+ V(kParentTemplateOffset, kTaggedSize) \
+ V(kNamedPropertyHandlerOffset, kTaggedSize) \
+ V(kIndexedPropertyHandlerOffset, kTaggedSize) \
+ V(kInstanceTemplateOffset, kTaggedSize) \
+ V(kInstanceCallHandlerOffset, kTaggedSize) \
+ V(kAccessCheckInfoOffset, kTaggedSize) \
+ /* Total size. */ \
+ V(kSize, 0)
+
+ DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize, SYMBOL_FIELDS)
+#undef SYMBOL_FIELDS
+
+ OBJECT_CONSTRUCTORS(FunctionTemplateRareData, Struct);
};
// See the api-exposed FunctionTemplate for more information.
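
The TEMPLATE_INFO_FIELDS / SYMBOL_FIELDS lists above replace the hand-chained kFooOffset = kBarOffset + kPointerSize constants with a single list macro from which DEFINE_FIELD_OFFSET_CONSTANTS derives the running offsets (note the simultaneous kPointerSize-to-kTaggedSize switch). A compilable sketch of the general X-macro technique, using illustrative macro names rather than V8's real definition:

    // Each V(Name, Size) entry contributes two enumerators, Name and
    // Name##End, so the next field starts where the previous one ended.
    #define DECLARE_OFFSET(Name, Size) Name, Name##End = Name + (Size)-1,

    #define DEFINE_OFFSETS(StartOffset, LIST) \
      enum {                                  \
        kOffsetsStart = (StartOffset)-1,      \
        LIST(DECLARE_OFFSET)                  \
      };

    constexpr int kTaggedSize = sizeof(void*);  // assumption: no compression

    #define EXAMPLE_FIELDS(V)             \
      V(kTagOffset, kTaggedSize)          \
      V(kSerialNumberOffset, kTaggedSize) \
      /* Total size. */                   \
      V(kSize, 0)

    DEFINE_OFFSETS(kTaggedSize /* header */, EXAMPLE_FIELDS)

    static_assert(kTagOffset == kTaggedSize, "first field follows the header");
    static_assert(kSerialNumberOffset == 2 * kTaggedSize, "offsets accumulate");
    static_assert(kSize == 3 * kTaggedSize, "zero-size tail entry = total size");

A trailing zero-size entry such as V(kSize, 0) or V(kHeaderSize, 0) therefore yields the object's total size for free, as in the lists above.
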
@@ -51,45 +94,59 @@ class FunctionTemplateInfo : public TemplateInfo {
  // Either CallHandlerInfo or Undefined.
DECL_ACCESSORS(call_code, Object)
+ DECL_ACCESSORS(class_name, Object)
+
+ // If the signature is a FunctionTemplateInfo it is used to check whether the
+ // receiver calling the associated JSFunction is a compatible receiver, i.e.
+ // it is an instance of the signature FunctionTemplateInfo or any of the
+ // receiver's prototypes are.
+ DECL_ACCESSORS(signature, Object)
+
+ // If any of the setters below declared by DECL_RARE_ACCESSORS are used then
+ // a FunctionTemplateRareData will be stored here. Until then this contains
+ // undefined.
+ DECL_ACCESSORS(rare_data, HeapObject)
+
+#define DECL_RARE_ACCESSORS(Name, CamelName, Type) \
+ inline Type Get##CamelName(); \
+ static inline void Set##CamelName( \
+ Isolate* isolate, Handle<FunctionTemplateInfo> function_template_info, \
+ Handle<Type> Name);
+
// ObjectTemplateInfo or Undefined, used for the prototype property of the
// resulting JSFunction instance of this FunctionTemplate.
- DECL_ACCESSORS(prototype_template, Object)
+ DECL_RARE_ACCESSORS(prototype_template, PrototypeTemplate, Object)
// In the case the prototype_template is Undefined we use the
- // protoype_provider_template to retrieve the instance prototype. Either
+ // prototype_provider_template to retrieve the instance prototype. Either
// contains an ObjectTemplateInfo or Undefined.
- DECL_ACCESSORS(prototype_provider_template, Object)
+ DECL_RARE_ACCESSORS(prototype_provider_template, PrototypeProviderTemplate,
+ Object)
- // Used to create protoype chains. The parent_template's prototype is set as
+ // Used to create prototype chains. The parent_template's prototype is set as
// __proto__ of this FunctionTemplate's instance prototype. Is either a
// FunctionTemplateInfo or Undefined.
- DECL_ACCESSORS(parent_template, Object)
+ DECL_RARE_ACCESSORS(parent_template, ParentTemplate, Object)
// Returns an InterceptorInfo or Undefined for named properties.
- DECL_ACCESSORS(named_property_handler, Object)
+ DECL_RARE_ACCESSORS(named_property_handler, NamedPropertyHandler, Object)
// Returns an InterceptorInfo or Undefined for indexed properties/elements.
- DECL_ACCESSORS(indexed_property_handler, Object)
+ DECL_RARE_ACCESSORS(indexed_property_handler, IndexedPropertyHandler, Object)
// An ObjectTemplateInfo that is used when instantiating the JSFunction
// associated with this FunctionTemplateInfo. Contains either an
// ObjectTemplateInfo or Undefined. A default instance_template is assigned
// upon first instantiation if it's Undefined.
- DECL_ACCESSORS(instance_template, Object)
-
- DECL_ACCESSORS(class_name, Object)
-
- // If the signature is a FunctionTemplateInfo it is used to check whether the
- // receiver calling the associated JSFunction is a compatible receiver, i.e.
- // it is an instance of the signare FunctionTemplateInfo or any of the
- // receiver's prototypes are.
- DECL_ACCESSORS(signature, Object)
+ DECL_RARE_ACCESSORS(instance_template, InstanceTemplate, Object)
// Either a CallHandlerInfo or Undefined. If an instance_call_handler is
// provided the instances created from the associated JSFunction are marked as
// callable.
- DECL_ACCESSORS(instance_call_handler, Object)
+ DECL_RARE_ACCESSORS(instance_call_handler, InstanceCallHandler, Object)
+
+ DECL_RARE_ACCESSORS(access_check_info, AccessCheckInfo, Object)
+#undef DECL_RARE_ACCESSORS
- DECL_ACCESSORS(access_check_info, Object)
DECL_ACCESSORS(shared_function_info, Object)
// Internal field to store a flag bitfield.
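
Expanding the DECL_RARE_ACCESSORS definition above for a single entry makes the generated interface concrete; DECL_RARE_ACCESSORS(prototype_template, PrototypeTemplate, Object) declares exactly:

    inline Object GetPrototypeTemplate();
    static inline void SetPrototypeTemplate(
        Isolate* isolate, Handle<FunctionTemplateInfo> function_template_info,
        Handle<Object> prototype_template);

Reads go through the getter so they can fall back to undefined while no FunctionTemplateRareData has been allocated; writes go through the static setter so it can allocate the rare-data struct on first use (see EnsureFunctionTemplateRareData below).
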
@@ -134,38 +191,31 @@ class FunctionTemplateInfo : public TemplateInfo {
static const int kInvalidSerialNumber = 0;
- static const int kCallCodeOffset = TemplateInfo::kHeaderSize;
- static const int kPrototypeTemplateOffset = kCallCodeOffset + kPointerSize;
- static const int kPrototypeProviderTemplateOffset =
- kPrototypeTemplateOffset + kPointerSize;
- static const int kParentTemplateOffset =
- kPrototypeProviderTemplateOffset + kPointerSize;
- static const int kNamedPropertyHandlerOffset =
- kParentTemplateOffset + kPointerSize;
- static const int kIndexedPropertyHandlerOffset =
- kNamedPropertyHandlerOffset + kPointerSize;
- static const int kInstanceTemplateOffset =
- kIndexedPropertyHandlerOffset + kPointerSize;
- static const int kClassNameOffset = kInstanceTemplateOffset + kPointerSize;
- static const int kSignatureOffset = kClassNameOffset + kPointerSize;
- static const int kInstanceCallHandlerOffset = kSignatureOffset + kPointerSize;
- static const int kAccessCheckInfoOffset =
- kInstanceCallHandlerOffset + kPointerSize;
- static const int kSharedFunctionInfoOffset =
- kAccessCheckInfoOffset + kPointerSize;
- static const int kFlagOffset = kSharedFunctionInfoOffset + kPointerSize;
- static const int kLengthOffset = kFlagOffset + kPointerSize;
- static const int kCachedPropertyNameOffset = kLengthOffset + kPointerSize;
- static const int kSize = kCachedPropertyNameOffset + kPointerSize;
+ // Layout description.
+#define FUNCTION_TEMPLATE_INFO_FIELDS(V) \
+ V(kCallCodeOffset, kTaggedSize) \
+ V(kClassNameOffset, kTaggedSize) \
+ V(kSignatureOffset, kTaggedSize) \
+ V(kFunctionTemplateRareDataOffset, kTaggedSize) \
+ V(kSharedFunctionInfoOffset, kTaggedSize) \
+ V(kFlagOffset, kTaggedSize) \
+ V(kLengthOffset, kTaggedSize) \
+ V(kCachedPropertyNameOffset, kTaggedSize) \
+ /* Total size. */ \
+ V(kSize, 0)
+
+ DEFINE_FIELD_OFFSET_CONSTANTS(TemplateInfo::kHeaderSize,
+ FUNCTION_TEMPLATE_INFO_FIELDS)
+#undef FUNCTION_TEMPLATE_INFO_FIELDS
static Handle<SharedFunctionInfo> GetOrCreateSharedFunctionInfo(
Isolate* isolate, Handle<FunctionTemplateInfo> info,
MaybeHandle<Name> maybe_name);
- // Returns parent function template or null.
- inline FunctionTemplateInfo* GetParent(Isolate* isolate);
+ // Returns parent function template or a null FunctionTemplateInfo.
+ inline FunctionTemplateInfo GetParent(Isolate* isolate);
// Returns true if |object| is an instance of this function template.
- inline bool IsTemplateFor(JSObject* object);
- bool IsTemplateFor(Map* map);
+ inline bool IsTemplateFor(JSObject object);
+ bool IsTemplateFor(Map map);
inline bool instantiated();
inline bool BreakAtEntry();
@@ -175,6 +225,12 @@ class FunctionTemplateInfo : public TemplateInfo {
Handle<Object> getter);
private:
+ static inline FunctionTemplateRareData EnsureFunctionTemplateRareData(
+ Isolate* isolate, Handle<FunctionTemplateInfo> function_template_info);
+
+ static FunctionTemplateRareData AllocateFunctionTemplateRareData(
+ Isolate* isolate, Handle<FunctionTemplateInfo> function_template_info);
+
// Bit position in the flag, from least significant bit position.
static const int kHiddenPrototypeBit = 0;
static const int kUndetectableBit = 1;
@@ -184,7 +240,7 @@ class FunctionTemplateInfo : public TemplateInfo {
static const int kDoNotCacheBit = 5;
static const int kAcceptAnyReceiver = 6;
- DISALLOW_IMPLICIT_CONSTRUCTORS(FunctionTemplateInfo);
+ OBJECT_CONSTRUCTORS(FunctionTemplateInfo, TemplateInfo);
};
class ObjectTemplateInfo : public TemplateInfo {
@@ -200,19 +256,28 @@ class ObjectTemplateInfo : public TemplateInfo {
DECL_PRINTER(ObjectTemplateInfo)
DECL_VERIFIER(ObjectTemplateInfo)
- static const int kConstructorOffset = TemplateInfo::kHeaderSize;
- // LSB is for immutable_proto, higher bits for embedder_field_count
- static const int kDataOffset = kConstructorOffset + kPointerSize;
- static const int kSize = kDataOffset + kPointerSize;
+ // Layout description.
+#define OBJECT_TEMPLATE_INFO_FIELDS(V) \
+ V(kConstructorOffset, kTaggedSize) \
+ /* LSB is for immutable_proto, higher bits for embedder_field_count */ \
+ V(kDataOffset, kTaggedSize) \
+ /* Total size. */ \
+ V(kSize, 0)
+
+ DEFINE_FIELD_OFFSET_CONSTANTS(TemplateInfo::kHeaderSize,
+ OBJECT_TEMPLATE_INFO_FIELDS)
+#undef OBJECT_TEMPLATE_INFO_FIELDS
// Starting from given object template's constructor walk up the inheritance
// chain till a function template that has an instance template is found.
- inline ObjectTemplateInfo* GetParent(Isolate* isolate);
+ inline ObjectTemplateInfo GetParent(Isolate* isolate);
private:
class IsImmutablePrototype : public BitField<bool, 0, 1> {};
class EmbedderFieldCount
: public BitField<int, IsImmutablePrototype::kNext, 29> {};
+
+ OBJECT_CONSTRUCTORS(ObjectTemplateInfo, TemplateInfo)
};
} // namespace internal
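
The net effect of the rare-data split is easiest to see outside the macro machinery: eight rarely set slots (prototype_template through access_check_info) collapse into the single rare_data slot, shrinking FunctionTemplateInfo from fifteen tagged fields to eight. A self-contained analogue of the pattern, using std::unique_ptr where V8 uses a lazily allocated heap Struct (all names illustrative):

    #include <memory>
    #include <string>

    // Fields that are rarely set live in a lazily allocated side struct,
    // so the common-case object stays small.
    struct RareData {
      std::string prototype_template;  // stand-ins for the rare members
      std::string parent_template;
    };

    class TemplateInfoLike {
     public:
      // Getter falls back to a default when no rare data was ever allocated.
      std::string prototype_template() const {
        return rare_data_ ? rare_data_->prototype_template : std::string();
      }

      // Setter allocates the side struct on first use only.
      void set_prototype_template(std::string value) {
        EnsureRareData().prototype_template = std::move(value);
      }

     private:
      RareData& EnsureRareData() {
        if (!rare_data_) rare_data_ = std::make_unique<RareData>();
        return *rare_data_;
      }
      std::unique_ptr<RareData> rare_data_;  // null until a rare field is set
    };
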
diff --git a/deps/v8/src/optimized-compilation-info.cc b/deps/v8/src/optimized-compilation-info.cc
index d6293c2228..1ec908f4b0 100644
--- a/deps/v8/src/optimized-compilation-info.cc
+++ b/deps/v8/src/optimized-compilation-info.cc
@@ -10,6 +10,7 @@
#include "src/objects-inl.h"
#include "src/objects/shared-function-info.h"
#include "src/source-position.h"
+#include "src/wasm/function-compiler.h"
namespace v8 {
namespace internal {
@@ -18,6 +19,8 @@ OptimizedCompilationInfo::OptimizedCompilationInfo(
Zone* zone, Isolate* isolate, Handle<SharedFunctionInfo> shared,
Handle<JSFunction> closure)
: OptimizedCompilationInfo(Code::OPTIMIZED_FUNCTION, zone) {
+ DCHECK(shared->is_compiled());
+ bytecode_array_ = handle(shared->GetBytecodeArray(), isolate);
shared_info_ = shared;
closure_ = closure;
optimization_id_ = isolate->NextOptimizationId();
@@ -77,9 +80,11 @@ void OptimizedCompilationInfo::ConfigureFlags() {
MarkAsSourcePositionsEnabled();
#endif // ENABLE_GDB_JIT_INTERFACE && DEBUG
break;
- default:
+ case Code::WASM_FUNCTION:
SetFlag(kSwitchJumpTableEnabled);
break;
+ default:
+ break;
}
}
@@ -105,6 +110,9 @@ void OptimizedCompilationInfo::ReopenHandlesInNewHandleScope(Isolate* isolate) {
if (!shared_info_.is_null()) {
shared_info_ = Handle<SharedFunctionInfo>(*shared_info_, isolate);
}
+ if (!bytecode_array_.is_null()) {
+ bytecode_array_ = Handle<BytecodeArray>(*bytecode_array_, isolate);
+ }
if (!closure_.is_null()) {
closure_ = Handle<JSFunction>(*closure_, isolate);
}
@@ -142,34 +150,49 @@ StackFrame::Type OptimizedCompilationInfo::GetOutputStackFrameType() const {
}
}
+void OptimizedCompilationInfo::SetWasmCompilationResult(
+ std::unique_ptr<wasm::WasmCompilationResult> wasm_compilation_result) {
+ wasm_compilation_result_ = std::move(wasm_compilation_result);
+}
+
+std::unique_ptr<wasm::WasmCompilationResult>
+OptimizedCompilationInfo::ReleaseWasmCompilationResult() {
+ return std::move(wasm_compilation_result_);
+}
+
bool OptimizedCompilationInfo::has_context() const {
return !closure().is_null();
}
-Context* OptimizedCompilationInfo::context() const {
- return has_context() ? closure()->context() : nullptr;
+Context OptimizedCompilationInfo::context() const {
+ DCHECK(has_context());
+ return closure()->context();
}
bool OptimizedCompilationInfo::has_native_context() const {
- return !closure().is_null() && (closure()->native_context() != nullptr);
+ return !closure().is_null() && !closure()->native_context().is_null();
}
-Context* OptimizedCompilationInfo::native_context() const {
- return has_native_context() ? closure()->native_context() : nullptr;
+Context OptimizedCompilationInfo::native_context() const {
+ DCHECK(has_native_context());
+ return closure()->native_context();
}
bool OptimizedCompilationInfo::has_global_object() const {
return has_native_context();
}
-JSGlobalObject* OptimizedCompilationInfo::global_object() const {
- return has_global_object() ? native_context()->global_object() : nullptr;
+JSGlobalObject OptimizedCompilationInfo::global_object() const {
+ DCHECK(has_global_object());
+ return native_context()->global_object();
}
int OptimizedCompilationInfo::AddInlinedFunction(
- Handle<SharedFunctionInfo> inlined_function, SourcePosition pos) {
+ Handle<SharedFunctionInfo> inlined_function,
+ Handle<BytecodeArray> inlined_bytecode, SourcePosition pos) {
int id = static_cast<int>(inlined_functions_.size());
- inlined_functions_.push_back(InlinedFunctionHolder(inlined_function, pos));
+ inlined_functions_.push_back(
+ InlinedFunctionHolder(inlined_function, inlined_bytecode, pos));
return id;
}
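
SetWasmCompilationResult and ReleaseWasmCompilationResult above are a plain unique-ownership hand-off: the compilation info holds the result until exactly one consumer takes it, after which the info keeps nothing behind. Sketched with stand-in types (not the wasm::WasmCompilationResult API):

    #include <cassert>
    #include <memory>
    #include <utility>

    struct CompilationResult {  // illustrative stand-in
      int code_size = 0;
    };

    class CompilationInfo {
     public:
      // Takes sole ownership of the result produced by the backend.
      void SetResult(std::unique_ptr<CompilationResult> result) {
        result_ = std::move(result);
      }

      // Hands ownership to the caller; result_ becomes null.
      std::unique_ptr<CompilationResult> ReleaseResult() {
        return std::move(result_);
      }

     private:
      std::unique_ptr<CompilationResult> result_;
    };

    int main() {
      CompilationInfo info;
      info.SetResult(std::make_unique<CompilationResult>());
      auto result = info.ReleaseResult();
      assert(result != nullptr);
      assert(info.ReleaseResult() == nullptr);  // second release yields nothing
    }
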
diff --git a/deps/v8/src/optimized-compilation-info.h b/deps/v8/src/optimized-compilation-info.h
index 37232a2f06..69376bc72c 100644
--- a/deps/v8/src/optimized-compilation-info.h
+++ b/deps/v8/src/optimized-compilation-info.h
@@ -8,8 +8,6 @@
#include <memory>
#include "src/bailout-reason.h"
-#include "src/code-reference.h"
-#include "src/feedback-vector.h"
#include "src/frames.h"
#include "src/globals.h"
#include "src/handles.h"
@@ -21,17 +19,17 @@
namespace v8 {
namespace internal {
-class CoverageInfo;
-class DeclarationScope;
class DeferredHandles;
class FunctionLiteral;
class Isolate;
class JavaScriptFrame;
class JSGlobalObject;
-class ParseInfo;
-class SourceRangeMap;
class Zone;
+namespace wasm {
+struct WasmCompilationResult;
+}
+
// OptimizedCompilationInfo encapsulates the information needed to compile
// optimized code for a given function, and the results of the optimized
// compilation.
@@ -74,15 +72,11 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final {
bool is_osr() const { return !osr_offset_.IsNone(); }
Handle<SharedFunctionInfo> shared_info() const { return shared_info_; }
bool has_shared_info() const { return !shared_info().is_null(); }
+ Handle<BytecodeArray> bytecode_array() const { return bytecode_array_; }
+ bool has_bytecode_array() const { return !bytecode_array_.is_null(); }
Handle<JSFunction> closure() const { return closure_; }
- Handle<Code> code() const { return code_.as_js_code(); }
-
- wasm::WasmCode* wasm_code() const {
- return const_cast<wasm::WasmCode*>(code_.as_wasm_code());
- }
+ Handle<Code> code() const { return code_; }
Code::Kind code_kind() const { return code_kind_; }
- uint32_t stub_key() const { return stub_key_; }
- void set_stub_key(uint32_t stub_key) { stub_key_ = stub_key; }
int32_t builtin_index() const { return builtin_index_; }
void set_builtin_index(int32_t index) { builtin_index_ = index; }
BailoutId osr_offset() const { return osr_offset_; }
@@ -182,24 +176,24 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final {
// Code getters and setters.
- template <typename T>
- void SetCode(T code) {
- code_ = CodeReference(code);
- }
+ void SetCode(Handle<Code> code) { code_ = code; }
+
+ void SetWasmCompilationResult(std::unique_ptr<wasm::WasmCompilationResult>);
+ std::unique_ptr<wasm::WasmCompilationResult> ReleaseWasmCompilationResult();
bool has_context() const;
- Context* context() const;
+ Context context() const;
bool has_native_context() const;
- Context* native_context() const;
+ Context native_context() const;
bool has_global_object() const;
- JSGlobalObject* global_object() const;
+ JSGlobalObject global_object() const;
// Accessors for the different compilation modes.
bool IsOptimizing() const { return code_kind() == Code::OPTIMIZED_FUNCTION; }
bool IsWasm() const { return code_kind() == Code::WASM_FUNCTION; }
- bool IsStub() const {
+ bool IsNotOptimizedFunctionOrWasmFunction() const {
return code_kind() != Code::OPTIMIZED_FUNCTION &&
code_kind() != Code::WASM_FUNCTION;
}
@@ -238,12 +232,14 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final {
struct InlinedFunctionHolder {
Handle<SharedFunctionInfo> shared_info;
+ Handle<BytecodeArray> bytecode_array;
InliningPosition position;
InlinedFunctionHolder(Handle<SharedFunctionInfo> inlined_shared_info,
+ Handle<BytecodeArray> inlined_bytecode,
SourcePosition pos)
- : shared_info(inlined_shared_info) {
+ : shared_info(inlined_shared_info), bytecode_array(inlined_bytecode) {
position.position = pos;
// initialized when generating the deoptimization literals
position.inlined_function_id = DeoptimizationData::kNotInlinedIndex;
@@ -259,6 +255,7 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final {
// Returns the inlining id for source position tracking.
int AddInlinedFunction(Handle<SharedFunctionInfo> inlined_function,
+ Handle<BytecodeArray> inlined_bytecode,
SourcePosition pos);
std::unique_ptr<char[]> GetDebugName() const;
@@ -288,15 +285,21 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final {
PoisoningMitigationLevel::kDontPoison;
Code::Kind code_kind_;
- uint32_t stub_key_ = 0;
int32_t builtin_index_ = -1;
+ // We retain a reference to the bytecode array specifically to ensure it doesn't
+ // get flushed while we are optimizing the code.
+ Handle<BytecodeArray> bytecode_array_;
+
Handle<SharedFunctionInfo> shared_info_;
Handle<JSFunction> closure_;
// The compiled code.
- CodeReference code_;
+ Handle<Code> code_;
+
+ // The WebAssembly compilation result, not published in the NativeModule yet.
+ std::unique_ptr<wasm::WasmCompilationResult> wasm_compilation_result_;
// Entry point when compiling for OSR, {BailoutId::None} otherwise.
BailoutId osr_offset_ = BailoutId::None();
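
Two of the header changes serve one purpose: the new bytecode_array_ member (per its comment, to keep the bytecode from being flushed mid-compile) and, presumably for the same reason, the Handle<BytecodeArray> added to InlinedFunctionHolder both keep bytecode strongly referenced for the lifetime of the optimization job. The lifetime argument, sketched with shared_ptr standing in for V8's GC handles (V8 does not actually use shared_ptr here):

    #include <cassert>
    #include <memory>

    struct Bytecode {
      int length = 0;
    };

    struct Function {
      std::shared_ptr<Bytecode> bytecode;  // may be flushed under pressure
    };

    class CompileJob {
     public:
      explicit CompileJob(const Function& fn) : bytecode_(fn.bytecode) {}
      bool BytecodeStillAlive() const { return bytecode_ != nullptr; }

     private:
      std::shared_ptr<Bytecode> bytecode_;  // the pinning reference
    };

    int main() {
      Function fn{std::make_shared<Bytecode>()};
      CompileJob job(fn);
      fn.bytecode.reset();               // simulated bytecode flushing
      assert(job.BytecodeStillAlive());  // the job's reference keeps it valid
    }
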
diff --git a/deps/v8/src/parsing/duplicate-finder.h b/deps/v8/src/parsing/duplicate-finder.h
deleted file mode 100644
index 65bcc4e00d..0000000000
--- a/deps/v8/src/parsing/duplicate-finder.h
+++ /dev/null
@@ -1,36 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_PARSING_DUPLICATE_FINDER_H_
-#define V8_PARSING_DUPLICATE_FINDER_H_
-
-#include <set>
-
-namespace v8 {
-namespace internal {
-
-class Scanner;
-
-// DuplicateFinder : Helper class to discover duplicate symbols.
-//
-// Allocate a DuplicateFinder for each set of symbols you want to check
-// for duplicates and then pass this instance into
-// Scanner::IsDuplicateSymbol(..).
-//
-// This class only holds the data; all actual logic is in
-// Scanner::IsDuplicateSymbol.
-class DuplicateFinder {
- public:
- DuplicateFinder() = default;
-
- private:
- friend class Scanner;
-
- std::set<const void*> known_symbols_;
-};
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_PARSING_DUPLICATE_FINDER_H_
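
The deleted DuplicateFinder was only a std::set<const void*> of interned symbol addresses; all the logic lived in Scanner::IsDuplicateSymbol. The whole mechanism fits in a few lines; a sketch using std::string keys instead of interned pointers:

    #include <set>
    #include <string>

    class DuplicateFinderSketch {
     public:
      // Returns true if |symbol| was seen before, i.e. it is a duplicate.
      bool IsDuplicate(const std::string& symbol) {
        return !known_symbols_.insert(symbol).second;
      }

     private:
      std::set<std::string> known_symbols_;
    };

    // Usage: a parameter list like (a, b, a) trips on the second 'a'.
    //   DuplicateFinderSketch finder;
    //   finder.IsDuplicate("a");  // false
    //   finder.IsDuplicate("b");  // false
    //   finder.IsDuplicate("a");  // true -> duplicate parameter error
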
diff --git a/deps/v8/src/parsing/expression-classifier.h b/deps/v8/src/parsing/expression-classifier.h
deleted file mode 100644
index 2eed75b939..0000000000
--- a/deps/v8/src/parsing/expression-classifier.h
+++ /dev/null
@@ -1,568 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_PARSING_EXPRESSION_CLASSIFIER_H_
-#define V8_PARSING_EXPRESSION_CLASSIFIER_H_
-
-#include <type_traits>
-
-#include "src/messages.h"
-#include "src/parsing/scanner.h"
-
-namespace v8 {
-namespace internal {
-
-class DuplicateFinder;
-
-#define ERROR_CODES(T) \
- T(ExpressionProduction, 0) \
- T(FormalParameterInitializerProduction, 1) \
- T(BindingPatternProduction, 2) \
- T(AssignmentPatternProduction, 3) \
- T(DistinctFormalParametersProduction, 4) \
- T(StrictModeFormalParametersProduction, 5) \
- T(ArrowFormalParametersProduction, 6) \
- T(LetPatternProduction, 7) \
- T(AsyncArrowFormalParametersProduction, 8)
-
-// Expression classifiers serve two purposes:
-//
-// 1) They keep track of error messages that are pending (and other
-// related information), waiting for the parser to decide whether
-// the parsed expression is a pattern or not.
-// 2) They keep track of expressions that may need to be rewritten, if
-// the parser decides that they are not patterns. (A different
-// mechanism implements the rewriting of patterns.)
-//
-// Expression classifiers are used by the parser in a stack fashion.
-// Each new classifier is pushed on top of the stack. This happens
-// automatically by the class's constructor. While on top of the
-// stack, the classifier records pending error messages and tracks the
-// pending non-patterns of the expression that is being parsed.
-//
-// At the end of its life, a classifier is either "accumulated" to the
-// one that is below it on the stack, or is "discarded". The former
-// is achieved by calling the method Accumulate. The latter is
-// achieved automatically by the destructor, but it can happen earlier
-// by calling the method Discard. Both actions result in removing the
-// classifier from the parser's stack.
-
-// Expression classifier is split into four parts. The base implementing the
-// general expression classifier logic. Two parts that implement the error
-// tracking interface, where one is the actual implementation and the other is
-// an empty class providing only the interface without logic. The expression
-// classifier class then combines the other parts and provides the full
-// expression classifier interface by inheriting conditionally, controlled by
-// Types::ExpressionClassifierReportErrors, either from the ErrorTracker or the
-// EmptyErrorTracker.
-//
-// Base
-// / \
-// / \
-// / \
-// / \
-// ErrorTracker EmptyErrorTracker
-// \ /
-// \ /
-// \ /
-// \ /
-// ExpressionClassifier
-
-template <typename Types>
-class ExpressionClassifier;
-
-template <typename Types, typename ErrorTracker>
-class ExpressionClassifierBase {
- public:
- enum ErrorKind : unsigned {
-#define DEFINE_ERROR_KIND(NAME, CODE) k##NAME = CODE,
- ERROR_CODES(DEFINE_ERROR_KIND)
-#undef DEFINE_ERROR_KIND
- kUnusedError = 15 // Larger than error codes; should fit in 4 bits
- };
-
- struct Error {
- V8_INLINE Error()
- : location(Scanner::Location::invalid()),
- message(MessageTemplate::kNone),
- kind(kUnusedError),
- arg(nullptr) {}
- V8_INLINE explicit Error(Scanner::Location loc,
- MessageTemplate::Template msg, ErrorKind k,
- const char* a = nullptr)
- : location(loc), message(msg), kind(k), arg(a) {}
-
- Scanner::Location location;
- MessageTemplate::Template message : 28;
- unsigned kind : 4;
- const char* arg;
- };
-
- // clang-format off
- enum TargetProduction : unsigned {
-#define DEFINE_PRODUCTION(NAME, CODE) NAME = 1 << CODE,
- ERROR_CODES(DEFINE_PRODUCTION)
-#undef DEFINE_PRODUCTION
-
-#define DEFINE_ALL_PRODUCTIONS(NAME, CODE) NAME |
- AllProductions = ERROR_CODES(DEFINE_ALL_PRODUCTIONS) /* | */ 0
-#undef DEFINE_ALL_PRODUCTIONS
- };
- // clang-format on
-
- explicit ExpressionClassifierBase(typename Types::Base* base,
- DuplicateFinder* duplicate_finder = nullptr)
- : base_(base),
- duplicate_finder_(duplicate_finder),
- invalid_productions_(0),
- is_non_simple_parameter_list_(0) {}
-
- virtual ~ExpressionClassifierBase() = default;
-
- V8_INLINE bool is_valid(unsigned productions) const {
- return (invalid_productions_ & productions) == 0;
- }
-
- V8_INLINE DuplicateFinder* duplicate_finder() const {
- return duplicate_finder_;
- }
-
- V8_INLINE bool is_valid_expression() const {
- return is_valid(ExpressionProduction);
- }
-
- V8_INLINE bool is_valid_formal_parameter_initializer() const {
- return is_valid(FormalParameterInitializerProduction);
- }
-
- V8_INLINE bool is_valid_binding_pattern() const {
- return is_valid(BindingPatternProduction);
- }
-
- V8_INLINE bool is_valid_assignment_pattern() const {
- return is_valid(AssignmentPatternProduction);
- }
-
- V8_INLINE bool is_valid_arrow_formal_parameters() const {
- return is_valid(ArrowFormalParametersProduction);
- }
-
- V8_INLINE bool is_valid_formal_parameter_list_without_duplicates() const {
- return is_valid(DistinctFormalParametersProduction);
- }
-
- // Note: callers should also check
- // is_valid_formal_parameter_list_without_duplicates().
- V8_INLINE bool is_valid_strict_mode_formal_parameters() const {
- return is_valid(StrictModeFormalParametersProduction);
- }
-
- V8_INLINE bool is_valid_let_pattern() const {
- return is_valid(LetPatternProduction);
- }
-
- bool is_valid_async_arrow_formal_parameters() const {
- return is_valid(AsyncArrowFormalParametersProduction);
- }
-
- V8_INLINE bool is_simple_parameter_list() const {
- return !is_non_simple_parameter_list_;
- }
-
- V8_INLINE void RecordNonSimpleParameter() {
- is_non_simple_parameter_list_ = 1;
- }
-
- V8_INLINE void Accumulate(ExpressionClassifier<Types>* const inner,
- unsigned productions) {
-#ifdef DEBUG
- static_cast<ErrorTracker*>(this)->CheckErrorPositions(inner);
-#endif
- // Propagate errors from inner, but don't overwrite already recorded
- // errors.
- unsigned non_arrow_inner_invalid_productions =
- inner->invalid_productions_ & ~ArrowFormalParametersProduction;
- if (non_arrow_inner_invalid_productions) {
- unsigned errors = non_arrow_inner_invalid_productions & productions &
- ~this->invalid_productions_;
- // The result will continue to be a valid arrow formal parameters if the
- // inner expression is a valid binding pattern.
- bool copy_BP_to_AFP = false;
- if (productions & ArrowFormalParametersProduction &&
- this->is_valid_arrow_formal_parameters()) {
- // Also whether we've seen any non-simple parameters
- // if expecting an arrow function parameter.
- this->is_non_simple_parameter_list_ |=
- inner->is_non_simple_parameter_list_;
- if (!inner->is_valid_binding_pattern()) {
- copy_BP_to_AFP = true;
- this->invalid_productions_ |= ArrowFormalParametersProduction;
- }
- }
- if (errors != 0 || copy_BP_to_AFP) {
- this->invalid_productions_ |= errors;
- static_cast<ErrorTracker*>(this)->AccumulateErrorImpl(
- inner, productions, errors, copy_BP_to_AFP);
- }
- }
- static_cast<ErrorTracker*>(this)->RewindErrors(inner);
- }
-
- protected:
- typename Types::Base* base_;
- DuplicateFinder* duplicate_finder_;
- unsigned invalid_productions_ : kUnusedError;
- STATIC_ASSERT(kUnusedError <= 15);
- unsigned is_non_simple_parameter_list_ : 1;
-};
-
-template <typename Types>
-class ExpressionClassifierErrorTracker
- : public ExpressionClassifierBase<Types,
- ExpressionClassifierErrorTracker<Types>> {
- public:
- using BaseClassType =
- ExpressionClassifierBase<Types, ExpressionClassifierErrorTracker<Types>>;
- using typename BaseClassType::Error;
- using typename BaseClassType::ErrorKind;
- using TP = typename BaseClassType::TargetProduction;
-
- ExpressionClassifierErrorTracker(typename Types::Base* base,
- DuplicateFinder* duplicate_finder)
- : BaseClassType(base, duplicate_finder),
- reported_errors_(base->impl()->GetReportedErrorList()) {
- reported_errors_begin_ = reported_errors_end_ = reported_errors_->length();
- }
-
- ~ExpressionClassifierErrorTracker() override { Discard(); }
-
- V8_INLINE void Discard() {
- if (reported_errors_end_ == reported_errors_->length()) {
- reported_errors_->Rewind(reported_errors_begin_);
- reported_errors_end_ = reported_errors_begin_;
- }
- DCHECK_EQ(reported_errors_begin_, reported_errors_end_);
- }
-
- protected:
- V8_INLINE const Error& reported_error(ErrorKind kind) const {
- if (this->invalid_productions_ & (1 << kind)) {
- for (int i = reported_errors_begin_; i < reported_errors_end_; i++) {
- if (reported_errors_->at(i).kind == kind)
- return reported_errors_->at(i);
- }
- UNREACHABLE();
- }
- // We should only be looking for an error when we know that one has
- // been reported. But we're not... So this is to make sure we have
- // the same behaviour.
- UNREACHABLE();
-
- // Make MSVC happy by returning an error from this inaccessible path.
- static Error none;
- return none;
- }
-
- // Adds e to the end of the list of reported errors for this classifier.
- // It is expected that this classifier is the last one in the stack.
- V8_INLINE void Add(const Error& e) {
- DCHECK_EQ(reported_errors_end_, reported_errors_->length());
- reported_errors_->Add(e, this->base_->impl()->zone());
- reported_errors_end_++;
- }
-
- // Copies the error at position i of the list of reported errors, so that
- // it becomes the last error reported for this classifier. Position i
- // could be either after the existing errors of this classifier (i.e.,
- // in an inner classifier) or it could be an existing error (in case a
- // copy is needed).
- V8_INLINE void Copy(int i) {
- DCHECK_LT(i, reported_errors_->length());
- if (reported_errors_end_ != i)
- reported_errors_->at(reported_errors_end_) = reported_errors_->at(i);
- reported_errors_end_++;
- }
-
- private:
-#ifdef DEBUG
- V8_INLINE void CheckErrorPositions(ExpressionClassifier<Types>* const inner) {
- DCHECK_EQ(inner->reported_errors_, this->reported_errors_);
- DCHECK_EQ(inner->reported_errors_begin_, this->reported_errors_end_);
- DCHECK_EQ(inner->reported_errors_end_, this->reported_errors_->length());
- }
-#endif
-
- V8_INLINE void RewindErrors(ExpressionClassifier<Types>* const inner) {
- this->reported_errors_->Rewind(this->reported_errors_end_);
- inner->reported_errors_begin_ = inner->reported_errors_end_ =
- this->reported_errors_end_;
- }
-
- void AccumulateErrorImpl(ExpressionClassifier<Types>* const inner,
- unsigned productions, unsigned errors,
- bool copy_BP_to_AFP) {
- // Traverse the list of errors reported by the inner classifier
- // to copy what's necessary.
- int binding_pattern_index = inner->reported_errors_end_;
- for (int i = inner->reported_errors_begin_; i < inner->reported_errors_end_;
- i++) {
- int k = this->reported_errors_->at(i).kind;
- if (errors & (1 << k)) this->Copy(i);
- // Check if it's a BP error that has to be copied to an AFP error.
- if (k == ErrorKind::kBindingPatternProduction && copy_BP_to_AFP) {
- if (this->reported_errors_end_ <= i) {
- // If the BP error itself has not already been copied,
- // copy it now and change it to an AFP error.
- this->Copy(i);
- this->reported_errors_->at(this->reported_errors_end_ - 1).kind =
- ErrorKind::kArrowFormalParametersProduction;
- } else {
- // Otherwise, if the BP error was already copied, keep its
- // position and wait until the end of the traversal.
- DCHECK_EQ(this->reported_errors_end_, i + 1);
- binding_pattern_index = i;
- }
- }
- }
- // Do we still have to copy the BP error to an AFP error?
- if (binding_pattern_index < inner->reported_errors_end_) {
- // If there's still unused space in the list of the inner
- // classifier, copy it there, otherwise add it to the end
- // of the list.
- if (this->reported_errors_end_ < inner->reported_errors_end_)
- this->Copy(binding_pattern_index);
- else
- Add(this->reported_errors_->at(binding_pattern_index));
- this->reported_errors_->at(this->reported_errors_end_ - 1).kind =
- ErrorKind::kArrowFormalParametersProduction;
- }
- }
-
- private:
- ZoneList<Error>* reported_errors_;
- // The uint16_t for reported_errors_begin_ and reported_errors_end_ will
- // not be enough in the case of a long series of expressions using nested
- // classifiers, e.g., a long sequence of assignments, as in:
- // literals with spreads, as in:
- // var N=65536; eval("var x;" + "x=".repeat(N) + "42");
- // This should not be a problem, as such things currently fail with a
- // stack overflow while parsing.
- uint16_t reported_errors_begin_;
- uint16_t reported_errors_end_;
-
- friend BaseClassType;
-};
-
-template <typename Types>
-class ExpressionClassifierEmptyErrorTracker
- : public ExpressionClassifierBase<
- Types, ExpressionClassifierEmptyErrorTracker<Types>> {
- public:
- using BaseClassType =
- ExpressionClassifierBase<Types,
- ExpressionClassifierEmptyErrorTracker<Types>>;
- using typename BaseClassType::Error;
- using typename BaseClassType::ErrorKind;
- using TP = typename BaseClassType::TargetProduction;
-
- ExpressionClassifierEmptyErrorTracker(typename Types::Base* base,
- DuplicateFinder* duplicate_finder)
- : BaseClassType(base, duplicate_finder) {}
-
- V8_INLINE void Discard() {}
-
- protected:
- V8_INLINE const Error& reported_error(ErrorKind kind) const {
- static Error none;
- return none;
- }
-
- V8_INLINE void Add(const Error& e) {}
-
- private:
-#ifdef DEBUG
- V8_INLINE void CheckErrorPositions(ExpressionClassifier<Types>* const inner) {
- }
-#endif
- V8_INLINE void RewindErrors(ExpressionClassifier<Types>* const inner) {}
- V8_INLINE void AccumulateErrorImpl(ExpressionClassifier<Types>* const inner,
- unsigned productions, unsigned errors,
- bool copy_BP_to_AFP) {}
-
- friend BaseClassType;
-};
-
-template <typename Types>
-class ExpressionClassifier
- : public std::conditional<
- Types::ExpressionClassifierReportErrors,
- ExpressionClassifierErrorTracker<Types>,
- ExpressionClassifierEmptyErrorTracker<Types>>::type {
- static constexpr bool ReportErrors = Types::ExpressionClassifierReportErrors;
-
- public:
- using BaseClassType = typename std::conditional<
- Types::ExpressionClassifierReportErrors,
- typename ExpressionClassifierErrorTracker<Types>::BaseClassType,
- typename ExpressionClassifierEmptyErrorTracker<Types>::BaseClassType>::
- type;
- using typename BaseClassType::Error;
- using typename BaseClassType::ErrorKind;
- using TP = typename BaseClassType::TargetProduction;
-
- explicit ExpressionClassifier(typename Types::Base* base,
- DuplicateFinder* duplicate_finder = nullptr)
- : std::conditional<Types::ExpressionClassifierReportErrors,
- ExpressionClassifierErrorTracker<Types>,
- ExpressionClassifierEmptyErrorTracker<Types>>::
- type(base, duplicate_finder),
- previous_(base->classifier_) {
- base->classifier_ = this;
- }
-
- V8_INLINE ~ExpressionClassifier() override {
- if (this->base_->classifier_ == this) this->base_->classifier_ = previous_;
- }
-
- V8_INLINE const Error& expression_error() const {
- return this->reported_error(ErrorKind::kExpressionProduction);
- }
-
- V8_INLINE const Error& formal_parameter_initializer_error() const {
- return this->reported_error(
- ErrorKind::kFormalParameterInitializerProduction);
- }
-
- V8_INLINE const Error& binding_pattern_error() const {
- return this->reported_error(ErrorKind::kBindingPatternProduction);
- }
-
- V8_INLINE const Error& assignment_pattern_error() const {
- return this->reported_error(ErrorKind::kAssignmentPatternProduction);
- }
-
- V8_INLINE const Error& arrow_formal_parameters_error() const {
- return this->reported_error(ErrorKind::kArrowFormalParametersProduction);
- }
-
- V8_INLINE const Error& duplicate_formal_parameter_error() const {
- return this->reported_error(ErrorKind::kDistinctFormalParametersProduction);
- }
-
- V8_INLINE const Error& strict_mode_formal_parameter_error() const {
- return this->reported_error(
- ErrorKind::kStrictModeFormalParametersProduction);
- }
-
- V8_INLINE const Error& let_pattern_error() const {
- return this->reported_error(ErrorKind::kLetPatternProduction);
- }
-
- V8_INLINE const Error& async_arrow_formal_parameters_error() const {
- return this->reported_error(
- ErrorKind::kAsyncArrowFormalParametersProduction);
- }
-
- V8_INLINE bool does_error_reporting() { return ReportErrors; }
-
- void RecordExpressionError(const Scanner::Location& loc,
- MessageTemplate::Template message,
- const char* arg = nullptr) {
- if (!this->is_valid_expression()) return;
- this->invalid_productions_ |= TP::ExpressionProduction;
- this->Add(Error(loc, message, ErrorKind::kExpressionProduction, arg));
- }
-
- void RecordFormalParameterInitializerError(const Scanner::Location& loc,
- MessageTemplate::Template message,
- const char* arg = nullptr) {
- if (!this->is_valid_formal_parameter_initializer()) return;
- this->invalid_productions_ |= TP::FormalParameterInitializerProduction;
- this->Add(Error(loc, message,
- ErrorKind::kFormalParameterInitializerProduction, arg));
- }
-
- void RecordBindingPatternError(const Scanner::Location& loc,
- MessageTemplate::Template message,
- const char* arg = nullptr) {
- if (!this->is_valid_binding_pattern()) return;
- this->invalid_productions_ |= TP::BindingPatternProduction;
- this->Add(Error(loc, message, ErrorKind::kBindingPatternProduction, arg));
- }
-
- void RecordAssignmentPatternError(const Scanner::Location& loc,
- MessageTemplate::Template message,
- const char* arg = nullptr) {
- if (!this->is_valid_assignment_pattern()) return;
- this->invalid_productions_ |= TP::AssignmentPatternProduction;
- this->Add(
- Error(loc, message, ErrorKind::kAssignmentPatternProduction, arg));
- }
-
- void RecordPatternError(const Scanner::Location& loc,
- MessageTemplate::Template message,
- const char* arg = nullptr) {
- RecordBindingPatternError(loc, message, arg);
- RecordAssignmentPatternError(loc, message, arg);
- }
-
- void RecordArrowFormalParametersError(const Scanner::Location& loc,
- MessageTemplate::Template message,
- const char* arg = nullptr) {
- if (!this->is_valid_arrow_formal_parameters()) return;
- this->invalid_productions_ |= TP::ArrowFormalParametersProduction;
- this->Add(
- Error(loc, message, ErrorKind::kArrowFormalParametersProduction, arg));
- }
-
- void RecordAsyncArrowFormalParametersError(const Scanner::Location& loc,
- MessageTemplate::Template message,
- const char* arg = nullptr) {
- if (!this->is_valid_async_arrow_formal_parameters()) return;
- this->invalid_productions_ |= TP::AsyncArrowFormalParametersProduction;
- this->Add(Error(loc, message,
- ErrorKind::kAsyncArrowFormalParametersProduction, arg));
- }
-
- void RecordDuplicateFormalParameterError(const Scanner::Location& loc) {
- if (!this->is_valid_formal_parameter_list_without_duplicates()) return;
- this->invalid_productions_ |= TP::DistinctFormalParametersProduction;
- this->Add(Error(loc, MessageTemplate::kParamDupe,
- ErrorKind::kDistinctFormalParametersProduction));
- }
-
- // Record a binding that would be invalid in strict mode. Confusingly this
- // is not the same as StrictFormalParameterList, which simply forbids
- // duplicate bindings.
- void RecordStrictModeFormalParameterError(const Scanner::Location& loc,
- MessageTemplate::Template message,
- const char* arg = nullptr) {
- if (!this->is_valid_strict_mode_formal_parameters()) return;
- this->invalid_productions_ |= TP::StrictModeFormalParametersProduction;
- this->Add(Error(loc, message,
- ErrorKind::kStrictModeFormalParametersProduction, arg));
- }
-
- void RecordLetPatternError(const Scanner::Location& loc,
- MessageTemplate::Template message,
- const char* arg = nullptr) {
- if (!this->is_valid_let_pattern()) return;
- this->invalid_productions_ |= TP::LetPatternProduction;
- this->Add(Error(loc, message, ErrorKind::kLetPatternProduction, arg));
- }
-
- ExpressionClassifier* previous() const { return previous_; }
-
- private:
- ExpressionClassifier* previous_;
-
- DISALLOW_COPY_AND_ASSIGN(ExpressionClassifier);
-};
-
-#undef ERROR_CODES
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_PARSING_EXPRESSION_CLASSIFIER_H_
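
For contrast with the ExpressionScope machinery introduced below, the essence of the classifier stack just deleted was: the constructor pushes onto a parser-owned stack, Record* calls buffer pending errors, Accumulate propagates them to the enclosing classifier, and the destructor pops. A compact standalone sketch (stand-in types, without the error-kind bitfields):

    #include <string>
    #include <utility>
    #include <vector>

    class Classifier {
     public:
      // Pushes this classifier onto the parser's stack on construction.
      explicit Classifier(Classifier** top) : top_(top), previous_(*top) {
        *top_ = this;
      }
      // Pops on destruction; un-accumulated errors are discarded.
      ~Classifier() {
        if (*top_ == this) *top_ = previous_;
      }

      void RecordError(std::string message) {
        pending_.push_back(std::move(message));
      }

      // Propagate pending errors to the enclosing classifier, if any.
      void Accumulate() {
        if (previous_ == nullptr) return;
        for (auto& m : pending_) previous_->pending_.push_back(std::move(m));
        pending_.clear();
      }

     private:
      Classifier** top_;      // the parser's "current classifier" slot
      Classifier* previous_;  // classifier below this one on the stack
      std::vector<std::string> pending_;
    };
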
diff --git a/deps/v8/src/parsing/expression-scope-reparenter.cc b/deps/v8/src/parsing/expression-scope-reparenter.cc
index 30e96d1688..95e4935c53 100644
--- a/deps/v8/src/parsing/expression-scope-reparenter.cc
+++ b/deps/v8/src/parsing/expression-scope-reparenter.cc
@@ -27,7 +27,6 @@ class Reparenter final : public AstTraversalVisitor<Reparenter> {
void VisitFunctionLiteral(FunctionLiteral* expr);
void VisitClassLiteral(ClassLiteral* expr);
void VisitVariableProxy(VariableProxy* expr);
- void VisitRewritableExpression(RewritableExpression* expr);
void VisitBlock(Block* stmt);
void VisitTryCatchStatement(TryCatchStatement* stmt);
@@ -79,11 +78,6 @@ void Reparenter::VisitVariableProxy(VariableProxy* proxy) {
}
}
-void Reparenter::VisitRewritableExpression(RewritableExpression* expr) {
- Visit(expr->expression());
- expr->set_scope(scope_);
-}
-
void Reparenter::VisitBlock(Block* stmt) {
if (stmt->scope())
stmt->scope()->ReplaceOuterScope(scope_);
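
With RewritableExpression gone, the reparenter's remaining job is unchanged: when an expression moves under a new scope, walk it and point every scope-owning node at the new outer scope, without descending into scopes that were just reparented. Schematically (toy Node/Scope types, not the AstTraversalVisitor API):

    #include <vector>

    struct Scope {
      Scope* outer = nullptr;
    };

    struct Node {
      Scope* own_scope = nullptr;  // null for nodes that own no scope
      std::vector<Node*> children;
    };

    void Reparent(Node* node, Scope* new_outer) {
      if (node->own_scope != nullptr) {
        node->own_scope->outer = new_outer;  // ReplaceOuterScope equivalent
        return;  // everything inside keeps its existing parent chain
      }
      for (Node* child : node->children) Reparent(child, new_outer);
    }
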
diff --git a/deps/v8/src/parsing/expression-scope.h b/deps/v8/src/parsing/expression-scope.h
new file mode 100644
index 0000000000..878cb3cf25
--- /dev/null
+++ b/deps/v8/src/parsing/expression-scope.h
@@ -0,0 +1,710 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_PARSING_EXPRESSION_SCOPE_H_
+#define V8_PARSING_EXPRESSION_SCOPE_H_
+
+#include "src/ast/scopes.h"
+#include "src/message-template.h"
+#include "src/parsing/scanner.h"
+#include "src/zone/zone.h" // For ScopedPtrList.
+
+namespace v8 {
+namespace internal {
+
+template <typename Types>
+class ExpressionParsingScope;
+template <typename Types>
+class AccumulationScope;
+template <typename Types>
+class ArrowHeadParsingScope;
+template <typename Types>
+class ParameterDeclarationParsingScope;
+template <typename Types>
+class VariableDeclarationParsingScope;
+class VariableProxy;
+
+// ExpressionScope is used in a stack fashion, and is used to specialize
+// expression parsing for the task at hand. It allows the parser to reuse the
+// same code to parse destructuring declarations, assignment patterns,
+// expressions, and (async) arrow function heads.
+//
+// One of the specific subclasses needs to be instantiated to tell the parser
+// the meaning of the expression it will parse next. The parser then calls
+// Record* on the expression_scope() to indicate errors. The expression_scope
+// will either discard those errors, immediately report those errors, or
+// classify the errors for later validation.
+// TODO(verwaest): Record is a slightly odd name since it will directly throw
+// for unambiguous scopes.
+template <typename Types>
+class ExpressionScope {
+ public:
+ typedef typename Types::Impl ParserT;
+ typedef typename Types::Expression ExpressionT;
+
+ VariableProxy* NewVariable(const AstRawString* name,
+ int pos = kNoSourcePosition) {
+ VariableProxy* result = parser_->NewRawVariable(name, pos);
+ if (CanBeExpression()) {
+ AsExpressionParsingScope()->TrackVariable(result);
+ } else if (type_ == kParameterDeclaration) {
+ AsParameterDeclarationParsingScope()->Declare(result);
+ } else {
+ return AsVariableDeclarationParsingScope()->Declare(result);
+ }
+ return result;
+ }
+
+ void MarkIdentifierAsAssigned() {
+ if (!CanBeExpression()) return;
+ AsExpressionParsingScope()->MarkIdentifierAsAssigned();
+ }
+
+ void ValidateAsPattern(ExpressionT expression, int begin, int end) {
+ if (!CanBeExpression()) return;
+ AsExpressionParsingScope()->ValidatePattern(expression, begin, end);
+ AsExpressionParsingScope()->ClearExpressionError();
+ }
+
+ // Record async arrow parameter errors in all ambiguous async arrow scopes in
+ // the chain up to the first unambiguous scope.
+ void RecordAsyncArrowParametersError(const Scanner::Location& loc,
+ MessageTemplate message) {
+ // Only ambiguous scopes (ExpressionParsingScope, *ArrowHeadParsingScope)
+ // need to propagate errors to a possible kAsyncArrowHeadParsingScope, so
+ // immediately return if the current scope is not ambiguous.
+ if (!CanBeExpression()) return;
+ AsExpressionParsingScope()->RecordAsyncArrowParametersError(loc, message);
+ }
+
+ // Record initializer errors in all scopes that can turn into parameter scopes
+ // (ArrowHeadParsingScopes) up to the first known unambiguous parameter scope.
+ void RecordParameterInitializerError(const Scanner::Location& loc,
+ MessageTemplate message) {
+ ExpressionScope* scope = this;
+ while (!scope->IsCertainlyParameterDeclaration()) {
+ if (!has_possible_parameter_in_scope_chain_) return;
+ if (scope->CanBeParameterDeclaration()) {
+ scope->AsArrowHeadParsingScope()->RecordDeclarationError(loc, message);
+ }
+ scope = scope->parent();
+ if (scope == nullptr) return;
+ }
+ Report(loc, message);
+ }
+
+ void RecordPatternError(const Scanner::Location& loc,
+ MessageTemplate message) {
+ // TODO(verwaest): Non-assigning expression?
+ if (IsCertainlyPattern()) {
+ Report(loc, message);
+ } else {
+ AsExpressionParsingScope()->RecordPatternError(loc, message);
+ }
+ }
+
+ void RecordStrictModeParameterError(const Scanner::Location& loc,
+ MessageTemplate message) {
+ DCHECK_IMPLIES(!has_error(), loc.IsValid());
+ if (!CanBeParameterDeclaration()) return;
+ if (IsCertainlyParameterDeclaration()) {
+ if (is_strict(parser_->language_mode())) {
+ Report(loc, message);
+ } else {
+ parser_->parameters_->set_strict_parameter_error(loc, message);
+ }
+ } else {
+ parser_->next_arrow_function_info_.strict_parameter_error_location = loc;
+ parser_->next_arrow_function_info_.strict_parameter_error_message =
+ message;
+ }
+ }
+
+ void RecordDeclarationError(const Scanner::Location& loc,
+ MessageTemplate message) {
+ if (!CanBeDeclaration()) return;
+ if (IsCertainlyDeclaration()) {
+ Report(loc, message);
+ } else {
+ AsArrowHeadParsingScope()->RecordDeclarationError(loc, message);
+ }
+ }
+
+ void RecordExpressionError(const Scanner::Location& loc,
+ MessageTemplate message) {
+ if (!CanBeExpression()) return;
+ // TODO(verwaest): Non-assigning expression?
+ // if (IsCertainlyExpression()) Report(loc, message);
+ AsExpressionParsingScope()->RecordExpressionError(loc, message);
+ }
+
+ void RecordLexicalDeclarationError(const Scanner::Location& loc,
+ MessageTemplate message) {
+ if (IsLexicalDeclaration()) Report(loc, message);
+ }
+
+ void RecordNonSimpleParameter() {
+ if (!IsArrowHeadParsingScope()) return;
+ AsArrowHeadParsingScope()->RecordNonSimpleParameter();
+ }
+
+ protected:
+ enum ScopeType : uint8_t {
+ // Expression or assignment target.
+ kExpression,
+
+ // Declaration or expression or assignment target.
+ kMaybeArrowParameterDeclaration,
+ kMaybeAsyncArrowParameterDeclaration,
+
+ // Declarations.
+ kParameterDeclaration,
+ kVarDeclaration,
+ kLexicalDeclaration,
+ };
+
+ ParserT* parser() const { return parser_; }
+ ExpressionScope* parent() const { return parent_; }
+
+ void Report(const Scanner::Location& loc, MessageTemplate message) const {
+ parser_->ReportMessageAt(loc, message);
+ }
+
+ ExpressionScope(ParserT* parser, ScopeType type)
+ : parser_(parser),
+ parent_(parser->expression_scope_),
+ type_(type),
+ has_possible_parameter_in_scope_chain_(
+ CanBeParameterDeclaration() ||
+ (parent_ && parent_->has_possible_parameter_in_scope_chain_)) {
+ parser->expression_scope_ = this;
+ }
+
+ ~ExpressionScope() {
+ DCHECK(parser_->expression_scope_ == this ||
+ parser_->expression_scope_ == parent_);
+ parser_->expression_scope_ = parent_;
+ }
+
+ ExpressionParsingScope<Types>* AsExpressionParsingScope() {
+ DCHECK(CanBeExpression());
+ return static_cast<ExpressionParsingScope<Types>*>(this);
+ }
+
+#ifdef DEBUG
+ bool has_error() const { return parser_->has_error(); }
+#endif
+
+ bool CanBeExpression() const {
+ return IsInRange(type_, kExpression, kMaybeAsyncArrowParameterDeclaration);
+ }
+ bool CanBeDeclaration() const {
+ return IsInRange(type_, kMaybeArrowParameterDeclaration,
+ kLexicalDeclaration);
+ }
+ bool IsCertainlyDeclaration() const {
+ return IsInRange(type_, kParameterDeclaration, kLexicalDeclaration);
+ }
+ bool IsVariableDeclaration() const {
+ return IsInRange(type_, kVarDeclaration, kLexicalDeclaration);
+ }
+ bool IsLexicalDeclaration() const { return type_ == kLexicalDeclaration; }
+ bool IsAsyncArrowHeadParsingScope() const {
+ return type_ == kMaybeAsyncArrowParameterDeclaration;
+ }
+
+ private:
+ friend class AccumulationScope<Types>;
+ friend class ExpressionParsingScope<Types>;
+
+ ArrowHeadParsingScope<Types>* AsArrowHeadParsingScope() {
+ DCHECK(IsArrowHeadParsingScope());
+ return static_cast<ArrowHeadParsingScope<Types>*>(this);
+ }
+
+ ParameterDeclarationParsingScope<Types>*
+ AsParameterDeclarationParsingScope() {
+ DCHECK(IsCertainlyParameterDeclaration());
+ return static_cast<ParameterDeclarationParsingScope<Types>*>(this);
+ }
+
+ VariableDeclarationParsingScope<Types>* AsVariableDeclarationParsingScope() {
+ DCHECK(IsVariableDeclaration());
+ return static_cast<VariableDeclarationParsingScope<Types>*>(this);
+ }
+
+ bool IsArrowHeadParsingScope() const {
+ return IsInRange(type_, kMaybeArrowParameterDeclaration,
+ kMaybeAsyncArrowParameterDeclaration);
+ }
+ bool IsCertainlyPattern() const { return IsCertainlyDeclaration(); }
+ bool CanBeParameterDeclaration() const {
+ return IsInRange(type_, kMaybeArrowParameterDeclaration,
+ kParameterDeclaration);
+ }
+ bool IsCertainlyParameterDeclaration() const {
+ return type_ == kParameterDeclaration;
+ }
+
+ ParserT* parser_;
+ ExpressionScope<Types>* parent_;
+ ScopeType type_;
+ bool has_possible_parameter_in_scope_chain_;
+
+ DISALLOW_COPY_AND_ASSIGN(ExpressionScope);
+};
+
+// Used to unambiguously parse var, let, const declarations.
+template <typename Types>
+class VariableDeclarationParsingScope : public ExpressionScope<Types> {
+ public:
+ typedef typename Types::Impl ParserT;
+ typedef class ExpressionScope<Types> ExpressionScopeT;
+ typedef typename ExpressionScopeT::ScopeType ScopeType;
+
+ VariableDeclarationParsingScope(ParserT* parser, VariableMode mode,
+ ZonePtrList<const AstRawString>* names)
+ : ExpressionScopeT(parser, IsLexicalVariableMode(mode)
+ ? ExpressionScopeT::kLexicalDeclaration
+ : ExpressionScopeT::kVarDeclaration),
+ mode_(mode),
+ names_(names) {}
+
+ VariableProxy* Declare(VariableProxy* proxy) {
+ VariableKind kind = NORMAL_VARIABLE;
+ bool was_added;
+ this->parser()->DeclareVariable(
+ proxy, kind, mode_, Variable::DefaultInitializationFlag(mode_),
+ this->parser()->scope(), &was_added, proxy->position());
+ if (was_added &&
+ this->parser()->scope()->num_var() > kMaxNumFunctionLocals) {
+ this->parser()->ReportMessage(MessageTemplate::kTooManyVariables);
+ }
+ if (names_) names_->Add(proxy->raw_name(), this->parser()->zone());
+ if (this->IsLexicalDeclaration()) {
+ if (this->parser()->IsLet(proxy->raw_name())) {
+ this->parser()->ReportMessageAt(proxy->location(),
+ MessageTemplate::kLetInLexicalBinding);
+ }
+ } else {
+ if (this->parser()->loop_nesting_depth() > 0) {
+ // Due to hoisting, the value of a 'var'-declared variable may actually
+ // change even if the code contains only the "initial" assignment,
+ // namely when that assignment occurs inside a loop. For example:
+ //
+ // let i = 10;
+ // do { var x = i } while (i--);
+ //
+ // Note that non-lexical variables include temporaries, which may also
+ // get assigned inside a loop due to the various rewritings that the
+ // parser performs.
+ //
+ // Pessimistically mark all vars in loops as assigned. This
+ // overapproximates the actual assigned vars: a 'var' declared without
+ // an initializer is marked too, but that case is unlikely anyway.
+ //
+ // This also handles marking of loop variables in for-in and for-of
+ // loops, as determined by loop-nesting-depth.
+ proxy->set_is_assigned();
+ }
+
+ // Make sure we'll properly resolve the variable since we might be in a
+ // with or catch scope. In those cases the assignment isn't guaranteed to
+ // write to the variable declared above.
+ if (!this->parser()->scope()->is_declaration_scope()) {
+ proxy =
+ this->parser()->NewUnresolved(proxy->raw_name(), proxy->position());
+ }
+ }
+ return proxy;
+ }
+
+ private:
+ // Limit the allowed number of local variables in a function. The hard limit
+ // in Ignition is 2^31-1 due to the size of register operands. We limit it to
+ // a smaller, more reasonable upper limit.
+ static const int kMaxNumFunctionLocals = (1 << 23) - 1;
+
+ VariableMode mode_;
+ ZonePtrList<const AstRawString>* names_;
+
+ DISALLOW_COPY_AND_ASSIGN(VariableDeclarationParsingScope);
+};
+
+template <typename Types>
+class ParameterDeclarationParsingScope : public ExpressionScope<Types> {
+ public:
+ typedef typename Types::Impl ParserT;
+ typedef class ExpressionScope<Types> ExpressionScopeT;
+ typedef typename ExpressionScopeT::ScopeType ScopeType;
+
+ explicit ParameterDeclarationParsingScope(ParserT* parser)
+ : ExpressionScopeT(parser, ExpressionScopeT::kParameterDeclaration) {}
+
+ void Declare(VariableProxy* proxy) {
+ VariableKind kind = PARAMETER_VARIABLE;
+ VariableMode mode = VariableMode::kVar;
+ bool was_added;
+ this->parser()->DeclareVariable(
+ proxy, kind, mode, Variable::DefaultInitializationFlag(mode),
+ this->parser()->scope(), &was_added, proxy->position());
+ if (!has_duplicate() && !was_added) {
+ duplicate_loc_ = proxy->location();
+ }
+ }
+
+ bool has_duplicate() const { return duplicate_loc_.IsValid(); }
+
+ const Scanner::Location& duplicate_location() const { return duplicate_loc_; }
+
+ private:
+ Scanner::Location duplicate_loc_ = Scanner::Location::invalid();
+ DISALLOW_COPY_AND_ASSIGN(ParameterDeclarationParsingScope);
+};
+
+// Parsing expressions is always ambiguous between at least left-hand-side and
+// right-hand-side of assignments. This class is used to keep track of errors
+// relevant for either side until it is clear what was being parsed.
+// The class also keeps track of all variable proxies that are created while the
+// scope was active. If the scope is an expression, the variable proxies will be
+// added to the unresolved list. Otherwise they are declarations and aren't
+// added. The list is also used to mark the variables as assigned in case we are
+// parsing an assignment expression.
+template <typename Types>
+class ExpressionParsingScope : public ExpressionScope<Types> {
+ public:
+ typedef typename Types::Impl ParserT;
+ typedef typename Types::Expression ExpressionT;
+ typedef class ExpressionScope<Types> ExpressionScopeT;
+ typedef typename ExpressionScopeT::ScopeType ScopeType;
+
+ ExpressionParsingScope(ParserT* parser,
+ ScopeType type = ExpressionScopeT::kExpression)
+ : ExpressionScopeT(parser, type),
+ variable_list_(parser->variable_buffer()),
+ has_async_arrow_in_scope_chain_(
+ type == ExpressionScopeT::kMaybeAsyncArrowParameterDeclaration ||
+ (this->parent() && this->parent()->CanBeExpression() &&
+ this->parent()
+ ->AsExpressionParsingScope()
+ ->has_async_arrow_in_scope_chain_)) {
+ DCHECK(this->CanBeExpression());
+ clear(kExpressionIndex);
+ clear(kPatternIndex);
+ }
+
+ void RecordAsyncArrowParametersError(const Scanner::Location& loc,
+ MessageTemplate message) {
+ for (ExpressionScopeT* scope = this; scope != nullptr;
+ scope = scope->parent()) {
+ if (!has_async_arrow_in_scope_chain_) break;
+ if (scope->type_ ==
+ ExpressionScopeT::kMaybeAsyncArrowParameterDeclaration) {
+ scope->AsArrowHeadParsingScope()->RecordDeclarationError(loc, message);
+ }
+ }
+ }
+
+ ~ExpressionParsingScope() { DCHECK(this->has_error() || verified_); }
+
+ ExpressionT ValidateAndRewriteReference(ExpressionT expression, int beg_pos,
+ int end_pos) {
+ if (V8_LIKELY(this->parser()->IsAssignableIdentifier(expression))) {
+ MarkIdentifierAsAssigned();
+ this->mark_verified();
+ return expression;
+ } else if (V8_LIKELY(expression->IsProperty())) {
+ ValidateExpression();
+ return expression;
+ }
+ this->mark_verified();
+ return this->parser()->RewriteInvalidReferenceExpression(
+ expression, beg_pos, end_pos, MessageTemplate::kInvalidLhsInFor,
+ kSyntaxError);
+ }
+
+ void RecordExpressionError(const Scanner::Location& loc,
+ MessageTemplate message) {
+ Record(kExpressionIndex, loc, message);
+ }
+
+ void RecordPatternError(const Scanner::Location& loc,
+ MessageTemplate message) {
+ Record(kPatternIndex, loc, message);
+ }
+
+ void ValidateExpression() { Validate(kExpressionIndex); }
+
+ void ValidatePattern(ExpressionT expression, int begin, int end) {
+ Validate(kPatternIndex);
+ if (expression->is_parenthesized()) {
+ ExpressionScopeT::Report(Scanner::Location(begin, end),
+ MessageTemplate::kInvalidDestructuringTarget);
+ }
+ for (int i = 0; i < variable_list_.length(); i++) {
+ variable_list_.at(i)->set_is_assigned();
+ }
+ }
+
+ void ClearExpressionError() {
+ DCHECK(verified_);
+#ifdef DEBUG
+ verified_ = false;
+#endif
+ clear(kExpressionIndex);
+ }
+
+ void TrackVariable(VariableProxy* variable) {
+ if (!this->CanBeDeclaration()) {
+ this->parser()->scope()->AddUnresolved(variable);
+ }
+ variable_list_.Add(variable);
+ }
+
+ void MarkIdentifierAsAssigned() {
+ // It's possible we're parsing a syntax error. In that case it's not
+ // guaranteed that there's a variable in the list.
+ if (variable_list_.length() == 0) return;
+ variable_list_.at(variable_list_.length() - 1)->set_is_assigned();
+ }
+
+ protected:
+ bool is_verified() const {
+#ifdef DEBUG
+ return verified_;
+#else
+ return false;
+#endif
+ }
+
+ void ValidatePattern() { Validate(kPatternIndex); }
+
+ ScopedPtrList<VariableProxy>* variable_list() { return &variable_list_; }
+
+ private:
+ friend class AccumulationScope<Types>;
+
+ enum ErrorNumber : uint8_t {
+ kExpressionIndex = 0,
+ kPatternIndex = 1,
+ kNumberOfErrors = 2,
+ };
+ void clear(int index) {
+ messages_[index] = MessageTemplate::kNone;
+ locations_[index] = Scanner::Location::invalid();
+ }
+ bool is_valid(int index) const { return !locations_[index].IsValid(); }
+ void Record(int index, const Scanner::Location& loc,
+ MessageTemplate message) {
+ DCHECK_IMPLIES(!this->has_error(), loc.IsValid());
+ if (!is_valid(index)) return;
+ messages_[index] = message;
+ locations_[index] = loc;
+ }
+ void Validate(int index) {
+ DCHECK(!this->is_verified());
+ if (!is_valid(index)) Report(index);
+ this->mark_verified();
+ }
+ void Report(int index) const {
+ ExpressionScopeT::Report(locations_[index], messages_[index]);
+ }
+
+ // Debug verification to make sure every scope is validated exactly once.
+ void mark_verified() {
+#ifdef DEBUG
+ verified_ = true;
+#endif
+ }
+ void clear_verified() {
+#ifdef DEBUG
+ verified_ = false;
+#endif
+ }
+#ifdef DEBUG
+ bool verified_ = false;
+#endif
+
+ ScopedPtrList<VariableProxy> variable_list_;
+ MessageTemplate messages_[kNumberOfErrors];
+ Scanner::Location locations_[kNumberOfErrors];
+ bool has_async_arrow_in_scope_chain_;
+
+ DISALLOW_COPY_AND_ASSIGN(ExpressionParsingScope);
+};
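+
+// (Illustrative note: while parsing "[a, b] = c", "[a, b]" is first parsed
+// with both interpretations tracked by the scope above; on reaching "=" the
+// parser calls ValidatePattern, whereas a following ";" would instead
+// trigger ValidateExpression, reporting whichever errors were recorded
+// against the chosen interpretation.)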
+
+// This class is used to parse multiple ambiguous expressions and declarations
+// in the same scope. E.g., in async(X,Y,Z) or [X,Y,Z], X and Y and Z will all
+// be parsed in the respective outer ArrowHeadParsingScope and
+// ExpressionParsingScope. It provides a clean error state in the underlying
+// scope to parse the individual expressions, while keeping track of the
+// expression and pattern errors since the start. The AccumulationScope is only
+// used to keep track of the errors so far, and the underlying ExpressionScope
+// keeps being used as the expression_scope(). If the expression_scope() isn't
+// ambiguous, this class does not do anything.
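+//
+// E.g., in "async({a: 1}, x)", "{a: 1}" records a pattern error (a number is
+// not a valid destructuring target) that stays accumulated here; it is only
+// reported if "=>" follows and the head must validate as a declaration.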
+template <typename Types>
+class AccumulationScope {
+ public:
+ typedef typename Types::Impl ParserT;
+
+ static const int kNumberOfErrors =
+ ExpressionParsingScope<Types>::kNumberOfErrors;
+ explicit AccumulationScope(ExpressionScope<Types>* scope) : scope_(nullptr) {
+ if (!scope->CanBeExpression()) return;
+ scope_ = scope->AsExpressionParsingScope();
+ for (int i = 0; i < kNumberOfErrors; i++) {
+ // If the underlying scope is already invalid at the start, stop
+ // accumulating. That means an error was found outside of an
+ // accumulating path.
+ if (!scope_->is_valid(i)) {
+ scope_ = nullptr;
+ break;
+ }
+ copy(i);
+ }
+ }
+
+ // Merge errors from the underlying ExpressionParsingScope into this scope.
+ // Only keeps the first error across all accumulate calls, and removes the
+ // error from the underlying scope.
+ void Accumulate() {
+ if (scope_ == nullptr) return;
+ DCHECK(!scope_->is_verified());
+ for (int i = 0; i < kNumberOfErrors; i++) {
+ if (!locations_[i].IsValid()) copy(i);
+ scope_->clear(i);
+ }
+ }
+
+ // This is called instead of Accumulate when the parsed member is already
+ // known to be an expression. In that case we don't need to accumulate the
+ // expression but can validate it immediately. We also ignore any pattern
+ // error, since the parsed member is known not to be a pattern. This is
+ // necessary for "{x:1}.y" parsed as part of an assignment pattern: {x:1}
+ // will record a pattern error, but "{x:1}.y" is valid as part of an
+ // assignment pattern since it's a property access.
+ void ValidateExpression() {
+ if (scope_ == nullptr) return;
+ DCHECK(!scope_->is_verified());
+ scope_->ValidateExpression();
+ DCHECK(scope_->is_verified());
+ scope_->clear(ExpressionParsingScope<Types>::kPatternIndex);
+#ifdef DEBUG
+ scope_->clear_verified();
+#endif
+ }
+
+ ~AccumulationScope() {
+ if (scope_ == nullptr) return;
+ Accumulate();
+ for (int i = 0; i < kNumberOfErrors; i++) copy_back(i);
+ }
+
+ private:
+ void copy(int entry) {
+ messages_[entry] = scope_->messages_[entry];
+ locations_[entry] = scope_->locations_[entry];
+ }
+
+ void copy_back(int entry) {
+ if (!locations_[entry].IsValid()) return;
+ scope_->messages_[entry] = messages_[entry];
+ scope_->locations_[entry] = locations_[entry];
+ }
+
+ ExpressionParsingScope<Types>* scope_;
+ MessageTemplate messages_[kNumberOfErrors];
+ Scanner::Location locations_[kNumberOfErrors];
+
+ DISALLOW_COPY_AND_ASSIGN(AccumulationScope);
+};
+
+// The head of an arrow function is ambiguous between expression, assignment
+// pattern and declaration. This keeps track of the additional declaration
+// error and allows the scope to be validated as a declaration rather than an
+// expression or a pattern.
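+//
+// E.g., "(a, b)" is a parenthesized comma expression unless "=>" follows, in
+// which case "a" and "b" were parameter declarations; likewise "async(a, b)"
+// is a call unless it turns out to be the head of an async arrow function.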
+template <typename Types>
+class ArrowHeadParsingScope : public ExpressionParsingScope<Types> {
+ public:
+ typedef typename Types::Impl ParserT;
+ typedef typename ExpressionScope<Types>::ScopeType ScopeType;
+
+ ArrowHeadParsingScope(ParserT* parser, FunctionKind kind)
+ : ExpressionParsingScope<Types>(
+ parser,
+ kind == FunctionKind::kArrowFunction
+ ? ExpressionScope<Types>::kMaybeArrowParameterDeclaration
+ : ExpressionScope<
+ Types>::kMaybeAsyncArrowParameterDeclaration) {
+ DCHECK(kind == FunctionKind::kAsyncArrowFunction ||
+ kind == FunctionKind::kArrowFunction);
+ DCHECK(this->CanBeDeclaration());
+ DCHECK(!this->IsCertainlyDeclaration());
+ }
+
+ void ValidateExpression() {
+ // Turns out this is not an arrow head. Clear any possible tracked strict
+ // parameter errors, and reinterpret tracked variables as unresolved
+ // references.
+ this->parser()->next_arrow_function_info_.ClearStrictParameterError();
+ ExpressionParsingScope<Types>::ValidateExpression();
+ for (int i = 0; i < this->variable_list()->length(); i++) {
+ this->parser()->scope()->AddUnresolved(this->variable_list()->at(i));
+ }
+ }
+
+ DeclarationScope* ValidateAndCreateScope() {
+ DCHECK(!this->is_verified());
+ if (declaration_error_location.IsValid()) {
+ ExpressionScope<Types>::Report(declaration_error_location,
+ declaration_error_message);
+ }
+ this->ValidatePattern();
+
+ DeclarationScope* result = this->parser()->NewFunctionScope(kind());
+ if (!has_simple_parameter_list_) result->SetHasNonSimpleParameters();
+ VariableKind kind = PARAMETER_VARIABLE;
+ VariableMode mode =
+ has_simple_parameter_list_ ? VariableMode::kVar : VariableMode::kLet;
+ for (int i = 0; i < this->variable_list()->length(); i++) {
+ VariableProxy* proxy = this->variable_list()->at(i);
+ bool was_added;
+ this->parser()->DeclareVariable(proxy, kind, mode,
+ Variable::DefaultInitializationFlag(mode),
+ result, &was_added, proxy->position());
+ if (!was_added) {
+ ExpressionScope<Types>::Report(proxy->location(),
+ MessageTemplate::kParamDupe);
+ }
+ }
+ return result;
+ }
+
+ void RecordDeclarationError(const Scanner::Location& loc,
+ MessageTemplate message) {
+ DCHECK_IMPLIES(!this->has_error(), loc.IsValid());
+ declaration_error_location = loc;
+ declaration_error_message = message;
+ }
+
+ void RecordNonSimpleParameter() { has_simple_parameter_list_ = false; }
+
+ private:
+ FunctionKind kind() const {
+ return this->IsAsyncArrowHeadParsingScope()
+ ? FunctionKind::kAsyncArrowFunction
+ : FunctionKind::kArrowFunction;
+ }
+
+ Scanner::Location declaration_error_location = Scanner::Location::invalid();
+ MessageTemplate declaration_error_message = MessageTemplate::kNone;
+ bool has_simple_parameter_list_ = true;
+
+ DISALLOW_COPY_AND_ASSIGN(ArrowHeadParsingScope);
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_PARSING_EXPRESSION_SCOPE_H_
diff --git a/deps/v8/src/parsing/func-name-inferrer.cc b/deps/v8/src/parsing/func-name-inferrer.cc
index 7d476f1e64..c21fb35ae9 100644
--- a/deps/v8/src/parsing/func-name-inferrer.cc
+++ b/deps/v8/src/parsing/func-name-inferrer.cc
@@ -11,13 +11,8 @@
namespace v8 {
namespace internal {
-FuncNameInferrer::FuncNameInferrer(AstValueFactory* ast_value_factory,
- Zone* zone)
- : ast_value_factory_(ast_value_factory),
- entries_stack_(zone),
- names_stack_(zone),
- funcs_to_infer_(zone),
- zone_(zone) {}
+FuncNameInferrer::FuncNameInferrer(AstValueFactory* ast_value_factory)
+ : ast_value_factory_(ast_value_factory) {}
void FuncNameInferrer::PushEnclosingName(const AstRawString* name) {
// Enclosing name is a name of a constructor function. To check
@@ -45,35 +40,31 @@ void FuncNameInferrer::PushVariableName(const AstRawString* name) {
void FuncNameInferrer::RemoveAsyncKeywordFromEnd() {
if (IsOpen()) {
CHECK_GT(names_stack_.size(), 0);
- CHECK(names_stack_.back().name->IsOneByteEqualTo("async"));
+ CHECK(names_stack_.back().name()->IsOneByteEqualTo("async"));
names_stack_.pop_back();
}
}
-void FuncNameInferrer::Leave() {
- DCHECK(IsOpen());
- size_t last_entry = entries_stack_.back();
- entries_stack_.pop_back();
- names_stack_.Rewind(last_entry);
- if (entries_stack_.is_empty()) funcs_to_infer_.Rewind();
-}
-
const AstConsString* FuncNameInferrer::MakeNameFromStack() {
+ if (names_stack_.size() == 0) {
+ return ast_value_factory_->empty_cons_string();
+ }
AstConsString* result = ast_value_factory_->NewConsString();
auto it = names_stack_.begin();
while (it != names_stack_.end()) {
// Advance the iterator to be able to peek the next value.
auto current = it++;
// Skip consecutive variable declarations.
- if (it != names_stack_.end() && current->type == kVariableName &&
- it->type == kVariableName) {
+ if (it != names_stack_.end() && current->type() == kVariableName &&
+ it->type() == kVariableName) {
continue;
}
// Add name. Separate names with ".".
+ Zone* zone = ast_value_factory_->zone();
if (!result->IsEmpty()) {
- result->AddString(zone(), ast_value_factory_->dot_string());
+ result->AddString(zone, ast_value_factory_->dot_string());
}
- result->AddString(zone(), current->name);
+ result->AddString(zone, current->name());
}
return result;
}
@@ -83,7 +74,7 @@ void FuncNameInferrer::InferFunctionsNames() {
for (FunctionLiteral* func : funcs_to_infer_) {
func->set_raw_inferred_name(func_name);
}
- funcs_to_infer_.Rewind(0);
+ funcs_to_infer_.resize(0);
}
diff --git a/deps/v8/src/parsing/func-name-inferrer.h b/deps/v8/src/parsing/func-name-inferrer.h
index d46d7f2c2b..bdc58221e1 100644
--- a/deps/v8/src/parsing/func-name-inferrer.h
+++ b/deps/v8/src/parsing/func-name-inferrer.h
@@ -5,8 +5,10 @@
#ifndef V8_PARSING_FUNC_NAME_INFERRER_H_
#define V8_PARSING_FUNC_NAME_INFERRER_H_
-#include "src/zone/zone-chunk-list.h"
-#include "src/zone/zone.h"
+#include <vector>
+
+#include "src/base/macros.h"
+#include "src/pointer-with-payload.h"
namespace v8 {
namespace internal {
@@ -18,6 +20,11 @@ class FunctionLiteral;
enum class InferName { kYes, kNo };
+template <>
+struct PointerWithPayloadTraits<AstRawString> {
+ static constexpr int value = 2;
+};
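+// (Note: PointerWithPayload, from src/pointer-with-payload.h above, packs a
+// small integer payload into the unused low alignment bits of a pointer; the
+// specialization asserts that AstRawString pointers have at least 2 such
+// bits, enough for the NameType tag packed into FuncNameInferrer::Name
+// below.)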
+
// FuncNameInferrer is a stateful class that is used to perform name
// inference for anonymous functions during static analysis of source code.
// Inference is performed in cases when an anonymous function is assigned
@@ -28,25 +35,33 @@ enum class InferName { kYes, kNo };
// and during parsing of the RHS, a function literal can be collected. After
// parsing the RHS we can infer a name for function literals that do not have
// a name.
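+// E.g., parsing "foo.bar = function() {}" infers the name "foo.bar" for the
+// otherwise anonymous function literal.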
-class FuncNameInferrer : public ZoneObject {
+class FuncNameInferrer {
public:
- FuncNameInferrer(AstValueFactory* ast_value_factory, Zone* zone);
+ explicit FuncNameInferrer(AstValueFactory* ast_value_factory);
// To enter function name inference state, put a FuncNameInferrer::State
// on the stack.
class State {
public:
- explicit State(FuncNameInferrer* fni) : fni_(fni) { fni_->Enter(); }
- ~State() { fni_->Leave(); }
+ explicit State(FuncNameInferrer* fni)
+ : fni_(fni), top_(fni->names_stack_.size()) {
+ ++fni_->scope_depth_;
+ }
+ ~State() {
+ DCHECK(fni_->IsOpen());
+ fni_->names_stack_.resize(top_);
+ --fni_->scope_depth_;
+ }
private:
FuncNameInferrer* fni_;
+ size_t top_;
DISALLOW_COPY_AND_ASSIGN(State);
};
// Returns whether we have entered name collection state.
- bool IsOpen() const { return !entries_stack_.is_empty(); }
+ bool IsOpen() const { return scope_depth_ > 0; }
// Pushes the name of the enclosing function onto the names stack.
void PushEnclosingName(const AstRawString* name);
@@ -64,9 +79,7 @@ class FuncNameInferrer : public ZoneObject {
}
void RemoveLastFunction() {
- if (IsOpen() && !funcs_to_infer_.is_empty()) {
- funcs_to_infer_.pop_back();
- }
+ if (IsOpen() && !funcs_to_infer_.empty()) funcs_to_infer_.pop_back();
}
void RemoveAsyncKeywordFromEnd();
@@ -74,29 +87,28 @@ class FuncNameInferrer : public ZoneObject {
// Infers a function name and leaves names collection state.
void Infer() {
DCHECK(IsOpen());
- if (!funcs_to_infer_.is_empty()) {
- InferFunctionsNames();
- }
+ if (!funcs_to_infer_.empty()) InferFunctionsNames();
}
private:
- enum NameType {
+ enum NameType : uint8_t {
kEnclosingConstructorName,
kLiteralName,
kVariableName
};
struct Name {
- Name(const AstRawString* name, NameType type) : name(name), type(type) {}
- const AstRawString* name;
- NameType type;
+ // Needed for names_stack_.resize()
+ Name() { UNREACHABLE(); }
+ Name(const AstRawString* name, NameType type)
+ : name_and_type_(name, type) {}
+
+ PointerWithPayload<const AstRawString, NameType, 2> name_and_type_;
+ inline const AstRawString* name() const {
+ return name_and_type_.GetPointer();
+ }
+ inline NameType type() const { return name_and_type_.GetPayload(); }
};
- void Enter() { entries_stack_.push_back(names_stack_.size()); }
-
- void Leave();
-
- Zone* zone() const { return zone_; }
-
// Constructs a full name in dotted notation from gathered names.
const AstConsString* MakeNameFromStack();
@@ -104,10 +116,9 @@ class FuncNameInferrer : public ZoneObject {
void InferFunctionsNames();
AstValueFactory* ast_value_factory_;
- ZoneChunkList<size_t> entries_stack_;
- ZoneChunkList<Name> names_stack_;
- ZoneChunkList<FunctionLiteral*> funcs_to_infer_;
- Zone* zone_;
+ std::vector<Name> names_stack_;
+ std::vector<FunctionLiteral*> funcs_to_infer_;
+ size_t scope_depth_ = 0;
DISALLOW_COPY_AND_ASSIGN(FuncNameInferrer);
};
diff --git a/deps/v8/src/parsing/keywords-gen.h b/deps/v8/src/parsing/keywords-gen.h
new file mode 100644
index 0000000000..67c47a2dda
--- /dev/null
+++ b/deps/v8/src/parsing/keywords-gen.h
@@ -0,0 +1,177 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is automatically generated by gen-keywords-gen-h.py and should not
+// be modified manually.
+
+#ifndef V8_PARSING_KEYWORDS_GEN_H_
+#define V8_PARSING_KEYWORDS_GEN_H_
+
+#include "src/parsing/token.h"
+
+namespace v8 {
+namespace internal {
+
+/* C++ code produced by gperf version 3.1 */
+/* Command-line: gperf -m100 src/parsing/keywords.txt */
+/* Computed positions: -k'1-2' */
+
+#if !( \
+ (' ' == 32) && ('!' == 33) && ('"' == 34) && ('#' == 35) && ('%' == 37) && \
+ ('&' == 38) && ('\'' == 39) && ('(' == 40) && (')' == 41) && \
+ ('*' == 42) && ('+' == 43) && (',' == 44) && ('-' == 45) && ('.' == 46) && \
+ ('/' == 47) && ('0' == 48) && ('1' == 49) && ('2' == 50) && ('3' == 51) && \
+ ('4' == 52) && ('5' == 53) && ('6' == 54) && ('7' == 55) && ('8' == 56) && \
+ ('9' == 57) && (':' == 58) && (';' == 59) && ('<' == 60) && ('=' == 61) && \
+ ('>' == 62) && ('?' == 63) && ('A' == 65) && ('B' == 66) && ('C' == 67) && \
+ ('D' == 68) && ('E' == 69) && ('F' == 70) && ('G' == 71) && ('H' == 72) && \
+ ('I' == 73) && ('J' == 74) && ('K' == 75) && ('L' == 76) && ('M' == 77) && \
+ ('N' == 78) && ('O' == 79) && ('P' == 80) && ('Q' == 81) && ('R' == 82) && \
+ ('S' == 83) && ('T' == 84) && ('U' == 85) && ('V' == 86) && ('W' == 87) && \
+ ('X' == 88) && ('Y' == 89) && ('Z' == 90) && ('[' == 91) && \
+ ('\\' == 92) && (']' == 93) && ('^' == 94) && ('_' == 95) && \
+ ('a' == 97) && ('b' == 98) && ('c' == 99) && ('d' == 100) && \
+ ('e' == 101) && ('f' == 102) && ('g' == 103) && ('h' == 104) && \
+ ('i' == 105) && ('j' == 106) && ('k' == 107) && ('l' == 108) && \
+ ('m' == 109) && ('n' == 110) && ('o' == 111) && ('p' == 112) && \
+ ('q' == 113) && ('r' == 114) && ('s' == 115) && ('t' == 116) && \
+ ('u' == 117) && ('v' == 118) && ('w' == 119) && ('x' == 120) && \
+ ('y' == 121) && ('z' == 122) && ('{' == 123) && ('|' == 124) && \
+ ('}' == 125) && ('~' == 126))
+/* The character set is not based on ISO-646. */
+#error "gperf generated tables don't work with this execution character set."
+// If you see this error, please report a bug to <bug-gperf@gnu.org>.
+#endif
+
+struct PerfectKeywordHashTableEntry {
+ const char* name;
+ Token::Value value;
+};
+enum {
+ TOTAL_KEYWORDS = 47,
+ MIN_WORD_LENGTH = 2,
+ MAX_WORD_LENGTH = 10,
+ MIN_HASH_VALUE = 2,
+ MAX_HASH_VALUE = 51
+};
+
+/* maximum key range = 50, duplicates = 0 */
+
+class PerfectKeywordHash {
+ private:
+ static inline unsigned int Hash(const char* str, int len);
+
+ public:
+ static inline Token::Value GetToken(const char* str, int len);
+};
+
+inline unsigned int PerfectKeywordHash::Hash(const char* str, int len) {
+ DCHECK_LT(str[1], 128);
+ DCHECK_LT(str[0], 128);
+ static const unsigned char asso_values[128] = {
+ 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52,
+ 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52,
+ 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52,
+ 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52,
+ 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52,
+ 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52,
+ 52, 8, 2, 6, 0, 0, 9, 52, 21, 0, 52, 52, 36, 40, 0, 3,
+ 6, 52, 17, 13, 16, 16, 38, 25, 6, 26, 52, 52, 52, 52, 52, 52};
+ return len + asso_values[static_cast<unsigned char>(str[1])] +
+ asso_values[static_cast<unsigned char>(str[0])];
+}
+
+static const unsigned char kPerfectKeywordLengthTable[64] = {
+ 0, 0, 2, 3, 4, 2, 6, 7, 8, 9, 10, 2, 6, 7, 5, 3, 7, 8, 4, 5, 4, 7,
+ 5, 6, 5, 0, 5, 0, 6, 4, 7, 5, 9, 8, 5, 6, 3, 4, 5, 3, 4, 4, 5, 0,
+ 6, 4, 6, 5, 6, 3, 10, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+
+static const struct PerfectKeywordHashTableEntry kPerfectKeywordHashTable[64] =
+ {{"", Token::IDENTIFIER},
+ {"", Token::IDENTIFIER},
+ {"in", Token::IN},
+ {"new", Token::NEW},
+ {"enum", Token::ENUM},
+ {"do", Token::DO},
+ {"delete", Token::DELETE},
+ {"default", Token::DEFAULT},
+ {"debugger", Token::DEBUGGER},
+ {"interface", Token::FUTURE_STRICT_RESERVED_WORD},
+ {"instanceof", Token::INSTANCEOF},
+ {"if", Token::IF},
+ {"export", Token::EXPORT},
+ {"extends", Token::EXTENDS},
+ {"const", Token::CONST},
+ {"for", Token::FOR},
+ {"finally", Token::FINALLY},
+ {"continue", Token::CONTINUE},
+ {"case", Token::CASE},
+ {"catch", Token::CATCH},
+ {"null", Token::NULL_LITERAL},
+ {"package", Token::FUTURE_STRICT_RESERVED_WORD},
+ {"false", Token::FALSE_LITERAL},
+ {"return", Token::RETURN},
+ {"break", Token::BREAK},
+ {"", Token::IDENTIFIER},
+ {"async", Token::ASYNC},
+ {"", Token::IDENTIFIER},
+ {"public", Token::FUTURE_STRICT_RESERVED_WORD},
+ {"with", Token::WITH},
+ {"private", Token::FUTURE_STRICT_RESERVED_WORD},
+ {"yield", Token::YIELD},
+ {"protected", Token::FUTURE_STRICT_RESERVED_WORD},
+ {"function", Token::FUNCTION},
+ {"super", Token::SUPER},
+ {"static", Token::STATIC},
+ {"try", Token::TRY},
+ {"true", Token::TRUE_LITERAL},
+ {"await", Token::AWAIT},
+ {"let", Token::LET},
+ {"else", Token::ELSE},
+ {"this", Token::THIS},
+ {"throw", Token::THROW},
+ {"", Token::IDENTIFIER},
+ {"switch", Token::SWITCH},
+ {"void", Token::VOID},
+ {"import", Token::IMPORT},
+ {"class", Token::CLASS},
+ {"typeof", Token::TYPEOF},
+ {"var", Token::VAR},
+ {"implements", Token::FUTURE_STRICT_RESERVED_WORD},
+ {"while", Token::WHILE},
+ {"", Token::IDENTIFIER},
+ {"", Token::IDENTIFIER},
+ {"", Token::IDENTIFIER},
+ {"", Token::IDENTIFIER},
+ {"", Token::IDENTIFIER},
+ {"", Token::IDENTIFIER},
+ {"", Token::IDENTIFIER},
+ {"", Token::IDENTIFIER},
+ {"", Token::IDENTIFIER},
+ {"", Token::IDENTIFIER},
+ {"", Token::IDENTIFIER},
+ {"", Token::IDENTIFIER}};
+
+inline Token::Value PerfectKeywordHash::GetToken(const char* str, int len) {
+ if (IsInRange(len, MIN_WORD_LENGTH, MAX_WORD_LENGTH)) {
+ unsigned int key = Hash(str, len) & 0x3f;
+
+ DCHECK_LT(key, arraysize(kPerfectKeywordLengthTable));
+ DCHECK_LT(key, arraysize(kPerfectKeywordHashTable));
+ if (len == kPerfectKeywordLengthTable[key]) {
+ const char* s = kPerfectKeywordHashTable[key].name;
+
+ while (*s != 0) {
+ if (*s++ != *str++) return Token::IDENTIFIER;
+ }
+ return kPerfectKeywordHashTable[key].value;
+ }
+ }
+ return Token::IDENTIFIER;
+}
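+
+// Worked example (illustrative): GetToken("if", 2) computes
+// Hash = 2 + asso_values['f'] (9) + asso_values['i'] (0) = 11;
+// kPerfectKeywordLengthTable[11] == 2 and kPerfectKeywordHashTable[11] is
+// {"if", Token::IF}, so Token::IF is returned. For "foo" the hash also lands
+// on a keyword slot ("for", key 15), but the final comparison fails on the
+// third character and Token::IDENTIFIER is returned instead.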
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_PARSING_KEYWORDS_GEN_H_
diff --git a/deps/v8/src/parsing/keywords.txt b/deps/v8/src/parsing/keywords.txt
new file mode 100644
index 0000000000..a3b3e4614d
--- /dev/null
+++ b/deps/v8/src/parsing/keywords.txt
@@ -0,0 +1,64 @@
+%struct-type
+%language=C++
+%global-table
+%define initializer-suffix ,Token::IDENTIFIER
+%define hash-function-name Hash
+%define lookup-function-name GetToken
+%define class-name PerfectKeywordHash
+%define word-array-name kPerfectKeywordHashTable
+%define length-table-name kPerfectKeywordLengthTable
+%7bit
+%compare-lengths
+%enum
+%readonly-tables
+%compare-strncmp
+
+struct PerfectKeywordHashTableEntry { const char* name; Token::Value value; };
+%%
+async, Token::ASYNC
+await, Token::AWAIT
+break, Token::BREAK
+case, Token::CASE
+catch, Token::CATCH
+class, Token::CLASS
+const, Token::CONST
+continue, Token::CONTINUE
+debugger, Token::DEBUGGER
+default, Token::DEFAULT
+delete, Token::DELETE
+do, Token::DO
+else, Token::ELSE
+enum, Token::ENUM
+export, Token::EXPORT
+extends, Token::EXTENDS
+false, Token::FALSE_LITERAL
+finally, Token::FINALLY
+for, Token::FOR
+function, Token::FUNCTION
+if, Token::IF
+implements, Token::FUTURE_STRICT_RESERVED_WORD
+import, Token::IMPORT
+in, Token::IN
+instanceof, Token::INSTANCEOF
+interface, Token::FUTURE_STRICT_RESERVED_WORD
+let, Token::LET
+new, Token::NEW
+null, Token::NULL_LITERAL
+package, Token::FUTURE_STRICT_RESERVED_WORD
+private, Token::FUTURE_STRICT_RESERVED_WORD
+protected, Token::FUTURE_STRICT_RESERVED_WORD
+public, Token::FUTURE_STRICT_RESERVED_WORD
+return, Token::RETURN
+static, Token::STATIC
+super, Token::SUPER
+switch, Token::SWITCH
+this, Token::THIS
+throw, Token::THROW
+true, Token::TRUE_LITERAL
+try, Token::TRY
+typeof, Token::TYPEOF
+var, Token::VAR
+void, Token::VOID
+while, Token::WHILE
+with, Token::WITH
+yield, Token::YIELD
diff --git a/deps/v8/src/parsing/parse-info.cc b/deps/v8/src/parsing/parse-info.cc
index 129b00a2c2..3050e01b48 100644
--- a/deps/v8/src/parsing/parse-info.cc
+++ b/deps/v8/src/parsing/parse-info.cc
@@ -8,7 +8,10 @@
#include "src/ast/ast-value-factory.h"
#include "src/ast/ast.h"
#include "src/base/template-utils.h"
+#include "src/compiler-dispatcher/compiler-dispatcher.h"
+#include "src/counters.h"
#include "src/heap/heap-inl.h"
+#include "src/log.h"
#include "src/objects-inl.h"
#include "src/objects/scope-info.h"
#include "src/zone/zone.h"
@@ -21,7 +24,6 @@ ParseInfo::ParseInfo(AccountingAllocator* zone_allocator)
flags_(0),
extension_(nullptr),
script_scope_(nullptr),
- unicode_cache_(nullptr),
stack_limit_(0),
hash_seed_(0),
function_kind_(FunctionKind::kNormalFunction),
@@ -43,12 +45,24 @@ ParseInfo::ParseInfo(Isolate* isolate, AccountingAllocator* zone_allocator)
: ParseInfo(zone_allocator) {
set_hash_seed(isolate->heap()->HashSeed());
set_stack_limit(isolate->stack_guard()->real_climit());
- set_unicode_cache(isolate->unicode_cache());
set_runtime_call_stats(isolate->counters()->runtime_call_stats());
set_logger(isolate->logger());
set_ast_string_constants(isolate->ast_string_constants());
if (isolate->is_block_code_coverage()) set_block_coverage_enabled();
if (isolate->is_collecting_type_profile()) set_collect_type_profile();
+ if (isolate->compiler_dispatcher()->IsEnabled()) {
+ parallel_tasks_.reset(new ParallelTasks(isolate->compiler_dispatcher()));
+ }
+ set_might_always_opt(FLAG_always_opt || FLAG_prepare_always_opt);
+ set_allow_lazy_compile(FLAG_lazy);
+ set_allow_natives_syntax(FLAG_allow_natives_syntax);
+ set_allow_harmony_public_fields(FLAG_harmony_public_fields);
+ set_allow_harmony_static_fields(FLAG_harmony_static_fields);
+ set_allow_harmony_dynamic_import(FLAG_harmony_dynamic_import);
+ set_allow_harmony_import_meta(FLAG_harmony_import_meta);
+ set_allow_harmony_numeric_separator(FLAG_harmony_numeric_separator);
+ set_allow_harmony_private_fields(FLAG_harmony_private_fields);
+ set_allow_harmony_private_methods(FLAG_harmony_private_methods);
}
ParseInfo::ParseInfo(Isolate* isolate)
@@ -63,9 +77,10 @@ void ParseInfo::SetFunctionInfo(T function) {
set_language_mode(function->language_mode());
set_function_kind(function->kind());
set_declaration(function->is_declaration());
- set_requires_instance_fields_initializer(
- function->requires_instance_fields_initializer());
+ set_requires_instance_members_initializer(
+ function->requires_instance_members_initializer());
set_toplevel(function->is_toplevel());
+ set_is_oneshot_iife(function->is_oneshot_iife());
set_wrapped_as_function(function->is_wrapped());
}
@@ -76,7 +91,7 @@ ParseInfo::ParseInfo(Isolate* isolate, Handle<SharedFunctionInfo> shared)
// wrapped script at all.
DCHECK_IMPLIES(is_toplevel(), !Script::cast(shared->script())->is_wrapped());
- set_allow_lazy_parsing(FLAG_lazy_inner_functions);
+ set_allow_lazy_parsing(true);
set_asm_wasm_broken(shared->is_asm_wasm_broken());
set_start_position(shared->StartPosition());
@@ -225,5 +240,15 @@ void ParseInfo::set_script(Handle<Script> script) {
}
}
+void ParseInfo::ParallelTasks::Enqueue(ParseInfo* outer_parse_info,
+ const AstRawString* function_name,
+ FunctionLiteral* literal) {
+ base::Optional<CompilerDispatcher::JobId> job_id =
+ dispatcher_->Enqueue(outer_parse_info, function_name, literal);
+ if (job_id) {
+ enqueued_jobs_.emplace_front(std::make_pair(literal, *job_id));
+ }
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/parsing/parse-info.h b/deps/v8/src/parsing/parse-info.h
index ba3e3d2898..7ab236c82d 100644
--- a/deps/v8/src/parsing/parse-info.h
+++ b/deps/v8/src/parsing/parse-info.h
@@ -13,7 +13,7 @@
#include "src/globals.h"
#include "src/handles.h"
#include "src/objects/script.h"
-#include "src/parsing/preparsed-scope-data.h"
+#include "src/parsing/preparse-data.h"
#include "src/pending-compilation-error-handler.h"
namespace v8 {
@@ -26,12 +26,12 @@ class AccountingAllocator;
class AstRawString;
class AstStringConstants;
class AstValueFactory;
+class CompilerDispatcher;
class DeclarationScope;
class FunctionLiteral;
class RuntimeCallStats;
class Logger;
class SourceRangeMap;
-class UnicodeCache;
class Utf16CharacterStream;
class Zone;
@@ -81,6 +81,8 @@ class V8_EXPORT_PRIVATE ParseInfo {
FLAG_ACCESSOR(kCollectTypeProfile, collect_type_profile,
set_collect_type_profile)
FLAG_ACCESSOR(kIsAsmWasmBroken, is_asm_wasm_broken, set_asm_wasm_broken)
+ FLAG_ACCESSOR(kContainsAsmModule, contains_asm_module,
+ set_contains_asm_module)
FLAG_ACCESSOR(kBlockCoverageEnabled, block_coverage_enabled,
set_block_coverage_enabled)
FLAG_ACCESSOR(kOnBackgroundThread, on_background_thread,
@@ -89,9 +91,30 @@ class V8_EXPORT_PRIVATE ParseInfo {
set_wrapped_as_function)
FLAG_ACCESSOR(kAllowEvalCache, allow_eval_cache, set_allow_eval_cache)
FLAG_ACCESSOR(kIsDeclaration, is_declaration, set_declaration)
- FLAG_ACCESSOR(kRequiresInstanceFieldsInitializer,
- requires_instance_fields_initializer,
- set_requires_instance_fields_initializer);
+ FLAG_ACCESSOR(kRequiresInstanceMembersInitializer,
+ requires_instance_members_initializer,
+ set_requires_instance_members_initializer);
+ FLAG_ACCESSOR(kMightAlwaysOpt, might_always_opt, set_might_always_opt)
+ FLAG_ACCESSOR(kAllowNativeSyntax, allow_natives_syntax,
+ set_allow_natives_syntax)
+ FLAG_ACCESSOR(kAllowLazyCompile, allow_lazy_compile, set_allow_lazy_compile)
+ FLAG_ACCESSOR(kAllowHarmonyPublicFields, allow_harmony_public_fields,
+ set_allow_harmony_public_fields);
+ FLAG_ACCESSOR(kAllowHarmonyStaticFields, allow_harmony_static_fields,
+ set_allow_harmony_static_fields);
+ FLAG_ACCESSOR(kAllowHarmonyDynamicImport, allow_harmony_dynamic_import,
+ set_allow_harmony_dynamic_import);
+ FLAG_ACCESSOR(kAllowHarmonyImportMeta, allow_harmony_import_meta,
+ set_allow_harmony_import_meta);
+ FLAG_ACCESSOR(kAllowHarmonyNumericSeparator, allow_harmony_numeric_separator,
+ set_allow_harmony_numeric_separator);
+ FLAG_ACCESSOR(kAllowHarmonyPrivateFields, allow_harmony_private_fields,
+ set_allow_harmony_private_fields);
+ FLAG_ACCESSOR(kAllowHarmonyPrivateMethods, allow_harmony_private_methods,
+ set_allow_harmony_private_methods);
+ FLAG_ACCESSOR(kIsOneshotIIFE, is_oneshot_iife, set_is_oneshot_iife);
#undef FLAG_ACCESSOR
void set_parse_restriction(ParseRestriction restriction) {
@@ -113,12 +136,11 @@ class V8_EXPORT_PRIVATE ParseInfo {
v8::Extension* extension() const { return extension_; }
void set_extension(v8::Extension* extension) { extension_ = extension; }
- void set_consumed_preparsed_scope_data(
- std::unique_ptr<ConsumedPreParsedScopeData> data) {
- consumed_preparsed_scope_data_.swap(data);
+ void set_consumed_preparse_data(std::unique_ptr<ConsumedPreparseData> data) {
+ consumed_preparse_data_.swap(data);
}
- ConsumedPreParsedScopeData* consumed_preparsed_scope_data() {
- return consumed_preparsed_scope_data_.get();
+ ConsumedPreparseData* consumed_preparse_data() {
+ return consumed_preparse_data_.get();
}
DeclarationScope* script_scope() const { return script_scope_; }
@@ -141,11 +163,6 @@ class V8_EXPORT_PRIVATE ParseInfo {
DeclarationScope* scope() const;
- UnicodeCache* unicode_cache() const { return unicode_cache_; }
- void set_unicode_cache(UnicodeCache* unicode_cache) {
- unicode_cache_ = unicode_cache;
- }
-
uintptr_t stack_limit() const { return stack_limit_; }
void set_stack_limit(uintptr_t stack_limit) { stack_limit_ = stack_limit; }
@@ -205,6 +222,31 @@ class V8_EXPORT_PRIVATE ParseInfo {
return &pending_error_handler_;
}
+ class ParallelTasks {
+ public:
+ explicit ParallelTasks(CompilerDispatcher* compiler_dispatcher)
+ : dispatcher_(compiler_dispatcher) {
+ DCHECK(dispatcher_);
+ }
+
+ void Enqueue(ParseInfo* outer_parse_info, const AstRawString* function_name,
+ FunctionLiteral* literal);
+
+ typedef std::forward_list<std::pair<FunctionLiteral*, uintptr_t>>::iterator
+ EnqueuedJobsIterator;
+
+ EnqueuedJobsIterator begin() { return enqueued_jobs_.begin(); }
+ EnqueuedJobsIterator end() { return enqueued_jobs_.end(); }
+
+ CompilerDispatcher* dispatcher() { return dispatcher_; }
+
+ private:
+ CompilerDispatcher* dispatcher_;
+ std::forward_list<std::pair<FunctionLiteral*, uintptr_t>> enqueued_jobs_;
+ };
+
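+ // (Sketch: when the compiler dispatcher is enabled, the parser calls
+ // parallel_tasks()->Enqueue(...) for eligible inner functions so they can
+ // be compiled on background threads; the recorded (literal, job id) pairs
+ // are drained later via begin()/end().)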
+ ParallelTasks* parallel_tasks() { return parallel_tasks_.get(); }
+
//--------------------------------------------------------------------------
// TODO(titzer): these should not be part of ParseInfo.
//--------------------------------------------------------------------------
@@ -257,7 +299,19 @@ class V8_EXPORT_PRIVATE ParseInfo {
kWrappedAsFunction = 1 << 14, // Implicitly wrapped as function.
kAllowEvalCache = 1 << 15,
kIsDeclaration = 1 << 16,
- kRequiresInstanceFieldsInitializer = 1 << 17,
+ kRequiresInstanceMembersInitializer = 1 << 17,
+ kContainsAsmModule = 1 << 18,
+ kMightAlwaysOpt = 1 << 19,
+ kAllowLazyCompile = 1 << 20,
+ kAllowNativeSyntax = 1 << 21,
+ kAllowHarmonyPublicFields = 1 << 22,
+ kAllowHarmonyStaticFields = 1 << 23,
+ kAllowHarmonyDynamicImport = 1 << 24,
+ kAllowHarmonyImportMeta = 1 << 25,
+ kAllowHarmonyNumericSeparator = 1 << 26,
+ kAllowHarmonyPrivateFields = 1 << 27,
+ kAllowHarmonyPrivateMethods = 1 << 28,
+ kIsOneshotIIFE = 1 << 29
};
//------------- Inputs to parsing and scope analysis -----------------------
@@ -265,7 +319,6 @@ class V8_EXPORT_PRIVATE ParseInfo {
unsigned flags_;
v8::Extension* extension_;
DeclarationScope* script_scope_;
- UnicodeCache* unicode_cache_;
uintptr_t stack_limit_;
uint64_t hash_seed_;
FunctionKind function_kind_;
@@ -282,13 +335,14 @@ class V8_EXPORT_PRIVATE ParseInfo {
//----------- Inputs+Outputs of parsing and scope analysis -----------------
std::unique_ptr<Utf16CharacterStream> character_stream_;
- std::unique_ptr<ConsumedPreParsedScopeData> consumed_preparsed_scope_data_;
+ std::unique_ptr<ConsumedPreparseData> consumed_preparse_data_;
std::unique_ptr<AstValueFactory> ast_value_factory_;
const class AstStringConstants* ast_string_constants_;
const AstRawString* function_name_;
RuntimeCallStats* runtime_call_stats_;
Logger* logger_;
SourceRangeMap* source_range_map_; // Used when block coverage is enabled.
+ std::unique_ptr<ParallelTasks> parallel_tasks_;
//----------- Output of parsing and scope analysis ------------------------
FunctionLiteral* literal_;
diff --git a/deps/v8/src/parsing/parser-base.h b/deps/v8/src/parsing/parser-base.h
index bfb056e0c8..33c165cd92 100644
--- a/deps/v8/src/parsing/parser-base.h
+++ b/deps/v8/src/parsing/parser-base.h
@@ -18,11 +18,12 @@
#include "src/counters.h"
#include "src/globals.h"
#include "src/log.h"
-#include "src/messages.h"
-#include "src/parsing/expression-classifier.h"
+#include "src/message-template.h"
+#include "src/parsing/expression-scope.h"
#include "src/parsing/func-name-inferrer.h"
#include "src/parsing/scanner.h"
#include "src/parsing/token.h"
+#include "src/pointer-with-payload.h"
#include "src/zone/zone-chunk-list.h"
namespace v8 {
@@ -39,6 +40,8 @@ enum AllowLabelledFunctionStatement {
kDisallowLabelledFunctionStatement,
};
+enum ParsingArrowHeadFlag { kCertainlyNotArrowHead, kMaybeArrowHead };
+
enum class ParseFunctionFlag : uint8_t {
kIsNormal = 0,
kIsGenerator = 1 << 0,
@@ -74,77 +77,39 @@ struct FormalParametersBase {
// Stack-allocated scope to collect source ranges from the parser.
class SourceRangeScope final {
public:
- enum PositionKind {
- POSITION_BEG,
- POSITION_END,
- PEEK_POSITION_BEG,
- PEEK_POSITION_END,
- };
-
- SourceRangeScope(Scanner* scanner, SourceRange* range,
- PositionKind pre_kind = PEEK_POSITION_BEG,
- PositionKind post_kind = POSITION_END)
- : scanner_(scanner), range_(range), post_kind_(post_kind) {
- range_->start = GetPosition(pre_kind);
+ SourceRangeScope(const Scanner* scanner, SourceRange* range)
+ : scanner_(scanner), range_(range) {
+ range_->start = scanner->peek_location().beg_pos;
DCHECK_NE(range_->start, kNoSourcePosition);
+ DCHECK_EQ(range_->end, kNoSourcePosition);
}
- ~SourceRangeScope() { Finalize(); }
-
- const SourceRange& Finalize() {
- if (is_finalized_) return *range_;
- is_finalized_ = true;
- range_->end = GetPosition(post_kind_);
+ ~SourceRangeScope() {
+ DCHECK_EQ(kNoSourcePosition, range_->end);
+ range_->end = scanner_->location().end_pos;
DCHECK_NE(range_->end, kNoSourcePosition);
- return *range_;
}
private:
- int32_t GetPosition(PositionKind kind) {
- switch (kind) {
- case POSITION_BEG:
- return scanner_->location().beg_pos;
- case POSITION_END:
- return scanner_->location().end_pos;
- case PEEK_POSITION_BEG:
- return scanner_->peek_location().beg_pos;
- case PEEK_POSITION_END:
- return scanner_->peek_location().end_pos;
- default:
- UNREACHABLE();
- }
- }
-
- Scanner* scanner_;
+ const Scanner* scanner_;
SourceRange* range_;
- PositionKind post_kind_;
- bool is_finalized_ = false;
DISALLOW_IMPLICIT_CONSTRUCTORS(SourceRangeScope);
};
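+// Usage sketch (illustrative): stack-allocate around exactly one construct.
+//   SourceRange range;
+//   {
+//     SourceRangeScope scope(scanner, &range);  // start = next token's start
+//     ParseSomething();
+//   }  // destructor sets end = last consumed token's end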
// ----------------------------------------------------------------------------
-// The CHECK_OK macro is a convenient macro to enforce error
-// handling for functions that may fail (by returning !*ok).
+// The RETURN_IF_PARSE_ERROR macro is a convenient way to enforce error
+// handling in functions that may fail, by returning early if a parse error
+// occurred.
//
-// CAUTION: This macro appends extra statements after a call,
-// thus it must never be used where only a single statement
-// is correct (e.g. an if statement branch w/o braces)!
-
-#define CHECK_OK_CUSTOM(x, ...) ok); \
- if (!*ok) return impl()->x(__VA_ARGS__); \
- ((void)0
-#define DUMMY ) // to make indentation work
-#undef DUMMY
-
-// Used in functions where the return type is ExpressionT.
-#define CHECK_OK CHECK_OK_CUSTOM(NullExpression)
+// Usage:
+// foo = ParseFoo(); // may fail
+// RETURN_IF_PARSE_ERROR
+//
+// SAFE_USE(foo);
-#define CHECK_OK_VOID ok); \
- if (!*ok) return; \
- ((void)0
-#define DUMMY ) // to make indentation work
-#undef DUMMY
+#define RETURN_IF_PARSE_ERROR \
+ if (has_error()) return impl()->NullStatement();
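+
+// (Note: as defined here the macro suits parse functions that return a
+// statement type; an analogous variant for expression-returning functions
+// would return impl()->NullExpression() instead.)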
// Common base class template shared between parser and pre-parser.
// The Impl parameter is the actual class of the parser/pre-parser,
@@ -217,8 +182,10 @@ enum class ParsePropertyKind : uint8_t {
kAccessorSetter,
kValue,
kShorthand,
+ kAssign,
kMethod,
kClassField,
+ kShorthandOrClassField,
kSpread,
kNotSet
};
@@ -228,25 +195,43 @@ class ParserBase {
public:
// Shorten type names defined by ParserTypes<Impl>.
typedef ParserTypes<Impl> Types;
- typedef typename Types::Identifier IdentifierT;
- typedef typename Types::Expression ExpressionT;
- typedef typename Types::FunctionLiteral FunctionLiteralT;
- typedef typename Types::ObjectLiteralProperty ObjectLiteralPropertyT;
+ typedef typename v8::internal::ExpressionScope<Types> ExpressionScope;
+ typedef typename v8::internal::ExpressionParsingScope<Types>
+ ExpressionParsingScope;
+ typedef typename v8::internal::AccumulationScope<Types> AccumulationScope;
+ typedef typename v8::internal::ArrowHeadParsingScope<Types>
+ ArrowHeadParsingScope;
+ typedef typename v8::internal::VariableDeclarationParsingScope<Types>
+ VariableDeclarationParsingScope;
+ typedef typename v8::internal::ParameterDeclarationParsingScope<Types>
+ ParameterDeclarationParsingScope;
+
+ // Return types for traversing functions.
+ typedef typename Types::Block BlockT;
+ typedef typename Types::BreakableStatement BreakableStatementT;
typedef typename Types::ClassLiteralProperty ClassLiteralPropertyT;
- typedef typename Types::Suspend SuspendExpressionT;
- typedef typename Types::RewritableExpression RewritableExpressionT;
+ typedef typename Types::ClassPropertyList ClassPropertyListT;
+ typedef typename Types::Expression ExpressionT;
typedef typename Types::ExpressionList ExpressionListT;
typedef typename Types::FormalParameters FormalParametersT;
+ typedef typename Types::ForStatement ForStatementT;
+ typedef typename Types::FunctionLiteral FunctionLiteralT;
+ typedef typename Types::Identifier IdentifierT;
+ typedef typename Types::IterationStatement IterationStatementT;
+ typedef typename Types::ObjectLiteralProperty ObjectLiteralPropertyT;
+ typedef typename Types::ObjectPropertyList ObjectPropertyListT;
typedef typename Types::Statement StatementT;
typedef typename Types::StatementList StatementListT;
- typedef typename Types::Block BlockT;
- typedef typename Types::ForStatement ForStatementT;
- typedef typename v8::internal::ExpressionClassifier<Types>
- ExpressionClassifier;
+ typedef typename Types::Suspend SuspendExpressionT;
+ // For constructing objects returned by the traversing functions.
+ typedef typename Types::Factory FactoryT;
+ // Other implementation-specific tasks.
typedef typename Types::FuncNameInferrer FuncNameInferrer;
typedef typename Types::FuncNameInferrer::State FuncNameInferrerState;
typedef typename Types::SourceRange SourceRange;
typedef typename Types::SourceRangeScope SourceRangeScope;
+ typedef typename Types::Target TargetT;
+ typedef typename Types::TargetScope TargetScopeT;
// All implementation-specific methods must be called through this.
Impl* impl() { return static_cast<Impl*>(this); }
@@ -261,7 +246,7 @@ class ParserBase {
original_scope_(nullptr),
function_state_(nullptr),
extension_(extension),
- fni_(ast_value_factory, zone),
+ fni_(ast_value_factory),
ast_value_factory_(ast_value_factory),
ast_node_factory_(ast_value_factory, zone),
runtime_call_stats_(runtime_call_stats),
@@ -271,34 +256,38 @@ class ParserBase {
stack_limit_(stack_limit),
pending_error_handler_(pending_error_handler),
zone_(zone),
- classifier_(nullptr),
+ expression_scope_(nullptr),
scanner_(scanner),
- default_eager_compile_hint_(FunctionLiteral::kShouldLazyCompile),
function_literal_id_(0),
script_id_(script_id),
+ default_eager_compile_hint_(FunctionLiteral::kShouldLazyCompile),
allow_natives_(false),
- allow_harmony_do_expressions_(false),
allow_harmony_public_fields_(false),
allow_harmony_static_fields_(false),
allow_harmony_dynamic_import_(false),
allow_harmony_import_meta_(false),
allow_harmony_private_fields_(false),
- allow_eval_cache_(true) {}
+ allow_harmony_private_methods_(false),
+ allow_eval_cache_(true) {
+ pointer_buffer_.reserve(32);
+ variable_buffer_.reserve(32);
+ }
#define ALLOW_ACCESSORS(name) \
bool allow_##name() const { return allow_##name##_; } \
void set_allow_##name(bool allow) { allow_##name##_ = allow; }
ALLOW_ACCESSORS(natives);
- ALLOW_ACCESSORS(harmony_do_expressions);
ALLOW_ACCESSORS(harmony_public_fields);
ALLOW_ACCESSORS(harmony_static_fields);
ALLOW_ACCESSORS(harmony_dynamic_import);
ALLOW_ACCESSORS(harmony_import_meta);
+ ALLOW_ACCESSORS(harmony_private_methods);
ALLOW_ACCESSORS(eval_cache);
#undef ALLOW_ACCESSORS
+ V8_INLINE bool has_error() const { return scanner()->has_parser_error(); }
bool allow_harmony_numeric_separator() const {
return scanner()->allow_harmony_numeric_separator();
}
@@ -326,6 +315,10 @@ class ParserBase {
return default_eager_compile_hint_;
}
+ int loop_nesting_depth() const {
+ return function_state_->loop_nesting_depth();
+ }
+
int GetNextFunctionLiteralId() { return ++function_literal_id_; }
int GetLastFunctionLiteralId() const { return function_literal_id_; }
@@ -340,14 +333,9 @@ class ParserBase {
Zone* zone() const { return zone_; }
protected:
- friend class v8::internal::ExpressionClassifier<ParserTypes<Impl>>;
-
- enum AllowRestrictedIdentifiers {
- kAllowRestrictedIdentifiers,
- kDontAllowRestrictedIdentifiers
- };
-
- enum LazyParsingResult { kLazyParsingComplete, kLazyParsingAborted };
+ friend class v8::internal::ExpressionScope<ParserTypes<Impl>>;
+ friend class v8::internal::ExpressionParsingScope<ParserTypes<Impl>>;
+ friend class v8::internal::ArrowHeadParsingScope<ParserTypes<Impl>>;
enum VariableDeclarationContext {
kStatementListItem,
@@ -356,7 +344,6 @@ class ParserBase {
};
class ClassLiteralChecker;
- class ObjectLiteralChecker;
// ---------------------------------------------------------------------------
// BlockState and FunctionState implement the parser's scope stack.
@@ -403,33 +390,6 @@ class ParserBase {
FunctionKind kind() const { return scope()->function_kind(); }
- void RewindDestructuringAssignments(int pos) {
- destructuring_assignments_to_rewrite_.Rewind(pos);
- }
-
- void AdoptDestructuringAssignmentsFromParentState(int pos) {
- const auto& outer_assignments =
- outer_function_state_->destructuring_assignments_to_rewrite_;
- DCHECK_GE(outer_assignments.size(), pos);
- auto it = outer_assignments.begin();
- it.Advance(pos);
- for (; it != outer_assignments.end(); ++it) {
- auto expr = *it;
- expr->set_scope(scope_);
- destructuring_assignments_to_rewrite_.push_back(expr);
- }
- outer_function_state_->RewindDestructuringAssignments(pos);
- }
-
- const ZoneChunkList<RewritableExpressionT>&
- destructuring_assignments_to_rewrite() const {
- return destructuring_assignments_to_rewrite_;
- }
-
- ZoneList<typename ExpressionClassifier::Error>* GetReportedErrorList() {
- return &reported_errors_;
- }
-
bool next_function_is_likely_called() const {
return next_function_is_likely_called_;
}
@@ -450,41 +410,50 @@ class ParserBase {
class FunctionOrEvalRecordingScope {
public:
explicit FunctionOrEvalRecordingScope(FunctionState* state)
- : state_(state) {
- prev_value_ = state->contains_function_or_eval_;
+ : state_and_prev_value_(state, state->contains_function_or_eval_) {
state->contains_function_or_eval_ = false;
}
~FunctionOrEvalRecordingScope() {
- bool found = state_->contains_function_or_eval_;
+ bool found = state_and_prev_value_->contains_function_or_eval_;
if (!found) {
- state_->contains_function_or_eval_ = prev_value_;
+ state_and_prev_value_->contains_function_or_eval_ =
+ state_and_prev_value_.GetPayload();
}
}
private:
- FunctionState* state_;
- bool prev_value_;
+ PointerWithPayload<FunctionState, bool, 1> state_and_prev_value_;
};
- private:
- void AddDestructuringAssignment(RewritableExpressionT expr) {
- destructuring_assignments_to_rewrite_.push_back(expr);
- }
+ class LoopScope {
+ public:
+ explicit LoopScope(FunctionState* function_state)
+ : function_state_(function_state) {
+ function_state_->loop_nesting_depth_++;
+ }
+ ~LoopScope() { function_state_->loop_nesting_depth_--; }
+
+ private:
+ FunctionState* function_state_;
+ };
+
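+ // (Sketch: loop-parsing code stack-allocates a LoopScope so that
+ // loop_nesting_depth() reflects how many loops enclose the position
+ // currently being parsed.)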
+ int loop_nesting_depth() const { return loop_nesting_depth_; }
+
+ private:
// Properties count estimation.
int expected_property_count_;
// How many suspends are needed for this function.
int suspend_count_;
+ // How deeply nested in loops we currently are in this function.
+ int loop_nesting_depth_ = 0;
+
FunctionState** function_state_stack_;
FunctionState* outer_function_state_;
DeclarationScope* scope_;
- ZoneChunkList<RewritableExpressionT> destructuring_assignments_to_rewrite_;
-
- ZoneList<typename ExpressionClassifier::Error> reported_errors_;
-
// A reason, if any, why this function should not be optimized.
BailoutReason dont_optimize_reason_;
@@ -503,12 +472,10 @@ class ParserBase {
};
struct DeclarationDescriptor {
- enum Kind { NORMAL, PARAMETER, FOR_EACH };
- Scope* scope;
VariableMode mode;
+ VariableKind kind;
int declaration_pos;
int initialization_pos;
- Kind declaration_kind;
};
struct DeclarationParsingResult {
@@ -538,18 +505,12 @@ class ParserBase {
struct CatchInfo {
public:
explicit CatchInfo(ParserBase* parser)
- : name(parser->impl()->NullIdentifier()),
- pattern(parser->impl()->NullExpression()),
- scope(nullptr),
- init_block(parser->impl()->NullStatement()),
- inner_block(parser->impl()->NullStatement()),
- bound_names(1, parser->zone()) {}
- IdentifierT name;
+ : pattern(parser->impl()->NullExpression()),
+ variable(nullptr),
+ scope(nullptr) {}
ExpressionT pattern;
+ Variable* variable;
Scope* scope;
- BlockT init_block;
- BlockT inner_block;
- ZonePtrList<const AstRawString> bound_names;
};
struct ForInfo {
@@ -578,29 +539,102 @@ class ParserBase {
has_name_static_property(false),
has_static_computed_names(false),
has_static_class_fields(false),
- has_instance_class_fields(false),
+ has_instance_members(false),
is_anonymous(false),
static_fields_scope(nullptr),
- instance_fields_scope(nullptr),
+ instance_members_scope(nullptr),
computed_field_count(0) {}
Variable* variable;
ExpressionT extends;
- typename Types::ClassPropertyList properties;
- typename Types::ClassPropertyList static_fields;
- typename Types::ClassPropertyList instance_fields;
+ ClassPropertyListT properties;
+ ClassPropertyListT static_fields;
+ ClassPropertyListT instance_fields;
FunctionLiteralT constructor;
bool has_seen_constructor;
bool has_name_static_property;
bool has_static_computed_names;
bool has_static_class_fields;
- bool has_instance_class_fields;
+ bool has_instance_members;
bool is_anonymous;
DeclarationScope* static_fields_scope;
- DeclarationScope* instance_fields_scope;
+ DeclarationScope* instance_members_scope;
int computed_field_count;
};
+ enum class PropertyPosition { kObjectLiteral, kClassLiteral };
+ struct ParsePropertyInfo {
+ public:
+ explicit ParsePropertyInfo(ParserBase* parser,
+ AccumulationScope* accumulation_scope = nullptr)
+ : accumulation_scope(accumulation_scope),
+ name(parser->impl()->NullIdentifier()),
+ position(PropertyPosition::kClassLiteral),
+ function_flags(ParseFunctionFlag::kIsNormal),
+ kind(ParsePropertyKind::kNotSet),
+ is_computed_name(false),
+ is_private(false),
+ is_static(false),
+ is_rest(false) {}
+
+ bool ParsePropertyKindFromToken(Token::Value token) {
+ // This returns true, setting the property kind, iff the given token is
+ // one which must occur after a property name, indicating that the
+ // previous token was in fact a name and not a modifier (like the "get" in
+ // "get x").
+ switch (token) {
+ case Token::COLON:
+ kind = ParsePropertyKind::kValue;
+ return true;
+ case Token::COMMA:
+ kind = ParsePropertyKind::kShorthand;
+ return true;
+ case Token::RBRACE:
+ kind = ParsePropertyKind::kShorthandOrClassField;
+ return true;
+ case Token::ASSIGN:
+ kind = ParsePropertyKind::kAssign;
+ return true;
+ case Token::LPAREN:
+ kind = ParsePropertyKind::kMethod;
+ return true;
+ case Token::MUL:
+ case Token::SEMICOLON:
+ kind = ParsePropertyKind::kClassField;
+ return true;
+ default:
+ break;
+ }
+ return false;
+ }
+
+ AccumulationScope* accumulation_scope;
+ IdentifierT name;
+ PropertyPosition position;
+ ParseFunctionFlags function_flags;
+ ParsePropertyKind kind;
+ bool is_computed_name;
+ bool is_private;
+ bool is_static;
+ bool is_rest;
+ };
+
+ ClassLiteralProperty::Kind ClassPropertyKindFor(ParsePropertyKind kind) {
+ switch (kind) {
+ case ParsePropertyKind::kAccessorGetter:
+ return ClassLiteralProperty::GETTER;
+ case ParsePropertyKind::kAccessorSetter:
+ return ClassLiteralProperty::SETTER;
+ case ParsePropertyKind::kMethod:
+ return ClassLiteralProperty::METHOD;
+ case ParsePropertyKind::kClassField:
+ return ClassLiteralProperty::FIELD;
+ default:
+ // Only returns for kinds that map deterministically to a class-literal kind.
+ UNREACHABLE();
+ }
+ }
+
const AstRawString* ClassFieldVariableName(AstValueFactory* ast_value_factory,
int index) {
std::string name = ".class-field-" + std::to_string(index);
@@ -667,6 +701,22 @@ class ParserBase {
return scope()->GetClosureScope();
}
+ VariableProxy* NewRawVariable(const AstRawString* name, int pos) {
+ return factory()->ast_node_factory()->NewVariableProxy(
+ name, NORMAL_VARIABLE, pos);
+ }
+
+ VariableProxy* NewUnresolved(const AstRawString* name) {
+ return scope()->NewUnresolved(factory()->ast_node_factory(), name,
+ scanner()->location().beg_pos);
+ }
+
+ VariableProxy* NewUnresolved(const AstRawString* name, int begin_pos,
+ VariableKind kind = NORMAL_VARIABLE) {
+ return scope()->NewUnresolved(factory()->ast_node_factory(), name,
+ begin_pos, kind);
+ }
+
Scanner* scanner() const { return scanner_; }
AstValueFactory* ast_value_factory() const { return ast_value_factory_; }
int position() const { return scanner_->location().beg_pos; }
@@ -676,14 +726,18 @@ class ParserBase {
bool stack_overflow() const {
return pending_error_handler()->stack_overflow();
}
- void set_stack_overflow() { pending_error_handler()->set_stack_overflow(); }
+ void set_stack_overflow() {
+ scanner_->set_parser_error();
+ pending_error_handler()->set_stack_overflow();
+ }
+ void CheckStackOverflow() {
+ // Any further calls to Next or peek will return the illegal token.
+ if (GetCurrentStackPosition() < stack_limit_) set_stack_overflow();
+ }
int script_id() { return script_id_; }
void set_script_id(int id) { script_id_ = id; }
- V8_INLINE Token::Value peek() {
- if (stack_overflow()) return Token::ILLEGAL;
- return scanner()->peek();
- }
+ V8_INLINE Token::Value peek() { return scanner()->peek(); }
// Returns the position past the following semicolon (if it exists), and the
// position past the end of the current token otherwise.
@@ -691,33 +745,19 @@ class ParserBase {
return (peek() == Token::SEMICOLON) ? peek_end_position() : end_position();
}
- V8_INLINE Token::Value PeekAhead() {
- if (stack_overflow()) return Token::ILLEGAL;
- return scanner()->PeekAhead();
- }
+ V8_INLINE Token::Value PeekAhead() { return scanner()->PeekAhead(); }
- V8_INLINE Token::Value Next() {
- if (stack_overflow()) return Token::ILLEGAL;
- {
- if (GetCurrentStackPosition() < stack_limit_) {
- // Any further calls to Next or peek will return the illegal token.
- // The current call must return the next token, which might already
- // have been peek'ed.
- set_stack_overflow();
- }
- }
- return scanner()->Next();
- }
+ V8_INLINE Token::Value Next() { return scanner()->Next(); }
- void Consume(Token::Value token) {
- Token::Value next = Next();
+ V8_INLINE void Consume(Token::Value token) {
+ Token::Value next = scanner()->Next();
USE(next);
USE(token);
- DCHECK_EQ(next, token);
+ DCHECK_IMPLIES(!has_error(), next == token);
}
- bool Check(Token::Value token) {
- Token::Value next = peek();
+ V8_INLINE bool Check(Token::Value token) {
+ Token::Value next = scanner()->peek();
if (next == token) {
Consume(next);
return true;
@@ -725,28 +765,26 @@ class ParserBase {
return false;
}
- void Expect(Token::Value token, bool* ok) {
+ void Expect(Token::Value token) {
Token::Value next = Next();
- if (next != token) {
+ if (V8_UNLIKELY(next != token)) {
ReportUnexpectedToken(next);
- *ok = false;
}
}
- void ExpectSemicolon(bool* ok) {
+ void ExpectSemicolon() {
// Check for automatic semicolon insertion according to
// the rules given in ECMA-262, section 7.9, page 21.
Token::Value tok = peek();
- if (tok == Token::SEMICOLON) {
+ if (V8_LIKELY(tok == Token::SEMICOLON)) {
Next();
return;
}
- if (scanner()->HasLineTerminatorBeforeNext() || tok == Token::RBRACE ||
- tok == Token::EOS) {
+ if (V8_LIKELY(scanner()->HasLineTerminatorBeforeNext() ||
+ Token::IsAutoSemicolon(tok))) {
return;
}
- *ok = false;
if (scanner()->current_token() == Token::AWAIT && !is_async_function()) {
ReportMessageAt(scanner()->location(),
MessageTemplate::kAwaitNotInAsyncFunction, kSyntaxError);
@@ -756,38 +794,28 @@ class ParserBase {
ReportUnexpectedToken(Next());
}
- // Dummy functions, just useful as arguments to CHECK_OK_CUSTOM.
- static void Void() {}
- template <typename T>
- static T Return(T result) {
- return result;
- }
-
bool peek_any_identifier() { return Token::IsAnyIdentifier(peek()); }
- bool CheckContextualKeyword(Token::Value token) {
- if (PeekContextualKeyword(token)) {
+ bool PeekContextualKeyword(const AstRawString* name) {
+ return peek() == Token::IDENTIFIER &&
+ scanner()->NextSymbol(ast_value_factory()) == name;
+ }
+
+ bool CheckContextualKeyword(const AstRawString* name) {
+ if (PeekContextualKeyword(name)) {
Consume(Token::IDENTIFIER);
return true;
}
return false;
}
- bool PeekContextualKeyword(Token::Value token) {
- DCHECK(Token::IsContextualKeyword(token));
- return peek() == Token::IDENTIFIER &&
- scanner()->next_contextual_token() == token;
- }
-
- void ExpectMetaProperty(Token::Value property_name, const char* full_name,
- int pos, bool* ok);
+ void ExpectMetaProperty(const AstRawString* property_name,
+ const char* full_name, int pos);
- void ExpectContextualKeyword(Token::Value token, bool* ok) {
- DCHECK(Token::IsContextualKeyword(token));
- Expect(Token::IDENTIFIER, CHECK_OK_CUSTOM(Void));
- if (scanner()->current_contextual_token() != token) {
+ void ExpectContextualKeyword(const AstRawString* name) {
+ Expect(Token::IDENTIFIER);
+ if (V8_UNLIKELY(scanner()->CurrentSymbol(ast_value_factory()) != name)) {
ReportUnexpectedToken(scanner()->current_token());
- *ok = false;
}
}
@@ -795,7 +823,7 @@ class ParserBase {
if (Check(Token::IN)) {
*visit_mode = ForEachStatement::ENUMERATE;
return true;
- } else if (CheckContextualKeyword(Token::OF)) {
+ } else if (CheckContextualKeyword(ast_value_factory()->of_string())) {
*visit_mode = ForEachStatement::ITERATE;
return true;
}
@@ -803,23 +831,23 @@ class ParserBase {
}
bool PeekInOrOf() {
- return peek() == Token::IN || PeekContextualKeyword(Token::OF);
+ return peek() == Token::IN ||
+ PeekContextualKeyword(ast_value_factory()->of_string());
}
// Checks whether an octal literal was last seen between beg_pos and end_pos.
// Only called for strict mode strings.
- void CheckStrictOctalLiteral(int beg_pos, int end_pos, bool* ok) {
+ void CheckStrictOctalLiteral(int beg_pos, int end_pos) {
Scanner::Location octal = scanner()->octal_position();
if (octal.IsValid() && beg_pos <= octal.beg_pos &&
octal.end_pos <= end_pos) {
- MessageTemplate::Template message = scanner()->octal_message();
+ MessageTemplate message = scanner()->octal_message();
DCHECK_NE(message, MessageTemplate::kNone);
impl()->ReportMessageAt(octal, message);
scanner()->clear_octal_position();
if (message == MessageTemplate::kStrictDecimalWithLeadingZero) {
impl()->CountUsage(v8::Isolate::kDecimalWithLeadingZeroInStrictMode);
}
- *ok = false;
}
}
@@ -827,29 +855,29 @@ class ParserBase {
// appears in the current template literal token. In the presence of such,
// either returns false or reports an error, depending on should_throw.
// Otherwise returns true.
- inline bool CheckTemplateEscapes(bool should_throw, bool* ok) {
- DCHECK(scanner()->current_token() == Token::TEMPLATE_SPAN ||
- scanner()->current_token() == Token::TEMPLATE_TAIL);
- if (!scanner()->has_invalid_template_escape()) {
- return true;
- }
+ inline bool CheckTemplateEscapes(bool should_throw) {
+ DCHECK(Token::IsTemplate(scanner()->current_token()));
+ if (!scanner()->has_invalid_template_escape()) return true;
// Handle error case(s)
if (should_throw) {
impl()->ReportMessageAt(scanner()->invalid_template_escape_location(),
scanner()->invalid_template_escape_message());
- *ok = false;
}
- return false;
+ scanner()->clear_invalid_template_escape_message();
+ return should_throw;
}
- void CheckDestructuringElement(ExpressionT element, int beg_pos, int end_pos);
+ ExpressionT ParsePossibleDestructuringSubPattern(AccumulationScope* scope);
+ void ClassifyParameter(IdentifierT parameter, int beg_pos, int end_pos);
+ void ClassifyArrowParameter(AccumulationScope* accumulation_scope,
+ int position, ExpressionT parameter);
// Checking the name of a function literal. This has to be done after parsing
// the function, since the function can declare itself strict.
void CheckFunctionName(LanguageMode language_mode, IdentifierT function_name,
FunctionNameValidity function_name_validity,
- const Scanner::Location& function_name_loc, bool* ok) {
+ const Scanner::Location& function_name_loc) {
if (impl()->IsNull(function_name)) return;
if (function_name_validity == kSkipFunctionNameCheck) return;
// The function name needs to be checked in strict mode.
@@ -858,24 +886,15 @@ class ParserBase {
if (impl()->IsEvalOrArguments(function_name)) {
impl()->ReportMessageAt(function_name_loc,
MessageTemplate::kStrictEvalArguments);
- *ok = false;
return;
}
if (function_name_validity == kFunctionNameIsStrictReserved) {
impl()->ReportMessageAt(function_name_loc,
MessageTemplate::kUnexpectedStrictReserved);
- *ok = false;
return;
}
}
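
The bulk of this patch removes the threaded bool* ok out-parameter (and the CHECK_OK macro family) in favor of recording errors on the parser and returning failure sentinels such as FailureExpression(). A hedged, self-contained sketch of the before/after shape, with invented names:

    #include <iostream>
    #include <string>

    // Old style: every production threads a bool* ok out-parameter, and
    // every caller must test it after each call (the CHECK_OK macros).
    int ParseOldStyle(bool* ok) {
      *ok = false;  // failure is signalled through the out-parameter
      return 0;
    }

    // New style: failure is recorded once on the parser object,
    // productions return a failure sentinel (like FailureExpression()),
    // and callers poll the error state at synchronization points.
    struct MiniParser {
      bool has_error = false;
      std::string message;

      void ReportMessage(const std::string& m) {
        has_error = true;
        message = m;
      }
      int Parse() {
        ReportMessage("unexpected token");
        return -1;  // failure sentinel
      }
    };

    int main() {
      bool ok = true;
      ParseOldStyle(&ok);
      std::cout << "old style: ok=" << ok << "\n";

      MiniParser parser;
      parser.Parse();
      std::cout << "new style: has_error=" << parser.has_error << " ("
                << parser.message << ")\n";
      return 0;
    }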
- // Determine precedence of given token.
- static int Precedence(Token::Value token, bool accept_IN) {
- if (token == Token::IN && !accept_IN)
- return 0; // 0 precedence will terminate binary expression parsing
- return Token::Precedence(token);
- }
-
typename Types::Factory* factory() { return &ast_node_factory_; }
DeclarationScope* GetReceiverScope() const {
@@ -907,405 +926,280 @@ class ParserBase {
}
// Report syntax errors.
- void ReportMessage(MessageTemplate::Template message) {
+ V8_NOINLINE void ReportMessage(MessageTemplate message) {
Scanner::Location source_location = scanner()->location();
impl()->ReportMessageAt(source_location, message,
static_cast<const char*>(nullptr), kSyntaxError);
}
template <typename T>
- void ReportMessage(MessageTemplate::Template message, T arg,
- ParseErrorType error_type = kSyntaxError) {
+ V8_NOINLINE void ReportMessage(MessageTemplate message, T arg,
+ ParseErrorType error_type = kSyntaxError) {
Scanner::Location source_location = scanner()->location();
impl()->ReportMessageAt(source_location, message, arg, error_type);
}
- void ReportMessageAt(Scanner::Location location,
- MessageTemplate::Template message,
- ParseErrorType error_type) {
+ V8_NOINLINE void ReportMessageAt(Scanner::Location location,
+ MessageTemplate message,
+ ParseErrorType error_type) {
impl()->ReportMessageAt(location, message,
static_cast<const char*>(nullptr), error_type);
}
- void GetUnexpectedTokenMessage(
- Token::Value token, MessageTemplate::Template* message,
- Scanner::Location* location, const char** arg,
- MessageTemplate::Template default_ = MessageTemplate::kUnexpectedToken);
+ V8_NOINLINE void ReportUnexpectedToken(Token::Value token);
- void ReportUnexpectedToken(Token::Value token);
- void ReportUnexpectedTokenAt(
- Scanner::Location location, Token::Value token,
- MessageTemplate::Template message = MessageTemplate::kUnexpectedToken);
-
- void ReportClassifierError(
- const typename ExpressionClassifier::Error& error) {
- if (classifier()->does_error_reporting()) {
- impl()->ReportMessageAt(error.location, error.message, error.arg);
- } else {
- impl()->ReportUnidentifiableError();
- }
+ void ValidateFormalParameters(LanguageMode language_mode,
+ const FormalParametersT& parameters,
+ bool allow_duplicates) {
+ if (!allow_duplicates) parameters.ValidateDuplicate(impl());
+ if (is_strict(language_mode)) parameters.ValidateStrictMode(impl());
}
- void ValidateExpression(bool* ok) {
- if (!classifier()->is_valid_expression()) {
- ReportClassifierError(classifier()->expression_error());
- *ok = false;
- }
+ V8_INLINE IdentifierT ParseAndClassifyIdentifier(Token::Value token);
+ // Parses an identifier or a strict mode future reserved word. Allows passing
+ // in function_kind for the case of parsing the identifier in a function
+ // expression, where the relevant "function_kind" bit is of the function being
+ // parsed, not the containing function.
+ V8_INLINE IdentifierT ParseIdentifier(FunctionKind function_kind);
+ V8_INLINE IdentifierT ParseIdentifier() {
+ return ParseIdentifier(function_state_->kind());
}
+ // Same as above but additionally disallows 'eval' and 'arguments' in strict
+ // mode.
+ IdentifierT ParseNonRestrictedIdentifier();
- void ValidateFormalParameterInitializer(bool* ok) {
- if (!classifier()->is_valid_formal_parameter_initializer()) {
- ReportClassifierError(classifier()->formal_parameter_initializer_error());
- *ok = false;
- }
- }
+ V8_INLINE IdentifierT ParsePropertyName();
- void ValidateBindingPattern(bool* ok) {
- if (!classifier()->is_valid_binding_pattern()) {
- ReportClassifierError(classifier()->binding_pattern_error());
- *ok = false;
- }
- }
+ ExpressionT ParsePropertyOrPrivatePropertyName();
- void ValidateAssignmentPattern(bool* ok) {
- if (!classifier()->is_valid_assignment_pattern()) {
- ReportClassifierError(classifier()->assignment_pattern_error());
- *ok = false;
- }
- }
+ ExpressionT ParseRegExpLiteral();
- void ValidateFormalParameters(LanguageMode language_mode,
- bool allow_duplicates, bool* ok) {
- if (!allow_duplicates &&
- !classifier()->is_valid_formal_parameter_list_without_duplicates()) {
- ReportClassifierError(classifier()->duplicate_formal_parameter_error());
- *ok = false;
- } else if (is_strict(language_mode) &&
- !classifier()->is_valid_strict_mode_formal_parameters()) {
- ReportClassifierError(classifier()->strict_mode_formal_parameter_error());
- *ok = false;
- }
- }
-
- bool IsValidArrowFormalParametersStart(Token::Value token) {
- return Token::IsAnyIdentifier(token) || token == Token::LPAREN;
- }
-
- void ValidateArrowFormalParameters(ExpressionT expr,
- bool parenthesized_formals, bool is_async,
- bool* ok) {
- if (classifier()->is_valid_binding_pattern()) {
- // A simple arrow formal parameter: IDENTIFIER => BODY.
- if (!impl()->IsIdentifier(expr)) {
- impl()->ReportMessageAt(scanner()->location(),
- MessageTemplate::kUnexpectedToken,
- Token::String(scanner()->current_token()));
- *ok = false;
- }
- } else if (!classifier()->is_valid_arrow_formal_parameters()) {
- // If after parsing the expr, we see an error but the expression is
- // neither a valid binding pattern nor a valid parenthesized formal
- // parameter list, show the "arrow formal parameters" error if the formals
- // started with a parenthesis, and the binding pattern error otherwise.
- const typename ExpressionClassifier::Error& error =
- parenthesized_formals ? classifier()->arrow_formal_parameters_error()
- : classifier()->binding_pattern_error();
- ReportClassifierError(error);
- *ok = false;
- }
- if (is_async && !classifier()->is_valid_async_arrow_formal_parameters()) {
- const typename ExpressionClassifier::Error& error =
- classifier()->async_arrow_formal_parameters_error();
- ReportClassifierError(error);
- *ok = false;
- }
- }
-
- void ValidateLetPattern(bool* ok) {
- if (!classifier()->is_valid_let_pattern()) {
- ReportClassifierError(classifier()->let_pattern_error());
- *ok = false;
- }
- }
-
- void BindingPatternUnexpectedToken() {
- MessageTemplate::Template message = MessageTemplate::kUnexpectedToken;
- const char* arg;
- Scanner::Location location = scanner()->peek_location();
- GetUnexpectedTokenMessage(peek(), &message, &location, &arg);
- classifier()->RecordBindingPatternError(location, message, arg);
- }
-
- void ArrowFormalParametersUnexpectedToken() {
- MessageTemplate::Template message = MessageTemplate::kUnexpectedToken;
- const char* arg;
- Scanner::Location location = scanner()->peek_location();
- GetUnexpectedTokenMessage(peek(), &message, &location, &arg);
- classifier()->RecordArrowFormalParametersError(location, message, arg);
- }
-
- // Recursive descent functions.
- // All ParseXXX functions take as the last argument an *ok parameter
- // which is set to false if parsing failed; it is unchanged otherwise.
- // By making the 'exception handling' explicit, we are forced to check
- // for failure at the call sites. The family of CHECK_OK* macros can
- // be useful for this.
-
- // Parses an identifier that is valid for the current scope, in particular it
- // fails on strict mode future reserved keywords in a strict scope. If
- // allow_eval_or_arguments is kAllowEvalOrArguments, we allow "eval" or
- // "arguments" as identifier even in strict mode (this is needed in cases like
- // "var foo = eval;").
- IdentifierT ParseIdentifier(AllowRestrictedIdentifiers, bool* ok);
- IdentifierT ParseAndClassifyIdentifier(bool* ok);
- // Parses an identifier or a strict mode future reserved word, and indicate
- // whether it is strict mode future reserved. Allows passing in function_kind
- // for the case of parsing the identifier in a function expression, where the
- // relevant "function_kind" bit is of the function being parsed, not the
- // containing function.
- IdentifierT ParseIdentifierOrStrictReservedWord(FunctionKind function_kind,
- bool* is_strict_reserved,
- bool* is_await, bool* ok);
- IdentifierT ParseIdentifierOrStrictReservedWord(bool* is_strict_reserved,
- bool* is_await, bool* ok) {
- return ParseIdentifierOrStrictReservedWord(
- function_state_->kind(), is_strict_reserved, is_await, ok);
- }
-
- V8_INLINE IdentifierT ParseIdentifierName(bool* ok);
-
- ExpressionT ParseIdentifierNameOrPrivateName(bool* ok);
-
- ExpressionT ParseRegExpLiteral(bool* ok);
-
- ExpressionT ParseBindingPattern(bool* ok);
- ExpressionT ParsePrimaryExpression(bool* is_async, bool* ok);
-
- // Use when parsing an expression that is known to not be a pattern or part
- // of a pattern.
- V8_INLINE ExpressionT ParseExpression(bool* ok);
-
- // This method does not wrap the parsing of the expression inside a
- // new expression classifier; it uses the top-level classifier instead.
- // It should be used whenever we're parsing something with the "cover"
- // grammar that recognizes both patterns and non-patterns (which roughly
- // corresponds to what's inside the parentheses generated by the symbol
+ ExpressionT ParseBindingPattern();
+ ExpressionT ParsePrimaryExpression();
+
+ // Use when parsing an expression that is known to not be a pattern or part of
+ // a pattern.
+ V8_INLINE ExpressionT ParseExpression();
+ V8_INLINE ExpressionT ParseAssignmentExpression();
+
+ // These methods do not wrap the parsing of the expression inside a new
+ // expression_scope; they use the outer expression_scope instead. They should
+ // be used whenever we're parsing something with the "cover" grammar that
+ // recognizes both patterns and non-patterns (which roughly corresponds to
+ // what's inside the parentheses generated by the symbol
// "CoverParenthesizedExpressionAndArrowParameterList" in the ES 2017
// specification).
- ExpressionT ParseExpressionCoverGrammar(bool accept_IN, bool* ok);
+ ExpressionT ParseExpressionCoverGrammar();
+ ExpressionT ParseAssignmentExpressionCoverGrammar();
+
+ ExpressionT ParseArrowParametersWithRest(ExpressionListT* list,
+ AccumulationScope* scope);
- ExpressionT ParseArrayLiteral(bool* ok);
+ ExpressionT ParseArrayLiteral();
inline static bool IsAccessor(ParsePropertyKind kind) {
return IsInRange(kind, ParsePropertyKind::kAccessorGetter,
ParsePropertyKind::kAccessorSetter);
}
- ExpressionT ParsePropertyName(IdentifierT* name, ParsePropertyKind* kind,
- ParseFunctionFlags* flags,
- bool* is_computed_name, bool* ok);
- ExpressionT ParseObjectLiteral(bool* ok);
+ ExpressionT ParseProperty(ParsePropertyInfo* prop_info);
+ ExpressionT ParseObjectLiteral();
ClassLiteralPropertyT ParseClassPropertyDefinition(
- ClassLiteralChecker* checker, ClassInfo* class_info,
- IdentifierT* property_name, bool has_extends, bool* is_computed_name,
- ClassLiteralProperty::Kind* property_kind, bool* is_static, bool* ok);
- ExpressionT ParseClassFieldInitializer(ClassInfo* class_info, int beg_pos,
- bool is_static, bool* ok);
+ ClassInfo* class_info, ParsePropertyInfo* prop_info, bool has_extends);
+ void CheckClassFieldName(IdentifierT name, bool is_static);
+ void CheckClassMethodName(IdentifierT name, ParsePropertyKind type,
+ ParseFunctionFlags flags, bool is_static,
+ bool* has_seen_constructor);
+ ExpressionT ParseMemberInitializer(ClassInfo* class_info, int beg_pos,
+ bool is_static);
ObjectLiteralPropertyT ParseObjectPropertyDefinition(
- ObjectLiteralChecker* checker, bool* is_computed_name,
- bool* is_rest_property, bool* ok);
- ExpressionListT ParseArguments(Scanner::Location* first_spread_pos,
- bool maybe_arrow,
- bool* is_simple_parameter_list, bool* ok);
- ExpressionListT ParseArguments(Scanner::Location* first_spread_pos,
- bool* ok) {
- bool is_simple = true;
- return ParseArguments(first_spread_pos, false, &is_simple, ok);
- }
-
- ExpressionT ParseAssignmentExpression(bool accept_IN, bool* ok);
- ExpressionT ParseYieldExpression(bool accept_IN, bool* ok);
- V8_INLINE ExpressionT ParseConditionalExpression(bool accept_IN, bool* ok);
- ExpressionT ParseConditionalContinuation(ExpressionT expression,
- bool accept_IN, int pos, bool* ok);
- ExpressionT ParseBinaryExpression(int prec, bool accept_IN, bool* ok);
- ExpressionT ParseUnaryOpExpression(bool* ok);
- ExpressionT ParseAwaitExpression(bool* ok);
- ExpressionT ParsePrefixExpression(bool* ok);
- V8_INLINE ExpressionT ParseUnaryExpression(bool* ok);
- V8_INLINE ExpressionT ParsePostfixExpression(bool* ok);
- V8_INLINE ExpressionT ParseLeftHandSideExpression(bool* ok);
- ExpressionT ParseMemberWithPresentNewPrefixesExpression(bool* is_async,
- bool* ok);
- V8_INLINE ExpressionT ParseMemberWithNewPrefixesExpression(bool* is_async,
- bool* ok);
- V8_INLINE ExpressionT ParseMemberExpression(bool* is_async, bool* ok);
- V8_INLINE ExpressionT ParseMemberExpressionContinuation(
- ExpressionT expression, bool* is_async, bool* ok);
-
- // `rewritable_length`: length of the destructuring_assignments_to_rewrite()
- // queue in the parent function state, prior to parsing of formal parameters.
- // If the arrow function is lazy, any items added during formal parameter
- // parsing are removed from the queue.
- ExpressionT ParseArrowFunctionLiteral(bool accept_IN,
- const FormalParametersT& parameters,
- int rewritable_length, bool* ok);
- void ParseAsyncFunctionBody(Scope* scope, StatementListT body, bool* ok);
- ExpressionT ParseAsyncFunctionLiteral(bool* ok);
+ ParsePropertyInfo* prop_info, bool* has_seen_proto);
+ void ParseArguments(
+ ExpressionListT* args, bool* has_spread,
+ ParsingArrowHeadFlag maybe_arrow = kCertainlyNotArrowHead);
+
+ ExpressionT ParseYieldExpression();
+ V8_INLINE ExpressionT ParseConditionalExpression();
+ ExpressionT ParseConditionalContinuation(ExpressionT expression, int pos);
+ ExpressionT ParseBinaryContinuation(ExpressionT x, int prec, int prec1);
+ V8_INLINE ExpressionT ParseBinaryExpression(int prec);
+ ExpressionT ParseUnaryOrPrefixExpression();
+ ExpressionT ParseAwaitExpression();
+ V8_INLINE ExpressionT ParseUnaryExpression();
+ V8_INLINE ExpressionT ParsePostfixExpression();
+ V8_INLINE ExpressionT ParseLeftHandSideExpression();
+ ExpressionT ParseLeftHandSideContinuation(ExpressionT expression);
+ ExpressionT ParseMemberWithPresentNewPrefixesExpression();
+ V8_INLINE ExpressionT ParseMemberWithNewPrefixesExpression();
+ ExpressionT ParseFunctionExpression();
+ V8_INLINE ExpressionT ParseMemberExpression();
+ V8_INLINE ExpressionT
+ ParseMemberExpressionContinuation(ExpressionT expression) {
+ if (!Token::IsMember(peek())) return expression;
+ return DoParseMemberExpressionContinuation(expression);
+ }
+ ExpressionT DoParseMemberExpressionContinuation(ExpressionT expression);
+
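
ParseMemberExpressionContinuation now keeps only the cheap Token::IsMember() test inline and outlines the real work into DoParseMemberExpressionContinuation, so the common no-suffix case costs one predictable branch. A small sketch of the same split (illustrative names; __attribute__((noinline)) is the GCC/Clang spelling behind macros like V8_NOINLINE):

    #include <iostream>

    enum Token { kDot, kLBracket, kSemicolon, kEnd };

    // Outlined continuation: the expensive loop lives out of line, the
    // way V8_NOINLINE keeps cold paths from bloating callers.
    __attribute__((noinline)) int DoParseContinuation(const Token* t, int pos) {
      while (t[pos] == kDot || t[pos] == kLBracket) ++pos;  // consume members
      return pos;
    }

    // Inline fast path: most expressions have no member suffix, so the
    // common case is a single comparison and an immediate return.
    inline int ParseContinuation(const Token* t, int pos) {
      if (t[pos] != kDot && t[pos] != kLBracket) return pos;
      return DoParseContinuation(t, pos);
    }

    int main() {
      Token tokens[] = {kDot, kDot, kSemicolon, kEnd};
      std::cout << ParseContinuation(tokens, 0) << "\n";  // prints 2
      return 0;
    }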
+ ExpressionT ParseArrowFunctionLiteral(const FormalParametersT& parameters);
+ void ParseAsyncFunctionBody(Scope* scope, StatementListT* body);
+ ExpressionT ParseAsyncFunctionLiteral();
ExpressionT ParseClassLiteral(IdentifierT name,
Scanner::Location class_name_location,
bool name_is_strict_reserved,
- int class_token_pos, bool* ok);
- ExpressionT ParseTemplateLiteral(ExpressionT tag, int start, bool tagged,
- bool* ok);
- ExpressionT ParseSuperExpression(bool is_new, bool* ok);
- ExpressionT ParseImportExpressions(bool* ok);
- ExpressionT ParseNewTargetExpression(bool* ok);
-
- V8_INLINE void ParseFormalParameter(FormalParametersT* parameters, bool* ok);
- void ParseFormalParameterList(FormalParametersT* parameters, bool* ok);
+ int class_token_pos);
+ ExpressionT ParseTemplateLiteral(ExpressionT tag, int start, bool tagged);
+ ExpressionT ParseSuperExpression(bool is_new);
+ ExpressionT ParseImportExpressions();
+ ExpressionT ParseNewTargetExpression();
+
+ V8_INLINE void ParseFormalParameter(FormalParametersT* parameters);
+ void ParseFormalParameterList(FormalParametersT* parameters);
void CheckArityRestrictions(int param_count, FunctionKind function_type,
bool has_rest, int formals_start_pos,
- int formals_end_pos, bool* ok);
+ int formals_end_pos);
- BlockT ParseVariableDeclarations(VariableDeclarationContext var_context,
- DeclarationParsingResult* parsing_result,
- ZonePtrList<const AstRawString>* names,
- bool* ok);
+ void ParseVariableDeclarations(VariableDeclarationContext var_context,
+ DeclarationParsingResult* parsing_result,
+ ZonePtrList<const AstRawString>* names);
StatementT ParseAsyncFunctionDeclaration(
- ZonePtrList<const AstRawString>* names, bool default_export, bool* ok);
- StatementT ParseFunctionDeclaration(bool* ok);
+ ZonePtrList<const AstRawString>* names, bool default_export);
+ StatementT ParseFunctionDeclaration();
StatementT ParseHoistableDeclaration(ZonePtrList<const AstRawString>* names,
- bool default_export, bool* ok);
+ bool default_export);
StatementT ParseHoistableDeclaration(int pos, ParseFunctionFlags flags,
ZonePtrList<const AstRawString>* names,
- bool default_export, bool* ok);
+ bool default_export);
StatementT ParseClassDeclaration(ZonePtrList<const AstRawString>* names,
- bool default_export, bool* ok);
- StatementT ParseNativeDeclaration(bool* ok);
+ bool default_export);
+ StatementT ParseNativeDeclaration();
// Whether we're parsing a single-expression arrow function or something else.
enum class FunctionBodyType { kExpression, kBlock };
// Consumes the ending }.
- void ParseFunctionBody(StatementListT result, IdentifierT function_name,
+ void ParseFunctionBody(StatementListT* body, IdentifierT function_name,
int pos, const FormalParametersT& parameters,
FunctionKind kind,
FunctionLiteral::FunctionType function_type,
- FunctionBodyType body_type, bool accept_IN, bool* ok);
-
- // Under some circumstances, we allow preparsing to abort if the preparsed
- // function is "long and trivial", and fully parse instead. Our current
- // definition of "long and trivial" is:
- // - over kLazyParseTrialLimit statements
- // - all starting with an identifier (i.e., no if, for, while, etc.)
- static const int kLazyParseTrialLimit = 200;
+ FunctionBodyType body_type);
// TODO(nikolaos, marja): The first argument should not really be passed
// by value. The method is expected to add the parsed statements to the
// list. This works because in the case of the parser, StatementListT is
// a pointer whereas the preparser does not really modify the body.
- V8_INLINE void ParseStatementList(StatementListT body, Token::Value end_token,
- bool* ok) {
- LazyParsingResult result = ParseStatementList(body, end_token, false, ok);
- USE(result);
- DCHECK_EQ(result, kLazyParsingComplete);
- }
- V8_INLINE LazyParsingResult ParseStatementList(StatementListT body,
- Token::Value end_token,
- bool may_abort, bool* ok);
- StatementT ParseStatementListItem(bool* ok);
+ V8_INLINE void ParseStatementList(StatementListT* body,
+ Token::Value end_token);
+ StatementT ParseStatementListItem();
StatementT ParseStatement(ZonePtrList<const AstRawString>* labels,
- ZonePtrList<const AstRawString>* own_labels,
- bool* ok) {
+ ZonePtrList<const AstRawString>* own_labels) {
return ParseStatement(labels, own_labels,
- kDisallowLabelledFunctionStatement, ok);
+ kDisallowLabelledFunctionStatement);
}
StatementT ParseStatement(ZonePtrList<const AstRawString>* labels,
ZonePtrList<const AstRawString>* own_labels,
- AllowLabelledFunctionStatement allow_function,
- bool* ok);
- BlockT ParseBlock(ZonePtrList<const AstRawString>* labels, bool* ok);
+ AllowLabelledFunctionStatement allow_function);
+ BlockT ParseBlock(ZonePtrList<const AstRawString>* labels);
// Parse a SubStatement in strict mode, or with an extra block scope in
// sloppy mode to handle
// ES#sec-functiondeclarations-in-ifstatement-statement-clauses
- StatementT ParseScopedStatement(ZonePtrList<const AstRawString>* labels,
- bool* ok);
+ StatementT ParseScopedStatement(ZonePtrList<const AstRawString>* labels);
StatementT ParseVariableStatement(VariableDeclarationContext var_context,
- ZonePtrList<const AstRawString>* names,
- bool* ok);
+ ZonePtrList<const AstRawString>* names);
// Magical syntax support.
- ExpressionT ParseV8Intrinsic(bool* ok);
+ ExpressionT ParseV8Intrinsic();
- ExpressionT ParseDoExpression(bool* ok);
-
- StatementT ParseDebuggerStatement(bool* ok);
+ StatementT ParseDebuggerStatement();
StatementT ParseExpressionOrLabelledStatement(
ZonePtrList<const AstRawString>* labels,
ZonePtrList<const AstRawString>* own_labels,
- AllowLabelledFunctionStatement allow_function, bool* ok);
- StatementT ParseIfStatement(ZonePtrList<const AstRawString>* labels,
- bool* ok);
- StatementT ParseContinueStatement(bool* ok);
- StatementT ParseBreakStatement(ZonePtrList<const AstRawString>* labels,
- bool* ok);
- StatementT ParseReturnStatement(bool* ok);
- StatementT ParseWithStatement(ZonePtrList<const AstRawString>* labels,
- bool* ok);
+ AllowLabelledFunctionStatement allow_function);
+ StatementT ParseIfStatement(ZonePtrList<const AstRawString>* labels);
+ StatementT ParseContinueStatement();
+ StatementT ParseBreakStatement(ZonePtrList<const AstRawString>* labels);
+ StatementT ParseReturnStatement();
+ StatementT ParseWithStatement(ZonePtrList<const AstRawString>* labels);
StatementT ParseDoWhileStatement(ZonePtrList<const AstRawString>* labels,
- ZonePtrList<const AstRawString>* own_labels,
- bool* ok);
+ ZonePtrList<const AstRawString>* own_labels);
StatementT ParseWhileStatement(ZonePtrList<const AstRawString>* labels,
- ZonePtrList<const AstRawString>* own_labels,
- bool* ok);
- StatementT ParseThrowStatement(bool* ok);
- StatementT ParseSwitchStatement(ZonePtrList<const AstRawString>* labels,
- bool* ok);
- V8_INLINE StatementT ParseTryStatement(bool* ok);
+ ZonePtrList<const AstRawString>* own_labels);
+ StatementT ParseThrowStatement();
+ StatementT ParseSwitchStatement(ZonePtrList<const AstRawString>* labels);
+ V8_INLINE StatementT ParseTryStatement();
StatementT ParseForStatement(ZonePtrList<const AstRawString>* labels,
- ZonePtrList<const AstRawString>* own_labels,
- bool* ok);
+ ZonePtrList<const AstRawString>* own_labels);
StatementT ParseForEachStatementWithDeclarations(
int stmt_pos, ForInfo* for_info, ZonePtrList<const AstRawString>* labels,
- ZonePtrList<const AstRawString>* own_labels, Scope* inner_block_scope,
- bool* ok);
+ ZonePtrList<const AstRawString>* own_labels, Scope* inner_block_scope);
StatementT ParseForEachStatementWithoutDeclarations(
int stmt_pos, ExpressionT expression, int lhs_beg_pos, int lhs_end_pos,
ForInfo* for_info, ZonePtrList<const AstRawString>* labels,
- ZonePtrList<const AstRawString>* own_labels, bool* ok);
+ ZonePtrList<const AstRawString>* own_labels);
// Parse a C-style for loop: 'for (<init>; <cond>; <next>) { ... }'
// "for (<init>;" is assumed to have been parser already.
ForStatementT ParseStandardForLoop(
int stmt_pos, ZonePtrList<const AstRawString>* labels,
ZonePtrList<const AstRawString>* own_labels, ExpressionT* cond,
- StatementT* next, StatementT* body, bool* ok);
+ StatementT* next, StatementT* body);
// Same as the above, but handles those cases where <init> is a
// lexical variable declaration.
StatementT ParseStandardForLoopWithLexicalDeclarations(
int stmt_pos, StatementT init, ForInfo* for_info,
ZonePtrList<const AstRawString>* labels,
- ZonePtrList<const AstRawString>* own_labels, bool* ok);
- StatementT ParseForAwaitStatement(ZonePtrList<const AstRawString>* labels,
- ZonePtrList<const AstRawString>* own_labels,
- bool* ok);
+ ZonePtrList<const AstRawString>* own_labels);
+ StatementT ParseForAwaitStatement(
+ ZonePtrList<const AstRawString>* labels,
+ ZonePtrList<const AstRawString>* own_labels);
+
+ V8_INLINE bool IsLet(const AstRawString* identifier) const {
+ return identifier == ast_value_factory()->let_string();
+ }
+
+ void DesugarBindingInForEachStatement(ForInfo* for_info, BlockT* body_block,
+ ExpressionT* each_variable) {
+ // Annex B.3.5 prohibits the form
+ // `try {} catch(e) { for (var e of {}); }`
+ // So if we are parsing a statement like `for (var ... of ...)`
+ // we need to walk up the scope chain and look for catch scopes
+ // which have a simple binding, then compare their binding against
+ // all of the names declared in the init of the for-of we're
+ // parsing.
+ bool is_for_var_of =
+ for_info->mode == ForEachStatement::ITERATE &&
+ for_info->parsing_result.descriptor.mode == VariableMode::kVar;
+
+ if (is_for_var_of) {
+ Scope* scope = this->scope();
+ while (scope != nullptr && !scope->is_declaration_scope()) {
+ if (scope->is_catch_scope()) {
+ auto name = scope->catch_variable()->raw_name();
+          // If it's a simple binding and the name is also declared in the
+          // for loop's init, report the var redeclaration (Annex B.3.5).
+ if (name != ast_value_factory()->dot_catch_string() &&
+ for_info->bound_names.Contains(name)) {
+ impl()->ReportMessageAt(for_info->parsing_result.bindings_loc,
+ MessageTemplate::kVarRedeclaration, name);
+ }
+ }
+ scope = scope->outer_scope();
+ }
+ }
+
+ impl()->DesugarBindingInForEachStatement(for_info, body_block,
+ each_variable);
+ }
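
The catch-scope walk above is a plain traversal of the singly linked scope chain up to the enclosing declaration scope. A minimal sketch of that traversal with hypothetical types:

    #include <cassert>
    #include <string>

    // Hypothetical scope node; outer plays the role of outer_scope().
    struct Scope {
      bool is_declaration_scope;
      bool is_catch_scope;
      std::string catch_name;
      Scope* outer;
    };

    // Walk outward until the enclosing declaration scope, testing every
    // intervening catch scope's bound name, as in the check above.
    bool ConflictsWithCatch(Scope* scope, const std::string& var_name) {
      for (Scope* s = scope; s != nullptr && !s->is_declaration_scope;
           s = s->outer) {
        if (s->is_catch_scope && s->catch_name == var_name) return true;
      }
      return false;
    }

    int main() {
      Scope function_scope{true, false, "", nullptr};
      Scope catch_scope{false, true, "e", &function_scope};
      Scope block_scope{false, false, "", &catch_scope};
      assert(ConflictsWithCatch(&block_scope, "e"));   // var e redeclares catch e
      assert(!ConflictsWithCatch(&block_scope, "x"));  // unrelated name is fine
      return 0;
    }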
bool IsNextLetKeyword();
- bool IsTrivialExpression();
// Checks if the expression is a valid reference expression (e.g., on the
// left-hand side of assignments). Although ruled out by ECMA as early errors,
// we allow calls for web compatibility and rewrite them to a runtime throw.
- ExpressionT CheckAndRewriteReferenceExpression(
- ExpressionT expression, int beg_pos, int end_pos,
- MessageTemplate::Template message, bool* ok);
- ExpressionT CheckAndRewriteReferenceExpression(
- ExpressionT expression, int beg_pos, int end_pos,
- MessageTemplate::Template message, ParseErrorType type, bool* ok);
+ ExpressionT RewriteInvalidReferenceExpression(
+ ExpressionT expression, int beg_pos, int end_pos, MessageTemplate message,
+ ParseErrorType type = kReferenceError);
bool IsValidReferenceExpression(ExpressionT expression);
@@ -1318,33 +1212,6 @@ class ParserBase {
return true;
}
- bool IsValidPattern(ExpressionT expression) {
- return expression->IsObjectLiteral() || expression->IsArrayLiteral();
- }
-
- // Due to hoisting, the value of a 'var'-declared variable may actually change
- // even if the code contains only the "initial" assignment, namely when that
- // assignment occurs inside a loop. For example:
- //
- // let i = 10;
- // do { var x = i } while (i--):
- //
- // As a simple and very conservative approximation of this, we explicitly mark
- // as maybe-assigned any non-lexical variable whose initializing "declaration"
- // does not syntactically occur in the function scope. (In the example above,
- // it occurs in a block scope.)
- //
- // Note that non-lexical variables include temporaries, which may also get
- // assigned inside a loop due to the various rewritings that the parser
- // performs.
- //
- // This also handles marking of loop variables in for-in and for-of loops,
- // as determined by declaration_kind.
- //
- static void MarkLoopVariableAsAssigned(
- Scope* scope, Variable* var,
- typename DeclarationDescriptor::Kind declaration_kind);
-
FunctionKind FunctionKindForImpl(bool is_method, ParseFunctionFlags flags) {
static const FunctionKind kFunctionKinds[][2][2] = {
{
@@ -1422,106 +1289,64 @@ class ParserBase {
return factory()->NewReturnStatement(expr, pos, end_pos);
}
- // Validation per ES6 object literals.
- class ObjectLiteralChecker {
- public:
- explicit ObjectLiteralChecker(ParserBase* parser)
- : parser_(parser), has_seen_proto_(false) {}
+ ModuleDescriptor* module() const {
+ return scope()->AsModuleScope()->module();
+ }
+ Scope* scope() const { return scope_; }
- void CheckDuplicateProto(Token::Value property);
+  // Stack of expression scopes.
+ // The top of the stack is always pointed to by expression_scope().
+ V8_INLINE ExpressionScope* expression_scope() const {
+ DCHECK_NOT_NULL(expression_scope_);
+ return expression_scope_;
+ }
- private:
- bool IsProto() const {
- return this->scanner()->CurrentMatchesContextualEscaped(
- Token::PROTO_UNDERSCORED);
+ class AcceptINScope final {
+ public:
+ AcceptINScope(ParserBase* parser, bool accept_IN)
+ : parser_(parser), previous_accept_IN_(parser->accept_IN_) {
+ parser_->accept_IN_ = accept_IN;
}
- ParserBase* parser() const { return parser_; }
- Scanner* scanner() const { return parser_->scanner(); }
+ ~AcceptINScope() { parser_->accept_IN_ = previous_accept_IN_; }
+ private:
ParserBase* parser_;
- bool has_seen_proto_;
+ bool previous_accept_IN_;
};
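
AcceptINScope replaces the accept_IN flag that was previously threaded through every Parse* signature with an RAII save/restore of a single parser field. A stand-alone equivalent, assuming a simplified Parser:

    #include <cassert>

    struct Parser {
      bool accept_IN = true;
    };

    // Saves the current flag, installs the new value, and restores on
    // scope exit; safe across early returns, like AcceptINScope above.
    class AcceptINScope {
     public:
      AcceptINScope(Parser* p, bool accept_IN)
          : parser_(p), previous_(p->accept_IN) {
        parser_->accept_IN = accept_IN;
      }
      ~AcceptINScope() { parser_->accept_IN = previous_; }

     private:
      Parser* parser_;
      bool previous_;
    };

    int main() {
      Parser p;
      {
        AcceptINScope no_in(&p, false);  // e.g. inside a for-loop init
        assert(!p.accept_IN);
      }
      assert(p.accept_IN);  // restored automatically
      return 0;
    }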
- // Validation per ES6 class literals.
- class ClassLiteralChecker {
+ class ParameterParsingScope {
public:
- explicit ClassLiteralChecker(ParserBase* parser)
- : parser_(parser), has_seen_constructor_(false) {}
-
- void CheckClassMethodName(Token::Value property, ParsePropertyKind type,
- ParseFunctionFlags flags, bool is_static,
- bool* ok);
- void CheckClassFieldName(bool is_static, bool* ok);
-
- private:
- bool IsConstructor() {
- return this->scanner()->CurrentMatchesContextualEscaped(
- Token::CONSTRUCTOR);
- }
- bool IsPrivateConstructor() {
- return this->scanner()->CurrentMatchesContextualEscaped(
- Token::PRIVATE_CONSTRUCTOR);
- }
- bool IsPrototype() {
- return this->scanner()->CurrentMatchesContextualEscaped(Token::PROTOTYPE);
+ ParameterParsingScope(Impl* parser, FormalParametersT* parameters)
+ : parser_(parser), parent_parameters_(parser_->parameters_) {
+ parser_->parameters_ = parameters;
}
- ParserBase* parser() const { return parser_; }
- Scanner* scanner() const { return parser_->scanner(); }
+ ~ParameterParsingScope() { parser_->parameters_ = parent_parameters_; }
- ParserBase* parser_;
- bool has_seen_constructor_;
+ private:
+ Impl* parser_;
+ FormalParametersT* parent_parameters_;
};
- ModuleDescriptor* module() const {
- return scope()->AsModuleScope()->module();
- }
- Scope* scope() const { return scope_; }
-
- // Stack of expression classifiers.
- // The top of the stack is always pointed to by classifier().
- V8_INLINE ExpressionClassifier* classifier() const {
- DCHECK_NOT_NULL(classifier_);
- return classifier_;
- }
-
- // Accumulates the classifier that is on top of the stack (inner) to
- // the one that is right below (outer) and pops the inner.
- V8_INLINE void Accumulate(unsigned productions) {
- DCHECK_NOT_NULL(classifier_);
- ExpressionClassifier* previous = classifier_->previous();
- DCHECK_NOT_NULL(previous);
- previous->Accumulate(classifier_, productions);
- classifier_ = previous;
- }
+ class FunctionBodyParsingScope {
+ public:
+ explicit FunctionBodyParsingScope(Impl* parser)
+ : parser_(parser), expression_scope_(parser_->expression_scope_) {
+ parser_->expression_scope_ = nullptr;
+ }
- V8_INLINE void AccumulateNonBindingPatternErrors() {
- this->Accumulate(ExpressionClassifier::AllProductions &
- ~(ExpressionClassifier::BindingPatternProduction |
- ExpressionClassifier::LetPatternProduction));
- }
+ ~FunctionBodyParsingScope() {
+ parser_->expression_scope_ = expression_scope_;
+ }
- // Pops and discards the classifier that is on top of the stack
- // without accumulating.
- V8_INLINE void DiscardExpressionClassifier() {
- DCHECK_NOT_NULL(classifier_);
- classifier_->Discard();
- classifier_ = classifier_->previous();
- }
+ private:
+ Impl* parser_;
+ ExpressionScope* expression_scope_;
+ };
- // Accumulate errors that can be arbitrarily deep in an expression.
- // These correspond to the ECMAScript spec's 'Contains' operation
- // on productions. This includes:
- //
- // - YieldExpression is disallowed in arrow parameters in a generator.
- // - AwaitExpression is disallowed in arrow parameters in an async function.
- // - AwaitExpression is disallowed in async arrow parameters.
- //
- V8_INLINE void AccumulateFormalParameterContainmentErrors() {
- Accumulate(ExpressionClassifier::FormalParameterInitializerProduction |
- ExpressionClassifier::AsyncArrowFormalParametersProduction);
- }
+ std::vector<void*>* pointer_buffer() { return &pointer_buffer_; }
+ std::vector<void*>* variable_buffer() { return &variable_buffer_; }
// Parser base's protected field members.
@@ -1543,22 +1368,60 @@ class ParserBase {
private:
Zone* zone_;
- ExpressionClassifier* classifier_;
+ ExpressionScope* expression_scope_;
- Scanner* scanner_;
+ std::vector<void*> pointer_buffer_;
+ std::vector<void*> variable_buffer_;
- FunctionLiteral::EagerCompileHint default_eager_compile_hint_;
+ Scanner* scanner_;
int function_literal_id_;
int script_id_;
+ FunctionLiteral::EagerCompileHint default_eager_compile_hint_;
+
+ // This struct is used to move information about the next arrow function from
+ // the place where the arrow head was parsed to where the body will be parsed.
+ // Nothing can be parsed between the head and the body, so it will be consumed
+ // immediately after it's produced.
+ // Preallocating the struct as part of the parser minimizes the cost of
+ // supporting arrow functions on non-arrow expressions.
+ struct NextArrowFunctionInfo {
+ Scanner::Location strict_parameter_error_location =
+ Scanner::Location::invalid();
+ MessageTemplate strict_parameter_error_message = MessageTemplate::kNone;
+ DeclarationScope* scope = nullptr;
+
+ bool HasInitialState() const { return scope == nullptr; }
+
+ void Reset() {
+ scope = nullptr;
+ ClearStrictParameterError();
+ DCHECK(HasInitialState());
+ }
+
+ // Tracks strict-mode parameter violations of sloppy-mode arrow heads in
+ // case the function ends up becoming strict mode. Only one global place to
+    // track this is necessary since arrow functions with non-simple parameters
+ // cannot become strict-mode later on.
+ void ClearStrictParameterError() {
+ strict_parameter_error_location = Scanner::Location::invalid();
+ strict_parameter_error_message = MessageTemplate::kNone;
+ }
+ };
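
A sketch of the hand-off pattern NextArrowFunctionInfo implements: the arrow-head parser fills a preallocated slot, and the body parser consumes and resets it immediately (types and values below are made up):

    #include <cassert>

    // Preallocated hand-off slot, mirroring NextArrowFunctionInfo: no
    // allocation happens on the common non-arrow path.
    struct NextInfo {
      int scope_id = 0;  // 0 means "no pending arrow head"
      bool HasInitialState() const { return scope_id == 0; }
      void Reset() { scope_id = 0; }
    };

    struct MiniParser {
      NextInfo next_info;

      void ParseArrowHead() { next_info.scope_id = 42; }  // produce
      int ParseArrowBody() {                              // consume
        assert(!next_info.HasInitialState());
        int scope = next_info.scope_id;
        next_info.Reset();
        return scope;
      }
    };

    int main() {
      MiniParser p;
      p.ParseArrowHead();
      assert(p.ParseArrowBody() == 42);
      assert(p.next_info.HasInitialState());  // ready for the next arrow
      return 0;
    }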
+
+ FormalParametersT* parameters_;
+ NextArrowFunctionInfo next_arrow_function_info_;
+
+ bool accept_IN_ = true;
+
bool allow_natives_;
- bool allow_harmony_do_expressions_;
bool allow_harmony_public_fields_;
bool allow_harmony_static_fields_;
bool allow_harmony_dynamic_import_;
bool allow_harmony_import_meta_;
bool allow_harmony_private_fields_;
+ bool allow_harmony_private_methods_;
bool allow_eval_cache_;
};
@@ -1572,8 +1435,6 @@ ParserBase<Impl>::FunctionState::FunctionState(
function_state_stack_(function_state_stack),
outer_function_state_(*function_state_stack),
scope_(scope),
- destructuring_assignments_to_rewrite_(scope->zone()),
- reported_errors_(16, scope->zone()),
dont_optimize_reason_(BailoutReason::kNoReason),
next_function_is_likely_called_(false),
previous_function_was_likely_called_(false),
@@ -1592,218 +1453,109 @@ ParserBase<Impl>::FunctionState::~FunctionState() {
}
template <typename Impl>
-void ParserBase<Impl>::GetUnexpectedTokenMessage(
- Token::Value token, MessageTemplate::Template* message,
- Scanner::Location* location, const char** arg,
- MessageTemplate::Template default_) {
- *arg = nullptr;
- switch (token) {
- case Token::EOS:
- *message = MessageTemplate::kUnexpectedEOS;
- break;
- case Token::SMI:
- case Token::NUMBER:
- case Token::BIGINT:
- *message = MessageTemplate::kUnexpectedTokenNumber;
- break;
- case Token::STRING:
- *message = MessageTemplate::kUnexpectedTokenString;
- break;
- case Token::PRIVATE_NAME:
- case Token::IDENTIFIER:
- *message = MessageTemplate::kUnexpectedTokenIdentifier;
- break;
- case Token::AWAIT:
- case Token::ENUM:
- *message = MessageTemplate::kUnexpectedReserved;
- break;
- case Token::LET:
- case Token::STATIC:
- case Token::YIELD:
- case Token::FUTURE_STRICT_RESERVED_WORD:
- *message = is_strict(language_mode())
- ? MessageTemplate::kUnexpectedStrictReserved
- : MessageTemplate::kUnexpectedTokenIdentifier;
- break;
- case Token::TEMPLATE_SPAN:
- case Token::TEMPLATE_TAIL:
- *message = MessageTemplate::kUnexpectedTemplateString;
- break;
- case Token::ESCAPED_STRICT_RESERVED_WORD:
- case Token::ESCAPED_KEYWORD:
- *message = MessageTemplate::kInvalidEscapedReservedWord;
- break;
- case Token::ILLEGAL:
- if (scanner()->has_error()) {
- *message = scanner()->error();
- *location = scanner()->error_location();
- } else {
- *message = MessageTemplate::kInvalidOrUnexpectedToken;
- }
- break;
- case Token::REGEXP_LITERAL:
- *message = MessageTemplate::kUnexpectedTokenRegExp;
- break;
- default:
- const char* name = Token::String(token);
- DCHECK_NOT_NULL(name);
- *arg = name;
- break;
- }
-}
-
-template <typename Impl>
void ParserBase<Impl>::ReportUnexpectedToken(Token::Value token) {
- return ReportUnexpectedTokenAt(scanner_->location(), token);
-}
-
-template <typename Impl>
-void ParserBase<Impl>::ReportUnexpectedTokenAt(
- Scanner::Location source_location, Token::Value token,
- MessageTemplate::Template message) {
- const char* arg;
- GetUnexpectedTokenMessage(token, &message, &source_location, &arg);
- impl()->ReportMessageAt(source_location, message, arg);
-}
-
-template <typename Impl>
-typename ParserBase<Impl>::IdentifierT ParserBase<Impl>::ParseIdentifier(
- AllowRestrictedIdentifiers allow_restricted_identifiers, bool* ok) {
- ExpressionClassifier classifier(this);
- auto result = ParseAndClassifyIdentifier(CHECK_OK_CUSTOM(NullIdentifier));
-
- if (allow_restricted_identifiers == kDontAllowRestrictedIdentifiers) {
- ValidateAssignmentPattern(CHECK_OK_CUSTOM(NullIdentifier));
- ValidateBindingPattern(CHECK_OK_CUSTOM(NullIdentifier));
- }
-
- return result;
+ return impl()->ReportUnexpectedTokenAt(scanner_->location(), token);
}
template <typename Impl>
typename ParserBase<Impl>::IdentifierT
-ParserBase<Impl>::ParseAndClassifyIdentifier(bool* ok) {
- Token::Value next = Next();
+ParserBase<Impl>::ParseAndClassifyIdentifier(Token::Value next) {
+ DCHECK_EQ(scanner()->current_token(), next);
STATIC_ASSERT(Token::IDENTIFIER + 1 == Token::ASYNC);
- if (IsInRange(next, Token::IDENTIFIER, Token::ASYNC)) {
+ if (V8_LIKELY(IsInRange(next, Token::IDENTIFIER, Token::ASYNC))) {
IdentifierT name = impl()->GetSymbol();
-
- // When this function is used to read a formal parameter, we don't always
- // know whether the function is going to be strict or sloppy. Indeed for
- // arrow functions we don't always know that the identifier we are reading
- // is actually a formal parameter. Therefore besides the errors that we
- // must detect because we know we're in strict mode, we also record any
- // error that we might make in the future once we know the language mode.
- if (impl()->IsEvalOrArguments(name)) {
- if (impl()->IsArguments(name) && scope()->ShouldBanArguments()) {
- ReportMessage(MessageTemplate::kArgumentsDisallowedInInitializer);
- *ok = false;
- return impl()->NullIdentifier();
- }
-
- classifier()->RecordStrictModeFormalParameterError(
- scanner()->location(), MessageTemplate::kStrictEvalArguments);
- if (is_strict(language_mode())) {
- classifier()->RecordBindingPatternError(
- scanner()->location(), MessageTemplate::kStrictEvalArguments);
- }
+ if (V8_UNLIKELY(impl()->IsArguments(name) &&
+ scope()->ShouldBanArguments())) {
+ ReportMessage(MessageTemplate::kArgumentsDisallowedInInitializer);
+ return impl()->EmptyIdentifierString();
}
+ return name;
+ }
- if (classifier()->duplicate_finder() != nullptr &&
- scanner()->IsDuplicateSymbol(classifier()->duplicate_finder(),
- ast_value_factory())) {
- classifier()->RecordDuplicateFormalParameterError(scanner()->location());
- }
+ if (!Token::IsValidIdentifier(next, language_mode(), is_generator(),
+ parsing_module_ || is_async_function())) {
+ ReportUnexpectedToken(next);
+ return impl()->EmptyIdentifierString();
+ }
- return name;
- } else if (next == Token::AWAIT && !parsing_module_ && !is_async_function()) {
- classifier()->RecordAsyncArrowFormalParametersError(
+ if (next == Token::AWAIT) {
+ expression_scope()->RecordAsyncArrowParametersError(
scanner()->location(), MessageTemplate::kAwaitBindingIdentifier);
return impl()->GetSymbol();
- } else if (is_sloppy(language_mode()) &&
- (Token::IsStrictReservedWord(next) ||
- (next == Token::YIELD && !is_generator()))) {
- classifier()->RecordStrictModeFormalParameterError(
- scanner()->location(), MessageTemplate::kUnexpectedStrictReserved);
- if (scanner()->IsLet()) {
- classifier()->RecordLetPatternError(
- scanner()->location(), MessageTemplate::kLetInLexicalBinding);
- }
- return impl()->GetSymbol();
- } else {
- ReportUnexpectedToken(next);
- *ok = false;
- return impl()->NullIdentifier();
}
+
+ DCHECK(Token::IsStrictReservedWord(next));
+ expression_scope()->RecordStrictModeParameterError(
+ scanner()->location(), MessageTemplate::kUnexpectedStrictReserved);
+ return impl()->GetSymbol();
}
template <class Impl>
-typename ParserBase<Impl>::IdentifierT
-ParserBase<Impl>::ParseIdentifierOrStrictReservedWord(
- FunctionKind function_kind, bool* is_strict_reserved, bool* is_await,
- bool* ok) {
+typename ParserBase<Impl>::IdentifierT ParserBase<Impl>::ParseIdentifier(
+ FunctionKind function_kind) {
Token::Value next = Next();
- if (next == Token::IDENTIFIER || (next == Token::AWAIT && !parsing_module_ &&
- !IsAsyncFunction(function_kind)) ||
- next == Token::ASYNC) {
- *is_strict_reserved = false;
- *is_await = next == Token::AWAIT;
- } else if (Token::IsStrictReservedWord(next) ||
- (next == Token::YIELD && !IsGeneratorFunction(function_kind))) {
- *is_strict_reserved = true;
- } else {
+
+ if (!Token::IsValidIdentifier(
+ next, language_mode(), IsGeneratorFunction(function_kind),
+ parsing_module_ || IsAsyncFunction(function_kind))) {
ReportUnexpectedToken(next);
- *ok = false;
- return impl()->NullIdentifier();
+ return impl()->EmptyIdentifierString();
}
return impl()->GetSymbol();
}
template <typename Impl>
-typename ParserBase<Impl>::IdentifierT ParserBase<Impl>::ParseIdentifierName(
- bool* ok) {
- Token::Value next = Next();
- if (!Token::IsAnyIdentifier(next) && next != Token::ESCAPED_KEYWORD &&
- !Token::IsKeyword(next)) {
- ReportUnexpectedToken(next);
- *ok = false;
- return impl()->NullIdentifier();
+typename ParserBase<Impl>::IdentifierT
+ParserBase<Impl>::ParseNonRestrictedIdentifier() {
+ IdentifierT result = ParseIdentifier();
+
+ if (is_strict(language_mode()) &&
+ V8_UNLIKELY(impl()->IsEvalOrArguments(result))) {
+ impl()->ReportMessageAt(scanner()->location(),
+ MessageTemplate::kStrictEvalArguments);
}
- return impl()->GetSymbol();
+ return result;
+}
+
+template <typename Impl>
+typename ParserBase<Impl>::IdentifierT ParserBase<Impl>::ParsePropertyName() {
+ Token::Value next = Next();
+ if (V8_LIKELY(Token::IsPropertyName(next))) return impl()->GetSymbol();
+
+ ReportUnexpectedToken(next);
+ return impl()->EmptyIdentifierString();
}
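
Several of the rewritten fast paths annotate the expected branch with V8_LIKELY/V8_UNLIKELY. On GCC and Clang such macros conventionally expand to __builtin_expect; the sketch below shows the usual definition and use (the common pattern, not copied from v8config.h):

    #include <cstdio>

    // Conventional definitions; V8's own macros live in include/v8config.h.
    #if defined(__GNUC__) || defined(__clang__)
    #define LIKELY(x) (__builtin_expect(!!(x), 1))
    #define UNLIKELY(x) (__builtin_expect(!!(x), 0))
    #else
    #define LIKELY(x) (x)
    #define UNLIKELY(x) (x)
    #endif

    // Mirrors the shape of ParsePropertyName: the valid-token case is hot,
    // the error path is hinted cold so the compiler moves it out of line.
    int ParseName(int token, int valid_token) {
      if (LIKELY(token == valid_token)) return token;
      std::puts("unexpected token");  // cold error path
      return -1;
    }

    int main() { return ParseName(7, 7) == 7 ? 0 : 1; }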
template <typename Impl>
typename ParserBase<Impl>::ExpressionT
-ParserBase<Impl>::ParseIdentifierNameOrPrivateName(bool* ok) {
+ParserBase<Impl>::ParsePropertyOrPrivatePropertyName() {
int pos = position();
IdentifierT name;
ExpressionT key;
- if (allow_harmony_private_fields() && peek() == Token::PRIVATE_NAME) {
- Consume(Token::PRIVATE_NAME);
+ Token::Value next = Next();
+ if (V8_LIKELY(Token::IsPropertyName(next))) {
name = impl()->GetSymbol();
- auto key_proxy =
- impl()->ExpressionFromIdentifier(name, pos, InferName::kNo);
- key_proxy->set_is_private_field();
- key = key_proxy;
- } else {
- name = ParseIdentifierName(CHECK_OK);
key = factory()->NewStringLiteral(name, pos);
+ } else if (allow_harmony_private_fields() && next == Token::PRIVATE_NAME) {
+ name = impl()->GetSymbol();
+ key = impl()->ExpressionFromIdentifier(name, pos, InferName::kNo);
+ } else {
+ ReportUnexpectedToken(next);
+ return impl()->FailureExpression();
}
impl()->PushLiteralName(name);
return key;
}
template <typename Impl>
-typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseRegExpLiteral(
- bool* ok) {
+typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseRegExpLiteral() {
int pos = peek_position();
if (!scanner()->ScanRegExpPattern()) {
Next();
ReportMessage(MessageTemplate::kUnterminatedRegExp);
- *ok = false;
- return impl()->NullExpression();
+ return impl()->FailureExpression();
}
IdentifierT js_pattern = impl()->GetNextSymbol();
@@ -1811,8 +1563,7 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseRegExpLiteral(
if (flags.IsNothing()) {
Next();
ReportMessage(MessageTemplate::kMalformedRegExpFlags);
- *ok = false;
- return impl()->NullExpression();
+ return impl()->FailureExpression();
}
int js_flags = flags.FromJust();
Next();
@@ -1820,8 +1571,7 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseRegExpLiteral(
}
template <typename Impl>
-typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseBindingPattern(
- bool* ok) {
+typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseBindingPattern() {
// Pattern ::
// Identifier
// ArrayLiteral
@@ -1832,29 +1582,35 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseBindingPattern(
ExpressionT result;
if (Token::IsAnyIdentifier(token)) {
- IdentifierT name = ParseAndClassifyIdentifier(CHECK_OK);
- result = impl()->ExpressionFromIdentifier(name, beg_pos);
- } else {
- classifier()->RecordNonSimpleParameter();
-
- if (token == Token::LBRACK) {
- result = ParseArrayLiteral(CHECK_OK);
- } else if (token == Token::LBRACE) {
- result = ParseObjectLiteral(CHECK_OK);
- } else {
- ReportUnexpectedToken(Next());
- *ok = false;
- return impl()->NullExpression();
+ IdentifierT name = ParseAndClassifyIdentifier(Next());
+ if (V8_UNLIKELY(is_strict(language_mode()) &&
+ impl()->IsEvalOrArguments(name))) {
+ impl()->ReportMessageAt(scanner()->location(),
+ MessageTemplate::kStrictEvalArguments);
+ return impl()->FailureExpression();
}
+ return impl()->ExpressionFromIdentifier(name, beg_pos);
+ }
+
+ CheckStackOverflow();
+
+ if (token == Token::LBRACK) {
+ result = ParseArrayLiteral();
+ } else if (token == Token::LBRACE) {
+ result = ParseObjectLiteral();
+ } else {
+ ReportUnexpectedToken(Next());
+ return impl()->FailureExpression();
}
- ValidateBindingPattern(CHECK_OK);
return result;
}
template <typename Impl>
-typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParsePrimaryExpression(
- bool* is_async, bool* ok) {
+typename ParserBase<Impl>::ExpressionT
+ParserBase<Impl>::ParsePrimaryExpression() {
+ CheckStackOverflow();
+
// PrimaryExpression ::
// 'this'
// 'null'
@@ -1874,136 +1630,115 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParsePrimaryExpression(
int beg_pos = peek_position();
Token::Value token = peek();
- switch (token) {
- case Token::THIS: {
- BindingPatternUnexpectedToken();
- Consume(Token::THIS);
- return impl()->ThisExpression(beg_pos);
+
+ if (Token::IsAnyIdentifier(token)) {
+ Consume(token);
+
+ FunctionKind kind = FunctionKind::kArrowFunction;
+
+ if (V8_UNLIKELY(token == Token::ASYNC &&
+ !scanner()->HasLineTerminatorBeforeNext())) {
+ // async function ...
+ if (peek() == Token::FUNCTION) return ParseAsyncFunctionLiteral();
+
+ // async Identifier => ...
+ if (peek_any_identifier() && PeekAhead() == Token::ARROW) {
+ token = Next();
+ beg_pos = position();
+ kind = FunctionKind::kAsyncArrowFunction;
+ }
}
- case Token::NULL_LITERAL:
- case Token::TRUE_LITERAL:
- case Token::FALSE_LITERAL:
- case Token::SMI:
- case Token::NUMBER:
- case Token::BIGINT: {
- // Ensure continuous enum range.
- DCHECK(Token::IsLiteral(token));
- BindingPatternUnexpectedToken();
- return impl()->ExpressionFromLiteral(Next(), beg_pos);
- }
- case Token::STRING: {
- DCHECK(Token::IsLiteral(token));
- BindingPatternUnexpectedToken();
- Consume(Token::STRING);
- return impl()->ExpressionFromString(beg_pos);
+ if (V8_UNLIKELY(peek() == Token::ARROW)) {
+ ArrowHeadParsingScope parsing_scope(impl(), kind);
+ IdentifierT name = ParseAndClassifyIdentifier(token);
+ ClassifyParameter(name, beg_pos, end_position());
+ ExpressionT result =
+ impl()->ExpressionFromIdentifier(name, beg_pos, InferName::kNo);
+ next_arrow_function_info_.scope = parsing_scope.ValidateAndCreateScope();
+ return result;
}
- case Token::ASYNC:
- if (!scanner()->HasLineTerminatorAfterNext() &&
- PeekAhead() == Token::FUNCTION) {
- BindingPatternUnexpectedToken();
- Consume(Token::ASYNC);
- return ParseAsyncFunctionLiteral(ok);
- }
- // CoverCallExpressionAndAsyncArrowHead
- *is_async = true;
- V8_FALLTHROUGH;
- case Token::IDENTIFIER:
- case Token::LET:
- case Token::STATIC:
- case Token::YIELD:
- case Token::AWAIT:
- case Token::FUTURE_STRICT_RESERVED_WORD:
- case Token::ESCAPED_STRICT_RESERVED_WORD: {
- // Ensure continuous enum range.
- DCHECK(IsInRange(token, Token::IDENTIFIER,
- Token::ESCAPED_STRICT_RESERVED_WORD));
- // Using eval or arguments in this context is OK even in strict mode.
- IdentifierT name = ParseAndClassifyIdentifier(CHECK_OK);
- return impl()->ExpressionFromIdentifier(name, beg_pos);
+ IdentifierT name = ParseAndClassifyIdentifier(token);
+ return impl()->ExpressionFromIdentifier(name, beg_pos);
+ }
+
+ if (Token::IsLiteral(token)) {
+ return impl()->ExpressionFromLiteral(Next(), beg_pos);
+ }
+
+ switch (token) {
+ case Token::THIS: {
+ Consume(Token::THIS);
+ return impl()->ThisExpression(beg_pos);
}
case Token::ASSIGN_DIV:
case Token::DIV:
- classifier()->RecordBindingPatternError(
- scanner()->peek_location(), MessageTemplate::kUnexpectedTokenRegExp);
- return ParseRegExpLiteral(ok);
+ return ParseRegExpLiteral();
case Token::LBRACK:
- return ParseArrayLiteral(ok);
+ return ParseArrayLiteral();
case Token::LBRACE:
- return ParseObjectLiteral(ok);
+ return ParseObjectLiteral();
case Token::LPAREN: {
- // Arrow function formal parameters are either a single identifier or a
- // list of BindingPattern productions enclosed in parentheses.
- // Parentheses are not valid on the LHS of a BindingPattern, so we use
- // the is_valid_binding_pattern() check to detect multiple levels of
- // parenthesization.
- bool pattern_error = !classifier()->is_valid_binding_pattern();
- classifier()->RecordPatternError(scanner()->peek_location(),
- MessageTemplate::kUnexpectedToken,
- Token::String(Token::LPAREN));
- if (pattern_error) ArrowFormalParametersUnexpectedToken();
Consume(Token::LPAREN);
if (Check(Token::RPAREN)) {
- // ()=>x. The continuation that looks for the => is in
- // ParseAssignmentExpression.
- classifier()->RecordExpressionError(scanner()->location(),
- MessageTemplate::kUnexpectedToken,
- Token::String(Token::RPAREN));
+ // ()=>x. The continuation that consumes the => is in
+ // ParseAssignmentExpressionCoverGrammar.
+ if (peek() != Token::ARROW) ReportUnexpectedToken(Token::RPAREN);
+ next_arrow_function_info_.scope =
+ NewFunctionScope(FunctionKind::kArrowFunction);
return factory()->NewEmptyParentheses(beg_pos);
}
+ Scope::Snapshot scope_snapshot(scope());
+ ArrowHeadParsingScope maybe_arrow(impl(), FunctionKind::kArrowFunction);
// Heuristically try to detect immediately called functions before
// seeing the call parentheses.
if (peek() == Token::FUNCTION ||
(peek() == Token::ASYNC && PeekAhead() == Token::FUNCTION)) {
function_state_->set_next_function_is_likely_called();
}
- ExpressionT expr = ParseExpressionCoverGrammar(true, CHECK_OK);
- Expect(Token::RPAREN, ok);
+ AcceptINScope scope(this, true);
+ ExpressionT expr = ParseExpressionCoverGrammar();
+ expr->mark_parenthesized();
+ Expect(Token::RPAREN);
+
+ if (peek() == Token::ARROW) {
+ next_arrow_function_info_.scope = maybe_arrow.ValidateAndCreateScope();
+ scope_snapshot.Reparent(next_arrow_function_info_.scope);
+ } else {
+ maybe_arrow.ValidateExpression();
+ }
+
return expr;
}
case Token::CLASS: {
- BindingPatternUnexpectedToken();
Consume(Token::CLASS);
int class_token_pos = position();
IdentifierT name = impl()->NullIdentifier();
bool is_strict_reserved_name = false;
Scanner::Location class_name_location = Scanner::Location::invalid();
if (peek_any_identifier()) {
- bool is_await = false;
- name = ParseIdentifierOrStrictReservedWord(&is_strict_reserved_name,
- &is_await, CHECK_OK);
+ name = ParseAndClassifyIdentifier(Next());
class_name_location = scanner()->location();
- if (is_await) {
- classifier()->RecordAsyncArrowFormalParametersError(
- scanner()->location(), MessageTemplate::kAwaitBindingIdentifier);
- }
+ is_strict_reserved_name =
+ Token::IsStrictReservedWord(scanner()->current_token());
}
return ParseClassLiteral(name, class_name_location,
- is_strict_reserved_name, class_token_pos, ok);
+ is_strict_reserved_name, class_token_pos);
}
case Token::TEMPLATE_SPAN:
case Token::TEMPLATE_TAIL:
- BindingPatternUnexpectedToken();
- return ParseTemplateLiteral(impl()->NullExpression(), beg_pos, false, ok);
+ return ParseTemplateLiteral(impl()->NullExpression(), beg_pos, false);
case Token::MOD:
if (allow_natives() || extension_ != nullptr) {
- BindingPatternUnexpectedToken();
- return ParseV8Intrinsic(ok);
- }
- break;
-
- case Token::DO:
- if (allow_harmony_do_expressions()) {
- BindingPatternUnexpectedToken();
- return ParseDoExpression(ok);
+ return ParseV8Intrinsic();
}
break;
@@ -2012,72 +1747,49 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParsePrimaryExpression(
}
ReportUnexpectedToken(Next());
- *ok = false;
- return impl()->NullExpression();
+ return impl()->FailureExpression();
}
template <typename Impl>
-typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseExpression(
- bool* ok) {
- ExpressionClassifier classifier(this);
- ExpressionT result = ParseExpressionCoverGrammar(true, CHECK_OK);
- ValidateExpression(ok);
+typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseExpression() {
+ ExpressionParsingScope expression_scope(impl());
+ AcceptINScope scope(this, true);
+ ExpressionT result = ParseExpressionCoverGrammar();
+ expression_scope.ValidateExpression();
return result;
}
template <typename Impl>
typename ParserBase<Impl>::ExpressionT
-ParserBase<Impl>::ParseExpressionCoverGrammar(bool accept_IN, bool* ok) {
+ParserBase<Impl>::ParseAssignmentExpression() {
+ ExpressionParsingScope expression_scope(impl());
+ ExpressionT result = ParseAssignmentExpressionCoverGrammar();
+ expression_scope.ValidateExpression();
+ return result;
+}
+
+template <typename Impl>
+typename ParserBase<Impl>::ExpressionT
+ParserBase<Impl>::ParseExpressionCoverGrammar() {
// Expression ::
// AssignmentExpression
// Expression ',' AssignmentExpression
- ExpressionT result = impl()->NullExpression();
+ ExpressionListT list(pointer_buffer());
+ ExpressionT expression;
+ AccumulationScope accumulation_scope(expression_scope());
while (true) {
- int comma_pos = position();
- ExpressionClassifier binding_classifier(this);
- ExpressionT right;
- if (Check(Token::ELLIPSIS)) {
- // 'x, y, ...z' in CoverParenthesizedExpressionAndArrowParameterList only
- // as the formal parameters of'(x, y, ...z) => foo', and is not itself a
- // valid expression.
- classifier()->RecordExpressionError(scanner()->location(),
- MessageTemplate::kUnexpectedToken,
- Token::String(Token::ELLIPSIS));
- int ellipsis_pos = position();
- int pattern_pos = peek_position();
- ExpressionT pattern = ParseBindingPattern(CHECK_OK);
- if (peek() == Token::ASSIGN) {
- ReportMessage(MessageTemplate::kRestDefaultInitializer);
- *ok = false;
- return result;
- }
- right = factory()->NewSpread(pattern, ellipsis_pos, pattern_pos);
- } else {
- right = ParseAssignmentExpression(accept_IN, CHECK_OK);
- }
- // No need to accumulate binding pattern-related errors, since
- // an Expression can't be a binding pattern anyway.
- AccumulateNonBindingPatternErrors();
- if (!impl()->IsIdentifier(right)) classifier()->RecordNonSimpleParameter();
- if (impl()->IsNull(result)) {
- // First time through the loop.
- result = right;
- } else if (impl()->CollapseNaryExpression(&result, right, Token::COMMA,
- comma_pos,
- SourceRange::Empty())) {
- // Do nothing, "result" is already updated.
- } else {
- result =
- factory()->NewBinaryOperation(Token::COMMA, result, right, comma_pos);
+ if (V8_UNLIKELY(peek() == Token::ELLIPSIS)) {
+ return ParseArrowParametersWithRest(&list, &accumulation_scope);
}
- if (!Check(Token::COMMA)) break;
+ int expr_pos = peek_position();
+ expression = ParseAssignmentExpressionCoverGrammar();
- if (right->IsSpread()) {
- classifier()->RecordArrowFormalParametersError(
- scanner()->location(), MessageTemplate::kParamAfterRest);
- }
+ ClassifyArrowParameter(&accumulation_scope, expr_pos, expression);
+ list.Add(expression);
+
+ if (!Check(Token::COMMA)) break;
if (peek() == Token::RPAREN && PeekAhead() == Token::ARROW) {
// a trailing comma is allowed at the end of an arrow parameter list
@@ -2092,19 +1804,64 @@ ParserBase<Impl>::ParseExpressionCoverGrammar(bool accept_IN, bool* ok) {
}
}
- return result;
+  // Return the single element directly if the list contains only one
+  // element. We need to do this because callers of this function care about
+  // the type of the result if there was only a single assignment expression.
+  // The preparser would lose this information otherwise.
+ if (list.length() == 1) return expression;
+ return impl()->ExpressionListToExpression(list);
}
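The loop above accumulates each AssignmentExpression into a pointer-buffer-backed list and only materializes a composite node when more than one element was parsed. A minimal standalone sketch of that accumulate-then-collapse shape (illustrative types, not V8's):

    #include <memory>
    #include <vector>

    struct Expr { virtual ~Expr() = default; };
    struct CommaExpr : Expr {
      std::vector<std::unique_ptr<Expr>> items;
    };

    // Collapse a parsed comma list to its sole element when possible, so
    // callers still see the concrete expression type (mirrors the fast path).
    std::unique_ptr<Expr> Collapse(std::vector<std::unique_ptr<Expr>> items) {
      if (items.size() == 1) return std::move(items.front());
      auto all = std::make_unique<CommaExpr>();
      all->items = std::move(items);
      return all;
    }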
template <typename Impl>
-typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseArrayLiteral(
- bool* ok) {
+typename ParserBase<Impl>::ExpressionT
+ParserBase<Impl>::ParseArrowParametersWithRest(
+ typename ParserBase<Impl>::ExpressionListT* list,
+ AccumulationScope* accumulation_scope) {
+ Consume(Token::ELLIPSIS);
+
+ Scanner::Location ellipsis = scanner()->location();
+ int pattern_pos = peek_position();
+ ExpressionT pattern = ParseBindingPattern();
+ ClassifyArrowParameter(accumulation_scope, pattern_pos, pattern);
+
+ expression_scope()->RecordNonSimpleParameter();
+
+ if (V8_UNLIKELY(peek() == Token::ASSIGN)) {
+ ReportMessage(MessageTemplate::kRestDefaultInitializer);
+ return impl()->FailureExpression();
+ }
+
+ ExpressionT spread =
+ factory()->NewSpread(pattern, ellipsis.beg_pos, pattern_pos);
+ if (V8_UNLIKELY(peek() == Token::COMMA)) {
+ ReportMessage(MessageTemplate::kParamAfterRest);
+ return impl()->FailureExpression();
+ }
+
+  // 'x, y, ...z' in CoverParenthesizedExpressionAndArrowParameterList occurs
+  // only as the formal parameters of '(x, y, ...z) => foo', and is not
+  // itself a valid expression.
+ if (peek() != Token::RPAREN || PeekAhead() != Token::ARROW) {
+ impl()->ReportUnexpectedTokenAt(ellipsis, Token::ELLIPSIS);
+ return impl()->FailureExpression();
+ }
+
+ list->Add(spread);
+ return impl()->ExpressionListToExpression(*list);
+}
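The rewritten rest-parameter path front-loads its three checks before committing to an arrow head: no default on the rest element, nothing after it, and a hard requirement that ') =>' follows. A compact sketch of that decision order, under assumed token names (not V8's Token enum):

    enum class Tok { kAssign, kComma, kRParen, kArrow, kOther };
    enum class RestError { kNone, kDefaultInitializer, kParamAfterRest, kNotArrowHead };

    RestError ClassifyRest(Tok after_pattern, Tok after_rparen) {
      if (after_pattern == Tok::kAssign) return RestError::kDefaultInitializer;  // (...x = 1) =>
      if (after_pattern == Tok::kComma) return RestError::kParamAfterRest;       // (...x, y) =>
      if (after_pattern != Tok::kRParen || after_rparen != Tok::kArrow)
        return RestError::kNotArrowHead;  // '...x' alone is never an expression
      return RestError::kNone;            // (...x) => ok
    }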
+
+template <typename Impl>
+typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseArrayLiteral() {
// ArrayLiteral ::
// '[' Expression? (',' Expression?)* ']'
int pos = peek_position();
- ExpressionListT values = impl()->NewExpressionList(4);
+ ExpressionListT values(pointer_buffer());
int first_spread_index = -1;
Consume(Token::LBRACK);
+
+ AccumulationScope accumulation_scope(expression_scope());
+
while (!Check(Token::RBRACK)) {
ExpressionT elem;
if (peek() == Token::COMMA) {
@@ -2112,115 +1869,80 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseArrayLiteral(
} else if (Check(Token::ELLIPSIS)) {
int start_pos = position();
int expr_pos = peek_position();
- ExpressionT argument = ParseAssignmentExpression(true, CHECK_OK);
+ AcceptINScope scope(this, true);
+ ExpressionT argument =
+ ParsePossibleDestructuringSubPattern(&accumulation_scope);
elem = factory()->NewSpread(argument, start_pos, expr_pos);
if (first_spread_index < 0) {
- first_spread_index = values->length();
+ first_spread_index = values.length();
}
if (argument->IsAssignment()) {
- classifier()->RecordPatternError(
+ expression_scope()->RecordPatternError(
Scanner::Location(start_pos, end_position()),
MessageTemplate::kInvalidDestructuringTarget);
- } else {
- CheckDestructuringElement(argument, start_pos, end_position());
}
if (peek() == Token::COMMA) {
- classifier()->RecordPatternError(
+ expression_scope()->RecordPatternError(
Scanner::Location(start_pos, end_position()),
MessageTemplate::kElementAfterRest);
}
} else {
- int beg_pos = peek_position();
- elem = ParseAssignmentExpression(true, CHECK_OK);
- CheckDestructuringElement(elem, beg_pos, end_position());
+ AcceptINScope scope(this, true);
+ elem = ParsePossibleDestructuringSubPattern(&accumulation_scope);
}
- values->Add(elem, zone_);
+ values.Add(elem);
if (peek() != Token::RBRACK) {
- Expect(Token::COMMA, CHECK_OK);
+ Expect(Token::COMMA);
+ if (elem->IsFailureExpression()) return elem;
}
}
return factory()->NewArrayLiteral(values, first_spread_index, pos);
}
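Note the new error plumbing visible in this loop: instead of threading bool* ok through every production, a parse failure is an ordinary sentinel value (FailureExpression) that each caller can test and forward. A self-contained sketch of the style (stand-in types, not V8's):

    struct ExprHandle {
      int id;
      bool IsFailure() const { return id < 0; }
    };
    constexpr ExprHandle kFailure{-1};

    ExprHandle ParseElement(int n) { return n == 2 ? kFailure : ExprHandle{n}; }

    ExprHandle ParseList() {
      for (int n = 0; n < 4; ++n) {
        ExprHandle elem = ParseElement(n);
        if (elem.IsFailure()) return elem;  // forward, as the loop above does
      }
      return ExprHandle{42};
    }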
-inline bool ParsePropertyKindFromToken(Token::Value token,
- ParsePropertyKind* kind) {
- // This returns true, setting the property kind, iff the given token is one
- // which must occur after a property name, indicating that the previous token
- // was in fact a name and not a modifier (like the "get" in "get x").
- switch (token) {
- case Token::COLON:
- *kind = ParsePropertyKind::kValue;
- return true;
- case Token::COMMA:
- case Token::RBRACE:
- case Token::ASSIGN:
- *kind = ParsePropertyKind::kShorthand;
- return true;
- case Token::LPAREN:
- *kind = ParsePropertyKind::kMethod;
- return true;
- case Token::MUL:
- case Token::SEMICOLON:
- *kind = ParsePropertyKind::kClassField;
- return true;
- case Token::PRIVATE_NAME:
- *kind = ParsePropertyKind::kClassField;
- return true;
- default:
- break;
- }
- return false;
-}
-
-inline bool ParseAsAccessor(Token::Value token, Token::Value contextual_token,
- ParsePropertyKind* kind) {
- if (ParsePropertyKindFromToken(token, kind)) return false;
-
- if (contextual_token == Token::GET) {
- *kind = ParsePropertyKind::kAccessorGetter;
- } else if (contextual_token == Token::SET) {
- *kind = ParsePropertyKind::kAccessorSetter;
- } else {
- return false;
- }
-
- return true;
-}
-
template <class Impl>
-typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParsePropertyName(
- IdentifierT* name, ParsePropertyKind* kind, ParseFunctionFlags* flags,
- bool* is_computed_name, bool* ok) {
- DCHECK_EQ(ParsePropertyKind::kNotSet, *kind);
- DCHECK_EQ(*flags, ParseFunctionFlag::kIsNormal);
- DCHECK(!*is_computed_name);
+typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseProperty(
+ ParsePropertyInfo* prop_info) {
+ DCHECK_EQ(prop_info->kind, ParsePropertyKind::kNotSet);
+ DCHECK_EQ(prop_info->function_flags, ParseFunctionFlag::kIsNormal);
+ DCHECK(!prop_info->is_computed_name);
if (Check(Token::ASYNC)) {
Token::Value token = peek();
- if ((token != Token::MUL && ParsePropertyKindFromToken(token, kind)) ||
+ if ((token != Token::MUL && prop_info->ParsePropertyKindFromToken(token)) ||
scanner()->HasLineTerminatorBeforeNext()) {
- *name = impl()->GetSymbol();
- impl()->PushLiteralName(*name);
- return factory()->NewStringLiteral(*name, position());
+ prop_info->name = impl()->GetSymbol();
+ impl()->PushLiteralName(prop_info->name);
+ return factory()->NewStringLiteral(prop_info->name, position());
}
- *flags = ParseFunctionFlag::kIsAsync;
- *kind = ParsePropertyKind::kMethod;
+ prop_info->function_flags = ParseFunctionFlag::kIsAsync;
+ prop_info->kind = ParsePropertyKind::kMethod;
}
if (Check(Token::MUL)) {
- *flags |= ParseFunctionFlag::kIsGenerator;
- *kind = ParsePropertyKind::kMethod;
- }
-
- if (*kind == ParsePropertyKind::kNotSet && Check(Token::IDENTIFIER) &&
- !ParseAsAccessor(peek(), scanner()->current_contextual_token(), kind)) {
- *name = impl()->GetSymbol();
- impl()->PushLiteralName(*name);
- return factory()->NewStringLiteral(*name, position());
+ prop_info->function_flags |= ParseFunctionFlag::kIsGenerator;
+ prop_info->kind = ParsePropertyKind::kMethod;
+ }
+
+ if (prop_info->kind == ParsePropertyKind::kNotSet &&
+ Check(Token::IDENTIFIER)) {
+ IdentifierT symbol = impl()->GetSymbol();
+ if (!prop_info->ParsePropertyKindFromToken(peek())) {
+ if (impl()->IdentifierEquals(symbol, ast_value_factory()->get_string())) {
+ prop_info->kind = ParsePropertyKind::kAccessorGetter;
+ } else if (impl()->IdentifierEquals(symbol,
+ ast_value_factory()->set_string())) {
+ prop_info->kind = ParsePropertyKind::kAccessorSetter;
+ }
+ }
+ if (!IsAccessor(prop_info->kind)) {
+ prop_info->name = symbol;
+ impl()->PushLiteralName(prop_info->name);
+ return factory()->NewStringLiteral(prop_info->name, position());
+ }
}
int pos = peek_position();
@@ -2237,10 +1959,27 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParsePropertyName(
bool is_array_index;
uint32_t index;
switch (peek()) {
+ case Token::PRIVATE_NAME:
+ prop_info->is_private = true;
+ is_array_index = false;
+ Consume(Token::PRIVATE_NAME);
+ if (prop_info->kind == ParsePropertyKind::kNotSet) {
+ prop_info->ParsePropertyKindFromToken(peek());
+ }
+ prop_info->name = impl()->GetSymbol();
+ if (prop_info->position == PropertyPosition::kObjectLiteral ||
+ (!allow_harmony_private_methods() &&
+ (IsAccessor(prop_info->kind) ||
+ prop_info->kind == ParsePropertyKind::kMethod))) {
+ ReportUnexpectedToken(Next());
+ return impl()->FailureExpression();
+ }
+ break;
+
case Token::STRING:
Consume(Token::STRING);
- *name = impl()->GetSymbol();
- is_array_index = impl()->IsArrayIndex(*name, &index);
+ prop_info->name = impl()->GetSymbol();
+ is_array_index = impl()->IsArrayIndex(prop_info->name, &index);
break;
case Token::SMI:
@@ -2248,82 +1987,76 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParsePropertyName(
index = scanner()->smi_value();
is_array_index = true;
// Token::SMI values were scanned from their canonical representation.
- *name = impl()->GetSymbol();
+ prop_info->name = impl()->GetSymbol();
break;
case Token::NUMBER: {
Consume(Token::NUMBER);
- *name = impl()->GetNumberAsSymbol();
- is_array_index = impl()->IsArrayIndex(*name, &index);
+ prop_info->name = impl()->GetNumberAsSymbol();
+ is_array_index = impl()->IsArrayIndex(prop_info->name, &index);
break;
}
case Token::LBRACK: {
- *name = impl()->NullIdentifier();
- *is_computed_name = true;
+ prop_info->name = impl()->NullIdentifier();
+ prop_info->is_computed_name = true;
Consume(Token::LBRACK);
- ExpressionClassifier computed_name_classifier(this);
- ExpressionT expression = ParseAssignmentExpression(true, CHECK_OK);
- ValidateExpression(CHECK_OK);
- AccumulateFormalParameterContainmentErrors();
- Expect(Token::RBRACK, CHECK_OK);
- if (*kind == ParsePropertyKind::kNotSet) {
- ParsePropertyKindFromToken(peek(), kind);
+ AcceptINScope scope(this, true);
+ ExpressionT expression = ParseAssignmentExpression();
+ Expect(Token::RBRACK);
+ if (prop_info->kind == ParsePropertyKind::kNotSet) {
+ prop_info->ParsePropertyKindFromToken(peek());
}
return expression;
}
case Token::ELLIPSIS:
- if (*kind == ParsePropertyKind::kNotSet) {
- *name = impl()->NullIdentifier();
+ if (prop_info->kind == ParsePropertyKind::kNotSet) {
+ prop_info->name = impl()->NullIdentifier();
Consume(Token::ELLIPSIS);
- ExpressionT expression = ParseAssignmentExpression(true, CHECK_OK);
- *kind = ParsePropertyKind::kSpread;
-
- if (!impl()->IsIdentifier(expression)) {
- classifier()->RecordBindingPatternError(
- scanner()->location(),
+ AcceptINScope scope(this, true);
+ int start_pos = peek_position();
+ ExpressionT expression =
+ ParsePossibleDestructuringSubPattern(prop_info->accumulation_scope);
+ prop_info->kind = ParsePropertyKind::kSpread;
+
+ if (!IsValidReferenceExpression(expression)) {
+ expression_scope()->RecordDeclarationError(
+ Scanner::Location(start_pos, end_position()),
MessageTemplate::kInvalidRestBindingPattern);
- }
-
- if (!expression->IsValidReferenceExpression()) {
- classifier()->RecordAssignmentPatternError(
- scanner()->location(),
+ expression_scope()->RecordPatternError(
+ Scanner::Location(start_pos, end_position()),
MessageTemplate::kInvalidRestAssignmentPattern);
}
if (peek() != Token::RBRACE) {
- classifier()->RecordPatternError(scanner()->location(),
- MessageTemplate::kElementAfterRest);
+ expression_scope()->RecordPatternError(
+ scanner()->location(), MessageTemplate::kElementAfterRest);
}
return expression;
}
V8_FALLTHROUGH;
default:
- *name = ParseIdentifierName(CHECK_OK);
+ prop_info->name = ParsePropertyName();
is_array_index = false;
break;
}
- if (*kind == ParsePropertyKind::kNotSet) {
- ParsePropertyKindFromToken(peek(), kind);
+ if (prop_info->kind == ParsePropertyKind::kNotSet) {
+ prop_info->ParsePropertyKindFromToken(peek());
}
- impl()->PushLiteralName(*name);
+ impl()->PushLiteralName(prop_info->name);
return is_array_index ? factory()->NewNumberLiteral(index, pos)
- : factory()->NewStringLiteral(*name, pos);
+ : factory()->NewStringLiteral(prop_info->name, pos);
}
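ParseProperty folds the old free function ParsePropertyKindFromToken (deleted above) into ParsePropertyInfo; the classification is still driven by the token after the property name, with '=' now tracked as its own kAssign kind rather than folded into kShorthand. An illustrative reconstruction of the table (not V8's actual enums):

    enum class After { kColon, kComma, kRBrace, kAssign, kLParen, kMul, kSemicolon, kOther };
    enum class Kind { kValue, kShorthand, kAssign, kMethod, kClassField, kNotSet };

    // Returns false when the "name" was really a modifier (the 'get' in 'get x()').
    bool KindFromFollowingToken(After t, Kind* kind) {
      switch (t) {
        case After::kColon:     *kind = Kind::kValue;      return true;  // {a: 1}
        case After::kComma:
        case After::kRBrace:    *kind = Kind::kShorthand;  return true;  // {a}
        case After::kAssign:    *kind = Kind::kAssign;     return true;  // {a = 1} cover grammar
        case After::kLParen:    *kind = Kind::kMethod;     return true;  // {a() {}}
        case After::kMul:
        case After::kSemicolon: *kind = Kind::kClassField; return true;  // class fields
        default:                                           return false;
      }
    }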
template <typename Impl>
typename ParserBase<Impl>::ClassLiteralPropertyT
-ParserBase<Impl>::ParseClassPropertyDefinition(
- ClassLiteralChecker* checker, ClassInfo* class_info, IdentifierT* name,
- bool has_extends, bool* is_computed_name,
- ClassLiteralProperty::Kind* property_kind, bool* is_static, bool* ok) {
+ParserBase<Impl>::ParseClassPropertyDefinition(ClassInfo* class_info,
+ ParsePropertyInfo* prop_info,
+ bool has_extends) {
DCHECK_NOT_NULL(class_info);
- ParseFunctionFlags function_flags = ParseFunctionFlag::kIsNormal;
- *is_static = false;
- *property_kind = ClassLiteralProperty::METHOD;
- ParsePropertyKind kind = ParsePropertyKind::kNotSet;
+ DCHECK_EQ(prop_info->position, PropertyPosition::kClassLiteral);
Token::Value name_token = peek();
DCHECK_IMPLIES(name_token == Token::PRIVATE_NAME,
@@ -2331,48 +2064,39 @@ ParserBase<Impl>::ParseClassPropertyDefinition(
int property_beg_pos = scanner()->peek_location().beg_pos;
int name_token_position = property_beg_pos;
- *name = impl()->NullIdentifier();
ExpressionT name_expression;
if (name_token == Token::STATIC) {
Consume(Token::STATIC);
name_token_position = scanner()->peek_location().beg_pos;
if (peek() == Token::LPAREN) {
- kind = ParsePropertyKind::kMethod;
- *name = impl()->GetSymbol(); // TODO(bakkot) specialize on 'static'
- name_expression = factory()->NewStringLiteral(*name, position());
+ prop_info->kind = ParsePropertyKind::kMethod;
+ // TODO(bakkot) specialize on 'static'
+ prop_info->name = impl()->GetSymbol();
+ name_expression =
+ factory()->NewStringLiteral(prop_info->name, position());
} else if (peek() == Token::ASSIGN || peek() == Token::SEMICOLON ||
peek() == Token::RBRACE) {
- *name = impl()->GetSymbol(); // TODO(bakkot) specialize on 'static'
- name_expression = factory()->NewStringLiteral(*name, position());
- } else if (peek() == Token::PRIVATE_NAME) {
- DCHECK(allow_harmony_private_fields());
- // TODO(gsathya): Make a better error message for this.
- ReportUnexpectedToken(Next());
- *ok = false;
- return impl()->NullLiteralProperty();
- } else {
- *is_static = true;
+ // TODO(bakkot) specialize on 'static'
+ prop_info->name = impl()->GetSymbol();
name_expression =
- ParsePropertyName(name, &kind, &function_flags, is_computed_name,
- CHECK_OK_CUSTOM(NullLiteralProperty));
+ factory()->NewStringLiteral(prop_info->name, position());
+ } else {
+ prop_info->is_static = true;
+ name_expression = ParseProperty(prop_info);
}
- } else if (name_token == Token::PRIVATE_NAME) {
- Consume(Token::PRIVATE_NAME);
- *name = impl()->GetSymbol();
- name_expression = factory()->NewStringLiteral(*name, position());
} else {
- name_expression =
- ParsePropertyName(name, &kind, &function_flags, is_computed_name,
- CHECK_OK_CUSTOM(NullLiteralProperty));
+ name_expression = ParseProperty(prop_info);
}
- if (!class_info->has_name_static_property && *is_static &&
- impl()->IsName(*name)) {
+ if (!class_info->has_name_static_property && prop_info->is_static &&
+ impl()->IsName(prop_info->name)) {
class_info->has_name_static_property = true;
}
- switch (kind) {
+ switch (prop_info->kind) {
+ case ParsePropertyKind::kAssign:
case ParsePropertyKind::kClassField:
+ case ParsePropertyKind::kShorthandOrClassField:
case ParsePropertyKind::kNotSet: // This case is a name followed by a name
// or other property. Here we have to
// assume that's an uninitialized field
@@ -2381,34 +2105,33 @@ ParserBase<Impl>::ParseClassPropertyDefinition(
// semicolon. If not, there will be a
// syntax error after parsing the first
// name as an uninitialized field.
- case ParsePropertyKind::kShorthand:
- case ParsePropertyKind::kValue:
if (allow_harmony_public_fields() || allow_harmony_private_fields()) {
- *property_kind = name_token == Token::PRIVATE_NAME
- ? ClassLiteralProperty::PRIVATE_FIELD
- : ClassLiteralProperty::PUBLIC_FIELD;
- if (*is_static && !allow_harmony_static_fields()) {
+ prop_info->kind = ParsePropertyKind::kClassField;
+ DCHECK_IMPLIES(prop_info->is_computed_name, !prop_info->is_private);
+
+ if (prop_info->is_static && !allow_harmony_static_fields()) {
ReportUnexpectedToken(Next());
- *ok = false;
return impl()->NullLiteralProperty();
}
- if (!*is_computed_name) {
- checker->CheckClassFieldName(*is_static,
- CHECK_OK_CUSTOM(NullLiteralProperty));
+
+ if (!prop_info->is_computed_name) {
+ CheckClassFieldName(prop_info->name, prop_info->is_static);
}
- ExpressionT initializer =
- ParseClassFieldInitializer(class_info, property_beg_pos, *is_static,
- CHECK_OK_CUSTOM(NullLiteralProperty));
- ExpectSemicolon(CHECK_OK_CUSTOM(NullLiteralProperty));
+
+ ExpressionT initializer = ParseMemberInitializer(
+ class_info, property_beg_pos, prop_info->is_static);
+ ExpectSemicolon();
+
ClassLiteralPropertyT result = factory()->NewClassLiteralProperty(
- name_expression, initializer, *property_kind, *is_static,
- *is_computed_name);
- impl()->SetFunctionNameFromPropertyName(result, *name);
+ name_expression, initializer, ClassLiteralProperty::FIELD,
+ prop_info->is_static, prop_info->is_computed_name,
+ prop_info->is_private);
+ impl()->SetFunctionNameFromPropertyName(result, prop_info->name);
+
return result;
} else {
ReportUnexpectedToken(Next());
- *ok = false;
return impl()->NullLiteralProperty();
}
@@ -2421,89 +2144,89 @@ ParserBase<Impl>::ParseClassPropertyDefinition(
// async '*' PropertyName '(' StrictFormalParameters ')'
// '{' FunctionBody '}'
- if (!*is_computed_name) {
- checker->CheckClassMethodName(name_token, ParsePropertyKind::kMethod,
- function_flags, *is_static,
- CHECK_OK_CUSTOM(NullLiteralProperty));
+ if (!prop_info->is_computed_name) {
+ CheckClassMethodName(prop_info->name, ParsePropertyKind::kMethod,
+ prop_info->function_flags, prop_info->is_static,
+ &class_info->has_seen_constructor);
}
- FunctionKind kind = MethodKindFor(function_flags);
+ FunctionKind kind = MethodKindFor(prop_info->function_flags);
- if (!*is_static && impl()->IsConstructor(*name)) {
+ if (!prop_info->is_static && impl()->IsConstructor(prop_info->name)) {
class_info->has_seen_constructor = true;
kind = has_extends ? FunctionKind::kDerivedConstructor
: FunctionKind::kBaseConstructor;
}
ExpressionT value = impl()->ParseFunctionLiteral(
- *name, scanner()->location(), kSkipFunctionNameCheck, kind,
+ prop_info->name, scanner()->location(), kSkipFunctionNameCheck, kind,
name_token_position, FunctionLiteral::kAccessorOrMethod,
- language_mode(), nullptr, CHECK_OK_CUSTOM(NullLiteralProperty));
+ language_mode(), nullptr);
- *property_kind = ClassLiteralProperty::METHOD;
ClassLiteralPropertyT result = factory()->NewClassLiteralProperty(
- name_expression, value, *property_kind, *is_static,
- *is_computed_name);
- impl()->SetFunctionNameFromPropertyName(result, *name);
+ name_expression, value, ClassLiteralProperty::METHOD,
+ prop_info->is_static, prop_info->is_computed_name,
+ prop_info->is_private);
+ impl()->SetFunctionNameFromPropertyName(result, prop_info->name);
return result;
}
case ParsePropertyKind::kAccessorGetter:
case ParsePropertyKind::kAccessorSetter: {
- DCHECK_EQ(function_flags, ParseFunctionFlag::kIsNormal);
- bool is_get = kind == ParsePropertyKind::kAccessorGetter;
+ DCHECK_EQ(prop_info->function_flags, ParseFunctionFlag::kIsNormal);
+ bool is_get = prop_info->kind == ParsePropertyKind::kAccessorGetter;
- if (!*is_computed_name) {
- checker->CheckClassMethodName(name_token, kind,
- ParseFunctionFlag::kIsNormal, *is_static,
- CHECK_OK_CUSTOM(NullLiteralProperty));
+ if (!prop_info->is_computed_name) {
+ CheckClassMethodName(prop_info->name, prop_info->kind,
+ ParseFunctionFlag::kIsNormal, prop_info->is_static,
+ &class_info->has_seen_constructor);
// Make sure the name expression is a string since we need a Name for
// Runtime_DefineAccessorPropertyUnchecked and since we can determine
// this statically we can skip the extra runtime check.
- name_expression =
- factory()->NewStringLiteral(*name, name_expression->position());
+ name_expression = factory()->NewStringLiteral(
+ prop_info->name, name_expression->position());
}
FunctionKind kind = is_get ? FunctionKind::kGetterFunction
: FunctionKind::kSetterFunction;
FunctionLiteralT value = impl()->ParseFunctionLiteral(
- *name, scanner()->location(), kSkipFunctionNameCheck, kind,
+ prop_info->name, scanner()->location(), kSkipFunctionNameCheck, kind,
name_token_position, FunctionLiteral::kAccessorOrMethod,
- language_mode(), nullptr, CHECK_OK_CUSTOM(NullLiteralProperty));
+ language_mode(), nullptr);
- *property_kind =
+ ClassLiteralProperty::Kind property_kind =
is_get ? ClassLiteralProperty::GETTER : ClassLiteralProperty::SETTER;
ClassLiteralPropertyT result = factory()->NewClassLiteralProperty(
- name_expression, value, *property_kind, *is_static,
- *is_computed_name);
+ name_expression, value, property_kind, prop_info->is_static,
+ prop_info->is_computed_name, prop_info->is_private);
const AstRawString* prefix =
is_get ? ast_value_factory()->get_space_string()
: ast_value_factory()->set_space_string();
- impl()->SetFunctionNameFromPropertyName(result, *name, prefix);
+ impl()->SetFunctionNameFromPropertyName(result, prop_info->name, prefix);
return result;
}
+ case ParsePropertyKind::kValue:
+ case ParsePropertyKind::kShorthand:
case ParsePropertyKind::kSpread:
- ReportUnexpectedTokenAt(
+ impl()->ReportUnexpectedTokenAt(
Scanner::Location(name_token_position, name_expression->position()),
name_token);
- *ok = false;
return impl()->NullLiteralProperty();
}
UNREACHABLE();
}
template <typename Impl>
-typename ParserBase<Impl>::ExpressionT
-ParserBase<Impl>::ParseClassFieldInitializer(ClassInfo* class_info, int beg_pos,
- bool is_static, bool* ok) {
- DeclarationScope* initializer_scope = is_static
- ? class_info->static_fields_scope
- : class_info->instance_fields_scope;
+typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseMemberInitializer(
+ ClassInfo* class_info, int beg_pos, bool is_static) {
+ DeclarationScope* initializer_scope =
+ is_static ? class_info->static_fields_scope
+ : class_info->instance_members_scope;
if (initializer_scope == nullptr) {
initializer_scope =
- NewFunctionScope(FunctionKind::kClassFieldsInitializerFunction);
+ NewFunctionScope(FunctionKind::kClassMembersInitializerFunction);
// TODO(gsathya): Make scopes non-contiguous.
initializer_scope->set_start_position(beg_pos);
initializer_scope->SetLanguageMode(LanguageMode::kStrict);
@@ -2513,11 +2236,9 @@ ParserBase<Impl>::ParseClassFieldInitializer(ClassInfo* class_info, int beg_pos,
if (Check(Token::ASSIGN)) {
FunctionState initializer_state(&function_state_, &scope_,
initializer_scope);
- ExpressionClassifier expression_classifier(this);
- initializer =
- ParseAssignmentExpression(true, CHECK_OK_CUSTOM(NullExpression));
- ValidateExpression(CHECK_OK_CUSTOM(NullExpression));
+ AcceptINScope scope(this, true);
+ initializer = ParseAssignmentExpression();
} else {
initializer = factory()->NewUndefinedLiteral(kNoSourcePosition);
}
@@ -2527,8 +2248,8 @@ ParserBase<Impl>::ParseClassFieldInitializer(ClassInfo* class_info, int beg_pos,
class_info->static_fields_scope = initializer_scope;
class_info->has_static_class_fields = true;
} else {
- class_info->instance_fields_scope = initializer_scope;
- class_info->has_instance_class_fields = true;
+ class_info->instance_members_scope = initializer_scope;
+ class_info->has_instance_members = true;
}
return initializer;
@@ -2536,30 +2257,25 @@ ParserBase<Impl>::ParseClassFieldInitializer(ClassInfo* class_info, int beg_pos,
template <typename Impl>
typename ParserBase<Impl>::ObjectLiteralPropertyT
-ParserBase<Impl>::ParseObjectPropertyDefinition(ObjectLiteralChecker* checker,
- bool* is_computed_name,
- bool* is_rest_property,
- bool* ok) {
- ParseFunctionFlags function_flags = ParseFunctionFlag::kIsNormal;
- ParsePropertyKind kind = ParsePropertyKind::kNotSet;
-
- IdentifierT name = impl()->NullIdentifier();
+ParserBase<Impl>::ParseObjectPropertyDefinition(ParsePropertyInfo* prop_info,
+ bool* has_seen_proto) {
+ DCHECK_EQ(prop_info->position, PropertyPosition::kObjectLiteral);
Token::Value name_token = peek();
- int next_beg_pos = peek_position();
- int next_end_pos = peek_end_position();
+ Scanner::Location next_loc = scanner()->peek_location();
- ExpressionT name_expression =
- ParsePropertyName(&name, &kind, &function_flags, is_computed_name,
- CHECK_OK_CUSTOM(NullLiteralProperty));
+ ExpressionT name_expression = ParseProperty(prop_info);
+ IdentifierT name = prop_info->name;
+ ParseFunctionFlags function_flags = prop_info->function_flags;
+ ParsePropertyKind kind = prop_info->kind;
- switch (kind) {
+ switch (prop_info->kind) {
case ParsePropertyKind::kSpread:
DCHECK_EQ(function_flags, ParseFunctionFlag::kIsNormal);
- DCHECK(!*is_computed_name);
+ DCHECK(!prop_info->is_computed_name);
DCHECK_EQ(Token::ELLIPSIS, name_token);
- *is_computed_name = true;
- *is_rest_property = true;
+ prop_info->is_computed_name = true;
+ prop_info->is_rest = true;
return factory()->NewObjectLiteralProperty(
factory()->NewTheHoleLiteral(), name_expression,
@@ -2568,21 +2284,27 @@ ParserBase<Impl>::ParseObjectPropertyDefinition(ObjectLiteralChecker* checker,
case ParsePropertyKind::kValue: {
DCHECK_EQ(function_flags, ParseFunctionFlag::kIsNormal);
- if (!*is_computed_name) {
- checker->CheckDuplicateProto(name_token);
+ if (!prop_info->is_computed_name &&
+ impl()->IdentifierEquals(name, ast_value_factory()->proto_string())) {
+ if (*has_seen_proto) {
+ expression_scope()->RecordExpressionError(
+ scanner()->location(), MessageTemplate::kDuplicateProto);
+ }
+ *has_seen_proto = true;
}
Consume(Token::COLON);
- int beg_pos = peek_position();
+ AcceptINScope scope(this, true);
ExpressionT value =
- ParseAssignmentExpression(true, CHECK_OK_CUSTOM(NullLiteralProperty));
- CheckDestructuringElement(value, beg_pos, end_position());
+ ParsePossibleDestructuringSubPattern(prop_info->accumulation_scope);
ObjectLiteralPropertyT result = factory()->NewObjectLiteralProperty(
- name_expression, value, *is_computed_name);
+ name_expression, value, prop_info->is_computed_name);
impl()->SetFunctionNameFromPropertyName(result, name);
return result;
}
+ case ParsePropertyKind::kAssign:
+ case ParsePropertyKind::kShorthandOrClassField:
case ParsePropertyKind::kShorthand: {
// PropertyDefinition
// IdentifierReference
@@ -2592,56 +2314,43 @@ ParserBase<Impl>::ParseObjectPropertyDefinition(ObjectLiteralChecker* checker,
// IdentifierReference Initializer?
DCHECK_EQ(function_flags, ParseFunctionFlag::kIsNormal);
- if (!Token::IsIdentifier(name_token, language_mode(),
- this->is_generator(),
- parsing_module_ || is_async_function())) {
+ if (!Token::IsValidIdentifier(name_token, language_mode(), is_generator(),
+ parsing_module_ || is_async_function())) {
ReportUnexpectedToken(Next());
- *ok = false;
return impl()->NullLiteralProperty();
}
- DCHECK(!*is_computed_name);
-
- if (classifier()->duplicate_finder() != nullptr &&
- scanner()->IsDuplicateSymbol(classifier()->duplicate_finder(),
- ast_value_factory())) {
- classifier()->RecordDuplicateFormalParameterError(
- scanner()->location());
- }
-
- if (impl()->IsEvalOrArguments(name) && is_strict(language_mode())) {
- classifier()->RecordBindingPatternError(
- scanner()->location(), MessageTemplate::kStrictEvalArguments);
- }
+ DCHECK(!prop_info->is_computed_name);
if (name_token == Token::LET) {
- classifier()->RecordLetPatternError(
+ expression_scope()->RecordLexicalDeclarationError(
scanner()->location(), MessageTemplate::kLetInLexicalBinding);
}
if (name_token == Token::AWAIT) {
DCHECK(!is_async_function());
- classifier()->RecordAsyncArrowFormalParametersError(
- Scanner::Location(next_beg_pos, next_end_pos),
- MessageTemplate::kAwaitBindingIdentifier);
+ expression_scope()->RecordAsyncArrowParametersError(
+ next_loc, MessageTemplate::kAwaitBindingIdentifier);
+ }
+ ExpressionT lhs =
+ impl()->ExpressionFromIdentifier(name, next_loc.beg_pos);
+ if (!IsAssignableIdentifier(lhs)) {
+ expression_scope()->RecordPatternError(
+ next_loc, MessageTemplate::kStrictEvalArguments);
}
- ExpressionT lhs = impl()->ExpressionFromIdentifier(name, next_beg_pos);
- CheckDestructuringElement(lhs, next_beg_pos, next_end_pos);
ExpressionT value;
if (peek() == Token::ASSIGN) {
Consume(Token::ASSIGN);
- ExpressionClassifier rhs_classifier(this);
- ExpressionT rhs = ParseAssignmentExpression(
- true, CHECK_OK_CUSTOM(NullLiteralProperty));
- ValidateExpression(CHECK_OK_CUSTOM(NullLiteralProperty));
- AccumulateFormalParameterContainmentErrors();
- value = factory()->NewAssignment(Token::ASSIGN, lhs, rhs,
- kNoSourcePosition);
- classifier()->RecordExpressionError(
- Scanner::Location(next_beg_pos, end_position()),
+ {
+ AcceptINScope scope(this, true);
+ ExpressionT rhs = ParseAssignmentExpression();
+ value = factory()->NewAssignment(Token::ASSIGN, lhs, rhs,
+ kNoSourcePosition);
+ impl()->SetFunctionNameFromIdentifierRef(rhs, lhs);
+ }
+ expression_scope()->RecordExpressionError(
+ Scanner::Location(next_loc.beg_pos, end_position()),
MessageTemplate::kInvalidCoverInitializedName);
-
- impl()->SetFunctionNameFromIdentifierRef(rhs, lhs);
} else {
value = lhs;
}
@@ -2657,20 +2366,20 @@ ParserBase<Impl>::ParseObjectPropertyDefinition(ObjectLiteralChecker* checker,
// PropertyName '(' StrictFormalParameters ')' '{' FunctionBody '}'
// '*' PropertyName '(' StrictFormalParameters ')' '{' FunctionBody '}'
- classifier()->RecordPatternError(
- Scanner::Location(next_beg_pos, end_position()),
+ expression_scope()->RecordPatternError(
+ Scanner::Location(next_loc.beg_pos, end_position()),
MessageTemplate::kInvalidDestructuringTarget);
FunctionKind kind = MethodKindFor(function_flags);
ExpressionT value = impl()->ParseFunctionLiteral(
name, scanner()->location(), kSkipFunctionNameCheck, kind,
- next_beg_pos, FunctionLiteral::kAccessorOrMethod, language_mode(),
- nullptr, CHECK_OK_CUSTOM(NullLiteralProperty));
+ next_loc.beg_pos, FunctionLiteral::kAccessorOrMethod, language_mode(),
+ nullptr);
ObjectLiteralPropertyT result = factory()->NewObjectLiteralProperty(
name_expression, value, ObjectLiteralProperty::COMPUTED,
- *is_computed_name);
+ prop_info->is_computed_name);
impl()->SetFunctionNameFromPropertyName(result, name);
return result;
}
@@ -2680,11 +2389,11 @@ ParserBase<Impl>::ParseObjectPropertyDefinition(ObjectLiteralChecker* checker,
DCHECK_EQ(function_flags, ParseFunctionFlag::kIsNormal);
bool is_get = kind == ParsePropertyKind::kAccessorGetter;
- classifier()->RecordPatternError(
- Scanner::Location(next_beg_pos, end_position()),
+ expression_scope()->RecordPatternError(
+ Scanner::Location(next_loc.beg_pos, end_position()),
MessageTemplate::kInvalidDestructuringTarget);
- if (!*is_computed_name) {
+ if (!prop_info->is_computed_name) {
// Make sure the name expression is a string since we need a Name for
// Runtime_DefineAccessorPropertyUnchecked and since we can determine
// this statically we can skip the extra runtime check.
@@ -2697,14 +2406,14 @@ ParserBase<Impl>::ParseObjectPropertyDefinition(ObjectLiteralChecker* checker,
FunctionLiteralT value = impl()->ParseFunctionLiteral(
name, scanner()->location(), kSkipFunctionNameCheck, kind,
- next_beg_pos, FunctionLiteral::kAccessorOrMethod, language_mode(),
- nullptr, CHECK_OK_CUSTOM(NullLiteralProperty));
+ next_loc.beg_pos, FunctionLiteral::kAccessorOrMethod, language_mode(),
+ nullptr);
ObjectLiteralPropertyT result = factory()->NewObjectLiteralProperty(
name_expression, value,
is_get ? ObjectLiteralProperty::GETTER
: ObjectLiteralProperty::SETTER,
- *is_computed_name);
+ prop_info->is_computed_name);
const AstRawString* prefix =
is_get ? ast_value_factory()->get_space_string()
: ast_value_factory()->set_space_string();
@@ -2715,42 +2424,41 @@ ParserBase<Impl>::ParseObjectPropertyDefinition(ObjectLiteralChecker* checker,
case ParsePropertyKind::kClassField:
case ParsePropertyKind::kNotSet:
ReportUnexpectedToken(Next());
- *ok = false;
return impl()->NullLiteralProperty();
}
UNREACHABLE();
}
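The standalone ObjectLiteralChecker is gone; duplicate '__proto__' detection is now a has_seen_proto flag checked inline in the kValue case, and the duplicate is recorded as an expression error only, so '{__proto__: a, __proto__: b}' still parses when it turns out to be a destructuring pattern. A sketch of that bookkeeping:

    // Only a non-computed '__proto__' *value* property participates.
    bool NoteProtoProperty(bool is_computed_name, bool is_proto_name,
                           bool* has_seen_proto) {
      if (is_computed_name || !is_proto_name) return true;  // ['__proto__']: v is fine
      if (*has_seen_proto) return false;  // record kDuplicateProto (expression-only)
      *has_seen_proto = true;
      return true;
    }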
template <typename Impl>
-typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseObjectLiteral(
- bool* ok) {
+typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseObjectLiteral() {
// ObjectLiteral ::
// '{' (PropertyDefinition (',' PropertyDefinition)* ','? )? '}'
int pos = peek_position();
- typename Types::ObjectPropertyList properties =
- impl()->NewObjectPropertyList(4);
+ ObjectPropertyListT properties(pointer_buffer());
int number_of_boilerplate_properties = 0;
bool has_computed_names = false;
bool has_rest_property = false;
- ObjectLiteralChecker checker(this);
+ bool has_seen_proto = false;
Consume(Token::LBRACE);
+ AccumulationScope accumulation_scope(expression_scope());
while (!Check(Token::RBRACE)) {
FuncNameInferrerState fni_state(&fni_);
- bool is_computed_name = false;
- bool is_rest_property = false;
- ObjectLiteralPropertyT property = ParseObjectPropertyDefinition(
- &checker, &is_computed_name, &is_rest_property, CHECK_OK);
+ ParsePropertyInfo prop_info(this, &accumulation_scope);
+ prop_info.position = PropertyPosition::kObjectLiteral;
+ ObjectLiteralPropertyT property =
+ ParseObjectPropertyDefinition(&prop_info, &has_seen_proto);
+ if (impl()->IsNull(property)) return impl()->FailureExpression();
- if (is_computed_name) {
+ if (prop_info.is_computed_name) {
has_computed_names = true;
}
- if (is_rest_property) {
+ if (prop_info.is_rest) {
has_rest_property = true;
}
@@ -2760,11 +2468,10 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseObjectLiteral(
number_of_boilerplate_properties++;
}
- properties->Add(property, zone());
+ properties.Add(property);
if (peek() != Token::RBRACE) {
- // Need {} because of the CHECK_OK macro.
- Expect(Token::COMMA, CHECK_OK);
+ Expect(Token::COMMA);
}
fni_.Infer();
@@ -2775,8 +2482,8 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseObjectLiteral(
// this runtime function. Here, we make sure that the number of
// properties is less than the number of arguments allowed for a runtime
// call.
- if (has_rest_property && properties->length() > Code::kMaxArguments) {
- this->classifier()->RecordPatternError(Scanner::Location(pos, position()),
+ if (has_rest_property && properties.length() > Code::kMaxArguments) {
+ expression_scope()->RecordPatternError(Scanner::Location(pos, position()),
MessageTemplate::kTooManyArguments);
}
@@ -2785,78 +2492,61 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseObjectLiteral(
}
template <typename Impl>
-typename ParserBase<Impl>::ExpressionListT ParserBase<Impl>::ParseArguments(
- Scanner::Location* first_spread_arg_loc, bool maybe_arrow,
- bool* is_simple_parameter_list, bool* ok) {
+void ParserBase<Impl>::ParseArguments(
+ typename ParserBase<Impl>::ExpressionListT* args, bool* has_spread,
+ ParsingArrowHeadFlag maybe_arrow) {
// Arguments ::
// '(' (AssignmentExpression)*[','] ')'
- Scanner::Location spread_arg = Scanner::Location::invalid();
- ExpressionListT result = impl()->NewExpressionList(4);
- Expect(Token::LPAREN, CHECK_OK_CUSTOM(NullExpressionList));
+ *has_spread = false;
+ Consume(Token::LPAREN);
+ AccumulationScope accumulation_scope(expression_scope());
+
while (peek() != Token::RPAREN) {
int start_pos = peek_position();
bool is_spread = Check(Token::ELLIPSIS);
int expr_pos = peek_position();
- ExpressionT argument =
- ParseAssignmentExpression(true, CHECK_OK_CUSTOM(NullExpressionList));
- if (!impl()->IsIdentifier(argument)) *is_simple_parameter_list = false;
+ AcceptINScope scope(this, true);
+ ExpressionT argument = ParseAssignmentExpressionCoverGrammar();
- if (!maybe_arrow) {
- ValidateExpression(CHECK_OK_CUSTOM(NullExpressionList));
+ if (V8_UNLIKELY(maybe_arrow == kMaybeArrowHead)) {
+ ClassifyArrowParameter(&accumulation_scope, expr_pos, argument);
+ if (is_spread) {
+ expression_scope()->RecordNonSimpleParameter();
+ if (argument->IsAssignment()) {
+ expression_scope()->RecordAsyncArrowParametersError(
+ scanner()->location(), MessageTemplate::kRestDefaultInitializer);
+ }
+ if (peek() == Token::COMMA) {
+ expression_scope()->RecordAsyncArrowParametersError(
+ scanner()->peek_location(), MessageTemplate::kParamAfterRest);
+ }
+ }
}
if (is_spread) {
- *is_simple_parameter_list = false;
- if (!spread_arg.IsValid()) {
- spread_arg.beg_pos = start_pos;
- spread_arg.end_pos = peek_position();
- }
- if (argument->IsAssignment()) {
- classifier()->RecordAsyncArrowFormalParametersError(
- scanner()->location(), MessageTemplate::kRestDefaultInitializer);
- }
+ *has_spread = true;
argument = factory()->NewSpread(argument, start_pos, expr_pos);
}
- result->Add(argument, zone_);
-
- if (peek() != Token::COMMA) break;
-
- Next();
-
- if (argument->IsSpread()) {
- classifier()->RecordAsyncArrowFormalParametersError(
- scanner()->location(), MessageTemplate::kParamAfterRest);
- }
+ args->Add(argument);
+ if (!Check(Token::COMMA)) break;
}
- if (result->length() > Code::kMaxArguments) {
+ if (args->length() > Code::kMaxArguments) {
ReportMessage(MessageTemplate::kTooManyArguments);
- *ok = false;
- return impl()->NullExpressionList();
+ return;
}
Scanner::Location location = scanner_->location();
- if (Token::RPAREN != Next()) {
+ if (!Check(Token::RPAREN)) {
impl()->ReportMessageAt(location, MessageTemplate::kUnterminatedArgList);
- *ok = false;
- return impl()->NullExpressionList();
- }
- *first_spread_arg_loc = spread_arg;
-
- if (!maybe_arrow || peek() != Token::ARROW) {
- if (maybe_arrow) {
- ValidateExpression(CHECK_OK_CUSTOM(NullExpressionList));
- }
}
-
- return result;
}
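With kMaybeArrowHead, ParseArguments cannot yet know whether 'async (…)' is a call or an arrow head, so parameter-shaped problems (a default on a rest argument, a parameter after rest) are recorded in the expression scope and surface only if '=>' actually follows the ')'. A sketch of such deferred reporting (illustrative, not V8's scope machinery):

    #include <optional>
    #include <string>

    struct DeferredParamError {
      std::optional<std::string> message;
      void Record(std::string m) {
        if (!message) message = std::move(m);  // keep the first error only
      }
      // Decide once we have seen whether '=>' follows the argument list.
      bool ShouldReport(bool is_arrow_head) const {
        return is_arrow_head && message.has_value();
      }
    };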
// Precedence = 2
template <typename Impl>
typename ParserBase<Impl>::ExpressionT
-ParserBase<Impl>::ParseAssignmentExpression(bool accept_IN, bool* ok) {
+ParserBase<Impl>::ParseAssignmentExpressionCoverGrammar() {
// AssignmentExpression ::
// ConditionalExpression
// ArrowFunction
@@ -2865,188 +2555,120 @@ ParserBase<Impl>::ParseAssignmentExpression(bool accept_IN, bool* ok) {
int lhs_beg_pos = peek_position();
if (peek() == Token::YIELD && is_generator()) {
- return ParseYieldExpression(accept_IN, ok);
+ return ParseYieldExpression();
}
FuncNameInferrerState fni_state(&fni_);
- ExpressionClassifier arrow_formals_classifier(
- this, classifier()->duplicate_finder());
- Scope::Snapshot scope_snapshot(scope());
- int rewritable_length = static_cast<int>(
- function_state_->destructuring_assignments_to_rewrite().size());
+ DCHECK_IMPLIES(!has_error(), next_arrow_function_info_.HasInitialState());
- bool is_async = peek() == Token::ASYNC &&
- !scanner()->HasLineTerminatorAfterNext() &&
- IsValidArrowFormalParametersStart(PeekAhead());
+ ExpressionT expression = ParseConditionalExpression();
- bool parenthesized_formals = peek() == Token::LPAREN;
- if (!is_async && !parenthesized_formals) {
- ArrowFormalParametersUnexpectedToken();
- }
-
- // Parse a simple, faster sub-grammar (primary expression) if it's evident
- // that we have only a trivial expression to parse.
- ExpressionT expression;
- if (IsTrivialExpression()) {
- expression = ParsePrimaryExpression(&is_async, CHECK_OK);
- } else {
- expression = ParseConditionalExpression(accept_IN, CHECK_OK);
- }
+ Token::Value op = peek();
- if (is_async && impl()->IsIdentifier(expression) && peek_any_identifier() &&
- PeekAhead() == Token::ARROW) {
- // async Identifier => AsyncConciseBody
- IdentifierT name = ParseAndClassifyIdentifier(CHECK_OK);
- expression =
- impl()->ExpressionFromIdentifier(name, position(), InferName::kNo);
- // Remove `async` keyword from inferred name stack.
- fni_.RemoveAsyncKeywordFromEnd();
- }
-
- if (peek() == Token::ARROW) {
- Scanner::Location arrow_loc = scanner()->peek_location();
- ValidateArrowFormalParameters(expression, parenthesized_formals, is_async,
- CHECK_OK);
- // This reads strangely, but is correct: it checks whether any
- // sub-expression of the parameter list failed to be a valid formal
- // parameter initializer. Since YieldExpressions are banned anywhere
- // in an arrow parameter list, this is correct.
- // TODO(adamk): Rename "FormalParameterInitializerError" to refer to
- // "YieldExpression", which is its only use.
- ValidateFormalParameterInitializer(CHECK_OK);
+ if (!Token::IsArrowOrAssignmentOp(op)) return expression;
+ // Arrow functions.
+ if (V8_UNLIKELY(op == Token::ARROW)) {
Scanner::Location loc(lhs_beg_pos, end_position());
- DeclarationScope* scope =
- NewFunctionScope(is_async ? FunctionKind::kAsyncArrowFunction
- : FunctionKind::kArrowFunction);
- // Because the arrow's parameters were parsed in the outer scope,
- // we need to fix up the scope chain appropriately.
- scope_snapshot.Reparent(scope);
-
- FormalParametersT parameters(scope);
- if (!classifier()->is_simple_parameter_list()) {
- scope->SetHasNonSimpleParameters();
- parameters.is_simple = false;
+ if (!impl()->IsIdentifier(expression) && !expression->is_parenthesized()) {
+ impl()->ReportMessageAt(
+ Scanner::Location(expression->position(), position()),
+ MessageTemplate::kMalformedArrowFunParamList);
+ return impl()->FailureExpression();
}
+ DeclarationScope* scope = next_arrow_function_info_.scope;
scope->set_start_position(lhs_beg_pos);
- Scanner::Location duplicate_loc = Scanner::Location::invalid();
- impl()->DeclareArrowFunctionFormalParameters(&parameters, expression, loc,
- &duplicate_loc, CHECK_OK);
- if (duplicate_loc.IsValid()) {
- classifier()->RecordDuplicateFormalParameterError(duplicate_loc);
- }
- expression = ParseArrowFunctionLiteral(accept_IN, parameters,
- rewritable_length, CHECK_OK);
- Accumulate(ExpressionClassifier::AsyncArrowFormalParametersProduction);
- classifier()->RecordPatternError(arrow_loc,
- MessageTemplate::kUnexpectedToken,
- Token::String(Token::ARROW));
- fni_.Infer();
-
- return expression;
- }
+ FormalParametersT parameters(scope);
+ parameters.set_strict_parameter_error(
+ next_arrow_function_info_.strict_parameter_error_location,
+ next_arrow_function_info_.strict_parameter_error_message);
+ parameters.is_simple = scope->has_simple_parameters();
+ next_arrow_function_info_.Reset();
- // "expression" was not itself an arrow function parameter list, but it might
- // form part of one. Propagate speculative formal parameter error locations
- // (including those for binding patterns, since formal parameters can
- // themselves contain binding patterns).
- unsigned productions = ExpressionClassifier::AllProductions &
- ~ExpressionClassifier::ArrowFormalParametersProduction;
+ impl()->DeclareArrowFunctionFormalParameters(&parameters, expression, loc);
- // Parenthesized identifiers and property references are allowed as part
- // of a larger assignment pattern, even though parenthesized patterns
- // themselves are not allowed, e.g., "[(x)] = []". Only accumulate
- // assignment pattern errors if the parsed expression is more complex.
- if (IsValidReferenceExpression(expression)) {
- productions &= ~ExpressionClassifier::AssignmentPatternProduction;
- }
+ expression = ParseArrowFunctionLiteral(parameters);
- const bool is_destructuring_assignment =
- IsValidPattern(expression) && peek() == Token::ASSIGN;
- if (is_destructuring_assignment) {
- // This is definitely not an expression so don't accumulate
- // expression-related errors.
- productions &= ~ExpressionClassifier::ExpressionProduction;
+ return expression;
}
- Accumulate(productions);
- if (!Token::IsAssignmentOp(peek())) return expression;
-
- if (is_destructuring_assignment) {
- ValidateAssignmentPattern(CHECK_OK);
+ if (V8_LIKELY(impl()->IsAssignableIdentifier(expression))) {
+ if (expression->is_parenthesized()) {
+ expression_scope()->RecordDeclarationError(
+ Scanner::Location(lhs_beg_pos, end_position()),
+ MessageTemplate::kInvalidDestructuringTarget);
+ }
+ expression_scope()->MarkIdentifierAsAssigned();
+ } else if (expression->IsProperty()) {
+ expression_scope()->RecordDeclarationError(
+ Scanner::Location(lhs_beg_pos, end_position()),
+ MessageTemplate::kInvalidPropertyBindingPattern);
+ } else if (expression->IsPattern() && op == Token::ASSIGN) {
+    // Destructuring assignment.
+ if (expression->is_parenthesized()) {
+ expression_scope()->RecordPatternError(
+ Scanner::Location(lhs_beg_pos, end_position()),
+ MessageTemplate::kInvalidDestructuringTarget);
+ }
+ expression_scope()->ValidateAsPattern(expression, lhs_beg_pos,
+ end_position());
} else {
- expression = CheckAndRewriteReferenceExpression(
+ DCHECK(!IsValidReferenceExpression(expression));
+ expression = RewriteInvalidReferenceExpression(
expression, lhs_beg_pos, end_position(),
- MessageTemplate::kInvalidLhsInAssignment, CHECK_OK);
+ MessageTemplate::kInvalidLhsInAssignment);
}
- impl()->MarkExpressionAsAssigned(expression);
+ Consume(op);
+ int op_position = position();
- Token::Value op = Next(); // Get assignment operator.
- if (op != Token::ASSIGN) {
- classifier()->RecordPatternError(scanner()->location(),
- MessageTemplate::kUnexpectedToken,
- Token::String(op));
- }
- int pos = position();
-
- ExpressionClassifier rhs_classifier(this);
+ ExpressionT right = ParseAssignmentExpression();
- ExpressionT right = ParseAssignmentExpression(accept_IN, CHECK_OK);
- ValidateExpression(CHECK_OK);
- AccumulateFormalParameterContainmentErrors();
-
- // We try to estimate the set of properties set by constructors. We define a
- // new property whenever there is an assignment to a property of 'this'. We
- // should probably only add properties if we haven't seen them
- // before. Otherwise we'll probably overestimate the number of properties.
- if (op == Token::ASSIGN && impl()->IsThisProperty(expression)) {
- function_state_->AddProperty();
- }
-
- impl()->CheckAssigningFunctionLiteralToProperty(expression, right);
+ if (op == Token::ASSIGN) {
+ // We try to estimate the set of properties set by constructors. We define a
+ // new property whenever there is an assignment to a property of 'this'. We
+ // should probably only add properties if we haven't seen them before.
+ // Otherwise we'll probably overestimate the number of properties.
+ if (impl()->IsThisProperty(expression)) function_state_->AddProperty();
+
+ impl()->CheckAssigningFunctionLiteralToProperty(expression, right);
+
+ // Check if the right hand side is a call to avoid inferring a
+ // name if we're dealing with "a = function(){...}();"-like
+ // expression.
+ if (right->IsCall() || right->IsCallNew()) {
+ fni_.RemoveLastFunction();
+ } else {
+ fni_.Infer();
+ }
- // Check if the right hand side is a call to avoid inferring a
- // name if we're dealing with "a = function(){...}();"-like
- // expression.
- if (op == Token::ASSIGN && !right->IsCall() && !right->IsCallNew()) {
- fni_.Infer();
+ impl()->SetFunctionNameFromIdentifierRef(right, expression);
} else {
+ expression_scope()->RecordPatternError(
+ Scanner::Location(lhs_beg_pos, end_position()),
+ MessageTemplate::kInvalidDestructuringTarget);
fni_.RemoveLastFunction();
}
- if (op == Token::ASSIGN) {
- impl()->SetFunctionNameFromIdentifierRef(right, expression);
- }
-
- DCHECK_NE(op, Token::INIT);
- ExpressionT result = factory()->NewAssignment(op, expression, right, pos);
-
- if (is_destructuring_assignment) {
- DCHECK_NE(op, Token::ASSIGN_EXP);
- auto rewritable = factory()->NewRewritableExpression(result, scope());
- impl()->QueueDestructuringAssignmentForRewriting(rewritable);
- result = rewritable;
- }
-
- return result;
+ return factory()->NewAssignment(op, expression, right, op_position);
}
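The classifier-accumulator machinery is replaced by a single-pass flow: parse a ConditionalExpression once, then let the next token choose the continuation. Only an identifier or a parenthesized head may precede '=>'; an assignable identifier or property feeds a plain assignment; a pattern before '=' becomes a destructuring assignment; anything else is rewritten as an invalid reference. A condensed sketch of the dispatch (illustrative, not V8 API):

    enum class NextTok { kArrow, kAssignOp, kOther };

    const char* Continue(NextTok next, bool is_identifier, bool is_parenthesized,
                         bool is_property, bool is_pattern) {
      if (next == NextTok::kArrow) {
        if (is_identifier || is_parenthesized) return "arrow function literal";
        return "error: malformed arrow parameter list";
      }
      if (next != NextTok::kAssignOp) return "plain expression";
      if (is_identifier || is_property) return "simple assignment";
      if (is_pattern) return "destructuring assignment";  // '=' only
      return "assignment to rewritten (invalid) reference";
    }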
template <typename Impl>
-typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseYieldExpression(
- bool accept_IN, bool* ok) {
+typename ParserBase<Impl>::ExpressionT
+ParserBase<Impl>::ParseYieldExpression() {
// YieldExpression ::
// 'yield' ([no line terminator] '*'? AssignmentExpression)?
int pos = peek_position();
- classifier()->RecordPatternError(
- scanner()->peek_location(), MessageTemplate::kInvalidDestructuringTarget);
- classifier()->RecordFormalParameterInitializerError(
+ expression_scope()->RecordParameterInitializerError(
scanner()->peek_location(), MessageTemplate::kYieldInParameter);
- Expect(Token::YIELD, CHECK_OK);
+ Consume(Token::YIELD);
+
+ CheckStackOverflow();
+
// The following initialization is necessary.
ExpressionT expression = impl()->NullExpression();
bool delegating = false; // yield*
@@ -3069,8 +2691,7 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseYieldExpression(
// Delegating yields require an RHS; fall through.
V8_FALLTHROUGH;
default:
- expression = ParseAssignmentExpression(accept_IN, CHECK_OK);
- ValidateExpression(CHECK_OK);
+ expression = ParseAssignmentExpressionCoverGrammar();
break;
}
}
@@ -3099,80 +2720,65 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseYieldExpression(
// Precedence = 3
template <typename Impl>
typename ParserBase<Impl>::ExpressionT
-ParserBase<Impl>::ParseConditionalExpression(bool accept_IN,
- bool* ok) {
+ParserBase<Impl>::ParseConditionalExpression() {
// ConditionalExpression ::
// LogicalOrExpression
// LogicalOrExpression '?' AssignmentExpression ':' AssignmentExpression
int pos = peek_position();
// We start using the binary expression parser for prec >= 4 only!
- ExpressionT expression = ParseBinaryExpression(4, accept_IN, CHECK_OK);
+ ExpressionT expression = ParseBinaryExpression(4);
return peek() == Token::CONDITIONAL
- ? ParseConditionalContinuation(expression, accept_IN, pos, ok)
+ ? ParseConditionalContinuation(expression, pos)
: expression;
}
template <typename Impl>
typename ParserBase<Impl>::ExpressionT
ParserBase<Impl>::ParseConditionalContinuation(ExpressionT expression,
- bool accept_IN, int pos,
- bool* ok) {
+ int pos) {
SourceRange then_range, else_range;
- ValidateExpression(CHECK_OK);
- BindingPatternUnexpectedToken();
- ArrowFormalParametersUnexpectedToken();
ExpressionT left;
{
SourceRangeScope range_scope(scanner(), &then_range);
Consume(Token::CONDITIONAL);
- ExpressionClassifier classifier(this);
// In parsing the first assignment expression in conditional
// expressions we always accept the 'in' keyword; see ECMA-262,
// section 11.12, page 58.
- left = ParseAssignmentExpression(true, CHECK_OK);
- AccumulateNonBindingPatternErrors();
+ AcceptINScope scope(this, true);
+ left = ParseAssignmentExpression();
}
- ValidateExpression(CHECK_OK);
ExpressionT right;
{
SourceRangeScope range_scope(scanner(), &else_range);
- Expect(Token::COLON, CHECK_OK);
- ExpressionClassifier classifier(this);
- right = ParseAssignmentExpression(accept_IN, CHECK_OK);
- AccumulateNonBindingPatternErrors();
+ Expect(Token::COLON);
+ right = ParseAssignmentExpression();
}
- ValidateExpression(CHECK_OK);
ExpressionT expr = factory()->NewConditional(expression, left, right, pos);
impl()->RecordConditionalSourceRange(expr, then_range, else_range);
return expr;
}
-
// Precedence >= 4
template <typename Impl>
-typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseBinaryExpression(
- int prec, bool accept_IN, bool* ok) {
- DCHECK_GE(prec, 4);
- SourceRange right_range;
- ExpressionT x = ParseUnaryExpression(CHECK_OK);
- for (int prec1 = Precedence(peek(), accept_IN); prec1 >= prec; prec1--) {
+typename ParserBase<Impl>::ExpressionT
+ParserBase<Impl>::ParseBinaryContinuation(ExpressionT x, int prec, int prec1) {
+ do {
// prec1 >= 4
- while (Precedence(peek(), accept_IN) == prec1) {
- ValidateExpression(CHECK_OK);
- BindingPatternUnexpectedToken();
- ArrowFormalParametersUnexpectedToken();
-
- SourceRangeScope right_range_scope(scanner(), &right_range);
- Token::Value op = Next();
- int pos = position();
-
- const bool is_right_associative = op == Token::EXP;
- const int next_prec = is_right_associative ? prec1 : prec1 + 1;
- ExpressionT y = ParseBinaryExpression(next_prec, accept_IN, CHECK_OK);
- right_range_scope.Finalize();
- ValidateExpression(CHECK_OK);
+ while (Token::Precedence(peek(), accept_IN_) == prec1) {
+ SourceRange right_range;
+ int pos = peek_position();
+ ExpressionT y;
+ Token::Value op;
+ {
+ SourceRangeScope right_range_scope(scanner(), &right_range);
+ op = Next();
+
+ const bool is_right_associative = op == Token::EXP;
+ const int next_prec = is_right_associative ? prec1 : prec1 + 1;
+ y = ParseBinaryExpression(next_prec);
+ }
// For now we distinguish between comparisons and other binary
// operations. (We could combine the two and get rid of this
@@ -3200,16 +2806,28 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseBinaryExpression(
}
}
}
- }
+ --prec1;
+ } while (prec1 >= prec);
+
return x;
}
+// Precedence >= 4
template <typename Impl>
-typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseUnaryOpExpression(
- bool* ok) {
- BindingPatternUnexpectedToken();
- ArrowFormalParametersUnexpectedToken();
+typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseBinaryExpression(
+ int prec) {
+ DCHECK_GE(prec, 4);
+ ExpressionT x = ParseUnaryExpression();
+ int prec1 = Token::Precedence(peek(), accept_IN_);
+ if (prec1 >= prec) {
+ return ParseBinaryContinuation(x, prec, prec1);
+ }
+ return x;
+}
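The old single recursive ParseBinaryExpression is split so the common no-operator case returns without entering a loop: the entry point peeks one precedence, and only when it binds at least as tightly as 'prec' does it hand off to ParseBinaryContinuation, which climbs downward through precedence levels. The same shape as a self-contained toy evaluator over '+ - * /' (all left-associative here; V8 additionally recurses at prec1 rather than prec1 + 1 for the right-associative '**'):

    #include <cctype>
    #include <string>

    struct Toy {
      std::string src;
      size_t i = 0;
      char Peek() {
        while (i < src.size() && src[i] == ' ') ++i;
        return i < src.size() ? src[i] : '\0';
      }
      int Prec(char op) {
        switch (op) { case '+': case '-': return 4; case '*': case '/': return 5; }
        return 0;  // not a binary operator
      }
      long ParsePrimary() {
        long v = 0;
        while (std::isdigit(static_cast<unsigned char>(Peek())))
          v = v * 10 + (src[i++] - '0');
        return v;
      }
      long ParseBinaryContinuation(long x, int prec, int prec1) {
        do {
          while (Prec(Peek()) == prec1) {   // same loop shape as above
            char op = src[i++];
            long y = ParseBinary(prec1 + 1);  // all toy ops left-associative
            x = (op == '+') ? x + y : (op == '-') ? x - y
              : (op == '*') ? x * y : x / y;
          }
          --prec1;
        } while (prec1 >= prec);
        return x;
      }
      long ParseBinary(int prec) {
        long x = ParsePrimary();
        int prec1 = Prec(Peek());
        if (prec1 >= prec) return ParseBinaryContinuation(x, prec, prec1);
        return x;  // fast path: no operator at this precedence
      }
    };
    // Toy{"1+2*3-4"}.ParseBinary(4) evaluates to 3.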
+template <typename Impl>
+typename ParserBase<Impl>::ExpressionT
+ParserBase<Impl>::ParseUnaryOrPrefixExpression() {
Token::Value op = Next();
int pos = position();
@@ -3218,66 +2836,64 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseUnaryOpExpression(
function_state_->set_next_function_is_likely_called();
}
- ExpressionT expression = ParseUnaryExpression(CHECK_OK);
- ValidateExpression(CHECK_OK);
+ CheckStackOverflow();
+
+ int expression_position = peek_position();
+ ExpressionT expression = ParseUnaryExpression();
+
+ if (Token::IsUnaryOp(op)) {
+ if (op == Token::DELETE) {
+ if (impl()->IsIdentifier(expression) && is_strict(language_mode())) {
+ // "delete identifier" is a syntax error in strict mode.
+ ReportMessage(MessageTemplate::kStrictDelete);
+ return impl()->FailureExpression();
+ }
- if (op == Token::DELETE) {
- if (impl()->IsIdentifier(expression) && is_strict(language_mode())) {
- // "delete identifier" is a syntax error in strict mode.
- ReportMessage(MessageTemplate::kStrictDelete);
- *ok = false;
- return impl()->NullExpression();
+ if (impl()->IsPropertyWithPrivateFieldKey(expression)) {
+ ReportMessage(MessageTemplate::kDeletePrivateField);
+ return impl()->FailureExpression();
+ }
}
- if (impl()->IsPropertyWithPrivateFieldKey(expression)) {
- ReportMessage(MessageTemplate::kDeletePrivateField);
- *ok = false;
- return impl()->NullExpression();
+ if (peek() == Token::EXP) {
+ impl()->ReportMessageAt(
+ Scanner::Location(pos, peek_end_position()),
+ MessageTemplate::kUnexpectedTokenUnaryExponentiation);
+ return impl()->FailureExpression();
}
- }
- if (peek() == Token::EXP) {
- ReportUnexpectedToken(Next());
- *ok = false;
- return impl()->NullExpression();
+ // Allow the parser's implementation to rewrite the expression.
+ return impl()->BuildUnaryExpression(expression, op, pos);
}
- // Allow the parser's implementation to rewrite the expression.
- return impl()->BuildUnaryExpression(expression, op, pos);
-}
+ DCHECK(Token::IsCountOp(op));
-template <typename Impl>
-typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParsePrefixExpression(
- bool* ok) {
- BindingPatternUnexpectedToken();
- ArrowFormalParametersUnexpectedToken();
- Token::Value op = Next();
- int beg_pos = peek_position();
- ExpressionT expression = ParseUnaryExpression(CHECK_OK);
- expression = CheckAndRewriteReferenceExpression(
- expression, beg_pos, end_position(),
- MessageTemplate::kInvalidLhsInPrefixOp, CHECK_OK);
- impl()->MarkExpressionAsAssigned(expression);
- ValidateExpression(CHECK_OK);
+ if (V8_LIKELY(IsValidReferenceExpression(expression))) {
+ if (impl()->IsIdentifier(expression)) {
+ expression_scope()->MarkIdentifierAsAssigned();
+ }
+ } else {
+ expression = RewriteInvalidReferenceExpression(
+ expression, expression_position, end_position(),
+ MessageTemplate::kInvalidLhsInPrefixOp);
+ }
return factory()->NewCountOperation(op, true /* prefix */, expression,
position());
}
template <typename Impl>
-typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseAwaitExpression(
- bool* ok) {
- classifier()->RecordFormalParameterInitializerError(
+typename ParserBase<Impl>::ExpressionT
+ParserBase<Impl>::ParseAwaitExpression() {
+ expression_scope()->RecordParameterInitializerError(
scanner()->peek_location(),
MessageTemplate::kAwaitExpressionFormalParameter);
int await_pos = peek_position();
Consume(Token::AWAIT);
- ExpressionT value = ParseUnaryExpression(CHECK_OK);
+ CheckStackOverflow();
- classifier()->RecordBindingPatternError(
- Scanner::Location(await_pos, end_position()),
- MessageTemplate::kInvalidDestructuringTarget);
+ ExpressionT value = ParseUnaryExpression();
ExpressionT expr = factory()->NewAwait(value, await_pos);
function_state_->AddSuspend();
@@ -3286,8 +2902,8 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseAwaitExpression(
}
template <typename Impl>
-typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseUnaryExpression(
- bool* ok) {
+typename ParserBase<Impl>::ExpressionT
+ParserBase<Impl>::ParseUnaryExpression() {
// UnaryExpression ::
// PostfixExpression
// 'delete' UnaryExpression
@@ -3302,31 +2918,30 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseUnaryExpression(
// [+Await] AwaitExpression[?Yield]
Token::Value op = peek();
- if (Token::IsUnaryOp(op)) return ParseUnaryOpExpression(ok);
- if (Token::IsCountOp(op)) return ParsePrefixExpression(ok);
+ if (Token::IsUnaryOrCountOp(op)) return ParseUnaryOrPrefixExpression();
if (is_async_function() && op == Token::AWAIT) {
- return ParseAwaitExpression(ok);
+ return ParseAwaitExpression();
}
- return ParsePostfixExpression(ok);
+ return ParsePostfixExpression();
}
template <typename Impl>
-typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParsePostfixExpression(
- bool* ok) {
+typename ParserBase<Impl>::ExpressionT
+ParserBase<Impl>::ParsePostfixExpression() {
// PostfixExpression ::
// LeftHandSideExpression ('++' | '--')?
int lhs_beg_pos = peek_position();
- ExpressionT expression = ParseLeftHandSideExpression(CHECK_OK);
+ ExpressionT expression = ParseLeftHandSideExpression();
if (!scanner()->HasLineTerminatorBeforeNext() && Token::IsCountOp(peek())) {
- BindingPatternUnexpectedToken();
- ArrowFormalParametersUnexpectedToken();
-
- expression = CheckAndRewriteReferenceExpression(
- expression, lhs_beg_pos, end_position(),
- MessageTemplate::kInvalidLhsInPostfixOp, CHECK_OK);
- impl()->MarkExpressionAsAssigned(expression);
- ValidateExpression(CHECK_OK);
+ if (V8_UNLIKELY(!IsValidReferenceExpression(expression))) {
+ expression = RewriteInvalidReferenceExpression(
+ expression, lhs_beg_pos, end_position(),
+ MessageTemplate::kInvalidLhsInPostfixOp);
+ }
+ if (impl()->IsIdentifier(expression)) {
+ expression_scope()->MarkIdentifierAsAssigned();
+ }
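+ // For example, `(a + b)++` is reported via kInvalidLhsInPostfixOp,
+ // while `x` followed by a newline and `++y` never reaches this point
+ // because ASI terminates the statement after `x`.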
Token::Value next = Next();
expression =
@@ -3340,36 +2955,83 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParsePostfixExpression(
template <typename Impl>
typename ParserBase<Impl>::ExpressionT
-ParserBase<Impl>::ParseLeftHandSideExpression(bool* ok) {
+ParserBase<Impl>::ParseLeftHandSideExpression() {
// LeftHandSideExpression ::
// (NewExpression | MemberExpression) ...
- bool is_async = false;
- ExpressionT result =
- ParseMemberWithNewPrefixesExpression(&is_async, CHECK_OK);
+ ExpressionT result = ParseMemberWithNewPrefixesExpression();
+ if (!Token::IsPropertyOrCall(peek())) return result;
+ return ParseLeftHandSideContinuation(result);
+}
- while (true) {
+template <typename Impl>
+typename ParserBase<Impl>::ExpressionT
+ParserBase<Impl>::ParseLeftHandSideContinuation(ExpressionT result) {
+ DCHECK(Token::IsPropertyOrCall(peek()));
+
+ if (V8_UNLIKELY(peek() == Token::LPAREN && impl()->IsIdentifier(result) &&
+ scanner()->current_token() == Token::ASYNC &&
+ !scanner()->HasLineTerminatorBeforeNext())) {
+ DCHECK(impl()->IsAsync(impl()->AsIdentifier(result)));
+ int pos = position();
+
+ ArrowHeadParsingScope maybe_arrow(impl(),
+ FunctionKind::kAsyncArrowFunction);
+ Scope::Snapshot scope_snapshot(scope());
+
+ ExpressionListT args(pointer_buffer());
+ bool has_spread;
+ ParseArguments(&args, &has_spread, kMaybeArrowHead);
+ if (V8_LIKELY(peek() == Token::ARROW)) {
+ fni_.RemoveAsyncKeywordFromEnd();
+ next_arrow_function_info_.scope = maybe_arrow.ValidateAndCreateScope();
+ scope_snapshot.Reparent(next_arrow_function_info_.scope);
+ // async () => ...
+ if (!args.length()) return factory()->NewEmptyParentheses(pos);
+ // async ( Arguments ) => ...
+ ExpressionT result = impl()->ExpressionListToExpression(args);
+ result->mark_parenthesized();
+ return result;
+ }
+
+ if (has_spread) {
+ result = impl()->SpreadCall(result, args, pos, Call::NOT_EVAL);
+ } else {
+ result = factory()->NewCall(result, args, pos, Call::NOT_EVAL);
+ }
+
+ maybe_arrow.ValidateExpression();
+
+ fni_.RemoveLastFunction();
+ if (!Token::IsPropertyOrCall(peek())) return result;
+ }
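+ // For example, `async (a, b) => a + b` takes the arrow branch above,
+ // while `async(a, b)` not followed by `=>` is an ordinary call to a
+ // binding named `async`.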
+
+ do {
switch (peek()) {
+ /* Property */
case Token::LBRACK: {
- ValidateExpression(CHECK_OK);
- BindingPatternUnexpectedToken();
- ArrowFormalParametersUnexpectedToken();
Consume(Token::LBRACK);
int pos = position();
- ExpressionT index = ParseExpressionCoverGrammar(true, CHECK_OK);
- ValidateExpression(CHECK_OK);
+ AcceptINScope scope(this, true);
+ ExpressionT index = ParseExpressionCoverGrammar();
result = factory()->NewProperty(result, index, pos);
- Expect(Token::RBRACK, CHECK_OK);
+ Expect(Token::RBRACK);
break;
}
+ /* Property */
+ case Token::PERIOD: {
+ Consume(Token::PERIOD);
+ int pos = position();
+ ExpressionT key = ParsePropertyOrPrivatePropertyName();
+ result = factory()->NewProperty(result, key, pos);
+ break;
+ }
+
+ /* Call */
case Token::LPAREN: {
int pos;
- ValidateExpression(CHECK_OK);
- BindingPatternUnexpectedToken();
- if (scanner()->current_token() == Token::IDENTIFIER ||
- scanner()->current_token() == Token::SUPER ||
- scanner()->current_token() == Token::ASYNC) {
+ if (Token::IsCallable(scanner()->current_token())) {
// For a call of an identifier we want to report the position of
// the identifier as the position of the call in the stack trace.
pos = position();
@@ -3385,43 +3047,16 @@ ParserBase<Impl>::ParseLeftHandSideExpression(bool* ok) {
// function literal eagerly, we can also compile it eagerly.
if (result->IsFunctionLiteral()) {
result->AsFunctionLiteral()->SetShouldEagerCompile();
- result->AsFunctionLiteral()->mark_as_iife();
- }
- }
- Scanner::Location spread_pos;
- ExpressionListT args;
- if (V8_UNLIKELY(is_async && impl()->IsIdentifier(result))) {
- ExpressionClassifier async_classifier(this);
- bool is_simple_parameter_list = true;
- args = ParseArguments(&spread_pos, true, &is_simple_parameter_list,
- CHECK_OK);
- if (peek() == Token::ARROW) {
- fni_.RemoveAsyncKeywordFromEnd();
- ValidateBindingPattern(CHECK_OK);
- ValidateFormalParameterInitializer(CHECK_OK);
- if (!classifier()->is_valid_async_arrow_formal_parameters()) {
- ReportClassifierError(
- classifier()->async_arrow_formal_parameters_error());
- *ok = false;
- return impl()->NullExpression();
- }
- if (args->length()) {
- // async ( Arguments ) => ...
- if (!is_simple_parameter_list) {
- async_classifier.previous()->RecordNonSimpleParameter();
- }
- return impl()->ExpressionListToExpression(args);
+ if (scope()->is_script_scope()) {
+ // A non-top-level iife is likely to be executed multiple times
+ // and so shouldn't be optimized as one-shot.
+ result->AsFunctionLiteral()->mark_as_oneshot_iife();
}
- // async () => ...
- return factory()->NewEmptyParentheses(pos);
- } else {
- AccumulateFormalParameterContainmentErrors();
}
- } else {
- args = ParseArguments(&spread_pos, CHECK_OK);
}
-
- ArrowFormalParametersUnexpectedToken();
+ bool has_spread;
+ ExpressionListT args(pointer_buffer());
+ ParseArguments(&args, &has_spread);
// Keep track of eval() calls since they disable all local variable
// optimizations.
@@ -3433,7 +3068,7 @@ ParserBase<Impl>::ParseLeftHandSideExpression(bool* ok) {
Call::PossiblyEval is_possibly_eval =
CheckPossibleEvalCall(result, scope());
- if (spread_pos.IsValid()) {
+ if (has_spread) {
result = impl()->SpreadCall(result, args, pos, is_possibly_eval);
} else {
result = factory()->NewCall(result, args, pos, is_possibly_eval);
@@ -3443,36 +3078,19 @@ ParserBase<Impl>::ParseLeftHandSideExpression(bool* ok) {
break;
}
- case Token::PERIOD: {
- ValidateExpression(CHECK_OK);
- BindingPatternUnexpectedToken();
- ArrowFormalParametersUnexpectedToken();
- Consume(Token::PERIOD);
- int pos = position();
- ExpressionT key = ParseIdentifierNameOrPrivateName(CHECK_OK);
- result = factory()->NewProperty(result, key, pos);
- break;
- }
-
- case Token::TEMPLATE_SPAN:
- case Token::TEMPLATE_TAIL: {
- ValidateExpression(CHECK_OK);
- BindingPatternUnexpectedToken();
- ArrowFormalParametersUnexpectedToken();
- result = ParseTemplateLiteral(result, position(), true, CHECK_OK);
- break;
- }
-
+ /* Call */
default:
- return result;
+ DCHECK(Token::IsTemplate(peek()));
+ result = ParseTemplateLiteral(result, position(), true);
+ break;
}
- }
+ } while (Token::IsPropertyOrCall(peek()));
+ return result;
}
template <typename Impl>
typename ParserBase<Impl>::ExpressionT
-ParserBase<Impl>::ParseMemberWithPresentNewPrefixesExpression(bool* is_async,
- bool* ok) {
+ParserBase<Impl>::ParseMemberWithPresentNewPrefixesExpression() {
// NewExpression ::
// ('new')+ MemberExpression
//
@@ -3492,57 +3110,94 @@ ParserBase<Impl>::ParseMemberWithPresentNewPrefixesExpression(bool* is_async,
// new new foo means new (new foo)
// new new foo() means new (new foo())
// new new foo().bar().baz means (new (new foo()).bar()).baz
- BindingPatternUnexpectedToken();
- ArrowFormalParametersUnexpectedToken();
Consume(Token::NEW);
int new_pos = position();
ExpressionT result;
+
+ CheckStackOverflow();
+
if (peek() == Token::SUPER) {
const bool is_new = true;
- result = ParseSuperExpression(is_new, CHECK_OK);
+ result = ParseSuperExpression(is_new);
} else if (allow_harmony_dynamic_import() && peek() == Token::IMPORT &&
(!allow_harmony_import_meta() || PeekAhead() == Token::LPAREN)) {
impl()->ReportMessageAt(scanner()->peek_location(),
MessageTemplate::kImportCallNotNewExpression);
- *ok = false;
- return impl()->NullExpression();
+ return impl()->FailureExpression();
} else if (peek() == Token::PERIOD) {
- *is_async = false;
- result = ParseNewTargetExpression(CHECK_OK);
- return ParseMemberExpressionContinuation(result, is_async, ok);
+ result = ParseNewTargetExpression();
+ return ParseMemberExpressionContinuation(result);
} else {
- result = ParseMemberWithNewPrefixesExpression(is_async, CHECK_OK);
+ result = ParseMemberWithNewPrefixesExpression();
}
- ValidateExpression(CHECK_OK);
if (peek() == Token::LPAREN) {
// NewExpression with arguments.
- Scanner::Location spread_pos;
- ExpressionListT args = ParseArguments(&spread_pos, CHECK_OK);
+ {
+ ExpressionListT args(pointer_buffer());
+ bool has_spread;
+ ParseArguments(&args, &has_spread);
- if (spread_pos.IsValid()) {
- result = impl()->SpreadCallNew(result, args, new_pos);
- } else {
- result = factory()->NewCallNew(result, args, new_pos);
+ if (has_spread) {
+ result = impl()->SpreadCallNew(result, args, new_pos);
+ } else {
+ result = factory()->NewCallNew(result, args, new_pos);
+ }
}
// The expression can still continue with . or [ after the arguments.
- return ParseMemberExpressionContinuation(result, is_async, ok);
+ return ParseMemberExpressionContinuation(result);
}
// NewExpression without arguments.
- return factory()->NewCallNew(result, impl()->NewExpressionList(0), new_pos);
+ ExpressionListT args(pointer_buffer());
+ return factory()->NewCallNew(result, args, new_pos);
+}
+
+template <typename Impl>
+typename ParserBase<Impl>::ExpressionT
+ParserBase<Impl>::ParseMemberWithNewPrefixesExpression() {
+ return peek() == Token::NEW ? ParseMemberWithPresentNewPrefixesExpression()
+ : ParseMemberExpression();
}
template <typename Impl>
typename ParserBase<Impl>::ExpressionT
-ParserBase<Impl>::ParseMemberWithNewPrefixesExpression(bool* is_async,
- bool* ok) {
- return peek() == Token::NEW
- ? ParseMemberWithPresentNewPrefixesExpression(is_async, ok)
- : ParseMemberExpression(is_async, ok);
+ParserBase<Impl>::ParseFunctionExpression() {
+ Consume(Token::FUNCTION);
+ int function_token_position = position();
+
+ FunctionKind function_kind = Check(Token::MUL)
+ ? FunctionKind::kGeneratorFunction
+ : FunctionKind::kNormalFunction;
+ IdentifierT name = impl()->NullIdentifier();
+ bool is_strict_reserved_name = Token::IsStrictReservedWord(peek());
+ Scanner::Location function_name_location = Scanner::Location::invalid();
+ FunctionLiteral::FunctionType function_type =
+ FunctionLiteral::kAnonymousExpression;
+ if (impl()->ParsingDynamicFunctionDeclaration()) {
+ // We don't want dynamic functions to actually declare their name
+ // "anonymous". We just want that name in the toString().
+ Consume(Token::IDENTIFIER);
+ DCHECK_IMPLIES(!has_error(),
+ scanner()->CurrentSymbol(ast_value_factory()) ==
+ ast_value_factory()->anonymous_string());
+ } else if (peek_any_identifier()) {
+ name = ParseIdentifier(function_kind);
+ function_name_location = scanner()->location();
+ function_type = FunctionLiteral::kNamedExpression;
+ }
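+ // For example, `new Function('a', 'return a')` is parsed from a
+ // synthesized `function anonymous(...) { ... }` source, so the
+ // "anonymous" token is consumed here without binding that name.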
+ FunctionLiteralT result = impl()->ParseFunctionLiteral(
+ name, function_name_location,
+ is_strict_reserved_name ? kFunctionNameIsStrictReserved
+ : kFunctionNameValidityUnknown,
+ function_kind, function_token_position, function_type, language_mode(),
+ nullptr);
+ // TODO(verwaest): FailureFunctionLiteral?
+ if (impl()->IsNull(result)) return impl()->FailureExpression();
+ return result;
}
template <typename Impl>
-typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseMemberExpression(
- bool* is_async, bool* ok) {
+typename ParserBase<Impl>::ExpressionT
+ParserBase<Impl>::ParseMemberExpression() {
// MemberExpression ::
// (PrimaryExpression | FunctionLiteral | ClassLiteral)
// ('[' Expression ']' | '.' Identifier | Arguments | TemplateLiteral)*
@@ -3558,99 +3213,60 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseMemberExpression(
// Parse the initial primary or function expression.
ExpressionT result;
if (peek() == Token::FUNCTION) {
- BindingPatternUnexpectedToken();
- ArrowFormalParametersUnexpectedToken();
-
- Consume(Token::FUNCTION);
- int function_token_position = position();
-
- FunctionKind function_kind = Check(Token::MUL)
- ? FunctionKind::kGeneratorFunction
- : FunctionKind::kNormalFunction;
- IdentifierT name = impl()->NullIdentifier();
- bool is_strict_reserved_name = false;
- Scanner::Location function_name_location = Scanner::Location::invalid();
- FunctionLiteral::FunctionType function_type =
- FunctionLiteral::kAnonymousExpression;
- if (impl()->ParsingDynamicFunctionDeclaration()) {
- // We don't want dynamic functions to actually declare their name
- // "anonymous". We just want that name in the toString().
- if (stack_overflow()) {
- *ok = false;
- return impl()->NullExpression();
- }
- Consume(Token::IDENTIFIER);
- DCHECK(scanner()->CurrentMatchesContextual(Token::ANONYMOUS));
- } else if (peek_any_identifier()) {
- bool is_await = false;
- name = ParseIdentifierOrStrictReservedWord(
- function_kind, &is_strict_reserved_name, &is_await, CHECK_OK);
- function_name_location = scanner()->location();
- function_type = FunctionLiteral::kNamedExpression;
- }
- result = impl()->ParseFunctionLiteral(
- name, function_name_location,
- is_strict_reserved_name ? kFunctionNameIsStrictReserved
- : kFunctionNameValidityUnknown,
- function_kind, function_token_position, function_type, language_mode(),
- nullptr, CHECK_OK);
+ result = ParseFunctionExpression();
} else if (peek() == Token::SUPER) {
const bool is_new = false;
- result = ParseSuperExpression(is_new, CHECK_OK);
+ result = ParseSuperExpression(is_new);
} else if (allow_harmony_dynamic_import() && peek() == Token::IMPORT) {
- result = ParseImportExpressions(CHECK_OK);
+ result = ParseImportExpressions();
} else {
- result = ParsePrimaryExpression(is_async, CHECK_OK);
+ result = ParsePrimaryExpression();
}
- return ParseMemberExpressionContinuation(result, is_async, ok);
+ return ParseMemberExpressionContinuation(result);
}
template <typename Impl>
-typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseImportExpressions(
- bool* ok) {
+typename ParserBase<Impl>::ExpressionT
+ParserBase<Impl>::ParseImportExpressions() {
DCHECK(allow_harmony_dynamic_import());
- classifier()->RecordPatternError(scanner()->peek_location(),
- MessageTemplate::kUnexpectedToken,
- Token::String(Token::IMPORT));
-
Consume(Token::IMPORT);
int pos = position();
if (allow_harmony_import_meta() && peek() == Token::PERIOD) {
- ExpectMetaProperty(Token::META, "import.meta", pos, CHECK_OK);
+ ExpectMetaProperty(ast_value_factory()->meta_string(), "import.meta", pos);
if (!parsing_module_) {
impl()->ReportMessageAt(scanner()->location(),
MessageTemplate::kImportMetaOutsideModule);
- *ok = false;
- return impl()->NullExpression();
+ return impl()->FailureExpression();
}
return impl()->ImportMetaExpression(pos);
}
- Expect(Token::LPAREN, CHECK_OK);
+ Expect(Token::LPAREN);
if (peek() == Token::RPAREN) {
impl()->ReportMessageAt(scanner()->location(),
MessageTemplate::kImportMissingSpecifier);
- *ok = false;
- return impl()->NullExpression();
+ return impl()->FailureExpression();
}
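// For example, `import()` with no specifier is a SyntaxError
// (kImportMissingSpecifier); `import('./mod.js')` parses the specifier
// as a single AssignmentExpression below.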
- ExpressionT arg = ParseAssignmentExpression(true, CHECK_OK);
- Expect(Token::RPAREN, CHECK_OK);
+ AcceptINScope scope(this, true);
+ ExpressionT arg = ParseAssignmentExpressionCoverGrammar();
+ Expect(Token::RPAREN);
+
return factory()->NewImportCallExpression(arg, pos);
}
template <typename Impl>
typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseSuperExpression(
- bool is_new, bool* ok) {
- Expect(Token::SUPER, CHECK_OK);
+ bool is_new) {
+ Consume(Token::SUPER);
int pos = position();
DeclarationScope* scope = GetReceiverScope();
FunctionKind kind = scope->function_kind();
if (IsConciseMethod(kind) || IsAccessorFunction(kind) ||
IsClassConstructor(kind)) {
- if (peek() == Token::PERIOD || peek() == Token::LBRACK) {
+ if (Token::IsProperty(peek())) {
scope->RecordSuperPropertyUsage();
return impl()->NewSuperPropertyReference(pos);
}
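// For example, `super.x` and `super['x']` are valid in methods,
// accessors, and class constructors, and record a super-property
// usage on the receiver scope.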
@@ -3665,39 +3281,31 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseSuperExpression(
impl()->ReportMessageAt(scanner()->location(),
MessageTemplate::kUnexpectedSuper);
- *ok = false;
- return impl()->NullExpression();
+ return impl()->FailureExpression();
}
template <typename Impl>
-void ParserBase<Impl>::ExpectMetaProperty(Token::Value property_name,
- const char* full_name, int pos,
- bool* ok) {
+void ParserBase<Impl>::ExpectMetaProperty(const AstRawString* property_name,
+ const char* full_name, int pos) {
Consume(Token::PERIOD);
- ExpectContextualKeyword(property_name, CHECK_OK_CUSTOM(Void));
- if (scanner()->literal_contains_escapes()) {
+ ExpectContextualKeyword(property_name);
+ if (V8_UNLIKELY(scanner()->literal_contains_escapes())) {
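+ // For example, `new.t\u0061rget` spells the property with an escape
+ // sequence and is rejected with kInvalidEscapedMetaProperty.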
impl()->ReportMessageAt(Scanner::Location(pos, end_position()),
MessageTemplate::kInvalidEscapedMetaProperty,
full_name);
- *ok = false;
}
}
template <typename Impl>
typename ParserBase<Impl>::ExpressionT
-ParserBase<Impl>::ParseNewTargetExpression(bool* ok) {
+ParserBase<Impl>::ParseNewTargetExpression() {
int pos = position();
- ExpectMetaProperty(Token::TARGET, "new.target", pos, CHECK_OK);
-
- classifier()->RecordAssignmentPatternError(
- Scanner::Location(pos, end_position()),
- MessageTemplate::kInvalidDestructuringTarget);
+ ExpectMetaProperty(ast_value_factory()->target_string(), "new.target", pos);
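+ // For example, `new.target` at the top level of a script is a
+ // SyntaxError (kUnexpectedNewTarget); it is only valid where the
+ // receiver scope is a function.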
if (!GetReceiverScope()->is_function_scope()) {
impl()->ReportMessageAt(scanner()->location(),
MessageTemplate::kUnexpectedNewTarget);
- *ok = false;
- return impl()->NullExpression();
+ return impl()->FailureExpression();
}
return impl()->NewTargetExpression(pos);
@@ -3705,45 +3313,31 @@ ParserBase<Impl>::ParseNewTargetExpression(bool* ok) {
template <typename Impl>
typename ParserBase<Impl>::ExpressionT
-ParserBase<Impl>::ParseMemberExpressionContinuation(ExpressionT expression,
- bool* is_async, bool* ok) {
+ParserBase<Impl>::DoParseMemberExpressionContinuation(ExpressionT expression) {
+ DCHECK(Token::IsMember(peek()));
// Parses this part of MemberExpression:
// ('[' Expression ']' | '.' Identifier | TemplateLiteral)*
- while (true) {
+ do {
switch (peek()) {
case Token::LBRACK: {
- *is_async = false;
- ValidateExpression(CHECK_OK);
- BindingPatternUnexpectedToken();
- ArrowFormalParametersUnexpectedToken();
-
Consume(Token::LBRACK);
int pos = position();
- ExpressionT index = ParseExpressionCoverGrammar(true, CHECK_OK);
- ValidateExpression(CHECK_OK);
+ AcceptINScope scope(this, true);
+ ExpressionT index = ParseExpressionCoverGrammar();
expression = factory()->NewProperty(expression, index, pos);
impl()->PushPropertyName(index);
- Expect(Token::RBRACK, CHECK_OK);
+ Expect(Token::RBRACK);
break;
}
case Token::PERIOD: {
- *is_async = false;
- ValidateExpression(CHECK_OK);
- BindingPatternUnexpectedToken();
- ArrowFormalParametersUnexpectedToken();
-
Consume(Token::PERIOD);
int pos = peek_position();
- ExpressionT key = ParseIdentifierNameOrPrivateName(CHECK_OK);
+ ExpressionT key = ParsePropertyOrPrivatePropertyName();
expression = factory()->NewProperty(expression, key, pos);
break;
}
- case Token::TEMPLATE_SPAN:
- case Token::TEMPLATE_TAIL: {
- *is_async = false;
- ValidateExpression(CHECK_OK);
- BindingPatternUnexpectedToken();
- ArrowFormalParametersUnexpectedToken();
+ default: {
+ DCHECK(Token::IsTemplate(peek()));
int pos;
if (scanner()->current_token() == Token::IDENTIFIER) {
pos = position();
@@ -3755,61 +3349,47 @@ ParserBase<Impl>::ParseMemberExpressionContinuation(ExpressionT expression,
expression->AsFunctionLiteral()->SetShouldEagerCompile();
}
}
- expression = ParseTemplateLiteral(expression, pos, true, CHECK_OK);
+ expression = ParseTemplateLiteral(expression, pos, true);
break;
}
- case Token::ILLEGAL: {
- ReportUnexpectedTokenAt(scanner()->peek_location(), Token::ILLEGAL);
- *ok = false;
- return impl()->NullExpression();
- }
- default:
- return expression;
}
- }
- DCHECK(false);
- return impl()->NullExpression();
+ } while (Token::IsMember(peek()));
+ return expression;
}
template <typename Impl>
-void ParserBase<Impl>::ParseFormalParameter(FormalParametersT* parameters,
- bool* ok) {
+void ParserBase<Impl>::ParseFormalParameter(FormalParametersT* parameters) {
// FormalParameter[Yield,GeneratorParameter] :
// BindingElement[?Yield, ?GeneratorParameter]
- bool is_rest = parameters->has_rest;
-
FuncNameInferrerState fni_state(&fni_);
- ExpressionT pattern = ParseBindingPattern(CHECK_OK_CUSTOM(Void));
- if (!impl()->IsIdentifier(pattern)) {
+ int pos = peek_position();
+ ExpressionT pattern = ParseBindingPattern();
+ if (impl()->IsIdentifier(pattern)) {
+ ClassifyParameter(impl()->AsIdentifier(pattern), pos, end_position());
+ } else {
parameters->is_simple = false;
- ValidateFormalParameterInitializer(CHECK_OK_CUSTOM(Void));
}
ExpressionT initializer = impl()->NullExpression();
if (Check(Token::ASSIGN)) {
- if (is_rest) {
+ parameters->is_simple = false;
+
+ if (parameters->has_rest) {
ReportMessage(MessageTemplate::kRestDefaultInitializer);
- *ok = false;
return;
}
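// For example, `function f(...rest = []) {}` is a SyntaxError: a rest
// parameter cannot have a default initializer.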
- ExpressionClassifier init_classifier(this);
- initializer = ParseAssignmentExpression(true, CHECK_OK_CUSTOM(Void));
- ValidateExpression(CHECK_OK_CUSTOM(Void));
- ValidateFormalParameterInitializer(CHECK_OK_CUSTOM(Void));
- parameters->is_simple = false;
- DiscardExpressionClassifier();
- classifier()->RecordNonSimpleParameter();
+ AcceptINScope scope(this, true);
+ initializer = ParseAssignmentExpression();
impl()->SetFunctionNameFromIdentifierRef(initializer, pattern);
}
impl()->AddFormalParameter(parameters, pattern, initializer, end_position(),
- is_rest);
+ parameters->has_rest);
}
template <typename Impl>
-void ParserBase<Impl>::ParseFormalParameterList(FormalParametersT* parameters,
- bool* ok) {
+void ParserBase<Impl>::ParseFormalParameterList(FormalParametersT* parameters) {
// FormalParameters[Yield] :
// [empty]
// FunctionRestParameter[?Yield]
@@ -3820,26 +3400,25 @@ void ParserBase<Impl>::ParseFormalParameterList(FormalParametersT* parameters,
// FormalParameterList[Yield] :
// FormalParameter[?Yield]
// FormalParameterList[?Yield] , FormalParameter[?Yield]
+ ParameterParsingScope scope(impl(), parameters);
DCHECK_EQ(0, parameters->arity);
if (peek() != Token::RPAREN) {
while (true) {
- if (parameters->arity > Code::kMaxArguments) {
+ // Add one since we're going to be adding a parameter.
+ if (parameters->arity + 1 > Code::kMaxArguments) {
ReportMessage(MessageTemplate::kTooManyParameters);
- *ok = false;
return;
}
parameters->has_rest = Check(Token::ELLIPSIS);
- ParseFormalParameter(parameters, CHECK_OK_CUSTOM(Void));
+ ParseFormalParameter(parameters);
if (parameters->has_rest) {
parameters->is_simple = false;
- classifier()->RecordNonSimpleParameter();
if (peek() == Token::COMMA) {
impl()->ReportMessageAt(scanner()->peek_location(),
MessageTemplate::kParamAfterRest);
- *ok = false;
return;
}
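// For example, `function f(...a, b) {}` is a SyntaxError: the rest
// parameter must be the last formal parameter.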
break;
@@ -3852,15 +3431,14 @@ void ParserBase<Impl>::ParseFormalParameterList(FormalParametersT* parameters,
}
}
- impl()->DeclareFormalParameters(parameters->scope, parameters->params,
- parameters->is_simple);
+ impl()->DeclareFormalParameters(parameters);
}
template <typename Impl>
-typename ParserBase<Impl>::BlockT ParserBase<Impl>::ParseVariableDeclarations(
+void ParserBase<Impl>::ParseVariableDeclarations(
VariableDeclarationContext var_context,
DeclarationParsingResult* parsing_result,
- ZonePtrList<const AstRawString>* names, bool* ok) {
+ ZonePtrList<const AstRawString>* names) {
// VariableDeclarations ::
// ('var' | 'const' | 'let') (Identifier ('=' AssignmentExpression)?)+[',']
//
@@ -3869,15 +3447,10 @@ typename ParserBase<Impl>::BlockT ParserBase<Impl>::ParseVariableDeclarations(
// declaration syntax.
DCHECK_NOT_NULL(parsing_result);
- parsing_result->descriptor.declaration_kind = DeclarationDescriptor::NORMAL;
+ parsing_result->descriptor.kind = NORMAL_VARIABLE;
parsing_result->descriptor.declaration_pos = peek_position();
parsing_result->descriptor.initialization_pos = peek_position();
- BlockT init_block = impl()->NullStatement();
- if (var_context != kForStatement) {
- init_block = factory()->NewBlock(1, true);
- }
-
switch (peek()) {
case Token::VAR:
parsing_result->descriptor.mode = VariableMode::kVar;
@@ -3898,40 +3471,29 @@ typename ParserBase<Impl>::BlockT ParserBase<Impl>::ParseVariableDeclarations(
break;
}
- parsing_result->descriptor.scope = scope();
+ VariableDeclarationParsingScope declaration(
+ impl(), parsing_result->descriptor.mode, names);
int bindings_start = peek_position();
do {
// Parse binding pattern.
FuncNameInferrerState fni_state(&fni_);
- ExpressionT pattern = impl()->NullExpression();
int decl_pos = peek_position();
- {
- ExpressionClassifier pattern_classifier(this);
- pattern = ParseBindingPattern(CHECK_OK_CUSTOM(NullStatement));
+ ExpressionT pattern = ParseBindingPattern();
- if (IsLexicalVariableMode(parsing_result->descriptor.mode)) {
- ValidateLetPattern(CHECK_OK_CUSTOM(NullStatement));
- }
- }
Scanner::Location variable_loc = scanner()->location();
- bool single_name = impl()->IsIdentifier(pattern);
- if (single_name) {
- impl()->PushVariableName(impl()->AsIdentifier(pattern));
- }
-
ExpressionT value = impl()->NullExpression();
int initializer_position = kNoSourcePosition;
int value_beg_position = kNoSourcePosition;
if (Check(Token::ASSIGN)) {
value_beg_position = peek_position();
- ExpressionClassifier classifier(this);
- value = ParseAssignmentExpression(var_context != kForStatement,
- CHECK_OK_CUSTOM(NullStatement));
- ValidateExpression(CHECK_OK_CUSTOM(NullStatement));
+ {
+ AcceptINScope scope(this, var_context != kForStatement);
+ value = ParseAssignmentExpression();
+ }
variable_loc.end_pos = end_position();
if (!parsing_result->first_initializer_loc.IsValid()) {
@@ -3939,7 +3501,7 @@ typename ParserBase<Impl>::BlockT ParserBase<Impl>::ParseVariableDeclarations(
}
// Don't infer if it is "a = function(){...}();"-like expression.
- if (single_name) {
+ if (impl()->IsIdentifier(pattern)) {
if (!value->IsCall() && !value->IsCallNew()) {
fni_.Infer();
} else {
@@ -3960,8 +3522,7 @@ typename ParserBase<Impl>::BlockT ParserBase<Impl>::ParseVariableDeclarations(
Scanner::Location(decl_pos, end_position()),
MessageTemplate::kDeclarationMissingInitializer,
!impl()->IsIdentifier(pattern) ? "destructuring" : "const");
- *ok = false;
- return impl()->NullStatement();
+ return;
}
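// For example, `const x;` and `let {a};` are SyntaxErrors
// (kDeclarationMissingInitializer): const bindings and destructuring
// patterns require an initializer.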
// 'let x' initializes 'x' to undefined.
if (parsing_result->descriptor.mode == VariableMode::kLet) {
@@ -3976,61 +3537,50 @@ typename ParserBase<Impl>::BlockT ParserBase<Impl>::ParseVariableDeclarations(
typename DeclarationParsingResult::Declaration decl(
pattern, initializer_position, value);
decl.value_beg_position = value_beg_position;
- if (var_context == kForStatement) {
- // Save the declaration for further handling in ParseForStatement.
- parsing_result->declarations.push_back(decl);
- } else {
- // Immediately declare the variable otherwise. This avoids O(N^2)
- // behavior (where N is the number of variables in a single
- // declaration) in the PatternRewriter having to do with removing
- // and adding VariableProxies to the Scope (see bug 4699).
- impl()->DeclareAndInitializeVariables(
- init_block, &parsing_result->descriptor, &decl, names,
- CHECK_OK_CUSTOM(NullStatement));
- }
+ parsing_result->declarations.push_back(decl);
} while (Check(Token::COMMA));
parsing_result->bindings_loc =
Scanner::Location(bindings_start, end_position());
-
- DCHECK(*ok);
- return init_block;
}
template <typename Impl>
typename ParserBase<Impl>::StatementT
-ParserBase<Impl>::ParseFunctionDeclaration(bool* ok) {
+ParserBase<Impl>::ParseFunctionDeclaration() {
Consume(Token::FUNCTION);
+
int pos = position();
ParseFunctionFlags flags = ParseFunctionFlag::kIsNormal;
if (Check(Token::MUL)) {
impl()->ReportMessageAt(
scanner()->location(),
MessageTemplate::kGeneratorInSingleStatementContext);
- *ok = false;
return impl()->NullStatement();
}
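// For example, `if (x) function* g() {}` is a SyntaxError: a generator
// declaration may not appear in a single-statement context.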
- return ParseHoistableDeclaration(pos, flags, nullptr, false, ok);
+ return ParseHoistableDeclaration(pos, flags, nullptr, false);
}
template <typename Impl>
typename ParserBase<Impl>::StatementT
ParserBase<Impl>::ParseHoistableDeclaration(
- ZonePtrList<const AstRawString>* names, bool default_export, bool* ok) {
- Expect(Token::FUNCTION, CHECK_OK_CUSTOM(NullStatement));
+ ZonePtrList<const AstRawString>* names, bool default_export) {
+ Consume(Token::FUNCTION);
+
int pos = position();
ParseFunctionFlags flags = ParseFunctionFlag::kIsNormal;
if (Check(Token::MUL)) {
flags |= ParseFunctionFlag::kIsGenerator;
}
- return ParseHoistableDeclaration(pos, flags, names, default_export, ok);
+ return ParseHoistableDeclaration(pos, flags, names, default_export);
}
template <typename Impl>
typename ParserBase<Impl>::StatementT
ParserBase<Impl>::ParseHoistableDeclaration(
int pos, ParseFunctionFlags flags, ZonePtrList<const AstRawString>* names,
- bool default_export, bool* ok) {
+ bool default_export) {
+ CheckStackOverflow();
+
// FunctionDeclaration ::
// 'function' Identifier '(' FormalParameters ')' '{' FunctionBody '}'
// 'function' '(' FormalParameters ')' '{' FunctionBody '}'
@@ -4057,10 +3607,8 @@ ParserBase<Impl>::ParseHoistableDeclaration(
impl()->GetDefaultStrings(&name, &variable_name);
name_validity = kSkipFunctionNameCheck;
} else {
- bool is_strict_reserved;
- bool is_await = false;
- name = ParseIdentifierOrStrictReservedWord(&is_strict_reserved, &is_await,
- CHECK_OK_CUSTOM(NullStatement));
+ bool is_strict_reserved = Token::IsStrictReservedWord(peek());
+ name = ParseIdentifier();
name_validity = is_strict_reserved ? kFunctionNameIsStrictReserved
: kFunctionNameValidityUnknown;
variable_name = name;
@@ -4073,8 +3621,7 @@ ParserBase<Impl>::ParseHoistableDeclaration(
FunctionLiteralT function = impl()->ParseFunctionLiteral(
name, scanner()->location(), name_validity, kind, pos,
- FunctionLiteral::kDeclaration, language_mode(), nullptr,
- CHECK_OK_CUSTOM(NullStatement));
+ FunctionLiteral::kDeclaration, language_mode(), nullptr);
// In ES6, a function behaves as a lexical binding, except in
// a script scope, or the initial scope of eval or another function.
@@ -4092,12 +3639,13 @@ ParserBase<Impl>::ParseHoistableDeclaration(
flags == ParseFunctionFlag::kIsNormal;
return impl()->DeclareFunction(variable_name, function, mode, pos,
- is_sloppy_block_function, names, ok);
+ end_position(), is_sloppy_block_function,
+ names);
}
template <typename Impl>
typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseClassDeclaration(
- ZonePtrList<const AstRawString>* names, bool default_export, bool* ok) {
+ ZonePtrList<const AstRawString>* names, bool default_export) {
// ClassDeclaration ::
// 'class' Identifier ('extends' LeftHandExpression)? '{' ClassBody '}'
// 'class' ('extends' LeftHandExpression)? '{' ClassBody '}'
@@ -4118,24 +3666,22 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseClassDeclaration(
int class_token_pos = position();
IdentifierT name = impl()->NullIdentifier();
- bool is_strict_reserved = false;
+ bool is_strict_reserved = Token::IsStrictReservedWord(peek());
IdentifierT variable_name = impl()->NullIdentifier();
if (default_export && (peek() == Token::EXTENDS || peek() == Token::LBRACE)) {
impl()->GetDefaultStrings(&name, &variable_name);
} else {
- bool is_await = false;
- name = ParseIdentifierOrStrictReservedWord(&is_strict_reserved, &is_await,
- CHECK_OK_CUSTOM(NullStatement));
+ name = ParseIdentifier();
variable_name = name;
}
- ExpressionClassifier no_classifier(this);
- ExpressionT value =
- ParseClassLiteral(name, scanner()->location(), is_strict_reserved,
- class_token_pos, CHECK_OK_CUSTOM(NullStatement));
+ ExpressionParsingScope no_expression_scope(impl());
+ ExpressionT value = ParseClassLiteral(name, scanner()->location(),
+ is_strict_reserved, class_token_pos);
+ no_expression_scope.ValidateExpression();
int end_pos = position();
return impl()->DeclareClass(variable_name, value, names, class_token_pos,
- end_pos, ok);
+ end_pos);
}
// Language extension which is only enabled for source files loaded
@@ -4143,86 +3689,84 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseClassDeclaration(
// declaration is resolved by looking up the function through a
// callback provided by the extension.
template <typename Impl>
-typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseNativeDeclaration(
- bool* ok) {
+typename ParserBase<Impl>::StatementT
+ParserBase<Impl>::ParseNativeDeclaration() {
function_state_->DisableOptimization(BailoutReason::kNativeFunctionLiteral);
int pos = peek_position();
- Expect(Token::FUNCTION, CHECK_OK_CUSTOM(NullStatement));
+ Consume(Token::FUNCTION);
// Allow "eval" or "arguments" for backward compatibility.
- IdentifierT name = ParseIdentifier(kAllowRestrictedIdentifiers,
- CHECK_OK_CUSTOM(NullStatement));
- Expect(Token::LPAREN, CHECK_OK_CUSTOM(NullStatement));
+ IdentifierT name = ParseIdentifier();
+ Expect(Token::LPAREN);
if (peek() != Token::RPAREN) {
do {
- ParseIdentifier(kAllowRestrictedIdentifiers,
- CHECK_OK_CUSTOM(NullStatement));
+ ParseIdentifier();
} while (Check(Token::COMMA));
}
- Expect(Token::RPAREN, CHECK_OK_CUSTOM(NullStatement));
- Expect(Token::SEMICOLON, CHECK_OK_CUSTOM(NullStatement));
- return impl()->DeclareNative(name, pos, ok);
+ Expect(Token::RPAREN);
+ Expect(Token::SEMICOLON);
+ return impl()->DeclareNative(name, pos);
}
template <typename Impl>
typename ParserBase<Impl>::StatementT
ParserBase<Impl>::ParseAsyncFunctionDeclaration(
- ZonePtrList<const AstRawString>* names, bool default_export, bool* ok) {
+ ZonePtrList<const AstRawString>* names, bool default_export) {
// AsyncFunctionDeclaration ::
// async [no LineTerminator here] function BindingIdentifier[Await]
// ( FormalParameters[Await] ) { AsyncFunctionBody }
DCHECK_EQ(scanner()->current_token(), Token::ASYNC);
int pos = position();
- if (scanner()->HasLineTerminatorBeforeNext()) {
- *ok = false;
- impl()->ReportUnexpectedToken(scanner()->current_token());
- return impl()->NullStatement();
- }
- Expect(Token::FUNCTION, CHECK_OK_CUSTOM(NullStatement));
+ DCHECK(!scanner()->HasLineTerminatorBeforeNext());
+ Consume(Token::FUNCTION);
ParseFunctionFlags flags = ParseFunctionFlag::kIsAsync;
- return ParseHoistableDeclaration(pos, flags, names, default_export, ok);
+ return ParseHoistableDeclaration(pos, flags, names, default_export);
}
template <typename Impl>
void ParserBase<Impl>::ParseFunctionBody(
- typename ParserBase<Impl>::StatementListT result, IdentifierT function_name,
- int pos, const FormalParametersT& parameters, FunctionKind kind,
- FunctionLiteral::FunctionType function_type, FunctionBodyType body_type,
- bool accept_IN, bool* ok) {
- DeclarationScope* function_scope = scope()->AsDeclarationScope();
+ StatementListT* body, IdentifierT function_name, int pos,
+ const FormalParametersT& parameters, FunctionKind kind,
+ FunctionLiteral::FunctionType function_type, FunctionBodyType body_type) {
+ FunctionBodyParsingScope body_parsing_scope(impl());
+
+ if (IsResumableFunction(kind)) impl()->PrepareGeneratorVariables();
+
+ DeclarationScope* function_scope = parameters.scope;
DeclarationScope* inner_scope = function_scope;
- BlockT inner_block = impl()->NullStatement();
- StatementListT body = result;
- if (!parameters.is_simple) {
+ // Building the parameter initialization block declares the parameters.
+ // TODO(verwaest): Rely on ArrowHeadParsingScope instead.
+ if (V8_UNLIKELY(!parameters.is_simple)) {
+ if (has_error()) return;
+ BlockT init_block = impl()->BuildParameterInitializationBlock(parameters);
+ if (IsAsyncFunction(kind) && !IsAsyncGeneratorFunction(kind)) {
+ init_block = impl()->BuildRejectPromiseOnException(init_block);
+ }
+ body->Add(init_block);
+ if (has_error()) return;
+
inner_scope = NewVarblockScope();
inner_scope->set_start_position(scanner()->location().beg_pos);
- inner_block = factory()->NewBlock(8, true);
- inner_block->set_scope(inner_scope);
- body = inner_block->statements();
}
+ StatementListT inner_body(pointer_buffer());
+
{
BlockState block_state(&scope_, inner_scope);
- if (IsResumableFunction(kind)) impl()->PrepareGeneratorVariables();
-
if (body_type == FunctionBodyType::kExpression) {
- ExpressionClassifier classifier(this);
- ExpressionT expression =
- ParseAssignmentExpression(accept_IN, CHECK_OK_VOID);
- ValidateExpression(CHECK_OK_VOID);
+ ExpressionT expression = ParseAssignmentExpression();
if (IsAsyncFunction(kind)) {
BlockT block = factory()->NewBlock(1, true);
- impl()->RewriteAsyncFunctionBody(body, block, expression,
- CHECK_OK_VOID);
+ impl()->RewriteAsyncFunctionBody(&inner_body, block, expression);
} else {
- body->Add(BuildReturnStatement(expression, expression->position()),
- zone());
+ inner_body.Add(
+ BuildReturnStatement(expression, expression->position()));
}
} else {
- DCHECK(accept_IN);
+ DCHECK(accept_IN_);
DCHECK_EQ(FunctionBodyType::kBlock, body_type);
// If we are parsing the source as if it is wrapped in a function, the
// source ends without a closing brace.
@@ -4231,63 +3775,64 @@ void ParserBase<Impl>::ParseFunctionBody(
: Token::RBRACE;
if (IsAsyncGeneratorFunction(kind)) {
- impl()->ParseAndRewriteAsyncGeneratorFunctionBody(pos, kind, body,
- CHECK_OK_VOID);
+ impl()->ParseAndRewriteAsyncGeneratorFunctionBody(pos, kind,
+ &inner_body);
} else if (IsGeneratorFunction(kind)) {
- impl()->ParseAndRewriteGeneratorFunctionBody(pos, kind, body,
- CHECK_OK_VOID);
+ impl()->ParseAndRewriteGeneratorFunctionBody(pos, kind, &inner_body);
} else if (IsAsyncFunction(kind)) {
- ParseAsyncFunctionBody(inner_scope, body, CHECK_OK_VOID);
+ ParseAsyncFunctionBody(inner_scope, &inner_body);
} else {
- ParseStatementList(body, closing_token, CHECK_OK_VOID);
+ ParseStatementList(&inner_body, closing_token);
}
if (IsDerivedConstructor(kind)) {
- body->Add(factory()->NewReturnStatement(impl()->ThisExpression(),
- kNoSourcePosition),
- zone());
+ inner_body.Add(factory()->NewReturnStatement(impl()->ThisExpression(),
+ kNoSourcePosition));
}
- Expect(closing_token, CHECK_OK_VOID);
+ Expect(closing_token);
}
}
scope()->set_end_position(end_position());
- if (!parameters.is_simple) {
+ bool allow_duplicate_parameters = false;
+
+ if (V8_LIKELY(parameters.is_simple)) {
+ DCHECK_EQ(inner_scope, function_scope);
+ if (is_sloppy(function_scope->language_mode())) {
+ impl()->InsertSloppyBlockFunctionVarBindings(function_scope);
+ }
+ allow_duplicate_parameters =
+ is_sloppy(function_scope->language_mode()) && !IsConciseMethod(kind);
+ } else {
DCHECK_NOT_NULL(inner_scope);
DCHECK_EQ(function_scope, scope());
DCHECK_EQ(function_scope, inner_scope->outer_scope());
impl()->SetLanguageMode(function_scope, inner_scope->language_mode());
- BlockT init_block =
- impl()->BuildParameterInitializationBlock(parameters, CHECK_OK_VOID);
if (is_sloppy(inner_scope->language_mode())) {
impl()->InsertSloppyBlockFunctionVarBindings(inner_scope);
}
- // TODO(littledan): Merge the two rejection blocks into one
- if (IsAsyncFunction(kind) && !IsAsyncGeneratorFunction(kind)) {
- init_block = impl()->BuildRejectPromiseOnException(init_block);
- }
-
inner_scope->set_end_position(end_position());
if (inner_scope->FinalizeBlockScope() != nullptr) {
- impl()->CheckConflictingVarDeclarations(inner_scope, CHECK_OK_VOID);
+ BlockT inner_block = factory()->NewBlock(true, inner_body);
+ inner_body.Rewind();
+ inner_body.Add(inner_block);
+ inner_block->set_scope(inner_scope);
+ const AstRawString* conflict = inner_scope->FindVariableDeclaredIn(
+ function_scope, VariableMode::kLastLexicalVariableMode);
+ if (conflict != nullptr) {
+ impl()->ReportVarRedeclarationIn(conflict, inner_scope);
+ }
+ impl()->CheckConflictingVarDeclarations(inner_scope);
impl()->InsertShadowingVarBindingInitializers(inner_block);
- } else {
- inner_block->set_scope(nullptr);
- }
- inner_scope = nullptr;
-
- result->Add(init_block, zone());
- result->Add(inner_block, zone());
- } else {
- DCHECK_EQ(inner_scope, function_scope);
- if (is_sloppy(function_scope->language_mode())) {
- impl()->InsertSloppyBlockFunctionVarBindings(function_scope);
}
}
+ ValidateFormalParameters(language_mode(), parameters,
+ allow_duplicate_parameters);
+
if (!IsArrowFunction(kind)) {
// Declare arguments after parsing the function since lexical 'arguments'
// masks the arguments object. Declare arguments before declaring the
@@ -4296,6 +3841,8 @@ void ParserBase<Impl>::ParseFunctionBody(
}
impl()->DeclareFunctionNameVar(function_name, function_type, function_scope);
+
+ inner_body.MergeInto(body);
}
template <typename Impl>
@@ -4303,26 +3850,23 @@ void ParserBase<Impl>::CheckArityRestrictions(int param_count,
FunctionKind function_kind,
bool has_rest,
int formals_start_pos,
- int formals_end_pos, bool* ok) {
+ int formals_end_pos) {
if (IsGetterFunction(function_kind)) {
if (param_count != 0) {
impl()->ReportMessageAt(
Scanner::Location(formals_start_pos, formals_end_pos),
MessageTemplate::kBadGetterArity);
- *ok = false;
}
} else if (IsSetterFunction(function_kind)) {
if (param_count != 1) {
impl()->ReportMessageAt(
Scanner::Location(formals_start_pos, formals_end_pos),
MessageTemplate::kBadSetterArity);
- *ok = false;
}
if (has_rest) {
impl()->ReportMessageAt(
Scanner::Location(formals_start_pos, formals_end_pos),
MessageTemplate::kBadSetterRestParameter);
- *ok = false;
}
}
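// For example, `{ get x(v) {} }` violates kBadGetterArity,
// `{ set x() {} }` violates kBadSetterArity, and `{ set x(...v) {} }`
// violates kBadSetterRestParameter.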
}
@@ -4353,25 +3897,9 @@ bool ParserBase<Impl>::IsNextLetKeyword() {
}
template <typename Impl>
-bool ParserBase<Impl>::IsTrivialExpression() {
- if (Token::IsTrivialExpressionToken(peek())) {
- // PeekAhead() may not always be called, so we only call it after checking
- // peek().
- Token::Value peek_ahead = PeekAhead();
- if (peek_ahead == Token::COMMA || peek_ahead == Token::RPAREN ||
- peek_ahead == Token::SEMICOLON || peek_ahead == Token::RBRACK ||
- Token::IsAssignmentOp(peek_ahead)) {
- return true;
- }
- }
- return false;
-}
-
-template <typename Impl>
typename ParserBase<Impl>::ExpressionT
ParserBase<Impl>::ParseArrowFunctionLiteral(
- bool accept_IN, const FormalParametersT& formal_parameters,
- int rewritable_length, bool* ok) {
+ const FormalParametersT& formal_parameters) {
const RuntimeCallCounterId counters[2][2] = {
{RuntimeCallCounterId::kParseBackgroundArrowFunctionLiteral,
RuntimeCallCounterId::kParseArrowFunctionLiteral},
@@ -4383,16 +3911,15 @@ ParserBase<Impl>::ParseArrowFunctionLiteral(
base::ElapsedTimer timer;
if (V8_UNLIKELY(FLAG_log_function_events)) timer.Start();
- if (peek() == Token::ARROW && scanner_->HasLineTerminatorBeforeNext()) {
+ DCHECK_IMPLIES(!has_error(), peek() == Token::ARROW);
+ if (scanner_->HasLineTerminatorBeforeNext()) {
// ASI inserts `;` after arrow parameters if a line terminator is found.
// `=> ...` is never a valid expression, so report as syntax error.
// If next token is not `=>`, it's a syntax error anyways.
- ReportUnexpectedTokenAt(scanner_->peek_location(), Token::ARROW);
- *ok = false;
- return impl()->NullExpression();
+ impl()->ReportUnexpectedTokenAt(scanner_->peek_location(), Token::ARROW);
+ return impl()->FailureExpression();
}
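+ // For example, `(a, b)` followed by a newline and `=> a + b` fails
+ // here: the grammar forbids a line terminator before `=>`.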
- StatementListT body = impl()->NullStatementList();
int expected_property_count = -1;
int suspend_count = 0;
int function_literal_id = GetNextFunctionLiteralId();
@@ -4407,93 +3934,102 @@ ParserBase<Impl>::ParseArrowFunctionLiteral(
bool is_lazy_top_level_function =
can_preparse && impl()->AllowsLazyParsingWithoutUnresolvedVariables();
bool has_braces = true;
- ProducedPreParsedScopeData* produced_preparsed_scope_data = nullptr;
+ ProducedPreparseData* produced_preparse_data = nullptr;
+ StatementListT body(pointer_buffer());
{
FunctionState function_state(&function_state_, &scope_,
formal_parameters.scope);
- // Move any queued destructuring assignments which appeared
- // in this function's parameter list into its own function_state.
- function_state.AdoptDestructuringAssignmentsFromParentState(
- rewritable_length);
-
- Expect(Token::ARROW, CHECK_OK);
+ Consume(Token::ARROW);
if (peek() == Token::LBRACE) {
// Multiple statement body
DCHECK_EQ(scope(), formal_parameters.scope);
+
if (is_lazy_top_level_function) {
// FIXME(marja): Arrow function parameters will be parsed even if the
// body is preparsed; move relevant parts of parameter handling to
// simulate consistent parameter handling.
+ // Building the parameter initialization block declares the parameters.
+ // TODO(verwaest): Rely on ArrowHeadParsingScope instead.
+ if (!formal_parameters.is_simple) {
+ impl()->BuildParameterInitializationBlock(formal_parameters);
+ if (has_error()) return impl()->FailureExpression();
+ }
+
// For arrow functions, we don't need to retrieve data about function
// parameters.
int dummy_num_parameters = -1;
DCHECK_NE(kind & FunctionKind::kArrowFunction, 0);
- FunctionLiteral::EagerCompileHint hint;
bool did_preparse_successfully = impl()->SkipFunction(
nullptr, kind, FunctionLiteral::kAnonymousExpression,
formal_parameters.scope, &dummy_num_parameters,
- &produced_preparsed_scope_data, false, false, &hint, CHECK_OK);
+ &produced_preparse_data);
- DCHECK_NULL(produced_preparsed_scope_data);
+ DCHECK_NULL(produced_preparse_data);
if (did_preparse_successfully) {
- // Discard any queued destructuring assignments which appeared
- // in this function's parameter list, and which were adopted
- // into this function state, above.
- function_state.RewindDestructuringAssignments(0);
+ // Validate parameter names. We can do this only after preparsing the
+ // function, since the function can declare itself strict.
+ ValidateFormalParameters(language_mode(), formal_parameters, false);
} else {
// In case we did not successfully preparse the function because of an
// unidentified error, we do a full reparse to return the error.
+ // Parse again in the outer scope, since the language mode may change.
+ BlockState block_state(&scope_, scope()->outer_scope());
+ ExpressionT expression = ParseConditionalExpression();
+ // Reparsing the head may have caused a stack overflow.
+ if (has_error()) return impl()->FailureExpression();
+
+ DeclarationScope* function_scope = next_arrow_function_info_.scope;
+ FunctionState function_state(&function_state_, &scope_,
+ function_scope);
+ Scanner::Location loc(function_scope->start_position(),
+ end_position());
+ FormalParametersT parameters(function_scope);
+ parameters.is_simple = function_scope->has_simple_parameters();
+ impl()->DeclareArrowFunctionFormalParameters(&parameters, expression,
+ loc);
+ next_arrow_function_info_.Reset();
+
+ Consume(Token::ARROW);
Consume(Token::LBRACE);
- body = impl()->NewStatementList(8);
- ParseFunctionBody(body, impl()->NullIdentifier(), kNoSourcePosition,
- formal_parameters, kind,
+
+ AcceptINScope scope(this, true);
+ ParseFunctionBody(&body, impl()->NullIdentifier(), kNoSourcePosition,
+ parameters, kind,
FunctionLiteral::kAnonymousExpression,
- FunctionBodyType::kBlock, true, ok);
- CHECK(!*ok);
- return impl()->NullExpression();
+ FunctionBodyType::kBlock);
+ CHECK(has_error());
+ return impl()->FailureExpression();
}
} else {
Consume(Token::LBRACE);
- body = impl()->NewStatementList(8);
- ParseFunctionBody(body, impl()->NullIdentifier(), kNoSourcePosition,
+ AcceptINScope scope(this, true);
+ ParseFunctionBody(&body, impl()->NullIdentifier(), kNoSourcePosition,
formal_parameters, kind,
FunctionLiteral::kAnonymousExpression,
- FunctionBodyType::kBlock, true, CHECK_OK);
+ FunctionBodyType::kBlock);
expected_property_count = function_state.expected_property_count();
}
} else {
// Single-expression body
has_braces = false;
- body = impl()->NewStatementList(1);
- ParseFunctionBody(body, impl()->NullIdentifier(), kNoSourcePosition,
+ ParseFunctionBody(&body, impl()->NullIdentifier(), kNoSourcePosition,
formal_parameters, kind,
FunctionLiteral::kAnonymousExpression,
- FunctionBodyType::kExpression, accept_IN, CHECK_OK);
+ FunctionBodyType::kExpression);
expected_property_count = function_state.expected_property_count();
}
formal_parameters.scope->set_end_position(end_position());
- // Arrow function formal parameters are parsed as StrictFormalParameterList,
- // which is not the same as "parameters of a strict function"; it only means
- // that duplicates are not allowed. Of course, the arrow function may
- // itself be strict as well.
- const bool allow_duplicate_parameters = false;
- ValidateFormalParameters(language_mode(), allow_duplicate_parameters,
- CHECK_OK);
-
// Validate strict mode.
if (is_strict(language_mode())) {
CheckStrictOctalLiteral(formal_parameters.scope->start_position(),
- end_position(), CHECK_OK);
+ end_position());
}
- impl()->CheckConflictingVarDeclarations(formal_parameters.scope, CHECK_OK);
-
- impl()->RewriteDestructuringAssignments();
suspend_count = function_state.suspend_count();
}
@@ -4504,7 +4040,7 @@ ParserBase<Impl>::ParseArrowFunctionLiteral(
FunctionLiteral::kNoDuplicateParameters,
FunctionLiteral::kAnonymousExpression, eager_compile_hint,
formal_parameters.scope->start_position(), has_braces,
- function_literal_id, produced_preparsed_scope_data);
+ function_literal_id, produced_preparse_data);
function_literal->set_suspend_count(suspend_count);
function_literal->set_function_token_position(
@@ -4528,7 +4064,7 @@ ParserBase<Impl>::ParseArrowFunctionLiteral(
template <typename Impl>
typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseClassLiteral(
IdentifierT name, Scanner::Location class_name_location,
- bool name_is_strict_reserved, int class_token_pos, bool* ok) {
+ bool name_is_strict_reserved, int class_token_pos) {
bool is_anonymous = impl()->IsNull(name);
// All parts of a ClassDeclaration and ClassExpression are strict code.
@@ -4536,14 +4072,12 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseClassLiteral(
if (name_is_strict_reserved) {
impl()->ReportMessageAt(class_name_location,
MessageTemplate::kUnexpectedStrictReserved);
- *ok = false;
- return impl()->NullExpression();
+ return impl()->FailureExpression();
}
if (impl()->IsEvalOrArguments(name)) {
impl()->ReportMessageAt(class_name_location,
MessageTemplate::kStrictEvalArguments);
- *ok = false;
- return impl()->NullExpression();
+ return impl()->FailureExpression();
}
}
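// For example, `class static {}` (a strict reserved word) and
// `class eval {}` are SyntaxErrors: class code is always strict.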
@@ -4553,77 +4087,79 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseClassLiteral(
ClassInfo class_info(this);
class_info.is_anonymous = is_anonymous;
- impl()->DeclareClassVariable(name, &class_info, class_token_pos, CHECK_OK);
+ impl()->DeclareClassVariable(name, &class_info, class_token_pos);
scope()->set_start_position(end_position());
if (Check(Token::EXTENDS)) {
FuncNameInferrerState fni_state(&fni_);
- ExpressionClassifier extends_classifier(this);
- class_info.extends = ParseLeftHandSideExpression(CHECK_OK);
- ValidateExpression(CHECK_OK);
- AccumulateFormalParameterContainmentErrors();
+ ExpressionParsingScope scope(impl());
+ class_info.extends = ParseLeftHandSideExpression();
+ scope.ValidateExpression();
}
- ClassLiteralChecker checker(this);
-
- Expect(Token::LBRACE, CHECK_OK);
+ Expect(Token::LBRACE);
const bool has_extends = !impl()->IsNull(class_info.extends);
while (peek() != Token::RBRACE) {
if (Check(Token::SEMICOLON)) continue;
FuncNameInferrerState fni_state(&fni_);
- bool is_computed_name = false; // Classes do not care about computed
- // property names here.
- bool is_static;
- ClassLiteralProperty::Kind property_kind;
- ExpressionClassifier property_classifier(this);
- IdentifierT property_name;
// If we haven't seen the constructor yet, it potentially is the next
// property.
bool is_constructor = !class_info.has_seen_constructor;
- ClassLiteralPropertyT property = ParseClassPropertyDefinition(
- &checker, &class_info, &property_name, has_extends, &is_computed_name,
- &property_kind, &is_static, CHECK_OK);
- if (!class_info.has_static_computed_names && is_static &&
- is_computed_name) {
+ ParsePropertyInfo prop_info(this);
+ prop_info.position = PropertyPosition::kClassLiteral;
+ ClassLiteralPropertyT property =
+ ParseClassPropertyDefinition(&class_info, &prop_info, has_extends);
+
+ if (has_error()) return impl()->FailureExpression();
+
+ ClassLiteralProperty::Kind property_kind =
+ ClassPropertyKindFor(prop_info.kind);
+ if (!class_info.has_static_computed_names && prop_info.is_static &&
+ prop_info.is_computed_name) {
class_info.has_static_computed_names = true;
}
- if (is_computed_name &&
- property_kind == ClassLiteralProperty::PUBLIC_FIELD) {
- class_info.computed_field_count++;
- }
is_constructor &= class_info.has_seen_constructor;
- ValidateExpression(CHECK_OK);
- AccumulateFormalParameterContainmentErrors();
- impl()->DeclareClassProperty(name, property, property_name, property_kind,
- is_static, is_constructor, is_computed_name,
- &class_info, CHECK_OK);
+ if (V8_UNLIKELY(property_kind == ClassLiteralProperty::FIELD)) {
+ if (prop_info.is_computed_name) {
+ DCHECK(!prop_info.is_private);
+ class_info.computed_field_count++;
+ }
+
+ impl()->DeclareClassField(property, prop_info.name, prop_info.is_static,
+ prop_info.is_computed_name,
+ prop_info.is_private, &class_info);
+ } else {
+ impl()->DeclareClassProperty(name, property, is_constructor, &class_info);
+ }
impl()->InferFunctionName();
}
- Expect(Token::RBRACE, CHECK_OK);
+ Expect(Token::RBRACE);
int end_pos = end_position();
block_scope->set_end_position(end_pos);
return impl()->RewriteClassLiteral(block_scope, name, &class_info,
- class_token_pos, end_pos, ok);
+ class_token_pos, end_pos);
}
template <typename Impl>
-void ParserBase<Impl>::ParseAsyncFunctionBody(Scope* scope, StatementListT body,
- bool* ok) {
- BlockT block = factory()->NewBlock(8, true);
-
- ParseStatementList(block->statements(), Token::RBRACE, CHECK_OK_VOID);
+void ParserBase<Impl>::ParseAsyncFunctionBody(Scope* scope,
+ StatementListT* body) {
+ BlockT block = impl()->NullBlock();
+ {
+ StatementListT statements(pointer_buffer());
+ ParseStatementList(&statements, Token::RBRACE);
+ block = factory()->NewBlock(true, statements);
+ }
impl()->RewriteAsyncFunctionBody(
- body, block, factory()->NewUndefinedLiteral(kNoSourcePosition),
- CHECK_OK_VOID);
+ body, block, factory()->NewUndefinedLiteral(kNoSourcePosition));
scope->set_end_position(end_position());
}
template <typename Impl>
typename ParserBase<Impl>::ExpressionT
-ParserBase<Impl>::ParseAsyncFunctionLiteral(bool* ok) {
+ParserBase<Impl>::ParseAsyncFunctionLiteral() {
// AsyncFunctionLiteral ::
// async [no LineTerminator here] function ( FormalParameters[Await] )
// { AsyncFunctionBody }
@@ -4632,43 +4168,41 @@ ParserBase<Impl>::ParseAsyncFunctionLiteral(bool* ok) {
// ( FormalParameters[Await] ) { AsyncFunctionBody }
DCHECK_EQ(scanner()->current_token(), Token::ASYNC);
int pos = position();
- Expect(Token::FUNCTION, CHECK_OK);
- bool is_strict_reserved = false;
+ Consume(Token::FUNCTION);
IdentifierT name = impl()->NullIdentifier();
FunctionLiteral::FunctionType type = FunctionLiteral::kAnonymousExpression;
ParseFunctionFlags flags = ParseFunctionFlag::kIsAsync;
if (Check(Token::MUL)) flags |= ParseFunctionFlag::kIsGenerator;
const FunctionKind kind = FunctionKindFor(flags);
+ bool is_strict_reserved = Token::IsStrictReservedWord(peek());
if (impl()->ParsingDynamicFunctionDeclaration()) {
// We don't want dynamic functions to actually declare their name
// "anonymous". We just want that name in the toString().
- if (stack_overflow()) {
- *ok = false;
- return impl()->NullExpression();
- }
+
+ // Consuming a token we did not peek yet, which could lead to an ILLEGAL token
+ // in the case of a stack overflow.

Consume(Token::IDENTIFIER);
- DCHECK(scanner()->CurrentMatchesContextual(Token::ANONYMOUS));
+ DCHECK_IMPLIES(!has_error(),
+ scanner()->CurrentSymbol(ast_value_factory()) ==
+ ast_value_factory()->anonymous_string());
} else if (peek_any_identifier()) {
type = FunctionLiteral::kNamedExpression;
- bool is_await = false;
- name = ParseIdentifierOrStrictReservedWord(kind, &is_strict_reserved,
- &is_await, CHECK_OK);
- // If the function name is "await", ParseIdentifierOrStrictReservedWord
- // recognized the error.
- DCHECK(!is_await);
- }
- return impl()->ParseFunctionLiteral(
+ name = ParseIdentifier(kind);
+ }
+ FunctionLiteralT result = impl()->ParseFunctionLiteral(
name, scanner()->location(),
is_strict_reserved ? kFunctionNameIsStrictReserved
: kFunctionNameValidityUnknown,
- kind, pos, type, language_mode(), nullptr, ok);
+ kind, pos, type, language_mode(), nullptr);
+ if (impl()->IsNull(result)) return impl()->FailureExpression();
+ return result;
}
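
A short sketch of the function-expression shapes this production accepts; the is_strict_reserved check applies only to the optional name:

    const anon = async function () {};        // kAnonymousExpression
    const named = async function* gen() {};   // kIsAsync | kIsGenerator, named
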
template <typename Impl>
typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseTemplateLiteral(
- ExpressionT tag, int start, bool tagged, bool* ok) {
+ ExpressionT tag, int start, bool tagged) {
// A TemplateLiteral is made up of 0 or more TEMPLATE_SPAN tokens (literal
// text followed by a substitution expression), finalized by a single
// TEMPLATE_TAIL.
@@ -4696,7 +4230,7 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseTemplateLiteral(
Consume(Token::TEMPLATE_TAIL);
int pos = position();
typename Impl::TemplateLiteralState ts = impl()->OpenTemplateLiteral(pos);
- bool is_valid = CheckTemplateEscapes(forbid_illegal_escapes, CHECK_OK);
+ bool is_valid = CheckTemplateEscapes(forbid_illegal_escapes);
impl()->AddTemplateSpan(&ts, is_valid, true);
return impl()->CloseTemplateLiteral(&ts, start, tag);
}
@@ -4704,7 +4238,7 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseTemplateLiteral(
Consume(Token::TEMPLATE_SPAN);
int pos = position();
typename Impl::TemplateLiteralState ts = impl()->OpenTemplateLiteral(pos);
- bool is_valid = CheckTemplateEscapes(forbid_illegal_escapes, CHECK_OK);
+ bool is_valid = CheckTemplateEscapes(forbid_illegal_escapes);
impl()->AddTemplateSpan(&ts, is_valid, false);
Token::Value next;
@@ -4714,29 +4248,16 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseTemplateLiteral(
do {
next = peek();
- if (next == Token::EOS) {
- impl()->ReportMessageAt(Scanner::Location(start, peek_position()),
- MessageTemplate::kUnterminatedTemplate);
- *ok = false;
- return impl()->NullExpression();
- } else if (next == Token::ILLEGAL) {
- impl()->ReportMessageAt(
- Scanner::Location(position() + 1, peek_position()),
- MessageTemplate::kUnexpectedToken, "ILLEGAL", kSyntaxError);
- *ok = false;
- return impl()->NullExpression();
- }
int expr_pos = peek_position();
- ExpressionT expression = ParseExpressionCoverGrammar(true, CHECK_OK);
- ValidateExpression(CHECK_OK);
+ AcceptINScope scope(this, true);
+ ExpressionT expression = ParseExpressionCoverGrammar();
impl()->AddTemplateExpression(&ts, expression);
if (peek() != Token::RBRACE) {
impl()->ReportMessageAt(Scanner::Location(expr_pos, peek_position()),
MessageTemplate::kUnterminatedTemplateExpr);
- *ok = false;
- return impl()->NullExpression();
+ return impl()->FailureExpression();
}
// If we didn't die parsing that expression, our next token should be a
@@ -4745,53 +4266,34 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseTemplateLiteral(
Next();
pos = position();
- if (next == Token::EOS) {
- impl()->ReportMessageAt(Scanner::Location(start, pos),
- MessageTemplate::kUnterminatedTemplate);
- *ok = false;
- return impl()->NullExpression();
- } else if (next == Token::ILLEGAL) {
- impl()->ReportMessageAt(
- Scanner::Location(position() + 1, peek_position()),
- MessageTemplate::kUnexpectedToken, "ILLEGAL", kSyntaxError);
- *ok = false;
- return impl()->NullExpression();
- }
-
- bool is_valid = CheckTemplateEscapes(forbid_illegal_escapes, CHECK_OK);
+ bool is_valid = CheckTemplateEscapes(forbid_illegal_escapes);
impl()->AddTemplateSpan(&ts, is_valid, next == Token::TEMPLATE_TAIL);
} while (next == Token::TEMPLATE_SPAN);
- DCHECK_EQ(next, Token::TEMPLATE_TAIL);
+ DCHECK_IMPLIES(!has_error(), next == Token::TEMPLATE_TAIL);
// Once we've reached a TEMPLATE_TAIL, we can close the TemplateLiteral.
return impl()->CloseTemplateLiteral(&ts, start, tag);
}
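
To illustrate the span/tail structure described above (a sketch; the tag name is hypothetical): each `${...}` boundary closes a TEMPLATE_SPAN, and only a tagged literal tolerates invalid escapes, tracked per span via CheckTemplateEscapes:

    const who = "world";
    const plain = `head ${who} and ${1 + 1} tail`;   // SPAN, SPAN, TEMPLATE_TAIL
    const tag = (s: TemplateStringsArray, ...v: unknown[]) => s.raw.length + v.length;
    tag`a${who}b`;                                   // tagged: escapes may be invalid
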
template <typename Impl>
typename ParserBase<Impl>::ExpressionT
-ParserBase<Impl>::CheckAndRewriteReferenceExpression(
- ExpressionT expression, int beg_pos, int end_pos,
- MessageTemplate::Template message, bool* ok) {
- return CheckAndRewriteReferenceExpression(expression, beg_pos, end_pos,
- message, kReferenceError, ok);
-}
+ParserBase<Impl>::RewriteInvalidReferenceExpression(ExpressionT expression,
+ int beg_pos, int end_pos,
+ MessageTemplate message,
+ ParseErrorType type) {
+ DCHECK(!IsValidReferenceExpression(expression));
+ if (impl()->IsIdentifier(expression)) {
+ DCHECK(is_strict(language_mode()));
+ DCHECK(impl()->IsEvalOrArguments(impl()->AsIdentifier(expression)));
-template <typename Impl>
-typename ParserBase<Impl>::ExpressionT
-ParserBase<Impl>::CheckAndRewriteReferenceExpression(
- ExpressionT expression, int beg_pos, int end_pos,
- MessageTemplate::Template message, ParseErrorType type, bool* ok) {
- if (impl()->IsIdentifier(expression) && is_strict(language_mode()) &&
- impl()->IsEvalOrArguments(impl()->AsIdentifier(expression))) {
ReportMessageAt(Scanner::Location(beg_pos, end_pos),
MessageTemplate::kStrictEvalArguments, kSyntaxError);
- *ok = false;
- return impl()->NullExpression();
- }
- if (expression->IsValidReferenceExpression()) {
- return expression;
+ return impl()->FailureExpression();
}
if (expression->IsCall() && !expression->AsCall()->is_tagged_template()) {
+ expression_scope()->RecordPatternError(
+ Scanner::Location(beg_pos, end_pos),
+ MessageTemplate::kInvalidDestructuringTarget);
// If it is a call, make it a runtime error for legacy web compatibility.
// Bug: https://bugs.chromium.org/p/v8/issues/detail?id=4480
// Rewrite `expr' to `expr[throw ReferenceError]'.
@@ -4803,8 +4305,35 @@ ParserBase<Impl>::CheckAndRewriteReferenceExpression(
return factory()->NewProperty(expression, error, beg_pos);
}
ReportMessageAt(Scanner::Location(beg_pos, end_pos), message, type);
- *ok = false;
- return impl()->NullExpression();
+ return impl()->FailureExpression();
+}
+
+template <typename Impl>
+void ParserBase<Impl>::ClassifyParameter(IdentifierT parameter, int begin,
+ int end) {
+ if (impl()->IsEvalOrArguments(parameter)) {
+ expression_scope()->RecordStrictModeParameterError(
+ Scanner::Location(begin, end), MessageTemplate::kStrictEvalArguments);
+ }
+}
+
+template <typename Impl>
+void ParserBase<Impl>::ClassifyArrowParameter(
+ AccumulationScope* accumulation_scope, int position,
+ ExpressionT parameter) {
+ accumulation_scope->Accumulate();
+ if (parameter->is_parenthesized() ||
+ !(impl()->IsIdentifier(parameter) || parameter->IsPattern() ||
+ parameter->IsAssignment())) {
+ expression_scope()->RecordDeclarationError(
+ Scanner::Location(position, end_position()),
+ MessageTemplate::kInvalidDestructuringTarget);
+ } else if (impl()->IsIdentifier(parameter)) {
+ ClassifyParameter(impl()->AsIdentifier(parameter), position,
+ end_position());
+ } else {
+ expression_scope()->RecordNonSimpleParameter();
+ }
}
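
A small sketch of what ClassifyArrowParameter treats as simple versus non-simple (TypeScript syntax, names hypothetical):

    const add = (a: number, b: number) => a + b;    // identifiers: simple
    const pick = ({ x = 0 }: { x?: number }) => x;  // pattern: RecordNonSimpleParameter
    // const bad = ((a)) => a;  // parenthesized parameter: kInvalidDestructuringTarget
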
template <typename Impl>
@@ -4813,147 +4342,133 @@ bool ParserBase<Impl>::IsValidReferenceExpression(ExpressionT expression) {
}
template <typename Impl>
-void ParserBase<Impl>::CheckDestructuringElement(ExpressionT expression,
- int begin, int end) {
- if (!IsValidPattern(expression) && !expression->IsAssignment() &&
- !IsValidReferenceExpression(expression)) {
- classifier()->RecordAssignmentPatternError(
- Scanner::Location(begin, end),
+typename ParserBase<Impl>::ExpressionT
+ParserBase<Impl>::ParsePossibleDestructuringSubPattern(
+ AccumulationScope* scope) {
+ if (scope) scope->Accumulate();
+ int begin = peek_position();
+ ExpressionT result = ParseAssignmentExpressionCoverGrammar();
+
+ if (IsValidReferenceExpression(result)) {
+ // Parenthesized identifiers and property references are allowed as part of
+ // a larger assignment pattern, even though parenthesized patterns
+ // themselves are not allowed, e.g., "[(x)] = []". Only accumulate
+ // assignment pattern errors if the parsed expression is more complex.
+ if (impl()->IsIdentifier(result)) {
+ if (result->is_parenthesized()) {
+ expression_scope()->RecordDeclarationError(
+ Scanner::Location(begin, end_position()),
+ MessageTemplate::kInvalidDestructuringTarget);
+ }
+ IdentifierT identifier = impl()->AsIdentifier(result);
+ ClassifyParameter(identifier, begin, end_position());
+ } else {
+ DCHECK(result->IsProperty());
+ expression_scope()->RecordDeclarationError(
+ Scanner::Location(begin, end_position()),
+ MessageTemplate::kInvalidPropertyBindingPattern);
+ if (scope != nullptr) scope->ValidateExpression();
+ }
+ } else if (result->is_parenthesized() ||
+ (!result->IsPattern() && !result->IsAssignment())) {
+ expression_scope()->RecordPatternError(
+ Scanner::Location(begin, end_position()),
MessageTemplate::kInvalidDestructuringTarget);
}
+
+ return result;
}
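
The comment above is easiest to see with concrete targets (a sketch; declarations hypothetical):

    let x = 0;
    const o = { p: 0 };
    [x, o.p] = [1, 2];   // identifier and property references: valid targets
    // [(x)] = [3];      // parenthesized identifier inside a pattern: still allowed
    // ([x]) = [[4]];    // parenthesized pattern itself: kInvalidDestructuringTarget
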
template <typename Impl>
-typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseV8Intrinsic(
- bool* ok) {
+typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseV8Intrinsic() {
// CallRuntime ::
// '%' Identifier Arguments
int pos = peek_position();
- Expect(Token::MOD, CHECK_OK);
+ Consume(Token::MOD);
// Allow "eval" or "arguments" for backward compatibility.
- IdentifierT name = ParseIdentifier(kAllowRestrictedIdentifiers, CHECK_OK);
- Scanner::Location spread_pos;
- ExpressionClassifier classifier(this);
- ExpressionListT args = ParseArguments(&spread_pos, CHECK_OK);
-
- if (spread_pos.IsValid()) {
- *ok = false;
- ReportMessageAt(spread_pos, MessageTemplate::kIntrinsicWithSpread,
- kSyntaxError);
- return impl()->NullExpression();
+ IdentifierT name = ParseIdentifier();
+ if (peek() != Token::LPAREN) {
+ impl()->ReportUnexpectedToken(peek());
+ return impl()->FailureExpression();
}
+ bool has_spread;
+ ExpressionListT args(pointer_buffer());
+ ParseArguments(&args, &has_spread);
- return impl()->NewV8Intrinsic(name, args, pos, ok);
-}
-
-template <typename Impl>
-typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseDoExpression(
- bool* ok) {
- // AssignmentExpression ::
- // do '{' StatementList '}'
+ if (has_spread) {
+ ReportMessageAt(Scanner::Location(pos, position()),
+ MessageTemplate::kIntrinsicWithSpread, kSyntaxError);
+ return impl()->FailureExpression();
+ }
- int pos = peek_position();
- Expect(Token::DO, CHECK_OK);
- BlockT block = ParseBlock(nullptr, CHECK_OK);
- return impl()->RewriteDoExpression(block, pos, ok);
+ return impl()->NewV8Intrinsic(name, args, pos);
}
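
For reference, the runtime-call syntax parsed here is only reachable behind V8's --allow-natives-syntax flag, and spread arguments are rejected outright:

    // %DebugPrint(obj)       // '%' Identifier Arguments -> NewV8Intrinsic
    // %DebugPrint(...args)   // kIntrinsicWithSpread
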
-// Redefinition of CHECK_OK for parsing statements.
-#undef CHECK_OK
-#define CHECK_OK CHECK_OK_CUSTOM(NullStatement)
-
template <typename Impl>
-typename ParserBase<Impl>::LazyParsingResult
-ParserBase<Impl>::ParseStatementList(StatementListT body,
- Token::Value end_token, bool may_abort,
- bool* ok) {
+void ParserBase<Impl>::ParseStatementList(StatementListT* body,
+ Token::Value end_token) {
// StatementList ::
// (StatementListItem)* <end_token>
+ DCHECK_NOT_NULL(body);
- // Allocate a target stack to use for this set of source
- // elements. This way, all scripts and functions get their own
- // target stack thus avoiding illegal breaks and continues across
- // functions.
- typename Types::TargetScope target_scope(this);
- int count_statements = 0;
+ while (peek() == Token::STRING) {
+ bool use_strict = false;
+ bool use_asm = false;
- DCHECK(!impl()->IsNull(body));
- bool directive_prologue = true; // Parsing directive prologue.
+ Scanner::Location token_loc = scanner()->peek_location();
- while (peek() != end_token) {
- if (directive_prologue && peek() != Token::STRING) {
- directive_prologue = false;
+ if (scanner()->NextLiteralEquals("use strict")) {
+ use_strict = true;
+ } else if (scanner()->NextLiteralEquals("use asm")) {
+ use_asm = true;
}
- bool starts_with_identifier = peek() == Token::IDENTIFIER;
- Scanner::Location token_loc = scanner()->peek_location();
- StatementT stat =
- ParseStatementListItem(CHECK_OK_CUSTOM(Return, kLazyParsingComplete));
-
- if (impl()->IsNull(stat) || stat->IsEmptyStatement()) {
- directive_prologue = false; // End of directive prologue.
- continue;
- }
-
- if (directive_prologue) {
- // The length of the token is used to distinguish between string literals
- // that evaluate equal to directives but contain either escape sequences
- // (e.g., "use \x73trict") or line continuations (e.g., "use \(newline)
- // strict").
- if (impl()->IsUseStrictDirective(stat) &&
- token_loc.end_pos - token_loc.beg_pos == sizeof("use strict") + 1) {
- // Directive "use strict" (ES5 14.1).
- RaiseLanguageMode(LanguageMode::kStrict);
- if (!scope()->HasSimpleParameters()) {
- // TC39 deemed "use strict" directives to be an error when occurring
- // in the body of a function with non-simple parameter list, on
- // 29/7/2015. https://goo.gl/ueA7Ln
- impl()->ReportMessageAt(
- token_loc, MessageTemplate::kIllegalLanguageModeDirective,
- "use strict");
- *ok = false;
- return kLazyParsingComplete;
- }
- } else if (impl()->IsUseAsmDirective(stat) &&
- token_loc.end_pos - token_loc.beg_pos ==
- sizeof("use asm") + 1) {
- // Directive "use asm".
- impl()->SetAsmModule();
- } else if (impl()->IsStringLiteral(stat)) {
- // Possibly an unknown directive.
- // Should not change mode, but will increment usage counters
- // as appropriate. Ditto usages below.
- RaiseLanguageMode(LanguageMode::kSloppy);
- } else {
- // End of the directive prologue.
- directive_prologue = false;
- RaiseLanguageMode(LanguageMode::kSloppy);
+ StatementT stat = ParseStatementListItem();
+ if (impl()->IsNull(stat)) return;
+
+ body->Add(stat);
+
+ if (!impl()->IsStringLiteral(stat)) break;
+
+ if (use_strict) {
+ // Directive "use strict" (ES5 14.1).
+ RaiseLanguageMode(LanguageMode::kStrict);
+ if (!scope()->HasSimpleParameters()) {
+ // TC39 deemed "use strict" directives to be an error when occurring
+ // in the body of a function with non-simple parameter list, on
+ // 29/7/2015. https://goo.gl/ueA7Ln
+ impl()->ReportMessageAt(token_loc,
+ MessageTemplate::kIllegalLanguageModeDirective,
+ "use strict");
+ return;
}
+ } else if (use_asm) {
+ // Directive "use asm".
+ impl()->SetAsmModule();
} else {
+ // Possibly an unknown directive.
+ // Should not change mode, but will increment usage counters
+ // as appropriate. Ditto usages below.
RaiseLanguageMode(LanguageMode::kSloppy);
}
+ }
- // If we're allowed to abort, we will do so when we see a "long and
- // trivial" function. Our current definition of "long and trivial" is:
- // - over kLazyParseTrialLimit statements
- // - all starting with an identifier (i.e., no if, for, while, etc.)
- if (may_abort) {
- if (!starts_with_identifier) {
- may_abort = false;
- } else if (++count_statements > kLazyParseTrialLimit) {
- return kLazyParsingAborted;
- }
- }
-
- body->Add(stat, zone());
+ // Allocate a target stack to use for this set of source elements. This way,
+ // all scripts and functions get their own target stack thus avoiding illegal
+ // breaks and continues across functions.
+ TargetScopeT target_scope(this);
+ while (peek() != end_token) {
+ StatementT stat = ParseStatementListItem();
+ if (impl()->IsNull(stat)) return;
+ if (stat->IsEmptyStatement()) continue;
+ body->Add(stat);
}
- return kLazyParsingComplete;
}
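
A sketch of the directive-prologue handling above: only leading string-literal statements are considered, and (as the replaced comment noted) escaped spellings are plain strings rather than directives:

    function strictish() {
      "use strict";        // directive: RaiseLanguageMode(kStrict)
      // "use \x73trict";  // escaped form: just a string literal, mode unchanged
      return 1;
    }
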
template <typename Impl>
-typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseStatementListItem(
- bool* ok) {
+typename ParserBase<Impl>::StatementT
+ParserBase<Impl>::ParseStatementListItem() {
// ECMA 262 6th Edition
// StatementListItem[Yield, Return] :
// Statement[?Yield, ?Return]
@@ -4973,36 +4488,36 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseStatementListItem(
switch (peek()) {
case Token::FUNCTION:
- return ParseHoistableDeclaration(nullptr, false, ok);
+ return ParseHoistableDeclaration(nullptr, false);
case Token::CLASS:
Consume(Token::CLASS);
- return ParseClassDeclaration(nullptr, false, ok);
+ return ParseClassDeclaration(nullptr, false);
case Token::VAR:
case Token::CONST:
- return ParseVariableStatement(kStatementListItem, nullptr, ok);
+ return ParseVariableStatement(kStatementListItem, nullptr);
case Token::LET:
if (IsNextLetKeyword()) {
- return ParseVariableStatement(kStatementListItem, nullptr, ok);
+ return ParseVariableStatement(kStatementListItem, nullptr);
}
break;
case Token::ASYNC:
if (PeekAhead() == Token::FUNCTION &&
!scanner()->HasLineTerminatorAfterNext()) {
Consume(Token::ASYNC);
- return ParseAsyncFunctionDeclaration(nullptr, false, ok);
+ return ParseAsyncFunctionDeclaration(nullptr, false);
}
break;
default:
break;
}
- return ParseStatement(nullptr, nullptr, kAllowLabelledFunctionStatement, ok);
+ return ParseStatement(nullptr, nullptr, kAllowLabelledFunctionStatement);
}
template <typename Impl>
typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseStatement(
ZonePtrList<const AstRawString>* labels,
ZonePtrList<const AstRawString>* own_labels,
- AllowLabelledFunctionStatement allow_function, bool* ok) {
+ AllowLabelledFunctionStatement allow_function) {
// Statement ::
// Block
// VariableStatement
@@ -5031,46 +4546,48 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseStatement(
// parsed into an empty statement.
switch (peek()) {
case Token::LBRACE:
- return ParseBlock(labels, ok);
+ return ParseBlock(labels);
case Token::SEMICOLON:
Next();
- return factory()->NewEmptyStatement(kNoSourcePosition);
+ return factory()->EmptyStatement();
case Token::IF:
- return ParseIfStatement(labels, ok);
+ return ParseIfStatement(labels);
case Token::DO:
- return ParseDoWhileStatement(labels, own_labels, ok);
+ return ParseDoWhileStatement(labels, own_labels);
case Token::WHILE:
- return ParseWhileStatement(labels, own_labels, ok);
+ return ParseWhileStatement(labels, own_labels);
case Token::FOR:
if (V8_UNLIKELY(is_async_function() && PeekAhead() == Token::AWAIT)) {
- return ParseForAwaitStatement(labels, own_labels, ok);
+ return ParseForAwaitStatement(labels, own_labels);
}
- return ParseForStatement(labels, own_labels, ok);
+ return ParseForStatement(labels, own_labels);
case Token::CONTINUE:
- return ParseContinueStatement(ok);
+ return ParseContinueStatement();
case Token::BREAK:
- return ParseBreakStatement(labels, ok);
+ return ParseBreakStatement(labels);
case Token::RETURN:
- return ParseReturnStatement(ok);
+ return ParseReturnStatement();
case Token::THROW:
- return ParseThrowStatement(ok);
+ return ParseThrowStatement();
case Token::TRY: {
// It is somewhat complicated to have labels on try-statements.
// When breaking out of a try-finally statement, one must take
// great care not to treat it as a fall-through. It is much easier
// just to wrap the entire try-statement in a statement block and
// put the labels there.
- if (labels == nullptr) return ParseTryStatement(ok);
- BlockT result = factory()->NewBlock(1, false, labels);
- typename Types::Target target(this, result);
- StatementT statement = ParseTryStatement(CHECK_OK);
- result->statements()->Add(statement, zone());
+ if (labels == nullptr) return ParseTryStatement();
+ StatementListT statements(pointer_buffer());
+ BlockT result = factory()->NewBlock(false, labels);
+ TargetT target(this, result);
+ StatementT statement = ParseTryStatement();
+ statements.Add(statement);
+ result->InitializeStatements(statements, zone());
return result;
}
case Token::WITH:
- return ParseWithStatement(labels, ok);
+ return ParseWithStatement(labels);
case Token::SWITCH:
- return ParseSwitchStatement(labels, ok);
+ return ParseSwitchStatement(labels);
case Token::FUNCTION:
// FunctionDeclaration only allowed as a StatementListItem, not in
// an arbitrary Statement position. Exceptions such as
@@ -5081,72 +4598,77 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseStatement(
is_strict(language_mode())
? MessageTemplate::kStrictFunction
: MessageTemplate::kSloppyFunction);
- *ok = false;
return impl()->NullStatement();
case Token::DEBUGGER:
- return ParseDebuggerStatement(ok);
+ return ParseDebuggerStatement();
case Token::VAR:
- return ParseVariableStatement(kStatement, nullptr, ok);
+ return ParseVariableStatement(kStatement, nullptr);
case Token::ASYNC:
if (!scanner()->HasLineTerminatorAfterNext() &&
PeekAhead() == Token::FUNCTION) {
impl()->ReportMessageAt(
scanner()->peek_location(),
MessageTemplate::kAsyncFunctionInSingleStatementContext);
- *ok = false;
return impl()->NullStatement();
}
V8_FALLTHROUGH;
default:
return ParseExpressionOrLabelledStatement(labels, own_labels,
- allow_function, ok);
+ allow_function);
}
}
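
The try-with-labels wrapping above matters for finally fall-through; a minimal sketch of the source shape it handles:

    l: try {
      break l;             // leaves the wrapping block, not a fall-through
    } finally {
      console.log("finally still runs");
    }
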
template <typename Impl>
typename ParserBase<Impl>::BlockT ParserBase<Impl>::ParseBlock(
- ZonePtrList<const AstRawString>* labels, bool* ok) {
+ ZonePtrList<const AstRawString>* labels) {
// Block ::
// '{' StatementList '}'
- // Construct block expecting 16 statements.
- BlockT body = factory()->NewBlock(16, false, labels);
-
// Parse the statements and collect escaping labels.
- Expect(Token::LBRACE, CHECK_OK_CUSTOM(NullStatement));
+ BlockT body = factory()->NewBlock(false, labels);
+ StatementListT statements(pointer_buffer());
+
+ CheckStackOverflow();
+
{
BlockState block_state(zone(), &scope_);
- scope()->set_start_position(scanner()->location().beg_pos);
- typename Types::Target target(this, body);
+ scope()->set_start_position(peek_position());
+ TargetT target(this, body);
+
+ Expect(Token::LBRACE);
while (peek() != Token::RBRACE) {
- StatementT stat = ParseStatementListItem(CHECK_OK_CUSTOM(NullStatement));
- if (!impl()->IsNull(stat) && !stat->IsEmptyStatement()) {
- body->statements()->Add(stat, zone());
- }
+ StatementT stat = ParseStatementListItem();
+ if (impl()->IsNull(stat)) return body;
+ if (stat->IsEmptyStatement()) continue;
+ statements.Add(stat);
}
- Expect(Token::RBRACE, CHECK_OK_CUSTOM(NullStatement));
+ Expect(Token::RBRACE);
+
int end_pos = end_position();
scope()->set_end_position(end_pos);
+
impl()->RecordBlockSourceRange(body, end_pos);
body->set_scope(scope()->FinalizeBlockScope());
}
+
+ body->InitializeStatements(statements, zone_);
return body;
}
template <typename Impl>
typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseScopedStatement(
- ZonePtrList<const AstRawString>* labels, bool* ok) {
+ ZonePtrList<const AstRawString>* labels) {
if (is_strict(language_mode()) || peek() != Token::FUNCTION) {
- return ParseStatement(labels, nullptr, ok);
+ return ParseStatement(labels, nullptr);
} else {
// Make a block around the statement in case a lexical binding
// is introduced by a FunctionDeclaration.
BlockState block_state(zone(), &scope_);
scope()->set_start_position(scanner()->location().beg_pos);
BlockT block = factory()->NewBlock(1, false);
- StatementT body = ParseFunctionDeclaration(CHECK_OK);
+ StatementT body = ParseFunctionDeclaration();
block->statements()->Add(body, zone());
scope()->set_end_position(end_position());
block->set_scope(scope()->FinalizeBlockScope());
@@ -5157,7 +4679,7 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseScopedStatement(
template <typename Impl>
typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseVariableStatement(
VariableDeclarationContext var_context,
- ZonePtrList<const AstRawString>* names, bool* ok) {
+ ZonePtrList<const AstRawString>* names) {
// VariableStatement ::
// VariableDeclarations ';'
@@ -5174,15 +4696,14 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseVariableStatement(
// is inside an initializer block, it is ignored.
DeclarationParsingResult parsing_result;
- StatementT result =
- ParseVariableDeclarations(var_context, &parsing_result, names, CHECK_OK);
- ExpectSemicolon(ok);
- return result;
+ ParseVariableDeclarations(var_context, &parsing_result, names);
+ ExpectSemicolon();
+ return impl()->BuildInitializationBlock(&parsing_result);
}
template <typename Impl>
-typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseDebuggerStatement(
- bool* ok) {
+typename ParserBase<Impl>::StatementT
+ParserBase<Impl>::ParseDebuggerStatement() {
// In ECMA-262 'debugger' is defined as a reserved keyword. In some browser
// contexts this is used as a statement which invokes the debugger as if a
// break point is present.
@@ -5190,8 +4711,8 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseDebuggerStatement(
// 'debugger' ';'
int pos = peek_position();
- Expect(Token::DEBUGGER, CHECK_OK);
- ExpectSemicolon(CHECK_OK);
+ Consume(Token::DEBUGGER);
+ ExpectSemicolon();
return factory()->NewDebuggerStatement(pos);
}
@@ -5200,7 +4721,7 @@ typename ParserBase<Impl>::StatementT
ParserBase<Impl>::ParseExpressionOrLabelledStatement(
ZonePtrList<const AstRawString>* labels,
ZonePtrList<const AstRawString>* own_labels,
- AllowLabelledFunctionStatement allow_function, bool* ok) {
+ AllowLabelledFunctionStatement allow_function) {
// ExpressionStatement | LabelledStatement ::
// Expression ';'
// Identifier ':' Statement
@@ -5216,7 +4737,6 @@ ParserBase<Impl>::ParseExpressionOrLabelledStatement(
UNREACHABLE(); // Always handled by the callers.
case Token::CLASS:
ReportUnexpectedToken(Next());
- *ok = false;
return impl()->NullStatement();
case Token::LET: {
Token::Value next_next = PeekAhead();
@@ -5230,7 +4750,6 @@ ParserBase<Impl>::ParseExpressionOrLabelledStatement(
}
impl()->ReportMessageAt(scanner()->peek_location(),
MessageTemplate::kUnexpectedLexicalDeclaration);
- *ok = false;
return impl()->NullStatement();
}
default:
@@ -5238,20 +4757,20 @@ ParserBase<Impl>::ParseExpressionOrLabelledStatement(
}
bool starts_with_identifier = peek_any_identifier();
- ExpressionT expr = ParseExpression(CHECK_OK);
+ ExpressionT expr = ParseExpression();
if (peek() == Token::COLON && starts_with_identifier &&
impl()->IsIdentifier(expr)) {
// The whole expression was a single identifier, and not, e.g.,
// something starting with an identifier or a parenthesized identifier.
impl()->DeclareLabel(&labels, &own_labels,
- impl()->AsIdentifierExpression(expr), CHECK_OK);
+ impl()->AsIdentifierExpression(expr));
Consume(Token::COLON);
// ES#sec-labelled-function-declarations Labelled Function Declarations
if (peek() == Token::FUNCTION && is_sloppy(language_mode()) &&
allow_function == kAllowLabelledFunctionStatement) {
- return ParseFunctionDeclaration(ok);
+ return ParseFunctionDeclaration();
}
- return ParseStatement(labels, own_labels, allow_function, ok);
+ return ParseStatement(labels, own_labels, allow_function);
}
// If we have an extension, we allow a native function declaration.
@@ -5260,39 +4779,46 @@ ParserBase<Impl>::ParseExpressionOrLabelledStatement(
if (extension_ != nullptr && peek() == Token::FUNCTION &&
!scanner()->HasLineTerminatorBeforeNext() && impl()->IsNative(expr) &&
!scanner()->literal_contains_escapes()) {
- return ParseNativeDeclaration(ok);
+ return ParseNativeDeclaration();
}
// Parsed expression statement, followed by semicolon.
- ExpectSemicolon(CHECK_OK);
+ ExpectSemicolon();
+ if (expr->IsFailureExpression()) return impl()->NullStatement();
return factory()->NewExpressionStatement(expr, pos);
}
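
A quick sketch of the label detection above: a lone identifier followed by ':' is declared as a label instead of becoming an expression statement:

    outer: for (const n of [1, 2, 3]) {
      if (n === 2) continue outer;   // later resolved via LookupContinueTarget
    }
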
template <typename Impl>
typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseIfStatement(
- ZonePtrList<const AstRawString>* labels, bool* ok) {
+ ZonePtrList<const AstRawString>* labels) {
// IfStatement ::
// 'if' '(' Expression ')' Statement ('else' Statement)?
int pos = peek_position();
- Expect(Token::IF, CHECK_OK);
- Expect(Token::LPAREN, CHECK_OK);
- ExpressionT condition = ParseExpression(CHECK_OK);
- Expect(Token::RPAREN, CHECK_OK);
+ Consume(Token::IF);
+ Expect(Token::LPAREN);
+ ExpressionT condition = ParseExpression();
+ Expect(Token::RPAREN);
SourceRange then_range, else_range;
StatementT then_statement = impl()->NullStatement();
{
SourceRangeScope range_scope(scanner(), &then_range);
- then_statement = ParseScopedStatement(labels, CHECK_OK);
+ // Make a copy of {labels} to avoid conflicts with any
+ // labels that may be applied to the else clause below.
+ auto labels_copy =
+ labels == nullptr
+ ? labels
+ : new (zone()) ZonePtrList<const AstRawString>(*labels, zone());
+ then_statement = ParseScopedStatement(labels_copy);
}
StatementT else_statement = impl()->NullStatement();
if (Check(Token::ELSE)) {
- else_statement = ParseScopedStatement(labels, CHECK_OK);
+ else_statement = ParseScopedStatement(labels);
else_range = SourceRange::ContinuationOf(then_range, end_position());
} else {
- else_statement = factory()->NewEmptyStatement(kNoSourcePosition);
+ else_statement = factory()->EmptyStatement();
}
StatementT stmt =
factory()->NewIfStatement(condition, then_statement, else_statement, pos);
@@ -5301,37 +4827,34 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseIfStatement(
}
template <typename Impl>
-typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseContinueStatement(
- bool* ok) {
+typename ParserBase<Impl>::StatementT
+ParserBase<Impl>::ParseContinueStatement() {
// ContinueStatement ::
// 'continue' Identifier? ';'
int pos = peek_position();
- Expect(Token::CONTINUE, CHECK_OK);
+ Consume(Token::CONTINUE);
IdentifierT label = impl()->NullIdentifier();
Token::Value tok = peek();
- if (!scanner()->HasLineTerminatorBeforeNext() && tok != Token::SEMICOLON &&
- tok != Token::RBRACE && tok != Token::EOS) {
+ if (!scanner()->HasLineTerminatorBeforeNext() &&
+ !Token::IsAutoSemicolon(tok)) {
// ECMA allows "eval" or "arguments" as labels even in strict mode.
- label = ParseIdentifier(kAllowRestrictedIdentifiers, CHECK_OK);
+ label = ParseIdentifier();
}
- typename Types::IterationStatement target =
- impl()->LookupContinueTarget(label, CHECK_OK);
+ IterationStatementT target = impl()->LookupContinueTarget(label);
if (impl()->IsNull(target)) {
// Illegal continue statement.
- MessageTemplate::Template message = MessageTemplate::kIllegalContinue;
- typename Types::BreakableStatement breakable_target =
- impl()->LookupBreakTarget(label, CHECK_OK);
+ MessageTemplate message = MessageTemplate::kIllegalContinue;
+ BreakableStatementT breakable_target = impl()->LookupBreakTarget(label);
if (impl()->IsNull(label)) {
message = MessageTemplate::kNoIterationStatement;
} else if (impl()->IsNull(breakable_target)) {
message = MessageTemplate::kUnknownLabel;
}
ReportMessage(message, label);
- *ok = false;
return impl()->NullStatement();
}
- ExpectSemicolon(CHECK_OK);
+ ExpectSemicolon();
StatementT stmt = factory()->NewContinueStatement(target, pos);
impl()->RecordJumpStatementSourceRange(stmt, end_position());
return stmt;
@@ -5339,53 +4862,50 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseContinueStatement(
template <typename Impl>
typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseBreakStatement(
- ZonePtrList<const AstRawString>* labels, bool* ok) {
+ ZonePtrList<const AstRawString>* labels) {
// BreakStatement ::
// 'break' Identifier? ';'
int pos = peek_position();
- Expect(Token::BREAK, CHECK_OK);
+ Consume(Token::BREAK);
IdentifierT label = impl()->NullIdentifier();
Token::Value tok = peek();
- if (!scanner()->HasLineTerminatorBeforeNext() && tok != Token::SEMICOLON &&
- tok != Token::RBRACE && tok != Token::EOS) {
+ if (!scanner()->HasLineTerminatorBeforeNext() &&
+ !Token::IsAutoSemicolon(tok)) {
// ECMA allows "eval" or "arguments" as labels even in strict mode.
- label = ParseIdentifier(kAllowRestrictedIdentifiers, CHECK_OK);
+ label = ParseIdentifier();
}
// Parse labeled break statements that target themselves into
// empty statements, e.g. 'l1: l2: l3: break l2;'
if (!impl()->IsNull(label) && impl()->ContainsLabel(labels, label)) {
- ExpectSemicolon(CHECK_OK);
- return factory()->NewEmptyStatement(pos);
+ ExpectSemicolon();
+ return factory()->EmptyStatement();
}
- typename Types::BreakableStatement target =
- impl()->LookupBreakTarget(label, CHECK_OK);
+ BreakableStatementT target = impl()->LookupBreakTarget(label);
if (impl()->IsNull(target)) {
// Illegal break statement.
- MessageTemplate::Template message = MessageTemplate::kIllegalBreak;
+ MessageTemplate message = MessageTemplate::kIllegalBreak;
if (!impl()->IsNull(label)) {
message = MessageTemplate::kUnknownLabel;
}
ReportMessage(message, label);
- *ok = false;
return impl()->NullStatement();
}
- ExpectSemicolon(CHECK_OK);
+ ExpectSemicolon();
StatementT stmt = factory()->NewBreakStatement(target, pos);
impl()->RecordJumpStatementSourceRange(stmt, end_position());
return stmt;
}
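
As the comment above notes, a break that targets one of its own labels is parsed away entirely:

    l1: l2: l3: break l2;   // parses to EmptyStatement
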
template <typename Impl>
-typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseReturnStatement(
- bool* ok) {
+typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseReturnStatement() {
// ReturnStatement ::
// 'return' [no line terminator] Expression? ';'
// Consume the return token. It is necessary to do that before
// reporting any errors on it, because of the way errors are
// reported (underlining).
- Expect(Token::RETURN, CHECK_OK);
+ Consume(Token::RETURN);
Scanner::Location loc = scanner()->location();
switch (GetDeclarationScope()->scope_type()) {
@@ -5393,7 +4913,6 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseReturnStatement(
case EVAL_SCOPE:
case MODULE_SCOPE:
impl()->ReportMessageAt(loc, MessageTemplate::kIllegalReturn);
- *ok = false;
return impl()->NullStatement();
default:
break;
@@ -5401,15 +4920,15 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseReturnStatement(
Token::Value tok = peek();
ExpressionT return_value = impl()->NullExpression();
- if (scanner()->HasLineTerminatorBeforeNext() || tok == Token::SEMICOLON ||
- tok == Token::RBRACE || tok == Token::EOS) {
+ if (scanner()->HasLineTerminatorBeforeNext() || Token::IsAutoSemicolon(tok)) {
if (IsDerivedConstructor(function_state_->kind())) {
return_value = impl()->ThisExpression(loc.beg_pos);
}
} else {
- return_value = ParseExpression(CHECK_OK);
+ return_value = ParseExpression();
}
- ExpectSemicolon(CHECK_OK);
+ ExpectSemicolon();
+
return_value = impl()->RewriteReturn(return_value, loc.beg_pos);
int continuation_pos = end_position();
StatementT stmt =
@@ -5420,29 +4939,28 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseReturnStatement(
template <typename Impl>
typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseWithStatement(
- ZonePtrList<const AstRawString>* labels, bool* ok) {
+ ZonePtrList<const AstRawString>* labels) {
// WithStatement ::
// 'with' '(' Expression ')' Statement
- Expect(Token::WITH, CHECK_OK);
+ Consume(Token::WITH);
int pos = position();
if (is_strict(language_mode())) {
ReportMessage(MessageTemplate::kStrictWith);
- *ok = false;
return impl()->NullStatement();
}
- Expect(Token::LPAREN, CHECK_OK);
- ExpressionT expr = ParseExpression(CHECK_OK);
- Expect(Token::RPAREN, CHECK_OK);
+ Expect(Token::LPAREN);
+ ExpressionT expr = ParseExpression();
+ Expect(Token::RPAREN);
Scope* with_scope = NewScope(WITH_SCOPE);
StatementT body = impl()->NullStatement();
{
BlockState block_state(&scope_, with_scope);
with_scope->set_start_position(scanner()->peek_location().beg_pos);
- body = ParseStatement(labels, nullptr, CHECK_OK);
+ body = ParseStatement(labels, nullptr);
with_scope->set_end_position(end_position());
}
return factory()->NewWithStatement(with_scope, expr, body, pos);
@@ -5451,27 +4969,30 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseWithStatement(
template <typename Impl>
typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseDoWhileStatement(
ZonePtrList<const AstRawString>* labels,
- ZonePtrList<const AstRawString>* own_labels, bool* ok) {
+ ZonePtrList<const AstRawString>* own_labels) {
// DoStatement ::
// 'do' Statement 'while' '(' Expression ')' ';'
+ typename FunctionState::LoopScope loop_scope(function_state_);
auto loop =
factory()->NewDoWhileStatement(labels, own_labels, peek_position());
- typename Types::Target target(this, loop);
+ TargetT target(this, loop);
SourceRange body_range;
StatementT body = impl()->NullStatement();
- Expect(Token::DO, CHECK_OK);
+ Consume(Token::DO);
+
+ CheckStackOverflow();
{
SourceRangeScope range_scope(scanner(), &body_range);
- body = ParseStatement(nullptr, nullptr, CHECK_OK);
+ body = ParseStatement(nullptr, nullptr);
}
- Expect(Token::WHILE, CHECK_OK);
- Expect(Token::LPAREN, CHECK_OK);
+ Expect(Token::WHILE);
+ Expect(Token::LPAREN);
- ExpressionT cond = ParseExpression(CHECK_OK);
- Expect(Token::RPAREN, CHECK_OK);
+ ExpressionT cond = ParseExpression();
+ Expect(Token::RPAREN);
// Allow do-statements to be terminated with and without
// semi-colons. This allows code such as 'do;while(0)return' to
@@ -5488,23 +5009,24 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseDoWhileStatement(
template <typename Impl>
typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseWhileStatement(
ZonePtrList<const AstRawString>* labels,
- ZonePtrList<const AstRawString>* own_labels, bool* ok) {
+ ZonePtrList<const AstRawString>* own_labels) {
// WhileStatement ::
// 'while' '(' Expression ')' Statement
+ typename FunctionState::LoopScope loop_scope(function_state_);
auto loop = factory()->NewWhileStatement(labels, own_labels, peek_position());
- typename Types::Target target(this, loop);
+ TargetT target(this, loop);
SourceRange body_range;
StatementT body = impl()->NullStatement();
- Expect(Token::WHILE, CHECK_OK);
- Expect(Token::LPAREN, CHECK_OK);
- ExpressionT cond = ParseExpression(CHECK_OK);
- Expect(Token::RPAREN, CHECK_OK);
+ Consume(Token::WHILE);
+ Expect(Token::LPAREN);
+ ExpressionT cond = ParseExpression();
+ Expect(Token::RPAREN);
{
SourceRangeScope range_scope(scanner(), &body_range);
- body = ParseStatement(nullptr, nullptr, CHECK_OK);
+ body = ParseStatement(nullptr, nullptr);
}
loop->Initialize(cond, body);
@@ -5514,20 +5036,18 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseWhileStatement(
}
template <typename Impl>
-typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseThrowStatement(
- bool* ok) {
+typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseThrowStatement() {
// ThrowStatement ::
// 'throw' Expression ';'
- Expect(Token::THROW, CHECK_OK);
+ Consume(Token::THROW);
int pos = position();
if (scanner()->HasLineTerminatorBeforeNext()) {
ReportMessage(MessageTemplate::kNewlineAfterThrow);
- *ok = false;
return impl()->NullStatement();
}
- ExpressionT exception = ParseExpression(CHECK_OK);
- ExpectSemicolon(CHECK_OK);
+ ExpressionT exception = ParseExpression();
+ ExpectSemicolon();
StatementT stmt = impl()->NewThrowStatement(exception, pos);
impl()->RecordThrowSourceRange(stmt, end_position());
@@ -5537,7 +5057,7 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseThrowStatement(
template <typename Impl>
typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseSwitchStatement(
- ZonePtrList<const AstRawString>* labels, bool* ok) {
+ ZonePtrList<const AstRawString>* labels) {
// SwitchStatement ::
// 'switch' '(' Expression ')' '{' CaseClause* '}'
// CaseClause ::
@@ -5546,10 +5066,10 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseSwitchStatement(
int switch_pos = peek_position();
- Expect(Token::SWITCH, CHECK_OK);
- Expect(Token::LPAREN, CHECK_OK);
- ExpressionT tag = ParseExpression(CHECK_OK);
- Expect(Token::RPAREN, CHECK_OK);
+ Consume(Token::SWITCH);
+ Expect(Token::LPAREN);
+ ExpressionT tag = ParseExpression();
+ Expect(Token::RPAREN);
auto switch_statement =
factory()->NewSwitchStatement(labels, tag, switch_pos);
@@ -5558,38 +5078,41 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseSwitchStatement(
BlockState cases_block_state(zone(), &scope_);
scope()->set_start_position(switch_pos);
scope()->SetNonlinear();
- typename Types::Target target(this, switch_statement);
+ TargetT target(this, switch_statement);
bool default_seen = false;
- Expect(Token::LBRACE, CHECK_OK);
+ Expect(Token::LBRACE);
while (peek() != Token::RBRACE) {
// An empty label indicates the default case.
ExpressionT label = impl()->NullExpression();
+ StatementListT statements(pointer_buffer());
SourceRange clause_range;
- SourceRangeScope range_scope(scanner(), &clause_range);
- if (Check(Token::CASE)) {
- label = ParseExpression(CHECK_OK);
- } else {
- Expect(Token::DEFAULT, CHECK_OK);
- if (default_seen) {
- ReportMessage(MessageTemplate::kMultipleDefaultsInSwitch);
- *ok = false;
- return impl()->NullStatement();
+ {
+ SourceRangeScope range_scope(scanner(), &clause_range);
+ if (Check(Token::CASE)) {
+ label = ParseExpression();
+ } else {
+ Expect(Token::DEFAULT);
+ if (default_seen) {
+ ReportMessage(MessageTemplate::kMultipleDefaultsInSwitch);
+ return impl()->NullStatement();
+ }
+ default_seen = true;
+ }
+ Expect(Token::COLON);
+ while (peek() != Token::CASE && peek() != Token::DEFAULT &&
+ peek() != Token::RBRACE) {
+ StatementT stat = ParseStatementListItem();
+ if (impl()->IsNull(stat)) return stat;
+ if (stat->IsEmptyStatement()) continue;
+ statements.Add(stat);
}
- default_seen = true;
- }
- Expect(Token::COLON, CHECK_OK);
- StatementListT statements = impl()->NewStatementList(5);
- while (peek() != Token::CASE && peek() != Token::DEFAULT &&
- peek() != Token::RBRACE) {
- StatementT stat = ParseStatementListItem(CHECK_OK);
- statements->Add(stat, zone());
}
auto clause = factory()->NewCaseClause(label, statements);
- impl()->RecordCaseClauseSourceRange(clause, range_scope.Finalize());
+ impl()->RecordCaseClauseSourceRange(clause, clause_range);
switch_statement->cases()->Add(clause, zone());
}
- Expect(Token::RBRACE, CHECK_OK);
+ Expect(Token::RBRACE);
int end_pos = end_position();
scope()->set_end_position(end_pos);
@@ -5603,8 +5126,7 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseSwitchStatement(
}
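
A sketch of the clause loop above: statements accumulate until the next 'case', 'default', or '}', and a second default clause is rejected:

    switch (2 as number) {
      case 1:
        break;
      default:    // one default only; another would be kMultipleDefaultsInSwitch
        break;
    }
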
template <typename Impl>
-typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseTryStatement(
- bool* ok) {
+typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseTryStatement() {
// TryStatement ::
// 'try' Block Catch
// 'try' Block Finally
@@ -5616,22 +5138,21 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseTryStatement(
// Finally ::
// 'finally' Block
- Expect(Token::TRY, CHECK_OK);
+ Consume(Token::TRY);
int pos = position();
- BlockT try_block = ParseBlock(nullptr, CHECK_OK);
+ BlockT try_block = ParseBlock(nullptr);
CatchInfo catch_info(this);
if (peek() != Token::CATCH && peek() != Token::FINALLY) {
ReportMessage(MessageTemplate::kNoCatchOrFinally);
- *ok = false;
return impl()->NullStatement();
}
SourceRange catch_range, finally_range;
- BlockT catch_block = impl()->NullStatement();
+ BlockT catch_block = impl()->NullBlock();
{
SourceRangeScope catch_range_scope(scanner(), &catch_range);
if (Check(Token::CATCH)) {
@@ -5644,57 +5165,74 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseTryStatement(
{
BlockState catch_block_state(&scope_, catch_info.scope);
-
- catch_block = factory()->NewBlock(16, false);
+ StatementListT catch_statements(pointer_buffer());
// Create a block scope to hold any lexical declarations created
// as part of destructuring the catch parameter.
{
BlockState catch_variable_block_state(zone(), &scope_);
- scope()->set_start_position(scanner()->location().beg_pos);
+ scope()->set_start_position(position());
- // This does not simply call ParsePrimaryExpression to avoid
- // ExpressionFromIdentifier being called in the first
- // branch, which would introduce an unresolved symbol and mess
- // with arrow function names.
if (peek_any_identifier()) {
- catch_info.name =
- ParseIdentifier(kDontAllowRestrictedIdentifiers, CHECK_OK);
+ IdentifierT identifier = ParseNonRestrictedIdentifier();
+ RETURN_IF_PARSE_ERROR;
+ catch_info.variable = impl()->DeclareCatchVariableName(
+ catch_info.scope, identifier);
} else {
- ExpressionClassifier pattern_classifier(this);
- catch_info.pattern = ParseBindingPattern(CHECK_OK);
+ catch_info.variable = catch_info.scope->DeclareCatchVariableName(
+ ast_value_factory()->dot_catch_string());
+ VariableDeclarationParsingScope destructuring(
+ impl(), VariableMode::kLet, nullptr);
+ catch_info.pattern = ParseBindingPattern();
+ RETURN_IF_PARSE_ERROR;
+ catch_statements.Add(impl()->RewriteCatchPattern(&catch_info));
}
- Expect(Token::RPAREN, CHECK_OK);
- impl()->RewriteCatchPattern(&catch_info, CHECK_OK);
- if (!impl()->IsNull(catch_info.init_block)) {
- catch_block->statements()->Add(catch_info.init_block, zone());
+ Expect(Token::RPAREN);
+
+ BlockT inner_block = ParseBlock(nullptr);
+ catch_statements.Add(inner_block);
+
+ // Check for `catch(e) { let e; }` and similar errors.
+ Scope* inner_scope = inner_block->scope();
+ if (inner_scope != nullptr) {
+ const AstRawString* conflict = nullptr;
+ if (impl()->IsNull(catch_info.pattern)) {
+ const AstRawString* name = catch_info.variable->raw_name();
+ if (inner_scope->LookupLocal(name)) conflict = name;
+ } else {
+ conflict = inner_scope->FindVariableDeclaredIn(
+ scope(), VariableMode::kVar);
+ }
+ if (conflict != nullptr) {
+ impl()->ReportVarRedeclarationIn(conflict, inner_scope);
+ }
}
- catch_info.inner_block = ParseBlock(nullptr, CHECK_OK);
- catch_block->statements()->Add(catch_info.inner_block, zone());
- impl()->ValidateCatchBlock(catch_info, CHECK_OK);
scope()->set_end_position(end_position());
+ catch_block = factory()->NewBlock(false, catch_statements);
catch_block->set_scope(scope()->FinalizeBlockScope());
}
}
catch_info.scope->set_end_position(end_position());
} else {
- catch_block = ParseBlock(nullptr, CHECK_OK);
+ catch_block = ParseBlock(nullptr);
}
}
}
- BlockT finally_block = impl()->NullStatement();
- DCHECK(peek() == Token::FINALLY || !impl()->IsNull(catch_block));
+ BlockT finally_block = impl()->NullBlock();
+ DCHECK(has_error() || peek() == Token::FINALLY ||
+ !impl()->IsNull(catch_block));
{
SourceRangeScope range_scope(scanner(), &finally_range);
if (Check(Token::FINALLY)) {
- finally_block = ParseBlock(nullptr, CHECK_OK);
+ finally_block = ParseBlock(nullptr);
}
}
+ RETURN_IF_PARSE_ERROR;
return impl()->RewriteTryStatement(try_block, catch_block, catch_range,
finally_block, finally_range, catch_info,
pos);
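
A sketch of the source shapes handled above, including the redeclaration check on the catch body (the commented line is the `catch (e) { let e; }` error the code guards against):

    try {
      throw new Error("boom");
    } catch (e) {
      // let e;   // conflict with the catch binding: ReportVarRedeclarationIn
      console.log((e as Error).message);
    } finally {
      // at least one of catch/finally is required (kNoCatchOrFinally)
    }
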
@@ -5703,7 +5241,7 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseTryStatement(
template <typename Impl>
typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseForStatement(
ZonePtrList<const AstRawString>* labels,
- ZonePtrList<const AstRawString>* own_labels, bool* ok) {
+ ZonePtrList<const AstRawString>* own_labels) {
// Either a standard for loop
// for (<init>; <cond>; <next>) { ... }
// or a for-each loop
@@ -5711,18 +5249,19 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseForStatement(
//
// We parse the declaration/expression after the 'for (' and only then know
// whether this is a standard for loop or a for-each.
+ typename FunctionState::LoopScope loop_scope(function_state_);
int stmt_pos = peek_position();
ForInfo for_info(this);
- Expect(Token::FOR, CHECK_OK);
- Expect(Token::LPAREN, CHECK_OK);
+ Consume(Token::FOR);
+ Expect(Token::LPAREN);
if (peek() == Token::CONST || (peek() == Token::LET && IsNextLetKeyword())) {
// The initializer contains lexical declarations,
// so create an in-between scope.
BlockState for_state(zone(), &scope_);
- scope()->set_start_position(scanner()->location().beg_pos);
+ scope()->set_start_position(position());
// Also record whether inner functions or evals are found inside
// this loop, as this information is used to simplify the desugaring
@@ -5736,78 +5275,94 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseForStatement(
{
BlockState inner_state(&scope_, inner_block_scope);
ParseVariableDeclarations(kForStatement, &for_info.parsing_result,
- nullptr, CHECK_OK);
+ &for_info.bound_names);
}
DCHECK(IsLexicalVariableMode(for_info.parsing_result.descriptor.mode));
- for_info.position = scanner()->location().beg_pos;
+ for_info.position = position();
if (CheckInOrOf(&for_info.mode)) {
scope()->set_is_hidden();
return ParseForEachStatementWithDeclarations(
- stmt_pos, &for_info, labels, own_labels, inner_block_scope, ok);
+ stmt_pos, &for_info, labels, own_labels, inner_block_scope);
}
- Expect(Token::SEMICOLON, CHECK_OK);
+ Expect(Token::SEMICOLON);
- StatementT init = impl()->BuildInitializationBlock(
- &for_info.parsing_result, &for_info.bound_names, CHECK_OK);
+ // Parse the remaining code in the inner block scope since the declaration
+ // above was parsed there. We'll finalize the unnecessary outer block scope
+ // after parsing the rest of the loop.
+ StatementT result = impl()->NullStatement();
+ inner_block_scope->set_start_position(scope()->start_position());
+ {
+ BlockState inner_state(&scope_, inner_block_scope);
+ StatementT init =
+ impl()->BuildInitializationBlock(&for_info.parsing_result);
- Scope* finalized = inner_block_scope->FinalizeBlockScope();
- // No variable declarations will have been created in inner_block_scope.
+ result = ParseStandardForLoopWithLexicalDeclarations(
+ stmt_pos, init, &for_info, labels, own_labels);
+ }
+ Scope* finalized = scope()->FinalizeBlockScope();
DCHECK_NULL(finalized);
USE(finalized);
- return ParseStandardForLoopWithLexicalDeclarations(
- stmt_pos, init, &for_info, labels, own_labels, ok);
+ return result;
}
StatementT init = impl()->NullStatement();
if (peek() == Token::VAR) {
- ParseVariableDeclarations(kForStatement, &for_info.parsing_result, nullptr,
- CHECK_OK);
+ ParseVariableDeclarations(kForStatement, &for_info.parsing_result,
+ &for_info.bound_names);
DCHECK_EQ(for_info.parsing_result.descriptor.mode, VariableMode::kVar);
for_info.position = scanner()->location().beg_pos;
if (CheckInOrOf(&for_info.mode)) {
return ParseForEachStatementWithDeclarations(stmt_pos, &for_info, labels,
- own_labels, nullptr, ok);
+ own_labels, scope());
}
- init = impl()->BuildInitializationBlock(&for_info.parsing_result, nullptr,
- CHECK_OK);
+ init = impl()->BuildInitializationBlock(&for_info.parsing_result);
} else if (peek() != Token::SEMICOLON) {
// The initializer does not contain declarations.
int lhs_beg_pos = peek_position();
- ExpressionClassifier classifier(this);
- ExpressionT expression = ParseExpressionCoverGrammar(false, CHECK_OK);
- int lhs_end_pos = end_position();
-
- bool is_for_each = CheckInOrOf(&for_info.mode);
- bool is_destructuring = is_for_each && (expression->IsArrayLiteral() ||
- expression->IsObjectLiteral());
-
- if (is_destructuring) {
- ValidateAssignmentPattern(CHECK_OK);
- } else {
- ValidateExpression(CHECK_OK);
+ int lhs_end_pos;
+ bool is_for_each;
+ ExpressionT expression;
+ {
+ ExpressionParsingScope parsing_scope(impl());
+ AcceptINScope scope(this, false);
+ expression = ParseExpressionCoverGrammar();
+ // Initializer is reference followed by in/of.
+ lhs_end_pos = end_position();
+ is_for_each = CheckInOrOf(&for_info.mode);
+ if (is_for_each) {
+ if (expression->IsPattern()) {
+ parsing_scope.ValidatePattern(expression, lhs_beg_pos, lhs_end_pos);
+ } else {
+ expression = parsing_scope.ValidateAndRewriteReference(
+ expression, lhs_beg_pos, lhs_end_pos);
+ }
+ } else {
+ parsing_scope.ValidateExpression();
+ }
}
if (is_for_each) {
return ParseForEachStatementWithoutDeclarations(
stmt_pos, expression, lhs_beg_pos, lhs_end_pos, &for_info, labels,
- own_labels, ok);
+ own_labels);
}
// Initializer is just an expression.
init = factory()->NewExpressionStatement(expression, lhs_beg_pos);
}
- Expect(Token::SEMICOLON, CHECK_OK);
+ Expect(Token::SEMICOLON);
// Standard 'for' loop, we have parsed the initializer at this point.
ExpressionT cond = impl()->NullExpression();
StatementT next = impl()->NullStatement();
StatementT body = impl()->NullStatement();
- ForStatementT loop = ParseStandardForLoop(stmt_pos, labels, own_labels, &cond,
- &next, &body, CHECK_OK);
+ ForStatementT loop =
+ ParseStandardForLoop(stmt_pos, labels, own_labels, &cond, &next, &body);
+ RETURN_IF_PARSE_ERROR;
loop->Initialize(init, cond, next, body);
return loop;
}
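
The three loop forms disambiguated above, as plain source (a sketch):

    for (let i = 0; i < 2; i++) {}             // standard for: init ';' cond ';' next
    for (const k in { a: 1 }) console.log(k);  // for-in, detected by CheckInOrOf
    for (const v of [1, 2]) console.log(v);    // for-of
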
@@ -5816,14 +5371,12 @@ template <typename Impl>
typename ParserBase<Impl>::StatementT
ParserBase<Impl>::ParseForEachStatementWithDeclarations(
int stmt_pos, ForInfo* for_info, ZonePtrList<const AstRawString>* labels,
- ZonePtrList<const AstRawString>* own_labels, Scope* inner_block_scope,
- bool* ok) {
+ ZonePtrList<const AstRawString>* own_labels, Scope* inner_block_scope) {
// Just one declaration followed by in/of.
if (for_info->parsing_result.declarations.size() != 1) {
impl()->ReportMessageAt(for_info->parsing_result.bindings_loc,
MessageTemplate::kForInOfLoopMultiBindings,
ForEachStatement::VisitModeString(for_info->mode));
- *ok = false;
return impl()->NullStatement();
}
if (for_info->parsing_result.first_initializer_loc.IsValid() &&
@@ -5835,80 +5388,66 @@ ParserBase<Impl>::ParseForEachStatementWithDeclarations(
impl()->ReportMessageAt(for_info->parsing_result.first_initializer_loc,
MessageTemplate::kForInOfLoopInitializer,
ForEachStatement::VisitModeString(for_info->mode));
- *ok = false;
return impl()->NullStatement();
}
- // Reset the declaration_kind to ensure proper processing during declaration.
- for_info->parsing_result.descriptor.declaration_kind =
- DeclarationDescriptor::FOR_EACH;
-
BlockT init_block = impl()->RewriteForVarInLegacy(*for_info);
auto loop = factory()->NewForEachStatement(for_info->mode, labels, own_labels,
stmt_pos);
- typename Types::Target target(this, loop);
+ TargetT target(this, loop);
ExpressionT enumerable = impl()->NullExpression();
if (for_info->mode == ForEachStatement::ITERATE) {
- ExpressionClassifier classifier(this);
- enumerable = ParseAssignmentExpression(true, CHECK_OK);
- ValidateExpression(CHECK_OK);
+ AcceptINScope scope(this, true);
+ enumerable = ParseAssignmentExpression();
} else {
- enumerable = ParseExpression(CHECK_OK);
+ enumerable = ParseExpression();
}
- Expect(Token::RPAREN, CHECK_OK);
+ Expect(Token::RPAREN);
- Scope* for_scope = nullptr;
- if (inner_block_scope != nullptr) {
- for_scope = inner_block_scope->outer_scope();
- DCHECK_EQ(for_scope, scope());
- inner_block_scope->set_start_position(scanner()->location().beg_pos);
+ if (IsLexicalVariableMode(for_info->parsing_result.descriptor.mode)) {
+ inner_block_scope->set_start_position(position());
}
ExpressionT each_variable = impl()->NullExpression();
- BlockT body_block = impl()->NullStatement();
+ BlockT body_block = impl()->NullBlock();
{
- BlockState block_state(
- &scope_, inner_block_scope != nullptr ? inner_block_scope : scope_);
+ BlockState block_state(&scope_, inner_block_scope);
SourceRange body_range;
- SourceRangeScope range_scope(scanner(), &body_range);
-
- StatementT body = ParseStatement(nullptr, nullptr, CHECK_OK);
- impl()->RecordIterationStatementSourceRange(loop, range_scope.Finalize());
+ StatementT body = impl()->NullStatement();
+ {
+ SourceRangeScope range_scope(scanner(), &body_range);
+ body = ParseStatement(nullptr, nullptr);
+ }
+ impl()->RecordIterationStatementSourceRange(loop, body_range);
- impl()->DesugarBindingInForEachStatement(for_info, &body_block,
- &each_variable, CHECK_OK);
+ DesugarBindingInForEachStatement(for_info, &body_block, &each_variable);
body_block->statements()->Add(body, zone());
- if (inner_block_scope != nullptr) {
- inner_block_scope->set_end_position(end_position());
- body_block->set_scope(inner_block_scope->FinalizeBlockScope());
+ if (IsLexicalVariableMode(for_info->parsing_result.descriptor.mode)) {
+ scope()->set_end_position(end_position());
+ body_block->set_scope(scope()->FinalizeBlockScope());
}
}
- StatementT final_loop = impl()->InitializeForEachStatement(
- loop, each_variable, enumerable, body_block);
+ loop->Initialize(each_variable, enumerable, body_block);
- init_block =
- impl()->CreateForEachStatementTDZ(init_block, *for_info, CHECK_OK);
-
- if (for_scope != nullptr) {
- for_scope->set_end_position(end_position());
- for_scope = for_scope->FinalizeBlockScope();
- }
+ init_block = impl()->CreateForEachStatementTDZ(init_block, *for_info);
// Parsed for-in loop w/ variable declarations.
if (!impl()->IsNull(init_block)) {
- init_block->statements()->Add(final_loop, zone());
- init_block->set_scope(for_scope);
+ init_block->statements()->Add(loop, zone());
+ if (IsLexicalVariableMode(for_info->parsing_result.descriptor.mode)) {
+ scope()->set_end_position(end_position());
+ init_block->set_scope(scope()->FinalizeBlockScope());
+ }
return init_block;
}
- DCHECK_NULL(for_scope);
- return final_loop;
+ return loop;
}
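
Concretely, the two error paths at the top of this function (a sketch; the commented lines are the rejected forms):

    for (const a of [1, 2]) console.log(a);  // one binding, no initializer: ok
    // for (let a, b of []) {}    // kForInOfLoopMultiBindings
    // for (var a = 0 in {}) {}   // initializer: legacy for-in var only, else error
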
template <typename Impl>
@@ -5916,38 +5455,31 @@ typename ParserBase<Impl>::StatementT
ParserBase<Impl>::ParseForEachStatementWithoutDeclarations(
int stmt_pos, ExpressionT expression, int lhs_beg_pos, int lhs_end_pos,
ForInfo* for_info, ZonePtrList<const AstRawString>* labels,
- ZonePtrList<const AstRawString>* own_labels, bool* ok) {
- // Initializer is reference followed by in/of.
- if (!expression->IsArrayLiteral() && !expression->IsObjectLiteral()) {
- expression = CheckAndRewriteReferenceExpression(
- expression, lhs_beg_pos, lhs_end_pos, MessageTemplate::kInvalidLhsInFor,
- kSyntaxError, CHECK_OK);
- }
-
+ ZonePtrList<const AstRawString>* own_labels) {
auto loop = factory()->NewForEachStatement(for_info->mode, labels, own_labels,
stmt_pos);
- typename Types::Target target(this, loop);
+ TargetT target(this, loop);
ExpressionT enumerable = impl()->NullExpression();
if (for_info->mode == ForEachStatement::ITERATE) {
- ExpressionClassifier classifier(this);
- enumerable = ParseAssignmentExpression(true, CHECK_OK);
- ValidateExpression(CHECK_OK);
+ AcceptINScope scope(this, true);
+ enumerable = ParseAssignmentExpression();
} else {
- enumerable = ParseExpression(CHECK_OK);
+ enumerable = ParseExpression();
}
- Expect(Token::RPAREN, CHECK_OK);
+ Expect(Token::RPAREN);
StatementT body = impl()->NullStatement();
+ SourceRange body_range;
{
- SourceRange body_range;
SourceRangeScope range_scope(scanner(), &body_range);
-
- body = ParseStatement(nullptr, nullptr, CHECK_OK);
- impl()->RecordIterationStatementSourceRange(loop, range_scope.Finalize());
+ body = ParseStatement(nullptr, nullptr);
}
- return impl()->InitializeForEachStatement(loop, expression, enumerable, body);
+ impl()->RecordIterationStatementSourceRange(loop, body_range);
+ RETURN_IF_PARSE_ERROR;
+ loop->Initialize(expression, enumerable, body);
+ return loop;
}
template <typename Impl>
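These hunks also drop the `accept_IN` argument from `ParseAssignmentExpression`; the flag is now installed by an `AcceptINScope` for the duration of the nested parse. Presumably this is a save/restore RAII helper along these lines (a sketch under that assumption, not V8's definition):

// Sketch of an accept-IN RAII scope (assumed semantics): save the current
// flag, install the new value, restore it on destruction.
class AcceptINScopeSketch {
 public:
  AcceptINScopeSketch(bool* flag, bool value) : flag_(flag), saved_(*flag) {
    *flag_ = value;
  }
  ~AcceptINScopeSketch() { *flag_ = saved_; }

 private:
  bool* flag_;
  bool saved_;
};

// Usage: the 'in' operator is accepted while the scope is alive.
void ParseEnumerable(bool* accept_in) {
  AcceptINScopeSketch scope(accept_in, /*value=*/true);
  // ... parse the assignment expression with *accept_in == true ...
}  // previous accept-IN value restored here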
@@ -5955,7 +5487,7 @@ typename ParserBase<Impl>::StatementT
ParserBase<Impl>::ParseStandardForLoopWithLexicalDeclarations(
int stmt_pos, StatementT init, ForInfo* for_info,
ZonePtrList<const AstRawString>* labels,
- ZonePtrList<const AstRawString>* own_labels, bool* ok) {
+ ZonePtrList<const AstRawString>* own_labels) {
// The condition and the next statement of the for loop must be parsed
// in a new scope.
Scope* inner_scope = NewScope(BLOCK_SCOPE);
@@ -5966,8 +5498,9 @@ ParserBase<Impl>::ParseStandardForLoopWithLexicalDeclarations(
{
BlockState block_state(&scope_, inner_scope);
scope()->set_start_position(scanner()->location().beg_pos);
- loop = ParseStandardForLoop(stmt_pos, labels, own_labels, &cond, &next,
- &body, CHECK_OK);
+ loop =
+ ParseStandardForLoop(stmt_pos, labels, own_labels, &cond, &next, &body);
+ RETURN_IF_PARSE_ERROR;
scope()->set_end_position(end_position());
}
@@ -5976,7 +5509,7 @@ ParserBase<Impl>::ParseStandardForLoopWithLexicalDeclarations(
function_state_->contains_function_or_eval()) {
scope()->set_is_hidden();
return impl()->DesugarLexicalBindingsInForStatement(
- loop, init, cond, next, body, inner_scope, *for_info, ok);
+ loop, init, cond, next, body, inner_scope, *for_info);
} else {
inner_scope = inner_scope->FinalizeBlockScope();
DCHECK_NULL(inner_scope);
@@ -6012,25 +5545,25 @@ template <typename Impl>
typename ParserBase<Impl>::ForStatementT ParserBase<Impl>::ParseStandardForLoop(
int stmt_pos, ZonePtrList<const AstRawString>* labels,
ZonePtrList<const AstRawString>* own_labels, ExpressionT* cond,
- StatementT* next, StatementT* body, bool* ok) {
+ StatementT* next, StatementT* body) {
ForStatementT loop = factory()->NewForStatement(labels, own_labels, stmt_pos);
- typename Types::Target target(this, loop);
+ TargetT target(this, loop);
if (peek() != Token::SEMICOLON) {
- *cond = ParseExpression(CHECK_OK);
+ *cond = ParseExpression();
}
- Expect(Token::SEMICOLON, CHECK_OK);
+ Expect(Token::SEMICOLON);
if (peek() != Token::RPAREN) {
- ExpressionT exp = ParseExpression(CHECK_OK);
+ ExpressionT exp = ParseExpression();
*next = factory()->NewExpressionStatement(exp, exp->position());
}
- Expect(Token::RPAREN, CHECK_OK);
+ Expect(Token::RPAREN);
SourceRange body_range;
{
SourceRangeScope range_scope(scanner(), &body_range);
- *body = ParseStatement(nullptr, nullptr, CHECK_OK);
+ *body = ParseStatement(nullptr, nullptr);
}
impl()->RecordIterationStatementSourceRange(loop, body_range);
@@ -6038,22 +5571,12 @@ typename ParserBase<Impl>::ForStatementT ParserBase<Impl>::ParseStandardForLoop(
}
template <typename Impl>
-void ParserBase<Impl>::MarkLoopVariableAsAssigned(
- Scope* scope, Variable* var,
- typename DeclarationDescriptor::Kind declaration_kind) {
- if (!IsLexicalVariableMode(var->mode()) &&
- (!scope->is_function_scope() ||
- declaration_kind == DeclarationDescriptor::FOR_EACH)) {
- var->set_maybe_assigned();
- }
-}
-
-template <typename Impl>
typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseForAwaitStatement(
ZonePtrList<const AstRawString>* labels,
- ZonePtrList<const AstRawString>* own_labels, bool* ok) {
+ ZonePtrList<const AstRawString>* own_labels) {
// for await '(' ForDeclaration of AssignmentExpression ')'
DCHECK(is_async_function());
+ typename FunctionState::LoopScope loop_scope(function_state_);
int stmt_pos = peek_position();
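In the hunk below, `ParseForAwaitStatement` opens a `FunctionState::LoopScope` and pre-reserves two suspend points on the enclosing async function, per the comment there: one for the iterator's next() and one for return(). The count accumulated this way is what `set_suspend_count()` later stamps onto the finished FunctionLiteral in parser.cc. A minimal sketch of that bookkeeping (names hypothetical):

// Hypothetical sketch: each AddSuspend() reserves one resume point for the
// bytecode generator; during parsing they are only counted.
class FunctionStateSketch {
 public:
  void AddSuspend() { ++suspend_count_; }
  int suspend_count() const { return suspend_count_; }

 private:
  int suspend_count_ = 0;
};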
@@ -6062,14 +5585,19 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseForAwaitStatement(
// Create an in-between scope for let-bound iteration variables.
BlockState for_state(zone(), &scope_);
- Expect(Token::FOR, CHECK_OK);
- Expect(Token::AWAIT, CHECK_OK);
- Expect(Token::LPAREN, CHECK_OK);
+ Expect(Token::FOR);
+ Expect(Token::AWAIT);
+ Expect(Token::LPAREN);
scope()->set_start_position(scanner()->location().beg_pos);
scope()->set_is_hidden();
- auto loop = factory()->NewForOfStatement(labels, own_labels, stmt_pos);
- typename Types::Target target(this, loop);
+ auto loop = factory()->NewForOfStatement(labels, own_labels, stmt_pos,
+ IteratorType::kAsync);
+ // Two suspends: one for next() and one for return()
+ function_state_->AddSuspend();
+ function_state_->AddSuspend();
+
+ TargetT target(this, loop);
ExpressionT each_variable = impl()->NullExpression();
@@ -6088,7 +5616,7 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseForAwaitStatement(
{
BlockState inner_state(&scope_, inner_block_scope);
ParseVariableDeclarations(kForStatement, &for_info.parsing_result,
- nullptr, CHECK_OK);
+ &for_info.bound_names);
}
for_info.position = scanner()->location().beg_pos;
@@ -6097,7 +5625,6 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseForAwaitStatement(
impl()->ReportMessageAt(for_info.parsing_result.bindings_loc,
MessageTemplate::kForInOfLoopMultiBindings,
"for-await-of");
- *ok = false;
return impl()->NullStatement();
}
@@ -6106,7 +5633,6 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseForAwaitStatement(
impl()->ReportMessageAt(for_info.parsing_result.first_initializer_loc,
MessageTemplate::kForInOfLoopInitializer,
"for-await-of");
- *ok = false;
return impl()->NullStatement();
}
} else {
@@ -6115,33 +5641,29 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseForAwaitStatement(
// Statement
int lhs_beg_pos = peek_position();
BlockState inner_state(&scope_, inner_block_scope);
- ExpressionClassifier classifier(this);
- ExpressionT lhs = each_variable = ParseLeftHandSideExpression(CHECK_OK);
+ ExpressionParsingScope parsing_scope(impl());
+ ExpressionT lhs = each_variable = ParseLeftHandSideExpression();
int lhs_end_pos = end_position();
- if (lhs->IsArrayLiteral() || lhs->IsObjectLiteral()) {
- ValidateAssignmentPattern(CHECK_OK);
+ if (lhs->IsPattern()) {
+ parsing_scope.ValidatePattern(lhs, lhs_beg_pos, lhs_end_pos);
} else {
- ValidateExpression(CHECK_OK);
- each_variable = CheckAndRewriteReferenceExpression(
- lhs, lhs_beg_pos, lhs_end_pos, MessageTemplate::kInvalidLhsInFor,
- kSyntaxError, CHECK_OK);
+ each_variable = parsing_scope.ValidateAndRewriteReference(
+ lhs, lhs_beg_pos, lhs_end_pos);
}
}
- ExpectContextualKeyword(Token::OF, CHECK_OK);
- int each_keyword_pos = scanner()->location().beg_pos;
+ ExpectContextualKeyword(ast_value_factory()->of_string());
const bool kAllowIn = true;
ExpressionT iterable = impl()->NullExpression();
{
- ExpressionClassifier classifier(this);
- iterable = ParseAssignmentExpression(kAllowIn, CHECK_OK);
- ValidateExpression(CHECK_OK);
+ AcceptINScope scope(this, kAllowIn);
+ iterable = ParseAssignmentExpression();
}
- Expect(Token::RPAREN, CHECK_OK);
+ Expect(Token::RPAREN);
StatementT body = impl()->NullStatement();
{
@@ -6149,16 +5671,16 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseForAwaitStatement(
scope()->set_start_position(scanner()->location().beg_pos);
SourceRange body_range;
- SourceRangeScope range_scope(scanner(), &body_range);
-
- body = ParseStatement(nullptr, nullptr, CHECK_OK);
- scope()->set_end_position(end_position());
- impl()->RecordIterationStatementSourceRange(loop, range_scope.Finalize());
+ {
+ SourceRangeScope range_scope(scanner(), &body_range);
+ body = ParseStatement(nullptr, nullptr);
+ scope()->set_end_position(end_position());
+ }
+ impl()->RecordIterationStatementSourceRange(loop, body_range);
if (has_declarations) {
- BlockT body_block = impl()->NullStatement();
- impl()->DesugarBindingInForEachStatement(&for_info, &body_block,
- &each_variable, CHECK_OK);
+ BlockT body_block = impl()->NullBlock();
+ DesugarBindingInForEachStatement(&for_info, &body_block, &each_variable);
body_block->statements()->Add(body, zone());
body_block->set_scope(scope()->FinalizeBlockScope());
body = body_block;
@@ -6168,103 +5690,85 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseForAwaitStatement(
USE(block_scope);
}
}
- const bool finalize = true;
- StatementT final_loop = impl()->InitializeForOfStatement(
- loop, each_variable, iterable, body, finalize, IteratorType::kAsync,
- each_keyword_pos);
+
+ loop->Initialize(each_variable, iterable, body);
if (!has_declarations) {
Scope* for_scope = scope()->FinalizeBlockScope();
DCHECK_NULL(for_scope);
USE(for_scope);
- return final_loop;
+ return loop;
}
- BlockT init_block = impl()->CreateForEachStatementTDZ(impl()->NullStatement(),
- for_info, CHECK_OK);
+ BlockT init_block =
+ impl()->CreateForEachStatementTDZ(impl()->NullBlock(), for_info);
scope()->set_end_position(end_position());
Scope* for_scope = scope()->FinalizeBlockScope();
// Parsed for-in loop w/ variable declarations.
if (!impl()->IsNull(init_block)) {
- init_block->statements()->Add(final_loop, zone());
+ init_block->statements()->Add(loop, zone());
init_block->set_scope(for_scope);
return init_block;
}
DCHECK_NULL(for_scope);
- return final_loop;
-}
-
-template <typename Impl>
-void ParserBase<Impl>::ObjectLiteralChecker::CheckDuplicateProto(
- Token::Value property) {
- if (property == Token::SMI || property == Token::NUMBER) return;
-
- if (IsProto()) {
- if (has_seen_proto_) {
- this->parser()->classifier()->RecordExpressionError(
- this->scanner()->location(), MessageTemplate::kDuplicateProto);
- return;
- }
- has_seen_proto_ = true;
- }
+ return loop;
}
template <typename Impl>
-void ParserBase<Impl>::ClassLiteralChecker::CheckClassMethodName(
- Token::Value property, ParsePropertyKind type, ParseFunctionFlags flags,
- bool is_static, bool* ok) {
+void ParserBase<Impl>::CheckClassMethodName(IdentifierT name,
+ ParsePropertyKind type,
+ ParseFunctionFlags flags,
+ bool is_static,
+ bool* has_seen_constructor) {
DCHECK(type == ParsePropertyKind::kMethod || IsAccessor(type));
- if (property == Token::SMI || property == Token::NUMBER) return;
+ AstValueFactory* avf = ast_value_factory();
if (is_static) {
- if (IsPrototype()) {
- this->parser()->ReportMessage(MessageTemplate::kStaticPrototype);
- *ok = false;
+ if (impl()->IdentifierEquals(name, avf->prototype_string())) {
+ ReportMessage(MessageTemplate::kStaticPrototype);
return;
}
- } else if (IsConstructor()) {
+ } else if (impl()->IdentifierEquals(name,
+ avf->private_constructor_string())) {
+ ReportMessage(MessageTemplate::kConstructorIsPrivate);
+ return;
+ } else if (impl()->IdentifierEquals(name, avf->constructor_string())) {
if (flags != ParseFunctionFlag::kIsNormal || IsAccessor(type)) {
- MessageTemplate::Template msg =
- (flags & ParseFunctionFlag::kIsGenerator) != 0
- ? MessageTemplate::kConstructorIsGenerator
- : (flags & ParseFunctionFlag::kIsAsync) != 0
- ? MessageTemplate::kConstructorIsAsync
- : MessageTemplate::kConstructorIsAccessor;
- this->parser()->ReportMessage(msg);
- *ok = false;
+ MessageTemplate msg = (flags & ParseFunctionFlag::kIsGenerator) != 0
+ ? MessageTemplate::kConstructorIsGenerator
+ : (flags & ParseFunctionFlag::kIsAsync) != 0
+ ? MessageTemplate::kConstructorIsAsync
+ : MessageTemplate::kConstructorIsAccessor;
+ ReportMessage(msg);
return;
}
- if (has_seen_constructor_) {
- this->parser()->ReportMessage(MessageTemplate::kDuplicateConstructor);
- *ok = false;
+ if (*has_seen_constructor) {
+ ReportMessage(MessageTemplate::kDuplicateConstructor);
return;
}
- has_seen_constructor_ = true;
+ *has_seen_constructor = true;
return;
}
}
template <typename Impl>
-void ParserBase<Impl>::ClassLiteralChecker::CheckClassFieldName(bool is_static,
- bool* ok) {
- if (is_static && IsPrototype()) {
- this->parser()->ReportMessage(MessageTemplate::kStaticPrototype);
- *ok = false;
+void ParserBase<Impl>::CheckClassFieldName(IdentifierT name, bool is_static) {
+ AstValueFactory* avf = ast_value_factory();
+ if (is_static && impl()->IdentifierEquals(name, avf->prototype_string())) {
+ ReportMessage(MessageTemplate::kStaticPrototype);
return;
}
- if (IsConstructor() || IsPrivateConstructor()) {
- this->parser()->ReportMessage(MessageTemplate::kConstructorClassField);
- *ok = false;
+ if (impl()->IdentifierEquals(name, avf->constructor_string()) ||
+ impl()->IdentifierEquals(name, avf->private_constructor_string())) {
+ ReportMessage(MessageTemplate::kConstructorClassField);
return;
}
}
-#undef CHECK_OK
-#undef CHECK_OK_CUSTOM
-#undef CHECK_OK_VOID
+#undef RETURN_IF_PARSE_ERROR
} // namespace internal
} // namespace v8
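The `#undef` above closes out `RETURN_IF_PARSE_ERROR`, the single replacement for the three `CHECK_OK` variants. Its definition lies outside this excerpt; judging from the call sites above it plausibly reduces to something like the following (hypothetical reconstruction, not the actual macro):

// Hypothetical reconstruction from the call sites in these hunks: bail out
// of the current parse function as soon as the scanner has recorded an error.
#define RETURN_IF_PARSE_ERROR \
  if (has_error()) return impl()->NullStatement();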
diff --git a/deps/v8/src/parsing/parser.cc b/deps/v8/src/parsing/parser.cc
index 5687633f2f..d6d55af2b6 100644
--- a/deps/v8/src/parsing/parser.cc
+++ b/deps/v8/src/parsing/parser.cc
@@ -12,14 +12,14 @@
#include "src/ast/ast.h"
#include "src/ast/source-range-ast-visitor.h"
#include "src/bailout-reason.h"
+#include "src/base/overflowing-math.h"
#include "src/base/platform/platform.h"
#include "src/char-predicates-inl.h"
#include "src/compiler-dispatcher/compiler-dispatcher.h"
#include "src/conversions-inl.h"
#include "src/log.h"
-#include "src/messages.h"
+#include "src/message-template.h"
#include "src/objects/scope-info.h"
-#include "src/parsing/duplicate-finder.h"
#include "src/parsing/expression-scope-reparenter.h"
#include "src/parsing/parse-info.h"
#include "src/parsing/rewriter.h"
@@ -43,31 +43,31 @@ FunctionLiteral* Parser::DefaultConstructor(const AstRawString* name,
// Set start and end position to the same value
function_scope->set_start_position(pos);
function_scope->set_end_position(pos);
- ZonePtrList<Statement>* body = nullptr;
+ ScopedPtrList<Statement> body(pointer_buffer());
{
FunctionState function_state(&function_state_, &scope_, function_scope);
- body = new (zone()) ZonePtrList<Statement>(call_super ? 2 : 1, zone());
if (call_super) {
// Create a SuperCallReference and handle in BytecodeGenerator.
auto constructor_args_name = ast_value_factory()->empty_string();
- bool is_duplicate;
bool is_rest = true;
bool is_optional = false;
Variable* constructor_args = function_scope->DeclareParameter(
constructor_args_name, VariableMode::kTemporary, is_optional, is_rest,
- &is_duplicate, ast_value_factory(), pos);
+ ast_value_factory(), pos);
- ZonePtrList<Expression>* args =
- new (zone()) ZonePtrList<Expression>(1, zone());
- Spread* spread_args = factory()->NewSpread(
- factory()->NewVariableProxy(constructor_args), pos, pos);
+ Expression* call;
+ {
+ ScopedPtrList<Expression> args(pointer_buffer());
+ Spread* spread_args = factory()->NewSpread(
+ factory()->NewVariableProxy(constructor_args), pos, pos);
- args->Add(spread_args, zone());
- Expression* super_call_ref = NewSuperCallReference(pos);
- Expression* call = factory()->NewCall(super_call_ref, args, pos);
- body->Add(factory()->NewReturnStatement(call, pos), zone());
+ args.Add(spread_args);
+ Expression* super_call_ref = NewSuperCallReference(pos);
+ call = factory()->NewCall(super_call_ref, args, pos);
+ }
+ body.Add(factory()->NewReturnStatement(call, pos));
}
expected_property_count = function_state.expected_property_count();
@@ -81,28 +81,65 @@ FunctionLiteral* Parser::DefaultConstructor(const AstRawString* name,
return function_literal;
}
-// ----------------------------------------------------------------------------
-// The CHECK_OK macro is a convenient macro to enforce error
-// handling for functions that may fail (by returning !*ok).
-//
-// CAUTION: This macro appends extra statements after a call,
-// thus it must never be used where only a single statement
-// is correct (e.g. an if statement branch w/o braces)!
-
-#define CHECK_OK_VALUE(x) ok); \
- if (!*ok) return x; \
- ((void)0
-#define DUMMY ) // to make indentation work
-#undef DUMMY
-
-#define CHECK_OK CHECK_OK_VALUE(nullptr)
-#define CHECK_OK_VOID CHECK_OK_VALUE(this->Void())
-
-#define CHECK_FAILED /**/); \
- if (failed_) return nullptr; \
- ((void)0
-#define DUMMY ) // to make indentation work
-#undef DUMMY
+void Parser::ReportUnexpectedTokenAt(Scanner::Location location,
+ Token::Value token,
+ MessageTemplate message) {
+ const char* arg = nullptr;
+ switch (token) {
+ case Token::EOS:
+ message = MessageTemplate::kUnexpectedEOS;
+ break;
+ case Token::SMI:
+ case Token::NUMBER:
+ case Token::BIGINT:
+ message = MessageTemplate::kUnexpectedTokenNumber;
+ break;
+ case Token::STRING:
+ message = MessageTemplate::kUnexpectedTokenString;
+ break;
+ case Token::PRIVATE_NAME:
+ case Token::IDENTIFIER:
+ message = MessageTemplate::kUnexpectedTokenIdentifier;
+ break;
+ case Token::AWAIT:
+ case Token::ENUM:
+ message = MessageTemplate::kUnexpectedReserved;
+ break;
+ case Token::LET:
+ case Token::STATIC:
+ case Token::YIELD:
+ case Token::FUTURE_STRICT_RESERVED_WORD:
+ message = is_strict(language_mode())
+ ? MessageTemplate::kUnexpectedStrictReserved
+ : MessageTemplate::kUnexpectedTokenIdentifier;
+ break;
+ case Token::TEMPLATE_SPAN:
+ case Token::TEMPLATE_TAIL:
+ message = MessageTemplate::kUnexpectedTemplateString;
+ break;
+ case Token::ESCAPED_STRICT_RESERVED_WORD:
+ case Token::ESCAPED_KEYWORD:
+ message = MessageTemplate::kInvalidEscapedReservedWord;
+ break;
+ case Token::ILLEGAL:
+ if (scanner()->has_error()) {
+ message = scanner()->error();
+ location = scanner()->error_location();
+ } else {
+ message = MessageTemplate::kInvalidOrUnexpectedToken;
+ }
+ break;
+ case Token::REGEXP_LITERAL:
+ message = MessageTemplate::kUnexpectedTokenRegExp;
+ break;
+ default:
+ const char* name = Token::String(token);
+ DCHECK_NOT_NULL(name);
+ arg = name;
+ break;
+ }
+ ReportMessageAt(location, message, arg);
+}
// ----------------------------------------------------------------------------
// Implementation of Parser
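The deleted `CHECK_OK` block above is worth decoding, since half the removed lines in this diff are uses of it. The macro was written where a call's final `ok` argument would go: `CHECK_OK_VALUE(x)` supplies `ok)` to close the argument list, injects the early return, and leaves a dangling `((void)0` that the call site's own closing parenthesis and semicolon complete. Expanded by hand:

// Call site as written in the old code:
//   Expect(Token::RPAREN, CHECK_OK);
// After preprocessing, with CHECK_OK == CHECK_OK_VALUE(nullptr):
//   Expect(Token::RPAREN, ok);
//   if (!*ok) return nullptr;
//   ((void)0);
// This multi-statement expansion is why the original comment warns against
// using it where only a single statement is legal, e.g. an unbraced if branch.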
@@ -124,7 +161,7 @@ bool Parser::ShortcutNumericLiteralBinaryExpression(Expression** x,
*x = factory()->NewNumberLiteral(x_val * y_val, pos);
return true;
case Token::DIV:
- *x = factory()->NewNumberLiteral(x_val / y_val, pos);
+ *x = factory()->NewNumberLiteral(base::Divide(x_val, y_val), pos);
return true;
case Token::BIT_OR: {
int value = DoubleToInt32(x_val) | DoubleToInt32(y_val);
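Constant folding of `/` now routes through `base::Divide` from the newly included src/base/overflowing-math.h. The helper's definition is outside this diff; a likely motivation is keeping the folded result bit-identical to JavaScript's IEEE-754 semantics even under compiler modes that relax native double division. A hedged sketch of an explicitly IEEE-style divide, under that assumption:

#include <cmath>
#include <limits>

// Sketch only -- not the real base::Divide. Makes the x/0 and 0/0 cases
// explicit instead of relying on the compiler's handling of '/'.
double DivideSketch(double x, double y) {
  if (y != 0.0) return x / y;
  if (x == 0.0 || std::isnan(x)) {
    return std::numeric_limits<double>::quiet_NaN();  // 0/0 and NaN/0
  }
  // Nonzero x over a signed zero: signed infinity.
  bool negative = std::signbit(x) != std::signbit(y);
  double inf = std::numeric_limits<double>::infinity();
  return negative ? -inf : inf;
}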
@@ -200,6 +237,7 @@ bool Parser::CollapseNaryExpression(Expression** x, Expression* y,
// TODO(leszeks): Do some literal collapsing here if we're appending Smi or
// String literals.
nary->AddSubsequent(y, pos);
+ nary->clear_parenthesized();
AppendNaryOperationSourceRange(nary, range);
return true;
@@ -232,12 +270,11 @@ Expression* Parser::BuildUnaryExpression(Expression* expression,
}
Expression* Parser::NewThrowError(Runtime::FunctionId id,
- MessageTemplate::Template message,
+ MessageTemplate message,
const AstRawString* arg, int pos) {
- ZonePtrList<Expression>* args =
- new (zone()) ZonePtrList<Expression>(2, zone());
- args->Add(factory()->NewSmiLiteral(message, pos), zone());
- args->Add(factory()->NewStringLiteral(arg, pos), zone());
+ ScopedPtrList<Expression> args(pointer_buffer());
+ args.Add(factory()->NewSmiLiteral(static_cast<int>(message), pos));
+ args.Add(factory()->NewStringLiteral(arg, pos));
CallRuntime* call_constructor = factory()->NewCallRuntime(id, args, pos);
return factory()->NewThrow(call_constructor, pos);
}
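`NewThrowError` here (like `DefaultConstructor` above) swaps a zone-allocated `ZonePtrList<Expression>` for a `ScopedPtrList` over the parser's shared `pointer_buffer()`. The apparent idea: entries are appended to the shared backing vector while the list is alive, and the vector is truncated back when the list goes out of scope, so short-lived argument lists no longer allocate. A compilable sketch under that assumption:

#include <cassert>
#include <vector>

// Sketch of the assumed ScopedPtrList behaviour: a window onto a shared
// backing vector that restores the vector's length on destruction.
template <typename T>
class ScopedPtrListSketch {
 public:
  explicit ScopedPtrListSketch(std::vector<T*>* buffer)
      : buffer_(buffer), start_(buffer->size()) {}
  ~ScopedPtrListSketch() { buffer_->resize(start_); }

  void Add(T* value) { buffer_->push_back(value); }
  int length() const { return static_cast<int>(buffer_->size() - start_); }
  T* at(int i) const { return (*buffer_)[start_ + i]; }

 private:
  std::vector<T*>* buffer_;
  size_t start_;
};

int main() {
  std::vector<int*> buffer;  // stands in for the parser's pointer_buffer()
  int a = 1, b = 2;
  {
    ScopedPtrListSketch<int> args(&buffer);
    args.Add(&a);
    args.Add(&b);
    assert(args.length() == 2);
  }
  assert(buffer.empty());  // truncated back on scope exit
}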
@@ -271,12 +308,12 @@ Expression* Parser::NewTargetExpression(int pos) {
}
Expression* Parser::ImportMetaExpression(int pos) {
- return factory()->NewCallRuntime(
- Runtime::kInlineGetImportMetaObject,
- new (zone()) ZonePtrList<Expression>(0, zone()), pos);
+ ScopedPtrList<Expression> args(pointer_buffer());
+ return factory()->NewCallRuntime(Runtime::kInlineGetImportMetaObject, args,
+ pos);
}
-Literal* Parser::ExpressionFromLiteral(Token::Value token, int pos) {
+Expression* Parser::ExpressionFromLiteral(Token::Value token, int pos) {
switch (token) {
case Token::NULL_LITERAL:
return factory()->NewNullLiteral(pos);
@@ -295,15 +332,18 @@ Literal* Parser::ExpressionFromLiteral(Token::Value token, int pos) {
case Token::BIGINT:
return factory()->NewBigIntLiteral(
AstBigInt(scanner()->CurrentLiteralAsCString(zone())), pos);
+ case Token::STRING: {
+ return factory()->NewStringLiteral(GetSymbol(), pos);
+ }
default:
DCHECK(false);
}
- return nullptr;
+ return FailureExpression();
}
Expression* Parser::NewV8Intrinsic(const AstRawString* name,
- ZonePtrList<Expression>* args, int pos,
- bool* ok) {
+ const ScopedPtrList<Expression>& args,
+ int pos) {
if (extension_ != nullptr) {
// The extension structures are only accessible while parsing the
// very first time, not when reparsing because of lazy compilation.
@@ -318,26 +358,11 @@ Expression* Parser::NewV8Intrinsic(const AstRawString* name,
// Check for possible name clash.
DCHECK_EQ(Context::kNotFound,
Context::IntrinsicIndexForName(name->raw_data(), name->length()));
- // Check for built-in IS_VAR macro.
- if (function->function_id == Runtime::kIS_VAR) {
- DCHECK_EQ(Runtime::RUNTIME, function->intrinsic_type);
- // %IS_VAR(x) evaluates to x if x is a variable,
- // leads to a parse error otherwise. Could be implemented as an
- // inline function %_IS_VAR(x) to eliminate this special case.
- if (args->length() == 1 && args->at(0)->AsVariableProxy() != nullptr) {
- return args->at(0);
- } else {
- ReportMessage(MessageTemplate::kNotIsvar);
- *ok = false;
- return nullptr;
- }
- }
// Check that the expected number of arguments are being passed.
- if (function->nargs != -1 && function->nargs != args->length()) {
+ if (function->nargs != -1 && function->nargs != args.length()) {
ReportMessage(MessageTemplate::kRuntimeWrongNumArgs);
- *ok = false;
- return nullptr;
+ return FailureExpression();
}
return factory()->NewCallRuntime(function, args, pos);
@@ -349,8 +374,7 @@ Expression* Parser::NewV8Intrinsic(const AstRawString* name,
// Check that the function is defined.
if (context_index == Context::kNotFound) {
ReportMessage(MessageTemplate::kNotDefined, name);
- *ok = false;
- return nullptr;
+ return FailureExpression();
}
return factory()->NewCallRuntime(context_index, args, pos);
@@ -363,15 +387,16 @@ Parser::Parser(ParseInfo* info)
info->runtime_call_stats(), info->logger(),
info->script().is_null() ? -1 : info->script()->id(),
info->is_module(), true),
- scanner_(info->unicode_cache(), info->character_stream(),
- info->is_module()),
+ info_(info),
+ scanner_(info->character_stream(), info->is_module()),
preparser_zone_(info->zone()->allocator(), ZONE_NAME),
reusable_preparser_(nullptr),
mode_(PARSE_EAGERLY), // Lazy mode must be set explicitly.
source_range_map_(info->source_range_map()),
target_stack_(nullptr),
total_preparse_skipped_(0),
- consumed_preparsed_scope_data_(info->consumed_preparsed_scope_data()),
+ consumed_preparse_data_(info->consumed_preparse_data()),
+ preparse_data_buffer_(),
parameters_end_pos_(info->parameters_end_pos()) {
// Even though we were passed ParseInfo, we should not store it in
// Parser - this makes sure that Isolate is not accidentally accessed via
@@ -387,21 +412,22 @@ Parser::Parser(ParseInfo* info)
// of functions without an outer context when setting a breakpoint through
// Debug::FindSharedFunctionInfoInScript
// We also compile eagerly for kProduceExhaustiveCodeCache.
- bool can_compile_lazily = FLAG_lazy && !info->is_eager();
+ bool can_compile_lazily = info->allow_lazy_compile() && !info->is_eager();
set_default_eager_compile_hint(can_compile_lazily
? FunctionLiteral::kShouldLazyCompile
: FunctionLiteral::kShouldEagerCompile);
- allow_lazy_ = FLAG_lazy && info->allow_lazy_parsing() && !info->is_native() &&
- info->extension() == nullptr && can_compile_lazily;
- set_allow_natives(FLAG_allow_natives_syntax || info->is_native());
- set_allow_harmony_do_expressions(FLAG_harmony_do_expressions);
- set_allow_harmony_public_fields(FLAG_harmony_public_fields);
- set_allow_harmony_static_fields(FLAG_harmony_static_fields);
- set_allow_harmony_dynamic_import(FLAG_harmony_dynamic_import);
- set_allow_harmony_import_meta(FLAG_harmony_import_meta);
- set_allow_harmony_numeric_separator(FLAG_harmony_numeric_separator);
- set_allow_harmony_private_fields(FLAG_harmony_private_fields);
+ allow_lazy_ = info->allow_lazy_compile() && info->allow_lazy_parsing() &&
+ !info->is_native() && info->extension() == nullptr &&
+ can_compile_lazily;
+ set_allow_natives(info->allow_natives_syntax() || info->is_native());
+ set_allow_harmony_public_fields(info->allow_harmony_public_fields());
+ set_allow_harmony_static_fields(info->allow_harmony_static_fields());
+ set_allow_harmony_dynamic_import(info->allow_harmony_dynamic_import());
+ set_allow_harmony_import_meta(info->allow_harmony_import_meta());
+ set_allow_harmony_numeric_separator(info->allow_harmony_numeric_separator());
+ set_allow_harmony_private_fields(info->allow_harmony_private_fields());
+ set_allow_harmony_private_methods(info->allow_harmony_private_methods());
for (int feature = 0; feature < v8::Isolate::kUseCounterFeatureCount;
++feature) {
use_counts_[feature] = 0;
@@ -411,8 +437,6 @@ Parser::Parser(ParseInfo* info)
void Parser::InitializeEmptyScopeChain(ParseInfo* info) {
DCHECK_NULL(original_scope_);
DCHECK_NULL(info->script_scope());
- // TODO(wingo): Add an outer SCRIPT_SCOPE corresponding to the native
- // context, which will have the "this" binding for script scopes.
DeclarationScope* script_scope = NewScriptScope();
info->set_script_scope(script_scope);
original_scope_ = script_scope;
@@ -420,14 +444,15 @@ void Parser::InitializeEmptyScopeChain(ParseInfo* info) {
void Parser::DeserializeScopeChain(
Isolate* isolate, ParseInfo* info,
- MaybeHandle<ScopeInfo> maybe_outer_scope_info) {
+ MaybeHandle<ScopeInfo> maybe_outer_scope_info,
+ Scope::DeserializationMode mode) {
InitializeEmptyScopeChain(info);
Handle<ScopeInfo> outer_scope_info;
if (maybe_outer_scope_info.ToHandle(&outer_scope_info)) {
DCHECK(ThreadId::Current().Equals(isolate->thread_id()));
original_scope_ = Scope::DeserializeScopeChain(
isolate, zone(), *outer_scope_info, info->script_scope(),
- ast_value_factory(), Scope::DeserializationMode::kScopesOnly);
+ ast_value_factory(), mode);
}
}
@@ -436,10 +461,11 @@ namespace {
void MaybeResetCharacterStream(ParseInfo* info, FunctionLiteral* literal) {
// Don't reset the character stream if there is an asm.js module since it will
// be used again by the asm-parser.
- if (!FLAG_stress_validate_asm &&
- (literal == nullptr || !literal->scope()->ContainsAsmModule())) {
- info->ResetCharacterStream();
+ if (info->contains_asm_module()) {
+ if (FLAG_stress_validate_asm) return;
+ if (literal != nullptr && literal->scope()->ContainsAsmModule()) return;
}
+ info->ResetCharacterStream();
}
void MaybeProcessSourceRanges(ParseInfo* parse_info, Expression* root,
@@ -469,9 +495,13 @@ FunctionLiteral* Parser::ParseProgram(Isolate* isolate, ParseInfo* info) {
if (V8_UNLIKELY(FLAG_log_function_events)) timer.Start();
// Initialize parser state.
- DeserializeScopeChain(isolate, info, info->maybe_outer_scope_info());
+ DeserializeScopeChain(isolate, info, info->maybe_outer_scope_info(),
+ Scope::DeserializationMode::kIncludingVariables);
scanner_.Initialize();
+ if (FLAG_harmony_hashbang && !info->is_eval()) {
+ scanner_.SkipHashBang();
+ }
FunctionLiteral* result = DoParseProgram(isolate, info);
MaybeResetCharacterStream(info, result);
MaybeProcessSourceRanges(info, result, stack_limit_);
@@ -481,7 +511,7 @@ FunctionLiteral* Parser::ParseProgram(Isolate* isolate, ParseInfo* info) {
if (V8_UNLIKELY(FLAG_log_function_events) && result != nullptr) {
double ms = timer.Elapsed().InMillisecondsF();
const char* event_name = "parse-eval";
- Script* script = *info->script();
+ Script script = *info->script();
int start = -1;
int end = -1;
if (!info->is_eval()) {
@@ -524,78 +554,80 @@ FunctionLiteral* Parser::DoParseProgram(Isolate* isolate, ParseInfo* info) {
scope->set_start_position(0);
FunctionState function_state(&function_state_, &scope_, scope);
- ZonePtrList<Statement>* body =
- new (zone()) ZonePtrList<Statement>(16, zone());
- bool ok = true;
+ ScopedPtrList<Statement> body(pointer_buffer());
int beg_pos = scanner()->location().beg_pos;
if (parsing_module_) {
DCHECK(info->is_module());
// Declare the special module parameter.
auto name = ast_value_factory()->empty_string();
- bool is_duplicate = false;
bool is_rest = false;
bool is_optional = false;
+ VariableMode mode = VariableMode::kVar;
+ bool was_added;
+ scope->DeclareLocal(name, mode, PARAMETER_VARIABLE, &was_added,
+ Variable::DefaultInitializationFlag(mode));
+ DCHECK(was_added);
auto var = scope->DeclareParameter(name, VariableMode::kVar, is_optional,
- is_rest, &is_duplicate,
- ast_value_factory(), beg_pos);
- DCHECK(!is_duplicate);
+ is_rest, ast_value_factory(), beg_pos);
var->AllocateTo(VariableLocation::PARAMETER, 0);
PrepareGeneratorVariables();
Expression* initial_yield =
BuildInitialYield(kNoSourcePosition, kGeneratorFunction);
- body->Add(
- factory()->NewExpressionStatement(initial_yield, kNoSourcePosition),
- zone());
-
- ParseModuleItemList(body, &ok);
- ok = ok && module()->Validate(this->scope()->AsModuleScope(),
- pending_error_handler(), zone());
+ body.Add(
+ factory()->NewExpressionStatement(initial_yield, kNoSourcePosition));
+
+ ParseModuleItemList(&body);
+ if (!has_error() &&
+ !module()->Validate(this->scope()->AsModuleScope(),
+ pending_error_handler(), zone())) {
+ scanner()->set_parser_error();
+ }
} else if (info->is_wrapped_as_function()) {
- ParseWrapped(isolate, info, body, scope, zone(), &ok);
+ ParseWrapped(isolate, info, &body, scope, zone());
} else {
// Don't count the mode in the use counters--give the program a chance
// to enable script-wide strict mode below.
this->scope()->SetLanguageMode(info->language_mode());
- ParseStatementList(body, Token::EOS, &ok);
+ ParseStatementList(&body, Token::EOS);
}
// The parser will peek but not consume EOS. Our scope logically goes all
// the way to the EOS, though.
- scope->set_end_position(scanner()->peek_location().beg_pos);
+ scope->set_end_position(peek_position());
- if (ok && is_strict(language_mode())) {
- CheckStrictOctalLiteral(beg_pos, scanner()->location().end_pos, &ok);
+ if (is_strict(language_mode())) {
+ CheckStrictOctalLiteral(beg_pos, end_position());
}
- if (ok && is_sloppy(language_mode())) {
+ if (is_sloppy(language_mode())) {
// TODO(littledan): Function bindings on the global object that modify
// pre-existing bindings should be made writable, enumerable and
// nonconfigurable if possible, whereas this code will leave attributes
// unchanged if the property already exists.
InsertSloppyBlockFunctionVarBindings(scope);
}
- if (ok) {
- CheckConflictingVarDeclarations(scope, &ok);
+ // Internalize the ast strings in the case of eval so we can check for
+ // conflicting var declarations with outer scope-info-backed scopes.
+ if (info->is_eval()) {
+ DCHECK(parsing_on_main_thread_);
+ info->ast_value_factory()->Internalize(isolate);
}
+ CheckConflictingVarDeclarations(scope);
- if (ok && info->parse_restriction() == ONLY_SINGLE_FUNCTION_LITERAL) {
- if (body->length() != 1 ||
- !body->at(0)->IsExpressionStatement() ||
- !body->at(0)->AsExpressionStatement()->
- expression()->IsFunctionLiteral()) {
+ if (info->parse_restriction() == ONLY_SINGLE_FUNCTION_LITERAL) {
+ if (body.length() != 1 || !body.at(0)->IsExpressionStatement() ||
+ !body.at(0)
+ ->AsExpressionStatement()
+ ->expression()
+ ->IsFunctionLiteral()) {
ReportMessage(MessageTemplate::kSingleFunctionLiteral);
- ok = false;
}
}
- if (ok) {
- RewriteDestructuringAssignments();
- int parameter_count = parsing_module_ ? 1 : 0;
- result = factory()->NewScriptOrEvalFunctionLiteral(
- scope, body, function_state.expected_property_count(),
- parameter_count);
- result->set_suspend_count(function_state.suspend_count());
- }
+ int parameter_count = parsing_module_ ? 1 : 0;
+ result = factory()->NewScriptOrEvalFunctionLiteral(
+ scope, body, function_state.expected_property_count(), parameter_count);
+ result->set_suspend_count(function_state.suspend_count());
}
info->set_max_function_literal_id(GetLastFunctionLiteralId());
@@ -603,6 +635,7 @@ FunctionLiteral* Parser::DoParseProgram(Isolate* isolate, ParseInfo* info) {
// Make sure the target stack is empty.
DCHECK_NULL(target_stack_);
+ if (has_error()) return nullptr;
return result;
}
@@ -623,8 +656,8 @@ ZonePtrList<const AstRawString>* Parser::PrepareWrappedArguments(
}
void Parser::ParseWrapped(Isolate* isolate, ParseInfo* info,
- ZonePtrList<Statement>* body,
- DeclarationScope* outer_scope, Zone* zone, bool* ok) {
+ ScopedPtrList<Statement>* body,
+ DeclarationScope* outer_scope, Zone* zone) {
DCHECK_EQ(parsing_on_main_thread_, isolate != nullptr);
DCHECK(info->is_wrapped_as_function());
ParsingModeScope parsing_mode(this, PARSE_EAGERLY);
@@ -642,11 +675,11 @@ void Parser::ParseWrapped(Isolate* isolate, ParseInfo* info,
FunctionLiteral* function_literal = ParseFunctionLiteral(
function_name, location, kSkipFunctionNameCheck, kNormalFunction,
kNoSourcePosition, FunctionLiteral::kWrapped, LanguageMode::kSloppy,
- arguments_for_wrapped_function, CHECK_OK_VOID);
+ arguments_for_wrapped_function);
Statement* return_statement = factory()->NewReturnStatement(
function_literal, kNoSourcePosition, kNoSourcePosition);
- body->Add(return_statement, zone);
+ body->Add(return_statement);
}
FunctionLiteral* Parser::ParseFunction(Isolate* isolate, ParseInfo* info,
@@ -660,7 +693,8 @@ FunctionLiteral* Parser::ParseFunction(Isolate* isolate, ParseInfo* info,
base::ElapsedTimer timer;
if (V8_UNLIKELY(FLAG_log_function_events)) timer.Start();
- DeserializeScopeChain(isolate, info, info->maybe_outer_scope_info());
+ DeserializeScopeChain(isolate, info, info->maybe_outer_scope_info(),
+ Scope::DeserializationMode::kIncludingVariables);
DCHECK_EQ(factory()->zone(), info->zone());
// Initialize parser state.
@@ -736,7 +770,6 @@ FunctionLiteral* Parser::DoParseFunction(Isolate* isolate, ParseInfo* info,
is_strict(info->language_mode()));
FunctionLiteral::FunctionType function_type = ComputeFunctionType(info);
FunctionKind kind = info->function_kind();
- bool ok = true;
if (IsArrowFunction(kind)) {
if (IsAsyncFunction(kind)) {
@@ -759,70 +792,56 @@ FunctionLiteral* Parser::DoParseFunction(Isolate* isolate, ParseInfo* info,
SetLanguageMode(scope, info->language_mode());
scope->set_start_position(info->start_position());
- ExpressionClassifier formals_classifier(this);
ParserFormalParameters formals(scope);
- // The outer FunctionState should not contain destructuring assignments.
- DCHECK_EQ(0,
- function_state.destructuring_assignments_to_rewrite().size());
{
+ ParameterDeclarationParsingScope formals_scope(this);
// Parsing patterns as variable reference expression creates
// NewUnresolved references in current scope. Enter arrow function
// scope for formal parameter parsing.
BlockState block_state(&scope_, scope);
if (Check(Token::LPAREN)) {
// '(' StrictFormalParameters ')'
- ParseFormalParameterList(&formals, &ok);
- if (ok) ok = Check(Token::RPAREN);
+ ParseFormalParameterList(&formals);
+ Expect(Token::RPAREN);
} else {
// BindingIdentifier
- ParseFormalParameter(&formals, &ok);
- if (ok) {
- DeclareFormalParameters(formals.scope, formals.params,
- formals.is_simple);
- }
+ ParameterParsingScope scope(impl(), &formals);
+ ParseFormalParameter(&formals);
+ DeclareFormalParameters(&formals);
}
+ formals.duplicate_loc = formals_scope.duplicate_location();
}
- if (ok) {
- if (GetLastFunctionLiteralId() != info->function_literal_id() - 1) {
- // If there were FunctionLiterals in the parameters, we need to
- // renumber them to shift down so the next function literal id for
- // the arrow function is the one requested.
- AstFunctionLiteralIdReindexer reindexer(
- stack_limit_,
- (info->function_literal_id() - 1) - GetLastFunctionLiteralId());
- for (auto p : formals.params) {
- if (p->pattern != nullptr) reindexer.Reindex(p->pattern);
- if (p->initializer != nullptr) reindexer.Reindex(p->initializer);
+ if (GetLastFunctionLiteralId() != info->function_literal_id() - 1) {
+ if (has_error()) return nullptr;
+ // If there were FunctionLiterals in the parameters, we need to
+ // renumber them to shift down so the next function literal id for
+ // the arrow function is the one requested.
+ AstFunctionLiteralIdReindexer reindexer(
+ stack_limit_,
+ (info->function_literal_id() - 1) - GetLastFunctionLiteralId());
+ for (auto p : formals.params) {
+ if (p->pattern != nullptr) reindexer.Reindex(p->pattern);
+ if (p->initializer() != nullptr) {
+ reindexer.Reindex(p->initializer());
}
- ResetFunctionLiteralId();
- SkipFunctionLiterals(info->function_literal_id() - 1);
}
+ ResetFunctionLiteralId();
+ SkipFunctionLiterals(info->function_literal_id() - 1);
+ }
- // Pass `accept_IN=true` to ParseArrowFunctionLiteral --- This should
- // not be observable, or else the preparser would have failed.
- const bool accept_IN = true;
- // Any destructuring assignments in the current FunctionState
- // actually belong to the arrow function itself.
- const int rewritable_length = 0;
- Expression* expression = ParseArrowFunctionLiteral(
- accept_IN, formals, rewritable_length, &ok);
- if (ok) {
- // Scanning must end at the same position that was recorded
- // previously. If not, parsing has been interrupted due to a stack
- // overflow, at which point the partially parsed arrow function
- // concise body happens to be a valid expression. This is a problem
- // only for arrow functions with single expression bodies, since there
- // is no end token such as "}" for normal functions.
- if (scanner()->location().end_pos == info->end_position()) {
- // The pre-parser saw an arrow function here, so the full parser
- // must produce a FunctionLiteral.
- DCHECK(expression->IsFunctionLiteral());
- result = expression->AsFunctionLiteral();
- } else {
- ok = false;
- }
- }
+ Expression* expression = ParseArrowFunctionLiteral(formals);
+ // Scanning must end at the same position that was recorded
+ // previously. If not, parsing has been interrupted due to a stack
+ // overflow, at which point the partially parsed arrow function
+ // concise body happens to be a valid expression. This is a problem
+ // only for arrow functions with single expression bodies, since there
+ // is no end token such as "}" for normal functions.
+ if (scanner()->location().end_pos == info->end_position()) {
+ // The pre-parser saw an arrow function here, so the full parser
+ // must produce a FunctionLiteral.
+ DCHECK(expression->IsFunctionLiteral());
+ result = expression->AsFunctionLiteral();
}
} else if (IsDefaultConstructor(kind)) {
DCHECK_EQ(scope(), outer);
@@ -836,15 +855,15 @@ FunctionLiteral* Parser::DoParseFunction(Isolate* isolate, ParseInfo* info,
result = ParseFunctionLiteral(
raw_name, Scanner::Location::invalid(), kSkipFunctionNameCheck, kind,
kNoSourcePosition, function_type, info->language_mode(),
- arguments_for_wrapped_function, &ok);
+ arguments_for_wrapped_function);
}
- if (ok) {
- result->set_requires_instance_fields_initializer(
- info->requires_instance_fields_initializer());
+ if (has_error()) return nullptr;
+ result->set_requires_instance_members_initializer(
+ info->requires_instance_members_initializer());
+ if (info->is_oneshot_iife()) {
+ result->mark_as_oneshot_iife();
}
- // Make sure the results agree.
- DCHECK(ok == (result != nullptr));
}
// Make sure the target stack is empty.
@@ -854,7 +873,7 @@ FunctionLiteral* Parser::DoParseFunction(Isolate* isolate, ParseInfo* info,
return result;
}
-Statement* Parser::ParseModuleItem(bool* ok) {
+Statement* Parser::ParseModuleItem() {
// ecma262/#prod-ModuleItem
// ModuleItem :
// ImportDeclaration
@@ -864,7 +883,7 @@ Statement* Parser::ParseModuleItem(bool* ok) {
Token::Value next = peek();
if (next == Token::EXPORT) {
- return ParseExportDeclaration(ok);
+ return ParseExportDeclaration();
}
if (next == Token::IMPORT) {
@@ -873,15 +892,15 @@ Statement* Parser::ParseModuleItem(bool* ok) {
Token::Value peek_ahead = PeekAhead();
if ((!allow_harmony_dynamic_import() || peek_ahead != Token::LPAREN) &&
(!allow_harmony_import_meta() || peek_ahead != Token::PERIOD)) {
- ParseImportDeclaration(CHECK_OK);
- return factory()->NewEmptyStatement(kNoSourcePosition);
+ ParseImportDeclaration();
+ return factory()->EmptyStatement();
}
}
- return ParseStatementListItem(ok);
+ return ParseStatementListItem();
}
-void Parser::ParseModuleItemList(ZonePtrList<Statement>* body, bool* ok) {
+void Parser::ParseModuleItemList(ScopedPtrList<Statement>* body) {
// ecma262/#prod-Module
// Module :
// ModuleBody?
@@ -892,24 +911,23 @@ void Parser::ParseModuleItemList(ZonePtrList<Statement>* body, bool* ok) {
DCHECK(scope()->is_module_scope());
while (peek() != Token::EOS) {
- Statement* stat = ParseModuleItem(CHECK_OK_VOID);
- if (stat && !stat->IsEmpty()) {
- body->Add(stat, zone());
- }
+ Statement* stat = ParseModuleItem();
+ if (stat == nullptr) return;
+ if (stat->IsEmptyStatement()) continue;
+ body->Add(stat);
}
}
-
-const AstRawString* Parser::ParseModuleSpecifier(bool* ok) {
+const AstRawString* Parser::ParseModuleSpecifier() {
// ModuleSpecifier :
// StringLiteral
- Expect(Token::STRING, CHECK_OK);
+ Expect(Token::STRING);
return GetSymbol();
}
ZoneChunkList<Parser::ExportClauseData>* Parser::ParseExportClause(
- Scanner::Location* reserved_loc, bool* ok) {
+ Scanner::Location* reserved_loc) {
// ExportClause :
// '{' '}'
// '{' ExportsList '}'
@@ -925,22 +943,22 @@ ZoneChunkList<Parser::ExportClauseData>* Parser::ParseExportClause(
ZoneChunkList<ExportClauseData>* export_data =
new (zone()) ZoneChunkList<ExportClauseData>(zone());
- Expect(Token::LBRACE, CHECK_OK);
+ Expect(Token::LBRACE);
Token::Value name_tok;
while ((name_tok = peek()) != Token::RBRACE) {
// Keep track of the first reserved word encountered in case our
// caller needs to report an error.
if (!reserved_loc->IsValid() &&
- !Token::IsIdentifier(name_tok, LanguageMode::kStrict, false,
- parsing_module_)) {
+ !Token::IsValidIdentifier(name_tok, LanguageMode::kStrict, false,
+ parsing_module_)) {
*reserved_loc = scanner()->location();
}
- const AstRawString* local_name = ParseIdentifierName(CHECK_OK);
+ const AstRawString* local_name = ParsePropertyName();
const AstRawString* export_name = nullptr;
Scanner::Location location = scanner()->location();
- if (CheckContextualKeyword(Token::AS)) {
- export_name = ParseIdentifierName(CHECK_OK);
+ if (CheckContextualKeyword(ast_value_factory()->as_string())) {
+ export_name = ParsePropertyName();
// Set the location to the whole "a as b" string, so that it makes sense
// both for errors due to "a" and for errors due to "b".
location.end_pos = scanner()->location().end_pos;
@@ -950,15 +968,17 @@ ZoneChunkList<Parser::ExportClauseData>* Parser::ParseExportClause(
}
export_data->push_back({export_name, local_name, location});
if (peek() == Token::RBRACE) break;
- Expect(Token::COMMA, CHECK_OK);
+ if (V8_UNLIKELY(!Check(Token::COMMA))) {
+ ReportUnexpectedToken(Next());
+ break;
+ }
}
- Expect(Token::RBRACE, CHECK_OK);
+ Expect(Token::RBRACE);
return export_data;
}
-ZonePtrList<const Parser::NamedImport>* Parser::ParseNamedImports(int pos,
- bool* ok) {
+ZonePtrList<const Parser::NamedImport>* Parser::ParseNamedImports(int pos) {
// NamedImports :
// '{' '}'
// '{' ImportsList '}'
@@ -972,47 +992,45 @@ ZonePtrList<const Parser::NamedImport>* Parser::ParseNamedImports(int pos,
// BindingIdentifier
// IdentifierName 'as' BindingIdentifier
- Expect(Token::LBRACE, CHECK_OK);
+ Expect(Token::LBRACE);
auto result = new (zone()) ZonePtrList<const NamedImport>(1, zone());
while (peek() != Token::RBRACE) {
- const AstRawString* import_name = ParseIdentifierName(CHECK_OK);
+ const AstRawString* import_name = ParsePropertyName();
const AstRawString* local_name = import_name;
Scanner::Location location = scanner()->location();
// In the presence of 'as', the left-side of the 'as' can
// be any IdentifierName. But without 'as', it must be a valid
// BindingIdentifier.
- if (CheckContextualKeyword(Token::AS)) {
- local_name = ParseIdentifierName(CHECK_OK);
+ if (CheckContextualKeyword(ast_value_factory()->as_string())) {
+ local_name = ParsePropertyName();
}
- if (!Token::IsIdentifier(scanner()->current_token(), LanguageMode::kStrict,
- false, parsing_module_)) {
- *ok = false;
+ if (!Token::IsValidIdentifier(scanner()->current_token(),
+ LanguageMode::kStrict, false,
+ parsing_module_)) {
ReportMessage(MessageTemplate::kUnexpectedReserved);
return nullptr;
} else if (IsEvalOrArguments(local_name)) {
- *ok = false;
ReportMessage(MessageTemplate::kStrictEvalArguments);
return nullptr;
}
DeclareVariable(local_name, VariableMode::kConst, kNeedsInitialization,
- position(), CHECK_OK);
+ position());
NamedImport* import =
new (zone()) NamedImport(import_name, local_name, location);
result->Add(import, zone());
if (peek() == Token::RBRACE) break;
- Expect(Token::COMMA, CHECK_OK);
+ Expect(Token::COMMA);
}
- Expect(Token::RBRACE, CHECK_OK);
+ Expect(Token::RBRACE);
return result;
}
-
-void Parser::ParseImportDeclaration(bool* ok) {
+void Parser::ParseImportDeclaration() {
// ImportDeclaration :
// 'import' ImportClause 'from' ModuleSpecifier ';'
// 'import' ModuleSpecifier ';'
@@ -1028,15 +1046,15 @@ void Parser::ParseImportDeclaration(bool* ok) {
// '*' 'as' ImportedBinding
int pos = peek_position();
- Expect(Token::IMPORT, CHECK_OK_VOID);
+ Expect(Token::IMPORT);
Token::Value tok = peek();
// 'import' ModuleSpecifier ';'
if (tok == Token::STRING) {
Scanner::Location specifier_loc = scanner()->peek_location();
- const AstRawString* module_specifier = ParseModuleSpecifier(CHECK_OK_VOID);
- ExpectSemicolon(CHECK_OK_VOID);
+ const AstRawString* module_specifier = ParseModuleSpecifier();
+ ExpectSemicolon();
module()->AddEmptyImport(module_specifier, specifier_loc);
return;
}
@@ -1045,11 +1063,10 @@ void Parser::ParseImportDeclaration(bool* ok) {
const AstRawString* import_default_binding = nullptr;
Scanner::Location import_default_binding_loc;
if (tok != Token::MUL && tok != Token::LBRACE) {
- import_default_binding =
- ParseIdentifier(kDontAllowRestrictedIdentifiers, CHECK_OK_VOID);
+ import_default_binding = ParseNonRestrictedIdentifier();
import_default_binding_loc = scanner()->location();
DeclareVariable(import_default_binding, VariableMode::kConst,
- kNeedsInitialization, pos, CHECK_OK_VOID);
+ kNeedsInitialization, pos);
}
// Parse NameSpaceImport or NamedImports if present.
@@ -1060,30 +1077,28 @@ void Parser::ParseImportDeclaration(bool* ok) {
switch (peek()) {
case Token::MUL: {
Consume(Token::MUL);
- ExpectContextualKeyword(Token::AS, CHECK_OK_VOID);
- module_namespace_binding =
- ParseIdentifier(kDontAllowRestrictedIdentifiers, CHECK_OK_VOID);
+ ExpectContextualKeyword(ast_value_factory()->as_string());
+ module_namespace_binding = ParseNonRestrictedIdentifier();
module_namespace_binding_loc = scanner()->location();
DeclareVariable(module_namespace_binding, VariableMode::kConst,
- kCreatedInitialized, pos, CHECK_OK_VOID);
+ kCreatedInitialized, pos);
break;
}
case Token::LBRACE:
- named_imports = ParseNamedImports(pos, CHECK_OK_VOID);
+ named_imports = ParseNamedImports(pos);
break;
default:
- *ok = false;
ReportUnexpectedToken(scanner()->current_token());
return;
}
}
- ExpectContextualKeyword(Token::FROM, CHECK_OK_VOID);
+ ExpectContextualKeyword(ast_value_factory()->from_string());
Scanner::Location specifier_loc = scanner()->peek_location();
- const AstRawString* module_specifier = ParseModuleSpecifier(CHECK_OK_VOID);
- ExpectSemicolon(CHECK_OK_VOID);
+ const AstRawString* module_specifier = ParseModuleSpecifier();
+ ExpectSemicolon();
// Now that we have all the information, we can make the appropriate
// declarations.
@@ -1119,42 +1134,40 @@ void Parser::ParseImportDeclaration(bool* ok) {
}
}
-
-Statement* Parser::ParseExportDefault(bool* ok) {
+Statement* Parser::ParseExportDefault() {
// Supports the following productions, starting after the 'default' token:
// 'export' 'default' HoistableDeclaration
// 'export' 'default' ClassDeclaration
// 'export' 'default' AssignmentExpression[In] ';'
- Expect(Token::DEFAULT, CHECK_OK);
+ Expect(Token::DEFAULT);
Scanner::Location default_loc = scanner()->location();
ZonePtrList<const AstRawString> local_names(1, zone());
Statement* result = nullptr;
switch (peek()) {
case Token::FUNCTION:
- result = ParseHoistableDeclaration(&local_names, true, CHECK_OK);
+ result = ParseHoistableDeclaration(&local_names, true);
break;
case Token::CLASS:
Consume(Token::CLASS);
- result = ParseClassDeclaration(&local_names, true, CHECK_OK);
+ result = ParseClassDeclaration(&local_names, true);
break;
case Token::ASYNC:
if (PeekAhead() == Token::FUNCTION &&
!scanner()->HasLineTerminatorAfterNext()) {
Consume(Token::ASYNC);
- result = ParseAsyncFunctionDeclaration(&local_names, true, CHECK_OK);
+ result = ParseAsyncFunctionDeclaration(&local_names, true);
break;
}
V8_FALLTHROUGH;
default: {
int pos = position();
- ExpressionClassifier classifier(this);
- Expression* value = ParseAssignmentExpression(true, CHECK_OK);
- ValidateExpression(CHECK_OK);
+ AcceptINScope scope(this, true);
+ Expression* value = ParseAssignmentExpression();
SetFunctionName(value, ast_value_factory()->default_string());
const AstRawString* local_name =
@@ -1163,57 +1176,98 @@ Statement* Parser::ParseExportDefault(bool* ok) {
// It's fine to declare this as VariableMode::kConst because the user has
// no way of writing to it.
- Declaration* decl =
- DeclareVariable(local_name, VariableMode::kConst, pos, CHECK_OK);
- decl->proxy()->var()->set_initializer_position(position());
+ VariableProxy* proxy =
+ DeclareVariable(local_name, VariableMode::kConst, pos);
+ proxy->var()->set_initializer_position(position());
Assignment* assignment = factory()->NewAssignment(
- Token::INIT, decl->proxy(), value, kNoSourcePosition);
+ Token::INIT, proxy, value, kNoSourcePosition);
result = IgnoreCompletion(
factory()->NewExpressionStatement(assignment, kNoSourcePosition));
- ExpectSemicolon(CHECK_OK);
+ ExpectSemicolon();
break;
}
}
- DCHECK_EQ(local_names.length(), 1);
- module()->AddExport(local_names.first(),
- ast_value_factory()->default_string(), default_loc,
- zone());
+ if (result != nullptr) {
+ DCHECK_EQ(local_names.length(), 1);
+ module()->AddExport(local_names.first(),
+ ast_value_factory()->default_string(), default_loc,
+ zone());
+ }
- DCHECK_NOT_NULL(result);
return result;
}
-Statement* Parser::ParseExportDeclaration(bool* ok) {
+const AstRawString* Parser::NextInternalNamespaceExportName() {
+ const char* prefix = ".ns-export";
+ std::string s(prefix);
+ s.append(std::to_string(number_of_named_namespace_exports_++));
+ return ast_value_factory()->GetOneByteString(s.c_str());
+}
+
+void Parser::ParseExportStar() {
+ int pos = position();
+ Consume(Token::MUL);
+
+ if (!FLAG_harmony_namespace_exports ||
+ !PeekContextualKeyword(ast_value_factory()->as_string())) {
+ // 'export' '*' 'from' ModuleSpecifier ';'
+ Scanner::Location loc = scanner()->location();
+ ExpectContextualKeyword(ast_value_factory()->from_string());
+ Scanner::Location specifier_loc = scanner()->peek_location();
+ const AstRawString* module_specifier = ParseModuleSpecifier();
+ ExpectSemicolon();
+ module()->AddStarExport(module_specifier, loc, specifier_loc, zone());
+ return;
+ }
+ if (!FLAG_harmony_namespace_exports) return;
+
+ // 'export' '*' 'as' IdentifierName 'from' ModuleSpecifier ';'
+ //
+ // Desugaring:
+ // export * as x from "...";
+ // ~>
+ // import * as .x from "..."; export {.x as x};
+
+ ExpectContextualKeyword(ast_value_factory()->as_string());
+ const AstRawString* export_name = ParsePropertyName();
+ Scanner::Location export_name_loc = scanner()->location();
+ const AstRawString* local_name = NextInternalNamespaceExportName();
+ Scanner::Location local_name_loc = Scanner::Location::invalid();
+ DeclareVariable(local_name, VariableMode::kConst, kCreatedInitialized, pos);
+
+ ExpectContextualKeyword(ast_value_factory()->from_string());
+ Scanner::Location specifier_loc = scanner()->peek_location();
+ const AstRawString* module_specifier = ParseModuleSpecifier();
+ ExpectSemicolon();
+
+ module()->AddStarImport(local_name, module_specifier, local_name_loc,
+ specifier_loc, zone());
+ module()->AddExport(local_name, export_name, export_name_loc, zone());
+}
+
+Statement* Parser::ParseExportDeclaration() {
// ExportDeclaration:
// 'export' '*' 'from' ModuleSpecifier ';'
+ // 'export' '*' 'as' IdentifierName 'from' ModuleSpecifier ';'
// 'export' ExportClause ('from' ModuleSpecifier)? ';'
// 'export' VariableStatement
// 'export' Declaration
// 'export' 'default' ... (handled in ParseExportDefault)
- Expect(Token::EXPORT, CHECK_OK);
- int pos = position();
-
+ Expect(Token::EXPORT);
Statement* result = nullptr;
ZonePtrList<const AstRawString> names(1, zone());
Scanner::Location loc = scanner()->peek_location();
switch (peek()) {
case Token::DEFAULT:
- return ParseExportDefault(ok);
-
- case Token::MUL: {
- Consume(Token::MUL);
- loc = scanner()->location();
- ExpectContextualKeyword(Token::FROM, CHECK_OK);
- Scanner::Location specifier_loc = scanner()->peek_location();
- const AstRawString* module_specifier = ParseModuleSpecifier(CHECK_OK);
- ExpectSemicolon(CHECK_OK);
- module()->AddStarExport(module_specifier, loc, specifier_loc, zone());
- return factory()->NewEmptyStatement(pos);
- }
+ return ParseExportDefault();
+
+ case Token::MUL:
+ ParseExportStar();
+ return factory()->EmptyStatement();
case Token::LBRACE: {
// There are two cases here:
@@ -1229,19 +1283,18 @@ Statement* Parser::ParseExportDeclaration(bool* ok) {
// non-FromClause case.
Scanner::Location reserved_loc = Scanner::Location::invalid();
ZoneChunkList<ExportClauseData>* export_data =
- ParseExportClause(&reserved_loc, CHECK_OK);
+ ParseExportClause(&reserved_loc);
const AstRawString* module_specifier = nullptr;
Scanner::Location specifier_loc;
- if (CheckContextualKeyword(Token::FROM)) {
+ if (CheckContextualKeyword(ast_value_factory()->from_string())) {
specifier_loc = scanner()->peek_location();
- module_specifier = ParseModuleSpecifier(CHECK_OK);
+ module_specifier = ParseModuleSpecifier();
} else if (reserved_loc.IsValid()) {
// No FromClause, so reserved words are invalid in ExportClause.
- *ok = false;
ReportMessageAt(reserved_loc, MessageTemplate::kUnexpectedReserved);
return nullptr;
}
- ExpectSemicolon(CHECK_OK);
+ ExpectSemicolon();
if (module_specifier == nullptr) {
for (const ExportClauseData& data : *export_data) {
module()->AddExport(data.local_name, data.export_name, data.location,
@@ -1256,33 +1309,34 @@ Statement* Parser::ParseExportDeclaration(bool* ok) {
zone());
}
}
- return factory()->NewEmptyStatement(pos);
+ return factory()->EmptyStatement();
}
case Token::FUNCTION:
- result = ParseHoistableDeclaration(&names, false, CHECK_OK);
+ result = ParseHoistableDeclaration(&names, false);
break;
case Token::CLASS:
Consume(Token::CLASS);
- result = ParseClassDeclaration(&names, false, CHECK_OK);
+ result = ParseClassDeclaration(&names, false);
break;
case Token::VAR:
case Token::LET:
case Token::CONST:
- result = ParseVariableStatement(kStatementListItem, &names, CHECK_OK);
+ result = ParseVariableStatement(kStatementListItem, &names);
break;
case Token::ASYNC:
- // TODO(neis): Why don't we have the same check here as in
- // ParseStatementListItem?
Consume(Token::ASYNC);
- result = ParseAsyncFunctionDeclaration(&names, false, CHECK_OK);
- break;
+ if (peek() == Token::FUNCTION &&
+ !scanner()->HasLineTerminatorBeforeNext()) {
+ result = ParseAsyncFunctionDeclaration(&names, false);
+ break;
+ }
+ V8_FALLTHROUGH;
default:
- *ok = false;
ReportUnexpectedToken(scanner()->current_token());
return nullptr;
}
@@ -1293,126 +1347,117 @@ Statement* Parser::ParseExportDeclaration(bool* ok) {
descriptor->AddExport(names[i], names[i], loc, zone());
}
- DCHECK_NOT_NULL(result);
return result;
}
-VariableProxy* Parser::NewUnresolved(const AstRawString* name, int begin_pos,
- VariableKind kind) {
- return scope()->NewUnresolved(factory(), name, begin_pos, kind);
-}
-
-VariableProxy* Parser::NewUnresolved(const AstRawString* name) {
- return scope()->NewUnresolved(factory(), name, scanner()->location().beg_pos);
-}
-
-Declaration* Parser::DeclareVariable(const AstRawString* name,
- VariableMode mode, int pos, bool* ok) {
+VariableProxy* Parser::DeclareVariable(const AstRawString* name,
+ VariableMode mode, int pos) {
return DeclareVariable(name, mode, Variable::DefaultInitializationFlag(mode),
- pos, ok);
+ pos);
}
-Declaration* Parser::DeclareVariable(const AstRawString* name,
- VariableMode mode, InitializationFlag init,
- int pos, bool* ok) {
+VariableProxy* Parser::DeclareVariable(const AstRawString* name,
+ VariableMode mode,
+ InitializationFlag init, int pos) {
DCHECK_NOT_NULL(name);
- VariableProxy* proxy = factory()->NewVariableProxy(
- name, NORMAL_VARIABLE, scanner()->location().beg_pos);
+ VariableProxy* proxy =
+ factory()->NewVariableProxy(name, NORMAL_VARIABLE, position());
+ bool was_added;
+ DeclareVariable(proxy, NORMAL_VARIABLE, mode, init, scope(), &was_added, pos,
+ end_position());
+ return proxy;
+}
+
+void Parser::DeclareVariable(VariableProxy* proxy, VariableKind kind,
+ VariableMode mode, InitializationFlag init,
+ Scope* scope, bool* was_added, int begin,
+ int end) {
Declaration* declaration;
- if (mode == VariableMode::kVar && !scope()->is_declaration_scope()) {
- DCHECK(scope()->is_block_scope() || scope()->is_with_scope());
- declaration = factory()->NewNestedVariableDeclaration(proxy, scope(), pos);
+ if (mode == VariableMode::kVar && !scope->is_declaration_scope()) {
+ DCHECK(scope->is_block_scope() || scope->is_with_scope());
+ declaration = factory()->NewNestedVariableDeclaration(scope, begin);
} else {
- declaration = factory()->NewVariableDeclaration(proxy, pos);
+ declaration = factory()->NewVariableDeclaration(begin);
}
- Declare(declaration, DeclarationDescriptor::NORMAL, mode, init, ok, nullptr,
- scanner()->location().end_pos);
- if (!*ok) return nullptr;
- return declaration;
+ return Declare(declaration, proxy, kind, mode, init, scope, was_added, end);
}
-Variable* Parser::Declare(Declaration* declaration,
- DeclarationDescriptor::Kind declaration_kind,
- VariableMode mode, InitializationFlag init, bool* ok,
- Scope* scope, int var_end_pos) {
- if (scope == nullptr) {
- scope = this->scope();
- }
+void Parser::Declare(Declaration* declaration, VariableProxy* proxy,
+ VariableKind variable_kind, VariableMode mode,
+ InitializationFlag init, Scope* scope, bool* was_added,
+ int var_end_pos) {
+ bool local_ok = true;
bool sloppy_mode_block_scope_function_redefinition = false;
- Variable* variable = scope->DeclareVariable(
- declaration, mode, init, &sloppy_mode_block_scope_function_redefinition,
- ok);
- if (!*ok) {
+ scope->DeclareVariable(
+ declaration, proxy, mode, variable_kind, init, was_added,
+ &sloppy_mode_block_scope_function_redefinition, &local_ok);
+ if (!local_ok) {
// If we only have the start position of a proxy, we can't highlight the
// whole variable name. Pretend its length is 1 so that we highlight at
// least the first character.
- Scanner::Location loc(declaration->proxy()->position(),
- var_end_pos != kNoSourcePosition
- ? var_end_pos
- : declaration->proxy()->position() + 1);
- if (declaration_kind == DeclarationDescriptor::PARAMETER) {
+ Scanner::Location loc(proxy->position(), var_end_pos != kNoSourcePosition
+ ? var_end_pos
+ : proxy->position() + 1);
+ if (variable_kind == PARAMETER_VARIABLE) {
ReportMessageAt(loc, MessageTemplate::kParamDupe);
} else {
ReportMessageAt(loc, MessageTemplate::kVarRedeclaration,
- declaration->proxy()->raw_name());
+ declaration->var()->raw_name());
}
- return nullptr;
- }
- if (sloppy_mode_block_scope_function_redefinition) {
+ } else if (sloppy_mode_block_scope_function_redefinition) {
++use_counts_[v8::Isolate::kSloppyModeBlockScopedFunctionRedefinition];
}
- return variable;
}
-Block* Parser::BuildInitializationBlock(
- DeclarationParsingResult* parsing_result,
- ZonePtrList<const AstRawString>* names, bool* ok) {
- Block* result = factory()->NewBlock(1, true);
+Statement* Parser::BuildInitializationBlock(
+ DeclarationParsingResult* parsing_result) {
+ ScopedPtrList<Statement> statements(pointer_buffer());
for (const auto& declaration : parsing_result->declarations) {
- DeclareAndInitializeVariables(result, &(parsing_result->descriptor),
- &declaration, names, CHECK_OK);
+ InitializeVariables(&statements, parsing_result->descriptor.kind,
+ &declaration);
}
- return result;
+ return factory()->NewBlock(true, statements);
}
Statement* Parser::DeclareFunction(const AstRawString* variable_name,
FunctionLiteral* function, VariableMode mode,
- int pos, bool is_sloppy_block_function,
- ZonePtrList<const AstRawString>* names,
- bool* ok) {
+ int beg_pos, int end_pos,
+ bool is_sloppy_block_function,
+ ZonePtrList<const AstRawString>* names) {
VariableProxy* proxy =
- factory()->NewVariableProxy(variable_name, NORMAL_VARIABLE, pos);
- Declaration* declaration =
- factory()->NewFunctionDeclaration(proxy, function, pos);
- Declare(declaration, DeclarationDescriptor::NORMAL, mode, kCreatedInitialized,
- CHECK_OK);
+ factory()->NewVariableProxy(variable_name, NORMAL_VARIABLE, beg_pos);
+ Declaration* declaration = factory()->NewFunctionDeclaration(
+ function, is_sloppy_block_function, beg_pos);
+ bool was_added;
+ Declare(declaration, proxy, NORMAL_VARIABLE, mode, kCreatedInitialized,
+ scope(), &was_added);
if (names) names->Add(variable_name, zone());
if (is_sloppy_block_function) {
SloppyBlockFunctionStatement* statement =
- factory()->NewSloppyBlockFunctionStatement();
+ factory()->NewSloppyBlockFunctionStatement(end_pos);
GetDeclarationScope()->DeclareSloppyBlockFunction(variable_name, scope(),
statement);
return statement;
}
- return factory()->NewEmptyStatement(kNoSourcePosition);
+ return factory()->EmptyStatement();
}
Statement* Parser::DeclareClass(const AstRawString* variable_name,
Expression* value,
ZonePtrList<const AstRawString>* names,
- int class_token_pos, int end_pos, bool* ok) {
- Declaration* decl = DeclareVariable(variable_name, VariableMode::kLet,
- class_token_pos, CHECK_OK);
- decl->proxy()->var()->set_initializer_position(end_pos);
+ int class_token_pos, int end_pos) {
+ VariableProxy* proxy =
+ DeclareVariable(variable_name, VariableMode::kLet, class_token_pos);
+ proxy->var()->set_initializer_position(end_pos);
if (names) names->Add(variable_name, zone());
- Assignment* assignment = factory()->NewAssignment(Token::INIT, decl->proxy(),
- value, class_token_pos);
+ Assignment* assignment =
+ factory()->NewAssignment(Token::INIT, proxy, value, class_token_pos);
return IgnoreCompletion(
factory()->NewExpressionStatement(assignment, kNoSourcePosition));
}
-Statement* Parser::DeclareNative(const AstRawString* name, int pos, bool* ok) {
+Statement* Parser::DeclareNative(const AstRawString* name, int pos) {
// Make sure that the function containing the native declaration
// isn't lazily compiled. The extension structures are only
// accessible while parsing the first time, not when reparsing
@@ -1422,18 +1467,17 @@ Statement* Parser::DeclareNative(const AstRawString* name, int pos, bool* ok) {
// TODO(1240846): It's weird that native function declarations are
// introduced dynamically when we meet their declarations, whereas
// other functions are set up when entering the surrounding scope.
- Declaration* decl = DeclareVariable(name, VariableMode::kVar, pos, CHECK_OK);
+ VariableProxy* proxy = DeclareVariable(name, VariableMode::kVar, pos);
NativeFunctionLiteral* lit =
factory()->NewNativeFunctionLiteral(name, extension_, kNoSourcePosition);
return factory()->NewExpressionStatement(
- factory()->NewAssignment(Token::INIT, decl->proxy(), lit,
- kNoSourcePosition),
+ factory()->NewAssignment(Token::INIT, proxy, lit, kNoSourcePosition),
pos);
}
void Parser::DeclareLabel(ZonePtrList<const AstRawString>** labels,
ZonePtrList<const AstRawString>** own_labels,
- VariableProxy* var, bool* ok) {
+ VariableProxy* var) {
DCHECK(IsIdentifier(var));
const AstRawString* label = var->raw_name();
@@ -1444,7 +1488,6 @@ void Parser::DeclareLabel(ZonePtrList<const AstRawString>** labels,
// make later anyway so we should go back and fix this then.
if (ContainsLabel(*labels, label) || TargetStackContainsLabel(label)) {
ReportMessage(MessageTemplate::kLabelRedeclaration, label);
- *ok = false;
return;
}
@@ -1464,7 +1507,7 @@ void Parser::DeclareLabel(ZonePtrList<const AstRawString>** labels,
// Remove the "ghost" variable that turned out to be a label
// from the top scope. This way, we don't try to resolve it
// during the scope processing.
- scope()->RemoveUnresolved(var);
+ scope()->DeleteUnresolved(var);
}
bool Parser::ContainsLabel(ZonePtrList<const AstRawString>* labels,
@@ -1513,16 +1556,6 @@ Expression* Parser::RewriteReturn(Expression* return_value, int pos) {
return return_value;
}
-Expression* Parser::RewriteDoExpression(Block* body, int pos, bool* ok) {
- Variable* result = NewTemporary(ast_value_factory()->dot_result_string());
- DoExpression* expr = factory()->NewDoExpression(body, result, pos);
- if (!Rewriter::Rewrite(this, GetClosureScope(), expr, ast_value_factory())) {
- *ok = false;
- return nullptr;
- }
- return expr;
-}
-
Statement* Parser::RewriteSwitchStatement(SwitchStatement* switch_statement,
Scope* scope) {
// In order to get the CaseClauses to execute in their own lexical scope,
@@ -1561,53 +1594,34 @@ Statement* Parser::RewriteSwitchStatement(SwitchStatement* switch_statement,
return switch_block;
}
-void Parser::RewriteCatchPattern(CatchInfo* catch_info, bool* ok) {
- if (catch_info->name == nullptr) {
- DCHECK_NOT_NULL(catch_info->pattern);
- catch_info->name = ast_value_factory()->dot_catch_string();
- }
- Variable* catch_variable =
- catch_info->scope->DeclareLocal(catch_info->name, VariableMode::kVar);
- if (catch_info->pattern != nullptr) {
- DeclarationDescriptor descriptor;
- descriptor.declaration_kind = DeclarationDescriptor::NORMAL;
- descriptor.scope = scope();
- descriptor.mode = VariableMode::kLet;
- descriptor.declaration_pos = catch_info->pattern->position();
- descriptor.initialization_pos = catch_info->pattern->position();
-
- // Initializer position for variables declared by the pattern.
- const int initializer_position = position();
+Block* Parser::RewriteCatchPattern(CatchInfo* catch_info) {
+ DCHECK_NOT_NULL(catch_info->pattern);
- DeclarationParsingResult::Declaration decl(
- catch_info->pattern, initializer_position,
- factory()->NewVariableProxy(catch_variable));
+ // Initializer position for variables declared by the pattern.
+ const int initializer_position = position();
- catch_info->init_block = factory()->NewBlock(8, true);
- DeclareAndInitializeVariables(catch_info->init_block, &descriptor, &decl,
- &catch_info->bound_names, ok);
- } else {
- catch_info->bound_names.Add(catch_info->name, zone());
- }
+ DeclarationParsingResult::Declaration decl(
+ catch_info->pattern, initializer_position,
+ factory()->NewVariableProxy(catch_info->variable));
+
+ ScopedPtrList<Statement> init_statements(pointer_buffer());
+ InitializeVariables(&init_statements, NORMAL_VARIABLE, &decl);
+ return factory()->NewBlock(true, init_statements);
}
-void Parser::ValidateCatchBlock(const CatchInfo& catch_info, bool* ok) {
- // Check for `catch(e) { let e; }` and similar errors.
- Scope* inner_block_scope = catch_info.inner_block->scope();
- if (inner_block_scope != nullptr) {
- Declaration* decl = inner_block_scope->CheckLexDeclarationsConflictingWith(
- catch_info.bound_names);
- if (decl != nullptr) {
- const AstRawString* name = decl->proxy()->raw_name();
- int position = decl->proxy()->position();
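+// Reports a var redeclaration error at the position where |name| is declared
+// in |scope|; e.g. for the Annex B.3.5 violation
+// `try {} catch (e) { for (var e of []); }` (illustrative example).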
+void Parser::ReportVarRedeclarationIn(const AstRawString* name, Scope* scope) {
+ for (Declaration* decl : *scope->declarations()) {
+ if (decl->var()->raw_name() == name) {
+ int position = decl->position();
Scanner::Location location =
position == kNoSourcePosition
? Scanner::Location::invalid()
- : Scanner::Location(position, position + 1);
+ : Scanner::Location(position, position + name->length());
ReportMessageAt(location, MessageTemplate::kVarRedeclaration, name);
- *ok = false;
+ return;
}
}
+ UNREACHABLE();
}
Statement* Parser::RewriteTryStatement(Block* try_block, Block* catch_block,
@@ -1647,18 +1661,17 @@ Statement* Parser::RewriteTryStatement(Block* try_block, Block* catch_block,
}
}
-void Parser::ParseAndRewriteGeneratorFunctionBody(int pos, FunctionKind kind,
- ZonePtrList<Statement>* body,
- bool* ok) {
+void Parser::ParseAndRewriteGeneratorFunctionBody(
+ int pos, FunctionKind kind, ScopedPtrList<Statement>* body) {
// For ES6 Generators, we just prepend the initial yield.
Expression* initial_yield = BuildInitialYield(pos, kind);
- body->Add(factory()->NewExpressionStatement(initial_yield, kNoSourcePosition),
- zone());
- ParseStatementList(body, Token::RBRACE, ok);
+ body->Add(
+ factory()->NewExpressionStatement(initial_yield, kNoSourcePosition));
+ ParseStatementList(body, Token::RBRACE);
}
void Parser::ParseAndRewriteAsyncGeneratorFunctionBody(
- int pos, FunctionKind kind, ZonePtrList<Statement>* body, bool* ok) {
+ int pos, FunctionKind kind, ScopedPtrList<Statement>* body) {
// For ES2017 Async Generators, we produce:
//
// try {
@@ -1681,58 +1694,69 @@ void Parser::ParseAndRewriteAsyncGeneratorFunctionBody(
// "done" iterator result object containing a Promise-unwrapped value.
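//
// A condensed sketch of the structure assembled below (illustrative):
//
//   try {
//     InitialYield;
//     ...body...;
//     return undefined;
//   } catch (.catch) {
//     return %_AsyncGeneratorReject(.generator_object, .catch);
//   } finally {
//     %_GeneratorClose(.generator_object);
//   }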
DCHECK(IsAsyncGeneratorFunction(kind));
- Block* try_block = factory()->NewBlock(3, false);
- Expression* initial_yield = BuildInitialYield(pos, kind);
- try_block->statements()->Add(
- factory()->NewExpressionStatement(initial_yield, kNoSourcePosition),
- zone());
- ParseStatementList(try_block->statements(), Token::RBRACE, ok);
- if (!*ok) return;
+ Block* try_block;
+ {
+ ScopedPtrList<Statement> statements(pointer_buffer());
+ Expression* initial_yield = BuildInitialYield(pos, kind);
+ statements.Add(
+ factory()->NewExpressionStatement(initial_yield, kNoSourcePosition));
+ ParseStatementList(&statements, Token::RBRACE);
+
+ // Don't create iterator result for async generators, as the resume methods
+ // will create it.
+ // TODO(leszeks): This will create another suspend point, which is
+ // unnecessary if there is already an unconditional return in the body.
+ Statement* final_return = BuildReturnStatement(
+ factory()->NewUndefinedLiteral(kNoSourcePosition), kNoSourcePosition);
+ statements.Add(final_return);
- // Don't create iterator result for async generators, as the resume methods
- // will create it.
- // TODO(leszeks): This will create another suspend point, which is unnecessary
- // if there is already an unconditional return in the body.
- Statement* final_return = BuildReturnStatement(
- factory()->NewUndefinedLiteral(kNoSourcePosition), kNoSourcePosition);
- try_block->statements()->Add(final_return, zone());
+ try_block = factory()->NewBlock(false, statements);
+ }
// For AsyncGenerators, a top-level catch block will reject the Promise.
Scope* catch_scope = NewHiddenCatchScope();
- ZonePtrList<Expression>* reject_args =
- new (zone()) ZonePtrList<Expression>(2, zone());
- reject_args->Add(factory()->NewVariableProxy(
- function_state_->scope()->generator_object_var()),
- zone());
- reject_args->Add(factory()->NewVariableProxy(catch_scope->catch_variable()),
- zone());
+ Block* catch_block;
+ {
+ ScopedPtrList<Expression> reject_args(pointer_buffer());
+ reject_args.Add(factory()->NewVariableProxy(
+ function_state_->scope()->generator_object_var()));
+ reject_args.Add(factory()->NewVariableProxy(catch_scope->catch_variable()));
- Expression* reject_call = factory()->NewCallRuntime(
- Runtime::kInlineAsyncGeneratorReject, reject_args, kNoSourcePosition);
- Block* catch_block = IgnoreCompletion(
- factory()->NewReturnStatement(reject_call, kNoSourcePosition));
+ Expression* reject_call = factory()->NewCallRuntime(
+ Runtime::kInlineAsyncGeneratorReject, reject_args, kNoSourcePosition);
+ catch_block = IgnoreCompletion(
+ factory()->NewReturnStatement(reject_call, kNoSourcePosition));
+ }
- TryStatement* try_catch = factory()->NewTryCatchStatementForAsyncAwait(
- try_block, catch_scope, catch_block, kNoSourcePosition);
+ {
+ ScopedPtrList<Statement> statements(pointer_buffer());
+ TryStatement* try_catch = factory()->NewTryCatchStatementForAsyncAwait(
+ try_block, catch_scope, catch_block, kNoSourcePosition);
+ statements.Add(try_catch);
+ try_block = factory()->NewBlock(false, statements);
+ }
- try_block = factory()->NewBlock(1, false);
- try_block->statements()->Add(try_catch, zone());
+ Expression* close_call;
+ {
+ ScopedPtrList<Expression> close_args(pointer_buffer());
+ VariableProxy* call_proxy = factory()->NewVariableProxy(
+ function_state_->scope()->generator_object_var());
+ close_args.Add(call_proxy);
+ close_call = factory()->NewCallRuntime(Runtime::kInlineGeneratorClose,
+ close_args, kNoSourcePosition);
+ }
- Block* finally_block = factory()->NewBlock(1, false);
- ZonePtrList<Expression>* close_args =
- new (zone()) ZonePtrList<Expression>(1, zone());
- VariableProxy* call_proxy = factory()->NewVariableProxy(
- function_state_->scope()->generator_object_var());
- close_args->Add(call_proxy, zone());
- Expression* close_call = factory()->NewCallRuntime(
- Runtime::kInlineGeneratorClose, close_args, kNoSourcePosition);
- finally_block->statements()->Add(
- factory()->NewExpressionStatement(close_call, kNoSourcePosition), zone());
+ Block* finally_block;
+ {
+ ScopedPtrList<Statement> statements(pointer_buffer());
+ statements.Add(
+ factory()->NewExpressionStatement(close_call, kNoSourcePosition));
+ finally_block = factory()->NewBlock(false, statements);
+ }
body->Add(factory()->NewTryFinallyStatement(try_block, finally_block,
- kNoSourcePosition),
- zone());
+ kNoSourcePosition));
}
void Parser::DeclareFunctionNameVar(const AstRawString* function_name,
@@ -1745,81 +1769,6 @@ void Parser::DeclareFunctionNameVar(const AstRawString* function_name,
}
}
-// [if (IteratorType == kNormal)]
-// !%_IsJSReceiver(result = iterator.next()) &&
-// %ThrowIteratorResultNotAnObject(result)
-// [else if (IteratorType == kAsync)]
-// !%_IsJSReceiver(result = Await(iterator.next())) &&
-// %ThrowIteratorResultNotAnObject(result)
-// [endif]
-Expression* Parser::BuildIteratorNextResult(VariableProxy* iterator,
- VariableProxy* next,
- Variable* result, IteratorType type,
- int pos) {
- Expression* next_property = factory()->NewResolvedProperty(iterator, next);
- ZonePtrList<Expression>* next_arguments =
- new (zone()) ZonePtrList<Expression>(0, zone());
- Expression* next_call =
- factory()->NewCall(next_property, next_arguments, kNoSourcePosition);
- if (type == IteratorType::kAsync) {
- function_state_->AddSuspend();
- next_call = factory()->NewAwait(next_call, pos);
- }
- Expression* result_proxy = factory()->NewVariableProxy(result);
- Expression* left =
- factory()->NewAssignment(Token::ASSIGN, result_proxy, next_call, pos);
-
- // %_IsJSReceiver(...)
- ZonePtrList<Expression>* is_spec_object_args =
- new (zone()) ZonePtrList<Expression>(1, zone());
- is_spec_object_args->Add(left, zone());
- Expression* is_spec_object_call = factory()->NewCallRuntime(
- Runtime::kInlineIsJSReceiver, is_spec_object_args, pos);
-
- // %ThrowIteratorResultNotAnObject(result)
- Expression* result_proxy_again = factory()->NewVariableProxy(result);
- ZonePtrList<Expression>* throw_arguments =
- new (zone()) ZonePtrList<Expression>(1, zone());
- throw_arguments->Add(result_proxy_again, zone());
- Expression* throw_call = factory()->NewCallRuntime(
- Runtime::kThrowIteratorResultNotAnObject, throw_arguments, pos);
-
- return factory()->NewBinaryOperation(
- Token::AND,
- factory()->NewUnaryOperation(Token::NOT, is_spec_object_call, pos),
- throw_call, pos);
-}
-
-Statement* Parser::InitializeForEachStatement(ForEachStatement* stmt,
- Expression* each,
- Expression* subject,
- Statement* body) {
- ForOfStatement* for_of = stmt->AsForOfStatement();
- if (for_of != nullptr) {
- const bool finalize = true;
- return InitializeForOfStatement(for_of, each, subject, body, finalize,
- IteratorType::kNormal, each->position());
- } else {
- if (each->IsArrayLiteral() || each->IsObjectLiteral()) {
- Variable* temp = NewTemporary(ast_value_factory()->empty_string());
- VariableProxy* temp_proxy = factory()->NewVariableProxy(temp);
- Expression* assign_each =
- RewriteDestructuringAssignment(factory()->NewAssignment(
- Token::ASSIGN, each, temp_proxy, kNoSourcePosition));
- auto block = factory()->NewBlock(2, false);
- block->statements()->Add(
- factory()->NewExpressionStatement(assign_each, kNoSourcePosition),
- zone());
- block->statements()->Add(body, zone());
- body = block;
- each = factory()->NewVariableProxy(temp);
- }
- MarkExpressionAsAssigned(each);
- stmt->AsForInStatement()->Initialize(each, subject, body);
- }
- return stmt;
-}
-
// Special case for legacy for
//
// for (var x = initializer in enumerable) body
@@ -1872,65 +1821,25 @@ Block* Parser::RewriteForVarInLegacy(const ForInfo& for_info) {
// }
void Parser::DesugarBindingInForEachStatement(ForInfo* for_info,
Block** body_block,
- Expression** each_variable,
- bool* ok) {
+ Expression** each_variable) {
DCHECK_EQ(1, for_info->parsing_result.declarations.size());
DeclarationParsingResult::Declaration& decl =
for_info->parsing_result.declarations[0];
Variable* temp = NewTemporary(ast_value_factory()->dot_for_string());
- auto each_initialization_block = factory()->NewBlock(1, true);
- {
- auto descriptor = for_info->parsing_result.descriptor;
- descriptor.declaration_pos = kNoSourcePosition;
- descriptor.initialization_pos = kNoSourcePosition;
- descriptor.scope = scope();
- decl.initializer = factory()->NewVariableProxy(temp);
-
- bool is_for_var_of =
- for_info->mode == ForEachStatement::ITERATE &&
- for_info->parsing_result.descriptor.mode == VariableMode::kVar;
- bool collect_names =
- IsLexicalVariableMode(for_info->parsing_result.descriptor.mode) ||
- is_for_var_of;
-
- DeclareAndInitializeVariables(
- each_initialization_block, &descriptor, &decl,
- collect_names ? &for_info->bound_names : nullptr, CHECK_OK_VOID);
-
- // Annex B.3.5 prohibits the form
- // `try {} catch(e) { for (var e of {}); }`
- // So if we are parsing a statement like `for (var ... of ...)`
- // we need to walk up the scope chain and look for catch scopes
- // which have a simple binding, then compare their binding against
- // all of the names declared in the init of the for-of we're
- // parsing.
- if (is_for_var_of) {
- Scope* catch_scope = scope();
- while (catch_scope != nullptr && !catch_scope->is_declaration_scope()) {
- if (catch_scope->is_catch_scope()) {
- auto name = catch_scope->catch_variable()->raw_name();
- // If it's a simple binding and the name is declared in the for loop.
- if (name != ast_value_factory()->dot_catch_string() &&
- for_info->bound_names.Contains(name)) {
- ReportMessageAt(for_info->parsing_result.bindings_loc,
- MessageTemplate::kVarRedeclaration, name);
- *ok = false;
- return;
- }
- }
- catch_scope = catch_scope->outer_scope();
- }
- }
- }
+ ScopedPtrList<Statement> each_initialization_statements(pointer_buffer());
+ decl.initializer = factory()->NewVariableProxy(temp);
+ InitializeVariables(&each_initialization_statements, NORMAL_VARIABLE, &decl);
*body_block = factory()->NewBlock(3, false);
- (*body_block)->statements()->Add(each_initialization_block, zone());
+ (*body_block)
+ ->statements()
+ ->Add(factory()->NewBlock(true, each_initialization_statements), zone());
*each_variable = factory()->NewVariableProxy(temp, for_info->position);
}
// Create a TDZ for any lexically-bound names in for in/of statements.
Block* Parser::CreateForEachStatementTDZ(Block* init_block,
- const ForInfo& for_info, bool* ok) {
+ const ForInfo& for_info) {
if (IsLexicalVariableMode(for_info.parsing_result.descriptor.mode)) {
DCHECK_NULL(init_block);
@@ -1940,142 +1849,17 @@ Block* Parser::CreateForEachStatementTDZ(Block* init_block,
// TODO(adamk): This needs to be some sort of special
// INTERNAL variable that's invisible to the debugger
// but visible to everything else.
- Declaration* tdz_decl =
- DeclareVariable(for_info.bound_names[i], VariableMode::kLet,
- kNoSourcePosition, CHECK_OK);
- tdz_decl->proxy()->var()->set_initializer_position(position());
+ VariableProxy* tdz_proxy = DeclareVariable(
+ for_info.bound_names[i], VariableMode::kLet, kNoSourcePosition);
+ tdz_proxy->var()->set_initializer_position(position());
}
}
return init_block;
}
-Statement* Parser::InitializeForOfStatement(
- ForOfStatement* for_of, Expression* each, Expression* iterable,
- Statement* body, bool finalize, IteratorType type, int next_result_pos) {
- // Create the auxiliary expressions needed for iterating over the iterable,
- // and initialize the given ForOfStatement with them.
- // If finalize is true, also instrument the loop with code that performs the
- // proper ES6 iterator finalization. In that case, the result is not
- // immediately a ForOfStatement.
- const int nopos = kNoSourcePosition;
- auto avfactory = ast_value_factory();
-
- Variable* iterator = NewTemporary(avfactory->dot_iterator_string());
- Variable* next = NewTemporary(avfactory->empty_string());
- Variable* result = NewTemporary(avfactory->dot_result_string());
- Variable* completion = NewTemporary(avfactory->empty_string());
-
- // iterator = GetIterator(iterable, type)
- Expression* assign_iterator;
- {
- assign_iterator = factory()->NewAssignment(
- Token::ASSIGN, factory()->NewVariableProxy(iterator),
- factory()->NewGetIterator(iterable, type, iterable->position()),
- iterable->position());
- }
-
- Expression* assign_next;
- {
- assign_next = factory()->NewAssignment(
- Token::ASSIGN, factory()->NewVariableProxy(next),
- factory()->NewProperty(factory()->NewVariableProxy(iterator),
- factory()->NewStringLiteral(
- avfactory->next_string(), kNoSourcePosition),
- kNoSourcePosition),
- kNoSourcePosition);
- }
-
- // [if (IteratorType == kNormal)]
- // !%_IsJSReceiver(result = iterator.next()) &&
- // %ThrowIteratorResultNotAnObject(result)
- // [else if (IteratorType == kAsync)]
- // !%_IsJSReceiver(result = Await(iterator.next())) &&
- // %ThrowIteratorResultNotAnObject(result)
- // [endif]
- Expression* next_result;
- {
- VariableProxy* iterator_proxy = factory()->NewVariableProxy(iterator);
- VariableProxy* next_proxy = factory()->NewVariableProxy(next);
- next_result = BuildIteratorNextResult(iterator_proxy, next_proxy, result,
- type, next_result_pos);
- }
-
- // result.done
- Expression* result_done;
- {
- Expression* done_literal = factory()->NewStringLiteral(
- ast_value_factory()->done_string(), kNoSourcePosition);
- Expression* result_proxy = factory()->NewVariableProxy(result);
- result_done =
- factory()->NewProperty(result_proxy, done_literal, kNoSourcePosition);
- }
-
- // result.value
- Expression* result_value;
- {
- Expression* value_literal =
- factory()->NewStringLiteral(avfactory->value_string(), nopos);
- Expression* result_proxy = factory()->NewVariableProxy(result);
- result_value = factory()->NewProperty(result_proxy, value_literal, nopos);
- }
-
- // {{tmp = #result_value, completion = kAbruptCompletion, tmp}}
- // Expression* result_value (gets overwritten)
- if (finalize) {
- Variable* tmp = NewTemporary(avfactory->empty_string());
- Expression* save_result = factory()->NewAssignment(
- Token::ASSIGN, factory()->NewVariableProxy(tmp), result_value, nopos);
-
- Expression* set_completion_abrupt = factory()->NewAssignment(
- Token::ASSIGN, factory()->NewVariableProxy(completion),
- factory()->NewSmiLiteral(Parser::kAbruptCompletion, nopos), nopos);
-
- result_value = factory()->NewBinaryOperation(Token::COMMA, save_result,
- set_completion_abrupt, nopos);
- result_value = factory()->NewBinaryOperation(
- Token::COMMA, result_value, factory()->NewVariableProxy(tmp), nopos);
- }
-
- // each = #result_value;
- Expression* assign_each;
- {
- assign_each =
- factory()->NewAssignment(Token::ASSIGN, each, result_value, nopos);
- if (each->IsArrayLiteral() || each->IsObjectLiteral()) {
- assign_each = RewriteDestructuringAssignment(assign_each->AsAssignment());
- }
- }
-
- // {{completion = kNormalCompletion;}}
- Statement* set_completion_normal;
- if (finalize) {
- Expression* proxy = factory()->NewVariableProxy(completion);
- Expression* assignment = factory()->NewAssignment(
- Token::ASSIGN, proxy,
- factory()->NewSmiLiteral(Parser::kNormalCompletion, nopos), nopos);
-
- set_completion_normal =
- IgnoreCompletion(factory()->NewExpressionStatement(assignment, nopos));
- }
-
- // { #loop-body; #set_completion_normal }
- // Statement* body (gets overwritten)
- if (finalize) {
- Block* block = factory()->NewBlock(2, false);
- block->statements()->Add(body, zone());
- block->statements()->Add(set_completion_normal, zone());
- body = block;
- }
-
- for_of->Initialize(body, iterator, assign_iterator, assign_next, next_result,
- result_done, assign_each);
- return finalize ? FinalizeForOfStatement(for_of, completion, type, nopos)
- : for_of;
-}
-
Statement* Parser::DesugarLexicalBindingsInForStatement(
ForStatement* loop, Statement* init, Expression* cond, Statement* next,
- Statement* body, Scope* inner_scope, const ForInfo& for_info, bool* ok) {
+ Statement* body, Scope* inner_scope, const ForInfo& for_info) {
// ES6 13.7.4.8 specifies that on each loop iteration the let variables are
// copied into a new environment. Moreover, the "next" statement must be
// evaluated not in the environment of the just completed iteration but in
@@ -2177,18 +1961,18 @@ Statement* Parser::DesugarLexicalBindingsInForStatement(
// For each let variable x:
// make statement: let/const x = temp_x.
for (int i = 0; i < for_info.bound_names.length(); i++) {
- Declaration* decl = DeclareVariable(
+ VariableProxy* proxy = DeclareVariable(
for_info.bound_names[i], for_info.parsing_result.descriptor.mode,
- kNoSourcePosition, CHECK_OK);
- inner_vars.Add(decl->proxy()->var(), zone());
+ kNoSourcePosition);
+ inner_vars.Add(proxy->var(), zone());
VariableProxy* temp_proxy = factory()->NewVariableProxy(temps.at(i));
Assignment* assignment = factory()->NewAssignment(
- Token::INIT, decl->proxy(), temp_proxy, kNoSourcePosition);
+ Token::INIT, proxy, temp_proxy, kNoSourcePosition);
Statement* assignment_statement =
factory()->NewExpressionStatement(assignment, kNoSourcePosition);
int declaration_pos = for_info.parsing_result.descriptor.declaration_pos;
DCHECK_NE(declaration_pos, kNoSourcePosition);
- decl->proxy()->var()->set_initializer_position(declaration_pos);
+ proxy->var()->set_initializer_position(declaration_pos);
ignore_completion_block->statements()->Add(assignment_statement, zone());
}
@@ -2234,7 +2018,7 @@ Statement* Parser::DesugarLexicalBindingsInForStatement(
if (cond) {
Statement* stop =
factory()->NewBreakStatement(outer_loop, kNoSourcePosition);
- Statement* noop = factory()->NewEmptyStatement(kNoSourcePosition);
+ Statement* noop = factory()->EmptyStatement();
ignore_completion_block->statements()->Add(
factory()->NewIfStatement(cond, noop, stop, cond->position()),
zone());
@@ -2297,7 +2081,7 @@ Statement* Parser::DesugarLexicalBindingsInForStatement(
}
Statement* stop =
factory()->NewBreakStatement(outer_loop, kNoSourcePosition);
- Statement* empty = factory()->NewEmptyStatement(kNoSourcePosition);
+ Statement* empty = factory()->EmptyStatement();
Statement* if_flag_break =
factory()->NewIfStatement(compare, stop, empty, kNoSourcePosition);
inner_block->statements()->Add(IgnoreCompletion(if_flag_break), zone());
@@ -2311,9 +2095,19 @@ Statement* Parser::DesugarLexicalBindingsInForStatement(
return outer_block;
}
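+// Reports kParamDupe if the parameter list recorded a duplicate binding,
+// e.g. `function f(a, a) {}` in strict mode code (illustrative example).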
+void ParserFormalParameters::ValidateDuplicate(Parser* parser) const {
+ if (has_duplicate()) {
+ parser->ReportMessageAt(duplicate_loc, MessageTemplate::kParamDupe);
+ }
+}
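+// Reports a strict-mode-only parameter error recorded during parsing, e.g.
+// `eval` used as a parameter name in strict code (illustrative example).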
+void ParserFormalParameters::ValidateStrictMode(Parser* parser) const {
+ if (strict_error_loc.IsValid()) {
+ parser->ReportMessageAt(strict_error_loc, strict_error_message);
+ }
+}
+
void Parser::AddArrowFunctionFormalParameters(
- ParserFormalParameters* parameters, Expression* expr, int end_pos,
- bool* ok) {
+ ParserFormalParameters* parameters, Expression* expr, int end_pos) {
// ArrowFunctionFormals ::
// Nary(Token::COMMA, VariableProxy*, Tail)
// Binary(Token::COMMA, NonTailArrowFunctionFormals, Tail)
@@ -2339,11 +2133,11 @@ void Parser::AddArrowFunctionFormalParameters(
// the first child expression.
Expression* next = nary->first();
for (size_t i = 0; i < nary->subsequent_length(); ++i) {
- AddArrowFunctionFormalParameters(
- parameters, next, nary->subsequent_op_position(i), CHECK_OK_VOID);
+ AddArrowFunctionFormalParameters(parameters, next,
+ nary->subsequent_op_position(i));
next = nary->subsequent(i);
}
- AddArrowFunctionFormalParameters(parameters, next, end_pos, CHECK_OK_VOID);
+ AddArrowFunctionFormalParameters(parameters, next, end_pos);
return;
}
@@ -2357,8 +2151,7 @@ void Parser::AddArrowFunctionFormalParameters(
Expression* left = binop->left();
Expression* right = binop->right();
int comma_pos = binop->position();
- AddArrowFunctionFormalParameters(parameters, left, comma_pos,
- CHECK_OK_VOID);
+ AddArrowFunctionFormalParameters(parameters, left, comma_pos);
// LHS of comma expression should be unparenthesized.
expr = right;
}
@@ -2376,11 +2169,6 @@ void Parser::AddArrowFunctionFormalParameters(
Expression* initializer = nullptr;
if (expr->IsAssignment()) {
- if (expr->IsRewritableExpression()) {
- // This expression was parsed as a possible destructuring assignment.
- // Mark it as already-rewritten to avoid an unnecessary visit later.
- expr->AsRewritableExpression()->set_rewritten();
- }
Assignment* assignment = expr->AsAssignment();
DCHECK(!assignment->IsCompoundAssignment());
initializer = assignment->value();
@@ -2393,26 +2181,19 @@ void Parser::AddArrowFunctionFormalParameters(
void Parser::DeclareArrowFunctionFormalParameters(
ParserFormalParameters* parameters, Expression* expr,
- const Scanner::Location& params_loc, Scanner::Location* duplicate_loc,
- bool* ok) {
- if (expr->IsEmptyParentheses()) return;
+ const Scanner::Location& params_loc) {
+ if (expr->IsEmptyParentheses() || has_error()) return;
- AddArrowFunctionFormalParameters(parameters, expr, params_loc.end_pos,
- CHECK_OK_VOID);
+ AddArrowFunctionFormalParameters(parameters, expr, params_loc.end_pos);
if (parameters->arity > Code::kMaxArguments) {
ReportMessageAt(params_loc, MessageTemplate::kMalformedArrowFunParamList);
- *ok = false;
return;
}
- bool has_duplicate = false;
- DeclareFormalParameters(parameters->scope, parameters->params,
- parameters->is_simple, &has_duplicate);
- if (has_duplicate) {
- *duplicate_loc = scanner()->location();
- }
- DCHECK_EQ(parameters->is_simple, parameters->scope->has_simple_parameters());
+ DeclareFormalParameters(parameters);
+ DCHECK_IMPLIES(parameters->is_simple,
+ parameters->scope->has_simple_parameters());
}
void Parser::PrepareGeneratorVariables() {
@@ -2428,7 +2209,7 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
FunctionNameValidity function_name_validity, FunctionKind kind,
int function_token_pos, FunctionLiteral::FunctionType function_type,
LanguageMode language_mode,
- ZonePtrList<const AstRawString>* arguments_for_wrapped_function, bool* ok) {
+ ZonePtrList<const AstRawString>* arguments_for_wrapped_function) {
// Function ::
// '(' FormalParameterList? ')' '{' FunctionBody '}'
//
@@ -2493,18 +2274,16 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
// parenthesis before the function means that it will be called
// immediately). bar can be parsed lazily, but we need to parse it in a mode
// that tracks unresolved variables.
- DCHECK_IMPLIES(parse_lazily(), FLAG_lazy);
- DCHECK_IMPLIES(parse_lazily(), allow_lazy_);
+ DCHECK_IMPLIES(parse_lazily(), info()->allow_lazy_compile());
+ DCHECK_IMPLIES(parse_lazily(), has_error() || allow_lazy_);
DCHECK_IMPLIES(parse_lazily(), extension_ == nullptr);
const bool is_lazy =
eager_compile_hint == FunctionLiteral::kShouldLazyCompile;
const bool is_top_level = AllowsLazyParsingWithoutUnresolvedVariables();
+ const bool is_eager_top_level_function = !is_lazy && is_top_level;
const bool is_lazy_top_level_function = is_lazy && is_top_level;
const bool is_lazy_inner_function = is_lazy && !is_top_level;
- const bool is_expression =
- function_type == FunctionLiteral::kAnonymousExpression ||
- function_type == FunctionLiteral::kNamedExpression;
RuntimeCallTimerScope runtime_timer(
runtime_call_stats_,
@@ -2531,22 +2310,28 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
// Inner functions will be parsed using a temporary Zone. After parsing, we
// will migrate unresolved variables into a Scope in the main Zone.
- const bool should_preparse_inner =
- parse_lazily() && FLAG_lazy_inner_functions && is_lazy_inner_function &&
- (!is_expression || FLAG_aggressive_lazy_inner_functions);
+ const bool should_preparse_inner = parse_lazily() && is_lazy_inner_function;
+
+  // If parallel compile tasks are enabled and the function is an eager
+  // top-level function, we can pre-parse the function and then parse /
+  // compile it in a parallel task on a worker thread.
+ bool should_post_parallel_task =
+ parse_lazily() && is_eager_top_level_function &&
+ FLAG_parallel_compile_tasks && info()->parallel_tasks() &&
+ scanner()->stream()->can_be_cloned_for_parallel_access();
// This may be modified later to reflect the preparsing decision taken.
- bool should_preparse =
- (parse_lazily() && is_lazy_top_level_function) || should_preparse_inner;
+ bool should_preparse = (parse_lazily() && is_lazy_top_level_function) ||
+ should_preparse_inner || should_post_parallel_task;
- ZonePtrList<Statement>* body = nullptr;
+ ScopedPtrList<Statement> body(pointer_buffer());
int expected_property_count = -1;
int suspend_count = -1;
int num_parameters = -1;
int function_length = -1;
bool has_duplicate_parameters = false;
int function_literal_id = GetNextFunctionLiteralId();
- ProducedPreParsedScopeData* produced_preparsed_scope_data = nullptr;
+ ProducedPreparseData* produced_preparse_data = nullptr;
// This Scope lives in the main zone. We'll migrate data into that zone later.
Zone* parse_zone = should_preparse ? &preparser_zone_ : zone();
@@ -2556,7 +2341,10 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
scope->SetScopeName(function_name);
#endif
- if (!is_wrapped) Expect(Token::LPAREN, CHECK_OK);
+ if (!is_wrapped && V8_UNLIKELY(!Check(Token::LPAREN))) {
+ ReportUnexpectedToken(Next());
+ return nullptr;
+ }
scope->set_start_position(position());
// Eager or lazy parse? If is_lazy_top_level_function, we'll parse
@@ -2565,15 +2353,18 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
// which case the parser is expected to have backtracked), or if we didn't
// try to lazy parse in the first place, we'll have to parse eagerly.
bool did_preparse_successfully =
- should_preparse &&
- SkipFunction(function_name, kind, function_type, scope, &num_parameters,
- &produced_preparsed_scope_data, is_lazy_inner_function,
- is_lazy_top_level_function, &eager_compile_hint, CHECK_OK);
+ should_preparse && SkipFunction(function_name, kind, function_type, scope,
+ &num_parameters, &produced_preparse_data);
+
if (!did_preparse_successfully) {
- body = ParseFunction(
- function_name, pos, kind, function_type, scope, &num_parameters,
- &function_length, &has_duplicate_parameters, &expected_property_count,
- &suspend_count, arguments_for_wrapped_function, CHECK_OK);
+ // If skipping aborted, it rewound the scanner until before the LPAREN.
+ // Consume it in that case.
+ if (should_preparse) Consume(Token::LPAREN);
+ should_post_parallel_task = false;
+ ParseFunction(&body, function_name, pos, kind, function_type, scope,
+ &num_parameters, &function_length, &has_duplicate_parameters,
+ &expected_property_count, &suspend_count,
+ arguments_for_wrapped_function);
}
if (V8_UNLIKELY(FLAG_log_function_events)) {
@@ -2589,16 +2380,12 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
function_name->byte_length());
}
if (V8_UNLIKELY(FLAG_runtime_stats) && did_preparse_successfully) {
- const RuntimeCallCounterId counters[2][2] = {
- {RuntimeCallCounterId::kPreParseBackgroundNoVariableResolution,
- RuntimeCallCounterId::kPreParseNoVariableResolution},
- {RuntimeCallCounterId::kPreParseBackgroundWithVariableResolution,
- RuntimeCallCounterId::kPreParseWithVariableResolution}};
+ const RuntimeCallCounterId counters[2] = {
+ RuntimeCallCounterId::kPreParseBackgroundWithVariableResolution,
+ RuntimeCallCounterId::kPreParseWithVariableResolution};
if (runtime_call_stats_) {
- bool tracked_variables =
- PreParser::ShouldTrackUnresolvedVariables(is_lazy_top_level_function);
runtime_call_stats_->CorrectCurrentCounterId(
- counters[tracked_variables][parsing_on_main_thread_]);
+ counters[parsing_on_main_thread_]);
}
}
@@ -2606,13 +2393,12 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
// since the function can declare itself strict.
language_mode = scope->language_mode();
CheckFunctionName(language_mode, function_name, function_name_validity,
- function_name_location, CHECK_OK);
+ function_name_location);
if (is_strict(language_mode)) {
- CheckStrictOctalLiteral(scope->start_position(), scope->end_position(),
- CHECK_OK);
+ CheckStrictOctalLiteral(scope->start_position(), scope->end_position());
}
- CheckConflictingVarDeclarations(scope, CHECK_OK);
+ CheckConflictingVarDeclarations(scope);
FunctionLiteral::ParameterFlag duplicate_parameters =
has_duplicate_parameters ? FunctionLiteral::kHasDuplicateParameters
@@ -2622,23 +2408,25 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
FunctionLiteral* function_literal = factory()->NewFunctionLiteral(
function_name, scope, body, expected_property_count, num_parameters,
function_length, duplicate_parameters, function_type, eager_compile_hint,
- pos, true, function_literal_id, produced_preparsed_scope_data);
+ pos, true, function_literal_id, produced_preparse_data);
function_literal->set_function_token_position(function_token_pos);
function_literal->set_suspend_count(suspend_count);
+ if (should_post_parallel_task) {
+ // Start a parallel parse / compile task on the compiler dispatcher.
+ info()->parallel_tasks()->Enqueue(info(), function_name, function_literal);
+ }
+
if (should_infer_name) {
fni_.AddFunction(function_literal);
}
return function_literal;
}
-bool Parser::SkipFunction(
- const AstRawString* function_name, FunctionKind kind,
- FunctionLiteral::FunctionType function_type,
- DeclarationScope* function_scope, int* num_parameters,
- ProducedPreParsedScopeData** produced_preparsed_scope_data,
- bool is_inner_function, bool may_abort,
- FunctionLiteral::EagerCompileHint* hint, bool* ok) {
+bool Parser::SkipFunction(const AstRawString* function_name, FunctionKind kind,
+ FunctionLiteral::FunctionType function_type,
+ DeclarationScope* function_scope, int* num_parameters,
+ ProducedPreparseData** produced_preparse_data) {
FunctionState function_state(&function_state_, &scope_, function_scope);
function_scope->set_zone(&preparser_zone_);
@@ -2649,23 +2437,23 @@ bool Parser::SkipFunction(
scanner()->current_token() == Token::ARROW);
// FIXME(marja): There are 2 ways to skip functions now. Unify them.
- if (consumed_preparsed_scope_data_) {
- DCHECK(FLAG_preparser_scope_analysis);
+ if (consumed_preparse_data_) {
int end_position;
LanguageMode language_mode;
int num_inner_functions;
bool uses_super_property;
- *produced_preparsed_scope_data =
- consumed_preparsed_scope_data_->GetDataForSkippableFunction(
+ if (stack_overflow()) return true;
+ *produced_preparse_data =
+ consumed_preparse_data_->GetDataForSkippableFunction(
main_zone(), function_scope->start_position(), &end_position,
num_parameters, &num_inner_functions, &uses_super_property,
&language_mode);
- function_scope->outer_scope()->SetMustUsePreParsedScopeData();
+ function_scope->outer_scope()->SetMustUsePreparseData();
function_scope->set_is_skipped_function(true);
function_scope->set_end_position(end_position);
scanner()->SeekForward(end_position - 1);
- Expect(Token::RBRACE, CHECK_OK_VALUE(kLazyParsingComplete));
+ Expect(Token::RBRACE);
SetLanguageMode(function_scope, language_mode);
if (uses_super_property) {
function_scope->RecordSuperPropertyUsage();
@@ -2676,53 +2464,47 @@ bool Parser::SkipFunction(
}
Scanner::BookmarkScope bookmark(scanner());
- bookmark.Set();
+ bookmark.Set(function_scope->start_position());
// With no cached data, we partially parse the function, without building an
// AST. This gathers the data needed to build a lazy function.
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"), "V8.PreParse");
- // Aborting inner function preparsing would leave scopes in an inconsistent
- // state; we don't parse inner functions in the abortable mode anyway.
- DCHECK(!is_inner_function || !may_abort);
-
PreParser::PreParseResult result = reusable_preparser()->PreParseFunction(
- function_name, kind, function_type, function_scope, is_inner_function,
- may_abort, use_counts_, produced_preparsed_scope_data, this->script_id());
-
- // Return immediately if pre-parser decided to abort parsing.
- if (result == PreParser::kPreParseAbort) {
- bookmark.Apply();
- function_scope->ResetAfterPreparsing(ast_value_factory(), true);
- *hint = FunctionLiteral::kShouldEagerCompile;
- return false;
- }
+ function_name, kind, function_type, function_scope, use_counts_,
+ produced_preparse_data, this->script_id());
if (result == PreParser::kPreParseStackOverflow) {
// Propagate stack overflow.
set_stack_overflow();
- *ok = false;
- } else if (pending_error_handler()->ErrorUnidentifiableByPreParser()) {
+ } else if (pending_error_handler()->has_error_unidentifiable_by_preparser()) {
+ // Make sure we don't re-preparse inner functions of the aborted function.
+ // The error might be in an inner function.
+ allow_lazy_ = false;
+ mode_ = PARSE_EAGERLY;
+ DCHECK(!pending_error_handler()->stack_overflow());
// If we encounter an error that the preparser cannot identify, we reset to
// the state before preparsing. The caller may then fully parse the function
// to identify the actual error.
bookmark.Apply();
function_scope->ResetAfterPreparsing(ast_value_factory(), true);
- pending_error_handler()->ResetUnidentifiableError();
+ pending_error_handler()->clear_unidentifiable_error();
return false;
} else if (pending_error_handler()->has_pending_error()) {
- *ok = false;
+ DCHECK(!pending_error_handler()->stack_overflow());
+ DCHECK(has_error());
} else {
+ DCHECK(!pending_error_handler()->stack_overflow());
set_allow_eval_cache(reusable_preparser()->allow_eval_cache());
PreParserLogger* logger = reusable_preparser()->logger();
function_scope->set_end_position(logger->end());
- Expect(Token::RBRACE, CHECK_OK_VALUE(kLazyParsingComplete));
+ Expect(Token::RBRACE);
total_preparse_skipped_ +=
function_scope->end_position() - function_scope->start_position();
*num_parameters = logger->num_parameters();
SkipFunctionLiterals(logger->num_inner_functions());
- function_scope->AnalyzePartially(factory());
+ function_scope->AnalyzePartially(this, factory());
}
return true;
@@ -2734,7 +2516,7 @@ Statement* Parser::BuildAssertIsCoercible(Variable* var,
// throw /* type error (kNonCoercible) */;
auto source_position = pattern->position();
const AstRawString* property = ast_value_factory()->empty_string();
- MessageTemplate::Template msg = MessageTemplate::kNonCoercible;
+ MessageTemplate msg = MessageTemplate::kNonCoercible;
for (ObjectLiteralProperty* literal_property : *pattern->properties()) {
Expression* key = literal_property->key();
if (key->IsPropertyName()) {
@@ -2759,7 +2541,7 @@ Statement* Parser::BuildAssertIsCoercible(Variable* var,
IfStatement* if_statement = factory()->NewIfStatement(
condition,
factory()->NewExpressionStatement(throw_type_error, kNoSourcePosition),
- factory()->NewEmptyStatement(kNoSourcePosition), kNoSourcePosition);
+ factory()->EmptyStatement(), kNoSourcePosition);
return if_statement;
}
@@ -2774,13 +2556,6 @@ class InitializerRewriter final
// called by the base class (template).
friend class AstTraversalVisitor<InitializerRewriter>;
- // Just rewrite destructuring assignments wrapped in RewritableExpressions.
- void VisitRewritableExpression(RewritableExpression* to_rewrite) {
- if (to_rewrite->is_rewritten()) return;
- parser_->RewriteDestructuringAssignment(to_rewrite);
- AstTraversalVisitor::VisitRewritableExpression(to_rewrite);
- }
-
// Code in function literals does not need to be eagerly rewritten; it will be
// rewritten when scheduled.
void VisitFunctionLiteral(FunctionLiteral* expr) {}
@@ -2789,60 +2564,58 @@ class InitializerRewriter final
};
void Parser::RewriteParameterInitializer(Expression* expr) {
+ if (has_error()) return;
InitializerRewriter rewriter(stack_limit_, expr, this);
rewriter.Run();
}
-
Block* Parser::BuildParameterInitializationBlock(
- const ParserFormalParameters& parameters, bool* ok) {
+ const ParserFormalParameters& parameters) {
DCHECK(!parameters.is_simple);
DCHECK(scope()->is_function_scope());
DCHECK_EQ(scope(), parameters.scope);
- Block* init_block = factory()->NewBlock(parameters.num_parameters(), true);
+ ScopedPtrList<Statement> init_statements(pointer_buffer());
int index = 0;
for (auto parameter : parameters.params) {
- DeclarationDescriptor descriptor;
- descriptor.declaration_kind = DeclarationDescriptor::PARAMETER;
- descriptor.scope = scope();
- descriptor.mode = VariableMode::kLet;
- descriptor.declaration_pos = parameter->pattern->position();
- // The position that will be used by the AssignmentExpression
- // which copies from the temp parameter to the pattern.
- //
- // TODO(adamk): Should this be kNoSourcePosition, since
- // it's just copying from a temp var to the real param var?
- descriptor.initialization_pos = parameter->pattern->position();
Expression* initial_value =
factory()->NewVariableProxy(parameters.scope->parameter(index));
- if (parameter->initializer != nullptr) {
+ if (parameter->initializer() != nullptr) {
// IS_UNDEFINED($param) ? initializer : $param
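// e.g. for `function f(x = 1) {}` this builds the equivalent of
//   x = ($x === undefined) ? 1 : $x
// where $x denotes the temporary for the passed argument (illustrative).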
- // Ensure initializer is rewritten
- RewriteParameterInitializer(parameter->initializer);
+ if (parameter->initializer()->IsClassLiteral()) {
+        // Initializers can have their own scopes, so set the scope here
+        // if necessary.
+ BlockState block_state(
+ &scope_, parameter->initializer()->AsClassLiteral()->scope());
+
+ // Ensure initializer is rewritten
+ RewriteParameterInitializer(parameter->initializer());
+ } else {
+ // Ensure initializer is rewritten
+ RewriteParameterInitializer(parameter->initializer());
+ }
auto condition = factory()->NewCompareOperation(
Token::EQ_STRICT,
factory()->NewVariableProxy(parameters.scope->parameter(index)),
factory()->NewUndefinedLiteral(kNoSourcePosition), kNoSourcePosition);
- initial_value = factory()->NewConditional(
- condition, parameter->initializer, initial_value, kNoSourcePosition);
- descriptor.initialization_pos = parameter->initializer->position();
+ initial_value =
+ factory()->NewConditional(condition, parameter->initializer(),
+ initial_value, kNoSourcePosition);
}
Scope* param_scope = scope();
- Block* param_block = init_block;
+ ScopedPtrList<Statement>* param_init_statements = &init_statements;
+
+ base::Optional<ScopedPtrList<Statement>> non_simple_param_init_statements;
if (!parameter->is_simple() &&
scope()->AsDeclarationScope()->calls_sloppy_eval()) {
param_scope = NewVarblockScope();
- param_scope->set_start_position(descriptor.initialization_pos);
+ param_scope->set_start_position(parameter->pattern->position());
param_scope->set_end_position(parameter->initializer_end_position);
param_scope->RecordEvalCall();
- param_block = factory()->NewBlock(8, true);
- param_block->set_scope(param_scope);
- // Pass the appropriate scope in so that PatternRewriter can appropriately
- // rewrite inner initializers of the pattern to param_scope
- descriptor.scope = param_scope;
+ non_simple_param_init_statements.emplace(pointer_buffer());
+ param_init_statements = &non_simple_param_init_statements.value();
// Rewrite the outer initializer to point to param_scope
ReparentExpressionScope(stack_limit(), initial_value, param_scope);
}
@@ -2850,122 +2623,68 @@ Block* Parser::BuildParameterInitializationBlock(
BlockState block_state(&scope_, param_scope);
DeclarationParsingResult::Declaration decl(
parameter->pattern, parameter->initializer_end_position, initial_value);
- DeclareAndInitializeVariables(param_block, &descriptor, &decl, nullptr,
- CHECK_OK);
- if (param_block != init_block) {
+ InitializeVariables(param_init_statements, PARAMETER_VARIABLE, &decl);
+
+ if (param_init_statements != &init_statements) {
+ DCHECK_EQ(param_init_statements,
+ &non_simple_param_init_statements.value());
+ Block* param_block =
+ factory()->NewBlock(true, *non_simple_param_init_statements);
+ non_simple_param_init_statements.reset();
+ param_block->set_scope(param_scope);
param_scope = param_scope->FinalizeBlockScope();
- if (param_scope != nullptr) {
- CheckConflictingVarDeclarations(param_scope, CHECK_OK);
- }
- init_block->statements()->Add(param_block, zone());
+ init_statements.Add(param_block);
}
++index;
}
- return init_block;
+ return factory()->NewBlock(true, init_statements);
}
Scope* Parser::NewHiddenCatchScope() {
Scope* catch_scope = NewScopeWithParent(scope(), CATCH_SCOPE);
+ bool was_added;
catch_scope->DeclareLocal(ast_value_factory()->dot_catch_string(),
- VariableMode::kVar);
+ VariableMode::kVar, NORMAL_VARIABLE, &was_added);
+ DCHECK(was_added);
catch_scope->set_is_hidden();
return catch_scope;
}
Block* Parser::BuildRejectPromiseOnException(Block* inner_block) {
- // .promise = %AsyncFunctionPromiseCreate();
// try {
// <inner_block>
// } catch (.catch) {
- // %RejectPromise(.promise, .catch);
- // return .promise;
- // } finally {
- // %AsyncFunctionPromiseRelease(.promise);
+ // return %_AsyncFunctionReject(.generator_object, .catch, can_suspend);
// }
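//
// e.g. in `async function f() { throw 1; }` the thrown value reaches .catch
// and rejects the promise returned by f() (illustrative).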
- Block* result = factory()->NewBlock(2, true);
-
- // .promise = %AsyncFunctionPromiseCreate();
- Statement* set_promise;
- {
- Expression* create_promise = factory()->NewCallRuntime(
- Context::ASYNC_FUNCTION_PROMISE_CREATE_INDEX,
- new (zone()) ZonePtrList<Expression>(0, zone()), kNoSourcePosition);
- Assignment* assign_promise = factory()->NewAssignment(
- Token::ASSIGN, factory()->NewVariableProxy(PromiseVariable()),
- create_promise, kNoSourcePosition);
- set_promise =
- factory()->NewExpressionStatement(assign_promise, kNoSourcePosition);
- }
- result->statements()->Add(set_promise, zone());
+ Block* result = factory()->NewBlock(1, true);
- // catch (.catch) { return %RejectPromise(.promise, .catch), .promise }
+ // catch (.catch) {
+ // return %_AsyncFunctionReject(.generator_object, .catch, can_suspend)
+ // }
Scope* catch_scope = NewHiddenCatchScope();
- Expression* promise_reject = BuildRejectPromise(
- factory()->NewVariableProxy(catch_scope->catch_variable()),
- kNoSourcePosition);
+ Expression* reject_promise;
+ {
+ ScopedPtrList<Expression> args(pointer_buffer());
+ args.Add(factory()->NewVariableProxy(
+ function_state_->scope()->generator_object_var()));
+ args.Add(factory()->NewVariableProxy(catch_scope->catch_variable()));
+ args.Add(factory()->NewBooleanLiteral(function_state_->CanSuspend(),
+ kNoSourcePosition));
+ reject_promise = factory()->NewCallRuntime(
+ Runtime::kInlineAsyncFunctionReject, args, kNoSourcePosition);
+ }
Block* catch_block = IgnoreCompletion(
- factory()->NewReturnStatement(promise_reject, kNoSourcePosition));
+ factory()->NewReturnStatement(reject_promise, kNoSourcePosition));
TryStatement* try_catch_statement =
factory()->NewTryCatchStatementForAsyncAwait(
inner_block, catch_scope, catch_block, kNoSourcePosition);
-
- // There is no TryCatchFinally node, so wrap it in an outer try/finally
- Block* outer_try_block = IgnoreCompletion(try_catch_statement);
-
- // finally { %AsyncFunctionPromiseRelease(.promise, can_suspend) }
- Block* finally_block;
- {
- ZonePtrList<Expression>* args =
- new (zone()) ZonePtrList<Expression>(1, zone());
- args->Add(factory()->NewVariableProxy(PromiseVariable()), zone());
- args->Add(factory()->NewBooleanLiteral(function_state_->CanSuspend(),
- kNoSourcePosition),
- zone());
- Expression* call_promise_release = factory()->NewCallRuntime(
- Context::ASYNC_FUNCTION_PROMISE_RELEASE_INDEX, args, kNoSourcePosition);
- Statement* promise_release = factory()->NewExpressionStatement(
- call_promise_release, kNoSourcePosition);
- finally_block = IgnoreCompletion(promise_release);
- }
-
- Statement* try_finally_statement = factory()->NewTryFinallyStatement(
- outer_try_block, finally_block, kNoSourcePosition);
-
- result->statements()->Add(try_finally_statement, zone());
+ result->statements()->Add(try_catch_statement, zone());
return result;
}
-Expression* Parser::BuildRejectPromise(Expression* value, int pos) {
- // %promise_internal_reject(.promise, value, false), .promise
- // Disables the additional debug event for the rejection since a debug event
- // already happened for the exception that got us here.
- ZonePtrList<Expression>* args =
- new (zone()) ZonePtrList<Expression>(3, zone());
- args->Add(factory()->NewVariableProxy(PromiseVariable()), zone());
- args->Add(value, zone());
- args->Add(factory()->NewBooleanLiteral(false, pos), zone());
- Expression* call_runtime =
- factory()->NewCallRuntime(Runtime::kInlineRejectPromise, args, pos);
- return factory()->NewBinaryOperation(
- Token::COMMA, call_runtime,
- factory()->NewVariableProxy(PromiseVariable()), pos);
-}
-
-Variable* Parser::PromiseVariable() {
- // Based on the various compilation paths, there are many different code
- // paths which may be the first to access the Promise temporary. Whichever
- // comes first should create it and stash it in the FunctionState.
- Variable* promise = function_state_->scope()->promise_var();
- if (promise == nullptr) {
- promise = function_state_->scope()->DeclarePromiseVar(
- ast_value_factory()->dot_promise_string());
- }
- return promise;
-}
-
Expression* Parser::BuildInitialYield(int pos, FunctionKind kind) {
Expression* yield_result = factory()->NewVariableProxy(
function_state_->scope()->generator_object_var());
@@ -2977,22 +2696,19 @@ Expression* Parser::BuildInitialYield(int pos, FunctionKind kind) {
Suspend::kOnExceptionThrow);
}
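BuildInitialYield emits a suspend point ahead of the user-visible body, which is why a generator runs nothing until its first next() call:

function* g() { console.log("body runs"); yield 1; }
const it = g();  // suspended at the initial yield; nothing logged yet
it.next();       // logs "body runs", then suspends at `yield 1`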
-ZonePtrList<Statement>* Parser::ParseFunction(
- const AstRawString* function_name, int pos, FunctionKind kind,
- FunctionLiteral::FunctionType function_type,
+void Parser::ParseFunction(
+ ScopedPtrList<Statement>* body, const AstRawString* function_name, int pos,
+ FunctionKind kind, FunctionLiteral::FunctionType function_type,
DeclarationScope* function_scope, int* num_parameters, int* function_length,
bool* has_duplicate_parameters, int* expected_property_count,
int* suspend_count,
- ZonePtrList<const AstRawString>* arguments_for_wrapped_function, bool* ok) {
+ ZonePtrList<const AstRawString>* arguments_for_wrapped_function) {
ParsingModeScope mode(this, allow_lazy_ ? PARSE_LAZILY : PARSE_EAGERLY);
FunctionState function_state(&function_state_, &scope_, function_scope);
bool is_wrapped = function_type == FunctionLiteral::kWrapped;
- DuplicateFinder duplicate_finder;
- ExpressionClassifier formals_classifier(this, &duplicate_finder);
-
int expected_parameters_end_pos = parameters_end_pos_;
if (expected_parameters_end_pos != kNoSourcePosition) {
// This is the first function encountered in a CreateDynamicFunction eval.
@@ -3004,173 +2720,147 @@ ZonePtrList<Statement>* Parser::ParseFunction(
ParserFormalParameters formals(function_scope);
- if (is_wrapped) {
- // For a function implicitly wrapped in function header and footer, the
- // function arguments are provided separately to the source, and are
- // declared directly here.
- int arguments_length = arguments_for_wrapped_function->length();
- for (int i = 0; i < arguments_length; i++) {
- const bool is_rest = false;
- Expression* argument = ExpressionFromIdentifier(
- arguments_for_wrapped_function->at(i), kNoSourcePosition);
- AddFormalParameter(&formals, argument, NullExpression(),
- kNoSourcePosition, is_rest);
- }
- DCHECK_EQ(arguments_length, formals.num_parameters());
- DeclareFormalParameters(formals.scope, formals.params, formals.is_simple);
- } else {
- // For a regular function, the function arguments are parsed from source.
- DCHECK_NULL(arguments_for_wrapped_function);
- ParseFormalParameterList(&formals, CHECK_OK);
- if (expected_parameters_end_pos != kNoSourcePosition) {
- // Check for '(' or ')' shenanigans in the parameter string for dynamic
- // functions.
- int position = peek_position();
- if (position < expected_parameters_end_pos) {
- ReportMessageAt(Scanner::Location(position, position + 1),
- MessageTemplate::kArgStringTerminatesParametersEarly);
- *ok = false;
- return nullptr;
- } else if (position > expected_parameters_end_pos) {
- ReportMessageAt(Scanner::Location(expected_parameters_end_pos - 2,
- expected_parameters_end_pos),
- MessageTemplate::kUnexpectedEndOfArgString);
- *ok = false;
- return nullptr;
+ {
+ ParameterDeclarationParsingScope formals_scope(this);
+ if (is_wrapped) {
+ // For a function implicitly wrapped in function header and footer, the
+ // function arguments are provided separately to the source, and are
+ // declared directly here.
+ int arguments_length = arguments_for_wrapped_function->length();
+ for (int i = 0; i < arguments_length; i++) {
+ const bool is_rest = false;
+ Expression* argument = ExpressionFromIdentifier(
+ arguments_for_wrapped_function->at(i), kNoSourcePosition);
+ AddFormalParameter(&formals, argument, NullExpression(),
+ kNoSourcePosition, is_rest);
}
- }
- Expect(Token::RPAREN, CHECK_OK);
- int formals_end_position = scanner()->location().end_pos;
+ DCHECK_EQ(arguments_length, formals.num_parameters());
+ DeclareFormalParameters(&formals);
+ } else {
+ // For a regular function, the function arguments are parsed from source.
+ DCHECK_NULL(arguments_for_wrapped_function);
+ ParseFormalParameterList(&formals);
+ if (expected_parameters_end_pos != kNoSourcePosition) {
+ // Check for '(' or ')' shenanigans in the parameter string for dynamic
+ // functions.
+ int position = peek_position();
+ if (position < expected_parameters_end_pos) {
+ ReportMessageAt(Scanner::Location(position, position + 1),
+ MessageTemplate::kArgStringTerminatesParametersEarly);
+ return;
+ } else if (position > expected_parameters_end_pos) {
+ ReportMessageAt(Scanner::Location(expected_parameters_end_pos - 2,
+ expected_parameters_end_pos),
+ MessageTemplate::kUnexpectedEndOfArgString);
+ return;
+ }
+ }
+ Expect(Token::RPAREN);
+ int formals_end_position = scanner()->location().end_pos;
- CheckArityRestrictions(formals.arity, kind, formals.has_rest,
- function_scope->start_position(),
- formals_end_position, CHECK_OK);
- Expect(Token::LBRACE, CHECK_OK);
+ CheckArityRestrictions(formals.arity, kind, formals.has_rest,
+ function_scope->start_position(),
+ formals_end_position);
+ Expect(Token::LBRACE);
+ }
+ formals.duplicate_loc = formals_scope.duplicate_location();
}
+
*num_parameters = formals.num_parameters();
*function_length = formals.function_length;
- ZonePtrList<Statement>* body = new (zone()) ZonePtrList<Statement>(8, zone());
+ AcceptINScope scope(this, true);
ParseFunctionBody(body, function_name, pos, formals, kind, function_type,
- FunctionBodyType::kBlock, true, ok);
+ FunctionBodyType::kBlock);
- // Validate parameter names. We can do this only after parsing the function,
- // since the function can declare itself strict.
- const bool allow_duplicate_parameters =
- is_sloppy(function_scope->language_mode()) && formals.is_simple &&
- !IsConciseMethod(kind);
- ValidateFormalParameters(function_scope->language_mode(),
- allow_duplicate_parameters, CHECK_OK);
-
- RewriteDestructuringAssignments();
-
- *has_duplicate_parameters =
- !classifier()->is_valid_formal_parameter_list_without_duplicates();
+ *has_duplicate_parameters = formals.has_duplicate();
*expected_property_count = function_state.expected_property_count();
*suspend_count = function_state.suspend_count();
- return body;
}
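The expected_parameters_end_pos check above is what rejects parameter strings that try to close the synthesized function header early. A hedged example of the observable behavior (error type as thrown by V8):

new Function("a", "return a");  // fine
try {
  // Parameter string terminates the header before the expected position:
  new Function("a) { return 0; } (function(", "return a");
} catch (e) {
  console.log(e instanceof SyntaxError);  // true in V8
}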
void Parser::DeclareClassVariable(const AstRawString* name,
- ClassInfo* class_info, int class_token_pos,
- bool* ok) {
+ ClassInfo* class_info, int class_token_pos) {
#ifdef DEBUG
scope()->SetScopeName(name);
#endif
if (name != nullptr) {
- VariableProxy* proxy = factory()->NewVariableProxy(name, NORMAL_VARIABLE);
- Declaration* declaration =
- factory()->NewVariableDeclaration(proxy, class_token_pos);
- class_info->variable = Declare(
- declaration, DeclarationDescriptor::NORMAL, VariableMode::kConst,
- Variable::DefaultInitializationFlag(VariableMode::kConst), ok);
+ VariableProxy* proxy =
+ DeclareVariable(name, VariableMode::kConst, class_token_pos);
+ class_info->variable = proxy->var();
}
}
// TODO(gsathya): Ideally, this should just bypass scope analysis and
// allocate a slot directly on the context. We should just store this
// index in the AST, instead of storing the variable.
-Variable* Parser::CreateSyntheticContextVariable(const AstRawString* name,
- bool* ok) {
- VariableProxy* proxy = factory()->NewVariableProxy(name, NORMAL_VARIABLE);
- Declaration* declaration =
- factory()->NewVariableDeclaration(proxy, kNoSourcePosition);
- Variable* var = Declare(
- declaration, DeclarationDescriptor::NORMAL, VariableMode::kConst,
- Variable::DefaultInitializationFlag(VariableMode::kConst), CHECK_OK);
- var->ForceContextAllocation();
- return var;
+Variable* Parser::CreateSyntheticContextVariable(const AstRawString* name) {
+ VariableProxy* proxy =
+ DeclareVariable(name, VariableMode::kConst, kNoSourcePosition);
+ proxy->var()->ForceContextAllocation();
+ return proxy->var();
}
-// This method declares a property of the given class. It updates the
-// following fields of class_info, as appropriate:
-// - constructor
-// - properties
-void Parser::DeclareClassProperty(const AstRawString* class_name,
- ClassLiteralProperty* property,
- const AstRawString* property_name,
- ClassLiteralProperty::Kind kind,
- bool is_static, bool is_constructor,
- bool is_computed_name, ClassInfo* class_info,
- bool* ok) {
- if (is_constructor) {
- DCHECK(!class_info->constructor);
- class_info->constructor = property->value()->AsFunctionLiteral();
- DCHECK_NOT_NULL(class_info->constructor);
- class_info->constructor->set_raw_name(
- class_name != nullptr ? ast_value_factory()->NewConsString(class_name)
- : nullptr);
- return;
- }
-
- if (kind != ClassLiteralProperty::PUBLIC_FIELD &&
- kind != ClassLiteralProperty::PRIVATE_FIELD) {
- class_info->properties->Add(property, zone());
- return;
- }
-
+void Parser::DeclareClassField(ClassLiteralProperty* property,
+ const AstRawString* property_name,
+ bool is_static, bool is_computed_name,
+ bool is_private, ClassInfo* class_info) {
DCHECK(allow_harmony_public_fields() || allow_harmony_private_fields());
if (is_static) {
- DCHECK(allow_harmony_static_fields());
- DCHECK_EQ(kind, ClassLiteralProperty::PUBLIC_FIELD);
class_info->static_fields->Add(property, zone());
} else {
class_info->instance_fields->Add(property, zone());
}
+ DCHECK_IMPLIES(is_computed_name, !is_private);
if (is_computed_name) {
- DCHECK_EQ(kind, ClassLiteralProperty::PUBLIC_FIELD);
// We create a synthetic variable name here so that scope
// analysis doesn't dedupe the vars.
- Variable* computed_name_var = CreateSyntheticContextVariable(
- ClassFieldVariableName(ast_value_factory(),
- class_info->computed_field_count),
- CHECK_OK_VOID);
+ Variable* computed_name_var =
+ CreateSyntheticContextVariable(ClassFieldVariableName(
+ ast_value_factory(), class_info->computed_field_count));
property->set_computed_name_var(computed_name_var);
class_info->properties->Add(property, zone());
+ } else if (is_private) {
+ Variable* private_name_var = CreateSyntheticContextVariable(property_name);
+ private_name_var->set_initializer_position(property->value()->position());
+ property->set_private_name_var(private_name_var);
+ class_info->properties->Add(property, zone());
}
+}
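The three paths through DeclareClassField map onto the three field shapes below; a hedged sketch (class fields may still sit behind --harmony flags at this V8 version):

const key = Symbol("k");
class C {
  static s = 1;  // static_fields list
  [key] = 2;     // computed name: a synthetic variable stores the key
  #p = 3;        // private: a synthetic variable stores the private name
  getP() { return this.#p; }
}
console.log(C.s, new C()[key], new C().getP()); // 1 2 3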
- if (kind == ClassLiteralProperty::PRIVATE_FIELD) {
- Variable* private_field_name_var =
- CreateSyntheticContextVariable(property_name, CHECK_OK_VOID);
- property->set_private_field_name_var(private_field_name_var);
- class_info->properties->Add(property, zone());
+// This method declares a property of the given class. It updates the
+// following fields of class_info, as appropriate:
+// - constructor
+// - properties
+void Parser::DeclareClassProperty(const AstRawString* class_name,
+ ClassLiteralProperty* property,
+ bool is_constructor, ClassInfo* class_info) {
+ if (is_constructor) {
+ DCHECK(!class_info->constructor);
+ class_info->constructor = property->value()->AsFunctionLiteral();
+ DCHECK_NOT_NULL(class_info->constructor);
+ class_info->constructor->set_raw_name(
+ class_name != nullptr ? ast_value_factory()->NewConsString(class_name)
+ : nullptr);
+ return;
}
+
+ class_info->properties->Add(property, zone());
}
FunctionLiteral* Parser::CreateInitializerFunction(
const char* name, DeclarationScope* scope,
ZonePtrList<ClassLiteral::Property>* fields) {
DCHECK_EQ(scope->function_kind(),
- FunctionKind::kClassFieldsInitializerFunction);
+ FunctionKind::kClassMembersInitializerFunction);
// function() { .. class fields initializer .. }
- ZonePtrList<Statement>* statements = NewStatementList(1);
- InitializeClassFieldsStatement* static_fields =
- factory()->NewInitializeClassFieldsStatement(fields, kNoSourcePosition);
- statements->Add(static_fields, zone());
+ ScopedPtrList<Statement> statements(pointer_buffer());
+ InitializeClassMembersStatement* static_fields =
+ factory()->NewInitializeClassMembersStatement(fields, kNoSourcePosition);
+ statements.Add(static_fields);
return factory()->NewFunctionLiteral(
ast_value_factory()->GetOneByteString(name), scope, statements, 0, 0, 0,
FunctionLiteral::kNoDuplicateParameters,
@@ -3190,7 +2880,7 @@ FunctionLiteral* Parser::CreateInitializerFunction(
Expression* Parser::RewriteClassLiteral(Scope* block_scope,
const AstRawString* name,
ClassInfo* class_info, int pos,
- int end_pos, bool* ok) {
+ int end_pos) {
DCHECK_NOT_NULL(block_scope);
DCHECK_EQ(block_scope->scope_type(), BLOCK_SCOPE);
DCHECK_EQ(block_scope->language_mode(), LanguageMode::kStrict);
@@ -3214,18 +2904,18 @@ Expression* Parser::RewriteClassLiteral(Scope* block_scope,
class_info->static_fields);
}
- FunctionLiteral* instance_fields_initializer_function = nullptr;
- if (class_info->has_instance_class_fields) {
- instance_fields_initializer_function = CreateInitializerFunction(
- "<instance_fields_initializer>", class_info->instance_fields_scope,
+ FunctionLiteral* instance_members_initializer_function = nullptr;
+ if (class_info->has_instance_members) {
+ instance_members_initializer_function = CreateInitializerFunction(
+ "<instance_members_initializer>", class_info->instance_members_scope,
class_info->instance_fields);
- class_info->constructor->set_requires_instance_fields_initializer(true);
+ class_info->constructor->set_requires_instance_members_initializer(true);
}
ClassLiteral* class_literal = factory()->NewClassLiteral(
block_scope, class_info->variable, class_info->extends,
class_info->constructor, class_info->properties,
- static_fields_initializer, instance_fields_initializer_function, pos,
+ static_fields_initializer, instance_members_initializer_function, pos,
end_pos, class_info->has_name_static_property,
class_info->has_static_computed_names, class_info->is_anonymous);
@@ -3233,18 +2923,18 @@ Expression* Parser::RewriteClassLiteral(Scope* block_scope,
return class_literal;
}
-void Parser::CheckConflictingVarDeclarations(Scope* scope, bool* ok) {
+void Parser::CheckConflictingVarDeclarations(Scope* scope) {
+ if (has_error()) return;
Declaration* decl = scope->CheckConflictingVarDeclarations();
if (decl != nullptr) {
// In ES6, conflicting variable bindings are early errors.
- const AstRawString* name = decl->proxy()->raw_name();
- int position = decl->proxy()->position();
+ const AstRawString* name = decl->var()->raw_name();
+ int position = decl->position();
Scanner::Location location =
position == kNoSourcePosition
? Scanner::Location::invalid()
: Scanner::Location(position, position + 1);
ReportMessageAt(location, MessageTemplate::kVarRedeclaration, name);
- *ok = false;
}
}
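The conflict being reported is the ES2015 early error for a var binding that collides with a lexical one; duplicate var/var declarations remain legal:

// SyntaxError cases (shown as comments so the snippet stays runnable):
//   let x; var x;            // var redeclares a lexical binding
//   { let y; { var y; } }    // var hoists past the enclosing let
var z; var z;                 // legal: var/var duplication is allowed
console.log(typeof z);        // "undefined"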
@@ -3255,7 +2945,7 @@ bool Parser::IsPropertyWithPrivateFieldKey(Expression* expression) {
if (!property->key()->IsVariableProxy()) return false;
VariableProxy* key = property->key()->AsVariableProxy();
- return key->is_private_field();
+ return key->IsPrivateName();
}
void Parser::InsertShadowingVarBindingInitializers(Block* inner_block) {
@@ -3267,11 +2957,11 @@ void Parser::InsertShadowingVarBindingInitializers(Block* inner_block) {
DCHECK(function_scope->is_function_scope());
BlockState block_state(&scope_, inner_scope);
for (Declaration* decl : *inner_scope->declarations()) {
- if (decl->proxy()->var()->mode() != VariableMode::kVar ||
+ if (decl->var()->mode() != VariableMode::kVar ||
!decl->IsVariableDeclaration()) {
continue;
}
- const AstRawString* name = decl->proxy()->raw_name();
+ const AstRawString* name = decl->var()->raw_name();
Variable* parameter = function_scope->LookupLocal(name);
if (parameter == nullptr) continue;
VariableProxy* to = NewUnresolved(name);
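These shadowing initializers are observable whenever a non-simple parameter list forces the body's vars into an inner scope; the inserted assignment copies the parameter's value:

function f(x, y = 0) {   // default value => non-simple parameter list
  var x;                 // inner-scope var shadowing parameter x
  return x;              // 7, because the parser inserted x = <param x>
}
console.log(f(7));       // 7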
@@ -3305,9 +2995,7 @@ bool Parser::TargetStackContainsLabel(const AstRawString* label) {
return false;
}
-
-BreakableStatement* Parser::LookupBreakTarget(const AstRawString* label,
- bool* ok) {
+BreakableStatement* Parser::LookupBreakTarget(const AstRawString* label) {
bool anonymous = label == nullptr;
for (ParserTarget* t = target_stack_; t != nullptr; t = t->previous()) {
BreakableStatement* stat = t->statement();
@@ -3319,9 +3007,7 @@ BreakableStatement* Parser::LookupBreakTarget(const AstRawString* label,
return nullptr;
}
-
-IterationStatement* Parser::LookupContinueTarget(const AstRawString* label,
- bool* ok) {
+IterationStatement* Parser::LookupContinueTarget(const AstRawString* label) {
bool anonymous = label == nullptr;
for (ParserTarget* t = target_stack_; t != nullptr; t = t->previous()) {
IterationStatement* stat = t->statement()->AsIterationStatement();
@@ -3336,7 +3022,6 @@ IterationStatement* Parser::LookupContinueTarget(const AstRawString* label,
return nullptr;
}
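Both lookups walk the target stack for a statement carrying the label; in source terms:

outer: for (let i = 0; i < 2; i++) {
  for (let j = 0; j < 2; j++) {
    if (j === 1) continue outer;  // resolved by LookupContinueTarget
    if (i === 1) break outer;     // resolved by LookupBreakTarget
    console.log(i, j);            // logs only: 0 0
  }
}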
-
void Parser::HandleSourceURLComments(Isolate* isolate, Handle<Script> script) {
Handle<String> source_url = scanner_.SourceUrl(isolate);
if (!source_url.is_null()) {
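The magic comment being picked up here is the standard //# sourceURL annotation; a hedged example of its effect (the exact stack-frame text is V8-specific):

try {
  eval('throw new Error("boom");\n//# sourceURL=generated.js');
} catch (e) {
  console.log(e.stack.includes("generated.js")); // true in V8
}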
@@ -3444,59 +3129,57 @@ Expression* Parser::CloseTemplateLiteral(TemplateLiteralState* state, int start,
factory()->NewGetTemplateObject(cooked_strings, raw_strings, pos);
// Call TagFn
- ZonePtrList<Expression>* call_args =
- new (zone()) ZonePtrList<Expression>(expressions->length() + 1, zone());
- call_args->Add(template_object, zone());
- call_args->AddAll(*expressions, zone());
+ ScopedPtrList<Expression> call_args(pointer_buffer());
+ call_args.Add(template_object);
+ call_args.AddAll(*expressions);
return factory()->NewTaggedTemplate(tag, call_args, pos);
}
}
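At the language level, the template object passed as the first call argument is the strings array (with its raw view), followed by the substitution expressions:

function tag(strings, ...subs) {
  return strings.raw.join("|") + " / " + subs.join(",");
}
console.log(tag`a${1}b${2}c`); // "a|b|c / 1,2"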
namespace {
-bool OnlyLastArgIsSpread(ZonePtrList<Expression>* args) {
- for (int i = 0; i < args->length() - 1; i++) {
- if (args->at(i)->IsSpread()) {
+bool OnlyLastArgIsSpread(const ScopedPtrList<Expression>& args) {
+ for (int i = 0; i < args.length() - 1; i++) {
+ if (args.at(i)->IsSpread()) {
return false;
}
}
- return args->at(args->length() - 1)->IsSpread();
+ return args.at(args.length() - 1)->IsSpread();
}
} // namespace
ArrayLiteral* Parser::ArrayLiteralFromListWithSpread(
- ZonePtrList<Expression>* list) {
+ const ScopedPtrList<Expression>& list) {
// If there's only a single spread argument, a fast path using CallWithSpread
// is taken.
- DCHECK_LT(1, list->length());
+ DCHECK_LT(1, list.length());
// The arguments of the spread call become a single ArrayLiteral.
int first_spread = 0;
- for (; first_spread < list->length() && !list->at(first_spread)->IsSpread();
+ for (; first_spread < list.length() && !list.at(first_spread)->IsSpread();
++first_spread) {
}
- DCHECK_LT(first_spread, list->length());
+ DCHECK_LT(first_spread, list.length());
return factory()->NewArrayLiteral(list, first_spread, kNoSourcePosition);
}
Expression* Parser::SpreadCall(Expression* function,
- ZonePtrList<Expression>* args_list, int pos,
- Call::PossiblyEval is_possibly_eval) {
+ const ScopedPtrList<Expression>& args_list,
+ int pos, Call::PossiblyEval is_possibly_eval) {
// Handle this case in BytecodeGenerator.
if (OnlyLastArgIsSpread(args_list) || function->IsSuperCallReference()) {
return factory()->NewCall(function, args_list, pos);
}
- ZonePtrList<Expression>* args =
- new (zone()) ZonePtrList<Expression>(3, zone());
+ ScopedPtrList<Expression> args(pointer_buffer());
if (function->IsProperty()) {
// Method calls
if (function->AsProperty()->IsSuperAccess()) {
Expression* home = ThisExpression(kNoSourcePosition);
- args->Add(function, zone());
- args->Add(home, zone());
+ args.Add(function);
+ args.Add(home);
} else {
Variable* temp = NewTemporary(ast_value_factory()->empty_string());
VariableProxy* obj = factory()->NewVariableProxy(temp);
@@ -3504,29 +3187,29 @@ Expression* Parser::SpreadCall(Expression* function,
Token::ASSIGN, obj, function->AsProperty()->obj(), kNoSourcePosition);
function = factory()->NewProperty(
assign_obj, function->AsProperty()->key(), kNoSourcePosition);
- args->Add(function, zone());
+ args.Add(function);
obj = factory()->NewVariableProxy(temp);
- args->Add(obj, zone());
+ args.Add(obj);
}
} else {
// Non-method calls
- args->Add(function, zone());
- args->Add(factory()->NewUndefinedLiteral(kNoSourcePosition), zone());
+ args.Add(function);
+ args.Add(factory()->NewUndefinedLiteral(kNoSourcePosition));
}
- args->Add(ArrayLiteralFromListWithSpread(args_list), zone());
+ args.Add(ArrayLiteralFromListWithSpread(args_list));
return factory()->NewCallRuntime(Context::REFLECT_APPLY_INDEX, args, pos);
}
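When a spread appears anywhere but the sole last position, the call is routed through Reflect.apply with the arguments collapsed into one array literal; a source-level equivalent:

function f(...xs) { return xs.join(""); }
const a = ["x", "y"];
// f(...a, "z") takes this path and is equivalent to:
console.log(Reflect.apply(f, undefined, [...a, "z"])); // "xyz"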
Expression* Parser::SpreadCallNew(Expression* function,
- ZonePtrList<Expression>* args_list, int pos) {
+ const ScopedPtrList<Expression>& args_list,
+ int pos) {
if (OnlyLastArgIsSpread(args_list)) {
// Handle in BytecodeGenerator.
return factory()->NewCallNew(function, args_list, pos);
}
- ZonePtrList<Expression>* args =
- new (zone()) ZonePtrList<Expression>(2, zone());
- args->Add(function, zone());
- args->Add(ArrayLiteralFromListWithSpread(args_list), zone());
+ ScopedPtrList<Expression> args(pointer_buffer());
+ args.Add(function);
+ args.Add(ArrayLiteralFromListWithSpread(args_list));
return factory()->NewCallRuntime(Context::REFLECT_CONSTRUCT_INDEX, args, pos);
}
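The construct case is the same rewrite aimed at Reflect.construct:

class P { constructor(...xs) { this.xs = xs; } }
const a = [1, 2];
// new P(...a, 3) takes this path and is equivalent to:
console.log(Reflect.construct(P, [...a, 3]).xs); // [ 1, 2, 3 ]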
@@ -3549,27 +3232,34 @@ void Parser::SetAsmModule() {
// incremented after parsing is done.
++use_counts_[v8::Isolate::kUseAsm];
DCHECK(scope()->is_declaration_scope());
- scope()->AsDeclarationScope()->set_asm_module();
+ scope()->AsDeclarationScope()->set_is_asm_module();
+ info_->set_contains_asm_module(true);
}
-Expression* Parser::ExpressionListToExpression(ZonePtrList<Expression>* args) {
- Expression* expr = args->at(0);
- for (int i = 1; i < args->length(); ++i) {
- expr = factory()->NewBinaryOperation(Token::COMMA, expr, args->at(i),
- expr->position());
+Expression* Parser::ExpressionListToExpression(
+ const ScopedPtrList<Expression>& args) {
+ Expression* expr = args.at(0);
+ if (args.length() == 1) return expr;
+ if (args.length() == 2) {
+ return factory()->NewBinaryOperation(Token::COMMA, expr, args.at(1),
+ args.at(1)->position());
+ }
+ NaryOperation* result =
+ factory()->NewNaryOperation(Token::COMMA, expr, args.length() - 1);
+ for (int i = 1; i < args.length(); i++) {
+ result->AddSubsequent(args.at(i), args.at(i)->position());
}
- return expr;
+ return result;
}
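The NaryOperation keeps comma sequences flat instead of left-nested; semantics are unchanged: operands evaluate left to right and the last one is the result:

const log = [];
const r = (log.push("a"), log.push("b"), "last");
console.log(r, log); // "last" [ "a", "b" ]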
// This method completes the desugaring of the body of async_function.
-void Parser::RewriteAsyncFunctionBody(ZonePtrList<Statement>* body,
- Block* block, Expression* return_value,
- bool* ok) {
+void Parser::RewriteAsyncFunctionBody(ScopedPtrList<Statement>* body,
+ Block* block, Expression* return_value) {
// function async_function() {
- // .generator_object = %CreateJSGeneratorObject();
+ // .generator_object = %_AsyncFunctionEnter();
// BuildRejectPromiseOnException({
// ... block ...
- // return %ResolvePromise(.promise, expr), .promise;
+ // return %_AsyncFunctionResolve(.generator_object, expr);
// })
// }
@@ -3577,40 +3267,13 @@ void Parser::RewriteAsyncFunctionBody(ZonePtrList<Statement>* body,
return_value, return_value->position()),
zone());
block = BuildRejectPromiseOnException(block);
- body->Add(block, zone());
-}
-
-void Parser::RewriteDestructuringAssignments() {
- const auto& assignments =
- function_state_->destructuring_assignments_to_rewrite();
- auto it = assignments.rbegin();
- for (; it != assignments.rend(); ++it) {
- // Rewrite list in reverse, so that nested assignment patterns are rewritten
- // correctly.
- RewritableExpression* to_rewrite = *it;
- DCHECK_NOT_NULL(to_rewrite);
- if (!to_rewrite->is_rewritten()) {
- // Since this function is called at the end of parsing the program,
- // pair.scope may already have been removed by FinalizeBlockScope in the
- // meantime.
- Scope* scope = to_rewrite->scope()->GetUnremovedScope();
- // Scope at the time of the rewriting and the original parsing
- // should be in the same function.
- DCHECK(scope->GetClosureScope() == scope_->GetClosureScope());
- BlockState block_state(&scope_, scope);
- RewriteDestructuringAssignment(to_rewrite);
- }
- }
-}
-
-void Parser::QueueDestructuringAssignmentForRewriting(
- RewritableExpression* expr) {
- function_state_->AddDestructuringAssignment(expr);
+ body->Add(block);
}
void Parser::SetFunctionNameFromPropertyName(LiteralProperty* property,
const AstRawString* name,
const AstRawString* prefix) {
+ if (has_error()) return;
  // Ensure that the function we are going to create has a shared name iff
  // we are not going to set it later.
if (property->NeedsSetFunctionName()) {
@@ -3636,7 +3299,7 @@ void Parser::SetFunctionNameFromPropertyName(ObjectLiteralProperty* property,
// Ignore "__proto__" as a name when it's being used to set the [[Prototype]]
// of an object literal.
// See ES #sec-__proto__-property-names-in-object-initializers.
- if (property->IsPrototype()) return;
+ if (property->IsPrototype() || has_error()) return;
DCHECK(!property->value()->IsAnonymousFunctionDefinition() ||
property->kind() == ObjectLiteralProperty::COMPUTED);
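The NeedsSetFunctionName and __proto__ handling corresponds to the usual name-inference rules for literals:

const o = {
  f: function () {},         // inferred name "f"
  g: () => {},               // inferred name "g"
  __proto__: function () {}  // sets [[Prototype]]; no name inference
};
console.log(o.f.name, o.g.name); // "f" "g"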
@@ -3691,440 +3354,10 @@ Statement* Parser::CheckCallable(Variable* var, Expression* error, int pos) {
Statement* throw_call = factory()->NewExpressionStatement(error, pos);
validate_var = factory()->NewIfStatement(
- condition, factory()->NewEmptyStatement(nopos), throw_call, nopos);
+ condition, factory()->EmptyStatement(), throw_call, nopos);
}
return validate_var;
}
-void Parser::BuildIteratorClose(ZonePtrList<Statement>* statements,
- Variable* iterator, Variable* input,
- Variable* var_output, IteratorType type) {
- //
- // This function adds four statements to [statements], corresponding to the
- // following code:
- //
- // let iteratorReturn = iterator.return;
-  //   if (IS_NULL_OR_UNDEFINED(iteratorReturn)) {
- // return {value: input, done: true};
- // }
- // output = %_Call(iteratorReturn, iterator, input);
-  //   if (!IS_RECEIVER(output)) %ThrowIteratorResultNotAnObject(output);
- //
-
- const int nopos = kNoSourcePosition;
-
- // let iteratorReturn = iterator.return;
- Variable* var_return = var_output; // Reusing the output variable.
- Statement* get_return;
- {
- Expression* iterator_proxy = factory()->NewVariableProxy(iterator);
- Expression* literal = factory()->NewStringLiteral(
- ast_value_factory()->return_string(), nopos);
- Expression* property =
- factory()->NewProperty(iterator_proxy, literal, nopos);
- Expression* return_proxy = factory()->NewVariableProxy(var_return);
- Expression* assignment =
- factory()->NewAssignment(Token::ASSIGN, return_proxy, property, nopos);
- get_return = factory()->NewExpressionStatement(assignment, nopos);
- }
-
-  // if (IS_NULL_OR_UNDEFINED(iteratorReturn)) {
- // return {value: input, done: true};
- // }
- Statement* check_return;
- {
- Expression* condition = factory()->NewCompareOperation(
- Token::EQ, factory()->NewVariableProxy(var_return),
- factory()->NewNullLiteral(nopos), nopos);
-
- Expression* value = factory()->NewVariableProxy(input);
-
- Statement* return_input = BuildReturnStatement(value, nopos);
-
- check_return = factory()->NewIfStatement(
- condition, return_input, factory()->NewEmptyStatement(nopos), nopos);
- }
-
- // output = %_Call(iteratorReturn, iterator, input);
- Statement* call_return;
- {
- auto args = new (zone()) ZonePtrList<Expression>(3, zone());
- args->Add(factory()->NewVariableProxy(var_return), zone());
- args->Add(factory()->NewVariableProxy(iterator), zone());
- args->Add(factory()->NewVariableProxy(input), zone());
-
- Expression* call =
- factory()->NewCallRuntime(Runtime::kInlineCall, args, nopos);
- if (type == IteratorType::kAsync) {
- function_state_->AddSuspend();
- call = factory()->NewAwait(call, nopos);
- }
- Expression* output_proxy = factory()->NewVariableProxy(var_output);
- Expression* assignment =
- factory()->NewAssignment(Token::ASSIGN, output_proxy, call, nopos);
- call_return = factory()->NewExpressionStatement(assignment, nopos);
- }
-
- // if (!IS_RECEIVER(output)) %ThrowIteratorResultNotAnObject(output);
- Statement* validate_output;
- {
- Expression* is_receiver_call;
- {
- auto args = new (zone()) ZonePtrList<Expression>(1, zone());
- args->Add(factory()->NewVariableProxy(var_output), zone());
- is_receiver_call =
- factory()->NewCallRuntime(Runtime::kInlineIsJSReceiver, args, nopos);
- }
-
- Statement* throw_call;
- {
- auto args = new (zone()) ZonePtrList<Expression>(1, zone());
- args->Add(factory()->NewVariableProxy(var_output), zone());
- Expression* call = factory()->NewCallRuntime(
- Runtime::kThrowIteratorResultNotAnObject, args, nopos);
- throw_call = factory()->NewExpressionStatement(call, nopos);
- }
-
- validate_output = factory()->NewIfStatement(
- is_receiver_call, factory()->NewEmptyStatement(nopos), throw_call,
- nopos);
- }
-
- statements->Add(get_return, zone());
- statements->Add(check_return, zone());
- statements->Add(call_return, zone());
- statements->Add(validate_output, zone());
-}
-
-void Parser::FinalizeIteratorUse(Variable* completion, Expression* condition,
- Variable* iter, Block* iterator_use,
- Block* target, IteratorType type) {
- //
- // This function adds two statements to [target], corresponding to the
- // following code:
- //
- // completion = kNormalCompletion;
- // try {
- // try {
- // iterator_use
- // } catch(e) {
- // if (completion === kAbruptCompletion) completion = kThrowCompletion;
- // %ReThrow(e);
- // }
- // } finally {
- // if (condition) {
- // #BuildIteratorCloseForCompletion(iter, completion)
- // }
- // }
- //
-
- const int nopos = kNoSourcePosition;
-
- // completion = kNormalCompletion;
- Statement* initialize_completion;
- {
- Expression* proxy = factory()->NewVariableProxy(completion);
- Expression* assignment = factory()->NewAssignment(
- Token::ASSIGN, proxy,
- factory()->NewSmiLiteral(Parser::kNormalCompletion, nopos), nopos);
- initialize_completion =
- factory()->NewExpressionStatement(assignment, nopos);
- }
-
- // if (completion === kAbruptCompletion) completion = kThrowCompletion;
- Statement* set_completion_throw;
- {
- Expression* condition = factory()->NewCompareOperation(
- Token::EQ_STRICT, factory()->NewVariableProxy(completion),
- factory()->NewSmiLiteral(Parser::kAbruptCompletion, nopos), nopos);
-
- Expression* proxy = factory()->NewVariableProxy(completion);
- Expression* assignment = factory()->NewAssignment(
- Token::ASSIGN, proxy,
- factory()->NewSmiLiteral(Parser::kThrowCompletion, nopos), nopos);
- Statement* statement = factory()->NewExpressionStatement(assignment, nopos);
- set_completion_throw = factory()->NewIfStatement(
- condition, statement, factory()->NewEmptyStatement(nopos), nopos);
- }
-
- // if (condition) {
- // #BuildIteratorCloseForCompletion(iter, completion)
- // }
- Block* maybe_close;
- {
- Block* block = factory()->NewBlock(2, true);
- Expression* proxy = factory()->NewVariableProxy(completion);
- BuildIteratorCloseForCompletion(block->statements(), iter, proxy, type);
- DCHECK_EQ(block->statements()->length(), 2);
-
- maybe_close = IgnoreCompletion(factory()->NewIfStatement(
- condition, block, factory()->NewEmptyStatement(nopos), nopos));
- }
-
- // try { #try_block }
- // catch(e) {
- // #set_completion_throw;
- // %ReThrow(e);
- // }
- Statement* try_catch;
- {
- Scope* catch_scope = NewHiddenCatchScope();
-
- Statement* rethrow;
- // We use %ReThrow rather than the ordinary throw because we want to
- // preserve the original exception message. This is also why we create a
- // TryCatchStatementForReThrow below (which does not clear the pending
- // message), rather than a TryCatchStatement.
- {
- auto args = new (zone()) ZonePtrList<Expression>(1, zone());
- args->Add(factory()->NewVariableProxy(catch_scope->catch_variable()),
- zone());
- rethrow = factory()->NewExpressionStatement(
- factory()->NewCallRuntime(Runtime::kReThrow, args, nopos), nopos);
- }
-
- Block* catch_block = factory()->NewBlock(2, false);
- catch_block->statements()->Add(set_completion_throw, zone());
- catch_block->statements()->Add(rethrow, zone());
-
- try_catch = factory()->NewTryCatchStatementForReThrow(
- iterator_use, catch_scope, catch_block, nopos);
- }
-
- // try { #try_catch } finally { #maybe_close }
- Statement* try_finally;
- {
- Block* try_block = factory()->NewBlock(1, false);
- try_block->statements()->Add(try_catch, zone());
-
- try_finally =
- factory()->NewTryFinallyStatement(try_block, maybe_close, nopos);
- }
-
- target->statements()->Add(initialize_completion, zone());
- target->statements()->Add(try_finally, zone());
-}
-
-void Parser::BuildIteratorCloseForCompletion(ZonePtrList<Statement>* statements,
- Variable* iterator,
- Expression* completion,
- IteratorType type) {
- //
- // This function adds two statements to [statements], corresponding to the
- // following code:
- //
- // let iteratorReturn = iterator.return;
- // if (!IS_NULL_OR_UNDEFINED(iteratorReturn)) {
- // if (completion === kThrowCompletion) {
- // if (!IS_CALLABLE(iteratorReturn)) {
- // throw MakeTypeError(kReturnMethodNotCallable);
- // }
- // [if (IteratorType == kAsync)]
-  //       try { Await(%_Call(iteratorReturn, iterator)) } catch (_) { }
- // [else]
- // try { %_Call(iteratorReturn, iterator) } catch (_) { }
- // [endif]
- // } else {
- // [if (IteratorType == kAsync)]
- // let output = Await(%_Call(iteratorReturn, iterator));
- // [else]
- // let output = %_Call(iteratorReturn, iterator);
- // [endif]
- // if (!IS_RECEIVER(output)) {
-  //       %ThrowIteratorResultNotAnObject(output);
- // }
- // }
- // }
- //
-
- const int nopos = kNoSourcePosition;
- // let iteratorReturn = iterator.return;
- Variable* var_return = NewTemporary(ast_value_factory()->empty_string());
- Statement* get_return;
- {
- Expression* iterator_proxy = factory()->NewVariableProxy(iterator);
- Expression* literal = factory()->NewStringLiteral(
- ast_value_factory()->return_string(), nopos);
- Expression* property =
- factory()->NewProperty(iterator_proxy, literal, nopos);
- Expression* return_proxy = factory()->NewVariableProxy(var_return);
- Expression* assignment =
- factory()->NewAssignment(Token::ASSIGN, return_proxy, property, nopos);
- get_return = factory()->NewExpressionStatement(assignment, nopos);
- }
-
- // if (!IS_CALLABLE(iteratorReturn)) {
- // throw MakeTypeError(kReturnMethodNotCallable);
- // }
- Statement* check_return_callable;
- {
- Expression* throw_expr =
- NewThrowTypeError(MessageTemplate::kReturnMethodNotCallable,
- ast_value_factory()->empty_string(), nopos);
- check_return_callable = CheckCallable(var_return, throw_expr, nopos);
- }
-
- // try { %_Call(iteratorReturn, iterator) } catch (_) { }
- Statement* try_call_return;
- {
- auto args = new (zone()) ZonePtrList<Expression>(2, zone());
- args->Add(factory()->NewVariableProxy(var_return), zone());
- args->Add(factory()->NewVariableProxy(iterator), zone());
-
- Expression* call =
- factory()->NewCallRuntime(Runtime::kInlineCall, args, nopos);
-
- if (type == IteratorType::kAsync) {
- function_state_->AddSuspend();
- call = factory()->NewAwait(call, nopos);
- }
-
- Block* try_block = factory()->NewBlock(1, false);
- try_block->statements()->Add(factory()->NewExpressionStatement(call, nopos),
- zone());
-
- Block* catch_block = factory()->NewBlock(0, false);
- try_call_return =
- factory()->NewTryCatchStatement(try_block, nullptr, catch_block, nopos);
- }
-
- // let output = %_Call(iteratorReturn, iterator);
- // if (!IS_RECEIVER(output)) {
- // %ThrowIteratorResultNotAnObject(output);
- // }
- Block* validate_return;
- {
- Variable* var_output = NewTemporary(ast_value_factory()->empty_string());
- Statement* call_return;
- {
- auto args = new (zone()) ZonePtrList<Expression>(2, zone());
- args->Add(factory()->NewVariableProxy(var_return), zone());
- args->Add(factory()->NewVariableProxy(iterator), zone());
- Expression* call =
- factory()->NewCallRuntime(Runtime::kInlineCall, args, nopos);
- if (type == IteratorType::kAsync) {
- function_state_->AddSuspend();
- call = factory()->NewAwait(call, nopos);
- }
-
- Expression* output_proxy = factory()->NewVariableProxy(var_output);
- Expression* assignment =
- factory()->NewAssignment(Token::ASSIGN, output_proxy, call, nopos);
- call_return = factory()->NewExpressionStatement(assignment, nopos);
- }
-
- Expression* is_receiver_call;
- {
- auto args = new (zone()) ZonePtrList<Expression>(1, zone());
- args->Add(factory()->NewVariableProxy(var_output), zone());
- is_receiver_call =
- factory()->NewCallRuntime(Runtime::kInlineIsJSReceiver, args, nopos);
- }
-
- Statement* throw_call;
- {
- auto args = new (zone()) ZonePtrList<Expression>(1, zone());
- args->Add(factory()->NewVariableProxy(var_output), zone());
- Expression* call = factory()->NewCallRuntime(
- Runtime::kThrowIteratorResultNotAnObject, args, nopos);
- throw_call = factory()->NewExpressionStatement(call, nopos);
- }
-
- Statement* check_return = factory()->NewIfStatement(
- is_receiver_call, factory()->NewEmptyStatement(nopos), throw_call,
- nopos);
-
- validate_return = factory()->NewBlock(2, false);
- validate_return->statements()->Add(call_return, zone());
- validate_return->statements()->Add(check_return, zone());
- }
-
- // if (completion === kThrowCompletion) {
- // #check_return_callable;
- // #try_call_return;
- // } else {
- // #validate_return;
- // }
- Statement* call_return_carefully;
- {
- Expression* condition = factory()->NewCompareOperation(
- Token::EQ_STRICT, completion,
- factory()->NewSmiLiteral(Parser::kThrowCompletion, nopos), nopos);
-
- Block* then_block = factory()->NewBlock(2, false);
- then_block->statements()->Add(check_return_callable, zone());
- then_block->statements()->Add(try_call_return, zone());
-
- call_return_carefully = factory()->NewIfStatement(condition, then_block,
- validate_return, nopos);
- }
-
- // if (!IS_NULL_OR_UNDEFINED(iteratorReturn)) { ... }
- Statement* maybe_call_return;
- {
- Expression* condition = factory()->NewCompareOperation(
- Token::EQ, factory()->NewVariableProxy(var_return),
- factory()->NewNullLiteral(nopos), nopos);
-
- maybe_call_return = factory()->NewIfStatement(
- condition, factory()->NewEmptyStatement(nopos), call_return_carefully,
- nopos);
- }
-
- statements->Add(get_return, zone());
- statements->Add(maybe_call_return, zone());
-}
-
-Statement* Parser::FinalizeForOfStatement(ForOfStatement* loop,
- Variable* var_completion,
- IteratorType type, int pos) {
- //
- // This function replaces the loop with the following wrapping:
- //
- // completion = kNormalCompletion;
- // try {
- // try {
- // #loop;
- // } catch(e) {
- // if (completion === kAbruptCompletion) completion = kThrowCompletion;
- // %ReThrow(e);
- // }
- // } finally {
- // if (!(completion === kNormalCompletion)) {
- // #BuildIteratorCloseForCompletion(#iterator, completion)
- // }
- // }
- //
- // Note that the loop's body and its assign_each already contain appropriate
- // assignments to completion (see InitializeForOfStatement).
- //
-
- const int nopos = kNoSourcePosition;
-
- // !(completion === kNormalCompletion)
- Expression* closing_condition;
- {
- Expression* cmp = factory()->NewCompareOperation(
- Token::EQ_STRICT, factory()->NewVariableProxy(var_completion),
- factory()->NewSmiLiteral(Parser::kNormalCompletion, nopos), nopos);
- closing_condition = factory()->NewUnaryOperation(Token::NOT, cmp, nopos);
- }
-
- Block* final_loop = factory()->NewBlock(2, false);
- {
- Block* try_block = factory()->NewBlock(1, false);
- try_block->statements()->Add(loop, zone());
-
- FinalizeIteratorUse(var_completion, closing_condition, loop->iterator(),
- try_block, final_loop, type);
- }
-
- return final_loop;
-}
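A hedged reminder of the language-level behavior this removed desugaring implemented (the spec's IteratorClose), which remains observable regardless of where it is handled after this refactor:

function* g() {
  try { yield 1; yield 2; }
  finally { console.log("closed"); }
}
for (const v of g()) {
  if (v === 1) break;  // abrupt completion triggers iterator.return()
}
// logs "closed" before control leaves the loop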
-
-#undef CHECK_OK
-#undef CHECK_OK_VOID
-#undef CHECK_FAILED
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/parsing/parser.h b/deps/v8/src/parsing/parser.h
index 35de0656d3..cc0ceb2607 100644
--- a/deps/v8/src/parsing/parser.h
+++ b/deps/v8/src/parsing/parser.h
@@ -16,6 +16,7 @@
#include "src/parsing/parser-base.h"
#include "src/parsing/parsing.h"
#include "src/parsing/preparser.h"
+#include "src/pointer-with-payload.h"
#include "src/zone/zone-chunk-list.h"
namespace v8 {
@@ -24,12 +25,12 @@ class ScriptCompiler;
namespace internal {
-class ConsumedPreParsedScopeData;
+class ConsumedPreparseData;
class ParseInfo;
class ParserTarget;
class ParserTargetScope;
class PendingCompilationErrorHandler;
-class PreParsedScopeData;
+class PreparseData;
class FunctionEntry {
public:
@@ -83,33 +84,55 @@ class Parser;
struct ParserFormalParameters : FormalParametersBase {
struct Parameter : public ZoneObject {
- Parameter(const AstRawString* name, Expression* pattern,
- Expression* initializer, int position,
+ Parameter(Expression* pattern, Expression* initializer, int position,
int initializer_end_position, bool is_rest)
- : name(name),
+ : initializer_and_is_rest(initializer, is_rest),
pattern(pattern),
- initializer(initializer),
position(position),
- initializer_end_position(initializer_end_position),
- is_rest(is_rest) {}
- const AstRawString* name;
+ initializer_end_position(initializer_end_position) {}
+
+ PointerWithPayload<Expression, bool, 1> initializer_and_is_rest;
+
Expression* pattern;
- Expression* initializer;
+ Expression* initializer() const {
+ return initializer_and_is_rest.GetPointer();
+ }
int position;
int initializer_end_position;
- bool is_rest;
+ inline bool is_rest() const { return initializer_and_is_rest.GetPayload(); }
+
Parameter* next_parameter = nullptr;
bool is_simple() const {
- return pattern->IsVariableProxy() && initializer == nullptr && !is_rest;
+ return pattern->IsVariableProxy() && initializer() == nullptr &&
+ !is_rest();
+ }
+
+ const AstRawString* name() const {
+ DCHECK(is_simple());
+ return pattern->AsVariableProxy()->raw_name();
}
Parameter** next() { return &next_parameter; }
Parameter* const* next() const { return &next_parameter; }
};
+ void set_strict_parameter_error(const Scanner::Location& loc,
+ MessageTemplate message) {
+ strict_error_loc = loc;
+ strict_error_message = message;
+ }
+
+ bool has_duplicate() const { return duplicate_loc.IsValid(); }
+ void ValidateDuplicate(Parser* parser) const;
+ void ValidateStrictMode(Parser* parser) const;
+
explicit ParserFormalParameters(DeclarationScope* scope)
: FormalParametersBase(scope) {}
+
base::ThreadedList<Parameter> params;
+ Scanner::Location duplicate_loc = Scanner::Location::invalid();
+ Scanner::Location strict_error_loc = Scanner::Location::invalid();
+ MessageTemplate strict_error_message = MessageTemplate::kNone;
};
template <>
@@ -118,34 +141,32 @@ struct ParserTypes<Parser> {
typedef Parser Impl;
// Return types for traversing functions.
- typedef const AstRawString* Identifier;
- typedef v8::internal::Expression* Expression;
- typedef v8::internal::FunctionLiteral* FunctionLiteral;
- typedef ObjectLiteral::Property* ObjectLiteralProperty;
+ typedef v8::internal::Block* Block;
+ typedef v8::internal::BreakableStatement* BreakableStatement;
typedef ClassLiteral::Property* ClassLiteralProperty;
- typedef v8::internal::Suspend* Suspend;
- typedef v8::internal::RewritableExpression* RewritableExpression;
- typedef ZonePtrList<v8::internal::Expression>* ExpressionList;
- typedef ZonePtrList<ObjectLiteral::Property>* ObjectPropertyList;
typedef ZonePtrList<ClassLiteral::Property>* ClassPropertyList;
+ typedef v8::internal::Expression* Expression;
+ typedef ScopedPtrList<v8::internal::Expression> ExpressionList;
typedef ParserFormalParameters FormalParameters;
- typedef v8::internal::Statement* Statement;
- typedef ZonePtrList<v8::internal::Statement>* StatementList;
- typedef v8::internal::Block* Block;
- typedef v8::internal::BreakableStatement* BreakableStatement;
typedef v8::internal::ForStatement* ForStatement;
+ typedef v8::internal::FunctionLiteral* FunctionLiteral;
+ typedef const AstRawString* Identifier;
typedef v8::internal::IterationStatement* IterationStatement;
- typedef v8::internal::FuncNameInferrer FuncNameInferrer;
- typedef v8::internal::SourceRange SourceRange;
- typedef v8::internal::SourceRangeScope SourceRangeScope;
+ typedef ObjectLiteral::Property* ObjectLiteralProperty;
+ typedef ScopedPtrList<v8::internal::ObjectLiteralProperty> ObjectPropertyList;
+ typedef v8::internal::Statement* Statement;
+ typedef ScopedPtrList<v8::internal::Statement> StatementList;
+ typedef v8::internal::Suspend* Suspend;
// For constructing objects returned by the traversing functions.
typedef AstNodeFactory Factory;
+ // Other implementation-specific functions.
+ typedef v8::internal::FuncNameInferrer FuncNameInferrer;
+ typedef v8::internal::SourceRange SourceRange;
+ typedef v8::internal::SourceRangeScope SourceRangeScope;
typedef ParserTarget Target;
typedef ParserTargetScope TargetScope;
-
- static constexpr bool ExpressionClassifierReportErrors = true;
};
class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
@@ -173,7 +194,9 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
// their corresponding scope infos. Therefore, looking up variables in the
// deserialized scopes is not possible.
void DeserializeScopeChain(Isolate* isolate, ParseInfo* info,
- MaybeHandle<ScopeInfo> maybe_outer_scope_info);
+ MaybeHandle<ScopeInfo> maybe_outer_scope_info,
+ Scope::DeserializationMode mode =
+ Scope::DeserializationMode::kScopesOnly);
// Move statistics to Isolate
void UpdateStatistics(Isolate* isolate, Handle<Script> script);
@@ -181,8 +204,11 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
private:
friend class ParserBase<Parser>;
- friend class v8::internal::ExpressionClassifierErrorTracker<
- ParserTypes<Parser>>;
+ friend struct ParserFormalParameters;
+ friend class i::ExpressionScope<ParserTypes<Parser>>;
+ friend class i::VariableDeclarationParsingScope<ParserTypes<Parser>>;
+ friend class i::ParameterDeclarationParsingScope<ParserTypes<Parser>>;
+ friend class i::ArrowHeadParsingScope<ParserTypes<Parser>>;
friend bool v8::internal::parsing::ParseProgram(ParseInfo*, Isolate*);
friend bool v8::internal::parsing::ParseFunction(
ParseInfo*, Handle<SharedFunctionInfo> shared_info, Isolate*);
@@ -236,8 +262,8 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
// We manually construct the AST and scopes for a top-level function and the
// function wrapper.
void ParseWrapped(Isolate* isolate, ParseInfo* info,
- ZonePtrList<Statement>* body, DeclarationScope* scope,
- Zone* zone, bool* ok);
+ ScopedPtrList<Statement>* body, DeclarationScope* scope,
+ Zone* zone);
ZonePtrList<const AstRawString>* PrepareWrappedArguments(Isolate* isolate,
ParseInfo* info,
@@ -251,31 +277,33 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
parsing_module_, parsing_on_main_thread_);
#define SET_ALLOW(name) reusable_preparser_->set_allow_##name(allow_##name());
SET_ALLOW(natives);
- SET_ALLOW(harmony_do_expressions);
SET_ALLOW(harmony_public_fields);
SET_ALLOW(harmony_static_fields);
SET_ALLOW(harmony_dynamic_import);
SET_ALLOW(harmony_import_meta);
SET_ALLOW(harmony_private_fields);
+ SET_ALLOW(harmony_private_methods);
SET_ALLOW(eval_cache);
#undef SET_ALLOW
+ preparse_data_buffer_.reserve(128);
}
return reusable_preparser_;
}
- void ParseModuleItemList(ZonePtrList<Statement>* body, bool* ok);
- Statement* ParseModuleItem(bool* ok);
- const AstRawString* ParseModuleSpecifier(bool* ok);
- void ParseImportDeclaration(bool* ok);
- Statement* ParseExportDeclaration(bool* ok);
- Statement* ParseExportDefault(bool* ok);
+ void ParseModuleItemList(ScopedPtrList<Statement>* body);
+ Statement* ParseModuleItem();
+ const AstRawString* ParseModuleSpecifier();
+ void ParseImportDeclaration();
+ Statement* ParseExportDeclaration();
+ Statement* ParseExportDefault();
+ void ParseExportStar();
struct ExportClauseData {
const AstRawString* export_name;
const AstRawString* local_name;
Scanner::Location location;
};
ZoneChunkList<ExportClauseData>* ParseExportClause(
- Scanner::Location* reserved_loc, bool* ok);
+ Scanner::Location* reserved_loc);
struct NamedImport : public ZoneObject {
const AstRawString* import_name;
const AstRawString* local_name;
@@ -286,117 +314,87 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
local_name(local_name),
location(location) {}
};
- ZonePtrList<const NamedImport>* ParseNamedImports(int pos, bool* ok);
- Block* BuildInitializationBlock(DeclarationParsingResult* parsing_result,
- ZonePtrList<const AstRawString>* names,
- bool* ok);
+ ZonePtrList<const NamedImport>* ParseNamedImports(int pos);
+ Statement* BuildInitializationBlock(DeclarationParsingResult* parsing_result);
void DeclareLabel(ZonePtrList<const AstRawString>** labels,
ZonePtrList<const AstRawString>** own_labels,
- VariableProxy* expr, bool* ok);
+ VariableProxy* expr);
bool ContainsLabel(ZonePtrList<const AstRawString>* labels,
const AstRawString* label);
Expression* RewriteReturn(Expression* return_value, int pos);
Statement* RewriteSwitchStatement(SwitchStatement* switch_statement,
Scope* scope);
- void RewriteCatchPattern(CatchInfo* catch_info, bool* ok);
- void ValidateCatchBlock(const CatchInfo& catch_info, bool* ok);
+ Block* RewriteCatchPattern(CatchInfo* catch_info);
+ void ReportVarRedeclarationIn(const AstRawString* name, Scope* scope);
Statement* RewriteTryStatement(Block* try_block, Block* catch_block,
const SourceRange& catch_range,
Block* finally_block,
const SourceRange& finally_range,
const CatchInfo& catch_info, int pos);
void ParseAndRewriteGeneratorFunctionBody(int pos, FunctionKind kind,
- ZonePtrList<Statement>* body,
- bool* ok);
- void ParseAndRewriteAsyncGeneratorFunctionBody(int pos, FunctionKind kind,
- ZonePtrList<Statement>* body,
- bool* ok);
+ ScopedPtrList<Statement>* body);
+ void ParseAndRewriteAsyncGeneratorFunctionBody(
+ int pos, FunctionKind kind, ScopedPtrList<Statement>* body);
void DeclareFunctionNameVar(const AstRawString* function_name,
FunctionLiteral::FunctionType function_type,
DeclarationScope* function_scope);
Statement* DeclareFunction(const AstRawString* variable_name,
FunctionLiteral* function, VariableMode mode,
- int pos, bool is_sloppy_block_function,
- ZonePtrList<const AstRawString>* names, bool* ok);
- Variable* CreateSyntheticContextVariable(const AstRawString* synthetic_name,
- bool* ok);
+ int beg_pos, int end_pos,
+ bool is_sloppy_block_function,
+ ZonePtrList<const AstRawString>* names);
+ Variable* CreateSyntheticContextVariable(const AstRawString* synthetic_name);
FunctionLiteral* CreateInitializerFunction(
const char* name, DeclarationScope* scope,
ZonePtrList<ClassLiteral::Property>* fields);
- V8_INLINE Statement* DeclareClass(const AstRawString* variable_name,
- Expression* value,
- ZonePtrList<const AstRawString>* names,
- int class_token_pos, int end_pos, bool* ok);
- V8_INLINE void DeclareClassVariable(const AstRawString* name,
- ClassInfo* class_info,
- int class_token_pos, bool* ok);
- V8_INLINE void DeclareClassProperty(const AstRawString* class_name,
- ClassLiteralProperty* property,
- const AstRawString* property_name,
- ClassLiteralProperty::Kind kind,
- bool is_static, bool is_constructor,
- bool is_computed_name,
- ClassInfo* class_info, bool* ok);
- V8_INLINE Expression* RewriteClassLiteral(Scope* block_scope,
- const AstRawString* name,
- ClassInfo* class_info, int pos,
- int end_pos, bool* ok);
- V8_INLINE Statement* DeclareNative(const AstRawString* name, int pos,
- bool* ok);
-
- V8_INLINE Block* IgnoreCompletion(Statement* statement);
-
- V8_INLINE Scope* NewHiddenCatchScope();
+
+ bool IdentifierEquals(const AstRawString* identifier,
+ const AstRawString* other) {
+ return identifier == other;
+ }
+
+ Statement* DeclareClass(const AstRawString* variable_name, Expression* value,
+ ZonePtrList<const AstRawString>* names,
+ int class_token_pos, int end_pos);
+ void DeclareClassVariable(const AstRawString* name, ClassInfo* class_info,
+ int class_token_pos);
+ void DeclareClassProperty(const AstRawString* class_name,
+ ClassLiteralProperty* property, bool is_constructor,
+ ClassInfo* class_info);
+ void DeclareClassField(ClassLiteralProperty* property,
+ const AstRawString* property_name, bool is_static,
+ bool is_computed_name, bool is_private,
+ ClassInfo* class_info);
+ Expression* RewriteClassLiteral(Scope* block_scope, const AstRawString* name,
+ ClassInfo* class_info, int pos, int end_pos);
+ Statement* DeclareNative(const AstRawString* name, int pos);
+
+ Block* IgnoreCompletion(Statement* statement);
+
+ Scope* NewHiddenCatchScope();
// PatternRewriter and associated methods defined in pattern-rewriter.cc.
friend class PatternRewriter;
- void DeclareAndInitializeVariables(
- Block* block, const DeclarationDescriptor* declaration_descriptor,
- const DeclarationParsingResult::Declaration* declaration,
- ZonePtrList<const AstRawString>* names, bool* ok);
- void RewriteDestructuringAssignment(RewritableExpression* expr);
- Expression* RewriteDestructuringAssignment(Assignment* assignment);
-
- // [if (IteratorType == kAsync)]
-  //   !%_IsJSReceiver(result = Await(next.[[Call]](iterator, « »))) &&
- // %ThrowIteratorResultNotAnObject(result)
- // [else]
- // !%_IsJSReceiver(result = next.[[Call]](iterator, « »)) &&
- // %ThrowIteratorResultNotAnObject(result)
- // [endif]
- Expression* BuildIteratorNextResult(VariableProxy* iterator,
- VariableProxy* next, Variable* result,
- IteratorType type, int pos);
-
- // Initialize the components of a for-in / for-of statement.
- Statement* InitializeForEachStatement(ForEachStatement* stmt,
- Expression* each, Expression* subject,
- Statement* body);
- Statement* InitializeForOfStatement(ForOfStatement* stmt, Expression* each,
- Expression* iterable, Statement* body,
- bool finalize, IteratorType type,
- int next_result_pos = kNoSourcePosition);
+ void InitializeVariables(
+ ScopedPtrList<Statement>* statements, VariableKind kind,
+ const DeclarationParsingResult::Declaration* declaration);
Block* RewriteForVarInLegacy(const ForInfo& for_info);
void DesugarBindingInForEachStatement(ForInfo* for_info, Block** body_block,
- Expression** each_variable, bool* ok);
- Block* CreateForEachStatementTDZ(Block* init_block, const ForInfo& for_info,
- bool* ok);
+ Expression** each_variable);
+ Block* CreateForEachStatementTDZ(Block* init_block, const ForInfo& for_info);
Statement* DesugarLexicalBindingsInForStatement(
ForStatement* loop, Statement* init, Expression* cond, Statement* next,
- Statement* body, Scope* inner_scope, const ForInfo& for_info, bool* ok);
-
- Expression* RewriteDoExpression(Block* body, int pos, bool* ok);
+ Statement* body, Scope* inner_scope, const ForInfo& for_info);
FunctionLiteral* ParseFunctionLiteral(
const AstRawString* name, Scanner::Location function_name_location,
FunctionNameValidity function_name_validity, FunctionKind kind,
int function_token_position, FunctionLiteral::FunctionType type,
LanguageMode language_mode,
- ZonePtrList<const AstRawString>* arguments_for_wrapped_function,
- bool* ok);
+ ZonePtrList<const AstRawString>* arguments_for_wrapped_function);
ObjectLiteral* InitializeObjectLiteral(ObjectLiteral* object_literal) {
object_literal->CalculateEmitStore(main_zone());
@@ -412,7 +410,7 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
// The var declarations are hoisted to the function scope, but originate from
// a scope where the name has also been let bound or the var declaration is
// hoisted over such a scope.
- void CheckConflictingVarDeclarations(Scope* scope, bool* ok);
+ void CheckConflictingVarDeclarations(Scope* scope);
bool IsPropertyWithPrivateFieldKey(Expression* property);
@@ -423,22 +421,22 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
// Implement sloppy block-scoped functions, ES2015 Annex B 3.3
void InsertSloppyBlockFunctionVarBindings(DeclarationScope* scope);
- VariableProxy* NewUnresolved(const AstRawString* name, int begin_pos,
- VariableKind kind = NORMAL_VARIABLE);
- VariableProxy* NewUnresolved(const AstRawString* name);
- Variable* Declare(Declaration* declaration,
- DeclarationDescriptor::Kind declaration_kind,
- VariableMode mode, InitializationFlag init, bool* ok,
- Scope* declaration_scope = nullptr,
- int var_end_pos = kNoSourcePosition);
- Declaration* DeclareVariable(const AstRawString* name, VariableMode mode,
- int pos, bool* ok);
- Declaration* DeclareVariable(const AstRawString* name, VariableMode mode,
- InitializationFlag init, int pos, bool* ok);
+ VariableProxy* DeclareVariable(const AstRawString* name, VariableMode mode,
+ int pos);
+ VariableProxy* DeclareVariable(const AstRawString* name, VariableMode mode,
+ InitializationFlag init, int pos);
+ void DeclareVariable(VariableProxy* proxy, VariableKind kind,
+ VariableMode mode, InitializationFlag init,
+ Scope* declaration_scope, bool* added, int begin,
+ int end = kNoSourcePosition);
+ void Declare(Declaration* declaration, VariableProxy* proxy,
+ VariableKind kind, VariableMode mode, InitializationFlag init,
+ Scope* declaration_scope, bool* added,
+ int var_end_pos = kNoSourcePosition);
bool TargetStackContainsLabel(const AstRawString* label);
- BreakableStatement* LookupBreakTarget(const AstRawString* label, bool* ok);
- IterationStatement* LookupContinueTarget(const AstRawString* label, bool* ok);
+ BreakableStatement* LookupBreakTarget(const AstRawString* label);
+ IterationStatement* LookupContinueTarget(const AstRawString* label);
Statement* BuildAssertIsCoercible(Variable* var, ObjectLiteral* pattern);
@@ -448,8 +446,6 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
// Skip over a lazy function, either using cached data if we have it, or
// by parsing the function with PreParser. Consumes the ending }.
- // If may_abort == true, the (pre-)parser may decide to abort skipping
- // in order to force the function to be eagerly parsed, after all.
// In case the preparser detects an error it cannot identify, it resets the
// scanner- and preparser state to the initial one, before PreParsing the
// function.
@@ -460,22 +456,19 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
bool SkipFunction(const AstRawString* function_name, FunctionKind kind,
FunctionLiteral::FunctionType function_type,
DeclarationScope* function_scope, int* num_parameters,
- ProducedPreParsedScopeData** produced_preparsed_scope_data,
- bool is_inner_function, bool may_abort,
- FunctionLiteral::EagerCompileHint* hint, bool* ok);
+ ProducedPreparseData** produced_preparsed_scope_data);
Block* BuildParameterInitializationBlock(
- const ParserFormalParameters& parameters, bool* ok);
+ const ParserFormalParameters& parameters);
Block* BuildRejectPromiseOnException(Block* block);
- ZonePtrList<Statement>* ParseFunction(
- const AstRawString* function_name, int pos, FunctionKind kind,
- FunctionLiteral::FunctionType function_type,
+ void ParseFunction(
+ ScopedPtrList<Statement>* body, const AstRawString* function_name,
+ int pos, FunctionKind kind, FunctionLiteral::FunctionType function_type,
DeclarationScope* function_scope, int* num_parameters,
int* function_length, bool* has_duplicate_parameters,
int* expected_property_count, int* suspend_count,
- ZonePtrList<const AstRawString>* arguments_for_wrapped_function,
- bool* ok);
+ ZonePtrList<const AstRawString>* arguments_for_wrapped_function);
void ThrowPendingError(Isolate* isolate, Handle<Script> script);
@@ -497,7 +490,6 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
}
void AddExpression(Expression* expression, Zone* zone) {
- DCHECK_NOT_NULL(expression);
expressions_.Add(expression, zone);
}
@@ -524,60 +516,40 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
Expression* CloseTemplateLiteral(TemplateLiteralState* state, int start,
Expression* tag);
- ArrayLiteral* ArrayLiteralFromListWithSpread(ZonePtrList<Expression>* list);
- Expression* SpreadCall(Expression* function, ZonePtrList<Expression>* args,
- int pos, Call::PossiblyEval is_possibly_eval);
- Expression* SpreadCallNew(Expression* function, ZonePtrList<Expression>* args,
- int pos);
+ ArrayLiteral* ArrayLiteralFromListWithSpread(
+ const ScopedPtrList<Expression>& list);
+ Expression* SpreadCall(Expression* function,
+ const ScopedPtrList<Expression>& args, int pos,
+ Call::PossiblyEval is_possibly_eval);
+ Expression* SpreadCallNew(Expression* function,
+ const ScopedPtrList<Expression>& args, int pos);
Expression* RewriteSuperCall(Expression* call_expression);
void SetLanguageMode(Scope* scope, LanguageMode mode);
void SetAsmModule();
- // Rewrite all DestructuringAssignments in the current FunctionState.
- V8_INLINE void RewriteDestructuringAssignments();
-
Expression* RewriteSpreads(ArrayLiteral* lit);
- V8_INLINE void QueueDestructuringAssignmentForRewriting(
- RewritableExpression* assignment);
-
friend class InitializerRewriter;
void RewriteParameterInitializer(Expression* expr);
Expression* BuildInitialYield(int pos, FunctionKind kind);
Assignment* BuildCreateJSGeneratorObject(int pos, FunctionKind kind);
- Expression* BuildRejectPromise(Expression* value, int pos);
- Variable* PromiseVariable();
- Variable* AsyncGeneratorAwaitVariable();
// Generic AST generator for throwing errors from compiled code.
Expression* NewThrowError(Runtime::FunctionId function_id,
- MessageTemplate::Template message,
- const AstRawString* arg, int pos);
-
- void FinalizeIteratorUse(Variable* completion, Expression* condition,
- Variable* iter, Block* iterator_use, Block* result,
- IteratorType type);
+ MessageTemplate message, const AstRawString* arg,
+ int pos);
Statement* FinalizeForOfStatement(ForOfStatement* loop, Variable* completion,
IteratorType type, int pos);
- void BuildIteratorClose(ZonePtrList<Statement>* statements,
- Variable* iterator, Variable* input, Variable* output,
- IteratorType type);
- void BuildIteratorCloseForCompletion(ZonePtrList<Statement>* statements,
- Variable* iterator,
- Expression* completion,
- IteratorType type);
Statement* CheckCallable(Variable* var, Expression* error, int pos);
- V8_INLINE void RewriteAsyncFunctionBody(ZonePtrList<Statement>* body,
- Block* block,
- Expression* return_value, bool* ok);
+ void RewriteAsyncFunctionBody(ScopedPtrList<Statement>* body, Block* block,
+ Expression* return_value);
void AddArrowFunctionFormalParameters(ParserFormalParameters* parameters,
- Expression* params, int end_pos,
- bool* ok);
+ Expression* params, int end_pos);
void SetFunctionName(Expression* value, const AstRawString* name,
const AstRawString* prefix = nullptr);
@@ -586,6 +558,10 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
return identifier == ast_value_factory()->eval_string();
}
+ V8_INLINE bool IsAsync(const AstRawString* identifier) const {
+ return identifier == ast_value_factory()->async_string();
+ }
+
V8_INLINE bool IsArguments(const AstRawString* identifier) const {
return identifier == ast_value_factory()->arguments_string();
}
@@ -606,7 +582,6 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
// inside a variable proxy). We exclude the case of 'this', which
// has been converted to a variable proxy.
V8_INLINE static bool IsIdentifier(Expression* expression) {
- DCHECK_NOT_NULL(expression);
VariableProxy* operand = expression->AsVariableProxy();
return operand != nullptr && !operand->is_this() &&
!operand->is_new_target();
@@ -646,14 +621,6 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
return string->AsArrayIndex(index);
}
- V8_INLINE bool IsUseStrictDirective(Statement* statement) const {
- return IsStringLiteral(statement, ast_value_factory()->use_strict_string());
- }
-
- V8_INLINE bool IsUseAsmDirective(Statement* statement) const {
- return IsStringLiteral(statement, ast_value_factory()->use_asm_string());
- }
-
// Returns true if the statement is an expression statement containing
// a single string literal. If a second argument is given, the literal
// is also compared with it and the result is true only if they are equal.
@@ -711,21 +678,11 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
}
}
- // Determine if the expression is a variable proxy and mark it as being used
- // in an assignment or with a increment/decrement operator.
- V8_INLINE static void MarkExpressionAsAssigned(Expression* expression) {
- DCHECK_NOT_NULL(expression);
- if (expression->IsVariableProxy()) {
- expression->AsVariableProxy()->set_is_assigned();
- }
- }
-
// A shortcut for performing a ToString operation
V8_INLINE Expression* ToString(Expression* expr) {
if (expr->IsStringLiteral()) return expr;
- ZonePtrList<Expression>* args =
- new (zone()) ZonePtrList<Expression>(1, zone());
- args->Add(expr, zone());
+ ScopedPtrList<Expression> args(pointer_buffer());
+ args.Add(expr);
return factory()->NewCallRuntime(Runtime::kInlineToString, args,
expr->position());
}
@@ -752,8 +709,8 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
int pos);
// Generate AST node that throws a ReferenceError with the given type.
- V8_INLINE Expression* NewThrowReferenceError(
- MessageTemplate::Template message, int pos) {
+ V8_INLINE Expression* NewThrowReferenceError(MessageTemplate message,
+ int pos) {
return NewThrowError(Runtime::kNewReferenceError, message,
ast_value_factory()->empty_string(), pos);
}
@@ -761,33 +718,26 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
// Generate AST node that throws a SyntaxError with the given
// type. The first argument may be null (in the handle sense) in
// which case no arguments are passed to the constructor.
- V8_INLINE Expression* NewThrowSyntaxError(MessageTemplate::Template message,
+ V8_INLINE Expression* NewThrowSyntaxError(MessageTemplate message,
const AstRawString* arg, int pos) {
return NewThrowError(Runtime::kNewSyntaxError, message, arg, pos);
}
// Generate AST node that throws a TypeError with the given
// type. Both arguments must be non-null (in the handle sense).
- V8_INLINE Expression* NewThrowTypeError(MessageTemplate::Template message,
+ V8_INLINE Expression* NewThrowTypeError(MessageTemplate message,
const AstRawString* arg, int pos) {
return NewThrowError(Runtime::kNewTypeError, message, arg, pos);
}
// Reporting errors.
void ReportMessageAt(Scanner::Location source_location,
- MessageTemplate::Template message,
- const char* arg = nullptr,
+ MessageTemplate message, const char* arg = nullptr,
ParseErrorType error_type = kSyntaxError) {
- if (stack_overflow()) {
- // Suppress the error message (syntax error or such) in the presence of a
- // stack overflow. The isolate allows only one pending exception at a
- // time and we want to report the stack overflow later.
- return;
- }
pending_error_handler()->ReportMessageAt(source_location.beg_pos,
source_location.end_pos, message,
arg, error_type);
+ scanner_.set_parser_error();
}
// Dummy implementation. The parser should never have a unidentifiable
@@ -795,21 +745,18 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
V8_INLINE void ReportUnidentifiableError() { UNREACHABLE(); }
void ReportMessageAt(Scanner::Location source_location,
- MessageTemplate::Template message,
- const AstRawString* arg,
+ MessageTemplate message, const AstRawString* arg,
ParseErrorType error_type = kSyntaxError) {
- if (stack_overflow()) {
- // Suppress the error message (syntax error or such) in the presence of a
- // stack overflow. The isolate allows only one pending exception at a
- // time and we want to report the stack overflow later.
- return;
- }
pending_error_handler()->ReportMessageAt(source_location.beg_pos,
source_location.end_pos, message,
arg, error_type);
+ scanner_.set_parser_error();
}
+ void ReportUnexpectedTokenAt(
+ Scanner::Location location, Token::Value token,
+ MessageTemplate message = MessageTemplate::kUnexpectedToken);
+
// "null" return type creators.
V8_INLINE static std::nullptr_t NullIdentifier() { return nullptr; }
V8_INLINE static std::nullptr_t NullExpression() { return nullptr; }
@@ -821,6 +768,8 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
return nullptr;
}
V8_INLINE static std::nullptr_t NullStatement() { return nullptr; }
+ V8_INLINE static std::nullptr_t NullBlock() { return nullptr; }
+ Expression* FailureExpression() { return factory()->FailureExpression(); }
template <typename T>
V8_INLINE static bool IsNull(T subject) {
@@ -860,7 +809,7 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
Expression* NewTargetExpression(int pos);
Expression* ImportMetaExpression(int pos);
- Literal* ExpressionFromLiteral(Token::Value token, int pos);
+ Expression* ExpressionFromLiteral(Token::Value token, int pos);
V8_INLINE VariableProxy* ExpressionFromIdentifier(
const AstRawString* name, int start_position,
@@ -868,13 +817,12 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
if (infer == InferName::kYes) {
fni_.PushVariableName(name);
}
- return NewUnresolved(name, start_position);
+ return expression_scope()->NewVariable(name, start_position);
}
- V8_INLINE Expression* ExpressionFromString(int pos) {
- const AstRawString* symbol = GetSymbol();
- fni_.PushLiteralName(symbol);
- return factory()->NewStringLiteral(symbol, pos);
+ V8_INLINE Variable* DeclareCatchVariableName(Scope* scope,
+ const AstRawString* name) {
+ return scope->DeclareCatchVariableName(name);
}
V8_INLINE ZonePtrList<Expression>* NewExpressionList(int size) const {
@@ -892,9 +840,8 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
return new (zone()) ZonePtrList<Statement>(size, zone());
}
- V8_INLINE Expression* NewV8Intrinsic(const AstRawString* name,
- ZonePtrList<Expression>* args, int pos,
- bool* ok);
+ Expression* NewV8Intrinsic(const AstRawString* name,
+ const ScopedPtrList<Expression>& args, int pos);
V8_INLINE Statement* NewThrowStatement(Expression* exception, int pos) {
return factory()->NewExpressionStatement(
@@ -907,44 +854,37 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
int initializer_end_position,
bool is_rest) {
parameters->UpdateArityAndFunctionLength(initializer != nullptr, is_rest);
- bool has_simple_name = pattern->IsVariableProxy() && initializer == nullptr;
- const AstRawString* name = has_simple_name
- ? pattern->AsVariableProxy()->raw_name()
- : ast_value_factory()->empty_string();
auto parameter = new (parameters->scope->zone())
- ParserFormalParameters::Parameter(name, pattern, initializer,
+ ParserFormalParameters::Parameter(pattern, initializer,
scanner()->location().beg_pos,
initializer_end_position, is_rest);
parameters->params.Add(parameter);
}
- V8_INLINE void DeclareFormalParameters(
- DeclarationScope* scope,
- const base::ThreadedList<ParserFormalParameters::Parameter>& parameters,
- bool is_simple, bool* has_duplicate = nullptr) {
- if (!is_simple) scope->SetHasNonSimpleParameters();
- for (auto parameter : parameters) {
- bool is_optional = parameter->initializer != nullptr;
+ V8_INLINE void DeclareFormalParameters(ParserFormalParameters* parameters) {
+ bool is_simple = parameters->is_simple;
+ DeclarationScope* scope = parameters->scope;
+ if (!is_simple) scope->MakeParametersNonSimple();
+ for (auto parameter : parameters->params) {
+ bool is_optional = parameter->initializer() != nullptr;
// If the parameter list is simple, declare the parameters normally with
// their names. If the parameter list is not simple, declare a temporary
// for each parameter - the corresponding named variable is declared by
// BuildParameterInitializationBlock.
scope->DeclareParameter(
- is_simple ? parameter->name : ast_value_factory()->empty_string(),
+ is_simple ? parameter->name() : ast_value_factory()->empty_string(),
is_simple ? VariableMode::kVar : VariableMode::kTemporary,
- is_optional, parameter->is_rest, has_duplicate, ast_value_factory(),
+ is_optional, parameter->is_rest(), ast_value_factory(),
parameter->position);
}
}
- void DeclareArrowFunctionFormalParameters(ParserFormalParameters* parameters,
- Expression* params,
- const Scanner::Location& params_loc,
- Scanner::Location* duplicate_loc,
- bool* ok);
+ void DeclareArrowFunctionFormalParameters(
+ ParserFormalParameters* parameters, Expression* params,
+ const Scanner::Location& params_loc);
- Expression* ExpressionListToExpression(ZonePtrList<Expression>* args);
+ Expression* ExpressionListToExpression(const ScopedPtrList<Expression>& args);
void SetFunctionNameFromPropertyName(LiteralProperty* property,
const AstRawString* name,
@@ -956,11 +896,6 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
void SetFunctionNameFromIdentifierRef(Expression* value,
Expression* identifier);
- V8_INLINE ZoneList<typename ExpressionClassifier::Error>*
- GetReportedErrorList() const {
- return function_state_->GetReportedErrorList();
- }
-
V8_INLINE void CountUsage(v8::Isolate::UseCounterFeature feature) {
++use_counts_[feature];
}
@@ -1091,9 +1026,21 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
node, new (zone()) TryFinallyStatementSourceRanges(body_range));
}
+ // Generate the next internal variable name for binding an exported namespace
+ // object (used to implement the "export * as" syntax).
+ const AstRawString* NextInternalNamespaceExportName();
+
+ ParseInfo* info() const { return info_; }
+
+ std::vector<uint8_t>* preparse_data_buffer() {
+ return &preparse_data_buffer_;
+ }
+
// Parser's private field members.
friend class PreParserZoneScope; // Uses reusable_preparser().
+ friend class PreparseDataBuilder; // Uses preparse_data_buffer().
+ ParseInfo* info_;
Scanner scanner_;
Zone preparser_zone_;
PreParser* reusable_preparser_;
@@ -1107,13 +1054,17 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
ScriptCompiler::CompileOptions compile_options_;
+ // For NextInternalNamespaceExportName().
+ int number_of_named_namespace_exports_ = 0;
+
// Other information which will be stored in Parser and moved to Isolate after
// parsing.
int use_counts_[v8::Isolate::kUseCounterFeatureCount];
int total_preparse_skipped_;
bool allow_lazy_;
bool temp_zoned_;
- ConsumedPreParsedScopeData* consumed_preparsed_scope_data_;
+ ConsumedPreparseData* consumed_preparse_data_;
+ std::vector<uint8_t> preparse_data_buffer_;
// If not kNoSourcePosition, indicates that the first function literal
// encountered is a dynamic function, see CreateDynamicFunction(). This field
diff --git a/deps/v8/src/parsing/parsing.cc b/deps/v8/src/parsing/parsing.cc
index 378023cbeb..7ff080b2f9 100644
--- a/deps/v8/src/parsing/parsing.cc
+++ b/deps/v8/src/parsing/parsing.cc
@@ -42,7 +42,6 @@ bool ParseProgram(ParseInfo* info, Isolate* isolate) {
info->pending_error_handler()->ReportErrors(isolate, info->script(),
info->ast_value_factory());
} else {
- result->scope()->AttachOuterScopeInfo(info, isolate);
info->set_language_mode(info->literal()->language_mode());
if (info->is_eval()) {
info->set_allow_eval_cache(parser.allow_eval_cache());
@@ -80,7 +79,7 @@ bool ParseFunction(ParseInfo* info, Handle<SharedFunctionInfo> shared_info,
info->pending_error_handler()->ReportErrors(isolate, info->script(),
info->ast_value_factory());
} else {
- result->scope()->AttachOuterScopeInfo(info, isolate);
+ info->ast_value_factory()->Internalize(isolate);
if (info->is_eval()) {
info->set_allow_eval_cache(parser.allow_eval_cache());
}
diff --git a/deps/v8/src/parsing/pattern-rewriter.cc b/deps/v8/src/parsing/pattern-rewriter.cc
index 4465670a8f..0ef570ee52 100644
--- a/deps/v8/src/parsing/pattern-rewriter.cc
+++ b/deps/v8/src/parsing/pattern-rewriter.cc
@@ -3,7 +3,7 @@
// found in the LICENSE file.
#include "src/ast/ast.h"
-#include "src/messages.h"
+#include "src/message-template.h"
#include "src/objects-inl.h"
#include "src/parsing/expression-scope-reparenter.h"
#include "src/parsing/parser.h"
@@ -12,314 +12,108 @@ namespace v8 {
namespace internal {
+// An AST visitor which performs declaration and assignment related tasks,
+// particularly for destructuring patterns:
+//
+// 1. Declares variables from variable proxies (particularly for destructuring
+// declarations),
+// 2. Marks destructuring-assigned variable proxies as assigned, and
+// 3. Rewrites scopes for parameters containing a sloppy eval.
+//
+// Historically this also rewrote destructuring assignments/declarations as a
+// block of multiple assignments, hence the name; however, this is now done
+// during bytecode generation.
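+//
+// For example (illustrative), given a destructuring declaration such as
+//
+//   let { a, b: [c] = [] } = obj;
+//
+// the rewriter walks the pattern, resolves `a` and `c`, and records their
+// initializer positions.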
+//
+// TODO(leszeks): Rename or remove this class
class PatternRewriter final : public AstVisitor<PatternRewriter> {
public:
- // Limit the allowed number of local variables in a function. The hard limit
- // is that offsets computed by FullCodeGenerator::StackOperand and similar
- // functions are ints, and they should not overflow. In addition, accessing
- // local variables creates user-controlled constants in the generated code,
- // and we don't want too much user-controlled memory inside the code (this was
- // the reason why this limit was introduced in the first place; see
- // https://codereview.chromium.org/7003030/ ).
- static const int kMaxNumFunctionLocals = 4194303; // 2^22-1
-
typedef Parser::DeclarationDescriptor DeclarationDescriptor;
- static void DeclareAndInitializeVariables(
- Parser* parser, Block* block,
- const DeclarationDescriptor* declaration_descriptor,
- const Parser::DeclarationParsingResult::Declaration* declaration,
- ZonePtrList<const AstRawString>* names, bool* ok);
-
- static Expression* RewriteDestructuringAssignment(Parser* parser,
- Assignment* to_rewrite,
- Scope* scope);
+ static void InitializeVariables(
+ Parser* parser, VariableKind kind,
+ const Parser::DeclarationParsingResult::Declaration* declaration);
private:
- enum PatternContext : uint8_t { BINDING, ASSIGNMENT };
-
- PatternRewriter(Scope* scope, Parser* parser, PatternContext context,
- const DeclarationDescriptor* descriptor = nullptr,
- ZonePtrList<const AstRawString>* names = nullptr,
- int initializer_position = kNoSourcePosition,
- int value_beg_position = kNoSourcePosition,
- bool declares_parameter_containing_sloppy_eval = false)
- : scope_(scope),
- parser_(parser),
- block_(nullptr),
- descriptor_(descriptor),
- names_(names),
- current_value_(nullptr),
- ok_(nullptr),
+ PatternRewriter(Parser* parser, VariableKind kind, int initializer_position,
+ bool declares_parameter_containing_sloppy_eval)
+ : parser_(parser),
initializer_position_(initializer_position),
- value_beg_position_(value_beg_position),
- context_(context),
declares_parameter_containing_sloppy_eval_(
- declares_parameter_containing_sloppy_eval),
- recursion_level_(0) {}
+ declares_parameter_containing_sloppy_eval) {}
#define DECLARE_VISIT(type) void Visit##type(v8::internal::type* node);
// Visiting functions for AST nodes make this an AstVisitor.
AST_NODE_LIST(DECLARE_VISIT)
#undef DECLARE_VISIT
- PatternContext context() const { return context_; }
-
- void RecurseIntoSubpattern(AstNode* pattern, Expression* value) {
- Expression* old_value = current_value_;
- current_value_ = value;
- recursion_level_++;
- Visit(pattern);
- recursion_level_--;
- current_value_ = old_value;
- }
-
- Expression* Rewrite(Assignment* assign) {
+ Expression* Visit(Assignment* assign) {
+ if (parser_->has_error()) return parser_->FailureExpression();
DCHECK_EQ(Token::ASSIGN, assign->op());
- int pos = assign->position();
- DCHECK_NULL(block_);
- block_ = factory()->NewBlock(8, true);
- Variable* temp = nullptr;
Expression* pattern = assign->target();
- Expression* old_value = current_value_;
- current_value_ = assign->value();
if (pattern->IsObjectLiteral()) {
- VisitObjectLiteral(pattern->AsObjectLiteral(), &temp);
+ VisitObjectLiteral(pattern->AsObjectLiteral());
} else {
DCHECK(pattern->IsArrayLiteral());
- VisitArrayLiteral(pattern->AsArrayLiteral(), &temp);
+ VisitArrayLiteral(pattern->AsArrayLiteral());
}
- DCHECK_NOT_NULL(temp);
- current_value_ = old_value;
- return factory()->NewDoExpression(block_, temp, pos);
+ return assign;
}
- void VisitObjectLiteral(ObjectLiteral* node, Variable** temp_var);
- void VisitArrayLiteral(ArrayLiteral* node, Variable** temp_var);
-
- bool IsBindingContext() const { return context_ == BINDING; }
- bool IsAssignmentContext() const { return context_ == ASSIGNMENT; }
- bool IsSubPattern() const { return recursion_level_ > 1; }
-
void RewriteParameterScopes(Expression* expr);
- Variable* CreateTempVar(Expression* value = nullptr);
+ Scope* scope() const { return parser_->scope(); }
- AstNodeFactory* factory() const { return parser_->factory(); }
- AstValueFactory* ast_value_factory() const {
- return parser_->ast_value_factory();
- }
- Zone* zone() const { return parser_->zone(); }
- Scope* scope() const { return scope_; }
-
- Scope* const scope_;
Parser* const parser_;
- Block* block_;
- const DeclarationDescriptor* descriptor_;
- ZonePtrList<const AstRawString>* names_;
- Expression* current_value_;
- bool* ok_;
const int initializer_position_;
- const int value_beg_position_;
- PatternContext context_;
- const bool declares_parameter_containing_sloppy_eval_ : 1;
- int recursion_level_;
+ const bool declares_parameter_containing_sloppy_eval_;
DEFINE_AST_VISITOR_MEMBERS_WITHOUT_STACKOVERFLOW()
};
-void Parser::DeclareAndInitializeVariables(
- Block* block, const DeclarationDescriptor* declaration_descriptor,
- const DeclarationParsingResult::Declaration* declaration,
- ZonePtrList<const AstRawString>* names, bool* ok) {
- PatternRewriter::DeclareAndInitializeVariables(
- this, block, declaration_descriptor, declaration, names, ok);
-}
-
-void Parser::RewriteDestructuringAssignment(RewritableExpression* to_rewrite) {
- DCHECK(!to_rewrite->is_rewritten());
- Assignment* assignment = to_rewrite->expression()->AsAssignment();
- Expression* result = PatternRewriter::RewriteDestructuringAssignment(
- this, assignment, scope());
- to_rewrite->Rewrite(result);
-}
-
-Expression* Parser::RewriteDestructuringAssignment(Assignment* assignment) {
- DCHECK_NOT_NULL(assignment);
- DCHECK_EQ(Token::ASSIGN, assignment->op());
- return PatternRewriter::RewriteDestructuringAssignment(this, assignment,
- scope());
-}
-
-void PatternRewriter::DeclareAndInitializeVariables(
- Parser* parser, Block* block,
- const DeclarationDescriptor* declaration_descriptor,
- const Parser::DeclarationParsingResult::Declaration* declaration,
- ZonePtrList<const AstRawString>* names, bool* ok) {
- DCHECK(block->ignore_completion_value());
-
- Scope* scope = declaration_descriptor->scope;
- PatternRewriter rewriter(scope, parser, BINDING, declaration_descriptor,
- names, declaration->initializer_position,
- declaration->value_beg_position,
- declaration_descriptor->declaration_kind ==
- DeclarationDescriptor::PARAMETER &&
- scope->is_block_scope());
- rewriter.block_ = block;
- rewriter.ok_ = ok;
-
- rewriter.RecurseIntoSubpattern(declaration->pattern,
- declaration->initializer);
-}
-
-Expression* PatternRewriter::RewriteDestructuringAssignment(
- Parser* parser, Assignment* to_rewrite, Scope* scope) {
- DCHECK(!scope->HasBeenRemoved());
-
- PatternRewriter rewriter(scope, parser, ASSIGNMENT);
- return rewriter.Rewrite(to_rewrite);
-}
-
-void PatternRewriter::VisitVariableProxy(VariableProxy* pattern) {
- Expression* value = current_value_;
+void Parser::InitializeVariables(
+ ScopedPtrList<Statement>* statements, VariableKind kind,
+ const DeclarationParsingResult::Declaration* declaration) {
+ if (has_error()) return;
- if (IsAssignmentContext()) {
- // In an assignment context, simply perform the assignment
- Assignment* assignment = factory()->NewAssignment(
- Token::ASSIGN, pattern, value, pattern->position());
- block_->statements()->Add(
- factory()->NewExpressionStatement(assignment, pattern->position()),
- zone());
+ if (!declaration->initializer) {
+ // The parameter scope is only a block scope if the initializer calls sloppy
+ // eval. Since there is no initializer, we can't be calling sloppy eval.
+ DCHECK_IMPLIES(kind == PARAMETER_VARIABLE, scope()->is_function_scope());
return;
}
- DCHECK_NOT_NULL(block_);
- DCHECK_NOT_NULL(descriptor_);
- DCHECK_NOT_NULL(ok_);
-
- Scope* outer_function_scope = nullptr;
- bool success;
- if (declares_parameter_containing_sloppy_eval_) {
- outer_function_scope = scope()->outer_scope();
- success = outer_function_scope->RemoveUnresolved(pattern);
- } else {
- success = scope()->RemoveUnresolved(pattern);
- }
- USE(success);
- DCHECK(success);
-
- // Declare variable.
- // Note that we *always* must treat the initial value via a separate init
- // assignment for variables and constants because the value must be assigned
- // when the variable is encountered in the source. But the variable/constant
- // is declared (and set to 'undefined') upon entering the function within
- // which the variable or constant is declared. Only function variables have
- // an initial value in the declaration (because they are initialized upon
- // entering the function).
- const AstRawString* name = pattern->raw_name();
- VariableProxy* proxy = pattern;
- Declaration* declaration;
- if (descriptor_->mode == VariableMode::kVar &&
- !scope()->is_declaration_scope()) {
- DCHECK(scope()->is_block_scope() || scope()->is_with_scope());
- declaration = factory()->NewNestedVariableDeclaration(
- proxy, scope(), descriptor_->declaration_pos);
- } else {
- declaration =
- factory()->NewVariableDeclaration(proxy, descriptor_->declaration_pos);
- }
-
- // When an extra declaration scope needs to be inserted to account for
- // a sloppy eval in a default parameter or function body, the parameter
- // needs to be declared in the function's scope, not in the varblock
- // scope which will be used for the initializer expression.
- Variable* var = parser_->Declare(
- declaration, descriptor_->declaration_kind, descriptor_->mode,
- Variable::DefaultInitializationFlag(descriptor_->mode), ok_,
- outer_function_scope);
- if (!*ok_) return;
- DCHECK_NOT_NULL(var);
- DCHECK(proxy->is_resolved());
- DCHECK_NE(initializer_position_, kNoSourcePosition);
- var->set_initializer_position(initializer_position_);
-
- Scope* declaration_scope = outer_function_scope != nullptr
- ? outer_function_scope
- : (IsLexicalVariableMode(descriptor_->mode)
- ? scope()
- : scope()->GetDeclarationScope());
- if (declaration_scope->num_var() > kMaxNumFunctionLocals) {
- parser_->ReportMessage(MessageTemplate::kTooManyVariables);
- *ok_ = false;
- return;
- }
- if (names_) {
- names_->Add(name, zone());
- }
-
- // If there's no initializer, we're done.
- if (value == nullptr) return;
-
- Scope* var_init_scope = scope();
- Parser::MarkLoopVariableAsAssigned(var_init_scope, proxy->var(),
- descriptor_->declaration_kind);
-
- // A declaration of the form:
- //
- // var v = x;
- //
- // is syntactic sugar for:
- //
- // var v; v = x;
- //
- // In particular, we need to re-lookup 'v' if it may be a different 'v' than
- // the 'v' in the declaration (e.g., if we are inside a 'with' statement or
- // 'catch' block).
-
- // For 'let' and 'const' declared variables the initialization always assigns
- // to the declared variable. But for var declarations that target a different
- // scope we need to do a new lookup.
- if (descriptor_->mode == VariableMode::kVar &&
- var_init_scope != declaration_scope) {
- proxy = var_init_scope->NewUnresolved(factory(), name);
- } else {
- DCHECK_NOT_NULL(proxy);
- DCHECK_NOT_NULL(proxy->var());
- }
- // Add break location for destructured sub-pattern.
- int pos = value_beg_position_;
+ PatternRewriter::InitializeVariables(this, kind, declaration);
+ int pos = declaration->value_beg_position;
if (pos == kNoSourcePosition) {
- pos = IsSubPattern() ? pattern->position() : value->position();
+ pos = declaration->initializer_position;
}
- Assignment* assignment =
- factory()->NewAssignment(Token::INIT, proxy, value, pos);
- block_->statements()->Add(factory()->NewExpressionStatement(assignment, pos),
- zone());
+ Assignment* assignment = factory()->NewAssignment(
+ Token::INIT, declaration->pattern, declaration->initializer, pos);
+ statements->Add(factory()->NewExpressionStatement(assignment, pos));
}
-Variable* PatternRewriter::CreateTempVar(Expression* value) {
- auto temp = scope()->NewTemporary(ast_value_factory()->empty_string());
- if (value != nullptr) {
- auto assignment = factory()->NewAssignment(
- Token::ASSIGN, factory()->NewVariableProxy(temp), value,
- kNoSourcePosition);
+void PatternRewriter::InitializeVariables(
+ Parser* parser, VariableKind kind,
+ const Parser::DeclarationParsingResult::Declaration* declaration) {
+ PatternRewriter rewriter(
+ parser, kind, declaration->initializer_position,
+ kind == PARAMETER_VARIABLE && parser->scope()->is_block_scope());
- block_->statements()->Add(
- factory()->NewExpressionStatement(assignment, kNoSourcePosition),
- zone());
- }
- return temp;
+ rewriter.Visit(declaration->pattern);
}
-void PatternRewriter::VisitRewritableExpression(RewritableExpression* node) {
- DCHECK(node->expression()->IsAssignment());
- // This is not a top-level destructuring assignment. Mark the node as
- // rewritten to prevent redundant rewriting and visit the underlying
- // expression.
- DCHECK(!node->is_rewritten());
- node->set_rewritten();
- return Visit(node->expression());
+void PatternRewriter::VisitVariableProxy(VariableProxy* proxy) {
+ DCHECK(!parser_->has_error());
+ Variable* var =
+ proxy->is_resolved()
+ ? proxy->var()
+ : scope()->GetDeclarationScope()->LookupLocal(proxy->raw_name());
+
+ DCHECK_NOT_NULL(var);
+
+ DCHECK_NE(initializer_position_, kNoSourcePosition);
+ var->set_initializer_position(initializer_position_);
}
// When an extra declaration scope needs to be inserted to account for
@@ -332,378 +126,35 @@ void PatternRewriter::RewriteParameterScopes(Expression* expr) {
}
}
-void PatternRewriter::VisitObjectLiteral(ObjectLiteral* pattern,
- Variable** temp_var) {
- auto temp = *temp_var = CreateTempVar(current_value_);
-
- ZonePtrList<Expression>* rest_runtime_callargs = nullptr;
- if (pattern->has_rest_property()) {
- // non_rest_properties_count = pattern->properties()->length - 1;
- // args_length = 1 + non_rest_properties_count because we need to
- // pass temp as well to the runtime function.
- int args_length = pattern->properties()->length();
- rest_runtime_callargs =
- new (zone()) ZonePtrList<Expression>(args_length, zone());
- rest_runtime_callargs->Add(factory()->NewVariableProxy(temp), zone());
- }
-
- block_->statements()->Add(parser_->BuildAssertIsCoercible(temp, pattern),
- zone());
-
+void PatternRewriter::VisitObjectLiteral(ObjectLiteral* pattern) {
for (ObjectLiteralProperty* property : *pattern->properties()) {
- Expression* value;
-
- if (property->kind() == ObjectLiteralProperty::Kind::SPREAD) {
- // var { y, [x++]: a, ...c } = temp
- // becomes
- // var y = temp.y;
- // var temp1 = %ToName(x++);
- // var a = temp[temp1];
- // var c;
- // c = %CopyDataPropertiesWithExcludedProperties(temp, "y", temp1);
- value = factory()->NewCallRuntime(
- Runtime::kCopyDataPropertiesWithExcludedProperties,
- rest_runtime_callargs, kNoSourcePosition);
- } else {
- Expression* key = property->key();
-
- if (!key->IsLiteral()) {
- // Computed property names contain expressions which might require
- // scope rewriting.
- RewriteParameterScopes(key);
- }
-
- if (pattern->has_rest_property()) {
- Expression* excluded_property = key;
-
- if (property->is_computed_name()) {
- DCHECK(!key->IsPropertyName() || !key->IsNumberLiteral());
- auto args = new (zone()) ZonePtrList<Expression>(1, zone());
- args->Add(key, zone());
- auto to_name_key = CreateTempVar(factory()->NewCallRuntime(
- Runtime::kToName, args, kNoSourcePosition));
- key = factory()->NewVariableProxy(to_name_key);
- excluded_property = factory()->NewVariableProxy(to_name_key);
- } else {
- DCHECK(key->IsPropertyName() || key->IsNumberLiteral());
- }
-
- DCHECK_NOT_NULL(rest_runtime_callargs);
- rest_runtime_callargs->Add(excluded_property, zone());
- }
-
- value = factory()->NewProperty(factory()->NewVariableProxy(temp), key,
- kNoSourcePosition);
+ Expression* key = property->key();
+ if (!key->IsLiteral()) {
+ // Computed property names contain expressions which might require
+ // scope rewriting.
+ RewriteParameterScopes(key);
}
-
- RecurseIntoSubpattern(property->value(), value);
+ Visit(property->value());
}
}
-void PatternRewriter::VisitObjectLiteral(ObjectLiteral* node) {
- Variable* temp_var = nullptr;
- VisitObjectLiteral(node, &temp_var);
-}
-
-void PatternRewriter::VisitArrayLiteral(ArrayLiteral* node,
- Variable** temp_var) {
- DCHECK(block_->ignore_completion_value());
-
- auto temp = *temp_var = CreateTempVar(current_value_);
- auto iterator = CreateTempVar(factory()->NewGetIterator(
- factory()->NewVariableProxy(temp), current_value_, IteratorType::kNormal,
- current_value_->position()));
- auto next = CreateTempVar(factory()->NewProperty(
- factory()->NewVariableProxy(iterator),
- factory()->NewStringLiteral(ast_value_factory()->next_string(),
- kNoSourcePosition),
- kNoSourcePosition));
- auto done =
- CreateTempVar(factory()->NewBooleanLiteral(false, kNoSourcePosition));
- auto result = CreateTempVar();
- auto v = CreateTempVar();
- auto completion = CreateTempVar();
- auto nopos = kNoSourcePosition;
-
- // For the purpose of iterator finalization, we temporarily set block_ to a
- // new block. In the main body of this function, we write to block_ (both
- // explicitly and implicitly via recursion). At the end of the function, we
- // wrap this new block in a try-finally statement, restore block_ to its
- // original value, and add the try-finally statement to block_.
- auto target = block_;
- block_ = factory()->NewBlock(8, true);
-
- Spread* spread = nullptr;
+void PatternRewriter::VisitArrayLiteral(ArrayLiteral* node) {
for (Expression* value : *node->values()) {
- if (value->IsSpread()) {
- spread = value->AsSpread();
- break;
- }
-
- // if (!done) {
- // done = true; // If .next, .done or .value throws, don't close.
- // result = IteratorNext(iterator);
- // if (result.done) {
- // v = undefined;
- // } else {
- // v = result.value;
- // done = false;
- // }
- // }
- Statement* if_not_done;
- {
- auto result_done = factory()->NewProperty(
- factory()->NewVariableProxy(result),
- factory()->NewStringLiteral(ast_value_factory()->done_string(),
- kNoSourcePosition),
- kNoSourcePosition);
-
- auto assign_undefined = factory()->NewAssignment(
- Token::ASSIGN, factory()->NewVariableProxy(v),
- factory()->NewUndefinedLiteral(kNoSourcePosition), kNoSourcePosition);
-
- auto assign_value = factory()->NewAssignment(
- Token::ASSIGN, factory()->NewVariableProxy(v),
- factory()->NewProperty(
- factory()->NewVariableProxy(result),
- factory()->NewStringLiteral(ast_value_factory()->value_string(),
- kNoSourcePosition),
- kNoSourcePosition),
- kNoSourcePosition);
-
- auto unset_done = factory()->NewAssignment(
- Token::ASSIGN, factory()->NewVariableProxy(done),
- factory()->NewBooleanLiteral(false, kNoSourcePosition),
- kNoSourcePosition);
-
- auto inner_else = factory()->NewBlock(2, true);
- inner_else->statements()->Add(
- factory()->NewExpressionStatement(assign_value, nopos), zone());
- inner_else->statements()->Add(
- factory()->NewExpressionStatement(unset_done, nopos), zone());
-
- auto inner_if = factory()->NewIfStatement(
- result_done,
- factory()->NewExpressionStatement(assign_undefined, nopos),
- inner_else, nopos);
-
- auto next_block = factory()->NewBlock(3, true);
- next_block->statements()->Add(
- factory()->NewExpressionStatement(
- factory()->NewAssignment(
- Token::ASSIGN, factory()->NewVariableProxy(done),
- factory()->NewBooleanLiteral(true, nopos), nopos),
- nopos),
- zone());
- next_block->statements()->Add(
- factory()->NewExpressionStatement(
- parser_->BuildIteratorNextResult(
- factory()->NewVariableProxy(iterator),
- factory()->NewVariableProxy(next), result,
- IteratorType::kNormal, kNoSourcePosition),
- kNoSourcePosition),
- zone());
- next_block->statements()->Add(inner_if, zone());
-
- if_not_done = factory()->NewIfStatement(
- factory()->NewUnaryOperation(
- Token::NOT, factory()->NewVariableProxy(done), kNoSourcePosition),
- next_block, factory()->NewEmptyStatement(kNoSourcePosition),
- kNoSourcePosition);
- }
- block_->statements()->Add(if_not_done, zone());
-
- if (!value->IsTheHoleLiteral()) {
- {
- // completion = kAbruptCompletion;
- Expression* proxy = factory()->NewVariableProxy(completion);
- Expression* assignment = factory()->NewAssignment(
- Token::ASSIGN, proxy,
- factory()->NewSmiLiteral(Parser::kAbruptCompletion, nopos), nopos);
- block_->statements()->Add(
- factory()->NewExpressionStatement(assignment, nopos), zone());
- }
-
- RecurseIntoSubpattern(value, factory()->NewVariableProxy(v));
-
- {
- // completion = kNormalCompletion;
- Expression* proxy = factory()->NewVariableProxy(completion);
- Expression* assignment = factory()->NewAssignment(
- Token::ASSIGN, proxy,
- factory()->NewSmiLiteral(Parser::kNormalCompletion, nopos), nopos);
- block_->statements()->Add(
- factory()->NewExpressionStatement(assignment, nopos), zone());
- }
- }
- }
-
- if (spread != nullptr) {
- // A spread can only occur as the last component. It is not handled by
- // RecurseIntoSubpattern above.
-
- // let array = [];
- // let index = 0;
- // while (!done) {
- // done = true; // If .next, .done or .value throws, don't close.
- // result = IteratorNext(iterator);
- // if (!result.done) {
- // StoreInArrayLiteral(array, index, result.value);
- // done = false;
- // }
- // index++;
- // }
-
- // let array = [];
- Variable* array;
- {
- auto empty_exprs = new (zone()) ZonePtrList<Expression>(0, zone());
- array = CreateTempVar(
- factory()->NewArrayLiteral(empty_exprs, kNoSourcePosition));
- }
-
- // let index = 0;
- Variable* index =
- CreateTempVar(factory()->NewSmiLiteral(0, kNoSourcePosition));
-
- // done = true;
- Statement* set_done = factory()->NewExpressionStatement(
- factory()->NewAssignment(
- Token::ASSIGN, factory()->NewVariableProxy(done),
- factory()->NewBooleanLiteral(true, nopos), nopos),
- nopos);
-
- // result = IteratorNext(iterator);
- Statement* get_next = factory()->NewExpressionStatement(
- parser_->BuildIteratorNextResult(factory()->NewVariableProxy(iterator),
- factory()->NewVariableProxy(next),
- result, IteratorType::kNormal, nopos),
- nopos);
-
- // StoreInArrayLiteral(array, index, result.value);
- Statement* store;
- {
- auto value = factory()->NewProperty(
- factory()->NewVariableProxy(result),
- factory()->NewStringLiteral(ast_value_factory()->value_string(),
- nopos),
- nopos);
- store = factory()->NewExpressionStatement(
- factory()->NewStoreInArrayLiteral(factory()->NewVariableProxy(array),
- factory()->NewVariableProxy(index),
- value, nopos),
- nopos);
- }
-
- // done = false;
- Statement* unset_done = factory()->NewExpressionStatement(
- factory()->NewAssignment(
- Token::ASSIGN, factory()->NewVariableProxy(done),
- factory()->NewBooleanLiteral(false, nopos), nopos),
- nopos);
-
- // if (!result.done) { #store; #unset_done }
- Statement* maybe_store_and_unset_done;
- {
- Expression* result_done =
- factory()->NewProperty(factory()->NewVariableProxy(result),
- factory()->NewStringLiteral(
- ast_value_factory()->done_string(), nopos),
- nopos);
-
- Block* then = factory()->NewBlock(2, true);
- then->statements()->Add(store, zone());
- then->statements()->Add(unset_done, zone());
-
- maybe_store_and_unset_done = factory()->NewIfStatement(
- factory()->NewUnaryOperation(Token::NOT, result_done, nopos), then,
- factory()->NewEmptyStatement(nopos), nopos);
- }
-
- // index++;
- Statement* increment_index;
- {
- increment_index = factory()->NewExpressionStatement(
- factory()->NewCountOperation(
- Token::INC, false, factory()->NewVariableProxy(index), nopos),
- nopos);
- }
-
- // while (!done) {
- // #set_done;
- // #get_next;
- // #maybe_store_and_unset_done;
- // #increment_index;
- // }
- WhileStatement* loop =
- factory()->NewWhileStatement(nullptr, nullptr, nopos);
- {
- Expression* condition = factory()->NewUnaryOperation(
- Token::NOT, factory()->NewVariableProxy(done), nopos);
- Block* body = factory()->NewBlock(4, true);
- body->statements()->Add(set_done, zone());
- body->statements()->Add(get_next, zone());
- body->statements()->Add(maybe_store_and_unset_done, zone());
- body->statements()->Add(increment_index, zone());
- loop->Initialize(condition, body);
- }
-
- block_->statements()->Add(loop, zone());
- RecurseIntoSubpattern(spread->expression(),
- factory()->NewVariableProxy(array));
+ if (value->IsTheHoleLiteral()) continue;
+ Visit(value);
}
-
- Expression* closing_condition = factory()->NewUnaryOperation(
- Token::NOT, factory()->NewVariableProxy(done), nopos);
-
- parser_->FinalizeIteratorUse(completion, closing_condition, iterator, block_,
- target, IteratorType::kNormal);
- block_ = target;
-}
-
-void PatternRewriter::VisitArrayLiteral(ArrayLiteral* node) {
- Variable* temp_var = nullptr;
- VisitArrayLiteral(node, &temp_var);
}
void PatternRewriter::VisitAssignment(Assignment* node) {
- // let {<pattern> = <init>} = <value>
- // becomes
- // temp = <value>;
- // <pattern> = temp === undefined ? <init> : temp;
DCHECK_EQ(Token::ASSIGN, node->op());
- auto initializer = node->value();
- auto value = initializer;
- auto temp = CreateTempVar(current_value_);
-
- Expression* is_undefined = factory()->NewCompareOperation(
- Token::EQ_STRICT, factory()->NewVariableProxy(temp),
- factory()->NewUndefinedLiteral(kNoSourcePosition), kNoSourcePosition);
- value = factory()->NewConditional(is_undefined, initializer,
- factory()->NewVariableProxy(temp),
- kNoSourcePosition);
-
// Initializer may have been parsed in the wrong scope.
- RewriteParameterScopes(initializer);
-
- RecurseIntoSubpattern(node->target(), value);
-}
-
-
-// =============== AssignmentPattern only ==================
-
-void PatternRewriter::VisitProperty(v8::internal::Property* node) {
- DCHECK(IsAssignmentContext());
- auto value = current_value_;
-
- Assignment* assignment =
- factory()->NewAssignment(Token::ASSIGN, node, value, node->position());
+ RewriteParameterScopes(node->value());
- block_->statements()->Add(
- factory()->NewExpressionStatement(assignment, kNoSourcePosition), zone());
+ Visit(node->target());
}
+void PatternRewriter::VisitSpread(Spread* node) { Visit(node->expression()); }
// =============== UNREACHABLE =============================
@@ -734,17 +185,16 @@ NOT_A_PATTERN(ForOfStatement)
NOT_A_PATTERN(ForStatement)
NOT_A_PATTERN(FunctionDeclaration)
NOT_A_PATTERN(FunctionLiteral)
-NOT_A_PATTERN(GetIterator)
NOT_A_PATTERN(GetTemplateObject)
NOT_A_PATTERN(IfStatement)
NOT_A_PATTERN(ImportCallExpression)
NOT_A_PATTERN(Literal)
NOT_A_PATTERN(NativeFunctionLiteral)
+NOT_A_PATTERN(Property)
NOT_A_PATTERN(RegExpLiteral)
NOT_A_PATTERN(ResolvedProperty)
NOT_A_PATTERN(ReturnStatement)
NOT_A_PATTERN(SloppyBlockFunctionStatement)
-NOT_A_PATTERN(Spread)
NOT_A_PATTERN(StoreInArrayLiteral)
NOT_A_PATTERN(SuperPropertyReference)
NOT_A_PATTERN(SuperCallReference)
@@ -761,7 +211,7 @@ NOT_A_PATTERN(WithStatement)
NOT_A_PATTERN(Yield)
NOT_A_PATTERN(YieldStar)
NOT_A_PATTERN(Await)
-NOT_A_PATTERN(InitializeClassFieldsStatement)
+NOT_A_PATTERN(InitializeClassMembersStatement)
#undef NOT_A_PATTERN
} // namespace internal
diff --git a/deps/v8/src/parsing/preparse-data-impl.h b/deps/v8/src/parsing/preparse-data-impl.h
new file mode 100644
index 0000000000..7d1f0feed8
--- /dev/null
+++ b/deps/v8/src/parsing/preparse-data-impl.h
@@ -0,0 +1,234 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_PARSING_PREPARSE_DATA_IMPL_H_
+#define V8_PARSING_PREPARSE_DATA_IMPL_H_
+
+#include "src/parsing/preparse-data.h"
+
+#include "src/assert-scope.h"
+
+namespace v8 {
+namespace internal {
+
+// Classes which are internal to preparse-data.cc, but are exposed in
+// a header for tests.
+
+// Wraps a ZoneVector<uint8_t> to provide functions named the same as those of
+// PodArray<uint8_t>.
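+// This lets BaseConsumedPreparseData (below) be instantiated with either a
+// ZoneVectorWrapper or an on-heap PreparseData as its Data parameter.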
+class ZoneVectorWrapper {
+ public:
+ ZoneVectorWrapper() = default;
+ explicit ZoneVectorWrapper(ZoneVector<uint8_t>* data) : data_(data) {}
+
+ int data_length() const { return static_cast<int>(data_->size()); }
+
+ uint8_t get(int index) const { return data_->at(index); }
+
+ private:
+ ZoneVector<uint8_t>* data_ = nullptr;
+};
+
+template <class Data>
+class BaseConsumedPreparseData : public ConsumedPreparseData {
+ public:
+ class ByteData : public PreparseByteDataConstants {
+ public:
+ ByteData() {}
+
+ // Reading from the ByteData is only allowed when a ReadingScope is on the
+ // stack. This ensures that we have a DisallowHeapAllocation in place
+ // whenever ByteData holds a raw pointer into the heap.
+ class ReadingScope {
+ public:
+ ReadingScope(ByteData* consumed_data, Data data)
+ : consumed_data_(consumed_data) {
+ consumed_data->data_ = data;
+#ifdef DEBUG
+ consumed_data->has_data_ = true;
+#endif
+ }
+ explicit ReadingScope(BaseConsumedPreparseData<Data>* parent)
+ : ReadingScope(parent->scope_data_.get(), parent->GetScopeData()) {}
+ ~ReadingScope() {
+#ifdef DEBUG
+ consumed_data_->has_data_ = false;
+#endif
+ }
+
+ private:
+ ByteData* consumed_data_;
+ DISALLOW_HEAP_ALLOCATION(no_gc);
+ };
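+ // Illustrative use, e.g. from a reader in BaseConsumedPreparseData:
+ //
+ // ByteData::ReadingScope reading_scope(this);
+ // int32_t i = scope_data_->ReadUint32();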
+
+ void SetPosition(int position) {
+ DCHECK_LE(position, data_.data_length());
+ index_ = position;
+ }
+
+ size_t RemainingBytes() const {
+ DCHECK(has_data_);
+ DCHECK_LE(index_, data_.data_length());
+ return data_.data_length() - index_;
+ }
+
+ bool HasRemainingBytes(size_t bytes) const {
+ DCHECK(has_data_);
+ return index_ <= data_.data_length() && bytes <= RemainingBytes();
+ }
+
+ int32_t ReadUint32() {
+ DCHECK(has_data_);
+ DCHECK(HasRemainingBytes(kUint32Size));
+ // Check that there indeed is an integer following.
+ DCHECK_EQ(data_.get(index_++), kUint32Size);
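+ // Bytes are little-endian: byte 0 holds bits 0-7, byte 3 holds bits 24-31.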
+ int32_t result = data_.get(index_) + (data_.get(index_ + 1) << 8) +
+ (data_.get(index_ + 2) << 16) +
+ (data_.get(index_ + 3) << 24);
+ index_ += 4;
+ stored_quarters_ = 0;
+ return result;
+ }
+
+ int32_t ReadVarint32() {
+ DCHECK(HasRemainingBytes(kVarintMinSize));
+ DCHECK_EQ(data_.get(index_++), kVarintMinSize);
+ int32_t value = 0;
+ bool has_another_byte;
+ unsigned shift = 0;
+ do {
+ uint8_t byte = data_.get(index_++);
+ value |= static_cast<int32_t>(byte & 0x7F) << shift;
+ shift += 7;
+ has_another_byte = byte & 0x80;
+ } while (has_another_byte);
+ DCHECK_EQ(data_.get(index_++), kVarintEndMarker);
+ stored_quarters_ = 0;
+ return value;
+ }
+
+ uint8_t ReadUint8() {
+ DCHECK(has_data_);
+ DCHECK(HasRemainingBytes(kUint8Size));
+ // Check that there indeed is a byte following.
+ DCHECK_EQ(data_.get(index_++), kUint8Size);
+ stored_quarters_ = 0;
+ return data_.get(index_++);
+ }
+
+ uint8_t ReadQuarter() {
+ DCHECK(has_data_);
+ if (stored_quarters_ == 0) {
+ DCHECK(HasRemainingBytes(kUint8Size));
+ // Check that there indeed are quarters following.
+ DCHECK_EQ(data_.get(index_++), kQuarterMarker);
+ stored_byte_ = data_.get(index_++);
+ stored_quarters_ = 4;
+ }
+ // Read the first 2 bits from stored_byte_.
+ uint8_t result = (stored_byte_ >> 6) & 3;
+ DCHECK_LE(result, 3);
+ --stored_quarters_;
+ stored_byte_ <<= 2;
+ return result;
+ }
+
+ private:
+ Data data_ = {};
+ int index_ = 0;
+ uint8_t stored_quarters_ = 0;
+ uint8_t stored_byte_ = 0;
+#ifdef DEBUG
+ bool has_data_ = false;
+#endif
+ };
+
+ BaseConsumedPreparseData() : scope_data_(new ByteData()), child_index_(0) {}
+
+ virtual Data GetScopeData() = 0;
+
+ virtual ProducedPreparseData* GetChildData(Zone* zone, int child_index) = 0;
+
+ ProducedPreparseData* GetDataForSkippableFunction(
+ Zone* zone, int start_position, int* end_position, int* num_parameters,
+ int* num_inner_functions, bool* uses_super_property,
+ LanguageMode* language_mode) final;
+
+ void RestoreScopeAllocationData(DeclarationScope* scope) final;
+
+#ifdef DEBUG
+ bool VerifyDataStart();
+#endif
+
+ private:
+ void RestoreDataForScope(Scope* scope);
+ void RestoreDataForVariable(Variable* var);
+ void RestoreDataForInnerScopes(Scope* scope);
+
+ std::unique_ptr<ByteData> scope_data_;
+ // When consuming the data, this index points to the data we're going to
+ // consume next.
+ int child_index_;
+
+ DISALLOW_COPY_AND_ASSIGN(BaseConsumedPreparseData);
+};
+
+// Implementation of ConsumedPreparseData for on-heap data.
+class OnHeapConsumedPreparseData final
+ : public BaseConsumedPreparseData<PreparseData> {
+ public:
+ OnHeapConsumedPreparseData(Isolate* isolate, Handle<PreparseData> data);
+
+ PreparseData GetScopeData() final;
+ ProducedPreparseData* GetChildData(Zone* zone, int child_index) final;
+
+ private:
+ Isolate* isolate_;
+ Handle<PreparseData> data_;
+};
+
+// A serialized PreparseData in zone memory (as opposed to being on-heap).
+class ZonePreparseData : public ZoneObject {
+ public:
+ ZonePreparseData(Zone* zone, Vector<uint8_t>* byte_data, int child_length);
+
+ Handle<PreparseData> Serialize(Isolate* isolate);
+
+ int children_length() const { return static_cast<int>(children_.size()); }
+
+ ZonePreparseData* get_child(int index) { return children_[index]; }
+
+ void set_child(int index, ZonePreparseData* child) {
+ DCHECK_NOT_NULL(child);
+ children_[index] = child;
+ }
+
+ ZoneVector<uint8_t>* byte_data() { return &byte_data_; }
+
+ private:
+ ZoneVector<uint8_t> byte_data_;
+ ZoneVector<ZonePreparseData*> children_;
+
+ DISALLOW_COPY_AND_ASSIGN(ZonePreparseData);
+};
+
+// Implementation of ConsumedPreparseData for PreparseData
+// serialized into zone memory.
+class ZoneConsumedPreparseData final
+ : public BaseConsumedPreparseData<ZoneVectorWrapper> {
+ public:
+ ZoneConsumedPreparseData(Zone* zone, ZonePreparseData* data);
+
+ ZoneVectorWrapper GetScopeData() final;
+ ProducedPreparseData* GetChildData(Zone* zone, int child_index) final;
+
+ private:
+ ZonePreparseData* data_;
+ ZoneVectorWrapper scope_data_wrapper_;
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_PARSING_PREPARSE_DATA_IMPL_H_
diff --git a/deps/v8/src/parsing/preparse-data.cc b/deps/v8/src/parsing/preparse-data.cc
new file mode 100644
index 0000000000..68986e451a
--- /dev/null
+++ b/deps/v8/src/parsing/preparse-data.cc
@@ -0,0 +1,716 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/parsing/preparse-data.h"
+
+#include <vector>
+
+#include "src/ast/scopes.h"
+#include "src/ast/variables.h"
+#include "src/handles.h"
+#include "src/objects-inl.h"
+#include "src/objects/shared-function-info.h"
+#include "src/parsing/parser.h"
+#include "src/parsing/preparse-data-impl.h"
+#include "src/parsing/preparser.h"
+
+namespace v8 {
+namespace internal {
+
+namespace {
+
+class ScopeCallsSloppyEvalField : public BitField8<bool, 0, 1> {};
+class InnerScopeCallsEvalField
+ : public BitField8<bool, ScopeCallsSloppyEvalField::kNext, 1> {};
+
+class VariableMaybeAssignedField : public BitField8<bool, 0, 1> {};
+class VariableContextAllocatedField
+ : public BitField8<bool, VariableMaybeAssignedField::kNext, 1> {};
+
+class HasDataField : public BitField<bool, 0, 1> {};
+class NumberOfParametersField
+ : public BitField<uint16_t, HasDataField::kNext, 16> {};
+
+class LanguageField : public BitField8<LanguageMode, 0, 1> {};
+class UsesSuperField : public BitField8<bool, LanguageField::kNext, 1> {};
+STATIC_ASSERT(LanguageModeSize <= LanguageField::kNumValues);
+
+} // namespace
+
+/*
+
+ Internal data format for the backing store of PreparseDataBuilder and
+ PreparseData::scope_data (on the heap):
+
+ (Skippable function data:)
+ ------------------------------------
+ | scope_data_start (debug only) |
+ ------------------------------------
+ | data for inner function n |
+ | ... |
+ ------------------------------------
+ | data for inner function 1 |
+ | ... |
+ ------------------------------------
+ (Scope allocation data:) << scope_data_start points here in debug
+ ------------------------------------
+ | magic value (debug only) |
+ ------------------------------------
+ | scope positions (debug only) |
+ ------------------------------------
+ | scope type << only in debug |
+ | eval |
+ | ---------------------- |
+ | | data for variables | |
+ | | ... | |
+ | ---------------------- |
+ ------------------------------------
+ ------------------------------------
+ | data for inner scope m | << but not for function scopes
+ | ... |
+ ------------------------------------
+ ...
+ ------------------------------------
+ | data for inner scope 1 |
+ | ... |
+ ------------------------------------
+
+ PreparseData::child_data is an array of PreparseData objects, one
+ for each skippable inner function.
+
+ ConsumedPreparseData wraps a PreparseData and reads data from it.
+
+ */
+
+PreparseDataBuilder::PreparseDataBuilder(Zone* zone,
+ PreparseDataBuilder* parent_builder)
+ : parent_(parent_builder),
+ byte_data_(),
+ children_(zone),
+ function_scope_(nullptr),
+ num_inner_functions_(0),
+ num_inner_with_data_(0),
+ bailed_out_(false),
+ has_data_(false) {}
+
+void PreparseDataBuilder::DataGatheringScope::Start(
+ DeclarationScope* function_scope) {
+ Zone* main_zone = preparser_->main_zone();
+ builder_ = new (main_zone)
+ PreparseDataBuilder(main_zone, preparser_->preparse_data_builder());
+ preparser_->set_preparse_data_builder(builder_);
+ function_scope->set_preparse_data_builder(builder_);
+}
+
+PreparseDataBuilder::DataGatheringScope::~DataGatheringScope() {
+ if (builder_ == nullptr) return;
+  // If this builder gathered data its parent needs, link it into the
+  // parent's list of children, and make the parent the active builder again.
+ PreparseDataBuilder* parent = builder_->parent_;
+ if (parent != nullptr && builder_->HasDataForParent()) {
+ parent->children_.push_back(builder_);
+ }
+ preparser_->set_preparse_data_builder(parent);
+}
+
+#ifdef DEBUG
+void PreparseDataBuilder::ByteData::WriteUint32(uint32_t data) {
+ DCHECK(!is_finalized_);
+ byte_data_->push_back(kUint32Size);
+ byte_data_->push_back(data & 0xFF);
+ byte_data_->push_back((data >> 8) & 0xFF);
+ byte_data_->push_back((data >> 16) & 0xFF);
+ byte_data_->push_back((data >> 24) & 0xFF);
+ free_quarters_in_last_byte_ = 0;
+}
+
+void PreparseDataBuilder::ByteData::SaveCurrentSizeAtFirstUint32() {
+ CHECK(!is_finalized_);
+ uint32_t data = static_cast<uint32_t>(byte_data_->size());
+ uint8_t* start = &byte_data_->front();
+ int i = 0;
+  // Check that the position already holds an item of the expected size.
+ CHECK_GE(byte_data_->size(), kUint32Size);
+ CHECK_EQ(start[i++], kUint32Size);
+ start[i++] = data & 0xFF;
+ start[i++] = (data >> 8) & 0xFF;
+ start[i++] = (data >> 16) & 0xFF;
+ start[i++] = (data >> 24) & 0xFF;
+}
+
+int PreparseDataBuilder::ByteData::length() const {
+ CHECK(!is_finalized_);
+ return static_cast<int>(byte_data_->size());
+}
+#endif
+
+void PreparseDataBuilder::ByteData::WriteVarint32(uint32_t data) {
+#ifdef DEBUG
+ // Save expected item size in debug mode.
+ byte_data_->push_back(kVarintMinSize);
+#endif
+ // See ValueSerializer::WriteVarint.
+ do {
+ uint8_t next_byte = (data & 0x7F);
+ data >>= 7;
+ // Add continue bit.
+ if (data) next_byte |= 0x80;
+ byte_data_->push_back(next_byte & 0xFF);
+ } while (data);
+#ifdef DEBUG
+ // Save a varint marker in debug mode.
+ byte_data_->push_back(kVarintEndMarker);
+#endif
+ free_quarters_in_last_byte_ = 0;
+}
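+
+// Worked example (illustrative only): WriteVarint32(300) appends 0xAC, 0x02.
+// 300 is 0b1'0010'1100; the low seven bits (0x2C) go out first with the
+// continue bit set (0xAC), followed by the remaining bits (0x02). In debug
+// mode the payload is additionally bracketed by the kVarintMinSize and
+// kVarintEndMarker marker bytes.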
+
+void PreparseDataBuilder::ByteData::WriteUint8(uint8_t data) {
+ DCHECK(!is_finalized_);
+#ifdef DEBUG
+ // Save expected item size in debug mode.
+ byte_data_->push_back(kUint8Size);
+#endif
+ byte_data_->push_back(data);
+ free_quarters_in_last_byte_ = 0;
+}
+
+void PreparseDataBuilder::ByteData::WriteQuarter(uint8_t data) {
+ DCHECK(!is_finalized_);
+ DCHECK_LE(data, 3);
+ if (free_quarters_in_last_byte_ == 0) {
+#ifdef DEBUG
+ // Save a marker in debug mode.
+ byte_data_->push_back(kQuarterMarker);
+#endif
+ byte_data_->push_back(0);
+ free_quarters_in_last_byte_ = 3;
+ } else {
+ --free_quarters_in_last_byte_;
+ }
+
+ uint8_t shift_amount = free_quarters_in_last_byte_ * 2;
+ DCHECK_EQ(byte_data_->back() & (3 << shift_amount), 0);
+ byte_data_->back() |= (data << shift_amount);
+}
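+
+// Worked example (illustrative only): quarters fill each byte from the most
+// significant bits down, so WriteQuarter(1); WriteQuarter(2); WriteQuarter(3);
+// WriteQuarter(0); packs into the single byte 0b01'10'11'00 (0x6C). In debug
+// mode the packed byte is preceded by a kQuarterMarker byte.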
+
+void PreparseDataBuilder::ByteData::Start(std::vector<uint8_t>* buffer) {
+ DCHECK(!is_finalized_);
+ byte_data_ = buffer;
+ DCHECK_EQ(byte_data_->size(), 0);
+}
+
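+// Copies the accumulated bytes out of the shared buffer into zone-allocated
+// memory and clears the buffer for reuse by the next builder; from this
+// point on the data lives in zone_byte_data_.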
+void PreparseDataBuilder::ByteData::Finalize(Zone* zone) {
+ int size = static_cast<int>(byte_data_->size());
+ uint8_t* raw_zone_data =
+ static_cast<uint8_t*>(ZoneAllocationPolicy(zone).New(size));
+ memcpy(raw_zone_data, &byte_data_->front(), size);
+
+ byte_data_->resize(0);
+
+ zone_byte_data_ = Vector<uint8_t>(raw_zone_data, size);
+#ifdef DEBUG
+ is_finalized_ = true;
+#endif
+}
+
+void PreparseDataBuilder::DataGatheringScope::SetSkippableFunction(
+ DeclarationScope* function_scope, int num_inner_functions) {
+ DCHECK_NULL(builder_->function_scope_);
+ builder_->function_scope_ = function_scope;
+ DCHECK_EQ(builder_->num_inner_functions_, 0);
+ builder_->num_inner_functions_ = num_inner_functions;
+ builder_->parent_->has_data_ = true;
+}
+
+bool PreparseDataBuilder::HasInnerFunctions() const {
+ return !children_.is_empty();
+}
+
+bool PreparseDataBuilder::HasData() const { return !bailed_out_ && has_data_; }
+
+bool PreparseDataBuilder::HasDataForParent() const {
+ return HasData() || function_scope_ != nullptr;
+}
+
+bool PreparseDataBuilder::ScopeNeedsData(Scope* scope) {
+ if (scope->scope_type() == ScopeType::FUNCTION_SCOPE) {
+ // Default constructors don't need data (they cannot contain inner functions
+ // defined by the user). Other functions do.
+ return !IsDefaultConstructor(scope->AsDeclarationScope()->function_kind());
+ }
+ if (!scope->is_hidden()) {
+ for (Variable* var : *scope->locals()) {
+ if (IsDeclaredVariableMode(var->mode())) return true;
+ }
+ }
+ for (Scope* inner = scope->inner_scope(); inner != nullptr;
+ inner = inner->sibling()) {
+ if (ScopeNeedsData(inner)) return true;
+ }
+ return false;
+}
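+
+// For example (illustrative): a visible block scope that declares `let x`
+// needs data, while a hidden scope with no data-needing inner scopes does
+// not.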
+
+bool PreparseDataBuilder::SaveDataForSkippableFunction(
+ PreparseDataBuilder* builder) {
+ DeclarationScope* function_scope = builder->function_scope_;
+  // The start position is used as a sanity check when consuming the data; we
+  // could remove it in the future if we're very pressed for space, but it's
+  // been good at catching bugs in the wild so far.
+ byte_data_.WriteVarint32(function_scope->start_position());
+ byte_data_.WriteVarint32(function_scope->end_position());
+
+ bool has_data = builder->HasData();
+ uint32_t has_data_and_num_parameters =
+ HasDataField::encode(has_data) |
+ NumberOfParametersField::encode(function_scope->num_parameters());
+ byte_data_.WriteVarint32(has_data_and_num_parameters);
+ byte_data_.WriteVarint32(builder->num_inner_functions_);
+
+ uint8_t language_and_super =
+ LanguageField::encode(function_scope->language_mode()) |
+ UsesSuperField::encode(function_scope->NeedsHomeObject());
+ byte_data_.WriteQuarter(language_and_super);
+ return has_data;
+}
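+
+// Worked example (illustrative only): for a skippable function that has data
+// and two parameters, has_data_and_num_parameters above is
+// HasDataField::encode(true) | NumberOfParametersField::encode(2) == 1 | 4
+// == 5, which WriteVarint32 stores as the single byte 0x05.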
+
+void PreparseDataBuilder::SaveScopeAllocationData(DeclarationScope* scope,
+ Parser* parser) {
+ if (!has_data_) return;
+ DCHECK(HasInnerFunctions());
+
+ byte_data_.Start(parser->preparse_data_buffer());
+
+#ifdef DEBUG
+ // Reserve Uint32 for scope_data_start debug info.
+ byte_data_.WriteUint32(0);
+#endif
+
+ for (const auto& builder : children_) {
+    // Keep track of functions with inner data. {children_} also contains the
+    // builders that have no inner functions at all.
+ if (SaveDataForSkippableFunction(builder)) num_inner_with_data_++;
+ }
+
+  // Don't save incomplete scope information when bailed out.
+ if (!bailed_out_) {
+#ifdef DEBUG
+    // The data contains the reserved uint32 placeholder and the skippable
+    // function data items, each at least kSkippableFunctionMinDataSize bytes.
+ CHECK_GE(byte_data_.length(), kPlaceholderSize);
+ CHECK_LE(byte_data_.length(), std::numeric_limits<uint32_t>::max());
+
+ byte_data_.SaveCurrentSizeAtFirstUint32();
+ // For a data integrity check, write a value between data about skipped inner
+ // funcs and data about variables.
+ byte_data_.WriteUint32(kMagicValue);
+ byte_data_.WriteUint32(scope->start_position());
+ byte_data_.WriteUint32(scope->end_position());
+#endif
+
+ if (ScopeNeedsData(scope)) SaveDataForScope(scope);
+ }
+ byte_data_.Finalize(parser->factory()->zone());
+}
+
+void PreparseDataBuilder::SaveDataForScope(Scope* scope) {
+ DCHECK_NE(scope->end_position(), kNoSourcePosition);
+ DCHECK(ScopeNeedsData(scope));
+
+#ifdef DEBUG
+ byte_data_.WriteUint8(scope->scope_type());
+#endif
+
+ uint8_t eval =
+ ScopeCallsSloppyEvalField::encode(
+ scope->is_declaration_scope() &&
+ scope->AsDeclarationScope()->calls_sloppy_eval()) |
+ InnerScopeCallsEvalField::encode(scope->inner_scope_calls_eval());
+ byte_data_.WriteUint8(eval);
+
+ if (scope->scope_type() == ScopeType::FUNCTION_SCOPE) {
+ Variable* function = scope->AsDeclarationScope()->function_var();
+ if (function != nullptr) SaveDataForVariable(function);
+ }
+
+ for (Variable* var : *scope->locals()) {
+ if (IsDeclaredVariableMode(var->mode())) SaveDataForVariable(var);
+ }
+
+ SaveDataForInnerScopes(scope);
+}
+
+void PreparseDataBuilder::SaveDataForVariable(Variable* var) {
+#ifdef DEBUG
+ // Store the variable name in debug mode; this way we can check that we
+ // restore data to the correct variable.
+ const AstRawString* name = var->raw_name();
+ byte_data_.WriteUint8(name->is_one_byte());
+ byte_data_.WriteUint32(name->length());
+ for (int i = 0; i < name->length(); ++i) {
+ byte_data_.WriteUint8(name->raw_data()[i]);
+ }
+#endif
+
+ byte variable_data = VariableMaybeAssignedField::encode(
+ var->maybe_assigned() == kMaybeAssigned) |
+ VariableContextAllocatedField::encode(
+ var->has_forced_context_allocation());
+ byte_data_.WriteQuarter(variable_data);
+}
+
+void PreparseDataBuilder::SaveDataForInnerScopes(Scope* scope) {
+  // Inner scopes are linked in reverse order, and the consumer reads them
+  // back in the same order (see RestoreDataForInnerScopes), so we can write
+  // them as we encounter them. There might be many inner scopes, so we don't
+  // want to recurse here.
+ for (Scope* inner = scope->inner_scope(); inner != nullptr;
+ inner = inner->sibling()) {
+ if (ScopeIsSkippableFunctionScope(inner)) {
+ // Don't save data about function scopes, since they'll have their own
+ // PreparseDataBuilder where their data is saved.
+ DCHECK_NOT_NULL(inner->AsDeclarationScope()->preparse_data_builder());
+ continue;
+ }
+ if (!ScopeNeedsData(inner)) continue;
+ SaveDataForScope(inner);
+ }
+}
+
+bool PreparseDataBuilder::ScopeIsSkippableFunctionScope(Scope* scope) {
+ // Lazy non-arrow function scopes are skippable. Lazy functions are exactly
+ // those Scopes which have their own PreparseDataBuilder object. This
+ // logic ensures that the scope allocation data is consistent with the
+ // skippable function data (both agree on where the lazy function boundaries
+ // are).
+ if (scope->scope_type() != ScopeType::FUNCTION_SCOPE) return false;
+ DeclarationScope* declaration_scope = scope->AsDeclarationScope();
+ return !declaration_scope->is_arrow_scope() &&
+ declaration_scope->preparse_data_builder() != nullptr;
+}
+
+Handle<PreparseData> PreparseDataBuilder::ByteData::CopyToHeap(
+ Isolate* isolate, int children_length) {
+ DCHECK(is_finalized_);
+ int data_length = zone_byte_data_.length();
+ Handle<PreparseData> data =
+ isolate->factory()->NewPreparseData(data_length, children_length);
+ data->copy_in(0, zone_byte_data_.start(), data_length);
+ return data;
+}
+
+ZonePreparseData* PreparseDataBuilder::ByteData::CopyToZone(
+ Zone* zone, int children_length) {
+ DCHECK(is_finalized_);
+ return new (zone) ZonePreparseData(zone, &zone_byte_data_, children_length);
+}
+
+Handle<PreparseData> PreparseDataBuilder::Serialize(Isolate* isolate) {
+ DCHECK(HasData());
+ DCHECK(!ThisOrParentBailedOut());
+ Handle<PreparseData> data =
+ byte_data_.CopyToHeap(isolate, num_inner_with_data_);
+ int i = 0;
+ for (const auto& builder : children_) {
+ if (!builder->HasData()) continue;
+ Handle<PreparseData> child_data = builder->Serialize(isolate);
+ data->set_child(i++, *child_data);
+ }
+ DCHECK_EQ(i, data->children_length());
+ return data;
+}
+
+ZonePreparseData* PreparseDataBuilder::Serialize(Zone* zone) {
+ DCHECK(HasData());
+ DCHECK(!ThisOrParentBailedOut());
+ ZonePreparseData* data = byte_data_.CopyToZone(zone, num_inner_with_data_);
+ int i = 0;
+ for (const auto& builder : children_) {
+ if (!builder->HasData()) continue;
+ ZonePreparseData* child = builder->Serialize(zone);
+ data->set_child(i++, child);
+ }
+ DCHECK_EQ(i, data->children_length());
+ return data;
+}
+
+class BuilderProducedPreparseData final : public ProducedPreparseData {
+ public:
+ explicit BuilderProducedPreparseData(PreparseDataBuilder* builder)
+ : builder_(builder) {
+ DCHECK(builder->HasData());
+ }
+
+ Handle<PreparseData> Serialize(Isolate* isolate) final {
+ return builder_->Serialize(isolate);
+ }
+
+ ZonePreparseData* Serialize(Zone* zone) final {
+ return builder_->Serialize(zone);
+ };
+
+ private:
+ PreparseDataBuilder* builder_;
+};
+
+class OnHeapProducedPreparseData final : public ProducedPreparseData {
+ public:
+ explicit OnHeapProducedPreparseData(Handle<PreparseData> data)
+ : data_(data) {}
+
+ Handle<PreparseData> Serialize(Isolate* isolate) final {
+ DCHECK(!data_->is_null());
+ return data_;
+ }
+
+ ZonePreparseData* Serialize(Zone* zone) final {
+ // Not required.
+ UNREACHABLE();
+ };
+
+ private:
+ Handle<PreparseData> data_;
+};
+
+class ZoneProducedPreparseData final : public ProducedPreparseData {
+ public:
+ explicit ZoneProducedPreparseData(ZonePreparseData* data) : data_(data) {}
+
+ Handle<PreparseData> Serialize(Isolate* isolate) final {
+ return data_->Serialize(isolate);
+ }
+
+ ZonePreparseData* Serialize(Zone* zone) final { return data_; };
+
+ private:
+ ZonePreparseData* data_;
+};
+
+ProducedPreparseData* ProducedPreparseData::For(PreparseDataBuilder* builder,
+ Zone* zone) {
+ return new (zone) BuilderProducedPreparseData(builder);
+}
+
+ProducedPreparseData* ProducedPreparseData::For(Handle<PreparseData> data,
+ Zone* zone) {
+ return new (zone) OnHeapProducedPreparseData(data);
+}
+
+ProducedPreparseData* ProducedPreparseData::For(ZonePreparseData* data,
+ Zone* zone) {
+ return new (zone) ZoneProducedPreparseData(data);
+}
+
+template <class Data>
+ProducedPreparseData*
+BaseConsumedPreparseData<Data>::GetDataForSkippableFunction(
+ Zone* zone, int start_position, int* end_position, int* num_parameters,
+ int* num_inner_functions, bool* uses_super_property,
+ LanguageMode* language_mode) {
+ // The skippable function *must* be the next function in the data. Use the
+ // start position as a sanity check.
+ typename ByteData::ReadingScope reading_scope(this);
+ CHECK(scope_data_->HasRemainingBytes(
+ PreparseByteDataConstants::kSkippableFunctionMinDataSize));
+ int start_position_from_data = scope_data_->ReadVarint32();
+ CHECK_EQ(start_position, start_position_from_data);
+ *end_position = scope_data_->ReadVarint32();
+ DCHECK_GT(*end_position, start_position);
+
+ uint32_t has_data_and_num_parameters = scope_data_->ReadVarint32();
+ bool has_data = HasDataField::decode(has_data_and_num_parameters);
+ *num_parameters =
+ NumberOfParametersField::decode(has_data_and_num_parameters);
+ *num_inner_functions = scope_data_->ReadVarint32();
+
+ uint8_t language_and_super = scope_data_->ReadQuarter();
+ *language_mode = LanguageMode(LanguageField::decode(language_and_super));
+ *uses_super_property = UsesSuperField::decode(language_and_super);
+
+ if (!has_data) return nullptr;
+
+  // Retrieve the corresponding PreparseData and associate it with the
+  // skipped function. If the skipped function contains inner functions, those
+  // can be skipped when the skipped function is eagerly parsed.
+ return GetChildData(zone, child_index_++);
+}
+
+template <class Data>
+void BaseConsumedPreparseData<Data>::RestoreScopeAllocationData(
+ DeclarationScope* scope) {
+ DCHECK_EQ(scope->scope_type(), ScopeType::FUNCTION_SCOPE);
+ typename ByteData::ReadingScope reading_scope(this);
+
+#ifdef DEBUG
+ int magic_value_from_data = scope_data_->ReadUint32();
+ // Check that we've consumed all inner function data.
+ DCHECK_EQ(magic_value_from_data, ByteData::kMagicValue);
+
+ int start_position_from_data = scope_data_->ReadUint32();
+ int end_position_from_data = scope_data_->ReadUint32();
+ DCHECK_EQ(start_position_from_data, scope->start_position());
+ DCHECK_EQ(end_position_from_data, scope->end_position());
+#endif
+
+ RestoreDataForScope(scope);
+
+ // Check that we consumed all scope data.
+ DCHECK_EQ(scope_data_->RemainingBytes(), 0);
+}
+
+template <typename Data>
+void BaseConsumedPreparseData<Data>::RestoreDataForScope(Scope* scope) {
+ if (scope->is_declaration_scope() &&
+ scope->AsDeclarationScope()->is_skipped_function()) {
+ return;
+ }
+
+ // It's possible that scope is not present in the data at all (since PreParser
+ // doesn't create the corresponding scope). In this case, the Scope won't
+ // contain any variables for which we need the data.
+ if (!PreparseDataBuilder::ScopeNeedsData(scope)) return;
+
+ // scope_type is stored only in debug mode.
+ DCHECK_EQ(scope_data_->ReadUint8(), scope->scope_type());
+
+ CHECK(scope_data_->HasRemainingBytes(ByteData::kUint8Size));
+ uint32_t eval = scope_data_->ReadUint8();
+ if (ScopeCallsSloppyEvalField::decode(eval)) scope->RecordEvalCall();
+ if (InnerScopeCallsEvalField::decode(eval)) scope->RecordInnerScopeEvalCall();
+
+ if (scope->scope_type() == ScopeType::FUNCTION_SCOPE) {
+ Variable* function = scope->AsDeclarationScope()->function_var();
+ if (function != nullptr) RestoreDataForVariable(function);
+ }
+
+ for (Variable* var : *scope->locals()) {
+ if (IsDeclaredVariableMode(var->mode())) RestoreDataForVariable(var);
+ }
+
+ RestoreDataForInnerScopes(scope);
+}
+
+template <typename Data>
+void BaseConsumedPreparseData<Data>::RestoreDataForVariable(Variable* var) {
+#ifdef DEBUG
+ const AstRawString* name = var->raw_name();
+ bool data_one_byte = scope_data_->ReadUint8();
+ DCHECK_IMPLIES(name->is_one_byte(), data_one_byte);
+ DCHECK_EQ(scope_data_->ReadUint32(), static_cast<uint32_t>(name->length()));
+ if (!name->is_one_byte() && data_one_byte) {
+ // It's possible that "name" is a two-byte representation of the string
+ // stored in the data.
+ for (int i = 0; i < 2 * name->length(); i += 2) {
+#if defined(V8_TARGET_LITTLE_ENDIAN)
+ DCHECK_EQ(scope_data_->ReadUint8(), name->raw_data()[i]);
+ DCHECK_EQ(0, name->raw_data()[i + 1]);
+#else
+ DCHECK_EQ(scope_data_->ReadUint8(), name->raw_data()[i + 1]);
+ DCHECK_EQ(0, name->raw_data()[i]);
+#endif // V8_TARGET_LITTLE_ENDIAN
+ }
+ } else {
+ for (int i = 0; i < name->length(); ++i) {
+ DCHECK_EQ(scope_data_->ReadUint8(), name->raw_data()[i]);
+ }
+ }
+#endif
+ uint8_t variable_data = scope_data_->ReadQuarter();
+ if (VariableMaybeAssignedField::decode(variable_data)) {
+ var->set_maybe_assigned();
+ }
+ if (VariableContextAllocatedField::decode(variable_data)) {
+ var->set_is_used();
+ var->ForceContextAllocation();
+ }
+}
+
+template <typename Data>
+void BaseConsumedPreparseData<Data>::RestoreDataForInnerScopes(Scope* scope) {
+ for (Scope* inner = scope->inner_scope(); inner != nullptr;
+ inner = inner->sibling()) {
+ RestoreDataForScope(inner);
+ }
+}
+
+#ifdef DEBUG
+template <class Data>
+bool BaseConsumedPreparseData<Data>::VerifyDataStart() {
+ typename ByteData::ReadingScope reading_scope(this);
+ // The first uint32 contains the size of the skippable function data.
+ int scope_data_start = scope_data_->ReadUint32();
+ scope_data_->SetPosition(scope_data_start);
+ CHECK_EQ(scope_data_->ReadUint32(), ByteData::kMagicValue);
+ // The first data item is scope_data_start. Skip over it.
+ scope_data_->SetPosition(ByteData::kPlaceholderSize);
+ return true;
+}
+#endif
+
+PreparseData OnHeapConsumedPreparseData::GetScopeData() { return *data_; }
+
+ProducedPreparseData* OnHeapConsumedPreparseData::GetChildData(Zone* zone,
+ int index) {
+ DisallowHeapAllocation no_gc;
+ Handle<PreparseData> child_data_handle(data_->get_child(index), isolate_);
+ return ProducedPreparseData::For(child_data_handle, zone);
+}
+
+OnHeapConsumedPreparseData::OnHeapConsumedPreparseData(
+ Isolate* isolate, Handle<PreparseData> data)
+ : BaseConsumedPreparseData<PreparseData>(), isolate_(isolate), data_(data) {
+ DCHECK_NOT_NULL(isolate);
+ DCHECK(data->IsPreparseData());
+ DCHECK(VerifyDataStart());
+}
+
+ZonePreparseData::ZonePreparseData(Zone* zone, Vector<uint8_t>* byte_data,
+ int children_length)
+ : byte_data_(byte_data->begin(), byte_data->end(), zone),
+ children_(children_length, zone) {}
+
+Handle<PreparseData> ZonePreparseData::Serialize(Isolate* isolate) {
+ int data_size = static_cast<int>(byte_data()->size());
+ int child_data_length = children_length();
+ Handle<PreparseData> result =
+ isolate->factory()->NewPreparseData(data_size, child_data_length);
+ result->copy_in(0, byte_data()->data(), data_size);
+
+ for (int i = 0; i < child_data_length; i++) {
+ ZonePreparseData* child = get_child(i);
+ DCHECK_NOT_NULL(child);
+ Handle<PreparseData> child_data = child->Serialize(isolate);
+ result->set_child(i, *child_data);
+ }
+ return result;
+}
+
+ZoneConsumedPreparseData::ZoneConsumedPreparseData(Zone* zone,
+ ZonePreparseData* data)
+ : data_(data), scope_data_wrapper_(data_->byte_data()) {
+ DCHECK(VerifyDataStart());
+}
+
+ZoneVectorWrapper ZoneConsumedPreparseData::GetScopeData() {
+ return scope_data_wrapper_;
+}
+
+ProducedPreparseData* ZoneConsumedPreparseData::GetChildData(Zone* zone,
+ int child_index) {
+ CHECK_GT(data_->children_length(), child_index);
+ ZonePreparseData* child_data = data_->get_child(child_index);
+ if (child_data == nullptr) return nullptr;
+ return ProducedPreparseData::For(child_data, zone);
+}
+
+std::unique_ptr<ConsumedPreparseData> ConsumedPreparseData::For(
+ Isolate* isolate, Handle<PreparseData> data) {
+ DCHECK(!data.is_null());
+ return base::make_unique<OnHeapConsumedPreparseData>(isolate, data);
+}
+
+std::unique_ptr<ConsumedPreparseData> ConsumedPreparseData::For(
+ Zone* zone, ZonePreparseData* data) {
+ if (data == nullptr) return {};
+ return base::make_unique<ZoneConsumedPreparseData>(zone, data);
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/parsing/preparse-data.h b/deps/v8/src/parsing/preparse-data.h
new file mode 100644
index 0000000000..0e08297c36
--- /dev/null
+++ b/deps/v8/src/parsing/preparse-data.h
@@ -0,0 +1,275 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_PARSING_PREPARSE_DATA_H_
+#define V8_PARSING_PREPARSE_DATA_H_
+
+#include "src/globals.h"
+#include "src/handles.h"
+#include "src/maybe-handles.h"
+#include "src/zone/zone-chunk-list.h"
+#include "src/zone/zone-containers.h"
+
+namespace v8 {
+namespace internal {
+
+template <typename T>
+class PodArray;
+
+class Parser;
+class PreParser;
+class PreparseData;
+class ZonePreparseData;
+
+/*
+
+ Skipping inner functions.
+
+ Consider the following code:
+ (function eager_outer() {
+ function lazy_inner() {
+ let a;
+ function skip_me() { a; }
+ }
+
+ return lazy_inner;
+ })();
+
+ ... lazy_inner(); ...
+
+ When parsing the code the first time, eager_outer is parsed and lazy_inner
+ (and everything inside it) is preparsed. When lazy_inner is called, we don't
+ want to parse or preparse skip_me again. Instead, we want to skip over it,
+ since it has already been preparsed once.
+
+ In order to be able to do this, we need to store the information needed for
+ allocating the variables in lazy_inner when we preparse it, and then later do
+ scope allocation based on that data.
+
+ We need the following data for each scope in lazy_inner's scope tree:
+ For each Variable:
+ - is_used
+ - maybe_assigned
+ - has_forced_context_allocation
+
+ For each Scope:
+ - inner_scope_calls_eval_.
+
+ ProducedPreparseData implements storing the above mentioned data and
+ ConsumedPreparseData implements restoring it (= setting the context
+ allocation status of the variables in a Scope (and its subscopes) based on the
+ data).
+
+ */
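+
+/*
+ A rough usage sketch (simplified; `builder`, `zone`, `isolate` and `scope`
+ are placeholders rather than an actual V8 call site, and `builder` is
+ assumed to satisfy HasData()):
+
+   // Producer side, after preparsing a lazy function:
+   ProducedPreparseData* produced =
+       ProducedPreparseData::For(builder, zone);
+   Handle<PreparseData> data = produced->Serialize(isolate);
+
+   // Consumer side, when the lazy function is later compiled:
+   std::unique_ptr<ConsumedPreparseData> consumed =
+       ConsumedPreparseData::For(isolate, data);
+   consumed->RestoreScopeAllocationData(scope);
+ */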
+
+struct PreparseByteDataConstants {
+#ifdef DEBUG
+ static constexpr int kMagicValue = 0xC0DE0DE;
+
+ static constexpr size_t kUint32Size = 5;
+ static constexpr size_t kVarintMinSize = 3;
+ static constexpr size_t kVarintEndMarker = 0xF1;
+ static constexpr size_t kUint8Size = 2;
+ static constexpr size_t kQuarterMarker = 0xF2;
+ static constexpr size_t kPlaceholderSize = kUint32Size;
+#else
+ static constexpr size_t kUint32Size = 4;
+ static constexpr size_t kVarintMinSize = 1;
+ static constexpr size_t kUint8Size = 1;
+ static constexpr size_t kPlaceholderSize = 0;
+#endif
+
+ static const size_t kSkippableFunctionMinDataSize =
+ 4 * kVarintMinSize + 1 * kUint8Size;
+};
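+
+// The debug-mode sizes above are larger because each item carries marker
+// bytes: a uint32 is one marker plus four payload bytes, a uint8 is one
+// marker plus one payload byte, and a varint is at least a marker, one
+// payload byte, and an end marker.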
+
+class PreparseDataBuilder : public ZoneObject,
+ public PreparseByteDataConstants {
+ public:
+ // Create a PreparseDataBuilder object which will collect data as we
+ // parse.
+ explicit PreparseDataBuilder(Zone* zone, PreparseDataBuilder* parent_builder);
+
+ PreparseDataBuilder* parent() const { return parent_; }
+
+ // For gathering the inner function data and splitting it up according to the
+ // laziness boundaries. Each lazy function gets its own
+ // ProducedPreparseData, and so do all lazy functions inside it.
+ class DataGatheringScope {
+ public:
+ explicit DataGatheringScope(PreParser* preparser)
+ : preparser_(preparser), builder_(nullptr) {}
+
+ void Start(DeclarationScope* function_scope);
+ void SetSkippableFunction(DeclarationScope* function_scope,
+ int num_inner_functions);
+ ~DataGatheringScope();
+
+ private:
+ PreParser* preparser_;
+ PreparseDataBuilder* builder_;
+
+ DISALLOW_COPY_AND_ASSIGN(DataGatheringScope);
+ };
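+
+  // A rough usage sketch (hypothetical, simplified call site):
+  //   PreparseDataBuilder::DataGatheringScope data_scope(preparser);
+  //   data_scope.Start(function_scope);
+  //   ... preparse the function body ...
+  //   data_scope.SetSkippableFunction(function_scope, num_inner_functions);
+  // On destruction, the builder is linked into its parent's children_ (if it
+  // has data the parent needs) and the parent becomes the active builder.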
+
+ class ByteData : public ZoneObject, public PreparseByteDataConstants {
+ public:
+ ByteData() : byte_data_(nullptr), free_quarters_in_last_byte_(0) {}
+
+ ~ByteData() {}
+
+ void Start(std::vector<uint8_t>* buffer);
+ void Finalize(Zone* zone);
+
+ Handle<PreparseData> CopyToHeap(Isolate* isolate, int children_length);
+ ZonePreparseData* CopyToZone(Zone* zone, int children_length);
+
+ void WriteVarint32(uint32_t data);
+ void WriteUint8(uint8_t data);
+ void WriteQuarter(uint8_t data);
+
+#ifdef DEBUG
+ void WriteUint32(uint32_t data);
+ // For overwriting previously written data at position 0.
+ void SaveCurrentSizeAtFirstUint32();
+ int length() const;
+#endif
+
+ private:
+ union {
+ // Only used during construction (is_finalized_ == false).
+ std::vector<uint8_t>* byte_data_;
+    // Once the data is finalized, it lives in a Zone; this implies
+ // is_finalized_ == true.
+ Vector<uint8_t> zone_byte_data_;
+ };
+ uint8_t free_quarters_in_last_byte_;
+
+#ifdef DEBUG
+ bool is_finalized_ = false;
+#endif
+ };
+
+ // Saves the information needed for allocating the Scope's (and its
+ // subscopes') variables.
+ void SaveScopeAllocationData(DeclarationScope* scope, Parser* parser);
+
+ // In some cases, PreParser cannot produce the same Scope structure as
+ // Parser. If it happens, we're unable to produce the data that would enable
+ // skipping the inner functions of that function.
+ void Bailout() {
+ bailed_out_ = true;
+ // We don't need to call Bailout on existing / future children: the only way
+ // to try to retrieve their data is through calling Serialize on the parent,
+ // and if the parent is bailed out, it won't call Serialize on its children.
+ }
+
+ bool bailed_out() const { return bailed_out_; }
+
+#ifdef DEBUG
+ bool ThisOrParentBailedOut() const {
+ if (bailed_out_) return true;
+ if (parent_ == nullptr) return false;
+ return parent_->ThisOrParentBailedOut();
+ }
+#endif // DEBUG
+
+ bool HasInnerFunctions() const;
+ bool HasData() const;
+ bool HasDataForParent() const;
+
+ static bool ScopeNeedsData(Scope* scope);
+ static bool ScopeIsSkippableFunctionScope(Scope* scope);
+ void AddSkippableFunction(int start_position, int end_position,
+ int num_parameters, int num_inner_functions,
+ LanguageMode language_mode, bool has_data,
+ bool uses_super_property);
+
+ private:
+ friend class BuilderProducedPreparseData;
+
+ Handle<PreparseData> Serialize(Isolate* isolate);
+ ZonePreparseData* Serialize(Zone* zone);
+
+ void SaveDataForScope(Scope* scope);
+ void SaveDataForVariable(Variable* var);
+ void SaveDataForInnerScopes(Scope* scope);
+ bool SaveDataForSkippableFunction(PreparseDataBuilder* builder);
+
+ void CopyByteData(Zone* zone);
+
+ PreparseDataBuilder* parent_;
+ ByteData byte_data_;
+ ZoneChunkList<PreparseDataBuilder*> children_;
+
+ DeclarationScope* function_scope_;
+ int num_inner_functions_;
+ int num_inner_with_data_;
+
+ // Whether we've given up producing the data for this function.
+ bool bailed_out_ : 1;
+ bool has_data_ : 1;
+
+ DISALLOW_COPY_AND_ASSIGN(PreparseDataBuilder);
+};
+
+class ProducedPreparseData : public ZoneObject {
+ public:
+  // If there is data (i.e., the Scope contains skippable inner functions),
+  // move the data into the heap and return a Handle to it. Implementations
+  // require the data to exist; there is no null result.
+ virtual Handle<PreparseData> Serialize(Isolate* isolate) = 0;
+
+ // If there is data (if the Scope contains skippable inner functions), return
+ // an off-heap ZonePreparseData representing the data; otherwise
+ // return nullptr.
+ virtual ZonePreparseData* Serialize(Zone* zone) = 0;
+
+  // Create a ProducedPreparseData which serializes the data held by
+  // {builder} on demand.
+ static ProducedPreparseData* For(PreparseDataBuilder* builder, Zone* zone);
+
+  // Create a ProducedPreparseData which is a proxy for a previously
+  // produced PreparseData on the heap.
+ static ProducedPreparseData* For(Handle<PreparseData> data, Zone* zone);
+
+  // Create a ProducedPreparseData which is a proxy for a previously
+  // produced PreparseData in zone memory.
+ static ProducedPreparseData* For(ZonePreparseData* data, Zone* zone);
+};
+
+class ConsumedPreparseData {
+ public:
+ // Creates a ConsumedPreparseData representing the data of an on-heap
+ // PreparseData |data|.
+ static std::unique_ptr<ConsumedPreparseData> For(Isolate* isolate,
+ Handle<PreparseData> data);
+
+ // Creates a ConsumedPreparseData representing the data of an off-heap
+ // ZonePreparseData |data|.
+ static std::unique_ptr<ConsumedPreparseData> For(Zone* zone,
+ ZonePreparseData* data);
+
+ virtual ~ConsumedPreparseData() = default;
+
+ virtual ProducedPreparseData* GetDataForSkippableFunction(
+ Zone* zone, int start_position, int* end_position, int* num_parameters,
+ int* num_inner_functions, bool* uses_super_property,
+ LanguageMode* language_mode) = 0;
+
+ // Restores the information needed for allocating the Scope's (and its
+ // subscopes') variables.
+ virtual void RestoreScopeAllocationData(DeclarationScope* scope) = 0;
+
+ protected:
+ ConsumedPreparseData() = default;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(ConsumedPreparseData);
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_PARSING_PREPARSE_DATA_H_
diff --git a/deps/v8/src/parsing/preparsed-scope-data-impl.h b/deps/v8/src/parsing/preparsed-scope-data-impl.h
deleted file mode 100644
index e2d31c07d5..0000000000
--- a/deps/v8/src/parsing/preparsed-scope-data-impl.h
+++ /dev/null
@@ -1,259 +0,0 @@
-// Copyright 2018 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_PARSING_PREPARSED_SCOPE_DATA_IMPL_H_
-#define V8_PARSING_PREPARSED_SCOPE_DATA_IMPL_H_
-
-#include "src/parsing/preparsed-scope-data.h"
-
-#include "src/assert-scope.h"
-
-namespace v8 {
-namespace internal {
-
-// Classes which are internal to prepared-scope-data.cc, but are exposed in
-// a header for tests.
-
-struct PreParsedScopeByteDataConstants {
-#ifdef DEBUG
- static constexpr int kMagicValue = 0xC0DE0DE;
-
- static constexpr size_t kUint32Size = 5;
- static constexpr size_t kUint8Size = 2;
- static constexpr size_t kQuarterMarker = 0;
- static constexpr size_t kPlaceholderSize = kUint32Size;
-#else
- static constexpr size_t kUint32Size = 4;
- static constexpr size_t kUint8Size = 1;
- static constexpr size_t kPlaceholderSize = 0;
-#endif
-
- static const size_t kSkippableFunctionDataSize =
- 4 * kUint32Size + 1 * kUint8Size;
-};
-
-class PreParsedScopeDataBuilder::ByteData
- : public ZoneObject,
- public PreParsedScopeByteDataConstants {
- public:
- explicit ByteData(Zone* zone)
- : backing_store_(zone), free_quarters_in_last_byte_(0) {}
-
- void WriteUint32(uint32_t data);
- void WriteUint8(uint8_t data);
- void WriteQuarter(uint8_t data);
-
-#ifdef DEBUG
- // For overwriting previously written data at position 0.
- void OverwriteFirstUint32(uint32_t data);
-#endif
-
- Handle<PodArray<uint8_t>> Serialize(Isolate* isolate);
-
- size_t size() const { return backing_store_.size(); }
-
- ZoneChunkList<uint8_t>::iterator begin() { return backing_store_.begin(); }
-
- ZoneChunkList<uint8_t>::iterator end() { return backing_store_.end(); }
-
- private:
- ZoneChunkList<uint8_t> backing_store_;
- uint8_t free_quarters_in_last_byte_;
-};
-
-template <class Data>
-class BaseConsumedPreParsedScopeData : public ConsumedPreParsedScopeData {
- public:
- class ByteData : public PreParsedScopeByteDataConstants {
- public:
- ByteData()
- : data_(nullptr), index_(0), stored_quarters_(0), stored_byte_(0) {}
-
- // Reading from the ByteData is only allowed when a ReadingScope is on the
- // stack. This ensures that we have a DisallowHeapAllocation in place
- // whenever ByteData holds a raw pointer into the heap.
- class ReadingScope {
- public:
- ReadingScope(ByteData* consumed_data, Data* data)
- : consumed_data_(consumed_data) {
- consumed_data->data_ = data;
- }
- explicit ReadingScope(BaseConsumedPreParsedScopeData<Data>* parent)
- : ReadingScope(parent->scope_data_.get(), parent->GetScopeData()) {}
- ~ReadingScope() { consumed_data_->data_ = nullptr; }
-
- private:
- ByteData* consumed_data_;
- DisallowHeapAllocation no_gc;
- };
-
- void SetPosition(int position) { index_ = position; }
-
- size_t RemainingBytes() const {
- DCHECK_NOT_NULL(data_);
- return data_->length() - index_;
- }
-
- int32_t ReadUint32() {
- DCHECK_NOT_NULL(data_);
- DCHECK_GE(RemainingBytes(), kUint32Size);
-#ifdef DEBUG
- // Check that there indeed is an integer following.
- DCHECK_EQ(data_->get(index_++), kUint32Size);
-#endif
- int32_t result = 0;
- byte* p = reinterpret_cast<byte*>(&result);
- for (int i = 0; i < 4; ++i) {
- *p++ = data_->get(index_++);
- }
- stored_quarters_ = 0;
- return result;
- }
-
- uint8_t ReadUint8() {
- DCHECK_NOT_NULL(data_);
- DCHECK_GE(RemainingBytes(), kUint8Size);
-#ifdef DEBUG
- // Check that there indeed is a byte following.
- DCHECK_EQ(data_->get(index_++), kUint8Size);
-#endif
- stored_quarters_ = 0;
- return data_->get(index_++);
- }
-
- uint8_t ReadQuarter() {
- DCHECK_NOT_NULL(data_);
- if (stored_quarters_ == 0) {
- DCHECK_GE(RemainingBytes(), kUint8Size);
-#ifdef DEBUG
- // Check that there indeed are quarters following.
- DCHECK_EQ(data_->get(index_++), kQuarterMarker);
-#endif
- stored_byte_ = data_->get(index_++);
- stored_quarters_ = 4;
- }
- // Read the first 2 bits from stored_byte_.
- uint8_t result = (stored_byte_ >> 6) & 3;
- DCHECK_LE(result, 3);
- --stored_quarters_;
- stored_byte_ <<= 2;
- return result;
- }
-
- private:
- Data* data_;
- int index_;
- uint8_t stored_quarters_;
- uint8_t stored_byte_;
- };
-
- BaseConsumedPreParsedScopeData()
- : scope_data_(new ByteData()), child_index_(0) {}
-
- virtual Data* GetScopeData() = 0;
-
- virtual ProducedPreParsedScopeData* GetChildData(Zone* zone,
- int child_index) = 0;
-
- ProducedPreParsedScopeData* GetDataForSkippableFunction(
- Zone* zone, int start_position, int* end_position, int* num_parameters,
- int* num_inner_functions, bool* uses_super_property,
- LanguageMode* language_mode) final;
-
- void RestoreScopeAllocationData(DeclarationScope* scope) final;
-
-#ifdef DEBUG
- void VerifyDataStart();
-#endif
-
- private:
- void RestoreData(Scope* scope);
- void RestoreDataForVariable(Variable* var);
- void RestoreDataForInnerScopes(Scope* scope);
-
- std::unique_ptr<ByteData> scope_data_;
- // When consuming the data, these indexes point to the data we're going to
- // consume next.
- int child_index_;
-
- DISALLOW_COPY_AND_ASSIGN(BaseConsumedPreParsedScopeData);
-};
-
-// Implementation of ConsumedPreParsedScopeData for on-heap data.
-class OnHeapConsumedPreParsedScopeData final
- : public BaseConsumedPreParsedScopeData<PodArray<uint8_t>> {
- public:
- OnHeapConsumedPreParsedScopeData(Isolate* isolate,
- Handle<PreParsedScopeData> data);
-
- PodArray<uint8_t>* GetScopeData() final;
- ProducedPreParsedScopeData* GetChildData(Zone* zone, int child_index) final;
-
- private:
- Isolate* isolate_;
- Handle<PreParsedScopeData> data_;
-};
-
-// Wraps a ZoneVector<uint8_t> to have with functions named the same as
-// PodArray<uint8_t>.
-class ZoneVectorWrapper {
- public:
- explicit ZoneVectorWrapper(ZoneVector<uint8_t>* data) : data_(data) {}
-
- int length() const { return static_cast<int>(data_->size()); }
-
- uint8_t get(int index) const { return data_->at(index); }
-
- private:
- ZoneVector<uint8_t>* data_;
-
- DISALLOW_COPY_AND_ASSIGN(ZoneVectorWrapper);
-};
-
-// A serialized PreParsedScopeData in zone memory (as apposed to being on-heap).
-class ZonePreParsedScopeData : public ZoneObject {
- public:
- ZonePreParsedScopeData(Zone* zone,
- ZoneChunkList<uint8_t>::iterator byte_data_begin,
- ZoneChunkList<uint8_t>::iterator byte_data_end,
- int child_length);
-
- Handle<PreParsedScopeData> Serialize(Isolate* isolate);
-
- int child_length() const { return static_cast<int>(children_.size()); }
-
- ZonePreParsedScopeData* get_child(int index) { return children_[index]; }
-
- void set_child(int index, ZonePreParsedScopeData* child) {
- children_[index] = child;
- }
-
- ZoneVector<uint8_t>* byte_data() { return &byte_data_; }
-
- private:
- ZoneVector<uint8_t> byte_data_;
- ZoneVector<ZonePreParsedScopeData*> children_;
-
- DISALLOW_COPY_AND_ASSIGN(ZonePreParsedScopeData);
-};
-
-// Implementation of ConsumedPreParsedScopeData for PreParsedScopeData
-// serialized into zone memory.
-class ZoneConsumedPreParsedScopeData final
- : public BaseConsumedPreParsedScopeData<ZoneVectorWrapper> {
- public:
- ZoneConsumedPreParsedScopeData(Zone* zone, ZonePreParsedScopeData* data);
-
- ZoneVectorWrapper* GetScopeData() final;
- ProducedPreParsedScopeData* GetChildData(Zone* zone, int child_index) final;
-
- private:
- ZonePreParsedScopeData* data_;
- ZoneVectorWrapper scope_data_wrapper_;
-};
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_PARSING_PREPARSED_SCOPE_DATA_IMPL_H_
diff --git a/deps/v8/src/parsing/preparsed-scope-data.cc b/deps/v8/src/parsing/preparsed-scope-data.cc
deleted file mode 100644
index 9d61740753..0000000000
--- a/deps/v8/src/parsing/preparsed-scope-data.cc
+++ /dev/null
@@ -1,737 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/parsing/preparsed-scope-data.h"
-
-#include <vector>
-
-#include "src/ast/scopes.h"
-#include "src/ast/variables.h"
-#include "src/handles.h"
-#include "src/objects-inl.h"
-#include "src/objects/shared-function-info.h"
-#include "src/parsing/preparsed-scope-data-impl.h"
-#include "src/parsing/preparser.h"
-
-namespace v8 {
-namespace internal {
-
-namespace {
-
-class ScopeCallsSloppyEvalField : public BitField<bool, 0, 1> {};
-class InnerScopeCallsEvalField
- : public BitField<bool, ScopeCallsSloppyEvalField::kNext, 1> {};
-
-class VariableMaybeAssignedField : public BitField8<bool, 0, 1> {};
-class VariableContextAllocatedField
- : public BitField8<bool, VariableMaybeAssignedField::kNext, 1> {};
-
-class LanguageField : public BitField8<LanguageMode, 0, 1> {};
-class UsesSuperField : public BitField8<bool, LanguageField::kNext, 1> {};
-STATIC_ASSERT(LanguageModeSize <= LanguageField::kNumValues);
-
-} // namespace
-
-/*
-
- Internal data format for the backing store of PreParsedScopeDataBuilder and
- PreParsedScopeData::scope_data (on the heap):
-
- (Skippable function data:)
- ------------------------------------
- | scope_data_start (debug only) |
- ------------------------------------
- | data for inner function 1 |
- | ... |
- ------------------------------------
- | data for inner function n |
- | ... |
- ------------------------------------
- (Scope allocation data:) << scope_data_start points here in debug
- ------------------------------------
- magic value (debug only)
- ------------------------------------
- scope positions (debug only)
- ------------------------------------
- | scope type << only in debug |
- | eval |
- | ---------------------- |
- | | data for variables | |
- | | ... | |
- | ---------------------- |
- ------------------------------------
- ------------------------------------
- | data for inner scope 1 | << but not for function scopes
- | ... |
- ------------------------------------
- ...
- ------------------------------------
- | data for inner scope m |
- | ... |
- ------------------------------------
-
- PreParsedScopeData::child_data is an array of PreParsedScopeData objects, one
- for each skippable inner function.
-
- ConsumedPreParsedScopeData wraps a PreParsedScopeData and reads data from it.
-
- */
-
-void PreParsedScopeDataBuilder::ByteData::WriteUint32(uint32_t data) {
-#ifdef DEBUG
- // Save expected item size in debug mode.
- backing_store_.push_back(kUint32Size);
-#endif
- const uint8_t* d = reinterpret_cast<uint8_t*>(&data);
- for (int i = 0; i < 4; ++i) {
- backing_store_.push_back(*d++);
- }
- free_quarters_in_last_byte_ = 0;
-}
-
-#ifdef DEBUG
-void PreParsedScopeDataBuilder::ByteData::OverwriteFirstUint32(uint32_t data) {
- auto it = backing_store_.begin();
- // Check that that position already holds an item of the expected size.
- DCHECK_GE(backing_store_.size(), kUint32Size);
- DCHECK_EQ(*it, kUint32Size);
- ++it;
- const uint8_t* d = reinterpret_cast<uint8_t*>(&data);
- for (size_t i = 0; i < 4; ++i) {
- *it++ = *d++;
- }
-}
-#endif
-
-void PreParsedScopeDataBuilder::ByteData::WriteUint8(uint8_t data) {
-#ifdef DEBUG
- // Save expected item size in debug mode.
- backing_store_.push_back(kUint8Size);
-#endif
- backing_store_.push_back(data);
- free_quarters_in_last_byte_ = 0;
-}
-
-void PreParsedScopeDataBuilder::ByteData::WriteQuarter(uint8_t data) {
- DCHECK_LE(data, 3);
- if (free_quarters_in_last_byte_ == 0) {
-#ifdef DEBUG
- // Save a marker in debug mode.
- backing_store_.push_back(kQuarterMarker);
-#endif
- backing_store_.push_back(0);
- free_quarters_in_last_byte_ = 3;
- } else {
- --free_quarters_in_last_byte_;
- }
-
- uint8_t shift_amount = free_quarters_in_last_byte_ * 2;
- DCHECK_EQ(backing_store_.back() & (3 << shift_amount), 0);
- backing_store_.back() |= (data << shift_amount);
-}
-
-Handle<PodArray<uint8_t>> PreParsedScopeDataBuilder::ByteData::Serialize(
- Isolate* isolate) {
- Handle<PodArray<uint8_t>> array = PodArray<uint8_t>::New(
- isolate, static_cast<int>(backing_store_.size()), TENURED);
-
- DisallowHeapAllocation no_gc;
- PodArray<uint8_t>* raw_array = *array;
-
- int i = 0;
- for (uint8_t item : backing_store_) {
- raw_array->set(i++, item);
- }
- return array;
-}
-
-PreParsedScopeDataBuilder::PreParsedScopeDataBuilder(
- Zone* zone, PreParsedScopeDataBuilder* parent)
- : parent_(parent),
- byte_data_(new (zone) ByteData(zone)),
- data_for_inner_functions_(zone),
- bailed_out_(false) {
- DCHECK(FLAG_preparser_scope_analysis);
- if (parent != nullptr) {
- parent->data_for_inner_functions_.push_back(this);
- }
-#ifdef DEBUG
- // Reserve space for scope_data_start, written later:
- byte_data_->WriteUint32(0);
-#endif
-}
-
-PreParsedScopeDataBuilder::DataGatheringScope::DataGatheringScope(
- DeclarationScope* function_scope, PreParser* preparser)
- : function_scope_(function_scope),
- preparser_(preparser),
- builder_(nullptr) {
- if (FLAG_preparser_scope_analysis) {
- PreParsedScopeDataBuilder* parent =
- preparser->preparsed_scope_data_builder();
- Zone* main_zone = preparser->main_zone();
- builder_ = new (main_zone) PreParsedScopeDataBuilder(main_zone, parent);
- preparser->set_preparsed_scope_data_builder(builder_);
- function_scope->set_preparsed_scope_data_builder(builder_);
- }
-}
-
-PreParsedScopeDataBuilder::DataGatheringScope::~DataGatheringScope() {
- if (builder_) {
- preparser_->set_preparsed_scope_data_builder(builder_->parent_);
- }
-}
-
-void PreParsedScopeDataBuilder::DataGatheringScope::MarkFunctionAsSkippable(
- int end_position, int num_inner_functions) {
- DCHECK_NOT_NULL(builder_);
- DCHECK_NOT_NULL(builder_->parent_);
- builder_->parent_->AddSkippableFunction(
- function_scope_->start_position(), end_position,
- function_scope_->num_parameters(), num_inner_functions,
- function_scope_->language_mode(), function_scope_->NeedsHomeObject());
-}
-
-void PreParsedScopeDataBuilder::AddSkippableFunction(int start_position,
- int end_position,
- int num_parameters,
- int num_inner_functions,
- LanguageMode language_mode,
- bool uses_super_property) {
- if (bailed_out_) {
- return;
- }
-
- // Start position is used for a sanity check when consuming the data, we could
- // remove it in the future if we're very pressed for space but it's been good
- // at catching bugs in the wild so far.
- byte_data_->WriteUint32(start_position);
- byte_data_->WriteUint32(end_position);
- byte_data_->WriteUint32(num_parameters);
- byte_data_->WriteUint32(num_inner_functions);
-
- uint8_t language_and_super = LanguageField::encode(language_mode) |
- UsesSuperField::encode(uses_super_property);
-
- byte_data_->WriteQuarter(language_and_super);
-}
-
-void PreParsedScopeDataBuilder::SaveScopeAllocationData(
- DeclarationScope* scope) {
- // The data contains a uint32 (reserved space for scope_data_start) and
- // function data items, kSkippableFunctionDataSize each.
- DCHECK_GE(byte_data_->size(), ByteData::kPlaceholderSize);
- DCHECK_LE(byte_data_->size(), std::numeric_limits<uint32_t>::max());
- DCHECK_EQ(byte_data_->size() % ByteData::kSkippableFunctionDataSize,
- ByteData::kPlaceholderSize);
-
- if (bailed_out_) {
- return;
- }
-
- uint32_t scope_data_start = static_cast<uint32_t>(byte_data_->size());
-
- // If there are no skippable inner functions, we don't need to save anything.
- if (scope_data_start == ByteData::kPlaceholderSize) {
- return;
- }
-
-#ifdef DEBUG
- byte_data_->OverwriteFirstUint32(scope_data_start);
-
- // For a data integrity check, write a value between data about skipped inner
- // funcs and data about variables.
- byte_data_->WriteUint32(ByteData::kMagicValue);
- byte_data_->WriteUint32(scope->start_position());
- byte_data_->WriteUint32(scope->end_position());
-#endif
-
- SaveDataForScope(scope);
-}
-
-bool PreParsedScopeDataBuilder::ContainsInnerFunctions() const {
- return byte_data_->size() > ByteData::kPlaceholderSize;
-}
-
-MaybeHandle<PreParsedScopeData> PreParsedScopeDataBuilder::Serialize(
- Isolate* isolate) {
- if (bailed_out_) {
- return MaybeHandle<PreParsedScopeData>();
- }
-
- DCHECK(!ThisOrParentBailedOut());
-
- if (byte_data_->size() <= ByteData::kPlaceholderSize) {
- // The data contains only the placeholder.
- return MaybeHandle<PreParsedScopeData>();
- }
-
- int child_data_length = static_cast<int>(data_for_inner_functions_.size());
- Handle<PreParsedScopeData> data =
- isolate->factory()->NewPreParsedScopeData(child_data_length);
-
- Handle<PodArray<uint8_t>> scope_data_array = byte_data_->Serialize(isolate);
- data->set_scope_data(*scope_data_array);
-
- int i = 0;
- for (const auto& item : data_for_inner_functions_) {
- Handle<PreParsedScopeData> child_data;
- if (item->Serialize(isolate).ToHandle(&child_data)) {
- data->set_child_data(i, *child_data);
- } else {
- DCHECK(data->child_data(i)->IsNull());
- }
- i++;
- }
-
- return data;
-}
-
-ZonePreParsedScopeData* PreParsedScopeDataBuilder::Serialize(Zone* zone) {
- if (bailed_out_) {
- return nullptr;
- }
-
- DCHECK(!ThisOrParentBailedOut());
-
- if (byte_data_->size() <= ByteData::kPlaceholderSize) {
- // The data contains only the placeholder.
- return nullptr;
- }
-
- int child_length = static_cast<int>(data_for_inner_functions_.size());
- ZonePreParsedScopeData* result = new (zone) ZonePreParsedScopeData(
- zone, byte_data_->begin(), byte_data_->end(), child_length);
-
- int i = 0;
- for (const auto& item : data_for_inner_functions_) {
- ZonePreParsedScopeData* child = item->Serialize(zone);
- result->set_child(i, child);
- i++;
- }
-
- return result;
-}
-
-bool PreParsedScopeDataBuilder::ScopeNeedsData(Scope* scope) {
- if (scope->scope_type() == ScopeType::FUNCTION_SCOPE) {
- // Default constructors don't need data (they cannot contain inner functions
- // defined by the user). Other functions do.
- return !IsDefaultConstructor(scope->AsDeclarationScope()->function_kind());
- }
- if (!scope->is_hidden()) {
- for (Variable* var : *scope->locals()) {
- if (IsDeclaredVariableMode(var->mode())) {
- return true;
- }
- }
- }
- for (Scope* inner = scope->inner_scope(); inner != nullptr;
- inner = inner->sibling()) {
- if (ScopeNeedsData(inner)) {
- return true;
- }
- }
- return false;
-}
-
-bool PreParsedScopeDataBuilder::ScopeIsSkippableFunctionScope(Scope* scope) {
- // Lazy non-arrow function scopes are skippable. Lazy functions are exactly
- // those Scopes which have their own PreParsedScopeDataBuilder object. This
- // logic ensures that the scope allocation data is consistent with the
- // skippable function data (both agree on where the lazy function boundaries
- // are).
- if (scope->scope_type() != ScopeType::FUNCTION_SCOPE) {
- return false;
- }
- DeclarationScope* declaration_scope = scope->AsDeclarationScope();
- return !declaration_scope->is_arrow_scope() &&
- declaration_scope->preparsed_scope_data_builder() != nullptr;
-}
-
-void PreParsedScopeDataBuilder::SaveDataForScope(Scope* scope) {
- DCHECK_NE(scope->end_position(), kNoSourcePosition);
-
- if (!ScopeNeedsData(scope)) {
- return;
- }
-
-#ifdef DEBUG
- byte_data_->WriteUint8(scope->scope_type());
-#endif
-
- uint8_t eval =
- ScopeCallsSloppyEvalField::encode(
- scope->is_declaration_scope() &&
- scope->AsDeclarationScope()->calls_sloppy_eval()) |
- InnerScopeCallsEvalField::encode(scope->inner_scope_calls_eval());
- byte_data_->WriteUint8(eval);
-
- if (scope->scope_type() == ScopeType::FUNCTION_SCOPE) {
- Variable* function = scope->AsDeclarationScope()->function_var();
- if (function != nullptr) {
- SaveDataForVariable(function);
- }
- }
-
- for (Variable* var : *scope->locals()) {
- if (IsDeclaredVariableMode(var->mode())) {
- SaveDataForVariable(var);
- }
- }
-
- SaveDataForInnerScopes(scope);
-}
-
-void PreParsedScopeDataBuilder::SaveDataForVariable(Variable* var) {
-#ifdef DEBUG
- // Store the variable name in debug mode; this way we can check that we
- // restore data to the correct variable.
- const AstRawString* name = var->raw_name();
- byte_data_->WriteUint8(name->is_one_byte());
- byte_data_->WriteUint32(name->length());
- for (int i = 0; i < name->length(); ++i) {
- byte_data_->WriteUint8(name->raw_data()[i]);
- }
-#endif
- byte variable_data = VariableMaybeAssignedField::encode(
- var->maybe_assigned() == kMaybeAssigned) |
- VariableContextAllocatedField::encode(
- var->has_forced_context_allocation());
- byte_data_->WriteQuarter(variable_data);
-}
-
-void PreParsedScopeDataBuilder::SaveDataForInnerScopes(Scope* scope) {
- // Inner scopes are stored in the reverse order, but we'd like to write the
- // data in the logical order. There might be many inner scopes, so we don't
- // want to recurse here.
- std::vector<Scope*> scopes;
- for (Scope* inner = scope->inner_scope(); inner != nullptr;
- inner = inner->sibling()) {
- if (ScopeIsSkippableFunctionScope(inner)) {
- // Don't save data about function scopes, since they'll have their own
- // PreParsedScopeDataBuilder where their data is saved.
- DCHECK_NOT_NULL(
- inner->AsDeclarationScope()->preparsed_scope_data_builder());
- continue;
- }
- scopes.push_back(inner);
- }
- for (auto it = scopes.rbegin(); it != scopes.rend(); ++it) {
- SaveDataForScope(*it);
- }
-}
-
-class BuilderProducedPreParsedScopeData final
- : public ProducedPreParsedScopeData {
- public:
- explicit BuilderProducedPreParsedScopeData(PreParsedScopeDataBuilder* builder)
- : builder_(builder) {}
-
- MaybeHandle<PreParsedScopeData> Serialize(Isolate* isolate) final {
- return builder_->Serialize(isolate);
- }
-
- ZonePreParsedScopeData* Serialize(Zone* zone) final {
- return builder_->Serialize(zone);
- };
-
- private:
- PreParsedScopeDataBuilder* builder_;
-};
-
-class OnHeapProducedPreParsedScopeData final
- : public ProducedPreParsedScopeData {
- public:
- explicit OnHeapProducedPreParsedScopeData(Handle<PreParsedScopeData> data)
- : data_(data) {}
-
- MaybeHandle<PreParsedScopeData> Serialize(Isolate* isolate) final {
- return data_;
- }
-
- ZonePreParsedScopeData* Serialize(Zone* zone) final {
- // Not required.
- UNREACHABLE();
- };
-
- private:
- Handle<PreParsedScopeData> data_;
-};
-
-class ZoneProducedPreParsedScopeData final : public ProducedPreParsedScopeData {
- public:
- explicit ZoneProducedPreParsedScopeData(ZonePreParsedScopeData* data)
- : data_(data) {}
-
- MaybeHandle<PreParsedScopeData> Serialize(Isolate* isolate) final {
- return data_->Serialize(isolate);
- }
-
- ZonePreParsedScopeData* Serialize(Zone* zone) final { return data_; };
-
- private:
- ZonePreParsedScopeData* data_;
-};
-
-ProducedPreParsedScopeData* ProducedPreParsedScopeData::For(
- PreParsedScopeDataBuilder* builder, Zone* zone) {
- return new (zone) BuilderProducedPreParsedScopeData(builder);
-}
-
-ProducedPreParsedScopeData* ProducedPreParsedScopeData::For(
- Handle<PreParsedScopeData> data, Zone* zone) {
- return new (zone) OnHeapProducedPreParsedScopeData(data);
-}
-
-ProducedPreParsedScopeData* ProducedPreParsedScopeData::For(
- ZonePreParsedScopeData* data, Zone* zone) {
- return new (zone) ZoneProducedPreParsedScopeData(data);
-}
-
-template <class Data>
-ProducedPreParsedScopeData*
-BaseConsumedPreParsedScopeData<Data>::GetDataForSkippableFunction(
- Zone* zone, int start_position, int* end_position, int* num_parameters,
- int* num_inner_functions, bool* uses_super_property,
- LanguageMode* language_mode) {
- // The skippable function *must* be the next function in the data. Use the
- // start position as a sanity check.
- typename ByteData::ReadingScope reading_scope(this);
- CHECK_GE(scope_data_->RemainingBytes(), ByteData::kSkippableFunctionDataSize);
- int start_position_from_data = scope_data_->ReadUint32();
- CHECK_EQ(start_position, start_position_from_data);
-
- *end_position = scope_data_->ReadUint32();
- DCHECK_GT(*end_position, start_position);
- *num_parameters = scope_data_->ReadUint32();
- *num_inner_functions = scope_data_->ReadUint32();
-
- uint8_t language_and_super = scope_data_->ReadQuarter();
- *language_mode = LanguageMode(LanguageField::decode(language_and_super));
- *uses_super_property = UsesSuperField::decode(language_and_super);
-
- // Retrieve the corresponding PreParsedScopeData and associate it to the
- // skipped function. If the skipped functions contains inner functions, those
- // can be skipped when the skipped function is eagerly parsed.
- return GetChildData(zone, child_index_++);
-}
-
-template <class Data>
-void BaseConsumedPreParsedScopeData<Data>::RestoreScopeAllocationData(
- DeclarationScope* scope) {
- DCHECK_EQ(scope->scope_type(), ScopeType::FUNCTION_SCOPE);
- typename ByteData::ReadingScope reading_scope(this);
-
-#ifdef DEBUG
- int magic_value_from_data = scope_data_->ReadUint32();
- // Check that we've consumed all inner function data.
- DCHECK_EQ(magic_value_from_data, ByteData::kMagicValue);
-
- int start_position_from_data = scope_data_->ReadUint32();
- int end_position_from_data = scope_data_->ReadUint32();
- DCHECK_EQ(start_position_from_data, scope->start_position());
- DCHECK_EQ(end_position_from_data, scope->end_position());
-#endif
-
- RestoreData(scope);
-
- // Check that we consumed all scope data.
- DCHECK_EQ(scope_data_->RemainingBytes(), 0);
-}
-
-template <typename Data>
-void BaseConsumedPreParsedScopeData<Data>::RestoreData(Scope* scope) {
- if (scope->is_declaration_scope() &&
- scope->AsDeclarationScope()->is_skipped_function()) {
- return;
- }
-
- // It's possible that scope is not present in the data at all (since PreParser
- // doesn't create the corresponding scope). In this case, the Scope won't
- // contain any variables for which we need the data.
- if (!PreParsedScopeDataBuilder::ScopeNeedsData(scope)) {
- return;
- }
-
- // scope_type is stored only in debug mode.
- CHECK_GE(scope_data_->RemainingBytes(), ByteData::kUint8Size);
- DCHECK_EQ(scope_data_->ReadUint8(), scope->scope_type());
-
- uint32_t eval = scope_data_->ReadUint8();
- if (ScopeCallsSloppyEvalField::decode(eval)) {
- scope->RecordEvalCall();
- }
- if (InnerScopeCallsEvalField::decode(eval)) {
- scope->RecordInnerScopeEvalCall();
- }
-
- if (scope->scope_type() == ScopeType::FUNCTION_SCOPE) {
- Variable* function = scope->AsDeclarationScope()->function_var();
- if (function != nullptr) {
- RestoreDataForVariable(function);
- }
- }
-
- for (Variable* var : *scope->locals()) {
- if (IsDeclaredVariableMode(var->mode())) {
- RestoreDataForVariable(var);
- }
- }
-
- RestoreDataForInnerScopes(scope);
-}
-
-template <typename Data>
-void BaseConsumedPreParsedScopeData<Data>::RestoreDataForVariable(
- Variable* var) {
-#ifdef DEBUG
- const AstRawString* name = var->raw_name();
- bool data_one_byte = scope_data_->ReadUint8();
- DCHECK_IMPLIES(name->is_one_byte(), data_one_byte);
- DCHECK_EQ(scope_data_->ReadUint32(), static_cast<uint32_t>(name->length()));
- if (!name->is_one_byte() && data_one_byte) {
- // It's possible that "name" is a two-byte representation of the string
- // stored in the data.
- for (int i = 0; i < 2 * name->length(); i += 2) {
-#if defined(V8_TARGET_LITTLE_ENDIAN)
- DCHECK_EQ(scope_data_->ReadUint8(), name->raw_data()[i]);
- DCHECK_EQ(0, name->raw_data()[i + 1]);
-#else
- DCHECK_EQ(scope_data_->ReadUint8(), name->raw_data()[i + 1]);
- DCHECK_EQ(0, name->raw_data()[i]);
-#endif // V8_TARGET_LITTLE_ENDIAN
- }
- } else {
- for (int i = 0; i < name->length(); ++i) {
- DCHECK_EQ(scope_data_->ReadUint8(), name->raw_data()[i]);
- }
- }
-#endif
- uint8_t variable_data = scope_data_->ReadQuarter();
- if (VariableMaybeAssignedField::decode(variable_data)) {
- var->set_maybe_assigned();
- }
- if (VariableContextAllocatedField::decode(variable_data)) {
- var->set_is_used();
- var->ForceContextAllocation();
- }
-}
-
-template <typename Data>
-void BaseConsumedPreParsedScopeData<Data>::RestoreDataForInnerScopes(
- Scope* scope) {
- std::vector<Scope*> scopes;
- for (Scope* inner = scope->inner_scope(); inner != nullptr;
- inner = inner->sibling()) {
- scopes.push_back(inner);
- }
- for (auto it = scopes.rbegin(); it != scopes.rend(); ++it) {
- RestoreData(*it);
- }
-}
-
-#ifdef DEBUG
-template <class Data>
-void BaseConsumedPreParsedScopeData<Data>::VerifyDataStart() {
- typename ByteData::ReadingScope reading_scope(this);
- int scope_data_start = scope_data_->ReadUint32();
- scope_data_->SetPosition(scope_data_start);
- DCHECK_EQ(scope_data_->ReadUint32(), ByteData::kMagicValue);
- // The first data item is scope_data_start. Skip over it.
- scope_data_->SetPosition(ByteData::kPlaceholderSize);
-}
-#endif
-
-PodArray<uint8_t>* OnHeapConsumedPreParsedScopeData::GetScopeData() {
- return data_->scope_data();
-}
-
-ProducedPreParsedScopeData* OnHeapConsumedPreParsedScopeData::GetChildData(
- Zone* zone, int child_index) {
- CHECK_GT(data_->length(), child_index);
- Object* child_data = data_->child_data(child_index);
- if (!child_data->IsPreParsedScopeData()) {
- return nullptr;
- }
- Handle<PreParsedScopeData> child_data_handle(
- PreParsedScopeData::cast(child_data), isolate_);
- return ProducedPreParsedScopeData::For(child_data_handle, zone);
-}
-
-OnHeapConsumedPreParsedScopeData::OnHeapConsumedPreParsedScopeData(
- Isolate* isolate, Handle<PreParsedScopeData> data)
- : BaseConsumedPreParsedScopeData<PodArray<uint8_t>>(),
- isolate_(isolate),
- data_(data) {
- DCHECK_NOT_NULL(isolate);
- DCHECK(data->IsPreParsedScopeData());
-#ifdef DEBUG
- VerifyDataStart();
-#endif
-}
-
-ZonePreParsedScopeData::ZonePreParsedScopeData(
- Zone* zone, ZoneChunkList<uint8_t>::iterator byte_data_begin,
- ZoneChunkList<uint8_t>::iterator byte_data_end, int child_length)
- : byte_data_(byte_data_begin, byte_data_end, zone),
- children_(child_length, zone) {}
-
-Handle<PreParsedScopeData> ZonePreParsedScopeData::Serialize(Isolate* isolate) {
- int child_data_length = child_length();
- Handle<PreParsedScopeData> result =
- isolate->factory()->NewPreParsedScopeData(child_data_length);
-
- Handle<PodArray<uint8_t>> scope_data_array = PodArray<uint8_t>::New(
- isolate, static_cast<int>(byte_data()->size()), TENURED);
- scope_data_array->copy_in(0, byte_data()->data(),
- static_cast<int>(byte_data()->size()));
- result->set_scope_data(*scope_data_array);
-
- for (int i = 0; i < child_data_length; i++) {
- ZonePreParsedScopeData* child = get_child(i);
- if (child) {
- Handle<PreParsedScopeData> child_data = child->Serialize(isolate);
- result->set_child_data(i, *child_data);
- }
- }
- return result;
-}
-
-ZoneConsumedPreParsedScopeData::ZoneConsumedPreParsedScopeData(
- Zone* zone, ZonePreParsedScopeData* data)
- : data_(data), scope_data_wrapper_(data_->byte_data()) {
-#ifdef DEBUG
- VerifyDataStart();
-#endif
-}
-
-ZoneVectorWrapper* ZoneConsumedPreParsedScopeData::GetScopeData() {
- return &scope_data_wrapper_;
-}
-
-ProducedPreParsedScopeData* ZoneConsumedPreParsedScopeData::GetChildData(
- Zone* zone, int child_index) {
- CHECK_GT(data_->child_length(), child_index);
- ZonePreParsedScopeData* child_data = data_->get_child(child_index);
- if (child_data == nullptr) {
- return nullptr;
- }
- return ProducedPreParsedScopeData::For(child_data, zone);
-}
-
-std::unique_ptr<ConsumedPreParsedScopeData> ConsumedPreParsedScopeData::For(
- Isolate* isolate, Handle<PreParsedScopeData> data) {
- DCHECK(!data.is_null());
- return base::make_unique<OnHeapConsumedPreParsedScopeData>(isolate, data);
-}
-
-std::unique_ptr<ConsumedPreParsedScopeData> ConsumedPreParsedScopeData::For(
- Zone* zone, ZonePreParsedScopeData* data) {
- if (data == nullptr) return {};
- return base::make_unique<ZoneConsumedPreParsedScopeData>(zone, data);
-}
-
-} // namespace internal
-} // namespace v8
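The reader deleted above consumes each skippable-function record field by field and finishes with a quarter byte that packs the language mode together with the uses-super flag (decoded via LanguageField and UsesSuperField). Below is a minimal, self-contained sketch of that kind of bit packing; the shifts and the two-value enum are illustrative assumptions, not V8's actual encoding.

#include <cassert>
#include <cstdint>

enum class LanguageMode : uint8_t { kSloppy = 0, kStrict = 1 };

// Assumed layout: bit 0 carries the language mode, bit 1 the super flag.
constexpr int kLanguageShift = 0;
constexpr int kUsesSuperShift = 1;

uint8_t Encode(LanguageMode mode, bool uses_super) {
  return static_cast<uint8_t>(
      (static_cast<uint8_t>(mode) << kLanguageShift) |
      (static_cast<uint8_t>(uses_super) << kUsesSuperShift));
}

LanguageMode DecodeLanguage(uint8_t packed) {
  return static_cast<LanguageMode>((packed >> kLanguageShift) & 1);
}

bool DecodeUsesSuper(uint8_t packed) {
  return ((packed >> kUsesSuperShift) & 1) != 0;
}

int main() {
  uint8_t packed = Encode(LanguageMode::kStrict, /*uses_super=*/true);
  assert(DecodeLanguage(packed) == LanguageMode::kStrict);
  assert(DecodeUsesSuper(packed));
}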
diff --git a/deps/v8/src/parsing/preparsed-scope-data.h b/deps/v8/src/parsing/preparsed-scope-data.h
deleted file mode 100644
index 25298c4331..0000000000
--- a/deps/v8/src/parsing/preparsed-scope-data.h
+++ /dev/null
@@ -1,214 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_PARSING_PREPARSED_SCOPE_DATA_H_
-#define V8_PARSING_PREPARSED_SCOPE_DATA_H_
-
-#include "src/globals.h"
-#include "src/handles.h"
-#include "src/maybe-handles.h"
-#include "src/zone/zone-chunk-list.h"
-#include "src/zone/zone-containers.h"
-
-namespace v8 {
-namespace internal {
-
-template <typename T>
-class PodArray;
-
-class PreParser;
-class PreParsedScopeData;
-class ZonePreParsedScopeData;
-
-/*
-
- Skipping inner functions.
-
- Consider the following code:
- (function eager_outer() {
- function lazy_inner() {
- let a;
- function skip_me() { a; }
- }
-
- return lazy_inner;
- })();
-
- ... lazy_inner(); ...
-
- When parsing the code the first time, eager_outer is parsed and lazy_inner
- (and everything inside it) is preparsed. When lazy_inner is called, we don't
- want to parse or preparse skip_me again. Instead, we want to skip over it,
- since it has already been preparsed once.
-
- In order to be able to do this, we need to store the information needed for
- allocating the variables in lazy_inner when we preparse it, and then later do
- scope allocation based on that data.
-
- We need the following data for each scope in lazy_inner's scope tree:
- For each Variable:
- - is_used
- - maybe_assigned
- - has_forced_context_allocation
-
- For each Scope:
- - inner_scope_calls_eval_.
-
- ProducedPreParsedScopeData implements storing the above-mentioned data and
- ConsumedPreParsedScopeData implements restoring it (= setting the context
- allocation status of the variables in a Scope (and its subscopes) based on the
- data).
-
- */
-
-class PreParsedScopeDataBuilder : public ZoneObject {
- public:
- class ByteData;
-
- // Create a PreParsedScopeDataBuilder object which will collect data as we
- // parse.
- PreParsedScopeDataBuilder(Zone* zone, PreParsedScopeDataBuilder* parent);
-
- PreParsedScopeDataBuilder* parent() const { return parent_; }
-
- // For gathering the inner function data and splitting it up according to the
- // laziness boundaries. Each lazy function gets its own
- // ProducedPreParsedScopeData, and so do all lazy functions inside it.
- class DataGatheringScope {
- public:
- DataGatheringScope(DeclarationScope* function_scope, PreParser* preparser);
- ~DataGatheringScope();
-
- void MarkFunctionAsSkippable(int end_position, int num_inner_functions);
-
- private:
- DeclarationScope* function_scope_;
- PreParser* preparser_;
- PreParsedScopeDataBuilder* builder_;
-
- DISALLOW_COPY_AND_ASSIGN(DataGatheringScope);
- };
-
- // Saves the information needed for allocating the Scope's (and its
- // subscopes') variables.
- void SaveScopeAllocationData(DeclarationScope* scope);
-
- // In some cases, the PreParser cannot produce the same Scope structure as
- // the Parser. When that happens, we're unable to produce the data that would
- // enable skipping the inner functions of that function.
- void Bailout() {
- bailed_out_ = true;
-
- // We don't need to call Bailout on existing / future children: the only way
- // to try to retrieve their data is through calling Serialize on the parent,
- // and if the parent is bailed out, it won't call Serialize on its children.
- }
-
- bool bailed_out() const { return bailed_out_; }
-
-#ifdef DEBUG
- bool ThisOrParentBailedOut() const {
- if (bailed_out_) {
- return true;
- }
- if (parent_ == nullptr) {
- return false;
- }
- return parent_->ThisOrParentBailedOut();
- }
-#endif // DEBUG
-
- bool ContainsInnerFunctions() const;
-
- static bool ScopeNeedsData(Scope* scope);
- static bool ScopeIsSkippableFunctionScope(Scope* scope);
-
- private:
- friend class BuilderProducedPreParsedScopeData;
-
- virtual MaybeHandle<PreParsedScopeData> Serialize(Isolate* isolate);
- virtual ZonePreParsedScopeData* Serialize(Zone* zone);
-
- void AddSkippableFunction(int start_position, int end_position,
- int num_parameters, int num_inner_functions,
- LanguageMode language_mode,
- bool uses_super_property);
-
- void SaveDataForScope(Scope* scope);
- void SaveDataForVariable(Variable* var);
- void SaveDataForInnerScopes(Scope* scope);
-
- PreParsedScopeDataBuilder* parent_;
-
- ByteData* byte_data_;
- ZoneChunkList<PreParsedScopeDataBuilder*> data_for_inner_functions_;
-
- // Whether we've given up producing the data for this function.
- bool bailed_out_;
-
- DISALLOW_COPY_AND_ASSIGN(PreParsedScopeDataBuilder);
-};
-
-class ProducedPreParsedScopeData : public ZoneObject {
- public:
- // If there is data (if the Scope contains skippable inner functions), move
- // the data into the heap and return a Handle to it; otherwise return a null
- // MaybeHandle.
- virtual MaybeHandle<PreParsedScopeData> Serialize(Isolate* isolate) = 0;
-
- // If there is data (if the Scope contains skippable inner functions), return
- // an off-heap ZonePreParsedScopeData representing the data; otherwise
- // return nullptr.
- virtual ZonePreParsedScopeData* Serialize(Zone* zone) = 0;
-
- // Create a ProducedPreParsedScopeData which is a proxy for a previously
- // produced PreParsedScopeData in zone.
- static ProducedPreParsedScopeData* For(PreParsedScopeDataBuilder* builder,
- Zone* zone);
-
- // Create a ProducedPreParsedScopeData which is a proxy for a previously
- // produced PreParsedScopeData on the heap.
- static ProducedPreParsedScopeData* For(Handle<PreParsedScopeData> data,
- Zone* zone);
-
- // Create a ProducedPreParsedScopeData which is a proxy for a previously
- // produced PreParsedScopeData in zone.
- static ProducedPreParsedScopeData* For(ZonePreParsedScopeData* data,
- Zone* zone);
-};
-
-class ConsumedPreParsedScopeData {
- public:
- // Creates a ConsumedPreParsedScopeData representing the data of an on-heap
- // PreParsedScopeData |data|.
- static std::unique_ptr<ConsumedPreParsedScopeData> For(
- Isolate* isolate, Handle<PreParsedScopeData> data);
-
- // Creates a ConsumedPreParsedScopeData representing the data of an off-heap
- // ZonePreParsedScopeData |data|.
- static std::unique_ptr<ConsumedPreParsedScopeData> For(
- Zone* zone, ZonePreParsedScopeData* data);
-
- virtual ~ConsumedPreParsedScopeData() = default;
-
- virtual ProducedPreParsedScopeData* GetDataForSkippableFunction(
- Zone* zone, int start_position, int* end_position, int* num_parameters,
- int* num_inner_functions, bool* uses_super_property,
- LanguageMode* language_mode) = 0;
-
- // Restores the information needed for allocating the Scope's (and its
- // subscopes') variables.
- virtual void RestoreScopeAllocationData(DeclarationScope* scope) = 0;
-
- protected:
- ConsumedPreParsedScopeData() = default;
-
- private:
- DISALLOW_COPY_AND_ASSIGN(ConsumedPreParsedScopeData);
-};
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_PARSING_PREPARSED_SCOPE_DATA_H_
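The header removed above documents the skip-inner-functions scheme: while preparsing, a producer records per-variable flags (is_used, maybe_assigned, forced context allocation) for every scope, and a consumer later replays them so a skipped function can be compiled without re-preparsing its body. A rough record-and-replay sketch under stand-in names (Producer and Consumer are not V8's classes, and only the variable flags from the comment block are modeled):

#include <cassert>
#include <cstdint>
#include <vector>

struct VariableFlags {
  bool is_used = false;
  bool maybe_assigned = false;
  bool forced_context_allocation = false;
};

class Producer {
 public:
  // Called while preparsing, once per tracked variable, in scope order.
  void SaveDataForVariable(const VariableFlags& flags) {
    data_.push_back(flags);
  }
  std::vector<VariableFlags> Serialize() const { return data_; }

 private:
  std::vector<VariableFlags> data_;
};

class Consumer {
 public:
  explicit Consumer(std::vector<VariableFlags> data) : data_(std::move(data)) {}
  // Called during the later eager parse; replays flags in the same order.
  void RestoreDataForVariable(VariableFlags* var) { *var = data_.at(next_++); }

 private:
  std::vector<VariableFlags> data_;
  size_t next_ = 0;
};

int main() {
  Producer producer;
  producer.SaveDataForVariable({true, false, true});
  Consumer consumer(producer.Serialize());
  VariableFlags restored;
  consumer.RestoreDataForVariable(&restored);
  assert(restored.is_used && restored.forced_context_allocation);
}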
diff --git a/deps/v8/src/parsing/preparser.cc b/deps/v8/src/parsing/preparser.cc
index 0e74014542..ee496aad10 100644
--- a/deps/v8/src/parsing/preparser.cc
+++ b/deps/v8/src/parsing/preparser.cc
@@ -9,9 +9,8 @@
#include "src/conversions-inl.h"
#include "src/conversions.h"
#include "src/globals.h"
-#include "src/parsing/duplicate-finder.h"
#include "src/parsing/parser-base.h"
-#include "src/parsing/preparsed-scope-data.h"
+#include "src/parsing/preparse-data.h"
#include "src/parsing/preparser.h"
#include "src/unicode.h"
#include "src/utils.h"
@@ -19,26 +18,11 @@
namespace v8 {
namespace internal {
-// ----------------------------------------------------------------------------
-// The CHECK_OK macro is a convenient macro to enforce error
-// handling for functions that may fail (by returning !*ok).
-//
-// CAUTION: This macro appends extra statements after a call,
-// thus it must never be used where only a single statement
-// is correct (e.g. an if statement branch w/o braces)!
-
-#define CHECK_OK_VALUE(x) ok); \
- if (!*ok) return x; \
- ((void)0
-#define DUMMY ) // to make indentation work
-#undef DUMMY
-
-#define CHECK_OK CHECK_OK_VALUE(Expression::Default())
-#define CHECK_OK_VOID CHECK_OK_VALUE(this->Void())
-
namespace {
-PreParserIdentifier GetSymbolHelper(Scanner* scanner) {
+PreParserIdentifier GetSymbolHelper(Scanner* scanner,
+ const AstRawString* string,
+ AstValueFactory* avf) {
// These symbols require slightly different treatment:
// - regular keywords (async, await, etc.; treated in 1st switch.)
// - 'contextual' keywords (and may contain escapes; treated in 2nd switch.)
@@ -53,24 +37,20 @@ PreParserIdentifier GetSymbolHelper(Scanner* scanner) {
default:
break;
}
- switch (scanner->current_contextual_token()) {
- case Token::CONSTRUCTOR:
- return PreParserIdentifier::Constructor();
- case Token::NAME:
- return PreParserIdentifier::Name();
- default:
- break;
+ if (string == avf->constructor_string()) {
+ return PreParserIdentifier::Constructor();
+ }
+ if (string == avf->name_string()) {
+ return PreParserIdentifier::Name();
}
if (scanner->literal_contains_escapes()) {
return PreParserIdentifier::Default();
}
- switch (scanner->current_contextual_token()) {
- case Token::EVAL:
- return PreParserIdentifier::Eval();
- case Token::ARGUMENTS:
- return PreParserIdentifier::Arguments();
- default:
- break;
+ if (string == avf->eval_string()) {
+ return PreParserIdentifier::Eval();
+ }
+ if (string == avf->arguments_string()) {
+ return PreParserIdentifier::Arguments();
}
return PreParserIdentifier::Default();
}
@@ -78,12 +58,11 @@ PreParserIdentifier GetSymbolHelper(Scanner* scanner) {
} // unnamed namespace
PreParserIdentifier PreParser::GetSymbol() const {
- PreParserIdentifier symbol = GetSymbolHelper(scanner());
- if (track_unresolved_variables_) {
- const AstRawString* result = scanner()->CurrentSymbol(ast_value_factory());
- DCHECK_NOT_NULL(result);
- symbol.string_ = result;
- }
+ const AstRawString* result = scanner()->CurrentSymbol(ast_value_factory());
+ PreParserIdentifier symbol =
+ GetSymbolHelper(scanner(), result, ast_value_factory());
+ DCHECK_NOT_NULL(result);
+ symbol.string_ = result;
return symbol;
}
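The rewritten GetSymbolHelper above swaps the contextual-token switches for pointer comparisons against AstValueFactory's interned strings (constructor_string(), eval_string(), and so on). That is sound because strings from a single factory are interned: equal contents yield the same pointer. A small sketch of the idea with stand-in types (StringFactory is hypothetical, not V8's AstValueFactory):

#include <cassert>
#include <string>
#include <unordered_map>

class StringFactory {
 public:
  // Returns a canonical pointer for every distinct string value.
  const std::string* Intern(const std::string& s) {
    auto it = table_.emplace(s, s).first;
    return &it->second;  // node-based map: pointer stays valid across rehash
  }

 private:
  std::unordered_map<std::string, std::string> table_;
};

int main() {
  StringFactory factory;
  const std::string* constructor_string = factory.Intern("constructor");
  const std::string* symbol = factory.Intern("constructor");
  // Pointer equality replaces the old per-token switch.
  assert(symbol == constructor_string);
}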
@@ -94,6 +73,12 @@ PreParser::PreParseResult PreParser::PreParseProgram() {
scope->set_is_being_lazily_parsed(true);
#endif
+ if (FLAG_harmony_hashbang) {
+ // Note: We should only skip the hashbang in non-Eval scripts
+ // (currently, Eval is not handled by the PreParser).
+ scanner()->SkipHashBang();
+ }
+
// ModuleDeclarationInstantiation for Source Text Module Records creates a
// new Module Environment Record whose outer lexical environment record is
// the global scope.
@@ -101,26 +86,30 @@ PreParser::PreParseResult PreParser::PreParseProgram() {
FunctionState top_scope(&function_state_, &scope_, scope);
original_scope_ = scope_;
- bool ok = true;
- int start_position = scanner()->peek_location().beg_pos;
- PreParserStatementList body;
- ParseStatementList(body, Token::EOS, &ok);
+ int start_position = peek_position();
+ PreParserScopedStatementList body(pointer_buffer());
+ ParseStatementList(&body, Token::EOS);
original_scope_ = nullptr;
if (stack_overflow()) return kPreParseStackOverflow;
- if (!ok) {
- ReportUnexpectedToken(scanner()->current_token());
- } else if (is_strict(language_mode())) {
- CheckStrictOctalLiteral(start_position, scanner()->location().end_pos, &ok);
+ if (is_strict(language_mode())) {
+ CheckStrictOctalLiteral(start_position, scanner()->location().end_pos);
}
return kPreParseSuccess;
}
+void PreParserFormalParameters::ValidateDuplicate(PreParser* preparser) const {
+ if (has_duplicate_) preparser->ReportUnidentifiableError();
+}
+
+void PreParserFormalParameters::ValidateStrictMode(PreParser* preparser) const {
+ if (strict_parameter_error_) preparser->ReportUnidentifiableError();
+}
+
PreParser::PreParseResult PreParser::PreParseFunction(
const AstRawString* function_name, FunctionKind kind,
FunctionLiteral::FunctionType function_type,
- DeclarationScope* function_scope, bool is_inner_function, bool may_abort,
- int* use_counts, ProducedPreParsedScopeData** produced_preparsed_scope_data,
- int script_id) {
+ DeclarationScope* function_scope, int* use_counts,
+ ProducedPreparseData** produced_preparse_data, int script_id) {
DCHECK_EQ(FUNCTION_SCOPE, function_scope->scope_type());
use_counts_ = use_counts;
set_script_id(script_id);
@@ -128,19 +117,7 @@ PreParser::PreParseResult PreParser::PreParseFunction(
function_scope->set_is_being_lazily_parsed(true);
#endif
- track_unresolved_variables_ =
- ShouldTrackUnresolvedVariables(is_inner_function);
-
- // Start collecting data for a new function which might contain skippable
- // functions.
- std::unique_ptr<PreParsedScopeDataBuilder::DataGatheringScope>
- preparsed_scope_data_builder_scope;
- if (FLAG_preparser_scope_analysis && !IsArrowFunction(kind)) {
- DCHECK(track_unresolved_variables_);
- preparsed_scope_data_builder_scope.reset(
- new PreParsedScopeDataBuilder::DataGatheringScope(function_scope,
- this));
- }
+ PreParserFormalParameters formals(function_scope);
// In the preparser, we use the function literal ids to count how many
// FunctionLiterals were encountered. The PreParser doesn't actually persist
@@ -153,122 +130,123 @@ PreParser::PreParseResult PreParser::PreParseFunction(
DCHECK_NULL(function_state_);
DCHECK_NULL(scope_);
FunctionState function_state(&function_state_, &scope_, function_scope);
- // This indirection is needed so that we can use the CHECK_OK macros.
- bool ok_holder = true;
- bool* ok = &ok_holder;
- PreParserFormalParameters formals(function_scope);
- DuplicateFinder duplicate_finder;
- std::unique_ptr<ExpressionClassifier> formals_classifier;
+ // Start collecting data for a new function which might contain skippable
+ // functions.
+ PreparseDataBuilder::DataGatheringScope preparse_data_builder_scope(this);
+
+ if (IsArrowFunction(kind)) {
+ formals.is_simple = function_scope->has_simple_parameters();
+ } else {
+ preparse_data_builder_scope.Start(function_scope);
- // Parse non-arrow function parameters. For arrow functions, the parameters
- // have already been parsed.
- if (!IsArrowFunction(kind)) {
- formals_classifier.reset(new ExpressionClassifier(this, &duplicate_finder));
+ // Parse non-arrow function parameters. For arrow functions, the parameters
+ // have already been parsed.
+ ParameterDeclarationParsingScope formals_scope(this);
// We return kPreParseSuccess in failure cases too - errors are retrieved
// separately by Parser::SkipLazyFunctionBody.
- ParseFormalParameterList(
- &formals,
- CHECK_OK_VALUE(pending_error_handler()->ErrorUnidentifiableByPreParser()
- ? kPreParseNotIdentifiableError
- : kPreParseSuccess));
- Expect(Token::RPAREN, CHECK_OK_VALUE(kPreParseSuccess));
+ ParseFormalParameterList(&formals);
+ if (formals_scope.has_duplicate()) formals.set_has_duplicate();
+ if (!formals.is_simple) {
+ BuildParameterInitializationBlock(formals);
+ }
+
+ Expect(Token::RPAREN);
int formals_end_position = scanner()->location().end_pos;
- CheckArityRestrictions(
- formals.arity, kind, formals.has_rest, function_scope->start_position(),
- formals_end_position, CHECK_OK_VALUE(kPreParseSuccess));
+ CheckArityRestrictions(formals.arity, kind, formals.has_rest,
+ function_scope->start_position(),
+ formals_end_position);
}
- Expect(Token::LBRACE, CHECK_OK_VALUE(kPreParseSuccess));
+ Expect(Token::LBRACE);
DeclarationScope* inner_scope = function_scope;
- LazyParsingResult result;
if (!formals.is_simple) {
inner_scope = NewVarblockScope();
- inner_scope->set_start_position(scanner()->location().beg_pos);
+ inner_scope->set_start_position(position());
}
{
BlockState block_state(&scope_, inner_scope);
- result = ParseStatementListAndLogFunction(&formals, may_abort, ok);
+ ParseStatementListAndLogFunction(&formals);
}
- if (!formals.is_simple) {
- BuildParameterInitializationBlock(formals, ok);
+ bool allow_duplicate_parameters = false;
+
+ if (formals.is_simple) {
+ if (is_sloppy(function_scope->language_mode())) {
+ function_scope->HoistSloppyBlockFunctions(nullptr);
+ }
+ allow_duplicate_parameters =
+ is_sloppy(function_scope->language_mode()) && !IsConciseMethod(kind);
+ } else {
if (is_sloppy(inner_scope->language_mode())) {
inner_scope->HoistSloppyBlockFunctions(nullptr);
}
SetLanguageMode(function_scope, inner_scope->language_mode());
inner_scope->set_end_position(scanner()->peek_location().end_pos);
- inner_scope->FinalizeBlockScope();
- } else {
- if (is_sloppy(function_scope->language_mode())) {
- function_scope->HoistSloppyBlockFunctions(nullptr);
+ if (inner_scope->FinalizeBlockScope() != nullptr) {
+ const AstRawString* conflict = inner_scope->FindVariableDeclaredIn(
+ function_scope, VariableMode::kLastLexicalVariableMode);
+ if (conflict != nullptr) ReportVarRedeclarationIn(conflict, inner_scope);
}
}
use_counts_ = nullptr;
- if (result == kLazyParsingAborted) {
- DCHECK(!pending_error_handler()->ErrorUnidentifiableByPreParser());
- return kPreParseAbort;
- } else if (stack_overflow()) {
- DCHECK(!pending_error_handler()->ErrorUnidentifiableByPreParser());
+ if (stack_overflow()) {
return kPreParseStackOverflow;
- } else if (pending_error_handler()->ErrorUnidentifiableByPreParser()) {
- DCHECK(!*ok);
+ } else if (pending_error_handler()->has_error_unidentifiable_by_preparser()) {
return kPreParseNotIdentifiableError;
- } else if (!*ok) {
+ } else if (has_error()) {
DCHECK(pending_error_handler()->has_pending_error());
} else {
DCHECK_EQ(Token::RBRACE, scanner()->peek());
- DCHECK(result == kLazyParsingComplete);
if (!IsArrowFunction(kind)) {
// Validate parameter names. We can do this only after parsing the
// function, since the function can declare itself strict.
- const bool allow_duplicate_parameters =
- is_sloppy(function_scope->language_mode()) && formals.is_simple &&
- !IsConciseMethod(kind);
- ValidateFormalParameters(function_scope->language_mode(),
- allow_duplicate_parameters, ok);
- if (!*ok) {
- if (pending_error_handler()->ErrorUnidentifiableByPreParser()) {
+ ValidateFormalParameters(language_mode(), formals,
+ allow_duplicate_parameters);
+ if (has_error()) {
+ if (pending_error_handler()->has_error_unidentifiable_by_preparser()) {
return kPreParseNotIdentifiableError;
} else {
return kPreParseSuccess;
}
}
- if (track_unresolved_variables_) {
- // Declare arguments after parsing the function since lexical
- // 'arguments' masks the arguments object. Declare arguments before
- // declaring the function var since the arguments object masks 'function
- // arguments'.
- function_scope->DeclareArguments(ast_value_factory());
+ // Declare arguments after parsing the function since lexical
+ // 'arguments' masks the arguments object. Declare arguments before
+ // declaring the function var since the arguments object masks 'function
+ // arguments'.
+ function_scope->DeclareArguments(ast_value_factory());
- DeclareFunctionNameVar(function_name, function_type, function_scope);
+ DeclareFunctionNameVar(function_name, function_type, function_scope);
+
+ if (preparse_data_builder_->HasData()) {
+ *produced_preparse_data =
+ ProducedPreparseData::For(preparse_data_builder_, main_zone());
}
+ }
- *produced_preparsed_scope_data = ProducedPreParsedScopeData::For(
- preparsed_scope_data_builder_, main_zone());
+ if (pending_error_handler()->has_error_unidentifiable_by_preparser()) {
+ return kPreParseNotIdentifiableError;
}
- DCHECK(!pending_error_handler()->ErrorUnidentifiableByPreParser());
if (is_strict(function_scope->language_mode())) {
int end_pos = scanner()->location().end_pos;
- CheckStrictOctalLiteral(function_scope->start_position(), end_pos, ok);
+ CheckStrictOctalLiteral(function_scope->start_position(), end_pos);
}
}
- DCHECK(!pending_error_handler()->ErrorUnidentifiableByPreParser());
+ DCHECK(!pending_error_handler()->has_error_unidentifiable_by_preparser());
return kPreParseSuccess;
}
-
// Preparsing checks a JavaScript program and emits preparse data that helps
// later parsing run faster.
// See preparse-data.h for the data.
@@ -287,94 +265,87 @@ PreParser::Expression PreParser::ParseFunctionLiteral(
FunctionNameValidity function_name_validity, FunctionKind kind,
int function_token_pos, FunctionLiteral::FunctionType function_type,
LanguageMode language_mode,
- ZonePtrList<const AstRawString>* arguments_for_wrapped_function, bool* ok) {
+ ZonePtrList<const AstRawString>* arguments_for_wrapped_function) {
// Wrapped functions are not parsed in the preparser.
DCHECK_NULL(arguments_for_wrapped_function);
DCHECK_NE(FunctionLiteral::kWrapped, function_type);
// Function ::
// '(' FormalParameterList? ')' '{' FunctionBody '}'
- const RuntimeCallCounterId counters[2][2] = {
- {RuntimeCallCounterId::kPreParseBackgroundNoVariableResolution,
- RuntimeCallCounterId::kPreParseNoVariableResolution},
- {RuntimeCallCounterId::kPreParseBackgroundWithVariableResolution,
- RuntimeCallCounterId::kPreParseWithVariableResolution}};
- RuntimeCallTimerScope runtime_timer(
- runtime_call_stats_,
- counters[track_unresolved_variables_][parsing_on_main_thread_]);
+ const RuntimeCallCounterId counters[2] = {
+ RuntimeCallCounterId::kPreParseBackgroundWithVariableResolution,
+ RuntimeCallCounterId::kPreParseWithVariableResolution};
+ RuntimeCallTimerScope runtime_timer(runtime_call_stats_,
+ counters[parsing_on_main_thread_]);
base::ElapsedTimer timer;
if (V8_UNLIKELY(FLAG_log_function_events)) timer.Start();
DeclarationScope* function_scope = NewFunctionScope(kind);
function_scope->SetLanguageMode(language_mode);
+ int func_id = GetNextFunctionLiteralId();
+ bool skippable_function = false;
// Start collecting data for a new function which might contain skippable
// functions.
- std::unique_ptr<PreParsedScopeDataBuilder::DataGatheringScope>
- preparsed_scope_data_builder_scope;
- if (!function_state_->next_function_is_likely_called() &&
- preparsed_scope_data_builder_ != nullptr) {
- DCHECK(FLAG_preparser_scope_analysis);
- DCHECK(track_unresolved_variables_);
- preparsed_scope_data_builder_scope.reset(
- new PreParsedScopeDataBuilder::DataGatheringScope(function_scope,
- this));
- }
+ {
+ PreparseDataBuilder::DataGatheringScope preparse_data_builder_scope(this);
+ skippable_function = !function_state_->next_function_is_likely_called() &&
+ preparse_data_builder_ != nullptr;
+ if (skippable_function) {
+ preparse_data_builder_scope.Start(function_scope);
+ }
- FunctionState function_state(&function_state_, &scope_, function_scope);
- DuplicateFinder duplicate_finder;
- ExpressionClassifier formals_classifier(this, &duplicate_finder);
- int func_id = GetNextFunctionLiteralId();
+ FunctionState function_state(&function_state_, &scope_, function_scope);
- Expect(Token::LPAREN, CHECK_OK);
- int start_position = scanner()->location().beg_pos;
- function_scope->set_start_position(start_position);
- PreParserFormalParameters formals(function_scope);
- ParseFormalParameterList(&formals, CHECK_OK);
- Expect(Token::RPAREN, CHECK_OK);
- int formals_end_position = scanner()->location().end_pos;
+ Expect(Token::LPAREN);
+ int start_position = position();
+ function_scope->set_start_position(start_position);
+ PreParserFormalParameters formals(function_scope);
+ {
+ ParameterDeclarationParsingScope formals_scope(this);
+ ParseFormalParameterList(&formals);
+ if (formals_scope.has_duplicate()) formals.set_has_duplicate();
+ }
+ Expect(Token::RPAREN);
+ int formals_end_position = scanner()->location().end_pos;
- CheckArityRestrictions(formals.arity, kind, formals.has_rest, start_position,
- formals_end_position, CHECK_OK);
+ CheckArityRestrictions(formals.arity, kind, formals.has_rest,
+ start_position, formals_end_position);
- Expect(Token::LBRACE, CHECK_OK);
+ Expect(Token::LBRACE);
- // Parse function body.
- PreParserStatementList body;
- int pos = function_token_pos == kNoSourcePosition ? peek_position()
- : function_token_pos;
- ParseFunctionBody(body, function_name, pos, formals, kind, function_type,
- FunctionBodyType::kBlock, true, CHECK_OK);
+ // Parse function body.
+ PreParserScopedStatementList body(pointer_buffer());
+ int pos = function_token_pos == kNoSourcePosition ? peek_position()
+ : function_token_pos;
+ AcceptINScope scope(this, true);
+ ParseFunctionBody(&body, function_name, pos, formals, kind, function_type,
+ FunctionBodyType::kBlock);
- // Parsing the body may change the language mode in our scope.
- language_mode = function_scope->language_mode();
+ // Parsing the body may change the language mode in our scope.
+ language_mode = function_scope->language_mode();
- if (is_sloppy(language_mode)) {
- function_scope->HoistSloppyBlockFunctions(nullptr);
- }
+ if (is_sloppy(language_mode)) {
+ function_scope->HoistSloppyBlockFunctions(nullptr);
+ }
- // Validate name and parameter names. We can do this only after parsing the
- // function, since the function can declare itself strict.
- CheckFunctionName(language_mode, function_name, function_name_validity,
- function_name_location, CHECK_OK);
- const bool allow_duplicate_parameters =
- is_sloppy(language_mode) && formals.is_simple && !IsConciseMethod(kind);
- ValidateFormalParameters(language_mode, allow_duplicate_parameters, CHECK_OK);
-
- int end_position = scanner()->location().end_pos;
- if (is_strict(language_mode)) {
- CheckStrictOctalLiteral(start_position, end_position, CHECK_OK);
- }
+ // Validate name and parameter names. We can do this only after parsing the
+ // function, since the function can declare itself strict.
+ CheckFunctionName(language_mode, function_name, function_name_validity,
+ function_name_location);
- if (preparsed_scope_data_builder_scope) {
- preparsed_scope_data_builder_scope->MarkFunctionAsSkippable(
- end_position, GetLastFunctionLiteralId() - func_id);
+ if (is_strict(language_mode)) {
+ CheckStrictOctalLiteral(start_position, end_position());
+ }
+ if (skippable_function) {
+ preparse_data_builder_scope.SetSkippableFunction(
+ function_scope, GetLastFunctionLiteralId() - func_id);
+ }
}
+
if (V8_UNLIKELY(FLAG_log_function_events)) {
double ms = timer.Elapsed().InMillisecondsF();
- const char* event_name = track_unresolved_variables_
- ? "preparse-resolution"
- : "preparse-no-resolution";
+ const char* event_name = "preparse-resolution";
// We might not always get a function name here. However, it can be easily
// reconstructed from the script id and the byte range in the log processor.
const char* name = "";
@@ -392,87 +363,47 @@ PreParser::Expression PreParser::ParseFunctionLiteral(
return Expression::Default();
}
-PreParser::LazyParsingResult PreParser::ParseStatementListAndLogFunction(
- PreParserFormalParameters* formals, bool may_abort, bool* ok) {
- PreParserStatementList body;
- LazyParsingResult result = ParseStatementList(
- body, Token::RBRACE, may_abort, CHECK_OK_VALUE(kLazyParsingComplete));
- if (result == kLazyParsingAborted) return result;
+void PreParser::ParseStatementListAndLogFunction(
+ PreParserFormalParameters* formals) {
+ PreParserScopedStatementList body(pointer_buffer());
+ ParseStatementList(&body, Token::RBRACE);
// Position right after terminal '}'.
- DCHECK_EQ(Token::RBRACE, scanner()->peek());
+ DCHECK_IMPLIES(!has_error(), scanner()->peek() == Token::RBRACE);
int body_end = scanner()->peek_location().end_pos;
DCHECK_EQ(this->scope()->is_function_scope(), formals->is_simple);
log_.LogFunction(body_end, formals->num_parameters(),
GetLastFunctionLiteralId());
- return kLazyParsingComplete;
}
-PreParserStatement PreParser::BuildParameterInitializationBlock(
- const PreParserFormalParameters& parameters, bool* ok) {
+PreParserBlock PreParser::BuildParameterInitializationBlock(
+ const PreParserFormalParameters& parameters) {
DCHECK(!parameters.is_simple);
DCHECK(scope()->is_function_scope());
- if (FLAG_preparser_scope_analysis &&
- scope()->AsDeclarationScope()->calls_sloppy_eval() &&
- preparsed_scope_data_builder_ != nullptr) {
+ if (scope()->AsDeclarationScope()->calls_sloppy_eval() &&
+ preparse_data_builder_ != nullptr) {
// We cannot replicate the Scope structure constructed by the Parser,
// because we've lost the information about whether each individual parameter
// was simple or not. Give up trying to produce data to skip inner functions.
- if (preparsed_scope_data_builder_->parent() != nullptr) {
+ if (preparse_data_builder_->parent() != nullptr) {
// Lazy parsing started before the current function; the function which
// cannot contain skippable functions is the parent function. (Its inner
// functions cannot either; they are implicitly bailed out.)
- preparsed_scope_data_builder_->parent()->Bailout();
+ preparse_data_builder_->parent()->Bailout();
} else {
// Lazy parsing started at the current function; it cannot contain
// skippable functions.
- preparsed_scope_data_builder_->Bailout();
+ preparse_data_builder_->Bailout();
}
}
- return PreParserStatement::Default();
-}
-
-PreParserExpression PreParser::ExpressionFromIdentifier(
- const PreParserIdentifier& name, int start_position, InferName infer) {
- VariableProxy* proxy = nullptr;
- if (track_unresolved_variables_) {
- DCHECK_NOT_NULL(name.string_);
- proxy = scope()->NewUnresolved(factory()->ast_node_factory(), name.string_,
- start_position, NORMAL_VARIABLE);
- }
- return PreParserExpression::FromIdentifier(name, proxy, zone());
+ return PreParserBlock::Default();
}
-void PreParser::DeclareAndInitializeVariables(
- PreParserStatement block,
- const DeclarationDescriptor* declaration_descriptor,
- const DeclarationParsingResult::Declaration* declaration,
- ZonePtrList<const AstRawString>* names, bool* ok) {
- if (declaration->pattern.variables_ != nullptr) {
- DCHECK(FLAG_lazy_inner_functions);
- DCHECK(track_unresolved_variables_);
- for (auto variable : *(declaration->pattern.variables_)) {
- declaration_descriptor->scope->RemoveUnresolved(variable);
- Variable* var = scope()->DeclareVariableName(
- variable->raw_name(), declaration_descriptor->mode);
- if (FLAG_preparser_scope_analysis) {
- MarkLoopVariableAsAssigned(declaration_descriptor->scope, var,
- declaration_descriptor->declaration_kind);
- // This is only necessary if there is an initializer, but we don't have
- // that information here. Consequently, the preparser sometimes says
- // maybe-assigned where the parser (correctly) says never-assigned.
- }
- if (names) {
- names->Add(variable->raw_name(), zone());
- }
- }
- }
+bool PreParser::IdentifierEquals(const PreParserIdentifier& identifier,
+ const AstRawString* other) {
+ return identifier.string_ == other;
}
-#undef CHECK_OK
-#undef CHECK_OK_CUSTOM
-
-
} // namespace internal
} // namespace v8
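A change that recurs throughout preparser.cc above is the removal of the CHECK_OK / bool* ok plumbing: instead of threading an out-parameter through every call, the parser keeps sticky error state and callers query has_error() (or the pending error handler) where it matters. A minimal sketch of the sticky-flag style, using a hypothetical SketchParser rather than V8's ParserBase:

#include <cassert>

class SketchParser {
 public:
  void Expect(char want, char got) {
    // Record the mismatch and keep going; no bool* ok out-parameter.
    if (want != got) has_error_ = true;
  }
  bool has_error() const { return has_error_; }

 private:
  bool has_error_ = false;
};

int main() {
  SketchParser p;
  p.Expect('(', '(');
  assert(!p.has_error());
  p.Expect(')', '{');  // sets the flag; later stages just check it once
  assert(p.has_error());
}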
diff --git a/deps/v8/src/parsing/preparser.h b/deps/v8/src/parsing/preparser.h
index 65509a2029..d403854743 100644
--- a/deps/v8/src/parsing/preparser.h
+++ b/deps/v8/src/parsing/preparser.h
@@ -22,7 +22,7 @@ namespace internal {
// interface as AstNodeFactory, so ParserBase doesn't need to care which one is
// used.
-class PreParsedScopeDataBuilder;
+class PreparseDataBuilder;
class PreParserIdentifier {
public:
@@ -56,8 +56,12 @@ class PreParserIdentifier {
}
bool IsNull() const { return type_ == kNullIdentifier; }
bool IsEval() const { return type_ == kEvalIdentifier; }
+ bool IsAsync() const { return type_ == kAsyncIdentifier; }
bool IsArguments() const { return type_ == kArgumentsIdentifier; }
- bool IsEvalOrArguments() const { return IsEval() || IsArguments(); }
+ bool IsEvalOrArguments() const {
+ STATIC_ASSERT(kEvalIdentifier + 1 == kArgumentsIdentifier);
+ return IsInRange(type_, kEvalIdentifier, kArgumentsIdentifier);
+ }
bool IsConstructor() const { return type_ == kConstructorIdentifier; }
bool IsAwait() const { return type_ == kAwaitIdentifier; }
bool IsName() const { return type_ == kNameIdentifier; }
@@ -77,7 +81,6 @@ class PreParserIdentifier {
};
explicit PreParserIdentifier(Type type) : string_(nullptr), type_(type) {}
- // Only non-nullptr when PreParser.track_unresolved_variables_ is true.
const AstRawString* string_;
Type type_;
@@ -88,89 +91,54 @@ class PreParserIdentifier {
class PreParserExpression {
public:
- using VariableZoneThreadedListType =
- ZoneThreadedList<VariableProxy, VariableProxy::PreParserNext>;
-
- PreParserExpression()
- : code_(TypeField::encode(kNull)), variables_(nullptr) {}
+ PreParserExpression() : code_(TypeField::encode(kNull)) {}
static PreParserExpression Null() { return PreParserExpression(); }
+ static PreParserExpression Failure() {
+ return PreParserExpression(TypeField::encode(kFailure));
+ }
- static PreParserExpression Default(
- VariableZoneThreadedListType* variables = nullptr) {
- return PreParserExpression(TypeField::encode(kExpression), variables);
+ static PreParserExpression Default() {
+ return PreParserExpression(TypeField::encode(kExpression));
}
static PreParserExpression Spread(const PreParserExpression& expression) {
- return PreParserExpression(TypeField::encode(kSpreadExpression),
- expression.variables_);
+ return PreParserExpression(TypeField::encode(kSpreadExpression));
}
- static PreParserExpression FromIdentifier(const PreParserIdentifier& id,
- VariableProxy* variable,
- Zone* zone) {
- PreParserExpression expression(TypeField::encode(kIdentifierExpression) |
- IdentifierTypeField::encode(id.type_));
- expression.AddVariable(variable, zone);
- return expression;
+ static PreParserExpression FromIdentifier(const PreParserIdentifier& id) {
+ return PreParserExpression(TypeField::encode(kIdentifierExpression) |
+ IdentifierTypeField::encode(id.type_));
}
static PreParserExpression BinaryOperation(const PreParserExpression& left,
Token::Value op,
const PreParserExpression& right,
Zone* zone) {
- if (op == Token::COMMA) {
- // Possibly an arrow function parameter list.
- if (left.variables_ == nullptr) {
- return PreParserExpression(TypeField::encode(kExpression),
- right.variables_);
- }
- if (right.variables_ != nullptr) {
- left.variables_->Append(std::move(*right.variables_));
- }
- return PreParserExpression(TypeField::encode(kExpression),
- left.variables_);
- }
return PreParserExpression(TypeField::encode(kExpression));
}
- static PreParserExpression Assignment(
- VariableZoneThreadedListType* variables) {
+ static PreParserExpression Assignment() {
return PreParserExpression(TypeField::encode(kExpression) |
- ExpressionTypeField::encode(kAssignment),
- variables);
+ ExpressionTypeField::encode(kAssignment));
}
static PreParserExpression NewTargetExpression() {
return PreParserExpression::Default();
}
- static PreParserExpression ObjectLiteral(
- VariableZoneThreadedListType* variables) {
- return PreParserExpression(TypeField::encode(kObjectLiteralExpression),
- variables);
+ static PreParserExpression ObjectLiteral() {
+ return PreParserExpression(TypeField::encode(kObjectLiteralExpression));
}
- static PreParserExpression ArrayLiteral(
- VariableZoneThreadedListType* variables) {
- return PreParserExpression(TypeField::encode(kArrayLiteralExpression),
- variables);
+ static PreParserExpression ArrayLiteral() {
+ return PreParserExpression(TypeField::encode(kArrayLiteralExpression));
}
static PreParserExpression StringLiteral() {
return PreParserExpression(TypeField::encode(kStringLiteralExpression));
}
- static PreParserExpression UseStrictStringLiteral() {
- return PreParserExpression(TypeField::encode(kStringLiteralExpression) |
- IsUseStrictField::encode(true));
- }
-
- static PreParserExpression UseAsmStringLiteral() {
- return PreParserExpression(TypeField::encode(kStringLiteralExpression) |
- IsUseAsmField::encode(true));
- }
-
static PreParserExpression This() {
return PreParserExpression(TypeField::encode(kExpression) |
ExpressionTypeField::encode(kThisExpression));
@@ -229,6 +197,9 @@ class PreParserExpression {
}
bool IsNull() const { return TypeField::decode(code_) == kNull; }
+ bool IsFailureExpression() const {
+ return TypeField::decode(code_) == kFailure;
+ }
bool IsIdentifier() const {
return TypeField::decode(code_) == kIdentifierExpression;
@@ -252,18 +223,14 @@ class PreParserExpression {
return TypeField::decode(code_) == kArrayLiteralExpression;
}
- bool IsStringLiteral() const {
- return TypeField::decode(code_) == kStringLiteralExpression;
- }
-
- bool IsUseStrictLiteral() const {
- return TypeField::decode(code_) == kStringLiteralExpression &&
- IsUseStrictField::decode(code_);
+ bool IsPattern() const {
+ STATIC_ASSERT(kObjectLiteralExpression + 1 == kArrayLiteralExpression);
+ return IsInRange(TypeField::decode(code_), kObjectLiteralExpression,
+ kArrayLiteralExpression);
}
- bool IsUseAsmLiteral() const {
- return TypeField::decode(code_) == kStringLiteralExpression &&
- IsUseAsmField::decode(code_);
+ bool IsStringLiteral() const {
+ return TypeField::decode(code_) == kStringLiteralExpression;
}
bool IsThis() const {
@@ -325,24 +292,25 @@ class PreParserExpression {
return TypeField::decode(code_) == kSpreadExpression;
}
+ bool is_parenthesized() const { return IsParenthesizedField::decode(code_); }
+
+ void mark_parenthesized() {
+ code_ = IsParenthesizedField::update(code_, true);
+ }
+
+ void clear_parenthesized() {
+ code_ = IsParenthesizedField::update(code_, false);
+ }
+
PreParserExpression AsFunctionLiteral() { return *this; }
// Dummy implementation for making expression->somefunc() work in both Parser
// and PreParser.
PreParserExpression* operator->() { return this; }
- void set_is_private_field() {
- if (variables_ != nullptr) {
- DCHECK(IsIdentifier());
- DCHECK(AsIdentifier().IsPrivateName());
- DCHECK_EQ(1, variables_->LengthForTest());
- variables_->first()->set_is_private_field();
- }
- }
-
// More dummy implementations of things PreParser doesn't need to track:
void SetShouldEagerCompile() {}
- void mark_as_iife() {}
+ void mark_as_oneshot_iife() {}
int position() const { return kNoSourcePosition; }
void set_function_token_position(int position) {}
@@ -352,6 +320,7 @@ class PreParserExpression {
private:
enum Type {
kNull,
+ kFailure,
kExpression,
kIdentifierExpression,
kStringLiteralExpression,
@@ -373,20 +342,8 @@ class PreParserExpression {
kAssignment
};
- explicit PreParserExpression(
- uint32_t expression_code,
- VariableZoneThreadedListType* variables = nullptr)
- : code_(expression_code), variables_(variables) {}
-
- void AddVariable(VariableProxy* variable, Zone* zone) {
- if (variable == nullptr) {
- return;
- }
- if (variables_ == nullptr) {
- variables_ = new (zone) VariableZoneThreadedListType();
- }
- variables_->Add(variable);
- }
+ explicit PreParserExpression(uint32_t expression_code)
+ : code_(expression_code) {}
// The first three bits are for the Type.
typedef BitField<Type, 0, 3> TypeField;
@@ -396,80 +353,66 @@ class PreParserExpression {
// Expression nodes may be represented as multiple Types, not exclusively
// through kExpression.
// TODO(caitp, adamk): clean up PreParserExpression bitfields.
- typedef BitField<bool, 31, 1> ParenthesizedField;
+ typedef BitField<bool, TypeField::kNext, 1> IsParenthesizedField;
// The rest of the bits are interpreted depending on the value
// of the Type field, so they can share the storage.
- typedef BitField<ExpressionType, TypeField::kNext, 4> ExpressionTypeField;
- typedef BitField<bool, TypeField::kNext, 1> IsUseStrictField;
- typedef BitField<bool, IsUseStrictField::kNext, 1> IsUseAsmField;
- typedef BitField<PreParserIdentifier::Type, TypeField::kNext, 8>
+ typedef BitField<ExpressionType, IsParenthesizedField::kNext, 4>
+ ExpressionTypeField;
+ typedef BitField<PreParserIdentifier::Type, IsParenthesizedField::kNext, 8>
IdentifierTypeField;
- typedef BitField<bool, TypeField::kNext, 1> HasCoverInitializedNameField;
+ typedef BitField<bool, IsParenthesizedField::kNext, 1>
+ HasCoverInitializedNameField;
uint32_t code_;
- // If the PreParser is used in the variable tracking mode, PreParserExpression
- // accumulates variables in that expression.
- VariableZoneThreadedListType* variables_;
-
friend class PreParser;
friend class PreParserFactory;
friend class PreParserExpressionList;
};
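The BitField reshuffling above works because each field publishes where it ends: IsParenthesizedField starts at TypeField::kNext, and the variant fields (ExpressionTypeField, IdentifierTypeField, HasCoverInitializedNameField) share the bits after it. A simplified stand-in for V8's BitField template, showing the encode/decode/update/kNext mechanics those typedefs rely on:

#include <cassert>
#include <cstdint>

template <typename T, int kShift, int kSize>
struct BitField {
  static constexpr int kNext = kShift + kSize;  // where the next field starts
  static constexpr uint32_t kMask = ((1u << kSize) - 1) << kShift;
  static uint32_t encode(T value) {
    return static_cast<uint32_t>(value) << kShift;
  }
  static T decode(uint32_t code) {
    return static_cast<T>((code & kMask) >> kShift);
  }
  static uint32_t update(uint32_t code, T value) {
    return (code & ~kMask) | encode(value);
  }
};

using TypeField = BitField<int, 0, 3>;
using IsParenthesizedField = BitField<bool, TypeField::kNext, 1>;

int main() {
  uint32_t code = TypeField::encode(5);
  code = IsParenthesizedField::update(code, true);
  assert(TypeField::decode(code) == 5);        // fields do not clobber
  assert(IsParenthesizedField::decode(code));  // each other's bits
}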
+class PreParserStatement;
+class PreParserStatementList {
+ public:
+ PreParserStatementList() : PreParserStatementList(false) {}
+ PreParserStatementList* operator->() { return this; }
+ void Add(const PreParserStatement& element, Zone* zone) {}
+ static PreParserStatementList Null() { return PreParserStatementList(true); }
+ bool IsNull() const { return is_null_; }
+
+ private:
+ explicit PreParserStatementList(bool is_null) : is_null_(is_null) {}
+ bool is_null_;
+};
+
+class PreParserScopedStatementList {
+ public:
+ explicit PreParserScopedStatementList(std::vector<void*>* buffer) {}
+ void Rewind() {}
+ void MergeInto(const PreParserScopedStatementList* other) {}
+ void Add(const PreParserStatement& element) {}
+ int length() { return 0; }
+};
// The pre-parser doesn't need to build lists of expressions, identifiers, or
// the like. If the PreParser is used in variable tracking mode, it needs to
// build lists of variables though.
class PreParserExpressionList {
- using VariableZoneThreadedListType =
- ZoneThreadedList<VariableProxy, VariableProxy::PreParserNext>;
-
public:
- // These functions make list->Add(some_expression) work (and do nothing).
- PreParserExpressionList() : PreParserExpressionList(0) {}
- PreParserExpressionList* operator->() { return this; }
- void Add(const PreParserExpression& expression, Zone* zone) {
- if (expression.variables_ != nullptr) {
- DCHECK(FLAG_lazy_inner_functions);
- DCHECK_NOT_NULL(zone);
- if (variables_ == nullptr) {
- variables_ = new (zone) VariableZoneThreadedListType();
- }
- variables_->Append(std::move(*expression.variables_));
- }
+ explicit PreParserExpressionList(std::vector<void*>* buffer) : length_(0) {}
+
+ int length() const { return length_; }
+
+ void Add(const PreParserExpression& expression) {
++length_;
}
- int length() const { return length_; }
- static PreParserExpressionList Null() { return PreParserExpressionList(-1); }
- bool IsNull() const { return length_ == -1; }
- void Set(int index, const PreParserExpression& element) {}
private:
- explicit PreParserExpressionList(int n) : length_(n), variables_(nullptr) {}
int length_;
- VariableZoneThreadedListType* variables_;
-
friend class PreParser;
friend class PreParserFactory;
};
-class PreParserStatement;
-
-class PreParserStatementList {
- public:
- PreParserStatementList() : PreParserStatementList(false) {}
- PreParserStatementList* operator->() { return this; }
- void Add(const PreParserStatement& element, Zone* zone) {}
- static PreParserStatementList Null() { return PreParserStatementList(true); }
- bool IsNull() const { return is_null_; }
-
- private:
- explicit PreParserStatementList(bool is_null) : is_null_(is_null) {}
- bool is_null_;
-};
-
class PreParserStatement {
public:
static PreParserStatement Default() {
@@ -488,33 +431,21 @@ class PreParserStatement {
return PreParserStatement(kJumpStatement);
}
+ void InitializeStatements(const PreParserScopedStatementList& statements,
+ Zone* zone) {}
+
// Creates an expression statement from an expression.
// Preserves whether it is an unparenthesized string literal, possibly
// "use strict".
static PreParserStatement ExpressionStatement(
const PreParserExpression& expression) {
- if (expression.IsUseStrictLiteral()) {
- return PreParserStatement(kUseStrictExpressionStatement);
- }
- if (expression.IsUseAsmLiteral()) {
- return PreParserStatement(kUseAsmExpressionStatement);
- }
if (expression.IsStringLiteral()) {
return PreParserStatement(kStringLiteralExpressionStatement);
}
return Default();
}
- bool IsStringLiteral() {
- return code_ == kStringLiteralExpressionStatement || IsUseStrictLiteral() ||
- IsUseAsmLiteral();
- }
-
- bool IsUseStrictLiteral() {
- return code_ == kUseStrictExpressionStatement;
- }
-
- bool IsUseAsmLiteral() { return code_ == kUseAsmExpressionStatement; }
+ bool IsStringLiteral() { return code_ == kStringLiteralExpressionStatement; }
bool IsJumpStatement() {
return code_ == kJumpStatement;
@@ -540,22 +471,46 @@ class PreParserStatement {
void Initialize(PreParserStatement init, const PreParserExpression& cond,
PreParserStatement next, PreParserStatement body,
const SourceRange& body_range = {}) {}
+ void Initialize(PreParserExpression each, const PreParserExpression& subject,
+ PreParserStatement body, const SourceRange& body_range = {}) {
+ }
- private:
+ protected:
enum Type {
kNullStatement,
kEmptyStatement,
kUnknownStatement,
kJumpStatement,
kStringLiteralExpressionStatement,
- kUseStrictExpressionStatement,
- kUseAsmExpressionStatement,
};
explicit PreParserStatement(Type code) : code_(code) {}
+
+ private:
Type code_;
};
+// A PreParserBlock extends statement with a place to store the scope.
+// The scope is dropped as the block is returned as a statement.
+class PreParserBlock : public PreParserStatement {
+ public:
+ void set_scope(Scope* scope) { scope_ = scope; }
+ Scope* scope() const { return scope_; }
+ static PreParserBlock Default() {
+ return PreParserBlock(PreParserStatement::kUnknownStatement);
+ }
+ static PreParserBlock Null() {
+ return PreParserBlock(PreParserStatement::kNullStatement);
+ }
+ // Dummy implementation for making block->somefunc() work in both Parser and
+ // PreParser.
+ PreParserBlock* operator->() { return this; }
+
+ private:
+ explicit PreParserBlock(PreParserStatement::Type type)
+ : PreParserStatement(type), scope_(nullptr) {}
+ Scope* scope_;
+};
class PreParserFactory {
public:
@@ -566,16 +521,7 @@ class PreParserFactory {
PreParserExpression NewStringLiteral(const PreParserIdentifier& identifier,
int pos) {
- // This is needed for object literal property names. Property names are
- // normalized to string literals during object literal parsing.
- PreParserExpression expression = PreParserExpression::Default();
- if (identifier.string_ != nullptr) {
- DCHECK(FLAG_lazy_inner_functions);
- VariableProxy* variable = ast_node_factory_.NewVariableProxy(
- identifier.string_, NORMAL_VARIABLE);
- expression.AddVariable(variable, zone_);
- }
- return expression;
+ return PreParserExpression::Default();
}
PreParserExpression NewNumberLiteral(double number,
int pos) {
@@ -593,30 +539,31 @@ class PreParserFactory {
}
PreParserExpression NewArrayLiteral(const PreParserExpressionList& values,
int first_spread_index, int pos) {
- return PreParserExpression::ArrayLiteral(values.variables_);
+ return PreParserExpression::ArrayLiteral();
}
PreParserExpression NewClassLiteralProperty(const PreParserExpression& key,
const PreParserExpression& value,
ClassLiteralProperty::Kind kind,
bool is_static,
- bool is_computed_name) {
+ bool is_computed_name,
+ bool is_private) {
return PreParserExpression::Default();
}
PreParserExpression NewObjectLiteralProperty(const PreParserExpression& key,
const PreParserExpression& value,
ObjectLiteralProperty::Kind kind,
bool is_computed_name) {
- return PreParserExpression::Default(value.variables_);
+ return PreParserExpression::Default();
}
PreParserExpression NewObjectLiteralProperty(const PreParserExpression& key,
const PreParserExpression& value,
bool is_computed_name) {
- return PreParserExpression::Default(value.variables_);
+ return PreParserExpression::Default();
}
PreParserExpression NewObjectLiteral(
const PreParserExpressionList& properties, int boilerplate_properties,
int pos, bool has_rest_property) {
- return PreParserExpression::ObjectLiteral(properties.variables_);
+ return PreParserExpression::ObjectLiteral();
}
PreParserExpression NewVariableProxy(void* variable) {
return PreParserExpression::Default();
@@ -653,16 +600,12 @@ class PreParserFactory {
int pos) {
return PreParserExpression::Default();
}
- PreParserExpression NewRewritableExpression(
- const PreParserExpression& expression, Scope* scope) {
- return expression;
- }
PreParserExpression NewAssignment(Token::Value op,
const PreParserExpression& left,
const PreParserExpression& right, int pos) {
// Identifiers need to be tracked since this might be a parameter with a
// default value inside an arrow function parameter list.
- return PreParserExpression::Assignment(left.variables_);
+ return PreParserExpression::Assignment();
}
PreParserExpression NewYield(const PreParserExpression& expression, int pos,
Suspend::OnAbruptResume on_abrupt_resume) {
@@ -717,14 +660,14 @@ class PreParserFactory {
}
PreParserExpression NewFunctionLiteral(
const PreParserIdentifier& name, Scope* scope,
- PreParserStatementList body, int expected_property_count,
+ const PreParserScopedStatementList& body, int expected_property_count,
int parameter_count, int function_length,
FunctionLiteral::ParameterFlag has_duplicate_parameters,
FunctionLiteral::FunctionType function_type,
FunctionLiteral::EagerCompileHint eager_compile_hint, int position,
bool has_braces, int function_literal_id,
- ProducedPreParsedScopeData* produced_preparsed_scope_data = nullptr) {
- DCHECK_NULL(produced_preparsed_scope_data);
+ ProducedPreparseData* produced_preparse_data = nullptr) {
+ DCHECK_NULL(produced_preparse_data);
return PreParserExpression::Default();
}
@@ -734,17 +677,25 @@ class PreParserFactory {
}
PreParserExpression NewEmptyParentheses(int pos) {
- return PreParserExpression::Default();
+ PreParserExpression result = PreParserExpression::Default();
+ result.mark_parenthesized();
+ return result;
}
- PreParserStatement NewEmptyStatement(int pos) {
- return PreParserStatement::Default();
+ PreParserStatement EmptyStatement() { return PreParserStatement::Default(); }
+
+ PreParserBlock NewBlock(int capacity, bool ignore_completion_value) {
+ return PreParserBlock::Default();
}
- PreParserStatement NewBlock(
- int capacity, bool ignore_completion_value,
- ZonePtrList<const AstRawString>* labels = nullptr) {
- return PreParserStatement::Default();
+ PreParserBlock NewBlock(bool ignore_completion_value,
+ ZonePtrList<const AstRawString>* labels) {
+ return PreParserBlock::Default();
+ }
+
+ PreParserBlock NewBlock(bool ignore_completion_value,
+ const PreParserScopedStatementList& list) {
+ return PreParserBlock::Default();
}
PreParserStatement NewDebuggerStatement(int pos) {
@@ -801,8 +752,9 @@ class PreParserFactory {
return PreParserStatement::Default();
}
- PreParserStatement NewCaseClause(const PreParserExpression& label,
- PreParserStatementList statements) {
+ PreParserStatement NewCaseClause(
+ const PreParserExpression& label,
+ const PreParserScopedStatementList& statements) {
return PreParserStatement::Default();
}
@@ -821,7 +773,7 @@ class PreParserFactory {
PreParserStatement NewForOfStatement(
ZonePtrList<const AstRawString>* labels,
- ZonePtrList<const AstRawString>* own_labels, int pos) {
+ ZonePtrList<const AstRawString>* own_labels, int pos, IteratorType type) {
return PreParserStatement::Default();
}
@@ -837,35 +789,32 @@ class PreParserFactory {
}
private:
- // For creating VariableProxy objects (if
- // PreParser::track_unresolved_variables_ is used).
+ // For creating VariableProxy objects to track unresolved variables.
AstNodeFactory ast_node_factory_;
Zone* zone_;
};
+class PreParser;
-struct PreParserFormalParameters : FormalParametersBase {
- struct Parameter : public ZoneObject {
- using VariableZoneThreadedListType =
- ZoneThreadedList<VariableProxy, VariableProxy::PreParserNext>;
-
- Parameter(VariableZoneThreadedListType* variables, bool is_rest)
- : variables_(variables), is_rest(is_rest) {}
- Parameter** next() { return &next_parameter; }
- Parameter* const* next() const { return &next_parameter; }
-
- VariableZoneThreadedListType* variables_;
- Parameter* next_parameter = nullptr;
- bool is_rest : 1;
- };
+class PreParserFormalParameters : public FormalParametersBase {
+ public:
explicit PreParserFormalParameters(DeclarationScope* scope)
: FormalParametersBase(scope) {}
- base::ThreadedList<Parameter> params;
-};
+ void set_has_duplicate() { has_duplicate_ = true; }
+ bool has_duplicate() { return has_duplicate_; }
+ void ValidateDuplicate(PreParser* preparser) const;
+ void set_strict_parameter_error(const Scanner::Location& loc,
+ MessageTemplate message) {
+ strict_parameter_error_ = loc.IsValid();
+ }
+ void ValidateStrictMode(PreParser* preparser) const;
-class PreParser;
+ private:
+ bool has_duplicate_ = false;
+ bool strict_parameter_error_ = false;
+};
class PreParserTarget {
public:
@@ -880,7 +829,7 @@ class PreParserTargetScope {
class PreParserFuncNameInferrer {
public:
- PreParserFuncNameInferrer(AstValueFactory* avf, Zone* zone) {}
+ explicit PreParserFuncNameInferrer(AstValueFactory* avf) {}
void RemoveAsyncKeywordFromEnd() const {}
void Infer() const {}
void RemoveLastFunction() const {}
@@ -920,39 +869,40 @@ class PreParserSourceRangeScope {
DISALLOW_IMPLICIT_CONSTRUCTORS(PreParserSourceRangeScope);
};
+class PreParserPropertyList {};
+
template <>
struct ParserTypes<PreParser> {
typedef ParserBase<PreParser> Base;
typedef PreParser Impl;
// Return types for traversing functions.
- typedef PreParserIdentifier Identifier;
+ typedef PreParserExpression ClassLiteralProperty;
typedef PreParserExpression Expression;
typedef PreParserExpression FunctionLiteral;
typedef PreParserExpression ObjectLiteralProperty;
- typedef PreParserExpression ClassLiteralProperty;
typedef PreParserExpression Suspend;
- typedef PreParserExpression RewritableExpression;
typedef PreParserExpressionList ExpressionList;
typedef PreParserExpressionList ObjectPropertyList;
- typedef PreParserExpressionList ClassPropertyList;
typedef PreParserFormalParameters FormalParameters;
- typedef PreParserStatement Statement;
- typedef PreParserStatementList StatementList;
- typedef PreParserStatement Block;
+ typedef PreParserIdentifier Identifier;
+ typedef PreParserPropertyList ClassPropertyList;
+ typedef PreParserScopedStatementList StatementList;
+ typedef PreParserBlock Block;
typedef PreParserStatement BreakableStatement;
- typedef PreParserStatement IterationStatement;
typedef PreParserStatement ForStatement;
+ typedef PreParserStatement IterationStatement;
+ typedef PreParserStatement Statement;
// For constructing objects returned by the traversing functions.
typedef PreParserFactory Factory;
- typedef PreParserTarget Target;
- typedef PreParserTargetScope TargetScope;
+ // Other implementation-specific tasks.
typedef PreParserFuncNameInferrer FuncNameInferrer;
typedef PreParserSourceRange SourceRange;
typedef PreParserSourceRangeScope SourceRangeScope;
- static constexpr bool ExpressionClassifierReportErrors = false;
+ typedef PreParserTarget Target;
+ typedef PreParserTargetScope TargetScope;
};
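// Aside: a minimal sketch of the trait pattern specialized above, with
// hypothetical "Toy" names. ParserBase<Impl> pulls its node types out of
// ParserTypes<Impl>, so the shared parsing logic builds real AST nodes in
// the parser but only cheap placeholder values in the preparser.

template <typename Impl>
struct ToyTypesOf;  // Specialized once per concrete parser.

template <typename Impl>
class ToyParserBase {
 public:
  using ExpressionT = typename ToyTypesOf<Impl>::Expression;
  ExpressionT ParseSomething() { return impl()->BuildExpression(); }

 private:
  Impl* impl() { return static_cast<Impl*>(this); }
};

class ToyPreParser;
template <>
struct ToyTypesOf<ToyPreParser> {
  using Expression = int;  // Placeholder node, like PreParserExpression.
};

class ToyPreParser : public ToyParserBase<ToyPreParser> {
 public:
  int BuildExpression() { return 0; }  // No real AST node is allocated.
};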
@@ -970,7 +920,6 @@ struct ParserTypes<PreParser> {
// it is used) are generally omitted.
class PreParser : public ParserBase<PreParser> {
friend class ParserBase<PreParser>;
- friend class v8::internal::ExpressionClassifier<ParserTypes<PreParser>>;
public:
typedef PreParserIdentifier Identifier;
@@ -979,7 +928,6 @@ class PreParser : public ParserBase<PreParser> {
enum PreParseResult {
kPreParseStackOverflow,
- kPreParseAbort,
kPreParseNotIdentifiableError,
kPreParseSuccess
};
@@ -995,8 +943,7 @@ class PreParser : public ParserBase<PreParser> {
runtime_call_stats, logger, script_id,
parsing_module, parsing_on_main_thread),
use_counts_(nullptr),
- track_unresolved_variables_(false),
- preparsed_scope_data_builder_(nullptr) {}
+ preparse_data_builder_(nullptr) {}
static bool IsPreParser() { return true; }
@@ -1019,25 +966,23 @@ class PreParser : public ParserBase<PreParser> {
PreParseResult PreParseFunction(
const AstRawString* function_name, FunctionKind kind,
FunctionLiteral::FunctionType function_type,
- DeclarationScope* function_scope, bool track_unresolved_variables,
- bool may_abort, int* use_counts,
- ProducedPreParsedScopeData** produced_preparser_scope_data,
- int script_id);
-
- V8_INLINE static bool ShouldTrackUnresolvedVariables(bool is_inner_function) {
- return FLAG_preparser_scope_analysis || is_inner_function;
- }
+ DeclarationScope* function_scope, int* use_counts,
+ ProducedPreparseData** produced_preparser_scope_data, int script_id);
- PreParsedScopeDataBuilder* preparsed_scope_data_builder() const {
- return preparsed_scope_data_builder_;
+ PreparseDataBuilder* preparse_data_builder() const {
+ return preparse_data_builder_;
}
- void set_preparsed_scope_data_builder(
- PreParsedScopeDataBuilder* preparsed_scope_data_builder) {
- preparsed_scope_data_builder_ = preparsed_scope_data_builder;
+ void set_preparse_data_builder(PreparseDataBuilder* preparse_data_builder) {
+ preparse_data_builder_ = preparse_data_builder;
}
private:
+ friend class i::ExpressionScope<ParserTypes<PreParser>>;
+ friend class i::VariableDeclarationParsingScope<ParserTypes<PreParser>>;
+ friend class i::ParameterDeclarationParsingScope<ParserTypes<PreParser>>;
+ friend class i::ArrowHeadParsingScope<ParserTypes<PreParser>>;
+ friend class PreParserFormalParameters;
// These types form an algebra over syntactic categories that is just
// rich enough to let us recognize and propagate the constructs that
// are either being counted in the preparser data, or are important
@@ -1057,13 +1002,11 @@ class PreParser : public ParserBase<PreParser> {
return pending_error_handler_;
}
- V8_INLINE bool SkipFunction(
- const AstRawString* name, FunctionKind kind,
- FunctionLiteral::FunctionType function_type,
- DeclarationScope* function_scope, int* num_parameters,
- ProducedPreParsedScopeData** produced_preparsed_scope_data,
- bool is_inner_function, bool may_abort,
- FunctionLiteral::EagerCompileHint* hint, bool* ok) {
+ V8_INLINE bool SkipFunction(const AstRawString* name, FunctionKind kind,
+ FunctionLiteral::FunctionType function_type,
+ DeclarationScope* function_scope,
+ int* num_parameters,
+ ProducedPreparseData** produced_preparse_data) {
UNREACHABLE();
}
@@ -1072,15 +1015,13 @@ class PreParser : public ParserBase<PreParser> {
FunctionNameValidity function_name_validity, FunctionKind kind,
int function_token_pos, FunctionLiteral::FunctionType function_type,
LanguageMode language_mode,
- ZonePtrList<const AstRawString>* arguments_for_wrapped_function,
- bool* ok);
+ ZonePtrList<const AstRawString>* arguments_for_wrapped_function);
PreParserExpression InitializeObjectLiteral(PreParserExpression literal) {
return literal;
}
- LazyParsingResult ParseStatementListAndLogFunction(
- PreParserFormalParameters* formals, bool maybe_abort, bool* ok);
+ void ParseStatementListAndLogFunction(PreParserFormalParameters* formals);
struct TemplateLiteralState {};
@@ -1099,7 +1040,7 @@ class PreParser : public ParserBase<PreParser> {
const PreParserExpression& expression) {
return expression.IsPropertyWithPrivateFieldKey();
}
- V8_INLINE void CheckConflictingVarDeclarations(Scope* scope, bool* ok) {}
+ V8_INLINE void CheckConflictingVarDeclarations(Scope* scope) {}
V8_INLINE void SetLanguageMode(Scope* scope, LanguageMode mode) {
scope->SetLanguageMode(mode);
@@ -1114,22 +1055,14 @@ class PreParser : public ParserBase<PreParser> {
SpreadCallNew(const PreParserExpression& function,
const PreParserExpressionList& args, int pos);
- V8_INLINE void RewriteDestructuringAssignments() {}
-
V8_INLINE void PrepareGeneratorVariables() {}
V8_INLINE void RewriteAsyncFunctionBody(
- PreParserStatementList body, PreParserStatement block,
- const PreParserExpression& return_value, bool* ok) {}
-
- void DeclareAndInitializeVariables(
- PreParserStatement block,
- const DeclarationDescriptor* declaration_descriptor,
- const DeclarationParsingResult::Declaration* declaration,
- ZonePtrList<const AstRawString>* names, bool* ok);
+ const PreParserScopedStatementList* body, PreParserStatement block,
+ const PreParserExpression& return_value) {}
V8_INLINE void DeclareLabel(ZonePtrList<const AstRawString>** labels,
ZonePtrList<const AstRawString>** own_labels,
- const PreParserExpression& expr, bool* ok) {
+ const PreParserExpression& expr) {
DCHECK(!parsing_module_ || !expr.AsIdentifier().IsAwait());
DCHECK(IsIdentifier(expr));
}
@@ -1149,24 +1082,29 @@ class PreParser : public ParserBase<PreParser> {
return PreParserStatement::Default();
}
- V8_INLINE void RewriteCatchPattern(CatchInfo* catch_info, bool* ok) {
- if (track_unresolved_variables_) {
- const AstRawString* catch_name = catch_info->name.string_;
- if (catch_name == nullptr) {
- catch_name = ast_value_factory()->dot_catch_string();
- }
- catch_info->scope->DeclareCatchVariableName(catch_name);
+ void DeclareVariable(VariableProxy* proxy, VariableKind kind,
+ VariableMode mode, InitializationFlag init, Scope* scope,
+ bool* was_added, int position) {
+ DeclareVariableName(proxy->raw_name(), mode, scope, was_added, kind);
+ }
- if (catch_info->pattern.variables_ != nullptr) {
- for (auto variable : *catch_info->pattern.variables_) {
- scope()->DeclareVariableName(variable->raw_name(),
- VariableMode::kLet);
- }
- }
+ void DeclareVariableName(const AstRawString* name, VariableMode mode,
+ Scope* scope, bool* was_added,
+ VariableKind kind = NORMAL_VARIABLE) {
+ if (scope->DeclareVariableName(name, mode, was_added, kind) == nullptr) {
+ ReportUnidentifiableError();
}
}
- V8_INLINE void ValidateCatchBlock(const CatchInfo& catch_info, bool* ok) {}
+ V8_INLINE PreParserBlock RewriteCatchPattern(CatchInfo* catch_info) {
+ return PreParserBlock::Default();
+ }
+
+ V8_INLINE void ReportVarRedeclarationIn(const AstRawString* name,
+ Scope* scope) {
+ ReportUnidentifiableError();
+ }
+
V8_INLINE PreParserStatement RewriteTryStatement(
PreParserStatement try_block, PreParserStatement catch_block,
const SourceRange& catch_range, PreParserStatement finally_block,
@@ -1174,20 +1112,24 @@ class PreParser : public ParserBase<PreParser> {
return PreParserStatement::Default();
}
+ V8_INLINE void ReportUnexpectedTokenAt(
+ Scanner::Location location, Token::Value token,
+ MessageTemplate message = MessageTemplate::kUnexpectedToken) {
+ ReportUnidentifiableError();
+ }
V8_INLINE void ParseAndRewriteGeneratorFunctionBody(
- int pos, FunctionKind kind, PreParserStatementList body, bool* ok) {
- ParseStatementList(body, Token::RBRACE, ok);
+ int pos, FunctionKind kind, PreParserScopedStatementList* body) {
+ ParseStatementList(body, Token::RBRACE);
}
V8_INLINE void ParseAndRewriteAsyncGeneratorFunctionBody(
- int pos, FunctionKind kind, PreParserStatementList body, bool* ok) {
- ParseStatementList(body, Token::RBRACE, ok);
+ int pos, FunctionKind kind, PreParserScopedStatementList* body) {
+ ParseStatementList(body, Token::RBRACE);
}
V8_INLINE void DeclareFunctionNameVar(
const AstRawString* function_name,
FunctionLiteral::FunctionType function_type,
DeclarationScope* function_scope) {
- if (track_unresolved_variables_ &&
- function_type == FunctionLiteral::kNamedExpression &&
+ if (function_type == FunctionLiteral::kNamedExpression &&
function_scope->LookupLocal(function_name) == nullptr) {
DCHECK_EQ(function_scope, scope());
function_scope->DeclareFunctionVar(function_name);
@@ -1202,31 +1144,33 @@ class PreParser : public ParserBase<PreParser> {
function_scope);
}
- V8_INLINE PreParserExpression RewriteDoExpression(PreParserStatement body,
- int pos, bool* ok) {
- return PreParserExpression::Default();
- }
+ bool IdentifierEquals(const PreParserIdentifier& identifier,
+ const AstRawString* other);
// TODO(nikolaos): The preparser currently does not keep track of labels
// and targets.
V8_INLINE PreParserStatement
- LookupBreakTarget(const PreParserIdentifier& label, bool* ok) {
+ LookupBreakTarget(const PreParserIdentifier& label) {
return PreParserStatement::Default();
}
V8_INLINE PreParserStatement
- LookupContinueTarget(const PreParserIdentifier& label, bool* ok) {
+ LookupContinueTarget(const PreParserIdentifier& label) {
return PreParserStatement::Default();
}
V8_INLINE PreParserStatement
DeclareFunction(const PreParserIdentifier& variable_name,
const PreParserExpression& function, VariableMode mode,
- int pos, bool is_sloppy_block_function,
- ZonePtrList<const AstRawString>* names, bool* ok) {
+ int beg_pos, int end_pos, bool is_sloppy_block_function,
+ ZonePtrList<const AstRawString>* names) {
DCHECK_NULL(names);
if (variable_name.string_ != nullptr) {
- DCHECK(track_unresolved_variables_);
- scope()->DeclareVariableName(variable_name.string_, mode);
+ bool was_added;
+ if (is_strict(language_mode())) {
+ DeclareVariableName(variable_name.string_, mode, scope(), &was_added);
+ } else {
+ scope()->DeclareVariableName(variable_name.string_, mode, &was_added);
+ }
if (is_sloppy_block_function) {
GetDeclarationScope()->DeclareSloppyBlockFunction(variable_name.string_,
scope());
@@ -1238,47 +1182,51 @@ class PreParser : public ParserBase<PreParser> {
V8_INLINE PreParserStatement DeclareClass(
const PreParserIdentifier& variable_name,
const PreParserExpression& value, ZonePtrList<const AstRawString>* names,
- int class_token_pos, int end_pos, bool* ok) {
+ int class_token_pos, int end_pos) {
// Preparser shouldn't be used in contexts where we need to track the names.
DCHECK_NULL(names);
if (variable_name.string_ != nullptr) {
- DCHECK(track_unresolved_variables_);
- scope()->DeclareVariableName(variable_name.string_, VariableMode::kLet);
+ bool was_added;
+ DeclareVariableName(variable_name.string_, VariableMode::kLet, scope(),
+ &was_added);
}
return PreParserStatement::Default();
}
V8_INLINE void DeclareClassVariable(const PreParserIdentifier& name,
ClassInfo* class_info,
- int class_token_pos, bool* ok) {
+ int class_token_pos) {
if (name.string_ != nullptr) {
- DCHECK(track_unresolved_variables_);
- scope()->DeclareVariableName(name.string_, VariableMode::kConst);
+ bool was_added;
+ DeclareVariableName(name.string_, VariableMode::kConst, scope(),
+ &was_added);
}
}
V8_INLINE void DeclareClassProperty(const PreParserIdentifier& class_name,
const PreParserExpression& property,
- const PreParserIdentifier& property_name,
- ClassLiteralProperty::Kind kind,
- bool is_static, bool is_constructor,
- bool is_computed_name,
- ClassInfo* class_info, bool* ok) {
- if (kind == ClassLiteralProperty::PUBLIC_FIELD && is_computed_name) {
- scope()->DeclareVariableName(
+ bool is_constructor,
+ ClassInfo* class_info) {}
+
+ V8_INLINE void DeclareClassField(const PreParserExpression& property,
+ const PreParserIdentifier& property_name,
+ bool is_static, bool is_computed_name,
+ bool is_private, ClassInfo* class_info) {
+ DCHECK_IMPLIES(is_computed_name, !is_private);
+ if (is_computed_name) {
+ bool was_added;
+ DeclareVariableName(
ClassFieldVariableName(ast_value_factory(),
class_info->computed_field_count),
- VariableMode::kConst);
- }
-
- if (kind == ClassLiteralProperty::PRIVATE_FIELD &&
- property_name.string_ != nullptr) {
- DCHECK(track_unresolved_variables_);
- scope()->DeclareVariableName(property_name.string_, VariableMode::kConst);
+ VariableMode::kConst, scope(), &was_added);
+ } else if (is_private && property_name.string_ != nullptr) {
+ bool was_added;
+ DeclareVariableName(property_name.string_, VariableMode::kConst, scope(),
+ &was_added);
}
}
V8_INLINE PreParserExpression
RewriteClassLiteral(Scope* scope, const PreParserIdentifier& name,
- ClassInfo* class_info, int pos, int end_pos, bool* ok) {
+ ClassInfo* class_info, int pos, int end_pos) {
bool has_default_constructor = !class_info->has_seen_constructor;
// Account for the default constructor.
if (has_default_constructor) {
@@ -1300,14 +1248,14 @@ class PreParser : public ParserBase<PreParser> {
if (class_info->has_static_class_fields) {
GetNextFunctionLiteralId();
}
- if (class_info->has_instance_class_fields) {
+ if (class_info->has_instance_members) {
GetNextFunctionLiteralId();
}
return PreParserExpression::Default();
}
V8_INLINE PreParserStatement DeclareNative(const PreParserIdentifier& name,
- int pos, bool* ok) {
+ int pos) {
return PreParserStatement::Default();
}
@@ -1319,6 +1267,10 @@ class PreParser : public ParserBase<PreParser> {
return identifier.IsEval();
}
+ V8_INLINE bool IsAsync(const PreParserIdentifier& identifier) const {
+ return identifier.IsAsync();
+ }
+
V8_INLINE bool IsArguments(const PreParserIdentifier& identifier) const {
return identifier.IsArguments();
}
@@ -1378,14 +1330,6 @@ class PreParser : public ParserBase<PreParser> {
return false;
}
- V8_INLINE bool IsUseStrictDirective(PreParserStatement statement) const {
- return statement.IsUseStrictLiteral();
- }
-
- V8_INLINE bool IsUseAsmDirective(PreParserStatement statement) const {
- return statement.IsUseAsmLiteral();
- }
-
V8_INLINE bool IsStringLiteral(PreParserStatement statement) const {
return statement.IsStringLiteral();
}
@@ -1407,19 +1351,6 @@ class PreParser : public ParserBase<PreParser> {
V8_INLINE static void CheckAssigningFunctionLiteralToProperty(
const PreParserExpression& left, const PreParserExpression& right) {}
- V8_INLINE void MarkExpressionAsAssigned(
- const PreParserExpression& expression) {
- // TODO(marja): To be able to produce the same errors, the preparser needs
- // to start tracking which expressions are variables and which are assigned.
- if (expression.variables_ != nullptr) {
- DCHECK(FLAG_lazy_inner_functions);
- DCHECK(track_unresolved_variables_);
- for (auto variable : *expression.variables_) {
- variable->set_is_assigned();
- }
- }
- }
-
V8_INLINE bool ShortcutNumericLiteralBinaryExpression(
PreParserExpression* x, const PreParserExpression& y, Token::Value op,
int pos) {
@@ -1430,6 +1361,7 @@ class PreParser : public ParserBase<PreParser> {
PreParserExpression y,
Token::Value op, int pos,
const SourceRange& range) {
+ x->clear_parenthesized();
return nullptr;
}
@@ -1439,64 +1371,27 @@ class PreParser : public ParserBase<PreParser> {
}
V8_INLINE PreParserStatement
- BuildInitializationBlock(DeclarationParsingResult* parsing_result,
- ZonePtrList<const AstRawString>* names, bool* ok) {
- for (auto declaration : parsing_result->declarations) {
- DeclareAndInitializeVariables(PreParserStatement::Default(),
- &(parsing_result->descriptor), &declaration,
- names, ok);
- }
+ BuildInitializationBlock(DeclarationParsingResult* parsing_result) {
return PreParserStatement::Default();
}
- V8_INLINE PreParserStatement InitializeForEachStatement(
- PreParserStatement stmt, const PreParserExpression& each,
- const PreParserExpression& subject, PreParserStatement body) {
- MarkExpressionAsAssigned(each);
- return stmt;
- }
-
- V8_INLINE PreParserStatement InitializeForOfStatement(
- PreParserStatement stmt, const PreParserExpression& each,
- const PreParserExpression& iterable, PreParserStatement body,
- bool finalize, IteratorType type,
- int next_result_pos = kNoSourcePosition) {
- MarkExpressionAsAssigned(each);
- return stmt;
- }
-
- V8_INLINE PreParserStatement RewriteForVarInLegacy(const ForInfo& for_info) {
- return PreParserStatement::Null();
+ V8_INLINE PreParserBlock RewriteForVarInLegacy(const ForInfo& for_info) {
+ return PreParserBlock::Null();
}
V8_INLINE void DesugarBindingInForEachStatement(
ForInfo* for_info, PreParserStatement* body_block,
- PreParserExpression* each_variable, bool* ok) {
- if (track_unresolved_variables_) {
- DCHECK_EQ(1, for_info->parsing_result.declarations.size());
- bool is_for_var_of =
- for_info->mode == ForEachStatement::ITERATE &&
- for_info->parsing_result.descriptor.mode == VariableMode::kVar;
- bool collect_names =
- IsLexicalVariableMode(for_info->parsing_result.descriptor.mode) ||
- is_for_var_of;
-
- DeclareAndInitializeVariables(
- PreParserStatement::Default(), &for_info->parsing_result.descriptor,
- &for_info->parsing_result.declarations[0],
- collect_names ? &for_info->bound_names : nullptr, ok);
- }
+ PreParserExpression* each_variable) {
}
- V8_INLINE PreParserStatement CreateForEachStatementTDZ(
- PreParserStatement init_block, const ForInfo& for_info, bool* ok) {
- if (track_unresolved_variables_) {
- if (IsLexicalVariableMode(for_info.parsing_result.descriptor.mode)) {
- for (auto name : for_info.bound_names) {
- scope()->DeclareVariableName(name, VariableMode::kLet);
- }
- return PreParserStatement::Default();
+ V8_INLINE PreParserBlock CreateForEachStatementTDZ(PreParserBlock init_block,
+ const ForInfo& for_info) {
+ if (IsLexicalVariableMode(for_info.parsing_result.descriptor.mode)) {
+ for (auto name : for_info.bound_names) {
+ bool was_added;
+ DeclareVariableName(name, VariableMode::kLet, scope(), &was_added);
}
+ return PreParserBlock::Default();
}
return init_block;
}
@@ -1504,24 +1399,22 @@ class PreParser : public ParserBase<PreParser> {
V8_INLINE StatementT DesugarLexicalBindingsInForStatement(
PreParserStatement loop, PreParserStatement init,
const PreParserExpression& cond, PreParserStatement next,
- PreParserStatement body, Scope* inner_scope, const ForInfo& for_info,
- bool* ok) {
+ PreParserStatement body, Scope* inner_scope, const ForInfo& for_info) {
// See Parser::DesugarLexicalBindingsInForStatement.
- if (track_unresolved_variables_) {
- for (auto name : for_info.bound_names) {
- inner_scope->DeclareVariableName(
- name, for_info.parsing_result.descriptor.mode);
- }
+ for (auto name : for_info.bound_names) {
+ bool was_added;
+ DeclareVariableName(name, for_info.parsing_result.descriptor.mode,
+ inner_scope, &was_added);
}
return loop;
}
- PreParserStatement BuildParameterInitializationBlock(
- const PreParserFormalParameters& parameters, bool* ok);
+ PreParserBlock BuildParameterInitializationBlock(
+ const PreParserFormalParameters& parameters);
- V8_INLINE PreParserStatement
+ V8_INLINE PreParserBlock
BuildRejectPromiseOnException(PreParserStatement init_block) {
- return PreParserStatement::Default();
+ return PreParserBlock::Default();
}
V8_INLINE void InsertSloppyBlockFunctionVarBindings(DeclarationScope* scope) {
@@ -1531,44 +1424,52 @@ class PreParser : public ParserBase<PreParser> {
V8_INLINE void InsertShadowingVarBindingInitializers(
PreParserStatement block) {}
- V8_INLINE PreParserExpression
- NewThrowReferenceError(MessageTemplate::Template message, int pos) {
+ V8_INLINE PreParserExpression NewThrowReferenceError(MessageTemplate message,
+ int pos) {
return PreParserExpression::Default();
}
- V8_INLINE PreParserExpression
- NewThrowSyntaxError(MessageTemplate::Template message,
- const PreParserIdentifier& arg, int pos) {
+ V8_INLINE PreParserExpression NewThrowSyntaxError(
+ MessageTemplate message, const PreParserIdentifier& arg, int pos) {
return PreParserExpression::Default();
}
- V8_INLINE PreParserExpression
- NewThrowTypeError(MessageTemplate::Template message,
- const PreParserIdentifier& arg, int pos) {
+ V8_INLINE PreParserExpression NewThrowTypeError(
+ MessageTemplate message, const PreParserIdentifier& arg, int pos) {
return PreParserExpression::Default();
}
// Reporting errors.
void ReportMessageAt(Scanner::Location source_location,
- MessageTemplate::Template message,
- const char* arg = nullptr,
+ MessageTemplate message, const char* arg = nullptr,
ParseErrorType error_type = kSyntaxError) {
pending_error_handler()->ReportMessageAt(source_location.beg_pos,
source_location.end_pos, message,
arg, error_type);
+ scanner()->set_parser_error();
}
V8_INLINE void ReportUnidentifiableError() {
- pending_error_handler()->SetUnidentifiableError();
+ pending_error_handler()->set_unidentifiable_error();
+ scanner()->set_parser_error();
}
V8_INLINE void ReportMessageAt(Scanner::Location source_location,
- MessageTemplate::Template message,
+ MessageTemplate message,
const PreParserIdentifier& arg,
ParseErrorType error_type = kSyntaxError) {
UNREACHABLE();
}
+ void ReportMessageAt(Scanner::Location source_location,
+ MessageTemplate message, const AstRawString* arg,
+ ParseErrorType error_type = kSyntaxError) {
+ pending_error_handler()->ReportMessageAt(source_location.beg_pos,
+ source_location.end_pos, message,
+ arg, error_type);
+ scanner()->set_parser_error();
+ }
+
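// Aside: a toy model of the error-reporting change visible above. The old
// bool* ok out-parameters are gone; instead, reporting an error sets sticky
// state on the scanner, and later phases poll it rather than threading ok
// through every signature. Names below are illustrative, not V8's.

#include <string>

class ToyScanner {
 public:
  void set_parser_error() { parser_error_ = true; }
  bool has_parser_error() const { return parser_error_; }

 private:
  bool parser_error_ = false;
};

class ToyParser {
 public:
  void ReportMessage(const std::string& message) {
    last_message_ = message;      // cf. the pending error handler
    scanner_.set_parser_error();  // Sticky: callers can bail out early.
  }
  bool has_error() const { return scanner_.has_parser_error(); }

 private:
  ToyScanner scanner_;
  std::string last_message_;
};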
// "null" return type creators.
V8_INLINE static PreParserIdentifier NullIdentifier() {
return PreParserIdentifier::Null();
@@ -1576,19 +1477,19 @@ class PreParser : public ParserBase<PreParser> {
V8_INLINE static PreParserExpression NullExpression() {
return PreParserExpression::Null();
}
+ V8_INLINE static PreParserExpression FailureExpression() {
+ return PreParserExpression::Failure();
+ }
V8_INLINE static PreParserExpression NullLiteralProperty() {
return PreParserExpression::Null();
}
- V8_INLINE static PreParserExpressionList NullExpressionList() {
- return PreParserExpressionList::Null();
- }
-
V8_INLINE static PreParserStatementList NullStatementList() {
return PreParserStatementList::Null();
}
V8_INLINE static PreParserStatement NullStatement() {
return PreParserStatement::Null();
}
+ V8_INLINE static PreParserBlock NullBlock() { return PreParserBlock::Null(); }
template <typename T>
V8_INLINE static bool IsNull(T subject) {
@@ -1611,38 +1512,32 @@ class PreParser : public ParserBase<PreParser> {
}
V8_INLINE PreParserExpression ThisExpression(int pos = kNoSourcePosition) {
- if (track_unresolved_variables_) {
- scope()->NewUnresolved(factory()->ast_node_factory(),
- ast_value_factory()->this_string(), pos,
- THIS_VARIABLE);
- }
+ scope()->NewUnresolved(factory()->ast_node_factory(),
+ ast_value_factory()->this_string(), pos,
+ THIS_VARIABLE);
return PreParserExpression::This();
}
V8_INLINE PreParserExpression NewSuperPropertyReference(int pos) {
- if (track_unresolved_variables_) {
- scope()->NewUnresolved(factory()->ast_node_factory(),
- ast_value_factory()->this_function_string(), pos,
- NORMAL_VARIABLE);
- scope()->NewUnresolved(factory()->ast_node_factory(),
- ast_value_factory()->this_string(), pos,
- THIS_VARIABLE);
- }
+ scope()->NewUnresolved(factory()->ast_node_factory(),
+ ast_value_factory()->this_function_string(), pos,
+ NORMAL_VARIABLE);
+ scope()->NewUnresolved(factory()->ast_node_factory(),
+ ast_value_factory()->this_string(), pos,
+ THIS_VARIABLE);
return PreParserExpression::Default();
}
V8_INLINE PreParserExpression NewSuperCallReference(int pos) {
- if (track_unresolved_variables_) {
- scope()->NewUnresolved(factory()->ast_node_factory(),
- ast_value_factory()->this_function_string(), pos,
- NORMAL_VARIABLE);
- scope()->NewUnresolved(factory()->ast_node_factory(),
- ast_value_factory()->new_target_string(), pos,
- NORMAL_VARIABLE);
- scope()->NewUnresolved(factory()->ast_node_factory(),
- ast_value_factory()->this_string(), pos,
- THIS_VARIABLE);
- }
+ scope()->NewUnresolved(factory()->ast_node_factory(),
+ ast_value_factory()->this_function_string(), pos,
+ NORMAL_VARIABLE);
+ scope()->NewUnresolved(factory()->ast_node_factory(),
+ ast_value_factory()->new_target_string(), pos,
+ NORMAL_VARIABLE);
+ scope()->NewUnresolved(factory()->ast_node_factory(),
+ ast_value_factory()->this_string(), pos,
+ THIS_VARIABLE);
return PreParserExpression::SuperCallReference();
}
@@ -1656,30 +1551,26 @@ class PreParser : public ParserBase<PreParser> {
V8_INLINE PreParserExpression ExpressionFromLiteral(Token::Value token,
int pos) {
- return PreParserExpression::Default();
+ if (token != Token::STRING) return PreParserExpression::Default();
+ return PreParserExpression::StringLiteral();
}
PreParserExpression ExpressionFromIdentifier(
const PreParserIdentifier& name, int start_position,
- InferName infer = InferName::kYes);
-
- V8_INLINE PreParserExpression ExpressionFromString(int pos) {
- if (scanner()->IsUseStrict()) {
- return PreParserExpression::UseStrictStringLiteral();
+ InferName infer = InferName::kYes) {
+ if (name.string_ != nullptr) {
+ expression_scope()->NewVariable(name.string_, start_position);
}
- return PreParserExpression::StringLiteral();
- }
-
- V8_INLINE PreParserExpressionList NewExpressionList(int size) const {
- return PreParserExpressionList();
+ return PreParserExpression::FromIdentifier(name);
}
- V8_INLINE PreParserExpressionList NewObjectPropertyList(int size) const {
- return PreParserExpressionList();
+ V8_INLINE Variable* DeclareCatchVariableName(
+ Scope* scope, const PreParserIdentifier& identifier) {
+ return scope->DeclareCatchVariableName(identifier.string_);
}
- V8_INLINE PreParserExpressionList NewClassPropertyList(int size) const {
- return PreParserExpressionList();
+ V8_INLINE PreParserPropertyList NewClassPropertyList(int size) const {
+ return PreParserPropertyList();
}
V8_INLINE PreParserStatementList NewStatementList(int size) const {
@@ -1688,7 +1579,7 @@ class PreParser : public ParserBase<PreParser> {
V8_INLINE PreParserExpression
NewV8Intrinsic(const PreParserIdentifier& name,
- const PreParserExpressionList& arguments, int pos, bool* ok) {
+ const PreParserExpressionList& arguments, int pos) {
return PreParserExpression::Default();
}
@@ -1698,74 +1589,28 @@ class PreParser : public ParserBase<PreParser> {
}
V8_INLINE void AddFormalParameter(PreParserFormalParameters* parameters,
- const PreParserExpression& pattern,
+ PreParserExpression& pattern,
const PreParserExpression& initializer,
int initializer_end_position,
bool is_rest) {
- if (track_unresolved_variables_) {
- DCHECK(FLAG_lazy_inner_functions);
- parameters->params.Add(new (zone()) PreParserFormalParameters::Parameter(
- pattern.variables_, is_rest));
- }
+ DeclarationScope* scope = parameters->scope;
+ scope->RecordParameter(is_rest);
parameters->UpdateArityAndFunctionLength(!initializer.IsNull(), is_rest);
}
V8_INLINE void DeclareFormalParameters(
- DeclarationScope* scope,
- const base::ThreadedList<PreParserFormalParameters::Parameter>&
- parameters,
- bool is_simple) {
- if (!is_simple) scope->SetHasNonSimpleParameters();
- if (track_unresolved_variables_) {
- DCHECK(FLAG_lazy_inner_functions);
- for (auto parameter : parameters) {
- DCHECK_IMPLIES(is_simple, parameter->variables_ != nullptr);
- DCHECK_IMPLIES(is_simple, parameter->variables_->LengthForTest() == 1);
- // Make sure each parameter is added only once even if it's a
- // destructuring parameter which contains multiple names.
- bool add_parameter = true;
- if (parameter->variables_ != nullptr) {
- for (auto variable : (*parameter->variables_)) {
- // We declare the parameter name for all names, but only create a
- // parameter entry for the first one.
- scope->DeclareParameterName(variable->raw_name(),
- parameter->is_rest, ast_value_factory(),
- true, add_parameter);
- add_parameter = false;
- }
- }
- if (add_parameter) {
- // No names were declared; declare a dummy one here to up the
- // parameter count.
- DCHECK(!is_simple);
- scope->DeclareParameterName(ast_value_factory()->empty_string(),
- parameter->is_rest, ast_value_factory(),
- false, add_parameter);
- }
- }
- }
+ const PreParserFormalParameters* parameters) {
+ if (!parameters->is_simple) parameters->scope->SetHasNonSimpleParameters();
}
V8_INLINE void DeclareArrowFunctionFormalParameters(
PreParserFormalParameters* parameters, const PreParserExpression& params,
- const Scanner::Location& params_loc, Scanner::Location* duplicate_loc,
- bool* ok) {
- // TODO(wingo): Detect duplicated identifiers in paramlists. Detect
- // parameter lists that are too long.
- if (track_unresolved_variables_) {
- DCHECK(FLAG_lazy_inner_functions);
- if (params.variables_ != nullptr) {
- for (auto variable : *params.variables_) {
- parameters->scope->DeclareVariableName(variable->raw_name(),
- VariableMode::kVar);
- }
- }
- }
+ const Scanner::Location& params_loc) {
}
V8_INLINE PreParserExpression
ExpressionListToExpression(const PreParserExpressionList& args) {
- return PreParserExpression::Default(args.variables_);
+ return PreParserExpression::Default();
}
V8_INLINE void SetFunctionNameFromPropertyName(
@@ -1775,11 +1620,6 @@ class PreParser : public ParserBase<PreParser> {
const PreParserExpression& value, const PreParserExpression& identifier) {
}
- V8_INLINE ZoneList<typename ExpressionClassifier::Error>*
- GetReportedErrorList() const {
- return function_state_->GetReportedErrorList();
- }
-
V8_INLINE void CountUsage(v8::Isolate::UseCounterFeature feature) {
if (use_counts_ != nullptr) ++use_counts_[feature];
}
@@ -1797,10 +1637,9 @@ class PreParser : public ParserBase<PreParser> {
// Preparser's private field members.
int* use_counts_;
- bool track_unresolved_variables_;
PreParserLogger log_;
- PreParsedScopeDataBuilder* preparsed_scope_data_builder_;
+ PreparseDataBuilder* preparse_data_builder_;
};
PreParserExpression PreParser::SpreadCall(const PreParserExpression& function,
diff --git a/deps/v8/src/parsing/rewriter.cc b/deps/v8/src/parsing/rewriter.cc
index 151244f692..5ba7b3ba51 100644
--- a/deps/v8/src/parsing/rewriter.cc
+++ b/deps/v8/src/parsing/rewriter.cc
@@ -18,13 +18,13 @@ class Processor final : public AstVisitor<Processor> {
Processor(uintptr_t stack_limit, DeclarationScope* closure_scope,
Variable* result, AstValueFactory* ast_value_factory)
: result_(result),
- result_assigned_(false),
replacement_(nullptr),
- is_set_(false),
- breakable_(false),
zone_(ast_value_factory->zone()),
closure_scope_(closure_scope),
- factory_(ast_value_factory, ast_value_factory->zone()) {
+ factory_(ast_value_factory, ast_value_factory->zone()),
+ result_assigned_(false),
+ is_set_(false),
+ breakable_(false) {
DCHECK_EQ(closure_scope, closure_scope->GetClosureScope());
InitializeAstVisitor(stack_limit);
}
@@ -32,13 +32,13 @@ class Processor final : public AstVisitor<Processor> {
Processor(Parser* parser, DeclarationScope* closure_scope, Variable* result,
AstValueFactory* ast_value_factory)
: result_(result),
- result_assigned_(false),
replacement_(nullptr),
- is_set_(false),
- breakable_(false),
zone_(ast_value_factory->zone()),
closure_scope_(closure_scope),
- factory_(ast_value_factory, zone_) {
+ factory_(ast_value_factory, zone_),
+ result_assigned_(false),
+ is_set_(false),
+ breakable_(false) {
DCHECK_EQ(closure_scope, closure_scope->GetClosureScope());
InitializeAstVisitor(parser->stack_limit());
}
@@ -64,24 +64,10 @@ class Processor final : public AstVisitor<Processor> {
private:
Variable* result_;
- // We are not tracking result usage via the result_'s use
- // counts (we leave the accurate computation to the
- // usage analyzer). Instead we simple remember if
- // there was ever an assignment to result_.
- bool result_assigned_;
-
// When visiting a node, we "return" a replacement for that node in
// [replacement_]. In many cases this will just be the original node.
Statement* replacement_;
- // To avoid storing to .result all the time, we eliminate some of
- // the stores by keeping track of whether or not we're sure .result
- // will be overwritten anyway. This is a bit more tricky than what I
- // was hoping for.
- bool is_set_;
-
- bool breakable_;
-
class BreakableScope final {
public:
explicit BreakableScope(Processor* processor, bool breakable = true)
@@ -108,6 +94,20 @@ class Processor final : public AstVisitor<Processor> {
void VisitIterationStatement(IterationStatement* stmt);
DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
+
+ // We are not tracking result usage via the result_'s use
+ // counts (we leave the accurate computation to the
+ // usage analyzer). Instead we simply remember if
+ // there was ever an assignment to result_.
+ bool result_assigned_;
+
+ // To avoid storing to .result all the time, we eliminate some of
+ // the stores by keeping track of whether or not we're sure .result
+ // will be overwritten anyway. This is a bit trickier than what I
+ // was hoping for.
+ bool is_set_;
+
+ bool breakable_;
};
@@ -337,8 +337,8 @@ void Processor::VisitDebuggerStatement(DebuggerStatement* node) {
replacement_ = node;
}
-void Processor::VisitInitializeClassFieldsStatement(
- InitializeClassFieldsStatement* node) {
+void Processor::VisitInitializeClassMembersStatement(
+ InitializeClassMembersStatement* node) {
replacement_ = node;
}
@@ -405,36 +405,5 @@ bool Rewriter::Rewrite(ParseInfo* info) {
return true;
}
-bool Rewriter::Rewrite(Parser* parser, DeclarationScope* closure_scope,
- DoExpression* expr, AstValueFactory* factory) {
- DisallowHeapAllocation no_allocation;
- DisallowHandleAllocation no_handles;
- DisallowHandleDereference no_deref;
-
- Block* block = expr->block();
- DCHECK_EQ(closure_scope, closure_scope->GetClosureScope());
- DCHECK(block->scope() == nullptr ||
- block->scope()->GetClosureScope() == closure_scope);
- ZonePtrList<Statement>* body = block->statements();
- VariableProxy* result = expr->result();
- Variable* result_var = result->var();
-
- if (!body->is_empty()) {
- Processor processor(parser, closure_scope, result_var, factory);
- processor.Process(body);
- if (processor.HasStackOverflow()) return false;
-
- if (!processor.result_assigned()) {
- AstNodeFactory* node_factory = processor.factory();
- Expression* undef = node_factory->NewUndefinedLiteral(kNoSourcePosition);
- Statement* completion = node_factory->NewExpressionStatement(
- processor.SetResult(undef), expr->position());
- body->Add(completion, factory->zone());
- }
- }
- return true;
-}
-
-
} // namespace internal
} // namespace v8
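// Aside: a toy sketch of what Processor does, with stand-in types. The
// rewriter captures the completion value of a statement list by assigning
// the last value-producing statement to a synthetic ".result" variable;
// result_assigned_ and is_set_ track when stores can be elided.

#include <string>
#include <vector>

struct ToyStmt {
  bool produces_value;
  std::string text;
};

// Returns true if a statement was rewritten to assign .result (cf.
// result_assigned_). Walking backwards mirrors the is_set_ logic: once the
// final value-producing statement stores .result, earlier stores would be
// overwritten anyway and can be skipped.
bool CaptureCompletionValue(std::vector<ToyStmt>* body) {
  for (auto it = body->rbegin(); it != body->rend(); ++it) {
    if (it->produces_value) {
      it->text = ".result = (" + it->text + ")";
      return true;
    }
  }
  return false;  // Caller would append ".result = undefined;" instead.
}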
diff --git a/deps/v8/src/parsing/rewriter.h b/deps/v8/src/parsing/rewriter.h
index d0b1191a9f..62873b2980 100644
--- a/deps/v8/src/parsing/rewriter.h
+++ b/deps/v8/src/parsing/rewriter.h
@@ -9,7 +9,6 @@ namespace v8 {
namespace internal {
class AstValueFactory;
-class DoExpression;
class Isolate;
class ParseInfo;
class Parser;
@@ -25,15 +24,6 @@ class Rewriter {
// Assumes code has been parsed and scopes have been analyzed. Mutates the
// AST, so the AST should not continue to be used in the case of failure.
static bool Rewrite(ParseInfo* info);
-
- // Rewrite a list of statements, using the same rules as a top-level program,
- // to ensure identical behaviour of completion result. The temporary is added
- // to the closure scope of the do-expression, which matches the closure scope
- // of the outer scope (the do-expression itself runs in a block scope, not a
- // closure scope). This closure scope needs to be passed in since the
- // do-expression could have dropped its own block scope.
- static bool Rewrite(Parser* parser, DeclarationScope* closure_scope,
- DoExpression* expr, AstValueFactory* factory);
};
diff --git a/deps/v8/src/parsing/scanner-character-streams.cc b/deps/v8/src/parsing/scanner-character-streams.cc
index 8472e9f4fc..32dcaacbf5 100644
--- a/deps/v8/src/parsing/scanner-character-streams.cc
+++ b/deps/v8/src/parsing/scanner-character-streams.cc
@@ -20,8 +20,8 @@ namespace internal {
class ScopedExternalStringLock {
public:
- explicit ScopedExternalStringLock(ExternalString* string) {
- DCHECK(string);
+ explicit ScopedExternalStringLock(ExternalString string) {
+ DCHECK(!string.is_null());
if (string->IsExternalOneByteString()) {
resource_ = ExternalOneByteString::cast(string)->resource();
} else {
@@ -33,7 +33,7 @@ class ScopedExternalStringLock {
}
// Copying a lock increases the locking depth.
- ScopedExternalStringLock(const ScopedExternalStringLock& other)
+ ScopedExternalStringLock(const ScopedExternalStringLock& other) V8_NOEXCEPT
: resource_(other.resource_) {
resource_->Lock();
}
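// Aside: a toy model of the copy-increases-depth locking above. Every live
// lock object holds one unit of depth on the underlying resource, so the
// string stays pinned until the last copy is destroyed.

struct ToyResource {
  int lock_depth = 0;
  void Lock() { ++lock_depth; }
  void Unlock() { --lock_depth; }
};

class ToyScopedLock {
 public:
  explicit ToyScopedLock(ToyResource* resource) : resource_(resource) {
    resource_->Lock();
  }
  // Copying takes another unit of depth, matching the copy constructor above.
  ToyScopedLock(const ToyScopedLock& other) : resource_(other.resource_) {
    resource_->Lock();
  }
  ~ToyScopedLock() { resource_->Unlock(); }

 private:
  ToyResource* resource_;
};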
@@ -84,13 +84,16 @@ class OnHeapStream {
OnHeapStream(Handle<String> string, size_t start_offset, size_t end)
: string_(string), start_offset_(start_offset), length_(end) {}
- OnHeapStream(const OnHeapStream& other) : start_offset_(0), length_(0) {
+ OnHeapStream(const OnHeapStream&) V8_NOEXCEPT : start_offset_(0), length_(0) {
UNREACHABLE();
}
- Range<Char> GetDataAt(size_t pos, RuntimeCallStats* stats) {
- return {&string_->GetChars()[start_offset_ + Min(length_, pos)],
- &string_->GetChars()[start_offset_ + length_]};
+ // The no_gc argument is only here because of the templated way this class
+ // is used along with other implementations that require V8 heap access.
+ Range<Char> GetDataAt(size_t pos, RuntimeCallStats* stats,
+ DisallowHeapAllocation* no_gc) {
+ return {&string_->GetChars(*no_gc)[start_offset_ + Min(length_, pos)],
+ &string_->GetChars(*no_gc)[start_offset_ + length_]};
}
static const bool kCanBeCloned = false;
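// Aside: a toy version of the no_gc token threading above. Requiring a
// DisallowHeapAllocation-style scope as a parameter forces callers to open a
// no-allocation region before touching raw interior pointers that a moving
// GC could otherwise invalidate. Types below are illustrative only.

struct ToyNoAllocScope {};  // Stand-in for DisallowHeapAllocation.

struct ToyHeapString {
  const char* chars;
  // The token parameter is unused at runtime; its only job is to prove at
  // the call site that a no-allocation scope is live while the raw pointer
  // is held.
  const char* GetChars(ToyNoAllocScope* /*no_gc*/) const { return chars; }
};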
@@ -109,16 +112,21 @@ class ExternalStringStream {
typedef typename CharTraits<Char>::ExternalString ExternalString;
public:
- ExternalStringStream(ExternalString* string, size_t start_offset,
+ ExternalStringStream(ExternalString string, size_t start_offset,
size_t length)
: lock_(string),
data_(string->GetChars() + start_offset),
length_(length) {}
- ExternalStringStream(const ExternalStringStream& other)
- : lock_(other.lock_), data_(other.data_), length_(other.length_) {}
+ ExternalStringStream(const ExternalStringStream& other) V8_NOEXCEPT
+ : lock_(other.lock_),
+ data_(other.data_),
+ length_(other.length_) {}
- Range<Char> GetDataAt(size_t pos, RuntimeCallStats* stats) {
+ // The no_gc argument is only here because of the templated way this class
+ // is used along with other implementations that require V8 heap access.
+ Range<Char> GetDataAt(size_t pos, RuntimeCallStats* stats,
+ DisallowHeapAllocation* no_gc = nullptr) {
return {&data_[Min(length_, pos)], &data_[length_]};
}
@@ -137,7 +145,10 @@ class TestingStream {
public:
TestingStream(const Char* data, size_t length)
: data_(data), length_(length) {}
- Range<Char> GetDataAt(size_t pos, RuntimeCallStats* stats) {
+ // The no_gc argument is only here because of the templated way this class
+ // is used along with other implementations that require V8 heap access.
+ Range<Char> GetDataAt(size_t pos, RuntimeCallStats* stats,
+ DisallowHeapAllocation* no_gc = nullptr) {
return {&data_[Min(length_, pos)], &data_[length_]};
}
@@ -156,12 +167,15 @@ class ChunkedStream {
explicit ChunkedStream(ScriptCompiler::ExternalSourceStream* source)
: source_(source) {}
- ChunkedStream(const ChunkedStream& other) {
+ ChunkedStream(const ChunkedStream&) V8_NOEXCEPT {
// TODO(rmcilroy): Implement cloning for chunked streams.
UNREACHABLE();
}
- Range<Char> GetDataAt(size_t pos, RuntimeCallStats* stats) {
+ // The no_gc argument is only here because of the templated way this class
+ // is used along with other implementations that require V8 heap access.
+ Range<Char> GetDataAt(size_t pos, RuntimeCallStats* stats,
+ DisallowHeapAllocation* no_gc = nullptr) {
Chunk chunk = FindChunk(pos, stats);
size_t buffer_end = chunk.length;
size_t buffer_pos = Min(buffer_end, pos - chunk.position);
@@ -257,8 +271,9 @@ class BufferedCharacterStream : public Utf16CharacterStream {
buffer_start_ = &buffer_[0];
buffer_cursor_ = buffer_start_;
+ DisallowHeapAllocation no_gc;
Range<uint8_t> range =
- byte_stream_.GetDataAt(position, runtime_call_stats());
+ byte_stream_.GetDataAt(position, runtime_call_stats(), &no_gc);
if (range.length() == 0) {
buffer_end_ = buffer_start_;
return false;
@@ -310,8 +325,9 @@ class UnbufferedCharacterStream : public Utf16CharacterStream {
bool ReadBlock() final {
size_t position = pos();
buffer_pos_ = position;
+ DisallowHeapAllocation no_gc;
Range<uint16_t> range =
- byte_stream_.GetDataAt(position, runtime_call_stats());
+ byte_stream_.GetDataAt(position, runtime_call_stats(), &no_gc);
buffer_start_ = range.start;
buffer_end_ = range.end;
buffer_cursor_ = buffer_start_;
@@ -356,7 +372,9 @@ class RelocatingCharacterStream
}
void UpdateBufferPointers() {
- Range<uint16_t> range = byte_stream_.GetDataAt(0, runtime_call_stats());
+ DisallowHeapAllocation no_gc;
+ Range<uint16_t> range =
+ byte_stream_.GetDataAt(0, runtime_call_stats(), &no_gc);
if (range.start != buffer_start_) {
buffer_cursor_ = (buffer_cursor_ - buffer_start_) + range.start;
buffer_start_ = range.start;
@@ -707,9 +725,9 @@ Utf16CharacterStream* ScannerStream::For(Isolate* isolate, Handle<String> data,
DCHECK_LE(end_pos, data->length());
size_t start_offset = 0;
if (data->IsSlicedString()) {
- SlicedString* string = SlicedString::cast(*data);
+ SlicedString string = SlicedString::cast(*data);
start_offset = string->offset();
- String* parent = string->parent();
+ String parent = string->parent();
if (parent->IsThinString()) parent = ThinString::cast(parent)->actual();
data = handle(parent, isolate);
} else {
diff --git a/deps/v8/src/parsing/scanner-inl.h b/deps/v8/src/parsing/scanner-inl.h
index 9647957062..1e2cf9e447 100644
--- a/deps/v8/src/parsing/scanner-inl.h
+++ b/deps/v8/src/parsing/scanner-inl.h
@@ -6,159 +6,19 @@
#define V8_PARSING_SCANNER_INL_H_
#include "src/char-predicates-inl.h"
+#include "src/parsing/keywords-gen.h"
#include "src/parsing/scanner.h"
-#include "src/unicode-cache-inl.h"
namespace v8 {
namespace internal {
-// Make sure tokens are stored as a single byte.
-STATIC_ASSERT(sizeof(Token::Value) == 1);
-
-// Table of one-character tokens, by character (0x00..0x7F only).
-// clang-format off
-static const Token::Value one_char_tokens[] = {
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::LPAREN, // 0x28
- Token::RPAREN, // 0x29
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::COMMA, // 0x2C
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::COLON, // 0x3A
- Token::SEMICOLON, // 0x3B
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::CONDITIONAL, // 0x3F
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::LBRACK, // 0x5B
- Token::ILLEGAL,
- Token::RBRACK, // 0x5D
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::ILLEGAL,
- Token::LBRACE, // 0x7B
- Token::ILLEGAL,
- Token::RBRACE, // 0x7D
- Token::BIT_NOT, // 0x7E
- Token::ILLEGAL
-};
-// clang-format on
-
// ----------------------------------------------------------------------------
// Keyword Matcher
#define KEYWORDS(KEYWORD_GROUP, KEYWORD) \
KEYWORD_GROUP('a') \
- KEYWORD("arguments", Token::ARGUMENTS) \
- KEYWORD("as", Token::AS) \
KEYWORD("async", Token::ASYNC) \
KEYWORD("await", Token::AWAIT) \
- KEYWORD("anonymous", Token::ANONYMOUS) \
KEYWORD_GROUP('b') \
KEYWORD("break", Token::BREAK) \
KEYWORD_GROUP('c') \
@@ -166,7 +26,6 @@ static const Token::Value one_char_tokens[] = {
KEYWORD("catch", Token::CATCH) \
KEYWORD("class", Token::CLASS) \
KEYWORD("const", Token::CONST) \
- KEYWORD("constructor", Token::CONSTRUCTOR) \
KEYWORD("continue", Token::CONTINUE) \
KEYWORD_GROUP('d') \
KEYWORD("debugger", Token::DEBUGGER) \
@@ -176,17 +35,13 @@ static const Token::Value one_char_tokens[] = {
KEYWORD_GROUP('e') \
KEYWORD("else", Token::ELSE) \
KEYWORD("enum", Token::ENUM) \
- KEYWORD("eval", Token::EVAL) \
KEYWORD("export", Token::EXPORT) \
KEYWORD("extends", Token::EXTENDS) \
KEYWORD_GROUP('f') \
KEYWORD("false", Token::FALSE_LITERAL) \
KEYWORD("finally", Token::FINALLY) \
KEYWORD("for", Token::FOR) \
- KEYWORD("from", Token::FROM) \
KEYWORD("function", Token::FUNCTION) \
- KEYWORD_GROUP('g') \
- KEYWORD("get", Token::GET) \
KEYWORD_GROUP('i') \
KEYWORD("if", Token::IF) \
KEYWORD("implements", Token::FUTURE_STRICT_RESERVED_WORD) \
@@ -196,36 +51,26 @@ static const Token::Value one_char_tokens[] = {
KEYWORD("interface", Token::FUTURE_STRICT_RESERVED_WORD) \
KEYWORD_GROUP('l') \
KEYWORD("let", Token::LET) \
- KEYWORD_GROUP('m') \
- KEYWORD("meta", Token::META) \
KEYWORD_GROUP('n') \
- KEYWORD("name", Token::NAME) \
KEYWORD("new", Token::NEW) \
KEYWORD("null", Token::NULL_LITERAL) \
- KEYWORD_GROUP('o') \
- KEYWORD("of", Token::OF) \
KEYWORD_GROUP('p') \
KEYWORD("package", Token::FUTURE_STRICT_RESERVED_WORD) \
KEYWORD("private", Token::FUTURE_STRICT_RESERVED_WORD) \
KEYWORD("protected", Token::FUTURE_STRICT_RESERVED_WORD) \
- KEYWORD("prototype", Token::PROTOTYPE) \
KEYWORD("public", Token::FUTURE_STRICT_RESERVED_WORD) \
KEYWORD_GROUP('r') \
KEYWORD("return", Token::RETURN) \
KEYWORD_GROUP('s') \
- KEYWORD("set", Token::SET) \
KEYWORD("static", Token::STATIC) \
KEYWORD("super", Token::SUPER) \
KEYWORD("switch", Token::SWITCH) \
KEYWORD_GROUP('t') \
- KEYWORD("target", Token::TARGET) \
KEYWORD("this", Token::THIS) \
KEYWORD("throw", Token::THROW) \
KEYWORD("true", Token::TRUE_LITERAL) \
KEYWORD("try", Token::TRY) \
KEYWORD("typeof", Token::TYPEOF) \
- KEYWORD_GROUP('u') \
- KEYWORD("undefined", Token::UNDEFINED) \
KEYWORD_GROUP('v') \
KEYWORD("var", Token::VAR) \
KEYWORD("void", Token::VOID) \
@@ -233,124 +78,235 @@ static const Token::Value one_char_tokens[] = {
KEYWORD("while", Token::WHILE) \
KEYWORD("with", Token::WITH) \
KEYWORD_GROUP('y') \
- KEYWORD("yield", Token::YIELD) \
- KEYWORD_GROUP('_') \
- KEYWORD("__proto__", Token::PROTO_UNDERSCORED) \
- KEYWORD_GROUP('#') \
- KEYWORD("#constructor", Token::PRIVATE_CONSTRUCTOR)
+ KEYWORD("yield", Token::YIELD)
+
+constexpr bool IsKeywordStart(char c) {
+#define KEYWORD_GROUP_CHECK(ch) c == ch ||
+#define KEYWORD_CHECK(keyword, token)
+ return KEYWORDS(KEYWORD_GROUP_CHECK, KEYWORD_CHECK) /* || */ false;
+#undef KEYWORD_GROUP_CHECK
+#undef KEYWORD_CHECK
+}
V8_INLINE Token::Value KeywordOrIdentifierToken(const uint8_t* input,
int input_length) {
DCHECK_GE(input_length, 1);
- const int kMinLength = 2;
- const int kMaxLength = 12;
- if (input_length < kMinLength || input_length > kMaxLength) {
- return Token::IDENTIFIER;
- }
- switch (input[0]) {
- default:
-#define KEYWORD_GROUP_CASE(ch) \
- break; \
- case ch:
-#define KEYWORD(keyword, token) \
- { \
- /* 'keyword' is a char array, so sizeof(keyword) is */ \
- /* strlen(keyword) plus 1 for the NUL char. */ \
- const int keyword_length = sizeof(keyword) - 1; \
- STATIC_ASSERT(keyword_length >= kMinLength); \
- STATIC_ASSERT(keyword_length <= kMaxLength); \
- DCHECK_EQ(input[0], keyword[0]); \
- DCHECK(token == Token::FUTURE_STRICT_RESERVED_WORD || \
- 0 == strncmp(keyword, Token::String(token), sizeof(keyword))); \
- if (input_length == keyword_length && input[1] == keyword[1] && \
- (keyword_length <= 2 || input[2] == keyword[2]) && \
- (keyword_length <= 3 || input[3] == keyword[3]) && \
- (keyword_length <= 4 || input[4] == keyword[4]) && \
- (keyword_length <= 5 || input[5] == keyword[5]) && \
- (keyword_length <= 6 || input[6] == keyword[6]) && \
- (keyword_length <= 7 || input[7] == keyword[7]) && \
- (keyword_length <= 8 || input[8] == keyword[8]) && \
- (keyword_length <= 9 || input[9] == keyword[9]) && \
- (keyword_length <= 10 || input[10] == keyword[10])) { \
- return token; \
- } \
- }
+ return PerfectKeywordHash::GetToken(reinterpret_cast<const char*>(input),
+ input_length);
+}
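// Aside: PerfectKeywordHash above is generated code (see keywords-gen.h); a
// hand-rolled toy over three keywords shows the shape of the technique: hash
// straight to a slot, then confirm with one length check and one memcmp.

#include <cstring>

enum class ToyToken { kIdentifier, kDo, kIf, kNew };

struct ToyEntry {
  const char* text;
  ToyToken token;
};

// (length + first char) % 4 happens to be collision-free for this keyword
// set; a generated perfect hash guarantees that property for the full set.
ToyToken ToyKeywordOrIdentifier(const char* input, int length) {
  static const ToyEntry table[4] = {{"", ToyToken::kIdentifier},
                                    {"new", ToyToken::kNew},
                                    {"do", ToyToken::kDo},
                                    {"if", ToyToken::kIf}};
  const ToyEntry& entry = table[(length + input[0]) % 4];
  return (std::strlen(entry.text) == static_cast<size_t>(length) &&
          std::memcmp(input, entry.text, length) == 0)
             ? entry.token
             : ToyToken::kIdentifier;
}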
+
+// Recursive constexpr template magic to check if a character is in a given
+// string.
+template <int N>
+constexpr bool IsInString(const char (&s)[N], char c, size_t i = 0) {
+ return i >= N ? false : s[i] == c ? true : IsInString(s, c, i + 1);
+}
+
+inline constexpr bool CanBeKeywordCharacter(char c) {
+ return IsInString(
+#define KEYWORD_GROUP_CASE(ch) // Nothing
+#define KEYWORD(keyword, token) keyword
+ // Use C string literal concatenation ("a" "b" becomes "ab") to build one
+ // giant string containing all the keywords.
KEYWORDS(KEYWORD_GROUP_CASE, KEYWORD)
- }
- return Token::IDENTIFIER;
-#undef KEYWORDS
#undef KEYWORD
#undef KEYWORD_GROUP_CASE
+ ,
+ c);
+}
+
+// Make sure tokens are stored as a single byte.
+STATIC_ASSERT(sizeof(Token::Value) == 1);
+
+// Get the shortest token that this character starts; the token may change
+// depending on subsequent characters.
+constexpr Token::Value GetOneCharToken(char c) {
+ // clang-format off
+ return
+ c == '(' ? Token::LPAREN :
+ c == ')' ? Token::RPAREN :
+ c == '{' ? Token::LBRACE :
+ c == '}' ? Token::RBRACE :
+ c == '[' ? Token::LBRACK :
+ c == ']' ? Token::RBRACK :
+ c == '?' ? Token::CONDITIONAL :
+ c == ':' ? Token::COLON :
+ c == ';' ? Token::SEMICOLON :
+ c == ',' ? Token::COMMA :
+ c == '.' ? Token::PERIOD :
+ c == '|' ? Token::BIT_OR :
+ c == '&' ? Token::BIT_AND :
+ c == '^' ? Token::BIT_XOR :
+ c == '~' ? Token::BIT_NOT :
+ c == '!' ? Token::NOT :
+ c == '<' ? Token::LT :
+ c == '>' ? Token::GT :
+ c == '%' ? Token::MOD :
+ c == '=' ? Token::ASSIGN :
+ c == '+' ? Token::ADD :
+ c == '-' ? Token::SUB :
+ c == '*' ? Token::MUL :
+ c == '/' ? Token::DIV :
+ c == '#' ? Token::PRIVATE_NAME :
+ c == '"' ? Token::STRING :
+ c == '\'' ? Token::STRING :
+ c == '`' ? Token::TEMPLATE_SPAN :
+ c == '\\' ? Token::IDENTIFIER :
+ // Whitespace or line terminator
+ c == ' ' ? Token::WHITESPACE :
+ c == '\t' ? Token::WHITESPACE :
+ c == '\v' ? Token::WHITESPACE :
+ c == '\f' ? Token::WHITESPACE :
+ c == '\r' ? Token::WHITESPACE :
+ c == '\n' ? Token::WHITESPACE :
+ // IsDecimalDigit must be tested before IsAsciiIdentifier
+ IsDecimalDigit(c) ? Token::NUMBER :
+ IsAsciiIdentifier(c) ? Token::IDENTIFIER :
+ Token::ILLEGAL;
+ // clang-format on
}
+// Table of one-character tokens, by character (0x00..0x7F only).
+static const constexpr Token::Value one_char_tokens[128] = {
+#define CALL_GET_SCAN_FLAGS(N) GetOneCharToken(N),
+ INT_0_TO_127_LIST(CALL_GET_SCAN_FLAGS)
+#undef CALL_GET_SCAN_FLAGS
+};
+
+#undef KEYWORDS
+
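// Aside: the INT_0_TO_127_LIST expansion above bakes the whole lookup table
// at compile time by invoking a constexpr function once per character code.
// A four-entry toy version of the same trick:

#include <cstdint>

constexpr uint8_t ToyClassify(int c) { return c == 2 ? 1 : 0; }

#define TOY_INT_0_TO_3_LIST(V) V(0) V(1) V(2) V(3)
#define TOY_ENTRY(N) ToyClassify(N),
static constexpr uint8_t kToyTable[4] = {TOY_INT_0_TO_3_LIST(TOY_ENTRY)};
#undef TOY_ENTRY
#undef TOY_INT_0_TO_3_LIST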
V8_INLINE Token::Value Scanner::ScanIdentifierOrKeyword() {
- LiteralScope literal(this);
- return ScanIdentifierOrKeywordInner(&literal);
+ next().literal_chars.Start();
+ return ScanIdentifierOrKeywordInner();
+}
+
+// Character flags for the fast path of scanning a keyword or identifier token.
+enum class ScanFlags : uint8_t {
+ kTerminatesLiteral = 1 << 0,
+ // "Cannot" rather than "can" so that this flag can be ORed together across
+ // multiple characters.
+ kCannotBeKeyword = 1 << 1,
+ kCannotBeKeywordStart = 1 << 2,
+ kStringTerminator = 1 << 3,
+ kNeedsSlowPath = 1 << 4,
+};
+constexpr uint8_t GetScanFlags(char c) {
+ return
+ // Keywords are all lowercase and only contain letters.
+ // Note that non-identifier characters do not set this flag, so
+ // that it plays well with kTerminatesLiteral.
+ (IsAsciiIdentifier(c) && !CanBeKeywordCharacter(c)
+ ? static_cast<uint8_t>(ScanFlags::kCannotBeKeyword)
+ : 0) |
+ (IsKeywordStart(c)
+ ? 0
+ : static_cast<uint8_t>(ScanFlags::kCannotBeKeywordStart)) |
+ // Anything that isn't an identifier character will terminate the
+ // literal, or at least terminate the literal fast path processing
+ // (like an escape).
+ (!IsAsciiIdentifier(c)
+ ? static_cast<uint8_t>(ScanFlags::kTerminatesLiteral)
+ : 0) |
+ // Possible string termination characters.
+ ((c == '\'' || c == '"' || c == '\n' || c == '\r' || c == '\\')
+ ? static_cast<uint8_t>(ScanFlags::kStringTerminator)
+ : 0) |
+ // Escapes are processed on the slow path.
+ (c == '\\' ? static_cast<uint8_t>(ScanFlags::kNeedsSlowPath) : 0);
+}
+inline bool TerminatesLiteral(uint8_t scan_flags) {
+ return (scan_flags & static_cast<uint8_t>(ScanFlags::kTerminatesLiteral));
+}
+inline bool CanBeKeyword(uint8_t scan_flags) {
+ return !(scan_flags & static_cast<uint8_t>(ScanFlags::kCannotBeKeyword));
}
+inline bool NeedsSlowPath(uint8_t scan_flags) {
+ return (scan_flags & static_cast<uint8_t>(ScanFlags::kNeedsSlowPath));
+}
+inline bool MayTerminateString(uint8_t scan_flags) {
+ return (scan_flags & static_cast<uint8_t>(ScanFlags::kStringTerminator));
+}
+// Table of precomputed scan flags for the 128 ASCII characters, for branchless
+// flag calculation during the scan.
+static constexpr const uint8_t character_scan_flags[128] = {
+#define CALL_GET_SCAN_FLAGS(N) GetScanFlags(N),
+ INT_0_TO_127_LIST(CALL_GET_SCAN_FLAGS)
+#undef CALL_GET_SCAN_FLAGS
+};
-V8_INLINE Token::Value Scanner::ScanIdentifierOrKeywordInner(
- LiteralScope* literal) {
- DCHECK(unicode_cache_->IsIdentifierStart(c0_));
+inline bool CharCanBeKeyword(uc32 c) {
+ return static_cast<uint32_t>(c) < arraysize(character_scan_flags) &&
+ CanBeKeyword(character_scan_flags[c]);
+}
+
+V8_INLINE Token::Value Scanner::ScanIdentifierOrKeywordInner() {
+ DCHECK(IsIdentifierStart(c0_));
bool escaped = false;
- if (IsInRange(c0_, 'a', 'z') || c0_ == '_') {
- do {
+ bool can_be_keyword = true;
+
+ STATIC_ASSERT(arraysize(character_scan_flags) == kMaxAscii + 1);
+ if (V8_LIKELY(static_cast<uint32_t>(c0_) <= kMaxAscii)) {
+ if (V8_LIKELY(c0_ != '\\')) {
+ uint8_t scan_flags = character_scan_flags[c0_];
+ DCHECK(!TerminatesLiteral(scan_flags));
+ STATIC_ASSERT(static_cast<uint8_t>(ScanFlags::kCannotBeKeywordStart) ==
+ static_cast<uint8_t>(ScanFlags::kCannotBeKeyword) << 1);
+ scan_flags >>= 1;
+ // Make sure the shifting above doesn't set NeedsSlowPath. Otherwise we'll
+ // fall into the slow path after scanning the identifier.
+ DCHECK(!NeedsSlowPath(scan_flags));
AddLiteralChar(static_cast<char>(c0_));
- Advance();
- } while (IsInRange(c0_, 'a', 'z') || c0_ == '_');
-
- if (IsDecimalDigit(c0_) || IsInRange(c0_, 'A', 'Z') || c0_ == '$') {
- // Identifier starting with lowercase or _.
- do {
- AddLiteralChar(static_cast<char>(c0_));
- Advance();
- } while (IsAsciiIdentifier(c0_));
-
- if (c0_ <= kMaxAscii && c0_ != '\\') {
- literal->Complete();
- return Token::IDENTIFIER;
+ AdvanceUntil([this, &scan_flags](uc32 c0) {
+ if (V8_UNLIKELY(static_cast<uint32_t>(c0) > kMaxAscii)) {
+ // A non-ascii character means we need to drop through to the slow
+ // path.
+ // TODO(leszeks): This would be most efficient as a goto to the slow
+ // path, check codegen and maybe use a bool instead.
+ scan_flags |= static_cast<uint8_t>(ScanFlags::kNeedsSlowPath);
+ return true;
+ }
+ uint8_t char_flags = character_scan_flags[c0];
+ scan_flags |= char_flags;
+ if (TerminatesLiteral(char_flags)) {
+ return true;
+ } else {
+ AddLiteralChar(static_cast<char>(c0));
+ return false;
+ }
+ });
+
+ if (V8_LIKELY(!NeedsSlowPath(scan_flags))) {
+ if (!CanBeKeyword(scan_flags)) return Token::IDENTIFIER;
+ // Could be a keyword or identifier.
+ Vector<const uint8_t> chars = next().literal_chars.one_byte_literal();
+ return KeywordOrIdentifierToken(chars.start(), chars.length());
}
- } else if (c0_ <= kMaxAscii && c0_ != '\\') {
- // Only a-z+ or _: could be a keyword or identifier.
- Vector<const uint8_t> chars = next().literal_chars.one_byte_literal();
- Token::Value token =
- KeywordOrIdentifierToken(chars.start(), chars.length());
- if (token == Token::IDENTIFIER ||
- token == Token::FUTURE_STRICT_RESERVED_WORD ||
- Token::IsContextualKeyword(token))
- literal->Complete();
- return token;
- }
- } else if (IsInRange(c0_, 'A', 'Z') || c0_ == '$') {
- do {
- AddLiteralChar(static_cast<char>(c0_));
- Advance();
- } while (IsAsciiIdentifier(c0_));
- if (c0_ <= kMaxAscii && c0_ != '\\') {
- literal->Complete();
- return Token::IDENTIFIER;
- }
- } else if (c0_ == '\\') {
- escaped = true;
- uc32 c = ScanIdentifierUnicodeEscape();
- DCHECK(!unicode_cache_->IsIdentifierStart(-1));
- if (c == '\\' || !unicode_cache_->IsIdentifierStart(c)) {
- return Token::ILLEGAL;
+ can_be_keyword = CanBeKeyword(scan_flags);
+ } else {
+ // Special case for escapes at the start of an identifier.
+ escaped = true;
+ uc32 c = ScanIdentifierUnicodeEscape();
+ DCHECK(!IsIdentifierStart(-1));
+ if (c == '\\' || !IsIdentifierStart(c)) {
+ return Token::ILLEGAL;
+ }
+ AddLiteralChar(c);
+ can_be_keyword = CharCanBeKeyword(c);
}
- AddLiteralChar(c);
}
- return ScanIdentifierOrKeywordInnerSlow(literal, escaped);
+ return ScanIdentifierOrKeywordInnerSlow(escaped, can_be_keyword);
}
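
The scan_flags >>= 1 step near the top of the fast path is easy to misread.
A worked micro-example, restating only the bit values declared in ScanFlags
above: the STATIC_ASSERT pins kCannotBeKeywordStart one bit above
kCannotBeKeyword, so a single right shift converts the first character's
"cannot start a keyword" fact into the ordinary "cannot be a keyword" flag
that the loop then ORs into.

#include <cstdint>
#include <cstdio>

int main() {
  const uint8_t kCannotBeKeyword = 1 << 1;
  const uint8_t kCannotBeKeywordStart = 1 << 2;
  const uint8_t kNeedsSlowPath = 1 << 4;

  uint8_t first_char_flags = kCannotBeKeywordStart;  // e.g. for '$' or 'A'
  uint8_t scan_flags = first_char_flags >> 1;
  // The shift only moves bits down, so it can never fabricate the
  // kNeedsSlowPath bit -- exactly what the DCHECK above verifies.
  std::printf("cannot_be_keyword=%d needs_slow_path=%d\n",
              (scan_flags & kCannotBeKeyword) != 0,
              (scan_flags & kNeedsSlowPath) != 0);  // prints: 1 0
}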
V8_INLINE Token::Value Scanner::SkipWhiteSpace() {
int start_position = source_pos();
// We won't skip behind the end of input.
- DCHECK(!unicode_cache_->IsWhiteSpaceOrLineTerminator(kEndOfInput));
+ DCHECK(!IsWhiteSpaceOrLineTerminator(kEndOfInput));
// Advance as long as character is a WhiteSpace or LineTerminator.
- while (unicode_cache_->IsWhiteSpaceOrLineTerminator(c0_)) {
+ while (IsWhiteSpaceOrLineTerminator(c0_)) {
if (!next().after_line_terminator && unibrow::IsLineTerminator(c0_)) {
next().after_line_terminator = true;
}
@@ -371,178 +327,194 @@ V8_INLINE Token::Value Scanner::ScanSingleToken() {
do {
next().location.beg_pos = source_pos();
- if (static_cast<unsigned>(c0_) <= 0x7F) {
- Token::Value token = one_char_tokens[c0_];
- if (token != Token::ILLEGAL) {
- Advance();
- return token;
- }
- }
-
- switch (c0_) {
- case '"':
- case '\'':
- return ScanString();
-
- case '<':
- // < <= << <<= <!--
- Advance();
- if (c0_ == '=') return Select(Token::LTE);
- if (c0_ == '<') return Select('=', Token::ASSIGN_SHL, Token::SHL);
- if (c0_ == '!') {
- token = ScanHtmlComment();
- continue;
- }
- return Token::LT;
-
- case '>':
- // > >= >> >>= >>> >>>=
- Advance();
- if (c0_ == '=') return Select(Token::GTE);
- if (c0_ == '>') {
- // >> >>= >>> >>>=
+ if (V8_LIKELY(static_cast<unsigned>(c0_) <= kMaxAscii)) {
+ token = one_char_tokens[c0_];
+
+ switch (token) {
+ case Token::LPAREN:
+ case Token::RPAREN:
+ case Token::LBRACE:
+ case Token::RBRACE:
+ case Token::LBRACK:
+ case Token::RBRACK:
+ case Token::CONDITIONAL:
+ case Token::COLON:
+ case Token::SEMICOLON:
+ case Token::COMMA:
+ case Token::BIT_NOT:
+ case Token::ILLEGAL:
+ // One character tokens.
+ return Select(token);
+
+ case Token::STRING:
+ return ScanString();
+
+ case Token::LT:
+ // < <= << <<= <!--
Advance();
- if (c0_ == '=') return Select(Token::ASSIGN_SAR);
- if (c0_ == '>') return Select('=', Token::ASSIGN_SHR, Token::SHR);
- return Token::SAR;
- }
- return Token::GT;
-
- case '=':
- // = == === =>
- Advance();
- if (c0_ == '=') return Select('=', Token::EQ_STRICT, Token::EQ);
- if (c0_ == '>') return Select(Token::ARROW);
- return Token::ASSIGN;
-
- case '!':
- // ! != !==
- Advance();
- if (c0_ == '=') return Select('=', Token::NE_STRICT, Token::NE);
- return Token::NOT;
-
- case '+':
- // + ++ +=
- Advance();
- if (c0_ == '+') return Select(Token::INC);
- if (c0_ == '=') return Select(Token::ASSIGN_ADD);
- return Token::ADD;
-
- case '-':
- // - -- --> -=
- Advance();
- if (c0_ == '-') {
- Advance();
- if (c0_ == '>' && next().after_line_terminator) {
- // For compatibility with SpiderMonkey, we skip lines that
- // start with an HTML comment end '-->'.
- token = SkipSingleHTMLComment();
+ if (c0_ == '=') return Select(Token::LTE);
+ if (c0_ == '<') return Select('=', Token::ASSIGN_SHL, Token::SHL);
+ if (c0_ == '!') {
+ token = ScanHtmlComment();
continue;
}
- return Token::DEC;
- }
- if (c0_ == '=') return Select(Token::ASSIGN_SUB);
- return Token::SUB;
-
- case '*':
- // * *=
- Advance();
- if (c0_ == '*') return Select('=', Token::ASSIGN_EXP, Token::EXP);
- if (c0_ == '=') return Select(Token::ASSIGN_MUL);
- return Token::MUL;
-
- case '%':
- // % %=
- return Select('=', Token::ASSIGN_MOD, Token::MOD);
-
- case '/':
- // / // /* /=
- Advance();
- if (c0_ == '/') {
- uc32 c = Peek();
- if (c == '#' || c == '@') {
+ return Token::LT;
+
+ case Token::GT:
+ // > >= >> >>= >>> >>>=
+ Advance();
+ if (c0_ == '=') return Select(Token::GTE);
+ if (c0_ == '>') {
+ // >> >>= >>> >>>=
Advance();
+ if (c0_ == '=') return Select(Token::ASSIGN_SAR);
+ if (c0_ == '>') return Select('=', Token::ASSIGN_SHR, Token::SHR);
+ return Token::SAR;
+ }
+ return Token::GT;
+
+ case Token::ASSIGN:
+ // = == === =>
+ Advance();
+ if (c0_ == '=') return Select('=', Token::EQ_STRICT, Token::EQ);
+ if (c0_ == '>') return Select(Token::ARROW);
+ return Token::ASSIGN;
+
+ case Token::NOT:
+ // ! != !==
+ Advance();
+ if (c0_ == '=') return Select('=', Token::NE_STRICT, Token::NE);
+ return Token::NOT;
+
+ case Token::ADD:
+ // + ++ +=
+ Advance();
+ if (c0_ == '+') return Select(Token::INC);
+ if (c0_ == '=') return Select(Token::ASSIGN_ADD);
+ return Token::ADD;
+
+ case Token::SUB:
+ // - -- --> -=
+ Advance();
+ if (c0_ == '-') {
Advance();
- token = SkipSourceURLComment();
+ if (c0_ == '>' && next().after_line_terminator) {
+ // For compatibility with SpiderMonkey, we skip lines that
+ // start with an HTML comment end '-->'.
+ token = SkipSingleHTMLComment();
+ continue;
+ }
+ return Token::DEC;
+ }
+ if (c0_ == '=') return Select(Token::ASSIGN_SUB);
+ return Token::SUB;
+
+ case Token::MUL:
+ // * *=
+ Advance();
+ if (c0_ == '*') return Select('=', Token::ASSIGN_EXP, Token::EXP);
+ if (c0_ == '=') return Select(Token::ASSIGN_MUL);
+ return Token::MUL;
+
+ case Token::MOD:
+ // % %=
+ return Select('=', Token::ASSIGN_MOD, Token::MOD);
+
+ case Token::DIV:
+ // / // /* /=
+ Advance();
+ if (c0_ == '/') {
+ uc32 c = Peek();
+ if (c == '#' || c == '@') {
+ Advance();
+ Advance();
+ token = SkipSourceURLComment();
+ continue;
+ }
+ token = SkipSingleLineComment();
continue;
}
- token = SkipSingleLineComment();
- continue;
- }
- if (c0_ == '*') {
- token = SkipMultiLineComment();
- continue;
- }
- if (c0_ == '=') return Select(Token::ASSIGN_DIV);
- return Token::DIV;
-
- case '&':
- // & && &=
- Advance();
- if (c0_ == '&') return Select(Token::AND);
- if (c0_ == '=') return Select(Token::ASSIGN_BIT_AND);
- return Token::BIT_AND;
-
- case '|':
- // | || |=
- Advance();
- if (c0_ == '|') return Select(Token::OR);
- if (c0_ == '=') return Select(Token::ASSIGN_BIT_OR);
- return Token::BIT_OR;
-
- case '^':
- // ^ ^=
- return Select('=', Token::ASSIGN_BIT_XOR, Token::BIT_XOR);
-
- case '.':
- // . Number
- Advance();
- if (IsDecimalDigit(c0_)) return ScanNumber(true);
- if (c0_ == '.') {
- if (Peek() == '.') {
- Advance();
- Advance();
- return Token::ELLIPSIS;
+ if (c0_ == '*') {
+ token = SkipMultiLineComment();
+ continue;
}
- }
- return Token::PERIOD;
+ if (c0_ == '=') return Select(Token::ASSIGN_DIV);
+ return Token::DIV;
+
+ case Token::BIT_AND:
+ // & && &=
+ Advance();
+ if (c0_ == '&') return Select(Token::AND);
+ if (c0_ == '=') return Select(Token::ASSIGN_BIT_AND);
+ return Token::BIT_AND;
- case '`':
- Advance();
- return ScanTemplateSpan();
+ case Token::BIT_OR:
+ // | || |=
+ Advance();
+ if (c0_ == '|') return Select(Token::OR);
+ if (c0_ == '=') return Select(Token::ASSIGN_BIT_OR);
+ return Token::BIT_OR;
+
+ case Token::BIT_XOR:
+ // ^ ^=
+ return Select('=', Token::ASSIGN_BIT_XOR, Token::BIT_XOR);
- case '#':
- return ScanPrivateName();
+ case Token::PERIOD:
+ // . Number
+ Advance();
+ if (IsDecimalDigit(c0_)) return ScanNumber(true);
+ if (c0_ == '.') {
+ if (Peek() == '.') {
+ Advance();
+ Advance();
+ return Token::ELLIPSIS;
+ }
+ }
+ return Token::PERIOD;
- default:
- if (unicode_cache_->IsIdentifierStart(c0_) ||
- (CombineSurrogatePair() &&
- unicode_cache_->IsIdentifierStart(c0_))) {
- Token::Value token = ScanIdentifierOrKeyword();
- if (!Token::IsContextualKeyword(token)) return token;
+ case Token::TEMPLATE_SPAN:
+ Advance();
+ return ScanTemplateSpan();
- next().contextual_token = token;
- return Token::IDENTIFIER;
- }
- if (IsDecimalDigit(c0_)) return ScanNumber(false);
- if (c0_ == kEndOfInput) return Token::EOS;
- token = SkipWhiteSpace();
- continue;
+ case Token::PRIVATE_NAME:
+ return ScanPrivateName();
+
+ case Token::WHITESPACE:
+ token = SkipWhiteSpace();
+ continue;
+
+ case Token::NUMBER:
+ return ScanNumber(false);
+
+ case Token::IDENTIFIER:
+ return ScanIdentifierOrKeyword();
+
+ default:
+ UNREACHABLE();
+ }
}
+
+ if (IsIdentifierStart(c0_) ||
+ (CombineSurrogatePair() && IsIdentifierStart(c0_))) {
+ return ScanIdentifierOrKeyword();
+ }
+ if (c0_ == kEndOfInput) {
+ return source_->has_parser_error() ? Token::ILLEGAL : Token::EOS;
+ }
+ token = SkipWhiteSpace();
+
// Continue scanning for tokens as long as we're just skipping whitespace.
} while (token == Token::WHITESPACE);
return token;
}
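
Most arms of the switch above funnel through Scanner::Select(), a
maximal-munch helper: consume the dispatched character, optionally match one
more, and choose between two tokens. A toy reconstruction over a flat char
buffer -- a sketch of the shape, not V8's actual class:

#include <cstdio>
#include <initializer_list>

enum class Tok { LT, LTE, SHL, ASSIGN_SHL };

struct MiniScanner {
  const char* p;

  char c0() const { return *p; }
  void Advance() {
    if (*p != '\0') ++p;
  }

  // One-character form: consume and produce tok unconditionally.
  Tok Select(Tok tok) {
    Advance();
    return tok;
  }
  // Two-way form: consume, then commit to 'then' only on a match.
  Tok Select(char next, Tok then, Tok otherwise) {
    Advance();
    if (c0() == next) {
      Advance();
      return then;
    }
    return otherwise;
  }

  Tok ScanLess() {  // < <= << <<=
    Advance();      // consume the '<' we dispatched on
    if (c0() == '=') return Select(Tok::LTE);
    if (c0() == '<') return Select('=', Tok::ASSIGN_SHL, Tok::SHL);
    return Tok::LT;
  }
};

int main() {
  for (const char* src : {"<", "<=", "<<", "<<="}) {
    MiniScanner s{src};
    std::printf("%-3s -> %d\n", src, static_cast<int>(s.ScanLess()));
  }
}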
-void Scanner::Scan() {
- next().literal_chars.Drop();
- next().raw_literal_chars.Drop();
- next().contextual_token = Token::UNINITIALIZED;
- next().invalid_template_escape_message = MessageTemplate::kNone;
+void Scanner::Scan(TokenDesc* next_desc) {
+ DCHECK_EQ(next_desc, &next());
- next().token = ScanSingleToken();
- next().location.end_pos = source_pos();
+ next_desc->token = ScanSingleToken();
+ DCHECK_IMPLIES(has_parser_error(), next_desc->token == Token::ILLEGAL);
+ next_desc->location.end_pos = source_pos();
#ifdef DEBUG
SanityCheckTokenDesc(current());
@@ -551,6 +523,8 @@ void Scanner::Scan() {
#endif
}
+void Scanner::Scan() { Scan(next_); }
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/parsing/scanner.cc b/deps/v8/src/parsing/scanner.cc
index 525b1bc681..43fc589e88 100644
--- a/deps/v8/src/parsing/scanner.cc
+++ b/deps/v8/src/parsing/scanner.cc
@@ -13,16 +13,15 @@
#include "src/ast/ast-value-factory.h"
#include "src/conversions-inl.h"
#include "src/objects/bigint.h"
-#include "src/parsing/duplicate-finder.h" // For Scanner::FindSymbol
#include "src/parsing/scanner-inl.h"
+#include "src/zone/zone.h"
namespace v8 {
namespace internal {
class Scanner::ErrorState {
public:
- ErrorState(MessageTemplate::Template* message_stack,
- Scanner::Location* location_stack)
+ ErrorState(MessageTemplate* message_stack, Scanner::Location* location_stack)
: message_stack_(message_stack),
old_message_(*message_stack),
location_stack_(location_stack),
@@ -49,8 +48,8 @@ class Scanner::ErrorState {
}
private:
- MessageTemplate::Template* const message_stack_;
- MessageTemplate::Template const old_message_;
+ MessageTemplate* const message_stack_;
+ MessageTemplate const old_message_;
Scanner::Location* const location_stack_;
Scanner::Location const old_location_;
};
@@ -59,7 +58,6 @@ class Scanner::ErrorState {
// Scanner::LiteralBuffer
Handle<String> Scanner::LiteralBuffer::Internalize(Isolate* isolate) const {
- DCHECK(is_used_);
if (is_one_byte()) {
return isolate->factory()->InternalizeOneByteString(one_byte_literal());
}
@@ -67,20 +65,21 @@ Handle<String> Scanner::LiteralBuffer::Internalize(Isolate* isolate) const {
}
int Scanner::LiteralBuffer::NewCapacity(int min_capacity) {
- int capacity = Max(min_capacity, backing_store_.length());
- int new_capacity = Min(capacity * kGrowthFactory, capacity + kMaxGrowth);
- return new_capacity;
+ return min_capacity < (kMaxGrowth / (kGrowthFactor - 1))
+ ? min_capacity * kGrowthFactor
+ : min_capacity + kMaxGrowth;
}
void Scanner::LiteralBuffer::ExpandBuffer() {
- Vector<byte> new_store = Vector<byte>::New(NewCapacity(kInitialCapacity));
+ int min_capacity = Max(kInitialCapacity, backing_store_.length());
+ Vector<byte> new_store = Vector<byte>::New(NewCapacity(min_capacity));
MemCopy(new_store.start(), backing_store_.start(), position_);
backing_store_.Dispose();
backing_store_ = new_store;
}
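
The rewritten NewCapacity grows geometrically while the buffer is small and
linearly once it is large: the threshold kMaxGrowth / (kGrowthFactor - 1) is
precisely where quadrupling would start adding more than kMaxGrowth bytes.
The same policy in isolation, with the constants inlined:

#include <cstdio>

constexpr int kGrowthFactor = 4;
constexpr int kMaxGrowth = 1 * 1024 * 1024;  // 1 MB

int NewCapacity(int min_capacity) {
  // Below the threshold, min_capacity * 4 adds 3 * min_capacity < kMaxGrowth.
  return min_capacity < (kMaxGrowth / (kGrowthFactor - 1))
             ? min_capacity * kGrowthFactor
             : min_capacity + kMaxGrowth;
}

int main() {
  // 16, 64, 256, ... quadrupling, then +1MB steps from ~341KB upwards.
  for (int capacity = 16; capacity < 8 * 1024 * 1024;
       capacity = NewCapacity(capacity)) {
    std::printf("%d\n", capacity);
  }
}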
void Scanner::LiteralBuffer::ConvertToTwoByte() {
- DCHECK(is_one_byte_);
+ DCHECK(is_one_byte());
Vector<byte> new_store;
int new_content_size = position_ * kUC16Size;
if (new_content_size >= backing_store_.length()) {
@@ -104,7 +103,7 @@ void Scanner::LiteralBuffer::ConvertToTwoByte() {
}
void Scanner::LiteralBuffer::AddTwoByteChar(uc32 code_unit) {
- DCHECK(!is_one_byte_);
+ DCHECK(!is_one_byte());
if (position_ >= backing_store_.length()) ExpandBuffer();
if (code_unit <=
static_cast<uc32>(unibrow::Utf16::kMaxNonSurrogateCharCode)) {
@@ -124,36 +123,23 @@ void Scanner::LiteralBuffer::AddTwoByteChar(uc32 code_unit) {
// ----------------------------------------------------------------------------
// Scanner::BookmarkScope
-const size_t Scanner::BookmarkScope::kBookmarkAtFirstPos =
- std::numeric_limits<size_t>::max() - 2;
const size_t Scanner::BookmarkScope::kNoBookmark =
std::numeric_limits<size_t>::max() - 1;
const size_t Scanner::BookmarkScope::kBookmarkWasApplied =
std::numeric_limits<size_t>::max();
-void Scanner::BookmarkScope::Set() {
+void Scanner::BookmarkScope::Set(size_t position) {
DCHECK_EQ(bookmark_, kNoBookmark);
-
- // The first token is a bit special, since current_ will still be
- // uninitialized. In this case, store kBookmarkAtFirstPos and special-case it
- // when
- // applying the bookmark.
- DCHECK_IMPLIES(scanner_->current().token == Token::UNINITIALIZED,
- scanner_->current().location.beg_pos ==
- scanner_->next().location.beg_pos);
- bookmark_ = (scanner_->current().token == Token::UNINITIALIZED)
- ? kBookmarkAtFirstPos
- : scanner_->location().beg_pos;
+ bookmark_ = position;
}
void Scanner::BookmarkScope::Apply() {
DCHECK(HasBeenSet()); // Caller hasn't called SetBookmark.
- if (bookmark_ == kBookmarkAtFirstPos) {
- scanner_->SeekNext(0);
+ if (had_parser_error_) {
+ scanner_->set_parser_error();
} else {
+ scanner_->reset_parser_error_flag();
scanner_->SeekNext(bookmark_);
- scanner_->Next();
- DCHECK_EQ(scanner_->location().beg_pos, static_cast<int>(bookmark_));
}
bookmark_ = kBookmarkWasApplied;
}
@@ -169,10 +155,8 @@ bool Scanner::BookmarkScope::HasBeenApplied() const {
// ----------------------------------------------------------------------------
// Scanner
-Scanner::Scanner(UnicodeCache* unicode_cache, Utf16CharacterStream* source,
- bool is_module)
- : unicode_cache_(unicode_cache),
- source_(source),
+Scanner::Scanner(Utf16CharacterStream* source, bool is_module)
+ : source_(source),
found_html_comment_(false),
allow_harmony_numeric_separator_(false),
is_module_(is_module),
@@ -241,13 +225,14 @@ Token::Value Scanner::Next() {
// current_ as next_ and scan into it, leaving next_next_ uninitialized.
if (V8_LIKELY(next_next().token == Token::UNINITIALIZED)) {
next_ = previous;
- next().after_line_terminator = false;
- Scan();
+ // Use 'previous' instead of 'next_' because for some reason the compiler
+ // thinks 'next_' could be modified before the entry into Scan.
+ previous->after_line_terminator = false;
+ Scan(previous);
} else {
next_ = next_next_;
next_next_ = previous;
previous->token = Token::UNINITIALIZED;
- previous->contextual_token = Token::UNINITIALIZED;
DCHECK_NE(Token::UNINITIALIZED, current().token);
}
return current().token;
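
The pointer juggling in Next() is a three-slot rotation over token_storage_:
either the stale current slot is recycled and scanned into, or a token
already buffered by PeekAhead() is consumed. A sketch of just the rotation,
with an incrementing int standing in for Scan():

#include <cstdio>

struct TokenDesc { int token; };  // stand-in; V8's carries literals etc.
constexpr int kUninitialized = -1;

struct TokenRing {
  TokenDesc storage[3] = {{kUninitialized}, {kUninitialized}, {kUninitialized}};
  TokenDesc* current = &storage[0];
  TokenDesc* next = &storage[1];
  TokenDesc* next_next = &storage[2];
  int scanned = 0;

  int Next() {
    TokenDesc* previous = current;
    current = next;
    if (next_next->token == kUninitialized) {
      // Nothing buffered by PeekAhead(): recycle the old current slot.
      next = previous;
      next->token = ++scanned;  // stands in for Scan(previous)
    } else {
      // PeekAhead() already scanned one token ahead; consume it.
      next = next_next;
      next_next = previous;
      previous->token = kUninitialized;
    }
    return current->token;
  }
};

int main() {
  TokenRing ring;
  ring.next->token = ++ring.scanned;  // the initial Scan() of token 1
  for (int i = 0; i < 4; ++i) std::printf("%d\n", ring.Next());  // 1 2 3 4
}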
@@ -300,42 +285,41 @@ Token::Value Scanner::SkipSourceURLComment() {
void Scanner::TryToParseSourceURLComment() {
// Magic comments are of the form: //[#@]\s<name>=\s*<value>\s*.* and this
// function will just return if it cannot parse a magic comment.
- DCHECK(!unicode_cache_->IsWhiteSpaceOrLineTerminator(kEndOfInput));
- if (!unicode_cache_->IsWhiteSpace(c0_)) return;
+ DCHECK(!IsWhiteSpaceOrLineTerminator(kEndOfInput));
+ if (!IsWhiteSpace(c0_)) return;
Advance();
LiteralBuffer name;
name.Start();
- while (c0_ != kEndOfInput &&
- !unicode_cache_->IsWhiteSpaceOrLineTerminator(c0_) && c0_ != '=') {
+ while (c0_ != kEndOfInput && !IsWhiteSpaceOrLineTerminator(c0_) &&
+ c0_ != '=') {
name.AddChar(c0_);
Advance();
}
if (!name.is_one_byte()) return;
Vector<const uint8_t> name_literal = name.one_byte_literal();
LiteralBuffer* value;
- if (name_literal == STATIC_CHAR_VECTOR("sourceURL")) {
+ if (name_literal == StaticCharVector("sourceURL")) {
value = &source_url_;
- } else if (name_literal == STATIC_CHAR_VECTOR("sourceMappingURL")) {
+ } else if (name_literal == StaticCharVector("sourceMappingURL")) {
value = &source_mapping_url_;
} else {
return;
}
if (c0_ != '=')
return;
- value->Drop();
value->Start();
Advance();
- while (unicode_cache_->IsWhiteSpace(c0_)) {
+ while (IsWhiteSpace(c0_)) {
Advance();
}
while (c0_ != kEndOfInput && !unibrow::IsLineTerminator(c0_)) {
// Disallowed characters.
if (c0_ == '"' || c0_ == '\'') {
- value->Drop();
+ value->Start();
return;
}
- if (unicode_cache_->IsWhiteSpace(c0_)) {
+ if (IsWhiteSpace(c0_)) {
break;
}
value->AddChar(c0_);
@@ -343,8 +327,8 @@ void Scanner::TryToParseSourceURLComment() {
}
// Allow whitespace at the end.
while (c0_ != kEndOfInput && !unibrow::IsLineTerminator(c0_)) {
- if (!unicode_cache_->IsWhiteSpace(c0_)) {
- value->Drop();
+ if (!IsWhiteSpace(c0_)) {
+ value->Start();
break;
}
Advance();
@@ -377,6 +361,13 @@ Token::Value Scanner::SkipMultiLineComment() {
return Token::ILLEGAL;
}
+void Scanner::SkipHashBang() {
+ if (c0_ == '#' && Peek() == '!' && source_pos() == 0) {
+ SkipSingleLineComment();
+ Scan();
+ }
+}
+
Token::Value Scanner::ScanHtmlComment() {
// Check for <!-- comments.
DCHECK_EQ(c0_, '!');
@@ -393,51 +384,20 @@ Token::Value Scanner::ScanHtmlComment() {
#ifdef DEBUG
void Scanner::SanityCheckTokenDesc(const TokenDesc& token) const {
- // Most tokens should not have literal_chars or even raw_literal chars.
- // The rules are:
- // - UNINITIALIZED: we don't care.
- // - TEMPLATE_*: need both literal + raw literal chars.
- // - IDENTIFIERS, STRINGS, etc.: need a literal, but no raw literal.
- // - all others: should have neither.
- // Furthermore, only TEMPLATE_* tokens can have a
- // invalid_template_escape_message.
+ // Only TEMPLATE_* tokens can have an invalid_template_escape_message.
+ // ILLEGAL and UNINITIALIZED can have garbage for the field.
switch (token.token) {
case Token::UNINITIALIZED:
+ case Token::ILLEGAL:
// token.literal_chars & other members might be garbage. That's ok.
- break;
case Token::TEMPLATE_SPAN:
case Token::TEMPLATE_TAIL:
- DCHECK(token.raw_literal_chars.is_used());
- DCHECK(token.literal_chars.is_used());
- break;
- case Token::ESCAPED_KEYWORD:
- case Token::ESCAPED_STRICT_RESERVED_WORD:
- case Token::FUTURE_STRICT_RESERVED_WORD:
- case Token::IDENTIFIER:
- case Token::NUMBER:
- case Token::BIGINT:
- case Token::REGEXP_LITERAL:
- case Token::SMI:
- case Token::STRING:
- case Token::PRIVATE_NAME:
- DCHECK(token.literal_chars.is_used());
- DCHECK(!token.raw_literal_chars.is_used());
- DCHECK_EQ(token.invalid_template_escape_message, MessageTemplate::kNone);
break;
default:
- DCHECK(!token.literal_chars.is_used());
- DCHECK(!token.raw_literal_chars.is_used());
DCHECK_EQ(token.invalid_template_escape_message, MessageTemplate::kNone);
break;
}
-
- DCHECK_IMPLIES(token.token != Token::IDENTIFIER,
- token.contextual_token == Token::UNINITIALIZED);
- DCHECK_IMPLIES(token.contextual_token != Token::UNINITIALIZED,
- token.token == Token::IDENTIFIER &&
- Token::IsContextualKeyword(token.contextual_token));
- DCHECK(!Token::IsContextualKeyword(token.token));
}
#endif // DEBUG
@@ -541,24 +501,45 @@ Token::Value Scanner::ScanString() {
uc32 quote = c0_;
Advance(); // consume quote
- LiteralScope literal(this);
+ next().literal_chars.Start();
while (true) {
+ if (V8_UNLIKELY(c0_ == kEndOfInput)) return Token::ILLEGAL;
+ if ((V8_UNLIKELY(static_cast<uint32_t>(c0_) >= kMaxAscii) &&
+ !unibrow::IsStringLiteralLineTerminator(c0_)) ||
+ !MayTerminateString(character_scan_flags[c0_])) {
+ AddLiteralChar(c0_);
+ AdvanceUntil([this](uc32 c0) {
+ if (V8_UNLIKELY(static_cast<uint32_t>(c0) > kMaxAscii)) {
+ if (V8_UNLIKELY(unibrow::IsStringLiteralLineTerminator(c0))) {
+ return true;
+ }
+ AddLiteralChar(c0);
+ return false;
+ }
+ uint8_t char_flags = character_scan_flags[c0];
+ if (MayTerminateString(char_flags)) return true;
+ AddLiteralChar(c0);
+ return false;
+ });
+ }
if (c0_ == quote) {
- literal.Complete();
Advance();
return Token::STRING;
}
- if (c0_ == kEndOfInput || unibrow::IsStringLiteralLineTerminator(c0_)) {
- return Token::ILLEGAL;
- }
if (c0_ == '\\') {
Advance();
// TODO(verwaest): Check whether we can remove the additional check.
- if (c0_ == kEndOfInput || !ScanEscape<false>()) {
+ if (V8_UNLIKELY(c0_ == kEndOfInput || !ScanEscape<false>())) {
return Token::ILLEGAL;
}
continue;
}
+ if (V8_UNLIKELY(c0_ == kEndOfInput ||
+ unibrow::IsStringLiteralLineTerminator(c0_))) {
+ return Token::ILLEGAL;
+ }
+ DCHECK_NE(quote, c0_);
+ DCHECK((c0_ == '\'' || c0_ == '"'));
AddLiteralCharAdvance();
}
}
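
Both the identifier and the string fast paths above lean on AdvanceUntil,
which bulk-skips characters that cannot possibly end the literal and only
returns control for the interesting ones. The idea sketched with
std::find_if over a flat buffer (the real stream refills its buffer chunk by
chunk, and the real predicate also watches for line terminators):

#include <algorithm>
#include <cstdio>
#include <string>

int main() {
  std::string src = "hello world\"tail";
  auto may_terminate = [](char c) { return c == '"' || c == '\\'; };
  // Bulk-skip everything that cannot end the string literal...
  auto it = std::find_if(src.begin(), src.end(), may_terminate);
  // ...then hand the interesting character back to the per-case code.
  std::printf("literal=%.*s stop='%c'\n",
              static_cast<int>(it - src.begin()), src.data(), *it);
}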
@@ -570,17 +551,17 @@ Token::Value Scanner::ScanPrivateName() {
return Token::ILLEGAL;
}
- LiteralScope literal(this);
+ next().literal_chars.Start();
DCHECK_EQ(c0_, '#');
- DCHECK(!unicode_cache_->IsIdentifierStart(kEndOfInput));
- if (!unicode_cache_->IsIdentifierStart(Peek())) {
+ DCHECK(!IsIdentifierStart(kEndOfInput));
+ if (!IsIdentifierStart(Peek())) {
ReportScannerError(source_pos(),
MessageTemplate::kInvalidOrUnexpectedToken);
return Token::ILLEGAL;
}
AddLiteralCharAdvance();
- Token::Value token = ScanIdentifierOrKeywordInner(&literal);
+ Token::Value token = ScanIdentifierOrKeywordInner();
return token == Token::ILLEGAL ? Token::ILLEGAL : Token::PRIVATE_NAME;
}
@@ -605,7 +586,7 @@ Token::Value Scanner::ScanTemplateSpan() {
ErrorState octal_error_state(&octal_message_, &octal_pos_);
Token::Value result = Token::TEMPLATE_SPAN;
- LiteralScope literal(this);
+ next().literal_chars.Start();
next().raw_literal_chars.Start();
const bool capture_raw = true;
while (true) {
@@ -658,10 +639,8 @@ Token::Value Scanner::ScanTemplateSpan() {
AddLiteralChar(c);
}
}
- literal.Complete();
next().location.end_pos = source_pos();
next().token = result;
- next().contextual_token = Token::UNINITIALIZED;
return result;
}
@@ -669,7 +648,6 @@ Token::Value Scanner::ScanTemplateSpan() {
Handle<String> Scanner::SourceUrl(Isolate* isolate) const {
Handle<String> tmp;
if (source_url_.length() > 0) {
- DCHECK(source_url_.is_used());
tmp = source_url_.Internalize(isolate);
}
return tmp;
@@ -678,7 +656,6 @@ Handle<String> Scanner::SourceUrl(Isolate* isolate) const {
Handle<String> Scanner::SourceMappingUrl(Isolate* isolate) const {
Handle<String> tmp;
if (source_mapping_url_.length() > 0) {
- DCHECK(source_mapping_url_.is_used());
tmp = source_mapping_url_.Internalize(isolate);
}
return tmp;
@@ -847,7 +824,7 @@ Token::Value Scanner::ScanNumber(bool seen_period) {
NumberKind kind = DECIMAL;
- LiteralScope literal(this);
+ next().literal_chars.Start();
bool at_start = !seen_period;
int start_pos = source_pos(); // For reporting octal positions.
if (seen_period) {
@@ -905,10 +882,8 @@ Token::Value Scanner::ScanNumber(bool seen_period) {
}
if (next().literal_chars.one_byte_literal().length() <= 10 &&
- value <= Smi::kMaxValue && c0_ != '.' &&
- !unicode_cache_->IsIdentifierStart(c0_)) {
+ value <= Smi::kMaxValue && c0_ != '.' && !IsIdentifierStart(c0_)) {
next().smi_value_ = static_cast<uint32_t>(value);
- literal.Complete();
if (kind == DECIMAL_WITH_LEADING_ZERO) {
octal_pos_ = Location(start_pos, source_pos());
@@ -963,12 +938,10 @@ Token::Value Scanner::ScanNumber(bool seen_period) {
// not be an identifier start or a decimal digit; see ECMA-262
// section 7.8.3, page 17 (note that we read only one decimal digit
// if the value is 0).
- if (IsDecimalDigit(c0_) || unicode_cache_->IsIdentifierStart(c0_)) {
+ if (IsDecimalDigit(c0_) || IsIdentifierStart(c0_)) {
return Token::ILLEGAL;
}
- literal.Complete();
-
if (kind == DECIMAL_WITH_LEADING_ZERO) {
octal_pos_ = Location(start_pos, source_pos());
octal_message_ = MessageTemplate::kStrictDecimalWithLeadingZero;
@@ -1004,54 +977,49 @@ uc32 Scanner::ScanUnicodeEscape() {
return ScanHexNumber<capture_raw, unicode>(4);
}
-Token::Value Scanner::ScanIdentifierOrKeywordInnerSlow(LiteralScope* literal,
- bool escaped) {
+Token::Value Scanner::ScanIdentifierOrKeywordInnerSlow(bool escaped,
+ bool can_be_keyword) {
while (true) {
if (c0_ == '\\') {
escaped = true;
uc32 c = ScanIdentifierUnicodeEscape();
// Only allow legal identifier part characters.
// TODO(verwaest): Make this true.
- // DCHECK(!unicode_cache_->IsIdentifierPart('\\'));
- DCHECK(!unicode_cache_->IsIdentifierPart(-1));
- if (c == '\\' || !unicode_cache_->IsIdentifierPart(c)) {
+ // DCHECK(!IsIdentifierPart('\\'));
+ DCHECK(!IsIdentifierPart(-1));
+ if (c == '\\' || !IsIdentifierPart(c)) {
return Token::ILLEGAL;
}
+ can_be_keyword = can_be_keyword && CharCanBeKeyword(c);
AddLiteralChar(c);
- } else if (unicode_cache_->IsIdentifierPart(c0_) ||
- (CombineSurrogatePair() &&
- unicode_cache_->IsIdentifierPart(c0_))) {
+ } else if (IsIdentifierPart(c0_) ||
+ (CombineSurrogatePair() && IsIdentifierPart(c0_))) {
+ can_be_keyword = can_be_keyword && CharCanBeKeyword(c0_);
AddLiteralCharAdvance();
} else {
break;
}
}
- if (next().literal_chars.is_one_byte()) {
+ if (can_be_keyword && next().literal_chars.is_one_byte()) {
Vector<const uint8_t> chars = next().literal_chars.one_byte_literal();
Token::Value token =
KeywordOrIdentifierToken(chars.start(), chars.length());
/* TODO(adamk): YIELD should be handled specially. */
if (token == Token::FUTURE_STRICT_RESERVED_WORD) {
- literal->Complete();
if (escaped) return Token::ESCAPED_STRICT_RESERVED_WORD;
return token;
}
- if (token == Token::IDENTIFIER || Token::IsContextualKeyword(token)) {
- literal->Complete();
- return token;
- }
+ if (token == Token::IDENTIFIER) return token;
if (!escaped) return token;
- literal->Complete();
if (token == Token::LET || token == Token::STATIC) {
return Token::ESCAPED_STRICT_RESERVED_WORD;
}
return Token::ESCAPED_KEYWORD;
}
- literal->Complete();
return Token::IDENTIFIER;
}
@@ -1065,7 +1033,7 @@ bool Scanner::ScanRegExpPattern() {
// Scan regular expression body: According to ECMA-262, 3rd, 7.8.5,
// the scanner should pass uninterpreted bodies to the RegExp
// constructor.
- LiteralScope literal(this);
+ next().literal_chars.Start();
if (next().token == Token::ASSIGN_DIV) {
AddLiteralChar('=');
}
@@ -1098,9 +1066,7 @@ bool Scanner::ScanRegExpPattern() {
}
Advance(); // consume '/'
- literal.Complete();
next().token = Token::REGEXP_LITERAL;
- next().contextual_token = Token::UNINITIALIZED;
return true;
}
@@ -1110,7 +1076,7 @@ Maybe<RegExp::Flags> Scanner::ScanRegExpFlags() {
// Scan regular expression flags.
int flags = 0;
- while (unicode_cache_->IsIdentifierPart(c0_)) {
+ while (IsIdentifierPart(c0_)) {
RegExp::Flags flag = RegExp::kNone;
switch (c0_) {
case 'g':
@@ -1173,7 +1139,6 @@ const AstRawString* Scanner::CurrentRawSymbol(
double Scanner::DoubleValue() {
DCHECK(is_literal_one_byte());
return StringToDouble(
- unicode_cache_,
literal_one_byte_string(),
ALLOW_HEX | ALLOW_OCTAL | ALLOW_IMPLICIT_OCTAL | ALLOW_BINARY);
}
@@ -1188,14 +1153,6 @@ const char* Scanner::CurrentLiteralAsCString(Zone* zone) const {
return buffer;
}
-bool Scanner::IsDuplicateSymbol(DuplicateFinder* duplicate_finder,
- AstValueFactory* ast_value_factory) const {
- DCHECK_NOT_NULL(duplicate_finder);
- DCHECK_NOT_NULL(ast_value_factory);
- const AstRawString* string = CurrentSymbol(ast_value_factory);
- return !duplicate_finder->known_symbols_.insert(string).second;
-}
-
void Scanner::SeekNext(size_t position) {
// Use with care: This cleanly resets most, but not all scanner state.
// TODO(vogelheim): Fix this, or at least DCHECK the relevant conditions.
@@ -1206,7 +1163,7 @@ void Scanner::SeekNext(size_t position) {
// current_ will remain unchanged, so overwrite it fully.)
for (TokenDesc& token : token_storage_) {
token.token = Token::UNINITIALIZED;
- token.contextual_token = Token::UNINITIALIZED;
+ token.invalid_template_escape_message = MessageTemplate::kNone;
}
// 2, reset the source to the desired position,
source_->Seek(position);
diff --git a/deps/v8/src/parsing/scanner.h b/deps/v8/src/parsing/scanner.h
index 83002b53c8..383159557b 100644
--- a/deps/v8/src/parsing/scanner.h
+++ b/deps/v8/src/parsing/scanner.h
@@ -13,8 +13,9 @@
#include "src/base/logging.h"
#include "src/char-predicates.h"
#include "src/globals.h"
-#include "src/messages.h"
+#include "src/message-template.h"
#include "src/parsing/token.h"
+#include "src/pointer-with-payload.h"
#include "src/unicode-decoder.h"
#include "src/unicode.h"
@@ -23,12 +24,11 @@ namespace internal {
class AstRawString;
class AstValueFactory;
-class DuplicateFinder;
class ExternalOneByteString;
class ExternalTwoByteString;
class ParserRecorder;
class RuntimeCallStats;
-class UnicodeCache;
+class Zone;
// ---------------------------------------------------------------------
// Buffered stream of UTF-16 code units, using an internal UTF-16 buffer.
@@ -40,6 +40,13 @@ class Utf16CharacterStream {
virtual ~Utf16CharacterStream() = default;
+ V8_INLINE void set_parser_error() {
+ buffer_cursor_ = buffer_end_;
+ has_parser_error_ = true;
+ }
+ V8_INLINE void reset_parser_error_flag() { has_parser_error_ = false; }
+ V8_INLINE bool has_parser_error() const { return has_parser_error_; }
+
inline uc32 Peek() {
if (V8_LIKELY(buffer_cursor_ < buffer_end_)) {
return static_cast<uc32>(*buffer_cursor_);
@@ -109,6 +116,11 @@ class Utf16CharacterStream {
}
}
+ // Returns true if the stream can be cloned and never touches the V8 heap,
+ // so a clone can safely be handed to another thread for parallel access.
+ bool can_be_cloned_for_parallel_access() const {
+ return can_be_cloned() && !can_access_heap();
+ }
+
// Returns true if the stream can be cloned with Clone.
// TODO(rmcilroy): Remove this once ChunkedStreams can be cloned.
virtual bool can_be_cloned() const = 0;
@@ -138,7 +150,7 @@ class Utf16CharacterStream {
bool ReadBlockChecked() {
size_t position = pos();
USE(position);
- bool success = ReadBlock();
+ bool success = !has_parser_error() && ReadBlock();
// Post-conditions: 1, We should always be at the right position.
// 2, Cursor should be inside the buffer.
@@ -186,6 +198,7 @@ class Utf16CharacterStream {
const uint16_t* buffer_end_;
size_t buffer_pos_;
RuntimeCallStats* runtime_call_stats_;
+ bool has_parser_error_ = false;
};
// ----------------------------------------------------------------------------
@@ -197,12 +210,14 @@ class Scanner {
class BookmarkScope {
public:
explicit BookmarkScope(Scanner* scanner)
- : scanner_(scanner), bookmark_(kNoBookmark) {
+ : scanner_(scanner),
+ bookmark_(kNoBookmark),
+ had_parser_error_(scanner->has_parser_error()) {
DCHECK_NOT_NULL(scanner_);
}
~BookmarkScope() = default;
- void Set();
+ void Set(size_t bookmark);
void Apply();
bool HasBeenSet() const;
bool HasBeenApplied() const;
@@ -210,24 +225,39 @@ class Scanner {
private:
static const size_t kNoBookmark;
static const size_t kBookmarkWasApplied;
- static const size_t kBookmarkAtFirstPos;
Scanner* scanner_;
size_t bookmark_;
+ bool had_parser_error_;
DISALLOW_COPY_AND_ASSIGN(BookmarkScope);
};
+ // Puts the Scanner into an error state: all further scanning stops, only
+ // ILLEGAL tokens are returned from here on, and parsing terminates.
+ V8_INLINE void set_parser_error() {
+ if (!has_parser_error()) {
+ c0_ = kEndOfInput;
+ source_->set_parser_error();
+ for (TokenDesc& desc : token_storage_) desc.token = Token::ILLEGAL;
+ }
+ }
+ V8_INLINE void reset_parser_error_flag() {
+ source_->reset_parser_error_flag();
+ }
+ V8_INLINE bool has_parser_error() const {
+ return source_->has_parser_error();
+ }
+
// Representation of an interval of source positions.
struct Location {
Location(int b, int e) : beg_pos(b), end_pos(e) { }
Location() : beg_pos(0), end_pos(0) { }
- bool IsValid() const {
- return beg_pos >= 0 && end_pos >= beg_pos;
- }
+ int length() const { return end_pos - beg_pos; }
+ bool IsValid() const { return IsInRange(beg_pos, 0, end_pos); }
- static Location invalid() { return Location(-1, -1); }
+ static Location invalid() { return Location(-1, 0); }
int beg_pos;
int end_pos;
@@ -237,8 +267,7 @@ class Scanner {
static const int kNoOctalLocation = -1;
static const uc32 kEndOfInput = Utf16CharacterStream::kEndOfInput;
- explicit Scanner(UnicodeCache* scanner_contants, Utf16CharacterStream* source,
- bool is_module);
+ explicit Scanner(Utf16CharacterStream* source, bool is_module);
void Initialize();
@@ -249,27 +278,28 @@ class Scanner {
// Returns the current token again.
Token::Value current_token() const { return current().token; }
- Token::Value current_contextual_token() const {
- return current().contextual_token;
- }
- Token::Value next_contextual_token() const { return next().contextual_token; }
-
// Returns the location information for the current token
// (the token last returned by Next()).
const Location& location() const { return current().location; }
// This error is specifically an invalid hex or unicode escape sequence.
bool has_error() const { return scanner_error_ != MessageTemplate::kNone; }
- MessageTemplate::Template error() const { return scanner_error_; }
+ MessageTemplate error() const { return scanner_error_; }
const Location& error_location() const { return scanner_error_location_; }
bool has_invalid_template_escape() const {
return current().invalid_template_escape_message != MessageTemplate::kNone;
}
- MessageTemplate::Template invalid_template_escape_message() const {
+ MessageTemplate invalid_template_escape_message() const {
DCHECK(has_invalid_template_escape());
return current().invalid_template_escape_message;
}
+
+ void clear_invalid_template_escape_message() {
+ DCHECK(has_invalid_template_escape());
+ current_->invalid_template_escape_message = MessageTemplate::kNone;
+ }
+
Location invalid_template_escape_location() const {
DCHECK(has_invalid_template_escape());
return current().invalid_template_escape_location;
@@ -301,55 +331,27 @@ class Scanner {
return current().token == token;
}
- inline bool CurrentMatchesContextual(Token::Value token) const {
- DCHECK(Token::IsContextualKeyword(token));
- return current_contextual_token() == token;
- }
-
- // Match the token against the contextual keyword or literal buffer.
- inline bool CurrentMatchesContextualEscaped(Token::Value token) const {
- DCHECK(Token::IsContextualKeyword(token) || token == Token::LET);
- // Escaped keywords are not matched as tokens. So if we require escape
- // and/or string processing we need to look at the literal content
- // (which was escape-processed already).
- // Conveniently, !current().literal_chars.is_used() for all proper
- // keywords, so this second condition should exit early in common cases.
- return (current_contextual_token() == token) ||
- (current().literal_chars.is_used() &&
- current().literal_chars.Equals(Vector<const char>(
- Token::String(token), Token::StringLength(token))));
- }
+ template <size_t N>
+ bool NextLiteralEquals(const char (&s)[N]) {
+ DCHECK_EQ(Token::STRING, peek());
+ // The token length is compared as well, so that literals that would only
+ // match after processing escape sequences (e.g., "use \x73trict") or line
+ // continuations (e.g., "use \(newline) strict") are rejected.
+ if (!is_next_literal_one_byte()) return false;
+ if (peek_location().length() != N + 1) return false;
- bool IsUseStrict() const {
- return current().token == Token::STRING &&
- current().literal_chars.Equals(
- Vector<const char>("use strict", strlen("use strict")));
+ Vector<const uint8_t> next = next_literal_one_byte_string();
+ const char* chars = reinterpret_cast<const char*>(next.start());
+ return next.length() == N - 1 && strncmp(s, chars, N - 1) == 0;
}
- bool IsGet() { return CurrentMatchesContextual(Token::GET); }
-
- bool IsSet() { return CurrentMatchesContextual(Token::SET); }
-
- bool IsLet() const {
- return CurrentMatches(Token::LET) ||
- CurrentMatchesContextualEscaped(Token::LET);
- }
-
- // Check whether the CurrentSymbol() has already been seen.
- // The DuplicateFinder holds the data, so different instances can be used
- // for different sets of duplicates to check for.
- bool IsDuplicateSymbol(DuplicateFinder* duplicate_finder,
- AstValueFactory* ast_value_factory) const;
-
- UnicodeCache* unicode_cache() const { return unicode_cache_; }
-
// Returns the location of the last seen octal literal.
Location octal_position() const { return octal_pos_; }
void clear_octal_position() {
octal_pos_ = Location::invalid();
octal_message_ = MessageTemplate::kNone;
}
- MessageTemplate::Template octal_message() const { return octal_message_; }
+ MessageTemplate octal_message() const { return octal_message_; }
// Returns the value of the last smi that was scanned.
uint32_t smi_value() const { return current().smi_value_; }
@@ -405,6 +407,9 @@ class Scanner {
const Utf16CharacterStream* stream() const { return source_; }
+ // If the next characters in the stream are "#!", the line is skipped.
+ void SkipHashBang();
+
private:
// Scoped helper for saving & restoring scanner error state.
// This is used for tagged template literals, in which normally forbidden
@@ -414,20 +419,17 @@ class Scanner {
// LiteralBuffer - Collector of chars of literals.
class LiteralBuffer {
public:
- LiteralBuffer()
- : backing_store_(), position_(0), is_one_byte_(true), is_used_(false) {}
+ LiteralBuffer() : backing_store_(), position_(0), is_one_byte_(true) {}
~LiteralBuffer() { backing_store_.Dispose(); }
V8_INLINE void AddChar(char code_unit) {
- DCHECK(is_used_);
DCHECK(IsValidAscii(code_unit));
AddOneByteChar(static_cast<byte>(code_unit));
}
V8_INLINE void AddChar(uc32 code_unit) {
- DCHECK(is_used_);
- if (is_one_byte_) {
+ if (is_one_byte()) {
if (code_unit <= static_cast<uc32>(unibrow::Latin1::kMaxChar)) {
AddOneByteChar(static_cast<byte>(code_unit));
return;
@@ -440,14 +442,12 @@ class Scanner {
bool is_one_byte() const { return is_one_byte_; }
bool Equals(Vector<const char> keyword) const {
- DCHECK(is_used_);
return is_one_byte() && keyword.length() == position_ &&
(memcmp(keyword.start(), backing_store_.start(), position_) == 0);
}
Vector<const uint16_t> two_byte_literal() const {
- DCHECK(!is_one_byte_);
- DCHECK(is_used_);
+ DCHECK(!is_one_byte());
DCHECK_EQ(position_ & 0x1, 0);
return Vector<const uint16_t>(
reinterpret_cast<const uint16_t*>(backing_store_.start()),
@@ -455,24 +455,14 @@ class Scanner {
}
Vector<const uint8_t> one_byte_literal() const {
- DCHECK(is_one_byte_);
- DCHECK(is_used_);
+ DCHECK(is_one_byte());
return Vector<const uint8_t>(
reinterpret_cast<const uint8_t*>(backing_store_.start()), position_);
}
- int length() const { return is_one_byte_ ? position_ : (position_ >> 1); }
+ int length() const { return is_one_byte() ? position_ : (position_ >> 1); }
void Start() {
- DCHECK(!is_used_);
- DCHECK_EQ(0, position_);
- is_used_ = true;
- }
-
- bool is_used() const { return is_used_; }
-
- void Drop() {
- is_used_ = false;
position_ = 0;
is_one_byte_ = true;
}
@@ -481,8 +471,7 @@ class Scanner {
private:
static const int kInitialCapacity = 16;
- static const int kGrowthFactory = 4;
- static const int kMinConversionSlack = 256;
+ static const int kGrowthFactor = 4;
static const int kMaxGrowth = 1 * MB;
inline bool IsValidAscii(char code_unit) {
@@ -494,7 +483,7 @@ class Scanner {
}
V8_INLINE void AddOneByteChar(byte one_byte_char) {
- DCHECK(is_one_byte_);
+ DCHECK(is_one_byte());
if (position_ >= backing_store_.length()) ExpandBuffer();
backing_store_[position_] = one_byte_char;
position_ += kOneByteSize;
@@ -507,42 +496,37 @@ class Scanner {
Vector<byte> backing_store_;
int position_;
+
bool is_one_byte_;
- bool is_used_;
DISALLOW_COPY_AND_ASSIGN(LiteralBuffer);
};
- // Scoped helper for literal recording. Automatically drops the literal
- // if aborting the scanning before it's complete.
- class LiteralScope {
- public:
- explicit LiteralScope(Scanner* scanner)
- : buffer_(&scanner->next().literal_chars), complete_(false) {
- buffer_->Start();
- }
- ~LiteralScope() {
- if (!complete_) buffer_->Drop();
- }
- void Complete() { complete_ = true; }
-
- private:
- LiteralBuffer* buffer_;
- bool complete_;
- };
-
// The current and look-ahead token.
struct TokenDesc {
Location location = {0, 0};
LiteralBuffer literal_chars;
LiteralBuffer raw_literal_chars;
Token::Value token = Token::UNINITIALIZED;
- MessageTemplate::Template invalid_template_escape_message =
- MessageTemplate::kNone;
+ MessageTemplate invalid_template_escape_message = MessageTemplate::kNone;
Location invalid_template_escape_location;
- Token::Value contextual_token = Token::UNINITIALIZED;
uint32_t smi_value_ = 0;
bool after_line_terminator = false;
+
+#ifdef DEBUG
+ bool CanAccessLiteral() const {
+ return token == Token::PRIVATE_NAME || token == Token::ILLEGAL ||
+ token == Token::UNINITIALIZED || token == Token::REGEXP_LITERAL ||
+ token == Token::ESCAPED_KEYWORD ||
+ IsInRange(token, Token::NUMBER, Token::STRING) ||
+ (Token::IsAnyIdentifier(token) && !Token::IsKeyword(token)) ||
+ IsInRange(token, Token::TEMPLATE_SPAN, Token::TEMPLATE_TAIL);
+ }
+ bool CanAccessRawLiteral() const {
+ return token == Token::ILLEGAL || token == Token::UNINITIALIZED ||
+ IsInRange(token, Token::TEMPLATE_SPAN, Token::TEMPLATE_TAIL);
+ }
+#endif // DEBUG
};
enum NumberKind {
@@ -575,14 +559,13 @@ class Scanner {
scanner_error_ = MessageTemplate::kNone;
}
- void ReportScannerError(const Location& location,
- MessageTemplate::Template error) {
+ void ReportScannerError(const Location& location, MessageTemplate error) {
if (has_error()) return;
scanner_error_ = error;
scanner_error_location_ = location;
}
- void ReportScannerError(int pos, MessageTemplate::Template error) {
+ void ReportScannerError(int pos, MessageTemplate error) {
if (has_error()) return;
scanner_error_ = error;
scanner_error_location_ = Location(pos, pos + 1);
@@ -668,45 +651,41 @@ class Scanner {
// token as a one-byte literal. E.g. Token::FUNCTION pretends to have a
// literal "function".
Vector<const uint8_t> literal_one_byte_string() const {
- if (current().literal_chars.is_used())
- return current().literal_chars.one_byte_literal();
- const char* str = Token::String(current().token);
- const uint8_t* str_as_uint8 = reinterpret_cast<const uint8_t*>(str);
- return Vector<const uint8_t>(str_as_uint8,
- Token::StringLength(current().token));
+ DCHECK(current().CanAccessLiteral() || Token::IsKeyword(current().token));
+ return current().literal_chars.one_byte_literal();
}
Vector<const uint16_t> literal_two_byte_string() const {
- DCHECK(current().literal_chars.is_used());
+ DCHECK(current().CanAccessLiteral() || Token::IsKeyword(current().token));
return current().literal_chars.two_byte_literal();
}
bool is_literal_one_byte() const {
- return !current().literal_chars.is_used() ||
- current().literal_chars.is_one_byte();
+ DCHECK(current().CanAccessLiteral() || Token::IsKeyword(current().token));
+ return current().literal_chars.is_one_byte();
}
// Returns the literal string for the next token (the token that
// would be returned if Next() were called).
Vector<const uint8_t> next_literal_one_byte_string() const {
- DCHECK(next().literal_chars.is_used());
+ DCHECK(next().CanAccessLiteral());
return next().literal_chars.one_byte_literal();
}
Vector<const uint16_t> next_literal_two_byte_string() const {
- DCHECK(next().literal_chars.is_used());
+ DCHECK(next().CanAccessLiteral());
return next().literal_chars.two_byte_literal();
}
bool is_next_literal_one_byte() const {
- DCHECK(next().literal_chars.is_used());
+ DCHECK(next().CanAccessLiteral());
return next().literal_chars.is_one_byte();
}
Vector<const uint8_t> raw_literal_one_byte_string() const {
- DCHECK(current().raw_literal_chars.is_used());
+ DCHECK(current().CanAccessRawLiteral());
return current().raw_literal_chars.one_byte_literal();
}
Vector<const uint16_t> raw_literal_two_byte_string() const {
- DCHECK(current().raw_literal_chars.is_used());
+ DCHECK(current().CanAccessRawLiteral());
return current().raw_literal_chars.two_byte_literal();
}
bool is_raw_literal_one_byte() const {
- DCHECK(current().raw_literal_chars.is_used());
+ DCHECK(current().CanAccessRawLiteral());
return current().raw_literal_chars.is_one_byte();
}
@@ -721,6 +700,11 @@ class Scanner {
// Scans a single JavaScript token.
V8_INLINE Token::Value ScanSingleToken();
V8_INLINE void Scan();
+ // Performance hack: pass through a pre-calculated "next()" value to avoid
+ // having to re-calculate it in Scan. You'd think the compiler would be able
+ // to hoist the next() calculation out of the inlined Scan method, but it
+ // seems that pointer aliasing analysis fails to show that this is safe.
+ V8_INLINE void Scan(TokenDesc* next_desc);
V8_INLINE Token::Value SkipWhiteSpace();
Token::Value SkipSingleHTMLComment();
@@ -745,9 +729,9 @@ class Scanner {
Token::Value ScanNumber(bool seen_period);
V8_INLINE Token::Value ScanIdentifierOrKeyword();
- V8_INLINE Token::Value ScanIdentifierOrKeywordInner(LiteralScope* literal);
- Token::Value ScanIdentifierOrKeywordInnerSlow(LiteralScope* literal,
- bool escaped);
+ V8_INLINE Token::Value ScanIdentifierOrKeywordInner();
+ Token::Value ScanIdentifierOrKeywordInnerSlow(bool escaped,
+ bool can_be_keyword);
Token::Value ScanString();
Token::Value ScanPrivateName();
@@ -779,16 +763,13 @@ class Scanner {
// Subtract delimiters.
source_length -= 2;
}
- return token.literal_chars.is_used() &&
- (token.literal_chars.length() != source_length);
+ return token.literal_chars.length() != source_length;
}
#ifdef DEBUG
void SanityCheckTokenDesc(const TokenDesc&) const;
#endif
- UnicodeCache* const unicode_cache_;
-
TokenDesc& next() { return *next_; }
const TokenDesc& current() const { return *current_; }
@@ -822,9 +803,9 @@ class Scanner {
// Last-seen positions of potentially problematic tokens.
Location octal_pos_;
- MessageTemplate::Template octal_message_;
+ MessageTemplate octal_message_;
- MessageTemplate::Template scanner_error_;
+ MessageTemplate scanner_error_;
Location scanner_error_location_;
};
diff --git a/deps/v8/src/parsing/token.cc b/deps/v8/src/parsing/token.cc
index 4cbf244a2b..ec4b623775 100644
--- a/deps/v8/src/parsing/token.cc
+++ b/deps/v8/src/parsing/token.cc
@@ -10,32 +10,37 @@ namespace v8 {
namespace internal {
#define T(name, string, precedence) #name,
-const char* const Token::name_[NUM_TOKENS] = {TOKEN_LIST(T, T, T)};
+const char* const Token::name_[NUM_TOKENS] = {TOKEN_LIST(T, T)};
#undef T
#define T(name, string, precedence) string,
-const char* const Token::string_[NUM_TOKENS] = {TOKEN_LIST(T, T, T)};
+const char* const Token::string_[NUM_TOKENS] = {TOKEN_LIST(T, T)};
#undef T
constexpr uint8_t length(const char* str) {
return str ? static_cast<uint8_t>(strlen(str)) : 0;
}
#define T(name, string, precedence) length(string),
-const uint8_t Token::string_length_[NUM_TOKENS] = {TOKEN_LIST(T, T, T)};
+const uint8_t Token::string_length_[NUM_TOKENS] = {TOKEN_LIST(T, T)};
#undef T
-#define T(name, string, precedence) precedence,
-const int8_t Token::precedence_[NUM_TOKENS] = {TOKEN_LIST(T, T, T)};
-#undef T
-
-#define KT(a, b, c) 'T',
-#define KK(a, b, c) 'K',
-#define KC(a, b, c) 'C',
-const char Token::token_type[] = {TOKEN_LIST(KT, KK, KC)};
+#define T1(name, string, precedence) \
+ ((Token::name == Token::IN) ? 0 : precedence),
+#define T2(name, string, precedence) precedence,
+// precedence_[0] for accept_IN == false, precedence_[1] for accept_IN == true.
+const int8_t Token::precedence_[2][NUM_TOKENS] = {{TOKEN_LIST(T1, T1)},
+ {TOKEN_LIST(T2, T2)}};
+#undef T2
+#undef T1
+
+#define KT(a, b, c) \
+ IsPropertyNameBits::encode(Token::IsAnyIdentifier(a) || a == ESCAPED_KEYWORD),
+#define KK(a, b, c) \
+ IsKeywordBits::encode(true) | IsPropertyNameBits::encode(true),
+const uint8_t Token::token_flags[] = {TOKEN_LIST(KT, KK)};
#undef KT
#undef KK
-#undef KC
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/parsing/token.h b/deps/v8/src/parsing/token.h
index e1c6239e36..c457d39e92 100644
--- a/deps/v8/src/parsing/token.h
+++ b/deps/v8/src/parsing/token.h
@@ -20,12 +20,6 @@ namespace internal {
//
// T: Non-keyword tokens
// K: Keyword tokens
-// C: Contextual keyword token
-//
-// Contextual keyword tokens are tokens that are scanned as Token::IDENTIFIER,
-// but that in some contexts are treated as keywords. This mostly happens
-// when ECMAScript introduces new keywords, but for backwards compatibility
-// allows them to still be used as identifiers in most contexts.
// IGNORE_TOKEN is a convenience macro that can be supplied as
// an argument (at any position) for a TOKEN_LIST call. It does
@@ -33,7 +27,8 @@ namespace internal {
#define IGNORE_TOKEN(name, string, precedence)
-/* Binary operators sorted by precedence */
+/* Binary operators */
+/* ADD and SUB are at the end since they are also unary operators */
#define BINARY_OP_TOKEN_LIST(T, E) \
E(T, BIT_OR, "|", 6) \
E(T, BIT_XOR, "^", 7) \
@@ -41,12 +36,12 @@ namespace internal {
E(T, SHL, "<<", 11) \
E(T, SAR, ">>", 11) \
E(T, SHR, ">>>", 11) \
- E(T, ADD, "+", 12) \
- E(T, SUB, "-", 12) \
E(T, MUL, "*", 13) \
E(T, DIV, "/", 13) \
E(T, MOD, "%", 13) \
- E(T, EXP, "**", 14)
+ E(T, EXP, "**", 14) \
+ E(T, ADD, "+", 12) \
+ E(T, SUB, "-", 12)
#define EXPAND_BINOP_ASSIGN_TOKEN(T, name, string, precedence) \
T(ASSIGN_##name, string "=", 2)
@@ -54,32 +49,47 @@ namespace internal {
#define EXPAND_BINOP_TOKEN(T, name, string, precedence) \
T(name, string, precedence)
-#define TOKEN_LIST(T, K, C) \
- /* End of source indicator. */ \
- T(EOS, "EOS", 0) \
+#define TOKEN_LIST(T, K) \
+ \
+ /* BEGIN PropertyOrCall */ \
+ /* BEGIN Member */ \
+ /* BEGIN Template */ \
+ /* ES6 Template Literals */ \
+ T(TEMPLATE_SPAN, nullptr, 0) \
+ T(TEMPLATE_TAIL, nullptr, 0) \
+ /* END Template */ \
\
/* Punctuators (ECMA-262, section 7.7, page 15). */ \
+ /* BEGIN Property */ \
+ T(PERIOD, ".", 0) \
+ T(LBRACK, "[", 0) \
+ /* END Property */ \
+ /* END Member */ \
T(LPAREN, "(", 0) \
+ /* END PropertyOrCall */ \
T(RPAREN, ")", 0) \
- T(LBRACK, "[", 0) \
T(RBRACK, "]", 0) \
T(LBRACE, "{", 0) \
- T(RBRACE, "}", 0) \
T(COLON, ":", 0) \
- T(SEMICOLON, ";", 0) \
- T(PERIOD, ".", 0) \
T(ELLIPSIS, "...", 0) \
T(CONDITIONAL, "?", 3) \
- T(INC, "++", 0) \
- T(DEC, "--", 0) \
- T(ARROW, "=>", 0) \
+ /* BEGIN AutoSemicolon */ \
+ T(SEMICOLON, ";", 0) \
+ T(RBRACE, "}", 0) \
+ /* End of source indicator. */ \
+ T(EOS, "EOS", 0) \
+ /* END AutoSemicolon */ \
\
- /* Assignment operators. */ \
+ /* BEGIN ArrowOrAssignmentOp */ \
+ T(ARROW, "=>", 0) \
+ /* BEGIN AssignmentOp */ \
/* IsAssignmentOp() relies on this block of enum values being */ \
/* contiguous and sorted in the same order! */ \
T(INIT, "=init", 2) /* AST-use only. */ \
T(ASSIGN, "=", 2) \
BINARY_OP_TOKEN_LIST(T, EXPAND_BINOP_ASSIGN_TOKEN) \
+ /* END AssignmentOp */ \
+ /* END ArrowOrAssignmentOp */ \
\
/* Binary operators sorted by precedence. */ \
/* IsBinaryOp() relies on this block of enum values */ \
@@ -87,8 +97,24 @@ namespace internal {
T(COMMA, ",", 1) \
T(OR, "||", 4) \
T(AND, "&&", 5) \
+ \
+ /* BEGIN IsUnaryOrCountOp: unary operators, starting at ADD, which is */ \
+ /* spliced in from BINARY_OP_TOKEN_LIST below */ \
+ /* IsUnaryOp() relies on this block of enum values */ \
+ /* being contiguous and sorted in the same order! */ \
BINARY_OP_TOKEN_LIST(T, EXPAND_BINOP_TOKEN) \
\
+ T(NOT, "!", 0) \
+ T(BIT_NOT, "~", 0) \
+ K(DELETE, "delete", 0) \
+ K(TYPEOF, "typeof", 0) \
+ K(VOID, "void", 0) \
+ \
+ /* BEGIN IsCountOp */ \
+ T(INC, "++", 0) \
+ T(DEC, "--", 0) \
+ /* END IsCountOp */ \
+ /* END IsUnaryOrCountOp */ \
+ \
/* Compare operators sorted by precedence. */ \
/* IsCompareOp() relies on this block of enum values */ \
/* being contiguous and sorted in the same order! */ \
@@ -103,15 +129,6 @@ namespace internal {
K(INSTANCEOF, "instanceof", 10) \
K(IN, "in", 10) \
\
- /* Unary operators. */ \
- /* IsUnaryOp() relies on this block of enum values */ \
- /* being contiguous and sorted in the same order! */ \
- T(NOT, "!", 0) \
- T(BIT_NOT, "~", 0) \
- K(DELETE, "delete", 0) \
- K(TYPEOF, "typeof", 0) \
- K(VOID, "void", 0) \
- \
/* Keywords (ECMA-262, section 7.5.2, page 13). */ \
K(BREAK, "break", 0) \
K(CASE, "case", 0) \
@@ -149,6 +166,8 @@ namespace internal {
T(BIGINT, nullptr, 0) \
T(STRING, nullptr, 0) \
\
+ /* BEGIN Callable */ \
+ K(SUPER, "super", 0) \
/* BEGIN AnyIdentifier */ \
/* Identifiers (not keywords or future reserved words). */ \
T(IDENTIFIER, nullptr, 0) \
@@ -161,14 +180,14 @@ namespace internal {
/* Future reserved words (ECMA-262, section 7.6.1.2). */ \
T(FUTURE_STRICT_RESERVED_WORD, nullptr, 0) \
T(ESCAPED_STRICT_RESERVED_WORD, nullptr, 0) \
- K(ENUM, "enum", 0) \
/* END AnyIdentifier */ \
+ /* END Callable */ \
+ K(ENUM, "enum", 0) \
K(CLASS, "class", 0) \
K(CONST, "const", 0) \
K(EXPORT, "export", 0) \
K(EXTENDS, "extends", 0) \
K(IMPORT, "import", 0) \
- K(SUPER, "super", 0) \
T(PRIVATE_NAME, nullptr, 0) \
\
/* Illegal token - not able to scan. */ \
@@ -178,35 +197,13 @@ namespace internal {
/* Scanner-internal use only. */ \
T(WHITESPACE, nullptr, 0) \
T(UNINITIALIZED, nullptr, 0) \
- T(REGEXP_LITERAL, nullptr, 0) \
- \
- /* ES6 Template Literals */ \
- T(TEMPLATE_SPAN, nullptr, 0) \
- T(TEMPLATE_TAIL, nullptr, 0) \
- \
- /* Contextual keyword tokens */ \
- C(GET, "get", 0) \
- C(SET, "set", 0) \
- C(OF, "of", 0) \
- C(TARGET, "target", 0) \
- C(META, "meta", 0) \
- C(AS, "as", 0) \
- C(FROM, "from", 0) \
- C(NAME, "name", 0) \
- C(PROTO_UNDERSCORED, "__proto__", 0) \
- C(CONSTRUCTOR, "constructor", 0) \
- C(PRIVATE_CONSTRUCTOR, "#constructor", 0) \
- C(PROTOTYPE, "prototype", 0) \
- C(EVAL, "eval", 0) \
- C(ARGUMENTS, "arguments", 0) \
- C(UNDEFINED, "undefined", 0) \
- C(ANONYMOUS, "anonymous", 0)
+ T(REGEXP_LITERAL, nullptr, 0)
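
The BEGIN/END markers threaded through the reordered list exist so each
predicate below compiles to a single unsigned range check over a contiguous
run of enum values. The trick in isolation, on a toy enum; this IsInRange
mirrors the unsigned-subtraction comparison of V8's helper of the same name:

#include <cstdio>

enum Tok : unsigned char { SEMICOLON, RBRACE, EOS, IDENT, NUM_TOKENS };

// (x - lower) wraps around for x < lower, so one compare covers both bounds.
bool IsInRange(Tok x, Tok lower, Tok higher) {
  return static_cast<unsigned>(x) - static_cast<unsigned>(lower) <=
         static_cast<unsigned>(higher) - static_cast<unsigned>(lower);
}

bool IsAutoSemicolon(Tok t) { return IsInRange(t, SEMICOLON, EOS); }

int main() {
  std::printf("%d %d\n", IsAutoSemicolon(RBRACE), IsAutoSemicolon(IDENT));
  // prints: 1 0
}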
class Token {
public:
// All token values.
#define T(name, string, precedence) name,
- enum Value : uint8_t { TOKEN_LIST(T, T, T) NUM_TOKENS };
+ enum Value : uint8_t { TOKEN_LIST(T, T) NUM_TOKENS };
#undef T
// Returns a string corresponding to the C++ token name
@@ -216,43 +213,73 @@ class Token {
return name_[token];
}
- static char TypeForTesting(Value token) { return token_type[token]; }
+ class IsKeywordBits : public BitField8<bool, 0, 1> {};
+ class IsPropertyNameBits : public BitField8<bool, IsKeywordBits::kNext, 1> {};
// Predicates
- static bool IsKeyword(Value token) { return token_type[token] == 'K'; }
- static bool IsContextualKeyword(Value token) {
- return IsInRange(token, GET, ANONYMOUS);
+ static bool IsKeyword(Value token) {
+ return IsKeywordBits::decode(token_flags[token]);
+ }
+
+ static bool IsPropertyName(Value token) {
+ return IsPropertyNameBits::decode(token_flags[token]);
}
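
token_flags packs both predicates above into one byte per token via
BitField8. A hand-rolled sketch of the encode/decode shape; the layout
(shift, size, mask, kNext) is assumed to match V8's BitField template, not
quoted from it:

#include <cstdint>
#include <cstdio>

struct IsKeywordBits {
  static constexpr int kShift = 0, kSize = 1;
  static constexpr uint8_t kMask = ((1 << kSize) - 1) << kShift;
  static constexpr int kNext = kShift + kSize;
  static constexpr uint8_t encode(bool v) {
    return static_cast<uint8_t>(static_cast<uint8_t>(v) << kShift);
  }
  static constexpr bool decode(uint8_t flags) { return (flags & kMask) != 0; }
};

struct IsPropertyNameBits {
  static constexpr int kShift = IsKeywordBits::kNext, kSize = 1;
  static constexpr uint8_t kMask = ((1 << kSize) - 1) << kShift;
  static constexpr uint8_t encode(bool v) {
    return static_cast<uint8_t>(static_cast<uint8_t>(v) << kShift);
  }
  static constexpr bool decode(uint8_t flags) { return (flags & kMask) != 0; }
};

int main() {
  // A keyword token, as produced by the KK() expansion above.
  uint8_t flags = IsKeywordBits::encode(true) | IsPropertyNameBits::encode(true);
  std::printf("keyword=%d property_name=%d\n", IsKeywordBits::decode(flags),
              IsPropertyNameBits::decode(flags));  // prints: 1 1
}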
- static bool IsIdentifier(Value token, LanguageMode language_mode,
- bool is_generator, bool disallow_await) {
- if (IsInRange(token, IDENTIFIER, ASYNC)) return true;
- if (IsInRange(token, LET, ESCAPED_STRICT_RESERVED_WORD)) {
- return is_sloppy(language_mode);
- }
+ V8_INLINE static bool IsValidIdentifier(Value token,
+ LanguageMode language_mode,
+ bool is_generator,
+ bool disallow_await) {
+ if (V8_LIKELY(IsInRange(token, IDENTIFIER, ASYNC))) return true;
if (token == AWAIT) return !disallow_await;
if (token == YIELD) return !is_generator && is_sloppy(language_mode);
- return false;
+ return IsStrictReservedWord(token) && is_sloppy(language_mode);
+ }
+
+ static bool IsCallable(Value token) {
+ return IsInRange(token, SUPER, ESCAPED_STRICT_RESERVED_WORD);
+ }
+
+ static bool IsAutoSemicolon(Value token) {
+ return IsInRange(token, SEMICOLON, EOS);
}
static bool IsAnyIdentifier(Value token) {
- return IsInRange(token, IDENTIFIER, ENUM);
+ return IsInRange(token, IDENTIFIER, ESCAPED_STRICT_RESERVED_WORD);
}
static bool IsStrictReservedWord(Value token) {
- return IsInRange(token, LET, ESCAPED_STRICT_RESERVED_WORD);
+ return IsInRange(token, YIELD, ESCAPED_STRICT_RESERVED_WORD);
}
static bool IsLiteral(Value token) {
return IsInRange(token, NULL_LITERAL, STRING);
}
+ static bool IsTemplate(Value token) {
+ return IsInRange(token, TEMPLATE_SPAN, TEMPLATE_TAIL);
+ }
+
+ static bool IsMember(Value token) {
+ return IsInRange(token, TEMPLATE_SPAN, LBRACK);
+ }
+
+ static bool IsProperty(Value token) {
+ return IsInRange(token, PERIOD, LBRACK);
+ }
+
+ static bool IsPropertyOrCall(Value token) {
+ return IsInRange(token, TEMPLATE_SPAN, LPAREN);
+ }
+
+ static bool IsArrowOrAssignmentOp(Value token) {
+ return IsInRange(token, ARROW, ASSIGN_SUB);
+ }
+
static bool IsAssignmentOp(Value token) {
- return IsInRange(token, INIT, ASSIGN_EXP);
+ return IsInRange(token, INIT, ASSIGN_SUB);
}
- static bool IsGetOrSet(Value op) { return IsInRange(op, GET, SET); }
- static bool IsBinaryOp(Value op) { return IsInRange(op, COMMA, EXP); }
+ static bool IsBinaryOp(Value op) { return IsInRange(op, COMMA, SUB); }
static bool IsCompareOp(Value op) { return IsInRange(op, EQ, IN); }
@@ -263,7 +290,7 @@ class Token {
static bool IsEqualityOp(Value op) { return IsInRange(op, EQ, EQ_STRICT); }
static Value BinaryOpForAssignment(Value op) {
- DCHECK(IsInRange(op, ASSIGN_BIT_OR, ASSIGN_EXP));
+ DCHECK(IsInRange(op, ASSIGN_BIT_OR, ASSIGN_SUB));
Value result = static_cast<Value>(op - ASSIGN_BIT_OR + BIT_OR);
DCHECK(IsBinaryOp(result));
return result;
@@ -273,18 +300,11 @@ class Token {
return IsInRange(op, BIT_OR, SHR) || op == BIT_NOT;
}
- static bool IsUnaryOp(Value op) {
- return IsInRange(op, NOT, VOID) || IsInRange(op, ADD, SUB);
- }
-
+ static bool IsUnaryOp(Value op) { return IsInRange(op, ADD, VOID); }
static bool IsCountOp(Value op) { return IsInRange(op, INC, DEC); }
-
+ static bool IsUnaryOrCountOp(Value op) { return IsInRange(op, ADD, DEC); }
static bool IsShiftOp(Value op) { return IsInRange(op, SHL, SHR); }
- static bool IsTrivialExpressionToken(Value op) {
- return IsInRange(op, THIS, IDENTIFIER);
- }
-
// Returns a string corresponding to the JS token string
// (i.e., "<" for the token LT) or nullptr if the token doesn't
// have a (unique) string (e.g. an IDENTIFIER).
@@ -300,17 +320,17 @@ class Token {
// Returns the precedence > 0 for binary and compare
// operators; returns 0 otherwise.
- static int Precedence(Value token) {
+ static int Precedence(Value token, bool accept_IN) {
DCHECK_GT(NUM_TOKENS, token); // token is unsigned
- return precedence_[token];
+ return precedence_[accept_IN][token];
}
private:
static const char* const name_[NUM_TOKENS];
static const char* const string_[NUM_TOKENS];
static const uint8_t string_length_[NUM_TOKENS];
- static const int8_t precedence_[NUM_TOKENS];
- static const char token_type[NUM_TOKENS];
+ static const int8_t precedence_[2][NUM_TOKENS];
+ static const uint8_t token_flags[NUM_TOKENS];
};
} // namespace internal
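The token-list rewrite above leans on two tricks: predicates become single range checks over a carefully ordered enum (the BEGIN/END comments are the guard rails), and per-token booleans move into a `token_flags` byte decoded via `BitField8`. A minimal standalone sketch of the range-check pattern, with invented token names rather than the real V8 list:

```cpp
#include <cstdint>

// Sketch only: ordering is the whole point, as the BEGIN/END comments in
// the real list stress. kAdd..kVoid model the IsUnaryOp range, kInc..kDec
// the IsCountOp range, and the union kAdd..kDec is IsUnaryOrCountOp.
enum class Tok : uint8_t { kAdd, kSub, kNot, kBitNot, kDelete, kTypeof,
                           kVoid, kInc, kDec, kNumTokens };

// One unsigned compare instead of two signed ones: if t < lo, the
// subtraction wraps around and the comparison fails.
constexpr bool IsInRange(Tok t, Tok lo, Tok hi) {
  return static_cast<unsigned>(t) - static_cast<unsigned>(lo) <=
         static_cast<unsigned>(hi) - static_cast<unsigned>(lo);
}

constexpr bool IsUnaryOp(Tok t) { return IsInRange(t, Tok::kAdd, Tok::kVoid); }
constexpr bool IsCountOp(Tok t) { return IsInRange(t, Tok::kInc, Tok::kDec); }
constexpr bool IsUnaryOrCountOp(Tok t) {
  return IsInRange(t, Tok::kAdd, Tok::kDec);
}

static_assert(IsUnaryOrCountOp(Tok::kDec) && !IsUnaryOp(Tok::kInc),
              "ranges must stay contiguous and ordered");
```

The `Precedence(token, accept_IN)` change follows the same table-over-branch instinct: rather than special-casing `in` with a branch, `precedence_` gains a second row indexed by `accept_IN`.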
diff --git a/deps/v8/src/pending-compilation-error-handler.cc b/deps/v8/src/pending-compilation-error-handler.cc
index 50e1403626..5119e06cc8 100644
--- a/deps/v8/src/pending-compilation-error-handler.cc
+++ b/deps/v8/src/pending-compilation-error-handler.cc
@@ -31,7 +31,7 @@ MessageLocation PendingCompilationErrorHandler::MessageDetails::GetLocation(
}
void PendingCompilationErrorHandler::ReportMessageAt(
- int start_position, int end_position, MessageTemplate::Template message,
+ int start_position, int end_position, MessageTemplate message,
const char* arg, ParseErrorType error_type) {
if (has_pending_error_) return;
has_pending_error_ = true;
@@ -42,7 +42,7 @@ void PendingCompilationErrorHandler::ReportMessageAt(
}
void PendingCompilationErrorHandler::ReportMessageAt(
- int start_position, int end_position, MessageTemplate::Template message,
+ int start_position, int end_position, MessageTemplate message,
const AstRawString* arg, ParseErrorType error_type) {
if (has_pending_error_) return;
has_pending_error_ = true;
@@ -52,9 +52,10 @@ void PendingCompilationErrorHandler::ReportMessageAt(
error_type_ = error_type;
}
-void PendingCompilationErrorHandler::ReportWarningAt(
- int start_position, int end_position, MessageTemplate::Template message,
- const char* arg) {
+void PendingCompilationErrorHandler::ReportWarningAt(int start_position,
+ int end_position,
+ MessageTemplate message,
+ const char* arg) {
warning_messages_.emplace_front(
MessageDetails(start_position, end_position, message, nullptr, arg));
}
@@ -117,20 +118,20 @@ void PendingCompilationErrorHandler::ThrowPendingError(Isolate* isolate,
Handle<JSObject> jserror = Handle<JSObject>::cast(error);
Handle<Name> key_start_pos = factory->error_start_pos_symbol();
- JSObject::SetProperty(isolate, jserror, key_start_pos,
- handle(Smi::FromInt(location.start_pos()), isolate),
- LanguageMode::kSloppy)
+ Object::SetProperty(isolate, jserror, key_start_pos,
+ handle(Smi::FromInt(location.start_pos()), isolate),
+ LanguageMode::kSloppy)
.Check();
Handle<Name> key_end_pos = factory->error_end_pos_symbol();
- JSObject::SetProperty(isolate, jserror, key_end_pos,
- handle(Smi::FromInt(location.end_pos()), isolate),
- LanguageMode::kSloppy)
+ Object::SetProperty(isolate, jserror, key_end_pos,
+ handle(Smi::FromInt(location.end_pos()), isolate),
+ LanguageMode::kSloppy)
.Check();
Handle<Name> key_script = factory->error_script_symbol();
- JSObject::SetProperty(isolate, jserror, key_script, script,
- LanguageMode::kSloppy)
+ Object::SetProperty(isolate, jserror, key_script, script,
+ LanguageMode::kSloppy)
.Check();
isolate->Throw(*error, &location);
@@ -138,8 +139,9 @@ void PendingCompilationErrorHandler::ThrowPendingError(Isolate* isolate,
Handle<String> PendingCompilationErrorHandler::FormatErrorMessageForTest(
Isolate* isolate) const {
- return MessageTemplate::FormatMessage(isolate, error_details_.message(),
- error_details_.ArgumentString(isolate));
+ return MessageFormatter::FormatMessage(
+ isolate, error_details_.message(),
+ error_details_.ArgumentString(isolate));
}
} // namespace internal
diff --git a/deps/v8/src/pending-compilation-error-handler.h b/deps/v8/src/pending-compilation-error-handler.h
index f18a8369e4..640409b10a 100644
--- a/deps/v8/src/pending-compilation-error-handler.h
+++ b/deps/v8/src/pending-compilation-error-handler.h
@@ -10,7 +10,7 @@
#include "src/base/macros.h"
#include "src/globals.h"
#include "src/handles.h"
-#include "src/messages.h"
+#include "src/message-template.h"
namespace v8 {
namespace internal {
@@ -30,18 +30,15 @@ class PendingCompilationErrorHandler {
error_type_(kSyntaxError) {}
void ReportMessageAt(int start_position, int end_position,
- MessageTemplate::Template message,
- const char* arg = nullptr,
+ MessageTemplate message, const char* arg = nullptr,
ParseErrorType error_type = kSyntaxError);
void ReportMessageAt(int start_position, int end_position,
- MessageTemplate::Template message,
- const AstRawString* arg,
+ MessageTemplate message, const AstRawString* arg,
ParseErrorType error_type = kSyntaxError);
void ReportWarningAt(int start_position, int end_position,
- MessageTemplate::Template message,
- const char* arg = nullptr);
+ MessageTemplate message, const char* arg = nullptr);
bool stack_overflow() const { return stack_overflow_; }
@@ -62,11 +59,17 @@ class PendingCompilationErrorHandler {
Handle<String> FormatErrorMessageForTest(Isolate* isolate) const;
- bool SetUnidentifiableError() { return unidentifiable_error_ = true; }
-
- bool ResetUnidentifiableError() { return unidentifiable_error_ = false; }
-
- bool ErrorUnidentifiableByPreParser() { return unidentifiable_error_; }
+ void set_unidentifiable_error() {
+ has_pending_error_ = true;
+ unidentifiable_error_ = true;
+ }
+ void clear_unidentifiable_error() {
+ has_pending_error_ = false;
+ unidentifiable_error_ = false;
+ }
+ bool has_error_unidentifiable_by_preparser() const {
+ return unidentifiable_error_;
+ }
private:
class MessageDetails {
@@ -79,7 +82,7 @@ class PendingCompilationErrorHandler {
arg_(nullptr),
char_arg_(nullptr) {}
MessageDetails(int start_position, int end_position,
- MessageTemplate::Template message, const AstRawString* arg,
+ MessageTemplate message, const AstRawString* arg,
const char* char_arg)
: start_position_(start_position),
end_position_(end_position),
@@ -89,12 +92,12 @@ class PendingCompilationErrorHandler {
Handle<String> ArgumentString(Isolate* isolate) const;
MessageLocation GetLocation(Handle<Script> script) const;
- MessageTemplate::Template message() const { return message_; }
+ MessageTemplate message() const { return message_; }
private:
int start_position_;
int end_position_;
- MessageTemplate::Template message_;
+ MessageTemplate message_;
const AstRawString* arg_;
const char* char_arg_;
};
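The `MessageTemplate::Template` → `MessageTemplate` churn through these two files tracks the template list moving out of the heavyweight src/messages.h, where it lived as an enum nested in a class, into the small dedicated src/message-template.h, as the include swap above shows. Roughly the following shape, with the real declarations elided rather than quoted:

```cpp
// Before (sketch): the enum was nested, so every user had to spell the
// type MessageTemplate::Template and include all of messages.h.
//   class MessageTemplate { public: enum Template { kNone /* ... */ }; };

// After (sketch): a standalone enum in its own header; users spell the
// type MessageTemplate and include only message-template.h.
enum class MessageTemplate { kNone /* hundreds more in V8 */ };

// Signatures flatten accordingly, as in ReportMessageAt above.
void ReportMessageAt(int start, int end, MessageTemplate message,
                     const char* arg = nullptr) {
  (void)start; (void)end; (void)message; (void)arg;  // recording elided
}

int main() { ReportMessageAt(0, 1, MessageTemplate::kNone); }
```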
diff --git a/deps/v8/src/perf-jit.cc b/deps/v8/src/perf-jit.cc
index f6b2cf401a..0701f1b75f 100644
--- a/deps/v8/src/perf-jit.cc
+++ b/deps/v8/src/perf-jit.cc
@@ -31,8 +31,9 @@
#include "src/assembler.h"
#include "src/eh-frame.h"
-#include "src/instruction-stream.h"
#include "src/objects-inl.h"
+#include "src/ostreams.h"
+#include "src/snapshot/embedded-data.h"
#include "src/source-position-table.h"
#include "src/wasm/wasm-code-manager.h"
@@ -195,8 +196,8 @@ uint64_t PerfJitLogger::GetTimestamp() {
return (ts.tv_sec * kNsecPerSec) + ts.tv_nsec;
}
-void PerfJitLogger::LogRecordedBuffer(AbstractCode* abstract_code,
- SharedFunctionInfo* shared,
+void PerfJitLogger::LogRecordedBuffer(AbstractCode abstract_code,
+ SharedFunctionInfo shared,
const char* name, int length) {
if (FLAG_perf_basic_prof_only_functions &&
(abstract_code->kind() != AbstractCode::INTERPRETED_FUNCTION &&
@@ -210,11 +211,11 @@ void PerfJitLogger::LogRecordedBuffer(AbstractCode* abstract_code,
// We only support non-interpreted functions.
if (!abstract_code->IsCode()) return;
- Code* code = abstract_code->GetCode();
+ Code code = abstract_code->GetCode();
DCHECK(code->raw_instruction_start() == code->address() + Code::kHeaderSize);
// Debug info has to be emitted first.
- if (FLAG_perf_prof && shared != nullptr) {
+ if (FLAG_perf_prof && !shared.is_null()) {
// TODO(herhut): This currently breaks for js2wasm/wasm2js functions.
if (code->kind() != Code::JS_TO_WASM_FUNCTION &&
code->kind() != Code::WASM_TO_JS_FUNCTION) {
@@ -278,9 +279,9 @@ constexpr size_t kUnknownScriptNameStringLen =
size_t GetScriptNameLength(const SourcePositionInfo& info) {
if (!info.script.is_null()) {
- Object* name_or_url = info.script->GetNameOrSourceURL();
+ Object name_or_url = info.script->GetNameOrSourceURL();
if (name_or_url->IsString()) {
- String* str = String::cast(name_or_url);
+ String str = String::cast(name_or_url);
if (str->IsOneByteRepresentation()) return str->length();
int length;
str->ToCString(DISALLOW_NULLS, FAST_STRING_TRAVERSAL, &length);
@@ -291,12 +292,13 @@ size_t GetScriptNameLength(const SourcePositionInfo& info) {
}
Vector<const char> GetScriptName(const SourcePositionInfo& info,
- std::unique_ptr<char[]>* storage) {
+ std::unique_ptr<char[]>* storage,
+ const DisallowHeapAllocation& no_gc) {
if (!info.script.is_null()) {
- Object* name_or_url = info.script->GetNameOrSourceURL();
+ Object name_or_url = info.script->GetNameOrSourceURL();
if (name_or_url->IsSeqOneByteString()) {
- SeqOneByteString* str = SeqOneByteString::cast(name_or_url);
- return {reinterpret_cast<char*>(str->GetChars()),
+ SeqOneByteString str = SeqOneByteString::cast(name_or_url);
+ return {reinterpret_cast<char*>(str->GetChars(no_gc)),
static_cast<size_t>(str->length())};
} else if (name_or_url->IsString()) {
int length;
@@ -322,7 +324,7 @@ SourcePositionInfo GetSourcePositionInfo(Handle<Code> code,
} // namespace
-void PerfJitLogger::LogWriteDebugInfo(Code* code, SharedFunctionInfo* shared) {
+void PerfJitLogger::LogWriteDebugInfo(Code code, SharedFunctionInfo shared) {
// Compute the entry count and get the name of the script.
uint32_t entry_count = 0;
for (SourcePositionTableIterator iterator(code->SourcePositionTable());
@@ -377,7 +379,7 @@ void PerfJitLogger::LogWriteDebugInfo(Code* code, SharedFunctionInfo* shared) {
// The extracted name may point into heap-objects, thus disallow GC.
DisallowHeapAllocation no_gc;
std::unique_ptr<char[]> name_storage;
- Vector<const char> name_string = GetScriptName(info, &name_storage);
+ Vector<const char> name_string = GetScriptName(info, &name_storage, no_gc);
LogWriteBytes(name_string.start(),
static_cast<uint32_t>(name_string.size()) + 1);
}
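Threading the `DisallowHeapAllocation` scope into `GetScriptName` is the usual witness-token pattern: `GetChars` now hands out a raw interior pointer only in exchange for proof, in the type system, that the GC cannot move the object while the pointer is live. A toy model of the idea, with invented stand-ins for the V8 types:

```cpp
#include <cstddef>

// Hypothetical stand-in: constructing one of these in V8 asserts that no
// allocation (and hence no moving GC) can happen in the current scope.
struct NoGcScope {};

class FlatString {
 public:
  // The raw interior pointer is only reachable through a live witness,
  // so a caller cannot forget to pin the heap first.
  const char* Chars(const NoGcScope&) const { return data_; }
  std::size_t length() const { return len_; }

 private:
  const char* data_ = "script.js";
  std::size_t len_ = 9;
};

// Callers forward the witness instead of conjuring a fresh one, which
// keeps the no-GC region visible in every signature along the way.
const char* ScriptName(const FlatString& s, const NoGcScope& no_gc) {
  return s.Chars(no_gc);
}

int main() {
  NoGcScope no_gc;
  FlatString s;
  return ScriptName(s, no_gc)[0] == 's' ? 0 : 1;
}
```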
@@ -385,7 +387,7 @@ void PerfJitLogger::LogWriteDebugInfo(Code* code, SharedFunctionInfo* shared) {
LogWriteBytes(padding_bytes, padding);
}
-void PerfJitLogger::LogWriteUnwindingInfo(Code* code) {
+void PerfJitLogger::LogWriteUnwindingInfo(Code code) {
PerfJitCodeUnwindingInfo unwinding_info_header;
unwinding_info_header.event_ = PerfJitCodeLoad::kUnwindingInfo;
unwinding_info_header.time_stamp_ = GetTimestamp();
@@ -420,7 +422,7 @@ void PerfJitLogger::LogWriteUnwindingInfo(Code* code) {
LogWriteBytes(padding_bytes, static_cast<int>(padding_size));
}
-void PerfJitLogger::CodeMoveEvent(AbstractCode* from, AbstractCode* to) {
+void PerfJitLogger::CodeMoveEvent(AbstractCode from, AbstractCode to) {
// We may receive a CodeMove event if a BytecodeArray object moves. Otherwise
// code relocation is not supported.
CHECK(from->IsBytecodeArray());
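Most of the remaining churn in this file (`AbstractCode*`/`SharedFunctionInfo*` becoming by-value `AbstractCode`/`SharedFunctionInfo`, `nullptr` checks becoming `is_null()`) appears to be part of V8's broader migration from raw C++ pointers to small tagged-word value types. A toy model of why the call sites change the way they do:

```cpp
#include <cstdint>

// Toy model: an object "reference" is a tagged word wrapped in a value
// type. It is copied by value, and nullness is a sentinel word rather
// than a C++ null pointer, hence shared.is_null(), not shared == nullptr.
class HeapRef {
 public:
  HeapRef() = default;  // default-constructed means "null"
  explicit HeapRef(uintptr_t word) : word_(word) {}
  bool is_null() const { return word_ == 0; }
  uintptr_t ptr() const { return word_; }

 private:
  uintptr_t word_ = 0;
};

bool ShouldEmitDebugInfo(HeapRef shared) {
  return !shared.is_null();  // was: shared != nullptr
}

int main() { return ShouldEmitDebugInfo(HeapRef{0x42}) ? 0 : 1; }
```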
diff --git a/deps/v8/src/perf-jit.h b/deps/v8/src/perf-jit.h
index 91f0dca10f..ac058c1660 100644
--- a/deps/v8/src/perf-jit.h
+++ b/deps/v8/src/perf-jit.h
@@ -41,9 +41,9 @@ class PerfJitLogger : public CodeEventLogger {
explicit PerfJitLogger(Isolate* isolate);
~PerfJitLogger() override;
- void CodeMoveEvent(AbstractCode* from, AbstractCode* to) override;
- void CodeDisableOptEvent(AbstractCode* code,
- SharedFunctionInfo* shared) override {}
+ void CodeMoveEvent(AbstractCode from, AbstractCode to) override;
+ void CodeDisableOptEvent(AbstractCode code,
+ SharedFunctionInfo shared) override {}
private:
void OpenJitDumpFile();
@@ -52,7 +52,7 @@ class PerfJitLogger : public CodeEventLogger {
void CloseMarkerFile(void* marker_address);
uint64_t GetTimestamp();
- void LogRecordedBuffer(AbstractCode* code, SharedFunctionInfo* shared,
+ void LogRecordedBuffer(AbstractCode code, SharedFunctionInfo shared,
const char* name, int length) override;
void LogRecordedBuffer(const wasm::WasmCode* code, const char* name,
int length) override;
@@ -70,8 +70,8 @@ class PerfJitLogger : public CodeEventLogger {
void LogWriteBytes(const char* bytes, int size);
void LogWriteHeader();
- void LogWriteDebugInfo(Code* code, SharedFunctionInfo* shared);
- void LogWriteUnwindingInfo(Code* code);
+ void LogWriteDebugInfo(Code code, SharedFunctionInfo shared);
+ void LogWriteUnwindingInfo(Code code);
static const uint32_t kElfMachIA32 = 3;
static const uint32_t kElfMachX64 = 62;
@@ -120,16 +120,16 @@ class PerfJitLogger : public CodeEventLogger {
public:
explicit PerfJitLogger(Isolate* isolate) : CodeEventLogger(isolate) {}
- void CodeMoveEvent(AbstractCode* from, AbstractCode* to) override {
+ void CodeMoveEvent(AbstractCode from, AbstractCode to) override {
UNIMPLEMENTED();
}
- void CodeDisableOptEvent(AbstractCode* code,
- SharedFunctionInfo* shared) override {
+ void CodeDisableOptEvent(AbstractCode code,
+ SharedFunctionInfo shared) override {
UNIMPLEMENTED();
}
- void LogRecordedBuffer(AbstractCode* code, SharedFunctionInfo* shared,
+ void LogRecordedBuffer(AbstractCode code, SharedFunctionInfo shared,
const char* name, int length) override {
UNIMPLEMENTED();
}
diff --git a/deps/v8/src/pointer-with-payload.h b/deps/v8/src/pointer-with-payload.h
new file mode 100644
index 0000000000..06af29e907
--- /dev/null
+++ b/deps/v8/src/pointer-with-payload.h
@@ -0,0 +1,104 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_POINTER_WITH_PAYLOAD_H_
+#define V8_POINTER_WITH_PAYLOAD_H_
+
+#include <cstdint>
+#include <type_traits>
+
+#include "include/v8config.h"
+#include "src/base/logging.h"
+
+namespace v8 {
+namespace internal {
+
+template <typename PointerType>
+struct PointerWithPayloadTraits {
+ static constexpr int value =
+ alignof(PointerType) >= 8 ? 3 : alignof(PointerType) >= 4 ? 2 : 1;
+};
+
+// PointerWithPayload combines a PointerType* and a small PayloadType into
+// one word. The payload bits are packed into the lower bits of the
+// pointer, which are free due to alignment. The user specifies how many
+// bits are needed to store the PayloadType, so types whose natural
+// representation is wider can still be stored as long as their values fit.
+//
+// Example:
+//   PointerWithPayload<int, bool, 1> data_and_flag;
+//
+// Here we store a bool, which needs one bit, in the lower bits of an
+// int*, which points to some int data.
+
+template <typename PointerType, typename PayloadType, int NumPayloadBits>
+class PointerWithPayload {
+  // We have kAvailBits = log2(pointer alignment) low bits free to use
+ static constexpr int kAvailBits = PointerWithPayloadTraits<
+ typename std::remove_const<PointerType>::type>::value;
+ static_assert(
+ kAvailBits >= NumPayloadBits,
+ "Ptr does not have sufficient alignment for the selected amount of "
+ "storage bits.");
+
+ static constexpr uintptr_t kPayloadMask = (uintptr_t{1} << kAvailBits) - 1;
+ static constexpr uintptr_t kPointerMask = ~kPayloadMask;
+
+ public:
+ PointerWithPayload() {}
+
+ explicit PointerWithPayload(PointerType* pointer)
+ : pointer_(reinterpret_cast<uintptr_t>(pointer)) {
+ DCHECK_EQ(GetPointer(), pointer);
+ DCHECK_EQ(GetPayload(), static_cast<PayloadType>(0));
+ }
+
+ explicit PointerWithPayload(PayloadType payload)
+ : pointer_(static_cast<uintptr_t>(payload)) {
+ DCHECK_EQ(GetPointer(), nullptr);
+ DCHECK_EQ(GetPayload(), payload);
+ }
+
+ PointerWithPayload(PointerType* pointer, PayloadType payload) {
+ update(pointer, payload);
+ }
+
+ V8_INLINE PointerType* GetPointer() const {
+ return reinterpret_cast<PointerType*>(pointer_ & kPointerMask);
+ }
+
+ V8_INLINE PointerType* operator->() const { return GetPointer(); }
+
+ V8_INLINE void update(PointerType* new_pointer, PayloadType new_payload) {
+ pointer_ = reinterpret_cast<uintptr_t>(new_pointer) |
+ static_cast<uintptr_t>(new_payload);
+ DCHECK_EQ(GetPayload(), new_payload);
+ DCHECK_EQ(GetPointer(), new_pointer);
+ }
+
+ V8_INLINE void SetPointer(PointerType* newptr) {
+ DCHECK_EQ(reinterpret_cast<uintptr_t>(newptr) & kPayloadMask, 0);
+ pointer_ = reinterpret_cast<uintptr_t>(newptr) | (pointer_ & kPayloadMask);
+ DCHECK_EQ(GetPointer(), newptr);
+ }
+
+ V8_INLINE PayloadType GetPayload() const {
+ return static_cast<PayloadType>(pointer_ & kPayloadMask);
+ }
+
+ V8_INLINE void SetPayload(PayloadType new_payload) {
+ uintptr_t new_payload_ptr = static_cast<uintptr_t>(new_payload);
+ DCHECK_EQ(new_payload_ptr & kPayloadMask, new_payload_ptr);
+ pointer_ = (pointer_ & kPointerMask) | new_payload_ptr;
+ DCHECK_EQ(GetPayload(), new_payload);
+ }
+
+ private:
+ uintptr_t pointer_ = 0;
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_POINTER_WITH_PAYLOAD_H_
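Given the class exactly as added above, usage looks like the following sketch. Note that the first template argument is the pointee type, so storing an `int*` plus one flag bit is spelled `PointerWithPayload<int, bool, 1>`; standalone compilation is assumed here, while in-tree the header also pulls in V8's `DCHECK` and `V8_INLINE` support headers.

```cpp
#include <cassert>

#include "src/pointer-with-payload.h"

int main() {
  int value = 42;
  // int has at least 4-byte alignment on the targets V8 cares about, so
  // the traits above find >= 2 free low bits; we use one of them.
  v8::internal::PointerWithPayload<int, bool, 1> data_and_flag(&value, true);

  assert(data_and_flag.GetPointer() == &value);  // high bits: the pointer
  assert(data_and_flag.GetPayload());            // low bit: the flag

  data_and_flag.SetPayload(false);               // flips only the payload
  assert(data_and_flag.GetPointer() == &value);  // pointer bits untouched
  return 0;
}
```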
diff --git a/deps/v8/src/ppc/OWNERS b/deps/v8/src/ppc/OWNERS
index cf60da5cc7..6d1a8fc472 100644
--- a/deps/v8/src/ppc/OWNERS
+++ b/deps/v8/src/ppc/OWNERS
@@ -1,7 +1,4 @@
jyan@ca.ibm.com
-dstence@us.ibm.com
joransiu@ca.ibm.com
-mbrandy@us.ibm.com
michael_dawson@ca.ibm.com
-jbarboza@ca.ibm.com
-mmallick@ca.ibm.com
+miladfar@ca.ibm.com
\ No newline at end of file
diff --git a/deps/v8/src/ppc/assembler-ppc-inl.h b/deps/v8/src/ppc/assembler-ppc-inl.h
index e3dbaa96c9..99e75c377c 100644
--- a/deps/v8/src/ppc/assembler-ppc-inl.h
+++ b/deps/v8/src/ppc/assembler-ppc-inl.h
@@ -90,9 +90,7 @@ Address RelocInfo::target_address() {
}
Address RelocInfo::target_address_address() {
- DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_) || IsWasmCall(rmode_) ||
- IsEmbeddedObject(rmode_) || IsExternalReference(rmode_) ||
- IsOffHeapTarget(rmode_));
+ DCHECK(HasTargetAddressAddress());
if (FLAG_enable_embedded_constant_pool &&
Assembler::IsConstantPoolLoadStart(pc_)) {
@@ -161,31 +159,29 @@ Address Assembler::return_address_from_call_start(Address pc) {
return pc + (len + 2) * kInstrSize;
}
-HeapObject* RelocInfo::target_object() {
+HeapObject RelocInfo::target_object() {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- return HeapObject::cast(reinterpret_cast<Object*>(
- Assembler::target_address_at(pc_, constant_pool_)));
+ return HeapObject::cast(
+ Object(Assembler::target_address_at(pc_, constant_pool_)));
}
Handle<HeapObject> RelocInfo::target_object_handle(Assembler* origin) {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- return Handle<HeapObject>(reinterpret_cast<HeapObject**>(
+ return Handle<HeapObject>(reinterpret_cast<Address*>(
Assembler::target_address_at(pc_, constant_pool_)));
}
-void RelocInfo::set_target_object(Heap* heap, HeapObject* target,
+void RelocInfo::set_target_object(Heap* heap, HeapObject target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- Assembler::set_target_address_at(pc_, constant_pool_,
- reinterpret_cast<Address>(target),
+ Assembler::set_target_address_at(pc_, constant_pool_, target->ptr(),
icache_flush_mode);
- if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != nullptr) {
+ if (write_barrier_mode == UPDATE_WRITE_BARRIER && !host().is_null()) {
WriteBarrierForCode(host(), this, target);
}
}
-
Address RelocInfo::target_external_reference() {
DCHECK(rmode_ == EXTERNAL_REFERENCE);
return Assembler::target_address_at(pc_, constant_pool_);
@@ -380,7 +376,7 @@ int Assembler::GetConstantPoolOffset(Address pc,
void Assembler::PatchConstantPoolAccessInstruction(
int pc_offset, int offset, ConstantPoolEntry::Access access,
ConstantPoolEntry::Type type) {
- Address pc = reinterpret_cast<Address>(buffer_) + pc_offset;
+ Address pc = reinterpret_cast<Address>(buffer_start_) + pc_offset;
bool overflowed = (access == ConstantPoolEntry::OVERFLOWED);
CHECK(overflowed != is_int16(offset));
#ifdef DEBUG
@@ -426,9 +422,10 @@ Address Assembler::target_constant_pool_address_at(
// has already deserialized the mov instructions etc.
// There is a FIXED_SEQUENCE assumption here
void Assembler::deserialization_set_special_target_at(
- Address instruction_payload, Code* code, Address target) {
+ Address instruction_payload, Code code, Address target) {
set_target_address_at(instruction_payload,
- code ? code->constant_pool() : kNullAddress, target);
+ !code.is_null() ? code->constant_pool() : kNullAddress,
+ target);
}
int Assembler::deserialization_special_target_size(
diff --git a/deps/v8/src/ppc/assembler-ppc.cc b/deps/v8/src/ppc/assembler-ppc.cc
index 5daa55604e..db84384595 100644
--- a/deps/v8/src/ppc/assembler-ppc.cc
+++ b/deps/v8/src/ppc/assembler-ppc.cc
@@ -40,7 +40,6 @@
#include "src/base/bits.h"
#include "src/base/cpu.h"
-#include "src/code-stubs.h"
#include "src/deoptimizer.h"
#include "src/macro-assembler.h"
#include "src/ppc/assembler-ppc-inl.h"
@@ -163,23 +162,6 @@ bool RelocInfo::IsInConstantPool() {
return false;
}
-int RelocInfo::GetDeoptimizationId(Isolate* isolate, DeoptimizeKind kind) {
- DCHECK(IsRuntimeEntry(rmode_));
- return Deoptimizer::GetDeoptimizationId(isolate, target_address(), kind);
-}
-
-void RelocInfo::set_js_to_wasm_address(Address address,
- ICacheFlushMode icache_flush_mode) {
- DCHECK_EQ(rmode_, JS_TO_WASM_CALL);
- Assembler::set_target_address_at(pc_, constant_pool_, address,
- icache_flush_mode);
-}
-
-Address RelocInfo::js_to_wasm_address() const {
- DCHECK_EQ(rmode_, JS_TO_WASM_CALL);
- return Assembler::target_address_at(pc_, constant_pool_);
-}
-
uint32_t RelocInfo::wasm_call_tag() const {
DCHECK(rmode_ == WASM_CALL || rmode_ == WASM_STUB_CALL);
return static_cast<uint32_t>(
@@ -205,13 +187,6 @@ Operand Operand::EmbeddedNumber(double value) {
return result;
}
-Operand Operand::EmbeddedCode(CodeStub* stub) {
- Operand result(0, RelocInfo::CODE_TARGET);
- result.is_heap_object_request_ = true;
- result.value_.heap_object_request = HeapObjectRequest(stub);
- return result;
-}
-
Operand Operand::EmbeddedStringConstant(const StringConstantBase* str) {
Operand result(0, RelocInfo::EMBEDDED_OBJECT);
result.is_heap_object_request_ = true;
@@ -235,11 +210,6 @@ void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
isolate->factory()->NewHeapNumber(request.heap_number(), TENURED);
break;
}
- case HeapObjectRequest::kCodeStub: {
- request.code_stub()->set_isolate(isolate);
- object = request.code_stub()->GetCode();
- break;
- }
case HeapObjectRequest::kStringConstant: {
const StringConstantBase* str = request.string();
CHECK_NOT_NULL(str);
@@ -247,7 +217,7 @@ void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
break;
}
}
- Address pc = reinterpret_cast<Address>(buffer_) + request.offset();
+ Address pc = reinterpret_cast<Address>(buffer_start_) + request.offset();
Address constant_pool = kNullAddress;
set_target_address_at(pc, constant_pool, object.address(),
SKIP_ICACHE_FLUSH);
@@ -257,11 +227,11 @@ void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
// -----------------------------------------------------------------------------
// Specific instructions, constants, and masks.
-Assembler::Assembler(const AssemblerOptions& options, void* buffer,
- int buffer_size)
- : AssemblerBase(options, buffer, buffer_size),
+Assembler::Assembler(const AssemblerOptions& options,
+ std::unique_ptr<AssemblerBuffer> buffer)
+ : AssemblerBase(options, std::move(buffer)),
constant_pool_builder_(kLoadPtrMaxReachBits, kLoadDoubleMaxReachBits) {
- reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);
+ reloc_info_writer.Reposition(buffer_start_ + buffer_->size(), pc_);
no_trampoline_pool_before_ = 0;
trampoline_pool_blocked_nesting_ = 0;
@@ -277,21 +247,25 @@ Assembler::Assembler(const AssemblerOptions& options, void* buffer,
void Assembler::GetCode(Isolate* isolate, CodeDesc* desc) {
// Emit constant pool if necessary.
- int constant_pool_offset = EmitConstantPool();
+ int constant_pool_size = EmitConstantPool();
EmitRelocations();
+
+ int code_comments_size = WriteCodeComments();
+
AllocateAndInstallRequestedHeapObjects(isolate);
// Set up code descriptor.
- desc->buffer = buffer_;
- desc->buffer_size = buffer_size_;
+ desc->buffer = buffer_start_;
+ desc->buffer_size = buffer_->size();
desc->instr_size = pc_offset();
- desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
- desc->constant_pool_size =
- (constant_pool_offset ? desc->instr_size - constant_pool_offset : 0);
+ desc->reloc_size =
+ (buffer_start_ + desc->buffer_size) - reloc_info_writer.pos();
+ desc->constant_pool_size = constant_pool_size;
desc->origin = this;
desc->unwinding_info_size = 0;
desc->unwinding_info = nullptr;
+ desc->code_comments_size = code_comments_size;
}
@@ -516,7 +490,8 @@ void Assembler::target_at_put(int pos, int target_pos, bool* is_branch) {
Register dst = Register::from_code(instr_at(pos + kInstrSize));
int32_t offset = target_pos + (Code::kHeaderSize - kHeapObjectTag);
PatchingAssembler patcher(options(),
- reinterpret_cast<byte*>(buffer_ + pos), 2);
+ reinterpret_cast<byte*>(buffer_start_ + pos),
+ 2);
patcher.bitwise_mov32(dst, offset);
break;
}
@@ -531,7 +506,7 @@ void Assembler::target_at_put(int pos, int target_pos, bool* is_branch) {
: (SIGN_EXT_IMM22(operands & kImm22Mask));
int32_t offset = target_pos + delta;
PatchingAssembler patcher(
- options(), reinterpret_cast<byte*>(buffer_ + pos),
+ options(), reinterpret_cast<byte*>(buffer_start_ + pos),
2 + static_cast<int32_t>(opcode == kUnboundAddLabelLongOffsetOpcode));
patcher.bitwise_add32(dst, base, offset);
if (opcode == kUnboundAddLabelLongOffsetOpcode) patcher.nop();
@@ -541,7 +516,7 @@ void Assembler::target_at_put(int pos, int target_pos, bool* is_branch) {
// Load the address of the label in a register.
Register dst = Register::from_code(instr_at(pos + kInstrSize));
PatchingAssembler patcher(options(),
- reinterpret_cast<byte*>(buffer_ + pos),
+ reinterpret_cast<byte*>(buffer_start_ + pos),
kMovInstructionsNoConstantPool);
// Keep internal references relative until EmitRelocations.
patcher.bitwise_mov(dst, target_pos);
@@ -549,7 +524,7 @@ void Assembler::target_at_put(int pos, int target_pos, bool* is_branch) {
}
case kUnboundJumpTableEntryOpcode: {
PatchingAssembler patcher(options(),
- reinterpret_cast<byte*>(buffer_ + pos),
+ reinterpret_cast<byte*>(buffer_start_ + pos),
kPointerSize / kInstrSize);
// Keep internal references relative until EmitRelocations.
patcher.dp(target_pos);
@@ -2006,54 +1981,43 @@ bool Assembler::IsNop(Instr instr, int type) {
void Assembler::GrowBuffer(int needed) {
- if (!own_buffer_) FATAL("external code buffer is too small");
+ DCHECK_EQ(buffer_start_, buffer_->start());
// Compute new buffer size.
- CodeDesc desc; // the new buffer
- if (buffer_size_ < 4 * KB) {
- desc.buffer_size = 4 * KB;
- } else if (buffer_size_ < 1 * MB) {
- desc.buffer_size = 2 * buffer_size_;
- } else {
- desc.buffer_size = buffer_size_ + 1 * MB;
- }
- int space = buffer_space() + (desc.buffer_size - buffer_size_);
- if (space < needed) {
- desc.buffer_size += needed - space;
- }
+ int old_size = buffer_->size();
+ int new_size = std::min(2 * old_size, old_size + 1 * MB);
+ int space = buffer_space() + (new_size - old_size);
+ new_size += (space < needed) ? needed - space : 0;
// Some internal data structures overflow for very large buffers; this is
// why kMaximalBufferSize must not be too large.
- if (desc.buffer_size > kMaximalBufferSize) {
+ if (new_size > kMaximalBufferSize) {
V8::FatalProcessOutOfMemory(nullptr, "Assembler::GrowBuffer");
}
// Set up new buffer.
- desc.buffer = NewArray<byte>(desc.buffer_size);
- desc.origin = this;
-
- desc.instr_size = pc_offset();
- desc.reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
+ std::unique_ptr<AssemblerBuffer> new_buffer = buffer_->Grow(new_size);
+ DCHECK_EQ(new_size, new_buffer->size());
+ byte* new_start = new_buffer->start();
// Copy the data.
- intptr_t pc_delta = desc.buffer - buffer_;
- intptr_t rc_delta =
- (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_);
- memmove(desc.buffer, buffer_, desc.instr_size);
- memmove(reloc_info_writer.pos() + rc_delta, reloc_info_writer.pos(),
- desc.reloc_size);
+ intptr_t pc_delta = new_start - buffer_start_;
+ intptr_t rc_delta = (new_start + new_size) - (buffer_start_ + old_size);
+ size_t reloc_size = (buffer_start_ + old_size) - reloc_info_writer.pos();
+ MemMove(new_start, buffer_start_, pc_offset());
+ MemMove(reloc_info_writer.pos() + rc_delta, reloc_info_writer.pos(),
+ reloc_size);
// Switch buffers.
- DeleteArray(buffer_);
- buffer_ = desc.buffer;
- buffer_size_ = desc.buffer_size;
+ buffer_ = std::move(new_buffer);
+ buffer_start_ = new_start;
pc_ += pc_delta;
reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
reloc_info_writer.last_pc() + pc_delta);
- // Nothing else to do here since we keep all internal references and
- // deferred relocation entries relative to the buffer (until
- // EmitRelocations).
+  // None of our relocation types is pc-relative pointing outside the code
+  // buffer, nor pc-absolute pointing inside it, so there is no need to
+  // relocate any emitted relocation entries.
}
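The rewritten `GrowBuffer` above also collapses the old three-branch size ladder into two lines: double the buffer per step but cap each step at 1 MB, then top up if the caller's `needed` still does not fit. The arithmetic in isolation, assuming the `kMaximalBufferSize` check happens afterwards as in the diff:

```cpp
#include <algorithm>
#include <cassert>

// Mirrors the new sizing logic: exponential growth for small buffers,
// linear (+1 MB) growth for large ones, plus a shortfall top-up.
int NextBufferSize(int old_size, int buffer_space, int needed) {
  constexpr int MB = 1024 * 1024;
  int new_size = std::min(2 * old_size, old_size + 1 * MB);
  int space = buffer_space + (new_size - old_size);
  new_size += (space < needed) ? needed - space : 0;
  return new_size;
}

int main() {
  assert(NextBufferSize(4096, 64, 1) == 8192);        // doubling regime
  assert(NextBufferSize(4 * 1024 * 1024, 64, 1) ==
         5 * 1024 * 1024);                            // +1 MB regime
  assert(NextBufferSize(4096, 64, 1 << 20) > 8192);   // top-up path
  return 0;
}
```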
@@ -2098,18 +2062,19 @@ void Assembler::EmitRelocations() {
for (std::vector<DeferredRelocInfo>::iterator it = relocations_.begin();
it != relocations_.end(); it++) {
RelocInfo::Mode rmode = it->rmode();
- Address pc = reinterpret_cast<Address>(buffer_) + it->position();
- RelocInfo rinfo(pc, rmode, it->data(), nullptr);
+ Address pc = reinterpret_cast<Address>(buffer_start_) + it->position();
+ RelocInfo rinfo(pc, rmode, it->data(), Code());
// Fix up internal references now that they are guaranteed to be bound.
if (RelocInfo::IsInternalReference(rmode)) {
// Jump table entry
intptr_t pos = static_cast<intptr_t>(Memory<Address>(pc));
- Memory<Address>(pc) = reinterpret_cast<Address>(buffer_) + pos;
+ Memory<Address>(pc) = reinterpret_cast<Address>(buffer_start_) + pos;
} else if (RelocInfo::IsInternalReferenceEncoded(rmode)) {
// mov sequence
intptr_t pos = static_cast<intptr_t>(target_address_at(pc, kNullAddress));
- set_target_address_at(pc, 0, reinterpret_cast<Address>(buffer_) + pos,
+ set_target_address_at(pc, 0,
+ reinterpret_cast<Address>(buffer_start_) + pos,
SKIP_ICACHE_FLUSH);
}
@@ -2156,14 +2121,15 @@ void Assembler::CheckTrampolinePool() {
PatchingAssembler::PatchingAssembler(const AssemblerOptions& options,
byte* address, int instructions)
- : Assembler(options, address, instructions * kInstrSize + kGap) {
- DCHECK_EQ(reloc_info_writer.pos(), buffer_ + buffer_size_);
+ : Assembler(options, ExternalAssemblerBuffer(
+ address, instructions * kInstrSize + kGap)) {
+ DCHECK_EQ(reloc_info_writer.pos(), buffer_start_ + buffer_->size());
}
PatchingAssembler::~PatchingAssembler() {
// Check that the code was patched as expected.
- DCHECK_EQ(pc_, buffer_ + buffer_size_ - kGap);
- DCHECK_EQ(reloc_info_writer.pos(), buffer_ + buffer_size_);
+ DCHECK_EQ(pc_, buffer_start_ + buffer_->size() - kGap);
+ DCHECK_EQ(reloc_info_writer.pos(), buffer_start_ + buffer_->size());
}
} // namespace internal
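The constructor change running through this file (a raw `void* buffer, int buffer_size` pair replaced by `std::unique_ptr<AssemblerBuffer>`) makes the two ownership modes explicit: pass nothing and the assembler owns a growable buffer, or wrap caller-owned memory with `ExternalAssemblerBuffer` as `PatchingAssembler` now does. A simplified standalone model of that split, not V8's actual interface:

```cpp
#include <cstddef>
#include <cstdint>
#include <memory>
#include <vector>

// Simplified model of the AssemblerBuffer abstraction referenced above.
class AssemblerBuffer {
 public:
  virtual ~AssemblerBuffer() = default;
  virtual uint8_t* start() = 0;
  virtual int size() const = 0;
};

// Owned, heap-backed storage: the "no buffer supplied" case.
class OwnedBuffer final : public AssemblerBuffer {
 public:
  explicit OwnedBuffer(int size) : storage_(static_cast<std::size_t>(size)) {}
  uint8_t* start() override { return storage_.data(); }
  int size() const override { return static_cast<int>(storage_.size()); }

 private:
  std::vector<uint8_t> storage_;
};

// Non-owning view over caller memory: what the external-buffer helper wraps.
class ExternalBuffer final : public AssemblerBuffer {
 public:
  ExternalBuffer(uint8_t* start, int size) : start_(start), size_(size) {}
  uint8_t* start() override { return start_; }
  int size() const override { return size_; }

 private:
  uint8_t* start_;
  int size_;
};

std::unique_ptr<AssemblerBuffer> MakeExternalBuffer(uint8_t* p, int n) {
  return std::make_unique<ExternalBuffer>(p, n);
}

int main() {
  OwnedBuffer owned(32);  // assembler-owned, growable via a Grow() hook
  uint8_t scratch[16];
  auto external = MakeExternalBuffer(scratch, sizeof scratch);
  return owned.size() + external->size() == 48 ? 0 : 1;
}
```

Either way the assembler holds a single `unique_ptr`, so `GrowBuffer` can swap in a larger store (`buffer_->Grow(new_size)` in the diff) without the old manual `DeleteArray` and ownership-flag bookkeeping.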
diff --git a/deps/v8/src/ppc/assembler-ppc.h b/deps/v8/src/ppc/assembler-ppc.h
index 9f3ff0dc7e..46c810334f 100644
--- a/deps/v8/src/ppc/assembler-ppc.h
+++ b/deps/v8/src/ppc/assembler-ppc.h
@@ -44,319 +44,17 @@
#include <vector>
#include "src/assembler.h"
+#include "src/constant-pool.h"
#include "src/double.h"
+#include "src/external-reference.h"
+#include "src/label.h"
+#include "src/objects/smi.h"
#include "src/ppc/constants-ppc.h"
-
-#if V8_HOST_ARCH_PPC && \
- (V8_OS_AIX || (V8_TARGET_ARCH_PPC64 && V8_TARGET_BIG_ENDIAN))
-#define ABI_USES_FUNCTION_DESCRIPTORS 1
-#else
-#define ABI_USES_FUNCTION_DESCRIPTORS 0
-#endif
-
-#if !V8_HOST_ARCH_PPC || V8_OS_AIX || V8_TARGET_ARCH_PPC64
-#define ABI_PASSES_HANDLES_IN_REGS 1
-#else
-#define ABI_PASSES_HANDLES_IN_REGS 0
-#endif
-
-#if !V8_HOST_ARCH_PPC || !V8_TARGET_ARCH_PPC64 || V8_TARGET_LITTLE_ENDIAN
-#define ABI_RETURNS_OBJECT_PAIRS_IN_REGS 1
-#else
-#define ABI_RETURNS_OBJECT_PAIRS_IN_REGS 0
-#endif
-
-#if !V8_HOST_ARCH_PPC || (V8_TARGET_ARCH_PPC64 && V8_TARGET_LITTLE_ENDIAN)
-#define ABI_CALL_VIA_IP 1
-#else
-#define ABI_CALL_VIA_IP 0
-#endif
-
-#if !V8_HOST_ARCH_PPC || V8_OS_AIX || V8_TARGET_ARCH_PPC64
-#define ABI_TOC_REGISTER 2
-#else
-#define ABI_TOC_REGISTER 13
-#endif
-
-#define INSTR_AND_DATA_CACHE_COHERENCY LWSYNC
+#include "src/ppc/register-ppc.h"
namespace v8 {
namespace internal {
-// clang-format off
-#define GENERAL_REGISTERS(V) \
- V(r0) V(sp) V(r2) V(r3) V(r4) V(r5) V(r6) V(r7) \
- V(r8) V(r9) V(r10) V(r11) V(ip) V(r13) V(r14) V(r15) \
- V(r16) V(r17) V(r18) V(r19) V(r20) V(r21) V(r22) V(r23) \
- V(r24) V(r25) V(r26) V(r27) V(r28) V(r29) V(r30) V(fp)
-
-#if V8_EMBEDDED_CONSTANT_POOL
-#define ALLOCATABLE_GENERAL_REGISTERS(V) \
- V(r3) V(r4) V(r5) V(r6) V(r7) \
- V(r8) V(r9) V(r10) V(r14) V(r15) \
- V(r16) V(r17) V(r18) V(r19) V(r20) V(r21) V(r22) V(r23) \
- V(r24) V(r25) V(r26) V(r27) V(r30)
-#else
-#define ALLOCATABLE_GENERAL_REGISTERS(V) \
- V(r3) V(r4) V(r5) V(r6) V(r7) \
- V(r8) V(r9) V(r10) V(r14) V(r15) \
- V(r16) V(r17) V(r18) V(r19) V(r20) V(r21) V(r22) V(r23) \
- V(r24) V(r25) V(r26) V(r27) V(r28) V(r30)
-#endif
-
-#define LOW_DOUBLE_REGISTERS(V) \
- V(d0) V(d1) V(d2) V(d3) V(d4) V(d5) V(d6) V(d7) \
- V(d8) V(d9) V(d10) V(d11) V(d12) V(d13) V(d14) V(d15)
-
-#define NON_LOW_DOUBLE_REGISTERS(V) \
- V(d16) V(d17) V(d18) V(d19) V(d20) V(d21) V(d22) V(d23) \
- V(d24) V(d25) V(d26) V(d27) V(d28) V(d29) V(d30) V(d31)
-
-#define DOUBLE_REGISTERS(V) \
- LOW_DOUBLE_REGISTERS(V) NON_LOW_DOUBLE_REGISTERS(V)
-
-#define FLOAT_REGISTERS DOUBLE_REGISTERS
-#define SIMD128_REGISTERS DOUBLE_REGISTERS
-
-#define ALLOCATABLE_DOUBLE_REGISTERS(V) \
- V(d1) V(d2) V(d3) V(d4) V(d5) V(d6) V(d7) \
- V(d8) V(d9) V(d10) V(d11) V(d12) V(d15) \
- V(d16) V(d17) V(d18) V(d19) V(d20) V(d21) V(d22) V(d23) \
- V(d24) V(d25) V(d26) V(d27) V(d28) V(d29) V(d30) V(d31)
-
-#define C_REGISTERS(V) \
- V(cr0) V(cr1) V(cr2) V(cr3) V(cr4) V(cr5) V(cr6) V(cr7) \
- V(cr8) V(cr9) V(cr10) V(cr11) V(cr12) V(cr15)
-// clang-format on
-
-// Register list in load/store instructions
-// Note that the bit values must match those used in actual instruction encoding
-const int kNumRegs = 32;
-
-// Caller-saved/arguments registers
-const RegList kJSCallerSaved = 1 << 3 | // r3 a1
- 1 << 4 | // r4 a2
- 1 << 5 | // r5 a3
- 1 << 6 | // r6 a4
- 1 << 7 | // r7 a5
- 1 << 8 | // r8 a6
- 1 << 9 | // r9 a7
- 1 << 10 | // r10 a8
- 1 << 11;
-
-const int kNumJSCallerSaved = 9;
-
-// Return the code of the n-th caller-saved register available to JavaScript
-// e.g. JSCallerSavedReg(0) returns r0.code() == 0
-int JSCallerSavedCode(int n);
-
-// Callee-saved registers preserved when switching from C to JavaScript
-const RegList kCalleeSaved = 1 << 14 | // r14
- 1 << 15 | // r15
- 1 << 16 | // r16
- 1 << 17 | // r17
- 1 << 18 | // r18
- 1 << 19 | // r19
- 1 << 20 | // r20
- 1 << 21 | // r21
- 1 << 22 | // r22
- 1 << 23 | // r23
- 1 << 24 | // r24
- 1 << 25 | // r25
- 1 << 26 | // r26
- 1 << 27 | // r27
- 1 << 28 | // r28
- 1 << 29 | // r29
-                             1 << 30 |  // r30
- 1 << 31; // r31
-
-const int kNumCalleeSaved = 18;
-
-const RegList kCallerSavedDoubles = 1 << 0 | // d0
- 1 << 1 | // d1
- 1 << 2 | // d2
- 1 << 3 | // d3
- 1 << 4 | // d4
- 1 << 5 | // d5
- 1 << 6 | // d6
- 1 << 7 | // d7
- 1 << 8 | // d8
- 1 << 9 | // d9
- 1 << 10 | // d10
- 1 << 11 | // d11
- 1 << 12 | // d12
- 1 << 13; // d13
-
-const int kNumCallerSavedDoubles = 14;
-
-const RegList kCalleeSavedDoubles = 1 << 14 | // d14
- 1 << 15 | // d15
- 1 << 16 | // d16
- 1 << 17 | // d17
- 1 << 18 | // d18
- 1 << 19 | // d19
- 1 << 20 | // d20
- 1 << 21 | // d21
- 1 << 22 | // d22
- 1 << 23 | // d23
- 1 << 24 | // d24
- 1 << 25 | // d25
- 1 << 26 | // d26
- 1 << 27 | // d27
- 1 << 28 | // d28
- 1 << 29 | // d29
- 1 << 30 | // d30
- 1 << 31; // d31
-
-const int kNumCalleeSavedDoubles = 18;
-
-// Number of registers for which space is reserved in safepoints. Must be a
-// multiple of 8.
-const int kNumSafepointRegisters = 32;
-
-// The following constants describe the stack frame linkage area as
-// defined by the ABI. Note that kNumRequiredStackFrameSlots must
-// satisfy alignment requirements (rounding up if required).
-#if V8_TARGET_ARCH_PPC64 && V8_TARGET_LITTLE_ENDIAN // ppc64le linux
-// [0] back chain
-// [1] condition register save area
-// [2] link register save area
-// [3] TOC save area
-// [4] Parameter1 save area
-// ...
-// [11] Parameter8 save area
-// [12] Parameter9 slot (if necessary)
-// ...
-const int kNumRequiredStackFrameSlots = 12;
-const int kStackFrameLRSlot = 2;
-const int kStackFrameExtraParamSlot = 12;
-#else // AIX
-// [0] back chain
-// [1] condition register save area
-// [2] link register save area
-// [3] reserved for compiler
-// [4] reserved by binder
-// [5] TOC save area
-// [6] Parameter1 save area
-// ...
-// [13] Parameter8 save area
-// [14] Parameter9 slot (if necessary)
-// ...
-const int kNumRequiredStackFrameSlots = 14;
-const int kStackFrameLRSlot = 2;
-const int kStackFrameExtraParamSlot = 14;
-#endif
-
-// Define the list of registers actually saved at safepoints.
-// Note that the number of saved registers may be smaller than the reserved
-// space, i.e. kNumSafepointSavedRegisters <= kNumSafepointRegisters.
-const RegList kSafepointSavedRegisters = kJSCallerSaved | kCalleeSaved;
-const int kNumSafepointSavedRegisters = kNumJSCallerSaved + kNumCalleeSaved;
-
-enum RegisterCode {
-#define REGISTER_CODE(R) kRegCode_##R,
- GENERAL_REGISTERS(REGISTER_CODE)
-#undef REGISTER_CODE
- kRegAfterLast
-};
-
-class Register : public RegisterBase<Register, kRegAfterLast> {
- public:
-#if V8_TARGET_LITTLE_ENDIAN
- static constexpr int kMantissaOffset = 0;
- static constexpr int kExponentOffset = 4;
-#else
- static constexpr int kMantissaOffset = 4;
- static constexpr int kExponentOffset = 0;
-#endif
-
- private:
- friend class RegisterBase;
- explicit constexpr Register(int code) : RegisterBase(code) {}
-};
-
-ASSERT_TRIVIALLY_COPYABLE(Register);
-static_assert(sizeof(Register) == sizeof(int),
- "Register can efficiently be passed by value");
-
-#define DEFINE_REGISTER(R) \
- constexpr Register R = Register::from_code<kRegCode_##R>();
-GENERAL_REGISTERS(DEFINE_REGISTER)
-#undef DEFINE_REGISTER
-constexpr Register no_reg = Register::no_reg();
-
-// Aliases
-constexpr Register kConstantPoolRegister = r28; // Constant pool.
-constexpr Register kRootRegister = r29; // Roots array pointer.
-constexpr Register cp = r30; // JavaScript context pointer.
-
-constexpr bool kPadArguments = false;
-constexpr bool kSimpleFPAliasing = true;
-constexpr bool kSimdMaskRegisters = false;
-
-enum DoubleRegisterCode {
-#define REGISTER_CODE(R) kDoubleCode_##R,
- DOUBLE_REGISTERS(REGISTER_CODE)
-#undef REGISTER_CODE
- kDoubleAfterLast
-};
-
-// Double word FP register.
-class DoubleRegister : public RegisterBase<DoubleRegister, kDoubleAfterLast> {
- public:
- // A few double registers are reserved: one as a scratch register and one to
- // hold 0.0, that does not fit in the immediate field of vmov instructions.
- // d14: 0.0
- // d15: scratch register.
- static constexpr int kSizeInBytes = 8;
- inline static int NumRegisters();
-
- private:
- friend class RegisterBase;
- explicit constexpr DoubleRegister(int code) : RegisterBase(code) {}
-};
-
-ASSERT_TRIVIALLY_COPYABLE(DoubleRegister);
-static_assert(sizeof(DoubleRegister) == sizeof(int),
- "DoubleRegister can efficiently be passed by value");
-
-typedef DoubleRegister FloatRegister;
-
-// TODO(ppc) Define SIMD registers.
-typedef DoubleRegister Simd128Register;
-
-#define DEFINE_REGISTER(R) \
- constexpr DoubleRegister R = DoubleRegister::from_code<kDoubleCode_##R>();
-DOUBLE_REGISTERS(DEFINE_REGISTER)
-#undef DEFINE_REGISTER
-constexpr DoubleRegister no_dreg = DoubleRegister::no_reg();
-
-constexpr DoubleRegister kFirstCalleeSavedDoubleReg = d14;
-constexpr DoubleRegister kLastCalleeSavedDoubleReg = d31;
-constexpr DoubleRegister kDoubleRegZero = d14;
-constexpr DoubleRegister kScratchDoubleReg = d13;
-
-Register ToRegister(int num);
-
-enum CRegisterCode {
-#define REGISTER_CODE(R) kCCode_##R,
- C_REGISTERS(REGISTER_CODE)
-#undef REGISTER_CODE
- kCAfterLast
-};
-
-// Coprocessor register
-class CRegister : public RegisterBase<CRegister, kCAfterLast> {
- friend class RegisterBase;
- explicit constexpr CRegister(int code) : RegisterBase(code) {}
-};
-
-constexpr CRegister no_creg = CRegister::no_reg();
-#define DECLARE_C_REGISTER(R) \
- constexpr CRegister R = CRegister::from_code<kCCode_##R>();
-C_REGISTERS(DECLARE_C_REGISTER)
-#undef DECLARE_C_REGISTER
-
// -----------------------------------------------------------------------------
// Machine instruction Operands
@@ -375,15 +73,14 @@ class Operand {
value_.immediate = static_cast<intptr_t>(f.address());
}
explicit Operand(Handle<HeapObject> handle);
- V8_INLINE explicit Operand(Smi* value) : rmode_(RelocInfo::NONE) {
- value_.immediate = reinterpret_cast<intptr_t>(value);
+ V8_INLINE explicit Operand(Smi value) : rmode_(RelocInfo::NONE) {
+ value_.immediate = static_cast<intptr_t>(value.ptr());
}
// rm
V8_INLINE explicit Operand(Register rm);
static Operand EmbeddedNumber(double number); // Smi or HeapNumber.
static Operand EmbeddedStringConstant(const StringConstantBase* str);
- static Operand EmbeddedCode(CodeStub* stub);
// Return true if this is a register operand.
V8_INLINE bool is_reg() const { return rm_.is_valid(); }
@@ -484,15 +181,10 @@ class Assembler : public AssemblerBase {
// for a detailed comment on the layout (globals.h).
//
// If the provided buffer is nullptr, the assembler allocates and grows its
- // own buffer, and buffer_size determines the initial buffer size. The buffer
- // is owned by the assembler and deallocated upon destruction of the
- // assembler.
- //
- // If the provided buffer is not nullptr, the assembler uses the provided
- // buffer for code generation and assumes its size to be buffer_size. If the
- // buffer is too small, a fatal error occurs. No deallocation of the buffer is
- // done upon destruction of the assembler.
- Assembler(const AssemblerOptions& options, void* buffer, int buffer_size);
+ // own buffer. Otherwise it takes ownership of the provided buffer.
+ explicit Assembler(const AssemblerOptions&,
+ std::unique_ptr<AssemblerBuffer> = {});
+
virtual ~Assembler() {}
// GetCode emits any pending (non-emitted) code and fills the descriptor
@@ -574,7 +266,7 @@ class Assembler : public AssemblerBase {
// This sets the branch destination.
// This is for calls and branches within generated code.
inline static void deserialization_set_special_target_at(
- Address instruction_payload, Code* code, Address target);
+ Address instruction_payload, Code code, Address target);
// Get the size of the special target encoded at 'instruction_payload'.
inline static int deserialization_special_target_size(
@@ -1331,10 +1023,6 @@ class Assembler : public AssemblerBase {
DISALLOW_IMPLICIT_CONSTRUCTORS(BlockConstantPoolEntrySharingScope);
};
- // Record a comment relocation entry that can be used by a disassembler.
- // Use --code-comments to enable.
- void RecordComment(const char* msg);
-
// Record a deoptimization reason that can be used by a log or cpu profiler.
// Use --trace-deopt to enable.
void RecordDeoptReason(DeoptimizeReason reason, SourcePosition position,
@@ -1348,9 +1036,11 @@ class Assembler : public AssemblerBase {
void dp(uintptr_t data);
// Read/patch instructions
- Instr instr_at(int pos) { return *reinterpret_cast<Instr*>(buffer_ + pos); }
+ Instr instr_at(int pos) {
+ return *reinterpret_cast<Instr*>(buffer_start_ + pos);
+ }
void instr_at_put(int pos, Instr instr) {
- *reinterpret_cast<Instr*>(buffer_ + pos) = instr;
+ *reinterpret_cast<Instr*>(buffer_start_ + pos) = instr;
}
static Instr instr_at(Address pc) { return *reinterpret_cast<Instr*>(pc); }
static void instr_at_put(Address pc, Instr instr) {
@@ -1437,10 +1127,13 @@ class Assembler : public AssemblerBase {
void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);
ConstantPoolEntry::Access ConstantPoolAddEntry(RelocInfo::Mode rmode,
intptr_t value) {
- bool sharing_ok = RelocInfo::IsNone(rmode) ||
- (!options().record_reloc_info_for_serialization &&
- RelocInfo::IsShareableRelocMode(rmode) &&
- !is_constant_pool_entry_sharing_blocked());
+ bool sharing_ok =
+ RelocInfo::IsNone(rmode) ||
+ (!options().record_reloc_info_for_serialization &&
+ RelocInfo::IsShareableRelocMode(rmode) &&
+ !is_constant_pool_entry_sharing_blocked() &&
+ // TODO(johnyan): make the following rmode shareable
+ !RelocInfo::IsWasmCall(rmode) && !RelocInfo::IsWasmStubCall(rmode));
return constant_pool_builder_.AddEntry(pc_offset(), value, sharing_ok);
}
ConstantPoolEntry::Access ConstantPoolAddEntry(Double value) {
@@ -1615,6 +1308,8 @@ class Assembler : public AssemblerBase {
void AllocateAndInstallRequestedHeapObjects(Isolate* isolate);
+ int WriteCodeComments();
+
friend class RegExpMacroAssemblerPPC;
friend class RelocInfo;
friend class BlockTrampolinePoolScope;
diff --git a/deps/v8/src/ppc/code-stubs-ppc.cc b/deps/v8/src/ppc/code-stubs-ppc.cc
index 7e287b08b8..c0d7b58b0f 100644
--- a/deps/v8/src/ppc/code-stubs-ppc.cc
+++ b/deps/v8/src/ppc/code-stubs-ppc.cc
@@ -15,634 +15,15 @@
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
#include "src/isolate.h"
+#include "src/macro-assembler.h"
#include "src/objects/api-callbacks.h"
#include "src/regexp/jsregexp.h"
#include "src/regexp/regexp-macro-assembler.h"
#include "src/runtime/runtime.h"
-#include "src/ppc/code-stubs-ppc.h" // Cannot be the first include.
-
namespace v8 {
namespace internal {
-#define __ ACCESS_MASM(masm)
-
-void JSEntryStub::Generate(MacroAssembler* masm) {
- // r3: code entry
- // r4: function
- // r5: receiver
- // r6: argc
- // [sp+0]: argv
-
- Label invoke, handler_entry, exit;
-
-// Called from C
- __ function_descriptor();
-
- {
- NoRootArrayScope no_root_array(masm);
- ProfileEntryHookStub::MaybeCallEntryHook(masm);
-
- // PPC LINUX ABI:
- // preserve LR in pre-reserved slot in caller's frame
- __ mflr(r0);
- __ StoreP(r0, MemOperand(sp, kStackFrameLRSlot * kPointerSize));
-
- // Save callee saved registers on the stack.
- __ MultiPush(kCalleeSaved);
-
- // Save callee-saved double registers.
- __ MultiPushDoubles(kCalleeSavedDoubles);
- // Set up the reserved register for 0.0.
- __ LoadDoubleLiteral(kDoubleRegZero, Double(0.0), r0);
-
- __ InitializeRootRegister();
- }
-
- // Push a frame with special values setup to mark it as an entry frame.
- // r3: code entry
- // r4: function
- // r5: receiver
- // r6: argc
- // r7: argv
- __ li(r0, Operand(-1)); // Push a bad frame pointer to fail if it is used.
- __ push(r0);
- if (FLAG_enable_embedded_constant_pool) {
- __ li(kConstantPoolRegister, Operand::Zero());
- __ push(kConstantPoolRegister);
- }
- StackFrame::Type marker = type();
- __ mov(r0, Operand(StackFrame::TypeToMarker(marker)));
- __ push(r0);
- __ push(r0);
- // Save copies of the top frame descriptor on the stack.
- __ mov(r8, Operand(ExternalReference::Create(
- IsolateAddressId::kCEntryFPAddress, isolate())));
- __ LoadP(r0, MemOperand(r8));
- __ push(r0);
-
- // Set up frame pointer for the frame to be pushed.
- __ addi(fp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));
-
- // If this is the outermost JS call, set js_entry_sp value.
- Label non_outermost_js;
- ExternalReference js_entry_sp =
- ExternalReference::Create(IsolateAddressId::kJSEntrySPAddress, isolate());
- __ mov(r8, Operand(js_entry_sp));
- __ LoadP(r9, MemOperand(r8));
- __ cmpi(r9, Operand::Zero());
- __ bne(&non_outermost_js);
- __ StoreP(fp, MemOperand(r8));
- __ mov(ip, Operand(StackFrame::OUTERMOST_JSENTRY_FRAME));
- Label cont;
- __ b(&cont);
- __ bind(&non_outermost_js);
- __ mov(ip, Operand(StackFrame::INNER_JSENTRY_FRAME));
- __ bind(&cont);
- __ push(ip); // frame-type
-
- // Jump to a faked try block that does the invoke, with a faked catch
- // block that sets the pending exception.
- __ b(&invoke);
-
- __ bind(&handler_entry);
- handler_offset_ = handler_entry.pos();
- // Caught exception: Store result (exception) in the pending exception
- // field in the JSEnv and return a failure sentinel. Coming in here the
- // fp will be invalid because the PushStackHandler below sets it to 0 to
- // signal the existence of the JSEntry frame.
- __ mov(ip, Operand(ExternalReference::Create(
- IsolateAddressId::kPendingExceptionAddress, isolate())));
-
- __ StoreP(r3, MemOperand(ip));
- __ LoadRoot(r3, RootIndex::kException);
- __ b(&exit);
-
- // Invoke: Link this frame into the handler chain.
- __ bind(&invoke);
- // Must preserve r3-r7.
- __ PushStackHandler();
- // If an exception not caught by another handler occurs, this handler
- // returns control to the code after the b(&invoke) above, which
- // restores all kCalleeSaved registers (including cp and fp) to their
- // saved values before returning a failure to C.
-
- // Invoke the function by calling through JS entry trampoline builtin.
- // Notice that we cannot store a reference to the trampoline code directly in
- // this stub, because runtime stubs are not traversed when doing GC.
-
- // Expected registers by Builtins::JSEntryTrampoline
- // r3: code entry
- // r4: function
- // r5: receiver
- // r6: argc
- // r7: argv
- __ Call(EntryTrampoline(), RelocInfo::CODE_TARGET);
-
- // Unlink this frame from the handler chain.
- __ PopStackHandler();
-
- __ bind(&exit); // r3 holds result
- // Check if the current stack frame is marked as the outermost JS frame.
- Label non_outermost_js_2;
- __ pop(r8);
- __ cmpi(r8, Operand(StackFrame::OUTERMOST_JSENTRY_FRAME));
- __ bne(&non_outermost_js_2);
- __ mov(r9, Operand::Zero());
- __ mov(r8, Operand(js_entry_sp));
- __ StoreP(r9, MemOperand(r8));
- __ bind(&non_outermost_js_2);
-
- // Restore the top frame descriptors from the stack.
- __ pop(r6);
- __ mov(ip, Operand(ExternalReference::Create(
- IsolateAddressId::kCEntryFPAddress, isolate())));
- __ StoreP(r6, MemOperand(ip));
-
- // Reset the stack to the callee saved registers.
- __ addi(sp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));
-
- // Restore callee-saved double registers.
- __ MultiPopDoubles(kCalleeSavedDoubles);
-
- // Restore callee-saved registers.
- __ MultiPop(kCalleeSaved);
-
- // Return
- __ LoadP(r0, MemOperand(sp, kStackFrameLRSlot * kPointerSize));
- __ mtlr(r0);
- __ blr();
-}
-
-// This stub is paired with DirectCEntryStub::GenerateCall
-void DirectCEntryStub::Generate(MacroAssembler* masm) {
- // Place the return address on the stack, making the call
- // GC safe. The RegExp backend also relies on this.
- __ mflr(r0);
- __ StoreP(r0, MemOperand(sp, kStackFrameExtraParamSlot * kPointerSize));
- __ Call(ip); // Call the C++ function.
- __ LoadP(r0, MemOperand(sp, kStackFrameExtraParamSlot * kPointerSize));
- __ mtlr(r0);
- __ blr();
-}
-
-
-void DirectCEntryStub::GenerateCall(MacroAssembler* masm, Register target) {
- if (FLAG_embedded_builtins) {
- if (masm->root_array_available() &&
- isolate()->ShouldLoadConstantsFromRootList()) {
- // This is basically an inlined version of Call(Handle<Code>) that loads
- // the code object into lr instead of ip.
- DCHECK_NE(ip, target);
- __ IndirectLoadConstant(ip, GetCode());
- __ addi(r0, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Move(ip, target);
- __ Call(r0);
- return;
- }
- }
- if (ABI_USES_FUNCTION_DESCRIPTORS) {
- // AIX/PPC64BE Linux use a function descriptor.
- __ LoadP(ToRegister(ABI_TOC_REGISTER), MemOperand(target, kPointerSize));
- __ LoadP(ip, MemOperand(target, 0)); // Instruction address
- } else {
- // ip needs to be set for DirectCEentryStub::Generate, and also
- // for ABI_CALL_VIA_IP.
- __ Move(ip, target);
- }
-
- intptr_t code = reinterpret_cast<intptr_t>(GetCode().location());
- __ mov(r0, Operand(code, RelocInfo::CODE_TARGET));
- __ Call(r0); // Call the stub.
-}
-
-
-void ProfileEntryHookStub::MaybeCallEntryHookDelayed(TurboAssembler* tasm,
- Zone* zone) {
- if (tasm->isolate()->function_entry_hook() != nullptr) {
- PredictableCodeSizeScope predictable(tasm,
-#if V8_TARGET_ARCH_PPC64
- 14 * kInstrSize);
-#else
- 11 * kInstrSize);
-#endif
- tasm->mflr(r0);
- tasm->Push(r0, ip);
- tasm->CallStubDelayed(new (zone) ProfileEntryHookStub(nullptr));
- tasm->Pop(r0, ip);
- tasm->mtlr(r0);
- }
-}
-
-void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
- if (masm->isolate()->function_entry_hook() != nullptr) {
- PredictableCodeSizeScope predictable(masm,
-#if V8_TARGET_ARCH_PPC64
- 14 * kInstrSize);
-#else
- 11 * kInstrSize);
-#endif
- ProfileEntryHookStub stub(masm->isolate());
- __ mflr(r0);
- __ Push(r0, ip);
- __ CallStub(&stub);
- __ Pop(r0, ip);
- __ mtlr(r0);
- }
-}
-
-
-void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
- // The entry hook is a "push lr, ip" instruction, followed by a call.
- const int32_t kReturnAddressDistanceFromFunctionStart =
- Assembler::kCallTargetAddressOffset + 3 * kInstrSize;
-
- // This should contain all kJSCallerSaved registers.
- const RegList kSavedRegs = kJSCallerSaved | // Caller saved registers.
- r15.bit(); // Saved stack pointer.
-
- // We also save lr, so the count here is one higher than the mask indicates.
- const int32_t kNumSavedRegs = kNumJSCallerSaved + 2;
-
- // Save all caller-save registers as this may be called from anywhere.
- __ mflr(ip);
- __ MultiPush(kSavedRegs | ip.bit());
-
- // Compute the function's address for the first argument.
- __ subi(r3, ip, Operand(kReturnAddressDistanceFromFunctionStart));
-
- // The caller's return address is two slots above the saved temporaries.
- // Grab that for the second argument to the hook.
- __ addi(r4, sp, Operand((kNumSavedRegs + 1) * kPointerSize));
-
- // Align the stack if necessary.
- int frame_alignment = masm->ActivationFrameAlignment();
- if (frame_alignment > kPointerSize) {
- __ mr(r15, sp);
- DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
- __ ClearRightImm(sp, sp, Operand(WhichPowerOf2(frame_alignment)));
- }
-
-#if !defined(USE_SIMULATOR)
- uintptr_t entry_hook =
- reinterpret_cast<uintptr_t>(isolate()->function_entry_hook());
-#else
- // Under the simulator we need to indirect the entry hook through a
- // trampoline function at a known address.
- ApiFunction dispatcher(FUNCTION_ADDR(EntryHookTrampoline));
- ExternalReference entry_hook =
- ExternalReference::Create(&dispatcher, ExternalReference::BUILTIN_CALL);
-
- // It additionally takes an isolate as a third parameter
- __ mov(r5, Operand(ExternalReference::isolate_address(isolate())));
-#endif
-
- __ mov(ip, Operand(entry_hook));
-
- if (ABI_USES_FUNCTION_DESCRIPTORS) {
- __ LoadP(ToRegister(ABI_TOC_REGISTER), MemOperand(ip, kPointerSize));
- __ LoadP(ip, MemOperand(ip, 0));
- }
- // ip set above, so nothing more to do for ABI_CALL_VIA_IP.
-
- // PPC LINUX ABI:
- __ li(r0, Operand::Zero());
- __ StorePU(r0, MemOperand(sp, -kNumRequiredStackFrameSlots * kPointerSize));
-
- __ Call(ip);
-
- __ addi(sp, sp, Operand(kNumRequiredStackFrameSlots * kPointerSize));
-
- // Restore the stack pointer if needed.
- if (frame_alignment > kPointerSize) {
- __ mr(sp, r15);
- }
-
- // Also pop lr to get Ret(0).
- __ MultiPop(kSavedRegs | ip.bit());
- __ mtlr(ip);
- __ Ret();
-}
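
The alignment sequence above (stash sp in r15, clear the low bits, restore afterwards) is a plain power-of-two round-down. A minimal sketch of the arithmetic, assuming a hypothetical 16-byte ActivationFrameAlignment (plain C++, not V8 source):

#include <cassert>
#include <cstdint>

int main() {
  const uintptr_t kFrameAlignment = 16;  // power of two, as DCHECKed above
  uintptr_t sp = 0x7fff1238;             // hypothetical stack pointer
  uintptr_t saved_sp = sp;               // kept in callee register r15
  sp &= ~(kFrameAlignment - 1);          // what ClearRightImm(sp, sp, 4) does
  assert(sp % kFrameAlignment == 0 && sp <= saved_sp);
  return 0;
}
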
-
-static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
- return ref0.address() - ref1.address();
-}
-
-
-// Calls an API function. Allocates a HandleScope, extracts the returned
-// value from the handle, and propagates exceptions. Restores the context.
-// stack_space - space to be unwound on exit (includes the JS call arguments
-// space and the additional space allocated for the fast call).
-static void CallApiFunctionAndReturn(MacroAssembler* masm,
- Register function_address,
- ExternalReference thunk_ref,
- int stack_space,
- MemOperand* stack_space_operand,
- MemOperand return_value_operand) {
- Isolate* isolate = masm->isolate();
- ExternalReference next_address =
- ExternalReference::handle_scope_next_address(isolate);
- const int kNextOffset = 0;
- const int kLimitOffset = AddressOffset(
- ExternalReference::handle_scope_limit_address(isolate), next_address);
- const int kLevelOffset = AddressOffset(
- ExternalReference::handle_scope_level_address(isolate), next_address);
-
- // Additional parameter is the address of the actual callback.
- DCHECK(function_address == r4 || function_address == r5);
- Register scratch = r6;
-
- __ Move(scratch, ExternalReference::is_profiling_address(isolate));
- __ lbz(scratch, MemOperand(scratch, 0));
- __ cmpi(scratch, Operand::Zero());
-
- if (CpuFeatures::IsSupported(ISELECT)) {
- __ Move(scratch, thunk_ref);
- __ isel(eq, scratch, function_address, scratch);
- } else {
- Label profiler_disabled;
- Label end_profiler_check;
- __ beq(&profiler_disabled);
- __ Move(scratch, thunk_ref);
- __ b(&end_profiler_check);
- __ bind(&profiler_disabled);
- __ mr(scratch, function_address);
- __ bind(&end_profiler_check);
- }
-
- // Allocate HandleScope in callee-save registers.
- // r17 - next_address
- // r14 - next_address->kNextOffset
- // r15 - next_address->kLimitOffset
- // r16 - next_address->kLevelOffset
- __ Move(r17, next_address);
- __ LoadP(r14, MemOperand(r17, kNextOffset));
- __ LoadP(r15, MemOperand(r17, kLimitOffset));
- __ lwz(r16, MemOperand(r17, kLevelOffset));
- __ addi(r16, r16, Operand(1));
- __ stw(r16, MemOperand(r17, kLevelOffset));
-
- if (FLAG_log_timer_events) {
- FrameScope frame(masm, StackFrame::MANUAL);
- __ PushSafepointRegisters();
- __ PrepareCallCFunction(1, r3);
- __ Move(r3, ExternalReference::isolate_address(isolate));
- __ CallCFunction(ExternalReference::log_enter_external_function(), 1);
- __ PopSafepointRegisters();
- }
-
- // Native call returns to the DirectCEntry stub which redirects to the
- // return address pushed on the stack (it could have moved after a GC).
- // The DirectCEntry stub itself is generated early and never moves.
- DirectCEntryStub stub(isolate);
- stub.GenerateCall(masm, scratch);
-
- if (FLAG_log_timer_events) {
- FrameScope frame(masm, StackFrame::MANUAL);
- __ PushSafepointRegisters();
- __ PrepareCallCFunction(1, r3);
- __ Move(r3, ExternalReference::isolate_address(isolate));
- __ CallCFunction(ExternalReference::log_leave_external_function(), 1);
- __ PopSafepointRegisters();
- }
-
- Label promote_scheduled_exception;
- Label delete_allocated_handles;
- Label leave_exit_frame;
- Label return_value_loaded;
-
- // load value from ReturnValue
- __ LoadP(r3, return_value_operand);
- __ bind(&return_value_loaded);
- // No more valid handles (the result handle was the last one). Restore
- // previous handle scope.
- __ StoreP(r14, MemOperand(r17, kNextOffset));
- if (__ emit_debug_code()) {
- __ lwz(r4, MemOperand(r17, kLevelOffset));
- __ cmp(r4, r16);
- __ Check(eq, AbortReason::kUnexpectedLevelAfterReturnFromApiCall);
- }
- __ subi(r16, r16, Operand(1));
- __ stw(r16, MemOperand(r17, kLevelOffset));
- __ LoadP(r0, MemOperand(r17, kLimitOffset));
- __ cmp(r15, r0);
- __ bne(&delete_allocated_handles);
-
- // Leave the API exit frame.
- __ bind(&leave_exit_frame);
- // LeaveExitFrame expects unwind space to be in a register.
- if (stack_space_operand != nullptr) {
- __ lwz(r14, *stack_space_operand);
- } else {
- __ mov(r14, Operand(stack_space));
- }
- __ LeaveExitFrame(false, r14, stack_space_operand != nullptr);
-
- // Check if the function scheduled an exception.
- __ LoadRoot(r14, RootIndex::kTheHoleValue);
- __ Move(r15, ExternalReference::scheduled_exception_address(isolate));
- __ LoadP(r15, MemOperand(r15));
- __ cmp(r14, r15);
- __ bne(&promote_scheduled_exception);
-
- __ blr();
-
- // Re-throw by promoting a scheduled exception.
- __ bind(&promote_scheduled_exception);
- __ TailCallRuntime(Runtime::kPromoteScheduledException);
-
- // HandleScope limit has changed. Delete allocated extensions.
- __ bind(&delete_allocated_handles);
- __ StoreP(r15, MemOperand(r17, kLimitOffset));
- __ mr(r14, r3);
- __ PrepareCallCFunction(1, r15);
- __ Move(r3, ExternalReference::isolate_address(isolate));
- __ CallCFunction(ExternalReference::delete_handle_scope_extensions(), 1);
- __ mr(r3, r14);
- __ b(&leave_exit_frame);
-}
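
Stripped of registers, CallApiFunctionAndReturn hand-codes the HandleScope protocol: save next/limit, bump the level, call, then restore and delete extensions if the limit moved. A rough C++ sketch of that logic, using simplified stand-in types rather than the real V8 declarations:

#include <cstdint>

using Address = uintptr_t;

struct HandleScopeData {
  Address* next;   // held in r14 above
  Address* limit;  // held in r15 above
  int level;       // held in r16 above
};

// Hypothetical helper standing in for delete_handle_scope_extensions().
void DeleteExtensions(HandleScopeData* data) { /* free spilled blocks */ }

Address CallApiAndLeaveScope(HandleScopeData* data, Address (*callback)()) {
  Address* prev_next = data->next;
  Address* prev_limit = data->limit;
  data->level++;                    // enter the scope
  Address result = callback();     // the actual API callback
  data->next = prev_next;          // the result handle was the last valid one
  data->level--;
  if (data->limit != prev_limit) { // extensions were allocated
    data->limit = prev_limit;      // the delete_allocated_handles path
    DeleteExtensions(data);
  }
  return result;
}
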
-
-void CallApiCallbackStub::Generate(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r7 : call_data
- // -- r5 : holder
- // -- r4 : api_function_address
- // -- cp : context
- // --
- // -- sp[0] : last argument
- // -- ...
- //  -- sp[(argc - 1) * 4] : first argument
- // -- sp[argc * 4] : receiver
- // -----------------------------------
-
- Register call_data = r7;
- Register holder = r5;
- Register api_function_address = r4;
-
- typedef FunctionCallbackArguments FCA;
-
- STATIC_ASSERT(FCA::kArgsLength == 6);
- STATIC_ASSERT(FCA::kNewTargetIndex == 5);
- STATIC_ASSERT(FCA::kDataIndex == 4);
- STATIC_ASSERT(FCA::kReturnValueOffset == 3);
- STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
- STATIC_ASSERT(FCA::kIsolateIndex == 1);
- STATIC_ASSERT(FCA::kHolderIndex == 0);
-
- // new target
- __ PushRoot(RootIndex::kUndefinedValue);
-
- // call data
- __ push(call_data);
-
- Register scratch = call_data;
- __ LoadRoot(scratch, RootIndex::kUndefinedValue);
- // return value
- __ push(scratch);
- // return value default
- __ push(scratch);
- // isolate
- __ Move(scratch, ExternalReference::isolate_address(masm->isolate()));
- __ push(scratch);
- // holder
- __ push(holder);
-
- // Prepare arguments.
- __ mr(scratch, sp);
-
- // Allocate the v8::Arguments structure in the arguments' space since
- // it's not controlled by GC.
- // PPC LINUX ABI:
- //
- // Create 4 extra slots on stack:
- // [0] space for DirectCEntryStub's LR save
- // [1-3] FunctionCallbackInfo
- const int kApiStackSpace = 4;
- const int kFunctionCallbackInfoOffset =
- (kStackFrameExtraParamSlot + 1) * kPointerSize;
-
- FrameScope frame_scope(masm, StackFrame::MANUAL);
- __ EnterExitFrame(false, kApiStackSpace);
-
- DCHECK(api_function_address != r3 && scratch != r3);
- // r3 = FunctionCallbackInfo&
- // The arguments area is after the return address.
- __ addi(r3, sp, Operand(kFunctionCallbackInfoOffset));
- // FunctionCallbackInfo::implicit_args_
- __ StoreP(scratch, MemOperand(r3, 0 * kPointerSize));
- // FunctionCallbackInfo::values_
- __ addi(ip, scratch, Operand((FCA::kArgsLength - 1 + argc()) * kPointerSize));
- __ StoreP(ip, MemOperand(r3, 1 * kPointerSize));
- // FunctionCallbackInfo::length_ = argc
- __ li(ip, Operand(argc()));
- __ stw(ip, MemOperand(r3, 2 * kPointerSize));
-
- ExternalReference thunk_ref = ExternalReference::invoke_function_callback();
-
- AllowExternalCallThatCantCauseGC scope(masm);
- // Stores return the first JS argument.
- int return_value_offset = 2 + FCA::kReturnValueOffset;
- MemOperand return_value_operand(fp, return_value_offset * kPointerSize);
- const int stack_space = argc() + FCA::kArgsLength + 1;
- MemOperand* stack_space_operand = nullptr;
- CallApiFunctionAndReturn(masm, api_function_address, thunk_ref, stack_space,
- stack_space_operand, return_value_operand);
-}
-
-
-void CallApiGetterStub::Generate(MacroAssembler* masm) {
- int arg0Slot = 0;
- int accessorInfoSlot = 0;
- int apiStackSpace = 0;
- // Build v8::PropertyCallbackInfo::args_ array on the stack and push property
- // name below the exit frame to make GC aware of them.
- STATIC_ASSERT(PropertyCallbackArguments::kShouldThrowOnErrorIndex == 0);
- STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 1);
- STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 2);
- STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 3);
- STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 4);
- STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 5);
- STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 6);
- STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 7);
-
- Register receiver = ApiGetterDescriptor::ReceiverRegister();
- Register holder = ApiGetterDescriptor::HolderRegister();
- Register callback = ApiGetterDescriptor::CallbackRegister();
- Register scratch = r7;
- DCHECK(!AreAliased(receiver, holder, callback, scratch));
-
- Register api_function_address = r5;
-
- __ push(receiver);
- // Push data from AccessorInfo.
- __ LoadP(scratch, FieldMemOperand(callback, AccessorInfo::kDataOffset));
- __ push(scratch);
- __ LoadRoot(scratch, RootIndex::kUndefinedValue);
- __ Push(scratch, scratch);
- __ Move(scratch, ExternalReference::isolate_address(isolate()));
- __ Push(scratch, holder);
- __ Push(Smi::kZero); // should_throw_on_error -> false
- __ LoadP(scratch, FieldMemOperand(callback, AccessorInfo::kNameOffset));
- __ push(scratch);
-
- // v8::PropertyCallbackInfo::args_ array and name handle.
- const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
-
- // Load address of v8::PropertyCallbackInfo::args_ array and name handle.
- __ mr(r3, sp); // r3 = Handle<Name>
- __ addi(r4, r3, Operand(1 * kPointerSize)); // r4 = v8::PCI::args_
-
-// If ABI passes Handles (pointer-sized struct) in a register:
-//
-// Create 2 extra slots on stack:
-// [0] space for DirectCEntryStub's LR save
-// [1] AccessorInfo&
-//
-// Otherwise:
-//
-// Create 3 extra slots on stack:
-// [0] space for DirectCEntryStub's LR save
-// [1] copy of Handle (first arg)
-// [2] AccessorInfo&
- if (ABI_PASSES_HANDLES_IN_REGS) {
- accessorInfoSlot = kStackFrameExtraParamSlot + 1;
- apiStackSpace = 2;
- } else {
- arg0Slot = kStackFrameExtraParamSlot + 1;
- accessorInfoSlot = arg0Slot + 1;
- apiStackSpace = 3;
- }
-
- FrameScope frame_scope(masm, StackFrame::MANUAL);
- __ EnterExitFrame(false, apiStackSpace);
-
- if (!ABI_PASSES_HANDLES_IN_REGS) {
- // pass 1st arg by reference
- __ StoreP(r3, MemOperand(sp, arg0Slot * kPointerSize));
- __ addi(r3, sp, Operand(arg0Slot * kPointerSize));
- }
-
- // Create a v8::PropertyCallbackInfo object on the stack and initialize
- // its args_ field.
- __ StoreP(r4, MemOperand(sp, accessorInfoSlot * kPointerSize));
- __ addi(r4, sp, Operand(accessorInfoSlot * kPointerSize));
- // r4 = v8::PropertyCallbackInfo&
-
- ExternalReference thunk_ref =
- ExternalReference::invoke_accessor_getter_callback();
-
- __ LoadP(scratch, FieldMemOperand(callback, AccessorInfo::kJsGetterOffset));
- __ LoadP(api_function_address,
- FieldMemOperand(scratch, Foreign::kForeignAddressOffset));
-
- // +3 is to skip prolog, return address and name handle.
- MemOperand return_value_operand(
- fp, (PropertyCallbackArguments::kReturnValueOffset + 3) * kPointerSize);
- CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
- kStackUnwindSpace, nullptr, return_value_operand);
-}
-
-#undef __
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/ppc/code-stubs-ppc.h b/deps/v8/src/ppc/code-stubs-ppc.h
deleted file mode 100644
index 80284587db..0000000000
--- a/deps/v8/src/ppc/code-stubs-ppc.h
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_PPC_CODE_STUBS_PPC_H_
-#define V8_PPC_CODE_STUBS_PPC_H_
-
-namespace v8 {
-namespace internal {
-
-// Trampoline stub to call into native code. To call safely into native code
-// in the presence of a compacting GC (which can move code objects), we need
-// to keep the code that called into native code pinned in memory. Currently
-// the simplest approach is to generate such a stub early enough that it can
-// never be moved by the GC.
-class DirectCEntryStub : public PlatformCodeStub {
- public:
- explicit DirectCEntryStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
- void GenerateCall(MacroAssembler* masm, Register target);
-
- private:
- Movability NeedsImmovableCode() override { return kImmovable; }
-
- DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
- DEFINE_PLATFORM_CODE_STUB(DirectCEntry, PlatformCodeStub);
-};
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_PPC_CODE_STUBS_PPC_H_
diff --git a/deps/v8/src/ppc/codegen-ppc.cc b/deps/v8/src/ppc/codegen-ppc.cc
deleted file mode 100644
index b27890d1f5..0000000000
--- a/deps/v8/src/ppc/codegen-ppc.cc
+++ /dev/null
@@ -1,55 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#if V8_TARGET_ARCH_PPC
-
-#include <memory>
-
-#include "src/codegen.h"
-#include "src/macro-assembler.h"
-#include "src/ppc/simulator-ppc.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ masm.
-
-UnaryMathFunction CreateSqrtFunction() {
-#if defined(USE_SIMULATOR)
- return nullptr;
-#else
- v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
- size_t allocated = 0;
- byte* buffer = AllocatePage(page_allocator,
- page_allocator->GetRandomMmapAddr(), &allocated);
- if (buffer == nullptr) return nullptr;
-
- MacroAssembler masm(AssemblerOptions{}, buffer, static_cast<int>(allocated));
-
- // Called from C
- __ function_descriptor();
-
- __ MovFromFloatParameter(d1);
- __ fsqrt(d1, d1);
- __ MovToFloatResult(d1);
- __ Ret();
-
- CodeDesc desc;
- masm.GetCode(nullptr, &desc);
- DCHECK(ABI_USES_FUNCTION_DESCRIPTORS ||
- !RelocInfo::RequiresRelocationAfterCodegen(desc));
-
- Assembler::FlushICache(buffer, allocated);
- CHECK(SetPermissions(page_allocator, buffer, allocated,
- PageAllocator::kReadExecute));
- return FUNCTION_CAST<UnaryMathFunction>(buffer);
-#endif
-}
-
-#undef __
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_TARGET_ARCH_PPC
diff --git a/deps/v8/src/ppc/constants-ppc.h b/deps/v8/src/ppc/constants-ppc.h
index a6cecf7dc2..4d79fad031 100644
--- a/deps/v8/src/ppc/constants-ppc.h
+++ b/deps/v8/src/ppc/constants-ppc.h
@@ -20,6 +20,36 @@
#define UNIMPLEMENTED_PPC()
#endif
+#if V8_HOST_ARCH_PPC && \
+ (V8_OS_AIX || (V8_TARGET_ARCH_PPC64 && V8_TARGET_BIG_ENDIAN))
+#define ABI_USES_FUNCTION_DESCRIPTORS 1
+#else
+#define ABI_USES_FUNCTION_DESCRIPTORS 0
+#endif
+
+#if !V8_HOST_ARCH_PPC || V8_OS_AIX || V8_TARGET_ARCH_PPC64
+#define ABI_PASSES_HANDLES_IN_REGS 1
+#else
+#define ABI_PASSES_HANDLES_IN_REGS 0
+#endif
+
+#if !V8_HOST_ARCH_PPC || !V8_TARGET_ARCH_PPC64 || V8_TARGET_LITTLE_ENDIAN
+#define ABI_RETURNS_OBJECT_PAIRS_IN_REGS 1
+#else
+#define ABI_RETURNS_OBJECT_PAIRS_IN_REGS 0
+#endif
+
+#if !V8_HOST_ARCH_PPC || (V8_TARGET_ARCH_PPC64 && V8_TARGET_LITTLE_ENDIAN)
+#define ABI_CALL_VIA_IP 1
+#else
+#define ABI_CALL_VIA_IP 0
+#endif
+
+#if !V8_HOST_ARCH_PPC || V8_OS_AIX || V8_TARGET_ARCH_PPC64
+#define ABI_TOC_REGISTER 2
+#else
+#define ABI_TOC_REGISTER 13
+#endif
namespace v8 {
namespace internal {
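
These ABI_* switches exist because PPC calling conventions diverge: on AIX and big-endian PPC64 Linux (ELFv1), a C "function pointer" addresses a descriptor rather than code. A hedged illustration of the record that the LoadP pairs in this patch dereference (simplified, not V8 source):

#include <cstdint>

// Layout of an ELFv1/AIX function descriptor (three pointers).
struct FunctionDescriptor {
  uintptr_t entry;  // first instruction; what ends up in ip
  uintptr_t toc;    // TOC base the callee expects in r2 (ABI_TOC_REGISTER)
  uintptr_t env;    // environment pointer, unused for plain C
};

// Calling through a descriptor: load fd->toc into r2, branch to fd->entry.
// Little-endian ELFv2 drops descriptors but requires the entry address in
// r12 (V8's ip), which is the ABI_CALL_VIA_IP path.
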
diff --git a/deps/v8/src/ppc/cpu-ppc.cc b/deps/v8/src/ppc/cpu-ppc.cc
index 91ea4000e1..cca8ebaf73 100644
--- a/deps/v8/src/ppc/cpu-ppc.cc
+++ b/deps/v8/src/ppc/cpu-ppc.cc
@@ -6,8 +6,9 @@
#if V8_TARGET_ARCH_PPC
-#include "src/assembler.h"
-#include "src/macro-assembler.h"
+#include "src/cpu-features.h"
+
+#define INSTR_AND_DATA_CACHE_COHERENCY LWSYNC
namespace v8 {
namespace internal {
@@ -45,4 +46,5 @@ void CpuFeatures::FlushICache(void* buffer, size_t size) {
} // namespace internal
} // namespace v8
+#undef INSTR_AND_DATA_CACHE_COHERENCY
#endif // V8_TARGET_ARCH_PPC
diff --git a/deps/v8/src/ppc/deoptimizer-ppc.cc b/deps/v8/src/ppc/deoptimizer-ppc.cc
index b10af51de1..9fe8cbefbd 100644
--- a/deps/v8/src/ppc/deoptimizer-ppc.cc
+++ b/deps/v8/src/ppc/deoptimizer-ppc.cc
@@ -4,20 +4,21 @@
#include "src/assembler-inl.h"
#include "src/deoptimizer.h"
+#include "src/macro-assembler.h"
#include "src/register-configuration.h"
#include "src/safepoint-table.h"
namespace v8 {
namespace internal {
-const int Deoptimizer::table_entry_size_ = 8;
-
-#define __ masm()->
+#define __ masm->
// This code tries to be close to ia32 code so that any changes can be
// easily ported.
-void Deoptimizer::TableEntryGenerator::Generate() {
- GeneratePrologue();
+void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
+ Isolate* isolate,
+ DeoptimizeKind deopt_kind) {
+ NoRootArrayScope no_root_array(masm);
// Unlike on ARM we don't save all the registers, just the useful ones.
// For the rest, there are gaps on the stack, so the offsets remain the same.
@@ -57,21 +58,20 @@ void Deoptimizer::TableEntryGenerator::Generate() {
}
__ mov(ip, Operand(ExternalReference::Create(
- IsolateAddressId::kCEntryFPAddress, isolate())));
+ IsolateAddressId::kCEntryFPAddress, isolate)));
__ StoreP(fp, MemOperand(ip));
const int kSavedRegistersAreaSize =
(kNumberOfRegisters * kPointerSize) + kDoubleRegsSize + kFloatRegsSize;
- // Get the bailout id from the stack.
- __ LoadP(r5, MemOperand(sp, kSavedRegistersAreaSize));
+ // The bailout id is passed in r29 by the caller.
+ __ mr(r5, r29);
// Get the address of the location in the code object (r6) (return
// address for lazy deoptimization) and compute the fp-to-sp delta in
// register r7.
__ mflr(r6);
- // Correct one word for bailout id.
- __ addi(r7, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
+ __ addi(r7, sp, Operand(kSavedRegistersAreaSize));
__ sub(r7, fp, r7);
// Allocate a new deoptimizer object.
@@ -83,14 +83,14 @@ void Deoptimizer::TableEntryGenerator::Generate() {
__ JumpIfSmi(r4, &context_check);
__ LoadP(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ bind(&context_check);
- __ li(r4, Operand(static_cast<int>(deopt_kind())));
+ __ li(r4, Operand(static_cast<int>(deopt_kind)));
// r5: bailout id already loaded.
// r6: code address or 0 already loaded.
// r7: Fp-to-sp delta.
- __ mov(r8, Operand(ExternalReference::isolate_address(isolate())));
+ __ mov(r8, Operand(ExternalReference::isolate_address(isolate)));
// Call Deoptimizer::New().
{
- AllowExternalCallThatCantCauseGC scope(masm());
+ AllowExternalCallThatCantCauseGC scope(masm);
__ CallCFunction(ExternalReference::new_deoptimizer_function(), 6);
}
@@ -127,8 +127,9 @@ void Deoptimizer::TableEntryGenerator::Generate() {
__ lfs(d0, MemOperand(sp, src_offset));
__ stfs(d0, MemOperand(r4, dst_offset));
}
- // Remove the bailout id and the saved registers from the stack.
- __ addi(sp, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
+
+ // Remove the saved registers from the stack.
+ __ addi(sp, sp, Operand(kSavedRegistersAreaSize));
// Compute a pointer to the unwinding limit in register r5; that is
// the first stack slot not part of the input frame.
@@ -156,7 +157,7 @@ void Deoptimizer::TableEntryGenerator::Generate() {
__ PrepareCallCFunction(1, r4);
// Call Deoptimizer::ComputeOutputFrames().
{
- AllowExternalCallThatCantCauseGC scope(masm());
+ AllowExternalCallThatCantCauseGC scope(masm);
__ CallCFunction(ExternalReference::compute_output_frames_function(), 1);
}
__ pop(r3); // Restore deoptimizer object (class Deoptimizer).
@@ -218,8 +219,6 @@ void Deoptimizer::TableEntryGenerator::Generate() {
}
}
- __ InitializeRootRegister();
-
__ pop(ip); // get continuation, leave pc on stack
__ pop(r0);
__ mtlr(r0);
@@ -227,24 +226,6 @@ void Deoptimizer::TableEntryGenerator::Generate() {
__ stop("Unreachable.");
}
-
-void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
- Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm());
-
- // Create a sequence of deoptimization entries.
- // Note that registers are still live when jumping to an entry.
- Label done;
- for (int i = 0; i < count(); i++) {
- int start = masm()->pc_offset();
- USE(start);
- __ li(ip, Operand(i));
- __ b(&done);
- DCHECK(masm()->pc_offset() - start == table_entry_size_);
- }
- __ bind(&done);
- __ push(ip);
-}
-
bool Deoptimizer::PadTopOfStackRegister() { return false; }
void FrameDescription::SetCallerPc(unsigned offset, intptr_t value) {
diff --git a/deps/v8/src/ppc/disasm-ppc.cc b/deps/v8/src/ppc/disasm-ppc.cc
index ae56f3616d..f736f804c0 100644
--- a/deps/v8/src/ppc/disasm-ppc.cc
+++ b/deps/v8/src/ppc/disasm-ppc.cc
@@ -34,13 +34,11 @@
#include "src/disasm.h"
#include "src/macro-assembler.h"
#include "src/ppc/constants-ppc.h"
-
+#include "src/register-configuration.h"
namespace v8 {
namespace internal {
-const auto GetRegConfig = RegisterConfiguration::Default;
-
//------------------------------------------------------------------------------
// Decoder decodes and disassembles instructions into an output buffer.
@@ -120,7 +118,7 @@ void Decoder::PrintRegister(int reg) {
// Print the double FP register name according to the active name converter.
void Decoder::PrintDRegister(int reg) {
- Print(GetRegConfig()->GetDoubleRegisterName(reg));
+ Print(RegisterName(DoubleRegister::from_code(reg)));
}
@@ -1497,18 +1495,16 @@ const char* NameConverter::NameOfConstant(byte* addr) const {
const char* NameConverter::NameOfCPURegister(int reg) const {
- return v8::internal::GetRegConfig()->GetGeneralRegisterName(reg);
+ return RegisterName(i::Register::from_code(reg));
}
const char* NameConverter::NameOfByteCPURegister(int reg) const {
UNREACHABLE(); // PPC does not have the concept of a byte register
- return "nobytereg";
}
const char* NameConverter::NameOfXMMRegister(int reg) const {
UNREACHABLE(); // PPC does not have any XMM registers
- return "noxmmreg";
}
const char* NameConverter::NameInCode(byte* addr) const {
diff --git a/deps/v8/src/ppc/frame-constants-ppc.cc b/deps/v8/src/ppc/frame-constants-ppc.cc
index f49296292a..546d495df8 100644
--- a/deps/v8/src/ppc/frame-constants-ppc.cc
+++ b/deps/v8/src/ppc/frame-constants-ppc.cc
@@ -4,14 +4,12 @@
#if V8_TARGET_ARCH_PPC
-#include "src/assembler.h"
+#include "src/ppc/frame-constants-ppc.h"
+
+#include "src/assembler-inl.h"
#include "src/frame-constants.h"
#include "src/macro-assembler.h"
-#include "src/ppc/assembler-ppc-inl.h"
-#include "src/ppc/assembler-ppc.h"
-#include "src/ppc/macro-assembler-ppc.h"
-#include "src/ppc/frame-constants-ppc.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/ppc/frame-constants-ppc.h b/deps/v8/src/ppc/frame-constants-ppc.h
index a4516c367c..f9b3e40846 100644
--- a/deps/v8/src/ppc/frame-constants-ppc.h
+++ b/deps/v8/src/ppc/frame-constants-ppc.h
@@ -35,7 +35,7 @@ class ExitFrameConstants : public TypedFrameConstants {
class WasmCompileLazyFrameConstants : public TypedFrameConstants {
public:
- static constexpr int kNumberOfSavedGpParamRegs = 8;
+ static constexpr int kNumberOfSavedGpParamRegs = 7;
static constexpr int kNumberOfSavedFpParamRegs = 8;
// FP-relative.
diff --git a/deps/v8/src/ppc/interface-descriptors-ppc.cc b/deps/v8/src/ppc/interface-descriptors-ppc.cc
index 505aaef93d..b5640d75c8 100644
--- a/deps/v8/src/ppc/interface-descriptors-ppc.cc
+++ b/deps/v8/src/ppc/interface-descriptors-ppc.cc
@@ -6,6 +6,8 @@
#include "src/interface-descriptors.h"
+#include "src/frames.h"
+
namespace v8 {
namespace internal {
@@ -70,12 +72,6 @@ void TypeofDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void CallFunctionDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {r4};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
void CallTrampolineDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// r3 : number of arguments
@@ -208,10 +204,9 @@ void ArgumentsAdaptorDescriptor::InitializePlatformSpecific(
void ApiCallbackDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
- JavaScriptFrame::context_register(), // callee context
- r7, // call_data
- r5, // holder
- r4, // api_function_address
+ JavaScriptFrame::context_register(), // kTargetContext
+ r4, // kApiFunctionAddress
+ r5, // kArgc
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
@@ -263,6 +258,12 @@ void FrameDropperTrampolineDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+void RunMicrotasksEntryDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {r3, r4};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/ppc/macro-assembler-ppc.cc b/deps/v8/src/ppc/macro-assembler-ppc.cc
index 9565d04a4d..94bb328bc9 100644
--- a/deps/v8/src/ppc/macro-assembler-ppc.cc
+++ b/deps/v8/src/ppc/macro-assembler-ppc.cc
@@ -12,36 +12,26 @@
#include "src/bootstrapper.h"
#include "src/callable.h"
#include "src/code-factory.h"
-#include "src/code-stubs.h"
+#include "src/counters.h"
#include "src/debug/debug.h"
#include "src/external-reference-table.h"
#include "src/frames-inl.h"
-#include "src/instruction-stream.h"
+#include "src/macro-assembler.h"
#include "src/register-configuration.h"
#include "src/runtime/runtime.h"
+#include "src/snapshot/embedded-data.h"
#include "src/snapshot/snapshot.h"
#include "src/wasm/wasm-code-manager.h"
+// Satisfy cpplint check, but don't include platform-specific header. It is
+// included recursively via macro-assembler.h.
+#if 0
#include "src/ppc/macro-assembler-ppc.h"
+#endif
namespace v8 {
namespace internal {
-MacroAssembler::MacroAssembler(Isolate* isolate,
- const AssemblerOptions& options, void* buffer,
- int size, CodeObjectRequired create_code_object)
- : TurboAssembler(isolate, options, buffer, size, create_code_object) {
- if (create_code_object == CodeObjectRequired::kYes) {
- // Unlike TurboAssembler, which can be used off the main thread and may not
- // allocate, macro assembler creates its own copy of the self-reference
- // marker in order to disambiguate between self-references during nested
- // code generation (e.g.: codegen of the current object triggers stub
- // compilation through CodeStub::GetCode()).
- code_object_ = Handle<HeapObject>::New(
- *isolate->factory()->NewSelfReferenceMarker(), isolate);
- }
-}
-
int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
Register exclusion1,
Register exclusion2,
@@ -127,8 +117,7 @@ void TurboAssembler::Jump(Register target) {
void TurboAssembler::LoadFromConstantsTable(Register destination,
int constant_index) {
- DCHECK(isolate()->heap()->RootCanBeTreatedAsConstant(
- RootIndex::kBuiltinsConstantsTable));
+ DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kBuiltinsConstantsTable));
const uint32_t offset =
FixedArray::kHeaderSize + constant_index * kPointerSize - kHeapObjectTag;
@@ -147,8 +136,11 @@ void TurboAssembler::LoadRootRegisterOffset(Register destination,
intptr_t offset) {
if (offset == 0) {
mr(destination, kRootRegister);
- } else {
+ } else if (is_int16(offset)) {
addi(destination, kRootRegister, Operand(offset));
+ } else {
+ mov(destination, Operand(offset));
+ add(destination, kRootRegister, destination);
}
}
@@ -181,35 +173,38 @@ void TurboAssembler::Jump(Address target, RelocInfo::Mode rmode, Condition cond,
void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
Condition cond, CRegister cr) {
DCHECK(RelocInfo::IsCodeTarget(rmode));
- // 'code' is always generated ppc code, never THUMB code
- if (FLAG_embedded_builtins) {
- if (root_array_available_ && options().isolate_independent_code) {
- Register scratch = ip;
- IndirectLoadConstant(scratch, code);
- addi(scratch, scratch, Operand(Code::kHeaderSize - kHeapObjectTag));
- Label skip;
- if (cond != al) b(NegateCondition(cond), &skip, cr);
- Jump(scratch);
- bind(&skip);
- return;
- } else if (options().inline_offheap_trampolines) {
- int builtin_index = Builtins::kNoBuiltinId;
- if (isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
- Builtins::IsIsolateIndependent(builtin_index)) {
- // Inline the trampoline.
- RecordCommentForOffHeapTrampoline(builtin_index);
- EmbeddedData d = EmbeddedData::FromBlob();
- Address entry = d.InstructionStartOfBuiltin(builtin_index);
- // Use ip directly instead of using UseScratchRegisterScope, as we do
- // not preserve scratch registers across calls.
- mov(ip, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
- Label skip;
- if (cond != al) b(NegateCondition(cond), &skip, cr);
- Jump(ip);
- bind(&skip);
- return;
- }
- }
+ DCHECK_IMPLIES(options().isolate_independent_code,
+ Builtins::IsIsolateIndependentBuiltin(*code));
+
+ int builtin_index = Builtins::kNoBuiltinId;
+ bool target_is_isolate_independent_builtin =
+ isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
+ Builtins::IsIsolateIndependent(builtin_index);
+
+ if (root_array_available_ && options().isolate_independent_code) {
+ Label skip;
+ Register scratch = ip;
+ int offset = code->builtin_index() * kSystemPointerSize +
+ IsolateData::builtin_entry_table_offset();
+ LoadP(scratch, MemOperand(kRootRegister, offset), r0);
+ if (cond != al) b(NegateCondition(cond), &skip, cr);
+ Jump(scratch);
+ bind(&skip);
+ return;
+ } else if (options().inline_offheap_trampolines &&
+ target_is_isolate_independent_builtin) {
+ // Inline the trampoline.
+ Label skip;
+ RecordCommentForOffHeapTrampoline(builtin_index);
+ EmbeddedData d = EmbeddedData::FromBlob();
+ Address entry = d.InstructionStartOfBuiltin(builtin_index);
+ // Use ip directly instead of using UseScratchRegisterScope, as we do
+ // not preserve scratch registers across calls.
+ mov(ip, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
+ if (cond != al) b(NegateCondition(cond), &skip, cr);
+ Jump(ip);
+ bind(&skip);
+ return;
}
Jump(static_cast<intptr_t>(code.address()), rmode, cond, cr);
}
@@ -252,37 +247,39 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
Condition cond) {
BlockTrampolinePoolScope block_trampoline_pool(this);
DCHECK(RelocInfo::IsCodeTarget(rmode));
-
- if (FLAG_embedded_builtins) {
- if (root_array_available_ && options().isolate_independent_code) {
- // Use ip directly instead of using UseScratchRegisterScope, as we do not
- // preserve scratch registers across calls.
- IndirectLoadConstant(ip, code);
- addi(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
- Label skip;
- if (cond != al) b(NegateCondition(cond), &skip);
- Call(ip);
- bind(&skip);
- return;
- } else if (options().inline_offheap_trampolines) {
- int builtin_index = Builtins::kNoBuiltinId;
- if (isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
- Builtins::IsIsolateIndependent(builtin_index)) {
- // Inline the trampoline.
- RecordCommentForOffHeapTrampoline(builtin_index);
- DCHECK(Builtins::IsBuiltinId(builtin_index));
- EmbeddedData d = EmbeddedData::FromBlob();
- Address entry = d.InstructionStartOfBuiltin(builtin_index);
- // Use ip directly instead of using UseScratchRegisterScope, as we do
- // not preserve scratch registers across calls.
- mov(ip, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
- Label skip;
- if (cond != al) b(NegateCondition(cond), &skip);
- Call(ip);
- bind(&skip);
- return;
- }
- }
+ DCHECK_IMPLIES(options().isolate_independent_code,
+ Builtins::IsIsolateIndependentBuiltin(*code));
+ DCHECK_IMPLIES(options().use_pc_relative_calls_and_jumps,
+ Builtins::IsIsolateIndependentBuiltin(*code));
+
+ int builtin_index = Builtins::kNoBuiltinId;
+ bool target_is_isolate_independent_builtin =
+ isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
+ Builtins::IsIsolateIndependent(builtin_index);
+
+ if (root_array_available_ && options().isolate_independent_code) {
+ Label skip;
+ int offset = code->builtin_index() * kSystemPointerSize +
+ IsolateData::builtin_entry_table_offset();
+ LoadP(ip, MemOperand(kRootRegister, offset));
+ if (cond != al) b(NegateCondition(cond), &skip);
+ Call(ip);
+ bind(&skip);
+ return;
+ } else if (options().inline_offheap_trampolines &&
+ target_is_isolate_independent_builtin) {
+ // Inline the trampoline.
+ RecordCommentForOffHeapTrampoline(builtin_index);
+ EmbeddedData d = EmbeddedData::FromBlob();
+ Address entry = d.InstructionStartOfBuiltin(builtin_index);
+ // Use ip directly instead of using UseScratchRegisterScope, as we do
+ // not preserve scratch registers across calls.
+ mov(ip, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
+ Label skip;
+ if (cond != al) b(NegateCondition(cond), &skip);
+ Call(ip);
+ bind(&skip);
+ return;
}
Call(code.address(), rmode, cond);
}
@@ -305,7 +302,7 @@ void TurboAssembler::Push(Handle<HeapObject> handle) {
push(r0);
}
-void TurboAssembler::Push(Smi* smi) {
+void TurboAssembler::Push(Smi smi) {
mov(r0, Operand(smi));
push(r0);
}
@@ -398,7 +395,8 @@ void TurboAssembler::MultiPopDoubles(RegList dregs, Register location) {
void TurboAssembler::LoadRoot(Register destination, RootIndex index,
Condition cond) {
DCHECK(cond == al);
- LoadP(destination, MemOperand(kRootRegister, RootRegisterOffset(index)), r0);
+ LoadP(destination,
+ MemOperand(kRootRegister, RootRegisterOffsetForRootIndex(index)), r0);
}
void MacroAssembler::RecordWriteField(Register object, int offset,
@@ -468,25 +466,43 @@ void TurboAssembler::RestoreRegisters(RegList registers) {
void TurboAssembler::CallRecordWriteStub(
Register object, Register address,
RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode) {
+ CallRecordWriteStub(
+ object, address, remembered_set_action, fp_mode,
+ isolate()->builtins()->builtin_handle(Builtins::kRecordWrite),
+ kNullAddress);
+}
+
+void TurboAssembler::CallRecordWriteStub(
+ Register object, Register address,
+ RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode,
+ Address wasm_target) {
+ CallRecordWriteStub(object, address, remembered_set_action, fp_mode,
+ Handle<Code>::null(), wasm_target);
+}
+
+void TurboAssembler::CallRecordWriteStub(
+ Register object, Register address,
+ RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode,
+ Handle<Code> code_target, Address wasm_target) {
+ DCHECK_NE(code_target.is_null(), wasm_target == kNullAddress);
// TODO(albertnetymk): For now we ignore remembered_set_action and fp_mode,
// i.e. always emit the remembered set and save FP registers in RecordWriteStub.
// If a large performance regression is observed, we should use these values
// to avoid unnecessary work.
- Callable const callable =
- Builtins::CallableFor(isolate(), Builtins::kRecordWrite);
- RegList registers = callable.descriptor().allocatable_registers();
+ RecordWriteDescriptor descriptor;
+ RegList registers = descriptor.allocatable_registers();
SaveRegisters(registers);
- Register object_parameter(callable.descriptor().GetRegisterParameter(
- RecordWriteDescriptor::kObject));
+ Register object_parameter(
+ descriptor.GetRegisterParameter(RecordWriteDescriptor::kObject));
Register slot_parameter(
- callable.descriptor().GetRegisterParameter(RecordWriteDescriptor::kSlot));
- Register remembered_set_parameter(callable.descriptor().GetRegisterParameter(
- RecordWriteDescriptor::kRememberedSet));
- Register fp_mode_parameter(callable.descriptor().GetRegisterParameter(
- RecordWriteDescriptor::kFPMode));
+ descriptor.GetRegisterParameter(RecordWriteDescriptor::kSlot));
+ Register remembered_set_parameter(
+ descriptor.GetRegisterParameter(RecordWriteDescriptor::kRememberedSet));
+ Register fp_mode_parameter(
+ descriptor.GetRegisterParameter(RecordWriteDescriptor::kFPMode));
push(object);
push(address);
@@ -496,7 +512,11 @@ void TurboAssembler::CallRecordWriteStub(
Move(remembered_set_parameter, Smi::FromEnum(remembered_set_action));
Move(fp_mode_parameter, Smi::FromEnum(fp_mode));
- Call(callable.code(), RelocInfo::CODE_TARGET);
+ if (code_target.is_null()) {
+ Call(wasm_target, RelocInfo::WASM_STUB_CALL);
+ } else {
+ Call(code_target, RelocInfo::CODE_TARGET);
+ }
RestoreRegisters(registers);
}
@@ -1262,7 +1282,7 @@ void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
Move(r7, debug_hook_active);
LoadByte(r7, MemOperand(r7), r0);
extsb(r7, r7);
- CmpSmiLiteral(r7, Smi::kZero, r0);
+ CmpSmiLiteral(r7, Smi::zero(), r0);
beq(&skip_hook);
{
@@ -1331,12 +1351,11 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
// call sites.
Register code = kJavaScriptCallCodeStartRegister;
LoadP(code, FieldMemOperand(function, JSFunction::kCodeOffset));
- addi(code, code, Operand(Code::kHeaderSize - kHeapObjectTag));
if (flag == CALL_FUNCTION) {
- CallJSEntry(code);
+ CallCodeObject(code);
} else {
DCHECK(flag == JUMP_FUNCTION);
- JumpToJSEntry(code);
+ JumpCodeObject(code);
}
// Continue here if InvokePrologue does handle the invocation due to
@@ -1399,17 +1418,17 @@ void MacroAssembler::PushStackHandler() {
STATIC_ASSERT(StackHandlerConstants::kSize == 2 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
- Push(Smi::kZero); // Padding.
+ Push(Smi::zero()); // Padding.
// Link the current handler as the next handler.
- // Preserve r3-r7.
- mov(r8, Operand(ExternalReference::Create(IsolateAddressId::kHandlerAddress,
- isolate())));
- LoadP(r0, MemOperand(r8));
+ // Preserve r4-r8.
+ Move(r3,
+ ExternalReference::Create(IsolateAddressId::kHandlerAddress, isolate()));
+ LoadP(r0, MemOperand(r3));
push(r0);
// Set this new handler as the current one.
- StoreP(sp, MemOperand(r8));
+ StoreP(sp, MemOperand(r3));
}
@@ -1418,8 +1437,8 @@ void MacroAssembler::PopStackHandler() {
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
pop(r4);
- mov(ip, Operand(ExternalReference::Create(IsolateAddressId::kHandlerAddress,
- isolate())));
+ Move(ip, ExternalReference::Create(IsolateAddressId::kHandlerAddress,
+ isolate()));
StoreP(r4, MemOperand(ip));
Drop(1); // Drop padding.
@@ -1538,30 +1557,6 @@ void TurboAssembler::SubAndCheckForOverflow(Register dst, Register left,
}
-void MacroAssembler::CallStub(CodeStub* stub, Condition cond) {
- DCHECK(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
- Call(stub->GetCode(), RelocInfo::CODE_TARGET, cond);
-}
-
-void TurboAssembler::CallStubDelayed(CodeStub* stub) {
- DCHECK(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
-
- // Block constant pool for the call instruction sequence.
- ConstantPoolUnavailableScope constant_pool_unavailable(this);
-
- mov(ip, Operand::EmbeddedCode(stub));
- mtctr(ip);
- bctrl();
-}
-
-void MacroAssembler::TailCallStub(CodeStub* stub, Condition cond) {
- Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond);
-}
-
-bool TurboAssembler::AllowThisStubCall(CodeStub* stub) {
- return has_frame_ || !stub->SometimesSetsUpAFrame();
-}
-
void MacroAssembler::TryDoubleToInt32Exact(Register result,
DoubleRegister double_input,
Register scratch,
@@ -1649,8 +1644,7 @@ void TurboAssembler::CallRuntimeWithCEntry(Runtime::FunctionId fid,
mov(r3, Operand(f->nargs));
Move(r4, ExternalReference::Create(f));
DCHECK(!AreAliased(centry, r3, r4));
- addi(centry, centry, Operand(Code::kHeaderSize - kHeapObjectTag));
- Call(centry);
+ CallCodeObject(centry);
}
void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
@@ -1702,7 +1696,7 @@ void MacroAssembler::JumpToInstructionStream(Address entry) {
void MacroAssembler::LoadWeakValue(Register out, Register in,
Label* target_if_cleared) {
- cmpi(in, Operand(kClearedWeakHeapObject));
+ cmpi(in, Operand(kClearedWeakHeapObjectLower32));
beq(target_if_cleared);
mov(r0, Operand(~kWeakHeapObjectMask));
@@ -1876,6 +1870,10 @@ void MacroAssembler::AssertGeneratorObject(Register object) {
CompareInstanceType(map, instance_type, JS_GENERATOR_OBJECT_TYPE);
beq(&do_check);
+ // Check if JSAsyncFunctionObject (See MacroAssembler::CompareInstanceType)
+ cmpi(instance_type, Operand(JS_ASYNC_FUNCTION_OBJECT_TYPE));
+ beq(&do_check);
+
// Check if JSAsyncGeneratorObject (See MacroAssembler::CompareInstanceType)
cmpi(instance_type, Operand(JS_ASYNC_GENERATOR_OBJECT_TYPE));
@@ -1990,6 +1988,23 @@ void TurboAssembler::CallCFunctionHelper(Register function,
DCHECK_LE(num_reg_arguments + num_double_arguments, kMaxCParameters);
DCHECK(has_frame());
+ // Save the frame pointer and PC so that the stack layout remains iterable,
+ // even without an ExitFrame which normally exists between JS and C frames.
+ if (isolate() != nullptr) {
+ Register scratch1 = r7;
+ Register scratch2 = r8;
+ Push(scratch1, scratch2);
+
+ mflr(scratch2);
+ Move(scratch1, ExternalReference::fast_c_call_caller_pc_address(isolate()));
+ LoadPC(r0);
+ StoreP(r0, MemOperand(scratch1));
+ Move(scratch1, ExternalReference::fast_c_call_caller_fp_address(isolate()));
+ StoreP(fp, MemOperand(scratch1));
+ mtlr(scratch2);
+ Pop(scratch1, scratch2);
+ }
+
// Just call directly. The function called cannot cause a GC, or
// allow preemption, so the return address in the link register
// stays correct.
@@ -2007,6 +2022,17 @@ void TurboAssembler::CallCFunctionHelper(Register function,
Call(dest);
+ if (isolate() != nullptr) {
+ // We don't unset the PC; the FP is the source of truth.
+ Register scratch1 = r7;
+ Register scratch2 = r8;
+ Push(scratch1, scratch2);
+ Move(scratch1, ExternalReference::fast_c_call_caller_fp_address(isolate()));
+ mov(scratch2, Operand::Zero());
+ StoreP(scratch2, MemOperand(scratch1));
+ Pop(scratch1, scratch2);
+ }
+
// Remove frame built in PrepareCallCFunction
int stack_passed_arguments =
CalculateStackPassedWords(num_reg_arguments, num_double_arguments);
@@ -2054,7 +2080,7 @@ void TurboAssembler::LoadIntLiteral(Register dst, int value) {
mov(dst, Operand(value));
}
-void TurboAssembler::LoadSmiLiteral(Register dst, Smi* smi) {
+void TurboAssembler::LoadSmiLiteral(Register dst, Smi smi) {
mov(dst, Operand(smi));
}
@@ -2421,8 +2447,7 @@ void MacroAssembler::Xor(Register ra, Register rs, const Operand& rb,
}
}
-
-void MacroAssembler::CmpSmiLiteral(Register src1, Smi* smi, Register scratch,
+void MacroAssembler::CmpSmiLiteral(Register src1, Smi smi, Register scratch,
CRegister cr) {
#if V8_TARGET_ARCH_PPC64
LoadSmiLiteral(scratch, smi);
@@ -2432,8 +2457,7 @@ void MacroAssembler::CmpSmiLiteral(Register src1, Smi* smi, Register scratch,
#endif
}
-
-void MacroAssembler::CmplSmiLiteral(Register src1, Smi* smi, Register scratch,
+void MacroAssembler::CmplSmiLiteral(Register src1, Smi smi, Register scratch,
CRegister cr) {
#if V8_TARGET_ARCH_PPC64
LoadSmiLiteral(scratch, smi);
@@ -2443,8 +2467,7 @@ void MacroAssembler::CmplSmiLiteral(Register src1, Smi* smi, Register scratch,
#endif
}
-
-void MacroAssembler::AddSmiLiteral(Register dst, Register src, Smi* smi,
+void MacroAssembler::AddSmiLiteral(Register dst, Register src, Smi smi,
Register scratch) {
#if V8_TARGET_ARCH_PPC64
LoadSmiLiteral(scratch, smi);
@@ -2454,8 +2477,7 @@ void MacroAssembler::AddSmiLiteral(Register dst, Register src, Smi* smi,
#endif
}
-
-void MacroAssembler::SubSmiLiteral(Register dst, Register src, Smi* smi,
+void MacroAssembler::SubSmiLiteral(Register dst, Register src, Smi smi,
Register scratch) {
#if V8_TARGET_ARCH_PPC64
LoadSmiLiteral(scratch, smi);
@@ -2465,8 +2487,7 @@ void MacroAssembler::SubSmiLiteral(Register dst, Register src, Smi* smi,
#endif
}
-
-void MacroAssembler::AndSmiLiteral(Register dst, Register src, Smi* smi,
+void MacroAssembler::AndSmiLiteral(Register dst, Register src, Smi smi,
Register scratch, RCBit rc) {
#if V8_TARGET_ARCH_PPC64
LoadSmiLiteral(scratch, smi);
@@ -3000,6 +3021,136 @@ void TurboAssembler::JumpIfLessThan(Register x, int32_t y, Label* dest) {
blt(dest);
}
+void TurboAssembler::CallBuiltinPointer(Register builtin_pointer) {
+ STATIC_ASSERT(kSystemPointerSize == 8);
+ STATIC_ASSERT(kSmiShiftSize == 31);
+ STATIC_ASSERT(kSmiTagSize == 1);
+ STATIC_ASSERT(kSmiTag == 0);
+
+ // The builtin_pointer register contains the builtin index as a Smi.
+ // Untagging is folded into the indexing operand below.
+ ShiftRightArithImm(builtin_pointer, builtin_pointer,
+ kSmiShift - kSystemPointerSizeLog2);
+ addi(builtin_pointer, builtin_pointer,
+ Operand(IsolateData::builtin_entry_table_offset()));
+ LoadPX(builtin_pointer, MemOperand(kRootRegister, builtin_pointer));
+ Call(builtin_pointer);
+}
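
The "untagging is folded into the indexing" comment deserves unpacking: with 64-bit Smis, the index sits in the upper 32 bits, so one arithmetic right shift by kSmiShift - kSystemPointerSizeLog2 both removes the tag and scales the index into a byte offset. A self-contained check of that arithmetic (plain C++, hypothetical index):

#include <cassert>
#include <cstdint>

int main() {
  const int kSmiShift = 32;              // kSmiShiftSize (31) + kSmiTagSize (1)
  const int kSystemPointerSizeLog2 = 3;  // 8-byte entry-table slots
  int64_t index = 7;                     // some builtin index
  int64_t smi = index << kSmiShift;      // the tagged value in the register
  int64_t offset = smi >> (kSmiShift - kSystemPointerSizeLog2);
  assert(offset == index * 8);           // byte offset into the entry table
  return 0;
}
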
+
+void TurboAssembler::LoadCodeObjectEntry(Register destination,
+ Register code_object) {
+ // Code objects are called differently depending on whether we are generating
+ // builtin code (which will later be embedded into the binary) or compiling
+ // user JS code at runtime.
+ // * Builtin code runs in --jitless mode and thus must not call into on-heap
+ // Code targets. Instead, we dispatch through the builtins entry table.
+ // * Codegen at runtime does not have this restriction and we can use the
+ // shorter, branchless instruction sequence. The assumption here is that
+ // targets are usually generated code and not builtin Code objects.
+
+ if (options().isolate_independent_code) {
+ DCHECK(root_array_available());
+ Label if_code_is_builtin, out;
+
+ Register scratch = r11;
+
+ DCHECK(!AreAliased(destination, scratch));
+ DCHECK(!AreAliased(code_object, scratch));
+
+ // Check whether the Code object is a builtin. If so, call its (off-heap)
+ // entry point directly without going through the (on-heap) trampoline.
+ // Otherwise, just call the Code object as always.
+
+ LoadWordArith(scratch,
+ FieldMemOperand(code_object, Code::kBuiltinIndexOffset));
+ cmpi(scratch, Operand(Builtins::kNoBuiltinId));
+ bne(&if_code_is_builtin);
+
+ // For a non-builtin Code object, the entry point is at
+ // Code::raw_instruction_start().
+ addi(destination, code_object, Operand(Code::kHeaderSize - kHeapObjectTag));
+ b(&out);
+
+ // For a builtin Code object, the entry point is loaded from the builtin
+ // entry table.
+ // The builtin index is loaded in scratch.
+ bind(&if_code_is_builtin);
+ ShiftLeftImm(destination, scratch, Operand(kSystemPointerSizeLog2));
+ add(destination, destination, kRootRegister);
+ LoadP(destination,
+ MemOperand(destination, IsolateData::builtin_entry_table_offset()), r0);
+
+ bind(&out);
+ } else {
+ addi(destination, code_object, Operand(Code::kHeaderSize - kHeapObjectTag));
+ }
+}
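
Behind the assembler plumbing, LoadCodeObjectEntry reduces to a single branch. A hedged C++ sketch with placeholder constants (the real offsets live in Code and IsolateData):

#include <cstdint>

constexpr int kNoBuiltinId = -1;
constexpr int kCodeHeaderSize = 0x40;  // placeholder, not the real layout
constexpr int kHeapObjectTag = 1;

uintptr_t CodeEntry(uintptr_t code_object, int builtin_index,
                    const uintptr_t* builtin_entry_table) {
  if (builtin_index != kNoBuiltinId) {
    // Builtin: fetch the off-heap entry point from the per-isolate table.
    return builtin_entry_table[builtin_index];
  }
  // Regular code: execution starts right past the (tagged) Code header.
  return code_object + kCodeHeaderSize - kHeapObjectTag;
}
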
+
+void TurboAssembler::CallCodeObject(Register code_object) {
+ LoadCodeObjectEntry(code_object, code_object);
+ Call(code_object);
+}
+
+void TurboAssembler::JumpCodeObject(Register code_object) {
+ LoadCodeObjectEntry(code_object, code_object);
+ Jump(code_object);
+}
+
+void TurboAssembler::StoreReturnAddressAndCall(Register target) {
+ // This generates the final instruction sequence for calls to C functions
+ // once an exit frame has been constructed.
+ //
+ // Note that this assumes the caller code (i.e. the Code object currently
+ // being generated) is immovable or that the callee function cannot trigger
+ // GC, since the callee function will return to it.
+
+ static constexpr int after_call_offset = 5 * kInstrSize;
+ Label start_call;
+ Register dest = target;
+
+ if (ABI_USES_FUNCTION_DESCRIPTORS) {
+ // AIX/PPC64BE Linux uses a function descriptor. When calling C code, be
+ // aware of this descriptor and pick up values from it.
+ LoadP(ToRegister(ABI_TOC_REGISTER), MemOperand(target, kPointerSize));
+ LoadP(ip, MemOperand(target, 0));
+ dest = ip;
+ } else if (ABI_CALL_VIA_IP && dest != ip) {
+ Move(ip, target);
+ dest = ip;
+ }
+
+ LoadPC(r7);
+ bind(&start_call);
+ addi(r7, r7, Operand(after_call_offset));
+ StoreP(r7, MemOperand(sp, kStackFrameExtraParamSlot * kPointerSize));
+ Call(dest);
+
+ DCHECK_EQ(after_call_offset - kInstrSize,
+ SizeOfCodeGeneratedSince(&start_call));
+}
+
+void TurboAssembler::CallForDeoptimization(Address target, int deopt_id) {
+ NoRootArrayScope no_root_array(this);
+
+ // Save the deopt id in r29 (we don't need the roots array from now on).
+ DCHECK_LE(deopt_id, 0xFFFF);
+
+ mov(r29, Operand(deopt_id));
+ Call(target, RelocInfo::RUNTIME_ENTRY);
+}
+
+void TurboAssembler::ZeroExtByte(Register dst, Register src) {
+ clrldi(dst, src, Operand(56));
+}
+
+void TurboAssembler::ZeroExtHalfWord(Register dst, Register src) {
+ clrldi(dst, src, Operand(48));
+}
+
+void TurboAssembler::ZeroExtWord32(Register dst, Register src) {
+ clrldi(dst, src, Operand(32));
+}
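
The three ZeroExt helpers all lean on clrldi (clear left doubleword immediate): clearing the top 64 - n bits zero-extends an n-bit value. A quick sanity check of the assumed semantics in plain C++:

#include <cassert>
#include <cstdint>

int main() {
  uint64_t src = 0xFFFFFFFF80001234u;          // hypothetical register value
  assert((src & 0xFFu) == 0x34u);              // ZeroExtByte: clrldi ..., 56
  assert((src & 0xFFFFu) == 0x1234u);          // ZeroExtHalfWord: clrldi ..., 48
  assert((src & 0xFFFFFFFFu) == 0x80001234u);  // ZeroExtWord32: clrldi ..., 32
  return 0;
}
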
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/ppc/macro-assembler-ppc.h b/deps/v8/src/ppc/macro-assembler-ppc.h
index 897ac5553e..a85af61761 100644
--- a/deps/v8/src/ppc/macro-assembler-ppc.h
+++ b/deps/v8/src/ppc/macro-assembler-ppc.h
@@ -2,44 +2,22 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#ifndef INCLUDED_FROM_MACRO_ASSEMBLER_H
+#error This header must be included via macro-assembler.h
+#endif
+
#ifndef V8_PPC_MACRO_ASSEMBLER_PPC_H_
#define V8_PPC_MACRO_ASSEMBLER_PPC_H_
-#include "src/assembler.h"
#include "src/bailout-reason.h"
+#include "src/contexts.h"
#include "src/double.h"
#include "src/globals.h"
#include "src/ppc/assembler-ppc.h"
-#include "src/turbo-assembler.h"
namespace v8 {
namespace internal {
-// Give alias names to registers for calling conventions.
-constexpr Register kReturnRegister0 = r3;
-constexpr Register kReturnRegister1 = r4;
-constexpr Register kReturnRegister2 = r5;
-constexpr Register kJSFunctionRegister = r4;
-constexpr Register kContextRegister = r30;
-constexpr Register kAllocateSizeRegister = r4;
-constexpr Register kSpeculationPoisonRegister = r14;
-constexpr Register kInterpreterAccumulatorRegister = r3;
-constexpr Register kInterpreterBytecodeOffsetRegister = r15;
-constexpr Register kInterpreterBytecodeArrayRegister = r16;
-constexpr Register kInterpreterDispatchTableRegister = r17;
-
-constexpr Register kJavaScriptCallArgCountRegister = r3;
-constexpr Register kJavaScriptCallCodeStartRegister = r5;
-constexpr Register kJavaScriptCallTargetRegister = kJSFunctionRegister;
-constexpr Register kJavaScriptCallNewTargetRegister = r6;
-constexpr Register kJavaScriptCallExtraArg1Register = r5;
-
-constexpr Register kOffHeapTrampolineRegister = ip;
-constexpr Register kRuntimeCallFunctionRegister = r4;
-constexpr Register kRuntimeCallArgCountRegister = r3;
-constexpr Register kRuntimeCallArgvRegister = r5;
-constexpr Register kWasmInstanceRegister = r10;
-
// ----------------------------------------------------------------------------
// Static helper functions
@@ -90,14 +68,9 @@ Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2 = no_reg,
class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
public:
- TurboAssembler(const AssemblerOptions& options, void* buffer, int buffer_size)
- : TurboAssemblerBase(options, buffer, buffer_size) {}
-
- TurboAssembler(Isolate* isolate, const AssemblerOptions& options,
- void* buffer, int buffer_size,
- CodeObjectRequired create_code_object)
- : TurboAssemblerBase(isolate, options, buffer, buffer_size,
- create_code_object) {}
+ template <typename... Args>
+ explicit TurboAssembler(Args&&... args)
+ : TurboAssemblerBase(std::forward<Args>(args)...) {}
// Converts the integer (untagged smi) in |src| to a double, storing
// the result to |dst|
@@ -166,10 +139,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
static int ActivationFrameAlignment();
void InitializeRootRegister() {
- ExternalReference roots_array_start =
- ExternalReference::roots_array_start(isolate());
- mov(kRootRegister, Operand(roots_array_start));
- addi(kRootRegister, kRootRegister, Operand(kRootRegisterBias));
+ ExternalReference isolate_root = ExternalReference::isolate_root(isolate());
+ mov(kRootRegister, Operand(isolate_root));
}
// These exist to provide portability between 32 and 64bit
@@ -187,7 +158,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// load a literal signed int value <value> to GPR <dst>
void LoadIntLiteral(Register dst, int value);
// load an SMI value <value> to GPR <dst>
- void LoadSmiLiteral(Register dst, Smi* smi);
+ void LoadSmiLiteral(Register dst, Smi smi);
void LoadSingle(DoubleRegister dst, const MemOperand& mem,
Register scratch = no_reg);
@@ -222,7 +193,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void Push(Register src) { push(src); }
// Push a handle.
void Push(Handle<HeapObject> handle);
- void Push(Smi* smi);
+ void Push(Smi smi);
// Push two registers. Pushes leftmost register first (to highest address).
void Push(Register src1, Register src2) {
@@ -298,6 +269,9 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void CallRecordWriteStub(Register object, Register address,
RememberedSetAction remembered_set_action,
SaveFPRegsMode fp_mode);
+ void CallRecordWriteStub(Register object, Register address,
+ RememberedSetAction remembered_set_action,
+ SaveFPRegsMode fp_mode, Address wasm_target);
void MultiPush(RegList regs, Register location = sp);
void MultiPop(RegList regs, Register location = sp);
@@ -399,7 +373,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// Print a message to stdout and abort execution.
void Abort(AbortReason reason);
- inline bool AllowThisStubCall(CodeStub* stub);
#if !V8_TARGET_ARCH_PPC64
void ShiftLeftPair(Register dst_low, Register dst_high, Register src_low,
Register src_high, Register scratch, Register shift);
@@ -434,11 +407,12 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
Condition cond = al);
void Call(Label* target);
- void CallForDeoptimization(Address target, int deopt_id,
- RelocInfo::Mode rmode) {
- USE(deopt_id);
- Call(target, rmode);
- }
+ void LoadCodeObjectEntry(Register destination, Register code_object) override;
+ void CallCodeObject(Register code_object) override;
+ void JumpCodeObject(Register code_object) override;
+
+ void CallBuiltinPointer(Register builtin_pointer) override;
+ void CallForDeoptimization(Address target, int deopt_id);
// Emit code to discard a non-negative number of pointer-sized elements
// from the stack, clobbering only the sp register.
@@ -485,7 +459,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void MovIntToFloat(DoubleRegister dst, Register src);
void MovFloatToInt(Register dst, DoubleRegister src);
// Register move. May do nothing if the registers are identical.
- void Move(Register dst, Smi* smi) { LoadSmiLiteral(dst, smi); }
+ void Move(Register dst, Smi smi) { LoadSmiLiteral(dst, smi); }
void Move(Register dst, Handle<HeapObject> value);
void Move(Register dst, ExternalReference reference);
void Move(Register dst, Register src, Condition cond = al);
@@ -504,6 +478,11 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// do nothing
}
}
+
+ void ZeroExtByte(Register dst, Register src);
+ void ZeroExtHalfWord(Register dst, Register src);
+ void ZeroExtWord32(Register dst, Register src);
+
// ---------------------------------------------------------------------------
// Bit testing/extraction
//
@@ -515,18 +494,18 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
inline void ExtractBitRange(Register dst, Register src, int rangeStart,
int rangeEnd, RCBit rc = LeaveRC,
bool test = false) {
- DCHECK(rangeStart >= rangeEnd && rangeStart < kBitsPerPointer);
- int rotate = (rangeEnd == 0) ? 0 : kBitsPerPointer - rangeEnd;
+ DCHECK(rangeStart >= rangeEnd && rangeStart < kBitsPerSystemPointer);
+ int rotate = (rangeEnd == 0) ? 0 : kBitsPerSystemPointer - rangeEnd;
int width = rangeStart - rangeEnd + 1;
if (rc == SetRC && rangeStart < 16 && (rangeEnd == 0 || test)) {
// Prefer faster andi when applicable.
andi(dst, src, Operand(((1 << width) - 1) << rangeEnd));
} else {
#if V8_TARGET_ARCH_PPC64
- rldicl(dst, src, rotate, kBitsPerPointer - width, rc);
+ rldicl(dst, src, rotate, kBitsPerSystemPointer - width, rc);
#else
- rlwinm(dst, src, rotate, kBitsPerPointer - width, kBitsPerPointer - 1,
- rc);
+ rlwinm(dst, src, rotate, kBitsPerSystemPointer - width,
+ kBitsPerSystemPointer - 1, rc);
#endif
}
}
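The rldicl/rlwinm rotate-and-mask encoding above is compact but hard to read at a glance. Functionally it extracts bits [rangeEnd, rangeStart] of src (bit 0 = least significant) into the low bits of dst; a portable sketch of the same operation, not tied to PPC:

    #include <cstdint>

    // Portable equivalent of the rotate-and-mask extraction: take bits
    // [range_end, range_start] inclusive of src and right-align them.
    inline uint64_t ExtractBitRangePortable(uint64_t src, int range_start,
                                            int range_end) {
      int width = range_start - range_end + 1;
      uint64_t mask =
          (width == 64) ? ~uint64_t{0} : ((uint64_t{1} << width) - 1);
      return (src >> range_end) & mask;
    }
    // e.g. ExtractBitRangePortable(0xD0, 6, 4) == 0x5 (bits 6..4 of 11010000)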
@@ -540,7 +519,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// into the least significant bits of dst.
inline void ExtractBitMask(Register dst, Register src, uintptr_t mask,
RCBit rc = LeaveRC, bool test = false) {
- int start = kBitsPerPointer - 1;
+ int start = kBitsPerSystemPointer - 1;
int end;
uintptr_t bit = (1L << start);
@@ -628,16 +607,11 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// the JS bitwise operations. See ECMA-262 9.5: ToInt32. Goes to 'done' if it
// succeeds, otherwise falls through if the result is saturated. On return,
// 'result' either holds the answer or is clobbered on fall through.
- //
- // Only public for the test code in test-code-stubs-arm.cc.
void TryInlineTruncateDoubleToI(Register result, DoubleRegister input,
Label* done);
void TruncateDoubleToI(Isolate* isolate, Zone* zone, Register result,
DoubleRegister double_input, StubCallMode stub_mode);
- // Call a code stub.
- void CallStubDelayed(CodeStub* stub);
-
void LoadConstantPoolPointerRegister();
// Loads the constant pool pointer (kConstantPoolRegister).
@@ -650,6 +624,11 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
#endif
}
+ // Generates an instruction sequence s.t. the return address points to the
+ // instruction following the call.
+ // The return address on the stack is used by frame iteration.
+ void StoreReturnAddressAndCall(Register target);
+
void ResetSpeculationPoisonRegister();
private:
@@ -659,21 +638,18 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
int num_double_arguments);
void CallCFunctionHelper(Register function, int num_reg_arguments,
int num_double_arguments);
+ void CallRecordWriteStub(Register object, Register address,
+ RememberedSetAction remembered_set_action,
+ SaveFPRegsMode fp_mode, Handle<Code> code_target,
+ Address wasm_target);
};
// MacroAssembler implements a collection of frequently used macros.
-class MacroAssembler : public TurboAssembler {
+class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
public:
- MacroAssembler(const AssemblerOptions& options, void* buffer, int size)
- : TurboAssembler(options, buffer, size) {}
-
- MacroAssembler(Isolate* isolate, void* buffer, int size,
- CodeObjectRequired create_code_object)
- : MacroAssembler(isolate, AssemblerOptions::Default(isolate), buffer,
- size, create_code_object) {}
-
- MacroAssembler(Isolate* isolate, const AssemblerOptions& options,
- void* buffer, int size, CodeObjectRequired create_code_object);
+ template <typename... Args>
+ explicit MacroAssembler(Args&&... args)
+ : TurboAssembler(std::forward<Args>(args)...) {}
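The three hand-written constructors collapse into one perfect-forwarding template, so MacroAssembler automatically mirrors whatever constructor signatures its base class exposes without restating them. The pattern in isolation, as a sketch:

    #include <utility>

    struct Base {
      explicit Base(int size) { (void)size; }
      Base(void* buffer, int size) { (void)buffer; (void)size; }
    };

    // Derived exposes every Base constructor; std::forward preserves the
    // value category (lvalue/rvalue) of each argument.
    struct Derived : Base {
      template <typename... Args>
      explicit Derived(Args&&... args) : Base(std::forward<Args>(args)...) {}
    };

    // Usage: Derived d1(16); Derived d2(nullptr, 16); both forward to Base.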
// ---------------------------------------------------------------------------
// GC Support
@@ -780,17 +756,15 @@ class MacroAssembler : public TurboAssembler {
void Or(Register ra, Register rs, const Operand& rb, RCBit rc = LeaveRC);
void Xor(Register ra, Register rs, const Operand& rb, RCBit rc = LeaveRC);
- void AddSmiLiteral(Register dst, Register src, Smi* smi, Register scratch);
- void SubSmiLiteral(Register dst, Register src, Smi* smi, Register scratch);
- void CmpSmiLiteral(Register src1, Smi* smi, Register scratch,
+ void AddSmiLiteral(Register dst, Register src, Smi smi, Register scratch);
+ void SubSmiLiteral(Register dst, Register src, Smi smi, Register scratch);
+ void CmpSmiLiteral(Register src1, Smi smi, Register scratch,
CRegister cr = cr7);
- void CmplSmiLiteral(Register src1, Smi* smi, Register scratch,
+ void CmplSmiLiteral(Register src1, Smi smi, Register scratch,
CRegister cr = cr7);
- void AndSmiLiteral(Register dst, Register src, Smi* smi, Register scratch,
+ void AndSmiLiteral(Register dst, Register src, Smi smi, Register scratch,
RCBit rc = LeaveRC);
-
-
// ---------------------------------------------------------------------------
// JavaScript invokes
@@ -883,10 +857,6 @@ class MacroAssembler : public TurboAssembler {
Condition cond = al);
void CallJSEntry(Register target);
- // Call a code stub.
- void CallStub(CodeStub* stub, Condition cond = al);
- void TailCallStub(CodeStub* stub, Condition cond = al);
-
// Call a runtime routine.
void CallRuntime(const Runtime::Function* f, int num_arguments,
SaveFPRegsMode save_doubles = kDontSaveFPRegs);
diff --git a/deps/v8/src/ppc/register-ppc.h b/deps/v8/src/ppc/register-ppc.h
new file mode 100644
index 0000000000..11ddb17dc5
--- /dev/null
+++ b/deps/v8/src/ppc/register-ppc.h
@@ -0,0 +1,321 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_PPC_REGISTER_PPC_H_
+#define V8_PPC_REGISTER_PPC_H_
+
+#include "src/register.h"
+#include "src/reglist.h"
+
+namespace v8 {
+namespace internal {
+
+// clang-format off
+#define GENERAL_REGISTERS(V) \
+ V(r0) V(sp) V(r2) V(r3) V(r4) V(r5) V(r6) V(r7) \
+ V(r8) V(r9) V(r10) V(r11) V(ip) V(r13) V(r14) V(r15) \
+ V(r16) V(r17) V(r18) V(r19) V(r20) V(r21) V(r22) V(r23) \
+ V(r24) V(r25) V(r26) V(r27) V(r28) V(r29) V(r30) V(fp)
+
+#if V8_EMBEDDED_CONSTANT_POOL
+#define ALLOCATABLE_GENERAL_REGISTERS(V) \
+ V(r3) V(r4) V(r5) V(r6) V(r7) \
+ V(r8) V(r9) V(r10) V(r14) V(r15) \
+ V(r16) V(r17) V(r18) V(r19) V(r20) V(r21) V(r22) V(r23) \
+ V(r24) V(r25) V(r26) V(r27) V(r30)
+#else
+#define ALLOCATABLE_GENERAL_REGISTERS(V) \
+ V(r3) V(r4) V(r5) V(r6) V(r7) \
+ V(r8) V(r9) V(r10) V(r14) V(r15) \
+ V(r16) V(r17) V(r18) V(r19) V(r20) V(r21) V(r22) V(r23) \
+ V(r24) V(r25) V(r26) V(r27) V(r28) V(r30)
+#endif
+
+#define LOW_DOUBLE_REGISTERS(V) \
+ V(d0) V(d1) V(d2) V(d3) V(d4) V(d5) V(d6) V(d7) \
+ V(d8) V(d9) V(d10) V(d11) V(d12) V(d13) V(d14) V(d15)
+
+#define NON_LOW_DOUBLE_REGISTERS(V) \
+ V(d16) V(d17) V(d18) V(d19) V(d20) V(d21) V(d22) V(d23) \
+ V(d24) V(d25) V(d26) V(d27) V(d28) V(d29) V(d30) V(d31)
+
+#define DOUBLE_REGISTERS(V) \
+ LOW_DOUBLE_REGISTERS(V) NON_LOW_DOUBLE_REGISTERS(V)
+
+#define FLOAT_REGISTERS DOUBLE_REGISTERS
+#define SIMD128_REGISTERS DOUBLE_REGISTERS
+
+#define ALLOCATABLE_DOUBLE_REGISTERS(V) \
+ V(d1) V(d2) V(d3) V(d4) V(d5) V(d6) V(d7) \
+ V(d8) V(d9) V(d10) V(d11) V(d12) V(d15) \
+ V(d16) V(d17) V(d18) V(d19) V(d20) V(d21) V(d22) V(d23) \
+ V(d24) V(d25) V(d26) V(d27) V(d28) V(d29) V(d30) V(d31)
+
+#define C_REGISTERS(V) \
+ V(cr0) V(cr1) V(cr2) V(cr3) V(cr4) V(cr5) V(cr6) V(cr7) \
+ V(cr8) V(cr9) V(cr10) V(cr11) V(cr12) V(cr15)
+// clang-format on
+
+// Register list in load/store instructions
+// Note that the bit values must match those used in actual instruction encoding
+const int kNumRegs = 32;
+
+// Caller-saved/arguments registers
+const RegList kJSCallerSaved = 1 << 3 | // r3 a1
+ 1 << 4 | // r4 a2
+ 1 << 5 | // r5 a3
+ 1 << 6 | // r6 a4
+ 1 << 7 | // r7 a5
+ 1 << 8 | // r8 a6
+ 1 << 9 | // r9 a7
+ 1 << 10 | // r10 a8
+ 1 << 11; // r11
+
+const int kNumJSCallerSaved = 9;
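A RegList is a plain bitmask with one bit per register code, so set membership and set size are ordinary bit operations. A self-contained sketch using the codes above:

    #include <cstdint>

    using RegList = uint32_t;  // one bit per register code

    constexpr RegList kJSCallerSavedSketch =
        1 << 3 | 1 << 4 | 1 << 5 | 1 << 6 | 1 << 7 |
        1 << 8 | 1 << 9 | 1 << 10 | 1 << 11;  // r3..r11, as above

    // Membership test: is r5 (code 5) in the caller-saved set?
    static_assert((kJSCallerSavedSketch & (1 << 5)) != 0,
                  "r5 is caller-saved");
    // Counting the set bits recovers kNumJSCallerSaved == 9, e.g. via
    // __builtin_popcount on GCC/Clang (compiler builtin, an assumption).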
+
+// Return the code of the n-th caller-saved register available to JavaScript
+// e.g. JSCallerSavedCode(0) returns r3.code() == 3
+int JSCallerSavedCode(int n);
+
+// Callee-saved registers preserved when switching from C to JavaScript
+const RegList kCalleeSaved = 1 << 14 | // r14
+ 1 << 15 | // r15
+ 1 << 16 | // r16
+ 1 << 17 | // r17
+ 1 << 18 | // r18
+ 1 << 19 | // r19
+ 1 << 20 | // r20
+ 1 << 21 | // r21
+ 1 << 22 | // r22
+ 1 << 23 | // r23
+ 1 << 24 | // r24
+ 1 << 25 | // r25
+ 1 << 26 | // r26
+ 1 << 27 | // r27
+ 1 << 28 | // r28
+ 1 << 29 | // r29
+ 1 << 30 | // r30
+ 1 << 31; // r31
+
+const int kNumCalleeSaved = 18;
+
+const RegList kCallerSavedDoubles = 1 << 0 | // d0
+ 1 << 1 | // d1
+ 1 << 2 | // d2
+ 1 << 3 | // d3
+ 1 << 4 | // d4
+ 1 << 5 | // d5
+ 1 << 6 | // d6
+ 1 << 7 | // d7
+ 1 << 8 | // d8
+ 1 << 9 | // d9
+ 1 << 10 | // d10
+ 1 << 11 | // d11
+ 1 << 12 | // d12
+ 1 << 13; // d13
+
+const int kNumCallerSavedDoubles = 14;
+
+const RegList kCalleeSavedDoubles = 1 << 14 | // d14
+ 1 << 15 | // d15
+ 1 << 16 | // d16
+ 1 << 17 | // d17
+ 1 << 18 | // d18
+ 1 << 19 | // d19
+ 1 << 20 | // d20
+ 1 << 21 | // d21
+ 1 << 22 | // d22
+ 1 << 23 | // d23
+ 1 << 24 | // d24
+ 1 << 25 | // d25
+ 1 << 26 | // d26
+ 1 << 27 | // d27
+ 1 << 28 | // d28
+ 1 << 29 | // d29
+ 1 << 30 | // d30
+ 1 << 31; // d31
+
+const int kNumCalleeSavedDoubles = 18;
+
+// Number of registers for which space is reserved in safepoints. Must be a
+// multiple of 8.
+const int kNumSafepointRegisters = 32;
+
+// The following constants describe the stack frame linkage area as
+// defined by the ABI. Note that kNumRequiredStackFrameSlots must
+// satisfy alignment requirements (rounding up if required).
+#if V8_TARGET_ARCH_PPC64 && V8_TARGET_LITTLE_ENDIAN // ppc64le linux
+// [0] back chain
+// [1] condition register save area
+// [2] link register save area
+// [3] TOC save area
+// [4] Parameter1 save area
+// ...
+// [11] Parameter8 save area
+// [12] Parameter9 slot (if necessary)
+// ...
+const int kNumRequiredStackFrameSlots = 12;
+const int kStackFrameLRSlot = 2;
+const int kStackFrameExtraParamSlot = 12;
+#else // AIX
+// [0] back chain
+// [1] condition register save area
+// [2] link register save area
+// [3] reserved for compiler
+// [4] reserved by binder
+// [5] TOC save area
+// [6] Parameter1 save area
+// ...
+// [13] Parameter8 save area
+// [14] Parameter9 slot (if necessary)
+// ...
+const int kNumRequiredStackFrameSlots = 14;
+const int kStackFrameLRSlot = 2;
+const int kStackFrameExtraParamSlot = 14;
+#endif
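Given these constants, the byte offset of an on-stack parameter follows directly: slot index times pointer size. A worked example for the ppc64le layout above, assuming the usual 8-byte kSystemPointerSize:

    constexpr int kSystemPointerSize = 8;  // assumption for ppc64le
    constexpr int kExtraParamSlot = 12;    // kStackFrameExtraParamSlot

    // Byte offset from SP of the n-th extra (9th and later) parameter,
    // which lives just past the 12 required linkage/parameter slots.
    constexpr int ExtraParamOffset(int n) {
      return (kExtraParamSlot + n) * kSystemPointerSize;
    }
    static_assert(ExtraParamOffset(0) == 96, "first extra param at SP + 96");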
+
+// Define the list of registers actually saved at safepoints.
+// Note that the number of saved registers may be smaller than the reserved
+// space, i.e. kNumSafepointSavedRegisters <= kNumSafepointRegisters.
+const RegList kSafepointSavedRegisters = kJSCallerSaved | kCalleeSaved;
+const int kNumSafepointSavedRegisters = kNumJSCallerSaved + kNumCalleeSaved;
+
+enum RegisterCode {
+#define REGISTER_CODE(R) kRegCode_##R,
+ GENERAL_REGISTERS(REGISTER_CODE)
+#undef REGISTER_CODE
+ kRegAfterLast
+};
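The enum is generated with the X-macro pattern: GENERAL_REGISTERS(V) applies V once per register name, so a single list drives this enum, the constexpr Register definitions below, and DEFINE_REGISTER_NAMES. The technique in miniature:

    // X-macro sketch: one list, several expansions.
    #define COLORS(V) V(red) V(green) V(blue)

    enum ColorCode {
    #define CODE(c) kColor_##c,
      COLORS(CODE)
    #undef CODE
      kColorCount  // == 3
    };

    static const char* kColorNames[] = {
    #define NAME(c) #c,
        COLORS(NAME)
    #undef NAME
    };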
+
+class Register : public RegisterBase<Register, kRegAfterLast> {
+ public:
+#if V8_TARGET_LITTLE_ENDIAN
+ static constexpr int kMantissaOffset = 0;
+ static constexpr int kExponentOffset = 4;
+#else
+ static constexpr int kMantissaOffset = 4;
+ static constexpr int kExponentOffset = 0;
+#endif
+
+ private:
+ friend class RegisterBase;
+ explicit constexpr Register(int code) : RegisterBase(code) {}
+};
+
+ASSERT_TRIVIALLY_COPYABLE(Register);
+static_assert(sizeof(Register) == sizeof(int),
+ "Register can efficiently be passed by value");
+
+#define DEFINE_REGISTER(R) \
+ constexpr Register R = Register::from_code<kRegCode_##R>();
+GENERAL_REGISTERS(DEFINE_REGISTER)
+#undef DEFINE_REGISTER
+constexpr Register no_reg = Register::no_reg();
+
+// Aliases
+constexpr Register kConstantPoolRegister = r28; // Constant pool.
+constexpr Register kRootRegister = r29; // Roots array pointer.
+constexpr Register cp = r30; // JavaScript context pointer.
+
+constexpr bool kPadArguments = false;
+constexpr bool kSimpleFPAliasing = true;
+constexpr bool kSimdMaskRegisters = false;
+
+enum DoubleRegisterCode {
+#define REGISTER_CODE(R) kDoubleCode_##R,
+ DOUBLE_REGISTERS(REGISTER_CODE)
+#undef REGISTER_CODE
+ kDoubleAfterLast
+};
+
+// Double word FP register.
+class DoubleRegister : public RegisterBase<DoubleRegister, kDoubleAfterLast> {
+ public:
+ // A few double registers are reserved: one as a scratch register and one
+ // to hold 0.0 (see the kScratchDoubleReg and kDoubleRegZero aliases below).
+ // d13: scratch register.
+ // d14: 0.0
+ static constexpr int kSizeInBytes = 8;
+ inline static int NumRegisters();
+
+ private:
+ friend class RegisterBase;
+ explicit constexpr DoubleRegister(int code) : RegisterBase(code) {}
+};
+
+ASSERT_TRIVIALLY_COPYABLE(DoubleRegister);
+static_assert(sizeof(DoubleRegister) == sizeof(int),
+ "DoubleRegister can efficiently be passed by value");
+
+typedef DoubleRegister FloatRegister;
+
+// TODO(ppc) Define SIMD registers.
+typedef DoubleRegister Simd128Register;
+
+#define DEFINE_REGISTER(R) \
+ constexpr DoubleRegister R = DoubleRegister::from_code<kDoubleCode_##R>();
+DOUBLE_REGISTERS(DEFINE_REGISTER)
+#undef DEFINE_REGISTER
+constexpr DoubleRegister no_dreg = DoubleRegister::no_reg();
+
+constexpr DoubleRegister kFirstCalleeSavedDoubleReg = d14;
+constexpr DoubleRegister kLastCalleeSavedDoubleReg = d31;
+constexpr DoubleRegister kDoubleRegZero = d14;
+constexpr DoubleRegister kScratchDoubleReg = d13;
+
+Register ToRegister(int num);
+
+enum CRegisterCode {
+#define REGISTER_CODE(R) kCCode_##R,
+ C_REGISTERS(REGISTER_CODE)
+#undef REGISTER_CODE
+ kCAfterLast
+};
+
+// Condition register
+class CRegister : public RegisterBase<CRegister, kCAfterLast> {
+ friend class RegisterBase;
+ explicit constexpr CRegister(int code) : RegisterBase(code) {}
+};
+
+constexpr CRegister no_creg = CRegister::no_reg();
+#define DECLARE_C_REGISTER(R) \
+ constexpr CRegister R = CRegister::from_code<kCCode_##R>();
+C_REGISTERS(DECLARE_C_REGISTER)
+#undef DECLARE_C_REGISTER
+
+// Define {RegisterName} methods for the register types.
+DEFINE_REGISTER_NAMES(Register, GENERAL_REGISTERS);
+DEFINE_REGISTER_NAMES(DoubleRegister, DOUBLE_REGISTERS);
+
+// Give alias names to registers for calling conventions.
+constexpr Register kReturnRegister0 = r3;
+constexpr Register kReturnRegister1 = r4;
+constexpr Register kReturnRegister2 = r5;
+constexpr Register kJSFunctionRegister = r4;
+constexpr Register kContextRegister = r30;
+constexpr Register kAllocateSizeRegister = r4;
+constexpr Register kSpeculationPoisonRegister = r14;
+constexpr Register kInterpreterAccumulatorRegister = r3;
+constexpr Register kInterpreterBytecodeOffsetRegister = r15;
+constexpr Register kInterpreterBytecodeArrayRegister = r16;
+constexpr Register kInterpreterDispatchTableRegister = r17;
+
+constexpr Register kJavaScriptCallArgCountRegister = r3;
+constexpr Register kJavaScriptCallCodeStartRegister = r5;
+constexpr Register kJavaScriptCallTargetRegister = kJSFunctionRegister;
+constexpr Register kJavaScriptCallNewTargetRegister = r6;
+constexpr Register kJavaScriptCallExtraArg1Register = r5;
+
+constexpr Register kOffHeapTrampolineRegister = ip;
+constexpr Register kRuntimeCallFunctionRegister = r4;
+constexpr Register kRuntimeCallArgCountRegister = r3;
+constexpr Register kRuntimeCallArgvRegister = r5;
+constexpr Register kWasmInstanceRegister = r10;
+constexpr Register kWasmCompileLazyFuncIndexRegister = r15;
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_PPC_REGISTER_PPC_H_
diff --git a/deps/v8/src/ppc/simulator-ppc.cc b/deps/v8/src/ppc/simulator-ppc.cc
index 900e03f6bb..b46610d592 100644
--- a/deps/v8/src/ppc/simulator-ppc.cc
+++ b/deps/v8/src/ppc/simulator-ppc.cc
@@ -2,34 +2,32 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/ppc/simulator-ppc.h"
+
+#if defined(USE_SIMULATOR)
+
#include <stdarg.h>
#include <stdlib.h>
#include <cmath>
-#if V8_TARGET_ARCH_PPC
-
#include "src/assembler.h"
#include "src/base/bits.h"
-#include "src/codegen.h"
+#include "src/base/lazy-instance.h"
#include "src/disasm.h"
#include "src/macro-assembler.h"
+#include "src/objects-inl.h"
#include "src/ostreams.h"
#include "src/ppc/constants-ppc.h"
#include "src/ppc/frame-constants-ppc.h"
-#include "src/ppc/simulator-ppc.h"
+#include "src/register-configuration.h"
#include "src/runtime/runtime-utils.h"
-#if defined(USE_SIMULATOR)
-
// Only build the simulator if not compiling for real PPC hardware.
namespace v8 {
namespace internal {
-const auto GetRegConfig = RegisterConfiguration::Default;
-
-// static
-base::LazyInstance<Simulator::GlobalMonitor>::type Simulator::global_monitor_ =
- LAZY_INSTANCE_INITIALIZER;
+DEFINE_LAZY_LEAKY_OBJECT_GETTER(Simulator::GlobalMonitor,
+ Simulator::GlobalMonitor::Get);
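DEFINE_LAZY_LEAKY_OBJECT_GETTER replaces the LazyInstance static member: the monitor is constructed on first use and intentionally never destroyed, sidestepping shutdown-order problems (hence the friend declaration for base::LeakyObject in the header later in this patch). Roughly the shape such a getter takes, as a sketch:

    #include <new>
    #include <type_traits>

    // Leaky singleton: placement-new into static storage on first call;
    // the destructor never runs, so the object safely outlives all users.
    template <typename T>
    T* LeakyGet() {
      static typename std::aligned_storage<sizeof(T), alignof(T)>::type slot;
      static T* instance = new (&slot) T();  // thread-safe since C++11
      return instance;
    }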
// This macro provides a platform independent use of sscanf. The reason for
// SScanF not being implemented in a platform independent way through
@@ -272,7 +270,7 @@ void PPCDebugger::Debug() {
for (int i = 0; i < kNumRegisters; i++) {
value = GetRegisterValue(i);
PrintF(" %3s: %08" V8PRIxPTR,
- GetRegConfig()->GetGeneralRegisterName(i), value);
+ RegisterName(Register::from_code(i)), value);
if ((argc == 3 && strcmp(arg2, "fp") == 0) && i < 8 &&
(i % 2) == 0) {
dvalue = GetRegisterPairDoubleValue(i);
@@ -291,7 +289,7 @@ void PPCDebugger::Debug() {
for (int i = 0; i < kNumRegisters; i++) {
value = GetRegisterValue(i);
PrintF(" %3s: %08" V8PRIxPTR " %11" V8PRIdPTR,
- GetRegConfig()->GetGeneralRegisterName(i), value, value);
+ RegisterName(Register::from_code(i)), value, value);
if ((argc == 3 && strcmp(arg2, "fp") == 0) && i < 8 &&
(i % 2) == 0) {
dvalue = GetRegisterPairDoubleValue(i);
@@ -311,7 +309,7 @@ void PPCDebugger::Debug() {
dvalue = GetFPDoubleRegisterValue(i);
uint64_t as_words = bit_cast<uint64_t>(dvalue);
PrintF("%3s: %f 0x%08x %08x\n",
- GetRegConfig()->GetDoubleRegisterName(i), dvalue,
+ RegisterName(DoubleRegister::from_code(i)), dvalue,
static_cast<uint32_t>(as_words >> 32),
static_cast<uint32_t>(as_words & 0xFFFFFFFF));
}
@@ -349,7 +347,7 @@ void PPCDebugger::Debug() {
intptr_t value;
StdoutStream os;
if (GetValue(arg1, &value)) {
- Object* obj = reinterpret_cast<Object*>(value);
+ Object obj(value);
os << arg1 << ": \n";
#ifdef DEBUG
obj->Print(os);
@@ -401,14 +399,12 @@ void PPCDebugger::Debug() {
while (cur < end) {
PrintF(" 0x%08" V8PRIxPTR ": 0x%08" V8PRIxPTR " %10" V8PRIdPTR,
reinterpret_cast<intptr_t>(cur), *cur, *cur);
- HeapObject* obj = reinterpret_cast<HeapObject*>(*cur);
- intptr_t value = *cur;
+ Object obj(*cur);
Heap* current_heap = sim_->isolate_->heap();
- if (((value & 1) == 0) ||
- current_heap->ContainsSlow(obj->address())) {
+ if (obj.IsSmi() || current_heap->Contains(HeapObject::cast(obj))) {
PrintF(" (");
- if ((value & 1) == 0) {
- PrintF("smi %d", PlatformSmiTagging::SmiToInt(obj));
+ if (obj.IsSmi()) {
+ PrintF("smi %d", Smi::ToInt(obj));
} else {
obj->ShortPrint();
}
@@ -774,7 +770,6 @@ Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
}
Simulator::~Simulator() {
- global_monitor_.Pointer()->RemoveProcessor(&global_monitor_processor_);
free(stack_);
}
@@ -870,245 +865,26 @@ void Simulator::TrashCallerSaveRegisters() {
#endif
}
-int Simulator::WriteExDW(intptr_t addr, uint64_t value, Instruction* instr) {
- base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
- if (local_monitor_.NotifyStoreExcl(addr, TransactionSize::Word) &&
- global_monitor_.Pointer()->NotifyStoreExcl_Locked(
- addr, &global_monitor_processor_)) {
- uint64_t* ptr = reinterpret_cast<uint64_t*>(addr);
- *ptr = value;
- return 0;
- } else {
- return 1;
- }
-}
-
-uint64_t Simulator::ReadExDWU(intptr_t addr, Instruction* instr) {
- base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
- local_monitor_.NotifyLoadExcl(addr, TransactionSize::Word);
- global_monitor_.Pointer()->NotifyLoadExcl_Locked(addr,
- &global_monitor_processor_);
- uint64_t* ptr = reinterpret_cast<uint64_t*>(addr);
- return *ptr;
-}
-
-uint32_t Simulator::ReadWU(intptr_t addr, Instruction* instr) {
- // All supported PPC targets allow unaligned accesses, so we don't need to
- // check the alignment here.
- base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
- local_monitor_.NotifyLoad(addr);
- uint32_t* ptr = reinterpret_cast<uint32_t*>(addr);
- return *ptr;
-}
-
-uint32_t Simulator::ReadExWU(intptr_t addr, Instruction* instr) {
- base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
- local_monitor_.NotifyLoadExcl(addr, TransactionSize::Word);
- global_monitor_.Pointer()->NotifyLoadExcl_Locked(addr,
- &global_monitor_processor_);
- uint32_t* ptr = reinterpret_cast<uint32_t*>(addr);
- return *ptr;
-}
-
-int32_t Simulator::ReadW(intptr_t addr, Instruction* instr) {
- // All supported PPC targets allow unaligned accesses, so we don't need to
- // check the alignment here.
- base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
- local_monitor_.NotifyLoad(addr);
- int32_t* ptr = reinterpret_cast<int32_t*>(addr);
- return *ptr;
-}
-
-
-void Simulator::WriteW(intptr_t addr, uint32_t value, Instruction* instr) {
- // All supported PPC targets allow unaligned accesses, so we don't need to
- // check the alignment here.
- base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
- local_monitor_.NotifyStore(addr);
- global_monitor_.Pointer()->NotifyStore_Locked(addr,
- &global_monitor_processor_);
- uint32_t* ptr = reinterpret_cast<uint32_t*>(addr);
- *ptr = value;
- return;
-}
-
-int Simulator::WriteExW(intptr_t addr, uint32_t value, Instruction* instr) {
- base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
- if (local_monitor_.NotifyStoreExcl(addr, TransactionSize::Word) &&
- global_monitor_.Pointer()->NotifyStoreExcl_Locked(
- addr, &global_monitor_processor_)) {
- uint32_t* ptr = reinterpret_cast<uint32_t*>(addr);
- *ptr = value;
- return 0;
- } else {
- return 1;
+#define GENERATE_RW_FUNC(size, type) \
+ type Simulator::Read##size(uintptr_t addr) { \
+ type value; \
+ Read(addr, &value); \
+ return value; \
+ } \
+ type Simulator::ReadEx##size(uintptr_t addr) { \
+ type value; \
+ ReadEx(addr, &value); \
+ return value; \
+ } \
+ void Simulator::Write##size(uintptr_t addr, type value) { \
+ Write(addr, value); \
+ } \
+ int32_t Simulator::WriteEx##size(uintptr_t addr, type value) { \
+ return WriteEx(addr, value); \
}
-}
-
-void Simulator::WriteW(intptr_t addr, int32_t value, Instruction* instr) {
- // All supported PPC targets allow unaligned accesses, so we don't need to
- // check the alignment here.
- base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
- local_monitor_.NotifyStore(addr);
- global_monitor_.Pointer()->NotifyStore_Locked(addr,
- &global_monitor_processor_);
- int32_t* ptr = reinterpret_cast<int32_t*>(addr);
- *ptr = value;
- return;
-}
-
-uint16_t Simulator::ReadHU(intptr_t addr, Instruction* instr) {
- // All supported PPC targets allow unaligned accesses, so we don't need to
- // check the alignment here.
- base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
- local_monitor_.NotifyLoad(addr);
- uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
- return *ptr;
-}
-
-uint16_t Simulator::ReadExHU(intptr_t addr, Instruction* instr) {
- base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
- local_monitor_.NotifyLoadExcl(addr, TransactionSize::HalfWord);
- global_monitor_.Pointer()->NotifyLoadExcl_Locked(addr,
- &global_monitor_processor_);
- uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
- return *ptr;
-}
-
-int16_t Simulator::ReadH(intptr_t addr, Instruction* instr) {
- // All supported PPC targets allow unaligned accesses, so we don't need to
- // check the alignment here.
- base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
- local_monitor_.NotifyLoad(addr);
- int16_t* ptr = reinterpret_cast<int16_t*>(addr);
- return *ptr;
-}
-
-
-void Simulator::WriteH(intptr_t addr, uint16_t value, Instruction* instr) {
- // All supported PPC targets allow unaligned accesses, so we don't need to
- // check the alignment here.
- base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
- local_monitor_.NotifyStore(addr);
- global_monitor_.Pointer()->NotifyStore_Locked(addr,
- &global_monitor_processor_);
- uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
- *ptr = value;
- return;
-}
-
-
-void Simulator::WriteH(intptr_t addr, int16_t value, Instruction* instr) {
- // All supported PPC targets allow unaligned accesses, so we don't need to
- // check the alignment here.
- base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
- local_monitor_.NotifyStore(addr);
- global_monitor_.Pointer()->NotifyStore_Locked(addr,
- &global_monitor_processor_);
- int16_t* ptr = reinterpret_cast<int16_t*>(addr);
- *ptr = value;
- return;
-}
-
-int Simulator::WriteExH(intptr_t addr, uint16_t value, Instruction* instr) {
- base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
- if (local_monitor_.NotifyStoreExcl(addr, TransactionSize::HalfWord) &&
- global_monitor_.Pointer()->NotifyStoreExcl_Locked(
- addr, &global_monitor_processor_)) {
- uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
- *ptr = value;
- return 0;
- } else {
- return 1;
- }
-}
-
-uint8_t Simulator::ReadBU(intptr_t addr) {
- // All supported PPC targets allow unaligned accesses, so we don't need to
- // check the alignment here.
- base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
- local_monitor_.NotifyLoad(addr);
- uint8_t* ptr = reinterpret_cast<uint8_t*>(addr);
- return *ptr;
-}
-
-
-int8_t Simulator::ReadB(intptr_t addr) {
- // All supported PPC targets allow unaligned accesses, so we don't need to
- // check the alignment here.
- base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
- local_monitor_.NotifyLoad(addr);
- int8_t* ptr = reinterpret_cast<int8_t*>(addr);
- return *ptr;
-}
-
-uint8_t Simulator::ReadExBU(intptr_t addr) {
- base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
- local_monitor_.NotifyLoadExcl(addr, TransactionSize::Byte);
- global_monitor_.Pointer()->NotifyLoadExcl_Locked(addr,
- &global_monitor_processor_);
- uint8_t* ptr = reinterpret_cast<uint8_t*>(addr);
- return *ptr;
-}
-
-void Simulator::WriteB(intptr_t addr, uint8_t value) {
- // All supported PPC targets allow unaligned accesses, so we don't need to
- // check the alignment here.
- base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
- local_monitor_.NotifyStore(addr);
- global_monitor_.Pointer()->NotifyStore_Locked(addr,
- &global_monitor_processor_);
- uint8_t* ptr = reinterpret_cast<uint8_t*>(addr);
- *ptr = value;
-}
-
-
-void Simulator::WriteB(intptr_t addr, int8_t value) {
- // All supported PPC targets allow unaligned accesses, so we don't need to
- // check the alignment here.
- base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
- local_monitor_.NotifyStore(addr);
- global_monitor_.Pointer()->NotifyStore_Locked(addr,
- &global_monitor_processor_);
- int8_t* ptr = reinterpret_cast<int8_t*>(addr);
- *ptr = value;
-}
-
-int Simulator::WriteExB(intptr_t addr, uint8_t value) {
- base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
- if (local_monitor_.NotifyStoreExcl(addr, TransactionSize::Byte) &&
- global_monitor_.Pointer()->NotifyStoreExcl_Locked(
- addr, &global_monitor_processor_)) {
- uint8_t* ptr = reinterpret_cast<uint8_t*>(addr);
- *ptr = value;
- return 0;
- } else {
- return 1;
- }
-}
-
-intptr_t* Simulator::ReadDW(intptr_t addr) {
- // All supported PPC targets allow unaligned accesses, so we don't need to
- // check the alignment here.
- base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
- local_monitor_.NotifyLoad(addr);
- intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
- return ptr;
-}
-
-
-void Simulator::WriteDW(intptr_t addr, int64_t value) {
- // All supported PPC targets allow unaligned accesses, so we don't need to
- // check the alignment here.
- base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
- local_monitor_.NotifyStore(addr);
- global_monitor_.Pointer()->NotifyStore_Locked(addr,
- &global_monitor_processor_);
- int64_t* ptr = reinterpret_cast<int64_t*>(addr);
- *ptr = value;
- return;
-}
+RW_VAR_LIST(GENERATE_RW_FUNC);
+#undef GENERATE_RW_FUNC
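RW_VAR_LIST (defined in simulator-ppc.h later in this patch) pairs each access-size suffix with its C type, and GENERATE_RW_FUNC stamps out the four accessors per size. For the (W, int32_t) pair the expansion is, in effect:

    int32_t Simulator::ReadW(uintptr_t addr) {
      int32_t value;
      Read(addr, &value);    // templated memcpy under the monitor mutex
      return value;
    }
    int32_t Simulator::ReadExW(uintptr_t addr) {
      int32_t value;
      ReadEx(addr, &value);  // additionally arms the exclusive monitor
      return value;
    }
    void Simulator::WriteW(uintptr_t addr, int32_t value) { Write(addr, value); }
    int32_t Simulator::WriteExW(uintptr_t addr, int32_t value) {
      return WriteEx(addr, value);  // 0 on success, 1 if reservation lost
    }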
// Returns the limit of the stack area to enable checking for stack overflows.
uintptr_t Simulator::StackLimit(uintptr_t c_limit) const {
@@ -1172,23 +948,10 @@ bool Simulator::OverflowFrom(int32_t alu_out, int32_t left, int32_t right,
return overflow;
}
-
-#if V8_TARGET_ARCH_PPC64
static void decodeObjectPair(ObjectPair* pair, intptr_t* x, intptr_t* y) {
- *x = reinterpret_cast<intptr_t>(pair->x);
- *y = reinterpret_cast<intptr_t>(pair->y);
+ *x = static_cast<intptr_t>(pair->x);
+ *y = static_cast<intptr_t>(pair->y);
}
-#else
-static void decodeObjectPair(ObjectPair* pair, intptr_t* x, intptr_t* y) {
-#if V8_TARGET_BIG_ENDIAN
- *x = static_cast<int32_t>(*pair >> 32);
- *y = static_cast<int32_t>(*pair);
-#else
- *x = static_cast<int32_t>(*pair);
- *y = static_cast<int32_t>(*pair >> 32);
-#endif
-}
-#endif
// Calls into the V8 runtime.
typedef intptr_t (*SimulatorRuntimeCall)(intptr_t arg0, intptr_t arg1,
@@ -2094,7 +1857,7 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
}
#if V8_TARGET_ARCH_PPC64
case EXTSW: {
- const int shift = kBitsPerPointer - 32;
+ const int shift = kBitsPerSystemPointer - 32;
int ra = instr->RAValue();
int rs = instr->RSValue();
intptr_t rs_val = get_register(rs);
@@ -2107,7 +1870,7 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
}
#endif
case EXTSH: {
- const int shift = kBitsPerPointer - 16;
+ const int shift = kBitsPerSystemPointer - 16;
int ra = instr->RAValue();
int rs = instr->RSValue();
intptr_t rs_val = get_register(rs);
@@ -2119,7 +1882,7 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
break;
}
case EXTSB: {
- const int shift = kBitsPerPointer - 8;
+ const int shift = kBitsPerSystemPointer - 8;
int ra = instr->RAValue();
int rs = instr->RSValue();
intptr_t rs_val = get_register(rs);
@@ -2137,7 +1900,7 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
int rb = instr->RBValue();
intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
intptr_t rb_val = get_register(rb);
- int32_t val = ReadW(ra_val + rb_val, instr);
+ int32_t val = ReadW(ra_val + rb_val);
float* fptr = reinterpret_cast<float*>(&val);
#if V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64
// Conversion using double changes sNan to qNan on ia32/x64
@@ -2165,8 +1928,8 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
int rb = instr->RBValue();
intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
intptr_t rb_val = get_register(rb);
- int64_t* dptr = reinterpret_cast<int64_t*>(ReadDW(ra_val + rb_val));
- set_d_register(frt, *dptr);
+ int64_t dptr = ReadDW(ra_val + rb_val);
+ set_d_register(frt, dptr);
if (opcode == LFDUX) {
DCHECK_NE(ra, 0);
set_register(ra, ra_val + rb_val);
@@ -2196,7 +1959,7 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
#else
p = reinterpret_cast<int32_t*>(&frs_val);
#endif
- WriteW(ra_val + rb_val, *p, instr);
+ WriteW(ra_val + rb_val, *p);
if (opcode == STFSUX) {
DCHECK_NE(ra, 0);
set_register(ra, ra_val + rb_val);
@@ -2250,6 +2013,7 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
#endif
case SYNC: {
// todo - simulate sync
+ __sync_synchronize();
break;
}
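__sync_synchronize() is a GCC/Clang builtin that emits a full memory barrier on the host, a reasonable stand-in for the simulated PPC sync instruction. The standard-C++ equivalent, for reference:

    #include <atomic>

    inline void FullFence() {
      // Sequentially consistent fence: a full memory barrier, matching
      // what __sync_synchronize() emits.
      std::atomic_thread_fence(std::memory_order_seq_cst);
    }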
case ICBI: {
@@ -2263,7 +2027,7 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
int rt = instr->RTValue();
intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
int offset = SIGN_EXT_IMM16(instr->Bits(15, 0));
- set_register(rt, ReadWU(ra_val + offset, instr));
+ set_register(rt, ReadWU(ra_val + offset));
if (opcode == LWZU) {
DCHECK_NE(ra, 0);
set_register(ra, ra_val + offset);
@@ -2292,7 +2056,7 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
int32_t rs_val = get_register(rs);
int offset = SIGN_EXT_IMM16(instr->Bits(15, 0));
- WriteW(ra_val + offset, rs_val, instr);
+ WriteW(ra_val + offset, rs_val);
if (opcode == STWU) {
DCHECK_NE(ra, 0);
set_register(ra, ra_val + offset);
@@ -2328,7 +2092,7 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
int16_t rs_val = get_register(rs);
intptr_t rb_val = get_register(rb);
- SetCR0(WriteExH(ra_val + rb_val, rs_val, instr));
+ SetCR0(WriteExH(ra_val + rb_val, rs_val));
break;
}
case STWCX: {
@@ -2338,7 +2102,7 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
int32_t rs_val = get_register(rs);
intptr_t rb_val = get_register(rb);
- SetCR0(WriteExW(ra_val + rb_val, rs_val, instr));
+ SetCR0(WriteExW(ra_val + rb_val, rs_val));
break;
}
case STDCX: {
@@ -2348,7 +2112,7 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
int64_t rs_val = get_register(rs);
intptr_t rb_val = get_register(rb);
- SetCR0(WriteExDW(ra_val + rb_val, rs_val, instr));
+ SetCR0(WriteExDW(ra_val + rb_val, rs_val));
break;
}
case TW: {
@@ -2962,7 +2726,7 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
int32_t rs_val = get_register(rs);
intptr_t rb_val = get_register(rb);
- WriteW(ra_val + rb_val, rs_val, instr);
+ WriteW(ra_val + rb_val, rs_val);
if (opcode == STWUX) {
DCHECK_NE(ra, 0);
set_register(ra, ra_val + rb_val);
@@ -2992,7 +2756,7 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
int16_t rs_val = get_register(rs);
intptr_t rb_val = get_register(rb);
- WriteH(ra_val + rb_val, rs_val, instr);
+ WriteH(ra_val + rb_val, rs_val);
if (opcode == STHUX) {
DCHECK_NE(ra, 0);
set_register(ra, ra_val + rb_val);
@@ -3006,7 +2770,7 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
int rb = instr->RBValue();
intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
intptr_t rb_val = get_register(rb);
- set_register(rt, ReadWU(ra_val + rb_val, instr));
+ set_register(rt, ReadWU(ra_val + rb_val));
if (opcode == LWZUX) {
DCHECK(ra != 0 && ra != rt);
set_register(ra, ra_val + rb_val);
@@ -3020,7 +2784,7 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
int rb = instr->RBValue();
intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
intptr_t rb_val = get_register(rb);
- set_register(rt, ReadW(ra_val + rb_val, instr));
+ set_register(rt, ReadW(ra_val + rb_val));
break;
}
case LDX:
@@ -3030,8 +2794,8 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
int rb = instr->RBValue();
intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
intptr_t rb_val = get_register(rb);
- intptr_t* result = ReadDW(ra_val + rb_val);
- set_register(rt, *result);
+ intptr_t result = ReadDW(ra_val + rb_val);
+ set_register(rt, result);
if (opcode == LDUX) {
DCHECK(ra != 0 && ra != rt);
set_register(ra, ra_val + rb_val);
@@ -3075,7 +2839,7 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
int rb = instr->RBValue();
intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
intptr_t rb_val = get_register(rb);
- set_register(rt, ReadHU(ra_val + rb_val, instr) & 0xFFFF);
+ set_register(rt, ReadHU(ra_val + rb_val) & 0xFFFF);
if (opcode == LHZUX) {
DCHECK(ra != 0 && ra != rt);
set_register(ra, ra_val + rb_val);
@@ -3088,7 +2852,7 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
int rb = instr->RBValue();
intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
intptr_t rb_val = get_register(rb);
- set_register(rt, ReadH(ra_val + rb_val, instr));
+ set_register(rt, ReadH(ra_val + rb_val));
break;
}
case LBARX: {
@@ -3106,7 +2870,7 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
int rb = instr->RBValue();
intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
intptr_t rb_val = get_register(rb);
- set_register(rt, ReadExHU(ra_val + rb_val, instr));
+ set_register(rt, ReadExHU(ra_val + rb_val));
break;
}
case LWARX: {
@@ -3115,7 +2879,7 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
int rb = instr->RBValue();
intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
intptr_t rb_val = get_register(rb);
- set_register(rt, ReadExWU(ra_val + rb_val, instr));
+ set_register(rt, ReadExWU(ra_val + rb_val));
break;
}
case LDARX: {
@@ -3124,7 +2888,7 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
int rb = instr->RBValue();
intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
intptr_t rb_val = get_register(rb);
- set_register(rt, ReadExDWU(ra_val + rb_val, instr));
+ set_register(rt, ReadExDWU(ra_val + rb_val));
break;
}
case DCBF: {
@@ -3165,7 +2929,7 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
int rt = instr->RTValue();
intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
int offset = SIGN_EXT_IMM16(instr->Bits(15, 0));
- uintptr_t result = ReadHU(ra_val + offset, instr) & 0xFFFF;
+ uintptr_t result = ReadHU(ra_val + offset) & 0xFFFF;
set_register(rt, result);
if (opcode == LHZU) {
set_register(ra, ra_val + offset);
@@ -3179,7 +2943,7 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
int rt = instr->RTValue();
intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
int offset = SIGN_EXT_IMM16(instr->Bits(15, 0));
- intptr_t result = ReadH(ra_val + offset, instr);
+ intptr_t result = ReadH(ra_val + offset);
set_register(rt, result);
if (opcode == LHAU) {
set_register(ra, ra_val + offset);
@@ -3194,7 +2958,7 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
int16_t rs_val = get_register(rs);
int offset = SIGN_EXT_IMM16(instr->Bits(15, 0));
- WriteH(ra_val + offset, rs_val, instr);
+ WriteH(ra_val + offset, rs_val);
if (opcode == STHU) {
DCHECK_NE(ra, 0);
set_register(ra, ra_val + offset);
@@ -3214,7 +2978,7 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
int ra = instr->RAValue();
int32_t offset = SIGN_EXT_IMM16(instr->Bits(15, 0));
intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
- int32_t val = ReadW(ra_val + offset, instr);
+ int32_t val = ReadW(ra_val + offset);
float* fptr = reinterpret_cast<float*>(&val);
#if V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64
// Conversion using double changes sNan to qNan on ia32/x64
@@ -3242,8 +3006,8 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
int ra = instr->RAValue();
int32_t offset = SIGN_EXT_IMM16(instr->Bits(15, 0));
intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
- int64_t* dptr = reinterpret_cast<int64_t*>(ReadDW(ra_val + offset));
- set_d_register(frt, *dptr);
+ int64_t dptr = ReadDW(ra_val + offset);
+ set_d_register(frt, dptr);
if (opcode == LFDU) {
DCHECK_NE(ra, 0);
set_register(ra, ra_val + offset);
@@ -3273,7 +3037,7 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
#else
p = reinterpret_cast<int32_t*>(&frs_val);
#endif
- WriteW(ra_val + offset, *p, instr);
+ WriteW(ra_val + offset, *p);
if (opcode == STFSU) {
DCHECK_NE(ra, 0);
set_register(ra, ra_val + offset);
@@ -3345,11 +3109,10 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
return;
}
case FSQRT: {
- lazily_initialize_fast_sqrt();
int frt = instr->RTValue();
int frb = instr->RBValue();
double frb_val = get_double_from_d_register(frb);
- double frt_val = fast_sqrt(frb_val);
+ double frt_val = std::sqrt(frb_val);
set_d_register_from_double(frt, frt_val);
return;
}
@@ -3847,19 +3610,19 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
int offset = SIGN_EXT_IMM16(instr->Bits(15, 0) & ~3);
switch (instr->Bits(1, 0)) {
case 0: { // ld
- intptr_t* result = ReadDW(ra_val + offset);
- set_register(rt, *result);
+ intptr_t result = ReadDW(ra_val + offset);
+ set_register(rt, result);
break;
}
case 1: { // ldu
- intptr_t* result = ReadDW(ra_val + offset);
- set_register(rt, *result);
+ intptr_t result = ReadDW(ra_val + offset);
+ set_register(rt, result);
DCHECK_NE(ra, 0);
set_register(ra, ra_val + offset);
break;
}
case 2: { // lwa
- intptr_t result = ReadW(ra_val + offset, instr);
+ intptr_t result = ReadW(ra_val + offset);
set_register(rt, result);
break;
}
@@ -4187,170 +3950,58 @@ uintptr_t Simulator::PopAddress() {
return address;
}
-Simulator::LocalMonitor::LocalMonitor()
- : access_state_(MonitorAccess::Open),
- tagged_addr_(0),
- size_(TransactionSize::None) {}
-
-void Simulator::LocalMonitor::Clear() {
+void Simulator::GlobalMonitor::Clear() {
access_state_ = MonitorAccess::Open;
tagged_addr_ = 0;
size_ = TransactionSize::None;
+ thread_id_ = ThreadId::Invalid();
}
-void Simulator::LocalMonitor::NotifyLoad(int32_t addr) {
- if (access_state_ == MonitorAccess::Exclusive) {
- // A load could cause a cache eviction which will affect the monitor. As a
- // result, it's most strict to unconditionally clear the local monitor on
- // load.
- Clear();
- }
-}
-
-void Simulator::LocalMonitor::NotifyLoadExcl(int32_t addr,
- TransactionSize size) {
+void Simulator::GlobalMonitor::NotifyLoadExcl(uintptr_t addr,
+ TransactionSize size,
+ ThreadId thread_id) {
+ // TODO(ppc): By using Global Monitors, we are effectively limiting one
+ // active reservation across all processors. This would potentially serialize
+ // parallel threads executing load&reserve + store conditional on unrelated
+ // memory. Technically, this implementation would still make the simulator
+ // adhere to the spec, but seems overly heavy-handed.
access_state_ = MonitorAccess::Exclusive;
tagged_addr_ = addr;
size_ = size;
+ thread_id_ = thread_id;
}
-void Simulator::LocalMonitor::NotifyStore(int32_t addr) {
- if (access_state_ == MonitorAccess::Exclusive) {
- // A store could cause a cache eviction which will affect the
- // monitor. As a result, it's most strict to unconditionally clear the
- // local monitor on store.
- Clear();
- }
-}
-
-bool Simulator::LocalMonitor::NotifyStoreExcl(int32_t addr,
- TransactionSize size) {
+void Simulator::GlobalMonitor::NotifyStore(uintptr_t addr, TransactionSize size,
+ ThreadId thread_id) {
if (access_state_ == MonitorAccess::Exclusive) {
- if (addr == tagged_addr_ && size_ == size) {
- Clear();
- return true;
- } else {
+ // Check whether this store overlaps the reserved transaction region.
+ uintptr_t transaction_start = addr;
+ uintptr_t transaction_end = addr + static_cast<uintptr_t>(size);
+ uintptr_t exclusive_transaction_start = tagged_addr_;
+ uintptr_t exclusive_transaction_end =
+ tagged_addr_ + static_cast<uintptr_t>(size_);
+ bool is_not_overlapped = transaction_end < exclusive_transaction_start ||
+ exclusive_transaction_end < transaction_start;
+ if (!is_not_overlapped && !thread_id_.Equals(thread_id)) {
Clear();
- return false;
}
- } else {
- DCHECK(access_state_ == MonitorAccess::Open);
- return false;
- }
-}
-
-Simulator::GlobalMonitor::Processor::Processor()
- : access_state_(MonitorAccess::Open),
- tagged_addr_(0),
- next_(nullptr),
- prev_(nullptr) {}
-
-void Simulator::GlobalMonitor::Processor::Clear_Locked() {
- access_state_ = MonitorAccess::Open;
- tagged_addr_ = 0;
-}
-void Simulator::GlobalMonitor::Processor::NotifyLoadExcl_Locked(int32_t addr) {
- access_state_ = MonitorAccess::Exclusive;
- tagged_addr_ = addr;
-}
-
-void Simulator::GlobalMonitor::Processor::NotifyStore_Locked(
- int32_t addr, bool is_requesting_processor) {
- if (access_state_ == MonitorAccess::Exclusive) {
- // It is possible that a store caused a cache eviction,
- // which can affect the montior, so conservatively,
- // we always clear the monitor.
- Clear_Locked();
}
}
-bool Simulator::GlobalMonitor::Processor::NotifyStoreExcl_Locked(
- int32_t addr, bool is_requesting_processor) {
- if (access_state_ == MonitorAccess::Exclusive) {
- if (is_requesting_processor) {
- if (addr == tagged_addr_) {
- Clear_Locked();
- return true;
- }
- } else if (addr == tagged_addr_) {
- Clear_Locked();
- return false;
- }
- }
- return false;
-}
-
-Simulator::GlobalMonitor::GlobalMonitor() : head_(nullptr) {}
-
-void Simulator::GlobalMonitor::NotifyLoadExcl_Locked(int32_t addr,
- Processor* processor) {
- processor->NotifyLoadExcl_Locked(addr);
- PrependProcessor_Locked(processor);
-}
-
-void Simulator::GlobalMonitor::NotifyStore_Locked(int32_t addr,
- Processor* processor) {
- // Notify each processor of the store operation.
- for (Processor* iter = head_; iter; iter = iter->next_) {
- bool is_requesting_processor = iter == processor;
- iter->NotifyStore_Locked(addr, is_requesting_processor);
- }
-}
-
-bool Simulator::GlobalMonitor::NotifyStoreExcl_Locked(int32_t addr,
- Processor* processor) {
- DCHECK(IsProcessorInLinkedList_Locked(processor));
- if (processor->NotifyStoreExcl_Locked(addr, true)) {
- // Notify the other processors that this StoreExcl succeeded.
- for (Processor* iter = head_; iter; iter = iter->next_) {
- if (iter != processor) {
- iter->NotifyStoreExcl_Locked(addr, false);
- }
- }
- return true;
- } else {
- return false;
- }
-}
-
-bool Simulator::GlobalMonitor::IsProcessorInLinkedList_Locked(
- Processor* processor) const {
- return head_ == processor || processor->next_ || processor->prev_;
-}
-
-void Simulator::GlobalMonitor::PrependProcessor_Locked(Processor* processor) {
- if (IsProcessorInLinkedList_Locked(processor)) {
- return;
- }
-
- if (head_) {
- head_->prev_ = processor;
- }
- processor->prev_ = nullptr;
- processor->next_ = head_;
- head_ = processor;
-}
-
-void Simulator::GlobalMonitor::RemoveProcessor(Processor* processor) {
- base::LockGuard<base::Mutex> lock_guard(&mutex);
- if (!IsProcessorInLinkedList_Locked(processor)) {
- return;
- }
-
- if (processor->prev_) {
- processor->prev_->next_ = processor->next_;
- } else {
- head_ = processor->next_;
- }
- if (processor->next_) {
- processor->next_->prev_ = processor->prev_;
- }
- processor->prev_ = nullptr;
- processor->next_ = nullptr;
+bool Simulator::GlobalMonitor::NotifyStoreExcl(uintptr_t addr,
+ TransactionSize size,
+ ThreadId thread_id) {
+ bool permission = access_state_ == MonitorAccess::Exclusive &&
+ addr == tagged_addr_ && size_ == size &&
+ thread_id_.Equals(thread_id);
+ // The reservation is cleared if the processor holding the reservation
+ // executes a store conditional instruction to any address.
+ Clear();
+ return permission;
}
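These hooks model PPC load-reserve/store-conditional (lwarx/stwcx.): the exclusive load establishes a reservation, and the conditional store succeeds only if the reservation is still intact with the same address, size, and thread; any store-conditional clears it either way. The retry loop this supports, as a self-contained toy in which load_reserve and store_conditional are hypothetical stand-ins for the simulated instructions:

    #include <cstdint>

    static bool g_reservation_valid = false;  // toy single-threaded monitor

    inline uint32_t load_reserve(volatile uint32_t* addr) {
      g_reservation_valid = true;  // lwarx: load and set the reservation
      return *addr;
    }
    inline bool store_conditional(volatile uint32_t* addr, uint32_t value) {
      if (!g_reservation_valid) return false;   // reservation was lost
      g_reservation_valid = false;  // any stwcx. clears the reservation
      *addr = value;
      return true;
    }

    // The classic LL/SC retry loop that these monitor hooks make correct:
    uint32_t AtomicIncrement(volatile uint32_t* addr) {
      uint32_t old_value, new_value;
      do {
        old_value = load_reserve(addr);
        new_value = old_value + 1;
      } while (!store_conditional(addr, new_value));
      return new_value;
    }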
} // namespace internal
} // namespace v8
+#undef SScanF
#endif // USE_SIMULATOR
-#endif // V8_TARGET_ARCH_PPC
diff --git a/deps/v8/src/ppc/simulator-ppc.h b/deps/v8/src/ppc/simulator-ppc.h
index 7b26906c29..e0f4eeae2b 100644
--- a/deps/v8/src/ppc/simulator-ppc.h
+++ b/deps/v8/src/ppc/simulator-ppc.h
@@ -12,11 +12,16 @@
#ifndef V8_PPC_SIMULATOR_PPC_H_
#define V8_PPC_SIMULATOR_PPC_H_
-#include "src/allocation.h"
+// globals.h defines USE_SIMULATOR.
+#include "src/globals.h"
#if defined(USE_SIMULATOR)
// Running with a simulator.
+#include "src/allocation.h"
+#include "src/base/lazy-instance.h"
+#include "src/base/platform/mutex.h"
+
#include "src/assembler.h"
#include "src/base/hashmap.h"
#include "src/ppc/constants-ppc.h"
@@ -241,49 +246,61 @@ class Simulator : public SimulatorBase {
void PrintStopInfo(uint32_t code);
// Read and write memory.
- inline uint8_t ReadBU(intptr_t addr);
- inline uint8_t ReadExBU(intptr_t addr);
- inline int8_t ReadB(intptr_t addr);
- inline void WriteB(intptr_t addr, uint8_t value);
- inline int WriteExB(intptr_t addr, uint8_t value);
- inline void WriteB(intptr_t addr, int8_t value);
-
- inline uint16_t ReadHU(intptr_t addr, Instruction* instr);
- inline uint16_t ReadExHU(intptr_t addr, Instruction* instr);
- inline int16_t ReadH(intptr_t addr, Instruction* instr);
- // Note: Overloaded on the sign of the value.
- inline void WriteH(intptr_t addr, uint16_t value, Instruction* instr);
- inline int WriteExH(intptr_t addr, uint16_t value, Instruction* instr);
- inline void WriteH(intptr_t addr, int16_t value, Instruction* instr);
-
- inline uint32_t ReadWU(intptr_t addr, Instruction* instr);
- inline uint32_t ReadExWU(intptr_t addr, Instruction* instr);
- inline int32_t ReadW(intptr_t addr, Instruction* instr);
- inline void WriteW(intptr_t addr, uint32_t value, Instruction* instr);
- inline int WriteExW(intptr_t addr, uint32_t value, Instruction* instr);
- inline void WriteW(intptr_t addr, int32_t value, Instruction* instr);
-
- intptr_t* ReadDW(intptr_t addr);
- void WriteDW(intptr_t addr, int64_t value);
- inline int WriteExDW(intptr_t addr, uint64_t value, Instruction* instr);
- inline uint64_t ReadExDWU(intptr_t addr, Instruction* instr);
+ template <typename T>
+ inline void Read(uintptr_t address, T* value) {
+ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
+ memcpy(value, reinterpret_cast<const char*>(address), sizeof(T));
+ }
+
+ template <typename T>
+ inline void ReadEx(uintptr_t address, T* value) {
+ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
+ GlobalMonitor::Get()->NotifyLoadExcl(
+ address, static_cast<TransactionSize>(sizeof(T)),
+ isolate_->thread_id());
+ memcpy(value, reinterpret_cast<const char*>(address), sizeof(T));
+ }
+
+ template <typename T>
+ inline void Write(uintptr_t address, T value) {
+ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
+ GlobalMonitor::Get()->NotifyStore(address,
+ static_cast<TransactionSize>(sizeof(T)),
+ isolate_->thread_id());
+ memcpy(reinterpret_cast<char*>(address), &value, sizeof(T));
+ }
+
+ template <typename T>
+ inline int32_t WriteEx(uintptr_t address, T value) {
+ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
+ if (GlobalMonitor::Get()->NotifyStoreExcl(
+ address, static_cast<TransactionSize>(sizeof(T)),
+ isolate_->thread_id())) {
+ memcpy(reinterpret_cast<char*>(address), &value, sizeof(T));
+ return 0;
+ } else {
+ return 1;
+ }
+ }
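All the sized accessors now funnel through these four templates. Note the memcpy: unlike dereferencing a reinterpret_cast'ed pointer (the pattern this patch removes), a memcpy-based access is well defined for unaligned addresses and immune to strict-aliasing problems. The core idiom in isolation:

    #include <cstdint>
    #include <cstring>

    // Well-defined unaligned load: copy the bytes instead of type-punning
    // through a pointer cast.
    template <typename T>
    T LoadUnaligned(const void* p) {
      T value;
      std::memcpy(&value, p, sizeof(T));
      return value;
    }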
+
+#define RW_VAR_LIST(V) \
+ V(DWU, uint64_t) \
+ V(DW, int64_t) \
+ V(WU, uint32_t) \
+ V(W, int32_t) V(HU, uint16_t) V(H, int16_t) V(BU, uint8_t) V(B, int8_t)
+
+#define GENERATE_RW_FUNC(size, type) \
+ inline type Read##size(uintptr_t addr); \
+ inline type ReadEx##size(uintptr_t addr); \
+ inline void Write##size(uintptr_t addr, type value); \
+ inline int32_t WriteEx##size(uintptr_t addr, type value);
+
+ RW_VAR_LIST(GENERATE_RW_FUNC);
+#undef GENERATE_RW_FUNC
void Trace(Instruction* instr);
void SetCR0(intptr_t result, bool setSO = false);
void ExecuteBranchConditional(Instruction* instr, BCType type);
- void ExecuteExt1(Instruction* instr);
- bool ExecuteExt2_10bit_part1(Instruction* instr);
- bool ExecuteExt2_10bit_part2(Instruction* instr);
- bool ExecuteExt2_9bit_part1(Instruction* instr);
- bool ExecuteExt2_9bit_part2(Instruction* instr);
- void ExecuteExt2_5bit(Instruction* instr);
- void ExecuteExt2(Instruction* instr);
- void ExecuteExt3(Instruction* instr);
- void ExecuteExt4(Instruction* instr);
-#if V8_TARGET_ARCH_PPC64
- void ExecuteExt5(Instruction* instr);
-#endif
- void ExecuteExt6(Instruction* instr);
void ExecuteGeneric(Instruction* instr);
void SetFPSCR(int bit) { fp_condition_reg_ |= (1 << (31 - bit)); }
@@ -364,72 +381,34 @@ class Simulator : public SimulatorBase {
Byte = 1,
HalfWord = 2,
Word = 4,
- };
-
- class LocalMonitor {
- public:
- LocalMonitor();
-
- // These functions manage the state machine for the local monitor, but do
- // not actually perform loads and stores. NotifyStoreExcl only returns
- // true if the exclusive store is allowed; the global monitor will still
- // have to be checked to see whether the memory should be updated.
- void NotifyLoad(int32_t addr);
- void NotifyLoadExcl(int32_t addr, TransactionSize size);
- void NotifyStore(int32_t addr);
- bool NotifyStoreExcl(int32_t addr, TransactionSize size);
-
- private:
- void Clear();
-
- MonitorAccess access_state_;
- int32_t tagged_addr_;
- TransactionSize size_;
+ DWord = 8,
};
class GlobalMonitor {
public:
- GlobalMonitor();
-
- class Processor {
- public:
- Processor();
-
- private:
- friend class GlobalMonitor;
- // These functions manage the state machine for the global monitor, but do
- // not actually perform loads and stores.
- void Clear_Locked();
- void NotifyLoadExcl_Locked(int32_t addr);
- void NotifyStore_Locked(int32_t addr, bool is_requesting_processor);
- bool NotifyStoreExcl_Locked(int32_t addr, bool is_requesting_processor);
-
- MonitorAccess access_state_;
- int32_t tagged_addr_;
- Processor* next_;
- Processor* prev_;
- };
-
// Exposed so it can be accessed by Simulator::{Read,Write}Ex*.
base::Mutex mutex;
- void NotifyLoadExcl_Locked(int32_t addr, Processor* processor);
- void NotifyStore_Locked(int32_t addr, Processor* processor);
- bool NotifyStoreExcl_Locked(int32_t addr, Processor* processor);
+ void NotifyLoadExcl(uintptr_t addr, TransactionSize size,
+ ThreadId thread_id);
+ void NotifyStore(uintptr_t addr, TransactionSize size, ThreadId thread_id);
+ bool NotifyStoreExcl(uintptr_t addr, TransactionSize size,
+ ThreadId thread_id);
- // Called when the simulator is destroyed.
- void RemoveProcessor(Processor* processor);
+ static GlobalMonitor* Get();
private:
- bool IsProcessorInLinkedList_Locked(Processor* processor) const;
- void PrependProcessor_Locked(Processor* processor);
+ // Private constructor. Call {GlobalMonitor::Get()} to get the singleton.
+ GlobalMonitor() = default;
+ friend class base::LeakyObject<GlobalMonitor>;
- Processor* head_;
- };
+ void Clear();
- LocalMonitor local_monitor_;
- GlobalMonitor::Processor global_monitor_processor_;
- static base::LazyInstance<GlobalMonitor>::type global_monitor_;
+ MonitorAccess access_state_ = MonitorAccess::Open;
+ uintptr_t tagged_addr_ = 0;
+ TransactionSize size_ = TransactionSize::None;
+ ThreadId thread_id_ = ThreadId::Invalid();
+ };
};
} // namespace internal
diff --git a/deps/v8/src/profiler/allocation-tracker.cc b/deps/v8/src/profiler/allocation-tracker.cc
index 51cb0eb47f..d01060543d 100644
--- a/deps/v8/src/profiler/allocation-tracker.cc
+++ b/deps/v8/src/profiler/allocation-tracker.cc
@@ -213,7 +213,7 @@ void AllocationTracker::AllocationEvent(Address addr, int size) {
JavaScriptFrameIterator it(isolate);
while (!it.done() && length < kMaxAllocationTraceLength) {
JavaScriptFrame* frame = it.frame();
- SharedFunctionInfo* shared = frame->function()->shared();
+ SharedFunctionInfo shared = frame->function()->shared();
SnapshotObjectId id = ids_->FindOrAddEntry(
shared->address(), shared->Size(), false);
allocation_trace_buffer_[length++] = AddFunctionInfo(shared, id);
@@ -237,8 +237,7 @@ static uint32_t SnapshotObjectIdHash(SnapshotObjectId id) {
return ComputeUnseededHash(static_cast<uint32_t>(id));
}
-
-unsigned AllocationTracker::AddFunctionInfo(SharedFunctionInfo* shared,
+unsigned AllocationTracker::AddFunctionInfo(SharedFunctionInfo shared,
SnapshotObjectId id) {
base::HashMap::Entry* entry = id_to_function_info_index_.LookupOrInsert(
reinterpret_cast<void*>(id), SnapshotObjectIdHash(id));
@@ -247,9 +246,9 @@ unsigned AllocationTracker::AddFunctionInfo(SharedFunctionInfo* shared,
info->name = names_->GetName(shared->DebugName());
info->function_id = id;
if (shared->script()->IsScript()) {
- Script* script = Script::cast(shared->script());
+ Script script = Script::cast(shared->script());
if (script->name()->IsName()) {
- Name* name = Name::cast(script->name());
+ Name name = Name::cast(script->name());
info->script_name = names_->GetName(name);
}
info->script_id = script->id();
@@ -264,7 +263,6 @@ unsigned AllocationTracker::AddFunctionInfo(SharedFunctionInfo* shared,
return static_cast<unsigned>(reinterpret_cast<intptr_t>((entry->value)));
}
-
unsigned AllocationTracker::functionInfoIndexForVMState(StateTag state) {
if (state != OTHER) return 0;
if (info_index_for_other_state_ == 0) {
@@ -277,20 +275,18 @@ unsigned AllocationTracker::functionInfoIndexForVMState(StateTag state) {
return info_index_for_other_state_;
}
-
-AllocationTracker::UnresolvedLocation::UnresolvedLocation(
- Script* script, int start, FunctionInfo* info)
- : start_position_(start),
- info_(info) {
+AllocationTracker::UnresolvedLocation::UnresolvedLocation(Script script,
+ int start,
+ FunctionInfo* info)
+ : start_position_(start), info_(info) {
script_ = script->GetIsolate()->global_handles()->Create(script);
- GlobalHandles::MakeWeak(reinterpret_cast<Object**>(script_.location()), this,
- &HandleWeakScript, v8::WeakCallbackType::kParameter);
+ GlobalHandles::MakeWeak(script_.location(), this, &HandleWeakScript,
+ v8::WeakCallbackType::kParameter);
}
-
AllocationTracker::UnresolvedLocation::~UnresolvedLocation() {
if (!script_.is_null()) {
- GlobalHandles::Destroy(reinterpret_cast<Object**>(script_.location()));
+ GlobalHandles::Destroy(script_.location());
}
}
@@ -306,7 +302,7 @@ void AllocationTracker::UnresolvedLocation::HandleWeakScript(
const v8::WeakCallbackInfo<void>& data) {
UnresolvedLocation* loc =
reinterpret_cast<UnresolvedLocation*>(data.GetParameter());
- GlobalHandles::Destroy(reinterpret_cast<Object**>(loc->script_.location()));
+ GlobalHandles::Destroy(loc->script_.location());
loc->script_ = Handle<Script>::null();
}
diff --git a/deps/v8/src/profiler/allocation-tracker.h b/deps/v8/src/profiler/allocation-tracker.h
index bff9a62750..5305bdbf2d 100644
--- a/deps/v8/src/profiler/allocation-tracker.h
+++ b/deps/v8/src/profiler/allocation-tracker.h
@@ -120,12 +120,12 @@ class AllocationTracker {
AddressToTraceMap* address_to_trace() { return &address_to_trace_; }
private:
- unsigned AddFunctionInfo(SharedFunctionInfo* info, SnapshotObjectId id);
+ unsigned AddFunctionInfo(SharedFunctionInfo info, SnapshotObjectId id);
unsigned functionInfoIndexForVMState(StateTag state);
class UnresolvedLocation {
public:
- UnresolvedLocation(Script* script, int start, FunctionInfo* info);
+ UnresolvedLocation(Script script, int start, FunctionInfo* info);
~UnresolvedLocation();
void Resolve();
diff --git a/deps/v8/src/profiler/circular-queue.h b/deps/v8/src/profiler/circular-queue.h
index d3df1d9f38..fcbb898571 100644
--- a/deps/v8/src/profiler/circular-queue.h
+++ b/deps/v8/src/profiler/circular-queue.h
@@ -46,7 +46,7 @@ class SamplingCircularQueue {
// completely processed by the consumer.
};
- struct V8_ALIGNED(PROCESSOR_CACHE_LINE_SIZE) Entry {
+ struct alignas(PROCESSOR_CACHE_LINE_SIZE) Entry {
Entry() : marker(kEmpty) {}
T record;
base::Atomic32 marker;
@@ -55,8 +55,8 @@ class SamplingCircularQueue {
Entry* Next(Entry* entry);
Entry buffer_[Length];
- V8_ALIGNED(PROCESSOR_CACHE_LINE_SIZE) Entry* enqueue_pos_;
- V8_ALIGNED(PROCESSOR_CACHE_LINE_SIZE) Entry* dequeue_pos_;
+ alignas(PROCESSOR_CACHE_LINE_SIZE) Entry* enqueue_pos_;
+ alignas(PROCESSOR_CACHE_LINE_SIZE) Entry* dequeue_pos_;
DISALLOW_COPY_AND_ASSIGN(SamplingCircularQueue);
};
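The queue change swaps the V8_ALIGNED macro for standard C++11 alignas. The alignment itself is the point: each Entry and the two cursors get their own cache line so the producer and consumer threads do not false-share. A minimal sketch of the idiom, assuming a 64-byte line (V8 takes this from PROCESSOR_CACHE_LINE_SIZE):

    #include <cstddef>

    constexpr std::size_t kCacheLineSize = 64;  // assumption for the sketch

    struct alignas(kCacheLineSize) Entry {  // every entry padded to a full line
      int record;
      int marker;
    };

    struct Queue {
      Entry buffer_[4];
      alignas(kCacheLineSize) Entry* enqueue_pos_;  // producer-owned cursor
      alignas(kCacheLineSize) Entry* dequeue_pos_;  // consumer-owned cursor
    };

    static_assert(alignof(Entry) == kCacheLineSize, "padded as intended");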
diff --git a/deps/v8/src/profiler/cpu-profiler-inl.h b/deps/v8/src/profiler/cpu-profiler-inl.h
index 9274bc03c6..eb05c5be56 100644
--- a/deps/v8/src/profiler/cpu-profiler-inl.h
+++ b/deps/v8/src/profiler/cpu-profiler-inl.h
@@ -53,8 +53,7 @@ void ReportBuiltinEventRecord::UpdateCodeMap(CodeMap* code_map) {
entry->SetBuiltinId(builtin_id);
}
-
-TickSample* ProfilerEventsProcessor::StartTickSample() {
+TickSample* SamplingEventsProcessor::StartTickSample() {
void* address = ticks_buffer_.StartEnqueue();
if (address == nullptr) return nullptr;
TickSampleEventRecord* evt =
@@ -62,8 +61,7 @@ TickSample* ProfilerEventsProcessor::StartTickSample() {
return &evt->sample;
}
-
-void ProfilerEventsProcessor::FinishTickSample() {
+void SamplingEventsProcessor::FinishTickSample() {
ticks_buffer_.FinishEnqueue();
}
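StartTickSample() and FinishTickSample() move to SamplingEventsProcessor, and together they form a reserve/fill/publish protocol over the circular queue: the sampler writes the TickSample in place rather than copying a finished record in. A deliberately reduced sketch of that protocol, shrunk to one slot and one producer (not V8's code):

    #include <atomic>

    struct Record { int sample; };

    class TwoPhaseSlot {
     public:
      // Phase 1: reserve the slot; the caller fills it in place.
      Record* StartEnqueue() { return busy_.exchange(true) ? nullptr : &slot_; }
      // Phase 2: publish the filled record to the consumer.
      void FinishEnqueue() { ready_.store(true, std::memory_order_release); }

     private:
      Record slot_;
      std::atomic<bool> busy_{false};
      std::atomic<bool> ready_{false};
    };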
diff --git a/deps/v8/src/profiler/cpu-profiler.cc b/deps/v8/src/profiler/cpu-profiler.cc
index 21d7c9072e..c3fba16879 100644
--- a/deps/v8/src/profiler/cpu-profiler.cc
+++ b/deps/v8/src/profiler/cpu-profiler.cc
@@ -14,7 +14,7 @@
#include "src/deoptimizer.h"
#include "src/frames-inl.h"
#include "src/locked-queue-inl.h"
-#include "src/log-inl.h"
+#include "src/log.h"
#include "src/profiler/cpu-profiler-inl.h"
#include "src/vm-state-inl.h"
@@ -25,7 +25,7 @@ static const int kProfilerStackSize = 64 * KB;
class CpuSampler : public sampler::Sampler {
public:
- CpuSampler(Isolate* isolate, ProfilerEventsProcessor* processor)
+ CpuSampler(Isolate* isolate, SamplingEventsProcessor* processor)
: sampler::Sampler(reinterpret_cast<v8::Isolate*>(isolate)),
processor_(processor) {}
@@ -42,60 +42,68 @@ class CpuSampler : public sampler::Sampler {
}
private:
- ProfilerEventsProcessor* processor_;
+ SamplingEventsProcessor* processor_;
};
ProfilerEventsProcessor::ProfilerEventsProcessor(Isolate* isolate,
- ProfileGenerator* generator,
- base::TimeDelta period)
+ ProfileGenerator* generator)
: Thread(Thread::Options("v8:ProfEvntProc", kProfilerStackSize)),
generator_(generator),
- sampler_(new CpuSampler(isolate, this)),
running_(1),
- period_(period),
last_code_event_id_(0),
- last_processed_code_event_id_(0) {
- sampler_->IncreaseProfilingDepth();
-}
+ last_processed_code_event_id_(0),
+ isolate_(isolate) {}
-ProfilerEventsProcessor::~ProfilerEventsProcessor() {
- sampler_->DecreaseProfilingDepth();
+SamplingEventsProcessor::SamplingEventsProcessor(Isolate* isolate,
+ ProfileGenerator* generator,
+ base::TimeDelta period)
+ : ProfilerEventsProcessor(isolate, generator),
+ sampler_(new CpuSampler(isolate, this)),
+ period_(period) {
+ sampler_->Start();
}
+SamplingEventsProcessor::~SamplingEventsProcessor() { sampler_->Stop(); }
+
+ProfilerEventsProcessor::~ProfilerEventsProcessor() = default;
+
void ProfilerEventsProcessor::Enqueue(const CodeEventsContainer& event) {
event.generic.order = ++last_code_event_id_;
events_buffer_.Enqueue(event);
}
-
-void ProfilerEventsProcessor::AddDeoptStack(Isolate* isolate, Address from,
- int fp_to_sp_delta) {
+void ProfilerEventsProcessor::AddDeoptStack(Address from, int fp_to_sp_delta) {
TickSampleEventRecord record(last_code_event_id_);
RegisterState regs;
- Address fp = isolate->c_entry_fp(isolate->thread_local_top());
+ Address fp = isolate_->c_entry_fp(isolate_->thread_local_top());
regs.sp = reinterpret_cast<void*>(fp - fp_to_sp_delta);
regs.fp = reinterpret_cast<void*>(fp);
regs.pc = reinterpret_cast<void*>(from);
- record.sample.Init(isolate, regs, TickSample::kSkipCEntryFrame, false, false);
+ record.sample.Init(isolate_, regs, TickSample::kSkipCEntryFrame, false,
+ false);
ticks_from_vm_buffer_.Enqueue(record);
}
-void ProfilerEventsProcessor::AddCurrentStack(Isolate* isolate,
- bool update_stats) {
+void ProfilerEventsProcessor::AddCurrentStack(bool update_stats) {
TickSampleEventRecord record(last_code_event_id_);
RegisterState regs;
- StackFrameIterator it(isolate);
+ StackFrameIterator it(isolate_);
if (!it.done()) {
StackFrame* frame = it.frame();
regs.sp = reinterpret_cast<void*>(frame->sp());
regs.fp = reinterpret_cast<void*>(frame->fp());
regs.pc = reinterpret_cast<void*>(frame->pc());
}
- record.sample.Init(isolate, regs, TickSample::kSkipCEntryFrame, update_stats,
+ record.sample.Init(isolate_, regs, TickSample::kSkipCEntryFrame, update_stats,
false);
ticks_from_vm_buffer_.Enqueue(record);
}
+void ProfilerEventsProcessor::AddSample(TickSample sample) {
+ TickSampleEventRecord record(last_code_event_id_);
+ record.sample = sample;
+ ticks_from_vm_buffer_.Enqueue(record);
+}
void ProfilerEventsProcessor::StopSynchronously() {
if (!base::Relaxed_AtomicExchange(&running_, 0)) return;
@@ -123,8 +131,30 @@ bool ProfilerEventsProcessor::ProcessCodeEvent() {
return false;
}
+void ProfilerEventsProcessor::CodeEventHandler(
+ const CodeEventsContainer& evt_rec) {
+ switch (evt_rec.generic.type) {
+ case CodeEventRecord::CODE_CREATION:
+ case CodeEventRecord::CODE_MOVE:
+ case CodeEventRecord::CODE_DISABLE_OPT:
+ Enqueue(evt_rec);
+ break;
+ case CodeEventRecord::CODE_DEOPT: {
+ const CodeDeoptEventRecord* rec = &evt_rec.CodeDeoptEventRecord_;
+ Address pc = rec->pc;
+ int fp_to_sp_delta = rec->fp_to_sp_delta;
+ Enqueue(evt_rec);
+ AddDeoptStack(pc, fp_to_sp_delta);
+ break;
+ }
+ case CodeEventRecord::NONE:
+ case CodeEventRecord::REPORT_BUILTIN:
+ UNREACHABLE();
+ }
+}
+
ProfilerEventsProcessor::SampleProcessingResult
- ProfilerEventsProcessor::ProcessOneSample() {
+SamplingEventsProcessor::ProcessOneSample() {
TickSampleEventRecord record1;
if (ticks_from_vm_buffer_.Peek(&record1) &&
(record1.order == last_processed_code_event_id_)) {
@@ -147,8 +177,7 @@ ProfilerEventsProcessor::SampleProcessingResult
return OneSampleProcessed;
}
-
-void ProfilerEventsProcessor::Run() {
+void SamplingEventsProcessor::Run() {
while (!!base::Relaxed_Load(&running_)) {
base::TimeTicks nextSampleTime =
base::TimeTicks::HighResolutionNow() + period_;
@@ -180,8 +209,8 @@ void ProfilerEventsProcessor::Run() {
}
}
- // Schedule next sample. sampler_ is nullptr in tests.
- if (sampler_) sampler_->DoSample();
+ // Schedule next sample.
+ sampler_->DoSample();
}
// Process remaining tick events.
@@ -193,16 +222,11 @@ void ProfilerEventsProcessor::Run() {
} while (ProcessCodeEvent());
}
-
-void* ProfilerEventsProcessor::operator new(size_t size) {
- return AlignedAlloc(size, V8_ALIGNOF(ProfilerEventsProcessor));
-}
-
-
-void ProfilerEventsProcessor::operator delete(void* ptr) {
- AlignedFree(ptr);
+void* SamplingEventsProcessor::operator new(size_t size) {
+ return AlignedAlloc(size, alignof(SamplingEventsProcessor));
}
+void SamplingEventsProcessor::operator delete(void* ptr) { AlignedFree(ptr); }
int CpuProfiler::GetProfilesCount() {
// The count of profiles doesn't depend on a security token.
@@ -229,37 +253,17 @@ void CpuProfiler::DeleteProfile(CpuProfile* profile) {
}
}
-void CpuProfiler::CodeEventHandler(const CodeEventsContainer& evt_rec) {
- switch (evt_rec.generic.type) {
- case CodeEventRecord::CODE_CREATION:
- case CodeEventRecord::CODE_MOVE:
- case CodeEventRecord::CODE_DISABLE_OPT:
- processor_->Enqueue(evt_rec);
- break;
- case CodeEventRecord::CODE_DEOPT: {
- const CodeDeoptEventRecord* rec = &evt_rec.CodeDeoptEventRecord_;
- Address pc = rec->pc;
- int fp_to_sp_delta = rec->fp_to_sp_delta;
- processor_->Enqueue(evt_rec);
- processor_->AddDeoptStack(isolate_, pc, fp_to_sp_delta);
- break;
- }
- default:
- UNREACHABLE();
- }
-}
-
namespace {
class CpuProfilersManager {
public:
void AddProfiler(Isolate* isolate, CpuProfiler* profiler) {
- base::LockGuard<base::Mutex> lock(&mutex_);
+ base::MutexGuard lock(&mutex_);
profilers_.emplace(isolate, profiler);
}
void RemoveProfiler(Isolate* isolate, CpuProfiler* profiler) {
- base::LockGuard<base::Mutex> lock(&mutex_);
+ base::MutexGuard lock(&mutex_);
auto range = profilers_.equal_range(isolate);
for (auto it = range.first; it != range.second; ++it) {
if (it->second != profiler) continue;
@@ -270,7 +274,7 @@ class CpuProfilersManager {
}
void CallCollectSample(Isolate* isolate) {
- base::LockGuard<base::Mutex> lock(&mutex_);
+ base::MutexGuard lock(&mutex_);
auto range = profilers_.equal_range(isolate);
for (auto it = range.first; it != range.second; ++it) {
it->second->CollectSample();
@@ -282,8 +286,7 @@ class CpuProfilersManager {
base::Mutex mutex_;
};
-base::LazyInstance<CpuProfilersManager>::type g_profilers_manager =
- LAZY_INSTANCE_INITIALIZER;
+DEFINE_LAZY_LEAKY_OBJECT_GETTER(CpuProfilersManager, GetProfilersManager);
} // namespace
@@ -302,12 +305,12 @@ CpuProfiler::CpuProfiler(Isolate* isolate, CpuProfilesCollection* test_profiles,
processor_(test_processor),
is_profiling_(false) {
profiles_->set_cpu_profiler(this);
- g_profilers_manager.Pointer()->AddProfiler(isolate, this);
+ GetProfilersManager()->AddProfiler(isolate, this);
}
CpuProfiler::~CpuProfiler() {
DCHECK(!is_profiling_);
- g_profilers_manager.Pointer()->RemoveProfiler(isolate_, this);
+ GetProfilersManager()->RemoveProfiler(isolate_, this);
}
void CpuProfiler::set_sampling_interval(base::TimeDelta value) {
@@ -336,12 +339,12 @@ void CpuProfiler::CreateEntriesForRuntimeCallStats() {
// static
void CpuProfiler::CollectSample(Isolate* isolate) {
- g_profilers_manager.Pointer()->CallCollectSample(isolate);
+ GetProfilersManager()->CallCollectSample(isolate);
}
void CpuProfiler::CollectSample() {
if (processor_) {
- processor_->AddCurrentStack(isolate_);
+ processor_->AddCurrentStack();
}
}
@@ -353,7 +356,7 @@ void CpuProfiler::StartProfiling(const char* title, bool record_samples,
}
}
-void CpuProfiler::StartProfiling(String* title, bool record_samples,
+void CpuProfiler::StartProfiling(String title, bool record_samples,
ProfilingMode mode) {
StartProfiling(profiles_->GetName(title), record_samples, mode);
isolate_->debug()->feature_tracker()->Track(DebugFeatureTracker::kProfiler);
@@ -361,13 +364,13 @@ void CpuProfiler::StartProfiling(String* title, bool record_samples,
void CpuProfiler::StartProcessorIfNotStarted() {
if (processor_) {
- processor_->AddCurrentStack(isolate_);
+ processor_->AddCurrentStack();
return;
}
Logger* logger = isolate_->logger();
// Disable logging when using the new implementation.
- saved_is_logging_ = logger->is_logging_;
- logger->is_logging_ = false;
+ saved_is_logging_ = logger->is_logging();
+ logger->set_is_logging(false);
bool codemap_needs_initialization = false;
if (!generator_) {
@@ -375,10 +378,10 @@ void CpuProfiler::StartProcessorIfNotStarted() {
codemap_needs_initialization = true;
CreateEntriesForRuntimeCallStats();
}
- processor_.reset(new ProfilerEventsProcessor(isolate_, generator_.get(),
+ processor_.reset(new SamplingEventsProcessor(isolate_, generator_.get(),
sampling_interval_));
if (!profiler_listener_) {
- profiler_listener_.reset(new ProfilerListener(isolate_, this));
+ profiler_listener_.reset(new ProfilerListener(isolate_, processor_.get()));
}
logger->AddCodeEventListener(profiler_listener_.get());
is_profiling_ = true;
@@ -394,7 +397,7 @@ void CpuProfiler::StartProcessorIfNotStarted() {
LogBuiltins();
}
// Enable stack sampling.
- processor_->AddCurrentStack(isolate_);
+ processor_->AddCurrentStack();
processor_->StartSynchronously();
}
@@ -404,7 +407,7 @@ CpuProfile* CpuProfiler::StopProfiling(const char* title) {
return profiles_->StopProfiling(title);
}
-CpuProfile* CpuProfiler::StopProfiling(String* title) {
+CpuProfile* CpuProfiler::StopProfiling(String title) {
return StopProfiling(profiles_->GetName(title));
}
@@ -420,7 +423,7 @@ void CpuProfiler::StopProcessor() {
logger->RemoveCodeEventListener(profiler_listener_.get());
processor_->StopSynchronously();
processor_.reset();
- logger->is_logging_ = saved_is_logging_;
+ logger->set_is_logging(saved_is_logging_);
}
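Two base-library migrations run through this file: base::LockGuard<base::Mutex> shortens to base::MutexGuard, and the LazyInstance global becomes DEFINE_LAZY_LEAKY_OBJECT_GETTER. In spirit (the real macro lives in V8's base library and its expansion may differ), the getter reduces to a function-local static, which C++11 already initializes thread-safely:

    class Manager {};  // hypothetical stand-in for CpuProfilersManager

    Manager* GetManager() {
      static Manager* instance = new Manager();
      return instance;  // intentionally leaked: no destructor at process exit
    }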
diff --git a/deps/v8/src/profiler/cpu-profiler.h b/deps/v8/src/profiler/cpu-profiler.h
index 6e2acdfde7..ff5975a7a7 100644
--- a/deps/v8/src/profiler/cpu-profiler.h
+++ b/deps/v8/src/profiler/cpu-profiler.h
@@ -42,7 +42,6 @@ class CodeEventRecord {
enum Type {
NONE = 0,
CODE_EVENTS_TYPE_LIST(DECLARE_TYPE)
- NUMBER_OF_TYPES
};
#undef DECLARE_TYPE
@@ -131,37 +130,27 @@ class CodeEventsContainer {
// This class implements both the profile events processor thread and
// methods called by event producers: VM and stack sampler threads.
-class ProfilerEventsProcessor : public base::Thread {
+class ProfilerEventsProcessor : public base::Thread, public CodeEventObserver {
public:
- ProfilerEventsProcessor(Isolate* isolate, ProfileGenerator* generator,
- base::TimeDelta period);
- ~ProfilerEventsProcessor() override;
+ virtual ~ProfilerEventsProcessor();
+
+ void CodeEventHandler(const CodeEventsContainer& evt_rec) override;
// Thread control.
- void Run() override;
+ void Run() override = 0;
void StopSynchronously();
V8_INLINE bool running() { return !!base::Relaxed_Load(&running_); }
void Enqueue(const CodeEventsContainer& event);
- // Puts current stack into tick sample events buffer.
- void AddCurrentStack(Isolate* isolate, bool update_stats = false);
- void AddDeoptStack(Isolate* isolate, Address from, int fp_to_sp_delta);
+ // Puts current stack into the tick sample events buffer.
+ void AddCurrentStack(bool update_stats = false);
+ void AddDeoptStack(Address from, int fp_to_sp_delta);
+ // Add a sample into the tick sample events buffer. Used for testing.
+ void AddSample(TickSample sample);
- // Tick sample events are filled directly in the buffer of the circular
- // queue (because the structure is of fixed width, but usually not all
- // stack frame entries are filled.) This method returns a pointer to the
- // next record of the buffer.
- inline TickSample* StartTickSample();
- inline void FinishTickSample();
+ protected:
+ ProfilerEventsProcessor(Isolate* isolate, ProfileGenerator* generator);
- // SamplingCircularQueue has stricter alignment requirements than a normal new
- // can fulfil, so we need to provide our own new/delete here.
- void* operator new(size_t size);
- void operator delete(void* ptr);
-
- sampler::Sampler* sampler() { return sampler_.get(); }
-
- private:
// Called from events processing thread (Run() method.)
bool ProcessCodeEvent();
@@ -170,24 +159,54 @@ class ProfilerEventsProcessor : public base::Thread {
FoundSampleForNextCodeEvent,
NoSamplesInQueue
};
- SampleProcessingResult ProcessOneSample();
+ virtual SampleProcessingResult ProcessOneSample() = 0;
ProfileGenerator* generator_;
- std::unique_ptr<sampler::Sampler> sampler_;
base::Atomic32 running_;
- const base::TimeDelta period_; // Samples & code events processing period.
LockedQueue<CodeEventsContainer> events_buffer_;
- static const size_t kTickSampleBufferSize = 1 * MB;
+ LockedQueue<TickSampleEventRecord> ticks_from_vm_buffer_;
+ std::atomic<unsigned> last_code_event_id_;
+ unsigned last_processed_code_event_id_;
+ Isolate* isolate_;
+};
+
+class SamplingEventsProcessor : public ProfilerEventsProcessor {
+ public:
+ SamplingEventsProcessor(Isolate* isolate, ProfileGenerator* generator,
+ base::TimeDelta period);
+ ~SamplingEventsProcessor() override;
+
+ // SamplingCircularQueue has stricter alignment requirements than a normal new
+ // can fulfil, so we need to provide our own new/delete here.
+ void* operator new(size_t size);
+ void operator delete(void* ptr);
+
+ void Run() override;
+
+ // Tick sample events are filled directly in the buffer of the circular
+ // queue (because the structure is of fixed width, but usually not all
+ // stack frame entries are filled.) This method returns a pointer to the
+ // next record of the buffer.
+ // These methods are not thread-safe and should only ever be called by one
+ // producer (from CpuSampler::SampleStack()). For testing, use AddSample.
+ inline TickSample* StartTickSample();
+ inline void FinishTickSample();
+
+ sampler::Sampler* sampler() { return sampler_.get(); }
+
+ private:
+ SampleProcessingResult ProcessOneSample() override;
+
+ static const size_t kTickSampleBufferSize = 512 * KB;
static const size_t kTickSampleQueueLength =
kTickSampleBufferSize / sizeof(TickSampleEventRecord);
SamplingCircularQueue<TickSampleEventRecord,
kTickSampleQueueLength> ticks_buffer_;
- LockedQueue<TickSampleEventRecord> ticks_from_vm_buffer_;
- std::atomic<unsigned> last_code_event_id_;
- unsigned last_processed_code_event_id_;
+ std::unique_ptr<sampler::Sampler> sampler_;
+ const base::TimeDelta period_; // Samples & code events processing period.
};
-class CpuProfiler : public CodeEventObserver {
+class CpuProfiler {
public:
explicit CpuProfiler(Isolate* isolate);
@@ -195,7 +214,7 @@ class CpuProfiler : public CodeEventObserver {
ProfileGenerator* test_generator,
ProfilerEventsProcessor* test_processor);
- ~CpuProfiler() override;
+ ~CpuProfiler();
static void CollectSample(Isolate* isolate);
@@ -205,16 +224,14 @@ class CpuProfiler : public CodeEventObserver {
void CollectSample();
void StartProfiling(const char* title, bool record_samples = false,
ProfilingMode mode = ProfilingMode::kLeafNodeLineNumbers);
- void StartProfiling(String* title, bool record_samples, ProfilingMode mode);
+ void StartProfiling(String title, bool record_samples, ProfilingMode mode);
CpuProfile* StopProfiling(const char* title);
- CpuProfile* StopProfiling(String* title);
+ CpuProfile* StopProfiling(String title);
int GetProfilesCount();
CpuProfile* GetProfile(int index);
void DeleteAllProfiles();
void DeleteProfile(CpuProfile* profile);
- void CodeEventHandler(const CodeEventsContainer& evt_rec) override;
-
bool is_profiling() const { return is_profiling_; }
ProfileGenerator* generator() const { return generator_.get(); }
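The header diff makes the shape of the refactor explicit: ProfilerEventsProcessor becomes an abstract base that is itself the CodeEventObserver (Run() and ProcessOneSample() turn pure virtual), SamplingEventsProcessor takes the sampler, the circular queue and the period, and CpuProfiler stops observing code events directly. A self-contained reduction of that design, with invented Event/EventsProcessor names standing in for the V8 types:

    #include <iostream>

    struct Event { int id; };

    class CodeEventObserver {
     public:
      virtual ~CodeEventObserver() = default;
      virtual void CodeEventHandler(const Event& e) = 0;
    };

    // Abstract base: shared plumbing; the run loop is left to subclasses.
    class EventsProcessor : public CodeEventObserver {
     public:
      void CodeEventHandler(const Event& e) override { Enqueue(e); }
      virtual void Run() = 0;

     protected:
      void Enqueue(const Event& e) { std::cout << "enqueued " << e.id << "\n"; }
    };

    // Concrete sampling variant: owns the timing policy.
    class SamplingProcessor : public EventsProcessor {
     public:
      void Run() override { /* timed sampling loop lives here */ }
    };

    int main() {
      SamplingProcessor p;
      p.CodeEventHandler(Event{1});  // producers see only the observer interface
      p.Run();
    }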
diff --git a/deps/v8/src/profiler/heap-profiler.cc b/deps/v8/src/profiler/heap-profiler.cc
index 58a8f3851f..495baf9b34 100644
--- a/deps/v8/src/profiler/heap-profiler.cc
+++ b/deps/v8/src/profiler/heap-profiler.cc
@@ -52,12 +52,10 @@ void HeapProfiler::DefineWrapperClass(
wrapper_callbacks_[class_id] = callback;
}
-
v8::RetainedObjectInfo* HeapProfiler::ExecuteWrapperClassCallback(
- uint16_t class_id, Object** wrapper) {
+ uint16_t class_id, Handle<Object> wrapper) {
if (wrapper_callbacks_.size() <= class_id) return nullptr;
- return wrapper_callbacks_[class_id](
- class_id, Utils::ToLocal(Handle<Object>(wrapper)));
+ return wrapper_callbacks_[class_id](class_id, Utils::ToLocal(wrapper));
}
void HeapProfiler::SetGetRetainerInfosCallback(
@@ -185,7 +183,7 @@ SnapshotObjectId HeapProfiler::GetSnapshotObjectId(Handle<Object> obj) {
}
void HeapProfiler::ObjectMoveEvent(Address from, Address to, int size) {
- base::LockGuard<base::Mutex> guard(&profiler_mutex_);
+ base::MutexGuard guard(&profiler_mutex_);
bool known_object = ids_->MoveObject(from, to, size);
if (!known_object && allocation_tracker_) {
allocation_tracker_->address_to_trace()->MoveObject(from, to, size);
@@ -205,18 +203,18 @@ void HeapProfiler::UpdateObjectSizeEvent(Address addr, int size) {
}
Handle<HeapObject> HeapProfiler::FindHeapObjectById(SnapshotObjectId id) {
- HeapObject* object = nullptr;
+ HeapObject object;
HeapIterator iterator(heap(), HeapIterator::kFilterUnreachable);
// Make sure that object with the given id is still reachable.
- for (HeapObject* obj = iterator.next(); obj != nullptr;
+ for (HeapObject obj = iterator.next(); !obj.is_null();
obj = iterator.next()) {
if (ids_->FindEntry(obj->address()) == id) {
- DCHECK_NULL(object);
+ DCHECK(object.is_null());
object = obj;
// Can't break -- kFilterUnreachable requires full heap traversal.
}
}
- return object != nullptr ? Handle<HeapObject>(object, isolate())
+ return !object.is_null() ? Handle<HeapObject>(object, isolate())
: Handle<HeapObject>();
}
@@ -238,8 +236,8 @@ void HeapProfiler::QueryObjects(Handle<Context> context,
// collect all garbage first.
heap()->CollectAllAvailableGarbage(GarbageCollectionReason::kHeapProfiler);
HeapIterator heap_iterator(heap());
- HeapObject* heap_obj;
- while ((heap_obj = heap_iterator.next()) != nullptr) {
+ for (HeapObject heap_obj = heap_iterator.next(); !heap_obj.is_null();
+ heap_obj = heap_iterator.next()) {
if (!heap_obj->IsJSObject() || heap_obj->IsExternal(isolate())) continue;
v8::Local<v8::Object> v8_obj(
Utils::ToLocal(handle(JSObject::cast(heap_obj), isolate())));
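ExecuteWrapperClassCallback now takes a Handle<Object> instead of a raw Object** slot, and the heap walks above test is_null() rather than nullptr. The handle indirection matters because a moving GC may relocate the object; what code holds is a pointer to a slot the GC rewrites, not the object address itself. A sketch with an invented HandleLike (not V8's Handle):

    #include <cstdint>

    using Address = uintptr_t;

    class HandleLike {
     public:
      explicit HandleLike(Address* location) : location_(location) {}
      // Re-read the slot on every access: the GC may have updated it.
      Address operator*() const { return *location_; }

     private:
      Address* location_;  // slot registered with a handle scope or global table
    };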
diff --git a/deps/v8/src/profiler/heap-profiler.h b/deps/v8/src/profiler/heap-profiler.h
index 1e3527765e..efeb8f769b 100644
--- a/deps/v8/src/profiler/heap-profiler.h
+++ b/deps/v8/src/profiler/heap-profiler.h
@@ -65,7 +65,7 @@ class HeapProfiler : public HeapObjectAllocationTracker {
uint16_t class_id, v8::HeapProfiler::WrapperInfoCallback callback);
v8::RetainedObjectInfo* ExecuteWrapperClassCallback(uint16_t class_id,
- Object** wrapper);
+ Handle<Object> wrapper);
void SetGetRetainerInfosCallback(
v8::HeapProfiler::GetRetainerInfosCallback callback);
diff --git a/deps/v8/src/profiler/heap-snapshot-generator.cc b/deps/v8/src/profiler/heap-snapshot-generator.cc
index 57f620f4ec..17daea1964 100644
--- a/deps/v8/src/profiler/heap-snapshot-generator.cc
+++ b/deps/v8/src/profiler/heap-snapshot-generator.cc
@@ -7,7 +7,7 @@
#include <utility>
#include "src/api-inl.h"
-#include "src/code-stubs.h"
+#include "src/assembler-inl.h"
#include "src/conversions.h"
#include "src/debug/debug.h"
#include "src/global-handles.h"
@@ -15,6 +15,8 @@
#include "src/objects-body-descriptors.h"
#include "src/objects-inl.h"
#include "src/objects/api-callbacks.h"
+#include "src/objects/cell-inl.h"
+#include "src/objects/feedback-cell-inl.h"
#include "src/objects/hash-table-inl.h"
#include "src/objects/js-array-buffer-inl.h"
#include "src/objects/js-array-inl.h"
@@ -23,6 +25,8 @@
#include "src/objects/js-promise-inl.h"
#include "src/objects/js-regexp-inl.h"
#include "src/objects/literal-objects-inl.h"
+#include "src/objects/slots-inl.h"
+#include "src/objects/struct-inl.h"
#include "src/profiler/allocation-tracker.h"
#include "src/profiler/heap-profiler.h"
#include "src/profiler/heap-snapshot-generator-inl.h"
@@ -177,10 +181,10 @@ const char* HeapEntry::TypeAsString() {
HeapSnapshot::HeapSnapshot(HeapProfiler* profiler) : profiler_(profiler) {
// It is very important to keep objects that form a heap snapshot
// as small as possible. Check assumptions about data structure sizes.
- STATIC_ASSERT((kPointerSize == 4 && sizeof(HeapGraphEdge) == 12) ||
- (kPointerSize == 8 && sizeof(HeapGraphEdge) == 24));
- STATIC_ASSERT((kPointerSize == 4 && sizeof(HeapEntry) == 28) ||
- (kPointerSize == 8 && sizeof(HeapEntry) == 40));
+ STATIC_ASSERT((kTaggedSize == 4 && sizeof(HeapGraphEdge) == 12) ||
+ (kTaggedSize == 8 && sizeof(HeapGraphEdge) == 24));
+ STATIC_ASSERT((kTaggedSize == 4 && sizeof(HeapEntry) == 28) ||
+ (kTaggedSize == 8 && sizeof(HeapEntry) == 40));
memset(&gc_subroot_entries_, 0, sizeof(gc_subroot_entries_));
}
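The size checks are rebased from kPointerSize onto kTaggedSize. At this revision the two constants are still equal; the rename prepares for pointer compression, where a tagged slot shrinks to 4 bytes while system pointers stay 8. A sketch of the relationship as it stands in this patch (64-bit build assumed):

    constexpr int kSystemPointerSize = sizeof(void*);  // 8 on x64
    constexpr int kTaggedSize = kSystemPointerSize;    // still equal here
    static_assert(kTaggedSize == 4 || kTaggedSize == 8,
                  "the two cases the STATIC_ASSERTs above cover");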
@@ -389,7 +393,7 @@ void HeapObjectsMap::UpdateHeapObjectsMap() {
heap_->PreciseCollectAllGarbage(Heap::kNoGCFlags,
GarbageCollectionReason::kHeapProfiler);
HeapIterator iterator(heap_);
- for (HeapObject* obj = iterator.next(); obj != nullptr;
+ for (HeapObject obj = iterator.next(); !obj.is_null();
obj = iterator.next()) {
FindOrAddEntry(obj->address(), obj->Size());
if (FLAG_heap_profiler_trace_objects) {
@@ -511,31 +515,32 @@ V8HeapExplorer::V8HeapExplorer(HeapSnapshot* snapshot,
global_object_name_resolver_(resolver) {}
HeapEntry* V8HeapExplorer::AllocateEntry(HeapThing ptr) {
- return AddEntry(reinterpret_cast<HeapObject*>(ptr));
+ return AddEntry(HeapObject::cast(Object(reinterpret_cast<Address>(ptr))));
}
-void V8HeapExplorer::ExtractLocation(HeapEntry* entry, HeapObject* object) {
+void V8HeapExplorer::ExtractLocation(HeapEntry* entry, HeapObject object) {
if (object->IsJSFunction()) {
- JSFunction* func = JSFunction::cast(object);
+ JSFunction func = JSFunction::cast(object);
ExtractLocationForJSFunction(entry, func);
} else if (object->IsJSGeneratorObject()) {
- JSGeneratorObject* gen = JSGeneratorObject::cast(object);
+ JSGeneratorObject gen = JSGeneratorObject::cast(object);
ExtractLocationForJSFunction(entry, gen->function());
} else if (object->IsJSObject()) {
- JSObject* obj = JSObject::cast(object);
- JSFunction* maybe_constructor = GetConstructor(obj);
+ JSObject obj = JSObject::cast(object);
+ JSFunction maybe_constructor = GetConstructor(obj);
- if (maybe_constructor)
+ if (!maybe_constructor.is_null()) {
ExtractLocationForJSFunction(entry, maybe_constructor);
+ }
}
}
void V8HeapExplorer::ExtractLocationForJSFunction(HeapEntry* entry,
- JSFunction* func) {
+ JSFunction func) {
if (!func->shared()->script()->IsScript()) return;
- Script* script = Script::cast(func->shared()->script());
+ Script script = Script::cast(func->shared()->script());
int scriptId = script->id();
int start = func->shared()->StartPosition();
int line = script->GetLineNumber(start);
@@ -543,16 +548,16 @@ void V8HeapExplorer::ExtractLocationForJSFunction(HeapEntry* entry,
snapshot_->AddLocation(entry, scriptId, line, col);
}
-HeapEntry* V8HeapExplorer::AddEntry(HeapObject* object) {
+HeapEntry* V8HeapExplorer::AddEntry(HeapObject object) {
if (object->IsJSFunction()) {
- JSFunction* func = JSFunction::cast(object);
- SharedFunctionInfo* shared = func->shared();
+ JSFunction func = JSFunction::cast(object);
+ SharedFunctionInfo shared = func->shared();
const char* name = names_->GetName(shared->Name());
return AddEntry(object, HeapEntry::kClosure, name);
} else if (object->IsJSBoundFunction()) {
return AddEntry(object, HeapEntry::kClosure, "native_bind");
} else if (object->IsJSRegExp()) {
- JSRegExp* re = JSRegExp::cast(object);
+ JSRegExp re = JSRegExp::cast(object);
return AddEntry(object,
HeapEntry::kRegExp,
names_->GetName(re->Pattern()));
@@ -567,7 +572,7 @@ HeapEntry* V8HeapExplorer::AddEntry(HeapObject* object) {
}
return AddEntry(object, HeapEntry::kObject, name);
} else if (object->IsString()) {
- String* string = String::cast(object);
+ String string = String::cast(object);
if (string->IsConsString()) {
return AddEntry(object, HeapEntry::kConsString, "(concatenated string)");
} else if (string->IsSlicedString()) {
@@ -586,10 +591,10 @@ HeapEntry* V8HeapExplorer::AddEntry(HeapObject* object) {
} else if (object->IsCode()) {
return AddEntry(object, HeapEntry::kCode, "");
} else if (object->IsSharedFunctionInfo()) {
- String* name = SharedFunctionInfo::cast(object)->Name();
+ String name = SharedFunctionInfo::cast(object)->Name();
return AddEntry(object, HeapEntry::kCode, names_->GetName(name));
} else if (object->IsScript()) {
- Object* name = Script::cast(object)->name();
+ Object name = Script::cast(object)->name();
return AddEntry(
object, HeapEntry::kCode,
name->IsString() ? names_->GetName(String::cast(name)) : "");
@@ -606,8 +611,7 @@ HeapEntry* V8HeapExplorer::AddEntry(HeapObject* object) {
return AddEntry(object, HeapEntry::kHidden, GetSystemEntryName(object));
}
-HeapEntry* V8HeapExplorer::AddEntry(HeapObject* object,
- HeapEntry::Type type,
+HeapEntry* V8HeapExplorer::AddEntry(HeapObject object, HeapEntry::Type type,
const char* name) {
return AddEntry(object->address(), type, name, object->Size());
}
@@ -627,7 +631,7 @@ HeapEntry* V8HeapExplorer::AddEntry(Address address,
return snapshot_->AddEntry(type, name, object_id, size, trace_node_id);
}
-const char* V8HeapExplorer::GetSystemEntryName(HeapObject* object) {
+const char* V8HeapExplorer::GetSystemEntryName(HeapObject object) {
switch (object->map()->instance_type()) {
case MAP_TYPE:
switch (Map::cast(object)->instance_type()) {
@@ -655,56 +659,72 @@ const char* V8HeapExplorer::GetSystemEntryName(HeapObject* object) {
int V8HeapExplorer::EstimateObjectsCount() {
HeapIterator it(heap_, HeapIterator::kFilterUnreachable);
int objects_count = 0;
- while (it.next()) ++objects_count;
+ while (!it.next().is_null()) ++objects_count;
return objects_count;
}
class IndexedReferencesExtractor : public ObjectVisitor {
public:
- IndexedReferencesExtractor(V8HeapExplorer* generator, HeapObject* parent_obj,
+ IndexedReferencesExtractor(V8HeapExplorer* generator, HeapObject parent_obj,
HeapEntry* parent)
: generator_(generator),
parent_obj_(parent_obj),
- parent_start_(HeapObject::RawField(parent_obj_, 0)),
- parent_end_(HeapObject::RawField(parent_obj_, parent_obj_->Size())),
- parent_(parent) {}
- void VisitPointers(HeapObject* host, Object** start, Object** end) override {
- VisitPointers(host, reinterpret_cast<MaybeObject**>(start),
- reinterpret_cast<MaybeObject**>(end));
- }
- void VisitPointers(HeapObject* host, MaybeObject** start,
- MaybeObject** end) override {
- int next_index = 0;
- for (MaybeObject** p = start; p < end; p++) {
- int index = static_cast<int>(reinterpret_cast<Object**>(p) -
- HeapObject::RawField(parent_obj_, 0));
- ++next_index;
- // |p| could be outside of the object, e.g., while visiting RelocInfo of
- // code objects.
- if (reinterpret_cast<Object**>(p) >= parent_start_ &&
- reinterpret_cast<Object**>(p) < parent_end_ &&
- generator_->visited_fields_[index]) {
- generator_->visited_fields_[index] = false;
+ parent_start_(HeapObject::RawMaybeWeakField(parent_obj_, 0)),
+ parent_end_(
+ HeapObject::RawMaybeWeakField(parent_obj_, parent_obj_->Size())),
+ parent_(parent),
+ next_index_(0) {}
+ void VisitPointers(HeapObject host, ObjectSlot start,
+ ObjectSlot end) override {
+ VisitPointers(host, MaybeObjectSlot(start), MaybeObjectSlot(end));
+ }
+ void VisitPointers(HeapObject host, MaybeObjectSlot start,
+ MaybeObjectSlot end) override {
+ // [start,end) must be a sub-region of [parent_start_, parent_end_), i.e.
+ // all the slots must point inside the object.
+ CHECK_LE(parent_start_, start);
+ CHECK_LE(end, parent_end_);
+ for (MaybeObjectSlot p = start; p < end; ++p) {
+ int field_index = static_cast<int>(p - parent_start_);
+ if (generator_->visited_fields_[field_index]) {
+ generator_->visited_fields_[field_index] = false;
continue;
}
- HeapObject* heap_object;
- if ((*p)->GetHeapObjectIfWeak(&heap_object) ||
- (*p)->GetHeapObjectIfStrong(&heap_object)) {
- generator_->SetHiddenReference(parent_obj_, parent_, next_index,
- heap_object, index * kPointerSize);
+ HeapObject heap_object;
+ if ((*p)->GetHeapObject(&heap_object)) {
+ VisitHeapObjectImpl(heap_object, field_index);
}
}
}
+ void VisitCodeTarget(Code host, RelocInfo* rinfo) override {
+ Code target = Code::GetCodeFromTargetAddress(rinfo->target_address());
+ VisitHeapObjectImpl(target, -1);
+ }
+
+ void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
+ VisitHeapObjectImpl(rinfo->target_object(), -1);
+ }
+
private:
+ V8_INLINE void VisitHeapObjectImpl(HeapObject heap_object, int field_index) {
+ DCHECK_LE(-1, field_index);
+ // The last parameter {field_offset} is only used to check some well-known
+ // skipped references, so passing -1 * kTaggedSize for objects embedded
+ // into code is fine.
+ generator_->SetHiddenReference(parent_obj_, parent_, next_index_++,
+ heap_object, field_index * kTaggedSize);
+ }
+
V8HeapExplorer* generator_;
- HeapObject* parent_obj_;
- Object** parent_start_;
- Object** parent_end_;
+ HeapObject parent_obj_;
+ MaybeObjectSlot parent_start_;
+ MaybeObjectSlot parent_end_;
HeapEntry* parent_;
+ int next_index_;
};
-void V8HeapExplorer::ExtractReferences(HeapEntry* entry, HeapObject* obj) {
+void V8HeapExplorer::ExtractReferences(HeapEntry* entry, HeapObject obj) {
if (obj->IsJSGlobalProxy()) {
ExtractJSGlobalProxyReferences(entry, JSGlobalProxy::cast(obj));
} else if (obj->IsJSArrayBuffer()) {
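IndexedReferencesExtractor now walks typed MaybeObjectSlot ranges instead of raw Object** pointers; a slot is a thin wrapper over an address that steps in tagged-size units, which is what makes the field_index arithmetic above (p - parent_start_) well defined. A minimal sketch under those assumptions, with an invented MaybeObjectSlotLike:

    #include <cstdint>

    using Address = uintptr_t;
    constexpr int kTaggedSize = 8;  // assumption for the sketch

    class MaybeObjectSlotLike {
     public:
      explicit MaybeObjectSlotLike(Address addr) : addr_(addr) {}
      Address address() const { return addr_; }
      MaybeObjectSlotLike& operator++() { addr_ += kTaggedSize; return *this; }
      bool operator<(const MaybeObjectSlotLike& o) const { return addr_ < o.addr_; }
      int operator-(const MaybeObjectSlotLike& o) const {
        return static_cast<int>((addr_ - o.addr_) / kTaggedSize);  // slot count
      }

     private:
      Address addr_;
    };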
@@ -753,6 +773,8 @@ void V8HeapExplorer::ExtractReferences(HeapEntry* entry, HeapObject* obj) {
entry, ArrayBoilerplateDescription::cast(obj));
} else if (obj->IsFeedbackVector()) {
ExtractFeedbackVectorReferences(entry, FeedbackVector::cast(obj));
+ } else if (obj->IsDescriptorArray()) {
+ ExtractDescriptorArrayReferences(entry, DescriptorArray::cast(obj));
} else if (obj->IsWeakFixedArray()) {
ExtractWeakArrayReferences(WeakFixedArray::kHeaderSize, entry,
WeakFixedArray::cast(obj));
@@ -769,14 +791,14 @@ void V8HeapExplorer::ExtractReferences(HeapEntry* entry, HeapObject* obj) {
}
void V8HeapExplorer::ExtractJSGlobalProxyReferences(HeapEntry* entry,
- JSGlobalProxy* proxy) {
+ JSGlobalProxy proxy) {
SetInternalReference(entry, "native_context", proxy->native_context(),
JSGlobalProxy::kNativeContextOffset);
}
void V8HeapExplorer::ExtractJSObjectReferences(HeapEntry* entry,
- JSObject* js_obj) {
- HeapObject* obj = js_obj;
+ JSObject js_obj) {
+ HeapObject obj = js_obj;
ExtractPropertyReferences(js_obj, entry);
ExtractElementReferences(js_obj, entry);
ExtractInternalReferences(js_obj, entry);
@@ -784,7 +806,7 @@ void V8HeapExplorer::ExtractJSObjectReferences(HeapEntry* entry,
ReadOnlyRoots roots(heap_);
SetPropertyReference(entry, roots.proto_string(), iter.GetCurrent());
if (obj->IsJSBoundFunction()) {
- JSBoundFunction* js_fun = JSBoundFunction::cast(obj);
+ JSBoundFunction js_fun = JSBoundFunction::cast(obj);
TagObject(js_fun->bound_arguments(), "(bound arguments)");
SetInternalReference(entry, "bindings", js_fun->bound_arguments(),
JSBoundFunction::kBoundArgumentsOffset);
@@ -793,15 +815,15 @@ void V8HeapExplorer::ExtractJSObjectReferences(HeapEntry* entry,
SetInternalReference(entry, "bound_function",
js_fun->bound_target_function(),
JSBoundFunction::kBoundTargetFunctionOffset);
- FixedArray* bindings = js_fun->bound_arguments();
+ FixedArray bindings = js_fun->bound_arguments();
for (int i = 0; i < bindings->length(); i++) {
const char* reference_name = names_->GetFormatted("bound_argument_%d", i);
SetNativeBindReference(entry, reference_name, bindings->get(i));
}
} else if (obj->IsJSFunction()) {
- JSFunction* js_fun = JSFunction::cast(js_obj);
+ JSFunction js_fun = JSFunction::cast(js_obj);
if (js_fun->has_prototype_slot()) {
- Object* proto_or_map = js_fun->prototype_or_initial_map();
+ Object proto_or_map = js_fun->prototype_or_initial_map();
if (!proto_or_map->IsTheHole(heap_->isolate())) {
if (!proto_or_map->IsMap()) {
SetPropertyReference(entry, roots.prototype_string(), proto_or_map,
@@ -815,9 +837,9 @@ void V8HeapExplorer::ExtractJSObjectReferences(HeapEntry* entry,
}
}
}
- SharedFunctionInfo* shared_info = js_fun->shared();
- TagObject(js_fun->feedback_cell(), "(function feedback cell)");
- SetInternalReference(entry, "feedback_cell", js_fun->feedback_cell(),
+ SharedFunctionInfo shared_info = js_fun->shared();
+ TagObject(js_fun->raw_feedback_cell(), "(function feedback cell)");
+ SetInternalReference(entry, "feedback_cell", js_fun->raw_feedback_cell(),
JSFunction::kFeedbackCellOffset);
TagObject(shared_info, "(shared function info)");
SetInternalReference(entry, "shared", shared_info,
@@ -825,19 +847,18 @@ void V8HeapExplorer::ExtractJSObjectReferences(HeapEntry* entry,
TagObject(js_fun->context(), "(context)");
SetInternalReference(entry, "context", js_fun->context(),
JSFunction::kContextOffset);
- TagCodeObject(js_fun->code());
SetInternalReference(entry, "code", js_fun->code(),
JSFunction::kCodeOffset);
} else if (obj->IsJSGlobalObject()) {
- JSGlobalObject* global_obj = JSGlobalObject::cast(obj);
+ JSGlobalObject global_obj = JSGlobalObject::cast(obj);
SetInternalReference(entry, "native_context", global_obj->native_context(),
JSGlobalObject::kNativeContextOffset);
SetInternalReference(entry, "global_proxy", global_obj->global_proxy(),
JSGlobalObject::kGlobalProxyOffset);
STATIC_ASSERT(JSGlobalObject::kSize - JSObject::kHeaderSize ==
- 2 * kPointerSize);
+ 2 * kTaggedSize);
} else if (obj->IsJSArrayBufferView()) {
- JSArrayBufferView* view = JSArrayBufferView::cast(obj);
+ JSArrayBufferView view = JSArrayBufferView::cast(obj);
SetInternalReference(entry, "buffer", view->buffer(),
JSArrayBufferView::kBufferOffset);
}
@@ -851,47 +872,47 @@ void V8HeapExplorer::ExtractJSObjectReferences(HeapEntry* entry,
JSObject::kElementsOffset);
}
-void V8HeapExplorer::ExtractStringReferences(HeapEntry* entry, String* string) {
+void V8HeapExplorer::ExtractStringReferences(HeapEntry* entry, String string) {
if (string->IsConsString()) {
- ConsString* cs = ConsString::cast(string);
+ ConsString cs = ConsString::cast(string);
SetInternalReference(entry, "first", cs->first(), ConsString::kFirstOffset);
SetInternalReference(entry, "second", cs->second(),
ConsString::kSecondOffset);
} else if (string->IsSlicedString()) {
- SlicedString* ss = SlicedString::cast(string);
+ SlicedString ss = SlicedString::cast(string);
SetInternalReference(entry, "parent", ss->parent(),
SlicedString::kParentOffset);
} else if (string->IsThinString()) {
- ThinString* ts = ThinString::cast(string);
+ ThinString ts = ThinString::cast(string);
SetInternalReference(entry, "actual", ts->actual(),
ThinString::kActualOffset);
}
}
-void V8HeapExplorer::ExtractSymbolReferences(HeapEntry* entry, Symbol* symbol) {
+void V8HeapExplorer::ExtractSymbolReferences(HeapEntry* entry, Symbol symbol) {
SetInternalReference(entry, "name", symbol->name(), Symbol::kNameOffset);
}
void V8HeapExplorer::ExtractJSCollectionReferences(HeapEntry* entry,
- JSCollection* collection) {
+ JSCollection collection) {
SetInternalReference(entry, "table", collection->table(),
JSCollection::kTableOffset);
}
void V8HeapExplorer::ExtractJSWeakCollectionReferences(HeapEntry* entry,
- JSWeakCollection* obj) {
+ JSWeakCollection obj) {
SetInternalReference(entry, "table", obj->table(),
JSWeakCollection::kTableOffset);
}
void V8HeapExplorer::ExtractEphemeronHashTableReferences(
- HeapEntry* entry, EphemeronHashTable* table) {
+ HeapEntry* entry, EphemeronHashTable table) {
for (int i = 0, capacity = table->Capacity(); i < capacity; ++i) {
int key_index = EphemeronHashTable::EntryToIndex(i) +
EphemeronHashTable::kEntryKeyIndex;
int value_index = EphemeronHashTable::EntryToValueIndex(i);
- Object* key = table->get(key_index);
- Object* value = table->get(value_index);
+ Object key = table->get(key_index);
+ Object value = table->get(value_index);
SetWeakReference(entry, key_index, key,
table->OffsetOfElementAt(key_index));
SetWeakReference(entry, value_index, value,
@@ -916,23 +937,23 @@ static const struct {
} native_context_names[] = {
#define CONTEXT_FIELD_INDEX_NAME(index, _, name) {Context::index, #name},
NATIVE_CONTEXT_FIELDS(CONTEXT_FIELD_INDEX_NAME)
-#undef CONTEXT_FIELD_INDEX
+#undef CONTEXT_FIELD_INDEX_NAME
};
void V8HeapExplorer::ExtractContextReferences(HeapEntry* entry,
- Context* context) {
+ Context context) {
if (!context->IsNativeContext() && context->is_declaration_context()) {
- ScopeInfo* scope_info = context->scope_info();
+ ScopeInfo scope_info = context->scope_info();
// Add context allocated locals.
int context_locals = scope_info->ContextLocalCount();
for (int i = 0; i < context_locals; ++i) {
- String* local_name = scope_info->ContextLocalName(i);
+ String local_name = scope_info->ContextLocalName(i);
int idx = Context::MIN_CONTEXT_SLOTS + i;
SetContextReference(entry, local_name, context->get(idx),
Context::OffsetOfElementAt(idx));
}
if (scope_info->HasFunctionName()) {
- String* name = String::cast(scope_info->FunctionName());
+ String name = String::cast(scope_info->FunctionName());
int idx = scope_info->FunctionContextSlotIndex(name);
if (idx >= 0) {
SetContextReference(entry, name, context->get(idx),
@@ -979,9 +1000,9 @@ void V8HeapExplorer::ExtractContextReferences(HeapEntry* entry,
}
}
-void V8HeapExplorer::ExtractMapReferences(HeapEntry* entry, Map* map) {
- MaybeObject* maybe_raw_transitions_or_prototype_info = map->raw_transitions();
- HeapObject* raw_transitions_or_prototype_info;
+void V8HeapExplorer::ExtractMapReferences(HeapEntry* entry, Map map) {
+ MaybeObject maybe_raw_transitions_or_prototype_info = map->raw_transitions();
+ HeapObject raw_transitions_or_prototype_info;
if (maybe_raw_transitions_or_prototype_info->GetHeapObjectIfWeak(
&raw_transitions_or_prototype_info)) {
DCHECK(raw_transitions_or_prototype_info->IsMap());
@@ -990,7 +1011,7 @@ void V8HeapExplorer::ExtractMapReferences(HeapEntry* entry, Map* map) {
} else if (maybe_raw_transitions_or_prototype_info->GetHeapObjectIfStrong(
&raw_transitions_or_prototype_info)) {
if (raw_transitions_or_prototype_info->IsTransitionArray()) {
- TransitionArray* transitions =
+ TransitionArray transitions =
TransitionArray::cast(raw_transitions_or_prototype_info);
if (map->CanTransition() && transitions->HasPrototypeTransitions()) {
TagObject(transitions->GetPrototypeTransitions(),
@@ -1012,7 +1033,7 @@ void V8HeapExplorer::ExtractMapReferences(HeapEntry* entry, Map* map) {
Map::kTransitionsOrPrototypeInfoOffset);
}
}
- DescriptorArray* descriptors = map->instance_descriptors();
+ DescriptorArray descriptors = map->instance_descriptors();
TagObject(descriptors, "(map descriptors)");
SetInternalReference(entry, "descriptors", descriptors,
Map::kDescriptorsOffset);
@@ -1022,7 +1043,7 @@ void V8HeapExplorer::ExtractMapReferences(HeapEntry* entry, Map* map) {
SetInternalReference(entry, "layout_descriptor", map->layout_descriptor(),
Map::kLayoutDescriptorOffset);
}
- Object* constructor_or_backpointer = map->constructor_or_backpointer();
+ Object constructor_or_backpointer = map->constructor_or_backpointer();
if (constructor_or_backpointer->IsMap()) {
TagObject(constructor_or_backpointer, "(back pointer)");
SetInternalReference(entry, "back_pointer", constructor_or_backpointer,
@@ -1042,8 +1063,8 @@ void V8HeapExplorer::ExtractMapReferences(HeapEntry* entry, Map* map) {
}
void V8HeapExplorer::ExtractSharedFunctionInfoReferences(
- HeapEntry* entry, SharedFunctionInfo* shared) {
- String* shared_name = shared->DebugName();
+ HeapEntry* entry, SharedFunctionInfo shared) {
+ String shared_name = shared->DebugName();
const char* name = nullptr;
if (shared_name != ReadOnlyRoots(heap_).empty_string()) {
name = names_->GetName(shared_name);
@@ -1071,7 +1092,7 @@ void V8HeapExplorer::ExtractSharedFunctionInfoReferences(
SharedFunctionInfo::kOuterScopeInfoOrFeedbackMetadataOffset);
}
-void V8HeapExplorer::ExtractScriptReferences(HeapEntry* entry, Script* script) {
+void V8HeapExplorer::ExtractScriptReferences(HeapEntry* entry, Script script) {
SetInternalReference(entry, "source", script->source(),
Script::kSourceOffset);
SetInternalReference(entry, "name", script->name(), Script::kNameOffset);
@@ -1082,8 +1103,8 @@ void V8HeapExplorer::ExtractScriptReferences(HeapEntry* entry, Script* script) {
Script::kLineEndsOffset);
}
-void V8HeapExplorer::ExtractAccessorInfoReferences(
- HeapEntry* entry, AccessorInfo* accessor_info) {
+void V8HeapExplorer::ExtractAccessorInfoReferences(HeapEntry* entry,
+ AccessorInfo accessor_info) {
SetInternalReference(entry, "name", accessor_info->name(),
AccessorInfo::kNameOffset);
SetInternalReference(entry, "expected_receiver_type",
@@ -1098,27 +1119,18 @@ void V8HeapExplorer::ExtractAccessorInfoReferences(
}
void V8HeapExplorer::ExtractAccessorPairReferences(HeapEntry* entry,
- AccessorPair* accessors) {
+ AccessorPair accessors) {
SetInternalReference(entry, "getter", accessors->getter(),
AccessorPair::kGetterOffset);
SetInternalReference(entry, "setter", accessors->setter(),
AccessorPair::kSetterOffset);
}
-void V8HeapExplorer::TagBuiltinCodeObject(Code* code, const char* name) {
+void V8HeapExplorer::TagBuiltinCodeObject(Code code, const char* name) {
TagObject(code, names_->GetFormatted("(%s builtin)", name));
}
-void V8HeapExplorer::TagCodeObject(Code* code) {
- if (code->kind() == Code::STUB) {
- TagObject(code, names_->GetFormatted(
- "(%s code)",
- CodeStub::MajorName(CodeStub::GetMajorKey(code))));
- }
-}
-
-void V8HeapExplorer::ExtractCodeReferences(HeapEntry* entry, Code* code) {
- TagCodeObject(code);
+void V8HeapExplorer::ExtractCodeReferences(HeapEntry* entry, Code code) {
TagObject(code->relocation_info(), "(code relocation info)");
SetInternalReference(entry, "relocation_info", code->relocation_info(),
Code::kRelocationInfoOffset);
@@ -1132,19 +1144,19 @@ void V8HeapExplorer::ExtractCodeReferences(HeapEntry* entry, Code* code) {
Code::kSourcePositionTableOffset);
}
-void V8HeapExplorer::ExtractCellReferences(HeapEntry* entry, Cell* cell) {
+void V8HeapExplorer::ExtractCellReferences(HeapEntry* entry, Cell cell) {
SetInternalReference(entry, "value", cell->value(), Cell::kValueOffset);
}
-void V8HeapExplorer::ExtractFeedbackCellReferences(
- HeapEntry* entry, FeedbackCell* feedback_cell) {
+void V8HeapExplorer::ExtractFeedbackCellReferences(HeapEntry* entry,
+ FeedbackCell feedback_cell) {
TagObject(feedback_cell, "(feedback cell)");
SetInternalReference(entry, "value", feedback_cell->value(),
FeedbackCell::kValueOffset);
}
void V8HeapExplorer::ExtractPropertyCellReferences(HeapEntry* entry,
- PropertyCell* cell) {
+ PropertyCell cell) {
SetInternalReference(entry, "value", cell->value(),
PropertyCell::kValueOffset);
TagObject(cell->dependent_code(), "(dependent code)");
@@ -1153,7 +1165,7 @@ void V8HeapExplorer::ExtractPropertyCellReferences(HeapEntry* entry,
}
void V8HeapExplorer::ExtractAllocationSiteReferences(HeapEntry* entry,
- AllocationSite* site) {
+ AllocationSite site) {
SetInternalReference(entry, "transition_info",
site->transition_info_or_boilerplate(),
AllocationSite::kTransitionInfoOrBoilerplateOffset);
@@ -1165,7 +1177,7 @@ void V8HeapExplorer::ExtractAllocationSiteReferences(HeapEntry* entry,
}
void V8HeapExplorer::ExtractArrayBoilerplateDescriptionReferences(
- HeapEntry* entry, ArrayBoilerplateDescription* value) {
+ HeapEntry* entry, ArrayBoilerplateDescription value) {
SetInternalReference(entry, "constant_elements", value->constant_elements(),
ArrayBoilerplateDescription::kConstantElementsOffset);
}
@@ -1187,10 +1199,9 @@ class JSArrayBufferDataEntryAllocator : public HeapEntriesAllocator {
};
void V8HeapExplorer::ExtractJSArrayBufferReferences(HeapEntry* entry,
- JSArrayBuffer* buffer) {
+ JSArrayBuffer buffer) {
// Setup a reference to a native memory backing_store object.
- if (!buffer->backing_store())
- return;
+ if (!buffer->backing_store()) return;
size_t data_size = buffer->byte_length();
JSArrayBufferDataEntryAllocator allocator(data_size, this);
HeapEntry* data_entry =
@@ -1200,14 +1211,14 @@ void V8HeapExplorer::ExtractJSArrayBufferReferences(HeapEntry* entry,
}
void V8HeapExplorer::ExtractJSPromiseReferences(HeapEntry* entry,
- JSPromise* promise) {
+ JSPromise promise) {
SetInternalReference(entry, "reactions_or_result",
promise->reactions_or_result(),
JSPromise::kReactionsOrResultOffset);
}
void V8HeapExplorer::ExtractJSGeneratorObjectReferences(
- HeapEntry* entry, JSGeneratorObject* generator) {
+ HeapEntry* entry, JSGeneratorObject generator) {
SetInternalReference(entry, "function", generator->function(),
JSGeneratorObject::kFunctionOffset);
SetInternalReference(entry, "context", generator->context(),
@@ -1220,7 +1231,7 @@ void V8HeapExplorer::ExtractJSGeneratorObjectReferences(
}
void V8HeapExplorer::ExtractFixedArrayReferences(HeapEntry* entry,
- FixedArray* array) {
+ FixedArray array) {
for (int i = 0, l = array->length(); i < l; ++i) {
DCHECK(!HasWeakHeapObjectTag(array->get(i)));
SetInternalReference(entry, i, array->get(i), array->OffsetOfElementAt(i));
@@ -1228,35 +1239,55 @@ void V8HeapExplorer::ExtractFixedArrayReferences(HeapEntry* entry,
}
void V8HeapExplorer::ExtractFeedbackVectorReferences(
- HeapEntry* entry, FeedbackVector* feedback_vector) {
- MaybeObject* code = feedback_vector->optimized_code_weak_or_smi();
- HeapObject* code_heap_object;
+ HeapEntry* entry, FeedbackVector feedback_vector) {
+ MaybeObject code = feedback_vector->optimized_code_weak_or_smi();
+ HeapObject code_heap_object;
if (code->GetHeapObjectIfWeak(&code_heap_object)) {
SetWeakReference(entry, "optimized code", code_heap_object,
FeedbackVector::kOptimizedCodeOffset);
}
}
+void V8HeapExplorer::ExtractDescriptorArrayReferences(HeapEntry* entry,
+ DescriptorArray array) {
+ SetInternalReference(entry, "enum_cache", array->enum_cache(),
+ DescriptorArray::kEnumCacheOffset);
+ MaybeObjectSlot start = MaybeObjectSlot(array->GetDescriptorSlot(0));
+ MaybeObjectSlot end = MaybeObjectSlot(
+ array->GetDescriptorSlot(array->number_of_all_descriptors()));
+ for (int i = 0; start + i < end; ++i) {
+ MaybeObjectSlot slot = start + i;
+ int offset = static_cast<int>(slot.address() - array->address());
+ MaybeObject object = *slot;
+ HeapObject heap_object;
+ if (object->GetHeapObjectIfWeak(&heap_object)) {
+ SetWeakReference(entry, i, heap_object, offset);
+ } else if (object->GetHeapObjectIfStrong(&heap_object)) {
+ SetInternalReference(entry, i, heap_object, offset);
+ }
+ }
+}
+
template <typename T>
void V8HeapExplorer::ExtractWeakArrayReferences(int header_size,
- HeapEntry* entry, T* array) {
+ HeapEntry* entry, T array) {
for (int i = 0; i < array->length(); ++i) {
- MaybeObject* object = array->Get(i);
- HeapObject* heap_object;
+ MaybeObject object = array->Get(i);
+ HeapObject heap_object;
if (object->GetHeapObjectIfWeak(&heap_object)) {
- SetWeakReference(entry, i, heap_object, header_size + i * kPointerSize);
+ SetWeakReference(entry, i, heap_object, header_size + i * kTaggedSize);
} else if (object->GetHeapObjectIfStrong(&heap_object)) {
SetInternalReference(entry, i, heap_object,
- header_size + i * kPointerSize);
+ header_size + i * kTaggedSize);
}
}
}
-void V8HeapExplorer::ExtractPropertyReferences(JSObject* js_obj,
+void V8HeapExplorer::ExtractPropertyReferences(JSObject js_obj,
HeapEntry* entry) {
Isolate* isolate = js_obj->GetIsolate();
if (js_obj->HasFastProperties()) {
- DescriptorArray* descs = js_obj->map()->instance_descriptors();
+ DescriptorArray descs = js_obj->map()->instance_descriptors();
int real_size = js_obj->map()->NumberOfOwnDescriptors();
for (int i = 0; i < real_size; i++) {
PropertyDetails details = descs->GetDetails(i);
@@ -1265,9 +1296,9 @@ void V8HeapExplorer::ExtractPropertyReferences(JSObject* js_obj,
Representation r = details.representation();
if (r.IsSmi() || r.IsDouble()) break;
- Name* k = descs->GetKey(i);
+ Name k = descs->GetKey(i);
FieldIndex field_index = FieldIndex::ForDescriptor(js_obj->map(), i);
- Object* value = js_obj->RawFastPropertyAt(field_index);
+ Object value = js_obj->RawFastPropertyAt(field_index);
int field_offset =
field_index.is_inobject() ? field_index.offset() : -1;
@@ -1284,26 +1315,26 @@ void V8HeapExplorer::ExtractPropertyReferences(JSObject* js_obj,
}
} else if (js_obj->IsJSGlobalObject()) {
// We assume that global objects can only have slow properties.
- GlobalDictionary* dictionary =
+ GlobalDictionary dictionary =
JSGlobalObject::cast(js_obj)->global_dictionary();
int length = dictionary->Capacity();
ReadOnlyRoots roots(isolate);
for (int i = 0; i < length; ++i) {
if (!dictionary->IsKey(roots, dictionary->KeyAt(i))) continue;
- PropertyCell* cell = dictionary->CellAt(i);
- Name* name = cell->name();
- Object* value = cell->value();
+ PropertyCell cell = dictionary->CellAt(i);
+ Name name = cell->name();
+ Object value = cell->value();
PropertyDetails details = cell->property_details();
SetDataOrAccessorPropertyReference(details.kind(), entry, name, value);
}
} else {
- NameDictionary* dictionary = js_obj->property_dictionary();
+ NameDictionary dictionary = js_obj->property_dictionary();
int length = dictionary->Capacity();
ReadOnlyRoots roots(isolate);
for (int i = 0; i < length; ++i) {
- Object* k = dictionary->KeyAt(i);
+ Object k = dictionary->KeyAt(i);
if (!dictionary->IsKey(roots, k)) continue;
- Object* value = dictionary->ValueAt(i);
+ Object value = dictionary->ValueAt(i);
PropertyDetails details = dictionary->DetailsAt(i);
SetDataOrAccessorPropertyReference(details.kind(), entry, Name::cast(k),
value);
@@ -1311,27 +1342,27 @@ void V8HeapExplorer::ExtractPropertyReferences(JSObject* js_obj,
}
}
-void V8HeapExplorer::ExtractAccessorPairProperty(HeapEntry* entry, Name* key,
- Object* callback_obj,
+void V8HeapExplorer::ExtractAccessorPairProperty(HeapEntry* entry, Name key,
+ Object callback_obj,
int field_offset) {
if (!callback_obj->IsAccessorPair()) return;
- AccessorPair* accessors = AccessorPair::cast(callback_obj);
+ AccessorPair accessors = AccessorPair::cast(callback_obj);
SetPropertyReference(entry, key, accessors, nullptr, field_offset);
- Object* getter = accessors->getter();
+ Object getter = accessors->getter();
if (!getter->IsOddball()) {
SetPropertyReference(entry, key, getter, "get %s");
}
- Object* setter = accessors->setter();
+ Object setter = accessors->setter();
if (!setter->IsOddball()) {
SetPropertyReference(entry, key, setter, "set %s");
}
}
-void V8HeapExplorer::ExtractElementReferences(JSObject* js_obj,
+void V8HeapExplorer::ExtractElementReferences(JSObject js_obj,
HeapEntry* entry) {
ReadOnlyRoots roots = js_obj->GetReadOnlyRoots();
if (js_obj->HasObjectElements()) {
- FixedArray* elements = FixedArray::cast(js_obj->elements());
+ FixedArray elements = FixedArray::cast(js_obj->elements());
int length = js_obj->IsJSArray()
? Smi::ToInt(JSArray::cast(js_obj)->length())
: elements->length();
@@ -1341,10 +1372,10 @@ void V8HeapExplorer::ExtractElementReferences(JSObject* js_obj,
}
}
} else if (js_obj->HasDictionaryElements()) {
- NumberDictionary* dictionary = js_obj->element_dictionary();
+ NumberDictionary dictionary = js_obj->element_dictionary();
int length = dictionary->Capacity();
for (int i = 0; i < length; ++i) {
- Object* k = dictionary->KeyAt(i);
+ Object k = dictionary->KeyAt(i);
if (!dictionary->IsKey(roots, k)) continue;
DCHECK(k->IsNumber());
uint32_t index = static_cast<uint32_t>(k->Number());
@@ -1353,28 +1384,28 @@ void V8HeapExplorer::ExtractElementReferences(JSObject* js_obj,
}
}
-void V8HeapExplorer::ExtractInternalReferences(JSObject* js_obj,
+void V8HeapExplorer::ExtractInternalReferences(JSObject js_obj,
HeapEntry* entry) {
int length = js_obj->GetEmbedderFieldCount();
for (int i = 0; i < length; ++i) {
- Object* o = js_obj->GetEmbedderField(i);
+ Object o = js_obj->GetEmbedderField(i);
SetInternalReference(entry, i, o, js_obj->GetEmbedderFieldOffset(i));
}
}
-JSFunction* V8HeapExplorer::GetConstructor(JSReceiver* receiver) {
+JSFunction V8HeapExplorer::GetConstructor(JSReceiver receiver) {
Isolate* isolate = receiver->GetIsolate();
DisallowHeapAllocation no_gc;
HandleScope scope(isolate);
MaybeHandle<JSFunction> maybe_constructor =
JSReceiver::GetConstructor(handle(receiver, isolate));
- if (maybe_constructor.is_null()) return nullptr;
+ if (maybe_constructor.is_null()) return JSFunction();
return *maybe_constructor.ToHandleChecked();
}
-String* V8HeapExplorer::GetConstructorName(JSObject* object) {
+String V8HeapExplorer::GetConstructorName(JSObject object) {
Isolate* isolate = object->GetIsolate();
if (object->IsJSFunction()) return ReadOnlyRoots(isolate).closure_string();
DisallowHeapAllocation no_gc;
@@ -1382,8 +1413,10 @@ String* V8HeapExplorer::GetConstructorName(JSObject* object) {
return *JSReceiver::GetConstructorName(handle(object, isolate));
}
-HeapEntry* V8HeapExplorer::GetEntry(Object* obj) {
- return obj->IsHeapObject() ? generator_->FindOrAddEntry(obj, this) : nullptr;
+HeapEntry* V8HeapExplorer::GetEntry(Object obj) {
+ return obj->IsHeapObject() ? generator_->FindOrAddEntry(
+ reinterpret_cast<void*>(obj.ptr()), this)
+ : nullptr;
}
class RootsReferencesExtractor : public RootVisitor {
@@ -1394,7 +1427,7 @@ class RootsReferencesExtractor : public RootVisitor {
void SetVisitingWeakRoots() { visiting_weak_roots_ = true; }
void VisitRootPointer(Root root, const char* description,
- Object** object) override {
+ FullObjectSlot object) override {
if (root == Root::kBuiltins) {
explorer_->TagBuiltinCodeObject(Code::cast(*object), description);
}
@@ -1402,10 +1435,11 @@ class RootsReferencesExtractor : public RootVisitor {
*object);
}
- void VisitRootPointers(Root root, const char* description, Object** start,
- Object** end) override {
- for (Object** p = start; p < end; p++)
+ void VisitRootPointers(Root root, const char* description,
+ FullObjectSlot start, FullObjectSlot end) override {
+ for (FullObjectSlot p = start; p < end; ++p) {
VisitRootPointer(root, description, p);
+ }
}
private:
@@ -1427,7 +1461,8 @@ bool V8HeapExplorer::IterateAndExtractReferences(
// first. Otherwise a particular JSFunction object could set
// its custom name to a generic builtin.
RootsReferencesExtractor extractor(this);
- heap_->IterateRoots(&extractor, VISIT_ONLY_STRONG_FOR_SERIALIZATION);
+ ReadOnlyRoots(heap_).Iterate(&extractor);
+ heap_->IterateRoots(&extractor, VISIT_ONLY_STRONG);
extractor.SetVisitingWeakRoots();
heap_->IterateWeakGlobalHandles(&extractor);
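// Sketch (stand-in type, not V8's FullObjectSlot): the hunks above replace
// raw Object** ranges with slot objects that still iterate like pointers,
// so visitor loops keep the `for (p = start; p < end; ++p)` shape.
#include <cstdint>
#include <cstdio>

struct TaggedSlot {
  uintptr_t* location;  // a full-word slot holding a tagged value
  uintptr_t operator*() const { return *location; }
  TaggedSlot& operator++() {
    ++location;
    return *this;
  }
  bool operator<(const TaggedSlot& other) const {
    return location < other.location;
  }
};

int main() {
  uintptr_t roots[] = {0x10, 0x20, 0x30};
  for (TaggedSlot p{roots}; p < TaggedSlot{roots + 3}; ++p) {
    printf("0x%llx\n", static_cast<unsigned long long>(*p));
  }
}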
@@ -1435,11 +1470,11 @@ bool V8HeapExplorer::IterateAndExtractReferences(
HeapIterator iterator(heap_, HeapIterator::kFilterUnreachable);
// Heap iteration with filtering must be finished in any case.
- for (HeapObject *obj = iterator.next(); obj != nullptr;
+ for (HeapObject obj = iterator.next(); !obj.is_null();
obj = iterator.next(), progress_->ProgressStep()) {
if (interrupted) continue;
- size_t max_pointer = obj->Size() / kPointerSize;
+ size_t max_pointer = obj->Size() / kTaggedSize;
if (max_pointer > visited_fields_.size()) {
// Clear the current bits.
std::vector<bool>().swap(visited_fields_);
@@ -1470,8 +1505,7 @@ bool V8HeapExplorer::IterateAndExtractReferences(
return interrupted ? false : progress_->ProgressReport(true);
}
-
-bool V8HeapExplorer::IsEssentialObject(Object* object) {
+bool V8HeapExplorer::IsEssentialObject(Object object) {
ReadOnlyRoots roots(heap_);
return object->IsHeapObject() && !object->IsOddball() &&
object != roots.empty_byte_array() &&
@@ -1486,7 +1520,7 @@ bool V8HeapExplorer::IsEssentialObject(Object* object) {
object != roots.two_pointer_filler_map();
}
-bool V8HeapExplorer::IsEssentialHiddenReference(Object* parent,
+bool V8HeapExplorer::IsEssentialHiddenReference(Object parent,
int field_offset) {
if (parent->IsAllocationSite() &&
field_offset == AllocationSite::kWeakNextOffset)
@@ -1501,8 +1535,8 @@ bool V8HeapExplorer::IsEssentialHiddenReference(Object* parent,
}
void V8HeapExplorer::SetContextReference(HeapEntry* parent_entry,
- String* reference_name,
- Object* child_obj, int field_offset) {
+ String reference_name,
+ Object child_obj, int field_offset) {
HeapEntry* child_entry = GetEntry(child_obj);
if (child_entry == nullptr) return;
parent_entry->SetNamedReference(HeapGraphEdge::kContextVariable,
@@ -1512,14 +1546,14 @@ void V8HeapExplorer::SetContextReference(HeapEntry* parent_entry,
void V8HeapExplorer::MarkVisitedField(int offset) {
if (offset < 0) return;
- int index = offset / kPointerSize;
+ int index = offset / kTaggedSize;
DCHECK(!visited_fields_[index]);
visited_fields_[index] = true;
}
void V8HeapExplorer::SetNativeBindReference(HeapEntry* parent_entry,
const char* reference_name,
- Object* child_obj) {
+ Object child_obj) {
HeapEntry* child_entry = GetEntry(child_obj);
if (child_entry == nullptr) return;
parent_entry->SetNamedReference(HeapGraphEdge::kShortcut, reference_name,
@@ -1527,7 +1561,7 @@ void V8HeapExplorer::SetNativeBindReference(HeapEntry* parent_entry,
}
void V8HeapExplorer::SetElementReference(HeapEntry* parent_entry, int index,
- Object* child_obj) {
+ Object child_obj) {
HeapEntry* child_entry = GetEntry(child_obj);
if (child_entry == nullptr) return;
parent_entry->SetIndexedReference(HeapGraphEdge::kElement, index,
@@ -1536,7 +1570,7 @@ void V8HeapExplorer::SetElementReference(HeapEntry* parent_entry, int index,
void V8HeapExplorer::SetInternalReference(HeapEntry* parent_entry,
const char* reference_name,
- Object* child_obj, int field_offset) {
+ Object child_obj, int field_offset) {
HeapEntry* child_entry = GetEntry(child_obj);
if (child_entry == nullptr) return;
if (IsEssentialObject(child_obj)) {
@@ -1547,7 +1581,7 @@ void V8HeapExplorer::SetInternalReference(HeapEntry* parent_entry,
}
void V8HeapExplorer::SetInternalReference(HeapEntry* parent_entry, int index,
- Object* child_obj, int field_offset) {
+ Object child_obj, int field_offset) {
HeapEntry* child_entry = GetEntry(child_obj);
if (child_entry == nullptr) return;
if (IsEssentialObject(child_obj)) {
@@ -1557,9 +1591,9 @@ void V8HeapExplorer::SetInternalReference(HeapEntry* parent_entry, int index,
MarkVisitedField(field_offset);
}
-void V8HeapExplorer::SetHiddenReference(HeapObject* parent_obj,
+void V8HeapExplorer::SetHiddenReference(HeapObject parent_obj,
HeapEntry* parent_entry, int index,
- Object* child_obj, int field_offset) {
+ Object child_obj, int field_offset) {
DCHECK_EQ(parent_entry, GetEntry(parent_obj));
HeapEntry* child_entry = GetEntry(child_obj);
if (child_entry != nullptr && IsEssentialObject(child_obj) &&
@@ -1571,7 +1605,7 @@ void V8HeapExplorer::SetHiddenReference(HeapObject* parent_obj,
void V8HeapExplorer::SetWeakReference(HeapEntry* parent_entry,
const char* reference_name,
- Object* child_obj, int field_offset) {
+ Object child_obj, int field_offset) {
HeapEntry* child_entry = GetEntry(child_obj);
if (child_entry == nullptr) return;
if (IsEssentialObject(child_obj)) {
@@ -1582,7 +1616,7 @@ void V8HeapExplorer::SetWeakReference(HeapEntry* parent_entry,
}
void V8HeapExplorer::SetWeakReference(HeapEntry* parent_entry, int index,
- Object* child_obj, int field_offset) {
+ Object child_obj, int field_offset) {
HeapEntry* child_entry = GetEntry(child_obj);
if (child_entry == nullptr) return;
if (IsEssentialObject(child_obj)) {
@@ -1593,8 +1627,8 @@ void V8HeapExplorer::SetWeakReference(HeapEntry* parent_entry, int index,
}
void V8HeapExplorer::SetDataOrAccessorPropertyReference(
- PropertyKind kind, HeapEntry* parent_entry, Name* reference_name,
- Object* child_obj, const char* name_format_string, int field_offset) {
+ PropertyKind kind, HeapEntry* parent_entry, Name reference_name,
+ Object child_obj, const char* name_format_string, int field_offset) {
if (kind == kAccessor) {
ExtractAccessorPairProperty(parent_entry, reference_name, child_obj,
field_offset);
@@ -1605,8 +1639,7 @@ void V8HeapExplorer::SetDataOrAccessorPropertyReference(
}
void V8HeapExplorer::SetPropertyReference(HeapEntry* parent_entry,
- Name* reference_name,
- Object* child_obj,
+ Name reference_name, Object child_obj,
const char* name_format_string,
int field_offset) {
HeapEntry* child_entry = GetEntry(child_obj);
@@ -1633,7 +1666,7 @@ void V8HeapExplorer::SetRootGcRootsReference() {
snapshot_->gc_roots());
}
-void V8HeapExplorer::SetUserGlobalReference(Object* child_obj) {
+void V8HeapExplorer::SetUserGlobalReference(Object child_obj) {
HeapEntry* child_entry = GetEntry(child_obj);
DCHECK_NOT_NULL(child_entry);
snapshot_->root()->SetNamedAutoIndexReference(HeapGraphEdge::kShortcut,
@@ -1646,7 +1679,7 @@ void V8HeapExplorer::SetGcRootsReference(Root root) {
}
void V8HeapExplorer::SetGcSubrootReference(Root root, const char* description,
- bool is_weak, Object* child_obj) {
+ bool is_weak, Object child_obj) {
HeapEntry* child_entry = GetEntry(child_obj);
if (child_entry == nullptr) return;
const char* name = GetStrongGcSubrootName(child_obj);
@@ -1665,7 +1698,7 @@ void V8HeapExplorer::SetGcSubrootReference(Root root, const char* description,
// also used as starting points in distance calculations.
if (is_weak || !child_obj->IsNativeContext()) return;
- JSGlobalObject* global = Context::cast(child_obj)->global_object();
+ JSGlobalObject global = Context::cast(child_obj)->global_object();
if (!global->IsJSGlobalObject()) return;
if (!user_roots_.insert(global).second) return;
@@ -1673,24 +1706,13 @@ void V8HeapExplorer::SetGcSubrootReference(Root root, const char* description,
SetUserGlobalReference(global);
}
-// This static array is used to prevent excessive code-size in
-// GetStrongGcSubrootName below, which would happen if we called emplace() for
-// every root in a macro.
-static const char* root_names[] = {
-#define ROOT_NAME(type, name, CamelName) #name,
- READ_ONLY_ROOT_LIST(ROOT_NAME) MUTABLE_ROOT_LIST(ROOT_NAME)
-#undef ROOT_NAME
-};
-STATIC_ASSERT(static_cast<uint16_t>(RootIndex::kRootListLength) ==
- arraysize(root_names));
-
-const char* V8HeapExplorer::GetStrongGcSubrootName(Object* object) {
+const char* V8HeapExplorer::GetStrongGcSubrootName(Object object) {
if (strong_gc_subroot_names_.empty()) {
- for (uint16_t i = 0; i < static_cast<uint16_t>(RootIndex::kRootListLength);
- i++) {
- const char* name = root_names[i];
- RootIndex index = static_cast<RootIndex>(i);
- strong_gc_subroot_names_.emplace(heap_->root(index), name);
+ Isolate* isolate = heap_->isolate();
+ for (RootIndex root_index = RootIndex::kFirstStrongOrReadOnlyRoot;
+ root_index <= RootIndex::kLastStrongOrReadOnlyRoot; ++root_index) {
+ const char* name = RootsTable::name(root_index);
+ strong_gc_subroot_names_.emplace(isolate->root(root_index), name);
}
CHECK(!strong_gc_subroot_names_.empty());
}
@@ -1698,7 +1720,7 @@ const char* V8HeapExplorer::GetStrongGcSubrootName(Object* object) {
return it != strong_gc_subroot_names_.end() ? it->second : nullptr;
}
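// Sketch (toy enum, not V8's RootIndex): the rewritten loop above walks an
// inclusive range of a strongly typed enum, which needs an explicit
// operator++ along these lines:
#include <cstdio>

enum class RootIdx : int { kFirst = 0, kA = 0, kB, kC, kLast = kC };

RootIdx& operator++(RootIdx& i) {
  i = static_cast<RootIdx>(static_cast<int>(i) + 1);
  return i;
}

const char* NameOf(RootIdx i) {
  static const char* names[] = {"a", "b", "c"};
  return names[static_cast<int>(i)];
}

int main() {
  for (RootIdx i = RootIdx::kFirst; i <= RootIdx::kLast; ++i) {
    printf("%s\n", NameOf(i));
  }
}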
-void V8HeapExplorer::TagObject(Object* obj, const char* tag) {
+void V8HeapExplorer::TagObject(Object obj, const char* tag) {
if (IsEssentialObject(obj)) {
HeapEntry* entry = GetEntry(obj);
if (entry->name()[0] == '\0') {
@@ -1709,13 +1731,13 @@ void V8HeapExplorer::TagObject(Object* obj, const char* tag) {
class GlobalObjectsEnumerator : public RootVisitor {
public:
- void VisitRootPointers(Root root, const char* description, Object** start,
- Object** end) override {
- for (Object** p = start; p < end; p++) {
+ void VisitRootPointers(Root root, const char* description,
+ FullObjectSlot start, FullObjectSlot end) override {
+ for (FullObjectSlot p = start; p < end; ++p) {
if (!(*p)->IsNativeContext()) continue;
- JSObject* proxy = Context::cast(*p)->global_proxy();
+ JSObject proxy = Context::cast(*p)->global_proxy();
if (!proxy->IsJSGlobalProxy()) continue;
- Object* global = proxy->map()->prototype();
+ Object global = proxy->map()->prototype();
if (!global->IsJSGlobalObject()) continue;
objects_.push_back(Handle<JSGlobalObject>(JSGlobalObject::cast(global),
proxy->GetIsolate()));
@@ -1759,8 +1781,8 @@ class EmbedderGraphImpl : public EmbedderGraph {
class V8NodeImpl : public Node {
public:
- explicit V8NodeImpl(Object* object) : object_(object) {}
- Object* GetObject() { return object_; }
+ explicit V8NodeImpl(Object object) : object_(object) {}
+ Object GetObject() { return object_; }
// Node overrides.
bool IsEmbedderNode() override { return false; }
@@ -1776,7 +1798,7 @@ class EmbedderGraphImpl : public EmbedderGraph {
}
private:
- Object* object_;
+ Object object_;
};
Node* V8Node(const v8::Local<v8::Value>& value) final {
@@ -1811,7 +1833,7 @@ class GlobalHandlesExtractor : public PersistentHandleVisitor {
void VisitPersistentHandle(Persistent<Value>* value,
uint16_t class_id) override {
Handle<Object> object = Utils::OpenPersistent(value);
- explorer_->VisitSubtreeWrapper(object.location(), class_id);
+ explorer_->VisitSubtreeWrapper(object, class_id);
}
private:
@@ -1948,7 +1970,7 @@ NativeObjectsExplorer::~NativeObjectsExplorer() {
for (auto map_entry : objects_by_info_) {
v8::RetainedObjectInfo* info = map_entry.first;
info->Dispose();
- std::vector<HeapObject*>* objects = map_entry.second;
+ std::vector<HeapObject>* objects = map_entry.second;
delete objects;
}
for (auto map_entry : native_groups_) {
@@ -1970,14 +1992,14 @@ void NativeObjectsExplorer::FillRetainedObjects() {
v8::HeapProfiler::RetainerInfos infos =
snapshot_->profiler()->GetRetainerInfos(isolate_);
for (auto& pair : infos.groups) {
- std::vector<HeapObject*>* info = GetVectorMaybeDisposeInfo(pair.first);
+ std::vector<HeapObject>* info = GetVectorMaybeDisposeInfo(pair.first);
for (auto& persistent : pair.second) {
if (persistent->IsEmpty()) continue;
Handle<Object> object = v8::Utils::OpenHandle(
*persistent->Get(reinterpret_cast<v8::Isolate*>(isolate_)));
DCHECK(!object.is_null());
- HeapObject* heap_object = HeapObject::cast(*object);
+ HeapObject heap_object = HeapObject::cast(*object);
info->push_back(heap_object);
in_groups_.insert(heap_object);
}
@@ -1999,27 +2021,27 @@ void NativeObjectsExplorer::FillEdges() {
Handle<Object> parent_object = v8::Utils::OpenHandle(
*pair.first->Get(reinterpret_cast<v8::Isolate*>(isolate_)));
- HeapObject* parent = HeapObject::cast(*parent_object);
- HeapEntry* parent_entry =
- generator_->FindOrAddEntry(parent, native_entries_allocator_.get());
+ HeapObject parent = HeapObject::cast(*parent_object);
+ HeapEntry* parent_entry = generator_->FindOrAddEntry(
+ reinterpret_cast<void*>(parent.ptr()), native_entries_allocator_.get());
DCHECK_NOT_NULL(parent_entry);
Handle<Object> child_object = v8::Utils::OpenHandle(
*pair.second->Get(reinterpret_cast<v8::Isolate*>(isolate_)));
- HeapObject* child = HeapObject::cast(*child_object);
- HeapEntry* child_entry =
- generator_->FindOrAddEntry(child, native_entries_allocator_.get());
+ HeapObject child = HeapObject::cast(*child_object);
+ HeapEntry* child_entry = generator_->FindOrAddEntry(
+ reinterpret_cast<void*>(child.ptr()), native_entries_allocator_.get());
parent_entry->SetNamedReference(HeapGraphEdge::kInternal, "native",
child_entry);
}
edges_.clear();
}
-std::vector<HeapObject*>* NativeObjectsExplorer::GetVectorMaybeDisposeInfo(
+std::vector<HeapObject>* NativeObjectsExplorer::GetVectorMaybeDisposeInfo(
v8::RetainedObjectInfo* info) {
if (objects_by_info_.count(info)) {
info->Dispose();
} else {
- objects_by_info_[info] = new std::vector<HeapObject*>();
+ objects_by_info_[info] = new std::vector<HeapObject>();
}
return objects_by_info_[info];
}
@@ -2036,9 +2058,10 @@ HeapEntry* NativeObjectsExplorer::EntryForEmbedderGraphNode(
} else {
EmbedderGraphImpl::V8NodeImpl* v8_node =
static_cast<EmbedderGraphImpl::V8NodeImpl*>(node);
- Object* object = v8_node->GetObject();
+ Object object = v8_node->GetObject();
if (object->IsSmi()) return nullptr;
- return generator_->FindEntry(HeapObject::cast(object));
+ return generator_->FindEntry(
+ reinterpret_cast<void*>(Object::cast(object).ptr()));
}
}
@@ -2088,8 +2111,8 @@ bool NativeObjectsExplorer::IterateAndExtractReferences(
for (auto map_entry : objects_by_info_) {
v8::RetainedObjectInfo* info = map_entry.first;
SetNativeRootReference(info);
- std::vector<HeapObject*>* objects = map_entry.second;
- for (HeapObject* object : *objects) {
+ std::vector<HeapObject>* objects = map_entry.second;
+ for (HeapObject object : *objects) {
SetWrapperNativeReferences(object, info);
}
}
@@ -2123,8 +2146,9 @@ void NativeObjectsExplorer::SetNativeRootReference(
}
void NativeObjectsExplorer::SetWrapperNativeReferences(
- HeapObject* wrapper, v8::RetainedObjectInfo* info) {
- HeapEntry* wrapper_entry = generator_->FindEntry(wrapper);
+ HeapObject wrapper, v8::RetainedObjectInfo* info) {
+ HeapEntry* wrapper_entry =
+ generator_->FindEntry(reinterpret_cast<void*>(wrapper.ptr()));
DCHECK_NOT_NULL(wrapper_entry);
HeapEntry* info_entry =
generator_->FindOrAddEntry(info, native_entries_allocator_.get());
@@ -2146,7 +2170,8 @@ void NativeObjectsExplorer::SetRootNativeRootsReference() {
}
}
-void NativeObjectsExplorer::VisitSubtreeWrapper(Object** p, uint16_t class_id) {
+void NativeObjectsExplorer::VisitSubtreeWrapper(Handle<Object> p,
+ uint16_t class_id) {
if (in_groups_.count(*p)) return;
v8::RetainedObjectInfo* info =
isolate_->heap_profiler()->ExecuteWrapperClassCallback(class_id, p);
@@ -2171,13 +2196,13 @@ class NullContextScope {
public:
explicit NullContextScope(Isolate* isolate)
: isolate_(isolate), prev_(isolate->context()) {
- isolate_->set_context(nullptr);
+ isolate_->set_context(Context());
}
~NullContextScope() { isolate_->set_context(prev_); }
private:
Isolate* isolate_;
- Context* prev_;
+ Context prev_;
};
} // namespace
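// Sketch (stand-in types, not V8 API): with Context a value type, "no
// context" becomes a default-constructed Context() instead of nullptr; the
// RAII save/restore pattern of NullContextScope is otherwise unchanged.
#include <cassert>

struct Ctx {
  void* ptr = nullptr;
  bool is_null() const { return ptr == nullptr; }
};

struct IsolateLike {
  Ctx current;
  Ctx context() const { return current; }
  void set_context(Ctx c) { current = c; }
};

class NullCtxScope {
 public:
  explicit NullCtxScope(IsolateLike* iso) : iso_(iso), prev_(iso->context()) {
    iso_->set_context(Ctx());  // value-type "null", not nullptr
  }
  ~NullCtxScope() { iso_->set_context(prev_); }

 private:
  IsolateLike* iso_;
  Ctx prev_;  // held by value now, not as a raw Context*
};

int main() {
  IsolateLike iso;
  iso.set_context(Ctx{&iso});
  {
    NullCtxScope scope(&iso);
    assert(iso.context().is_null());
  }
  assert(!iso.context().is_null());
}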
diff --git a/deps/v8/src/profiler/heap-snapshot-generator.h b/deps/v8/src/profiler/heap-snapshot-generator.h
index 1f8f364912..14cce75f90 100644
--- a/deps/v8/src/profiler/heap-snapshot-generator.h
+++ b/deps/v8/src/profiler/heap-snapshot-generator.h
@@ -15,6 +15,8 @@
#include "src/objects.h"
#include "src/objects/fixed-array.h"
#include "src/objects/hash-table.h"
+#include "src/objects/heap-object.h"
+#include "src/objects/js-objects.h"
#include "src/objects/literal-objects.h"
#include "src/profiler/strings-storage.h"
#include "src/string-hasher.h"
@@ -322,106 +324,105 @@ class V8HeapExplorer : public HeapEntriesAllocator {
int EstimateObjectsCount();
bool IterateAndExtractReferences(HeapSnapshotGenerator* generator);
void TagGlobalObjects();
- void TagCodeObject(Code* code);
- void TagBuiltinCodeObject(Code* code, const char* name);
+ void TagBuiltinCodeObject(Code code, const char* name);
HeapEntry* AddEntry(Address address,
HeapEntry::Type type,
const char* name,
size_t size);
- static JSFunction* GetConstructor(JSReceiver* receiver);
- static String* GetConstructorName(JSObject* object);
+ static JSFunction GetConstructor(JSReceiver receiver);
+ static String GetConstructorName(JSObject object);
private:
void MarkVisitedField(int offset);
- HeapEntry* AddEntry(HeapObject* object);
- HeapEntry* AddEntry(HeapObject* object,
- HeapEntry::Type type,
+ HeapEntry* AddEntry(HeapObject object);
+ HeapEntry* AddEntry(HeapObject object, HeapEntry::Type type,
const char* name);
- const char* GetSystemEntryName(HeapObject* object);
-
- void ExtractLocation(HeapEntry* entry, HeapObject* object);
- void ExtractLocationForJSFunction(HeapEntry* entry, JSFunction* func);
- void ExtractReferences(HeapEntry* entry, HeapObject* obj);
- void ExtractJSGlobalProxyReferences(HeapEntry* entry, JSGlobalProxy* proxy);
- void ExtractJSObjectReferences(HeapEntry* entry, JSObject* js_obj);
- void ExtractStringReferences(HeapEntry* entry, String* obj);
- void ExtractSymbolReferences(HeapEntry* entry, Symbol* symbol);
- void ExtractJSCollectionReferences(HeapEntry* entry,
- JSCollection* collection);
+ const char* GetSystemEntryName(HeapObject object);
+
+ void ExtractLocation(HeapEntry* entry, HeapObject object);
+ void ExtractLocationForJSFunction(HeapEntry* entry, JSFunction func);
+ void ExtractReferences(HeapEntry* entry, HeapObject obj);
+ void ExtractJSGlobalProxyReferences(HeapEntry* entry, JSGlobalProxy proxy);
+ void ExtractJSObjectReferences(HeapEntry* entry, JSObject js_obj);
+ void ExtractStringReferences(HeapEntry* entry, String obj);
+ void ExtractSymbolReferences(HeapEntry* entry, Symbol symbol);
+ void ExtractJSCollectionReferences(HeapEntry* entry, JSCollection collection);
void ExtractJSWeakCollectionReferences(HeapEntry* entry,
- JSWeakCollection* collection);
+ JSWeakCollection collection);
void ExtractEphemeronHashTableReferences(HeapEntry* entry,
- EphemeronHashTable* table);
- void ExtractContextReferences(HeapEntry* entry, Context* context);
- void ExtractMapReferences(HeapEntry* entry, Map* map);
+ EphemeronHashTable table);
+ void ExtractContextReferences(HeapEntry* entry, Context context);
+ void ExtractMapReferences(HeapEntry* entry, Map map);
void ExtractSharedFunctionInfoReferences(HeapEntry* entry,
- SharedFunctionInfo* shared);
- void ExtractScriptReferences(HeapEntry* entry, Script* script);
+ SharedFunctionInfo shared);
+ void ExtractScriptReferences(HeapEntry* entry, Script script);
void ExtractAccessorInfoReferences(HeapEntry* entry,
- AccessorInfo* accessor_info);
- void ExtractAccessorPairReferences(HeapEntry* entry, AccessorPair* accessors);
- void ExtractCodeReferences(HeapEntry* entry, Code* code);
- void ExtractCellReferences(HeapEntry* entry, Cell* cell);
+ AccessorInfo accessor_info);
+ void ExtractAccessorPairReferences(HeapEntry* entry, AccessorPair accessors);
+ void ExtractCodeReferences(HeapEntry* entry, Code code);
+ void ExtractCellReferences(HeapEntry* entry, Cell cell);
void ExtractFeedbackCellReferences(HeapEntry* entry,
- FeedbackCell* feedback_cell);
- void ExtractPropertyCellReferences(HeapEntry* entry, PropertyCell* cell);
- void ExtractAllocationSiteReferences(HeapEntry* entry, AllocationSite* site);
+ FeedbackCell feedback_cell);
+ void ExtractPropertyCellReferences(HeapEntry* entry, PropertyCell cell);
+ void ExtractAllocationSiteReferences(HeapEntry* entry, AllocationSite site);
void ExtractArrayBoilerplateDescriptionReferences(
- HeapEntry* entry, ArrayBoilerplateDescription* value);
- void ExtractJSArrayBufferReferences(HeapEntry* entry, JSArrayBuffer* buffer);
- void ExtractJSPromiseReferences(HeapEntry* entry, JSPromise* promise);
+ HeapEntry* entry, ArrayBoilerplateDescription value);
+ void ExtractJSArrayBufferReferences(HeapEntry* entry, JSArrayBuffer buffer);
+ void ExtractJSPromiseReferences(HeapEntry* entry, JSPromise promise);
void ExtractJSGeneratorObjectReferences(HeapEntry* entry,
- JSGeneratorObject* generator);
- void ExtractFixedArrayReferences(HeapEntry* entry, FixedArray* array);
+ JSGeneratorObject generator);
+ void ExtractFixedArrayReferences(HeapEntry* entry, FixedArray array);
void ExtractFeedbackVectorReferences(HeapEntry* entry,
- FeedbackVector* feedback_vector);
+ FeedbackVector feedback_vector);
+ void ExtractDescriptorArrayReferences(HeapEntry* entry,
+ DescriptorArray array);
template <typename T>
- void ExtractWeakArrayReferences(int header_size, HeapEntry* entry, T* array);
- void ExtractPropertyReferences(JSObject* js_obj, HeapEntry* entry);
- void ExtractAccessorPairProperty(HeapEntry* entry, Name* key,
- Object* callback_obj, int field_offset = -1);
- void ExtractElementReferences(JSObject* js_obj, HeapEntry* entry);
- void ExtractInternalReferences(JSObject* js_obj, HeapEntry* entry);
-
- bool IsEssentialObject(Object* object);
- bool IsEssentialHiddenReference(Object* parent, int field_offset);
-
- void SetContextReference(HeapEntry* parent_entry, String* reference_name,
- Object* child, int field_offset);
+ void ExtractWeakArrayReferences(int header_size, HeapEntry* entry, T array);
+ void ExtractPropertyReferences(JSObject js_obj, HeapEntry* entry);
+ void ExtractAccessorPairProperty(HeapEntry* entry, Name key,
+ Object callback_obj, int field_offset = -1);
+ void ExtractElementReferences(JSObject js_obj, HeapEntry* entry);
+ void ExtractInternalReferences(JSObject js_obj, HeapEntry* entry);
+
+ bool IsEssentialObject(Object object);
+ bool IsEssentialHiddenReference(Object parent, int field_offset);
+
+ void SetContextReference(HeapEntry* parent_entry, String reference_name,
+ Object child, int field_offset);
void SetNativeBindReference(HeapEntry* parent_entry,
- const char* reference_name, Object* child);
- void SetElementReference(HeapEntry* parent_entry, int index, Object* child);
+ const char* reference_name, Object child);
+ void SetElementReference(HeapEntry* parent_entry, int index, Object child);
void SetInternalReference(HeapEntry* parent_entry, const char* reference_name,
- Object* child, int field_offset = -1);
- void SetInternalReference(HeapEntry* parent_entry, int index, Object* child,
+ Object child, int field_offset = -1);
+ void SetInternalReference(HeapEntry* parent_entry, int index, Object child,
int field_offset = -1);
- void SetHiddenReference(HeapObject* parent_obj, HeapEntry* parent_entry,
- int index, Object* child, int field_offset);
+ void SetHiddenReference(HeapObject parent_obj, HeapEntry* parent_entry,
+ int index, Object child, int field_offset);
void SetWeakReference(HeapEntry* parent_entry, const char* reference_name,
- Object* child_obj, int field_offset);
- void SetWeakReference(HeapEntry* parent_entry, int index, Object* child_obj,
+ Object child_obj, int field_offset);
+ void SetWeakReference(HeapEntry* parent_entry, int index, Object child_obj,
int field_offset);
- void SetPropertyReference(HeapEntry* parent_entry, Name* reference_name,
- Object* child,
+ void SetPropertyReference(HeapEntry* parent_entry, Name reference_name,
+ Object child,
const char* name_format_string = nullptr,
int field_offset = -1);
void SetDataOrAccessorPropertyReference(
- PropertyKind kind, HeapEntry* parent_entry, Name* reference_name,
- Object* child, const char* name_format_string = nullptr,
+ PropertyKind kind, HeapEntry* parent_entry, Name reference_name,
+ Object child, const char* name_format_string = nullptr,
int field_offset = -1);
- void SetUserGlobalReference(Object* user_global);
+ void SetUserGlobalReference(Object user_global);
void SetRootGcRootsReference();
void SetGcRootsReference(Root root);
void SetGcSubrootReference(Root root, const char* description, bool is_weak,
- Object* child);
- const char* GetStrongGcSubrootName(Object* object);
- void TagObject(Object* obj, const char* tag);
+ Object child);
+ const char* GetStrongGcSubrootName(Object object);
+ void TagObject(Object obj, const char* tag);
- HeapEntry* GetEntry(Object* obj);
+ HeapEntry* GetEntry(Object obj);
Heap* heap_;
HeapSnapshot* snapshot_;
@@ -429,9 +430,10 @@ class V8HeapExplorer : public HeapEntriesAllocator {
HeapObjectsMap* heap_object_map_;
SnapshottingProgressReportingInterface* progress_;
HeapSnapshotGenerator* generator_ = nullptr;
- std::unordered_map<JSGlobalObject*, const char*> objects_tags_;
- std::unordered_map<Object*, const char*> strong_gc_subroot_names_;
- std::unordered_set<JSGlobalObject*> user_roots_;
+ std::unordered_map<JSGlobalObject, const char*, Object::Hasher> objects_tags_;
+ std::unordered_map<Object, const char*, Object::Hasher>
+ strong_gc_subroot_names_;
+ std::unordered_set<JSGlobalObject, Object::Hasher> user_roots_;
v8::HeapProfiler::ObjectNameResolver* global_object_name_resolver_;
std::vector<bool> visited_fields_;
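// Sketch (stand-in types, not V8 code): with Object now a value type wrapping
// a tagged address, the unordered containers above need an explicit hasher
// over that address; Object::Hasher plays the role of ObjHasher here.
#include <cstddef>
#include <cstdint>
#include <functional>
#include <unordered_set>

struct Obj {
  uintptr_t tagged;
  uintptr_t ptr() const { return tagged; }
  bool operator==(const Obj& other) const { return tagged == other.tagged; }
};

struct ObjHasher {
  std::size_t operator()(const Obj& o) const {
    return std::hash<uintptr_t>()(o.ptr());
  }
};

int main() {
  std::unordered_set<Obj, ObjHasher> roots;
  roots.insert(Obj{0x1234});
  return roots.count(Obj{0x1234}) == 1 ? 0 : 1;
}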
@@ -458,13 +460,13 @@ class NativeObjectsExplorer {
private:
void FillRetainedObjects();
void FillEdges();
- std::vector<HeapObject*>* GetVectorMaybeDisposeInfo(
+ std::vector<HeapObject>* GetVectorMaybeDisposeInfo(
v8::RetainedObjectInfo* info);
void SetNativeRootReference(v8::RetainedObjectInfo* info);
void SetRootNativeRootsReference();
- void SetWrapperNativeReferences(HeapObject* wrapper,
- v8::RetainedObjectInfo* info);
- void VisitSubtreeWrapper(Object** p, uint16_t class_id);
+ void SetWrapperNativeReferences(HeapObject wrapper,
+ v8::RetainedObjectInfo* info);
+ void VisitSubtreeWrapper(Handle<Object> p, uint16_t class_id);
struct RetainedInfoHasher {
std::size_t operator()(v8::RetainedObjectInfo* info) const {
@@ -486,8 +488,8 @@ class NativeObjectsExplorer {
HeapSnapshot* snapshot_;
StringsStorage* names_;
bool embedder_queried_;
- std::unordered_set<Object*> in_groups_;
- std::unordered_map<v8::RetainedObjectInfo*, std::vector<HeapObject*>*,
+ std::unordered_set<Object, Object::Hasher> in_groups_;
+ std::unordered_map<v8::RetainedObjectInfo*, std::vector<HeapObject>*,
RetainedInfoHasher, RetainedInfoEquals>
objects_by_info_;
std::unordered_map<const char*, NativeGroupRetainedObjectInfo*,
diff --git a/deps/v8/src/profiler/profile-generator.cc b/deps/v8/src/profiler/profile-generator.cc
index d60da5a44d..8ce9fb392e 100644
--- a/deps/v8/src/profiler/profile-generator.cc
+++ b/deps/v8/src/profiler/profile-generator.cc
@@ -18,7 +18,8 @@
namespace v8 {
namespace internal {
-void SourcePositionTable::SetPosition(int pc_offset, int line) {
+void SourcePositionTable::SetPosition(int pc_offset, int line,
+ int inlining_id) {
DCHECK_GE(pc_offset, 0);
DCHECK_GT(line, 0); // The 1-based number of the source line.
// Check that we are inserting in ascending order, so that the vector remains
@@ -26,8 +27,9 @@ void SourcePositionTable::SetPosition(int pc_offset, int line) {
DCHECK(pc_offsets_to_lines_.empty() ||
pc_offsets_to_lines_.back().pc_offset < pc_offset);
if (pc_offsets_to_lines_.empty() ||
- pc_offsets_to_lines_.back().line_number != line) {
- pc_offsets_to_lines_.push_back({pc_offset, line});
+ pc_offsets_to_lines_.back().line_number != line ||
+ pc_offsets_to_lines_.back().inlining_id != inlining_id) {
+ pc_offsets_to_lines_.push_back({pc_offset, line, inlining_id});
}
}
@@ -35,13 +37,33 @@ int SourcePositionTable::GetSourceLineNumber(int pc_offset) const {
if (pc_offsets_to_lines_.empty()) {
return v8::CpuProfileNode::kNoLineNumberInfo;
}
- auto it =
- std::upper_bound(pc_offsets_to_lines_.begin(), pc_offsets_to_lines_.end(),
- PCOffsetAndLineNumber{pc_offset, 0});
+ auto it = std::lower_bound(
+ pc_offsets_to_lines_.begin(), pc_offsets_to_lines_.end(),
+ SourcePositionTuple{pc_offset, 0, SourcePosition::kNotInlined});
if (it != pc_offsets_to_lines_.begin()) --it;
return it->line_number;
}
+int SourcePositionTable::GetInliningId(int pc_offset) const {
+ if (pc_offsets_to_lines_.empty()) {
+ return SourcePosition::kNotInlined;
+ }
+ auto it = std::lower_bound(
+ pc_offsets_to_lines_.begin(), pc_offsets_to_lines_.end(),
+ SourcePositionTuple{pc_offset, 0, SourcePosition::kNotInlined});
+ if (it != pc_offsets_to_lines_.begin()) --it;
+ return it->inlining_id;
+}
+
+void SourcePositionTable::print() const {
+ base::OS::Print(" - source position table at %p\n", this);
+ for (const SourcePositionTuple& pos_info : pc_offsets_to_lines_) {
+ base::OS::Print(" %d --> line_number: %d inlining_id: %d\n",
+ pos_info.pc_offset, pos_info.line_number,
+ pos_info.inlining_id);
+ }
+}
+
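// Sketch (same tuple shape as above; table values invented): both lookups
// take std::lower_bound over the pc-sorted vector and step back one entry,
// so a query resolves to the tuple whose range covers it.
#include <algorithm>
#include <cassert>
#include <vector>

struct Tuple {
  bool operator<(const Tuple& other) const {
    return pc_offset < other.pc_offset;
  }
  int pc_offset;
  int line_number;
  int inlining_id;
};

int LineFor(const std::vector<Tuple>& table, int pc_offset) {
  auto it = std::lower_bound(table.begin(), table.end(),
                             Tuple{pc_offset, 0, -1});
  if (it != table.begin()) --it;
  return it->line_number;
}

int main() {
  std::vector<Tuple> table = {{0, 10, -1}, {8, 11, 0}, {20, 12, -1}};
  assert(LineFor(table, 15) == 11);  // covered by the entry starting at 8
  assert(LineFor(table, 3) == 10);   // covered by the entry starting at 0
  assert(LineFor(table, 25) == 12);  // past the last entry: clamps to it
  assert(LineFor(table, 8) == 10);   // exact hits step back one tuple
}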
const char* const CodeEntry::kWasmResourceNamePrefix = "wasm ";
const char* const CodeEntry::kEmptyResourceName = "";
const char* const CodeEntry::kEmptyBailoutReason = "";
@@ -66,20 +88,22 @@ base::LazyDynamicInstance<CodeEntry,
CodeEntry::kUnresolvedEntry = LAZY_DYNAMIC_INSTANCE_INITIALIZER;
CodeEntry* CodeEntry::ProgramEntryCreateTrait::Create() {
- return new CodeEntry(Logger::FUNCTION_TAG, CodeEntry::kProgramEntryName);
+ return new CodeEntry(CodeEventListener::FUNCTION_TAG,
+ CodeEntry::kProgramEntryName);
}
CodeEntry* CodeEntry::IdleEntryCreateTrait::Create() {
- return new CodeEntry(Logger::FUNCTION_TAG, CodeEntry::kIdleEntryName);
+ return new CodeEntry(CodeEventListener::FUNCTION_TAG,
+ CodeEntry::kIdleEntryName);
}
CodeEntry* CodeEntry::GCEntryCreateTrait::Create() {
- return new CodeEntry(Logger::BUILTIN_TAG,
+ return new CodeEntry(CodeEventListener::BUILTIN_TAG,
CodeEntry::kGarbageCollectorEntryName);
}
CodeEntry* CodeEntry::UnresolvedEntryCreateTrait::Create() {
- return new CodeEntry(Logger::FUNCTION_TAG,
+ return new CodeEntry(CodeEventListener::FUNCTION_TAG,
CodeEntry::kUnresolvedFunctionName);
}
@@ -119,17 +143,25 @@ int CodeEntry::GetSourceLine(int pc_offset) const {
return v8::CpuProfileNode::kNoLineNumberInfo;
}
-void CodeEntry::AddInlineStack(
- int pc_offset, std::vector<std::unique_ptr<CodeEntry>> inline_stack) {
- EnsureRareData()->inline_locations_.insert(
- std::make_pair(pc_offset, std::move(inline_stack)));
+void CodeEntry::SetInlineStacks(
+ std::unordered_set<std::unique_ptr<CodeEntry>, Hasher, Equals>
+ inline_entries,
+ std::unordered_map<int, std::vector<CodeEntryAndLineNumber>>
+ inline_stacks) {
+ EnsureRareData()->inline_entries_ = std::move(inline_entries);
+ rare_data_->inline_stacks_ = std::move(inline_stacks);
}
-const std::vector<std::unique_ptr<CodeEntry>>* CodeEntry::GetInlineStack(
+const std::vector<CodeEntryAndLineNumber>* CodeEntry::GetInlineStack(
int pc_offset) const {
- if (!rare_data_) return nullptr;
- auto it = rare_data_->inline_locations_.find(pc_offset);
- return it != rare_data_->inline_locations_.end() ? &it->second : nullptr;
+ if (!line_info_) return nullptr;
+
+ int inlining_id = line_info_->GetInliningId(pc_offset);
+ if (inlining_id == SourcePosition::kNotInlined) return nullptr;
+ DCHECK(rare_data_);
+
+ auto it = rare_data_->inline_stacks_.find(inlining_id);
+ return it != rare_data_->inline_stacks_.end() ? &it->second : nullptr;
}
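// Sketch (stand-in types, invented values): GetInlineStack is now a two-step
// lookup, pc offset -> inlining id via the line table, then inlining id ->
// cached stack via the per-entry map in RareData.
#include <cassert>
#include <unordered_map>
#include <vector>

struct FrameRef {
  int entry_id;
  int line;
};

int main() {
  // pc offset -> inlining id (-1 plays the role of kNotInlined).
  std::unordered_map<int, int> inlining_id_for_pc = {{0, -1}, {8, 0}};
  // inlining id -> inline stack.
  std::unordered_map<int, std::vector<FrameRef>> inline_stacks = {
      {0, {{7, 3}, {2, 10}}}};

  int id = inlining_id_for_pc.at(8);
  assert(id != -1);  // only inlined positions have a stack
  const std::vector<FrameRef>& stack = inline_stacks.at(id);
  assert(stack.size() == 2);
}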
void CodeEntry::set_deopt_info(
@@ -142,9 +174,9 @@ void CodeEntry::set_deopt_info(
rare_data->deopt_inlined_frames_ = std::move(inlined_frames);
}
-void CodeEntry::FillFunctionInfo(SharedFunctionInfo* shared) {
+void CodeEntry::FillFunctionInfo(SharedFunctionInfo shared) {
if (!shared->script()->IsScript()) return;
- Script* script = Script::cast(shared->script());
+ Script script = Script::cast(shared->script());
set_script_id(script->id());
set_position(shared->StartPosition());
if (shared->optimization_disabled()) {
@@ -174,6 +206,55 @@ CodeEntry::RareData* CodeEntry::EnsureRareData() {
return rare_data_.get();
}
+void CodeEntry::print() const {
+ base::OS::Print("CodeEntry: at %p\n", this);
+
+ base::OS::Print(" - name: %s\n", name_);
+ base::OS::Print(" - resource_name: %s\n", resource_name_);
+ base::OS::Print(" - line_number: %d\n", line_number_);
+ base::OS::Print(" - column_number: %d\n", column_number_);
+ base::OS::Print(" - script_id: %d\n", script_id_);
+ base::OS::Print(" - position: %d\n", position_);
+ base::OS::Print(" - instruction_start: %p\n",
+ reinterpret_cast<void*>(instruction_start_));
+
+ if (line_info_) {
+ line_info_->print();
+ }
+
+ if (rare_data_) {
+ base::OS::Print(" - deopt_reason: %s\n", rare_data_->deopt_reason_);
+ base::OS::Print(" - bailout_reason: %s\n", rare_data_->bailout_reason_);
+ base::OS::Print(" - deopt_id: %d\n", rare_data_->deopt_id_);
+
+ if (!rare_data_->inline_stacks_.empty()) {
+ base::OS::Print(" - inline stacks:\n");
+ for (auto it = rare_data_->inline_stacks_.begin();
+ it != rare_data_->inline_stacks_.end(); it++) {
+ base::OS::Print(" inlining_id: [%d]\n", it->first);
+ for (const auto& e : it->second) {
+ base::OS::Print(" %s --> %d\n", e.code_entry->name(),
+ e.line_number);
+ }
+ }
+ } else {
+ base::OS::Print(" - inline stacks: (empty)\n");
+ }
+
+ if (!rare_data_->deopt_inlined_frames_.empty()) {
+ base::OS::Print(" - deopt inlined frames:\n");
+ for (const CpuProfileDeoptFrame& frame :
+ rare_data_->deopt_inlined_frames_) {
+ base::OS::Print("script_id: %d position: %zu\n", frame.script_id,
+ frame.position);
+ }
+ } else {
+ base::OS::Print(" - deopt inlined frames: (empty)\n");
+ }
+ }
+ base::OS::Print("\n");
+}
+
void ProfileNode::CollectDeoptInfo(CodeEntry* entry) {
deopt_infos_.push_back(entry->GetDeoptInfo());
entry->clear_deopt_info();
@@ -759,15 +840,22 @@ void ProfileGenerator::RecordTickSample(const TickSample& sample) {
int pc_offset =
static_cast<int>(stack_pos - entry->instruction_start());
// TODO(petermarshall): pc_offset can still be negative in some cases.
- const std::vector<std::unique_ptr<CodeEntry>>* inline_stack =
+ const std::vector<CodeEntryAndLineNumber>* inline_stack =
entry->GetInlineStack(pc_offset);
if (inline_stack) {
- std::transform(
- inline_stack->rbegin(), inline_stack->rend(),
- std::back_inserter(stack_trace),
- [=](const std::unique_ptr<CodeEntry>& ptr) {
- return CodeEntryAndLineNumber{ptr.get(), no_line_info};
- });
+ int most_inlined_frame_line_number = entry->GetSourceLine(pc_offset);
+ stack_trace.insert(stack_trace.end(), inline_stack->begin(),
+ inline_stack->end());
+          // This is a bit of a messy hack. The most-inlined frame (the
+          // function at the end of the chain of calls) carries the wrong
+          // line number in inline_stack; its correct line number is stored
+          // in the SourcePositionTable of entry. Fix up that frame here.
+ // TODO(petermarshall): Remove this and use a tree with a node per
+ // inlining_id.
+ DCHECK(!inline_stack->empty());
+ size_t index = stack_trace.size() - inline_stack->size();
+ stack_trace[index].line_number = most_inlined_frame_line_number;
}
// Skip unresolved frames (e.g. internal frame) and get source line of
// the first JS caller.
@@ -779,6 +867,12 @@ void ProfileGenerator::RecordTickSample(const TickSample& sample) {
src_line_not_found = false;
}
line_number = entry->GetSourceLine(pc_offset);
+
+      // The inline stack contains the top-level function, i.e. the same
+      // function as entry; we don't want to add it twice. The copy in the
+      // inline stack has the correct line number for this particular
+      // inlining, so we use it instead of pushing entry to stack_trace.
+ if (inline_stack) continue;
}
stack_trace.push_back({entry, line_number});
}
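// Sketch of the fix-up arithmetic above (toy frames, invented numbers): after
// appending the inline stack, the most-inlined frame sits at index
// size() - inline_stack.size(), and its line number is overwritten with the
// one recovered from the SourcePositionTable.
#include <cassert>
#include <cstddef>
#include <vector>

struct Frame {
  int entry_id;
  int line;
};

int main() {
  std::vector<Frame> stack_trace = {{0, 1}};            // outer frames
  std::vector<Frame> inline_stack = {{7, -1}, {2, 3}};  // -1: wrong line
  int most_inlined_frame_line_number = 42;  // from the pc-offset lookup

  stack_trace.insert(stack_trace.end(), inline_stack.begin(),
                     inline_stack.end());
  std::size_t index = stack_trace.size() - inline_stack.size();
  stack_trace[index].line = most_inlined_frame_line_number;

  assert(stack_trace[1].line == 42);
}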
diff --git a/deps/v8/src/profiler/profile-generator.h b/deps/v8/src/profiler/profile-generator.h
index ac9506ab21..ebb4f0ea2c 100644
--- a/deps/v8/src/profiler/profile-generator.h
+++ b/deps/v8/src/profiler/profile-generator.h
@@ -16,7 +16,8 @@
#include "include/v8-profiler.h"
#include "src/allocation.h"
-#include "src/log.h"
+#include "src/builtins/builtins.h"
+#include "src/code-events.h"
#include "src/profiler/strings-storage.h"
#include "src/source-position.h"
@@ -26,29 +27,35 @@ namespace internal {
struct TickSample;
// Provides a mapping from the offsets within generated code or a bytecode array
-// to the source line.
+// to the source line and inlining id.
class SourcePositionTable : public Malloced {
public:
SourcePositionTable() = default;
- void SetPosition(int pc_offset, int line);
+ void SetPosition(int pc_offset, int line, int inlining_id);
int GetSourceLineNumber(int pc_offset) const;
+ int GetInliningId(int pc_offset) const;
+
+ void print() const;
private:
- struct PCOffsetAndLineNumber {
- bool operator<(const PCOffsetAndLineNumber& other) const {
+ struct SourcePositionTuple {
+ bool operator<(const SourcePositionTuple& other) const {
return pc_offset < other.pc_offset;
}
int pc_offset;
int line_number;
+ int inlining_id;
};
- // This is logically a map, but we store it as a vector of pairs, sorted by
+ // This is logically a map, but we store it as a vector of tuples, sorted by
// the pc offset, so that we can save space and look up items using binary
// search.
- std::vector<PCOffsetAndLineNumber> pc_offsets_to_lines_;
+ std::vector<SourcePositionTuple> pc_offsets_to_lines_;
DISALLOW_COPY_AND_ASSIGN(SourcePositionTable);
};
+struct CodeEntryAndLineNumber;
+
class CodeEntry {
public:
// CodeEntry doesn't own name strings, just references them.
@@ -91,7 +98,7 @@ class CodeEntry {
void mark_used() { bit_field_ = UsedField::update(bit_field_, true); }
bool used() const { return UsedField::decode(bit_field_); }
- void FillFunctionInfo(SharedFunctionInfo* shared);
+ void FillFunctionInfo(SharedFunctionInfo shared);
void SetBuiltinId(Builtins::Name id);
Builtins::Name builtin_id() const {
@@ -103,9 +110,24 @@ class CodeEntry {
int GetSourceLine(int pc_offset) const;
- void AddInlineStack(int pc_offset,
- std::vector<std::unique_ptr<CodeEntry>> inline_stack);
- const std::vector<std::unique_ptr<CodeEntry>>* GetInlineStack(
+ struct Equals {
+ bool operator()(const std::unique_ptr<CodeEntry>& lhs,
+ const std::unique_ptr<CodeEntry>& rhs) const {
+ return lhs.get()->IsSameFunctionAs(rhs.get());
+ }
+ };
+ struct Hasher {
+ std::size_t operator()(const std::unique_ptr<CodeEntry>& e) const {
+ return e->GetHash();
+ }
+ };
+
+ void SetInlineStacks(
+ std::unordered_set<std::unique_ptr<CodeEntry>, Hasher, Equals>
+ inline_entries,
+ std::unordered_map<int, std::vector<CodeEntryAndLineNumber>>
+ inline_stacks);
+ const std::vector<CodeEntryAndLineNumber>* GetInlineStack(
int pc_offset) const;
void set_instruction_start(Address start) { instruction_start_ = start; }
@@ -136,13 +158,16 @@ class CodeEntry {
return kUnresolvedEntry.Pointer();
}
+ void print() const;
+
private:
struct RareData {
const char* deopt_reason_ = kNoDeoptReason;
const char* bailout_reason_ = kEmptyBailoutReason;
int deopt_id_ = kNoDeoptimizationId;
- std::unordered_map<int, std::vector<std::unique_ptr<CodeEntry>>>
- inline_locations_;
+ std::unordered_map<int, std::vector<CodeEntryAndLineNumber>> inline_stacks_;
+ std::unordered_set<std::unique_ptr<CodeEntry>, Hasher, Equals>
+ inline_entries_;
std::vector<CpuProfileDeoptFrame> deopt_inlined_frames_;
};
@@ -170,7 +195,7 @@ class CodeEntry {
static base::LazyDynamicInstance<CodeEntry, UnresolvedEntryCreateTrait>::type
kUnresolvedEntry;
- using TagField = BitField<Logger::LogEventsAndTags, 0, 8>;
+ using TagField = BitField<CodeEventListener::LogEventsAndTags, 0, 8>;
using BuiltinIdField = BitField<Builtins::Name, 8, 23>;
using UsedField = BitField<bool, 31, 1>;
@@ -313,6 +338,7 @@ class ProfileTree {
DISALLOW_COPY_AND_ASSIGN(ProfileTree);
};
+class CpuProfiler;
class CpuProfile {
public:
@@ -412,7 +438,7 @@ class CpuProfilesCollection {
std::vector<std::unique_ptr<CpuProfile>>* profiles() {
return &finished_profiles_;
}
- const char* GetName(Name* name) { return resource_names_.GetName(name); }
+ const char* GetName(Name name) { return resource_names_.GetName(name); }
bool IsLastProfile(const char* title);
void RemoveProfile(CpuProfile* profile);
diff --git a/deps/v8/src/profiler/profiler-listener.cc b/deps/v8/src/profiler/profiler-listener.cc
index f90a2e11d3..2aac98f61f 100644
--- a/deps/v8/src/profiler/profiler-listener.cc
+++ b/deps/v8/src/profiler/profiler-listener.cc
@@ -5,10 +5,11 @@
#include "src/profiler/profiler-listener.h"
#include "src/deoptimizer.h"
-#include "src/instruction-stream.h"
#include "src/objects-inl.h"
#include "src/profiler/cpu-profiler.h"
#include "src/profiler/profile-generator-inl.h"
+#include "src/reloc-info.h"
+#include "src/snapshot/embedded-data.h"
#include "src/source-position-table.h"
#include "src/wasm/wasm-code-manager.h"
@@ -21,7 +22,7 @@ ProfilerListener::ProfilerListener(Isolate* isolate,
ProfilerListener::~ProfilerListener() = default;
-void ProfilerListener::CallbackEvent(Name* name, Address entry_point) {
+void ProfilerListener::CallbackEvent(Name name, Address entry_point) {
CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
rec->instruction_start = entry_point;
@@ -31,7 +32,7 @@ void ProfilerListener::CallbackEvent(Name* name, Address entry_point) {
}
void ProfilerListener::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
- AbstractCode* code, const char* name) {
+ AbstractCode code, const char* name) {
CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
rec->instruction_start = code->InstructionStart();
@@ -39,13 +40,12 @@ void ProfilerListener::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
CpuProfileNode::kNoLineNumberInfo,
CpuProfileNode::kNoColumnNumberInfo, nullptr,
code->InstructionStart());
- RecordInliningInfo(rec->entry, code);
rec->instruction_size = code->InstructionSize();
DispatchCodeEvent(evt_rec);
}
void ProfilerListener::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
- AbstractCode* code, Name* name) {
+ AbstractCode code, Name name) {
CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
rec->instruction_start = code->InstructionStart();
@@ -53,15 +53,14 @@ void ProfilerListener::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
CpuProfileNode::kNoLineNumberInfo,
CpuProfileNode::kNoColumnNumberInfo, nullptr,
code->InstructionStart());
- RecordInliningInfo(rec->entry, code);
rec->instruction_size = code->InstructionSize();
DispatchCodeEvent(evt_rec);
}
void ProfilerListener::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
- AbstractCode* code,
- SharedFunctionInfo* shared,
- Name* script_name) {
+ AbstractCode code,
+ SharedFunctionInfo shared,
+ Name script_name) {
CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
rec->instruction_start = code->InstructionStart();
@@ -70,40 +69,113 @@ void ProfilerListener::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
CpuProfileNode::kNoLineNumberInfo,
CpuProfileNode::kNoColumnNumberInfo, nullptr,
code->InstructionStart());
- RecordInliningInfo(rec->entry, code);
+ DCHECK(!code->IsCode());
rec->entry->FillFunctionInfo(shared);
rec->instruction_size = code->InstructionSize();
DispatchCodeEvent(evt_rec);
}
+namespace {
+
+CodeEntry* GetOrInsertCachedEntry(
+ std::unordered_set<std::unique_ptr<CodeEntry>, CodeEntry::Hasher,
+ CodeEntry::Equals>* entries,
+ std::unique_ptr<CodeEntry> search_value) {
+ auto it = entries->find(search_value);
+ if (it != entries->end()) return it->get();
+ CodeEntry* ret = search_value.get();
+ entries->insert(std::move(search_value));
+ return ret;
+}
+
+} // namespace
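// Sketch of the caching idiom above (toy entry type): the set owns the
// entries via unique_ptr but hashes and compares them by value, so equal
// candidates collapse to one canonical allocation.
#include <cassert>
#include <cstddef>
#include <functional>
#include <memory>
#include <string>
#include <unordered_set>

struct Entry {
  std::string name;
  int line;
};

struct Hasher {
  std::size_t operator()(const std::unique_ptr<Entry>& e) const {
    // Toy hash combiner; V8 uses the entry's own GetHash().
    return std::hash<std::string>()(e->name) ^ std::hash<int>()(e->line);
  }
};

struct Equals {
  bool operator()(const std::unique_ptr<Entry>& a,
                  const std::unique_ptr<Entry>& b) const {
    return a->name == b->name && a->line == b->line;
  }
};

using Cache = std::unordered_set<std::unique_ptr<Entry>, Hasher, Equals>;

Entry* GetOrInsert(Cache* cache, std::unique_ptr<Entry> candidate) {
  auto it = cache->find(candidate);
  if (it != cache->end()) return it->get();  // reuse the canonical entry
  Entry* raw = candidate.get();
  cache->insert(std::move(candidate));
  return raw;
}

int main() {
  Cache cache;
  Entry* a = GetOrInsert(&cache, std::make_unique<Entry>(Entry{"f", 1}));
  Entry* b = GetOrInsert(&cache, std::make_unique<Entry>(Entry{"f", 1}));
  assert(a == b);  // duplicates collapse to one allocation
  assert(cache.size() == 1);
}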
+
void ProfilerListener::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
- AbstractCode* abstract_code,
- SharedFunctionInfo* shared,
- Name* script_name, int line,
- int column) {
+ AbstractCode abstract_code,
+ SharedFunctionInfo shared,
+ Name script_name, int line, int column) {
CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
rec->instruction_start = abstract_code->InstructionStart();
std::unique_ptr<SourcePositionTable> line_table;
+ std::unordered_map<int, std::vector<CodeEntryAndLineNumber>> inline_stacks;
+ std::unordered_set<std::unique_ptr<CodeEntry>, CodeEntry::Hasher,
+ CodeEntry::Equals>
+ cached_inline_entries;
if (shared->script()->IsScript()) {
- Script* script = Script::cast(shared->script());
+ Script script = Script::cast(shared->script());
line_table.reset(new SourcePositionTable());
+ HandleScope scope(isolate_);
+
+ // Add each position to the source position table and store inlining stacks
+ // for inline positions. We store almost the same information in the
+ // profiler as is stored on the code object, except that we transform source
+ // positions to line numbers here, because we only care about attributing
+ // ticks to a given line.
for (SourcePositionTableIterator it(abstract_code->source_position_table());
!it.done(); it.Advance()) {
- // TODO(alph,tebbi) Skipping inlined positions for now, because they might
- // refer to a different script.
- if (it.source_position().InliningId() != SourcePosition::kNotInlined)
- continue;
int position = it.source_position().ScriptOffset();
int line_number = script->GetLineNumber(position) + 1;
- line_table->SetPosition(it.code_offset(), line_number);
+ int inlining_id = it.source_position().InliningId();
+ line_table->SetPosition(it.code_offset(), line_number, inlining_id);
+
+ if (inlining_id != SourcePosition::kNotInlined) {
+ DCHECK(abstract_code->IsCode());
+ Code code = abstract_code->GetCode();
+ std::vector<SourcePositionInfo> stack =
+ it.source_position().InliningStack(handle(code, isolate_));
+ DCHECK(!stack.empty());
+
+ std::vector<CodeEntryAndLineNumber> inline_stack;
+ for (SourcePositionInfo& pos_info : stack) {
+ if (pos_info.position.ScriptOffset() == kNoSourcePosition) continue;
+ if (pos_info.script.is_null()) continue;
+
+ int line_number =
+ pos_info.script->GetLineNumber(pos_info.position.ScriptOffset()) +
+ 1;
+
+ const char* resource_name =
+ (pos_info.script->name()->IsName())
+ ? GetName(Name::cast(pos_info.script->name()))
+ : CodeEntry::kEmptyResourceName;
+
+ // We need the start line number and column number of the function for
+ // kLeafNodeLineNumbers mode. Creating a SourcePositionInfo is a handy
+ // way of getting both easily.
+ SourcePositionInfo start_pos_info(
+ SourcePosition(pos_info.shared->StartPosition()),
+ pos_info.shared);
+
+ std::unique_ptr<CodeEntry> inline_entry =
+ base::make_unique<CodeEntry>(
+ tag, GetName(pos_info.shared->DebugName()), resource_name,
+ start_pos_info.line + 1, start_pos_info.column + 1, nullptr,
+ code->InstructionStart());
+ inline_entry->FillFunctionInfo(*pos_info.shared);
+
+              // Create a canonical CodeEntry for each inlined frame, and
+              // reuse it across subsequent inline stacks to avoid a lot of
+              // duplication.
+ CodeEntry* cached_entry = GetOrInsertCachedEntry(
+ &cached_inline_entries, std::move(inline_entry));
+
+ inline_stack.push_back(
+ CodeEntryAndLineNumber{cached_entry, line_number});
+ }
+ DCHECK(!inline_stack.empty());
+ inline_stacks.emplace(inlining_id, std::move(inline_stack));
+ }
}
}
rec->entry =
NewCodeEntry(tag, GetName(shared->DebugName()),
GetName(InferScriptName(script_name, shared)), line, column,
std::move(line_table), abstract_code->InstructionStart());
- RecordInliningInfo(rec->entry, abstract_code);
+ if (!inline_stacks.empty()) {
+ rec->entry->SetInlineStacks(std::move(cached_inline_entries),
+ std::move(inline_stacks));
+ }
+
rec->entry->FillFunctionInfo(shared);
rec->instruction_size = abstract_code->InstructionSize();
DispatchCodeEvent(evt_rec);
@@ -123,7 +195,7 @@ void ProfilerListener::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
DispatchCodeEvent(evt_rec);
}
-void ProfilerListener::CodeMoveEvent(AbstractCode* from, AbstractCode* to) {
+void ProfilerListener::CodeMoveEvent(AbstractCode from, AbstractCode to) {
CodeEventsContainer evt_rec(CodeEventRecord::CODE_MOVE);
CodeMoveEventRecord* rec = &evt_rec.CodeMoveEventRecord_;
rec->from_instruction_start = from->InstructionStart();
@@ -131,8 +203,8 @@ void ProfilerListener::CodeMoveEvent(AbstractCode* from, AbstractCode* to) {
DispatchCodeEvent(evt_rec);
}
-void ProfilerListener::CodeDisableOptEvent(AbstractCode* code,
- SharedFunctionInfo* shared) {
+void ProfilerListener::CodeDisableOptEvent(AbstractCode code,
+ SharedFunctionInfo shared) {
CodeEventsContainer evt_rec(CodeEventRecord::CODE_DISABLE_OPT);
CodeDisableOptEventRecord* rec = &evt_rec.CodeDisableOptEventRecord_;
rec->instruction_start = code->InstructionStart();
@@ -140,7 +212,7 @@ void ProfilerListener::CodeDisableOptEvent(AbstractCode* code,
DispatchCodeEvent(evt_rec);
}
-void ProfilerListener::CodeDeoptEvent(Code* code, DeoptimizeKind kind,
+void ProfilerListener::CodeDeoptEvent(Code code, DeoptimizeKind kind,
Address pc, int fp_to_sp_delta) {
CodeEventsContainer evt_rec(CodeEventRecord::CODE_DEOPT);
CodeDeoptEventRecord* rec = &evt_rec.CodeDeoptEventRecord_;
@@ -157,7 +229,7 @@ void ProfilerListener::CodeDeoptEvent(Code* code, DeoptimizeKind kind,
DispatchCodeEvent(evt_rec);
}
-void ProfilerListener::GetterCallbackEvent(Name* name, Address entry_point) {
+void ProfilerListener::GetterCallbackEvent(Name name, Address entry_point) {
CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
rec->instruction_start = entry_point;
@@ -167,8 +239,7 @@ void ProfilerListener::GetterCallbackEvent(Name* name, Address entry_point) {
DispatchCodeEvent(evt_rec);
}
-void ProfilerListener::RegExpCodeCreateEvent(AbstractCode* code,
- String* source) {
+void ProfilerListener::RegExpCodeCreateEvent(AbstractCode code, String source) {
CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
rec->instruction_start = code->InstructionStart();
@@ -180,7 +251,7 @@ void ProfilerListener::RegExpCodeCreateEvent(AbstractCode* code,
DispatchCodeEvent(evt_rec);
}
-void ProfilerListener::SetterCallbackEvent(Name* name, Address entry_point) {
+void ProfilerListener::SetterCallbackEvent(Name name, Address entry_point) {
CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
rec->instruction_start = entry_point;
@@ -190,67 +261,14 @@ void ProfilerListener::SetterCallbackEvent(Name* name, Address entry_point) {
DispatchCodeEvent(evt_rec);
}
-Name* ProfilerListener::InferScriptName(Name* name, SharedFunctionInfo* info) {
+Name ProfilerListener::InferScriptName(Name name, SharedFunctionInfo info) {
if (name->IsString() && String::cast(name)->length()) return name;
if (!info->script()->IsScript()) return name;
- Object* source_url = Script::cast(info->script())->source_url();
+ Object source_url = Script::cast(info->script())->source_url();
return source_url->IsName() ? Name::cast(source_url) : name;
}
-void ProfilerListener::RecordInliningInfo(CodeEntry* entry,
- AbstractCode* abstract_code) {
- if (!abstract_code->IsCode()) return;
- Code* code = abstract_code->GetCode();
- if (code->kind() != Code::OPTIMIZED_FUNCTION) return;
- DeoptimizationData* deopt_input_data =
- DeoptimizationData::cast(code->deoptimization_data());
- int deopt_count = deopt_input_data->DeoptCount();
- for (int i = 0; i < deopt_count; i++) {
- int pc_offset = deopt_input_data->Pc(i)->value();
- if (pc_offset == -1) continue;
- int translation_index = deopt_input_data->TranslationIndex(i)->value();
- TranslationIterator it(deopt_input_data->TranslationByteArray(),
- translation_index);
- Translation::Opcode opcode = static_cast<Translation::Opcode>(it.Next());
- DCHECK_EQ(Translation::BEGIN, opcode);
- it.Skip(Translation::NumberOfOperandsFor(opcode));
- int depth = 0;
- std::vector<std::unique_ptr<CodeEntry>> inline_stack;
- while (it.HasNext() &&
- Translation::BEGIN !=
- (opcode = static_cast<Translation::Opcode>(it.Next()))) {
- if (opcode != Translation::INTERPRETED_FRAME) {
- it.Skip(Translation::NumberOfOperandsFor(opcode));
- continue;
- }
- it.Next(); // Skip ast_id
- int shared_info_id = it.Next();
- it.Next(); // Skip height
- SharedFunctionInfo* shared_info = SharedFunctionInfo::cast(
- deopt_input_data->LiteralArray()->get(shared_info_id));
- if (!depth++) continue; // Skip the current function itself.
-
- const char* resource_name =
- (shared_info->script()->IsScript() &&
- Script::cast(shared_info->script())->name()->IsName())
- ? GetName(Name::cast(Script::cast(shared_info->script())->name()))
- : CodeEntry::kEmptyResourceName;
-
- CodeEntry* inline_entry =
- new CodeEntry(entry->tag(), GetName(shared_info->DebugName()),
- resource_name, CpuProfileNode::kNoLineNumberInfo,
- CpuProfileNode::kNoColumnNumberInfo, nullptr,
- code->InstructionStart());
- inline_entry->FillFunctionInfo(shared_info);
- inline_stack.emplace_back(inline_entry);
- }
- if (!inline_stack.empty()) {
- entry->AddInlineStack(pc_offset, std::move(inline_stack));
- }
- }
-}
-
-void ProfilerListener::AttachDeoptInlinedFrames(Code* code,
+void ProfilerListener::AttachDeoptInlinedFrames(Code code,
CodeDeoptEventRecord* rec) {
int deopt_id = rec->deopt_id;
SourcePosition last_position = SourcePosition::Unknown();
diff --git a/deps/v8/src/profiler/profiler-listener.h b/deps/v8/src/profiler/profiler-listener.h
index 51fba18a60..6bd794df70 100644
--- a/deps/v8/src/profiler/profiler-listener.h
+++ b/deps/v8/src/profiler/profiler-listener.h
@@ -28,30 +28,30 @@ class ProfilerListener : public CodeEventListener {
ProfilerListener(Isolate*, CodeEventObserver*);
~ProfilerListener() override;
- void CallbackEvent(Name* name, Address entry_point) override;
+ void CallbackEvent(Name name, Address entry_point) override;
void CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
- AbstractCode* code, const char* comment) override;
+ AbstractCode code, const char* comment) override;
void CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
- AbstractCode* code, Name* name) override;
+ AbstractCode code, Name name) override;
void CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
- AbstractCode* code, SharedFunctionInfo* shared,
- Name* script_name) override;
+ AbstractCode code, SharedFunctionInfo shared,
+ Name script_name) override;
void CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
- AbstractCode* code, SharedFunctionInfo* shared,
- Name* script_name, int line, int column) override;
+ AbstractCode code, SharedFunctionInfo shared,
+ Name script_name, int line, int column) override;
void CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
const wasm::WasmCode* code,
wasm::WasmName name) override;
void CodeMovingGCEvent() override {}
- void CodeMoveEvent(AbstractCode* from, AbstractCode* to) override;
- void CodeDisableOptEvent(AbstractCode* code,
- SharedFunctionInfo* shared) override;
- void CodeDeoptEvent(Code* code, DeoptimizeKind kind, Address pc,
+ void CodeMoveEvent(AbstractCode from, AbstractCode to) override;
+ void CodeDisableOptEvent(AbstractCode code,
+ SharedFunctionInfo shared) override;
+ void CodeDeoptEvent(Code code, DeoptimizeKind kind, Address pc,
int fp_to_sp_delta) override;
- void GetterCallbackEvent(Name* name, Address entry_point) override;
- void RegExpCodeCreateEvent(AbstractCode* code, String* source) override;
- void SetterCallbackEvent(Name* name, Address entry_point) override;
+ void GetterCallbackEvent(Name name, Address entry_point) override;
+ void RegExpCodeCreateEvent(AbstractCode code, String source) override;
+ void SetterCallbackEvent(Name name, Address entry_point) override;
void SharedFunctionInfoMoveEvent(Address from, Address to) override {}
CodeEntry* NewCodeEntry(
@@ -62,7 +62,7 @@ class ProfilerListener : public CodeEventListener {
std::unique_ptr<SourcePositionTable> line_info = nullptr,
Address instruction_start = kNullAddress);
- const char* GetName(Name* name) {
+ const char* GetName(Name name) {
return function_and_resource_names_.GetName(name);
}
const char* GetName(int args_count) {
@@ -71,14 +71,13 @@ class ProfilerListener : public CodeEventListener {
const char* GetName(const char* name) {
return function_and_resource_names_.GetCopy(name);
}
- const char* GetConsName(const char* prefix, Name* name) {
+ const char* GetConsName(const char* prefix, Name name) {
return function_and_resource_names_.GetConsName(prefix, name);
}
private:
- void RecordInliningInfo(CodeEntry* entry, AbstractCode* abstract_code);
- void AttachDeoptInlinedFrames(Code* code, CodeDeoptEventRecord* rec);
- Name* InferScriptName(Name* name, SharedFunctionInfo* info);
+ void AttachDeoptInlinedFrames(Code code, CodeDeoptEventRecord* rec);
+ Name InferScriptName(Name name, SharedFunctionInfo info);
V8_INLINE void DispatchCodeEvent(const CodeEventsContainer& evt_rec) {
observer_->CodeEventHandler(evt_rec);
}
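
The ProfilerListener changes above are part of this commit's broader migration from raw Name*/AbstractCode* pointers to pointer-sized value types passed by value. A minimal standalone sketch of that pattern; the Tagged class below is illustrative only, not V8's actual Object hierarchy:

    #include <cassert>
    #include <cstdint>

    // Hypothetical stand-in for V8's new value types: a one-word wrapper
    // around the tagged address, copied and passed by value.
    class Tagged {
     public:
      Tagged() : ptr_(0) {}
      explicit Tagged(uintptr_t ptr) : ptr_(ptr) {}
      uintptr_t ptr() const { return ptr_; }
      bool is_null() const { return ptr_ == 0; }

     private:
      uintptr_t ptr_;  // the only member: same size as the pointer it replaces
    };

    static_assert(sizeof(Tagged) == sizeof(void*),
                  "by-value wrapper stays pointer-sized");

    int main() {
      Tagged name(0x12345678);
      Tagged copy = name;  // copying the wrapper copies one word, like a pointer
      assert(copy.ptr() == name.ptr());
      return 0;
    }

Because the wrapper is one word, the signature change from Name* to Name is ABI-neutral while letting the type system distinguish tagged values from C++ pointers.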
diff --git a/deps/v8/src/profiler/sampling-heap-profiler.cc b/deps/v8/src/profiler/sampling-heap-profiler.cc
index 2e07135d85..3e158544fd 100644
--- a/deps/v8/src/profiler/sampling-heap-profiler.cc
+++ b/deps/v8/src/profiler/sampling-heap-profiler.cc
@@ -31,8 +31,8 @@ intptr_t SamplingAllocationObserver::GetNextSampleInterval(uint64_t rate) {
}
double u = random_->NextDouble();
double next = (-base::ieee754::log(u)) * rate;
- return next < kPointerSize
- ? kPointerSize
+ return next < kTaggedSize
+ ? kTaggedSize
: (next > INT_MAX ? INT_MAX : static_cast<intptr_t>(next));
}
@@ -44,7 +44,7 @@ intptr_t SamplingAllocationObserver::GetNextSampleInterval(uint64_t rate) {
// approximate the true number of allocations with size *size* given that
// *count* samples were observed.
v8::AllocationProfile::Allocation SamplingHeapProfiler::ScaleSample(
- size_t size, unsigned int count) {
+ size_t size, unsigned int count) const {
double scale = 1.0 / (1.0 - std::exp(-static_cast<double>(size) / rate_));
// Round count instead of truncating.
return {size, static_cast<unsigned int>(count * scale + 0.5)};
@@ -62,40 +62,39 @@ SamplingHeapProfiler::SamplingHeapProfiler(
heap_, static_cast<intptr_t>(rate), rate, this,
heap->isolate()->random_number_generator())),
names_(names),
- profile_root_(nullptr, "(root)", v8::UnboundScript::kNoScriptId, 0),
+ profile_root_(nullptr, "(root)", v8::UnboundScript::kNoScriptId, 0,
+ next_node_id()),
stack_depth_(stack_depth),
rate_(rate),
flags_(flags) {
CHECK_GT(rate_, 0u);
-
heap_->AddAllocationObserversToAllSpaces(other_spaces_observer_.get(),
new_space_observer_.get());
}
-
SamplingHeapProfiler::~SamplingHeapProfiler() {
heap_->RemoveAllocationObserversFromAllSpaces(other_spaces_observer_.get(),
new_space_observer_.get());
}
-
void SamplingHeapProfiler::SampleObject(Address soon_object, size_t size) {
DisallowHeapAllocation no_allocation;
HandleScope scope(isolate_);
- HeapObject* heap_object = HeapObject::FromAddress(soon_object);
+ HeapObject heap_object = HeapObject::FromAddress(soon_object);
Handle<Object> obj(heap_object, isolate_);
// Mark the new block as FreeSpace to make sure the heap is iterable while we
// are taking the sample.
- heap()->CreateFillerObjectAt(soon_object, static_cast<int>(size),
- ClearRecordedSlots::kNo);
+ heap_->CreateFillerObjectAt(soon_object, static_cast<int>(size),
+ ClearRecordedSlots::kNo);
Local<v8::Value> loc = v8::Utils::ToLocal(obj);
AllocationNode* node = AddStack();
node->allocations_[size]++;
- auto sample = base::make_unique<Sample>(size, node, loc, this);
+ auto sample =
+ base::make_unique<Sample>(size, node, loc, this, next_sample_id());
sample->global.SetWeak(sample.get(), OnWeakCallback,
WeakCallbackType::kParameter);
#if __clang__
@@ -132,25 +131,25 @@ void SamplingHeapProfiler::OnWeakCallback(
// sample is deleted because its unique ptr was erased from samples_.
}
-SamplingHeapProfiler::AllocationNode*
-SamplingHeapProfiler::AllocationNode::FindOrAddChildNode(const char* name,
- int script_id,
- int start_position) {
- FunctionId id = function_id(script_id, start_position, name);
- auto it = children_.find(id);
- if (it != children_.end()) {
- DCHECK_EQ(strcmp(it->second->name_, name), 0);
- return it->second.get();
+SamplingHeapProfiler::AllocationNode* SamplingHeapProfiler::FindOrAddChildNode(
+ AllocationNode* parent, const char* name, int script_id,
+ int start_position) {
+ AllocationNode::FunctionId id =
+ AllocationNode::function_id(script_id, start_position, name);
+ AllocationNode* child = parent->FindChildNode(id);
+ if (child) {
+ DCHECK_EQ(strcmp(child->name_, name), 0);
+ return child;
}
- auto child =
- base::make_unique<AllocationNode>(this, name, script_id, start_position);
- return children_.emplace(id, std::move(child)).first->second.get();
+ auto new_child = base::make_unique<AllocationNode>(
+ parent, name, script_id, start_position, next_node_id());
+ return parent->AddChildNode(id, std::move(new_child));
}
SamplingHeapProfiler::AllocationNode* SamplingHeapProfiler::AddStack() {
AllocationNode* node = &profile_root_;
- std::vector<SharedFunctionInfo*> stack;
+ std::vector<SharedFunctionInfo> stack;
JavaScriptFrameIterator it(isolate_);
int frames_captured = 0;
bool found_arguments_marker_frames = false;
@@ -162,7 +161,7 @@ SamplingHeapProfiler::AllocationNode* SamplingHeapProfiler::AddStack() {
// in the top frames of the stack). The allocations made in this
// sensitive moment belong to the formerly optimized frame anyway.
if (frame->unchecked_function()->IsJSFunction()) {
- SharedFunctionInfo* shared = frame->function()->shared();
+ SharedFunctionInfo shared = frame->function()->shared();
stack.push_back(shared);
frames_captured++;
} else {
@@ -199,25 +198,25 @@ SamplingHeapProfiler::AllocationNode* SamplingHeapProfiler::AddStack() {
name = "(JS)";
break;
}
- return node->FindOrAddChildNode(name, v8::UnboundScript::kNoScriptId, 0);
+ return FindOrAddChildNode(node, name, v8::UnboundScript::kNoScriptId, 0);
}
// We need to process the stack in reverse order as the top of the stack is
// the first element in the list.
for (auto it = stack.rbegin(); it != stack.rend(); ++it) {
- SharedFunctionInfo* shared = *it;
+ SharedFunctionInfo shared = *it;
const char* name = this->names()->GetName(shared->DebugName());
int script_id = v8::UnboundScript::kNoScriptId;
if (shared->script()->IsScript()) {
- Script* script = Script::cast(shared->script());
+ Script script = Script::cast(shared->script());
script_id = script->id();
}
- node = node->FindOrAddChildNode(name, script_id, shared->StartPosition());
+ node = FindOrAddChildNode(node, name, script_id, shared->StartPosition());
}
if (found_arguments_marker_frames) {
node =
- node->FindOrAddChildNode("(deopt)", v8::UnboundScript::kNoScriptId, 0);
+ FindOrAddChildNode(node, "(deopt)", v8::UnboundScript::kNoScriptId, 0);
}
return node;
@@ -243,7 +242,7 @@ v8::AllocationProfile::Node* SamplingHeapProfiler::TranslateAllocationNode(
Handle<Script> script = non_const_scripts[node->script_id_];
if (!script.is_null()) {
if (script->name()->IsName()) {
- Name* name = Name::cast(script->name());
+ Name name = Name::cast(script->name());
script_name = ToApiHandle<v8::String>(
isolate_->factory()->InternalizeUtf8String(names_->GetName(name)));
}
@@ -255,12 +254,12 @@ v8::AllocationProfile::Node* SamplingHeapProfiler::TranslateAllocationNode(
allocations.push_back(ScaleSample(alloc.first, alloc.second));
}
- profile->nodes().push_back(v8::AllocationProfile::Node{
+ profile->nodes_.push_back(v8::AllocationProfile::Node{
ToApiHandle<v8::String>(
isolate_->factory()->InternalizeUtf8String(node->name_)),
script_name, node->script_id_, node->script_position_, line, column,
- std::vector<v8::AllocationProfile::Node*>(), allocations});
- v8::AllocationProfile::Node* current = &profile->nodes().back();
+ node->id_, std::vector<v8::AllocationProfile::Node*>(), allocations});
+ v8::AllocationProfile::Node* current = &profile->nodes_.back();
// The |children_| map may have nodes inserted into it during translation
// because the translation may allocate strings on the JS heap that have
// the potential to be sampled. That's ok since map iterators are not
@@ -283,14 +282,30 @@ v8::AllocationProfile* SamplingHeapProfiler::GetAllocationProfile() {
std::map<int, Handle<Script>> scripts;
{
Script::Iterator iterator(isolate_);
- while (Script* script = iterator.Next()) {
+ for (Script script = iterator.Next(); !script.is_null();
+ script = iterator.Next()) {
scripts[script->id()] = handle(script, isolate_);
}
}
auto profile = new v8::internal::AllocationProfile();
TranslateAllocationNode(profile, &profile_root_, scripts);
+ profile->samples_ = SamplingHeapProfiler::BuildSamples();
+
return profile;
}
+const std::vector<v8::AllocationProfile::Sample>
+SamplingHeapProfiler::BuildSamples() const {
+ std::vector<v8::AllocationProfile::Sample> samples;
+ samples.reserve(samples_.size());
+ for (const auto& it : samples_) {
+ const Sample* sample = it.second.get();
+ samples.emplace_back(v8::AllocationProfile::Sample{
+ sample->owner->id_, sample->size, ScaleSample(sample->size, 1).count,
+ sample->sample_id});
+ }
+ return samples;
+}
+
} // namespace internal
} // namespace v8
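
Two pieces of math drive the sampling code above: GetNextSampleInterval draws the distance to the next sampled byte from an exponential distribution via inverse-transform sampling, and ScaleSample divides an observed count by the probability that an allocation of the given size is sampled at least once. A standalone sketch using the standard library in place of V8's base:: helpers (an assumption made for illustration):

    #include <cmath>
    #include <cstddef>
    #include <cstdio>
    #include <random>

    int main() {
      std::mt19937_64 rng(42);
      std::uniform_real_distribution<double> uniform(0.0, 1.0);
      const double rate = 512 * 1024;  // average bytes between samples

      // Inverse-transform sampling: -ln(u) * rate is exponentially
      // distributed, modelling the gap to the next sampled byte.
      double next_interval = -std::log(uniform(rng)) * rate;
      std::printf("next sample after ~%.0f bytes\n", next_interval);

      // ScaleSample: an allocation of `size` bytes is sampled with
      // probability 1 - exp(-size / rate); dividing by that probability
      // turns `count` observed samples into an estimated true count.
      std::size_t size = 4096;
      unsigned count = 3;
      double scale = 1.0 / (1.0 - std::exp(-static_cast<double>(size) / rate));
      unsigned estimated = static_cast<unsigned>(count * scale + 0.5);  // round
      std::printf("estimated allocations: %u\n", estimated);
      return 0;
    }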
diff --git a/deps/v8/src/profiler/sampling-heap-profiler.h b/deps/v8/src/profiler/sampling-heap-profiler.h
index 072c5eb677..818c33581d 100644
--- a/deps/v8/src/profiler/sampling-heap-profiler.h
+++ b/deps/v8/src/profiler/sampling-heap-profiler.h
@@ -25,62 +25,48 @@ class SamplingAllocationObserver;
class AllocationProfile : public v8::AllocationProfile {
public:
- AllocationProfile() : nodes_() {}
+ AllocationProfile() = default;
v8::AllocationProfile::Node* GetRootNode() override {
return nodes_.size() == 0 ? nullptr : &nodes_.front();
}
- std::deque<v8::AllocationProfile::Node>& nodes() { return nodes_; }
+ const std::vector<v8::AllocationProfile::Sample>& GetSamples() override {
+ return samples_;
+ }
private:
std::deque<v8::AllocationProfile::Node> nodes_;
+ std::vector<v8::AllocationProfile::Sample> samples_;
+
+ friend class SamplingHeapProfiler;
DISALLOW_COPY_AND_ASSIGN(AllocationProfile);
};
class SamplingHeapProfiler {
public:
- SamplingHeapProfiler(Heap* heap, StringsStorage* names, uint64_t rate,
- int stack_depth, v8::HeapProfiler::SamplingFlags flags);
- ~SamplingHeapProfiler();
-
- v8::AllocationProfile* GetAllocationProfile();
-
- StringsStorage* names() const { return names_; }
-
- class AllocationNode;
-
- struct Sample {
- public:
- Sample(size_t size_, AllocationNode* owner_, Local<Value> local_,
- SamplingHeapProfiler* profiler_)
- : size(size_),
- owner(owner_),
- global(Global<Value>(
- reinterpret_cast<v8::Isolate*>(profiler_->isolate_), local_)),
- profiler(profiler_) {}
- ~Sample() { global.Reset(); }
- const size_t size;
- AllocationNode* const owner;
- Global<Value> global;
- SamplingHeapProfiler* const profiler;
-
- private:
- DISALLOW_COPY_AND_ASSIGN(Sample);
- };
-
class AllocationNode {
public:
+ typedef uint64_t FunctionId;
AllocationNode(AllocationNode* parent, const char* name, int script_id,
- int start_position)
+ int start_position, uint32_t id)
: parent_(parent),
script_id_(script_id),
script_position_(start_position),
- name_(name) {}
+ name_(name),
+ id_(id) {}
+
+ AllocationNode* FindChildNode(FunctionId id) {
+ auto it = children_.find(id);
+ return it != children_.end() ? it->second.get() : nullptr;
+ }
+
+ AllocationNode* AddChildNode(FunctionId id,
+ std::unique_ptr<AllocationNode> node) {
+ return children_.emplace(id, std::move(node)).first->second.get();
+ }
- private:
- typedef uint64_t FunctionId;
static FunctionId function_id(int script_id, int start_position,
const char* name) {
// script_id == kNoScriptId case:
@@ -96,8 +82,8 @@ class SamplingHeapProfiler {
DCHECK(static_cast<unsigned>(start_position) < (1u << 31));
return (static_cast<uint64_t>(script_id) << 32) + (start_position << 1);
}
- AllocationNode* FindOrAddChildNode(const char* name, int script_id,
- int start_position);
+
+ private:
// TODO(alph): make use of unordered_map's here. Pay attention to
// iterator invalidation during TranslateAllocationNode.
std::map<size_t, unsigned int> allocations_;
@@ -106,6 +92,7 @@ class SamplingHeapProfiler {
const int script_id_;
const int script_position_;
const char* const name_;
+ uint32_t id_;
bool pinned_ = false;
friend class SamplingHeapProfiler;
@@ -113,13 +100,45 @@ class SamplingHeapProfiler {
DISALLOW_COPY_AND_ASSIGN(AllocationNode);
};
- private:
- Heap* heap() const { return heap_; }
+ struct Sample {
+ Sample(size_t size_, AllocationNode* owner_, Local<Value> local_,
+ SamplingHeapProfiler* profiler_, uint64_t sample_id)
+ : size(size_),
+ owner(owner_),
+ global(Global<Value>(
+ reinterpret_cast<v8::Isolate*>(profiler_->isolate_), local_)),
+ profiler(profiler_),
+ sample_id(sample_id) {}
+ ~Sample() { global.Reset(); }
+ const size_t size;
+ AllocationNode* const owner;
+ Global<Value> global;
+ SamplingHeapProfiler* const profiler;
+ const uint64_t sample_id;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(Sample);
+ };
+ SamplingHeapProfiler(Heap* heap, StringsStorage* names, uint64_t rate,
+ int stack_depth, v8::HeapProfiler::SamplingFlags flags);
+ ~SamplingHeapProfiler();
+
+ v8::AllocationProfile* GetAllocationProfile();
+ StringsStorage* names() const { return names_; }
+
+ private:
void SampleObject(Address soon_object, size_t size);
+ const std::vector<v8::AllocationProfile::Sample> BuildSamples() const;
+
+ AllocationNode* FindOrAddChildNode(AllocationNode* parent, const char* name,
+ int script_id, int start_position);
static void OnWeakCallback(const WeakCallbackInfo<Sample>& data);
+ uint32_t next_node_id() { return ++last_node_id_; }
+ uint64_t next_sample_id() { return ++last_sample_id_; }
+
// Methods that construct v8::AllocationProfile.
// Translates the provided AllocationNode *node* returning an equivalent
@@ -131,11 +150,13 @@ class SamplingHeapProfiler {
AllocationProfile* profile, SamplingHeapProfiler::AllocationNode* node,
const std::map<int, Handle<Script>>& scripts);
v8::AllocationProfile::Allocation ScaleSample(size_t size,
- unsigned int count);
+ unsigned int count) const;
AllocationNode* AddStack();
Isolate* const isolate_;
Heap* const heap_;
+ uint64_t last_sample_id_ = 0;
+ uint32_t last_node_id_ = 0;
std::unique_ptr<SamplingAllocationObserver> new_space_observer_;
std::unique_ptr<SamplingAllocationObserver> other_spaces_observer_;
StringsStorage* const names_;
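
The function_id packing kept in the header above encodes a script-backed function as (script_id << 32) + (start_position << 1), leaving bit 0 clear; the kNoScriptId case, which stores a tagged name pointer instead, is omitted here. A sketch of the script-backed case only:

    #include <cassert>
    #include <cstdint>

    // Script id in the upper 32 bits, start position shifted left by one in
    // the lower bits, so bit 0 stays clear for script-backed functions.
    uint64_t function_id(int script_id, int start_position) {
      assert(static_cast<unsigned>(start_position) < (1u << 31));
      return (static_cast<uint64_t>(script_id) << 32) +
             (static_cast<uint64_t>(start_position) << 1);
    }

    int main() {
      uint64_t id = function_id(7, 120);
      assert((id >> 32) == 7);                    // script id from the high word
      assert(((id & 0xffffffffu) >> 1) == 120);   // position from the low word
      assert((id & 1) == 0);                      // bit 0 clear in this case
      return 0;
    }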
diff --git a/deps/v8/src/profiler/strings-storage.cc b/deps/v8/src/profiler/strings-storage.cc
index 9a5a006ff4..04a2379707 100644
--- a/deps/v8/src/profiler/strings-storage.cc
+++ b/deps/v8/src/profiler/strings-storage.cc
@@ -69,9 +69,9 @@ const char* StringsStorage::GetVFormatted(const char* format, va_list args) {
return AddOrDisposeString(str.start(), len);
}
-const char* StringsStorage::GetName(Name* name) {
+const char* StringsStorage::GetName(Name name) {
if (name->IsString()) {
- String* str = String::cast(name);
+ String str = String::cast(name);
int length = Min(FLAG_heap_snapshot_string_limit, str->length());
int actual_length = 0;
std::unique_ptr<char[]> data = str->ToCString(
@@ -87,9 +87,9 @@ const char* StringsStorage::GetName(int index) {
return GetFormatted("%d", index);
}
-const char* StringsStorage::GetConsName(const char* prefix, Name* name) {
+const char* StringsStorage::GetConsName(const char* prefix, Name name) {
if (name->IsString()) {
- String* str = String::cast(name);
+ String str = String::cast(name);
int length = Min(FLAG_heap_snapshot_string_limit, str->length());
int actual_length = 0;
std::unique_ptr<char[]> data = str->ToCString(
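
StringsStorage's role in the hunks above is de-duplication: GetName copies a heap string into shared storage and hands back a stable char pointer, so repeated names share one allocation. This is not V8's implementation (which hashes raw character buffers in a HashMap); just the same idea sketched with std::unordered_set:

    #include <cstdio>
    #include <string>
    #include <unordered_set>

    class NameStore {
     public:
      // Returns a stable canonical pointer for the given name; element
      // pointers in an unordered_set survive rehashing.
      const char* GetName(const std::string& name) {
        return pool_.insert(name).first->c_str();
      }

     private:
      std::unordered_set<std::string> pool_;
    };

    int main() {
      NameStore names;
      const char* a = names.GetName("DebugName");
      const char* b = names.GetName("DebugName");
      std::printf("same storage: %s\n", a == b ? "yes" : "no");  // yes
      return 0;
    }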
diff --git a/deps/v8/src/profiler/strings-storage.h b/deps/v8/src/profiler/strings-storage.h
index 5c0f8afd93..9b56a6e412 100644
--- a/deps/v8/src/profiler/strings-storage.h
+++ b/deps/v8/src/profiler/strings-storage.h
@@ -29,12 +29,12 @@ class V8_EXPORT_PRIVATE StringsStorage {
// Returns a formatted string, de-duplicated via the storage.
PRINTF_FORMAT(2, 3) const char* GetFormatted(const char* format, ...);
// Returns a stored string resulting from name, or "<symbol>" for a symbol.
- const char* GetName(Name* name);
+ const char* GetName(Name name);
// Returns the string representation of the int from the store.
const char* GetName(int index);
// Appends string resulting from name to prefix, then returns the stored
// result.
- const char* GetConsName(const char* prefix, Name* name);
+ const char* GetConsName(const char* prefix, Name name);
private:
static bool StringsMatch(void* key1, void* key2);
diff --git a/deps/v8/src/profiler/tick-sample.cc b/deps/v8/src/profiler/tick-sample.cc
index 69a6bbf778..501dbd63a8 100644
--- a/deps/v8/src/profiler/tick-sample.cc
+++ b/deps/v8/src/profiler/tick-sample.cc
@@ -255,17 +255,16 @@ bool TickSample::GetStackSample(Isolate* v8_isolate, RegisterState* regs,
// bytecode_array might be garbage, so don't actually dereference it. We
// avoid the frame->GetXXX functions since they call BytecodeArray::cast,
// which has a heap access in its DCHECK.
- i::Object* bytecode_array = i::Memory<i::Object*>(
+ i::Address bytecode_array = i::Memory<i::Address>(
frame->fp() + i::InterpreterFrameConstants::kBytecodeArrayFromFp);
- i::Object* bytecode_offset = i::Memory<i::Object*>(
+ i::Address bytecode_offset = i::Memory<i::Address>(
frame->fp() + i::InterpreterFrameConstants::kBytecodeOffsetFromFp);
// If the bytecode array is a heap object and the bytecode offset is a
// Smi, use those, otherwise fall back to using the frame's pc.
if (HAS_HEAP_OBJECT_TAG(bytecode_array) && HAS_SMI_TAG(bytecode_offset)) {
frames[i++] = reinterpret_cast<void*>(
- reinterpret_cast<i::Address>(bytecode_array) +
- i::Internals::SmiValue(bytecode_offset));
+ bytecode_array + i::Internals::SmiValue(bytecode_offset));
continue;
}
}
@@ -287,5 +286,20 @@ void TickSample::Init(Isolate* isolate, const v8::RegisterState& state,
timestamp = base::TimeTicks::HighResolutionNow();
}
+void TickSample::print() const {
+ PrintF("TickSample: at %p\n", this);
+ PrintF(" - state: %s\n", StateToString(state));
+ PrintF(" - pc: %p\n", pc);
+ PrintF(" - stack: (%u frames)\n", frames_count);
+ for (unsigned i = 0; i < frames_count; i++) {
+ PrintF(" %p\n", stack[i]);
+ }
+ PrintF(" - has_external_callback: %d\n", has_external_callback);
+ PrintF(" - %s: %p\n",
+ has_external_callback ? "external_callback_entry" : "tos", tos);
+ PrintF(" - update_stats: %d\n", update_stats);
+ PrintF("\n");
+}
+
} // namespace internal
} // namespace v8
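
The GetStackSample change above reads the bytecode array and offset as raw Addresses and classifies them with HAS_HEAP_OBJECT_TAG / HAS_SMI_TAG before dereferencing anything. A sketch of those low-bit tag checks using V8's usual constants (heap object tag 1, smi tag 0); the exact smi shift width is an assumption here:

    #include <cstdint>
    #include <cstdio>

    constexpr uintptr_t kHeapObjectTag = 1;
    constexpr uintptr_t kHeapObjectTagMask = 3;
    constexpr uintptr_t kSmiTagMask = 1;

    // A value with 01 in its low bits is a tagged heap object pointer...
    bool HasHeapObjectTag(uintptr_t value) {
      return (value & kHeapObjectTagMask) == kHeapObjectTag;
    }
    // ...and a clear low bit marks a smi (a small integer shifted left).
    bool HasSmiTag(uintptr_t value) { return (value & kSmiTagMask) == 0; }

    int main() {
      uintptr_t heap_object = 0x5000 | kHeapObjectTag;
      uintptr_t smi = static_cast<uintptr_t>(42) << 1;
      std::printf("heap object tag: %d, smi tag: %d\n",
                  HasHeapObjectTag(heap_object), HasSmiTag(smi));
      return 0;
    }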
diff --git a/deps/v8/src/profiler/tick-sample.h b/deps/v8/src/profiler/tick-sample.h
index 819b862388..ea66010632 100644
--- a/deps/v8/src/profiler/tick-sample.h
+++ b/deps/v8/src/profiler/tick-sample.h
@@ -19,6 +19,8 @@ struct TickSample : public v8::TickSample {
RecordCEntryFrame record_c_entry_frame, bool update_stats,
bool use_simulator_reg_state = true);
base::TimeTicks timestamp;
+
+ void print() const;
};
} // namespace internal
diff --git a/deps/v8/src/profiler/tracing-cpu-profiler.cc b/deps/v8/src/profiler/tracing-cpu-profiler.cc
index 875478139d..0cb502bdf1 100644
--- a/deps/v8/src/profiler/tracing-cpu-profiler.cc
+++ b/deps/v8/src/profiler/tracing-cpu-profiler.cc
@@ -13,10 +13,6 @@ namespace internal {
TracingCpuProfilerImpl::TracingCpuProfilerImpl(Isolate* isolate)
: isolate_(isolate), profiling_enabled_(false) {
- // Make sure tracing system notices profiler categories.
- TRACE_EVENT_WARMUP_CATEGORY(TRACE_DISABLED_BY_DEFAULT("v8.cpu_profiler"));
- TRACE_EVENT_WARMUP_CATEGORY(
- TRACE_DISABLED_BY_DEFAULT("v8.cpu_profiler.hires"));
V8::GetCurrentPlatform()->GetTracingController()->AddTraceStateObserver(this);
}
@@ -40,7 +36,7 @@ void TracingCpuProfilerImpl::OnTraceEnabled() {
}
void TracingCpuProfilerImpl::OnTraceDisabled() {
- base::LockGuard<base::Mutex> lock(&mutex_);
+ base::MutexGuard lock(&mutex_);
if (!profiling_enabled_) return;
profiling_enabled_ = false;
isolate_->RequestInterrupt(
@@ -51,7 +47,7 @@ void TracingCpuProfilerImpl::OnTraceDisabled() {
}
void TracingCpuProfilerImpl::StartProfiling() {
- base::LockGuard<base::Mutex> lock(&mutex_);
+ base::MutexGuard lock(&mutex_);
if (!profiling_enabled_ || profiler_) return;
bool enabled;
TRACE_EVENT_CATEGORY_GROUP_ENABLED(
@@ -64,7 +60,7 @@ void TracingCpuProfilerImpl::StartProfiling() {
}
void TracingCpuProfilerImpl::StopProfiling() {
- base::LockGuard<base::Mutex> lock(&mutex_);
+ base::MutexGuard lock(&mutex_);
if (!profiler_) return;
profiler_->StopProfiling("");
profiler_.reset();
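
The tracing-cpu-profiler hunks above only swap the spelled-out base::LockGuard<base::Mutex> for V8's shorter MutexGuard alias; the behavior is unchanged RAII locking. For reference, the standard-library analogue of the pattern:

    #include <mutex>

    std::mutex mutex_;

    void StopProfiling() {
      std::lock_guard<std::mutex> lock(mutex_);  // released on every return path
      // ... guarded work would go here ...
    }

    int main() {
      StopProfiling();
      return 0;
    }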
diff --git a/deps/v8/src/property-descriptor.cc b/deps/v8/src/property-descriptor.cc
index 902759a168..4947bfb99f 100644
--- a/deps/v8/src/property-descriptor.cc
+++ b/deps/v8/src/property-descriptor.cc
@@ -43,7 +43,7 @@ bool GetPropertyIfPresent(Handle<JSReceiver> receiver, Handle<String> name,
bool ToPropertyDescriptorFastPath(Isolate* isolate, Handle<JSReceiver> obj,
PropertyDescriptor* desc) {
if (!obj->IsJSObject()) return false;
- Map* map = Handle<JSObject>::cast(obj)->map();
+ Map map = Handle<JSObject>::cast(obj)->map();
if (map->instance_type() != JS_OBJECT_TYPE) return false;
if (map->is_access_check_needed()) return false;
if (map->prototype() != *isolate->initial_object_prototype()) return false;
@@ -60,7 +60,7 @@ bool ToPropertyDescriptorFastPath(Isolate* isolate, Handle<JSReceiver> obj,
Handle<DescriptorArray>(map->instance_descriptors(), isolate);
for (int i = 0; i < map->NumberOfOwnDescriptors(); i++) {
PropertyDetails details = descs->GetDetails(i);
- Name* key = descs->GetKey(i);
+ Name key = descs->GetKey(i);
Handle<Object> value;
if (details.location() == kField) {
if (details.kind() == kData) {
diff --git a/deps/v8/src/property-details.h b/deps/v8/src/property-details.h
index 1e953001eb..9fefb45afc 100644
--- a/deps/v8/src/property-details.h
+++ b/deps/v8/src/property-details.h
@@ -154,16 +154,13 @@ class Representation {
int size() const {
DCHECK(!IsNone());
- if (IsInteger8() || IsUInteger8()) {
- return sizeof(uint8_t);
- }
- if (IsInteger16() || IsUInteger16()) {
- return sizeof(uint16_t);
- }
- if (IsInteger32()) {
- return sizeof(uint32_t);
- }
- return kPointerSize;
+ if (IsInteger8() || IsUInteger8()) return kUInt8Size;
+ if (IsInteger16() || IsUInteger16()) return kUInt16Size;
+ if (IsInteger32()) return kInt32Size;
+ if (IsDouble()) return kDoubleSize;
+ if (IsExternal()) return kSystemPointerSize;
+ DCHECK(IsTagged() || IsSmi() || IsHeapObject());
+ return kTaggedSize;
}
Kind kind() const { return static_cast<Kind>(kind_); }
@@ -287,9 +284,9 @@ class PropertyDetails {
return PropertyDetails(value_, new_attributes);
}
- // Conversion for storing details as Object*.
- explicit inline PropertyDetails(Smi* smi);
- inline Smi* AsSmi() const;
+ // Conversion for storing details as Object.
+ explicit inline PropertyDetails(Smi smi);
+ inline Smi AsSmi() const;
static uint8_t EncodeRepresentation(Representation representation) {
return representation.kind();
@@ -307,6 +304,11 @@ class PropertyDetails {
return AttributesField::decode(value_);
}
+ bool HasKindAndAttributes(PropertyKind kind, PropertyAttributes attributes) {
+ return (value_ & (KindField::kMask | AttributesField::kMask)) ==
+ (KindField::encode(kind) | AttributesField::encode(attributes));
+ }
+
int dictionary_index() const {
return DictionaryStorageField::decode(value_);
}
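
The new HasKindAndAttributes above tests two bit fields of one word with a single mask-and-compare instead of decoding each field separately. A sketch of the trick with a hypothetical field layout (not PropertyDetails' real encoding):

    #include <cassert>
    #include <cstdint>

    constexpr uint32_t kKindMask = 0x1;        // 1 bit of "kind" at bit 0
    constexpr uint32_t kAttributesMask = 0xE;  // 3 bits of attributes at bits 1-3

    // Mask out both fields at once and compare against the encoded pair.
    bool HasKindAndAttributes(uint32_t value, uint32_t kind, uint32_t attrs) {
      return (value & (kKindMask | kAttributesMask)) == (kind | (attrs << 1));
    }

    int main() {
      // kind = 1, attrs = 3, plus unrelated high bits that must be ignored.
      uint32_t details = 0x1 | (0x3u << 1) | (0x7u << 4);
      assert(HasKindAndAttributes(details, 0x1, 0x3));
      assert(!HasKindAndAttributes(details, 0x0, 0x3));
      return 0;
    }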
diff --git a/deps/v8/src/property.cc b/deps/v8/src/property.cc
index 5f41948cfd..064f329fc0 100644
--- a/deps/v8/src/property.cc
+++ b/deps/v8/src/property.cc
@@ -8,6 +8,7 @@
#include "src/handles-inl.h"
#include "src/objects-inl.h"
#include "src/objects/name-inl.h"
+#include "src/objects/smi.h"
#include "src/ostreams.h"
namespace v8 {
@@ -23,7 +24,7 @@ std::ostream& operator<<(std::ostream& os,
return os;
}
-Descriptor::Descriptor() : details_(Smi::kZero) {}
+Descriptor::Descriptor() : details_(Smi::zero()) {}
Descriptor::Descriptor(Handle<Name> key, const MaybeObjectHandle& value,
PropertyKind kind, PropertyAttributes attributes,
diff --git a/deps/v8/src/prototype-inl.h b/deps/v8/src/prototype-inl.h
index 820d5756f1..8f5dedda71 100644
--- a/deps/v8/src/prototype-inl.h
+++ b/deps/v8/src/prototype-inl.h
@@ -18,7 +18,6 @@ PrototypeIterator::PrototypeIterator(Isolate* isolate,
WhereToStart where_to_start,
WhereToEnd where_to_end)
: isolate_(isolate),
- object_(nullptr),
handle_(receiver),
where_to_end_(where_to_end),
is_at_end_(false),
@@ -27,7 +26,7 @@ PrototypeIterator::PrototypeIterator(Isolate* isolate,
if (where_to_start == kStartAtPrototype) Advance();
}
-PrototypeIterator::PrototypeIterator(Isolate* isolate, JSReceiver* receiver,
+PrototypeIterator::PrototypeIterator(Isolate* isolate, JSReceiver receiver,
WhereToStart where_to_start,
WhereToEnd where_to_end)
: isolate_(isolate),
@@ -38,7 +37,7 @@ PrototypeIterator::PrototypeIterator(Isolate* isolate, JSReceiver* receiver,
if (where_to_start == kStartAtPrototype) Advance();
}
-PrototypeIterator::PrototypeIterator(Isolate* isolate, Map* receiver_map,
+PrototypeIterator::PrototypeIterator(Isolate* isolate, Map receiver_map,
WhereToEnd where_to_end)
: isolate_(isolate),
object_(receiver_map->GetPrototypeChainRootMap(isolate_)->prototype()),
@@ -47,7 +46,7 @@ PrototypeIterator::PrototypeIterator(Isolate* isolate, Map* receiver_map,
seen_proxies_(0) {
if (!is_at_end_ && where_to_end_ == END_AT_NON_HIDDEN) {
DCHECK(object_->IsJSReceiver());
- Map* map = JSReceiver::cast(object_)->map();
+ Map map = JSReceiver::cast(object_)->map();
is_at_end_ = !map->has_hidden_prototype();
}
}
@@ -55,7 +54,6 @@ PrototypeIterator::PrototypeIterator(Isolate* isolate, Map* receiver_map,
PrototypeIterator::PrototypeIterator(Isolate* isolate, Handle<Map> receiver_map,
WhereToEnd where_to_end)
: isolate_(isolate),
- object_(nullptr),
handle_(receiver_map->GetPrototypeChainRootMap(isolate_)->prototype(),
isolate_),
where_to_end_(where_to_end),
@@ -63,7 +61,7 @@ PrototypeIterator::PrototypeIterator(Isolate* isolate, Handle<Map> receiver_map,
seen_proxies_(0) {
if (!is_at_end_ && where_to_end_ == END_AT_NON_HIDDEN) {
DCHECK(handle_->IsJSReceiver());
- Map* map = JSReceiver::cast(*handle_)->map();
+ Map map = JSReceiver::cast(*handle_)->map();
is_at_end_ = !map->has_hidden_prototype();
}
}
@@ -93,10 +91,10 @@ void PrototypeIterator::Advance() {
}
void PrototypeIterator::AdvanceIgnoringProxies() {
- Object* object = handle_.is_null() ? object_ : *handle_;
- Map* map = HeapObject::cast(object)->map();
+ Object object = handle_.is_null() ? object_ : *handle_;
+ Map map = HeapObject::cast(object)->map();
- Object* prototype = map->prototype();
+ Object prototype = map->prototype();
is_at_end_ = where_to_end_ == END_AT_NON_HIDDEN ? !map->has_hidden_prototype()
: prototype->IsNull(isolate_);
diff --git a/deps/v8/src/prototype.h b/deps/v8/src/prototype.h
index e8fe06ac44..8b7edfaf0a 100644
--- a/deps/v8/src/prototype.h
+++ b/deps/v8/src/prototype.h
@@ -31,11 +31,11 @@ class PrototypeIterator {
WhereToStart where_to_start = kStartAtPrototype,
WhereToEnd where_to_end = END_AT_NULL);
- inline PrototypeIterator(Isolate* isolate, JSReceiver* receiver,
+ inline PrototypeIterator(Isolate* isolate, JSReceiver receiver,
WhereToStart where_to_start = kStartAtPrototype,
WhereToEnd where_to_end = END_AT_NULL);
- inline explicit PrototypeIterator(Isolate* isolate, Map* receiver_map,
+ inline explicit PrototypeIterator(Isolate* isolate, Map receiver_map,
WhereToEnd where_to_end = END_AT_NULL);
inline explicit PrototypeIterator(Isolate* isolate, Handle<Map> receiver_map,
@@ -46,7 +46,7 @@ class PrototypeIterator {
inline bool HasAccess() const;
template <typename T = Object>
- T* GetCurrent() const {
+ T GetCurrent() const {
DCHECK(handle_.is_null());
return T::cast(object_);
}
@@ -54,7 +54,7 @@ class PrototypeIterator {
template <typename T = Object>
static Handle<T> GetCurrent(const PrototypeIterator& iterator) {
DCHECK(!iterator.handle_.is_null());
- DCHECK_NULL(iterator.object_);
+ DCHECK_EQ(iterator.object_, Object());
return Handle<T>::cast(iterator.handle_);
}
@@ -73,7 +73,7 @@ class PrototypeIterator {
private:
Isolate* isolate_;
- Object* object_;
+ Object object_;
Handle<Object> handle_;
WhereToEnd where_to_end_;
bool is_at_end_;
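
PrototypeIterator, as declared above, starts at a receiver (or at its prototype, under kStartAtPrototype) and advances toward the end of the chain. A toy chain walker showing the shape of that loop; plain structs stand in for V8's objects:

    #include <cstdio>

    struct Obj {
      const char* name;
      Obj* prototype;  // null plays the role of the null prototype
    };

    int main() {
      Obj object_proto{"Object.prototype", nullptr};
      Obj derived{"derived", &object_proto};
      // kStartAtPrototype semantics: skip the receiver itself first.
      for (Obj* it = derived.prototype; it != nullptr; it = it->prototype) {
        std::printf("visit %s\n", it->name);
      }
      return 0;
    }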
diff --git a/deps/v8/src/ptr-compr-inl.h b/deps/v8/src/ptr-compr-inl.h
new file mode 100644
index 0000000000..2acb04fb06
--- /dev/null
+++ b/deps/v8/src/ptr-compr-inl.h
@@ -0,0 +1,243 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_PTR_COMPR_INL_H_
+#define V8_PTR_COMPR_INL_H_
+
+#if V8_TARGET_ARCH_64_BIT
+
+#include "src/objects/heap-object-inl.h"
+#include "src/ptr-compr.h"
+
+namespace v8 {
+namespace internal {
+
+// Compresses the full-pointer representation of a tagged value to the
+// on-heap representation.
+V8_INLINE Tagged_t CompressTagged(Address tagged) {
+ // The compression is no-op while we are using checked decompression.
+ STATIC_ASSERT(kTaggedSize == kSystemPointerSize);
+ // TODO(ishell): implement once kTaggedSize is equal to kInt32Size.
+ return tagged;
+}
+
+// Calculates isolate root value from any on-heap address.
+V8_INLINE Address GetRootFromOnHeapAddress(Address addr) {
+ return RoundDown(addr + kPtrComprIsolateRootBias,
+ kPtrComprIsolateRootAlignment);
+}
+
+// Decompresses weak or strong heap object pointer or forwarding pointer,
+// preserving both weak- and smi-tags.
+V8_INLINE Address DecompressTaggedPointerImpl(Address on_heap_addr,
+ int32_t value) {
+ Address root = GetRootFromOnHeapAddress(on_heap_addr);
+ // Current compression scheme requires value to be sign-extended to intptr_t
+ // before adding the |root|.
+ return root + static_cast<Address>(static_cast<intptr_t>(value));
+}
+
+// Decompresses weak or strong heap object pointer or forwarding pointer,
+// preserving both weak- and smi-tags, and checks that the result of
+// decompression matches the full value stored in the field.
+// Checked decompression helps to find misuses of XxxSlots and FullXxxSlots.
+// TODO(ishell): remove in favour of DecompressTaggedPointerImpl() once
+// kTaggedSize is equal to kInt32Size.
+V8_INLINE Address DecompressTaggedPointer(Address on_heap_addr,
+ Tagged_t full_value) {
+ // Use only lower 32-bits of the value for decompression.
+ int32_t compressed = static_cast<int32_t>(full_value);
+ STATIC_ASSERT(kTaggedSize == kSystemPointerSize);
+ Address result = DecompressTaggedPointerImpl(on_heap_addr, compressed);
+#ifdef DEBUG
+ if (full_value != result) {
+ base::OS::DebugBreak();
+ result = DecompressTaggedPointerImpl(on_heap_addr, compressed);
+ }
+#endif
+ DCHECK_EQ(full_value, result);
+ return result;
+}
+
+// Decompresses any tagged value, preserving both weak- and smi- tags.
+V8_INLINE Address DecompressTaggedAnyImpl(Address on_heap_addr, int32_t value) {
+ // |root_mask| is 0 if the |value| was a smi or -1 otherwise.
+ Address root_mask = -static_cast<Address>(value & kSmiTagMask);
+ Address root_or_zero = root_mask & GetRootFromOnHeapAddress(on_heap_addr);
+ // Current compression scheme requires value to be sign-extended to intptr_t
+ // before adding the |root_or_zero|.
+ return root_or_zero + static_cast<Address>(static_cast<intptr_t>(value));
+}
+
+// Decompresses any tagged value, preserving both weak- and smi-tags, and
+// checks that the result of decompression matches the full value stored in
+// the field.
+// Checked decompression helps to find misuses of XxxSlots and FullXxxSlots.
+// TODO(ishell): remove in favour of DecompressTaggedAnyImpl() once
+// kTaggedSize is equal to kInt32Size.
+V8_INLINE Address DecompressTaggedAny(Address on_heap_addr,
+ Tagged_t full_value) {
+ // Use only lower 32-bits of the value for decompression.
+ int32_t compressed = static_cast<int32_t>(full_value);
+ STATIC_ASSERT(kTaggedSize == kSystemPointerSize);
+ Address result = DecompressTaggedAnyImpl(on_heap_addr, compressed);
+#ifdef DEBUG
+ if (full_value != result) {
+ base::OS::DebugBreak();
+ result = DecompressTaggedAnyImpl(on_heap_addr, compressed);
+ }
+#endif
+ DCHECK_EQ(full_value, result);
+ return result;
+}
+
+//
+// CompressedObjectSlot implementation.
+//
+
+CompressedObjectSlot::CompressedObjectSlot(Object* object)
+ : SlotBase(reinterpret_cast<Address>(&object->ptr_)) {}
+
+Object CompressedObjectSlot::operator*() const {
+ Tagged_t value = *location();
+ return Object(DecompressTaggedAny(address(), value));
+}
+
+void CompressedObjectSlot::store(Object value) const {
+ *location() = CompressTagged(value->ptr());
+}
+
+Object CompressedObjectSlot::Acquire_Load() const {
+ AtomicTagged_t value = AsAtomicTagged::Acquire_Load(location());
+ return Object(DecompressTaggedAny(address(), value));
+}
+
+Object CompressedObjectSlot::Relaxed_Load() const {
+ AtomicTagged_t value = AsAtomicTagged::Relaxed_Load(location());
+ return Object(DecompressTaggedAny(address(), value));
+}
+
+void CompressedObjectSlot::Relaxed_Store(Object value) const {
+ Tagged_t ptr = CompressTagged(value->ptr());
+ AsAtomicTagged::Relaxed_Store(location(), ptr);
+}
+
+void CompressedObjectSlot::Release_Store(Object value) const {
+ Tagged_t ptr = CompressTagged(value->ptr());
+ AsAtomicTagged::Release_Store(location(), ptr);
+}
+
+Object CompressedObjectSlot::Release_CompareAndSwap(Object old,
+ Object target) const {
+ Tagged_t old_ptr = CompressTagged(old->ptr());
+ Tagged_t target_ptr = CompressTagged(target->ptr());
+ Tagged_t result =
+ AsAtomicTagged::Release_CompareAndSwap(location(), old_ptr, target_ptr);
+ return Object(DecompressTaggedAny(address(), result));
+}
+
+//
+// CompressedMapWordSlot implementation.
+//
+
+bool CompressedMapWordSlot::contains_value(Address raw_value) const {
+ Tagged_t value = *location();
+ return value == static_cast<Tagged_t>(raw_value);
+}
+
+Object CompressedMapWordSlot::operator*() const {
+ Tagged_t value = *location();
+ return Object(DecompressTaggedPointer(address(), value));
+}
+
+void CompressedMapWordSlot::store(Object value) const {
+ *location() = CompressTagged(value.ptr());
+}
+
+Object CompressedMapWordSlot::Relaxed_Load() const {
+ AtomicTagged_t value = AsAtomicTagged::Relaxed_Load(location());
+ return Object(DecompressTaggedPointer(address(), value));
+}
+
+void CompressedMapWordSlot::Relaxed_Store(Object value) const {
+ Tagged_t ptr = CompressTagged(value.ptr());
+ AsAtomicTagged::Relaxed_Store(location(), ptr);
+}
+
+Object CompressedMapWordSlot::Acquire_Load() const {
+ AtomicTagged_t value = AsAtomicTagged::Acquire_Load(location());
+ return Object(DecompressTaggedPointer(address(), value));
+}
+
+void CompressedMapWordSlot::Release_Store(Object value) const {
+ Tagged_t ptr = CompressTagged(value->ptr());
+ AsAtomicTagged::Release_Store(location(), ptr);
+}
+
+Object CompressedMapWordSlot::Release_CompareAndSwap(Object old,
+ Object target) const {
+ Tagged_t old_ptr = CompressTagged(old->ptr());
+ Tagged_t target_ptr = CompressTagged(target->ptr());
+ Tagged_t result =
+ AsAtomicTagged::Release_CompareAndSwap(location(), old_ptr, target_ptr);
+ return Object(DecompressTaggedPointer(address(), result));
+}
+
+//
+// CompressedMaybeObjectSlot implementation.
+//
+
+MaybeObject CompressedMaybeObjectSlot::operator*() const {
+ Tagged_t value = *location();
+ return MaybeObject(DecompressTaggedAny(address(), value));
+}
+
+void CompressedMaybeObjectSlot::store(MaybeObject value) const {
+ *location() = CompressTagged(value->ptr());
+}
+
+MaybeObject CompressedMaybeObjectSlot::Relaxed_Load() const {
+ AtomicTagged_t value = AsAtomicTagged::Relaxed_Load(location());
+ return MaybeObject(DecompressTaggedAny(address(), value));
+}
+
+void CompressedMaybeObjectSlot::Relaxed_Store(MaybeObject value) const {
+ Tagged_t ptr = CompressTagged(value->ptr());
+ AsAtomicTagged::Relaxed_Store(location(), ptr);
+}
+
+void CompressedMaybeObjectSlot::Release_CompareAndSwap(
+ MaybeObject old, MaybeObject target) const {
+ Tagged_t old_ptr = CompressTagged(old->ptr());
+ Tagged_t target_ptr = CompressTagged(target->ptr());
+ AsAtomicTagged::Release_CompareAndSwap(location(), old_ptr, target_ptr);
+}
+
+//
+// CompressedHeapObjectSlot implementation.
+//
+
+HeapObjectReference CompressedHeapObjectSlot::operator*() const {
+ Tagged_t value = *location();
+ return HeapObjectReference(DecompressTaggedPointer(address(), value));
+}
+
+void CompressedHeapObjectSlot::store(HeapObjectReference value) const {
+ *location() = CompressTagged(value.ptr());
+}
+
+HeapObject CompressedHeapObjectSlot::ToHeapObject() const {
+ DCHECK((*location() & kHeapObjectTagMask) == kHeapObjectTag);
+ return HeapObject::cast(Object(*location()));
+}
+
+void CompressedHeapObjectSlot::StoreHeapObject(HeapObject value) const {
+ *location() = value->ptr();
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_TARGET_ARCH_64_BIT
+
+#endif // V8_PTR_COMPR_INL_H_
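
The decompression helpers in the new file above recover a full pointer from a 32-bit compressed value: the isolate root is found by rounding the biased slot address down to the 4 GB cage alignment, and a mask derived from the smi tag bit decides whether the root gets added at all. A standalone sketch of that arithmetic with simulated addresses (no real heap involved):

    #include <cstdint>
    #include <cstdio>

    using Address = uint64_t;
    constexpr Address kGB = 1ull << 30;
    constexpr Address kPtrComprIsolateRootBias = (4 * kGB) / 2;
    constexpr Address kPtrComprIsolateRootAlignment = 4 * kGB;
    constexpr Address kSmiTagMask = 1;

    // Round the biased on-heap address down to the 4 GB cage alignment.
    Address GetRootFromOnHeapAddress(Address addr) {
      return (addr + kPtrComprIsolateRootBias) &
             ~(kPtrComprIsolateRootAlignment - 1);
    }

    Address DecompressTaggedAny(Address on_heap_addr, int32_t value) {
      // root_mask is all-zeros for a smi (low tag bit 0) and all-ones for a
      // heap object, so smis skip the root addition entirely.
      Address root_mask = -static_cast<Address>(value & kSmiTagMask);
      Address root_or_zero = root_mask & GetRootFromOnHeapAddress(on_heap_addr);
      // Sign-extend the 32-bit compressed value before adding the root.
      return root_or_zero + static_cast<Address>(static_cast<int64_t>(value));
    }

    int main() {
      Address slot = 5 * kGB + 0x1000;           // inside a 2 GB..6 GB cage
      int32_t compressed_heap_ptr = 0x2004 | 1;  // tagged heap object offset
      int32_t smi = 42 << 1;
      std::printf("heap object -> %llx\n",
                  (unsigned long long)DecompressTaggedAny(slot,
                                                          compressed_heap_ptr));
      std::printf("smi         -> %llx\n",
                  (unsigned long long)DecompressTaggedAny(slot, smi));
      return 0;
    }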
diff --git a/deps/v8/src/ptr-compr.h b/deps/v8/src/ptr-compr.h
new file mode 100644
index 0000000000..930a80ccec
--- /dev/null
+++ b/deps/v8/src/ptr-compr.h
@@ -0,0 +1,145 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_PTR_COMPR_H_
+#define V8_PTR_COMPR_H_
+
+#if V8_TARGET_ARCH_64_BIT
+
+#include "src/globals.h"
+#include "src/objects/slots.h"
+
+namespace v8 {
+namespace internal {
+
+constexpr size_t kPtrComprHeapReservationSize = size_t{4} * GB;
+constexpr size_t kPtrComprIsolateRootBias = kPtrComprHeapReservationSize / 2;
+constexpr size_t kPtrComprIsolateRootAlignment = size_t{4} * GB;
+
+// A CompressedObjectSlot instance describes a kTaggedSize-sized field ("slot")
+// holding a compressed tagged pointer (smi or heap object).
+// Its address() is the address of the slot.
+// The slot's contents can be read and written using operator* and store().
+class CompressedObjectSlot
+ : public SlotBase<CompressedObjectSlot, Tagged_t, kTaggedSize> {
+ public:
+ using TObject = Object;
+ using THeapObjectSlot = CompressedHeapObjectSlot;
+
+ static constexpr bool kCanBeWeak = false;
+
+ CompressedObjectSlot() : SlotBase(kNullAddress) {}
+ explicit CompressedObjectSlot(Address ptr) : SlotBase(ptr) {}
+ explicit CompressedObjectSlot(Address* ptr)
+ : SlotBase(reinterpret_cast<Address>(ptr)) {}
+ inline explicit CompressedObjectSlot(Object* object);
+ explicit CompressedObjectSlot(Object const* const* ptr)
+ : SlotBase(reinterpret_cast<Address>(ptr)) {}
+ template <typename T>
+ explicit CompressedObjectSlot(SlotBase<T, TData, kSlotDataSize> slot)
+ : SlotBase(slot.address()) {}
+
+ inline Object operator*() const;
+ inline void store(Object value) const;
+
+ inline Object Acquire_Load() const;
+ inline Object Relaxed_Load() const;
+ inline void Relaxed_Store(Object value) const;
+ inline void Release_Store(Object value) const;
+ inline Object Release_CompareAndSwap(Object old, Object target) const;
+};
+
+// A CompressedMapWordSlot instance describes a kTaggedSize-sized map-word field
+// ("slot") of heap objects holding a compressed tagged pointer or a Smi
+// representing a forwarding pointer value.
+// This slot kind is similar to CompressedObjectSlot, but decompression of
+// a forwarding pointer is different.
+// Its address() is the address of the slot.
+// The slot's contents can be read and written using operator* and store().
+class CompressedMapWordSlot
+ : public SlotBase<CompressedMapWordSlot, Tagged_t, kTaggedSize> {
+ public:
+ using TObject = Object;
+
+ static constexpr bool kCanBeWeak = false;
+
+ CompressedMapWordSlot() : SlotBase(kNullAddress) {}
+ explicit CompressedMapWordSlot(Address ptr) : SlotBase(ptr) {}
+
+ // Compares memory representation of a value stored in the slot with given
+ // raw value without decompression.
+ inline bool contains_value(Address raw_value) const;
+
+ inline Object operator*() const;
+ inline void store(Object value) const;
+
+ inline Object Relaxed_Load() const;
+ inline void Relaxed_Store(Object value) const;
+
+ inline Object Acquire_Load() const;
+ inline void Release_Store(Object value) const;
+ inline Object Release_CompareAndSwap(Object old, Object target) const;
+};
+
+// A CompressedMaybeObjectSlot instance describes a kTaggedSize-sized field
+// ("slot") holding a possibly-weak compressed tagged pointer
+// (think: MaybeObject).
+// Its address() is the address of the slot.
+// The slot's contents can be read and written using operator* and store().
+class CompressedMaybeObjectSlot
+ : public SlotBase<CompressedMaybeObjectSlot, Tagged_t, kTaggedSize> {
+ public:
+ using TObject = MaybeObject;
+ using THeapObjectSlot = CompressedHeapObjectSlot;
+
+ static constexpr bool kCanBeWeak = true;
+
+ CompressedMaybeObjectSlot() : SlotBase(kNullAddress) {}
+ explicit CompressedMaybeObjectSlot(Address ptr) : SlotBase(ptr) {}
+ explicit CompressedMaybeObjectSlot(Object* ptr)
+ : SlotBase(reinterpret_cast<Address>(ptr)) {}
+ template <typename T>
+ explicit CompressedMaybeObjectSlot(SlotBase<T, TData, kSlotDataSize> slot)
+ : SlotBase(slot.address()) {}
+
+ inline MaybeObject operator*() const;
+ inline void store(MaybeObject value) const;
+
+ inline MaybeObject Relaxed_Load() const;
+ inline void Relaxed_Store(MaybeObject value) const;
+ inline void Release_CompareAndSwap(MaybeObject old, MaybeObject target) const;
+};
+
+// A CompressedHeapObjectSlot instance describes a kTaggedSize-sized field
+// ("slot") holding a weak or strong compressed pointer to a heap object (think:
+// HeapObjectReference).
+// Its address() is the address of the slot.
+// The slot's contents can be read and written using operator* and store().
+// In case it is known that the slot contains a strong heap object pointer,
+// ToHeapObject() can be used to retrieve that heap object.
+class CompressedHeapObjectSlot
+ : public SlotBase<CompressedHeapObjectSlot, Tagged_t, kTaggedSize> {
+ public:
+ CompressedHeapObjectSlot() : SlotBase(kNullAddress) {}
+ explicit CompressedHeapObjectSlot(Address ptr) : SlotBase(ptr) {}
+ explicit CompressedHeapObjectSlot(Object* ptr)
+ : SlotBase(reinterpret_cast<Address>(ptr)) {}
+ template <typename T>
+ explicit CompressedHeapObjectSlot(SlotBase<T, TData, kSlotDataSize> slot)
+ : SlotBase(slot.address()) {}
+
+ inline HeapObjectReference operator*() const;
+ inline void store(HeapObjectReference value) const;
+
+ inline HeapObject ToHeapObject() const;
+
+ inline void StoreHeapObject(HeapObject value) const;
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_TARGET_ARCH_64_BIT
+
+#endif // V8_PTR_COMPR_H_
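
All four compressed slot classes above derive from SlotBase<Subclass, Tagged_t, kTaggedSize>, a CRTP base that holds only the slot address while each subclass fixes the stored word type and the load/store policy. A reduced sketch of that shape (names illustrative, not V8's):

    #include <cstdint>
    #include <cstdio>

    // CRTP base: stores nothing but the slot location.
    template <typename Subclass, typename Data>
    class SlotBase {
     public:
      explicit SlotBase(Data* ptr) : ptr_(ptr) {}
      Data* location() const { return ptr_; }

     private:
      Data* ptr_;
    };

    // One concrete slot kind: plain 32-bit loads and stores.
    class Uint32Slot : public SlotBase<Uint32Slot, uint32_t> {
     public:
      using SlotBase::SlotBase;
      uint32_t operator*() const { return *location(); }
      void store(uint32_t value) const { *location() = value; }
    };

    int main() {
      uint32_t word = 0;
      Uint32Slot slot(&word);
      slot.store(0xBEEF);
      std::printf("slot holds %x\n", *slot);
      return 0;
    }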
diff --git a/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.cc b/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.cc
index f77d521728..634259f8df 100644
--- a/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.cc
+++ b/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.cc
@@ -7,13 +7,13 @@
#include "src/regexp/arm/regexp-macro-assembler-arm.h"
#include "src/assembler-inl.h"
-#include "src/code-stubs.h"
#include "src/heap/factory.h"
#include "src/log.h"
#include "src/macro-assembler.h"
#include "src/objects-inl.h"
#include "src/regexp/regexp-macro-assembler.h"
#include "src/regexp/regexp-stack.h"
+#include "src/snapshot/embedded-data.h"
#include "src/unicode.h"
namespace v8 {
@@ -24,7 +24,7 @@ namespace internal {
* This assembler uses the following register assignment convention
* - r4 : Temporarily stores the index of capture start after a matching pass
* for a global regexp.
- * - r5 : Pointer to current code object (Code*) including heap object tag.
+ * - r5 : Pointer to current Code object including heap object tag.
* - r6 : Current position in input, as negative offset from end of string.
* Please notice that this is the byte offset, not the character offset!
* - r7 : Currently loaded character. Must be loaded using
@@ -76,7 +76,7 @@ namespace internal {
* The data up to the return address must be placed there by the calling
* code and the remaining arguments are passed in registers, e.g. by calling the
* code entry as cast to a function with the signature:
- * int (*match)(String* input_string,
+ * int (*match)(String input_string,
* int start_index,
* Address start,
* Address end,
@@ -91,12 +91,14 @@ namespace internal {
#define __ ACCESS_MASM(masm_)
+const int RegExpMacroAssemblerARM::kRegExpCodeSize;
+
RegExpMacroAssemblerARM::RegExpMacroAssemblerARM(Isolate* isolate, Zone* zone,
Mode mode,
int registers_to_save)
: NativeRegExpMacroAssembler(isolate, zone),
- masm_(new MacroAssembler(isolate, nullptr, kRegExpCodeSize,
- CodeObjectRequired::kYes)),
+ masm_(new MacroAssembler(isolate, CodeObjectRequired::kYes,
+ NewAssemblerBuffer(kRegExpCodeSize))),
mode_(mode),
num_registers_(registers_to_save),
num_saved_registers_(registers_to_save),
@@ -110,7 +112,6 @@ RegExpMacroAssemblerARM::RegExpMacroAssemblerARM(Isolate* isolate, Zone* zone,
__ bind(&start_label_); // And then continue from here.
}
-
RegExpMacroAssemblerARM::~RegExpMacroAssemblerARM() {
delete masm_;
// Unuse labels in case we throw away the assembler without calling GetCode.
@@ -150,7 +151,7 @@ void RegExpMacroAssemblerARM::AdvanceRegister(int reg, int by) {
void RegExpMacroAssemblerARM::Backtrack() {
CheckPreemption();
- // Pop Code* offset from backtrack stack, add Code* and jump to location.
+ // Pop Code offset from backtrack stack, add Code and jump to location.
Pop(r0);
__ add(pc, r0, Operand(code_pointer()));
}
@@ -1046,11 +1047,14 @@ void RegExpMacroAssemblerARM::WriteStackPointerToRegister(int reg) {
// Private methods:
void RegExpMacroAssemblerARM::CallCheckStackGuardState() {
+ DCHECK(!isolate()->ShouldLoadConstantsFromRootList());
+ DCHECK(!masm_->options().isolate_independent_code);
+
__ PrepareCallCFunction(3);
// RegExp code frame pointer.
__ mov(r2, frame_pointer());
- // Code* of self.
+ // Code of self.
__ mov(r1, Operand(masm_->CodeObject()));
// We need to make room for the return address on the stack.
@@ -1064,8 +1068,19 @@ void RegExpMacroAssemblerARM::CallCheckStackGuardState() {
ExternalReference stack_guard_check =
ExternalReference::re_check_stack_guard_state(isolate());
__ mov(ip, Operand(stack_guard_check));
- DirectCEntryStub stub(isolate());
- stub.GenerateCall(masm_, ip);
+
+ if (FLAG_embedded_builtins) {
+ EmbeddedData d = EmbeddedData::FromBlob();
+ CHECK(Builtins::IsIsolateIndependent(Builtins::kDirectCEntry));
+ Address entry = d.InstructionStartOfBuiltin(Builtins::kDirectCEntry);
+ __ mov(lr, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
+ } else {
+ // TODO(v8:8519): Remove this once embedded builtins are on unconditionally.
+ Handle<Code> code = BUILTIN_CODE(isolate(), DirectCEntry);
+ __ mov(lr, Operand(reinterpret_cast<intptr_t>(code.location()),
+ RelocInfo::CODE_TARGET));
+ }
+ __ Call(lr);
// Drop the return address from the stack.
__ add(sp, sp, Operand(stack_alignment));
@@ -1089,15 +1104,15 @@ static T* frame_entry_address(Address re_frame, int frame_offset) {
return reinterpret_cast<T*>(re_frame + frame_offset);
}
-
int RegExpMacroAssemblerARM::CheckStackGuardState(Address* return_address,
- Code* re_code,
+ Address raw_code,
Address re_frame) {
+ Code re_code = Code::cast(Object(raw_code));
return NativeRegExpMacroAssembler::CheckStackGuardState(
frame_entry<Isolate*>(re_frame, kIsolate),
frame_entry<int>(re_frame, kStartIndex),
frame_entry<int>(re_frame, kDirectCall) == 1, return_address, re_code,
- frame_entry_address<String*>(re_frame, kInputString),
+ frame_entry_address<Address>(re_frame, kInputString),
frame_entry_address<const byte*>(re_frame, kInputStart),
frame_entry_address<const byte*>(re_frame, kInputEnd));
}
diff --git a/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.h b/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.h
index 7c988e962f..758fe88d6b 100644
--- a/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.h
+++ b/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.h
@@ -90,8 +90,8 @@ class RegExpMacroAssemblerARM: public NativeRegExpMacroAssembler {
// Called from RegExp if the stack-guard is triggered.
// If the code object is relocated, the return address is fixed before
// returning.
- static int CheckStackGuardState(Address* return_address,
- Code* re_code,
+ // {raw_code} is an Address because this is called via ExternalReference.
+ static int CheckStackGuardState(Address* return_address, Address raw_code,
Address re_frame);
private:
@@ -124,7 +124,7 @@ class RegExpMacroAssemblerARM: public NativeRegExpMacroAssembler {
static const int kRegisterZero = kStringStartMinusOne - kPointerSize;
// Initial size of code buffer.
- static const size_t kRegExpCodeSize = 1024;
+ static const int kRegExpCodeSize = 1024;
static const int kBacktrackConstantPoolSize = 4;
diff --git a/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc b/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc
index bf68d8061e..54ad44d68a 100644
--- a/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc
+++ b/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc
@@ -7,12 +7,12 @@
#include "src/regexp/arm64/regexp-macro-assembler-arm64.h"
#include "src/arm64/macro-assembler-arm64-inl.h"
-#include "src/code-stubs.h"
#include "src/log.h"
#include "src/macro-assembler.h"
#include "src/objects-inl.h"
#include "src/regexp/regexp-macro-assembler.h"
#include "src/regexp/regexp-stack.h"
+#include "src/snapshot/embedded-data.h"
#include "src/unicode.h"
namespace v8 {
@@ -23,7 +23,7 @@ namespace internal {
* This assembler uses the following register assignment convention:
* - w19 : Used to temporarily store a value before a call to C code.
* See CheckNotBackReferenceIgnoreCase.
- * - x20 : Pointer to the current code object (Code*),
+ * - x20 : Pointer to the current Code object,
* it includes the heap object tag.
* - w21 : Current position in input, as negative offset from
* the end of the string. Please notice that this is
@@ -86,7 +86,7 @@ namespace internal {
* The data up to the return address must be placed there by the calling
* code and the remaining arguments are passed in registers, e.g. by calling the
* code entry as cast to a function with the signature:
- * int (*match)(String* input_string,
+ * int (*match)(String input_string,
* int start_index,
* Address start,
* Address end,
@@ -101,12 +101,14 @@ namespace internal {
#define __ ACCESS_MASM(masm_)
+const int RegExpMacroAssemblerARM64::kRegExpCodeSize;
+
RegExpMacroAssemblerARM64::RegExpMacroAssemblerARM64(Isolate* isolate,
Zone* zone, Mode mode,
int registers_to_save)
: NativeRegExpMacroAssembler(isolate, zone),
- masm_(new MacroAssembler(isolate, nullptr, kRegExpCodeSize,
- CodeObjectRequired::kYes)),
+ masm_(new MacroAssembler(isolate, CodeObjectRequired::kYes,
+ NewAssemblerBuffer(kRegExpCodeSize))),
mode_(mode),
num_registers_(registers_to_save),
num_saved_registers_(registers_to_save),
@@ -123,7 +125,6 @@ RegExpMacroAssemblerARM64::RegExpMacroAssemblerARM64(Isolate* isolate,
__ Bind(&start_label_); // And then continue from here.
}
-
RegExpMacroAssemblerARM64::~RegExpMacroAssemblerARM64() {
delete masm_;
// Unuse labels in case we throw away the assembler without calling GetCode.
@@ -695,7 +696,7 @@ Handle<HeapObject> RegExpMacroAssemblerARM64::GetCode(Handle<String> source) {
__ Bind(&entry_label_);
// Arguments on entry:
- // x0: String* input
+ // x0: String input
// x1: int start_offset
// x2: byte* input_start
// x3: byte* input_end
@@ -1326,14 +1327,14 @@ static T* frame_entry_address(Address re_frame, int frame_offset) {
return reinterpret_cast<T*>(re_frame + frame_offset);
}
-
int RegExpMacroAssemblerARM64::CheckStackGuardState(
- Address* return_address, Code* re_code, Address re_frame, int start_index,
- const byte** input_start, const byte** input_end) {
+ Address* return_address, Address raw_code, Address re_frame,
+ int start_index, const byte** input_start, const byte** input_end) {
+ Code re_code = Code::cast(Object(raw_code));
return NativeRegExpMacroAssembler::CheckStackGuardState(
frame_entry<Isolate*>(re_frame, kIsolate), start_index,
frame_entry<int>(re_frame, kDirectCall) == 1, return_address, re_code,
- frame_entry_address<String*>(re_frame, kInput), input_start, input_end);
+ frame_entry_address<Address>(re_frame, kInput), input_start, input_end);
}
@@ -1353,6 +1354,9 @@ void RegExpMacroAssemblerARM64::CheckPosition(int cp_offset,
// Private methods:
void RegExpMacroAssemblerARM64::CallCheckStackGuardState(Register scratch) {
+ DCHECK(!isolate()->ShouldLoadConstantsFromRootList());
+ DCHECK(!masm_->options().isolate_independent_code);
+
// Allocate space on the stack to store the return address. The
// CheckStackGuardState C++ function will override it if the code
// moved. Allocate extra space for 2 arguments passed by pointers.
@@ -1373,19 +1377,34 @@ void RegExpMacroAssemblerARM64::CallCheckStackGuardState(Register scratch) {
__ Mov(w3, start_offset());
// RegExp code frame pointer.
__ Mov(x2, frame_pointer());
- // Code* of self.
+ // Code of self.
__ Mov(x1, Operand(masm_->CodeObject()));
// We need to pass a pointer to the return address as first argument.
- // The DirectCEntry stub will place the return address on the stack before
- // calling so the stack pointer will point to it.
+ // DirectCEntry will place the return address on the stack before calling so
+ // the stack pointer will point to it.
__ Mov(x0, sp);
+ DCHECK_EQ(scratch, x10);
ExternalReference check_stack_guard_state =
ExternalReference::re_check_stack_guard_state(isolate());
__ Mov(scratch, check_stack_guard_state);
- DirectCEntryStub stub(isolate());
- stub.GenerateCall(masm_, scratch);
+
+ if (FLAG_embedded_builtins) {
+ UseScratchRegisterScope temps(masm_);
+ Register scratch = temps.AcquireX();
+
+ EmbeddedData d = EmbeddedData::FromBlob();
+ CHECK(Builtins::IsIsolateIndependent(Builtins::kDirectCEntry));
+ Address entry = d.InstructionStartOfBuiltin(Builtins::kDirectCEntry);
+
+ __ Ldr(scratch, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
+ __ Call(scratch);
+ } else {
+ // TODO(v8:8519): Remove this once embedded builtins are on unconditionally.
+ Handle<Code> code = BUILTIN_CODE(isolate(), DirectCEntry);
+ __ Call(code, RelocInfo::CODE_TARGET);
+ }
// The input string may have been moved in memory, we need to reload it.
__ Peek(input_start(), kPointerSize);
diff --git a/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.h b/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.h
index 42a41bab5d..2ab65a1523 100644
--- a/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.h
+++ b/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.h
@@ -95,10 +95,9 @@ class RegExpMacroAssemblerARM64: public NativeRegExpMacroAssembler {
// Called from RegExp if the stack-guard is triggered.
// If the code object is relocated, the return address is fixed before
// returning.
- static int CheckStackGuardState(Address* return_address,
- Code* re_code,
- Address re_frame,
- int start_offset,
+ // {raw_code} is an Address because this is called via ExternalReference.
+ static int CheckStackGuardState(Address* return_address, Address raw_code,
+ Address re_frame, int start_offset,
const byte** input_start,
const byte** input_end);
@@ -128,7 +127,7 @@ class RegExpMacroAssemblerARM64: public NativeRegExpMacroAssembler {
static const int kFirstCaptureOnStack = kSuccessCounter - kXRegSize;
// Initial size of code buffer.
- static const size_t kRegExpCodeSize = 1024;
+ static const int kRegExpCodeSize = 1024;
// When initializing registers to a non-position value we can unroll
// the loop. Set the limit of registers to unroll.
diff --git a/deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.cc b/deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.cc
index 0d479cacb2..15b9e23692 100644
--- a/deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.cc
+++ b/deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.cc
@@ -45,7 +45,7 @@ namespace internal {
* - end of input (address of end of string)
* - start of input (address of first character in string)
* - start index (character index of start)
- * - String* input_string (location of a handle containing the string)
+ * - String input_string (location of a handle containing the string)
* --- frame alignment (if applicable) ---
* - return address
* ebp-> - old ebp
@@ -66,7 +66,7 @@ namespace internal {
*
* The data up to the return address must be placed there by the calling
* code, by calling the code entry as cast to a function with the signature:
- * int (*match)(String* input_string,
+ * int (*match)(String input_string,
* int start_index,
* Address start,
* Address end,
@@ -79,12 +79,14 @@ namespace internal {
#define __ ACCESS_MASM(masm_)
+const int RegExpMacroAssemblerIA32::kRegExpCodeSize;
+
RegExpMacroAssemblerIA32::RegExpMacroAssemblerIA32(Isolate* isolate, Zone* zone,
Mode mode,
int registers_to_save)
: NativeRegExpMacroAssembler(isolate, zone),
- masm_(new MacroAssembler(isolate, nullptr, kRegExpCodeSize,
- CodeObjectRequired::kYes)),
+ masm_(new MacroAssembler(isolate, CodeObjectRequired::kYes,
+ NewAssemblerBuffer(kRegExpCodeSize))),
mode_(mode),
num_registers_(registers_to_save),
num_saved_registers_(registers_to_save),
@@ -93,12 +95,14 @@ RegExpMacroAssemblerIA32::RegExpMacroAssemblerIA32(Isolate* isolate, Zone* zone,
success_label_(),
backtrack_label_(),
exit_label_() {
+ // Irregexp code clobbers ebx and spills/restores it at all boundaries.
+ masm_->set_root_array_available(false);
+
DCHECK_EQ(0, registers_to_save % 2);
__ jmp(&entry_label_); // We'll write the entry code later.
__ bind(&start_label_); // And then continue from here.
}
-
RegExpMacroAssemblerIA32::~RegExpMacroAssemblerIA32() {
delete masm_;
// Unuse labels in case we throw away the assembler without calling GetCode.
@@ -135,7 +139,7 @@ void RegExpMacroAssemblerIA32::AdvanceRegister(int reg, int by) {
void RegExpMacroAssemblerIA32::Backtrack() {
CheckPreemption();
- // Pop Code* offset from backtrack stack, add Code* and jump to location.
+ // Pop Code offset from backtrack stack, add Code and jump to location.
Pop(ebx);
__ add(ebx, Immediate(masm_->CodeObject()));
__ jmp(ebx);
@@ -585,7 +589,8 @@ bool RegExpMacroAssemblerIA32::CheckSpecialCharacterClass(uc16 type,
ExternalReference word_map =
ExternalReference::re_word_character_map(isolate());
__ test_b(current_character(),
- masm_->StaticArray(current_character(), times_1, word_map));
+ Operand(current_character(), times_1, word_map.address(),
+ RelocInfo::EXTERNAL_REFERENCE));
BranchOrBacktrack(zero, on_no_match);
return true;
}
@@ -600,7 +605,8 @@ bool RegExpMacroAssemblerIA32::CheckSpecialCharacterClass(uc16 type,
ExternalReference word_map =
ExternalReference::re_word_character_map(isolate());
__ test_b(current_character(),
- masm_->StaticArray(current_character(), times_1, word_map));
+ Operand(current_character(), times_1, word_map.address(),
+ RelocInfo::EXTERNAL_REFERENCE));
BranchOrBacktrack(not_zero, on_no_match);
if (mode_ != LATIN1) {
__ bind(&done);
@@ -681,7 +687,7 @@ Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
ExternalReference stack_limit =
ExternalReference::address_of_stack_limit(isolate());
__ mov(ecx, esp);
- __ sub(ecx, masm_->StaticVariable(stack_limit));
+ __ sub(ecx, StaticVariable(stack_limit));
// Handle it if the stack pointer is already below the stack limit.
__ j(below_equal, &stack_limit_hit);
// Check if there is room for the variable number of registers above
@@ -1094,7 +1100,7 @@ void RegExpMacroAssemblerIA32::CallCheckStackGuardState(Register scratch) {
__ PrepareCallCFunction(num_arguments, scratch);
// RegExp code frame pointer.
__ mov(Operand(esp, 2 * kPointerSize), ebp);
- // Code* of self.
+ // Code of self.
__ mov(Operand(esp, 1 * kPointerSize), Immediate(masm_->CodeObject()));
// Next address on the stack (will be address of return address).
__ lea(eax, Operand(esp, -kPointerSize));
@@ -1104,6 +1110,9 @@ void RegExpMacroAssemblerIA32::CallCheckStackGuardState(Register scratch) {
__ CallCFunction(check_stack_guard, num_arguments);
}
+Operand RegExpMacroAssemblerIA32::StaticVariable(const ExternalReference& ext) {
+ return Operand(ext.address(), RelocInfo::EXTERNAL_REFERENCE);
+}
// Helper function for reading a value out of a stack frame.
template <typename T>
@@ -1117,15 +1126,15 @@ static T* frame_entry_address(Address re_frame, int frame_offset) {
return reinterpret_cast<T*>(re_frame + frame_offset);
}
-
int RegExpMacroAssemblerIA32::CheckStackGuardState(Address* return_address,
- Code* re_code,
+ Address raw_code,
Address re_frame) {
+ Code re_code = Code::cast(Object(raw_code));
return NativeRegExpMacroAssembler::CheckStackGuardState(
frame_entry<Isolate*>(re_frame, kIsolate),
frame_entry<int>(re_frame, kStartIndex),
frame_entry<int>(re_frame, kDirectCall) == 1, return_address, re_code,
- frame_entry_address<String*>(re_frame, kInputString),
+ frame_entry_address<Address>(re_frame, kInputString),
frame_entry_address<const byte*>(re_frame, kInputStart),
frame_entry_address<const byte*>(re_frame, kInputEnd));
}
@@ -1219,7 +1228,7 @@ void RegExpMacroAssemblerIA32::CheckPreemption() {
Label no_preempt;
ExternalReference stack_limit =
ExternalReference::address_of_stack_limit(isolate());
- __ cmp(esp, masm_->StaticVariable(stack_limit));
+ __ cmp(esp, StaticVariable(stack_limit));
__ j(above, &no_preempt);
SafeCall(&check_preempt_label_);
@@ -1232,7 +1241,7 @@ void RegExpMacroAssemblerIA32::CheckStackLimit() {
Label no_stack_overflow;
ExternalReference stack_limit =
ExternalReference::address_of_regexp_stack_limit(isolate());
- __ cmp(backtrack_stackpointer(), masm_->StaticVariable(stack_limit));
+ __ cmp(backtrack_stackpointer(), StaticVariable(stack_limit));
__ j(above, &no_stack_overflow);
SafeCall(&stack_overflow_label_);
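
The StaticVariable helper these hunks introduce replaces the old MacroAssembler method of the same name: it wraps an external cell's absolute address in an Operand tagged RelocInfo::EXTERNAL_REFERENCE so the serializer can relocate it. Condensed from the hunks above (V8-internal API, not a standalone example):

    Operand RegExpMacroAssemblerIA32::StaticVariable(const ExternalReference& ext) {
      // Absolute address of the cell, recorded as an external-reference reloc.
      return Operand(ext.address(), RelocInfo::EXTERNAL_REFERENCE);
    }

    // Usage, as in CheckPreemption() above:
    __ cmp(esp, StaticVariable(stack_limit));  // sp vs. the isolate's limit cell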
diff --git a/deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.h b/deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.h
index 02afc999d1..7757506b49 100644
--- a/deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.h
+++ b/deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.h
@@ -88,11 +88,12 @@ class RegExpMacroAssemblerIA32: public NativeRegExpMacroAssembler {
// Called from RegExp if the stack-guard is triggered.
// If the code object is relocated, the return address is fixed before
// returning.
- static int CheckStackGuardState(Address* return_address,
- Code* re_code,
+ // {raw_code} is an Address because this is called via ExternalReference.
+ static int CheckStackGuardState(Address* return_address, Address raw_code,
Address re_frame);
private:
+ Operand StaticVariable(const ExternalReference& ext);
// Offsets from ebp of function parameters and stored registers.
static const int kFramePointer = 0;
// Above the frame pointer - function parameters and return address.
@@ -123,7 +124,7 @@ class RegExpMacroAssemblerIA32: public NativeRegExpMacroAssembler {
static const int kRegisterZero = kStringStartMinusOne - kPointerSize;
// Initial size of code buffer.
- static const size_t kRegExpCodeSize = 1024;
+ static const int kRegExpCodeSize = 1024;
// Load a number of characters at the given offset from the
// current position, into the current-character register.
diff --git a/deps/v8/src/regexp/interpreter-irregexp.cc b/deps/v8/src/regexp/interpreter-irregexp.cc
index 2c1b890c4f..f98dc062cf 100644
--- a/deps/v8/src/regexp/interpreter-irregexp.cc
+++ b/deps/v8/src/regexp/interpreter-irregexp.cc
@@ -598,7 +598,7 @@ RegExpImpl::IrregexpResult IrregexpInterpreter::Match(
DisallowHeapAllocation no_gc;
const byte* code_base = code_array->GetDataStartAddress();
uc16 previous_char = '\n';
- String::FlatContent subject_content = subject->GetFlatContent();
+ String::FlatContent subject_content = subject->GetFlatContent(no_gc);
if (subject_content.IsOneByte()) {
Vector<const uint8_t> subject_vector = subject_content.ToOneByteVector();
if (start_position != 0) previous_char = subject_vector[start_position - 1];
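
GetFlatContent now takes the active DisallowHeapAllocation scope as an argument, so "no GC may run while these raw pointers are live" is enforced by the signature instead of by convention. A minimal standalone sketch of the witness-parameter idiom, with toy classes rather than V8's:

    #include <cstdio>

    // Toy stand-in for v8::internal::DisallowHeapAllocation.
    class DisallowHeapAllocation {};

    // The raw view can only be obtained while a no-GC scope object exists;
    // the otherwise-unused reference parameter acts as a compile-time witness.
    const char* GetFlatContent(const char* backing,
                               const DisallowHeapAllocation& /*no_gc*/) {
      return backing;  // interior pointer, valid only while GC is barred
    }

    int main() {
      DisallowHeapAllocation no_gc;
      std::printf("%s\n", GetFlatContent("subject", no_gc));
      return 0;
    }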
diff --git a/deps/v8/src/regexp/jsregexp-inl.h b/deps/v8/src/regexp/jsregexp-inl.h
index 756210b218..926fec2868 100644
--- a/deps/v8/src/regexp/jsregexp-inl.h
+++ b/deps/v8/src/regexp/jsregexp-inl.h
@@ -75,6 +75,10 @@ int32_t* RegExpImpl::GlobalCache::LastSuccessfulMatch() {
return &register_array_[index];
}
+RegExpEngine::CompilationResult::CompilationResult(Isolate* isolate,
+ const char* error_message)
+ : error_message(error_message),
+ code(ReadOnlyRoots(isolate).the_hole_value()) {}
} // namespace internal
} // namespace v8
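
Defining this constructor in jsregexp-inl.h (it is declared inline in the header; see the jsregexp.h hunk further down) lets jsregexp.h stop depending on ReadOnlyRoots and the isolate internals it drags in; only translation units that include the -inl.h pay for those headers. A standalone sketch of the pattern, with toy names:

    // widget.h -- declaration only, no heavy includes required.
    struct Widget {
      inline explicit Widget(int seed);  // body lives in widget-inl.h
      int value;
    };

    // widget-inl.h -- pulls in the heavy dependency and defines the ctor.
    #include <cstdlib>  // stand-in for the expensive header
    inline Widget::Widget(int seed) : value(std::abs(seed)) {}

    // Any .cc that constructs a Widget includes widget-inl.h.
    int main() { return Widget(-3).value == 3 ? 0 : 1; }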
diff --git a/deps/v8/src/regexp/jsregexp.cc b/deps/v8/src/regexp/jsregexp.cc
index 64028d3927..117ae6cd44 100644
--- a/deps/v8/src/regexp/jsregexp.cc
+++ b/deps/v8/src/regexp/jsregexp.cc
@@ -14,7 +14,7 @@
#include "src/execution.h"
#include "src/heap/factory.h"
#include "src/isolate-inl.h"
-#include "src/messages.h"
+#include "src/message-template.h"
#include "src/ostreams.h"
#include "src/regexp/interpreter-irregexp.h"
#include "src/regexp/jsregexp-inl.h"
@@ -212,7 +212,7 @@ void RegExpImpl::AtomCompile(Isolate* isolate, Handle<JSRegExp> re,
static void SetAtomLastCapture(Isolate* isolate,
Handle<RegExpMatchInfo> last_match_info,
- String* subject, int from, int to) {
+ String subject, int from, int to) {
SealHandleScope shs(isolate);
last_match_info->SetNumberOfCaptureRegisters(2);
last_match_info->SetLastSubject(subject);
@@ -230,7 +230,7 @@ int RegExpImpl::AtomExecRaw(Isolate* isolate, Handle<JSRegExp> regexp,
subject = String::Flatten(isolate, subject);
DisallowHeapAllocation no_gc; // ensure vectors stay valid
- String* needle = String::cast(regexp->DataAt(JSRegExp::kAtomPatternIndex));
+ String needle = String::cast(regexp->DataAt(JSRegExp::kAtomPatternIndex));
int needle_len = needle->length();
DCHECK(needle->IsFlat());
DCHECK_LT(0, needle_len);
@@ -240,8 +240,8 @@ int RegExpImpl::AtomExecRaw(Isolate* isolate, Handle<JSRegExp> regexp,
}
for (int i = 0; i < output_size; i += 2) {
- String::FlatContent needle_content = needle->GetFlatContent();
- String::FlatContent subject_content = subject->GetFlatContent();
+ String::FlatContent needle_content = needle->GetFlatContent(no_gc);
+ String::FlatContent subject_content = subject->GetFlatContent(no_gc);
DCHECK(needle_content.IsFlat());
DCHECK(subject_content.IsFlat());
// dispatch on type of strings
@@ -299,7 +299,7 @@ Handle<Object> RegExpImpl::AtomExec(Isolate* isolate, Handle<JSRegExp> re,
bool RegExpImpl::EnsureCompiledIrregexp(Isolate* isolate, Handle<JSRegExp> re,
Handle<String> sample_subject,
bool is_one_byte) {
- Object* compiled_code = re->DataAt(JSRegExp::code_index(is_one_byte));
+ Object compiled_code = re->DataAt(JSRegExp::code_index(is_one_byte));
#ifdef V8_INTERPRETED_REGEXP
if (compiled_code->IsByteArray()) return true;
#else // V8_INTERPRETED_REGEXP (RegExp native code)
@@ -315,7 +315,7 @@ bool RegExpImpl::CompileIrregexp(Isolate* isolate, Handle<JSRegExp> re,
Zone zone(isolate->allocator(), ZONE_NAME);
PostponeInterruptsScope postpone(isolate);
#ifdef DEBUG
- Object* entry = re->DataAt(JSRegExp::code_index(is_one_byte));
+ Object entry = re->DataAt(JSRegExp::code_index(is_one_byte));
// When arriving here entry can only be a smi representing an uncompiled
// regexp.
DCHECK(entry->IsSmi());
@@ -363,18 +363,16 @@ bool RegExpImpl::CompileIrregexp(Isolate* isolate, Handle<JSRegExp> re,
return true;
}
-
-int RegExpImpl::IrregexpMaxRegisterCount(FixedArray* re) {
+int RegExpImpl::IrregexpMaxRegisterCount(FixedArray re) {
return Smi::cast(
re->get(JSRegExp::kIrregexpMaxRegisterCountIndex))->value();
}
-
-void RegExpImpl::SetIrregexpMaxRegisterCount(FixedArray* re, int value) {
+void RegExpImpl::SetIrregexpMaxRegisterCount(FixedArray re, int value) {
re->set(JSRegExp::kIrregexpMaxRegisterCountIndex, Smi::FromInt(value));
}
-void RegExpImpl::SetIrregexpCaptureNameMap(FixedArray* re,
+void RegExpImpl::SetIrregexpCaptureNameMap(FixedArray re,
Handle<FixedArray> value) {
if (value.is_null()) {
re->set(JSRegExp::kIrregexpCaptureNameMapIndex, Smi::kZero);
@@ -383,22 +381,19 @@ void RegExpImpl::SetIrregexpCaptureNameMap(FixedArray* re,
}
}
-int RegExpImpl::IrregexpNumberOfCaptures(FixedArray* re) {
+int RegExpImpl::IrregexpNumberOfCaptures(FixedArray re) {
return Smi::ToInt(re->get(JSRegExp::kIrregexpCaptureCountIndex));
}
-
-int RegExpImpl::IrregexpNumberOfRegisters(FixedArray* re) {
+int RegExpImpl::IrregexpNumberOfRegisters(FixedArray re) {
return Smi::ToInt(re->get(JSRegExp::kIrregexpMaxRegisterCountIndex));
}
-
-ByteArray* RegExpImpl::IrregexpByteCode(FixedArray* re, bool is_one_byte) {
+ByteArray RegExpImpl::IrregexpByteCode(FixedArray re, bool is_one_byte) {
return ByteArray::cast(re->get(JSRegExp::code_index(is_one_byte)));
}
-
-Code* RegExpImpl::IrregexpNativeCode(FixedArray* re, bool is_one_byte) {
+Code RegExpImpl::IrregexpNativeCode(FixedArray re, bool is_one_byte) {
return Code::cast(re->get(JSRegExp::code_index(is_one_byte)));
}
@@ -415,7 +410,7 @@ int RegExpImpl::IrregexpPrepare(Isolate* isolate, Handle<JSRegExp> regexp,
DCHECK(subject->IsFlat());
// Check representation of the underlying storage.
- bool is_one_byte = subject->IsOneByteRepresentationUnderneath();
+ bool is_one_byte = String::IsOneByteRepresentationUnderneath(*subject);
if (!EnsureCompiledIrregexp(isolate, regexp, subject, is_one_byte)) return -1;
#ifdef V8_INTERPRETED_REGEXP
@@ -441,7 +436,7 @@ int RegExpImpl::IrregexpExecRaw(Isolate* isolate, Handle<JSRegExp> regexp,
DCHECK_LE(index, subject->length());
DCHECK(subject->IsFlat());
- bool is_one_byte = subject->IsOneByteRepresentationUnderneath();
+ bool is_one_byte = String::IsOneByteRepresentationUnderneath(*subject);
#ifndef V8_INTERPRETED_REGEXP
DCHECK(output_size >= (IrregexpNumberOfCaptures(*irregexp) + 1) * 2);
@@ -477,7 +472,7 @@ int RegExpImpl::IrregexpExecRaw(Isolate* isolate, Handle<JSRegExp> regexp,
// being internal and external, and even between being Latin1 and UC16,
// but the characters are always the same).
IrregexpPrepare(isolate, regexp, subject);
- is_one_byte = subject->IsOneByteRepresentationUnderneath();
+ is_one_byte = String::IsOneByteRepresentationUnderneath(*subject);
} while (true);
UNREACHABLE();
#else // V8_INTERPRETED_REGEXP
@@ -524,7 +519,7 @@ MaybeHandle<Object> RegExpImpl::IrregexpExec(
// Prepare space for the return values.
#if defined(V8_INTERPRETED_REGEXP) && defined(DEBUG)
if (FLAG_trace_regexp_bytecodes) {
- String* pattern = regexp->Pattern();
+ String pattern = regexp->Pattern();
PrintF("\n\nRegexp match: /%s/\n\n", pattern->ToCString().get());
PrintF("\n\nSubject string: '%s'\n\n", subject->ToCString().get());
}
@@ -6704,6 +6699,8 @@ RegExpEngine::CompilationResult RegExpEngine::Compile(
// Create the correct assembler for the architecture.
#ifndef V8_INTERPRETED_REGEXP
+ DCHECK(!FLAG_jitless);
+
// Native regexp implementation.
NativeRegExpMacroAssembler::Mode mode =
@@ -6780,12 +6777,11 @@ bool RegExpEngine::TooMuchRegExpCode(Isolate* isolate, Handle<String> pattern) {
return too_much;
}
-
-Object* RegExpResultsCache::Lookup(Heap* heap, String* key_string,
- Object* key_pattern,
- FixedArray** last_match_cache,
- ResultsCacheType type) {
- FixedArray* cache;
+Object RegExpResultsCache::Lookup(Heap* heap, String key_string,
+ Object key_pattern,
+ FixedArray* last_match_cache,
+ ResultsCacheType type) {
+ FixedArray cache;
if (!key_string->IsInternalizedString()) return Smi::kZero;
if (type == STRING_SPLIT_SUBSTRINGS) {
DCHECK(key_pattern->IsString());
@@ -6814,7 +6810,6 @@ Object* RegExpResultsCache::Lookup(Heap* heap, String* key_string,
return cache->get(index + kArrayOffset);
}
-
void RegExpResultsCache::Enter(Isolate* isolate, Handle<String> key_string,
Handle<Object> key_pattern,
Handle<FixedArray> value_array,
@@ -6874,8 +6869,7 @@ void RegExpResultsCache::Enter(Isolate* isolate, Handle<String> key_string,
ReadOnlyRoots(isolate).fixed_cow_array_map());
}
-
-void RegExpResultsCache::Clear(FixedArray* cache) {
+void RegExpResultsCache::Clear(FixedArray cache) {
for (int i = 0; i < kRegExpResultsCacheSize; i++) {
cache->set(i, Smi::kZero);
}
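
The String* -> String, Object* -> Object and FixedArray* -> FixedArray changes running through this file are part of V8's migration of heap object references from raw C++ pointers to one-word value types wrapping a tagged Address (groundwork for pointer compression). A standalone sketch of the shape of such a value type, far simpler than V8's real Object hierarchy:

    #include <cassert>
    #include <cstdint>

    using Address = uintptr_t;

    // Value type: copying the reference copies one word, never the object.
    class Object {
     public:
      explicit Object(Address ptr) : ptr_(ptr) {}
      Address ptr() const { return ptr_; }
     private:
      Address ptr_;  // tagged pointer into the heap
    };

    class Smi : public Object {
     public:
      static Smi cast(Object o) { return Smi(o.ptr()); }  // type-checked in V8
      int value() const { return static_cast<int>(ptr()) >> 1; }
     private:
      explicit Smi(Address ptr) : Object(ptr) {}
    };

    int main() {
      Object o(static_cast<Address>(42 << 1));  // a tagged small integer
      Object copy = o;  // one-word copy, like the FixedArray re parameters
      assert(Smi::cast(copy).value() == 42);
      return 0;
    }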
diff --git a/deps/v8/src/regexp/jsregexp.h b/deps/v8/src/regexp/jsregexp.h
index ee9d167aa2..dffde6bb73 100644
--- a/deps/v8/src/regexp/jsregexp.h
+++ b/deps/v8/src/regexp/jsregexp.h
@@ -6,7 +6,6 @@
#define V8_REGEXP_JSREGEXP_H_
#include "src/allocation.h"
-#include "src/assembler.h"
#include "src/isolate.h"
#include "src/objects/js-regexp.h"
#include "src/regexp/regexp-ast.h"
@@ -169,14 +168,14 @@ class RegExpImpl {
};
// For acting on the JSRegExp data FixedArray.
- static int IrregexpMaxRegisterCount(FixedArray* re);
- static void SetIrregexpMaxRegisterCount(FixedArray* re, int value);
- static void SetIrregexpCaptureNameMap(FixedArray* re,
+ static int IrregexpMaxRegisterCount(FixedArray re);
+ static void SetIrregexpMaxRegisterCount(FixedArray re, int value);
+ static void SetIrregexpCaptureNameMap(FixedArray re,
Handle<FixedArray> value);
- static int IrregexpNumberOfCaptures(FixedArray* re);
- static int IrregexpNumberOfRegisters(FixedArray* re);
- static ByteArray* IrregexpByteCode(FixedArray* re, bool is_one_byte);
- static Code* IrregexpNativeCode(FixedArray* re, bool is_one_byte);
+ static int IrregexpNumberOfCaptures(FixedArray re);
+ static int IrregexpNumberOfRegisters(FixedArray re);
+ static ByteArray IrregexpByteCode(FixedArray re, bool is_one_byte);
+ static Code IrregexpNativeCode(FixedArray re, bool is_one_byte);
// Limit the space regexps take up on the heap. In order to limit this we
// would like to keep track of the amount of regexp code on the heap. This
@@ -1505,15 +1504,12 @@ struct RegExpCompileData {
class RegExpEngine: public AllStatic {
public:
struct CompilationResult {
- CompilationResult(Isolate* isolate, const char* error_message)
- : error_message(error_message),
- code(ReadOnlyRoots(isolate).the_hole_value()),
- num_registers(0) {}
- CompilationResult(Object* code, int registers)
- : error_message(nullptr), code(code), num_registers(registers) {}
- const char* error_message;
- Object* code;
- int num_registers;
+ inline CompilationResult(Isolate* isolate, const char* error_message);
+ CompilationResult(Object code, int registers)
+ : code(code), num_registers(registers) {}
+ const char* const error_message = nullptr;
+ Object const code;
+ int const num_registers = 0;
};
static CompilationResult Compile(Isolate* isolate, Zone* zone,
@@ -1535,14 +1531,14 @@ class RegExpResultsCache : public AllStatic {
// Attempt to retrieve a cached result. On failure, 0 is returned as a Smi.
// On success, the returned result is guaranteed to be a COW-array.
- static Object* Lookup(Heap* heap, String* key_string, Object* key_pattern,
- FixedArray** last_match_out, ResultsCacheType type);
+ static Object Lookup(Heap* heap, String key_string, Object key_pattern,
+ FixedArray* last_match_out, ResultsCacheType type);
// Attempt to add value_array to the cache specified by type. On success,
// value_array is turned into a COW-array.
static void Enter(Isolate* isolate, Handle<String> key_string,
Handle<Object> key_pattern, Handle<FixedArray> value_array,
Handle<FixedArray> last_match_cache, ResultsCacheType type);
- static void Clear(FixedArray* cache);
+ static void Clear(FixedArray cache);
static const int kRegExpResultsCacheSize = 0x100;
private:
diff --git a/deps/v8/src/regexp/mips/OWNERS b/deps/v8/src/regexp/mips/OWNERS
index 8bbcab4c2d..b455d9ef29 100644
--- a/deps/v8/src/regexp/mips/OWNERS
+++ b/deps/v8/src/regexp/mips/OWNERS
@@ -1,2 +1,3 @@
-ibogosavljevic@wavecomp.com
-skovacevic@wavecomp.com \ No newline at end of file
+arikalo@wavecomp.com
+prudic@wavecomp.com
+skovacevic@wavecomp.com
diff --git a/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.cc b/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.cc
index 36ac93275e..81f7aa73c8 100644
--- a/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.cc
+++ b/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.cc
@@ -7,12 +7,12 @@
#include "src/regexp/mips/regexp-macro-assembler-mips.h"
#include "src/assembler-inl.h"
-#include "src/code-stubs.h"
#include "src/log.h"
#include "src/macro-assembler.h"
#include "src/objects-inl.h"
#include "src/regexp/regexp-macro-assembler.h"
#include "src/regexp/regexp-stack.h"
+#include "src/snapshot/embedded-data.h"
#include "src/unicode.h"
namespace v8 {
@@ -23,7 +23,7 @@ namespace internal {
* This assembler uses the following register assignment convention
* - t7 : Temporarily stores the index of capture start after a matching pass
* for a global regexp.
- * - t1 : Pointer to current code object (Code*) including heap object tag.
+ * - t1 : Pointer to current Code object including heap object tag.
* - t2 : Current position in input, as negative offset from end of string.
* Please notice that this is the byte offset, not the character offset!
* - t3 : Currently loaded character. Must be loaded using
@@ -75,7 +75,7 @@ namespace internal {
* The data up to the return address must be placed there by the calling
* code and the remaining arguments are passed in registers, e.g. by calling the
* code entry as cast to a function with the signature:
- * int (*match)(String* input_string,
+ * int (*match)(String input_string,
* int start_index,
* Address start,
* Address end,
@@ -90,12 +90,14 @@ namespace internal {
#define __ ACCESS_MASM(masm_)
+const int RegExpMacroAssemblerMIPS::kRegExpCodeSize;
+
RegExpMacroAssemblerMIPS::RegExpMacroAssemblerMIPS(Isolate* isolate, Zone* zone,
Mode mode,
int registers_to_save)
: NativeRegExpMacroAssembler(isolate, zone),
- masm_(new MacroAssembler(isolate, nullptr, kRegExpCodeSize,
- CodeObjectRequired::kYes)),
+ masm_(new MacroAssembler(isolate, CodeObjectRequired::kYes,
+ NewAssemblerBuffer(kRegExpCodeSize))),
mode_(mode),
num_registers_(registers_to_save),
num_saved_registers_(registers_to_save),
@@ -115,7 +117,6 @@ RegExpMacroAssemblerMIPS::RegExpMacroAssemblerMIPS(Isolate* isolate, Zone* zone,
__ bind(&start_label_); // And then continue from here.
}
-
RegExpMacroAssemblerMIPS::~RegExpMacroAssemblerMIPS() {
delete masm_;
// Unuse labels in case we throw away the assembler without calling GetCode.
@@ -156,7 +157,7 @@ void RegExpMacroAssemblerMIPS::AdvanceRegister(int reg, int by) {
void RegExpMacroAssemblerMIPS::Backtrack() {
CheckPreemption();
- // Pop Code* offset from backtrack stack, add Code* and jump to location.
+ // Pop Code offset from backtrack stack, add Code and jump to location.
Pop(a0);
__ Addu(a0, a0, code_pointer());
__ Jump(a0);
@@ -1087,6 +1088,9 @@ bool RegExpMacroAssemblerMIPS::CanReadUnaligned() {
// Private methods:
void RegExpMacroAssemblerMIPS::CallCheckStackGuardState(Register scratch) {
+ DCHECK(!isolate()->ShouldLoadConstantsFromRootList());
+ DCHECK(!masm_->options().isolate_independent_code);
+
int stack_alignment = base::OS::ActivationFrameAlignment();
// Align the stack pointer and save the original sp value on the stack.
@@ -1097,16 +1101,16 @@ void RegExpMacroAssemblerMIPS::CallCheckStackGuardState(Register scratch) {
__ sw(scratch, MemOperand(sp));
__ mov(a2, frame_pointer());
- // Code* of self.
+ // Code of self.
__ li(a1, Operand(masm_->CodeObject()), CONSTANT_SIZE);
// We need to make room for the return address on the stack.
DCHECK(IsAligned(stack_alignment, kPointerSize));
__ Subu(sp, sp, Operand(stack_alignment));
- // Stack pointer now points to cell where return address is to be written.
- // Arguments are in registers, meaning we teat the return address as
- // argument 5. Since DirectCEntryStub will handleallocating space for the C
+ // The stack pointer now points to the cell where the return address will be
+ // written. Arguments are in registers, meaning we treat the return address
+ // as argument 5. Since DirectCEntry will handle allocating space for the C
// argument slots, we don't need to care about that here. This is how the
// stack will look (sp meaning the value of sp at this moment):
// [sp + 3] - empty slot if needed for alignment.
@@ -1120,10 +1124,24 @@ void RegExpMacroAssemblerMIPS::CallCheckStackGuardState(Register scratch) {
ExternalReference stack_guard_check =
ExternalReference::re_check_stack_guard_state(masm_->isolate());
__ li(t9, Operand(stack_guard_check));
- DirectCEntryStub stub(isolate());
- stub.GenerateCall(masm_, t9);
- // DirectCEntryStub allocated space for the C argument slots so we have to
+ if (FLAG_embedded_builtins) {
+ EmbeddedData d = EmbeddedData::FromBlob();
+ CHECK(Builtins::IsIsolateIndependent(Builtins::kDirectCEntry));
+ Address entry = d.InstructionStartOfBuiltin(Builtins::kDirectCEntry);
+ __ li(kScratchReg, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
+ __ Call(kScratchReg);
+ } else {
+ // TODO(v8:8519): Remove this once embedded builtins are on unconditionally.
+ Handle<Code> code = BUILTIN_CODE(isolate(), DirectCEntry);
+ __ li(kScratchReg,
+ Operand(reinterpret_cast<intptr_t>(code.location()),
+ RelocInfo::CODE_TARGET),
+ CONSTANT_SIZE);
+ __ Call(kScratchReg);
+ }
+
+ // DirectCEntry allocated space for the C argument slots so we have to
// drop them with the return address from the stack with loading saved sp.
// At this point stack must look:
// [sp + 7] - empty slot if needed for alignment.
@@ -1152,15 +1170,15 @@ static T* frame_entry_address(Address re_frame, int frame_offset) {
return reinterpret_cast<T*>(re_frame + frame_offset);
}
-
int RegExpMacroAssemblerMIPS::CheckStackGuardState(Address* return_address,
- Code* re_code,
+ Address raw_code,
Address re_frame) {
+ Code re_code = Code::cast(Object(raw_code));
return NativeRegExpMacroAssembler::CheckStackGuardState(
frame_entry<Isolate*>(re_frame, kIsolate),
frame_entry<int>(re_frame, kStartIndex),
frame_entry<int>(re_frame, kDirectCall) == 1, return_address, re_code,
- frame_entry_address<String*>(re_frame, kInputString),
+ frame_entry_address<Address>(re_frame, kInputString),
frame_entry_address<const byte*>(re_frame, kInputStart),
frame_entry_address<const byte*>(re_frame, kInputEnd));
}
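
The DirectCEntryStub call is gone; the stub was converted to the DirectCEntry builtin. With embedded builtins the call target is an off-heap instruction address taken from the embedded blob; otherwise the on-heap Code object is used. The branch, condensed from the hunk above (V8-internal API, not standalone):

    if (FLAG_embedded_builtins) {
      EmbeddedData d = EmbeddedData::FromBlob();
      // Off-heap entry point; no on-heap Code object is involved.
      Address entry = d.InstructionStartOfBuiltin(Builtins::kDirectCEntry);
      __ li(kScratchReg, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
    } else {
      // Fallback while embedded builtins remain optional (TODO v8:8519).
      Handle<Code> code = BUILTIN_CODE(isolate(), DirectCEntry);
      __ li(kScratchReg, Operand(reinterpret_cast<intptr_t>(code.location()),
                                 RelocInfo::CODE_TARGET), CONSTANT_SIZE);
    }
    __ Call(kScratchReg);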
diff --git a/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.h b/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.h
index 6d61601a40..97cdef8b83 100644
--- a/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.h
+++ b/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.h
@@ -90,8 +90,8 @@ class RegExpMacroAssemblerMIPS: public NativeRegExpMacroAssembler {
// Called from RegExp if the stack-guard is triggered.
// If the code object is relocated, the return address is fixed before
// returning.
- static int CheckStackGuardState(Address* return_address,
- Code* re_code,
+ // {raw_code} is an Address because this is called via ExternalReference.
+ static int CheckStackGuardState(Address* return_address, Address raw_code,
Address re_frame);
private:
@@ -126,7 +126,7 @@ class RegExpMacroAssemblerMIPS: public NativeRegExpMacroAssembler {
static const int kRegisterZero = kStringStartMinusOne - kPointerSize;
// Initial size of code buffer.
- static const size_t kRegExpCodeSize = 1024;
+ static const int kRegExpCodeSize = 1024;
// Load a number of characters at the given offset from the
// current position, into the current-character register.
diff --git a/deps/v8/src/regexp/mips64/OWNERS b/deps/v8/src/regexp/mips64/OWNERS
index 8bbcab4c2d..b455d9ef29 100644
--- a/deps/v8/src/regexp/mips64/OWNERS
+++ b/deps/v8/src/regexp/mips64/OWNERS
@@ -1,2 +1,3 @@
-ibogosavljevic@wavecomp.com
-skovacevic@wavecomp.com \ No newline at end of file
+arikalo@wavecomp.com
+prudic@wavecomp.com
+skovacevic@wavecomp.com
diff --git a/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.cc b/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.cc
index 17a8ce8752..a92b0f59b8 100644
--- a/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.cc
+++ b/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.cc
@@ -7,12 +7,12 @@
#include "src/regexp/mips64/regexp-macro-assembler-mips64.h"
#include "src/assembler-inl.h"
-#include "src/code-stubs.h"
#include "src/log.h"
#include "src/macro-assembler.h"
#include "src/objects-inl.h"
#include "src/regexp/regexp-macro-assembler.h"
#include "src/regexp/regexp-stack.h"
+#include "src/snapshot/embedded-data.h"
#include "src/unicode.h"
namespace v8 {
@@ -25,7 +25,7 @@ namespace internal {
* This assembler uses the following register assignment convention
* - t3 : Temporarily stores the index of capture start after a matching pass
* for a global regexp.
- * - a5 : Pointer to current code object (Code*) including heap object tag.
+ * - a5 : Pointer to current Code object including heap object tag.
* - a6 : Current position in input, as negative offset from end of string.
* Please notice that this is the byte offset, not the character offset!
* - a7 : Currently loaded character. Must be loaded using
@@ -110,7 +110,7 @@ namespace internal {
* The data up to the return address must be placed there by the calling
* code and the remaining arguments are passed in registers, e.g. by calling the
* code entry as cast to a function with the signature:
- * int (*match)(String* input_string,
+ * int (*match)(String input_string,
* int start_index,
* Address start,
* Address end,
@@ -127,12 +127,14 @@ namespace internal {
#define __ ACCESS_MASM(masm_)
+const int RegExpMacroAssemblerMIPS::kRegExpCodeSize;
+
RegExpMacroAssemblerMIPS::RegExpMacroAssemblerMIPS(Isolate* isolate, Zone* zone,
Mode mode,
int registers_to_save)
: NativeRegExpMacroAssembler(isolate, zone),
- masm_(new MacroAssembler(isolate, nullptr, kRegExpCodeSize,
- CodeObjectRequired::kYes)),
+ masm_(new MacroAssembler(isolate, CodeObjectRequired::kYes,
+ NewAssemblerBuffer(kRegExpCodeSize))),
mode_(mode),
num_registers_(registers_to_save),
num_saved_registers_(registers_to_save),
@@ -152,7 +154,6 @@ RegExpMacroAssemblerMIPS::RegExpMacroAssemblerMIPS(Isolate* isolate, Zone* zone,
__ bind(&start_label_); // And then continue from here.
}
-
RegExpMacroAssemblerMIPS::~RegExpMacroAssemblerMIPS() {
delete masm_;
// Unuse labels in case we throw away the assembler without calling GetCode.
@@ -193,7 +194,7 @@ void RegExpMacroAssemblerMIPS::AdvanceRegister(int reg, int by) {
void RegExpMacroAssemblerMIPS::Backtrack() {
CheckPreemption();
- // Pop Code* offset from backtrack stack, add Code* and jump to location.
+ // Pop Code offset from backtrack stack, add Code and jump to location.
Pop(a0);
__ Daddu(a0, a0, code_pointer());
__ Jump(a0);
@@ -1125,6 +1126,9 @@ bool RegExpMacroAssemblerMIPS::CanReadUnaligned() {
// Private methods:
void RegExpMacroAssemblerMIPS::CallCheckStackGuardState(Register scratch) {
+ DCHECK(!isolate()->ShouldLoadConstantsFromRootList());
+ DCHECK(!masm_->options().isolate_independent_code);
+
int stack_alignment = base::OS::ActivationFrameAlignment();
// Align the stack pointer and save the original sp value on the stack.
@@ -1135,16 +1139,16 @@ void RegExpMacroAssemblerMIPS::CallCheckStackGuardState(Register scratch) {
__ Sd(scratch, MemOperand(sp));
__ mov(a2, frame_pointer());
- // Code* of self.
+ // Code of self.
__ li(a1, Operand(masm_->CodeObject()), CONSTANT_SIZE);
// We need to make room for the return address on the stack.
DCHECK(IsAligned(stack_alignment, kPointerSize));
__ Dsubu(sp, sp, Operand(stack_alignment));
- // Stack pointer now points to cell where return address is to be written.
- // Arguments are in registers, meaning we teat the return address as
- // argument 5. Since DirectCEntryStub will handleallocating space for the C
+ // The stack pointer now points to the cell where the return address will be
+ // written. Arguments are in registers, meaning we treat the return address
+ // as argument 5. Since DirectCEntry will handle allocating space for the C
// argument slots, we don't need to care about that here. This is how the
// stack will look (sp meaning the value of sp at this moment):
// [sp + 3] - empty slot if needed for alignment.
@@ -1158,10 +1162,24 @@ void RegExpMacroAssemblerMIPS::CallCheckStackGuardState(Register scratch) {
ExternalReference stack_guard_check =
ExternalReference::re_check_stack_guard_state(masm_->isolate());
__ li(t9, Operand(stack_guard_check));
- DirectCEntryStub stub(isolate());
- stub.GenerateCall(masm_, t9);
- // DirectCEntryStub allocated space for the C argument slots so we have to
+ if (FLAG_embedded_builtins) {
+ EmbeddedData d = EmbeddedData::FromBlob();
+ CHECK(Builtins::IsIsolateIndependent(Builtins::kDirectCEntry));
+ Address entry = d.InstructionStartOfBuiltin(Builtins::kDirectCEntry);
+ __ li(kScratchReg, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
+ __ Call(kScratchReg);
+ } else {
+ // TODO(v8:8519): Remove this once embedded builtins are on unconditionally.
+ Handle<Code> code = BUILTIN_CODE(isolate(), DirectCEntry);
+ __ li(kScratchReg,
+ Operand(reinterpret_cast<intptr_t>(code.location()),
+ RelocInfo::CODE_TARGET),
+ CONSTANT_SIZE);
+ __ Call(kScratchReg);
+ }
+
+ // DirectCEntry allocated space for the C argument slots so we have to
// drop them with the return address from the stack with loading saved sp.
// At this point stack must look:
// [sp + 7] - empty slot if needed for alignment.
@@ -1190,15 +1208,15 @@ static T* frame_entry_address(Address re_frame, int frame_offset) {
return reinterpret_cast<T*>(re_frame + frame_offset);
}
-
int64_t RegExpMacroAssemblerMIPS::CheckStackGuardState(Address* return_address,
- Code* re_code,
+ Address raw_code,
Address re_frame) {
+ Code re_code = Code::cast(Object(raw_code));
return NativeRegExpMacroAssembler::CheckStackGuardState(
frame_entry<Isolate*>(re_frame, kIsolate),
static_cast<int>(frame_entry<int64_t>(re_frame, kStartIndex)),
frame_entry<int64_t>(re_frame, kDirectCall) == 1, return_address, re_code,
- frame_entry_address<String*>(re_frame, kInputString),
+ frame_entry_address<Address>(re_frame, kInputString),
frame_entry_address<const byte*>(re_frame, kInputStart),
frame_entry_address<const byte*>(re_frame, kInputEnd));
}
diff --git a/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.h b/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.h
index 37c1d3fbb6..8c1275655d 100644
--- a/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.h
+++ b/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.h
@@ -90,7 +90,8 @@ class RegExpMacroAssemblerMIPS: public NativeRegExpMacroAssembler {
// Called from RegExp if the stack-guard is triggered.
// If the code object is relocated, the return address is fixed before
// returning.
- static int64_t CheckStackGuardState(Address* return_address, Code* re_code,
+ // {raw_code} is an Address because this is called via ExternalReference.
+ static int64_t CheckStackGuardState(Address* return_address, Address raw_code,
Address re_frame);
void print_regexp_frame_constants();
@@ -130,7 +131,7 @@ class RegExpMacroAssemblerMIPS: public NativeRegExpMacroAssembler {
static const int kRegisterZero = kStringStartMinusOne - kPointerSize;
// Initial size of code buffer.
- static const size_t kRegExpCodeSize = 1024;
+ static const int kRegExpCodeSize = 1024;
// Load a number of characters at the given offset from the
// current position, into the current-character register.
diff --git a/deps/v8/src/regexp/ppc/OWNERS b/deps/v8/src/regexp/ppc/OWNERS
index cf60da5cc7..6d1a8fc472 100644
--- a/deps/v8/src/regexp/ppc/OWNERS
+++ b/deps/v8/src/regexp/ppc/OWNERS
@@ -1,7 +1,4 @@
jyan@ca.ibm.com
-dstence@us.ibm.com
joransiu@ca.ibm.com
-mbrandy@us.ibm.com
michael_dawson@ca.ibm.com
-jbarboza@ca.ibm.com
-mmallick@ca.ibm.com
+miladfar@ca.ibm.com \ No newline at end of file
diff --git a/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc b/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc
index 494422074c..b0f2de4dd7 100644
--- a/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc
+++ b/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc
@@ -8,11 +8,11 @@
#include "src/assembler-inl.h"
#include "src/base/bits.h"
-#include "src/code-stubs.h"
#include "src/log.h"
#include "src/macro-assembler.h"
#include "src/regexp/regexp-macro-assembler.h"
#include "src/regexp/regexp-stack.h"
+#include "src/snapshot/embedded-data.h"
#include "src/unicode.h"
namespace v8 {
@@ -23,7 +23,7 @@ namespace internal {
* This assembler uses the following register assignment convention
* - r25: Temporarily stores the index of capture start after a matching pass
* for a global regexp.
- * - r26: Pointer to current code object (Code*) including heap object tag.
+ * - r26: Pointer to current Code object including heap object tag.
* - r27: Current position in input, as negative offset from end of string.
* Please notice that this is the byte offset, not the character offset!
* - r28: Currently loaded character. Must be loaded using
@@ -76,7 +76,7 @@ namespace internal {
* The data up to the return address must be placed there by the calling
* code and the remaining arguments are passed in registers, e.g. by calling the
* code entry as cast to a function with the signature:
- * int (*match)(String* input_string,
+ * int (*match)(String input_string,
* int start_index,
* Address start,
* Address end,
@@ -91,12 +91,14 @@ namespace internal {
#define __ ACCESS_MASM(masm_)
+const int RegExpMacroAssemblerPPC::kRegExpCodeSize;
+
RegExpMacroAssemblerPPC::RegExpMacroAssemblerPPC(Isolate* isolate, Zone* zone,
Mode mode,
int registers_to_save)
: NativeRegExpMacroAssembler(isolate, zone),
- masm_(new MacroAssembler(isolate, nullptr, kRegExpCodeSize,
- CodeObjectRequired::kYes)),
+ masm_(new MacroAssembler(isolate, CodeObjectRequired::kYes,
+ NewAssemblerBuffer(kRegExpCodeSize))),
mode_(mode),
num_registers_(registers_to_save),
num_saved_registers_(registers_to_save),
@@ -108,7 +110,7 @@ RegExpMacroAssemblerPPC::RegExpMacroAssemblerPPC(Isolate* isolate, Zone* zone,
internal_failure_label_() {
DCHECK_EQ(0, registers_to_save % 2);
-// Called from C
+ // RegExp code follows the C ABI, so it needs a function descriptor (FD).
__ function_descriptor();
__ b(&entry_label_); // We'll write the entry code later.
@@ -142,8 +144,13 @@ int RegExpMacroAssemblerPPC::stack_limit_slack() {
void RegExpMacroAssemblerPPC::AdvanceCurrentPosition(int by) {
if (by != 0) {
- __ addi(current_input_offset(), current_input_offset(),
- Operand(by * char_size()));
+ if (is_int16(by * char_size())) {
+ __ addi(current_input_offset(), current_input_offset(),
+ Operand(by * char_size()));
+ } else {
+ __ mov(r0, Operand(by * char_size()));
+ __ add(current_input_offset(), r0, current_input_offset());
+ }
}
}
@@ -162,7 +169,7 @@ void RegExpMacroAssemblerPPC::AdvanceRegister(int reg, int by) {
void RegExpMacroAssemblerPPC::Backtrack() {
CheckPreemption();
- // Pop Code* offset from backtrack stack, add Code* and jump to location.
+ // Pop Code offset from backtrack stack, add Code and jump to location.
Pop(r3);
__ add(r3, r3, code_pointer());
__ Jump(r3);
@@ -1091,6 +1098,9 @@ void RegExpMacroAssemblerPPC::WriteStackPointerToRegister(int reg) {
// Private methods:
void RegExpMacroAssemblerPPC::CallCheckStackGuardState(Register scratch) {
+ DCHECK(!isolate()->ShouldLoadConstantsFromRootList());
+ DCHECK(!masm_->options().isolate_independent_code);
+
int frame_alignment = masm_->ActivationFrameAlignment();
int stack_space = kNumRequiredStackFrameSlots;
int stack_passed_arguments = 1; // space for return address pointer
@@ -1117,7 +1127,7 @@ void RegExpMacroAssemblerPPC::CallCheckStackGuardState(Register scratch) {
// RegExp code frame pointer.
__ mr(r5, frame_pointer());
- // Code* of self.
+ // Code of self.
__ mov(r4, Operand(masm_->CodeObject()));
// r3 will point to the return address, placed by DirectCEntry.
__ addi(r3, sp, Operand(kStackFrameExtraParamSlot * kPointerSize));
@@ -1125,8 +1135,7 @@ void RegExpMacroAssemblerPPC::CallCheckStackGuardState(Register scratch) {
ExternalReference stack_guard_check =
ExternalReference::re_check_stack_guard_state(isolate());
__ mov(ip, Operand(stack_guard_check));
- DirectCEntryStub stub(isolate());
- stub.GenerateCall(masm_, ip);
+ __ StoreReturnAddressAndCall(ip);
// Restore the stack pointer
stack_space = kNumRequiredStackFrameSlots + stack_passed_arguments;
@@ -1152,15 +1161,15 @@ static T* frame_entry_address(Address re_frame, int frame_offset) {
return reinterpret_cast<T*>(re_frame + frame_offset);
}
-
int RegExpMacroAssemblerPPC::CheckStackGuardState(Address* return_address,
- Code* re_code,
+ Address raw_code,
Address re_frame) {
+ Code re_code = Code::cast(Object(raw_code));
return NativeRegExpMacroAssembler::CheckStackGuardState(
frame_entry<Isolate*>(re_frame, kIsolate),
frame_entry<intptr_t>(re_frame, kStartIndex),
frame_entry<intptr_t>(re_frame, kDirectCall) == 1, return_address,
- re_code, frame_entry_address<String*>(re_frame, kInputString),
+ re_code, frame_entry_address<Address>(re_frame, kInputString),
frame_entry_address<const byte*>(re_frame, kInputStart),
frame_entry_address<const byte*>(re_frame, kInputEnd));
}
@@ -1272,7 +1281,12 @@ void RegExpMacroAssemblerPPC::LoadCurrentCharacterUnchecked(int cp_offset,
Register offset = current_input_offset();
if (cp_offset != 0) {
// r25 is not being used to store the capture start index at this point.
- __ addi(r25, current_input_offset(), Operand(cp_offset * char_size()));
+ if (is_int16(cp_offset * char_size())) {
+ __ addi(r25, current_input_offset(), Operand(cp_offset * char_size()));
+ } else {
+ __ mov(r25, Operand(cp_offset * char_size()));
+ __ add(r25, r25, current_input_offset());
+ }
offset = r25;
}
// The lwz, stw, lhz, sth instructions can do unaligned accesses, if the CPU
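
The new is_int16 guards exist because PPC's addi takes a signed 16-bit immediate; byte offsets like cp_offset * char_size can exceed that range, in which case the value must be materialized in a register first. A standalone sketch of the range check, with toy emit functions standing in for the assembler:

    #include <cstdint>
    #include <cstdio>

    // The predicate the patch relies on: fits a signed 16-bit immediate.
    bool is_int16(int64_t v) { return v >= -32768 && v <= 32767; }

    void AdvanceBy(int64_t byte_offset) {
      if (is_int16(byte_offset)) {
        // Single-instruction immediate form.
        std::printf("addi r25, r27, %lld\n", (long long)byte_offset);
      } else {
        // Immediate too wide: materialize it, then add register-register.
        std::printf("mov  r0, %lld\n", (long long)byte_offset);
        std::printf("add  r25, r0, r27\n");
      }
    }

    int main() {
      AdvanceBy(8);        // fits: addi
      AdvanceBy(1 << 20);  // does not fit: mov + add
      return 0;
    }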
diff --git a/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.h b/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.h
index 1e65600ecf..1bbb45885e 100644
--- a/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.h
+++ b/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.h
@@ -81,7 +81,8 @@ class RegExpMacroAssemblerPPC : public NativeRegExpMacroAssembler {
// Called from RegExp if the stack-guard is triggered.
// If the code object is relocated, the return address is fixed before
// returning.
- static int CheckStackGuardState(Address* return_address, Code* re_code,
+ // {raw_code} is an Address because this is called via ExternalReference.
+ static int CheckStackGuardState(Address* return_address, Address raw_code,
Address re_frame);
private:
@@ -116,7 +117,7 @@ class RegExpMacroAssemblerPPC : public NativeRegExpMacroAssembler {
static const int kRegisterZero = kStringStartMinusOne - kPointerSize;
// Initial size of code buffer.
- static const size_t kRegExpCodeSize = 1024;
+ static const int kRegExpCodeSize = 1024;
// Load a number of characters at the given offset from the
// current position, into the current-character register.
diff --git a/deps/v8/src/regexp/regexp-macro-assembler-irregexp-inl.h b/deps/v8/src/regexp/regexp-macro-assembler-irregexp-inl.h
index a60212903d..7a0aa35e72 100644
--- a/deps/v8/src/regexp/regexp-macro-assembler-irregexp-inl.h
+++ b/deps/v8/src/regexp/regexp-macro-assembler-irregexp-inl.h
@@ -7,6 +7,8 @@
#ifdef V8_INTERPRETED_REGEXP
+#include "src/regexp/regexp-macro-assembler-irregexp.h"
+
#include "src/ast/ast.h"
#include "src/regexp/bytecodes-irregexp.h"
diff --git a/deps/v8/src/regexp/regexp-macro-assembler.cc b/deps/v8/src/regexp/regexp-macro-assembler.cc
index 24bd10c616..90f065e94f 100644
--- a/deps/v8/src/regexp/regexp-macro-assembler.cc
+++ b/deps/v8/src/regexp/regexp-macro-assembler.cc
@@ -29,8 +29,6 @@ int RegExpMacroAssembler::CaseInsensitiveCompareUC16(Address byte_offset1,
Address byte_offset2,
size_t byte_length,
Isolate* isolate) {
- unibrow::Mapping<unibrow::Ecma262Canonicalize>* canonicalize =
- isolate->regexp_macro_assembler_canonicalize();
// This function is not allowed to cause a garbage collection.
// A GC might move the calling generated code and invalidate the
// return address on the stack.
@@ -67,6 +65,8 @@ int RegExpMacroAssembler::CaseInsensitiveCompareUC16(Address byte_offset1,
}
#endif // V8_INTL_SUPPORT
DCHECK_NOT_NULL(isolate);
+ unibrow::Mapping<unibrow::Ecma262Canonicalize>* canonicalize =
+ isolate->regexp_macro_assembler_canonicalize();
for (size_t i = 0; i < length; i++) {
unibrow::uchar c1 = substring1[i];
unibrow::uchar c2 = substring2[i];
@@ -121,8 +121,7 @@ bool NativeRegExpMacroAssembler::CanReadUnaligned() {
}
const byte* NativeRegExpMacroAssembler::StringCharacterPosition(
- String* subject,
- int start_index) {
+ String subject, int start_index, const DisallowHeapAllocation& no_gc) {
if (subject->IsConsString()) {
subject = ConsString::cast(subject)->first();
} else if (subject->IsSlicedString()) {
@@ -136,10 +135,10 @@ const byte* NativeRegExpMacroAssembler::StringCharacterPosition(
DCHECK_LE(start_index, subject->length());
if (subject->IsSeqOneByteString()) {
return reinterpret_cast<const byte*>(
- SeqOneByteString::cast(subject)->GetChars() + start_index);
+ SeqOneByteString::cast(subject)->GetChars(no_gc) + start_index);
} else if (subject->IsSeqTwoByteString()) {
return reinterpret_cast<const byte*>(
- SeqTwoByteString::cast(subject)->GetChars() + start_index);
+ SeqTwoByteString::cast(subject)->GetChars(no_gc) + start_index);
} else if (subject->IsExternalOneByteString()) {
return reinterpret_cast<const byte*>(
ExternalOneByteString::cast(subject)->GetChars() + start_index);
@@ -150,19 +149,19 @@ const byte* NativeRegExpMacroAssembler::StringCharacterPosition(
}
}
-
int NativeRegExpMacroAssembler::CheckStackGuardState(
Isolate* isolate, int start_index, bool is_direct_call,
- Address* return_address, Code* re_code, String** subject,
+ Address* return_address, Code re_code, Address* subject,
const byte** input_start, const byte** input_end) {
+ AllowHeapAllocation allow_allocation;
DCHECK(re_code->raw_instruction_start() <= *return_address);
DCHECK(*return_address <= re_code->raw_instruction_end());
int return_value = 0;
// Prepare for possible GC.
HandleScope handles(isolate);
Handle<Code> code_handle(re_code, isolate);
- Handle<String> subject_handle(*subject, isolate);
- bool is_one_byte = subject_handle->IsOneByteRepresentationUnderneath();
+ Handle<String> subject_handle(String::cast(Object(*subject)), isolate);
+ bool is_one_byte = String::IsOneByteRepresentationUnderneath(*subject_handle);
StackLimitCheck check(isolate);
bool js_has_overflowed = check.JsHasOverflowed();
@@ -178,7 +177,7 @@ int NativeRegExpMacroAssembler::CheckStackGuardState(
isolate->StackOverflow();
return_value = EXCEPTION;
} else {
- Object* result = isolate->stack_guard()->HandleInterrupts();
+ Object result = isolate->stack_guard()->HandleInterrupts();
if (result->IsException(isolate)) return_value = EXCEPTION;
}
@@ -193,22 +192,23 @@ int NativeRegExpMacroAssembler::CheckStackGuardState(
// If we continue, we need to update the subject string addresses.
if (return_value == 0) {
// String encoding might have changed.
- if (subject_handle->IsOneByteRepresentationUnderneath() != is_one_byte) {
+ if (String::IsOneByteRepresentationUnderneath(*subject_handle) !=
+ is_one_byte) {
// If we changed between an LATIN1 and an UC16 string, the specialized
// code cannot be used, and we need to restart regexp matching from
// scratch (including, potentially, compiling a new version of the code).
return_value = RETRY;
} else {
- *subject = *subject_handle;
+ *subject = subject_handle->ptr();
intptr_t byte_length = *input_end - *input_start;
- *input_start = StringCharacterPosition(*subject, start_index);
+ *input_start =
+ StringCharacterPosition(*subject_handle, start_index, no_gc);
*input_end = *input_start + byte_length;
}
}
return return_value;
}
-
NativeRegExpMacroAssembler::Result NativeRegExpMacroAssembler::Match(
Handle<Code> regexp_code,
Handle<String> subject,
@@ -225,7 +225,7 @@ NativeRegExpMacroAssembler::Result NativeRegExpMacroAssembler::Match(
// DisallowHeapAllocation, since regexps might be preempted, and another
// thread might do allocation anyway.
- String* subject_ptr = *subject;
+ String subject_ptr = *subject;
// Character offsets into string.
int start_offset = previous_index;
int char_length = subject_ptr->length() - start_offset;
@@ -237,7 +237,7 @@ NativeRegExpMacroAssembler::Result NativeRegExpMacroAssembler::Match(
DCHECK_EQ(0, ConsString::cast(subject_ptr)->second()->length());
subject_ptr = ConsString::cast(subject_ptr)->first();
} else if (StringShape(subject_ptr).IsSliced()) {
- SlicedString* slice = SlicedString::cast(subject_ptr);
+ SlicedString slice = SlicedString::cast(subject_ptr);
subject_ptr = slice->parent();
slice_offset = slice->offset();
}
@@ -250,8 +250,9 @@ NativeRegExpMacroAssembler::Result NativeRegExpMacroAssembler::Match(
// String is now either Sequential or External
int char_size_shift = is_one_byte ? 0 : 1;
+ DisallowHeapAllocation no_gc;
const byte* input_start =
- StringCharacterPosition(subject_ptr, start_offset + slice_offset);
+ StringCharacterPosition(subject_ptr, start_offset + slice_offset, no_gc);
int byte_length = char_length << char_size_shift;
const byte* input_end = input_start + byte_length;
Result res = Execute(*regexp_code,
@@ -265,16 +266,11 @@ NativeRegExpMacroAssembler::Result NativeRegExpMacroAssembler::Match(
return res;
}
-
NativeRegExpMacroAssembler::Result NativeRegExpMacroAssembler::Execute(
- Code* code,
- String* input, // This needs to be the unpacked (sliced, cons) string.
- int start_offset,
- const byte* input_start,
- const byte* input_end,
- int* output,
- int output_size,
- Isolate* isolate) {
+ Code code,
+ String input, // This needs to be the unpacked (sliced, cons) string.
+ int start_offset, const byte* input_start, const byte* input_end,
+ int* output, int output_size, Isolate* isolate) {
// Ensure that the minimum stack has been allocated.
RegExpStackScope stack_scope(isolate);
Address stack_base = stack_scope.stack()->stack_base();
@@ -282,18 +278,22 @@ NativeRegExpMacroAssembler::Result NativeRegExpMacroAssembler::Execute(
int direct_call = 0;
using RegexpMatcherSig = int(
- String * input, int start_offset, // NOLINT(readability/casting)
+ Address input_string, int start_offset, // NOLINT(readability/casting)
const byte* input_start, const byte* input_end, int* output,
int output_size, Address stack_base, int direct_call, Isolate* isolate);
auto fn = GeneratedCode<RegexpMatcherSig>::FromCode(code);
- int result = fn.Call(input, start_offset, input_start, input_end, output,
- output_size, stack_base, direct_call, isolate);
+ int result =
+ fn.CallIrregexp(input.ptr(), start_offset, input_start, input_end, output,
+ output_size, stack_base, direct_call, isolate);
DCHECK(result >= RETRY);
if (result == EXCEPTION && !isolate->has_pending_exception()) {
// We detected a stack overflow (on the backtrack stack) in RegExp code,
- // but haven't created the exception yet.
+ // but haven't created the exception yet. Additionally, we allow heap
+ // allocation because even though it invalidates {input_start} and
+ // {input_end}, we are about to return anyway.
+ AllowHeapAllocation allow_allocation;
isolate->StackOverflow();
}
return static_cast<Result>(result);
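
Because HandleInterrupts() may allocate and move the subject string, CheckStackGuardState re-derives the raw input pointers from the handle before generated code resumes, and the subject slot in the frame now holds a raw tagged Address rather than a String**. The write-back, condensed from the hunk above (V8-internal API):

    // After HandleInterrupts() the string may have moved (same encoding
    // already verified; otherwise we return RETRY).
    *subject = subject_handle->ptr();  // store the tagged Address back
    intptr_t byte_length = *input_end - *input_start;
    *input_start =
        StringCharacterPosition(*subject_handle, start_index, no_gc);
    *input_end = *input_start + byte_length;  // same length, new base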
diff --git a/deps/v8/src/regexp/regexp-macro-assembler.h b/deps/v8/src/regexp/regexp-macro-assembler.h
index e6bdd842c6..f571c3c5a5 100644
--- a/deps/v8/src/regexp/regexp-macro-assembler.h
+++ b/deps/v8/src/regexp/regexp-macro-assembler.h
@@ -5,7 +5,7 @@
#ifndef V8_REGEXP_REGEXP_MACRO_ASSEMBLER_H_
#define V8_REGEXP_REGEXP_MACRO_ASSEMBLER_H_
-#include "src/assembler.h"
+#include "src/label.h"
#include "src/regexp/regexp-ast.h"
namespace v8 {
@@ -230,11 +230,12 @@ class NativeRegExpMacroAssembler: public RegExpMacroAssembler {
static Address GrowStack(Address stack_pointer, Address* stack_top,
Isolate* isolate);
- static const byte* StringCharacterPosition(String* subject, int start_index);
+ static const byte* StringCharacterPosition(
+ String subject, int start_index, const DisallowHeapAllocation& no_gc);
static int CheckStackGuardState(Isolate* isolate, int start_index,
bool is_direct_call, Address* return_address,
- Code* re_code, String** subject,
+ Code re_code, Address* subject,
const byte** input_start,
const byte** input_end);
@@ -247,14 +248,9 @@ class NativeRegExpMacroAssembler: public RegExpMacroAssembler {
return reinterpret_cast<Address>(&word_character_map[0]);
}
- static Result Execute(Code* code,
- String* input,
- int start_offset,
- const byte* input_start,
- const byte* input_end,
- int* output,
- int output_size,
- Isolate* isolate);
+ static Result Execute(Code code, String input, int start_offset,
+ const byte* input_start, const byte* input_end,
+ int* output, int output_size, Isolate* isolate);
};
#endif // V8_INTERPRETED_REGEXP
diff --git a/deps/v8/src/regexp/regexp-parser.cc b/deps/v8/src/regexp/regexp-parser.cc
index 797424baf8..3da99409c6 100644
--- a/deps/v8/src/regexp/regexp-parser.cc
+++ b/deps/v8/src/regexp/regexp-parser.cc
@@ -17,9 +17,6 @@
#ifdef V8_INTL_SUPPORT
#include "unicode/uniset.h"
-// TODO(mathias): Remove this when we no longer need to check
-// `U_ICU_VERSION_MAJOR_NUM`.
-#include "unicode/uvernum.h"
#endif // V8_INTL_SUPPORT
namespace v8 {
@@ -83,7 +80,7 @@ void RegExpParser::Advance() {
FATAL("Aborting on stack overflow");
}
ReportError(CStrVector(
- MessageTemplate::TemplateString(MessageTemplate::kStackOverflow)));
+ MessageFormatter::TemplateString(MessageTemplate::kStackOverflow)));
} else if (zone()->excess_allocation()) {
ReportError(CStrVector("Regular expression too large"));
} else {
@@ -855,7 +852,7 @@ const ZoneVector<uc16>* RegExpParser::ParseCaptureGroupName() {
}
if (at_start) {
- if (!IdentifierStart::Is(c)) {
+ if (!IsIdentifierStart(c)) {
ReportError(CStrVector("Invalid capture group name"));
return nullptr;
}
@@ -864,7 +861,7 @@ const ZoneVector<uc16>* RegExpParser::ParseCaptureGroupName() {
} else {
if (c == '>') {
break;
- } else if (IdentifierPart::Is(c)) {
+ } else if (IsIdentifierPart(c)) {
push_code_unit(name, c);
} else {
ReportError(CStrVector("Invalid capture group name"));
@@ -1296,15 +1293,11 @@ bool IsSupportedBinaryProperty(UProperty property) {
case UCHAR_DEPRECATED:
case UCHAR_DIACRITIC:
case UCHAR_EMOJI:
-#if U_ICU_VERSION_MAJOR_NUM >= 60
case UCHAR_EMOJI_COMPONENT:
-#endif
case UCHAR_EMOJI_MODIFIER_BASE:
case UCHAR_EMOJI_MODIFIER:
case UCHAR_EMOJI_PRESENTATION:
-#if U_ICU_VERSION_MAJOR_NUM >= 62
case UCHAR_EXTENDED_PICTOGRAPHIC:
-#endif
case UCHAR_EXTENDER:
case UCHAR_GRAPHEME_BASE:
case UCHAR_GRAPHEME_EXTEND:
@@ -1323,9 +1316,7 @@ bool IsSupportedBinaryProperty(UProperty property) {
case UCHAR_PATTERN_WHITE_SPACE:
case UCHAR_QUOTATION_MARK:
case UCHAR_RADICAL:
-#if U_ICU_VERSION_MAJOR_NUM >= 60
case UCHAR_REGIONAL_INDICATOR:
-#endif
case UCHAR_S_TERM:
case UCHAR_SOFT_DOTTED:
case UCHAR_TERMINAL_PUNCTUATION:
diff --git a/deps/v8/src/regexp/regexp-stack.cc b/deps/v8/src/regexp/regexp-stack.cc
index 8e9d576db1..b87fbc399a 100644
--- a/deps/v8/src/regexp/regexp-stack.cc
+++ b/deps/v8/src/regexp/regexp-stack.cc
@@ -72,8 +72,8 @@ Address RegExpStack::EnsureCapacity(size_t size) {
}
thread_local_.memory_ = new_memory;
thread_local_.memory_size_ = size;
- thread_local_.limit_ =
- reinterpret_cast<Address>(new_memory) + kStackLimitSlack * kPointerSize;
+ thread_local_.limit_ = reinterpret_cast<Address>(new_memory) +
+ kStackLimitSlack * kSystemPointerSize;
}
return reinterpret_cast<Address>(thread_local_.memory_) +
thread_local_.memory_size_;
diff --git a/deps/v8/src/regexp/regexp-utils.cc b/deps/v8/src/regexp/regexp-utils.cc
index c787a50297..56b1e1b708 100644
--- a/deps/v8/src/regexp/regexp-utils.cc
+++ b/deps/v8/src/regexp/regexp-utils.cc
@@ -131,38 +131,63 @@ Maybe<bool> RegExpUtils::IsRegExp(Isolate* isolate, Handle<Object> object) {
isolate->factory()->match_symbol()),
Nothing<bool>());
- if (!match->IsUndefined(isolate)) return Just(match->BooleanValue(isolate));
+ if (!match->IsUndefined(isolate)) {
+ const bool match_as_boolean = match->BooleanValue(isolate);
+
+ if (match_as_boolean && !object->IsJSRegExp()) {
+ isolate->CountUsage(v8::Isolate::kRegExpMatchIsTrueishOnNonJSRegExp);
+ } else if (!match_as_boolean && object->IsJSRegExp()) {
+ isolate->CountUsage(v8::Isolate::kRegExpMatchIsFalseishOnJSRegExp);
+ }
+
+ return Just(match_as_boolean);
+ }
+
return Just(object->IsJSRegExp());
}
bool RegExpUtils::IsUnmodifiedRegExp(Isolate* isolate, Handle<Object> obj) {
- // TODO(ishell): Update this check once map changes for constant field
- // tracking are landing.
-
#ifdef V8_ENABLE_FORCE_SLOW_PATH
if (isolate->force_slow_path()) return false;
#endif
if (!obj->IsJSReceiver()) return false;
- JSReceiver* recv = JSReceiver::cast(*obj);
+ JSReceiver recv = JSReceiver::cast(*obj);
// Check the receiver's map.
Handle<JSFunction> regexp_function = isolate->regexp_function();
if (recv->map() != regexp_function->initial_map()) return false;
// Check the receiver's prototype's map.
- Object* proto = recv->map()->prototype();
+ Object proto = recv->map()->prototype();
if (!proto->IsJSReceiver()) return false;
Handle<Map> initial_proto_initial_map = isolate->regexp_prototype_map();
- if (JSReceiver::cast(proto)->map() != *initial_proto_initial_map) {
+ Map proto_map = JSReceiver::cast(proto)->map();
+ if (proto_map != *initial_proto_initial_map) {
return false;
}
+ // Check that the "exec" method is unmodified.
+ if (FLAG_track_constant_fields) {
+ // Check that the index refers to "exec" method (this has to be consistent
+ // with the init order in the bootstrapper).
+ DCHECK_EQ(*(isolate->factory()->exec_string()),
+ proto_map->instance_descriptors()->GetKey(
+ JSRegExp::kExecFunctionDescriptorIndex));
+ if (proto_map->instance_descriptors()
+ ->GetDetails(JSRegExp::kExecFunctionDescriptorIndex)
+ .constness() != PropertyConstness::kConst) {
+ return false;
+ }
+ }
+
+ if (!isolate->IsRegExpSpeciesLookupChainIntact()) return false;
+
// The smi check is required to omit ToLength(lastIndex) calls with possible
// user-code execution on the fast path.
- Object* last_index = JSRegExp::cast(recv)->last_index();
+ Object last_index = JSRegExp::cast(recv)->last_index();
return last_index->IsSmi() && Smi::ToInt(last_index) >= 0;
}
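
IsUnmodifiedRegExp now additionally requires that RegExp.prototype.exec is still a constant field and that the RegExp species protector is intact, because the fast paths bake in both assumptions. The added checks, condensed from the hunk above (V8-internal API):

    if (FLAG_track_constant_fields) {
      // exec must still be the original method: its descriptor stays kConst.
      if (proto_map->instance_descriptors()
              ->GetDetails(JSRegExp::kExecFunctionDescriptorIndex)
              .constness() != PropertyConstness::kConst) {
        return false;
      }
    }
    // Subclassing via Symbol.species would likewise invalidate the fast path.
    if (!isolate->IsRegExpSpeciesLookupChainIntact()) return false;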
diff --git a/deps/v8/src/regexp/regexp-utils.h b/deps/v8/src/regexp/regexp-utils.h
index 8fc6607d98..4e32bf10f4 100644
--- a/deps/v8/src/regexp/regexp-utils.h
+++ b/deps/v8/src/regexp/regexp-utils.h
@@ -36,7 +36,8 @@ class RegExpUtils : public AllStatic {
static Maybe<bool> IsRegExp(Isolate* isolate, Handle<Object> object);
// Checks whether the given object is an unmodified JSRegExp instance.
- // Neither the object's map, nor its prototype's map may be modified.
+ // Neither the object's map, nor its prototype's map, nor any relevant
+ // method on the prototype may be modified.
static bool IsUnmodifiedRegExp(Isolate* isolate, Handle<Object> obj);
// ES#sec-advancestringindex
diff --git a/deps/v8/src/regexp/s390/OWNERS b/deps/v8/src/regexp/s390/OWNERS
index cf60da5cc7..6d1a8fc472 100644
--- a/deps/v8/src/regexp/s390/OWNERS
+++ b/deps/v8/src/regexp/s390/OWNERS
@@ -1,7 +1,4 @@
jyan@ca.ibm.com
-dstence@us.ibm.com
joransiu@ca.ibm.com
-mbrandy@us.ibm.com
michael_dawson@ca.ibm.com
-jbarboza@ca.ibm.com
-mmallick@ca.ibm.com
+miladfar@ca.ibm.com \ No newline at end of file
diff --git a/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.cc b/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.cc
index 3db1ebc421..d6c966484e 100644
--- a/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.cc
+++ b/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.cc
@@ -8,11 +8,11 @@
#include "src/assembler-inl.h"
#include "src/base/bits.h"
-#include "src/code-stubs.h"
#include "src/log.h"
#include "src/macro-assembler.h"
#include "src/regexp/regexp-macro-assembler.h"
#include "src/regexp/regexp-stack.h"
+#include "src/snapshot/embedded-data.h"
#include "src/regexp/s390/regexp-macro-assembler-s390.h"
#include "src/unicode.h"
@@ -24,7 +24,7 @@ namespace internal {
* This assembler uses the following register assignment convention
* - r6: Temporarily stores the index of capture start after a matching pass
* for a global regexp.
- * - r7: Pointer to current code object (Code*) including heap object tag.
+ * - r7: Pointer to current Code object including heap object tag.
* - r8: Current position in input, as negative offset from end of string.
* Please notice that this is the byte offset, not the character offset!
* - r9: Currently loaded character. Must be loaded using
@@ -78,7 +78,7 @@ namespace internal {
* The data up to the return address must be placed there by the calling
* code and the remaining arguments are passed in registers, e.g. by calling the
* code entry as cast to a function with the signature:
- * int (*match)(String* input_string,
+ * int (*match)(String input_string,
* int start_index,
* Address start,
* Address end,
@@ -93,12 +93,14 @@ namespace internal {
#define __ ACCESS_MASM(masm_)
+const int RegExpMacroAssemblerS390::kRegExpCodeSize;
+
RegExpMacroAssemblerS390::RegExpMacroAssemblerS390(Isolate* isolate, Zone* zone,
Mode mode,
int registers_to_save)
: NativeRegExpMacroAssembler(isolate, zone),
- masm_(new MacroAssembler(isolate, nullptr, kRegExpCodeSize,
- CodeObjectRequired::kYes)),
+ masm_(new MacroAssembler(isolate, CodeObjectRequired::kYes,
+ NewAssemblerBuffer(kRegExpCodeSize))),
mode_(mode),
num_registers_(registers_to_save),
num_saved_registers_(registers_to_save),
@@ -159,7 +161,7 @@ void RegExpMacroAssemblerS390::AdvanceRegister(int reg, int by) {
void RegExpMacroAssemblerS390::Backtrack() {
CheckPreemption();
- // Pop Code* offset from backtrack stack, add Code* and jump to location.
+ // Pop Code offset from backtrack stack, add Code and jump to location.
Pop(r2);
__ AddP(r2, code_pointer());
__ b(r2);
@@ -1071,17 +1073,30 @@ void RegExpMacroAssemblerS390::WriteStackPointerToRegister(int reg) {
// Private methods:
void RegExpMacroAssemblerS390::CallCheckStackGuardState(Register scratch) {
- static const int num_arguments = 3;
+ DCHECK(!isolate()->ShouldLoadConstantsFromRootList());
+ DCHECK(!masm_->options().isolate_independent_code);
+
+ static constexpr int num_arguments = 3;
__ PrepareCallCFunction(num_arguments, scratch);
// RegExp code frame pointer.
__ LoadRR(r4, frame_pointer());
- // Code* of self.
+ // Code of self.
__ mov(r3, Operand(masm_->CodeObject()));
// r2 becomes return address pointer.
__ lay(r2, MemOperand(sp, kStackFrameRASlot * kPointerSize));
ExternalReference stack_guard_check =
ExternalReference::re_check_stack_guard_state(isolate());
- CallCFunctionUsingStub(stack_guard_check, num_arguments);
+
+ __ mov(ip, Operand(stack_guard_check));
+ __ StoreReturnAddressAndCall(ip);
+
+ if (base::OS::ActivationFrameAlignment() > kPointerSize) {
+ __ LoadP(sp, MemOperand(sp, (kNumRequiredStackFrameSlots * kPointerSize)));
+ } else {
+ __ la(sp, MemOperand(sp, (kNumRequiredStackFrameSlots * kPointerSize)));
+ }
+
+ __ mov(code_pointer(), Operand(masm_->CodeObject()));
}
// Helper function for reading a value out of a stack frame.
@@ -1101,13 +1116,14 @@ static T* frame_entry_address(Address re_frame, int frame_offset) {
}
int RegExpMacroAssemblerS390::CheckStackGuardState(Address* return_address,
- Code* re_code,
+ Address raw_code,
Address re_frame) {
+ Code re_code = Code::cast(Object(raw_code));
return NativeRegExpMacroAssembler::CheckStackGuardState(
frame_entry<Isolate*>(re_frame, kIsolate),
frame_entry<intptr_t>(re_frame, kStartIndex),
frame_entry<intptr_t>(re_frame, kDirectCall) == 1, return_address,
- re_code, frame_entry_address<String*>(re_frame, kInputString),
+ re_code, frame_entry_address<Address>(re_frame, kInputString),
frame_entry_address<const byte*>(re_frame, kInputStart),
frame_entry_address<const byte*>(re_frame, kInputEnd));
}
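
A hedged sketch of the Code*-to-Address migration visible in this hunk: functions reached through an ExternalReference use the C calling convention, so heap objects cross the boundary as plain machine words and are re-wrapped on entry, matching the new {raw_code} comment in the header. TaggedValue and CodeRef are invented stand-ins for V8's Object and Code value classes.

#include <cstdint>

using Address = uintptr_t;

class TaggedValue {
 public:
  explicit TaggedValue(Address ptr) : ptr_(ptr) {}
  Address ptr() const { return ptr_; }

 private:
  Address ptr_;  // the tagged word itself, held by value rather than via Code*
};

class CodeRef : public TaggedValue {
 public:
  static CodeRef cast(TaggedValue v) { return CodeRef(v.ptr()); }

 private:
  explicit CodeRef(Address ptr) : TaggedValue(ptr) {}
};

// C-linkage entry point: only scalar parameters are portable here.
extern "C" int CheckStackGuardStateShim(Address raw_code) {
  CodeRef re_code = CodeRef::cast(TaggedValue(raw_code));
  return re_code.ptr() != 0 ? 0 : -1;  // stand-in for the real guard logic
}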
diff --git a/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.h b/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.h
index 40ba5ece25..42ce06c494 100644
--- a/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.h
+++ b/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.h
@@ -80,7 +80,8 @@ class RegExpMacroAssemblerS390 : public NativeRegExpMacroAssembler {
// Called from RegExp if the stack-guard is triggered.
// If the code object is relocated, the return address is fixed before
// returning.
- static int CheckStackGuardState(Address* return_address, Code* re_code,
+ // {raw_code} is an Address because this is called via ExternalReference.
+ static int CheckStackGuardState(Address* return_address, Address raw_code,
Address re_frame);
private:
@@ -116,7 +117,7 @@ class RegExpMacroAssemblerS390 : public NativeRegExpMacroAssembler {
static const int kRegisterZero = kStringStartMinusOne - kPointerSize;
// Initial size of code buffer.
- static const size_t kRegExpCodeSize = 1024;
+ static const int kRegExpCodeSize = 1024;
// Load a number of characters at the given offset from the
// current position, into the current-character register.
diff --git a/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.cc b/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.cc
index 43f80767ea..b196f70a49 100644
--- a/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.cc
+++ b/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.cc
@@ -36,7 +36,7 @@ namespace internal {
* - rsp : Points to tip of C stack.
* - rcx : Points to tip of backtrack stack. The backtrack stack contains
* only 32-bit values. Most are offsets from some base (e.g., character
- * positions from end of string or code location from Code* pointer).
+ * positions from end of string or code location from Code pointer).
* - r8 : Code object pointer. Used to convert between absolute and
* code-object-relative addresses.
*
@@ -60,7 +60,7 @@ namespace internal {
* - end of input (address of end of string)
* - start of input (address of first character in string)
* - start index (character index of start)
- * - String* input_string (input string)
+ * - String input_string (input string)
* - return address
* - backup of callee save registers (rbx, possibly rsi and rdi).
* - success counter (only useful for global regexp to count matches)
@@ -80,7 +80,7 @@ namespace internal {
* The first seven values must be provided by the calling code by
* calling the code's entry address cast to a function pointer with the
* following signature:
- * int (*match)(String* input_string,
+ * int (*match)(String input_string,
* int start_index,
* Address start,
* Address end,
@@ -93,11 +93,14 @@ namespace internal {
#define __ ACCESS_MASM((&masm_))
+const int RegExpMacroAssemblerX64::kRegExpCodeSize;
+
RegExpMacroAssemblerX64::RegExpMacroAssemblerX64(Isolate* isolate, Zone* zone,
Mode mode,
int registers_to_save)
: NativeRegExpMacroAssembler(isolate, zone),
- masm_(isolate, nullptr, kRegExpCodeSize, CodeObjectRequired::kYes),
+ masm_(isolate, CodeObjectRequired::kYes,
+ NewAssemblerBuffer(kRegExpCodeSize)),
no_root_array_scope_(&masm_),
code_relative_fixup_positions_(zone),
mode_(mode),
@@ -113,7 +116,6 @@ RegExpMacroAssemblerX64::RegExpMacroAssemblerX64(Isolate* isolate, Zone* zone,
__ bind(&start_label_); // And then continue from here.
}
-
RegExpMacroAssemblerX64::~RegExpMacroAssemblerX64() {
// Unuse labels in case we throw away the assembler without calling GetCode.
entry_label_.Unuse();
@@ -149,7 +151,7 @@ void RegExpMacroAssemblerX64::AdvanceRegister(int reg, int by) {
void RegExpMacroAssemblerX64::Backtrack() {
CheckPreemption();
- // Pop Code* offset from backtrack stack, add Code* and jump to location.
+ // Pop Code offset from backtrack stack, add Code and jump to location.
Pop(rbx);
__ addp(rbx, code_object_pointer());
__ jmp(rbx);
@@ -729,7 +731,7 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
__ j(below_equal, &stack_limit_hit);
// Check if there is room for the variable number of registers above
// the stack limit.
- __ cmpp(rcx, Immediate(num_registers_ * kPointerSize));
+ __ cmpp(rcx, Immediate(num_registers_ * kSystemPointerSize));
__ j(above_equal, &stack_ok);
// Exit with OutOfMemory exception. There is not enough space on the stack
// for our working registers.
@@ -746,7 +748,7 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
__ bind(&stack_ok);
// Allocate space on stack for registers.
- __ subp(rsp, Immediate(num_registers_ * kPointerSize));
+ __ subp(rsp, Immediate(num_registers_ * kSystemPointerSize));
// Load string length.
__ movp(rsi, Operand(rbp, kInputEnd));
// Load input position.
@@ -770,7 +772,7 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
// Ensure that we have written to each stack page, in order. Skipping a page
// on Windows can cause segmentation faults. Assuming page size is 4k.
const int kPageSize = 4096;
- const int kRegistersPerPage = kPageSize / kPointerSize;
+ const int kRegistersPerPage = kPageSize / kSystemPointerSize;
for (int i = num_saved_registers_ + kRegistersPerPage - 1;
i < num_registers_;
i += kRegistersPerPage) {
@@ -804,9 +806,9 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
Label init_loop;
__ bind(&init_loop);
__ movp(Operand(rbp, rcx, times_1, 0), rax);
- __ subq(rcx, Immediate(kPointerSize));
- __ cmpq(rcx,
- Immediate(kRegisterZero - num_saved_registers_ * kPointerSize));
+ __ subq(rcx, Immediate(kSystemPointerSize));
+ __ cmpq(rcx, Immediate(kRegisterZero -
+ num_saved_registers_ * kSystemPointerSize));
__ j(greater, &init_loop);
} else { // Unroll the loop.
for (int i = 0; i < num_saved_registers_; i++) {
@@ -1093,12 +1095,11 @@ void RegExpMacroAssemblerX64::PushRegister(int register_index,
if (check_stack_limit) CheckStackLimit();
}
-
-STATIC_ASSERT(kPointerSize == kInt64Size || kPointerSize == kInt32Size);
-
+STATIC_ASSERT(kSystemPointerSize == kInt64Size ||
+ kSystemPointerSize == kInt32Size);
void RegExpMacroAssemblerX64::ReadCurrentPositionFromRegister(int reg) {
- if (kPointerSize == kInt64Size) {
+ if (kSystemPointerSize == kInt64Size) {
__ movq(rdi, register_location(reg));
} else {
// Need sign extension for x32 as rdi might be used as an index register.
@@ -1108,7 +1109,7 @@ void RegExpMacroAssemblerX64::ReadCurrentPositionFromRegister(int reg) {
void RegExpMacroAssemblerX64::ReadPositionFromRegister(Register dst, int reg) {
- if (kPointerSize == kInt64Size) {
+ if (kSystemPointerSize == kInt64Size) {
__ movq(dst, register_location(reg));
} else {
// Need sign extension for x32 as dst might be used as an index register.
@@ -1183,17 +1184,17 @@ void RegExpMacroAssemblerX64::CallCheckStackGuardState() {
static const int num_arguments = 3;
__ PrepareCallCFunction(num_arguments);
#ifdef _WIN64
- // Second argument: Code* of self. (Do this before overwriting r8).
+ // Second argument: Code of self. (Do this before overwriting r8).
__ movp(rdx, code_object_pointer());
// Third argument: RegExp code frame pointer.
__ movp(r8, rbp);
// First argument: Next address on the stack (will be address of
// return address).
- __ leap(rcx, Operand(rsp, -kPointerSize));
+ __ leap(rcx, Operand(rsp, -kSystemPointerSize));
#else
// Third argument: RegExp code frame pointer.
__ movp(rdx, rbp);
- // Second argument: Code* of self.
+ // Second argument: Code of self.
__ movp(rsi, code_object_pointer());
// First argument: Next address on the stack (will be address of
// return address).
@@ -1217,15 +1218,15 @@ static T* frame_entry_address(Address re_frame, int frame_offset) {
return reinterpret_cast<T*>(re_frame + frame_offset);
}
-
int RegExpMacroAssemblerX64::CheckStackGuardState(Address* return_address,
- Code* re_code,
+ Address raw_code,
Address re_frame) {
+ Code re_code = Code::cast(Object(raw_code));
return NativeRegExpMacroAssembler::CheckStackGuardState(
frame_entry<Isolate*>(re_frame, kIsolate),
frame_entry<int>(re_frame, kStartIndex),
frame_entry<int>(re_frame, kDirectCall) == 1, return_address, re_code,
- frame_entry_address<String*>(re_frame, kInputString),
+ frame_entry_address<Address>(re_frame, kInputString),
frame_entry_address<const byte*>(re_frame, kInputStart),
frame_entry_address<const byte*>(re_frame, kInputEnd));
}
@@ -1236,7 +1237,7 @@ Operand RegExpMacroAssemblerX64::register_location(int register_index) {
if (num_registers_ <= register_index) {
num_registers_ = register_index + 1;
}
- return Operand(rbp, kRegisterZero - register_index * kPointerSize);
+ return Operand(rbp, kRegisterZero - register_index * kSystemPointerSize);
}
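
A minimal sketch of the constant split behind the kPointerSize to kSystemPointerSize rename throughout this file. The point of the new name: native stack slots are always full machine words, even if on-heap tagged slots later shrink under pointer compression. kTaggedSize and V8_COMPRESS_POINTERS are used here illustratively, as an assumption about where the split leads.

#include <cstdint>

constexpr int kSystemPointerSize = sizeof(void*);  // machine word size

#ifdef V8_COMPRESS_POINTERS
constexpr int kTaggedSize = 4;                     // compressed on-heap slot
#else
constexpr int kTaggedSize = kSystemPointerSize;
#endif

// Frame-layout math for the regexp registers must use the system pointer
// size, as in register_location() above:
constexpr intptr_t RegisterSlotOffset(intptr_t register_zero, int index) {
  return register_zero - index * kSystemPointerSize;
}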
diff --git a/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.h b/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.h
index 59d4b43397..10ef0b5035 100644
--- a/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.h
+++ b/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.h
@@ -79,26 +79,11 @@ class RegExpMacroAssemblerX64: public NativeRegExpMacroAssembler {
void ClearRegisters(int reg_from, int reg_to) override;
void WriteStackPointerToRegister(int reg) override;
- static Result Match(Handle<Code> regexp,
- Handle<String> subject,
- int* offsets_vector,
- int offsets_vector_length,
- int previous_index,
- Isolate* isolate);
-
- static Result Execute(Code* code,
- String* input,
- int start_offset,
- const byte* input_start,
- const byte* input_end,
- int* output,
- bool at_start);
-
// Called from RegExp if the stack-guard is triggered.
// If the code object is relocated, the return address is fixed before
// returning.
- static int CheckStackGuardState(Address* return_address,
- Code* re_code,
+ // {raw_code} is an Address because this is called via ExternalReference.
+ static int CheckStackGuardState(Address* return_address, Address raw_code,
Address re_frame);
private:
@@ -162,16 +147,18 @@ class RegExpMacroAssemblerX64: public NativeRegExpMacroAssembler {
static const int kLastCalleeSaveRegister = kBackup_rbx;
#endif
- static const int kSuccessfulCaptures = kLastCalleeSaveRegister - kPointerSize;
+ static const int kSuccessfulCaptures =
+ kLastCalleeSaveRegister - kSystemPointerSize;
// When adding local variables remember to push space for them in
// the frame in GetCode.
- static const int kStringStartMinusOne = kSuccessfulCaptures - kPointerSize;
+ static const int kStringStartMinusOne =
+ kSuccessfulCaptures - kSystemPointerSize;
// First register address. Following registers are below it on the stack.
- static const int kRegisterZero = kStringStartMinusOne - kPointerSize;
+ static const int kRegisterZero = kStringStartMinusOne - kSystemPointerSize;
// Initial size of code buffer.
- static const size_t kRegExpCodeSize = 1024;
+ static const int kRegExpCodeSize = 1024;
// Load a number of characters at the given offset from the
// current position, into the current-character register.
diff --git a/deps/v8/src/register-arch.h b/deps/v8/src/register-arch.h
new file mode 100644
index 0000000000..4a5499892e
--- /dev/null
+++ b/deps/v8/src/register-arch.h
@@ -0,0 +1,31 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_REGISTER_ARCH_H_
+#define V8_REGISTER_ARCH_H_
+
+#include "src/register.h"
+#include "src/reglist.h"
+
+#if V8_TARGET_ARCH_IA32
+#include "src/ia32/register-ia32.h"
+#elif V8_TARGET_ARCH_X64
+#include "src/x64/register-x64.h"
+#elif V8_TARGET_ARCH_ARM64
+#include "src/arm64/register-arm64.h"
+#elif V8_TARGET_ARCH_ARM
+#include "src/arm/register-arm.h"
+#elif V8_TARGET_ARCH_PPC
+#include "src/ppc/register-ppc.h"
+#elif V8_TARGET_ARCH_MIPS
+#include "src/mips/register-mips.h"
+#elif V8_TARGET_ARCH_MIPS64
+#include "src/mips64/register-mips64.h"
+#elif V8_TARGET_ARCH_S390
+#include "src/s390/register-s390.h"
+#else
+#error Unknown architecture.
+#endif
+
+#endif // V8_REGISTER_ARCH_H_
diff --git a/deps/v8/src/register-configuration.cc b/deps/v8/src/register-configuration.cc
index 1c4831ef75..7a70d432d3 100644
--- a/deps/v8/src/register-configuration.cc
+++ b/deps/v8/src/register-configuration.cc
@@ -3,8 +3,10 @@
// found in the LICENSE file.
#include "src/register-configuration.h"
+#include "src/base/lazy-instance.h"
+#include "src/cpu-features.h"
#include "src/globals.h"
-#include "src/macro-assembler.h"
+#include "src/register-arch.h"
namespace v8 {
namespace internal {
@@ -31,30 +33,6 @@ static const int kAllocatableNoVFP32DoubleCodes[] = {
#endif // V8_TARGET_ARCH_ARM
#undef REGISTER_CODE
-static const char* const kGeneralRegisterNames[] = {
-#define REGISTER_NAME(R) #R,
- GENERAL_REGISTERS(REGISTER_NAME)
-#undef REGISTER_NAME
-};
-
-static const char* const kFloatRegisterNames[] = {
-#define REGISTER_NAME(R) #R,
- FLOAT_REGISTERS(REGISTER_NAME)
-#undef REGISTER_NAME
-};
-
-static const char* const kDoubleRegisterNames[] = {
-#define REGISTER_NAME(R) #R,
- DOUBLE_REGISTERS(REGISTER_NAME)
-#undef REGISTER_NAME
-};
-
-static const char* const kSimd128RegisterNames[] = {
-#define REGISTER_NAME(R) #R,
- SIMD128_REGISTERS(REGISTER_NAME)
-#undef REGISTER_NAME
-};
-
STATIC_ASSERT(RegisterConfiguration::kMaxGeneralRegisters >=
Register::kNumRegisters);
STATIC_ASSERT(RegisterConfiguration::kMaxFPRegisters >=
@@ -89,6 +67,8 @@ static int get_num_allocatable_double_registers() {
#endif
}
+#undef REGISTER_COUNT
+
static const int* get_allocatable_double_codes() {
return
#if V8_TARGET_ARCH_ARM
@@ -107,20 +87,12 @@ class ArchDefaultRegisterConfiguration : public RegisterConfiguration {
kMaxAllocatableGeneralRegisterCount,
get_num_allocatable_double_registers(), kAllocatableGeneralCodes,
get_allocatable_double_codes(),
- kSimpleFPAliasing ? AliasingKind::OVERLAP : AliasingKind::COMBINE,
- kGeneralRegisterNames, kFloatRegisterNames, kDoubleRegisterNames,
- kSimd128RegisterNames) {}
-};
-
-struct RegisterConfigurationInitializer {
- static void Construct(void* config) {
- new (config) ArchDefaultRegisterConfiguration();
+ kSimpleFPAliasing ? AliasingKind::OVERLAP : AliasingKind::COMBINE) {
}
};
-static base::LazyInstance<ArchDefaultRegisterConfiguration,
- RegisterConfigurationInitializer>::type
- kDefaultRegisterConfiguration = LAZY_INSTANCE_INITIALIZER;
+DEFINE_LAZY_LEAKY_OBJECT_GETTER(ArchDefaultRegisterConfiguration,
+ GetDefaultRegisterConfiguration);
// Allocatable registers with the masking register removed.
class ArchDefaultPoisoningRegisterConfiguration : public RegisterConfiguration {
@@ -131,9 +103,8 @@ class ArchDefaultPoisoningRegisterConfiguration : public RegisterConfiguration {
kMaxAllocatableGeneralRegisterCount - 1,
get_num_allocatable_double_registers(),
InitializeGeneralRegisterCodes(), get_allocatable_double_codes(),
- kSimpleFPAliasing ? AliasingKind::OVERLAP : AliasingKind::COMBINE,
- kGeneralRegisterNames, kFloatRegisterNames, kDoubleRegisterNames,
- kSimd128RegisterNames) {}
+ kSimpleFPAliasing ? AliasingKind::OVERLAP : AliasingKind::COMBINE) {
+ }
private:
static const int* InitializeGeneralRegisterCodes() {
@@ -156,64 +127,8 @@ class ArchDefaultPoisoningRegisterConfiguration : public RegisterConfiguration {
int ArchDefaultPoisoningRegisterConfiguration::allocatable_general_codes_
[kMaxAllocatableGeneralRegisterCount - 1];
-struct PoisoningRegisterConfigurationInitializer {
- static void Construct(void* config) {
- new (config) ArchDefaultPoisoningRegisterConfiguration();
- }
-};
-
-static base::LazyInstance<ArchDefaultPoisoningRegisterConfiguration,
- PoisoningRegisterConfigurationInitializer>::type
- kDefaultPoisoningRegisterConfiguration = LAZY_INSTANCE_INITIALIZER;
-
-#if defined(V8_TARGET_ARCH_IA32) && defined(V8_EMBEDDED_BUILTINS)
-// Allocatable registers with the root register removed.
-// TODO(v8:6666): Once all builtins have been migrated, we could remove this
-// configuration and remove kRootRegister from ALLOCATABLE_GENERAL_REGISTERS
-// instead.
-class ArchPreserveRootIA32RegisterConfiguration : public RegisterConfiguration {
- public:
- ArchPreserveRootIA32RegisterConfiguration()
- : RegisterConfiguration(
- Register::kNumRegisters, DoubleRegister::kNumRegisters,
- kMaxAllocatableGeneralRegisterCount - 1,
- get_num_allocatable_double_registers(),
- InitializeGeneralRegisterCodes(), get_allocatable_double_codes(),
- kSimpleFPAliasing ? AliasingKind::OVERLAP : AliasingKind::COMBINE,
- kGeneralRegisterNames, kFloatRegisterNames, kDoubleRegisterNames,
- kSimd128RegisterNames) {}
-
- private:
- static const int* InitializeGeneralRegisterCodes() {
- int filtered_index = 0;
- for (int i = 0; i < kMaxAllocatableGeneralRegisterCount; ++i) {
- if (kAllocatableGeneralCodes[i] != kRootRegister.code()) {
- allocatable_general_codes_[filtered_index] =
- kAllocatableGeneralCodes[i];
- filtered_index++;
- }
- }
- DCHECK_EQ(filtered_index, kMaxAllocatableGeneralRegisterCount - 1);
- return allocatable_general_codes_;
- }
-
- static int
- allocatable_general_codes_[kMaxAllocatableGeneralRegisterCount - 1];
-};
-
-int ArchPreserveRootIA32RegisterConfiguration::allocatable_general_codes_
- [kMaxAllocatableGeneralRegisterCount - 1];
-
-struct PreserveRootIA32RegisterConfigurationInitializer {
- static void Construct(void* config) {
- new (config) ArchPreserveRootIA32RegisterConfiguration();
- }
-};
-
-static base::LazyInstance<ArchPreserveRootIA32RegisterConfiguration,
- PreserveRootIA32RegisterConfigurationInitializer>::
- type kPreserveRootIA32RegisterConfiguration = LAZY_INSTANCE_INITIALIZER;
-#endif // defined(V8_TARGET_ARCH_IA32) && defined(V8_EMBEDDED_BUILTINS)
+DEFINE_LAZY_LEAKY_OBJECT_GETTER(ArchDefaultPoisoningRegisterConfiguration,
+ GetDefaultPoisoningRegisterConfiguration);
// RestrictedRegisterConfiguration uses the subset of allocatable general
// registers the architecture support, which results into generating assembly
@@ -230,9 +145,7 @@ class RestrictedRegisterConfiguration : public RegisterConfiguration {
get_num_allocatable_double_registers(),
allocatable_general_register_codes.get(),
get_allocatable_double_codes(),
- kSimpleFPAliasing ? AliasingKind::OVERLAP : AliasingKind::COMBINE,
- kGeneralRegisterNames, kFloatRegisterNames, kDoubleRegisterNames,
- kSimd128RegisterNames),
+ kSimpleFPAliasing ? AliasingKind::OVERLAP : AliasingKind::COMBINE),
allocatable_general_register_codes_(
std::move(allocatable_general_register_codes)),
allocatable_general_register_names_(
@@ -260,18 +173,12 @@ class RestrictedRegisterConfiguration : public RegisterConfiguration {
} // namespace
const RegisterConfiguration* RegisterConfiguration::Default() {
- return &kDefaultRegisterConfiguration.Get();
+ return GetDefaultRegisterConfiguration();
}
const RegisterConfiguration* RegisterConfiguration::Poisoning() {
- return &kDefaultPoisoningRegisterConfiguration.Get();
-}
-
-#if defined(V8_TARGET_ARCH_IA32) && defined(V8_EMBEDDED_BUILTINS)
-const RegisterConfiguration* RegisterConfiguration::PreserveRootIA32() {
- return &kPreserveRootIA32RegisterConfiguration.Get();
+ return GetDefaultPoisoningRegisterConfiguration();
}
-#endif // defined(V8_TARGET_ARCH_IA32) && defined(V8_EMBEDDED_BUILTINS)
const RegisterConfiguration* RegisterConfiguration::RestrictGeneralRegisters(
RegList registers) {
@@ -284,7 +191,7 @@ const RegisterConfiguration* RegisterConfiguration::RestrictGeneralRegisters(
if (reg.bit() & registers) {
DCHECK(counter < num);
codes[counter] = reg.code();
- names[counter] = Default()->GetGeneralRegisterName(i);
+ names[counter] = RegisterName(Register::from_code(i));
counter++;
}
}
@@ -297,10 +204,7 @@ RegisterConfiguration::RegisterConfiguration(
int num_general_registers, int num_double_registers,
int num_allocatable_general_registers, int num_allocatable_double_registers,
const int* allocatable_general_codes, const int* allocatable_double_codes,
- AliasingKind fp_aliasing_kind, const char* const* general_register_names,
- const char* const* float_register_names,
- const char* const* double_register_names,
- const char* const* simd128_register_names)
+ AliasingKind fp_aliasing_kind)
: num_general_registers_(num_general_registers),
num_float_registers_(0),
num_double_registers_(num_double_registers),
@@ -315,11 +219,7 @@ RegisterConfiguration::RegisterConfiguration(
allocatable_simd128_codes_mask_(0),
allocatable_general_codes_(allocatable_general_codes),
allocatable_double_codes_(allocatable_double_codes),
- fp_aliasing_kind_(fp_aliasing_kind),
- general_register_names_(general_register_names),
- float_register_names_(float_register_names),
- double_register_names_(double_register_names),
- simd128_register_names_(simd128_register_names) {
+ fp_aliasing_kind_(fp_aliasing_kind) {
DCHECK_LE(num_general_registers_,
RegisterConfiguration::kMaxGeneralRegisters);
DCHECK_LE(num_double_registers_, RegisterConfiguration::kMaxFPRegisters);
@@ -371,12 +271,6 @@ RegisterConfiguration::RegisterConfiguration(
}
}
-const char* RegisterConfiguration::GetGeneralOrSpecialRegisterName(
- int code) const {
- if (code < num_general_registers_) return GetGeneralRegisterName(code);
- return Assembler::GetSpecialRegisterName(code);
-}
-
// Assert that kFloat32, kFloat64, and kSimd128 are consecutive values.
STATIC_ASSERT(static_cast<int>(MachineRepresentation::kSimd128) ==
static_cast<int>(MachineRepresentation::kFloat64) + 1);
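
A hedged sketch of what DEFINE_LAZY_LEAKY_OBJECT_GETTER above boils down to: a function-local static constructed on first use and deliberately never destroyed, replacing the base::LazyInstance plus Construct-functor boilerplate that this hunk deletes. GetLeakyObject is an invented helper, not V8's exact expansion.

template <typename T>
T* GetLeakyObject() {
  static T* const instance = new T();  // leaked by design; thread-safe init
  return instance;
}

// Usage mirroring the patch:
//   const RegisterConfiguration* RegisterConfiguration::Default() {
//     return GetLeakyObject<ArchDefaultRegisterConfiguration>();
//   }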
diff --git a/deps/v8/src/register-configuration.h b/deps/v8/src/register-configuration.h
index 46e2df79e9..f1c2c6cbc0 100644
--- a/deps/v8/src/register-configuration.h
+++ b/deps/v8/src/register-configuration.h
@@ -9,6 +9,7 @@
#include "src/globals.h"
#include "src/machine-type.h"
#include "src/reglist.h"
+#include "src/utils.h"
namespace v8 {
namespace internal {
@@ -25,8 +26,10 @@ class V8_EXPORT_PRIVATE RegisterConfiguration {
};
// Architecture independent maxes.
- static const int kMaxGeneralRegisters = 32;
- static const int kMaxFPRegisters = 32;
+ static constexpr int kMaxGeneralRegisters = 32;
+ static constexpr int kMaxFPRegisters = 32;
+ static constexpr int kMaxRegisters =
+ Max(kMaxFPRegisters, kMaxGeneralRegisters);
// Default RegisterConfigurations for the target architecture.
static const RegisterConfiguration* Default();
@@ -34,9 +37,6 @@ class V8_EXPORT_PRIVATE RegisterConfiguration {
// Register configuration with reserved masking register.
static const RegisterConfiguration* Poisoning();
- // Register configuration with reserved root register on ia32.
- static const RegisterConfiguration* PreserveRootIA32();
-
static const RegisterConfiguration* RestrictGeneralRegisters(
RegList registers);
@@ -45,11 +45,7 @@ class V8_EXPORT_PRIVATE RegisterConfiguration {
int num_allocatable_double_registers,
const int* allocatable_general_codes,
const int* allocatable_double_codes,
- AliasingKind fp_aliasing_kind,
- char const* const* general_names,
- char const* const* float_names,
- char const* const* double_names,
- char const* const* simd128_names);
+ AliasingKind fp_aliasing_kind);
int num_general_registers() const { return num_general_registers_; }
int num_float_registers() const { return num_float_registers_; }
@@ -105,20 +101,7 @@ class V8_EXPORT_PRIVATE RegisterConfiguration {
bool IsAllocatableSimd128Code(int index) const {
return ((1 << index) & allocatable_simd128_codes_mask_) != 0;
}
- const char* GetGeneralOrSpecialRegisterName(int code) const;
- const char* GetGeneralRegisterName(int code) const {
- DCHECK_LT(code, num_general_registers_);
- return general_register_names_[code];
- }
- const char* GetFloatRegisterName(int code) const {
- return float_register_names_[code];
- }
- const char* GetDoubleRegisterName(int code) const {
- return double_register_names_[code];
- }
- const char* GetSimd128RegisterName(int code) const {
- return simd128_register_names_[code];
- }
+
const int* allocatable_general_codes() const {
return allocatable_general_codes_;
}
@@ -164,10 +147,6 @@ class V8_EXPORT_PRIVATE RegisterConfiguration {
const int* allocatable_double_codes_;
int allocatable_simd128_codes_[kMaxFPRegisters];
AliasingKind fp_aliasing_kind_;
- char const* const* general_register_names_;
- char const* const* float_register_names_;
- char const* const* double_register_names_;
- char const* const* simd128_register_names_;
};
} // namespace internal
diff --git a/deps/v8/src/register.h b/deps/v8/src/register.h
new file mode 100644
index 0000000000..f1f803a340
--- /dev/null
+++ b/deps/v8/src/register.h
@@ -0,0 +1,126 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_REGISTER_H_
+#define V8_REGISTER_H_
+
+#include "src/reglist.h"
+
+namespace v8 {
+
+namespace internal {
+
+// Base type for CPU Registers.
+//
+// 1) We would prefer to use an enum for registers, but enum values are
+// assignment-compatible with int, which has caused code-generation bugs.
+//
+// 2) By not using an enum, we are possibly preventing the compiler from
+// doing certain constant folds, which may significantly reduce the
+// code generated for some assembly instructions (because they boil down
+// to a few constants). If this is a problem, we could change the code
+// such that we use an enum in optimized mode, and the class in debug
+// mode. This way we get the compile-time error checking in debug mode
+// and best performance in optimized code.
+template <typename SubType, int kAfterLastRegister>
+class RegisterBase {
+ // Internal enum class; used for calling constexpr methods, where we need to
+ // pass an integral type as template parameter.
+ enum class RegisterCode : int { kFirst = 0, kAfterLast = kAfterLastRegister };
+
+ public:
+ static constexpr int kCode_no_reg = -1;
+ static constexpr int kNumRegisters = kAfterLastRegister;
+
+ static constexpr SubType no_reg() { return SubType{kCode_no_reg}; }
+
+ template <int code>
+ static constexpr SubType from_code() {
+ static_assert(code >= 0 && code < kNumRegisters, "must be valid reg code");
+ return SubType{code};
+ }
+
+ constexpr operator RegisterCode() const {
+ return static_cast<RegisterCode>(reg_code_);
+ }
+
+ template <RegisterCode reg_code>
+ static constexpr int code() {
+ static_assert(
+ reg_code >= RegisterCode::kFirst && reg_code < RegisterCode::kAfterLast,
+ "must be valid reg");
+ return static_cast<int>(reg_code);
+ }
+
+ template <RegisterCode reg_code>
+ static constexpr int is_valid() {
+ return static_cast<int>(reg_code) != kCode_no_reg;
+ }
+
+ template <RegisterCode reg_code>
+ static constexpr RegList bit() {
+ return is_valid<reg_code>() ? RegList{1} << code<reg_code>() : RegList{};
+ }
+
+ static SubType from_code(int code) {
+ DCHECK_LE(0, code);
+ DCHECK_GT(kNumRegisters, code);
+ return SubType{code};
+ }
+
+ // Constexpr version (pass registers as template parameters).
+ template <RegisterCode... reg_codes>
+ static constexpr RegList ListOf() {
+ return CombineRegLists(RegisterBase::bit<reg_codes>()...);
+ }
+
+ // Non-constexpr version (pass registers as method parameters).
+ template <typename... Register>
+ static RegList ListOf(Register... regs) {
+ return CombineRegLists(regs.bit()...);
+ }
+
+ constexpr bool is_valid() const { return reg_code_ != kCode_no_reg; }
+
+ int code() const {
+ DCHECK(is_valid());
+ return reg_code_;
+ }
+
+ RegList bit() const { return is_valid() ? RegList{1} << code() : RegList{}; }
+
+ inline constexpr bool operator==(SubType other) const {
+ return reg_code_ == other.reg_code_;
+ }
+ inline constexpr bool operator!=(SubType other) const {
+ return reg_code_ != other.reg_code_;
+ }
+
+ // Used to print the name of some special registers.
+ static const char* GetSpecialRegisterName(int code) { return "UNKNOWN"; }
+
+ protected:
+ explicit constexpr RegisterBase(int code) : reg_code_(code) {}
+ int reg_code_;
+};
+
+template <typename RegType,
+ typename = decltype(RegisterName(std::declval<RegType>()))>
+inline std::ostream& operator<<(std::ostream& os, RegType reg) {
+ return os << RegisterName(reg);
+}
+
+// Helper macros to define a {RegisterName} method based on a macro list
+// containing all names.
+#define DEFINE_REGISTER_NAMES_NAME(name) #name,
+#define DEFINE_REGISTER_NAMES(RegType, LIST) \
+ inline const char* RegisterName(RegType reg) { \
+ static constexpr const char* Names[] = {LIST(DEFINE_REGISTER_NAMES_NAME)}; \
+ STATIC_ASSERT(arraysize(Names) == RegType::kNumRegisters); \
+ return reg.is_valid() ? Names[reg.code()] : "invalid"; \
+ }
+
+} // namespace internal
+} // namespace v8
+#endif // V8_REGISTER_H_
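
A minimal usage sketch for the RegisterBase template defined above. DemoRegister and DEMO_REGISTERS are invented; the per-architecture headers (e.g. src/x64/register-x64.h) follow this same shape with their real register lists. It assumes V8's STATIC_ASSERT and arraysize helpers are in scope, as DEFINE_REGISTER_NAMES requires.

#define DEMO_REGISTERS(V) V(r0) V(r1) V(r2)

enum DemoRegisterCode {
#define REGISTER_CODE(R) kDemoCode_##R,
  DEMO_REGISTERS(REGISTER_CODE)
#undef REGISTER_CODE
  kDemoAfterLast
};

class DemoRegister : public RegisterBase<DemoRegister, kDemoAfterLast> {
  // RegisterBase constructs SubType{code}, so it needs access to the
  // constructor below.
  friend class RegisterBase<DemoRegister, kDemoAfterLast>;
  explicit constexpr DemoRegister(int code) : RegisterBase(code) {}
};

DEFINE_REGISTER_NAMES(DemoRegister, DEMO_REGISTERS)

// DemoRegister r1 = DemoRegister::from_code(1);
// RegisterName(r1) yields "r1", and the SFINAE operator<< above makes
// "os << r1" print the same name.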
diff --git a/deps/v8/src/reloc-info.cc b/deps/v8/src/reloc-info.cc
index ec4a1c679d..8da70da65e 100644
--- a/deps/v8/src/reloc-info.cc
+++ b/deps/v8/src/reloc-info.cc
@@ -4,8 +4,8 @@
#include "src/reloc-info.h"
-#include "src/assembler-arch-inl.h"
-#include "src/code-stubs.h"
+#include "src/assembler-inl.h"
+#include "src/code-reference.h"
#include "src/deoptimize-reason.h"
#include "src/deoptimizer.h"
#include "src/heap/heap-write-barrier-inl.h"
@@ -158,9 +158,7 @@ void RelocInfoWriter::Write(const RelocInfo* rinfo) {
WriteShortTaggedPC(pc_delta, kWasmStubCallTag);
} else {
WriteModeAndPC(pc_delta, rmode);
- if (RelocInfo::IsComment(rmode)) {
- WriteData(rinfo->data());
- } else if (RelocInfo::IsDeoptReason(rmode)) {
+ if (RelocInfo::IsDeoptReason(rmode)) {
DCHECK_LT(rinfo->data(), 1 << kBitsPerByte);
WriteShortData(rinfo->data());
} else if (RelocInfo::IsConstPool(rmode) ||
@@ -249,13 +247,7 @@ void RelocIterator::next() {
AdvanceReadLongPCJump();
} else {
AdvanceReadPC();
- if (RelocInfo::IsComment(rmode)) {
- if (SetMode(rmode)) {
- AdvanceReadData();
- return;
- }
- Advance(kIntptrSize);
- } else if (RelocInfo::IsDeoptReason(rmode)) {
+ if (RelocInfo::IsDeoptReason(rmode)) {
Advance();
if (SetMode(rmode)) {
ReadShortData();
@@ -279,18 +271,22 @@ void RelocIterator::next() {
done_ = true;
}
-RelocIterator::RelocIterator(Code* code, int mode_mask)
+RelocIterator::RelocIterator(Code code, int mode_mask)
+ : RelocIterator(code, code->unchecked_relocation_info(), mode_mask) {}
+
+RelocIterator::RelocIterator(Code code, ByteArray relocation_info,
+ int mode_mask)
: RelocIterator(code, code->raw_instruction_start(), code->constant_pool(),
- code->relocation_end(), code->relocation_start(),
- mode_mask) {}
+ relocation_info->GetDataEndAddress(),
+ relocation_info->GetDataStartAddress(), mode_mask) {}
RelocIterator::RelocIterator(const CodeReference code_reference, int mode_mask)
- : RelocIterator(nullptr, code_reference.instruction_start(),
+ : RelocIterator(Code(), code_reference.instruction_start(),
code_reference.constant_pool(),
code_reference.relocation_end(),
code_reference.relocation_start(), mode_mask) {}
-RelocIterator::RelocIterator(EmbeddedData* embedded_data, Code* code,
+RelocIterator::RelocIterator(EmbeddedData* embedded_data, Code code,
int mode_mask)
: RelocIterator(
code, embedded_data->InstructionStartOfBuiltin(code->builtin_index()),
@@ -299,7 +295,7 @@ RelocIterator::RelocIterator(EmbeddedData* embedded_data, Code* code,
code->relocation_start(), mode_mask) {}
RelocIterator::RelocIterator(const CodeDesc& desc, int mode_mask)
- : RelocIterator(nullptr, reinterpret_cast<Address>(desc.buffer), 0,
+ : RelocIterator(Code(), reinterpret_cast<Address>(desc.buffer), 0,
desc.buffer + desc.buffer_size,
desc.buffer + desc.buffer_size - desc.reloc_size,
mode_mask) {}
@@ -307,11 +303,11 @@ RelocIterator::RelocIterator(const CodeDesc& desc, int mode_mask)
RelocIterator::RelocIterator(Vector<byte> instructions,
Vector<const byte> reloc_info, Address const_pool,
int mode_mask)
- : RelocIterator(nullptr, reinterpret_cast<Address>(instructions.start()),
+ : RelocIterator(Code(), reinterpret_cast<Address>(instructions.start()),
const_pool, reloc_info.start() + reloc_info.size(),
reloc_info.start(), mode_mask) {}
-RelocIterator::RelocIterator(Code* host, Address pc, Address constant_pool,
+RelocIterator::RelocIterator(Code host, Address pc, Address constant_pool,
const byte* pos, const byte* end, int mode_mask)
: pos_(pos), end_(end), mode_mask_(mode_mask) {
// Relocation info is read backwards.
@@ -369,19 +365,36 @@ void RelocInfo::set_target_address(Address target,
IsWasmCall(rmode_));
Assembler::set_target_address_at(pc_, constant_pool_, target,
icache_flush_mode);
- if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != nullptr &&
+ if (write_barrier_mode == UPDATE_WRITE_BARRIER && !host().is_null() &&
IsCodeTargetMode(rmode_)) {
- Code* target_code = Code::GetCodeFromTargetAddress(target);
+ Code target_code = Code::GetCodeFromTargetAddress(target);
MarkingBarrierForCode(host(), this, target_code);
}
}
+bool RelocInfo::HasTargetAddressAddress() const {
+ // TODO(jgruber): Investigate whether WASM_CALL is still appropriate on
+ // non-intel platforms now that wasm code is no longer on the heap.
+#if defined(V8_TARGET_ARCH_IA32) || defined(V8_TARGET_ARCH_X64)
+ static constexpr int kTargetAddressAddressModeMask =
+ ModeMask(CODE_TARGET) | ModeMask(EMBEDDED_OBJECT) |
+ ModeMask(EXTERNAL_REFERENCE) | ModeMask(OFF_HEAP_TARGET) |
+ ModeMask(RUNTIME_ENTRY) | ModeMask(WASM_CALL) | ModeMask(WASM_STUB_CALL);
+#else
+ static constexpr int kTargetAddressAddressModeMask =
+ ModeMask(CODE_TARGET) | ModeMask(RELATIVE_CODE_TARGET) |
+ ModeMask(EMBEDDED_OBJECT) | ModeMask(EXTERNAL_REFERENCE) |
+ ModeMask(OFF_HEAP_TARGET) | ModeMask(RUNTIME_ENTRY) | ModeMask(WASM_CALL);
+#endif
+ return (ModeMask(rmode_) & kTargetAddressAddressModeMask) != 0;
+}
+
bool RelocInfo::RequiresRelocationAfterCodegen(const CodeDesc& desc) {
RelocIterator it(desc, RelocInfo::PostCodegenRelocationMask());
return !it.done();
}
-bool RelocInfo::RequiresRelocation(Code* code) {
+bool RelocInfo::RequiresRelocation(Code code) {
RelocIterator it(code, RelocInfo::kApplyMask);
return !it.done();
}
@@ -399,8 +412,6 @@ const char* RelocInfo::RelocModeName(RelocInfo::Mode rmode) {
return "relative code target";
case RUNTIME_ENTRY:
return "runtime entry";
- case COMMENT:
- return "comment";
case EXTERNAL_REFERENCE:
return "external reference";
case INTERNAL_REFERENCE:
@@ -425,8 +436,6 @@ const char* RelocInfo::RelocModeName(RelocInfo::Mode rmode) {
return "internal wasm call";
case WASM_STUB_CALL:
return "wasm stub call";
- case JS_TO_WASM_CALL:
- return "js to wasm call";
case NUMBER_OF_MODES:
case PC_JUMP:
UNREACHABLE();
@@ -436,9 +445,7 @@ const char* RelocInfo::RelocModeName(RelocInfo::Mode rmode) {
void RelocInfo::Print(Isolate* isolate, std::ostream& os) { // NOLINT
os << reinterpret_cast<const void*>(pc_) << " " << RelocModeName(rmode_);
- if (IsComment(rmode_)) {
- os << " (" << reinterpret_cast<char*>(data_) << ")";
- } else if (rmode_ == DEOPT_SCRIPT_OFFSET || rmode_ == DEOPT_INLINING_ID) {
+ if (rmode_ == DEOPT_SCRIPT_OFFSET || rmode_ == DEOPT_INLINING_ID) {
os << " (" << data() << ")";
} else if (rmode_ == DEOPT_REASON) {
os << " ("
@@ -456,22 +463,19 @@ void RelocInfo::Print(Isolate* isolate, std::ostream& os) { // NOLINT
<< ")";
} else if (IsCodeTargetMode(rmode_)) {
const Address code_target = target_address();
- Code* code = Code::GetCodeFromTargetAddress(code_target);
+ Code code = Code::GetCodeFromTargetAddress(code_target);
DCHECK(code->IsCode());
os << " (" << Code::Kind2String(code->kind());
if (Builtins::IsBuiltin(code)) {
os << " " << Builtins::name(code->builtin_index());
- } else if (code->kind() == Code::STUB) {
- os << " " << CodeStub::MajorName(CodeStub::GetMajorKey(code));
}
os << ") (" << reinterpret_cast<const void*>(target_address()) << ")";
} else if (IsRuntimeEntry(rmode_) && isolate->deoptimizer_data() != nullptr) {
// Deoptimization bailouts are stored as runtime entries.
DeoptimizeKind type;
if (Deoptimizer::IsDeoptimizationEntry(isolate, target_address(), &type)) {
- int id = GetDeoptimizationId(isolate, type);
- os << " (" << Deoptimizer::MessageFor(type) << " deoptimization bailout "
- << id << ")";
+ os << " (" << Deoptimizer::MessageFor(type)
+ << " deoptimization bailout)";
}
} else if (IsConstPool(rmode_)) {
os << " (size " << static_cast<int>(data_) << ")";
@@ -493,8 +497,8 @@ void RelocInfo::Verify(Isolate* isolate) {
Address addr = target_address();
CHECK_NE(addr, kNullAddress);
// Check that we can find the right code object.
- Code* code = Code::GetCodeFromTargetAddress(addr);
- Object* found = isolate->FindCodeObject(addr);
+ Code code = Code::GetCodeFromTargetAddress(addr);
+ Object found = isolate->FindCodeObject(addr);
CHECK(found->IsCode());
CHECK(code->address() == HeapObject::cast(found)->address());
break;
@@ -503,7 +507,7 @@ void RelocInfo::Verify(Isolate* isolate) {
case INTERNAL_REFERENCE_ENCODED: {
Address target = target_internal_reference();
Address pc = target_internal_reference_address();
- Code* code = Code::cast(isolate->FindCodeObject(pc));
+ Code code = Code::cast(isolate->FindCodeObject(pc));
CHECK(target >= code->InstructionStart());
CHECK(target <= code->InstructionEnd());
break;
@@ -511,11 +515,10 @@ void RelocInfo::Verify(Isolate* isolate) {
case OFF_HEAP_TARGET: {
Address addr = target_off_heap_target();
CHECK_NE(addr, kNullAddress);
- CHECK_NOT_NULL(InstructionStream::TryLookupCode(isolate, addr));
+ CHECK(!InstructionStream::TryLookupCode(isolate, addr).is_null());
break;
}
case RUNTIME_ENTRY:
- case COMMENT:
case EXTERNAL_REFERENCE:
case DEOPT_SCRIPT_OFFSET:
case DEOPT_INLINING_ID:
@@ -525,7 +528,6 @@ void RelocInfo::Verify(Isolate* isolate) {
case VENEER_POOL:
case WASM_CALL:
case WASM_STUB_CALL:
- case JS_TO_WASM_CALL:
case NONE:
break;
case NUMBER_OF_MODES:
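
A small sketch of the bitmask membership test behind the new RelocInfo::HasTargetAddressAddress above: each Mode value owns one bit, a set of modes is a precomputed constant mask, and membership is a single AND. The enum here is truncated for illustration; ModeMask matches the helper used in the hunk.

enum Mode : int { CODE_TARGET, EMBEDDED_OBJECT, EXTERNAL_REFERENCE, RUNTIME_ENTRY };

constexpr int ModeMask(Mode mode) { return 1 << mode; }

constexpr int kTargetAddressAddressModeMask =
    ModeMask(CODE_TARGET) | ModeMask(EMBEDDED_OBJECT);

constexpr bool HasTargetAddressAddress(Mode rmode) {
  return (ModeMask(rmode) & kTargetAddressAddressModeMask) != 0;
}

static_assert(HasTargetAddressAddress(CODE_TARGET) &&
                  !HasTargetAddressAddress(RUNTIME_ENTRY),
              "mask membership");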
diff --git a/deps/v8/src/reloc-info.h b/deps/v8/src/reloc-info.h
index 5f7071f845..26ab7b084e 100644
--- a/deps/v8/src/reloc-info.h
+++ b/deps/v8/src/reloc-info.h
@@ -7,6 +7,7 @@
#include "src/globals.h"
#include "src/objects.h"
+#include "src/objects/code.h"
namespace v8 {
namespace internal {
@@ -57,12 +58,10 @@ class RelocInfo {
RELATIVE_CODE_TARGET, // LAST_CODE_TARGET_MODE
EMBEDDED_OBJECT, // LAST_GCED_ENUM
- JS_TO_WASM_CALL,
WASM_CALL, // FIRST_SHAREABLE_RELOC_MODE
WASM_STUB_CALL,
RUNTIME_ENTRY,
- COMMENT,
EXTERNAL_REFERENCE, // The address of an external C++ function.
INTERNAL_REFERENCE, // An address inside the same function.
@@ -102,7 +101,7 @@ class RelocInfo {
RelocInfo() = default;
- RelocInfo(Address pc, Mode rmode, intptr_t data, Code* host,
+ RelocInfo(Address pc, Mode rmode, intptr_t data, Code host,
Address constant_pool = kNullAddress)
: pc_(pc),
rmode_(rmode),
@@ -137,10 +136,10 @@ class RelocInfo {
return mode == RUNTIME_ENTRY;
}
static constexpr bool IsWasmCall(Mode mode) { return mode == WASM_CALL; }
+ static constexpr bool IsWasmReference(Mode mode) { return mode == WASM_CALL; }
static constexpr bool IsWasmStubCall(Mode mode) {
return mode == WASM_STUB_CALL;
}
- static constexpr bool IsComment(Mode mode) { return mode == COMMENT; }
static constexpr bool IsConstPool(Mode mode) { return mode == CONST_POOL; }
static constexpr bool IsVeneerPool(Mode mode) { return mode == VENEER_POOL; }
static constexpr bool IsDeoptPosition(Mode mode) {
@@ -163,15 +162,6 @@ class RelocInfo {
return mode == OFF_HEAP_TARGET;
}
static constexpr bool IsNone(Mode mode) { return mode == NONE; }
- static constexpr bool IsWasmReference(Mode mode) {
- return IsWasmPtrReference(mode);
- }
- static constexpr bool IsJsToWasmCall(Mode mode) {
- return mode == JS_TO_WASM_CALL;
- }
- static constexpr bool IsWasmPtrReference(Mode mode) {
- return mode == WASM_CALL || mode == JS_TO_WASM_CALL;
- }
static bool IsOnlyForSerializer(Mode mode) {
#ifdef V8_TARGET_ARCH_IA32
@@ -192,7 +182,7 @@ class RelocInfo {
Address pc() const { return pc_; }
Mode rmode() const { return rmode_; }
intptr_t data() const { return data_; }
- Code* host() const { return host_; }
+ Code host() const { return host_; }
Address constant_pool() const { return constant_pool_; }
// Apply a relocation by delta bytes. When the code object is moved, PC
@@ -214,14 +204,8 @@ class RelocInfo {
// constant pool, otherwise the pointer is embedded in the instruction stream.
bool IsInConstantPool();
- // Returns the deoptimization id for the entry associated with the reloc info
- // where {kind} is the deoptimization kind.
- // This is only used for printing RUNTIME_ENTRY relocation info.
- int GetDeoptimizationId(Isolate* isolate, DeoptimizeKind kind);
-
Address wasm_call_address() const;
Address wasm_stub_call_address() const;
- Address js_to_wasm_address() const;
uint32_t wasm_call_tag() const;
@@ -229,8 +213,6 @@ class RelocInfo {
Address, ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
void set_wasm_stub_call_address(
Address, ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
- void set_js_to_wasm_address(
- Address, ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
void set_target_address(
Address target,
@@ -240,10 +222,10 @@ class RelocInfo {
// this relocation applies to;
// can only be called if IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)
V8_INLINE Address target_address();
- V8_INLINE HeapObject* target_object();
+ V8_INLINE HeapObject target_object();
V8_INLINE Handle<HeapObject> target_object_handle(Assembler* origin);
V8_INLINE void set_target_object(
- Heap* heap, HeapObject* target,
+ Heap* heap, HeapObject target,
WriteBarrierMode write_barrier_mode = UPDATE_WRITE_BARRIER,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
V8_INLINE Address target_runtime_entry(Assembler* origin);
@@ -252,11 +234,6 @@ class RelocInfo {
WriteBarrierMode write_barrier_mode = UPDATE_WRITE_BARRIER,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
V8_INLINE Address target_off_heap_target();
- V8_INLINE Cell* target_cell();
- V8_INLINE Handle<Cell> target_cell_handle();
- V8_INLINE void set_target_cell(
- Cell* cell, WriteBarrierMode write_barrier_mode = UPDATE_WRITE_BARRIER,
- ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
V8_INLINE void set_target_external_reference(
Address, ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
@@ -271,6 +248,7 @@ class RelocInfo {
// output before the next target. Architecture-independent code shouldn't
// dereference the pointer it gets back from this.
V8_INLINE Address target_address_address();
+ bool HasTargetAddressAddress() const;
// This indicates how much space a target takes up when deserializing a code
// stream. For most architectures this is just the size of a pointer. For
@@ -305,7 +283,7 @@ class RelocInfo {
// Check whether the given code contains relocation information that
// either is position-relative or movable by the garbage collector.
static bool RequiresRelocationAfterCodegen(const CodeDesc& desc);
- static bool RequiresRelocation(Code* code);
+ static bool RequiresRelocation(Code code);
#ifdef ENABLE_DISASSEMBLER
// Printing
@@ -334,7 +312,7 @@ class RelocInfo {
Address pc_;
Mode rmode_;
intptr_t data_ = 0;
- Code* host_;
+ Code host_;
Address constant_pool_ = kNullAddress;
friend class RelocIterator;
};
@@ -392,9 +370,9 @@ class RelocIterator : public Malloced {
// the beginning of the reloc info.
// Relocation information with mode k is included in the
// iteration iff bit k of mode_mask is set.
- explicit RelocIterator(Code* code, int mode_mask = -1);
- explicit RelocIterator(EmbeddedData* embedded_data, Code* code,
- int mode_mask);
+ explicit RelocIterator(Code code, int mode_mask = -1);
+ explicit RelocIterator(Code code, ByteArray relocation_info, int mode_mask);
+ explicit RelocIterator(EmbeddedData* embedded_data, Code code, int mode_mask);
explicit RelocIterator(const CodeDesc& desc, int mode_mask = -1);
explicit RelocIterator(const CodeReference code_reference,
int mode_mask = -1);
@@ -414,7 +392,7 @@ class RelocIterator : public Malloced {
}
private:
- RelocIterator(Code* host, Address pc, Address constant_pool, const byte* pos,
+ RelocIterator(Code host, Address pc, Address constant_pool, const byte* pos,
const byte* end, int mode_mask);
// Advance* moves the position before/after reading.
diff --git a/deps/v8/src/roots-inl.h b/deps/v8/src/roots-inl.h
index fc6f86c8be..0eadb79555 100644
--- a/deps/v8/src/roots-inl.h
+++ b/deps/v8/src/roots-inl.h
@@ -7,12 +7,20 @@
#include "src/roots.h"
+#include "src/feedback-vector.h"
+#include "src/handles.h"
#include "src/heap/heap-inl.h"
+#include "src/objects/api-callbacks.h"
+#include "src/objects/descriptor-array.h"
+#include "src/objects/literal-objects.h"
+#include "src/objects/map.h"
+#include "src/objects/scope-info.h"
+#include "src/objects/slots.h"
namespace v8 {
namespace internal {
-V8_INLINE bool operator<(RootIndex lhs, RootIndex rhs) {
+V8_INLINE constexpr bool operator<(RootIndex lhs, RootIndex rhs) {
typedef typename std::underlying_type<RootIndex>::type type;
return static_cast<type>(lhs) < static_cast<type>(rhs);
}
@@ -23,56 +31,57 @@ V8_INLINE RootIndex operator++(RootIndex& index) {
return index;
}
-ReadOnlyRoots::ReadOnlyRoots(Heap* heap) : roots_table_(heap->roots_table()) {}
+bool RootsTable::IsRootHandleLocation(Address* handle_location,
+ RootIndex* index) const {
+ FullObjectSlot location(handle_location);
+ FullObjectSlot first_root(&roots_[0]);
+ FullObjectSlot last_root(&roots_[kEntriesCount]);
+ if (location >= last_root) return false;
+ if (location < first_root) return false;
+ *index = static_cast<RootIndex>(location - first_root);
+ return true;
+}
+
+template <typename T>
+bool RootsTable::IsRootHandle(Handle<T> handle, RootIndex* index) const {
+ // This can't use handle.location() because it is called from places
+ // where handle dereferencing is disallowed. Comparing the handle's
+ // location against the root handle list is safe though.
+ Address* handle_location = reinterpret_cast<Address*>(handle.address());
+ return IsRootHandleLocation(handle_location, index);
+}
+
+ReadOnlyRoots::ReadOnlyRoots(Heap* heap)
+ : roots_table_(heap->isolate()->roots_table()) {}
ReadOnlyRoots::ReadOnlyRoots(Isolate* isolate)
- : roots_table_(isolate->heap()->roots_table()) {}
-
-#define ROOT_ACCESSOR(type, name, CamelName) \
- type* ReadOnlyRoots::name() { \
- return type::cast(roots_table_[RootIndex::k##CamelName]); \
- } \
- Handle<type> ReadOnlyRoots::name##_handle() { \
- return Handle<type>( \
- bit_cast<type**>(&roots_table_[RootIndex::k##CamelName])); \
+ : roots_table_(isolate->roots_table()) {}
+
+#define ROOT_ACCESSOR(Type, name, CamelName) \
+ Type ReadOnlyRoots::name() const { \
+ return Type::cast(Object(roots_table_[RootIndex::k##CamelName])); \
+ } \
+ Handle<Type> ReadOnlyRoots::name##_handle() const { \
+ return Handle<Type>(&roots_table_[RootIndex::k##CamelName]); \
}
READ_ONLY_ROOT_LIST(ROOT_ACCESSOR)
#undef ROOT_ACCESSOR
-Map* ReadOnlyRoots::MapForFixedTypedArray(ExternalArrayType array_type) {
+Map ReadOnlyRoots::MapForFixedTypedArray(ExternalArrayType array_type) {
RootIndex root_index = RootsTable::RootIndexForFixedTypedArray(array_type);
- return Map::cast(roots_table_[root_index]);
+ return Map::cast(Object(roots_table_[root_index]));
}
-Map* ReadOnlyRoots::MapForFixedTypedArray(ElementsKind elements_kind) {
+Map ReadOnlyRoots::MapForFixedTypedArray(ElementsKind elements_kind) {
RootIndex root_index = RootsTable::RootIndexForFixedTypedArray(elements_kind);
- return Map::cast(roots_table_[root_index]);
+ return Map::cast(Object(roots_table_[root_index]));
}
-FixedTypedArrayBase* ReadOnlyRoots::EmptyFixedTypedArrayForMap(const Map* map) {
+FixedTypedArrayBase ReadOnlyRoots::EmptyFixedTypedArrayForMap(const Map map) {
RootIndex root_index =
RootsTable::RootIndexForEmptyFixedTypedArray(map->elements_kind());
- return FixedTypedArrayBase::cast(roots_table_[root_index]);
-}
-
-Object** RootsTable::read_only_roots_end() {
-// Enumerate the read-only roots into an expression of the form:
-// (root_1, root_2, root_3, ..., root_n)
-// This evaluates to root_n, but Clang warns that the other values in the list
-// are unused so suppress that warning.
-#if defined(__GNUC__) || defined(__clang__)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wunused-value"
-#endif
-#define ROOT(type, name, CamelName) , RootIndex::k##CamelName
- constexpr RootIndex kLastReadOnlyRoot =
- (RootIndex::kFirstRoot READ_ONLY_ROOT_LIST(ROOT));
-#undef ROOT
-#if defined(__GNUC__) || defined(__clang__)
-#pragma GCC diagnostic pop
-#endif
- return &roots_[static_cast<size_t>(kLastReadOnlyRoot) + 1];
+ return FixedTypedArrayBase::cast(Object(roots_table_[root_index]));
}
} // namespace internal
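
A standalone model of RootsTable::IsRootHandleLocation above: a handle is a root handle exactly when its slot address falls inside the roots array, and the root index is recovered by pointer subtraction. The plain uintptr_t slots are stand-ins for V8's FullObjectSlot arithmetic.

#include <cstddef>
#include <cstdint>

bool IsRootHandleLocation(const uintptr_t* location, const uintptr_t* roots,
                          size_t entry_count, size_t* index_out) {
  if (location < roots) return false;
  if (location >= roots + entry_count) return false;
  *index_out = static_cast<size_t>(location - roots);
  return true;
}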
diff --git a/deps/v8/src/roots.cc b/deps/v8/src/roots.cc
index 529d2ec472..8a0ed69895 100644
--- a/deps/v8/src/roots.cc
+++ b/deps/v8/src/roots.cc
@@ -3,11 +3,19 @@
// found in the LICENSE file.
#include "src/roots.h"
+
#include "src/elements-kind.h"
+#include "src/visitors.h"
namespace v8 {
namespace internal {
+const char* RootsTable::root_names_[RootsTable::kEntriesCount] = {
+#define ROOT_NAME(type, name, CamelName) #name,
+ ROOT_LIST(ROOT_NAME)
+#undef ROOT_NAME
+};
+
// static
RootIndex RootsTable::RootIndexForFixedTypedArray(
ExternalArrayType array_type) {
@@ -50,5 +58,12 @@ RootIndex RootsTable::RootIndexForEmptyFixedTypedArray(
}
}
+void ReadOnlyRoots::Iterate(RootVisitor* visitor) {
+ visitor->VisitRootPointers(Root::kReadOnlyRootList, nullptr,
+ roots_table_.read_only_roots_begin(),
+ roots_table_.read_only_roots_end());
+ visitor->Synchronize(VisitorSynchronization::kReadOnlyRootList);
+}
+
} // namespace internal
} // namespace v8
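
A minimal model of the visitor walk added in ReadOnlyRoots::Iterate above: the visitor is handed the [begin, end) slot range of the read-only roots and then synchronized. RootVisitor's real interface takes Root and FullObjectSlot arguments; the uintptr_t slots and string tags here are simplifications.

#include <cstddef>
#include <cstdint>

struct RootVisitor {
  virtual ~RootVisitor() = default;
  virtual void VisitRootPointers(const char* description,
                                 const uintptr_t* begin,
                                 const uintptr_t* end) = 0;
  virtual void Synchronize(const char* tag) {}
};

struct CountingVisitor final : RootVisitor {
  size_t count = 0;
  void VisitRootPointers(const char*, const uintptr_t* begin,
                         const uintptr_t* end) override {
    count += static_cast<size_t>(end - begin);
  }
};

void IterateReadOnlyRoots(RootVisitor* visitor, const uintptr_t* roots,
                          size_t read_only_count) {
  visitor->VisitRootPointers("read-only roots", roots,
                             roots + read_only_count);
  visitor->Synchronize("ReadOnlyRootList");
}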
diff --git a/deps/v8/src/roots.h b/deps/v8/src/roots.h
index fc2b155604..be9ea8d252 100644
--- a/deps/v8/src/roots.h
+++ b/deps/v8/src/roots.h
@@ -7,9 +7,10 @@
#include "src/accessors.h"
#include "src/globals.h"
-#include "src/handles.h"
#include "src/heap-symbols.h"
#include "src/objects-definitions.h"
+#include "src/objects.h"
+#include "src/objects/slots.h"
namespace v8 {
namespace internal {
@@ -17,11 +18,15 @@ namespace internal {
// Forward declarations.
enum ElementsKind : uint8_t;
class FixedTypedArrayBase;
+template <typename T>
+class Handle;
class Heap;
class Isolate;
class Map;
+class PropertyCell;
class String;
class Symbol;
+class RootVisitor;
// Defines all the read-only roots in Heap.
#define STRONG_READ_ONLY_ROOT_LIST(V) \
@@ -60,9 +65,6 @@ class Symbol;
V(FixedArray, empty_fixed_array, EmptyFixedArray) \
V(DescriptorArray, empty_descriptor_array, EmptyDescriptorArray) \
/* Entries beyond the first 32 */ \
- /* The roots above this line should be boring from a GC point of view. */ \
- /* This means they are never in new space and never on a page that is */ \
- /* being compacted.*/ \
/* Oddballs */ \
V(Oddball, arguments_marker, ArgumentsMarker) \
V(Oddball, exception, Exception) \
@@ -95,11 +97,13 @@ class Symbol;
V(Map, mutable_heap_number_map, MutableHeapNumberMap) \
V(Map, name_dictionary_map, NameDictionaryMap) \
V(Map, no_closures_cell_map, NoClosuresCellMap) \
+ V(Map, no_feedback_cell_map, NoFeedbackCellMap) \
V(Map, number_dictionary_map, NumberDictionaryMap) \
V(Map, one_closure_cell_map, OneClosureCellMap) \
V(Map, ordered_hash_map_map, OrderedHashMapMap) \
V(Map, ordered_hash_set_map, OrderedHashSetMap) \
- V(Map, pre_parsed_scope_data_map, PreParsedScopeDataMap) \
+ V(Map, ordered_name_dictionary_map, OrderedNameDictionaryMap) \
+ V(Map, preparse_data_map, PreparseDataMap) \
V(Map, property_array_map, PropertyArrayMap) \
V(Map, side_effect_call_handler_info_map, SideEffectCallHandlerInfoMap) \
V(Map, side_effect_free_call_handler_info_map, \
@@ -110,14 +114,16 @@ class Symbol;
V(Map, sloppy_arguments_elements_map, SloppyArgumentsElementsMap) \
V(Map, small_ordered_hash_map_map, SmallOrderedHashMapMap) \
V(Map, small_ordered_hash_set_map, SmallOrderedHashSetMap) \
+ V(Map, small_ordered_name_dictionary_map, SmallOrderedNameDictionaryMap) \
V(Map, string_table_map, StringTableMap) \
- V(Map, uncompiled_data_without_pre_parsed_scope_map, \
- UncompiledDataWithoutPreParsedScopeMap) \
- V(Map, uncompiled_data_with_pre_parsed_scope_map, \
- UncompiledDataWithPreParsedScopeMap) \
+ V(Map, uncompiled_data_without_preparse_data_map, \
+ UncompiledDataWithoutPreparseDataMap) \
+ V(Map, uncompiled_data_with_preparse_data_map, \
+ UncompiledDataWithPreparseDataMap) \
V(Map, weak_fixed_array_map, WeakFixedArrayMap) \
V(Map, weak_array_list_map, WeakArrayListMap) \
V(Map, ephemeron_hash_table_map, EphemeronHashTableMap) \
+ V(Map, embedder_data_array_map, EmbedderDataArrayMap) \
/* String maps */ \
V(Map, native_source_string_map, NativeSourceStringMap) \
V(Map, string_map, StringMap) \
@@ -200,6 +206,7 @@ class Symbol;
V(FixedArray, empty_ordered_hash_set, EmptyOrderedHashSet) \
V(FeedbackMetadata, empty_feedback_metadata, EmptyFeedbackMetadata) \
V(PropertyCell, empty_property_cell, EmptyPropertyCell) \
+ V(NameDictionary, empty_property_dictionary, EmptyPropertyDictionary) \
V(InterceptorInfo, noop_interceptor_info, NoOpInterceptorInfo) \
V(WeakFixedArray, empty_weak_fixed_array, EmptyWeakFixedArray) \
V(WeakArrayList, empty_weak_array_list, EmptyWeakArrayList) \
@@ -210,15 +217,24 @@ class Symbol;
V(HeapNumber, minus_zero_value, MinusZeroValue) \
V(HeapNumber, minus_infinity_value, MinusInfinityValue) \
/* Marker for self-references during code-generation */ \
- V(HeapObject, self_reference_marker, SelfReferenceMarker)
-
-#define STRONG_MUTABLE_ROOT_LIST(V) \
+ V(HeapObject, self_reference_marker, SelfReferenceMarker) \
+ /* Canonical trampoline RelocInfo */ \
+ V(ByteArray, off_heap_trampoline_relocation_info, \
+ OffHeapTrampolineRelocationInfo) \
+ /* Hash seed */ \
+ V(ByteArray, hash_seed, HashSeed)
+
+// Mutable roots that are known to be immortal immovable, for which we can
+// safely skip write barriers.
+#define STRONG_MUTABLE_IMMOVABLE_ROOT_LIST(V) \
+ ACCESSOR_INFO_ROOT_LIST(V) \
/* Maps */ \
V(Map, external_map, ExternalMap) \
V(Map, message_object_map, JSMessageObjectMap) \
/* Canonical empty values */ \
V(Script, empty_script, EmptyScript) \
V(FeedbackCell, many_closures_cell, ManyClosuresCell) \
+ V(FeedbackCell, no_feedback_cell, NoFeedbackCell) \
V(Cell, invalid_prototype_validity_cell, InvalidPrototypeValidityCell) \
/* Protectors */ \
V(Cell, array_constructor_protector, ArrayConstructorProtector) \
@@ -226,48 +242,54 @@ class Symbol;
V(Cell, is_concat_spreadable_protector, IsConcatSpreadableProtector) \
V(PropertyCell, array_species_protector, ArraySpeciesProtector) \
V(PropertyCell, typed_array_species_protector, TypedArraySpeciesProtector) \
+ V(PropertyCell, regexp_species_protector, RegExpSpeciesProtector) \
V(PropertyCell, promise_species_protector, PromiseSpeciesProtector) \
V(Cell, string_length_protector, StringLengthProtector) \
V(PropertyCell, array_iterator_protector, ArrayIteratorProtector) \
- V(PropertyCell, array_buffer_neutering_protector, \
- ArrayBufferNeuteringProtector) \
+ V(PropertyCell, array_buffer_detaching_protector, \
+ ArrayBufferDetachingProtector) \
V(PropertyCell, promise_hook_protector, PromiseHookProtector) \
V(Cell, promise_resolve_protector, PromiseResolveProtector) \
+ V(PropertyCell, map_iterator_protector, MapIteratorProtector) \
V(PropertyCell, promise_then_protector, PromiseThenProtector) \
+ V(PropertyCell, set_iterator_protector, SetIteratorProtector) \
V(PropertyCell, string_iterator_protector, StringIteratorProtector) \
/* Caches */ \
- V(FixedArray, number_string_cache, NumberStringCache) \
V(FixedArray, single_character_string_cache, SingleCharacterStringCache) \
V(FixedArray, string_split_cache, StringSplitCache) \
V(FixedArray, regexp_multiple_cache, RegExpMultipleCache) \
- /* Lists and dictionaries */ \
- V(NameDictionary, empty_property_dictionary, EmptyPropertyDictionary) \
- V(NameDictionary, public_symbol_table, PublicSymbolTable) \
- V(NameDictionary, api_symbol_table, ApiSymbolTable) \
- V(NameDictionary, api_private_symbol_table, ApiPrivateSymbolTable) \
- V(WeakArrayList, script_list, ScriptList) \
- V(SimpleNumberDictionary, code_stubs, CodeStubs) \
- V(FixedArray, materialized_objects, MaterializedObjects) \
- V(MicrotaskQueue, default_microtask_queue, DefaultMicrotaskQueue) \
- V(WeakArrayList, detached_contexts, DetachedContexts) \
- V(WeakArrayList, retaining_path_targets, RetainingPathTargets) \
- V(WeakArrayList, retained_maps, RetainedMaps) \
/* Indirection lists for isolate-independent builtins */ \
- V(FixedArray, builtins_constants_table, BuiltinsConstantsTable) \
- /* Feedback vectors that we need for code coverage or type profile */ \
- V(Object, feedback_vectors_for_profiling_tools, \
- FeedbackVectorsForProfilingTools) \
- V(WeakArrayList, noscript_shared_function_infos, \
- NoScriptSharedFunctionInfos) \
- V(FixedArray, serialized_objects, SerializedObjects) \
- V(FixedArray, serialized_global_proxy_sizes, SerializedGlobalProxySizes) \
- V(TemplateList, message_listeners, MessageListeners) \
- /* Hash seed */ \
- V(ByteArray, hash_seed, HashSeed) \
- /* JS Entries */ \
- V(Code, js_entry_code, JsEntryCode) \
- V(Code, js_construct_entry_code, JsConstructEntryCode) \
- V(Code, js_run_microtasks_entry_code, JsRunMicrotasksEntryCode)
+ V(FixedArray, builtins_constants_table, BuiltinsConstantsTable)
+
+// These root references can be updated by the mutator.
+#define STRONG_MUTABLE_MOVABLE_ROOT_LIST(V) \
+ /* Caches */ \
+ V(FixedArray, number_string_cache, NumberStringCache) \
+ /* Lists and dictionaries */ \
+ V(NameDictionary, public_symbol_table, PublicSymbolTable) \
+ V(NameDictionary, api_symbol_table, ApiSymbolTable) \
+ V(NameDictionary, api_private_symbol_table, ApiPrivateSymbolTable) \
+ V(WeakArrayList, script_list, ScriptList) \
+ V(FixedArray, materialized_objects, MaterializedObjects) \
+ V(WeakArrayList, detached_contexts, DetachedContexts) \
+ V(WeakArrayList, retaining_path_targets, RetainingPathTargets) \
+ V(WeakArrayList, retained_maps, RetainedMaps) \
+ /* Feedback vectors that we need for code coverage or type profile */ \
+ V(Object, feedback_vectors_for_profiling_tools, \
+ FeedbackVectorsForProfilingTools) \
+ V(WeakArrayList, noscript_shared_function_infos, \
+ NoScriptSharedFunctionInfos) \
+ V(FixedArray, serialized_objects, SerializedObjects) \
+ V(FixedArray, serialized_global_proxy_sizes, SerializedGlobalProxySizes) \
+ V(TemplateList, message_listeners, MessageListeners) \
+ /* Support for async stack traces */ \
+ V(HeapObject, current_microtask, CurrentMicrotask) \
+ /* JSWeakFactory objects which need cleanup */ \
+ V(Object, dirty_js_weak_factories, DirtyJSWeakFactories) \
+ /* KeepDuringJob set for JS WeakRefs */ \
+ V(HeapObject, weak_refs_keep_during_job, WeakRefsKeepDuringJob) \
+ V(HeapObject, interpreter_entry_trampoline_for_profiling, \
+ InterpreterEntryTrampolineForProfiling)
// Entries in this list are limited to Smis and are not visited during GC.
#define SMI_ROOT_LIST(V) \
@@ -324,8 +346,8 @@ class Symbol;
DATA_HANDLER_MAPS_LIST(V)
#define MUTABLE_ROOT_LIST(V) \
- STRONG_MUTABLE_ROOT_LIST(V) \
- ACCESSOR_INFO_ROOT_LIST(V) \
+ STRONG_MUTABLE_IMMOVABLE_ROOT_LIST(V) \
+ STRONG_MUTABLE_MOVABLE_ROOT_LIST(V) \
V(StringTable, string_table, StringTable) \
SMI_ROOT_LIST(V)
@@ -346,10 +368,29 @@ enum class RootIndex : uint16_t {
kFirstRoot = 0,
kLastRoot = kRootListLength - 1,
- // kStringTable is not a strong root.
- kFirstStrongRoot = kFirstRoot,
+#define ROOT(...) +1
+ kReadOnlyRootsCount = 0 READ_ONLY_ROOT_LIST(ROOT),
+ kImmortalImmovableRootsCount =
+ kReadOnlyRootsCount STRONG_MUTABLE_IMMOVABLE_ROOT_LIST(ROOT),
+#undef ROOT
+ kFirstReadOnlyRoot = kFirstRoot,
+ kLastReadOnlyRoot = kFirstReadOnlyRoot + kReadOnlyRootsCount - 1,
+
+ // The strong roots visited by the garbage collector (not including read-only
+ // roots).
+ kFirstStrongRoot = kLastReadOnlyRoot + 1,
+ // (kStringTable is not a strong root).
kLastStrongRoot = kStringTable - 1,
+ // All of the strong roots plus the read-only roots.
+ kFirstStrongOrReadOnlyRoot = kFirstRoot,
+ kLastStrongOrReadOnlyRoot = kLastStrongRoot,
+
+ // All immortal immovable roots including read only ones.
+ kFirstImmortalImmovableRoot = kFirstReadOnlyRoot,
+ kLastImmortalImmovableRoot =
+ kFirstImmortalImmovableRoot + kImmortalImmovableRootsCount - 1,
+
kFirstSmiRoot = kStringTable + 1,
kLastSmiRoot = kLastRoot
};
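
The ROOT(...) +1 pattern in the enum above turns each list macro into a compile-time entry count: every V(...) invocation expands to +1, so 0 READ_ONLY_ROOT_LIST(ROOT) evaluates to the number of read-only roots. A self-contained sketch of the idiom, with a hypothetical two-entry list rather than V8's actual root list:

#include <cstddef>

// A hypothetical X-macro list; each V(...) entry stands in for one root.
#define DEMO_ROOT_LIST(V) \
  V(Map, foo_map, FooMap) \
  V(String, bar_string, BarString)

// Expanding every entry to "+1" yields a compile-time count of the list.
#define COUNT_ENTRY(...) +1
constexpr size_t kDemoRootCount = 0 DEMO_ROOT_LIST(COUNT_ENTRY);
#undef COUNT_ENTRY

static_assert(kDemoRootCount == 2, "two entries in the demo list");

int main() { return 0; }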
@@ -363,60 +404,109 @@ class RootsTable {
RootsTable() : roots_{} {}
- bool IsRootHandleLocation(Object** handle_location, RootIndex* index) const {
- if (handle_location >= &roots_[kEntriesCount]) return false;
- if (handle_location < &roots_[0]) return false;
- *index = static_cast<RootIndex>(handle_location - &roots_[0]);
- return true;
- }
+ inline bool IsRootHandleLocation(Address* handle_location,
+ RootIndex* index) const;
template <typename T>
- bool IsRootHandle(Handle<T> handle, RootIndex* index) const {
- Object** handle_location = bit_cast<Object**>(handle.address());
- return IsRootHandleLocation(handle_location, index);
- }
+ bool IsRootHandle(Handle<T> handle, RootIndex* index) const;
- Object* const& operator[](RootIndex root_index) const {
+ Address const& operator[](RootIndex root_index) const {
size_t index = static_cast<size_t>(root_index);
DCHECK_LT(index, kEntriesCount);
return roots_[index];
}
+ static const char* name(RootIndex root_index) {
+ size_t index = static_cast<size_t>(root_index);
+ DCHECK_LT(index, kEntriesCount);
+ return root_names_[index];
+ }
+
+ static constexpr int offset_of(RootIndex root_index) {
+ return static_cast<int>(root_index) * kSystemPointerSize;
+ }
+
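
offset_of() above converts a root index into a byte offset into the flat roots_ array, which lets generated code address a root as a constant displacement off the table base. A tiny sketch of the computation, with the pointer size hard-coded to 8 as an assumption about a 64-bit target:

// Sketch of offset_of(): the index scaled by the pointer size (8 assumed).
constexpr int kDemoPointerSize = 8;

constexpr int DemoOffsetOf(int root_index) {
  return root_index * kDemoPointerSize;
}

static_assert(DemoOffsetOf(0) == 0, "first root sits at the table base");
static_assert(DemoOffsetOf(3) == 24, "fourth root is three pointers in");

int main() { return 0; }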
static RootIndex RootIndexForFixedTypedArray(ExternalArrayType array_type);
static RootIndex RootIndexForFixedTypedArray(ElementsKind elements_kind);
static RootIndex RootIndexForEmptyFixedTypedArray(ElementsKind elements_kind);
+ // Immortal immovable root objects are allocated in OLD space; GC never
+ // moves them, and the root table entries are guaranteed not to be
+ // modified after initialization. Note, however, that the contents of
+ // root objects allocated in writable space can still be modified after
+ // initialization.
+ // Generated code can treat direct references to these roots as constants.
+ static constexpr bool IsImmortalImmovable(RootIndex root_index) {
+ STATIC_ASSERT(static_cast<int>(RootIndex::kFirstImmortalImmovableRoot) ==
+ 0);
+ return static_cast<unsigned>(root_index) <=
+ static_cast<unsigned>(RootIndex::kLastImmortalImmovableRoot);
+ }
+
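
IsImmortalImmovable() relies on the immortal immovable block starting at index 0 (enforced by the STATIC_ASSERT), so membership reduces to a single unsigned comparison against the last index. A hedged sketch of the same check on a made-up index type:

#include <cassert>
#include <cstdint>

// Hypothetical index layout mirroring the assumption above: the immortal
// immovable block starts at 0, so one unsigned compare tests membership.
enum class DemoIndex : uint16_t { kA = 0, kB, kC, kLastImmortal = kB };

constexpr bool IsImmortalImmovable(DemoIndex index) {
  static_assert(static_cast<unsigned>(DemoIndex::kA) == 0,
                "immortal immovable block must start at 0");
  return static_cast<unsigned>(index) <=
         static_cast<unsigned>(DemoIndex::kLastImmortal);
}

int main() {
  assert(IsImmortalImmovable(DemoIndex::kA));   // index 0: inside the block
  assert(!IsImmortalImmovable(DemoIndex::kC));  // index 2: past kLastImmortal
}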
private:
- Object** read_only_roots_begin() {
- return &roots_[static_cast<size_t>(RootIndex::kFirstStrongRoot)];
+ FullObjectSlot begin() {
+ return FullObjectSlot(&roots_[static_cast<size_t>(RootIndex::kFirstRoot)]);
+ }
+ FullObjectSlot end() {
+ return FullObjectSlot(
+ &roots_[static_cast<size_t>(RootIndex::kLastRoot) + 1]);
+ }
+
+ // Used for iterating over all of the read-only and mutable strong roots.
+ FullObjectSlot strong_or_read_only_roots_begin() {
+ STATIC_ASSERT(static_cast<size_t>(RootIndex::kLastReadOnlyRoot) ==
+ static_cast<size_t>(RootIndex::kFirstStrongRoot) - 1);
+ return FullObjectSlot(
+ &roots_[static_cast<size_t>(RootIndex::kFirstStrongOrReadOnlyRoot)]);
+ }
+ FullObjectSlot strong_or_read_only_roots_end() {
+ return FullObjectSlot(
+ &roots_[static_cast<size_t>(RootIndex::kLastStrongOrReadOnlyRoot) + 1]);
}
- inline Object** read_only_roots_end();
- Object** strong_roots_begin() {
- return &roots_[static_cast<size_t>(RootIndex::kFirstStrongRoot)];
+ // The read-only, strong and Smi roots as defined by these accessors are all
+ // disjoint.
+ FullObjectSlot read_only_roots_begin() {
+ return FullObjectSlot(
+ &roots_[static_cast<size_t>(RootIndex::kFirstReadOnlyRoot)]);
}
- Object** strong_roots_end() {
- return &roots_[static_cast<size_t>(RootIndex::kLastStrongRoot) + 1];
+ FullObjectSlot read_only_roots_end() {
+ return FullObjectSlot(
+ &roots_[static_cast<size_t>(RootIndex::kLastReadOnlyRoot) + 1]);
}
- Object** smi_roots_begin() {
- return &roots_[static_cast<size_t>(RootIndex::kFirstSmiRoot)];
+ FullObjectSlot strong_roots_begin() {
+ return FullObjectSlot(
+ &roots_[static_cast<size_t>(RootIndex::kFirstStrongRoot)]);
}
- Object** smi_roots_end() {
- return &roots_[static_cast<size_t>(RootIndex::kLastSmiRoot) + 1];
+ FullObjectSlot strong_roots_end() {
+ return FullObjectSlot(
+ &roots_[static_cast<size_t>(RootIndex::kLastStrongRoot) + 1]);
}
- Object*& operator[](RootIndex root_index) {
+ FullObjectSlot smi_roots_begin() {
+ return FullObjectSlot(
+ &roots_[static_cast<size_t>(RootIndex::kFirstSmiRoot)]);
+ }
+ FullObjectSlot smi_roots_end() {
+ return FullObjectSlot(
+ &roots_[static_cast<size_t>(RootIndex::kLastSmiRoot) + 1]);
+ }
+
+ Address& operator[](RootIndex root_index) {
size_t index = static_cast<size_t>(root_index);
DCHECK_LT(index, kEntriesCount);
return roots_[index];
}
- Object* roots_[kEntriesCount];
+ Address roots_[kEntriesCount];
+ static const char* root_names_[kEntriesCount];
+ friend class Isolate;
friend class Heap;
friend class Factory;
friend class ReadOnlyRoots;
+ friend class RootsSerializer;
};
class ReadOnlyRoots {
@@ -424,19 +514,24 @@ class ReadOnlyRoots {
V8_INLINE explicit ReadOnlyRoots(Heap* heap);
V8_INLINE explicit ReadOnlyRoots(Isolate* isolate);
-#define ROOT_ACCESSOR(type, name, CamelName) \
- V8_INLINE class type* name(); \
- V8_INLINE Handle<type> name##_handle();
+#define ROOT_ACCESSOR(Type, name, CamelName) \
+ V8_INLINE class Type name() const; \
+ V8_INLINE Handle<Type> name##_handle() const;
READ_ONLY_ROOT_LIST(ROOT_ACCESSOR)
#undef ROOT_ACCESSOR
- V8_INLINE Map* MapForFixedTypedArray(ExternalArrayType array_type);
- V8_INLINE Map* MapForFixedTypedArray(ElementsKind elements_kind);
- V8_INLINE FixedTypedArrayBase* EmptyFixedTypedArrayForMap(const Map* map);
+ V8_INLINE Map MapForFixedTypedArray(ExternalArrayType array_type);
+ V8_INLINE Map MapForFixedTypedArray(ElementsKind elements_kind);
+ V8_INLINE FixedTypedArrayBase EmptyFixedTypedArrayForMap(const Map map);
+
+ // Iterate over all the read-only roots. This is not necessary for garbage
+ // collection and is usually only performed as part of (de)serialization or
+ // heap verification.
+ void Iterate(RootVisitor* visitor);
private:
- const RootsTable& roots_table_;
+ RootsTable& roots_table_;
};
} // namespace internal
diff --git a/deps/v8/src/runtime-profiler.cc b/deps/v8/src/runtime-profiler.cc
index 35456713a9..3d7da8ac25 100644
--- a/deps/v8/src/runtime-profiler.cc
+++ b/deps/v8/src/runtime-profiler.cc
@@ -7,7 +7,6 @@
#include "src/assembler.h"
#include "src/base/platform/platform.h"
#include "src/bootstrapper.h"
-#include "src/code-stubs.h"
#include "src/compilation-cache.h"
#include "src/compiler.h"
#include "src/execution.h"
@@ -70,11 +69,10 @@ RuntimeProfiler::RuntimeProfiler(Isolate* isolate)
any_ic_changed_(false) {
}
-static void GetICCounts(JSFunction* function, int* ic_with_type_info_count,
+static void GetICCounts(JSFunction function, int* ic_with_type_info_count,
int* ic_generic_count, int* ic_total_count,
int* type_info_percentage, int* generic_percentage) {
- // Harvest vector-ics.
- FeedbackVector* vector = function->feedback_vector();
+ FeedbackVector vector = function->feedback_vector();
vector->ComputeCounts(ic_with_type_info_count, ic_generic_count,
ic_total_count);
@@ -87,7 +85,7 @@ static void GetICCounts(JSFunction* function, int* ic_with_type_info_count,
}
}
-static void TraceRecompile(JSFunction* function, const char* reason,
+static void TraceRecompile(JSFunction function, const char* reason,
const char* type) {
if (FLAG_trace_opt) {
PrintF("[marking ");
@@ -105,18 +103,17 @@ static void TraceRecompile(JSFunction* function, const char* reason,
}
}
-void RuntimeProfiler::Optimize(JSFunction* function,
- OptimizationReason reason) {
+void RuntimeProfiler::Optimize(JSFunction function, OptimizationReason reason) {
DCHECK_NE(reason, OptimizationReason::kDoNotOptimize);
TraceRecompile(function, OptimizationReasonToString(reason), "optimized");
function->MarkForOptimization(ConcurrencyMode::kConcurrent);
}
-void RuntimeProfiler::AttemptOnStackReplacement(JavaScriptFrame* frame,
+void RuntimeProfiler::AttemptOnStackReplacement(InterpretedFrame* frame,
int loop_nesting_levels) {
- JSFunction* function = frame->function();
- SharedFunctionInfo* shared = function->shared();
- if (!FLAG_use_osr || !function->shared()->IsUserJavaScript()) {
+ JSFunction function = frame->function();
+ SharedFunctionInfo shared = function->shared();
+ if (!FLAG_use_osr || !shared->IsUserJavaScript()) {
return;
}
@@ -133,14 +130,13 @@ void RuntimeProfiler::AttemptOnStackReplacement(JavaScriptFrame* frame,
}
DCHECK_EQ(StackFrame::INTERPRETED, frame->type());
- DCHECK(shared->HasBytecodeArray());
- int level = shared->GetBytecodeArray()->osr_loop_nesting_level();
- shared->GetBytecodeArray()->set_osr_loop_nesting_level(
+ int level = frame->GetBytecodeArray()->osr_loop_nesting_level();
+ frame->GetBytecodeArray()->set_osr_loop_nesting_level(
Min(level + loop_nesting_levels, AbstractCode::kMaxLoopNestingMarker));
}
-void RuntimeProfiler::MaybeOptimize(JSFunction* function,
- JavaScriptFrame* frame) {
+void RuntimeProfiler::MaybeOptimize(JSFunction function,
+ InterpretedFrame* frame) {
if (function->IsInOptimizationQueue()) {
if (FLAG_trace_opt_verbose) {
PrintF("[function ");
@@ -159,32 +155,28 @@ void RuntimeProfiler::MaybeOptimize(JSFunction* function,
if (function->shared()->optimization_disabled()) return;
- if (frame->is_optimized()) return;
-
- OptimizationReason reason = ShouldOptimize(function, frame);
+ OptimizationReason reason =
+ ShouldOptimize(function, function->shared()->GetBytecodeArray());
if (reason != OptimizationReason::kDoNotOptimize) {
Optimize(function, reason);
}
}
-bool RuntimeProfiler::MaybeOSR(JSFunction* function, JavaScriptFrame* frame) {
- SharedFunctionInfo* shared = function->shared();
+bool RuntimeProfiler::MaybeOSR(JSFunction function, InterpretedFrame* frame) {
int ticks = function->feedback_vector()->profiler_ticks();
-
// TODO(rmcilroy): Also ensure we only OSR top-level code if it is smaller
// than kMaxToplevelSourceSize.
- if (!frame->is_optimized() &&
- (function->IsMarkedForOptimization() ||
- function->IsMarkedForConcurrentOptimization() ||
- function->HasOptimizedCode())) {
+ if (function->IsMarkedForOptimization() ||
+ function->IsMarkedForConcurrentOptimization() ||
+ function->HasOptimizedCode()) {
// Attempt OSR if we are still running interpreted code even though
// the function has long been marked or even already been optimized.
int64_t allowance =
kOSRBytecodeSizeAllowanceBase +
static_cast<int64_t>(ticks) * kOSRBytecodeSizeAllowancePerTick;
- if (shared->GetBytecodeArray()->length() <= allowance) {
+ if (function->shared()->GetBytecodeArray()->length() <= allowance) {
AttemptOnStackReplacement(frame);
}
return true;
@@ -192,22 +184,20 @@ bool RuntimeProfiler::MaybeOSR(JSFunction* function, JavaScriptFrame* frame) {
return false;
}
-OptimizationReason RuntimeProfiler::ShouldOptimize(JSFunction* function,
- JavaScriptFrame* frame) {
- SharedFunctionInfo* shared = function->shared();
+OptimizationReason RuntimeProfiler::ShouldOptimize(JSFunction function,
+ BytecodeArray bytecode) {
int ticks = function->feedback_vector()->profiler_ticks();
-
- if (shared->GetBytecodeArray()->length() > kMaxBytecodeSizeForOpt) {
+ if (bytecode->length() > kMaxBytecodeSizeForOpt) {
return OptimizationReason::kDoNotOptimize;
}
int ticks_for_optimization =
kProfilerTicksBeforeOptimization +
- (shared->GetBytecodeArray()->length() / kBytecodeSizeAllowancePerTick);
+ (bytecode->length() / kBytecodeSizeAllowancePerTick);
if (ticks >= ticks_for_optimization) {
return OptimizationReason::kHotAndStable;
- } else if (!any_ic_changed_ && shared->GetBytecodeArray()->length() <
- kMaxBytecodeSizeForEarlyOpt) {
+ } else if (!any_ic_changed_ &&
+ bytecode->length() < kMaxBytecodeSizeForEarlyOpt) {
// If no IC was patched since the last tick and this function is very
// small, optimistically optimize it now.
return OptimizationReason::kSmallFunction;
@@ -220,7 +210,7 @@ OptimizationReason RuntimeProfiler::ShouldOptimize(JSFunction* function,
PrintF("ICs changed]\n");
} else {
PrintF(" too large for small function optimization: %d/%d]\n",
- shared->GetBytecodeArray()->length(), kMaxBytecodeSizeForEarlyOpt);
+ bytecode->length(), kMaxBytecodeSizeForEarlyOpt);
}
}
return OptimizationReason::kDoNotOptimize;
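
The threshold computed above scales with bytecode size: larger functions must stay hot for more profiler ticks before they are considered for optimization. A worked sketch of the arithmetic, with placeholder constants rather than V8's actual tuning values:

#include <cassert>

// Placeholder tuning values, not the real V8 constants.
constexpr int kProfilerTicksBeforeOptimization = 2;
constexpr int kBytecodeSizeAllowancePerTick = 1200;

int TicksForOptimization(int bytecode_length) {
  return kProfilerTicksBeforeOptimization +
         bytecode_length / kBytecodeSizeAllowancePerTick;
}

int main() {
  assert(TicksForOptimization(600) == 2);   // small function: base threshold
  assert(TicksForOptimization(3600) == 5);  // larger function: 3 extra ticks
}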
@@ -242,13 +232,15 @@ void RuntimeProfiler::MarkCandidatesForOptimization() {
frame_count++ < frame_count_limit && !it.done();
it.Advance()) {
JavaScriptFrame* frame = it.frame();
- if (frame->is_optimized()) continue;
+ if (!frame->is_interpreted()) continue;
- JSFunction* function = frame->function();
+ JSFunction function = frame->function();
DCHECK(function->shared()->is_compiled());
if (!function->shared()->IsInterpreted()) continue;
- MaybeOptimize(function, frame);
+ if (!function->has_feedback_vector()) continue;
+
+ MaybeOptimize(function, InterpretedFrame::cast(frame));
// TODO(leszeks): Move this increment to before the maybe optimize checks,
// and update the tests to assume the increment has already happened.
diff --git a/deps/v8/src/runtime-profiler.h b/deps/v8/src/runtime-profiler.h
index 74a1d0f872..7e29d57bdc 100644
--- a/deps/v8/src/runtime-profiler.h
+++ b/deps/v8/src/runtime-profiler.h
@@ -10,8 +10,9 @@
namespace v8 {
namespace internal {
+class BytecodeArray;
class Isolate;
-class JavaScriptFrame;
+class InterpretedFrame;
class JSFunction;
enum class OptimizationReason : uint8_t;
@@ -23,18 +24,18 @@ class RuntimeProfiler {
void NotifyICChanged() { any_ic_changed_ = true; }
- void AttemptOnStackReplacement(JavaScriptFrame* frame,
+ void AttemptOnStackReplacement(InterpretedFrame* frame,
int nesting_levels = 1);
private:
- void MaybeOptimize(JSFunction* function, JavaScriptFrame* frame);
+ void MaybeOptimize(JSFunction function, InterpretedFrame* frame);
// Potentially attempts OSR from the given frame and returns whether no
// other optimization attempts should be made.
- bool MaybeOSR(JSFunction* function, JavaScriptFrame* frame);
- OptimizationReason ShouldOptimize(JSFunction* function,
- JavaScriptFrame* frame);
- void Optimize(JSFunction* function, OptimizationReason reason);
- void Baseline(JSFunction* function, OptimizationReason reason);
+ bool MaybeOSR(JSFunction function, InterpretedFrame* frame);
+ OptimizationReason ShouldOptimize(JSFunction function,
+ BytecodeArray bytecode_array);
+ void Optimize(JSFunction function, OptimizationReason reason);
+ void Baseline(JSFunction function, OptimizationReason reason);
Isolate* isolate_;
bool any_ic_changed_;
diff --git a/deps/v8/src/runtime/runtime-array.cc b/deps/v8/src/runtime/runtime-array.cc
index abe3883097..d18ced02bd 100644
--- a/deps/v8/src/runtime/runtime-array.cc
+++ b/deps/v8/src/runtime/runtime-array.cc
@@ -3,14 +3,13 @@
// found in the LICENSE file.
#include "src/arguments-inl.h"
-#include "src/code-stubs.h"
#include "src/conversions-inl.h"
+#include "src/counters.h"
#include "src/debug/debug.h"
#include "src/elements.h"
#include "src/heap/factory.h"
#include "src/isolate-inl.h"
#include "src/keys.h"
-#include "src/messages.h"
#include "src/objects/arguments-inl.h"
#include "src/objects/hash-table-inl.h"
#include "src/objects/js-array-inl.h"
@@ -48,7 +47,7 @@ Maybe<uint32_t> FindNextFreePosition(Isolate* isolate,
Handle<JSReceiver> receiver,
uint32_t current_pos) {
for (uint32_t position = current_pos;; ++position) {
- Maybe<bool> has_element = JSReceiver::HasElement(receiver, position);
+ Maybe<bool> has_element = JSReceiver::HasOwnProperty(receiver, position);
MAYBE_RETURN(has_element, Nothing<uint32_t>());
if (!has_element.FromJust()) return Just(position);
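
The hunk above switches the probe from HasElement to HasOwnProperty, so positions that only exist on the prototype chain now count as free. A standalone sketch of the scan, with a std::set standing in for the receiver's own element indices:

#include <cassert>
#include <cstdint>
#include <set>

// Scan upward from current_pos until an index is not an own property.
uint32_t FindNextFreePosition(const std::set<uint32_t>& own_elements,
                              uint32_t current_pos) {
  for (uint32_t position = current_pos;; ++position) {
    if (own_elements.count(position) == 0) return position;
  }
}

int main() {
  std::set<uint32_t> own_elements = {0, 1, 2, 5};
  assert(FindNextFreePosition(own_elements, 0) == 3);  // hole at index 3
  assert(FindNextFreePosition(own_elements, 4) == 4);  // 4 is already free
}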
@@ -64,8 +63,8 @@ Maybe<uint32_t> FindNextFreePosition(Isolate* isolate,
// Dictionary (requires_slow_elements() is true), proxies and objects that
// might have accessors.
V8_WARN_UNUSED_RESULT
-Object* RemoveArrayHolesGeneric(Isolate* isolate, Handle<JSReceiver> receiver,
- uint32_t limit) {
+Object RemoveArrayHolesGeneric(Isolate* isolate, Handle<JSReceiver> receiver,
+ uint32_t limit) {
HandleScope scope(isolate);
// For proxies, we do not collect the keys, instead we use all indices in
@@ -125,20 +124,35 @@ Object* RemoveArrayHolesGeneric(Isolate* isolate, Handle<JSReceiver> receiver,
// If the array contains undefineds, the position at 'key' might later
// be set to 'undefined'. If we delete the element now and later set it
// to undefined, the set operation would throw an exception.
+ // Instead, to mark it as free space, we set array[key] to undefined.
+ // As 'key' will be incremented afterward, this undefined value will not
+ // affect 'num_undefined', and the logic afterwards will correctly set
+ // the remaining undefineds or delete the remaining properties.
RETURN_FAILURE_ON_EXCEPTION(
- isolate, JSReceiver::SetElement(isolate, receiver, current_pos,
- element, LanguageMode::kStrict));
+ isolate, Object::SetElement(isolate, receiver, current_pos, element,
+ LanguageMode::kStrict));
+ RETURN_FAILURE_ON_EXCEPTION(
+ isolate, Object::SetElement(isolate, receiver, key,
+ isolate->factory()->undefined_value(),
+ LanguageMode::kStrict));
++current_pos;
}
}
+ // current_pos points to the next free space in the array/object. In most
+ // cases this corresponds to the 'length' or to the number of non-undefined
+ // elements.
+ // In cases where an object is 'packed' and 'length' is smaller, e.g.:
+ // { 0: 5, 1: 4, 2: 3, length: 2 }
+ // current_pos will be greater than limit; thus we take the minimum.
+ uint32_t result = std::min(current_pos, limit);
+
// Set [current_pos, current_pos + num_undefined) to undefined.
- uint32_t result = current_pos;
for (uint32_t i = 0; i < num_undefined; ++i) {
RETURN_FAILURE_ON_EXCEPTION(
- isolate, JSReceiver::SetElement(isolate, receiver, current_pos++,
- isolate->factory()->undefined_value(),
- LanguageMode::kStrict));
+ isolate, Object::SetElement(isolate, receiver, current_pos++,
+ isolate->factory()->undefined_value(),
+ LanguageMode::kStrict));
}
// TODO(szuend): Re-enable when we also copy from the prototype chain for
// JSArrays. Then we can use HasOwnProperty instead of
@@ -155,15 +169,7 @@ Object* RemoveArrayHolesGeneric(Isolate* isolate, Handle<JSReceiver> receiver,
MAYBE_RETURN(delete_result, ReadOnlyRoots(isolate).exception());
}
- // TODO(jgruber, szuend, chromium:897512): This is a workaround to prevent
- // returning a number greater than array.length to Array.p.sort, which could
- // trigger OOB accesses. There is still a correctness bug here though in
- // how we shift around undefineds and delete elements in the two blocks above.
- // This needs to be fixed soon.
- const uint32_t number_of_non_undefined_elements = std::min(limit, result);
-
- return *isolate->factory()->NewNumberFromUint(
- number_of_non_undefined_elements);
+ return *isolate->factory()->NewNumberFromUint(result);
}
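
The packed-object case in the comment above is why the result is clamped: after compaction, current_pos can exceed the receiver's reported length. A tiny check of the clamp using the numbers from that example:

#include <algorithm>
#include <cassert>
#include <cstdint>

int main() {
  // Mirrors the { 0: 5, 1: 4, 2: 3, length: 2 } case from the comment:
  // three defined elements were compacted, but 'length' (limit) is 2.
  uint32_t current_pos = 3;  // next free slot after moving elements down
  uint32_t limit = 2;        // the receiver's reported length
  assert(std::min(current_pos, limit) == 2);  // clamped result
}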
// Collects all defined (non-hole) and non-undefined (array) elements at the
@@ -171,8 +177,8 @@ Object* RemoveArrayHolesGeneric(Isolate* isolate, Handle<JSReceiver> receiver,
// converted to fast elements mode. Undefined values are placed after
// non-undefined values. Returns the number of non-undefined values.
V8_WARN_UNUSED_RESULT
-Object* RemoveArrayHoles(Isolate* isolate, Handle<JSReceiver> receiver,
- uint32_t limit) {
+Object RemoveArrayHoles(Isolate* isolate, Handle<JSReceiver> receiver,
+ uint32_t limit) {
if (receiver->IsJSProxy()) {
return RemoveArrayHolesGeneric(isolate, receiver, limit);
}
@@ -231,7 +237,7 @@ Object* RemoveArrayHoles(Isolate* isolate, Handle<JSReceiver> receiver,
uint32_t result = 0;
if (elements_base->map() == ReadOnlyRoots(isolate).fixed_double_array_map()) {
- FixedDoubleArray* elements = FixedDoubleArray::cast(*elements_base);
+ FixedDoubleArray elements = FixedDoubleArray::cast(*elements_base);
// Split elements into defined and the_hole, in that order.
unsigned int holes = limit;
// Assume most arrays contain no holes and undefined values, so minimize the
@@ -258,7 +264,7 @@ Object* RemoveArrayHoles(Isolate* isolate, Handle<JSReceiver> receiver,
holes++;
}
} else {
- FixedArray* elements = FixedArray::cast(*elements_base);
+ FixedArray elements = FixedArray::cast(*elements_base);
DisallowHeapAllocation no_gc;
// Split elements into defined, undefined and the_hole, in that order. Only
@@ -269,7 +275,7 @@ Object* RemoveArrayHoles(Isolate* isolate, Handle<JSReceiver> receiver,
// Assume most arrays contain no holes and undefined values, so minimize the
// number of stores of non-undefined, non-the-hole values.
for (unsigned int i = 0; i < undefs; i++) {
- Object* current = elements->get(i);
+ Object current = elements->get(i);
if (current->IsTheHole(isolate)) {
holes--;
undefs--;
@@ -329,8 +335,8 @@ Maybe<bool> ConditionalCopy(Isolate* isolate, Handle<JSReceiver> source,
Handle<Object> set_result;
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
isolate, set_result,
- JSReceiver::SetElement(isolate, target, index, source_element,
- LanguageMode::kStrict),
+ Object::SetElement(isolate, target, index, source_element,
+ LanguageMode::kStrict),
Nothing<bool>());
return Just(true);
@@ -387,10 +393,10 @@ RUNTIME_FUNCTION(Runtime_PrepareElementsForSort) {
// Counter for sorting arrays that have non-packed elements and where either
// the ElementsProtector is invalid or the prototype does not match
// Array.prototype.
+ JSObject initial_array_proto = JSObject::cast(
+ isolate->native_context()->get(Context::INITIAL_ARRAY_PROTOTYPE_INDEX));
if (object->IsJSArray() &&
!Handle<JSArray>::cast(object)->HasFastPackedElements()) {
- JSObject* initial_array_proto = JSObject::cast(
- isolate->native_context()->get(Context::INITIAL_ARRAY_PROTOTYPE_INDEX));
if (!isolate->IsNoElementsProtectorIntact() ||
object->map()->prototype() != initial_array_proto) {
isolate->CountUsage(
@@ -398,43 +404,23 @@ RUNTIME_FUNCTION(Runtime_PrepareElementsForSort) {
}
}
- if (!object->IsJSArray()) {
+ // Skip copying from prototype for JSArrays with ElementsProtector intact and
+ // the original array prototype.
+ if (!object->IsJSArray() || !isolate->IsNoElementsProtectorIntact() ||
+ object->map()->prototype() != initial_array_proto) {
RETURN_FAILURE_ON_EXCEPTION(isolate,
CopyFromPrototype(isolate, object, length));
}
return RemoveArrayHoles(isolate, object, length);
}
-// Move contents of argument 0 (an array) to argument 1 (an array)
-RUNTIME_FUNCTION(Runtime_MoveArrayContents) {
- HandleScope scope(isolate);
- DCHECK_EQ(2, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSArray, from, 0);
- CONVERT_ARG_HANDLE_CHECKED(JSArray, to, 1);
- JSObject::ValidateElements(*from);
- JSObject::ValidateElements(*to);
-
- Handle<FixedArrayBase> new_elements(from->elements(), isolate);
- ElementsKind from_kind = from->GetElementsKind();
- Handle<Map> new_map = JSObject::GetElementsTransitionMap(to, from_kind);
- JSObject::SetMapAndElements(to, new_map, new_elements);
- to->set_length(from->length());
-
- from->initialize_elements();
- from->set_length(Smi::kZero);
-
- JSObject::ValidateElements(*to);
- return *to;
-}
-
-
// How many elements does this object/array have?
RUNTIME_FUNCTION(Runtime_EstimateNumberOfElements) {
DisallowHeapAllocation no_gc;
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
CONVERT_ARG_CHECKED(JSArray, array, 0);
- FixedArrayBase* elements = array->elements();
+ FixedArrayBase elements = array->elements();
SealHandleScope shs(isolate);
if (elements->IsNumberDictionary()) {
int result = NumberDictionary::cast(elements)->NumberOfElements();
@@ -561,7 +547,7 @@ RUNTIME_FUNCTION(Runtime_NewArray) {
DCHECK_LE(3, args.length());
int const argc = args.length() - 3;
// TODO(bmeurer): Remove this Arguments nonsense.
- Arguments argv(argc, args.arguments() - 1);
+ Arguments argv(argc, args.address_of_arg_at(1));
CONVERT_ARG_HANDLE_CHECKED(JSFunction, constructor, 0);
CONVERT_ARG_HANDLE_CHECKED(JSReceiver, new_target, argc + 1);
CONVERT_ARG_HANDLE_CHECKED(HeapObject, type_info, argc + 2);
@@ -741,7 +727,8 @@ RUNTIME_FUNCTION(Runtime_ArrayIncludes_Slow) {
// Let O be ? ToObject(this value).
Handle<JSReceiver> object;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, object, Object::ToObject(isolate, handle(args[0], isolate)));
+ isolate, object,
+ Object::ToObject(isolate, Handle<Object>(args[0], isolate)));
// Let len be ? ToLength(? Get(O, "length")).
int64_t len;
@@ -834,7 +821,7 @@ RUNTIME_FUNCTION(Runtime_ArrayIncludes_Slow) {
}
RUNTIME_FUNCTION(Runtime_ArrayIndexOf) {
- HandleScope shs(isolate);
+ HandleScope hs(isolate);
DCHECK_EQ(3, args.length());
CONVERT_ARG_HANDLE_CHECKED(Object, search_element, 1);
CONVERT_ARG_HANDLE_CHECKED(Object, from_index, 2);
@@ -897,9 +884,9 @@ RUNTIME_FUNCTION(Runtime_ArrayIndexOf) {
}
}
- // If the receiver is not a special receiver type, and the length is a valid
- // element index, perform fast operation tailored to specific ElementsKinds.
- if (!object->map()->IsSpecialReceiverMap() && len < kMaxUInt32 &&
+ // If the receiver is not a special receiver type, and the length fits
+ // uint32_t, perform fast operation tailored to specific ElementsKinds.
+ if (!object->map()->IsSpecialReceiverMap() && len <= kMaxUInt32 &&
JSObject::PrototypeHasNoElements(isolate, JSObject::cast(*object))) {
Handle<JSObject> obj = Handle<JSObject>::cast(object);
ElementsAccessor* elements = obj->GetElementsAccessor();
@@ -912,6 +899,7 @@ RUNTIME_FUNCTION(Runtime_ArrayIndexOf) {
// Otherwise, perform slow lookups for special receiver types
for (; index < len; ++index) {
+ HandleScope iteration_hs(isolate);
// Let elementK be the result of ? Get(O, ! ToString(k)).
Handle<Object> element_k;
{
diff --git a/deps/v8/src/runtime/runtime-atomics.cc b/deps/v8/src/runtime/runtime-atomics.cc
index 3fd07af255..3fcb9934f9 100644
--- a/deps/v8/src/runtime/runtime-atomics.cc
+++ b/deps/v8/src/runtime/runtime-atomics.cc
@@ -6,6 +6,7 @@
#include "src/base/macros.h"
#include "src/base/platform/mutex.h"
#include "src/conversions-inl.h"
+#include "src/counters.h"
#include "src/heap/factory.h"
#include "src/objects/js-array-buffer-inl.h"
#include "src/runtime/runtime-utils.h"
@@ -97,6 +98,10 @@ inline T XorSeqCst(T* p, T value) {
#define InterlockedOr32 _InterlockedOr
#define InterlockedXor32 _InterlockedXor
+#if defined(V8_HOST_ARCH_ARM64)
+#define InterlockedExchange8 _InterlockedExchange8
+#endif
+
#define ATOMIC_OPS(type, suffix, vctype) \
inline type ExchangeSeqCst(type* p, type value) { \
return InterlockedExchange##suffix(reinterpret_cast<vctype*>(p), \
@@ -160,6 +165,10 @@ inline void StoreSeqCst(T* p, T value) {
#undef InterlockedOr32
#undef InterlockedXor32
+#if defined(V8_HOST_ARCH_ARM64)
+#undef InterlockedExchange8
+#endif
+
#else
#error Unsupported platform!
@@ -209,35 +218,33 @@ inline int64_t FromObject<int64_t>(Handle<Object> bigint) {
return Handle<BigInt>::cast(bigint)->AsInt64();
}
-inline Object* ToObject(Isolate* isolate, int8_t t) { return Smi::FromInt(t); }
+inline Object ToObject(Isolate* isolate, int8_t t) { return Smi::FromInt(t); }
-inline Object* ToObject(Isolate* isolate, uint8_t t) { return Smi::FromInt(t); }
+inline Object ToObject(Isolate* isolate, uint8_t t) { return Smi::FromInt(t); }
-inline Object* ToObject(Isolate* isolate, int16_t t) { return Smi::FromInt(t); }
+inline Object ToObject(Isolate* isolate, int16_t t) { return Smi::FromInt(t); }
-inline Object* ToObject(Isolate* isolate, uint16_t t) {
- return Smi::FromInt(t);
-}
+inline Object ToObject(Isolate* isolate, uint16_t t) { return Smi::FromInt(t); }
-inline Object* ToObject(Isolate* isolate, int32_t t) {
+inline Object ToObject(Isolate* isolate, int32_t t) {
return *isolate->factory()->NewNumber(t);
}
-inline Object* ToObject(Isolate* isolate, uint32_t t) {
+inline Object ToObject(Isolate* isolate, uint32_t t) {
return *isolate->factory()->NewNumber(t);
}
-inline Object* ToObject(Isolate* isolate, int64_t t) {
+inline Object ToObject(Isolate* isolate, int64_t t) {
return *BigInt::FromInt64(isolate, t);
}
-inline Object* ToObject(Isolate* isolate, uint64_t t) {
+inline Object ToObject(Isolate* isolate, uint64_t t) {
return *BigInt::FromUint64(isolate, t);
}
template <typename T>
struct Load {
- static inline Object* Do(Isolate* isolate, void* buffer, size_t index) {
+ static inline Object Do(Isolate* isolate, void* buffer, size_t index) {
T result = LoadSeqCst(static_cast<T*>(buffer) + index);
return ToObject(isolate, result);
}
@@ -254,8 +261,8 @@ struct Store {
template <typename T>
struct Exchange {
- static inline Object* Do(Isolate* isolate, void* buffer, size_t index,
- Handle<Object> obj) {
+ static inline Object Do(Isolate* isolate, void* buffer, size_t index,
+ Handle<Object> obj) {
T value = FromObject<T>(obj);
T result = ExchangeSeqCst(static_cast<T*>(buffer) + index, value);
return ToObject(isolate, result);
@@ -263,8 +270,8 @@ struct Exchange {
};
template <typename T>
-inline Object* DoCompareExchange(Isolate* isolate, void* buffer, size_t index,
- Handle<Object> oldobj, Handle<Object> newobj) {
+inline Object DoCompareExchange(Isolate* isolate, void* buffer, size_t index,
+ Handle<Object> oldobj, Handle<Object> newobj) {
T oldval = FromObject<T>(oldobj);
T newval = FromObject<T>(newobj);
T result =
@@ -274,8 +281,8 @@ inline Object* DoCompareExchange(Isolate* isolate, void* buffer, size_t index,
template <typename T>
struct Add {
- static inline Object* Do(Isolate* isolate, void* buffer, size_t index,
- Handle<Object> obj) {
+ static inline Object Do(Isolate* isolate, void* buffer, size_t index,
+ Handle<Object> obj) {
T value = FromObject<T>(obj);
T result = AddSeqCst(static_cast<T*>(buffer) + index, value);
return ToObject(isolate, result);
@@ -284,8 +291,8 @@ struct Add {
template <typename T>
struct Sub {
- static inline Object* Do(Isolate* isolate, void* buffer, size_t index,
- Handle<Object> obj) {
+ static inline Object Do(Isolate* isolate, void* buffer, size_t index,
+ Handle<Object> obj) {
T value = FromObject<T>(obj);
T result = SubSeqCst(static_cast<T*>(buffer) + index, value);
return ToObject(isolate, result);
@@ -294,8 +301,8 @@ struct Sub {
template <typename T>
struct And {
- static inline Object* Do(Isolate* isolate, void* buffer, size_t index,
- Handle<Object> obj) {
+ static inline Object Do(Isolate* isolate, void* buffer, size_t index,
+ Handle<Object> obj) {
T value = FromObject<T>(obj);
T result = AndSeqCst(static_cast<T*>(buffer) + index, value);
return ToObject(isolate, result);
@@ -304,8 +311,8 @@ struct And {
template <typename T>
struct Or {
- static inline Object* Do(Isolate* isolate, void* buffer, size_t index,
- Handle<Object> obj) {
+ static inline Object Do(Isolate* isolate, void* buffer, size_t index,
+ Handle<Object> obj) {
T value = FromObject<T>(obj);
T result = OrSeqCst(static_cast<T*>(buffer) + index, value);
return ToObject(isolate, result);
@@ -314,8 +321,8 @@ struct Or {
template <typename T>
struct Xor {
- static inline Object* Do(Isolate* isolate, void* buffer, size_t index,
- Handle<Object> obj) {
+ static inline Object Do(Isolate* isolate, void* buffer, size_t index,
+ Handle<Object> obj) {
T value = FromObject<T>(obj);
T result = XorSeqCst(static_cast<T*>(buffer) + index, value);
return ToObject(isolate, result);
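
Each Op struct above performs one sequentially consistent read-modify-write on a typed slot of the shared buffer and boxes the old value. A sketch of the same shape using C++20 std::atomic_ref in place of V8's platform-specific *SeqCst helpers; this is illustrative only, not the runtime's implementation:

#include <atomic>
#include <cassert>
#include <cstddef>
#include <cstdint>

// Seq-cst fetch-add on buffer[index]; returns the previous value, just as
// Add<T>::Do above returns ToObject(isolate, result).
template <typename T>
struct DemoAdd {
  static T Do(T* buffer, size_t index, T value) {
    std::atomic_ref<T> slot(buffer[index]);  // C++20 atomic view of the slot
    return slot.fetch_add(value, std::memory_order_seq_cst);
  }
};

int main() {
  int32_t buffer[4] = {10, 20, 30, 40};
  int32_t old_value = DemoAdd<int32_t>::Do(buffer, 2, 5);
  assert(old_value == 30 && buffer[2] == 35);
}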
@@ -338,7 +345,7 @@ struct Xor {
// but also includes the ToInteger/ToBigInt conversion that's part of
// https://tc39.github.io/ecma262/#sec-atomicreadmodifywrite
template <template <typename> class Op>
-Object* GetModifySetValueInBuffer(Arguments args, Isolate* isolate) {
+Object GetModifySetValueInBuffer(Arguments args, Isolate* isolate) {
HandleScope scope(isolate);
DCHECK_EQ(3, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
@@ -353,7 +360,7 @@ Object* GetModifySetValueInBuffer(Arguments args, Isolate* isolate) {
Handle<BigInt> bigint;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, bigint,
BigInt::FromObject(isolate, value_obj));
- // SharedArrayBuffers are not neuterable.
+ // SharedArrayBuffers are not detachable.
CHECK_LT(index, NumberToSize(sta->length()));
if (sta->type() == kExternalBigInt64Array) {
return Op<int64_t>::Do(isolate, source, index, bigint);
@@ -365,7 +372,7 @@ Object* GetModifySetValueInBuffer(Arguments args, Isolate* isolate) {
Handle<Object> value;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, value,
Object::ToInteger(isolate, value_obj));
- // SharedArrayBuffers are not neuterable.
+ // SharedArrayBuffers are not detachable.
CHECK_LT(index, NumberToSize(sta->length()));
switch (sta->type()) {
@@ -395,7 +402,7 @@ RUNTIME_FUNCTION(Runtime_AtomicsLoad64) {
DCHECK(sta->type() == kExternalBigInt64Array ||
sta->type() == kExternalBigUint64Array);
- // SharedArrayBuffers are not neuterable.
+ // SharedArrayBuffers are not detachable.
CHECK_LT(index, NumberToSize(sta->length()));
if (sta->type() == kExternalBigInt64Array) {
return Load<int64_t>::Do(isolate, source, index);
@@ -421,7 +428,7 @@ RUNTIME_FUNCTION(Runtime_AtomicsStore64) {
DCHECK(sta->type() == kExternalBigInt64Array ||
sta->type() == kExternalBigUint64Array);
- // SharedArrayBuffers are not neuterable.
+ // SharedArrayBuffers are not detachable.
CHECK_LT(index, NumberToSize(sta->length()));
if (sta->type() == kExternalBigInt64Array) {
Store<int64_t>::Do(isolate, source, index, bigint);
@@ -456,7 +463,7 @@ RUNTIME_FUNCTION(Runtime_AtomicsCompareExchange) {
isolate, old_bigint, BigInt::FromObject(isolate, old_value_obj));
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, new_bigint, BigInt::FromObject(isolate, new_value_obj));
- // SharedArrayBuffers are not neuterable.
+ // SharedArrayBuffers are not detachable.
CHECK_LT(index, NumberToSize(sta->length()));
if (sta->type() == kExternalBigInt64Array) {
return DoCompareExchange<int64_t>(isolate, source, index, old_bigint,
@@ -473,7 +480,7 @@ RUNTIME_FUNCTION(Runtime_AtomicsCompareExchange) {
Object::ToInteger(isolate, old_value_obj));
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, new_value,
Object::ToInteger(isolate, new_value_obj));
- // SharedArrayBuffers are not neuterable.
+ // SharedArrayBuffers are not detachable.
CHECK_LT(index, NumberToSize(sta->length()));
switch (sta->type()) {
diff --git a/deps/v8/src/runtime/runtime-classes.cc b/deps/v8/src/runtime/runtime-classes.cc
index 0aea983f41..02db33733e 100644
--- a/deps/v8/src/runtime/runtime-classes.cc
+++ b/deps/v8/src/runtime/runtime-classes.cc
@@ -9,12 +9,16 @@
#include "src/accessors.h"
#include "src/arguments-inl.h"
+#include "src/counters.h"
#include "src/debug/debug.h"
#include "src/elements.h"
#include "src/isolate-inl.h"
-#include "src/messages.h"
+#include "src/log.h"
+#include "src/message-template.h"
#include "src/objects/hash-table-inl.h"
#include "src/objects/literal-objects-inl.h"
+#include "src/objects/smi.h"
+#include "src/objects/struct-inl.h"
#include "src/runtime/runtime.h"
namespace v8 {
@@ -62,8 +66,8 @@ RUNTIME_FUNCTION(Runtime_ThrowSuperNotCalled) {
namespace {
-Object* ThrowNotSuperConstructor(Isolate* isolate, Handle<Object> constructor,
- Handle<JSFunction> function) {
+Object ThrowNotSuperConstructor(Isolate* isolate, Handle<Object> constructor,
+ Handle<JSFunction> function) {
Handle<String> super_name;
if (constructor->IsJSFunction()) {
super_name = handle(Handle<JSFunction>::cast(constructor)->shared()->Name(),
@@ -123,8 +127,8 @@ Handle<Name> KeyToName<NumberDictionary>(Isolate* isolate, Handle<Object> key) {
return isolate->factory()->NumberToString(key);
}
-inline void SetHomeObject(Isolate* isolate, JSFunction* method,
- JSObject* home_object) {
+inline void SetHomeObject(Isolate* isolate, JSFunction method,
+ JSObject home_object) {
if (method->shared()->needs_home_object()) {
const int kPropertyIndex = JSFunction::kMaybeHomeObjectDescriptorIndex;
CHECK_EQ(method->map()->instance_descriptors()->GetKey(kPropertyIndex),
@@ -146,9 +150,9 @@ inline void SetHomeObject(Isolate* isolate, JSFunction* method,
// shared name.
template <typename Dictionary>
MaybeHandle<Object> GetMethodAndSetHomeObjectAndName(
- Isolate* isolate, Arguments& args, Smi* index, Handle<JSObject> home_object,
+ Isolate* isolate, Arguments& args, Smi index, Handle<JSObject> home_object,
Handle<String> name_prefix, Handle<Object> key) {
- int int_index = Smi::ToInt(index);
+ int int_index = index.value();
// Class constructor and prototype values do not require post processing.
if (int_index < ClassBoilerplate::kFirstDynamicArgumentIndex) {
@@ -181,9 +185,9 @@ MaybeHandle<Object> GetMethodAndSetHomeObjectAndName(
// This is a simplified version of GetMethodWithSharedNameAndSetHomeObject()
// function above that is used when it's guaranteed that the method has
// shared name.
-Object* GetMethodWithSharedNameAndSetHomeObject(Isolate* isolate,
- Arguments& args, Object* index,
- JSObject* home_object) {
+Object GetMethodWithSharedNameAndSetHomeObject(Isolate* isolate,
+ Arguments& args, Object index,
+ JSObject home_object) {
DisallowHeapAllocation no_gc;
int int_index = Smi::ToInt(index);
@@ -210,7 +214,7 @@ Handle<Dictionary> ShallowCopyDictionaryTemplate(
// Clone all AccessorPairs in the dictionary.
int capacity = dictionary->Capacity();
for (int i = 0; i < capacity; i++) {
- Object* value = dictionary->ValueAt(i);
+ Object value = dictionary->ValueAt(i);
if (value->IsAccessorPair()) {
Handle<AccessorPair> pair(AccessorPair::cast(value), isolate);
pair = AccessorPair::Copy(isolate, pair);
@@ -230,7 +234,7 @@ bool SubstituteValues(Isolate* isolate, Handle<Dictionary> dictionary,
int capacity = dictionary->Capacity();
ReadOnlyRoots roots(isolate);
for (int i = 0; i < capacity; i++) {
- Object* maybe_key = dictionary->KeyAt(i);
+ Object maybe_key = dictionary->KeyAt(i);
if (!Dictionary::IsKey(roots, maybe_key)) continue;
if (install_name_accessor && *install_name_accessor &&
(maybe_key == *name_string)) {
@@ -240,7 +244,7 @@ bool SubstituteValues(Isolate* isolate, Handle<Dictionary> dictionary,
Handle<Object> value(dictionary->ValueAt(i), isolate);
if (value->IsAccessorPair()) {
Handle<AccessorPair> pair = Handle<AccessorPair>::cast(value);
- Object* tmp = pair->getter();
+ Object tmp = pair->getter();
if (tmp->IsSmi()) {
Handle<Object> result;
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
@@ -293,17 +297,34 @@ bool AddDescriptorsByTemplate(
: ShallowCopyDictionaryTemplate(isolate,
elements_dictionary_template);
+ Handle<PropertyArray> property_array =
+ isolate->factory()->empty_property_array();
+ if (FLAG_track_constant_fields) {
+ // If we store constants in instances, count the number of properties
+ // that must be in the instance and create the property array to
+ // hold the constants.
+ int count = 0;
+ for (int i = 0; i < nof_descriptors; i++) {
+ PropertyDetails details = descriptors_template->GetDetails(i);
+ if (details.location() == kDescriptor && details.kind() == kData) {
+ count++;
+ }
+ }
+ property_array = isolate->factory()->NewPropertyArray(count);
+ }
+
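
The block above does a counting pass over the descriptors before allocating, so the property array backing the constant fields is sized exactly once. A simplified sketch of that two-pass shape, with plain structs standing in for V8's PropertyDetails:

#include <cassert>
#include <vector>

enum Location { kField, kDescriptor };
enum Kind { kData, kAccessor };
struct DemoDetails { Location location; Kind kind; };

int main() {
  std::vector<DemoDetails> descriptors = {
      {kDescriptor, kData}, {kDescriptor, kAccessor}, {kDescriptor, kData}};
  // Pass 1: count the data descriptors that become in-instance constants.
  int count = 0;
  for (const DemoDetails& d : descriptors) {
    if (d.location == kDescriptor && d.kind == kData) count++;
  }
  // Pass 2: allocate the backing store once; it can then be filled in order.
  std::vector<int> property_array(count);
  assert(property_array.size() == 2u);
}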
// Read values from |descriptors_template| and store possibly post-processed
// values into "instantiated" |descriptors| array.
+ int field_index = 0;
for (int i = 0; i < nof_descriptors; i++) {
- Object* value = descriptors_template->GetStrongValue(i);
+ Object value = descriptors_template->GetStrongValue(i);
if (value->IsAccessorPair()) {
Handle<AccessorPair> pair = AccessorPair::Copy(
isolate, handle(AccessorPair::cast(value), isolate));
value = *pair;
}
DisallowHeapAllocation no_gc;
- Name* name = descriptors_template->GetKey(i);
+ Name name = descriptors_template->GetKey(i);
DCHECK(name->IsUniqueName());
PropertyDetails details = descriptors_template->GetDetails(i);
if (details.location() == kDescriptor) {
@@ -314,12 +335,11 @@ bool AddDescriptorsByTemplate(
}
details =
details.CopyWithRepresentation(value->OptimalRepresentation());
-
} else {
DCHECK_EQ(kAccessor, details.kind());
if (value->IsAccessorPair()) {
- AccessorPair* pair = AccessorPair::cast(value);
- Object* tmp = pair->getter();
+ AccessorPair pair = AccessorPair::cast(value);
+ Object tmp = pair->getter();
if (tmp->IsSmi()) {
pair->set_getter(GetMethodWithSharedNameAndSetHomeObject(
isolate, args, tmp, *receiver));
@@ -332,14 +352,27 @@ bool AddDescriptorsByTemplate(
}
}
} else {
- DCHECK_EQ(kField, details.location());
- DCHECK(!details.representation().IsDouble());
+ UNREACHABLE();
}
DCHECK(value->FitsRepresentation(details.representation()));
- descriptors->Set(i, name, MaybeObject::FromObject(value), details);
+ // With constant field tracking, we store the values in the instance.
+ if (FLAG_track_constant_fields && details.location() == kDescriptor &&
+ details.kind() == kData) {
+ details = PropertyDetails(details.kind(), details.attributes(), kField,
+ PropertyConstness::kConst,
+ details.representation(), field_index)
+ .set_pointer(details.pointer());
+
+ property_array->set(field_index, value);
+ field_index++;
+ descriptors->Set(i, name, MaybeObject::FromObject(FieldType::Any()),
+ details);
+ } else {
+ descriptors->Set(i, name, MaybeObject::FromObject(value), details);
+ }
}
- map->InitializeDescriptors(*descriptors,
+ map->InitializeDescriptors(isolate, *descriptors,
LayoutDescriptor::FastPointerLayout());
if (elements_dictionary->NumberOfElements() > 0) {
if (!SubstituteValues<NumberDictionary>(isolate, elements_dictionary,
@@ -354,6 +387,9 @@ bool AddDescriptorsByTemplate(
if (elements_dictionary->NumberOfElements() > 0) {
receiver->set_elements(*elements_dictionary);
}
+ if (property_array->length() > 0) {
+ receiver->SetProperties(*property_array);
+ }
return true;
}
@@ -382,7 +418,7 @@ bool AddDescriptorsByTemplate(
ValueKind value_kind = ComputedEntryFlags::ValueKindBits::decode(flags);
int key_index = ComputedEntryFlags::KeyIndexBits::decode(flags);
- Object* value = Smi::FromInt(key_index + 1); // Value follows name.
+ Object value = Smi::FromInt(key_index + 1); // Value follows name.
Handle<Object> key = args.at<Object>(key_index);
DCHECK(key->IsName());
@@ -437,9 +473,20 @@ Handle<JSObject> CreateClassPrototype(Isolate* isolate) {
const int kInobjectFields = 0;
- // Just use some JSObject map of certain size.
- Handle<Map> map = factory->ObjectLiteralMapFromCache(
- isolate->native_context(), kInobjectFields);
+ Handle<Map> map;
+ if (FLAG_track_constant_fields) {
+ // For constant tracking we want to avoid the hassle of handling
+ // in-object properties, so create a map with no in-object
+ // properties.
+
+ // TODO(ishell) Support caching of zero in-object properties map
+ // by ObjectLiteralMapFromCache().
+ map = Map::Create(isolate, 0);
+ } else {
+ // Just use some JSObject map of certain size.
+ map = factory->ObjectLiteralMapFromCache(isolate->native_context(),
+ kInobjectFields);
+ }
return factory->NewJSObjectFromMap(map);
}
@@ -520,7 +567,8 @@ bool InitClassConstructor(Isolate* isolate,
Handle<NameDictionary>::cast(properties_template);
map->set_is_dictionary_map(true);
- map->InitializeDescriptors(ReadOnlyRoots(isolate).empty_descriptor_array(),
+ map->InitializeDescriptors(isolate,
+ ReadOnlyRoots(isolate).empty_descriptor_array(),
LayoutDescriptor::FastPointerLayout());
map->set_is_migration_target(false);
map->set_may_have_interesting_symbols(true);
@@ -586,7 +634,7 @@ MaybeHandle<Object> DefineClass(Isolate* isolate,
Handle<JSObject> prototype = CreateClassPrototype(isolate);
DCHECK_EQ(*constructor, args[ClassBoilerplate::kConstructorArgumentIndex]);
- args[ClassBoilerplate::kPrototypeArgumentIndex] = *prototype;
+ args.set_at(ClassBoilerplate::kPrototypeArgumentIndex, *prototype);
if (!InitClassConstructor(isolate, class_boilerplate, constructor_parent,
constructor, args) ||
@@ -597,9 +645,9 @@ MaybeHandle<Object> DefineClass(Isolate* isolate,
}
if (FLAG_trace_maps) {
LOG(isolate,
- MapEvent("InitialMap", nullptr, constructor->map(),
+ MapEvent("InitialMap", Map(), constructor->map(),
"init class constructor", constructor->shared()->DebugName()));
- LOG(isolate, MapEvent("InitialMap", nullptr, prototype->map(),
+ LOG(isolate, MapEvent("InitialMap", Map(), prototype->map(),
"init class prototype"));
}
@@ -637,9 +685,9 @@ MaybeHandle<JSReceiver> GetSuperHolder(
PrototypeIterator iter(isolate, home_object);
Handle<Object> proto = PrototypeIterator::GetCurrent(iter);
if (!proto->IsJSReceiver()) {
- MessageTemplate::Template message =
- mode == SuperMode::kLoad ? MessageTemplate::kNonObjectPropertyLoad
- : MessageTemplate::kNonObjectPropertyStore;
+ MessageTemplate message = mode == SuperMode::kLoad
+ ? MessageTemplate::kNonObjectPropertyLoad
+ : MessageTemplate::kNonObjectPropertyStore;
Handle<Name> name;
if (!maybe_name.ToHandle(&name)) {
name = isolate->factory()->Uint32ToString(index);
diff --git a/deps/v8/src/runtime/runtime-collections.cc b/deps/v8/src/runtime/runtime-collections.cc
index 03a24139f3..2f03bb8532 100644
--- a/deps/v8/src/runtime/runtime-collections.cc
+++ b/deps/v8/src/runtime/runtime-collections.cc
@@ -4,6 +4,7 @@
#include "src/arguments-inl.h"
#include "src/conversions-inl.h"
+#include "src/counters.h"
#include "src/heap/factory.h"
#include "src/objects/hash-table-inl.h"
#include "src/objects/js-collection-inl.h"
diff --git a/deps/v8/src/runtime/runtime-compiler.cc b/deps/v8/src/runtime/runtime-compiler.cc
index bebd489d70..c6a7e7960c 100644
--- a/deps/v8/src/runtime/runtime-compiler.cc
+++ b/deps/v8/src/runtime/runtime-compiler.cc
@@ -9,7 +9,7 @@
#include "src/deoptimizer.h"
#include "src/frames-inl.h"
#include "src/isolate-inl.h"
-#include "src/messages.h"
+#include "src/message-template.h"
#include "src/objects/js-array-buffer-inl.h"
#include "src/objects/js-array-inl.h"
#include "src/runtime/runtime-utils.h"
@@ -36,7 +36,9 @@ RUNTIME_FUNCTION(Runtime_CompileLazy) {
if (check.JsHasOverflowed(kStackSpaceRequiredForCompilation * KB)) {
return isolate->StackOverflow();
}
- if (!Compiler::Compile(function, Compiler::KEEP_EXCEPTION)) {
+ IsCompiledScope is_compiled_scope;
+ if (!Compiler::Compile(function, Compiler::KEEP_EXCEPTION,
+ &is_compiled_scope)) {
return ReadOnlyRoots(isolate).exception();
}
DCHECK(function->is_compiled());
@@ -123,7 +125,7 @@ RUNTIME_FUNCTION(Runtime_InstantiateAsmJs) {
}
if (function->shared()->HasAsmWasmData()) {
Handle<SharedFunctionInfo> shared(function->shared(), isolate);
- Handle<FixedArray> data(shared->asm_wasm_data(), isolate);
+ Handle<AsmWasmData> data(shared->asm_wasm_data(), isolate);
MaybeHandle<Object> result = AsmJs::InstantiateAsmWasm(
isolate, shared, data, stdlib, foreign, memory);
if (!result.is_null()) {
@@ -150,7 +152,7 @@ RUNTIME_FUNCTION(Runtime_NotifyDeoptimized) {
DCHECK(deoptimizer->compiled_code()->kind() == Code::OPTIMIZED_FUNCTION);
DCHECK(deoptimizer->compiled_code()->is_turbofanned());
DCHECK(AllowHeapAllocation::IsAllowed());
- DCHECK_NULL(isolate->context());
+ DCHECK(isolate->context().is_null());
TimerEventScope<TimerEventDeoptimizeCode> timer(isolate);
TRACE_EVENT0("v8", "V8.DeoptimizeCode");
@@ -253,7 +255,7 @@ RUNTIME_FUNCTION(Runtime_CompileForOnStackReplacement) {
Handle<Code> result;
if (maybe_result.ToHandle(&result) &&
result->kind() == Code::OPTIMIZED_FUNCTION) {
- DeoptimizationData* data =
+ DeoptimizationData data =
DeoptimizationData::cast(result->deoptimization_data());
if (data->OsrPcOffset()->value() >= 0) {
@@ -289,13 +291,13 @@ RUNTIME_FUNCTION(Runtime_CompileForOnStackReplacement) {
if (!function->IsOptimized()) {
function->set_code(function->shared()->GetCode());
}
- return nullptr;
+ return Object();
}
-static Object* CompileGlobalEval(Isolate* isolate, Handle<String> source,
- Handle<SharedFunctionInfo> outer_info,
- LanguageMode language_mode,
- int eval_scope_position, int eval_position) {
+static Object CompileGlobalEval(Isolate* isolate, Handle<String> source,
+ Handle<SharedFunctionInfo> outer_info,
+ LanguageMode language_mode,
+ int eval_scope_position, int eval_position) {
Handle<Context> context(isolate->context(), isolate);
Handle<Context> native_context(context->native_context(), isolate);
@@ -326,7 +328,6 @@ static Object* CompileGlobalEval(Isolate* isolate, Handle<String> source,
return *compiled;
}
-
RUNTIME_FUNCTION(Runtime_ResolvePossiblyDirectEval) {
HandleScope scope(isolate);
DCHECK_EQ(6, args.length());
diff --git a/deps/v8/src/runtime/runtime-date.cc b/deps/v8/src/runtime/runtime-date.cc
index 102f89ac14..5c22d280df 100644
--- a/deps/v8/src/runtime/runtime-date.cc
+++ b/deps/v8/src/runtime/runtime-date.cc
@@ -6,10 +6,10 @@
#include "src/arguments.h"
#include "src/conversions-inl.h"
+#include "src/counters.h"
#include "src/date.h"
#include "src/heap/factory.h"
#include "src/isolate-inl.h"
-#include "src/messages.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/runtime/runtime-debug.cc b/deps/v8/src/runtime/runtime-debug.cc
index 4381fa6dcf..98aa3b98e7 100644
--- a/deps/v8/src/runtime/runtime-debug.cc
+++ b/deps/v8/src/runtime/runtime-debug.cc
@@ -6,6 +6,7 @@
#include "src/arguments-inl.h"
#include "src/compiler.h"
+#include "src/counters.h"
#include "src/debug/debug-coverage.h"
#include "src/debug/debug-evaluate.h"
#include "src/debug/debug-frames.h"
@@ -19,6 +20,7 @@
#include "src/interpreter/interpreter.h"
#include "src/isolate-inl.h"
#include "src/objects/debug-objects-inl.h"
+#include "src/objects/heap-object-inl.h"
#include "src/objects/js-collection-inl.h"
#include "src/objects/js-generator-inl.h"
#include "src/objects/js-promise-inl.h"
@@ -39,6 +41,7 @@ RUNTIME_FUNCTION_RETURN_PAIR(Runtime_DebugBreakOnBytecode) {
DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(Object, value, 0);
HandleScope scope(isolate);
+
// Return value can be changed by debugger. Last set value will be used as
// return value.
ReturnValueScope result_scope(isolate->debug());
@@ -51,12 +54,19 @@ RUNTIME_FUNCTION_RETURN_PAIR(Runtime_DebugBreakOnBytecode) {
handle(it.frame()->function(), isolate));
}
+ // If we are dropping frames, there is no need to get a return value or
+ // bytecode, since we will be restarting execution at a different frame.
+ if (isolate->debug()->will_restart()) {
+ return MakePair(ReadOnlyRoots(isolate).undefined_value(),
+ Smi::FromInt(static_cast<uint8_t>(Bytecode::kIllegal)));
+ }
+
// Return the handler from the original bytecode array.
DCHECK(it.frame()->is_interpreted());
InterpretedFrame* interpreted_frame =
reinterpret_cast<InterpretedFrame*>(it.frame());
- SharedFunctionInfo* shared = interpreted_frame->function()->shared();
- BytecodeArray* bytecode_array = shared->GetBytecodeArray();
+ SharedFunctionInfo shared = interpreted_frame->function()->shared();
+ BytecodeArray bytecode_array = shared->GetBytecodeArray();
int bytecode_offset = interpreted_frame->GetBytecodeOffset();
Bytecode bytecode = Bytecodes::FromByte(bytecode_array->get(bytecode_offset));
@@ -80,14 +90,13 @@ RUNTIME_FUNCTION_RETURN_PAIR(Runtime_DebugBreakOnBytecode) {
// We need to deserialize now to ensure we don't hit the debug break again
// after deserializing.
OperandScale operand_scale = OperandScale::kSingle;
- isolate->interpreter()->GetAndMaybeDeserializeBytecodeHandler(bytecode,
- operand_scale);
+ isolate->interpreter()->GetBytecodeHandler(bytecode, operand_scale);
if (side_effect_check_failed) {
return MakePair(ReadOnlyRoots(isolate).exception(),
Smi::FromInt(static_cast<uint8_t>(bytecode)));
}
- Object* interrupt_object = isolate->stack_guard()->HandleInterrupts();
+ Object interrupt_object = isolate->stack_guard()->HandleInterrupts();
if (interrupt_object->IsException(isolate)) {
return MakePair(interrupt_object,
Smi::FromInt(static_cast<uint8_t>(bytecode)));
@@ -479,7 +488,7 @@ int ScriptLinePosition(Handle<Script> script, int line) {
Script::InitLineEnds(script);
- FixedArray* line_ends_array = FixedArray::cast(script->line_ends());
+ FixedArray line_ends_array = FixedArray::cast(script->line_ends());
const int line_count = line_ends_array->length();
DCHECK_LT(0, line_count);
@@ -566,8 +575,8 @@ Handle<Object> ScriptLocationFromLine(Isolate* isolate, Handle<Script> script,
// Slow traversal over all scripts on the heap.
bool GetScriptById(Isolate* isolate, int needle, Handle<Script>* result) {
Script::Iterator iterator(isolate);
- Script* script = nullptr;
- while ((script = iterator.Next()) != nullptr) {
+ for (Script script = iterator.Next(); !script.is_null();
+ script = iterator.Next()) {
if (script->id() == needle) {
*result = handle(script, isolate);
return true;
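
With a value type there is no nullptr to terminate the old `while ((script = iterator.Next()) != nullptr)` loop, so GetScriptById switches to a `for` loop testing `is_null()`. A hypothetical equivalent, assuming an iterator that hands back a default-constructed value once exhausted:

#include <cstddef>
#include <cstdio>
#include <vector>

struct ScriptSketch {
  int id = 0;
  bool is_null() const { return id == 0; }  // 0 acts as the null sentinel
};

class ScriptIterator {
 public:
  explicit ScriptIterator(const std::vector<ScriptSketch>& s) : scripts_(s) {}
  // Returns a null ScriptSketch once the sequence is exhausted.
  ScriptSketch Next() {
    return pos_ < scripts_.size() ? scripts_[pos_++] : ScriptSketch{};
  }
 private:
  const std::vector<ScriptSketch>& scripts_;
  std::size_t pos_ = 0;
};

int main() {
  std::vector<ScriptSketch> heap = {{7}, {42}, {13}};
  ScriptIterator it(heap);
  // Mirrors the rewritten GetScriptById loop shape.
  for (ScriptSketch script = it.Next(); !script.is_null(); script = it.Next()) {
    if (script.id == 42) std::printf("found %d\n", script.id);
  }
}
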
@@ -737,20 +746,22 @@ RUNTIME_FUNCTION(Runtime_IncBlockCounter) {
// coverage collection mode, which triggers deletion of all coverage infos in
// order to avoid memory leaks.
- SharedFunctionInfo* shared = function->shared();
+ SharedFunctionInfo shared = function->shared();
if (shared->HasCoverageInfo()) {
- CoverageInfo* coverage_info = shared->GetCoverageInfo();
+ CoverageInfo coverage_info = shared->GetCoverageInfo();
coverage_info->IncrementBlockCount(coverage_array_slot_index);
}
return ReadOnlyRoots(isolate).undefined_value();
}
-RUNTIME_FUNCTION(Runtime_DebugAsyncFunctionSuspended) {
+RUNTIME_FUNCTION(Runtime_DebugAsyncFunctionEntered) {
DCHECK_EQ(1, args.length());
HandleScope scope(isolate);
CONVERT_ARG_HANDLE_CHECKED(JSPromise, promise, 0);
- isolate->OnAsyncFunctionStateChanged(promise, debug::kAsyncFunctionSuspended);
+ isolate->RunPromiseHook(PromiseHookType::kInit, promise,
+ isolate->factory()->undefined_value());
+ if (isolate->debug()->is_active()) isolate->PushPromise(promise);
return ReadOnlyRoots(isolate).undefined_value();
}
@@ -764,6 +775,14 @@ RUNTIME_FUNCTION(Runtime_DebugAsyncFunctionFinished) {
isolate->OnAsyncFunctionStateChanged(promise,
debug::kAsyncFunctionFinished);
}
+ return *promise;
+}
+
+RUNTIME_FUNCTION(Runtime_DebugAsyncFunctionSuspended) {
+ DCHECK_EQ(1, args.length());
+ HandleScope scope(isolate);
+ CONVERT_ARG_HANDLE_CHECKED(JSPromise, promise, 0);
+ isolate->OnAsyncFunctionStateChanged(promise, debug::kAsyncFunctionSuspended);
return ReadOnlyRoots(isolate).undefined_value();
}
diff --git a/deps/v8/src/runtime/runtime-forin.cc b/deps/v8/src/runtime/runtime-forin.cc
index b43d91540e..b0bb297bfe 100644
--- a/deps/v8/src/runtime/runtime-forin.cc
+++ b/deps/v8/src/runtime/runtime-forin.cc
@@ -5,6 +5,7 @@
#include "src/runtime/runtime-utils.h"
#include "src/arguments-inl.h"
+#include "src/counters.h"
#include "src/elements.h"
#include "src/heap/factory.h"
#include "src/isolate-inl.h"
diff --git a/deps/v8/src/runtime/runtime-function.cc b/deps/v8/src/runtime/runtime-function.cc
index 769ccc528b..1edbd3d5cb 100644
--- a/deps/v8/src/runtime/runtime-function.cc
+++ b/deps/v8/src/runtime/runtime-function.cc
@@ -5,8 +5,8 @@
#include "src/accessors.h"
#include "src/arguments-inl.h"
#include "src/compiler.h"
+#include "src/counters.h"
#include "src/isolate-inl.h"
-#include "src/messages.h"
#include "src/runtime/runtime-utils.h"
namespace v8 {
@@ -83,7 +83,7 @@ RUNTIME_FUNCTION(Runtime_SetNativeFlag) {
CONVERT_ARG_CHECKED(Object, object, 0);
if (object->IsJSFunction()) {
- JSFunction* func = JSFunction::cast(object);
+ JSFunction func = JSFunction::cast(object);
func->shared()->set_native(true);
}
return ReadOnlyRoots(isolate).undefined_value();
diff --git a/deps/v8/src/runtime/runtime-futex.cc b/deps/v8/src/runtime/runtime-futex.cc
index c891b6582c..e8b4025981 100644
--- a/deps/v8/src/runtime/runtime-futex.cc
+++ b/deps/v8/src/runtime/runtime-futex.cc
@@ -7,8 +7,10 @@
#include "src/arguments-inl.h"
#include "src/base/platform/time.h"
#include "src/conversions-inl.h"
+#include "src/counters.h"
#include "src/futex-emulation.h"
#include "src/globals.h"
+#include "src/objects/heap-object-inl.h"
#include "src/objects/js-array-buffer-inl.h"
// Implement Futex API for SharedArrayBuffers as defined in the
@@ -23,7 +25,7 @@ RUNTIME_FUNCTION(Runtime_AtomicsNumWaitersForTesting) {
DCHECK_EQ(2, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
CONVERT_SIZE_ARG_CHECKED(index, 1);
- CHECK(!sta->WasNeutered());
+ CHECK(!sta->WasDetached());
CHECK(sta->GetBuffer()->is_shared());
CHECK_LT(index, NumberToSize(sta->length()));
CHECK_EQ(sta->type(), kExternalInt32Array);
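
The `WasNeutered` to `WasDetached` rename tracks the spec's terminology for ArrayBuffers whose backing store has been released. A toy guard in the same spirit; the types here are invented for illustration:

#include <cstddef>
#include <stdexcept>

struct ArrayBufferSketch {
  void* backing_store = nullptr;
  bool detached = false;
};

struct TypedArraySketch {
  ArrayBufferSketch* buffer;
  std::size_t length;
  // The spec calls this state "detached"; older V8 code said "neutered".
  bool WasDetached() const { return buffer == nullptr || buffer->detached; }
};

int CheckedLoad(const TypedArraySketch& ta, std::size_t index) {
  if (ta.WasDetached()) throw std::runtime_error("buffer was detached");
  if (index >= ta.length) throw std::out_of_range("index");
  return 0;  // a real implementation would read from the backing store
}

int main() {
  ArrayBufferSketch buf{nullptr, true};
  TypedArraySketch ta{&buf, 8};
  try { CheckedLoad(ta, 1); } catch (const std::exception&) { /* expected */ }
}
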
diff --git a/deps/v8/src/runtime/runtime-generator.cc b/deps/v8/src/runtime/runtime-generator.cc
index 9d652599c1..f8873ff938 100644
--- a/deps/v8/src/runtime/runtime-generator.cc
+++ b/deps/v8/src/runtime/runtime-generator.cc
@@ -3,6 +3,7 @@
// found in the LICENSE file.
#include "src/arguments-inl.h"
+#include "src/counters.h"
#include "src/heap/factory.h"
#include "src/heap/heap-inl.h"
#include "src/objects-inl.h"
@@ -12,11 +13,43 @@
namespace v8 {
namespace internal {
+RUNTIME_FUNCTION(Runtime_AsyncFunctionAwaitCaught) {
+ // Runtime call is implemented in InterpreterIntrinsics and lowered in
+ // JSIntrinsicLowering
+ UNREACHABLE();
+}
+
+RUNTIME_FUNCTION(Runtime_AsyncFunctionAwaitUncaught) {
+ // Runtime call is implemented in InterpreterIntrinsics and lowered in
+ // JSIntrinsicLowering
+ UNREACHABLE();
+}
+
+RUNTIME_FUNCTION(Runtime_AsyncFunctionEnter) {
+ // Runtime call is implemented in InterpreterIntrinsics and lowered in
+ // JSIntrinsicLowering
+ UNREACHABLE();
+}
+
+RUNTIME_FUNCTION(Runtime_AsyncFunctionReject) {
+ // Runtime call is implemented in InterpreterIntrinsics and lowered in
+ // JSIntrinsicLowering
+ UNREACHABLE();
+}
+
+RUNTIME_FUNCTION(Runtime_AsyncFunctionResolve) {
+ // Runtime call is implemented in InterpreterIntrinsics and lowered in
+ // JSIntrinsicLowering
+ UNREACHABLE();
+}
+
RUNTIME_FUNCTION(Runtime_CreateJSGeneratorObject) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, receiver, 1);
+ CHECK_IMPLIES(IsAsyncFunction(function->shared()->kind()),
+ IsAsyncGeneratorFunction(function->shared()->kind()));
CHECK(IsResumableFunction(function->shared()->kind()));
// Underlying function needs to have bytecode available.
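
The new `Runtime_AsyncFunction*` entries follow a pattern used throughout this file: the C++ body is `UNREACHABLE()` because every call site is recognized as an intrinsic by the interpreter or lowered by the optimizing compiler before it could ever reach the runtime. A rough model of that dispatch, with invented names:

#include <cstdio>
#include <cstdlib>

enum class IntrinsicId { kAsyncFunctionEnter, kOrdinaryCall };

// Lowered fast path the compiler substitutes at the call site.
int LoweredAsyncFunctionEnter(int arg) { return arg + 1; }
int OrdinaryRuntimeCall(int arg) { return arg * 2; }

// Runtime stub kept only so the function table stays complete; any call
// means the lowering pass failed, so it traps (the UNREACHABLE() above).
int AsyncFunctionEnterStub(int) {
  std::fprintf(stderr, "unreachable: intrinsic should have been lowered\n");
  std::abort();
}

int Dispatch(IntrinsicId id, int arg) {
  // The "compiler" routes known intrinsics to their lowered handler, so
  // AsyncFunctionEnterStub is never reached through normal dispatch.
  return id == IntrinsicId::kAsyncFunctionEnter
             ? LoweredAsyncFunctionEnter(arg)
             : OrdinaryRuntimeCall(arg);
}

int main() {
  std::printf("%d\n", Dispatch(IntrinsicId::kAsyncFunctionEnter, 41));  // 42
}
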
@@ -53,6 +86,18 @@ RUNTIME_FUNCTION(Runtime_GeneratorGetFunction) {
return generator->function();
}
+RUNTIME_FUNCTION(Runtime_AsyncGeneratorAwaitCaught) {
+ // Runtime call is implemented in InterpreterIntrinsics and lowered in
+ // JSIntrinsicLowering
+ UNREACHABLE();
+}
+
+RUNTIME_FUNCTION(Runtime_AsyncGeneratorAwaitUncaught) {
+ // Runtime call is implemented in InterpreterIntrinsics and lowered in
+ // JSIntrinsicLowering
+ UNREACHABLE();
+}
+
RUNTIME_FUNCTION(Runtime_AsyncGeneratorResolve) {
// Runtime call is implemented in InterpreterIntrinsics and lowered in
// JSIntrinsicLowering
@@ -82,8 +127,7 @@ RUNTIME_FUNCTION(Runtime_GeneratorGetResumeMode) {
RUNTIME_FUNCTION(Runtime_AsyncGeneratorHasCatchHandlerForPC) {
DisallowHeapAllocation no_allocation_scope;
DCHECK_EQ(1, args.length());
- DCHECK(args[0]->IsJSAsyncGeneratorObject());
- JSAsyncGeneratorObject* generator = JSAsyncGeneratorObject::cast(args[0]);
+ CONVERT_ARG_CHECKED(JSAsyncGeneratorObject, generator, 0);
int state = generator->continuation();
DCHECK_NE(state, JSAsyncGeneratorObject::kGeneratorExecuting);
@@ -93,7 +137,7 @@ RUNTIME_FUNCTION(Runtime_AsyncGeneratorHasCatchHandlerForPC) {
// not reach a catch handler.
if (state < 1) return ReadOnlyRoots(isolate).false_value();
- SharedFunctionInfo* shared = generator->function()->shared();
+ SharedFunctionInfo shared = generator->function()->shared();
DCHECK(shared->HasBytecodeArray());
HandlerTable handler_table(shared->GetBytecodeArray());
diff --git a/deps/v8/src/runtime/runtime-internal.cc b/deps/v8/src/runtime/runtime-internal.cc
index 8c227a1703..f8a7d5ba83 100644
--- a/deps/v8/src/runtime/runtime-internal.cc
+++ b/deps/v8/src/runtime/runtime-internal.cc
@@ -6,15 +6,18 @@
#include "src/api.h"
#include "src/arguments-inl.h"
+#include "src/ast/ast-traversal-visitor.h"
#include "src/ast/prettyprinter.h"
#include "src/bootstrapper.h"
#include "src/builtins/builtins.h"
#include "src/conversions.h"
+#include "src/counters.h"
#include "src/debug/debug.h"
#include "src/frames-inl.h"
#include "src/isolate-inl.h"
-#include "src/messages.h"
+#include "src/message-template.h"
#include "src/objects/js-array-inl.h"
+#include "src/ostreams.h"
#include "src/parsing/parse-info.h"
#include "src/parsing/parsing.h"
#include "src/runtime/runtime-utils.h"
@@ -31,40 +34,18 @@ RUNTIME_FUNCTION(Runtime_CheckIsBootstrapping) {
return ReadOnlyRoots(isolate).undefined_value();
}
-RUNTIME_FUNCTION(Runtime_ExportFromRuntime) {
+RUNTIME_FUNCTION(Runtime_FatalProcessOutOfMemoryInAllocateRaw) {
HandleScope scope(isolate);
- DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSObject, container, 0);
- CHECK(isolate->bootstrapper()->IsActive());
- JSObject::NormalizeProperties(container, KEEP_INOBJECT_PROPERTIES, 10,
- "ExportFromRuntime");
- Bootstrapper::ExportFromRuntime(isolate, container);
- JSObject::MigrateSlowToFast(container, 0, "ExportFromRuntime");
- return *container;
+ DCHECK_EQ(0, args.length());
+ isolate->heap()->FatalProcessOutOfMemory("CodeStubAssembler::AllocateRaw");
+ UNREACHABLE();
}
-RUNTIME_FUNCTION(Runtime_InstallToContext) {
+RUNTIME_FUNCTION(Runtime_FatalProcessOutOfMemoryInvalidArrayLength) {
HandleScope scope(isolate);
- DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSArray, array, 0);
- CHECK(array->HasFastElements());
- CHECK(isolate->bootstrapper()->IsActive());
- Handle<Context> native_context = isolate->native_context();
- Handle<FixedArray> fixed_array(FixedArray::cast(array->elements()), isolate);
- int length = Smi::ToInt(array->length());
- for (int i = 0; i < length; i += 2) {
- CHECK(fixed_array->get(i)->IsString());
- Handle<String> name(String::cast(fixed_array->get(i)), isolate);
- CHECK(fixed_array->get(i + 1)->IsJSObject());
- Handle<JSObject> object(JSObject::cast(fixed_array->get(i + 1)), isolate);
- int index = Context::ImportedFieldIndexForName(name);
- if (index == Context::kNotFound) {
- index = Context::IntrinsicIndexForName(name);
- }
- CHECK_NE(index, Context::kNotFound);
- native_context->set(index, *object);
- }
- return ReadOnlyRoots(isolate).undefined_value();
+ DCHECK_EQ(0, args.length());
+ isolate->heap()->FatalProcessOutOfMemory("invalid array length");
+ UNREACHABLE();
}
RUNTIME_FUNCTION(Runtime_Throw) {
@@ -92,19 +73,18 @@ RUNTIME_FUNCTION(Runtime_ThrowSymbolAsyncIteratorInvalid) {
isolate, NewTypeError(MessageTemplate::kSymbolAsyncIteratorInvalid));
}
-#define THROW_ERROR(isolate, args, call) \
- HandleScope scope(isolate); \
- DCHECK_LE(1, args.length()); \
- CONVERT_SMI_ARG_CHECKED(message_id_smi, 0); \
- \
- Handle<Object> undefined = isolate->factory()->undefined_value(); \
- Handle<Object> arg0 = (args.length() > 1) ? args.at(1) : undefined; \
- Handle<Object> arg1 = (args.length() > 2) ? args.at(2) : undefined; \
- Handle<Object> arg2 = (args.length() > 3) ? args.at(3) : undefined; \
- \
- MessageTemplate::Template message_id = \
- static_cast<MessageTemplate::Template>(message_id_smi); \
- \
+#define THROW_ERROR(isolate, args, call) \
+ HandleScope scope(isolate); \
+ DCHECK_LE(1, args.length()); \
+ CONVERT_SMI_ARG_CHECKED(message_id_smi, 0); \
+ \
+ Handle<Object> undefined = isolate->factory()->undefined_value(); \
+ Handle<Object> arg0 = (args.length() > 1) ? args.at(1) : undefined; \
+ Handle<Object> arg1 = (args.length() > 2) ? args.at(2) : undefined; \
+ Handle<Object> arg2 = (args.length() > 3) ? args.at(3) : undefined; \
+ \
+ MessageTemplate message_id = MessageTemplateFromInt(message_id_smi); \
+ \
THROW_NEW_ERROR_RETURN_FAILURE(isolate, call(message_id, arg0, arg1, arg2));
RUNTIME_FUNCTION(Runtime_ThrowRangeError) {
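
`MessageTemplate` changes from a nested `MessageTemplate::Template` enum to a standalone type, and the raw Smi coming in from generated code is converted through `MessageTemplateFromInt` instead of a bare `static_cast`. A sketch of why such a helper is preferable, with a made-up template list:

#include <cassert>

enum class MessageTemplateSketch : int {
  kNotIterable,
  kCalledNonCallable,
  kNotConstructor,
  kTemplateCount  // one past the last valid id
};

// Centralizes the int-to-enum conversion so a bounds check (a DCHECK in
// real code) guards every untrusted message id in one place.
MessageTemplateSketch MessageTemplateFromIntSketch(int id) {
  assert(id >= 0 &&
         id < static_cast<int>(MessageTemplateSketch::kTemplateCount));
  return static_cast<MessageTemplateSketch>(id);
}

int main() {
  MessageTemplateSketch t = MessageTemplateFromIntSketch(1);
  assert(t == MessageTemplateSketch::kCalledNonCallable);
}
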
@@ -182,8 +162,7 @@ RUNTIME_FUNCTION(Runtime_NewTypeError) {
DCHECK_EQ(2, args.length());
CONVERT_INT32_ARG_CHECKED(template_index, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, arg0, 1);
- auto message_template =
- static_cast<MessageTemplate::Template>(template_index);
+ MessageTemplate message_template = MessageTemplateFromInt(template_index);
return *isolate->factory()->NewTypeError(message_template, arg0);
}
@@ -192,8 +171,7 @@ RUNTIME_FUNCTION(Runtime_NewReferenceError) {
DCHECK_EQ(2, args.length());
CONVERT_INT32_ARG_CHECKED(template_index, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, arg0, 1);
- auto message_template =
- static_cast<MessageTemplate::Template>(template_index);
+ MessageTemplate message_template = MessageTemplateFromInt(template_index);
return *isolate->factory()->NewReferenceError(message_template, arg0);
}
@@ -202,8 +180,7 @@ RUNTIME_FUNCTION(Runtime_NewSyntaxError) {
DCHECK_EQ(2, args.length());
CONVERT_INT32_ARG_CHECKED(template_index, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, arg0, 1);
- auto message_template =
- static_cast<MessageTemplate::Template>(template_index);
+ MessageTemplate message_template = MessageTemplateFromInt(template_index);
return *isolate->factory()->NewSyntaxError(message_template, arg0);
}
@@ -275,7 +252,7 @@ RUNTIME_FUNCTION(Runtime_AllocateInNewSpace) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
CONVERT_SMI_ARG_CHECKED(size, 0);
- CHECK(IsAligned(size, kPointerSize));
+ CHECK(IsAligned(size, kTaggedSize));
CHECK_GT(size, 0);
CHECK_LE(size, kMaxRegularHeapObjectSize);
return *isolate->factory()->NewFillerObject(size, false, NEW_SPACE);
@@ -286,7 +263,7 @@ RUNTIME_FUNCTION(Runtime_AllocateInTargetSpace) {
DCHECK_EQ(2, args.length());
CONVERT_SMI_ARG_CHECKED(size, 0);
CONVERT_SMI_ARG_CHECKED(flags, 1);
- CHECK(IsAligned(size, kPointerSize));
+ CHECK(IsAligned(size, kTaggedSize));
CHECK_GT(size, 0);
bool double_align = AllocateDoubleAlignFlag::decode(flags);
AllocationSpace space = AllocateTargetSpace::decode(flags);
@@ -316,10 +293,6 @@ RUNTIME_FUNCTION(Runtime_AllocateSeqTwoByteString) {
return *result;
}
-RUNTIME_FUNCTION(Runtime_IS_VAR) {
- UNREACHABLE(); // implemented as macro in the parser
-}
-
namespace {
bool ComputeLocation(Isolate* isolate, MessageLocation* target) {
@@ -387,8 +360,8 @@ Handle<String> RenderCallSite(Isolate* isolate, Handle<Object> object,
return BuildDefaultCallSite(isolate, object);
}
-MessageTemplate::Template UpdateErrorTemplate(
- CallPrinter::ErrorHint hint, MessageTemplate::Template default_id) {
+MessageTemplate UpdateErrorTemplate(CallPrinter::ErrorHint hint,
+ MessageTemplate default_id) {
switch (hint) {
case CallPrinter::ErrorHint::kNormalIterator:
return MessageTemplate::kNotIterable;
@@ -414,7 +387,7 @@ MaybeHandle<Object> Runtime::ThrowIteratorError(Isolate* isolate,
Handle<Object> object) {
CallPrinter::ErrorHint hint = CallPrinter::kNone;
Handle<String> callsite = RenderCallSite(isolate, object, &hint);
- MessageTemplate::Template id = MessageTemplate::kNotIterableNoSymbolLoad;
+ MessageTemplate id = MessageTemplate::kNotIterableNoSymbolLoad;
if (hint == CallPrinter::kNone) {
Handle<Symbol> iterator_symbol = isolate->factory()->iterator_symbol();
@@ -440,7 +413,7 @@ RUNTIME_FUNCTION(Runtime_ThrowCalledNonCallable) {
CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
CallPrinter::ErrorHint hint = CallPrinter::kNone;
Handle<String> callsite = RenderCallSite(isolate, object, &hint);
- MessageTemplate::Template id = MessageTemplate::kCalledNonCallable;
+ MessageTemplate id = MessageTemplate::kCalledNonCallable;
id = UpdateErrorTemplate(hint, id);
THROW_NEW_ERROR_RETURN_FAILURE(isolate, NewTypeError(id, callsite));
}
@@ -451,10 +424,98 @@ RUNTIME_FUNCTION(Runtime_ThrowConstructedNonConstructable) {
CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
CallPrinter::ErrorHint hint = CallPrinter::kNone;
Handle<String> callsite = RenderCallSite(isolate, object, &hint);
- MessageTemplate::Template id = MessageTemplate::kNotConstructor;
+ MessageTemplate id = MessageTemplate::kNotConstructor;
THROW_NEW_ERROR_RETURN_FAILURE(isolate, NewTypeError(id, callsite));
}
+namespace {
+
+// Helper visitor for ThrowPatternAssignmentNonCoercible which finds an
+// object literal (representing a destructuring assignment) at a given source
+// position.
+class PatternFinder final : public AstTraversalVisitor<PatternFinder> {
+ public:
+ PatternFinder(Isolate* isolate, Expression* root, int position)
+ : AstTraversalVisitor(isolate, root),
+ position_(position),
+ object_literal_(nullptr) {}
+
+ ObjectLiteral* object_literal() const { return object_literal_; }
+
+ private:
+  // This is required so that the overridden Visit* methods can be
+ // called by the base class (template).
+ friend class AstTraversalVisitor<PatternFinder>;
+
+ void VisitObjectLiteral(ObjectLiteral* lit) {
+ // TODO(leszeks): This could be smarter in only traversing object literals
+ // that are known to be a destructuring pattern. We could then also
+ // potentially find the corresponding assignment value and report that too.
+ if (lit->position() == position_) {
+ object_literal_ = lit;
+ return;
+ }
+ AstTraversalVisitor::VisitObjectLiteral(lit);
+ }
+
+ int position_;
+ ObjectLiteral* object_literal_;
+};
+
+} // namespace
+
+RUNTIME_FUNCTION(Runtime_ThrowPatternAssignmentNonCoercible) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(0, args.length());
+
+ // Find the object literal representing the destructuring assignment, so that
+ // we can try to attribute the error to a property name on it rather than to
+ // the literal itself.
+ MaybeHandle<String> maybe_property_name;
+ MessageLocation location;
+ if (ComputeLocation(isolate, &location)) {
+ ParseInfo info(isolate, location.shared());
+ if (parsing::ParseAny(&info, location.shared(), isolate)) {
+ info.ast_value_factory()->Internalize(isolate);
+
+ PatternFinder finder(isolate, info.literal(), location.start_pos());
+ finder.Run();
+ if (finder.object_literal()) {
+ for (ObjectLiteralProperty* pattern_property :
+ *finder.object_literal()->properties()) {
+ Expression* key = pattern_property->key();
+ if (key->IsPropertyName()) {
+ int pos = key->position();
+ maybe_property_name =
+ key->AsLiteral()->AsRawPropertyName()->string();
+ // Change the message location to point at the property name.
+ location = MessageLocation(location.script(), pos, pos + 1,
+ location.shared());
+ break;
+ }
+ }
+ }
+ } else {
+ isolate->clear_pending_exception();
+ }
+ }
+
+ // Create a "non-coercible" type error with a property name if one is
+ // available, otherwise create a generic one.
+ Handle<Object> error;
+ Handle<String> property_name;
+ if (maybe_property_name.ToHandle(&property_name)) {
+ error = isolate->factory()->NewTypeError(
+ MessageTemplate::kNonCoercibleWithProperty, property_name);
+ } else {
+ error = isolate->factory()->NewTypeError(MessageTemplate::kNonCoercible);
+ }
+
+ // Explicitly pass the calculated location, as we may have updated it to match
+ // the property name.
+ return isolate->Throw(*error, &location);
+}
+
RUNTIME_FUNCTION(Runtime_ThrowConstructorReturnedNonObject) {
HandleScope scope(isolate);
DCHECK_EQ(0, args.length());
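
`PatternFinder` relies on the CRTP shape of `AstTraversalVisitor`: the base template walks the tree and statically dispatches to the derived class's `Visit*` overrides, which is why the derived class must friend the base. A self-contained miniature of that pattern; the node types here are invented:

#include <cstdio>
#include <vector>

struct Node {
  int position;
  std::vector<Node*> children;
};

// CRTP walker: calls Derived::VisitNode without virtual dispatch.
template <typename Derived>
class TraversalVisitor {
 public:
  void Run(Node* root) { static_cast<Derived*>(this)->VisitNode(root); }
 protected:
  void VisitChildren(Node* n) {
    for (Node* c : n->children) static_cast<Derived*>(this)->VisitNode(c);
  }
  // Default behavior: just recurse.
  void VisitNode(Node* n) { VisitChildren(n); }
};

// Finds the node at a given source position, like PatternFinder above.
class PositionFinder : public TraversalVisitor<PositionFinder> {
 public:
  explicit PositionFinder(int position) : position_(position) {}
  Node* found() const { return found_; }
  void VisitNode(Node* n) {
    if (n->position == position_) { found_ = n; return; }  // stop descending
    VisitChildren(n);
  }
 private:
  friend class TraversalVisitor<PositionFinder>;  // base may call overrides
  int position_;
  Node* found_ = nullptr;
};

int main() {
  Node leaf{42, {}};
  Node root{0, {&leaf}};
  PositionFinder finder(42);
  finder.Run(&root);
  std::printf("found: %d\n", finder.found() ? finder.found()->position : -1);
}
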
@@ -473,33 +534,6 @@ RUNTIME_FUNCTION(Runtime_CreateListFromArrayLike) {
isolate, object, ElementTypes::kAll));
}
-RUNTIME_FUNCTION(Runtime_DeserializeLazy) {
- HandleScope scope(isolate);
- DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
-
- DCHECK(FLAG_lazy_deserialization);
-
- Handle<SharedFunctionInfo> shared(function->shared(), isolate);
-
-#ifdef DEBUG
- int builtin_id = shared->builtin_id();
- // At this point, the builtins table should definitely have DeserializeLazy
- // set at the position of the target builtin.
- CHECK_EQ(Builtins::kDeserializeLazy,
- isolate->builtins()->builtin(builtin_id)->builtin_index());
- // The DeserializeLazy builtin tail-calls the deserialized builtin. This only
- // works with JS-linkage.
- CHECK(Builtins::IsLazy(builtin_id));
- CHECK_EQ(Builtins::TFJ, Builtins::KindOf(builtin_id));
-#endif // DEBUG
-
- Code* code = Snapshot::EnsureBuiltinIsDeserialized(isolate, shared);
-
- function->set_code(code);
- return code;
-}
-
RUNTIME_FUNCTION(Runtime_IncrementUseCounter) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
@@ -531,7 +565,8 @@ RUNTIME_FUNCTION(Runtime_GetAndResetRuntimeCallStats) {
if (args[0]->IsString()) {
// With a string argument, the results are appended to that file.
CONVERT_ARG_HANDLE_CHECKED(String, arg0, 0);
- String::FlatContent flat = arg0->GetFlatContent();
+ DisallowHeapAllocation no_gc;
+ String::FlatContent flat = arg0->GetFlatContent(no_gc);
const char* filename =
reinterpret_cast<const char*>(&(flat.ToOneByteVector()[0]));
f = std::fopen(filename, "a");
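
`GetFlatContent` now takes the `DisallowHeapAllocation` scope as a parameter, so the type system enforces that a no-GC region is open for as long as the raw characters are used. The trick generalizes: make the unsafe accessor demand a reference to the scope object as a witness. A minimal sketch, with illustrative names:

#include <cstdio>
#include <string>

// A scope object whose mere existence means "no GC may run here".
class NoGcScope {
 public:
  NoGcScope() = default;
  NoGcScope(const NoGcScope&) = delete;  // cannot be smuggled out of scope
  NoGcScope& operator=(const NoGcScope&) = delete;
};

class StringSketch {
 public:
  explicit StringSketch(std::string s) : data_(std::move(s)) {}
  // The caller must hold a NoGcScope to get at the raw bytes; without one
  // this simply does not compile, which is the whole point.
  const char* GetFlatContent(const NoGcScope&) const { return data_.c_str(); }
 private:
  std::string data_;
};

int main() {
  StringSketch s("hello");
  NoGcScope no_gc;
  std::printf("%s\n", s.GetFlatContent(no_gc));
}
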
diff --git a/deps/v8/src/runtime/runtime-interpreter.cc b/deps/v8/src/runtime/runtime-interpreter.cc
index e87feac361..ad84317415 100644
--- a/deps/v8/src/runtime/runtime-interpreter.cc
+++ b/deps/v8/src/runtime/runtime-interpreter.cc
@@ -5,6 +5,7 @@
#include <iomanip>
#include "src/arguments-inl.h"
+#include "src/counters.h"
#include "src/frames-inl.h"
#include "src/interpreter/bytecode-array-iterator.h"
#include "src/interpreter/bytecode-decoder.h"
@@ -20,25 +21,6 @@
namespace v8 {
namespace internal {
-RUNTIME_FUNCTION(Runtime_InterpreterDeserializeLazy) {
- HandleScope scope(isolate);
-
- DCHECK(FLAG_lazy_deserialization);
- DCHECK_EQ(2, args.length());
- CONVERT_SMI_ARG_CHECKED(bytecode_int, 0);
- CONVERT_SMI_ARG_CHECKED(operand_scale_int, 1);
-
- using interpreter::Bytecode;
- using interpreter::Bytecodes;
- using interpreter::OperandScale;
-
- Bytecode bytecode = Bytecodes::FromByte(bytecode_int);
- OperandScale operand_scale = static_cast<OperandScale>(operand_scale_int);
-
- return isolate->interpreter()->GetAndMaybeDeserializeBytecodeHandler(
- bytecode, operand_scale);
-}
-
#ifdef V8_TRACE_IGNITION
namespace {
@@ -97,7 +79,7 @@ void PrintRegisters(Isolate* isolate, std::ostream& os, bool is_input,
int range = bytecode_iterator.GetRegisterOperandRange(operand_index);
for (int reg_index = first_reg.index();
reg_index < first_reg.index() + range; reg_index++) {
- Object* reg_object = frame->ReadInterpreterRegister(reg_index);
+ Object reg_object = frame->ReadInterpreterRegister(reg_index);
os << " [ " << std::setw(kRegFieldWidth)
<< interpreter::Register(reg_index).ToString(
bytecode_iterator.bytecode_array()->parameter_count())
diff --git a/deps/v8/src/runtime/runtime-intl.cc b/deps/v8/src/runtime/runtime-intl.cc
index 32e7a46b6e..37cd2a45d7 100644
--- a/deps/v8/src/runtime/runtime-intl.cc
+++ b/deps/v8/src/runtime/runtime-intl.cc
@@ -12,13 +12,11 @@
#include "src/api-inl.h"
#include "src/api-natives.h"
#include "src/arguments-inl.h"
+#include "src/counters.h"
#include "src/date.h"
#include "src/global-handles.h"
#include "src/heap/factory.h"
-#include "src/intl.h"
#include "src/isolate-inl.h"
-#include "src/messages.h"
-#include "src/objects/intl-objects-inl.h"
#include "src/objects/intl-objects.h"
#include "src/objects/js-array-inl.h"
#include "src/objects/js-collator-inl.h"
@@ -31,30 +29,6 @@
#include "src/runtime/runtime-utils.h"
#include "src/utils.h"
-#include "unicode/brkiter.h"
-#include "unicode/calendar.h"
-#include "unicode/coll.h"
-#include "unicode/curramt.h"
-#include "unicode/datefmt.h"
-#include "unicode/dcfmtsym.h"
-#include "unicode/decimfmt.h"
-#include "unicode/dtfmtsym.h"
-#include "unicode/dtptngen.h"
-#include "unicode/locid.h"
-#include "unicode/numfmt.h"
-#include "unicode/numsys.h"
-#include "unicode/plurrule.h"
-#include "unicode/smpdtfmt.h"
-#include "unicode/timezone.h"
-#include "unicode/uchar.h"
-#include "unicode/ucol.h"
-#include "unicode/ucurr.h"
-#include "unicode/uloc.h"
-#include "unicode/unistr.h"
-#include "unicode/unum.h"
-#include "unicode/uversion.h"
-
-
namespace v8 {
namespace internal {
@@ -78,44 +52,12 @@ RUNTIME_FUNCTION(Runtime_FormatListToParts) {
isolate, JSListFormat::FormatListToParts(isolate, list_format, list));
}
-// ECMA 402 6.2.3
-RUNTIME_FUNCTION(Runtime_CanonicalizeLanguageTag) {
- HandleScope scope(isolate);
-
- DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Object, locale, 0);
-
- std::string canonicalized;
- if (!Intl::CanonicalizeLanguageTag(isolate, locale).To(&canonicalized)) {
- return ReadOnlyRoots(isolate).exception();
- }
- return *isolate->factory()->NewStringFromAsciiChecked(canonicalized.c_str());
-}
-
-RUNTIME_FUNCTION(Runtime_AvailableLocalesOf) {
- HandleScope scope(isolate);
- DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(String, service, 0);
- Handle<JSObject> locales;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, locales, Intl::AvailableLocalesOf(isolate, service));
- return *locales;
-}
-
-RUNTIME_FUNCTION(Runtime_GetDefaultICULocale) {
- HandleScope scope(isolate);
-
- DCHECK_EQ(0, args.length());
- return *isolate->factory()->NewStringFromAsciiChecked(
- Intl::DefaultLocale(isolate).c_str());
-}
-
RUNTIME_FUNCTION(Runtime_StringToLowerCaseIntl) {
HandleScope scope(isolate);
DCHECK_EQ(args.length(), 1);
CONVERT_ARG_HANDLE_CHECKED(String, s, 0);
s = String::Flatten(isolate, s);
- RETURN_RESULT_OR_FAILURE(isolate, ConvertToLower(s, isolate));
+ RETURN_RESULT_OR_FAILURE(isolate, Intl::ConvertToLower(isolate, s));
}
RUNTIME_FUNCTION(Runtime_StringToUpperCaseIntl) {
@@ -123,25 +65,7 @@ RUNTIME_FUNCTION(Runtime_StringToUpperCaseIntl) {
DCHECK_EQ(args.length(), 1);
CONVERT_ARG_HANDLE_CHECKED(String, s, 0);
s = String::Flatten(isolate, s);
- RETURN_RESULT_OR_FAILURE(isolate, ConvertToUpper(s, isolate));
-}
-
-RUNTIME_FUNCTION(Runtime_DateCacheVersion) {
- HandleScope scope(isolate);
- DCHECK_EQ(0, args.length());
- if (isolate->serializer_enabled())
- return ReadOnlyRoots(isolate).undefined_value();
- if (!isolate->eternal_handles()->Exists(EternalHandles::DATE_CACHE_VERSION)) {
- Handle<FixedArray> date_cache_version =
- isolate->factory()->NewFixedArray(1, TENURED);
- date_cache_version->set(0, Smi::kZero);
- isolate->eternal_handles()->CreateSingleton(
- isolate, *date_cache_version, EternalHandles::DATE_CACHE_VERSION);
- }
- Handle<FixedArray> date_cache_version =
- Handle<FixedArray>::cast(isolate->eternal_handles()->GetSingleton(
- EternalHandles::DATE_CACHE_VERSION));
- return date_cache_version->get(0);
+ RETURN_RESULT_OR_FAILURE(isolate, Intl::ConvertToUpper(isolate, s));
}
} // namespace internal
diff --git a/deps/v8/src/runtime/runtime-literals.cc b/deps/v8/src/runtime/runtime-literals.cc
index 8632388388..d5e95f31b0 100644
--- a/deps/v8/src/runtime/runtime-literals.cc
+++ b/deps/v8/src/runtime/runtime-literals.cc
@@ -5,8 +5,11 @@
#include "src/allocation-site-scopes-inl.h"
#include "src/arguments-inl.h"
#include "src/ast/ast.h"
+#include "src/counters.h"
#include "src/isolate-inl.h"
#include "src/objects/hash-table-inl.h"
+#include "src/objects/heap-number-inl.h"
+#include "src/objects/heap-object-inl.h"
#include "src/objects/js-regexp-inl.h"
#include "src/objects/literal-objects-inl.h"
#include "src/runtime/runtime-utils.h"
@@ -17,7 +20,7 @@ namespace internal {
namespace {
-bool IsUninitializedLiteralSite(Object* literal_site) {
+bool IsUninitializedLiteralSite(Object literal_site) {
return literal_site == Smi::kZero;
}
@@ -118,7 +121,7 @@ MaybeHandle<JSObject> JSObjectWalkVisitor<ContextObject>::StructureWalk(
DCHECK_EQ(kData, descriptors->GetDetails(i).kind());
FieldIndex index = FieldIndex::ForDescriptor(copy->map(), i);
if (copy->IsUnboxedDoubleField(index)) continue;
- Object* raw = copy->RawFastPropertyAt(index);
+ Object raw = copy->RawFastPropertyAt(index);
if (raw->IsJSObject()) {
Handle<JSObject> value(JSObject::cast(raw), isolate);
ASSIGN_RETURN_ON_EXCEPTION(
@@ -135,7 +138,7 @@ MaybeHandle<JSObject> JSObjectWalkVisitor<ContextObject>::StructureWalk(
} else {
Handle<NameDictionary> dict(copy->property_dictionary(), isolate);
for (int i = 0; i < dict->Capacity(); i++) {
- Object* raw = dict->ValueAt(i);
+ Object raw = dict->ValueAt(i);
if (!raw->IsJSObject()) continue;
DCHECK(dict->KeyAt(i)->IsName());
Handle<JSObject> value(JSObject::cast(raw), isolate);
@@ -162,7 +165,7 @@ MaybeHandle<JSObject> JSObjectWalkVisitor<ContextObject>::StructureWalk(
#endif
} else {
for (int i = 0; i < elements->length(); i++) {
- Object* raw = elements->get(i);
+ Object raw = elements->get(i);
if (!raw->IsJSObject()) continue;
Handle<JSObject> value(JSObject::cast(raw), isolate);
ASSIGN_RETURN_ON_EXCEPTION(
@@ -177,7 +180,7 @@ MaybeHandle<JSObject> JSObjectWalkVisitor<ContextObject>::StructureWalk(
isolate);
int capacity = element_dictionary->Capacity();
for (int i = 0; i < capacity; i++) {
- Object* raw = element_dictionary->ValueAt(i);
+ Object raw = element_dictionary->ValueAt(i);
if (!raw->IsJSObject()) continue;
Handle<JSObject> value(JSObject::cast(raw), isolate);
ASSIGN_RETURN_ON_EXCEPTION(
@@ -250,7 +253,7 @@ class AllocationSiteCreationContext : public AllocationSiteContext {
scope_site = Handle<AllocationSite>(*top(), isolate());
if (FLAG_trace_creation_allocation_sites) {
PrintF("*** Creating top level %s AllocationSite %p\n", "Fat",
- static_cast<void*>(*scope_site));
+ reinterpret_cast<void*>(scope_site->ptr()));
}
} else {
DCHECK(!current().is_null());
@@ -260,8 +263,9 @@ class AllocationSiteCreationContext : public AllocationSiteContext {
"*** Creating nested %s AllocationSite (top, current, new) (%p, "
"%p, "
"%p)\n",
- "Slim", static_cast<void*>(*top()), static_cast<void*>(*current()),
- static_cast<void*>(*scope_site));
+ "Slim", reinterpret_cast<void*>(top()->ptr()),
+ reinterpret_cast<void*>(current()->ptr()),
+ reinterpret_cast<void*>(scope_site->ptr()));
}
current()->set_nested_site(*scope_site);
update_current_site(*scope_site);
@@ -277,11 +281,13 @@ class AllocationSiteCreationContext : public AllocationSiteContext {
!scope_site.is_null() && top().is_identical_to(scope_site);
if (top_level) {
PrintF("*** Setting AllocationSite %p transition_info %p\n",
- static_cast<void*>(*scope_site), static_cast<void*>(*object));
+ reinterpret_cast<void*>(scope_site->ptr()),
+ reinterpret_cast<void*>(object->ptr()));
} else {
PrintF("*** Setting AllocationSite (%p, %p) transition_info %p\n",
- static_cast<void*>(*top()), static_cast<void*>(*scope_site),
- static_cast<void*>(*object));
+ reinterpret_cast<void*>(top()->ptr()),
+ reinterpret_cast<void*>(scope_site->ptr()),
+ reinterpret_cast<void*>(object->ptr()));
}
}
}
@@ -492,9 +498,15 @@ MaybeHandle<JSObject> CreateLiteralWithoutAllocationSite(
template <typename LiteralHelper>
MaybeHandle<JSObject> CreateLiteral(Isolate* isolate,
- Handle<FeedbackVector> vector,
+ MaybeHandle<FeedbackVector> maybe_vector,
int literals_index,
Handle<HeapObject> description, int flags) {
+ if (maybe_vector.is_null()) {
+ return CreateLiteralWithoutAllocationSite<LiteralHelper>(
+ isolate, description, flags);
+ }
+
+ Handle<FeedbackVector> vector = maybe_vector.ToHandleChecked();
FeedbackSlot literals_slot(FeedbackVector::ToSlot(literals_index));
CHECK(literals_slot.ToInt() < vector->length());
Handle<Object> literal_site(vector->Get(literals_slot)->cast<Object>(),
@@ -546,10 +558,15 @@ MaybeHandle<JSObject> CreateLiteral(Isolate* isolate,
RUNTIME_FUNCTION(Runtime_CreateObjectLiteral) {
HandleScope scope(isolate);
DCHECK_EQ(4, args.length());
- CONVERT_ARG_HANDLE_CHECKED(FeedbackVector, vector, 0);
+ CONVERT_ARG_HANDLE_CHECKED(HeapObject, maybe_vector, 0);
CONVERT_SMI_ARG_CHECKED(literals_index, 1);
CONVERT_ARG_HANDLE_CHECKED(ObjectBoilerplateDescription, description, 2);
CONVERT_SMI_ARG_CHECKED(flags, 3);
+ Handle<FeedbackVector> vector = Handle<FeedbackVector>();
+ if (!maybe_vector->IsUndefined()) {
+ DCHECK(maybe_vector->IsFeedbackVector());
+ vector = Handle<FeedbackVector>::cast(maybe_vector);
+ }
RETURN_RESULT_OR_FAILURE(
isolate, CreateLiteral<ObjectLiteralHelper>(
isolate, vector, literals_index, description, flags));
@@ -578,10 +595,15 @@ RUNTIME_FUNCTION(Runtime_CreateArrayLiteralWithoutAllocationSite) {
RUNTIME_FUNCTION(Runtime_CreateArrayLiteral) {
HandleScope scope(isolate);
DCHECK_EQ(4, args.length());
- CONVERT_ARG_HANDLE_CHECKED(FeedbackVector, vector, 0);
+ CONVERT_ARG_HANDLE_CHECKED(HeapObject, maybe_vector, 0);
CONVERT_SMI_ARG_CHECKED(literals_index, 1);
CONVERT_ARG_HANDLE_CHECKED(ArrayBoilerplateDescription, elements, 2);
CONVERT_SMI_ARG_CHECKED(flags, 3);
+ Handle<FeedbackVector> vector = Handle<FeedbackVector>();
+ if (!maybe_vector->IsUndefined()) {
+ DCHECK(maybe_vector->IsFeedbackVector());
+ vector = Handle<FeedbackVector>::cast(maybe_vector);
+ }
RETURN_RESULT_OR_FAILURE(
isolate, CreateLiteral<ArrayLiteralHelper>(
isolate, vector, literals_index, elements, flags));
@@ -590,17 +612,27 @@ RUNTIME_FUNCTION(Runtime_CreateArrayLiteral) {
RUNTIME_FUNCTION(Runtime_CreateRegExpLiteral) {
HandleScope scope(isolate);
DCHECK_EQ(4, args.length());
- CONVERT_ARG_HANDLE_CHECKED(FeedbackVector, vector, 0);
+ CONVERT_ARG_HANDLE_CHECKED(HeapObject, maybe_vector, 0);
CONVERT_SMI_ARG_CHECKED(index, 1);
CONVERT_ARG_HANDLE_CHECKED(String, pattern, 2);
CONVERT_SMI_ARG_CHECKED(flags, 3);
-
FeedbackSlot literal_slot(FeedbackVector::ToSlot(index));
+ Handle<FeedbackVector> vector = Handle<FeedbackVector>();
+ if (!maybe_vector->IsUndefined()) {
+ DCHECK(maybe_vector->IsFeedbackVector());
+ vector = Handle<FeedbackVector>::cast(maybe_vector);
+ }
+ Handle<Object> boilerplate;
+ if (vector.is_null()) {
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, boilerplate,
+ JSRegExp::New(isolate, pattern, JSRegExp::Flags(flags)));
+ return *JSRegExp::Copy(Handle<JSRegExp>::cast(boilerplate));
+ }
// Check if boilerplate exists. If not, create it first.
Handle<Object> literal_site(vector->Get(literal_slot)->cast<Object>(),
isolate);
- Handle<Object> boilerplate;
if (!HasBoilerplate(literal_site)) {
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, boilerplate,
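
All three literal runtimes in this file now accept `undefined` in place of a feedback vector and fall back to a one-shot path that allocates the boilerplate without caching it. The control flow reduces to a nullable cache slot; a standalone sketch, assuming an invented Boilerplate type:

#include <cstdio>
#include <optional>

struct Boilerplate {
  int flags;
};

Boilerplate CreateBoilerplate(int flags) { return Boilerplate{flags}; }

// With a cache slot: create once, then reuse. Without one (no feedback
// vector was passed in): create a fresh value every time, cache nothing.
Boilerplate CreateLiteralSketch(std::optional<Boilerplate>* slot, int flags) {
  if (slot == nullptr) return CreateBoilerplate(flags);  // uncached path
  if (!slot->has_value()) *slot = CreateBoilerplate(flags);
  return **slot;
}

int main() {
  std::optional<Boilerplate> slot;
  CreateLiteralSketch(&slot, 3);                    // fills the slot
  Boilerplate b = CreateLiteralSketch(&slot, 3);    // served from the cache
  Boilerplate c = CreateLiteralSketch(nullptr, 3);  // vector-less path
  std::printf("%d %d\n", b.flags, c.flags);
}
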
diff --git a/deps/v8/src/runtime/runtime-numbers.cc b/deps/v8/src/runtime/runtime-numbers.cc
index a8f62099a4..59f15a62bd 100644
--- a/deps/v8/src/runtime/runtime-numbers.cc
+++ b/deps/v8/src/runtime/runtime-numbers.cc
@@ -5,6 +5,7 @@
#include "src/arguments-inl.h"
#include "src/base/bits.h"
#include "src/bootstrapper.h"
+#include "src/counters.h"
#include "src/isolate-inl.h"
#include "src/runtime/runtime-utils.h"
@@ -62,8 +63,7 @@ RUNTIME_FUNCTION(Runtime_StringParseFloat) {
DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(String, subject, 0);
- double value = StringToDouble(isolate, isolate->unicode_cache(), subject,
- ALLOW_TRAILING_JUNK,
+ double value = StringToDouble(isolate, subject, ALLOW_TRAILING_JUNK,
std::numeric_limits<double>::quiet_NaN());
return *isolate->factory()->NewNumber(value);
@@ -89,7 +89,7 @@ RUNTIME_FUNCTION(Runtime_SmiLexicographicCompare) {
CONVERT_ARG_CHECKED(Smi, x_value, 0);
CONVERT_ARG_CHECKED(Smi, y_value, 1);
- return Smi::LexicographicCompare(isolate, x_value, y_value);
+ return Object(Smi::LexicographicCompare(isolate, x_value, y_value));
}
RUNTIME_FUNCTION(Runtime_MaxSmi) {
diff --git a/deps/v8/src/runtime/runtime-object.cc b/deps/v8/src/runtime/runtime-object.cc
index 3778e0576c..fd3d2dd168 100644
--- a/deps/v8/src/runtime/runtime-object.cc
+++ b/deps/v8/src/runtime/runtime-object.cc
@@ -4,9 +4,10 @@
#include "src/arguments-inl.h"
#include "src/bootstrapper.h"
+#include "src/counters.h"
#include "src/debug/debug.h"
#include "src/isolate-inl.h"
-#include "src/messages.h"
+#include "src/message-template.h"
#include "src/objects/hash-table-inl.h"
#include "src/objects/js-array-inl.h"
#include "src/objects/property-descriptor-object.h"
@@ -40,11 +41,13 @@ MaybeHandle<Object> Runtime::GetObjectProperty(Isolate* isolate,
if (is_found_out) *is_found_out = it.IsFound();
if (!it.IsFound() && key->IsSymbol() &&
- Symbol::cast(*key)->is_private_field()) {
- THROW_NEW_ERROR(
- isolate,
- NewTypeError(MessageTemplate::kInvalidPrivateFieldAccess, key, object),
- Object);
+ Symbol::cast(*key)->is_private_name()) {
+ Handle<Object> name_string(Symbol::cast(*key)->name(), isolate);
+ DCHECK(name_string->IsString());
+ THROW_NEW_ERROR(isolate,
+ NewTypeError(MessageTemplate::kInvalidPrivateFieldRead,
+ name_string, object),
+ Object);
}
return result;
}
@@ -59,7 +62,7 @@ bool DeleteObjectPropertyFast(Isolate* isolate, Handle<JSReceiver> receiver,
// the properties, we can undo the last map transition, with a few
// prerequisites:
// (1) The receiver must be a regular object and the key a unique name.
- Map* map = receiver->map();
+ Map map = receiver->map();
if (map->IsSpecialReceiverMap()) return false;
if (!raw_key->IsUniqueName()) return false;
Handle<Name> key = Handle<Name>::cast(raw_key);
@@ -67,13 +70,13 @@ bool DeleteObjectPropertyFast(Isolate* isolate, Handle<JSReceiver> receiver,
int nof = map->NumberOfOwnDescriptors();
if (nof == 0) return false;
int descriptor = nof - 1;
- DescriptorArray* descriptors = map->instance_descriptors();
+ DescriptorArray descriptors = map->instance_descriptors();
if (descriptors->GetKey(descriptor) != *key) return false;
// (3) The property to be deleted must be deletable.
PropertyDetails details = descriptors->GetDetails(descriptor);
if (!details.IsConfigurable()) return false;
// (4) The map must have a back pointer.
- Object* backpointer = map->GetBackPointer();
+ Object backpointer = map->GetBackPointer();
if (!backpointer->IsMap()) return false;
// (5) The last transition must have been caused by adding a property
// (and not any kind of special transition).
@@ -93,7 +96,7 @@ bool DeleteObjectPropertyFast(Isolate* isolate, Handle<JSReceiver> receiver,
// Clear out the properties backing store.
receiver->SetProperties(ReadOnlyRoots(isolate).empty_fixed_array());
} else {
- Object* filler = ReadOnlyRoots(isolate).one_pointer_filler_map();
+ Object filler = ReadOnlyRoots(isolate).one_pointer_filler_map();
JSObject::cast(*receiver)->RawFastPropertyAtPut(index, filler);
// We must clear any recorded slot for the deleted property, because
// subsequent object modifications might put a raw double there.
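
DeleteObjectPropertyFast spells out numbered prerequisites before it will delete a property by rolling the object back to its previous map. A condensed model of that decision, with invented map and descriptor types:

#include <cstdio>
#include <string>
#include <vector>

struct MapSketch {
  std::vector<std::string> own_properties;  // descriptor keys, in order
  std::vector<bool> configurable;           // per-property deletability
  MapSketch* back_pointer = nullptr;        // map before the last transition
  bool special_receiver = false;
};

// Mirrors the numbered checks in the hunk above: only the most recently
// added, configurable property of a regular object can be removed by
// stepping back one map transition.
bool CanRollBackLastTransition(const MapSketch& map, const std::string& key) {
  if (map.special_receiver) return false;             // (1) regular object
  if (map.own_properties.empty()) return false;       // (2) has a descriptor
  std::size_t last = map.own_properties.size() - 1;
  if (map.own_properties[last] != key) return false;  // (2) key is last-added
  if (!map.configurable[last]) return false;          // (3) deletable
  if (map.back_pointer == nullptr) return false;      // (4) has back pointer
  // (5) the real code also checks the transition *kind*; elided here.
  return true;
}

int main() {
  MapSketch parent;
  MapSketch map{{"x", "y"}, {true, true}, &parent, false};
  std::printf("%d\n", CanRollBackLastTransition(map, "y"));  // prints 1
}
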
@@ -252,7 +255,7 @@ RUNTIME_FUNCTION(Runtime_ObjectHasOwnProperty) {
if (maybe.FromJust()) return ReadOnlyRoots(isolate).true_value();
}
- Map* map = js_obj->map();
+ Map map = js_obj->map();
if (!map->has_hidden_prototype() &&
(key_is_array_index ? !map->has_indexed_interceptor()
: !map->has_named_interceptor())) {
@@ -358,11 +361,13 @@ MaybeHandle<Object> Runtime::SetObjectProperty(Isolate* isolate,
if (!success) return MaybeHandle<Object>();
if (!it.IsFound() && key->IsSymbol() &&
- Symbol::cast(*key)->is_private_field()) {
- THROW_NEW_ERROR(
- isolate,
- NewTypeError(MessageTemplate::kInvalidPrivateFieldAccess, key, object),
- Object);
+ Symbol::cast(*key)->is_private_name()) {
+ Handle<Object> name_string(Symbol::cast(*key)->name(), isolate);
+ DCHECK(name_string->IsString());
+ THROW_NEW_ERROR(isolate,
+ NewTypeError(MessageTemplate::kInvalidPrivateFieldWrite,
+ name_string, object),
+ Object);
}
MAYBE_RETURN_NULL(
@@ -377,17 +382,6 @@ RUNTIME_FUNCTION(Runtime_InternalSetPrototype) {
DCHECK_EQ(2, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSReceiver, obj, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, prototype, 1);
- if (prototype->IsJSFunction()) {
- Handle<JSFunction> function = Handle<JSFunction>::cast(prototype);
- if (!function->shared()->HasSharedName()) {
- Handle<Map> function_map(function->map(), isolate);
- if (!JSFunction::SetName(function, isolate->factory()->proto_string(),
- isolate->factory()->empty_string())) {
- return ReadOnlyRoots(isolate).exception();
- }
- CHECK_EQ(*function_map, function->map());
- }
- }
MAYBE_RETURN(JSReceiver::SetPrototype(obj, prototype, false, kThrowOnError),
ReadOnlyRoots(isolate).exception());
return *obj;
@@ -497,20 +491,20 @@ RUNTIME_FUNCTION(Runtime_GetProperty) {
DisallowHeapAllocation no_allocation;
if (receiver->IsJSGlobalObject()) {
// Attempt dictionary lookup.
- GlobalDictionary* dictionary =
+ GlobalDictionary dictionary =
JSGlobalObject::cast(*receiver)->global_dictionary();
int entry = dictionary->FindEntry(isolate, key);
if (entry != GlobalDictionary::kNotFound) {
- PropertyCell* cell = dictionary->CellAt(entry);
+ PropertyCell cell = dictionary->CellAt(entry);
if (cell->property_details().kind() == kData) {
- Object* value = cell->value();
+ Object value = cell->value();
if (!value->IsTheHole(isolate)) return value;
// If value is the hole (meaning, absent) do the general lookup.
}
}
} else if (!receiver->HasFastProperties()) {
// Attempt dictionary lookup.
- NameDictionary* dictionary = receiver->property_dictionary();
+ NameDictionary dictionary = receiver->property_dictionary();
int entry = dictionary->FindEntry(isolate, key);
if ((entry != NameDictionary::kNotFound) &&
(dictionary->DetailsAt(entry).kind() == kData)) {
@@ -553,59 +547,6 @@ RUNTIME_FUNCTION(Runtime_GetProperty) {
isolate, Runtime::GetObjectProperty(isolate, receiver_obj, key_obj));
}
-RUNTIME_FUNCTION(Runtime_AddNamedProperty) {
- HandleScope scope(isolate);
- DCHECK_EQ(4, args.length());
-
- CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
- CONVERT_ARG_HANDLE_CHECKED(Name, name, 1);
- CONVERT_ARG_HANDLE_CHECKED(Object, value, 2);
- CONVERT_PROPERTY_ATTRIBUTES_CHECKED(attrs, 3);
-
-#ifdef DEBUG
- uint32_t index = 0;
- DCHECK(!name->ToArrayIndex(&index));
- LookupIterator it(object, name, object, LookupIterator::OWN_SKIP_INTERCEPTOR);
- Maybe<PropertyAttributes> maybe = JSReceiver::GetPropertyAttributes(&it);
- if (maybe.IsNothing()) return ReadOnlyRoots(isolate).exception();
- DCHECK(!it.IsFound());
-#endif
-
- RETURN_RESULT_OR_FAILURE(isolate, JSObject::SetOwnPropertyIgnoreAttributes(
- object, name, value, attrs));
-}
-
-
-// Adds an element to an array.
-// This is used to create an indexed data property into an array.
-RUNTIME_FUNCTION(Runtime_AddElement) {
- HandleScope scope(isolate);
- DCHECK_EQ(3, args.length());
-
- CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
- CONVERT_ARG_HANDLE_CHECKED(Object, value, 2);
-
- uint32_t index = 0;
- CHECK(key->ToArrayIndex(&index));
-
-#ifdef DEBUG
- LookupIterator it(isolate, object, index, object,
- LookupIterator::OWN_SKIP_INTERCEPTOR);
- Maybe<PropertyAttributes> maybe = JSReceiver::GetPropertyAttributes(&it);
- if (maybe.IsNothing()) return ReadOnlyRoots(isolate).exception();
- DCHECK(!it.IsFound());
-
- if (object->IsJSArray()) {
- Handle<JSArray> array = Handle<JSArray>::cast(object);
- DCHECK(!JSArray::WouldChangeReadOnlyLength(array, index));
- }
-#endif
-
- RETURN_RESULT_OR_FAILURE(isolate, JSObject::SetOwnElementIgnoreAttributes(
- object, index, value, NONE));
-}
-
RUNTIME_FUNCTION(Runtime_SetKeyedProperty) {
HandleScope scope(isolate);
DCHECK_EQ(4, args.length());
@@ -663,8 +604,8 @@ RUNTIME_FUNCTION(Runtime_StoreDataPropertyInLiteral) {
namespace {
// ES6 section 12.5.4.
-Object* DeleteProperty(Isolate* isolate, Handle<Object> object,
- Handle<Object> key, LanguageMode language_mode) {
+Object DeleteProperty(Isolate* isolate, Handle<Object> object,
+ Handle<Object> key, LanguageMode language_mode) {
Handle<JSReceiver> receiver;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, receiver,
Object::ToObject(isolate, object));
@@ -835,21 +776,25 @@ RUNTIME_FUNCTION(Runtime_DefineDataPropertyInLiteral) {
CONVERT_ARG_HANDLE_CHECKED(Name, name, 1);
CONVERT_ARG_HANDLE_CHECKED(Object, value, 2);
CONVERT_SMI_ARG_CHECKED(flag, 3);
- CONVERT_ARG_HANDLE_CHECKED(FeedbackVector, vector, 4);
+ CONVERT_ARG_HANDLE_CHECKED(HeapObject, maybe_vector, 4);
CONVERT_SMI_ARG_CHECKED(index, 5);
- FeedbackNexus nexus(vector, FeedbackVector::ToSlot(index));
- if (nexus.ic_state() == UNINITIALIZED) {
- if (name->IsUniqueName()) {
- nexus.ConfigureMonomorphic(name, handle(object->map(), isolate),
- MaybeObjectHandle());
- } else {
- nexus.ConfigureMegamorphic(PROPERTY);
- }
- } else if (nexus.ic_state() == MONOMORPHIC) {
- if (nexus.FindFirstMap() != object->map() ||
- nexus.GetFeedbackExtra() != MaybeObject::FromObject(*name)) {
- nexus.ConfigureMegamorphic(PROPERTY);
+ if (!maybe_vector->IsUndefined()) {
+ DCHECK(maybe_vector->IsFeedbackVector());
+ Handle<FeedbackVector> vector = Handle<FeedbackVector>::cast(maybe_vector);
+ FeedbackNexus nexus(vector, FeedbackVector::ToSlot(index));
+ if (nexus.ic_state() == UNINITIALIZED) {
+ if (name->IsUniqueName()) {
+ nexus.ConfigureMonomorphic(name, handle(object->map(), isolate),
+ MaybeObjectHandle());
+ } else {
+ nexus.ConfigureMegamorphic(PROPERTY);
+ }
+ } else if (nexus.ic_state() == MONOMORPHIC) {
+ if (nexus.FindFirstMap() != object->map() ||
+ nexus.GetFeedbackExtra() != MaybeObject::FromObject(*name)) {
+ nexus.ConfigureMegamorphic(PROPERTY);
+ }
}
}
@@ -1039,7 +984,7 @@ inline void TrySetNative(Handle<Object> maybe_func) {
inline void TrySetNativeAndLength(Handle<Object> maybe_func, int length) {
if (!maybe_func->IsJSFunction()) return;
- SharedFunctionInfo* shared = JSFunction::cast(*maybe_func)->shared();
+ SharedFunctionInfo shared = JSFunction::cast(*maybe_func)->shared();
shared->set_native(true);
if (length >= 0) {
shared->set_length(length);
@@ -1215,7 +1160,7 @@ RUNTIME_FUNCTION(Runtime_AddPrivateField) {
CONVERT_ARG_HANDLE_CHECKED(JSReceiver, o, 0);
CONVERT_ARG_HANDLE_CHECKED(Symbol, key, 1);
CONVERT_ARG_HANDLE_CHECKED(Object, value, 2);
- DCHECK(key->is_private_field());
+ DCHECK(key->is_private_name());
LookupIterator it =
LookupIterator::PropertyOrElement(isolate, o, key, LookupIterator::OWN);
diff --git a/deps/v8/src/runtime/runtime-operators.cc b/deps/v8/src/runtime/runtime-operators.cc
index d2d55ed753..1ce7fffd18 100644
--- a/deps/v8/src/runtime/runtime-operators.cc
+++ b/deps/v8/src/runtime/runtime-operators.cc
@@ -3,6 +3,7 @@
// found in the LICENSE file.
#include "src/arguments.h"
+#include "src/counters.h"
#include "src/isolate-inl.h"
#include "src/runtime/runtime-utils.h"
diff --git a/deps/v8/src/runtime/runtime-promise.cc b/deps/v8/src/runtime/runtime-promise.cc
index eeb92e9a35..cd76d5ee7d 100644
--- a/deps/v8/src/runtime/runtime-promise.cc
+++ b/deps/v8/src/runtime/runtime-promise.cc
@@ -7,8 +7,11 @@
#include "src/counters.h"
#include "src/debug/debug.h"
#include "src/elements.h"
+#include "src/microtask-queue.h"
#include "src/objects-inl.h"
+#include "src/objects/heap-object-inl.h"
#include "src/objects/js-promise-inl.h"
+#include "src/objects/oddball-inl.h"
#include "src/runtime/runtime-utils.h"
namespace v8 {
@@ -75,14 +78,14 @@ RUNTIME_FUNCTION(Runtime_EnqueueMicrotask) {
CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
Handle<CallableTask> microtask =
isolate->factory()->NewCallableTask(function, isolate->native_context());
- isolate->EnqueueMicrotask(microtask);
+ isolate->native_context()->microtask_queue()->EnqueueMicrotask(*microtask);
return ReadOnlyRoots(isolate).undefined_value();
}
-RUNTIME_FUNCTION(Runtime_RunMicrotasks) {
+RUNTIME_FUNCTION(Runtime_PerformMicrotaskCheckpoint) {
HandleScope scope(isolate);
DCHECK_EQ(0, args.length());
- isolate->RunMicrotasks();
+ MicrotasksScope::PerformCheckpoint(reinterpret_cast<v8::Isolate*>(isolate));
return ReadOnlyRoots(isolate).undefined_value();
}
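
Enqueueing now targets the native context's own `MicrotaskQueue`, and the explicit run is phrased through `MicrotasksScope::PerformCheckpoint` (a public `v8.h` API) rather than an isolate-level `RunMicrotasks`. A toy FIFO with a checkpoint drain, to show the shape:

#include <cstdio>
#include <functional>
#include <queue>

// One queue per context instead of one per isolate; a checkpoint drains
// until the queue is empty, including tasks enqueued while draining.
class MicrotaskQueueSketch {
 public:
  void Enqueue(std::function<void()> task) { tasks_.push(std::move(task)); }
  void PerformCheckpoint() {
    while (!tasks_.empty()) {
      std::function<void()> task = std::move(tasks_.front());
      tasks_.pop();
      task();
    }
  }
 private:
  std::queue<std::function<void()>> tasks_;
};

int main() {
  MicrotaskQueueSketch queue;
  queue.Enqueue([&] {
    std::puts("first");
    queue.Enqueue([] { std::puts("chained"); });  // runs in same checkpoint
  });
  queue.PerformCheckpoint();  // prints: first, chained
}
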
@@ -106,13 +109,6 @@ RUNTIME_FUNCTION(Runtime_PromiseStatus) {
return Smi::FromInt(promise->status());
}
-RUNTIME_FUNCTION(Runtime_PromiseResult) {
- HandleScope scope(isolate);
- DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSPromise, promise, 0);
- return promise->result();
-}
-
RUNTIME_FUNCTION(Runtime_PromiseMarkAsHandled) {
SealHandleScope shs(isolate);
DCHECK_EQ(1, args.length());
@@ -131,21 +127,81 @@ RUNTIME_FUNCTION(Runtime_PromiseHookInit) {
return ReadOnlyRoots(isolate).undefined_value();
}
-RUNTIME_FUNCTION(Runtime_AwaitPromisesInit) {
- DCHECK_EQ(3, args.length());
- HandleScope scope(isolate);
- CONVERT_ARG_HANDLE_CHECKED(JSPromise, wrapped_value, 0);
- CONVERT_ARG_HANDLE_CHECKED(JSPromise, outer_promise, 1);
- CONVERT_ARG_HANDLE_CHECKED(JSPromise, throwaway, 2);
- isolate->RunPromiseHook(PromiseHookType::kInit, wrapped_value, outer_promise);
- isolate->RunPromiseHook(PromiseHookType::kInit, throwaway, wrapped_value);
+namespace {
+
+Handle<JSPromise> AwaitPromisesInitCommon(Isolate* isolate,
+ Handle<Object> value,
+ Handle<JSPromise> promise,
+ Handle<JSPromise> outer_promise,
+ Handle<JSFunction> reject_handler,
+ bool is_predicted_as_caught) {
+ // Allocate the throwaway promise and fire the appropriate init
+ // hook for the throwaway promise (passing the {promise} as its
+ // parent).
+ Handle<JSPromise> throwaway = isolate->factory()->NewJSPromiseWithoutHook();
+ isolate->RunPromiseHook(PromiseHookType::kInit, throwaway, promise);
+
// On inspector side we capture async stack trace and store it by
// outer_promise->async_task_id when async function is suspended first time.
// To use captured stack trace later throwaway promise should have the same
// async_task_id as outer_promise since we generate WillHandle and DidHandle
// events using throwaway promise.
throwaway->set_async_task_id(outer_promise->async_task_id());
- return ReadOnlyRoots(isolate).undefined_value();
+
+ // The Promise will be thrown away and not handled, but it
+ // shouldn't trigger unhandled reject events as its work is done
+ throwaway->set_has_handler(true);
+
+ // Enable proper debug support for promises.
+ if (isolate->debug()->is_active()) {
+ if (value->IsJSPromise()) {
+ Object::SetProperty(
+ isolate, reject_handler,
+ isolate->factory()->promise_forwarding_handler_symbol(),
+ isolate->factory()->true_value(), LanguageMode::kStrict)
+ .Check();
+ Handle<JSPromise>::cast(value)->set_handled_hint(is_predicted_as_caught);
+ }
+
+ // Mark the dependency to {outer_promise} in case the {throwaway}
+ // Promise is found on the Promise stack
+ Object::SetProperty(isolate, throwaway,
+ isolate->factory()->promise_handled_by_symbol(),
+ outer_promise, LanguageMode::kStrict)
+ .Check();
+ }
+
+ return throwaway;
+}
+
+} // namespace
+
+RUNTIME_FUNCTION(Runtime_AwaitPromisesInit) {
+ DCHECK_EQ(5, args.length());
+ HandleScope scope(isolate);
+ CONVERT_ARG_HANDLE_CHECKED(Object, value, 0);
+ CONVERT_ARG_HANDLE_CHECKED(JSPromise, promise, 1);
+ CONVERT_ARG_HANDLE_CHECKED(JSPromise, outer_promise, 2);
+ CONVERT_ARG_HANDLE_CHECKED(JSFunction, reject_handler, 3);
+ CONVERT_BOOLEAN_ARG_CHECKED(is_predicted_as_caught, 4);
+ return *AwaitPromisesInitCommon(isolate, value, promise, outer_promise,
+ reject_handler, is_predicted_as_caught);
+}
+
+RUNTIME_FUNCTION(Runtime_AwaitPromisesInitOld) {
+ DCHECK_EQ(5, args.length());
+ HandleScope scope(isolate);
+ CONVERT_ARG_HANDLE_CHECKED(Object, value, 0);
+ CONVERT_ARG_HANDLE_CHECKED(JSPromise, promise, 1);
+ CONVERT_ARG_HANDLE_CHECKED(JSPromise, outer_promise, 2);
+ CONVERT_ARG_HANDLE_CHECKED(JSFunction, reject_handler, 3);
+ CONVERT_BOOLEAN_ARG_CHECKED(is_predicted_as_caught, 4);
+
+ // Fire the init hook for the wrapper promise (that we created for the
+ // {value} previously).
+ isolate->RunPromiseHook(PromiseHookType::kInit, promise, outer_promise);
+ return *AwaitPromisesInitCommon(isolate, value, promise, outer_promise,
+ reject_handler, is_predicted_as_caught);
}
RUNTIME_FUNCTION(Runtime_PromiseHookBefore) {
diff --git a/deps/v8/src/runtime/runtime-proxy.cc b/deps/v8/src/runtime/runtime-proxy.cc
index 69b7c9795c..f4f84ebec9 100644
--- a/deps/v8/src/runtime/runtime-proxy.cc
+++ b/deps/v8/src/runtime/runtime-proxy.cc
@@ -5,6 +5,7 @@
#include "src/runtime/runtime-utils.h"
#include "src/arguments-inl.h"
+#include "src/counters.h"
#include "src/elements.h"
#include "src/heap/factory.h"
#include "src/isolate-inl.h"
diff --git a/deps/v8/src/runtime/runtime-regexp.cc b/deps/v8/src/runtime/runtime-regexp.cc
index e66319bfb5..f472da7478 100644
--- a/deps/v8/src/runtime/runtime-regexp.cc
+++ b/deps/v8/src/runtime/runtime-regexp.cc
@@ -6,8 +6,9 @@
#include "src/arguments-inl.h"
#include "src/conversions-inl.h"
+#include "src/counters.h"
#include "src/isolate-inl.h"
-#include "src/messages.h"
+#include "src/message-template.h"
#include "src/objects/js-array-inl.h"
#include "src/regexp/jsregexp-inl.h"
#include "src/regexp/jsregexp.h"
@@ -38,8 +39,8 @@ uint32_t GetArgcForReplaceCallable(uint32_t num_captures,
// Looks up the capture of the given name. Returns the (1-based) numbered
// capture index or -1 on failure.
-int LookupNamedCapture(const std::function<bool(String*)>& name_matches,
- FixedArray* capture_name_map) {
+int LookupNamedCapture(const std::function<bool(String)>& name_matches,
+ FixedArray capture_name_map) {
// TODO(jgruber): Sort capture_name_map and do binary search via
// internalized strings.
@@ -51,7 +52,7 @@ int LookupNamedCapture(const std::function<bool(String*)>& name_matches,
const int name_ix = j * 2;
const int index_ix = j * 2 + 1;
- String* capture_name = String::cast(capture_name_map->get(name_ix));
+ String capture_name = String::cast(capture_name_map->get(name_ix));
if (!name_matches(capture_name)) continue;
maybe_capture_index = Smi::ToInt(capture_name_map->get(index_ix));
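
`LookupNamedCapture` scans a flat array laid out as `[name0, index0, name1, index1, ...]` and reports the 1-based capture index of a name the predicate accepts, or -1 on failure. The signature change merely swaps `String*` for the value type; the algorithm is untouched, and the TODO still suggests replacing the linear scan with binary search over internalized strings. A standalone version over string/int pairs:

#include <cstdio>
#include <functional>
#include <string>
#include <utility>
#include <vector>

// capture_name_map layout: name at 2*j, 1-based capture index at 2*j + 1,
// matching the FixedArray layout in the hunk above. Returns -1 on failure.
int LookupNamedCaptureSketch(
    const std::function<bool(const std::string&)>& name_matches,
    const std::vector<std::pair<std::string, int>>& capture_name_map) {
  // Linear scan; the V8 TODO suggests sorting plus binary search instead.
  for (const auto& entry : capture_name_map) {
    if (name_matches(entry.first)) return entry.second;
  }
  return -1;
}

int main() {
  std::vector<std::pair<std::string, int>> map = {{"year", 1}, {"month", 2}};
  int index = LookupNamedCaptureSketch(
      [](const std::string& name) { return name == "month"; }, map);
  std::printf("%d\n", index);  // prints 2
}
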
@@ -143,7 +144,7 @@ class CompiledReplacement {
template <typename Char>
bool ParseReplacementPattern(ZoneChunkList<ReplacementPart>* parts,
Vector<Char> characters,
- FixedArray* capture_name_map, int capture_count,
+ FixedArray capture_name_map, int capture_count,
int subject_length) {
// Equivalent to String::GetSubstitution, except that this method converts
// the replacement string into an internal representation that avoids
@@ -235,7 +236,7 @@ class CompiledReplacement {
break;
}
case '<': {
- if (capture_name_map == nullptr) {
+ if (capture_name_map.is_null()) {
i = next_index;
break;
}
@@ -265,7 +266,7 @@ class CompiledReplacement {
// Let capture be ? Get(namedCaptures, groupName).
const int capture_index = LookupNamedCapture(
- [=](String* capture_name) {
+ [=](String capture_name) {
return capture_name->IsEqualTo(requested_name);
},
capture_name_map);
@@ -315,13 +316,13 @@ bool CompiledReplacement::Compile(Isolate* isolate, Handle<JSRegExp> regexp,
int subject_length) {
{
DisallowHeapAllocation no_gc;
- String::FlatContent content = replacement->GetFlatContent();
+ String::FlatContent content = replacement->GetFlatContent(no_gc);
DCHECK(content.IsFlat());
- FixedArray* capture_name_map = nullptr;
+ FixedArray capture_name_map;
if (capture_count > 0) {
DCHECK_EQ(regexp->TypeTag(), JSRegExp::IRREGEXP);
- Object* maybe_capture_name_map = regexp->CaptureNameMap();
+ Object maybe_capture_name_map = regexp->CaptureNameMap();
if (maybe_capture_name_map->IsFixedArray()) {
capture_name_map = FixedArray::cast(maybe_capture_name_map);
}
@@ -449,13 +450,12 @@ void FindStringIndices(Isolate* isolate, Vector<const SubjectChar> subject,
}
}
-void FindStringIndicesDispatch(Isolate* isolate, String* subject,
- String* pattern, std::vector<int>* indices,
- unsigned int limit) {
+void FindStringIndicesDispatch(Isolate* isolate, String subject, String pattern,
+ std::vector<int>* indices, unsigned int limit) {
{
DisallowHeapAllocation no_gc;
- String::FlatContent subject_content = subject->GetFlatContent();
- String::FlatContent pattern_content = pattern->GetFlatContent();
+ String::FlatContent subject_content = subject->GetFlatContent(no_gc);
+ String::FlatContent pattern_content = pattern->GetFlatContent(no_gc);
DCHECK(subject_content.IsFlat());
DCHECK(pattern_content.IsFlat());
if (subject_content.IsOneByte()) {
@@ -521,7 +521,7 @@ void TruncateRegexpIndicesList(Isolate* isolate) {
} // namespace
template <typename ResultSeqString>
-V8_WARN_UNUSED_RESULT static Object* StringReplaceGlobalAtomRegExpWithString(
+V8_WARN_UNUSED_RESULT static Object StringReplaceGlobalAtomRegExpWithString(
Isolate* isolate, Handle<String> subject, Handle<JSRegExp> pattern_regexp,
Handle<String> replacement, Handle<RegExpMatchInfo> last_match_info) {
DCHECK(subject->IsFlat());
@@ -530,7 +530,7 @@ V8_WARN_UNUSED_RESULT static Object* StringReplaceGlobalAtomRegExpWithString(
std::vector<int>* indices = GetRewoundRegexpIndicesList(isolate);
DCHECK_EQ(JSRegExp::ATOM, pattern_regexp->TypeTag());
- String* pattern =
+ String pattern =
String::cast(pattern_regexp->DataAt(JSRegExp::kAtomPatternIndex));
int subject_len = subject->length();
int pattern_len = pattern->length();
@@ -569,17 +569,18 @@ V8_WARN_UNUSED_RESULT static Object* StringReplaceGlobalAtomRegExpWithString(
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, untyped_res, maybe_res);
Handle<ResultSeqString> result = Handle<ResultSeqString>::cast(untyped_res);
+ DisallowHeapAllocation no_gc;
for (int index : *indices) {
// Copy non-matched subject content.
if (subject_pos < index) {
- String::WriteToFlat(*subject, result->GetChars() + result_pos,
+ String::WriteToFlat(*subject, result->GetChars(no_gc) + result_pos,
subject_pos, index);
result_pos += index - subject_pos;
}
// Replace match.
if (replacement_len > 0) {
- String::WriteToFlat(*replacement, result->GetChars() + result_pos, 0,
+ String::WriteToFlat(*replacement, result->GetChars(no_gc) + result_pos, 0,
replacement_len);
result_pos += replacement_len;
}
@@ -588,8 +589,8 @@ V8_WARN_UNUSED_RESULT static Object* StringReplaceGlobalAtomRegExpWithString(
}
// Add remaining subject content at the end.
if (subject_pos < subject_len) {
- String::WriteToFlat(*subject, result->GetChars() + result_pos, subject_pos,
- subject_len);
+ String::WriteToFlat(*subject, result->GetChars(no_gc) + result_pos,
+ subject_pos, subject_len);
}
int32_t match_indices[] = {indices->back(), indices->back() + pattern_len};
@@ -601,7 +602,7 @@ V8_WARN_UNUSED_RESULT static Object* StringReplaceGlobalAtomRegExpWithString(
return *result;
}
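GetFlatContent and GetChars now take the DisallowHeapAllocation scope as an explicit argument, turning the no-GC requirement into a compile-time obligation rather than a comment. A sketch of the witness-parameter idea under invented names:

struct NoGcScope {};  // stands in for V8's DisallowHeapAllocation

class SeqStringSketch {
 public:
  // The witness is unused at runtime; it only forces callers to hold a scope.
  char* GetChars(const NoGcScope&) { return buffer_; }
 private:
  char buffer_[16] = "hello";
};

void CopyCharsSketch(SeqStringSketch& s) {
  NoGcScope no_gc;                   // open a no-allocation region
  char* chars = s.GetChars(no_gc);   // only expressible inside such a region
  (void)chars;
}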
-V8_WARN_UNUSED_RESULT static Object* StringReplaceGlobalRegExpWithString(
+V8_WARN_UNUSED_RESULT static Object StringReplaceGlobalRegExpWithString(
Isolate* isolate, Handle<String> subject, Handle<JSRegExp> regexp,
Handle<String> replacement, Handle<RegExpMatchInfo> last_match_info) {
DCHECK(subject->IsFlat());
@@ -692,7 +693,7 @@ V8_WARN_UNUSED_RESULT static Object* StringReplaceGlobalRegExpWithString(
}
template <typename ResultSeqString>
-V8_WARN_UNUSED_RESULT static Object* StringReplaceGlobalRegExpWithEmptyString(
+V8_WARN_UNUSED_RESULT static Object StringReplaceGlobalRegExpWithEmptyString(
Isolate* isolate, Handle<String> subject, Handle<JSRegExp> regexp,
Handle<RegExpMatchInfo> last_match_info) {
DCHECK(subject->IsFlat());
@@ -738,12 +739,14 @@ V8_WARN_UNUSED_RESULT static Object* StringReplaceGlobalRegExpWithEmptyString(
int prev = 0;
int position = 0;
+ DisallowHeapAllocation no_gc;
do {
start = current_match[0];
end = current_match[1];
if (prev < start) {
// Add substring subject[prev;start] to answer string.
- String::WriteToFlat(*subject, answer->GetChars() + position, prev, start);
+ String::WriteToFlat(*subject, answer->GetChars(no_gc) + position, prev,
+ start);
position += start - prev;
}
prev = end;
@@ -758,7 +761,7 @@ V8_WARN_UNUSED_RESULT static Object* StringReplaceGlobalRegExpWithEmptyString(
if (prev < subject_length) {
// Add substring subject[prev;length] to answer string.
- String::WriteToFlat(*subject, answer->GetChars() + position, prev,
+ String::WriteToFlat(*subject, answer->GetChars(no_gc) + position, prev,
subject_length);
position += subject_length - prev;
}
@@ -782,7 +785,7 @@ V8_WARN_UNUSED_RESULT static Object* StringReplaceGlobalRegExpWithEmptyString(
// needed.
// TODO(hpayer): We should shrink the large object page if the size
// of the object changed significantly.
- if (!heap->lo_space()->Contains(*answer)) {
+ if (!heap->IsLargeObject(*answer)) {
heap->CreateFillerObjectAt(end_of_string, delta, ClearRecordedSlots::kNo);
}
return *answer;
@@ -790,7 +793,7 @@ V8_WARN_UNUSED_RESULT static Object* StringReplaceGlobalRegExpWithEmptyString(
namespace {
-Object* StringReplaceGlobalRegExpWithStringHelper(
+Object StringReplaceGlobalRegExpWithStringHelper(
Isolate* isolate, Handle<JSRegExp> regexp, Handle<String> subject,
Handle<String> replacement, Handle<RegExpMatchInfo> last_match_info) {
CHECK(regexp->GetFlags() & JSRegExp::kGlobal);
@@ -828,7 +831,7 @@ RUNTIME_FUNCTION(Runtime_StringSplit) {
CHECK_LT(0, pattern_length);
if (limit == 0xFFFFFFFFu) {
- FixedArray* last_match_cache_unused;
+ FixedArray last_match_cache_unused;
Handle<Object> cached_answer(
RegExpResultsCache::Lookup(isolate->heap(), *subject, *pattern,
&last_match_cache_unused,
@@ -937,7 +940,7 @@ class MatchInfoBackedMatch : public String::Match {
subject_ = String::Flatten(isolate, subject);
if (regexp->TypeTag() == JSRegExp::IRREGEXP) {
- Object* o = regexp->CaptureNameMap();
+ Object o = regexp->CaptureNameMap();
has_named_captures_ = o->IsFixedArray();
if (has_named_captures_) {
capture_name_map_ = handle(FixedArray::cast(o), isolate);
@@ -979,7 +982,7 @@ class MatchInfoBackedMatch : public String::Match {
CaptureState* state) override {
DCHECK(has_named_captures_);
const int capture_index = LookupNamedCapture(
- [=](String* capture_name) { return capture_name->Equals(*name); },
+ [=](String capture_name) { return capture_name->Equals(*name); },
*capture_name_map_);
if (capture_index == -1) {
@@ -1097,7 +1100,7 @@ class VectorBackedMatch : public String::Match {
// RegExpBuiltinsAssembler::ConstructNewResultFromMatchInfo).
Handle<JSObject> ConstructNamedCaptureGroupsObject(
Isolate* isolate, Handle<FixedArray> capture_map,
- const std::function<Object*(int)>& f_get_capture) {
+ const std::function<Object(int)>& f_get_capture) {
Handle<JSObject> groups = isolate->factory()->NewJSObjectWithNullProto();
const int capture_count = capture_map->length() >> 1;
@@ -1122,10 +1125,10 @@ Handle<JSObject> ConstructNamedCaptureGroupsObject(
// Only called from Runtime_RegExpExecMultiple so it doesn't need to maintain
// separate last match info. See comment on that function.
template <bool has_capture>
-static Object* SearchRegExpMultiple(Isolate* isolate, Handle<String> subject,
- Handle<JSRegExp> regexp,
- Handle<RegExpMatchInfo> last_match_array,
- Handle<JSArray> result_array) {
+static Object SearchRegExpMultiple(Isolate* isolate, Handle<String> subject,
+ Handle<JSRegExp> regexp,
+ Handle<RegExpMatchInfo> last_match_array,
+ Handle<JSArray> result_array) {
DCHECK(RegExpUtils::IsUnmodifiedRegExp(isolate, regexp));
DCHECK_NE(has_capture, regexp->CaptureCount() == 0);
DCHECK(subject->IsFlat());
@@ -1136,8 +1139,8 @@ static Object* SearchRegExpMultiple(Isolate* isolate, Handle<String> subject,
static const int kMinLengthToCache = 0x1000;
if (subject_length > kMinLengthToCache) {
- FixedArray* last_match_cache;
- Object* cached_answer = RegExpResultsCache::Lookup(
+ FixedArray last_match_cache;
+ Object cached_answer = RegExpResultsCache::Lookup(
isolate->heap(), *subject, regexp->data(), &last_match_cache,
RegExpResultsCache::REGEXP_MULTIPLE_INDICES);
if (cached_answer->IsFixedArray()) {
@@ -1373,19 +1376,19 @@ V8_WARN_UNUSED_RESULT MaybeHandle<String> RegExpReplace(
if (replace->length() == 0) {
if (string->HasOnlyOneByteChars()) {
- Object* result =
+ Object result =
StringReplaceGlobalRegExpWithEmptyString<SeqOneByteString>(
isolate, string, regexp, last_match_info);
return handle(String::cast(result), isolate);
} else {
- Object* result =
+ Object result =
StringReplaceGlobalRegExpWithEmptyString<SeqTwoByteString>(
isolate, string, regexp, last_match_info);
return handle(String::cast(result), isolate);
}
}
- Object* result = StringReplaceGlobalRegExpWithString(
+ Object result = StringReplaceGlobalRegExpWithString(
isolate, string, regexp, replace, last_match_info);
if (result->IsString()) {
return handle(String::cast(result), isolate);
@@ -1486,7 +1489,7 @@ RUNTIME_FUNCTION(Runtime_StringReplaceNonGlobalRegExpWithFunction) {
// The existence of capture groups implies IRREGEXP kind.
DCHECK_EQ(regexp->TypeTag(), JSRegExp::IRREGEXP);
- Object* maybe_capture_map = regexp->CaptureNameMap();
+ Object maybe_capture_map = regexp->CaptureNameMap();
if (maybe_capture_map->IsFixedArray()) {
has_named_captures = true;
capture_map = handle(FixedArray::cast(maybe_capture_map), isolate);
@@ -1572,8 +1575,6 @@ RUNTIME_FUNCTION(Runtime_RegExpSplit) {
HandleScope scope(isolate);
DCHECK_EQ(3, args.length());
- DCHECK(args[1]->IsString());
-
CONVERT_ARG_HANDLE_CHECKED(JSReceiver, recv, 0);
CONVERT_ARG_HANDLE_CHECKED(String, string, 1);
CONVERT_ARG_HANDLE_CHECKED(Object, limit_obj, 2);
@@ -1736,15 +1737,19 @@ RUNTIME_FUNCTION(Runtime_RegExpReplace) {
string = String::Flatten(isolate, string);
- // Fast-path for unmodified JSRegExps.
+ const bool functional_replace = replace_obj->IsCallable();
+
+ // Fast-path for unmodified JSRegExps (and non-functional replace).
if (RegExpUtils::IsUnmodifiedRegExp(isolate, recv)) {
+ // We should never get here with functional replace because unmodified
+ // regexp and functional replace should be fully handled in CSA code.
+ CHECK(!functional_replace);
RETURN_RESULT_OR_FAILURE(
isolate, RegExpReplace(isolate, Handle<JSRegExp>::cast(recv), string,
replace_obj));
}
const uint32_t length = string->length();
- const bool functional_replace = replace_obj->IsCallable();
Handle<String> replace;
if (!functional_replace) {
@@ -1891,7 +1896,7 @@ RUNTIME_FUNCTION(Runtime_RegExpReplace) {
DCHECK(!functional_replace);
if (!groups_obj->IsUndefined(isolate)) {
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, groups_obj, JSReceiver::ToObject(isolate, groups_obj));
+ isolate, groups_obj, Object::ToObject(isolate, groups_obj));
}
VectorBackedMatch m(isolate, string, match, position, &captures,
groups_obj);
diff --git a/deps/v8/src/runtime/runtime-scopes.cc b/deps/v8/src/runtime/runtime-scopes.cc
index 7a24b066c1..8227242940 100644
--- a/deps/v8/src/runtime/runtime-scopes.cc
+++ b/deps/v8/src/runtime/runtime-scopes.cc
@@ -8,11 +8,14 @@
#include "src/arguments-inl.h"
#include "src/ast/scopes.h"
#include "src/bootstrapper.h"
+#include "src/counters.h"
#include "src/deoptimizer.h"
#include "src/frames-inl.h"
#include "src/isolate-inl.h"
-#include "src/messages.h"
+#include "src/message-template.h"
+#include "src/objects/heap-object-inl.h"
#include "src/objects/module-inl.h"
+#include "src/objects/smi.h"
#include "src/runtime/runtime-utils.h"
namespace v8 {
@@ -28,8 +31,8 @@ namespace {
enum class RedeclarationType { kSyntaxError = 0, kTypeError = 1 };
-Object* ThrowRedeclarationError(Isolate* isolate, Handle<String> name,
- RedeclarationType redeclaration_type) {
+Object ThrowRedeclarationError(Isolate* isolate, Handle<String> name,
+ RedeclarationType redeclaration_type) {
HandleScope scope(isolate);
if (redeclaration_type == RedeclarationType::kSyntaxError) {
THROW_NEW_ERROR_RETURN_FAILURE(
@@ -40,9 +43,8 @@ Object* ThrowRedeclarationError(Isolate* isolate, Handle<String> name,
}
}
-
// May throw a RedeclarationError.
-Object* DeclareGlobal(
+Object DeclareGlobal(
Isolate* isolate, Handle<JSGlobalObject> global, Handle<String> name,
Handle<Object> value, PropertyAttributes attr, bool is_var,
bool is_function_declaration, RedeclarationType redeclaration_type,
@@ -130,8 +132,8 @@ Object* DeclareGlobal(
return ReadOnlyRoots(isolate).undefined_value();
}
-Object* DeclareGlobals(Isolate* isolate, Handle<FixedArray> declarations,
- int flags, Handle<FeedbackVector> feedback_vector) {
+Object DeclareGlobals(Isolate* isolate, Handle<FixedArray> declarations,
+ int flags, Handle<FeedbackVector> feedback_vector) {
HandleScope scope(isolate);
Handle<JSGlobalObject> global(isolate->global_object());
Handle<Context> context(isolate->context(), isolate);
@@ -147,20 +149,27 @@ Object* DeclareGlobals(Isolate* isolate, Handle<FixedArray> declarations,
bool is_var = initial_value->IsUndefined(isolate);
bool is_function = initial_value->IsSharedFunctionInfo();
- DCHECK_EQ(1, BoolToInt(is_var) + BoolToInt(is_function));
+ DCHECK_NE(is_var, is_function);
Handle<Object> value;
if (is_function) {
- DCHECK(possibly_feedback_cell_slot->IsSmi());
+ // If a feedback vector was not allocated for this function, then we don't
+ // have any information about the number of closures. Use NoFeedbackCell to
+ // indicate that.
+ Handle<FeedbackCell> feedback_cell =
+ isolate->factory()->no_feedback_cell();
+ if (!feedback_vector.is_null()) {
+ DCHECK(possibly_feedback_cell_slot->IsSmi());
+ FeedbackSlot feedback_cells_slot(
+ Smi::ToInt(*possibly_feedback_cell_slot));
+ feedback_cell = Handle<FeedbackCell>(
+ FeedbackCell::cast(feedback_vector->Get(feedback_cells_slot)
+ ->GetHeapObjectAssumeStrong()),
+ isolate);
+ }
// Copy the function and update its context. Use it as value.
Handle<SharedFunctionInfo> shared =
Handle<SharedFunctionInfo>::cast(initial_value);
- FeedbackSlot feedback_cells_slot(
- Smi::ToInt(*possibly_feedback_cell_slot));
- Handle<FeedbackCell> feedback_cell(
- FeedbackCell::cast(feedback_vector->Get(feedback_cells_slot)
- ->GetHeapObjectAssumeStrong()),
- isolate);
Handle<JSFunction> function =
isolate->factory()->NewFunctionFromSharedFunctionInfo(
shared, context, feedback_cell, TENURED);
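The new branch covers lazily-allocated feedback: when the closure has no feedback vector yet, a process-wide NoFeedbackCell sentinel is substituted for the per-slot cell. A sketch of that fallback shape (names and types are invented):

#include <vector>

struct FeedbackCellSketch { int closure_count = 0; };

// Returns the per-slot cell when a feedback vector exists, otherwise a shared
// sentinel that records nothing, mirroring no_feedback_cell() above.
FeedbackCellSketch* CellForSlotSketch(std::vector<FeedbackCellSketch>* vector,
                                      int slot) {
  static FeedbackCellSketch no_feedback_cell;
  if (vector == nullptr) return &no_feedback_cell;  // vector not allocated
  return &(*vector)[slot];                          // normal fast path
}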
@@ -179,10 +188,10 @@ Object* DeclareGlobals(Isolate* isolate, Handle<FixedArray> declarations,
// ES#sec-globaldeclarationinstantiation 5.d:
// If hasRestrictedGlobal is true, throw a SyntaxError exception.
- Object* result = DeclareGlobal(
- isolate, global, name, value, static_cast<PropertyAttributes>(attr),
- is_var, is_function, RedeclarationType::kSyntaxError, feedback_vector,
- slot);
+ Object result = DeclareGlobal(isolate, global, name, value,
+ static_cast<PropertyAttributes>(attr), is_var,
+ is_function, RedeclarationType::kSyntaxError,
+ feedback_vector, slot);
if (isolate->has_pending_exception()) return result;
});
@@ -199,20 +208,23 @@ RUNTIME_FUNCTION(Runtime_DeclareGlobals) {
CONVERT_SMI_ARG_CHECKED(flags, 1);
CONVERT_ARG_HANDLE_CHECKED(JSFunction, closure, 2);
- Handle<FeedbackVector> feedback_vector(closure->feedback_vector(), isolate);
+ Handle<FeedbackVector> feedback_vector = Handle<FeedbackVector>();
+ if (closure->has_feedback_vector()) {
+ feedback_vector =
+ Handle<FeedbackVector>(closure->feedback_vector(), isolate);
+ }
return DeclareGlobals(isolate, declarations, flags, feedback_vector);
}
namespace {
-Object* DeclareEvalHelper(Isolate* isolate, Handle<String> name,
- Handle<Object> value) {
+Object DeclareEvalHelper(Isolate* isolate, Handle<String> name,
+ Handle<Object> value) {
// Declarations are always made in a function, native, eval, or script
// context, or a declaration block scope. Since this is called from eval, the
// context passed is the context of the caller, which may be some nested
// context and not the declaration context.
- Handle<Context> context_arg(isolate->context(), isolate);
- Handle<Context> context(context_arg->declaration_context(), isolate);
+ Handle<Context> context(isolate->context()->declaration_context(), isolate);
DCHECK(context->IsFunctionContext() || context->IsNativeContext() ||
context->IsScriptContext() || context->IsEvalContext() ||
@@ -228,23 +240,9 @@ Object* DeclareEvalHelper(Isolate* isolate, Handle<String> name,
InitializationFlag init_flag;
VariableMode mode;
- // Check for a conflict with a lexically scoped variable
- const ContextLookupFlags lookup_flags = static_cast<ContextLookupFlags>(
- FOLLOW_CONTEXT_CHAIN | STOP_AT_DECLARATION_SCOPE | SKIP_WITH_CONTEXT);
- context_arg->Lookup(name, lookup_flags, &index, &attributes, &init_flag,
- &mode);
- if (attributes != ABSENT && IsLexicalVariableMode(mode)) {
- // ES#sec-evaldeclarationinstantiation 5.a.i.1:
- // If varEnvRec.HasLexicalDeclaration(name) is true, throw a SyntaxError
- // exception.
- // ES#sec-evaldeclarationinstantiation 5.d.ii.2.a.i:
- // Throw a SyntaxError exception.
- return ThrowRedeclarationError(isolate, name,
- RedeclarationType::kSyntaxError);
- }
-
- Handle<Object> holder = context->Lookup(name, DONT_FOLLOW_CHAINS, &index,
- &attributes, &init_flag, &mode);
+ Handle<Object> holder =
+ Context::Lookup(context, name, DONT_FOLLOW_CHAINS, &index, &attributes,
+ &init_flag, &mode);
DCHECK(holder.is_null() || !holder->IsModule());
DCHECK(!isolate->has_pending_exception());
@@ -257,9 +255,9 @@ Object* DeclareEvalHelper(Isolate* isolate, Handle<String> name,
value, NONE, is_var, is_function,
RedeclarationType::kTypeError);
}
- if (context_arg->extension()->IsJSGlobalObject()) {
- Handle<JSGlobalObject> global(
- JSGlobalObject::cast(context_arg->extension()), isolate);
+ if (context->extension()->IsJSGlobalObject()) {
+ Handle<JSGlobalObject> global(JSGlobalObject::cast(context->extension()),
+ isolate);
return DeclareGlobal(isolate, global, name, value, NONE, is_var,
is_function, RedeclarationType::kTypeError);
} else if (context->IsScriptContext()) {
@@ -287,7 +285,7 @@ Object* DeclareEvalHelper(Isolate* isolate, Handle<String> name,
} else if (context->has_extension()) {
object = handle(context->extension_object(), isolate);
- DCHECK(object->IsJSContextExtensionObject() || object->IsJSGlobalObject());
+ DCHECK(object->IsJSContextExtensionObject());
} else {
// Sloppy varblock and function contexts might not have an extension object
// yet. Sloppy eval will never have an extension object, as vars are hoisted
@@ -334,7 +332,7 @@ std::unique_ptr<Handle<Object>[]> GetCallerArguments(Isolate* isolate,
// Find frame containing arguments passed to the caller.
JavaScriptFrameIterator it(isolate);
JavaScriptFrame* frame = it.frame();
- std::vector<SharedFunctionInfo*> functions;
+ std::vector<SharedFunctionInfo> functions;
frame->GetFunctions(&functions);
if (functions.size() > 1) {
int inlined_jsframe_index = static_cast<int>(functions.size()) - 1;
@@ -444,7 +442,7 @@ Handle<JSObject> NewSloppyArguments(Isolate* isolate, Handle<JSFunction> callee,
int parameter = scope_info->ContextLocalParameterNumber(i);
if (parameter >= mapped_count) continue;
arguments->set_the_hole(parameter);
- Smi* slot = Smi::FromInt(Context::MIN_CONTEXT_SLOTS + i);
+ Smi slot = Smi::FromInt(Context::MIN_CONTEXT_SLOTS + i);
parameter_map->set(parameter + 2, slot);
}
} else {
@@ -464,7 +462,7 @@ Handle<JSObject> NewSloppyArguments(Isolate* isolate, Handle<JSFunction> callee,
class HandleArguments {
public:
explicit HandleArguments(Handle<Object>* array) : array_(array) {}
- Object* operator[](int index) { return *array_[index]; }
+ Object operator[](int index) { return *array_[index]; }
private:
Handle<Object>* array_;
@@ -472,11 +470,13 @@ class HandleArguments {
class ParameterArguments {
public:
- explicit ParameterArguments(Object** parameters) : parameters_(parameters) {}
- Object*& operator[](int index) { return *(parameters_ - index - 1); }
+ explicit ParameterArguments(Address parameters) : parameters_(parameters) {}
+ Object operator[](int index) {
+ return *FullObjectSlot(parameters_ - (index + 1) * kSystemPointerSize);
+ }
private:
- Object** parameters_;
+ Address parameters_;
};
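ParameterArguments now indexes the caller's parameter area by raw address instead of through an Object**, computing each slot as base minus (index + 1) * kSystemPointerSize and loading a full word from it. A standalone sketch of the arithmetic, assuming a valid frame base at runtime:

#include <cstdint>

constexpr uintptr_t kSystemPointerSizeSketch = sizeof(void*);

// Loads the index-th parameter below the given base address. The base must
// point at a live parameter area, as it does during a real frame walk.
uintptr_t LoadParameterSketch(uintptr_t parameters_base, int index) {
  uintptr_t slot = parameters_base -
                   (static_cast<uintptr_t>(index) + 1) * kSystemPointerSizeSketch;
  return *reinterpret_cast<const uintptr_t*>(slot);  // FullObjectSlot-style load
}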
} // namespace
@@ -537,7 +537,7 @@ RUNTIME_FUNCTION(Runtime_NewRestParameter) {
DONT_INITIALIZE_ARRAY_ELEMENTS);
{
DisallowHeapAllocation no_gc;
- FixedArray* elements = FixedArray::cast(result->elements());
+ FixedArray elements = FixedArray::cast(result->elements());
WriteBarrierMode mode = elements->GetWriteBarrierMode(no_gc);
for (int i = 0; i < num_elements; i++) {
elements->set(i, *arguments[i + start_index], mode);
@@ -571,8 +571,8 @@ RUNTIME_FUNCTION(Runtime_NewSloppyArguments) {
fp = adaptor_frame->fp();
}
- Object** parameters = reinterpret_cast<Object**>(
- fp + argc * kPointerSize + StandardFrameConstants::kCallerSPOffset);
+ Address parameters =
+ fp + argc * kSystemPointerSize + StandardFrameConstants::kCallerSPOffset;
ParameterArguments argument_getter(parameters);
return *NewSloppyArguments(isolate, callee, argument_getter, argc);
}
@@ -580,7 +580,10 @@ RUNTIME_FUNCTION(Runtime_NewSloppyArguments) {
RUNTIME_FUNCTION(Runtime_NewArgumentsElements) {
HandleScope scope(isolate);
DCHECK_EQ(3, args.length());
- Object** frame = reinterpret_cast<Object**>(args[0]);
+ // Note that args[0] is the address of an array of full object pointers
+ // (a.k.a. FullObjectSlot), which looks like a Smi because it's aligned.
+ DCHECK(args[0].IsSmi());
+ FullObjectSlot frame(args[0]->ptr());
CONVERT_SMI_ARG_CHECKED(length, 1);
CONVERT_SMI_ARG_CHECKED(mapped_count, 2);
Handle<FixedArray> result =
@@ -593,7 +596,7 @@ RUNTIME_FUNCTION(Runtime_NewArgumentsElements) {
result->set_the_hole(isolate, index);
}
for (int index = number_of_holes; index < length; ++index) {
- result->set(index, frame[offset - index], mode);
+ result->set(index, *(frame + (offset - index)), mode);
}
return *result;
}
@@ -624,9 +627,9 @@ RUNTIME_FUNCTION(Runtime_NewClosure_Tenured) {
return *function;
}
-static Object* FindNameClash(Isolate* isolate, Handle<ScopeInfo> scope_info,
- Handle<JSGlobalObject> global_object,
- Handle<ScriptContextTable> script_context) {
+static Object FindNameClash(Isolate* isolate, Handle<ScopeInfo> scope_info,
+ Handle<JSGlobalObject> global_object,
+ Handle<ScriptContextTable> script_context) {
for (int var = 0; var < scope_info->ContextLocalCount(); var++) {
Handle<String> name(scope_info->ContextLocalName(var), isolate);
VariableMode mode = scope_info->ContextLocalMode(var);
@@ -662,7 +665,6 @@ static Object* FindNameClash(Isolate* isolate, Handle<ScopeInfo> scope_info,
return ReadOnlyRoots(isolate).undefined_value();
}
-
RUNTIME_FUNCTION(Runtime_NewScriptContext) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
@@ -675,7 +677,7 @@ RUNTIME_FUNCTION(Runtime_NewScriptContext) {
Handle<ScriptContextTable> script_context_table(
native_context->script_context_table(), isolate);
- Object* name_clash_result =
+ Object name_clash_result =
FindNameClash(isolate, scope_info, global_object, script_context_table);
if (isolate->has_pending_exception()) return name_clash_result;
@@ -760,8 +762,9 @@ RUNTIME_FUNCTION(Runtime_DeleteLookupSlot) {
PropertyAttributes attributes;
InitializationFlag flag;
VariableMode mode;
- Handle<Object> holder = isolate->context()->Lookup(
- name, FOLLOW_CHAINS, &index, &attributes, &flag, &mode);
+ Handle<Context> context(isolate->context(), isolate);
+ Handle<Object> holder = Context::Lookup(context, name, FOLLOW_CHAINS, &index,
+ &attributes, &flag, &mode);
// If the slot was not found the result is true.
if (holder.is_null()) {
@@ -796,8 +799,9 @@ MaybeHandle<Object> LoadLookupSlot(Isolate* isolate, Handle<String> name,
PropertyAttributes attributes;
InitializationFlag flag;
VariableMode mode;
- Handle<Object> holder = isolate->context()->Lookup(
- name, FOLLOW_CHAINS, &index, &attributes, &flag, &mode);
+ Handle<Context> context(isolate->context(), isolate);
+ Handle<Object> holder = Context::Lookup(context, name, FOLLOW_CHAINS, &index,
+ &attributes, &flag, &mode);
if (isolate->has_pending_exception()) return MaybeHandle<Object>();
if (!holder.is_null() && holder->IsModule()) {
@@ -880,7 +884,7 @@ RUNTIME_FUNCTION_RETURN_PAIR(Runtime_LoadLookupSlotForCall) {
Handle<Object> receiver;
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
isolate, value, LoadLookupSlot(isolate, name, kThrowOnError, &receiver),
- MakePair(ReadOnlyRoots(isolate).exception(), nullptr));
+ MakePair(ReadOnlyRoots(isolate).exception(), Object()));
return MakePair(*value, *receiver);
}
@@ -888,19 +892,17 @@ RUNTIME_FUNCTION_RETURN_PAIR(Runtime_LoadLookupSlotForCall) {
namespace {
MaybeHandle<Object> StoreLookupSlot(
- Isolate* isolate, Handle<String> name, Handle<Object> value,
- LanguageMode language_mode,
+ Isolate* isolate, Handle<Context> context, Handle<String> name,
+ Handle<Object> value, LanguageMode language_mode,
ContextLookupFlags context_lookup_flags = FOLLOW_CHAINS) {
- Handle<Context> context(isolate->context(), isolate);
-
int index;
PropertyAttributes attributes;
InitializationFlag flag;
VariableMode mode;
bool is_sloppy_function_name;
Handle<Object> holder =
- context->Lookup(name, context_lookup_flags, &index, &attributes, &flag,
- &mode, &is_sloppy_function_name);
+ Context::Lookup(context, name, context_lookup_flags, &index, &attributes,
+ &flag, &mode, &is_sloppy_function_name);
if (holder.is_null()) {
// In case of JSProxy, an exception might have been thrown.
if (isolate->has_pending_exception()) return MaybeHandle<Object>();
@@ -916,7 +918,7 @@ MaybeHandle<Object> StoreLookupSlot(
// The property was found in a context slot.
if (index != Context::kNotFound) {
if (flag == kNeedsInitialization &&
- Handle<Context>::cast(holder)->is_the_hole(isolate, index)) {
+ Handle<Context>::cast(holder)->get(index)->IsTheHole(isolate)) {
THROW_NEW_ERROR(isolate,
NewReferenceError(MessageTemplate::kNotDefined, name),
Object);
@@ -960,32 +962,37 @@ RUNTIME_FUNCTION(Runtime_StoreLookupSlot_Sloppy) {
DCHECK_EQ(2, args.length());
CONVERT_ARG_HANDLE_CHECKED(String, name, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, value, 1);
+ Handle<Context> context(isolate->context(), isolate);
RETURN_RESULT_OR_FAILURE(
- isolate, StoreLookupSlot(isolate, name, value, LanguageMode::kSloppy));
+ isolate,
+ StoreLookupSlot(isolate, context, name, value, LanguageMode::kSloppy));
}
-// Store into a dynamic context for sloppy-mode block-scoped function hoisting
-// which leaks out of an eval. In particular, with-scopes are skipped to
-// reach the appropriate var-like declaration.
-RUNTIME_FUNCTION(Runtime_StoreLookupSlot_SloppyHoisting) {
+RUNTIME_FUNCTION(Runtime_StoreLookupSlot_Strict) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
CONVERT_ARG_HANDLE_CHECKED(String, name, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, value, 1);
- const ContextLookupFlags lookup_flags = static_cast<ContextLookupFlags>(
- FOLLOW_CONTEXT_CHAIN | STOP_AT_DECLARATION_SCOPE | SKIP_WITH_CONTEXT);
+ Handle<Context> context(isolate->context(), isolate);
RETURN_RESULT_OR_FAILURE(
- isolate, StoreLookupSlot(isolate, name, value, LanguageMode::kSloppy,
- lookup_flags));
+ isolate,
+ StoreLookupSlot(isolate, context, name, value, LanguageMode::kStrict));
}
-RUNTIME_FUNCTION(Runtime_StoreLookupSlot_Strict) {
+// Store into a dynamic declaration context for sloppy-mode block-scoped
+// function hoisting which leaks out of an eval.
+RUNTIME_FUNCTION(Runtime_StoreLookupSlot_SloppyHoisting) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
CONVERT_ARG_HANDLE_CHECKED(String, name, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, value, 1);
+ const ContextLookupFlags lookup_flags =
+ static_cast<ContextLookupFlags>(DONT_FOLLOW_CHAINS);
+ Handle<Context> declaration_context(isolate->context()->declaration_context(),
+ isolate);
RETURN_RESULT_OR_FAILURE(
- isolate, StoreLookupSlot(isolate, name, value, LanguageMode::kStrict));
+ isolate, StoreLookupSlot(isolate, declaration_context, name, value,
+ LanguageMode::kSloppy, lookup_flags));
}
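The reordered functions above also change the hoisting store: rather than following the context chain with SKIP_WITH_CONTEXT, the runtime now resolves declaration_context() first and performs a single DONT_FOLLOW_CHAINS lookup there. A sketch of the difference with a toy scope chain (all names invented):

#include <string>
#include <unordered_map>

struct ScopeSketch {
  std::unordered_map<std::string, int> vars;
  ScopeSketch* parent = nullptr;
  bool is_declaration_scope = false;

  // Analogue of Context::declaration_context(): hop to the nearest scope
  // that can hold var-like declarations.
  ScopeSketch* DeclarationContext() {
    ScopeSketch* s = this;
    while (!s->is_declaration_scope) s = s->parent;
    return s;
  }
};

// One non-chained store into the declaration context, replacing a chained
// lookup that had to skip with-scopes on the way up.
void StoreHoistedSketch(ScopeSketch* current, const std::string& name, int v) {
  current->DeclarationContext()->vars[name] = v;
}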
} // namespace internal
diff --git a/deps/v8/src/runtime/runtime-strings.cc b/deps/v8/src/runtime/runtime-strings.cc
index d57959687c..9a537e7fa2 100644
--- a/deps/v8/src/runtime/runtime-strings.cc
+++ b/deps/v8/src/runtime/runtime-strings.cc
@@ -7,6 +7,8 @@
#include "src/counters.h"
#include "src/objects-inl.h"
#include "src/objects/js-array-inl.h"
+#include "src/objects/slots.h"
+#include "src/objects/smi.h"
#include "src/regexp/jsregexp-inl.h"
#include "src/regexp/regexp-utils.h"
#include "src/runtime/runtime-utils.h"
@@ -73,7 +75,7 @@ MaybeHandle<String> StringReplaceOneCharWithString(
}
recursion_limit--;
if (subject->IsConsString()) {
- ConsString* cons = ConsString::cast(*subject);
+ ConsString cons = ConsString::cast(*subject);
Handle<String> first = handle(cons->first(), isolate);
Handle<String> second = handle(cons->second(), isolate);
Handle<String> new_first;
@@ -299,7 +301,7 @@ RUNTIME_FUNCTION(Runtime_StringBuilderConcat) {
{
DisallowHeapAllocation no_gc;
- FixedArray* fixed_array = FixedArray::cast(array->elements());
+ FixedArray fixed_array = FixedArray::cast(array->elements());
if (fixed_array->length() < array_length) {
array_length = fixed_array->length();
}
@@ -307,7 +309,7 @@ RUNTIME_FUNCTION(Runtime_StringBuilderConcat) {
if (array_length == 0) {
return ReadOnlyRoots(isolate).empty_string();
} else if (array_length == 1) {
- Object* first = fixed_array->get(0);
+ Object first = fixed_array->get(0);
if (first->IsString()) return first;
}
length = StringBuilderConcatLength(special_length, fixed_array,
@@ -325,7 +327,8 @@ RUNTIME_FUNCTION(Runtime_StringBuilderConcat) {
Handle<SeqOneByteString> answer;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, answer, isolate->factory()->NewRawOneByteString(length));
- StringBuilderConcatHelper(*special, answer->GetChars(),
+ DisallowHeapAllocation no_gc;
+ StringBuilderConcatHelper(*special, answer->GetChars(no_gc),
FixedArray::cast(array->elements()),
array_length);
return *answer;
@@ -333,13 +336,15 @@ RUNTIME_FUNCTION(Runtime_StringBuilderConcat) {
Handle<SeqTwoByteString> answer;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, answer, isolate->factory()->NewRawTwoByteString(length));
- StringBuilderConcatHelper(*special, answer->GetChars(),
+ DisallowHeapAllocation no_gc;
+ StringBuilderConcatHelper(*special, answer->GetChars(no_gc),
FixedArray::cast(array->elements()),
array_length);
return *answer;
}
}
+// TODO(pwong): Remove once TypedArray.prototype.join() is ported to Torque.
RUNTIME_FUNCTION(Runtime_StringBuilderJoin) {
HandleScope scope(isolate);
DCHECK_EQ(3, args.length());
@@ -360,7 +365,7 @@ RUNTIME_FUNCTION(Runtime_StringBuilderJoin) {
if (array_length == 0) {
return ReadOnlyRoots(isolate).empty_string();
} else if (array_length == 1) {
- Object* first = fixed_array->get(0);
+ Object first = fixed_array->get(0);
CHECK(first->IsString());
return first;
}
@@ -374,9 +379,9 @@ RUNTIME_FUNCTION(Runtime_StringBuilderJoin) {
}
int length = (array_length - 1) * separator_length;
for (int i = 0; i < array_length; i++) {
- Object* element_obj = fixed_array->get(i);
+ Object element_obj = fixed_array->get(i);
CHECK(element_obj->IsString());
- String* element = String::cast(element_obj);
+ String element = String::cast(element_obj);
int increment = element->length();
if (increment > String::kMaxLength - length) {
STATIC_ASSERT(String::kMaxLength < kMaxInt);
@@ -392,14 +397,14 @@ RUNTIME_FUNCTION(Runtime_StringBuilderJoin) {
DisallowHeapAllocation no_gc;
- uc16* sink = answer->GetChars();
+ uc16* sink = answer->GetChars(no_gc);
#ifdef DEBUG
uc16* end = sink + length;
#endif
CHECK(fixed_array->get(0)->IsString());
- String* first = String::cast(fixed_array->get(0));
- String* separator_raw = *separator;
+ String first = String::cast(fixed_array->get(0));
+ String separator_raw = *separator;
int first_length = first->length();
String::WriteToFlat(first, sink, 0, first_length);
@@ -411,7 +416,7 @@ RUNTIME_FUNCTION(Runtime_StringBuilderJoin) {
sink += separator_length;
CHECK(fixed_array->get(i)->IsString());
- String* element = String::cast(fixed_array->get(i));
+ String element = String::cast(fixed_array->get(i));
int element_length = element->length();
DCHECK(sink + element_length <= end);
String::WriteToFlat(element, sink, 0, element_length);
@@ -425,7 +430,7 @@ RUNTIME_FUNCTION(Runtime_StringBuilderJoin) {
}
template <typename sinkchar>
-static void WriteRepeatToFlat(String* src, Vector<sinkchar> buffer, int cursor,
+static void WriteRepeatToFlat(String src, Vector<sinkchar> buffer, int cursor,
int repeat, int length) {
if (repeat == 0) return;
@@ -444,11 +449,12 @@ static void WriteRepeatToFlat(String* src, Vector<sinkchar> buffer, int cursor,
}
}
+// TODO(pwong): Remove once TypedArray.prototype.join() is ported to Torque.
template <typename Char>
-static void JoinSparseArrayWithSeparator(FixedArray* elements,
+static void JoinSparseArrayWithSeparator(FixedArray elements,
int elements_length,
uint32_t array_length,
- String* separator,
+ String separator,
Vector<Char> buffer) {
DisallowHeapAllocation no_gc;
int previous_separator_position = 0;
@@ -457,7 +463,7 @@ static void JoinSparseArrayWithSeparator(FixedArray* elements,
int cursor = 0;
for (int i = 0; i < elements_length; i += 2) {
int position = NumberToInt32(elements->get(i));
- String* string = String::cast(elements->get(i + 1));
+ String string = String::cast(elements->get(i + 1));
int string_length = string->length();
if (string->length() > 0) {
int repeat = position - previous_separator_position;
@@ -480,6 +486,7 @@ static void JoinSparseArrayWithSeparator(FixedArray* elements,
DCHECK(cursor <= buffer.length());
}
+// TODO(pwong): Remove once TypedArray.prototype.join() is ported to Torque.
RUNTIME_FUNCTION(Runtime_SparseJoinWithSeparator) {
HandleScope scope(isolate);
DCHECK_EQ(3, args.length());
@@ -500,11 +507,11 @@ RUNTIME_FUNCTION(Runtime_SparseJoinWithSeparator) {
CONVERT_NUMBER_CHECKED(int, elements_length, Int32, elements_array->length());
CHECK(elements_length <= elements_array->elements()->length());
CHECK_EQ(elements_length & 1, 0); // Even length.
- FixedArray* elements = FixedArray::cast(elements_array->elements());
+ FixedArray elements = FixedArray::cast(elements_array->elements());
{
DisallowHeapAllocation no_gc;
for (int i = 0; i < elements_length; i += 2) {
- String* string = String::cast(elements->get(i + 1));
+ String string = String::cast(elements->get(i + 1));
int length = string->length();
if (is_one_byte && !string->IsOneByteRepresentation()) {
is_one_byte = false;
@@ -547,19 +554,21 @@ RUNTIME_FUNCTION(Runtime_SparseJoinWithSeparator) {
Handle<SeqOneByteString> result = isolate->factory()
->NewRawOneByteString(string_length)
.ToHandleChecked();
+ DisallowHeapAllocation no_gc;
JoinSparseArrayWithSeparator<uint8_t>(
FixedArray::cast(elements_array->elements()), elements_length,
array_length, *separator,
- Vector<uint8_t>(result->GetChars(), string_length));
+ Vector<uint8_t>(result->GetChars(no_gc), string_length));
return *result;
} else {
Handle<SeqTwoByteString> result = isolate->factory()
->NewRawTwoByteString(string_length)
.ToHandleChecked();
+ DisallowHeapAllocation no_gc;
JoinSparseArrayWithSeparator<uc16>(
FixedArray::cast(elements_array->elements()), elements_length,
array_length, *separator,
- Vector<uc16>(result->GetChars(), string_length));
+ Vector<uc16>(result->GetChars(no_gc), string_length));
return *result;
}
}
@@ -569,25 +578,23 @@ RUNTIME_FUNCTION(Runtime_SparseJoinWithSeparator) {
// not in the cache and fills the remainder with smi zeros. Returns
// the length of the successfully copied prefix.
static int CopyCachedOneByteCharsToArray(Heap* heap, const uint8_t* chars,
- FixedArray* elements, int length) {
+ FixedArray elements, int length) {
DisallowHeapAllocation no_gc;
- FixedArray* one_byte_cache = heap->single_character_string_cache();
- Object* undefined = ReadOnlyRoots(heap).undefined_value();
+ FixedArray one_byte_cache = heap->single_character_string_cache();
+ Object undefined = ReadOnlyRoots(heap).undefined_value();
int i;
WriteBarrierMode mode = elements->GetWriteBarrierMode(no_gc);
for (i = 0; i < length; ++i) {
- Object* value = one_byte_cache->get(chars[i]);
+ Object value = one_byte_cache->get(chars[i]);
if (value == undefined) break;
elements->set(i, value, mode);
}
if (i < length) {
- static_assert(Smi::kZero == nullptr,
- "Can use memset since Smi::kZero is 0");
- memset(elements->data_start() + i, 0, kPointerSize * (length - i));
+ MemsetTagged(elements->RawFieldOfElementAt(i), Smi::kZero, length - i);
}
#ifdef DEBUG
for (int j = 0; j < length; ++j) {
- Object* element = elements->get(j);
+ Object element = elements->get(j);
DCHECK(element == Smi::kZero ||
(element->IsString() && String::cast(element)->LooksValid()));
}
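The memset was only valid while a Smi zero was the all-zero bit pattern; MemsetTagged instead stores the tagged zero word explicitly, which stays correct if the tagged representation of zero is ever nonzero bytes. A sketch of the word-fill such a helper is assumed to perform:

#include <cstddef>
#include <cstdint>

// Fill count tagged slots with one tagged word. Unlike memset, this remains
// correct when the tagged value is not an all-zero byte pattern.
void MemsetTaggedSketch(uintptr_t* start, uintptr_t tagged_value,
                        size_t count) {
  for (size_t i = 0; i < count; ++i) start[i] = tagged_value;
}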
@@ -613,7 +620,7 @@ RUNTIME_FUNCTION(Runtime_StringToArray) {
elements = isolate->factory()->NewUninitializedFixedArray(length);
DisallowHeapAllocation no_gc;
- String::FlatContent content = s->GetFlatContent();
+ String::FlatContent content = s->GetFlatContent(no_gc);
if (content.IsOneByte()) {
Vector<const uint8_t> chars = content.ToOneByteVector();
// Note, this will initialize all elements (not only the prefix)
@@ -621,8 +628,8 @@ RUNTIME_FUNCTION(Runtime_StringToArray) {
position = CopyCachedOneByteCharsToArray(isolate->heap(), chars.start(),
*elements, length);
} else {
- MemsetPointer(elements->data_start(),
- ReadOnlyRoots(isolate).undefined_value(), length);
+ MemsetTagged(elements->data_start(),
+ ReadOnlyRoots(isolate).undefined_value(), length);
}
} else {
elements = isolate->factory()->NewFixedArray(length);
diff --git a/deps/v8/src/runtime/runtime-symbol.cc b/deps/v8/src/runtime/runtime-symbol.cc
index 0c9ea75d5d..8cd48505d2 100644
--- a/deps/v8/src/runtime/runtime-symbol.cc
+++ b/deps/v8/src/runtime/runtime-symbol.cc
@@ -3,6 +3,7 @@
// found in the LICENSE file.
#include "src/arguments-inl.h"
+#include "src/counters.h"
#include "src/isolate-inl.h"
#include "src/objects-inl.h"
#include "src/runtime/runtime-utils.h"
@@ -23,10 +24,11 @@ RUNTIME_FUNCTION(Runtime_CreatePrivateSymbol) {
return *symbol;
}
-RUNTIME_FUNCTION(Runtime_CreatePrivateFieldSymbol) {
+RUNTIME_FUNCTION(Runtime_CreatePrivateNameSymbol) {
HandleScope scope(isolate);
- DCHECK_EQ(0, args.length());
- Handle<Symbol> symbol = isolate->factory()->NewPrivateFieldSymbol();
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(String, name, 0);
+ Handle<Symbol> symbol = isolate->factory()->NewPrivateNameSymbol(name);
return *symbol;
}
diff --git a/deps/v8/src/runtime/runtime-test.cc b/deps/v8/src/runtime/runtime-test.cc
index bcc36e9d87..40ca5de401 100644
--- a/deps/v8/src/runtime/runtime-test.cc
+++ b/deps/v8/src/runtime/runtime-test.cc
@@ -13,9 +13,13 @@
#include "src/base/platform/mutex.h"
#include "src/compiler-dispatcher/optimizing-compile-dispatcher.h"
#include "src/compiler.h"
+#include "src/counters.h"
#include "src/deoptimizer.h"
#include "src/frames-inl.h"
#include "src/isolate-inl.h"
+#include "src/objects/heap-object-inl.h"
+#include "src/objects/smi.h"
+#include "src/ostreams.h"
#include "src/runtime-profiler.h"
#include "src/snapshot/natives.h"
#include "src/trap-handler/trap-handler.h"
@@ -34,20 +38,20 @@ struct WasmCompileControls {
uint32_t MaxWasmBufferSize = std::numeric_limits<uint32_t>::max();
bool AllowAnySizeForAsync = true;
};
+using WasmCompileControlsMap = std::map<v8::Isolate*, WasmCompileControls>;
// We need per-isolate controls, because we sometimes run tests in multiple
// isolates concurrently. Methods need to hold the accompanying mutex on access.
// To avoid upsetting the static initializer count, we lazy initialize this.
-base::LazyInstance<std::map<v8::Isolate*, WasmCompileControls>>::type
- g_PerIsolateWasmControls = LAZY_INSTANCE_INITIALIZER;
-base::LazyInstance<base::Mutex>::type g_PerIsolateWasmControlsMutex =
- LAZY_INSTANCE_INITIALIZER;
+DEFINE_LAZY_LEAKY_OBJECT_GETTER(WasmCompileControlsMap,
+ GetPerIsolateWasmControls);
+base::LazyMutex g_PerIsolateWasmControlsMutex = LAZY_MUTEX_INITIALIZER;
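DEFINE_LAZY_LEAKY_OBJECT_GETTER replaces the base::LazyInstance pair with a construct-on-first-use getter that deliberately never destructs, so the map stays usable even during late process shutdown. A plausible expansion of the pattern, not the actual macro body:

#include <map>

struct WasmControlsSketch { unsigned max_buffer_size = ~0u; };
using ControlsMapSketch = std::map<int, WasmControlsSketch>;

// Construct on first use, never destruct: the pointer is intentionally
// leaked so the object cannot be torn down under a late caller.
ControlsMapSketch* GetControlsMapSketch() {
  static ControlsMapSketch* map = new ControlsMapSketch();
  return map;
}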
bool IsWasmCompileAllowed(v8::Isolate* isolate, v8::Local<v8::Value> value,
bool is_async) {
- base::LockGuard<base::Mutex> guard(g_PerIsolateWasmControlsMutex.Pointer());
- DCHECK_GT(g_PerIsolateWasmControls.Get().count(isolate), 0);
- const WasmCompileControls& ctrls = g_PerIsolateWasmControls.Get().at(isolate);
+ base::MutexGuard guard(g_PerIsolateWasmControlsMutex.Pointer());
+ DCHECK_GT(GetPerIsolateWasmControls()->count(isolate), 0);
+ const WasmCompileControls& ctrls = GetPerIsolateWasmControls()->at(isolate);
return (is_async && ctrls.AllowAnySizeForAsync) ||
(value->IsArrayBuffer() &&
v8::Local<v8::ArrayBuffer>::Cast(value)->ByteLength() <=
@@ -58,16 +62,17 @@ bool IsWasmCompileAllowed(v8::Isolate* isolate, v8::Local<v8::Value> value,
bool IsWasmInstantiateAllowed(v8::Isolate* isolate,
v8::Local<v8::Value> module_or_bytes,
bool is_async) {
- base::LockGuard<base::Mutex> guard(g_PerIsolateWasmControlsMutex.Pointer());
- DCHECK_GT(g_PerIsolateWasmControls.Get().count(isolate), 0);
- const WasmCompileControls& ctrls = g_PerIsolateWasmControls.Get().at(isolate);
+ base::MutexGuard guard(g_PerIsolateWasmControlsMutex.Pointer());
+ DCHECK_GT(GetPerIsolateWasmControls()->count(isolate), 0);
+ const WasmCompileControls& ctrls = GetPerIsolateWasmControls()->at(isolate);
if (is_async && ctrls.AllowAnySizeForAsync) return true;
if (!module_or_bytes->IsWebAssemblyCompiledModule()) {
return IsWasmCompileAllowed(isolate, module_or_bytes, is_async);
}
- v8::Local<v8::WasmCompiledModule> module =
- v8::Local<v8::WasmCompiledModule>::Cast(module_or_bytes);
- return static_cast<uint32_t>(module->GetWasmWireBytesRef().size) <=
+ v8::Local<v8::WasmModuleObject> module =
+ v8::Local<v8::WasmModuleObject>::Cast(module_or_bytes);
+ return static_cast<uint32_t>(
+ module->GetCompiledModule().GetWireBytesRef().size()) <=
ctrls.MaxWasmBufferSize;
}
@@ -156,7 +161,6 @@ RUNTIME_FUNCTION(Runtime_DeoptimizeFunction) {
return ReadOnlyRoots(isolate).undefined_value();
}
-
RUNTIME_FUNCTION(Runtime_DeoptimizeNow) {
HandleScope scope(isolate);
DCHECK_EQ(0, args.length());
@@ -176,7 +180,6 @@ RUNTIME_FUNCTION(Runtime_DeoptimizeNow) {
return ReadOnlyRoots(isolate).undefined_value();
}
-
RUNTIME_FUNCTION(Runtime_RunningInSimulator) {
SealHandleScope shs(isolate);
DCHECK_EQ(0, args.length());
@@ -187,6 +190,11 @@ RUNTIME_FUNCTION(Runtime_RunningInSimulator) {
#endif
}
+RUNTIME_FUNCTION(Runtime_ICsAreEnabled) {
+ SealHandleScope shs(isolate);
+ DCHECK_EQ(0, args.length());
+ return isolate->heap()->ToBoolean(FLAG_use_ic);
+}
RUNTIME_FUNCTION(Runtime_IsConcurrentRecompilationSupported) {
SealHandleScope shs(isolate);
@@ -219,8 +227,16 @@ RUNTIME_FUNCTION(Runtime_OptimizeFunctionOnNextCall) {
}
// If function isn't compiled, compile it now.
- if (!function->shared()->is_compiled() &&
- !Compiler::Compile(function, Compiler::CLEAR_EXCEPTION)) {
+ IsCompiledScope is_compiled_scope(function->shared()->is_compiled_scope());
+ if (!is_compiled_scope.is_compiled() &&
+ !Compiler::Compile(function, Compiler::CLEAR_EXCEPTION,
+ &is_compiled_scope)) {
+ return ReadOnlyRoots(isolate).undefined_value();
+ }
+
+ if (function->shared()->optimization_disabled() &&
+ function->shared()->disable_optimization_reason() ==
+ BailoutReason::kNeverOptimize) {
return ReadOnlyRoots(isolate).undefined_value();
}
@@ -237,8 +253,13 @@ RUNTIME_FUNCTION(Runtime_OptimizeFunctionOnNextCall) {
ConcurrencyMode concurrency_mode = ConcurrencyMode::kNotConcurrent;
if (args.length() == 2) {
- CONVERT_ARG_HANDLE_CHECKED(String, type, 1);
- if (type->IsOneByteEqualTo(STATIC_CHAR_VECTOR("concurrent")) &&
+ // Ignore invalid inputs produced by fuzzers.
+ CONVERT_ARG_HANDLE_CHECKED(Object, type, 1);
+ if (!type->IsString()) {
+ return ReadOnlyRoots(isolate).undefined_value();
+ }
+ if (Handle<String>::cast(type)->IsOneByteEqualTo(
+ StaticCharVector("concurrent")) &&
isolate->concurrent_recompilation_enabled()) {
concurrency_mode = ConcurrencyMode::kConcurrent;
}
@@ -282,6 +303,12 @@ RUNTIME_FUNCTION(Runtime_OptimizeOsr) {
// If the function is already optimized, just return.
if (function->IsOptimized()) return ReadOnlyRoots(isolate).undefined_value();
+ if (function->shared()->optimization_disabled() &&
+ function->shared()->disable_optimization_reason() ==
+ BailoutReason::kNeverOptimize) {
+ return ReadOnlyRoots(isolate).undefined_value();
+ }
+
// Ensure that the function is marked for non-concurrent optimization, so that
// subsequent runs don't also optimize.
if (!function->HasOptimizedCode()) {
@@ -296,7 +323,8 @@ RUNTIME_FUNCTION(Runtime_OptimizeOsr) {
// Make the profiler arm all back edges in unoptimized code.
if (it.frame()->type() == StackFrame::INTERPRETED) {
isolate->runtime_profiler()->AttemptOnStackReplacement(
- it.frame(), AbstractCode::kMaxLoopNestingMarker);
+ InterpretedFrame::cast(it.frame()),
+ AbstractCode::kMaxLoopNestingMarker);
}
return ReadOnlyRoots(isolate).undefined_value();
@@ -313,8 +341,7 @@ RUNTIME_FUNCTION(Runtime_NeverOptimizeFunction) {
return ReadOnlyRoots(isolate).undefined_value();
}
Handle<JSFunction> function = Handle<JSFunction>::cast(function_object);
- function->shared()->DisableOptimization(
- BailoutReason::kOptimizationDisabledForTest);
+ function->shared()->DisableOptimization(BailoutReason::kNeverOptimize);
return ReadOnlyRoots(isolate).undefined_value();
}
@@ -322,6 +349,9 @@ RUNTIME_FUNCTION(Runtime_GetOptimizationStatus) {
HandleScope scope(isolate);
DCHECK(args.length() == 1 || args.length() == 2);
int status = 0;
+ if (FLAG_lite_mode) {
+ status |= static_cast<int>(OptimizationStatus::kLiteMode);
+ }
if (!isolate->use_optimizer()) {
status |= static_cast<int>(OptimizationStatus::kNeverOptimize);
}
@@ -347,7 +377,7 @@ RUNTIME_FUNCTION(Runtime_GetOptimizationStatus) {
if (!sync_object->IsString())
return ReadOnlyRoots(isolate).undefined_value();
Handle<String> sync = Handle<String>::cast(sync_object);
- if (sync->IsOneByteEqualTo(STATIC_CHAR_VECTOR("no sync"))) {
+ if (sync->IsOneByteEqualTo(StaticCharVector("no sync"))) {
sync_with_compiler_thread = false;
}
}
@@ -433,7 +463,7 @@ RUNTIME_FUNCTION(Runtime_GetUndetectable) {
desc->SetCallAsFunctionHandler(ReturnThis);
Local<v8::Object> obj;
if (!desc->NewInstance(v8_isolate->GetCurrentContext()).ToLocal(&obj)) {
- return nullptr;
+ return Object();
}
return *Utils::OpenHandle(*obj);
}
@@ -481,8 +511,8 @@ RUNTIME_FUNCTION(Runtime_SetWasmCompileControls) {
CHECK_EQ(args.length(), 2);
CONVERT_ARG_HANDLE_CHECKED(Smi, block_size, 0);
CONVERT_BOOLEAN_ARG_CHECKED(allow_async, 1);
- base::LockGuard<base::Mutex> guard(g_PerIsolateWasmControlsMutex.Pointer());
- WasmCompileControls& ctrl = (*g_PerIsolateWasmControls.Pointer())[v8_isolate];
+ base::MutexGuard guard(g_PerIsolateWasmControlsMutex.Pointer());
+ WasmCompileControls& ctrl = (*GetPerIsolateWasmControls())[v8_isolate];
ctrl.AllowAnySizeForAsync = allow_async;
ctrl.MaxWasmBufferSize = static_cast<uint32_t>(block_size->value());
v8_isolate->SetWasmModuleCallback(WasmModuleOverride);
@@ -533,27 +563,17 @@ RUNTIME_FUNCTION(Runtime_DebugPrint) {
SealHandleScope shs(isolate);
DCHECK_EQ(1, args.length());
- // Hack: The argument is passed as Object* but here it's really a
- // MaybeObject*.
- MaybeObject* maybe_object = reinterpret_cast<MaybeObject*>(args[0]);
+ MaybeObject maybe_object(*args.address_of_arg_at(0));
StdoutStream os;
if (maybe_object->IsCleared()) {
os << "[weak cleared]";
} else {
- Object* object;
- HeapObject* heap_object;
- bool weak = false;
- if (maybe_object->GetHeapObjectIfWeak(&heap_object)) {
- weak = true;
- object = heap_object;
- } else {
- // Strong reference or SMI.
- object = maybe_object->cast<Object>();
- }
+ Object object = maybe_object.GetHeapObjectOrSmi();
+ bool weak = maybe_object.IsWeak();
#ifdef DEBUG
- if (object->IsString() && isolate->context() != nullptr) {
+ if (object->IsString() && !isolate->context().is_null()) {
DCHECK(!weak);
// If we have a string, assume it's a code "marker"
// and print some interesting cpu debugging info.
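The rewritten Runtime_DebugPrint leans on MaybeObject's own accessors: weakness is encoded in the low tag bits of the word, so IsWeak() tests the tag and GetHeapObjectOrSmi() strips it. A toy encoding of that idea (the actual tag values are a V8 detail and are assumed here):

#include <cstdint>

// Invented tag layout: bit 1 marks a weak heap reference. V8's real tagging
// scheme differs in detail but follows the same strip-the-bits idea.
constexpr uintptr_t kWeakTagSketch = 0x2;

bool IsWeakSketch(uintptr_t maybe_object) {
  return (maybe_object & kWeakTagSketch) != 0;
}
uintptr_t GetHeapObjectOrSmiSketch(uintptr_t maybe_object) {
  return maybe_object & ~kWeakTagSketch;
}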
@@ -625,7 +645,7 @@ RUNTIME_FUNCTION(Runtime_DebugTrackRetainingPath) {
if (args.length() == 2) {
CONVERT_ARG_HANDLE_CHECKED(String, str, 1);
const char track_ephemeron_path[] = "track-ephemeron-path";
- if (str->IsOneByteEqualTo(STATIC_CHAR_VECTOR(track_ephemeron_path))) {
+ if (str->IsOneByteEqualTo(StaticCharVector(track_ephemeron_path))) {
option = RetainingPathOption::kTrackEphemeronPath;
} else if (str->length() != 0) {
PrintF("Unexpected second argument of DebugTrackRetainingPath.\n");
@@ -695,7 +715,7 @@ RUNTIME_FUNCTION(Runtime_AbortJS) {
CONVERT_ARG_HANDLE_CHECKED(String, message, 0);
if (FLAG_disable_abortjs) {
base::OS::PrintError("[disabled] abort: %s\n", message->ToCString().get());
- return nullptr;
+ return Object();
}
base::OS::PrintError("abort: %s\n", message->ToCString().get());
isolate->PrintStack(stderr);
@@ -710,8 +730,9 @@ RUNTIME_FUNCTION(Runtime_DisassembleFunction) {
DCHECK_EQ(1, args.length());
// Get the function and make sure it is compiled.
CONVERT_ARG_HANDLE_CHECKED(JSFunction, func, 0);
+ IsCompiledScope is_compiled_scope;
if (!func->is_compiled() &&
- !Compiler::Compile(func, Compiler::KEEP_EXCEPTION)) {
+ !Compiler::Compile(func, Compiler::KEEP_EXCEPTION, &is_compiled_scope)) {
return ReadOnlyRoots(isolate).exception();
}
StdoutStream os;
@@ -835,6 +856,12 @@ RUNTIME_FUNCTION(Runtime_IsWasmTrapHandlerEnabled) {
return isolate->heap()->ToBoolean(trap_handler::IsTrapHandlerEnabled());
}
+RUNTIME_FUNCTION(Runtime_IsThreadInWasm) {
+ DisallowHeapAllocation no_gc;
+ DCHECK_EQ(0, args.length());
+ return isolate->heap()->ToBoolean(trap_handler::IsThreadInWasm());
+}
+
RUNTIME_FUNCTION(Runtime_GetWasmRecoveredTrapCount) {
HandleScope scope(isolate);
DCHECK_EQ(0, args.length());
@@ -863,10 +890,13 @@ RUNTIME_FUNCTION(Runtime_GetWasmExceptionValues) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSReceiver, exception, 0);
- RETURN_RESULT_OR_FAILURE(
- isolate, JSReceiver::GetProperty(
- isolate, exception,
- isolate->factory()->wasm_exception_values_symbol()));
+ Handle<Object> values_obj;
+ CHECK(JSReceiver::GetProperty(
+ isolate, exception,
+ isolate->factory()->wasm_exception_values_symbol())
+ .ToHandle(&values_obj));
+ Handle<FixedArray> values = Handle<FixedArray>::cast(values_obj);
+ return *isolate->factory()->NewJSArrayWithElements(values);
}
namespace {
@@ -921,18 +951,16 @@ RUNTIME_FUNCTION(Runtime_ArraySpeciesProtector) {
return isolate->heap()->ToBoolean(isolate->IsArraySpeciesLookupChainIntact());
}
-RUNTIME_FUNCTION(Runtime_TypedArraySpeciesProtector) {
+RUNTIME_FUNCTION(Runtime_MapIteratorProtector) {
SealHandleScope shs(isolate);
DCHECK_EQ(0, args.length());
- return isolate->heap()->ToBoolean(
- isolate->IsTypedArraySpeciesLookupChainIntact());
+ return isolate->heap()->ToBoolean(isolate->IsMapIteratorLookupChainIntact());
}
-RUNTIME_FUNCTION(Runtime_PromiseSpeciesProtector) {
+RUNTIME_FUNCTION(Runtime_SetIteratorProtector) {
SealHandleScope shs(isolate);
DCHECK_EQ(0, args.length());
- return isolate->heap()->ToBoolean(
- isolate->IsPromiseSpeciesLookupChainIntact());
+ return isolate->heap()->ToBoolean(isolate->IsSetIteratorLookupChainIntact());
}
RUNTIME_FUNCTION(Runtime_StringIteratorProtector) {
@@ -950,10 +978,11 @@ RUNTIME_FUNCTION(Runtime_SerializeWasmModule) {
CONVERT_ARG_HANDLE_CHECKED(WasmModuleObject, module_obj, 0);
wasm::NativeModule* native_module = module_obj->native_module();
- wasm::WasmSerializer wasm_serializer(isolate, native_module);
+ wasm::WasmSerializer wasm_serializer(native_module);
size_t compiled_size = wasm_serializer.GetSerializedNativeModuleSize();
void* array_data = isolate->array_buffer_allocator()->Allocate(compiled_size);
- Handle<JSArrayBuffer> array_buffer = isolate->factory()->NewJSArrayBuffer();
+ Handle<JSArrayBuffer> array_buffer =
+ isolate->factory()->NewJSArrayBuffer(SharedFlag::kNotShared);
JSArrayBuffer::Setup(array_buffer, isolate, false, array_data, compiled_size);
if (!array_data ||
!wasm_serializer.SerializeNativeModule(
@@ -1009,7 +1038,7 @@ RUNTIME_FUNCTION(Runtime_WasmGetNumberOfInstances) {
DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(WasmModuleObject, module_obj, 0);
int instance_count = 0;
- WeakArrayList* weak_instance_list = module_obj->weak_instance_list();
+ WeakArrayList weak_instance_list = module_obj->weak_instance_list();
for (int i = 0; i < weak_instance_list->length(); ++i) {
if (weak_instance_list->Get(i)->IsWeak()) instance_count++;
}
@@ -1020,7 +1049,7 @@ RUNTIME_FUNCTION(Runtime_WasmNumInterpretedCalls) {
DCHECK_EQ(1, args.length());
HandleScope scope(isolate);
CONVERT_ARG_HANDLE_CHECKED(WasmInstanceObject, instance, 0);
- if (!instance->has_debug_info()) return nullptr;
+ if (!instance->has_debug_info()) return Object();
uint64_t num = instance->debug_info()->NumInterpretedCalls();
return *isolate->factory()->NewNumberFromSize(static_cast<size_t>(num));
}
@@ -1043,7 +1072,7 @@ RUNTIME_FUNCTION(Runtime_WasmTraceMemory) {
CONVERT_ARG_CHECKED(Smi, info_addr, 0);
wasm::MemoryTracingInfo* info =
- reinterpret_cast<wasm::MemoryTracingInfo*>(info_addr);
+ reinterpret_cast<wasm::MemoryTracingInfo*>(info_addr.ptr());
// Find the caller wasm frame.
StackTraceFrameIterator it(isolate);
@@ -1071,11 +1100,10 @@ RUNTIME_FUNCTION(Runtime_WasmTierUpFunction) {
DCHECK_EQ(2, args.length());
CONVERT_ARG_HANDLE_CHECKED(WasmInstanceObject, instance, 0);
CONVERT_SMI_ARG_CHECKED(function_index, 1);
- if (!isolate->wasm_engine()->CompileFunction(
- isolate, instance->module_object()->native_module(), function_index,
- wasm::ExecutionTier::kOptimized)) {
- return ReadOnlyRoots(isolate).exception();
- }
+ auto* native_module = instance->module_object()->native_module();
+ isolate->wasm_engine()->CompileFunction(
+ isolate, native_module, function_index, wasm::ExecutionTier::kOptimized);
+ CHECK(!native_module->compilation_state()->failed());
return ReadOnlyRoots(isolate).undefined_value();
}
diff --git a/deps/v8/src/runtime/runtime-typedarray.cc b/deps/v8/src/runtime/runtime-typedarray.cc
index 8a9d6fe366..850a68e28f 100644
--- a/deps/v8/src/runtime/runtime-typedarray.cc
+++ b/deps/v8/src/runtime/runtime-typedarray.cc
@@ -3,10 +3,11 @@
// found in the LICENSE file.
#include "src/arguments-inl.h"
+#include "src/counters.h"
#include "src/elements.h"
#include "src/heap/factory.h"
#include "src/heap/heap-inl.h"
-#include "src/messages.h"
+#include "src/message-template.h"
#include "src/objects-inl.h"
#include "src/objects/js-array-buffer-inl.h"
#include "src/runtime/runtime-utils.h"
@@ -15,7 +16,7 @@
namespace v8 {
namespace internal {
-RUNTIME_FUNCTION(Runtime_ArrayBufferNeuter) {
+RUNTIME_FUNCTION(Runtime_ArrayBufferDetach) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
Handle<Object> argument = args.at(0);
@@ -26,21 +27,21 @@ RUNTIME_FUNCTION(Runtime_ArrayBufferNeuter) {
isolate, NewTypeError(MessageTemplate::kNotTypedArray));
}
Handle<JSArrayBuffer> array_buffer = Handle<JSArrayBuffer>::cast(argument);
- if (!array_buffer->is_neuterable()) {
+ if (!array_buffer->is_detachable()) {
return ReadOnlyRoots(isolate).undefined_value();
}
if (array_buffer->backing_store() == nullptr) {
CHECK_EQ(0, array_buffer->byte_length());
return ReadOnlyRoots(isolate).undefined_value();
}
- // Shared array buffers should never be neutered.
+ // Shared array buffers should never be detached.
CHECK(!array_buffer->is_shared());
DCHECK(!array_buffer->is_external());
void* backing_store = array_buffer->backing_store();
size_t byte_length = array_buffer->byte_length();
array_buffer->set_is_external(true);
isolate->heap()->UnregisterArrayBuffer(*array_buffer);
- array_buffer->Neuter();
+ array_buffer->Detach();
isolate->array_buffer_allocator()->Free(backing_store, byte_length);
return ReadOnlyRoots(isolate).undefined_value();
}
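The neuter-to-detach rename keeps the same teardown order, and the order matters: mark the buffer external, unregister it from heap accounting, detach it from the JS object, and only then free the backing store. A sketch of that ordering with invented types:

#include <cstdlib>

struct ArrayBufferSketch {
  void* backing_store = nullptr;
  bool is_external = false;
  void Detach() { backing_store = nullptr; }  // views observe length 0 afterwards
};

void DetachAndFreeSketch(ArrayBufferSketch& buffer) {
  void* store = buffer.backing_store;
  buffer.is_external = true;  // step 1: the GC no longer owns the memory
  // step 2 in V8: unregister the buffer from heap accounting (omitted here)
  buffer.Detach();            // step 3: drop the pointer before freeing
  std::free(store);           // step 4: release the backing store
}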
@@ -66,10 +67,10 @@ RUNTIME_FUNCTION(Runtime_TypedArrayGetLength) {
return holder->length();
}
-RUNTIME_FUNCTION(Runtime_ArrayBufferViewWasNeutered) {
+RUNTIME_FUNCTION(Runtime_ArrayBufferViewWasDetached) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
- return isolate->heap()->ToBoolean(JSTypedArray::cast(args[0])->WasNeutered());
+ return isolate->heap()->ToBoolean(JSTypedArray::cast(args[0])->WasDetached());
}
RUNTIME_FUNCTION(Runtime_TypedArrayGetBuffer) {
@@ -115,8 +116,8 @@ RUNTIME_FUNCTION(Runtime_TypedArraySortFast) {
isolate, array, JSTypedArray::Validate(isolate, target_obj, method));
// This line can be removed when JSTypedArray::Validate throws
- // if array.[[ViewedArrayBuffer]] is neutered(v8:4648)
- if (V8_UNLIKELY(array->WasNeutered())) return *array;
+ // if array.[[ViewedArrayBuffer]] is detached (v8:4648)
+ if (V8_UNLIKELY(array->WasDetached())) return *array;
size_t length = array->length_value();
if (length <= 1) return *array;
@@ -155,7 +156,7 @@ RUNTIME_FUNCTION(Runtime_TypedArraySet) {
Handle<Object> obj = args.at(1);
Handle<Smi> offset = args.at<Smi>(2);
- DCHECK(!target->WasNeutered()); // Checked in TypedArrayPrototypeSet.
+ DCHECK(!target->WasDetached()); // Checked in TypedArrayPrototypeSet.
DCHECK(!obj->IsJSTypedArray()); // Should be handled by CSA.
DCHECK_LE(0, offset->value());
diff --git a/deps/v8/src/runtime/runtime-utils.h b/deps/v8/src/runtime/runtime-utils.h
index e58934ba33..7d35010435 100644
--- a/deps/v8/src/runtime/runtime-utils.h
+++ b/deps/v8/src/runtime/runtime-utils.h
@@ -7,6 +7,7 @@
#include "src/base/logging.h"
#include "src/globals.h"
+#include "src/objects.h"
#include "src/runtime/runtime.h"
namespace v8 {
@@ -17,7 +18,7 @@ namespace internal {
// expected type we crash safely.
#define CONVERT_ARG_CHECKED(Type, name, index) \
CHECK(args[index]->Is##Type()); \
- Type* name = Type::cast(args[index]);
+ Type name = Type::cast(args[index]);
#define CONVERT_ARG_HANDLE_CHECKED(Type, name, index) \
CHECK(args[index]->Is##Type()); \
@@ -109,43 +110,23 @@ namespace internal {
// allocated by the caller, and passed as a pointer in a hidden first parameter.
#ifdef V8_HOST_ARCH_64_BIT
struct ObjectPair {
- Object* x;
- Object* y;
+ Address x;
+ Address y;
};
-
-static inline ObjectPair MakePair(Object* x, Object* y) {
- ObjectPair result = {x, y};
+static inline ObjectPair MakePair(Object x, Object y) {
+ ObjectPair result = {x->ptr(), y->ptr()};
// Pointers x and y returned in rax and rdx, in AMD-x64-abi.
// In Win64 they are assigned to a hidden first argument.
return result;
}
-#elif V8_TARGET_ARCH_X64 && V8_TARGET_ARCH_32_BIT
-// For x32 a 128-bit struct return is done as rax and rdx from the ObjectPair
-// are used in generated code. An alternative is using uint64_t and modifying
-// generated code.
-struct ObjectPair {
- Object* x;
- uint32_t x_upper;
- Object* y;
- uint32_t y_upper;
-};
-
-
-static inline ObjectPair MakePair(Object* x, Object* y) {
- ObjectPair result = {x, 0, y, 0};
- // Pointers x and y returned in rax and rdx, in x32-abi.
- return result;
-}
#else
typedef uint64_t ObjectPair;
-static inline ObjectPair MakePair(Object* x, Object* y) {
+static inline ObjectPair MakePair(Object x, Object y) {
#if defined(V8_TARGET_LITTLE_ENDIAN)
- return reinterpret_cast<uint32_t>(x) |
- (reinterpret_cast<ObjectPair>(y) << 32);
+ return x->ptr() | (static_cast<ObjectPair>(y->ptr()) << 32);
#elif defined(V8_TARGET_BIG_ENDIAN)
- return reinterpret_cast<uint32_t>(y) |
- (reinterpret_cast<ObjectPair>(x) << 32);
+ return y->ptr() | (static_cast<ObjectPair>(x->ptr()) << 32);
#else
#error Unknown endianness
#endif
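
On 32-bit targets the branch above packs two tagged values into a single
uint64_t so the pair can be returned in registers. A minimal standalone
sketch of the little-endian encoding and its inverse (names hypothetical):

#include <cstdint>

using ObjectPairSketch = uint64_t;

// First value in the low word, second in the high word, mirroring the
// V8_TARGET_LITTLE_ENDIAN case above.
ObjectPairSketch MakePairLE(uint32_t x_tagged, uint32_t y_tagged) {
  return static_cast<uint64_t>(x_tagged) |
         (static_cast<uint64_t>(y_tagged) << 32);
}

uint32_t PairFirst(ObjectPairSketch p) { return static_cast<uint32_t>(p); }
uint32_t PairSecond(ObjectPairSketch p) {
  return static_cast<uint32_t>(p >> 32);
}
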
diff --git a/deps/v8/src/runtime/runtime-wasm.cc b/deps/v8/src/runtime/runtime-wasm.cc
index f852df0d85..84ef744d8b 100644
--- a/deps/v8/src/runtime/runtime-wasm.cc
+++ b/deps/v8/src/runtime/runtime-wasm.cc
@@ -5,9 +5,11 @@
#include "src/arguments-inl.h"
#include "src/compiler/wasm-compiler.h"
#include "src/conversions.h"
+#include "src/counters.h"
#include "src/debug/debug.h"
#include "src/frame-constants.h"
#include "src/heap/factory.h"
+#include "src/message-template.h"
#include "src/objects-inl.h"
#include "src/objects/frame-array-inl.h"
#include "src/runtime/runtime-utils.h"
@@ -24,7 +26,7 @@ namespace internal {
namespace {
-Context* GetNativeContextFromWasmInstanceOnStackTop(Isolate* isolate) {
+WasmInstanceObject GetWasmInstanceOnStackTop(Isolate* isolate) {
StackFrameIterator it(isolate, isolate->thread_local_top());
// On top: C entry stub.
DCHECK_EQ(StackFrame::EXIT, it.frame()->type());
@@ -32,7 +34,11 @@ Context* GetNativeContextFromWasmInstanceOnStackTop(Isolate* isolate) {
// Next: the wasm compiled frame.
DCHECK(it.frame()->is_wasm_compiled());
WasmCompiledFrame* frame = WasmCompiledFrame::cast(it.frame());
- return frame->wasm_instance()->native_context();
+ return frame->wasm_instance();
+}
+
+Context GetNativeContextFromWasmInstanceOnStackTop(Isolate* isolate) {
+ return GetWasmInstanceOnStackTop(isolate)->native_context();
}
class ClearThreadInWasmScope {
@@ -50,11 +56,25 @@ class ClearThreadInWasmScope {
} // namespace
-RUNTIME_FUNCTION(Runtime_WasmGrowMemory) {
+RUNTIME_FUNCTION(Runtime_WasmIsValidAnyFuncValue) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(Object, function, 0);
+
+ if (function->IsNull(isolate)) {
+ return Smi::FromInt(true);
+ }
+ if (WasmExportedFunction::IsWasmExportedFunction(*function)) {
+ return Smi::FromInt(true);
+ }
+ return Smi::FromInt(false);
+}
+
+RUNTIME_FUNCTION(Runtime_WasmMemoryGrow) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
CONVERT_ARG_HANDLE_CHECKED(WasmInstanceObject, instance, 0);
- // {delta_pages} is checked to be a positive smi in the WasmGrowMemory builtin
+ // {delta_pages} is checked to be a positive smi in the WasmMemoryGrow builtin
// which calls this runtime function.
CONVERT_UINT32_ARG_CHECKED(delta_pages, 1);
@@ -63,7 +83,7 @@ RUNTIME_FUNCTION(Runtime_WasmGrowMemory) {
int ret = WasmMemoryObject::Grow(
isolate, handle(instance->memory_object(), isolate), delta_pages);
- // The WasmGrowMemory builtin which calls this runtime function expects us to
+ // The WasmMemoryGrow builtin which calls this runtime function expects us to
// always return a Smi.
return Smi::FromInt(ret);
}
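
For scale: wasm memory is sized in 64 KiB pages, so the {delta_pages}
argument above maps to bytes as in this illustrative helper:

#include <cstdint>

constexpr uint64_t kWasmPageSize = 64 * 1024;  // 65536 bytes per wasm page

uint64_t BytesForPages(uint32_t delta_pages) {
  return static_cast<uint64_t>(delta_pages) * kWasmPageSize;
}
// e.g. BytesForPages(2) == 131072
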
@@ -75,14 +95,14 @@ RUNTIME_FUNCTION(Runtime_ThrowWasmError) {
HandleScope scope(isolate);
Handle<Object> error_obj = isolate->factory()->NewWasmRuntimeError(
- static_cast<MessageTemplate::Template>(message_id));
+ MessageTemplateFromInt(message_id));
return isolate->Throw(*error_obj);
}
RUNTIME_FUNCTION(Runtime_ThrowWasmStackOverflow) {
SealHandleScope shs(isolate);
DCHECK_LE(0, args.length());
- DCHECK_NULL(isolate->context());
+ DCHECK(isolate->context().is_null());
isolate->set_context(GetNativeContextFromWasmInstanceOnStackTop(isolate));
return isolate->StackOverflow();
}
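
The recurring DCHECK_NULL -> DCHECK(...is_null()) rewrites in this file
follow from Object and Context becoming small value types that wrap a
tagged Address instead of being raw pointers. A hypothetical miniature of
that wrapper shape, for orientation only:

#include <cstdint>

using Address = uintptr_t;
constexpr Address kNullAddressSketch = 0;

class ObjectSketch {
 public:
  explicit ObjectSketch(Address ptr = kNullAddressSketch) : ptr_(ptr) {}
  Address ptr() const { return ptr_; }
  // "null" is now a distinguished field value, not a null pointer.
  bool is_null() const { return ptr_ == kNullAddressSketch; }

 private:
  Address ptr_;
};
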
@@ -98,26 +118,22 @@ RUNTIME_FUNCTION(Runtime_WasmThrowCreate) {
// TODO(kschimpf): Can this be replaced with equivalent TurboFan code/calls.
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
- DCHECK_NULL(isolate->context());
+ DCHECK(isolate->context().is_null());
isolate->set_context(GetNativeContextFromWasmInstanceOnStackTop(isolate));
CONVERT_ARG_CHECKED(HeapObject, tag_raw, 0);
CONVERT_SMI_ARG_CHECKED(size, 1);
// TODO(mstarzinger): Manually box because parameters are not visited yet.
Handle<Object> tag(tag_raw, isolate);
Handle<Object> exception = isolate->factory()->NewWasmRuntimeError(
- static_cast<MessageTemplate::Template>(
- MessageTemplate::kWasmExceptionError));
- CHECK(
- !JSReceiver::SetProperty(isolate, exception,
- isolate->factory()->wasm_exception_tag_symbol(),
- tag, LanguageMode::kStrict)
- .is_null());
- Handle<JSTypedArray> values =
- isolate->factory()->NewJSTypedArray(ElementsKind::UINT16_ELEMENTS, size);
- CHECK(!JSReceiver::SetProperty(
- isolate, exception,
- isolate->factory()->wasm_exception_values_symbol(), values,
- LanguageMode::kStrict)
+ MessageTemplate::kWasmExceptionError);
+ CHECK(!Object::SetProperty(isolate, exception,
+ isolate->factory()->wasm_exception_tag_symbol(),
+ tag, LanguageMode::kStrict)
+ .is_null());
+ Handle<FixedArray> values = isolate->factory()->NewFixedArray(size);
+ CHECK(!Object::SetProperty(isolate, exception,
+ isolate->factory()->wasm_exception_values_symbol(),
+ values, LanguageMode::kStrict)
.is_null());
return *exception;
}
@@ -126,7 +142,7 @@ RUNTIME_FUNCTION(Runtime_WasmExceptionGetTag) {
// TODO(kschimpf): Can this be replaced with equivalent TurboFan code/calls.
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
- DCHECK_NULL(isolate->context());
+ DCHECK(isolate->context().is_null());
isolate->set_context(GetNativeContextFromWasmInstanceOnStackTop(isolate));
CONVERT_ARG_CHECKED(Object, except_obj_raw, 0);
// TODO(mstarzinger): Manually box because parameters are not visited yet.
@@ -143,64 +159,24 @@ RUNTIME_FUNCTION(Runtime_WasmExceptionGetTag) {
return ReadOnlyRoots(isolate).undefined_value();
}
-RUNTIME_FUNCTION(Runtime_WasmExceptionGetElement) {
+RUNTIME_FUNCTION(Runtime_WasmExceptionGetValues) {
// TODO(kschimpf): Can this be replaced with equivalent TurboFan code/calls.
HandleScope scope(isolate);
- DCHECK_EQ(2, args.length());
- DCHECK_NULL(isolate->context());
- isolate->set_context(GetNativeContextFromWasmInstanceOnStackTop(isolate));
- CONVERT_ARG_CHECKED(Object, except_obj_raw, 0);
- // TODO(mstarzinger): Manually box because parameters are not visited yet.
- Handle<Object> except_obj(except_obj_raw, isolate);
- if (!except_obj.is_null() && except_obj->IsJSReceiver()) {
- Handle<JSReceiver> exception(JSReceiver::cast(*except_obj), isolate);
- Handle<Object> values_obj;
- if (JSReceiver::GetProperty(
- isolate, exception,
- isolate->factory()->wasm_exception_values_symbol())
- .ToHandle(&values_obj)) {
- if (values_obj->IsJSTypedArray()) {
- Handle<JSTypedArray> values = Handle<JSTypedArray>::cast(values_obj);
- CHECK_EQ(values->type(), kExternalUint16Array);
- CONVERT_SMI_ARG_CHECKED(index, 1);
- CHECK(!values->WasNeutered());
- CHECK_LT(index, Smi::ToInt(values->length()));
- auto* vals =
- reinterpret_cast<uint16_t*>(values->GetBuffer()->backing_store());
- return Smi::FromInt(vals[index]);
- }
- }
- }
- return Smi::FromInt(0);
-}
-
-RUNTIME_FUNCTION(Runtime_WasmExceptionSetElement) {
- // TODO(kschimpf): Can this be replaced with equivalent TurboFan code/calls.
- HandleScope scope(isolate);
- DCHECK_EQ(3, args.length());
- DCHECK_NULL(isolate->context());
+ DCHECK_EQ(1, args.length());
+ DCHECK(isolate->context().is_null());
isolate->set_context(GetNativeContextFromWasmInstanceOnStackTop(isolate));
CONVERT_ARG_CHECKED(Object, except_obj_raw, 0);
// TODO(mstarzinger): Manually box because parameters are not visited yet.
Handle<Object> except_obj(except_obj_raw, isolate);
if (!except_obj.is_null() && except_obj->IsJSReceiver()) {
Handle<JSReceiver> exception(JSReceiver::cast(*except_obj), isolate);
- Handle<Object> values_obj;
+ Handle<Object> values;
if (JSReceiver::GetProperty(
isolate, exception,
isolate->factory()->wasm_exception_values_symbol())
- .ToHandle(&values_obj)) {
- if (values_obj->IsJSTypedArray()) {
- Handle<JSTypedArray> values = Handle<JSTypedArray>::cast(values_obj);
- CHECK_EQ(values->type(), kExternalUint16Array);
- CONVERT_SMI_ARG_CHECKED(index, 1);
- CHECK(!values->WasNeutered());
- CHECK_LT(index, Smi::ToInt(values->length()));
- CONVERT_SMI_ARG_CHECKED(value, 2);
- auto* vals =
- reinterpret_cast<uint16_t*>(values->GetBuffer()->backing_store());
- vals[index] = static_cast<uint16_t>(value);
- }
+ .ToHandle(&values)) {
+ DCHECK(values->IsFixedArray());
+ return *values;
}
}
return ReadOnlyRoots(isolate).undefined_value();
@@ -217,7 +193,7 @@ RUNTIME_FUNCTION(Runtime_WasmRunInterpreter) {
// cast it back to the raw pointer.
CHECK(!arg_buffer_obj->IsHeapObject());
CHECK(arg_buffer_obj->IsSmi());
- Address arg_buffer = reinterpret_cast<Address>(*arg_buffer_obj);
+ Address arg_buffer = arg_buffer_obj->ptr();
ClearThreadInWasmScope wasm_flag;
@@ -237,7 +213,7 @@ RUNTIME_FUNCTION(Runtime_WasmRunInterpreter) {
}
// Set the current isolate's context.
- DCHECK_NULL(isolate->context());
+ DCHECK(isolate->context().is_null());
isolate->set_context(instance->native_context());
// Run the function in the interpreter. Note that neither the {WasmDebugInfo}
@@ -290,8 +266,121 @@ RUNTIME_FUNCTION(Runtime_WasmCompileLazy) {
Address entrypoint = wasm::CompileLazy(
isolate, instance->module_object()->native_module(), func_index);
- return reinterpret_cast<Object*>(entrypoint);
+ return Object(entrypoint);
+}
+
+// Should be called from within a handle scope.
+Handle<JSArrayBuffer> getSharedArrayBuffer(Handle<WasmInstanceObject> instance,
+ Isolate* isolate, uint32_t address) {
+ DCHECK(instance->has_memory_object());
+ Handle<JSArrayBuffer> array_buffer(instance->memory_object()->array_buffer(),
+ isolate);
+
+ // Validation should have failed if the memory was not shared.
+ DCHECK(array_buffer->is_shared());
+
+  // Should have trapped if the address was OOB.
+ DCHECK_LT(address, array_buffer->byte_length());
+ return array_buffer;
+}
+
+RUNTIME_FUNCTION(Runtime_WasmAtomicWake) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(3, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(WasmInstanceObject, instance, 0);
+ CONVERT_NUMBER_CHECKED(uint32_t, address, Uint32, args[1]);
+ CONVERT_NUMBER_CHECKED(uint32_t, count, Uint32, args[2]);
+ Handle<JSArrayBuffer> array_buffer =
+ getSharedArrayBuffer(instance, isolate, address);
+ return FutexEmulation::Wake(array_buffer, address, count);
}
+double WaitTimeoutInMs(double timeout_ns) {
+ return timeout_ns < 0
+ ? V8_INFINITY
+ : timeout_ns / (base::Time::kNanosecondsPerMicrosecond *
+ base::Time::kMicrosecondsPerMillisecond);
+}
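
A standalone restatement of the conversion above, with the base::Time
constants folded in (1 ms = 1,000,000 ns) and a worked value:

#include <limits>

// A negative timeout from wasm means "wait forever"; otherwise scale
// nanoseconds down to the milliseconds FutexEmulation expects.
double WaitTimeoutInMsSketch(double timeout_ns) {
  if (timeout_ns < 0) return std::numeric_limits<double>::infinity();
  return timeout_ns / (1000.0 * 1000.0);
}
// e.g. WaitTimeoutInMsSketch(1.5e9) == 1500.0
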
+
+RUNTIME_FUNCTION(Runtime_WasmI32AtomicWait) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(4, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(WasmInstanceObject, instance, 0);
+ CONVERT_NUMBER_CHECKED(uint32_t, address, Uint32, args[1]);
+ CONVERT_NUMBER_CHECKED(int32_t, expected_value, Int32, args[2]);
+ CONVERT_DOUBLE_ARG_CHECKED(timeout_ns, 3);
+ double timeout_ms = WaitTimeoutInMs(timeout_ns);
+ Handle<JSArrayBuffer> array_buffer =
+ getSharedArrayBuffer(instance, isolate, address);
+ return FutexEmulation::Wait32(isolate, array_buffer, address, expected_value,
+ timeout_ms);
+}
+
+RUNTIME_FUNCTION(Runtime_WasmI64AtomicWait) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(5, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(WasmInstanceObject, instance, 0);
+ CONVERT_NUMBER_CHECKED(uint32_t, address, Uint32, args[1]);
+ CONVERT_NUMBER_CHECKED(uint32_t, expected_value_high, Uint32, args[2]);
+ CONVERT_NUMBER_CHECKED(uint32_t, expected_value_low, Uint32, args[3]);
+ CONVERT_DOUBLE_ARG_CHECKED(timeout_ns, 4);
+ int64_t expected_value = (static_cast<uint64_t>(expected_value_high) << 32) |
+ static_cast<uint64_t>(expected_value_low);
+ double timeout_ms = WaitTimeoutInMs(timeout_ns);
+ Handle<JSArrayBuffer> array_buffer =
+ getSharedArrayBuffer(instance, isolate, address);
+ return FutexEmulation::Wait64(isolate, array_buffer, address, expected_value,
+ timeout_ms);
+}
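
Because the 64-bit expected value cannot cross the JS boundary as a single
number, it arrives as two uint32 halves; an equivalent standalone helper:

#include <cstdint>

int64_t CombineHalves(uint32_t high, uint32_t low) {
  return static_cast<int64_t>((static_cast<uint64_t>(high) << 32) |
                              static_cast<uint64_t>(low));
}
// e.g. CombineHalves(0x1, 0x0) == 0x100000000
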
+
+RUNTIME_FUNCTION(Runtime_WasmTableInit) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(5, args.length());
+ auto instance =
+ Handle<WasmInstanceObject>(GetWasmInstanceOnStackTop(isolate), isolate);
+ CONVERT_UINT32_ARG_CHECKED(table_index, 0);
+ CONVERT_UINT32_ARG_CHECKED(elem_segment_index, 1);
+ CONVERT_UINT32_ARG_CHECKED(dst, 2);
+ CONVERT_UINT32_ARG_CHECKED(src, 3);
+ CONVERT_UINT32_ARG_CHECKED(size, 4);
+
+ PrintF(
+ "TableInit(table_index=%u, elem_segment_index=%u, dst=%u, src=%u, "
+ "size=%u)\n",
+ table_index, elem_segment_index, dst, src, size);
+
+ USE(instance);
+ USE(table_index);
+ USE(elem_segment_index);
+ USE(dst);
+ USE(src);
+ USE(size);
+
+ UNREACHABLE();
+}
+
+RUNTIME_FUNCTION(Runtime_WasmTableCopy) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(4, args.length());
+ auto instance =
+ Handle<WasmInstanceObject>(GetWasmInstanceOnStackTop(isolate), isolate);
+ CONVERT_UINT32_ARG_CHECKED(table_index, 0);
+ CONVERT_UINT32_ARG_CHECKED(dst, 1);
+ CONVERT_UINT32_ARG_CHECKED(src, 2);
+ CONVERT_UINT32_ARG_CHECKED(count, 3);
+
+ bool oob = !WasmInstanceObject::CopyTableEntries(
+ isolate, instance, table_index, dst, src, count);
+ if (oob) {
+ // Handle out-of-bounds access here in the runtime call, rather
+ // than having the lower-level layers deal with JS exceptions.
+ DCHECK(isolate->context().is_null());
+ isolate->set_context(instance->native_context());
+ Handle<Object> error_obj = isolate->factory()->NewWasmRuntimeError(
+ MessageTemplate::kWasmTrapTableOutOfBounds);
+ return isolate->Throw(*error_obj);
+ }
+ return ReadOnlyRoots(isolate).undefined_value();
+}
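
A hedged sketch of the bounds condition CopyTableEntries is assumed to
enforce before the trap above is raised, widened to 64 bits so the
addition cannot wrap:

#include <cstdint>

bool CopyInBounds(uint32_t table_size, uint32_t dst, uint32_t src,
                  uint32_t count) {
  return static_cast<uint64_t>(dst) + count <= table_size &&
         static_cast<uint64_t>(src) + count <= table_size;
}
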
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/runtime/runtime-weak-refs.cc b/deps/v8/src/runtime/runtime-weak-refs.cc
new file mode 100644
index 0000000000..4bc258d7de
--- /dev/null
+++ b/deps/v8/src/runtime/runtime-weak-refs.cc
@@ -0,0 +1,28 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "include/v8.h"
+#include "src/api.h"
+#include "src/arguments-inl.h"
+#include "src/counters.h"
+#include "src/execution.h"
+#include "src/handles-inl.h"
+#include "src/objects-inl.h"
+#include "src/objects/js-weak-refs-inl.h"
+#include "src/runtime/runtime-utils.h"
+
+namespace v8 {
+namespace internal {
+
+RUNTIME_FUNCTION(Runtime_WeakFactoryCleanupJob) {
+ HandleScope scope(isolate);
+ CONVERT_ARG_HANDLE_CHECKED(JSWeakFactory, weak_factory, 0);
+ weak_factory->set_scheduled_for_cleanup(false);
+
+ JSWeakFactory::Cleanup(weak_factory, isolate);
+ return ReadOnlyRoots(isolate).undefined_value();
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/runtime/runtime.cc b/deps/v8/src/runtime/runtime.cc
index d9d7d85664..3d70a67553 100644
--- a/deps/v8/src/runtime/runtime.cc
+++ b/deps/v8/src/runtime/runtime.cc
@@ -18,13 +18,13 @@ namespace internal {
// Header of runtime functions.
#define F(name, number_of_args, result_size) \
- Object* Runtime_##name(int args_length, Object** args_object, \
+ Address Runtime_##name(int args_length, Address* args_object, \
Isolate* isolate);
FOR_EACH_INTRINSIC_RETURN_OBJECT(F)
#undef F
#define P(name, number_of_args, result_size) \
- ObjectPair Runtime_##name(int args_length, Object** args_object, \
+ ObjectPair Runtime_##name(int args_length, Address* args_object, \
Isolate* isolate);
FOR_EACH_INTRINSIC_RETURN_PAIR(P)
#undef P
@@ -98,10 +98,17 @@ void InitializeIntrinsicFunctionNames() {
bool Runtime::NeedsExactContext(FunctionId id) {
switch (id) {
+ case Runtime::kInlineAsyncFunctionReject:
+ case Runtime::kInlineAsyncFunctionResolve:
+ // For %_AsyncFunctionReject and %_AsyncFunctionResolve we don't
+ // really need the current context, which in particular allows
+ // us to usually eliminate the catch context for the implicit
+ // try-catch in async function.
+ return false;
case Runtime::kAddPrivateField:
case Runtime::kCopyDataProperties:
case Runtime::kCreateDataProperty:
- case Runtime::kCreatePrivateFieldSymbol:
+ case Runtime::kCreatePrivateNameSymbol:
case Runtime::kReThrow:
case Runtime::kThrow:
case Runtime::kThrowApplyNonFunction:
diff --git a/deps/v8/src/runtime/runtime.h b/deps/v8/src/runtime/runtime.h
index f091d99092..43e4e99f5a 100644
--- a/deps/v8/src/runtime/runtime.h
+++ b/deps/v8/src/runtime/runtime.h
@@ -47,7 +47,6 @@ namespace internal {
F(GrowArrayElements, 2, 1) \
F(HasComplexElements, 1, 1) \
I(IsArray, 1, 1) \
- F(MoveArrayContents, 2, 1) \
F(NewArray, -1 /* >= 3 */, 1) \
F(NormalizeElements, 1, 1) \
F(PrepareElementsForSort, 2, 1) \
@@ -129,8 +128,9 @@ namespace internal {
F(DebugPopPromise, 0, 1) \
F(DebugPrepareStepInSuspendedGenerator, 0, 1) \
F(DebugPushPromise, 1, 1) \
- F(DebugAsyncFunctionSuspended, 1, 1) \
+ F(DebugAsyncFunctionEntered, 1, 1) \
F(DebugAsyncFunctionFinished, 2, 1) \
+ F(DebugAsyncFunctionSuspended, 1, 1) \
F(DebugToggleBlockCoverage, 1, 1) \
F(DebugTogglePreciseCoverage, 1, 1) \
F(FunctionGetInferredName, 1, 1) \
@@ -165,10 +165,9 @@ namespace internal {
#define FOR_EACH_INTRINSIC_INTERPRETER_TRACE_FEEDBACK(F, I)
#endif
-#define FOR_EACH_INTRINSIC_INTERPRETER(F, I) \
- FOR_EACH_INTRINSIC_INTERPRETER_TRACE(F, I) \
- FOR_EACH_INTRINSIC_INTERPRETER_TRACE_FEEDBACK(F, I) \
- F(InterpreterDeserializeLazy, 2, 1)
+#define FOR_EACH_INTRINSIC_INTERPRETER(F, I) \
+ FOR_EACH_INTRINSIC_INTERPRETER_TRACE(F, I) \
+ FOR_EACH_INTRINSIC_INTERPRETER_TRACE_FEEDBACK(F, I)
#define FOR_EACH_INTRINSIC_FUNCTION(F, I) \
I(Call, -1 /* >= 2 */, 1) \
@@ -181,6 +180,13 @@ namespace internal {
F(SetNativeFlag, 1, 1)
#define FOR_EACH_INTRINSIC_GENERATOR(F, I) \
+ I(AsyncFunctionAwaitCaught, 2, 1) \
+ I(AsyncFunctionAwaitUncaught, 2, 1) \
+ I(AsyncFunctionEnter, 2, 1) \
+ I(AsyncFunctionReject, 3, 1) \
+ I(AsyncFunctionResolve, 3, 1) \
+ I(AsyncGeneratorAwaitCaught, 2, 1) \
+ I(AsyncGeneratorAwaitUncaught, 2, 1) \
F(AsyncGeneratorHasCatchHandlerForPC, 1, 1) \
I(AsyncGeneratorReject, 2, 1) \
I(AsyncGeneratorResolve, 3, 1) \
@@ -192,64 +198,60 @@ namespace internal {
#ifdef V8_INTL_SUPPORT
#define FOR_EACH_INTRINSIC_INTL(F, I) \
- F(AvailableLocalesOf, 1, 1) \
- F(CanonicalizeLanguageTag, 1, 1) \
- F(DateCacheVersion, 0, 1) \
F(FormatList, 2, 1) \
F(FormatListToParts, 2, 1) \
- F(GetDefaultICULocale, 0, 1) \
F(StringToLowerCaseIntl, 1, 1) \
F(StringToUpperCaseIntl, 1, 1) // End of macro.
#else
#define FOR_EACH_INTRINSIC_INTL(F, I)
#endif // V8_INTL_SUPPORT
-#define FOR_EACH_INTRINSIC_INTERNAL(F, I) \
- F(AllocateInNewSpace, 1, 1) \
- F(AllocateInTargetSpace, 2, 1) \
- F(AllocateSeqOneByteString, 1, 1) \
- F(AllocateSeqTwoByteString, 1, 1) \
- F(AllowDynamicFunction, 1, 1) \
- F(CheckIsBootstrapping, 0, 1) \
- I(CreateAsyncFromSyncIterator, 1, 1) \
- F(CreateListFromArrayLike, 1, 1) \
- F(CreateTemplateObject, 1, 1) \
- F(DeserializeLazy, 1, 1) \
- F(ExportFromRuntime, 1, 1) \
- F(GetAndResetRuntimeCallStats, -1 /* <= 2 */, 1) \
- F(IncrementUseCounter, 1, 1) \
- F(InstallToContext, 1, 1) \
- F(Interrupt, 0, 1) \
- F(IS_VAR, 1, 1) \
- F(NewReferenceError, 2, 1) \
- F(NewSyntaxError, 2, 1) \
- F(NewTypeError, 2, 1) \
- F(OrdinaryHasInstance, 2, 1) \
- F(PromoteScheduledException, 0, 1) \
- F(ReportMessage, 1, 1) \
- F(ReThrow, 1, 1) \
- F(RunMicrotaskCallback, 2, 1) \
- F(RunMicrotasks, 0, 1) \
- F(StackGuard, 0, 1) \
- F(Throw, 1, 1) \
- F(ThrowApplyNonFunction, 1, 1) \
- F(ThrowCalledNonCallable, 1, 1) \
- F(ThrowConstructedNonConstructable, 1, 1) \
- F(ThrowConstructorReturnedNonObject, 0, 1) \
- F(ThrowInvalidStringLength, 0, 1) \
- F(ThrowInvalidTypedArrayAlignment, 2, 1) \
- F(ThrowIteratorError, 1, 1) \
- F(ThrowIteratorResultNotAnObject, 1, 1) \
- F(ThrowNotConstructor, 1, 1) \
- F(ThrowRangeError, -1 /* >= 1 */, 1) \
- F(ThrowReferenceError, 1, 1) \
- F(ThrowStackOverflow, 0, 1) \
- F(ThrowSymbolAsyncIteratorInvalid, 0, 1) \
- F(ThrowSymbolIteratorInvalid, 0, 1) \
- F(ThrowThrowMethodMissing, 0, 1) \
- F(ThrowTypeError, -1 /* >= 1 */, 1) \
- F(Typeof, 1, 1) \
- F(UnwindAndFindExceptionHandler, 0, 1)
+#define FOR_EACH_INTRINSIC_INTERNAL(F, I) \
+ F(AllocateInNewSpace, 1, 1) \
+ F(AllocateInTargetSpace, 2, 1) \
+ F(AllocateSeqOneByteString, 1, 1) \
+ F(AllocateSeqTwoByteString, 1, 1) \
+ F(AllowDynamicFunction, 1, 1) \
+ F(CheckIsBootstrapping, 0, 1) \
+ I(CreateAsyncFromSyncIterator, 1, 1) \
+ F(CreateListFromArrayLike, 1, 1) \
+ F(CreateTemplateObject, 1, 1) \
+ F(FatalProcessOutOfMemoryInAllocateRaw, 0, 1) \
+ F(FatalProcessOutOfMemoryInvalidArrayLength, 0, 1) \
+ F(GetAndResetRuntimeCallStats, -1 /* <= 2 */, 1) \
+ F(IncrementUseCounter, 1, 1) \
+ F(Interrupt, 0, 1) \
+ F(NewReferenceError, 2, 1) \
+ F(NewSyntaxError, 2, 1) \
+ F(NewTypeError, 2, 1) \
+ F(OrdinaryHasInstance, 2, 1) \
+ F(PromoteScheduledException, 0, 1) \
+ F(ReportMessage, 1, 1) \
+ F(ReThrow, 1, 1) \
+ F(RunMicrotaskCallback, 2, 1) \
+ F(PerformMicrotaskCheckpoint, 0, 1) \
+ F(StackGuard, 0, 1) \
+ F(Throw, 1, 1) \
+ F(ThrowApplyNonFunction, 1, 1) \
+ F(ThrowCalledNonCallable, 1, 1) \
+ F(ThrowConstructedNonConstructable, 1, 1) \
+ F(ThrowConstructorReturnedNonObject, 0, 1) \
+ F(ThrowInvalidStringLength, 0, 1) \
+ F(ThrowInvalidTypedArrayAlignment, 2, 1) \
+ F(ThrowIteratorError, 1, 1) \
+ F(ThrowIteratorResultNotAnObject, 1, 1) \
+ F(ThrowNotConstructor, 1, 1) \
+ F(ThrowPatternAssignmentNonCoercible, 0, 1) \
+ F(ThrowRangeError, -1 /* >= 1 */, 1) \
+ F(ThrowReferenceError, 1, 1) \
+ F(ThrowStackOverflow, 0, 1) \
+ F(ThrowSymbolAsyncIteratorInvalid, 0, 1) \
+ F(ThrowSymbolIteratorInvalid, 0, 1) \
+ F(ThrowThrowMethodMissing, 0, 1) \
+ F(ThrowTypeError, -1 /* >= 1 */, 1) \
+ F(Typeof, 1, 1) \
+ F(UnwindAndFindExceptionHandler, 0, 1) \
+ F(WeakFactoryCleanupJob, 1, 1)
#define FOR_EACH_INTRINSIC_LITERALS(F, I) \
F(CreateArrayLiteral, 4, 1) \
@@ -277,8 +279,6 @@ namespace internal {
#define FOR_EACH_INTRINSIC_OBJECT(F, I) \
F(AddDictionaryProperty, 3, 1) \
- F(AddElement, 3, 1) \
- F(AddNamedProperty, 4, 1) \
F(AddPrivateField, 3, 1) \
F(AllocateHeapNumber, 0, 1) \
F(ClassOf, 1, 1) \
@@ -345,14 +345,14 @@ namespace internal {
F(PromiseHookAfter, 1, 1) \
F(PromiseHookBefore, 1, 1) \
F(PromiseHookInit, 2, 1) \
- F(AwaitPromisesInit, 3, 1) \
+ F(AwaitPromisesInit, 5, 1) \
+ F(AwaitPromisesInitOld, 5, 1) \
F(PromiseMarkAsHandled, 1, 1) \
F(PromiseRejectEventFromStack, 2, 1) \
- F(PromiseResult, 1, 1) \
F(PromiseRevokeReject, 1, 1) \
F(PromiseStatus, 1, 1) \
- I(RejectPromise, 3, 1) \
- I(ResolvePromise, 2, 1) \
+ F(RejectPromise, 3, 1) \
+ F(ResolvePromise, 2, 1) \
F(PromiseRejectAfterResolved, 2, 1) \
F(PromiseResolveAfterResolved, 2, 1)
@@ -427,7 +427,7 @@ namespace internal {
F(StringTrim, 2, 1)
#define FOR_EACH_INTRINSIC_SYMBOL(F, I) \
- F(CreatePrivateFieldSymbol, 0, 1) \
+ F(CreatePrivateNameSymbol, 1, 1) \
F(CreatePrivateSymbol, -1 /* <= 1 */, 1) \
F(SymbolDescriptiveString, 1, 1) \
F(SymbolIsPrivate, 1, 1)
@@ -481,6 +481,7 @@ namespace internal {
F(HasSmiOrObjectElements, 1, 1) \
F(HaveSameMap, 2, 1) \
F(HeapObjectVerify, 1, 1) \
+ F(ICsAreEnabled, 0, 1) \
F(InNewSpace, 1, 1) \
F(IsAsmWasmCode, 1, 1) \
F(IsConcurrentRecompilationSupported, 0, 1) \
@@ -488,6 +489,7 @@ namespace internal {
F(IsLiftoffFunction, 1, 1) \
F(IsWasmCode, 1, 1) \
F(IsWasmTrapHandlerEnabled, 0, 1) \
+ F(IsThreadInWasm, 0, 1) \
F(NeverOptimizeFunction, 1, 1) \
F(NotifyContextDisposed, 0, 1) \
F(OptimizeFunctionOnNextCall, -1, 1) \
@@ -501,8 +503,8 @@ namespace internal {
F(SetWasmCompileControls, 2, 1) \
F(SetWasmInstantiateControls, 0, 1) \
F(ArraySpeciesProtector, 0, 1) \
- F(TypedArraySpeciesProtector, 0, 1) \
- F(PromiseSpeciesProtector, 0, 1) \
+ F(MapIteratorProtector, 0, 1) \
+ F(SetIteratorProtector, 0, 1) \
F(StringIteratorProtector, 0, 1) \
F(SystemBreak, 0, 1) \
F(TraceEnter, 0, 1) \
@@ -515,8 +517,8 @@ namespace internal {
F(SetWasmThreadsEnabled, 1, 1)
#define FOR_EACH_INTRINSIC_TYPEDARRAY(F, I) \
- F(ArrayBufferNeuter, 1, 1) \
- F(ArrayBufferViewWasNeutered, 1, 1) \
+ F(ArrayBufferDetach, 1, 1) \
+ F(ArrayBufferViewWasDetached, 1, 1) \
I(IsTypedArray, 1, 1) \
F(TypedArrayCopyElements, 3, 1) \
F(TypedArrayGetBuffer, 1, 1) \
@@ -527,14 +529,19 @@ namespace internal {
#define FOR_EACH_INTRINSIC_WASM(F, I) \
F(ThrowWasmError, 1, 1) \
F(ThrowWasmStackOverflow, 0, 1) \
- F(WasmExceptionGetElement, 2, 1) \
- F(WasmExceptionSetElement, 3, 1) \
+ F(WasmI32AtomicWait, 4, 1) \
+ F(WasmI64AtomicWait, 5, 1) \
+ F(WasmAtomicWake, 3, 1) \
+ F(WasmExceptionGetValues, 1, 1) \
F(WasmExceptionGetTag, 1, 1) \
- F(WasmGrowMemory, 2, 1) \
+ F(WasmMemoryGrow, 2, 1) \
F(WasmRunInterpreter, 2, 1) \
F(WasmStackGuard, 0, 1) \
F(WasmThrowCreate, 2, 1) \
F(WasmThrowTypeError, 0, 1) \
+ F(WasmTableInit, 5, 1) \
+ F(WasmTableCopy, 4, 1) \
+ F(WasmIsValidAnyFuncValue, 1, 1) \
F(WasmCompileLazy, 2, 1)
#define FOR_EACH_INTRINSIC_RETURN_PAIR_IMPL(F, I) \
@@ -547,22 +554,25 @@ namespace internal {
F(ElementsTransitionAndStoreIC_Miss, 6, 1) \
F(KeyedLoadIC_Miss, 4, 1) \
F(KeyedStoreIC_Miss, 5, 1) \
+ F(KeyedStoreICNoFeedback_Miss, 4, 1) \
+ F(StoreInArrayLiteralIC_Miss, 5, 1) \
F(KeyedStoreIC_Slow, 5, 1) \
F(LoadAccessorProperty, 4, 1) \
F(LoadCallbackProperty, 4, 1) \
F(LoadElementWithInterceptor, 2, 1) \
- F(LoadGlobalIC_Miss, 3, 1) \
+ F(LoadGlobalIC_Miss, 4, 1) \
F(LoadGlobalIC_Slow, 3, 1) \
F(LoadIC_Miss, 4, 1) \
F(LoadPropertyWithInterceptor, 5, 1) \
F(StoreCallbackProperty, 6, 1) \
F(StoreGlobalIC_Miss, 4, 1) \
+ F(StoreGlobalICNoFeedback_Miss, 3, 1) \
F(StoreGlobalIC_Slow, 5, 1) \
F(StoreIC_Miss, 5, 1) \
+ F(StoreICNoFeedback_Miss, 5, 1) \
F(StoreInArrayLiteralIC_Slow, 5, 1) \
F(StorePropertyWithInterceptor, 5, 1) \
- F(CloneObjectIC_Miss, 4, 1) \
- F(CloneObjectIC_Slow, 2, 1)
+ F(CloneObjectIC_Miss, 4, 1)
#define FOR_EACH_INTRINSIC_RETURN_OBJECT_IMPL(F, I) \
FOR_EACH_INTRINSIC_ARRAY(F, I) \
@@ -615,7 +625,7 @@ namespace internal {
#define FOR_EACH_INLINE_INTRINSIC(I) FOR_EACH_INTRINSIC_IMPL(NOTHING, I)
#define F(name, nargs, ressize) \
- Object* Runtime_##name(int args_length, Object** args_object, \
+ Address Runtime_##name(int args_length, Address* args_object, \
Isolate* isolate);
FOR_EACH_INTRINSIC_RETURN_OBJECT(F)
#undef F
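
For one concrete intrinsic the declaration macro above expands roughly as
follows (minimal surrounding types added so the snippet stands alone):

#include <cstdint>

using Address = uintptr_t;  // tagged-pointer-sized integer
class Isolate;              // opaque here

// What F(NewArray, -1, 1) yields: the C entry point now traffics in raw
// tagged Address values rather than Object* pointers.
Address Runtime_NewArray(int args_length, Address* args_object,
                         Isolate* isolate);
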
@@ -766,10 +776,9 @@ enum class OptimizationStatus {
kOptimizingConcurrently = 1 << 9,
kIsExecuting = 1 << 10,
kTopmostFrameIsTurboFanned = 1 << 11,
+ kLiteMode = 1 << 12,
};
-Smi* SmiLexicographicCompare(Smi* x_value, Smi* y_value);
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/s390/OWNERS b/deps/v8/src/s390/OWNERS
index cf60da5cc7..6d1a8fc472 100644
--- a/deps/v8/src/s390/OWNERS
+++ b/deps/v8/src/s390/OWNERS
@@ -1,7 +1,4 @@
jyan@ca.ibm.com
-dstence@us.ibm.com
joransiu@ca.ibm.com
-mbrandy@us.ibm.com
michael_dawson@ca.ibm.com
-jbarboza@ca.ibm.com
-mmallick@ca.ibm.com
+miladfar@ca.ibm.com
\ No newline at end of file
diff --git a/deps/v8/src/s390/assembler-s390-inl.h b/deps/v8/src/s390/assembler-s390-inl.h
index e2bf452b62..b9440d0f65 100644
--- a/deps/v8/src/s390/assembler-s390-inl.h
+++ b/deps/v8/src/s390/assembler-s390-inl.h
@@ -92,14 +92,13 @@ Address RelocInfo::target_internal_reference_address() {
}
Address RelocInfo::target_address() {
- DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_) || IsWasmCall(rmode_));
+ DCHECK(IsRelativeCodeTarget(rmode_) || IsCodeTarget(rmode_) ||
+ IsRuntimeEntry(rmode_) || IsWasmCall(rmode_));
return Assembler::target_address_at(pc_, constant_pool_);
}
Address RelocInfo::target_address_address() {
- DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_) || IsWasmCall(rmode_) ||
- IsEmbeddedObject(rmode_) || IsExternalReference(rmode_) ||
- IsOffHeapTarget(rmode_));
+ DCHECK(HasTargetAddressAddress());
// Read the address of the word containing the target_address in an
// instruction stream.
@@ -140,30 +139,30 @@ Handle<Object> Assembler::code_target_object_handle_at(Address pc) {
return GetCodeTarget(index);
}
-HeapObject* RelocInfo::target_object() {
+HeapObject RelocInfo::target_object() {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- return HeapObject::cast(reinterpret_cast<Object*>(
- Assembler::target_address_at(pc_, constant_pool_)));
+ return HeapObject::cast(
+ Object(Assembler::target_address_at(pc_, constant_pool_)));
}
Handle<HeapObject> RelocInfo::target_object_handle(Assembler* origin) {
- DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+ DCHECK(IsRelativeCodeTarget(rmode_) || IsCodeTarget(rmode_) ||
+ rmode_ == EMBEDDED_OBJECT);
if (rmode_ == EMBEDDED_OBJECT) {
- return Handle<HeapObject>(reinterpret_cast<HeapObject**>(
+ return Handle<HeapObject>(reinterpret_cast<Address*>(
Assembler::target_address_at(pc_, constant_pool_)));
} else {
return Handle<HeapObject>::cast(origin->code_target_object_handle_at(pc_));
}
}
-void RelocInfo::set_target_object(Heap* heap, HeapObject* target,
+void RelocInfo::set_target_object(Heap* heap, HeapObject target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- Assembler::set_target_address_at(pc_, constant_pool_,
- reinterpret_cast<Address>(target),
+ Assembler::set_target_address_at(pc_, constant_pool_, target->ptr(),
icache_flush_mode);
- if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != nullptr) {
+ if (write_barrier_mode == UPDATE_WRITE_BARRIER && !host().is_null()) {
WriteBarrierForCode(host(), this, target);
}
}
@@ -278,9 +277,10 @@ Address Assembler::target_address_at(Address pc, Address constant_pool) {
// has already deserialized the mov instructions etc.
// There is a FIXED_SEQUENCE assumption here
void Assembler::deserialization_set_special_target_at(
- Address instruction_payload, Code* code, Address target) {
+ Address instruction_payload, Code code, Address target) {
set_target_address_at(instruction_payload,
- code ? code->constant_pool() : kNullAddress, target);
+ !code.is_null() ? code->constant_pool() : kNullAddress,
+ target);
}
int Assembler::deserialization_special_target_size(
diff --git a/deps/v8/src/s390/assembler-s390.cc b/deps/v8/src/s390/assembler-s390.cc
index f25d79ab5a..ad0c2892c9 100644
--- a/deps/v8/src/s390/assembler-s390.cc
+++ b/deps/v8/src/s390/assembler-s390.cc
@@ -47,7 +47,6 @@
#include "src/base/bits.h"
#include "src/base/cpu.h"
-#include "src/code-stubs.h"
#include "src/deoptimizer.h"
#include "src/macro-assembler.h"
#include "src/s390/assembler-s390-inl.h"
@@ -63,10 +62,11 @@ static unsigned CpuFeaturesImpliedByCompiler() {
}
static bool supportsCPUFeature(const char* feature) {
- static std::set<std::string> features;
- static std::set<std::string> all_available_features = {
- "iesan3", "zarch", "stfle", "msa", "ldisp", "eimm",
- "dfp", "etf3eh", "highgprs", "te", "vx"};
+ static std::set<std::string>& features = *new std::set<std::string>();
+ static std::set<std::string>& all_available_features =
+ *new std::set<std::string>({"iesan3", "zarch", "stfle", "msa", "ldisp",
+ "eimm", "dfp", "etf3eh", "highgprs", "te",
+ "vx"});
if (features.empty()) {
#if V8_HOST_ARCH_S390
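
The switch above from plain function-local statics to references into
intentionally leaked heap allocations is a common idiom for avoiding
exit-time destructors; a minimal sketch:

#include <set>
#include <string>

const std::set<std::string>& KnownFeatures() {
  // Allocated once, never freed: no static destructor runs at process
  // exit, and the leak is bounded to a single object.
  static std::set<std::string>& features =
      *new std::set<std::string>({"zarch", "stfle", "vx"});
  return features;
}
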
@@ -238,16 +238,16 @@ void CpuFeatures::PrintTarget() {
s390_arch = "s390";
#endif
- printf("target %s\n", s390_arch);
+ PrintF("target %s\n", s390_arch);
}
void CpuFeatures::PrintFeatures() {
- printf("FPU=%d\n", CpuFeatures::IsSupported(FPU));
- printf("FPU_EXT=%d\n", CpuFeatures::IsSupported(FLOATING_POINT_EXT));
- printf("GENERAL_INSTR=%d\n", CpuFeatures::IsSupported(GENERAL_INSTR_EXT));
- printf("DISTINCT_OPS=%d\n", CpuFeatures::IsSupported(DISTINCT_OPS));
- printf("VECTOR_FACILITY=%d\n", CpuFeatures::IsSupported(VECTOR_FACILITY));
- printf("MISC_INSTR_EXT2=%d\n", CpuFeatures::IsSupported(MISC_INSTR_EXT2));
+ PrintF("FPU=%d\n", CpuFeatures::IsSupported(FPU));
+ PrintF("FPU_EXT=%d\n", CpuFeatures::IsSupported(FLOATING_POINT_EXT));
+ PrintF("GENERAL_INSTR=%d\n", CpuFeatures::IsSupported(GENERAL_INSTR_EXT));
+ PrintF("DISTINCT_OPS=%d\n", CpuFeatures::IsSupported(DISTINCT_OPS));
+ PrintF("VECTOR_FACILITY=%d\n", CpuFeatures::IsSupported(VECTOR_FACILITY));
+ PrintF("MISC_INSTR_EXT2=%d\n", CpuFeatures::IsSupported(MISC_INSTR_EXT2));
}
Register ToRegister(int num) {
@@ -274,23 +274,6 @@ bool RelocInfo::IsCodedSpecially() {
bool RelocInfo::IsInConstantPool() { return false; }
-int RelocInfo::GetDeoptimizationId(Isolate* isolate, DeoptimizeKind kind) {
- DCHECK(IsRuntimeEntry(rmode_));
- return Deoptimizer::GetDeoptimizationId(isolate, target_address(), kind);
-}
-
-void RelocInfo::set_js_to_wasm_address(Address address,
- ICacheFlushMode icache_flush_mode) {
- DCHECK_EQ(rmode_, JS_TO_WASM_CALL);
- Assembler::set_target_address_at(pc_, constant_pool_, address,
- icache_flush_mode);
-}
-
-Address RelocInfo::js_to_wasm_address() const {
- DCHECK_EQ(rmode_, JS_TO_WASM_CALL);
- return Assembler::target_address_at(pc_, constant_pool_);
-}
-
uint32_t RelocInfo::wasm_call_tag() const {
DCHECK(rmode_ == WASM_CALL || rmode_ == WASM_STUB_CALL);
return static_cast<uint32_t>(
@@ -334,7 +317,7 @@ void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
DCHECK_IMPLIES(isolate == nullptr, heap_object_requests_.empty());
for (auto& request : heap_object_requests_) {
Handle<HeapObject> object;
- Address pc = reinterpret_cast<Address>(buffer_ + request.offset());
+ Address pc = reinterpret_cast<Address>(buffer_start_) + request.offset();
switch (request.kind()) {
case HeapObjectRequest::kHeapNumber: {
object =
@@ -343,14 +326,6 @@ void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
SKIP_ICACHE_FLUSH);
break;
}
- case HeapObjectRequest::kCodeStub: {
- request.code_stub()->set_isolate(isolate);
- SixByteInstr instr =
- Instruction::InstructionBits(reinterpret_cast<const byte*>(pc));
- int index = instr & 0xFFFFFFFF;
- UpdateCodeTarget(index, request.code_stub()->GetCode());
- break;
- }
case HeapObjectRequest::kStringConstant: {
const StringConstantBase* str = request.string();
CHECK_NOT_NULL(str);
@@ -365,10 +340,10 @@ void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
// -----------------------------------------------------------------------------
// Specific instructions, constants, and masks.
-Assembler::Assembler(const AssemblerOptions& options, void* buffer,
- int buffer_size)
- : AssemblerBase(options, buffer, buffer_size) {
- reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);
+Assembler::Assembler(const AssemblerOptions& options,
+ std::unique_ptr<AssemblerBuffer> buffer)
+ : AssemblerBase(options, std::move(buffer)) {
+ reloc_info_writer.Reposition(buffer_start_ + buffer_->size(), pc_);
ReserveCodeTargetSpace(100);
last_bound_pos_ = 0;
relocations_.reserve(128);
@@ -377,17 +352,21 @@ Assembler::Assembler(const AssemblerOptions& options, void* buffer,
void Assembler::GetCode(Isolate* isolate, CodeDesc* desc) {
EmitRelocations();
+ int code_comments_size = WriteCodeComments();
+
AllocateAndInstallRequestedHeapObjects(isolate);
// Set up code descriptor.
- desc->buffer = buffer_;
- desc->buffer_size = buffer_size_;
+ desc->buffer = buffer_start_;
+ desc->buffer_size = buffer_->size();
desc->instr_size = pc_offset();
- desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
+ desc->reloc_size =
+ (buffer_start_ + desc->buffer_size) - reloc_info_writer.pos();
desc->constant_pool_size = 0;
desc->origin = this;
desc->unwinding_info_size = 0;
desc->unwinding_info = nullptr;
+ desc->code_comments_size = code_comments_size;
}
void Assembler::Align(int m) {
@@ -444,7 +423,7 @@ const int kEndOfChain = -4;
int Assembler::target_at(int pos) {
SixByteInstr instr = instr_at(pos);
// Check which type of branch this is: a 16- or 26-bit offset.
- Opcode opcode = Instruction::S390OpcodeValue(buffer_ + pos);
+ Opcode opcode = Instruction::S390OpcodeValue(buffer_start_ + pos);
if (BRC == opcode || BRCT == opcode || BRCTG == opcode || BRXH == opcode) {
int16_t imm16 = SIGN_EXT_IMM16((instr & kImm16Mask));
@@ -476,7 +455,7 @@ int Assembler::target_at(int pos) {
// Update the target address of the current relative instruction.
void Assembler::target_at_put(int pos, int target_pos, bool* is_branch) {
SixByteInstr instr = instr_at(pos);
- Opcode opcode = Instruction::S390OpcodeValue(buffer_ + pos);
+ Opcode opcode = Instruction::S390OpcodeValue(buffer_start_ + pos);
if (is_branch != nullptr) {
*is_branch = (opcode == BRC || opcode == BRCT || opcode == BRCTG ||
@@ -499,7 +478,7 @@ void Assembler::target_at_put(int pos, int target_pos, bool* is_branch) {
} else if (LLILF == opcode) {
DCHECK(target_pos == kEndOfChain || target_pos >= 0);
// Emitted label constant, not part of a branch.
- // Make label relative to Code* of generated Code object.
+ // Make label relative to Code pointer of generated Code object.
int32_t imm32 = target_pos + (Code::kHeaderSize - kHeapObjectTag);
instr &= (~static_cast<uint64_t>(0xFFFFFFFF));
instr_at_put<SixByteInstr>(pos, instr | imm32);
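
The imm32 computation above rebases a buffer-relative label position onto
the tagged Code object; a sketch with placeholder constants (the real
values come from the Code layout and kHeapObjectTag):

#include <cstdint>

constexpr int32_t kHeaderSizeSketch = 64;    // placeholder, not V8's value
constexpr int32_t kHeapObjectTagSketch = 1;  // tagged heap pointers are odd

int32_t LabelToCodeOffset(int32_t target_pos) {
  return target_pos + (kHeaderSizeSketch - kHeapObjectTagSketch);
}
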
@@ -519,7 +498,7 @@ void Assembler::target_at_put(int pos, int target_pos, bool* is_branch) {
// Returns the maximum number of bits given instruction can address.
int Assembler::max_reach_from(int pos) {
- Opcode opcode = Instruction::S390OpcodeValue(buffer_ + pos);
+ Opcode opcode = Instruction::S390OpcodeValue(buffer_start_ + pos);
// Check which type of instr. In theory, we can return
// the values below + 1, given offset is # of halfwords
if (BRC == opcode || BRCT == opcode || BRCTG == opcode || BRXH == opcode ||
@@ -693,17 +672,9 @@ void Assembler::call(Handle<Code> target, RelocInfo::Mode rmode) {
brasl(r14, Operand(target_index));
}
-void Assembler::call(CodeStub* stub) {
- EnsureSpace ensure_space(this);
- RequestHeapObject(HeapObjectRequest(stub));
- RecordRelocInfo(RelocInfo::CODE_TARGET);
- int32_t target_index = AddCodeTarget(Handle<Code>());
- brasl(r14, Operand(target_index));
-}
-
void Assembler::jump(Handle<Code> target, RelocInfo::Mode rmode,
Condition cond) {
- DCHECK(RelocInfo::IsCodeTarget(rmode));
+ DCHECK(RelocInfo::IsRelativeCodeTarget(rmode));
EnsureSpace ensure_space(this);
RecordRelocInfo(rmode);
@@ -737,47 +708,36 @@ void Assembler::dumy(int r1, int x2, int b2, int d2) {
}
void Assembler::GrowBuffer(int needed) {
- if (!own_buffer_) FATAL("external code buffer is too small");
+ DCHECK_EQ(buffer_start_, buffer_->start());
// Compute new buffer size.
- CodeDesc desc; // the new buffer
- if (buffer_size_ < 4 * KB) {
- desc.buffer_size = 4 * KB;
- } else if (buffer_size_ < 1 * MB) {
- desc.buffer_size = 2 * buffer_size_;
- } else {
- desc.buffer_size = buffer_size_ + 1 * MB;
- }
- int space = buffer_space() + (desc.buffer_size - buffer_size_);
- if (space < needed) {
- desc.buffer_size += needed - space;
- }
+ int old_size = buffer_->size();
+ int new_size = std::min(2 * old_size, old_size + 1 * MB);
+ int space = buffer_space() + (new_size - old_size);
+ new_size += (space < needed) ? needed - space : 0;
// Some internal data structures overflow for very large buffers,
// they must ensure that kMaximalBufferSize is not too large.
- if (desc.buffer_size > kMaximalBufferSize) {
+ if (new_size > kMaximalBufferSize) {
V8::FatalProcessOutOfMemory(nullptr, "Assembler::GrowBuffer");
}
// Set up new buffer.
- desc.buffer = NewArray<byte>(desc.buffer_size);
- desc.origin = this;
-
- desc.instr_size = pc_offset();
- desc.reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
+ std::unique_ptr<AssemblerBuffer> new_buffer = buffer_->Grow(new_size);
+ DCHECK_EQ(new_size, new_buffer->size());
+ byte* new_start = new_buffer->start();
// Copy the data.
- intptr_t pc_delta = desc.buffer - buffer_;
- intptr_t rc_delta =
- (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_);
- memmove(desc.buffer, buffer_, desc.instr_size);
- memmove(reloc_info_writer.pos() + rc_delta, reloc_info_writer.pos(),
- desc.reloc_size);
+ intptr_t pc_delta = new_start - buffer_start_;
+ intptr_t rc_delta = (new_start + new_size) - (buffer_start_ + old_size);
+ size_t reloc_size = (buffer_start_ + old_size) - reloc_info_writer.pos();
+ MemMove(new_start, buffer_start_, pc_offset());
+ MemMove(reloc_info_writer.pos() + rc_delta, reloc_info_writer.pos(),
+ reloc_size);
// Switch buffers.
- DeleteArray(buffer_);
- buffer_ = desc.buffer;
- buffer_size_ = desc.buffer_size;
+ buffer_ = std::move(new_buffer);
+ buffer_start_ = new_start;
pc_ += pc_delta;
reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
reloc_info_writer.last_pc() + pc_delta);
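
The new sizing policy above doubles small buffers but caps the increment
at 1 MB, then tops up if the doubled size still cannot satisfy the pending
request; a standalone restatement with worked cases:

#include <algorithm>

int NewBufferSize(int old_size, int needed, int space_left) {
  int new_size = std::min(2 * old_size, old_size + 1024 * 1024);
  int space = space_left + (new_size - old_size);
  if (space < needed) new_size += needed - space;
  return new_size;
}
// e.g. 256 KB doubles to 512 KB, while 4 MB grows by the 1 MB cap to 5 MB.
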
@@ -832,18 +792,19 @@ void Assembler::EmitRelocations() {
for (std::vector<DeferredRelocInfo>::iterator it = relocations_.begin();
it != relocations_.end(); it++) {
RelocInfo::Mode rmode = it->rmode();
- Address pc = reinterpret_cast<Address>(buffer_) + it->position();
- RelocInfo rinfo(pc, rmode, it->data(), nullptr);
+ Address pc = reinterpret_cast<Address>(buffer_start_) + it->position();
+ RelocInfo rinfo(pc, rmode, it->data(), Code());
// Fix up internal references now that they are guaranteed to be bound.
if (RelocInfo::IsInternalReference(rmode)) {
// Jump table entry
Address pos = Memory<Address>(pc);
- Memory<Address>(pc) = reinterpret_cast<Address>(buffer_) + pos;
+ Memory<Address>(pc) = reinterpret_cast<Address>(buffer_start_) + pos;
} else if (RelocInfo::IsInternalReferenceEncoded(rmode)) {
// mov sequence
Address pos = target_address_at(pc, 0);
- set_target_address_at(pc, 0, reinterpret_cast<Address>(buffer_) + pos,
+ set_target_address_at(pc, 0,
+ reinterpret_cast<Address>(buffer_start_) + pos,
SKIP_ICACHE_FLUSH);
}
diff --git a/deps/v8/src/s390/assembler-s390.h b/deps/v8/src/s390/assembler-s390.h
index 8e494543f8..e50e77d3da 100644
--- a/deps/v8/src/s390/assembler-s390.h
+++ b/deps/v8/src/s390/assembler-s390.h
@@ -51,7 +51,11 @@
#include <vector>
#include "src/assembler.h"
+#include "src/external-reference.h"
+#include "src/label.h"
+#include "src/objects/smi.h"
#include "src/s390/constants-s390.h"
+#include "src/s390/register-s390.h"
#define ABI_USES_FUNCTION_DESCRIPTORS 0
@@ -74,273 +78,9 @@
#define ABI_CALL_VIA_IP 1
-#define INSTR_AND_DATA_CACHE_COHERENCY LWSYNC
-
namespace v8 {
namespace internal {
-// clang-format off
-#define GENERAL_REGISTERS(V) \
- V(r0) V(r1) V(r2) V(r3) V(r4) V(r5) V(r6) V(r7) \
- V(r8) V(r9) V(r10) V(fp) V(ip) V(r13) V(r14) V(sp)
-
-#define ALLOCATABLE_GENERAL_REGISTERS(V) \
- V(r2) V(r3) V(r4) V(r5) V(r6) V(r7) \
- V(r8) V(r9) V(r13)
-
-#define DOUBLE_REGISTERS(V) \
- V(d0) V(d1) V(d2) V(d3) V(d4) V(d5) V(d6) V(d7) \
- V(d8) V(d9) V(d10) V(d11) V(d12) V(d13) V(d14) V(d15)
-
-#define FLOAT_REGISTERS DOUBLE_REGISTERS
-#define SIMD128_REGISTERS DOUBLE_REGISTERS
-
-#define ALLOCATABLE_DOUBLE_REGISTERS(V) \
- V(d1) V(d2) V(d3) V(d4) V(d5) V(d6) V(d7) \
- V(d8) V(d9) V(d10) V(d11) V(d12) V(d15) V(d0)
-
-#define C_REGISTERS(V) \
- V(cr0) V(cr1) V(cr2) V(cr3) V(cr4) V(cr5) V(cr6) V(cr7) \
- V(cr8) V(cr9) V(cr10) V(cr11) V(cr12) V(cr15)
-// clang-format on
-
-// Register list in load/store instructions
-// Note that the bit values must match those used in actual instruction encoding
-const int kNumRegs = 16;
-
-// Caller-saved/arguments registers
-const RegList kJSCallerSaved = 1 << 1 | 1 << 2 | // r2 a1
- 1 << 3 | // r3 a2
- 1 << 4 | // r4 a3
- 1 << 5; // r5 a4
-
-const int kNumJSCallerSaved = 5;
-
-// Callee-saved registers preserved when switching from C to JavaScript
-const RegList kCalleeSaved =
- 1 << 6 | // r6 (argument passing in CEntryStub)
- // (HandleScope logic in MacroAssembler)
- 1 << 7 | // r7 (argument passing in CEntryStub)
- // (HandleScope logic in MacroAssembler)
- 1 << 8 | // r8 (argument passing in CEntryStub)
- // (HandleScope logic in MacroAssembler)
- 1 << 9 | // r9 (HandleScope logic in MacroAssembler)
- 1 << 10 | // r10 (Roots register in Javascript)
- 1 << 11 | // r11 (fp in Javascript)
- 1 << 12 | // r12 (ip in Javascript)
- 1 << 13; // r13 (cp in Javascript)
-// 1 << 15; // r15 (sp in Javascript)
-
-const int kNumCalleeSaved = 8;
-
-#ifdef V8_TARGET_ARCH_S390X
-
-const RegList kCallerSavedDoubles = 1 << 0 | // d0
- 1 << 1 | // d1
- 1 << 2 | // d2
- 1 << 3 | // d3
- 1 << 4 | // d4
- 1 << 5 | // d5
- 1 << 6 | // d6
- 1 << 7; // d7
-
-const int kNumCallerSavedDoubles = 8;
-
-const RegList kCalleeSavedDoubles = 1 << 8 | // d8
- 1 << 9 | // d9
- 1 << 10 | // d10
- 1 << 11 | // d11
- 1 << 12 | // d12
- 1 << 13 | // d12
- 1 << 14 | // d12
- 1 << 15; // d13
-
-const int kNumCalleeSavedDoubles = 8;
-
-#else
-
-const RegList kCallerSavedDoubles = 1 << 14 | // d14
- 1 << 15 | // d15
- 1 << 0 | // d0
- 1 << 1 | // d1
- 1 << 2 | // d2
- 1 << 3 | // d3
- 1 << 5 | // d5
- 1 << 7 | // d7
- 1 << 8 | // d8
- 1 << 9 | // d9
- 1 << 10 | // d10
- 1 << 11 | // d10
- 1 << 12 | // d10
- 1 << 13; // d11
-
-const int kNumCallerSavedDoubles = 14;
-
-const RegList kCalleeSavedDoubles = 1 << 4 | // d4
- 1 << 6; // d6
-
-const int kNumCalleeSavedDoubles = 2;
-
-#endif
-
-// Number of registers for which space is reserved in safepoints. Must be a
-// multiple of 8.
-// TODO(regis): Only 8 registers may actually be sufficient. Revisit.
-const int kNumSafepointRegisters = 16;
-
-// Define the list of registers actually saved at safepoints.
-// Note that the number of saved registers may be smaller than the reserved
-// space, i.e. kNumSafepointSavedRegisters <= kNumSafepointRegisters.
-const RegList kSafepointSavedRegisters = kJSCallerSaved | kCalleeSaved;
-const int kNumSafepointSavedRegisters = kNumJSCallerSaved + kNumCalleeSaved;
-
-// The following constants describe the stack frame linkage area as
-// defined by the ABI.
-
-#if V8_TARGET_ARCH_S390X
-// [0] Back Chain
-// [1] Reserved for compiler use
-// [2] GPR 2
-// [3] GPR 3
-// ...
-// [15] GPR 15
-// [16] FPR 0
-// [17] FPR 2
-// [18] FPR 4
-// [19] FPR 6
-const int kNumRequiredStackFrameSlots = 20;
-const int kStackFrameRASlot = 14;
-const int kStackFrameSPSlot = 15;
-const int kStackFrameExtraParamSlot = 20;
-#else
-// [0] Back Chain
-// [1] Reserved for compiler use
-// [2] GPR 2
-// [3] GPR 3
-// ...
-// [15] GPR 15
-// [16..17] FPR 0
-// [18..19] FPR 2
-// [20..21] FPR 4
-// [22..23] FPR 6
-const int kNumRequiredStackFrameSlots = 24;
-const int kStackFrameRASlot = 14;
-const int kStackFrameSPSlot = 15;
-const int kStackFrameExtraParamSlot = 24;
-#endif
-
-// zLinux ABI requires caller frames to include sufficient space for
-// callee preserved register save area.
-#if V8_TARGET_ARCH_S390X
-const int kCalleeRegisterSaveAreaSize = 160;
-#elif V8_TARGET_ARCH_S390
-const int kCalleeRegisterSaveAreaSize = 96;
-#else
-const int kCalleeRegisterSaveAreaSize = 0;
-#endif
-
-enum RegisterCode {
-#define REGISTER_CODE(R) kRegCode_##R,
- GENERAL_REGISTERS(REGISTER_CODE)
-#undef REGISTER_CODE
- kRegAfterLast
-};
-
-class Register : public RegisterBase<Register, kRegAfterLast> {
- public:
-#if V8_TARGET_LITTLE_ENDIAN
- static constexpr int kMantissaOffset = 0;
- static constexpr int kExponentOffset = 4;
-#else
- static constexpr int kMantissaOffset = 4;
- static constexpr int kExponentOffset = 0;
-#endif
-
- private:
- friend class RegisterBase;
- explicit constexpr Register(int code) : RegisterBase(code) {}
-};
-
-ASSERT_TRIVIALLY_COPYABLE(Register);
-static_assert(sizeof(Register) == sizeof(int),
- "Register can efficiently be passed by value");
-
-#define DEFINE_REGISTER(R) \
- constexpr Register R = Register::from_code<kRegCode_##R>();
-GENERAL_REGISTERS(DEFINE_REGISTER)
-#undef DEFINE_REGISTER
-constexpr Register no_reg = Register::no_reg();
-
-// Register aliases
-constexpr Register kRootRegister = r10; // Roots array pointer.
-constexpr Register cp = r13; // JavaScript context pointer.
-
-constexpr bool kPadArguments = false;
-constexpr bool kSimpleFPAliasing = true;
-constexpr bool kSimdMaskRegisters = false;
-
-enum DoubleRegisterCode {
-#define REGISTER_CODE(R) kDoubleCode_##R,
- DOUBLE_REGISTERS(REGISTER_CODE)
-#undef REGISTER_CODE
- kDoubleAfterLast
-};
-
-// Double word VFP register.
-class DoubleRegister : public RegisterBase<DoubleRegister, kDoubleAfterLast> {
- public:
- // A few double registers are reserved: one as a scratch register and one to
- // hold 0.0, that does not fit in the immediate field of vmov instructions.
- // d14: 0.0
- // d15: scratch register.
- static constexpr int kSizeInBytes = 8;
- inline static int NumRegisters();
-
- private:
- friend class RegisterBase;
-
- explicit constexpr DoubleRegister(int code) : RegisterBase(code) {}
-};
-
-ASSERT_TRIVIALLY_COPYABLE(DoubleRegister);
-static_assert(sizeof(DoubleRegister) == sizeof(int),
- "DoubleRegister can efficiently be passed by value");
-
-typedef DoubleRegister FloatRegister;
-
-// TODO(john.yan) Define SIMD registers.
-typedef DoubleRegister Simd128Register;
-
-#define DEFINE_REGISTER(R) \
- constexpr DoubleRegister R = DoubleRegister::from_code<kDoubleCode_##R>();
-DOUBLE_REGISTERS(DEFINE_REGISTER)
-#undef DEFINE_REGISTER
-constexpr DoubleRegister no_dreg = DoubleRegister::no_reg();
-
-constexpr DoubleRegister kDoubleRegZero = d14;
-constexpr DoubleRegister kScratchDoubleReg = d13;
-
-Register ToRegister(int num);
-
-enum CRegisterCode {
-#define REGISTER_CODE(R) kCCode_##R,
- C_REGISTERS(REGISTER_CODE)
-#undef REGISTER_CODE
- kCAfterLast
-};
-
-// Coprocessor register
-class CRegister : public RegisterBase<CRegister, kCAfterLast> {
- friend class RegisterBase;
- explicit constexpr CRegister(int code) : RegisterBase(code) {}
-};
-
-constexpr CRegister no_creg = CRegister::no_reg();
-#define DECLARE_C_REGISTER(R) \
- constexpr CRegister R = CRegister::from_code<kCCode_##R>();
-C_REGISTERS(DECLARE_C_REGISTER)
-#undef DECLARE_C_REGISTER
-
// -----------------------------------------------------------------------------
// Machine instruction Operands
@@ -360,8 +100,8 @@ class Operand {
value_.immediate = static_cast<intptr_t>(f.address());
}
explicit Operand(Handle<HeapObject> handle);
- V8_INLINE explicit Operand(Smi* value) : rmode_(RelocInfo::NONE) {
- value_.immediate = reinterpret_cast<intptr_t>(value);
+ V8_INLINE explicit Operand(Smi value) : rmode_(RelocInfo::NONE) {
+ value_.immediate = static_cast<intptr_t>(value.ptr());
}
// rm
@@ -480,15 +220,10 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// for a detailed comment on the layout (globals.h).
//
// If the provided buffer is nullptr, the assembler allocates and grows its
- // own buffer, and buffer_size determines the initial buffer size. The buffer
- // is owned by the assembler and deallocated upon destruction of the
- // assembler.
- //
- // If the provided buffer is not nullptr, the assembler uses the provided
- // buffer for code generation and assumes its size to be buffer_size. If the
- // buffer is too small, a fatal error occurs. No deallocation of the buffer is
- // done upon destruction of the assembler.
- Assembler(const AssemblerOptions& options, void* buffer, int buffer_size);
+ // own buffer. Otherwise it takes ownership of the provided buffer.
+ explicit Assembler(const AssemblerOptions&,
+ std::unique_ptr<AssemblerBuffer> = {});
+
virtual ~Assembler() {}
// GetCode emits any pending (non-emitted) code and fills the descriptor
@@ -550,7 +285,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// This sets the branch destination.
// This is for calls and branches within generated code.
inline static void deserialization_set_special_target_at(
- Address instruction_payload, Code* code, Address target);
+ Address instruction_payload, Code code, Address target);
// Get the size of the special target encoded at 'instruction_payload'.
inline static int deserialization_special_target_size(
@@ -584,16 +319,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// Patch will be applied to other FIXED_SEQUENCE call
static constexpr int kCallTargetAddressOffset = 6;
-// The length of FIXED_SEQUENCE call
-// iihf r8, <address_hi> // <64-bit only>
-// iilf r8, <address_lo>
-// basr r14, r8
-#if V8_TARGET_ARCH_S390X
- static constexpr int kCallSequenceLength = 14;
-#else
- static constexpr int kCallSequenceLength = 8;
-#endif
-
// ---------------------------------------------------------------------------
// Code generation
@@ -1402,7 +1127,6 @@ inline void ss_a_format(Opcode op, int f1, int f2, int f3, int f4, int f5) {
}
void call(Handle<Code> target, RelocInfo::Mode rmode);
- void call(CodeStub* stub);
void jump(Handle<Code> target, RelocInfo::Mode rmode, Condition cond);
// S390 instruction generation
@@ -1488,10 +1212,6 @@ inline void ss_a_format(Opcode op, int f1, int f2, int f3, int f4, int f5) {
return pc_offset() - label->pos();
}
- // Record a comment relocation entry that can be used by a disassembler.
- // Use --code-comments to enable.
- void RecordComment(const char* msg);
-
// Record a deoptimization reason that can be used by a log or cpu profiler.
// Use --trace-deopt to enable.
void RecordDeoptReason(DeoptimizeReason reason, SourcePosition position,
@@ -1504,25 +1224,18 @@ inline void ss_a_format(Opcode op, int f1, int f2, int f3, int f4, int f5) {
void dq(uint64_t data);
void dp(uintptr_t data);
- void PatchConstantPoolAccessInstruction(int pc_offset, int offset,
- ConstantPoolEntry::Access access,
- ConstantPoolEntry::Type type) {
- // No embedded constant pool support.
- UNREACHABLE();
- }
-
// Read/patch instructions
SixByteInstr instr_at(int pos) {
- return Instruction::InstructionBits(buffer_ + pos);
+ return Instruction::InstructionBits(buffer_start_ + pos);
}
template <typename T>
void instr_at_put(int pos, T instr) {
- Instruction::SetInstructionBits<T>(buffer_ + pos, instr);
+ Instruction::SetInstructionBits<T>(buffer_start_ + pos, instr);
}
// Decodes instruction at pos, and returns its length
int32_t instr_length_at(int pos) {
- return Instruction::InstructionLength(buffer_ + pos);
+ return Instruction::InstructionLength(buffer_start_ + pos);
}
static SixByteInstr instr_at(byte* pc) {
@@ -1553,7 +1266,7 @@ inline void ss_a_format(Opcode op, int f1, int f2, int f3, int f4, int f5) {
void emit_label_addr(Label* label);
public:
- byte* buffer_pos() const { return buffer_; }
+ byte* buffer_pos() const { return buffer_start_; }
protected:
int buffer_space() const { return reloc_info_writer.pos() - pc_; }
@@ -1659,6 +1372,8 @@ inline void ss_a_format(Opcode op, int f1, int f2, int f3, int f4, int f5) {
void AllocateAndInstallRequestedHeapObjects(Isolate* isolate);
+ int WriteCodeComments();
+
friend class RegExpMacroAssemblerS390;
friend class RelocInfo;
friend class EnsureSpace;
diff --git a/deps/v8/src/s390/code-stubs-s390.cc b/deps/v8/src/s390/code-stubs-s390.cc
index 9a8111ffcf..688b6bc816 100644
--- a/deps/v8/src/s390/code-stubs-s390.cc
+++ b/deps/v8/src/s390/code-stubs-s390.cc
@@ -14,668 +14,15 @@
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
#include "src/isolate.h"
+#include "src/macro-assembler.h"
#include "src/objects/api-callbacks.h"
#include "src/regexp/jsregexp.h"
#include "src/regexp/regexp-macro-assembler.h"
#include "src/runtime/runtime.h"
-#include "src/s390/code-stubs-s390.h" // Cannot be the first include.
-
namespace v8 {
namespace internal {
-#define __ ACCESS_MASM(masm)
-
-void JSEntryStub::Generate(MacroAssembler* masm) {
- // r2: code entry
- // r3: function
- // r4: receiver
- // r5: argc
- // r6: argv
-
- Label invoke, handler_entry, exit;
-
- {
- NoRootArrayScope no_root_array(masm);
- ProfileEntryHookStub::MaybeCallEntryHook(masm);
-
-// saving floating point registers
-#if V8_TARGET_ARCH_S390X
- // 64bit ABI requires f8 to f15 be saved
- __ lay(sp, MemOperand(sp, -8 * kDoubleSize));
- __ std(d8, MemOperand(sp));
- __ std(d9, MemOperand(sp, 1 * kDoubleSize));
- __ std(d10, MemOperand(sp, 2 * kDoubleSize));
- __ std(d11, MemOperand(sp, 3 * kDoubleSize));
- __ std(d12, MemOperand(sp, 4 * kDoubleSize));
- __ std(d13, MemOperand(sp, 5 * kDoubleSize));
- __ std(d14, MemOperand(sp, 6 * kDoubleSize));
- __ std(d15, MemOperand(sp, 7 * kDoubleSize));
-#else
- // 31bit ABI requires you to store f4 and f6:
- // http://refspecs.linuxbase.org/ELF/zSeries/lzsabi0_s390.html#AEN417
- __ lay(sp, MemOperand(sp, -2 * kDoubleSize));
- __ std(d4, MemOperand(sp));
- __ std(d6, MemOperand(sp, kDoubleSize));
-#endif
-
- // zLinux ABI
- // Incoming parameters:
- // r2: code entry
- // r3: function
- // r4: receiver
- // r5: argc
- // r6: argv
- // Requires us to save the callee-preserved registers r6-r13
- // General convention is to also save r14 (return addr) and
- // sp/r15 as well in a single STM/STMG
- __ lay(sp, MemOperand(sp, -10 * kPointerSize));
- __ StoreMultipleP(r6, sp, MemOperand(sp, 0));
-
- // Set up the reserved register for 0.0.
- // __ LoadDoubleLiteral(kDoubleRegZero, 0.0, r0);
-
- // Push a frame with special values setup to mark it as an entry frame.
- // Bad FP (-1)
- // SMI Marker
- // SMI Marker
- // kCEntryFPAddress
- // Frame type
- __ lay(sp, MemOperand(sp, -5 * kPointerSize));
-
- // Push a bad frame pointer to fail if it is used.
- __ LoadImmP(r10, Operand(-1));
-
- StackFrame::Type marker = type();
- __ Load(r9, Operand(StackFrame::TypeToMarker(marker)));
- __ Load(r8, Operand(StackFrame::TypeToMarker(marker)));
- // Save copies of the top frame descriptor on the stack.
- __ mov(r7, Operand(ExternalReference::Create(
- IsolateAddressId::kCEntryFPAddress, isolate())));
- __ LoadP(r7, MemOperand(r7));
- __ StoreMultipleP(r7, r10, MemOperand(sp, kPointerSize));
- // Set up frame pointer for the frame to be pushed.
- // Need to add kPointerSize, because sp has one extra
- // frame already for the frame type being pushed later.
- __ lay(fp, MemOperand(
- sp, -EntryFrameConstants::kCallerFPOffset + kPointerSize));
-
- __ InitializeRootRegister();
- }
-
- // If this is the outermost JS call, set js_entry_sp value.
- Label non_outermost_js;
- ExternalReference js_entry_sp =
- ExternalReference::Create(IsolateAddressId::kJSEntrySPAddress, isolate());
- __ mov(r7, Operand(js_entry_sp));
- __ LoadAndTestP(r8, MemOperand(r7));
- __ bne(&non_outermost_js, Label::kNear);
- __ StoreP(fp, MemOperand(r7));
- __ Load(ip, Operand(StackFrame::OUTERMOST_JSENTRY_FRAME));
- Label cont;
- __ b(&cont, Label::kNear);
- __ bind(&non_outermost_js);
- __ Load(ip, Operand(StackFrame::INNER_JSENTRY_FRAME));
-
- __ bind(&cont);
- __ StoreP(ip, MemOperand(sp)); // frame-type
-
- // Jump to a faked try block that does the invoke, with a faked catch
- // block that sets the pending exception.
- __ b(&invoke, Label::kNear);
-
- __ bind(&handler_entry);
- handler_offset_ = handler_entry.pos();
- // Caught exception: Store result (exception) in the pending exception
- // field in the JSEnv and return a failure sentinel. Coming in here the
- // fp will be invalid because the PushStackHandler below sets it to 0 to
- // signal the existence of the JSEntry frame.
- __ mov(ip, Operand(ExternalReference::Create(
- IsolateAddressId::kPendingExceptionAddress, isolate())));
-
- __ StoreP(r2, MemOperand(ip));
- __ LoadRoot(r2, RootIndex::kException);
- __ b(&exit, Label::kNear);
-
- // Invoke: Link this frame into the handler chain.
- __ bind(&invoke);
- // Must preserve r2-r6.
- __ PushStackHandler();
- // If an exception not caught by another handler occurs, this handler
- // returns control to the code after the b(&invoke) above, which
- // restores all kCalleeSaved registers (including cp and fp) to their
- // saved values before returning a failure to C.
-
- // Invoke the function by calling through JS entry trampoline builtin.
- // Notice that we cannot store a reference to the trampoline code directly in
- // this stub, because runtime stubs are not traversed when doing GC.
-
- // Expected registers by Builtins::JSEntryTrampoline
- // r2: code entry
- // r3: function
- // r4: receiver
- // r5: argc
- // r6: argv
- __ Call(EntryTrampoline(), RelocInfo::CODE_TARGET);
-
- // Unlink this frame from the handler chain.
- __ PopStackHandler();
- __ bind(&exit); // r2 holds result
-
- // Check if the current stack frame is marked as the outermost JS frame.
- Label non_outermost_js_2;
- __ pop(r7);
- __ CmpP(r7, Operand(StackFrame::OUTERMOST_JSENTRY_FRAME));
- __ bne(&non_outermost_js_2, Label::kNear);
- __ mov(r8, Operand::Zero());
- __ mov(r7, Operand(js_entry_sp));
- __ StoreP(r8, MemOperand(r7));
- __ bind(&non_outermost_js_2);
-
- // Restore the top frame descriptors from the stack.
- __ pop(r5);
- __ mov(ip, Operand(ExternalReference::Create(
- IsolateAddressId::kCEntryFPAddress, isolate())));
- __ StoreP(r5, MemOperand(ip));
-
- // Reset the stack to the callee saved registers.
- __ lay(sp, MemOperand(sp, -EntryFrameConstants::kCallerFPOffset));
-
- // Reload callee-saved preserved regs, return address reg (r14) and sp
- __ LoadMultipleP(r6, sp, MemOperand(sp, 0));
- __ la(sp, MemOperand(sp, 10 * kPointerSize));
-
-// restoring floating point registers
-#if V8_TARGET_ARCH_S390X
- // 64bit ABI requires f8 to f15 be saved
- __ ld(d8, MemOperand(sp));
- __ ld(d9, MemOperand(sp, 1 * kDoubleSize));
- __ ld(d10, MemOperand(sp, 2 * kDoubleSize));
- __ ld(d11, MemOperand(sp, 3 * kDoubleSize));
- __ ld(d12, MemOperand(sp, 4 * kDoubleSize));
- __ ld(d13, MemOperand(sp, 5 * kDoubleSize));
- __ ld(d14, MemOperand(sp, 6 * kDoubleSize));
- __ ld(d15, MemOperand(sp, 7 * kDoubleSize));
- __ la(sp, MemOperand(sp, 8 * kDoubleSize));
-#else
- // 31bit ABI requires you to store f4 and f6:
- // http://refspecs.linuxbase.org/ELF/zSeries/lzsabi0_s390.html#AEN417
- __ ld(d4, MemOperand(sp));
- __ ld(d6, MemOperand(sp, kDoubleSize));
- __ la(sp, MemOperand(sp, 2 * kDoubleSize));
-#endif
-
- __ b(r14);
-}
-
-// This stub is paired with DirectCEntryStub::GenerateCall
-void DirectCEntryStub::Generate(MacroAssembler* masm) {
- __ CleanseP(r14);
-
- __ b(ip); // Callee will return to R14 directly
-}
-
-void DirectCEntryStub::GenerateCall(MacroAssembler* masm, Register target) {
- if (FLAG_embedded_builtins) {
- if (masm->root_array_available() &&
- isolate()->ShouldLoadConstantsFromRootList()) {
- // This is basically an inlined version of Call(Handle<Code>) that loads
- // the code object into lr instead of ip.
- __ Move(ip, target);
- __ IndirectLoadConstant(r1, GetCode());
- __ AddP(r1, r1, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Call(r1);
- return;
- }
- }
-#if ABI_USES_FUNCTION_DESCRIPTORS && !defined(USE_SIMULATOR)
- // Native AIX/S390X Linux use a function descriptor.
- __ LoadP(ToRegister(ABI_TOC_REGISTER), MemOperand(target, kPointerSize));
- __ LoadP(target, MemOperand(target, 0)); // Instruction address
-#else
-  // ip needs to be set for DirectCEntryStub::Generate, and also
- // for ABI_CALL_VIA_IP.
- __ Move(ip, target);
-#endif
-
- __ call(GetCode(), RelocInfo::CODE_TARGET); // Call the stub.
-}
-
-void ProfileEntryHookStub::MaybeCallEntryHookDelayed(TurboAssembler* tasm,
- Zone* zone) {
- if (tasm->isolate()->function_entry_hook() != nullptr) {
- PredictableCodeSizeScope predictable(tasm,
-#if V8_TARGET_ARCH_S390X
- 40);
-#elif V8_HOST_ARCH_S390
- 36);
-#else
- 32);
-#endif
- tasm->CleanseP(r14);
- tasm->Push(r14, ip);
- tasm->CallStubDelayed(new (zone) ProfileEntryHookStub(nullptr));
- tasm->Pop(r14, ip);
- }
-}
-
-void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
- if (masm->isolate()->function_entry_hook() != nullptr) {
- PredictableCodeSizeScope predictable(masm,
-#if V8_TARGET_ARCH_S390X
- 40);
-#elif V8_HOST_ARCH_S390
- 36);
-#else
- 32);
-#endif
- ProfileEntryHookStub stub(masm->isolate());
- __ CleanseP(r14);
- __ Push(r14, ip);
- __ CallStub(&stub); // BRASL
- __ Pop(r14, ip);
- }
-}
-
-void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
-// The entry hook is a "push lr" instruction (LAY+ST/STG), followed by a call.
-#if V8_TARGET_ARCH_S390X
- const int32_t kReturnAddressDistanceFromFunctionStart =
- Assembler::kCallTargetAddressOffset + 18; // LAY + STG * 2
-#elif V8_HOST_ARCH_S390
- const int32_t kReturnAddressDistanceFromFunctionStart =
- Assembler::kCallTargetAddressOffset + 18; // NILH + LAY + ST * 2
-#else
- const int32_t kReturnAddressDistanceFromFunctionStart =
- Assembler::kCallTargetAddressOffset + 14; // LAY + ST * 2
-#endif
-
- // This should contain all kJSCallerSaved registers.
- const RegList kSavedRegs = kJSCallerSaved | // Caller saved registers.
- r7.bit(); // Saved stack pointer.
-
- // We also save r14+ip, so count here is one higher than the mask indicates.
- const int32_t kNumSavedRegs = kNumJSCallerSaved + 3;
-
- // Save all caller-save registers as this may be called from anywhere.
- __ CleanseP(r14);
- __ LoadRR(ip, r14);
- __ MultiPush(kSavedRegs | ip.bit());
-
- // Compute the function's address for the first argument.
-
- __ SubP(r2, ip, Operand(kReturnAddressDistanceFromFunctionStart));
-
- // The caller's return address is two slots above the saved temporaries.
- // Grab that for the second argument to the hook.
- __ lay(r3, MemOperand(sp, kNumSavedRegs * kPointerSize));
-
- // Align the stack if necessary.
- int frame_alignment = masm->ActivationFrameAlignment();
- if (frame_alignment > kPointerSize) {
- __ LoadRR(r7, sp);
- DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
- __ ClearRightImm(sp, sp, Operand(WhichPowerOf2(frame_alignment)));
- }
-
-#if !defined(USE_SIMULATOR)
- uintptr_t entry_hook =
- reinterpret_cast<uintptr_t>(isolate()->function_entry_hook());
- __ mov(ip, Operand(entry_hook));
-
-#if ABI_USES_FUNCTION_DESCRIPTORS
- // Function descriptor
- __ LoadP(ToRegister(ABI_TOC_REGISTER), MemOperand(ip, kPointerSize));
- __ LoadP(ip, MemOperand(ip, 0));
-// ip already set.
-#endif
-#endif
-
- // zLinux ABI requires caller's frame to have sufficient space for callee
- // preserved register save area.
- __ LoadImmP(r0, Operand::Zero());
- __ lay(sp, MemOperand(sp, -kCalleeRegisterSaveAreaSize -
- kNumRequiredStackFrameSlots * kPointerSize));
- __ StoreP(r0, MemOperand(sp));
-#if defined(USE_SIMULATOR)
- // Under the simulator we need to indirect the entry hook through a
- // trampoline function at a known address.
- // It additionally takes an isolate as a third parameter
- __ mov(r4, Operand(ExternalReference::isolate_address(isolate())));
-
- ApiFunction dispatcher(FUNCTION_ADDR(EntryHookTrampoline));
- __ mov(ip, Operand(ExternalReference::Create(
- &dispatcher, ExternalReference::BUILTIN_CALL)));
-#endif
- __ Call(ip);
-
- // zLinux ABI requires caller's frame to have sufficient space for callee
- // preserved register save area.
- __ la(sp, MemOperand(sp, kCalleeRegisterSaveAreaSize +
- kNumRequiredStackFrameSlots * kPointerSize));
-
- // Restore the stack pointer if needed.
- if (frame_alignment > kPointerSize) {
- __ LoadRR(sp, r7);
- }
-
- // Also pop lr to get Ret(0).
- __ MultiPop(kSavedRegs | ip.bit());
- __ LoadRR(r14, ip);
- __ Ret();
-}
-
-static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
- return ref0.address() - ref1.address();
-}
-
-// Calls an API function. Allocates HandleScope, extracts the returned value
-// from the handle, and propagates exceptions. Restores context. stack_space
-// is the space to be unwound on exit (includes the call JS arguments space
-// and the additional space allocated for the fast call).
-static void CallApiFunctionAndReturn(MacroAssembler* masm,
- Register function_address,
- ExternalReference thunk_ref,
- int stack_space,
- MemOperand* stack_space_operand,
- MemOperand return_value_operand) {
- Isolate* isolate = masm->isolate();
- ExternalReference next_address =
- ExternalReference::handle_scope_next_address(isolate);
- const int kNextOffset = 0;
- const int kLimitOffset = AddressOffset(
- ExternalReference::handle_scope_limit_address(isolate), next_address);
- const int kLevelOffset = AddressOffset(
- ExternalReference::handle_scope_level_address(isolate), next_address);
-
- // Additional parameter is the address of the actual callback.
- DCHECK(function_address == r3 || function_address == r4);
- Register scratch = r5;
-
- __ Move(scratch, ExternalReference::is_profiling_address(isolate));
- __ LoadlB(scratch, MemOperand(scratch, 0));
- __ CmpP(scratch, Operand::Zero());
-
- Label profiler_disabled;
- Label end_profiler_check;
- __ beq(&profiler_disabled, Label::kNear);
- __ Move(scratch, thunk_ref);
- __ b(&end_profiler_check, Label::kNear);
- __ bind(&profiler_disabled);
- __ LoadRR(scratch, function_address);
- __ bind(&end_profiler_check);
-
- // Allocate HandleScope in callee-save registers.
- // r9 - next_address
- // r6 - next_address->kNextOffset
- // r7 - next_address->kLimitOffset
- // r8 - next_address->kLevelOffset
- __ Move(r9, next_address);
- __ LoadP(r6, MemOperand(r9, kNextOffset));
- __ LoadP(r7, MemOperand(r9, kLimitOffset));
- __ LoadlW(r8, MemOperand(r9, kLevelOffset));
- __ AddP(r8, Operand(1));
- __ StoreW(r8, MemOperand(r9, kLevelOffset));
-
- if (FLAG_log_timer_events) {
- FrameScope frame(masm, StackFrame::MANUAL);
- __ PushSafepointRegisters();
- __ PrepareCallCFunction(1, r2);
- __ Move(r2, ExternalReference::isolate_address(isolate));
- __ CallCFunction(ExternalReference::log_enter_external_function(), 1);
- __ PopSafepointRegisters();
- }
-
- // Native call returns to the DirectCEntry stub which redirects to the
- // return address pushed on stack (could have moved after GC).
- // DirectCEntry stub itself is generated early and never moves.
- DirectCEntryStub stub(isolate);
- stub.GenerateCall(masm, scratch);
-
- if (FLAG_log_timer_events) {
- FrameScope frame(masm, StackFrame::MANUAL);
- __ PushSafepointRegisters();
- __ PrepareCallCFunction(1, r2);
- __ Move(r2, ExternalReference::isolate_address(isolate));
- __ CallCFunction(ExternalReference::log_leave_external_function(), 1);
- __ PopSafepointRegisters();
- }
-
- Label promote_scheduled_exception;
- Label delete_allocated_handles;
- Label leave_exit_frame;
- Label return_value_loaded;
-
- // load value from ReturnValue
- __ LoadP(r2, return_value_operand);
- __ bind(&return_value_loaded);
- // No more valid handles (the result handle was the last one). Restore
- // previous handle scope.
- __ StoreP(r6, MemOperand(r9, kNextOffset));
- if (__ emit_debug_code()) {
- __ LoadlW(r3, MemOperand(r9, kLevelOffset));
- __ CmpP(r3, r8);
- __ Check(eq, AbortReason::kUnexpectedLevelAfterReturnFromApiCall);
- }
- __ SubP(r8, Operand(1));
- __ StoreW(r8, MemOperand(r9, kLevelOffset));
- __ CmpP(r7, MemOperand(r9, kLimitOffset));
- __ bne(&delete_allocated_handles, Label::kNear);
-
- // Leave the API exit frame.
- __ bind(&leave_exit_frame);
- // LeaveExitFrame expects unwind space to be in a register.
- if (stack_space_operand != nullptr) {
- __ l(r6, *stack_space_operand);
- } else {
- __ mov(r6, Operand(stack_space));
- }
- __ LeaveExitFrame(false, r6, stack_space_operand != nullptr);
-
- // Check if the function scheduled an exception.
- __ Move(r7, ExternalReference::scheduled_exception_address(isolate));
- __ LoadP(r7, MemOperand(r7));
- __ CompareRoot(r7, RootIndex::kTheHoleValue);
- __ bne(&promote_scheduled_exception, Label::kNear);
-
- __ b(r14);
-
- // Re-throw by promoting a scheduled exception.
- __ bind(&promote_scheduled_exception);
- __ TailCallRuntime(Runtime::kPromoteScheduledException);
-
- // HandleScope limit has changed. Delete allocated extensions.
- __ bind(&delete_allocated_handles);
- __ StoreP(r7, MemOperand(r9, kLimitOffset));
- __ LoadRR(r6, r2);
- __ PrepareCallCFunction(1, r7);
- __ Move(r2, ExternalReference::isolate_address(isolate));
- __ CallCFunction(ExternalReference::delete_handle_scope_extensions(), 1);
- __ LoadRR(r2, r6);
- __ b(&leave_exit_frame, Label::kNear);
-}
-
-void CallApiCallbackStub::Generate(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r6 : call_data
- // -- r4 : holder
- // -- r3 : api_function_address
- // -- cp : context
- // --
- // -- sp[0] : last argument
- // -- ...
- // -- sp[(argc - 1) * 4] : first argument
- // -- sp[argc * 4] : receiver
- // -----------------------------------
-
- Register call_data = r6;
- Register holder = r4;
- Register api_function_address = r3;
-
- typedef FunctionCallbackArguments FCA;
-
- STATIC_ASSERT(FCA::kArgsLength == 6);
- STATIC_ASSERT(FCA::kNewTargetIndex == 5);
- STATIC_ASSERT(FCA::kDataIndex == 4);
- STATIC_ASSERT(FCA::kReturnValueOffset == 3);
- STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
- STATIC_ASSERT(FCA::kIsolateIndex == 1);
- STATIC_ASSERT(FCA::kHolderIndex == 0);
-
- // new target
- __ PushRoot(RootIndex::kUndefinedValue);
-
- // call data
- __ push(call_data);
-
- Register scratch = call_data;
- __ LoadRoot(scratch, RootIndex::kUndefinedValue);
- // return value
- __ push(scratch);
- // return value default
- __ push(scratch);
- // isolate
- __ Move(scratch, ExternalReference::isolate_address(masm->isolate()));
- __ push(scratch);
- // holder
- __ push(holder);
-
- // Prepare arguments.
- __ LoadRR(scratch, sp);
-
- // Allocate the v8::Arguments structure in the arguments' space since
- // it's not controlled by GC.
- // S390 LINUX ABI:
- //
- // Create 4 extra slots on stack:
- // [0] space for DirectCEntryStub's LR save
- // [1-3] FunctionCallbackInfo
- const int kApiStackSpace = 4;
- const int kFunctionCallbackInfoOffset =
- (kStackFrameExtraParamSlot + 1) * kPointerSize;
-
- FrameScope frame_scope(masm, StackFrame::MANUAL);
- __ EnterExitFrame(false, kApiStackSpace);
-
- DCHECK(api_function_address != r2 && scratch != r2);
- // r2 = FunctionCallbackInfo&
- // Arguments is after the return address.
- __ AddP(r2, sp, Operand(kFunctionCallbackInfoOffset));
- // FunctionCallbackInfo::implicit_args_
- __ StoreP(scratch, MemOperand(r2, 0 * kPointerSize));
- // FunctionCallbackInfo::values_
- __ AddP(ip, scratch, Operand((FCA::kArgsLength - 1 + argc()) * kPointerSize));
- __ StoreP(ip, MemOperand(r2, 1 * kPointerSize));
- // FunctionCallbackInfo::length_ = argc
- __ LoadImmP(ip, Operand(argc()));
- __ StoreW(ip, MemOperand(r2, 2 * kPointerSize));
-
- ExternalReference thunk_ref = ExternalReference::invoke_function_callback();
-
- AllowExternalCallThatCantCauseGC scope(masm);
- // Stores return the first js argument
- int return_value_offset = 2 + FCA::kReturnValueOffset;
- MemOperand return_value_operand(fp, return_value_offset * kPointerSize);
- const int stack_space = argc() + FCA::kArgsLength + 1;
- MemOperand* stack_space_operand = nullptr;
- CallApiFunctionAndReturn(masm, api_function_address, thunk_ref, stack_space,
- stack_space_operand, return_value_operand);
-}
-
-void CallApiGetterStub::Generate(MacroAssembler* masm) {
- int arg0Slot = 0;
- int accessorInfoSlot = 0;
- int apiStackSpace = 0;
- // Build v8::PropertyCallbackInfo::args_ array on the stack and push property
- // name below the exit frame to make GC aware of them.
- STATIC_ASSERT(PropertyCallbackArguments::kShouldThrowOnErrorIndex == 0);
- STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 1);
- STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 2);
- STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 3);
- STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 4);
- STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 5);
- STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 6);
- STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 7);
-
- Register receiver = ApiGetterDescriptor::ReceiverRegister();
- Register holder = ApiGetterDescriptor::HolderRegister();
- Register callback = ApiGetterDescriptor::CallbackRegister();
- Register scratch = r6;
- DCHECK(!AreAliased(receiver, holder, callback, scratch));
-
- Register api_function_address = r4;
-
- __ push(receiver);
- // Push data from AccessorInfo.
- __ LoadP(scratch, FieldMemOperand(callback, AccessorInfo::kDataOffset));
- __ push(scratch);
- __ LoadRoot(scratch, RootIndex::kUndefinedValue);
- __ Push(scratch, scratch);
- __ Move(scratch, ExternalReference::isolate_address(isolate()));
- __ Push(scratch, holder);
- __ Push(Smi::kZero); // should_throw_on_error -> false
- __ LoadP(scratch, FieldMemOperand(callback, AccessorInfo::kNameOffset));
- __ push(scratch);
-
- // v8::PropertyCallbackInfo::args_ array and name handle.
- const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
-
- // Load address of v8::PropertyAccessorInfo::args_ array and name handle.
- __ LoadRR(r2, sp); // r2 = Handle<Name>
- __ AddP(r3, r2, Operand(1 * kPointerSize)); // r3 = v8::PCI::args_
-
- // If ABI passes Handles (pointer-sized struct) in a register:
- //
- // Create 2 extra slots on stack:
- // [0] space for DirectCEntryStub's LR save
- // [1] AccessorInfo&
- //
- // Otherwise:
- //
- // Create 3 extra slots on stack:
- // [0] space for DirectCEntryStub's LR save
- // [1] copy of Handle (first arg)
- // [2] AccessorInfo&
- if (ABI_PASSES_HANDLES_IN_REGS) {
- accessorInfoSlot = kStackFrameExtraParamSlot + 1;
- apiStackSpace = 2;
- } else {
- arg0Slot = kStackFrameExtraParamSlot + 1;
- accessorInfoSlot = arg0Slot + 1;
- apiStackSpace = 3;
- }
-
- FrameScope frame_scope(masm, StackFrame::MANUAL);
- __ EnterExitFrame(false, apiStackSpace);
-
- if (!ABI_PASSES_HANDLES_IN_REGS) {
- // pass 1st arg by reference
- __ StoreP(r2, MemOperand(sp, arg0Slot * kPointerSize));
- __ AddP(r2, sp, Operand(arg0Slot * kPointerSize));
- }
-
- // Create v8::PropertyCallbackInfo object on the stack and initialize
- // its args_ field.
- __ StoreP(r3, MemOperand(sp, accessorInfoSlot * kPointerSize));
- __ AddP(r3, sp, Operand(accessorInfoSlot * kPointerSize));
- // r3 = v8::PropertyCallbackInfo&
-
- ExternalReference thunk_ref =
- ExternalReference::invoke_accessor_getter_callback();
-
- __ LoadP(scratch, FieldMemOperand(callback, AccessorInfo::kJsGetterOffset));
- __ LoadP(api_function_address,
- FieldMemOperand(scratch, Foreign::kForeignAddressOffset));
-
- // +3 is to skip prolog, return address and name handle.
- MemOperand return_value_operand(
- fp, (PropertyCallbackArguments::kReturnValueOffset + 3) * kPointerSize);
- CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
- kStackUnwindSpace, nullptr, return_value_operand);
-}
-
-#undef __
-
} // namespace internal
} // namespace v8
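
Most of the deletion above is CallApiFunctionAndReturn, whose job was the HandleScope bookkeeping around every API callback: save next/limit/level, bump the level, call out, then restore and take the slow path if the limit moved. A C++ sketch of the protocol the assembly implemented; HandleScopeData mirrors the fields behind kNextOffset/kLimitOffset/kLevelOffset, and the slow-path runtime call is stubbed out:

    // Sketch of the HandleScope save/restore protocol from the deleted stub.
    struct HandleScopeData {
      void* next;
      void* limit;
      int level;
    };

    void* CallApiAndRestoreScope(HandleScopeData* data, void* (*callback)()) {
      void* saved_next = data->next;    // kept in r6 by the stub
      void* saved_limit = data->limit;  // kept in r7
      data->level++;                    // kept in r8

      void* result = callback();  // DirectCEntry call in the real code

      // No more valid handles (the result handle was the last one).
      data->next = saved_next;
      data->level--;
      if (data->limit != saved_limit) {
        // Limit moved: extensions were allocated; the stub branched to
        // delete_handle_scope_extensions here before restoring the limit.
        data->limit = saved_limit;
      }
      return result;
    }
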
diff --git a/deps/v8/src/s390/code-stubs-s390.h b/deps/v8/src/s390/code-stubs-s390.h
deleted file mode 100644
index 269d25ffb4..0000000000
--- a/deps/v8/src/s390/code-stubs-s390.h
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_S390_CODE_STUBS_S390_H_
-#define V8_S390_CODE_STUBS_S390_H_
-
-namespace v8 {
-namespace internal {
-
-// Trampoline stub to call into native code. To call safely into native code
-// in the presence of compacting GC (which can move code objects) we need to
-// keep the code which called into native pinned in the memory. Currently the
-// simplest approach is to generate such stub early enough so it can never be
-// moved by GC
-class DirectCEntryStub : public PlatformCodeStub {
- public:
- explicit DirectCEntryStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
- void GenerateCall(MacroAssembler* masm, Register target);
-
- private:
- Movability NeedsImmovableCode() override { return kImmovable; }
-
- DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
- DEFINE_PLATFORM_CODE_STUB(DirectCEntry, PlatformCodeStub);
-};
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_S390_CODE_STUBS_S390_H_
diff --git a/deps/v8/src/s390/codegen-s390.cc b/deps/v8/src/s390/codegen-s390.cc
deleted file mode 100644
index 00342955e6..0000000000
--- a/deps/v8/src/s390/codegen-s390.cc
+++ /dev/null
@@ -1,52 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#if V8_TARGET_ARCH_S390
-
-#include <memory>
-
-#include "src/codegen.h"
-#include "src/macro-assembler.h"
-#include "src/s390/simulator-s390.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ masm.
-
-UnaryMathFunction CreateSqrtFunction() {
-#if defined(USE_SIMULATOR)
- return nullptr;
-#else
- v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
- size_t allocated = 0;
- byte* buffer = AllocatePage(page_allocator,
- page_allocator->GetRandomMmapAddr(), &allocated);
- if (buffer == nullptr) return nullptr;
-
- MacroAssembler masm(AssemblerOptions{}, buffer, static_cast<int>(allocated));
-
- __ MovFromFloatParameter(d0);
- __ sqdbr(d0, d0);
- __ MovToFloatResult(d0);
- __ Ret();
-
- CodeDesc desc;
- masm.GetCode(nullptr, &desc);
- DCHECK(ABI_USES_FUNCTION_DESCRIPTORS ||
- !RelocInfo::RequiresRelocationAfterCodegen(desc));
-
- Assembler::FlushICache(buffer, allocated);
- CHECK(SetPermissions(page_allocator, buffer, allocated,
- PageAllocator::kReadExecute));
- return FUNCTION_CAST<UnaryMathFunction>(buffer);
-#endif
-}
-
-#undef __
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_TARGET_ARCH_S390
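
codegen-s390.cc is removed outright; its only content was CreateSqrtFunction, a textbook example of the generate-then-execute pattern: allocate a writable buffer, emit code, flush the icache, flip the buffer to read+execute, and cast it to a function pointer. A POSIX-flavoured sketch of the same pattern under stated assumptions (mmap/mprotect are standard; the input bytes are assumed to be valid host machine code, not real s390 encodings):

    #include <sys/mman.h>
    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    using UnaryMathFunction = double (*)(double);

    // 'code'/'size' stand in for MacroAssembler output.
    UnaryMathFunction CreateFunctionSketch(const uint8_t* code, size_t size) {
      // 1. Allocate a writable, page-backed buffer.
      void* buffer = mmap(nullptr, size, PROT_READ | PROT_WRITE,
                          MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      if (buffer == MAP_FAILED) return nullptr;

      // 2. Emit the machine code (the real code drove a MacroAssembler here).
      std::memcpy(buffer, code, size);

      // 3. Flip the buffer to read+execute before handing it out; the
      //    original also called Assembler::FlushICache at this point.
      if (mprotect(buffer, size, PROT_READ | PROT_EXEC) != 0) {
        munmap(buffer, size);
        return nullptr;
      }
      return reinterpret_cast<UnaryMathFunction>(buffer);
    }
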
diff --git a/deps/v8/src/s390/constants-s390.h b/deps/v8/src/s390/constants-s390.h
index 8dd330b8f8..25b76fa4ea 100644
--- a/deps/v8/src/s390/constants-s390.h
+++ b/deps/v8/src/s390/constants-s390.h
@@ -29,8 +29,7 @@
namespace v8 {
namespace internal {
-// TODO(sigurds): Change this value once we use relative jumps.
-constexpr size_t kMaxPCRelativeCodeRangeInMB = 0;
+constexpr size_t kMaxPCRelativeCodeRangeInMB = 4096;
// Number of registers
const int kNumRegisters = 16;
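
Raising kMaxPCRelativeCodeRangeInMB from 0 to 4096 tells the code generator that targets within roughly 4 GB of the pc are reachable with relative branches (s390's BRCL takes a signed 32-bit halfword offset, which works out to about that range). A small sketch of the range check a caller might perform; the helper name is illustrative, not V8 API:

    #include <cstddef>
    #include <cstdint>

    constexpr size_t kMaxPCRelativeCodeRangeInMB = 4096;

    // True if 'target' is reachable from 'pc' with a pc-relative branch.
    // Assumes a 64-bit uintptr_t, as on s390x.
    bool InPCRelativeRange(uintptr_t pc, uintptr_t target) {
      constexpr uintptr_t kMaxOffset =
          uintptr_t{kMaxPCRelativeCodeRangeInMB} * 1024 * 1024;
      uintptr_t distance = pc > target ? pc - target : target - pc;
      return distance <= kMaxOffset;
    }
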
diff --git a/deps/v8/src/s390/cpu-s390.cc b/deps/v8/src/s390/cpu-s390.cc
index d0d54a8a6b..e00495ae09 100644
--- a/deps/v8/src/s390/cpu-s390.cc
+++ b/deps/v8/src/s390/cpu-s390.cc
@@ -3,10 +3,9 @@
// found in the LICENSE file.
// CPU specific code for s390 independent of OS goes here.
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_S390
-#include "src/assembler.h"
+
+#include "src/cpu-features.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/s390/deoptimizer-s390.cc b/deps/v8/src/s390/deoptimizer-s390.cc
index d2ae1ded27..6e090227b8 100644
--- a/deps/v8/src/s390/deoptimizer-s390.cc
+++ b/deps/v8/src/s390/deoptimizer-s390.cc
@@ -2,23 +2,22 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/assembler-inl.h"
#include "src/deoptimizer.h"
+#include "src/macro-assembler.h"
#include "src/register-configuration.h"
#include "src/safepoint-table.h"
namespace v8 {
namespace internal {
-// LAY + LGHI/LHI + BRCL
-const int Deoptimizer::table_entry_size_ = 16;
-
-#define __ masm()->
+#define __ masm->
// This code tries to be close to ia32 code so that any changes can be
// easily ported.
-void Deoptimizer::TableEntryGenerator::Generate() {
- GeneratePrologue();
+void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
+ Isolate* isolate,
+ DeoptimizeKind deopt_kind) {
+ NoRootArrayScope no_root_array(masm);
// Save all the registers onto the stack
const int kNumberOfRegisters = Register::kNumRegisters;
@@ -50,15 +49,15 @@ void Deoptimizer::TableEntryGenerator::Generate() {
__ lay(sp, MemOperand(sp, -kNumberOfRegisters * kPointerSize));
__ StoreMultipleP(r0, sp, MemOperand(sp)); // Save all 16 registers
- __ mov(ip, Operand(ExternalReference::Create(
- IsolateAddressId::kCEntryFPAddress, isolate())));
- __ StoreP(fp, MemOperand(ip));
+ __ mov(r1, Operand(ExternalReference::Create(
+ IsolateAddressId::kCEntryFPAddress, isolate)));
+ __ StoreP(fp, MemOperand(r1));
const int kSavedRegistersAreaSize =
(kNumberOfRegisters * kPointerSize) + kDoubleRegsSize + kFloatRegsSize;
- // Get the bailout id from the stack.
- __ LoadP(r4, MemOperand(sp, kSavedRegistersAreaSize));
+ // The bailout id is passed using r10
+ __ LoadRR(r4, r10);
// Cleanse the Return address for 31-bit
__ CleanseP(r14);
@@ -67,7 +66,8 @@ void Deoptimizer::TableEntryGenerator::Generate() {
// address for lazy deoptimization) and compute the fp-to-sp delta in
// register r6.
__ LoadRR(r5, r14);
- __ la(r6, MemOperand(sp, kSavedRegistersAreaSize + (1 * kPointerSize)));
+
+ __ la(r6, MemOperand(sp, kSavedRegistersAreaSize));
__ SubP(r6, fp, r6);
// Allocate a new deoptimizer object.
@@ -79,17 +79,17 @@ void Deoptimizer::TableEntryGenerator::Generate() {
__ JumpIfSmi(r3, &context_check);
__ LoadP(r2, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ bind(&context_check);
- __ LoadImmP(r3, Operand(static_cast<int>(deopt_kind())));
+ __ LoadImmP(r3, Operand(static_cast<int>(deopt_kind)));
// r4: bailout id already loaded.
// r5: code address or 0 already loaded.
// r6: Fp-to-sp delta.
// Parm6: isolate is passed on the stack.
- __ mov(r7, Operand(ExternalReference::isolate_address(isolate())));
+ __ mov(r7, Operand(ExternalReference::isolate_address(isolate)));
__ StoreP(r7, MemOperand(sp, kStackFrameExtraParamSlot * kPointerSize));
// Call Deoptimizer::New().
{
- AllowExternalCallThatCantCauseGC scope(masm());
+ AllowExternalCallThatCantCauseGC scope(masm);
__ CallCFunction(ExternalReference::new_deoptimizer_function(), 6);
}
@@ -134,8 +134,9 @@ void Deoptimizer::TableEntryGenerator::Generate() {
__ LoadFloat32(d0, MemOperand(sp, src_offset));
__ StoreFloat32(d0, MemOperand(r3, dst_offset));
}
- // Remove the bailout id and the saved registers from the stack.
- __ la(sp, MemOperand(sp, kSavedRegistersAreaSize + (1 * kPointerSize)));
+
+ // Remove the saved registers from the stack.
+ __ la(sp, MemOperand(sp, kSavedRegistersAreaSize));
// Compute a pointer to the unwinding limit in register r4; that is
// the first stack slot not part of the input frame.
@@ -163,7 +164,7 @@ void Deoptimizer::TableEntryGenerator::Generate() {
__ PrepareCallCFunction(1, r3);
// Call Deoptimizer::ComputeOutputFrames().
{
- AllowExternalCallThatCantCauseGC scope(masm());
+ AllowExternalCallThatCantCauseGC scope(masm);
__ CallCFunction(ExternalReference::compute_output_frames_function(), 1);
}
__ pop(r2); // Restore deoptimizer object (class Deoptimizer).
@@ -187,7 +188,7 @@ void Deoptimizer::TableEntryGenerator::Generate() {
__ b(&inner_loop_header, Label::kNear);
__ bind(&inner_push_loop);
- __ AddP(r5, Operand(-sizeof(intptr_t)));
+ __ SubP(r5, Operand(sizeof(intptr_t)));
__ AddP(r8, r4, r5);
__ LoadP(r8, MemOperand(r8, FrameDescription::frame_content_offset()));
__ push(r8);
@@ -224,32 +225,12 @@ void Deoptimizer::TableEntryGenerator::Generate() {
}
}
- __ InitializeRootRegister();
-
__ pop(ip); // get continuation, leave pc on stack
__ pop(r14);
__ Jump(ip);
__ stop("Unreachable.");
}
-void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
- // Create a sequence of deoptimization entries. Note that any
- // registers may be still live.
- Label done;
- for (int i = 0; i < count(); i++) {
- int start = masm()->pc_offset();
- USE(start);
- __ lay(sp, MemOperand(sp, -kPointerSize));
- __ LoadImmP(ip, Operand(i));
- __ b(&done);
- int end = masm()->pc_offset();
- USE(end);
- DCHECK(masm()->pc_offset() - start == table_entry_size_);
- }
- __ bind(&done);
- __ StoreP(ip, MemOperand(sp));
-}
-
bool Deoptimizer::PadTopOfStackRegister() { return false; }
void FrameDescription::SetCallerPc(unsigned offset, intptr_t value) {
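
The deleted GeneratePrologue built a table of fixed-size entries (table_entry_size_ = 16 bytes), one per bailout id, each pushing its index and branching to a shared body; the rewrite keeps a single shared entry and has the call site pass the id in r10 (see CallForDeoptimization in macro-assembler-s390.cc further down). A toy C++ sketch contrasting the two schemes, with illustrative names:

    void DeoptBody(int deopt_id) { /* build FrameDescriptions, etc. */ }

    // Old scheme: one tiny fixed-size entry per id; the id is implied by
    // which entry the deoptimized code jumped to.
    template <int kId>
    void TableEntry() { DeoptBody(kId); }

    // New scheme: a single shared entry; the caller materializes the id in
    // a register first (LGHI r10, id on s390).
    void SharedEntry(int deopt_id) { DeoptBody(deopt_id); }
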
diff --git a/deps/v8/src/s390/disasm-s390.cc b/deps/v8/src/s390/disasm-s390.cc
index f62df67738..f11f441c8a 100644
--- a/deps/v8/src/s390/disasm-s390.cc
+++ b/deps/v8/src/s390/disasm-s390.cc
@@ -32,13 +32,12 @@
#include "src/base/platform/platform.h"
#include "src/disasm.h"
#include "src/macro-assembler.h"
+#include "src/register-configuration.h"
#include "src/s390/constants-s390.h"
namespace v8 {
namespace internal {
-const auto GetRegConfig = RegisterConfiguration::Default;
-
//------------------------------------------------------------------------------
// Decoder decodes and disassembles instructions into an output buffer.
@@ -112,7 +111,7 @@ void Decoder::PrintRegister(int reg) {
// Print the double FP register name according to the active name converter.
void Decoder::PrintDRegister(int reg) {
- Print(GetRegConfig()->GetDoubleRegisterName(reg));
+ Print(RegisterName(DoubleRegister::from_code(reg)));
}
// Print SoftwareInterrupt codes. Factoring this out reduces the complexity of
@@ -938,12 +937,11 @@ const char* NameConverter::NameOfConstant(byte* addr) const {
}
const char* NameConverter::NameOfCPURegister(int reg) const {
- return v8::internal::GetRegConfig()->GetGeneralRegisterName(reg);
+ return RegisterName(i::Register::from_code(reg));
}
const char* NameConverter::NameOfByteCPURegister(int reg) const {
UNREACHABLE(); // S390 does not have the concept of a byte register
- return "nobytereg";
}
const char* NameConverter::NameOfXMMRegister(int reg) const {
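
The disassembler now resolves names through RegisterName(Register::from_code(reg)) instead of a RegisterConfiguration lookup. The pattern is just a code-to-name bijection; a small sketch, using plain s390 r0-r15 names rather than V8's aliases:

    // Sketch of the Register::from_code / RegisterName pairing.
    struct Register {
      int code;
      static constexpr Register from_code(int code) { return Register{code}; }
    };

    const char* RegisterName(Register r) {
      static const char* const kNames[] = {
          "r0", "r1", "r2",  "r3",  "r4",  "r5",  "r6",  "r7",
          "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"};
      return (r.code >= 0 && r.code < 16) ? kNames[r.code] : "noreg";
    }
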
diff --git a/deps/v8/src/s390/frame-constants-s390.cc b/deps/v8/src/s390/frame-constants-s390.cc
index ca4a191dde..c91a826ccb 100644
--- a/deps/v8/src/s390/frame-constants-s390.cc
+++ b/deps/v8/src/s390/frame-constants-s390.cc
@@ -4,14 +4,12 @@
#if V8_TARGET_ARCH_S390
-#include "src/assembler.h"
+#include "src/s390/frame-constants-s390.h"
+
+#include "src/assembler-inl.h"
#include "src/frame-constants.h"
#include "src/macro-assembler.h"
-#include "src/s390/assembler-s390-inl.h"
-#include "src/s390/assembler-s390.h"
-#include "src/s390/macro-assembler-s390.h"
-#include "src/s390/frame-constants-s390.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/s390/frame-constants-s390.h b/deps/v8/src/s390/frame-constants-s390.h
index 0d89ceedb3..c2ba4edf2b 100644
--- a/deps/v8/src/s390/frame-constants-s390.h
+++ b/deps/v8/src/s390/frame-constants-s390.h
@@ -15,6 +15,8 @@ class EntryFrameConstants : public AllStatic {
public:
static constexpr int kCallerFPOffset =
-(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize);
+ // Stack offsets for arguments passed to JSEntry.
+ static constexpr int kArgvOffset = 20 * kSystemPointerSize;
};
class ExitFrameConstants : public TypedFrameConstants {
@@ -35,7 +37,7 @@ class ExitFrameConstants : public TypedFrameConstants {
class WasmCompileLazyFrameConstants : public TypedFrameConstants {
public:
- static constexpr int kNumberOfSavedGpParamRegs = 5;
+ static constexpr int kNumberOfSavedGpParamRegs = 4;
#ifdef V8_TARGET_ARCH_S390X
static constexpr int kNumberOfSavedFpParamRegs = 4;
#else
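
EntryFrameConstants gains kArgvOffset because JSEntry now picks argv up from the stack at a fixed offset rather than receiving it in a register; the 20-slot figure is whatever JSEntry's prologue lays down, so treat the exact layout as defined there. A trivial sketch of such a fixed-offset stack load, assuming s390x pointer size:

    #include <cstdint>
    #include <cstring>

    constexpr int kSystemPointerSize = 8;  // s390x
    constexpr int kArgvOffset = 20 * kSystemPointerSize;

    // MemOperand(fp, EntryFrameConstants::kArgvOffset) as plain C++: argv
    // is a pointer-sized slot at a fixed offset from the frame base.
    inline void* LoadArgv(const uint8_t* fp) {
      void* argv;
      std::memcpy(&argv, fp + kArgvOffset, sizeof(argv));
      return argv;
    }
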
diff --git a/deps/v8/src/s390/interface-descriptors-s390.cc b/deps/v8/src/s390/interface-descriptors-s390.cc
index dee5452ea2..e7d4c8e449 100644
--- a/deps/v8/src/s390/interface-descriptors-s390.cc
+++ b/deps/v8/src/s390/interface-descriptors-s390.cc
@@ -6,6 +6,8 @@
#include "src/interface-descriptors.h"
+#include "src/frames.h"
+
namespace v8 {
namespace internal {
@@ -69,12 +71,6 @@ void TypeofDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void CallFunctionDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {r3};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
void CallTrampolineDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// r2 : number of arguments
@@ -206,10 +202,9 @@ void ArgumentsAdaptorDescriptor::InitializePlatformSpecific(
void ApiCallbackDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
- JavaScriptFrame::context_register(), // callee context
- r6, // call_data
- r4, // holder
- r3, // api_function_address
+ JavaScriptFrame::context_register(), // kTargetContext
+ r3, // kApiFunctionAddress
+ r4, // kArgc
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
@@ -261,6 +256,12 @@ void FrameDropperTrampolineDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+void RunMicrotasksEntryDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {r2, r3};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/s390/macro-assembler-s390.cc b/deps/v8/src/s390/macro-assembler-s390.cc
index 1a047e3eba..69fca2b15e 100644
--- a/deps/v8/src/s390/macro-assembler-s390.cc
+++ b/deps/v8/src/s390/macro-assembler-s390.cc
@@ -12,36 +12,27 @@
#include "src/bootstrapper.h"
#include "src/callable.h"
#include "src/code-factory.h"
-#include "src/code-stubs.h"
+#include "src/counters.h"
#include "src/debug/debug.h"
#include "src/external-reference-table.h"
#include "src/frames-inl.h"
-#include "src/instruction-stream.h"
+#include "src/macro-assembler.h"
+#include "src/objects/smi.h"
#include "src/register-configuration.h"
#include "src/runtime/runtime.h"
+#include "src/snapshot/embedded-data.h"
#include "src/snapshot/snapshot.h"
#include "src/wasm/wasm-code-manager.h"
+// Satisfy cpplint check, but don't include platform-specific header. It is
+// included recursively via macro-assembler.h.
+#if 0
#include "src/s390/macro-assembler-s390.h"
+#endif
namespace v8 {
namespace internal {
-MacroAssembler::MacroAssembler(Isolate* isolate,
- const AssemblerOptions& options, void* buffer,
- int size, CodeObjectRequired create_code_object)
- : TurboAssembler(isolate, options, buffer, size, create_code_object) {
- if (create_code_object == CodeObjectRequired::kYes) {
- // Unlike TurboAssembler, which can be used off the main thread and may not
- // allocate, macro assembler creates its own copy of the self-reference
- // marker in order to disambiguate between self-references during nested
- // code generation (e.g.: codegen of the current object triggers stub
- // compilation through CodeStub::GetCode()).
- code_object_ = Handle<HeapObject>::New(
- *isolate->factory()->NewSelfReferenceMarker(), isolate);
- }
-}
-
int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
Register exclusion1,
Register exclusion2,
@@ -122,8 +113,7 @@ int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
void TurboAssembler::LoadFromConstantsTable(Register destination,
int constant_index) {
- DCHECK(isolate()->heap()->RootCanBeTreatedAsConstant(
- RootIndex::kBuiltinsConstantsTable));
+ DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kBuiltinsConstantsTable));
const uint32_t offset =
FixedArray::kHeaderSize + constant_index * kPointerSize - kHeapObjectTag;
@@ -180,30 +170,31 @@ void TurboAssembler::Jump(Address target, RelocInfo::Mode rmode,
void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
Condition cond) {
DCHECK(RelocInfo::IsCodeTarget(rmode));
- if (FLAG_embedded_builtins) {
- if (root_array_available_ && options().isolate_independent_code) {
- Register scratch = r1;
- IndirectLoadConstant(scratch, code);
- la(scratch, MemOperand(scratch, Code::kHeaderSize - kHeapObjectTag));
- b(cond, scratch);
- return;
- } else if (options().inline_offheap_trampolines) {
- int builtin_index = Builtins::kNoBuiltinId;
- if (isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
- Builtins::IsIsolateIndependent(builtin_index)) {
- // Inline the trampoline.
- RecordCommentForOffHeapTrampoline(builtin_index);
- EmbeddedData d = EmbeddedData::FromBlob();
- Address entry = d.InstructionStartOfBuiltin(builtin_index);
- // Use ip directly instead of using UseScratchRegisterScope, as we do
- // not preserve scratch registers across calls.
- mov(ip, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
- Jump(ip, cond);
- return;
- }
+ DCHECK_IMPLIES(options().isolate_independent_code,
+ Builtins::IsIsolateIndependentBuiltin(*code));
+
+ int builtin_index = Builtins::kNoBuiltinId;
+ bool target_is_isolate_independent_builtin =
+ isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
+ Builtins::IsIsolateIndependent(builtin_index);
+
+ if (options().inline_offheap_trampolines &&
+ target_is_isolate_independent_builtin) {
+ Label skip;
+ if (cond != al) {
+ b(NegateCondition(cond), &skip, Label::kNear);
}
+ // Inline the trampoline.
+ RecordCommentForOffHeapTrampoline(builtin_index);
+ CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
+ EmbeddedData d = EmbeddedData::FromBlob();
+ Address entry = d.InstructionStartOfBuiltin(builtin_index);
+ mov(ip, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
+ b(ip);
+ bind(&skip);
+ return;
}
- jump(code, rmode, cond);
+ jump(code, RelocInfo::RELATIVE_CODE_TARGET, cond);
}
void TurboAssembler::Call(Register target) {
@@ -241,30 +232,23 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
Condition cond) {
DCHECK(RelocInfo::IsCodeTarget(rmode) && cond == al);
- if (FLAG_embedded_builtins) {
- if (root_array_available_ && options().isolate_independent_code) {
- // Use ip directly instead of using UseScratchRegisterScope, as we do not
- // preserve scratch registers across calls.
- IndirectLoadConstant(ip, code);
- la(ip, MemOperand(ip, Code::kHeaderSize - kHeapObjectTag));
- Call(ip);
- return;
- } else if (options().inline_offheap_trampolines) {
- int builtin_index = Builtins::kNoBuiltinId;
- if (isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
- Builtins::IsIsolateIndependent(builtin_index)) {
- // Inline the trampoline.
- RecordCommentForOffHeapTrampoline(builtin_index);
- DCHECK(Builtins::IsBuiltinId(builtin_index));
- EmbeddedData d = EmbeddedData::FromBlob();
- Address entry = d.InstructionStartOfBuiltin(builtin_index);
- // Use ip directly instead of using UseScratchRegisterScope, as we do
- // not preserve scratch registers across calls.
- mov(ip, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
- Call(ip);
- return;
- }
- }
+ DCHECK_IMPLIES(options().isolate_independent_code,
+ Builtins::IsIsolateIndependentBuiltin(*code));
+ int builtin_index = Builtins::kNoBuiltinId;
+ bool target_is_isolate_independent_builtin =
+ isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
+ Builtins::IsIsolateIndependent(builtin_index);
+
+ if (options().inline_offheap_trampolines &&
+ target_is_isolate_independent_builtin) {
+ // Inline the trampoline.
+ RecordCommentForOffHeapTrampoline(builtin_index);
+ CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
+ EmbeddedData d = EmbeddedData::FromBlob();
+ Address entry = d.InstructionStartOfBuiltin(builtin_index);
+ mov(ip, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
+ Call(ip);
+ return;
}
call(code, rmode);
}
@@ -294,7 +278,7 @@ void TurboAssembler::Push(Handle<HeapObject> handle) {
push(r0);
}
-void TurboAssembler::Push(Smi* smi) {
+void TurboAssembler::Push(Smi smi) {
mov(r0, Operand(smi));
push(r0);
}
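
Push(Smi smi) is the first of many Smi* to Smi conversions in this file: Smi is now a value type whose ptr() exposes the tagged bits, replacing reinterpret_cast on a dummy pointer. The LoadSmiLiteral/CmpSmiLiteral hunks further down shift by 32 because, with the 31-bit smi layout used on s390x, the payload sits in the upper word. A sketch of that encoding, assuming exactly the layout the DCHECKs in those hunks describe:

    #include <cstdint>

    // Sketch of the 64-bit Smi encoding: a 31-bit payload in the upper
    // word, zeros (including the tag bit) in the lower word.
    class Smi {
     public:
      static Smi FromInt(int32_t value) {
        return Smi(static_cast<intptr_t>(value) << 32);
      }
      intptr_t ptr() const { return ptr_; }  // the tagged bits
      int32_t value() const { return static_cast<int32_t>(ptr_ >> 32); }

     private:
      explicit Smi(intptr_t ptr) : ptr_(ptr) {}
      intptr_t ptr_;
    };

    // CmpSmiLiteral's fast path, cih(src1, Operand(smi.ptr() >> 32)), can
    // therefore compare just the meaningful upper half.
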
@@ -431,7 +415,8 @@ void TurboAssembler::MultiPopDoubles(RegList dregs, Register location) {
void TurboAssembler::LoadRoot(Register destination, RootIndex index,
Condition) {
- LoadP(destination, MemOperand(kRootRegister, RootRegisterOffset(index)), r0);
+ LoadP(destination,
+ MemOperand(kRootRegister, RootRegisterOffsetForRootIndex(index)), r0);
}
void MacroAssembler::RecordWriteField(Register object, int offset,
@@ -500,24 +485,42 @@ void TurboAssembler::RestoreRegisters(RegList registers) {
void TurboAssembler::CallRecordWriteStub(
Register object, Register address,
RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode) {
+ CallRecordWriteStub(
+ object, address, remembered_set_action, fp_mode,
+ isolate()->builtins()->builtin_handle(Builtins::kRecordWrite),
+ kNullAddress);
+}
+
+void TurboAssembler::CallRecordWriteStub(
+ Register object, Register address,
+ RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode,
+ Address wasm_target) {
+ CallRecordWriteStub(object, address, remembered_set_action, fp_mode,
+ Handle<Code>::null(), wasm_target);
+}
+
+void TurboAssembler::CallRecordWriteStub(
+ Register object, Register address,
+ RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode,
+ Handle<Code> code_target, Address wasm_target) {
+ DCHECK_NE(code_target.is_null(), wasm_target == kNullAddress);
// TODO(albertnetymk): For now we ignore remembered_set_action and fp_mode,
// i.e. always emit remember set and save FP registers in RecordWriteStub. If
// large performance regression is observed, we should use these values to
// avoid unnecessary work.
- Callable const callable =
- Builtins::CallableFor(isolate(), Builtins::kRecordWrite);
- RegList registers = callable.descriptor().allocatable_registers();
+ RecordWriteDescriptor descriptor;
+ RegList registers = descriptor.allocatable_registers();
SaveRegisters(registers);
- Register object_parameter(callable.descriptor().GetRegisterParameter(
- RecordWriteDescriptor::kObject));
+ Register object_parameter(
+ descriptor.GetRegisterParameter(RecordWriteDescriptor::kObject));
Register slot_parameter(
- callable.descriptor().GetRegisterParameter(RecordWriteDescriptor::kSlot));
- Register remembered_set_parameter(callable.descriptor().GetRegisterParameter(
- RecordWriteDescriptor::kRememberedSet));
- Register fp_mode_parameter(callable.descriptor().GetRegisterParameter(
- RecordWriteDescriptor::kFPMode));
+ descriptor.GetRegisterParameter(RecordWriteDescriptor::kSlot));
+ Register remembered_set_parameter(
+ descriptor.GetRegisterParameter(RecordWriteDescriptor::kRememberedSet));
+ Register fp_mode_parameter(
+ descriptor.GetRegisterParameter(RecordWriteDescriptor::kFPMode));
Push(object);
Push(address);
@@ -527,7 +530,11 @@ void TurboAssembler::CallRecordWriteStub(
Move(remembered_set_parameter, Smi::FromEnum(remembered_set_action));
Move(fp_mode_parameter, Smi::FromEnum(fp_mode));
- Call(callable.code(), RelocInfo::CODE_TARGET);
+ if (code_target.is_null()) {
+ Call(wasm_target, RelocInfo::WASM_STUB_CALL);
+ } else {
+ Call(code_target, RelocInfo::CODE_TARGET);
+ }
RestoreRegisters(registers);
}
@@ -1397,12 +1404,11 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
// call sites.
Register code = kJavaScriptCallCodeStartRegister;
LoadP(code, FieldMemOperand(function, JSFunction::kCodeOffset));
- AddP(code, code, Operand(Code::kHeaderSize - kHeapObjectTag));
if (flag == CALL_FUNCTION) {
- CallJSEntry(code);
+ CallCodeObject(code);
} else {
DCHECK(flag == JUMP_FUNCTION);
- JumpToJSEntry(code);
+ JumpCodeObject(code);
}
// Continue here if InvokePrologue does handle the invocation due to
@@ -1466,14 +1472,14 @@ void MacroAssembler::PushStackHandler() {
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
// Link the current handler as the next handler.
- mov(r7, Operand(ExternalReference::Create(IsolateAddressId::kHandlerAddress,
- isolate())));
+ Move(r7, ExternalReference::Create(IsolateAddressId::kHandlerAddress,
+ isolate()));
// Buy the full stack frame for 5 slots.
lay(sp, MemOperand(sp, -StackHandlerConstants::kSize));
// Store padding.
- mov(r0, Operand(Smi::kZero));
+ lghi(r0, Operand::Zero());
StoreP(r0, MemOperand(sp)); // Padding.
// Copy the old handler into the next handler slot.
@@ -1489,8 +1495,8 @@ void MacroAssembler::PopStackHandler() {
// Pop the Next Handler into r3 and store it into Handler Address reference.
Pop(r3);
- mov(ip, Operand(ExternalReference::Create(IsolateAddressId::kHandlerAddress,
- isolate())));
+ Move(ip, ExternalReference::Create(IsolateAddressId::kHandlerAddress,
+ isolate()));
StoreP(r3, MemOperand(ip));
Drop(1); // Drop padding.
@@ -1513,25 +1519,7 @@ void MacroAssembler::CompareInstanceType(Register map, Register type_reg,
}
void MacroAssembler::CompareRoot(Register obj, RootIndex index) {
- CmpP(obj, MemOperand(kRootRegister, RootRegisterOffset(index)));
-}
-
-void MacroAssembler::CallStub(CodeStub* stub, Condition cond) {
- DCHECK(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
- Call(stub->GetCode(), RelocInfo::CODE_TARGET, cond);
-}
-
-void TurboAssembler::CallStubDelayed(CodeStub* stub) {
- DCHECK(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
- call(stub);
-}
-
-void MacroAssembler::TailCallStub(CodeStub* stub, Condition cond) {
- Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond);
-}
-
-bool TurboAssembler::AllowThisStubCall(CodeStub* stub) {
- return has_frame_ || !stub->SometimesSetsUpAFrame();
+ CmpP(obj, MemOperand(kRootRegister, RootRegisterOffsetForRootIndex(index)));
}
void MacroAssembler::TryDoubleToInt32Exact(Register result,
@@ -1595,8 +1583,7 @@ void TurboAssembler::CallRuntimeWithCEntry(Runtime::FunctionId fid,
mov(r2, Operand(f->nargs));
Move(r3, ExternalReference::Create(f));
DCHECK(!AreAliased(centry, r2, r3));
- la(centry, MemOperand(centry, Code::kHeaderSize - kHeapObjectTag));
- Call(centry);
+ CallCodeObject(centry);
}
void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
@@ -1648,7 +1635,7 @@ void MacroAssembler::JumpToInstructionStream(Address entry) {
void MacroAssembler::LoadWeakValue(Register out, Register in,
Label* target_if_cleared) {
- CmpP(in, Operand(kClearedWeakHeapObject));
+ Cmp32(in, Operand(kClearedWeakHeapObjectLower32));
beq(target_if_cleared);
AndP(out, in, Operand(~kWeakHeapObjectMask));
@@ -1822,6 +1809,10 @@ void MacroAssembler::AssertGeneratorObject(Register object) {
CompareInstanceType(map, instance_type, JS_GENERATOR_OBJECT_TYPE);
beq(&do_check);
+ // Check if JSAsyncFunctionObject (See MacroAssembler::CompareInstanceType)
+ CmpP(instance_type, Operand(JS_ASYNC_FUNCTION_OBJECT_TYPE));
+ beq(&do_check);
+
// Check if JSAsyncGeneratorObject (See MacroAssembler::CompareInstanceType)
CmpP(instance_type, Operand(JS_ASYNC_GENERATOR_OBJECT_TYPE));
@@ -1930,6 +1921,20 @@ void TurboAssembler::CallCFunctionHelper(Register function,
DCHECK_LE(num_reg_arguments + num_double_arguments, kMaxCParameters);
DCHECK(has_frame());
+ // Save the frame pointer and PC so that the stack layout remains iterable,
+ // even without an ExitFrame which normally exists between JS and C frames.
+ if (isolate() != nullptr) {
+ Register scratch = r6;
+ push(scratch);
+
+ Move(scratch, ExternalReference::fast_c_call_caller_pc_address(isolate()));
+ LoadPC(r0);
+ StoreP(r0, MemOperand(scratch));
+ Move(scratch, ExternalReference::fast_c_call_caller_fp_address(isolate()));
+ StoreP(fp, MemOperand(scratch));
+ pop(scratch);
+ }
+
// Just call directly. The function called cannot cause a GC, or
// allow preemption, so the return address in the link register
// stays correct.
@@ -1941,6 +1946,18 @@ void TurboAssembler::CallCFunctionHelper(Register function,
Call(dest);
+
+ if (isolate() != nullptr) {
+ // We don't unset the PC; the FP is the source of truth.
+ Register scratch1 = r6;
+ Register scratch2 = r7;
+ Push(scratch1, scratch2);
+ Move(scratch1, ExternalReference::fast_c_call_caller_fp_address(isolate()));
+ lghi(scratch2, Operand::Zero());
+ StoreP(scratch2, MemOperand(scratch1));
+ Pop(scratch1, scratch2);
+ }
+
int stack_passed_arguments =
CalculateStackPassedWords(num_reg_arguments, num_double_arguments);
int stack_space = kNumRequiredStackFrameSlots + stack_passed_arguments;
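
The new prologue/epilogue around CallCFunction records the caller's pc and fp in per-isolate slots (fast_c_call_caller_pc_address / fast_c_call_caller_fp_address) so the stack stays iterable across a JS-to-C transition that has no exit frame; clearing fp afterwards marks the fast call as finished, and the pc is deliberately left stale. A sketch of the protocol as an illustrative RAII guard, with raw slot pointers standing in for the ExternalReference lookups:

    #include <cstdint>

    // Illustrative guard mirroring the fast-C-call bookkeeping: publish
    // caller pc/fp before the call, clear fp after.
    struct FastCCallScope {
      uintptr_t* caller_pc_slot;
      uintptr_t* caller_fp_slot;

      FastCCallScope(uintptr_t* pc_slot, uintptr_t* fp_slot,
                     uintptr_t pc, uintptr_t fp)
          : caller_pc_slot(pc_slot), caller_fp_slot(fp_slot) {
        *caller_pc_slot = pc;  // LoadPC(r0); StoreP(r0, ...)
        *caller_fp_slot = fp;  // StoreP(fp, ...)
      }
      ~FastCCallScope() {
        // "We don't unset the PC; the FP is the source of truth."
        *caller_fp_slot = 0;
      }
    };
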
@@ -3469,8 +3486,8 @@ void TurboAssembler::LoadIntLiteral(Register dst, int value) {
Load(dst, Operand(value));
}
-void TurboAssembler::LoadSmiLiteral(Register dst, Smi* smi) {
- intptr_t value = reinterpret_cast<intptr_t>(smi);
+void TurboAssembler::LoadSmiLiteral(Register dst, Smi smi) {
+ intptr_t value = static_cast<intptr_t>(smi.ptr());
#if V8_TARGET_ARCH_S390X
DCHECK_EQ(value & 0xFFFFFFFF, 0);
// The smi value is loaded in upper 32-bits. Lower 32-bit are zeros.
@@ -3511,10 +3528,10 @@ void TurboAssembler::LoadFloat32Literal(DoubleRegister result, float value,
LoadDoubleLiteral(result, int_val, scratch);
}
-void TurboAssembler::CmpSmiLiteral(Register src1, Smi* smi, Register scratch) {
+void TurboAssembler::CmpSmiLiteral(Register src1, Smi smi, Register scratch) {
#if V8_TARGET_ARCH_S390X
if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
- cih(src1, Operand(reinterpret_cast<intptr_t>(smi) >> 32));
+ cih(src1, Operand(static_cast<intptr_t>(smi.ptr()) >> 32));
} else {
LoadSmiLiteral(scratch, smi);
cgr(src1, scratch);
@@ -3525,62 +3542,6 @@ void TurboAssembler::CmpSmiLiteral(Register src1, Smi* smi, Register scratch) {
#endif
}
-void TurboAssembler::CmpLogicalSmiLiteral(Register src1, Smi* smi,
- Register scratch) {
-#if V8_TARGET_ARCH_S390X
- if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
- clih(src1, Operand(reinterpret_cast<intptr_t>(smi) >> 32));
- } else {
- LoadSmiLiteral(scratch, smi);
- clgr(src1, scratch);
- }
-#else
- // CLFI takes 32-bit immediate
- clfi(src1, Operand(smi));
-#endif
-}
-
-void TurboAssembler::AddSmiLiteral(Register dst, Register src, Smi* smi,
- Register scratch) {
-#if V8_TARGET_ARCH_S390X
- if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
- if (dst != src) LoadRR(dst, src);
- aih(dst, Operand(reinterpret_cast<intptr_t>(smi) >> 32));
- } else {
- LoadSmiLiteral(scratch, smi);
- AddP(dst, src, scratch);
- }
-#else
- AddP(dst, src, Operand(reinterpret_cast<intptr_t>(smi)));
-#endif
-}
-
-void TurboAssembler::SubSmiLiteral(Register dst, Register src, Smi* smi,
- Register scratch) {
-#if V8_TARGET_ARCH_S390X
- if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
- if (dst != src) LoadRR(dst, src);
- aih(dst, Operand((-reinterpret_cast<intptr_t>(smi)) >> 32));
- } else {
- LoadSmiLiteral(scratch, smi);
- SubP(dst, src, scratch);
- }
-#else
- AddP(dst, src, Operand(-(reinterpret_cast<intptr_t>(smi))));
-#endif
-}
-
-void TurboAssembler::AndSmiLiteral(Register dst, Register src, Smi* smi) {
- if (dst != src) LoadRR(dst, src);
-#if V8_TARGET_ARCH_S390X
- DCHECK_EQ(reinterpret_cast<intptr_t>(smi) & 0xFFFFFFFF, 0);
- int value = static_cast<int>(reinterpret_cast<intptr_t>(smi) >> 32);
- nihf(dst, Operand(value));
-#else
- nilf(dst, Operand(reinterpret_cast<int>(smi)));
-#endif
-}
-
// Load a "pointer" sized value from the memory location
void TurboAssembler::LoadP(Register dst, const MemOperand& mem,
Register scratch) {
@@ -4402,6 +4363,12 @@ void TurboAssembler::ComputeCodeStartAddress(Register dst) {
larl(dst, Operand(-pc_offset() / 2));
}
+void TurboAssembler::LoadPC(Register dst) {
+ Label current_pc;
+ larl(dst, &current_pc);
+ bind(&current_pc);
+}
+
void TurboAssembler::JumpIfEqual(Register x, int32_t y, Label* dest) {
Cmp32(x, Operand(y));
beq(dest);
@@ -4412,6 +4379,107 @@ void TurboAssembler::JumpIfLessThan(Register x, int32_t y, Label* dest) {
blt(dest);
}
+void TurboAssembler::CallBuiltinPointer(Register builtin_pointer) {
+ STATIC_ASSERT(kSystemPointerSize == 8);
+ STATIC_ASSERT(kSmiShiftSize == 31);
+ STATIC_ASSERT(kSmiTagSize == 1);
+ STATIC_ASSERT(kSmiTag == 0);
+
+ // The builtin_pointer register contains the builtin index as a Smi.
+ // Untagging is folded into the indexing operand below.
+ ShiftRightArithP(builtin_pointer, builtin_pointer,
+ Operand(kSmiShift - kSystemPointerSizeLog2));
+ AddP(builtin_pointer, builtin_pointer,
+ Operand(IsolateData::builtin_entry_table_offset()));
+ LoadP(builtin_pointer, MemOperand(kRootRegister, builtin_pointer));
+ Call(builtin_pointer);
+}
+
+void TurboAssembler::LoadCodeObjectEntry(Register destination,
+ Register code_object) {
+ // Code objects are called differently depending on whether we are generating
+ // builtin code (which will later be embedded into the binary) or compiling
+ // user JS code at runtime.
+ // * Builtin code runs in --jitless mode and thus must not call into on-heap
+ // Code targets. Instead, we dispatch through the builtins entry table.
+ // * Codegen at runtime does not have this restriction and we can use the
+ // shorter, branchless instruction sequence. The assumption here is that
+ // targets are usually generated code and not builtin Code objects.
+
+ if (options().isolate_independent_code) {
+ DCHECK(root_array_available());
+ Label if_code_is_builtin, out;
+
+ Register scratch = r1;
+
+ DCHECK(!AreAliased(destination, scratch));
+ DCHECK(!AreAliased(code_object, scratch));
+
+ // Check whether the Code object is a builtin. If so, call its (off-heap)
+ // entry point directly without going through the (on-heap) trampoline.
+ // Otherwise, just call the Code object as always.
+
+ LoadW(scratch, FieldMemOperand(code_object, Code::kBuiltinIndexOffset));
+ CmpP(scratch, Operand(Builtins::kNoBuiltinId));
+ bne(&if_code_is_builtin);
+
+ // A non-builtin Code object, the entry point is at
+ // Code::raw_instruction_start().
+ AddP(destination, code_object, Operand(Code::kHeaderSize - kHeapObjectTag));
+ b(&out);
+
+ // A builtin Code object, the entry point is loaded from the builtin entry
+ // table.
+ // The builtin index is loaded in scratch.
+ bind(&if_code_is_builtin);
+ ShiftLeftP(destination, scratch, Operand(kSystemPointerSizeLog2));
+ AddP(destination, destination, kRootRegister);
+ LoadP(destination,
+ MemOperand(destination, IsolateData::builtin_entry_table_offset()));
+
+ bind(&out);
+ } else {
+ AddP(destination, code_object, Operand(Code::kHeaderSize - kHeapObjectTag));
+ }
+}
+
+void TurboAssembler::CallCodeObject(Register code_object) {
+ LoadCodeObjectEntry(code_object, code_object);
+ Call(code_object);
+}
+
+void TurboAssembler::JumpCodeObject(Register code_object) {
+ LoadCodeObjectEntry(code_object, code_object);
+ Jump(code_object);
+}
+
+void TurboAssembler::StoreReturnAddressAndCall(Register target) {
+ // This generates the final instruction sequence for calls to C functions
+ // once an exit frame has been constructed.
+ //
+ // Note that this assumes the caller code (i.e. the Code object currently
+ // being generated) is immovable or that the callee function cannot trigger
+ // GC, since the callee function will return to it.
+
+ Label return_label;
+ larl(r14, &return_label); // Generate the return addr of call later.
+ StoreP(r14, MemOperand(sp, kStackFrameRASlot * kPointerSize));
+
+ // zLinux ABI requires caller's frame to have sufficient space for callee
+ // preserved register save area.
+ b(target);
+ bind(&return_label);
+}
+
+void TurboAssembler::CallForDeoptimization(Address target, int deopt_id) {
+ NoRootArrayScope no_root_array(this);
+
+ // Save the deopt id in r10 (we don't need the roots array from now on).
+ DCHECK_LE(deopt_id, 0xFFFF);
+ lghi(r10, Operand(deopt_id));
+ Call(target, RelocInfo::RUNTIME_ENTRY);
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/s390/macro-assembler-s390.h b/deps/v8/src/s390/macro-assembler-s390.h
index 98f8bb6e03..243ed278a1 100644
--- a/deps/v8/src/s390/macro-assembler-s390.h
+++ b/deps/v8/src/s390/macro-assembler-s390.h
@@ -2,43 +2,21 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#ifndef INCLUDED_FROM_MACRO_ASSEMBLER_H
+#error This header must be included via macro-assembler.h
+#endif
+
#ifndef V8_S390_MACRO_ASSEMBLER_S390_H_
#define V8_S390_MACRO_ASSEMBLER_S390_H_
-#include "src/assembler.h"
#include "src/bailout-reason.h"
+#include "src/contexts.h"
#include "src/globals.h"
#include "src/s390/assembler-s390.h"
-#include "src/turbo-assembler.h"
namespace v8 {
namespace internal {
-// Give alias names to registers for calling conventions.
-constexpr Register kReturnRegister0 = r2;
-constexpr Register kReturnRegister1 = r3;
-constexpr Register kReturnRegister2 = r4;
-constexpr Register kJSFunctionRegister = r3;
-constexpr Register kContextRegister = r13;
-constexpr Register kAllocateSizeRegister = r3;
-constexpr Register kSpeculationPoisonRegister = r9;
-constexpr Register kInterpreterAccumulatorRegister = r2;
-constexpr Register kInterpreterBytecodeOffsetRegister = r6;
-constexpr Register kInterpreterBytecodeArrayRegister = r7;
-constexpr Register kInterpreterDispatchTableRegister = r8;
-
-constexpr Register kJavaScriptCallArgCountRegister = r2;
-constexpr Register kJavaScriptCallCodeStartRegister = r4;
-constexpr Register kJavaScriptCallTargetRegister = kJSFunctionRegister;
-constexpr Register kJavaScriptCallNewTargetRegister = r5;
-constexpr Register kJavaScriptCallExtraArg1Register = r4;
-
-constexpr Register kOffHeapTrampolineRegister = ip;
-constexpr Register kRuntimeCallFunctionRegister = r3;
-constexpr Register kRuntimeCallArgCountRegister = r2;
-constexpr Register kRuntimeCallArgvRegister = r4;
-constexpr Register kWasmInstanceRegister = r6;
-
// ----------------------------------------------------------------------------
// Static helper functions
@@ -148,14 +126,9 @@ Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2 = no_reg,
class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
public:
- TurboAssembler(const AssemblerOptions& options, void* buffer, int buffer_size)
- : TurboAssemblerBase(options, buffer, buffer_size) {}
-
- TurboAssembler(Isolate* isolate, const AssemblerOptions& options,
- void* buffer, int buffer_size,
- CodeObjectRequired create_code_object)
- : TurboAssemblerBase(isolate, options, buffer, buffer_size,
- create_code_object) {}
+ template <typename... Args>
+ explicit TurboAssembler(Args&&... args)
+ : TurboAssemblerBase(std::forward<Args>(args)...) {}
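
The two hand-written constructors collapse into one perfect-forwarding template: any argument list accepted by TurboAssemblerBase is accepted here and passed through unchanged. A minimal sketch of the pattern:

#include <utility>

struct Base {
  Base(int buffer_size, void* buffer) {}
  explicit Base(const char* name) {}
};

struct Derived : Base {
  template <typename... Args>
  explicit Derived(Args&&... args) : Base(std::forward<Args>(args)...) {}
};

void Demo() {
  Derived d1(64, nullptr);   // forwards to Base(int, void*)
  Derived d2("assembler");   // forwards to Base(const char*)
}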
void LoadFromConstantsTable(Register destination,
int constant_index) override;
@@ -181,11 +154,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void Ret() { b(r14); }
void Ret(Condition cond) { b(cond, r14); }
- void CallForDeoptimization(Address target, int deopt_id,
- RelocInfo::Mode rmode) {
- USE(deopt_id);
- Call(target, rmode);
- }
+ void CallForDeoptimization(Address target, int deopt_id);
// Emit code to discard a non-negative number of pointer-sized elements
// from the stack, clobbering only the sp register.
@@ -199,8 +168,14 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void Call(Label* target);
+ void LoadCodeObjectEntry(Register destination, Register code_object) override;
+ void CallCodeObject(Register code_object) override;
+ void JumpCodeObject(Register code_object) override;
+
+ void CallBuiltinPointer(Register builtin_pointer) override;
+
// Register move. May do nothing if the registers are identical.
- void Move(Register dst, Smi* smi) { LoadSmiLiteral(dst, smi); }
+ void Move(Register dst, Smi smi) { LoadSmiLiteral(dst, smi); }
void Move(Register dst, Handle<HeapObject> value);
void Move(Register dst, ExternalReference reference);
void Move(Register dst, Register src, Condition cond = al);
@@ -227,6 +202,9 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void CallRecordWriteStub(Register object, Register address,
RememberedSetAction remembered_set_action,
SaveFPRegsMode fp_mode);
+ void CallRecordWriteStub(Register object, Register address,
+ RememberedSetAction remembered_set_action,
+ SaveFPRegsMode fp_mode, Address wasm_target);
void MultiPush(RegList regs, Register location = sp);
void MultiPop(RegList regs, Register location = sp);
@@ -547,7 +525,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// Push a handle.
void Push(Handle<HeapObject> handle);
- void Push(Smi* smi);
+ void Push(Smi smi);
// Push two registers. Pushes leftmost register first (to highest address).
void Push(Register src1, Register src2) {
@@ -646,10 +624,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void RestoreFrameStateForTailCall();
void InitializeRootRegister() {
- ExternalReference roots_array_start =
- ExternalReference::roots_array_start(isolate());
- mov(kRootRegister, Operand(roots_array_start));
- AddP(kRootRegister, kRootRegister, Operand(kRootRegisterBias));
+ ExternalReference isolate_root = ExternalReference::isolate_root(isolate());
+ mov(kRootRegister, Operand(isolate_root));
}
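
With this change kRootRegister holds the isolate root itself rather than a biased pointer into the roots array, so every piece of isolate data (roots, external references, the builtin entry table used earlier in this diff) is reachable as a plain positive offset from one base. A sketch of the simplified addressing:

#include <cstdint>

// Old scheme (sketch): root = roots_array_start + kRootRegisterBias, with
// de-biased offsets at each use. New scheme: root = isolate_root, and any
// isolate datum is simply root + its IsolateData offset.
uint64_t IsolateSlotAddress(uint64_t isolate_root, uint64_t field_offset) {
  return isolate_root + field_offset;
}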
// If the value is a NaN, canonicalize the value else, do nothing.
@@ -747,7 +723,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void LoadIntLiteral(Register dst, int value);
// load an SMI value <value> to GPR <dst>
- void LoadSmiLiteral(Register dst, Smi* smi);
+ void LoadSmiLiteral(Register dst, Smi smi);
// load a literal double value <value> to FPR <result>
void LoadDoubleLiteral(DoubleRegister result, double value, Register scratch);
@@ -766,14 +742,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void StoreHalfWord(Register src, const MemOperand& mem,
Register scratch = r0);
void StoreByte(Register src, const MemOperand& mem, Register scratch = r0);
-
- void AddSmiLiteral(Register dst, Register src, Smi* smi,
- Register scratch = r0);
- void SubSmiLiteral(Register dst, Register src, Smi* smi,
- Register scratch = r0);
- void CmpSmiLiteral(Register src1, Smi* smi, Register scratch);
- void CmpLogicalSmiLiteral(Register src1, Smi* smi, Register scratch);
- void AndSmiLiteral(Register dst, Register src, Smi* smi);
+ void CmpSmiLiteral(Register src1, Smi smi, Register scratch);
// Set new rounding mode RN to FPSCR
void SetRoundingMode(FPRoundingMode RN);
@@ -821,9 +790,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// ---------------------------------------------------------------------------
// Runtime calls
- // Call a code stub.
- void CallStubDelayed(CodeStub* stub);
-
// Call a runtime routine. This expects {centry} to contain a fitting CEntry
// builtin for the target runtime function and uses an indirect call.
void CallRuntimeWithCEntry(Runtime::FunctionId fid, Register centry);
@@ -885,8 +851,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// Print a message to stdout and abort execution.
void Abort(AbortReason reason);
- inline bool AllowThisStubCall(CodeStub* stub);
-
// ---------------------------------------------------------------------------
// Bit testing/extraction
//
@@ -897,7 +861,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// and place them into the least significant bits of dst.
inline void ExtractBitRange(Register dst, Register src, int rangeStart,
int rangeEnd) {
- DCHECK(rangeStart >= rangeEnd && rangeStart < kBitsPerPointer);
+ DCHECK(rangeStart >= rangeEnd && rangeStart < kBitsPerSystemPointer);
// Try to use RISBG if possible.
if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
@@ -932,7 +896,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// into the least significant bits of dst.
inline void ExtractBitMask(Register dst, Register src, uintptr_t mask,
RCBit rc = LeaveRC) {
- int start = kBitsPerPointer - 1;
+ int start = kBitsPerSystemPointer - 1;
int end;
uintptr_t bit = (1L << start);
@@ -1011,6 +975,12 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void ResetSpeculationPoisonRegister();
void ComputeCodeStartAddress(Register dst);
+ void LoadPC(Register dst);
+
+ // Generates an instruction sequence s.t. the return address points to the
+ // instruction following the call.
+ // The return address on the stack is used by frame iteration.
+ void StoreReturnAddressAndCall(Register target);
private:
static const int kSmiShift = kSmiTagSize + kSmiShiftSize;
@@ -1018,29 +988,23 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void CallCFunctionHelper(Register function, int num_reg_arguments,
int num_double_arguments);
+ void CallRecordWriteStub(Register object, Register address,
+ RememberedSetAction remembered_set_action,
+ SaveFPRegsMode fp_mode, Handle<Code> code_target,
+ Address wasm_target);
+
void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = al);
int CalculateStackPassedWords(int num_reg_arguments,
int num_double_arguments);
};
// MacroAssembler implements a collection of frequently used macros.
-class MacroAssembler : public TurboAssembler {
+class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
public:
- MacroAssembler(const AssemblerOptions& options, void* buffer, int size)
- : TurboAssembler(options, buffer, size) {}
-
- MacroAssembler(Isolate* isolate, void* buffer, int size,
- CodeObjectRequired create_code_object)
- : MacroAssembler(isolate, AssemblerOptions::Default(isolate), buffer,
- size, create_code_object) {}
-
- MacroAssembler(Isolate* isolate, const AssemblerOptions& options,
- void* buffer, int size, CodeObjectRequired create_code_object);
-
- // Call a code stub.
- void TailCallStub(CodeStub* stub, Condition cond = al);
+ template <typename... Args>
+ explicit MacroAssembler(Args&&... args)
+ : TurboAssembler(std::forward<Args>(args)...) {}
- void CallStub(CodeStub* stub, Condition cond = al);
void CallRuntime(const Runtime::Function* f, int num_arguments,
SaveFPRegsMode save_doubles = kDontSaveFPRegs);
void CallRuntimeSaveDoubles(Runtime::FunctionId fid) {
diff --git a/deps/v8/src/s390/register-s390.h b/deps/v8/src/s390/register-s390.h
new file mode 100644
index 0000000000..e0114342e0
--- /dev/null
+++ b/deps/v8/src/s390/register-s390.h
@@ -0,0 +1,281 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_S390_REGISTER_S390_H_
+#define V8_S390_REGISTER_S390_H_
+
+#include "src/register.h"
+#include "src/reglist.h"
+
+namespace v8 {
+namespace internal {
+
+// clang-format off
+#define GENERAL_REGISTERS(V) \
+ V(r0) V(r1) V(r2) V(r3) V(r4) V(r5) V(r6) V(r7) \
+ V(r8) V(r9) V(r10) V(fp) V(ip) V(r13) V(r14) V(sp)
+
+#define ALLOCATABLE_GENERAL_REGISTERS(V) \
+ V(r2) V(r3) V(r4) V(r5) V(r6) V(r7) \
+ V(r8) V(r9) V(r13)
+
+#define DOUBLE_REGISTERS(V) \
+ V(d0) V(d1) V(d2) V(d3) V(d4) V(d5) V(d6) V(d7) \
+ V(d8) V(d9) V(d10) V(d11) V(d12) V(d13) V(d14) V(d15)
+
+#define FLOAT_REGISTERS DOUBLE_REGISTERS
+#define SIMD128_REGISTERS DOUBLE_REGISTERS
+
+#define ALLOCATABLE_DOUBLE_REGISTERS(V) \
+ V(d1) V(d2) V(d3) V(d4) V(d5) V(d6) V(d7) \
+ V(d8) V(d9) V(d10) V(d11) V(d12) V(d15) V(d0)
+
+#define C_REGISTERS(V) \
+ V(cr0) V(cr1) V(cr2) V(cr3) V(cr4) V(cr5) V(cr6) V(cr7) \
+ V(cr8) V(cr9) V(cr10) V(cr11) V(cr12) V(cr15)
+// clang-format on
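
These V(...) lists are X-macros: each consumer supplies its own V to stamp enumerators, constants, or name tables out of one authoritative register list, exactly as the RegisterCode enum and DEFINE_REGISTER blocks below do. The mechanism in miniature:

// Illustrative X-macro expansion, mirroring the pattern in this header.
#define COLORS(V) V(red) V(green) V(blue)

enum ColorCode {
#define COLOR_CODE(name) kCode_##name,
  COLORS(COLOR_CODE)
#undef COLOR_CODE
  kColorAfterLast
};
// Expands to:
// enum ColorCode { kCode_red, kCode_green, kCode_blue, kColorAfterLast };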
+
+// Register list in load/store instructions
+// Note that the bit values must match those used in the actual instruction encoding.
+const int kNumRegs = 16;
+
+// Caller-saved/arguments registers
+const RegList kJSCallerSaved = 1 << 1 | 1 << 2 | // r2 a1
+ 1 << 3 | // r3 a2
+ 1 << 4 | // r4 a3
+ 1 << 5; // r5 a4
+
+const int kNumJSCallerSaved = 5;
+
+// Callee-saved registers preserved when switching from C to JavaScript
+const RegList kCalleeSaved =
+ 1 << 6 | // r6 (argument passing in CEntryStub)
+ // (HandleScope logic in MacroAssembler)
+ 1 << 7 | // r7 (argument passing in CEntryStub)
+ // (HandleScope logic in MacroAssembler)
+ 1 << 8 | // r8 (argument passing in CEntryStub)
+ // (HandleScope logic in MacroAssembler)
+ 1 << 9 | // r9 (HandleScope logic in MacroAssembler)
+ 1 << 10 | // r10 (Roots register in JavaScript)
+ 1 << 11 | // r11 (fp in JavaScript)
+ 1 << 12 | // r12 (ip in JavaScript)
+ 1 << 13; // r13 (cp in JavaScript)
+// 1 << 15; // r15 (sp in JavaScript)
+
+const int kNumCalleeSaved = 8;
+
+const RegList kCallerSavedDoubles = 1 << 0 | // d0
+ 1 << 1 | // d1
+ 1 << 2 | // d2
+ 1 << 3 | // d3
+ 1 << 4 | // d4
+ 1 << 5 | // d5
+ 1 << 6 | // d6
+ 1 << 7; // d7
+
+const int kNumCallerSavedDoubles = 8;
+
+const RegList kCalleeSavedDoubles = 1 << 8 | // d8
+ 1 << 9 | // d9
+ 1 << 10 | // d10
+ 1 << 11 | // d11
+ 1 << 12 | // d12
+ 1 << 13 | // d13
+ 1 << 14 | // d14
+ 1 << 15; // d15
+
+const int kNumCalleeSavedDoubles = 8;
+
+// Number of registers for which space is reserved in safepoints. Must be a
+// multiple of 8.
+// TODO(regis): Only 8 registers may actually be sufficient. Revisit.
+const int kNumSafepointRegisters = 16;
+
+// Define the list of registers actually saved at safepoints.
+// Note that the number of saved registers may be smaller than the reserved
+// space, i.e. kNumSafepointSavedRegisters <= kNumSafepointRegisters.
+const RegList kSafepointSavedRegisters = kJSCallerSaved | kCalleeSaved;
+const int kNumSafepointSavedRegisters = kNumJSCallerSaved + kNumCalleeSaved;
+
+// The following constants describe the stack frame linkage area as
+// defined by the ABI.
+
+#if V8_TARGET_ARCH_S390X
+// [0] Back Chain
+// [1] Reserved for compiler use
+// [2] GPR 2
+// [3] GPR 3
+// ...
+// [15] GPR 15
+// [16] FPR 0
+// [17] FPR 2
+// [18] FPR 4
+// [19] FPR 6
+const int kNumRequiredStackFrameSlots = 20;
+const int kStackFrameRASlot = 14;
+const int kStackFrameSPSlot = 15;
+const int kStackFrameExtraParamSlot = 20;
+#else
+// [0] Back Chain
+// [1] Reserved for compiler use
+// [2] GPR 2
+// [3] GPR 3
+// ...
+// [15] GPR 15
+// [16..17] FPR 0
+// [18..19] FPR 2
+// [20..21] FPR 4
+// [22..23] FPR 6
+const int kNumRequiredStackFrameSlots = 24;
+const int kStackFrameRASlot = 14;
+const int kStackFrameSPSlot = 15;
+const int kStackFrameExtraParamSlot = 24;
+#endif
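
Use sites scale these slot indices by the pointer size; StoreReturnAddressAndCall earlier in this diff, for example, stores r14 at sp + kStackFrameRASlot * kPointerSize. A small sketch of that computation, assuming 8-byte slots on s390x:

#include <cstdint>

constexpr int kPointerSizeBytes = 8;  // s390x assumption

// Byte offset into the ABI linkage area for a given slot index.
constexpr int64_t SlotByteOffset(int slot) { return slot * kPointerSizeBytes; }

static_assert(SlotByteOffset(14) == 112,
              "the RA slot sits 112 bytes into the linkage area");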
+
+// zLinux ABI requires caller frames to include sufficient space for
+// callee preserved register save area.
+#if V8_TARGET_ARCH_S390X
+const int kCalleeRegisterSaveAreaSize = 160;
+#elif V8_TARGET_ARCH_S390
+const int kCalleeRegisterSaveAreaSize = 96;
+#else
+const int kCalleeRegisterSaveAreaSize = 0;
+#endif
+
+enum RegisterCode {
+#define REGISTER_CODE(R) kRegCode_##R,
+ GENERAL_REGISTERS(REGISTER_CODE)
+#undef REGISTER_CODE
+ kRegAfterLast
+};
+
+class Register : public RegisterBase<Register, kRegAfterLast> {
+ public:
+#if V8_TARGET_LITTLE_ENDIAN
+ static constexpr int kMantissaOffset = 0;
+ static constexpr int kExponentOffset = 4;
+#else
+ static constexpr int kMantissaOffset = 4;
+ static constexpr int kExponentOffset = 0;
+#endif
+
+ private:
+ friend class RegisterBase;
+ explicit constexpr Register(int code) : RegisterBase(code) {}
+};
+
+ASSERT_TRIVIALLY_COPYABLE(Register);
+static_assert(sizeof(Register) == sizeof(int),
+ "Register can efficiently be passed by value");
+
+#define DEFINE_REGISTER(R) \
+ constexpr Register R = Register::from_code<kRegCode_##R>();
+GENERAL_REGISTERS(DEFINE_REGISTER)
+#undef DEFINE_REGISTER
+constexpr Register no_reg = Register::no_reg();
+
+// Register aliases
+constexpr Register kRootRegister = r10; // Roots array pointer.
+constexpr Register cp = r13; // JavaScript context pointer.
+
+constexpr bool kPadArguments = false;
+constexpr bool kSimpleFPAliasing = true;
+constexpr bool kSimdMaskRegisters = false;
+
+enum DoubleRegisterCode {
+#define REGISTER_CODE(R) kDoubleCode_##R,
+ DOUBLE_REGISTERS(REGISTER_CODE)
+#undef REGISTER_CODE
+ kDoubleAfterLast
+};
+
+// Double word VFP register.
+class DoubleRegister : public RegisterBase<DoubleRegister, kDoubleAfterLast> {
+ public:
+ // A few double registers are reserved: one as a scratch register and one to
+ // hold 0.0, which does not fit in an instruction's immediate field.
+ // d14: 0.0
+ // d13: scratch register.
+ static constexpr int kSizeInBytes = 8;
+ inline static int NumRegisters();
+
+ private:
+ friend class RegisterBase;
+
+ explicit constexpr DoubleRegister(int code) : RegisterBase(code) {}
+};
+
+ASSERT_TRIVIALLY_COPYABLE(DoubleRegister);
+static_assert(sizeof(DoubleRegister) == sizeof(int),
+ "DoubleRegister can efficiently be passed by value");
+
+typedef DoubleRegister FloatRegister;
+
+// TODO(john.yan) Define SIMD registers.
+typedef DoubleRegister Simd128Register;
+
+#define DEFINE_REGISTER(R) \
+ constexpr DoubleRegister R = DoubleRegister::from_code<kDoubleCode_##R>();
+DOUBLE_REGISTERS(DEFINE_REGISTER)
+#undef DEFINE_REGISTER
+constexpr DoubleRegister no_dreg = DoubleRegister::no_reg();
+
+constexpr DoubleRegister kDoubleRegZero = d14;
+constexpr DoubleRegister kScratchDoubleReg = d13;
+
+Register ToRegister(int num);
+
+enum CRegisterCode {
+#define REGISTER_CODE(R) kCCode_##R,
+ C_REGISTERS(REGISTER_CODE)
+#undef REGISTER_CODE
+ kCAfterLast
+};
+
+// Coprocessor register
+class CRegister : public RegisterBase<CRegister, kCAfterLast> {
+ friend class RegisterBase;
+ explicit constexpr CRegister(int code) : RegisterBase(code) {}
+};
+
+constexpr CRegister no_creg = CRegister::no_reg();
+#define DECLARE_C_REGISTER(R) \
+ constexpr CRegister R = CRegister::from_code<kCCode_##R>();
+C_REGISTERS(DECLARE_C_REGISTER)
+#undef DECLARE_C_REGISTER
+
+// Define {RegisterName} methods for the register types.
+DEFINE_REGISTER_NAMES(Register, GENERAL_REGISTERS);
+DEFINE_REGISTER_NAMES(DoubleRegister, DOUBLE_REGISTERS);
+
+// Give alias names to registers for calling conventions.
+constexpr Register kReturnRegister0 = r2;
+constexpr Register kReturnRegister1 = r3;
+constexpr Register kReturnRegister2 = r4;
+constexpr Register kJSFunctionRegister = r3;
+constexpr Register kContextRegister = r13;
+constexpr Register kAllocateSizeRegister = r3;
+constexpr Register kSpeculationPoisonRegister = r9;
+constexpr Register kInterpreterAccumulatorRegister = r2;
+constexpr Register kInterpreterBytecodeOffsetRegister = r6;
+constexpr Register kInterpreterBytecodeArrayRegister = r7;
+constexpr Register kInterpreterDispatchTableRegister = r8;
+
+constexpr Register kJavaScriptCallArgCountRegister = r2;
+constexpr Register kJavaScriptCallCodeStartRegister = r4;
+constexpr Register kJavaScriptCallTargetRegister = kJSFunctionRegister;
+constexpr Register kJavaScriptCallNewTargetRegister = r5;
+constexpr Register kJavaScriptCallExtraArg1Register = r4;
+
+constexpr Register kOffHeapTrampolineRegister = ip;
+constexpr Register kRuntimeCallFunctionRegister = r3;
+constexpr Register kRuntimeCallArgCountRegister = r2;
+constexpr Register kRuntimeCallArgvRegister = r4;
+constexpr Register kWasmInstanceRegister = r6;
+constexpr Register kWasmCompileLazyFuncIndexRegister = r7;
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_S390_REGISTER_S390_H_
diff --git a/deps/v8/src/s390/simulator-s390.cc b/deps/v8/src/s390/simulator-s390.cc
index e6761ca610..04b2a7748c 100644
--- a/deps/v8/src/s390/simulator-s390.cc
+++ b/deps/v8/src/s390/simulator-s390.cc
@@ -2,30 +2,29 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/s390/simulator-s390.h"
+
+// Only build the simulator if not compiling for real s390 hardware.
+#if defined(USE_SIMULATOR)
+
#include <stdarg.h>
#include <stdlib.h>
#include <cmath>
-#if V8_TARGET_ARCH_S390
-
#include "src/assembler.h"
#include "src/base/bits.h"
#include "src/base/once.h"
-#include "src/codegen.h"
#include "src/disasm.h"
#include "src/macro-assembler.h"
+#include "src/objects-inl.h"
#include "src/ostreams.h"
+#include "src/register-configuration.h"
#include "src/runtime/runtime-utils.h"
#include "src/s390/constants-s390.h"
-#include "src/s390/simulator-s390.h"
-#if defined(USE_SIMULATOR)
-// Only build the simulator if not compiling for real s390 hardware.
namespace v8 {
namespace internal {
-const auto GetRegConfig = RegisterConfiguration::Default;
-
// This macro provides a platform independent use of sscanf. The reason for
// SScanF not being implemented in a platform independent way through
// ::v8::internal::OS in the same way as SNPrintF is that the
@@ -290,7 +289,7 @@ void S390Debugger::Debug() {
for (int i = 0; i < kNumRegisters; i++) {
value = GetRegisterValue(i);
PrintF(" %3s: %08" V8PRIxPTR,
- GetRegConfig()->GetGeneralRegisterName(i), value);
+ RegisterName(Register::from_code(i)), value);
if ((argc == 3 && strcmp(arg2, "fp") == 0) && i < 8 &&
(i % 2) == 0) {
dvalue = GetRegisterPairDoubleValue(i);
@@ -305,7 +304,7 @@ void S390Debugger::Debug() {
for (int i = 0; i < kNumRegisters; i++) {
value = GetRegisterValue(i);
PrintF(" %3s: %08" V8PRIxPTR " %11" V8PRIdPTR,
- GetRegConfig()->GetGeneralRegisterName(i), value, value);
+ RegisterName(Register::from_code(i)), value, value);
if ((argc == 3 && strcmp(arg2, "fp") == 0) && i < 8 &&
(i % 2) == 0) {
dvalue = GetRegisterPairDoubleValue(i);
@@ -321,7 +320,7 @@ void S390Debugger::Debug() {
float fvalue = GetFPFloatRegisterValue(i);
uint32_t as_words = bit_cast<uint32_t>(fvalue);
PrintF("%3s: %f 0x%08x\n",
- GetRegConfig()->GetDoubleRegisterName(i), fvalue,
+ RegisterName(DoubleRegister::from_code(i)), fvalue,
as_words);
}
} else if (strcmp(arg1, "alld") == 0) {
@@ -329,7 +328,7 @@ void S390Debugger::Debug() {
dvalue = GetFPDoubleRegisterValue(i);
uint64_t as_words = bit_cast<uint64_t>(dvalue);
PrintF("%3s: %f 0x%08x %08x\n",
- GetRegConfig()->GetDoubleRegisterName(i), dvalue,
+ RegisterName(DoubleRegister::from_code(i)), dvalue,
static_cast<uint32_t>(as_words >> 32),
static_cast<uint32_t>(as_words & 0xFFFFFFFF));
}
@@ -367,7 +366,7 @@ void S390Debugger::Debug() {
intptr_t value;
StdoutStream os;
if (GetValue(arg1, &value)) {
- Object* obj = reinterpret_cast<Object*>(value);
+ Object obj(value);
os << arg1 << ": \n";
#ifdef DEBUG
obj->Print(os);
@@ -419,13 +418,11 @@ void S390Debugger::Debug() {
while (cur < end) {
PrintF(" 0x%08" V8PRIxPTR ": 0x%08" V8PRIxPTR " %10" V8PRIdPTR,
reinterpret_cast<intptr_t>(cur), *cur, *cur);
- HeapObject* obj = reinterpret_cast<HeapObject*>(*cur);
- intptr_t value = *cur;
+ Object obj(*cur);
Heap* current_heap = sim_->isolate_->heap();
- if (((value & 1) == 0) ||
- current_heap->ContainsSlow(obj->address())) {
- PrintF("(smi %d)", PlatformSmiTagging::SmiToInt(obj));
- } else if (current_heap->Contains(obj)) {
+ if (obj.IsSmi()) {
+ PrintF(" (smi %d)", Smi::ToInt(obj));
+ } else if (current_heap->Contains(HeapObject::cast(obj))) {
PrintF(" (");
obj->ShortPrint();
PrintF(")");
@@ -1798,22 +1795,10 @@ bool Simulator::OverflowFromSigned(T1 alu_out, T1 left, T1 right,
return overflow;
}
-#if V8_TARGET_ARCH_S390X
-static void decodeObjectPair(ObjectPair* pair, intptr_t* x, intptr_t* y) {
- *x = reinterpret_cast<intptr_t>(pair->x);
- *y = reinterpret_cast<intptr_t>(pair->y);
-}
-#else
static void decodeObjectPair(ObjectPair* pair, intptr_t* x, intptr_t* y) {
-#if V8_TARGET_BIG_ENDIAN
- *x = static_cast<int32_t>(*pair >> 32);
- *y = static_cast<int32_t>(*pair);
-#else
- *x = static_cast<int32_t>(*pair);
- *y = static_cast<int32_t>(*pair >> 32);
-#endif
+ *x = static_cast<intptr_t>(pair->x);
+ *y = static_cast<intptr_t>(pair->y);
}
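
With Object now a value type wrapping a tagged word, 31- and 64-bit builds can share one decoder that reads the pair's fields directly, replacing the per-endianness shifting the deleted 32-bit variant needed. A standalone sketch, assuming ObjectPair carries two tagged words x and y:

#include <cstdint>

struct TaggedPair { intptr_t x, y; };  // stand-in for ObjectPair

void DecodePair(const TaggedPair* pair, intptr_t* x, intptr_t* y) {
  *x = pair->x;  // no endianness-dependent bit twiddling required
  *y = pair->y;
}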
-#endif
// Calls into the V8 runtime.
typedef intptr_t (*SimulatorRuntimeCall)(intptr_t arg0, intptr_t arg1,
@@ -9773,4 +9758,3 @@ EVALUATE(CXZT) {
} // namespace v8
#endif // USE_SIMULATOR
-#endif // V8_TARGET_ARCH_S390
diff --git a/deps/v8/src/s390/simulator-s390.h b/deps/v8/src/s390/simulator-s390.h
index dba7ce8947..6b6a91e2a7 100644
--- a/deps/v8/src/s390/simulator-s390.h
+++ b/deps/v8/src/s390/simulator-s390.h
@@ -12,11 +12,13 @@
#ifndef V8_S390_SIMULATOR_S390_H_
#define V8_S390_SIMULATOR_S390_H_
-#include "src/allocation.h"
+// globals.h defines USE_SIMULATOR.
+#include "src/globals.h"
#if defined(USE_SIMULATOR)
// Running with a simulator.
+#include "src/allocation.h"
#include "src/assembler.h"
#include "src/base/hashmap.h"
#include "src/s390/constants-s390.h"
diff --git a/deps/v8/src/safepoint-table.cc b/deps/v8/src/safepoint-table.cc
index 5765038084..f3fc966b20 100644
--- a/deps/v8/src/safepoint-table.cc
+++ b/deps/v8/src/safepoint-table.cc
@@ -50,7 +50,7 @@ SafepointTable::SafepointTable(Address instruction_start,
Safepoint::kNoDeoptimizationIndex);
}
-SafepointTable::SafepointTable(Code* code)
+SafepointTable::SafepointTable(Code code)
: SafepointTable(code->InstructionStart(), code->safepoint_table_offset(),
code->stack_slots(), true) {}
@@ -129,11 +129,9 @@ void Safepoint::DefinePointerRegister(Register reg) {
Safepoint SafepointTableBuilder::DefineSafepoint(
Assembler* assembler,
Safepoint::Kind kind,
- int arguments,
Safepoint::DeoptMode deopt_mode) {
- DCHECK_GE(arguments, 0);
deoptimization_info_.push_back(
- DeoptimizationInfo(zone_, assembler->pc_offset(), arguments, kind));
+ DeoptimizationInfo(zone_, assembler->pc_offset(), kind));
if (deopt_mode == Safepoint::kNoLazyDeopt) {
last_lazy_safepoint_ = deoptimization_info_.size();
}
@@ -182,11 +180,19 @@ void SafepointTableBuilder::Emit(Assembler* assembler, int bits_per_entry) {
RoundUp(bits_per_entry, kBitsPerByte) >> kBitsPerByteLog2;
// Emit the table header.
+ STATIC_ASSERT(SafepointTable::kLengthOffset == 0 * kIntSize);
+ STATIC_ASSERT(SafepointTable::kEntrySizeOffset == 1 * kIntSize);
+ STATIC_ASSERT(SafepointTable::kHeaderSize == 2 * kIntSize);
int length = static_cast<int>(deoptimization_info_.size());
assembler->dd(length);
assembler->dd(bytes_per_entry);
- // Emit sorted table of pc offsets together with deoptimization indexes.
+ // Emit sorted table of pc offsets together with additional info (i.e. the
+ // deoptimization index or arguments count) and trampoline offsets.
+ STATIC_ASSERT(SafepointTable::kPcOffset == 0 * kIntSize);
+ STATIC_ASSERT(SafepointTable::kEncodedInfoOffset == 1 * kIntSize);
+ STATIC_ASSERT(SafepointTable::kTrampolinePcOffset == 2 * kIntSize);
+ STATIC_ASSERT(SafepointTable::kFixedEntrySize == 3 * kIntSize);
for (const DeoptimizationInfo& info : deoptimization_info_) {
assembler->dd(info.pc);
assembler->dd(EncodeExceptPC(info));
@@ -234,7 +240,6 @@ void SafepointTableBuilder::Emit(Assembler* assembler, int bits_per_entry) {
uint32_t SafepointTableBuilder::EncodeExceptPC(const DeoptimizationInfo& info) {
return SafepointEntry::DeoptimizationIndexField::encode(info.deopt_index) |
- SafepointEntry::ArgumentsField::encode(info.arguments) |
SafepointEntry::SaveDoublesField::encode(info.has_doubles);
}
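
The encoded info word now packs a 31-bit deoptimization index plus a single save-doubles bit; the 3-bit arguments field is gone, and an all-ones index doubles as the "no deoptimization" sentinel tested by has_deoptimization_index(). A standalone sketch of the same packing:

#include <cstdint>

constexpr int kDeoptIndexBits = 31;  // 32 minus the save-doubles bit
constexpr uint32_t kIndexMask = (1u << kDeoptIndexBits) - 1;
constexpr uint32_t kNoDeoptIndex = kIndexMask;  // all-ones sentinel

uint32_t Encode(uint32_t deopt_index, bool has_doubles) {
  return (deopt_index & kIndexMask) |
         (static_cast<uint32_t>(has_doubles) << kDeoptIndexBits);
}

uint32_t DecodeIndex(uint32_t info) { return info & kIndexMask; }
bool DecodeHasDoubles(uint32_t info) { return (info >> kDeoptIndexBits) != 0; }
bool HasDeoptIndex(uint32_t info) { return DecodeIndex(info) != kNoDeoptIndex; }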
@@ -261,9 +266,7 @@ void SafepointTableBuilder::RemoveDuplicates() {
bool SafepointTableBuilder::IsIdenticalExceptForPc(
const DeoptimizationInfo& info1, const DeoptimizationInfo& info2) const {
- if (info1.arguments != info2.arguments) return false;
if (info1.has_doubles != info2.has_doubles) return false;
-
if (info1.deopt_index != info2.deopt_index) return false;
ZoneChunkList<int>* indexes1 = info1.indexes;
diff --git a/deps/v8/src/safepoint-table.h b/deps/v8/src/safepoint-table.h
index 475b4a80b1..f8003f115d 100644
--- a/deps/v8/src/safepoint-table.h
+++ b/deps/v8/src/safepoint-table.h
@@ -7,7 +7,6 @@
#include "src/allocation.h"
#include "src/assert-scope.h"
-#include "src/utils.h"
#include "src/v8memory.h"
#include "src/zone/zone-chunk-list.h"
#include "src/zone/zone.h"
@@ -37,34 +36,25 @@ class SafepointEntry {
bits_ = nullptr;
}
+ int trampoline_pc() { return trampoline_pc_; }
+
+ static const int kSaveDoublesFieldBits = 1;
+ static const int kDeoptIndexBits = 32 - kSaveDoublesFieldBits;
+
+ class DeoptimizationIndexField : public BitField<int, 0, kDeoptIndexBits> {};
+ class SaveDoublesField
+ : public BitField<bool, DeoptimizationIndexField::kNext,
+ kSaveDoublesFieldBits> {};
+
int deoptimization_index() const {
- DCHECK(is_valid());
+ DCHECK(is_valid() && has_deoptimization_index());
return DeoptimizationIndexField::decode(info_);
}
- int trampoline_pc() { return trampoline_pc_; }
-
- void set_trampoline_pc(int trampoline_pc) { trampoline_pc_ = trampoline_pc; }
-
- static const int kArgumentsFieldBits = 3;
- static const int kSaveDoublesFieldBits = 1;
- static const int kDeoptIndexBits =
- 32 - kArgumentsFieldBits - kSaveDoublesFieldBits;
-
- class DeoptimizationIndexField:
- public BitField<int, 0, kDeoptIndexBits> {}; // NOLINT
- class ArgumentsField:
- public BitField<unsigned,
- kDeoptIndexBits,
- kArgumentsFieldBits> {}; // NOLINT
- class SaveDoublesField:
- public BitField<bool,
- kDeoptIndexBits + kArgumentsFieldBits,
- kSaveDoublesFieldBits> { }; // NOLINT
-
- int argument_count() const {
+ bool has_deoptimization_index() const {
DCHECK(is_valid());
- return ArgumentsField::decode(info_);
+ return DeoptimizationIndexField::decode(info_) !=
+ DeoptimizationIndexField::kMax;
}
bool has_doubles() const {
@@ -89,7 +79,7 @@ class SafepointEntry {
class SafepointTable {
public:
- explicit SafepointTable(Code* code);
+ explicit SafepointTable(Code code);
explicit SafepointTable(Address instruction_start,
size_t safepoint_table_offset, uint32_t stack_slots,
bool has_deopt = false);
@@ -114,7 +104,7 @@ class SafepointTable {
SafepointEntry GetEntry(unsigned index) const {
DCHECK(index < length_);
- unsigned info = Memory<uint32_t>(GetInfoLocation(index));
+ unsigned info = Memory<uint32_t>(GetEncodedInfoLocation(index));
uint8_t* bits = &Memory<uint8_t>(entries_ + (index * entry_size_));
int trampoline_pc =
has_deopt_ ? Memory<int>(GetTrampolineLocation(index)) : -1;
@@ -134,17 +124,16 @@ class SafepointTable {
static const int kEntrySizeOffset = kLengthOffset + kIntSize;
static const int kHeaderSize = kEntrySizeOffset + kIntSize;
static const int kPcOffset = 0;
- static const int kDeoptimizationIndexOffset = kPcOffset + kIntSize;
- static const int kTrampolinePcOffset = kDeoptimizationIndexOffset + kIntSize;
+ static const int kEncodedInfoOffset = kPcOffset + kIntSize;
+ static const int kTrampolinePcOffset = kEncodedInfoOffset + kIntSize;
static const int kFixedEntrySize = kTrampolinePcOffset + kIntSize;
Address GetPcOffsetLocation(unsigned index) const {
return pc_and_deoptimization_indexes_ + (index * kFixedEntrySize);
}
- // TODO(juliana): rename this to GetDeoptimizationIndexLocation
- Address GetInfoLocation(unsigned index) const {
- return GetPcOffsetLocation(index) + kDeoptimizationIndexOffset;
+ Address GetEncodedInfoLocation(unsigned index) const {
+ return GetPcOffsetLocation(index) + kEncodedInfoOffset;
}
Address GetTrampolineLocation(unsigned index) const {
@@ -154,7 +143,7 @@ class SafepointTable {
static void PrintBits(std::ostream& os, // NOLINT
uint8_t byte, int digits);
- DisallowHeapAllocation no_allocation_;
+ DISALLOW_HEAP_ALLOCATION(no_allocation_);
Address instruction_start_;
uint32_t stack_slots_;
unsigned length_;
@@ -185,7 +174,7 @@ class Safepoint {
};
static const int kNoDeoptimizationIndex =
- (1 << (SafepointEntry::kDeoptIndexBits)) - 1;
+ SafepointEntry::DeoptimizationIndexField::kMax;
void DefinePointerSlot(int index) { indexes_->push_back(index); }
void DefinePointerRegister(Register reg);
@@ -213,7 +202,6 @@ class SafepointTableBuilder {
// Define a new safepoint for the current position in the body.
Safepoint DefineSafepoint(Assembler* assembler,
Safepoint::Kind kind,
- int arguments,
Safepoint::DeoptMode mode);
// Record deoptimization index for lazy deoptimization for the last
@@ -229,23 +217,21 @@ class SafepointTableBuilder {
// Find the Deoptimization Info with pc offset {pc} and update its
// trampoline field. Calling this function ensures that the safepoint
- // table contains the trampoline PC (trampoline} that replaced the
+ // table contains the trampoline PC {trampoline} that replaced the
// return PC {pc} on the stack.
int UpdateDeoptimizationInfo(int pc, int trampoline, int start);
private:
struct DeoptimizationInfo {
unsigned pc;
- unsigned arguments;
+ unsigned deopt_index;
bool has_doubles;
int trampoline;
ZoneChunkList<int>* indexes;
ZoneChunkList<int>* registers;
- unsigned deopt_index;
- DeoptimizationInfo(Zone* zone, unsigned pc, unsigned arguments,
- Safepoint::Kind kind)
+ DeoptimizationInfo(Zone* zone, unsigned pc, Safepoint::Kind kind)
: pc(pc),
- arguments(arguments),
+ deopt_index(Safepoint::kNoDeoptimizationIndex),
has_doubles(kind & Safepoint::kWithDoubles),
trampoline(-1),
indexes(new (zone) ZoneChunkList<int>(
@@ -253,14 +239,16 @@ class SafepointTableBuilder {
registers(kind & Safepoint::kWithRegisters
? new (zone) ZoneChunkList<int>(
zone, ZoneChunkList<int>::StartMode::kSmall)
- : nullptr),
- deopt_index(Safepoint::kNoDeoptimizationIndex) {}
+ : nullptr) {}
};
+ // Encodes all fields of a {DeoptimizationInfo} except {pc} and {trampoline}.
uint32_t EncodeExceptPC(const DeoptimizationInfo&);
+ // Compares all fields of a {DeoptimizationInfo} except {pc} and {trampoline}.
bool IsIdenticalExceptForPc(const DeoptimizationInfo&,
const DeoptimizationInfo&) const;
+
// If all entries are identical, replace them by 1 entry with pc = kMaxUInt32.
void RemoveDuplicates();
diff --git a/deps/v8/src/setup-isolate-full.cc b/deps/v8/src/setup-isolate-full.cc
index c902f06b30..494322ef06 100644
--- a/deps/v8/src/setup-isolate-full.cc
+++ b/deps/v8/src/setup-isolate-full.cc
@@ -5,6 +5,7 @@
#include "src/setup-isolate.h"
#include "src/base/logging.h"
+#include "src/debug/debug-evaluate.h"
#include "src/heap/heap-inl.h"
#include "src/interpreter/interpreter.h"
#include "src/isolate.h"
@@ -15,6 +16,9 @@ namespace internal {
void SetupIsolateDelegate::SetupBuiltins(Isolate* isolate) {
if (create_heap_objects_) {
SetupBuiltinsInternal(isolate);
+#ifdef DEBUG
+ DebugEvaluate::VerifyTransitiveBuiltins(isolate);
+#endif // DEBUG
} else {
CHECK(isolate->snapshot_available());
}
diff --git a/deps/v8/src/setup-isolate.h b/deps/v8/src/setup-isolate.h
index 61dedd6fe8..9081f7014f 100644
--- a/deps/v8/src/setup-isolate.h
+++ b/deps/v8/src/setup-isolate.h
@@ -42,7 +42,7 @@ class SetupIsolateDelegate {
protected:
static void SetupBuiltinsInternal(Isolate* isolate);
- static void AddBuiltin(Builtins* builtins, int index, Code* code);
+ static void AddBuiltin(Builtins* builtins, int index, Code code);
static void PopulateWithPlaceholders(Isolate* isolate);
static void ReplacePlaceholders(Isolate* isolate);
diff --git a/deps/v8/src/signature.h b/deps/v8/src/signature.h
index 60950a93bb..6890699ab4 100644
--- a/deps/v8/src/signature.h
+++ b/deps/v8/src/signature.h
@@ -27,12 +27,12 @@ class Signature : public ZoneObject {
size_t parameter_count() const { return parameter_count_; }
T GetParam(size_t index) const {
- DCHECK(index < parameter_count_);
+ DCHECK_LT(index, parameter_count_);
return reps_[return_count_ + index];
}
T GetReturn(size_t index = 0) const {
- DCHECK(index < return_count_);
+ DCHECK_LT(index, return_count_);
return reps_[index];
}
@@ -71,16 +71,24 @@ class Signature : public ZoneObject {
const size_t parameter_count_;
void AddReturn(T val) {
- DCHECK(rcursor_ < return_count_);
+ DCHECK_LT(rcursor_, return_count_);
buffer_[rcursor_++] = val;
}
+
void AddParam(T val) {
- DCHECK(pcursor_ < parameter_count_);
+ DCHECK_LT(pcursor_, parameter_count_);
buffer_[return_count_ + pcursor_++] = val;
}
+
+ void AddParamAt(size_t index, T val) {
+ DCHECK_LT(index, parameter_count_);
+ buffer_[return_count_ + index] = val;
+ pcursor_ = std::max(pcursor_, index + 1);
+ }
+
Signature<T>* Build() {
- DCHECK(rcursor_ == return_count_);
- DCHECK(pcursor_ == parameter_count_);
+ DCHECK_EQ(rcursor_, return_count_);
+ DCHECK_EQ(pcursor_, parameter_count_);
return new (zone_) Signature<T>(return_count_, parameter_count_, buffer_);
}
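
The new AddParamAt lets callers fill parameter slots out of order while keeping Build()'s completeness check meaningful: the cursor jumps past the highest index written, so Build() can still assert pcursor_ == parameter_count_ (callers remain responsible for filling every slot). A simplified standalone sketch of that cursor trick, with Zone allocation and the template parameter elided:

#include <algorithm>
#include <cassert>
#include <cstddef>
#include <vector>

struct SigBuilder {
  size_t return_count, parameter_count;
  size_t pcursor = 0;
  std::vector<int> buffer;

  SigBuilder(size_t returns, size_t params)
      : return_count(returns), parameter_count(params),
        buffer(returns + params) {}

  void AddParamAt(size_t index, int val) {
    assert(index < parameter_count);
    buffer[return_count + index] = val;
    // Advance past the highest slot written, enabling out-of-order fills.
    pcursor = std::max(pcursor, index + 1);
  }
};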
diff --git a/deps/v8/src/simulator-base.cc b/deps/v8/src/simulator-base.cc
index e5ecbc0a6d..25e21cdbdf 100644
--- a/deps/v8/src/simulator-base.cc
+++ b/deps/v8/src/simulator-base.cc
@@ -4,7 +4,6 @@
#include "src/simulator-base.h"
-#include "src/assembler.h"
#include "src/isolate.h"
#include "src/simulator.h"
@@ -61,7 +60,7 @@ void SimulatorBase::GlobalTearDown() {
// static
Address SimulatorBase::RedirectExternalReference(Address external_function,
ExternalReference::Type type) {
- base::LockGuard<base::Mutex> lock_guard(Simulator::redirection_mutex());
+ base::MutexGuard lock_guard(Simulator::redirection_mutex());
Redirection* redirection = Redirection::Get(external_function, type);
return redirection->address_of_instruction();
}
@@ -70,7 +69,7 @@ Redirection::Redirection(Address external_function,
ExternalReference::Type type)
: external_function_(external_function), type_(type), next_(nullptr) {
next_ = Simulator::redirection();
- base::LockGuard<base::Mutex> lock_guard(Simulator::i_cache_mutex());
+ base::MutexGuard lock_guard(Simulator::i_cache_mutex());
Simulator::SetRedirectInstruction(
reinterpret_cast<Instruction*>(address_of_instruction()));
Simulator::FlushICache(Simulator::i_cache(),
diff --git a/deps/v8/src/simulator-base.h b/deps/v8/src/simulator-base.h
index 1bada2b812..09270ff5ae 100644
--- a/deps/v8/src/simulator-base.h
+++ b/deps/v8/src/simulator-base.h
@@ -61,6 +61,12 @@ class SimulatorBase {
return reinterpret_cast<T>(ret);
}
+ template <typename T>
+ static typename std::enable_if<std::is_base_of<Object, T>::value, T>::type
+ ConvertReturn(intptr_t ret) {
+ return Object(ret);
+ }
+
// Convert back void return type (i.e. no return).
template <typename T>
static typename std::enable_if<std::is_void<T>::value, T>::type ConvertReturn(
diff --git a/deps/v8/src/simulator.h b/deps/v8/src/simulator.h
index a7badaa6e5..37e8fd4f12 100644
--- a/deps/v8/src/simulator.h
+++ b/deps/v8/src/simulator.h
@@ -8,10 +8,12 @@
#include "src/globals.h"
#include "src/objects/code.h"
-#if V8_TARGET_ARCH_IA32
-#include "src/ia32/simulator-ia32.h"
-#elif V8_TARGET_ARCH_X64
-#include "src/x64/simulator-x64.h"
+#if !defined(USE_SIMULATOR)
+#include "src/utils.h"
+#endif
+
+#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64
+// No simulator for ia32 or x64.
#elif V8_TARGET_ARCH_ARM64
#include "src/arm64/simulator-arm64.h"
#elif V8_TARGET_ARCH_ARM
@@ -45,12 +47,18 @@ class SimulatorStack : public v8::internal::AllStatic {
return Simulator::current(isolate)->StackLimit(c_limit);
}
- static inline uintptr_t RegisterCTryCatch(v8::internal::Isolate* isolate,
- uintptr_t try_catch_address) {
- return Simulator::current(isolate)->PushAddress(try_catch_address);
+ // Returns the current stack address on the simulator stack frame.
+ // The returned address is comparable with JS stack address.
+ static inline uintptr_t RegisterJSStackComparableAddress(
+ v8::internal::Isolate* isolate) {
+ // The value of |kPlaceHolder| is actually not used. It just occupies a
+ // single word on the stack frame of the simulator.
+ const uintptr_t kPlaceHolder = 0x4A535350u; // "JSSP" in ASCII
+ return Simulator::current(isolate)->PushAddress(kPlaceHolder);
}
- static inline void UnregisterCTryCatch(v8::internal::Isolate* isolate) {
+ static inline void UnregisterJSStackComparableAddress(
+ v8::internal::Isolate* isolate) {
Simulator::current(isolate)->PopAddress();
}
};
@@ -69,13 +77,16 @@ class SimulatorStack : public v8::internal::AllStatic {
return c_limit;
}
- static inline uintptr_t RegisterCTryCatch(v8::internal::Isolate* isolate,
- uintptr_t try_catch_address) {
+ // Returns the current stack address on the native stack frame.
+ // The returned address is comparable with JS stack address.
+ static inline uintptr_t RegisterJSStackComparableAddress(
+ v8::internal::Isolate* isolate) {
USE(isolate);
- return try_catch_address;
+ return internal::GetCurrentStackPosition();
}
- static inline void UnregisterCTryCatch(v8::internal::Isolate* isolate) {
+ static inline void UnregisterJSStackComparableAddress(
+ v8::internal::Isolate* isolate) {
USE(isolate);
}
};
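
The rename captures the new contract: callers no longer push a try-catch address; they obtain an address comparable with JS stack addresses (a placeholder word on the simulator stack, or the live stack position natively) and must unregister it on the way out. A hedged sketch of the caller pattern; the real call sites live elsewhere in V8:

// Hypothetical caller, shown for illustration only.
// uintptr_t comparable =
//     SimulatorStack::RegisterJSStackComparableAddress(isolate);
// ... enter JS; |comparable| orders native and simulated frames ...
// SimulatorStack::UnregisterJSStackComparableAddress(isolate);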
@@ -97,7 +108,7 @@ class GeneratedCode {
return GeneratedCode(isolate, reinterpret_cast<Signature*>(buffer));
}
- static GeneratedCode FromCode(Code* code) {
+ static GeneratedCode FromCode(Code code) {
return FromAddress(code->GetIsolate(), code->entry());
}
@@ -107,12 +118,32 @@ class GeneratedCode {
return Simulator::current(isolate_)->template Call<Return>(
reinterpret_cast<Address>(fn_ptr_), args...);
}
+
+ DISABLE_CFI_ICALL Return CallIrregexp(Args... args) { return Call(args...); }
#else
+
DISABLE_CFI_ICALL Return Call(Args... args) {
// When running without a simulator we call the entry directly.
+#if V8_OS_AIX
+ // AIX ABI requires function descriptors (FD). Artificially create a pseudo
+ // FD to ensure correct dispatch to generated code. The 'volatile'
+ // declaration is required so that the compiler does not overlook the alias
+ // of the pseudo FD to the function pointer and optimize the pseudo FD
+ // declaration/initialization away.
+ volatile Address function_desc[] = {reinterpret_cast<Address>(fn_ptr_), 0,
+ 0};
+ Signature* fn = reinterpret_cast<Signature*>(function_desc);
+ return fn(args...);
+#else
+ return fn_ptr_(args...);
+#endif // V8_OS_AIX
+ }
+
+ DISABLE_CFI_ICALL Return CallIrregexp(Args... args) {
+ // When running without a simulator we call the entry directly.
return fn_ptr_(args...);
}
-#endif
+#endif // USE_SIMULATOR
private:
friend class GeneratedCode<Return(Args...)>;
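
For reference, a hedged sketch of typical GeneratedCode use: build a typed entry from an address and call through it, with the simulator dispatch and the AIX descriptor workaround hidden behind Call. Here isolate and addr are assumptions, not values from this diff:

// using Sig = int(int, int);
// auto stub = GeneratedCode<Sig>::FromAddress(isolate, addr);
// int result = stub.Call(2, 3);  // direct call, or via Simulator::Call
//                                // when USE_SIMULATOR is defined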
diff --git a/deps/v8/src/snapshot/builtin-deserializer-allocator.cc b/deps/v8/src/snapshot/builtin-deserializer-allocator.cc
deleted file mode 100644
index 80300c9f1d..0000000000
--- a/deps/v8/src/snapshot/builtin-deserializer-allocator.cc
+++ /dev/null
@@ -1,176 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/snapshot/builtin-deserializer-allocator.h"
-
-#include "src/heap/heap-inl.h"
-#include "src/interpreter/interpreter.h"
-#include "src/snapshot/builtin-deserializer.h"
-#include "src/snapshot/deserializer.h"
-
-namespace v8 {
-namespace internal {
-
-using interpreter::Bytecodes;
-using interpreter::Interpreter;
-
-BuiltinDeserializerAllocator::BuiltinDeserializerAllocator(
- Deserializer<BuiltinDeserializerAllocator>* deserializer)
- : deserializer_(deserializer) {}
-
-Address BuiltinDeserializerAllocator::Allocate(AllocationSpace space,
- int size) {
- const int code_object_id = deserializer()->CurrentCodeObjectId();
- DCHECK_NE(BuiltinDeserializer::kNoCodeObjectId, code_object_id);
- DCHECK_EQ(CODE_SPACE, space);
- DCHECK_EQ(deserializer()->ExtractCodeObjectSize(code_object_id), size);
-#ifdef DEBUG
- RegisterCodeObjectAllocation(code_object_id);
-#endif
-
- DCHECK(Builtins::IsBuiltinId(code_object_id));
- Object* obj = isolate()->builtins()->builtin(code_object_id);
- DCHECK(Internals::HasHeapObjectTag(obj));
- return HeapObject::cast(obj)->address();
-}
-
-Heap::Reservation
-BuiltinDeserializerAllocator::CreateReservationsForEagerBuiltins() {
- Heap::Reservation result;
-
- // Reservations for builtins.
-
- // DeserializeLazy is always the first builtin reservation (to simplify logic
- // in InitializeBuiltinsTable).
- {
- DCHECK(!Builtins::IsLazy(Builtins::kDeserializeLazy));
- uint32_t builtin_size =
- deserializer()->ExtractCodeObjectSize(Builtins::kDeserializeLazy);
- DCHECK_LE(builtin_size, MemoryAllocator::PageAreaSize(CODE_SPACE));
- result.push_back({builtin_size, kNullAddress, kNullAddress});
- }
-
- for (int i = 0; i < Builtins::builtin_count; i++) {
- if (i == Builtins::kDeserializeLazy) continue;
-
- // Skip lazy builtins. These will be replaced by the DeserializeLazy code
- // object in InitializeFromReservations and thus require no reserved space.
- if (deserializer()->IsLazyDeserializationEnabled() && Builtins::IsLazy(i)) {
- continue;
- }
-
- uint32_t builtin_size = deserializer()->ExtractCodeObjectSize(i);
- DCHECK_LE(builtin_size, MemoryAllocator::PageAreaSize(CODE_SPACE));
- result.push_back({builtin_size, kNullAddress, kNullAddress});
- }
-
- return result;
-}
-
-void BuiltinDeserializerAllocator::InitializeBuiltinFromReservation(
- const Heap::Chunk& chunk, int builtin_id) {
- DCHECK_EQ(deserializer()->ExtractCodeObjectSize(builtin_id), chunk.size);
- DCHECK_EQ(chunk.size, chunk.end - chunk.start);
-
- SkipList::Update(chunk.start, chunk.size);
- isolate()->builtins()->set_builtin(builtin_id,
- HeapObject::FromAddress(chunk.start));
-
-#ifdef DEBUG
- RegisterCodeObjectReservation(builtin_id);
-#endif
-}
-
-void BuiltinDeserializerAllocator::InitializeFromReservations(
- const Heap::Reservation& reservation) {
- DCHECK(!AllowHeapAllocation::IsAllowed());
-
- // Initialize the builtins table.
-
- Builtins* builtins = isolate()->builtins();
- int reservation_index = 0;
-
- // Other builtins can be replaced by DeserializeLazy so it may not be lazy.
- // It always occupies the first reservation slot.
- {
- DCHECK(!Builtins::IsLazy(Builtins::kDeserializeLazy));
- InitializeBuiltinFromReservation(reservation[reservation_index],
- Builtins::kDeserializeLazy);
- reservation_index++;
- }
-
- for (int i = 0; i < Builtins::builtin_count; i++) {
- if (i == Builtins::kDeserializeLazy) continue;
-
- if (deserializer()->IsLazyDeserializationEnabled() && Builtins::IsLazy(i)) {
- builtins->set_builtin(
- i, builtins->builtin(builtins->LazyDeserializerForBuiltin(i)));
- } else {
- InitializeBuiltinFromReservation(reservation[reservation_index], i);
- reservation_index++;
- }
- }
-
- DCHECK_EQ(reservation.size(), reservation_index);
-}
-
-void BuiltinDeserializerAllocator::ReserveAndInitializeBuiltinsTableForBuiltin(
- int builtin_id) {
- DCHECK(AllowHeapAllocation::IsAllowed());
- DCHECK(isolate()->builtins()->is_initialized());
- DCHECK(Builtins::IsBuiltinId(builtin_id));
- DCHECK(!Builtins::IsLazyDeserializer(builtin_id));
- DCHECK(Builtins::IsLazyDeserializer(
- isolate()->builtins()->builtin(builtin_id)->builtin_index()));
-
- const uint32_t builtin_size =
- deserializer()->ExtractCodeObjectSize(builtin_id);
- DCHECK_LE(builtin_size, MemoryAllocator::PageAreaSize(CODE_SPACE));
-
- Handle<HeapObject> o =
- isolate()->factory()->NewCodeForDeserialization(builtin_size);
-
- // Note: After this point and until deserialization finishes, heap allocation
- // is disallowed. We currently can't safely assert this since we'd need to
- // pass the DisallowHeapAllocation scope out of this function.
-
- // Write the allocated filler object into the builtins table. It will be
- // returned by our custom Allocate method below once needed.
-
- isolate()->builtins()->set_builtin(builtin_id, *o);
-
-#ifdef DEBUG
- RegisterCodeObjectReservation(builtin_id);
-#endif
-}
-
-#ifdef DEBUG
-void BuiltinDeserializerAllocator::RegisterCodeObjectReservation(
- int code_object_id) {
- const auto result = unused_reservations_.emplace(code_object_id);
- CHECK(result.second); // False, iff builtin_id was already present in set.
-}
-
-void BuiltinDeserializerAllocator::RegisterCodeObjectAllocation(
- int code_object_id) {
- const size_t removed_elems = unused_reservations_.erase(code_object_id);
- CHECK_EQ(removed_elems, 1);
-}
-
-bool BuiltinDeserializerAllocator::ReservationsAreFullyUsed() const {
- // Not 100% precise but should be good enough.
- return unused_reservations_.empty();
-}
-#endif // DEBUG
-
-Isolate* BuiltinDeserializerAllocator::isolate() const {
- return deserializer()->isolate();
-}
-
-BuiltinDeserializer* BuiltinDeserializerAllocator::deserializer() const {
- return static_cast<BuiltinDeserializer*>(deserializer_);
-}
-
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/snapshot/builtin-deserializer-allocator.h b/deps/v8/src/snapshot/builtin-deserializer-allocator.h
deleted file mode 100644
index b606eb2749..0000000000
--- a/deps/v8/src/snapshot/builtin-deserializer-allocator.h
+++ /dev/null
@@ -1,127 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_SNAPSHOT_BUILTIN_DESERIALIZER_ALLOCATOR_H_
-#define V8_SNAPSHOT_BUILTIN_DESERIALIZER_ALLOCATOR_H_
-
-#include <unordered_set>
-
-#include "src/globals.h"
-#include "src/heap/heap.h"
-#include "src/interpreter/interpreter.h"
-#include "src/snapshot/serializer-common.h"
-
-namespace v8 {
-namespace internal {
-
-template <class AllocatorT>
-class Deserializer;
-
-class BuiltinDeserializer;
-class BuiltinSnapshotUtils;
-
-class BuiltinDeserializerAllocator final {
- using BSU = BuiltinSnapshotUtils;
- using Bytecode = interpreter::Bytecode;
- using OperandScale = interpreter::OperandScale;
-
- public:
- BuiltinDeserializerAllocator(
- Deserializer<BuiltinDeserializerAllocator>* deserializer);
-
- // ------- Allocation Methods -------
- // Methods related to memory allocation during deserialization.
-
- // Allocation works differently here than in other deserializers. Instead of
- // a statically-known memory area determined at serialization-time, our
- // memory requirements here are determined at runtime. Another major
- // difference is that we create builtin Code objects up-front (before
- // deserialization) in order to avoid having to patch builtin references
- // later on. See also the kBuiltin case in deserializer.cc.
- //
- // There is one way that we use to reserve / allocate space. Required objects
- // are requested from the GC prior to deserialization. Pre-allocated builtin
- // code objects are written into the builtins table (this is to make
- // deserialization of builtin references easier).
- //
- // Allocate simply returns the pre-allocated object prepared by
- // InitializeFromReservations.
- Address Allocate(AllocationSpace space, int size);
-
- void MoveToNextChunk(AllocationSpace space) { UNREACHABLE(); }
- void SetAlignment(AllocationAlignment alignment) { UNREACHABLE(); }
-
- void set_next_reference_is_weak(bool next_reference_is_weak) {
- next_reference_is_weak_ = next_reference_is_weak;
- }
-
- bool GetAndClearNextReferenceIsWeak() {
- bool saved = next_reference_is_weak_;
- next_reference_is_weak_ = false;
- return saved;
- }
-
-#ifdef DEBUG
- bool next_reference_is_weak() const { return next_reference_is_weak_; }
-#endif
-
- HeapObject* GetMap(uint32_t index) { UNREACHABLE(); }
- HeapObject* GetLargeObject(uint32_t index) { UNREACHABLE(); }
- HeapObject* GetObject(AllocationSpace space, uint32_t chunk_index,
- uint32_t chunk_offset) {
- UNREACHABLE();
- }
-
- // ------- Reservation Methods -------
- // Methods related to memory reservations (prior to deserialization).
-
- // Builtin deserialization does not bake reservations into the snapshot, hence
- // this is a nop.
- void DecodeReservation(const std::vector<SerializedData::Reservation>& res) {}
-
- // These methods are used to pre-allocate builtin objects prior to
- // deserialization.
- // TODO(jgruber): Refactor reservation/allocation logic in deserializers to
- // make this less messy.
- Heap::Reservation CreateReservationsForEagerBuiltins();
- void InitializeFromReservations(const Heap::Reservation& reservation);
-
- // Creates reservations and initializes the builtins table in preparation for
- // lazily deserializing a single builtin.
- void ReserveAndInitializeBuiltinsTableForBuiltin(int builtin_id);
-
-#ifdef DEBUG
- bool ReservationsAreFullyUsed() const;
-#endif
-
- private:
- Isolate* isolate() const;
- BuiltinDeserializer* deserializer() const;
-
- // Used after memory allocation prior to isolate initialization, to register
- // the newly created object in code space and add it to the builtins table.
- void InitializeBuiltinFromReservation(const Heap::Chunk& chunk,
- int builtin_id);
-
-#ifdef DEBUG
- void RegisterCodeObjectReservation(int code_object_id);
- void RegisterCodeObjectAllocation(int code_object_id);
- std::unordered_set<int> unused_reservations_;
-#endif
-
- private:
- // The current deserializer. Note that this always points to a
- // BuiltinDeserializer instance, but we can't perform the cast during
- // construction since that makes vtable-based checks fail.
- Deserializer<BuiltinDeserializerAllocator>* const deserializer_;
-
- bool next_reference_is_weak_ = false;
-
- DISALLOW_COPY_AND_ASSIGN(BuiltinDeserializerAllocator)
-};
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_SNAPSHOT_BUILTIN_DESERIALIZER_ALLOCATOR_H_
diff --git a/deps/v8/src/snapshot/builtin-deserializer.cc b/deps/v8/src/snapshot/builtin-deserializer.cc
deleted file mode 100644
index 136b74b26e..0000000000
--- a/deps/v8/src/snapshot/builtin-deserializer.cc
+++ /dev/null
@@ -1,167 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/snapshot/builtin-deserializer.h"
-
-#include "src/assembler-inl.h"
-#include "src/interpreter/interpreter.h"
-#include "src/objects-inl.h"
-#include "src/snapshot/snapshot.h"
-
-namespace v8 {
-namespace internal {
-
-using interpreter::Bytecodes;
-using interpreter::Interpreter;
-
-// Tracks the code object currently being deserialized (required for
-// allocation).
-class DeserializingCodeObjectScope {
- public:
- DeserializingCodeObjectScope(BuiltinDeserializer* builtin_deserializer,
- int code_object_id)
- : builtin_deserializer_(builtin_deserializer) {
- DCHECK_EQ(BuiltinDeserializer::kNoCodeObjectId,
- builtin_deserializer->current_code_object_id_);
- builtin_deserializer->current_code_object_id_ = code_object_id;
- }
-
- ~DeserializingCodeObjectScope() {
- builtin_deserializer_->current_code_object_id_ =
- BuiltinDeserializer::kNoCodeObjectId;
- }
-
- private:
- BuiltinDeserializer* builtin_deserializer_;
-
- DISALLOW_COPY_AND_ASSIGN(DeserializingCodeObjectScope)
-};
-
-BuiltinDeserializer::BuiltinDeserializer(Isolate* isolate,
- const BuiltinSnapshotData* data)
- : Deserializer(data, false) {
- code_offsets_ = data->BuiltinOffsets();
- DCHECK_EQ(Builtins::builtin_count, code_offsets_.length());
- DCHECK(std::is_sorted(code_offsets_.begin(), code_offsets_.end()));
-
- Initialize(isolate);
-}
-
-void BuiltinDeserializer::DeserializeEagerBuiltins() {
- DCHECK(!AllowHeapAllocation::IsAllowed());
- DCHECK_EQ(0, source()->position());
-
- // Deserialize builtins.
-
- Builtins* builtins = isolate()->builtins();
- for (int i = 0; i < Builtins::builtin_count; i++) {
- if (IsLazyDeserializationEnabled() && Builtins::IsLazy(i)) {
- // Do nothing. These builtins have been replaced by DeserializeLazy in
- // InitializeFromReservations.
- DCHECK_EQ(builtins->builtin(builtins->LazyDeserializerForBuiltin(i)),
- builtins->builtin(i));
- } else {
- builtins->set_builtin(i, DeserializeBuiltinRaw(i));
- }
- }
-
-#ifdef DEBUG
- for (int i = 0; i < Builtins::builtin_count; i++) {
- Object* o = builtins->builtin(i);
- DCHECK(o->IsCode() && Code::cast(o)->is_builtin());
- }
-#endif
-
-#ifdef ENABLE_DISASSEMBLER
- if (FLAG_print_builtin_code) {
- // We can't print builtins during deserialization because they may refer
- // to not yet deserialized builtins.
- for (int i = 0; i < Builtins::builtin_count; i++) {
- if (!IsLazyDeserializationEnabled() || !Builtins::IsLazy(i)) {
- Code* code = builtins->builtin(i);
- const char* name = Builtins::name(i);
- code->PrintBuiltinCode(isolate(), name);
- }
- }
- }
-#endif
-}
-
-Code* BuiltinDeserializer::DeserializeBuiltin(int builtin_id) {
- allocator()->ReserveAndInitializeBuiltinsTableForBuiltin(builtin_id);
- DisallowHeapAllocation no_gc;
- Code* code = DeserializeBuiltinRaw(builtin_id);
-
-#ifdef ENABLE_DISASSEMBLER
- if (FLAG_print_builtin_code) {
- const char* name = Builtins::name(builtin_id);
- code->PrintBuiltinCode(isolate(), name);
- }
-#endif // ENABLE_DISASSEMBLER
-
- return code;
-}
-
-Code* BuiltinDeserializer::DeserializeBuiltinRaw(int builtin_id) {
- DCHECK(!AllowHeapAllocation::IsAllowed());
- DCHECK(Builtins::IsBuiltinId(builtin_id));
-
- DeserializingCodeObjectScope scope(this, builtin_id);
-
- const int initial_position = source()->position();
- source()->set_position(code_offsets_[builtin_id]);
-
- Object* o = ReadDataSingle();
- DCHECK(o->IsCode() && Code::cast(o)->is_builtin());
-
- // Rewind.
- source()->set_position(initial_position);
-
- // Flush the instruction cache.
- Code* code = Code::cast(o);
- Assembler::FlushICache(code->raw_instruction_start(),
- code->raw_instruction_size());
-
- CodeEventListener::LogEventsAndTags code_tag;
- switch (code->kind()) {
- case AbstractCode::BUILTIN:
- code_tag = CodeEventListener::BUILTIN_TAG;
- break;
- case AbstractCode::BYTECODE_HANDLER:
- code_tag = CodeEventListener::BYTECODE_HANDLER_TAG;
- break;
- default:
- UNREACHABLE();
- }
-
- PROFILE(isolate(), CodeCreateEvent(code_tag, AbstractCode::cast(code),
- Builtins::name(builtin_id)));
- LOG_CODE_EVENT(isolate(),
- CodeLinePosInfoRecordEvent(
- code->raw_instruction_start(),
- ByteArray::cast(code->source_position_table())));
- return code;
-}
-
-uint32_t BuiltinDeserializer::ExtractCodeObjectSize(int code_object_id) {
- DCHECK_LT(code_object_id, Builtins::builtin_count);
-
- const int initial_position = source()->position();
-
- // Grab the size of the code object.
- source()->set_position(code_offsets_[code_object_id]);
- byte data = source()->Get();
-
- USE(data);
- DCHECK_EQ(kNewObject | kPlain | kStartOfObject | CODE_SPACE, data);
- const uint32_t result = source()->GetInt() << kObjectAlignmentBits;
-
- // Rewind.
- source()->set_position(initial_position);
-
- return result;
-}
-
-} // namespace internal
-} // namespace v8
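
DeserializeBuiltinRaw() and ExtractCodeObjectSize() above share a seek/read/rewind pattern: save the stream position, jump to a recorded offset, decode one entry, and restore the position. A minimal standalone sketch of that pattern (ByteSource and PeekAt are illustrative names, not V8 APIs):

    #include <cstdint>
    #include <cstdio>
    #include <utility>
    #include <vector>

    class ByteSource {
     public:
      explicit ByteSource(std::vector<uint8_t> data) : data_(std::move(data)) {}
      int position() const { return pos_; }
      void set_position(int pos) { pos_ = pos; }
      uint8_t Get() { return data_[pos_++]; }

     private:
      std::vector<uint8_t> data_;
      int pos_ = 0;
    };

    uint8_t PeekAt(ByteSource* source, int offset) {
      const int initial_position = source->position();
      source->set_position(offset);             // Jump to the recorded offset.
      uint8_t value = source->Get();            // Decode a single entry.
      source->set_position(initial_position);   // Rewind.
      return value;
    }

    int main() {
      ByteSource source({10, 20, 30});
      std::printf("%u\n", static_cast<unsigned>(PeekAt(&source, 2)));  // 30
    }
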
diff --git a/deps/v8/src/snapshot/builtin-deserializer.h b/deps/v8/src/snapshot/builtin-deserializer.h
deleted file mode 100644
index e77598db68..0000000000
--- a/deps/v8/src/snapshot/builtin-deserializer.h
+++ /dev/null
@@ -1,83 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_SNAPSHOT_BUILTIN_DESERIALIZER_H_
-#define V8_SNAPSHOT_BUILTIN_DESERIALIZER_H_
-
-#include "src/interpreter/interpreter.h"
-#include "src/snapshot/builtin-deserializer-allocator.h"
-#include "src/snapshot/deserializer.h"
-
-namespace v8 {
-namespace internal {
-
-class BuiltinSnapshotData;
-
-// Deserializes the builtins blob.
-class BuiltinDeserializer final
- : public Deserializer<BuiltinDeserializerAllocator> {
- using BSU = BuiltinSnapshotUtils;
- using Bytecode = interpreter::Bytecode;
- using OperandScale = interpreter::OperandScale;
-
- public:
- BuiltinDeserializer(Isolate* isolate, const BuiltinSnapshotData* data);
-
- // Builtins deserialization is tightly integrated with deserialization of the
- // startup blob. In particular, we need to ensure that no GC can occur
- // between startup- and builtins deserialization, as all builtins have been
- // pre-allocated and their pointers must not be invalidated.
- //
- // After this, the instruction cache must be flushed by the caller (we don't
- // do it ourselves since the startup deserializer batch-flushes all code pages).
- void DeserializeEagerBuiltins();
-
- // Deserializes the single given builtin. This is used whenever a builtin is
- // lazily deserialized at runtime.
- Code* DeserializeBuiltin(int builtin_id);
-
- private:
- // Deserializes the single given builtin. Assumes that reservations have
- // already been allocated.
- Code* DeserializeBuiltinRaw(int builtin_id);
-
- // Extracts the size of the given builtin's Code object (baked into the snapshot).
- uint32_t ExtractCodeObjectSize(int builtin_id);
-
- // BuiltinDeserializer implements its own builtin iteration logic. Make sure
- // the RootVisitor API is not used accidentally.
- void VisitRootPointers(Root root, const char* description, Object** start,
- Object** end) override {
- UNREACHABLE();
- }
-
- int CurrentCodeObjectId() const { return current_code_object_id_; }
-
- // Convenience function to grab the handler off the heap's strong root list.
- Code* GetDeserializeLazyHandler(OperandScale operand_scale) const;
-
- private:
- // Tracks the code object currently being deserialized. The field
- // {current_code_object_id_} stores the index of the currently-deserialized
- // code object within the snapshot (and within {code_offsets_}). We need this
- // to determine where to 'allocate' from during deserialization.
- static const int kNoCodeObjectId = -1;
- int current_code_object_id_ = kNoCodeObjectId;
-
- // The offsets of each builtin within the serialized data. Equivalent to
- // BuiltinSerializer::builtin_offsets_ but on the deserialization side.
- Vector<const uint32_t> code_offsets_;
-
- // For current_code_object_id_.
- friend class DeserializingCodeObjectScope;
-
- // For isolate(), IsLazyDeserializationEnabled(), CurrentCodeObjectId() and
- // ExtractCodeObjectSize().
- friend class BuiltinDeserializerAllocator;
-};
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_SNAPSHOT_BUILTIN_DESERIALIZER_H_
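
DeserializingCodeObjectScope in the .cc above is a classic RAII guard: it sets a tracking field on construction and restores the sentinel on destruction, so the field can never be left stale on an early return. A reduced sketch of the same pattern (Tracker and ScopedId are hypothetical names):

    #include <cassert>

    struct Tracker {
      static constexpr int kNoId = -1;
      int current_id = kNoId;
    };

    class ScopedId {
     public:
      ScopedId(Tracker* tracker, int id) : tracker_(tracker) {
        assert(tracker->current_id == Tracker::kNoId);  // No nested scopes.
        tracker->current_id = id;
      }
      ~ScopedId() { tracker_->current_id = Tracker::kNoId; }

      ScopedId(const ScopedId&) = delete;             // Mirrors
      ScopedId& operator=(const ScopedId&) = delete;  // DISALLOW_COPY_AND_ASSIGN.

     private:
      Tracker* tracker_;
    };

    int main() {
      Tracker tracker;
      {
        ScopedId scope(&tracker, 42);
        assert(tracker.current_id == 42);  // Visible while the scope lives.
      }
      assert(tracker.current_id == Tracker::kNoId);  // Restored on exit.
    }
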
diff --git a/deps/v8/src/snapshot/builtin-serializer-allocator.cc b/deps/v8/src/snapshot/builtin-serializer-allocator.cc
deleted file mode 100644
index a50fc23dd8..0000000000
--- a/deps/v8/src/snapshot/builtin-serializer-allocator.cc
+++ /dev/null
@@ -1,61 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/snapshot/builtin-serializer-allocator.h"
-
-#include "src/heap/heap-inl.h"
-
-namespace v8 {
-namespace internal {
-
-SerializerReference BuiltinSerializerAllocator::Allocate(AllocationSpace space,
- uint32_t size) {
- DCHECK_EQ(space, CODE_SPACE);
- DCHECK_GT(size, 0);
-
- // Builtin serialization & deserialization does not use the reservation
- // system. Instead of worrying about chunk indices and offsets, we simply
- // generate unique reference indices here.
-
- const auto ref =
- SerializerReference::BuiltinReference(next_builtin_reference_index_);
-
- allocated_bytes_ += size;
- next_builtin_reference_index_++;
-
- return ref;
-}
-
-#ifdef DEBUG
-bool BuiltinSerializerAllocator::BackReferenceIsAlreadyAllocated(
- SerializerReference reference) const {
- DCHECK(reference.is_builtin_reference());
- return reference.builtin_index() < next_builtin_reference_index_;
-}
-#endif // DEBUG
-
-std::vector<SerializedData::Reservation>
-BuiltinSerializerAllocator::EncodeReservations() const {
- return std::vector<SerializedData::Reservation>();
-}
-
-void BuiltinSerializerAllocator::OutputStatistics() {
- DCHECK(FLAG_serialization_statistics);
-
- PrintF(" Spaces (bytes):\n");
-
- for (int space = FIRST_SPACE; space < kNumberOfSpaces; space++) {
- PrintF("%16s", AllocationSpaceName(static_cast<AllocationSpace>(space)));
- }
- PrintF("\n");
-
- for (int space = FIRST_SPACE; space < kNumberOfSpaces; space++) {
- uint32_t space_size = (space == CODE_SPACE) ? allocated_bytes_ : 0;
- PrintF("%16d", space_size);
- }
- PrintF("\n");
-}
-
-} // namespace internal
-} // namespace v8
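
As the comment in Allocate() above notes, this allocator bypasses the reservation system entirely; each "allocation" just hands out the next unique reference index while tallying bytes for statistics. A self-contained sketch of that idea:

    #include <cstdint>
    #include <cstdio>

    class IndexAllocator {
     public:
      uint32_t Allocate(uint32_t size) {
        allocated_bytes_ += size;
        return next_index_++;  // The "reference" is simply a unique index.
      }
      uint32_t allocated_bytes() const { return allocated_bytes_; }

     private:
      uint32_t allocated_bytes_ = 0;
      uint32_t next_index_ = 0;
    };

    int main() {
      IndexAllocator allocator;
      std::printf("ref %u\n", allocator.Allocate(128));  // ref 0
      std::printf("ref %u\n", allocator.Allocate(256));  // ref 1
      std::printf("total %u bytes\n", allocator.allocated_bytes());  // 384
    }
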
diff --git a/deps/v8/src/snapshot/builtin-serializer-allocator.h b/deps/v8/src/snapshot/builtin-serializer-allocator.h
deleted file mode 100644
index 5a92843685..0000000000
--- a/deps/v8/src/snapshot/builtin-serializer-allocator.h
+++ /dev/null
@@ -1,50 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_SNAPSHOT_BUILTIN_SERIALIZER_ALLOCATOR_H_
-#define V8_SNAPSHOT_BUILTIN_SERIALIZER_ALLOCATOR_H_
-
-#include "src/snapshot/serializer-common.h"
-
-namespace v8 {
-namespace internal {
-
-template <class AllocatorT>
-class Serializer;
-
-class BuiltinSerializerAllocator final {
- public:
- BuiltinSerializerAllocator(
- Serializer<BuiltinSerializerAllocator>* serializer) {}
-
- SerializerReference Allocate(AllocationSpace space, uint32_t size);
- SerializerReference AllocateMap() { UNREACHABLE(); }
- SerializerReference AllocateLargeObject(uint32_t size) { UNREACHABLE(); }
- SerializerReference AllocateOffHeapBackingStore() { UNREACHABLE(); }
-
-#ifdef DEBUG
- bool BackReferenceIsAlreadyAllocated(
- SerializerReference back_reference) const;
-#endif
-
- std::vector<SerializedData::Reservation> EncodeReservations() const;
-
- void OutputStatistics();
-
- private:
- static constexpr int kNumberOfPreallocatedSpaces =
- SerializerDeserializer::kNumberOfPreallocatedSpaces;
- static constexpr int kNumberOfSpaces =
- SerializerDeserializer::kNumberOfSpaces;
-
- uint32_t allocated_bytes_ = 0;
- uint32_t next_builtin_reference_index_ = 0;
-
- DISALLOW_COPY_AND_ASSIGN(BuiltinSerializerAllocator)
-};
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_SNAPSHOT_BUILTIN_SERIALIZER_ALLOCATOR_H_
diff --git a/deps/v8/src/snapshot/builtin-serializer.cc b/deps/v8/src/snapshot/builtin-serializer.cc
deleted file mode 100644
index 6c71606b2e..0000000000
--- a/deps/v8/src/snapshot/builtin-serializer.cc
+++ /dev/null
@@ -1,110 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/snapshot/builtin-serializer.h"
-
-#include "src/interpreter/interpreter.h"
-#include "src/objects-inl.h"
-#include "src/snapshot/startup-serializer.h"
-
-namespace v8 {
-namespace internal {
-
-using interpreter::Bytecode;
-using interpreter::Bytecodes;
-using interpreter::OperandScale;
-
-BuiltinSerializer::BuiltinSerializer(Isolate* isolate,
- StartupSerializer* startup_serializer)
- : Serializer(isolate), startup_serializer_(startup_serializer) {}
-
-BuiltinSerializer::~BuiltinSerializer() {
- OutputStatistics("BuiltinSerializer");
-}
-
-void BuiltinSerializer::SerializeBuiltinsAndHandlers() {
- // Serialize builtins.
-
- for (int i = 0; i < Builtins::builtin_count; i++) {
- Code* code = isolate()->builtins()->builtin(i);
- DCHECK_IMPLIES(Builtins::IsLazyDeserializer(code),
- Builtins::IsLazyDeserializer(i));
- SetBuiltinOffset(i, sink_.Position());
- SerializeBuiltin(code);
- }
-
- const byte* data = reinterpret_cast<const byte*>(&code_offsets_[0]);
- int data_length = static_cast<int>(sizeof(code_offsets_));
-
- // Pad with kNop since GetInt() might read too far.
- Pad(data_length);
-
- // Append the offset table. During deserialization, the offset table is
- // extracted by BuiltinSnapshotData.
- sink_.PutRaw(data, data_length, "BuiltinOffsets");
-}
-
-void BuiltinSerializer::VisitRootPointers(Root root, const char* description,
- Object** start, Object** end) {
- UNREACHABLE(); // We iterate manually in SerializeBuiltins.
-}
-
-void BuiltinSerializer::SerializeBuiltin(Code* code) {
- DCHECK_GE(code->builtin_index(), 0);
-
- // All builtins are serialized unconditionally when the respective builtin is
- // reached while iterating the builtins list. A builtin seen at any other
- // time (e.g. startup snapshot creation, or while iterating a builtin code
- // object during builtin serialization) is serialized by reference - see
- // BuiltinSerializer::SerializeObject below.
- ObjectSerializer object_serializer(this, code, &sink_, kPlain,
- kStartOfObject);
- object_serializer.Serialize();
-}
-
-void BuiltinSerializer::SerializeObject(HeapObject* o, HowToCode how_to_code,
- WhereToPoint where_to_point, int skip) {
- DCHECK(!o->IsSmi());
-
- // Roots can simply be serialized as root references.
- RootIndex root_index;
- if (root_index_map()->Lookup(o, &root_index)) {
- DCHECK(startup_serializer_->root_has_been_serialized(root_index));
- PutRoot(root_index, o, how_to_code, where_to_point, skip);
- return;
- }
-
- // Builtins are serialized using a dedicated bytecode. We only reach this
- // point if encountering a Builtin e.g. while iterating the body of another
- // builtin.
- if (SerializeBuiltinReference(o, how_to_code, where_to_point, skip)) return;
-
- // Embedded objects are serialized as part of the partial snapshot cache.
- // Currently we expect to see:
- // * Code: Jump targets.
- // * ByteArrays: Relocation infos.
- // * FixedArrays: Handler tables.
- // * Strings: CSA_ASSERTs in debug builds, various other string constants.
- // * HeapNumbers: Embedded constants.
- // TODO(6624): Jump targets should never trigger content serialization;
- // they should always result in a reference instead. Reloc infos and handler
- // tables should not end up in the partial snapshot cache.
-
- FlushSkip(skip);
-
- int cache_index = startup_serializer_->PartialSnapshotCacheIndex(o);
- sink_.Put(kPartialSnapshotCache + how_to_code + where_to_point,
- "PartialSnapshotCache");
- sink_.PutInt(cache_index, "partial_snapshot_cache_index");
-}
-
-void BuiltinSerializer::SetBuiltinOffset(int builtin_id, uint32_t offset) {
- DCHECK(Builtins::IsBuiltinId(builtin_id));
- code_offsets_[builtin_id] = offset;
-}
-
-} // namespace internal
-} // namespace v8
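
SerializeBuiltinsAndHandlers() above produces a blob whose tail is an offset table: each item's start offset is recorded while writing, then the table is appended verbatim so a reader can seek to any individual item. A compact sketch of that layout (plain std::string items stand in for serialized builtins):

    #include <cstdint>
    #include <string>
    #include <vector>

    int main() {
      const std::vector<std::string> items = {"foo", "longer-item", "x"};
      std::vector<uint8_t> sink;
      std::vector<uint32_t> offsets;

      for (const std::string& item : items) {
        offsets.push_back(static_cast<uint32_t>(sink.size()));  // Start offset.
        sink.insert(sink.end(), item.begin(), item.end());
      }

      // Append the offset table; a reader extracts it from the blob's tail.
      const uint8_t* table = reinterpret_cast<const uint8_t*>(offsets.data());
      sink.insert(sink.end(), table, table + offsets.size() * sizeof(uint32_t));
    }
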
diff --git a/deps/v8/src/snapshot/builtin-serializer.h b/deps/v8/src/snapshot/builtin-serializer.h
deleted file mode 100644
index 132aa0894b..0000000000
--- a/deps/v8/src/snapshot/builtin-serializer.h
+++ /dev/null
@@ -1,59 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_SNAPSHOT_BUILTIN_SERIALIZER_H_
-#define V8_SNAPSHOT_BUILTIN_SERIALIZER_H_
-
-#include "src/builtins/builtins.h"
-#include "src/interpreter/interpreter.h"
-#include "src/snapshot/builtin-serializer-allocator.h"
-#include "src/snapshot/serializer.h"
-
-namespace v8 {
-namespace internal {
-
-class StartupSerializer;
-
-// Responsible for serializing builtin objects during startup snapshot creation
-// into a dedicated area of the snapshot.
-// See snapshot.h for documentation of the snapshot layout.
-class BuiltinSerializer : public Serializer<BuiltinSerializerAllocator> {
- public:
- BuiltinSerializer(Isolate* isolate, StartupSerializer* startup_serializer);
- ~BuiltinSerializer() override;
-
- void SerializeBuiltinsAndHandlers();
-
- private:
- void VisitRootPointers(Root root, const char* description, Object** start,
- Object** end) override;
-
- void SerializeBuiltin(Code* code);
- void SerializeObject(HeapObject* o, HowToCode how_to_code,
- WhereToPoint where_to_point, int skip) override;
-
- void SetBuiltinOffset(int builtin_id, uint32_t offset);
- void SetHandlerOffset(interpreter::Bytecode bytecode,
- interpreter::OperandScale operand_scale,
- uint32_t offset);
-
- // The startup serializer is needed for access to the partial snapshot cache,
- // which is used to serialize things like embedded constants.
- StartupSerializer* startup_serializer_;
-
- // Stores the starting offset, within the serialized data, of each code
- // object. This is later packed into the builtin snapshot, and used by the
- // builtin deserializer to deserialize individual builtins.
- //
- // Indices [kFirstBuiltinIndex, kFirstBuiltinIndex + kNumberOfBuiltins):
- // Builtin offsets.
- uint32_t code_offsets_[Builtins::builtin_count];
-
- DISALLOW_COPY_AND_ASSIGN(BuiltinSerializer);
-};
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_SNAPSHOT_BUILTIN_SERIALIZER_H_
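
SerializeObject() in the .cc above tries the cheapest encodings first: a root reference, then a dedicated builtin reference, then the partial-snapshot-cache fallback. A schematic sketch of that tiered dispatch (the enum and messages are illustrative only):

    #include <cstdio>

    enum class Kind { kRoot, kBuiltin, kOther };

    void SerializeObject(Kind kind) {
      if (kind == Kind::kRoot) {
        std::puts("emit root reference");  // Cheapest: a known root index.
        return;
      }
      if (kind == Kind::kBuiltin) {
        std::puts("emit builtin reference");  // Dedicated bytecode.
        return;
      }
      std::puts("emit partial snapshot cache index");  // Fallback tier.
    }

    int main() {
      SerializeObject(Kind::kRoot);
      SerializeObject(Kind::kBuiltin);
      SerializeObject(Kind::kOther);
    }
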
diff --git a/deps/v8/src/snapshot/code-serializer.cc b/deps/v8/src/snapshot/code-serializer.cc
index b463ca2047..bdae825950 100644
--- a/deps/v8/src/snapshot/code-serializer.cc
+++ b/deps/v8/src/snapshot/code-serializer.cc
@@ -4,12 +4,12 @@
#include "src/snapshot/code-serializer.h"
-#include "src/code-stubs.h"
#include "src/counters.h"
#include "src/debug/debug.h"
#include "src/log.h"
#include "src/macro-assembler.h"
#include "src/objects-inl.h"
+#include "src/objects/slots.h"
#include "src/snapshot/object-deserializer.h"
#include "src/snapshot/snapshot.h"
#include "src/version.h"
@@ -63,7 +63,8 @@ ScriptCompiler::CachedData* CodeSerializer::Serialize(
CodeSerializer cs(isolate, SerializedCodeData::SourceHash(
source, script->origin_options()));
DisallowHeapAllocation no_gc;
- cs.reference_map()->AddAttachedReference(*source);
+ cs.reference_map()->AddAttachedReference(
+ reinterpret_cast<void*>(source->ptr()));
ScriptData* script_data = cs.SerializeSharedFunctionInfo(info);
if (FLAG_profile_deserialization) {
@@ -86,7 +87,7 @@ ScriptData* CodeSerializer::SerializeSharedFunctionInfo(
DisallowHeapAllocation no_gc;
VisitRootPointer(Root::kHandleScope, nullptr,
- Handle<Object>::cast(info).location());
+ FullObjectSlot(info.location()));
SerializeDeferredObjects();
Pad();
@@ -95,7 +96,7 @@ ScriptData* CodeSerializer::SerializeSharedFunctionInfo(
return data.GetScriptData();
}
-bool CodeSerializer::SerializeReadOnlyObject(HeapObject* obj,
+bool CodeSerializer::SerializeReadOnlyObject(HeapObject obj,
HowToCode how_to_code,
WhereToPoint where_to_point,
int skip) {
@@ -115,20 +116,16 @@ bool CodeSerializer::SerializeReadOnlyObject(HeapObject* obj,
uint32_t chunk_offset = static_cast<uint32_t>(page->Offset(address));
SerializerReference back_reference =
SerializerReference::BackReference(RO_SPACE, chunk_index, chunk_offset);
- reference_map()->Add(obj, back_reference);
+ reference_map()->Add(reinterpret_cast<void*>(obj->ptr()), back_reference);
CHECK(SerializeBackReference(obj, how_to_code, where_to_point, skip));
return true;
}
-void CodeSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
+void CodeSerializer::SerializeObject(HeapObject obj, HowToCode how_to_code,
WhereToPoint where_to_point, int skip) {
if (SerializeHotObject(obj, how_to_code, where_to_point, skip)) return;
- RootIndex root_index;
- if (root_index_map()->Lookup(obj, &root_index)) {
- PutRoot(root_index, obj, how_to_code, where_to_point, skip);
- return;
- }
+ if (SerializeRoot(obj, how_to_code, where_to_point, skip)) return;
if (SerializeBackReference(obj, how_to_code, where_to_point, skip)) return;
@@ -137,24 +134,15 @@ void CodeSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
FlushSkip(skip);
if (obj->IsCode()) {
- Code* code_object = Code::cast(obj);
+ Code code_object = Code::cast(obj);
switch (code_object->kind()) {
case Code::OPTIMIZED_FUNCTION: // No optimized code compiled yet.
case Code::REGEXP: // No regexp literals initialized yet.
case Code::NUMBER_OF_KINDS: // Pseudo enum value.
case Code::BYTECODE_HANDLER: // No direct references to handlers.
break; // hit UNREACHABLE below.
- case Code::BUILTIN:
- SerializeBuiltinReference(code_object, how_to_code, where_to_point, 0);
- return;
case Code::STUB:
- if (code_object->builtin_index() == -1) {
- SerializeCodeStub(code_object, how_to_code, where_to_point);
- } else {
- SerializeBuiltinReference(code_object, how_to_code, where_to_point,
- 0);
- }
- return;
+ case Code::BUILTIN:
default:
return SerializeCodeObject(code_object, how_to_code, where_to_point);
}
@@ -168,19 +156,19 @@ void CodeSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
}
if (obj->IsScript()) {
- Script* script_obj = Script::cast(obj);
+ Script script_obj = Script::cast(obj);
DCHECK_NE(script_obj->compilation_type(), Script::COMPILATION_TYPE_EVAL);
// We want to differentiate between undefined and uninitialized_symbol for
// context_data for now. It is a hack to allow debugging of scripts that are
// included as part of a custom snapshot. (see debug::Script::IsEmbedded())
- Object* context_data = script_obj->context_data();
+ Object context_data = script_obj->context_data();
if (context_data != roots.undefined_value() &&
context_data != roots.uninitialized_symbol()) {
script_obj->set_context_data(roots.undefined_value());
}
// We don't want to serialize host options, to avoid serializing an
// unnecessary object graph.
- FixedArray* host_options = script_obj->host_defined_options();
+ FixedArray host_options = script_obj->host_defined_options();
script_obj->set_host_defined_options(roots.empty_fixed_array());
SerializeGeneric(obj, how_to_code, where_to_point);
script_obj->set_host_defined_options(host_options);
@@ -189,13 +177,13 @@ void CodeSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
}
if (obj->IsSharedFunctionInfo()) {
- SharedFunctionInfo* sfi = SharedFunctionInfo::cast(obj);
+ SharedFunctionInfo sfi = SharedFunctionInfo::cast(obj);
// TODO(7110): Enable serializing of Asm modules once the AsmWasmData
// is context independent.
DCHECK(!sfi->IsApiFunction() && !sfi->HasAsmWasmData());
- DebugInfo* debug_info = nullptr;
- BytecodeArray* debug_bytecode_array = nullptr;
+ DebugInfo debug_info;
+ BytecodeArray debug_bytecode_array;
if (sfi->HasDebugInfo()) {
// Clear debug info.
debug_info = sfi->GetDebugInfo();
@@ -207,16 +195,12 @@ void CodeSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
}
DCHECK(!sfi->HasDebugInfo());
- // Mark SFI to indicate whether the code is cached.
- bool was_deserialized = sfi->deserialized();
- sfi->set_deserialized(sfi->is_compiled());
SerializeGeneric(obj, how_to_code, where_to_point);
- sfi->set_deserialized(was_deserialized);
// Restore debug info
- if (debug_info != nullptr) {
+ if (!debug_info.is_null()) {
sfi->set_script_or_debug_info(debug_info);
- if (debug_bytecode_array != nullptr) {
+ if (!debug_bytecode_array.is_null()) {
sfi->SetDebugBytecodeArray(debug_bytecode_array);
}
}
@@ -240,7 +224,7 @@ void CodeSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
SerializeGeneric(obj, how_to_code, where_to_point);
}
-void CodeSerializer::SerializeGeneric(HeapObject* heap_object,
+void CodeSerializer::SerializeGeneric(HeapObject heap_object,
HowToCode how_to_code,
WhereToPoint where_to_point) {
// Object has not yet been serialized. Serialize it here.
@@ -249,25 +233,6 @@ void CodeSerializer::SerializeGeneric(HeapObject* heap_object,
serializer.Serialize();
}
-void CodeSerializer::SerializeCodeStub(Code* code_stub, HowToCode how_to_code,
- WhereToPoint where_to_point) {
- // We only arrive here if we have not encountered this code stub before.
- DCHECK(!reference_map()->LookupReference(code_stub).is_valid());
- uint32_t stub_key = code_stub->stub_key();
- DCHECK(CodeStub::MajorKeyFromKey(stub_key) != CodeStub::NoCache);
- DCHECK(!CodeStub::GetCode(isolate(), stub_key).is_null());
- stub_keys_.push_back(stub_key);
-
- SerializerReference reference =
- reference_map()->AddAttachedReference(code_stub);
- if (FLAG_trace_serializer) {
- PrintF(" Encoding code stub %s as attached reference %d\n",
- CodeStub::MajorName(CodeStub::MajorKeyFromKey(stub_key)),
- reference.attached_reference_index());
- }
- PutAttachedReference(reference, how_to_code, where_to_point);
-}
-
MaybeHandle<SharedFunctionInfo> CodeSerializer::Deserialize(
Isolate* isolate, ScriptData* cached_data, Handle<String> source,
ScriptOriginOptions origin_options) {
@@ -307,23 +272,35 @@ MaybeHandle<SharedFunctionInfo> CodeSerializer::Deserialize(
PrintF("[Deserializing from %d bytes took %0.3f ms]\n", length, ms);
}
- bool log_code_creation = isolate->logger()->is_listening_to_code_events() ||
- isolate->is_profiling();
+ bool log_code_creation =
+ isolate->logger()->is_listening_to_code_events() ||
+ isolate->is_profiling() ||
+ isolate->code_event_dispatcher()->IsListeningToCodeEvents();
if (log_code_creation || FLAG_log_function_events) {
- String* name = ReadOnlyRoots(isolate).empty_string();
- if (result->script()->IsScript()) {
- Script* script = Script::cast(result->script());
- if (script->name()->IsString()) name = String::cast(script->name());
- if (FLAG_log_function_events) {
- LOG(isolate, FunctionEvent("deserialize", script->id(),
- timer.Elapsed().InMillisecondsF(),
- result->StartPosition(),
- result->EndPosition(), name));
- }
+ String name = ReadOnlyRoots(isolate).empty_string();
+ Script script = Script::cast(result->script());
+ Handle<Script> script_handle(script, isolate);
+ if (script->name()->IsString()) name = String::cast(script->name());
+ if (FLAG_log_function_events) {
+ LOG(isolate,
+ FunctionEvent("deserialize", script->id(),
+ timer.Elapsed().InMillisecondsF(),
+ result->StartPosition(), result->EndPosition(), name));
}
if (log_code_creation) {
- PROFILE(isolate, CodeCreateEvent(CodeEventListener::SCRIPT_TAG,
- result->abstract_code(), *result, name));
+ Script::InitLineEnds(Handle<Script>(script, isolate));
+ DisallowHeapAllocation no_gc;
+ SharedFunctionInfo::ScriptIterator iter(isolate, script);
+ for (i::SharedFunctionInfo info = iter.Next(); !info.is_null();
+ info = iter.Next()) {
+ if (info->is_compiled()) {
+ int line_num = script->GetLineNumber(info->StartPosition()) + 1;
+ int column_num = script->GetColumnNumber(info->StartPosition()) + 1;
+ PROFILE(isolate, CodeCreateEvent(CodeEventListener::SCRIPT_TAG,
+ info->abstract_code(), info, name,
+ line_num, column_num));
+ }
+ }
}
}
@@ -338,13 +315,12 @@ MaybeHandle<SharedFunctionInfo> CodeSerializer::Deserialize(
SerializedCodeData::SerializedCodeData(const std::vector<byte>* payload,
const CodeSerializer* cs) {
DisallowHeapAllocation no_gc;
- const std::vector<uint32_t>* stub_keys = cs->stub_keys();
std::vector<Reservation> reservations = cs->EncodeReservations();
// Calculate sizes.
uint32_t reservation_size =
static_cast<uint32_t>(reservations.size()) * kUInt32Size;
- uint32_t num_stub_keys = static_cast<uint32_t>(stub_keys->size());
+ uint32_t num_stub_keys = 0; // TODO(jgruber): Remove.
uint32_t stub_keys_size = num_stub_keys * kUInt32Size;
uint32_t payload_offset = kHeaderSize + reservation_size + stub_keys_size;
uint32_t padded_payload_offset = POINTER_SIZE_ALIGN(payload_offset);
@@ -359,7 +335,7 @@ SerializedCodeData::SerializedCodeData(const std::vector<byte>* payload,
memset(data_, 0, padded_payload_offset);
// Set header values.
- SetMagicNumber(cs->isolate());
+ SetMagicNumber();
SetHeaderValue(kVersionHashOffset, Version::Hash());
SetHeaderValue(kSourceHashOffset, cs->source_hash());
SetHeaderValue(kCpuFeaturesOffset,
@@ -367,7 +343,6 @@ SerializedCodeData::SerializedCodeData(const std::vector<byte>* payload,
SetHeaderValue(kFlagHashOffset, FlagList::Hash());
SetHeaderValue(kNumReservationsOffset,
static_cast<uint32_t>(reservations.size()));
- SetHeaderValue(kNumCodeStubKeysOffset, num_stub_keys);
SetHeaderValue(kPayloadLengthOffset, static_cast<uint32_t>(payload->size()));
// Zero out any padding in the header.
@@ -378,10 +353,6 @@ SerializedCodeData::SerializedCodeData(const std::vector<byte>* payload,
reinterpret_cast<const byte*>(reservations.data()),
reservation_size);
- // Copy code stub keys.
- CopyBytes(data_ + kHeaderSize + reservation_size,
- reinterpret_cast<const byte*>(stub_keys->data()), stub_keys_size);
-
// Copy serialized data.
CopyBytes(data_ + padded_payload_offset, payload->data(),
static_cast<size_t>(payload->size()));
@@ -395,7 +366,7 @@ SerializedCodeData::SanityCheckResult SerializedCodeData::SanityCheck(
Isolate* isolate, uint32_t expected_source_hash) const {
if (this->size_ < kHeaderSize) return INVALID_HEADER;
uint32_t magic_number = GetMagicNumber();
- if (magic_number != ComputeMagicNumber(isolate)) return MAGIC_NUMBER_MISMATCH;
+ if (magic_number != kMagicNumber) return MAGIC_NUMBER_MISMATCH;
uint32_t version_hash = GetHeaderValue(kVersionHashOffset);
uint32_t source_hash = GetHeaderValue(kSourceHashOffset);
uint32_t cpu_features = GetHeaderValue(kCpuFeaturesOffset);
@@ -412,8 +383,7 @@ SerializedCodeData::SanityCheckResult SerializedCodeData::SanityCheck(
uint32_t max_payload_length =
this->size_ -
POINTER_SIZE_ALIGN(kHeaderSize +
- GetHeaderValue(kNumReservationsOffset) * kInt32Size +
- GetHeaderValue(kNumCodeStubKeysOffset) * kInt32Size);
+ GetHeaderValue(kNumReservationsOffset) * kInt32Size);
if (payload_length > max_payload_length) return LENGTH_MISMATCH;
if (!Checksum(ChecksummedContent()).Check(c1, c2)) return CHECKSUM_MISMATCH;
return CHECK_SUCCESS;
@@ -451,8 +421,7 @@ std::vector<SerializedData::Reservation> SerializedCodeData::Reservations()
Vector<const byte> SerializedCodeData::Payload() const {
int reservations_size = GetHeaderValue(kNumReservationsOffset) * kInt32Size;
- int code_stubs_size = GetHeaderValue(kNumCodeStubKeysOffset) * kInt32Size;
- int payload_offset = kHeaderSize + reservations_size + code_stubs_size;
+ int payload_offset = kHeaderSize + reservations_size;
int padded_payload_offset = POINTER_SIZE_ALIGN(payload_offset);
const byte* payload = data_ + padded_payload_offset;
DCHECK(IsAligned(reinterpret_cast<intptr_t>(payload), kPointerAlignment));
@@ -461,13 +430,6 @@ Vector<const byte> SerializedCodeData::Payload() const {
return Vector<const byte>(payload, length);
}
-Vector<const uint32_t> SerializedCodeData::CodeStubKeys() const {
- int reservations_size = GetHeaderValue(kNumReservationsOffset) * kInt32Size;
- const byte* start = data_ + kHeaderSize + reservations_size;
- return Vector<const uint32_t>(reinterpret_cast<const uint32_t*>(start),
- GetHeaderValue(kNumCodeStubKeysOffset));
-}
-
SerializedCodeData::SerializedCodeData(ScriptData* data)
: SerializedData(const_cast<byte*>(data->data()), data->length()) {}
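
SanityCheck() above follows a common pattern for versioned binary caches: validate each fixed header field and bail out with a distinct failure code before touching the payload. A condensed sketch under assumed field names (not the exact SerializedCodeData layout):

    #include <cstdint>

    enum Result { kSuccess, kMagicMismatch, kVersionMismatch, kSourceMismatch };

    struct Header {
      uint32_t magic;
      uint32_t version_hash;
      uint32_t source_hash;
    };

    Result SanityCheck(const Header& header, uint32_t expected_magic,
                       uint32_t expected_version, uint32_t expected_source) {
      if (header.magic != expected_magic) return kMagicMismatch;
      if (header.version_hash != expected_version) return kVersionMismatch;
      if (header.source_hash != expected_source) return kSourceMismatch;
      return kSuccess;  // Only now is the payload worth deserializing.
    }

    int main() {
      Header header{0xC0DE, 7, 42};
      return SanityCheck(header, 0xC0DE, 7, 42) == kSuccess ? 0 : 1;
    }
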
diff --git a/deps/v8/src/snapshot/code-serializer.h b/deps/v8/src/snapshot/code-serializer.h
index d9b4be9a34..446566602c 100644
--- a/deps/v8/src/snapshot/code-serializer.h
+++ b/deps/v8/src/snapshot/code-serializer.h
@@ -42,7 +42,7 @@ class ScriptData {
DISALLOW_COPY_AND_ASSIGN(ScriptData);
};
-class CodeSerializer : public Serializer<> {
+class CodeSerializer : public Serializer {
public:
static ScriptCompiler::CachedData* Serialize(Handle<SharedFunctionInfo> info);
@@ -52,36 +52,30 @@ class CodeSerializer : public Serializer<> {
Isolate* isolate, ScriptData* cached_data, Handle<String> source,
ScriptOriginOptions origin_options);
- const std::vector<uint32_t>* stub_keys() const { return &stub_keys_; }
-
uint32_t source_hash() const { return source_hash_; }
protected:
CodeSerializer(Isolate* isolate, uint32_t source_hash);
~CodeSerializer() override { OutputStatistics("CodeSerializer"); }
- virtual void SerializeCodeObject(Code* code_object, HowToCode how_to_code,
+ virtual void SerializeCodeObject(Code code_object, HowToCode how_to_code,
WhereToPoint where_to_point) {
UNREACHABLE();
}
- virtual bool ElideObject(Object* obj) { return false; }
- void SerializeGeneric(HeapObject* heap_object, HowToCode how_to_code,
+ virtual bool ElideObject(Object obj) { return false; }
+ void SerializeGeneric(HeapObject heap_object, HowToCode how_to_code,
WhereToPoint where_to_point);
private:
- void SerializeObject(HeapObject* o, HowToCode how_to_code,
+ void SerializeObject(HeapObject o, HowToCode how_to_code,
WhereToPoint where_to_point, int skip) override;
- void SerializeCodeStub(Code* code_stub, HowToCode how_to_code,
- WhereToPoint where_to_point);
-
- bool SerializeReadOnlyObject(HeapObject* obj, HowToCode how_to_code,
+ bool SerializeReadOnlyObject(HeapObject obj, HowToCode how_to_code,
WhereToPoint where_to_point, int skip);
- DisallowHeapAllocation no_gc_;
+ DISALLOW_HEAP_ALLOCATION(no_gc_);
uint32_t source_hash_;
- std::vector<uint32_t> stub_keys_;
DISALLOW_COPY_AND_ASSIGN(CodeSerializer);
};
@@ -102,16 +96,14 @@ class SerializedCodeData : public SerializedData {
// The data header consists of uint32_t-sized entries:
// [0] magic number and (internally provided) external reference count
- // [1] extra (API-provided) external reference count
- // [2] version hash
- // [3] source hash
- // [4] cpu features
- // [5] flag hash
- // [6] number of code stub keys
- // [7] number of reservation size entries
- // [8] payload length
- // [9] payload checksum part A
- // [10] payload checksum part B
+ // [1] version hash
+ // [2] source hash
+ // [3] cpu features
+ // [4] flag hash
+ // [5] number of reservation size entries
+ // [6] payload length
+ // [7] payload checksum part A
+ // [8] payload checksum part B
// ... reservations
// ... code stub keys
// ... serialized payload
@@ -120,10 +112,8 @@ class SerializedCodeData : public SerializedData {
static const uint32_t kCpuFeaturesOffset = kSourceHashOffset + kUInt32Size;
static const uint32_t kFlagHashOffset = kCpuFeaturesOffset + kUInt32Size;
static const uint32_t kNumReservationsOffset = kFlagHashOffset + kUInt32Size;
- static const uint32_t kNumCodeStubKeysOffset =
- kNumReservationsOffset + kUInt32Size;
static const uint32_t kPayloadLengthOffset =
- kNumCodeStubKeysOffset + kUInt32Size;
+ kNumReservationsOffset + kUInt32Size;
static const uint32_t kChecksumPartAOffset =
kPayloadLengthOffset + kUInt32Size;
static const uint32_t kChecksumPartBOffset =
@@ -148,8 +138,6 @@ class SerializedCodeData : public SerializedData {
std::vector<Reservation> Reservations() const;
Vector<const byte> Payload() const;
- Vector<const uint32_t> CodeStubKeys() const;
-
static uint32_t SourceHash(Handle<String> source,
ScriptOriginOptions origin_options);
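
The kXOffset constants above form a cumulative chain of uint32_t slots; removing kNumCodeStubKeysOffset simply splices one link out of the chain. A sketch of the same arithmetic, assuming the chain starts at a magic-number slot at offset 0:

    #include <cstdint>

    constexpr uint32_t kUInt32Size = sizeof(uint32_t);
    // Assumption: the magic number occupies the first header slot.
    constexpr uint32_t kMagicNumberOffset = 0;
    constexpr uint32_t kVersionHashOffset = kMagicNumberOffset + kUInt32Size;
    constexpr uint32_t kSourceHashOffset = kVersionHashOffset + kUInt32Size;
    constexpr uint32_t kCpuFeaturesOffset = kSourceHashOffset + kUInt32Size;
    constexpr uint32_t kFlagHashOffset = kCpuFeaturesOffset + kUInt32Size;
    constexpr uint32_t kNumReservationsOffset = kFlagHashOffset + kUInt32Size;
    constexpr uint32_t kPayloadLengthOffset = kNumReservationsOffset + kUInt32Size;

    static_assert(kPayloadLengthOffset == 6 * kUInt32Size,
                  "each header field occupies exactly one uint32_t slot");

    int main() { return 0; }
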
diff --git a/deps/v8/src/snapshot/default-deserializer-allocator.cc b/deps/v8/src/snapshot/deserializer-allocator.cc
index f3afc4d498..09f8f678c3 100644
--- a/deps/v8/src/snapshot/default-deserializer-allocator.cc
+++ b/deps/v8/src/snapshot/deserializer-allocator.cc
@@ -2,18 +2,16 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/snapshot/default-deserializer-allocator.h"
+#include "src/snapshot/deserializer-allocator.h"
-#include "src/heap/heap-inl.h"
-#include "src/snapshot/builtin-deserializer.h"
+#include "src/heap/heap-inl.h" // crbug.com/v8/8499
#include "src/snapshot/deserializer.h"
#include "src/snapshot/startup-deserializer.h"
namespace v8 {
namespace internal {
-DefaultDeserializerAllocator::DefaultDeserializerAllocator(
- Deserializer<DefaultDeserializerAllocator>* deserializer)
+DeserializerAllocator::DeserializerAllocator(Deserializer* deserializer)
: deserializer_(deserializer) {}
// We know the space requirements before deserialization and can
@@ -27,16 +25,14 @@ DefaultDeserializerAllocator::DefaultDeserializerAllocator(
// space allocation, we have to do an actual allocation when deserializing
// each large object. Instead of tracking offset for back references, we
// reference large objects by index.
-Address DefaultDeserializerAllocator::AllocateRaw(AllocationSpace space,
- int size) {
+Address DeserializerAllocator::AllocateRaw(AllocationSpace space, int size) {
if (space == LO_SPACE) {
AlwaysAllocateScope scope(isolate());
+ // Note that we currently do not support deserialization of large code
+ // objects.
LargeObjectSpace* lo_space = isolate()->heap()->lo_space();
- // TODO(jgruber): May be cleaner to pass in executability as an argument.
- Executability exec =
- static_cast<Executability>(deserializer_->source()->Get());
- AllocationResult result = lo_space->AllocateRaw(size, exec);
- HeapObject* obj = result.ToObjectChecked();
+ AllocationResult result = lo_space->AllocateRaw(size);
+ HeapObject obj = result.ToObjectChecked();
deserialized_large_objects_.push_back(obj);
return obj->address();
} else if (space == MAP_SPACE) {
@@ -58,10 +54,9 @@ Address DefaultDeserializerAllocator::AllocateRaw(AllocationSpace space,
}
}
-Address DefaultDeserializerAllocator::Allocate(AllocationSpace space,
- int size) {
+Address DeserializerAllocator::Allocate(AllocationSpace space, int size) {
Address address;
- HeapObject* obj;
+ HeapObject obj;
if (next_alignment_ != kWordAligned) {
const int reserved = size + Heap::GetMaximumFillToAlign(next_alignment_);
@@ -83,7 +78,7 @@ Address DefaultDeserializerAllocator::Allocate(AllocationSpace space,
}
}
-void DefaultDeserializerAllocator::MoveToNextChunk(AllocationSpace space) {
+void DeserializerAllocator::MoveToNextChunk(AllocationSpace space) {
DCHECK_LT(space, kNumberOfPreallocatedSpaces);
uint32_t chunk_index = current_chunk_[space];
const Heap::Reservation& reservation = reservations_[space];
@@ -95,19 +90,19 @@ void DefaultDeserializerAllocator::MoveToNextChunk(AllocationSpace space) {
high_water_[space] = reservation[chunk_index].start;
}
-HeapObject* DefaultDeserializerAllocator::GetMap(uint32_t index) {
+HeapObject DeserializerAllocator::GetMap(uint32_t index) {
DCHECK_LT(index, next_map_index_);
return HeapObject::FromAddress(allocated_maps_[index]);
}
-HeapObject* DefaultDeserializerAllocator::GetLargeObject(uint32_t index) {
+HeapObject DeserializerAllocator::GetLargeObject(uint32_t index) {
DCHECK_LT(index, deserialized_large_objects_.size());
return deserialized_large_objects_[index];
}
-HeapObject* DefaultDeserializerAllocator::GetObject(AllocationSpace space,
- uint32_t chunk_index,
- uint32_t chunk_offset) {
+HeapObject DeserializerAllocator::GetObject(AllocationSpace space,
+ uint32_t chunk_index,
+ uint32_t chunk_offset) {
DCHECK_LT(space, kNumberOfPreallocatedSpaces);
DCHECK_LE(chunk_index, current_chunk_[space]);
Address address = reservations_[space][chunk_index].start + chunk_offset;
@@ -120,7 +115,7 @@ HeapObject* DefaultDeserializerAllocator::GetObject(AllocationSpace space,
return HeapObject::FromAddress(address);
}
-void DefaultDeserializerAllocator::DecodeReservation(
+void DeserializerAllocator::DecodeReservation(
const std::vector<SerializedData::Reservation>& res) {
DCHECK_EQ(0, reservations_[FIRST_SPACE].size());
int current_space = FIRST_SPACE;
@@ -133,7 +128,7 @@ void DefaultDeserializerAllocator::DecodeReservation(
for (int i = 0; i < kNumberOfPreallocatedSpaces; i++) current_chunk_[i] = 0;
}
-bool DefaultDeserializerAllocator::ReserveSpace() {
+bool DeserializerAllocator::ReserveSpace() {
#ifdef DEBUG
for (int i = FIRST_SPACE; i < kNumberOfSpaces; ++i) {
DCHECK_GT(reservations_[i].size(), 0);
@@ -149,74 +144,7 @@ bool DefaultDeserializerAllocator::ReserveSpace() {
return true;
}
-// static
-bool DefaultDeserializerAllocator::ReserveSpace(
- StartupDeserializer* startup_deserializer,
- BuiltinDeserializer* builtin_deserializer) {
- Isolate* isolate = startup_deserializer->isolate();
-
- // Create a set of merged reservations to reserve space in one go.
- // The BuiltinDeserializer's reservations are ignored, since our actual
- // requirements vary based on whether lazy deserialization is enabled.
- // Instead, we manually determine the required code-space.
-
- Heap::Reservation merged_reservations[kNumberOfSpaces];
- for (int i = FIRST_SPACE; i < kNumberOfSpaces; i++) {
- merged_reservations[i] =
- startup_deserializer->allocator()->reservations_[i];
- }
-
- Heap::Reservation builtin_reservations =
- builtin_deserializer->allocator()->CreateReservationsForEagerBuiltins();
- DCHECK(!builtin_reservations.empty());
-
- for (const auto& c : builtin_reservations) {
- merged_reservations[CODE_SPACE].push_back(c);
- }
-
- if (!isolate->heap()->ReserveSpace(
- merged_reservations,
- &startup_deserializer->allocator()->allocated_maps_)) {
- return false;
- }
-
- DisallowHeapAllocation no_allocation;
-
- // Distribute the successful allocations between both deserializers.
- // There's nothing to be done here except for code space.
-
- {
- const int num_builtin_reservations =
- static_cast<int>(builtin_reservations.size());
- for (int i = num_builtin_reservations - 1; i >= 0; i--) {
- const auto& c = merged_reservations[CODE_SPACE].back();
- DCHECK_EQ(c.size, builtin_reservations[i].size);
- DCHECK_EQ(c.size, c.end - c.start);
- builtin_reservations[i].start = c.start;
- builtin_reservations[i].end = c.end;
- merged_reservations[CODE_SPACE].pop_back();
- }
-
- builtin_deserializer->allocator()->InitializeFromReservations(
- builtin_reservations);
- }
-
- // Write back startup reservations.
-
- for (int i = FIRST_SPACE; i < kNumberOfSpaces; i++) {
- startup_deserializer->allocator()->reservations_[i].swap(
- merged_reservations[i]);
- }
-
- for (int i = FIRST_SPACE; i < kNumberOfPreallocatedSpaces; i++) {
- startup_deserializer->allocator()->high_water_[i] =
- startup_deserializer->allocator()->reservations_[i][0].start;
- }
-
- return true;
-}
-
-bool DefaultDeserializerAllocator::ReservationsAreFullyUsed() const {
+bool DeserializerAllocator::ReservationsAreFullyUsed() const {
for (int space = 0; space < kNumberOfPreallocatedSpaces; space++) {
const uint32_t chunk_index = current_chunk_[space];
if (reservations_[space].size() != chunk_index + 1) {
@@ -229,13 +157,12 @@ bool DefaultDeserializerAllocator::ReservationsAreFullyUsed() const {
return (allocated_maps_.size() == next_map_index_);
}
-void DefaultDeserializerAllocator::
- RegisterDeserializedObjectsForBlackAllocation() {
+void DeserializerAllocator::RegisterDeserializedObjectsForBlackAllocation() {
isolate()->heap()->RegisterDeserializedObjectsForBlackAllocation(
reservations_, deserialized_large_objects_, allocated_maps_);
}
-Isolate* DefaultDeserializerAllocator::isolate() const {
+Isolate* DeserializerAllocator::isolate() const {
return deserializer_->isolate();
}
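
The allocator above services most spaces by bumping a high-water mark through pre-reserved chunks, advancing to the next chunk on request. A standalone sketch of that chunked bump allocation (Chunk and BumpAllocator are illustrative types):

    #include <cassert>
    #include <cstddef>
    #include <cstdint>
    #include <vector>

    struct Chunk {
      uintptr_t start;
      uintptr_t end;
    };

    class BumpAllocator {
     public:
      explicit BumpAllocator(std::vector<Chunk> chunks)
          : chunks_(std::move(chunks)), high_water_(chunks_[0].start) {}

      uintptr_t Allocate(int size) {
        uintptr_t address = high_water_;
        high_water_ += size;
        assert(high_water_ <= chunks_[current_chunk_].end);  // Must fit.
        return address;
      }

      void MoveToNextChunk() {
        ++current_chunk_;
        assert(current_chunk_ < chunks_.size());
        high_water_ = chunks_[current_chunk_].start;  // Reset the mark.
      }

     private:
      std::vector<Chunk> chunks_;
      size_t current_chunk_ = 0;
      uintptr_t high_water_;
    };

    int main() {
      BumpAllocator allocator({{0x1000, 0x1100}, {0x2000, 0x2040}});
      assert(allocator.Allocate(0x80) == 0x1000);
      allocator.MoveToNextChunk();
      assert(allocator.Allocate(0x20) == 0x2000);
    }
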
diff --git a/deps/v8/src/snapshot/default-deserializer-allocator.h b/deps/v8/src/snapshot/deserializer-allocator.h
index 4a5758cc5a..eb06c2689a 100644
--- a/deps/v8/src/snapshot/default-deserializer-allocator.h
+++ b/deps/v8/src/snapshot/deserializer-allocator.h
@@ -2,26 +2,23 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_SNAPSHOT_DEFAULT_DESERIALIZER_ALLOCATOR_H_
-#define V8_SNAPSHOT_DEFAULT_DESERIALIZER_ALLOCATOR_H_
+#ifndef V8_SNAPSHOT_DESERIALIZER_ALLOCATOR_H_
+#define V8_SNAPSHOT_DESERIALIZER_ALLOCATOR_H_
#include "src/globals.h"
#include "src/heap/heap.h"
+#include "src/objects/heap-object.h"
#include "src/snapshot/serializer-common.h"
namespace v8 {
namespace internal {
-template <class AllocatorT>
class Deserializer;
-
-class BuiltinDeserializer;
class StartupDeserializer;
-class DefaultDeserializerAllocator final {
+class DeserializerAllocator final {
public:
- DefaultDeserializerAllocator(
- Deserializer<DefaultDeserializerAllocator>* deserializer);
+ explicit DeserializerAllocator(Deserializer* deserializer);
// ------- Allocation Methods -------
// Methods related to memory allocation during deserialization.
@@ -50,10 +47,10 @@ class DefaultDeserializerAllocator final {
bool next_reference_is_weak() const { return next_reference_is_weak_; }
#endif
- HeapObject* GetMap(uint32_t index);
- HeapObject* GetLargeObject(uint32_t index);
- HeapObject* GetObject(AllocationSpace space, uint32_t chunk_index,
- uint32_t chunk_offset);
+ HeapObject GetMap(uint32_t index);
+ HeapObject GetLargeObject(uint32_t index);
+ HeapObject GetObject(AllocationSpace space, uint32_t chunk_index,
+ uint32_t chunk_offset);
// ------- Reservation Methods -------
// Methods related to memory reservations (prior to deserialization).
@@ -61,11 +58,6 @@ class DefaultDeserializerAllocator final {
void DecodeReservation(const std::vector<SerializedData::Reservation>& res);
bool ReserveSpace();
- // Atomically reserves space for the two given deserializers. Guarantees
- // reservation for both without garbage collection in-between.
- static bool ReserveSpace(StartupDeserializer* startup_deserializer,
- BuiltinDeserializer* builtin_deserializer);
-
bool ReservationsAreFullyUsed() const;
// ------- Misc Utility Methods -------
@@ -103,15 +95,15 @@ class DefaultDeserializerAllocator final {
// Allocated large objects are kept in this map and may be fetched later as
// back-references.
- std::vector<HeapObject*> deserialized_large_objects_;
+ std::vector<HeapObject> deserialized_large_objects_;
// The current deserializer.
- Deserializer<DefaultDeserializerAllocator>* const deserializer_;
+ Deserializer* const deserializer_;
- DISALLOW_COPY_AND_ASSIGN(DefaultDeserializerAllocator)
+ DISALLOW_COPY_AND_ASSIGN(DeserializerAllocator);
};
} // namespace internal
} // namespace v8
-#endif // V8_SNAPSHOT_DEFAULT_DESERIALIZER_ALLOCATOR_H_
+#endif // V8_SNAPSHOT_DESERIALIZER_ALLOCATOR_H_
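
When a non-default alignment is pending, the .cc above reserves size plus Heap::GetMaximumFillToAlign() so filler can be inserted before the object. The underlying align-up arithmetic, sketched for power-of-two alignments:

    #include <cassert>
    #include <cstdint>

    uintptr_t AlignUp(uintptr_t address, uintptr_t alignment) {
      return (address + alignment - 1) & ~(alignment - 1);
    }

    int main() {
      assert(AlignUp(0x1004, 8) == 0x1008);
      assert(AlignUp(0x1008, 8) == 0x1008);  // Already aligned: no filler.
      // Reserving size plus the worst-case fill always leaves room, which is
      // what the allocator's over-reservation achieves.
    }
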
diff --git a/deps/v8/src/snapshot/deserializer.cc b/deps/v8/src/snapshot/deserializer.cc
index bc5805fb52..abb31c5326 100644
--- a/deps/v8/src/snapshot/deserializer.cc
+++ b/deps/v8/src/snapshot/deserializer.cc
@@ -6,27 +6,83 @@
#include "src/assembler-inl.h"
#include "src/heap/heap-write-barrier-inl.h"
+#include "src/interpreter/interpreter.h"
#include "src/isolate.h"
+#include "src/log.h"
#include "src/objects/api-callbacks.h"
+#include "src/objects/cell-inl.h"
#include "src/objects/hash-table.h"
#include "src/objects/js-array-buffer-inl.h"
#include "src/objects/js-array-inl.h"
#include "src/objects/maybe-object.h"
+#include "src/objects/slots.h"
+#include "src/objects/smi.h"
#include "src/objects/string.h"
-#include "src/snapshot/builtin-deserializer-allocator.h"
#include "src/snapshot/natives.h"
#include "src/snapshot/snapshot.h"
namespace v8 {
namespace internal {
-template <class AllocatorT>
-void Deserializer<AllocatorT>::Initialize(Isolate* isolate) {
+// This is like a MaybeObjectSlot, except it doesn't enforce alignment.
+// Most slots used below are aligned, but when writing into Code objects,
+// they might not be, hence the use of UnalignedSlot and UnalignedCopy.
+class UnalignedSlot {
+ public:
+ explicit UnalignedSlot(ObjectSlot slot) : ptr_(slot.address()) {}
+ explicit UnalignedSlot(Address address) : ptr_(address) {}
+ explicit UnalignedSlot(MaybeObject* slot)
+ : ptr_(reinterpret_cast<Address>(slot)) {}
+ explicit UnalignedSlot(Object* slot)
+ : ptr_(reinterpret_cast<Address>(slot)) {}
+
+ inline bool operator<(const UnalignedSlot& other) const {
+ return ptr_ < other.ptr_;
+ }
+ inline bool operator==(const UnalignedSlot& other) const {
+ return ptr_ == other.ptr_;
+ }
+
+ inline void Advance(int bytes = kPointerSize) { ptr_ += bytes; }
+
+ MaybeObject Read() {
+ Address result;
+ memcpy(&result, reinterpret_cast<void*>(ptr_), sizeof(result));
+ return MaybeObject(result);
+ }
+ MaybeObject ReadPrevious() {
+ Address result;
+ memcpy(&result, reinterpret_cast<void*>(ptr_ - kPointerSize),
+ sizeof(result));
+ return MaybeObject(result);
+ }
+ inline void Write(Address value) {
+ memcpy(reinterpret_cast<void*>(ptr_), &value, sizeof(value));
+ }
+ MaybeObjectSlot Slot() { return MaybeObjectSlot(ptr_); }
+
+ Address address() { return ptr_; }
+
+ private:
+ Address ptr_;
+};
+
+void Deserializer::UnalignedCopy(UnalignedSlot dest, MaybeObject value) {
+ DCHECK(!allocator()->next_reference_is_weak());
+ dest.Write(value.ptr());
+}
+
+void Deserializer::UnalignedCopy(UnalignedSlot dest, Address value) {
+ DCHECK(!allocator()->next_reference_is_weak());
+ dest.Write(value);
+}
+
+void Deserializer::Initialize(Isolate* isolate) {
DCHECK_NULL(isolate_);
DCHECK_NOT_NULL(isolate);
isolate_ = isolate;
DCHECK_NULL(external_reference_table_);
- external_reference_table_ = isolate->heap()->external_reference_table();
+ external_reference_table_ = isolate->external_reference_table();
#ifdef DEBUG
// Count the number of external references registered through the API.
num_api_references_ = 0;
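
The new UnalignedSlot above reads and writes through memcpy rather than dereferencing a cast pointer, which is the portable way to touch possibly-misaligned addresses without undefined behavior. A standalone sketch of the technique:

    #include <cstdint>
    #include <cstring>

    uint64_t ReadUnaligned(const void* address) {
      uint64_t result;
      std::memcpy(&result, address, sizeof(result));  // Alignment-safe load.
      return result;
    }

    void WriteUnaligned(void* address, uint64_t value) {
      std::memcpy(address, &value, sizeof(value));  // Alignment-safe store.
    }

    int main() {
      unsigned char buffer[16] = {};
      WriteUnaligned(buffer + 3, 0x1122334455667788ull);  // Misaligned by 3.
      return ReadUnaligned(buffer + 3) == 0x1122334455667788ull ? 0 : 1;
    }
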
@@ -36,23 +92,15 @@ void Deserializer<AllocatorT>::Initialize(Isolate* isolate) {
}
}
#endif // DEBUG
- CHECK_EQ(magic_number_,
- SerializedData::ComputeMagicNumber(external_reference_table_));
-}
-
-template <class AllocatorT>
-bool Deserializer<AllocatorT>::IsLazyDeserializationEnabled() const {
- return FLAG_lazy_deserialization && !isolate()->serializer_enabled();
+ CHECK_EQ(magic_number_, SerializedData::kMagicNumber);
}
-template <class AllocatorT>
-void Deserializer<AllocatorT>::Rehash() {
+void Deserializer::Rehash() {
DCHECK(can_rehash() || deserializing_user_code());
- for (const auto& item : to_rehash_) item->RehashBasedOnMap(isolate());
+ for (HeapObject item : to_rehash_) item->RehashBasedOnMap(isolate());
}
-template <class AllocatorT>
-Deserializer<AllocatorT>::~Deserializer() {
+Deserializer::~Deserializer() {
#ifdef DEBUG
// Do not perform checks if we aborted deserialization.
if (source_.position() == 0) return;
@@ -65,28 +113,22 @@ Deserializer<AllocatorT>::~Deserializer() {
// This is called on the roots. It is the driver of the deserialization
// process. It is also called on the body of each function.
-template <class AllocatorT>
-void Deserializer<AllocatorT>::VisitRootPointers(Root root,
- const char* description,
- Object** start, Object** end) {
- // Builtins are deserialized in a separate pass by the BuiltinDeserializer.
- if (root == Root::kBuiltins || root == Root::kDispatchTable) return;
-
+void Deserializer::VisitRootPointers(Root root, const char* description,
+ FullObjectSlot start, FullObjectSlot end) {
// The space must be new space. Any other space would cause ReadChunk to try
// to update the remembered set using nullptr as the address.
- ReadData(reinterpret_cast<MaybeObject**>(start),
- reinterpret_cast<MaybeObject**>(end), NEW_SPACE, kNullAddress);
+ // TODO(ishell): this will not work once we actually compress pointers.
+ STATIC_ASSERT(kTaggedSize == kSystemPointerSize);
+ ReadData(UnalignedSlot(start.address()), UnalignedSlot(end.address()),
+ NEW_SPACE, kNullAddress);
}
-template <class AllocatorT>
-void Deserializer<AllocatorT>::Synchronize(
- VisitorSynchronization::SyncTag tag) {
+void Deserializer::Synchronize(VisitorSynchronization::SyncTag tag) {
static const byte expected = kSynchronize;
CHECK_EQ(expected, source_.Get());
}
-template <class AllocatorT>
-void Deserializer<AllocatorT>::DeserializeDeferredObjects() {
+void Deserializer::DeserializeDeferredObjects() {
for (int code = source_.Get(); code != kSynchronize; code = source_.Get()) {
switch (code) {
case kAlignmentPrefix:
@@ -100,12 +142,11 @@ void Deserializer<AllocatorT>::DeserializeDeferredObjects() {
int space = code & kSpaceMask;
DCHECK_LE(space, kNumberOfSpaces);
DCHECK_EQ(code - space, kNewObject);
- HeapObject* object = GetBackReferencedObject(space);
+ HeapObject object = GetBackReferencedObject(space);
int size = source_.GetInt() << kPointerSizeLog2;
Address obj_address = object->address();
- MaybeObject** start =
- reinterpret_cast<MaybeObject**>(obj_address + kPointerSize);
- MaybeObject** end = reinterpret_cast<MaybeObject**>(obj_address + size);
+ UnalignedSlot start(obj_address + kPointerSize);
+ UnalignedSlot end(obj_address + size);
bool filled = ReadData(start, end, space, obj_address);
CHECK(filled);
DCHECK(CanBeDeferred(object));
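
DeserializeDeferredObjects() above is a small bytecode interpreter: it pulls tag bytes off the stream and dispatches until it hits the synchronization marker. A stripped-down sketch of that loop shape (tags and actions are invented for illustration):

    #include <cstdio>
    #include <vector>

    enum Tag : unsigned char { kSynchronize = 0, kObject = 1, kAlignmentPrefix = 2 };

    int main() {
      std::vector<unsigned char> stream = {kObject, kAlignmentPrefix, kObject,
                                           kSynchronize};
      int pos = 0;
      for (unsigned char code = stream[pos++]; code != kSynchronize;
           code = stream[pos++]) {
        switch (code) {
          case kAlignmentPrefix:
            std::puts("set next alignment");  // Affects the next allocation.
            break;
          default:
            std::puts("fill deferred object body");  // kNewObject-style tags.
            break;
        }
      }
    }
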
@@ -115,12 +156,40 @@ void Deserializer<AllocatorT>::DeserializeDeferredObjects() {
}
}
-StringTableInsertionKey::StringTableInsertionKey(String* string)
+void Deserializer::LogNewObjectEvents() {
+ {
+ // {new_maps_} and {new_code_objects_} are vectors containing raw
+ // pointers, hence there should be no GC happening.
+ DisallowHeapAllocation no_gc;
+ // Issue code events for newly deserialized code objects.
+ LOG_CODE_EVENT(isolate_, LogCodeObjects());
+ }
+ LOG_CODE_EVENT(isolate_, LogCompiledFunctions());
+ LogNewMapEvents();
+}
+
+void Deserializer::LogNewMapEvents() {
+ DisallowHeapAllocation no_gc;
+ for (Map map : new_maps()) {
+ DCHECK(FLAG_trace_maps);
+ LOG(isolate_, MapCreate(map));
+ LOG(isolate_, MapDetails(map));
+ }
+}
+
+void Deserializer::LogScriptEvents(Script script) {
+ DisallowHeapAllocation no_gc;
+ LOG(isolate_,
+ ScriptEvent(Logger::ScriptEventType::kDeserialize, script->id()));
+ LOG(isolate_, ScriptDetails(script));
+}
+
+StringTableInsertionKey::StringTableInsertionKey(String string)
: StringTableKey(ComputeHashField(string)), string_(string) {
DCHECK(string->IsInternalizedString());
}
-bool StringTableInsertionKey::IsMatch(Object* string) {
+bool StringTableInsertionKey::IsMatch(Object string) {
// We know that all entries in a hash table had their hash keys created.
// Use that knowledge to have fast failure.
if (Hash() != String::cast(string)->Hash()) return false;
@@ -132,19 +201,17 @@ Handle<String> StringTableInsertionKey::AsHandle(Isolate* isolate) {
return handle(string_, isolate);
}
-uint32_t StringTableInsertionKey::ComputeHashField(String* string) {
+uint32_t StringTableInsertionKey::ComputeHashField(String string) {
// Make sure hash_field() is computed.
string->Hash();
return string->hash_field();
}
-template <class AllocatorT>
-HeapObject* Deserializer<AllocatorT>::PostProcessNewObject(HeapObject* obj,
- int space) {
+HeapObject Deserializer::PostProcessNewObject(HeapObject obj, int space) {
if ((FLAG_rehash_snapshot && can_rehash_) || deserializing_user_code()) {
if (obj->IsString()) {
// Uninitialize hash field as we need to recompute the hash.
- String* string = String::cast(obj);
+ String string = String::cast(obj);
string->set_hash_field(String::kEmptyHashField);
} else if (obj->NeedsRehashing()) {
to_rehash_.push_back(obj);
@@ -153,43 +220,44 @@ HeapObject* Deserializer<AllocatorT>::PostProcessNewObject(HeapObject* obj,
if (deserializing_user_code()) {
if (obj->IsString()) {
- String* string = String::cast(obj);
+ String string = String::cast(obj);
if (string->IsInternalizedString()) {
// Canonicalize the internalized string. If it already exists in the
// string table, set it to forward to the existing one.
StringTableInsertionKey key(string);
- String* canonical =
+ String canonical =
StringTable::ForwardStringIfExists(isolate_, &key, string);
- if (canonical != nullptr) return canonical;
+ if (!canonical.is_null()) return canonical;
new_internalized_strings_.push_back(handle(string, isolate_));
return string;
}
} else if (obj->IsScript()) {
new_scripts_.push_back(handle(Script::cast(obj), isolate_));
+ } else if (obj->IsAllocationSite()) {
+ // We should link new allocation sites, but we can't do this immediately
+ // because |AllocationSite::HasWeakNext()| internally accesses
+ // |Heap::roots_|, which may not have been initialized yet. So defer this to
+ // |ObjectDeserializer::CommitPostProcessedObjects()|.
+ new_allocation_sites_.push_back(AllocationSite::cast(obj));
} else {
DCHECK(CanBeDeferred(obj));
}
- } else if (obj->IsScript()) {
- LOG(isolate_, ScriptEvent(Logger::ScriptEventType::kDeserialize,
- Script::cast(obj)->id()));
- LOG(isolate_, ScriptDetails(Script::cast(obj)));
}
-
- if (obj->IsAllocationSite()) {
- // We should link new allocation sites, but we can't do this immediately
- // because |AllocationSite::HasWeakNext()| internally accesses
- // |Heap::roots_| that may not have been initialized yet. So defer this to
- // |ObjectDeserializer::CommitPostProcessedObjects()|.
- new_allocation_sites_.push_back(AllocationSite::cast(obj));
+ if (obj->IsScript()) {
+ LogScriptEvents(Script::cast(obj));
} else if (obj->IsCode()) {
- // We flush all code pages after deserializing the startup snapshot. In that
- // case, we only need to remember code objects in the large object space.
- // When deserializing user code, remember each individual code object.
+ // We flush all code pages after deserializing the startup snapshot.
+ // Hence we only remember each individual code object when deserializing
+ // user code.
if (deserializing_user_code() || space == LO_SPACE) {
new_code_objects_.push_back(Code::cast(obj));
}
+ } else if (FLAG_trace_maps && obj->IsMap()) {
+ // Keep track of all seen Maps to log them later since they might be only
+ // partially initialized at this point.
+ new_maps_.push_back(Map::cast(obj));
} else if (obj->IsAccessorInfo()) {
#ifdef USE_SIMULATOR
accessor_infos_.push_back(AccessorInfo::cast(obj));
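
The internalized-string handling in PostProcessNewObject() above canonicalizes freshly deserialized strings: if an equal string already sits in the string table, the new object is forwarded to it so internalized strings stay unique. A sketch of that lookup-or-insert step using a plain unordered_set in place of V8's StringTable:

    #include <cstdio>
    #include <string>
    #include <unordered_set>

    const std::string* Canonicalize(std::unordered_set<std::string>* table,
                                    const std::string& deserialized) {
      auto it = table->find(deserialized);
      if (it != table->end()) return &*it;         // Forward to existing entry.
      return &*table->insert(deserialized).first;  // Otherwise insert the new one.
    }

    int main() {
      std::unordered_set<std::string> table = {"foo"};
      const std::string* a = Canonicalize(&table, "foo");
      const std::string* b = Canonicalize(&table, "foo");
      std::printf("%d\n", a == b);  // 1: both resolve to the same entry.
    }
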
@@ -200,13 +268,13 @@ HeapObject* Deserializer<AllocatorT>::PostProcessNewObject(HeapObject* obj,
#endif
} else if (obj->IsExternalString()) {
if (obj->map() == ReadOnlyRoots(isolate_).native_source_string_map()) {
- ExternalOneByteString* string = ExternalOneByteString::cast(obj);
+ ExternalOneByteString string = ExternalOneByteString::cast(obj);
DCHECK(string->is_uncached());
string->SetResource(
isolate_, NativesExternalStringResource::DecodeForDeserialization(
string->resource()));
} else {
- ExternalString* string = ExternalString::cast(obj);
+ ExternalString string = ExternalString::cast(obj);
uint32_t index = string->resource_as_uint32();
Address address =
static_cast<Address>(isolate_->api_external_references()[index]);
@@ -216,11 +284,11 @@ HeapObject* Deserializer<AllocatorT>::PostProcessNewObject(HeapObject* obj,
}
isolate_->heap()->RegisterExternalString(String::cast(obj));
} else if (obj->IsJSTypedArray()) {
- JSTypedArray* typed_array = JSTypedArray::cast(obj);
+ JSTypedArray typed_array = JSTypedArray::cast(obj);
CHECK_LE(typed_array->byte_offset(), Smi::kMaxValue);
int32_t byte_offset = static_cast<int32_t>(typed_array->byte_offset());
if (byte_offset > 0) {
- FixedTypedArrayBase* elements =
+ FixedTypedArrayBase elements =
FixedTypedArrayBase::cast(typed_array->elements());
// Must be off-heap layout.
DCHECK(!typed_array->is_on_heap());
@@ -231,30 +299,34 @@ HeapObject* Deserializer<AllocatorT>::PostProcessNewObject(HeapObject* obj,
elements->set_external_pointer(pointer_with_offset);
}
} else if (obj->IsJSArrayBuffer()) {
- JSArrayBuffer* buffer = JSArrayBuffer::cast(obj);
+ JSArrayBuffer buffer = JSArrayBuffer::cast(obj);
// Only fixup for the off-heap case.
if (buffer->backing_store() != nullptr) {
- Smi* store_index = reinterpret_cast<Smi*>(buffer->backing_store());
+ Smi store_index(reinterpret_cast<Address>(buffer->backing_store()));
void* backing_store = off_heap_backing_stores_[store_index->value()];
buffer->set_backing_store(backing_store);
isolate_->heap()->RegisterNewArrayBuffer(buffer);
}
} else if (obj->IsFixedTypedArrayBase()) {
- FixedTypedArrayBase* fta = FixedTypedArrayBase::cast(obj);
+ FixedTypedArrayBase fta = FixedTypedArrayBase::cast(obj);
// Only fixup for the off-heap case.
- if (fta->base_pointer() == nullptr) {
- Smi* store_index = reinterpret_cast<Smi*>(fta->external_pointer());
+ if (fta->base_pointer() == Smi::kZero) {
+ Smi store_index(reinterpret_cast<Address>(fta->external_pointer()));
void* backing_store = off_heap_backing_stores_[store_index->value()];
fta->set_external_pointer(backing_store);
}
} else if (obj->IsBytecodeArray()) {
// TODO(mythria): Remove these once we store the default values for these
// fields in the serializer.
- BytecodeArray* bytecode_array = BytecodeArray::cast(obj);
+ BytecodeArray bytecode_array = BytecodeArray::cast(obj);
bytecode_array->set_interrupt_budget(
interpreter::Interpreter::InterruptBudget());
bytecode_array->set_osr_loop_nesting_level(0);
+ } else if (obj->IsDescriptorArray()) {
+ // Reset the marking state of the descriptor array.
+ DescriptorArray descriptor_array = DescriptorArray::cast(obj);
+ descriptor_array->set_raw_number_of_marked_descriptors(0);
}
// Check alignment.
@@ -263,17 +335,8 @@ HeapObject* Deserializer<AllocatorT>::PostProcessNewObject(HeapObject* obj,
return obj;
}
-template <class AllocatorT>
-int Deserializer<AllocatorT>::MaybeReplaceWithDeserializeLazy(int builtin_id) {
- DCHECK(Builtins::IsBuiltinId(builtin_id));
- return IsLazyDeserializationEnabled() && Builtins::IsLazy(builtin_id)
- ? Builtins::kDeserializeLazy
- : builtin_id;
-}
-
-template <class AllocatorT>
-HeapObject* Deserializer<AllocatorT>::GetBackReferencedObject(int space) {
- HeapObject* obj;
+HeapObject Deserializer::GetBackReferencedObject(int space) {
+ HeapObject obj;
switch (space) {
case LO_SPACE:
obj = allocator()->GetLargeObject(source_.GetInt());
@@ -312,7 +375,7 @@ HeapObject* Deserializer<AllocatorT>::GetBackReferencedObject(int space) {
}
hot_objects_.Add(obj);
- DCHECK(!HasWeakHeapObjectTag(obj));
+ DCHECK(!HasWeakHeapObjectTag(obj->ptr()));
return obj;
}
@@ -320,51 +383,36 @@ HeapObject* Deserializer<AllocatorT>::GetBackReferencedObject(int space) {
// The reason for this strange interface is that otherwise the object is
// written very late, which means the FreeSpace map is not set up by the
// time we need to use it to mark the space at the end of a page free.
-template <class AllocatorT>
-void Deserializer<AllocatorT>::ReadObject(
- int space_number, MaybeObject** write_back,
- HeapObjectReferenceType reference_type) {
+void Deserializer::ReadObject(int space_number, UnalignedSlot write_back,
+ HeapObjectReferenceType reference_type) {
const int size = source_.GetInt() << kObjectAlignmentBits;
Address address =
allocator()->Allocate(static_cast<AllocationSpace>(space_number), size);
- HeapObject* obj = HeapObject::FromAddress(address);
+ HeapObject obj = HeapObject::FromAddress(address);
isolate_->heap()->OnAllocationEvent(obj, size);
- MaybeObject** current = reinterpret_cast<MaybeObject**>(address);
- MaybeObject** limit = current + (size >> kPointerSizeLog2);
+ UnalignedSlot current(address);
+ UnalignedSlot limit(address + size);
if (ReadData(current, limit, space_number, address)) {
// Only post process if object content has not been deferred.
obj = PostProcessNewObject(obj, space_number);
}
- MaybeObject* write_back_obj =
- reference_type == HeapObjectReferenceType::STRONG
- ? HeapObjectReference::Strong(obj)
- : HeapObjectReference::Weak(obj);
- UnalignedCopy(write_back, &write_back_obj);
+ MaybeObject write_back_obj = reference_type == HeapObjectReferenceType::STRONG
+ ? HeapObjectReference::Strong(obj)
+ : HeapObjectReference::Weak(obj);
+ UnalignedCopy(write_back, write_back_obj);
#ifdef DEBUG
if (obj->IsCode()) {
- DCHECK(space_number == CODE_SPACE || space_number == LO_SPACE);
+ DCHECK(space_number == CODE_SPACE || space_number == CODE_LO_SPACE);
} else {
- DCHECK(space_number != CODE_SPACE);
+ DCHECK(space_number != CODE_SPACE && space_number != CODE_LO_SPACE);
}
#endif // DEBUG
}
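
Throughout this hunk, raw MaybeObject** pointer arithmetic is replaced by an UnalignedSlot wrapper whose definition is not part of this excerpt. A minimal sketch, assuming only the operations the call sites above use (address(), Advance(), Read(), ReadPrevious()) and memcpy-based access for unaligned memory:

    // Hypothetical sketch -- the real UnalignedSlot lives elsewhere in
    // deserializer.cc; this only mirrors what the call sites above assume.
    class UnalignedSlot {
     public:
      explicit UnalignedSlot(Address address) : ptr_(address) {}

      Address address() const { return ptr_; }
      void Advance(int bytes = kPointerSize) { ptr_ += bytes; }

      MaybeObject Read() const {
        Address raw;
        memcpy(&raw, reinterpret_cast<const void*>(ptr_), sizeof(raw));
        return MaybeObject(raw);  // MaybeObject(Address) shape assumed
      }
      MaybeObject ReadPrevious() const {
        return UnalignedSlot(ptr_ - kPointerSize).Read();
      }

     private:
      Address ptr_;  // may be unaligned, hence the memcpy-based access
    };
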
-template <class AllocatorT>
-Object* Deserializer<AllocatorT>::ReadDataSingle() {
- MaybeObject* o;
- MaybeObject** start = &o;
- MaybeObject** end = start + 1;
- int source_space = NEW_SPACE;
- Address current_object = kNullAddress;
-
- CHECK(ReadData(start, end, source_space, current_object));
- return o->GetHeapObjectAssumeStrong();
-}
-
static void NoExternalReferencesCallback() {
// The following check will trigger if a function or object template
// with references to native functions have been deserialized from
@@ -373,10 +421,8 @@ static void NoExternalReferencesCallback() {
CHECK_WITH_MSG(false, "No external references provided via API");
}
-template <class AllocatorT>
-bool Deserializer<AllocatorT>::ReadData(MaybeObject** current,
- MaybeObject** limit, int source_space,
- Address current_object_address) {
+bool Deserializer::ReadData(UnalignedSlot current, UnalignedSlot limit,
+ int source_space, Address current_object_address) {
Isolate* const isolate = isolate_;
// Write barrier support costs around 1% in startup time. In fact there
// are no new space objects in current boot snapshots, so it's not needed,
@@ -473,15 +519,16 @@ bool Deserializer<AllocatorT>::ReadData(MaybeObject** current,
SINGLE_CASE(kPartialSnapshotCache, kPlain, kStartOfObject, 0)
SINGLE_CASE(kPartialSnapshotCache, kFromCode, kStartOfObject, 0)
SINGLE_CASE(kPartialSnapshotCache, kFromCode, kInnerPointer, 0)
+ // Find an object in the partial snapshots cache and write a pointer to it
+ // to the current object.
+ SINGLE_CASE(kReadOnlyObjectCache, kPlain, kStartOfObject, 0)
+ SINGLE_CASE(kReadOnlyObjectCache, kFromCode, kStartOfObject, 0)
+ SINGLE_CASE(kReadOnlyObjectCache, kFromCode, kInnerPointer, 0)
// Find an object in the attached references and write a pointer to it to
// the current object.
SINGLE_CASE(kAttachedReference, kPlain, kStartOfObject, 0)
SINGLE_CASE(kAttachedReference, kFromCode, kStartOfObject, 0)
SINGLE_CASE(kAttachedReference, kFromCode, kInnerPointer, 0)
- // Find a builtin and write a pointer to it to the current object.
- SINGLE_CASE(kBuiltin, kPlain, kStartOfObject, 0)
- SINGLE_CASE(kBuiltin, kFromCode, kStartOfObject, 0)
- SINGLE_CASE(kBuiltin, kFromCode, kInnerPointer, 0)
#undef CASE_STATEMENT
#undef CASE_BODY
@@ -489,23 +536,21 @@ bool Deserializer<AllocatorT>::ReadData(MaybeObject** current,
case kSkip: {
int size = source_.GetInt();
- current = reinterpret_cast<MaybeObject**>(
- reinterpret_cast<Address>(current) + size);
+ current.Advance(size);
break;
}
// Find an external reference and write a pointer to it to the current
// object.
case kExternalReference + kPlain + kStartOfObject:
- current = reinterpret_cast<MaybeObject**>(ReadExternalReferenceCase(
- kPlain, reinterpret_cast<void**>(current), current_object_address));
+ current =
+ ReadExternalReferenceCase(kPlain, current, current_object_address);
break;
// Find an external reference and write a pointer to it in the current
// code object.
case kExternalReference + kFromCode + kStartOfObject:
- current = reinterpret_cast<MaybeObject**>(ReadExternalReferenceCase(
- kFromCode, reinterpret_cast<void**>(current),
- current_object_address));
+ current = ReadExternalReferenceCase(kFromCode, current,
+ current_object_address);
break;
case kInternalReferenceEncoded:
@@ -514,8 +559,7 @@ bool Deserializer<AllocatorT>::ReadData(MaybeObject** current,
// from code entry.
int pc_offset = source_.GetInt();
int target_offset = source_.GetInt();
- Code* code =
- Code::cast(HeapObject::FromAddress(current_object_address));
+ Code code = Code::cast(HeapObject::FromAddress(current_object_address));
DCHECK(0 <= pc_offset && pc_offset <= code->raw_instruction_size());
DCHECK(0 <= target_offset &&
target_offset <= code->raw_instruction_size());
@@ -534,8 +578,7 @@ bool Deserializer<AllocatorT>::ReadData(MaybeObject** current,
int builtin_index = source_.GetInt();
DCHECK(Builtins::IsBuiltinId(builtin_index));
- current = reinterpret_cast<MaybeObject**>(
- reinterpret_cast<Address>(current) + skip);
+ current.Advance(skip);
CHECK_NOT_NULL(isolate->embedded_blob());
EmbeddedData d = EmbeddedData::FromBlob();
@@ -543,19 +586,17 @@ bool Deserializer<AllocatorT>::ReadData(MaybeObject** current,
CHECK_NE(kNullAddress, address);
if (RelocInfo::OffHeapTargetIsCodedSpecially()) {
- Address location_of_branch_data = reinterpret_cast<Address>(current);
+ Address location_of_branch_data = current.address();
int skip = Assembler::deserialization_special_target_size(
location_of_branch_data);
Assembler::deserialization_set_special_target_at(
location_of_branch_data,
Code::cast(HeapObject::FromAddress(current_object_address)),
address);
- location_of_branch_data += skip;
- current = reinterpret_cast<MaybeObject**>(location_of_branch_data);
+ current.Advance(skip);
} else {
- MaybeObject* o = reinterpret_cast<MaybeObject*>(address);
- UnalignedCopy(current, &o);
- current++;
+ UnalignedCopy(current, address);
+ current.Advance();
}
break;
}
@@ -571,9 +612,8 @@ bool Deserializer<AllocatorT>::ReadData(MaybeObject** current,
case kDeferred: {
// Deferred can only occur right after the heap object header.
- DCHECK_EQ(current, reinterpret_cast<MaybeObject**>(
- current_object_address + kPointerSize));
- HeapObject* obj = HeapObject::FromAddress(current_object_address);
+ DCHECK_EQ(current.address(), current_object_address + kPointerSize);
+ HeapObject obj = HeapObject::FromAddress(current_object_address);
// If the deferred object is a map, its instance type may be used
// during deserialization. Initialize it with a temporary value.
if (obj->IsMap()) Map::cast(obj)->set_instance_type(FILLER_TYPE);
@@ -589,10 +629,9 @@ bool Deserializer<AllocatorT>::ReadData(MaybeObject** current,
// Deserialize raw data of variable length.
case kVariableRawData: {
int size_in_bytes = source_.GetInt();
- byte* raw_data_out = reinterpret_cast<byte*>(current);
+ byte* raw_data_out = reinterpret_cast<byte*>(current.address());
source_.CopyRaw(raw_data_out, size_in_bytes);
- current = reinterpret_cast<MaybeObject**>(
- reinterpret_cast<intptr_t>(current) + size_in_bytes);
+ current.Advance(size_in_bytes);
break;
}
@@ -608,10 +647,12 @@ bool Deserializer<AllocatorT>::ReadData(MaybeObject** current,
case kVariableRepeat: {
int repeats = source_.GetInt();
- MaybeObject* object = current[-1];
+ MaybeObject object = current.ReadPrevious();
DCHECK(!Heap::InNewSpace(object));
- DCHECK(!allocator()->next_reference_is_weak());
- for (int i = 0; i < repeats; i++) UnalignedCopy(current++, &object);
+ for (int i = 0; i < repeats; i++) {
+ UnalignedCopy(current, object);
+ current.Advance();
+ }
break;
}
@@ -628,8 +669,7 @@ bool Deserializer<AllocatorT>::ReadData(MaybeObject** current,
case kApiReference: {
int skip = source_.GetInt();
- current = reinterpret_cast<MaybeObject**>(
- reinterpret_cast<Address>(current) + skip);
+ current.Advance(skip);
uint32_t reference_id = static_cast<uint32_t>(source_.GetInt());
Address address;
if (isolate->api_external_references()) {
@@ -641,11 +681,16 @@ bool Deserializer<AllocatorT>::ReadData(MaybeObject** current,
} else {
address = reinterpret_cast<Address>(NoExternalReferencesCallback);
}
- memcpy(current, &address, kPointerSize);
- current++;
+ UnalignedCopy(current, address);
+ current.Advance();
break;
}
+ case kClearedWeakReference:
+ UnalignedCopy(current, HeapObjectReference::ClearedValue(isolate_));
+ current.Advance();
+ break;
+
case kWeakPrefix:
DCHECK(!allocator()->next_reference_is_weak());
allocator()->set_next_reference_is_weak(true);
@@ -659,13 +704,17 @@ bool Deserializer<AllocatorT>::ReadData(MaybeObject** current,
break;
}
- STATIC_ASSERT(kNumberOfRootArrayConstants == Heap::kOldSpaceRoots);
+ // First kNumberOfRootArrayConstants roots are guaranteed to be in
+ // the old space.
+ STATIC_ASSERT(
+ static_cast<int>(RootIndex::kFirstImmortalImmovableRoot) == 0);
+ STATIC_ASSERT(kNumberOfRootArrayConstants <=
+ static_cast<int>(RootIndex::kLastImmortalImmovableRoot));
STATIC_ASSERT(kNumberOfRootArrayConstants == 32);
SIXTEEN_CASES(kRootArrayConstantsWithSkip)
SIXTEEN_CASES(kRootArrayConstantsWithSkip + 16) {
int skip = source_.GetInt();
- current = reinterpret_cast<MaybeObject**>(
- reinterpret_cast<intptr_t>(current) + skip);
+ current.Advance(skip);
V8_FALLTHROUGH;
}
@@ -673,11 +722,10 @@ bool Deserializer<AllocatorT>::ReadData(MaybeObject** current,
SIXTEEN_CASES(kRootArrayConstants + 16) {
int id = data & kRootArrayConstantsMask;
RootIndex root_index = static_cast<RootIndex>(id);
- MaybeObject* object =
- MaybeObject::FromObject(isolate->heap()->root(root_index));
+ MaybeObject object = MaybeObject::FromObject(isolate->root(root_index));
DCHECK(!Heap::InNewSpace(object));
- DCHECK(!allocator()->next_reference_is_weak());
- UnalignedCopy(current++, &object);
+ UnalignedCopy(current, object);
+ current.Advance();
break;
}
@@ -685,28 +733,26 @@ bool Deserializer<AllocatorT>::ReadData(MaybeObject** current,
FOUR_CASES(kHotObjectWithSkip)
FOUR_CASES(kHotObjectWithSkip + 4) {
int skip = source_.GetInt();
- current = reinterpret_cast<MaybeObject**>(
- reinterpret_cast<Address>(current) + skip);
+ current.Advance(skip);
V8_FALLTHROUGH;
}
FOUR_CASES(kHotObject)
FOUR_CASES(kHotObject + 4) {
int index = data & kHotObjectMask;
- Object* hot_object = hot_objects_.Get(index);
- MaybeObject* hot_maybe_object = MaybeObject::FromObject(hot_object);
+ Object hot_object = hot_objects_.Get(index);
+ MaybeObject hot_maybe_object = MaybeObject::FromObject(hot_object);
if (allocator()->GetAndClearNextReferenceIsWeak()) {
hot_maybe_object = MaybeObject::MakeWeak(hot_maybe_object);
}
- UnalignedCopy(current, &hot_maybe_object);
+ UnalignedCopy(current, hot_maybe_object);
if (write_barrier_needed && Heap::InNewSpace(hot_object)) {
- Address current_address = reinterpret_cast<Address>(current);
- GenerationalBarrier(HeapObject::FromAddress(current_object_address),
- reinterpret_cast<MaybeObject**>(current_address),
- hot_maybe_object);
+ HeapObject current_object =
+ HeapObject::FromAddress(current_object_address);
+ GenerationalBarrier(current_object, current.Slot(), hot_maybe_object);
}
- current++;
+ current.Advance();
break;
}
@@ -714,21 +760,22 @@ bool Deserializer<AllocatorT>::ReadData(MaybeObject** current,
STATIC_ASSERT(kNumberOfFixedRawData == 32);
SIXTEEN_CASES(kFixedRawData)
SIXTEEN_CASES(kFixedRawData + 16) {
- byte* raw_data_out = reinterpret_cast<byte*>(current);
+ byte* raw_data_out = reinterpret_cast<byte*>(current.address());
int size_in_bytes = (data - kFixedRawDataStart) << kPointerSizeLog2;
source_.CopyRaw(raw_data_out, size_in_bytes);
- current = reinterpret_cast<MaybeObject**>(raw_data_out + size_in_bytes);
+ current.Advance(size_in_bytes);
break;
}
STATIC_ASSERT(kNumberOfFixedRepeat == 16);
SIXTEEN_CASES(kFixedRepeat) {
int repeats = data - kFixedRepeatStart;
- MaybeObject* object;
- DCHECK(!allocator()->next_reference_is_weak());
- UnalignedCopy(&object, current - 1);
+ MaybeObject object = current.ReadPrevious();
DCHECK(!Heap::InNewSpace(object));
- for (int i = 0; i < repeats; i++) UnalignedCopy(current++, &object);
+ for (int i = 0; i < repeats; i++) {
+ UnalignedCopy(current, object);
+ current.Advance();
+ }
break;
}
@@ -749,36 +796,33 @@ bool Deserializer<AllocatorT>::ReadData(MaybeObject** current,
return true;
}
-template <class AllocatorT>
-void** Deserializer<AllocatorT>::ReadExternalReferenceCase(
- HowToCode how, void** current, Address current_object_address) {
+UnalignedSlot Deserializer::ReadExternalReferenceCase(
+ HowToCode how, UnalignedSlot current, Address current_object_address) {
int skip = source_.GetInt();
- current = reinterpret_cast<void**>(reinterpret_cast<Address>(current) + skip);
+ current.Advance(skip);
uint32_t reference_id = static_cast<uint32_t>(source_.GetInt());
Address address = external_reference_table_->address(reference_id);
if (how == kFromCode) {
- Address location_of_branch_data = reinterpret_cast<Address>(current);
+ Address location_of_branch_data = current.address();
int skip =
Assembler::deserialization_special_target_size(location_of_branch_data);
Assembler::deserialization_set_special_target_at(
location_of_branch_data,
Code::cast(HeapObject::FromAddress(current_object_address)), address);
- location_of_branch_data += skip;
- current = reinterpret_cast<void**>(location_of_branch_data);
+ current.Advance(skip);
} else {
- void* new_current = reinterpret_cast<void**>(address);
- UnalignedCopy(current, &new_current);
- ++current;
+ UnalignedCopy(current, address);
+ current.Advance();
}
return current;
}
-template <class AllocatorT>
template <int where, int how, int within, int space_number_if_any>
-MaybeObject** Deserializer<AllocatorT>::ReadDataCase(
- Isolate* isolate, MaybeObject** current, Address current_object_address,
- byte data, bool write_barrier_needed) {
+UnalignedSlot Deserializer::ReadDataCase(Isolate* isolate,
+ UnalignedSlot current,
+ Address current_object_address,
+ byte data, bool write_barrier_needed) {
bool emit_write_barrier = false;
bool current_was_incremented = false;
int space_number = space_number_if_any == kAnyOldSpace ? (data & kSpaceMask)
@@ -791,91 +835,78 @@ MaybeObject** Deserializer<AllocatorT>::ReadDataCase(
ReadObject(space_number, current, reference_type);
emit_write_barrier = (space_number == NEW_SPACE);
} else {
- Object* new_object = nullptr; /* May not be a real Object pointer. */
+ Object new_object; /* May not be a real Object pointer. */
if (where == kNewObject) {
- ReadObject(space_number, reinterpret_cast<MaybeObject**>(&new_object),
+ ReadObject(space_number, UnalignedSlot(&new_object),
HeapObjectReferenceType::STRONG);
} else if (where == kBackref) {
emit_write_barrier = (space_number == NEW_SPACE);
new_object = GetBackReferencedObject(data & kSpaceMask);
} else if (where == kBackrefWithSkip) {
int skip = source_.GetInt();
- current = reinterpret_cast<MaybeObject**>(
- reinterpret_cast<Address>(current) + skip);
+ current.Advance(skip);
emit_write_barrier = (space_number == NEW_SPACE);
new_object = GetBackReferencedObject(data & kSpaceMask);
} else if (where == kRootArray) {
int id = source_.GetInt();
RootIndex root_index = static_cast<RootIndex>(id);
- new_object = isolate->heap()->root(root_index);
+ new_object = isolate->root(root_index);
emit_write_barrier = Heap::InNewSpace(new_object);
hot_objects_.Add(HeapObject::cast(new_object));
+ } else if (where == kReadOnlyObjectCache) {
+ int cache_index = source_.GetInt();
+ new_object = isolate->read_only_object_cache()->at(cache_index);
+ DCHECK(!Heap::InNewSpace(new_object));
+ emit_write_barrier = false;
} else if (where == kPartialSnapshotCache) {
int cache_index = source_.GetInt();
new_object = isolate->partial_snapshot_cache()->at(cache_index);
emit_write_barrier = Heap::InNewSpace(new_object);
- } else if (where == kAttachedReference) {
+ } else {
+ DCHECK_EQ(where, kAttachedReference);
int index = source_.GetInt();
new_object = *attached_objects_[index];
emit_write_barrier = Heap::InNewSpace(new_object);
- } else {
- DCHECK_EQ(where, kBuiltin);
- int builtin_id = MaybeReplaceWithDeserializeLazy(source_.GetInt());
- new_object = isolate->builtins()->builtin(builtin_id);
- emit_write_barrier = false;
}
if (within == kInnerPointer) {
DCHECK_EQ(how, kFromCode);
- if (where == kBuiltin) {
- // At this point, new_object may still be uninitialized, thus the
- // unchecked Code cast.
- new_object = reinterpret_cast<Object*>(
- reinterpret_cast<Code*>(new_object)->raw_instruction_start());
- } else if (new_object->IsCode()) {
- new_object = reinterpret_cast<Object*>(
- Code::cast(new_object)->raw_instruction_start());
+ if (new_object->IsCode()) {
+ new_object = Object(Code::cast(new_object)->raw_instruction_start());
} else {
- Cell* cell = Cell::cast(new_object);
- new_object = reinterpret_cast<Object*>(cell->ValueAddress());
+ Cell cell = Cell::cast(new_object);
+ new_object = Object(cell->ValueAddress());
}
}
if (how == kFromCode) {
DCHECK(!allocator()->next_reference_is_weak());
- Address location_of_branch_data = reinterpret_cast<Address>(current);
+ Address location_of_branch_data = current.address();
int skip = Assembler::deserialization_special_target_size(
location_of_branch_data);
Assembler::deserialization_set_special_target_at(
location_of_branch_data,
Code::cast(HeapObject::FromAddress(current_object_address)),
- reinterpret_cast<Address>(new_object));
- location_of_branch_data += skip;
- current = reinterpret_cast<MaybeObject**>(location_of_branch_data);
+ new_object->ptr());
+ current.Advance(skip);
current_was_incremented = true;
} else {
- MaybeObject* new_maybe_object = MaybeObject::FromObject(new_object);
+ MaybeObject new_maybe_object = MaybeObject::FromObject(new_object);
if (allocator()->GetAndClearNextReferenceIsWeak()) {
new_maybe_object = MaybeObject::MakeWeak(new_maybe_object);
}
- UnalignedCopy(current, &new_maybe_object);
+ UnalignedCopy(current, new_maybe_object);
}
}
if (emit_write_barrier && write_barrier_needed) {
- Address current_address = reinterpret_cast<Address>(current);
- SLOW_DCHECK(isolate->heap()->ContainsSlow(current_object_address));
- GenerationalBarrier(HeapObject::FromAddress(current_object_address),
- reinterpret_cast<MaybeObject**>(current_address),
- *reinterpret_cast<MaybeObject**>(current_address));
+ HeapObject object = HeapObject::FromAddress(current_object_address);
+ SLOW_DCHECK(isolate->heap()->Contains(object));
+ GenerationalBarrier(object, current.Slot(), current.Read());
}
if (!current_was_incremented) {
- current++;
+ current.Advance();
}
return current;
}
-// Explicit instantiation.
-template class Deserializer<BuiltinDeserializerAllocator>;
-template class Deserializer<DefaultDeserializerAllocator>;
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/snapshot/deserializer.h b/deps/v8/src/snapshot/deserializer.h
index 8340a93538..70b48bf0ef 100644
--- a/deps/v8/src/snapshot/deserializer.h
+++ b/deps/v8/src/snapshot/deserializer.h
@@ -7,17 +7,22 @@
#include <vector>
+#include "src/objects/allocation-site.h"
+#include "src/objects/api-callbacks.h"
+#include "src/objects/code.h"
#include "src/objects/js-array.h"
-#include "src/snapshot/default-deserializer-allocator.h"
+#include "src/objects/map.h"
+#include "src/objects/string.h"
+#include "src/snapshot/deserializer-allocator.h"
#include "src/snapshot/serializer-common.h"
#include "src/snapshot/snapshot-source-sink.h"
namespace v8 {
namespace internal {
-class AllocationSite;
class HeapObject;
class Object;
+class UnalignedSlot;
// Used for platforms with embedded constant pools to trigger deserialization
// of objects found in code.
@@ -30,7 +35,6 @@ class Object;
#endif
// A Deserializer reads a snapshot and reconstructs the Object graph it defines.
-template <class AllocatorT = DefaultDeserializerAllocator>
class Deserializer : public SerializerDeserializer {
public:
~Deserializer() override;
@@ -57,12 +61,14 @@ class Deserializer : public SerializerDeserializer {
void Initialize(Isolate* isolate);
void DeserializeDeferredObjects();
- // Deserializes into a single pointer and returns the resulting object.
- Object* ReadDataSingle();
+ // Create Log events for newly deserialized objects.
+ void LogNewObjectEvents();
+ void LogScriptEvents(Script script);
+ void LogNewMapEvents();
// This returns the address of an object that has been described in the
// snapshot by chunk index and offset.
- HeapObject* GetBackReferencedObject(int space);
+ HeapObject GetBackReferencedObject(int space);
// Add an object to back an attached reference. The order to add objects must
// mirror the order they are added in the serializer.
@@ -72,16 +78,17 @@ class Deserializer : public SerializerDeserializer {
Isolate* isolate() const { return isolate_; }
SnapshotByteSource* source() { return &source_; }
- const std::vector<AllocationSite*>& new_allocation_sites() const {
+ const std::vector<AllocationSite>& new_allocation_sites() const {
return new_allocation_sites_;
}
- const std::vector<Code*>& new_code_objects() const {
+ const std::vector<Code>& new_code_objects() const {
return new_code_objects_;
}
- const std::vector<AccessorInfo*>& accessor_infos() const {
+ const std::vector<Map>& new_maps() const { return new_maps_; }
+ const std::vector<AccessorInfo>& accessor_infos() const {
return accessor_infos_;
}
- const std::vector<CallHandlerInfo*>& call_handler_infos() const {
+ const std::vector<CallHandlerInfo>& call_handler_infos() const {
return call_handler_infos_;
}
const std::vector<Handle<String>>& new_internalized_strings() const {
@@ -91,58 +98,49 @@ class Deserializer : public SerializerDeserializer {
return new_scripts_;
}
- AllocatorT* allocator() { return &allocator_; }
+ DeserializerAllocator* allocator() { return &allocator_; }
bool deserializing_user_code() const { return deserializing_user_code_; }
bool can_rehash() const { return can_rehash_; }
- bool IsLazyDeserializationEnabled() const;
-
void Rehash();
+ // Cached current isolate.
+ Isolate* isolate_;
+
private:
- void VisitRootPointers(Root root, const char* description, Object** start,
- Object** end) override;
+ void VisitRootPointers(Root root, const char* description,
+ FullObjectSlot start, FullObjectSlot end) override;
void Synchronize(VisitorSynchronization::SyncTag tag) override;
- template <typename T>
- void UnalignedCopy(T** dest, T** src) {
- DCHECK(!allocator()->next_reference_is_weak());
- memcpy(dest, src, sizeof(*src));
- }
+ void UnalignedCopy(UnalignedSlot dest, MaybeObject value);
+ void UnalignedCopy(UnalignedSlot dest, Address value);
// Fills in some heap data in an area from start to end (non-inclusive). The
// space id is used for the write barrier. The object_address is the address
// of the object we are writing into, or nullptr if we are not writing into an
// object, i.e. if we are writing a series of tagged values that are not on
// the heap. Return false if the object content has been deferred.
- bool ReadData(MaybeObject** start, MaybeObject** end, int space,
+ bool ReadData(UnalignedSlot start, UnalignedSlot end, int space,
Address object_address);
// A helper function for ReadData, templatized on the bytecode for efficiency.
// Returns the new value of {current}.
template <int where, int how, int within, int space_number_if_any>
- inline MaybeObject** ReadDataCase(Isolate* isolate, MaybeObject** current,
+ inline UnalignedSlot ReadDataCase(Isolate* isolate, UnalignedSlot current,
Address current_object_address, byte data,
bool write_barrier_needed);
// A helper function for ReadData for reading external references.
// Returns the new value of {current}.
- inline void** ReadExternalReferenceCase(HowToCode how, void** current,
- Address current_object_address);
+ inline UnalignedSlot ReadExternalReferenceCase(
+ HowToCode how, UnalignedSlot current, Address current_object_address);
- void ReadObject(int space_number, MaybeObject** write_back,
+ void ReadObject(int space_number, UnalignedSlot write_back,
HeapObjectReferenceType reference_type);
// Special handling for serialized code like hooking up internalized strings.
- HeapObject* PostProcessNewObject(HeapObject* obj, int space);
-
- // May replace the given builtin_id with the DeserializeLazy builtin for lazy
- // deserialization.
- int MaybeReplaceWithDeserializeLazy(int builtin_id);
-
- // Cached current isolate.
- Isolate* isolate_;
+ HeapObject PostProcessNewObject(HeapObject obj, int space);
// Objects from the attached object descriptions in the serialized user code.
std::vector<Handle<HeapObject>> attached_objects_;
@@ -152,27 +150,28 @@ class Deserializer : public SerializerDeserializer {
ExternalReferenceTable* external_reference_table_;
- std::vector<AllocationSite*> new_allocation_sites_;
- std::vector<Code*> new_code_objects_;
- std::vector<AccessorInfo*> accessor_infos_;
- std::vector<CallHandlerInfo*> call_handler_infos_;
+ std::vector<Map> new_maps_;
+ std::vector<AllocationSite> new_allocation_sites_;
+ std::vector<Code> new_code_objects_;
+ std::vector<AccessorInfo> accessor_infos_;
+ std::vector<CallHandlerInfo> call_handler_infos_;
std::vector<Handle<String>> new_internalized_strings_;
std::vector<Handle<Script>> new_scripts_;
std::vector<byte*> off_heap_backing_stores_;
- AllocatorT allocator_;
+ DeserializerAllocator allocator_;
const bool deserializing_user_code_;
// TODO(6593): generalize rehashing, and remove this flag.
bool can_rehash_;
- std::vector<HeapObject*> to_rehash_;
+ std::vector<HeapObject> to_rehash_;
#ifdef DEBUG
uint32_t num_api_references_;
#endif // DEBUG
// For source(), isolate(), and allocator().
- friend class DefaultDeserializerAllocator;
+ friend class DeserializerAllocator;
DISALLOW_COPY_AND_ASSIGN(Deserializer);
};
@@ -180,17 +179,17 @@ class Deserializer : public SerializerDeserializer {
// Used to insert a deserialized internalized string into the string table.
class StringTableInsertionKey : public StringTableKey {
public:
- explicit StringTableInsertionKey(String* string);
+ explicit StringTableInsertionKey(String string);
- bool IsMatch(Object* string) override;
+ bool IsMatch(Object string) override;
V8_WARN_UNUSED_RESULT Handle<String> AsHandle(Isolate* isolate) override;
private:
- uint32_t ComputeHashField(String* string);
+ uint32_t ComputeHashField(String string);
- String* string_;
- DisallowHeapAllocation no_gc;
+ String string_;
+ DISALLOW_HEAP_ALLOCATION(no_gc);
};
} // namespace internal
diff --git a/deps/v8/src/snapshot/embedded-data.cc b/deps/v8/src/snapshot/embedded-data.cc
new file mode 100644
index 0000000000..f5dded1bd1
--- /dev/null
+++ b/deps/v8/src/snapshot/embedded-data.cc
@@ -0,0 +1,332 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/snapshot/embedded-data.h"
+
+#include "src/assembler-inl.h"
+#include "src/callable.h"
+#include "src/objects-inl.h"
+#include "src/snapshot/snapshot.h"
+
+namespace v8 {
+namespace internal {
+
+// static
+bool InstructionStream::PcIsOffHeap(Isolate* isolate, Address pc) {
+ if (FLAG_embedded_builtins) {
+ const Address start = reinterpret_cast<Address>(isolate->embedded_blob());
+ return start <= pc && pc < start + isolate->embedded_blob_size();
+ } else {
+ return false;
+ }
+}
+
+// static
+Code InstructionStream::TryLookupCode(Isolate* isolate, Address address) {
+ if (!PcIsOffHeap(isolate, address)) return Code();
+
+ EmbeddedData d = EmbeddedData::FromBlob();
+ if (address < d.InstructionStartOfBuiltin(0)) return Code();
+
+ // Note: Addresses within the padding section between builtins (i.e. within
+ // start + size <= address < start + padded_size) are interpreted as belonging
+ // to the preceding builtin.
+
+ int l = 0, r = Builtins::builtin_count;
+ while (l < r) {
+ const int mid = (l + r) / 2;
+ Address start = d.InstructionStartOfBuiltin(mid);
+ Address end = start + d.PaddedInstructionSizeOfBuiltin(mid);
+
+ if (address < start) {
+ r = mid;
+ } else if (address >= end) {
+ l = mid + 1;
+ } else {
+ return isolate->builtins()->builtin(mid);
+ }
+ }
+
+ UNREACHABLE();
+}
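+
The lookup above is a plain binary search over half-open [start, start + padded_size) ranges. A standalone sketch of the same invariant, with hypothetical starts/padded_sizes arrays standing in for the metadata table:

    #include <cstdint>

    using Address = uintptr_t;  // stand-in for v8::internal::Address

    // Sketch of the search invariant used above: addresses that fall into
    // the padding after a builtin resolve to that preceding builtin.
    int LookupBuiltinIndex(const Address* starts, const uint32_t* padded_sizes,
                           int count, Address address) {
      int l = 0, r = count;
      while (l < r) {
        const int mid = (l + r) / 2;
        if (address < starts[mid]) {
          r = mid;
        } else if (address >= starts[mid] + padded_sizes[mid]) {
          l = mid + 1;
        } else {
          return mid;  // starts[mid] <= address < starts[mid] + padded_sizes[mid]
        }
      }
      return -1;  // not found; the real code treats this as UNREACHABLE()
    }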
+
+// static
+void InstructionStream::CreateOffHeapInstructionStream(Isolate* isolate,
+ uint8_t** data,
+ uint32_t* size) {
+ EmbeddedData d = EmbeddedData::FromIsolate(isolate);
+
+ v8::PageAllocator* page_allocator = v8::internal::GetPlatformPageAllocator();
+ const uint32_t page_size =
+ static_cast<uint32_t>(page_allocator->AllocatePageSize());
+ const uint32_t allocated_size = RoundUp(d.size(), page_size);
+
+ uint8_t* allocated_bytes = static_cast<uint8_t*>(
+ AllocatePages(page_allocator, isolate->heap()->GetRandomMmapAddr(),
+ allocated_size, page_size, PageAllocator::kReadWrite));
+ CHECK_NOT_NULL(allocated_bytes);
+
+ std::memcpy(allocated_bytes, d.data(), d.size());
+ CHECK(SetPermissions(page_allocator, allocated_bytes, allocated_size,
+ PageAllocator::kReadExecute));
+
+ *data = allocated_bytes;
+ *size = d.size();
+
+ d.Dispose();
+}
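+
CreateOffHeapInstructionStream follows the usual W^X sequence: map pages read-write, copy the instructions in, then flip the mapping to read-execute before publishing it. A sketch of the same pattern with raw POSIX calls (an assumption for illustration; the code above goes through V8's PageAllocator):

    #include <sys/mman.h>
    #include <cstring>

    // POSIX sketch of the allocate-RW / copy / seal-RX pattern used above.
    // The kernel rounds `size` up to whole pages for both calls.
    void* CopyAsExecutable(const void* src, size_t size) {
      void* mem = mmap(nullptr, size, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      if (mem == MAP_FAILED) return nullptr;
      std::memcpy(mem, src, size);  // write while the pages are still RW
      if (mprotect(mem, size, PROT_READ | PROT_EXEC) != 0) {  // seal to RX
        munmap(mem, size);
        return nullptr;
      }
      return mem;
    }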
+
+// static
+void InstructionStream::FreeOffHeapInstructionStream(uint8_t* data,
+ uint32_t size) {
+ v8::PageAllocator* page_allocator = v8::internal::GetPlatformPageAllocator();
+ const uint32_t page_size =
+ static_cast<uint32_t>(page_allocator->AllocatePageSize());
+ CHECK(FreePages(page_allocator, data, RoundUp(size, page_size)));
+}
+
+namespace {
+
+bool BuiltinAliasesOffHeapTrampolineRegister(Isolate* isolate, Code code) {
+ DCHECK(Builtins::IsIsolateIndependent(code->builtin_index()));
+ switch (Builtins::KindOf(code->builtin_index())) {
+ case Builtins::CPP:
+ case Builtins::TFC:
+ case Builtins::TFH:
+ case Builtins::TFJ:
+ case Builtins::TFS:
+ break;
+
+ // Bytecode handlers will only ever be used by the interpreter and so there
+ // will never be a need to use trampolines with them.
+ case Builtins::BCH:
+ case Builtins::API:
+ case Builtins::ASM:
+ // TODO(jgruber): Extend checks to remaining kinds.
+ return false;
+ }
+
+ Callable callable = Builtins::CallableFor(
+ isolate, static_cast<Builtins::Name>(code->builtin_index()));
+ CallInterfaceDescriptor descriptor = callable.descriptor();
+
+ if (descriptor.ContextRegister() == kOffHeapTrampolineRegister) {
+ return true;
+ }
+
+ for (int i = 0; i < descriptor.GetRegisterParameterCount(); i++) {
+ Register reg = descriptor.GetRegisterParameter(i);
+ if (reg == kOffHeapTrampolineRegister) return true;
+ }
+
+ return false;
+}
+
+void FinalizeEmbeddedCodeTargets(Isolate* isolate, EmbeddedData* blob) {
+ static const int kRelocMask =
+ RelocInfo::ModeMask(RelocInfo::CODE_TARGET) |
+ RelocInfo::ModeMask(RelocInfo::RELATIVE_CODE_TARGET);
+
+ for (int i = 0; i < Builtins::builtin_count; i++) {
+ if (!Builtins::IsIsolateIndependent(i)) continue;
+
+ Code code = isolate->builtins()->builtin(i);
+ RelocIterator on_heap_it(code, kRelocMask);
+ RelocIterator off_heap_it(blob, code, kRelocMask);
+
+#if defined(V8_TARGET_ARCH_X64) || defined(V8_TARGET_ARCH_ARM64) || \
+ defined(V8_TARGET_ARCH_ARM) || defined(V8_TARGET_ARCH_MIPS) || \
+ defined(V8_TARGET_ARCH_IA32) || defined(V8_TARGET_ARCH_S390)
+ // On these platforms we emit relative builtin-to-builtin
+ // jumps for isolate-independent builtins in the snapshot. The loop below
+ // fixes up those relative jumps to the right offsets in the snapshot.
+ // See also: Code::IsIsolateIndependent.
+ while (!on_heap_it.done()) {
+ DCHECK(!off_heap_it.done());
+
+ RelocInfo* rinfo = on_heap_it.rinfo();
+ DCHECK_EQ(rinfo->rmode(), off_heap_it.rinfo()->rmode());
+ Code target = Code::GetCodeFromTargetAddress(rinfo->target_address());
+ CHECK(Builtins::IsIsolateIndependentBuiltin(target));
+
+ // Do not emit write-barrier for off-heap writes.
+ off_heap_it.rinfo()->set_target_address(
+ blob->InstructionStartOfBuiltin(target->builtin_index()),
+ SKIP_WRITE_BARRIER);
+
+ on_heap_it.next();
+ off_heap_it.next();
+ }
+ DCHECK(off_heap_it.done());
+#else
+ // Architectures not covered by the #if above do not use pc-relative calls
+ // and thus must not contain embedded code targets. Instead, we use an
+ // indirection through the root register.
+ CHECK(on_heap_it.done());
+ CHECK(off_heap_it.done());
+#endif // defined(V8_TARGET_ARCH_X64) || defined(V8_TARGET_ARCH_ARM64) || ...
+ }
+}
+
+} // namespace
+
+// static
+EmbeddedData EmbeddedData::FromIsolate(Isolate* isolate) {
+ Builtins* builtins = isolate->builtins();
+
+ // Store instruction stream lengths and offsets.
+ std::vector<struct Metadata> metadata(kTableSize);
+
+ bool saw_unsafe_builtin = false;
+ uint32_t raw_data_size = 0;
+ for (int i = 0; i < Builtins::builtin_count; i++) {
+ Code code = builtins->builtin(i);
+
+ if (Builtins::IsIsolateIndependent(i)) {
+ // Sanity-check that the given builtin is isolate-independent and does not
+ // use the trampoline register in its calling convention.
+ if (!code->IsIsolateIndependent(isolate)) {
+ saw_unsafe_builtin = true;
+ fprintf(stderr, "%s is not isolate-independent.\n", Builtins::name(i));
+ }
+ if (Builtins::IsWasmRuntimeStub(i) &&
+ RelocInfo::RequiresRelocation(code)) {
+ // Wasm additionally requires that its runtime stubs be
+ // individually PIC (i.e. we must be able to copy each stub outside the
+ // embedded area without relocations). In particular, that means
+ // pc-relative calls to other builtins are disallowed.
+ saw_unsafe_builtin = true;
+ fprintf(stderr, "%s is a wasm runtime stub but needs relocation.\n",
+ Builtins::name(i));
+ }
+ if (BuiltinAliasesOffHeapTrampolineRegister(isolate, code)) {
+ saw_unsafe_builtin = true;
+ fprintf(stderr, "%s aliases the off-heap trampoline register.\n",
+ Builtins::name(i));
+ }
+
+ uint32_t length = static_cast<uint32_t>(code->raw_instruction_size());
+
+ DCHECK_EQ(0, raw_data_size % kCodeAlignment);
+ metadata[i].instructions_offset = raw_data_size;
+ metadata[i].instructions_length = length;
+
+ // Align the start of each instruction stream.
+ raw_data_size += PadAndAlign(length);
+ } else {
+ metadata[i].instructions_offset = raw_data_size;
+ }
+ }
+ CHECK_WITH_MSG(
+ !saw_unsafe_builtin,
+ "One or more builtins marked as isolate-independent either contains "
+ "isolate-dependent code or aliases the off-heap trampoline register. "
+ "If in doubt, ask jgruber@");
+
+ const uint32_t blob_size = RawDataOffset() + raw_data_size;
+ uint8_t* const blob = new uint8_t[blob_size];
+ uint8_t* const raw_data_start = blob + RawDataOffset();
+
+ // Initially zap the entire blob, effectively padding the alignment area
+ // between two builtins with int3's (on x64/ia32).
+ ZapCode(reinterpret_cast<Address>(blob), blob_size);
+
+ // Write the metadata tables.
+ DCHECK_EQ(MetadataSize(), sizeof(metadata[0]) * metadata.size());
+ std::memcpy(blob + MetadataOffset(), metadata.data(), MetadataSize());
+
+ // Write the raw data section.
+ for (int i = 0; i < Builtins::builtin_count; i++) {
+ if (!Builtins::IsIsolateIndependent(i)) continue;
+ Code code = builtins->builtin(i);
+ uint32_t offset = metadata[i].instructions_offset;
+ uint8_t* dst = raw_data_start + offset;
+ DCHECK_LE(RawDataOffset() + offset + code->raw_instruction_size(),
+ blob_size);
+ std::memcpy(dst, reinterpret_cast<uint8_t*>(code->raw_instruction_start()),
+ code->raw_instruction_size());
+ }
+
+ EmbeddedData d(blob, blob_size);
+
+ // Fix up call targets that point to other embedded builtins.
+ FinalizeEmbeddedCodeTargets(isolate, &d);
+
+ // Hash the blob and store the result.
+ STATIC_ASSERT(HashSize() == kSizetSize);
+ const size_t hash = d.CreateHash();
+ std::memcpy(blob + HashOffset(), &hash, HashSize());
+
+ DCHECK_EQ(hash, d.CreateHash());
+ DCHECK_EQ(hash, d.Hash());
+
+ if (FLAG_serialization_statistics) d.PrintStatistics();
+
+ return d;
+}
+
+Address EmbeddedData::InstructionStartOfBuiltin(int i) const {
+ DCHECK(Builtins::IsBuiltinId(i));
+ const struct Metadata* metadata = Metadata();
+ const uint8_t* result = RawData() + metadata[i].instructions_offset;
+ DCHECK_LE(result, data_ + size_);
+ DCHECK_IMPLIES(result == data_ + size_, InstructionSizeOfBuiltin(i) == 0);
+ return reinterpret_cast<Address>(result);
+}
+
+uint32_t EmbeddedData::InstructionSizeOfBuiltin(int i) const {
+ DCHECK(Builtins::IsBuiltinId(i));
+ const struct Metadata* metadata = Metadata();
+ return metadata[i].instructions_length;
+}
+
+size_t EmbeddedData::CreateHash() const {
+ STATIC_ASSERT(HashOffset() == 0);
+ STATIC_ASSERT(HashSize() == kSizetSize);
+ return base::hash_range(data_ + HashSize(), data_ + size_);
+}
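+
Since HashOffset() is 0 and CreateHash() hashes only data_ + HashSize() onward, the stored hash never feeds into its own computation, so a blob can be verified regardless of when the hash field was written. A hypothetical verification helper (not part of this patch), mirroring the DCHECKs in FromIsolate():

    // Recompute the hash over everything past the hash field and compare it
    // with the value stored at offset 0.
    bool VerifyEmbeddedBlob(const EmbeddedData& d) {
      return d.CreateHash() == d.Hash();
    }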
+
+void EmbeddedData::PrintStatistics() const {
+ DCHECK(FLAG_serialization_statistics);
+
+ constexpr int kCount = Builtins::builtin_count;
+
+ int embedded_count = 0;
+ int instruction_size = 0;
+ int sizes[kCount];
+ for (int i = 0; i < kCount; i++) {
+ if (!Builtins::IsIsolateIndependent(i)) continue;
+ const int size = InstructionSizeOfBuiltin(i);
+ instruction_size += size;
+ sizes[embedded_count] = size;
+ embedded_count++;
+ }
+
+ // Sort for percentiles.
+ std::sort(&sizes[0], &sizes[embedded_count]);
+
+ const int k50th = embedded_count * 0.5;
+ const int k75th = embedded_count * 0.75;
+ const int k90th = embedded_count * 0.90;
+ const int k99th = embedded_count * 0.99;
+
+ const int metadata_size = static_cast<int>(HashSize() + MetadataSize());
+
+ PrintF("EmbeddedData:\n");
+ PrintF(" Total size: %d\n",
+ static_cast<int>(size()));
+ PrintF(" Metadata size: %d\n", metadata_size);
+ PrintF(" Instruction size: %d\n", instruction_size);
+ PrintF(" Padding: %d\n",
+ static_cast<int>(size() - metadata_size - instruction_size));
+ PrintF(" Embedded builtin count: %d\n", embedded_count);
+ PrintF(" Instruction size (50th percentile): %d\n", sizes[k50th]);
+ PrintF(" Instruction size (75th percentile): %d\n", sizes[k75th]);
+ PrintF(" Instruction size (90th percentile): %d\n", sizes[k90th]);
+ PrintF(" Instruction size (99th percentile): %d\n", sizes[k99th]);
+ PrintF("\n");
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/snapshot/embedded-data.h b/deps/v8/src/snapshot/embedded-data.h
new file mode 100644
index 0000000000..6e28071525
--- /dev/null
+++ b/deps/v8/src/snapshot/embedded-data.h
@@ -0,0 +1,134 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_SNAPSHOT_EMBEDDED_DATA_H_
+#define V8_SNAPSHOT_EMBEDDED_DATA_H_
+
+#include "src/base/macros.h"
+#include "src/builtins/builtins.h"
+#include "src/globals.h"
+#include "src/isolate.h"
+
+namespace v8 {
+namespace internal {
+
+class Code;
+class Isolate;
+
+// Wraps an off-heap instruction stream.
+// TODO(jgruber,v8:6666): Remove this class.
+class InstructionStream final : public AllStatic {
+ public:
+ // Returns true iff the given pc points into an off-heap instruction stream.
+ static bool PcIsOffHeap(Isolate* isolate, Address pc);
+
+ // Returns the corresponding Code object if it exists, and an empty Code
+ // object otherwise.
+ static Code TryLookupCode(Isolate* isolate, Address address);
+
+ // During snapshot creation, we first create an executable off-heap area
+ // containing all off-heap code. The area is guaranteed to be contiguous.
+ // Note that this only applies when building the snapshot, e.g. for
+ // mksnapshot. Otherwise, off-heap code is embedded directly into the binary.
+ static void CreateOffHeapInstructionStream(Isolate* isolate, uint8_t** data,
+ uint32_t* size);
+ static void FreeOffHeapInstructionStream(uint8_t* data, uint32_t size);
+};
+
+class EmbeddedData final {
+ public:
+ static EmbeddedData FromIsolate(Isolate* isolate);
+
+ static EmbeddedData FromBlob() {
+ return EmbeddedData(Isolate::CurrentEmbeddedBlob(),
+ Isolate::CurrentEmbeddedBlobSize());
+ }
+
+ static EmbeddedData FromBlob(Isolate* isolate) {
+ return EmbeddedData(isolate->embedded_blob(),
+ isolate->embedded_blob_size());
+ }
+
+ const uint8_t* data() const { return data_; }
+ uint32_t size() const { return size_; }
+
+ void Dispose() { delete[] data_; }
+
+ Address InstructionStartOfBuiltin(int i) const;
+ uint32_t InstructionSizeOfBuiltin(int i) const;
+
+ bool ContainsBuiltin(int i) const { return InstructionSizeOfBuiltin(i) > 0; }
+
+ uint32_t AddressForHashing(Address addr) {
+ Address start = reinterpret_cast<Address>(data_);
+ DCHECK(IsInRange(addr, start, start + size_));
+ return static_cast<uint32_t>(addr - start);
+ }
+
+ // Padded to kCodeAlignment.
+ uint32_t PaddedInstructionSizeOfBuiltin(int i) const {
+ uint32_t size = InstructionSizeOfBuiltin(i);
+ return (size == 0) ? 0 : PadAndAlign(size);
+ }
+
+ size_t CreateHash() const;
+ size_t Hash() const {
+ return *reinterpret_cast<const size_t*>(data_ + HashOffset());
+ }
+
+ struct Metadata {
+ // Blob layout information.
+ uint32_t instructions_offset;
+ uint32_t instructions_length;
+ };
+ STATIC_ASSERT(offsetof(Metadata, instructions_offset) == 0);
+ STATIC_ASSERT(offsetof(Metadata, instructions_length) == kUInt32Size);
+ STATIC_ASSERT(sizeof(Metadata) == kUInt32Size + kUInt32Size);
+
+ // The layout of the blob is as follows:
+ //
+ // [0] hash of the remaining blob
+ // [1] metadata of instruction stream 0
+ // ... metadata
+ // ... instruction streams
+
+ static constexpr uint32_t kTableSize = Builtins::builtin_count;
+ static constexpr uint32_t HashOffset() { return 0; }
+ static constexpr uint32_t HashSize() { return kSizetSize; }
+ static constexpr uint32_t MetadataOffset() {
+ return HashOffset() + HashSize();
+ }
+ static constexpr uint32_t MetadataSize() {
+ return sizeof(struct Metadata) * kTableSize;
+ }
+ static constexpr uint32_t RawDataOffset() {
+ return PadAndAlign(MetadataOffset() + MetadataSize());
+ }
+
+ private:
+ EmbeddedData(const uint8_t* data, uint32_t size) : data_(data), size_(size) {
+ DCHECK_NOT_NULL(data);
+ DCHECK_LT(0, size);
+ }
+
+ const Metadata* Metadata() const {
+ return reinterpret_cast<const struct Metadata*>(data_ + MetadataOffset());
+ }
+ const uint8_t* RawData() const { return data_ + RawDataOffset(); }
+
+ static constexpr int PadAndAlign(int size) {
+ // Ensure we have at least one byte trailing the actual builtin
+ // instructions which we can later fill with int3.
+ return RoundUp<kCodeAlignment>(size + 1);
+ }
+
+ void PrintStatistics() const;
+
+ const uint8_t* data_;
+ uint32_t size_;
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_SNAPSHOT_EMBEDDED_DATA_H_
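
To make the layout constants above concrete, here is a worked example under assumed values -- kSizetSize == 8, kCodeAlignment == 32, and a made-up table of 3 builtins (the real kTableSize is Builtins::builtin_count):

    #include <cstdint>

    // Worked example of the blob layout under the assumptions above.
    constexpr uint32_t kExampleTableSize = 3;         // hypothetical count
    constexpr uint32_t kExampleHashEnd = 0 + 8;       // HashOffset() + HashSize()
    constexpr uint32_t kExampleMetadataEnd =
        kExampleHashEnd + kExampleTableSize * 8;      // 8 + 3 * sizeof(Metadata) == 32
    constexpr uint32_t kExampleRawDataOffset =
        ((kExampleMetadataEnd + 1 + 31) / 32) * 32;   // PadAndAlign(32) == RoundUp<32>(33)
    static_assert(kExampleRawDataOffset == 64, "first stream starts at byte 64");
    // The "+ 1" inside PadAndAlign guarantees at least one trailing byte per
    // stream, which FromIsolate() zaps (int3 on x64/ia32).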
diff --git a/deps/v8/src/snapshot/embedded-empty.cc b/deps/v8/src/snapshot/embedded-empty.cc
index 77e83b73ce..9ffb3458d3 100644
--- a/deps/v8/src/snapshot/embedded-empty.cc
+++ b/deps/v8/src/snapshot/embedded-empty.cc
@@ -6,16 +6,18 @@
#include <cstdint>
-namespace v8 {
-namespace internal {
+#include "src/base/macros.h"
-const uint8_t* DefaultEmbeddedBlob() { return nullptr; }
-uint32_t DefaultEmbeddedBlobSize() { return 0; }
+extern "C" const uint8_t* v8_Default_embedded_blob_;
+extern "C" uint32_t v8_Default_embedded_blob_size_;
+
+const uint8_t* v8_Default_embedded_blob_ = nullptr;
+uint32_t v8_Default_embedded_blob_size_ = 0;
#ifdef V8_MULTI_SNAPSHOTS
-const uint8_t* TrustedEmbeddedBlob() { return nullptr; }
-uint32_t TrustedEmbeddedBlobSize() { return 0; }
-#endif
+extern "C" const uint8_t* v8_Trusted_embedded_blob_;
+extern "C" uint32_t v8_Trusted_embedded_blob_size_;
-} // namespace internal
-} // namespace v8
+const uint8_t* v8_Trusted_embedded_blob_ = nullptr;
+uint32_t v8_Trusted_embedded_blob_size_ = 0;
+#endif
diff --git a/deps/v8/src/snapshot/embedded-file-writer.cc b/deps/v8/src/snapshot/embedded-file-writer.cc
new file mode 100644
index 0000000000..36a5f0e88b
--- /dev/null
+++ b/deps/v8/src/snapshot/embedded-file-writer.cc
@@ -0,0 +1,645 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/snapshot/embedded-file-writer.h"
+
+#include <algorithm>
+#include <cinttypes>
+
+#include "src/objects/code-inl.h"
+
+namespace v8 {
+namespace internal {
+
+// V8_CC_MSVC is true for both MSVC and clang on windows. clang can handle
+// __asm__-style inline assembly but MSVC cannot, and thus we need a more
+// precise compiler detection that can distinguish between the two. clang on
+// windows sets both __clang__ and _MSC_VER, MSVC sets only _MSC_VER.
+#if defined(_MSC_VER) && !defined(__clang__)
+#define V8_COMPILER_IS_MSVC
+#endif
+
+// MSVC uses MASM for x86 and x64, while it has an ARMASM for ARM32 and
+// ARMASM64 for ARM64. Since ARMASM and ARMASM64 accept a slightly tweaked
+// version of ARM assembly language, they are referred to together in Visual
+// Studio project files as MARMASM.
+//
+// ARM assembly language docs:
+// http://infocenter.arm.com/help/topic/com.arm.doc.dui0802b/index.html
+// Microsoft ARM assembler and assembly language docs:
+// https://docs.microsoft.com/en-us/cpp/assembler/arm/arm-assembler-reference
+#if defined(V8_COMPILER_IS_MSVC)
+#if defined(V8_TARGET_ARCH_ARM64) || defined(V8_TARGET_ARCH_ARM)
+#define V8_ASSEMBLER_IS_MARMASM
+#elif defined(V8_TARGET_ARCH_IA32) || defined(V8_TARGET_ARCH_X64)
+#define V8_ASSEMBLER_IS_MASM
+#else
+#error Unknown Windows assembler target architecture.
+#endif
+#endif
+
+// Name mangling.
+// Symbols are prefixed with an underscore on 32-bit architectures.
+#if defined(V8_OS_WIN) && !defined(V8_TARGET_ARCH_X64) && \
+ !defined(V8_TARGET_ARCH_ARM64)
+#define SYMBOL_PREFIX "_"
+#else
+#define SYMBOL_PREFIX ""
+#endif
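+
The prefix is consumed via ordinary printf-style string concatenation; for example (with a made-up symbol name, as an illustration only):

    #include <cstdio>

    // On 32-bit Windows this prints "PUBLIC _Builtins_Example"; on x64 and
    // arm64 targets it prints "PUBLIC Builtins_Example".
    void EmitExampleSymbol(FILE* fp) {
      fprintf(fp, "PUBLIC %s%s\n", SYMBOL_PREFIX, "Builtins_Example");
    }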
+
+// Platform-independent bits.
+// -----------------------------------------------------------------------------
+
+namespace {
+
+DataDirective PointerSizeDirective() {
+ if (kSystemPointerSize == 8) {
+ return kQuad;
+ } else {
+ CHECK_EQ(4, kSystemPointerSize);
+ return kLong;
+ }
+}
+
+} // namespace
+
+const char* DirectiveAsString(DataDirective directive) {
+#if defined(V8_OS_WIN) && defined(V8_ASSEMBLER_IS_MASM)
+ switch (directive) {
+ case kByte:
+ return "BYTE";
+ case kLong:
+ return "DWORD";
+ case kQuad:
+ return "QWORD";
+ default:
+ UNREACHABLE();
+ }
+#elif defined(V8_OS_WIN) && defined(V8_ASSEMBLER_IS_MARMASM)
+ switch (directive) {
+ case kByte:
+ return "DCB";
+ case kLong:
+ return "DCDU";
+ case kQuad:
+ return "DCQU";
+ default:
+ UNREACHABLE();
+ }
+#elif defined(V8_OS_AIX)
+ switch (directive) {
+ case kByte:
+ return ".byte";
+ case kLong:
+ return ".long";
+ case kQuad:
+ return ".llong";
+ default:
+ UNREACHABLE();
+ }
+#else
+ switch (directive) {
+ case kByte:
+ return ".byte";
+ case kLong:
+ return ".long";
+ case kQuad:
+ return ".quad";
+ case kOcta:
+ return ".octa";
+ }
+ UNREACHABLE();
+#endif
+}
+
+void EmbeddedFileWriter::PrepareBuiltinSourcePositionMap(Builtins* builtins) {
+ for (int i = 0; i < Builtins::builtin_count; i++) {
+ // Retrieve the SourcePositionTable and copy it.
+ Code code = builtins->builtin(i);
+ // Verify that the code object is still the "real code" and not a
+ // trampoline (which wouldn't have source positions).
+ DCHECK(!code->is_off_heap_trampoline());
+ std::vector<unsigned char> data(
+ code->SourcePositionTable()->GetDataStartAddress(),
+ code->SourcePositionTable()->GetDataEndAddress());
+ source_positions_[i] = data;
+ }
+}
+
+// V8_OS_MACOSX
+// Fuchsia target is explicitly excluded here for Mac hosts. This is to avoid
+// generating uncompilable assembly files for the Fuchsia target.
+// -----------------------------------------------------------------------------
+
+#if defined(V8_OS_MACOSX) && !defined(V8_TARGET_OS_FUCHSIA)
+
+void PlatformDependentEmbeddedFileWriter::SectionText() {
+ fprintf(fp_, ".text\n");
+}
+
+void PlatformDependentEmbeddedFileWriter::SectionData() {
+ fprintf(fp_, ".data\n");
+}
+
+void PlatformDependentEmbeddedFileWriter::SectionRoData() {
+ fprintf(fp_, ".const_data\n");
+}
+
+void PlatformDependentEmbeddedFileWriter::DeclareUint32(const char* name,
+ uint32_t value) {
+ DeclareSymbolGlobal(name);
+ DeclareLabel(name);
+ IndentedDataDirective(kLong);
+ fprintf(fp_, "%d", value);
+ Newline();
+}
+
+void PlatformDependentEmbeddedFileWriter::DeclarePointerToSymbol(
+ const char* name, const char* target) {
+ DeclareSymbolGlobal(name);
+ DeclareLabel(name);
+ fprintf(fp_, " %s _%s\n", DirectiveAsString(PointerSizeDirective()), target);
+}
+
+void PlatformDependentEmbeddedFileWriter::DeclareSymbolGlobal(
+ const char* name) {
+ // TODO(jgruber): Investigate switching to .globl. Using .private_extern
+ // prevents something along the compilation chain from messing with the
+ // embedded blob. Using .global here causes embedded blob hash verification
+ // failures at runtime.
+ fprintf(fp_, ".private_extern _%s\n", name);
+}
+
+void PlatformDependentEmbeddedFileWriter::AlignToCodeAlignment() {
+ fprintf(fp_, ".balign 32\n");
+}
+
+void PlatformDependentEmbeddedFileWriter::AlignToDataAlignment() {}
+
+void PlatformDependentEmbeddedFileWriter::Comment(const char* string) {
+ fprintf(fp_, "// %s\n", string);
+}
+
+void PlatformDependentEmbeddedFileWriter::DeclareLabel(const char* name) {
+ fprintf(fp_, "_%s:\n", name);
+}
+
+void PlatformDependentEmbeddedFileWriter::SourceInfo(int fileid, int line) {
+ fprintf(fp_, ".loc %d %d\n", fileid, line);
+}
+
+void PlatformDependentEmbeddedFileWriter::DeclareFunctionBegin(
+ const char* name) {
+ DeclareLabel(name);
+
+ // TODO(mvstanton): Investigate the proper incantations to mark the label as
+ // a function on OSX.
+}
+
+void PlatformDependentEmbeddedFileWriter::DeclareFunctionEnd(const char* name) {
+}
+
+int PlatformDependentEmbeddedFileWriter::HexLiteral(uint64_t value) {
+ return fprintf(fp_, "0x%" PRIx64, value);
+}
+
+void PlatformDependentEmbeddedFileWriter::FilePrologue() {}
+
+void PlatformDependentEmbeddedFileWriter::DeclareExternalFilename(
+ int fileid, const char* filename) {
+ fprintf(fp_, ".file %d \"%s\"\n", fileid, filename);
+}
+
+void PlatformDependentEmbeddedFileWriter::FileEpilogue() {}
+
+int PlatformDependentEmbeddedFileWriter::IndentedDataDirective(
+ DataDirective directive) {
+ return fprintf(fp_, " %s ", DirectiveAsString(directive));
+}
+
+// V8_OS_AIX
+// -----------------------------------------------------------------------------
+
+#elif defined(V8_OS_AIX)
+
+void PlatformDependentEmbeddedFileWriter::SectionText() {
+ fprintf(fp_, ".csect .text[PR]\n");
+}
+
+void PlatformDependentEmbeddedFileWriter::SectionData() {
+ fprintf(fp_, ".csect .data[RW]\n");
+}
+
+void PlatformDependentEmbeddedFileWriter::SectionRoData() {
+ fprintf(fp_, ".csect[RO]\n");
+}
+
+void PlatformDependentEmbeddedFileWriter::DeclareUint32(const char* name,
+ uint32_t value) {
+ DeclareSymbolGlobal(name);
+ fprintf(fp_, ".align 2\n");
+ fprintf(fp_, "%s:\n", name);
+ IndentedDataDirective(kLong);
+ fprintf(fp_, "%d\n", value);
+ Newline();
+}
+
+void PlatformDependentEmbeddedFileWriter::DeclarePointerToSymbol(
+ const char* name, const char* target) {
+ AlignToCodeAlignment();
+ DeclareLabel(name);
+ fprintf(fp_, " %s %s\n", DirectiveAsString(PointerSizeDirective()), target);
+ Newline();
+}
+
+void PlatformDependentEmbeddedFileWriter::DeclareSymbolGlobal(
+ const char* name) {
+ fprintf(fp_, ".globl %s\n", name);
+}
+
+void PlatformDependentEmbeddedFileWriter::AlignToCodeAlignment() {
+ fprintf(fp_, ".align 5\n");
+}
+
+void PlatformDependentEmbeddedFileWriter::AlignToDataAlignment() {}
+
+void PlatformDependentEmbeddedFileWriter::Comment(const char* string) {
+ fprintf(fp_, "// %s\n", string);
+}
+
+void PlatformDependentEmbeddedFileWriter::DeclareLabel(const char* name) {
+ DeclareSymbolGlobal(name);
+ fprintf(fp_, "%s:\n", name);
+}
+
+void PlatformDependentEmbeddedFileWriter::SourceInfo(int fileid, int line) {
+ fprintf(fp_, ".loc %d %d\n", fileid, line);
+}
+
+void PlatformDependentEmbeddedFileWriter::DeclareFunctionBegin(
+ const char* name) {
+ Newline();
+ DeclareSymbolGlobal(name);
+ fprintf(fp_, ".csect %s[DS]\n", name); // function descriptor
+ fprintf(fp_, "%s:\n", name);
+ fprintf(fp_, ".llong .%s, 0, 0\n", name);
+ SectionText();
+ fprintf(fp_, ".%s:\n", name);
+}
+
+void PlatformDependentEmbeddedFileWriter::DeclareFunctionEnd(const char* name) {
+}
+
+int PlatformDependentEmbeddedFileWriter::HexLiteral(uint64_t value) {
+ return fprintf(fp_, "0x%" PRIx64, value);
+}
+
+void PlatformDependentEmbeddedFileWriter::FilePrologue() {}
+
+void PlatformDependentEmbeddedFileWriter::DeclareExternalFilename(
+ int fileid, const char* filename) {
+ fprintf(fp_, ".file %d \"%s\"\n", fileid, filename);
+}
+
+void PlatformDependentEmbeddedFileWriter::FileEpilogue() {}
+
+int PlatformDependentEmbeddedFileWriter::IndentedDataDirective(
+ DataDirective directive) {
+ return fprintf(fp_, " %s ", DirectiveAsString(directive));
+}
+
+// V8_OS_WIN (MSVC)
+// -----------------------------------------------------------------------------
+
+#elif defined(V8_OS_WIN) && defined(V8_ASSEMBLER_IS_MASM)
+
+// For MSVC builds we emit assembly in MASM syntax.
+// See https://docs.microsoft.com/en-us/cpp/assembler/masm/directives-reference.
+
+void PlatformDependentEmbeddedFileWriter::SectionText() {
+ fprintf(fp_, ".CODE\n");
+}
+
+void PlatformDependentEmbeddedFileWriter::SectionData() {
+ fprintf(fp_, ".DATA\n");
+}
+
+void PlatformDependentEmbeddedFileWriter::SectionRoData() {
+ fprintf(fp_, ".CONST\n");
+}
+
+void PlatformDependentEmbeddedFileWriter::DeclareUint32(const char* name,
+ uint32_t value) {
+ DeclareSymbolGlobal(name);
+ fprintf(fp_, "%s%s %s %d\n", SYMBOL_PREFIX, name, DirectiveAsString(kLong),
+ value);
+}
+
+void PlatformDependentEmbeddedFileWriter::DeclarePointerToSymbol(
+ const char* name, const char* target) {
+ DeclareSymbolGlobal(name);
+ fprintf(fp_, "%s%s %s %s%s\n", SYMBOL_PREFIX, name,
+ DirectiveAsString(PointerSizeDirective()), SYMBOL_PREFIX, target);
+}
+
+void PlatformDependentEmbeddedFileWriter::DeclareSymbolGlobal(
+ const char* name) {
+ fprintf(fp_, "PUBLIC %s%s\n", SYMBOL_PREFIX, name);
+}
+
+void PlatformDependentEmbeddedFileWriter::AlignToCodeAlignment() {
+ // Diverges from other platforms due to compile error
+ // 'invalid combination with segment alignment'.
+ fprintf(fp_, "ALIGN 4\n");
+}
+
+void PlatformDependentEmbeddedFileWriter::AlignToDataAlignment() {}
+
+void PlatformDependentEmbeddedFileWriter::Comment(const char* string) {
+ fprintf(fp_, "; %s\n", string);
+}
+
+void PlatformDependentEmbeddedFileWriter::DeclareLabel(const char* name) {
+ fprintf(fp_, "%s%s LABEL %s\n", SYMBOL_PREFIX, name,
+ DirectiveAsString(kByte));
+}
+
+void PlatformDependentEmbeddedFileWriter::SourceInfo(int fileid, int line) {
+ // TODO(mvstanton): output source information for MSVC.
+ // Its syntax is #line <line> "<filename>"
+}
+
+void PlatformDependentEmbeddedFileWriter::DeclareFunctionBegin(
+ const char* name) {
+ fprintf(fp_, "%s%s PROC\n", SYMBOL_PREFIX, name);
+}
+
+void PlatformDependentEmbeddedFileWriter::DeclareFunctionEnd(const char* name) {
+ fprintf(fp_, "%s%s ENDP\n", SYMBOL_PREFIX, name);
+}
+
+int PlatformDependentEmbeddedFileWriter::HexLiteral(uint64_t value) {
+ return fprintf(fp_, "0%" PRIx64 "h", value);
+}
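+
+// Note: MASM hexadecimal literals take a leading digit and a trailing 'h'
+// instead of a C-style "0x" prefix; e.g. the value 0x1234 is emitted above
+// as 01234h.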
+
+void PlatformDependentEmbeddedFileWriter::FilePrologue() {
+#if !defined(V8_TARGET_ARCH_X64)
+ fprintf(fp_, ".MODEL FLAT\n");
+#endif
+}
+
+void PlatformDependentEmbeddedFileWriter::DeclareExternalFilename(
+ int fileid, const char* filename) {}
+
+void PlatformDependentEmbeddedFileWriter::FileEpilogue() {
+ fprintf(fp_, "END\n");
+}
+
+int PlatformDependentEmbeddedFileWriter::IndentedDataDirective(
+ DataDirective directive) {
+ return fprintf(fp_, " %s ", DirectiveAsString(directive));
+}
+
+#undef V8_ASSEMBLER_IS_MASM
+
+#elif defined(V8_OS_WIN) && defined(V8_ASSEMBLER_IS_MARMASM)
+
+// The AARCH64 ABI requires instructions to be 4-byte-aligned, and Windows
+// does not impose a stricter alignment requirement (see the TEXTAREA macro
+// of kxarm64.h in the Windows SDK), so code is 4-byte-aligned.
+// The data fields in the emitted assembly tend to be accessed with 8-byte
+// LDR instructions, so data is 8-byte-aligned.
+//
+// armasm64's warning A4228 states
+// Alignment value exceeds AREA alignment; alignment not guaranteed
+// To ensure that ALIGN directives are honored, their values are defined as
+// equal to their corresponding AREA's ALIGN attributes.
+
+#define ARM64_DATA_ALIGNMENT_POWER (3)
+#define ARM64_DATA_ALIGNMENT (1 << ARM64_DATA_ALIGNMENT_POWER)
+#define ARM64_CODE_ALIGNMENT_POWER (2)
+#define ARM64_CODE_ALIGNMENT (1 << ARM64_CODE_ALIGNMENT_POWER)
+
+void PlatformDependentEmbeddedFileWriter::SectionText() {
+ fprintf(fp_, " AREA |.text|, CODE, ALIGN=%d, READONLY\n",
+ ARM64_CODE_ALIGNMENT_POWER);
+}
+
+void PlatformDependentEmbeddedFileWriter::SectionData() {
+ fprintf(fp_, " AREA |.data|, DATA, ALIGN=%d, READWRITE\n",
+ ARM64_DATA_ALIGNMENT_POWER);
+}
+
+void PlatformDependentEmbeddedFileWriter::SectionRoData() {
+ fprintf(fp_, " AREA |.rodata|, DATA, ALIGN=%d, READONLY\n",
+ ARM64_DATA_ALIGNMENT_POWER);
+}
+
+void PlatformDependentEmbeddedFileWriter::DeclareUint32(const char* name,
+ uint32_t value) {
+ DeclareSymbolGlobal(name);
+ fprintf(fp_, "%s%s %s %d\n", SYMBOL_PREFIX, name, DirectiveAsString(kLong),
+ value);
+}
+
+void PlatformDependentEmbeddedFileWriter::DeclarePointerToSymbol(
+ const char* name, const char* target) {
+ DeclareSymbolGlobal(name);
+ fprintf(fp_, "%s%s %s %s%s\n", SYMBOL_PREFIX, name,
+ DirectiveAsString(PointerSizeDirective()), SYMBOL_PREFIX, target);
+}
+
+void PlatformDependentEmbeddedFileWriter::DeclareSymbolGlobal(
+ const char* name) {
+ fprintf(fp_, " EXPORT %s%s\n", SYMBOL_PREFIX, name);
+}
+
+void PlatformDependentEmbeddedFileWriter::AlignToCodeAlignment() {
+ fprintf(fp_, " ALIGN %d\n", ARM64_CODE_ALIGNMENT);
+}
+
+void PlatformDependentEmbeddedFileWriter::AlignToDataAlignment() {
+ fprintf(fp_, " ALIGN %d\n", ARM64_DATA_ALIGNMENT);
+}
+
+void PlatformDependentEmbeddedFileWriter::Comment(const char* string) {
+ fprintf(fp_, "; %s\n", string);
+}
+
+void PlatformDependentEmbeddedFileWriter::DeclareLabel(const char* name) {
+ fprintf(fp_, "%s%s\n", SYMBOL_PREFIX, name);
+}
+
+void PlatformDependentEmbeddedFileWriter::SourceInfo(int fileid, int line) {
+ // TODO(mvstanton): output source information for MSVC.
+ // Its syntax is #line <line> "<filename>"
+}
+
+void PlatformDependentEmbeddedFileWriter::DeclareFunctionBegin(
+ const char* name) {
+ fprintf(fp_, "%s%s FUNCTION\n", SYMBOL_PREFIX, name);
+}
+
+void PlatformDependentEmbeddedFileWriter::DeclareFunctionEnd(const char* name) {
+ fprintf(fp_, " ENDFUNC\n");
+}
+
+int PlatformDependentEmbeddedFileWriter::HexLiteral(uint64_t value) {
+ return fprintf(fp_, "0x%" PRIx64, value);
+}
+
+void PlatformDependentEmbeddedFileWriter::FilePrologue() {}
+
+void PlatformDependentEmbeddedFileWriter::DeclareExternalFilename(
+ int fileid, const char* filename) {}
+
+void PlatformDependentEmbeddedFileWriter::FileEpilogue() {
+ fprintf(fp_, " END\n");
+}
+
+int PlatformDependentEmbeddedFileWriter::IndentedDataDirective(
+ DataDirective directive) {
+ return fprintf(fp_, " %s ", DirectiveAsString(directive));
+}
+
+#undef V8_ASSEMBLER_IS_MARMASM
+#undef ARM64_DATA_ALIGNMENT_POWER
+#undef ARM64_DATA_ALIGNMENT
+#undef ARM64_CODE_ALIGNMENT_POWER
+#undef ARM64_CODE_ALIGNMENT
+
+// Everything but AIX, Windows with MSVC or armasm64, or OSX.
+// -----------------------------------------------------------------------------
+
+#else
+
+void PlatformDependentEmbeddedFileWriter::SectionText() {
+#ifdef OS_CHROMEOS
+ fprintf(fp_, ".section .text.hot.embedded\n");
+#else
+ fprintf(fp_, ".section .text\n");
+#endif
+}
+
+void PlatformDependentEmbeddedFileWriter::SectionData() {
+ fprintf(fp_, ".section .data\n");
+}
+
+void PlatformDependentEmbeddedFileWriter::SectionRoData() {
+#if defined(V8_OS_WIN)
+ fprintf(fp_, ".section .rdata\n");
+#else
+ fprintf(fp_, ".section .rodata\n");
+#endif
+}
+
+void PlatformDependentEmbeddedFileWriter::DeclareUint32(const char* name,
+ uint32_t value) {
+ DeclareSymbolGlobal(name);
+ DeclareLabel(name);
+ IndentedDataDirective(kLong);
+ fprintf(fp_, "%d", value);
+ Newline();
+}
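+
+// Illustrative output of DeclareUint32("foo", 128), assuming an empty
+// SYMBOL_PREFIX and that DirectiveAsString(kLong) maps to ".long":
+//   .global foo
+//   foo:
+//    .long 128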
+
+void PlatformDependentEmbeddedFileWriter::DeclarePointerToSymbol(
+ const char* name, const char* target) {
+ DeclareSymbolGlobal(name);
+ DeclareLabel(name);
+ fprintf(fp_, " %s %s%s\n", DirectiveAsString(PointerSizeDirective()),
+ SYMBOL_PREFIX, target);
+}
+
+void PlatformDependentEmbeddedFileWriter::DeclareSymbolGlobal(
+ const char* name) {
+ fprintf(fp_, ".global %s%s\n", SYMBOL_PREFIX, name);
+}
+
+void PlatformDependentEmbeddedFileWriter::AlignToCodeAlignment() {
+ fprintf(fp_, ".balign 32\n");
+}
+
+void PlatformDependentEmbeddedFileWriter::AlignToDataAlignment() {
+#if defined(V8_OS_WIN) && defined(V8_TARGET_ARCH_ARM64)
+ // On Windows ARM64, instruction "ldr xt,[xn,v8_Default_embedded_blob_]" is
+ // generated by clang-cl to load elements in v8_Default_embedded_blob_.
+ // The generated instruction has scale 3 which requires the load target to be
+ // aligned at 8 bytes (2^3).
+ fprintf(fp_, ".balign 8\n");
+#endif
+}
+
+void PlatformDependentEmbeddedFileWriter::Comment(const char* string) {
+ fprintf(fp_, "// %s\n", string);
+}
+
+void PlatformDependentEmbeddedFileWriter::DeclareLabel(const char* name) {
+ fprintf(fp_, "%s%s:\n", SYMBOL_PREFIX, name);
+}
+
+void PlatformDependentEmbeddedFileWriter::SourceInfo(int fileid, int line) {
+ fprintf(fp_, ".loc %d %d\n", fileid, line);
+}
+
+void PlatformDependentEmbeddedFileWriter::DeclareFunctionBegin(
+ const char* name) {
+ DeclareLabel(name);
+
+#if defined(V8_OS_WIN)
+#if defined(V8_TARGET_ARCH_ARM64)
+ // Windows ARM64 assembly is in GAS syntax, but ".type" is an invalid
+ // directive in PE/COFF for Windows.
+#else
+ // The directives for inserting debugging information on Windows come
+ // from the PE (Portable Executable) and COFF (Common Object File Format)
+ // standards. Documented here:
+ // https://docs.microsoft.com/en-us/windows/desktop/debug/pe-format
+ //
+ // .scl 2 means StorageClass external.
+ // .type 32 means Type Representation Function.
+ fprintf(fp_, ".def %s%s; .scl 2; .type 32; .endef;\n", SYMBOL_PREFIX, name);
+#endif
+#elif defined(V8_TARGET_ARCH_ARM) || defined(V8_TARGET_ARCH_ARM64)
+ // ELF format binaries on ARM use ".type <function name>, %function"
+ // to create a DWARF subprogram entry.
+ fprintf(fp_, ".type %s, %%function\n", name);
+#else
+ // Other ELF Format binaries use ".type <function name>, @function"
+ // to create a DWARF subprogram entry.
+ fprintf(fp_, ".type %s, @function\n", name);
+#endif
+}
+
+void PlatformDependentEmbeddedFileWriter::DeclareFunctionEnd(const char* name) {
+}
+
+int PlatformDependentEmbeddedFileWriter::HexLiteral(uint64_t value) {
+ return fprintf(fp_, "0x%" PRIx64, value);
+}
+
+void PlatformDependentEmbeddedFileWriter::FilePrologue() {}
+
+void PlatformDependentEmbeddedFileWriter::DeclareExternalFilename(
+ int fileid, const char* filename) {
+ // Replace any Windows style paths (backslashes) with forward
+ // slashes.
+ std::string fixed_filename(filename);
+ std::replace(fixed_filename.begin(), fixed_filename.end(), '\\', '/');
+ fprintf(fp_, ".file %d \"%s\"\n", fileid, fixed_filename.c_str());
+}
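+
+// Together with SourceInfo() above, these directives give the assembler
+// DWARF line information, e.g. (fileid 1, hypothetical filename):
+//   .file 1 "src/builtins/builtins-array-gen.cc"
+//   .loc 1 123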
+
+void PlatformDependentEmbeddedFileWriter::FileEpilogue() {}
+
+int PlatformDependentEmbeddedFileWriter::IndentedDataDirective(
+ DataDirective directive) {
+ return fprintf(fp_, " %s ", DirectiveAsString(directive));
+}
+
+#endif
+
+#undef SYMBOL_PREFIX
+#undef V8_COMPILER_IS_MSVC
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/snapshot/embedded-file-writer.h b/deps/v8/src/snapshot/embedded-file-writer.h
new file mode 100644
index 0000000000..55e134cd47
--- /dev/null
+++ b/deps/v8/src/snapshot/embedded-file-writer.h
@@ -0,0 +1,424 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_SNAPSHOT_EMBEDDED_FILE_WRITER_H_
+#define V8_SNAPSHOT_EMBEDDED_FILE_WRITER_H_
+
+#include <cstdio>
+#include <cstring>
+
+#include "src/globals.h"
+#include "src/snapshot/snapshot.h"
+#include "src/source-position-table.h"
+
+namespace v8 {
+namespace internal {
+
+enum DataDirective {
+ kByte,
+ kLong,
+ kQuad,
+ kOcta,
+};
+
+static constexpr char kDefaultEmbeddedVariant[] = "Default";
+
+// The platform-dependent logic for emitting assembly code for the generated
+// embedded.S file.
+class EmbeddedFileWriter;
+class PlatformDependentEmbeddedFileWriter final {
+ public:
+ void SetFile(FILE* fp) { fp_ = fp; }
+
+ void SectionText();
+ void SectionData();
+ void SectionRoData();
+
+ void AlignToCodeAlignment();
+ void AlignToDataAlignment();
+
+ void DeclareUint32(const char* name, uint32_t value);
+ void DeclarePointerToSymbol(const char* name, const char* target);
+
+ void DeclareLabel(const char* name);
+
+ void SourceInfo(int fileid, int line);
+ void DeclareFunctionBegin(const char* name);
+ void DeclareFunctionEnd(const char* name);
+
+ // Returns the number of printed characters.
+ int HexLiteral(uint64_t value);
+
+ void Comment(const char* string);
+ void Newline() { fprintf(fp_, "\n"); }
+
+ void FilePrologue();
+ void DeclareExternalFilename(int fileid, const char* filename);
+ void FileEpilogue();
+
+ int IndentedDataDirective(DataDirective directive);
+
+ FILE* fp() const { return fp_; }
+
+ private:
+ void DeclareSymbolGlobal(const char* name);
+
+ private:
+ FILE* fp_ = nullptr;
+};
+
+// When writing out compiled builtins to a file, detailed source-code
+// information about them can only be obtained by registration on the
+// isolate during compilation.
+class EmbeddedFileWriterInterface {
+ public:
+ // We maintain a database of filenames to synthetic IDs.
+ virtual int LookupOrAddExternallyCompiledFilename(const char* filename) = 0;
+ virtual const char* GetExternallyCompiledFilename(int index) const = 0;
+ virtual int GetExternallyCompiledFilenameCount() const = 0;
+
+ // The isolate will call the method below just prior to replacing the
+ // compiled builtin Code objects with trampolines.
+ virtual void PrepareBuiltinSourcePositionMap(Builtins* builtins) = 0;
+};
+
+// Generates the embedded.S file which is later compiled into the final v8
+// binary. Its contents are exported through two symbols:
+//
+// v8_<variant>_embedded_blob_ (intptr_t):
+// a pointer to the start of the embedded blob.
+// v8_<variant>_embedded_blob_size_ (uint32_t):
+// size of the embedded blob in bytes.
+//
+// The variant is usually "Default" but can be modified in multisnapshot builds.
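+//
+// A minimal consumption sketch (assumption: the real extern declarations
+// live elsewhere in V8; names follow the scheme above):
+//   extern "C" const uint8_t* v8_Default_embedded_blob_;
+//   extern "C" uint32_t v8_Default_embedded_blob_size_;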
+class EmbeddedFileWriter : public EmbeddedFileWriterInterface {
+ public:
+ int LookupOrAddExternallyCompiledFilename(const char* filename) override {
+ auto result = external_filenames_.find(filename);
+ if (result != external_filenames_.end()) {
+ return result->second;
+ }
+ int new_id =
+ ExternalFilenameIndexToId(static_cast<int>(external_filenames_.size()));
+ external_filenames_.insert(std::make_pair(filename, new_id));
+ external_filenames_by_index_.push_back(filename);
+ DCHECK_EQ(external_filenames_by_index_.size(), external_filenames_.size());
+ return new_id;
+ }
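+
+ // Example: the first distinct filename is assigned id 1 (see
+ // kFirstExternalFilenameId below), the next id 2, and so on; repeated
+ // lookups return the existing id. Note the map compares char pointers,
+ // not string contents, so callers must pass stable filename pointers.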
+
+ const char* GetExternallyCompiledFilename(int fileid) const override {
+ size_t index = static_cast<size_t>(ExternalFilenameIdToIndex(fileid));
+ DCHECK_GE(index, 0);
+ DCHECK_LT(index, external_filenames_by_index_.size());
+
+ return external_filenames_by_index_[index];
+ }
+
+ int GetExternallyCompiledFilenameCount() const override {
+ return static_cast<int>(external_filenames_.size());
+ }
+
+ void PrepareBuiltinSourcePositionMap(Builtins* builtins) override;
+
+ void SetEmbeddedFile(const char* embedded_src_path) {
+ embedded_src_path_ = embedded_src_path;
+ }
+
+ void SetEmbeddedVariant(const char* embedded_variant) {
+ embedded_variant_ = embedded_variant;
+ }
+
+ void WriteEmbedded(const i::EmbeddedData* blob) const {
+ MaybeWriteEmbeddedFile(blob);
+ }
+
+ private:
+ void MaybeWriteEmbeddedFile(const i::EmbeddedData* blob) const {
+ if (embedded_src_path_ == nullptr) return;
+
+ FILE* fp = GetFileDescriptorOrDie(embedded_src_path_);
+
+ PlatformDependentEmbeddedFileWriter writer;
+ writer.SetFile(fp);
+
+ WriteFilePrologue(&writer);
+ WriteExternalFilenames(&writer);
+ WriteMetadataSection(&writer, blob);
+ WriteInstructionStreams(&writer, blob);
+ WriteFileEpilogue(&writer, blob);
+
+ fclose(fp);
+ }
+
+ static FILE* GetFileDescriptorOrDie(const char* filename) {
+ FILE* fp = v8::base::OS::FOpen(filename, "wb");
+ if (fp == nullptr) {
+ i::PrintF("Unable to open file \"%s\" for writing.\n", filename);
+ exit(1);
+ }
+ return fp;
+ }
+
+ void WriteFilePrologue(PlatformDependentEmbeddedFileWriter* w) const {
+ w->Comment("Autogenerated file. Do not edit.");
+ w->Newline();
+ w->FilePrologue();
+ }
+
+ void WriteExternalFilenames(PlatformDependentEmbeddedFileWriter* w) const {
+ w->Comment(
+ "Source positions in the embedded blob refer to filenames by id.");
+ w->Comment("Assembly directives here map the id to a filename.");
+ w->Newline();
+
+ // Write external filenames.
+ int size = static_cast<int>(external_filenames_by_index_.size());
+ for (int i = 0; i < size; i++) {
+ w->DeclareExternalFilename(ExternalFilenameIndexToId(i),
+ external_filenames_by_index_[i]);
+ }
+ }
+
+ // Fairly arbitrary but should fit all symbol names.
+ static constexpr int kTemporaryStringLength = 256;
+
+ void WriteMetadataSection(PlatformDependentEmbeddedFileWriter* w,
+ const i::EmbeddedData* blob) const {
+ char embedded_blob_data_symbol[kTemporaryStringLength];
+ i::SNPrintF(i::Vector<char>(embedded_blob_data_symbol),
+ "v8_%s_embedded_blob_data_", embedded_variant_);
+
+ w->Comment("The embedded blob starts here. Metadata comes first, followed");
+ w->Comment("by builtin instruction streams.");
+ w->SectionText();
+ w->AlignToCodeAlignment();
+ w->DeclareLabel(embedded_blob_data_symbol);
+
+ WriteBinaryContentsAsInlineAssembly(w, blob->data(),
+ i::EmbeddedData::RawDataOffset());
+ }
+
+ void WriteBuiltin(PlatformDependentEmbeddedFileWriter* w,
+ const i::EmbeddedData* blob, const int builtin_id) const {
+ const bool is_default_variant =
+ std::strcmp(embedded_variant_, kDefaultEmbeddedVariant) == 0;
+
+ char builtin_symbol[kTemporaryStringLength];
+ if (is_default_variant) {
+ // Create nicer symbol names for the default mode.
+ i::SNPrintF(i::Vector<char>(builtin_symbol), "Builtins_%s",
+ i::Builtins::name(builtin_id));
+ } else {
+ i::SNPrintF(i::Vector<char>(builtin_symbol), "%s_Builtins_%s",
+ embedded_variant_, i::Builtins::name(builtin_id));
+ }
+
+ // Labels created here will show up in backtraces. We check in
+ // Isolate::SetEmbeddedBlob that the blob layout remains unchanged, i.e.
+ // that labels do not insert bytes into the middle of the blob byte
+ // stream.
+ w->DeclareFunctionBegin(builtin_symbol);
+ const std::vector<byte>& current_positions = source_positions_[builtin_id];
+
+ // The code below interleaves bytes of assembly code for the builtin
+ // function with source positions at the appropriate offsets.
+ Vector<const byte> vpos(current_positions.data(), current_positions.size());
+ v8::internal::SourcePositionTableIterator positions(
+ vpos, SourcePositionTableIterator::kExternalOnly);
+
+ const uint8_t* data = reinterpret_cast<const uint8_t*>(
+ blob->InstructionStartOfBuiltin(builtin_id));
+ uint32_t size = blob->PaddedInstructionSizeOfBuiltin(builtin_id);
+ uint32_t i = 0;
+ uint32_t next_offset = static_cast<uint32_t>(
+ positions.done() ? size : positions.code_offset());
+ while (i < size) {
+ if (i == next_offset) {
+ // Write source directive.
+ w->SourceInfo(positions.source_position().ExternalFileId(),
+ positions.source_position().ExternalLine());
+ positions.Advance();
+ next_offset = static_cast<uint32_t>(
+ positions.done() ? size : positions.code_offset());
+ }
+ CHECK_GE(next_offset, i);
+ WriteBinaryContentsAsInlineAssembly(w, data + i, next_offset - i);
+ i = next_offset;
+ }
+
+ w->DeclareFunctionEnd(builtin_symbol);
+ }
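+
+ // Note: on an ELF-style target, the interleaving produced by WriteBuiltin()
+ // above looks roughly like byte-chunk directives with a .loc emitted at
+ // each recorded source-position offset:
+ //   .octa 0x...,0x...
+ //   .loc 1 42
+ //   .octa 0x...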
+
+ void WriteInstructionStreams(PlatformDependentEmbeddedFileWriter* w,
+ const i::EmbeddedData* blob) const {
+ for (int i = 0; i < i::Builtins::builtin_count; i++) {
+ if (!blob->ContainsBuiltin(i)) continue;
+
+ WriteBuiltin(w, blob, i);
+ }
+ w->Newline();
+ }
+
+ void WriteFileEpilogue(PlatformDependentEmbeddedFileWriter* w,
+ const i::EmbeddedData* blob) const {
+ {
+ char embedded_blob_data_symbol[kTemporaryStringLength];
+ i::SNPrintF(i::Vector<char>(embedded_blob_data_symbol),
+ "v8_%s_embedded_blob_data_", embedded_variant_);
+
+ char embedded_blob_symbol[kTemporaryStringLength];
+ i::SNPrintF(i::Vector<char>(embedded_blob_symbol), "v8_%s_embedded_blob_",
+ embedded_variant_);
+
+ w->Comment("Pointer to the beginning of the embedded blob.");
+ w->SectionData();
+ w->AlignToDataAlignment();
+ w->DeclarePointerToSymbol(embedded_blob_symbol,
+ embedded_blob_data_symbol);
+ w->Newline();
+ }
+
+ {
+ char embedded_blob_size_symbol[kTemporaryStringLength];
+ i::SNPrintF(i::Vector<char>(embedded_blob_size_symbol),
+ "v8_%s_embedded_blob_size_", embedded_variant_);
+
+ w->Comment("The size of the embedded blob in bytes.");
+ w->SectionRoData();
+ w->DeclareUint32(embedded_blob_size_symbol, blob->size());
+ w->Newline();
+ }
+
+ w->FileEpilogue();
+ }
+
+#if defined(_MSC_VER) && !defined(__clang__)
+#define V8_COMPILER_IS_MSVC
+#endif
+
+#if defined(V8_COMPILER_IS_MSVC) || defined(V8_OS_AIX)
+ // Windows MASM doesn't have an .octa directive, use QWORDs instead.
+ // Note: MASM *really* does not like large data streams. It takes over 5
+ // minutes to assemble the ~350K lines of embedded.S produced when using
+ // BYTE directives in a debug build. QWORD produces roughly 120KLOC and
+ // reduces assembly time to ~40 seconds. Still terrible, but much better
+ // than before. See also: https://crbug.com/v8/8475.
+
+ // The GCC toolchain assembler on AIX doesn't have an .octa directive
+ // either; use .llong instead.
+
+ static constexpr DataDirective kByteChunkDirective = kQuad;
+ static constexpr int kByteChunkSize = 8;
+
+ static int WriteByteChunk(PlatformDependentEmbeddedFileWriter* w,
+ int current_line_length, const uint8_t* data) {
+ const uint64_t* quad_ptr = reinterpret_cast<const uint64_t*>(data);
+ return current_line_length + w->HexLiteral(*quad_ptr);
+ }
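+
+ // Under this configuration each chunk is a single 8-byte value printed via
+ // HexLiteral(), so an emitted line looks roughly like "QWORD 0...h" under
+ // MASM or ".llong 0x..." on AIX (the exact directive text comes from
+ // DirectiveAsString(kQuad)).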
+#else // defined(V8_COMPILER_IS_MSVC) || defined(V8_OS_AIX)
+ static constexpr DataDirective kByteChunkDirective = kOcta;
+ static constexpr int kByteChunkSize = 16;
+
+ static int WriteByteChunk(PlatformDependentEmbeddedFileWriter* w,
+ int current_line_length, const uint8_t* data) {
+ const uint64_t* quad_ptr1 = reinterpret_cast<const uint64_t*>(data);
+ const uint64_t* quad_ptr2 = reinterpret_cast<const uint64_t*>(data + 8);
+
+#ifdef V8_TARGET_BIG_ENDIAN
+ uint64_t part1 = *quad_ptr1;
+ uint64_t part2 = *quad_ptr2;
+#else
+ uint64_t part1 = *quad_ptr2;
+ uint64_t part2 = *quad_ptr1;
+#endif // V8_TARGET_BIG_ENDIAN
+
+ if (part1 != 0) {
+ current_line_length +=
+ fprintf(w->fp(), "0x%" PRIx64 "%016" PRIx64, part1, part2);
+ } else {
+ current_line_length += fprintf(w->fp(), "0x%" PRIx64, part2);
+ }
+ return current_line_length;
+ }
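+
+ // Worked example: for the data bytes 00 01 02 ... 0f on a little-endian
+ // target, part1 = 0x0f0e0d0c0b0a0908 (bytes 8..15) and part2 =
+ // 0x0706050403020100 (bytes 0..7), so the emitted literal is
+ // 0xf0e0d0c0b0a09080706050403020100, which the assembler stores back as
+ // the original byte sequence.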
+#endif // defined(V8_COMPILER_IS_MSVC) || defined(V8_OS_AIX)
+#undef V8_COMPILER_IS_MSVC
+
+ static int WriteDirectiveOrSeparator(PlatformDependentEmbeddedFileWriter* w,
+ int current_line_length,
+ DataDirective directive) {
+ int printed_chars;
+ if (current_line_length == 0) {
+ printed_chars = w->IndentedDataDirective(directive);
+ DCHECK_LT(0, printed_chars);
+ } else {
+ printed_chars = fprintf(w->fp(), ",");
+ DCHECK_EQ(1, printed_chars);
+ }
+ return current_line_length + printed_chars;
+ }
+
+ static int WriteLineEndIfNeeded(PlatformDependentEmbeddedFileWriter* w,
+ int current_line_length, int write_size) {
+ static const int kTextWidth = 100;
+ // Check if appending ",0xFF...FF" would force a line wrap. This check
+ // doesn't use the actual length of the string to be written, so it is
+ // slightly more conservative than strictly needed.
+ if (current_line_length + strlen(",0x") + write_size * 2 > kTextWidth) {
+ fprintf(w->fp(), "\n");
+ return 0;
+ } else {
+ return current_line_length;
+ }
+ }
+
+ static void WriteBinaryContentsAsInlineAssembly(
+ PlatformDependentEmbeddedFileWriter* w, const uint8_t* data,
+ uint32_t size) {
+ int current_line_length = 0;
+ uint32_t i = 0;
+
+ // Begin by writing out byte chunks.
+ for (; i + kByteChunkSize < size; i += kByteChunkSize) {
+ current_line_length = WriteDirectiveOrSeparator(w, current_line_length,
+ kByteChunkDirective);
+ current_line_length = WriteByteChunk(w, current_line_length, data + i);
+ current_line_length =
+ WriteLineEndIfNeeded(w, current_line_length, kByteChunkSize);
+ }
+ if (current_line_length != 0) w->Newline();
+ current_line_length = 0;
+
+ // Write any trailing bytes one-by-one.
+ for (; i < size; i++) {
+ current_line_length =
+ WriteDirectiveOrSeparator(w, current_line_length, kByte);
+ current_line_length += w->HexLiteral(data[i]);
+ current_line_length = WriteLineEndIfNeeded(w, current_line_length, 1);
+ }
+
+ if (current_line_length != 0) w->Newline();
+ }
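+
+ // For example, a 20-byte blob on a non-MSVC, non-AIX target is emitted as
+ // one 16-byte kOcta chunk followed by four trailing kByte values, roughly:
+ //   .octa 0x...
+ //   .byte 0x..,0x..,0x..,0x..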
+
+ static int ExternalFilenameIndexToId(int index) {
+ return kFirstExternalFilenameId + index;
+ }
+
+ static int ExternalFilenameIdToIndex(int id) {
+ return id - kFirstExternalFilenameId;
+ }
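+
+ // Example round trip: index 0 maps to id 1, and id 1 back to index 0.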
+
+ std::vector<byte> source_positions_[Builtins::builtin_count];
+
+ // In assembly directives, filename ids need to begin with 1.
+ static const int kFirstExternalFilenameId = 1;
+ std::map<const char*, int> external_filenames_;
+ std::vector<const char*> external_filenames_by_index_;
+
+ const char* embedded_src_path_ = nullptr;
+ const char* embedded_variant_ = kDefaultEmbeddedVariant;
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_SNAPSHOT_EMBEDDED_FILE_WRITER_H_
diff --git a/deps/v8/src/snapshot/macros.h b/deps/v8/src/snapshot/macros.h
deleted file mode 100644
index 8551281614..0000000000
--- a/deps/v8/src/snapshot/macros.h
+++ /dev/null
@@ -1,82 +0,0 @@
-// Copyright 2018 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_SNAPSHOT_MACROS_H_
-#define V8_SNAPSHOT_MACROS_H_
-
-#include "include/v8config.h"
-
-// .byte portability macros.
-
-#if defined(V8_OS_MACOSX) // MACOSX
-#define V8_ASM_MANGLE_LABEL "_"
-#define V8_ASM_RODATA_SECTION ".const_data\n"
-#define V8_ASM_TEXT_SECTION ".text\n"
-#define V8_ASM_DECLARE(NAME) ".private_extern " V8_ASM_MANGLE_LABEL NAME "\n"
-#elif defined(V8_OS_AIX) // AIX
-#define V8_ASM_RODATA_SECTION ".csect[RO]\n"
-#define V8_ASM_TEXT_SECTION ".csect .text[PR]\n"
-#define V8_ASM_MANGLE_LABEL ""
-#define V8_ASM_DECLARE(NAME) ".globl " V8_ASM_MANGLE_LABEL NAME "\n"
-#elif defined(V8_OS_WIN) // WIN
-#if defined(V8_TARGET_ARCH_X64)
-#define V8_ASM_MANGLE_LABEL ""
-#else
-#define V8_ASM_MANGLE_LABEL "_"
-#endif
-#define V8_ASM_RODATA_SECTION ".section .rodata\n"
-#define V8_ASM_TEXT_SECTION ".section .text\n"
-#define V8_ASM_DECLARE(NAME)
-#else // !MACOSX && !WIN && !AIX
-#define V8_ASM_MANGLE_LABEL ""
-#define V8_ASM_RODATA_SECTION ".section .rodata\n"
-#if defined(OS_CHROMEOS) // ChromeOS
-#define V8_ASM_TEXT_SECTION ".section .text.hot.embedded\n"
-#else
-#define V8_ASM_TEXT_SECTION ".section .text\n"
-#endif
-#if defined(V8_TARGET_ARCH_MIPS) || defined(V8_TARGET_ARCH_MIPS64)
-#define V8_ASM_DECLARE(NAME) ".global " V8_ASM_MANGLE_LABEL NAME "\n"
-#else
-#define V8_ASM_DECLARE(NAME) ".local " V8_ASM_MANGLE_LABEL NAME "\n"
-#endif
-#endif
-
-// Align to kCodeAlignment.
-#define V8_ASM_BALIGN32 ".balign 32\n"
-#define V8_ASM_LABEL(NAME) V8_ASM_MANGLE_LABEL NAME ":\n"
-
-// clang-format off
-#if defined(V8_OS_AIX)
-
-#define V8_EMBEDDED_TEXT_HEADER(LABEL) \
- __asm__(V8_ASM_DECLARE(#LABEL) \
- ".csect " #LABEL "[DS]\n" \
- #LABEL ":\n" \
- ".llong ." #LABEL ", TOC[tc0], 0\n" \
- V8_ASM_TEXT_SECTION \
- "." #LABEL ":\n");
-
-#define V8_EMBEDDED_RODATA_HEADER(LABEL) \
- __asm__(V8_ASM_RODATA_SECTION \
- V8_ASM_DECLARE(#LABEL) \
- ".align 5\n" \
- V8_ASM_LABEL(#LABEL));
-
-#else
-
-#define V8_EMBEDDED_TEXT_HEADER(LABEL) \
- __asm__(V8_ASM_TEXT_SECTION \
- V8_ASM_DECLARE(#LABEL) \
- V8_ASM_BALIGN32 \
- V8_ASM_LABEL(#LABEL));
-
-#define V8_EMBEDDED_RODATA_HEADER(LABEL) \
- __asm__(V8_ASM_RODATA_SECTION \
- V8_ASM_DECLARE(#LABEL) \
- V8_ASM_BALIGN32 \
- V8_ASM_LABEL(#LABEL));
-
-#endif // #if defined(V8_OS_AIX)
-#endif // V8_SNAPSHOT_MACROS_H_
diff --git a/deps/v8/src/snapshot/mksnapshot.cc b/deps/v8/src/snapshot/mksnapshot.cc
index 09db077694..98af2bf0e8 100644
--- a/deps/v8/src/snapshot/mksnapshot.cc
+++ b/deps/v8/src/snapshot/mksnapshot.cc
@@ -11,25 +11,17 @@
#include "src/base/platform/platform.h"
#include "src/flags.h"
#include "src/msan.h"
+#include "src/snapshot/embedded-file-writer.h"
#include "src/snapshot/natives.h"
#include "src/snapshot/partial-serializer.h"
#include "src/snapshot/snapshot.h"
#include "src/snapshot/startup-serializer.h"
+#include "src/source-position-table.h"
namespace {
-class SnapshotWriter {
- public:
- SnapshotWriter()
- : snapshot_cpp_path_(nullptr), snapshot_blob_path_(nullptr) {}
-
- void SetEmbeddedFile(const char* embedded_cpp_file) {
- embedded_cpp_path_ = embedded_cpp_file;
- }
-
- void SetEmbeddedVariant(const char* embedded_variant) {
- embedded_variant_ = embedded_variant;
- }
+class SnapshotFileWriter {
+ public:
void SetSnapshotFile(const char* snapshot_cpp_file) {
snapshot_cpp_path_ = snapshot_cpp_file;
}
@@ -49,10 +41,6 @@ class SnapshotWriter {
MaybeWriteStartupBlob(blob_vector);
}
- void WriteEmbedded(const i::EmbeddedData* blob) const {
- MaybeWriteEmbeddedFile(blob);
- }
-
private:
void MaybeWriteStartupBlob(const i::Vector<const i::byte>& blob) const {
if (!snapshot_blob_path_) return;
@@ -116,170 +104,6 @@ class SnapshotWriter {
fprintf(fp, "\n");
}
- void MaybeWriteEmbeddedFile(const i::EmbeddedData* blob) const {
- if (embedded_cpp_path_ == nullptr) return;
-
- FILE* fp = GetFileDescriptorOrDie(embedded_cpp_path_);
-
- WriteEmbeddedFilePrefix(fp);
- WriteEmbeddedFileData(fp, blob, embedded_variant_);
- WriteEmbeddedFileSuffix(fp, embedded_variant_);
-
- fclose(fp);
- }
-
- static void WriteEmbeddedFilePrefix(FILE* fp) {
- fprintf(fp, "// Autogenerated file. Do not edit.\n\n");
- fprintf(fp, "#include <cstdint>\n\n");
- fprintf(fp, "#include \"src/snapshot/macros.h\"\n\n");
- fprintf(fp, "namespace v8 {\n");
- fprintf(fp, "namespace internal {\n\n");
- fprintf(fp, "namespace {\n\n");
- }
-
- static void WriteEmbeddedFileSuffix(FILE* fp, const char* embedded_variant) {
- fprintf(fp, "} // namespace\n\n");
- fprintf(fp,
- "const uint8_t* %sEmbeddedBlob() { return "
- "v8_%s_embedded_blob_; }\n",
- embedded_variant, embedded_variant);
- fprintf(fp,
- "uint32_t %sEmbeddedBlobSize() { return "
- "v8_embedded_blob_size_; }\n\n",
- embedded_variant);
- fprintf(fp, "} // namespace internal\n");
- fprintf(fp, "} // namespace v8\n");
- }
-
- static void WriteEmbeddedFileData(FILE* fp, const i::EmbeddedData* blob,
- const char* embedded_variant) {
- fprintf(fp, "V8_EMBEDDED_TEXT_HEADER(v8_%s_embedded_blob_)\n",
- embedded_variant);
-#ifdef V8_OS_MACOSX
- // Note: On some platforms (observed on mac64), inserting labels into the
- // .byte stream causes the compiler to reorder symbols, invalidating stored
- // offsets.
- // We either need to avoid doing so, or stop relying on our own offset table
- // and directly reference symbols instead. But there is another complication
- // there since the chrome build process on mac verifies the order of symbols
- // present in the binary.
- // For now, the straight-forward solution seems to be to just emit a pure
- // .byte stream on OSX.
- WriteBinaryContentsAsInlineAssembly(fp, blob->data(), blob->size());
-#else
- WriteBinaryContentsAsInlineAssembly(fp, blob->data(),
- i::EmbeddedData::RawDataOffset());
- WriteBuiltins(fp, blob, embedded_variant);
-#endif
- fprintf(fp, "extern \"C\" const uint8_t v8_%s_embedded_blob_[];\n",
- embedded_variant);
- fprintf(fp, "static const uint32_t v8_embedded_blob_size_ = %d;\n\n",
- blob->size());
- }
-
- static void WriteBuiltins(FILE* fp, const i::EmbeddedData* blob,
- const char* embedded_variant) {
- const bool is_default_variant =
- std::strcmp(embedded_variant, "Default") == 0;
- for (int i = 0; i < i::Builtins::builtin_count; i++) {
- if (!blob->ContainsBuiltin(i)) continue;
-
- // Labels created here will show up in backtraces. We check in
- // Isolate::SetEmbeddedBlob that the blob layout remains unchanged, i.e.
- // that labels do not insert bytes into the middle of the blob byte
- // stream.
- if (is_default_variant) {
- // Create nicer symbol names for the default mode.
- fprintf(fp, "__asm__(V8_ASM_LABEL(\"Builtins_%s\"));\n",
- i::Builtins::name(i));
- } else {
- fprintf(fp, "__asm__(V8_ASM_LABEL(\"%s_Builtins_%s\"));\n",
- embedded_variant, i::Builtins::name(i));
- }
-
- WriteBinaryContentsAsInlineAssembly(
- fp,
- reinterpret_cast<const uint8_t*>(blob->InstructionStartOfBuiltin(i)),
- blob->PaddedInstructionSizeOfBuiltin(i));
- }
- fprintf(fp, "\n");
- }
-
- static int WriteOcta(FILE* fp, int current_line_length, const uint8_t* data) {
- const uint64_t* quad_ptr1 = reinterpret_cast<const uint64_t*>(data);
- const uint64_t* quad_ptr2 = reinterpret_cast<const uint64_t*>(data + 8);
-
-#ifdef V8_TARGET_BIG_ENDIAN
- uint64_t part1 = *quad_ptr1;
- uint64_t part2 = *quad_ptr2;
-#else
- uint64_t part1 = *quad_ptr2;
- uint64_t part2 = *quad_ptr1;
-#endif // V8_TARGET_BIG_ENDIAN
-
- if (part1 != 0) {
- current_line_length +=
- fprintf(fp, "0x%" PRIx64 "%016" PRIx64, part1, part2);
- } else {
- current_line_length += fprintf(fp, "0x%" PRIx64, part2);
- }
- return current_line_length;
- }
-
- static int WriteDirectiveOrSeparator(FILE* fp, int current_line_length,
- const char* directive) {
- int printed_chars;
- if (current_line_length == 0) {
- printed_chars = fprintf(fp, " \"%s ", directive);
- DCHECK_LT(0, printed_chars);
- } else {
- printed_chars = fprintf(fp, ",");
- DCHECK_EQ(1, printed_chars);
- }
- return current_line_length + printed_chars;
- }
-
- static int WriteLineEndIfNeeded(FILE* fp, int current_line_length,
- int write_size) {
- static const int kTextWidth = 80;
- // Check if adding ',0xFF...FF\n"' would force a line wrap. This doesn't use
- // the actual size of the string to be written to determine this so it's
- // more conservative than strictly needed.
- if (current_line_length + strlen(",0x\\n\"") + write_size * 2 >
- kTextWidth) {
- fprintf(fp, "\\n\"\n");
- return 0;
- } else {
- return current_line_length;
- }
- }
-
- static void WriteBinaryContentsAsInlineAssembly(FILE* fp, const uint8_t* data,
- uint32_t size) {
- int current_line_length = 0;
-
- fprintf(fp, "__asm__(\n");
- uint32_t i = 0;
- const uint32_t size_of_octa = 16;
- for (; i <= size - size_of_octa; i += size_of_octa) {
- current_line_length =
- WriteDirectiveOrSeparator(fp, current_line_length, ".octa");
- current_line_length = WriteOcta(fp, current_line_length, data + i);
- current_line_length =
- WriteLineEndIfNeeded(fp, current_line_length, size_of_octa);
- }
- if (current_line_length != 0) fprintf(fp, "\\n\"\n");
- current_line_length = 0;
- for (; i < size; i++) {
- current_line_length =
- WriteDirectiveOrSeparator(fp, current_line_length, ".byte");
- current_line_length += fprintf(fp, "0x%x", data[i]);
- current_line_length = WriteLineEndIfNeeded(fp, current_line_length, 1);
- }
- if (current_line_length != 0) fprintf(fp, "\\n\"\n");
- fprintf(fp, ");\n");
- }
-
static FILE* GetFileDescriptorOrDie(const char* filename) {
FILE* fp = v8::base::OS::FOpen(filename, "wb");
if (fp == nullptr) {
@@ -289,10 +113,8 @@ class SnapshotWriter {
return fp;
}
- const char* embedded_cpp_path_ = nullptr;
- const char* embedded_variant_ = "Default";
- const char* snapshot_cpp_path_;
- const char* snapshot_blob_path_;
+ const char* snapshot_cpp_path_ = nullptr;
+ const char* snapshot_blob_path_ = nullptr;
};
char* GetExtraCode(char* filename, const char* description) {
@@ -418,9 +240,7 @@ v8::StartupData WarmUpSnapshotDataBlob(v8::SnapshotCreator* snapshot_creator,
return result;
}
-void WriteEmbeddedFile(v8::SnapshotCreator* creator, SnapshotWriter* writer) {
- i::Isolate* isolate = reinterpret_cast<i::Isolate*>(creator->GetIsolate());
- isolate->PrepareEmbeddedBlobForSerialization();
+void WriteEmbeddedFile(i::EmbeddedFileWriter* writer) {
i::EmbeddedData embedded_blob = i::EmbeddedData::FromBlob();
writer->WriteEmbedded(&embedded_blob);
}
@@ -449,20 +269,20 @@ int main(int argc, char** argv) {
v8::V8::Initialize();
{
- SnapshotWriter writer;
- if (i::FLAG_startup_src) writer.SetSnapshotFile(i::FLAG_startup_src);
- if (i::FLAG_startup_blob) writer.SetStartupBlobFile(i::FLAG_startup_blob);
- if (i::FLAG_embedded_builtins) {
- if (i::FLAG_embedded_src) writer.SetEmbeddedFile(i::FLAG_embedded_src);
- if (i::FLAG_embedded_variant)
- writer.SetEmbeddedVariant(i::FLAG_embedded_variant);
- }
+ SnapshotFileWriter snapshot_writer;
+ snapshot_writer.SetSnapshotFile(i::FLAG_startup_src);
+ snapshot_writer.SetStartupBlobFile(i::FLAG_startup_blob);
+
+ i::EmbeddedFileWriter embedded_writer;
+ embedded_writer.SetEmbeddedFile(i::FLAG_embedded_src);
+ embedded_writer.SetEmbeddedVariant(i::FLAG_embedded_variant);
std::unique_ptr<char> embed_script(
GetExtraCode(argc >= 2 ? argv[1] : nullptr, "embedding"));
std::unique_ptr<char> warmup_script(
GetExtraCode(argc >= 3 ? argv[2] : nullptr, "warm up"));
+ i::DisableEmbeddedBlobRefcounting();
v8::StartupData blob;
{
v8::Isolate* isolate = v8::Isolate::Allocate();
@@ -476,14 +296,13 @@ int main(int argc, char** argv) {
: std::min(i::kMaximalCodeRangeSize / i::MB,
i::kMaxPCRelativeCodeRangeInMB);
i_isolate->heap()->ConfigureHeap(0, 0, code_range_size);
+ // The isolate contains data from builtin compilation that needs
+ // to be written out if builtins are embedded.
+ i_isolate->RegisterEmbeddedFileWriter(&embedded_writer);
}
v8::SnapshotCreator snapshot_creator(isolate);
if (i::FLAG_embedded_builtins) {
- // This process is a bit tricky since we might go on to make a second
- // snapshot if a warmup script is passed. In that case, create the first
- // snapshot without off-heap trampolines and only move code off-heap for
- // the warmed-up snapshot.
- if (!warmup_script) WriteEmbeddedFile(&snapshot_creator, &writer);
+ WriteEmbeddedFile(&embedded_writer);
}
blob = CreateSnapshotDataBlob(&snapshot_creator, embed_script.get());
}
@@ -492,17 +311,15 @@ int main(int argc, char** argv) {
CHECK(blob.raw_size > 0 && blob.data != nullptr);
v8::StartupData cold = blob;
v8::SnapshotCreator snapshot_creator(nullptr, &cold);
- if (i::FLAG_embedded_builtins) {
- WriteEmbeddedFile(&snapshot_creator, &writer);
- }
blob = WarmUpSnapshotDataBlob(&snapshot_creator, warmup_script.get());
delete[] cold.data;
}
CHECK(blob.data);
- writer.WriteSnapshot(blob);
+ snapshot_writer.WriteSnapshot(blob);
delete[] blob.data;
}
+ i::FreeCurrentEmbeddedBlob();
v8::V8::Dispose();
v8::V8::ShutdownPlatform();
diff --git a/deps/v8/src/snapshot/natives-common.cc b/deps/v8/src/snapshot/natives-common.cc
index e865498c7d..4cb7b5f0da 100644
--- a/deps/v8/src/snapshot/natives-common.cc
+++ b/deps/v8/src/snapshot/natives-common.cc
@@ -16,22 +16,9 @@ NativesExternalStringResource::NativesExternalStringResource(NativeType type,
: type_(type), index_(index) {
Vector<const char> source;
DCHECK_LE(0, index);
- switch (type_) {
- case CORE:
- DCHECK(index < Natives::GetBuiltinsCount());
- source = Natives::GetScriptSource(index);
- break;
- case EXTRAS:
- DCHECK(index < ExtraNatives::GetBuiltinsCount());
- source = ExtraNatives::GetScriptSource(index);
- break;
- case EXPERIMENTAL_EXTRAS:
- DCHECK(index < ExperimentalExtraNatives::GetBuiltinsCount());
- source = ExperimentalExtraNatives::GetScriptSource(index);
- break;
- default:
- UNREACHABLE();
- }
+ CHECK_EQ(EXTRAS, type_);
+ DCHECK(index < ExtraNatives::GetBuiltinsCount());
+ source = ExtraNatives::GetScriptSource(index);
data_ = source.start();
length_ = source.length();
}
diff --git a/deps/v8/src/snapshot/natives-external.cc b/deps/v8/src/snapshot/natives-external.cc
index ea2a9e6f84..c9177bbc99 100644
--- a/deps/v8/src/snapshot/natives-external.cc
+++ b/deps/v8/src/snapshot/natives-external.cc
@@ -33,7 +33,6 @@ class NativesStore {
}
int GetBuiltinsCount() { return static_cast<int>(native_ids_.size()); }
- int GetDebuggerCount() { return debugger_count_; }
Vector<const char> GetScriptSource(int index) {
return native_source_[index];
@@ -60,22 +59,17 @@ class NativesStore {
NativesStore* store = new NativesStore;
// We expect the libraries in the following format:
- // int: # of debugger sources.
+ // int: # of sources.
// 2N blobs: N pairs of source name + actual source.
- // then, repeat for non-debugger sources.
- int debugger_count = source->GetInt();
- for (int i = 0; i < debugger_count; ++i)
- store->ReadNameAndContentPair(source);
int library_count = source->GetInt();
for (int i = 0; i < library_count; ++i)
store->ReadNameAndContentPair(source);
- store->debugger_count_ = debugger_count;
return store;
}
private:
- NativesStore() : debugger_count_(0) {}
+ NativesStore() = default;
Vector<const char> NameFromId(const byte* id, int id_length) {
const char native[] = "native ";
@@ -103,7 +97,6 @@ class NativesStore {
std::vector<Vector<const char>> native_ids_;
std::vector<Vector<const char>> native_names_;
std::vector<Vector<const char>> native_source_;
- int debugger_count_;
DISALLOW_COPY_AND_ASSIGN(NativesStore);
};
@@ -140,12 +133,9 @@ static StartupData* natives_blob_ = nullptr;
* Read the Natives blob, as previously set by SetNativesFromFile.
*/
void ReadNatives() {
- if (natives_blob_ && NativesHolder<CORE>::empty()) {
+ if (natives_blob_ && NativesHolder<EXTRAS>::empty()) {
SnapshotByteSource bytes(natives_blob_->data, natives_blob_->raw_size);
- NativesHolder<CORE>::set(NativesStore::MakeFromScriptsSource(&bytes));
NativesHolder<EXTRAS>::set(NativesStore::MakeFromScriptsSource(&bytes));
- NativesHolder<EXPERIMENTAL_EXTRAS>::set(
- NativesStore::MakeFromScriptsSource(&bytes));
DCHECK(!bytes.HasMore());
}
}
@@ -170,9 +160,7 @@ void SetNativesFromFile(StartupData* natives_blob) {
* Release memory allocated by SetNativesFromFile.
*/
void DisposeNatives() {
- NativesHolder<CORE>::Dispose();
NativesHolder<EXTRAS>::Dispose();
- NativesHolder<EXPERIMENTAL_EXTRAS>::Dispose();
}
@@ -188,11 +176,6 @@ int NativesCollection<type>::GetBuiltinsCount() {
}
template<NativeType type>
-int NativesCollection<type>::GetDebuggerCount() {
- return NativesHolder<type>::get()->GetDebuggerCount();
-}
-
-template<NativeType type>
int NativesCollection<type>::GetIndex(const char* name) {
return NativesHolder<type>::get()->GetIndex(name);
}
@@ -216,14 +199,11 @@ Vector<const char> NativesCollection<type>::GetScriptsSource() {
// Explicit template instantiations.
#define INSTANTIATE_TEMPLATES(T) \
template int NativesCollection<T>::GetBuiltinsCount(); \
- template int NativesCollection<T>::GetDebuggerCount(); \
template int NativesCollection<T>::GetIndex(const char* name); \
template Vector<const char> NativesCollection<T>::GetScriptSource(int i); \
template Vector<const char> NativesCollection<T>::GetScriptName(int i); \
template Vector<const char> NativesCollection<T>::GetScriptsSource();
-INSTANTIATE_TEMPLATES(CORE)
INSTANTIATE_TEMPLATES(EXTRAS)
-INSTANTIATE_TEMPLATES(EXPERIMENTAL_EXTRAS)
#undef INSTANTIATE_TEMPLATES
} // namespace internal
diff --git a/deps/v8/src/snapshot/natives.h b/deps/v8/src/snapshot/natives.h
index 79f7db3922..a5701979f4 100644
--- a/deps/v8/src/snapshot/natives.h
+++ b/deps/v8/src/snapshot/natives.h
@@ -15,10 +15,7 @@ namespace v8 {
namespace internal {
enum NativeType {
- CORE,
EXTRAS,
- EXPERIMENTAL_EXTRAS,
- D8,
TEST
};
@@ -36,22 +33,13 @@ class V8_EXPORT_PRIVATE NativesCollection {
// Number of built-in scripts.
static int GetBuiltinsCount();
- // Number of debugger implementation scripts.
- static int GetDebuggerCount();
-
- // These are used to access built-in scripts. The debugger implementation
- // scripts have an index in the interval [0, GetDebuggerCount()). The
- // non-debugger scripts have an index in the interval [GetDebuggerCount(),
- // GetNativesCount()).
static int GetIndex(const char* name);
static Vector<const char> GetScriptSource(int index);
static Vector<const char> GetScriptName(int index);
static Vector<const char> GetScriptsSource();
};
-typedef NativesCollection<CORE> Natives;
typedef NativesCollection<EXTRAS> ExtraNatives;
-typedef NativesCollection<EXPERIMENTAL_EXTRAS> ExperimentalExtraNatives;
#ifdef V8_USE_EXTERNAL_STARTUP_DATA
@@ -70,19 +58,20 @@ class NativesExternalStringResource final
size_t length() const override { return length_; }
v8::String::ExternalOneByteStringResource* EncodeForSerialization() const {
- DCHECK(type_ == CORE || type_ == EXTRAS);
- intptr_t val = (index_ << 1) | ((type_ == CORE) ? 0 : 1);
- val = val << kPointerSizeLog2; // Pointer align.
+ DCHECK(type_ == EXTRAS);
+ intptr_t val = (index_ << 1) | 1;
+ val = val << kSystemPointerSizeLog2; // Pointer align.
return reinterpret_cast<v8::String::ExternalOneByteStringResource*>(val);
}
// Decode from serialization.
static NativesExternalStringResource* DecodeForDeserialization(
const v8::String::ExternalOneByteStringResource* encoded) {
- intptr_t val = reinterpret_cast<intptr_t>(encoded) >> kPointerSizeLog2;
- NativeType type = (val & 1) ? EXTRAS : CORE;
+ intptr_t val =
+ reinterpret_cast<intptr_t>(encoded) >> kSystemPointerSizeLog2;
+ DCHECK(val & 1);
int index = static_cast<int>(val >> 1);
- return new NativesExternalStringResource(type, index);
+ return new NativesExternalStringResource(EXTRAS, index);
}
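+
+ // Worked example of the encode/decode round trip above: index 3 encodes to
+ // ((3 << 1) | 1) << kSystemPointerSizeLog2 == 0x38 on 64-bit targets;
+ // decoding shifts back to 7, checks the tag bit, and recovers index 3.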
private:
diff --git a/deps/v8/src/snapshot/object-deserializer.cc b/deps/v8/src/snapshot/object-deserializer.cc
index 8935c0ef89..4d6e736223 100644
--- a/deps/v8/src/snapshot/object-deserializer.cc
+++ b/deps/v8/src/snapshot/object-deserializer.cc
@@ -5,14 +5,17 @@
#include "src/snapshot/object-deserializer.h"
#include "src/assembler-inl.h"
-#include "src/code-stubs.h"
#include "src/isolate.h"
#include "src/objects.h"
+#include "src/objects/slots.h"
#include "src/snapshot/code-serializer.h"
namespace v8 {
namespace internal {
+ObjectDeserializer::ObjectDeserializer(const SerializedCodeData* data)
+ : Deserializer(data, true) {}
+
MaybeHandle<SharedFunctionInfo>
ObjectDeserializer::DeserializeSharedFunctionInfo(
Isolate* isolate, const SerializedCodeData* data, Handle<String> source) {
@@ -20,12 +23,6 @@ ObjectDeserializer::DeserializeSharedFunctionInfo(
d.AddAttachedObject(source);
- Vector<const uint32_t> code_stub_keys = data->CodeStubKeys();
- for (int i = 0; i < code_stub_keys.length(); i++) {
- d.AddAttachedObject(
- CodeStub::GetCode(isolate, code_stub_keys[i]).ToHandleChecked());
- }
-
Handle<HeapObject> result;
return d.Deserialize(isolate).ToHandle(&result)
? Handle<SharedFunctionInfo>::cast(result)
@@ -42,10 +39,13 @@ MaybeHandle<HeapObject> ObjectDeserializer::Deserialize(Isolate* isolate) {
Handle<HeapObject> result;
{
DisallowHeapAllocation no_gc;
- Object* root;
- VisitRootPointer(Root::kPartialSnapshotCache, nullptr, &root);
+ Object root;
+ VisitRootPointer(Root::kPartialSnapshotCache, nullptr,
+ FullObjectSlot(&root));
DeserializeDeferredObjects();
- FlushICacheForNewCodeObjectsAndRecordEmbeddedObjects();
+ FlushICache();
+ LinkAllocationSites();
+ LogNewMapEvents();
result = handle(HeapObject::cast(root), isolate);
Rehash();
allocator()->RegisterDeserializedObjectsForBlackAllocation();
@@ -54,10 +54,9 @@ MaybeHandle<HeapObject> ObjectDeserializer::Deserialize(Isolate* isolate) {
return scope.CloseAndEscape(result);
}
-void ObjectDeserializer::
- FlushICacheForNewCodeObjectsAndRecordEmbeddedObjects() {
+void ObjectDeserializer::FlushICache() {
DCHECK(deserializing_user_code());
- for (Code* code : new_code_objects()) {
+ for (Code code : new_code_objects()) {
// Record all references to embedded objects in the new code object.
WriteBarrierForCode(code);
Assembler::FlushICache(code->raw_instruction_start(),
@@ -72,7 +71,8 @@ void ObjectDeserializer::CommitPostProcessedObjects() {
for (Handle<String> string : new_internalized_strings()) {
DisallowHeapAllocation no_gc;
StringTableInsertionKey key(*string);
- DCHECK_NULL(StringTable::ForwardStringIfExists(isolate(), &key, *string));
+ DCHECK(
+ StringTable::ForwardStringIfExists(isolate(), &key, *string).is_null());
StringTable::AddKeyNoResize(isolate(), &key);
}
@@ -81,19 +81,21 @@ void ObjectDeserializer::CommitPostProcessedObjects() {
for (Handle<Script> script : new_scripts()) {
// Assign a new script id to avoid collision.
script->set_id(isolate()->heap()->NextScriptId());
- LOG(isolate(),
- ScriptEvent(Logger::ScriptEventType::kDeserialize, script->id()));
- LOG(isolate(), ScriptDetails(*script));
+ LogScriptEvents(*script);
// Add script to list.
Handle<WeakArrayList> list = factory->script_list();
list = WeakArrayList::AddToEnd(isolate(), list,
MaybeObjectHandle::Weak(script));
heap->SetRootScriptList(*list);
}
+}
+void ObjectDeserializer::LinkAllocationSites() {
+ DisallowHeapAllocation no_gc;
+ Heap* heap = isolate()->heap();
// Allocation sites are present in the snapshot, and must be linked into
// a list at deserialization time.
- for (AllocationSite* site : new_allocation_sites()) {
+ for (AllocationSite site : new_allocation_sites()) {
if (!site->HasWeakNext()) continue;
// TODO(mvstanton): consider treating the heap()->allocation_sites_list()
// as a (weak) root. If this root is relocated correctly, this becomes
diff --git a/deps/v8/src/snapshot/object-deserializer.h b/deps/v8/src/snapshot/object-deserializer.h
index 1e8bf1b649..ad7fecb021 100644
--- a/deps/v8/src/snapshot/object-deserializer.h
+++ b/deps/v8/src/snapshot/object-deserializer.h
@@ -14,19 +14,19 @@ class SerializedCodeData;
class SharedFunctionInfo;
// Deserializes the object graph rooted at a given object.
-class ObjectDeserializer final : public Deserializer<> {
+class ObjectDeserializer final : public Deserializer {
public:
static MaybeHandle<SharedFunctionInfo> DeserializeSharedFunctionInfo(
Isolate* isolate, const SerializedCodeData* data, Handle<String> source);
private:
- explicit ObjectDeserializer(const SerializedCodeData* data)
- : Deserializer(data, true) {}
+ explicit ObjectDeserializer(const SerializedCodeData* data);
// Deserialize an object graph. Fail gracefully.
MaybeHandle<HeapObject> Deserialize(Isolate* isolate);
- void FlushICacheForNewCodeObjectsAndRecordEmbeddedObjects();
+ void FlushICache();
+ void LinkAllocationSites();
void CommitPostProcessedObjects();
};
diff --git a/deps/v8/src/snapshot/partial-deserializer.cc b/deps/v8/src/snapshot/partial-deserializer.cc
index a772594636..769b46e468 100644
--- a/deps/v8/src/snapshot/partial-deserializer.cc
+++ b/deps/v8/src/snapshot/partial-deserializer.cc
@@ -5,7 +5,7 @@
#include "src/snapshot/partial-deserializer.h"
#include "src/api-inl.h"
-#include "src/heap/heap-inl.h"
+#include "src/objects/slots.h"
#include "src/snapshot/snapshot.h"
namespace v8 {
@@ -41,8 +41,8 @@ MaybeHandle<Object> PartialDeserializer::Deserialize(
// code objects were unserialized
CodeSpace* code_space = isolate->heap()->code_space();
Address start_address = code_space->top();
- Object* root;
- VisitRootPointer(Root::kPartialSnapshotCache, nullptr, &root);
+ Object root;
+ VisitRootPointer(Root::kPartialSnapshotCache, nullptr, FullObjectSlot(&root));
DeserializeDeferredObjects();
DeserializeEmbedderFields(embedder_fields_deserializer);
@@ -54,6 +54,7 @@ MaybeHandle<Object> PartialDeserializer::Deserialize(
CHECK_EQ(start_address, code_space->top());
if (FLAG_rehash_snapshot && can_rehash()) Rehash();
+ LogNewMapEvents();
return Handle<Object>(root, isolate);
}
diff --git a/deps/v8/src/snapshot/partial-deserializer.h b/deps/v8/src/snapshot/partial-deserializer.h
index bbc55b7b51..d4362f5f23 100644
--- a/deps/v8/src/snapshot/partial-deserializer.h
+++ b/deps/v8/src/snapshot/partial-deserializer.h
@@ -15,7 +15,7 @@ class Context;
// Deserializes the context-dependent object graph rooted at a given object.
// The PartialDeserializer is not expected to deserialize any code objects.
-class PartialDeserializer final : public Deserializer<> {
+class PartialDeserializer final : public Deserializer {
public:
static MaybeHandle<Context> DeserializeContext(
Isolate* isolate, const SnapshotData* data, bool can_rehash,
diff --git a/deps/v8/src/snapshot/partial-serializer.cc b/deps/v8/src/snapshot/partial-serializer.cc
index 1f3cbc5521..a3b4f04161 100644
--- a/deps/v8/src/snapshot/partial-serializer.cc
+++ b/deps/v8/src/snapshot/partial-serializer.cc
@@ -7,7 +7,9 @@
#include "src/api-inl.h"
#include "src/math-random.h"
+#include "src/microtask-queue.h"
#include "src/objects-inl.h"
+#include "src/objects/slots.h"
namespace v8 {
namespace internal {
@@ -18,8 +20,7 @@ PartialSerializer::PartialSerializer(
: Serializer(isolate),
startup_serializer_(startup_serializer),
serialize_embedder_fields_(callback),
- can_be_rehashed_(true),
- context_(nullptr) {
+ can_be_rehashed_(true) {
InitializeCodeAddressMap();
allocator()->UseCustomChunkSize(FLAG_serialization_chunk_size);
}
@@ -28,10 +29,11 @@ PartialSerializer::~PartialSerializer() {
OutputStatistics("PartialSerializer");
}
-void PartialSerializer::Serialize(Context** o, bool include_global_proxy) {
+void PartialSerializer::Serialize(Context* o, bool include_global_proxy) {
context_ = *o;
DCHECK(context_->IsNativeContext());
- reference_map()->AddAttachedReference(context_->global_proxy());
+ reference_map()->AddAttachedReference(
+ reinterpret_cast<void*>(context_->global_proxy()->ptr()));
// The bootstrap snapshot has a code-stub context. When serializing the
// partial snapshot, it is chained into the weak context list on the isolate
// and it's next context pointer may point to the code-stub context. Clear
@@ -43,37 +45,47 @@ void PartialSerializer::Serialize(Context** o, bool include_global_proxy) {
// Reset math random cache to get fresh random numbers.
MathRandom::ResetContext(context_);
- VisitRootPointer(Root::kPartialSnapshotCache, nullptr,
- reinterpret_cast<Object**>(o));
+#ifdef DEBUG
+ MicrotaskQueue* microtask_queue =
+ context_->native_context()->microtask_queue();
+ DCHECK_EQ(0, microtask_queue->size());
+ DCHECK(!microtask_queue->HasMicrotasksSuppressions());
+ DCHECK_EQ(0, microtask_queue->GetMicrotasksScopeDepth());
+ DCHECK(microtask_queue->DebugMicrotasksScopeDepthIsZero());
+#endif
+ context_->native_context()->set_microtask_queue(nullptr);
+
+ VisitRootPointer(Root::kPartialSnapshotCache, nullptr, FullObjectSlot(o));
SerializeDeferredObjects();
- SerializeEmbedderFields();
+
+ // Add section for embedder-serialized embedder fields.
+ if (!embedder_fields_sink_.data()->empty()) {
+ sink_.Put(kEmbedderFieldsData, "embedder fields data");
+ sink_.Append(embedder_fields_sink_);
+ sink_.Put(kSynchronize, "Finished with embedder fields data");
+ }
+
Pad();
}
-void PartialSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
+void PartialSerializer::SerializeObject(HeapObject obj, HowToCode how_to_code,
WhereToPoint where_to_point, int skip) {
DCHECK(!ObjectIsBytecodeHandler(obj)); // Only referenced in dispatch table.
- if (SerializeBuiltinReference(obj, how_to_code, where_to_point, skip)) {
- return;
- }
if (SerializeHotObject(obj, how_to_code, where_to_point, skip)) return;
- RootIndex root_index;
- if (root_index_map()->Lookup(obj, &root_index)) {
- PutRoot(root_index, obj, how_to_code, where_to_point, skip);
- return;
- }
+ if (SerializeRoot(obj, how_to_code, where_to_point, skip)) return;
if (SerializeBackReference(obj, how_to_code, where_to_point, skip)) return;
- if (ShouldBeInThePartialSnapshotCache(obj)) {
- FlushSkip(skip);
+ if (startup_serializer_->SerializeUsingReadOnlyObjectCache(
+ &sink_, obj, how_to_code, where_to_point, skip)) {
+ return;
+ }
- int cache_index = startup_serializer_->PartialSnapshotCacheIndex(obj);
- sink_.Put(kPartialSnapshotCache + how_to_code + where_to_point,
- "PartialSnapshotCache");
- sink_.PutInt(cache_index, "partial_snapshot_cache_index");
+ if (ShouldBeInThePartialSnapshotCache(obj)) {
+ startup_serializer_->SerializeUsingPartialSnapshotCache(
+ &sink_, obj, how_to_code, where_to_point, skip);
return;
}
@@ -94,18 +106,15 @@ void PartialSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
// Clear literal boilerplates and feedback.
if (obj->IsFeedbackVector()) FeedbackVector::cast(obj)->ClearSlots(isolate());
- if (obj->IsJSObject()) {
- JSObject* jsobj = JSObject::cast(obj);
- if (jsobj->GetEmbedderFieldCount() > 0) {
- DCHECK_NOT_NULL(serialize_embedder_fields_.callback);
- embedder_field_holders_.push_back(jsobj);
- }
+ if (SerializeJSObjectWithEmbedderFields(obj, how_to_code, where_to_point)) {
+ return;
}
if (obj->IsJSFunction()) {
// Unconditionally reset the JSFunction to its SFI's code, since we can't
// serialize optimized code anyway.
- JSFunction* closure = JSFunction::cast(obj);
+ JSFunction closure = JSFunction::cast(obj);
+ closure->ResetIfBytecodeFlushed();
if (closure->is_compiled()) closure->set_code(closure->shared()->GetCode());
}
@@ -116,7 +125,7 @@ void PartialSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
serializer.Serialize();
}
-bool PartialSerializer::ShouldBeInThePartialSnapshotCache(HeapObject* o) {
+bool PartialSerializer::ShouldBeInThePartialSnapshotCache(HeapObject o) {
// Scripts should be referred only through shared function infos. We can't
// allow them to be part of the partial snapshot because they contain a
// unique ID, and deserializing several partial snapshots containing script
@@ -129,38 +138,97 @@ bool PartialSerializer::ShouldBeInThePartialSnapshotCache(HeapObject* o) {
.fixed_cow_array_map();
}
-void PartialSerializer::SerializeEmbedderFields() {
- if (embedder_field_holders_.empty()) return;
+namespace {
+bool DataIsEmpty(const StartupData& data) { return data.raw_size == 0; }
+} // anonymous namespace
+
+bool PartialSerializer::SerializeJSObjectWithEmbedderFields(
+ Object obj, HowToCode how_to_code, WhereToPoint where_to_point) {
+ if (!obj->IsJSObject()) return false;
+ JSObject js_obj = JSObject::cast(obj);
+ int embedder_fields_count = js_obj->GetEmbedderFieldCount();
+ if (embedder_fields_count == 0) return false;
+ CHECK_GT(embedder_fields_count, 0);
+ DCHECK_NOT_NULL(serialize_embedder_fields_.callback);
+ DCHECK(!js_obj->NeedsRehashing());
+
DisallowHeapAllocation no_gc;
DisallowJavascriptExecution no_js(isolate());
DisallowCompilation no_compile(isolate());
- DCHECK_NOT_NULL(serialize_embedder_fields_.callback);
- sink_.Put(kEmbedderFieldsData, "embedder fields data");
- while (!embedder_field_holders_.empty()) {
- HandleScope scope(isolate());
- Handle<JSObject> obj(embedder_field_holders_.back(), isolate());
- embedder_field_holders_.pop_back();
- SerializerReference reference = reference_map()->LookupReference(*obj);
- DCHECK(reference.is_back_reference());
- int embedder_fields_count = obj->GetEmbedderFieldCount();
- for (int i = 0; i < embedder_fields_count; i++) {
- if (obj->GetEmbedderField(i)->IsHeapObject()) continue;
+ HandleScope scope(isolate());
+ Handle<JSObject> obj_handle(js_obj, isolate());
+ v8::Local<v8::Object> api_obj = v8::Utils::ToLocal(obj_handle);
+
+ std::vector<EmbedderDataSlot::RawData> original_embedder_values;
+ std::vector<StartupData> serialized_data;
+
+  // 1) Iterate embedder fields. Hold onto the original values of the fields.
+ // Ignore references to heap objects since these are to be handled by the
+ // serializer. For aligned pointers, call the serialize callback. Hold
+ // onto the result.
+ for (int i = 0; i < embedder_fields_count; i++) {
+ EmbedderDataSlot embedder_data_slot(js_obj, i);
+ original_embedder_values.emplace_back(embedder_data_slot.load_raw(no_gc));
+ Object object = embedder_data_slot.load_tagged();
+ if (object->IsHeapObject()) {
+ DCHECK(isolate()->heap()->Contains(HeapObject::cast(object)));
+ serialized_data.push_back({nullptr, 0});
+ } else {
StartupData data = serialize_embedder_fields_.callback(
- v8::Utils::ToLocal(obj), i, serialize_embedder_fields_.data);
- sink_.Put(kNewObject + reference.space(), "embedder field holder");
- PutBackReference(*obj, reference);
- sink_.PutInt(i, "embedder field index");
- sink_.PutInt(data.raw_size, "embedder fields data size");
- sink_.PutRaw(reinterpret_cast<const byte*>(data.data), data.raw_size,
- "embedder fields data");
- delete[] data.data;
+ api_obj, i, serialize_embedder_fields_.data);
+ serialized_data.push_back(data);
}
}
- sink_.Put(kSynchronize, "Finished with embedder fields data");
+
+ // 2) Embedder fields for which the embedder callback produced non-zero
+ // serialized data should be considered aligned pointers to objects owned
+  //    by the embedder. Clear these memory addresses to avoid non-determinism
+  //    in the snapshot. This is done separately from step 1 so as not to
+  //    interleave with embedder callbacks.
+ for (int i = 0; i < embedder_fields_count; i++) {
+ if (!DataIsEmpty(serialized_data[i])) {
+ EmbedderDataSlot(js_obj, i).store_raw({kNullAddress}, no_gc);
+ }
+ }
+
+ // 3) Serialize the object. References from embedder fields to heap objects or
+ // smis are serialized regularly.
+ ObjectSerializer(this, js_obj, &sink_, how_to_code, where_to_point)
+ .Serialize();
+
+ // 4) Obtain back reference for the serialized object.
+ SerializerReference reference =
+ reference_map()->LookupReference(reinterpret_cast<void*>(js_obj->ptr()));
+ DCHECK(reference.is_back_reference());
+
+ // 5) Write data returned by the embedder callbacks into a separate sink,
+ // headed by the back reference. Restore the original embedder fields.
+ for (int i = 0; i < embedder_fields_count; i++) {
+ StartupData data = serialized_data[i];
+ if (DataIsEmpty(data)) continue;
+ // Restore original values from cleared fields.
+ EmbedderDataSlot(js_obj, i).store_raw(original_embedder_values[i], no_gc);
+ embedder_fields_sink_.Put(kNewObject + reference.space(),
+ "embedder field holder");
+ embedder_fields_sink_.PutInt(reference.chunk_index(), "BackRefChunkIndex");
+ embedder_fields_sink_.PutInt(reference.chunk_offset(),
+ "BackRefChunkOffset");
+ embedder_fields_sink_.PutInt(i, "embedder field index");
+ embedder_fields_sink_.PutInt(data.raw_size, "embedder fields data size");
+ embedder_fields_sink_.PutRaw(reinterpret_cast<const byte*>(data.data),
+ data.raw_size, "embedder fields data");
+ delete[] data.data;
+ }
+
+  // 6) The content of the separate sink is eventually appended to the default
+  //    sink. This ensures that during deserialization, we call the deserializer
+ // callback at the end, and can guarantee that the deserialized objects are
+ // in a consistent state. See PartialSerializer::Serialize.
+ return true;
}
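
The steps above define the contract for the embedder callback (v8::SerializeEmbedderFieldsCallback, a public-API alias of v8::SerializeInternalFieldsCallback): it is invoked only for fields that do not hold heap objects, and the serializer takes ownership of the returned buffer and delete[]s it in step 5. A minimal sketch of a conforming callback, assuming a hypothetical EmbedderState struct stored as an aligned pointer in field 0:

    #include <v8.h>
    #include <cstring>

    struct EmbedderState { int version; };  // hypothetical embedder data

    v8::StartupData SerializeEmbedderField(v8::Local<v8::Object> holder,
                                           int index, void* data) {
      if (index != 0) return {nullptr, 0};  // empty data: field is left to V8
      auto* state = static_cast<EmbedderState*>(
          holder->GetAlignedPointerFromInternalField(0));
      // The serializer delete[]s this buffer after writing it out (step 5).
      char* payload = new char[sizeof(EmbedderState)];
      std::memcpy(payload, state, sizeof(EmbedderState));
      return {payload, static_cast<int>(sizeof(EmbedderState))};
    }
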
-void PartialSerializer::CheckRehashability(HeapObject* obj) {
+void PartialSerializer::CheckRehashability(HeapObject obj) {
if (!can_be_rehashed_) return;
if (!obj->NeedsRehashing()) return;
if (obj->CanBeRehashed()) return;
diff --git a/deps/v8/src/snapshot/partial-serializer.h b/deps/v8/src/snapshot/partial-serializer.h
index 3225b004cb..dca0588a90 100644
--- a/deps/v8/src/snapshot/partial-serializer.h
+++ b/deps/v8/src/snapshot/partial-serializer.h
@@ -6,6 +6,7 @@
#define V8_SNAPSHOT_PARTIAL_SERIALIZER_H_
#include "src/address-map.h"
+#include "src/contexts.h"
#include "src/snapshot/serializer.h"
namespace v8 {
@@ -13,7 +14,7 @@ namespace internal {
class StartupSerializer;
-class PartialSerializer : public Serializer<> {
+class PartialSerializer : public Serializer {
public:
PartialSerializer(Isolate* isolate, StartupSerializer* startup_serializer,
v8::SerializeEmbedderFieldsCallback callback);
@@ -21,27 +22,30 @@ class PartialSerializer : public Serializer<> {
~PartialSerializer() override;
// Serialize the objects reachable from a single object pointer.
- void Serialize(Context** o, bool include_global_proxy);
+ void Serialize(Context* o, bool include_global_proxy);
bool can_be_rehashed() const { return can_be_rehashed_; }
private:
- void SerializeObject(HeapObject* o, HowToCode how_to_code,
+ void SerializeObject(HeapObject o, HowToCode how_to_code,
WhereToPoint where_to_point, int skip) override;
- bool ShouldBeInThePartialSnapshotCache(HeapObject* o);
+ bool ShouldBeInThePartialSnapshotCache(HeapObject o);
- void SerializeEmbedderFields();
+ bool SerializeJSObjectWithEmbedderFields(Object obj, HowToCode how_to_code,
+ WhereToPoint where_to_point);
- void CheckRehashability(HeapObject* obj);
+ void CheckRehashability(HeapObject obj);
StartupSerializer* startup_serializer_;
- std::vector<JSObject*> embedder_field_holders_;
v8::SerializeEmbedderFieldsCallback serialize_embedder_fields_;
// Indicates whether we only serialized hash tables that we can rehash.
// TODO(yangguo): generalize rehashing, and remove this flag.
bool can_be_rehashed_;
- Context* context_;
+ Context context_;
+
+ // Used to store serialized data for embedder fields.
+ SnapshotByteSink embedder_fields_sink_;
DISALLOW_COPY_AND_ASSIGN(PartialSerializer);
};
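
The callback stored in serialize_embedder_fields_ reaches the partial serializer through the public v8::SnapshotCreator API. A minimal sketch of that wiring, reusing the hypothetical SerializeEmbedderField callback sketched above and eliding error handling:

    v8::SnapshotCreator creator;
    {
      v8::Isolate* isolate = creator.GetIsolate();
      v8::HandleScope scope(isolate);
      v8::Local<v8::Context> context = v8::Context::New(isolate);
      creator.SetDefaultContext(
          context, v8::SerializeInternalFieldsCallback(SerializeEmbedderField,
                                                       nullptr));
    }
    v8::StartupData blob =
        creator.CreateBlob(v8::SnapshotCreator::FunctionCodeHandling::kClear);
    // ... use blob; the embedder owns blob.data and must delete[] it.
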
diff --git a/deps/v8/src/snapshot/read-only-deserializer.cc b/deps/v8/src/snapshot/read-only-deserializer.cc
new file mode 100644
index 0000000000..57b1f1dbcb
--- /dev/null
+++ b/deps/v8/src/snapshot/read-only-deserializer.cc
@@ -0,0 +1,60 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/snapshot/read-only-deserializer.h"
+
+#include "src/api.h"
+#include "src/heap/heap-inl.h" // crbug.com/v8/8499
+#include "src/objects/slots.h"
+#include "src/snapshot/snapshot.h"
+#include "src/v8threads.h"
+
+namespace v8 {
+namespace internal {
+
+void ReadOnlyDeserializer::DeserializeInto(Isolate* isolate) {
+ Initialize(isolate);
+
+ if (!allocator()->ReserveSpace()) {
+ V8::FatalProcessOutOfMemory(isolate, "ReadOnlyDeserializer");
+ }
+
+ // No active threads.
+ DCHECK_NULL(isolate->thread_manager()->FirstThreadStateInUse());
+ // No active handles.
+ DCHECK(isolate->handle_scope_implementer()->blocks()->empty());
+ // Partial snapshot cache is not yet populated.
+ DCHECK(isolate->read_only_object_cache()->empty());
+ DCHECK(isolate->partial_snapshot_cache()->empty());
+ // Builtins are not yet created.
+ DCHECK(!isolate->builtins()->is_initialized());
+
+ {
+ DisallowHeapAllocation no_gc;
+
+ ReadOnlyRoots(isolate).Iterate(this);
+ isolate->heap()->read_only_space()->RepairFreeListsAfterDeserialization();
+
+    // Deserialize the read-only object cache.
+ std::vector<Object>* cache = isolate->read_only_object_cache();
+ for (size_t i = 0;; ++i) {
+      // Extend the array so it is ready to receive a value when deserializing.
+ if (cache->size() <= i) cache->push_back(Smi::kZero);
+ // During deserialization, the visitor populates the read-only object
+ // cache and eventually terminates the cache with undefined.
+ VisitRootPointer(Root::kReadOnlyObjectCache, nullptr,
+ FullObjectSlot(&cache->at(i)));
+ if (cache->at(i)->IsUndefined(isolate)) break;
+ }
+ DeserializeDeferredObjects();
+ }
+}
+
+void ReadOnlyDeserializer::RehashHeap() {
+ DCHECK(FLAG_rehash_snapshot && can_rehash());
+ Rehash();
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/snapshot/read-only-deserializer.h b/deps/v8/src/snapshot/read-only-deserializer.h
new file mode 100644
index 0000000000..25b6c29802
--- /dev/null
+++ b/deps/v8/src/snapshot/read-only-deserializer.h
@@ -0,0 +1,34 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_SNAPSHOT_READ_ONLY_DESERIALIZER_H_
+#define V8_SNAPSHOT_READ_ONLY_DESERIALIZER_H_
+
+#include "src/snapshot/deserializer.h"
+#include "src/snapshot/snapshot.h"
+
+namespace v8 {
+namespace internal {
+
+// Deserializes the read-only blob, creating the read-only roots and the
+// read-only object cache used by the other deserializers.
+class ReadOnlyDeserializer final : public Deserializer {
+ public:
+ explicit ReadOnlyDeserializer(const SnapshotData* data)
+ : Deserializer(data, false) {}
+
+ // Deserialize the snapshot into an empty heap.
+ void DeserializeInto(Isolate* isolate);
+
+ private:
+ friend class StartupDeserializer;
+
+ // Rehash after deserializing.
+ void RehashHeap();
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_SNAPSHOT_READ_ONLY_DESERIALIZER_H_
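
A minimal sketch of the intended call site during isolate bootstrap, assuming a SnapshotData view over the read-only blob is already in hand (read_only_snapshot_data is an illustrative name, not the actual bootstrap code):

    ReadOnlyDeserializer ro_deserializer(&read_only_snapshot_data);
    ro_deserializer.DeserializeInto(isolate);  // populates the read-only space
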
diff --git a/deps/v8/src/snapshot/read-only-serializer.cc b/deps/v8/src/snapshot/read-only-serializer.cc
new file mode 100644
index 0000000000..ba20ec8d64
--- /dev/null
+++ b/deps/v8/src/snapshot/read-only-serializer.cc
@@ -0,0 +1,105 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/snapshot/read-only-serializer.h"
+
+#include "src/api.h"
+#include "src/code-tracer.h"
+#include "src/global-handles.h"
+#include "src/objects-inl.h"
+#include "src/objects/slots.h"
+#include "src/snapshot/startup-serializer.h"
+#include "src/v8threads.h"
+
+namespace v8 {
+namespace internal {
+
+ReadOnlySerializer::ReadOnlySerializer(Isolate* isolate)
+ : RootsSerializer(isolate, RootIndex::kFirstReadOnlyRoot) {
+ STATIC_ASSERT(RootIndex::kFirstReadOnlyRoot == RootIndex::kFirstRoot);
+}
+
+ReadOnlySerializer::~ReadOnlySerializer() {
+ OutputStatistics("ReadOnlySerializer");
+}
+
+void ReadOnlySerializer::SerializeObject(HeapObject obj, HowToCode how_to_code,
+ WhereToPoint where_to_point,
+ int skip) {
+ CHECK(isolate()->heap()->read_only_space()->Contains(obj));
+ CHECK_IMPLIES(obj->IsString(), obj->IsInternalizedString());
+
+ if (SerializeHotObject(obj, how_to_code, where_to_point, skip)) return;
+ if (IsRootAndHasBeenSerialized(obj) &&
+ SerializeRoot(obj, how_to_code, where_to_point, skip)) {
+ return;
+ }
+ if (SerializeBackReference(obj, how_to_code, where_to_point, skip)) return;
+
+ FlushSkip(skip);
+
+ CheckRehashability(obj);
+
+ // Object has not yet been serialized. Serialize it here.
+ ObjectSerializer object_serializer(this, obj, &sink_, how_to_code,
+ where_to_point);
+ object_serializer.Serialize();
+}
+
+void ReadOnlySerializer::SerializeReadOnlyRoots() {
+ // No active threads.
+ CHECK_NULL(isolate()->thread_manager()->FirstThreadStateInUse());
+ // No active or weak handles.
+ CHECK(isolate()->handle_scope_implementer()->blocks()->empty());
+
+ ReadOnlyRoots(isolate()).Iterate(this);
+}
+
+void ReadOnlySerializer::FinalizeSerialization() {
+ // This comes right after serialization of the other snapshots, where we
+ // add entries to the read-only object cache. Add one entry with 'undefined'
+ // to terminate the read-only object cache.
+ Object undefined = ReadOnlyRoots(isolate()).undefined_value();
+ VisitRootPointer(Root::kReadOnlyObjectCache, nullptr,
+ FullObjectSlot(&undefined));
+ SerializeDeferredObjects();
+ Pad();
+}
+
+bool ReadOnlySerializer::MustBeDeferred(HeapObject object) {
+ if (root_has_been_serialized(RootIndex::kFreeSpaceMap) &&
+ root_has_been_serialized(RootIndex::kOnePointerFillerMap) &&
+ root_has_been_serialized(RootIndex::kTwoPointerFillerMap)) {
+ // All required root objects are serialized, so any aligned objects can
+ // be saved without problems.
+ return false;
+ }
+ // Just defer everything except for Map objects until all required roots are
+  // serialized. Some objects may have special alignment requirements that may
+  // not be fulfilled during deserialization until the first few root objects
+  // are serialized. But we must serialize Map objects, since the deserializer
+  // checks that these root objects are indeed Maps.
+ return !object->IsMap();
+}
+
+bool ReadOnlySerializer::SerializeUsingReadOnlyObjectCache(
+ SnapshotByteSink* sink, HeapObject obj, HowToCode how_to_code,
+ WhereToPoint where_to_point, int skip) {
+ if (!isolate()->heap()->read_only_space()->Contains(obj)) return false;
+
+  // Get the cache index and, if necessary, serialize the object into the
+  // read-only snapshot.
+ int cache_index = SerializeInObjectCache(obj);
+
+  // Write the cache entry into the calling serializer's sink.
+ FlushSkip(sink, skip);
+ sink->Put(kReadOnlyObjectCache + how_to_code + where_to_point,
+ "ReadOnlyObjectCache");
+ sink->PutInt(cache_index, "read_only_object_cache_index");
+
+ return true;
+}
+
+} // namespace internal
+} // namespace v8
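
SerializeUsingReadOnlyObjectCache is the delegation hook the other serializers call (the partial serializer's call appears at the top of this patch). A minimal sketch of the caller side, assuming a derived serializer holding a ReadOnlySerializer* named read_only_serializer_:

    void MySerializer::SerializeObject(HeapObject obj, HowToCode how_to_code,
                                       WhereToPoint where_to_point, int skip) {
      if (read_only_serializer_->SerializeUsingReadOnlyObjectCache(
              &sink_, obj, how_to_code, where_to_point, skip)) {
        // A kReadOnlyObjectCache bytecode plus cache index was emitted; the
        // object body itself lives only in the read-only snapshot.
        return;
      }
      // ... fall through to regular serialization ...
    }
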
diff --git a/deps/v8/src/snapshot/read-only-serializer.h b/deps/v8/src/snapshot/read-only-serializer.h
new file mode 100644
index 0000000000..23259f4cc2
--- /dev/null
+++ b/deps/v8/src/snapshot/read-only-serializer.h
@@ -0,0 +1,46 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_SNAPSHOT_READ_ONLY_SERIALIZER_H_
+#define V8_SNAPSHOT_READ_ONLY_SERIALIZER_H_
+
+#include "src/snapshot/roots-serializer.h"
+
+namespace v8 {
+namespace internal {
+
+class HeapObject;
+class SnapshotByteSink;
+
+class ReadOnlySerializer : public RootsSerializer {
+ public:
+ explicit ReadOnlySerializer(Isolate* isolate);
+ ~ReadOnlySerializer() override;
+
+ void SerializeReadOnlyRoots();
+
+ // Completes the serialization of the read-only object cache and serializes
+ // any deferred objects.
+ void FinalizeSerialization();
+
+ // If |obj| can be serialized in the read-only snapshot then add it to the
+ // read-only object cache if not already present and emit a
+ // ReadOnlyObjectCache bytecode into |sink|. Returns whether this was
+ // successful.
+ bool SerializeUsingReadOnlyObjectCache(SnapshotByteSink* sink, HeapObject obj,
+ HowToCode how_to_code,
+ WhereToPoint where_to_point, int skip);
+
+ private:
+ void SerializeObject(HeapObject o, HowToCode how_to_code,
+ WhereToPoint where_to_point, int skip) override;
+ bool MustBeDeferred(HeapObject object) override;
+
+ DISALLOW_COPY_AND_ASSIGN(ReadOnlySerializer);
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_SNAPSHOT_READ_ONLY_SERIALIZER_H_
diff --git a/deps/v8/src/snapshot/references.h b/deps/v8/src/snapshot/references.h
index 6beb2065c1..40f1d60345 100644
--- a/deps/v8/src/snapshot/references.h
+++ b/deps/v8/src/snapshot/references.h
@@ -156,7 +156,7 @@ class SerializerReferenceMap
public:
typedef base::TemplateHashMapEntry<uintptr_t, SerializerReference> Entry;
- SerializerReferenceMap() : no_allocation_(), attached_reference_index_(0) {}
+ SerializerReferenceMap() : attached_reference_index_(0) {}
SerializerReference LookupReference(void* value) const {
uintptr_t key = Key(value);
@@ -186,7 +186,7 @@ class SerializerReferenceMap
static uint32_t Hash(uintptr_t key) { return static_cast<uint32_t>(key); }
- DisallowHeapAllocation no_allocation_;
+ DISALLOW_HEAP_ALLOCATION(no_allocation_);
int attached_reference_index_;
DISALLOW_COPY_AND_ASSIGN(SerializerReferenceMap);
};
diff --git a/deps/v8/src/snapshot/roots-serializer.cc b/deps/v8/src/snapshot/roots-serializer.cc
new file mode 100644
index 0000000000..14e0e46c51
--- /dev/null
+++ b/deps/v8/src/snapshot/roots-serializer.cc
@@ -0,0 +1,67 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/snapshot/roots-serializer.h"
+
+#include "src/heap/heap.h"
+#include "src/isolate.h"
+#include "src/objects-inl.h"
+#include "src/objects/slots.h"
+
+namespace v8 {
+namespace internal {
+
+RootsSerializer::RootsSerializer(Isolate* isolate,
+ RootIndex first_root_to_be_serialized)
+ : Serializer(isolate),
+ first_root_to_be_serialized_(first_root_to_be_serialized),
+ can_be_rehashed_(true) {
+ for (size_t i = 0; i < static_cast<size_t>(first_root_to_be_serialized);
+ ++i) {
+ root_has_been_serialized_[i] = true;
+ }
+}
+
+int RootsSerializer::SerializeInObjectCache(HeapObject heap_object) {
+ int index;
+ if (!object_cache_index_map_.LookupOrInsert(heap_object, &index)) {
+ // This object is not part of the object cache yet. Add it to the cache so
+    // we can refer to it via its cache index from the delegating snapshot.
+ SerializeObject(heap_object, kPlain, kStartOfObject, 0);
+ }
+ return index;
+}
+
+void RootsSerializer::Synchronize(VisitorSynchronization::SyncTag tag) {
+ sink_.Put(kSynchronize, "Synchronize");
+}
+
+void RootsSerializer::VisitRootPointers(Root root, const char* description,
+ FullObjectSlot start,
+ FullObjectSlot end) {
+ RootsTable& roots_table = isolate()->heap()->roots_table();
+ if (start ==
+ roots_table.begin() + static_cast<int>(first_root_to_be_serialized_)) {
+ // Serializing the root list needs special handling:
+ // - Only root list elements that have been fully serialized can be
+ // referenced using kRootArray bytecodes.
+ for (FullObjectSlot current = start; current < end; ++current) {
+ SerializeRootObject(*current);
+ size_t root_index = current - roots_table.begin();
+ root_has_been_serialized_.set(root_index);
+ }
+ } else {
+ Serializer::VisitRootPointers(root, description, start, end);
+ }
+}
+
+void RootsSerializer::CheckRehashability(HeapObject obj) {
+ if (!can_be_rehashed_) return;
+ if (!obj->NeedsRehashing()) return;
+ if (obj->CanBeRehashed()) return;
+ can_be_rehashed_ = false;
+}
+
+} // namespace internal
+} // namespace v8
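
The bitset maintained in VisitRootPointers is what lets derived serializers safely emit root references. A minimal sketch of the gate they apply, mirroring ReadOnlySerializer::SerializeObject above:

    if (IsRootAndHasBeenSerialized(obj) &&
        SerializeRoot(obj, how_to_code, where_to_point, skip)) {
      return;  // encoded as a root-array reference, no object body emitted
    }
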
diff --git a/deps/v8/src/snapshot/roots-serializer.h b/deps/v8/src/snapshot/roots-serializer.h
new file mode 100644
index 0000000000..50c63402d2
--- /dev/null
+++ b/deps/v8/src/snapshot/roots-serializer.h
@@ -0,0 +1,64 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_SNAPSHOT_ROOTS_SERIALIZER_H_
+#define V8_SNAPSHOT_ROOTS_SERIALIZER_H_
+
+#include <bitset>
+
+#include "src/snapshot/serializer.h"
+#include "src/visitors.h"
+
+namespace v8 {
+namespace internal {
+
+class HeapObject;
+class Object;
+class Isolate;
+enum class RootIndex : uint16_t;
+
+// Base class for serializers that iterate over roots. Also maintains a cache
+// that can be used to share non-root objects with other serializers.
+class RootsSerializer : public Serializer {
+ public:
+ // The serializer expects that all roots before |first_root_to_be_serialized|
+ // are already serialized.
+ RootsSerializer(Isolate* isolate, RootIndex first_root_to_be_serialized);
+
+ bool can_be_rehashed() const { return can_be_rehashed_; }
+ bool root_has_been_serialized(RootIndex root_index) const {
+ return root_has_been_serialized_.test(static_cast<size_t>(root_index));
+ }
+
+ bool IsRootAndHasBeenSerialized(HeapObject obj) const {
+ RootIndex root_index;
+ return root_index_map()->Lookup(obj, &root_index) &&
+ root_has_been_serialized(root_index);
+ }
+
+ protected:
+ void CheckRehashability(HeapObject obj);
+
+ // Serializes |object| if not previously seen and returns its cache index.
+ int SerializeInObjectCache(HeapObject object);
+
+ private:
+ void VisitRootPointers(Root root, const char* description,
+ FullObjectSlot start, FullObjectSlot end) override;
+ void Synchronize(VisitorSynchronization::SyncTag tag) override;
+
+ const RootIndex first_root_to_be_serialized_;
+ std::bitset<RootsTable::kEntriesCount> root_has_been_serialized_;
+ ObjectCacheIndexMap object_cache_index_map_;
+ // Indicates whether we only serialized hash tables that we can rehash.
+ // TODO(yangguo): generalize rehashing, and remove this flag.
+ bool can_be_rehashed_;
+
+ DISALLOW_COPY_AND_ASSIGN(RootsSerializer);
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_SNAPSHOT_ROOTS_SERIALIZER_H_
diff --git a/deps/v8/src/snapshot/default-serializer-allocator.cc b/deps/v8/src/snapshot/serializer-allocator.cc
index d0efc2bd65..e69441dc14 100644
--- a/deps/v8/src/snapshot/default-serializer-allocator.cc
+++ b/deps/v8/src/snapshot/serializer-allocator.cc
@@ -2,9 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/snapshot/default-serializer-allocator.h"
+#include "src/snapshot/serializer-allocator.h"
-#include "src/heap/heap-inl.h"
+#include "src/heap/heap-inl.h" // crbug.com/v8/8499
#include "src/snapshot/references.h"
#include "src/snapshot/serializer.h"
#include "src/snapshot/snapshot-source-sink.h"
@@ -12,36 +12,38 @@
namespace v8 {
namespace internal {
-DefaultSerializerAllocator::DefaultSerializerAllocator(
- Serializer<DefaultSerializerAllocator>* serializer)
+SerializerAllocator::SerializerAllocator(Serializer* serializer)
: serializer_(serializer) {
for (int i = 0; i < kNumberOfPreallocatedSpaces; i++) {
pending_chunk_[i] = 0;
}
}
-void DefaultSerializerAllocator::UseCustomChunkSize(uint32_t chunk_size) {
+void SerializerAllocator::UseCustomChunkSize(uint32_t chunk_size) {
custom_chunk_size_ = chunk_size;
}
static uint32_t PageSizeOfSpace(int space) {
return static_cast<uint32_t>(
- MemoryAllocator::PageAreaSize(static_cast<AllocationSpace>(space)));
+ MemoryChunkLayout::AllocatableMemoryInMemoryChunk(
+ static_cast<AllocationSpace>(space)));
}
-uint32_t DefaultSerializerAllocator::TargetChunkSize(int space) {
+uint32_t SerializerAllocator::TargetChunkSize(int space) {
if (custom_chunk_size_ == 0) return PageSizeOfSpace(space);
DCHECK_LE(custom_chunk_size_, PageSizeOfSpace(space));
return custom_chunk_size_;
}
-SerializerReference DefaultSerializerAllocator::Allocate(AllocationSpace space,
- uint32_t size) {
+SerializerReference SerializerAllocator::Allocate(AllocationSpace space,
+ uint32_t size) {
DCHECK(space >= 0 && space < kNumberOfPreallocatedSpaces);
DCHECK(size > 0 && size <= PageSizeOfSpace(space));
// Maps are allocated through AllocateMap.
DCHECK_NE(MAP_SPACE, space);
+ // We tenure large object allocations.
+ DCHECK_NE(NEW_LO_SPACE, space);
uint32_t old_chunk_size = pending_chunk_[space];
uint32_t new_chunk_size = old_chunk_size + size;
@@ -59,27 +61,26 @@ SerializerReference DefaultSerializerAllocator::Allocate(AllocationSpace space,
space, static_cast<uint32_t>(completed_chunks_[space].size()), offset);
}
-SerializerReference DefaultSerializerAllocator::AllocateMap() {
+SerializerReference SerializerAllocator::AllocateMap() {
// Maps are allocated one-by-one when deserializing.
return SerializerReference::MapReference(num_maps_++);
}
-SerializerReference DefaultSerializerAllocator::AllocateLargeObject(
- uint32_t size) {
+SerializerReference SerializerAllocator::AllocateLargeObject(uint32_t size) {
// Large objects are allocated one-by-one when deserializing. We do not
// have to keep track of multiple chunks.
large_objects_total_size_ += size;
return SerializerReference::LargeObjectReference(seen_large_objects_index_++);
}
-SerializerReference DefaultSerializerAllocator::AllocateOffHeapBackingStore() {
+SerializerReference SerializerAllocator::AllocateOffHeapBackingStore() {
DCHECK_NE(0, seen_backing_stores_index_);
return SerializerReference::OffHeapBackingStoreReference(
seen_backing_stores_index_++);
}
#ifdef DEBUG
-bool DefaultSerializerAllocator::BackReferenceIsAlreadyAllocated(
+bool SerializerAllocator::BackReferenceIsAlreadyAllocated(
SerializerReference reference) const {
DCHECK(reference.is_back_reference());
AllocationSpace space = reference.space();
@@ -105,7 +106,7 @@ bool DefaultSerializerAllocator::BackReferenceIsAlreadyAllocated(
#endif
std::vector<SerializedData::Reservation>
-DefaultSerializerAllocator::EncodeReservations() const {
+SerializerAllocator::EncodeReservations() const {
std::vector<SerializedData::Reservation> out;
for (int i = FIRST_SPACE; i < kNumberOfPreallocatedSpaces; i++) {
@@ -130,7 +131,7 @@ DefaultSerializerAllocator::EncodeReservations() const {
return out;
}
-void DefaultSerializerAllocator::OutputStatistics() {
+void SerializerAllocator::OutputStatistics() {
DCHECK(FLAG_serialization_statistics);
PrintF(" Spaces (bytes):\n");
diff --git a/deps/v8/src/snapshot/default-serializer-allocator.h b/deps/v8/src/snapshot/serializer-allocator.h
index e410eab565..0ca968f0fe 100644
--- a/deps/v8/src/snapshot/default-serializer-allocator.h
+++ b/deps/v8/src/snapshot/serializer-allocator.h
@@ -2,21 +2,19 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_SNAPSHOT_DEFAULT_SERIALIZER_ALLOCATOR_H_
-#define V8_SNAPSHOT_DEFAULT_SERIALIZER_ALLOCATOR_H_
+#ifndef V8_SNAPSHOT_SERIALIZER_ALLOCATOR_H_
+#define V8_SNAPSHOT_SERIALIZER_ALLOCATOR_H_
#include "src/snapshot/serializer-common.h"
namespace v8 {
namespace internal {
-template <class AllocatorT>
class Serializer;
-class DefaultSerializerAllocator final {
+class SerializerAllocator final {
public:
- DefaultSerializerAllocator(
- Serializer<DefaultSerializerAllocator>* serializer);
+ explicit SerializerAllocator(Serializer* serializer);
SerializerReference Allocate(AllocationSpace space, uint32_t size);
SerializerReference AllocateMap();
@@ -68,12 +66,12 @@ class DefaultSerializerAllocator final {
uint32_t custom_chunk_size_ = 0;
// The current serializer.
- Serializer<DefaultSerializerAllocator>* const serializer_;
+ Serializer* const serializer_;
- DISALLOW_COPY_AND_ASSIGN(DefaultSerializerAllocator)
+ DISALLOW_COPY_AND_ASSIGN(SerializerAllocator);
};
} // namespace internal
} // namespace v8
-#endif // V8_SNAPSHOT_DEFAULT_SERIALIZER_ALLOCATOR_H_
+#endif // V8_SNAPSHOT_SERIALIZER_ALLOCATOR_H_
diff --git a/deps/v8/src/snapshot/serializer-common.cc b/deps/v8/src/snapshot/serializer-common.cc
index d5a8cf1273..fa8d19e438 100644
--- a/deps/v8/src/snapshot/serializer-common.cc
+++ b/deps/v8/src/snapshot/serializer-common.cc
@@ -6,6 +6,8 @@
#include "src/external-reference-table.h"
#include "src/objects-inl.h"
+#include "src/objects/foreign-inl.h"
+#include "src/objects/slots.h"
namespace v8 {
namespace internal {
@@ -22,8 +24,8 @@ ExternalReferenceEncoder::ExternalReferenceEncoder(Isolate* isolate) {
map_ = new AddressToIndexHashMap();
isolate->set_external_reference_map(map_);
// Add V8's external references.
- ExternalReferenceTable* table = isolate->heap()->external_reference_table();
- for (uint32_t i = 0; i < table->size(); ++i) {
+ ExternalReferenceTable* table = isolate->external_reference_table();
+ for (uint32_t i = 0; i < ExternalReferenceTable::kSize; ++i) {
Address addr = table->address(i);
// Ignore duplicate references.
// This can happen due to ICF. See http://crbug.com/726896.
@@ -89,7 +91,7 @@ const char* ExternalReferenceEncoder::NameOfAddress(Isolate* isolate,
if (maybe_index.IsNothing()) return "<unknown>";
Value value(maybe_index.FromJust());
if (value.is_from_api()) return "<from api>";
- return isolate->heap()->external_reference_table()->name(value.index());
+ return isolate->external_reference_table()->name(value.index());
}
void SerializedData::AllocateData(uint32_t size) {
@@ -101,9 +103,7 @@ void SerializedData::AllocateData(uint32_t size) {
}
// static
-uint32_t SerializedData::ComputeMagicNumber(Isolate* isolate) {
- return ComputeMagicNumber(isolate->heap()->external_reference_table());
-}
+constexpr uint32_t SerializedData::kMagicNumber;
// The partial snapshot cache is terminated by undefined. We visit the
// partial snapshot...
@@ -112,34 +112,34 @@ uint32_t SerializedData::ComputeMagicNumber(Isolate* isolate) {
// - not during serialization. The partial serializer adds to it explicitly.
DISABLE_CFI_PERF
void SerializerDeserializer::Iterate(Isolate* isolate, RootVisitor* visitor) {
- std::vector<Object*>* cache = isolate->partial_snapshot_cache();
+ std::vector<Object>* cache = isolate->partial_snapshot_cache();
for (size_t i = 0;; ++i) {
    // Extend the array so it is ready to receive a value when deserializing.
if (cache->size() <= i) cache->push_back(Smi::kZero);
// During deserialization, the visitor populates the partial snapshot cache
// and eventually terminates the cache with undefined.
visitor->VisitRootPointer(Root::kPartialSnapshotCache, nullptr,
- &cache->at(i));
+ FullObjectSlot(&cache->at(i)));
if (cache->at(i)->IsUndefined(isolate)) break;
}
}
-bool SerializerDeserializer::CanBeDeferred(HeapObject* o) {
+bool SerializerDeserializer::CanBeDeferred(HeapObject o) {
return !o->IsString() && !o->IsScript() && !o->IsJSTypedArray();
}
void SerializerDeserializer::RestoreExternalReferenceRedirectors(
- const std::vector<AccessorInfo*>& accessor_infos) {
+ const std::vector<AccessorInfo>& accessor_infos) {
// Restore wiped accessor infos.
- for (AccessorInfo* info : accessor_infos) {
+ for (AccessorInfo info : accessor_infos) {
Foreign::cast(info->js_getter())
->set_foreign_address(info->redirected_getter());
}
}
void SerializerDeserializer::RestoreExternalReferenceRedirectors(
- const std::vector<CallHandlerInfo*>& call_handler_infos) {
- for (CallHandlerInfo* info : call_handler_infos) {
+ const std::vector<CallHandlerInfo>& call_handler_infos) {
+ for (CallHandlerInfo info : call_handler_infos) {
Foreign::cast(info->js_callback())
->set_foreign_address(info->redirected_callback());
}
diff --git a/deps/v8/src/snapshot/serializer-common.h b/deps/v8/src/snapshot/serializer-common.h
index 8f547243d6..ec24c7831d 100644
--- a/deps/v8/src/snapshot/serializer-common.h
+++ b/deps/v8/src/snapshot/serializer-common.h
@@ -60,25 +60,23 @@ class ExternalReferenceEncoder {
class HotObjectsList {
public:
- HotObjectsList() : index_(0) {
- for (int i = 0; i < kSize; i++) circular_queue_[i] = nullptr;
- }
+ HotObjectsList() : index_(0) {}
- void Add(HeapObject* object) {
+ void Add(HeapObject object) {
DCHECK(!AllowHeapAllocation::IsAllowed());
circular_queue_[index_] = object;
index_ = (index_ + 1) & kSizeMask;
}
- HeapObject* Get(int index) {
+ HeapObject Get(int index) {
DCHECK(!AllowHeapAllocation::IsAllowed());
- DCHECK_NOT_NULL(circular_queue_[index]);
+ DCHECK(!circular_queue_[index].is_null());
return circular_queue_[index];
}
static const int kNotFound = -1;
- int Find(HeapObject* object) {
+ int Find(HeapObject object) {
DCHECK(!AllowHeapAllocation::IsAllowed());
for (int i = 0; i < kSize; i++) {
if (circular_queue_[i] == object) return i;
@@ -91,7 +89,7 @@ class HotObjectsList {
private:
static_assert(base::bits::IsPowerOfTwo(kSize), "kSize must be power of two");
static const int kSizeMask = kSize - 1;
- HeapObject* circular_queue_[kSize];
+ HeapObject circular_queue_[kSize];
int index_;
DISALLOW_COPY_AND_ASSIGN(HotObjectsList);
@@ -108,25 +106,29 @@ class SerializerDeserializer : public RootVisitor {
  // We also handle map space differently.
STATIC_ASSERT(MAP_SPACE == CODE_SPACE + 1);
- // We do not support young generation large objects.
+  // We do not support young generation large objects or large code objects.
STATIC_ASSERT(LAST_SPACE == NEW_LO_SPACE);
- STATIC_ASSERT(LAST_SPACE - 1 == LO_SPACE);
+ STATIC_ASSERT(LAST_SPACE - 2 == LO_SPACE);
static const int kNumberOfPreallocatedSpaces = CODE_SPACE + 1;
+
+ // The number of spaces supported by the serializer. Spaces after LO_SPACE
+ // (NEW_LO_SPACE and CODE_LO_SPACE) are not supported.
static const int kNumberOfSpaces = LO_SPACE + 1;
protected:
- static bool CanBeDeferred(HeapObject* o);
+ static bool CanBeDeferred(HeapObject o);
void RestoreExternalReferenceRedirectors(
- const std::vector<AccessorInfo*>& accessor_infos);
+ const std::vector<AccessorInfo>& accessor_infos);
void RestoreExternalReferenceRedirectors(
- const std::vector<CallHandlerInfo*>& call_handler_infos);
+ const std::vector<CallHandlerInfo>& call_handler_infos);
#define UNUSED_SERIALIZER_BYTE_CODES(V) \
- V(0x18) \
- V(0x3d) \
+ V(0x0e) \
+ V(0x2e) \
V(0x3e) \
V(0x3f) \
+ V(0x4e) \
V(0x58) \
V(0x59) \
V(0x5a) \
@@ -136,13 +138,12 @@ class SerializerDeserializer : public RootVisitor {
V(0x5e) \
V(0x5f) \
V(0x67) \
+ V(0x6e) \
V(0x76) \
- V(0x78) \
V(0x79) \
V(0x7a) \
V(0x7b) \
- V(0x7c) \
- V(0x7d)
+ V(0x7c)
// ---------- byte code range 0x00..0x7f ----------
// Byte codes in this range represent Where, HowToCode and WhereToPoint.
@@ -163,12 +164,12 @@ class SerializerDeserializer : public RootVisitor {
// 0x07 External reference referenced by id.
kExternalReference = 0x07,
- // 0x0e Builtin code referenced by index.
- kBuiltin = 0x0e,
// 0x16 Root array item.
kRootArray = 0x16,
// 0x17 Object provided in the attached list.
kAttachedReference = 0x17,
+ // 0x18 Object in the read-only object cache.
+ kReadOnlyObjectCache = 0x18,
// 0x0f Misc, see below (incl. 0x2f, 0x4f, 0x6f).
// 0x18..0x1f Misc, see below (incl. 0x38..0x3f, 0x58..0x5f, 0x78..0x7f).
@@ -225,16 +226,17 @@ class SerializerDeserializer : public RootVisitor {
// Used for embedder-provided serialization data for embedder fields.
static const int kEmbedderFieldsData = 0x1f;
- // Used to encode external referenced provided through the API.
- static const int kApiReference = 0x38;
-
static const int kVariableRawCode = 0x39;
static const int kVariableRawData = 0x3a;
static const int kInternalReference = 0x3b;
static const int kInternalReferenceEncoded = 0x3c;
+ // Used to encode external references provided through the API.
+ static const int kApiReference = 0x3d;
+
// In-place weak references
+ static const int kClearedWeakReference = 0x7d;
static const int kWeakPrefix = 0x7e;
// Encodes an off-heap instruction stream target.
@@ -318,12 +320,9 @@ class SerializedData {
class ChunkSizeBits : public BitField<uint32_t, 0, 31> {};
class IsLastChunkBits : public BitField<bool, 31, 1> {};
- static uint32_t ComputeMagicNumber(ExternalReferenceTable* table) {
- uint32_t external_refs = table->size();
- return 0xC0DE0000 ^ external_refs;
- }
-
- static const uint32_t kMagicNumberOffset = 0;
+ static constexpr uint32_t kMagicNumberOffset = 0;
+ static constexpr uint32_t kMagicNumber =
+ 0xC0DE0000 ^ ExternalReferenceTable::kSize;
protected:
void SetHeaderValue(uint32_t offset, uint32_t value) {
@@ -337,11 +336,7 @@ class SerializedData {
void AllocateData(uint32_t size);
- static uint32_t ComputeMagicNumber(Isolate* isolate);
-
- void SetMagicNumber(Isolate* isolate) {
- SetHeaderValue(kMagicNumberOffset, ComputeMagicNumber(isolate));
- }
+ void SetMagicNumber() { SetHeaderValue(kMagicNumberOffset, kMagicNumber); }
byte* data_;
uint32_t size_;
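
Making kMagicNumber a compile-time constant gives the fail-fast property directly: two builds that disagree on the external reference table size disagree on the magic number, so a stale snapshot is rejected up front. A self-contained sketch of the idea (the real constant XORs in ExternalReferenceTable::kSize, as above):

    #include <cstdint>

    constexpr uint32_t ComputeMagicNumber(uint32_t external_reference_count) {
      return 0xC0DE0000 ^ external_reference_count;
    }

    static_assert(ComputeMagicNumber(100) != ComputeMagicNumber(101),
                  "mismatched reference tables must not share a magic number");
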
diff --git a/deps/v8/src/snapshot/serializer.cc b/deps/v8/src/snapshot/serializer.cc
index a8b911a191..50394ab843 100644
--- a/deps/v8/src/snapshot/serializer.cc
+++ b/deps/v8/src/snapshot/serializer.cc
@@ -11,15 +11,15 @@
#include "src/objects/js-array-buffer-inl.h"
#include "src/objects/js-array-inl.h"
#include "src/objects/map.h"
-#include "src/snapshot/builtin-serializer-allocator.h"
+#include "src/objects/slots-inl.h"
+#include "src/objects/smi.h"
#include "src/snapshot/natives.h"
#include "src/snapshot/snapshot.h"
namespace v8 {
namespace internal {
-template <class AllocatorT>
-Serializer<AllocatorT>::Serializer(Isolate* isolate)
+Serializer::Serializer(Isolate* isolate)
: isolate_(isolate),
external_reference_encoder_(isolate),
root_index_map_(isolate),
@@ -43,8 +43,7 @@ Serializer<AllocatorT>::Serializer(Isolate* isolate)
#endif // OBJECT_PRINT
}
-template <class AllocatorT>
-Serializer<AllocatorT>::~Serializer() {
+Serializer::~Serializer() {
if (code_address_map_ != nullptr) delete code_address_map_;
#ifdef OBJECT_PRINT
for (int space = 0; space < LAST_SPACE; ++space) {
@@ -57,17 +56,14 @@ Serializer<AllocatorT>::~Serializer() {
}
#ifdef OBJECT_PRINT
-template <class AllocatorT>
-void Serializer<AllocatorT>::CountInstanceType(Map* map, int size,
- AllocationSpace space) {
+void Serializer::CountInstanceType(Map map, int size, AllocationSpace space) {
int instance_type = map->instance_type();
instance_type_count_[space][instance_type]++;
instance_type_size_[space][instance_type] += size;
}
#endif // OBJECT_PRINT
-template <class AllocatorT>
-void Serializer<AllocatorT>::OutputStatistics(const char* name) {
+void Serializer::OutputStatistics(const char* name) {
if (!FLAG_serialization_statistics) return;
PrintF("%s:\n", name);
@@ -91,10 +87,9 @@ void Serializer<AllocatorT>::OutputStatistics(const char* name) {
#endif // OBJECT_PRINT
}
-template <class AllocatorT>
-void Serializer<AllocatorT>::SerializeDeferredObjects() {
+void Serializer::SerializeDeferredObjects() {
while (!deferred_objects_.empty()) {
- HeapObject* obj = deferred_objects_.back();
+ HeapObject obj = deferred_objects_.back();
deferred_objects_.pop_back();
ObjectSerializer obj_serializer(this, obj, &sink_, kPlain, kStartOfObject);
obj_serializer.SerializeDeferred();
@@ -102,25 +97,16 @@ void Serializer<AllocatorT>::SerializeDeferredObjects() {
sink_.Put(kSynchronize, "Finished with deferred objects");
}
-template <class AllocatorT>
-bool Serializer<AllocatorT>::MustBeDeferred(HeapObject* object) {
- return false;
-}
-
-template <class AllocatorT>
-void Serializer<AllocatorT>::VisitRootPointers(Root root,
- const char* description,
- Object** start, Object** end) {
- // Builtins are serialized in a separate pass by the BuiltinSerializer.
- if (root == Root::kBuiltins || root == Root::kDispatchTable) return;
+bool Serializer::MustBeDeferred(HeapObject object) { return false; }
- for (Object** current = start; current < end; current++) {
+void Serializer::VisitRootPointers(Root root, const char* description,
+ FullObjectSlot start, FullObjectSlot end) {
+ for (FullObjectSlot current = start; current < end; ++current) {
SerializeRootObject(*current);
}
}
-template <class AllocatorT>
-void Serializer<AllocatorT>::SerializeRootObject(Object* object) {
+void Serializer::SerializeRootObject(Object object) {
if (object->IsSmi()) {
PutSmi(Smi::cast(object));
} else {
@@ -129,8 +115,7 @@ void Serializer<AllocatorT>::SerializeRootObject(Object* object) {
}
#ifdef DEBUG
-template <class AllocatorT>
-void Serializer<AllocatorT>::PrintStack() {
+void Serializer::PrintStack() {
for (const auto o : stack_) {
o->Print();
PrintF("\n");
@@ -138,11 +123,20 @@ void Serializer<AllocatorT>::PrintStack() {
}
#endif // DEBUG
-template <class AllocatorT>
-bool Serializer<AllocatorT>::SerializeHotObject(HeapObject* obj,
- HowToCode how_to_code,
- WhereToPoint where_to_point,
- int skip) {
+bool Serializer::SerializeRoot(HeapObject obj, HowToCode how_to_code,
+ WhereToPoint where_to_point, int skip) {
+ RootIndex root_index;
+ // Derived serializers are responsible for determining if the root has
+ // actually been serialized before calling this.
+ if (root_index_map()->Lookup(obj, &root_index)) {
+ PutRoot(root_index, obj, how_to_code, where_to_point, skip);
+ return true;
+ }
+ return false;
+}
+
+bool Serializer::SerializeHotObject(HeapObject obj, HowToCode how_to_code,
+ WhereToPoint where_to_point, int skip) {
if (how_to_code != kPlain || where_to_point != kStartOfObject) return false;
// Encode a reference to a hot object by its index in the working set.
int index = hot_objects_.Find(obj);
@@ -162,12 +156,10 @@ bool Serializer<AllocatorT>::SerializeHotObject(HeapObject* obj,
return true;
}
-template <class AllocatorT>
-bool Serializer<AllocatorT>::SerializeBackReference(HeapObject* obj,
- HowToCode how_to_code,
- WhereToPoint where_to_point,
- int skip) {
- SerializerReference reference = reference_map_.LookupReference(obj);
+bool Serializer::SerializeBackReference(HeapObject obj, HowToCode how_to_code,
+ WhereToPoint where_to_point, int skip) {
+ SerializerReference reference =
+ reference_map_.LookupReference(reinterpret_cast<void*>(obj.ptr()));
if (!reference.is_valid()) return false;
// Encode the location of an already deserialized object in order to write
// its location into a later object. We can encode the location as an
@@ -202,44 +194,15 @@ bool Serializer<AllocatorT>::SerializeBackReference(HeapObject* obj,
return true;
}
-template <class AllocatorT>
-bool Serializer<AllocatorT>::SerializeBuiltinReference(
- HeapObject* obj, HowToCode how_to_code, WhereToPoint where_to_point,
- int skip) {
- if (!obj->IsCode()) return false;
-
- Code* code = Code::cast(obj);
- int builtin_index = code->builtin_index();
- if (builtin_index < 0) return false;
-
- DCHECK((how_to_code == kPlain && where_to_point == kStartOfObject) ||
- (how_to_code == kFromCode));
- DCHECK_LT(builtin_index, Builtins::builtin_count);
- DCHECK_LE(0, builtin_index);
-
- if (FLAG_trace_serializer) {
- PrintF(" Encoding builtin reference: %s\n",
- isolate()->builtins()->name(builtin_index));
- }
-
- FlushSkip(skip);
- sink_.Put(kBuiltin + how_to_code + where_to_point, "Builtin");
- sink_.PutInt(builtin_index, "builtin_index");
-
- return true;
-}
-
-template <class AllocatorT>
-bool Serializer<AllocatorT>::ObjectIsBytecodeHandler(HeapObject* obj) const {
+bool Serializer::ObjectIsBytecodeHandler(HeapObject obj) const {
if (!obj->IsCode()) return false;
return (Code::cast(obj)->kind() == Code::BYTECODE_HANDLER);
}
-template <class AllocatorT>
-void Serializer<AllocatorT>::PutRoot(
- RootIndex root, HeapObject* object,
- SerializerDeserializer::HowToCode how_to_code,
- SerializerDeserializer::WhereToPoint where_to_point, int skip) {
+void Serializer::PutRoot(RootIndex root, HeapObject object,
+ SerializerDeserializer::HowToCode how_to_code,
+ SerializerDeserializer::WhereToPoint where_to_point,
+ int skip) {
int root_index = static_cast<int>(root);
if (FLAG_trace_serializer) {
PrintF(" Encoding root %d:", root_index);
@@ -268,16 +231,16 @@ void Serializer<AllocatorT>::PutRoot(
}
}
-template <class AllocatorT>
-void Serializer<AllocatorT>::PutSmi(Smi* smi) {
+void Serializer::PutSmi(Smi smi) {
sink_.Put(kOnePointerRawData, "Smi");
- byte* bytes = reinterpret_cast<byte*>(&smi);
- for (int i = 0; i < kPointerSize; i++) sink_.Put(bytes[i], "Byte");
+ Tagged_t raw_value = static_cast<Tagged_t>(smi.ptr());
+ byte bytes[kTaggedSize];
+ memcpy(bytes, &raw_value, kTaggedSize);
+ for (int i = 0; i < kTaggedSize; i++) sink_.Put(bytes[i], "Byte");
}
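
PutSmi now writes the Smi's raw tagged value byte for byte instead of reinterpreting a pointer. A self-contained sketch of the round trip this relies on, assuming an uncompressed 8-byte tagged size for illustration; the deserializer reads the same kTaggedSize bytes back:

    #include <cstdint>
    #include <cstring>

    using Tagged_t = uintptr_t;              // assumption: uncompressed tagged value
    constexpr int kTaggedSize = sizeof(Tagged_t);

    void EncodeSmi(Tagged_t raw_value, unsigned char* out) {
      std::memcpy(out, &raw_value, kTaggedSize);  // write side, as in PutSmi
    }

    Tagged_t DecodeSmi(const unsigned char* in) {
      Tagged_t raw_value;                    // read side, in the deserializer
      std::memcpy(&raw_value, in, kTaggedSize);
      return raw_value;
    }
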
-template <class AllocatorT>
-void Serializer<AllocatorT>::PutBackReference(HeapObject* object,
- SerializerReference reference) {
+void Serializer::PutBackReference(HeapObject object,
+ SerializerReference reference) {
DCHECK(allocator()->BackReferenceIsAlreadyAllocated(reference));
switch (reference.space()) {
case MAP_SPACE:
@@ -297,10 +260,9 @@ void Serializer<AllocatorT>::PutBackReference(HeapObject* object,
hot_objects_.Add(object);
}
-template <class AllocatorT>
-void Serializer<AllocatorT>::PutAttachedReference(SerializerReference reference,
- HowToCode how_to_code,
- WhereToPoint where_to_point) {
+void Serializer::PutAttachedReference(SerializerReference reference,
+ HowToCode how_to_code,
+ WhereToPoint where_to_point) {
DCHECK(reference.is_attached_reference());
DCHECK((how_to_code == kPlain && where_to_point == kStartOfObject) ||
(how_to_code == kFromCode && where_to_point == kStartOfObject) ||
@@ -309,8 +271,7 @@ void Serializer<AllocatorT>::PutAttachedReference(SerializerReference reference,
sink_.PutInt(reference.attached_reference_index(), "AttachedRefIndex");
}
-template <class AllocatorT>
-int Serializer<AllocatorT>::PutAlignmentPrefix(HeapObject* object) {
+int Serializer::PutAlignmentPrefix(HeapObject object) {
AllocationAlignment alignment = HeapObject::RequiredAlignment(object->map());
if (alignment != kWordAligned) {
DCHECK(1 <= alignment && alignment <= 3);
@@ -321,14 +282,12 @@ int Serializer<AllocatorT>::PutAlignmentPrefix(HeapObject* object) {
return 0;
}
-template <class AllocatorT>
-void Serializer<AllocatorT>::PutNextChunk(int space) {
+void Serializer::PutNextChunk(int space) {
sink_.Put(kNextChunk, "NextChunk");
sink_.Put(space, "NextChunkSpace");
}
-template <class AllocatorT>
-void Serializer<AllocatorT>::Pad(int padding_offset) {
+void Serializer::Pad(int padding_offset) {
// The non-branching GetInt will read up to 3 bytes too far, so we need
// to pad the snapshot to make sure we don't read over the end.
for (unsigned i = 0; i < sizeof(int32_t) - 1; i++) {
@@ -340,26 +299,25 @@ void Serializer<AllocatorT>::Pad(int padding_offset) {
}
}
-template <class AllocatorT>
-void Serializer<AllocatorT>::InitializeCodeAddressMap() {
+void Serializer::InitializeCodeAddressMap() {
isolate_->InitializeLoggingAndCounters();
code_address_map_ = new CodeAddressMap(isolate_);
}
-template <class AllocatorT>
-Code* Serializer<AllocatorT>::CopyCode(Code* code) {
+Code Serializer::CopyCode(Code code) {
code_buffer_.clear(); // Clear buffer without deleting backing store.
int size = code->CodeSize();
code_buffer_.insert(code_buffer_.end(),
reinterpret_cast<byte*>(code->address()),
reinterpret_cast<byte*>(code->address() + size));
- return Code::cast(HeapObject::FromAddress(
+  // When pointer compression is enabled, the checked cast will try to
+  // decompress the map field of the off-heap Code object.
+ return Code::unchecked_cast(HeapObject::FromAddress(
reinterpret_cast<Address>(&code_buffer_.front())));
}
-template <class AllocatorT>
-void Serializer<AllocatorT>::ObjectSerializer::SerializePrologue(
- AllocationSpace space, int size, Map* map) {
+void Serializer::ObjectSerializer::SerializePrologue(AllocationSpace space,
+ int size, Map map) {
if (serializer_->code_address_map_) {
const char* code_name =
serializer_->code_address_map_->Lookup(object_->address());
@@ -372,11 +330,7 @@ void Serializer<AllocatorT>::ObjectSerializer::SerializePrologue(
sink_->Put(kNewObject + reference_representation_ + space,
"NewLargeObject");
sink_->PutInt(size >> kObjectAlignmentBits, "ObjectSizeInWords");
- if (object_->IsCode()) {
- sink_->Put(EXECUTABLE, "executable large object");
- } else {
- sink_->Put(NOT_EXECUTABLE, "not executable large object");
- }
+ CHECK(!object_->IsCode());
back_reference = serializer_->allocator()->AllocateLargeObject(size);
} else if (space == MAP_SPACE) {
DCHECK_EQ(Map::kSize, size);
@@ -398,14 +352,14 @@ void Serializer<AllocatorT>::ObjectSerializer::SerializePrologue(
#endif // OBJECT_PRINT
// Mark this object as already serialized.
- serializer_->reference_map()->Add(object_, back_reference);
+ serializer_->reference_map()->Add(reinterpret_cast<void*>(object_.ptr()),
+ back_reference);
// Serialize the map (first word of the object).
serializer_->SerializeObject(map, kPlain, kStartOfObject, 0);
}
-template <class AllocatorT>
-int32_t Serializer<AllocatorT>::ObjectSerializer::SerializeBackingStore(
+int32_t Serializer::ObjectSerializer::SerializeBackingStore(
void* backing_store, int32_t byte_length) {
SerializerReference reference =
serializer_->reference_map()->LookupReference(backing_store);
@@ -424,16 +378,15 @@ int32_t Serializer<AllocatorT>::ObjectSerializer::SerializeBackingStore(
return static_cast<int32_t>(reference.off_heap_backing_store_index());
}
-template <class AllocatorT>
-void Serializer<AllocatorT>::ObjectSerializer::SerializeJSTypedArray() {
- JSTypedArray* typed_array = JSTypedArray::cast(object_);
- FixedTypedArrayBase* elements =
+void Serializer::ObjectSerializer::SerializeJSTypedArray() {
+ JSTypedArray typed_array = JSTypedArray::cast(object_);
+ FixedTypedArrayBase elements =
FixedTypedArrayBase::cast(typed_array->elements());
- if (!typed_array->WasNeutered()) {
+ if (!typed_array->WasDetached()) {
if (!typed_array->is_on_heap()) {
// Explicitly serialize the backing store now.
- JSArrayBuffer* buffer = JSArrayBuffer::cast(typed_array->buffer());
+ JSArrayBuffer buffer = JSArrayBuffer::cast(typed_array->buffer());
CHECK_LE(buffer->byte_length(), Smi::kMaxValue);
CHECK_LE(typed_array->byte_offset(), Smi::kMaxValue);
int32_t byte_length = static_cast<int32_t>(buffer->byte_length());
@@ -449,22 +402,22 @@ void Serializer<AllocatorT>::ObjectSerializer::SerializeJSTypedArray() {
// The external_pointer is the backing_store + typed_array->byte_offset.
// To properly share the buffer, we set the backing store ref here. On
// deserialization we re-add the byte_offset to external_pointer.
- elements->set_external_pointer(Smi::FromInt(ref));
+ elements->set_external_pointer(
+ reinterpret_cast<void*>(Smi::FromInt(ref).ptr()));
}
} else {
- // When a JSArrayBuffer is neutered, the FixedTypedArray that points to the
+ // When a JSArrayBuffer is detached, the FixedTypedArray that points to the
// same backing store does not know anything about it. This fixup step finds
- // neutered TypedArrays and clears the values in the FixedTypedArray so that
+ // detached TypedArrays and clears the values in the FixedTypedArray so that
// we don't try to serialize the now invalid backing store.
- elements->set_external_pointer(Smi::kZero);
+ elements->set_external_pointer(reinterpret_cast<void*>(Smi::kZero.ptr()));
elements->set_length(0);
}
SerializeObject();
}
-template <class AllocatorT>
-void Serializer<AllocatorT>::ObjectSerializer::SerializeJSArrayBuffer() {
- JSArrayBuffer* buffer = JSArrayBuffer::cast(object_);
+void Serializer::ObjectSerializer::SerializeJSArrayBuffer() {
+ JSArrayBuffer buffer = JSArrayBuffer::cast(object_);
void* backing_store = buffer->backing_store();
// We cannot store byte_length larger than Smi range in the snapshot.
CHECK_LE(buffer->byte_length(), Smi::kMaxValue);
@@ -473,14 +426,13 @@ void Serializer<AllocatorT>::ObjectSerializer::SerializeJSArrayBuffer() {
// The embedder-allocated backing store only exists for the off-heap case.
if (backing_store != nullptr) {
int32_t ref = SerializeBackingStore(backing_store, byte_length);
- buffer->set_backing_store(Smi::FromInt(ref));
+ buffer->set_backing_store(reinterpret_cast<void*>(Smi::FromInt(ref).ptr()));
}
SerializeObject();
buffer->set_backing_store(backing_store);
}
-template <class AllocatorT>
-void Serializer<AllocatorT>::ObjectSerializer::SerializeExternalString() {
+void Serializer::ObjectSerializer::SerializeExternalString() {
Heap* heap = serializer_->isolate()->heap();
// For external strings with known resources, we replace the resource field
// with the encoded external reference, which we restore upon deserialize.
@@ -488,7 +440,7 @@ void Serializer<AllocatorT>::ObjectSerializer::SerializeExternalString() {
// with the native source id.
// For the rest we serialize them to look like ordinary sequential strings.
if (object_->map() != ReadOnlyRoots(heap).native_source_string_map()) {
- ExternalString* string = ExternalString::cast(object_);
+ ExternalString string = ExternalString::cast(object_);
Address resource = string->resource_as_address();
ExternalReferenceEncoder::Value reference;
if (serializer_->external_reference_encoder_.TryEncode(resource).To(
@@ -501,7 +453,7 @@ void Serializer<AllocatorT>::ObjectSerializer::SerializeExternalString() {
SerializeExternalStringAsSequentialString();
}
} else {
- ExternalOneByteString* string = ExternalOneByteString::cast(object_);
+ ExternalOneByteString string = ExternalOneByteString::cast(object_);
DCHECK(string->is_uncached());
const NativesExternalStringResource* resource =
reinterpret_cast<const NativesExternalStringResource*>(
@@ -514,17 +466,15 @@ void Serializer<AllocatorT>::ObjectSerializer::SerializeExternalString() {
}
}
-template <class AllocatorT>
-void Serializer<
- AllocatorT>::ObjectSerializer::SerializeExternalStringAsSequentialString() {
+void Serializer::ObjectSerializer::SerializeExternalStringAsSequentialString() {
// Instead of serializing this as an external string, we serialize
// an imaginary sequential string with the same content.
ReadOnlyRoots roots(serializer_->isolate());
DCHECK(object_->IsExternalString());
DCHECK(object_->map() != roots.native_source_string_map());
- ExternalString* string = ExternalString::cast(object_);
+ ExternalString string = ExternalString::cast(object_);
int length = string->length();
- Map* map;
+ Map map;
int content_size;
int allocation_size;
const byte* resource;
@@ -576,8 +526,7 @@ void Serializer<
// TODO(all): replace this with proper iteration of weak slots in serializer.
class UnlinkWeakNextScope {
public:
- explicit UnlinkWeakNextScope(Heap* heap, HeapObject* object)
- : object_(nullptr) {
+ explicit UnlinkWeakNextScope(Heap* heap, HeapObject object) {
if (object->IsAllocationSite() &&
AllocationSite::cast(object)->HasWeakNext()) {
object_ = object;
@@ -588,20 +537,19 @@ class UnlinkWeakNextScope {
}
~UnlinkWeakNextScope() {
- if (object_ != nullptr) {
+ if (!object_.is_null()) {
AllocationSite::cast(object_)->set_weak_next(next_,
UPDATE_WEAK_WRITE_BARRIER);
}
}
private:
- HeapObject* object_;
- Object* next_;
- DisallowHeapAllocation no_gc_;
+ HeapObject object_;
+ Object next_;
+ DISALLOW_HEAP_ALLOCATION(no_gc_);
};
-template <class AllocatorT>
-void Serializer<AllocatorT>::ObjectSerializer::Serialize() {
+void Serializer::ObjectSerializer::Serialize() {
if (FLAG_trace_serializer) {
PrintF(" Encoding heap object: ");
object_->ShortPrint();
@@ -636,25 +584,27 @@ void Serializer<AllocatorT>::ObjectSerializer::Serialize() {
if (object_->IsScript()) {
// Clear cached line ends.
- Object* undefined = ReadOnlyRoots(serializer_->isolate()).undefined_value();
+ Object undefined = ReadOnlyRoots(serializer_->isolate()).undefined_value();
Script::cast(object_)->set_line_ends(undefined);
}
SerializeObject();
}
-template <class AllocatorT>
-void Serializer<AllocatorT>::ObjectSerializer::SerializeObject() {
+void Serializer::ObjectSerializer::SerializeObject() {
int size = object_->Size();
- Map* map = object_->map();
+ Map map = object_->map();
AllocationSpace space =
- MemoryChunk::FromAddress(object_->address())->owner()->identity();
- DCHECK(space != NEW_LO_SPACE);
+ MemoryChunk::FromHeapObject(object_)->owner()->identity();
+ // Young generation large objects are tenured.
+ if (space == NEW_LO_SPACE) {
+ space = LO_SPACE;
+ }
SerializePrologue(space, size, map);
// Serialize the rest of the object.
CHECK_EQ(0, bytes_processed_so_far_);
- bytes_processed_so_far_ = kPointerSize;
+ bytes_processed_so_far_ = kTaggedSize;
RecursionScope recursion(serializer_);
// Objects that are immediately post processed during deserialization
@@ -669,8 +619,7 @@ void Serializer<AllocatorT>::ObjectSerializer::SerializeObject() {
SerializeContent(map, size);
}
-template <class AllocatorT>
-void Serializer<AllocatorT>::ObjectSerializer::SerializeDeferred() {
+void Serializer::ObjectSerializer::SerializeDeferred() {
if (FLAG_trace_serializer) {
PrintF(" Encoding deferred heap object: ");
object_->ShortPrint();
@@ -678,26 +627,25 @@ void Serializer<AllocatorT>::ObjectSerializer::SerializeDeferred() {
}
int size = object_->Size();
- Map* map = object_->map();
+ Map map = object_->map();
SerializerReference back_reference =
- serializer_->reference_map()->LookupReference(object_);
+ serializer_->reference_map()->LookupReference(
+ reinterpret_cast<void*>(object_.ptr()));
DCHECK(back_reference.is_back_reference());
// Serialize the rest of the object.
CHECK_EQ(0, bytes_processed_so_far_);
- bytes_processed_so_far_ = kPointerSize;
+ bytes_processed_so_far_ = kTaggedSize;
serializer_->PutAlignmentPrefix(object_);
sink_->Put(kNewObject + back_reference.space(), "deferred object");
serializer_->PutBackReference(object_, back_reference);
- sink_->PutInt(size >> kPointerSizeLog2, "deferred object size");
+ sink_->PutInt(size >> kTaggedSizeLog2, "deferred object size");
SerializeContent(map, size);
}
-template <class AllocatorT>
-void Serializer<AllocatorT>::ObjectSerializer::SerializeContent(Map* map,
- int size) {
+void Serializer::ObjectSerializer::SerializeContent(Map map, int size) {
UnlinkWeakNextScope unlink_weak_next(serializer_->isolate()->heap(), object_);
if (object_->IsCode()) {
// For code objects, output raw bytes first.
@@ -714,26 +662,31 @@ void Serializer<AllocatorT>::ObjectSerializer::SerializeContent(Map* map,
}
}
-template <class AllocatorT>
-void Serializer<AllocatorT>::ObjectSerializer::VisitPointers(HeapObject* host,
- Object** start,
- Object** end) {
- VisitPointers(host, reinterpret_cast<MaybeObject**>(start),
- reinterpret_cast<MaybeObject**>(end));
+void Serializer::ObjectSerializer::VisitPointers(HeapObject host,
+ ObjectSlot start,
+ ObjectSlot end) {
+ VisitPointers(host, MaybeObjectSlot(start), MaybeObjectSlot(end));
}
-template <class AllocatorT>
-void Serializer<AllocatorT>::ObjectSerializer::VisitPointers(
- HeapObject* host, MaybeObject** start, MaybeObject** end) {
- MaybeObject** current = start;
+void Serializer::ObjectSerializer::VisitPointers(HeapObject host,
+ MaybeObjectSlot start,
+ MaybeObjectSlot end) {
+ MaybeObjectSlot current = start;
while (current < end) {
- while (current < end && ((*current)->IsSmi() || (*current)->IsCleared())) {
- current++;
+ while (current < end && (*current)->IsSmi()) {
+ ++current;
}
if (current < end) {
- OutputRawData(reinterpret_cast<Address>(current));
+ OutputRawData(current.address());
}
- HeapObject* current_contents;
+ // TODO(ishell): Revisit this change once we stick to 32-bit compressed
+ // tagged values.
+ while (current < end && (*current)->IsCleared()) {
+ sink_->Put(kClearedWeakReference, "ClearedWeakReference");
+ bytes_processed_so_far_ += kTaggedSize;
+ ++current;
+ }
+ HeapObject current_contents;
HeapObjectReferenceType reference_type;
while (current < end &&
(*current)->GetHeapObject(&current_contents, &reference_type)) {
@@ -743,17 +696,17 @@ void Serializer<AllocatorT>::ObjectSerializer::VisitPointers(
if (current != start &&
serializer_->root_index_map()->Lookup(current_contents,
&root_index) &&
- Heap::RootIsImmortalImmovable(root_index) &&
- *current == current[-1]) {
+ RootsTable::IsImmortalImmovable(root_index) &&
+ *current == *(current - 1)) {
DCHECK_EQ(reference_type, HeapObjectReferenceType::STRONG);
DCHECK(!Heap::InNewSpace(current_contents));
int repeat_count = 1;
- while (&current[repeat_count] < end - 1 &&
- current[repeat_count] == *current) {
+ while (current + repeat_count < end - 1 &&
+ *(current + repeat_count) == *current) {
repeat_count++;
}
current += repeat_count;
- bytes_processed_so_far_ += repeat_count * kPointerSize;
+ bytes_processed_so_far_ += repeat_count * kTaggedSize;
if (repeat_count > kNumberOfFixedRepeat) {
sink_->Put(kVariableRepeat, "VariableRepeat");
sink_->PutInt(repeat_count, "repeat count");
@@ -766,27 +719,25 @@ void Serializer<AllocatorT>::ObjectSerializer::VisitPointers(
}
serializer_->SerializeObject(current_contents, kPlain, kStartOfObject,
0);
- bytes_processed_so_far_ += kPointerSize;
- current++;
+ bytes_processed_so_far_ += kTaggedSize;
+ ++current;
}
}
}
}
-template <class AllocatorT>
-void Serializer<AllocatorT>::ObjectSerializer::VisitEmbeddedPointer(
- Code* host, RelocInfo* rinfo) {
+void Serializer::ObjectSerializer::VisitEmbeddedPointer(Code host,
+ RelocInfo* rinfo) {
int skip = SkipTo(rinfo->target_address_address());
HowToCode how_to_code = rinfo->IsCodedSpecially() ? kFromCode : kPlain;
- Object* object = rinfo->target_object();
+ Object object = rinfo->target_object();
serializer_->SerializeObject(HeapObject::cast(object), how_to_code,
kStartOfObject, skip);
bytes_processed_so_far_ += rinfo->target_address_size();
}
-template <class AllocatorT>
-void Serializer<AllocatorT>::ObjectSerializer::VisitExternalReference(
- Foreign* host, Address* p) {
+void Serializer::ObjectSerializer::VisitExternalReference(Foreign host,
+ Address* p) {
int skip = SkipTo(reinterpret_cast<Address>(p));
Address target = *p;
auto encoded_reference = serializer_->EncodeExternalReference(target);
@@ -797,12 +748,11 @@ void Serializer<AllocatorT>::ObjectSerializer::VisitExternalReference(
}
sink_->PutInt(skip, "SkipB4ExternalRef");
sink_->PutInt(encoded_reference.index(), "reference index");
- bytes_processed_so_far_ += kPointerSize;
+ bytes_processed_so_far_ += kSystemPointerSize;
}
-template <class AllocatorT>
-void Serializer<AllocatorT>::ObjectSerializer::VisitExternalReference(
- Code* host, RelocInfo* rinfo) {
+void Serializer::ObjectSerializer::VisitExternalReference(Code host,
+ RelocInfo* rinfo) {
int skip = SkipTo(rinfo->target_address_address());
Address target = rinfo->target_external_reference();
auto encoded_reference = serializer_->EncodeExternalReference(target);
@@ -820,9 +770,8 @@ void Serializer<AllocatorT>::ObjectSerializer::VisitExternalReference(
bytes_processed_so_far_ += rinfo->target_address_size();
}
-template <class AllocatorT>
-void Serializer<AllocatorT>::ObjectSerializer::VisitInternalReference(
- Code* host, RelocInfo* rinfo) {
+void Serializer::ObjectSerializer::VisitInternalReference(Code host,
+ RelocInfo* rinfo) {
// We do not use skip from last patched pc to find the pc to patch, since
// target_address_address may not return addresses in ascending order when
// used for internal references. External references may be stored at the
@@ -844,9 +793,8 @@ void Serializer<AllocatorT>::ObjectSerializer::VisitInternalReference(
sink_->PutInt(target_offset, "internal ref value");
}
-template <class AllocatorT>
-void Serializer<AllocatorT>::ObjectSerializer::VisitRuntimeEntry(
- Code* host, RelocInfo* rinfo) {
+void Serializer::ObjectSerializer::VisitRuntimeEntry(Code host,
+ RelocInfo* rinfo) {
int skip = SkipTo(rinfo->target_address_address());
HowToCode how_to_code = rinfo->IsCodedSpecially() ? kFromCode : kPlain;
Address target = rinfo->target_address();
@@ -858,54 +806,41 @@ void Serializer<AllocatorT>::ObjectSerializer::VisitRuntimeEntry(
bytes_processed_so_far_ += rinfo->target_address_size();
}
-template <class AllocatorT>
-void Serializer<AllocatorT>::ObjectSerializer::VisitOffHeapTarget(
- Code* host, RelocInfo* rinfo) {
+void Serializer::ObjectSerializer::VisitOffHeapTarget(Code host,
+ RelocInfo* rinfo) {
DCHECK(FLAG_embedded_builtins);
- {
- STATIC_ASSERT(EmbeddedData::kTableSize == Builtins::builtin_count);
- CHECK(Builtins::IsIsolateIndependentBuiltin(host));
- Address addr = rinfo->target_off_heap_target();
- CHECK_NE(kNullAddress, addr);
- CHECK_NOT_NULL(
- InstructionStream::TryLookupCode(serializer_->isolate(), addr));
- }
+ STATIC_ASSERT(EmbeddedData::kTableSize == Builtins::builtin_count);
+
+ Address addr = rinfo->target_off_heap_target();
+ CHECK_NE(kNullAddress, addr);
+
+ Code target = InstructionStream::TryLookupCode(serializer_->isolate(), addr);
+ CHECK(Builtins::IsIsolateIndependentBuiltin(target));
int skip = SkipTo(rinfo->target_address_address());
sink_->Put(kOffHeapTarget, "OffHeapTarget");
sink_->PutInt(skip, "SkipB4OffHeapTarget");
- sink_->PutInt(host->builtin_index(), "builtin index");
+ sink_->PutInt(target->builtin_index(), "builtin index");
bytes_processed_so_far_ += rinfo->target_address_size();
}
namespace {
+
class CompareRelocInfo {
public:
bool operator()(RelocInfo x, RelocInfo y) {
// Everything that does not use target_address_address will compare equal.
Address x_num = 0;
Address y_num = 0;
- if (HasTargetAddressAddress(x.rmode())) {
- x_num = x.target_address_address();
- }
- if (HasTargetAddressAddress(y.rmode())) {
- y_num = y.target_address_address();
- }
+ if (x.HasTargetAddressAddress()) x_num = x.target_address_address();
+ if (y.HasTargetAddressAddress()) y_num = y.target_address_address();
return x_num > y_num;
}
-
- private:
- static bool HasTargetAddressAddress(RelocInfo::Mode mode) {
- return RelocInfo::IsEmbeddedObject(mode) || RelocInfo::IsCodeTarget(mode) ||
- RelocInfo::IsExternalReference(mode) ||
- RelocInfo::IsRuntimeEntry(mode);
- }
};
+
} // namespace
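
Note: the comparator above intentionally inverts the ordering (x_num > y_num), so the std::priority_queue used in VisitRelocInfo below pops entries in ascending target_address_address order, which is the order the serializer must emit them in. A minimal standalone sketch of the same min-heap trick (illustrative only, not V8 code):

    #include <cstdio>
    #include <functional>
    #include <queue>
    #include <vector>

    int main() {
      // std::greater flips the default max-heap into a min-heap, just as the
      // inverted CompareRelocInfo comparator does for RelocInfo entries.
      std::priority_queue<int, std::vector<int>, std::greater<int>> q;
      for (int v : {30, 10, 20}) q.push(v);
      while (!q.empty()) {
        std::printf("%d\n", q.top());  // prints 10, 20, 30: ascending order
        q.pop();
      }
    }
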
-template <class AllocatorT>
-void Serializer<AllocatorT>::ObjectSerializer::VisitRelocInfo(
- RelocIterator* it) {
+void Serializer::ObjectSerializer::VisitRelocInfo(RelocIterator* it) {
std::priority_queue<RelocInfo, std::vector<RelocInfo>, CompareRelocInfo>
reloc_queue;
for (; !it->done(); it->next()) {
@@ -918,17 +853,18 @@ void Serializer<AllocatorT>::ObjectSerializer::VisitRelocInfo(
}
}
-template <class AllocatorT>
-void Serializer<AllocatorT>::ObjectSerializer::VisitCodeTarget(
- Code* host, RelocInfo* rinfo) {
+void Serializer::ObjectSerializer::VisitCodeTarget(Code host,
+ RelocInfo* rinfo) {
+#ifdef V8_TARGET_ARCH_ARM
+ DCHECK(!RelocInfo::IsRelativeCodeTarget(rinfo->rmode()));
+#endif
int skip = SkipTo(rinfo->target_address_address());
- Code* object = Code::GetCodeFromTargetAddress(rinfo->target_address());
+ Code object = Code::GetCodeFromTargetAddress(rinfo->target_address());
serializer_->SerializeObject(object, kFromCode, kInnerPointer, skip);
bytes_processed_so_far_ += rinfo->target_address_size();
}
-template <class AllocatorT>
-void Serializer<AllocatorT>::ObjectSerializer::OutputRawData(Address up_to) {
+void Serializer::ObjectSerializer::OutputRawData(Address up_to) {
Address object_start = object_->address();
int base = bytes_processed_so_far_;
int up_to_offset = static_cast<int>(up_to - object_start);
@@ -938,9 +874,9 @@ void Serializer<AllocatorT>::ObjectSerializer::OutputRawData(Address up_to) {
DCHECK_GE(to_skip, 0);
if (bytes_to_output != 0) {
DCHECK(to_skip == bytes_to_output);
- if (IsAligned(bytes_to_output, kPointerAlignment) &&
- bytes_to_output <= kNumberOfFixedRawData * kPointerSize) {
- int size_in_words = bytes_to_output >> kPointerSizeLog2;
+ if (IsAligned(bytes_to_output, kObjectAlignment) &&
+ bytes_to_output <= kNumberOfFixedRawData * kTaggedSize) {
+ int size_in_words = bytes_to_output >> kTaggedSizeLog2;
sink_->PutSection(kFixedRawDataStart + size_in_words, "FixedRawData");
} else {
sink_->Put(kVariableRawData, "VariableRawData");
@@ -974,8 +910,7 @@ void Serializer<AllocatorT>::ObjectSerializer::OutputRawData(Address up_to) {
}
}
-template <class AllocatorT>
-int Serializer<AllocatorT>::ObjectSerializer::SkipTo(Address to) {
+int Serializer::ObjectSerializer::SkipTo(Address to) {
Address object_start = object_->address();
int up_to_offset = static_cast<int>(to - object_start);
int to_skip = up_to_offset - bytes_processed_so_far_;
@@ -987,13 +922,12 @@ int Serializer<AllocatorT>::ObjectSerializer::SkipTo(Address to) {
return to_skip;
}
-template <class AllocatorT>
-void Serializer<AllocatorT>::ObjectSerializer::OutputCode(int size) {
- DCHECK_EQ(kPointerSize, bytes_processed_so_far_);
- Code* code = Code::cast(object_);
+void Serializer::ObjectSerializer::OutputCode(int size) {
+ DCHECK_EQ(kTaggedSize, bytes_processed_so_far_);
+ Code on_heap_code = Code::cast(object_);
// To make snapshots reproducible, we make a copy of the code object
// and wipe all pointers in the copy, which we then serialize.
- code = serializer_->CopyCode(code);
+ Code off_heap_code = serializer_->CopyCode(on_heap_code);
int mode_mask = RelocInfo::ModeMask(RelocInfo::CODE_TARGET) |
RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
RelocInfo::ModeMask(RelocInfo::EXTERNAL_REFERENCE) |
@@ -1001,15 +935,20 @@ void Serializer<AllocatorT>::ObjectSerializer::OutputCode(int size) {
RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE_ENCODED) |
RelocInfo::ModeMask(RelocInfo::OFF_HEAP_TARGET) |
RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY);
- for (RelocIterator it(code, mode_mask); !it.done(); it.next()) {
+ // With enabled pointer compression normal accessors no longer work for
+ // off-heap objects, so we have to get the relocation info data via the
+ // on-heap code object.
+ ByteArray relocation_info = on_heap_code->unchecked_relocation_info();
+ for (RelocIterator it(off_heap_code, relocation_info, mode_mask); !it.done();
+ it.next()) {
RelocInfo* rinfo = it.rinfo();
rinfo->WipeOut();
}
// We need to wipe out the header fields *after* wiping out the
// relocations, because some of these fields are needed for the latter.
- code->WipeOutHeader();
+ off_heap_code->WipeOutHeader();
- Address start = code->address() + Code::kDataStart;
+ Address start = off_heap_code->address() + Code::kDataStart;
int bytes_to_output = size - Code::kDataStart;
sink_->Put(kVariableRawCode, "VariableRawCode");
@@ -1023,9 +962,5 @@ void Serializer<AllocatorT>::ObjectSerializer::OutputCode(int size) {
sink_->PutRaw(reinterpret_cast<byte*>(start), bytes_to_output, "Code");
}
-// Explicit instantiation.
-template class Serializer<BuiltinSerializerAllocator>;
-template class Serializer<DefaultSerializerAllocator>;
-
} // namespace internal
} // namespace v8
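
Note: two themes run through the serializer.cc changes above. First, the Serializer<AllocatorT> template collapses into a plain Serializer now that only one allocator remains. Second, byte accounting moves from kPointerSize to kTaggedSize in preparation for pointer compression, where a tagged slot may be narrower than a system pointer (external references, which hold raw pointers, switch to kSystemPointerSize instead). The repeat encoding in VisitPointers (kVariableRepeat and the fixed-repeat opcodes) collapses runs of identical immortal-immovable root pointers; a minimal sketch of that run-length idea, with assumed names and plain ints standing in for slots:

    #include <cstddef>
    #include <cstdio>
    #include <vector>

    // Hypothetical encoder: emit one (value, count) record per run of equal
    // slot values instead of repeating the same reference count times.
    void EncodeWithRepeats(const std::vector<int>& slots) {
      for (size_t i = 0; i < slots.size();) {
        size_t run = 1;
        while (i + run < slots.size() && slots[i + run] == slots[i]) ++run;
        if (run > 1) {
          std::printf("repeat value=%d count=%zu\n", slots[i], run);
        } else {
          std::printf("value=%d\n", slots[i]);
        }
        i += run;
      }
    }

    int main() {
      EncodeWithRepeats({7, 7, 7, 3, 5, 5});  // repeat 7x3, single 3, repeat 5x2
    }
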
diff --git a/deps/v8/src/snapshot/serializer.h b/deps/v8/src/snapshot/serializer.h
index 5a08e4299e..9f37d6ffd9 100644
--- a/deps/v8/src/snapshot/serializer.h
+++ b/deps/v8/src/snapshot/serializer.h
@@ -7,11 +7,11 @@
#include <map>
-#include "src/instruction-stream.h"
#include "src/isolate.h"
#include "src/log.h"
#include "src/objects.h"
-#include "src/snapshot/default-serializer-allocator.h"
+#include "src/snapshot/embedded-data.h"
+#include "src/snapshot/serializer-allocator.h"
#include "src/snapshot/serializer-common.h"
#include "src/snapshot/snapshot-source-sink.h"
@@ -28,12 +28,12 @@ class CodeAddressMap : public CodeEventLogger {
isolate_->logger()->RemoveCodeEventListener(this);
}
- void CodeMoveEvent(AbstractCode* from, AbstractCode* to) override {
+ void CodeMoveEvent(AbstractCode from, AbstractCode to) override {
address_to_name_map_.Move(from->address(), to->address());
}
- void CodeDisableOptEvent(AbstractCode* code,
- SharedFunctionInfo* shared) override {}
+ void CodeDisableOptEvent(AbstractCode code,
+ SharedFunctionInfo shared) override {}
const char* Lookup(Address address) {
return address_to_name_map_.Lookup(address);
@@ -114,7 +114,7 @@ class CodeAddressMap : public CodeEventLogger {
DISALLOW_COPY_AND_ASSIGN(NameMap);
};
- void LogRecordedBuffer(AbstractCode* code, SharedFunctionInfo*,
+ void LogRecordedBuffer(AbstractCode code, SharedFunctionInfo,
const char* name, int length) override {
address_to_name_map_.Insert(code->address(), name, length);
}
@@ -127,7 +127,33 @@ class CodeAddressMap : public CodeEventLogger {
NameMap address_to_name_map_;
};
-template <class AllocatorT = DefaultSerializerAllocator>
+class ObjectCacheIndexMap {
+ public:
+ ObjectCacheIndexMap() : map_(), next_index_(0) {}
+
+ // If |obj| is in the map, immediately return true. Otherwise add it to the
+ // map and return false. In either case set |*index_out| to the index
+ // associated with the map.
+ bool LookupOrInsert(HeapObject obj, int* index_out) {
+ Maybe<uint32_t> maybe_index = map_.Get(obj);
+ if (maybe_index.IsJust()) {
+ *index_out = maybe_index.FromJust();
+ return true;
+ }
+ *index_out = next_index_;
+ map_.Set(obj, next_index_++);
+ return false;
+ }
+
+ private:
+ DisallowHeapAllocation no_allocation_;
+
+ HeapObjectToIndexHashMap map_;
+ int next_index_;
+
+ DISALLOW_COPY_AND_ASSIGN(ObjectCacheIndexMap);
+};
+
class Serializer : public SerializerDeserializer {
public:
explicit Serializer(Isolate* isolate);
@@ -139,8 +165,10 @@ class Serializer : public SerializerDeserializer {
const std::vector<byte>* Payload() const { return sink_.data(); }
- bool ReferenceMapContains(HeapObject* o) {
- return reference_map()->LookupReference(o).is_valid();
+ bool ReferenceMapContains(HeapObject o) {
+ return reference_map()
+ ->LookupReference(reinterpret_cast<void*>(o->ptr()))
+ .is_valid();
}
Isolate* isolate() const { return isolate_; }
@@ -163,48 +191,49 @@ class Serializer : public SerializerDeserializer {
};
void SerializeDeferredObjects();
- virtual void SerializeObject(HeapObject* o, HowToCode how_to_code,
+ virtual void SerializeObject(HeapObject o, HowToCode how_to_code,
WhereToPoint where_to_point, int skip) = 0;
- virtual bool MustBeDeferred(HeapObject* object);
+ virtual bool MustBeDeferred(HeapObject object);
- void VisitRootPointers(Root root, const char* description, Object** start,
- Object** end) override;
- void SerializeRootObject(Object* object);
+ void VisitRootPointers(Root root, const char* description,
+ FullObjectSlot start, FullObjectSlot end) override;
+ void SerializeRootObject(Object object);
- void PutRoot(RootIndex root_index, HeapObject* object, HowToCode how,
+ void PutRoot(RootIndex root_index, HeapObject object, HowToCode how,
WhereToPoint where, int skip);
- void PutSmi(Smi* smi);
- void PutBackReference(HeapObject* object, SerializerReference reference);
+ void PutSmi(Smi smi);
+ void PutBackReference(HeapObject object, SerializerReference reference);
void PutAttachedReference(SerializerReference reference,
HowToCode how_to_code, WhereToPoint where_to_point);
// Emit alignment prefix if necessary, return required padding space in bytes.
- int PutAlignmentPrefix(HeapObject* object);
+ int PutAlignmentPrefix(HeapObject object);
void PutNextChunk(int space);
+ // Returns true if the object was successfully serialized as a root.
+ bool SerializeRoot(HeapObject obj, HowToCode how_to_code,
+ WhereToPoint where_to_point, int skip);
+
// Returns true if the object was successfully serialized as hot object.
- bool SerializeHotObject(HeapObject* obj, HowToCode how_to_code,
+ bool SerializeHotObject(HeapObject obj, HowToCode how_to_code,
WhereToPoint where_to_point, int skip);
// Returns true if the object was successfully serialized as back reference.
- bool SerializeBackReference(HeapObject* obj, HowToCode how_to_code,
+ bool SerializeBackReference(HeapObject obj, HowToCode how_to_code,
WhereToPoint where_to_point, int skip);
- // Returns true if the object was successfully serialized as a builtin
- // reference.
- bool SerializeBuiltinReference(HeapObject* obj, HowToCode how_to_code,
- WhereToPoint where_to_point, int skip);
-
// Returns true if the given heap object is a bytecode handler code object.
- bool ObjectIsBytecodeHandler(HeapObject* obj) const;
+ bool ObjectIsBytecodeHandler(HeapObject obj) const;
- inline void FlushSkip(int skip) {
+ static inline void FlushSkip(SnapshotByteSink* sink, int skip) {
if (skip != 0) {
- sink_.Put(kSkip, "SkipFromSerializeObject");
- sink_.PutInt(skip, "SkipDistanceFromSerializeObject");
+ sink->Put(kSkip, "SkipFromSerializeObject");
+ sink->PutInt(skip, "SkipDistanceFromSerializeObject");
}
}
+ inline void FlushSkip(int skip) { FlushSkip(&sink_, skip); }
+
ExternalReferenceEncoder::Value EncodeExternalReference(Address addr) {
return external_reference_encoder_.Encode(addr);
}
@@ -217,28 +246,29 @@ class Serializer : public SerializerDeserializer {
// of the serializer. Initialize it on demand.
void InitializeCodeAddressMap();
- Code* CopyCode(Code* code);
+ Code CopyCode(Code code);
- void QueueDeferredObject(HeapObject* obj) {
- DCHECK(reference_map_.LookupReference(obj).is_back_reference());
+ void QueueDeferredObject(HeapObject obj) {
+ DCHECK(reference_map_.LookupReference(reinterpret_cast<void*>(obj->ptr()))
+ .is_back_reference());
deferred_objects_.push_back(obj);
}
void OutputStatistics(const char* name);
#ifdef OBJECT_PRINT
- void CountInstanceType(Map* map, int size, AllocationSpace space);
+ void CountInstanceType(Map map, int size, AllocationSpace space);
#endif // OBJECT_PRINT
#ifdef DEBUG
- void PushStack(HeapObject* o) { stack_.push_back(o); }
+ void PushStack(HeapObject o) { stack_.push_back(o); }
void PopStack() { stack_.pop_back(); }
void PrintStack();
#endif // DEBUG
SerializerReferenceMap* reference_map() { return &reference_map_; }
- RootIndexMap* root_index_map() { return &root_index_map_; }
- AllocatorT* allocator() { return &allocator_; }
+ const RootIndexMap* root_index_map() const { return &root_index_map_; }
+ SerializerAllocator* allocator() { return &allocator_; }
SnapshotByteSink sink_; // Used directly by subclasses.
@@ -249,9 +279,9 @@ class Serializer : public SerializerDeserializer {
RootIndexMap root_index_map_;
CodeAddressMap* code_address_map_ = nullptr;
std::vector<byte> code_buffer_;
- std::vector<HeapObject*> deferred_objects_; // To handle stack overflow.
+ std::vector<HeapObject> deferred_objects_; // To handle stack overflow.
int recursion_depth_ = 0;
- AllocatorT allocator_;
+ SerializerAllocator allocator_;
#ifdef OBJECT_PRINT
static const int kInstanceTypes = LAST_TYPE + 1;
@@ -260,20 +290,19 @@ class Serializer : public SerializerDeserializer {
#endif // OBJECT_PRINT
#ifdef DEBUG
- std::vector<HeapObject*> stack_;
+ std::vector<HeapObject> stack_;
#endif // DEBUG
- friend class DefaultSerializerAllocator;
+ friend class SerializerAllocator;
DISALLOW_COPY_AND_ASSIGN(Serializer);
};
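
Note: FlushSkip gains a static overload above so a caller can flush into any SnapshotByteSink rather than only the serializer's own sink_; the instance version now forwards to it. The shape of that refactor, with stand-in types:

    #include <cstdio>

    struct Sink {
      void Put(const char* tag, int value) { std::printf("%s %d\n", tag, value); }
    };

    struct Emitter {
      // Static overload: works against any sink the caller supplies.
      static void FlushSkip(Sink* sink, int skip) {
        if (skip != 0) sink->Put("skip", skip);
      }
      // Instance convenience overload forwards to the static one.
      void FlushSkip(int skip) { FlushSkip(&sink_, skip); }
      Sink sink_;
    };
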
class RelocInfoIterator;
-template <class AllocatorT>
-class Serializer<AllocatorT>::ObjectSerializer : public ObjectVisitor {
+class Serializer::ObjectSerializer : public ObjectVisitor {
public:
- ObjectSerializer(Serializer* serializer, HeapObject* obj,
+ ObjectSerializer(Serializer* serializer, HeapObject obj,
SnapshotByteSink* sink, HowToCode how_to_code,
WhereToPoint where_to_point)
: serializer_(serializer),
@@ -294,25 +323,26 @@ class Serializer<AllocatorT>::ObjectSerializer : public ObjectVisitor {
void Serialize();
void SerializeObject();
void SerializeDeferred();
- void VisitPointers(HeapObject* host, Object** start, Object** end) override;
- void VisitPointers(HeapObject* host, MaybeObject** start,
- MaybeObject** end) override;
- void VisitEmbeddedPointer(Code* host, RelocInfo* target) override;
- void VisitExternalReference(Foreign* host, Address* p) override;
- void VisitExternalReference(Code* host, RelocInfo* rinfo) override;
- void VisitInternalReference(Code* host, RelocInfo* rinfo) override;
- void VisitCodeTarget(Code* host, RelocInfo* target) override;
- void VisitRuntimeEntry(Code* host, RelocInfo* reloc) override;
- void VisitOffHeapTarget(Code* host, RelocInfo* target) override;
+ void VisitPointers(HeapObject host, ObjectSlot start,
+ ObjectSlot end) override;
+ void VisitPointers(HeapObject host, MaybeObjectSlot start,
+ MaybeObjectSlot end) override;
+ void VisitEmbeddedPointer(Code host, RelocInfo* target) override;
+ void VisitExternalReference(Foreign host, Address* p) override;
+ void VisitExternalReference(Code host, RelocInfo* rinfo) override;
+ void VisitInternalReference(Code host, RelocInfo* rinfo) override;
+ void VisitCodeTarget(Code host, RelocInfo* target) override;
+ void VisitRuntimeEntry(Code host, RelocInfo* reloc) override;
+ void VisitOffHeapTarget(Code host, RelocInfo* target) override;
// Relocation info needs to be visited sorted by target_address_address.
void VisitRelocInfo(RelocIterator* it) override;
private:
- void SerializePrologue(AllocationSpace space, int size, Map* map);
+ void SerializePrologue(AllocationSpace space, int size, Map map);
// This function outputs or skips the raw data between the last pointer and
// up to the current position.
- void SerializeContent(Map* map, int size);
+ void SerializeContent(Map map, int size);
void OutputRawData(Address up_to);
void OutputCode(int size);
int SkipTo(Address to);
@@ -323,9 +353,8 @@ class Serializer<AllocatorT>::ObjectSerializer : public ObjectVisitor {
void SerializeExternalStringAsSequentialString();
Serializer* serializer_;
- HeapObject* object_;
+ HeapObject object_;
SnapshotByteSink* sink_;
- std::map<void*, Smi*> backing_stores;
int reference_representation_;
int bytes_processed_so_far_;
};
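
Note: ObjectCacheIndexMap, introduced above, hands out dense indices on first lookup so object caches can be rebuilt deterministically during deserialization. A standalone sketch of the LookupOrInsert contract using std::unordered_map; the V8 version additionally blocks GC via DisallowHeapAllocation, omitted here, and all names below are illustrative:

    #include <cassert>
    #include <unordered_map>

    template <typename Key>
    class IndexMap {
     public:
      // Returns true if |key| was already present; in either case *index_out
      // receives the index assigned to |key|.
      bool LookupOrInsert(const Key& key, int* index_out) {
        auto it = map_.find(key);
        if (it != map_.end()) {
          *index_out = it->second;
          return true;
        }
        *index_out = next_index_;
        map_.emplace(key, next_index_++);
        return false;
      }

     private:
      std::unordered_map<Key, int> map_;
      int next_index_ = 0;
    };

    int main() {
      IndexMap<long> cache;  // stands in for HeapObjectToIndexHashMap
      int index;
      assert(!cache.LookupOrInsert(0x1234, &index) && index == 0);  // inserted
      assert(cache.LookupOrInsert(0x1234, &index) && index == 0);   // cached
    }
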
diff --git a/deps/v8/src/snapshot/snapshot-common.cc b/deps/v8/src/snapshot/snapshot-common.cc
index 95baef0cc0..8ee14aac9d 100644
--- a/deps/v8/src/snapshot/snapshot-common.cc
+++ b/deps/v8/src/snapshot/snapshot-common.cc
@@ -6,17 +6,10 @@
#include "src/snapshot/snapshot.h"
-#include "src/assembler-inl.h"
#include "src/base/platform/platform.h"
-#include "src/callable.h"
-#include "src/interface-descriptors.h"
-#include "src/objects-inl.h"
-#include "src/snapshot/builtin-deserializer.h"
-#include "src/snapshot/builtin-serializer.h"
+#include "src/counters.h"
#include "src/snapshot/partial-deserializer.h"
-#include "src/snapshot/snapshot-source-sink.h"
#include "src/snapshot/startup-deserializer.h"
-#include "src/utils.h"
#include "src/version.h"
namespace v8 {
@@ -39,6 +32,8 @@ bool Snapshot::HasContextSnapshot(Isolate* isolate, size_t index) {
bool Snapshot::Initialize(Isolate* isolate) {
if (!isolate->snapshot_available()) return false;
+ RuntimeCallTimerScope rcs_timer(isolate,
+ RuntimeCallCounterId::kDeserializeIsolate);
base::ElapsedTimer timer;
if (FLAG_profile_deserialization) timer.Start();
@@ -47,10 +42,10 @@ bool Snapshot::Initialize(Isolate* isolate) {
CHECK(VerifyChecksum(blob));
Vector<const byte> startup_data = ExtractStartupData(blob);
SnapshotData startup_snapshot_data(startup_data);
- Vector<const byte> builtin_data = ExtractBuiltinData(blob);
- BuiltinSnapshotData builtin_snapshot_data(builtin_data);
+ Vector<const byte> read_only_data = ExtractReadOnlyData(blob);
+ SnapshotData read_only_snapshot_data(read_only_data);
StartupDeserializer deserializer(&startup_snapshot_data,
- &builtin_snapshot_data);
+ &read_only_snapshot_data);
deserializer.SetRehashability(ExtractRehashability(blob));
bool success = isolate->Init(&deserializer);
if (FLAG_profile_deserialization) {
@@ -65,6 +60,8 @@ MaybeHandle<Context> Snapshot::NewContextFromSnapshot(
Isolate* isolate, Handle<JSGlobalProxy> global_proxy, size_t context_index,
v8::DeserializeEmbedderFieldsCallback embedder_fields_deserializer) {
if (!isolate->snapshot_available()) return Handle<Context>();
+ RuntimeCallTimerScope rcs_timer(isolate,
+ RuntimeCallCounterId::kDeserializeContext);
base::ElapsedTimer timer;
if (FLAG_profile_deserialization) timer.Start();
@@ -90,99 +87,17 @@ MaybeHandle<Context> Snapshot::NewContextFromSnapshot(
return result;
}
-// static
-Code* Snapshot::DeserializeBuiltin(Isolate* isolate, int builtin_id) {
- if (FLAG_trace_lazy_deserialization) {
- PrintF("Lazy-deserializing builtin %s\n", Builtins::name(builtin_id));
- }
-
- base::ElapsedTimer timer;
- if (FLAG_profile_deserialization) timer.Start();
-
- const v8::StartupData* blob = isolate->snapshot_blob();
- Vector<const byte> builtin_data = Snapshot::ExtractBuiltinData(blob);
- BuiltinSnapshotData builtin_snapshot_data(builtin_data);
-
- CodeSpaceMemoryModificationScope code_allocation(isolate->heap());
- BuiltinDeserializer builtin_deserializer(isolate, &builtin_snapshot_data);
- Code* code = builtin_deserializer.DeserializeBuiltin(builtin_id);
- DCHECK_EQ(code, isolate->builtins()->builtin(builtin_id));
-
- if (FLAG_profile_deserialization) {
- double ms = timer.Elapsed().InMillisecondsF();
- int bytes = code->Size();
- PrintF("[Deserializing builtin %s (%d bytes) took %0.3f ms]\n",
- Builtins::name(builtin_id), bytes, ms);
- }
-
- if (isolate->logger()->is_listening_to_code_events() ||
- isolate->is_profiling()) {
- isolate->logger()->LogCodeObject(code);
- }
-
- return code;
-}
-
-// static
-void Snapshot::EnsureAllBuiltinsAreDeserialized(Isolate* isolate) {
- if (!FLAG_lazy_deserialization) return;
-
- if (FLAG_trace_lazy_deserialization) {
- PrintF("Forcing eager builtin deserialization\n");
- }
-
- Builtins* builtins = isolate->builtins();
- for (int i = 0; i < Builtins::builtin_count; i++) {
- if (!Builtins::IsLazy(i)) continue;
-
- DCHECK_NE(Builtins::kDeserializeLazy, i);
- Code* code = builtins->builtin(i);
- if (code->builtin_index() == Builtins::LazyDeserializerForBuiltin(i)) {
- code = Snapshot::DeserializeBuiltin(isolate, i);
- }
-
- DCHECK_EQ(i, code->builtin_index());
- DCHECK_EQ(code, builtins->builtin(i));
- }
-
- // Re-initialize the dispatch table now that any bytecodes have been
- // deserialized.
- isolate->interpreter()->InitializeDispatchTable();
-}
-
-// static
-Code* Snapshot::EnsureBuiltinIsDeserialized(Isolate* isolate,
- Handle<SharedFunctionInfo> shared) {
- DCHECK(FLAG_lazy_deserialization);
-
- int builtin_id = shared->builtin_id();
-
- // We should never lazily deserialize DeserializeLazy.
- DCHECK_NE(Builtins::kDeserializeLazy, builtin_id);
-
- // Look up code from builtins list.
- Code* code = isolate->builtins()->builtin(builtin_id);
-
- // Deserialize if builtin is not on the list.
- if (code->builtin_index() != builtin_id) {
- DCHECK_EQ(code->builtin_index(), Builtins::kDeserializeLazy);
- code = Snapshot::DeserializeBuiltin(isolate, builtin_id);
- DCHECK_EQ(builtin_id, code->builtin_index());
- DCHECK_EQ(code, isolate->builtins()->builtin(builtin_id));
- }
- return code;
-}
-
void ProfileDeserialization(
- const SnapshotData* startup_snapshot, const SnapshotData* builtin_snapshot,
+ const SnapshotData* read_only_snapshot,
+ const SnapshotData* startup_snapshot,
const std::vector<SnapshotData*>& context_snapshots) {
if (FLAG_profile_deserialization) {
int startup_total = 0;
PrintF("Deserialization will reserve:\n");
- for (const auto& reservation : startup_snapshot->Reservations()) {
+ for (const auto& reservation : read_only_snapshot->Reservations()) {
startup_total += reservation.chunk_size();
}
- for (const auto& reservation : builtin_snapshot->Reservations()) {
+ for (const auto& reservation : startup_snapshot->Reservations()) {
startup_total += reservation.chunk_size();
}
PrintF("%10d bytes per isolate\n", startup_total);
@@ -198,7 +113,7 @@ void ProfileDeserialization(
v8::StartupData Snapshot::CreateSnapshotBlob(
const SnapshotData* startup_snapshot,
- const BuiltinSnapshotData* builtin_snapshot,
+ const SnapshotData* read_only_snapshot,
const std::vector<SnapshotData*>& context_snapshots, bool can_be_rehashed) {
uint32_t num_contexts = static_cast<uint32_t>(context_snapshots.size());
uint32_t startup_snapshot_offset = StartupSnapshotOffset(num_contexts);
@@ -206,14 +121,15 @@ v8::StartupData Snapshot::CreateSnapshotBlob(
DCHECK(IsAligned(total_length, kPointerAlignment));
total_length += static_cast<uint32_t>(startup_snapshot->RawData().length());
DCHECK(IsAligned(total_length, kPointerAlignment));
- total_length += static_cast<uint32_t>(builtin_snapshot->RawData().length());
+ total_length += static_cast<uint32_t>(read_only_snapshot->RawData().length());
DCHECK(IsAligned(total_length, kPointerAlignment));
for (const auto context_snapshot : context_snapshots) {
total_length += static_cast<uint32_t>(context_snapshot->RawData().length());
DCHECK(IsAligned(total_length, kPointerAlignment));
}
- ProfileDeserialization(startup_snapshot, builtin_snapshot, context_snapshots);
+ ProfileDeserialization(read_only_snapshot, startup_snapshot,
+ context_snapshots);
char* data = new char[total_length];
// Zero out pre-payload data. Part of that is only used for padding.
@@ -241,14 +157,15 @@ v8::StartupData Snapshot::CreateSnapshotBlob(
}
payload_offset += payload_length;
- // Builtins.
- SetHeaderValue(data, kBuiltinOffsetOffset, payload_offset);
- payload_length = builtin_snapshot->RawData().length();
- CopyBytes(data + payload_offset,
- reinterpret_cast<const char*>(builtin_snapshot->RawData().start()),
- payload_length);
+ // Read-only.
+ SetHeaderValue(data, kReadOnlyOffsetOffset, payload_offset);
+ payload_length = read_only_snapshot->RawData().length();
+ CopyBytes(
+ data + payload_offset,
+ reinterpret_cast<const char*>(read_only_snapshot->RawData().start()),
+ payload_length);
if (FLAG_profile_deserialization) {
- PrintF("%10d bytes for builtins\n", payload_length);
+ PrintF("%10d bytes for read-only\n", payload_length);
}
payload_offset += payload_length;
@@ -278,216 +195,6 @@ v8::StartupData Snapshot::CreateSnapshotBlob(
return result;
}
-namespace {
-bool BuiltinAliasesOffHeapTrampolineRegister(Isolate* isolate, Code* code) {
- DCHECK(Builtins::IsIsolateIndependent(code->builtin_index()));
- switch (Builtins::KindOf(code->builtin_index())) {
- case Builtins::CPP:
- case Builtins::TFC:
- case Builtins::TFH:
- case Builtins::TFJ:
- case Builtins::TFS:
- break;
-
- // Bytecode handlers (and their lazy deserializers) will only ever be used
- // by the interpreter and so there will never be a need to use trampolines
- // with them.
- case Builtins::BCH:
- case Builtins::DLH:
- case Builtins::API:
- case Builtins::ASM:
- // TODO(jgruber): Extend checks to remaining kinds.
- return false;
- }
-
- Callable callable = Builtins::CallableFor(
- isolate, static_cast<Builtins::Name>(code->builtin_index()));
- CallInterfaceDescriptor descriptor = callable.descriptor();
-
- if (descriptor.ContextRegister() == kOffHeapTrampolineRegister) {
- return true;
- }
-
- for (int i = 0; i < descriptor.GetRegisterParameterCount(); i++) {
- Register reg = descriptor.GetRegisterParameter(i);
- if (reg == kOffHeapTrampolineRegister) return true;
- }
-
- return false;
-}
-
-void FinalizeEmbeddedCodeTargets(Isolate* isolate, EmbeddedData* blob) {
- static const int kRelocMask =
- RelocInfo::ModeMask(RelocInfo::CODE_TARGET) |
- RelocInfo::ModeMask(RelocInfo::RELATIVE_CODE_TARGET);
-
- for (int i = 0; i < Builtins::builtin_count; i++) {
- if (!Builtins::IsIsolateIndependent(i)) continue;
-
- Code* code = isolate->builtins()->builtin(i);
- RelocIterator on_heap_it(code, kRelocMask);
- RelocIterator off_heap_it(blob, code, kRelocMask);
-
-#if defined(V8_TARGET_ARCH_X64) || defined(V8_TARGET_ARCH_ARM64) || \
- defined(V8_TARGET_ARCH_ARM)
- // On X64, ARM, ARM64 we emit relative builtin-to-builtin jumps for isolate
- // independent builtins in the snapshot. This fixes up the relative jumps
- // to the right offsets in the snapshot.
- // See also: Code::IsIsolateIndependent.
- while (!on_heap_it.done()) {
- DCHECK(!off_heap_it.done());
-
- RelocInfo* rinfo = on_heap_it.rinfo();
- DCHECK_EQ(rinfo->rmode(), off_heap_it.rinfo()->rmode());
- Code* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
- CHECK(Builtins::IsIsolateIndependentBuiltin(target));
-
- // Do not emit write-barrier for off-heap writes.
- off_heap_it.rinfo()->set_target_address(
- blob->InstructionStartOfBuiltin(target->builtin_index()),
- SKIP_WRITE_BARRIER);
-
- on_heap_it.next();
- off_heap_it.next();
- }
- DCHECK(off_heap_it.done());
-#else
- // Architectures other than x64 and arm/arm64 do not use pc-relative calls
- // and thus must not contain embedded code targets. Instead, we use an
- // indirection through the root register.
- CHECK(on_heap_it.done());
- CHECK(off_heap_it.done());
-#endif // defined(V8_TARGET_ARCH_X64) || defined(V8_TARGET_ARCH_ARM64)
- }
-}
-} // namespace
-
-// static
-EmbeddedData EmbeddedData::FromIsolate(Isolate* isolate) {
- Builtins* builtins = isolate->builtins();
-
- // Store instruction stream lengths and offsets.
- std::vector<struct Metadata> metadata(kTableSize);
-
- bool saw_unsafe_builtin = false;
- uint32_t raw_data_size = 0;
- for (int i = 0; i < Builtins::builtin_count; i++) {
- Code* code = builtins->builtin(i);
-
- if (Builtins::IsIsolateIndependent(i)) {
- DCHECK(!Builtins::IsLazy(i));
-
- // Sanity-check that the given builtin is isolate-independent and does not
- // use the trampoline register in its calling convention.
- if (!code->IsIsolateIndependent(isolate)) {
- saw_unsafe_builtin = true;
- fprintf(stderr, "%s is not isolate-independent.\n", Builtins::name(i));
- }
- if (Builtins::IsWasmRuntimeStub(i) &&
- RelocInfo::RequiresRelocation(code)) {
- // Wasm additionally requires that its runtime stubs must be
- // individually PIC (i.e. we must be able to copy each stub outside the
- // embedded area without relocations). In particular, that means
- // pc-relative calls to other builtins are disallowed.
- saw_unsafe_builtin = true;
- fprintf(stderr, "%s is a wasm runtime stub but needs relocation.\n",
- Builtins::name(i));
- }
- if (BuiltinAliasesOffHeapTrampolineRegister(isolate, code)) {
- saw_unsafe_builtin = true;
- fprintf(stderr, "%s aliases the off-heap trampoline register.\n",
- Builtins::name(i));
- }
-
- uint32_t length = static_cast<uint32_t>(code->raw_instruction_size());
-
- DCHECK_EQ(0, raw_data_size % kCodeAlignment);
- metadata[i].instructions_offset = raw_data_size;
- metadata[i].instructions_length = length;
-
- // Align the start of each instruction stream.
- raw_data_size += PadAndAlign(length);
- } else {
- metadata[i].instructions_offset = raw_data_size;
- }
- }
- CHECK_WITH_MSG(
- !saw_unsafe_builtin,
- "One or more builtins marked as isolate-independent either contains "
- "isolate-dependent code or aliases the off-heap trampoline register. "
- "If in doubt, ask jgruber@");
-
- const uint32_t blob_size = RawDataOffset() + raw_data_size;
- uint8_t* const blob = new uint8_t[blob_size];
- uint8_t* const raw_data_start = blob + RawDataOffset();
-
- // Initially zap the entire blob, effectively padding the alignment area
- // between two builtins with int3's (on x64/ia32).
- ZapCode(reinterpret_cast<Address>(blob), blob_size);
-
- // Write the metadata tables.
- DCHECK_EQ(MetadataSize(), sizeof(metadata[0]) * metadata.size());
- std::memcpy(blob + MetadataOffset(), metadata.data(), MetadataSize());
-
- // Write the raw data section.
- for (int i = 0; i < Builtins::builtin_count; i++) {
- if (!Builtins::IsIsolateIndependent(i)) continue;
- Code* code = builtins->builtin(i);
- uint32_t offset = metadata[i].instructions_offset;
- uint8_t* dst = raw_data_start + offset;
- DCHECK_LE(RawDataOffset() + offset + code->raw_instruction_size(),
- blob_size);
- std::memcpy(dst, reinterpret_cast<uint8_t*>(code->raw_instruction_start()),
- code->raw_instruction_size());
- }
-
- EmbeddedData d(blob, blob_size);
-
- // Fix up call targets that point to other embedded builtins.
- FinalizeEmbeddedCodeTargets(isolate, &d);
-
- // Hash the blob and store the result.
- STATIC_ASSERT(HashSize() == kSizetSize);
- const size_t hash = d.CreateHash();
- std::memcpy(blob + HashOffset(), &hash, HashSize());
-
- DCHECK_EQ(hash, d.CreateHash());
- DCHECK_EQ(hash, d.Hash());
-
- if (FLAG_serialization_statistics) d.PrintStatistics();
-
- return d;
-}
-
-EmbeddedData EmbeddedData::FromBlob() {
- const uint8_t* data = Isolate::CurrentEmbeddedBlob();
- uint32_t size = Isolate::CurrentEmbeddedBlobSize();
- DCHECK_NOT_NULL(data);
- DCHECK_LT(0, size);
- return {data, size};
-}
-
-Address EmbeddedData::InstructionStartOfBuiltin(int i) const {
- DCHECK(Builtins::IsBuiltinId(i));
- const struct Metadata* metadata = Metadata();
- const uint8_t* result = RawData() + metadata[i].instructions_offset;
- DCHECK_LE(result, data_ + size_);
- DCHECK_IMPLIES(result == data_ + size_, InstructionSizeOfBuiltin(i) == 0);
- return reinterpret_cast<Address>(result);
-}
-
-uint32_t EmbeddedData::InstructionSizeOfBuiltin(int i) const {
- DCHECK(Builtins::IsBuiltinId(i));
- const struct Metadata* metadata = Metadata();
- return metadata[i].instructions_length;
-}
-
-size_t EmbeddedData::CreateHash() const {
- STATIC_ASSERT(HashOffset() == 0);
- STATIC_ASSERT(HashSize() == kSizetSize);
- return base::hash_range(data_ + HashSize(), data_ + size_);
-}
-
uint32_t Snapshot::ExtractNumContexts(const v8::StartupData* data) {
CHECK_LT(kNumberOfContextsOffset, data->raw_size);
uint32_t num_contexts = GetHeaderValue(data, kNumberOfContextsOffset);
@@ -507,47 +214,6 @@ bool Snapshot::VerifyChecksum(const v8::StartupData* data) {
return checksum.Check(expected_a, expected_b);
}
-void EmbeddedData::PrintStatistics() const {
- DCHECK(FLAG_serialization_statistics);
-
- constexpr int kCount = Builtins::builtin_count;
-
- int embedded_count = 0;
- int instruction_size = 0;
- int sizes[kCount];
- for (int i = 0; i < kCount; i++) {
- if (!Builtins::IsIsolateIndependent(i)) continue;
- const int size = InstructionSizeOfBuiltin(i);
- instruction_size += size;
- sizes[embedded_count] = size;
- embedded_count++;
- }
-
- // Sort for percentiles.
- std::sort(&sizes[0], &sizes[embedded_count]);
-
- const int k50th = embedded_count * 0.5;
- const int k75th = embedded_count * 0.75;
- const int k90th = embedded_count * 0.90;
- const int k99th = embedded_count * 0.99;
-
- const int metadata_size = static_cast<int>(HashSize() + MetadataSize());
-
- PrintF("EmbeddedData:\n");
- PrintF(" Total size: %d\n",
- static_cast<int>(size()));
- PrintF(" Metadata size: %d\n", metadata_size);
- PrintF(" Instruction size: %d\n", instruction_size);
- PrintF(" Padding: %d\n",
- static_cast<int>(size() - metadata_size - instruction_size));
- PrintF(" Embedded builtin count: %d\n", embedded_count);
- PrintF(" Instruction size (50th percentile): %d\n", sizes[k50th]);
- PrintF(" Instruction size (75th percentile): %d\n", sizes[k75th]);
- PrintF(" Instruction size (90th percentile): %d\n", sizes[k90th]);
- PrintF(" Instruction size (99th percentile): %d\n", sizes[k99th]);
- PrintF("\n");
-}
-
uint32_t Snapshot::ExtractContextOffset(const v8::StartupData* data,
uint32_t index) {
// Extract the offset of the context at a given index from the StartupData,
@@ -563,33 +229,31 @@ bool Snapshot::ExtractRehashability(const v8::StartupData* data) {
return GetHeaderValue(data, kRehashabilityOffset) != 0;
}
-Vector<const byte> Snapshot::ExtractStartupData(const v8::StartupData* data) {
- uint32_t num_contexts = ExtractNumContexts(data);
- uint32_t startup_offset = StartupSnapshotOffset(num_contexts);
- CHECK_LT(startup_offset, data->raw_size);
- uint32_t builtin_offset = GetHeaderValue(data, kBuiltinOffsetOffset);
- CHECK_LT(builtin_offset, data->raw_size);
- CHECK_GT(builtin_offset, startup_offset);
- uint32_t startup_length = builtin_offset - startup_offset;
- const byte* startup_data =
- reinterpret_cast<const byte*>(data->data + startup_offset);
- return Vector<const byte>(startup_data, startup_length);
+namespace {
+Vector<const byte> ExtractData(const v8::StartupData* snapshot,
+ uint32_t start_offset, uint32_t end_offset) {
+ CHECK_LT(start_offset, end_offset);
+ CHECK_LT(end_offset, snapshot->raw_size);
+ uint32_t length = end_offset - start_offset;
+ const byte* data =
+ reinterpret_cast<const byte*>(snapshot->data + start_offset);
+ return Vector<const byte>(data, length);
}
+} // namespace
-Vector<const byte> Snapshot::ExtractBuiltinData(const v8::StartupData* data) {
+Vector<const byte> Snapshot::ExtractStartupData(const v8::StartupData* data) {
DCHECK(SnapshotIsValid(data));
- uint32_t from_offset = GetHeaderValue(data, kBuiltinOffsetOffset);
- CHECK_LT(from_offset, data->raw_size);
+ uint32_t num_contexts = ExtractNumContexts(data);
+ return ExtractData(data, StartupSnapshotOffset(num_contexts),
+ GetHeaderValue(data, kReadOnlyOffsetOffset));
+}
- uint32_t to_offset = GetHeaderValue(data, ContextSnapshotOffsetOffset(0));
- CHECK_LT(to_offset, data->raw_size);
+Vector<const byte> Snapshot::ExtractReadOnlyData(const v8::StartupData* data) {
+ DCHECK(SnapshotIsValid(data));
- CHECK_GT(to_offset, from_offset);
- uint32_t length = to_offset - from_offset;
- const byte* builtin_data =
- reinterpret_cast<const byte*>(data->data + from_offset);
- return Vector<const byte>(builtin_data, length);
+ return ExtractData(data, GetHeaderValue(data, kReadOnlyOffsetOffset),
+ GetHeaderValue(data, ContextSnapshotOffsetOffset(0)));
}
Vector<const byte> Snapshot::ExtractContextData(const v8::StartupData* data,
@@ -631,8 +295,7 @@ void Snapshot::CheckVersion(const v8::StartupData* data) {
}
}
-template <class AllocatorT>
-SnapshotData::SnapshotData(const Serializer<AllocatorT>* serializer) {
+SnapshotData::SnapshotData(const Serializer* serializer) {
DisallowHeapAllocation no_gc;
std::vector<Reservation> reservations = serializer->EncodeReservations();
const std::vector<byte>* payload = serializer->Payload();
@@ -653,7 +316,7 @@ SnapshotData::SnapshotData(const Serializer<AllocatorT>* serializer) {
memset(data_, 0, padded_payload_offset);
// Set header values.
- SetMagicNumber(serializer->isolate());
+ SetMagicNumber();
SetHeaderValue(kNumReservationsOffset, static_cast<int>(reservations.size()));
SetHeaderValue(kPayloadLengthOffset, static_cast<int>(payload->size()));
@@ -666,10 +329,6 @@ SnapshotData::SnapshotData(const Serializer<AllocatorT>* serializer) {
static_cast<size_t>(payload->size()));
}
-// Explicit instantiation.
-template SnapshotData::SnapshotData(
- const Serializer<DefaultSerializerAllocator>* serializer);
-
std::vector<SerializedData::Reservation> SnapshotData::Reservations() const {
uint32_t size = GetHeaderValue(kNumReservationsOffset);
std::vector<SerializedData::Reservation> reservations(size);
@@ -689,27 +348,5 @@ Vector<const byte> SnapshotData::Payload() const {
return Vector<const byte>(payload, length);
}
-BuiltinSnapshotData::BuiltinSnapshotData(const BuiltinSerializer* serializer)
- : SnapshotData(serializer) {}
-
-Vector<const byte> BuiltinSnapshotData::Payload() const {
- Vector<const byte> payload = SnapshotData::Payload();
- const int builtin_offsets_size = Builtins::builtin_count * kUInt32Size;
- DCHECK_EQ(data_ + size_, payload.start() + payload.size());
- DCHECK_GT(payload.size(), builtin_offsets_size);
- return Vector<const byte>(payload.start(),
- payload.size() - builtin_offsets_size);
-}
-
-Vector<const uint32_t> BuiltinSnapshotData::BuiltinOffsets() const {
- Vector<const byte> payload = SnapshotData::Payload();
- const int builtin_offsets_size = Builtins::builtin_count * kUInt32Size;
- DCHECK_EQ(data_ + size_, payload.start() + payload.size());
- DCHECK_GT(payload.size(), builtin_offsets_size);
- const uint32_t* data = reinterpret_cast<const uint32_t*>(
- payload.start() + payload.size() - builtin_offsets_size);
- return Vector<const uint32_t>(data, Builtins::builtin_count);
-}
-
} // namespace internal
} // namespace v8
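
Note: with builtins now living in the embedded blob, the snapshot's builtins section is replaced by a read-only section throughout snapshot-common.cc, and the two near-identical extraction routines are folded into one bounds-checked ExtractData helper. A freestanding sketch of that slicing, with assumed types in place of v8::StartupData and Vector:

    #include <cassert>
    #include <cstdint>
    #include <utility>

    using byte = uint8_t;

    // Returns a (pointer, length) view of blob[start, end), mirroring the
    // CHECK_LT pair in the ExtractData helper above.
    std::pair<const byte*, uint32_t> Slice(const byte* blob, uint32_t blob_size,
                                           uint32_t start, uint32_t end) {
      assert(start < end);
      assert(end < blob_size);
      return {blob + start, end - start};
    }
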
diff --git a/deps/v8/src/snapshot/snapshot-external.cc b/deps/v8/src/snapshot/snapshot-external.cc
index fc68a85c5f..c5f56ebb5a 100644
--- a/deps/v8/src/snapshot/snapshot-external.cc
+++ b/deps/v8/src/snapshot/snapshot-external.cc
@@ -23,8 +23,7 @@ static base::LazyMutex external_startup_data_mutex = LAZY_MUTEX_INITIALIZER;
static v8::StartupData external_startup_blob = {nullptr, 0};
void SetSnapshotFromFile(StartupData* snapshot_blob) {
- base::LockGuard<base::Mutex> lock_guard(
- external_startup_data_mutex.Pointer());
+ base::MutexGuard lock_guard(external_startup_data_mutex.Pointer());
DCHECK(snapshot_blob);
DCHECK(snapshot_blob->data);
DCHECK_GT(snapshot_blob->raw_size, 0);
@@ -35,8 +34,7 @@ void SetSnapshotFromFile(StartupData* snapshot_blob) {
const v8::StartupData* Snapshot::DefaultSnapshotBlob() {
- base::LockGuard<base::Mutex> lock_guard(
- external_startup_data_mutex.Pointer());
+ base::MutexGuard lock_guard(external_startup_data_mutex.Pointer());
return &external_startup_blob;
}
} // namespace internal
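
Note: base::MutexGuard is just a terser spelling of the old base::LockGuard<base::Mutex>; the locking behavior is unchanged. The standard-library analogue of the pattern:

    #include <mutex>

    std::mutex blob_mutex;
    int blob_ref_count = 0;

    void Touch() {
      // RAII scope lock, same idea as base::MutexGuard above.
      std::lock_guard<std::mutex> guard(blob_mutex);
      ++blob_ref_count;
    }
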
diff --git a/deps/v8/src/snapshot/snapshot-source-sink.cc b/deps/v8/src/snapshot/snapshot-source-sink.cc
index 49e0f2298a..ffc6ad0973 100644
--- a/deps/v8/src/snapshot/snapshot-source-sink.cc
+++ b/deps/v8/src/snapshot/snapshot-source-sink.cc
@@ -32,6 +32,9 @@ void SnapshotByteSink::PutRaw(const byte* data, int number_of_bytes,
data_.insert(data_.end(), data, data + number_of_bytes);
}
+void SnapshotByteSink::Append(const SnapshotByteSink& other) {
+ data_.insert(data_.end(), other.data_.begin(), other.data_.end());
+}
int SnapshotByteSource::GetBlob(const byte** data) {
int size = GetInt();
diff --git a/deps/v8/src/snapshot/snapshot-source-sink.h b/deps/v8/src/snapshot/snapshot-source-sink.h
index 8cf86526a3..04e575fed6 100644
--- a/deps/v8/src/snapshot/snapshot-source-sink.h
+++ b/deps/v8/src/snapshot/snapshot-source-sink.h
@@ -96,6 +96,8 @@ class SnapshotByteSink {
void PutInt(uintptr_t integer, const char* description);
void PutRaw(const byte* data, int number_of_bytes, const char* description);
+
+ void Append(const SnapshotByteSink& other);
int Position() const { return static_cast<int>(data_.size()); }
const std::vector<byte>* data() const { return &data_; }
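
Note: the new SnapshotByteSink::Append splices one sink's buffer onto another, which lets a serializer assemble sections in separate sinks and concatenate them at the end. It is a thin wrapper over vector range-insert; a self-contained sketch:

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    using byte = uint8_t;

    struct ByteSink {
      void Put(byte b) { data_.push_back(b); }
      void Append(const ByteSink& other) {
        data_.insert(data_.end(), other.data_.begin(), other.data_.end());
      }
      std::vector<byte> data_;
    };

    int main() {
      ByteSink head, tail;
      head.Put(1);
      tail.Put(2);
      tail.Put(3);
      head.Append(tail);
      std::printf("%zu bytes\n", head.data_.size());  // 3 bytes
    }
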
diff --git a/deps/v8/src/snapshot/snapshot.h b/deps/v8/src/snapshot/snapshot.h
index 9edc12c1ce..01da513106 100644
--- a/deps/v8/src/snapshot/snapshot.h
+++ b/deps/v8/src/snapshot/snapshot.h
@@ -15,7 +15,6 @@ namespace internal {
// Forward declarations.
class Isolate;
-class BuiltinSerializer;
class PartialSerializer;
class StartupSerializer;
@@ -23,8 +22,7 @@ class StartupSerializer;
class SnapshotData : public SerializedData {
public:
// Used when producing.
- template <class AllocatorT>
- explicit SnapshotData(const Serializer<AllocatorT>* serializer);
+ explicit SnapshotData(const Serializer* serializer);
// Used when consuming.
explicit SnapshotData(const Vector<const byte> snapshot)
@@ -52,107 +50,6 @@ class SnapshotData : public SerializedData {
static const uint32_t kHeaderSize = kPayloadLengthOffset + kUInt32Size;
};
-class BuiltinSnapshotData final : public SnapshotData {
- public:
- // Used when producing.
- // This simply forwards to the SnapshotData constructor.
- // The BuiltinSerializer appends the builtin offset table to the payload.
- explicit BuiltinSnapshotData(const BuiltinSerializer* serializer);
-
- // Used when consuming.
- explicit BuiltinSnapshotData(const Vector<const byte> snapshot)
- : SnapshotData(snapshot) {
- }
-
- // Returns the serialized payload without the builtin offsets table.
- Vector<const byte> Payload() const override;
-
- // Returns only the builtin offsets table.
- Vector<const uint32_t> BuiltinOffsets() const;
-
- private:
- // In addition to the format specified in SnapshotData, BuiltinsSnapshotData
- // includes a list of builtin at the end of the serialized payload:
- //
- // ...
- // ... serialized payload
- // ... list of builtins offsets
-};
-
-class EmbeddedData final {
- public:
- static EmbeddedData FromIsolate(Isolate* isolate);
- static EmbeddedData FromBlob();
-
- const uint8_t* data() const { return data_; }
- uint32_t size() const { return size_; }
-
- void Dispose() { delete[] data_; }
-
- Address InstructionStartOfBuiltin(int i) const;
- uint32_t InstructionSizeOfBuiltin(int i) const;
-
- bool ContainsBuiltin(int i) const { return InstructionSizeOfBuiltin(i) > 0; }
-
- // Padded with kCodeAlignment.
- uint32_t PaddedInstructionSizeOfBuiltin(int i) const {
- return PadAndAlign(InstructionSizeOfBuiltin(i));
- }
-
- size_t CreateHash() const;
- size_t Hash() const {
- return *reinterpret_cast<const size_t*>(data_ + HashOffset());
- }
-
- struct Metadata {
- // Blob layout information.
- uint32_t instructions_offset;
- uint32_t instructions_length;
- };
- STATIC_ASSERT(offsetof(Metadata, instructions_offset) == 0);
- STATIC_ASSERT(offsetof(Metadata, instructions_length) == kUInt32Size);
- STATIC_ASSERT(sizeof(Metadata) == kUInt32Size + kUInt32Size);
-
- // The layout of the blob is as follows:
- //
- // [0] hash of the remaining blob
- // [1] metadata of instruction stream 0
- // ... metadata
- // ... instruction streams
-
- static constexpr uint32_t kTableSize = Builtins::builtin_count;
- static constexpr uint32_t HashOffset() { return 0; }
- static constexpr uint32_t HashSize() { return kSizetSize; }
- static constexpr uint32_t MetadataOffset() {
- return HashOffset() + HashSize();
- }
- static constexpr uint32_t MetadataSize() {
- return sizeof(struct Metadata) * kTableSize;
- }
- static constexpr uint32_t RawDataOffset() {
- return PadAndAlign(MetadataOffset() + MetadataSize());
- }
-
- private:
- EmbeddedData(const uint8_t* data, uint32_t size) : data_(data), size_(size) {}
-
- const Metadata* Metadata() const {
- return reinterpret_cast<const struct Metadata*>(data_ + MetadataOffset());
- }
- const uint8_t* RawData() const { return data_ + RawDataOffset(); }
-
- static constexpr int PadAndAlign(int size) {
- // Ensure we have at least one byte trailing the actual builtin
- // instructions which we can later fill with int3.
- return RoundUp<kCodeAlignment>(size + 1);
- }
-
- void PrintStatistics() const;
-
- const uint8_t* data_;
- uint32_t size_;
-};
-
class Snapshot : public AllStatic {
public:
// ---------------- Deserialization ----------------
@@ -167,14 +64,6 @@ class Snapshot : public AllStatic {
size_t context_index,
v8::DeserializeEmbedderFieldsCallback embedder_fields_deserializer);
- // Deserializes a single given builtin code object. Intended to be called at
- // runtime after the isolate (and the builtins table) has been fully
- // initialized.
- static Code* DeserializeBuiltin(Isolate* isolate, int builtin_id);
- static void EnsureAllBuiltinsAreDeserialized(Isolate* isolate);
- static Code* EnsureBuiltinIsDeserialized(Isolate* isolate,
- Handle<SharedFunctionInfo> shared);
-
// ---------------- Helper methods ----------------
static bool HasContextSnapshot(Isolate* isolate, size_t index);
@@ -189,7 +78,7 @@ class Snapshot : public AllStatic {
static v8::StartupData CreateSnapshotBlob(
const SnapshotData* startup_snapshot,
- const BuiltinSnapshotData* builtin_snapshot,
+ const SnapshotData* read_only_snapshot,
const std::vector<SnapshotData*>& context_snapshots,
bool can_be_rehashed);
@@ -203,7 +92,7 @@ class Snapshot : public AllStatic {
uint32_t index);
static bool ExtractRehashability(const v8::StartupData* data);
static Vector<const byte> ExtractStartupData(const v8::StartupData* data);
- static Vector<const byte> ExtractBuiltinData(const v8::StartupData* data);
+ static Vector<const byte> ExtractReadOnlyData(const v8::StartupData* data);
static Vector<const byte> ExtractContextData(const v8::StartupData* data,
uint32_t index);
@@ -223,13 +112,13 @@ class Snapshot : public AllStatic {
// [2] checksum part A
// [3] checksum part B
// [4] (128 bytes) version string
- // [5] offset to builtins
+ // [5] offset to readonly
// [6] offset to context 0
// [7] offset to context 1
// ...
// ... offset to context N - 1
// ... startup snapshot data
- // ... builtin snapshot data
+ // ... read-only snapshot data
// ... context 0 snapshot data
// ... context 1 snapshot data
@@ -244,10 +133,10 @@ class Snapshot : public AllStatic {
static const uint32_t kVersionStringOffset =
kChecksumPartBOffset + kUInt32Size;
static const uint32_t kVersionStringLength = 64;
- static const uint32_t kBuiltinOffsetOffset =
+ static const uint32_t kReadOnlyOffsetOffset =
kVersionStringOffset + kVersionStringLength;
static const uint32_t kFirstContextOffsetOffset =
- kBuiltinOffsetOffset + kUInt32Size;
+ kReadOnlyOffsetOffset + kUInt32Size;
static Vector<const byte> ChecksummedContent(const v8::StartupData* data) {
const uint32_t kChecksumStart = kVersionStringOffset;
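
Note: slot [5] of the blob header is repurposed above from "offset to builtins" to "offset to readonly"; only the name changes, the offset arithmetic is identical. (The layout comment still says the version string is 128 bytes while kVersionStringLength is 64; both lines are unchanged context here, so the mismatch predates this change.) The offsets restated as a compile-checked sketch; the base value for the checksum slot is inferred from the [0]..[3] header indices above:

    #include <cstdint>

    constexpr uint32_t kUInt32Size = 4;
    // Slots [0]..[3] are u32s (contexts, rehashability, checksum A and B).
    constexpr uint32_t kChecksumPartBOffset = 3 * kUInt32Size;
    constexpr uint32_t kVersionStringOffset = kChecksumPartBOffset + kUInt32Size;
    constexpr uint32_t kVersionStringLength = 64;
    constexpr uint32_t kReadOnlyOffsetOffset =
        kVersionStringOffset + kVersionStringLength;
    constexpr uint32_t kFirstContextOffsetOffset =
        kReadOnlyOffsetOffset + kUInt32Size;

    static_assert(kReadOnlyOffsetOffset == 80, "read-only offset slot at 80");
    static_assert(kFirstContextOffsetOffset == 84, "context table follows it");
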
diff --git a/deps/v8/src/snapshot/startup-deserializer.cc b/deps/v8/src/snapshot/startup-deserializer.cc
index e9c23bb907..582c105ccf 100644
--- a/deps/v8/src/snapshot/startup-deserializer.cc
+++ b/deps/v8/src/snapshot/startup-deserializer.cc
@@ -6,10 +6,8 @@
#include "src/api.h"
#include "src/assembler-inl.h"
-#include "src/code-stubs.h"
-#include "src/code-tracer.h"
#include "src/heap/heap-inl.h"
-#include "src/snapshot/builtin-deserializer.h"
+#include "src/snapshot/read-only-deserializer.h"
#include "src/snapshot/snapshot.h"
namespace v8 {
@@ -18,10 +16,11 @@ namespace internal {
void StartupDeserializer::DeserializeInto(Isolate* isolate) {
Initialize(isolate);
- BuiltinDeserializer builtin_deserializer(isolate, builtin_data_);
+ ReadOnlyDeserializer read_only_deserializer(read_only_data_);
+ read_only_deserializer.SetRehashability(can_rehash());
+ read_only_deserializer.DeserializeInto(isolate);
- if (!DefaultDeserializerAllocator::ReserveSpace(this,
- &builtin_deserializer)) {
+ if (!allocator()->ReserveSpace()) {
V8::FatalProcessOutOfMemory(isolate, "StartupDeserializer");
}
@@ -37,21 +36,16 @@ void StartupDeserializer::DeserializeInto(Isolate* isolate) {
{
DisallowHeapAllocation no_gc;
isolate->heap()->IterateSmiRoots(this);
- isolate->heap()->IterateStrongRoots(this,
- VISIT_ONLY_STRONG_FOR_SERIALIZATION);
- isolate->heap()->RepairFreeListsAfterDeserialization();
+ isolate->heap()->IterateStrongRoots(this, VISIT_FOR_SERIALIZATION);
+ Iterate(isolate, this);
isolate->heap()->IterateWeakRoots(this, VISIT_FOR_SERIALIZATION);
DeserializeDeferredObjects();
RestoreExternalReferenceRedirectors(accessor_infos());
RestoreExternalReferenceRedirectors(call_handler_infos());
- // Deserialize eager builtins from the builtin snapshot. Note that deferred
- // objects must have been deserialized prior to this.
- builtin_deserializer.DeserializeEagerBuiltins();
-
// Flush the instruction cache for the entire code-space. Must happen after
// builtins deserialization.
- FlushICacheForNewIsolate();
+ FlushICache();
}
isolate->heap()->set_native_contexts_list(
@@ -63,21 +57,23 @@ void StartupDeserializer::DeserializeInto(Isolate* isolate) {
ReadOnlyRoots(isolate).undefined_value());
}
- // Issue code events for newly deserialized code objects.
- LOG_CODE_EVENT(isolate, LogCodeObjects());
- LOG_CODE_EVENT(isolate, LogCompiledFunctions());
isolate->builtins()->MarkInitialized();
- // If needed, print the disassembly of deserialized code objects.
- // Needs to be called after the builtins are marked as initialized, in order
- // to display the builtin names.
- PrintDisassembledCodeObjects();
+ LogNewMapEvents();
+
+ if (FLAG_rehash_snapshot && can_rehash()) {
+ isolate->heap()->InitializeHashSeed();
+ read_only_deserializer.RehashHeap();
+ Rehash();
+ }
+}
- if (FLAG_rehash_snapshot && can_rehash()) RehashHeap();
+void StartupDeserializer::LogNewMapEvents() {
+ if (FLAG_trace_maps) LOG(isolate_, LogAllMaps());
}
-void StartupDeserializer::FlushICacheForNewIsolate() {
+void StartupDeserializer::FlushICache() {
DCHECK(!deserializing_user_code());
// The entire isolate is newly deserialized. Simply flush all code pages.
for (Page* p : *isolate()->heap()->code_space()) {
@@ -85,37 +81,5 @@ void StartupDeserializer::FlushICacheForNewIsolate() {
}
}
-void StartupDeserializer::PrintDisassembledCodeObjects() {
-#ifdef ENABLE_DISASSEMBLER
- if (FLAG_print_builtin_code) {
- Heap* heap = isolate()->heap();
- HeapIterator iterator(heap);
- DisallowHeapAllocation no_gc;
-
- CodeTracer::Scope tracing_scope(isolate()->GetCodeTracer());
- OFStream os(tracing_scope.file());
-
- for (HeapObject* obj = iterator.next(); obj != nullptr;
- obj = iterator.next()) {
- if (obj->IsCode()) {
- Code* code = Code::cast(obj);
- // Printing of builtins and bytecode handlers is handled during their
- // deserialization.
- if (code->kind() != Code::BUILTIN &&
- code->kind() != Code::BYTECODE_HANDLER) {
- code->PrintBuiltinCode(isolate(), nullptr);
- }
- }
- }
- }
-#endif
-}
-
-void StartupDeserializer::RehashHeap() {
- DCHECK(FLAG_rehash_snapshot && can_rehash());
- isolate()->heap()->InitializeHashSeed();
- Rehash();
-}
-
} // namespace internal
} // namespace v8
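The reordered DeserializeInto() above establishes a fixed sequence: read-only heap first, then startup roots, then an optional rehash pass that now covers both heaps with a single seed. A condensed sketch of that sequence (names as in the hunk; the surrounding root iteration is omitted):

    // Sketch: order of operations in the new DeserializeInto().
    ReadOnlyDeserializer read_only_deserializer(read_only_data_);
    read_only_deserializer.SetRehashability(can_rehash());
    read_only_deserializer.DeserializeInto(isolate);  // read-only heap first
    // ... smi/strong/weak roots, deferred objects, external references ...
    if (FLAG_rehash_snapshot && can_rehash()) {
      isolate->heap()->InitializeHashSeed();  // one seed for both heaps
      read_only_deserializer.RehashHeap();    // rehash the read-only space
      Rehash();                               // rehash the regular spaces
    }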
diff --git a/deps/v8/src/snapshot/startup-deserializer.h b/deps/v8/src/snapshot/startup-deserializer.h
index 6e1b5db332..cfe89f01e2 100644
--- a/deps/v8/src/snapshot/startup-deserializer.h
+++ b/deps/v8/src/snapshot/startup-deserializer.h
@@ -12,23 +12,20 @@ namespace v8 {
namespace internal {
// Initializes an isolate with context-independent data from a given snapshot.
-class StartupDeserializer final : public Deserializer<> {
+class StartupDeserializer final : public Deserializer {
public:
StartupDeserializer(const SnapshotData* startup_data,
- const BuiltinSnapshotData* builtin_data)
- : Deserializer(startup_data, false), builtin_data_(builtin_data) {}
+ const SnapshotData* read_only_data)
+ : Deserializer(startup_data, false), read_only_data_(read_only_data) {}
// Deserialize the snapshot into an empty heap.
void DeserializeInto(Isolate* isolate);
private:
- void FlushICacheForNewIsolate();
- void PrintDisassembledCodeObjects();
+ void FlushICache();
+ void LogNewMapEvents();
- // Rehash after deserializing an isolate.
- void RehashHeap();
-
- const BuiltinSnapshotData* builtin_data_;
+ const SnapshotData* read_only_data_;
};
} // namespace internal
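Call sites change accordingly: the second constructor argument is now the read-only SnapshotData rather than a BuiltinSnapshotData. A minimal sketch of a caller, assuming both blobs have already been extracted from the snapshot (the wrapper name is illustrative):

    // Sketch only; blob extraction is assumed to happen elsewhere.
    void DeserializeStartupSnapshot(Isolate* isolate,
                                    const SnapshotData* startup_data,
                                    const SnapshotData* read_only_data) {
      StartupDeserializer deserializer(startup_data, read_only_data);
      deserializer.DeserializeInto(isolate);
    }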
diff --git a/deps/v8/src/snapshot/startup-serializer.cc b/deps/v8/src/snapshot/startup-serializer.cc
index 146d413de8..2e64423ea5 100644
--- a/deps/v8/src/snapshot/startup-serializer.cc
+++ b/deps/v8/src/snapshot/startup-serializer.cc
@@ -6,15 +6,21 @@
#include "src/api.h"
#include "src/code-tracer.h"
+#include "src/contexts.h"
#include "src/global-handles.h"
#include "src/objects-inl.h"
+#include "src/objects/foreign-inl.h"
+#include "src/objects/slots.h"
+#include "src/snapshot/read-only-serializer.h"
#include "src/v8threads.h"
namespace v8 {
namespace internal {
-StartupSerializer::StartupSerializer(Isolate* isolate)
- : Serializer(isolate), can_be_rehashed_(true) {
+StartupSerializer::StartupSerializer(Isolate* isolate,
+ ReadOnlySerializer* read_only_serializer)
+ : RootsSerializer(isolate, RootIndex::kFirstStrongRoot),
+ read_only_serializer_(read_only_serializer) {
InitializeCodeAddressMap();
}
@@ -24,26 +30,39 @@ StartupSerializer::~StartupSerializer() {
OutputStatistics("StartupSerializer");
}
-void StartupSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
+namespace {
+
+// Due to how we currently create the embedded blob, we may encounter both
+// off-heap trampolines and old, outdated full Code objects during
+// serialization. This ensures that we only serialize the canonical version of
+// each builtin.
+// See also CreateOffHeapTrampolines().
+HeapObject MaybeCanonicalizeBuiltin(Isolate* isolate, HeapObject obj) {
+ if (!obj->IsCode()) return obj;
+
+ const int builtin_index = Code::cast(obj)->builtin_index();
+ if (!Builtins::IsBuiltinId(builtin_index)) return obj;
+
+ return isolate->builtins()->builtin(builtin_index);
+}
+
+} // namespace
+
+void StartupSerializer::SerializeObject(HeapObject obj, HowToCode how_to_code,
WhereToPoint where_to_point, int skip) {
- DCHECK(!ObjectIsBytecodeHandler(obj)); // Only referenced in dispatch table.
DCHECK(!obj->IsJSFunction());
- if (SerializeBuiltinReference(obj, how_to_code, where_to_point, skip)) {
- return;
- }
- if (SerializeHotObject(obj, how_to_code, where_to_point, skip)) return;
-
- RootIndex root_index;
- // We can only encode roots as such if they have already been serialized.
- // That applies to root indices below the wave front.
- if (root_index_map()->Lookup(obj, &root_index)) {
- if (root_has_been_serialized(root_index)) {
- PutRoot(root_index, obj, how_to_code, where_to_point, skip);
- return;
- }
- }
+ // TODO(jgruber): Remove canonicalization once off-heap trampoline creation
+ // moves to Isolate::Init().
+ obj = MaybeCanonicalizeBuiltin(isolate(), obj);
+ if (SerializeHotObject(obj, how_to_code, where_to_point, skip)) return;
+ if (IsRootAndHasBeenSerialized(obj) &&
+ SerializeRoot(obj, how_to_code, where_to_point, skip))
+ return;
+ if (SerializeUsingReadOnlyObjectCache(&sink_, obj, how_to_code,
+ where_to_point, skip))
+ return;
if (SerializeBackReference(obj, how_to_code, where_to_point, skip)) return;
FlushSkip(skip);
@@ -54,12 +73,12 @@ void StartupSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
if (use_simulator && obj->IsAccessorInfo()) {
// Wipe external reference redirects in the accessor info.
- AccessorInfo* info = AccessorInfo::cast(obj);
+ AccessorInfo info = AccessorInfo::cast(obj);
Address original_address = Foreign::cast(info->getter())->foreign_address();
Foreign::cast(info->js_getter())->set_foreign_address(original_address);
accessor_infos_.push_back(info);
} else if (use_simulator && obj->IsCallHandlerInfo()) {
- CallHandlerInfo* info = CallHandlerInfo::cast(obj);
+ CallHandlerInfo info = CallHandlerInfo::cast(obj);
Address original_address =
Foreign::cast(info->callback())->foreign_address();
Foreign::cast(info->js_callback())->set_foreign_address(original_address);
@@ -69,7 +88,7 @@ void StartupSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
ReadOnlyRoots(isolate()).uninitialized_symbol());
} else if (obj->IsSharedFunctionInfo()) {
// Clear inferred name for native functions.
- SharedFunctionInfo* shared = SharedFunctionInfo::cast(obj);
+ SharedFunctionInfo shared = SharedFunctionInfo::cast(obj);
if (!shared->IsSubjectToDebugging() && shared->HasUncompiledData()) {
shared->uncompiled_data()->set_inferred_name(
ReadOnlyRoots(isolate()).empty_string());
@@ -79,6 +98,7 @@ void StartupSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
CheckRehashability(obj);
// Object has not yet been serialized. Serialize it here.
+ DCHECK(!isolate()->heap()->read_only_space()->Contains(obj));
ObjectSerializer object_serializer(this, obj, &sink_, how_to_code,
where_to_point);
object_serializer.Serialize();
@@ -88,29 +108,14 @@ void StartupSerializer::SerializeWeakReferencesAndDeferred() {
// This comes right after serialization of the partial snapshot, where we
// add entries to the partial snapshot cache of the startup snapshot. Add
// one entry with 'undefined' to terminate the partial snapshot cache.
- Object* undefined = ReadOnlyRoots(isolate()).undefined_value();
- VisitRootPointer(Root::kPartialSnapshotCache, nullptr, &undefined);
+ Object undefined = ReadOnlyRoots(isolate()).undefined_value();
+ VisitRootPointer(Root::kPartialSnapshotCache, nullptr,
+ FullObjectSlot(&undefined));
isolate()->heap()->IterateWeakRoots(this, VISIT_FOR_SERIALIZATION);
SerializeDeferredObjects();
Pad();
}
-int StartupSerializer::PartialSnapshotCacheIndex(HeapObject* heap_object) {
- int index;
- if (!partial_cache_index_map_.LookupOrInsert(heap_object, &index)) {
- // This object is not part of the partial snapshot cache yet. Add it to the
- // startup snapshot so we can refer to it via partial snapshot index from
- // the partial snapshot.
- VisitRootPointer(Root::kPartialSnapshotCache, nullptr,
- reinterpret_cast<Object**>(&heap_object));
- }
- return index;
-}
-
-void StartupSerializer::Synchronize(VisitorSynchronization::SyncTag tag) {
- sink_.Put(kSynchronize, "Synchronize");
-}
-
void StartupSerializer::SerializeStrongReferences() {
Isolate* isolate = this->isolate();
// No active threads.
@@ -128,63 +133,43 @@ void StartupSerializer::SerializeStrongReferences() {
isolate->heap()->IterateStrongRoots(this, VISIT_FOR_SERIALIZATION);
}
-void StartupSerializer::VisitRootPointers(Root root, const char* description,
- Object** start, Object** end) {
- if (start == isolate()->heap()->roots_array_start()) {
- // Serializing the root list needs special handling:
- // - Only root list elements that have been fully serialized can be
- // referenced using kRootArray bytecodes.
- for (Object** current = start; current < end; current++) {
- SerializeRootObject(*current);
- size_t root_index = static_cast<size_t>(current - start);
- root_has_been_serialized_.set(root_index);
- }
- } else {
- Serializer::VisitRootPointers(root, description, start, end);
+SerializedHandleChecker::SerializedHandleChecker(Isolate* isolate,
+ std::vector<Context>* contexts)
+ : isolate_(isolate) {
+ AddToSet(isolate->heap()->serialized_objects());
+ for (auto const& context : *contexts) {
+ AddToSet(context->serialized_objects());
}
}
-void StartupSerializer::CheckRehashability(HeapObject* obj) {
- if (!can_be_rehashed_) return;
- if (!obj->NeedsRehashing()) return;
- if (obj->CanBeRehashed()) return;
- can_be_rehashed_ = false;
+bool StartupSerializer::SerializeUsingReadOnlyObjectCache(
+ SnapshotByteSink* sink, HeapObject obj, HowToCode how_to_code,
+ WhereToPoint where_to_point, int skip) {
+ return read_only_serializer_->SerializeUsingReadOnlyObjectCache(
+ sink, obj, how_to_code, where_to_point, skip);
}
-bool StartupSerializer::MustBeDeferred(HeapObject* object) {
- if (root_has_been_serialized(RootIndex::kFreeSpaceMap) &&
- root_has_been_serialized(RootIndex::kOnePointerFillerMap) &&
- root_has_been_serialized(RootIndex::kTwoPointerFillerMap)) {
- // All required root objects are serialized, so any aligned objects can
- // be saved without problems.
- return false;
- }
- // Just defer everything except Map objects until all required roots are
- // serialized. Some objects may have special alignment requirements that may
- // not be fulfilled during deserialization until the first few root objects
- // are serialized. But we must serialize Map objects since the deserializer
- // checks that these root objects are indeed Maps.
- return !object->IsMap();
-}
+void StartupSerializer::SerializeUsingPartialSnapshotCache(
+ SnapshotByteSink* sink, HeapObject obj, HowToCode how_to_code,
+ WhereToPoint where_to_point, int skip) {
+ FlushSkip(sink, skip);
-SerializedHandleChecker::SerializedHandleChecker(
- Isolate* isolate, std::vector<Context*>* contexts)
- : isolate_(isolate) {
- AddToSet(isolate->heap()->serialized_objects());
- for (auto const& context : *contexts) {
- AddToSet(context->serialized_objects());
- }
+ int cache_index = SerializeInObjectCache(obj);
+ sink->Put(kPartialSnapshotCache + how_to_code + where_to_point,
+ "PartialSnapshotCache");
+ sink->PutInt(cache_index, "partial_snapshot_cache_index");
}
-void SerializedHandleChecker::AddToSet(FixedArray* serialized) {
+void SerializedHandleChecker::AddToSet(FixedArray serialized) {
int length = serialized->length();
for (int i = 0; i < length; i++) serialized_.insert(serialized->get(i));
}
void SerializedHandleChecker::VisitRootPointers(Root root,
const char* description,
- Object** start, Object** end) {
- for (Object** p = start; p < end; p++) {
+ FullObjectSlot start,
+ FullObjectSlot end) {
+ for (FullObjectSlot p = start; p < end; ++p) {
if (serialized_.find(*p) != serialized_.end()) continue;
PrintF("%s handle not serialized: ",
root == Root::kGlobalHandles ? "global" : "eternal");
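The rewritten SerializeObject() tries progressively cheaper encodings before emitting a full object body; read-only objects in particular are encoded as cache references and never serialized here (hence the DCHECK against read_only_space() above). The dispatch order, condensed from the hunk:

    // Sketch of the dispatch order in StartupSerializer::SerializeObject().
    obj = MaybeCanonicalizeBuiltin(isolate(), obj);  // canonical builtin Code
    if (SerializeHotObject(obj, how_to_code, where_to_point, skip)) return;
    if (IsRootAndHasBeenSerialized(obj) &&
        SerializeRoot(obj, how_to_code, where_to_point, skip)) {
      return;
    }
    if (SerializeUsingReadOnlyObjectCache(&sink_, obj, how_to_code,
                                          where_to_point, skip)) {
      return;
    }
    if (SerializeBackReference(obj, how_to_code, where_to_point, skip)) return;
    // Fall through: emit the object body via ObjectSerializer.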
diff --git a/deps/v8/src/snapshot/startup-serializer.h b/deps/v8/src/snapshot/startup-serializer.h
index 0b2065c3d0..31c3081103 100644
--- a/deps/v8/src/snapshot/startup-serializer.h
+++ b/deps/v8/src/snapshot/startup-serializer.h
@@ -5,16 +5,20 @@
#ifndef V8_SNAPSHOT_STARTUP_SERIALIZER_H_
#define V8_SNAPSHOT_STARTUP_SERIALIZER_H_
-#include <bitset>
-#include "include/v8.h"
-#include "src/snapshot/serializer.h"
+#include <unordered_set>
+
+#include "src/snapshot/roots-serializer.h"
namespace v8 {
namespace internal {
-class StartupSerializer : public Serializer<> {
+class HeapObject;
+class SnapshotByteSink;
+class ReadOnlySerializer;
+
+class StartupSerializer : public RootsSerializer {
public:
- explicit StartupSerializer(Isolate* isolate);
+ StartupSerializer(Isolate* isolate, ReadOnlySerializer* read_only_serializer);
~StartupSerializer() override;
// Serialize the current state of the heap. The order is:
@@ -25,73 +29,44 @@ class StartupSerializer : public Serializer<> {
void SerializeStrongReferences();
void SerializeWeakReferencesAndDeferred();
- int PartialSnapshotCacheIndex(HeapObject* o);
-
- bool can_be_rehashed() const { return can_be_rehashed_; }
- bool root_has_been_serialized(RootIndex root_index) const {
- return root_has_been_serialized_.test(static_cast<size_t>(root_index));
- }
+ // If |obj| can be serialized in the read-only snapshot, then this adds it
+ // to the read-only object cache (if not already present) and emits a
+ // ReadOnlyObjectCache bytecode into |sink|. Returns whether this was
+ // successful.
+ bool SerializeUsingReadOnlyObjectCache(SnapshotByteSink* sink, HeapObject obj,
+ HowToCode how_to_code,
+ WhereToPoint where_to_point, int skip);
+
+ // Adds |obj| to the partial snapshot object cache if not already present and
+ // emits a PartialSnapshotCache bytecode into |sink|.
+ void SerializeUsingPartialSnapshotCache(SnapshotByteSink* sink,
+ HeapObject obj, HowToCode how_to_code,
+ WhereToPoint where_to_point,
+ int skip);
private:
- class PartialCacheIndexMap {
- public:
- PartialCacheIndexMap() : map_(), next_index_(0) {}
-
- // Lookup object in the map. Return its index if found, or create
- // a new entry with new_index as value, and return kInvalidIndex.
- bool LookupOrInsert(HeapObject* obj, int* index_out) {
- Maybe<uint32_t> maybe_index = map_.Get(obj);
- if (maybe_index.IsJust()) {
- *index_out = maybe_index.FromJust();
- return true;
- }
- *index_out = next_index_;
- map_.Set(obj, next_index_++);
- return false;
- }
-
- private:
- DisallowHeapAllocation no_allocation_;
- HeapObjectToIndexHashMap map_;
- int next_index_;
-
- DISALLOW_COPY_AND_ASSIGN(PartialCacheIndexMap);
- };
-
- // The StartupSerializer has to serialize the root array, which is slightly
- // different.
- void VisitRootPointers(Root root, const char* description, Object** start,
- Object** end) override;
- void SerializeObject(HeapObject* o, HowToCode how_to_code,
+ void SerializeObject(HeapObject o, HowToCode how_to_code,
WhereToPoint where_to_point, int skip) override;
- void Synchronize(VisitorSynchronization::SyncTag tag) override;
- bool MustBeDeferred(HeapObject* object) override;
-
- void CheckRehashability(HeapObject* obj);
- std::bitset<RootsTable::kEntriesCount> root_has_been_serialized_;
- PartialCacheIndexMap partial_cache_index_map_;
- std::vector<AccessorInfo*> accessor_infos_;
- std::vector<CallHandlerInfo*> call_handler_infos_;
- // Indicates whether we only serialized hash tables that we can rehash.
- // TODO(yangguo): generalize rehashing, and remove this flag.
- bool can_be_rehashed_;
+ ReadOnlySerializer* read_only_serializer_;
+ std::vector<AccessorInfo> accessor_infos_;
+ std::vector<CallHandlerInfo> call_handler_infos_;
DISALLOW_COPY_AND_ASSIGN(StartupSerializer);
};
class SerializedHandleChecker : public RootVisitor {
public:
- SerializedHandleChecker(Isolate* isolate, std::vector<Context*>* contexts);
- void VisitRootPointers(Root root, const char* description, Object** start,
- Object** end) override;
+ SerializedHandleChecker(Isolate* isolate, std::vector<Context>* contexts);
+ void VisitRootPointers(Root root, const char* description,
+ FullObjectSlot start, FullObjectSlot end) override;
bool CheckGlobalAndEternalHandles();
private:
- void AddToSet(FixedArray* serialized);
+ void AddToSet(FixedArray serialized);
Isolate* isolate_;
- std::unordered_set<Object*> serialized_;
+ std::unordered_set<Object, Object::Hasher> serialized_;
bool ok_ = true;
};
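Both new cache helpers follow the same shape: resolve (or create) a cache index for |obj|, then emit a bytecode plus that index so the deserializer can resolve the reference later. A hypothetical caller, e.g. a context serializer that encounters an object owned by the startup snapshot (the wrapper name is illustrative, not part of this patch):

    // Hypothetical helper; HowToCode/WhereToPoint are the serializer enums.
    void EmitStartupCacheReference(StartupSerializer* startup_serializer,
                                   SnapshotByteSink* sink, HeapObject obj,
                                   HowToCode how_to_code,
                                   WhereToPoint where_to_point, int skip) {
      startup_serializer->SerializeUsingPartialSnapshotCache(
          sink, obj, how_to_code, where_to_point, skip);
    }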
diff --git a/deps/v8/src/source-position-table.cc b/deps/v8/src/source-position-table.cc
index 6ae24533df..22ab4dfe11 100644
--- a/deps/v8/src/source-position-table.cc
+++ b/deps/v8/src/source-position-table.cc
@@ -106,7 +106,7 @@ void DecodeEntry(Vector<const byte> bytes, int* index,
entry->source_position = DecodeInt<int64_t>(bytes, index);
}
-Vector<const byte> VectorFromByteArray(ByteArray* byte_array) {
+Vector<const byte> VectorFromByteArray(ByteArray byte_array) {
return Vector<const byte>(byte_array->GetDataStartAddress(),
byte_array->length());
}
@@ -164,7 +164,7 @@ Handle<ByteArray> SourcePositionTableBuilder::ToSourcePositionTable(
#ifdef ENABLE_SLOW_DCHECKS
// Brute force testing: Record all positions and decode
// the entire table to verify they are identical.
- SourcePositionTableIterator it(*table);
+ SourcePositionTableIterator it(*table, SourcePositionTableIterator::kAll);
CheckTableEquals(raw_entries_, it);
// No additional source positions after creating the table.
mode_ = OMIT_SOURCE_POSITIONS;
@@ -181,7 +181,8 @@ OwnedVector<byte> SourcePositionTableBuilder::ToSourcePositionTableVector() {
#ifdef ENABLE_SLOW_DCHECKS
// Brute force testing: Record all positions and decode
// the entire table to verify they are identical.
- SourcePositionTableIterator it(table.as_vector());
+ SourcePositionTableIterator it(table.as_vector(),
+ SourcePositionTableIterator::kAll);
CheckTableEquals(raw_entries_, it);
// No additional source positions after creating the table.
mode_ = OMIT_SOURCE_POSITIONS;
@@ -189,25 +190,30 @@ OwnedVector<byte> SourcePositionTableBuilder::ToSourcePositionTableVector() {
return table;
}
-SourcePositionTableIterator::SourcePositionTableIterator(ByteArray* byte_array)
- : raw_table_(VectorFromByteArray(byte_array)) {
+SourcePositionTableIterator::SourcePositionTableIterator(ByteArray byte_array,
+ IterationFilter filter)
+ : raw_table_(VectorFromByteArray(byte_array)), filter_(filter) {
Advance();
}
SourcePositionTableIterator::SourcePositionTableIterator(
- Handle<ByteArray> byte_array)
- : table_(byte_array) {
+ Handle<ByteArray> byte_array, IterationFilter filter)
+ : table_(byte_array), filter_(filter) {
Advance();
+#ifdef DEBUG
// We can enable allocation because we keep the table in a handle.
no_gc.Release();
+#endif // DEBUG
}
SourcePositionTableIterator::SourcePositionTableIterator(
- Vector<const byte> bytes)
- : raw_table_(bytes) {
+ Vector<const byte> bytes, IterationFilter filter)
+ : raw_table_(bytes), filter_(filter) {
Advance();
+#ifdef DEBUG
// We can enable allocation because the underlying vector does not move.
no_gc.Release();
+#endif // DEBUG
}
void SourcePositionTableIterator::Advance() {
@@ -215,12 +221,19 @@ void SourcePositionTableIterator::Advance() {
table_.is_null() ? raw_table_ : VectorFromByteArray(*table_);
DCHECK(!done());
DCHECK(index_ >= 0 && index_ <= bytes.length());
- if (index_ >= bytes.length()) {
- index_ = kDone;
- } else {
- PositionTableEntry tmp;
- DecodeEntry(bytes, &index_, &tmp);
- AddAndSetEntry(current_, tmp);
+ bool filter_satisfied = false;
+ while (!done() && !filter_satisfied) {
+ if (index_ >= bytes.length()) {
+ index_ = kDone;
+ } else {
+ PositionTableEntry tmp;
+ DecodeEntry(bytes, &index_, &tmp);
+ AddAndSetEntry(current_, tmp);
+ SourcePosition p = source_position();
+ filter_satisfied = (filter_ == kAll) ||
+ (filter_ == kJavaScriptOnly && p.IsJavaScript()) ||
+ (filter_ == kExternalOnly && p.IsExternal());
+ }
}
}
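Advance() now loops until an entry satisfies the iterator's filter, so existing callers using the default (kJavaScriptOnly) transparently skip external positions. Iterating everything requires an explicit filter; a sketch, assuming byte_array holds an encoded position table:

    // Sketch: walk all entries, external ones included.
    SourcePositionTableIterator it(byte_array,
                                   SourcePositionTableIterator::kAll);
    for (; !it.done(); it.Advance()) {
      SourcePosition pos = it.source_position();
      if (pos.IsExternal()) {
        // Entry refers to a line in a .cc/.tq file, not a script offset.
      }
    }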
diff --git a/deps/v8/src/source-position-table.h b/deps/v8/src/source-position-table.h
index 60853bc938..8f676dc0f3 100644
--- a/deps/v8/src/source-position-table.h
+++ b/deps/v8/src/source-position-table.h
@@ -59,10 +59,13 @@ class V8_EXPORT_PRIVATE SourcePositionTableBuilder {
class V8_EXPORT_PRIVATE SourcePositionTableIterator {
public:
+ enum IterationFilter { kJavaScriptOnly = 0, kExternalOnly = 1, kAll = 2 };
+
// Used for saving/restoring the iterator.
- struct IndexAndPosition {
+ struct IndexAndPositionState {
int index_;
PositionTableEntry position_;
+ IterationFilter filter_;
};
// We expose three flavours of the iterator, depending on the argument passed
@@ -70,16 +73,19 @@ class V8_EXPORT_PRIVATE SourcePositionTableIterator {
// Handlified iterator allows allocation, but it needs a handle (and thus
// a handle scope). This is the preferred version.
- explicit SourcePositionTableIterator(Handle<ByteArray> byte_array);
+ explicit SourcePositionTableIterator(
+ Handle<ByteArray> byte_array, IterationFilter filter = kJavaScriptOnly);
// Non-handlified iterator does not need a handle scope, but it disallows
// allocation during its lifetime. This is useful if there is no handle
// scope around.
- explicit SourcePositionTableIterator(ByteArray* byte_array);
+ explicit SourcePositionTableIterator(
+ ByteArray byte_array, IterationFilter filter = kJavaScriptOnly);
// Handle-safe iterator based on a vector located outside the garbage
// collected heap, allows allocation during its lifetime.
- explicit SourcePositionTableIterator(Vector<const byte> bytes);
+ explicit SourcePositionTableIterator(
+ Vector<const byte> bytes, IterationFilter filter = kJavaScriptOnly);
void Advance();
@@ -97,11 +103,12 @@ class V8_EXPORT_PRIVATE SourcePositionTableIterator {
}
bool done() const { return index_ == kDone; }
- IndexAndPosition GetState() const { return {index_, current_}; }
+ IndexAndPositionState GetState() const { return {index_, current_, filter_}; }
- void RestoreState(const IndexAndPosition& saved_state) {
+ void RestoreState(const IndexAndPositionState& saved_state) {
index_ = saved_state.index_;
current_ = saved_state.position_;
+ filter_ = saved_state.filter_;
}
private:
@@ -111,7 +118,8 @@ class V8_EXPORT_PRIVATE SourcePositionTableIterator {
Handle<ByteArray> table_;
int index_ = 0;
PositionTableEntry current_;
- DisallowHeapAllocation no_gc;
+ IterationFilter filter_;
+ DISALLOW_HEAP_ALLOCATION(no_gc);
};
} // namespace internal
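Since the filter is now part of the saved state, a GetState()/RestoreState() round trip also restores filtering behaviour. A sketch, with target_offset as an illustrative scan bound:

    // Sketch: look ahead, then rewind the iterator completely.
    SourcePositionTableIterator::IndexAndPositionState saved = it.GetState();
    while (!it.done() && it.code_offset() < target_offset) {
      it.Advance();
    }
    it.RestoreState(saved);  // index, position, and filter all restored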
diff --git a/deps/v8/src/source-position.cc b/deps/v8/src/source-position.cc
index 2df5380a24..3d7ac98462 100644
--- a/deps/v8/src/source-position.cc
+++ b/deps/v8/src/source-position.cc
@@ -37,7 +37,12 @@ std::ostream& operator<<(std::ostream& out, const SourcePosition& pos) {
} else {
out << "<not inlined:";
}
- out << pos.ScriptOffset() << ">";
+
+ if (pos.IsExternal()) {
+ out << pos.ExternalLine() << ", " << pos.ExternalFileId() << ">";
+ } else {
+ out << pos.ScriptOffset() << ">";
+ }
return out;
}
@@ -76,16 +81,16 @@ std::vector<SourcePositionInfo> SourcePosition::InliningStack(
}
void SourcePosition::Print(std::ostream& out,
- SharedFunctionInfo* function) const {
+ SharedFunctionInfo function) const {
Script::PositionInfo pos;
- Object* source_name = nullptr;
+ Object source_name;
if (function->script()->IsScript()) {
- Script* script = Script::cast(function->script());
+ Script script = Script::cast(function->script());
source_name = script->name();
script->GetPositionInfo(ScriptOffset(), &pos, Script::WITH_OFFSET);
}
out << "<";
- if (source_name != nullptr && source_name->IsString()) {
+ if (source_name->IsString()) {
out << String::cast(source_name)
->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL)
.get();
@@ -96,15 +101,21 @@ void SourcePosition::Print(std::ostream& out,
}
void SourcePosition::PrintJson(std::ostream& out) const {
- out << "{ \"scriptOffset\" : " << ScriptOffset() << ", "
- << " \"inliningId\" : " << InliningId() << "}";
+ if (IsExternal()) {
+ out << "{ \"line\" : " << ExternalLine() << ", "
+ << " \"fileId\" : " << ExternalFileId() << ", "
+ << " \"inliningId\" : " << InliningId() << "}";
+ } else {
+ out << "{ \"scriptOffset\" : " << ScriptOffset() << ", "
+ << " \"inliningId\" : " << InliningId() << "}";
+ }
}
-void SourcePosition::Print(std::ostream& out, Code* code) const {
- DeoptimizationData* deopt_data =
+void SourcePosition::Print(std::ostream& out, Code code) const {
+ DeoptimizationData deopt_data =
DeoptimizationData::cast(code->deoptimization_data());
if (!isInlined()) {
- SharedFunctionInfo* function(
+ SharedFunctionInfo function(
SharedFunctionInfo::cast(deopt_data->SharedFunctionInfo()));
Print(out, function);
} else {
@@ -112,7 +123,7 @@ void SourcePosition::Print(std::ostream& out, Code* code) const {
if (inl.inlined_function_id == -1) {
out << *this;
} else {
- SharedFunctionInfo* function =
+ SharedFunctionInfo function =
deopt_data->GetInlinedFunction(inl.inlined_function_id);
Print(out, function);
}
@@ -124,6 +135,7 @@ void SourcePosition::Print(std::ostream& out, Code* code) const {
SourcePositionInfo::SourcePositionInfo(SourcePosition pos,
Handle<SharedFunctionInfo> f)
: position(pos),
+ shared(f),
script(f.is_null() || !f->script()->IsScript()
? Handle<Script>::null()
: handle(Script::cast(f->script()), f->GetIsolate())) {
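PrintJson() now emits one of two shapes, keyed on IsExternal(); with illustrative values:

    { "line" : 128,  "fileId" : 3,  "inliningId" : -1}    // external position
    { "scriptOffset" : 4096,  "inliningId" : -1}          // JavaScript position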
diff --git a/deps/v8/src/source-position.h b/deps/v8/src/source-position.h
index 4931d1d7ad..e62c7e5ddf 100644
--- a/deps/v8/src/source-position.h
+++ b/deps/v8/src/source-position.h
@@ -22,9 +22,23 @@ class SharedFunctionInfo;
struct SourcePositionInfo;
// SourcePosition stores
-// - script_offset (31 bit non-negative int or kNoSourcePosition)
+// - is_external (1 bit true/false)
+//
+// - if is_external is true:
+// - external_line (20 bits, non-negative int)
+// - external_file_id (10 bits, non-negative int)
+//
+// - if is_external is false:
+// - script_offset (30 bit non-negative int or kNoSourcePosition)
+//
+// - In both cases, there is an inlining_id.
// - inlining_id (16 bit non-negative int or kNotInlined).
//
+// An "external" SourcePosition is one given by a file_id and a line,
+// suitable for embedding references to .cc or .tq files.
+// Otherwise, a SourcePosition contains an offset into a JavaScript
+// file.
+//
// A defined inlining_id refers to positions in
// OptimizedCompilationInfo::inlined_functions or
// DeoptimizationData::InliningPositions, depending on the compilation stage.
@@ -32,28 +46,70 @@ class SourcePosition final {
public:
explicit SourcePosition(int script_offset, int inlining_id = kNotInlined)
: value_(0) {
+ SetIsExternal(false);
SetScriptOffset(script_offset);
SetInliningId(inlining_id);
}
+ // External SourcePositions should use the following method to construct
+ // SourcePositions to avoid confusion.
+ static SourcePosition External(int line, int file_id) {
+ return SourcePosition(line, file_id, kNotInlined);
+ }
+
static SourcePosition Unknown() { return SourcePosition(kNoSourcePosition); }
bool IsKnown() const {
+ if (IsExternal()) return true;
return ScriptOffset() != kNoSourcePosition || InliningId() != kNotInlined;
}
- bool isInlined() const { return InliningId() != kNotInlined; }
+ bool isInlined() const {
+ if (IsExternal()) return false;
+ return InliningId() != kNotInlined;
+ }
+
+ bool IsExternal() const { return IsExternalField::decode(value_); }
+ bool IsJavaScript() const { return !IsExternal(); }
+
+ int ExternalLine() const {
+ DCHECK(IsExternal());
+ return ExternalLineField::decode(value_);
+ }
+
+ int ExternalFileId() const {
+ DCHECK(IsExternal());
+ return ExternalFileIdField::decode(value_);
+ }
// Assumes that the code object is optimized
std::vector<SourcePositionInfo> InliningStack(Handle<Code> code) const;
std::vector<SourcePositionInfo> InliningStack(
OptimizedCompilationInfo* cinfo) const;
- void Print(std::ostream& out, Code* code) const;
+ void Print(std::ostream& out, Code code) const;
void PrintJson(std::ostream& out) const;
- int ScriptOffset() const { return ScriptOffsetField::decode(value_) - 1; }
+ int ScriptOffset() const {
+ DCHECK(IsJavaScript());
+ return ScriptOffsetField::decode(value_) - 1;
+ }
int InliningId() const { return InliningIdField::decode(value_) - 1; }
+ void SetIsExternal(bool external) {
+ value_ = IsExternalField::update(value_, external);
+ }
+ void SetExternalLine(int line) {
+ DCHECK(IsExternal());
+ DCHECK(line <= ExternalLineField::kMax - 1);
+ value_ = ExternalLineField::update(value_, line);
+ }
+ void SetExternalFileId(int file_id) {
+ DCHECK(IsExternal());
+ DCHECK(file_id <= ExternalFileIdField::kMax - 1);
+ value_ = ExternalFileIdField::update(value_, file_id);
+ }
+
void SetScriptOffset(int script_offset) {
+ DCHECK(IsJavaScript());
DCHECK(script_offset <= ScriptOffsetField::kMax - 2);
DCHECK_GE(script_offset, kNoSourcePosition);
value_ = ScriptOffsetField::update(value_, script_offset + 1);
@@ -76,12 +132,29 @@ class SourcePosition final {
}
private:
- void Print(std::ostream& out, SharedFunctionInfo* function) const;
+ // Used by SourcePosition::External(line, file_id).
+ SourcePosition(int line, int file_id, int inlining_id) : value_(0) {
+ SetIsExternal(true);
+ SetExternalLine(line);
+ SetExternalFileId(file_id);
+ SetInliningId(inlining_id);
+ }
+
+ void Print(std::ostream& out, SharedFunctionInfo function) const;
+
+ typedef BitField64<bool, 0, 1> IsExternalField;
+
+ // The two below are only used if IsExternal() is true.
+ typedef BitField64<int, 1, 20> ExternalLineField;
+ typedef BitField64<int, 21, 10> ExternalFileIdField;
+
+ // ScriptOffsetField is only used if IsExternal() is false.
+ typedef BitField64<int, 1, 30> ScriptOffsetField;
// InliningId is in the high bits for better compression in
// SourcePositionTable.
- typedef BitField64<int, 0, 31> ScriptOffsetField;
typedef BitField64<int, 31, 16> InliningIdField;
+
// Leaving the highest bit untouched to allow for signed conversion.
uint64_t value_;
};
@@ -106,6 +179,7 @@ struct SourcePositionInfo {
SourcePositionInfo(SourcePosition pos, Handle<SharedFunctionInfo> f);
SourcePosition position;
+ Handle<SharedFunctionInfo> shared;
Handle<Script> script;
int line = -1;
int column = -1;
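The encoding uses the low bit as a tag: when set, bits 1..20 hold the external line and bits 21..30 the file id; when clear, bits 1..30 hold the script offset. The inlining id always sits at bits 31..46. A sketch of building and decoding an external position (values illustrative):

    // Sketch: external positions go through the named constructor.
    SourcePosition pos = SourcePosition::External(/*line=*/128, /*file_id=*/3);
    DCHECK(pos.IsExternal());
    DCHECK(!pos.isInlined());            // external positions are never inlined
    int line = pos.ExternalLine();       // 128, decoded from bits 1..20
    int file_id = pos.ExternalFileId();  // 3, decoded from bits 21..30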
diff --git a/deps/v8/src/string-builder-inl.h b/deps/v8/src/string-builder-inl.h
index dccdb3d01a..0c3f83b2d4 100644
--- a/deps/v8/src/string-builder-inl.h
+++ b/deps/v8/src/string-builder-inl.h
@@ -27,12 +27,12 @@ typedef BitField<int, kStringBuilderConcatHelperLengthBits,
StringBuilderSubstringPosition;
template <typename sinkchar>
-void StringBuilderConcatHelper(String* special, sinkchar* sink,
- FixedArray* fixed_array, int array_length);
+void StringBuilderConcatHelper(String special, sinkchar* sink,
+ FixedArray fixed_array, int array_length);
// Returns the result length of the concatenation.
// On illegal argument, -1 is returned.
-int StringBuilderConcatLength(int special_length, FixedArray* fixed_array,
+int StringBuilderConcatLength(int special_length, FixedArray fixed_array,
int array_length, bool* one_byte);
class FixedArrayBuilder {
@@ -43,8 +43,8 @@ class FixedArrayBuilder {
bool HasCapacity(int elements);
void EnsureCapacity(Isolate* isolate, int elements);
- void Add(Object* value);
- void Add(Smi* value);
+ void Add(Object value);
+ void Add(Smi value);
Handle<FixedArray> array() { return array_; }
@@ -103,7 +103,7 @@ class ReplacementStringBuilder {
}
private:
- void AddElement(Object* element);
+ void AddElement(Object element);
Heap* heap_;
FixedArrayBuilder array_builder_;
@@ -182,14 +182,15 @@ class IncrementalStringBuilder {
template <typename DestChar>
class NoExtend {
public:
- explicit NoExtend(Handle<String> string, int offset) {
+ NoExtend(Handle<String> string, int offset,
+ const DisallowHeapAllocation& no_gc) {
DCHECK(string->IsSeqOneByteString() || string->IsSeqTwoByteString());
if (sizeof(DestChar) == 1) {
start_ = reinterpret_cast<DestChar*>(
- Handle<SeqOneByteString>::cast(string)->GetChars() + offset);
+ Handle<SeqOneByteString>::cast(string)->GetChars(no_gc) + offset);
} else {
start_ = reinterpret_cast<DestChar*>(
- Handle<SeqTwoByteString>::cast(string)->GetChars() + offset);
+ Handle<SeqTwoByteString>::cast(string)->GetChars(no_gc) + offset);
}
cursor_ = start_;
}
@@ -205,7 +206,7 @@ class IncrementalStringBuilder {
private:
DestChar* start_;
DestChar* cursor_;
- DisallowHeapAllocation no_gc_;
+ DISALLOW_HEAP_ALLOCATION(no_gc_);
};
template <typename DestChar>
@@ -231,8 +232,10 @@ class IncrementalStringBuilder {
template <typename DestChar>
class NoExtendBuilder : public NoExtend<DestChar> {
public:
- NoExtendBuilder(IncrementalStringBuilder* builder, int required_length)
- : NoExtend<DestChar>(builder->current_part(), builder->current_index_),
+ NoExtendBuilder(IncrementalStringBuilder* builder, int required_length,
+ const DisallowHeapAllocation& no_gc)
+ : NoExtend<DestChar>(builder->current_part(), builder->current_index_,
+ no_gc),
builder_(builder) {
DCHECK(builder->CurrentPartCanFit(required_length));
}
@@ -251,13 +254,13 @@ class IncrementalStringBuilder {
V8_INLINE Handle<String> accumulator() { return accumulator_; }
V8_INLINE void set_accumulator(Handle<String> string) {
- *accumulator_.location() = *string;
+ *accumulator_.location() = string->ptr();
}
V8_INLINE Handle<String> current_part() { return current_part_; }
V8_INLINE void set_current_part(Handle<String> string) {
- *current_part_.location() = *string;
+ *current_part_.location() = string->ptr();
}
// Add the current part to the accumulator.
diff --git a/deps/v8/src/string-builder.cc b/deps/v8/src/string-builder.cc
index 66ccb77184..57571a11a1 100644
--- a/deps/v8/src/string-builder.cc
+++ b/deps/v8/src/string-builder.cc
@@ -12,12 +12,12 @@ namespace v8 {
namespace internal {
template <typename sinkchar>
-void StringBuilderConcatHelper(String* special, sinkchar* sink,
- FixedArray* fixed_array, int array_length) {
+void StringBuilderConcatHelper(String special, sinkchar* sink,
+ FixedArray fixed_array, int array_length) {
DisallowHeapAllocation no_gc;
int position = 0;
for (int i = 0; i < array_length; i++) {
- Object* element = fixed_array->get(i);
+ Object element = fixed_array->get(i);
if (element->IsSmi()) {
// Smi encoding of position and length.
int encoded_slice = Smi::ToInt(element);
@@ -29,7 +29,7 @@ void StringBuilderConcatHelper(String* special, sinkchar* sink,
len = StringBuilderSubstringLength::decode(encoded_slice);
} else {
// Position and length encoded in two smis.
- Object* obj = fixed_array->get(++i);
+ Object obj = fixed_array->get(++i);
DCHECK(obj->IsSmi());
pos = Smi::ToInt(obj);
len = -encoded_slice;
@@ -37,7 +37,7 @@ void StringBuilderConcatHelper(String* special, sinkchar* sink,
String::WriteToFlat(special, sink + position, pos, pos + len);
position += len;
} else {
- String* string = String::cast(element);
+ String string = String::cast(element);
int element_length = string->length();
String::WriteToFlat(string, sink + position, 0, element_length);
position += element_length;
@@ -45,21 +45,21 @@ void StringBuilderConcatHelper(String* special, sinkchar* sink,
}
}
-template void StringBuilderConcatHelper<uint8_t>(String* special, uint8_t* sink,
- FixedArray* fixed_array,
+template void StringBuilderConcatHelper<uint8_t>(String special, uint8_t* sink,
+ FixedArray fixed_array,
int array_length);
-template void StringBuilderConcatHelper<uc16>(String* special, uc16* sink,
- FixedArray* fixed_array,
+template void StringBuilderConcatHelper<uc16>(String special, uc16* sink,
+ FixedArray fixed_array,
int array_length);
-int StringBuilderConcatLength(int special_length, FixedArray* fixed_array,
+int StringBuilderConcatLength(int special_length, FixedArray fixed_array,
int array_length, bool* one_byte) {
DisallowHeapAllocation no_gc;
int position = 0;
for (int i = 0; i < array_length; i++) {
int increment = 0;
- Object* elt = fixed_array->get(i);
+ Object elt = fixed_array->get(i);
if (elt->IsSmi()) {
// Smi encoding of position and length.
int smi_value = Smi::ToInt(elt);
@@ -75,7 +75,7 @@ int StringBuilderConcatLength(int special_length, FixedArray* fixed_array,
// Get the position and check that it is a positive smi.
i++;
if (i >= array_length) return -1;
- Object* next_smi = fixed_array->get(i);
+ Object next_smi = fixed_array->get(i);
if (!next_smi->IsSmi()) return -1;
pos = Smi::ToInt(next_smi);
if (pos < 0) return -1;
@@ -85,7 +85,7 @@ int StringBuilderConcatLength(int special_length, FixedArray* fixed_array,
if (pos > special_length || len > special_length - pos) return -1;
increment = len;
} else if (elt->IsString()) {
- String* element = String::cast(elt);
+ String element = String::cast(elt);
int element_length = element->length();
increment = element_length;
if (*one_byte && !element->HasOnlyOneByteChars()) {
@@ -139,7 +139,7 @@ void FixedArrayBuilder::EnsureCapacity(Isolate* isolate, int elements) {
}
}
-void FixedArrayBuilder::Add(Object* value) {
+void FixedArrayBuilder::Add(Object value) {
DCHECK(!value->IsSmi());
DCHECK(length_ < capacity());
array_->set(length_, value);
@@ -147,7 +147,7 @@ void FixedArrayBuilder::Add(Object* value) {
has_non_smi_elements_ = true;
}
-void FixedArrayBuilder::Add(Smi* value) {
+void FixedArrayBuilder::Add(Smi value) {
DCHECK(value->IsSmi());
DCHECK(length_ < capacity());
array_->set(length_, value);
@@ -203,7 +203,7 @@ MaybeHandle<String> ReplacementStringBuilder::ToString() {
String);
DisallowHeapAllocation no_gc;
- uint8_t* char_buffer = seq->GetChars();
+ uint8_t* char_buffer = seq->GetChars(no_gc);
StringBuilderConcatHelper(*subject_, char_buffer, *array_builder_.array(),
array_builder_.length());
joined_string = Handle<String>::cast(seq);
@@ -215,7 +215,7 @@ MaybeHandle<String> ReplacementStringBuilder::ToString() {
String);
DisallowHeapAllocation no_gc;
- uc16* char_buffer = seq->GetChars();
+ uc16* char_buffer = seq->GetChars(no_gc);
StringBuilderConcatHelper(*subject_, char_buffer, *array_builder_.array(),
array_builder_.length());
joined_string = Handle<String>::cast(seq);
@@ -223,7 +223,7 @@ MaybeHandle<String> ReplacementStringBuilder::ToString() {
return joined_string;
}
-void ReplacementStringBuilder::AddElement(Object* element) {
+void ReplacementStringBuilder::AddElement(Object element) {
DCHECK(element->IsSmi() || element->IsString());
DCHECK(array_builder_.capacity() > array_builder_.length());
array_builder_.Add(element);
diff --git a/deps/v8/src/string-constants.h b/deps/v8/src/string-constants.h
index b7134849db..301a9bdd0b 100644
--- a/deps/v8/src/string-constants.h
+++ b/deps/v8/src/string-constants.h
@@ -5,6 +5,7 @@
#ifndef V8_STRING_CONSTANTS_H_
#define V8_STRING_CONSTANTS_H_
+#include "src/handles.h"
#include "src/objects/string.h"
#include "src/zone/zone.h"
diff --git a/deps/v8/src/string-hasher-inl.h b/deps/v8/src/string-hasher-inl.h
index 35948c385b..21c92084ca 100644
--- a/deps/v8/src/string-hasher-inl.h
+++ b/deps/v8/src/string-hasher-inl.h
@@ -10,6 +10,7 @@
#include "src/char-predicates-inl.h"
#include "src/objects.h"
#include "src/objects/string-inl.h"
+#include "src/utils-inl.h"
namespace v8 {
namespace internal {
@@ -18,8 +19,7 @@ StringHasher::StringHasher(int length, uint64_t seed)
: length_(length),
raw_running_hash_(static_cast<uint32_t>(seed)),
array_index_(0),
- is_array_index_(0 < length_ && length_ <= String::kMaxArrayIndexSize),
- is_first_char_(true) {
+ is_array_index_(IsInRange(length, 1, String::kMaxArrayIndexSize)) {
DCHECK(FLAG_randomize_hashes || raw_running_hash_ == 0);
}
@@ -38,34 +38,23 @@ uint32_t StringHasher::GetHashCore(uint32_t running_hash) {
running_hash += (running_hash << 3);
running_hash ^= (running_hash >> 11);
running_hash += (running_hash << 15);
- if ((running_hash & String::kHashBitMask) == 0) {
- return kZeroHash;
- }
- return running_hash;
+ int32_t hash = static_cast<int32_t>(running_hash & String::kHashBitMask);
+ int32_t mask = (hash - 1) >> 31;
+ return running_hash | (kZeroHash & mask);
}
+template <typename Char>
uint32_t StringHasher::ComputeRunningHash(uint32_t running_hash,
- const uc16* chars, int length) {
- DCHECK_NOT_NULL(chars);
- DCHECK_GE(length, 0);
- for (int i = 0; i < length; ++i) {
+ const Char* chars, int length) {
+ DCHECK_LE(0, length);
+ DCHECK_IMPLIES(0 < length, chars != nullptr);
+ const Char* end = &chars[length];
+ while (chars != end) {
running_hash = AddCharacterCore(running_hash, *chars++);
}
return running_hash;
}
-uint32_t StringHasher::ComputeRunningHashOneByte(uint32_t running_hash,
- const char* chars,
- int length) {
- DCHECK_NOT_NULL(chars);
- DCHECK_GE(length, 0);
- for (int i = 0; i < length; ++i) {
- uint16_t c = static_cast<uint16_t>(*chars++);
- running_hash = AddCharacterCore(running_hash, c);
- }
- return running_hash;
-}
-
void StringHasher::AddCharacter(uint16_t c) {
// Use the Jenkins one-at-a-time hash function to update the hash
// for the given character.
@@ -74,24 +63,12 @@ void StringHasher::AddCharacter(uint16_t c) {
bool StringHasher::UpdateIndex(uint16_t c) {
DCHECK(is_array_index_);
- if (!IsDecimalDigit(c)) {
- is_array_index_ = false;
- return false;
- }
- int d = c - '0';
- if (is_first_char_) {
- is_first_char_ = false;
- if (d == 0 && length_ > 1) {
- is_array_index_ = false;
- return false;
- }
- }
- if (array_index_ > 429496729U - ((d + 3) >> 3)) {
+ if (!TryAddIndexChar(&array_index_, c)) {
is_array_index_ = false;
return false;
}
- array_index_ = array_index_ * 10 + d;
- return true;
+ is_array_index_ = array_index_ != 0 || length_ == 1;
+ return is_array_index_;
}
template <typename Char>
@@ -107,29 +84,61 @@ inline void StringHasher::AddCharacters(const Char* chars, int length) {
}
}
}
- for (; i < length; i++) {
- DCHECK(!is_array_index_);
- AddCharacter(chars[i]);
- }
+ raw_running_hash_ =
+ ComputeRunningHash(raw_running_hash_, &chars[i], length - i);
}
template <typename schar>
uint32_t StringHasher::HashSequentialString(const schar* chars, int length,
uint64_t seed) {
+#ifdef DEBUG
StringHasher hasher(length, seed);
if (!hasher.has_trivial_hash()) hasher.AddCharacters(chars, length);
- return hasher.GetHashField();
+ uint32_t expected = hasher.GetHashField();
+#endif
+
+ // Check whether the string is a valid array index. In that case, compute the
+ // array index hash. It'll fall through to compute a regular string hash from
+ // the start if it turns out that the string isn't a valid array index.
+ if (IsInRange(length, 1, String::kMaxArrayIndexSize)) {
+ if (IsDecimalDigit(chars[0]) && (length == 1 || chars[0] != '0')) {
+ uint32_t index = chars[0] - '0';
+ int i = 1;
+ do {
+ if (i == length) {
+ uint32_t result = MakeArrayIndexHash(index, length);
+ DCHECK_EQ(expected, result);
+ return result;
+ }
+ } while (TryAddIndexChar(&index, chars[i++]));
+ }
+ } else if (length > String::kMaxHashCalcLength) {
+ // String hash of a large string is simply the length.
+ uint32_t result =
+ (length << String::kHashShift) | String::kIsNotArrayIndexMask;
+ DCHECK_EQ(result, expected);
+ return result;
+ }
+
+ // Non-array-index hash.
+ uint32_t hash =
+ ComputeRunningHash(static_cast<uint32_t>(seed), chars, length);
+
+ uint32_t result =
+ (GetHashCore(hash) << String::kHashShift) | String::kIsNotArrayIndexMask;
+ DCHECK_EQ(result, expected);
+ return result;
}
IteratingStringHasher::IteratingStringHasher(int len, uint64_t seed)
: StringHasher(len, seed) {}
-uint32_t IteratingStringHasher::Hash(String* string, uint64_t seed) {
+uint32_t IteratingStringHasher::Hash(String string, uint64_t seed) {
IteratingStringHasher hasher(string->length(), seed);
// Nothing to do.
if (hasher.has_trivial_hash()) return hasher.GetHashField();
- ConsString* cons_string = String::VisitFlat(&hasher, string);
- if (cons_string == nullptr) return hasher.GetHashField();
+ ConsString cons_string = String::VisitFlat(&hasher, string);
+ if (cons_string.is_null()) return hasher.GetHashField();
hasher.VisitConsString(cons_string);
return hasher.GetHashField();
}
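The rewritten GetHashCore() replaces the zero-hash branch with arithmetic: a hash whose masked bits are all zero must be replaced by kZeroHash (zero is reserved to mean "not computed"), and (hash - 1) >> 31 on a non-negative 31-bit value is all ones exactly when hash == 0. Worked through:

    // hash == 0: hash - 1 == -1, arithmetic shift gives mask == ~0,
    //            so kZeroHash is OR'ed into the result.
    // hash  > 0: hash - 1 >= 0, shift gives mask == 0, result unchanged.
    int32_t hash = static_cast<int32_t>(running_hash & String::kHashBitMask);
    int32_t mask = (hash - 1) >> 31;
    return running_hash | (kZeroHash & mask);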
diff --git a/deps/v8/src/string-hasher.h b/deps/v8/src/string-hasher.h
index 68cff519c2..c661500acd 100644
--- a/deps/v8/src/string-hasher.h
+++ b/deps/v8/src/string-hasher.h
@@ -41,11 +41,9 @@ class V8_EXPORT_PRIVATE StringHasher {
// Reusable parts of the hashing algorithm.
V8_INLINE static uint32_t AddCharacterCore(uint32_t running_hash, uint16_t c);
V8_INLINE static uint32_t GetHashCore(uint32_t running_hash);
+ template <typename Char>
V8_INLINE static uint32_t ComputeRunningHash(uint32_t running_hash,
- const uc16* chars, int length);
- V8_INLINE static uint32_t ComputeRunningHashOneByte(uint32_t running_hash,
- const char* chars,
- int length);
+ const Char* chars, int length);
protected:
// Returns the value to store in the hash field of a string with
@@ -68,19 +66,18 @@ class V8_EXPORT_PRIVATE StringHasher {
uint32_t raw_running_hash_;
uint32_t array_index_;
bool is_array_index_;
- bool is_first_char_;
DISALLOW_COPY_AND_ASSIGN(StringHasher);
};
class IteratingStringHasher : public StringHasher {
public:
- static inline uint32_t Hash(String* string, uint64_t seed);
+ static inline uint32_t Hash(String string, uint64_t seed);
inline void VisitOneByteString(const uint8_t* chars, int length);
inline void VisitTwoByteString(const uint16_t* chars, int length);
private:
inline IteratingStringHasher(int len, uint64_t seed);
- void VisitConsString(ConsString* cons_string);
+ void VisitConsString(ConsString cons_string);
DISALLOW_COPY_AND_ASSIGN(IteratingStringHasher);
};
diff --git a/deps/v8/src/string-stream.cc b/deps/v8/src/string-stream.cc
index 3ab70c4ffa..ea73139813 100644
--- a/deps/v8/src/string-stream.cc
+++ b/deps/v8/src/string-stream.cc
@@ -121,7 +121,7 @@ void StringStream::Add(Vector<const char> format, Vector<FmtElm> elms) {
}
case 'o': {
DCHECK_EQ(FmtElm::OBJ, current.type_);
- Object* obj = current.data_.u_obj_;
+ Object obj(current.data_.u_obj_);
PrintObject(obj);
break;
}
@@ -177,8 +177,7 @@ void StringStream::Add(Vector<const char> format, Vector<FmtElm> elms) {
DCHECK_EQ(buffer_[length_], '\0');
}
-
-void StringStream::PrintObject(Object* o) {
+void StringStream::PrintObject(Object o) {
o->ShortPrint(this);
if (o->IsString()) {
if (String::cast(o)->length() <= String::kMaxShortPrintLength) {
@@ -207,7 +206,6 @@ void StringStream::PrintObject(Object* o) {
}
}
-
std::unique_ptr<char[]> StringStream::ToCString() const {
char* str = NewArray<char>(length_ + 1);
MemCopy(str, buffer_, length_);
@@ -244,7 +242,7 @@ Handle<String> StringStream::ToString(Isolate* isolate) {
void StringStream::ClearMentionedObjectCache(Isolate* isolate) {
- isolate->set_string_stream_current_security_token(nullptr);
+ isolate->set_string_stream_current_security_token(Object());
if (isolate->string_stream_debug_object_cache() == nullptr) {
isolate->set_string_stream_debug_object_cache(new DebugObjectCache());
}
@@ -259,13 +257,9 @@ bool StringStream::IsMentionedObjectCacheClear(Isolate* isolate) {
}
#endif
+bool StringStream::Put(String str) { return Put(str, 0, str->length()); }
-bool StringStream::Put(String* str) {
- return Put(str, 0, str->length());
-}
-
-
-bool StringStream::Put(String* str, int start, int end) {
+bool StringStream::Put(String str, int start, int end) {
StringCharacterStream stream(str, start);
for (int i = start; i < end && stream.HasMore(); i++) {
uint16_t c = stream.GetNext();
@@ -279,10 +273,9 @@ bool StringStream::Put(String* str, int start, int end) {
return true;
}
-
-void StringStream::PrintName(Object* name) {
+void StringStream::PrintName(Object name) {
if (name->IsString()) {
- String* str = String::cast(name);
+ String str = String::cast(name);
if (str->length() > 0) {
Put(str);
} else {
@@ -293,16 +286,15 @@ void StringStream::PrintName(Object* name) {
}
}
-
-void StringStream::PrintUsingMap(JSObject* js_object) {
- Map* map = js_object->map();
+void StringStream::PrintUsingMap(JSObject js_object) {
+ Map map = js_object->map();
int real_size = map->NumberOfOwnDescriptors();
- DescriptorArray* descs = map->instance_descriptors();
+ DescriptorArray descs = map->instance_descriptors();
for (int i = 0; i < real_size; i++) {
PropertyDetails details = descs->GetDetails(i);
if (details.location() == kField) {
DCHECK_EQ(kData, details.kind());
- Object* key = descs->GetKey(i);
+ Object key = descs->GetKey(i);
if (key->IsString() || key->IsNumber()) {
int len = 3;
if (key->IsString()) {
@@ -321,7 +313,7 @@ void StringStream::PrintUsingMap(JSObject* js_object) {
double value = js_object->RawFastDoublePropertyAt(index);
Add("<unboxed double> %.16g\n", FmtElm(value));
} else {
- Object* value = js_object->RawFastPropertyAt(index);
+ Object value = js_object->RawFastPropertyAt(index);
Add("%o\n", value);
}
}
@@ -329,11 +321,10 @@ void StringStream::PrintUsingMap(JSObject* js_object) {
}
}
-
-void StringStream::PrintFixedArray(FixedArray* array, unsigned int limit) {
+void StringStream::PrintFixedArray(FixedArray array, unsigned int limit) {
ReadOnlyRoots roots = array->GetReadOnlyRoots();
for (unsigned int i = 0; i < 10 && i < limit; i++) {
- Object* element = array->get(i);
+ Object element = array->get(i);
if (element->IsTheHole(roots)) continue;
for (int len = 1; len < 18; len++) {
Put(' ');
@@ -345,8 +336,7 @@ void StringStream::PrintFixedArray(FixedArray* array, unsigned int limit) {
}
}
-
-void StringStream::PrintByteArray(ByteArray* byte_array) {
+void StringStream::PrintByteArray(ByteArray byte_array) {
unsigned int limit = byte_array->length();
for (unsigned int i = 0; i < 10 && i < limit; i++) {
byte b = byte_array->get(i);
@@ -367,15 +357,15 @@ void StringStream::PrintByteArray(ByteArray* byte_array) {
}
}
-
void StringStream::PrintMentionedObjectCache(Isolate* isolate) {
if (object_print_mode_ == kPrintObjectConcise) return;
DebugObjectCache* debug_object_cache =
isolate->string_stream_debug_object_cache();
Add("==== Key ============================================\n\n");
for (size_t i = 0; i < debug_object_cache->size(); i++) {
- HeapObject* printee = (*debug_object_cache)[i];
- Add(" #%d# %p: ", static_cast<int>(i), printee);
+ HeapObject printee = (*debug_object_cache)[i];
+ Add(" #%d# %p: ", static_cast<int>(i),
+ reinterpret_cast<void*>(printee->ptr()));
printee->ShortPrint(this);
Add("\n");
if (printee->IsJSObject()) {
@@ -384,7 +374,7 @@ void StringStream::PrintMentionedObjectCache(Isolate* isolate) {
}
PrintUsingMap(JSObject::cast(printee));
if (printee->IsJSArray()) {
- JSArray* array = JSArray::cast(printee);
+ JSArray array = JSArray::cast(printee);
if (array->HasObjectElements()) {
unsigned int limit = FixedArray::cast(array->elements())->length();
unsigned int length =
@@ -402,9 +392,8 @@ void StringStream::PrintMentionedObjectCache(Isolate* isolate) {
}
}
-void StringStream::PrintSecurityTokenIfChanged(JSFunction* fun) {
- Context* context = fun->context();
- Object* token = context->native_context()->security_token();
+void StringStream::PrintSecurityTokenIfChanged(JSFunction fun) {
+ Object token = fun->native_context()->security_token();
Isolate* isolate = fun->GetIsolate();
if (token != isolate->string_stream_current_security_token()) {
Add("Security context: %o\n", token);
@@ -412,21 +401,19 @@ void StringStream::PrintSecurityTokenIfChanged(JSFunction* fun) {
}
}
-void StringStream::PrintFunction(JSFunction* fun, Object* receiver,
- Code** code) {
+void StringStream::PrintFunction(JSFunction fun, Object receiver, Code* code) {
PrintPrototype(fun, receiver);
*code = fun->code();
}
-
-void StringStream::PrintPrototype(JSFunction* fun, Object* receiver) {
- Object* name = fun->shared()->Name();
+void StringStream::PrintPrototype(JSFunction fun, Object receiver) {
+ Object name = fun->shared()->Name();
bool print_name = false;
Isolate* isolate = fun->GetIsolate();
if (receiver->IsNullOrUndefined(isolate) || receiver->IsTheHole(isolate) ||
receiver->IsJSProxy()) {
print_name = true;
- } else if (isolate->context() != nullptr) {
+ } else if (!isolate->context().is_null()) {
if (!receiver->IsJSObject()) {
receiver = receiver->GetPrototypeChainRootMap(isolate)->prototype();
}
@@ -435,7 +422,7 @@ void StringStream::PrintPrototype(JSFunction* fun, Object* receiver) {
kStartAtReceiver);
!iter.IsAtEnd(); iter.Advance()) {
if (iter.GetCurrent()->IsJSProxy()) break;
- Object* key = iter.GetCurrent<JSObject>()->SlowReverseLookup(fun);
+ Object key = iter.GetCurrent<JSObject>()->SlowReverseLookup(fun);
if (!key->IsUndefined(isolate)) {
if (!name->IsString() ||
!key->IsString() ||
@@ -460,7 +447,6 @@ void StringStream::PrintPrototype(JSFunction* fun, Object* receiver) {
}
}
-
char* HeapStringAllocator::grow(unsigned* bytes) {
unsigned new_bytes = *bytes * 2;
// Check for overflow.
diff --git a/deps/v8/src/string-stream.h b/deps/v8/src/string-stream.h
index 6c4da47508..dea31e0b3d 100644
--- a/deps/v8/src/string-stream.h
+++ b/deps/v8/src/string-stream.h
@@ -7,6 +7,7 @@
#include "src/allocation.h"
#include "src/handles.h"
+#include "src/objects/heap-object.h"
#include "src/vector.h"
namespace v8 {
@@ -70,8 +71,8 @@ class StringStream final {
FmtElm(const Vector<const uc16>& value) : FmtElm(LC_STR) { // NOLINT
data_.u_lc_str_ = &value;
}
- FmtElm(Object* value) : FmtElm(OBJ) { // NOLINT
- data_.u_obj_ = value;
+ FmtElm(Object value) : FmtElm(OBJ) { // NOLINT
+ data_.u_obj_ = value.ptr();
}
FmtElm(Handle<Object> value) : FmtElm(HANDLE) { // NOLINT
data_.u_handle_ = value.location();
@@ -96,8 +97,8 @@ class StringStream final {
double u_double_;
const char* u_c_str_;
const Vector<const uc16>* u_lc_str_;
- Object* u_obj_;
- Object** u_handle_;
+ Address u_obj_;
+ Address* u_handle_;
void* u_pointer_;
} data_;
};
@@ -115,8 +116,8 @@ class StringStream final {
}
bool Put(char c);
- bool Put(String* str);
- bool Put(String* str, int start, int end);
+ bool Put(String str);
+ bool Put(String str, int start, int end);
void Add(const char* format) { Add(CStrVector(format)); }
void Add(Vector<const char> format) { Add(format, Vector<FmtElm>()); }
@@ -140,14 +141,14 @@ class StringStream final {
int length() const { return length_; }
// Object printing support.
- void PrintName(Object* o);
- void PrintFixedArray(FixedArray* array, unsigned int limit);
- void PrintByteArray(ByteArray* ba);
- void PrintUsingMap(JSObject* js_object);
- void PrintPrototype(JSFunction* fun, Object* receiver);
- void PrintSecurityTokenIfChanged(JSFunction* function);
+ void PrintName(Object o);
+ void PrintFixedArray(FixedArray array, unsigned int limit);
+ void PrintByteArray(ByteArray ba);
+ void PrintUsingMap(JSObject js_object);
+ void PrintPrototype(JSFunction fun, Object receiver);
+ void PrintSecurityTokenIfChanged(JSFunction function);
// NOTE: Returns the code in the output parameter.
- void PrintFunction(JSFunction* function, Object* receiver, Code** code);
+ void PrintFunction(JSFunction function, Object receiver, Code* code);
// Reset the stream.
void Reset() {
@@ -166,7 +167,7 @@ class StringStream final {
private:
void Add(Vector<const char> format, Vector<FmtElm> elms);
- void PrintObject(Object* obj);
+ void PrintObject(Object obj);
StringAllocator* allocator_;
ObjectPrintMode object_print_mode_;
diff --git a/deps/v8/src/task-utils.cc b/deps/v8/src/task-utils.cc
new file mode 100644
index 0000000000..aaa36346e1
--- /dev/null
+++ b/deps/v8/src/task-utils.cc
@@ -0,0 +1,64 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/task-utils.h"
+
+#include "src/cancelable-task.h"
+
+namespace v8 {
+namespace internal {
+
+namespace {
+
+class CancelableFuncTask final : public CancelableTask {
+ public:
+ CancelableFuncTask(Isolate* isolate, std::function<void()> func)
+ : CancelableTask(isolate), func_(std::move(func)) {}
+ CancelableFuncTask(CancelableTaskManager* manager, std::function<void()> func)
+ : CancelableTask(manager), func_(std::move(func)) {}
+ void RunInternal() final { func_(); }
+
+ private:
+ const std::function<void()> func_;
+};
+
+class CancelableIdleFuncTask final : public CancelableIdleTask {
+ public:
+ CancelableIdleFuncTask(Isolate* isolate, std::function<void(double)> func)
+ : CancelableIdleTask(isolate), func_(std::move(func)) {}
+ CancelableIdleFuncTask(CancelableTaskManager* manager,
+ std::function<void(double)> func)
+ : CancelableIdleTask(manager), func_(std::move(func)) {}
+ void RunInternal(double deadline_in_seconds) final {
+ func_(deadline_in_seconds);
+ }
+
+ private:
+ const std::function<void(double)> func_;
+};
+
+} // namespace
+
+std::unique_ptr<CancelableTask> MakeCancelableTask(Isolate* isolate,
+ std::function<void()> func) {
+ return base::make_unique<CancelableFuncTask>(isolate, std::move(func));
+}
+
+std::unique_ptr<CancelableTask> MakeCancelableTask(
+ CancelableTaskManager* manager, std::function<void()> func) {
+ return base::make_unique<CancelableFuncTask>(manager, std::move(func));
+}
+
+std::unique_ptr<CancelableIdleTask> MakeCancelableIdleTask(
+ Isolate* isolate, std::function<void(double)> func) {
+ return base::make_unique<CancelableIdleFuncTask>(isolate, std::move(func));
+}
+
+std::unique_ptr<CancelableIdleTask> MakeCancelableIdleTask(
+ CancelableTaskManager* manager, std::function<void(double)> func) {
+ return base::make_unique<CancelableIdleFuncTask>(manager, std::move(func));
+}
+
+} // namespace internal
+} // namespace v8
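A sketch of the intended use, assuming a foreground task runner obtained from the platform (posting is the caller's responsibility and not part of this file):

    // Sketch: wrap a lambda as a cancelable task and post it.
    std::unique_ptr<CancelableTask> task =
        MakeCancelableTask(isolate, []() { /* foreground work */ });
    task_runner->PostTask(std::move(task));  // task_runner: assumed v8::TaskRunner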
diff --git a/deps/v8/src/task-utils.h b/deps/v8/src/task-utils.h
new file mode 100644
index 0000000000..81ad5e1e3a
--- /dev/null
+++ b/deps/v8/src/task-utils.h
@@ -0,0 +1,33 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_TASK_UTILS_H_
+#define V8_TASK_UTILS_H_
+
+#include <functional>
+#include <memory>
+
+namespace v8 {
+
+namespace internal {
+
+class CancelableIdleTask;
+class CancelableTask;
+class CancelableTaskManager;
+class Isolate;
+
+std::unique_ptr<CancelableTask> MakeCancelableTask(Isolate*,
+ std::function<void()>);
+std::unique_ptr<CancelableTask> MakeCancelableTask(CancelableTaskManager*,
+ std::function<void()>);
+
+std::unique_ptr<CancelableIdleTask> MakeCancelableIdleTask(
+ Isolate*, std::function<void(double)>);
+std::unique_ptr<CancelableIdleTask> MakeCancelableIdleTask(
+ CancelableTaskManager* manager, std::function<void(double)>);
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_TASK_UTILS_H_
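
A minimal usage sketch of the new helpers (V8-internal code; the task-runner plumbing is assumed for illustration and is not part of this patch):

#include "src/task-utils.h"

namespace v8 {
namespace internal {

// Illustrative only: wrap a lambda in a cancelable task and hand it to a
// platform task runner. If the isolate's CancelableTaskManager cancels
// pending tasks first, RunInternal() is never invoked.
void PostDeferredWork(Isolate* isolate,
                      const std::shared_ptr<v8::TaskRunner>& runner) {
  runner->PostTask(MakeCancelableTask(isolate, [] { /* deferred work */ }));
}

}  // namespace internal
}  // namespace v8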
diff --git a/deps/v8/src/thread-id.cc b/deps/v8/src/thread-id.cc
new file mode 100644
index 0000000000..3b89f16ef6
--- /dev/null
+++ b/deps/v8/src/thread-id.cc
@@ -0,0 +1,38 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/thread-id.h"
+#include "src/base/lazy-instance.h"
+#include "src/base/platform/platform.h"
+
+namespace v8 {
+namespace internal {
+
+base::Atomic32 ThreadId::highest_thread_id_ = 0;
+
+namespace {
+
+DEFINE_LAZY_LEAKY_OBJECT_GETTER(base::Thread::LocalStorageKey, GetThreadIdKey,
+ base::Thread::CreateThreadLocalKey());
+
+} // namespace
+
+// static
+ThreadId ThreadId::TryGetCurrent() {
+ int thread_id = base::Thread::GetThreadLocalInt(*GetThreadIdKey());
+ return thread_id == 0 ? Invalid() : ThreadId(thread_id);
+}
+
+// static
+int ThreadId::GetCurrentThreadId() {
+ int thread_id = base::Thread::GetThreadLocalInt(*GetThreadIdKey());
+ if (thread_id == 0) {
+ thread_id = AllocateThreadId();
+ base::Thread::SetThreadLocalInt(*GetThreadIdKey(), thread_id);
+ }
+ return thread_id;
+}
+
+} // namespace internal
+} // namespace v8
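
The scheme works because a fresh thread-local int slot reads as 0 and AllocateThreadId (declared in thread-id.h below) pre-increments highest_thread_id_, so real ids start at 1; 0 can therefore safely mean "no id assigned yet". A standalone restatement of that invariant, outside V8 (a sketch, not the patch's code):

#include <atomic>

std::atomic<int> highest_thread_id{0};
thread_local int cached_id = 0;  // 0 == not assigned yet, like fresh TLS

int CurrentThreadId() {
  if (cached_id == 0) cached_id = ++highest_thread_id;  // first id is 1
  return cached_id;
}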
diff --git a/deps/v8/src/thread-id.h b/deps/v8/src/thread-id.h
new file mode 100644
index 0000000000..437109b839
--- /dev/null
+++ b/deps/v8/src/thread-id.h
@@ -0,0 +1,73 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_THREAD_ID_H_
+#define V8_THREAD_ID_H_
+
+#include "src/base/atomicops.h"
+
+namespace v8 {
+namespace internal {
+
+// Platform-independent, reliable thread identifier.
+class ThreadId {
+ public:
+ // Creates an invalid ThreadId.
+ ThreadId() { base::Relaxed_Store(&id_, kInvalidId); }
+
+ ThreadId& operator=(const ThreadId& other) V8_NOEXCEPT {
+ base::Relaxed_Store(&id_, base::Relaxed_Load(&other.id_));
+ return *this;
+ }
+
+ bool operator==(const ThreadId& other) const { return Equals(other); }
+
+ // Returns the ThreadId for the current thread, or an invalid id if none has been assigned.
+ static ThreadId TryGetCurrent();
+
+ // Returns ThreadId for current thread.
+ static ThreadId Current() { return ThreadId(GetCurrentThreadId()); }
+
+ // Returns invalid ThreadId (guaranteed not to be equal to any thread).
+ static ThreadId Invalid() { return ThreadId(kInvalidId); }
+
+ // Compares ThreadIds for equality.
+ V8_INLINE bool Equals(const ThreadId& other) const {
+ return base::Relaxed_Load(&id_) == base::Relaxed_Load(&other.id_);
+ }
+
+ // Checks whether this ThreadId refers to any thread.
+ V8_INLINE bool IsValid() const {
+ return base::Relaxed_Load(&id_) != kInvalidId;
+ }
+
+ // Converts ThreadId to an integer representation
+ // (required for public API: V8::V8::GetCurrentThreadId).
+ int ToInteger() const { return static_cast<int>(base::Relaxed_Load(&id_)); }
+
+ // Converts ThreadId to an integer representation
+ // (required for public API: V8::V8::TerminateExecution).
+ static ThreadId FromInteger(int id) { return ThreadId(id); }
+
+ private:
+ static const int kInvalidId = -1;
+
+ explicit ThreadId(int id) { base::Relaxed_Store(&id_, id); }
+
+ static int AllocateThreadId() {
+ int new_id = base::Relaxed_AtomicIncrement(&highest_thread_id_, 1);
+ return new_id;
+ }
+
+ static int GetCurrentThreadId();
+
+ base::Atomic32 id_;
+
+ static base::Atomic32 highest_thread_id_;
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_THREAD_ID_H_
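
A short sketch of the intended call patterns (V8-internal; ThreadId as declared above):

#include "src/thread-id.h"

namespace v8 {
namespace internal {

// Illustrative only. Current() assigns an id on first use, while
// TryGetCurrent() never allocates, so it is safe on threads that should
// stay unregistered.
bool IsOwnedByCurrentThread(ThreadId owner) {
  ThreadId current = ThreadId::TryGetCurrent();
  return current.IsValid() && current == owner;  // operator== calls Equals()
}

}  // namespace internal
}  // namespace v8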
diff --git a/deps/v8/src/torque-assembler.h b/deps/v8/src/torque-assembler.h
deleted file mode 100644
index 3d7cf361c4..0000000000
--- a/deps/v8/src/torque-assembler.h
+++ /dev/null
@@ -1,58 +0,0 @@
-// Copyright 2018 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_TORQUE_ASSEMBLER_H_
-#define V8_TORQUE_ASSEMBLER_H_
-
-#include <deque>
-#include <vector>
-
-#include "src/code-stub-assembler.h"
-
-#include "src/base/optional.h"
-
-namespace v8 {
-namespace internal {
-
-class TorqueAssembler : public CodeStubAssembler {
- public:
- using CodeStubAssembler::CodeStubAssembler;
-
- protected:
- template <class... Ts>
- using PLabel = compiler::CodeAssemblerParameterizedLabel<Ts...>;
-
- template <class T>
- TNode<T> Uninitialized() {
- return {};
- }
-
- template <class... T, class... Args>
- void Goto(PLabel<T...>* label, Args... args) {
- label->AddInputs(args...);
- CodeStubAssembler::Goto(label->plain_label());
- }
- using CodeStubAssembler::Goto;
- template <class... T>
- void Bind(PLabel<T...>* label, TNode<T>*... phis) {
- Bind(label->plain_label());
- label->CreatePhis(phis...);
- }
- void Bind(Label* label) { CodeAssembler::Bind(label); }
- using CodeStubAssembler::Bind;
- template <class... T, class... Args>
- void Branch(TNode<BoolT> condition, PLabel<T...>* if_true,
- PLabel<T...>* if_false, Args... args) {
- if_true->AddInputs(args...);
- if_false->AddInputs(args...);
- CodeStubAssembler::Branch(condition, if_true->plain_label(),
- if_false->plain_label());
- }
- using CodeStubAssembler::Branch;
-};
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_TORQUE_ASSEMBLER_H_
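
TorqueAssembler existed only to adapt parameterized labels (PLabel) and phi binding for Torque-generated code. The csa-generator changes below make it unnecessary: emitted code now names compiler::CodeAssemblerParameterizedLabel directly and routes Bind/Goto through an explicit ca_ assembler member. The label pattern the generated code now spells out itself looks roughly like this (hand-written to mirror the generator output below, not generator output itself):

compiler::CodeAssemblerParameterizedLabel<Smi> block1(
    &ca_, compiler::CodeAssemblerLabel::kNonDeferred);
ca_.Goto(&block1, smi_value);       // label inputs become phis
compiler::TNode<Smi> phi0;
ca_.Bind(&block1, &phi0);           // phi value is rebound on entry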
diff --git a/deps/v8/src/torque/ast.h b/deps/v8/src/torque/ast.h
index 6bd4d79096..4a7eecb16c 100644
--- a/deps/v8/src/torque/ast.h
+++ b/deps/v8/src/torque/ast.h
@@ -19,6 +19,10 @@ namespace torque {
#define AST_EXPRESSION_NODE_KIND_LIST(V) \
V(CallExpression) \
+ V(CallMethodExpression) \
+ V(LoadObjectFieldExpression) \
+ V(StoreObjectFieldExpression) \
+ V(IntrinsicCallExpression) \
V(StructExpression) \
V(LogicalOrExpression) \
V(LogicalAndExpression) \
@@ -30,6 +34,7 @@ namespace torque {
V(ElementAccessExpression) \
V(AssignmentExpression) \
V(IncrementDecrementExpression) \
+ V(NewExpression) \
V(AssumeTypeImpossibleExpression) \
V(StatementExpression) \
V(TryLabelExpression)
@@ -53,7 +58,7 @@ namespace torque {
V(AssertStatement) \
V(TailCallStatement) \
V(VarDeclarationStatement) \
- V(GotoStatement) \
+ V(GotoStatement)
#define AST_DECLARATION_NODE_KIND_LIST(V) \
V(TypeDeclaration) \
@@ -62,17 +67,19 @@ namespace torque {
V(GenericDeclaration) \
V(SpecializationDeclaration) \
V(ExternConstDeclaration) \
+ V(ClassDeclaration) \
V(StructDeclaration) \
- V(DefaultModuleDeclaration) \
- V(ExplicitModuleDeclaration) \
- V(ConstDeclaration)
+ V(NamespaceDeclaration) \
+ V(ConstDeclaration) \
+ V(CppIncludeDeclaration)
#define AST_CALLABLE_NODE_KIND_LIST(V) \
V(TorqueMacroDeclaration) \
V(TorqueBuiltinDeclaration) \
V(ExternalMacroDeclaration) \
V(ExternalBuiltinDeclaration) \
- V(ExternalRuntimeDeclaration)
+ V(ExternalRuntimeDeclaration) \
+ V(IntrinsicDeclaration)
#define AST_NODE_KIND_LIST(V) \
AST_EXPRESSION_NODE_KIND_LIST(V) \
@@ -152,49 +159,26 @@ struct Statement : AstNode {
DEFINE_AST_NODE_INNER_BOILERPLATE(Statement)
};
-class Module;
+class Namespace;
-struct ModuleDeclaration : Declaration {
- ModuleDeclaration(AstNode::Kind kind, SourcePosition pos,
- std::vector<Declaration*> declarations)
- : Declaration(kind, pos),
- module(nullptr),
- declarations(std::move(declarations)) {}
- virtual bool IsDefault() const = 0;
- // virtual std::string GetName() const = 0;
- void SetModule(Module* m) { module = m; }
- Module* GetModule() const { return module; }
- Module* module;
+struct NamespaceDeclaration : Declaration {
+ DEFINE_AST_NODE_LEAF_BOILERPLATE(NamespaceDeclaration)
+ NamespaceDeclaration(SourcePosition pos, std::string name,
+ std::vector<Declaration*> declarations)
+ : Declaration(kKind, pos),
+ declarations(std::move(declarations)),
+ name(name) {}
std::vector<Declaration*> declarations;
-};
-
-struct DefaultModuleDeclaration : ModuleDeclaration {
- DEFINE_AST_NODE_LEAF_BOILERPLATE(DefaultModuleDeclaration)
- DefaultModuleDeclaration(SourcePosition pos,
- std::vector<Declaration*> declarations)
- : ModuleDeclaration(kKind, pos, std::move(declarations)) {}
- bool IsDefault() const override { return true; }
-};
-
-struct ExplicitModuleDeclaration : ModuleDeclaration {
- DEFINE_AST_NODE_LEAF_BOILERPLATE(ExplicitModuleDeclaration)
- ExplicitModuleDeclaration(SourcePosition pos, std::string name,
- std::vector<Declaration*> declarations)
- : ModuleDeclaration(kKind, pos, std::move(declarations)),
- name(std::move(name)) {}
- bool IsDefault() const override { return false; }
std::string name;
};
class Ast {
public:
- Ast() : default_module_{SourcePosition{CurrentSourceFile::Get(), 0, 0}, {}} {}
+ Ast() {}
- std::vector<Declaration*>& declarations() {
- return default_module_.declarations;
- }
+ std::vector<Declaration*>& declarations() { return declarations_; }
const std::vector<Declaration*>& declarations() const {
- return default_module_.declarations;
+ return declarations_;
}
template <class T>
T* AddNode(std::unique_ptr<T> node) {
@@ -202,48 +186,112 @@ class Ast {
nodes_.push_back(std::move(node));
return result;
}
- DefaultModuleDeclaration* default_module() { return &default_module_; }
private:
- DefaultModuleDeclaration default_module_;
+ std::vector<Declaration*> declarations_;
std::vector<std::unique_ptr<AstNode>> nodes_;
};
+static const char* const kThisParameterName = "this";
+
struct IdentifierExpression : LocationExpression {
DEFINE_AST_NODE_LEAF_BOILERPLATE(IdentifierExpression)
- IdentifierExpression(SourcePosition pos, std::string name,
- std::vector<TypeExpression*> args = {})
+ IdentifierExpression(SourcePosition pos,
+ std::vector<std::string> namespace_qualification,
+ std::string name, std::vector<TypeExpression*> args = {})
: LocationExpression(kKind, pos),
+ namespace_qualification(std::move(namespace_qualification)),
name(std::move(name)),
generic_arguments(std::move(args)) {}
+ IdentifierExpression(SourcePosition pos, std::string name,
+ std::vector<TypeExpression*> args = {})
+ : IdentifierExpression(pos, {}, std::move(name), std::move(args)) {}
+ bool IsThis() const { return name == kThisParameterName; }
+ std::vector<std::string> namespace_qualification;
+ std::string name;
+ std::vector<TypeExpression*> generic_arguments;
+};
+
+struct LoadObjectFieldExpression : Expression {
+ DEFINE_AST_NODE_LEAF_BOILERPLATE(LoadObjectFieldExpression)
+ LoadObjectFieldExpression(SourcePosition pos, Expression* base,
+ std::string field_name)
+ : Expression(kKind, pos),
+ base(std::move(base)),
+ field_name(std::move(field_name)) {}
+ Expression* base;
+ std::string field_name;
+};
+
+struct StoreObjectFieldExpression : Expression {
+ DEFINE_AST_NODE_LEAF_BOILERPLATE(StoreObjectFieldExpression)
+ StoreObjectFieldExpression(SourcePosition pos, Expression* base,
+ std::string field_name, Expression* value)
+ : Expression(kKind, pos),
+ base(std::move(base)),
+ field_name(std::move(field_name)),
+ value(std::move(value)) {}
+ Expression* base;
+ std::string field_name;
+ Expression* value;
+ size_t offset;
+};
+
+struct IntrinsicCallExpression : Expression {
+ DEFINE_AST_NODE_LEAF_BOILERPLATE(IntrinsicCallExpression)
+ IntrinsicCallExpression(SourcePosition pos, std::string name,
+ std::vector<TypeExpression*> generic_arguments,
+ std::vector<Expression*> arguments)
+ : Expression(kKind, pos),
+ name(std::move(name)),
+ generic_arguments(std::move(generic_arguments)),
+ arguments(std::move(arguments)) {}
std::string name;
std::vector<TypeExpression*> generic_arguments;
+ std::vector<Expression*> arguments;
+};
+
+struct CallMethodExpression : Expression {
+ DEFINE_AST_NODE_LEAF_BOILERPLATE(CallMethodExpression)
+ CallMethodExpression(SourcePosition pos, Expression* target,
+ IdentifierExpression* method,
+ std::vector<Expression*> arguments,
+ std::vector<std::string> labels)
+ : Expression(kKind, pos),
+ target(target),
+ method(method),
+ arguments(std::move(arguments)),
+ labels(std::move(labels)) {}
+ Expression* target;
+ IdentifierExpression* method;
+ std::vector<Expression*> arguments;
+ std::vector<std::string> labels;
};
struct CallExpression : Expression {
DEFINE_AST_NODE_LEAF_BOILERPLATE(CallExpression)
- CallExpression(SourcePosition pos, std::string callee, bool is_operator,
- std::vector<TypeExpression*> generic_arguments,
+ CallExpression(SourcePosition pos, IdentifierExpression* callee,
std::vector<Expression*> arguments,
std::vector<std::string> labels)
: Expression(kKind, pos),
- callee(pos, std::move(callee), std::move(generic_arguments)),
- is_operator(is_operator),
+ callee(callee),
arguments(std::move(arguments)),
labels(std::move(labels)) {}
- IdentifierExpression callee;
- bool is_operator;
+ IdentifierExpression* callee;
std::vector<Expression*> arguments;
std::vector<std::string> labels;
};
struct StructExpression : Expression {
DEFINE_AST_NODE_LEAF_BOILERPLATE(StructExpression)
- StructExpression(SourcePosition pos, std::string name,
- std::vector<Expression*> expressions)
+ StructExpression(SourcePosition pos,
+ std::vector<std::string> namespace_qualification,
+ std::string name, std::vector<Expression*> expressions)
: Expression(kKind, pos),
+ namespace_qualification(std::move(namespace_qualification)),
name(std::move(name)),
expressions(std::move(expressions)) {}
+ std::vector<std::string> namespace_qualification;
std::string name;
std::vector<Expression*> expressions;
};
@@ -352,21 +400,43 @@ struct AssumeTypeImpossibleExpression : Expression {
Expression* expression;
};
+struct NewExpression : Expression {
+ DEFINE_AST_NODE_LEAF_BOILERPLATE(NewExpression)
+ NewExpression(SourcePosition pos, TypeExpression* type,
+ std::vector<Expression*> parameters)
+ : Expression(kKind, pos), type(type), parameters(parameters) {}
+ TypeExpression* type;
+ std::vector<Expression*> parameters;
+};
+
struct ParameterList {
std::vector<std::string> names;
std::vector<TypeExpression*> types;
+ size_t implicit_count;
bool has_varargs;
std::string arguments_variable;
- static ParameterList Empty() { return ParameterList{{}, {}, false, ""}; }
+ static ParameterList Empty() { return ParameterList{{}, {}, 0, false, ""}; }
+ std::vector<TypeExpression*> GetImplicitTypes() {
+ return std::vector<TypeExpression*>(types.begin(),
+ types.begin() + implicit_count);
+ }
+ std::vector<TypeExpression*> GetExplicitTypes() {
+ return std::vector<TypeExpression*>(types.begin() + implicit_count,
+ types.end());
+ }
};
struct BasicTypeExpression : TypeExpression {
DEFINE_AST_NODE_LEAF_BOILERPLATE(BasicTypeExpression)
- BasicTypeExpression(SourcePosition pos, bool is_constexpr, std::string name)
+ BasicTypeExpression(SourcePosition pos,
+ std::vector<std::string> namespace_qualification,
+ bool is_constexpr, std::string name)
: TypeExpression(kKind, pos),
+ namespace_qualification(std::move(namespace_qualification)),
is_constexpr(is_constexpr),
name(std::move(name)) {}
+ std::vector<std::string> namespace_qualification;
bool is_constexpr;
std::string name;
};
@@ -501,7 +571,7 @@ struct ForLoopStatement : Statement {
DEFINE_AST_NODE_LEAF_BOILERPLATE(ForLoopStatement)
ForLoopStatement(SourcePosition pos, base::Optional<Statement*> declaration,
base::Optional<Expression*> test,
- base::Optional<Expression*> action, Statement* body)
+ base::Optional<Statement*> action, Statement* body)
: Statement(kKind, pos),
var_declaration(),
test(std::move(test)),
@@ -512,7 +582,7 @@ struct ForLoopStatement : Statement {
}
base::Optional<VarDeclarationStatement*> var_declaration;
base::Optional<Expression*> test;
- base::Optional<Expression*> action;
+ base::Optional<Statement*> action;
Statement* body;
};
@@ -543,7 +613,7 @@ struct ForOfLoopStatement : Statement {
struct LabelBlock : AstNode {
DEFINE_AST_NODE_LEAF_BOILERPLATE(LabelBlock)
- LabelBlock(SourcePosition pos, const std::string& label,
+ LabelBlock(SourcePosition pos, std::string label,
const ParameterList& parameters, Statement* body)
: AstNode(kKind, pos),
label(std::move(label)),
@@ -563,11 +633,13 @@ struct StatementExpression : Expression {
struct TryLabelExpression : Expression {
DEFINE_AST_NODE_LEAF_BOILERPLATE(TryLabelExpression)
- TryLabelExpression(SourcePosition pos, Expression* try_expression,
- LabelBlock* label_block)
+ TryLabelExpression(SourcePosition pos, bool catch_exceptions,
+ Expression* try_expression, LabelBlock* label_block)
: Expression(kKind, pos),
+ catch_exceptions(catch_exceptions),
try_expression(try_expression),
label_block(label_block) {}
+ bool catch_exceptions;
Expression* try_expression;
LabelBlock* label_block;
};
@@ -585,16 +657,18 @@ struct BlockStatement : Statement {
struct TypeDeclaration : Declaration {
DEFINE_AST_NODE_LEAF_BOILERPLATE(TypeDeclaration)
- TypeDeclaration(SourcePosition pos, std::string name,
+ TypeDeclaration(SourcePosition pos, std::string name, bool transient,
base::Optional<std::string> extends,
base::Optional<std::string> generates,
base::Optional<std::string> constexpr_generates)
: Declaration(kKind, pos),
name(std::move(name)),
+ transient(transient),
extends(std::move(extends)),
generates(std::move(generates)),
constexpr_generates(std::move(constexpr_generates)) {}
std::string name;
+ bool transient;
base::Optional<std::string> extends;
base::Optional<std::string> generates;
base::Optional<std::string> constexpr_generates;
@@ -614,6 +688,15 @@ struct NameAndTypeExpression {
TypeExpression* type;
};
+struct StructFieldExpression {
+ NameAndTypeExpression name_and_type;
+};
+
+struct ClassFieldExpression {
+ NameAndTypeExpression name_and_type;
+ bool weak;
+};
+
struct LabelAndTypes {
std::string name;
std::vector<TypeExpression*> types;
@@ -628,84 +711,106 @@ struct CallableNodeSignature {
};
struct CallableNode : AstNode {
- CallableNode(AstNode::Kind kind, SourcePosition pos, std::string name,
- ParameterList parameters, TypeExpression* return_type,
- const LabelAndTypesVector& labels)
+ CallableNode(AstNode::Kind kind, SourcePosition pos, bool transitioning,
+ std::string name, ParameterList parameters,
+ TypeExpression* return_type, const LabelAndTypesVector& labels)
: AstNode(kind, pos),
+ transitioning(transitioning),
name(std::move(name)),
signature(new CallableNodeSignature{parameters, return_type, labels}) {}
DEFINE_AST_NODE_INNER_BOILERPLATE(CallableNode)
+ bool transitioning;
std::string name;
std::unique_ptr<CallableNodeSignature> signature;
};
struct MacroDeclaration : CallableNode {
DEFINE_AST_NODE_INNER_BOILERPLATE(MacroDeclaration)
- MacroDeclaration(AstNode::Kind kind, SourcePosition pos, std::string name,
- base::Optional<std::string> op, ParameterList parameters,
- TypeExpression* return_type,
+ MacroDeclaration(AstNode::Kind kind, SourcePosition pos, bool transitioning,
+ std::string name, base::Optional<std::string> op,
+ ParameterList parameters, TypeExpression* return_type,
const LabelAndTypesVector& labels)
- : CallableNode(kind, pos, std::move(name), std::move(parameters),
- return_type, labels),
+ : CallableNode(kind, pos, transitioning, std::move(name),
+ std::move(parameters), return_type, labels),
op(std::move(op)) {}
base::Optional<std::string> op;
};
struct ExternalMacroDeclaration : MacroDeclaration {
DEFINE_AST_NODE_LEAF_BOILERPLATE(ExternalMacroDeclaration)
- ExternalMacroDeclaration(SourcePosition pos, std::string name,
- base::Optional<std::string> op,
+ ExternalMacroDeclaration(SourcePosition pos, bool transitioning,
+ std::string external_assembler_name,
+ std::string name, base::Optional<std::string> op,
ParameterList parameters,
TypeExpression* return_type,
const LabelAndTypesVector& labels)
- : MacroDeclaration(kKind, pos, std::move(name), std::move(op),
- std::move(parameters), return_type, labels) {}
+ : MacroDeclaration(kKind, pos, transitioning, std::move(name),
+ std::move(op), std::move(parameters), return_type,
+ labels),
+ external_assembler_name(std::move(external_assembler_name)) {}
+ std::string external_assembler_name;
+};
+
+struct IntrinsicDeclaration : CallableNode {
+ DEFINE_AST_NODE_LEAF_BOILERPLATE(IntrinsicDeclaration)
+ IntrinsicDeclaration(SourcePosition pos, std::string name,
+ ParameterList parameters, TypeExpression* return_type)
+ : CallableNode(kKind, pos, false, std::move(name), std::move(parameters),
+ return_type, {}) {}
};
struct TorqueMacroDeclaration : MacroDeclaration {
DEFINE_AST_NODE_LEAF_BOILERPLATE(TorqueMacroDeclaration)
- TorqueMacroDeclaration(SourcePosition pos, std::string name,
- base::Optional<std::string> op,
+ TorqueMacroDeclaration(SourcePosition pos, bool transitioning,
+ std::string name, base::Optional<std::string> op,
ParameterList parameters, TypeExpression* return_type,
const LabelAndTypesVector& labels)
- : MacroDeclaration(kKind, pos, std::move(name), std::move(op),
- std::move(parameters), return_type, labels) {}
+ : MacroDeclaration(kKind, pos, transitioning, std::move(name),
+ std::move(op), std::move(parameters), return_type,
+ labels) {}
};
struct BuiltinDeclaration : CallableNode {
+ DEFINE_AST_NODE_INNER_BOILERPLATE(BuiltinDeclaration)
BuiltinDeclaration(AstNode::Kind kind, SourcePosition pos,
- bool javascript_linkage, std::string name,
- ParameterList parameters, TypeExpression* return_type)
- : CallableNode(kind, pos, std::move(name), std::move(parameters),
- return_type, {}),
+ bool javascript_linkage, bool transitioning,
+ std::string name, ParameterList parameters,
+ TypeExpression* return_type)
+ : CallableNode(kind, pos, transitioning, std::move(name),
+ std::move(parameters), return_type, {}),
javascript_linkage(javascript_linkage) {}
bool javascript_linkage;
};
struct ExternalBuiltinDeclaration : BuiltinDeclaration {
DEFINE_AST_NODE_LEAF_BOILERPLATE(ExternalBuiltinDeclaration)
- ExternalBuiltinDeclaration(SourcePosition pos, bool javascript_linkage,
- std::string name, ParameterList parameters,
+ ExternalBuiltinDeclaration(SourcePosition pos, bool transitioning,
+ bool javascript_linkage, std::string name,
+ ParameterList parameters,
TypeExpression* return_type)
- : BuiltinDeclaration(kKind, pos, javascript_linkage, std::move(name),
- std::move(parameters), return_type) {}
+ : BuiltinDeclaration(kKind, pos, javascript_linkage, transitioning,
+ std::move(name), std::move(parameters),
+ return_type) {}
};
struct TorqueBuiltinDeclaration : BuiltinDeclaration {
DEFINE_AST_NODE_LEAF_BOILERPLATE(TorqueBuiltinDeclaration)
- TorqueBuiltinDeclaration(SourcePosition pos, bool javascript_linkage,
- std::string name, ParameterList parameters,
+ TorqueBuiltinDeclaration(SourcePosition pos, bool transitioning,
+ bool javascript_linkage, std::string name,
+ ParameterList parameters,
TypeExpression* return_type)
- : BuiltinDeclaration(kKind, pos, javascript_linkage, std::move(name),
- std::move(parameters), return_type) {}
+ : BuiltinDeclaration(kKind, pos, javascript_linkage, transitioning,
+ std::move(name), std::move(parameters),
+ return_type) {}
};
struct ExternalRuntimeDeclaration : CallableNode {
DEFINE_AST_NODE_LEAF_BOILERPLATE(ExternalRuntimeDeclaration)
- ExternalRuntimeDeclaration(SourcePosition pos, std::string name,
- ParameterList parameters,
+ ExternalRuntimeDeclaration(SourcePosition pos, bool transitioning,
+ std::string name, ParameterList parameters,
TypeExpression* return_type)
- : CallableNode(kKind, pos, name, parameters, return_type, {}) {}
+ : CallableNode(kKind, pos, transitioning, name, parameters, return_type,
+ {}) {}
};
struct ConstDeclaration : Declaration {
@@ -724,10 +829,10 @@ struct ConstDeclaration : Declaration {
struct StandardDeclaration : Declaration {
DEFINE_AST_NODE_LEAF_BOILERPLATE(StandardDeclaration)
StandardDeclaration(SourcePosition pos, CallableNode* callable,
- Statement* body)
+ base::Optional<Statement*> body)
: Declaration(kKind, pos), callable(callable), body(body) {}
CallableNode* callable;
- Statement* body;
+ base::Optional<Statement*> body;
};
struct GenericDeclaration : Declaration {
@@ -781,12 +886,43 @@ struct ExternConstDeclaration : Declaration {
struct StructDeclaration : Declaration {
DEFINE_AST_NODE_LEAF_BOILERPLATE(StructDeclaration)
StructDeclaration(SourcePosition pos, std::string name,
- std::vector<NameAndTypeExpression> fields)
+ std::vector<Declaration*> methods,
+ std::vector<StructFieldExpression> fields)
: Declaration(kKind, pos),
name(std::move(name)),
+ methods(std::move(methods)),
fields(std::move(fields)) {}
std::string name;
- std::vector<NameAndTypeExpression> fields;
+ std::vector<Declaration*> methods;
+ std::vector<StructFieldExpression> fields;
+};
+
+struct ClassDeclaration : Declaration {
+ DEFINE_AST_NODE_LEAF_BOILERPLATE(ClassDeclaration)
+ ClassDeclaration(SourcePosition pos, std::string name, bool transient,
+ std::string super, base::Optional<std::string> generates,
+ std::vector<Declaration*> methods,
+ std::vector<ClassFieldExpression> fields)
+ : Declaration(kKind, pos),
+ name(std::move(name)),
+ transient(transient),
+ super(std::move(super)),
+ generates(std::move(generates)),
+ methods(std::move(methods)),
+ fields(std::move(fields)) {}
+ std::string name;
+ bool transient;
+ std::string super;
+ base::Optional<std::string> generates;
+ std::vector<Declaration*> methods;
+ std::vector<ClassFieldExpression> fields;
+};
+
+struct CppIncludeDeclaration : Declaration {
+ DEFINE_AST_NODE_LEAF_BOILERPLATE(CppIncludeDeclaration)
+ CppIncludeDeclaration(SourcePosition pos, std::string include_path)
+ : Declaration(kKind, pos), include_path(std::move(include_path)) {}
+ std::string include_path;
};
#define ENUM_ITEM(name) \
@@ -806,6 +942,21 @@ bool AstNodeClassCheck::IsInstanceOf(AstNode* node) {
#undef ENUM_ITEM
+inline bool IsDeferred(Statement* stmt) {
+ if (auto* block = BlockStatement::DynamicCast(stmt)) {
+ return block->deferred;
+ }
+ return false;
+}
+
+DECLARE_CONTEXTUAL_VARIABLE(CurrentAst, Ast);
+
+template <class T, class... Args>
+T* MakeNode(Args... args) {
+ return CurrentAst::Get().AddNode(std::unique_ptr<T>(
+ new T(CurrentSourcePosition::Get(), std::move(args)...)));
+}
+
} // namespace torque
} // namespace internal
} // namespace v8
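
The new MakeNode helper ties two contextual variables together: CurrentAst (declared just above) takes ownership of the node, and CurrentSourcePosition supplies the position argument that every AstNode constructor takes first. A hedged sketch of a parser action using it (assumes both contextuals have active scopes, as the Torque frontend is expected to establish):

// Illustrative only: allocate an AST node owned by the current Ast and
// stamped with the current source position.
IdentifierExpression* MakeThisReference() {
  // Expands to CurrentAst::Get().AddNode(std::unique_ptr<...>(
  //     new IdentifierExpression(CurrentSourcePosition::Get(), "this"))).
  return MakeNode<IdentifierExpression>(std::string(kThisParameterName));
}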
diff --git a/deps/v8/src/torque/cfg.cc b/deps/v8/src/torque/cfg.cc
index 1489d9f6af..8ff3aec5d7 100644
--- a/deps/v8/src/torque/cfg.cc
+++ b/deps/v8/src/torque/cfg.cc
@@ -13,38 +13,60 @@ namespace torque {
void Block::SetInputTypes(const Stack<const Type*>& input_types) {
if (!input_types_) {
input_types_ = input_types;
- } else if (*input_types_ != input_types) {
- std::stringstream error;
- error << "incompatible types at branch:\n";
- for (intptr_t i = std::max(input_types_->Size(), input_types.Size()) - 1;
- i >= 0; --i) {
- base::Optional<const Type*> left;
- base::Optional<const Type*> right;
- if (static_cast<size_t>(i) < input_types.Size()) {
- left = input_types.Peek(BottomOffset{static_cast<size_t>(i)});
- }
- if (static_cast<size_t>(i) < input_types_->Size()) {
- right = input_types_->Peek(BottomOffset{static_cast<size_t>(i)});
+ return;
+ } else if (*input_types_ == input_types) {
+ return;
+ }
+
+ DCHECK_EQ(input_types.Size(), input_types_->Size());
+ Stack<const Type*> merged_types;
+ bool widened = false;
+ auto c2_iterator = input_types.begin();
+ for (const Type* c1 : *input_types_) {
+ const Type* merged_type = TypeOracle::GetUnionType(c1, *c2_iterator++);
+ if (!merged_type->IsSubtypeOf(c1)) {
+ widened = true;
+ }
+ merged_types.Push(merged_type);
+ }
+ if (merged_types.Size() == input_types_->Size()) {
+ if (widened) {
+ input_types_ = merged_types;
+ Retype();
+ }
+ return;
+ }
+
+ std::stringstream error;
+ error << "incompatible types at branch:\n";
+ for (intptr_t i = std::max(input_types_->Size(), input_types.Size()) - 1;
+ i >= 0; --i) {
+ base::Optional<const Type*> left;
+ base::Optional<const Type*> right;
+ if (static_cast<size_t>(i) < input_types.Size()) {
+ left = input_types.Peek(BottomOffset{static_cast<size_t>(i)});
+ }
+ if (static_cast<size_t>(i) < input_types_->Size()) {
+ right = input_types_->Peek(BottomOffset{static_cast<size_t>(i)});
+ }
+ if (left && right && *left == *right) {
+ error << **left << "\n";
+ } else {
+ if (left) {
+ error << **left;
+ } else {
+ error << "/*missing*/";
}
- if (left && right && *left == *right) {
- error << **left << "\n";
+ error << " => ";
+ if (right) {
+ error << **right;
} else {
- if (left) {
- error << **left;
- } else {
- error << "/*missing*/";
- }
- error << " => ";
- if (right) {
- error << **right;
- } else {
- error << "/*missing*/";
- }
- error << "\n";
+ error << "/*missing*/";
}
+ error << "\n";
}
- ReportError(error.str());
}
+ ReportError(error.str());
}
void CfgAssembler::Bind(Block* block) {
@@ -125,9 +147,18 @@ void CfgAssembler::Print(std::string s) {
Emit(PrintConstantStringInstruction{std::move(s)});
}
-void CfgAssembler::Unreachable() { Emit(DebugBreakInstruction{true}); }
+void CfgAssembler::AssertionFailure(std::string message) {
+ Emit(AbortInstruction{AbortInstruction::Kind::kAssertionFailure,
+ std::move(message)});
+}
-void CfgAssembler::DebugBreak() { Emit(DebugBreakInstruction{false}); }
+void CfgAssembler::Unreachable() {
+ Emit(AbortInstruction{AbortInstruction::Kind::kUnreachable});
+}
+
+void CfgAssembler::DebugBreak() {
+ Emit(AbortInstruction{AbortInstruction::Kind::kDebugBreak});
+}
} // namespace torque
} // namespace internal
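
Block::SetInputTypes previously rejected any mismatch between a block's recorded input types and a newly arriving branch's stack. It now tries a slot-by-slot merge first: each slot is widened to the union of the old and incoming type, and if anything actually widened, Retype() re-runs type propagation through the block's instructions; the "incompatible types at branch" error is kept only as the fallback. A simplified standalone restatement of the merge loop (the real code takes unions via TypeOracle::GetUnionType and tests widening with IsSubtypeOf; equal stack heights are assumed, as the DCHECK above enforces):

#include <vector>

// Returns true if any slot widened, in which case the caller re-types the
// block; *current holds the merged types either way.
template <class Type, class UnionFn>
bool MergeSlotTypes(std::vector<const Type*>* current,
                    const std::vector<const Type*>& incoming,
                    UnionFn GetUnion) {
  bool widened = false;
  for (size_t i = 0; i < current->size(); ++i) {
    const Type* merged = GetUnion((*current)[i], incoming[i]);
    if (merged != (*current)[i]) widened = true;  // stand-in for !IsSubtypeOf
    (*current)[i] = merged;
  }
  return widened;
}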
diff --git a/deps/v8/src/torque/cfg.h b/deps/v8/src/torque/cfg.h
index 6fca593505..67b64bbc04 100644
--- a/deps/v8/src/torque/cfg.h
+++ b/deps/v8/src/torque/cfg.h
@@ -19,11 +19,15 @@ namespace v8 {
namespace internal {
namespace torque {
+class ControlFlowGraph;
+
class Block {
public:
- explicit Block(size_t id, base::Optional<Stack<const Type*>> input_types,
+ explicit Block(ControlFlowGraph* cfg, size_t id,
+ base::Optional<Stack<const Type*>> input_types,
bool is_deferred)
- : input_types_(std::move(input_types)),
+ : cfg_(cfg),
+ input_types_(std::move(input_types)),
id_(id),
is_deferred_(is_deferred) {}
void Add(Instruction instruction) {
@@ -34,6 +38,12 @@ class Block {
bool HasInputTypes() const { return input_types_ != base::nullopt; }
const Stack<const Type*>& InputTypes() const { return *input_types_; }
void SetInputTypes(const Stack<const Type*>& input_types);
+ void Retype() {
+ Stack<const Type*> current_stack = InputTypes();
+ for (const Instruction& instruction : instructions()) {
+ instruction.TypeInstruction(&current_stack, cfg_);
+ }
+ }
const std::vector<Instruction>& instructions() const { return instructions_; }
bool IsComplete() const {
@@ -43,6 +53,7 @@ class Block {
bool IsDeferred() const { return is_deferred_; }
private:
+ ControlFlowGraph* cfg_;
std::vector<Instruction> instructions_;
base::Optional<Stack<const Type*>> input_types_;
const size_t id_;
@@ -58,7 +69,8 @@ class ControlFlowGraph {
Block* NewBlock(base::Optional<Stack<const Type*>> input_types,
bool is_deferred) {
- blocks_.emplace_back(next_block_id_++, std::move(input_types), is_deferred);
+ blocks_.emplace_back(this, next_block_id_++, std::move(input_types),
+ is_deferred);
return &blocks_.back();
}
void PlaceBlock(Block* block) { placed_blocks_.push_back(block); }
@@ -133,15 +145,42 @@ class CfgAssembler {
void Poke(StackRange destination, StackRange origin,
base::Optional<const Type*> type);
void Print(std::string s);
+ void AssertionFailure(std::string message);
void Unreachable();
void DebugBreak();
+ void PrintCurrentStack(std::ostream& s) { s << "stack: " << current_stack_; }
+
private:
+ friend class CfgAssemblerScopedTemporaryBlock;
Stack<const Type*> current_stack_;
ControlFlowGraph cfg_;
Block* current_block_ = cfg_.start();
};
+class CfgAssemblerScopedTemporaryBlock {
+ public:
+ CfgAssemblerScopedTemporaryBlock(CfgAssembler* assembler, Block* block)
+ : assembler_(assembler), saved_block_(block) {
+ saved_stack_ = block->InputTypes();
+ DCHECK(!assembler->CurrentBlockIsComplete());
+ std::swap(saved_block_, assembler->current_block_);
+ std::swap(saved_stack_, assembler->current_stack_);
+ assembler->cfg_.PlaceBlock(block);
+ }
+
+ ~CfgAssemblerScopedTemporaryBlock() {
+ DCHECK(assembler_->CurrentBlockIsComplete());
+ std::swap(saved_block_, assembler_->current_block_);
+ std::swap(saved_stack_, assembler_->current_stack_);
+ }
+
+ private:
+ CfgAssembler* assembler_;
+ Stack<const Type*> saved_stack_;
+ Block* saved_block_;
+};
+
} // namespace torque
} // namespace internal
} // namespace v8
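
CfgAssemblerScopedTemporaryBlock is a swap-based RAII guard: construction parks the assembler's current block and stack and redirects emission into the given block, and destruction swaps the originals back while DCHECKing that the temporary block was completed. A hedged usage sketch (V8-internal; CfgAssembler and Block as declared above):

// Illustrative only: fill a deferred out-of-line block, then resume
// emitting at the original position.
void EmitDeferredAbort(CfgAssembler* assembler, Block* deferred_block) {
  {
    CfgAssemblerScopedTemporaryBlock scope(assembler, deferred_block);
    assembler->Print("unexpected state");
    assembler->Unreachable();  // terminator; the destructor checks this
  }
  // Emission continues on the original block and stack here.
}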
diff --git a/deps/v8/src/torque/csa-generator.cc b/deps/v8/src/torque/csa-generator.cc
index 902b1b7f4a..68bb170863 100644
--- a/deps/v8/src/torque/csa-generator.cc
+++ b/deps/v8/src/torque/csa-generator.cc
@@ -14,11 +14,11 @@ namespace torque {
base::Optional<Stack<std::string>> CSAGenerator::EmitGraph(
Stack<std::string> parameters) {
for (Block* block : cfg_.blocks()) {
- out_ << " PLabel<";
+ out_ << " compiler::CodeAssemblerParameterizedLabel<";
PrintCommaSeparatedList(out_, block->InputTypes(), [](const Type* t) {
return t->GetGeneratedTNodeTypeName();
});
- out_ << "> " << BlockName(block) << "(this, compiler::CodeAssemblerLabel::"
+ out_ << "> " << BlockName(block) << "(&ca_, compiler::CodeAssemblerLabel::"
<< (block->IsDeferred() ? "kDeferred" : "kNonDeferred") << ");\n";
}
@@ -40,10 +40,10 @@ Stack<std::string> CSAGenerator::EmitBlock(const Block* block) {
Stack<std::string> stack;
for (const Type* t : block->InputTypes()) {
stack.Push(FreshNodeName());
- out_ << " TNode<" << t->GetGeneratedTNodeTypeName() << "> "
+ out_ << " compiler::TNode<" << t->GetGeneratedTNodeTypeName() << "> "
<< stack.Top() << ";\n";
}
- out_ << " Bind(&" << BlockName(block);
+ out_ << " ca_.Bind(&" << BlockName(block);
for (const std::string& name : stack) {
out_ << ", &" << name;
}
@@ -87,27 +87,27 @@ void CSAGenerator::EmitInstruction(
// TODO(tebbi): This can trigger an error in CSA if it is used. Instead, we
// should prevent usage of uninitialized in the type system. This
// requires "if constexpr" being evaluated at Torque time.
- stack->Push("Uninitialized<" + instruction.type->GetGeneratedTNodeTypeName() +
- ">()");
+ stack->Push("ca_.Uninitialized<" +
+ instruction.type->GetGeneratedTNodeTypeName() + ">()");
}
void CSAGenerator::EmitInstruction(
- const PushCodePointerInstruction& instruction, Stack<std::string>* stack) {
- stack->Push(
- "UncheckedCast<Code>(HeapConstant(Builtins::CallableFor(isolate(), "
- "Builtins::k" +
- instruction.external_name + ").code()))");
+ const PushBuiltinPointerInstruction& instruction,
+ Stack<std::string>* stack) {
+ stack->Push("ca_.UncheckedCast<BuiltinPtr>(ca_.SmiConstant(Builtins::k" +
+ instruction.external_name + "))");
}
-void CSAGenerator::EmitInstruction(const ModuleConstantInstruction& instruction,
- Stack<std::string>* stack) {
+void CSAGenerator::EmitInstruction(
+ const NamespaceConstantInstruction& instruction,
+ Stack<std::string>* stack) {
const Type* type = instruction.constant->type();
std::vector<std::string> results;
for (const Type* lowered : LowerType(type)) {
results.push_back(FreshNodeName());
stack->Push(results.back());
- out_ << " TNode<" << lowered->GetGeneratedTNodeTypeName() << "> "
- << stack->Top() << ";\n";
+ out_ << " compiler::TNode<" << lowered->GetGeneratedTNodeTypeName()
+ << "> " << stack->Top() << ";\n";
out_ << " USE(" << stack->Top() << ");\n";
}
out_ << " ";
@@ -118,7 +118,8 @@ void CSAGenerator::EmitInstruction(const ModuleConstantInstruction& instruction,
} else if (results.size() == 1) {
out_ << results[0] << " = ";
}
- out_ << instruction.constant->constant_name() << "()";
+ out_ << instruction.constant->ExternalAssemblerName() << "(state_)."
+ << instruction.constant->constant_name() << "()";
if (type->IsStructType()) {
out_ << ".Flatten();\n";
} else {
@@ -126,39 +127,152 @@ void CSAGenerator::EmitInstruction(const ModuleConstantInstruction& instruction,
}
}
-void CSAGenerator::EmitInstruction(const CallCsaMacroInstruction& instruction,
- Stack<std::string>* stack) {
- std::vector<std::string> constexpr_arguments =
- instruction.constexpr_arguments;
- std::vector<std::string> args;
- TypeVector parameter_types =
- instruction.macro->signature().parameter_types.types;
+void CSAGenerator::ProcessArgumentsCommon(
+ const TypeVector& parameter_types, std::vector<std::string>* args,
+ std::vector<std::string>* constexpr_arguments, Stack<std::string>* stack) {
for (auto it = parameter_types.rbegin(); it != parameter_types.rend(); ++it) {
const Type* type = *it;
VisitResult arg;
if (type->IsConstexpr()) {
- args.push_back(std::move(constexpr_arguments.back()));
- constexpr_arguments.pop_back();
+ args->push_back(std::move(constexpr_arguments->back()));
+ constexpr_arguments->pop_back();
} else {
std::stringstream s;
size_t slot_count = LoweredSlotCount(type);
VisitResult arg = VisitResult(type, stack->TopRange(slot_count));
EmitCSAValue(arg, *stack, s);
- args.push_back(s.str());
+ args->push_back(s.str());
stack->PopMany(slot_count);
}
}
- std::reverse(args.begin(), args.end());
+ std::reverse(args->begin(), args->end());
+}
+
+void CSAGenerator::EmitInstruction(const CallIntrinsicInstruction& instruction,
+ Stack<std::string>* stack) {
+ std::vector<std::string> constexpr_arguments =
+ instruction.constexpr_arguments;
+ std::vector<std::string> args;
+ TypeVector parameter_types =
+ instruction.intrinsic->signature().parameter_types.types;
+ ProcessArgumentsCommon(parameter_types, &args, &constexpr_arguments, stack);
+
+ Stack<std::string> pre_call_stack = *stack;
+ const Type* return_type = instruction.intrinsic->signature().return_type;
+ std::vector<std::string> results;
+ for (const Type* type : LowerType(return_type)) {
+ results.push_back(FreshNodeName());
+ stack->Push(results.back());
+ out_ << " compiler::TNode<" << type->GetGeneratedTNodeTypeName() << "> "
+ << stack->Top() << ";\n";
+ out_ << " USE(" << stack->Top() << ");\n";
+ }
+ out_ << " ";
+
+ if (return_type->IsStructType()) {
+ out_ << "std::tie(";
+ PrintCommaSeparatedList(out_, results);
+ out_ << ") = ";
+ } else {
+ if (results.size() == 1) {
+ out_ << results[0] << " = ";
+ }
+ }
+
+ if (instruction.intrinsic->ExternalName() == "%RawObjectCast") {
+ if (parameter_types.size() != 1) {
+ ReportError("%RawObjectCast must take a single parameter");
+ }
+ if (return_type->IsSubtypeOf(TypeOracle::GetTaggedType())) {
+ if (return_type->GetGeneratedTNodeTypeName() !=
+ parameter_types[0]->GetGeneratedTNodeTypeName()) {
+ out_ << "TORQUE_CAST";
+ }
+ } else {
+ std::stringstream s;
+ s << "%RawObjectCast must cast to subtype of Tagged (" << *return_type
+ << " is not)";
+ ReportError(s.str());
+ }
+ } else if (instruction.intrinsic->ExternalName() == "%RawPointerCast") {
+ if (parameter_types.size() != 1) {
+ ReportError("%RawPointerCast must take a single parameter");
+ }
+ if (!return_type->IsSubtypeOf(TypeOracle::GetRawPtrType())) {
+ std::stringstream s;
+ s << "%RawObjectCast must cast to subtype of RawPtr (" << *return_type
+ << " is not)";
+ ReportError(s.str());
+ }
+ } else if (instruction.intrinsic->ExternalName() == "%FromConstexpr") {
+ if (parameter_types.size() != 1 || !parameter_types[0]->IsConstexpr()) {
+ ReportError(
+ "%FromConstexpr must take a single parameter with constexpr "
+ "type");
+ }
+ if (return_type->IsConstexpr()) {
+ ReportError("%FromConstexpr must return a non-constexpr type");
+ }
+ if (return_type->IsSubtypeOf(TypeOracle::GetSmiType())) {
+ out_ << "ca_.SmiConstant";
+ } else if (return_type->IsSubtypeOf(TypeOracle::GetNumberType())) {
+ out_ << "ca_.NumberConstant";
+ } else if (return_type->IsSubtypeOf(TypeOracle::GetStringType())) {
+ out_ << "ca_.StringConstant";
+ } else if (return_type->IsSubtypeOf(TypeOracle::GetObjectType())) {
+ ReportError(
+ "%FromConstexpr cannot cast to subclass of HeapObject unless it's a "
+ "String or Number");
+ } else if (return_type->IsSubtypeOf(TypeOracle::GetIntPtrType())) {
+ out_ << "ca_.IntPtrConstant";
+ } else if (return_type->IsSubtypeOf(TypeOracle::GetUIntPtrType())) {
+ out_ << "ca_.UintPtrConstant";
+ } else if (return_type->IsSubtypeOf(TypeOracle::GetInt32Type())) {
+ out_ << "ca_.Int32Constant";
+ } else {
+ std::stringstream s;
+ s << "%FromConstexpr does not support return type " << *return_type;
+ ReportError(s.str());
+ }
+ } else if (instruction.intrinsic->ExternalName() == "%Allocate") {
+ out_ << "ca_.UncheckedCast<" << return_type->GetGeneratedTNodeTypeName()
+ << ">(CodeStubAssembler(state_).Allocate";
+ } else {
+ ReportError("no built in intrinsic with name " +
+ instruction.intrinsic->ExternalName());
+ }
+ out_ << "(";
+ PrintCommaSeparatedList(out_, args);
+ if (instruction.intrinsic->ExternalName() == "%Allocate") out_ << ")";
+ if (return_type->IsStructType()) {
+ out_ << ").Flatten();\n";
+ } else {
+ out_ << ");\n";
+ }
+}
+
+void CSAGenerator::EmitInstruction(const CallCsaMacroInstruction& instruction,
+ Stack<std::string>* stack) {
+ std::vector<std::string> constexpr_arguments =
+ instruction.constexpr_arguments;
+ std::vector<std::string> args;
+ TypeVector parameter_types =
+ instruction.macro->signature().parameter_types.types;
+ ProcessArgumentsCommon(parameter_types, &args, &constexpr_arguments, stack);
+
+ Stack<std::string> pre_call_stack = *stack;
const Type* return_type = instruction.macro->signature().return_type;
std::vector<std::string> results;
for (const Type* type : LowerType(return_type)) {
results.push_back(FreshNodeName());
stack->Push(results.back());
- out_ << " TNode<" << type->GetGeneratedTNodeTypeName() << "> "
+ out_ << " compiler::TNode<" << type->GetGeneratedTNodeTypeName() << "> "
<< stack->Top() << ";\n";
out_ << " USE(" << stack->Top() << ");\n";
}
+ std::string catch_name =
+ PreCallableExceptionPreparation(instruction.catch_block);
out_ << " ";
if (return_type->IsStructType()) {
out_ << "std::tie(";
@@ -166,11 +280,12 @@ void CSAGenerator::EmitInstruction(const CallCsaMacroInstruction& instruction,
out_ << ") = ";
} else {
if (results.size() == 1) {
- out_ << results[0] << " = UncheckedCast<"
+ out_ << results[0] << " = ca_.UncheckedCast<"
<< return_type->GetGeneratedTNodeTypeName() << ">(";
}
}
- out_ << instruction.macro->name() << "(";
+ out_ << instruction.macro->external_assembler_name() << "(state_)."
+ << instruction.macro->ExternalName() << "(";
PrintCommaSeparatedList(out_, args);
if (return_type->IsStructType()) {
out_ << ").Flatten();\n";
@@ -178,6 +293,8 @@ void CSAGenerator::EmitInstruction(const CallCsaMacroInstruction& instruction,
if (results.size() == 1) out_ << ")";
out_ << ");\n";
}
+ PostCallableExceptionPreparation(catch_name, return_type,
+ instruction.catch_block, &pre_call_stack);
}
void CSAGenerator::EmitInstruction(
@@ -188,31 +305,17 @@ void CSAGenerator::EmitInstruction(
std::vector<std::string> args;
TypeVector parameter_types =
instruction.macro->signature().parameter_types.types;
- for (auto it = parameter_types.rbegin(); it != parameter_types.rend(); ++it) {
- const Type* type = *it;
- VisitResult arg;
- if (type->IsConstexpr()) {
- args.push_back(std::move(constexpr_arguments.back()));
- constexpr_arguments.pop_back();
- } else {
- std::stringstream s;
- size_t slot_count = LoweredSlotCount(type);
- VisitResult arg = VisitResult(type, stack->TopRange(slot_count));
- EmitCSAValue(arg, *stack, s);
- args.push_back(s.str());
- stack->PopMany(slot_count);
- }
- }
- std::reverse(args.begin(), args.end());
+ ProcessArgumentsCommon(parameter_types, &args, &constexpr_arguments, stack);
+ Stack<std::string> pre_call_stack = *stack;
std::vector<std::string> results;
const Type* return_type = instruction.macro->signature().return_type;
if (return_type != TypeOracle::GetNeverType()) {
for (const Type* type :
LowerType(instruction.macro->signature().return_type)) {
results.push_back(FreshNodeName());
- out_ << " TNode<" << type->GetGeneratedTNodeTypeName() << "> "
- << results.back() << ";\n";
+ out_ << " compiler::TNode<" << type->GetGeneratedTNodeTypeName()
+ << "> " << results.back() << ";\n";
out_ << " USE(" << results.back() << ");\n";
}
}
@@ -228,13 +331,16 @@ void CSAGenerator::EmitInstruction(
for (size_t j = 0; j < label_parameters.size(); ++j) {
var_names[i].push_back("result_" + std::to_string(i) + "_" +
std::to_string(j));
- out_ << " TVariable<"
+ out_ << " compiler::TypedCodeAssemblerVariable<"
<< label_parameters[j]->GetGeneratedTNodeTypeName() << "> "
- << var_names[i][j] << "(this);\n";
+ << var_names[i][j] << "(&ca_);\n";
}
- out_ << " Label " << label_names[i] << "(this);\n";
+ out_ << " compiler::CodeAssemblerLabel " << label_names[i]
+ << "(&ca_);\n";
}
+ std::string catch_name =
+ PreCallableExceptionPreparation(instruction.catch_block);
out_ << " ";
if (results.size() == 1) {
out_ << results[0] << " = ";
@@ -243,7 +349,8 @@ void CSAGenerator::EmitInstruction(
PrintCommaSeparatedList(out_, results);
out_ << ") = ";
}
- out_ << instruction.macro->name() << "(";
+ out_ << instruction.macro->external_assembler_name() << "(state_)."
+ << instruction.macro->ExternalName() << "(";
PrintCommaSeparatedList(out_, args);
bool first = args.empty();
for (size_t i = 0; i < label_names.size(); ++i) {
@@ -254,9 +361,17 @@ void CSAGenerator::EmitInstruction(
out_ << ", &" << var_names[i][j];
}
}
- out_ << ");\n";
+ if (return_type->IsStructType()) {
+ out_ << ").Flatten();\n";
+ } else {
+ out_ << ");\n";
+ }
+
+ PostCallableExceptionPreparation(catch_name, return_type,
+ instruction.catch_block, &pre_call_stack);
+
if (instruction.return_continuation) {
- out_ << " Goto(&" << BlockName(*instruction.return_continuation);
+ out_ << " ca_.Goto(&" << BlockName(*instruction.return_continuation);
for (const std::string& value : *stack) {
out_ << ", " << value;
}
@@ -267,8 +382,8 @@ void CSAGenerator::EmitInstruction(
}
for (size_t i = 0; i < label_names.size(); ++i) {
out_ << " if (" << label_names[i] << ".is_used()) {\n";
- out_ << " Bind(&" << label_names[i] << ");\n";
- out_ << " Goto(&" << BlockName(instruction.label_blocks[i]);
+ out_ << " ca_.Bind(&" << label_names[i] << ");\n";
+ out_ << " ca_.Goto(&" << BlockName(instruction.label_blocks[i]);
for (const std::string& value : *stack) {
out_ << ", " << value;
}
@@ -287,30 +402,44 @@ void CSAGenerator::EmitInstruction(const CallBuiltinInstruction& instruction,
std::vector<const Type*> result_types =
LowerType(instruction.builtin->signature().return_type);
if (instruction.is_tailcall) {
- out_ << " TailCallBuiltin(Builtins::k" << instruction.builtin->name()
- << ", ";
+ out_ << " CodeStubAssembler(state_).TailCallBuiltin(Builtins::k"
+ << instruction.builtin->ExternalName() << ", ";
PrintCommaSeparatedList(out_, arguments);
out_ << ");\n";
} else {
+ std::string result_name = FreshNodeName();
+ if (result_types.size() == 1) {
+ out_ << " compiler::TNode<"
+ << result_types[0]->GetGeneratedTNodeTypeName() << "> "
+ << result_name << ";\n";
+ }
+ std::string catch_name =
+ PreCallableExceptionPreparation(instruction.catch_block);
+ Stack<std::string> pre_call_stack = *stack;
if (result_types.size() == 1) {
std::string generated_type = result_types[0]->GetGeneratedTNodeTypeName();
- stack->Push(FreshNodeName());
- out_ << " TNode<" << generated_type << "> " << stack->Top() << " = ";
- if (generated_type != "Object") out_ << "CAST(";
- out_ << "CallBuiltin(Builtins::k" << instruction.builtin->name() << ", ";
+ stack->Push(result_name);
+ out_ << " " << result_name << " = ";
+ if (generated_type != "Object") out_ << "TORQUE_CAST(";
+ out_ << "CodeStubAssembler(state_).CallBuiltin(Builtins::k"
+ << instruction.builtin->ExternalName() << ", ";
PrintCommaSeparatedList(out_, arguments);
if (generated_type != "Object") out_ << ")";
out_ << ");\n";
- out_ << " USE(" << stack->Top() << ");\n";
+ out_ << " USE(" << result_name << ");\n";
} else {
DCHECK_EQ(0, result_types.size());
// TODO(tebbi): Actually, builtins have to return a value, so we should
// not have to handle this case.
- out_ << " CallBuiltin(Builtins::k" << instruction.builtin->name()
- << ", ";
+ out_ << " CodeStubAssembler(state_).CallBuiltin(Builtins::k"
+ << instruction.builtin->ExternalName() << ", ";
PrintCommaSeparatedList(out_, arguments);
out_ << ");\n";
}
+ PostCallableExceptionPreparation(
+ catch_name,
+ result_types.size() == 0 ? TypeOracle::GetVoidType() : result_types[0],
+ instruction.catch_block, &pre_call_stack);
}
}
@@ -320,67 +449,127 @@ void CSAGenerator::EmitInstruction(
std::vector<std::string> function_and_arguments =
stack->PopMany(1 + instruction.argc);
std::vector<const Type*> result_types =
- LowerType(instruction.example_builtin->signature().return_type);
+ LowerType(instruction.type->return_type());
if (result_types.size() != 1) {
ReportError("builtins must have exactly one result");
}
if (instruction.is_tailcall) {
- out_ << " Tail (Builtins::CallableFor(isolate(), Builtins::k"
- << instruction.example_builtin->name() << ").descriptor(), ";
- PrintCommaSeparatedList(out_, function_and_arguments);
- out_ << ");\n";
- } else {
- stack->Push(FreshNodeName());
- std::string generated_type = result_types[0]->GetGeneratedTNodeTypeName();
- out_ << " TNode<" << generated_type << "> " << stack->Top() << " = ";
- if (generated_type != "Object") out_ << "CAST(";
- out_ << "CallStub(Builtins::CallableFor(isolate(), Builtins::k"
- << instruction.example_builtin->name() << ").descriptor(), ";
- PrintCommaSeparatedList(out_, function_and_arguments);
- out_ << ")";
- if (generated_type != "Object") out_ << ")";
- out_ << "; \n";
- out_ << " USE(" << stack->Top() << ");\n";
+ ReportError("tail-calls to builtin pointers are not supported");
+ }
+
+ stack->Push(FreshNodeName());
+ std::string generated_type = result_types[0]->GetGeneratedTNodeTypeName();
+ out_ << " compiler::TNode<" << generated_type << "> " << stack->Top()
+ << " = ";
+ if (generated_type != "Object") out_ << "TORQUE_CAST(";
+ out_ << "CodeStubAssembler(state_).CallBuiltinPointer(Builtins::"
+ "CallableFor(ca_."
+ "isolate(),"
+ "ExampleBuiltinForTorqueFunctionPointerType("
+ << instruction.type->function_pointer_type_id() << ")).descriptor(), ";
+ PrintCommaSeparatedList(out_, function_and_arguments);
+ out_ << ")";
+ if (generated_type != "Object") out_ << ")";
+ out_ << "; \n";
+ out_ << " USE(" << stack->Top() << ");\n";
+}
+
+std::string CSAGenerator::PreCallableExceptionPreparation(
+ base::Optional<Block*> catch_block) {
+ std::string catch_name;
+ if (catch_block) {
+ catch_name = FreshCatchName();
+ out_ << " compiler::CodeAssemblerExceptionHandlerLabel " << catch_name
+ << "_label(&ca_, compiler::CodeAssemblerLabel::kDeferred);\n";
+ out_ << " { compiler::CodeAssemblerScopedExceptionHandler s(&ca_, &"
+ << catch_name << "_label);\n";
+ }
+ return catch_name;
+}
+
+void CSAGenerator::PostCallableExceptionPreparation(
+ const std::string& catch_name, const Type* return_type,
+ base::Optional<Block*> catch_block, Stack<std::string>* stack) {
+ if (catch_block) {
+ std::string block_name = BlockName(*catch_block);
+ out_ << " }\n";
+ out_ << " if (" << catch_name << "_label.is_used()) {\n";
+ out_ << " compiler::CodeAssemblerLabel " << catch_name
+ << "_skip(&ca_);\n";
+ if (!return_type->IsNever()) {
+ out_ << " ca_.Goto(&" << catch_name << "_skip);\n";
+ }
+ out_ << " compiler::TNode<Object> " << catch_name
+ << "_exception_object;\n";
+ out_ << " ca_.Bind(&" << catch_name << "_label, &" << catch_name
+ << "_exception_object);\n";
+ out_ << " ca_.Goto(&" << block_name;
+ for (size_t i = 0; i < stack->Size(); ++i) {
+ out_ << ", " << stack->begin()[i];
+ }
+ out_ << ", " << catch_name << "_exception_object);\n";
+ if (!return_type->IsNever()) {
+ out_ << " ca_.Bind(&" << catch_name << "_skip);\n";
+ }
+ out_ << " }\n";
}
}
void CSAGenerator::EmitInstruction(const CallRuntimeInstruction& instruction,
Stack<std::string>* stack) {
std::vector<std::string> arguments = stack->PopMany(instruction.argc);
- std::vector<const Type*> result_types =
- LowerType(instruction.runtime_function->signature().return_type);
+ const Type* return_type =
+ instruction.runtime_function->signature().return_type;
+ std::vector<const Type*> result_types;
+ if (return_type != TypeOracle::GetNeverType()) {
+ result_types = LowerType(return_type);
+ }
if (result_types.size() > 1) {
ReportError("runtime function must have at most one result");
}
if (instruction.is_tailcall) {
- out_ << " TailCallRuntime(Runtime::k"
- << instruction.runtime_function->name() << ", ";
+ out_ << " CodeStubAssembler(state_).TailCallRuntime(Runtime::k"
+ << instruction.runtime_function->ExternalName() << ", ";
PrintCommaSeparatedList(out_, arguments);
out_ << ");\n";
} else {
+ std::string result_name = FreshNodeName();
+ if (result_types.size() == 1) {
+ out_ << " compiler::TNode<"
+ << result_types[0]->GetGeneratedTNodeTypeName() << "> "
+ << result_name << ";\n";
+ }
+ std::string catch_name =
+ PreCallableExceptionPreparation(instruction.catch_block);
+ Stack<std::string> pre_call_stack = *stack;
if (result_types.size() == 1) {
- stack->Push(FreshNodeName());
- out_ << " TNode<" << result_types[0]->GetGeneratedTNodeTypeName()
- << "> " << stack->Top() << " = CAST(CallRuntime(Runtime::k"
- << instruction.runtime_function->name() << ", ";
+ stack->Push(result_name);
+ out_ << " " << result_name
+ << " = TORQUE_CAST(CodeStubAssembler(state_).CallRuntime(Runtime::k"
+ << instruction.runtime_function->ExternalName() << ", ";
PrintCommaSeparatedList(out_, arguments);
out_ << "));\n";
- out_ << " USE(" << stack->Top() << ");\n";
+ out_ << " USE(" << result_name << ");\n";
} else {
DCHECK_EQ(0, result_types.size());
- // TODO(tebbi): Actually, runtime functions have to return a value, so we
- // should not have to handle this case.
- out_ << " CallRuntime(Runtime::k"
- << instruction.runtime_function->name() << ", ";
+ out_ << " CodeStubAssembler(state_).CallRuntime(Runtime::k"
+ << instruction.runtime_function->ExternalName() << ", ";
PrintCommaSeparatedList(out_, arguments);
out_ << ");\n";
+ if (return_type == TypeOracle::GetNeverType()) {
+ out_ << " CodeStubAssembler(state_).Unreachable();\n";
+ } else {
+ DCHECK(return_type == TypeOracle::GetVoidType());
+ }
}
+ PostCallableExceptionPreparation(catch_name, return_type,
+ instruction.catch_block, &pre_call_stack);
}
}
void CSAGenerator::EmitInstruction(const BranchInstruction& instruction,
Stack<std::string>* stack) {
- out_ << " Branch(" << stack->Pop() << ", &"
+ out_ << " ca_.Branch(" << stack->Pop() << ", &"
<< BlockName(instruction.if_true) << ", &"
<< BlockName(instruction.if_false);
for (const std::string& value : *stack) {
@@ -392,13 +581,13 @@ void CSAGenerator::EmitInstruction(const BranchInstruction& instruction,
void CSAGenerator::EmitInstruction(
const ConstexprBranchInstruction& instruction, Stack<std::string>* stack) {
out_ << " if (" << instruction.condition << ") {\n";
- out_ << " Goto(&" << BlockName(instruction.if_true);
+ out_ << " ca_.Goto(&" << BlockName(instruction.if_true);
for (const std::string& value : *stack) {
out_ << ", " << value;
}
out_ << ");\n";
out_ << " } else {\n";
- out_ << " Goto(&" << BlockName(instruction.if_false);
+ out_ << " ca_.Goto(&" << BlockName(instruction.if_false);
for (const std::string& value : *stack) {
out_ << ", " << value;
}
@@ -409,7 +598,7 @@ void CSAGenerator::EmitInstruction(
void CSAGenerator::EmitInstruction(const GotoInstruction& instruction,
Stack<std::string>* stack) {
- out_ << " Goto(&" << BlockName(instruction.destination);
+ out_ << " ca_.Goto(&" << BlockName(instruction.destination);
for (const std::string& value : *stack) {
out_ << ", " << value;
}
@@ -422,7 +611,7 @@ void CSAGenerator::EmitInstruction(const GotoExternalInstruction& instruction,
it != instruction.variable_names.rend(); ++it) {
out_ << " *" << *it << " = " << stack->Pop() << ";\n";
}
- out_ << " Goto(" << instruction.destination << ");\n";
+ out_ << " ca_.Goto(" << instruction.destination << ");\n";
}
void CSAGenerator::EmitInstruction(const ReturnInstruction& instruction,
@@ -430,7 +619,7 @@ void CSAGenerator::EmitInstruction(const ReturnInstruction& instruction,
if (*linkage_ == Builtin::kVarArgsJavaScript) {
out_ << " " << ARGUMENTS_VARIABLE_STRING << "->PopAndReturn(";
} else {
- out_ << " Return(";
+ out_ << " CodeStubAssembler(state_).Return(";
}
out_ << stack->Pop() << ");\n";
}
@@ -438,26 +627,75 @@ void CSAGenerator::EmitInstruction(const ReturnInstruction& instruction,
void CSAGenerator::EmitInstruction(
const PrintConstantStringInstruction& instruction,
Stack<std::string>* stack) {
- out_ << " Print(" << StringLiteralQuote(instruction.message) << ");\n";
+ out_ << " CodeStubAssembler(state_).Print("
+ << StringLiteralQuote(instruction.message) << ");\n";
}
-void CSAGenerator::EmitInstruction(const DebugBreakInstruction& instruction,
+void CSAGenerator::EmitInstruction(const AbortInstruction& instruction,
Stack<std::string>* stack) {
- if (instruction.never_continues) {
- out_ << " Unreachable();\n";
- } else {
- out_ << " DebugBreak();\n";
+ switch (instruction.kind) {
+ case AbortInstruction::Kind::kUnreachable:
+ DCHECK(instruction.message.empty());
+ out_ << " CodeStubAssembler(state_).Unreachable();\n";
+ break;
+ case AbortInstruction::Kind::kDebugBreak:
+ DCHECK(instruction.message.empty());
+ out_ << " CodeStubAssembler(state_).DebugBreak();\n";
+ break;
+ case AbortInstruction::Kind::kAssertionFailure: {
+ std::string file =
+ StringLiteralQuote(SourceFileMap::GetSource(instruction.pos.source));
+ out_ << " CodeStubAssembler(state_).FailAssert("
+ << StringLiteralQuote(instruction.message) << ", " << file << ", "
+ << instruction.pos.line + 1 << ");\n";
+ break;
+ }
}
}
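The three abort kinds lower to three different CodeStubAssembler calls; for an assertion failure, the message, source file, and 1-based line number are baked into the generated code. A sketch of the output (message, file, and line values are made up for illustration):

    CodeStubAssembler(state_).Unreachable();   // kUnreachable
    CodeStubAssembler(state_).DebugBreak();    // kDebugBreak
    CodeStubAssembler(state_).FailAssert(
        "Torque assert 'n >= 0' failed", "src/builtins/foo.tq", 42);  // kAssertionFailure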
void CSAGenerator::EmitInstruction(const UnsafeCastInstruction& instruction,
Stack<std::string>* stack) {
stack->Poke(stack->AboveTop() - 1,
- "UncheckedCast<" +
+ "ca_.UncheckedCast<" +
instruction.destination_type->GetGeneratedTNodeTypeName() +
">(" + stack->Top() + ")");
}
+void CSAGenerator::EmitInstruction(
+ const LoadObjectFieldInstruction& instruction, Stack<std::string>* stack) {
+ const Field& field =
+ instruction.class_type->LookupField(instruction.field_name);
+ std::string result_name = FreshNodeName();
+ std::string type_string =
+ field.name_and_type.type->IsSubtypeOf(TypeOracle::GetSmiType())
+ ? "MachineType::TaggedSigned()"
+ : "MachineType::AnyTagged()";
+ out_ << field.name_and_type.type->GetGeneratedTypeName() << " " << result_name
+ << " = "
+ << "ca_.UncheckedCast<"
+ << field.name_and_type.type->GetGeneratedTNodeTypeName()
+ << ">(CodeStubAssembler(state_).LoadObjectField("
+ << stack->Top() + ", " + std::to_string(field.offset) + ", "
+ << type_string + "));\n";
+ stack->Poke(stack->AboveTop() - 1, result_name);
+}
+
+void CSAGenerator::EmitInstruction(
+ const StoreObjectFieldInstruction& instruction, Stack<std::string>* stack) {
+ auto value = stack->Pop();
+ auto object = stack->Pop();
+ stack->Push(value);
+ const Field& field =
+ instruction.class_type->LookupField(instruction.field_name);
+ if (field.offset == 0) {
+ out_ << " CodeStubAssembler(state_).StoreMap(" + object + ", " + value +
+ ");\n";
+ } else {
+ out_ << " CodeStubAssembler(state_).StoreObjectField(" + object + ", " +
+ std::to_string(field.offset) + ", " + value + ");\n";
+ }
+}
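Field loads go through an `UncheckedCast` of `LoadObjectField`, with Smi-typed fields using `MachineType::TaggedSigned()`; stores at offset 0 target the map and use `StoreMap`. The generated code looks roughly like this (the field type, offset, and `tmp` names are illustrative):

    compiler::TNode<Smi> tmp1 = ca_.UncheckedCast<Smi>(
        CodeStubAssembler(state_).LoadObjectField(tmp0, 16, MachineType::TaggedSigned()));
    CodeStubAssembler(state_).StoreMap(tmp0, tmp2);              // field at offset 0
    CodeStubAssembler(state_).StoreObjectField(tmp0, 16, tmp2);  // any other field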
+
// static
void CSAGenerator::EmitCSAValue(VisitResult result,
const Stack<std::string>& values,
@@ -465,20 +703,21 @@ void CSAGenerator::EmitCSAValue(VisitResult result,
if (!result.IsOnStack()) {
out << result.constexpr_value();
} else if (auto* struct_type = StructType::DynamicCast(result.type())) {
- out << struct_type->name() << "{";
+ out << struct_type->GetGeneratedTypeName() << "{";
bool first = true;
for (auto& field : struct_type->fields()) {
if (!first) {
out << ", ";
}
first = false;
- EmitCSAValue(ProjectStructField(result, field.name), values, out);
+ EmitCSAValue(ProjectStructField(result, field.name_and_type.name), values,
+ out);
}
out << "}";
} else {
DCHECK_EQ(1, result.stack_range().Size());
- out << "TNode<" << result.type()->GetGeneratedTNodeTypeName() << ">{"
- << values.Peek(result.stack_range().begin()) << "}";
+ out << "compiler::TNode<" << result.type()->GetGeneratedTNodeTypeName()
+ << ">{" << values.Peek(result.stack_range().begin()) << "}";
}
}
diff --git a/deps/v8/src/torque/csa-generator.h b/deps/v8/src/torque/csa-generator.h
index 78fccebd6d..e3fbacdecf 100644
--- a/deps/v8/src/torque/csa-generator.h
+++ b/deps/v8/src/torque/csa-generator.h
@@ -32,11 +32,24 @@ class CSAGenerator {
size_t fresh_id_ = 0;
base::Optional<Builtin::Kind> linkage_;
+ std::string PreCallableExceptionPreparation(
+ base::Optional<Block*> catch_block);
+ void PostCallableExceptionPreparation(const std::string& catch_name,
+ const Type* return_type,
+ base::Optional<Block*> catch_block,
+ Stack<std::string>* stack);
+
std::string FreshNodeName() { return "tmp" + std::to_string(fresh_id_++); }
+ std::string FreshCatchName() { return "catch" + std::to_string(fresh_id_++); }
std::string BlockName(const Block* block) {
return "block" + std::to_string(block->id());
}
+ void ProcessArgumentsCommon(const TypeVector& parameter_types,
+ std::vector<std::string>* args,
+ std::vector<std::string>* constexpr_arguments,
+ Stack<std::string>* stack);
+
Stack<std::string> EmitBlock(const Block* block);
void EmitInstruction(const Instruction& instruction,
Stack<std::string>* stack);
diff --git a/deps/v8/src/torque/declarable.cc b/deps/v8/src/torque/declarable.cc
index 6768da5474..89501e5682 100644
--- a/deps/v8/src/torque/declarable.cc
+++ b/deps/v8/src/torque/declarable.cc
@@ -11,53 +11,45 @@ namespace v8 {
namespace internal {
namespace torque {
-std::ostream& operator<<(std::ostream& os, const Callable& m) {
- os << "callable " << m.name() << "(" << m.signature().parameter_types
- << "): " << *m.signature().return_type;
- return os;
+DEFINE_CONTEXTUAL_VARIABLE(CurrentScope);
+
+std::ostream& operator<<(std::ostream& os, const QualifiedName& name) {
+ for (const std::string& qualifier : name.namespace_qualification) {
+ os << qualifier << "::";
+ }
+ return os << name.name;
}
-std::ostream& operator<<(std::ostream& os, const Variable& v) {
- os << "variable " << v.name() << ": " << *v.type();
+std::ostream& operator<<(std::ostream& os, const Callable& m) {
+ os << "callable " << m.ReadableName() << "(";
+ if (m.signature().implicit_count != 0) {
+ os << "implicit ";
+ TypeVector implicit_parameter_types(
+ m.signature().parameter_types.types.begin(),
+ m.signature().parameter_types.types.begin() +
+ m.signature().implicit_count);
+ os << implicit_parameter_types << ")(";
+ TypeVector explicit_parameter_types(
+ m.signature().parameter_types.types.begin() +
+ m.signature().implicit_count,
+ m.signature().parameter_types.types.end());
+ os << explicit_parameter_types;
+ } else {
+ os << m.signature().parameter_types;
+ }
+ os << "): " << *m.signature().return_type;
return os;
}
std::ostream& operator<<(std::ostream& os, const Builtin& b) {
- os << "builtin " << *b.signature().return_type << " " << b.name()
+ os << "builtin " << *b.signature().return_type << " " << b.ReadableName()
<< b.signature().parameter_types;
return os;
}
std::ostream& operator<<(std::ostream& os, const RuntimeFunction& b) {
- os << "runtime function " << *b.signature().return_type << " " << b.name()
- << b.signature().parameter_types;
- return os;
-}
-
-void PrintLabel(std::ostream& os, const Label& l, bool with_names) {
- os << l.name();
- if (l.GetParameterCount() != 0) {
- os << "(";
- if (with_names) {
- PrintCommaSeparatedList(os, l.GetParameters(),
- [](Variable* v) -> std::string {
- std::stringstream stream;
- stream << v->name();
- stream << ": ";
- stream << *(v->type());
- return stream.str();
- });
- } else {
- PrintCommaSeparatedList(
- os, l.GetParameters(),
- [](Variable* v) -> const Type& { return *(v->type()); });
- }
- os << ")";
- }
-}
-
-std::ostream& operator<<(std::ostream& os, const Label& l) {
- PrintLabel(os, l, true);
+ os << "runtime function " << *b.signature().return_type << " "
+ << b.ReadableName() << b.signature().parameter_types;
return os;
}
@@ -69,7 +61,39 @@ std::ostream& operator<<(std::ostream& os, const Generic& g) {
return os;
}
-size_t Label::next_id_ = 0;
+base::Optional<const Type*> Generic::InferTypeArgument(
+ size_t i, const TypeVector& arguments) {
+ const std::string type_name = declaration()->generic_parameters[i];
+ const std::vector<TypeExpression*>& parameters =
+ declaration()->callable->signature->parameters.types;
+ size_t j = declaration()->callable->signature->parameters.implicit_count;
+ for (size_t i = 0; i < arguments.size() && j < parameters.size(); ++i, ++j) {
+ BasicTypeExpression* basic =
+ BasicTypeExpression::DynamicCast(parameters[j]);
+ if (basic && basic->namespace_qualification.empty() &&
+ !basic->is_constexpr && basic->name == type_name) {
+ return arguments[i];
+ }
+ }
+ return base::nullopt;
+}
+
+base::Optional<TypeVector> Generic::InferSpecializationTypes(
+ const TypeVector& explicit_specialization_types,
+ const TypeVector& arguments) {
+ TypeVector result = explicit_specialization_types;
+ size_t type_parameter_count = declaration()->generic_parameters.size();
+ if (explicit_specialization_types.size() > type_parameter_count) {
+ return base::nullopt;
+ }
+ for (size_t i = explicit_specialization_types.size();
+ i < type_parameter_count; ++i) {
+ base::Optional<const Type*> inferred = InferTypeArgument(i, arguments);
+ if (!inferred) return base::nullopt;
+ result.push_back(*inferred);
+ }
+ return result;
+}
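Inference is purely positional over the explicit parameters: the i-th generic parameter takes the type of the first argument whose declared parameter type is exactly that bare name. A minimal std-only sketch of that rule (the Torque type names are stand-ins):

    #include <cassert>
    #include <map>
    #include <string>
    #include <vector>

    int main() {
      // macro Max<T>(a: T, b: T) called with argument types (Smi, Smi):
      std::vector<std::string> parameter_type_names{"T", "T"};
      std::vector<std::string> argument_types{"Smi", "Smi"};
      std::map<std::string, std::string> inferred;
      for (size_t i = 0; i < argument_types.size(); ++i) {
        // emplace keeps the first match, mirroring the loop above.
        inferred.emplace(parameter_type_names[i], argument_types[i]);
      }
      assert(inferred.at("T") == "Smi");
    }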
} // namespace torque
} // namespace internal
diff --git a/deps/v8/src/torque/declarable.h b/deps/v8/src/torque/declarable.h
index 1d173062bd..a262022409 100644
--- a/deps/v8/src/torque/declarable.h
+++ b/deps/v8/src/torque/declarable.h
@@ -7,6 +7,7 @@
#include <cassert>
#include <string>
+#include <unordered_map>
#include "src/base/functional.h"
#include "src/base/logging.h"
@@ -18,52 +19,68 @@ namespace v8 {
namespace internal {
namespace torque {
-class Block;
-class Generic;
class Scope;
-class ScopeChain;
+class Namespace;
+
+DECLARE_CONTEXTUAL_VARIABLE(CurrentScope, Scope*);
+
+struct QualifiedName {
+ std::vector<std::string> namespace_qualification;
+ std::string name;
+
+ QualifiedName(std::vector<std::string> namespace_qualification,
+ std::string name)
+ : namespace_qualification(std::move(namespace_qualification)),
+ name(std::move(name)) {}
+ explicit QualifiedName(std::string name)
+ : QualifiedName({}, std::move(name)) {}
+
+ friend std::ostream& operator<<(std::ostream& os, const QualifiedName& name);
+};
class Declarable {
public:
virtual ~Declarable() = default;
enum Kind {
- kVariable,
- kParameter,
+ kNamespace,
kMacro,
- kMacroList,
+ kMethod,
kBuiltin,
kRuntimeFunction,
+ kIntrinsic,
kGeneric,
- kGenericList,
kTypeAlias,
- kLabel,
kExternConstant,
- kModuleConstant
+ kNamespaceConstant
};
Kind kind() const { return kind_; }
- bool IsMacro() const { return kind() == kMacro; }
+ bool IsNamespace() const { return kind() == kNamespace; }
+ bool IsMacro() const { return kind() == kMacro || kind() == kMethod; }
+ bool IsMethod() const { return kind() == kMethod; }
+ bool IsIntrinsic() const { return kind() == kIntrinsic; }
bool IsBuiltin() const { return kind() == kBuiltin; }
bool IsRuntimeFunction() const { return kind() == kRuntimeFunction; }
bool IsGeneric() const { return kind() == kGeneric; }
bool IsTypeAlias() const { return kind() == kTypeAlias; }
- bool IsParameter() const { return kind() == kParameter; }
- bool IsLabel() const { return kind() == kLabel; }
- bool IsVariable() const { return kind() == kVariable; }
- bool IsMacroList() const { return kind() == kMacroList; }
- bool IsGenericList() const { return kind() == kGenericList; }
bool IsExternConstant() const { return kind() == kExternConstant; }
- bool IsModuleConstant() const { return kind() == kModuleConstant; }
- bool IsValue() const {
- return IsVariable() || IsExternConstant() || IsParameter() ||
- IsModuleConstant();
+ bool IsNamespaceConstant() const { return kind() == kNamespaceConstant; }
+ bool IsValue() const { return IsExternConstant() || IsNamespaceConstant(); }
+ bool IsScope() const { return IsNamespace() || IsCallable(); }
+ bool IsCallable() const {
+ return IsMacro() || IsBuiltin() || IsRuntimeFunction() || IsIntrinsic() ||
+ IsMethod();
}
virtual const char* type_name() const { return "<<unknown>>"; }
+ Scope* ParentScope() const { return parent_scope_; }
+ const SourcePosition& pos() const { return pos_; }
protected:
explicit Declarable(Kind kind) : kind_(kind) {}
private:
const Kind kind_;
+ Scope* const parent_scope_ = CurrentScope::Get();
+ SourcePosition pos_ = CurrentSourcePosition::Get();
};
#define DECLARE_DECLARABLE_BOILERPLATE(x, y) \
@@ -87,6 +104,81 @@ class Declarable {
return static_cast<const x*>(declarable); \
}
+class Scope : public Declarable {
+ public:
+ DECLARE_DECLARABLE_BOILERPLATE(Scope, scope);
+ explicit Scope(Declarable::Kind kind) : Declarable(kind) {}
+
+ std::vector<Declarable*> LookupShallow(const QualifiedName& name) {
+ if (name.namespace_qualification.empty()) return declarations_[name.name];
+ Scope* child = nullptr;
+ for (Declarable* declarable :
+ declarations_[name.namespace_qualification.front()]) {
+ if (Scope* scope = Scope::DynamicCast(declarable)) {
+ if (child != nullptr) {
+ ReportError("ambiguous reference to scope ",
+ name.namespace_qualification.front());
+ }
+ child = scope;
+ }
+ }
+ if (child == nullptr) return {};
+ return child->LookupShallow(
+ QualifiedName({name.namespace_qualification.begin() + 1,
+ name.namespace_qualification.end()},
+ name.name));
+ }
+
+ std::vector<Declarable*> Lookup(const QualifiedName& name) {
+ std::vector<Declarable*> result;
+ if (ParentScope()) {
+ result = ParentScope()->Lookup(name);
+ }
+ for (Declarable* declarable : LookupShallow(name)) {
+ result.push_back(declarable);
+ }
+ return result;
+ }
+ template <class T>
+ T* AddDeclarable(const std::string& name, T* declarable) {
+ declarations_[name].push_back(declarable);
+ return declarable;
+ }
+
+ private:
+ std::unordered_map<std::string, std::vector<Declarable*>> declarations_;
+};
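A hypothetical use of this lookup API: resolving a namespace-qualified name from the current scope. `LookupShallow` searches only this scope (recursing through the named child scope), while `Lookup` additionally accumulates matches from all parent scopes, outermost first:

    Scope* scope = CurrentScope::Get();
    QualifiedName name({"array"}, "Sort");  // hypothetical "array::Sort"
    for (Declarable* candidate : scope->Lookup(name)) {
      // overload resolution chooses among the candidates
    }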
+
+class Namespace : public Scope {
+ public:
+ DECLARE_DECLARABLE_BOILERPLATE(Namespace, namespace);
+ explicit Namespace(const std::string& name)
+ : Scope(Declarable::kNamespace), name_(name) {}
+ const std::string& name() const { return name_; }
+ std::string ExternalName() const {
+ return CamelifyString(name()) + "BuiltinsFromDSLAssembler";
+ }
+ std::ostream& source_stream() { return source_stream_; }
+ std::ostream& header_stream() { return header_stream_; }
+ std::string source() { return source_stream_.str(); }
+ std::string header() { return header_stream_.str(); }
+
+ private:
+ std::string name_;
+ std::stringstream header_stream_;
+ std::stringstream source_stream_;
+};
+
+inline Namespace* CurrentNamespace() {
+ Scope* scope = CurrentScope::Get();
+ while (true) {
+ if (Namespace* n = Namespace::DynamicCast(scope)) {
+ return n;
+ }
+ scope = scope->ParentScope();
+ }
+}
+
class Value : public Declarable {
public:
DECLARE_DECLARABLE_BOILERPLATE(Value, value);
@@ -110,104 +202,26 @@ class Value : public Declarable {
base::Optional<VisitResult> value_;
};
-class Parameter : public Value {
+class NamespaceConstant : public Value {
public:
- DECLARE_DECLARABLE_BOILERPLATE(Parameter, parameter);
-
- const std::string& external_name() const { return external_name_; }
-
- private:
- friend class Declarations;
- Parameter(const std::string& name, std::string external_name,
- const Type* type)
- : Value(Declarable::kParameter, type, name),
- external_name_(external_name) {}
-
- std::string external_name_;
-};
-
-class ModuleConstant : public Value {
- public:
- DECLARE_DECLARABLE_BOILERPLATE(ModuleConstant, constant);
+ DECLARE_DECLARABLE_BOILERPLATE(NamespaceConstant, constant);
const std::string& constant_name() const { return constant_name_; }
-
- private:
- friend class Declarations;
- explicit ModuleConstant(std::string constant_name, const Type* type)
- : Value(Declarable::kModuleConstant, type, constant_name),
- constant_name_(std::move(constant_name)) {}
-
- std::string constant_name_;
-};
-
-class Variable : public Value {
- public:
- DECLARE_DECLARABLE_BOILERPLATE(Variable, variable);
- bool IsConst() const override { return const_; }
- void Define() {
- if (defined_ && IsConst()) {
- ReportError("Cannot re-define a const-bound variable.");
- }
- defined_ = true;
+ Expression* body() { return body_; }
+ std::string ExternalAssemblerName() const {
+ return Namespace::cast(ParentScope())->ExternalName();
}
- bool IsDefined() const { return defined_; }
private:
friend class Declarations;
- Variable(std::string name, const Type* type, bool is_const)
- : Value(Declarable::kVariable, type, name),
- defined_(false),
- const_(is_const) {
- DCHECK_IMPLIES(type->IsConstexpr(), IsConst());
- }
+ explicit NamespaceConstant(std::string constant_name, const Type* type,
+ Expression* body)
+ : Value(Declarable::kNamespaceConstant, type, constant_name),
+ constant_name_(std::move(constant_name)),
+ body_(body) {}
- std::string value_;
- bool defined_;
- bool const_;
-};
-
-class Label : public Declarable {
- public:
- void AddVariable(Variable* var) { parameters_.push_back(var); }
- Block* block() const { return *block_; }
- void set_block(Block* block) {
- DCHECK(!block_);
- block_ = block;
- }
- const std::string& external_label_name() const {
- return *external_label_name_;
- }
- const std::string& name() const { return name_; }
- void set_external_label_name(std::string external_label_name) {
- DCHECK(!block_);
- DCHECK(!external_label_name_);
- external_label_name_ = std::move(external_label_name);
- }
- Variable* GetParameter(size_t i) const { return parameters_[i]; }
- size_t GetParameterCount() const { return parameters_.size(); }
- const std::vector<Variable*>& GetParameters() const { return parameters_; }
-
- DECLARE_DECLARABLE_BOILERPLATE(Label, label);
- void MarkUsed() { used_ = true; }
- bool IsUsed() const { return used_; }
- bool IsDeferred() const { return deferred_; }
-
- private:
- friend class Declarations;
- explicit Label(std::string name, bool deferred = false)
- : Declarable(Declarable::kLabel),
- name_(std::move(name)),
- used_(false),
- deferred_(deferred) {}
-
- std::string name_;
- base::Optional<Block*> block_;
- base::Optional<std::string> external_label_name_;
- std::vector<Variable*> parameters_;
- static size_t next_id_;
- bool used_;
- bool deferred_;
+ std::string constant_name_;
+ Expression* body_;
};
class ExternConstant : public Value {
@@ -222,19 +236,11 @@ class ExternConstant : public Value {
}
};
-class Callable : public Declarable {
+class Callable : public Scope {
public:
- static Callable* cast(Declarable* declarable) {
- assert(declarable->IsMacro() || declarable->IsBuiltin() ||
- declarable->IsRuntimeFunction());
- return static_cast<Callable*>(declarable);
- }
- static const Callable* cast(const Declarable* declarable) {
- assert(declarable->IsMacro() || declarable->IsBuiltin() ||
- declarable->IsRuntimeFunction());
- return static_cast<const Callable*>(declarable);
- }
- const std::string& name() const { return name_; }
+ DECLARE_DECLARABLE_BOILERPLATE(Callable, callable);
+ const std::string& ExternalName() const { return external_name_; }
+ const std::string& ReadableName() const { return readable_name_; }
const Signature& signature() const { return signature_; }
const NameVector& parameter_names() const {
return signature_.parameter_names;
@@ -244,53 +250,98 @@ class Callable : public Declarable {
}
void IncrementReturns() { ++returns_; }
bool HasReturns() const { return returns_; }
- base::Optional<Generic*> generic() const { return generic_; }
+ bool IsTransitioning() const { return transitioning_; }
+ base::Optional<Statement*> body() const { return body_; }
+ bool IsExternal() const { return !body_.has_value(); }
+ virtual bool ShouldBeInlined() const { return false; }
+ bool IsConstructor() const { return readable_name_ == kConstructMethodName; }
protected:
- Callable(Declarable::Kind kind, const std::string& name,
- const Signature& signature, base::Optional<Generic*> generic)
- : Declarable(kind),
- name_(name),
- signature_(signature),
+ Callable(Declarable::Kind kind, std::string external_name,
+ std::string readable_name, Signature signature, bool transitioning,
+ base::Optional<Statement*> body)
+ : Scope(kind),
+ external_name_(std::move(external_name)),
+ readable_name_(std::move(readable_name)),
+ signature_(std::move(signature)),
+ transitioning_(transitioning),
returns_(0),
- generic_(generic) {}
+ body_(body) {
+ DCHECK(!body || *body);
+ }
private:
- std::string name_;
+ std::string external_name_;
+ std::string readable_name_;
Signature signature_;
+ bool transitioning_;
size_t returns_;
- base::Optional<Generic*> generic_;
+ base::Optional<Statement*> body_;
};
class Macro : public Callable {
public:
DECLARE_DECLARABLE_BOILERPLATE(Macro, macro);
+ bool ShouldBeInlined() const override {
+ for (const LabelDeclaration& label : signature().labels) {
+ for (const Type* type : label.types) {
+ if (type->IsStructType()) return true;
+ }
+ }
+ return Callable::ShouldBeInlined();
+ }
- private:
- friend class Declarations;
- Macro(const std::string& name, const Signature& signature,
- base::Optional<Generic*> generic)
- : Callable(Declarable::kMacro, name, signature, generic) {
+ const std::string& external_assembler_name() const {
+ return external_assembler_name_;
+ }
+
+ protected:
+ Macro(Declarable::Kind kind, std::string external_name,
+ std::string readable_name, std::string external_assembler_name,
+ const Signature& signature, bool transitioning,
+ base::Optional<Statement*> body)
+ : Callable(kind, std::move(external_name), std::move(readable_name),
+ signature, transitioning, body),
+ external_assembler_name_(std::move(external_assembler_name)) {
if (signature.parameter_types.var_args) {
ReportError("Varargs are not supported for macros.");
}
}
+
+ private:
+ friend class Declarations;
+ Macro(std::string external_name, std::string readable_name,
+ std::string external_assembler_name, const Signature& signature,
+ bool transitioning, base::Optional<Statement*> body)
+ : Macro(Declarable::kMacro, std::move(external_name),
+ std::move(readable_name), external_assembler_name, signature,
+ transitioning, body) {}
+
+ std::string external_assembler_name_;
};
-class MacroList : public Declarable {
+class Method : public Macro {
public:
- DECLARE_DECLARABLE_BOILERPLATE(MacroList, macro_list);
- const std::vector<Macro*>& list() { return list_; }
- Macro* AddMacro(Macro* macro) {
- list_.emplace_back(macro);
- return macro;
+ DECLARE_DECLARABLE_BOILERPLATE(Method, Method);
+ bool ShouldBeInlined() const override {
+ return Macro::ShouldBeInlined() ||
+ signature()
+ .parameter_types.types[signature().implicit_count]
+ ->IsStructType();
}
+ AggregateType* aggregate_type() const { return aggregate_type_; }
private:
friend class Declarations;
- MacroList() : Declarable(Declarable::kMacroList) {}
-
- std::vector<Macro*> list_;
+ Method(AggregateType* aggregate_type, std::string external_name,
+ std::string readable_name, std::string external_assembler_name,
+ const Signature& signature, bool transitioning, Statement* body)
+ : Macro(Declarable::kMethod, std::move(external_name),
+ std::move(readable_name), std::move(external_assembler_name),
+ signature, transitioning, body),
+ aggregate_type_(aggregate_type) {}
+ AggregateType* aggregate_type_;
};
class Builtin : public Callable {
@@ -301,18 +352,17 @@ class Builtin : public Callable {
bool IsStub() const { return kind_ == kStub; }
bool IsVarArgsJavaScript() const { return kind_ == kVarArgsJavaScript; }
bool IsFixedArgsJavaScript() const { return kind_ == kFixedArgsJavaScript; }
- bool IsExternal() const { return external_; }
private:
friend class Declarations;
- Builtin(const std::string& name, Builtin::Kind kind, bool external,
- const Signature& signature, base::Optional<Generic*> generic)
- : Callable(Declarable::kBuiltin, name, signature, generic),
- kind_(kind),
- external_(external) {}
+ Builtin(std::string external_name, std::string readable_name,
+ Builtin::Kind kind, const Signature& signature, bool transitioning,
+ base::Optional<Statement*> body)
+ : Callable(Declarable::kBuiltin, std::move(external_name),
+ std::move(readable_name), signature, transitioning, body),
+ kind_(kind) {}
Kind kind_;
- bool external_;
};
class RuntimeFunction : public Callable {
@@ -322,8 +372,24 @@ class RuntimeFunction : public Callable {
private:
friend class Declarations;
RuntimeFunction(const std::string& name, const Signature& signature,
- base::Optional<Generic*> generic)
- : Callable(Declarable::kRuntimeFunction, name, signature, generic) {}
+ bool transitioning)
+ : Callable(Declarable::kRuntimeFunction, name, name, signature,
+ transitioning, base::nullopt) {}
+};
+
+class Intrinsic : public Callable {
+ public:
+ DECLARE_DECLARABLE_BOILERPLATE(Intrinsic, intrinsic);
+
+ private:
+ friend class Declarations;
+ Intrinsic(std::string name, const Signature& signature)
+ : Callable(Declarable::kIntrinsic, name, name, signature, false,
+ base::nullopt) {
+ if (signature.parameter_types.var_args) {
+ ReportError("Varargs are not supported for intrinsics.");
+ }
+ }
};
class Generic : public Declarable {
@@ -331,61 +397,65 @@ class Generic : public Declarable {
DECLARE_DECLARABLE_BOILERPLATE(Generic, generic);
GenericDeclaration* declaration() const { return declaration_; }
+ const std::vector<std::string> generic_parameters() const {
+ return declaration()->generic_parameters;
+ }
const std::string& name() const { return name_; }
- Module* module() const { return module_; }
+ void AddSpecialization(const TypeVector& type_arguments,
+ Callable* specialization) {
+ DCHECK_EQ(0, specializations_.count(type_arguments));
+ specializations_[type_arguments] = specialization;
+ }
+ base::Optional<Callable*> GetSpecialization(
+ const TypeVector& type_arguments) const {
+ auto it = specializations_.find(type_arguments);
+ if (it != specializations_.end()) return it->second;
+ return base::nullopt;
+ }
+ base::Optional<TypeVector> InferSpecializationTypes(
+ const TypeVector& explicit_specialization_types,
+ const TypeVector& arguments);
private:
friend class Declarations;
- Generic(const std::string& name, Module* module,
- GenericDeclaration* declaration)
+ Generic(const std::string& name, GenericDeclaration* declaration)
: Declarable(Declarable::kGeneric),
name_(name),
- module_(module),
declaration_(declaration) {}
+ base::Optional<const Type*> InferTypeArgument(size_t i,
+ const TypeVector& arguments);
std::string name_;
- Module* module_;
+ std::unordered_map<TypeVector, Callable*, base::hash<TypeVector>>
+ specializations_;
GenericDeclaration* declaration_;
};
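Specializations are memoized per type-argument vector; `GetSpecialization` returns `base::nullopt` on a cache miss. A sketch of the intended usage (type and callable values are placeholders, instantiation elided):

    TypeVector types{smi_type, object_type};
    if (base::Optional<Callable*> cached = generic->GetSpecialization(types)) {
      return *cached;
    }
    Callable* fresh = /* instantiate generic->declaration()->callable for <types> */ nullptr;
    generic->AddSpecialization(types, fresh);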
-class GenericList : public Declarable {
- public:
- DECLARE_DECLARABLE_BOILERPLATE(GenericList, generic_list);
- const std::vector<Generic*>& list() { return list_; }
- Generic* AddGeneric(Generic* generic) {
- list_.push_back(generic);
- return generic;
- }
-
- private:
- friend class Declarations;
- GenericList() : Declarable(Declarable::kGenericList) {}
-
- std::vector<Generic*> list_;
+struct SpecializationKey {
+ Generic* generic;
+ TypeVector specialized_types;
};
-typedef std::pair<Generic*, TypeVector> SpecializationKey;
-
class TypeAlias : public Declarable {
public:
DECLARE_DECLARABLE_BOILERPLATE(TypeAlias, type_alias);
const Type* type() const { return type_; }
+ bool IsRedeclaration() const { return redeclaration_; }
private:
friend class Declarations;
- explicit TypeAlias(const Type* type)
- : Declarable(Declarable::kTypeAlias), type_(type) {}
+ explicit TypeAlias(const Type* type, bool redeclaration)
+ : Declarable(Declarable::kTypeAlias),
+ type_(type),
+ redeclaration_(redeclaration) {}
const Type* type_;
+ bool redeclaration_;
};
-void PrintLabel(std::ostream& os, const Label& l, bool with_names);
-
std::ostream& operator<<(std::ostream& os, const Callable& m);
-std::ostream& operator<<(std::ostream& os, const Variable& v);
std::ostream& operator<<(std::ostream& os, const Builtin& b);
-std::ostream& operator<<(std::ostream& os, const Label& l);
std::ostream& operator<<(std::ostream& os, const RuntimeFunction& b);
std::ostream& operator<<(std::ostream& os, const Generic& g);
diff --git a/deps/v8/src/torque/declaration-visitor.cc b/deps/v8/src/torque/declaration-visitor.cc
index abc207c049..cccb8ce1d2 100644
--- a/deps/v8/src/torque/declaration-visitor.cc
+++ b/deps/v8/src/torque/declaration-visitor.cc
@@ -3,37 +3,12 @@
// found in the LICENSE file.
#include "src/torque/declaration-visitor.h"
+#include "src/torque/ast.h"
namespace v8 {
namespace internal {
namespace torque {
-void DeclarationVisitor::Visit(Expression* expr) {
- CurrentSourcePosition::Scope scope(expr->pos);
- switch (expr->kind) {
-#define ENUM_ITEM(name) \
- case AstNode::Kind::k##name: \
- return Visit(name::cast(expr));
- AST_EXPRESSION_NODE_KIND_LIST(ENUM_ITEM)
-#undef ENUM_ITEM
- default:
- UNIMPLEMENTED();
- }
-}
-
-void DeclarationVisitor::Visit(Statement* stmt) {
- CurrentSourcePosition::Scope scope(stmt->pos);
- switch (stmt->kind) {
-#define ENUM_ITEM(name) \
- case AstNode::Kind::k##name: \
- return Visit(name::cast(stmt));
- AST_STATEMENT_NODE_KIND_LIST(ENUM_ITEM)
-#undef ENUM_ITEM
- default:
- UNIMPLEMENTED();
- }
-}
-
void DeclarationVisitor::Visit(Declaration* decl) {
CurrentSourcePosition::Scope scope(decl->pos);
switch (decl->kind) {
@@ -48,7 +23,7 @@ void DeclarationVisitor::Visit(Declaration* decl) {
}
void DeclarationVisitor::Visit(CallableNode* decl, const Signature& signature,
- Statement* body) {
+ base::Optional<Statement*> body) {
switch (decl->kind) {
#define ENUM_ITEM(name) \
case AstNode::Kind::k##name: \
@@ -60,8 +35,11 @@ void DeclarationVisitor::Visit(CallableNode* decl, const Signature& signature,
}
}
-Builtin* DeclarationVisitor::BuiltinDeclarationCommon(
- BuiltinDeclaration* decl, bool external, const Signature& signature) {
+Builtin* DeclarationVisitor::CreateBuiltin(BuiltinDeclaration* decl,
+ std::string external_name,
+ std::string readable_name,
+ Signature signature,
+ base::Optional<Statement*> body) {
const bool javascript = decl->javascript_linkage;
const bool varargs = decl->signature->parameters.has_varargs;
Builtin::Kind kind = !javascript ? Builtin::kStub
@@ -70,7 +48,7 @@ Builtin* DeclarationVisitor::BuiltinDeclarationCommon(
if (signature.types().size() == 0 ||
!(signature.types()[0] ==
- declarations()->LookupGlobalType(CONTEXT_TYPE_STRING))) {
+ Declarations::LookupGlobalType(CONTEXT_TYPE_STRING))) {
std::stringstream stream;
stream << "first parameter to builtin " << decl->name
<< " is not a context but should be";
@@ -87,7 +65,7 @@ Builtin* DeclarationVisitor::BuiltinDeclarationCommon(
if (javascript) {
if (signature.types().size() < 2 ||
!(signature.types()[1] ==
- declarations()->LookupGlobalType(OBJECT_TYPE_STRING))) {
+ Declarations::LookupGlobalType(OBJECT_TYPE_STRING))) {
std::stringstream stream;
stream << "second parameter to javascript builtin " << decl->name
<< " is " << *signature.types()[1] << " but should be Object";
@@ -104,22 +82,22 @@ Builtin* DeclarationVisitor::BuiltinDeclarationCommon(
ReportError(stream.str());
}
- std::string generated_name = GetGeneratedCallableName(
- decl->name, declarations()->GetCurrentSpecializationTypeNamesVector());
- return declarations()->DeclareBuiltin(generated_name, kind, external,
- signature);
+ return Declarations::CreateBuiltin(
+ std::move(external_name), std::move(readable_name), kind,
+ std::move(signature), decl->transitioning, body);
}
void DeclarationVisitor::Visit(ExternalRuntimeDeclaration* decl,
- const Signature& signature, Statement* body) {
- if (global_context_.verbose()) {
+ const Signature& signature,
+ base::Optional<Statement*> body) {
+ if (GlobalContext::verbose()) {
std::cout << "found declaration of external runtime " << decl->name
<< " with signature ";
}
if (signature.parameter_types.types.size() == 0 ||
!(signature.parameter_types.types[0] ==
- declarations()->LookupGlobalType(CONTEXT_TYPE_STRING))) {
+ Declarations::LookupGlobalType(CONTEXT_TYPE_STRING))) {
std::stringstream stream;
stream << "first parameter to runtime " << decl->name
<< " is not a context but should be";
@@ -135,55 +113,45 @@ void DeclarationVisitor::Visit(ExternalRuntimeDeclaration* decl,
ReportError(stream.str());
}
- declarations()->DeclareRuntimeFunction(decl->name, signature);
+ Declarations::DeclareRuntimeFunction(decl->name, signature,
+ decl->transitioning);
}
void DeclarationVisitor::Visit(ExternalMacroDeclaration* decl,
- const Signature& signature, Statement* body) {
- if (global_context_.verbose()) {
+ const Signature& signature,
+ base::Optional<Statement*> body) {
+ if (GlobalContext::verbose()) {
std::cout << "found declaration of external macro " << decl->name
<< " with signature ";
}
- std::string generated_name = GetGeneratedCallableName(
- decl->name, declarations()->GetCurrentSpecializationTypeNamesVector());
- declarations()->DeclareMacro(generated_name, signature, decl->op);
+ Declarations::DeclareMacro(decl->name, decl->external_assembler_name,
+ signature, decl->transitioning, body, decl->op);
}
void DeclarationVisitor::Visit(TorqueBuiltinDeclaration* decl,
- const Signature& signature, Statement* body) {
- Builtin* builtin = BuiltinDeclarationCommon(decl, false, signature);
- CurrentCallableActivator activator(global_context_, builtin, decl);
- DeclareSignature(signature);
- if (signature.parameter_types.var_args) {
- declarations()->DeclareExternConstant(
- decl->signature->parameters.arguments_variable,
- TypeOracle::GetArgumentsType(), "arguments");
- }
- torque_builtins_.push_back(builtin);
- Visit(body);
+ const Signature& signature,
+ base::Optional<Statement*> body) {
+ Declarations::Declare(
+ decl->name, CreateBuiltin(decl, decl->name, decl->name, signature, body));
}
void DeclarationVisitor::Visit(TorqueMacroDeclaration* decl,
- const Signature& signature, Statement* body) {
- std::string generated_name = GetGeneratedCallableName(
- decl->name, declarations()->GetCurrentSpecializationTypeNamesVector());
- Macro* macro =
- declarations()->DeclareMacro(generated_name, signature, decl->op);
-
- CurrentCallableActivator activator(global_context_, macro, decl);
-
- DeclareSignature(signature);
+ const Signature& signature,
+ base::Optional<Statement*> body) {
+ Declarations::DeclareMacro(decl->name, base::nullopt, signature,
+ decl->transitioning, body, decl->op);
+}
- if (body != nullptr) {
- Visit(body);
- }
+void DeclarationVisitor::Visit(IntrinsicDeclaration* decl,
+ const Signature& signature,
+ base::Optional<Statement*> body) {
+ Declarations::DeclareIntrinsic(decl->name, signature);
}
void DeclarationVisitor::Visit(ConstDeclaration* decl) {
- declarations()->DeclareModuleConstant(decl->name,
- declarations()->GetType(decl->type));
- Visit(decl->expression);
+ Declarations::DeclareNamespaceConstant(
+ decl->name, Declarations::GetType(decl->type), decl->expression);
}
void DeclarationVisitor::Visit(StandardDeclaration* decl) {
@@ -192,7 +160,7 @@ void DeclarationVisitor::Visit(StandardDeclaration* decl) {
}
void DeclarationVisitor::Visit(GenericDeclaration* decl) {
- declarations()->DeclareGeneric(decl->callable->name, CurrentModule(), decl);
+ Declarations::DeclareGeneric(decl->callable->name, decl);
}
void DeclarationVisitor::Visit(SpecializationDeclaration* decl) {
@@ -203,111 +171,54 @@ void DeclarationVisitor::Visit(SpecializationDeclaration* decl) {
ReportError(stream.str());
}
- GenericList* generic_list = declarations()->LookupGeneric(decl->name);
+ std::vector<Generic*> generic_list = Declarations::LookupGeneric(decl->name);
// Find the matching generic specialization based on the concrete parameter
// list.
- CallableNode* matching_callable = nullptr;
- SpecializationKey matching_key;
+ Generic* matching_generic = nullptr;
Signature signature_with_types = MakeSignature(decl->signature.get());
- for (Generic* generic : generic_list->list()) {
- SpecializationKey key = {generic, GetTypeVector(decl->generic_parameters)};
- CallableNode* callable_candidate = generic->declaration()->callable;
- // Abuse the Specialization nodes' scope to temporarily declare the
- // specialization aliases for the generic types to compare signatures. This
- // scope is never used for anything else, so it's OK to pollute it.
- Declarations::CleanNodeScopeActivator specialization_activator(
- declarations(), decl);
- DeclareSpecializedTypes(key);
- Signature generic_signature_with_types =
- MakeSignature(generic->declaration()->callable->signature.get());
- if (signature_with_types.HasSameTypesAs(generic_signature_with_types)) {
- if (matching_callable != nullptr) {
+ for (Generic* generic : generic_list) {
+ Signature generic_signature_with_types = MakeSpecializedSignature(
+ SpecializationKey{generic, GetTypeVector(decl->generic_parameters)});
+ if (signature_with_types.HasSameTypesAs(generic_signature_with_types,
+ ParameterMode::kIgnoreImplicit)) {
+ if (matching_generic != nullptr) {
std::stringstream stream;
- stream << "specialization of " << callable_candidate->name
+ stream << "specialization of " << decl->name
<< " is ambigous, it matches more than one generic declaration ("
- << *matching_key.first << " and " << *key.first << ")";
+ << *matching_generic << " and " << *generic << ")";
ReportError(stream.str());
}
- matching_callable = callable_candidate;
- matching_key = key;
+ matching_generic = generic;
}
}
- if (matching_callable == nullptr) {
+ if (matching_generic == nullptr) {
std::stringstream stream;
- stream << "specialization of " << decl->name
- << " doesn't match any generic declaration";
- ReportError(stream.str());
- }
-
- // Make sure the declarations of the parameter types for the specialization
- // are the ones from the matching generic.
- {
- Declarations::CleanNodeScopeActivator specialization_activator(
- declarations(), decl);
- DeclareSpecializedTypes(matching_key);
- }
-
- SpecializeGeneric({matching_key, matching_callable, decl->signature.get(),
- decl->body, decl->pos});
-}
-
-void DeclarationVisitor::Visit(ReturnStatement* stmt) {
- if (stmt->value) {
- Visit(*stmt->value);
- }
-}
-
-Variable* DeclarationVisitor::DeclareVariable(const std::string& name,
- const Type* type, bool is_const) {
- Variable* result = declarations()->DeclareVariable(name, type, is_const);
- return result;
-}
-
-Parameter* DeclarationVisitor::DeclareParameter(const std::string& name,
- const Type* type) {
- return declarations()->DeclareParameter(
- name, GetParameterVariableFromName(name), type);
-}
-
-void DeclarationVisitor::Visit(VarDeclarationStatement* stmt) {
- std::string variable_name = stmt->name;
- if (!stmt->const_qualified) {
- if (!stmt->type) {
- ReportError(
- "variable declaration is missing type. Only 'const' bindings can "
- "infer the type.");
- }
- const Type* type = declarations()->GetType(*stmt->type);
- if (type->IsConstexpr()) {
- ReportError(
- "cannot declare variable with constexpr type. Use 'const' instead.");
+ if (generic_list.size() == 0) {
+ stream << "no generic defined with the name " << decl->name;
+ ReportError(stream.str());
}
- DeclareVariable(variable_name, type, stmt->const_qualified);
- if (global_context_.verbose()) {
- std::cout << "declared variable " << variable_name << " with type "
- << *type << "\n";
+ stream << "specialization of " << decl->name
+ << " doesn't match any generic declaration\n";
+ stream << "specialization signature:";
+ stream << "\n " << signature_with_types;
+ stream << "\ncandidates are:";
+ for (Generic* generic : generic_list) {
+ stream << "\n "
+ << MakeSpecializedSignature(SpecializationKey{
+ generic, GetTypeVector(decl->generic_parameters)});
}
- }
-
- // const qualified variables are required to be initialized properly.
- if (stmt->const_qualified && !stmt->initializer) {
- std::stringstream stream;
- stream << "local constant \"" << variable_name << "\" is not initialized.";
ReportError(stream.str());
}
- if (stmt->initializer) {
- Visit(*stmt->initializer);
- if (global_context_.verbose()) {
- std::cout << "variable has initialization expression at "
- << CurrentPositionAsString() << "\n";
- }
- }
+ Specialize(SpecializationKey{matching_generic,
+ GetTypeVector(decl->generic_parameters)},
+ matching_generic->declaration()->callable, decl->signature.get(),
+ decl->body);
}
void DeclarationVisitor::Visit(ExternConstDeclaration* decl) {
- const Type* type = declarations()->GetType(decl->type);
+ const Type* type = Declarations::GetType(decl->type);
if (!type->IsConstexpr()) {
std::stringstream stream;
stream << "extern constants must have constexpr type, but found: \""
@@ -315,306 +226,365 @@ void DeclarationVisitor::Visit(ExternConstDeclaration* decl) {
ReportError(stream.str());
}
- declarations()->DeclareExternConstant(decl->name, type, decl->literal);
-}
-
-void DeclarationVisitor::Visit(StructDeclaration* decl) {
- std::vector<NameAndType> fields;
- for (auto& field : decl->fields) {
- const Type* field_type = declarations()->GetType(field.type);
- fields.push_back({field.name, field_type});
+ Declarations::DeclareExternConstant(decl->name, type, decl->literal);
+}
+
+void DeclarationVisitor::DeclareMethods(
+ AggregateType* container_type, const std::vector<Declaration*>& methods) {
+ // Declare the class' methods
+ IdentifierExpression* constructor_this = MakeNode<IdentifierExpression>(
+ std::vector<std::string>{}, kThisParameterName);
+ AggregateType* constructor_this_type =
+ container_type->IsStructType()
+ ? container_type
+ : ClassType::cast(container_type)->struct_type();
+ for (auto declaration : methods) {
+ CurrentSourcePosition::Scope pos_scope(declaration->pos);
+ StandardDeclaration* standard_declaration =
+ StandardDeclaration::DynamicCast(declaration);
+ DCHECK(standard_declaration);
+ TorqueMacroDeclaration* method =
+ TorqueMacroDeclaration::DynamicCast(standard_declaration->callable);
+ Signature signature = MakeSignature(method->signature.get());
+ signature.parameter_names.insert(
+ signature.parameter_names.begin() + signature.implicit_count,
+ kThisParameterName);
+ Statement* body = *(standard_declaration->body);
+ std::string method_name(method->name);
+ if (method->name == kConstructMethodName) {
+ signature.parameter_types.types.insert(
+ signature.parameter_types.types.begin() + signature.implicit_count,
+ constructor_this_type);
+ // Constructor
+ if (!signature.return_type->IsVoid()) {
+ ReportError("constructors musn't have a return type");
+ }
+ if (signature.labels.size() != 0) {
+ ReportError("constructors musn't have labels");
+ }
+ method_name = kConstructMethodName;
+ Declarations::CreateMethod(constructor_this_type, method_name, signature,
+ false, body);
+ } else {
+ signature.parameter_types.types.insert(
+ signature.parameter_types.types.begin() + signature.implicit_count,
+ container_type);
+ Declarations::CreateMethod(container_type, method_name, signature, false,
+ body);
+ }
}
- declarations()->DeclareStruct(CurrentModule(), decl->name, fields);
-}
-void DeclarationVisitor::Visit(LogicalOrExpression* expr) {
- {
- Declarations::NodeScopeActivator scope(declarations(), expr->left);
- declarations()->DeclareLabel(kFalseLabelName);
- Visit(expr->left);
+ if (container_type->Constructors().size() != 0) return;
+
+ // Generate default constructor.
+ Signature constructor_signature;
+ constructor_signature.parameter_types.var_args = false;
+ constructor_signature.return_type = TypeOracle::GetVoidType();
+ std::vector<const AggregateType*> hierarchy = container_type->GetHierarchy();
+
+ std::vector<Statement*> statements;
+ std::vector<Statement*> initializer_statements;
+
+ size_t parameter_number = 0;
+ constructor_signature.parameter_names.push_back(kThisParameterName);
+ constructor_signature.parameter_types.types.push_back(constructor_this_type);
+ std::vector<Expression*> super_arguments;
+ for (auto current_type : hierarchy) {
+ for (auto& f : current_type->fields()) {
+ std::string parameter_name("p" + std::to_string(parameter_number++));
+ constructor_signature.parameter_names.push_back(parameter_name);
+ constructor_signature.parameter_types.types.push_back(
+ f.name_and_type.type);
+ IdentifierExpression* value = MakeNode<IdentifierExpression>(
+ std::vector<std::string>{}, parameter_name);
+ if (container_type != current_type) {
+ super_arguments.push_back(MakeNode<IdentifierExpression>(
+ std::vector<std::string>{}, parameter_name));
+ } else {
+ LocationExpression* location = MakeNode<FieldAccessExpression>(
+ constructor_this, f.name_and_type.name);
+ Statement* statement = MakeNode<ExpressionStatement>(
+ MakeNode<AssignmentExpression>(location, base::nullopt, value));
+ initializer_statements.push_back(statement);
+ }
+ }
}
- Visit(expr->right);
-}
-void DeclarationVisitor::Visit(LogicalAndExpression* expr) {
- {
- Declarations::NodeScopeActivator scope(declarations(), expr->left);
- declarations()->DeclareLabel(kTrueLabelName);
- Visit(expr->left);
+ if (hierarchy.size() > 1) {
+ IdentifierExpression* super_identifier = MakeNode<IdentifierExpression>(
+ std::vector<std::string>{}, kSuperMethodName);
+ Statement* statement =
+ MakeNode<ExpressionStatement>(MakeNode<CallMethodExpression>(
+ constructor_this, super_identifier, super_arguments,
+ std::vector<std::string>{}));
+ statements.push_back(statement);
}
- Visit(expr->right);
-}
-
-void DeclarationVisitor::DeclareExpressionForBranch(
- Expression* node, base::Optional<Statement*> true_statement,
- base::Optional<Statement*> false_statement) {
- Declarations::NodeScopeActivator scope(declarations(), node);
- // Conditional expressions can either explicitly return a bit
- // type, or they can be backed by macros that don't return but
- // take a true and false label. By declaring the labels before
- // visiting the conditional expression, those label-based
- // macro conditionals will be able to find them through normal
- // label lookups.
- declarations()->DeclareLabel(kTrueLabelName, true_statement);
- declarations()->DeclareLabel(kFalseLabelName, false_statement);
- Visit(node);
-}
-
-void DeclarationVisitor::Visit(ConditionalExpression* expr) {
- DeclareExpressionForBranch(expr->condition);
- Visit(expr->if_true);
- Visit(expr->if_false);
-}
-
-void DeclarationVisitor::Visit(IfStatement* stmt) {
- DeclareExpressionForBranch(stmt->condition, stmt->if_true, stmt->if_false);
- Visit(stmt->if_true);
- if (stmt->if_false) Visit(*stmt->if_false);
-}
-void DeclarationVisitor::Visit(WhileStatement* stmt) {
- Declarations::NodeScopeActivator scope(declarations(), stmt);
- DeclareExpressionForBranch(stmt->condition);
- Visit(stmt->body);
-}
-
-void DeclarationVisitor::Visit(ForOfLoopStatement* stmt) {
- // Scope for for iteration variable
- Declarations::NodeScopeActivator scope(declarations(), stmt);
- Visit(stmt->var_declaration);
- Visit(stmt->iterable);
- if (stmt->begin) Visit(*stmt->begin);
- if (stmt->end) Visit(*stmt->end);
- Visit(stmt->body);
-}
-
-void DeclarationVisitor::Visit(ForLoopStatement* stmt) {
- Declarations::NodeScopeActivator scope(declarations(), stmt);
- if (stmt->var_declaration) Visit(*stmt->var_declaration);
+ for (auto s : initializer_statements) {
+ statements.push_back(s);
+ }
- // Same as DeclareExpressionForBranch, but without the extra scope.
- // If no test expression is present we can not use it for the scope.
- declarations()->DeclareLabel(kTrueLabelName);
- declarations()->DeclareLabel(kFalseLabelName);
- if (stmt->test) Visit(*stmt->test);
+ Statement* constructor_body = MakeNode<BlockStatement>(false, statements);
- Visit(stmt->body);
- if (stmt->action) Visit(*stmt->action);
+ Declarations::CreateMethod(constructor_this_type, kConstructMethodName,
+ constructor_signature, false, constructor_body);
}
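Given a hypothetical hierarchy `class Base extends Tagged { a: Smi }` / `class Derived extends Base { b: Smi }`, the synthesized default constructor for Derived is roughly the Torque equivalent of:

    constructor(p0: Smi, p1: Smi) {
      this.super(p0);  // forwards the inherited field's parameter
      this.b = p1;     // initializes the class' own field
    }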
-void DeclarationVisitor::Visit(TryLabelExpression* stmt) {
- // Activate a new scope to declare the handler's label parameters, they should
- // not be visible outside the label block.
- {
- Declarations::NodeScopeActivator scope(declarations(), stmt);
-
- // Declare label
- {
- LabelBlock* block = stmt->label_block;
- CurrentSourcePosition::Scope scope(block->pos);
- Label* shared_label =
- declarations()->DeclareLabel(block->label, block->body);
- {
- Declarations::NodeScopeActivator scope(declarations(), block->body);
- if (block->parameters.has_varargs) {
- std::stringstream stream;
- stream << "cannot use ... for label parameters";
- ReportError(stream.str());
- }
-
- size_t i = 0;
- for (const auto& p : block->parameters.names) {
- const Type* type =
- declarations()->GetType(block->parameters.types[i]);
- if (type->IsConstexpr()) {
- ReportError("no constexpr type allowed for label arguments");
- }
-
- shared_label->AddVariable(DeclareVariable(p, type, false));
- ++i;
- }
- if (global_context_.verbose()) {
- std::cout << " declaring label " << block->label << "\n";
- }
- }
+void DeclarationVisitor::Visit(StructDeclaration* decl) {
+ std::vector<Field> fields;
+ size_t offset = 0;
+ for (auto& field : decl->fields) {
+ const Type* field_type = Declarations::GetType(field.name_and_type.type);
+ fields.push_back({field.name_and_type.type->pos,
+ {field.name_and_type.name, field_type},
+ offset,
+ false});
+ offset += LoweredSlotCount(field_type);
+ }
+ StructType* struct_type = Declarations::DeclareStruct(decl->name, fields);
+ DeclareMethods(struct_type, decl->methods);
+}
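The struct field offsets recorded here are indices into the lowered value stack (one slot per primitive value), not byte offsets; a nested struct field advances the offset by however many slots it lowers to. For example (hypothetical structs):

    // struct Point { x: Smi; y: Smi }         -> x at slot 0, y at slot 1
    // struct Line  { from: Point; to: Point } -> from at slot 0, to at slot 2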
+
+void DeclarationVisitor::Visit(ClassDeclaration* decl) {
+ // Compute the offset of the class' first member. If the class extends
+ // another class, it's the size of the extended class, otherwise zero.
+ size_t first_field_offset = 0;
+ const Type* super_type = Declarations::LookupType(decl->super);
+ if (super_type != TypeOracle::GetTaggedType()) {
+ const ClassType* super_class = ClassType::DynamicCast(super_type);
+ if (!super_class) {
+ ReportError("class \"", decl->name,
+ "\" must extend either Tagged or an already declared class");
}
-
- Visit(stmt->try_expression);
+ first_field_offset = super_class->size();
}
- Visit(stmt->label_block->body);
-}
-
-void DeclarationVisitor::GenerateHeader(std::string& file_name) {
- std::stringstream new_contents_stream;
- new_contents_stream
- << "#ifndef V8_BUILTINS_BUILTIN_DEFINITIONS_FROM_DSL_H_\n"
- "#define V8_BUILTINS_BUILTIN_DEFINITIONS_FROM_DSL_H_\n"
- "\n"
- "#define BUILTIN_LIST_FROM_DSL(CPP, API, TFJ, TFC, TFS, TFH, ASM) "
- "\\\n";
- for (auto builtin : torque_builtins_) {
- int firstParameterIndex = 1;
- bool declareParameters = true;
- if (builtin->IsStub()) {
- new_contents_stream << "TFS(" << builtin->name();
- } else {
- new_contents_stream << "TFJ(" << builtin->name();
- if (builtin->IsVarArgsJavaScript()) {
- new_contents_stream
- << ", SharedFunctionInfo::kDontAdaptArgumentsSentinel";
- declareParameters = false;
- } else {
- assert(builtin->IsFixedArgsJavaScript());
- // FixedArg javascript builtins need to offer the parameter
- // count.
- assert(builtin->parameter_names().size() >= 2);
- new_contents_stream << ", " << (builtin->parameter_names().size() - 2);
- // And the receiver is explicitly declared.
- new_contents_stream << ", kReceiver";
- firstParameterIndex = 2;
- }
+ // The generates clause must create a TNode<>
+ std::string generates = decl->name;
+ if (decl->generates) {
+ if (generates.length() < 7 || generates.substr(0, 6) != "TNode<" ||
+ generates.substr(generates.length() - 1, 1) != ">") {
+ ReportError("generated type \"", generates,
+ "\" should be of the form \"TNode<...>\"");
}
- if (declareParameters) {
- int index = 0;
- for (const auto& parameter : builtin->parameter_names()) {
- if (index >= firstParameterIndex) {
- new_contents_stream << ", k" << CamelifyString(parameter);
+ generates = generates.substr(6, generates.length() - 7);
+ }
+
+ std::vector<Field> fields;
+ size_t class_offset = first_field_offset;
+ bool seen_strong = false;
+ bool seen_weak = false;
+ for (ClassFieldExpression& field : decl->fields) {
+ const Type* field_type = Declarations::GetType(field.name_and_type.type);
+ if (field_type->IsSubtypeOf(TypeOracle::GetTaggedType())) {
+ if (field.weak) {
+ seen_weak = true;
+ } else {
+ if (seen_weak) {
+ ReportError("cannot declare strong field \"",
+ field.name_and_type.name,
+ "\" after weak Tagged references");
}
- index++;
+ seen_strong = true;
}
- }
- new_contents_stream << ") \\\n";
- }
- new_contents_stream
- << "\n"
- "#endif // V8_BUILTINS_BUILTIN_DEFINITIONS_FROM_DSL_H_\n";
-
- std::string new_contents(new_contents_stream.str());
- ReplaceFileContentsIfDifferent(file_name, new_contents);
-}
-
-void DeclarationVisitor::Visit(IdentifierExpression* expr) {
- if (expr->generic_arguments.size() != 0) {
- TypeVector specialization_types;
- for (auto t : expr->generic_arguments) {
- specialization_types.push_back(declarations()->GetType(t));
- }
- // Specialize all versions of the generic, since the exact parameter type
- // list cannot be resolved until the call's parameter expressions are
- // evaluated. This is an overly conservative but simple way to make sure
- // that the correct specialization exists.
- for (auto generic : declarations()->LookupGeneric(expr->name)->list()) {
- CallableNode* callable = generic->declaration()->callable;
- if (generic->declaration()->body) {
- QueueGenericSpecialization({generic, specialization_types}, callable,
- callable->signature.get(),
- generic->declaration()->body);
+ } else {
+ if (seen_strong || seen_weak) {
+ ReportError("cannot declare scalar field \"", field.name_and_type.name,
+ "\" after strong or weak Tagged references");
}
}
- }
-}
-
-void DeclarationVisitor::Visit(StatementExpression* expr) {
- Visit(expr->statement);
-}
-
-void DeclarationVisitor::Visit(CallExpression* expr) {
- Visit(&expr->callee);
- for (Expression* arg : expr->arguments) Visit(arg);
+ if (!field_type->IsSubtypeOf(TypeOracle::GetTaggedType())) {
+ ReportError(
+ "field \"", field.name_and_type.name, "\" of class \"", decl->name,
+ "\" must be a subtype of Tagged (other types not yet supported)");
+ }
+ fields.push_back({field.name_and_type.type->pos,
+ {field.name_and_type.name, field_type},
+ class_offset,
+ field.weak});
+ class_offset += kTaggedSize;
+ }
+
+ auto new_class = Declarations::DeclareClass(
+ super_type, decl->name, decl->transient, generates, fields, class_offset);
+ DeclareMethods(new_class, decl->methods);
+
+  // For each field, construct AST snippets that implement a CSA accessor
+  // function and define a corresponding '.field' operator. The
+  // implementation visitor will turn the snippets into code.
+ for (auto& field : new_class->fields()) {
+ IdentifierExpression* parameter =
+ MakeNode<IdentifierExpression>(std::string{"o"});
+
+ // Load accessor
+ std::string camel_field_name = CamelifyString(field.name_and_type.name);
+ std::string load_macro_name = "Load" + new_class->name() + camel_field_name;
+ std::string load_operator_name = "." + field.name_and_type.name;
+ Signature load_signature;
+ load_signature.parameter_names.push_back("o");
+ load_signature.parameter_types.types.push_back(new_class);
+ load_signature.parameter_types.var_args = false;
+ load_signature.return_type = field.name_and_type.type;
+ Statement* load_body =
+ MakeNode<ReturnStatement>(MakeNode<LoadObjectFieldExpression>(
+ parameter, field.name_and_type.name));
+ Declarations::DeclareMacro(load_macro_name, base::nullopt, load_signature,
+ false, load_body, load_operator_name);
+
+ // Store accessor
+ IdentifierExpression* value = MakeNode<IdentifierExpression>(
+ std::vector<std::string>{}, std::string{"v"});
+ std::string store_macro_name =
+ "Store" + new_class->name() + camel_field_name;
+ std::string store_operator_name = "." + field.name_and_type.name + "=";
+ Signature store_signature;
+ store_signature.parameter_names.push_back("o");
+ store_signature.parameter_names.push_back("v");
+ store_signature.parameter_types.types.push_back(new_class);
+ store_signature.parameter_types.types.push_back(field.name_and_type.type);
+ store_signature.parameter_types.var_args = false;
+ // TODO(danno): Store macros probably should return their value argument
+ store_signature.return_type = TypeOracle::GetVoidType();
+ Statement* store_body =
+ MakeNode<ExpressionStatement>(MakeNode<StoreObjectFieldExpression>(
+ parameter, field.name_and_type.name, value));
+ Declarations::DeclareMacro(store_macro_name, base::nullopt, store_signature,
+ false, store_body, store_operator_name);
+ }
+
+ GlobalContext::RegisterClass(decl->name, new_class);
+}
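For a hypothetical `class Foo extends Tagged { x: Smi; }`, the accessor loop above therefore declares, per field, a pair of macros bound to field operators:

    // macro LoadFooX(o: Foo): Smi     bound to operator ".x"
    //   body: return (load of field "x" from o)
    // macro StoreFooX(o: Foo, v: Smi) bound to operator ".x="
    //   body: (store of v into field "x" of o)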
+
+void DeclarationVisitor::Visit(CppIncludeDeclaration* decl) {
+ GlobalContext::AddCppInclude(decl->include_path);
}
void DeclarationVisitor::Visit(TypeDeclaration* decl) {
std::string generates = decl->generates ? *decl->generates : std::string("");
- const AbstractType* type = declarations()->DeclareAbstractType(
- decl->name, generates, {}, decl->extends);
+ if (decl->generates) {
+ if (generates.length() < 7 || generates.substr(0, 6) != "TNode<" ||
+ generates.substr(generates.length() - 1, 1) != ">") {
+ ReportError("generated type \"", generates,
+ "\" should be of the form \"TNode<...>\"");
+ }
+ generates = generates.substr(6, generates.length() - 7);
+ }
+
+ const AbstractType* type = Declarations::DeclareAbstractType(
+ decl->name, decl->transient, generates, {}, decl->extends);
if (decl->constexpr_generates) {
+ if (decl->transient) {
+ ReportError("cannot declare a transient type that is also constexpr");
+ }
std::string constexpr_name = CONSTEXPR_TYPE_PREFIX + decl->name;
base::Optional<std::string> constexpr_extends;
if (decl->extends)
constexpr_extends = CONSTEXPR_TYPE_PREFIX + *decl->extends;
- declarations()->DeclareAbstractType(
- constexpr_name, *decl->constexpr_generates, type, constexpr_extends);
- }
-}
-
-void DeclarationVisitor::DeclareSignature(const Signature& signature) {
- auto type_iterator = signature.parameter_types.types.begin();
- for (const auto& name : signature.parameter_names) {
- const Type* t(*type_iterator++);
- if (name.size() != 0) {
- DeclareParameter(name, t);
- }
- }
- for (auto& label : signature.labels) {
- auto label_params = label.types;
- Label* new_label = declarations()->DeclareLabel(label.name);
- new_label->set_external_label_name("label_" + label.name);
- size_t i = 0;
- for (auto var_type : label_params) {
- if (var_type->IsConstexpr()) {
- ReportError("no constexpr type allowed for label arguments");
- }
-
- std::string var_name = label.name + std::to_string(i++);
- new_label->AddVariable(
- declarations()->CreateVariable(var_name, var_type, false));
- }
+ Declarations::DeclareAbstractType(constexpr_name, false,
+ *decl->constexpr_generates, type,
+ constexpr_extends);
}
}
void DeclarationVisitor::DeclareSpecializedTypes(const SpecializationKey& key) {
size_t i = 0;
- Generic* generic = key.first;
const std::size_t generic_parameter_count =
- generic->declaration()->generic_parameters.size();
- if (generic_parameter_count != key.second.size()) {
+ key.generic->declaration()->generic_parameters.size();
+ if (generic_parameter_count != key.specialized_types.size()) {
std::stringstream stream;
stream << "Wrong generic argument count for specialization of \""
- << generic->name() << "\", expected: " << generic_parameter_count
- << ", actual: " << key.second.size();
+ << key.generic->name() << "\", expected: " << generic_parameter_count
+ << ", actual: " << key.specialized_types.size();
ReportError(stream.str());
}
- for (auto type : key.second) {
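+ // Bind each generic parameter name (e.g. a hypothetical "T") to its
+ // concrete specialization type as a type alias in the current scope.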
+ for (auto type : key.specialized_types) {
std::string generic_type_name =
- generic->declaration()->generic_parameters[i++];
- declarations()->DeclareType(generic_type_name, type);
- }
+ key.generic->declaration()->generic_parameters[i++];
+ Declarations::DeclareType(generic_type_name, type, true);
+ }
+}
+
+Signature DeclarationVisitor::MakeSpecializedSignature(
+ const SpecializationKey& key) {
+ CurrentScope::Scope generic_scope(key.generic->ParentScope());
+ // Create a temporary fake namespace in which to declare the
+ // specialization aliases for the generic types while building the signature.
+ Namespace tmp_namespace("_tmp");
+ CurrentScope::Scope tmp_namespace_scope(&tmp_namespace);
+ DeclareSpecializedTypes(key);
+ return MakeSignature(key.generic->declaration()->callable->signature.get());
+}
+
+Callable* DeclarationVisitor::SpecializeImplicit(const SpecializationKey& key) {
+ if (!key.generic->declaration()->body &&
+ IntrinsicDeclaration::DynamicCast(key.generic->declaration()->callable) ==
+ nullptr) {
+ ReportError("missing specialization of ", key.generic->name(),
+ " with types <", key.specialized_types, "> declared at ",
+ key.generic->pos());
+ }
+ CurrentScope::Scope generic_scope(key.generic->ParentScope());
+ Callable* result =
+ Specialize(key, key.generic->declaration()->callable, base::nullopt,
+ key.generic->declaration()->body);
+ CurrentScope::Scope callable_scope(result);
+ DeclareSpecializedTypes(key);
+ return result;
}
-void DeclarationVisitor::Specialize(const SpecializationKey& key,
- CallableNode* callable,
- const CallableNodeSignature* signature,
- Statement* body) {
- Generic* generic = key.first;
-
+Callable* DeclarationVisitor::Specialize(
+ const SpecializationKey& key, CallableNode* declaration,
+ base::Optional<const CallableNodeSignature*> signature,
+ base::Optional<Statement*> body) {
// TODO(tebbi): The error should point to the source position where the
// instantiation was requested.
- CurrentSourcePosition::Scope pos_scope(generic->declaration()->pos);
+ CurrentSourcePosition::Scope pos_scope(key.generic->declaration()->pos);
size_t generic_parameter_count =
- generic->declaration()->generic_parameters.size();
- if (generic_parameter_count != key.second.size()) {
+ key.generic->declaration()->generic_parameters.size();
+ if (generic_parameter_count != key.specialized_types.size()) {
std::stringstream stream;
stream << "number of template parameters ("
- << std::to_string(key.second.size())
- << ") to intantiation of generic " << callable->name
+ << std::to_string(key.specialized_types.size())
+ << ") to intantiation of generic " << declaration->name
<< " doesnt match the generic's declaration ("
<< std::to_string(generic_parameter_count) << ")";
ReportError(stream.str());
}
-
- Signature type_signature;
- {
- // Manually activate the specialized generic's scope when declaring the
- // generic parameter specializations.
- Declarations::GenericScopeActivator namespace_scope(declarations(), key);
- DeclareSpecializedTypes(key);
- type_signature = MakeSignature(signature);
- }
-
- Visit(callable, type_signature, body);
+ if (key.generic->GetSpecialization(key.specialized_types)) {
+ ReportError("cannot redeclare specialization of ", key.generic->name(),
+ " with types <", key.specialized_types, ">");
+ }
+
+ Signature type_signature =
+ signature ? MakeSignature(*signature) : MakeSpecializedSignature(key);
+
+ std::string generated_name = Declarations::GetGeneratedCallableName(
+ declaration->name, key.specialized_types);
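+ // Build a human-readable name for diagnostics, e.g. "Foo<Smi, Object>"
+ // for a hypothetical generic Foo specialized with <Smi, Object>; the
+ // generated name above is a mangled variant usable as an identifier.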
+ std::stringstream readable_name;
+ readable_name << declaration->name << "<";
+ bool first = true;
+ for (const Type* t : key.specialized_types) {
+ if (!first) readable_name << ", ";
+ readable_name << *t;
+ first = false;
+ }
+ readable_name << ">";
+ Callable* callable;
+ if (MacroDeclaration::DynamicCast(declaration) != nullptr) {
+ callable = Declarations::CreateMacro(generated_name, readable_name.str(),
+ base::nullopt, type_signature,
+ declaration->transitioning, *body);
+ } else if (IntrinsicDeclaration::DynamicCast(declaration) != nullptr) {
+ callable = Declarations::CreateIntrinsic(declaration->name, type_signature);
+ } else {
+ BuiltinDeclaration* builtin = BuiltinDeclaration::cast(declaration);
+ callable = CreateBuiltin(builtin, generated_name, readable_name.str(),
+ type_signature, *body);
+ }
+ key.generic->AddSpecialization(key.specialized_types, callable);
+ return callable;
}
} // namespace torque
diff --git a/deps/v8/src/torque/declaration-visitor.h b/deps/v8/src/torque/declaration-visitor.h
index d8a9698c6f..a492a277fd 100644
--- a/deps/v8/src/torque/declaration-visitor.h
+++ b/deps/v8/src/torque/declaration-visitor.h
@@ -12,7 +12,6 @@
#include "src/torque/declarations.h"
#include "src/torque/file-visitor.h"
#include "src/torque/global-context.h"
-#include "src/torque/scope.h"
#include "src/torque/types.h"
#include "src/torque/utils.h"
@@ -22,141 +21,80 @@ namespace torque {
class DeclarationVisitor : public FileVisitor {
public:
- explicit DeclarationVisitor(GlobalContext& global_context)
- : FileVisitor(global_context),
- scope_(declarations(), global_context.GetDefaultModule()) {}
-
void Visit(Ast* ast) {
- Visit(ast->default_module());
- DrainSpecializationQueue();
+ CurrentScope::Scope current_namespace(GlobalContext::GetDefaultNamespace());
+ for (Declaration* child : ast->declarations()) Visit(child);
}
- void Visit(Expression* expr);
- void Visit(Statement* stmt);
void Visit(Declaration* decl);
- void Visit(ModuleDeclaration* decl) {
- ScopedModuleActivator activator(this, decl->GetModule());
- Declarations::ModuleScopeActivator scope(declarations(), decl->GetModule());
- for (Declaration* child : decl->declarations) Visit(child);
- }
- void Visit(DefaultModuleDeclaration* decl) {
- decl->SetModule(global_context_.GetDefaultModule());
- Visit(implicit_cast<ModuleDeclaration*>(decl));
- }
- void Visit(ExplicitModuleDeclaration* decl) {
- decl->SetModule(global_context_.GetModule(decl->name));
- Visit(implicit_cast<ModuleDeclaration*>(decl));
+ Namespace* GetOrCreateNamespace(const std::string& name) {
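+ // Namespaces may be re-opened: reuse an existing namespace with this
+ // name if one is visible, otherwise declare a fresh one.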
+ std::vector<Namespace*> existing_namespaces = FilterDeclarables<Namespace>(
+ Declarations::TryLookupShallow(QualifiedName(name)));
+ if (existing_namespaces.empty()) {
+ return Declarations::DeclareNamespace(name);
+ }
+ DCHECK_EQ(1, existing_namespaces.size());
+ return existing_namespaces.front();
}
- void Visit(IdentifierExpression* expr);
- void Visit(NumberLiteralExpression* expr) {}
- void Visit(StringLiteralExpression* expr) {}
- void Visit(CallExpression* expr);
- void Visit(ElementAccessExpression* expr) {
- Visit(expr->array);
- Visit(expr->index);
- }
- void Visit(FieldAccessExpression* expr) { Visit(expr->object); }
- void Visit(BlockStatement* expr) {
- Declarations::NodeScopeActivator scope(declarations(), expr);
- for (Statement* stmt : expr->statements) Visit(stmt);
+ void Visit(NamespaceDeclaration* decl) {
+ CurrentScope::Scope current_scope(GetOrCreateNamespace(decl->name));
+ for (Declaration* child : decl->declarations) Visit(child);
}
- void Visit(ExpressionStatement* stmt) { Visit(stmt->expression); }
- void Visit(TailCallStatement* stmt) { Visit(stmt->call); }
+
void Visit(TypeDeclaration* decl);
+ void DeclareMethods(AggregateType* container,
+ const std::vector<Declaration*>& methods);
+ void Visit(StructDeclaration* decl);
+ void Visit(ClassDeclaration* decl);
+
void Visit(TypeAliasDeclaration* decl) {
- const Type* type = declarations()->GetType(decl->type);
+ const Type* type = Declarations::GetType(decl->type);
type->AddAlias(decl->name);
- declarations()->DeclareType(decl->name, type);
+ Declarations::DeclareType(decl->name, type, true);
}
- Builtin* BuiltinDeclarationCommon(BuiltinDeclaration* decl, bool external,
- const Signature& signature);
-
+ Builtin* CreateBuiltin(BuiltinDeclaration* decl, std::string external_name,
+ std::string readable_name, Signature signature,
+ base::Optional<Statement*> body);
void Visit(ExternalBuiltinDeclaration* decl, const Signature& signature,
- Statement* body) {
- BuiltinDeclarationCommon(decl, true, signature);
+ base::Optional<Statement*> body) {
+ Declarations::Declare(
+ decl->name,
+ CreateBuiltin(decl, decl->name, decl->name, signature, base::nullopt));
}
void Visit(ExternalRuntimeDeclaration* decl, const Signature& sig,
- Statement* body);
+ base::Optional<Statement*> body);
void Visit(ExternalMacroDeclaration* decl, const Signature& sig,
- Statement* body);
+ base::Optional<Statement*> body);
void Visit(TorqueBuiltinDeclaration* decl, const Signature& signature,
- Statement* body);
+ base::Optional<Statement*> body);
void Visit(TorqueMacroDeclaration* decl, const Signature& signature,
- Statement* body);
+ base::Optional<Statement*> body);
+ void Visit(IntrinsicDeclaration* decl, const Signature& signature,
+ base::Optional<Statement*> body);
- void Visit(CallableNode* decl, const Signature& signature, Statement* body);
+ void Visit(CallableNode* decl, const Signature& signature,
+ base::Optional<Statement*> body);
void Visit(ConstDeclaration* decl);
void Visit(StandardDeclaration* decl);
void Visit(GenericDeclaration* decl);
void Visit(SpecializationDeclaration* decl);
- void Visit(ReturnStatement* stmt);
-
- void Visit(DebugStatement* stmt) {}
- void Visit(AssertStatement* stmt) {
- bool do_check = !stmt->debug_only;
-#if defined(DEBUG)
- do_check = true;
-#endif
- if (do_check) DeclareExpressionForBranch(stmt->expression);
- }
-
- void Visit(VarDeclarationStatement* stmt);
void Visit(ExternConstDeclaration* decl);
+ void Visit(CppIncludeDeclaration* decl);
- void Visit(StructDeclaration* decl);
- void Visit(StructExpression* decl) {}
-
- void Visit(LogicalOrExpression* expr);
- void Visit(LogicalAndExpression* expr);
- void DeclareExpressionForBranch(
- Expression* node, base::Optional<Statement*> true_statement = {},
- base::Optional<Statement*> false_statement = {});
-
- void Visit(ConditionalExpression* expr);
- void Visit(IfStatement* stmt);
- void Visit(WhileStatement* stmt);
- void Visit(ForOfLoopStatement* stmt);
-
- void Visit(AssignmentExpression* expr) {
- Visit(expr->location);
- Visit(expr->value);
- }
-
- void Visit(BreakStatement* stmt) {}
- void Visit(ContinueStatement* stmt) {}
- void Visit(GotoStatement* expr) {}
- void Visit(ForLoopStatement* stmt);
-
- void Visit(IncrementDecrementExpression* expr) {
- Visit(expr->location);
- }
-
- void Visit(AssumeTypeImpossibleExpression* expr) { Visit(expr->expression); }
-
- void Visit(TryLabelExpression* stmt);
- void Visit(StatementExpression* stmt);
- void GenerateHeader(std::string& file_name);
+ Signature MakeSpecializedSignature(const SpecializationKey& key);
+ Callable* SpecializeImplicit(const SpecializationKey& key);
+ Callable* Specialize(const SpecializationKey& key, CallableNode* declaration,
+ base::Optional<const CallableNodeSignature*> signature,
+ base::Optional<Statement*> body);
private:
- Variable* DeclareVariable(const std::string& name, const Type* type,
- bool is_const);
- Parameter* DeclareParameter(const std::string& name, const Type* type);
-
- void DeclareSignature(const Signature& signature);
void DeclareSpecializedTypes(const SpecializationKey& key);
-
- void Specialize(const SpecializationKey& key, CallableNode* callable,
- const CallableNodeSignature* signature,
- Statement* body) override;
-
- Declarations::ModuleScopeActivator scope_;
- std::vector<Builtin*> torque_builtins_;
};
} // namespace torque
diff --git a/deps/v8/src/torque/declarations.cc b/deps/v8/src/torque/declarations.cc
index f001b98355..6cf7d0c4a4 100644
--- a/deps/v8/src/torque/declarations.cc
+++ b/deps/v8/src/torque/declarations.cc
@@ -4,84 +4,82 @@
#include "src/torque/declarations.h"
#include "src/torque/declarable.h"
+#include "src/torque/global-context.h"
#include "src/torque/type-oracle.h"
namespace v8 {
namespace internal {
namespace torque {
-Scope* Declarations::GetModuleScope(const Module* module) {
- auto i = module_scopes_.find(module);
- if (i != module_scopes_.end()) return i->second;
- Scope* result = chain_.NewScope();
- module_scopes_[module] = result;
- return result;
-}
+DEFINE_CONTEXTUAL_VARIABLE(GlobalContext)
+
+namespace {
-Scope* Declarations::GetNodeScope(const AstNode* node, bool reset_scope) {
- std::pair<const AstNode*, TypeVector> key(
- node, current_generic_specialization_ == nullptr
- ? TypeVector()
- : current_generic_specialization_->second);
- if (!reset_scope) {
- auto i = scopes_.find(key);
- if (i != scopes_.end()) return i->second;
+template <class T>
+std::vector<T> EnsureNonempty(std::vector<T> list, const std::string& name,
+ const char* kind) {
+ if (list.empty()) {
+ ReportError("there is no ", kind, " named ", name);
}
- Scope* result = chain_.NewScope();
- scopes_[key] = result;
- return result;
+ return std::move(list);
}
-Scope* Declarations::GetGenericScope(Generic* generic,
- const TypeVector& types) {
- std::pair<const AstNode*, TypeVector> key(generic->declaration()->callable,
- types);
- auto i = scopes_.find(key);
- if (i != scopes_.end()) return i->second;
- Scope* result = chain_.NewScope();
- scopes_[key] = result;
- return result;
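+// Resolve a reference that must be unambiguous: error out when nothing of
+// the requested kind matches, or when more than one candidate does.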
+template <class T, class Name>
+T EnsureUnique(const std::vector<T>& list, const Name& name, const char* kind) {
+ if (list.empty()) {
+ ReportError("there is no ", kind, " named ", name);
+ }
+ if (list.size() >= 2) {
+ ReportError("ambiguous reference to ", kind, " ", name);
+ }
+ return list.front();
}
-bool Declarations::IsDeclaredInCurrentScope(const std::string& name) {
- return chain_.ShallowLookup(name) != nullptr;
+template <class T>
+void CheckAlreadyDeclared(const std::string& name, const char* new_type) {
+ std::vector<T*> declarations =
+ FilterDeclarables<T>(Declarations::TryLookupShallow(QualifiedName(name)));
+ if (!declarations.empty()) {
+ Scope* scope = CurrentScope::Get();
+ ReportError("cannot redeclare ", name, " (type ", new_type, scope, ")");
+ }
}
-void Declarations::CheckAlreadyDeclared(const std::string& name,
- const char* new_type) {
- if (IsDeclaredInCurrentScope(name)) {
+} // namespace
+
+std::vector<Declarable*> Declarations::LookupGlobalScope(
+ const std::string& name) {
+ std::vector<Declarable*> d =
+ GlobalContext::GetDefaultNamespace()->Lookup(QualifiedName(name));
+ if (d.empty()) {
std::stringstream s;
- s << "cannot redeclare " << name << " (type " << new_type << ")";
+ s << "cannot find \"" << name << "\" in global scope";
ReportError(s.str());
}
+ return d;
}
-const Type* Declarations::LookupType(const std::string& name) {
- Declarable* raw = Lookup(name);
- if (raw->IsTypeAlias()) {
- return TypeAlias::cast(raw)->type();
- }
- std::stringstream s;
- s << "declaration \"" << name << "\" is not a Type";
- ReportError(s.str());
- return nullptr;
+const Type* Declarations::LookupType(const QualifiedName& name) {
+ TypeAlias* declaration =
+ EnsureUnique(FilterDeclarables<TypeAlias>(Lookup(name)), name, "type");
+ return declaration->type();
+}
+
+const Type* Declarations::LookupType(std::string name) {
+ return LookupType(QualifiedName(std::move(name)));
}
const Type* Declarations::LookupGlobalType(const std::string& name) {
- Declarable* raw = LookupGlobalScope(name);
- if (!raw->IsTypeAlias()) {
- std::stringstream s;
- s << "declaration \"" << name << "\" is not a Type";
- ReportError(s.str());
- }
- return TypeAlias::cast(raw)->type();
+ TypeAlias* declaration = EnsureUnique(
+ FilterDeclarables<TypeAlias>(LookupGlobalScope(name)), name, "type");
+ return declaration->type();
}
const Type* Declarations::GetType(TypeExpression* type_expression) {
if (auto* basic = BasicTypeExpression::DynamicCast(type_expression)) {
std::string name =
(basic->is_constexpr ? CONSTEXPR_TYPE_PREFIX : "") + basic->name;
- return LookupType(name);
+ return LookupType(QualifiedName{basic->namespace_qualification, name});
} else if (auto* union_type = UnionTypeExpression::cast(type_expression)) {
return TypeOracle::GetUnionType(GetType(union_type->a),
GetType(union_type->b));
@@ -91,14 +89,14 @@ const Type* Declarations::GetType(TypeExpression* type_expression) {
for (TypeExpression* type_exp : function_type_exp->parameters) {
argument_types.push_back(GetType(type_exp));
}
- return TypeOracle::GetFunctionPointerType(
+ return TypeOracle::GetBuiltinPointerType(
argument_types, GetType(function_type_exp->return_type));
}
}
Builtin* Declarations::FindSomeInternalBuiltinWithType(
- const FunctionPointerType* type) {
- for (auto& declarable : declarables_) {
+ const BuiltinPointerType* type) {
+ for (auto& declarable : GlobalContext::AllDeclarables()) {
if (Builtin* builtin = Builtin::DynamicCast(declarable.get())) {
if (!builtin->IsExternal() && builtin->kind() == Builtin::kStub &&
builtin->signature().return_type == type->return_type() &&
@@ -111,275 +109,223 @@ Builtin* Declarations::FindSomeInternalBuiltinWithType(
return nullptr;
}
-Value* Declarations::LookupValue(const std::string& name) {
- Declarable* d = Lookup(name);
- if (!d->IsValue()) {
- std::stringstream s;
- s << "declaration \"" << name << "\" is not a Value";
- ReportError(s.str());
- }
- return Value::cast(d);
-}
-
-Label* Declarations::LookupLabel(const std::string& name) {
- Declarable* d = Lookup(name);
- if (!d->IsLabel()) {
- std::stringstream s;
- s << "declaration \"" << name << "\" is not a Label";
- ReportError(s.str());
- }
- return Label::cast(d);
+Value* Declarations::LookupValue(const QualifiedName& name) {
+ return EnsureUnique(FilterDeclarables<Value>(Lookup(name)), name, "value");
}
Macro* Declarations::TryLookupMacro(const std::string& name,
const TypeVector& types) {
- Declarable* declarable = TryLookup(name);
- if (declarable != nullptr) {
- if (declarable->IsMacroList()) {
- for (auto& m : MacroList::cast(declarable)->list()) {
- if (m->signature().parameter_types.types == types &&
- !m->signature().parameter_types.var_args) {
- return m;
- }
- }
+ std::vector<Macro*> macros = TryLookup<Macro>(QualifiedName(name));
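+ // Exact overload match on the explicit parameter types; varargs macros
+ // are never returned by this lookup.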
+ for (auto& m : macros) {
+ auto signature_types = m->signature().GetExplicitTypes();
+ if (signature_types == types && !m->signature().parameter_types.var_args) {
+ return m;
}
}
return nullptr;
}
-Macro* Declarations::LookupMacro(const std::string& name,
- const TypeVector& types) {
- Macro* result = TryLookupMacro(name, types);
- if (result != nullptr) return result;
- std::stringstream stream;
- stream << "macro " << name << " with parameter types " << types
- << " is not defined";
- ReportError(stream.str());
- return nullptr;
+base::Optional<Builtin*> Declarations::TryLookupBuiltin(
+ const QualifiedName& name) {
+ std::vector<Builtin*> builtins = TryLookup<Builtin>(name);
+ if (builtins.empty()) return base::nullopt;
+ return EnsureUnique(builtins, name.name, "builtin");
}
-Builtin* Declarations::LookupBuiltin(const std::string& name) {
- Declarable* declarable = Lookup(name);
- if (declarable != nullptr) {
- if (declarable->IsBuiltin()) {
- return Builtin::cast(declarable);
- }
- ReportError(name + " is not a builtin");
- }
- ReportError(std::string("builtin ") + name + " is not defined");
- return nullptr;
+std::vector<Generic*> Declarations::LookupGeneric(const std::string& name) {
+ return EnsureNonempty(FilterDeclarables<Generic>(Lookup(QualifiedName(name))),
+ name, "generic");
}
-GenericList* Declarations::LookupGeneric(const std::string& name) {
- Declarable* declarable_list = Lookup(name);
- if (declarable_list != nullptr) {
- if (declarable_list->IsGenericList()) {
- return GenericList::cast(declarable_list);
- }
- ReportError(name + " is not a generic");
- }
- ReportError(std::string("generic ") + name + " is not defined");
- return nullptr;
+Generic* Declarations::LookupUniqueGeneric(const QualifiedName& name) {
+ return EnsureUnique(FilterDeclarables<Generic>(Lookup(name)), name,
+ "generic");
}
-ModuleConstant* Declarations::LookupModuleConstant(const std::string& name) {
- Declarable* declarable = Lookup(name);
- if (declarable != nullptr) {
- if (declarable->IsModuleConstant()) {
- return ModuleConstant::cast(declarable);
- }
- ReportError(name + " is not a constant");
- }
- ReportError(std::string("constant \"") + name + "\" is not defined");
- return nullptr;
+Namespace* Declarations::DeclareNamespace(const std::string& name) {
+ return Declare(name, std::unique_ptr<Namespace>(new Namespace(name)));
}
const AbstractType* Declarations::DeclareAbstractType(
- const std::string& name, const std::string& generated,
+ const std::string& name, bool transient, const std::string& generated,
base::Optional<const AbstractType*> non_constexpr_version,
const base::Optional<std::string>& parent) {
- CheckAlreadyDeclared(name, "type");
+ CheckAlreadyDeclared<TypeAlias>(name, "type");
const Type* parent_type = nullptr;
if (parent) {
- Declarable* maybe_parent_type = Lookup(*parent);
- if (maybe_parent_type == nullptr) {
- std::stringstream s;
- s << "cannot find parent type \"" << *parent << "\"";
- ReportError(s.str());
- }
- if (!maybe_parent_type->IsTypeAlias()) {
- std::stringstream s;
- s << "parent \"" << *parent << "\" of type \"" << name << "\""
- << " is not a type";
- ReportError(s.str());
- }
- parent_type = TypeAlias::cast(maybe_parent_type)->type();
+ parent_type = LookupType(QualifiedName{*parent});
}
const AbstractType* type = TypeOracle::GetAbstractType(
- parent_type, name, generated, non_constexpr_version);
- DeclareType(name, type);
+ parent_type, name, transient, generated, non_constexpr_version);
+ DeclareType(name, type, false);
return type;
}
-void Declarations::DeclareType(const std::string& name, const Type* type) {
- CheckAlreadyDeclared(name, "type");
- TypeAlias* result = new TypeAlias(type);
- Declare(name, std::unique_ptr<TypeAlias>(result));
-}
-
-void Declarations::DeclareStruct(Module* module, const std::string& name,
- const std::vector<NameAndType>& fields) {
- const StructType* new_type = TypeOracle::GetStructType(module, name, fields);
- DeclareType(name, new_type);
-}
-
-Label* Declarations::DeclareLabel(const std::string& name,
- base::Optional<Statement*> statement) {
- CheckAlreadyDeclared(name, "label");
- bool deferred = false;
- if (statement) {
- BlockStatement* block = BlockStatement::DynamicCast(*statement);
- deferred = block && block->deferred;
+void Declarations::DeclareType(const std::string& name, const Type* type,
+ bool redeclaration) {
+ CheckAlreadyDeclared<TypeAlias>(name, "type");
+ Declare(name, std::unique_ptr<TypeAlias>(new TypeAlias(type, redeclaration)));
+}
+
+StructType* Declarations::DeclareStruct(const std::string& name,
+ const std::vector<Field>& fields) {
+ StructType* new_type = TypeOracle::GetStructType(name, fields);
+ DeclareType(name, new_type, false);
+ return new_type;
+}
+
+ClassType* Declarations::DeclareClass(const Type* super_type,
+ const std::string& name, bool transient,
+ const std::string& generates,
+ std::vector<Field> fields, size_t size) {
+ std::vector<Field> this_struct_fields;
+ size_t struct_offset = 0;
+ const StructType* super_struct_type = nullptr;
+ // In order to ensure "atomicity" of object allocation, a class'
+ // constructors operate on a per-class internal struct rather than the class
+ // directly until the constructor has successfully completed and all class
+ // members are available. Create the appropriate struct type for use in the
+ // class' constructors, including a '_super' field in the struct that
+ // contains the values constructed by calls to super constructors.
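+ // For example (hypothetical): for "class B extends A { x: Smi }", the
+ // internal struct is roughly { _super: <A's struct>, x: Smi }, with slot
+ // offsets accumulated via LoweredSlotCount below.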
+ if (const ClassType* super_class = ClassType::DynamicCast(super_type)) {
+ super_struct_type = super_class->struct_type();
+ this_struct_fields.push_back(
+ {CurrentSourcePosition::Get(),
+ {kConstructorStructSuperFieldName, super_struct_type},
+ struct_offset,
+ false});
+ struct_offset += LoweredSlotCount(super_struct_type);
}
- Label* result = new Label(name, deferred);
- Declare(name, std::unique_ptr<Declarable>(result));
- return result;
-}
-
-MacroList* Declarations::GetMacroListForName(const std::string& name,
- const Signature& signature) {
- auto previous = chain_.Lookup(name);
- MacroList* macro_list = nullptr;
- if (previous == nullptr) {
- macro_list = new MacroList();
- Declare(name, std::unique_ptr<Declarable>(macro_list));
- } else if (!previous->IsMacroList()) {
- std::stringstream s;
- s << "cannot redeclare non-macro " << name << " as a macro";
- ReportError(s.str());
- } else {
- macro_list = MacroList::cast(previous);
+ for (auto& field : fields) {
+ const Type* field_type = field.name_and_type.type;
+ this_struct_fields.push_back({field.pos,
+ {field.name_and_type.name, field_type},
+ struct_offset,
+ false});
+ struct_offset += LoweredSlotCount(field_type);
}
- for (auto& macro : macro_list->list()) {
- if (signature.parameter_types.types ==
- macro->signature().parameter_types.types &&
- signature.parameter_types.var_args ==
- macro->signature().parameter_types.var_args) {
- std::stringstream s;
- s << "cannot redeclare " << name
- << " as a macro with identical parameter list "
- << signature.parameter_types;
- ReportError(s.str());
+ StructType* this_struct_type = DeclareStruct(
+ kClassConstructorThisStructPrefix + name, this_struct_fields);
+
+ ClassType* new_type =
+ TypeOracle::GetClassType(super_type, name, transient, generates,
+ std::move(fields), this_struct_type, size);
+ this_struct_type->SetDerivedFrom(new_type);
+ DeclareType(name, new_type, false);
+ return new_type;
+}
+
+Macro* Declarations::CreateMacro(
+ std::string external_name, std::string readable_name,
+ base::Optional<std::string> external_assembler_name, Signature signature,
+ bool transitioning, base::Optional<Statement*> body) {
+ if (!external_assembler_name) {
+ external_assembler_name = CurrentNamespace()->ExternalName();
+ }
+ return RegisterDeclarable(std::unique_ptr<Macro>(
+ new Macro(std::move(external_name), std::move(readable_name),
+ std::move(*external_assembler_name), std::move(signature),
+ transitioning, body)));
+}
+
+Macro* Declarations::DeclareMacro(
+ const std::string& name,
+ base::Optional<std::string> external_assembler_name,
+ const Signature& signature, bool transitioning,
+ base::Optional<Statement*> body, base::Optional<std::string> op) {
+ if (TryLookupMacro(name, signature.GetExplicitTypes())) {
+ ReportError("cannot redeclare macro ", name,
+ " with identical explicit parameters");
+ }
+ Macro* macro = CreateMacro(name, name, std::move(external_assembler_name),
+ signature, transitioning, body);
+ Declare(name, macro);
+ if (op) {
+ if (TryLookupMacro(*op, signature.GetExplicitTypes())) {
+ ReportError("cannot redeclare operator ", name,
+ " with identical explicit parameters");
}
+ DeclareOperator(*op, macro);
}
- return macro_list;
+ return macro;
}
-Macro* Declarations::DeclareMacro(const std::string& name,
- const Signature& signature,
- base::Optional<std::string> op) {
- Macro* macro = RegisterDeclarable(
- std::unique_ptr<Macro>(new Macro(name, signature, GetCurrentGeneric())));
- GetMacroListForName(name, signature)->AddMacro(macro);
- if (op) GetMacroListForName(*op, signature)->AddMacro(macro);
- return macro;
+Method* Declarations::CreateMethod(AggregateType* container_type,
+ const std::string& name, Signature signature,
+ bool transitioning, Statement* body) {
+ std::string generated_name{container_type->GetGeneratedMethodName(name)};
+ Method* result = RegisterDeclarable(std::unique_ptr<Method>(
+ new Method(container_type, std::move(generated_name), name,
+ CurrentNamespace()->ExternalName(), std::move(signature),
+ transitioning, body)));
+ container_type->RegisterMethod(result);
+ return result;
}
-Builtin* Declarations::DeclareBuiltin(const std::string& name,
- Builtin::Kind kind, bool external,
- const Signature& signature) {
- CheckAlreadyDeclared(name, "builtin");
- Builtin* result =
- new Builtin(name, kind, external, signature, GetCurrentGeneric());
- Declare(name, std::unique_ptr<Declarable>(result));
+Intrinsic* Declarations::CreateIntrinsic(const std::string& name,
+ const Signature& signature) {
+ Intrinsic* result = RegisterDeclarable(std::unique_ptr<Intrinsic>(
+ new Intrinsic(name, signature)));
return result;
}
-RuntimeFunction* Declarations::DeclareRuntimeFunction(
- const std::string& name, const Signature& signature) {
- CheckAlreadyDeclared(name, "runtime function");
- RuntimeFunction* result =
- new RuntimeFunction(name, signature, GetCurrentGeneric());
- Declare(name, std::unique_ptr<Declarable>(result));
+Intrinsic* Declarations::DeclareIntrinsic(const std::string& name,
+ const Signature& signature) {
+ Intrinsic* result = CreateIntrinsic(name, signature);
+ Declare(name, result);
return result;
}
-Variable* Declarations::CreateVariable(const std::string& var, const Type* type,
- bool is_const) {
- return RegisterDeclarable(
- std::unique_ptr<Variable>(new Variable(var, type, is_const)));
+Builtin* Declarations::CreateBuiltin(std::string external_name,
+ std::string readable_name,
+ Builtin::Kind kind, Signature signature,
+ bool transitioning,
+ base::Optional<Statement*> body) {
+ return RegisterDeclarable(std::unique_ptr<Builtin>(
+ new Builtin(std::move(external_name), std::move(readable_name), kind,
+ std::move(signature), transitioning, body)));
}
-Variable* Declarations::DeclareVariable(const std::string& var,
- const Type* type, bool is_const) {
- CheckAlreadyDeclared(var, "variable");
- Variable* result = new Variable(var, type, is_const);
- Declare(var, std::unique_ptr<Declarable>(result));
- return result;
+Builtin* Declarations::DeclareBuiltin(const std::string& name,
+ Builtin::Kind kind,
+ const Signature& signature,
+ bool transitioning,
+ base::Optional<Statement*> body) {
+ CheckAlreadyDeclared<Builtin>(name, "builtin");
+ return Declare(
+ name, CreateBuiltin(name, name, kind, signature, transitioning, body));
}
-Parameter* Declarations::DeclareParameter(const std::string& name,
- std::string external_name,
- const Type* type) {
- CheckAlreadyDeclared(name, "parameter");
- Parameter* result = new Parameter(name, std::move(external_name), type);
- Declare(name, std::unique_ptr<Declarable>(result));
- return result;
+RuntimeFunction* Declarations::DeclareRuntimeFunction(
+ const std::string& name, const Signature& signature, bool transitioning) {
+ CheckAlreadyDeclared<RuntimeFunction>(name, "runtime function");
+ return Declare(name,
+ RegisterDeclarable(std::unique_ptr<RuntimeFunction>(
+ new RuntimeFunction(name, signature, transitioning))));
}
void Declarations::DeclareExternConstant(const std::string& name,
const Type* type, std::string value) {
- CheckAlreadyDeclared(name, "constant, parameter or arguments");
+ CheckAlreadyDeclared<Value>(name, "constant");
ExternConstant* result = new ExternConstant(name, type, value);
Declare(name, std::unique_ptr<Declarable>(result));
}
-ModuleConstant* Declarations::DeclareModuleConstant(const std::string& name,
- const Type* type) {
- CheckAlreadyDeclared(name, "module constant");
- ModuleConstant* result = new ModuleConstant(name, type);
+NamespaceConstant* Declarations::DeclareNamespaceConstant(
+ const std::string& name, const Type* type, Expression* body) {
+ CheckAlreadyDeclared<Value>(name, "constant");
+ NamespaceConstant* result = new NamespaceConstant(name, type, body);
Declare(name, std::unique_ptr<Declarable>(result));
return result;
}
-Generic* Declarations::DeclareGeneric(const std::string& name, Module* module,
+Generic* Declarations::DeclareGeneric(const std::string& name,
GenericDeclaration* generic) {
- auto previous = chain_.Lookup(name);
- GenericList* generic_list = nullptr;
- if (previous == nullptr) {
- generic_list = new GenericList();
- Declare(name, std::unique_ptr<Declarable>(generic_list));
- } else if (!previous->IsGenericList()) {
- std::stringstream s;
- s << "cannot redeclare non-generic " << name << " as a generic";
- ReportError(s.str());
- } else {
- generic_list = GenericList::cast(previous);
- }
- Generic* result = RegisterDeclarable(
- std::unique_ptr<Generic>(new Generic(name, module, generic)));
- generic_list->AddGeneric(result);
- generic_declaration_scopes_[result] = GetScopeChainSnapshot();
- return result;
+ return Declare(name, std::unique_ptr<Generic>(new Generic(name, generic)));
}
-TypeVector Declarations::GetCurrentSpecializationTypeNamesVector() {
- TypeVector result;
- if (current_generic_specialization_ != nullptr) {
- result = current_generic_specialization_->second;
- }
- return result;
-}
-
-base::Optional<Generic*> Declarations::GetCurrentGeneric() {
- if (current_generic_specialization_ != nullptr) {
- return current_generic_specialization_->first;
- }
- return base::nullopt;
-}
-
-std::string GetGeneratedCallableName(const std::string& name,
- const TypeVector& specialized_types) {
+std::string Declarations::GetGeneratedCallableName(
+ const std::string& name, const TypeVector& specialized_types) {
std::string result = name;
for (auto type : specialized_types) {
std::string type_string = type->MangledName();
@@ -388,6 +334,11 @@ std::string GetGeneratedCallableName(const std::string& name,
return result;
}
+Macro* Declarations::DeclareOperator(const std::string& name, Macro* m) {
+ GlobalContext::GetDefaultNamespace()->AddDeclarable(name, m);
+ return m;
+}
+
} // namespace torque
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/torque/declarations.h b/deps/v8/src/torque/declarations.h
index 5a45e5dbda..efc01e8138 100644
--- a/deps/v8/src/torque/declarations.h
+++ b/deps/v8/src/torque/declarations.h
@@ -8,7 +8,6 @@
#include <string>
#include "src/torque/declarable.h"
-#include "src/torque/scope.h"
#include "src/torque/utils.h"
namespace v8 {
@@ -19,215 +18,134 @@ static constexpr const char* const kFromConstexprMacroName = "FromConstexpr";
static constexpr const char* kTrueLabelName = "_True";
static constexpr const char* kFalseLabelName = "_False";
-class Declarations {
- public:
- Declarations()
- : unique_declaration_number_(0),
- current_generic_specialization_(nullptr) {}
-
- Declarable* TryLookup(const std::string& name) { return chain_.Lookup(name); }
-
- Declarable* Lookup(const std::string& name) {
- Declarable* d = TryLookup(name);
- if (d == nullptr) {
- std::stringstream s;
- s << "cannot find \"" << name << "\"";
- ReportError(s.str());
- }
- return d;
- }
-
- Declarable* LookupGlobalScope(const std::string& name) {
- Declarable* d = chain_.LookupGlobalScope(name);
- if (d == nullptr) {
- std::stringstream s;
- s << "cannot find \"" << name << "\" in global scope";
- ReportError(s.str());
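+// Narrow a list of declarables to those of kind T, e.g.
+// FilterDeclarables<Macro>(Lookup(name)) keeps only the Macro entries.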
+template <class T>
+std::vector<T*> FilterDeclarables(const std::vector<Declarable*>& list) {
+ std::vector<T*> result;
+ for (Declarable* declarable : list) {
+ if (T* t = T::DynamicCast(declarable)) {
+ result.push_back(t);
}
- return d;
}
+ return result;
+}
- const Type* LookupType(const std::string& name);
- const Type* LookupGlobalType(const std::string& name);
- const Type* GetType(TypeExpression* type_expression);
-
- Builtin* FindSomeInternalBuiltinWithType(const FunctionPointerType* type);
-
- Value* LookupValue(const std::string& name);
-
- Macro* TryLookupMacro(const std::string& name, const TypeVector& types);
- Macro* LookupMacro(const std::string& name, const TypeVector& types);
-
- Builtin* LookupBuiltin(const std::string& name);
-
- Label* TryLookupLabel(const std::string& name) {
- Declarable* d = TryLookup(name);
- return d && d->IsLabel() ? Label::cast(d) : nullptr;
+class Declarations {
+ public:
+ static std::vector<Declarable*> TryLookup(const QualifiedName& name) {
+ return CurrentScope::Get()->Lookup(name);
}
- Label* LookupLabel(const std::string& name);
-
- GenericList* LookupGeneric(const std::string& name);
- ModuleConstant* LookupModuleConstant(const std::string& name);
-
- const AbstractType* DeclareAbstractType(
- const std::string& name, const std::string& generated,
- base::Optional<const AbstractType*> non_constexpr_version,
- const base::Optional<std::string>& parent = {});
-
- void DeclareType(const std::string& name, const Type* type);
-
- void DeclareStruct(Module* module, const std::string& name,
- const std::vector<NameAndType>& fields);
-
- Label* DeclareLabel(const std::string& name,
- base::Optional<Statement*> statement = {});
-
- Macro* DeclareMacro(const std::string& name, const Signature& signature,
- base::Optional<std::string> op = {});
-
- Builtin* DeclareBuiltin(const std::string& name, Builtin::Kind kind,
- bool external, const Signature& signature);
-
- RuntimeFunction* DeclareRuntimeFunction(const std::string& name,
- const Signature& signature);
-
- Variable* CreateVariable(const std::string& var, const Type* type,
- bool is_const);
- Variable* DeclareVariable(const std::string& var, const Type* type,
- bool is_const);
-
- Parameter* DeclareParameter(const std::string& name,
- std::string external_name, const Type* type);
-
- void DeclareExternConstant(const std::string& name, const Type* type,
- std::string value);
- ModuleConstant* DeclareModuleConstant(const std::string& name,
- const Type* type);
-
- Generic* DeclareGeneric(const std::string& name, Module* module,
- GenericDeclaration* generic);
-
- TypeVector GetCurrentSpecializationTypeNamesVector();
- base::Optional<Generic*> GetCurrentGeneric();
-
- ScopeChain::Snapshot GetScopeChainSnapshot() { return chain_.TaskSnapshot(); }
- std::set<const Variable*> GetLiveVariables() {
- return chain_.GetLiveVariables();
+ static std::vector<Declarable*> TryLookupShallow(const QualifiedName& name) {
+ return CurrentScope::Get()->LookupShallow(name);
}
- bool IsDeclaredInCurrentScope(const std::string& name);
-
- Statement* next_body() const { return next_body_; }
-
- void PrintScopeChain() { chain_.Print(); }
-
- class ModuleScopeActivator;
- class NodeScopeActivator;
- class CleanNodeScopeActivator;
- class GenericScopeActivator;
- class ScopedGenericSpecializationKey;
- class ScopedGenericScopeChainSnapshot;
-
- private:
- Scope* GetModuleScope(const Module* module);
- Scope* GetNodeScope(const AstNode* node, bool reset_scope = false);
- Scope* GetGenericScope(Generic* generic, const TypeVector& types);
-
template <class T>
- T* RegisterDeclarable(std::unique_ptr<T> d) {
- T* ptr = d.get();
- declarables_.push_back(std::move(d));
- return ptr;
+ static std::vector<T*> TryLookup(const QualifiedName& name) {
+ return FilterDeclarables<T>(TryLookup(name));
}
- MacroList* GetMacroListForName(const std::string& name,
- const Signature& signature);
-
- void Declare(const std::string& name, std::unique_ptr<Declarable> d) {
- chain_.Declare(name, RegisterDeclarable(std::move(d)));
+ static std::vector<Declarable*> Lookup(const QualifiedName& name) {
+ std::vector<Declarable*> d = TryLookup(name);
+ if (d.empty()) {
+ ReportError("cannot find \"", name, "\"");
+ }
+ return d;
}
- int GetNextUniqueDeclarationNumber() { return unique_declaration_number_++; }
+ static std::vector<Declarable*> LookupGlobalScope(const std::string& name);
- void CheckAlreadyDeclared(const std::string& name, const char* new_type);
+ static const Type* LookupType(const QualifiedName& name);
+ static const Type* LookupType(std::string name);
+ static const Type* LookupGlobalType(const std::string& name);
+ static const Type* GetType(TypeExpression* type_expression);
- int unique_declaration_number_;
- ScopeChain chain_;
- const SpecializationKey* current_generic_specialization_;
- Statement* next_body_;
- std::vector<std::unique_ptr<Declarable>> declarables_;
- std::map<const Module*, Scope*> module_scopes_;
- std::map<std::pair<const AstNode*, TypeVector>, Scope*> scopes_;
- std::map<Generic*, ScopeChain::Snapshot> generic_declaration_scopes_;
-};
-
-class Declarations::NodeScopeActivator {
- public:
- NodeScopeActivator(Declarations* declarations, AstNode* node)
- : activator_(declarations->GetNodeScope(node)) {}
+ static Builtin* FindSomeInternalBuiltinWithType(
+ const BuiltinPointerType* type);
- private:
- Scope::Activator activator_;
-};
+ static Value* LookupValue(const QualifiedName& name);
-class Declarations::ModuleScopeActivator {
- public:
- ModuleScopeActivator(Declarations* declarations, const Module* module)
- : activator_(declarations->GetModuleScope(module)) {}
-
- private:
- Scope::Activator activator_;
-};
+ static Macro* TryLookupMacro(const std::string& name,
+ const TypeVector& types);
+ static base::Optional<Builtin*> TryLookupBuiltin(const QualifiedName& name);
-class Declarations::CleanNodeScopeActivator {
- public:
- CleanNodeScopeActivator(Declarations* declarations, AstNode* node)
- : activator_(declarations->GetNodeScope(node, true)) {}
+ static std::vector<Generic*> LookupGeneric(const std::string& name);
+ static Generic* LookupUniqueGeneric(const QualifiedName& name);
- private:
- Scope::Activator activator_;
-};
+ static Namespace* DeclareNamespace(const std::string& name);
-class Declarations::GenericScopeActivator {
- public:
- GenericScopeActivator(Declarations* declarations,
- const SpecializationKey& key)
- : activator_(declarations->GetGenericScope(key.first, key.second)) {}
+ static const AbstractType* DeclareAbstractType(
+ const std::string& name, bool transient, const std::string& generated,
+ base::Optional<const AbstractType*> non_constexpr_version,
+ const base::Optional<std::string>& parent = {});
- private:
- Scope::Activator activator_;
-};
+ static void DeclareType(const std::string& name, const Type* type,
+ bool redeclaration);
+
+ static StructType* DeclareStruct(const std::string& name,
+ const std::vector<Field>& fields);
+
+ static ClassType* DeclareClass(const Type* super, const std::string& name,
+ bool transient, const std::string& generates,
+ std::vector<Field> fields, size_t size);
+
+ static Macro* CreateMacro(std::string external_name,
+ std::string readable_name,
+ base::Optional<std::string> external_assembler_name,
+ Signature signature, bool transitioning,
+ base::Optional<Statement*> body);
+ static Macro* DeclareMacro(
+ const std::string& name,
+ base::Optional<std::string> external_assembler_name,
+ const Signature& signature, bool transitioning,
+ base::Optional<Statement*> body, base::Optional<std::string> op = {});
+
+ static Method* CreateMethod(AggregateType* class_type,
+ const std::string& name, Signature signature,
+ bool transitioning, Statement* body);
+
+ static Intrinsic* CreateIntrinsic(const std::string& name,
+ const Signature& signature);
+
+ static Intrinsic* DeclareIntrinsic(const std::string& name,
+ const Signature& signature);
+
+ static Builtin* CreateBuiltin(std::string external_name,
+ std::string readable_name, Builtin::Kind kind,
+ Signature signature, bool transitioning,
+ base::Optional<Statement*> body);
+ static Builtin* DeclareBuiltin(const std::string& name, Builtin::Kind kind,
+ const Signature& signature, bool transitioning,
+ base::Optional<Statement*> body);
+
+ static RuntimeFunction* DeclareRuntimeFunction(const std::string& name,
+ const Signature& signature,
+ bool transitioning);
+
+ static void DeclareExternConstant(const std::string& name, const Type* type,
+ std::string value);
+ static NamespaceConstant* DeclareNamespaceConstant(const std::string& name,
+ const Type* type,
+ Expression* body);
+
+ static Generic* DeclareGeneric(const std::string& name,
+ GenericDeclaration* generic);
-class Declarations::ScopedGenericSpecializationKey {
- public:
- ScopedGenericSpecializationKey(Declarations* declarations,
- const SpecializationKey& key)
- : declarations_(declarations) {
- declarations->current_generic_specialization_ = &key;
+ template <class T>
+ static T* Declare(const std::string& name, T* d) {
+ CurrentScope::Get()->AddDeclarable(name, d);
+ return d;
}
- ~ScopedGenericSpecializationKey() {
- declarations_->current_generic_specialization_ = nullptr;
+ template <class T>
+ static T* Declare(const std::string& name, std::unique_ptr<T> d) {
+ return CurrentScope::Get()->AddDeclarable(name,
+ RegisterDeclarable(std::move(d)));
}
+ static Macro* DeclareOperator(const std::string& name, Macro* m);
- private:
- Declarations* declarations_;
+ static std::string GetGeneratedCallableName(
+ const std::string& name, const TypeVector& specialized_types);
};
-class Declarations::ScopedGenericScopeChainSnapshot {
- public:
- ScopedGenericScopeChainSnapshot(Declarations* declarations,
- const SpecializationKey& key)
- : restorer_(declarations->generic_declaration_scopes_[key.first]) {}
- ~ScopedGenericScopeChainSnapshot() = default;
-
- private:
- ScopeChain::ScopedSnapshotRestorer restorer_;
-};
-
-std::string GetGeneratedCallableName(const std::string& name,
- const TypeVector& specialized_types);
-
} // namespace torque
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/torque/earley-parser.h b/deps/v8/src/torque/earley-parser.h
index 1e77734ab6..8efe0c704d 100644
--- a/deps/v8/src/torque/earley-parser.h
+++ b/deps/v8/src/torque/earley-parser.h
@@ -121,7 +121,7 @@ class ParseResultIterator {
size_t i_ = 0;
MatchedInput matched_input_;
- DISALLOW_COPY_AND_MOVE_AND_ASSIGN(ParseResultIterator);
+ DISALLOW_COPY_AND_ASSIGN(ParseResultIterator);
};
struct LexerResult {
@@ -196,7 +196,7 @@ class Symbol {
std::vector<std::unique_ptr<Rule>> rules_;
// Disallow copying and moving to ensure Symbol has a stable address.
- DISALLOW_COPY_AND_MOVE_AND_ASSIGN(Symbol);
+ DISALLOW_COPY_AND_ASSIGN(Symbol);
};
// Items are the core datastructure of Earley's algorithm.
diff --git a/deps/v8/src/torque/file-visitor.cc b/deps/v8/src/torque/file-visitor.cc
index 865b7b456d..deeebded9d 100644
--- a/deps/v8/src/torque/file-visitor.cc
+++ b/deps/v8/src/torque/file-visitor.cc
@@ -5,7 +5,6 @@
#include "src/torque/file-visitor.h"
#include "src/torque/declarable.h"
-#include "src/torque/parameter-difference.h"
namespace v8 {
namespace internal {
@@ -17,66 +16,19 @@ Signature FileVisitor::MakeSignature(const CallableNodeSignature* signature) {
LabelDeclaration def = {label.name, GetTypeVector(label.types)};
definition_vector.push_back(def);
}
+ base::Optional<std::string> arguments_variable;
+ if (signature->parameters.has_varargs)
+ arguments_variable = signature->parameters.arguments_variable;
Signature result{signature->parameters.names,
+ arguments_variable,
{GetTypeVector(signature->parameters.types),
signature->parameters.has_varargs},
- declarations()->GetType(signature->return_type),
+ signature->parameters.implicit_count,
+ Declarations::GetType(signature->return_type),
definition_vector};
return result;
}
-Signature FileVisitor::MakeSignatureFromReturnType(
- TypeExpression* return_type) {
- Signature result{{}, {{}, false}, declarations()->GetType(return_type), {}};
- return result;
-}
-
-void FileVisitor::QueueGenericSpecialization(
- const SpecializationKey& key, CallableNode* callable,
- const CallableNodeSignature* signature, base::Optional<Statement*> body) {
- pending_specializations_.push_back(
- {key, callable, signature, body, CurrentSourcePosition::Get()});
-}
-
-void FileVisitor::SpecializeGeneric(
- const PendingSpecialization& specialization) {
- CurrentSourcePosition::Scope scope(specialization.request_position);
- if (completed_specializations_.find(specialization.key) !=
- completed_specializations_.end()) {
- std::stringstream stream;
- stream << "cannot redeclare specialization of "
- << specialization.key.first->name() << " with types <"
- << specialization.key.second << ">";
- ReportError(stream.str());
- }
- if (!specialization.body) {
- std::stringstream stream;
- stream << "missing specialization of " << specialization.key.first->name()
- << " with types <" << specialization.key.second << ">";
- ReportError(stream.str());
- }
- Declarations::ScopedGenericSpecializationKey instantiation(
- declarations(), specialization.key);
- FileVisitor::ScopedModuleActivator activator(
- this, specialization.key.first->module());
- Specialize(specialization.key, specialization.callable,
- specialization.signature, *specialization.body);
- completed_specializations_.insert(specialization.key);
-}
-
-void FileVisitor::DrainSpecializationQueue() {
- while (pending_specializations_.size() != 0) {
- PendingSpecialization specialization(pending_specializations_.front());
- pending_specializations_.pop_front();
- if (completed_specializations_.find(specialization.key) ==
- completed_specializations_.end()) {
- Declarations::ScopedGenericScopeChainSnapshot scope(declarations(),
- specialization.key);
- SpecializeGeneric(specialization);
- }
- }
-}
-
} // namespace torque
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/torque/file-visitor.h b/deps/v8/src/torque/file-visitor.h
index 7d79e9acba..4d9700bd6c 100644
--- a/deps/v8/src/torque/file-visitor.h
+++ b/deps/v8/src/torque/file-visitor.h
@@ -19,74 +19,16 @@ namespace torque {
class FileVisitor {
public:
- explicit FileVisitor(GlobalContext& global_context)
- : global_context_(global_context),
- declarations_(global_context.declarations()),
- module_(global_context.GetDefaultModule()) {}
-
TypeVector GetTypeVector(const std::vector<TypeExpression*>& v) {
TypeVector result;
for (TypeExpression* t : v) {
- result.push_back(declarations()->GetType(t));
+ result.push_back(Declarations::GetType(t));
}
return result;
}
- Ast* ast() { return global_context_.ast(); }
- Declarations* declarations() { return global_context_.declarations(); }
-
- void DrainSpecializationQueue();
-
- class ScopedModuleActivator {
- public:
- ScopedModuleActivator(FileVisitor* visitor, Module* module)
- : visitor_(visitor), saved_module_(visitor->CurrentModule()) {
- visitor->module_ = module;
- }
- ~ScopedModuleActivator() { visitor_->module_ = saved_module_; }
-
- private:
- FileVisitor* visitor_;
- Module* saved_module_;
- };
-
protected:
- Module* CurrentModule() const { return module_; }
-
- friend class ScopedModuleActivator;
-
- std::string GetParameterVariableFromName(const std::string& name) {
- return std::string("p_") + name;
- }
-
Signature MakeSignature(const CallableNodeSignature* signature);
- Signature MakeSignatureFromReturnType(TypeExpression* return_type);
-
- struct PendingSpecialization {
- SpecializationKey key;
- CallableNode* callable;
- const CallableNodeSignature* signature;
- base::Optional<Statement*> body;
- SourcePosition request_position;
- };
-
- void QueueGenericSpecialization(const SpecializationKey& key,
- CallableNode* callable,
- const CallableNodeSignature* signature,
- base::Optional<Statement*> body);
-
- void SpecializeGeneric(const PendingSpecialization& specialization);
-
- virtual void Specialize(const SpecializationKey&, CallableNode* callable,
- const CallableNodeSignature* signature,
- Statement* body) = 0;
-
- GlobalContext& global_context_;
- Declarations* declarations_;
- std::deque<PendingSpecialization> pending_specializations_;
- std::set<SpecializationKey> completed_specializations_;
- Callable* current_callable_;
- Module* module_;
};
} // namespace torque
diff --git a/deps/v8/src/torque/global-context.h b/deps/v8/src/torque/global-context.h
index cd20c332f3..949362ca1c 100644
--- a/deps/v8/src/torque/global-context.h
+++ b/deps/v8/src/torque/global-context.h
@@ -5,119 +5,80 @@
#ifndef V8_TORQUE_GLOBAL_CONTEXT_H_
#define V8_TORQUE_GLOBAL_CONTEXT_H_
+#include <map>
+
#include "src/torque/declarable.h"
#include "src/torque/declarations.h"
-#include "src/torque/scope.h"
#include "src/torque/type-oracle.h"
namespace v8 {
namespace internal {
namespace torque {
-class GlobalContext;
-class Scope;
-class TypeOracle;
-class Builtin;
-class Label;
-
-class Module {
+class GlobalContext : public ContextualClass<GlobalContext> {
public:
- explicit Module(const std::string& name, bool is_default)
- : name_(name), is_default_(is_default) {}
- const std::string& name() const { return name_; }
- bool IsDefault() const { return is_default_; }
- std::ostream& source_stream() { return source_stream_; }
- std::ostream& header_stream() { return header_stream_; }
- std::string source() { return source_stream_.str(); }
- std::string header() { return header_stream_.str(); }
+ explicit GlobalContext(Ast ast) : verbose_(false), ast_(std::move(ast)) {
+ CurrentScope::Scope current_scope(nullptr);
+ CurrentSourcePosition::Scope current_source_position(
+ SourcePosition{CurrentSourceFile::Get(), -1, -1});
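+ // The default namespace "base" is where top-level declarations land
+ // when no explicit namespace block encloses them.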
+ default_namespace_ =
+ RegisterDeclarable(base::make_unique<Namespace>("base"));
+ }
+ static Namespace* GetDefaultNamespace() { return Get().default_namespace_; }
+ template <class T>
+ T* RegisterDeclarable(std::unique_ptr<T> d) {
+ T* ptr = d.get();
+ declarables_.push_back(std::move(d));
+ return ptr;
+ }
- private:
- std::string name_;
- bool is_default_;
- std::stringstream header_stream_;
- std::stringstream source_stream_;
-};
+ static const std::vector<std::unique_ptr<Declarable>>& AllDeclarables() {
+ return Get().declarables_;
+ }
-class GlobalContext {
- public:
- explicit GlobalContext(Ast ast)
- : verbose_(false),
- next_label_number_(0),
- default_module_(GetModule("base", true)),
- ast_(std::move(ast)) {}
- Module* GetDefaultModule() { return default_module_; }
- Module* GetModule(const std::string& name, bool is_default = false) {
- auto i = modules_.find(name);
- if (i != modules_.end()) {
- return i->second.get();
+ static std::vector<Namespace*> GetNamespaces() {
+ std::vector<Namespace*> result;
+ for (auto& declarable : AllDeclarables()) {
+ if (Namespace* n = Namespace::DynamicCast(declarable.get())) {
+ result.push_back(n);
+ }
}
- Module* module = new Module(name, is_default);
- modules_[name] = std::unique_ptr<Module>(module);
- return module;
+ return result;
}
- int GetNextLabelNumber() { return next_label_number_++; }
-
- const std::map<std::string, std::unique_ptr<Module>>& GetModules() const {
- return modules_;
+ static void RegisterClass(const std::string& name,
+ const ClassType* new_class) {
+ Get().classes_[name] = new_class;
}
- void SetVerbose() { verbose_ = true; }
- bool verbose() const { return verbose_; }
-
- friend class CurrentCallableActivator;
- friend class BreakContinueActivator;
+ static const std::map<std::string, const ClassType*>& GetClasses() {
+ return Get().classes_;
+ }
- Callable* GetCurrentCallable() const { return current_callable_; }
- Block* GetCurrentBreak() const { return break_continue_stack_.back().first; }
- Block* GetCurrentContinue() const {
- return break_continue_stack_.back().second;
+ static void AddCppInclude(std::string include_path) {
+ Get().cpp_includes_.push_back(std::move(include_path));
+ }
+ static const std::vector<std::string>& CppIncludes() {
+ return Get().cpp_includes_;
}
- Declarations* declarations() { return &declarations_; }
- Ast* ast() { return &ast_; }
+ static void SetVerbose() { Get().verbose_ = true; }
+ static bool verbose() { return Get().verbose_; }
+ static Ast* ast() { return &Get().ast_; }
private:
bool verbose_;
- int next_label_number_;
- Declarations declarations_;
- Callable* current_callable_;
- std::vector<std::pair<Block*, Block*>> break_continue_stack_;
- std::map<std::string, std::unique_ptr<Module>> modules_;
- Module* default_module_;
+ Namespace* default_namespace_;
Ast ast_;
+ std::vector<std::unique_ptr<Declarable>> declarables_;
+ std::vector<std::string> cpp_includes_;
+ std::map<std::string, const ClassType*> classes_;
};
-class CurrentCallableActivator {
- public:
- CurrentCallableActivator(GlobalContext& context, Callable* callable,
- CallableNode* decl)
- : context_(context), scope_activator_(context.declarations(), decl) {
- remembered_callable_ = context_.current_callable_;
- context_.current_callable_ = callable;
- }
- ~CurrentCallableActivator() {
- context_.current_callable_ = remembered_callable_;
- }
-
- private:
- GlobalContext& context_;
- Callable* remembered_callable_;
- Declarations::NodeScopeActivator scope_activator_;
-};
-
-class BreakContinueActivator {
- public:
- BreakContinueActivator(GlobalContext& context, Block* break_block,
- Block* continue_block)
- : context_(context) {
- context_.break_continue_stack_.push_back({break_block, continue_block});
- }
- ~BreakContinueActivator() { context_.break_continue_stack_.pop_back(); }
-
- private:
- GlobalContext& context_;
-};
+template <class T>
+T* RegisterDeclarable(std::unique_ptr<T> d) {
+ return GlobalContext::Get().RegisterDeclarable(std::move(d));
+}
} // namespace torque
} // namespace internal
diff --git a/deps/v8/src/torque/implementation-visitor.cc b/deps/v8/src/torque/implementation-visitor.cc
index 89c0c70416..5b52a31cf3 100644
--- a/deps/v8/src/torque/implementation-visitor.cc
+++ b/deps/v8/src/torque/implementation-visitor.cc
@@ -5,6 +5,7 @@
#include <algorithm>
#include "src/torque/csa-generator.h"
+#include "src/torque/declaration-visitor.h"
#include "src/torque/implementation-visitor.h"
#include "src/torque/parameter-difference.h"
@@ -27,6 +28,7 @@ VisitResult ImplementationVisitor::Visit(Expression* expr) {
const Type* ImplementationVisitor::Visit(Statement* stmt) {
CurrentSourcePosition::Scope scope(stmt->pos);
+ StackScope stack_scope(this);
const Type* result;
switch (stmt->kind) {
#define ENUM_ITEM(name) \
@@ -43,102 +45,53 @@ const Type* ImplementationVisitor::Visit(Statement* stmt) {
return result;
}
-void ImplementationVisitor::Visit(Declaration* decl) {
- CurrentSourcePosition::Scope scope(decl->pos);
- switch (decl->kind) {
-#define ENUM_ITEM(name) \
- case AstNode::Kind::k##name: \
- return Visit(name::cast(decl));
- AST_DECLARATION_NODE_KIND_LIST(ENUM_ITEM)
-#undef ENUM_ITEM
- default:
- UNIMPLEMENTED();
- }
-}
+void ImplementationVisitor::BeginNamespaceFile(Namespace* nspace) {
+ std::ostream& source = nspace->source_stream();
+ std::ostream& header = nspace->header_stream();
-void ImplementationVisitor::Visit(CallableNode* decl,
- const Signature& signature, Statement* body) {
- switch (decl->kind) {
-#define ENUM_ITEM(name) \
- case AstNode::Kind::k##name: \
- return Visit(name::cast(decl), signature, body);
- AST_CALLABLE_NODE_KIND_LIST(ENUM_ITEM)
-#undef ENUM_ITEM
- default:
- UNIMPLEMENTED();
+ for (const std::string& include_path : GlobalContext::CppIncludes()) {
+ source << "#include " << StringLiteralQuote(include_path) << "\n";
}
-}
-
-void ImplementationVisitor::BeginModuleFile(Module* module) {
- std::ostream& source = module->source_stream();
- std::ostream& header = module->header_stream();
- if (module->IsDefault()) {
- source << "#include \"src/torque-assembler.h\"";
- } else {
- source << "#include \"src/builtins/builtins-" +
- DashifyString(module->name()) + "-gen.h\"";
+ for (Namespace* n : GlobalContext::Get().GetNamespaces()) {
+ source << "#include \"torque-generated/builtins-" +
+ DashifyString(n->name()) + "-from-dsl-gen.h\"\n";
}
source << "\n";
- source << "#include \"src/objects/arguments.h\"\n";
- source << "#include \"src/builtins/builtins-utils-gen.h\"\n";
- source << "#include \"src/builtins/builtins.h\"\n";
- source << "#include \"src/code-factory.h\"\n";
- source << "#include \"src/elements-kind.h\"\n";
- source << "#include \"src/heap/factory-inl.h\"\n";
- source << "#include \"src/objects.h\"\n";
- source << "#include \"src/objects/bigint.h\"\n";
-
- source << "#include \"builtins-" + DashifyString(module->name()) +
- "-from-dsl-gen.h\"\n\n";
source << "namespace v8 {\n"
<< "namespace internal {\n"
- << "\n"
- << "using Node = compiler::Node;\n"
<< "\n";
- std::string upper_name(module->name());
+ std::string upper_name(nspace->name());
transform(upper_name.begin(), upper_name.end(), upper_name.begin(),
::toupper);
std::string headerDefine =
std::string("V8_TORQUE_") + upper_name + "_FROM_DSL_BASE_H__";
header << "#ifndef " << headerDefine << "\n";
header << "#define " << headerDefine << "\n\n";
- if (module->IsDefault()) {
- header << "#include \"src/torque-assembler.h\"";
- } else {
- header << "#include \"src/builtins/builtins-" +
- DashifyString(module->name()) + "-gen.h\"\n";
+ header << "#include \"src/compiler/code-assembler.h\"\n";
+ if (nspace != GlobalContext::GetDefaultNamespace()) {
+ header << "#include \"src/code-stub-assembler.h\"\n";
}
- header << "\n\n ";
+ header << "\n";
header << "namespace v8 {\n"
<< "namespace internal {\n"
<< "\n";
- header << "class " << GetDSLAssemblerName(module) << ": public "
- << GetBaseAssemblerName(module) << " {\n";
+ header << "class " << nspace->ExternalName() << " {\n";
header << " public:\n";
- header << " explicit " << GetDSLAssemblerName(module)
- << "(compiler::CodeAssemblerState* state) : "
- << GetBaseAssemblerName(module) << "(state) {}\n";
-
- header << "\n";
- header << " using Node = compiler::Node;\n";
- header << " template <class T>\n";
- header << " using TNode = compiler::TNode<T>;\n";
- header << " template <class T>\n";
- header << " using SloppyTNode = compiler::SloppyTNode<T>;\n\n";
+ header << " explicit " << nspace->ExternalName()
+ << "(compiler::CodeAssemblerState* state) : state_(state), ca_(state) "
+ "{ USE(state_, ca_); }\n";
}
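+// For orientation (editorial sketch; the real class name comes from
+// Namespace::ExternalName()): for a namespace "array", BeginNamespaceFile
+// above emits a header shaped roughly like
+//   class ArrayBuiltinsFromDSLAssembler {
+//    public:
+//     explicit ArrayBuiltinsFromDSLAssembler(
+//         compiler::CodeAssemblerState* state)
+//         : state_(state), ca_(state) { USE(state_, ca_); }
+//     ...
+//   };
+// with the private state_/ca_ members and the closing brace appended by
+// EndNamespaceFile below.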
-void ImplementationVisitor::EndModuleFile(Module* module) {
- std::ostream& source = module->source_stream();
- std::ostream& header = module->header_stream();
-
- DrainSpecializationQueue();
+void ImplementationVisitor::EndNamespaceFile(Namespace* nspace) {
+ std::ostream& source = nspace->source_stream();
+ std::ostream& header = nspace->header_stream();
- std::string upper_name(module->name());
+ std::string upper_name(nspace->name());
transform(upper_name.begin(), upper_name.end(), upper_name.begin(),
::toupper);
std::string headerDefine =
@@ -148,32 +101,28 @@ void ImplementationVisitor::EndModuleFile(Module* module) {
<< "} // namespace v8\n"
<< "\n";
- header << "};\n\n";
+ header << " private:\n"
+ << " compiler::CodeAssemblerState* const state_;\n"
+ << " compiler::CodeAssembler ca_;"
+ << "}; \n\n";
header << "} // namespace internal\n"
<< "} // namespace v8\n"
<< "\n";
header << "#endif // " << headerDefine << "\n";
}
-void ImplementationVisitor::Visit(ModuleDeclaration* decl) {
- Module* module = decl->GetModule();
- Module* saved_module = module_;
- module_ = module;
- Declarations::ModuleScopeActivator scope(declarations(), decl->GetModule());
- for (auto& child : decl->declarations) Visit(child);
- module_ = saved_module;
-}
+void ImplementationVisitor::Visit(NamespaceConstant* decl) {
+ Signature signature{{}, base::nullopt, {{}, false}, 0, decl->type(), {}};
+ const std::string& name = decl->name();
-void ImplementationVisitor::Visit(ConstDeclaration* decl) {
- Signature signature = MakeSignatureFromReturnType(decl->type);
- std::string name = decl->name;
+ BindingsManagersScope bindings_managers_scope;
header_out() << " ";
GenerateFunctionDeclaration(header_out(), "", name, signature, {});
header_out() << ";\n";
GenerateFunctionDeclaration(source_out(),
- GetDSLAssemblerName(CurrentModule()) + "::", name,
+ CurrentNamespace()->ExternalName() + "::", name,
signature, {});
source_out() << " {\n";
@@ -181,7 +130,7 @@ void ImplementationVisitor::Visit(ConstDeclaration* decl) {
assembler_ = CfgAssembler(Stack<const Type*>{});
- VisitResult expression_result = Visit(decl->expression);
+ VisitResult expression_result = Visit(decl->body());
VisitResult return_result =
GenerateImplicitConvert(signature.return_type, expression_result);
@@ -196,13 +145,15 @@ void ImplementationVisitor::Visit(ConstDeclaration* decl) {
source_out() << "}\n\n";
}
-void ImplementationVisitor::Visit(StructDeclaration* decl) {
- header_out() << " struct " << decl->name << " {\n";
- const StructType* struct_type =
- static_cast<const StructType*>(declarations()->LookupType(decl->name));
+void ImplementationVisitor::Visit(TypeAlias* alias) {
+ if (alias->IsRedeclaration()) return;
+ const StructType* struct_type = StructType::DynamicCast(alias->type());
+ if (!struct_type) return;
+ const std::string& name = struct_type->name();
+ header_out() << " struct " << name << " {\n";
for (auto& field : struct_type->fields()) {
- header_out() << " " << field.type->GetGeneratedTypeName();
- header_out() << " " << field.name << ";\n";
+ header_out() << " " << field.name_and_type.type->GetGeneratedTypeName();
+ header_out() << " " << field.name_and_type.name << ";\n";
}
header_out() << "\n std::tuple<";
bool first = true;
@@ -221,10 +172,10 @@ void ImplementationVisitor::Visit(StructDeclaration* decl) {
header_out() << ", ";
}
first = false;
- if (field.type->IsStructType()) {
- header_out() << field.name << ".Flatten()";
+ if (field.name_and_type.type->IsStructType()) {
+ header_out() << field.name_and_type.name << ".Flatten()";
} else {
- header_out() << "std::make_tuple(" << field.name << ")";
+ header_out() << "std::make_tuple(" << field.name_and_type.name << ")";
}
}
header_out() << ");\n";
@@ -232,151 +183,275 @@ void ImplementationVisitor::Visit(StructDeclaration* decl) {
header_out() << " };\n";
}
-void ImplementationVisitor::Visit(TorqueMacroDeclaration* decl,
- const Signature& sig, Statement* body) {
- Signature signature = MakeSignature(decl->signature.get());
- const Type* return_type = signature.return_type;
+VisitResult ImplementationVisitor::InlineMacro(
+ Macro* macro, base::Optional<LocationReference> this_reference,
+ const std::vector<VisitResult>& arguments,
+ const std::vector<Block*> label_blocks) {
+ CurrentScope::Scope current_scope(macro);
+ BindingsManagersScope bindings_managers_scope;
+ CurrentCallable::Scope current_callable(macro);
+ CurrentReturnValue::Scope current_return_value;
+ const Signature& signature = macro->signature();
+ const Type* return_type = macro->signature().return_type;
+ bool can_return = return_type != TypeOracle::GetNeverType();
+
+ CurrentConstructorInfo::Scope current_constructor;
+ if (macro->IsConstructor())
+ CurrentConstructorInfo::Get() = ConstructorInfo{0};
+
+ BlockBindings<LocalValue> parameter_bindings(&ValueBindingsManager::Get());
+ BlockBindings<LocalLabel> label_bindings(&LabelBindingsManager::Get());
+ DCHECK_EQ(macro->signature().parameter_names.size(),
+ arguments.size() + (this_reference ? 1 : 0));
+ DCHECK_EQ(this_reference.has_value(), macro->IsMethod());
+
+  // Bind the "this" parameter for methods. Methods that modify a
+  // struct-type "this" must only be called if "this" is bound to a
+  // variable, in which case the LocalValue is non-const. Otherwise, the
+  // LocalValue used for the parameter binding is const, and thus read-only,
+  // which causes errors if it is modified, e.g. when a struct method sets
+  // the struct's fields. This prevents using temporary struct values for
+  // anything other than read operations.
+ if (this_reference) {
+ DCHECK(macro->IsMethod());
+ LocalValue this_value = LocalValue{!this_reference->IsVariableAccess(),
+ this_reference->GetVisitResult()};
+ parameter_bindings.Add(kThisParameterName, this_value);
+ }
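+  // Illustrative consequence (editorial sketch; names are made up):
+  //   let p: SomeStruct = ...;
+  //   p.Mutate();                // ok: "this" is bound to a variable
+  //   MakeSomeStruct().Mutate(); // rejected: temporary "this" is const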
+
+ size_t i = 0;
+ for (auto arg : arguments) {
+ if (this_reference && i == signature.implicit_count) i++;
+ const std::string& name = macro->parameter_names()[i++];
+ parameter_bindings.Add(name, LocalValue{true, arg});
+ }
+
+ DCHECK_EQ(label_blocks.size(), signature.labels.size());
+ for (size_t i = 0; i < signature.labels.size(); ++i) {
+ const LabelDeclaration& label_info = signature.labels[i];
+ label_bindings.Add(label_info.name,
+ LocalLabel{label_blocks[i], label_info.types});
+ }
+
+ Block* macro_end;
+ base::Optional<Binding<LocalLabel>> macro_end_binding;
+ if (can_return) {
+ Stack<const Type*> stack = assembler().CurrentStack();
+ std::vector<const Type*> lowered_return_types = LowerType(return_type);
+ stack.PushMany(lowered_return_types);
+ if (!return_type->IsConstexpr()) {
+ SetReturnValue(VisitResult(return_type,
+ stack.TopRange(lowered_return_types.size())));
+ }
+ // The stack copy used to initialize the _macro_end block is only used
+ // as a template for the actual gotos generated by return statements. It
+ // doesn't correspond to any real return values, and thus shouldn't contain
+ // top types, because these would pollute actual return value types that get
+ // unioned with them for return statements, erroneously forcing them to top.
+ for (auto i = stack.begin(); i != stack.end(); ++i) {
+ if ((*i)->IsTopType()) {
+ *i = TopType::cast(*i)->source_type();
+ }
+ }
+ macro_end = assembler().NewBlock(std::move(stack));
+ macro_end_binding.emplace(&LabelBindingsManager::Get(), "_macro_end",
+ LocalLabel{macro_end, {return_type}});
+ } else {
+ SetReturnValue(VisitResult::NeverResult());
+ }
+
+ const Type* result = Visit(*macro->body());
+
+ if (result->IsNever()) {
+ if (!return_type->IsNever() && !macro->HasReturns()) {
+ std::stringstream s;
+ s << "macro " << macro->ReadableName()
+ << " that never returns must have return type never";
+ ReportError(s.str());
+ }
+ } else {
+ if (return_type->IsNever()) {
+ std::stringstream s;
+ s << "macro " << macro->ReadableName()
+ << " has implicit return at end of its declartion but return type "
+ "never";
+ ReportError(s.str());
+ } else if (!macro->signature().return_type->IsVoid()) {
+ std::stringstream s;
+ s << "macro " << macro->ReadableName()
+ << " expects to return a value but doesn't on all paths";
+ ReportError(s.str());
+ }
+ }
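+  // e.g. (editorial note): a macro declared with return type never must end
+  // every control path in a goto or a call that never returns, while a
+  // macro with a value return type must return on every path that reaches
+  // its end.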
+ if (!result->IsNever()) {
+ assembler().Goto(macro_end);
+ }
+
+ if (macro->HasReturns() || !result->IsNever()) {
+ assembler().Bind(macro_end);
+ }
+
+ return GetAndClearReturnValue();
+}
+
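+// Editorial note (a sketch of the contract, inferred from the code):
+// VisitMacroCommon below is the out-of-line path. It lowers the parameters,
+// creates one Block per declared label, and delegates to InlineMacro above,
+// which binds the implicit "_macro_end" label and hands back the macro's
+// return VisitResult.
+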
+void ImplementationVisitor::VisitMacroCommon(Macro* macro) {
+ // Do not generate code for inlined macros.
+ if (macro->ShouldBeInlined()) {
+ return;
+ }
+
+ CurrentCallable::Scope current_callable(macro);
+ const Signature& signature = macro->signature();
+ const Type* return_type = macro->signature().return_type;
bool can_return = return_type != TypeOracle::GetNeverType();
bool has_return_value =
can_return && return_type != TypeOracle::GetVoidType();
- std::string name = GetGeneratedCallableName(
- decl->name, declarations()->GetCurrentSpecializationTypeNamesVector());
- const TypeVector& list = signature.types();
- Macro* macro = declarations()->LookupMacro(name, list);
- CurrentCallableActivator activator(global_context_, macro, decl);
+  // Struct methods should never generate code; they should always be inlined.
+ DCHECK(!macro->IsMethod() ||
+ Method::cast(macro)->aggregate_type()->IsClassType());
- if (body != nullptr) {
- header_out() << " ";
- GenerateMacroFunctionDeclaration(header_out(), "", macro);
- header_out() << ";\n";
+ header_out() << " ";
+ GenerateMacroFunctionDeclaration(header_out(), "", macro);
+ header_out() << ";\n";
- GenerateMacroFunctionDeclaration(
- source_out(), GetDSLAssemblerName(CurrentModule()) + "::", macro);
- source_out() << " {\n";
+ GenerateMacroFunctionDeclaration(
+ source_out(), CurrentNamespace()->ExternalName() + "::", macro);
+ source_out() << " {\n";
- Stack<std::string> lowered_parameters;
- Stack<const Type*> lowered_parameter_types;
+ Stack<std::string> lowered_parameters;
+ Stack<const Type*> lowered_parameter_types;
- for (const std::string& name : macro->parameter_names()) {
- Parameter* parameter = Parameter::cast(declarations()->LookupValue(name));
- const Type* type = parameter->type();
- if (type->IsConstexpr()) {
- parameter->set_value(
- VisitResult(parameter->type(), parameter->external_name()));
- } else {
- LowerParameter(type, parameter->external_name(), &lowered_parameters);
- StackRange range = lowered_parameter_types.PushMany(LowerType(type));
- parameter->set_value(VisitResult(type, range));
- }
- }
+ std::vector<VisitResult> arguments;
- DCHECK_EQ(lowered_parameters.Size(), lowered_parameter_types.Size());
- assembler_ = CfgAssembler(lowered_parameter_types);
+ base::Optional<LocationReference> this_reference;
+ if (Method* method = Method::DynamicCast(macro)) {
+ const Type* this_type = method->aggregate_type();
+ DCHECK(this_type->IsClassType());
+ lowered_parameter_types.Push(this_type);
+ lowered_parameters.Push(ExternalParameterName(kThisParameterName));
+ VisitResult this_result =
+ VisitResult(this_type, lowered_parameters.TopRange(1));
+    // Mark "this" as a temporary to prevent assignment to it.
+ this_reference =
+ LocationReference::Temporary(this_result, "this parameter");
+ }
- for (const LabelDeclaration& label_info : sig.labels) {
- Label* label = declarations()->LookupLabel(label_info.name);
- Stack<const Type*> label_input_stack;
- for (Variable* v : label->GetParameters()) {
- label_input_stack.PushMany(LowerType(v->type()));
- }
- CreateBlockForLabel(label, label_input_stack);
- }
+ for (size_t i = 0; i < macro->signature().parameter_names.size(); ++i) {
+ if (this_reference && i == macro->signature().implicit_count) continue;
+ const std::string& name = macro->parameter_names()[i];
+ std::string external_name = ExternalParameterName(name);
+ const Type* type = macro->signature().types()[i];
- Label* macro_end = declarations()->DeclareLabel("macro_end");
- if (can_return) {
- Stack<const Type*> result_stack;
- CreateBlockForLabel(macro_end,
- Stack<const Type*>{LowerType(signature.return_type)});
+ if (type->IsConstexpr()) {
+ arguments.push_back(VisitResult(type, external_name));
+ } else {
+ LowerParameter(type, external_name, &lowered_parameters);
+ StackRange range = lowered_parameter_types.PushMany(LowerType(type));
+ arguments.push_back(VisitResult(type, range));
}
+ }
- const Type* result = Visit(body);
+ DCHECK_EQ(lowered_parameters.Size(), lowered_parameter_types.Size());
+ assembler_ = CfgAssembler(lowered_parameter_types);
- if (result->IsNever()) {
- if (!macro->signature().return_type->IsNever() && !macro->HasReturns()) {
- std::stringstream s;
- s << "macro " << decl->name
- << " that never returns must have return type never";
- ReportError(s.str());
- }
- } else {
- if (macro->signature().return_type->IsNever()) {
- std::stringstream s;
- s << "macro " << decl->name
- << " has implicit return at end of its declartion but return type "
- "never";
- ReportError(s.str());
- } else if (!macro->signature().return_type->IsVoid()) {
- std::stringstream s;
- s << "macro " << decl->name
- << " expects to return a value but doesn't on all paths";
- ReportError(s.str());
- }
- }
- if (!result->IsNever()) {
- GenerateLabelGoto(macro_end);
+ std::vector<Block*> label_blocks;
+ for (const LabelDeclaration& label_info : signature.labels) {
+ Stack<const Type*> label_input_stack;
+ for (const Type* type : label_info.types) {
+ label_input_stack.PushMany(LowerType(type));
}
+ Block* block = assembler().NewBlock(std::move(label_input_stack));
+ label_blocks.push_back(block);
+ }
- for (const LabelDeclaration& label_info : sig.labels) {
- Label* label = declarations()->LookupLabel(label_info.name);
- GenerateLabelBind(label);
- std::vector<std::string> label_parameter_variables;
- for (size_t i = 0; i < label->GetParameterCount(); ++i) {
- label_parameter_variables.push_back(
- ExternalLabelParameterName(label, i));
- }
- assembler().Emit(GotoExternalInstruction{label->external_label_name(),
- label_parameter_variables});
- }
+ VisitResult return_value =
+ InlineMacro(macro, this_reference, arguments, label_blocks);
+ Block* end = assembler().NewBlock();
+ if (return_type != TypeOracle::GetNeverType()) {
+ assembler().Goto(end);
+ }
- if (macro->HasReturns() || !result->IsNever()) {
- GenerateLabelBind(macro_end);
+ for (size_t i = 0; i < label_blocks.size(); ++i) {
+ Block* label_block = label_blocks[i];
+ const LabelDeclaration& label_info = signature.labels[i];
+ assembler().Bind(label_block);
+ std::vector<std::string> label_parameter_variables;
+ for (size_t i = 0; i < label_info.types.size(); ++i) {
+ label_parameter_variables.push_back(
+ ExternalLabelParameterName(label_info.name, i));
}
+ assembler().Emit(GotoExternalInstruction{ExternalLabelName(label_info.name),
+ label_parameter_variables});
+ }
- CSAGenerator csa_generator{assembler().Result(), source_out()};
- base::Optional<Stack<std::string>> values =
- csa_generator.EmitGraph(lowered_parameters);
+ if (return_type != TypeOracle::GetNeverType()) {
+ assembler().Bind(end);
+ }
- assembler_ = base::nullopt;
+ CSAGenerator csa_generator{assembler().Result(), source_out()};
+ base::Optional<Stack<std::string>> values =
+ csa_generator.EmitGraph(lowered_parameters);
- if (has_return_value) {
- source_out() << " return ";
- CSAGenerator::EmitCSAValue(GetAndClearReturnValue(), *values,
- source_out());
- source_out() << ";\n";
- }
- source_out() << "}\n\n";
+ assembler_ = base::nullopt;
+
+ if (has_return_value) {
+ source_out() << " return ";
+ CSAGenerator::EmitCSAValue(return_value, *values, source_out());
+ source_out() << ";\n";
}
+ source_out() << "}\n\n";
+}
+
+void ImplementationVisitor::Visit(Macro* macro) {
+ if (macro->IsExternal()) return;
+ VisitMacroCommon(macro);
+}
+
+void ImplementationVisitor::Visit(Method* method) {
+ DCHECK(!method->IsExternal());
+ VisitMacroCommon(method);
}
namespace {
-std::string AddParameter(Value* parameter, size_t i,
+
+std::string AddParameter(size_t i, Builtin* builtin,
Stack<std::string>* parameters,
- Stack<const Type*>* parameter_types) {
- std::string name = "parameter" + std::to_string(i);
- parameters->Push(name);
- StackRange range = parameter_types->PushMany(LowerType(parameter->type()));
- parameter->set_value(VisitResult(parameter->type(), range));
- return name;
+ Stack<const Type*>* parameter_types,
+ BlockBindings<LocalValue>* parameter_bindings) {
+ const std::string& name = builtin->signature().parameter_names[i];
+ const Type* type = builtin->signature().types()[i];
+ std::string external_name = "parameter" + std::to_string(i);
+ parameters->Push(external_name);
+ StackRange range = parameter_types->PushMany(LowerType(type));
+ parameter_bindings->Add(name, LocalValue{true, VisitResult(type, range)});
+ return external_name;
}
+
} // namespace
-void ImplementationVisitor::Visit(TorqueBuiltinDeclaration* decl,
- const Signature& signature, Statement* body) {
- std::string name = GetGeneratedCallableName(
- decl->name, declarations()->GetCurrentSpecializationTypeNamesVector());
- source_out() << "TF_BUILTIN(" << name << ", "
- << GetDSLAssemblerName(CurrentModule()) << ") {\n";
- Builtin* builtin = declarations()->LookupBuiltin(name);
- CurrentCallableActivator activator(global_context_, builtin, decl);
+void ImplementationVisitor::Visit(Builtin* builtin) {
+ if (builtin->IsExternal()) return;
+ CurrentScope::Scope current_scope(builtin);
+ const std::string& name = builtin->ExternalName();
+ const Signature& signature = builtin->signature();
+ source_out() << "TF_BUILTIN(" << name << ", CodeStubAssembler) {\n"
+ << " compiler::CodeAssemblerState* state_ = state();"
+ << " compiler::CodeAssembler ca_(state());\n";
+
+ CurrentCallable::Scope current_callable(builtin);
+ CurrentReturnValue::Scope current_return_value;
Stack<const Type*> parameter_types;
Stack<std::string> parameters;
+ BindingsManagersScope bindings_managers_scope;
+
+ BlockBindings<LocalValue> parameter_bindings(&ValueBindingsManager::Get());
+
// Context
- Value* val =
- declarations()->LookupValue(decl->signature->parameters.names[0]);
- std::string parameter0 = AddParameter(val, 0, &parameters, &parameter_types);
+ std::string parameter0 = AddParameter(0, builtin, &parameters,
+ &parameter_types, &parameter_bindings);
source_out() << " TNode<Context> " << parameter0
<< " = UncheckedCast<Context>(Parameter("
<< "Descriptor::kContext));\n";
@@ -384,19 +459,13 @@ void ImplementationVisitor::Visit(TorqueBuiltinDeclaration* decl,
size_t first = 1;
if (builtin->IsVarArgsJavaScript()) {
- assert(decl->signature->parameters.has_varargs);
- ExternConstant* arguments =
- ExternConstant::cast(declarations()->LookupValue(
- decl->signature->parameters.arguments_variable));
- std::string arguments_name = arguments->value().constexpr_value();
+ DCHECK(signature.parameter_types.var_args);
source_out()
<< " Node* argc = Parameter(Descriptor::kJSActualArgumentsCount);\n";
source_out() << " CodeStubArguments arguments_impl(this, "
"ChangeInt32ToIntPtr(argc));\n";
- Value* receiver =
- declarations()->LookupValue(decl->signature->parameters.names[1]);
- std::string parameter1 =
- AddParameter(receiver, 1, &parameters, &parameter_types);
+ std::string parameter1 = AddParameter(
+ 1, builtin, &parameters, &parameter_types, &parameter_bindings);
source_out() << " TNode<Object> " << parameter1
<< " = arguments_impl.GetReceiver();\n";
@@ -404,25 +473,28 @@ void ImplementationVisitor::Visit(TorqueBuiltinDeclaration* decl,
<< " = &arguments_impl;\n";
source_out() << "USE(arguments);\n";
source_out() << "USE(" << parameter1 << ");\n";
+ parameter_bindings.Add(
+ *signature.arguments_variable,
+ LocalValue{true,
+ VisitResult(TypeOracle::GetArgumentsType(), "arguments")});
first = 2;
}
- for (size_t i = 0; i < decl->signature->parameters.names.size(); ++i) {
+ for (size_t i = 0; i < signature.parameter_names.size(); ++i) {
if (i < first) continue;
- const std::string& parameter_name = decl->signature->parameters.names[i];
- Value* parameter = declarations()->LookupValue(parameter_name);
- std::string var = AddParameter(parameter, i, &parameters, &parameter_types);
- source_out() << " " << parameter->type()->GetGeneratedTypeName() << " "
- << var << " = "
- << "UncheckedCast<"
- << parameter->type()->GetGeneratedTNodeTypeName()
+ const std::string& parameter_name = signature.parameter_names[i];
+ const Type* type = signature.types()[i];
+ std::string var = AddParameter(i, builtin, &parameters, &parameter_types,
+ &parameter_bindings);
+ source_out() << " " << type->GetGeneratedTypeName() << " " << var << " = "
+ << "UncheckedCast<" << type->GetGeneratedTNodeTypeName()
<< ">(Parameter(Descriptor::k"
<< CamelifyString(parameter_name) << "));\n";
source_out() << " USE(" << var << ");\n";
}
assembler_ = CfgAssembler(parameter_types);
- const Type* body_result = Visit(body);
+ const Type* body_result = Visit(*builtin->body());
if (body_result != TypeOracle::GetNeverType()) {
ReportError("control reaches end of builtin, expected return of a value");
}
@@ -434,8 +506,30 @@ void ImplementationVisitor::Visit(TorqueBuiltinDeclaration* decl,
}
const Type* ImplementationVisitor::Visit(VarDeclarationStatement* stmt) {
+ BlockBindings<LocalValue> block_bindings(&ValueBindingsManager::Get());
+ return Visit(stmt, &block_bindings);
+}
+
+const Type* ImplementationVisitor::Visit(
+ VarDeclarationStatement* stmt, BlockBindings<LocalValue>* block_bindings) {
+  if (!stmt->const_qualified && !stmt->type) {
+    ReportError(
+        "variable declaration is missing a type. Only 'const' bindings can "
+        "infer the type.");
+  }
+  // const-qualified variables are required to be initialized properly.
+  if (stmt->const_qualified && !stmt->initializer) {
+    ReportError("local constant \"", stmt->name, "\" is not initialized.");
+  }
+
base::Optional<const Type*> type;
- if (stmt->type) type = declarations()->GetType(*stmt->type);
+ if (stmt->type) {
+ type = Declarations::GetType(*stmt->type);
+ if ((*type)->IsConstexpr() && !stmt->const_qualified) {
+ ReportError(
+ "cannot declare variable with constexpr type. Use 'const' instead.");
+ }
+ }
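+  // Illustrative Torque (editorial sketch; names are made up):
+  //   const x = Foo();            // ok: 'const' may infer its type
+  //   let y: Smi = 0;             // ok: 'let' with an explicit type
+  //   let z = 0;                  // error: 'let' needs a type annotation
+  //   let c: constexpr int31 = 0; // error: constexpr requires 'const'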
base::Optional<VisitResult> init_result;
if (stmt->initializer) {
StackScope scope(this);
@@ -451,19 +545,17 @@ const Type* ImplementationVisitor::Visit(VarDeclarationStatement* stmt) {
}
TypeVector lowered_types = LowerType(*type);
for (const Type* type : lowered_types) {
- assembler().Emit(PushUninitializedInstruction{type});
+ assembler().Emit(PushUninitializedInstruction{TypeOracle::GetTopType(
+ "unitialized variable '" + stmt->name + "' of type " +
+ type->ToString() + " originally defined at " +
+ PositionAsString(stmt->pos),
+ type)});
}
init_result =
VisitResult(*type, assembler().TopRange(lowered_types.size()));
}
- Variable* var;
- if (stmt->const_qualified) {
- var = declarations()->DeclareVariable(stmt->name, init_result->type(),
- stmt->const_qualified);
- } else {
- var = Variable::cast(declarations()->LookupValue(stmt->name));
- }
- var->set_value(*init_result);
+ block_bindings->Add(stmt->name,
+ LocalValue{stmt->const_qualified, *init_result});
return TypeOracle::GetVoidType();
}
@@ -472,29 +564,11 @@ const Type* ImplementationVisitor::Visit(TailCallStatement* stmt) {
}
VisitResult ImplementationVisitor::Visit(ConditionalExpression* expr) {
- Label* true_label;
- Label* false_label;
+ Block* true_block = assembler().NewBlock(assembler().CurrentStack());
+ Block* false_block = assembler().NewBlock(assembler().CurrentStack());
Block* done_block = assembler().NewBlock();
Block* true_conversion_block = assembler().NewBlock();
- {
- Declarations::NodeScopeActivator scope(declarations(), expr->condition);
-
- true_label = declarations()->LookupLabel(kTrueLabelName);
- CreateBlockForLabel(true_label, assembler().CurrentStack());
- false_label = declarations()->LookupLabel(kFalseLabelName);
- CreateBlockForLabel(false_label, assembler().CurrentStack());
- done_block = assembler().NewBlock();
-
- {
- StackScope condition_scope(this);
- VisitResult condition_result = Visit(expr->condition);
- if (!condition_result.type()->IsNever()) {
- condition_result = condition_scope.Yield(GenerateImplicitConvert(
- TypeOracle::GetBoolType(), condition_result));
- assembler().Branch(true_label->block(), false_label->block());
- }
- }
- }
+ GenerateExpressionBranch(expr->condition, true_block, false_block);
VisitResult left;
VisitResult right;
@@ -504,14 +578,14 @@ VisitResult ImplementationVisitor::Visit(ConditionalExpression* expr) {
// before evaluating the conditional expression because the common type of
// the result of both the true and false of the condition needs to be known
// to convert both branches to a common type.
- assembler().Bind(true_label->block());
+ assembler().Bind(true_block);
StackScope left_scope(this);
left = Visit(expr->if_true);
assembler().Goto(true_conversion_block);
const Type* common_type;
{
- assembler().Bind(false_label->block());
+ assembler().Bind(false_block);
StackScope right_scope(this);
right = Visit(expr->if_false);
common_type = GetCommonType(left.type(), right.type());
@@ -532,16 +606,16 @@ VisitResult ImplementationVisitor::Visit(ConditionalExpression* expr) {
VisitResult ImplementationVisitor::Visit(LogicalOrExpression* expr) {
VisitResult left_result;
{
- Declarations::NodeScopeActivator scope(declarations(), expr->left);
- Label* false_label = declarations()->LookupLabel(kFalseLabelName);
- CreateBlockForLabel(false_label, assembler().CurrentStack());
+ Block* false_block = assembler().NewBlock(assembler().CurrentStack());
+ Binding<LocalLabel> false_binding{&LabelBindingsManager::Get(),
+ kFalseLabelName, LocalLabel{false_block}};
left_result = Visit(expr->left);
if (left_result.type()->IsBool()) {
- Label* true_label = declarations()->LookupLabel(kTrueLabelName);
- assembler().Branch(true_label->block(), false_label->block());
- assembler().Bind(false_label->block());
+ Block* true_block = LookupSimpleLabel(kTrueLabelName);
+ assembler().Branch(true_block, false_block);
+ assembler().Bind(false_block);
} else if (left_result.type()->IsNever()) {
- assembler().Bind(false_label->block());
+ assembler().Bind(false_block);
} else if (!left_result.type()->IsConstexprBool()) {
ReportError(
"expected type bool, constexpr bool, or never on left-hand side of "
@@ -563,9 +637,9 @@ VisitResult ImplementationVisitor::Visit(LogicalOrExpression* expr) {
VisitResult right_result = Visit(expr->right);
if (right_result.type()->IsBool()) {
- Label* true_label = declarations()->LookupLabel(kTrueLabelName);
- Label* false_label = declarations()->LookupLabel(kFalseLabelName);
- assembler().Branch(true_label->block(), false_label->block());
+ Block* true_block = LookupSimpleLabel(kTrueLabelName);
+ Block* false_block = LookupSimpleLabel(kFalseLabelName);
+ assembler().Branch(true_block, false_block);
return VisitResult::NeverResult();
} else if (!right_result.type()->IsNever()) {
ReportError(
@@ -577,16 +651,16 @@ VisitResult ImplementationVisitor::Visit(LogicalOrExpression* expr) {
VisitResult ImplementationVisitor::Visit(LogicalAndExpression* expr) {
VisitResult left_result;
{
- Declarations::NodeScopeActivator scope(declarations(), expr->left);
- Label* true_label = declarations()->LookupLabel(kTrueLabelName);
- CreateBlockForLabel(true_label, assembler().CurrentStack());
+ Block* true_block = assembler().NewBlock(assembler().CurrentStack());
+    Binding<LocalLabel> true_binding{&LabelBindingsManager::Get(),
+                                     kTrueLabelName, LocalLabel{true_block}};
left_result = Visit(expr->left);
if (left_result.type()->IsBool()) {
- Label* false_label = declarations()->LookupLabel(kFalseLabelName);
- assembler().Branch(true_label->block(), false_label->block());
- assembler().Bind(true_label->block());
+ Block* false_block = LookupSimpleLabel(kFalseLabelName);
+ assembler().Branch(true_block, false_block);
+ assembler().Bind(true_block);
} else if (left_result.type()->IsNever()) {
- assembler().Bind(true_label->block());
+ assembler().Bind(true_block);
} else if (!left_result.type()->IsConstexprBool()) {
ReportError(
"expected type bool, constexpr bool, or never on left-hand side of "
@@ -608,9 +682,9 @@ VisitResult ImplementationVisitor::Visit(LogicalAndExpression* expr) {
VisitResult right_result = Visit(expr->right);
if (right_result.type()->IsBool()) {
- Label* true_label = declarations()->LookupLabel(kTrueLabelName);
- Label* false_label = declarations()->LookupLabel(kFalseLabelName);
- assembler().Branch(true_label->block(), false_label->block());
+ Block* true_block = LookupSimpleLabel(kTrueLabelName);
+ Block* false_block = LookupSimpleLabel(kFalseLabelName);
+ assembler().Branch(true_block, false_block);
return VisitResult::NeverResult();
} else if (!right_result.type()->IsNever()) {
ReportError(
@@ -654,13 +728,12 @@ VisitResult ImplementationVisitor::Visit(NumberLiteralExpression* expr) {
  // TODO(tebbi): Do not silently lose precision; support 64bit literals.
double d = std::stod(expr->number.c_str());
int32_t i = static_cast<int32_t>(d);
- const Type* result_type =
- declarations()->LookupType(CONST_FLOAT64_TYPE_STRING);
+ const Type* result_type = Declarations::LookupType(CONST_FLOAT64_TYPE_STRING);
if (i == d) {
if ((i >> 30) == (i >> 31)) {
- result_type = declarations()->LookupType(CONST_INT31_TYPE_STRING);
+ result_type = Declarations::LookupType(CONST_INT31_TYPE_STRING);
} else {
- result_type = declarations()->LookupType(CONST_INT32_TYPE_STRING);
+ result_type = Declarations::LookupType(CONST_INT32_TYPE_STRING);
}
}
return VisitResult{result_type, expr->number};
@@ -669,7 +742,7 @@ VisitResult ImplementationVisitor::Visit(NumberLiteralExpression* expr) {
VisitResult ImplementationVisitor::Visit(AssumeTypeImpossibleExpression* expr) {
VisitResult result = Visit(expr->expression);
const Type* result_type =
- SubtractType(result.type(), declarations()->GetType(expr->excluded_type));
+ SubtractType(result.type(), Declarations::GetType(expr->excluded_type));
if (result_type->IsNever()) {
ReportError("unreachable code");
}
@@ -691,44 +764,25 @@ VisitResult ImplementationVisitor::GetBuiltinCode(Builtin* builtin) {
"creating function pointers is only allowed for internal builtins with "
"stub linkage");
}
- const Type* type = TypeOracle::GetFunctionPointerType(
+ const Type* type = TypeOracle::GetBuiltinPointerType(
builtin->signature().parameter_types.types,
builtin->signature().return_type);
- assembler().Emit(PushCodePointerInstruction{builtin->name(), type});
+ assembler().Emit(
+ PushBuiltinPointerInstruction{builtin->ExternalName(), type});
return VisitResult(type, assembler().TopRange(1));
}
VisitResult ImplementationVisitor::Visit(IdentifierExpression* expr) {
StackScope scope(this);
- std::string name = expr->name;
- if (expr->generic_arguments.size() != 0) {
- GenericList* generic_list = declarations()->LookupGeneric(expr->name);
- for (Generic* generic : generic_list->list()) {
- TypeVector specialization_types = GetTypeVector(expr->generic_arguments);
- name = GetGeneratedCallableName(name, specialization_types);
- CallableNode* callable = generic->declaration()->callable;
- QueueGenericSpecialization({generic, specialization_types}, callable,
- callable->signature.get(),
- generic->declaration()->body);
- }
- }
-
- if (Builtin* builtin = Builtin::DynamicCast(declarations()->Lookup(name))) {
- return scope.Yield(GetBuiltinCode(builtin));
- }
-
return scope.Yield(GenerateFetchFromLocation(GetLocationReference(expr)));
}
const Type* ImplementationVisitor::Visit(GotoStatement* stmt) {
- Label* label = declarations()->LookupLabel(stmt->label);
-
- if (stmt->arguments.size() != label->GetParameterCount()) {
- std::stringstream stream;
- stream << "goto to label has incorrect number of parameters (expected "
- << std::to_string(label->GetParameterCount()) << " found "
- << std::to_string(stmt->arguments.size()) << ")";
- ReportError(stream.str());
+ LocalLabel* label = LookupLabel(stmt->label);
+ size_t parameter_count = label->parameter_types.size();
+ if (stmt->arguments.size() != parameter_count) {
+ ReportError("goto to label has incorrect number of parameters (expected ",
+ parameter_count, " found ", stmt->arguments.size(), ")");
}
size_t i = 0;
@@ -736,13 +790,12 @@ const Type* ImplementationVisitor::Visit(GotoStatement* stmt) {
for (Expression* e : stmt->arguments) {
StackScope scope(this);
VisitResult result = Visit(e);
- Variable* var = label->GetParameter(i++);
- result = GenerateImplicitConvert(var->type(), result);
+ const Type* parameter_type = label->parameter_types[i++];
+ result = GenerateImplicitConvert(parameter_type, result);
arguments.Extend(scope.Yield(result).stack_range());
}
- GenerateLabelGoto(label, arguments);
- label->MarkUsed();
+ assembler().Goto(label->block, arguments.Size());
return TypeOracle::GetNeverType();
}
@@ -767,11 +820,7 @@ const Type* ImplementationVisitor::Visit(IfStatement* stmt) {
expression_result.constexpr_value(), true_block, false_block});
assembler().Bind(true_block);
- const Type* left_result;
- {
- StackScope stack_scope(this);
- left_result = Visit(stmt->if_true);
- }
+ const Type* left_result = Visit(stmt->if_true);
if (left_result == TypeOracle::GetVoidType()) {
assembler().Goto(done_block);
}
@@ -779,7 +828,6 @@ const Type* ImplementationVisitor::Visit(IfStatement* stmt) {
assembler().Bind(false_block);
const Type* right_result = TypeOracle::GetVoidType();
if (has_else) {
- StackScope stack_scope(this);
right_result = Visit(*stmt->if_false);
}
if (right_result == TypeOracle::GetVoidType()) {
@@ -799,30 +847,40 @@ const Type* ImplementationVisitor::Visit(IfStatement* stmt) {
}
return left_result;
} else {
- Label* true_label = nullptr;
- Label* false_label = nullptr;
- {
- Declarations::NodeScopeActivator scope(declarations(), &*stmt->condition);
- true_label = declarations()->LookupLabel(kTrueLabelName);
- CreateBlockForLabel(true_label, assembler().CurrentStack());
- false_label = declarations()->LookupLabel(kFalseLabelName);
- CreateBlockForLabel(false_label, assembler().CurrentStack());
- }
+ Block* true_block = assembler().NewBlock(assembler().CurrentStack(),
+ IsDeferred(stmt->if_true));
+ Block* false_block =
+ assembler().NewBlock(assembler().CurrentStack(),
+ stmt->if_false && IsDeferred(*stmt->if_false));
+ GenerateExpressionBranch(stmt->condition, true_block, false_block);
Block* done_block;
bool live = false;
if (has_else) {
done_block = assembler().NewBlock();
} else {
- done_block = false_label->block();
+ done_block = false_block;
live = true;
}
- std::vector<Statement*> blocks = {stmt->if_true};
- std::vector<Label*> labels = {true_label, false_label};
- if (has_else) blocks.push_back(*stmt->if_false);
- if (GenerateExpressionBranch(stmt->condition, labels, blocks, done_block)) {
- live = true;
+
+ assembler().Bind(true_block);
+ {
+ const Type* result = Visit(stmt->if_true);
+ if (result == TypeOracle::GetVoidType()) {
+ live = true;
+ assembler().Goto(done_block);
+ }
}
+
+ if (has_else) {
+ assembler().Bind(false_block);
+ const Type* result = Visit(*stmt->if_false);
+ if (result == TypeOracle::GetVoidType()) {
+ live = true;
+ assembler().Goto(done_block);
+ }
+ }
+
if (live) {
assembler().Bind(done_block);
}
@@ -831,42 +889,41 @@ const Type* ImplementationVisitor::Visit(IfStatement* stmt) {
}
const Type* ImplementationVisitor::Visit(WhileStatement* stmt) {
- Label* body_label = nullptr;
- Label* exit_label = nullptr;
- {
- Declarations::NodeScopeActivator scope(declarations(), stmt->condition);
- body_label = declarations()->LookupLabel(kTrueLabelName);
- CreateBlockForLabel(body_label, assembler().CurrentStack());
- exit_label = declarations()->LookupLabel(kFalseLabelName);
- CreateBlockForLabel(exit_label, assembler().CurrentStack());
- }
+ Block* body_block = assembler().NewBlock(assembler().CurrentStack());
+ Block* exit_block = assembler().NewBlock(assembler().CurrentStack());
Block* header_block = assembler().NewBlock();
assembler().Goto(header_block);
assembler().Bind(header_block);
+ GenerateExpressionBranch(stmt->condition, body_block, exit_block);
- Declarations::NodeScopeActivator scope(declarations(), stmt->body);
- BreakContinueActivator activator(global_context_, exit_label->block(),
- header_block);
-
- GenerateExpressionBranch(stmt->condition, {body_label, exit_label},
- {stmt->body}, header_block);
+ assembler().Bind(body_block);
+ {
+ BreakContinueActivator activator{exit_block, header_block};
+ const Type* body_result = Visit(stmt->body);
+ if (body_result != TypeOracle::GetNeverType()) {
+ assembler().Goto(header_block);
+ }
+ }
- GenerateLabelBind(exit_label);
+ assembler().Bind(exit_block);
return TypeOracle::GetVoidType();
}
const Type* ImplementationVisitor::Visit(BlockStatement* block) {
- Declarations::NodeScopeActivator scope(declarations(), block);
+ BlockBindings<LocalValue> block_bindings(&ValueBindingsManager::Get());
const Type* type = TypeOracle::GetVoidType();
for (Statement* s : block->statements) {
+ CurrentSourcePosition::Scope source_position(s->pos);
if (type->IsNever()) {
- std::stringstream stream;
- stream << "statement after non-returning statement";
- ReportError(stream.str());
+ ReportError("statement after non-returning statement");
+ }
+ if (auto* var_declaration = VarDeclarationStatement::DynamicCast(s)) {
+ type = Visit(var_declaration, &block_bindings);
+ } else {
+ type = Visit(s);
}
- type = Visit(s);
}
return type;
}
@@ -877,7 +934,9 @@ const Type* ImplementationVisitor::Visit(DebugStatement* stmt) {
stmt->reason + "' at " +
PositionAsString(stmt->pos)});
#endif
- assembler().Emit(DebugBreakInstruction{stmt->never_continues});
+ assembler().Emit(AbortInstruction{stmt->never_continues
+ ? AbortInstruction::Kind::kUnreachable
+ : AbortInstruction::Kind::kDebugBreak});
if (stmt->never_continues) {
return TypeOracle::GetNeverType();
} else {
@@ -918,33 +977,17 @@ const Type* ImplementationVisitor::Visit(AssertStatement* stmt) {
// isn't trivial up-front. Secondly, on failure, the assert text should be
// the corresponding Torque code, not the -gen.cc code, which would be the
// case when using CSA_ASSERT_XXX.
- Label* true_label = nullptr;
- Label* false_label = nullptr;
- Declarations::NodeScopeActivator scope(declarations(), stmt->expression);
- true_label = declarations()->LookupLabel(kTrueLabelName);
- CreateBlockForLabel(true_label, assembler().CurrentStack());
- false_label = declarations()->LookupLabel(kFalseLabelName);
- CreateBlockForLabel(false_label, assembler().CurrentStack());
-
- VisitResult expression_result = Visit(stmt->expression);
- if (expression_result.type() == TypeOracle::GetBoolType()) {
- GenerateBranch(expression_result, true_label, false_label);
- } else {
- if (expression_result.type() != TypeOracle::GetNeverType()) {
- std::stringstream s;
- s << "unexpected return type " << *expression_result.type()
- << " for branch expression";
- ReportError(s.str());
- }
- }
+ Block* true_block = assembler().NewBlock(assembler().CurrentStack());
+ Block* false_block = assembler().NewBlock(assembler().CurrentStack(), true);
+ GenerateExpressionBranch(stmt->expression, true_block, false_block);
- GenerateLabelBind(false_label);
- assembler().Emit(PrintConstantStringInstruction{
- "assert '" + FormatAssertSource(stmt->source) + "' failed at " +
- PositionAsString(stmt->pos)});
- assembler().Emit(DebugBreakInstruction{true});
+ assembler().Bind(false_block);
- GenerateLabelBind(true_label);
+ assembler().Emit(AbortInstruction{
+ AbortInstruction::Kind::kAssertionFailure,
+ "Torque assert '" + FormatAssertSource(stmt->source) + "' failed"});
+
+ assembler().Bind(true_block);
}
return TypeOracle::GetVoidType();
}
@@ -955,15 +998,14 @@ const Type* ImplementationVisitor::Visit(ExpressionStatement* stmt) {
}
const Type* ImplementationVisitor::Visit(ReturnStatement* stmt) {
- Callable* current_callable = global_context_.GetCurrentCallable();
+ Callable* current_callable = CurrentCallable::Get();
if (current_callable->signature().return_type->IsNever()) {
std::stringstream s;
s << "cannot return from a function with return type never";
ReportError(s.str());
}
- Label* end = current_callable->IsMacro()
- ? declarations()->LookupLabel("macro_end")
- : nullptr;
+ LocalLabel* end =
+ current_callable->IsMacro() ? LookupLabel("_macro_end") : nullptr;
if (current_callable->HasReturnValue()) {
if (!stmt->value) {
std::stringstream s;
@@ -1002,9 +1044,6 @@ const Type* ImplementationVisitor::Visit(ReturnStatement* stmt) {
}
const Type* ImplementationVisitor::Visit(ForOfLoopStatement* stmt) {
- Declarations::NodeScopeActivator scope(declarations(), stmt);
- StackScope stack_scope(this);
-
VisitResult expression_result = Visit(stmt->iterable);
VisitResult begin = stmt->begin
? Visit(*stmt->begin)
@@ -1027,8 +1066,7 @@ const Type* ImplementationVisitor::Visit(ForOfLoopStatement* stmt) {
assembler().Bind(header_block);
- BreakContinueActivator activator(global_context_, exit_block,
- increment_block);
+ BreakContinueActivator activator(exit_block, increment_block);
{
StackScope comparison_scope(this);
@@ -1046,22 +1084,20 @@ const Type* ImplementationVisitor::Visit(ForOfLoopStatement* stmt) {
assembler().Bind(body_block);
{
- StackScope body_scope(this);
-
VisitResult element_result;
{
StackScope element_scope(this);
VisitResult result = GenerateCall("[]", {{expression_result, index}, {}});
if (stmt->var_declaration->type) {
const Type* declared_type =
- declarations()->GetType(*stmt->var_declaration->type);
+ Declarations::GetType(*stmt->var_declaration->type);
result = GenerateImplicitConvert(declared_type, result);
}
element_result = element_scope.Yield(result);
}
- Variable* element_var = Variable::cast(
- declarations()->LookupValue(stmt->var_declaration->name));
- element_var->set_value(element_result);
+ Binding<LocalValue> element_var_binding{&ValueBindingsManager::Get(),
+ stmt->var_declaration->name,
+ LocalValue{true, element_result}};
Visit(stmt->body);
}
assembler().Goto(increment_block);
@@ -1082,63 +1118,95 @@ const Type* ImplementationVisitor::Visit(ForOfLoopStatement* stmt) {
return TypeOracle::GetVoidType();
}
+VisitResult ImplementationVisitor::TemporaryUninitializedStruct(
+ const StructType* struct_type, const std::string& reason) {
+ StackRange range = assembler().TopRange(0);
+ for (const Field& f : struct_type->fields()) {
+ if (const StructType* struct_type =
+ StructType::DynamicCast(f.name_and_type.type)) {
+ range.Extend(
+ TemporaryUninitializedStruct(struct_type, reason).stack_range());
+ } else {
+ std::string descriptor = "unitialized field '" + f.name_and_type.name +
+ "' declared at " + PositionAsString(f.pos) +
+ " (" + reason + ")";
+ TypeVector lowered_types = LowerType(f.name_and_type.type);
+ for (const Type* type : lowered_types) {
+ assembler().Emit(PushUninitializedInstruction{
+ TypeOracle::GetTopType(descriptor, type)});
+ }
+ range.Extend(assembler().TopRange(lowered_types.size()));
+ }
+ }
+ return VisitResult(struct_type, range);
+}
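+// Editorial sketch: the recursion above means that a nested struct such as
+// {a: Smi, inner: {b: Smi, c: Smi}} lowers to three stack slots, each pushed
+// as a top type whose descriptor records why the slot is uninitialized.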
+
VisitResult ImplementationVisitor::Visit(TryLabelExpression* expr) {
+ size_t parameter_count = expr->label_block->parameters.names.size();
+ std::vector<VisitResult> parameters;
+
+ Block* label_block = nullptr;
Block* done_block = assembler().NewBlock();
VisitResult try_result;
- Label* label = nullptr;
- // Output labels for the goto handlers and for the merge after the try.
{
- // Activate a new scope to see handler labels
- Declarations::NodeScopeActivator scope(declarations(), expr);
- {
- LabelBlock* block = expr->label_block;
- CurrentSourcePosition::Scope source_position(block->pos);
- label = declarations()->LookupLabel(block->label);
-
- Declarations::NodeScopeActivator scope(declarations(), block->body);
- Stack<const Type*> label_input_stack = assembler().CurrentStack();
- for (Variable* v : label->GetParameters()) {
- StackRange range = label_input_stack.PushMany(LowerType(v->type()));
- v->set_value(VisitResult(v->type(), range));
- v->Define();
+ CurrentSourcePosition::Scope source_position(expr->label_block->pos);
+ if (expr->label_block->parameters.has_varargs) {
+ ReportError("cannot use ... for label parameters");
+ }
+ Stack<const Type*> label_input_stack = assembler().CurrentStack();
+ TypeVector parameter_types;
+ for (size_t i = 0; i < parameter_count; ++i) {
+ const Type* type =
+ Declarations::GetType(expr->label_block->parameters.types[i]);
+ parameter_types.push_back(type);
+ if (type->IsConstexpr()) {
+ ReportError("no constexpr type allowed for label arguments");
}
- CreateBlockForLabel(label, label_input_stack);
+ StackRange range = label_input_stack.PushMany(LowerType(type));
+ parameters.push_back(VisitResult(type, range));
}
+ label_block = assembler().NewBlock(label_input_stack,
+ IsDeferred(expr->label_block->body));
+
+ Binding<LocalLabel> label_binding{&LabelBindingsManager::Get(),
+ expr->label_block->label,
+ LocalLabel{label_block, parameter_types}};
// Visit try
- {
- StackScope stack_scope(this);
- try_result = Visit(expr->try_expression);
- if (try_result.type() != TypeOracle::GetNeverType()) {
- try_result = stack_scope.Yield(try_result);
- assembler().Goto(done_block);
- }
+ StackScope stack_scope(this);
+ try_result = Visit(expr->try_expression);
+ if (try_result.type() != TypeOracle::GetNeverType()) {
+ try_result = stack_scope.Yield(try_result);
+ assembler().Goto(done_block);
}
}
- if (label->IsUsed()) {
- // Visit and output the code for the label block. If the label block falls
- // through, then the try must not return a value. Also, if the try doesn't
- // fall through, but the label does, then overall the try-label block
- // returns type void.
- GenerateLabelBind(label);
- const Type* label_result;
- {
- StackScope stack_scope(this);
- label_result = Visit(expr->label_block->body);
- }
- if (!try_result.type()->IsVoidOrNever() && label_result->IsVoid()) {
- ReportError(
- "otherwise clauses cannot fall through in a non-void expression");
- }
- if (label_result != TypeOracle::GetNeverType()) {
- assembler().Goto(done_block);
- }
- if (label_result->IsVoid() && try_result.type()->IsNever()) {
- try_result =
- VisitResult(TypeOracle::GetVoidType(), try_result.stack_range());
+ // Visit and output the code for the label block. If the label block falls
+ // through, then the try must not return a value. Also, if the try doesn't
+ // fall through, but the label does, then overall the try-label block
+ // returns type void.
+ assembler().Bind(label_block);
+ const Type* label_result;
+ {
+ BlockBindings<LocalValue> parameter_bindings(&ValueBindingsManager::Get());
+ for (size_t i = 0; i < parameter_count; ++i) {
+ parameter_bindings.Add(expr->label_block->parameters.names[i],
+ LocalValue{true, parameters[i]});
}
+
+ label_result = Visit(expr->label_block->body);
+ }
+ if (!try_result.type()->IsVoidOrNever() && label_result->IsVoid()) {
+ ReportError(
+ "otherwise clauses cannot fall through in a non-void expression");
+ }
+ if (label_result != TypeOracle::GetNeverType()) {
+ assembler().Goto(done_block);
+ }
+ if (label_result->IsVoid() && try_result.type()->IsNever()) {
+ try_result =
+ VisitResult(TypeOracle::GetVoidType(), try_result.stack_range());
}
if (!try_result.type()->IsNever()) {
@@ -1151,34 +1219,106 @@ VisitResult ImplementationVisitor::Visit(StatementExpression* expr) {
return VisitResult{Visit(expr->statement), assembler().TopRange(0)};
}
+VisitResult ImplementationVisitor::Visit(NewExpression* expr) {
+ StackScope stack_scope(this);
+ const Type* type = Declarations::GetType(expr->type);
+ const ClassType* class_type = ClassType::DynamicCast(type);
+ if (class_type == nullptr) {
+ ReportError("type for new expression must be a class, \"", *type,
+ "\" is not");
+ }
+
+ // In order to ensure "atomicity" of object allocation, a class' constructors
+ // operate on a per-class internal struct rather than the class directly until
+ // the constructor has successfully completed and all class members are
+ // available. Create the appropriate unitialized struct and pass it to the
+ // matching class constructor with the arguments that were passed to new{}
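+  // Illustrative flow (editorial sketch; MyClass is a made-up name): for
+  //   new MyClass{...}
+  // the visitor (1) builds an uninitialized struct mirroring MyClass's
+  // fields, (2) runs the matching constructor against that struct, (3)
+  // calls %Allocate with the class size, and (4) copies the struct's values
+  // field by field into the freshly allocated object.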
+  StructType* class_this_struct = class_type->struct_type();
+  VisitResult uninitialized_struct = TemporaryUninitializedStruct(
+      class_this_struct,
+      "it's not set in the constructor for class " + class_type->name());
+  Arguments constructor_arguments;
+  for (auto p : expr->parameters) {
+    constructor_arguments.parameters.push_back(Visit(p));
+  }
+  LocationReference uninitialized_struct_ref =
+      LocationReference::VariableAccess(uninitialized_struct);
+  Callable* callable =
+      LookupConstructor(uninitialized_struct_ref, constructor_arguments, {});
+  GenerateCall(callable, uninitialized_struct_ref, constructor_arguments,
+               {class_type}, false);
+  VisitResult new_struct_result = uninitialized_struct;
+
+  // Output the code to generate an uninitialized object of the class size in
+  // the GC heap.
+ Arguments allocate_arguments;
+ allocate_arguments.parameters.push_back(VisitResult(
+ TypeOracle::GetConstInt31Type(), std::to_string(class_type->size())));
+ VisitResult allocate_result =
+ GenerateCall("%Allocate", allocate_arguments, {class_type}, false);
+ DCHECK(allocate_result.IsOnStack());
+
+  // Fill in the fields of the newly allocated class by copying the values
+  // from the struct that was built by the constructor. So that the generated
+  // code is a bit more readable, assign the values from the first class
+  // member to the last, in order. To do this, first build a list of fields
+  // to assign to in reverse order by visiting the class hierarchy.
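+  // Worked example (editorial; A and B are made-up classes): for class B
+  // extending A, with fields A{a} and B{b}, store_pairs is built as [b, a];
+  // the reverse-order emission loop below then stores a before b, matching
+  // declaration order.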
+ std::vector<std::pair<const Field*, VisitResult>> store_pairs;
+ const ClassType* current_class = class_type;
+ while (current_class != nullptr) {
+ auto& fields = current_class->fields();
+ for (auto i = fields.rbegin(); i != fields.rend(); ++i) {
+ store_pairs.push_back(std::make_pair(
+ &*i, ProjectStructField(new_struct_result, i->name_and_type.name)));
+ }
+ current_class = current_class->GetSuperClass();
+ if (current_class) {
+ new_struct_result = ProjectStructField(new_struct_result,
+ kConstructorStructSuperFieldName);
+ }
+ }
+
+ // Now that the reversed list of fields and the assignment VisitResults are
+ // available, emit the copies in reverse order of the reversed list to
+ // produce the class field assignments in the expected order.
+ for (auto i = store_pairs.rbegin(); i != store_pairs.rend(); ++i) {
+ assembler().Emit(
+ PeekInstruction(allocate_result.stack_range().begin(), class_type));
+ assembler().Emit(PeekInstruction(i->second.stack_range().begin(),
+ i->first->name_and_type.type));
+ assembler().Emit(
+ StoreObjectFieldInstruction(class_type, i->first->name_and_type.name));
+ }
+
+ return stack_scope.Yield(allocate_result);
+}
+
const Type* ImplementationVisitor::Visit(BreakStatement* stmt) {
- Block* break_block = global_context_.GetCurrentBreak();
- if (break_block == nullptr) {
+ base::Optional<Binding<LocalLabel>*> break_label = TryLookupLabel("_break");
+ if (!break_label) {
ReportError("break used outside of loop");
}
- assembler().Goto(break_block);
+ assembler().Goto((*break_label)->block);
return TypeOracle::GetNeverType();
}
const Type* ImplementationVisitor::Visit(ContinueStatement* stmt) {
- Block* continue_block = global_context_.GetCurrentContinue();
- if (continue_block == nullptr) {
+ base::Optional<Binding<LocalLabel>*> continue_label =
+ TryLookupLabel("_continue");
+ if (!continue_label) {
ReportError("continue used outside of loop");
}
- assembler().Goto(continue_block);
+ assembler().Goto((*continue_label)->block);
return TypeOracle::GetNeverType();
}
const Type* ImplementationVisitor::Visit(ForLoopStatement* stmt) {
- Declarations::NodeScopeActivator scope(declarations(), stmt);
- StackScope stack_scope(this);
+ BlockBindings<LocalValue> loop_bindings(&ValueBindingsManager::Get());
- if (stmt->var_declaration) Visit(*stmt->var_declaration);
+ if (stmt->var_declaration) Visit(*stmt->var_declaration, &loop_bindings);
- Label* body_label = declarations()->LookupLabel(kTrueLabelName);
- CreateBlockForLabel(body_label, assembler().CurrentStack());
- Label* exit_label = declarations()->LookupLabel(kFalseLabelName);
- CreateBlockForLabel(exit_label, assembler().CurrentStack());
+ Block* body_block = assembler().NewBlock(assembler().CurrentStack());
+ Block* exit_block = assembler().NewBlock(assembler().CurrentStack());
Block* header_block = assembler().NewBlock();
assembler().Goto(header_block);
@@ -1197,83 +1337,64 @@ const Type* ImplementationVisitor::Visit(ForLoopStatement* stmt) {
continue_block = action_block;
}
- BreakContinueActivator activator(global_context_, exit_label->block(),
- continue_block);
-
- std::vector<Label*> labels = {body_label, exit_label};
- bool generate_action = true;
if (stmt->test) {
- generate_action = GenerateExpressionBranch(*stmt->test, labels,
- {stmt->body}, continue_block);
+ GenerateExpressionBranch(*stmt->test, body_block, exit_block);
} else {
- GenerateLabelGoto(body_label);
- generate_action =
- GenerateLabeledStatementBlocks({stmt->body}, labels, continue_block);
+ assembler().Goto(body_block);
}
- if (generate_action && stmt->action) {
+ assembler().Bind(body_block);
+ {
+ BreakContinueActivator activator(exit_block, continue_block);
+ const Type* body_result = Visit(stmt->body);
+ if (body_result != TypeOracle::GetNeverType()) {
+ assembler().Goto(continue_block);
+ }
+ }
+
+ if (stmt->action) {
assembler().Bind(action_block);
- Visit(*stmt->action);
- assembler().Goto(header_block);
+ const Type* action_result = Visit(*stmt->action);
+ if (action_result != TypeOracle::GetNeverType()) {
+ assembler().Goto(header_block);
+ }
}
- GenerateLabelBind(exit_label);
+ assembler().Bind(exit_block);
return TypeOracle::GetVoidType();
}
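To summarize the wiring above: a for loop lowers to four blocks. A sketch of the resulting graph, assuming both a test and an action are present (when there is an action, continue_block aliases action_block):

  header_block: evaluate stmt->test, branch to body_block / exit_block
  body_block:   visit stmt->body, then goto continue_block
  action_block: visit stmt->action, then goto header_block
  exit_block:   code after the loop

break binds to exit_block and continue binds to continue_block via the BreakContinueActivator, which is how the Break/ContinueStatement visitors above find their targets.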
void ImplementationVisitor::GenerateImplementation(const std::string& dir,
- Module* module) {
- std::string new_source(module->source());
+ Namespace* nspace) {
+ std::string new_source(nspace->source());
std::string base_file_name =
- "builtins-" + DashifyString(module->name()) + "-from-dsl-gen";
+ "builtins-" + DashifyString(nspace->name()) + "-from-dsl-gen";
std::string source_file_name = dir + "/" + base_file_name + ".cc";
ReplaceFileContentsIfDifferent(source_file_name, new_source);
- std::string new_header(module->header());
+ std::string new_header(nspace->header());
std::string header_file_name = dir + "/" + base_file_name + ".h";
ReplaceFileContentsIfDifferent(header_file_name, new_header);
}
-std::string ImplementationVisitor::GetBaseAssemblerName(Module* module) {
- if (module == global_context_.GetDefaultModule()) {
- return "TorqueAssembler";
- } else {
- std::string assembler_name(CamelifyString(module->name()) +
- "BuiltinsAssembler");
- return assembler_name;
- }
-}
-
-std::string ImplementationVisitor::GetDSLAssemblerName(Module* module) {
- std::string assembler_name(CamelifyString(module->name()) +
- "BuiltinsFromDSLAssembler");
- return assembler_name;
-}
-
void ImplementationVisitor::GenerateMacroFunctionDeclaration(
std::ostream& o, const std::string& macro_prefix, Macro* macro) {
- GenerateFunctionDeclaration(o, macro_prefix, macro->name(),
+ GenerateFunctionDeclaration(o, macro_prefix, macro->ExternalName(),
macro->signature(), macro->parameter_names());
}
void ImplementationVisitor::GenerateFunctionDeclaration(
std::ostream& o, const std::string& macro_prefix, const std::string& name,
const Signature& signature, const NameVector& parameter_names) {
- if (global_context_.verbose()) {
+ if (GlobalContext::verbose()) {
std::cout << "generating source for declaration " << name << "\n";
}
- // Quite a hack here. Make sure that TNode is namespace qualified if the
- // macro/constant name is also qualified.
- std::string return_type_name(signature.return_type->GetGeneratedTypeName());
- if (const StructType* struct_type =
- StructType::DynamicCast(signature.return_type)) {
- o << GetDSLAssemblerName(struct_type->module()) << "::";
- } else if (macro_prefix != "" && (return_type_name.length() > 5) &&
- (return_type_name.substr(0, 5) == "TNode")) {
- o << "compiler::";
+ if (signature.return_type->IsVoidOrNever()) {
+ o << "void";
+ } else {
+ o << signature.return_type->GetGeneratedTypeName();
}
- o << return_type_name;
o << " " << macro_prefix << name << "(";
DCHECK_EQ(signature.types().size(), parameter_names.size());
@@ -1283,29 +1404,27 @@ void ImplementationVisitor::GenerateFunctionDeclaration(
if (!first) {
o << ", ";
}
- const Parameter* parameter =
- Parameter::cast(declarations()->LookupValue(name));
const Type* parameter_type = *type_iterator;
const std::string& generated_type_name =
parameter_type->GetGeneratedTypeName();
- o << generated_type_name << " " << parameter->external_name();
+ o << generated_type_name << " " << ExternalParameterName(name);
type_iterator++;
first = false;
}
for (const LabelDeclaration& label_info : signature.labels) {
- Label* label = declarations()->LookupLabel(label_info.name);
if (!first) {
o << ", ";
}
- o << "Label* " << label->external_label_name();
+ o << "compiler::CodeAssemblerLabel* " << ExternalLabelName(label_info.name);
size_t i = 0;
- for (Variable* var : label->GetParameters()) {
- std::string generated_type_name("TVariable<");
- generated_type_name += var->type()->GetGeneratedTNodeTypeName();
+ for (const Type* type : label_info.types) {
+ std::string generated_type_name("compiler::TypedCodeAssemblerVariable<");
+ generated_type_name += type->GetGeneratedTNodeTypeName();
generated_type_name += ">*";
o << ", ";
- o << generated_type_name << " " << ExternalLabelParameterName(label, i);
+ o << generated_type_name << " "
+ << ExternalLabelParameterName(label_info.name, i);
++i;
}
}
@@ -1315,107 +1434,150 @@ void ImplementationVisitor::GenerateFunctionDeclaration(
namespace {
-void PrintMacroSignatures(std::stringstream& s, const std::string& name,
- const std::vector<Macro*>& macros) {
- for (Macro* m : macros) {
- s << "\n " << name;
- PrintSignature(s, m->signature(), false);
- }
-}
-
-void FailMacroLookup(const std::string& reason, const std::string& name,
- const Arguments& arguments,
- const std::vector<Macro*>& candidates) {
+void FailCallableLookup(const std::string& reason, const QualifiedName& name,
+ const TypeVector& parameter_types,
+ const std::vector<Binding<LocalLabel>*>& labels,
+ const std::vector<Signature>& candidates) {
std::stringstream stream;
- stream << "\n"
- << reason << ": \n " << name << "("
- << arguments.parameters.GetTypeVector() << ")";
- if (arguments.labels.size() != 0) {
+ stream << "\n" << reason << ": \n " << name << "(" << parameter_types << ")";
+ if (labels.size() != 0) {
stream << " labels ";
- for (auto l : arguments.labels) {
- PrintLabel(stream, *l, false);
+ for (size_t i = 0; i < labels.size(); ++i) {
+ stream << labels[i]->name() << "(" << labels[i]->parameter_types << ")";
}
}
stream << "\ncandidates are:";
- PrintMacroSignatures(stream, name, candidates);
+ for (const Signature& signature : candidates) {
+ stream << "\n " << name;
+ PrintSignature(stream, signature, false);
+ }
ReportError(stream.str());
}
+Callable* GetOrCreateSpecialization(const SpecializationKey& key) {
+ if (base::Optional<Callable*> specialization =
+ key.generic->GetSpecialization(key.specialized_types)) {
+ return *specialization;
+ }
+ return DeclarationVisitor().SpecializeImplicit(key);
+}
+
} // namespace
-Callable* ImplementationVisitor::LookupCall(
- const std::string& name, const Arguments& arguments,
+base::Optional<Binding<LocalValue>*> ImplementationVisitor::TryLookupLocalValue(
+ const std::string& name) {
+ return ValueBindingsManager::Get().TryLookup(name);
+}
+
+base::Optional<Binding<LocalLabel>*> ImplementationVisitor::TryLookupLabel(
+ const std::string& name) {
+ return LabelBindingsManager::Get().TryLookup(name);
+}
+
+Binding<LocalLabel>* ImplementationVisitor::LookupLabel(
+ const std::string& name) {
+ base::Optional<Binding<LocalLabel>*> label = TryLookupLabel(name);
+ if (!label) ReportError("cannot find label ", name);
+ return *label;
+}
+
+Block* ImplementationVisitor::LookupSimpleLabel(const std::string& name) {
+ LocalLabel* label = LookupLabel(name);
+ if (!label->parameter_types.empty()) {
+ ReportError("label ", name,
+ "was expected to have no parameters, but has parameters (",
+ label->parameter_types, ")");
+ }
+ return label->block;
+}
+
+template <class Container>
+Callable* ImplementationVisitor::LookupCallable(
+ const QualifiedName& name, const Container& declaration_container,
+ const TypeVector& parameter_types,
+ const std::vector<Binding<LocalLabel>*>& labels,
const TypeVector& specialization_types) {
Callable* result = nullptr;
- TypeVector parameter_types(arguments.parameters.GetTypeVector());
- bool has_template_arguments = !specialization_types.empty();
- std::string mangled_name = name;
- if (has_template_arguments) {
- mangled_name = GetGeneratedCallableName(name, specialization_types);
- }
- Declarable* declarable = declarations()->Lookup(mangled_name);
- if (declarable->IsBuiltin()) {
- result = Builtin::cast(declarable);
- } else if (declarable->IsRuntimeFunction()) {
- result = RuntimeFunction::cast(declarable);
- } else if (declarable->IsMacroList()) {
- std::vector<Macro*> candidates;
- std::vector<Macro*> macros_with_same_name;
- for (Macro* m : MacroList::cast(declarable)->list()) {
- bool try_bool_context =
- arguments.labels.size() == 0 &&
- m->signature().return_type == TypeOracle::GetNeverType();
- Label* true_label = nullptr;
- Label* false_label = nullptr;
- if (try_bool_context) {
- true_label = declarations()->TryLookupLabel(kTrueLabelName);
- false_label = declarations()->TryLookupLabel(kFalseLabelName);
- }
- if (IsCompatibleSignature(m->signature(), parameter_types,
- arguments.labels) ||
- (true_label && false_label &&
- IsCompatibleSignature(m->signature(), parameter_types,
- {true_label, false_label}))) {
- candidates.push_back(m);
- } else {
- macros_with_same_name.push_back(m);
- }
- }
- if (candidates.empty() && macros_with_same_name.empty()) {
- std::stringstream stream;
- stream << "no matching declaration found for " << name;
- ReportError(stream.str());
- } else if (candidates.empty()) {
- FailMacroLookup("cannot find macro with name", name, arguments,
- macros_with_same_name);
- }
-
- auto is_better_candidate = [&](Macro* a, Macro* b) {
- return ParameterDifference(a->signature().parameter_types.types,
- parameter_types)
- .StrictlyBetterThan(ParameterDifference(
- b->signature().parameter_types.types, parameter_types));
- };
-
- Macro* best = *std::min_element(candidates.begin(), candidates.end(),
- is_better_candidate);
- for (Macro* candidate : candidates) {
- if (candidate != best && !is_better_candidate(best, candidate)) {
- FailMacroLookup("ambiguous macro", name, arguments, candidates);
+ std::vector<Declarable*> overloads;
+ std::vector<Signature> overload_signatures;
+ for (auto* declarable : declaration_container) {
+ if (Generic* generic = Generic::DynamicCast(declarable)) {
+ base::Optional<TypeVector> inferred_specialization_types =
+ generic->InferSpecializationTypes(specialization_types,
+ parameter_types);
+ if (!inferred_specialization_types) continue;
+ overloads.push_back(generic);
+ overload_signatures.push_back(
+ DeclarationVisitor().MakeSpecializedSignature(
+ SpecializationKey{generic, *inferred_specialization_types}));
+ } else if (Callable* callable = Callable::DynamicCast(declarable)) {
+ overloads.push_back(callable);
+ overload_signatures.push_back(callable->signature());
+ }
+ }
+ // Indices of candidates in overloads/overload_signatures.
+ std::vector<size_t> candidates;
+ for (size_t i = 0; i < overloads.size(); ++i) {
+ const Signature& signature = overload_signatures[i];
+ bool try_bool_context = labels.size() == 0 &&
+ signature.return_type == TypeOracle::GetNeverType();
+ base::Optional<Binding<LocalLabel>*> true_label;
+ base::Optional<Binding<LocalLabel>*> false_label;
+ if (try_bool_context) {
+ true_label = TryLookupLabel(kTrueLabelName);
+ false_label = TryLookupLabel(kFalseLabelName);
+ }
+ if (IsCompatibleSignature(signature, parameter_types, labels) ||
+ (true_label && false_label &&
+ IsCompatibleSignature(signature, parameter_types,
+ {*true_label, *false_label}))) {
+ candidates.push_back(i);
+ }
+ }
+
+ if (overloads.empty()) {
+ std::stringstream stream;
+ stream << "no matching declaration found for " << name;
+ ReportError(stream.str());
+ } else if (candidates.empty()) {
+ FailCallableLookup("cannot find suitable callable with name", name,
+ parameter_types, labels, overload_signatures);
+ }
+
+ auto is_better_candidate = [&](size_t a, size_t b) {
+ return ParameterDifference(overload_signatures[a].GetExplicitTypes(),
+ parameter_types)
+ .StrictlyBetterThan(ParameterDifference(
+ overload_signatures[b].GetExplicitTypes(), parameter_types));
+ };
+
+ size_t best = *std::min_element(candidates.begin(), candidates.end(),
+ is_better_candidate);
+ // This check is contained in libstdc++'s std::min_element.
+ DCHECK(!is_better_candidate(best, best));
+ for (size_t candidate : candidates) {
+ if (candidate != best && !is_better_candidate(best, candidate)) {
+ std::vector<Signature> candidate_signatures;
+ for (size_t i : candidates) {
+ candidate_signatures.push_back(overload_signatures[i]);
}
+ FailCallableLookup("ambiguous callable ", name, parameter_types, labels,
+ candidate_signatures);
}
- result = best;
+ }
+
+ if (Generic* generic = Generic::DynamicCast(overloads[best])) {
+ result = GetOrCreateSpecialization(
+ SpecializationKey{generic, *generic->InferSpecializationTypes(
+ specialization_types, parameter_types)});
} else {
- std::stringstream stream;
- stream << "can't call " << declarable->type_name() << " " << name
- << " because it's not callable"
- << ": call parameters were (" << parameter_types << ")";
- ReportError(stream.str());
+ result = Callable::cast(overloads[best]);
}
size_t caller_size = parameter_types.size();
- size_t callee_size = result->signature().types().size();
+ size_t callee_size =
+ result->signature().types().size() - result->signature().implicit_count;
if (caller_size != callee_size &&
!result->signature().parameter_types.var_args) {
std::stringstream stream;
@@ -1425,19 +1587,30 @@ Callable* ImplementationVisitor::LookupCall(
ReportError(stream.str());
}
- if (has_template_arguments) {
- Generic* generic = *result->generic();
- CallableNode* callable = generic->declaration()->callable;
- if (generic->declaration()->body) {
- QueueGenericSpecialization({generic, specialization_types}, callable,
- callable->signature.get(),
- generic->declaration()->body);
- }
- }
-
return result;
}
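The ambiguity check above relies on ParameterDifference inducing a strict ordering: std::min_element returns some minimal candidate, and the loop then verifies that this candidate is strictly better than every other one. A compilable sketch of that rule, with an integer score standing in for ParameterDifference (lower is better; the names are invented):

#include <algorithm>
#include <cassert>
#include <cstddef>
#include <vector>

int main() {
  std::vector<int> score = {3, 1, 3};  // one score per overload
  auto is_better = [&](size_t a, size_t b) { return score[a] < score[b]; };

  std::vector<size_t> candidates = {0, 1, 2};
  size_t best = *std::min_element(candidates.begin(), candidates.end(),
                                  is_better);
  for (size_t c : candidates) {
    // Candidate 1 beats both others here. With score = {1, 1, 3},
    // neither 0 nor 1 would beat the other and the lookup would be
    // reported as ambiguous.
    assert(c == best || is_better(best, c));
  }
}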
+template <class Container>
+Callable* ImplementationVisitor::LookupCallable(
+ const QualifiedName& name, const Container& declaration_container,
+ const Arguments& arguments, const TypeVector& specialization_types) {
+ return LookupCallable(name, declaration_container,
+ arguments.parameters.GetTypeVector(), arguments.labels,
+ specialization_types);
+}
+
+Method* ImplementationVisitor::LookupMethod(
+ const std::string& name, LocationReference this_reference,
+ const Arguments& arguments, const TypeVector& specialization_types) {
+ TypeVector types(arguments.parameters.GetTypeVector());
+ types.insert(types.begin(), this_reference.GetVisitResult().type());
+ return Method::cast(
+ LookupCallable({{}, name},
+ AggregateType::cast(this_reference.GetVisitResult().type())
+ ->Methods(name),
+ types, arguments.labels, specialization_types));
+}
+
const Type* ImplementationVisitor::GetCommonType(const Type* left,
const Type* right) {
const Type* common_type;
@@ -1461,29 +1634,28 @@ VisitResult ImplementationVisitor::GenerateCopy(const VisitResult& to_copy) {
}
VisitResult ImplementationVisitor::Visit(StructExpression* decl) {
- const Type* raw_type = declarations()->LookupType(decl->name);
+ StackScope stack_scope(this);
+ const Type* raw_type = Declarations::LookupType(
+ QualifiedName(decl->namespace_qualification, decl->name));
if (!raw_type->IsStructType()) {
std::stringstream s;
s << decl->name << " is not a struct but used like one ";
ReportError(s.str());
}
const StructType* struct_type = StructType::cast(raw_type);
- if (struct_type->fields().size() != decl->expressions.size()) {
- std::stringstream s;
- s << "initializer count mismatch for struct " << decl->name << " (expected "
- << struct_type->fields().size() << ", found " << decl->expressions.size()
- << ")";
- ReportError(s.str());
+ // Push uninitialized 'this'.
+ VisitResult uninitialized_struct = TemporaryUninitializedStruct(
+ struct_type,
+ "it's not set in the constructor for struct " + struct_type->name());
+ Arguments constructor_arguments;
+ for (auto p : decl->expressions) {
+ constructor_arguments.parameters.push_back(Visit(p));
}
- StackRange stack_range = assembler().TopRange(0);
- for (size_t i = 0; i < struct_type->fields().size(); ++i) {
- const NameAndType& field = struct_type->fields()[i];
- StackScope scope(this);
- VisitResult value = Visit(decl->expressions[i]);
- value = GenerateImplicitConvert(field.type, value);
- stack_range.Extend(scope.Yield(value).stack_range());
- }
- return VisitResult(struct_type, stack_range);
+ LocationReference this_ref =
+ LocationReference::VariableAccess(uninitialized_struct);
+ Callable* callable = LookupConstructor(this_ref, constructor_arguments, {});
+ GenerateCall(callable, this_ref, constructor_arguments, {}, false);
+ return stack_scope.Yield(uninitialized_struct);
}
LocationReference ImplementationVisitor::GetLocationReference(
@@ -1528,26 +1700,61 @@ LocationReference ImplementationVisitor::GetLocationReference(
LocationReference ImplementationVisitor::GetLocationReference(
IdentifierExpression* expr) {
- Value* value = declarations()->LookupValue(expr->name);
- if (auto* constant = ModuleConstant::DynamicCast(value)) {
+ if (expr->namespace_qualification.empty()) {
+ if (base::Optional<Binding<LocalValue>*> value =
+ TryLookupLocalValue(expr->name)) {
+ if (expr->generic_arguments.size() != 0) {
+ ReportError("cannot have generic parameters on local name ",
+ expr->name);
+ }
+ if ((*value)->is_const) {
+ return LocationReference::Temporary((*value)->value,
+ "constant value " + expr->name);
+ }
+ return LocationReference::VariableAccess((*value)->value);
+ }
+ }
+
+ if (expr->IsThis()) {
+ ReportError("\"this\" cannot be qualified");
+ }
+ QualifiedName name = QualifiedName(expr->namespace_qualification, expr->name);
+ if (base::Optional<Builtin*> builtin = Declarations::TryLookupBuiltin(name)) {
+ return LocationReference::Temporary(GetBuiltinCode(*builtin),
+ "builtin " + expr->name);
+ }
+ if (expr->generic_arguments.size() != 0) {
+ Generic* generic = Declarations::LookupUniqueGeneric(name);
+ Callable* specialization = GetOrCreateSpecialization(
+ SpecializationKey{generic, GetTypeVector(expr->generic_arguments)});
+ if (Builtin* builtin = Builtin::DynamicCast(specialization)) {
+ DCHECK(!builtin->IsExternal());
+ return LocationReference::Temporary(GetBuiltinCode(builtin),
+ "builtin " + expr->name);
+ } else {
+ ReportError("cannot create function pointer for non-builtin ",
+ generic->name());
+ }
+ }
+ Value* value = Declarations::LookupValue(name);
+ if (auto* constant = NamespaceConstant::DynamicCast(value)) {
if (constant->type()->IsConstexpr()) {
return LocationReference::Temporary(
- VisitResult(constant->type(), constant->constant_name() + "()"),
- "module constant " + expr->name);
+ VisitResult(constant->type(), constant->ExternalAssemblerName() +
+ "(state_)." +
+ constant->constant_name() + "()"),
+ "namespace constant " + expr->name);
}
- assembler().Emit(ModuleConstantInstruction{constant});
+ assembler().Emit(NamespaceConstantInstruction{constant});
StackRange stack_range =
assembler().TopRange(LoweredSlotCount(constant->type()));
return LocationReference::Temporary(
VisitResult(constant->type(), stack_range),
- "module constant " + expr->name);
+ "namespace constant " + expr->name);
}
- if (value->IsConst()) {
- return LocationReference::Temporary(value->value(),
- "constant value " + expr->name);
- }
- DCHECK(value->IsVariable());
- return LocationReference::VariableAccess(value->value());
+ ExternConstant* constant = ExternConstant::cast(value);
+ return LocationReference::Temporary(constant->value(),
+ "extern value " + expr->name);
}
VisitResult ImplementationVisitor::GenerateFetchFromLocation(
@@ -1577,7 +1784,8 @@ void ImplementationVisitor::GenerateAssignToLocation(
variable.type());
} else {
DCHECK(reference.IsTemporary());
- ReportError("cannot assign to ", reference.temporary_description());
+ ReportError("cannot assign to temporary ",
+ reference.temporary_description());
}
}
@@ -1586,14 +1794,14 @@ VisitResult ImplementationVisitor::GeneratePointerCall(
StackScope scope(this);
TypeVector parameter_types(arguments.parameters.GetTypeVector());
VisitResult callee_result = Visit(callee);
- if (!callee_result.type()->IsFunctionPointerType()) {
+ if (!callee_result.type()->IsBuiltinPointerType()) {
std::stringstream stream;
stream << "Expected a function pointer type but found "
<< *callee_result.type();
ReportError(stream.str());
}
- const FunctionPointerType* type =
- FunctionPointerType::cast(callee_result.type());
+ const BuiltinPointerType* type =
+ BuiltinPointerType::cast(callee_result.type());
if (type->parameter_types().size() != parameter_types.size()) {
std::stringstream stream;
@@ -1624,16 +1832,8 @@ VisitResult ImplementationVisitor::GeneratePointerCall(
.stack_range());
}
- Builtin* example_builtin =
- declarations()->FindSomeInternalBuiltinWithType(type);
- if (!example_builtin) {
- std::stringstream stream;
- stream << "unable to find any builtin with type \"" << *type << "\"";
- ReportError(stream.str());
- }
-
- assembler().Emit(CallBuiltinPointerInstruction{is_tailcall, example_builtin,
- arg_range.Size()});
+ assembler().Emit(
+ CallBuiltinPointerInstruction{is_tailcall, type, arg_range.Size()});
if (is_tailcall) {
return VisitResult::NeverResult();
@@ -1642,19 +1842,32 @@ VisitResult ImplementationVisitor::GeneratePointerCall(
return scope.Yield(VisitResult(type->return_type(), assembler().TopRange(1)));
}
-VisitResult ImplementationVisitor::GenerateCall(
- const std::string& callable_name, Arguments arguments,
- const TypeVector& specialization_types, bool is_tailcall) {
- Callable* callable =
- LookupCall(callable_name, arguments, specialization_types);
+void ImplementationVisitor::AddCallParameter(
+ Callable* callable, VisitResult parameter, const Type* parameter_type,
+ std::vector<VisitResult>* converted_arguments, StackRange* argument_range,
+ std::vector<std::string>* constexpr_arguments) {
+ VisitResult converted = GenerateImplicitConvert(parameter_type, parameter);
+ converted_arguments->push_back(converted);
+ if (!callable->ShouldBeInlined()) {
+ if (converted.IsOnStack()) {
+ argument_range->Extend(converted.stack_range());
+ } else {
+ constexpr_arguments->push_back(converted.constexpr_value());
+ }
+ }
+}
+VisitResult ImplementationVisitor::GenerateCall(
+ Callable* callable, base::Optional<LocationReference> this_reference,
+ Arguments arguments, const TypeVector& specialization_types,
+ bool is_tailcall) {
// Operators used in a branching context can also be function calls that never
// return but have a True and False label
if (arguments.labels.size() == 0 &&
callable->signature().labels.size() == 2) {
- Label* true_label = declarations()->LookupLabel(kTrueLabelName);
+ Binding<LocalLabel>* true_label = LookupLabel(kTrueLabelName);
arguments.labels.push_back(true_label);
- Label* false_label = declarations()->LookupLabel(kFalseLabelName);
+ Binding<LocalLabel>* false_label = LookupLabel(kFalseLabelName);
arguments.labels.push_back(false_label);
}
@@ -1663,36 +1876,81 @@ VisitResult ImplementationVisitor::GenerateCall(
std::vector<VisitResult> converted_arguments;
StackRange argument_range = assembler().TopRange(0);
std::vector<std::string> constexpr_arguments;
- for (size_t current = 0; current < arguments.parameters.size(); ++current) {
- const Type* to_type = (current >= callable->signature().types().size())
- ? TypeOracle::GetObjectType()
- : callable->signature().types()[current];
- VisitResult converted =
- GenerateImplicitConvert(to_type, arguments.parameters[current]);
- converted_arguments.push_back(converted);
- if (converted.IsOnStack()) {
- argument_range.Extend(converted.stack_range());
+
+ size_t current = 0;
+ for (; current < callable->signature().implicit_count; ++current) {
+ std::string implicit_name = callable->signature().parameter_names[current];
+ base::Optional<Binding<LocalValue>*> val =
+ TryLookupLocalValue(implicit_name);
+ if (!val) {
+ ReportError("implicit parameter '", implicit_name,
+ "' required for call to '", callable->ReadableName(),
+ "' is not defined");
+ }
+ AddCallParameter(callable, (*val)->value,
+ callable->signature().parameter_types.types[current],
+ &converted_arguments, &argument_range,
+ &constexpr_arguments);
+ }
+
+ if (this_reference) {
+ DCHECK(callable->IsMethod());
+ Method* method = Method::cast(callable);
+ // By now, the this reference should either be a variable or
+ // a temporary, in both cases the fetch of the VisitResult should succeed.
+ VisitResult this_value = this_reference->GetVisitResult();
+ if (method->ShouldBeInlined()) {
+ if (!this_value.type()->IsSubtypeOf(method->aggregate_type())) {
+ ReportError("this parameter must be a subtype of ",
+ *method->aggregate_type(), " but it is of type ",
+ this_value.type());
+ }
} else {
- constexpr_arguments.push_back(converted.constexpr_value());
+ AddCallParameter(callable, this_value, method->aggregate_type(),
+ &converted_arguments, &argument_range,
+ &constexpr_arguments);
}
+ ++current;
+ }
+
+ for (auto arg : arguments.parameters) {
+ const Type* to_type = (current >= callable->signature().types().size())
+ ? TypeOracle::GetObjectType()
+ : callable->signature().types()[current++];
+ AddCallParameter(callable, arg, to_type, &converted_arguments,
+ &argument_range, &constexpr_arguments);
}
- if (global_context_.verbose()) {
- std::cout << "generating code for call to " << callable_name << "\n";
+ if (GlobalContext::verbose()) {
+ std::cout << "generating code for call to " << callable->ReadableName()
+ << "\n";
}
size_t label_count = callable->signature().labels.size();
if (label_count != arguments.labels.size()) {
std::stringstream s;
- s << "unexpected number of otherwise labels for " << callable->name()
- << " (expected " << std::to_string(label_count) << " found "
+ s << "unexpected number of otherwise labels for "
+ << callable->ReadableName() << " (expected "
+ << std::to_string(label_count) << " found "
<< std::to_string(arguments.labels.size()) << ")";
ReportError(s.str());
}
+ if (callable->IsTransitioning()) {
+ if (!CurrentCallable::Get()->IsTransitioning()) {
+ std::stringstream s;
+ s << *CurrentCallable::Get()
+ << " isn't marked transitioning but calls the transitioning "
+ << *callable;
+ ReportError(s.str());
+ }
+ }
+
if (auto* builtin = Builtin::DynamicCast(callable)) {
- assembler().Emit(
- CallBuiltinInstruction{is_tailcall, builtin, argument_range.Size()});
+ base::Optional<Block*> catch_block = GetCatchBlock();
+ assembler().Emit(CallBuiltinInstruction{
+ is_tailcall, builtin, argument_range.Size(), catch_block});
+ GenerateCatchBlock(catch_block);
if (is_tailcall) {
return VisitResult::NeverResult();
} else {
@@ -1709,7 +1967,8 @@ VisitResult ImplementationVisitor::GenerateCall(
if (return_type->IsConstexpr()) {
DCHECK_EQ(0, arguments.labels.size());
std::stringstream result;
- result << "(" << macro->name() << "(";
+ result << "(" << macro->external_assembler_name() << "(state_)."
+ << macro->ExternalName() << "(";
bool first = true;
for (VisitResult arg : arguments.parameters) {
DCHECK(!arg.IsOnStack());
@@ -1721,9 +1980,19 @@ VisitResult ImplementationVisitor::GenerateCall(
}
result << "))";
return VisitResult(return_type, result.str());
+ } else if (macro->ShouldBeInlined()) {
+ std::vector<Block*> label_blocks;
+ for (Binding<LocalLabel>* label : arguments.labels) {
+ label_blocks.push_back(label->block);
+ }
+ return InlineMacro(macro, this_reference, converted_arguments,
+ label_blocks);
} else if (arguments.labels.empty() &&
return_type != TypeOracle::GetNeverType()) {
- assembler().Emit(CallCsaMacroInstruction{macro, constexpr_arguments});
+ base::Optional<Block*> catch_block = GetCatchBlock();
+ assembler().Emit(
+ CallCsaMacroInstruction{macro, constexpr_arguments, catch_block});
+ GenerateCatchBlock(catch_block);
size_t return_slot_count = LoweredSlotCount(return_type);
return VisitResult(return_type, assembler().TopRange(return_slot_count));
} else {
@@ -1737,40 +2006,38 @@ VisitResult ImplementationVisitor::GenerateCall(
for (size_t i = 0; i < label_count; ++i) {
label_blocks.push_back(assembler().NewBlock());
}
-
+ base::Optional<Block*> catch_block = GetCatchBlock();
assembler().Emit(CallCsaMacroAndBranchInstruction{
- macro, constexpr_arguments, return_continuation, label_blocks});
+ macro, constexpr_arguments, return_continuation, label_blocks,
+ catch_block});
+ GenerateCatchBlock(catch_block);
for (size_t i = 0; i < label_count; ++i) {
- Label* label = arguments.labels[i];
+ Binding<LocalLabel>* label = arguments.labels[i];
size_t callee_label_parameters =
callable->signature().labels[i].types.size();
- if (label->GetParameterCount() != callee_label_parameters) {
+ if (label->parameter_types.size() != callee_label_parameters) {
std::stringstream s;
s << "label " << label->name()
<< " doesn't have the right number of parameters (found "
- << std::to_string(label->GetParameterCount()) << " expected "
+ << std::to_string(label->parameter_types.size()) << " expected "
<< std::to_string(callee_label_parameters) << ")";
ReportError(s.str());
}
assembler().Bind(label_blocks[i]);
assembler().Goto(
- label->block(),
+ label->block,
LowerParameterTypes(callable->signature().labels[i].types).size());
size_t j = 0;
for (auto t : callable->signature().labels[i].types) {
- Variable* variable = label->GetParameter(j);
- if (!(variable->type() == t)) {
- std::stringstream s;
- s << "mismatch of label parameters (expected " << *t << " got "
- << *label->GetParameter(j)->type() << " for parameter "
- << std::to_string(i + 1) << ")";
- ReportError(s.str());
+ const Type* parameter_type = label->parameter_types[j];
+ if (parameter_type != t) {
+ ReportError("mismatch of label parameters (expected ", *t, " got ",
+ *parameter_type, " for parameter ", i + 1, ")");
}
j++;
}
- label->MarkUsed();
}
if (return_continuation) {
@@ -1783,9 +2050,11 @@ VisitResult ImplementationVisitor::GenerateCall(
}
}
} else if (auto* runtime_function = RuntimeFunction::DynamicCast(callable)) {
- assembler().Emit(CallRuntimeInstruction{is_tailcall, runtime_function,
- argument_range.Size()});
- if (is_tailcall) {
+ base::Optional<Block*> catch_block = GetCatchBlock();
+ assembler().Emit(CallRuntimeInstruction{
+ is_tailcall, runtime_function, argument_range.Size(), catch_block});
+ GenerateCatchBlock(catch_block);
+ if (is_tailcall || return_type == TypeOracle::GetNeverType()) {
return VisitResult::NeverResult();
} else {
size_t slot_count = LoweredSlotCount(return_type);
@@ -1794,113 +2063,204 @@ VisitResult ImplementationVisitor::GenerateCall(
// we should assert slot_count == 1 here.
return VisitResult(return_type, assembler().TopRange(slot_count));
}
+ } else if (auto* intrinsic = Intrinsic::DynamicCast(callable)) {
+ if (intrinsic->ExternalName() == "%RawConstexprCast") {
+ if (intrinsic->signature().parameter_types.types.size() != 1 ||
+ constexpr_arguments.size() != 1) {
+ ReportError(
+ "%RawConstexprCast must take a single parameter with constexpr "
+ "type");
+ }
+ if (!return_type->IsConstexpr()) {
+ std::stringstream s;
+ s << *return_type
+ << " return type for %RawConstexprCast is not constexpr";
+ ReportError(s.str());
+ }
+ std::stringstream result;
+ result << "static_cast<" << return_type->GetGeneratedTypeName() << ">(";
+ result << constexpr_arguments[0];
+ result << ")";
+ return VisitResult(return_type, result.str());
+ } else {
+ assembler().Emit(
+ CallIntrinsicInstruction{intrinsic, constexpr_arguments});
+ size_t return_slot_count =
+ LoweredSlotCount(intrinsic->signature().return_type);
+ return VisitResult(return_type, assembler().TopRange(return_slot_count));
+ }
} else {
UNREACHABLE();
}
}
-void ImplementationVisitor::Visit(StandardDeclaration* decl) {
- Signature signature = MakeSignature(decl->callable->signature.get());
- Visit(decl->callable, signature, decl->body);
-}
-
-void ImplementationVisitor::Visit(SpecializationDeclaration* decl) {
- Signature signature_with_types = MakeSignature(decl->signature.get());
- Declarations::NodeScopeActivator specialization_activator(declarations(),
- decl);
- GenericList* generic_list = declarations()->LookupGeneric(decl->name);
- for (Generic* generic : generic_list->list()) {
- CallableNode* callable = generic->declaration()->callable;
- Signature generic_signature_with_types =
- MakeSignature(callable->signature.get());
- if (signature_with_types.HasSameTypesAs(generic_signature_with_types)) {
- TypeVector specialization_types = GetTypeVector(decl->generic_parameters);
- SpecializeGeneric({{generic, specialization_types},
- callable,
- decl->signature.get(),
- decl->body,
- decl->pos});
- return;
- }
- }
- // Because the DeclarationVisitor already performed the same lookup
- // as above to find aspecialization match and already threw if it didn't
- // find one, failure to find a match here should never happen.
- // TODO(danno): Remember the specialization found in the declaration visitor
- // so that the lookup doesn't have to be repeated here.
- UNREACHABLE();
+VisitResult ImplementationVisitor::GenerateCall(
+ const QualifiedName& callable_name, Arguments arguments,
+ const TypeVector& specialization_types, bool is_tailcall) {
+ Callable* callable =
+ LookupCallable(callable_name, Declarations::Lookup(callable_name),
+ arguments, specialization_types);
+ return GenerateCall(callable, base::nullopt, arguments, specialization_types,
+ is_tailcall);
}
VisitResult ImplementationVisitor::Visit(CallExpression* expr,
bool is_tailcall) {
StackScope scope(this);
Arguments arguments;
- std::string name = expr->callee.name;
+ QualifiedName name =
+ QualifiedName(expr->callee->namespace_qualification, expr->callee->name);
TypeVector specialization_types =
- GetTypeVector(expr->callee.generic_arguments);
+ GetTypeVector(expr->callee->generic_arguments);
bool has_template_arguments = !specialization_types.empty();
for (Expression* arg : expr->arguments)
arguments.parameters.push_back(Visit(arg));
arguments.labels = LabelsFromIdentifiers(expr->labels);
- VisitResult result;
- if (!has_template_arguments &&
- declarations()->Lookup(expr->callee.name)->IsValue()) {
+ if (!has_template_arguments && name.namespace_qualification.empty() &&
+ TryLookupLocalValue(name.name)) {
return scope.Yield(
- GeneratePointerCall(&expr->callee, arguments, is_tailcall));
+ GeneratePointerCall(expr->callee, arguments, is_tailcall));
} else {
return scope.Yield(
GenerateCall(name, arguments, specialization_types, is_tailcall));
}
}
-bool ImplementationVisitor::GenerateLabeledStatementBlocks(
- const std::vector<Statement*>& blocks,
- const std::vector<Label*>& statement_labels, Block* merge_block) {
- bool live = false;
- auto label_iterator = statement_labels.begin();
- for (Statement* block : blocks) {
- GenerateLabelBind(*label_iterator++);
- const Type* stmt_result;
- {
- StackScope stack_scope(this);
- stmt_result = Visit(block);
- }
- if (stmt_result != TypeOracle::GetNeverType()) {
- assembler().Goto(merge_block);
- live = true;
+VisitResult ImplementationVisitor::Visit(CallMethodExpression* expr) {
+ StackScope scope(this);
+ Arguments arguments;
+ std::string method_name = expr->method->name;
+ TypeVector specialization_types =
+ GetTypeVector(expr->method->generic_arguments);
+ LocationReference target = GetLocationReference(expr->target);
+ if (!target.IsVariableAccess()) {
+ VisitResult result = GenerateFetchFromLocation(target);
+ target = LocationReference::Temporary(result, "method target result");
+ }
+ const AggregateType* target_type =
+ AggregateType::DynamicCast(target.GetVisitResult().type());
+ if (!target_type) {
+ ReportError("target of method call not a struct or class type");
+ }
+ if (method_name == kConstructMethodName || method_name == kSuperMethodName) {
+ if (CurrentConstructorInfo::Get()) {
+ ConstructorInfo& info = *CurrentConstructorInfo::Get();
+ if (method_name == kSuperMethodName) {
+ if (info.super_calls != 0) {
+ ReportError("\"super\" can only be called once from a constructor");
+ }
+ ++info.super_calls;
+ DCHECK(target_type->IsStructType());
+ base::Optional<const ClassType*> derived_from =
+ StructType::cast(target_type)->GetDerivedFrom();
+ if (!derived_from) {
+ ReportError("\"super\" can only be called from class constructors");
+ }
+ if ((*derived_from)->GetSuperClass() == nullptr) {
+ ReportError(
+ "\"super\" can only be called in constructors for derived "
+ "classes");
+ }
+ } else {
+ ReportError("cannot call a constructor from a constructor");
+ }
+ } else {
+ ReportError(
+ "cannot call a constructor or \"super\" from a non-constructor");
}
}
- return live;
+ for (Expression* arg : expr->arguments) {
+ arguments.parameters.push_back(Visit(arg));
+ }
+ arguments.labels = LabelsFromIdentifiers(expr->labels);
+ TypeVector argument_types = arguments.parameters.GetTypeVector();
+ DCHECK_EQ(expr->method->namespace_qualification.size(), 0);
+ QualifiedName qualified_name = QualifiedName(method_name);
+ Callable* callable = nullptr;
+ if (method_name == kConstructMethodName) {
+ callable = LookupConstructor(target, arguments, {});
+ } else if (method_name == kSuperMethodName) {
+ LocationReference super_this =
+ LocationReference::VariableAccess(ProjectStructField(
+ target.GetVisitResult(), kConstructorStructSuperFieldName));
+ callable = LookupConstructor(super_this, arguments, {});
+ VisitResult super_result =
+ GenerateCall(callable, super_this, arguments, {}, false);
+ return scope.Yield(super_result);
+ } else {
+ callable = LookupMethod(method_name, target, arguments, {});
+ }
+ return scope.Yield(GenerateCall(callable, target, arguments, {}, false));
}
-void ImplementationVisitor::GenerateBranch(const VisitResult& condition,
- Label* true_label,
- Label* false_label) {
- DCHECK_EQ(condition,
- VisitResult(TypeOracle::GetBoolType(), assembler().TopRange(1)));
- assembler().Branch(true_label->block(), false_label->block());
+VisitResult ImplementationVisitor::Visit(LoadObjectFieldExpression* expr) {
+ VisitResult base_result = Visit(expr->base);
+ auto class_type = ClassType::DynamicCast(base_result.type());
+ if (!class_type) {
+ ReportError(
+ "base expression for a LoadObjectFieldExpression is not a class type "
+ "but instead ",
+ *base_result.type());
+ }
+ VisitResult result = base_result;
+ assembler().Emit(LoadObjectFieldInstruction{class_type, expr->field_name});
+ const Field& field = class_type->LookupField(expr->field_name);
+ result.SetType(field.name_and_type.type);
+ return result;
+}
+
+VisitResult ImplementationVisitor::Visit(StoreObjectFieldExpression* expr) {
+ VisitResult base_result = Visit(expr->base);
+ auto class_type = ClassType::DynamicCast(base_result.type());
+ if (!class_type) {
+ ReportError(
+ "base expression for a StoreObjectFieldExpression is not a class type "
+ "but instead ",
+ *base_result.type());
+ }
+ VisitResult value = Visit(expr->value);
+ assembler().Emit(StoreObjectFieldInstruction{class_type, expr->field_name});
+ return VisitResult(value.type(), assembler().TopRange(0));
}
-bool ImplementationVisitor::GenerateExpressionBranch(
- Expression* expression, const std::vector<Label*>& statement_labels,
- const std::vector<Statement*>& statement_blocks, Block* merge_block) {
- // Activate a new scope to define True/False catch labels
- Declarations::NodeScopeActivator scope(declarations(), expression);
+VisitResult ImplementationVisitor::Visit(IntrinsicCallExpression* expr) {
+ StackScope scope(this);
+ Arguments arguments;
+ TypeVector specialization_types = GetTypeVector(expr->generic_arguments);
+ for (Expression* arg : expr->arguments)
+ arguments.parameters.push_back(Visit(arg));
+ return scope.Yield(
+ GenerateCall(expr->name, arguments, specialization_types, false));
+}
+void ImplementationVisitor::GenerateBranch(const VisitResult& condition,
+ Block* true_block,
+ Block* false_block) {
+ DCHECK_EQ(condition,
+ VisitResult(TypeOracle::GetBoolType(), assembler().TopRange(1)));
+ assembler().Branch(true_block, false_block);
+}
+
+void ImplementationVisitor::GenerateExpressionBranch(Expression* expression,
+ Block* true_block,
+ Block* false_block) {
+ // Conditional expressions can either explicitly return a bit
+ // type, or they can be backed by macros that don't return but
+ // take a true and false label. By declaring the labels before
+ // visiting the conditional expression, those label-based
+ // macro conditionals will be able to find them through normal
+ // label lookups.
+ Binding<LocalLabel> true_binding{&LabelBindingsManager::Get(), kTrueLabelName,
+ LocalLabel{true_block}};
+ Binding<LocalLabel> false_binding{&LabelBindingsManager::Get(),
+ kFalseLabelName, LocalLabel{false_block}};
+ StackScope stack_scope(this);
VisitResult expression_result = Visit(expression);
- if (expression_result.type() == TypeOracle::GetBoolType()) {
- GenerateBranch(expression_result, statement_labels[0], statement_labels[1]);
- } else {
- if (expression_result.type() != TypeOracle::GetNeverType()) {
- std::stringstream s;
- s << "unexpected return type " << *expression_result.type()
- << " for branch expression";
- ReportError(s.str());
- }
+ if (!expression_result.type()->IsNever()) {
+ expression_result = stack_scope.Yield(
+ GenerateImplicitConvert(TypeOracle::GetBoolType(), expression_result));
+ GenerateBranch(expression_result, true_block, false_block);
}
-
- return GenerateLabeledStatementBlocks(statement_blocks, statement_labels,
- merge_block);
}
VisitResult ImplementationVisitor::GenerateImplicitConvert(
@@ -1916,9 +2276,8 @@ VisitResult ImplementationVisitor::GenerateImplicitConvert(
if (TypeOracle::IsImplicitlyConvertableFrom(destination_type,
source.type())) {
- std::string name =
- GetGeneratedCallableName(kFromConstexprMacroName, {destination_type});
- return scope.Yield(GenerateCall(name, {{source}, {}}, {}, false));
+ return scope.Yield(GenerateCall(kFromConstexprMacroName, {{source}, {}},
+ {destination_type, source.type()}, false));
} else if (IsAssignableFrom(destination_type, source.type())) {
source.SetType(destination_type);
return scope.Yield(GenerateCopy(source));
@@ -1930,26 +2289,17 @@ VisitResult ImplementationVisitor::GenerateImplicitConvert(
}
}
-void ImplementationVisitor::CreateBlockForLabel(Label* label,
- Stack<const Type*> stack) {
- label->set_block(assembler().NewBlock(std::move(stack), label->IsDeferred()));
-}
-
-void ImplementationVisitor::GenerateLabelBind(Label* label) {
- assembler().Bind(label->block());
-}
-
StackRange ImplementationVisitor::GenerateLabelGoto(
- Label* label, base::Optional<StackRange> arguments) {
- return assembler().Goto(label->block(), arguments ? arguments->Size() : 0);
+ LocalLabel* label, base::Optional<StackRange> arguments) {
+ return assembler().Goto(label->block, arguments ? arguments->Size() : 0);
}
-std::vector<Label*> ImplementationVisitor::LabelsFromIdentifiers(
+std::vector<Binding<LocalLabel>*> ImplementationVisitor::LabelsFromIdentifiers(
const std::vector<std::string>& names) {
- std::vector<Label*> result;
+ std::vector<Binding<LocalLabel>*> result;
result.reserve(names.size());
for (const auto& name : names) {
- result.push_back(declarations()->LookupLabel(name));
+ result.push_back(LookupLabel(name));
}
return result;
}
@@ -1957,12 +2307,12 @@ std::vector<Label*> ImplementationVisitor::LabelsFromIdentifiers(
StackRange ImplementationVisitor::LowerParameter(
const Type* type, const std::string& parameter_name,
Stack<std::string>* lowered_parameters) {
- if (type->IsStructType()) {
- const StructType* struct_type = StructType::cast(type);
+ if (const StructType* struct_type = StructType::DynamicCast(type)) {
StackRange range = lowered_parameters->TopRange(0);
for (auto& field : struct_type->fields()) {
StackRange parameter_range = LowerParameter(
- field.type, parameter_name + "." + field.name, lowered_parameters);
+ field.name_and_type.type,
+ parameter_name + "." + field.name_and_type.name, lowered_parameters);
range.Extend(parameter_range);
}
return range;
@@ -1972,9 +2322,215 @@ StackRange ImplementationVisitor::LowerParameter(
}
}
-std::string ImplementationVisitor::ExternalLabelParameterName(Label* label,
- size_t i) {
- return label->external_label_name() + "_parameter_" + std::to_string(i);
+std::string ImplementationVisitor::ExternalLabelName(
+ const std::string& label_name) {
+ return "label_" + label_name;
+}
+
+std::string ImplementationVisitor::ExternalLabelParameterName(
+ const std::string& label_name, size_t i) {
+ return "label_" + label_name + "_parameter_" + std::to_string(i);
+}
+
+std::string ImplementationVisitor::ExternalParameterName(
+ const std::string& name) {
+ return std::string("p_") + name;
+}
+
+DEFINE_CONTEXTUAL_VARIABLE(ImplementationVisitor::ValueBindingsManager);
+DEFINE_CONTEXTUAL_VARIABLE(ImplementationVisitor::LabelBindingsManager);
+DEFINE_CONTEXTUAL_VARIABLE(ImplementationVisitor::CurrentCallable);
+DEFINE_CONTEXTUAL_VARIABLE(ImplementationVisitor::CurrentReturnValue);
+DEFINE_CONTEXTUAL_VARIABLE(ImplementationVisitor::CurrentConstructorInfo);
+
+bool IsCompatibleSignature(const Signature& sig, const TypeVector& types,
+ const std::vector<Binding<LocalLabel>*>& labels) {
+ auto i = sig.parameter_types.types.begin() + sig.implicit_count;
+ if ((sig.parameter_types.types.size() - sig.implicit_count) > types.size())
+ return false;
+ // TODO(danno): The test below is actually insufficient. The labels'
+ // parameters must be checked too. Ideally, the named part of
+ // LabelDeclarationVector would be factored out so that the label count and
+ // parameter types could be passed separately.
+ if (sig.labels.size() != labels.size()) return false;
+ for (auto current : types) {
+ if (i == sig.parameter_types.types.end()) {
+ if (!sig.parameter_types.var_args) return false;
+ if (!IsAssignableFrom(TypeOracle::GetObjectType(), current)) return false;
+ } else {
+ if (!IsAssignableFrom(*i++, current)) return false;
+ }
+ }
+ return true;
+}
+
+base::Optional<Block*> ImplementationVisitor::GetCatchBlock() {
+ base::Optional<Block*> catch_block;
+ if (base::Optional<Binding<LocalLabel>*> catch_handler =
+ TryLookupLabel("_catch")) {
+ catch_block = assembler().NewBlock(base::nullopt, true);
+ }
+ return catch_block;
+}
+
+void ImplementationVisitor::GenerateCatchBlock(
+ base::Optional<Block*> catch_block) {
+ if (catch_block) {
+ base::Optional<Binding<LocalLabel>*> catch_handler =
+ TryLookupLabel("_catch");
+ if (assembler().CurrentBlockIsComplete()) {
+ assembler().Bind(*catch_block);
+ assembler().Goto((*catch_handler)->block, 1);
+ } else {
+ CfgAssemblerScopedTemporaryBlock temp(&assembler(), *catch_block);
+ assembler().Goto((*catch_handler)->block, 1);
+ }
+ }
+}
+
+void ImplementationVisitor::VisitAllDeclarables() {
+ const std::vector<std::unique_ptr<Declarable>>& all_declarables =
+ GlobalContext::AllDeclarables();
+ // This has to be an index-based loop because all_declarables can be extended
+ // during the loop.
+ for (size_t i = 0; i < all_declarables.size(); ++i) {
+ Visit(all_declarables[i].get());
+ }
+}
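The index-based iteration is essential here: visiting a declarable can trigger an implicit specialization, which appends to the very vector being traversed; push_back may reallocate, invalidating iterators, while indices remain valid and also pick up the new entries. A small sketch of the hazard (plain std::vector, nothing Torque-specific):

#include <cstddef>
#include <vector>

int main() {
  std::vector<int> work = {1, 2, 3};
  for (size_t i = 0; i < work.size(); ++i) {
    if (work[i] == 2) work.push_back(4);  // grows mid-loop; indices stay safe
  }
  // A range-based for over 'work' would be undefined behavior once the
  // vector reallocates.
}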
+
+void ImplementationVisitor::Visit(Declarable* declarable) {
+ CurrentConstructorInfo::Scope current_constructor(base::nullopt);
+ CurrentScope::Scope current_scope(declarable->ParentScope());
+ CurrentSourcePosition::Scope current_source_position(declarable->pos());
+ switch (declarable->kind()) {
+ case Declarable::kMacro:
+ return Visit(Macro::cast(declarable));
+ case Declarable::kMethod:
+ return Visit(Method::cast(declarable));
+ case Declarable::kBuiltin:
+ return Visit(Builtin::cast(declarable));
+ case Declarable::kTypeAlias:
+ return Visit(TypeAlias::cast(declarable));
+ case Declarable::kNamespaceConstant:
+ return Visit(NamespaceConstant::cast(declarable));
+ case Declarable::kRuntimeFunction:
+ case Declarable::kIntrinsic:
+ case Declarable::kExternConstant:
+ case Declarable::kNamespace:
+ case Declarable::kGeneric:
+ return;
+ }
+}
+
+void ImplementationVisitor::GenerateBuiltinDefinitions(std::string& file_name) {
+ std::stringstream new_contents_stream;
+ new_contents_stream
+ << "#ifndef V8_BUILTINS_BUILTIN_DEFINITIONS_FROM_DSL_H_\n"
+ "#define V8_BUILTINS_BUILTIN_DEFINITIONS_FROM_DSL_H_\n"
+ "\n"
+ "#define BUILTIN_LIST_FROM_DSL(CPP, API, TFJ, TFC, TFS, TFH, ASM) "
+ "\\\n";
+ for (auto& declarable : GlobalContext::AllDeclarables()) {
+ Builtin* builtin = Builtin::DynamicCast(declarable.get());
+ if (!builtin || builtin->IsExternal()) continue;
+ int firstParameterIndex = 1;
+ bool declareParameters = true;
+ if (builtin->IsStub()) {
+ new_contents_stream << "TFS(" << builtin->ExternalName();
+ } else {
+ new_contents_stream << "TFJ(" << builtin->ExternalName();
+ if (builtin->IsVarArgsJavaScript()) {
+ new_contents_stream
+ << ", SharedFunctionInfo::kDontAdaptArgumentsSentinel";
+ declareParameters = false;
+ } else {
+ assert(builtin->IsFixedArgsJavaScript());
+ // FixedArg JavaScript builtins need to offer the parameter
+ // count.
+ assert(builtin->parameter_names().size() >= 2);
+ new_contents_stream << ", " << (builtin->parameter_names().size() - 2);
+ // And the receiver is explicitly declared.
+ new_contents_stream << ", kReceiver";
+ firstParameterIndex = 2;
+ }
+ }
+ if (declareParameters) {
+ int index = 0;
+ for (const auto& parameter : builtin->parameter_names()) {
+ if (index >= firstParameterIndex) {
+ new_contents_stream << ", k" << CamelifyString(parameter);
+ }
+ index++;
+ }
+ }
+ new_contents_stream << ") \\\n";
+ }
+ new_contents_stream << "\n";
+
+ new_contents_stream
+ << "#define TORQUE_FUNCTION_POINTER_TYPE_TO_BUILTIN_MAP(V) \\\n";
+ for (const BuiltinPointerType* type : TypeOracle::AllBuiltinPointerTypes()) {
+ Builtin* example_builtin =
+ Declarations::FindSomeInternalBuiltinWithType(type);
+ if (!example_builtin) {
+ CurrentSourcePosition::Scope current_source_position(
+ SourcePosition{CurrentSourceFile::Get(), -1, -1});
+ ReportError("unable to find any builtin with type \"", *type, "\"");
+ }
+ new_contents_stream << " V(" << type->function_pointer_type_id() << ","
+ << example_builtin->ExternalName() << ")\\\n";
+ }
+ new_contents_stream << "\n";
+
+ new_contents_stream
+ << "#endif // V8_BUILTINS_BUILTIN_DEFINITIONS_FROM_DSL_H_\n";
+
+ std::string new_contents(new_contents_stream.str());
+ ReplaceFileContentsIfDifferent(file_name, new_contents);
+}
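For reference, the stream above emits entries of this shape (an illustrative sketch with invented builtin and parameter names; the real list depends on the .tq sources): a TFS line per stub, a fixed-args TFJ line carrying the parameter count (excluding context and receiver) plus kReceiver, and a var-args TFJ line carrying the don't-adapt sentinel.

#define BUILTIN_LIST_FROM_DSL(CPP, API, TFJ, TFC, TFS, TFH, ASM) \
TFS(ExampleStub, kLeft, kRight) \
TFJ(ExampleFixedArgs, 1, kReceiver, kValue) \
TFJ(ExampleVarArgs, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \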
+
+void ImplementationVisitor::GenerateClassDefinitions(std::string& file_name) {
+ std::stringstream new_contents_stream;
+ new_contents_stream << "#ifndef V8_CLASS_BUILTIN_DEFINITIONS_FROM_DSL_H_\n"
+ "#define V8_CLASS_BUILTIN_DEFINITIONS_FROM_DSL_H_\n"
+ "\n\n";
+
+ for (auto i : GlobalContext::GetClasses()) {
+ // TODO(danno): Ideally (and we've got several core V8 devs' feedback
+ // supporting this), Torque should generate the constants for the offsets
+ // directly and not go through the existing layer of macros, which actually
+ // currently just serves to additionally obfuscate where these values come
+ // from.
+ new_contents_stream << "#define ";
+ new_contents_stream << CapifyStringWithUnderscores(i.first)
+ << "_FIELDS(V) \\\n";
+ const ClassType* type = i.second;
+ std::vector<Field> fields = type->fields();
+ new_contents_stream << "V(kStartOfStrongFieldsOffset, 0) \\\n";
+ for (auto f : fields) {
+ if (!f.is_weak) {
+ new_contents_stream << "V(k" << CamelifyString(f.name_and_type.name)
+ << "Offset, kTaggedSize) \\\n";
+ }
+ }
+ new_contents_stream << "V(kEndOfStrongFieldsOffset, 0) \\\n";
+ new_contents_stream << "V(kStartOfWeakFieldsOffset, 0) \\\n";
+ for (auto f : fields) {
+ if (f.is_weak) {
+ new_contents_stream << "V(k" << CamelifyString(f.name_and_type.name)
+ << "Offset, kTaggedSize) \\\n";
+ }
+ }
+ new_contents_stream << "V(kEndOfWeakFieldsOffset, 0) \\\n";
+ new_contents_stream << "V(kSize, 0) \\\n";
+ new_contents_stream << "\n";
+ }
+
+ new_contents_stream
+ << "\n#endif // V8_CLASS_BUILTIN_DEFINITIONS_FROM_DSL_H_\n";
+
+ std::string new_contents(new_contents_stream.str());
+ ReplaceFileContentsIfDifferent(file_name, new_contents);
}
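Each class therefore expands to a field-offset macro of this shape (an illustrative sketch for a hypothetical class JSFoo with a strong field 'bar' and a weak field 'baz'; each entry carries only a size, and the consuming macro accumulates the actual offsets):

#define JS_FOO_FIELDS(V) \
V(kStartOfStrongFieldsOffset, 0) \
V(kBarOffset, kTaggedSize) \
V(kEndOfStrongFieldsOffset, 0) \
V(kStartOfWeakFieldsOffset, 0) \
V(kBazOffset, kTaggedSize) \
V(kEndOfWeakFieldsOffset, 0) \
V(kSize, 0) \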
} // namespace torque
diff --git a/deps/v8/src/torque/implementation-visitor.h b/deps/v8/src/torque/implementation-visitor.h
index a7440251d7..1cbccf3142 100644
--- a/deps/v8/src/torque/implementation-visitor.h
+++ b/deps/v8/src/torque/implementation-visitor.h
@@ -69,6 +69,13 @@ class LocationReference {
DCHECK(IsTemporary());
return *temporary_;
}
+
+ const VisitResult& GetVisitResult() const {
+ if (IsVariableAccess()) return variable();
+ DCHECK(IsTemporary());
+ return temporary();
+ }
+
// For error reporting.
const std::string& temporary_description() const {
DCHECK(IsTemporary());
@@ -104,17 +111,109 @@ class LocationReference {
LocationReference() = default;
};
-class ImplementationVisitor : public FileVisitor {
+template <class T>
+class Binding;
+
+template <class T>
+class BindingsManager {
+ public:
+ base::Optional<Binding<T>*> TryLookup(const std::string& name) {
+ return current_bindings_[name];
+ }
+
+ private:
+ friend class Binding<T>;
+ std::unordered_map<std::string, base::Optional<Binding<T>*>>
+ current_bindings_;
+};
+
+template <class T>
+class Binding : public T {
public:
- explicit ImplementationVisitor(GlobalContext& global_context)
- : FileVisitor(global_context) {}
+ template <class... Args>
+ Binding(BindingsManager<T>* manager, const std::string& name, Args&&... args)
+ : T(std::forward<Args>(args)...),
+ manager_(manager),
+ name_(name),
+ previous_binding_(this) {
+ std::swap(previous_binding_, manager_->current_bindings_[name]);
+ }
+ ~Binding() { manager_->current_bindings_[name_] = previous_binding_; }
+
+ const std::string& name() const { return name_; }
+ SourcePosition declaration_position() const { return declaration_position_; }
- void Visit(Ast* ast) { Visit(ast->default_module()); }
+ private:
+ BindingsManager<T>* manager_;
+ const std::string name_;
+ base::Optional<Binding*> previous_binding_;
+ SourcePosition declaration_position_ = CurrentSourcePosition::Get();
+ DISALLOW_COPY_AND_ASSIGN(Binding);
+};
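Binding's constructor/destructor pair implements scoped shadowing: the constructor swaps itself into the manager's map (remembering whatever the name was bound to before) and the destructor restores that previous binding. A self-contained sketch of the same pattern with simplified types (int in place of LocalValue/LocalLabel; all names invented):

#include <cassert>
#include <map>
#include <optional>
#include <string>
#include <utility>

struct Manager {
  // operator[] default-constructs to nullopt, mirroring TryLookup above.
  std::map<std::string, std::optional<int>> current;
};

class ScopedBinding {
 public:
  ScopedBinding(Manager* m, std::string name, int value)
      : m_(m), name_(std::move(name)), previous_(value) {
    std::swap(previous_, m_->current[name_]);  // install, remember old
  }
  ~ScopedBinding() { m_->current[name_] = previous_; }  // restore

 private:
  Manager* m_;
  std::string name_;
  std::optional<int> previous_;
};

int main() {
  Manager m;
  {
    ScopedBinding outer(&m, "x", 1);
    assert(*m.current["x"] == 1);
    {
      ScopedBinding inner(&m, "x", 2);  // shadows the outer "x"
      assert(*m.current["x"] == 2);
    }
    assert(*m.current["x"] == 1);  // outer binding restored
  }
  assert(!m.current["x"].has_value());  // back to unbound
}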
+
+template <class T>
+class BlockBindings {
+ public:
+ explicit BlockBindings(BindingsManager<T>* manager) : manager_(manager) {}
+ void Add(std::string name, T value) {
+ for (const auto& binding : bindings_) {
+ if (binding->name() == name) {
+ ReportError(
+ "redeclaration of name \"", name,
+ "\" in the same block is illegal, previous declaration at: ",
+ binding->declaration_position());
+ }
+ }
+ bindings_.push_back(base::make_unique<Binding<T>>(manager_, std::move(name),
+ std::move(value)));
+ }
+
+ std::vector<Binding<T>*> bindings() const {
+ std::vector<Binding<T>*> result;
+ result.reserve(bindings_.size());
+ for (auto& b : bindings_) {
+ result.push_back(b.get());
+ }
+ return result;
+ }
+
+ private:
+ BindingsManager<T>* manager_;
+ std::vector<std::unique_ptr<Binding<T>>> bindings_;
+};
+
+struct LocalValue {
+ bool is_const;
+ VisitResult value;
+};
+
+struct LocalLabel {
+ Block* block;
+ std::vector<const Type*> parameter_types;
+
+ explicit LocalLabel(Block* block,
+ std::vector<const Type*> parameter_types = {})
+ : block(block), parameter_types(std::move(parameter_types)) {}
+};
+
+struct Arguments {
+ VisitResultVector parameters;
+ std::vector<Binding<LocalLabel>*> labels;
+};
+
+bool IsCompatibleSignature(const Signature& sig, const TypeVector& types,
+ const std::vector<Binding<LocalLabel>*>& labels);
+
+class ImplementationVisitor : public FileVisitor {
+ public:
+ void GenerateBuiltinDefinitions(std::string& file_name);
+ void GenerateClassDefinitions(std::string& file_name);
VisitResult Visit(Expression* expr);
const Type* Visit(Statement* stmt);
- void Visit(Declaration* decl);
+ VisitResult TemporaryUninitializedStruct(const StructType* struct_type,
+ const std::string& reason);
VisitResult Visit(StructExpression* decl);
LocationReference GetLocationReference(Expression* location);
@@ -136,35 +235,24 @@ class ImplementationVisitor : public FileVisitor {
return scope.Yield(GenerateFetchFromLocation(GetLocationReference(expr)));
}
- void Visit(ModuleDeclaration* decl);
- void Visit(DefaultModuleDeclaration* decl) {
- Visit(implicit_cast<ModuleDeclaration*>(decl));
- }
- void Visit(ExplicitModuleDeclaration* decl) {
- Visit(implicit_cast<ModuleDeclaration*>(decl));
- }
- void Visit(TypeDeclaration* decl) {}
- void Visit(TypeAliasDeclaration* decl) {}
- void Visit(ExternConstDeclaration* decl) {}
- void Visit(StructDeclaration* decl);
- void Visit(StandardDeclaration* decl);
- void Visit(GenericDeclaration* decl) {}
- void Visit(SpecializationDeclaration* decl);
-
- void Visit(TorqueMacroDeclaration* decl, const Signature& signature,
- Statement* body);
- void Visit(TorqueBuiltinDeclaration* decl, const Signature& signature,
- Statement* body);
- void Visit(ExternalMacroDeclaration* decl, const Signature& signature,
- Statement* body) {}
- void Visit(ExternalBuiltinDeclaration* decl, const Signature& signature,
- Statement* body) {}
- void Visit(ExternalRuntimeDeclaration* decl, const Signature& signature,
- Statement* body) {}
- void Visit(CallableNode* decl, const Signature& signature, Statement* body);
- void Visit(ConstDeclaration* decl);
+ void VisitAllDeclarables();
+ void Visit(Declarable* declarable);
+ void Visit(TypeAlias* decl);
+ VisitResult InlineMacro(Macro* macro,
+ base::Optional<LocationReference> this_reference,
+ const std::vector<VisitResult>& arguments,
+ const std::vector<Block*> label_blocks);
+ void VisitMacroCommon(Macro* macro);
+ void Visit(Macro* macro);
+ void Visit(Method* method);
+ void Visit(Builtin* builtin);
+ void Visit(NamespaceConstant* decl);
VisitResult Visit(CallExpression* expr, bool is_tail = false);
+ VisitResult Visit(CallMethodExpression* expr);
+ VisitResult Visit(IntrinsicCallExpression* intrinsic);
+ VisitResult Visit(LoadObjectFieldExpression* intrinsic);
+ VisitResult Visit(StoreObjectFieldExpression* intrinsic);
const Type* Visit(TailCallStatement* stmt);
VisitResult Visit(ConditionalExpression* expr);
@@ -179,6 +267,7 @@ class ImplementationVisitor : public FileVisitor {
VisitResult Visit(AssumeTypeImpossibleExpression* expr);
VisitResult Visit(TryLabelExpression* expr);
VisitResult Visit(StatementExpression* expr);
+ VisitResult Visit(NewExpression* expr);
const Type* Visit(ReturnStatement* stmt);
const Type* Visit(GotoStatement* stmt);
@@ -188,21 +277,43 @@ class ImplementationVisitor : public FileVisitor {
const Type* Visit(ContinueStatement* stmt);
const Type* Visit(ForLoopStatement* stmt);
const Type* Visit(VarDeclarationStatement* stmt);
+ const Type* Visit(VarDeclarationStatement* stmt,
+ BlockBindings<LocalValue>* block_bindings);
const Type* Visit(ForOfLoopStatement* stmt);
const Type* Visit(BlockStatement* block);
const Type* Visit(ExpressionStatement* stmt);
const Type* Visit(DebugStatement* stmt);
const Type* Visit(AssertStatement* stmt);
- void BeginModuleFile(Module* module);
- void EndModuleFile(Module* module);
+ void BeginNamespaceFile(Namespace* nspace);
+ void EndNamespaceFile(Namespace* nspace);
- void GenerateImplementation(const std::string& dir, Module* module);
+ void GenerateImplementation(const std::string& dir, Namespace* nspace);
- private:
- std::string GetBaseAssemblerName(Module* module);
+ struct ConstructorInfo {
+ int super_calls;
+ };
- std::string GetDSLAssemblerName(Module* module);
+ DECLARE_CONTEXTUAL_VARIABLE(ValueBindingsManager,
+ BindingsManager<LocalValue>);
+ DECLARE_CONTEXTUAL_VARIABLE(LabelBindingsManager,
+ BindingsManager<LocalLabel>);
+ DECLARE_CONTEXTUAL_VARIABLE(CurrentCallable, Callable*);
+ DECLARE_CONTEXTUAL_VARIABLE(CurrentReturnValue, base::Optional<VisitResult>);
+ DECLARE_CONTEXTUAL_VARIABLE(CurrentConstructorInfo,
+ base::Optional<ConstructorInfo>);
+
+ // A BindingsManagersScope has to be active for local bindings to be created.
+ // Shadowing an existing BindingsManagersScope by creating a new one hides all
+ // existing bindings while the additional BindingsManagersScope is active.
+ struct BindingsManagersScope {
+ ValueBindingsManager::Scope value_bindings_manager;
+ LabelBindingsManager::Scope label_bindings_manager;
+ };
+
+ private:
+ base::Optional<Block*> GetCatchBlock();
+ void GenerateCatchBlock(base::Optional<Block*> catch_block);
// {StackScope} records the stack height at creation time and reconstructs it
// when being destructed by emitting a {DeleteRangeInstruction}, except for
@@ -224,8 +335,8 @@ class ImplementationVisitor : public FileVisitor {
base_ = visitor_->assembler().CurrentStack().AboveTop();
}
VisitResult Yield(VisitResult result) {
- DCHECK(!yield_called_);
- yield_called_ = true;
+ DCHECK(!closed_);
+ closed_ = true;
if (!result.IsOnStack()) {
if (!visitor_->assembler().CurrentBlockIsComplete()) {
visitor_->assembler().DropTo(base_);
@@ -243,25 +354,72 @@ class ImplementationVisitor : public FileVisitor {
result.stack_range().Size()));
}
+ void Close() {
+ DCHECK(!closed_);
+ closed_ = true;
+ if (!visitor_->assembler().CurrentBlockIsComplete()) {
+ visitor_->assembler().DropTo(base_);
+ }
+ }
+
~StackScope() {
- if (yield_called_) {
+ if (closed_) {
DCHECK_IMPLIES(
!visitor_->assembler().CurrentBlockIsComplete(),
base_ == visitor_->assembler().CurrentStack().AboveTop());
- } else if (!visitor_->assembler().CurrentBlockIsComplete()) {
- visitor_->assembler().DropTo(base_);
+ } else {
+ Close();
}
}
private:
ImplementationVisitor* visitor_;
BottomOffset base_;
- bool yield_called_ = false;
+ bool closed_ = false;
+ };
+
+ class BreakContinueActivator {
+ public:
+ BreakContinueActivator(Block* break_block, Block* continue_block)
+ : break_binding_{&LabelBindingsManager::Get(), "_break",
+ LocalLabel{break_block}},
+ continue_binding_{&LabelBindingsManager::Get(), "_continue",
+ LocalLabel{continue_block}} {}
+
+ private:
+ Binding<LocalLabel> break_binding_;
+ Binding<LocalLabel> continue_binding_;
};
- Callable* LookupCall(const std::string& name, const Arguments& arguments,
+ base::Optional<Binding<LocalValue>*> TryLookupLocalValue(
+ const std::string& name);
+ base::Optional<Binding<LocalLabel>*> TryLookupLabel(const std::string& name);
+ Binding<LocalLabel>* LookupLabel(const std::string& name);
+ Block* LookupSimpleLabel(const std::string& name);
+ template <class Container>
+ Callable* LookupCallable(const QualifiedName& name,
+ const Container& declaration_container,
+ const TypeVector& types,
+ const std::vector<Binding<LocalLabel>*>& labels,
+ const TypeVector& specialization_types);
+
+ template <class Container>
+ Callable* LookupCallable(const QualifiedName& name,
+ const Container& declaration_container,
+ const Arguments& arguments,
+ const TypeVector& specialization_types);
+
+ Method* LookupMethod(const std::string& name, LocationReference target,
+ const Arguments& arguments,
const TypeVector& specialization_types);
+ Method* LookupConstructor(LocationReference target,
+ const Arguments& arguments,
+ const TypeVector& specialization_types) {
+ return LookupMethod(kConstructMethodName, target, arguments,
+ specialization_types);
+ }
+
const Type* GetCommonType(const Type* left, const Type* right);
VisitResult GenerateCopy(const VisitResult& to_copy);
@@ -269,24 +427,35 @@ class ImplementationVisitor : public FileVisitor {
void GenerateAssignToLocation(const LocationReference& reference,
const VisitResult& assignment_value);
- VisitResult GenerateCall(const std::string& callable_name,
+ void AddCallParameter(Callable* callable, VisitResult parameter,
+ const Type* parameter_type,
+ std::vector<VisitResult>* converted_arguments,
+ StackRange* argument_range,
+ std::vector<std::string>* constexpr_arguments);
+
+ VisitResult GenerateCall(Callable* callable,
+ base::Optional<LocationReference> this_parameter,
Arguments parameters,
const TypeVector& specialization_types = {},
bool tail_call = false);
+ VisitResult GenerateCall(const QualifiedName& callable_name,
+ Arguments parameters,
+ const TypeVector& specialization_types = {},
+ bool tail_call = false);
+ VisitResult GenerateCall(std::string callable_name, Arguments parameters,
+ const TypeVector& specialization_types = {},
+ bool tail_call = false) {
+ return GenerateCall(QualifiedName(std::move(callable_name)),
+ std::move(parameters), specialization_types, tail_call);
+ }
VisitResult GeneratePointerCall(Expression* callee,
const Arguments& parameters, bool tail_call);
- bool GenerateLabeledStatementBlocks(
- const std::vector<Statement*>& blocks,
- const std::vector<Label*>& statement_labels, Block* merge_block);
-
- void GenerateBranch(const VisitResult& condition, Label* true_label,
- Label* false_label);
+ void GenerateBranch(const VisitResult& condition, Block* true_block,
+ Block* false_block);
- bool GenerateExpressionBranch(Expression* expression,
- const std::vector<Label*>& statement_labels,
- const std::vector<Statement*>& statement_blocks,
- Block* merge_block);
+ void GenerateExpressionBranch(Expression* expression, Block* true_block,
+ Block* false_block);
void GenerateMacroFunctionDeclaration(std::ostream& o,
const std::string& macro_prefix,
@@ -300,47 +469,40 @@ class ImplementationVisitor : public FileVisitor {
VisitResult GenerateImplicitConvert(const Type* destination_type,
VisitResult source);
- void Specialize(const SpecializationKey& key, CallableNode* callable,
- const CallableNodeSignature* signature,
- Statement* body) override {
- Declarations::GenericScopeActivator scope(declarations(), key);
- Visit(callable, MakeSignature(signature), body);
- }
-
- void CreateBlockForLabel(Label* label, Stack<const Type*> stack);
-
- void GenerateLabelBind(Label* label);
-
- StackRange GenerateLabelGoto(Label* label,
+ StackRange GenerateLabelGoto(LocalLabel* label,
base::Optional<StackRange> arguments = {});
- std::vector<Label*> LabelsFromIdentifiers(
+ std::vector<Binding<LocalLabel>*> LabelsFromIdentifiers(
const std::vector<std::string>& names);
StackRange LowerParameter(const Type* type, const std::string& parameter_name,
Stack<std::string>* lowered_parameters);
- std::string ExternalLabelParameterName(Label* label, size_t i);
+ std::string ExternalLabelName(const std::string& label_name);
+ std::string ExternalLabelParameterName(const std::string& label_name,
+ size_t i);
+ std::string ExternalParameterName(const std::string& name);
- std::ostream& source_out() { return module_->source_stream(); }
+ std::ostream& source_out() { return CurrentNamespace()->source_stream(); }
- std::ostream& header_out() { return module_->header_stream(); }
+ std::ostream& header_out() { return CurrentNamespace()->header_stream(); }
CfgAssembler& assembler() { return *assembler_; }
void SetReturnValue(VisitResult return_value) {
- DCHECK_IMPLIES(return_value_, *return_value_ == return_value);
- return_value_ = std::move(return_value);
+ base::Optional<VisitResult>& current_return_value =
+ CurrentReturnValue::Get();
+ DCHECK_IMPLIES(current_return_value, *current_return_value == return_value);
+ current_return_value = std::move(return_value);
}
VisitResult GetAndClearReturnValue() {
- VisitResult return_value = *return_value_;
- return_value_ = base::nullopt;
+ VisitResult return_value = *CurrentReturnValue::Get();
+ CurrentReturnValue::Get() = base::nullopt;
return return_value;
}
base::Optional<CfgAssembler> assembler_;
- base::Optional<VisitResult> return_value_;
};
} // namespace torque
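The Binding/BindingsManager pair added above replaces the Scope/ScopeChain machinery this patch deletes further down: a Binding installs itself in the manager's per-name slot on construction and restores whatever it shadowed on destruction, so ordinary C++ scoping (including BreakContinueActivator's "_break"/"_continue" labels) drives name resolution. A minimal, compilable sketch of that RAII idea, using std::optional in place of base::Optional and made-up names rather than V8's:

#include <cassert>
#include <optional>
#include <string>
#include <unordered_map>
#include <utility>

template <class T>
class Binding;

template <class T>
class BindingsManager {
 public:
  std::optional<Binding<T>*> TryLookup(const std::string& name) {
    return current_bindings_[name];
  }
 private:
  friend class Binding<T>;
  std::unordered_map<std::string, std::optional<Binding<T>*>> current_bindings_;
};

template <class T>
class Binding : public T {
 public:
  Binding(BindingsManager<T>* manager, const std::string& name, T value)
      : T(std::move(value)), manager_(manager), name_(name),
        previous_binding_(this) {
    // Install this binding; remember whatever it shadows.
    std::swap(previous_binding_, manager_->current_bindings_[name]);
  }
  ~Binding() {
    // Restore the shadowed binding (or emptiness) on scope exit.
    manager_->current_bindings_[name_] = previous_binding_;
  }
  Binding(const Binding&) = delete;
 private:
  BindingsManager<T>* manager_;
  const std::string name_;
  std::optional<Binding*> previous_binding_;
};

struct Value { int x; };

int main() {
  BindingsManager<Value> manager;
  {
    Binding<Value> outer(&manager, "a", Value{1});
    assert((*manager.TryLookup("a"))->x == 1);
    {
      Binding<Value> inner(&manager, "a", Value{2});  // shadows outer
      assert((*manager.TryLookup("a"))->x == 2);
    }
    assert((*manager.TryLookup("a"))->x == 1);  // outer restored
  }
}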
diff --git a/deps/v8/src/torque/instructions.cc b/deps/v8/src/torque/instructions.cc
index 13dbd75a2e..1badcc462a 100644
--- a/deps/v8/src/torque/instructions.cc
+++ b/deps/v8/src/torque/instructions.cc
@@ -25,8 +25,12 @@ void PeekInstruction::TypeInstruction(Stack<const Type*>* stack,
ControlFlowGraph* cfg) const {
const Type* type = stack->Peek(slot);
if (widened_type) {
+ if (type->IsTopType()) {
+ const TopType* top_type = TopType::cast(type);
+ ReportError("use of " + top_type->reason());
+ }
if (!type->IsSubtypeOf(*widened_type)) {
- ReportError("type ", type, " is not a subtype of ", *widened_type);
+ ReportError("type ", *type, " is not a subtype of ", **widened_type);
}
type = *widened_type;
}
@@ -56,16 +60,50 @@ void PushUninitializedInstruction::TypeInstruction(
stack->Push(type);
}
-void PushCodePointerInstruction::TypeInstruction(Stack<const Type*>* stack,
- ControlFlowGraph* cfg) const {
+void PushBuiltinPointerInstruction::TypeInstruction(
+ Stack<const Type*>* stack, ControlFlowGraph* cfg) const {
stack->Push(type);
}
-void ModuleConstantInstruction::TypeInstruction(Stack<const Type*>* stack,
- ControlFlowGraph* cfg) const {
+void NamespaceConstantInstruction::TypeInstruction(
+ Stack<const Type*>* stack, ControlFlowGraph* cfg) const {
stack->PushMany(LowerType(constant->type()));
}
+void InstructionBase::InvalidateTransientTypes(
+ Stack<const Type*>* stack) const {
+ auto current = stack->begin();
+ while (current != stack->end()) {
+ if ((*current)->IsTransient()) {
+ std::stringstream stream;
+ stream << "type " << **current
+ << " is made invalid by transitioning callable invocation at "
+ << PositionAsString(pos);
+ *current = TypeOracle::GetTopType(stream.str(), *current);
+ }
+ ++current;
+ }
+}
+
+void CallIntrinsicInstruction::TypeInstruction(Stack<const Type*>* stack,
+ ControlFlowGraph* cfg) const {
+ std::vector<const Type*> parameter_types =
+ LowerParameterTypes(intrinsic->signature().parameter_types);
+ for (intptr_t i = parameter_types.size() - 1; i >= 0; --i) {
+ const Type* arg_type = stack->Pop();
+ const Type* parameter_type = parameter_types.back();
+ parameter_types.pop_back();
+ if (arg_type != parameter_type) {
+ ReportError("parameter ", i, ": expected type ", *parameter_type,
+ " but found type ", *arg_type);
+ }
+ }
+ if (intrinsic->IsTransitioning()) {
+ InvalidateTransientTypes(stack);
+ }
+ stack->PushMany(LowerType(intrinsic->signature().return_type));
+}
+
void CallCsaMacroInstruction::TypeInstruction(Stack<const Type*>* stack,
ControlFlowGraph* cfg) const {
std::vector<const Type*> parameter_types =
@@ -79,7 +117,16 @@ void CallCsaMacroInstruction::TypeInstruction(Stack<const Type*>* stack,
" but found type ", *arg_type);
}
}
- if (!parameter_types.empty()) ReportError("missing arguments");
+
+ if (macro->IsTransitioning()) {
+ InvalidateTransientTypes(stack);
+ }
+
+ if (catch_block) {
+ Stack<const Type*> catch_stack = *stack;
+ catch_stack.Push(TypeOracle::GetObjectType());
+ (*catch_block)->SetInputTypes(catch_stack);
+ }
stack->PushMany(LowerType(macro->signature().return_type));
}
@@ -97,7 +144,6 @@ void CallCsaMacroAndBranchInstruction::TypeInstruction(
" but found type ", *arg_type);
}
}
- if (!parameter_types.empty()) ReportError("missing arguments");
if (label_blocks.size() != macro->signature().labels.size()) {
ReportError("wrong number of labels");
@@ -109,6 +155,16 @@ void CallCsaMacroAndBranchInstruction::TypeInstruction(
label_blocks[i]->SetInputTypes(std::move(continuation_stack));
}
+ if (macro->IsTransitioning()) {
+ InvalidateTransientTypes(stack);
+ }
+
+ if (catch_block) {
+ Stack<const Type*> catch_stack = *stack;
+ catch_stack.Push(TypeOracle::GetObjectType());
+ (*catch_block)->SetInputTypes(catch_stack);
+ }
+
if (macro->signature().return_type != TypeOracle::GetNeverType()) {
Stack<const Type*> return_stack = *stack;
return_stack.PushMany(LowerType(macro->signature().return_type));
@@ -130,17 +186,30 @@ void CallBuiltinInstruction::TypeInstruction(Stack<const Type*>* stack,
LowerParameterTypes(builtin->signature().parameter_types)) {
ReportError("wrong argument types");
}
+ if (builtin->IsTransitioning()) {
+ InvalidateTransientTypes(stack);
+ }
+
+ if (catch_block) {
+ Stack<const Type*> catch_stack = *stack;
+ catch_stack.Push(TypeOracle::GetObjectType());
+ (*catch_block)->SetInputTypes(catch_stack);
+ }
+
stack->PushMany(LowerType(builtin->signature().return_type));
}
void CallBuiltinPointerInstruction::TypeInstruction(
Stack<const Type*>* stack, ControlFlowGraph* cfg) const {
std::vector<const Type*> argument_types = stack->PopMany(argc);
- const FunctionPointerType* f = FunctionPointerType::DynamicCast(stack->Pop());
+ const BuiltinPointerType* f = BuiltinPointerType::DynamicCast(stack->Pop());
if (!f) ReportError("expected function pointer type");
if (argument_types != LowerParameterTypes(f->parameter_types())) {
ReportError("wrong argument types");
}
+ // TODO(tebbi): Only invalidate transient types if the function pointer type
+ // is transitioning.
+ InvalidateTransientTypes(stack);
stack->PushMany(LowerType(f->return_type()));
}
@@ -152,7 +221,20 @@ void CallRuntimeInstruction::TypeInstruction(Stack<const Type*>* stack,
argc)) {
ReportError("wrong argument types");
}
- stack->PushMany(LowerType(runtime_function->signature().return_type));
+ if (runtime_function->IsTransitioning()) {
+ InvalidateTransientTypes(stack);
+ }
+
+ if (catch_block) {
+ Stack<const Type*> catch_stack = *stack;
+ catch_stack.Push(TypeOracle::GetObjectType());
+ (*catch_block)->SetInputTypes(catch_stack);
+ }
+
+ const Type* return_type = runtime_function->signature().return_type;
+ if (return_type != TypeOracle::GetNeverType()) {
+ stack->PushMany(LowerType(return_type));
+ }
}
void BranchInstruction::TypeInstruction(Stack<const Type*>* stack,
@@ -191,14 +273,54 @@ void ReturnInstruction::TypeInstruction(Stack<const Type*>* stack,
void PrintConstantStringInstruction::TypeInstruction(
Stack<const Type*>* stack, ControlFlowGraph* cfg) const {}
-void DebugBreakInstruction::TypeInstruction(Stack<const Type*>* stack,
- ControlFlowGraph* cfg) const {}
+void AbortInstruction::TypeInstruction(Stack<const Type*>* stack,
+ ControlFlowGraph* cfg) const {}
void UnsafeCastInstruction::TypeInstruction(Stack<const Type*>* stack,
ControlFlowGraph* cfg) const {
stack->Poke(stack->AboveTop() - 1, destination_type);
}
+void LoadObjectFieldInstruction::TypeInstruction(Stack<const Type*>* stack,
+ ControlFlowGraph* cfg) const {
+ const ClassType* stack_class_type = ClassType::DynamicCast(stack->Top());
+ if (!stack_class_type) {
+ ReportError(
+ "first argument to a LoadObjectFieldInstruction instruction isn't a "
+ "class");
+ }
+ if (stack_class_type != class_type) {
+ ReportError(
+ "first argument to a LoadObjectFieldInstruction doesn't match "
+ "instruction's type");
+ }
+ const Field& field = class_type->LookupField(field_name);
+ stack->Poke(stack->AboveTop() - 1, field.name_and_type.type);
+}
+
+void StoreObjectFieldInstruction::TypeInstruction(Stack<const Type*>* stack,
+ ControlFlowGraph* cfg) const {
+ auto value = stack->Pop();
+ const ClassType* stack_class_type = ClassType::DynamicCast(stack->Top());
+ if (!stack_class_type) {
+ ReportError(
+ "first argument to a StoreObjectFieldInstruction instruction isn't a "
+ "class");
+ }
+ if (stack_class_type != class_type) {
+ ReportError(
+ "first argument to a StoreObjectFieldInstruction doesn't match "
+ "instruction's type");
+ }
+ stack->Pop();
+ stack->Push(value);
+}
+
+bool CallRuntimeInstruction::IsBlockTerminator() const {
+ return is_tailcall || runtime_function->signature().return_type ==
+ TypeOracle::GetNeverType();
+}
+
} // namespace torque
} // namespace internal
} // namespace v8
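The pattern repeated across these TypeInstruction overrides is new in this patch: every transitioning call (intrinsic, macro, builtin, runtime function) first runs InvalidateTransientTypes, which replaces transient stack slots with a top type that records why the value became invalid, and a later PeekInstruction on such a slot reports that recorded reason. A self-contained sketch of just this bookkeeping, with stand-in types rather than V8's TypeOracle machinery:

#include <iostream>
#include <string>
#include <vector>

struct Type {
  std::string name;
  bool is_transient = false;
  std::string top_reason;  // non-empty marks an invalidated (top) type
};

void InvalidateTransientTypes(std::vector<Type>* stack,
                              const std::string& position) {
  for (Type& t : *stack) {
    if (t.is_transient) {
      // Record why this slot became unusable, as the patch does via TopType.
      t.top_reason =
          "type " + t.name +
          " is made invalid by transitioning callable invocation at " +
          position;
    }
  }
}

void Use(const Type& t) {
  if (!t.top_reason.empty()) {
    std::cerr << "error: use of " << t.top_reason << "\n";
    return;
  }
  std::cout << "ok: " << t.name << "\n";
}

int main() {
  std::vector<Type> stack = {{"Smi"}, {"FastJSArray", /*is_transient=*/true}};
  InvalidateTransientTypes(&stack, "example.tq:1:1");
  Use(stack[0]);  // ok
  Use(stack[1]);  // reports the recorded reason
}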
diff --git a/deps/v8/src/torque/instructions.h b/deps/v8/src/torque/instructions.h
index 881074e827..1bf38aaa94 100644
--- a/deps/v8/src/torque/instructions.h
+++ b/deps/v8/src/torque/instructions.h
@@ -19,8 +19,9 @@ namespace torque {
class Block;
class Builtin;
class ControlFlowGraph;
+class Intrinsic;
class Macro;
-class ModuleConstant;
+class NamespaceConstant;
class RuntimeFunction;
#define TORQUE_INSTRUCTION_LIST(V) \
@@ -28,9 +29,12 @@ class RuntimeFunction;
V(PokeInstruction) \
V(DeleteRangeInstruction) \
V(PushUninitializedInstruction) \
- V(PushCodePointerInstruction) \
+ V(PushBuiltinPointerInstruction) \
+ V(LoadObjectFieldInstruction) \
+ V(StoreObjectFieldInstruction) \
V(CallCsaMacroInstruction) \
- V(ModuleConstantInstruction) \
+ V(CallIntrinsicInstruction) \
+ V(NamespaceConstantInstruction) \
V(CallCsaMacroAndBranchInstruction) \
V(CallBuiltinInstruction) \
V(CallRuntimeInstruction) \
@@ -41,7 +45,7 @@ class RuntimeFunction;
V(GotoExternalInstruction) \
V(ReturnInstruction) \
V(PrintConstantStringInstruction) \
- V(DebugBreakInstruction) \
+ V(AbortInstruction) \
V(UnsafeCastInstruction)
#define TORQUE_INSTRUCTION_BOILERPLATE() \
@@ -65,6 +69,7 @@ struct InstructionBase {
virtual void TypeInstruction(Stack<const Type*>* stack,
ControlFlowGraph* cfg) const = 0;
+ void InvalidateTransientTypes(Stack<const Type*>* stack) const;
virtual bool IsBlockTerminator() const { return false; }
virtual void AppendSuccessorBlocks(std::vector<Block*>* block_list) const {}
@@ -106,9 +111,10 @@ class Instruction {
return nullptr;
}
- Instruction(const Instruction& other)
- : kind_(other.kind_), instruction_(other.instruction_->Clone()) {}
- Instruction& operator=(const Instruction& other) {
+ Instruction(const Instruction& other) V8_NOEXCEPT
+ : kind_(other.kind_),
+ instruction_(other.instruction_->Clone()) {}
+ Instruction& operator=(const Instruction& other) V8_NOEXCEPT {
if (kind_ == other.kind_) {
instruction_->Assign(*other.instruction_);
} else {
@@ -119,6 +125,17 @@ class Instruction {
}
InstructionKind kind() const { return kind_; }
+ const char* Mnemonic() const {
+ switch (kind()) {
+#define ENUM_ITEM(name) \
+ case InstructionKind::k##name: \
+ return #name;
+ TORQUE_INSTRUCTION_LIST(ENUM_ITEM)
+#undef ENUM_ITEM
+ default:
+ UNREACHABLE();
+ }
+ }
void TypeInstruction(Stack<const Type*>* stack, ControlFlowGraph* cfg) const {
return instruction_->TypeInstruction(stack, cfg);
}
@@ -167,33 +184,74 @@ struct PushUninitializedInstruction : InstructionBase {
const Type* type;
};
-struct PushCodePointerInstruction : InstructionBase {
+struct PushBuiltinPointerInstruction : InstructionBase {
TORQUE_INSTRUCTION_BOILERPLATE()
- PushCodePointerInstruction(std::string external_name, const Type* type)
+ PushBuiltinPointerInstruction(std::string external_name, const Type* type)
: external_name(std::move(external_name)), type(type) {
- DCHECK(type->IsFunctionPointerType());
+ DCHECK(type->IsBuiltinPointerType());
}
std::string external_name;
const Type* type;
};
-struct ModuleConstantInstruction : InstructionBase {
+struct NamespaceConstantInstruction : InstructionBase {
TORQUE_INSTRUCTION_BOILERPLATE()
- explicit ModuleConstantInstruction(ModuleConstant* constant)
+ explicit NamespaceConstantInstruction(NamespaceConstant* constant)
: constant(constant) {}
- ModuleConstant* constant;
+ NamespaceConstant* constant;
+};
+
+struct LoadObjectFieldInstruction : InstructionBase {
+ TORQUE_INSTRUCTION_BOILERPLATE()
+ LoadObjectFieldInstruction(const ClassType* class_type,
+ std::string field_name)
+ : class_type(class_type) {
+ // The normal way to write this triggers a bug in Clang on Windows.
+ this->field_name = std::move(field_name);
+ }
+ const ClassType* class_type;
+ std::string field_name;
+};
+
+struct StoreObjectFieldInstruction : InstructionBase {
+ TORQUE_INSTRUCTION_BOILERPLATE()
+ StoreObjectFieldInstruction(const ClassType* class_type,
+ std::string field_name)
+ : class_type(class_type) {
+ // The normal way to write this triggers a bug in Clang on Windows.
+ this->field_name = std::move(field_name);
+ }
+ const ClassType* class_type;
+ std::string field_name;
+};
+
+struct CallIntrinsicInstruction : InstructionBase {
+ TORQUE_INSTRUCTION_BOILERPLATE()
+ CallIntrinsicInstruction(Intrinsic* intrinsic,
+ std::vector<std::string> constexpr_arguments)
+ : intrinsic(intrinsic), constexpr_arguments(constexpr_arguments) {}
+
+ Intrinsic* intrinsic;
+ std::vector<std::string> constexpr_arguments;
};
struct CallCsaMacroInstruction : InstructionBase {
TORQUE_INSTRUCTION_BOILERPLATE()
CallCsaMacroInstruction(Macro* macro,
- std::vector<std::string> constexpr_arguments)
- : macro(macro), constexpr_arguments(constexpr_arguments) {}
+ std::vector<std::string> constexpr_arguments,
+ base::Optional<Block*> catch_block)
+ : macro(macro),
+ constexpr_arguments(constexpr_arguments),
+ catch_block(catch_block) {}
+ void AppendSuccessorBlocks(std::vector<Block*>* block_list) const override {
+ if (catch_block) block_list->push_back(*catch_block);
+ }
Macro* macro;
std::vector<std::string> constexpr_arguments;
+ base::Optional<Block*> catch_block;
};
struct CallCsaMacroAndBranchInstruction : InstructionBase {
@@ -201,13 +259,16 @@ struct CallCsaMacroAndBranchInstruction : InstructionBase {
CallCsaMacroAndBranchInstruction(Macro* macro,
std::vector<std::string> constexpr_arguments,
base::Optional<Block*> return_continuation,
- std::vector<Block*> label_blocks)
+ std::vector<Block*> label_blocks,
+ base::Optional<Block*> catch_block)
: macro(macro),
constexpr_arguments(constexpr_arguments),
return_continuation(return_continuation),
- label_blocks(label_blocks) {}
+ label_blocks(label_blocks),
+ catch_block(catch_block) {}
bool IsBlockTerminator() const override { return true; }
void AppendSuccessorBlocks(std::vector<Block*>* block_list) const override {
+ if (catch_block) block_list->push_back(*catch_block);
if (return_continuation) block_list->push_back(*return_continuation);
for (Block* block : label_blocks) block_list->push_back(block);
}
@@ -216,46 +277,58 @@ struct CallCsaMacroAndBranchInstruction : InstructionBase {
std::vector<std::string> constexpr_arguments;
base::Optional<Block*> return_continuation;
std::vector<Block*> label_blocks;
+ base::Optional<Block*> catch_block;
};
struct CallBuiltinInstruction : InstructionBase {
TORQUE_INSTRUCTION_BOILERPLATE()
bool IsBlockTerminator() const override { return is_tailcall; }
- CallBuiltinInstruction(bool is_tailcall, Builtin* builtin, size_t argc)
- : is_tailcall(is_tailcall), builtin(builtin), argc(argc) {}
+ CallBuiltinInstruction(bool is_tailcall, Builtin* builtin, size_t argc,
+ base::Optional<Block*> catch_block)
+ : is_tailcall(is_tailcall),
+ builtin(builtin),
+ argc(argc),
+ catch_block(catch_block) {}
+ void AppendSuccessorBlocks(std::vector<Block*>* block_list) const override {
+ if (catch_block) block_list->push_back(*catch_block);
+ }
bool is_tailcall;
Builtin* builtin;
size_t argc;
+ base::Optional<Block*> catch_block;
};
struct CallBuiltinPointerInstruction : InstructionBase {
TORQUE_INSTRUCTION_BOILERPLATE()
bool IsBlockTerminator() const override { return is_tailcall; }
- CallBuiltinPointerInstruction(bool is_tailcall, Builtin* example_builtin,
- size_t argc)
- : is_tailcall(is_tailcall),
- example_builtin(example_builtin),
- argc(argc) {}
+ CallBuiltinPointerInstruction(bool is_tailcall,
+ const BuiltinPointerType* type, size_t argc)
+ : is_tailcall(is_tailcall), type(type), argc(argc) {}
bool is_tailcall;
- Builtin* example_builtin;
+ const BuiltinPointerType* type;
size_t argc;
};
struct CallRuntimeInstruction : InstructionBase {
TORQUE_INSTRUCTION_BOILERPLATE()
- bool IsBlockTerminator() const override { return is_tailcall; }
+ bool IsBlockTerminator() const override;
CallRuntimeInstruction(bool is_tailcall, RuntimeFunction* runtime_function,
- size_t argc)
+ size_t argc, base::Optional<Block*> catch_block)
: is_tailcall(is_tailcall),
runtime_function(runtime_function),
- argc(argc) {}
+ argc(argc),
+ catch_block(catch_block) {}
+ void AppendSuccessorBlocks(std::vector<Block*>* block_list) const override {
+ if (catch_block) block_list->push_back(*catch_block);
+ }
bool is_tailcall;
RuntimeFunction* runtime_function;
size_t argc;
+ base::Optional<Block*> catch_block;
};
struct BranchInstruction : InstructionBase {
@@ -330,13 +403,17 @@ struct PrintConstantStringInstruction : InstructionBase {
std::string message;
};
-struct DebugBreakInstruction : InstructionBase {
+struct AbortInstruction : InstructionBase {
TORQUE_INSTRUCTION_BOILERPLATE()
- bool IsBlockTerminator() const override { return never_continues; }
- explicit DebugBreakInstruction(bool never_continues)
- : never_continues(never_continues) {}
+ enum class Kind { kDebugBreak, kUnreachable, kAssertionFailure };
+ bool IsBlockTerminator() const override { return kind != Kind::kDebugBreak; }
+ explicit AbortInstruction(Kind kind, std::string message = "") : kind(kind) {
+ // The normal way to write this triggers a bug in Clang on Windows.
+ this->message = std::move(message);
+ }
- bool never_continues;
+ Kind kind;
+ std::string message;
};
struct UnsafeCastInstruction : InstructionBase {
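The new Mnemonic() accessor relies on the TORQUE_INSTRUCTION_LIST X-macro, so the InstructionKind enum and its printable names are generated from one list and cannot drift apart. A stripped-down sketch of the same pattern; the list entries here are an arbitrary subset chosen for illustration:

#include <cstdio>

#define SKETCH_INSTRUCTION_LIST(V) \
  V(PeekInstruction)               \
  V(PokeInstruction)               \
  V(AbortInstruction)

enum class InstructionKind {
// Expand the list once to declare the enumerators.
#define ENUM_ITEM(name) k##name,
  SKETCH_INSTRUCTION_LIST(ENUM_ITEM)
#undef ENUM_ITEM
};

const char* Mnemonic(InstructionKind kind) {
  switch (kind) {
// Expand the same list again to map each enumerator to its name.
#define ENUM_ITEM(name)          \
  case InstructionKind::k##name: \
    return #name;
    SKETCH_INSTRUCTION_LIST(ENUM_ITEM)
#undef ENUM_ITEM
  }
  return "unknown";
}

int main() {
  std::printf("%s\n", Mnemonic(InstructionKind::kAbortInstruction));
}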
diff --git a/deps/v8/src/torque/scope.cc b/deps/v8/src/torque/scope.cc
deleted file mode 100644
index 4cb1442020..0000000000
--- a/deps/v8/src/torque/scope.cc
+++ /dev/null
@@ -1,51 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include <fstream>
-#include <iostream>
-#include <string>
-
-#include "src/torque/global-context.h"
-
-#include "src/torque/scope.h"
-
-namespace v8 {
-namespace internal {
-namespace torque {
-
-Scope::Scope(ScopeChain& scope_chain)
- : scope_chain_(scope_chain),
- scope_number_(scope_chain_.GetNextScopeNumber()),
- private_label_number_(0) {}
-
-Scope* ScopeChain::NewScope() {
- Scope* new_scope = new Scope(*this);
- scopes_.emplace_back(std::unique_ptr<Scope>(new_scope));
- return new_scope;
-}
-
-void Scope::AddLiveVariables(std::set<const Variable*>& set) {
- for (auto& current : lookup_) {
- if (current.second->IsVariable()) {
- set.insert(Variable::cast(current.second));
- }
- }
-}
-
-void Scope::Print() {
- std::cout << "scope #" << std::to_string(scope_number_) << "\n";
- for (auto& i : lookup_) {
- std::cout << i.first << ": " << i.second << "\n";
- }
-}
-
-Scope::Activator::Activator(Scope* scope) : scope_(scope) {
- scope->GetScopeChain().PushScope(scope);
-}
-
-Scope::Activator::~Activator() { scope_->GetScopeChain().PopScope(); }
-
-} // namespace torque
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/torque/scope.h b/deps/v8/src/torque/scope.h
deleted file mode 100644
index 21438da8fe..0000000000
--- a/deps/v8/src/torque/scope.h
+++ /dev/null
@@ -1,169 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_TORQUE_SCOPE_H_
-#define V8_TORQUE_SCOPE_H_
-
-#include <map>
-#include <string>
-
-#include "src/torque/ast.h"
-#include "src/torque/types.h"
-#include "src/torque/utils.h"
-
-namespace v8 {
-namespace internal {
-namespace torque {
-
-class ScopeChain;
-class Variable;
-class Declarable;
-
-class Scope {
- public:
- explicit Scope(ScopeChain& scope_chain);
-
- void Stream(std::ostream& stream) const {
- stream << "scope " << std::to_string(scope_number_) << " {";
- for (auto& c : lookup_) {
- stream << c.first << ",";
- }
- stream << "}";
- }
-
- int scope_number() const { return scope_number_; }
-
- ScopeChain& GetScopeChain() const { return scope_chain_; }
-
- void AddLiveVariables(std::set<const Variable*>& set);
-
- void Print();
-
- class Activator;
-
- private:
- friend class ScopeChain;
-
- void CheckAlreadyDeclared(SourcePosition pos, const std::string& name,
- const char* new_type);
-
- void Declare(const std::string& name, Declarable* d) {
- DCHECK_EQ(lookup_.end(), lookup_.find(name));
- DCHECK(d != nullptr);
- lookup_[name] = d;
- }
-
- Declarable* Lookup(const std::string& name) {
- auto i = lookup_.find(name);
- if (i == lookup_.end()) {
- return nullptr;
- }
- return i->second;
- }
-
- ScopeChain& scope_chain_;
- int scope_number_;
- int private_label_number_;
- std::map<std::string, Declarable*> lookup_;
-};
-
-class Scope::Activator {
- public:
- explicit Activator(Scope* scope);
- ~Activator();
-
- private:
- Scope* scope_;
-};
-
-class ScopeChain {
- public:
- ScopeChain() : next_scope_number_(0) {}
- Scope* NewScope();
-
- Scope* TopScope() const { return current_scopes_.back(); }
- void PushScope(Scope* scope) { current_scopes_.push_back(scope); }
- void PopScope() { current_scopes_.pop_back(); }
-
- std::set<const Variable*> GetLiveVariables() {
- std::set<const Variable*> result;
- for (auto scope : current_scopes_) {
- scope->AddLiveVariables(result);
- }
- return result;
- }
-
- void Declare(const std::string& name, Declarable* d) {
- TopScope()->Declare(name, d);
- }
-
- Declarable* Lookup(const std::string& name) {
- auto e = current_scopes_.rend();
- auto c = current_scopes_.rbegin();
- while (c != e) {
- Declarable* result = (*c)->Lookup(name);
- if (result != nullptr) return result;
- ++c;
- }
- return nullptr;
- }
-
- Declarable* ShallowLookup(const std::string& name) {
- auto& e = current_scopes_.back();
- return e->Lookup(name);
- }
-
- Declarable* LookupGlobalScope(const std::string& name) {
- auto& e = current_scopes_.front();
- return e->Lookup(name);
- }
-
- void Print() {
- for (auto s : current_scopes_) {
- s->Print();
- }
- }
-
- struct Snapshot {
- ScopeChain* chain;
- std::vector<Scope*> current_scopes;
- };
-
- Snapshot TaskSnapshot() { return {this, current_scopes_}; }
-
- class ScopedSnapshotRestorer {
- public:
- explicit ScopedSnapshotRestorer(const Snapshot& snapshot)
- : chain_(snapshot.chain) {
- saved_ = chain_->current_scopes_;
- chain_->current_scopes_ = snapshot.current_scopes;
- }
- ~ScopedSnapshotRestorer() { chain_->current_scopes_ = saved_; }
-
- private:
- ScopeChain* chain_;
- std::vector<Scope*> saved_;
- };
-
- private:
- friend class Scope;
- friend class ScopedSnapshotRestorer;
-
- int GetNextScopeNumber() { return next_scope_number_++; }
-
- int next_scope_number_;
- std::vector<std::unique_ptr<Scope>> scopes_;
- std::vector<Scope*> current_scopes_;
-};
-
-inline std::ostream& operator<<(std::ostream& os, const Scope& scope) {
- scope.Stream(os);
- return os;
-}
-
-} // namespace torque
-} // namespace internal
-} // namespace v8
-
-#endif // V8_TORQUE_SCOPE_H_
diff --git a/deps/v8/src/torque/source-positions.h b/deps/v8/src/torque/source-positions.h
index ae07ff9bf2..7b6f7a32ca 100644
--- a/deps/v8/src/torque/source-positions.h
+++ b/deps/v8/src/torque/source-positions.h
@@ -5,6 +5,8 @@
#ifndef V8_TORQUE_SOURCE_POSITIONS_H_
#define V8_TORQUE_SOURCE_POSITIONS_H_
+#include <iostream>
+
#include "src/torque/contextual.h"
namespace v8 {
@@ -48,6 +50,10 @@ inline std::string PositionAsString(SourcePosition pos) {
std::to_string(pos.line + 1) + ":" + std::to_string(pos.column + 1);
}
+inline std::ostream& operator<<(std::ostream& out, SourcePosition pos) {
+ return out << PositionAsString(pos);
+}
+
} // namespace torque
} // namespace internal
} // namespace v8
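Pulling in <iostream> and defining operator<< lets a SourcePosition be streamed like any other error-message argument (for example the "previous declaration at:" diagnostic emitted by BlockBindings::Add). A minimal usage sketch with stand-in types, not V8's headers:

#include <iostream>
#include <string>

struct SourcePosition { int line, column; };

// Mirrors the patch: the stream operator just delegates to PositionAsString.
std::string PositionAsString(SourcePosition pos) {
  return std::to_string(pos.line + 1) + ":" + std::to_string(pos.column + 1);
}

std::ostream& operator<<(std::ostream& out, SourcePosition pos) {
  return out << PositionAsString(pos);
}

int main() {
  SourcePosition pos{41, 6};
  std::cout << "previous declaration at: " << pos << "\n";  // prints 42:7
}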
diff --git a/deps/v8/src/torque/torque-parser.cc b/deps/v8/src/torque/torque-parser.cc
index 91f9ef4ed2..6f00686cef 100644
--- a/deps/v8/src/torque/torque-parser.cc
+++ b/deps/v8/src/torque/torque-parser.cc
@@ -39,8 +39,13 @@ enum class ParseResultHolderBase::TypeId {
kDeclarationPtr,
kTypeExpressionPtr,
kLabelBlockPtr,
+ kOptionalLabelBlockPtr,
kNameAndTypeExpression,
+ kClassFieldExpression,
+ kStructFieldExpression,
kStdVectorOfNameAndTypeExpression,
+ kStdVectorOfClassFieldExpression,
+ kStdVectorOfStructFieldExpression,
kIncrementDecrementOperator,
kOptionalStdString,
kStdVectorOfStatementPtr,
@@ -82,6 +87,10 @@ template <>
V8_EXPORT_PRIVATE const ParseResultTypeId ParseResultHolder<LabelBlock*>::id =
ParseResultTypeId::kLabelBlockPtr;
template <>
+V8_EXPORT_PRIVATE const ParseResultTypeId
+ ParseResultHolder<base::Optional<LabelBlock*>>::id =
+ ParseResultTypeId::kOptionalLabelBlockPtr;
+template <>
V8_EXPORT_PRIVATE const ParseResultTypeId ParseResultHolder<Expression*>::id =
ParseResultTypeId::kExpressionPtr;
template <>
@@ -97,10 +106,26 @@ V8_EXPORT_PRIVATE const ParseResultTypeId
ParseResultTypeId::kNameAndTypeExpression;
template <>
V8_EXPORT_PRIVATE const ParseResultTypeId
+ ParseResultHolder<ClassFieldExpression>::id =
+ ParseResultTypeId::kClassFieldExpression;
+template <>
+V8_EXPORT_PRIVATE const ParseResultTypeId
+ ParseResultHolder<StructFieldExpression>::id =
+ ParseResultTypeId::kStructFieldExpression;
+template <>
+V8_EXPORT_PRIVATE const ParseResultTypeId
ParseResultHolder<std::vector<NameAndTypeExpression>>::id =
ParseResultTypeId::kStdVectorOfNameAndTypeExpression;
template <>
V8_EXPORT_PRIVATE const ParseResultTypeId
+ ParseResultHolder<std::vector<ClassFieldExpression>>::id =
+ ParseResultTypeId::kStdVectorOfClassFieldExpression;
+template <>
+V8_EXPORT_PRIVATE const ParseResultTypeId
+ ParseResultHolder<std::vector<StructFieldExpression>>::id =
+ ParseResultTypeId::kStdVectorOfStructFieldExpression;
+template <>
+V8_EXPORT_PRIVATE const ParseResultTypeId
ParseResultHolder<IncrementDecrementOperator>::id =
ParseResultTypeId::kIncrementDecrementOperator;
template <>
@@ -177,12 +202,6 @@ base::Optional<ParseResult> AddGlobalDeclaration(
return base::nullopt;
}
-template <class T, class... Args>
-T* MakeNode(Args... args) {
- return CurrentAst::Get().AddNode(std::unique_ptr<T>(
- new T(CurrentSourcePosition::Get(), std::move(args)...)));
-}
-
void LintGenericParameters(const GenericParameters& parameters) {
for (const std::string& parameter : parameters) {
if (!IsUpperCamelCase(parameter)) {
@@ -192,6 +211,7 @@ void LintGenericParameters(const GenericParameters& parameters) {
}
void CheckNotDeferredStatement(Statement* statement) {
+ CurrentSourcePosition::Scope source_position(statement->pos);
if (BlockStatement* block = BlockStatement::DynamicCast(statement)) {
if (block->deferred) {
LintError(
@@ -201,9 +221,9 @@ void CheckNotDeferredStatement(Statement* statement) {
}
}
-Expression* MakeCall(const std::string& callee, bool is_operator,
- const std::vector<TypeExpression*>& generic_arguments,
- const std::vector<Expression*>& arguments,
+Expression* MakeCall(IdentifierExpression* callee,
+ base::Optional<Expression*> target,
+ std::vector<Expression*> arguments,
const std::vector<Statement*>& otherwise) {
std::vector<std::string> labels;
@@ -231,20 +251,60 @@ Expression* MakeCall(const std::string& callee, bool is_operator,
// Create nested try-label expression for all of the temporary Labels that
// were created.
- Expression* result = MakeNode<CallExpression>(
- callee, false, generic_arguments, arguments, labels);
+ Expression* result = nullptr;
+ if (target) {
+ result = MakeNode<CallMethodExpression>(*target, callee, arguments, labels);
+ } else {
+ result = MakeNode<CallExpression>(callee, arguments, labels);
+ }
+
for (auto* label : temp_labels) {
- result = MakeNode<TryLabelExpression>(result, label);
+ result = MakeNode<TryLabelExpression>(false, result, label);
}
return result;
}
+Expression* MakeCall(const std::string& callee,
+ const std::vector<TypeExpression*>& generic_arguments,
+ const std::vector<Expression*>& arguments,
+ const std::vector<Statement*>& otherwise) {
+ return MakeCall(MakeNode<IdentifierExpression>(callee, generic_arguments),
+ base::nullopt, arguments, otherwise);
+}
+
base::Optional<ParseResult> MakeCall(ParseResultIterator* child_results) {
+ auto callee = child_results->NextAs<LocationExpression*>();
+ auto args = child_results->NextAs<std::vector<Expression*>>();
+ auto otherwise = child_results->NextAs<std::vector<Statement*>>();
+ IdentifierExpression* target = IdentifierExpression::cast(callee);
+ if (target->name == kSuperMethodName) {
+ if (target->namespace_qualification.size() != 0) {
+ ReportError(
+ "\"super\" invocation cannot be used with namespace qualification");
+ }
+ target = MakeNode<IdentifierExpression>(kSuperMethodName);
+ return ParseResult{
+ MakeCall(target, MakeNode<IdentifierExpression>(kThisParameterName),
+ args, otherwise)};
+ } else {
+ return ParseResult{MakeCall(target, base::nullopt, args, otherwise)};
+ }
+}
+
+base::Optional<ParseResult> MakeMethodCall(ParseResultIterator* child_results) {
+ auto this_arg = child_results->NextAs<Expression*>();
auto callee = child_results->NextAs<std::string>();
- auto generic_args = child_results->NextAs<TypeList>();
auto args = child_results->NextAs<std::vector<Expression*>>();
auto otherwise = child_results->NextAs<std::vector<Statement*>>();
- return ParseResult{MakeCall(callee, false, generic_args, args, otherwise)};
+ return ParseResult{MakeCall(MakeNode<IdentifierExpression>(callee), this_arg,
+ args, otherwise)};
+}
+
+base::Optional<ParseResult> MakeNew(ParseResultIterator* child_results) {
+ TypeExpression* type = child_results->NextAs<TypeExpression*>();
+ auto args = child_results->NextAs<std::vector<Expression*>>();
+ Expression* result = MakeNode<NewExpression>(type, args);
+ return ParseResult{result};
}
base::Optional<ParseResult> MakeBinaryOperator(
@@ -252,38 +312,73 @@ base::Optional<ParseResult> MakeBinaryOperator(
auto left = child_results->NextAs<Expression*>();
auto op = child_results->NextAs<std::string>();
auto right = child_results->NextAs<Expression*>();
- return ParseResult{MakeCall(op, true, TypeList{},
+ return ParseResult{MakeCall(op, TypeList{},
std::vector<Expression*>{left, right},
std::vector<Statement*>{})};
}
+base::Optional<ParseResult> MakeIntrinsicCallExpression(
+ ParseResultIterator* child_results) {
+ auto callee = child_results->NextAs<std::string>();
+ auto generic_arguments =
+ child_results->NextAs<std::vector<TypeExpression*>>();
+ auto args = child_results->NextAs<std::vector<Expression*>>();
+ Expression* result =
+ MakeNode<IntrinsicCallExpression>(callee, generic_arguments, args);
+ return ParseResult{result};
+}
+
base::Optional<ParseResult> MakeUnaryOperator(
ParseResultIterator* child_results) {
auto op = child_results->NextAs<std::string>();
auto e = child_results->NextAs<Expression*>();
- return ParseResult{MakeCall(op, true, TypeList{}, std::vector<Expression*>{e},
+ return ParseResult{MakeCall(op, TypeList{}, std::vector<Expression*>{e},
std::vector<Statement*>{})};
}
template <bool has_varargs>
base::Optional<ParseResult> MakeParameterListFromTypes(
ParseResultIterator* child_results) {
- auto types = child_results->NextAs<TypeList>();
+ auto implicit_params =
+ child_results->NextAs<std::vector<NameAndTypeExpression>>();
+ auto explicit_types = child_results->NextAs<TypeList>();
ParameterList result;
- result.types = std::move(types);
result.has_varargs = has_varargs;
+ result.implicit_count = implicit_params.size();
+ for (NameAndTypeExpression& implicit_param : implicit_params) {
+ if (!IsLowerCamelCase(implicit_param.name)) {
+ NamingConventionError("Parameter", implicit_param.name, "lowerCamelCase");
+ }
+ result.names.push_back(implicit_param.name);
+ result.types.push_back(implicit_param.type);
+ }
+ for (auto* explicit_type : explicit_types) {
+ result.types.push_back(explicit_type);
+ }
return ParseResult{std::move(result)};
}
+
template <bool has_varargs>
base::Optional<ParseResult> MakeParameterListFromNameAndTypeList(
ParseResultIterator* child_results) {
- auto params = child_results->NextAs<std::vector<NameAndTypeExpression>>();
+ auto implicit_params =
+ child_results->NextAs<std::vector<NameAndTypeExpression>>();
+ auto explicit_params =
+ child_results->NextAs<std::vector<NameAndTypeExpression>>();
std::string arguments_variable = "";
if (child_results->HasNext()) {
arguments_variable = child_results->NextAs<std::string>();
}
ParameterList result;
- for (NameAndTypeExpression& pair : params) {
+ for (NameAndTypeExpression& pair : implicit_params) {
+ if (!IsLowerCamelCase(pair.name)) {
+ NamingConventionError("Parameter", pair.name, "lowerCamelCase");
+ }
+
+ result.names.push_back(std::move(pair.name));
+ result.types.push_back(pair.type);
+ }
+ for (NameAndTypeExpression& pair : explicit_params) {
if (!IsLowerCamelCase(pair.name)) {
NamingConventionError("Parameter", pair.name, "lowerCamelCase");
}
@@ -291,6 +386,7 @@ base::Optional<ParseResult> MakeParameterListFromNameAndTypeList(
result.names.push_back(std::move(pair.name));
result.types.push_back(pair.type);
}
+ result.implicit_count = implicit_params.size();
result.has_varargs = has_varargs;
result.arguments_variable = arguments_variable;
return ParseResult{std::move(result)};
@@ -315,13 +411,17 @@ base::Optional<ParseResult> MakeDebugStatement(
}
base::Optional<ParseResult> MakeVoidType(ParseResultIterator* child_results) {
- TypeExpression* result = MakeNode<BasicTypeExpression>(false, "void");
+ TypeExpression* result =
+ MakeNode<BasicTypeExpression>(std::vector<std::string>{}, false, "void");
return ParseResult{result};
}
base::Optional<ParseResult> MakeExternalMacro(
ParseResultIterator* child_results) {
+ auto transitioning = child_results->NextAs<bool>();
auto operator_name = child_results->NextAs<base::Optional<std::string>>();
+ auto external_assembler_name =
+ child_results->NextAs<base::Optional<std::string>>();
auto name = child_results->NextAs<std::string>();
auto generic_parameters = child_results->NextAs<GenericParameters>();
LintGenericParameters(generic_parameters);
@@ -330,10 +430,31 @@ base::Optional<ParseResult> MakeExternalMacro(
auto return_type = child_results->NextAs<TypeExpression*>();
auto labels = child_results->NextAs<LabelAndTypesVector>();
MacroDeclaration* macro = MakeNode<ExternalMacroDeclaration>(
+ transitioning,
+ external_assembler_name ? *external_assembler_name : "CodeStubAssembler",
name, operator_name, args, return_type, labels);
Declaration* result;
if (generic_parameters.empty()) {
- result = MakeNode<StandardDeclaration>(macro, nullptr);
+ result = MakeNode<StandardDeclaration>(macro, base::nullopt);
+ } else {
+ result = MakeNode<GenericDeclaration>(macro, generic_parameters);
+ }
+ return ParseResult{result};
+}
+
+base::Optional<ParseResult> MakeIntrinsicDeclaration(
+ ParseResultIterator* child_results) {
+ auto name = child_results->NextAs<std::string>();
+ auto generic_parameters = child_results->NextAs<GenericParameters>();
+ LintGenericParameters(generic_parameters);
+
+ auto args = child_results->NextAs<ParameterList>();
+ auto return_type = child_results->NextAs<TypeExpression*>();
+ IntrinsicDeclaration* macro =
+ MakeNode<IntrinsicDeclaration>(name, args, return_type);
+ Declaration* result;
+ if (generic_parameters.empty()) {
+ result = MakeNode<StandardDeclaration>(macro, base::nullopt);
} else {
result = MakeNode<GenericDeclaration>(macro, generic_parameters);
}
@@ -342,6 +463,7 @@ base::Optional<ParseResult> MakeExternalMacro(
base::Optional<ParseResult> MakeTorqueMacroDeclaration(
ParseResultIterator* child_results) {
+ auto transitioning = child_results->NextAs<bool>();
auto operator_name = child_results->NextAs<base::Optional<std::string>>();
auto name = child_results->NextAs<std::string>();
if (!IsUpperCamelCase(name)) {
@@ -356,7 +478,7 @@ base::Optional<ParseResult> MakeTorqueMacroDeclaration(
auto labels = child_results->NextAs<LabelAndTypesVector>();
auto body = child_results->NextAs<base::Optional<Statement*>>();
MacroDeclaration* macro = MakeNode<TorqueMacroDeclaration>(
- name, operator_name, args, return_type, labels);
+ transitioning, name, operator_name, args, return_type, labels);
Declaration* result;
if (generic_parameters.empty()) {
if (!body) ReportError("A non-generic declaration needs a body.");
@@ -369,6 +491,7 @@ base::Optional<ParseResult> MakeTorqueMacroDeclaration(
base::Optional<ParseResult> MakeTorqueBuiltinDeclaration(
ParseResultIterator* child_results) {
+ auto transitioning = child_results->NextAs<bool>();
auto javascript_linkage = child_results->NextAs<bool>();
auto name = child_results->NextAs<std::string>();
if (!IsUpperCamelCase(name)) {
@@ -382,7 +505,7 @@ base::Optional<ParseResult> MakeTorqueBuiltinDeclaration(
auto return_type = child_results->NextAs<TypeExpression*>();
auto body = child_results->NextAs<base::Optional<Statement*>>();
BuiltinDeclaration* builtin = MakeNode<TorqueBuiltinDeclaration>(
- javascript_linkage, name, args, return_type);
+ transitioning, javascript_linkage, name, args, return_type);
Declaration* result;
if (generic_parameters.empty()) {
if (!body) ReportError("A non-generic declaration needs a body.");
@@ -396,7 +519,7 @@ base::Optional<ParseResult> MakeTorqueBuiltinDeclaration(
base::Optional<ParseResult> MakeConstDeclaration(
ParseResultIterator* child_results) {
auto name = child_results->NextAs<std::string>();
- if (!IsValidModuleConstName(name)) {
+ if (!IsValidNamespaceConstName(name)) {
NamingConventionError("Constant", name, "kUpperCamelCase");
}
@@ -427,6 +550,7 @@ base::Optional<ParseResult> MakeTypeAliasDeclaration(
base::Optional<ParseResult> MakeTypeDeclaration(
ParseResultIterator* child_results) {
+ auto transient = child_results->NextAs<bool>();
auto name = child_results->NextAs<std::string>();
if (!IsValidTypeName(name)) {
NamingConventionError("Type", name, "UpperCamelCase");
@@ -436,20 +560,56 @@ base::Optional<ParseResult> MakeTypeDeclaration(
auto constexpr_generates =
child_results->NextAs<base::Optional<std::string>>();
Declaration* result = MakeNode<TypeDeclaration>(
- std::move(name), std::move(extends), std::move(generates),
+ std::move(name), transient, std::move(extends), std::move(generates),
std::move(constexpr_generates));
return ParseResult{result};
}
-base::Optional<ParseResult> MakeExplicitModuleDeclaration(
+base::Optional<ParseResult> MakeMethodDeclaration(
+ ParseResultIterator* child_results) {
+ auto transitioning = child_results->NextAs<bool>();
+ auto operator_name = child_results->NextAs<base::Optional<std::string>>();
+ auto name = child_results->NextAs<std::string>();
+ if (name != kConstructMethodName && !IsUpperCamelCase(name)) {
+ NamingConventionError("Method", name, "UpperCamelCase");
+ }
+
+ auto args = child_results->NextAs<ParameterList>();
+ auto return_type = child_results->NextAs<TypeExpression*>();
+ auto labels = child_results->NextAs<LabelAndTypesVector>();
+ auto body = child_results->NextAs<Statement*>();
+ MacroDeclaration* macro = MakeNode<TorqueMacroDeclaration>(
+ transitioning, name, operator_name, args, return_type, labels);
+ Declaration* result = MakeNode<StandardDeclaration>(macro, body);
+ return ParseResult{result};
+}
+
+base::Optional<ParseResult> MakeClassDeclaration(
+ ParseResultIterator* child_results) {
+ auto transient = child_results->NextAs<bool>();
+ auto name = child_results->NextAs<std::string>();
+ if (!IsValidTypeName(name)) {
+ NamingConventionError("Type", name, "UpperCamelCase");
+ }
+ auto extends = child_results->NextAs<std::string>();
+ auto generates = child_results->NextAs<base::Optional<std::string>>();
+ auto methods = child_results->NextAs<std::vector<Declaration*>>();
+ auto fields = child_results->NextAs<std::vector<ClassFieldExpression>>();
+ Declaration* result = MakeNode<ClassDeclaration>(
+ std::move(name), transient, std::move(extends), std::move(generates),
+ std::move(methods), fields);
+ return ParseResult{result};
+}
+
+base::Optional<ParseResult> MakeNamespaceDeclaration(
ParseResultIterator* child_results) {
auto name = child_results->NextAs<std::string>();
if (!IsSnakeCase(name)) {
- NamingConventionError("Module", name, "snake_case");
+ NamingConventionError("Namespace", name, "snake_case");
}
auto declarations = child_results->NextAs<std::vector<Declaration*>>();
- Declaration* result = MakeNode<ExplicitModuleDeclaration>(
- std::move(name), std::move(declarations));
+ Declaration* result =
+ MakeNode<NamespaceDeclaration>(std::move(name), std::move(declarations));
return ParseResult{result};
}
@@ -472,14 +632,24 @@ base::Optional<ParseResult> MakeSpecializationDeclaration(
base::Optional<ParseResult> MakeStructDeclaration(
ParseResultIterator* child_results) {
auto name = child_results->NextAs<std::string>();
- auto fields = child_results->NextAs<std::vector<NameAndTypeExpression>>();
+ auto methods = child_results->NextAs<std::vector<Declaration*>>();
+ auto fields = child_results->NextAs<std::vector<StructFieldExpression>>();
+ Declaration* result = MakeNode<StructDeclaration>(
+ std::move(name), std::move(methods), std::move(fields));
+ return ParseResult{result};
+}
+
+base::Optional<ParseResult> MakeCppIncludeDeclaration(
+ ParseResultIterator* child_results) {
+ auto include_path = child_results->NextAs<std::string>();
Declaration* result =
- MakeNode<StructDeclaration>(std::move(name), std::move(fields));
+ MakeNode<CppIncludeDeclaration>(std::move(include_path));
return ParseResult{result};
}
base::Optional<ParseResult> MakeExternalBuiltin(
ParseResultIterator* child_results) {
+ auto transitioning = child_results->NextAs<bool>();
auto js_linkage = child_results->NextAs<bool>();
auto name = child_results->NextAs<std::string>();
auto generic_parameters = child_results->NextAs<GenericParameters>();
@@ -487,11 +657,11 @@ base::Optional<ParseResult> MakeExternalBuiltin(
auto args = child_results->NextAs<ParameterList>();
auto return_type = child_results->NextAs<TypeExpression*>();
- BuiltinDeclaration* builtin =
- MakeNode<ExternalBuiltinDeclaration>(js_linkage, name, args, return_type);
+ BuiltinDeclaration* builtin = MakeNode<ExternalBuiltinDeclaration>(
+ transitioning, js_linkage, name, args, return_type);
Declaration* result;
if (generic_parameters.empty()) {
- result = MakeNode<StandardDeclaration>(builtin, nullptr);
+ result = MakeNode<StandardDeclaration>(builtin, base::nullopt);
} else {
result = MakeNode<GenericDeclaration>(builtin, generic_parameters);
}
@@ -500,12 +670,13 @@ base::Optional<ParseResult> MakeExternalBuiltin(
base::Optional<ParseResult> MakeExternalRuntime(
ParseResultIterator* child_results) {
+ auto transitioning = child_results->NextAs<bool>();
auto name = child_results->NextAs<std::string>();
auto args = child_results->NextAs<ParameterList>();
auto return_type = child_results->NextAs<TypeExpression*>();
- ExternalRuntimeDeclaration* runtime =
- MakeNode<ExternalRuntimeDeclaration>(name, args, return_type);
- Declaration* result = MakeNode<StandardDeclaration>(runtime, nullptr);
+ ExternalRuntimeDeclaration* runtime = MakeNode<ExternalRuntimeDeclaration>(
+ transitioning, name, args, return_type);
+ Declaration* result = MakeNode<StandardDeclaration>(runtime, base::nullopt);
return ParseResult{result};
}
@@ -517,10 +688,12 @@ base::Optional<ParseResult> StringLiteralUnquoteAction(
base::Optional<ParseResult> MakeBasicTypeExpression(
ParseResultIterator* child_results) {
+ auto namespace_qualification =
+ child_results->NextAs<std::vector<std::string>>();
auto is_constexpr = child_results->NextAs<bool>();
auto name = child_results->NextAs<std::string>();
- TypeExpression* result =
- MakeNode<BasicTypeExpression>(is_constexpr, std::move(name));
+ TypeExpression* result = MakeNode<BasicTypeExpression>(
+ std::move(namespace_qualification), is_constexpr, std::move(name));
return ParseResult{result};
}
@@ -561,6 +734,11 @@ base::Optional<ParseResult> MakeIfStatement(
ReportError("if-else statements require curly braces");
}
+ if (is_constexpr) {
+ CheckNotDeferredStatement(if_true);
+ if (if_false) CheckNotDeferredStatement(*if_false);
+ }
+
Statement* result =
MakeNode<IfStatement>(is_constexpr, condition, if_true, if_false);
return ParseResult{result};
@@ -617,11 +795,10 @@ base::Optional<ParseResult> MakeTypeswitchStatement(
}
BlockStatement* case_block;
if (i < cases.size() - 1) {
- value =
- MakeCall("Cast", false, std::vector<TypeExpression*>{cases[i].type},
- std::vector<Expression*>{value},
- std::vector<Statement*>{MakeNode<ExpressionStatement>(
- MakeNode<IdentifierExpression>("_NextCase"))});
+ value = MakeCall("Cast", std::vector<TypeExpression*>{cases[i].type},
+ std::vector<Expression*>{value},
+ std::vector<Statement*>{MakeNode<ExpressionStatement>(
+ MakeNode<IdentifierExpression>("_NextCase"))});
case_block = MakeNode<BlockStatement>();
} else {
case_block = current_block;
@@ -635,7 +812,7 @@ base::Optional<ParseResult> MakeTypeswitchStatement(
BlockStatement* next_block = MakeNode<BlockStatement>();
current_block->statements.push_back(
MakeNode<ExpressionStatement>(MakeNode<TryLabelExpression>(
- MakeNode<StatementExpression>(case_block),
+ false, MakeNode<StatementExpression>(case_block),
MakeNode<LabelBlock>("_NextCase", ParameterList::Empty(),
next_block))));
current_block = next_block;
@@ -652,7 +829,6 @@ base::Optional<ParseResult> MakeTypeswitchCase(
auto name = child_results->NextAs<base::Optional<std::string>>();
auto type = child_results->NextAs<TypeExpression*>();
auto block = child_results->NextAs<Statement*>();
- CheckNotDeferredStatement(block);
return ParseResult{TypeswitchCase{child_results->matched_input().pos,
std::move(name), type, block}};
}
@@ -724,6 +900,9 @@ base::Optional<ParseResult> MakeBlockStatement(
ParseResultIterator* child_results) {
auto deferred = child_results->NextAs<bool>();
auto statements = child_results->NextAs<std::vector<Statement*>>();
+ for (Statement* statement : statements) {
+ CheckNotDeferredStatement(statement);
+ }
Statement* result = MakeNode<BlockStatement>(deferred, std::move(statements));
return ParseResult{result};
}
@@ -734,9 +913,14 @@ base::Optional<ParseResult> MakeTryLabelExpression(
CheckNotDeferredStatement(try_block);
Statement* result = try_block;
auto label_blocks = child_results->NextAs<std::vector<LabelBlock*>>();
+ auto catch_block = child_results->NextAs<base::Optional<LabelBlock*>>();
for (auto block : label_blocks) {
result = MakeNode<ExpressionStatement>(MakeNode<TryLabelExpression>(
- MakeNode<StatementExpression>(result), block));
+ false, MakeNode<StatementExpression>(result), block));
+ }
+ if (catch_block) {
+ result = MakeNode<ExpressionStatement>(MakeNode<TryLabelExpression>(
+ true, MakeNode<StatementExpression>(result), *catch_block));
}
return ParseResult{result};
}
@@ -759,9 +943,12 @@ base::Optional<ParseResult> MakeForLoopStatement(
auto var_decl = child_results->NextAs<base::Optional<Statement*>>();
auto test = child_results->NextAs<base::Optional<Expression*>>();
auto action = child_results->NextAs<base::Optional<Expression*>>();
+ base::Optional<Statement*> action_stmt;
+ if (action) action_stmt = MakeNode<ExpressionStatement>(*action);
auto body = child_results->NextAs<Statement*>();
CheckNotDeferredStatement(body);
- Statement* result = MakeNode<ForLoopStatement>(var_decl, test, action, body);
+ Statement* result =
+ MakeNode<ForLoopStatement>(var_decl, test, action_stmt, body);
return ParseResult{result};
}
@@ -777,6 +964,22 @@ base::Optional<ParseResult> MakeLabelBlock(ParseResultIterator* child_results) {
return ParseResult{result};
}
+base::Optional<ParseResult> MakeCatchBlock(ParseResultIterator* child_results) {
+ auto variable = child_results->NextAs<std::string>();
+ auto body = child_results->NextAs<Statement*>();
+ if (!IsLowerCamelCase(variable)) {
+ NamingConventionError("Exception", variable, "lowerCamelCase");
+ }
+ ParameterList parameters;
+ parameters.names.push_back(variable);
+ parameters.types.push_back(MakeNode<BasicTypeExpression>(
+ std::vector<std::string>{}, false, "Object"));
+ parameters.has_varargs = false;
+ LabelBlock* result =
+ MakeNode<LabelBlock>("_catch", std::move(parameters), body);
+ return ParseResult{result};
+}
+
base::Optional<ParseResult> MakeRangeExpression(
ParseResultIterator* child_results) {
auto begin = child_results->NextAs<base::Optional<Expression*>>();
@@ -794,11 +997,14 @@ base::Optional<ParseResult> MakeExpressionWithSource(
base::Optional<ParseResult> MakeIdentifierExpression(
ParseResultIterator* child_results) {
+ auto namespace_qualification =
+ child_results->NextAs<std::vector<std::string>>();
auto name = child_results->NextAs<std::string>();
auto generic_arguments =
child_results->NextAs<std::vector<TypeExpression*>>();
LocationExpression* result = MakeNode<IdentifierExpression>(
- std::move(name), std::move(generic_arguments));
+ std::move(namespace_qualification), std::move(name),
+ std::move(generic_arguments));
return ParseResult{result};
}
@@ -821,10 +1027,13 @@ base::Optional<ParseResult> MakeElementAccessExpression(
base::Optional<ParseResult> MakeStructExpression(
ParseResultIterator* child_results) {
+ auto namespace_qualification =
+ child_results->NextAs<std::vector<std::string>>();
auto name = child_results->NextAs<std::string>();
auto expressions = child_results->NextAs<std::vector<Expression*>>();
Expression* result =
- MakeNode<StructExpression>(std::move(name), std::move(expressions));
+ MakeNode<StructExpression>(std::move(namespace_qualification),
+ std::move(name), std::move(expressions));
return ParseResult{result};
}
@@ -913,6 +1122,20 @@ base::Optional<ParseResult> MakeNameAndType(
return ParseResult{NameAndTypeExpression{std::move(name), type}};
}
+base::Optional<ParseResult> MakeClassField(ParseResultIterator* child_results) {
+ auto weak = child_results->NextAs<bool>();
+ auto name = child_results->NextAs<std::string>();
+ auto type = child_results->NextAs<TypeExpression*>();
+ return ParseResult{ClassFieldExpression{{std::move(name), type}, weak}};
+}
+
+base::Optional<ParseResult> MakeStructField(
+ ParseResultIterator* child_results) {
+ auto name = child_results->NextAs<std::string>();
+ auto type = child_results->NextAs<TypeExpression*>();
+ return ParseResult{StructFieldExpression{{std::move(name), type}}};
+}
+
base::Optional<ParseResult> ExtractAssignmentOperator(
ParseResultIterator* child_results) {
auto op = child_results->NextAs<std::string>();
@@ -940,6 +1163,16 @@ struct TorqueGrammar : Grammar {
return true;
}
+ static bool MatchIntrinsicName(InputPosition* pos) {
+ InputPosition current = *pos;
+ if (!MatchString("%", &current)) return false;
+ if (!MatchChar(std::isalpha, &current)) return false;
+ while (MatchChar(std::isalnum, &current) || MatchString("_", &current)) {
+ }
+ *pos = current;
+ return true;
+ }
+
static bool MatchStringLiteral(InputPosition* pos) {
InputPosition current = *pos;
if (MatchString("\"", &current)) {
@@ -1004,6 +1237,10 @@ struct TorqueGrammar : Grammar {
Symbol identifier = {Rule({Pattern(MatchIdentifier)}, YieldMatchedInput)};
// Result: std::string
+ Symbol intrinsicName = {
+ Rule({Pattern(MatchIntrinsicName)}, YieldMatchedInput)};
+
+ // Result: std::string
Symbol stringLiteral = {
Rule({Pattern(MatchStringLiteral)}, YieldMatchedInput)};
@@ -1021,7 +1258,9 @@ struct TorqueGrammar : Grammar {
// Result: TypeExpression*
Symbol simpleType = {
Rule({Token("("), &type, Token(")")}),
- Rule({CheckIf(Token("constexpr")), &identifier}, MakeBasicTypeExpression),
+ Rule({List<std::string>(Sequence({&identifier, Token("::")})),
+ CheckIf(Token("constexpr")), &identifier},
+ MakeBasicTypeExpression),
Rule({Token("builtin"), Token("("), typeList, Token(")"), Token("=>"),
&simpleType},
MakeFunctionTypeExpression)};
@@ -1044,12 +1283,19 @@ struct TorqueGrammar : Grammar {
// Result: base::Optional<TypeList>
Symbol* optionalGenericParameters = Optional<TypeList>(&genericParameters);
+ Symbol* optionalImplicitParameterList{
+ TryOrDefault<std::vector<NameAndTypeExpression>>(
+ Sequence({Token("("), Token("implicit"),
+ List<NameAndTypeExpression>(&nameAndType, Token(",")),
+ Token(")")}))};
+
// Result: ParameterList
Symbol typeListMaybeVarArgs = {
- Rule({Token("("), List<TypeExpression*>(Sequence({&type, Token(",")})),
- Token("..."), Token(")")},
+ Rule({optionalImplicitParameterList, Token("("),
+ List<TypeExpression*>(Sequence({&type, Token(",")})), Token("..."),
+ Token(")")},
MakeParameterListFromTypes<true>),
- Rule({Token("("), typeList, Token(")")},
+ Rule({optionalImplicitParameterList, Token("("), typeList, Token(")")},
MakeParameterListFromTypes<false>)};
// Result: LabelAndTypes
@@ -1076,16 +1322,23 @@ struct TorqueGrammar : Grammar {
Symbol nameAndType = {
Rule({&identifier, Token(":"), &type}, MakeNameAndType)};
+ Symbol classField = {
+ Rule({CheckIf(Token("weak")), &identifier, Token(":"), &type, Token(";")},
+ MakeClassField)};
+
+ Symbol structField = {
+ Rule({&identifier, Token(":"), &type, Token(";")}, MakeStructField)};
+
// Result: ParameterList
Symbol parameterListNoVararg = {
- Rule({Token("("), List<NameAndTypeExpression>(&nameAndType, Token(",")),
- Token(")")},
+ Rule({optionalImplicitParameterList, Token("("),
+ List<NameAndTypeExpression>(&nameAndType, Token(",")), Token(")")},
MakeParameterListFromNameAndTypeList<false>)};
// Result: ParameterList
Symbol parameterListAllowVararg = {
Rule({&parameterListNoVararg}),
- Rule({Token("("),
+ Rule({optionalImplicitParameterList, Token("("),
NonemptyList<NameAndTypeExpression>(&nameAndType, Token(",")),
Token(","), Token("..."), &identifier, Token(")")},
MakeParameterListFromNameAndTypeList<true>)};
@@ -1120,10 +1373,16 @@ struct TorqueGrammar : Grammar {
IncrementDecrementOperator::kDecrement>)};
// Result: LocationExpression*
- Symbol locationExpression = {
+ Symbol identifierExpression = {
Rule(
- {&identifier, TryOrDefault<TypeList>(&genericSpecializationTypeList)},
+ {List<std::string>(Sequence({&identifier, Token("::")})), &identifier,
+ TryOrDefault<TypeList>(&genericSpecializationTypeList)},
MakeIdentifierExpression),
+ };
+
+ // Result: LocationExpression*
+ Symbol locationExpression = {
+ Rule({&identifierExpression}),
Rule({&primaryExpression, Token("."), &identifier},
MakeFieldAccessExpression),
Rule({&primaryExpression, Token("["), expression, Token("]")},
@@ -1134,21 +1393,40 @@ struct TorqueGrammar : Grammar {
{Token("("), List<Expression*>(expression, Token(",")), Token(")")})};
// Result: Expression*
- Symbol callExpression = {
- Rule({&identifier, TryOrDefault<TypeList>(&genericSpecializationTypeList),
- &argumentList, optionalOtherwise},
- MakeCall)};
+ Symbol callExpression = {Rule(
+ {&identifierExpression, &argumentList, optionalOtherwise}, MakeCall)};
+
+ Symbol callMethodExpression = {
+ Rule({&primaryExpression, Token("."), &identifier, &argumentList,
+ optionalOtherwise},
+ MakeMethodCall)};
+
+ Symbol initializerList = {Rule(
+ {Token("{"), List<Expression*>(expression, Token(",")), Token("}")})};
+
+ Symbol newExpression = {
+ Rule({Token("new"), &type, &initializerList}, MakeNew)};
+
+ // Result: Expression*
+ Symbol intrinsicCallExpression = {Rule(
+ {&intrinsicName, TryOrDefault<TypeList>(&genericSpecializationTypeList),
+ &argumentList},
+ MakeIntrinsicCallExpression)};
// Result: Expression*
Symbol primaryExpression = {
+ Rule({&newExpression}),
Rule({&callExpression}),
+ Rule({&callMethodExpression}),
+ Rule({&intrinsicCallExpression}),
Rule({&locationExpression},
CastParseResult<LocationExpression*, Expression*>),
Rule({&decimalLiteral}, MakeNumberLiteralExpression),
Rule({&stringLiteral}, MakeStringLiteralExpression),
- Rule({&identifier, Token("{"), List<Expression*>(expression, Token(",")),
- Token("}")},
- MakeStructExpression),
+ Rule(
+ {List<std::string>(Sequence({&identifier, Token("::")})), &identifier,
+ Token("{"), List<Expression*>(expression, Token(",")), Token("}")},
+ MakeStructExpression),
Rule({Token("("), expression, Token(")")})};
// Result: Expression*
@@ -1231,6 +1509,10 @@ struct TorqueGrammar : Grammar {
TryOrDefault<ParameterList>(&parameterListNoVararg), &block},
MakeLabelBlock)};
+ Symbol catchBlock = {
+ Rule({Token("catch"), Token("("), &identifier, Token(")"), &block},
+ MakeCatchBlock)};
+
// Result: ExpressionWithSource
Symbol expressionWithSource = {Rule({expression}, MakeExpressionWithSource)};
@@ -1281,7 +1563,8 @@ struct TorqueGrammar : Grammar {
Token("}"),
},
MakeTypeswitchStatement),
- Rule({Token("try"), &block, NonemptyList<LabelBlock*>(&labelBlock)},
+ Rule({Token("try"), &block, List<LabelBlock*>(&labelBlock),
+ Optional<LabelBlock*>(&catchBlock)},
MakeTryLabelExpression),
Rule({OneOf({"assert", "check"}), Token("("), &expressionWithSource,
Token(")"), Token(";")},
@@ -1310,6 +1593,14 @@ struct TorqueGrammar : Grammar {
Rule({Token(";")}, YieldDefaultValue<base::Optional<Statement*>>)};
// Result: Declaration*
+ Symbol method = {Rule(
+ {CheckIf(Token("transitioning")),
+ Optional<std::string>(Sequence({Token("operator"), &externalString})),
+ &identifier, &parameterListNoVararg, &optionalReturnType,
+ optionalLabelList, &block},
+ MakeMethodDeclaration)};
+
+ // Result: Declaration*
Symbol declaration = {
Rule({Token("const"), &identifier, Token(":"), &type, Token("="),
expression, Token(";")},
@@ -1317,7 +1608,18 @@ struct TorqueGrammar : Grammar {
Rule({Token("const"), &identifier, Token(":"), &type, Token("generates"),
&externalString, Token(";")},
MakeExternConstDeclaration),
- Rule({Token("type"), &identifier,
+ Rule({CheckIf(Token("transient")), Token("class"), &identifier,
+ Sequence({Token("extends"), &identifier}),
+ Optional<std::string>(
+ Sequence({Token("generates"), &externalString})),
+ Token("{"), List<Declaration*>(&method),
+ List<ClassFieldExpression>(&classField), Token("}")},
+ MakeClassDeclaration),
+ Rule({Token("struct"), &identifier, Token("{"),
+ List<Declaration*>(&method),
+ List<StructFieldExpression>(&structField), Token("}")},
+ MakeStructDeclaration),
+ Rule({CheckIf(Token("transient")), Token("type"), &identifier,
Optional<std::string>(Sequence({Token("extends"), &identifier})),
Optional<std::string>(
Sequence({Token("generates"), &externalString})),
@@ -1327,29 +1629,38 @@ struct TorqueGrammar : Grammar {
MakeTypeDeclaration),
Rule({Token("type"), &identifier, Token("="), &type, Token(";")},
MakeTypeAliasDeclaration),
- Rule({Token("extern"),
+ Rule({Token("intrinsic"), &intrinsicName,
+ TryOrDefault<GenericParameters>(&genericParameters),
+ &parameterListNoVararg, &optionalReturnType, Token(";")},
+ MakeIntrinsicDeclaration),
+ Rule({Token("extern"), CheckIf(Token("transitioning")),
Optional<std::string>(
Sequence({Token("operator"), &externalString})),
- Token("macro"), &identifier,
- TryOrDefault<GenericParameters>(&genericParameters),
+ Token("macro"),
+ Optional<std::string>(Sequence({&identifier, Token("::")})),
+ &identifier, TryOrDefault<GenericParameters>(&genericParameters),
&typeListMaybeVarArgs, &optionalReturnType, optionalLabelList,
Token(";")},
MakeExternalMacro),
- Rule({Token("extern"), CheckIf(Token("javascript")), Token("builtin"),
- &identifier, TryOrDefault<GenericParameters>(&genericParameters),
+ Rule({Token("extern"), CheckIf(Token("transitioning")),
+ CheckIf(Token("javascript")), Token("builtin"), &identifier,
+ TryOrDefault<GenericParameters>(&genericParameters),
&typeListMaybeVarArgs, &optionalReturnType, Token(";")},
MakeExternalBuiltin),
- Rule({Token("extern"), Token("runtime"), &identifier,
- &typeListMaybeVarArgs, &optionalReturnType, Token(";")},
- MakeExternalRuntime),
- Rule({Optional<std::string>(
+ Rule(
+ {Token("extern"), CheckIf(Token("transitioning")), Token("runtime"),
+ &identifier, &typeListMaybeVarArgs, &optionalReturnType, Token(";")},
+ MakeExternalRuntime),
+ Rule({CheckIf(Token("transitioning")),
+ Optional<std::string>(
Sequence({Token("operator"), &externalString})),
Token("macro"), &identifier,
TryOrDefault<GenericParameters>(&genericParameters),
&parameterListNoVararg, &optionalReturnType, optionalLabelList,
&optionalBody},
MakeTorqueMacroDeclaration),
- Rule({CheckIf(Token("javascript")), Token("builtin"), &identifier,
+ Rule({CheckIf(Token("transitioning")), CheckIf(Token("javascript")),
+ Token("builtin"), &identifier,
TryOrDefault<GenericParameters>(&genericParameters),
&parameterListAllowVararg, &optionalReturnType, &optionalBody},
MakeTorqueBuiltinDeclaration),
@@ -1357,18 +1668,15 @@ struct TorqueGrammar : Grammar {
&parameterListAllowVararg, &optionalReturnType, optionalLabelList,
&block},
MakeSpecializationDeclaration),
- Rule({Token("struct"), &identifier, Token("{"),
- List<NameAndTypeExpression>(Sequence({&nameAndType, Token(";")})),
- Token("}")},
- MakeStructDeclaration)};
+ Rule({Token("#include"), &externalString}, MakeCppIncludeDeclaration)};
// Result: Declaration*
- Symbol moduleDeclaration = {
- Rule({Token("module"), &identifier, Token("{"),
+ Symbol namespaceDeclaration = {
+ Rule({Token("namespace"), &identifier, Token("{"),
List<Declaration*>(&declaration), Token("}")},
- MakeExplicitModuleDeclaration)};
+ MakeNamespaceDeclaration)};
- Symbol file = {Rule({&file, &moduleDeclaration}, AddGlobalDeclaration),
+ Symbol file = {Rule({&file, &namespaceDeclaration}, AddGlobalDeclaration),
Rule({&file, &declaration}, AddGlobalDeclaration), Rule({})};
};
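
Taken together, the rules above sketch the surface syntax this grammar now accepts: namespaces instead of modules, classes and structs with methods and fields, intrinsics, implicit parameters, the transitioning/transient keywords, namespace-qualified names, and try/catch. The snippet below is an illustrative Torque-style sketch inferred from those rules; every name in it is invented and none of it comes from this commit:

  namespace example {
    extern transitioning runtime ThrowSomething(Context, Object): never;

    transient class Example extends HeapObject generates 'TNode<HeapObject>' {
      weak target: Object;
      count: Smi;
    }

    macro Guarded(implicit context: Context)(input: Object): Object
        labels Bailout {
      try {
        return example::Process(input) otherwise Bailout;
      } catch (exception) {
        return exception;
      }
    }
  }
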
diff --git a/deps/v8/src/torque/torque-parser.h b/deps/v8/src/torque/torque-parser.h
index 99ac7cb75c..f80008207d 100644
--- a/deps/v8/src/torque/torque-parser.h
+++ b/deps/v8/src/torque/torque-parser.h
@@ -11,8 +11,6 @@ namespace v8 {
namespace internal {
namespace torque {
-DECLARE_CONTEXTUAL_VARIABLE(CurrentAst, Ast);
-
// Adds the parsed input to {CurrentAst}
void ParseTorque(const std::string& input);
diff --git a/deps/v8/src/torque/torque.cc b/deps/v8/src/torque/torque.cc
index fd47251d59..05dabdccca 100644
--- a/deps/v8/src/torque/torque.cc
+++ b/deps/v8/src/torque/torque.cc
@@ -9,7 +9,6 @@
#include "src/torque/declaration-visitor.h"
#include "src/torque/global-context.h"
#include "src/torque/implementation-visitor.h"
-#include "src/torque/scope.h"
#include "src/torque/torque-parser.h"
#include "src/torque/type-oracle.h"
#include "src/torque/types.h"
@@ -51,31 +50,30 @@ int WrappedMain(int argc, const char** argv) {
ParseTorque(file_content);
}
- GlobalContext global_context(std::move(CurrentAst::Get()));
- if (verbose) global_context.SetVerbose();
- TypeOracle::Scope type_oracle(global_context.declarations());
+ GlobalContext::Scope global_context(std::move(CurrentAst::Get()));
+ if (verbose) GlobalContext::SetVerbose();
+ TypeOracle::Scope type_oracle;
if (output_directory.length() != 0) {
- {
- DeclarationVisitor visitor(global_context);
+ DeclarationVisitor().Visit(GlobalContext::Get().ast());
- visitor.Visit(global_context.ast());
-
- std::string output_header_path = output_directory;
- output_header_path += "/builtin-definitions-from-dsl.h";
- visitor.GenerateHeader(output_header_path);
+ ImplementationVisitor visitor;
+ for (Namespace* n : GlobalContext::Get().GetNamespaces()) {
+ visitor.BeginNamespaceFile(n);
}
- ImplementationVisitor visitor(global_context);
- for (auto& module : global_context.GetModules()) {
- visitor.BeginModuleFile(module.second.get());
- }
+ visitor.VisitAllDeclarables();
+
+ std::string output_header_path = output_directory;
+ output_header_path += "/builtin-definitions-from-dsl.h";
+ visitor.GenerateBuiltinDefinitions(output_header_path);
- visitor.Visit(global_context.ast());
+ output_header_path = output_directory + "/class-definitions-from-dsl.h";
+ visitor.GenerateClassDefinitions(output_header_path);
- for (auto& module : global_context.GetModules()) {
- visitor.EndModuleFile(module.second.get());
- visitor.GenerateImplementation(output_directory, module.second.get());
+ for (Namespace* n : GlobalContext::Get().GetNamespaces()) {
+ visitor.EndNamespaceFile(n);
+ visitor.GenerateImplementation(output_directory, n);
}
}
diff --git a/deps/v8/src/torque/type-oracle.h b/deps/v8/src/torque/type-oracle.h
index 1e498c19f4..ee1b5cee1c 100644
--- a/deps/v8/src/torque/type-oracle.h
+++ b/deps/v8/src/torque/type-oracle.h
@@ -17,31 +17,52 @@ namespace torque {
class TypeOracle : public ContextualClass<TypeOracle> {
public:
- explicit TypeOracle(Declarations* declarations)
- : declarations_(declarations) {}
-
static const AbstractType* GetAbstractType(
- const Type* parent, std::string name, std::string generated,
+ const Type* parent, std::string name, bool transient,
+ std::string generated,
base::Optional<const AbstractType*> non_constexpr_version) {
- AbstractType* result = new AbstractType(
- parent, std::move(name), std::move(generated), non_constexpr_version);
+ AbstractType* result =
+ new AbstractType(parent, transient, std::move(name),
+ std::move(generated), non_constexpr_version);
Get().nominal_types_.push_back(std::unique_ptr<AbstractType>(result));
return result;
}
- static const StructType* GetStructType(
- Module* module, const std::string& name,
- const std::vector<NameAndType>& fields) {
- StructType* result = new StructType(module, name, fields);
+ static StructType* GetStructType(const std::string& name,
+ const std::vector<Field>& fields) {
+ StructType* result = new StructType(CurrentNamespace(), name, fields);
Get().struct_types_.push_back(std::unique_ptr<StructType>(result));
return result;
}
- static const FunctionPointerType* GetFunctionPointerType(
+ static ClassType* GetClassType(const Type* parent, const std::string& name,
+ bool transient, const std::string& generates,
+ const std::vector<Field>& fields,
+ StructType* this_struct, size_t size) {
+ ClassType* result =
+ new ClassType(parent, CurrentNamespace(), name, transient, generates,
+ fields, this_struct, size);
+ Get().struct_types_.push_back(std::unique_ptr<ClassType>(result));
+ return result;
+ }
+
+ static const BuiltinPointerType* GetBuiltinPointerType(
TypeVector argument_types, const Type* return_type) {
- const Type* code_type = Get().GetBuiltinType(CODE_TYPE_STRING);
- return Get().function_pointer_types_.Add(
- FunctionPointerType(code_type, argument_types, return_type));
+ TypeOracle& self = Get();
+ const Type* builtin_type = self.GetBuiltinType(BUILTIN_POINTER_TYPE_STRING);
+ const BuiltinPointerType* result = self.function_pointer_types_.Add(
+ BuiltinPointerType(builtin_type, argument_types, return_type,
+ self.all_builtin_pointer_types_.size()));
+ if (result->function_pointer_type_id() ==
+ self.all_builtin_pointer_types_.size()) {
+ self.all_builtin_pointer_types_.push_back(result);
+ }
+ return result;
+ }
+
+ static const std::vector<const BuiltinPointerType*>&
+ AllBuiltinPointerTypes() {
+ return Get().all_builtin_pointer_types_;
}
static const Type* GetUnionType(UnionType type) {
@@ -59,6 +80,13 @@ class TypeOracle : public ContextualClass<TypeOracle> {
return GetUnionType(std::move(result));
}
+ static const TopType* GetTopType(std::string reason,
+ const Type* source_type) {
+ TopType* result = new TopType(std::move(reason), source_type);
+ Get().top_types_.push_back(std::unique_ptr<TopType>(result));
+ return result;
+ }
+
static const Type* GetArgumentsType() {
return Get().GetBuiltinType(ARGUMENTS_TYPE_STRING);
}
@@ -75,18 +103,46 @@ class TypeOracle : public ContextualClass<TypeOracle> {
return Get().GetBuiltinType(VOID_TYPE_STRING);
}
+ static const Type* GetRawPtrType() {
+ return Get().GetBuiltinType(RAWPTR_TYPE_STRING);
+ }
+
static const Type* GetObjectType() {
return Get().GetBuiltinType(OBJECT_TYPE_STRING);
}
+ static const Type* GetTaggedType() {
+ return Get().GetBuiltinType(TAGGED_TYPE_STRING);
+ }
+
+ static const Type* GetSmiType() {
+ return Get().GetBuiltinType(SMI_TYPE_STRING);
+ }
+
static const Type* GetConstStringType() {
return Get().GetBuiltinType(CONST_STRING_TYPE_STRING);
}
+ static const Type* GetStringType() {
+ return Get().GetBuiltinType(STRING_TYPE_STRING);
+ }
+
+ static const Type* GetNumberType() {
+ return Get().GetBuiltinType(NUMBER_TYPE_STRING);
+ }
+
static const Type* GetIntPtrType() {
return Get().GetBuiltinType(INTPTR_TYPE_STRING);
}
+ static const Type* GetUIntPtrType() {
+ return Get().GetBuiltinType(UINTPTR_TYPE_STRING);
+ }
+
+ static const Type* GetInt32Type() {
+ return Get().GetBuiltinType(INT32_TYPE_STRING);
+ }
+
static const Type* GetNeverType() {
return Get().GetBuiltinType(NEVER_TYPE_STRING);
}
@@ -96,20 +152,30 @@ class TypeOracle : public ContextualClass<TypeOracle> {
}
static bool IsImplicitlyConvertableFrom(const Type* to, const Type* from) {
- std::string name = GetGeneratedCallableName(kFromConstexprMacroName, {to});
- return Get().declarations_->TryLookupMacro(name, {from}) != nullptr;
+ for (Generic* from_constexpr :
+ Declarations::LookupGeneric(kFromConstexprMacroName)) {
+ if (base::Optional<Callable*> specialization =
+ from_constexpr->GetSpecialization({to, from})) {
+ if ((*specialization)->signature().GetExplicitTypes() ==
+ TypeVector{from}) {
+ return true;
+ }
+ }
+ }
+ return false;
}
private:
const Type* GetBuiltinType(const std::string& name) {
- return declarations_->LookupGlobalType(name);
+ return Declarations::LookupGlobalType(name);
}
- Declarations* declarations_;
- Deduplicator<FunctionPointerType> function_pointer_types_;
+ Deduplicator<BuiltinPointerType> function_pointer_types_;
+ std::vector<const BuiltinPointerType*> all_builtin_pointer_types_;
Deduplicator<UnionType> union_types_;
std::vector<std::unique_ptr<Type>> nominal_types_;
std::vector<std::unique_ptr<Type>> struct_types_;
+ std::vector<std::unique_ptr<Type>> top_types_;
};
} // namespace torque
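
The id handshake in GetBuiltinPointerType relies on the Deduplicator handing back an already-interned equal value: a candidate is built with id == all_builtin_pointer_types_.size(), and only if the returned value still carries that id was the candidate genuinely new. A minimal stand-alone sketch of the same pattern, with simplified stand-in types rather than the real classes:

  #include <cstddef>
  #include <set>
  #include <tuple>
  #include <utility>
  #include <vector>

  struct Sig {
    std::vector<int> parameter_types;
    int return_type;
    size_t id;  // ignored by the ordering, like function_pointer_type_id_
    bool operator<(const Sig& other) const {
      return std::tie(parameter_types, return_type) <
             std::tie(other.parameter_types, other.return_type);
    }
  };

  const Sig* Intern(std::set<Sig>* dedup, std::vector<const Sig*>* all,
                    std::vector<int> params, int ret) {
    Sig candidate{std::move(params), ret, all->size()};
    const Sig* result = &*dedup->insert(candidate).first;
    // A freshly inserted value still has id == all->size(); a duplicate
    // keeps the smaller id it was interned with and is not re-recorded.
    if (result->id == all->size()) all->push_back(result);
    return result;
  }
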
diff --git a/deps/v8/src/torque/types.cc b/deps/v8/src/torque/types.cc
index 4f009a8f32..86a2020d21 100644
--- a/deps/v8/src/torque/types.cc
+++ b/deps/v8/src/torque/types.cc
@@ -32,6 +32,7 @@ std::string Type::ToString() const {
}
bool Type::IsSubtypeOf(const Type* supertype) const {
+ if (supertype->IsTopType()) return true;
if (IsNever()) return true;
if (const UnionType* union_type = UnionType::DynamicCast(supertype)) {
return union_type->IsSupertypeOf(this);
@@ -74,13 +75,12 @@ bool Type::IsAbstractName(const std::string& name) const {
}
std::string AbstractType::GetGeneratedTNodeTypeName() const {
- std::string result = GetGeneratedTypeName();
- DCHECK_EQ(result.substr(0, 6), "TNode<");
- result = result.substr(6, result.length() - 7);
- return result;
+ return generated_type_;
}
-std::string FunctionPointerType::ToExplicitString() const {
+std::string ClassType::GetGeneratedTNodeTypeName() const { return generates_; }
+
+std::string BuiltinPointerType::ToExplicitString() const {
std::stringstream result;
result << "builtin (";
PrintCommaSeparatedList(result, parameter_types_);
@@ -88,7 +88,7 @@ std::string FunctionPointerType::ToExplicitString() const {
return result.str();
}
-std::string FunctionPointerType::MangledName() const {
+std::string BuiltinPointerType::MangledName() const {
std::stringstream result;
result << "FT";
for (const Type* t : parameter_types_) {
@@ -184,10 +184,88 @@ const Type* SubtractType(const Type* a, const Type* b) {
return TypeOracle::GetUnionType(result);
}
+void AggregateType::CheckForDuplicateFields() {
+ // Check the aggregate hierarchy and currently defined class for duplicate
+ // field declarations.
+ auto hierarchy = GetHierarchy();
+ std::map<std::string, const AggregateType*> field_names;
+ for (const AggregateType* aggregate_type : hierarchy) {
+ for (const Field& field : aggregate_type->fields()) {
+ const std::string& field_name = field.name_and_type.name;
+ auto i = field_names.find(field_name);
+ if (i != field_names.end()) {
+ CurrentSourcePosition::Scope current_source_position(field.pos);
+ std::string aggregate_type_name =
+ aggregate_type->IsClassType() ? "class" : "struct";
+ if (i->second == this) {
+ ReportError(aggregate_type_name, " '", name(),
+ "' declares a field with the name '", field_name,
+ "' more than once");
+ } else {
+ ReportError(aggregate_type_name, " '", name(),
+ "' declares a field with the name '", field_name,
+ "' that masks an inherited field from class '",
+ i->second->name(), "'");
+ }
+ }
+ field_names[field_name] = aggregate_type;
+ }
+ }
+}
+
+std::vector<const AggregateType*> AggregateType::GetHierarchy() {
+ std::vector<const AggregateType*> hierarchy;
+ const AggregateType* current_container_type = this;
+ while (current_container_type != nullptr) {
+ hierarchy.push_back(current_container_type);
+ current_container_type =
+ current_container_type->IsClassType()
+ ? ClassType::cast(current_container_type)->GetSuperClass()
+ : nullptr;
+ }
+ std::reverse(hierarchy.begin(), hierarchy.end());
+ return hierarchy;
+}
+
+const Field& AggregateType::LookupField(const std::string& name) const {
+ for (auto& field : fields_) {
+ if (field.name_and_type.name == name) return field;
+ }
+ if (parent() != nullptr) {
+ if (auto parent_class = ClassType::DynamicCast(parent())) {
+ return parent_class->LookupField(name);
+ }
+ }
+ ReportError("no field ", name, "found");
+}
+
+std::string StructType::GetGeneratedTypeName() const {
+ return nspace()->ExternalName() + "::" + name();
+}
+
+std::vector<Method*> AggregateType::Methods(const std::string& name) const {
+ std::vector<Method*> result;
+ std::copy_if(methods_.begin(), methods_.end(), std::back_inserter(result),
+ [name](Macro* macro) { return macro->ReadableName() == name; });
+ return result;
+}
+
+std::vector<Method*> AggregateType::Constructors() const {
+ return Methods(kConstructMethodName);
+}
+
std::string StructType::ToExplicitString() const {
std::stringstream result;
- result << "{";
- PrintCommaSeparatedList(result, fields_);
+ result << "struct " << name() << "{";
+ PrintCommaSeparatedList(result, fields());
+ result << "}";
+ return result.str();
+}
+
+std::string ClassType::ToExplicitString() const {
+ std::stringstream result;
+ result << "class " << name() << "{";
+ PrintCommaSeparatedList(result, fields());
result << "}";
return result.str();
}
@@ -195,9 +273,16 @@ std::string StructType::ToExplicitString() const {
void PrintSignature(std::ostream& os, const Signature& sig, bool with_names) {
os << "(";
for (size_t i = 0; i < sig.parameter_types.types.size(); ++i) {
- if (i > 0) os << ", ";
+ if (i == 0 && sig.implicit_count != 0) os << "implicit ";
+ if (sig.implicit_count > 0 && sig.implicit_count == i) {
+ os << ")(";
+ } else {
+ if (i > 0) os << ", ";
+ }
if (with_names && !sig.parameter_names.empty()) {
- os << sig.parameter_names[i] << ": ";
+ if (i < sig.parameter_names.size()) {
+ os << sig.parameter_names[i] << ": ";
+ }
}
os << *sig.parameter_types.types[i];
}
@@ -213,8 +298,7 @@ void PrintSignature(std::ostream& os, const Signature& sig, bool with_names) {
os << " labels ";
for (size_t i = 0; i < sig.labels.size(); ++i) {
if (i > 0) os << ", ";
- if (with_names) os << sig.labels[i].name;
-
+ os << sig.labels[i].name;
if (sig.labels[i].types.size() > 0) os << "(" << sig.labels[i].types << ")";
}
}
@@ -226,6 +310,14 @@ std::ostream& operator<<(std::ostream& os, const NameAndType& name_and_type) {
return os;
}
+std::ostream& operator<<(std::ostream& os, const Field& field) {
+ os << field.name_and_type;
+ if (field.is_weak) {
+ os << " (weak)";
+ }
+ return os;
+}
+
std::ostream& operator<<(std::ostream& os, const Signature& sig) {
PrintSignature(os, sig, true);
return os;
@@ -245,8 +337,15 @@ std::ostream& operator<<(std::ostream& os, const ParameterTypes& p) {
return os;
}
-bool Signature::HasSameTypesAs(const Signature& other) const {
- if (!(parameter_types.types == other.parameter_types.types &&
+bool Signature::HasSameTypesAs(const Signature& other,
+ ParameterMode mode) const {
+ auto compare_types = GetTypes();
+ auto other_compare_types = other.GetTypes();
+ if (mode == ParameterMode::kIgnoreImplicit) {
+ compare_types = GetExplicitTypes();
+ other_compare_types = other.GetExplicitTypes();
+ }
+ if (!(compare_types == other_compare_types &&
parameter_types.var_args == other.parameter_types.var_args &&
return_type == other.return_type)) {
return false;
@@ -269,43 +368,58 @@ bool IsAssignableFrom(const Type* to, const Type* from) {
return TypeOracle::IsImplicitlyConvertableFrom(to, from);
}
-bool IsCompatibleSignature(const Signature& sig, const TypeVector& types,
- const std::vector<Label*>& labels) {
- auto i = sig.parameter_types.types.begin();
- if (sig.parameter_types.types.size() > types.size()) return false;
- // TODO(danno): The test below is actually insufficient. The labels'
- // parameters must be checked too. ideally, the named part of
- // LabelDeclarationVector would be factored out so that the label count and
- // parameter types could be passed separately.
- if (sig.labels.size() != labels.size()) return false;
- for (auto current : types) {
- if (i == sig.parameter_types.types.end()) {
- if (!sig.parameter_types.var_args) return false;
- if (!IsAssignableFrom(TypeOracle::GetObjectType(), current)) return false;
- } else {
- if (!IsAssignableFrom(*i++, current)) return false;
- }
- }
- return true;
-}
-
bool operator<(const Type& a, const Type& b) {
return a.MangledName() < b.MangledName();
}
-VisitResult ProjectStructField(VisitResult structure,
+VisitResult ProjectStructField(const StructType* original_struct,
+ VisitResult structure,
const std::string& fieldname) {
- DCHECK(structure.IsOnStack());
BottomOffset begin = structure.stack_range().begin();
+
+ // Check this struct and, via the constructor's _super chain, its super
+ // classes for the field.
const StructType* type = StructType::cast(structure.type());
- for (auto& field : type->fields()) {
- BottomOffset end = begin + LoweredSlotCount(field.type);
- if (field.name == fieldname) {
- return VisitResult(field.type, StackRange{begin, end});
+ auto& fields = type->fields();
+ for (auto& field : fields) {
+ BottomOffset end = begin + LoweredSlotCount(field.name_and_type.type);
+ if (field.name_and_type.name == fieldname) {
+ return VisitResult(field.name_and_type.type, StackRange{begin, end});
}
begin = end;
}
- UNREACHABLE();
+
+ if (fields.size() > 0 &&
+ fields[0].name_and_type.name == kConstructorStructSuperFieldName) {
+ structure = ProjectStructField(original_struct, structure,
+ kConstructorStructSuperFieldName);
+ return ProjectStructField(original_struct, structure, fieldname);
+ } else {
+ base::Optional<const ClassType*> class_type =
+ original_struct->GetDerivedFrom();
+ if (original_struct == type) {
+ if (class_type) {
+ ReportError("class '", (*class_type)->name(),
+ "' doesn't contain a field '", fieldname, "'");
+ } else {
+ ReportError("struct '", original_struct->name(),
+ "' doesn't contain a field '", fieldname, "'");
+ }
+ } else {
+ DCHECK(class_type);
+ ReportError(
+ "class '", (*class_type)->name(),
+ "' or one of its derived-from classes doesn't contain a field '",
+ fieldname, "'");
+ }
+ }
+}
+
+VisitResult ProjectStructField(VisitResult structure,
+ const std::string& fieldname) {
+ DCHECK(structure.IsOnStack());
+ DCHECK(structure.type()->IsStructType());
+ const StructType* type = StructType::cast(structure.type());
+ return ProjectStructField(type, structure, fieldname);
}
namespace {
@@ -314,8 +428,8 @@ void AppendLoweredTypes(const Type* type, std::vector<const Type*>* result) {
if (type->IsConstexpr()) return;
if (type == TypeOracle::GetVoidType()) return;
if (auto* s = StructType::DynamicCast(type)) {
- for (const NameAndType& field : s->fields()) {
- AppendLoweredTypes(field.type, result);
+ for (const Field& field : s->fields()) {
+ AppendLoweredTypes(field.name_and_type.type, result);
}
} else {
result->push_back(type);
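
ProjectStructField walks the lowered stack slots: each field occupies the slots immediately after its predecessors', so a field's range falls out of accumulating slot counts. A simplified sketch of that offset walk, with stand-in types and offsets as plain size_t:

  #include <cstddef>
  #include <string>
  #include <utility>
  #include <vector>

  struct FieldSlots {
    std::string name;
    size_t slot_count;  // LoweredSlotCount(field type) in the real code
  };

  // Returns the [begin, end) slot range of {name} inside a struct whose
  // lowered representation starts at {struct_begin}.
  std::pair<size_t, size_t> FieldRange(const std::vector<FieldSlots>& fields,
                                       size_t struct_begin,
                                       const std::string& name) {
    size_t begin = struct_begin;
    for (const FieldSlots& field : fields) {
      size_t end = begin + field.slot_count;
      if (field.name == name) return {begin, end};
      begin = end;
    }
    return {0, 0};  // the real code recurses into _super or reports an error
  }
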
diff --git a/deps/v8/src/torque/types.h b/deps/v8/src/torque/types.h
index e94413e4c9..6d189068f2 100644
--- a/deps/v8/src/torque/types.h
+++ b/deps/v8/src/torque/types.h
@@ -6,11 +6,13 @@
#define V8_TORQUE_TYPES_H_
#include <algorithm>
+#include <map>
#include <set>
#include <string>
#include <vector>
#include "src/base/optional.h"
+#include "src/torque/source-positions.h"
#include "src/torque/utils.h"
namespace v8 {
@@ -25,32 +27,47 @@ static const char* const VOID_TYPE_STRING = "void";
static const char* const ARGUMENTS_TYPE_STRING = "constexpr Arguments";
static const char* const CONTEXT_TYPE_STRING = "Context";
static const char* const OBJECT_TYPE_STRING = "Object";
+static const char* const SMI_TYPE_STRING = "Smi";
+static const char* const TAGGED_TYPE_STRING = "Tagged";
+static const char* const RAWPTR_TYPE_STRING = "RawPtr";
static const char* const CONST_STRING_TYPE_STRING = "constexpr string";
-static const char* const CODE_TYPE_STRING = "Code";
+static const char* const STRING_TYPE_STRING = "String";
+static const char* const NUMBER_TYPE_STRING = "Number";
+static const char* const BUILTIN_POINTER_TYPE_STRING = "BuiltinPtr";
static const char* const INTPTR_TYPE_STRING = "intptr";
+static const char* const UINTPTR_TYPE_STRING = "uintptr";
+static const char* const INT32_TYPE_STRING = "int32";
static const char* const CONST_INT31_TYPE_STRING = "constexpr int31";
static const char* const CONST_INT32_TYPE_STRING = "constexpr int32";
static const char* const CONST_FLOAT64_TYPE_STRING = "constexpr float64";
-class Label;
+class Macro;
+class Method;
+class StructType;
+class ClassType;
class Value;
-class Module;
+class Namespace;
class TypeBase {
public:
enum class Kind {
+ kTopType,
kAbstractType,
- kFunctionPointerType,
+ kBuiltinPointerType,
kUnionType,
- kStructType
+ kStructType,
+ kClassType
};
virtual ~TypeBase() = default;
+ bool IsTopType() const { return kind() == Kind::kTopType; }
bool IsAbstractType() const { return kind() == Kind::kAbstractType; }
- bool IsFunctionPointerType() const {
- return kind() == Kind::kFunctionPointerType;
+ bool IsBuiltinPointerType() const {
+ return kind() == Kind::kBuiltinPointerType;
}
bool IsUnionType() const { return kind() == Kind::kUnionType; }
bool IsStructType() const { return kind() == Kind::kStructType; }
+ bool IsClassType() const { return kind() == Kind::kClassType; }
+ bool IsAggregateType() const { return IsStructType() || IsClassType(); }
protected:
explicit TypeBase(Kind kind) : kind_(kind) {}
@@ -96,6 +113,7 @@ class Type : public TypeBase {
virtual std::string GetGeneratedTypeName() const = 0;
virtual std::string GetGeneratedTNodeTypeName() const = 0;
virtual bool IsConstexpr() const = 0;
+ virtual bool IsTransient() const { return false; }
virtual const Type* NonConstexprVersion() const = 0;
static const Type* CommonSupertype(const Type* a, const Type* b);
void AddAlias(std::string alias) const { aliases_.insert(std::move(alias)); }
@@ -118,6 +136,14 @@ class Type : public TypeBase {
using TypeVector = std::vector<const Type*>;
+inline size_t hash_value(const TypeVector& types) {
+ size_t hash = 0;
+ for (const Type* t : types) {
+ hash = base::hash_combine(hash, t);
+ }
+ return hash;
+}
+
struct NameAndType {
std::string name;
const Type* type;
@@ -125,13 +151,58 @@ struct NameAndType {
std::ostream& operator<<(std::ostream& os, const NameAndType& name_and_type);
+struct Field {
+ SourcePosition pos;
+ NameAndType name_and_type;
+ size_t offset;
+ bool is_weak;
+};
+
+std::ostream& operator<<(std::ostream& os, const Field& field);
+
+class TopType final : public Type {
+ public:
+ DECLARE_TYPE_BOILERPLATE(TopType);
+ virtual std::string MangledName() const { return "top"; }
+ virtual std::string GetGeneratedTypeName() const { UNREACHABLE(); }
+ virtual std::string GetGeneratedTNodeTypeName() const {
+ return source_type_->GetGeneratedTNodeTypeName();
+ }
+ virtual bool IsConstexpr() const { return false; }
+ virtual const Type* NonConstexprVersion() const { return nullptr; }
+ virtual std::string ToExplicitString() const {
+ std::stringstream s;
+ s << "inaccessible " + source_type_->ToString();
+ return s.str();
+ }
+
+ const Type* source_type() const { return source_type_; }
+ const std::string reason() const { return reason_; }
+
+ private:
+ friend class TypeOracle;
+ explicit TopType(std::string reason, const Type* source_type)
+ : Type(Kind::kTopType, nullptr),
+ reason_(std::move(reason)),
+ source_type_(source_type) {}
+ std::string reason_;
+ const Type* source_type_;
+};
+
class AbstractType final : public Type {
public:
DECLARE_TYPE_BOILERPLATE(AbstractType);
const std::string& name() const { return name_; }
std::string ToExplicitString() const override { return name(); }
- std::string MangledName() const override { return "AT" + name(); }
- std::string GetGeneratedTypeName() const override { return generated_type_; }
+ std::string MangledName() const override {
+ std::string str(name());
+ std::replace(str.begin(), str.end(), ' ', '_');
+ return "AT" + str;
+ }
+ std::string GetGeneratedTypeName() const override {
+ return IsConstexpr() ? generated_type_
+ : "compiler::TNode<" + generated_type_ + ">";
+ }
std::string GetGeneratedTNodeTypeName() const override;
bool IsConstexpr() const override {
return name().substr(0, strlen(CONSTEXPR_TYPE_PREFIX)) ==
@@ -144,10 +215,11 @@ class AbstractType final : public Type {
private:
friend class TypeOracle;
- AbstractType(const Type* parent, const std::string& name,
+ AbstractType(const Type* parent, bool transient, const std::string& name,
const std::string& generated_type,
base::Optional<const AbstractType*> non_constexpr_version)
: Type(Kind::kAbstractType, parent),
+ transient_(transient),
name_(name),
generated_type_(generated_type),
non_constexpr_version_(non_constexpr_version) {
@@ -155,16 +227,18 @@ class AbstractType final : public Type {
if (parent) DCHECK(parent->IsConstexpr() == IsConstexpr());
}
+ bool IsTransient() const override { return transient_; }
+
+ bool transient_;
const std::string name_;
const std::string generated_type_;
base::Optional<const AbstractType*> non_constexpr_version_;
};
-// For now, function pointers are restricted to Code objects of Torque-defined
-// builtins.
-class FunctionPointerType final : public Type {
+// For now, builtin pointers are restricted to Torque-defined builtins.
+class BuiltinPointerType final : public Type {
public:
- DECLARE_TYPE_BOILERPLATE(FunctionPointerType);
+ DECLARE_TYPE_BOILERPLATE(BuiltinPointerType);
std::string ToExplicitString() const override;
std::string MangledName() const override;
std::string GetGeneratedTypeName() const override {
@@ -182,28 +256,31 @@ class FunctionPointerType final : public Type {
const TypeVector& parameter_types() const { return parameter_types_; }
const Type* return_type() const { return return_type_; }
- friend size_t hash_value(const FunctionPointerType& p) {
+ friend size_t hash_value(const BuiltinPointerType& p) {
size_t result = base::hash_value(p.return_type_);
for (const Type* parameter : p.parameter_types_) {
result = base::hash_combine(result, parameter);
}
return result;
}
- bool operator==(const FunctionPointerType& other) const {
+ bool operator==(const BuiltinPointerType& other) const {
return parameter_types_ == other.parameter_types_ &&
return_type_ == other.return_type_;
}
+ size_t function_pointer_type_id() const { return function_pointer_type_id_; }
private:
friend class TypeOracle;
- FunctionPointerType(const Type* parent, TypeVector parameter_types,
- const Type* return_type)
- : Type(Kind::kFunctionPointerType, parent),
+ BuiltinPointerType(const Type* parent, TypeVector parameter_types,
+ const Type* return_type, size_t function_pointer_type_id)
+ : Type(Kind::kBuiltinPointerType, parent),
parameter_types_(parameter_types),
- return_type_(return_type) {}
+ return_type_(return_type),
+ function_pointer_type_id_(function_pointer_type_id) {}
const TypeVector parameter_types_;
const Type* const return_type_;
+ const size_t function_pointer_type_id_;
};
bool operator<(const Type& a, const Type& b);
@@ -219,7 +296,7 @@ class UnionType final : public Type {
std::string ToExplicitString() const override;
std::string MangledName() const override;
std::string GetGeneratedTypeName() const override {
- return "TNode<" + GetGeneratedTNodeTypeName() + ">";
+ return "compiler::TNode<" + GetGeneratedTNodeTypeName() + ">";
}
std::string GetGeneratedTNodeTypeName() const override;
@@ -264,6 +341,15 @@ class UnionType final : public Type {
return false;
}
+ bool IsTransient() const override {
+ for (const Type* member : types_) {
+ if (member->IsTransient()) {
+ return true;
+ }
+ }
+ return false;
+ }
+
void Extend(const Type* t) {
if (const UnionType* union_type = UnionType::DynamicCast(t)) {
for (const Type* member : union_type->types_) {
@@ -272,11 +358,8 @@ class UnionType final : public Type {
} else {
if (t->IsSubtypeOf(this)) return;
set_parent(CommonSupertype(parent(), t));
- for (const Type* member : types_) {
- if (member->IsSubtypeOf(t)) {
- types_.erase(member);
- }
- }
+ EraseIf(&types_,
+ [&](const Type* member) { return member->IsSubtypeOf(t); });
types_.insert(t);
}
}
@@ -297,44 +380,107 @@ class UnionType final : public Type {
const Type* SubtractType(const Type* a, const Type* b);
-class StructType final : public Type {
+class AggregateType : public Type {
public:
- DECLARE_TYPE_BOILERPLATE(StructType);
- std::string ToExplicitString() const override;
+ DECLARE_TYPE_BOILERPLATE(AggregateType);
std::string MangledName() const override { return name_; }
- std::string GetGeneratedTypeName() const override { return GetStructName(); }
+ std::string GetGeneratedTypeName() const override { UNREACHABLE(); }
std::string GetGeneratedTNodeTypeName() const override { UNREACHABLE(); }
const Type* NonConstexprVersion() const override { return this; }
bool IsConstexpr() const override { return false; }
- const std::vector<NameAndType>& fields() const { return fields_; }
- const Type* GetFieldType(const std::string& fieldname) const {
- for (const NameAndType& field : fields()) {
- if (field.name == fieldname) return field.type;
- }
- std::stringstream s;
- s << "\"" << fieldname << "\" is not a field of struct type \"" << name()
- << "\"";
- ReportError(s.str());
- }
+ void SetFields(std::vector<Field> fields) { fields_ = std::move(fields); }
+ const std::vector<Field>& fields() const { return fields_; }
+ const Field& LookupField(const std::string& name) const;
const std::string& name() const { return name_; }
- Module* module() const { return module_; }
+ Namespace* nspace() const { return namespace_; }
+
+ std::string GetGeneratedMethodName(const std::string& name) const {
+ return "_method_" + name_ + "_" + name;
+ }
+
+ void RegisterMethod(Method* method) { methods_.push_back(method); }
+ std::vector<Method*> Constructors() const;
+ const std::vector<Method*>& Methods() const { return methods_; }
+ std::vector<Method*> Methods(const std::string& name) const;
+
+ std::vector<const AggregateType*> GetHierarchy();
+
+ protected:
+ AggregateType(Kind kind, const Type* parent, Namespace* nspace,
+ const std::string& name, const std::vector<Field>& fields)
+ : Type(kind, parent), namespace_(nspace), name_(name), fields_(fields) {}
+
+ void CheckForDuplicateFields();
+
+ private:
+ Namespace* namespace_;
+ std::string name_;
+ std::vector<Method*> methods_;
+ std::vector<Field> fields_;
+};
+
+class StructType final : public AggregateType {
+ public:
+ DECLARE_TYPE_BOILERPLATE(StructType);
+ std::string ToExplicitString() const override;
+ std::string GetGeneratedTypeName() const override;
+
+ void SetDerivedFrom(const ClassType* derived_from) {
+ derived_from_ = derived_from;
+ }
+ base::Optional<const ClassType*> GetDerivedFrom() const {
+ return derived_from_;
+ }
private:
friend class TypeOracle;
- StructType(Module* module, const std::string& name,
- const std::vector<NameAndType>& fields)
- : Type(Kind::kStructType, nullptr),
- module_(module),
- name_(name),
- fields_(fields) {}
+ StructType(Namespace* nspace, const std::string& name,
+ const std::vector<Field>& fields)
+ : AggregateType(Kind::kStructType, nullptr, nspace, name, fields) {
+ CheckForDuplicateFields();
+ }
- const std::string& GetStructName() const { return name_; }
+ const std::string& GetStructName() const { return name(); }
- Module* module_;
- std::string name_;
- std::vector<NameAndType> fields_;
+ base::Optional<const ClassType*> derived_from_;
+};
+
+class ClassType final : public AggregateType {
+ public:
+ DECLARE_TYPE_BOILERPLATE(ClassType);
+ std::string ToExplicitString() const override;
+ std::string GetGeneratedTypeName() const override {
+ return IsConstexpr() ? generates_ : "compiler::TNode<" + generates_ + ">";
+ }
+ std::string GetGeneratedTNodeTypeName() const override;
+ bool IsTransient() const override { return transient_; }
+ size_t size() const { return size_; }
+ StructType* struct_type() const { return this_struct_; }
+ const ClassType* GetSuperClass() const {
+ if (parent() == nullptr) return nullptr;
+ return parent()->IsClassType() ? ClassType::DynamicCast(parent()) : nullptr;
+ }
+
+ private:
+ friend class TypeOracle;
+ ClassType(const Type* parent, Namespace* nspace, const std::string& name,
+ bool transient, const std::string& generates,
+ const std::vector<Field>& fields, StructType* this_struct,
+ size_t size)
+ : AggregateType(Kind::kClassType, parent, nspace, name, fields),
+ this_struct_(this_struct),
+ transient_(transient),
+ size_(size),
+ generates_(generates) {
+ CheckForDuplicateFields();
+ }
+
+ StructType* this_struct_;
+ bool transient_;
+ size_t size_;
+ const std::string generates_;
};
inline std::ostream& operator<<(std::ostream& os, const Type& t) {
@@ -412,26 +558,43 @@ struct ParameterTypes {
std::ostream& operator<<(std::ostream& os, const ParameterTypes& parameters);
+enum class ParameterMode { kProcessImplicit, kIgnoreImplicit };
+
struct Signature {
+ Signature(NameVector n, base::Optional<std::string> arguments_variable,
+ ParameterTypes p, size_t i, const Type* r, LabelDeclarationVector l)
+ : parameter_names(std::move(n)),
+ arguments_variable(arguments_variable),
+ parameter_types(std::move(p)),
+ implicit_count(i),
+ return_type(r),
+ labels(std::move(l)) {}
+ Signature() : implicit_count(0), return_type(nullptr) {}
const TypeVector& types() const { return parameter_types.types; }
NameVector parameter_names;
+ base::Optional<std::string> arguments_variable;
ParameterTypes parameter_types;
+ size_t implicit_count;
const Type* return_type;
LabelDeclarationVector labels;
- bool HasSameTypesAs(const Signature& other) const;
-};
-
-struct Arguments {
- VisitResultVector parameters;
- std::vector<Label*> labels;
+ bool HasSameTypesAs(
+ const Signature& other,
+ ParameterMode mode = ParameterMode::kProcessImplicit) const;
+ const TypeVector& GetTypes() const { return parameter_types.types; }
+ TypeVector GetImplicitTypes() const {
+ return TypeVector(parameter_types.types.begin(),
+ parameter_types.types.begin() + implicit_count);
+ }
+ TypeVector GetExplicitTypes() const {
+ return TypeVector(parameter_types.types.begin() + implicit_count,
+ parameter_types.types.end());
+ }
};
void PrintSignature(std::ostream& os, const Signature& sig, bool with_names);
std::ostream& operator<<(std::ostream& os, const Signature& sig);
bool IsAssignableFrom(const Type* to, const Type* from);
-bool IsCompatibleSignature(const Signature& sig, const TypeVector& types,
- const std::vector<Label*>& labels);
TypeVector LowerType(const Type* type);
size_t LoweredSlotCount(const Type* type);
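
With implicit_count in place, two signatures can agree on their explicit parameters while differing in implicits, which is exactly what HasSameTypesAs compares under ParameterMode::kIgnoreImplicit. A small sketch with type names as strings standing in for the real Type pointers:

  #include <cassert>
  #include <cstddef>
  #include <string>
  #include <vector>

  // Mirrors Signature::GetExplicitTypes(): everything after the first
  // implicit_count parameter types.
  std::vector<std::string> ExplicitTypes(const std::vector<std::string>& all,
                                         size_t implicit_count) {
    return std::vector<std::string>(
        all.begin() + static_cast<std::ptrdiff_t>(implicit_count), all.end());
  }

  int main() {
    std::vector<std::string> with_implicit{"Context", "Object", "Smi"};
    std::vector<std::string> without{"Object", "Smi"};
    // Under ParameterMode::kIgnoreImplicit these signatures have the same
    // types, although their full parameter lists differ.
    assert(ExplicitTypes(with_implicit, 1) == ExplicitTypes(without, 0));
    return 0;
  }
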
diff --git a/deps/v8/src/torque/utils.cc b/deps/v8/src/torque/utils.cc
index b39ea288e0..b1fdb2b913 100644
--- a/deps/v8/src/torque/utils.cc
+++ b/deps/v8/src/torque/utils.cc
@@ -49,7 +49,7 @@ std::string StringLiteralUnquote(const std::string& s) {
std::string StringLiteralQuote(const std::string& s) {
std::stringstream result;
result << '"';
- for (size_t i = 0; i < s.length() - 1; ++i) {
+ for (size_t i = 0; i < s.length(); ++i) {
switch (s[i]) {
case '\n':
result << "\\n";
@@ -109,26 +109,27 @@ bool ContainsUpperCase(const std::string& s) {
return std::any_of(s.begin(), s.end(), [](char c) { return isupper(c); });
}
-// Torque has some module constants that are used like language level
+// Torque has some namespace constants that are used like language level
// keywords, e.g.: 'True', 'Undefined', etc.
// These do not need to follow the default naming convention for constants.
bool IsKeywordLikeName(const std::string& s) {
- static const std::vector<std::string> keyword_like_constants{
- "True", "False", "Hole", "Null", "Undefined"};
+ static const char* const keyword_like_constants[]{"True", "False", "Hole",
+ "Null", "Undefined"};
- return std::find(keyword_like_constants.begin(), keyword_like_constants.end(),
- s) != keyword_like_constants.end();
+ return std::find(std::begin(keyword_like_constants),
+ std::end(keyword_like_constants),
+ s) != std::end(keyword_like_constants);
}
// Untagged/MachineTypes like 'int32', 'intptr', etc. follow an 'all-lowercase'
// naming convention and are thus exempt from the normal type convention.
bool IsMachineType(const std::string& s) {
- static const std::vector<std::string> machine_types{
- "void", "never", "int32", "uint32", "int64", "intptr",
- "uintptr", "float32", "float64", "bool", "string", "int31"};
+ static const char* const machine_types[]{
+ "void", "never", "int32", "uint32", "int64", "intptr", "uintptr",
+ "float32", "float64", "bool", "string", "bint", "int31"};
- return std::find(machine_types.begin(), machine_types.end(), s) !=
- machine_types.end();
+ return std::find(std::begin(machine_types), std::end(machine_types), s) !=
+ std::end(machine_types);
}
} // namespace
@@ -148,7 +149,7 @@ bool IsSnakeCase(const std::string& s) {
return !ContainsUpperCase(s);
}
-bool IsValidModuleConstName(const std::string& s) {
+bool IsValidNamespaceConstName(const std::string& s) {
if (s.empty()) return false;
if (IsKeywordLikeName(s)) return true;
@@ -162,6 +163,19 @@ bool IsValidTypeName(const std::string& s) {
return IsUpperCamelCase(s);
}
+std::string CapifyStringWithUnderscores(const std::string& camellified_string) {
+ std::string result;
+ bool previousWasLower = false;
+ for (auto current : camellified_string) {
+ if (previousWasLower && isupper(current)) {
+ result += "_";
+ }
+ result += toupper(current);
+ previousWasLower = (islower(current));
+ }
+ return result;
+}
+
std::string CamelifyString(const std::string& underscore_string) {
std::string result;
bool word_beginning = true;
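
CapifyStringWithUnderscores inserts an underscore only on a lower-to-upper transition. A self-contained re-derivation of that rule (a hypothetical helper, kept separate from the real function):

  #include <cassert>
  #include <cctype>
  #include <string>

  std::string Capify(const std::string& s) {
    std::string result;
    bool previous_was_lower = false;
    for (unsigned char current : s) {
      if (previous_was_lower && std::isupper(current)) result += '_';
      result += static_cast<char>(std::toupper(current));
      previous_was_lower = std::islower(current) != 0;
    }
    return result;
  }

  int main() {
    assert(Capify("fastPath") == "FAST_PATH");
    assert(Capify("JSObject") == "JSOBJECT");  // no lower-to-upper edge in "JSO"
    return 0;
  }
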
diff --git a/deps/v8/src/torque/utils.h b/deps/v8/src/torque/utils.h
index 16e3b03ed4..ca90a78522 100644
--- a/deps/v8/src/torque/utils.h
+++ b/deps/v8/src/torque/utils.h
@@ -42,7 +42,7 @@ void NamingConventionError(const std::string& type, const std::string& name,
bool IsLowerCamelCase(const std::string& s);
bool IsUpperCamelCase(const std::string& s);
bool IsSnakeCase(const std::string& s);
-bool IsValidModuleConstName(const std::string& s);
+bool IsValidNamespaceConstName(const std::string& s);
bool IsValidTypeName(const std::string& s);
[[noreturn]] void ReportErrorString(const std::string& error);
@@ -53,6 +53,7 @@ template <class... Args>
ReportErrorString(s.str());
}
+std::string CapifyStringWithUnderscores(const std::string& camellified_string);
std::string CamelifyString(const std::string& underscore_string);
std::string DashifyString(const std::string& underscore_string);
@@ -167,6 +168,10 @@ class StackRange {
BottomOffset end_;
};
+inline std::ostream& operator<<(std::ostream& out, StackRange range) {
+ return out << "StackRange{" << range.begin() << ", " << range.end() << "}";
+}
+
template <class T>
class Stack {
public:
@@ -214,9 +219,9 @@ class Stack {
// Delete the slots in {range}, moving higher slots to fill the gap.
void DeleteRange(StackRange range) {
DCHECK_LE(range.end(), AboveTop());
- for (BottomOffset i = range.begin();
- i < std::min(range.end(), AboveTop() - range.Size()); ++i) {
- elements_[i.offset] = std::move(elements_[i.offset + range.Size()]);
+ if (range.Size() == 0) return;
+ for (BottomOffset i = range.end(); i < AboveTop(); ++i) {
+ elements_[i.offset - range.Size()] = std::move(elements_[i.offset]);
}
elements_.resize(elements_.size() - range.Size());
}
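
The old loop started at range.begin() and moved at most range.Size() slots, which loses data whenever more than range.Size() slots sit above the deleted range; the new loop shifts every higher slot down. A worked example on a plain vector, with BottomOffset arithmetic simplified to size_t:

  #include <cassert>
  #include <cstddef>
  #include <utility>
  #include <vector>

  void DeleteRange(std::vector<char>* elements, size_t begin, size_t end) {
    size_t size = end - begin;
    if (size == 0) return;
    for (size_t i = end; i < elements->size(); ++i) {
      (*elements)[i - size] = std::move((*elements)[i]);
    }
    elements->resize(elements->size() - size);
  }

  int main() {
    std::vector<char> v{'a', 'b', 'c', 'd', 'e', 'f', 'g'};
    DeleteRange(&v, 1, 3);  // remove 'b' and 'c'
    // New loop: {a, d, e, f, g}. The old loop would have produced
    // {a, d, e, d, e}, leaving slots 3 and 4 unmoved before the resize.
    assert((v == std::vector<char>{'a', 'd', 'e', 'f', 'g'}));
    return 0;
  }
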
@@ -243,6 +248,13 @@ T* CheckNotNull(T* x) {
return x;
}
+template <class T>
+inline std::ostream& operator<<(std::ostream& os, Stack<T>& t) {
+ os << "Stack{";
+ PrintCommaSeparatedList(os, t);
+ os << "}";
+ return os;
+}
class ToString {
public:
template <class T>
@@ -256,6 +268,27 @@ class ToString {
std::stringstream s_;
};
+constexpr int kTaggedSize = sizeof(void*);
+
+static const char* const kConstructMethodName = "constructor";
+static const char* const kSuperMethodName = "super";
+static const char* const kConstructorStructSuperFieldName = "_super";
+static const char* const kClassConstructorThisStructPrefix = "_ThisStruct";
+
+// Erase elements of a container that has a constant-time erase function, like
+// std::set or std::list. Calling this on std::vector would have quadratic
+// complexity.
+template <class Container, class F>
+void EraseIf(Container* container, F f) {
+ for (auto it = container->begin(); it != container->end();) {
+ if (f(*it)) {
+ it = container->erase(it);
+ } else {
+ ++it;
+ }
+ }
+}
+
} // namespace torque
} // namespace internal
} // namespace v8
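
A short usage sketch for the EraseIf helper above (hypothetical test code, with a local copy of the template so the demo stands alone):

  #include <cassert>
  #include <set>

  // Local copy of the EraseIf helper above, for a self-contained demo.
  template <class Container, class F>
  void EraseIf(Container* container, F f) {
    for (auto it = container->begin(); it != container->end();) {
      if (f(*it)) {
        it = container->erase(it);
      } else {
        ++it;
      }
    }
  }

  int main() {
    std::set<int> s{1, 2, 3, 4, 5};
    EraseIf(&s, [](int x) { return x % 2 == 0; });  // drop the even elements
    assert((s == std::set<int>{1, 3, 5}));
    return 0;
  }
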
diff --git a/deps/v8/src/tracing/OWNERS b/deps/v8/src/tracing/OWNERS
new file mode 100644
index 0000000000..87c96616bc
--- /dev/null
+++ b/deps/v8/src/tracing/OWNERS
@@ -0,0 +1 @@
+alph@chromium.org
diff --git a/deps/v8/src/tracing/traced-value.cc b/deps/v8/src/tracing/traced-value.cc
index de9382e65b..d019b3b5b4 100644
--- a/deps/v8/src/tracing/traced-value.cc
+++ b/deps/v8/src/tracing/traced-value.cc
@@ -26,16 +26,24 @@ const bool kStackTypeArray = true;
void EscapeAndAppendString(const char* value, std::string* result) {
*result += '"';
- char number_buffer[10];
while (*value) {
- char c = *value++;
+ unsigned char c = *value++;
switch (c) {
- case '\t':
- *result += "\\t";
+ case '\b':
+ *result += "\\b";
+ break;
+ case '\f':
+ *result += "\\f";
break;
case '\n':
*result += "\\n";
break;
+ case '\r':
+ *result += "\\r";
+ break;
+ case '\t':
+ *result += "\\t";
+ break;
case '\"':
*result += "\\\"";
break;
@@ -43,10 +51,10 @@ void EscapeAndAppendString(const char* value, std::string* result) {
*result += "\\\\";
break;
default:
- if (c < '\x20') {
- base::OS::SNPrintF(
- number_buffer, arraysize(number_buffer), "\\u%04X",
- static_cast<unsigned>(static_cast<unsigned char>(c)));
+ if (c < '\x20' || c == '\x7F') {
+ char number_buffer[8];
+ base::OS::SNPrintF(number_buffer, arraysize(number_buffer), "\\u%04X",
+ static_cast<unsigned>(c));
*result += number_buffer;
} else {
*result += c;
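
The updated escaping covers the full JSON control set: \b, \f, \n, \r, and \t get short escapes, while everything else below 0x20, plus DEL (0x7F), becomes \uXXXX. A self-contained re-implementation for illustration, not the V8 function itself:

  #include <cstdio>
  #include <string>

  std::string Escape(const std::string& in) {
    std::string out = "\"";
    for (unsigned char c : in) {
      switch (c) {
        case '\b': out += "\\b"; break;
        case '\f': out += "\\f"; break;
        case '\n': out += "\\n"; break;
        case '\r': out += "\\r"; break;
        case '\t': out += "\\t"; break;
        case '"':  out += "\\\""; break;
        case '\\': out += "\\\\"; break;
        default:
          if (c < 0x20 || c == 0x7F) {
            char buffer[8];  // "\u%04X" emits 6 chars plus the terminator
            std::snprintf(buffer, sizeof(buffer), "\\u%04X",
                          static_cast<unsigned>(c));
            out += buffer;
          } else {
            out += static_cast<char>(c);
          }
      }
    }
    return out + "\"";
  }
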
diff --git a/deps/v8/src/tracing/tracing-category-observer.cc b/deps/v8/src/tracing/tracing-category-observer.cc
index 28c107d88f..7fb8ee329a 100644
--- a/deps/v8/src/tracing/tracing-category-observer.cc
+++ b/deps/v8/src/tracing/tracing-category-observer.cc
@@ -19,11 +19,6 @@ void TracingCategoryObserver::SetUp() {
v8::internal::V8::GetCurrentPlatform()
->GetTracingController()
->AddTraceStateObserver(TracingCategoryObserver::instance_);
- TRACE_EVENT_WARMUP_CATEGORY(TRACE_DISABLED_BY_DEFAULT("v8.runtime_stats"));
- TRACE_EVENT_WARMUP_CATEGORY(
- TRACE_DISABLED_BY_DEFAULT("v8.runtime_stats_sampling"));
- TRACE_EVENT_WARMUP_CATEGORY(TRACE_DISABLED_BY_DEFAULT("v8.gc_stats"));
- TRACE_EVENT_WARMUP_CATEGORY(TRACE_DISABLED_BY_DEFAULT("v8.ic_stats"));
}
void TracingCategoryObserver::TearDown() {
diff --git a/deps/v8/src/transitions-inl.h b/deps/v8/src/transitions-inl.h
index 072e15318b..cd1bd9d654 100644
--- a/deps/v8/src/transitions-inl.h
+++ b/deps/v8/src/transitions-inl.h
@@ -10,6 +10,8 @@
#include "src/ic/handler-configuration-inl.h"
#include "src/objects/fixed-array-inl.h"
#include "src/objects/maybe-object-inl.h"
+#include "src/objects/slots.h"
+#include "src/objects/smi.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -17,58 +19,60 @@
namespace v8 {
namespace internal {
-TransitionArray* TransitionsAccessor::transitions() {
+TransitionArray TransitionsAccessor::transitions() {
DCHECK_EQ(kFullTransitionArray, encoding());
return TransitionArray::cast(raw_transitions_->GetHeapObjectAssumeStrong());
}
+OBJECT_CONSTRUCTORS_IMPL(TransitionArray, WeakFixedArray)
+
CAST_ACCESSOR(TransitionArray)
bool TransitionArray::HasPrototypeTransitions() {
- return Get(kPrototypeTransitionsIndex) != MaybeObject::FromSmi(Smi::kZero);
+ return Get(kPrototypeTransitionsIndex) != MaybeObject::FromSmi(Smi::zero());
}
-WeakFixedArray* TransitionArray::GetPrototypeTransitions() {
+WeakFixedArray TransitionArray::GetPrototypeTransitions() {
DCHECK(HasPrototypeTransitions()); // Callers must check first.
- Object* prototype_transitions =
+ Object prototype_transitions =
Get(kPrototypeTransitionsIndex)->GetHeapObjectAssumeStrong();
return WeakFixedArray::cast(prototype_transitions);
}
-HeapObjectReference** TransitionArray::GetKeySlot(int transition_number) {
+HeapObjectSlot TransitionArray::GetKeySlot(int transition_number) {
DCHECK(transition_number < number_of_transitions());
- return reinterpret_cast<HeapObjectReference**>(
- RawFieldOfElementAt(ToKeyIndex(transition_number)));
+ return HeapObjectSlot(RawFieldOfElementAt(ToKeyIndex(transition_number)));
}
-void TransitionArray::SetPrototypeTransitions(WeakFixedArray* transitions) {
+void TransitionArray::SetPrototypeTransitions(WeakFixedArray transitions) {
DCHECK(transitions->IsWeakFixedArray());
WeakFixedArray::Set(kPrototypeTransitionsIndex,
HeapObjectReference::Strong(transitions));
}
int TransitionArray::NumberOfPrototypeTransitions(
- WeakFixedArray* proto_transitions) {
+ WeakFixedArray proto_transitions) {
if (proto_transitions->length() == 0) return 0;
- MaybeObject* raw =
+ MaybeObject raw =
proto_transitions->Get(kProtoTransitionNumberOfEntriesOffset);
- return Smi::ToInt(raw->cast<Smi>());
+ return raw.ToSmi().value();
}
-Name* TransitionArray::GetKey(int transition_number) {
+Name TransitionArray::GetKey(int transition_number) {
DCHECK(transition_number < number_of_transitions());
return Name::cast(
Get(ToKeyIndex(transition_number))->GetHeapObjectAssumeStrong());
}
-Name* TransitionsAccessor::GetKey(int transition_number) {
+Name TransitionsAccessor::GetKey(int transition_number) {
switch (encoding()) {
case kPrototypeInfo:
case kUninitialized:
+ case kMigrationTarget:
UNREACHABLE();
- return nullptr;
+ return Name();
case kWeakRef: {
- Map* map = Map::cast(raw_transitions_->GetHeapObjectAssumeWeak());
+ Map map = Map::cast(raw_transitions_->GetHeapObjectAssumeWeak());
return GetSimpleTransitionKey(map);
}
case kFullTransitionArray:
@@ -77,49 +81,60 @@ Name* TransitionsAccessor::GetKey(int transition_number) {
UNREACHABLE();
}
-void TransitionArray::SetKey(int transition_number, Name* key) {
+void TransitionArray::SetKey(int transition_number, Name key) {
DCHECK(transition_number < number_of_transitions());
WeakFixedArray::Set(ToKeyIndex(transition_number),
HeapObjectReference::Strong(key));
}
-HeapObjectReference** TransitionArray::GetTargetSlot(int transition_number) {
+HeapObjectSlot TransitionArray::GetTargetSlot(int transition_number) {
DCHECK(transition_number < number_of_transitions());
- return reinterpret_cast<HeapObjectReference**>(
- RawFieldOfElementAt(ToTargetIndex(transition_number)));
+ return HeapObjectSlot(RawFieldOfElementAt(ToTargetIndex(transition_number)));
}
// static
-PropertyDetails TransitionsAccessor::GetTargetDetails(Name* name, Map* target) {
+PropertyDetails TransitionsAccessor::GetTargetDetails(Name name, Map target) {
DCHECK(!IsSpecialTransition(name->GetReadOnlyRoots(), name));
int descriptor = target->LastAdded();
- DescriptorArray* descriptors = target->instance_descriptors();
+ DescriptorArray descriptors = target->instance_descriptors();
// Transitions are allowed only for the last added property.
DCHECK(descriptors->GetKey(descriptor)->Equals(name));
return descriptors->GetDetails(descriptor);
}
// static
-Map* TransitionsAccessor::GetTargetFromRaw(MaybeObject* raw) {
+PropertyDetails TransitionsAccessor::GetSimpleTargetDetails(Map transition) {
+ return transition->GetLastDescriptorDetails();
+}
+
+// static
+Name TransitionsAccessor::GetSimpleTransitionKey(Map transition) {
+ int descriptor = transition->LastAdded();
+ return transition->instance_descriptors()->GetKey(descriptor);
+}
+
+// static
+Map TransitionsAccessor::GetTargetFromRaw(MaybeObject raw) {
return Map::cast(raw->GetHeapObjectAssumeWeak());
}
-MaybeObject* TransitionArray::GetRawTarget(int transition_number) {
+MaybeObject TransitionArray::GetRawTarget(int transition_number) {
DCHECK(transition_number < number_of_transitions());
return Get(ToTargetIndex(transition_number));
}
-Map* TransitionArray::GetTarget(int transition_number) {
- MaybeObject* raw = GetRawTarget(transition_number);
+Map TransitionArray::GetTarget(int transition_number) {
+ MaybeObject raw = GetRawTarget(transition_number);
return TransitionsAccessor::GetTargetFromRaw(raw);
}
-Map* TransitionsAccessor::GetTarget(int transition_number) {
+Map TransitionsAccessor::GetTarget(int transition_number) {
switch (encoding()) {
case kPrototypeInfo:
case kUninitialized:
+ case kMigrationTarget:
UNREACHABLE();
- return nullptr;
+ return Map();
case kWeakRef:
return Map::cast(raw_transitions_->GetHeapObjectAssumeWeak());
case kFullTransitionArray:
@@ -128,7 +143,7 @@ Map* TransitionsAccessor::GetTarget(int transition_number) {
UNREACHABLE();
}
-void TransitionArray::SetRawTarget(int transition_number, MaybeObject* value) {
+void TransitionArray::SetRawTarget(int transition_number, MaybeObject value) {
DCHECK(transition_number < number_of_transitions());
DCHECK(value->IsWeak());
DCHECK(value->GetHeapObjectAssumeWeak()->IsMap());
@@ -136,9 +151,9 @@ void TransitionArray::SetRawTarget(int transition_number, MaybeObject* value) {
}
bool TransitionArray::GetTargetIfExists(int transition_number, Isolate* isolate,
- Map** target) {
- MaybeObject* raw = GetRawTarget(transition_number);
- HeapObject* heap_object;
+ Map* target) {
+ MaybeObject raw = GetRawTarget(transition_number);
+ HeapObject heap_object;
if (raw->GetHeapObjectIfStrong(&heap_object) &&
heap_object->IsUndefined(isolate)) {
return false;
@@ -147,7 +162,15 @@ bool TransitionArray::GetTargetIfExists(int transition_number, Isolate* isolate,
return true;
}
-int TransitionArray::SearchName(Name* name, int* out_insertion_index) {
+int TransitionArray::SearchNameForTesting(Name name, int* out_insertion_index) {
+ return SearchName(name, out_insertion_index);
+}
+
+int TransitionArray::SearchSpecial(Symbol symbol, int* out_insertion_index) {
+ return SearchName(symbol, out_insertion_index);
+}
+
+int TransitionArray::SearchName(Name name, int* out_insertion_index) {
DCHECK(name->IsUniqueName());
return internal::Search<ALL_ENTRIES>(this, name, number_of_entries(),
out_insertion_index);
@@ -155,11 +178,11 @@ int TransitionArray::SearchName(Name* name, int* out_insertion_index) {
int TransitionArray::number_of_transitions() const {
if (length() < kFirstIndex) return 0;
- return Smi::ToInt(Get(kTransitionLengthIndex)->cast<Smi>());
+ return Get(kTransitionLengthIndex).ToSmi().value();
}
-int TransitionArray::CompareKeys(Name* key1, uint32_t hash1, PropertyKind kind1,
- PropertyAttributes attributes1, Name* key2,
+int TransitionArray::CompareKeys(Name key1, uint32_t hash1, PropertyKind kind1,
+ PropertyAttributes attributes1, Name key2,
uint32_t hash2, PropertyKind kind2,
PropertyAttributes attributes2) {
int cmp = CompareNames(key1, hash1, key2, hash2);
@@ -168,7 +191,7 @@ int TransitionArray::CompareKeys(Name* key1, uint32_t hash1, PropertyKind kind1,
return CompareDetails(kind1, attributes1, kind2, attributes2);
}
-int TransitionArray::CompareNames(Name* key1, uint32_t hash1, Name* key2,
+int TransitionArray::CompareNames(Name key1, uint32_t hash1, Name key2,
uint32_t hash2) {
if (key1 != key2) {
// In case of hash collisions key1 is always "less" than key2.
@@ -194,13 +217,20 @@ int TransitionArray::CompareDetails(PropertyKind kind1,
return 0;
}
-void TransitionArray::Set(int transition_number, Name* key,
- MaybeObject* target) {
+void TransitionArray::Set(int transition_number, Name key, MaybeObject target) {
WeakFixedArray::Set(ToKeyIndex(transition_number),
MaybeObject::FromObject(key));
WeakFixedArray::Set(ToTargetIndex(transition_number), target);
}
+Name TransitionArray::GetSortedKey(int transition_number) {
+ return GetKey(transition_number);
+}
+
+int TransitionArray::number_of_entries() const {
+ return number_of_transitions();
+}
+
int TransitionArray::Capacity() {
if (length() <= kFirstIndex) return 0;
return (length() - kFirstIndex) / kEntrySize;
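Capacity() above divides the slots after the fixed header evenly into {key, target} pairs. A worked sketch of the index math with assumed header constants (illustrative values, matching the ToKeyIndex/ToTargetIndex shape shown later in transitions.h):

  // Index math for a TransitionArray-like layout (assumed constants):
  constexpr int kFirstIndex = 2;      // slots 0..1: proto transitions + length
  constexpr int kEntrySize = 2;       // each entry is a {key, target} pair
  constexpr int kEntryKeyIndex = 0;
  constexpr int kEntryTargetIndex = 1;

  constexpr int ToKeyIndex(int n) {
    return kFirstIndex + n * kEntrySize + kEntryKeyIndex;
  }
  constexpr int ToTargetIndex(int n) {
    return kFirstIndex + n * kEntrySize + kEntryTargetIndex;
  }
  constexpr int Capacity(int length) {
    return length <= kFirstIndex ? 0 : (length - kFirstIndex) / kEntrySize;
  }

  static_assert(ToKeyIndex(0) == 2 && ToTargetIndex(0) == 3, "entry 0");
  static_assert(Capacity(8) == 3, "8 slots => header + 3 entries");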
diff --git a/deps/v8/src/transitions.cc b/deps/v8/src/transitions.cc
index 6c55f53b03..3dbc1602fa 100644
--- a/deps/v8/src/transitions.cc
+++ b/deps/v8/src/transitions.cc
@@ -13,7 +13,7 @@ namespace internal {
void TransitionsAccessor::Initialize() {
raw_transitions_ = map_->raw_transitions();
- HeapObject* heap_object;
+ HeapObject heap_object;
if (raw_transitions_->IsSmi() || raw_transitions_->IsCleared()) {
encoding_ = kUninitialized;
} else if (raw_transitions_->IsWeak()) {
@@ -21,9 +21,12 @@ void TransitionsAccessor::Initialize() {
} else if (raw_transitions_->GetHeapObjectIfStrong(&heap_object)) {
if (heap_object->IsTransitionArray()) {
encoding_ = kFullTransitionArray;
- } else {
- DCHECK(heap_object->IsPrototypeInfo());
+ } else if (heap_object->IsPrototypeInfo()) {
encoding_ = kPrototypeInfo;
+ } else {
+ DCHECK(map_->is_deprecated());
+ DCHECK(heap_object->IsMap());
+ encoding_ = kMigrationTarget;
}
} else {
UNREACHABLE();
@@ -33,21 +36,22 @@ void TransitionsAccessor::Initialize() {
#endif
}
-Map* TransitionsAccessor::GetSimpleTransition() {
+Map TransitionsAccessor::GetSimpleTransition() {
switch (encoding()) {
case kWeakRef:
return Map::cast(raw_transitions_->GetHeapObjectAssumeWeak());
default:
- return nullptr;
+ return Map();
}
}
-bool TransitionsAccessor::HasSimpleTransitionTo(Map* map) {
+bool TransitionsAccessor::HasSimpleTransitionTo(Map map) {
switch (encoding()) {
case kWeakRef:
return raw_transitions_->GetHeapObjectAssumeWeak() == map;
case kPrototypeInfo:
case kUninitialized:
+ case kMigrationTarget:
case kFullTransitionArray:
return false;
}
@@ -60,7 +64,7 @@ void TransitionsAccessor::Insert(Handle<Name> name, Handle<Map> target,
target->SetBackPointer(map_);
// If the map doesn't have any transitions at all yet, install the new one.
- if (encoding() == kUninitialized) {
+ if (encoding() == kUninitialized || encoding() == kMigrationTarget) {
if (flag == SIMPLE_PROPERTY_TRANSITION) {
ReplaceTransitions(HeapObjectReference::Weak(*target));
return;
@@ -74,9 +78,9 @@ void TransitionsAccessor::Insert(Handle<Name> name, Handle<Map> target,
bool is_special_transition = flag == SPECIAL_TRANSITION;
// If the map has a simple transition, check if it should be overwritten.
- Map* simple_transition = GetSimpleTransition();
- if (simple_transition != nullptr) {
- Name* key = GetSimpleTransitionKey(simple_transition);
+ Map simple_transition = GetSimpleTransition();
+ if (!simple_transition.is_null()) {
+ Name key = GetSimpleTransitionKey(simple_transition);
PropertyDetails old_details = GetSimpleTargetDetails(simple_transition);
PropertyDetails new_details = is_special_transition
? PropertyDetails::Empty()
@@ -94,7 +98,7 @@ void TransitionsAccessor::Insert(Handle<Name> name, Handle<Map> target,
// Reload state; allocations might have caused it to be cleared.
Reload();
simple_transition = GetSimpleTransition();
- if (simple_transition != nullptr) {
+ if (!simple_transition.is_null()) {
DCHECK_EQ(*map, simple_transition);
if (encoding_ == kWeakRef) {
result->Set(0, GetSimpleTransitionKey(simple_transition),
@@ -123,7 +127,7 @@ void TransitionsAccessor::Insert(Handle<Name> name, Handle<Map> target,
{
DisallowHeapAllocation no_gc;
- TransitionArray* array = transitions();
+ TransitionArray array = transitions();
number_of_transitions = array->number_of_transitions();
new_nof = number_of_transitions;
@@ -166,7 +170,7 @@ void TransitionsAccessor::Insert(Handle<Name> name, Handle<Map> target,
// result copy if needed, and recompute variables.
Reload();
DisallowHeapAllocation no_gc;
- TransitionArray* array = transitions();
+ TransitionArray array = transitions();
if (array->number_of_transitions() != number_of_transitions) {
DCHECK(array->number_of_transitions() < number_of_transitions);
@@ -206,36 +210,35 @@ void TransitionsAccessor::Insert(Handle<Name> name, Handle<Map> target,
ReplaceTransitions(MaybeObject::FromObject(*result));
}
-Map* TransitionsAccessor::SearchTransition(Name* name, PropertyKind kind,
- PropertyAttributes attributes) {
+Map TransitionsAccessor::SearchTransition(Name name, PropertyKind kind,
+ PropertyAttributes attributes) {
DCHECK(name->IsUniqueName());
switch (encoding()) {
case kPrototypeInfo:
case kUninitialized:
- return nullptr;
+ case kMigrationTarget:
+ return Map();
case kWeakRef: {
- Map* map = Map::cast(raw_transitions_->GetHeapObjectAssumeWeak());
- if (!IsMatchingMap(map, name, kind, attributes)) return nullptr;
+ Map map = Map::cast(raw_transitions_->GetHeapObjectAssumeWeak());
+ if (!IsMatchingMap(map, name, kind, attributes)) return Map();
return map;
}
case kFullTransitionArray: {
- int transition = transitions()->Search(kind, name, attributes);
- if (transition == kNotFound) return nullptr;
- return transitions()->GetTarget(transition);
+ return transitions()->SearchAndGetTarget(kind, name, attributes);
}
}
UNREACHABLE();
}
-Map* TransitionsAccessor::SearchSpecial(Symbol* name) {
- if (encoding() != kFullTransitionArray) return nullptr;
+Map TransitionsAccessor::SearchSpecial(Symbol name) {
+ if (encoding() != kFullTransitionArray) return Map();
int transition = transitions()->SearchSpecial(name);
- if (transition == kNotFound) return nullptr;
+ if (transition == kNotFound) return Map();
return transitions()->GetTarget(transition);
}
// static
-bool TransitionsAccessor::IsSpecialTransition(ReadOnlyRoots roots, Name* name) {
+bool TransitionsAccessor::IsSpecialTransition(ReadOnlyRoots roots, Name name) {
if (!name->IsSymbol()) return false;
return name == roots.nonextensible_symbol() ||
name == roots.sealed_symbol() || name == roots.frozen_symbol() ||
@@ -248,8 +251,8 @@ MaybeHandle<Map> TransitionsAccessor::FindTransitionToDataProperty(
DCHECK(name->IsUniqueName());
DisallowHeapAllocation no_gc;
PropertyAttributes attributes = name->IsPrivate() ? DONT_ENUM : NONE;
- Map* target = SearchTransition(*name, kData, attributes);
- if (target == nullptr) return MaybeHandle<Map>();
+ Map target = SearchTransition(*name, kData, attributes);
+ if (target.is_null()) return MaybeHandle<Map>();
PropertyDetails details = target->GetLastDescriptorDetails();
DCHECK_EQ(attributes, details.attributes());
DCHECK_EQ(kData, details.kind());
@@ -264,15 +267,16 @@ Handle<String> TransitionsAccessor::ExpectedTransitionKey() {
switch (encoding()) {
case kPrototypeInfo:
case kUninitialized:
+ case kMigrationTarget:
case kFullTransitionArray:
return Handle<String>::null();
case kWeakRef: {
- Map* target = Map::cast(raw_transitions_->GetHeapObjectAssumeWeak());
+ Map target = Map::cast(raw_transitions_->GetHeapObjectAssumeWeak());
PropertyDetails details = GetSimpleTargetDetails(target);
if (details.location() != kField) return Handle<String>::null();
DCHECK_EQ(kData, details.kind());
if (details.attributes() != NONE) return Handle<String>::null();
- Name* name = GetSimpleTransitionKey(target);
+ Name name = GetSimpleTransitionKey(target);
if (!name->IsString()) return Handle<String>::null();
return handle(String::cast(name), isolate_);
}
@@ -294,20 +298,20 @@ bool TransitionsAccessor::CanHaveMoreTransitions() {
}
// static
-bool TransitionsAccessor::IsMatchingMap(Map* target, Name* name,
+bool TransitionsAccessor::IsMatchingMap(Map target, Name name,
PropertyKind kind,
PropertyAttributes attributes) {
int descriptor = target->LastAdded();
- DescriptorArray* descriptors = target->instance_descriptors();
- Name* key = descriptors->GetKey(descriptor);
+ DescriptorArray descriptors = target->instance_descriptors();
+ Name key = descriptors->GetKey(descriptor);
if (key != name) return false;
- PropertyDetails details = descriptors->GetDetails(descriptor);
- return (details.kind() == kind && details.attributes() == attributes);
+ return descriptors->GetDetails(descriptor)
+ .HasKindAndAttributes(kind, attributes);
}
// static
bool TransitionArray::CompactPrototypeTransitionArray(Isolate* isolate,
- WeakFixedArray* array) {
+ WeakFixedArray array) {
const int header = kProtoTransitionHeaderSize;
int number_of_transitions = NumberOfPrototypeTransitions(array);
if (number_of_transitions == 0) {
@@ -316,7 +320,7 @@ bool TransitionArray::CompactPrototypeTransitionArray(Isolate* isolate,
}
int new_number_of_transitions = 0;
for (int i = 0; i < number_of_transitions; i++) {
- MaybeObject* target = array->Get(header + i);
+ MaybeObject target = array->Get(header + i);
DCHECK(target->IsCleared() ||
(target->IsWeak() && target->GetHeapObject()->IsMap()));
if (!target->IsCleared()) {
@@ -327,7 +331,7 @@ bool TransitionArray::CompactPrototypeTransitionArray(Isolate* isolate,
}
}
// Fill slots that became free with undefined value.
- MaybeObject* undefined =
+ MaybeObject undefined =
MaybeObject::FromObject(*isolate->factory()->undefined_value());
for (int i = new_number_of_transitions; i < number_of_transitions; i++) {
array->Set(header + i, undefined);
@@ -338,7 +342,6 @@ bool TransitionArray::CompactPrototypeTransitionArray(Isolate* isolate,
return new_number_of_transitions < number_of_transitions;
}
-
// static
Handle<WeakFixedArray> TransitionArray::GrowPrototypeTransitionArray(
Handle<WeakFixedArray> array, int new_capacity, Isolate* isolate) {
@@ -393,15 +396,15 @@ void TransitionsAccessor::PutPrototypeTransition(Handle<Object> prototype,
Handle<Map> TransitionsAccessor::GetPrototypeTransition(
Handle<Object> prototype) {
DisallowHeapAllocation no_gc;
- WeakFixedArray* cache = GetPrototypeTransitions();
+ WeakFixedArray cache = GetPrototypeTransitions();
int length = TransitionArray::NumberOfPrototypeTransitions(cache);
for (int i = 0; i < length; i++) {
- MaybeObject* target =
+ MaybeObject target =
cache->Get(TransitionArray::kProtoTransitionHeaderSize + i);
DCHECK(target->IsWeakOrCleared());
- HeapObject* heap_object;
+ HeapObject heap_object;
if (target->GetHeapObjectIfWeak(&heap_object)) {
- Map* map = Map::cast(heap_object);
+ Map map = Map::cast(heap_object);
if (map->prototype() == *prototype) {
return handle(map, isolate_);
}
@@ -410,7 +413,7 @@ Handle<Map> TransitionsAccessor::GetPrototypeTransition(
return Handle<Map>();
}
-WeakFixedArray* TransitionsAccessor::GetPrototypeTransitions() {
+WeakFixedArray TransitionsAccessor::GetPrototypeTransitions() {
if (encoding() != kFullTransitionArray ||
!transitions()->HasPrototypeTransitions()) {
return ReadOnlyRoots(isolate_).empty_weak_fixed_array();
@@ -420,7 +423,7 @@ WeakFixedArray* TransitionsAccessor::GetPrototypeTransitions() {
// static
void TransitionArray::SetNumberOfPrototypeTransitions(
- WeakFixedArray* proto_transitions, int value) {
+ WeakFixedArray proto_transitions, int value) {
DCHECK_NE(proto_transitions->length(), 0);
proto_transitions->Set(kProtoTransitionNumberOfEntriesOffset,
MaybeObject::FromSmi(Smi::FromInt(value)));
@@ -430,6 +433,7 @@ int TransitionsAccessor::NumberOfTransitions() {
switch (encoding()) {
case kPrototypeInfo:
case kUninitialized:
+ case kMigrationTarget:
return 0;
case kWeakRef:
return 1;
@@ -440,17 +444,32 @@ int TransitionsAccessor::NumberOfTransitions() {
return 0; // Make GCC happy.
}
+void TransitionsAccessor::SetMigrationTarget(Map migration_target) {
+ // For GC's sake, we only cache the migration target for maps with empty
+ // transitions.
+ if (encoding() != kUninitialized) return;
+ DCHECK(map_->is_deprecated());
+ map_->set_raw_transitions(MaybeObject::FromObject(migration_target));
+ MarkNeedsReload();
+}
+
+Map TransitionsAccessor::GetMigrationTarget() {
+ if (encoding() == kMigrationTarget) {
+ return map_->raw_transitions()->cast<Map>();
+ }
+ return Map();
+}
+
void TransitionArray::Zap(Isolate* isolate) {
- MemsetPointer(
- data_start() + kPrototypeTransitionsIndex,
- MaybeObject::FromObject(ReadOnlyRoots(isolate).the_hole_value()),
- length() - kPrototypeTransitionsIndex);
+ MemsetTagged(ObjectSlot(RawFieldOfElementAt(kPrototypeTransitionsIndex)),
+ ReadOnlyRoots(isolate).the_hole_value(),
+ length() - kPrototypeTransitionsIndex);
SetNumberOfTransitions(0);
}
-void TransitionsAccessor::ReplaceTransitions(MaybeObject* new_transitions) {
+void TransitionsAccessor::ReplaceTransitions(MaybeObject new_transitions) {
if (encoding() == kFullTransitionArray) {
- TransitionArray* old_transitions = transitions();
+ TransitionArray old_transitions = transitions();
#if DEBUG
CheckNewTransitionsAreConsistent(
old_transitions, new_transitions->GetHeapObjectAssumeStrong());
@@ -474,7 +493,8 @@ void TransitionsAccessor::SetPrototypeTransitions(
void TransitionsAccessor::EnsureHasFullTransitionArray() {
if (encoding() == kFullTransitionArray) return;
- int nof = encoding() == kUninitialized ? 0 : 1;
+ int nof =
+ (encoding() == kUninitialized || encoding() == kMigrationTarget) ? 0 : 1;
Handle<TransitionArray> result = isolate_->factory()->NewTransitionArray(nof);
Reload(); // Reload after possible GC.
if (nof == 1) {
@@ -484,7 +504,7 @@ void TransitionsAccessor::EnsureHasFullTransitionArray() {
} else {
// Otherwise populate the new array.
Handle<Map> target(GetSimpleTransition(), isolate_);
- Name* key = GetSimpleTransitionKey(*target);
+ Name key = GetSimpleTransitionKey(*target);
result->Set(0, key, HeapObjectReference::Weak(*target));
}
}
@@ -497,9 +517,10 @@ void TransitionsAccessor::TraverseTransitionTreeInternal(
switch (encoding()) {
case kPrototypeInfo:
case kUninitialized:
+ case kMigrationTarget:
break;
case kWeakRef: {
- Map* simple_target =
+ Map simple_target =
Map::cast(raw_transitions_->GetHeapObjectAssumeWeak());
TransitionsAccessor(isolate_, simple_target, no_gc)
.TraverseTransitionTreeInternal(callback, data, no_gc);
@@ -507,12 +528,12 @@ void TransitionsAccessor::TraverseTransitionTreeInternal(
}
case kFullTransitionArray: {
if (transitions()->HasPrototypeTransitions()) {
- WeakFixedArray* proto_trans = transitions()->GetPrototypeTransitions();
+ WeakFixedArray proto_trans = transitions()->GetPrototypeTransitions();
int length = TransitionArray::NumberOfPrototypeTransitions(proto_trans);
for (int i = 0; i < length; ++i) {
int index = TransitionArray::kProtoTransitionHeaderSize + i;
- MaybeObject* target = proto_trans->Get(index);
- HeapObject* heap_object;
+ MaybeObject target = proto_trans->Get(index);
+ HeapObject heap_object;
if (target->GetHeapObjectIfWeak(&heap_object)) {
TransitionsAccessor(isolate_, Map::cast(heap_object), no_gc)
.TraverseTransitionTreeInternal(callback, data, no_gc);
@@ -533,14 +554,14 @@ void TransitionsAccessor::TraverseTransitionTreeInternal(
#ifdef DEBUG
void TransitionsAccessor::CheckNewTransitionsAreConsistent(
- TransitionArray* old_transitions, Object* transitions) {
+ TransitionArray old_transitions, Object transitions) {
// This function only handles full transition arrays.
DCHECK_EQ(kFullTransitionArray, encoding());
- TransitionArray* new_transitions = TransitionArray::cast(transitions);
+ TransitionArray new_transitions = TransitionArray::cast(transitions);
for (int i = 0; i < old_transitions->number_of_transitions(); i++) {
- Map* target = old_transitions->GetTarget(i);
+ Map target = old_transitions->GetTarget(i);
if (target->instance_descriptors() == map_->instance_descriptors()) {
- Name* key = old_transitions->GetKey(i);
+ Name key = old_transitions->GetKey(i);
int new_target_index;
if (IsSpecialTransition(ReadOnlyRoots(isolate_), key)) {
new_target_index = new_transitions->SearchSpecial(Symbol::cast(key));
@@ -563,10 +584,10 @@ int TransitionArray::SearchDetails(int transition, PropertyKind kind,
int* out_insertion_index) {
int nof_transitions = number_of_transitions();
DCHECK(transition < nof_transitions);
- Name* key = GetKey(transition);
+ Name key = GetKey(transition);
for (; transition < nof_transitions && GetKey(transition) == key;
transition++) {
- Map* target = GetTarget(transition);
+ Map target = GetTarget(transition);
PropertyDetails target_details =
TransitionsAccessor::GetTargetDetails(key, target);
@@ -582,7 +603,30 @@ int TransitionArray::SearchDetails(int transition, PropertyKind kind,
return kNotFound;
}
-int TransitionArray::Search(PropertyKind kind, Name* name,
+Map TransitionArray::SearchDetailsAndGetTarget(int transition,
+ PropertyKind kind,
+ PropertyAttributes attributes) {
+ int nof_transitions = number_of_transitions();
+ DCHECK(transition < nof_transitions);
+ Name key = GetKey(transition);
+ for (; transition < nof_transitions && GetKey(transition) == key;
+ transition++) {
+ Map target = GetTarget(transition);
+ PropertyDetails target_details =
+ TransitionsAccessor::GetTargetDetails(key, target);
+
+ int cmp = CompareDetails(kind, attributes, target_details.kind(),
+ target_details.attributes());
+ if (cmp == 0) {
+ return target;
+ } else if (cmp < 0) {
+ break;
+ }
+ }
+ return Map();
+}
+
+int TransitionArray::Search(PropertyKind kind, Name name,
PropertyAttributes attributes,
int* out_insertion_index) {
int transition = SearchName(name, out_insertion_index);
@@ -590,18 +634,27 @@ int TransitionArray::Search(PropertyKind kind, Name* name,
return SearchDetails(transition, kind, attributes, out_insertion_index);
}
+Map TransitionArray::SearchAndGetTarget(PropertyKind kind, Name name,
+ PropertyAttributes attributes) {
+ int transition = SearchName(name, nullptr);
+ if (transition == kNotFound) {
+ return Map();
+ }
+ return SearchDetailsAndGetTarget(transition, kind, attributes);
+}
+
void TransitionArray::Sort() {
DisallowHeapAllocation no_gc;
// In-place insertion sort.
int length = number_of_transitions();
ReadOnlyRoots roots = GetReadOnlyRoots();
for (int i = 1; i < length; i++) {
- Name* key = GetKey(i);
- MaybeObject* target = GetRawTarget(i);
+ Name key = GetKey(i);
+ MaybeObject target = GetRawTarget(i);
PropertyKind kind = kData;
PropertyAttributes attributes = NONE;
if (!TransitionsAccessor::IsSpecialTransition(roots, key)) {
- Map* target_map = TransitionsAccessor::GetTargetFromRaw(target);
+ Map target_map = TransitionsAccessor::GetTargetFromRaw(target);
PropertyDetails details =
TransitionsAccessor::GetTargetDetails(key, target_map);
kind = details.kind();
@@ -609,12 +662,12 @@ void TransitionArray::Sort() {
}
int j;
for (j = i - 1; j >= 0; j--) {
- Name* temp_key = GetKey(j);
- MaybeObject* temp_target = GetRawTarget(j);
+ Name temp_key = GetKey(j);
+ MaybeObject temp_target = GetRawTarget(j);
PropertyKind temp_kind = kData;
PropertyAttributes temp_attributes = NONE;
if (!TransitionsAccessor::IsSpecialTransition(roots, temp_key)) {
- Map* temp_target_map =
+ Map temp_target_map =
TransitionsAccessor::GetTargetFromRaw(temp_target);
PropertyDetails details =
TransitionsAccessor::GetTargetDetails(temp_key, temp_target_map);
diff --git a/deps/v8/src/transitions.h b/deps/v8/src/transitions.h
index 9684815239..58268907e3 100644
--- a/deps/v8/src/transitions.h
+++ b/deps/v8/src/transitions.h
@@ -10,6 +10,7 @@
#include "src/objects.h"
#include "src/objects/descriptor-array.h"
#include "src/objects/map.h"
+#include "src/objects/maybe-object.h"
#include "src/objects/name.h"
// Has to be the last include (doesn't have include guards):
@@ -37,7 +38,7 @@ namespace internal {
// cleared when the map they refer to is not otherwise reachable.
class TransitionsAccessor {
public:
- TransitionsAccessor(Isolate* isolate, Map* map, DisallowHeapAllocation* no_gc)
+ TransitionsAccessor(Isolate* isolate, Map map, DisallowHeapAllocation* no_gc)
: isolate_(isolate), map_(map) {
Initialize();
USE(no_gc);
@@ -53,13 +54,13 @@ class TransitionsAccessor {
// This TransitionsAccessor instance is unusable after this operation.
void Insert(Handle<Name> name, Handle<Map> target, SimpleTransitionFlag flag);
- Map* SearchTransition(Name* name, PropertyKind kind,
- PropertyAttributes attributes);
+ Map SearchTransition(Name name, PropertyKind kind,
+ PropertyAttributes attributes);
- Map* SearchSpecial(Symbol* name);
+ Map SearchSpecial(Symbol name);
// Returns true for non-property transitions like elements kind, or
// frozen/sealed transitions.
- static bool IsSpecialTransition(ReadOnlyRoots roots, Name* name);
+ static bool IsSpecialTransition(ReadOnlyRoots roots, Name name);
enum RequestedLocation { kAnyLocation, kFieldOnly };
MaybeHandle<Map> FindTransitionToDataProperty(
@@ -78,15 +79,15 @@ class TransitionsAccessor {
// applying in-place right trimming.
static const int kMaxNumberOfTransitions = 1024 + 512;
bool CanHaveMoreTransitions();
- inline Name* GetKey(int transition_number);
- inline Map* GetTarget(int transition_number);
- static inline PropertyDetails GetTargetDetails(Name* name, Map* target);
+ inline Name GetKey(int transition_number);
+ inline Map GetTarget(int transition_number);
+ static inline PropertyDetails GetTargetDetails(Name name, Map target);
- static bool IsMatchingMap(Map* target, Name* name, PropertyKind kind,
+ static bool IsMatchingMap(Map target, Name name, PropertyKind kind,
PropertyAttributes attributes);
// ===== ITERATION =====
- typedef void (*TraverseCallback)(Map* map, void* data);
+ typedef void (*TraverseCallback)(Map map, void* data);
// Traverse the transition tree in postorder.
void TraverseTransitionTree(TraverseCallback callback, void* data) {
@@ -106,16 +107,24 @@ class TransitionsAccessor {
void PutPrototypeTransition(Handle<Object> prototype, Handle<Map> target_map);
Handle<Map> GetPrototypeTransition(Handle<Object> prototype);
+ // During the first-time Map::Update and Map::TryUpdate, the migration target
+ // map could be cached in the raw_transitions slot of the old map that is
+ // deprecated from the map transition tree. The next time the old map is
+ // updated, we will check this cache slot as a shortcut to get the migration
+ // target map.
+ void SetMigrationTarget(Map migration_target);
+ Map GetMigrationTarget();
+
#if DEBUG || OBJECT_PRINT
void PrintTransitions(std::ostream& os);
- static void PrintOneTransition(std::ostream& os, Name* key, Map* target);
+ static void PrintOneTransition(std::ostream& os, Name key, Map target);
void PrintTransitionTree();
void PrintTransitionTree(std::ostream& os, int level,
DisallowHeapAllocation* no_gc);
#endif
#if DEBUG
- void CheckNewTransitionsAreConsistent(TransitionArray* old_transitions,
- Object* transitions);
+ void CheckNewTransitionsAreConsistent(TransitionArray old_transitions,
+ Object transitions);
bool IsConsistentWithBackPointers();
bool IsSortedNoDuplicates();
#endif
@@ -125,6 +134,7 @@ class TransitionsAccessor {
enum Encoding {
kPrototypeInfo,
kUninitialized,
+ kMigrationTarget,
kWeakRef,
kFullTransitionArray,
};
@@ -144,16 +154,11 @@ class TransitionsAccessor {
friend class MarkCompactCollector; // For HasSimpleTransitionTo.
friend class TransitionArray;
- static inline PropertyDetails GetSimpleTargetDetails(Map* transition) {
- return transition->GetLastDescriptorDetails();
- }
+ static inline PropertyDetails GetSimpleTargetDetails(Map transition);
- static inline Name* GetSimpleTransitionKey(Map* transition) {
- int descriptor = transition->LastAdded();
- return transition->instance_descriptors()->GetKey(descriptor);
- }
+ static inline Name GetSimpleTransitionKey(Map transition);
- static inline Map* GetTargetFromRaw(MaybeObject* raw);
+ static inline Map GetTargetFromRaw(MaybeObject raw);
void MarkNeedsReload() {
#if DEBUG
@@ -163,26 +168,26 @@ class TransitionsAccessor {
void Initialize();
- inline Map* GetSimpleTransition();
- bool HasSimpleTransitionTo(Map* map);
+ inline Map GetSimpleTransition();
+ bool HasSimpleTransitionTo(Map map);
- void ReplaceTransitions(MaybeObject* new_transitions);
+ void ReplaceTransitions(MaybeObject new_transitions);
- inline Map* GetTargetMapFromWeakRef();
+ inline Map GetTargetMapFromWeakRef();
void EnsureHasFullTransitionArray();
void SetPrototypeTransitions(Handle<WeakFixedArray> proto_transitions);
- WeakFixedArray* GetPrototypeTransitions();
+ WeakFixedArray GetPrototypeTransitions();
void TraverseTransitionTreeInternal(TraverseCallback callback, void* data,
DisallowHeapAllocation* no_gc);
- inline TransitionArray* transitions();
+ inline TransitionArray transitions();
Isolate* isolate_;
Handle<Map> map_handle_;
- Map* map_;
- MaybeObject* raw_transitions_;
+ Map map_;
+ MaybeObject raw_transitions_;
Encoding encoding_;
#if DEBUG
bool needs_reload_;
@@ -207,28 +212,27 @@ class TransitionArray : public WeakFixedArray {
public:
DECL_CAST(TransitionArray)
- inline WeakFixedArray* GetPrototypeTransitions();
+ inline WeakFixedArray GetPrototypeTransitions();
inline bool HasPrototypeTransitions();
// Accessors for fetching instance transition at transition number.
- inline void SetKey(int transition_number, Name* value);
- inline Name* GetKey(int transition_number);
- inline HeapObjectReference** GetKeySlot(int transition_number);
-
- inline Map* GetTarget(int transition_number);
- inline void SetRawTarget(int transition_number, MaybeObject* target);
- inline MaybeObject* GetRawTarget(int transition_number);
- inline HeapObjectReference** GetTargetSlot(int transition_number);
+ inline void SetKey(int transition_number, Name value);
+ inline Name GetKey(int transition_number);
+ inline HeapObjectSlot GetKeySlot(int transition_number);
+
+ inline Map GetTarget(int transition_number);
+ inline void SetRawTarget(int transition_number, MaybeObject target);
+ inline MaybeObject GetRawTarget(int transition_number);
+ inline HeapObjectSlot GetTargetSlot(int transition_number);
inline bool GetTargetIfExists(int transition_number, Isolate* isolate,
- Map** target);
+ Map* target);
// Required for templatized Search interface.
- static const int kNotFound = -1;
- Name* GetSortedKey(int transition_number) {
- return GetKey(transition_number);
- }
+ static constexpr int kNotFound = -1;
+
+ inline Name GetSortedKey(int transition_number);
int GetSortedKeyIndex(int transition_number) { return transition_number; }
- inline int number_of_entries() const { return number_of_transitions(); }
+ inline int number_of_entries() const;
#ifdef DEBUG
bool IsSortedNoDuplicates(int valid_entries = -1);
#endif
@@ -259,10 +263,8 @@ class TransitionArray : public WeakFixedArray {
return kFirstIndex + (transition_number * kEntrySize) + kEntryTargetIndex;
}
- inline int SearchNameForTesting(Name* name,
- int* out_insertion_index = nullptr) {
- return SearchName(name, out_insertion_index);
- }
+ inline int SearchNameForTesting(Name name,
+ int* out_insertion_index = nullptr);
private:
friend class Factory;
@@ -280,11 +282,11 @@ class TransitionArray : public WeakFixedArray {
static const int kProtoTransitionHeaderSize = 1;
static const int kMaxCachedPrototypeTransitions = 256;
- inline void SetPrototypeTransitions(WeakFixedArray* prototype_transitions);
+ inline void SetPrototypeTransitions(WeakFixedArray prototype_transitions);
static inline int NumberOfPrototypeTransitions(
- WeakFixedArray* proto_transitions);
- static void SetNumberOfPrototypeTransitions(WeakFixedArray* proto_transitions,
+ WeakFixedArray proto_transitions);
+ static void SetNumberOfPrototypeTransitions(WeakFixedArray proto_transitions,
int value);
static const int kProtoTransitionNumberOfEntriesOffset = 0;
@@ -297,37 +299,40 @@ class TransitionArray : public WeakFixedArray {
}
// Search a transition for a given kind, property name and attributes.
- int Search(PropertyKind kind, Name* name, PropertyAttributes attributes,
+ int Search(PropertyKind kind, Name name, PropertyAttributes attributes,
int* out_insertion_index = nullptr);
+ Map SearchAndGetTarget(PropertyKind kind, Name name,
+ PropertyAttributes attributes);
+
// Search a non-property transition (like elements kind, observe or frozen
// transitions).
- inline int SearchSpecial(Symbol* symbol, int* out_insertion_index = nullptr) {
- return SearchName(symbol, out_insertion_index);
- }
+ inline int SearchSpecial(Symbol symbol, int* out_insertion_index = nullptr);
// Search a first transition for a given property name.
- inline int SearchName(Name* name, int* out_insertion_index = nullptr);
+ inline int SearchName(Name name, int* out_insertion_index = nullptr);
int SearchDetails(int transition, PropertyKind kind,
PropertyAttributes attributes, int* out_insertion_index);
+ Map SearchDetailsAndGetTarget(int transition, PropertyKind kind,
+ PropertyAttributes attributes);
inline int number_of_transitions() const;
static bool CompactPrototypeTransitionArray(Isolate* isolate,
- WeakFixedArray* array);
+ WeakFixedArray array);
static Handle<WeakFixedArray> GrowPrototypeTransitionArray(
Handle<WeakFixedArray> array, int new_capacity, Isolate* isolate);
// Compares two tuples <key, kind, attributes>, returns -1 if
// tuple1 is "less" than tuple2, 0 if tuple1 equal to tuple2 and 1 otherwise.
- static inline int CompareKeys(Name* key1, uint32_t hash1, PropertyKind kind1,
- PropertyAttributes attributes1, Name* key2,
+ static inline int CompareKeys(Name key1, uint32_t hash1, PropertyKind kind1,
+ PropertyAttributes attributes1, Name key2,
uint32_t hash2, PropertyKind kind2,
PropertyAttributes attributes2);
// Compares keys, returns -1 if key1 is "less" than key2,
// 0 if key1 equal to key2 and 1 otherwise.
- static inline int CompareNames(Name* key1, uint32_t hash1, Name* key2,
+ static inline int CompareNames(Name key1, uint32_t hash1, Name key2,
uint32_t hash2);
// Compares two details, returns -1 if details1 is "less" than details2,
@@ -337,11 +342,11 @@ class TransitionArray : public WeakFixedArray {
PropertyKind kind2,
PropertyAttributes attributes2);
- inline void Set(int transition_number, Name* key, MaybeObject* target);
+ inline void Set(int transition_number, Name key, MaybeObject target);
void Zap(Isolate* isolate);
- DISALLOW_IMPLICIT_CONSTRUCTORS(TransitionArray);
+ OBJECT_CONSTRUCTORS(TransitionArray, WeakFixedArray);
};
} // namespace internal
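The new kMigrationTarget encoding lets a deprecated map remember where its instances migrated. A hedged sketch of the intended lookup path, using the accessors declared above (assumes in-tree V8 headers; the surrounding Map::Update logic is not part of this patch):

  // Sketch: consult the cached migration target before doing a full
  // transition-tree walk to update a deprecated map.
  Map TryGetCachedMigrationTarget(Isolate* isolate, Map old_map) {
    DisallowHeapAllocation no_gc;
    if (!old_map->is_deprecated()) return Map();
    // Non-null only when the old map's encoding is kMigrationTarget.
    return TransitionsAccessor(isolate, old_map, &no_gc).GetMigrationTarget();
  }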
diff --git a/deps/v8/src/trap-handler/DEPS b/deps/v8/src/trap-handler/DEPS
index 681cbd8825..7241cf55c5 100644
--- a/deps/v8/src/trap-handler/DEPS
+++ b/deps/v8/src/trap-handler/DEPS
@@ -13,5 +13,12 @@ specific_include_rules = {
"+src/base/build_config.h",
"+src/globals.h",
"+src/flags.h",
+ ],
+ "handler-inside-posix.h": [
+ # To access V8_OS_LINUX. This file is already included in build_config.h.
+ "+include/v8config.h",
+ ],
+ "handler-inside-win.h": [
+ "+src/base/macros.h",
]
}
diff --git a/deps/v8/src/trap-handler/handler-inside-linux.cc b/deps/v8/src/trap-handler/handler-inside-posix.cc
index 867f90bfe7..60f3398ece 100644
--- a/deps/v8/src/trap-handler/handler-inside-linux.cc
+++ b/deps/v8/src/trap-handler/handler-inside-posix.cc
@@ -23,7 +23,16 @@
// context. Some additional code is used both inside and outside the signal
// handler. This code can be found in handler-shared.cc.
+#include "src/trap-handler/handler-inside-posix.h"
+
#include <signal.h>
+
+#ifdef V8_OS_LINUX
+#include <ucontext.h>
+#elif V8_OS_MACOSX
+#include <sys/ucontext.h>
+#endif
+
#include <stddef.h>
#include <stdlib.h>
@@ -35,6 +44,9 @@ namespace internal {
namespace trap_handler {
bool IsKernelGeneratedSignal(siginfo_t* info) {
+ // On macOS, only `info->si_code > 0` is relevant, because macOS leaves
+ // si_code at its default of 0 for signals that don’t originate in hardware.
+ // The other conditions are only relevant for Linux.
return info->si_code > 0 && info->si_code != SI_USER &&
info->si_code != SI_QUEUE && info->si_code != SI_TIMER &&
info->si_code != SI_ASYNCIO && info->si_code != SI_MESGQ;
@@ -59,25 +71,29 @@ class SigUnmaskStack {
void operator=(const SigUnmaskStack&) = delete;
};
-bool TryHandleSignal(int signum, siginfo_t* info, ucontext_t* context) {
- // Bail out early in case we got called for the wrong kind of signal.
- if (signum != SIGSEGV) {
+bool TryHandleSignal(int signum, siginfo_t* info, void* context) {
+ // Ensure the faulting thread was actually running Wasm code. This should be
+ // the first check in the trap handler to guarantee that the IsThreadInWasm
+ // flag is only set in wasm code. Otherwise a later signal handler is executed
+ // with the flag set.
+ if (!IsThreadInWasm()) {
return false;
}
- // Make sure the signal was generated by the kernel and not some other source.
- if (!IsKernelGeneratedSignal(info)) {
+ // Clear g_thread_in_wasm_code, primarily to protect against nested faults.
+ g_thread_in_wasm_code = false;
+
+ // Bail out early in case we got called for the wrong kind of signal.
+
+ if (signum != kOobSignal) {
return false;
}
- // Ensure the faulting thread was actually running Wasm code.
- if (!IsThreadInWasm()) {
+ // Make sure the signal was generated by the kernel and not some other source.
+ if (!IsKernelGeneratedSignal(info)) {
return false;
}
- // Clear g_thread_in_wasm_code, primarily to protect against nested faults.
- g_thread_in_wasm_code = false;
-
// Begin signal mask scope. We need to be sure to restore the signal mask
// before we restore the g_thread_in_wasm_code flag.
{
@@ -91,11 +107,19 @@ bool TryHandleSignal(int signum, siginfo_t* info, ucontext_t* context) {
sigaddset(&sigs, SIGSEGV);
SigUnmaskStack unmask(sigs);
- uintptr_t fault_addr = context->uc_mcontext.gregs[REG_RIP];
+ ucontext_t* uc = reinterpret_cast<ucontext_t*>(context);
+#if V8_OS_LINUX
+ auto* context_rip = &uc->uc_mcontext.gregs[REG_RIP];
+#elif V8_OS_MACOSX
+ auto* context_rip = &uc->uc_mcontext->__ss.__rip;
+#else
+#error Unsupported platform
+#endif
+ uintptr_t fault_addr = *context_rip;
uintptr_t landing_pad = 0;
if (TryFindLandingPad(fault_addr, &landing_pad)) {
// Tell the caller to return to the landing pad.
- context->uc_mcontext.gregs[REG_RIP] = landing_pad;
+ *context_rip = landing_pad;
// We will return to wasm code, so restore the g_thread_in_wasm_code flag.
g_thread_in_wasm_code = true;
return true;
@@ -109,9 +133,7 @@ bool TryHandleSignal(int signum, siginfo_t* info, ucontext_t* context) {
}
void HandleSignal(int signum, siginfo_t* info, void* context) {
- ucontext_t* uc = reinterpret_cast<ucontext_t*>(context);
-
- if (!TryHandleSignal(signum, info, uc)) {
+ if (!TryHandleSignal(signum, info, context)) {
// Since V8 didn't handle this signal, we want to re-raise the same signal.
// For kernel-generated SEGV signals, we do this by restoring the original
// SEGV handler and then returning. The fault will happen again and the
@@ -120,7 +142,7 @@ void HandleSignal(int signum, siginfo_t* info, void* context) {
// We handle user-generated signals by calling raise() instead. This is for
// completeness. We should never actually see one of these, but just in
// case, we do the right thing.
- RestoreOriginalSignalHandler();
+ RemoveTrapHandler();
if (!IsKernelGeneratedSignal(info)) {
raise(signum);
}
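HandleSignal's (int, siginfo_t*, void*) shape matches the sa_sigaction field, so installing it is a plain sigaction call. A minimal standalone sketch of that wiring (illustrative flags and a stub handler, not the patch's RegisterDefaultTrapHandler):

  #include <signal.h>
  #include <string.h>

  // Stand-in for the real v8::internal::trap_handler::HandleSignal above.
  void HandleSignal(int signum, siginfo_t* info, void* context) {}

  // Install a siginfo-style handler for the out-of-bounds signal, saving the
  // previous disposition so it can be restored later. SA_SIGINFO selects the
  // three-argument form; SA_ONSTACK lets the handler run on an alternate
  // stack if one is configured.
  bool InstallOobHandler(int oob_signal, struct sigaction* old_handler) {
    struct sigaction action;
    memset(&action, 0, sizeof(action));
    action.sa_sigaction = HandleSignal;
    action.sa_flags = SA_SIGINFO | SA_ONSTACK;
    sigemptyset(&action.sa_mask);
    return sigaction(oob_signal, &action, old_handler) == 0;
  }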
diff --git a/deps/v8/src/trap-handler/handler-inside-posix.h b/deps/v8/src/trap-handler/handler-inside-posix.h
new file mode 100644
index 0000000000..a167455efc
--- /dev/null
+++ b/deps/v8/src/trap-handler/handler-inside-posix.h
@@ -0,0 +1,31 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_TRAP_HANDLER_HANDLER_INSIDE_POSIX_H_
+#define V8_TRAP_HANDLER_HANDLER_INSIDE_POSIX_H_
+
+#include <signal.h>
+#include "include/v8config.h"
+
+namespace v8 {
+namespace internal {
+namespace trap_handler {
+
+#if V8_OS_LINUX
+constexpr int kOobSignal = SIGSEGV;
+#elif V8_OS_MACOSX
+constexpr int kOobSignal = SIGBUS;
+#else
+#error Posix trap handlers are only supported on Linux and MacOSX.
+#endif
+
+void HandleSignal(int signum, siginfo_t* info, void* context);
+
+bool TryHandleSignal(int signum, siginfo_t* info, void* context);
+
+} // namespace trap_handler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_TRAP_HANDLER_HANDLER_INSIDE_POSIX_H_
diff --git a/deps/v8/src/trap-handler/handler-inside-win.cc b/deps/v8/src/trap-handler/handler-inside-win.cc
new file mode 100644
index 0000000000..4c99f5e5a8
--- /dev/null
+++ b/deps/v8/src/trap-handler/handler-inside-win.cc
@@ -0,0 +1,77 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// PLEASE READ BEFORE CHANGING THIS FILE!
+//
+// This file implements the out of bounds trap handler for
+// WebAssembly. Exception handlers are notoriously difficult to get
+// right, and getting it wrong can lead to security
+// vulnerabilities. In order to minimize this risk, here are some
+// rules to follow.
+//
+// 1. Do not introduce any new external dependencies. This file needs
+// to be self contained so it is easy to audit everything that a
+// trap handler might do.
+//
+// 2. Any changes must be reviewed by someone from the crash reporting
+// or security team. See OWNERS for suggested reviewers.
+//
+// For more information, see https://goo.gl/yMeyUY.
+//
+// This file contains most of the code that actually runs in an exception
+// handler context. Some additional code is used both inside and outside the
+// trap handler. This code can be found in handler-shared.cc.
+
+#include "src/trap-handler/handler-inside-win.h"
+
+#include <windows.h>
+
+#include "src/trap-handler/trap-handler-internal.h"
+#include "src/trap-handler/trap-handler.h"
+
+namespace v8 {
+namespace internal {
+namespace trap_handler {
+
+bool TryHandleWasmTrap(EXCEPTION_POINTERS* exception) {
+ // Ensure the faulting thread was actually running Wasm code.
+ if (!IsThreadInWasm()) {
+ return false;
+ }
+
+ // Clear g_thread_in_wasm_code, primarily to protect against nested faults.
+ g_thread_in_wasm_code = false;
+
+ const EXCEPTION_RECORD* record = exception->ExceptionRecord;
+
+ if (record->ExceptionCode != EXCEPTION_ACCESS_VIOLATION) {
+ return false;
+ }
+
+ uintptr_t fault_addr = reinterpret_cast<uintptr_t>(record->ExceptionAddress);
+ uintptr_t landing_pad = 0;
+
+ if (TryFindLandingPad(fault_addr, &landing_pad)) {
+ exception->ContextRecord->Rip = landing_pad;
+ // We will return to wasm code, so restore the g_thread_in_wasm_code flag.
+ g_thread_in_wasm_code = true;
+ return true;
+ }
+
+ // If we get here, it's not a recoverable wasm fault, so we go to the next
+ // handler. Leave the g_thread_in_wasm_code flag unset since we do not return
+ // to wasm code.
+ return false;
+}
+
+LONG HandleWasmTrap(EXCEPTION_POINTERS* exception) {
+ if (TryHandleWasmTrap(exception)) {
+ return EXCEPTION_CONTINUE_EXECUTION;
+ }
+ return EXCEPTION_CONTINUE_SEARCH;
+}
+
+} // namespace trap_handler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/trap-handler/handler-inside-win.h b/deps/v8/src/trap-handler/handler-inside-win.h
new file mode 100644
index 0000000000..6db28149e7
--- /dev/null
+++ b/deps/v8/src/trap-handler/handler-inside-win.h
@@ -0,0 +1,27 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_TRAP_HANDLER_HANDLER_INSIDE_WIN_H_
+#define V8_TRAP_HANDLER_HANDLER_INSIDE_WIN_H_
+
+#include <windows.h>
+
+#include "src/base/macros.h"
+
+namespace v8 {
+namespace internal {
+namespace trap_handler {
+
+LONG WINAPI HandleWasmTrap(EXCEPTION_POINTERS* exception);
+
+// On Windows, asan installs its own exception handler which maps shadow
+// memory. Since our exception handler may be executed before the asan exception
+// handler, we have to make sure that asan shadow memory is not accessed here.
+DISABLE_ASAN bool TryHandleWasmTrap(EXCEPTION_POINTERS* exception);
+
+} // namespace trap_handler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_TRAP_HANDLER_HANDLER_INSIDE_WIN_H_
diff --git a/deps/v8/src/trap-handler/handler-inside.cc b/deps/v8/src/trap-handler/handler-inside.cc
index adab6a0f47..81e37c205a 100644
--- a/deps/v8/src/trap-handler/handler-inside.cc
+++ b/deps/v8/src/trap-handler/handler-inside.cc
@@ -4,23 +4,23 @@
// PLEASE READ BEFORE CHANGING THIS FILE!
//
-// This file implements the out of bounds signal handler for
-// WebAssembly. Signal handlers are notoriously difficult to get
+// This file implements the out of bounds trap handler for
+// WebAssembly. Trap handlers are notoriously difficult to get
// right, and getting it wrong can lead to security
// vulnerabilities. In order to minimize this risk, here are some
// rules to follow.
//
// 1. Do not introduce any new external dependencies. This file needs
// to be self contained so it is easy to audit everything that a
-// signal handler might do.
+// trap handler might do.
//
// 2. Any changes must be reviewed by someone from the crash reporting
// or security team. See OWNERS for suggested reviewers.
//
// For more information, see https://goo.gl/yMeyUY.
//
-// This file contains most of the code that actually runs in a signal handler
-// context. Some additional code is used both inside and outside the signal
+// This file contains most of the code that actually runs in a trap handler
+// context. Some additional code is used both inside and outside the trap
// handler. This code can be found in handler-shared.cc.
#include "src/trap-handler/trap-handler-internal.h"
@@ -37,11 +37,11 @@ namespace trap_handler {
bool TryFindLandingPad(uintptr_t fault_addr, uintptr_t* landing_pad) {
// TODO(eholk): broad code range check
- // Taking locks in a signal handler is risky because a fault in the signal
- // handler could lead to a deadlock when attempting to acquire the lock
- // again. We guard against this case with g_thread_in_wasm_code. The lock
- // may only be taken when not executing Wasm code (an assert in
- // MetadataLock's constructor ensures this). This signal handler will bail
+ // Taking locks in the trap handler is risky because a fault in the trap
+ // handler itself could lead to a deadlock when attempting to acquire the
+ // lock again. We guard against this case with g_thread_in_wasm_code. The
+ // lock may only be taken when not executing Wasm code (an assert in
+ // MetadataLock's constructor ensures this). The trap handler will bail
// out before trying to take the lock if g_thread_in_wasm_code is not set.
MetadataLock lock_holder;
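The deadlock guard the comment describes amounts to: the in-wasm flag is cleared before any lock is taken, and the lock asserts the flag is clear on entry, so a fault raised inside the handler can never re-enter and block on the same lock. A schematic, self-contained sketch of that pattern (assumed shape; the real MetadataLock lives in trap-handler-internal.h):

  #include <atomic>
  #include <cassert>

  thread_local int g_thread_in_wasm_code = 0;
  std::atomic_flag g_metadata_lock = ATOMIC_FLAG_INIT;

  // Schematic lock: refuses to be taken while the thread is marked as
  // executing Wasm code, mirroring the invariant described above.
  struct MetadataLock {
    MetadataLock() {
      assert(!g_thread_in_wasm_code);
      while (g_metadata_lock.test_and_set(std::memory_order_acquire)) {
        // spin until released
      }
    }
    ~MetadataLock() { g_metadata_lock.clear(std::memory_order_release); }
  };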
diff --git a/deps/v8/src/trap-handler/handler-outside-linux.cc b/deps/v8/src/trap-handler/handler-outside-posix.cc
index 34f3983315..55bcc0075b 100644
--- a/deps/v8/src/trap-handler/handler-outside-linux.cc
+++ b/deps/v8/src/trap-handler/handler-outside-posix.cc
@@ -21,14 +21,23 @@
#include <signal.h>
+#include "src/trap-handler/handler-inside-posix.h"
#include "src/trap-handler/trap-handler-internal.h"
-#include "src/trap-handler/trap-handler.h"
namespace v8 {
namespace internal {
namespace trap_handler {
#if V8_TRAP_HANDLER_SUPPORTED
+namespace {
+struct sigaction g_old_handler;
+
+// When using the default signal handler, we save the old one to restore in case
+// V8 chooses not to handle the signal.
+bool g_is_default_signal_handler_registered;
+
+} // namespace
+
bool RegisterDefaultTrapHandler() {
CHECK(!g_is_default_signal_handler_registered);
@@ -39,7 +48,7 @@ bool RegisterDefaultTrapHandler() {
// {sigaction} installs a new custom segfault handler. On success, it returns
// 0. If we get a nonzero value, we report an error to the caller by returning
// false.
- if (sigaction(SIGSEGV, &action, &g_old_handler) != 0) {
+ if (sigaction(kOobSignal, &action, &g_old_handler) != 0) {
return false;
}
@@ -52,7 +61,7 @@ bool RegisterDefaultTrapHandler() {
defined(THREAD_SANITIZER) || defined(LEAK_SANITIZER) || \
defined(UNDEFINED_SANITIZER)
struct sigaction installed_handler;
- CHECK_EQ(sigaction(SIGSEGV, NULL, &installed_handler), 0);
+ CHECK_EQ(sigaction(kOobSignal, NULL, &installed_handler), 0);
// If the installed handler does not point to HandleSignal, then
// allow_user_segv_handler is 0.
if (installed_handler.sa_sigaction != HandleSignal) {
@@ -66,7 +75,15 @@ bool RegisterDefaultTrapHandler() {
g_is_default_signal_handler_registered = true;
return true;
}
-#endif
+
+void RemoveTrapHandler() {
+ if (g_is_default_signal_handler_registered) {
+ if (sigaction(kOobSignal, &g_old_handler, nullptr) == 0) {
+ g_is_default_signal_handler_registered = false;
+ }
+ }
+}
+#endif // V8_TRAP_HANDLER_SUPPORTED
} // namespace trap_handler
} // namespace internal
diff --git a/deps/v8/src/trap-handler/handler-outside-win.cc b/deps/v8/src/trap-handler/handler-outside-win.cc
index 3bfcb05a1f..09673c8ccc 100644
--- a/deps/v8/src/trap-handler/handler-outside-win.cc
+++ b/deps/v8/src/trap-handler/handler-outside-win.cc
@@ -4,9 +4,9 @@
// PLEASE READ BEFORE CHANGING THIS FILE!
//
-// This file implements the support code for the out of bounds signal handler.
-// Nothing in here actually runs in the signal handler, but the code here
-// manipulates data structures used by the signal handler so we still need to be
+// This file implements the support code for the out of bounds trap handler.
+// Nothing in here actually runs in the trap handler, but the code here
+// manipulates data structures used by the trap handler so we still need to be
// careful. In order to minimize this risk, here are some rules to follow.
//
// 1. Avoid introducing new external dependencies. The files in src/trap-handler
@@ -17,18 +17,43 @@
//
// For more information, see https://goo.gl/yMeyUY.
//
-// For the code that runs in the signal handler itself, see handler-inside.cc.
+// For the code that runs in the trap handler itself, see handler-inside.cc.
+
+#include <windows.h>
+
+#include "src/trap-handler/handler-inside-win.h"
+#include "src/trap-handler/trap-handler.h"
namespace v8 {
namespace internal {
namespace trap_handler {
#if V8_TRAP_HANDLER_SUPPORTED
+
+namespace {
+
+// A handle to our registered exception handler, so that we can remove it
+// again later.
+void* g_registered_handler = nullptr;
+
+} // namespace
+
bool RegisterDefaultTrapHandler() {
- // Not yet implemented
- return false;
+ constexpr ULONG first = TRUE;
+ CHECK_NULL(g_registered_handler);
+ g_registered_handler = AddVectoredExceptionHandler(first, HandleWasmTrap);
+
+ return nullptr != g_registered_handler;
}
-#endif
+
+void RemoveTrapHandler() {
+ if (!g_registered_handler) return;
+
+ RemoveVectoredExceptionHandler(g_registered_handler);
+ g_registered_handler = nullptr;
+}
+
+#endif // V8_TRAP_HANDLER_SUPPORTED
} // namespace trap_handler
} // namespace internal
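On Windows the install/remove pairing uses the Win32 vectored exception API directly, as the hunk above shows. A standalone sketch of the same pattern (standard Win32 calls, with a stub handler standing in for HandleWasmTrap):

  #include <windows.h>

  // Stand-in for the real HandleWasmTrap above.
  LONG WINAPI HandleWasmTrap(EXCEPTION_POINTERS* exception) {
    return EXCEPTION_CONTINUE_SEARCH;
  }

  static void* g_registered = nullptr;

  bool Install() {
    // First == 1: run ahead of frame-based structured exception handling.
    g_registered = AddVectoredExceptionHandler(/*First=*/1, HandleWasmTrap);
    return g_registered != nullptr;
  }

  void Uninstall() {
    if (!g_registered) return;
    RemoveVectoredExceptionHandler(g_registered);
    g_registered = nullptr;
  }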
diff --git a/deps/v8/src/trap-handler/handler-outside.cc b/deps/v8/src/trap-handler/handler-outside.cc
index 2d75d2d7e4..565289e18b 100644
--- a/deps/v8/src/trap-handler/handler-outside.cc
+++ b/deps/v8/src/trap-handler/handler-outside.cc
@@ -4,9 +4,9 @@
// PLEASE READ BEFORE CHANGING THIS FILE!
//
-// This file implements the support code for the out of bounds signal handler.
-// Nothing in here actually runs in the signal handler, but the code here
-// manipulates data structures used by the signal handler so we still need to be
+// This file implements the support code for the out of bounds trap handler.
+// Nothing in here actually runs in the trap handler, but the code here
+// manipulates data structures used by the trap handler so we still need to be
// careful. In order to minimize this risk, here are some rules to follow.
//
// 1. Avoid introducing new external dependencies. The files in src/trap-handler
@@ -17,9 +17,8 @@
//
// For more information, see https://goo.gl/yMeyUY.
//
-// For the code that runs in the signal handler itself, see handler-inside.cc.
+// For the code that runs in the trap handler itself, see handler-inside.cc.
-#include <signal.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
@@ -235,6 +234,8 @@ void ReleaseHandlerData(int index) {
free(data);
}
+int* GetThreadInWasmThreadLocalAddress() { return &g_thread_in_wasm_code; }
+
size_t GetRecoveredTrapCount() {
return gRecoveredTrapCount.load(std::memory_order_relaxed);
}
@@ -244,15 +245,17 @@ size_t GetRecoveredTrapCount() {
// Otherwise, the correct one should be implemented in the appropriate
// platform-specific handler-outside.cc.
bool RegisterDefaultTrapHandler() { return false; }
+
+void RemoveTrapHandler() {}
#endif
bool g_is_trap_handler_enabled;
-bool EnableTrapHandler(bool use_v8_signal_handler) {
+bool EnableTrapHandler(bool use_v8_handler) {
if (!V8_TRAP_HANDLER_SUPPORTED) {
return false;
}
- if (use_v8_signal_handler) {
+ if (use_v8_handler) {
g_is_trap_handler_enabled = RegisterDefaultTrapHandler();
return g_is_trap_handler_enabled;
}
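The fallback stubs and the dispatch above compose as follows; a hedged standalone restatement (the sketch names and the explicit supported parameter are illustrative, not V8 API):

// Sketch of the dispatch: when the embedder owns the handler, V8 only
// records that trapping is enabled; otherwise it registers its own.
bool RegisterDefaultHandlerSketch();  // platform-specific, e.g. the Win32
                                      // registration shown earlier

bool EnableTrapHandlerSketch(bool use_v8_handler, bool supported) {
  if (!supported) return false;  // V8_TRAP_HANDLER_SUPPORTED == false
  if (use_v8_handler) return RegisterDefaultHandlerSketch();
  return true;  // embedder installs its own handler
}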
diff --git a/deps/v8/src/trap-handler/handler-shared.cc b/deps/v8/src/trap-handler/handler-shared.cc
index d07f7ae131..0607d2ed54 100644
--- a/deps/v8/src/trap-handler/handler-shared.cc
+++ b/deps/v8/src/trap-handler/handler-shared.cc
@@ -5,12 +5,12 @@
// PLEASE READ BEFORE CHANGING THIS FILE!
//
// This file contains code that is used both inside and outside the out of
-// bounds signal handler. Because this code runs in a signal handler context,
+// bounds trap handler. Because this code runs in a trap handler context,
// use extra care when modifying this file. Here are some rules to follow.
//
// 1. Do not introduce any new external dependencies. This file needs
// to be self contained so it is easy to audit everything that a
-// signal handler might do.
+// trap handler might do.
//
// 2. Any changes must be reviewed by someone from the crash reporting
// or security team. See OWNERS for suggested reviewers.
@@ -28,21 +28,6 @@ namespace trap_handler {
// 1 byte in size; see https://sourceware.org/bugzilla/show_bug.cgi?id=14898.
THREAD_LOCAL int g_thread_in_wasm_code;
-#if V8_TRAP_HANDLER_SUPPORTED
-// When using the default signal handler, we save the old one to restore in case
-// V8 chooses not to handle the signal.
-struct sigaction g_old_handler;
-bool g_is_default_signal_handler_registered;
-#endif
-
-V8_EXPORT_PRIVATE void RestoreOriginalSignalHandler() {
-#if V8_TRAP_HANDLER_SUPPORTED
- if (sigaction(SIGSEGV, &g_old_handler, nullptr) == 0) {
- g_is_default_signal_handler_registered = false;
- }
-#endif
-}
-
static_assert(sizeof(g_thread_in_wasm_code) > 1,
"sizeof(thread_local_var) must be > 1, see "
"https://sourceware.org/bugzilla/show_bug.cgi?id=14898");
diff --git a/deps/v8/src/trap-handler/trap-handler-internal.h b/deps/v8/src/trap-handler/trap-handler-internal.h
index 66ae4f652a..f564b10082 100644
--- a/deps/v8/src/trap-handler/trap-handler-internal.h
+++ b/deps/v8/src/trap-handler/trap-handler-internal.h
@@ -16,7 +16,7 @@ namespace v8 {
namespace internal {
namespace trap_handler {
-// This describes a chunk of code that the signal handler will be able to handle
+// This describes a chunk of code that the trap handler will be able to handle
// faults in. {base} points to the beginning of the chunk, and {size} is the
// number of bytes in the code chunk. The remainder of the struct is a list of
// protected memory access instructions and an offset to a landing pad to handle
@@ -41,10 +41,6 @@ class MetadataLock {
void operator=(const MetadataLock&) = delete;
};
-#if V8_TRAP_HANDLER_SUPPORTED
-void HandleSignal(int signum, siginfo_t* info, void* context);
-#endif
-
// To enable constant time registration of handler data, we keep a free list of
// entries in the gCodeObjects table. Each entry contains a {next_free} field,
// which can be used to figure out where the next entry should be inserted.
@@ -68,13 +64,6 @@ extern std::atomic_size_t gRecoveredTrapCount;
// unchanged.
bool TryFindLandingPad(uintptr_t fault_addr, uintptr_t* landing_pad);
-#if V8_TRAP_HANDLER_SUPPORTED
-// When using the default signal handler, we save the old one to restore in case
-// V8 chooses not to handle the signal.
-extern struct sigaction g_old_handler;
-extern bool g_is_default_signal_handler_registered;
-#endif
-
} // namespace trap_handler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/trap-handler/trap-handler.h b/deps/v8/src/trap-handler/trap-handler.h
index 0e9dbf248c..50fd4de439 100644
--- a/deps/v8/src/trap-handler/trap-handler.h
+++ b/deps/v8/src/trap-handler/trap-handler.h
@@ -5,7 +5,6 @@
#ifndef V8_TRAP_HANDLER_TRAP_HANDLER_H_
#define V8_TRAP_HANDLER_TRAP_HANDLER_H_
-#include <signal.h>
#include <stdint.h>
#include <stdlib.h>
@@ -13,10 +12,6 @@
#include "src/flags.h"
#include "src/globals.h"
-#if V8_OS_LINUX
-#include <ucontext.h>
-#endif
-
namespace v8 {
namespace internal {
namespace trap_handler {
@@ -24,6 +19,10 @@ namespace trap_handler {
// TODO(eholk): Support trap handlers on other platforms.
#if V8_TARGET_ARCH_X64 && V8_OS_LINUX && !V8_OS_ANDROID
#define V8_TRAP_HANDLER_SUPPORTED true
+#elif V8_TARGET_ARCH_X64 && V8_OS_WIN
+#define V8_TRAP_HANDLER_SUPPORTED true
+#elif V8_TARGET_ARCH_X64 && V8_OS_MACOSX
+#define V8_TRAP_HANDLER_SUPPORTED true
#else
#define V8_TRAP_HANDLER_SUPPORTED false
#endif
@@ -41,18 +40,18 @@ struct ProtectedInstructionData {
const int kInvalidIndex = -1;
-/// Adds the handler data to the place where the signal handler will find it.
+/// Adds the handler data to the place where the trap handler will find it.
///
/// This returns a number that can be used to identify the handler data to
/// ReleaseHandlerData, or -1 on failure.
-int RegisterHandlerData(Address base, size_t size,
- size_t num_protected_instructions,
- const ProtectedInstructionData* protected_instructions);
+int V8_EXPORT_PRIVATE RegisterHandlerData(
+ Address base, size_t size, size_t num_protected_instructions,
+ const ProtectedInstructionData* protected_instructions);
/// Removes the data from the master list and frees any memory, if necessary.
/// TODO(mtrofin): We can switch to using size_t for index and not need
/// kInvalidIndex.
-void ReleaseHandlerData(int index);
+void V8_EXPORT_PRIVATE ReleaseHandlerData(int index);
#if V8_OS_WIN
#define THREAD_LOCAL __declspec(thread)
@@ -66,9 +65,9 @@ void ReleaseHandlerData(int index);
extern bool g_is_trap_handler_enabled;
// Enables trap handling for WebAssembly bounds checks.
//
-// use_v8_signal_handler indicates that V8 should install its own signal handler
+// use_v8_handler indicates that V8 should install its own handler
// rather than relying on the embedder to do it.
-bool EnableTrapHandler(bool use_v8_signal_handler);
+bool EnableTrapHandler(bool use_v8_handler);
inline bool IsTrapHandlerEnabled() {
DCHECK_IMPLIES(g_is_trap_handler_enabled, V8_TRAP_HANDLER_SUPPORTED);
@@ -80,11 +79,12 @@ extern THREAD_LOCAL int g_thread_in_wasm_code;
// Return the address of the thread-local {g_thread_in_wasm_code} variable. This
// pointer can be accessed and modified as long as the thread calling this
// function exists. Only use it from the same thread to avoid race conditions.
-inline int* GetThreadInWasmThreadLocalAddress() {
- return &g_thread_in_wasm_code;
-}
+V8_NOINLINE V8_EXPORT_PRIVATE int* GetThreadInWasmThreadLocalAddress();
-inline bool IsThreadInWasm() { return g_thread_in_wasm_code; }
+// On Windows, asan installs its own exception handler which maps shadow
+// memory. Since our exception handler may be executed before the asan exception
+// handler, we have to make sure that asan shadow memory is not accessed here.
+DISABLE_ASAN inline bool IsThreadInWasm() { return g_thread_in_wasm_code; }
inline void SetThreadInWasm() {
if (IsTrapHandlerEnabled()) {
@@ -101,11 +101,7 @@ inline void ClearThreadInWasm() {
}
bool RegisterDefaultTrapHandler();
-V8_EXPORT_PRIVATE void RestoreOriginalSignalHandler();
-
-#if V8_OS_LINUX
-bool TryHandleSignal(int signum, siginfo_t* info, ucontext_t* context);
-#endif // V8_OS_LINUX
+V8_EXPORT_PRIVATE void RemoveTrapHandler();
size_t GetRecoveredTrapCount();
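Call sites bracket wasm execution with SetThreadInWasm/ClearThreadInWasm so the handler only claims faults raised from wasm code. A hypothetical RAII wrapper over the accessors above (not part of this patch; the enabled-check happens inside the accessors, as in the header):

#include "src/trap-handler/trap-handler.h"

class ThreadInWasmScope {
 public:
  ThreadInWasmScope() { v8::internal::trap_handler::SetThreadInWasm(); }
  ~ThreadInWasmScope() { v8::internal::trap_handler::ClearThreadInWasm(); }
};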
diff --git a/deps/v8/src/turbo-assembler.cc b/deps/v8/src/turbo-assembler.cc
index 4bb09047bb..65e05aa45c 100644
--- a/deps/v8/src/turbo-assembler.cc
+++ b/deps/v8/src/turbo-assembler.cc
@@ -6,8 +6,8 @@
#include "src/builtins/builtins.h"
#include "src/builtins/constants-table-builder.h"
-#include "src/heap/heap-inl.h"
-#include "src/lsan.h"
+#include "src/isolate-data.h"
+#include "src/isolate-inl.h"
#include "src/snapshot/serializer-common.h"
namespace v8 {
@@ -15,9 +15,9 @@ namespace internal {
TurboAssemblerBase::TurboAssemblerBase(Isolate* isolate,
const AssemblerOptions& options,
- void* buffer, int buffer_size,
- CodeObjectRequired create_code_object)
- : Assembler(options, buffer, buffer_size), isolate_(isolate) {
+ CodeObjectRequired create_code_object,
+ std::unique_ptr<AssemblerBuffer> buffer)
+ : Assembler(options, std::move(buffer)), isolate_(isolate) {
if (create_code_object == CodeObjectRequired::kYes) {
code_object_ = Handle<HeapObject>::New(
ReadOnlyRoots(isolate).self_reference_marker(), isolate);
@@ -33,7 +33,7 @@ void TurboAssemblerBase::IndirectLoadConstant(Register destination,
int builtin_index;
RootIndex root_index;
- if (isolate()->heap()->IsRootHandle(object, &root_index)) {
+ if (isolate()->roots_table().IsRootHandle(object, &root_index)) {
// Roots are loaded relative to the root register.
LoadRoot(destination, root_index);
} else if (isolate()->builtins()->IsBuiltinHandle(object, &builtin_index)) {
@@ -71,62 +71,55 @@ void TurboAssemblerBase::IndirectLoadExternalReference(
LoadRootRegisterOffset(destination, offset);
} else {
// Otherwise, do a memory load from the external reference table.
-
- // Encode as an index into the external reference table stored on the
- // isolate.
- ExternalReferenceEncoder encoder(isolate());
- ExternalReferenceEncoder::Value v = encoder.Encode(reference.address());
- CHECK(!v.is_from_api());
-
- LoadRootRelative(destination,
- RootRegisterOffsetForExternalReferenceIndex(v.index()));
+ LoadRootRelative(
+ destination,
+ RootRegisterOffsetForExternalReferenceTableEntry(isolate(), reference));
}
}
// static
-int32_t TurboAssemblerBase::RootRegisterOffset(RootIndex root_index) {
- return (static_cast<int32_t>(root_index) << kPointerSizeLog2) -
- kRootRegisterBias;
+int32_t TurboAssemblerBase::RootRegisterOffsetForRootIndex(
+ RootIndex root_index) {
+ return IsolateData::root_slot_offset(root_index);
}
// static
-int32_t TurboAssemblerBase::RootRegisterOffsetForExternalReferenceIndex(
- int reference_index) {
- return Heap::roots_to_external_reference_table_offset() - kRootRegisterBias +
- ExternalReferenceTable::OffsetOfEntry(reference_index);
+int32_t TurboAssemblerBase::RootRegisterOffsetForBuiltinIndex(
+ int builtin_index) {
+ return IsolateData::builtin_slot_offset(builtin_index);
}
// static
intptr_t TurboAssemblerBase::RootRegisterOffsetForExternalReference(
Isolate* isolate, const ExternalReference& reference) {
- return static_cast<intptr_t>(reference.address()) - kRootRegisterBias -
- reinterpret_cast<intptr_t>(isolate->heap()->roots_array_start());
+ return static_cast<intptr_t>(reference.address() - isolate->isolate_root());
}
// static
-bool TurboAssemblerBase::IsAddressableThroughRootRegister(
+int32_t TurboAssemblerBase::RootRegisterOffsetForExternalReferenceTableEntry(
Isolate* isolate, const ExternalReference& reference) {
- Address address = reference.address();
- return isolate->root_register_addressable_region().contains(address);
+ // Encode as an index into the external reference table stored on the
+ // isolate.
+ ExternalReferenceEncoder encoder(isolate);
+ ExternalReferenceEncoder::Value v = encoder.Encode(reference.address());
+ CHECK(!v.is_from_api());
+
+ return IsolateData::external_reference_table_offset() +
+ ExternalReferenceTable::OffsetOfEntry(v.index());
}
// static
-int32_t TurboAssemblerBase::RootRegisterOffsetForBuiltinIndex(
- int builtin_index) {
- return Heap::roots_to_builtins_offset() - kRootRegisterBias +
- builtin_index * kPointerSize;
+bool TurboAssemblerBase::IsAddressableThroughRootRegister(
+ Isolate* isolate, const ExternalReference& reference) {
+ Address address = reference.address();
+ return isolate->root_register_addressable_region().contains(address);
}
void TurboAssemblerBase::RecordCommentForOffHeapTrampoline(int builtin_index) {
if (!FLAG_code_comments) return;
- size_t len = strlen("-- Inlined Trampoline to --") +
- strlen(Builtins::name(builtin_index)) + 1;
- Vector<char> buffer = Vector<char>::New(static_cast<int>(len));
- char* buffer_start = buffer.start();
- LSAN_IGNORE_OBJECT(buffer_start);
- SNPrintF(buffer, "-- Inlined Trampoline to %s --",
- Builtins::name(builtin_index));
- RecordComment(buffer_start);
+ std::ostringstream str;
+ str << "-- Inlined Trampoline to " << Builtins::name(builtin_index) << " --";
+ RecordComment(str.str().c_str());
}
} // namespace internal
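The new scheme addresses everything as a displacement from the isolate root held in kRootRegister. A minimal sketch of the offset computation (names hypothetical; the real code asserts addressability rather than reporting failure):

#include <cstdint>
#include <limits>

// Computes offset such that the value lives at [kRootRegister + offset].
bool OffsetFromIsolateRoot(uintptr_t address, uintptr_t isolate_root,
                           int32_t* offset) {
  intptr_t diff = static_cast<intptr_t>(address - isolate_root);
  // A single load instruction needs the displacement to fit in 32 bits.
  if (diff < std::numeric_limits<int32_t>::min() ||
      diff > std::numeric_limits<int32_t>::max()) return false;
  *offset = static_cast<int32_t>(diff);
  return true;
}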
diff --git a/deps/v8/src/turbo-assembler.h b/deps/v8/src/turbo-assembler.h
index 70048962dd..494d1d7296 100644
--- a/deps/v8/src/turbo-assembler.h
+++ b/deps/v8/src/turbo-assembler.h
@@ -7,7 +7,8 @@
#include "src/assembler-arch.h"
#include "src/base/template-utils.h"
-#include "src/heap/heap.h"
+#include "src/builtins/builtins.h"
+#include "src/roots.h"
namespace v8 {
namespace internal {
@@ -39,6 +40,19 @@ class V8_EXPORT_PRIVATE TurboAssemblerBase : public Assembler {
void set_has_frame(bool v) { has_frame_ = v; }
bool has_frame() const { return has_frame_; }
+ // Calls the given builtin. If builtins are embedded, the trampoline Code
+ // object on the heap is not used.
+ virtual void CallBuiltinPointer(Register builtin_pointer) = 0;
+
+ // Calls/jumps to the given Code object. If builtins are embedded, the
+ // trampoline Code object on the heap is not used.
+ virtual void CallCodeObject(Register code_object) = 0;
+ virtual void JumpCodeObject(Register code_object) = 0;
+
+ // Loads the given Code object's entry point into the destination register.
+ virtual void LoadCodeObjectEntry(Register destination,
+ Register code_object) = 0;
+
// Loads the given constant or external reference without embedding its direct
// pointer. The produced code is isolate-independent.
void IndirectLoadConstant(Register destination, Handle<HeapObject> object);
@@ -48,35 +62,46 @@ class V8_EXPORT_PRIVATE TurboAssemblerBase : public Assembler {
virtual void LoadFromConstantsTable(Register destination,
int constant_index) = 0;
+ // Corresponds to: destination = kRootRegister + offset.
virtual void LoadRootRegisterOffset(Register destination,
intptr_t offset) = 0;
+
+ // Corresponds to: destination = [kRootRegister + offset].
virtual void LoadRootRelative(Register destination, int32_t offset) = 0;
virtual void LoadRoot(Register destination, RootIndex index) = 0;
- static int32_t RootRegisterOffset(RootIndex root_index);
- static int32_t RootRegisterOffsetForExternalReferenceIndex(
- int reference_index);
-
+ static int32_t RootRegisterOffsetForRootIndex(RootIndex root_index);
static int32_t RootRegisterOffsetForBuiltinIndex(int builtin_index);
+ // Returns the root-relative offset to reference.address().
static intptr_t RootRegisterOffsetForExternalReference(
Isolate* isolate, const ExternalReference& reference);
+ // Returns the root-relative offset to the external reference table entry,
+ // which itself contains reference.address().
+ static int32_t RootRegisterOffsetForExternalReferenceTableEntry(
+ Isolate* isolate, const ExternalReference& reference);
+
// An address is addressable through kRootRegister if it is located within
// isolate->root_register_addressable_region().
static bool IsAddressableThroughRootRegister(
Isolate* isolate, const ExternalReference& reference);
protected:
- TurboAssemblerBase(const AssemblerOptions& options, void* buffer,
- int buffer_size)
- : TurboAssemblerBase(nullptr, options.EnableV8AgnosticCode(), buffer,
- buffer_size, CodeObjectRequired::kNo) {}
+ TurboAssemblerBase(const AssemblerOptions& options,
+ std::unique_ptr<AssemblerBuffer> buffer = {})
+ : TurboAssemblerBase(nullptr, options.EnableV8AgnosticCode(),
+ CodeObjectRequired::kNo, std::move(buffer)) {}
+
+ TurboAssemblerBase(Isolate* isolate, CodeObjectRequired create_code_object,
+ std::unique_ptr<AssemblerBuffer> buffer = {})
+ : TurboAssemblerBase(isolate, AssemblerOptions::Default(isolate),
+ create_code_object, std::move(buffer)) {}
TurboAssemblerBase(Isolate* isolate, const AssemblerOptions& options,
- void* buffer, int buffer_size,
- CodeObjectRequired create_code_object);
+ CodeObjectRequired create_code_object,
+ std::unique_ptr<AssemblerBuffer> buffer = {});
void RecordCommentForOffHeapTrampoline(int builtin_index);
@@ -118,13 +143,14 @@ class HardAbortScope {
bool old_value_;
};
-// Helper stubs can be called in different ways depending on where the target
-// code is located and how the call sequence is expected to look like:
-// - JavaScript: Call on-heap {Code} object via {RelocInfo::CODE_TARGET}.
-// - WebAssembly: Call native {WasmCode} stub via {RelocInfo::WASM_STUB_CALL}.
-enum class StubCallMode { kCallOnHeapBuiltin, kCallWasmRuntimeStub };
-
#ifdef DEBUG
+struct CountIfValidRegisterFunctor {
+ template <typename RegType>
+ constexpr int operator()(int count, RegType reg) const {
+ return count + (reg.is_valid() ? 1 : 0);
+ }
+};
+
template <typename RegType, typename... RegTypes,
// All arguments must be either Register or DoubleRegister.
typename = typename std::enable_if<
@@ -132,7 +158,8 @@ template <typename RegType, typename... RegTypes,
base::is_same<DoubleRegister, RegType, RegTypes...>::value>::type>
inline bool AreAliased(RegType first_reg, RegTypes... regs) {
int num_different_regs = NumRegs(RegType::ListOf(first_reg, regs...));
- int num_given_regs = sizeof...(regs) + 1;
+ int num_given_regs =
+ base::fold(CountIfValidRegisterFunctor{}, 0, first_reg, regs...);
return num_different_regs < num_given_regs;
}
#endif
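The fold counts only valid registers, so no_reg placeholders no longer inflate the expected count the way sizeof...(regs) + 1 did. A standalone C++17 restatement of the idea (Reg is a stand-in for Register):

struct Reg {
  int code;
  constexpr bool is_valid() const { return code >= 0; }
};

template <typename... Regs>
constexpr int CountValidRegs(Regs... regs) {
  int count = 0;
  ((count += regs.is_valid() ? 1 : 0), ...);  // fold over the comma operator
  return count;
}

static_assert(CountValidRegs(Reg{0}, Reg{-1}, Reg{3}) == 2,
              "invalid registers are skipped");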
diff --git a/deps/v8/src/type-hints.cc b/deps/v8/src/type-hints.cc
index 2b7090ad08..c6fd06f9c8 100644
--- a/deps/v8/src/type-hints.cc
+++ b/deps/v8/src/type-hints.cc
@@ -51,6 +51,8 @@ std::ostream& operator<<(std::ostream& os, CompareOperationHint hint) {
return os << "BigInt";
case CompareOperationHint::kReceiver:
return os << "Receiver";
+ case CompareOperationHint::kReceiverOrNullOrUndefined:
+ return os << "ReceiverOrNullOrUndefined";
case CompareOperationHint::kAny:
return os << "Any";
}
diff --git a/deps/v8/src/type-hints.h b/deps/v8/src/type-hints.h
index df5af6c49d..3f34f925c4 100644
--- a/deps/v8/src/type-hints.h
+++ b/deps/v8/src/type-hints.h
@@ -41,6 +41,7 @@ enum class CompareOperationHint : uint8_t {
kSymbol,
kBigInt,
kReceiver,
+ kReceiverOrNullOrUndefined,
kAny
};
diff --git a/deps/v8/src/unicode-cache-inl.h b/deps/v8/src/unicode-cache-inl.h
deleted file mode 100644
index 7f73589666..0000000000
--- a/deps/v8/src/unicode-cache-inl.h
+++ /dev/null
@@ -1,43 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_UNICODE_CACHE_INL_H_
-#define V8_UNICODE_CACHE_INL_H_
-
-#include "src/unicode-inl.h"
-#include "src/unicode-cache.h"
-
-namespace v8 {
-namespace internal {
-
-bool UnicodeCache::IsIdentifierStart(unibrow::uchar c) {
- return kIsIdentifierStart.get(c);
-}
-
-
-bool UnicodeCache::IsIdentifierPart(unibrow::uchar c) {
- return kIsIdentifierPart.get(c);
-}
-
-bool UnicodeCache::IsLineTerminatorSequence(unibrow::uchar c,
- unibrow::uchar next) {
- if (!unibrow::IsLineTerminator(c)) return false;
- if (c == 0x000d && next == 0x000a) return false; // CR with following LF.
- return true;
-}
-
-
-bool UnicodeCache::IsWhiteSpace(unibrow::uchar c) {
- return kIsWhiteSpace.get(c);
-}
-
-
-bool UnicodeCache::IsWhiteSpaceOrLineTerminator(unibrow::uchar c) {
- return kIsWhiteSpaceOrLineTerminator.get(c);
-}
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_UNICODE_CACHE_INL_H_
diff --git a/deps/v8/src/unicode-cache.h b/deps/v8/src/unicode-cache.h
index ddc81b738c..b6f6a85c6c 100644
--- a/deps/v8/src/unicode-cache.h
+++ b/deps/v8/src/unicode-cache.h
@@ -6,9 +6,9 @@
#define V8_UNICODE_CACHE_H_
#include "src/base/macros.h"
-#include "src/char-predicates.h"
-#include "src/unicode.h"
#include "src/unicode-decoder.h"
+#include "src/unicode.h"
+#include "src/utils.h"
namespace v8 {
namespace internal {
@@ -21,20 +21,7 @@ class UnicodeCache {
StaticResource<Utf8Decoder>* utf8_decoder() { return &utf8_decoder_; }
- inline bool IsIdentifierStart(unibrow::uchar c);
- inline bool IsIdentifierPart(unibrow::uchar c);
- inline bool IsLineTerminator(unibrow::uchar c);
- inline bool IsLineTerminatorSequence(unibrow::uchar c, unibrow::uchar next);
-
- inline bool IsWhiteSpace(unibrow::uchar c);
- inline bool IsWhiteSpaceOrLineTerminator(unibrow::uchar c);
-
private:
- unibrow::Predicate<IdentifierStart, 128> kIsIdentifierStart;
- unibrow::Predicate<IdentifierPart, 128> kIsIdentifierPart;
- unibrow::Predicate<WhiteSpace, 128> kIsWhiteSpace;
- unibrow::Predicate<WhiteSpaceOrLineTerminator, 128>
- kIsWhiteSpaceOrLineTerminator;
StaticResource<Utf8Decoder> utf8_decoder_;
DISALLOW_COPY_AND_ASSIGN(UnicodeCache);
diff --git a/deps/v8/src/unicode-decoder.h b/deps/v8/src/unicode-decoder.h
index c87e192ad0..2bd4032c56 100644
--- a/deps/v8/src/unicode-decoder.h
+++ b/deps/v8/src/unicode-decoder.h
@@ -8,8 +8,8 @@
#include <sys/types.h>
#include <algorithm>
#include "src/globals.h"
+#include "src/memcopy.h"
#include "src/unicode.h"
-#include "src/utils.h"
#include "src/vector.h"
namespace unibrow {
diff --git a/deps/v8/src/unoptimized-compilation-info.cc b/deps/v8/src/unoptimized-compilation-info.cc
index b58fe97a2c..1211af3715 100644
--- a/deps/v8/src/unoptimized-compilation-info.cc
+++ b/deps/v8/src/unoptimized-compilation-info.cc
@@ -18,9 +18,7 @@ namespace internal {
UnoptimizedCompilationInfo::UnoptimizedCompilationInfo(Zone* zone,
ParseInfo* parse_info,
FunctionLiteral* literal)
- : flags_(FLAG_untrusted_code_mitigations ? kUntrustedCodeMitigations : 0),
- zone_(zone),
- feedback_vector_spec_(zone) {
+ : flags_(0), zone_(zone), feedback_vector_spec_(zone) {
// NOTE: The parse_info passed here represents the global information gathered
// during parsing, but does not represent specific details of the actual
// function literal being compiled for this OptimizedCompilationInfo. As such,
@@ -34,6 +32,7 @@ UnoptimizedCompilationInfo::UnoptimizedCompilationInfo(Zone* zone,
if (parse_info->is_eval()) MarkAsEval();
if (parse_info->is_native()) MarkAsNative();
if (parse_info->collect_type_profile()) MarkAsCollectTypeProfile();
+ if (parse_info->might_always_opt()) MarkAsMightAlwaysOpt();
}
DeclarationScope* UnoptimizedCompilationInfo::scope() const {
diff --git a/deps/v8/src/unoptimized-compilation-info.h b/deps/v8/src/unoptimized-compilation-info.h
index 53295819bb..a70dc88651 100644
--- a/deps/v8/src/unoptimized-compilation-info.h
+++ b/deps/v8/src/unoptimized-compilation-info.h
@@ -17,6 +17,7 @@
namespace v8 {
namespace internal {
+class AsmWasmData;
class CoverageInfo;
class DeclarationScope;
class FunctionLiteral;
@@ -45,6 +46,9 @@ class V8_EXPORT_PRIVATE UnoptimizedCompilationInfo final {
void MarkAsCollectTypeProfile() { SetFlag(kCollectTypeProfile); }
bool collect_type_profile() const { return GetFlag(kCollectTypeProfile); }
+ void MarkAsMightAlwaysOpt() { SetFlag(kMightAlwaysOpt); }
+ bool might_always_opt() const { return GetFlag(kMightAlwaysOpt); }
+
// Accessors for the input data of the function being compiled.
FunctionLiteral* literal() const { return literal_; }
@@ -83,8 +87,8 @@ class V8_EXPORT_PRIVATE UnoptimizedCompilationInfo final {
}
bool has_asm_wasm_data() const { return !asm_wasm_data_.is_null(); }
- Handle<FixedArray> asm_wasm_data() const { return asm_wasm_data_; }
- void SetAsmWasmData(Handle<FixedArray> asm_wasm_data) {
+ Handle<AsmWasmData> asm_wasm_data() const { return asm_wasm_data_; }
+ void SetAsmWasmData(Handle<AsmWasmData> asm_wasm_data) {
asm_wasm_data_ = asm_wasm_data;
}
@@ -97,7 +101,7 @@ class V8_EXPORT_PRIVATE UnoptimizedCompilationInfo final {
kIsEval = 1 << 0,
kIsNative = 1 << 1,
kCollectTypeProfile = 1 << 2,
- kUntrustedCodeMitigations = 1 << 3,
+ kMightAlwaysOpt = 1 << 3,
};
void SetFlag(Flag flag) { flags_ |= flag; }
@@ -123,8 +127,8 @@ class V8_EXPORT_PRIVATE UnoptimizedCompilationInfo final {
// Holds the bytecode array generated by the interpreter.
Handle<BytecodeArray> bytecode_array_;
- // Holds the asm_wasm array generated by the asmjs compiler.
- Handle<FixedArray> asm_wasm_data_;
+ // Holds the asm_wasm data struct generated by the asmjs compiler.
+ Handle<AsmWasmData> asm_wasm_data_;
// Holds the feedback vector spec generated during compilation
FeedbackVectorSpec feedback_vector_spec_;
diff --git a/deps/v8/src/unwinder.cc b/deps/v8/src/unwinder.cc
new file mode 100644
index 0000000000..b0b6ee0504
--- /dev/null
+++ b/deps/v8/src/unwinder.cc
@@ -0,0 +1,98 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "include/v8.h"
+#include "src/frame-constants.h"
+#include "src/globals.h"
+
+namespace v8 {
+
+namespace {
+
+bool PCIsInCodeRange(const v8::MemoryRange& code_range, void* pc) {
+ // Given that the length of the memory range is in bytes and it is not
+ // necessarily aligned, we need to do the pointer arithmetic in byte* here.
+ const i::byte* pc_as_byte = reinterpret_cast<i::byte*>(pc);
+ const i::byte* start = reinterpret_cast<const i::byte*>(code_range.start);
+ const i::byte* end = start + code_range.length_in_bytes;
+ return pc_as_byte >= start && pc_as_byte < end;
+}
+
+bool IsInUnsafeJSEntryRange(const v8::JSEntryStub& js_entry_stub, void* pc) {
+ return PCIsInCodeRange(js_entry_stub.code, pc);
+
+ // TODO(petermarshall): We can be more precise by checking whether we are
+ // in JSEntry but after frame setup and before frame teardown, in which case
+ // we are safe to unwind the stack. For now, we bail out if the PC is anywhere
+ // within JSEntry.
+}
+
+i::Address Load(i::Address address) {
+ return *reinterpret_cast<i::Address*>(address);
+}
+
+void* GetReturnAddressFromFP(void* fp) {
+ return reinterpret_cast<void*>(
+ Load(reinterpret_cast<i::Address>(fp) +
+ i::CommonFrameConstants::kCallerPCOffset));
+}
+
+void* GetCallerFPFromFP(void* fp) {
+ return reinterpret_cast<void*>(
+ Load(reinterpret_cast<i::Address>(fp) +
+ i::CommonFrameConstants::kCallerFPOffset));
+}
+
+void* GetCallerSPFromFP(void* fp) {
+ return reinterpret_cast<void*>(reinterpret_cast<i::Address>(fp) +
+ i::CommonFrameConstants::kCallerSPOffset);
+}
+
+bool AddressIsInStack(const void* address, const void* stack_base,
+ const void* stack_top) {
+ return address <= stack_base && address >= stack_top;
+}
+
+} // namespace
+
+bool Unwinder::TryUnwindV8Frames(const UnwindState& unwind_state,
+ RegisterState* register_state,
+ const void* stack_base) {
+ const void* stack_top = register_state->sp;
+
+ void* pc = register_state->pc;
+ if (PCIsInV8(unwind_state, pc) &&
+ !IsInUnsafeJSEntryRange(unwind_state.js_entry_stub, pc)) {
+ void* current_fp = register_state->fp;
+ if (!AddressIsInStack(current_fp, stack_base, stack_top)) return false;
+
+ // Peek at the return address that the caller pushed. If it's in V8, then we
+ // assume the caller frame is a JS frame and continue to unwind.
+ void* next_pc = GetReturnAddressFromFP(current_fp);
+ while (PCIsInV8(unwind_state, next_pc)) {
+ current_fp = GetCallerFPFromFP(current_fp);
+ if (!AddressIsInStack(current_fp, stack_base, stack_top)) return false;
+ next_pc = GetReturnAddressFromFP(current_fp);
+ }
+
+ void* final_sp = GetCallerSPFromFP(current_fp);
+ if (!AddressIsInStack(final_sp, stack_base, stack_top)) return false;
+ register_state->sp = final_sp;
+
+ void* final_fp = GetCallerFPFromFP(current_fp);
+ if (!AddressIsInStack(final_fp, stack_base, stack_top)) return false;
+ register_state->fp = final_fp;
+
+ register_state->pc = next_pc;
+ return true;
+ }
+ return false;
+}
+
+bool Unwinder::PCIsInV8(const UnwindState& unwind_state, void* pc) {
+ return pc && (PCIsInCodeRange(unwind_state.code_range, pc) ||
+ PCIsInCodeRange(unwind_state.embedded_code_range, pc));
+}
+
+} // namespace v8
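The unwinder is a plain frame-pointer walk: follow saved fp links while the return address stays inside V8's code ranges, validating every pointer against [stack_top, stack_base]. A simplified single-step sketch (the fp+0/fp+8 layout is an x64 assumption here; the real code reads offsets from CommonFrameConstants):

#include <cstdint>

struct FrameSketch { void* pc; void* fp; };

bool InStack(const void* p, const void* base, const void* top) {
  return p <= base && p >= top;  // stack grows down: top <= p <= base
}

bool WalkOneFrame(FrameSketch* f, const void* base, const void* top) {
  if (!InStack(f->fp, base, top)) return false;
  uintptr_t fp = reinterpret_cast<uintptr_t>(f->fp);
  f->pc = *reinterpret_cast<void**>(fp + 8);  // caller's return address
  f->fp = *reinterpret_cast<void**>(fp);      // caller's frame pointer
  return true;
}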
diff --git a/deps/v8/src/uri.cc b/deps/v8/src/uri.cc
index 54566bb32e..b3066b9a2a 100644
--- a/deps/v8/src/uri.cc
+++ b/deps/v8/src/uri.cc
@@ -138,7 +138,7 @@ bool IntoOneAndTwoByte(Handle<String> uri, bool is_uri,
std::vector<uint8_t>* one_byte_buffer,
std::vector<uc16>* two_byte_buffer) {
DisallowHeapAllocation no_gc;
- String::FlatContent uri_content = uri->GetFlatContent();
+ String::FlatContent uri_content = uri->GetFlatContent(no_gc);
int uri_length = uri->length();
for (int k = 0; k < uri_length; k++) {
@@ -194,9 +194,11 @@ MaybeHandle<String> Uri::Decode(Isolate* isolate, Handle<String> uri,
isolate, result, isolate->factory()->NewRawTwoByteString(result_length),
String);
- CopyChars(result->GetChars(), one_byte_buffer.data(), one_byte_buffer.size());
- CopyChars(result->GetChars() + one_byte_buffer.size(), two_byte_buffer.data(),
- two_byte_buffer.size());
+ DisallowHeapAllocation no_gc;
+ CopyChars(result->GetChars(no_gc), one_byte_buffer.data(),
+ one_byte_buffer.size());
+ CopyChars(result->GetChars(no_gc) + one_byte_buffer.size(),
+ two_byte_buffer.data(), two_byte_buffer.size());
return result;
}
@@ -279,7 +281,7 @@ MaybeHandle<String> Uri::Encode(Isolate* isolate, Handle<String> uri,
{
DisallowHeapAllocation no_gc;
- String::FlatContent uri_content = uri->GetFlatContent();
+ String::FlatContent uri_content = uri->GetFlatContent(no_gc);
for (int k = 0; k < uri_length; k++) {
uc16 cc1 = uri_content.Get(k);
@@ -307,8 +309,7 @@ MaybeHandle<String> Uri::Encode(Isolate* isolate, Handle<String> uri,
}
}
- return isolate->factory()->NewStringFromOneByte(
- Vector<const uint8_t>(buffer.data(), static_cast<int>(buffer.size())));
+ return isolate->factory()->NewStringFromOneByte(VectorOf(buffer));
}
namespace { // Anonymous namespace for Escape and Unescape
@@ -342,7 +343,7 @@ MaybeHandle<String> UnescapeSlow(Isolate* isolate, Handle<String> string,
int unescaped_length = 0;
{
DisallowHeapAllocation no_allocation;
- Vector<const Char> vector = string->GetCharVector<Char>();
+ Vector<const Char> vector = string->GetCharVector<Char>(no_allocation);
for (int i = start_index; i < length; unescaped_length++) {
int step;
if (UnescapeChar(vector, i, length, &step) >
@@ -365,7 +366,7 @@ MaybeHandle<String> UnescapeSlow(Isolate* isolate, Handle<String> string,
->NewRawOneByteString(unescaped_length)
.ToHandleChecked();
DisallowHeapAllocation no_allocation;
- Vector<const Char> vector = string->GetCharVector<Char>();
+ Vector<const Char> vector = string->GetCharVector<Char>(no_allocation);
for (int i = start_index; i < length; dest_position++) {
int step;
dest->SeqOneByteStringSet(dest_position,
@@ -378,7 +379,7 @@ MaybeHandle<String> UnescapeSlow(Isolate* isolate, Handle<String> string,
->NewRawTwoByteString(unescaped_length)
.ToHandleChecked();
DisallowHeapAllocation no_allocation;
- Vector<const Char> vector = string->GetCharVector<Char>();
+ Vector<const Char> vector = string->GetCharVector<Char>(no_allocation);
for (int i = start_index; i < length; dest_position++) {
int step;
dest->SeqTwoByteStringSet(dest_position,
@@ -415,8 +416,8 @@ static MaybeHandle<String> UnescapePrivate(Isolate* isolate,
int index;
{
DisallowHeapAllocation no_allocation;
- StringSearch<uint8_t, Char> search(isolate, STATIC_CHAR_VECTOR("%"));
- index = search.Search(source->GetCharVector<Char>(), 0);
+ StringSearch<uint8_t, Char> search(isolate, StaticCharVector("%"));
+ index = search.Search(source->GetCharVector<Char>(no_allocation), 0);
if (index < 0) return source;
}
return UnescapeSlow<Char>(isolate, source, index);
@@ -431,7 +432,7 @@ static MaybeHandle<String> EscapePrivate(Isolate* isolate,
{
DisallowHeapAllocation no_allocation;
- Vector<const Char> vector = string->GetCharVector<Char>();
+ Vector<const Char> vector = string->GetCharVector<Char>(no_allocation);
for (int i = 0; i < length; i++) {
uint16_t c = vector[i];
if (c >= 256) {
@@ -459,7 +460,7 @@ static MaybeHandle<String> EscapePrivate(Isolate* isolate,
{
DisallowHeapAllocation no_allocation;
- Vector<const Char> vector = string->GetCharVector<Char>();
+ Vector<const Char> vector = string->GetCharVector<Char>(no_allocation);
for (int i = 0; i < length; i++) {
uint16_t c = vector[i];
if (c >= 256) {
@@ -492,7 +493,7 @@ static MaybeHandle<String> EscapePrivate(Isolate* isolate,
MaybeHandle<String> Uri::Escape(Isolate* isolate, Handle<String> string) {
Handle<String> result;
string = String::Flatten(isolate, string);
- return string->IsOneByteRepresentationUnderneath()
+ return String::IsOneByteRepresentationUnderneath(*string)
? EscapePrivate<uint8_t>(isolate, string)
: EscapePrivate<uc16>(isolate, string);
}
@@ -500,7 +501,7 @@ MaybeHandle<String> Uri::Escape(Isolate* isolate, Handle<String> string) {
MaybeHandle<String> Uri::Unescape(Isolate* isolate, Handle<String> string) {
Handle<String> result;
string = String::Flatten(isolate, string);
- return string->IsOneByteRepresentationUnderneath()
+ return String::IsOneByteRepresentationUnderneath(*string)
? UnescapePrivate<uint8_t>(isolate, string)
: UnescapePrivate<uc16>(isolate, string);
}
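GetFlatContent, GetChars and GetCharVector now take the DisallowHeapAllocation scope as a witness: the raw pointer they return is only valid while GC is forbidden, and the extra parameter turns that into a compile-time requirement. A hedged sketch of the pattern with simplified stand-in types:

struct DisallowHeapAllocation {};  // the real scope also asserts no allocation

class FlatStringSketch {
 public:
  // The unused parameter is proof that a no-GC scope is alive at the call
  // site for as long as the returned pointer is used.
  const char* GetChars(const DisallowHeapAllocation&) const { return data_; }

 private:
  const char* data_ = "";
};

void Use(const FlatStringSketch& s) {
  DisallowHeapAllocation no_gc;
  const char* chars = s.GetChars(no_gc);  // valid while no_gc is in scope
  (void)chars;
}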
diff --git a/deps/v8/src/utils-inl.h b/deps/v8/src/utils-inl.h
index b7108a4361..3627327ff3 100644
--- a/deps/v8/src/utils-inl.h
+++ b/deps/v8/src/utils-inl.h
@@ -32,6 +32,15 @@ class TimedScope {
double* result_;
};
+template <typename Char>
+bool TryAddIndexChar(uint32_t* index, Char c) {
+ if (!IsDecimalDigit(c)) return false;
+ int d = c - '0';
+ if (*index > 429496729U - ((d + 3) >> 3)) return false;
+ *index = (*index) * 10 + d;
+ return true;
+}
+
template <typename Stream>
bool StringToArrayIndex(Stream* stream, uint32_t* index) {
uint16_t ch = stream->GetNext();
@@ -48,12 +57,7 @@ bool StringToArrayIndex(Stream* stream, uint32_t* index) {
int d = ch - '0';
uint32_t result = d;
while (stream->HasMore()) {
- ch = stream->GetNext();
- if (!IsDecimalDigit(ch)) return false;
- d = ch - '0';
- // Check that the new result is below the 32 bit limit.
- if (result > 429496729U - ((d + 3) >> 3)) return false;
- result = (result * 10) + d;
+ if (!TryAddIndexChar(&result, stream->GetNext())) return false;
}
*index = result;
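The guard constant is 4294967295 / 10 = 429496729, and ((d + 3) >> 3) subtracts one more for digits d >= 5, so index * 10 + d can never exceed 4294967294 (the largest valid array index) or wrap a uint32_t. A standalone restatement (TryAddDigit is a hypothetical name):

#include <cstdint>

bool TryAddDigit(uint32_t* index, char c) {
  if (c < '0' || c > '9') return false;
  uint32_t d = static_cast<uint32_t>(c - '0');
  // ((d + 3) >> 3) is 0 for d <= 4 and 1 for d >= 5.
  if (*index > 429496729u - ((d + 3) >> 3)) return false;
  *index = *index * 10 + d;
  return true;
}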
diff --git a/deps/v8/src/utils.cc b/deps/v8/src/utils.cc
index e799e9ad85..e8d84e12c8 100644
--- a/deps/v8/src/utils.cc
+++ b/deps/v8/src/utils.cc
@@ -11,6 +11,7 @@
#include "src/base/functional.h"
#include "src/base/logging.h"
#include "src/base/platform/platform.h"
+#include "src/memcopy.h"
namespace v8 {
namespace internal {
@@ -335,71 +336,6 @@ void StringBuilder::AddFormattedList(const char* format, va_list list) {
}
}
-#if V8_TARGET_ARCH_IA32
-static void MemMoveWrapper(void* dest, const void* src, size_t size) {
- memmove(dest, src, size);
-}
-
-
-// Initialize to library version so we can call this at any time during startup.
-static MemMoveFunction memmove_function = &MemMoveWrapper;
-
-// Defined in codegen-ia32.cc.
-MemMoveFunction CreateMemMoveFunction();
-
-// Copy memory area to disjoint memory area.
-void MemMove(void* dest, const void* src, size_t size) {
- if (size == 0) return;
- // Note: here we rely on dependent reads being ordered. This is true
- // on all architectures we currently support.
- (*memmove_function)(dest, src, size);
-}
-
-#elif V8_OS_POSIX && V8_HOST_ARCH_ARM
-void MemCopyUint16Uint8Wrapper(uint16_t* dest, const uint8_t* src,
- size_t chars) {
- uint16_t* limit = dest + chars;
- while (dest < limit) {
- *dest++ = static_cast<uint16_t>(*src++);
- }
-}
-
-V8_EXPORT_PRIVATE MemCopyUint8Function memcopy_uint8_function =
- &MemCopyUint8Wrapper;
-MemCopyUint16Uint8Function memcopy_uint16_uint8_function =
- &MemCopyUint16Uint8Wrapper;
-// Defined in codegen-arm.cc.
-MemCopyUint8Function CreateMemCopyUint8Function(MemCopyUint8Function stub);
-MemCopyUint16Uint8Function CreateMemCopyUint16Uint8Function(
- MemCopyUint16Uint8Function stub);
-
-#elif V8_OS_POSIX && V8_HOST_ARCH_MIPS
-V8_EXPORT_PRIVATE MemCopyUint8Function memcopy_uint8_function =
- &MemCopyUint8Wrapper;
-// Defined in codegen-mips.cc.
-MemCopyUint8Function CreateMemCopyUint8Function(MemCopyUint8Function stub);
-#endif
-
-
-static bool g_memcopy_functions_initialized = false;
-
-void init_memcopy_functions() {
- if (g_memcopy_functions_initialized) return;
- g_memcopy_functions_initialized = true;
-#if V8_TARGET_ARCH_IA32
- MemMoveFunction generated_memmove = CreateMemMoveFunction();
- if (generated_memmove != nullptr) {
- memmove_function = generated_memmove;
- }
-#elif V8_OS_POSIX && V8_HOST_ARCH_ARM
- memcopy_uint8_function = CreateMemCopyUint8Function(&MemCopyUint8Wrapper);
- memcopy_uint16_uint8_function =
- CreateMemCopyUint16Uint8Function(&MemCopyUint16Uint8Wrapper);
-#elif V8_OS_POSIX && V8_HOST_ARCH_MIPS
- memcopy_uint8_function = CreateMemCopyUint8Function(&MemCopyUint8Wrapper);
-#endif
-}
-
// Returns false iff d is NaN, +0, or -0.
bool DoubleToBoolean(double d) {
IeeeDoubleArchType u;
diff --git a/deps/v8/src/utils.h b/deps/v8/src/utils.h
index 51c22ffd70..7ba6ba487d 100644
--- a/deps/v8/src/utils.h
+++ b/deps/v8/src/utils.h
@@ -1,4 +1,3 @@
-
// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
@@ -61,8 +60,10 @@ inline bool CStringEquals(const char* s1, const char* s2) {
// Checks if value is in range [lower_limit, higher_limit] using a single
// branch.
template <typename T, typename U>
-inline bool IsInRange(T value, U lower_limit, U higher_limit) {
- DCHECK_LE(lower_limit, higher_limit);
+inline constexpr bool IsInRange(T value, U lower_limit, U higher_limit) {
+#if V8_CAN_HAVE_DCHECK_IN_CONSTEXPR
+ DCHECK(lower_limit <= higher_limit);
+#endif
STATIC_ASSERT(sizeof(U) <= sizeof(T));
typedef typename std::make_unsigned<T>::type unsigned_T;
// Use static_cast to support enum classes.
@@ -72,6 +73,12 @@ inline bool IsInRange(T value, U lower_limit, U higher_limit) {
static_cast<unsigned_T>(lower_limit));
}
+// Checks if [index, index+length) is in range [0, max). Note that this check
+// works even if {index+length} would wrap around.
+inline constexpr bool IsInBounds(size_t index, size_t length, size_t max) {
+ return length <= max && index <= (max - length);
+}
+
// X must be a power of 2. Returns the number of trailing zeros.
template <typename T,
typename = typename std::enable_if<std::is_integral<T>::value>::type>
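IsInBounds is written so the comparison cannot wrap: the naive index + length <= max fails when the sum overflows size_t. A small demonstration of that reading:

#include <cstddef>

constexpr bool IsInBoundsSketch(size_t index, size_t length, size_t max) {
  // Checking length first makes max - length safe to compute.
  return length <= max && index <= (max - length);
}

static_assert(!IsInBoundsSketch(static_cast<size_t>(-1), 2, 100),
              "index + length wraps to 1; the naive check would pass");
static_assert(IsInBoundsSketch(98, 2, 100), "a range ending at max is valid");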
@@ -162,20 +169,6 @@ int HandleObjectPointerCompare(const Handle<T>* a, const Handle<T>* b) {
return Compare<T*>(*(*a), *(*b));
}
-
-template <typename T, typename U>
-inline bool IsAligned(T value, U alignment) {
- return (value & (alignment - 1)) == 0;
-}
-
-// Returns true if {addr + offset} is aligned.
-inline bool IsAddressAligned(Address addr,
- intptr_t alignment,
- int offset = 0) {
- return IsAligned(addr + offset, alignment);
-}
-
-
// Returns the maximum of the two parameters.
template <typename T>
constexpr T Max(T a, T b) {
@@ -242,7 +235,11 @@ inline double Modulo(double x, double y) {
// dividend is a zero and divisor is nonzero finite => result equals dividend
if (!(std::isfinite(x) && (!std::isfinite(y) && !std::isnan(y))) &&
!(x == 0 && (y != 0 && std::isfinite(y)))) {
- x = fmod(x, y);
+ double result = fmod(x, y);
+ // Work around an MS bug in the VS CRT on some OS versions; see
+ // https://crbug.com/915045: fmod(-17, +/-1) should equal -0.0 but returns 0.0.
+ if (x < 0 && result == 0) result = -0.0;
+ x = result;
}
return x;
#elif defined(V8_OS_AIX)
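Isolated from the surrounding platform branches, the workaround reads as follows (a sketch, not the full Modulo):

#include <cmath>

double ModuloSketch(double x, double y) {
  double result = std::fmod(x, y);
  // Affected CRTs return +0.0 where IEEE/ECMA semantics require -0.0.
  if (x < 0 && result == 0) result = -0.0;
  return result;
}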
@@ -355,18 +352,20 @@ class BitFieldBase {
}
// Returns a type U with the bit field value encoded.
- static U encode(T value) {
+ static constexpr U encode(T value) {
+#if V8_CAN_HAVE_DCHECK_IN_CONSTEXPR
DCHECK(is_valid(value));
+#endif
return static_cast<U>(value) << shift;
}
// Returns a type U with the bit field value updated.
- static U update(U previous, T value) {
+ static constexpr U update(U previous, T value) {
return (previous & ~kMask) | encode(value);
}
// Extracts the bit field from the value.
- static T decode(U value) {
+ static constexpr T decode(U value) {
return static_cast<T>((value & kMask) >> shift);
}
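Making encode/update/decode constexpr lets bit-field constants fold at compile time. A self-contained sketch of the same pattern:

#include <cstdint>

template <typename T, int kShift, int kSize, typename U = uint32_t>
struct BitFieldSketch {
  static constexpr U kMask = ((U{1} << kSize) - 1) << kShift;
  static constexpr U encode(T value) { return static_cast<U>(value) << kShift; }
  static constexpr U update(U previous, T value) {
    return (previous & ~kMask) | encode(value);
  }
  static constexpr T decode(U value) {
    return static_cast<T>((value & kMask) >> kShift);
  }
};

using Nibble = BitFieldSketch<uint32_t, 4, 4>;  // bits 4..7
static_assert(Nibble::decode(Nibble::encode(9)) == 9, "round-trips");
static_assert(Nibble::update(0xFFu, 0) == 0x0Fu, "touches only its own bits");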
@@ -468,10 +467,10 @@ class BitSetComputer {
// macro definition are omitted here to please the compiler)
//
// #define MAP_FIELDS(V)
-// V(kField1Offset, kPointerSize)
+// V(kField1Offset, kTaggedSize)
// V(kField2Offset, kIntSize)
// V(kField3Offset, kIntSize)
-// V(kField4Offset, kPointerSize)
+// V(kField4Offset, kSystemPointerSize)
// V(kSize, 0)
//
// DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize, MAP_FIELDS)
@@ -484,6 +483,9 @@ class BitSetComputer {
LIST_MACRO(DEFINE_ONE_FIELD_OFFSET) \
};
+// Size of the field defined by DEFINE_FIELD_OFFSET_CONSTANTS
+#define FIELD_SIZE(Name) (Name##End + 1 - Name)
+
// ----------------------------------------------------------------------------
// Hash function.
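FIELD_SIZE works because each V(Name, Size) entry expands to Name followed by NameEnd = Name + Size - 1, so enum auto-increment starts the next field one past NameEnd. A hedged demo of that expansion, using FIELD_SIZE as defined above (DEMO_* names are hypothetical; the real list macro is DEFINE_ONE_FIELD_OFFSET):

#define DEMO_DEFINE_ONE_FIELD_OFFSET(Name, Size) Name, Name##End = Name + (Size)-1,
#define DEMO_FIELDS(V) \
  V(kFieldAOffset, 4)  \
  V(kFieldBOffset, 8)  \
  V(kSize, 0)

enum { DEMO_FIELDS(DEMO_DEFINE_ONE_FIELD_OFFSET) };

static_assert(FIELD_SIZE(kFieldAOffset) == 4, "recovers the declared size");
static_assert(kFieldBOffset == 4 && kSize == 12, "fields are packed in order");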
@@ -531,87 +533,6 @@ inline uint32_t ComputeAddressHash(Address address) {
}
// ----------------------------------------------------------------------------
-// Generated memcpy/memmove
-
-// Initializes the codegen support that depends on CPU features.
-void init_memcopy_functions();
-
-#if defined(V8_TARGET_ARCH_IA32)
-// Limit below which the extra overhead of the MemCopy function is likely
-// to outweigh the benefits of faster copying.
-const int kMinComplexMemCopy = 64;
-
-// Copy memory area. No restrictions.
-V8_EXPORT_PRIVATE void MemMove(void* dest, const void* src, size_t size);
-typedef void (*MemMoveFunction)(void* dest, const void* src, size_t size);
-
-// Keep the distinction of "move" vs. "copy" for the benefit of other
-// architectures.
-V8_INLINE void MemCopy(void* dest, const void* src, size_t size) {
- MemMove(dest, src, size);
-}
-#elif defined(V8_HOST_ARCH_ARM)
-typedef void (*MemCopyUint8Function)(uint8_t* dest, const uint8_t* src,
- size_t size);
-V8_EXPORT_PRIVATE extern MemCopyUint8Function memcopy_uint8_function;
-V8_INLINE void MemCopyUint8Wrapper(uint8_t* dest, const uint8_t* src,
- size_t chars) {
- memcpy(dest, src, chars);
-}
-// For values < 16, the assembler function is slower than the inlined C code.
-const int kMinComplexMemCopy = 16;
-V8_INLINE void MemCopy(void* dest, const void* src, size_t size) {
- (*memcopy_uint8_function)(reinterpret_cast<uint8_t*>(dest),
- reinterpret_cast<const uint8_t*>(src), size);
-}
-V8_EXPORT_PRIVATE V8_INLINE void MemMove(void* dest, const void* src,
- size_t size) {
- memmove(dest, src, size);
-}
-
-typedef void (*MemCopyUint16Uint8Function)(uint16_t* dest, const uint8_t* src,
- size_t size);
-extern MemCopyUint16Uint8Function memcopy_uint16_uint8_function;
-void MemCopyUint16Uint8Wrapper(uint16_t* dest, const uint8_t* src,
- size_t chars);
-// For values < 12, the assembler function is slower than the inlined C code.
-const int kMinComplexConvertMemCopy = 12;
-V8_INLINE void MemCopyUint16Uint8(uint16_t* dest, const uint8_t* src,
- size_t size) {
- (*memcopy_uint16_uint8_function)(dest, src, size);
-}
-#elif defined(V8_HOST_ARCH_MIPS)
-typedef void (*MemCopyUint8Function)(uint8_t* dest, const uint8_t* src,
- size_t size);
-V8_EXPORT_PRIVATE extern MemCopyUint8Function memcopy_uint8_function;
-V8_INLINE void MemCopyUint8Wrapper(uint8_t* dest, const uint8_t* src,
- size_t chars) {
- memcpy(dest, src, chars);
-}
-// For values < 16, the assembler function is slower than the inlined C code.
-const int kMinComplexMemCopy = 16;
-V8_INLINE void MemCopy(void* dest, const void* src, size_t size) {
- (*memcopy_uint8_function)(reinterpret_cast<uint8_t*>(dest),
- reinterpret_cast<const uint8_t*>(src), size);
-}
-V8_EXPORT_PRIVATE V8_INLINE void MemMove(void* dest, const void* src,
- size_t size) {
- memmove(dest, src, size);
-}
-#else
-// Copy memory area to disjoint memory area.
-V8_INLINE void MemCopy(void* dest, const void* src, size_t size) {
- memcpy(dest, src, size);
-}
-V8_EXPORT_PRIVATE V8_INLINE void MemMove(void* dest, const void* src,
- size_t size) {
- memmove(dest, src, size);
-}
-const int kMinComplexMemCopy = 8;
-#endif // V8_TARGET_ARCH_IA32
-
-
-// ----------------------------------------------------------------------------
// Miscellaneous
// Memory offset for lower and higher bits in a 64 bit integer.
@@ -706,13 +627,12 @@ class EmbeddedVector : public Vector<T> {
}
// When copying, make the underlying Vector reference our buffer.
- EmbeddedVector(const EmbeddedVector& rhs)
- : Vector<T>(rhs) {
+ EmbeddedVector(const EmbeddedVector& rhs) V8_NOEXCEPT : Vector<T>(rhs) {
MemCopy(buffer_, rhs.buffer_, sizeof(T) * kSize);
this->set_start(buffer_);
}
- EmbeddedVector& operator=(const EmbeddedVector& rhs) {
+ EmbeddedVector& operator=(const EmbeddedVector& rhs) V8_NOEXCEPT {
if (this == &rhs) return *this;
Vector<T>::operator=(rhs);
MemCopy(buffer_, rhs.buffer_, sizeof(T) * kSize);
@@ -882,42 +802,6 @@ class SimpleStringBuilder {
DISALLOW_IMPLICIT_CONSTRUCTORS(SimpleStringBuilder);
};
-
-// A poor man's version of STL's bitset: A bit set of enums E (without explicit
-// values), fitting into an integral type T.
-template <class E, class T = int>
-class EnumSet {
- public:
- explicit EnumSet(T bits = 0) : bits_(bits) {}
- bool IsEmpty() const { return bits_ == 0; }
- bool Contains(E element) const { return (bits_ & Mask(element)) != 0; }
- bool ContainsAnyOf(const EnumSet& set) const {
- return (bits_ & set.bits_) != 0;
- }
- void Add(E element) { bits_ |= Mask(element); }
- void Add(const EnumSet& set) { bits_ |= set.bits_; }
- void Remove(E element) { bits_ &= ~Mask(element); }
- void Remove(const EnumSet& set) { bits_ &= ~set.bits_; }
- void RemoveAll() { bits_ = 0; }
- void Intersect(const EnumSet& set) { bits_ &= set.bits_; }
- T ToIntegral() const { return bits_; }
- bool operator==(const EnumSet& set) { return bits_ == set.bits_; }
- bool operator!=(const EnumSet& set) { return bits_ != set.bits_; }
- EnumSet operator|(const EnumSet& set) const {
- return EnumSet(bits_ | set.bits_);
- }
-
- private:
- static_assert(std::is_enum<E>::value, "EnumSet can only be used with enums");
-
- T Mask(E element) const {
- DCHECK_GT(sizeof(T) * CHAR_BIT, static_cast<int>(element));
- return T{1} << static_cast<typename std::underlying_type<E>::type>(element);
- }
-
- T bits_;
-};
-
// Bit field extraction.
inline uint32_t unsigned_bitextract_32(int msb, int lsb, uint32_t x) {
return (x >> lsb) & ((1 << (1 + msb - lsb)) - 1);
@@ -979,6 +863,23 @@ INT_1_TO_63_LIST(DECLARE_TRUNCATE_TO_INT_N)
#undef DECLARE_IS_UINT_N
#undef DECLARE_TRUNCATE_TO_INT_N
+// clang-format off
+#define INT_0_TO_127_LIST(V) \
+V(0) V(1) V(2) V(3) V(4) V(5) V(6) V(7) V(8) V(9) \
+V(10) V(11) V(12) V(13) V(14) V(15) V(16) V(17) V(18) V(19) \
+V(20) V(21) V(22) V(23) V(24) V(25) V(26) V(27) V(28) V(29) \
+V(30) V(31) V(32) V(33) V(34) V(35) V(36) V(37) V(38) V(39) \
+V(40) V(41) V(42) V(43) V(44) V(45) V(46) V(47) V(48) V(49) \
+V(50) V(51) V(52) V(53) V(54) V(55) V(56) V(57) V(58) V(59) \
+V(60) V(61) V(62) V(63) V(64) V(65) V(66) V(67) V(68) V(69) \
+V(70) V(71) V(72) V(73) V(74) V(75) V(76) V(77) V(78) V(79) \
+V(80) V(81) V(82) V(83) V(84) V(85) V(86) V(87) V(88) V(89) \
+V(90) V(91) V(92) V(93) V(94) V(95) V(96) V(97) V(98) V(99) \
+V(100) V(101) V(102) V(103) V(104) V(105) V(106) V(107) V(108) V(109) \
+V(110) V(111) V(112) V(113) V(114) V(115) V(116) V(117) V(118) V(119) \
+V(120) V(121) V(122) V(123) V(124) V(125) V(126) V(127)
+// clang-format on
+
class FeedbackSlot {
public:
FeedbackSlot() : id_(kInvalidSlot) {}
@@ -1128,435 +1029,12 @@ int WriteAsCFile(const char* filename, const char* varname,
const char* str, int size, bool verbose = true);
-// ----------------------------------------------------------------------------
-// Memory
-
-// Copies words from |src| to |dst|. The data spans must not overlap.
-template <typename T>
-inline void CopyWords(T* dst, const T* src, size_t num_words) {
- STATIC_ASSERT(sizeof(T) == kPointerSize);
- DCHECK(Min(dst, const_cast<T*>(src)) + num_words <=
- Max(dst, const_cast<T*>(src)));
- DCHECK_GT(num_words, 0);
-
- // Use block copying MemCopy if the segment we're copying is
- // enough to justify the extra call/setup overhead.
- static const size_t kBlockCopyLimit = 16;
-
- if (num_words < kBlockCopyLimit) {
- do {
- num_words--;
- *dst++ = *src++;
- } while (num_words > 0);
- } else {
- MemCopy(dst, src, num_words * kPointerSize);
- }
-}
-
-
-// Copies words from |src| to |dst|. No restrictions.
-template <typename T>
-inline void MoveWords(T* dst, const T* src, size_t num_words) {
- STATIC_ASSERT(sizeof(T) == kPointerSize);
- DCHECK_GT(num_words, 0);
-
- // Use block copying MemCopy if the segment we're copying is
- // enough to justify the extra call/setup overhead.
- static const size_t kBlockCopyLimit = 16;
-
- if (num_words < kBlockCopyLimit &&
- ((dst < src) || (dst >= (src + num_words * kPointerSize)))) {
- T* end = dst + num_words;
- do {
- num_words--;
- *dst++ = *src++;
- } while (num_words > 0);
- } else {
- MemMove(dst, src, num_words * kPointerSize);
- }
-}
-
-
-// Copies data from |src| to |dst|. The data spans must not overlap.
-template <typename T>
-inline void CopyBytes(T* dst, const T* src, size_t num_bytes) {
- STATIC_ASSERT(sizeof(T) == 1);
- DCHECK(Min(dst, const_cast<T*>(src)) + num_bytes <=
- Max(dst, const_cast<T*>(src)));
- if (num_bytes == 0) return;
-
- // Use block copying MemCopy if the segment we're copying is
- // enough to justify the extra call/setup overhead.
- static const int kBlockCopyLimit = kMinComplexMemCopy;
-
- if (num_bytes < static_cast<size_t>(kBlockCopyLimit)) {
- do {
- num_bytes--;
- *dst++ = *src++;
- } while (num_bytes > 0);
- } else {
- MemCopy(dst, src, num_bytes);
- }
-}
-
-
-template <typename T, typename U>
-inline void MemsetPointer(T** dest, U* value, int counter) {
-#ifdef DEBUG
- T* a = nullptr;
- U* b = nullptr;
- a = b; // Fake assignment to check assignability.
- USE(a);
-#endif // DEBUG
-#if V8_HOST_ARCH_IA32
-#define STOS "stosl"
-#elif V8_HOST_ARCH_X64
-#if V8_HOST_ARCH_32_BIT
-#define STOS "addr32 stosl"
-#else
-#define STOS "stosq"
-#endif
-#endif
-
-#if defined(MEMORY_SANITIZER)
- // MemorySanitizer does not understand inline assembly.
-#undef STOS
-#endif
-
-#if defined(__GNUC__) && defined(STOS)
- asm volatile(
- "cld;"
- "rep ; " STOS
- : "+&c" (counter), "+&D" (dest)
- : "a" (value)
- : "memory", "cc");
-#else
- for (int i = 0; i < counter; i++) {
- dest[i] = value;
- }
-#endif
-
-#undef STOS
-}
-
// Simple support to read a file into std::string.
// On return, *exits tells whether the file existed.
V8_EXPORT_PRIVATE std::string ReadFile(const char* filename, bool* exists,
bool verbose = true);
std::string ReadFile(FILE* file, bool* exists, bool verbose = true);
-template <typename sourcechar, typename sinkchar>
-V8_INLINE static void CopyCharsUnsigned(sinkchar* dest, const sourcechar* src,
- size_t chars);
-#if defined(V8_HOST_ARCH_ARM)
-V8_INLINE void CopyCharsUnsigned(uint8_t* dest, const uint8_t* src,
- size_t chars);
-V8_INLINE void CopyCharsUnsigned(uint16_t* dest, const uint8_t* src,
- size_t chars);
-V8_INLINE void CopyCharsUnsigned(uint16_t* dest, const uint16_t* src,
- size_t chars);
-#elif defined(V8_HOST_ARCH_MIPS)
-V8_INLINE void CopyCharsUnsigned(uint8_t* dest, const uint8_t* src,
- size_t chars);
-V8_INLINE void CopyCharsUnsigned(uint16_t* dest, const uint16_t* src,
- size_t chars);
-#elif defined(V8_HOST_ARCH_PPC) || defined(V8_HOST_ARCH_S390)
-V8_INLINE void CopyCharsUnsigned(uint8_t* dest, const uint8_t* src,
- size_t chars);
-V8_INLINE void CopyCharsUnsigned(uint16_t* dest, const uint16_t* src,
- size_t chars);
-#endif
-
-// Copy from 8bit/16bit chars to 8bit/16bit chars.
-template <typename sourcechar, typename sinkchar>
-V8_INLINE void CopyChars(sinkchar* dest, const sourcechar* src, size_t chars);
-
-template <typename sourcechar, typename sinkchar>
-void CopyChars(sinkchar* dest, const sourcechar* src, size_t chars) {
- DCHECK_LE(sizeof(sourcechar), 2);
- DCHECK_LE(sizeof(sinkchar), 2);
- if (sizeof(sinkchar) == 1) {
- if (sizeof(sourcechar) == 1) {
- CopyCharsUnsigned(reinterpret_cast<uint8_t*>(dest),
- reinterpret_cast<const uint8_t*>(src),
- chars);
- } else {
- CopyCharsUnsigned(reinterpret_cast<uint8_t*>(dest),
- reinterpret_cast<const uint16_t*>(src),
- chars);
- }
- } else {
- if (sizeof(sourcechar) == 1) {
- CopyCharsUnsigned(reinterpret_cast<uint16_t*>(dest),
- reinterpret_cast<const uint8_t*>(src),
- chars);
- } else {
- CopyCharsUnsigned(reinterpret_cast<uint16_t*>(dest),
- reinterpret_cast<const uint16_t*>(src),
- chars);
- }
- }
-}
-
-template <typename sourcechar, typename sinkchar>
-void CopyCharsUnsigned(sinkchar* dest, const sourcechar* src, size_t chars) {
- sinkchar* limit = dest + chars;
- if ((sizeof(*dest) == sizeof(*src)) &&
- (chars >= static_cast<int>(kMinComplexMemCopy / sizeof(*dest)))) {
- MemCopy(dest, src, chars * sizeof(*dest));
- } else {
- while (dest < limit) *dest++ = static_cast<sinkchar>(*src++);
- }
-}
-
-
-#if defined(V8_HOST_ARCH_ARM)
-void CopyCharsUnsigned(uint8_t* dest, const uint8_t* src, size_t chars) {
- switch (static_cast<unsigned>(chars)) {
- case 0:
- break;
- case 1:
- *dest = *src;
- break;
- case 2:
- memcpy(dest, src, 2);
- break;
- case 3:
- memcpy(dest, src, 3);
- break;
- case 4:
- memcpy(dest, src, 4);
- break;
- case 5:
- memcpy(dest, src, 5);
- break;
- case 6:
- memcpy(dest, src, 6);
- break;
- case 7:
- memcpy(dest, src, 7);
- break;
- case 8:
- memcpy(dest, src, 8);
- break;
- case 9:
- memcpy(dest, src, 9);
- break;
- case 10:
- memcpy(dest, src, 10);
- break;
- case 11:
- memcpy(dest, src, 11);
- break;
- case 12:
- memcpy(dest, src, 12);
- break;
- case 13:
- memcpy(dest, src, 13);
- break;
- case 14:
- memcpy(dest, src, 14);
- break;
- case 15:
- memcpy(dest, src, 15);
- break;
- default:
- MemCopy(dest, src, chars);
- break;
- }
-}
-
-
-void CopyCharsUnsigned(uint16_t* dest, const uint8_t* src, size_t chars) {
- if (chars >= static_cast<size_t>(kMinComplexConvertMemCopy)) {
- MemCopyUint16Uint8(dest, src, chars);
- } else {
- MemCopyUint16Uint8Wrapper(dest, src, chars);
- }
-}
-
-
-void CopyCharsUnsigned(uint16_t* dest, const uint16_t* src, size_t chars) {
- switch (static_cast<unsigned>(chars)) {
- case 0:
- break;
- case 1:
- *dest = *src;
- break;
- case 2:
- memcpy(dest, src, 4);
- break;
- case 3:
- memcpy(dest, src, 6);
- break;
- case 4:
- memcpy(dest, src, 8);
- break;
- case 5:
- memcpy(dest, src, 10);
- break;
- case 6:
- memcpy(dest, src, 12);
- break;
- case 7:
- memcpy(dest, src, 14);
- break;
- default:
- MemCopy(dest, src, chars * sizeof(*dest));
- break;
- }
-}
-
-
-#elif defined(V8_HOST_ARCH_MIPS)
-void CopyCharsUnsigned(uint8_t* dest, const uint8_t* src, size_t chars) {
- if (chars < kMinComplexMemCopy) {
- memcpy(dest, src, chars);
- } else {
- MemCopy(dest, src, chars);
- }
-}
-
-void CopyCharsUnsigned(uint16_t* dest, const uint16_t* src, size_t chars) {
- if (chars < kMinComplexMemCopy) {
- memcpy(dest, src, chars * sizeof(*dest));
- } else {
- MemCopy(dest, src, chars * sizeof(*dest));
- }
-}
-#elif defined(V8_HOST_ARCH_PPC) || defined(V8_HOST_ARCH_S390)
-#define CASE(n) \
- case n: \
- memcpy(dest, src, n); \
- break
-void CopyCharsUnsigned(uint8_t* dest, const uint8_t* src, size_t chars) {
- switch (static_cast<unsigned>(chars)) {
- case 0:
- break;
- case 1:
- *dest = *src;
- break;
- CASE(2);
- CASE(3);
- CASE(4);
- CASE(5);
- CASE(6);
- CASE(7);
- CASE(8);
- CASE(9);
- CASE(10);
- CASE(11);
- CASE(12);
- CASE(13);
- CASE(14);
- CASE(15);
- CASE(16);
- CASE(17);
- CASE(18);
- CASE(19);
- CASE(20);
- CASE(21);
- CASE(22);
- CASE(23);
- CASE(24);
- CASE(25);
- CASE(26);
- CASE(27);
- CASE(28);
- CASE(29);
- CASE(30);
- CASE(31);
- CASE(32);
- CASE(33);
- CASE(34);
- CASE(35);
- CASE(36);
- CASE(37);
- CASE(38);
- CASE(39);
- CASE(40);
- CASE(41);
- CASE(42);
- CASE(43);
- CASE(44);
- CASE(45);
- CASE(46);
- CASE(47);
- CASE(48);
- CASE(49);
- CASE(50);
- CASE(51);
- CASE(52);
- CASE(53);
- CASE(54);
- CASE(55);
- CASE(56);
- CASE(57);
- CASE(58);
- CASE(59);
- CASE(60);
- CASE(61);
- CASE(62);
- CASE(63);
- CASE(64);
- default:
- memcpy(dest, src, chars);
- break;
- }
-}
-#undef CASE
-
-#define CASE(n) \
- case n: \
- memcpy(dest, src, n * 2); \
- break
-void CopyCharsUnsigned(uint16_t* dest, const uint16_t* src, size_t chars) {
- switch (static_cast<unsigned>(chars)) {
- case 0:
- break;
- case 1:
- *dest = *src;
- break;
- CASE(2);
- CASE(3);
- CASE(4);
- CASE(5);
- CASE(6);
- CASE(7);
- CASE(8);
- CASE(9);
- CASE(10);
- CASE(11);
- CASE(12);
- CASE(13);
- CASE(14);
- CASE(15);
- CASE(16);
- CASE(17);
- CASE(18);
- CASE(19);
- CASE(20);
- CASE(21);
- CASE(22);
- CASE(23);
- CASE(24);
- CASE(25);
- CASE(26);
- CASE(27);
- CASE(28);
- CASE(29);
- CASE(30);
- CASE(31);
- CASE(32);
- default:
- memcpy(dest, src, chars * 2);
- break;
- }
-}
-#undef CASE
-#endif
-
-
class StringBuilder : public SimpleStringBuilder {
public:
explicit StringBuilder(int size) : SimpleStringBuilder(size) { }
@@ -1575,6 +1053,9 @@ class StringBuilder : public SimpleStringBuilder {
bool DoubleToBoolean(double d);
+template <typename Char>
+bool TryAddIndexChar(uint32_t* index, Char c);
+
template <typename Stream>
bool StringToArrayIndex(Stream* stream, uint32_t* index);
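
TryAddIndexChar is only declared here. As a sketch of the usual contract (fold one decimal digit into a uint32_t array index, rejecting non-digits and overflow), the following is an illustrative assumption, not V8's definition:

    #include <cassert>
    #include <cstdint>

    // Hypothetical sketch only: fold one character into a decimal array index.
    // JS array indices stay below 2^32 - 1, so values above 0xfffffffe fail.
    template <typename Char>
    bool TryAddIndexCharSketch(uint32_t* index, Char c) {
      if (c < '0' || c > '9') return false;
      uint32_t d = static_cast<uint32_t>(c) - '0';
      if (*index > (0xfffffffeu - d) / 10) return false;  // would overflow
      *index = *index * 10 + d;
      return true;
    }

    int main() {
      uint32_t index = 0;
      for (char c : {'4', '2'}) assert(TryAddIndexCharSketch(&index, c));
      assert(index == 42);
    }
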
diff --git a/deps/v8/src/v8.cc b/deps/v8/src/v8.cc
index 98a807963c..ee2a3ba8ce 100644
--- a/deps/v8/src/v8.cc
+++ b/deps/v8/src/v8.cc
@@ -11,6 +11,7 @@
#include "src/base/once.h"
#include "src/base/platform/platform.h"
#include "src/bootstrapper.h"
+#include "src/cpu-features.h"
#include "src/debug/debug.h"
#include "src/deoptimizer.h"
#include "src/elements.h"
@@ -55,7 +56,6 @@ void V8::TearDown() {
Bootstrapper::TearDownExtensions();
ElementsAccessor::TearDown();
RegisteredExtension::UnregisterAll();
- sampler::Sampler::TearDown();
FlagList::ResetAllFlags(); // Frees memory held by string arguments.
}
@@ -89,7 +89,6 @@ void V8::InitializeOncePerProcessImpl() {
#if defined(USE_SIMULATOR)
Simulator::InitializeOncePerProcess();
#endif
- sampler::Sampler::SetUp();
CpuFeatures::Probe(false);
ElementsAccessor::InitializeOncePerProcess();
Bootstrapper::InitializeOncePerProcess();
diff --git a/deps/v8/src/v8threads.cc b/deps/v8/src/v8threads.cc
index 0fb333c1f3..67fa40804b 100644
--- a/deps/v8/src/v8threads.cc
+++ b/deps/v8/src/v8threads.cc
@@ -238,12 +238,13 @@ ThreadState* ThreadState::Next() {
 // Thread ids must start with 1, because in TLS having thread id 0 can't
 // be distinguished from not having a thread id at all (since NULL is
 // defined as 0).
-ThreadManager::ThreadManager()
+ThreadManager::ThreadManager(Isolate* isolate)
: mutex_owner_(ThreadId::Invalid()),
lazily_archived_thread_(ThreadId::Invalid()),
lazily_archived_thread_state_(nullptr),
free_anchor_(nullptr),
- in_use_anchor_(nullptr) {
+ in_use_anchor_(nullptr),
+ isolate_(isolate) {
free_anchor_ = new ThreadState(this);
in_use_anchor_ = new ThreadState(this);
}
diff --git a/deps/v8/src/v8threads.h b/deps/v8/src/v8threads.h
index ac32b7465e..faaffaa61f 100644
--- a/deps/v8/src/v8threads.h
+++ b/deps/v8/src/v8threads.h
@@ -88,7 +88,7 @@ class ThreadManager {
ThreadState* GetFreeThreadState();
private:
- ThreadManager();
+ explicit ThreadManager(Isolate* isolate);
~ThreadManager();
void DeleteThreadStateList(ThreadState* anchor);
diff --git a/deps/v8/src/value-serializer.cc b/deps/v8/src/value-serializer.cc
index 3d80634c97..4075fe9d63 100644
--- a/deps/v8/src/value-serializer.cc
+++ b/deps/v8/src/value-serializer.cc
@@ -16,10 +16,13 @@
#include "src/isolate.h"
#include "src/maybe-handles-inl.h"
#include "src/objects-inl.h"
+#include "src/objects/heap-number-inl.h"
#include "src/objects/js-array-inl.h"
#include "src/objects/js-collection-inl.h"
#include "src/objects/js-regexp-inl.h"
+#include "src/objects/oddball-inl.h"
#include "src/objects/ordered-hash-table-inl.h"
+#include "src/objects/smi.h"
#include "src/snapshot/code-serializer.h"
#include "src/transitions.h"
#include "src/wasm/wasm-engine.h"
@@ -264,7 +267,7 @@ void ValueSerializer::WriteTwoByteString(Vector<const uc16> chars) {
WriteRawBytes(chars.begin(), chars.length() * sizeof(uc16));
}
-void ValueSerializer::WriteBigIntContents(BigInt* bigint) {
+void ValueSerializer::WriteBigIntContents(BigInt bigint) {
uint32_t bitfield = bigint->GetBitfieldForSerialization();
int bytelength = BigInt::DigitsByteLengthForBitfield(bitfield);
WriteVarint<uint32_t>(bitfield);
@@ -326,10 +329,6 @@ void ValueSerializer::WriteUint64(uint64_t value) {
WriteVarint<uint64_t>(value);
}
-std::vector<uint8_t> ValueSerializer::ReleaseBuffer() {
- return std::vector<uint8_t>(buffer_, buffer_ + buffer_size_);
-}
-
std::pair<uint8_t*, size_t> ValueSerializer::Release() {
auto result = std::make_pair(buffer_, buffer_size_);
buffer_ = nullptr;
@@ -346,7 +345,11 @@ void ValueSerializer::TransferArrayBuffer(uint32_t transfer_id,
}
Maybe<bool> ValueSerializer::WriteObject(Handle<Object> object) {
- out_of_memory_ = false;
+ // There is no sense in trying to proceed if we've previously run out of
+ // memory. Bail immediately, as this likely implies that some write has
+ // previously failed and so the buffer is corrupt.
+ if (V8_UNLIKELY(out_of_memory_)) return ThrowIfOutOfMemory();
+
if (object->IsSmi()) {
WriteSmi(Smi::cast(*object));
return ThrowIfOutOfMemory();
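
The early bail added above makes out_of_memory_ sticky: one failed write poisons every subsequent WriteObject call rather than risking a corrupt buffer. A minimal standalone sketch of that pattern, with invented names:

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Sketch of the sticky-failure pattern: once a write fails, every later
    // write bails immediately instead of touching a possibly-corrupt buffer.
    class Sink {
     public:
      bool Write(uint8_t byte) {
        if (out_of_memory_) return false;  // a previous write failed: bail now
        if (buffer_.size() >= kLimit) {
          out_of_memory_ = true;           // poison all future writes
          return false;
        }
        buffer_.push_back(byte);
        return true;
      }

     private:
      static constexpr std::size_t kLimit = 1 << 20;  // illustrative cap
      std::vector<uint8_t> buffer_;
      bool out_of_memory_ = false;
    };

    int main() {
      Sink sink;
      while (sink.Write(0)) {}  // eventually trips the cap and stays failed
      return sink.Write(0) ? 1 : 0;
    }
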
@@ -396,7 +399,7 @@ Maybe<bool> ValueSerializer::WriteObject(Handle<Object> object) {
}
}
-void ValueSerializer::WriteOddball(Oddball* oddball) {
+void ValueSerializer::WriteOddball(Oddball oddball) {
SerializationTag tag = SerializationTag::kUndefined;
switch (oddball->kind()) {
case Oddball::kUndefined:
@@ -418,23 +421,23 @@ void ValueSerializer::WriteOddball(Oddball* oddball) {
WriteTag(tag);
}
-void ValueSerializer::WriteSmi(Smi* smi) {
+void ValueSerializer::WriteSmi(Smi smi) {
static_assert(kSmiValueSize <= 32, "Expected SMI <= 32 bits.");
WriteTag(SerializationTag::kInt32);
WriteZigZag<int32_t>(smi->value());
}
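
WriteSmi stores the value zigzag-encoded, so small magnitudes of either sign stay short under the varint that follows. A standalone sketch of the standard encoding (not V8's exact WriteZigZag template):

    #include <cassert>
    #include <cstdint>

    // Standard zigzag: 0 -> 0, -1 -> 1, 1 -> 2, -2 -> 3, ... so small
    // magnitudes of either sign encode to few varint bytes.
    uint32_t ZigZagEncode(int32_t v) {
      return (static_cast<uint32_t>(v) << 1) ^ static_cast<uint32_t>(v >> 31);
    }
    int32_t ZigZagDecode(uint32_t v) {
      return static_cast<int32_t>((v >> 1) ^ (~(v & 1) + 1));
    }

    int main() {
      for (int32_t v : {0, -1, 1, -2, 2, INT32_MIN, INT32_MAX})
        assert(ZigZagDecode(ZigZagEncode(v)) == v);
    }
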
-void ValueSerializer::WriteHeapNumber(HeapNumber* number) {
+void ValueSerializer::WriteHeapNumber(HeapNumber number) {
WriteTag(SerializationTag::kDouble);
WriteDouble(number->value());
}
-void ValueSerializer::WriteMutableHeapNumber(MutableHeapNumber* number) {
+void ValueSerializer::WriteMutableHeapNumber(MutableHeapNumber number) {
WriteTag(SerializationTag::kDouble);
WriteDouble(number->value());
}
-void ValueSerializer::WriteBigInt(BigInt* bigint) {
+void ValueSerializer::WriteBigInt(BigInt bigint) {
WriteTag(SerializationTag::kBigInt);
WriteBigIntContents(bigint);
}
@@ -442,7 +445,7 @@ void ValueSerializer::WriteBigInt(BigInt* bigint) {
void ValueSerializer::WriteString(Handle<String> string) {
string = String::Flatten(isolate_, string);
DisallowHeapAllocation no_gc;
- String::FlatContent flat = string->GetFlatContent();
+ String::FlatContent flat = string->GetFlatContent(no_gc);
DCHECK(flat.IsFlat());
if (flat.IsOneByte()) {
Vector<const uint8_t> chars = flat.ToOneByteVector();
@@ -517,12 +520,14 @@ Maybe<bool> ValueSerializer::WriteJSReceiver(Handle<JSReceiver> receiver) {
case JS_TYPED_ARRAY_TYPE:
case JS_DATA_VIEW_TYPE:
return WriteJSArrayBufferView(JSArrayBufferView::cast(*receiver));
- case WASM_MODULE_TYPE:
- if (!FLAG_wasm_disable_structured_cloning) {
+ case WASM_MODULE_TYPE: {
+ auto enabled_features = wasm::WasmFeaturesFromIsolate(isolate_);
+ if (!FLAG_wasm_disable_structured_cloning || enabled_features.threads) {
// Only write WebAssembly modules if not disabled by a flag.
return WriteWasmModule(Handle<WasmModuleObject>::cast(receiver));
}
break;
+ }
case WASM_MEMORY_TYPE: {
auto enabled_features = wasm::WasmFeaturesFromIsolate(isolate_);
if (enabled_features.threads) {
@@ -711,13 +716,13 @@ Maybe<bool> ValueSerializer::WriteJSArray(Handle<JSArray> array) {
return ThrowIfOutOfMemory();
}
-void ValueSerializer::WriteJSDate(JSDate* date) {
+void ValueSerializer::WriteJSDate(JSDate date) {
WriteTag(SerializationTag::kDate);
WriteDouble(date->value()->Number());
}
Maybe<bool> ValueSerializer::WriteJSValue(Handle<JSValue> value) {
- Object* inner_value = value->value();
+ Object inner_value = value->value();
if (inner_value->IsTrue(isolate_)) {
WriteTag(SerializationTag::kTrueObject);
} else if (inner_value->IsFalse(isolate_)) {
@@ -739,7 +744,7 @@ Maybe<bool> ValueSerializer::WriteJSValue(Handle<JSValue> value) {
return ThrowIfOutOfMemory();
}
-void ValueSerializer::WriteJSRegExp(JSRegExp* regexp) {
+void ValueSerializer::WriteJSRegExp(JSRegExp regexp) {
WriteTag(SerializationTag::kRegExp);
WriteString(handle(regexp->Pattern(), isolate_));
WriteVarint(static_cast<uint32_t>(regexp->GetFlags()));
@@ -752,11 +757,11 @@ Maybe<bool> ValueSerializer::WriteJSMap(Handle<JSMap> map) {
Handle<FixedArray> entries = isolate_->factory()->NewFixedArray(length);
{
DisallowHeapAllocation no_gc;
- Oddball* the_hole = ReadOnlyRoots(isolate_).the_hole_value();
+ Oddball the_hole = ReadOnlyRoots(isolate_).the_hole_value();
int capacity = table->UsedCapacity();
int result_index = 0;
for (int i = 0; i < capacity; i++) {
- Object* key = table->KeyAt(i);
+ Object key = table->KeyAt(i);
if (key == the_hole) continue;
entries->set(result_index++, key);
entries->set(result_index++, table->ValueAt(i));
@@ -783,11 +788,11 @@ Maybe<bool> ValueSerializer::WriteJSSet(Handle<JSSet> set) {
Handle<FixedArray> entries = isolate_->factory()->NewFixedArray(length);
{
DisallowHeapAllocation no_gc;
- Oddball* the_hole = ReadOnlyRoots(isolate_).the_hole_value();
+ Oddball the_hole = ReadOnlyRoots(isolate_).the_hole_value();
int capacity = table->UsedCapacity();
int result_index = 0;
for (int i = 0; i < capacity; i++) {
- Object* key = table->KeyAt(i);
+ Object key = table->KeyAt(i);
if (key == the_hole) continue;
entries->set(result_index++, key);
}
@@ -830,8 +835,8 @@ Maybe<bool> ValueSerializer::WriteJSArrayBuffer(
WriteVarint(*transfer_entry);
return ThrowIfOutOfMemory();
}
- if (array_buffer->was_neutered()) {
- ThrowDataCloneError(MessageTemplate::kDataCloneErrorNeuteredArrayBuffer);
+ if (array_buffer->was_detached()) {
+ ThrowDataCloneError(MessageTemplate::kDataCloneErrorDetachedArrayBuffer);
return Nothing<bool>();
}
double byte_length = array_buffer->byte_length();
@@ -845,7 +850,7 @@ Maybe<bool> ValueSerializer::WriteJSArrayBuffer(
return ThrowIfOutOfMemory();
}
-Maybe<bool> ValueSerializer::WriteJSArrayBufferView(JSArrayBufferView* view) {
+Maybe<bool> ValueSerializer::WriteJSArrayBufferView(JSArrayBufferView view) {
if (treat_array_buffer_views_as_host_objects_) {
return WriteHostObject(handle(view, isolate_));
}
@@ -875,7 +880,7 @@ Maybe<bool> ValueSerializer::WriteWasmModule(Handle<WasmModuleObject> object) {
// TODO(titzer): introduce a Utils::ToLocal for WasmModuleObject.
Maybe<uint32_t> transfer_id = delegate_->GetWasmModuleTransferId(
reinterpret_cast<v8::Isolate*>(isolate_),
- v8::Local<v8::WasmCompiledModule>::Cast(
+ v8::Local<v8::WasmModuleObject>::Cast(
Utils::ToLocal(Handle<JSObject>::cast(object))));
RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate_, Nothing<bool>());
uint32_t id = 0;
@@ -898,7 +903,7 @@ Maybe<bool> ValueSerializer::WriteWasmModule(Handle<WasmModuleObject> object) {
memcpy(destination, wire_bytes.start(), wire_bytes.size());
}
- wasm::WasmSerializer wasm_serializer(isolate_, native_module);
+ wasm::WasmSerializer wasm_serializer(native_module);
size_t module_size = wasm_serializer.GetSerializedNativeModuleSize();
CHECK_GE(std::numeric_limits<uint32_t>::max(), module_size);
WriteVarint<uint32_t>(static_cast<uint32_t>(module_size));
@@ -933,8 +938,10 @@ Maybe<bool> ValueSerializer::WriteHostObject(Handle<JSObject> object) {
Maybe<bool> result =
delegate_->WriteHostObject(v8_isolate, Utils::ToLocal(object));
RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate_, Nothing<bool>());
+ USE(result);
DCHECK(!result.IsNothing());
- return result;
+ DCHECK(result.ToChecked());
+ return ThrowIfOutOfMemory();
}
Maybe<uint32_t> ValueSerializer::WriteJSObjectPropertiesSlow(
@@ -965,8 +972,7 @@ Maybe<uint32_t> ValueSerializer::WriteJSObjectPropertiesSlow(
return Just(properties_written);
}
-void ValueSerializer::ThrowDataCloneError(
- MessageTemplate::Template template_index) {
+void ValueSerializer::ThrowDataCloneError(MessageTemplate template_index) {
return ThrowDataCloneError(template_index,
isolate_->factory()->empty_string());
}
@@ -979,10 +985,10 @@ Maybe<bool> ValueSerializer::ThrowIfOutOfMemory() {
return Just(true);
}
-void ValueSerializer::ThrowDataCloneError(
- MessageTemplate::Template template_index, Handle<Object> arg0) {
+void ValueSerializer::ThrowDataCloneError(MessageTemplate index,
+ Handle<Object> arg0) {
Handle<String> message =
- MessageTemplate::FormatMessage(isolate_, template_index, arg0);
+ MessageFormatter::FormatMessage(isolate_, index, arg0);
if (delegate_) {
delegate_->ThrowDataCloneError(Utils::ToLocal(message));
} else {
@@ -1006,7 +1012,7 @@ ValueDeserializer::ValueDeserializer(Isolate* isolate,
ReadOnlyRoots(isolate_).empty_fixed_array())) {}
ValueDeserializer::~ValueDeserializer() {
- GlobalHandles::Destroy(Handle<Object>::cast(id_map_).location());
+ GlobalHandles::Destroy(id_map_.location());
Handle<Object> transfer_map_handle;
if (array_buffer_transfer_map_.ToHandle(&transfer_map_handle)) {
@@ -1140,7 +1146,7 @@ void ValueDeserializer::TransferArrayBuffer(
Handle<SimpleNumberDictionary> new_dictionary = SimpleNumberDictionary::Set(
isolate_, dictionary, transfer_id, array_buffer);
if (!new_dictionary.is_identical_to(dictionary)) {
- GlobalHandles::Destroy(Handle<Object>::cast(dictionary).location());
+ GlobalHandles::Destroy(dictionary.location());
array_buffer_transfer_map_ =
isolate_->global_handles()->Create(*new_dictionary);
}
@@ -1148,6 +1154,9 @@ void ValueDeserializer::TransferArrayBuffer(
MaybeHandle<Object> ValueDeserializer::ReadObject() {
DisallowJavascriptExecution no_js(isolate_);
+ // If we are at the end of the stack, abort. This function may recurse.
+ STACK_CHECK(isolate_, MaybeHandle<Object>());
+
MaybeHandle<Object> result = ReadObjectInternal();
// ArrayBufferView is special in that it consumes the value before it, even
@@ -1333,7 +1342,8 @@ MaybeHandle<String> ValueDeserializer::ReadTwoByteString() {
// Copy the bytes directly into the new string.
// Warning: this uses host endianness.
- memcpy(string->GetChars(), bytes.begin(), bytes.length());
+ DisallowHeapAllocation no_gc;
+ memcpy(string->GetChars(no_gc), bytes.begin(), bytes.length());
return string;
}
@@ -1353,7 +1363,7 @@ bool ValueDeserializer::ReadExpectedString(Handle<String> expected) {
return false;
}
- String::FlatContent flat = expected->GetFlatContent();
+ String::FlatContent flat = expected->GetFlatContent(no_gc);
// If the bytes are verbatim what is in the flattened string, then the string
// is successfully consumed.
@@ -1471,7 +1481,7 @@ MaybeHandle<JSArray> ValueDeserializer::ReadDenseJSArray() {
// hole. Past version 11, undefined means undefined.
if (version_ < 11 && element->IsUndefined(isolate_)) continue;
- // Make sure elements is still large enough.
+ // Safety check.
if (i >= static_cast<uint32_t>(elements->length())) {
return MaybeHandle<JSArray>();
}
@@ -1755,7 +1765,9 @@ MaybeHandle<JSArrayBufferView> ValueDeserializer::ReadJSArrayBufferView(
}
MaybeHandle<JSObject> ValueDeserializer::ReadWasmModuleTransfer() {
- if (FLAG_wasm_disable_structured_cloning || expect_inline_wasm()) {
+ auto enabled_features = wasm::WasmFeaturesFromIsolate(isolate_);
+ if ((FLAG_wasm_disable_structured_cloning && !enabled_features.threads) ||
+ expect_inline_wasm()) {
return MaybeHandle<JSObject>();
}
@@ -1777,7 +1789,9 @@ MaybeHandle<JSObject> ValueDeserializer::ReadWasmModuleTransfer() {
}
MaybeHandle<JSObject> ValueDeserializer::ReadWasmModule() {
- if (FLAG_wasm_disable_structured_cloning || !expect_inline_wasm()) {
+ auto enabled_features = wasm::WasmFeaturesFromIsolate(isolate_);
+ if ((FLAG_wasm_disable_structured_cloning && !enabled_features.threads) ||
+ !expect_inline_wasm()) {
return MaybeHandle<JSObject>();
}
@@ -1878,7 +1892,7 @@ static void CommitProperties(Handle<JSObject> object, Handle<Map> map,
DCHECK(!object->map()->is_dictionary_map());
DisallowHeapAllocation no_gc;
- DescriptorArray* descriptors = object->map()->instance_descriptors();
+ DescriptorArray descriptors = object->map()->instance_descriptors();
for (unsigned i = 0; i < properties.size(); i++) {
// Initializing store.
object->WriteToField(i, descriptors->GetDetails(i), *properties[i]);
@@ -1984,8 +1998,7 @@ Maybe<uint32_t> ValueDeserializer::ReadJSObjectProperties(
bool success;
LookupIterator it = LookupIterator::PropertyOrElement(
isolate_, object, key, &success, LookupIterator::OWN);
- CHECK_EQ(LookupIterator::NOT_FOUND, it.state());
- if (!success ||
+ if (!success || it.state() != LookupIterator::NOT_FOUND ||
JSObject::DefineOwnPropertyIgnoreAttributes(&it, value, NONE)
.is_null()) {
return Nothing<uint32_t>();
@@ -2019,8 +2032,7 @@ Maybe<uint32_t> ValueDeserializer::ReadJSObjectProperties(
bool success;
LookupIterator it = LookupIterator::PropertyOrElement(
isolate_, object, key, &success, LookupIterator::OWN);
- CHECK_EQ(LookupIterator::NOT_FOUND, it.state());
- if (!success ||
+ if (!success || it.state() != LookupIterator::NOT_FOUND ||
JSObject::DefineOwnPropertyIgnoreAttributes(&it, value, NONE)
.is_null()) {
return Nothing<uint32_t>();
@@ -2037,7 +2049,7 @@ MaybeHandle<JSReceiver> ValueDeserializer::GetObjectWithID(uint32_t id) {
if (id >= static_cast<unsigned>(id_map_->length())) {
return MaybeHandle<JSReceiver>();
}
- Object* value = id_map_->get(id);
+ Object value = id_map_->get(id);
if (value->IsTheHole(isolate_)) return MaybeHandle<JSReceiver>();
DCHECK(value->IsJSReceiver());
return Handle<JSReceiver>(JSReceiver::cast(value), isolate_);
@@ -2051,7 +2063,7 @@ void ValueDeserializer::AddObjectWithID(uint32_t id,
// If the dictionary was reallocated, update the global handle.
if (!new_array.is_identical_to(id_map_)) {
- GlobalHandles::Destroy(Handle<Object>::cast(id_map_).location());
+ GlobalHandles::Destroy(id_map_.location());
id_map_ = isolate_->global_handles()->Create(*new_array);
}
}
@@ -2067,8 +2079,7 @@ static Maybe<bool> SetPropertiesFromKeyValuePairs(Isolate* isolate,
bool success;
LookupIterator it = LookupIterator::PropertyOrElement(
isolate, object, key, &success, LookupIterator::OWN);
- CHECK_EQ(LookupIterator::NOT_FOUND, it.state());
- if (!success ||
+ if (!success || it.state() != LookupIterator::NOT_FOUND ||
JSObject::DefineOwnPropertyIgnoreAttributes(&it, value, NONE)
.is_null()) {
return Nothing<bool>();
diff --git a/deps/v8/src/value-serializer.h b/deps/v8/src/value-serializer.h
index ac683e8c75..19e277617f 100644
--- a/deps/v8/src/value-serializer.h
+++ b/deps/v8/src/value-serializer.h
@@ -13,7 +13,7 @@
#include "src/base/macros.h"
#include "src/identity-map.h"
#include "src/maybe-handles.h"
-#include "src/messages.h"
+#include "src/message-template.h"
#include "src/vector.h"
#include "src/zone/zone.h"
@@ -61,12 +61,6 @@ class ValueSerializer {
Maybe<bool> WriteObject(Handle<Object> object) V8_WARN_UNUSED_RESULT;
/*
- * Returns the stored data. This serializer should not be used once the buffer
- * is released. The contents are undefined if a previous write has failed.
- */
- std::vector<uint8_t> ReleaseBuffer();
-
- /*
* Returns the buffer, allocated via the delegate, and its size.
* Caller assumes ownership of the buffer.
*/
@@ -110,29 +104,29 @@ class ValueSerializer {
void WriteZigZag(T value);
void WriteOneByteString(Vector<const uint8_t> chars);
void WriteTwoByteString(Vector<const uc16> chars);
- void WriteBigIntContents(BigInt* bigint);
+ void WriteBigIntContents(BigInt bigint);
Maybe<uint8_t*> ReserveRawBytes(size_t bytes);
// Writing V8 objects of various kinds.
- void WriteOddball(Oddball* oddball);
- void WriteSmi(Smi* smi);
- void WriteHeapNumber(HeapNumber* number);
- void WriteMutableHeapNumber(MutableHeapNumber* number);
- void WriteBigInt(BigInt* bigint);
+ void WriteOddball(Oddball oddball);
+ void WriteSmi(Smi smi);
+ void WriteHeapNumber(HeapNumber number);
+ void WriteMutableHeapNumber(MutableHeapNumber number);
+ void WriteBigInt(BigInt bigint);
void WriteString(Handle<String> string);
Maybe<bool> WriteJSReceiver(Handle<JSReceiver> receiver)
V8_WARN_UNUSED_RESULT;
Maybe<bool> WriteJSObject(Handle<JSObject> object) V8_WARN_UNUSED_RESULT;
Maybe<bool> WriteJSObjectSlow(Handle<JSObject> object) V8_WARN_UNUSED_RESULT;
Maybe<bool> WriteJSArray(Handle<JSArray> array) V8_WARN_UNUSED_RESULT;
- void WriteJSDate(JSDate* date);
+ void WriteJSDate(JSDate date);
Maybe<bool> WriteJSValue(Handle<JSValue> value) V8_WARN_UNUSED_RESULT;
- void WriteJSRegExp(JSRegExp* regexp);
+ void WriteJSRegExp(JSRegExp regexp);
Maybe<bool> WriteJSMap(Handle<JSMap> map) V8_WARN_UNUSED_RESULT;
Maybe<bool> WriteJSSet(Handle<JSSet> map) V8_WARN_UNUSED_RESULT;
Maybe<bool> WriteJSArrayBuffer(Handle<JSArrayBuffer> array_buffer)
V8_WARN_UNUSED_RESULT;
- Maybe<bool> WriteJSArrayBufferView(JSArrayBufferView* array_buffer);
+ Maybe<bool> WriteJSArrayBufferView(JSArrayBufferView array_buffer);
Maybe<bool> WriteWasmModule(Handle<WasmModuleObject> object)
V8_WARN_UNUSED_RESULT;
Maybe<bool> WriteWasmMemory(Handle<WasmMemoryObject> object)
@@ -151,18 +145,18 @@ class ValueSerializer {
* Asks the delegate to handle an error that occurred during data cloning, by
* throwing an exception appropriate for the host.
*/
- void ThrowDataCloneError(MessageTemplate::Template template_index);
- V8_NOINLINE void ThrowDataCloneError(MessageTemplate::Template template_index,
+ void ThrowDataCloneError(MessageTemplate template_index);
+ V8_NOINLINE void ThrowDataCloneError(MessageTemplate template_index,
Handle<Object> arg0);
Maybe<bool> ThrowIfOutOfMemory();
Isolate* const isolate_;
v8::ValueSerializer::Delegate* const delegate_;
- bool treat_array_buffer_views_as_host_objects_ = false;
uint8_t* buffer_ = nullptr;
size_t buffer_size_ = 0;
size_t buffer_capacity_ = 0;
+ bool treat_array_buffer_views_as_host_objects_ = false;
bool out_of_memory_ = false;
Zone zone_;
diff --git a/deps/v8/src/vector.h b/deps/v8/src/vector.h
index b8f10133b5..75e4a51e91 100644
--- a/deps/v8/src/vector.h
+++ b/deps/v8/src/vector.h
@@ -136,7 +136,9 @@ class Vector {
}
// Implicit conversion from Vector<T> to Vector<const T>.
- inline operator Vector<const T>() { return Vector<const T>::cast(*this); }
+ inline operator Vector<const T>() const {
+ return Vector<const T>::cast(*this);
+ }
// Factory method for creating empty vectors.
static Vector<T> empty() { return Vector<T>(nullptr, 0); }
@@ -147,7 +149,7 @@ class Vector {
input.length() * sizeof(S) / sizeof(T));
}
- bool operator==(const Vector<T>& other) const {
+ bool operator==(const Vector<const T> other) const {
if (length_ != other.length_) return false;
if (start_ == other.start_) return true;
for (size_t i = 0; i < length_; ++i) {
@@ -203,7 +205,10 @@ class OwnedVector {
typename = typename std::enable_if<std::is_convertible<
std::unique_ptr<U>, std::unique_ptr<T>>::value>::type>
OwnedVector(OwnedVector<U>&& other)
- : data_(other.ReleaseData()), length_(other.size()) {}
+ : data_(std::move(other.data_)), length_(other.length_) {
+ STATIC_ASSERT(sizeof(U) == sizeof(T));
+ other.length_ = 0;
+ }
// Returns the length of the vector as a size_t.
constexpr size_t size() const { return length_; }
@@ -221,8 +226,11 @@ class OwnedVector {
Vector<T> as_vector() const { return Vector<T>(start(), size()); }
// Releases the backing data from this vector and transfers ownership to the
- // caller. This vectors data can no longer be used afterwards.
- std::unique_ptr<T[]> ReleaseData() { return std::move(data_); }
+ // caller. This vector will be empty afterwards.
+ std::unique_ptr<T[]> ReleaseData() {
+ length_ = 0;
+ return std::move(data_);
+ }
// Allocates a new vector of the specified size via the default allocator.
static OwnedVector<T> New(size_t size) {
@@ -244,7 +252,13 @@ class OwnedVector {
return vec;
}
+ bool operator==(std::nullptr_t) const { return data_ == nullptr; }
+ bool operator!=(std::nullptr_t) const { return data_ != nullptr; }
+
private:
+ template <typename U>
+ friend class OwnedVector;
+
std::unique_ptr<T[]> data_;
size_t length_ = 0;
};
@@ -255,10 +269,10 @@ inline int StrLength(const char* string) {
return static_cast<int>(length);
}
-
-#define STATIC_CHAR_VECTOR(x) \
- v8::internal::Vector<const uint8_t>(reinterpret_cast<const uint8_t*>(x), \
- arraysize(x) - 1)
+template <size_t N>
+constexpr Vector<const uint8_t> StaticCharVector(const char (&array)[N]) {
+ return Vector<const uint8_t>::cast(Vector<const char>(array, N - 1));
+}
inline Vector<const char> CStrVector(const char* data) {
return Vector<const char>(data, StrLength(data));
@@ -286,6 +300,19 @@ inline constexpr Vector<T> ArrayVector(T (&arr)[N]) {
return Vector<T>(arr);
}
+// Construct a Vector from a start pointer and a size.
+template <typename T>
+inline constexpr Vector<T> VectorOf(T* start, size_t size) {
+ return Vector<T>(start, size);
+}
+
+// Construct a Vector from anything providing a {data()} and {size()} accessor.
+template <typename Container>
+inline constexpr auto VectorOf(Container&& c)
+ -> decltype(VectorOf(c.data(), c.size())) {
+ return VectorOf(c.data(), c.size());
+}
+
} // namespace internal
} // namespace v8
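
The VectorOf overloads added above accept a raw (pointer, size) pair or any container exposing data() and size(). A minimal standalone analogue of the pattern, with View/ViewOf as invented stand-ins for Vector/VectorOf:

    #include <cassert>
    #include <cstddef>
    #include <string>
    #include <vector>

    // Minimal stand-in for Vector: a (pointer, length) view over a buffer.
    template <typename T>
    struct View {
      T* start;
      std::size_t length;
    };

    template <typename T>
    View<T> ViewOf(T* start, std::size_t size) { return {start, size}; }

    // Accept anything with data()/size(), mirroring the VectorOf overload.
    template <typename Container>
    auto ViewOf(Container&& c) -> decltype(ViewOf(c.data(), c.size())) {
      return ViewOf(c.data(), c.size());
    }

    int main() {
      std::vector<int> nums = {1, 2, 3};
      auto v = ViewOf(nums);
      assert(v.length == 3 && v.start[0] == 1);
      std::string s = "abc";
      assert(ViewOf(s).length == 3);
    }
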
diff --git a/deps/v8/src/visitors.h b/deps/v8/src/visitors.h
index ebe58c3e75..28f925cbac 100644
--- a/deps/v8/src/visitors.h
+++ b/deps/v8/src/visitors.h
@@ -6,6 +6,9 @@
#define V8_VISITORS_H_
#include "src/globals.h"
+#include "src/objects/code.h"
+#include "src/objects/foreign.h"
+#include "src/objects/slots.h"
namespace v8 {
namespace internal {
@@ -17,6 +20,7 @@ class Object;
#define ROOT_ID_LIST(V) \
V(kStringTable, "(Internalized strings)") \
V(kExternalStringsTable, "(External strings)") \
+ V(kReadOnlyRootList, "(Read-only roots)") \
V(kStrongRootList, "(Strong roots)") \
V(kSmiRootList, "(Smi roots)") \
V(kBootstrapper, "(Bootstrapper)") \
@@ -34,6 +38,7 @@ class Object;
V(kExtensions, "(Extensions)") \
V(kCodeFlusher, "(Code flusher)") \
V(kPartialSnapshotCache, "(Partial snapshot cache)") \
+ V(kReadOnlyObjectCache, "(Read-only object cache)") \
V(kWeakCollections, "(Weak collections)") \
V(kWrapperTracing, "(Wrapper tracing)") \
V(kUnknown, "(Unknown)")
@@ -61,11 +66,11 @@ class RootVisitor {
  // Visits a contiguous array of pointers in the half-open range
  // [start, end). Any or all of the values may be modified on return.
virtual void VisitRootPointers(Root root, const char* description,
- Object** start, Object** end) = 0;
+ FullObjectSlot start, FullObjectSlot end) = 0;
// Handy shorthand for visiting a single pointer.
virtual void VisitRootPointer(Root root, const char* description,
- Object** p) {
+ FullObjectSlot p) {
VisitRootPointers(root, description, p, p + 1);
}
@@ -88,28 +93,28 @@ class ObjectVisitor {
  // Visits a contiguous array of pointers in the half-open range
  // [start, end). Any or all of the values may be modified on return.
- virtual void VisitPointers(HeapObject* host, Object** start,
- Object** end) = 0;
- virtual void VisitPointers(HeapObject* host, MaybeObject** start,
- MaybeObject** end) = 0;
+ virtual void VisitPointers(HeapObject host, ObjectSlot start,
+ ObjectSlot end) = 0;
+ virtual void VisitPointers(HeapObject host, MaybeObjectSlot start,
+ MaybeObjectSlot end) = 0;
  // Custom weak pointers must be ignored by the GC but not by other
  // visitors. They're used, e.g., for lists that are recreated after GC. The
  // default implementation treats them as strong pointers. Visitors that want
  // to ignore them must override this function with an empty body.
- virtual void VisitCustomWeakPointers(HeapObject* host, Object** start,
- Object** end) {
+ virtual void VisitCustomWeakPointers(HeapObject host, ObjectSlot start,
+ ObjectSlot end) {
VisitPointers(host, start, end);
}
// Handy shorthand for visiting a single pointer.
- virtual void VisitPointer(HeapObject* host, Object** p) {
+ virtual void VisitPointer(HeapObject host, ObjectSlot p) {
VisitPointers(host, p, p + 1);
}
- virtual void VisitPointer(HeapObject* host, MaybeObject** p) {
+ virtual void VisitPointer(HeapObject host, MaybeObjectSlot p) {
VisitPointers(host, p, p + 1);
}
- virtual void VisitCustomWeakPointer(HeapObject* host, Object** p) {
+ virtual void VisitCustomWeakPointer(HeapObject host, ObjectSlot p) {
VisitCustomWeakPointers(host, p, p + 1);
}
@@ -117,25 +122,25 @@ class ObjectVisitor {
// a rich interface for iterating over Code objects ...
// Visits a code target in the instruction stream.
- virtual void VisitCodeTarget(Code* host, RelocInfo* rinfo);
-
- // Visits a runtime entry in the instruction stream.
- virtual void VisitRuntimeEntry(Code* host, RelocInfo* rinfo) {}
+ virtual void VisitCodeTarget(Code host, RelocInfo* rinfo) = 0;
// Visit pointer embedded into a code object.
- virtual void VisitEmbeddedPointer(Code* host, RelocInfo* rinfo);
+ virtual void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) = 0;
+
+ // Visits a runtime entry in the instruction stream.
+ virtual void VisitRuntimeEntry(Code host, RelocInfo* rinfo) {}
// Visits an external reference embedded into a code object.
- virtual void VisitExternalReference(Code* host, RelocInfo* rinfo) {}
+ virtual void VisitExternalReference(Code host, RelocInfo* rinfo) {}
// Visits an external reference.
- virtual void VisitExternalReference(Foreign* host, Address* p) {}
+ virtual void VisitExternalReference(Foreign host, Address* p) {}
// Visits an (encoded) internal reference.
- virtual void VisitInternalReference(Code* host, RelocInfo* rinfo) {}
+ virtual void VisitInternalReference(Code host, RelocInfo* rinfo) {}
// Visits an off-heap target in the instruction stream.
- virtual void VisitOffHeapTarget(Code* host, RelocInfo* rinfo) {}
+ virtual void VisitOffHeapTarget(Code host, RelocInfo* rinfo) {}
// Visits the relocation info using the given iterator.
virtual void VisitRelocInfo(RelocIterator* it);
diff --git a/deps/v8/src/vm-state-inl.h b/deps/v8/src/vm-state-inl.h
index 7388238ebd..d22e1abd69 100644
--- a/deps/v8/src/vm-state-inl.h
+++ b/deps/v8/src/vm-state-inl.h
@@ -34,8 +34,8 @@ inline const char* StateToString(StateTag state) {
return "OTHER";
case EXTERNAL:
return "EXTERNAL";
- default:
- UNREACHABLE();
+ case IDLE:
+ return "IDLE";
}
}
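
Replacing the default arm with an explicit IDLE case makes the switch exhaustive, so the compiler can flag future StateTag additions. A minimal illustration of the idiom with an invented enum:

    #include <cassert>
    #include <cstring>

    enum class Tag { kA, kB };

    // With no default arm, adding a Tag value makes this switch trigger a
    // -Wswitch warning until the new case is handled, as with IDLE above.
    const char* ToString(Tag t) {
      switch (t) {
        case Tag::kA: return "A";
        case Tag::kB: return "B";
      }
      return "";  // not reached for valid enum values
    }

    int main() { assert(std::strcmp(ToString(Tag::kB), "B") == 0); }
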
diff --git a/deps/v8/src/wasm/OWNERS b/deps/v8/src/wasm/OWNERS
index 5e51225ab0..c9b1aa4d78 100644
--- a/deps/v8/src/wasm/OWNERS
+++ b/deps/v8/src/wasm/OWNERS
@@ -3,12 +3,11 @@ set noparent
ahaas@chromium.org
bbudge@chromium.org
binji@chromium.org
-bradnelson@chromium.org
clemensh@chromium.org
-eholk@chromium.org
gdeepti@chromium.org
-kschimpf@chromium.org
mstarzinger@chromium.org
titzer@chromium.org
+per-file wasm-js.*=adamk@chromium.org
+
# COMPONENT: Blink>JavaScript>WebAssembly
diff --git a/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h b/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
index 24c6d90ec6..a4de6ceed7 100644
--- a/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
+++ b/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
@@ -13,209 +13,806 @@ namespace v8 {
namespace internal {
namespace wasm {
+namespace liftoff {
+
+// half
+// slot Frame
+// -----+--------------------+---------------------------
+// n+3 | parameter n |
+// ... | ... |
+// 4 | parameter 1 | or parameter 2
+// 3 | parameter 0 | or parameter 1
+// 2 | (result address) | or parameter 0
+// -----+--------------------+---------------------------
+// 1 | return addr (lr) |
+// 0 | previous frame (fp)|
+// -----+--------------------+ <-- frame ptr (fp)
+// -1 | 0xa: WASM_COMPILED |
+// -2 | instance |
+// -----+--------------------+---------------------------
+// -3 | slot 0 (high) | ^
+// -4 | slot 0 (low) | |
+// -5 | slot 1 (high) | Frame slots
+// -6 | slot 1 (low) | |
+// | | v
+// -----+--------------------+ <-- stack ptr (sp)
+//
+static_assert(2 * kSystemPointerSize == LiftoffAssembler::kStackSlotSize,
+ "Slot size should be twice the size of the 32 bit pointer.");
+constexpr int32_t kInstanceOffset = 2 * kSystemPointerSize;
+constexpr int32_t kFirstStackSlotOffset =
+ kInstanceOffset + 2 * kSystemPointerSize;
+constexpr int32_t kConstantStackSpace = kSystemPointerSize;
+// kPatchInstructionsRequired sets an upper limit on how many instructions
+// PatchPrepareStackFrame will use in order to increase the stack
+// appropriately. Three instructions are required to sub a large constant:
+// movw + movt + sub.
+constexpr int32_t kPatchInstructionsRequired = 3;
+
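
Plugging the 32-bit arm values into the constants above: the instance slot sits at fp - 8 and wasm slot 0's low word at fp - 16, matching the diagram. A quick standalone check of that arithmetic:

    #include <cassert>

    int main() {
      // 32-bit arm values assumed from the constants above.
      const int kSystemPointerSize = 4;
      const int kStackSlotSize = 2 * kSystemPointerSize;      // 8
      const int kInstanceOffset = 2 * kSystemPointerSize;     // fp - 8
      const int kFirstStackSlotOffset = kInstanceOffset + 2 * kSystemPointerSize;
      // GetStackSlot(i) addresses fp - (kFirstStackSlotOffset + i * kStackSlotSize).
      assert(kFirstStackSlotOffset + 0 * kStackSlotSize == 16);  // slot 0 low word
      assert(kFirstStackSlotOffset + 1 * kStackSlotSize == 24);  // slot 1 low word
      // The high word of a slot sits kStackSlotSize / 2 bytes closer to fp.
      assert(kFirstStackSlotOffset - kStackSlotSize / 2 == 12);  // slot 0 high word
    }
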
+inline MemOperand GetStackSlot(uint32_t index) {
+ int32_t offset =
+ kFirstStackSlotOffset + index * LiftoffAssembler::kStackSlotSize;
+ return MemOperand(fp, -offset);
+}
+
+inline MemOperand GetHalfStackSlot(uint32_t index, RegPairHalf half) {
+ int32_t half_offset =
+ half == kLowWord ? 0 : LiftoffAssembler::kStackSlotSize / 2;
+ int32_t offset = kFirstStackSlotOffset +
+ index * LiftoffAssembler::kStackSlotSize - half_offset;
+ return MemOperand(fp, -offset);
+}
+
+inline MemOperand GetInstanceOperand() {
+ return MemOperand(fp, -kInstanceOffset);
+}
+
+inline MemOperand GetMemOp(LiftoffAssembler* assm,
+ UseScratchRegisterScope* temps, Register addr,
+ Register offset, int32_t offset_imm) {
+ if (offset != no_reg) {
+ if (offset_imm == 0) return MemOperand(addr, offset);
+ Register tmp = temps->Acquire();
+ assm->add(tmp, offset, Operand(offset_imm));
+ return MemOperand(addr, tmp);
+ }
+ return MemOperand(addr, offset_imm);
+}
+
+inline Register CalculateActualAddress(LiftoffAssembler* assm,
+ UseScratchRegisterScope* temps,
+ Register addr_reg, Register offset_reg,
+ int32_t offset_imm) {
+ if (offset_reg == no_reg && offset_imm == 0) {
+ return addr_reg;
+ }
+ Register actual_addr_reg = temps->Acquire();
+ if (offset_reg == no_reg) {
+ assm->add(actual_addr_reg, addr_reg, Operand(offset_imm));
+ } else {
+ assm->add(actual_addr_reg, addr_reg, Operand(offset_reg));
+ if (offset_imm != 0) {
+ assm->add(actual_addr_reg, actual_addr_reg, Operand(offset_imm));
+ }
+ }
+ return actual_addr_reg;
+}
+
+inline Condition MakeUnsigned(Condition cond) {
+ switch (cond) {
+ case kSignedLessThan:
+ return kUnsignedLessThan;
+ case kSignedLessEqual:
+ return kUnsignedLessEqual;
+ case kSignedGreaterThan:
+ return kUnsignedGreaterThan;
+ case kSignedGreaterEqual:
+ return kUnsignedGreaterEqual;
+ case kEqual:
+ case kUnequal:
+ case kUnsignedLessThan:
+ case kUnsignedLessEqual:
+ case kUnsignedGreaterThan:
+ case kUnsignedGreaterEqual:
+ return cond;
+ default:
+ UNREACHABLE();
+ }
+}
+
+template <void (Assembler::*op)(Register, Register, Register, SBit, Condition),
+ void (Assembler::*op_with_carry)(Register, Register, const Operand&,
+ SBit, Condition)>
+inline void I64Binop(LiftoffAssembler* assm, LiftoffRegister dst,
+ LiftoffRegister lhs, LiftoffRegister rhs) {
+ UseScratchRegisterScope temps(assm);
+ Register scratch = dst.low_gp();
+ bool can_use_dst =
+ dst.low_gp() != lhs.high_gp() && dst.low_gp() != rhs.high_gp();
+ if (!can_use_dst) {
+ scratch = temps.Acquire();
+ }
+ (assm->*op)(scratch, lhs.low_gp(), rhs.low_gp(), SetCC, al);
+ (assm->*op_with_carry)(dst.high_gp(), lhs.high_gp(), Operand(rhs.high_gp()),
+ LeaveCC, al);
+ if (!can_use_dst) {
+ assm->mov(dst.low_gp(), scratch);
+ }
+}
+
+template <void (TurboAssembler::*op)(Register, Register, Register, Register,
+ Register),
+ bool is_left_shift>
+inline void I64Shiftop(LiftoffAssembler* assm, LiftoffRegister dst,
+ LiftoffRegister src, Register amount,
+ LiftoffRegList pinned) {
+ Register src_low = src.low_gp();
+ Register src_high = src.high_gp();
+ Register dst_low = dst.low_gp();
+ Register dst_high = dst.high_gp();
+ // Left shift writes {dst_high} then {dst_low}, right shifts write {dst_low}
+ // then {dst_high}.
+ Register clobbered_dst_reg = is_left_shift ? dst_high : dst_low;
+ pinned.set(clobbered_dst_reg);
+ pinned.set(src);
+ Register amount_capped =
+ pinned.set(assm->GetUnusedRegister(kGpReg, pinned)).gp();
+ assm->and_(amount_capped, amount, Operand(0x3F));
+
+ // Ensure that writing the first half of {dst} does not overwrite the still
+ // needed half of {src}.
+ Register* later_src_reg = is_left_shift ? &src_low : &src_high;
+ if (*later_src_reg == clobbered_dst_reg) {
+ *later_src_reg = assm->GetUnusedRegister(kGpReg, pinned).gp();
+ assm->TurboAssembler::Move(*later_src_reg, clobbered_dst_reg);
+ }
+
+ (assm->*op)(dst_low, dst_high, src_low, src_high, amount_capped);
+}
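
I64Shiftop caps the amount with and_(amount_capped, amount, 0x3F), i.e. takes it modulo 64 to match wasm shift semantics, then defers to the pair-shift helper. A plain C++ sketch of the left-shift case (not the TurboAssembler::LslPair implementation):

    #include <cassert>
    #include <cstdint>

    // Left shift of a 32-bit register pair with wasm's mod-64 amount semantics.
    void LslPairSketch(uint32_t* lo, uint32_t* hi, uint32_t amount) {
      amount &= 0x3f;                        // same capping as amount_capped
      if (amount == 0) return;
      if (amount >= 32) {                    // low word moves wholly into high
        *hi = *lo << (amount - 32);
        *lo = 0;
      } else {
        *hi = (*hi << amount) | (*lo >> (32 - amount));
        *lo <<= amount;
      }
    }

    int main() {
      uint32_t lo = 0x80000001, hi = 0;
      LslPairSketch(&lo, &hi, 1);
      assert(lo == 2 && hi == 1);
    }
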
+
+inline FloatRegister GetFloatRegister(DoubleRegister reg) {
+ DCHECK_LT(reg.code(), kDoubleCode_d16);
+ return LowDwVfpRegister::from_code(reg.code()).low();
+}
+
+enum class MinOrMax : uint8_t { kMin, kMax };
+template <typename RegisterType>
+inline void EmitFloatMinOrMax(LiftoffAssembler* assm, RegisterType dst,
+ RegisterType lhs, RegisterType rhs,
+ MinOrMax min_or_max) {
+ DCHECK(RegisterType::kSizeInBytes == 4 || RegisterType::kSizeInBytes == 8);
+ if (lhs == rhs) {
+ assm->TurboAssembler::Move(dst, lhs);
+ return;
+ }
+ Label done, is_nan;
+ if (min_or_max == MinOrMax::kMin) {
+ assm->TurboAssembler::FloatMin(dst, lhs, rhs, &is_nan);
+ } else {
+ assm->TurboAssembler::FloatMax(dst, lhs, rhs, &is_nan);
+ }
+ assm->b(&done);
+ assm->bind(&is_nan);
+ // Create a NaN output.
+ assm->vadd(dst, lhs, rhs);
+ assm->bind(&done);
+}
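
At the is_nan label at least one operand is NaN, so adding the operands yields a NaN output, since IEEE-754 addition propagates quiet NaNs. A one-line standalone check of that property:

    #include <cassert>
    #include <cmath>

    int main() {
      float lhs = std::nanf(""), rhs = 1.0f;
      assert(std::isnan(lhs + rhs));  // NaN propagates through vadd-style addition
    }
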
+
+} // namespace liftoff
+
int LiftoffAssembler::PrepareStackFrame() {
- BAILOUT("PrepareStackFrame");
- return 0;
+ if (!CpuFeatures::IsSupported(ARMv7)) {
+ BAILOUT("Armv6 not supported");
+ return 0;
+ }
+ uint32_t offset = static_cast<uint32_t>(pc_offset());
+ // PatchPrepareStackFrame will patch this in order to increase the stack
+ // appropriately. Additional nops are required as the bytes operand might
+ // require extra moves to encode.
+ for (int i = 0; i < liftoff::kPatchInstructionsRequired; i++) {
+ nop();
+ }
+ DCHECK_EQ(offset + liftoff::kPatchInstructionsRequired * kInstrSize,
+ pc_offset());
+ return offset;
}
void LiftoffAssembler::PatchPrepareStackFrame(int offset,
uint32_t stack_slots) {
- BAILOUT("PatchPrepareStackFrame");
+ // Allocate space for instance plus what is needed for the frame slots.
+ uint32_t bytes = liftoff::kConstantStackSpace + kStackSlotSize * stack_slots;
+#ifdef USE_SIMULATOR
+  // When using the simulator, work around Liftoff allocating stack space
+  // before checking it.
+  // TODO(arm): Remove this once the stack check mechanism is updated.
+ if (bytes > KB / 2) {
+ BAILOUT("Stack limited to 512 bytes to avoid a bug in StackCheck");
+ return;
+ }
+#endif
+ PatchingAssembler patching_assembler(AssemblerOptions{},
+ buffer_start_ + offset,
+ liftoff::kPatchInstructionsRequired);
+ patching_assembler.sub(sp, sp, Operand(bytes));
+ patching_assembler.PadWithNops();
}
void LiftoffAssembler::FinishCode() { CheckConstPool(true, false); }
-void LiftoffAssembler::AbortCompilation() { FinishCode(); }
+void LiftoffAssembler::AbortCompilation() { AbortedCodeGeneration(); }
void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
RelocInfo::Mode rmode) {
- BAILOUT("LoadConstant");
+ switch (value.type()) {
+ case kWasmI32:
+ TurboAssembler::Move(reg.gp(), Operand(value.to_i32(), rmode));
+ break;
+ case kWasmI64: {
+ DCHECK(RelocInfo::IsNone(rmode));
+ int32_t low_word = value.to_i64();
+ int32_t high_word = value.to_i64() >> 32;
+ TurboAssembler::Move(reg.low_gp(), Operand(low_word));
+ TurboAssembler::Move(reg.high_gp(), Operand(high_word));
+ break;
+ }
+ case kWasmF32:
+ vmov(liftoff::GetFloatRegister(reg.fp()), value.to_f32_boxed());
+ break;
+ case kWasmF64: {
+ Register extra_scratch = GetUnusedRegister(kGpReg).gp();
+ vmov(reg.fp(), Double(value.to_f64_boxed().get_scalar()), extra_scratch);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
}
void LiftoffAssembler::LoadFromInstance(Register dst, uint32_t offset,
int size) {
- BAILOUT("LoadFromInstance");
+ DCHECK_LE(offset, kMaxInt);
+ DCHECK_EQ(4, size);
+ ldr(dst, liftoff::GetInstanceOperand());
+ ldr(dst, MemOperand(dst, offset));
+}
+
+void LiftoffAssembler::LoadTaggedPointerFromInstance(Register dst,
+ uint32_t offset) {
+ LoadFromInstance(dst, offset, kTaggedSize);
}
void LiftoffAssembler::SpillInstance(Register instance) {
- BAILOUT("SpillInstance");
+ str(instance, liftoff::GetInstanceOperand());
}
void LiftoffAssembler::FillInstanceInto(Register dst) {
- BAILOUT("FillInstanceInto");
+ ldr(dst, liftoff::GetInstanceOperand());
+}
+
+void LiftoffAssembler::LoadTaggedPointer(Register dst, Register src_addr,
+ Register offset_reg,
+ uint32_t offset_imm,
+ LiftoffRegList pinned) {
+ STATIC_ASSERT(kTaggedSize == kInt32Size);
+ Load(LiftoffRegister(dst), src_addr, offset_reg, offset_imm,
+ LoadType::kI32Load, pinned);
}
void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
Register offset_reg, uint32_t offset_imm,
LoadType type, LiftoffRegList pinned,
uint32_t* protected_load_pc, bool is_load_mem) {
- BAILOUT("Load");
+ DCHECK_IMPLIES(type.value_type() == kWasmI64, dst.is_pair());
+  // If offset_imm cannot safely be converted to int32, abort; a separate
+  // bounds check should prevent this code from ever executing.
+ // TODO(7881): Support when >2GB is required.
+ if (!is_uint31(offset_imm)) {
+ TurboAssembler::Abort(AbortReason::kOffsetOutOfRange);
+ return;
+ }
+ UseScratchRegisterScope temps(this);
+ if (type.value() == LoadType::kF64Load ||
+ type.value() == LoadType::kF32Load) {
+ Register actual_src_addr = liftoff::CalculateActualAddress(
+ this, &temps, src_addr, offset_reg, offset_imm);
+ if (type.value() == LoadType::kF64Load) {
+ // Armv6 is not supported so Neon can be used to avoid alignment issues.
+ CpuFeatureScope scope(this, NEON);
+ vld1(Neon64, NeonListOperand(dst.fp()), NeonMemOperand(actual_src_addr));
+ } else {
+ // TODO(arm): Use vld1 for f32 when implemented in simulator as used for
+ // f64. It supports unaligned access.
+ Register scratch =
+ (actual_src_addr == src_addr) ? temps.Acquire() : actual_src_addr;
+ ldr(scratch, MemOperand(actual_src_addr));
+ vmov(liftoff::GetFloatRegister(dst.fp()), scratch);
+ }
+ } else {
+ MemOperand src_op =
+ liftoff::GetMemOp(this, &temps, src_addr, offset_reg, offset_imm);
+ if (protected_load_pc) *protected_load_pc = pc_offset();
+ switch (type.value()) {
+ case LoadType::kI32Load8U:
+ ldrb(dst.gp(), src_op);
+ break;
+ case LoadType::kI64Load8U:
+ ldrb(dst.low_gp(), src_op);
+ mov(dst.high_gp(), Operand(0));
+ break;
+ case LoadType::kI32Load8S:
+ ldrsb(dst.gp(), src_op);
+ break;
+ case LoadType::kI64Load8S:
+ ldrsb(dst.low_gp(), src_op);
+ asr(dst.high_gp(), dst.low_gp(), Operand(31));
+ break;
+ case LoadType::kI32Load16U:
+ ldrh(dst.gp(), src_op);
+ break;
+ case LoadType::kI64Load16U:
+ ldrh(dst.low_gp(), src_op);
+ mov(dst.high_gp(), Operand(0));
+ break;
+ case LoadType::kI32Load16S:
+ ldrsh(dst.gp(), src_op);
+ break;
+ case LoadType::kI32Load:
+ ldr(dst.gp(), src_op);
+ break;
+ case LoadType::kI64Load16S:
+ ldrsh(dst.low_gp(), src_op);
+ asr(dst.high_gp(), dst.low_gp(), Operand(31));
+ break;
+ case LoadType::kI64Load32U:
+ ldr(dst.low_gp(), src_op);
+ mov(dst.high_gp(), Operand(0));
+ break;
+ case LoadType::kI64Load32S:
+ ldr(dst.low_gp(), src_op);
+ asr(dst.high_gp(), dst.low_gp(), Operand(31));
+ break;
+ case LoadType::kI64Load:
+ ldr(dst.low_gp(), src_op);
+        // GetMemOp may use a scratch register as the offset register, in
+        // which case calling GetMemOp again would fail because the assembler
+        // has run out of scratch registers.
+ if (temps.CanAcquire()) {
+ src_op = liftoff::GetMemOp(this, &temps, src_addr, offset_reg,
+ offset_imm + kRegisterSize);
+ } else {
+ add(src_op.rm(), src_op.rm(), Operand(kRegisterSize));
+ }
+ ldr(dst.high_gp(), src_op);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
}
void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister src,
StoreType type, LiftoffRegList pinned,
uint32_t* protected_store_pc, bool is_store_mem) {
- BAILOUT("Store");
+  // If offset_imm cannot safely be converted to int32, abort; a separate
+  // bounds check should prevent this code from ever executing.
+ // TODO(7881): Support when >2GB is required.
+ if (!is_uint31(offset_imm)) {
+ TurboAssembler::Abort(AbortReason::kOffsetOutOfRange);
+ return;
+ }
+ UseScratchRegisterScope temps(this);
+ if (type.value() == StoreType::kF64Store) {
+ Register actual_dst_addr = liftoff::CalculateActualAddress(
+ this, &temps, dst_addr, offset_reg, offset_imm);
+ // Armv6 is not supported so Neon can be used to avoid alignment issues.
+ CpuFeatureScope scope(this, NEON);
+ vst1(Neon64, NeonListOperand(src.fp()), NeonMemOperand(actual_dst_addr));
+ } else if (type.value() == StoreType::kF32Store) {
+ // TODO(arm): Use vst1 for f32 when implemented in simulator as used for
+ // f64. It supports unaligned access.
+    // CalculateActualAddress only avoids using a scratch register when the
+    // following condition holds; otherwise another register must be
+    // obtained.
+ Register scratch = (offset_reg == no_reg && offset_imm == 0)
+ ? temps.Acquire()
+ : GetUnusedRegister(kGpReg, pinned).gp();
+ Register actual_dst_addr = liftoff::CalculateActualAddress(
+ this, &temps, dst_addr, offset_reg, offset_imm);
+ vmov(scratch, liftoff::GetFloatRegister(src.fp()));
+ str(scratch, MemOperand(actual_dst_addr));
+ } else {
+ MemOperand dst_op =
+ liftoff::GetMemOp(this, &temps, dst_addr, offset_reg, offset_imm);
+ if (protected_store_pc) *protected_store_pc = pc_offset();
+ switch (type.value()) {
+ case StoreType::kI64Store8:
+ src = src.low();
+ V8_FALLTHROUGH;
+ case StoreType::kI32Store8:
+ strb(src.gp(), dst_op);
+ break;
+ case StoreType::kI64Store16:
+ src = src.low();
+ V8_FALLTHROUGH;
+ case StoreType::kI32Store16:
+ strh(src.gp(), dst_op);
+ break;
+ case StoreType::kI64Store32:
+ src = src.low();
+ V8_FALLTHROUGH;
+ case StoreType::kI32Store:
+ str(src.gp(), dst_op);
+ break;
+ case StoreType::kI64Store:
+ str(src.low_gp(), dst_op);
+        // GetMemOp may use a scratch register as the offset register, in
+        // which case calling GetMemOp again would fail because the assembler
+        // has run out of scratch registers.
+ if (temps.CanAcquire()) {
+ dst_op = liftoff::GetMemOp(this, &temps, dst_addr, offset_reg,
+ offset_imm + kRegisterSize);
+ } else {
+ add(dst_op.rm(), dst_op.rm(), Operand(kRegisterSize));
+ }
+ str(src.high_gp(), dst_op);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
}
void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
uint32_t caller_slot_idx,
ValueType type) {
- BAILOUT("LoadCallerFrameSlot");
+ int32_t offset = (caller_slot_idx + 1) * kRegisterSize;
+ MemOperand src(fp, offset);
+ switch (type) {
+ case kWasmI32:
+ ldr(dst.gp(), src);
+ break;
+ case kWasmI64:
+ ldr(dst.low_gp(), src);
+ ldr(dst.high_gp(), MemOperand(fp, offset + kRegisterSize));
+ break;
+ case kWasmF32:
+ vldr(liftoff::GetFloatRegister(dst.fp()), src);
+ break;
+ case kWasmF64:
+ vldr(dst.fp(), src);
+ break;
+ default:
+ UNREACHABLE();
+ }
}
void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index,
ValueType type) {
- BAILOUT("MoveStackValue");
+ DCHECK_NE(dst_index, src_index);
+ LiftoffRegister reg = GetUnusedRegister(reg_class_for(type));
+ Fill(reg, src_index, type);
+ Spill(dst_index, reg, type);
}
void LiftoffAssembler::Move(Register dst, Register src, ValueType type) {
- BAILOUT("Move Register");
+ DCHECK_NE(dst, src);
+ DCHECK_EQ(type, kWasmI32);
+ TurboAssembler::Move(dst, src);
}
void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
ValueType type) {
- BAILOUT("Move DoubleRegister");
+ DCHECK_NE(dst, src);
+ if (type == kWasmF32) {
+ vmov(liftoff::GetFloatRegister(dst), liftoff::GetFloatRegister(src));
+ } else {
+ DCHECK_EQ(kWasmF64, type);
+ vmov(dst, src);
+ }
}
void LiftoffAssembler::Spill(uint32_t index, LiftoffRegister reg,
ValueType type) {
- BAILOUT("Spill register");
+ RecordUsedSpillSlot(index);
+ MemOperand dst = liftoff::GetStackSlot(index);
+ switch (type) {
+ case kWasmI32:
+ str(reg.gp(), dst);
+ break;
+ case kWasmI64:
+ str(reg.low_gp(), liftoff::GetHalfStackSlot(index, kLowWord));
+ str(reg.high_gp(), liftoff::GetHalfStackSlot(index, kHighWord));
+ break;
+ case kWasmF32:
+ vstr(liftoff::GetFloatRegister(reg.fp()), dst);
+ break;
+ case kWasmF64:
+ vstr(reg.fp(), dst);
+ break;
+ default:
+ UNREACHABLE();
+ }
}
void LiftoffAssembler::Spill(uint32_t index, WasmValue value) {
- BAILOUT("Spill value");
+ RecordUsedSpillSlot(index);
+ MemOperand dst = liftoff::GetStackSlot(index);
+ UseScratchRegisterScope temps(this);
+ Register src = no_reg;
+  // str itself will need the scratch register if the offset takes multiple
+  // instructions to encode, so we cannot use it in that case.
+ if (!ImmediateFitsAddrMode2Instruction(dst.offset())) {
+ src = GetUnusedRegister(kGpReg).gp();
+ } else {
+ src = temps.Acquire();
+ }
+ switch (value.type()) {
+ case kWasmI32:
+ mov(src, Operand(value.to_i32()));
+ str(src, dst);
+ break;
+ case kWasmI64: {
+ int32_t low_word = value.to_i64();
+ mov(src, Operand(low_word));
+ str(src, liftoff::GetHalfStackSlot(index, kLowWord));
+ int32_t high_word = value.to_i64() >> 32;
+ mov(src, Operand(high_word));
+ str(src, liftoff::GetHalfStackSlot(index, kHighWord));
+ break;
+ }
+ default:
+ // We do not track f32 and f64 constants, hence they are unreachable.
+ UNREACHABLE();
+ }
}
void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index,
ValueType type) {
- BAILOUT("Fill");
+ switch (type) {
+ case kWasmI32:
+ ldr(reg.gp(), liftoff::GetStackSlot(index));
+ break;
+ case kWasmI64:
+ ldr(reg.low_gp(), liftoff::GetHalfStackSlot(index, kLowWord));
+ ldr(reg.high_gp(), liftoff::GetHalfStackSlot(index, kHighWord));
+ break;
+ case kWasmF32:
+ vldr(liftoff::GetFloatRegister(reg.fp()), liftoff::GetStackSlot(index));
+ break;
+ case kWasmF64:
+ vldr(reg.fp(), liftoff::GetStackSlot(index));
+ break;
+ default:
+ UNREACHABLE();
+ }
}
-void LiftoffAssembler::FillI64Half(Register, uint32_t half_index) {
- BAILOUT("FillI64Half");
+void LiftoffAssembler::FillI64Half(Register reg, uint32_t index,
+ RegPairHalf half) {
+ ldr(reg, liftoff::GetHalfStackSlot(index, half));
}
-#define UNIMPLEMENTED_GP_BINOP(name) \
+#define I32_BINOP(name, instruction) \
void LiftoffAssembler::emit_##name(Register dst, Register lhs, \
Register rhs) { \
- BAILOUT("gp binop: " #name); \
+ instruction(dst, lhs, rhs); \
}
-#define UNIMPLEMENTED_I64_BINOP(name) \
- void LiftoffAssembler::emit_##name(LiftoffRegister dst, LiftoffRegister lhs, \
- LiftoffRegister rhs) { \
- BAILOUT("i64 binop: " #name); \
+#define I32_SHIFTOP(name, instruction) \
+ void LiftoffAssembler::emit_##name(Register dst, Register src, \
+ Register amount, LiftoffRegList pinned) { \
+ UseScratchRegisterScope temps(this); \
+ Register scratch = temps.Acquire(); \
+ and_(scratch, amount, Operand(0x1f)); \
+ instruction(dst, src, Operand(scratch)); \
}
-#define UNIMPLEMENTED_GP_UNOP(name) \
- bool LiftoffAssembler::emit_##name(Register dst, Register src) { \
- BAILOUT("gp unop: " #name); \
- return true; \
+#define FP32_UNOP(name, instruction) \
+ void LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister src) { \
+ instruction(liftoff::GetFloatRegister(dst), \
+ liftoff::GetFloatRegister(src)); \
}
-#define UNIMPLEMENTED_FP_BINOP(name) \
+#define FP32_BINOP(name, instruction) \
void LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister lhs, \
DoubleRegister rhs) { \
- BAILOUT("fp binop: " #name); \
+ instruction(liftoff::GetFloatRegister(dst), \
+ liftoff::GetFloatRegister(lhs), \
+ liftoff::GetFloatRegister(rhs)); \
}
-#define UNIMPLEMENTED_FP_UNOP(name) \
+#define FP64_UNOP(name, instruction) \
void LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister src) { \
- BAILOUT("fp unop: " #name); \
+ instruction(dst, src); \
}
-#define UNIMPLEMENTED_FP_UNOP_RETURN_TRUE(name) \
- bool LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister src) { \
- BAILOUT("fp unop: " #name); \
- return true; \
+#define FP64_BINOP(name, instruction) \
+ void LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister lhs, \
+ DoubleRegister rhs) { \
+ instruction(dst, lhs, rhs); \
}
-#define UNIMPLEMENTED_I32_SHIFTOP(name) \
- void LiftoffAssembler::emit_##name(Register dst, Register src, \
- Register amount, LiftoffRegList pinned) { \
- BAILOUT("i32 shiftop: " #name); \
+
+I32_BINOP(i32_add, add)
+I32_BINOP(i32_sub, sub)
+I32_BINOP(i32_mul, mul)
+I32_BINOP(i32_and, and_)
+I32_BINOP(i32_or, orr)
+I32_BINOP(i32_xor, eor)
+I32_SHIFTOP(i32_shl, lsl)
+I32_SHIFTOP(i32_sar, asr)
+I32_SHIFTOP(i32_shr, lsr)
+FP32_BINOP(f32_add, vadd)
+FP32_BINOP(f32_sub, vsub)
+FP32_BINOP(f32_mul, vmul)
+FP32_BINOP(f32_div, vdiv)
+FP32_UNOP(f32_abs, vabs)
+FP32_UNOP(f32_neg, vneg)
+FP32_UNOP(f32_sqrt, vsqrt)
+FP64_BINOP(f64_add, vadd)
+FP64_BINOP(f64_sub, vsub)
+FP64_BINOP(f64_mul, vmul)
+FP64_BINOP(f64_div, vdiv)
+FP64_UNOP(f64_abs, vabs)
+FP64_UNOP(f64_neg, vneg)
+FP64_UNOP(f64_sqrt, vsqrt)
+
+#undef I32_BINOP
+#undef I32_SHIFTOP
+#undef FP32_UNOP
+#undef FP32_BINOP
+#undef FP64_UNOP
+#undef FP64_BINOP
+
+bool LiftoffAssembler::emit_i32_clz(Register dst, Register src) {
+ clz(dst, src);
+ return true;
+}
+
+bool LiftoffAssembler::emit_i32_ctz(Register dst, Register src) {
+ rbit(dst, src);
+ clz(dst, dst);
+ return true;
+}
+
+bool LiftoffAssembler::emit_i32_popcnt(Register dst, Register src) {
+ {
+ UseScratchRegisterScope temps(this);
+ LiftoffRegList pinned = LiftoffRegList::ForRegs(dst);
+ Register scratch = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp();
+ Register scratch_2 = GetUnusedRegister(kGpReg, pinned).gp();
+ // x = x - ((x & (0x55555555 << 1)) >> 1)
+ and_(scratch, src, Operand(0xaaaaaaaa));
+ sub(dst, src, Operand(scratch, LSR, 1));
+ // x = (x & 0x33333333) + ((x & (0x33333333 << 2)) >> 2)
+ mov(scratch, Operand(0x33333333));
+ and_(scratch_2, dst, Operand(scratch, LSL, 2));
+ and_(scratch, dst, scratch);
+ add(dst, scratch, Operand(scratch_2, LSR, 2));
}
-#define UNIMPLEMENTED_I64_SHIFTOP(name) \
- void LiftoffAssembler::emit_##name(LiftoffRegister dst, LiftoffRegister src, \
- Register amount, LiftoffRegList pinned) { \
- BAILOUT("i64 shiftop: " #name); \
- }
-
-UNIMPLEMENTED_GP_BINOP(i32_add)
-UNIMPLEMENTED_GP_BINOP(i32_sub)
-UNIMPLEMENTED_GP_BINOP(i32_mul)
-UNIMPLEMENTED_GP_BINOP(i32_and)
-UNIMPLEMENTED_GP_BINOP(i32_or)
-UNIMPLEMENTED_GP_BINOP(i32_xor)
-UNIMPLEMENTED_I32_SHIFTOP(i32_shl)
-UNIMPLEMENTED_I32_SHIFTOP(i32_sar)
-UNIMPLEMENTED_I32_SHIFTOP(i32_shr)
-UNIMPLEMENTED_I64_BINOP(i64_add)
-UNIMPLEMENTED_I64_BINOP(i64_sub)
-UNIMPLEMENTED_I64_BINOP(i64_mul)
-UNIMPLEMENTED_I64_SHIFTOP(i64_shl)
-UNIMPLEMENTED_I64_SHIFTOP(i64_sar)
-UNIMPLEMENTED_I64_SHIFTOP(i64_shr)
-UNIMPLEMENTED_GP_UNOP(i32_clz)
-UNIMPLEMENTED_GP_UNOP(i32_ctz)
-UNIMPLEMENTED_GP_UNOP(i32_popcnt)
-UNIMPLEMENTED_FP_BINOP(f32_add)
-UNIMPLEMENTED_FP_BINOP(f32_sub)
-UNIMPLEMENTED_FP_BINOP(f32_mul)
-UNIMPLEMENTED_FP_BINOP(f32_div)
-UNIMPLEMENTED_FP_BINOP(f32_min)
-UNIMPLEMENTED_FP_BINOP(f32_max)
-UNIMPLEMENTED_FP_BINOP(f32_copysign)
-UNIMPLEMENTED_FP_UNOP(f32_abs)
-UNIMPLEMENTED_FP_UNOP(f32_neg)
-UNIMPLEMENTED_FP_UNOP(f32_ceil)
-UNIMPLEMENTED_FP_UNOP(f32_floor)
-UNIMPLEMENTED_FP_UNOP(f32_trunc)
-UNIMPLEMENTED_FP_UNOP(f32_nearest_int)
-UNIMPLEMENTED_FP_UNOP(f32_sqrt)
-UNIMPLEMENTED_FP_BINOP(f64_add)
-UNIMPLEMENTED_FP_BINOP(f64_sub)
-UNIMPLEMENTED_FP_BINOP(f64_mul)
-UNIMPLEMENTED_FP_BINOP(f64_div)
-UNIMPLEMENTED_FP_BINOP(f64_min)
-UNIMPLEMENTED_FP_BINOP(f64_max)
-UNIMPLEMENTED_FP_BINOP(f64_copysign)
-UNIMPLEMENTED_FP_UNOP(f64_abs)
-UNIMPLEMENTED_FP_UNOP(f64_neg)
-UNIMPLEMENTED_FP_UNOP_RETURN_TRUE(f64_ceil)
-UNIMPLEMENTED_FP_UNOP_RETURN_TRUE(f64_floor)
-UNIMPLEMENTED_FP_UNOP_RETURN_TRUE(f64_trunc)
-UNIMPLEMENTED_FP_UNOP_RETURN_TRUE(f64_nearest_int)
-UNIMPLEMENTED_FP_UNOP(f64_sqrt)
-
-#undef UNIMPLEMENTED_GP_BINOP
-#undef UNIMPLEMENTED_I64_BINOP
-#undef UNIMPLEMENTED_GP_UNOP
-#undef UNIMPLEMENTED_FP_BINOP
-#undef UNIMPLEMENTED_FP_UNOP
-#undef UNIMPLEMENTED_FP_UNOP_RETURN_TRUE
-#undef UNIMPLEMENTED_I32_SHIFTOP
-#undef UNIMPLEMENTED_I64_SHIFTOP
+ // x = (x + (x >> 4)) & 0x0F0F0F0F
+ add(dst, dst, Operand(dst, LSR, 4));
+ and_(dst, dst, Operand(0x0f0f0f0f));
+ // x = x + (x >> 8)
+ add(dst, dst, Operand(dst, LSR, 8));
+ // x = x + (x >> 16)
+ add(dst, dst, Operand(dst, LSR, 16));
+ // x = x & 0x3F
+ and_(dst, dst, Operand(0x3f));
+ return true;
+}
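
This is the classic SWAR popcount, folding 2-, 4- and 8-bit partial sums before summing the bytes. The same reduction as standalone C++, for reference:

    #include <cassert>
    #include <cstdint>

    // Standalone sketch of the SWAR popcount emitted above.
    uint32_t Popcount32(uint32_t x) {
      x = x - ((x & 0xaaaaaaaa) >> 1);                 // 2-bit sums
      x = (x & 0x33333333) + ((x >> 2) & 0x33333333);  // 4-bit sums
      x = (x + (x >> 4)) & 0x0f0f0f0f;                 // 8-bit sums
      x = x + (x >> 8);                                // fold halves
      x = x + (x >> 16);
      return x & 0x3f;                                 // result fits in 6 bits
    }

    int main() {
      assert(Popcount32(0) == 0);
      assert(Popcount32(0xffffffff) == 32);
      assert(Popcount32(0x80000001) == 2);
    }
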
void LiftoffAssembler::emit_i32_divs(Register dst, Register lhs, Register rhs,
Label* trap_div_by_zero,
Label* trap_div_unrepresentable) {
- BAILOUT("i32_divs");
+ if (!CpuFeatures::IsSupported(SUDIV)) {
+ BAILOUT("i32_divs");
+ return;
+ }
+ CpuFeatureScope scope(this, SUDIV);
+ // Issue division early so we can perform the trapping checks whilst it
+ // completes.
+ bool speculative_sdiv = dst != lhs && dst != rhs;
+ if (speculative_sdiv) {
+ sdiv(dst, lhs, rhs);
+ }
+ Label noTrap;
+ // Check for division by zero.
+ cmp(rhs, Operand(0));
+ b(trap_div_by_zero, eq);
+ // Check for kMinInt / -1. This is unrepresentable.
+ cmp(rhs, Operand(-1));
+ b(&noTrap, ne);
+ cmp(lhs, Operand(kMinInt));
+ b(trap_div_unrepresentable, eq);
+ bind(&noTrap);
+ if (!speculative_sdiv) {
+ sdiv(dst, lhs, rhs);
+ }
}
void LiftoffAssembler::emit_i32_divu(Register dst, Register lhs, Register rhs,
Label* trap_div_by_zero) {
- BAILOUT("i32_divu");
+ if (!CpuFeatures::IsSupported(SUDIV)) {
+ BAILOUT("i32_divu");
+ return;
+ }
+ CpuFeatureScope scope(this, SUDIV);
+ // Check for division by zero.
+ cmp(rhs, Operand(0));
+ b(trap_div_by_zero, eq);
+ udiv(dst, lhs, rhs);
}
void LiftoffAssembler::emit_i32_rems(Register dst, Register lhs, Register rhs,
Label* trap_div_by_zero) {
- BAILOUT("i32_rems");
+ if (!CpuFeatures::IsSupported(SUDIV)) {
+    // If this case is ever handled, a check for ARMv7 will be required to
+    // use mls. Mls support is implied by SUDIV support.
+ BAILOUT("i32_rems");
+ return;
+ }
+ CpuFeatureScope scope(this, SUDIV);
+  // No need to check kMinInt / -1: sdiv yields kMinInt, kMinInt * -1 wraps
+  // back to kMinInt, and the mls result kMinInt - kMinInt is therefore 0.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ sdiv(scratch, lhs, rhs);
+ // Check for division by zero.
+ cmp(rhs, Operand(0));
+ b(trap_div_by_zero, eq);
+ // Compute remainder.
+ mls(dst, scratch, rhs, lhs);
}
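// For reference, sdiv and mls together compute the remainder as in this
// C++ sketch:
//   int32_t q = lhs / rhs;      // sdiv(scratch, lhs, rhs)
//   int32_t r = lhs - q * rhs;  // mls(dst, scratch, rhs, lhs)
// For lhs == kMinInt and rhs == -1, q wraps to kMinInt and q * rhs wraps
// back to kMinInt, so r == 0, which is the correct remainder.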
void LiftoffAssembler::emit_i32_remu(Register dst, Register lhs, Register rhs,
Label* trap_div_by_zero) {
- BAILOUT("i32_remu");
+ if (!CpuFeatures::IsSupported(SUDIV)) {
+    // If this case is ever handled, a check for ARMv7 is required to use
+    // mls; mls support is implied by SUDIV support.
+ BAILOUT("i32_remu");
+ return;
+ }
+ CpuFeatureScope scope(this, SUDIV);
+  // Unsigned division has no kMinInt / -1 special case; the remainder is
+  // simply lhs - quotient * rhs, computed via mls.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ udiv(scratch, lhs, rhs);
+ // Check for division by zero.
+ cmp(rhs, Operand(0));
+ b(trap_div_by_zero, eq);
+ // Compute remainder.
+ mls(dst, scratch, rhs, lhs);
+}
+
+void LiftoffAssembler::emit_i32_shr(Register dst, Register src, int amount) {
+ DCHECK(is_uint5(amount));
+ lsr(dst, src, Operand(amount));
}
-void LiftoffAssembler::emit_i32_shr(Register dst, Register lhs, int amount) {
- BAILOUT("i32_shr");
+void LiftoffAssembler::emit_i64_add(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::I64Binop<&Assembler::add, &Assembler::adc>(this, dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_i64_sub(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::I64Binop<&Assembler::sub, &Assembler::sbc>(this, dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_i64_mul(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ // Idea:
+ // [ lhs_hi | lhs_lo ] * [ rhs_hi | rhs_lo ]
+ // = [ lhs_hi * rhs_lo | ] (32 bit mul, shift 32)
+ // + [ lhs_lo * rhs_hi | ] (32 bit mul, shift 32)
+ // + [ lhs_lo * rhs_lo ] (32x32->64 mul, shift 0)
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ // scratch = lhs_hi * rhs_lo
+ mul(scratch, lhs.high_gp(), rhs.low_gp());
+ // scratch += lhs_lo * rhs_hi
+ mla(scratch, lhs.low_gp(), rhs.high_gp(), scratch);
+ // TODO(arm): use umlal once implemented correctly in the simulator.
+ // [dst_hi|dst_lo] = lhs_lo * rhs_lo
+ umull(dst.low_gp(), dst.high_gp(), lhs.low_gp(), rhs.low_gp());
+ // dst_hi += scratch
+ add(dst.high_gp(), dst.high_gp(), scratch);
}
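// For reference, a minimal C++ sketch of the decomposition above (I64Mul
// is a hypothetical helper, not part of this patch):
//   uint64_t I64Mul(uint64_t lhs, uint64_t rhs) {
//     uint32_t lhs_lo = static_cast<uint32_t>(lhs);
//     uint32_t lhs_hi = static_cast<uint32_t>(lhs >> 32);
//     uint32_t rhs_lo = static_cast<uint32_t>(rhs);
//     uint32_t rhs_hi = static_cast<uint32_t>(rhs >> 32);
//     uint32_t cross = lhs_hi * rhs_lo + lhs_lo * rhs_hi;  // mul + mla
//     uint64_t prod = uint64_t{lhs_lo} * rhs_lo;           // umull
//     return prod + (uint64_t{cross} << 32);               // final add
//   }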
bool LiftoffAssembler::emit_i64_divs(LiftoffRegister dst, LiftoffRegister lhs,
@@ -243,108 +840,503 @@ bool LiftoffAssembler::emit_i64_remu(LiftoffRegister dst, LiftoffRegister lhs,
return false;
}
-void LiftoffAssembler::emit_i64_shr(LiftoffRegister dst, LiftoffRegister lhs,
+void LiftoffAssembler::emit_i64_shl(LiftoffRegister dst, LiftoffRegister src,
+ Register amount, LiftoffRegList pinned) {
+ liftoff::I64Shiftop<&TurboAssembler::LslPair, true>(this, dst, src, amount,
+ pinned);
+}
+
+void LiftoffAssembler::emit_i64_sar(LiftoffRegister dst, LiftoffRegister src,
+ Register amount, LiftoffRegList pinned) {
+ liftoff::I64Shiftop<&TurboAssembler::AsrPair, false>(this, dst, src, amount,
+ pinned);
+}
+
+void LiftoffAssembler::emit_i64_shr(LiftoffRegister dst, LiftoffRegister src,
+ Register amount, LiftoffRegList pinned) {
+ liftoff::I64Shiftop<&TurboAssembler::LsrPair, false>(this, dst, src, amount,
+ pinned);
+}
+
+void LiftoffAssembler::emit_i64_shr(LiftoffRegister dst, LiftoffRegister src,
int amount) {
- BAILOUT("i64_shr");
+ DCHECK(is_uint6(amount));
+ UseScratchRegisterScope temps(this);
+ Register src_high = src.high_gp();
+ // {src.high_gp()} will still be needed after writing {dst.low_gp()}.
+ if (src_high == dst.low_gp()) {
+ src_high = GetUnusedRegister(kGpReg).gp();
+ TurboAssembler::Move(src_high, dst.low_gp());
+ }
+
+ LsrPair(dst.low_gp(), dst.high_gp(), src.low_gp(), src_high, amount);
+}
+
+bool LiftoffAssembler::emit_f32_ceil(DoubleRegister dst, DoubleRegister src) {
+ if (CpuFeatures::IsSupported(ARMv8)) {
+ CpuFeatureScope scope(this, ARMv8);
+ vrintp(liftoff::GetFloatRegister(dst), liftoff::GetFloatRegister(src));
+ return true;
+ }
+ return false;
+}
+
+bool LiftoffAssembler::emit_f32_floor(DoubleRegister dst, DoubleRegister src) {
+ if (CpuFeatures::IsSupported(ARMv8)) {
+ CpuFeatureScope scope(this, ARMv8);
+ vrintm(liftoff::GetFloatRegister(dst), liftoff::GetFloatRegister(src));
+ return true;
+ }
+ return false;
+}
+
+bool LiftoffAssembler::emit_f32_trunc(DoubleRegister dst, DoubleRegister src) {
+ if (CpuFeatures::IsSupported(ARMv8)) {
+ CpuFeatureScope scope(this, ARMv8);
+ vrintz(liftoff::GetFloatRegister(dst), liftoff::GetFloatRegister(src));
+ return true;
+ }
+ return false;
+}
+
+bool LiftoffAssembler::emit_f32_nearest_int(DoubleRegister dst,
+ DoubleRegister src) {
+ if (CpuFeatures::IsSupported(ARMv8)) {
+ CpuFeatureScope scope(this, ARMv8);
+ vrintn(liftoff::GetFloatRegister(dst), liftoff::GetFloatRegister(src));
+ return true;
+ }
+ return false;
+}
+
+void LiftoffAssembler::emit_f32_min(DoubleRegister dst, DoubleRegister lhs,
+ DoubleRegister rhs) {
+ liftoff::EmitFloatMinOrMax(
+ this, liftoff::GetFloatRegister(dst), liftoff::GetFloatRegister(lhs),
+ liftoff::GetFloatRegister(rhs), liftoff::MinOrMax::kMin);
+}
+
+void LiftoffAssembler::emit_f32_max(DoubleRegister dst, DoubleRegister lhs,
+ DoubleRegister rhs) {
+ liftoff::EmitFloatMinOrMax(
+ this, liftoff::GetFloatRegister(dst), liftoff::GetFloatRegister(lhs),
+ liftoff::GetFloatRegister(rhs), liftoff::MinOrMax::kMax);
+}
+
+bool LiftoffAssembler::emit_f64_ceil(DoubleRegister dst, DoubleRegister src) {
+ if (CpuFeatures::IsSupported(ARMv8)) {
+ CpuFeatureScope scope(this, ARMv8);
+ vrintp(dst, src);
+ return true;
+ }
+ return false;
+}
+
+bool LiftoffAssembler::emit_f64_floor(DoubleRegister dst, DoubleRegister src) {
+ if (CpuFeatures::IsSupported(ARMv8)) {
+ CpuFeatureScope scope(this, ARMv8);
+ vrintm(dst, src);
+ return true;
+ }
+ return false;
+}
+
+bool LiftoffAssembler::emit_f64_trunc(DoubleRegister dst, DoubleRegister src) {
+ if (CpuFeatures::IsSupported(ARMv8)) {
+ CpuFeatureScope scope(this, ARMv8);
+ vrintz(dst, src);
+ return true;
+ }
+ return false;
+}
+
+bool LiftoffAssembler::emit_f64_nearest_int(DoubleRegister dst,
+ DoubleRegister src) {
+ if (CpuFeatures::IsSupported(ARMv8)) {
+ CpuFeatureScope scope(this, ARMv8);
+ vrintn(dst, src);
+ return true;
+ }
+ return false;
+}
+
+void LiftoffAssembler::emit_f64_min(DoubleRegister dst, DoubleRegister lhs,
+ DoubleRegister rhs) {
+ liftoff::EmitFloatMinOrMax(this, dst, lhs, rhs, liftoff::MinOrMax::kMin);
+}
+
+void LiftoffAssembler::emit_f64_max(DoubleRegister dst, DoubleRegister lhs,
+ DoubleRegister rhs) {
+ liftoff::EmitFloatMinOrMax(this, dst, lhs, rhs, liftoff::MinOrMax::kMax);
}
void LiftoffAssembler::emit_i32_to_intptr(Register dst, Register src) {
// This is a nop on arm.
}
+void LiftoffAssembler::emit_f32_copysign(DoubleRegister dst, DoubleRegister lhs,
+ DoubleRegister rhs) {
+ constexpr uint32_t kF32SignBit = uint32_t{1} << 31;
+ UseScratchRegisterScope temps(this);
+ Register scratch = GetUnusedRegister(kGpReg).gp();
+ Register scratch2 = temps.Acquire();
+ VmovLow(scratch, lhs);
+ // Clear sign bit in {scratch}.
+ bic(scratch, scratch, Operand(kF32SignBit));
+ VmovLow(scratch2, rhs);
+ // Isolate sign bit in {scratch2}.
+ and_(scratch2, scratch2, Operand(kF32SignBit));
+ // Combine {scratch2} into {scratch}.
+ orr(scratch, scratch, scratch2);
+ VmovLow(dst, scratch);
+}
+
+void LiftoffAssembler::emit_f64_copysign(DoubleRegister dst, DoubleRegister lhs,
+ DoubleRegister rhs) {
+ constexpr uint32_t kF64SignBitHighWord = uint32_t{1} << 31;
+  // On arm, we cannot hold the whole f64 value in a gp register, so we just
+  // operate on the upper half, which contains the sign bit.
+ UseScratchRegisterScope temps(this);
+ Register scratch = GetUnusedRegister(kGpReg).gp();
+ Register scratch2 = temps.Acquire();
+ VmovHigh(scratch, lhs);
+ // Clear sign bit in {scratch}.
+ bic(scratch, scratch, Operand(kF64SignBitHighWord));
+ VmovHigh(scratch2, rhs);
+ // Isolate sign bit in {scratch2}.
+ and_(scratch2, scratch2, Operand(kF64SignBitHighWord));
+ // Combine {scratch2} into {scratch}.
+ orr(scratch, scratch, scratch2);
+ vmov(dst, lhs);
+ VmovHigh(dst, scratch);
+}
+
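+// For reference, a bit-level C++ sketch of the f64 case above (F64CopySign
+// is a hypothetical helper; only the sign bit in the upper word differs
+// between input and output):
+//   double F64CopySign(double lhs, double rhs) {
+//     uint64_t l, r;
+//     std::memcpy(&l, &lhs, sizeof l);
+//     std::memcpy(&r, &rhs, sizeof r);
+//     uint64_t sign_bit = uint64_t{1} << 63;
+//     uint64_t bits = (l & ~sign_bit) | (r & sign_bit);
+//     std::memcpy(&lhs, &bits, sizeof lhs);
+//     return lhs;
+//   }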
bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
LiftoffRegister dst,
LiftoffRegister src, Label* trap) {
- BAILOUT("emit_type_conversion");
- return true;
+ switch (opcode) {
+ case kExprI32ConvertI64:
+ TurboAssembler::Move(dst.gp(), src.low_gp());
+ return true;
+ case kExprI32SConvertF32: {
+ UseScratchRegisterScope temps(this);
+ SwVfpRegister scratch_f = temps.AcquireS();
+ vcvt_s32_f32(
+ scratch_f,
+ liftoff::GetFloatRegister(src.fp())); // f32 -> i32 round to zero.
+ vmov(dst.gp(), scratch_f);
+ // Check underflow and NaN.
+ vmov(scratch_f, Float32(static_cast<float>(INT32_MIN)));
+ VFPCompareAndSetFlags(liftoff::GetFloatRegister(src.fp()), scratch_f);
+ b(trap, lt);
+ // Check overflow.
+ cmp(dst.gp(), Operand(-1));
+ b(trap, vs);
+ return true;
+ }
+ case kExprI32UConvertF32: {
+ UseScratchRegisterScope temps(this);
+ SwVfpRegister scratch_f = temps.AcquireS();
+ vcvt_u32_f32(
+ scratch_f,
+ liftoff::GetFloatRegister(src.fp())); // f32 -> i32 round to zero.
+ vmov(dst.gp(), scratch_f);
+ // Check underflow and NaN.
+ vmov(scratch_f, Float32(-1.0f));
+ VFPCompareAndSetFlags(liftoff::GetFloatRegister(src.fp()), scratch_f);
+ b(trap, le);
+ // Check overflow.
+ cmp(dst.gp(), Operand(-1));
+ b(trap, eq);
+ return true;
+ }
+ case kExprI32SConvertF64: {
+ UseScratchRegisterScope temps(this);
+ SwVfpRegister scratch_f = temps.AcquireS();
+ vcvt_s32_f64(scratch_f, src.fp()); // f64 -> i32 round to zero.
+ vmov(dst.gp(), scratch_f);
+ // Check underflow and NaN.
+ DwVfpRegister scratch_d = temps.AcquireD();
+ vmov(scratch_d, Double(static_cast<double>(INT32_MIN - 1.0)));
+ VFPCompareAndSetFlags(src.fp(), scratch_d);
+ b(trap, le);
+ // Check overflow.
+ vmov(scratch_d, Double(static_cast<double>(INT32_MAX + 1.0)));
+ VFPCompareAndSetFlags(src.fp(), scratch_d);
+ b(trap, ge);
+ return true;
+ }
+ case kExprI32UConvertF64: {
+ UseScratchRegisterScope temps(this);
+ SwVfpRegister scratch_f = temps.AcquireS();
+ vcvt_u32_f64(scratch_f, src.fp()); // f64 -> i32 round to zero.
+ vmov(dst.gp(), scratch_f);
+ // Check underflow and NaN.
+ DwVfpRegister scratch_d = temps.AcquireD();
+ vmov(scratch_d, Double(static_cast<double>(-1.0)));
+ VFPCompareAndSetFlags(src.fp(), scratch_d);
+ b(trap, le);
+ // Check overflow.
+ vmov(scratch_d, Double(static_cast<double>(UINT32_MAX + 1.0)));
+ VFPCompareAndSetFlags(src.fp(), scratch_d);
+ b(trap, ge);
+ return true;
+ }
+ case kExprI32ReinterpretF32:
+ vmov(dst.gp(), liftoff::GetFloatRegister(src.fp()));
+ return true;
+ case kExprI64SConvertI32:
+ if (dst.low_gp() != src.gp()) mov(dst.low_gp(), src.gp());
+ mov(dst.high_gp(), Operand(src.gp(), ASR, 31));
+ return true;
+ case kExprI64UConvertI32:
+ if (dst.low_gp() != src.gp()) mov(dst.low_gp(), src.gp());
+ mov(dst.high_gp(), Operand(0));
+ return true;
+ case kExprI64ReinterpretF64:
+ vmov(dst.low_gp(), dst.high_gp(), src.fp());
+ return true;
+ case kExprF32SConvertI32: {
+ SwVfpRegister dst_float = liftoff::GetFloatRegister(dst.fp());
+ vmov(dst_float, src.gp());
+ vcvt_f32_s32(dst_float, dst_float);
+ return true;
+ }
+ case kExprF32UConvertI32: {
+ SwVfpRegister dst_float = liftoff::GetFloatRegister(dst.fp());
+ vmov(dst_float, src.gp());
+ vcvt_f32_u32(dst_float, dst_float);
+ return true;
+ }
+ case kExprF32ConvertF64:
+ vcvt_f32_f64(liftoff::GetFloatRegister(dst.fp()), src.fp());
+ return true;
+ case kExprF32ReinterpretI32:
+ vmov(liftoff::GetFloatRegister(dst.fp()), src.gp());
+ return true;
+ case kExprF64SConvertI32: {
+ vmov(liftoff::GetFloatRegister(dst.fp()), src.gp());
+ vcvt_f64_s32(dst.fp(), liftoff::GetFloatRegister(dst.fp()));
+ return true;
+ }
+ case kExprF64UConvertI32: {
+ vmov(liftoff::GetFloatRegister(dst.fp()), src.gp());
+ vcvt_f64_u32(dst.fp(), liftoff::GetFloatRegister(dst.fp()));
+ return true;
+ }
+ case kExprF64ConvertF32:
+ vcvt_f64_f32(dst.fp(), liftoff::GetFloatRegister(src.fp()));
+ return true;
+ case kExprF64ReinterpretI64:
+ vmov(dst.fp(), src.low_gp(), src.high_gp());
+ return true;
+ case kExprF64SConvertI64:
+ case kExprF64UConvertI64:
+ case kExprI64SConvertF32:
+ case kExprI64UConvertF32:
+ case kExprF32SConvertI64:
+ case kExprF32UConvertI64:
+ case kExprI64SConvertF64:
+ case kExprI64UConvertF64:
+ // These cases can be handled by the C fallback function.
+ return false;
+ default:
+ UNREACHABLE();
+ }
}
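// For reference, the float-to-int trap checks above implement, in effect,
// this C++ sketch (shown for the signed f64 -> i32 case; Trap() is a
// hypothetical stand-in for the trap branch):
//   int32_t TruncF64ToI32(double d) {
//     if (!(d > INT32_MIN - 1.0)) Trap();  // underflow; NaN compares false
//     if (!(d < INT32_MAX + 1.0)) Trap();  // overflow
//     return static_cast<int32_t>(d);      // vcvt rounds toward zero
//   }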
void LiftoffAssembler::emit_i32_signextend_i8(Register dst, Register src) {
- BAILOUT("emit_i32_signextend_i8");
+ sxtb(dst, src);
}
void LiftoffAssembler::emit_i32_signextend_i16(Register dst, Register src) {
- BAILOUT("emit_i32_signextend_i16");
+ sxth(dst, src);
}
void LiftoffAssembler::emit_i64_signextend_i8(LiftoffRegister dst,
LiftoffRegister src) {
- BAILOUT("emit_i64_signextend_i8");
+ emit_i32_signextend_i8(dst.low_gp(), src.low_gp());
+ mov(dst.high_gp(), Operand(dst.low_gp(), ASR, 31));
}
void LiftoffAssembler::emit_i64_signextend_i16(LiftoffRegister dst,
LiftoffRegister src) {
- BAILOUT("emit_i64_signextend_i16");
+ emit_i32_signextend_i16(dst.low_gp(), src.low_gp());
+ mov(dst.high_gp(), Operand(dst.low_gp(), ASR, 31));
}
void LiftoffAssembler::emit_i64_signextend_i32(LiftoffRegister dst,
LiftoffRegister src) {
- BAILOUT("emit_i64_signextend_i32");
+ TurboAssembler::Move(dst.low_gp(), src.low_gp());
+ mov(dst.high_gp(), Operand(src.low_gp(), ASR, 31));
}
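// For reference, the three i64 sign-extensions above share one pattern:
// sign-extend the low word in place (sxtb / sxth / plain move), then fill
// the high word with copies of the sign bit, in C++ terms:
//   high = static_cast<int32_t>(low) >> 31;  // ASR 31 replicates the sign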
-void LiftoffAssembler::emit_jump(Label* label) { BAILOUT("emit_jump"); }
+void LiftoffAssembler::emit_jump(Label* label) { b(label); }
-void LiftoffAssembler::emit_jump(Register target) { BAILOUT("emit_jump"); }
+void LiftoffAssembler::emit_jump(Register target) { bx(target); }
void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label,
ValueType type, Register lhs,
Register rhs) {
- BAILOUT("emit_cond_jump");
+ DCHECK_EQ(type, kWasmI32);
+ if (rhs == no_reg) {
+ cmp(lhs, Operand(0));
+ } else {
+ cmp(lhs, rhs);
+ }
+ b(label, cond);
}
void LiftoffAssembler::emit_i32_eqz(Register dst, Register src) {
- BAILOUT("emit_i32_eqz");
+ clz(dst, src);
+ mov(dst, Operand(dst, LSR, kRegSizeInBitsLog2));
}
void LiftoffAssembler::emit_i32_set_cond(Condition cond, Register dst,
Register lhs, Register rhs) {
- BAILOUT("emit_i32_set_cond");
+ cmp(lhs, rhs);
+ mov(dst, Operand(0), LeaveCC);
+ mov(dst, Operand(1), LeaveCC, cond);
}
void LiftoffAssembler::emit_i64_eqz(Register dst, LiftoffRegister src) {
- BAILOUT("emit_i64_eqz");
+ orr(dst, src.low_gp(), src.high_gp());
+ clz(dst, dst);
+ mov(dst, Operand(dst, LSR, 5));
}
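// For reference, both eqz implementations use the branch-free clz trick:
// clz returns 32 only for a zero input, so (in C++ terms)
//   eqz(x) == CountLeadingZeros(x) >> 5  // 32 >> 5 == 1, anything else 0
// and for i64 the two words are OR'd first, so the input is zero iff both
// halves are zero.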
void LiftoffAssembler::emit_i64_set_cond(Condition cond, Register dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
- BAILOUT("emit_i64_set_cond");
+ // For signed i64 comparisons, we still need to use unsigned comparison for
+ // the low word (the only bit carrying signedness information is the MSB in
+ // the high word).
+ Condition unsigned_cond = liftoff::MakeUnsigned(cond);
+ Label set_cond;
+ Label cont;
+ LiftoffRegister dest = LiftoffRegister(dst);
+ bool speculative_move = !dest.overlaps(lhs) && !dest.overlaps(rhs);
+ if (speculative_move) {
+ mov(dst, Operand(0));
+ }
+ // Compare high word first. If it differs, use it for the set_cond. If it's
+ // equal, compare the low word and use that for set_cond.
+ cmp(lhs.high_gp(), rhs.high_gp());
+ if (unsigned_cond == cond) {
+ cmp(lhs.low_gp(), rhs.low_gp(), kEqual);
+ if (!speculative_move) {
+ mov(dst, Operand(0));
+ }
+ mov(dst, Operand(1), LeaveCC, cond);
+ } else {
+    // If the condition predicate for the low word differs from that for the
+    // high word, the conditional move instructions must be separated.
+ b(ne, &set_cond);
+ cmp(lhs.low_gp(), rhs.low_gp());
+ if (!speculative_move) {
+ mov(dst, Operand(0));
+ }
+ mov(dst, Operand(1), LeaveCC, unsigned_cond);
+ b(&cont);
+ bind(&set_cond);
+ if (!speculative_move) {
+ mov(dst, Operand(0));
+ }
+ mov(dst, Operand(1), LeaveCC, cond);
+ bind(&cont);
+ }
}
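// For reference, a C++ sketch of the two-word compare above, using signed
// "less than" as an example (I64LtS is a hypothetical helper; only the
// high word carries the sign, so the low word uses an unsigned compare):
//   bool I64LtS(int32_t lhs_hi, uint32_t lhs_lo,
//               int32_t rhs_hi, uint32_t rhs_lo) {
//     if (lhs_hi != rhs_hi) return lhs_hi < rhs_hi;  // signed
//     return lhs_lo < rhs_lo;                        // unsigned
//   }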
void LiftoffAssembler::emit_f32_set_cond(Condition cond, Register dst,
DoubleRegister lhs,
DoubleRegister rhs) {
- BAILOUT("emit_f32_set_cond");
+ VFPCompareAndSetFlags(liftoff::GetFloatRegister(lhs),
+ liftoff::GetFloatRegister(rhs));
+ mov(dst, Operand(0), LeaveCC);
+ mov(dst, Operand(1), LeaveCC, cond);
+ if (cond != ne) {
+    // If the V flag is set, at least one of the arguments was a NaN -> false.
+ mov(dst, Operand(0), LeaveCC, vs);
+ }
}
void LiftoffAssembler::emit_f64_set_cond(Condition cond, Register dst,
DoubleRegister lhs,
DoubleRegister rhs) {
- BAILOUT("emit_f64_set_cond");
+ VFPCompareAndSetFlags(lhs, rhs);
+ mov(dst, Operand(0), LeaveCC);
+ mov(dst, Operand(1), LeaveCC, cond);
+ if (cond != ne) {
+    // If the V flag is set, at least one of the arguments was a NaN -> false.
+ mov(dst, Operand(0), LeaveCC, vs);
+ }
}
void LiftoffAssembler::StackCheck(Label* ool_code, Register limit_address) {
- BAILOUT("StackCheck");
+ ldr(limit_address, MemOperand(limit_address));
+ cmp(sp, limit_address);
+ b(ool_code, ls);
}
void LiftoffAssembler::CallTrapCallbackForTesting() {
- BAILOUT("CallTrapCallbackForTesting");
+ PrepareCallCFunction(0, 0);
+ CallCFunction(ExternalReference::wasm_call_trap_callback_for_testing(), 0);
}
void LiftoffAssembler::AssertUnreachable(AbortReason reason) {
- BAILOUT("AssertUnreachable");
+ // Asserts unreachable within the wasm code.
+ TurboAssembler::AssertUnreachable(reason);
}
void LiftoffAssembler::PushRegisters(LiftoffRegList regs) {
- BAILOUT("PushRegisters");
+ RegList core_regs = regs.GetGpList();
+ if (core_regs != 0) {
+ stm(db_w, sp, core_regs);
+ }
+ LiftoffRegList fp_regs = regs & kFpCacheRegList;
+ while (!fp_regs.is_empty()) {
+ LiftoffRegister reg = fp_regs.GetFirstRegSet();
+ DoubleRegister first = reg.fp();
+ DoubleRegister last = first;
+ fp_regs.clear(reg);
+ while (!fp_regs.is_empty()) {
+ LiftoffRegister reg = fp_regs.GetFirstRegSet();
+ int code = reg.fp().code();
+      // vstm cannot push more than 16 registers, and they must be
+      // consecutive. Cut the run short if either condition would be violated.
+ if ((code != last.code() + 1) || ((code - first.code() + 1) > 16)) break;
+ last = reg.fp();
+ fp_regs.clear(reg);
+ }
+ vstm(db_w, sp, first, last);
+ }
}
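// For reference, the inner loop above greedily forms maximal runs of
// consecutive d-registers, since vstm takes a contiguous range of at most
// 16 registers: a set such as {d0, d1, d2, d5} is stored as vstm(d0..d2)
// followed by vstm(d5..d5).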
void LiftoffAssembler::PopRegisters(LiftoffRegList regs) {
- BAILOUT("PopRegisters");
+ LiftoffRegList fp_regs = regs & kFpCacheRegList;
+ while (!fp_regs.is_empty()) {
+ LiftoffRegister reg = fp_regs.GetLastRegSet();
+ DoubleRegister last = reg.fp();
+ DoubleRegister first = last;
+ fp_regs.clear(reg);
+ while (!fp_regs.is_empty()) {
+ LiftoffRegister reg = fp_regs.GetLastRegSet();
+ int code = reg.fp().code();
+ if ((code != first.code() - 1) || ((last.code() - code + 1) > 16)) break;
+ first = reg.fp();
+ fp_regs.clear(reg);
+ }
+ vldm(ia_w, sp, first, last);
+ }
+ RegList core_regs = regs.GetGpList();
+ if (core_regs != 0) {
+ ldm(ia_w, sp, core_regs);
+ }
}
void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
- BAILOUT("DropStackSlotsAndRet");
+ Drop(num_stack_slots);
+ Ret();
}
void LiftoffAssembler::CallC(wasm::FunctionSig* sig,
@@ -352,33 +1344,165 @@ void LiftoffAssembler::CallC(wasm::FunctionSig* sig,
const LiftoffRegister* rets,
ValueType out_argument_type, int stack_bytes,
ExternalReference ext_ref) {
- BAILOUT("CallC");
+ // Arguments are passed by pushing them all to the stack and then passing
+ // a pointer to them.
+ DCHECK(IsAligned(stack_bytes, kSystemPointerSize));
+ // Reserve space in the stack.
+ sub(sp, sp, Operand(stack_bytes));
+
+ int arg_bytes = 0;
+ for (ValueType param_type : sig->parameters()) {
+ switch (param_type) {
+ case kWasmI32:
+ str(args->gp(), MemOperand(sp, arg_bytes));
+ break;
+ case kWasmI64:
+ str(args->low_gp(), MemOperand(sp, arg_bytes));
+ str(args->high_gp(), MemOperand(sp, arg_bytes + kRegisterSize));
+ break;
+ case kWasmF32:
+ vstr(liftoff::GetFloatRegister(args->fp()), MemOperand(sp, arg_bytes));
+ break;
+ case kWasmF64:
+ vstr(args->fp(), MemOperand(sp, arg_bytes));
+ break;
+ default:
+ UNREACHABLE();
+ }
+ args++;
+ arg_bytes += ValueTypes::MemSize(param_type);
+ }
+ DCHECK_LE(arg_bytes, stack_bytes);
+
+ // Pass a pointer to the buffer with the arguments to the C function.
+ mov(r0, sp);
+
+ // Now call the C function.
+ constexpr int kNumCCallArgs = 1;
+ PrepareCallCFunction(kNumCCallArgs);
+ CallCFunction(ext_ref, kNumCCallArgs);
+
+ // Move return value to the right register.
+ const LiftoffRegister* result_reg = rets;
+ if (sig->return_count() > 0) {
+ DCHECK_EQ(1, sig->return_count());
+ constexpr Register kReturnReg = r0;
+ if (kReturnReg != rets->gp()) {
+ Move(*rets, LiftoffRegister(kReturnReg), sig->GetReturn(0));
+ }
+ result_reg++;
+ }
+
+ // Load potential output value from the buffer on the stack.
+ if (out_argument_type != kWasmStmt) {
+ switch (out_argument_type) {
+ case kWasmI32:
+ ldr(result_reg->gp(), MemOperand(sp));
+ break;
+ case kWasmI64:
+ ldr(result_reg->low_gp(), MemOperand(sp));
+ ldr(result_reg->high_gp(), MemOperand(sp, kSystemPointerSize));
+ break;
+ case kWasmF32:
+ vldr(liftoff::GetFloatRegister(result_reg->fp()), MemOperand(sp));
+ break;
+ case kWasmF64:
+ vldr(result_reg->fp(), MemOperand(sp));
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+ add(sp, sp, Operand(stack_bytes));
}
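// For reference, a sketch of the call protocol above: every wasm argument
// is spilled into a stack buffer, the C function receives the buffer
// address as its only argument (r0 == sp), a primary return value (if any)
// arrives back in r0, and an out-argument (if any) is read from the start
// of the same buffer. Roughly, a hypothetical C-side signature:
//   extern "C" int32_t ext_fn(uint8_t* packed_args);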
void LiftoffAssembler::CallNativeWasmCode(Address addr) {
- BAILOUT("CallNativeWasmCode");
+ Call(addr, RelocInfo::WASM_CALL);
}
void LiftoffAssembler::CallIndirect(wasm::FunctionSig* sig,
compiler::CallDescriptor* call_descriptor,
Register target) {
- BAILOUT("CallIndirect");
+ DCHECK(target != no_reg);
+ Call(target);
}
void LiftoffAssembler::CallRuntimeStub(WasmCode::RuntimeStubId sid) {
- BAILOUT("CallRuntimeStub");
+ // A direct call to a wasm runtime stub defined in this module.
+ // Just encode the stub index. This will be patched at relocation.
+ Call(static_cast<Address>(sid), RelocInfo::WASM_STUB_CALL);
}
void LiftoffAssembler::AllocateStackSlot(Register addr, uint32_t size) {
- BAILOUT("AllocateStackSlot");
+ sub(sp, sp, Operand(size));
+ mov(addr, sp);
}
void LiftoffAssembler::DeallocateStackSlot(uint32_t size) {
- BAILOUT("DeallocateStackSlot");
+ add(sp, sp, Operand(size));
}
void LiftoffStackSlots::Construct() {
- asm_->BAILOUT("LiftoffStackSlots::Construct");
+ for (auto& slot : slots_) {
+ const LiftoffAssembler::VarState& src = slot.src_;
+ switch (src.loc()) {
+ case LiftoffAssembler::VarState::kStack: {
+ switch (src.type()) {
+          // i32 and i64 can be treated as similar cases; an i64 has
+          // previously been split into two i32 halves.
+ case kWasmI32:
+ case kWasmI64:
+ case kWasmF32: {
+ UseScratchRegisterScope temps(asm_);
+ Register scratch = temps.Acquire();
+ asm_->ldr(scratch,
+ liftoff::GetHalfStackSlot(slot.src_index_, slot.half_));
+ asm_->Push(scratch);
+ } break;
+ case kWasmF64: {
+ UseScratchRegisterScope temps(asm_);
+ DwVfpRegister scratch = temps.AcquireD();
+ asm_->vldr(scratch, liftoff::GetStackSlot(slot.src_index_));
+ asm_->vpush(scratch);
+ } break;
+ default:
+ UNREACHABLE();
+ }
+ break;
+ }
+ case LiftoffAssembler::VarState::kRegister:
+ switch (src.type()) {
+ case kWasmI64: {
+ LiftoffRegister reg =
+ slot.half_ == kLowWord ? src.reg().low() : src.reg().high();
+ asm_->push(reg.gp());
+ } break;
+ case kWasmI32:
+ asm_->push(src.reg().gp());
+ break;
+ case kWasmF32:
+ asm_->vpush(liftoff::GetFloatRegister(src.reg().fp()));
+ break;
+ case kWasmF64:
+ asm_->vpush(src.reg().fp());
+ break;
+ default:
+ UNREACHABLE();
+ }
+ break;
+ case LiftoffAssembler::VarState::KIntConst: {
+ DCHECK(src.type() == kWasmI32 || src.type() == kWasmI64);
+ UseScratchRegisterScope temps(asm_);
+ Register scratch = temps.Acquire();
+ // The high word is the sign extension of the low word.
+ asm_->mov(scratch,
+ Operand(slot.half_ == kLowWord ? src.i32_const()
+ : src.i32_const() >> 31));
+ asm_->push(scratch);
+ break;
+ }
+ }
+ }
}
} // namespace wasm
diff --git a/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h b/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
index c73a60fd7d..d85b9b268b 100644
--- a/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
+++ b/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
@@ -41,8 +41,8 @@ namespace liftoff {
// -----+--------------------+ <-- stack ptr (sp)
//
-constexpr int32_t kInstanceOffset = 2 * kPointerSize;
-constexpr int32_t kFirstStackSlotOffset = kInstanceOffset + kPointerSize;
+constexpr int32_t kInstanceOffset = 2 * kSystemPointerSize;
+constexpr int32_t kFirstStackSlotOffset = kInstanceOffset + kSystemPointerSize;
constexpr int32_t kConstantStackSpace = 0;
inline MemOperand GetStackSlot(uint32_t index) {
@@ -148,7 +148,8 @@ void LiftoffAssembler::PatchPrepareStackFrame(int offset,
return;
}
#endif
- PatchingAssembler patching_assembler(AssemblerOptions{}, buffer_ + offset, 1);
+ PatchingAssembler patching_assembler(AssemblerOptions{},
+ buffer_start_ + offset, 1);
patching_assembler.PatchSubSp(bytes);
}
@@ -188,6 +189,11 @@ void LiftoffAssembler::LoadFromInstance(Register dst, uint32_t offset,
}
}
+void LiftoffAssembler::LoadTaggedPointerFromInstance(Register dst,
+ uint32_t offset) {
+ LoadFromInstance(dst, offset, kTaggedSize);
+}
+
void LiftoffAssembler::SpillInstance(Register instance) {
Str(instance, liftoff::GetInstanceOperand());
}
@@ -196,6 +202,15 @@ void LiftoffAssembler::FillInstanceInto(Register dst) {
Ldr(dst, liftoff::GetInstanceOperand());
}
+void LiftoffAssembler::LoadTaggedPointer(Register dst, Register src_addr,
+ Register offset_reg,
+ uint32_t offset_imm,
+ LiftoffRegList pinned) {
+ STATIC_ASSERT(kTaggedSize == kInt64Size);
+ Load(LiftoffRegister(dst), src_addr, offset_reg, offset_imm,
+ LoadType::kI64Load, pinned);
+}
+
void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
Register offset_reg, uint32_t offset_imm,
LoadType type, LiftoffRegList pinned,
@@ -349,7 +364,7 @@ void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index,
Ldr(liftoff::GetRegFromType(reg, type), src);
}
-void LiftoffAssembler::FillI64Half(Register, uint32_t half_index) {
+void LiftoffAssembler::FillI64Half(Register, uint32_t index, RegPairHalf) {
UNREACHABLE();
}
@@ -372,6 +387,11 @@ void LiftoffAssembler::FillI64Half(Register, uint32_t half_index) {
void LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister src) { \
instruction(dst.S(), src.S()); \
}
+#define FP32_UNOP_RETURN_TRUE(name, instruction) \
+ bool LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister src) { \
+ instruction(dst.S(), src.S()); \
+ return true; \
+ }
#define FP64_BINOP(name, instruction) \
void LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister lhs, \
DoubleRegister rhs) { \
@@ -436,10 +456,10 @@ FP32_BINOP(f32_min, Fmin)
FP32_BINOP(f32_max, Fmax)
FP32_UNOP(f32_abs, Fabs)
FP32_UNOP(f32_neg, Fneg)
-FP32_UNOP(f32_ceil, Frintp)
-FP32_UNOP(f32_floor, Frintm)
-FP32_UNOP(f32_trunc, Frintz)
-FP32_UNOP(f32_nearest_int, Frintn)
+FP32_UNOP_RETURN_TRUE(f32_ceil, Frintp)
+FP32_UNOP_RETURN_TRUE(f32_floor, Frintm)
+FP32_UNOP_RETURN_TRUE(f32_trunc, Frintz)
+FP32_UNOP_RETURN_TRUE(f32_nearest_int, Frintn)
FP32_UNOP(f32_sqrt, Fsqrt)
FP64_BINOP(f64_add, Fadd)
FP64_BINOP(f64_sub, Fsub)
@@ -628,12 +648,24 @@ void LiftoffAssembler::emit_i32_to_intptr(Register dst, Register src) {
void LiftoffAssembler::emit_f32_copysign(DoubleRegister dst, DoubleRegister lhs,
DoubleRegister rhs) {
- BAILOUT("f32_copysign");
+ UseScratchRegisterScope temps(this);
+ DoubleRegister scratch = temps.AcquireD();
+ Ushr(scratch.V2S(), rhs.V2S(), 31);
+ if (dst != lhs) {
+ Fmov(dst.S(), lhs.S());
+ }
+ Sli(dst.V2S(), scratch.V2S(), 31);
}
void LiftoffAssembler::emit_f64_copysign(DoubleRegister dst, DoubleRegister lhs,
DoubleRegister rhs) {
- BAILOUT("f64_copysign");
+ UseScratchRegisterScope temps(this);
+ DoubleRegister scratch = temps.AcquireD();
+ Ushr(scratch.V1D(), rhs.V1D(), 63);
+ if (dst != lhs) {
+ Fmov(dst.D(), lhs.D());
+ }
+ Sli(dst.V1D(), scratch.V1D(), 63);
}
bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
diff --git a/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h b/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
index 3a0ace0d62..067c79be32 100644
--- a/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
+++ b/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
@@ -34,8 +34,10 @@ inline Operand GetStackSlot(uint32_t index) {
return Operand(ebp, -kFirstStackSlotOffset - offset);
}
-inline Operand GetHalfStackSlot(uint32_t half_index) {
- int32_t offset = half_index * (LiftoffAssembler::kStackSlotSize / 2);
+inline MemOperand GetHalfStackSlot(uint32_t index, RegPairHalf half) {
+ int32_t half_offset =
+ half == kLowWord ? 0 : LiftoffAssembler::kStackSlotSize / 2;
+ int32_t offset = index * LiftoffAssembler::kStackSlotSize - half_offset;
return Operand(ebp, -kFirstStackSlotOffset - offset);
}
@@ -43,10 +45,7 @@ inline Operand GetHalfStackSlot(uint32_t half_index) {
inline Operand GetInstanceOperand() { return Operand(ebp, -8); }
static constexpr LiftoffRegList kByteRegs =
- LiftoffRegList::FromBits<Register::ListOf<eax, ecx, edx, ebx>()>();
-static_assert(kByteRegs.GetNumRegsSet() == 4, "should have four byte regs");
-static_assert((kByteRegs & kGpCacheRegList) == kByteRegs,
- "kByteRegs only contains gp cache registers");
+ LiftoffRegList::FromBits<Register::ListOf<eax, ecx, edx>()>();
inline void Load(LiftoffAssembler* assm, LiftoffRegister dst, Register base,
int32_t offset, ValueType type) {
@@ -126,6 +125,28 @@ inline void SignExtendI32ToI64(Assembler* assm, LiftoffRegister reg) {
assm->sar(reg.high_gp(), 31);
}
+// Get a temporary byte register, using {candidate} if possible.
+// Might spill, but always keeps status flags intact.
+inline Register GetTmpByteRegister(LiftoffAssembler* assm, Register candidate) {
+ if (candidate.is_byte_register()) return candidate;
+ // {GetUnusedRegister()} may insert move instructions to spill registers to
+ // the stack. This is OK because {mov} does not change the status flags.
+ return assm->GetUnusedRegister(liftoff::kByteRegs).gp();
+}
+
+inline void MoveStackValue(LiftoffAssembler* assm, const Operand& src,
+ const Operand& dst) {
+ if (assm->cache_state()->has_unused_register(kGpReg)) {
+ Register tmp = assm->cache_state()->unused_register(kGpReg).gp();
+ assm->mov(tmp, src);
+ assm->mov(dst, tmp);
+ } else {
+ // No free register, move via the stack.
+ assm->push(src);
+ assm->pop(dst);
+ }
+}
+
constexpr DoubleRegister kScratchDoubleReg = xmm7;
constexpr int kSubSpSize = 6; // 6 bytes for "sub esp, <imm32>"
@@ -146,8 +167,9 @@ void LiftoffAssembler::PatchPrepareStackFrame(int offset,
// We can't run out of space, just pass anything big enough to not cause the
// assembler to try to grow the buffer.
constexpr int kAvailableSpace = 64;
- Assembler patching_assembler(AssemblerOptions{}, buffer_ + offset,
- kAvailableSpace);
+ Assembler patching_assembler(
+ AssemblerOptions{},
+ ExternalAssemblerBuffer(buffer_start_ + offset, kAvailableSpace));
#if V8_OS_WIN
constexpr int kPageSize = 4 * 1024;
if (bytes > kPageSize) {
@@ -216,6 +238,11 @@ void LiftoffAssembler::LoadFromInstance(Register dst, uint32_t offset,
mov(dst, Operand(dst, offset));
}
+void LiftoffAssembler::LoadTaggedPointerFromInstance(Register dst,
+ uint32_t offset) {
+ LoadFromInstance(dst, offset, kTaggedSize);
+}
+
void LiftoffAssembler::SpillInstance(Register instance) {
mov(liftoff::GetInstanceOperand(), instance);
}
@@ -224,6 +251,15 @@ void LiftoffAssembler::FillInstanceInto(Register dst) {
mov(dst, liftoff::GetInstanceOperand());
}
+void LiftoffAssembler::LoadTaggedPointer(Register dst, Register src_addr,
+ Register offset_reg,
+ uint32_t offset_imm,
+ LiftoffRegList pinned) {
+ STATIC_ASSERT(kTaggedSize == kInt32Size);
+ Load(LiftoffRegister(dst), src_addr, offset_reg, offset_imm,
+ LoadType::kI32Load, pinned);
+}
+
void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
Register offset_reg, uint32_t offset_imm,
LoadType type, LiftoffRegList pinned,
@@ -324,7 +360,13 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
if (src.gp().is_byte_register()) {
mov_b(dst_op, src.gp());
} else {
- Register byte_src = GetUnusedRegister(liftoff::kByteRegs, pinned).gp();
+ // We know that {src} is not a byte register, so the only pinned byte
+      // registers (besides the outer {pinned}) are {dst_addr} and potentially
+ // {offset_reg}.
+ LiftoffRegList pinned_byte = pinned | LiftoffRegList::ForRegs(dst_addr);
+ if (offset_reg != no_reg) pinned_byte.set(offset_reg);
+ Register byte_src =
+ GetUnusedRegister(liftoff::kByteRegs, pinned_byte).gp();
mov(byte_src, src.gp());
mov_b(dst_op, byte_src);
}
@@ -367,19 +409,22 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
uint32_t caller_slot_idx,
ValueType type) {
- liftoff::Load(this, dst, ebp, kPointerSize * (caller_slot_idx + 1), type);
+ liftoff::Load(this, dst, ebp, kSystemPointerSize * (caller_slot_idx + 1),
+ type);
}
void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index,
ValueType type) {
- DCHECK_NE(dst_index, src_index);
- if (cache_state_.has_unused_register(kGpReg)) {
- LiftoffRegister reg = GetUnusedRegister(kGpReg);
- Fill(reg, src_index, type);
- Spill(dst_index, reg, type);
+ if (needs_reg_pair(type)) {
+ liftoff::MoveStackValue(this,
+ liftoff::GetHalfStackSlot(src_index, kLowWord),
+ liftoff::GetHalfStackSlot(dst_index, kLowWord));
+ liftoff::MoveStackValue(this,
+ liftoff::GetHalfStackSlot(src_index, kHighWord),
+ liftoff::GetHalfStackSlot(dst_index, kHighWord));
} else {
- push(liftoff::GetStackSlot(src_index));
- pop(liftoff::GetStackSlot(dst_index));
+ liftoff::MoveStackValue(this, liftoff::GetStackSlot(src_index),
+ liftoff::GetStackSlot(dst_index));
}
}
@@ -409,8 +454,8 @@ void LiftoffAssembler::Spill(uint32_t index, LiftoffRegister reg,
mov(dst, reg.gp());
break;
case kWasmI64:
- mov(dst, reg.low_gp());
- mov(liftoff::GetHalfStackSlot(2 * index - 1), reg.high_gp());
+ mov(liftoff::GetHalfStackSlot(index, kLowWord), reg.low_gp());
+ mov(liftoff::GetHalfStackSlot(index, kHighWord), reg.high_gp());
break;
case kWasmF32:
movss(dst, reg.fp());
@@ -433,8 +478,8 @@ void LiftoffAssembler::Spill(uint32_t index, WasmValue value) {
case kWasmI64: {
int32_t low_word = value.to_i64();
int32_t high_word = value.to_i64() >> 32;
- mov(dst, Immediate(low_word));
- mov(liftoff::GetHalfStackSlot(2 * index - 1), Immediate(high_word));
+ mov(liftoff::GetHalfStackSlot(index, kLowWord), Immediate(low_word));
+ mov(liftoff::GetHalfStackSlot(index, kHighWord), Immediate(high_word));
break;
}
default:
@@ -451,8 +496,8 @@ void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index,
mov(reg.gp(), src);
break;
case kWasmI64:
- mov(reg.low_gp(), src);
- mov(reg.high_gp(), liftoff::GetHalfStackSlot(2 * index - 1));
+ mov(reg.low_gp(), liftoff::GetHalfStackSlot(index, kLowWord));
+ mov(reg.high_gp(), liftoff::GetHalfStackSlot(index, kHighWord));
break;
case kWasmF32:
movss(reg.fp(), src);
@@ -465,8 +510,9 @@ void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index,
}
}
-void LiftoffAssembler::FillI64Half(Register reg, uint32_t half_index) {
- mov(reg, liftoff::GetHalfStackSlot(half_index));
+void LiftoffAssembler::FillI64Half(Register reg, uint32_t index,
+ RegPairHalf half) {
+ mov(reg, liftoff::GetHalfStackSlot(index, half));
}
void LiftoffAssembler::emit_i32_add(Register dst, Register lhs, Register rhs) {
@@ -478,12 +524,17 @@ void LiftoffAssembler::emit_i32_add(Register dst, Register lhs, Register rhs) {
}
void LiftoffAssembler::emit_i32_sub(Register dst, Register lhs, Register rhs) {
- if (dst == rhs) {
- neg(dst);
- add(dst, lhs);
- } else {
+ if (dst != rhs) {
+ // Default path.
if (dst != lhs) mov(dst, lhs);
sub(dst, rhs);
+ } else if (lhs == rhs) {
+ // Degenerate case.
+ xor_(dst, dst);
+ } else {
+ // Emit {dst = lhs + -rhs} if dst == rhs.
+ neg(dst);
+ add(dst, lhs);
}
}
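// For reference (sketch): with two-operand x86 arithmetic, dst = lhs - rhs
// for dst == rhs is rewritten as
//   neg(dst);       // dst = -rhs
//   add(dst, lhs);  // dst = lhs + (-rhs) == lhs - rhs
// avoiding a scratch register, while dst == rhs == lhs degenerates to
// lhs - lhs == 0, handled by the xor.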
@@ -768,15 +819,16 @@ void LiftoffAssembler::emit_i64_mul(LiftoffRegister dst, LiftoffRegister lhs,
Register lhs_hi = ecx;
Register lhs_lo = dst_lo;
Register rhs_hi = dst_hi;
- Register rhs_lo = ebx;
+ Register rhs_lo = esi;
// Spill all these registers if they are still holding other values.
liftoff::SpillRegisters(this, dst_hi, dst_lo, lhs_hi, rhs_lo);
// Move lhs and rhs into the respective registers.
- ParallelRegisterMove(
- {{LiftoffRegister::ForPair(lhs_lo, lhs_hi), lhs, kWasmI64},
- {LiftoffRegister::ForPair(rhs_lo, rhs_hi), rhs, kWasmI64}});
+ ParallelRegisterMoveTuple reg_moves[]{
+ {LiftoffRegister::ForPair(lhs_lo, lhs_hi), lhs, kWasmI64},
+ {LiftoffRegister::ForPair(rhs_lo, rhs_hi), rhs, kWasmI64}};
+ ParallelRegisterMove(ArrayVector(reg_moves));
// First mul: lhs_hi' = lhs_hi * rhs_lo.
imul(lhs_hi, rhs_lo);
@@ -784,7 +836,7 @@ void LiftoffAssembler::emit_i64_mul(LiftoffRegister dst, LiftoffRegister lhs,
imul(rhs_hi, lhs_lo);
// Add them: lhs_hi'' = lhs_hi' + rhs_hi' = lhs_hi * rhs_lo + rhs_hi * lhs_lo.
add(lhs_hi, rhs_hi);
- // Third mul: edx:eax (dst_hi:dst_lo) = eax * ebx (lhs_lo * rhs_lo).
+ // Third mul: edx:eax (dst_hi:dst_lo) = eax * esi (lhs_lo * rhs_lo).
mul(rhs_lo);
// Add lhs_hi'' to dst_hi.
add(dst_hi, lhs_hi);
@@ -839,27 +891,32 @@ inline void Emit64BitShiftOperation(
LiftoffAssembler* assm, LiftoffRegister dst, LiftoffRegister src,
Register amount, void (TurboAssembler::*emit_shift)(Register, Register),
LiftoffRegList pinned) {
+ // Temporary registers cannot overlap with {dst}.
pinned.set(dst);
- pinned.set(src);
- pinned.set(amount);
+
+ constexpr size_t kMaxRegMoves = 3;
+ base::SmallVector<LiftoffAssembler::ParallelRegisterMoveTuple, kMaxRegMoves>
+ reg_moves;
+
// If {dst} contains {ecx}, replace it by an unused register, which is then
// moved to {ecx} at the end.
Register ecx_replace = no_reg;
if (PairContains(dst, ecx)) {
- ecx_replace = pinned.set(assm->GetUnusedRegister(kGpReg, pinned)).gp();
+ ecx_replace = assm->GetUnusedRegister(kGpReg, pinned).gp();
dst = ReplaceInPair(dst, ecx, ecx_replace);
// If {amount} needs to be moved to {ecx}, but {ecx} is in use (and not part
// of {dst}, hence overwritten anyway), move {ecx} to a tmp register and
// restore it at the end.
} else if (amount != ecx &&
- assm->cache_state()->is_used(LiftoffRegister(ecx))) {
+ (assm->cache_state()->is_used(LiftoffRegister(ecx)) ||
+ pinned.has(LiftoffRegister(ecx)))) {
ecx_replace = assm->GetUnusedRegister(kGpReg, pinned).gp();
- assm->mov(ecx_replace, ecx);
+ reg_moves.emplace_back(ecx_replace, ecx, kWasmI32);
}
- assm->ParallelRegisterMove(
- {{dst, src, kWasmI64},
- {LiftoffRegister{ecx}, LiftoffRegister{amount}, kWasmI32}});
+ reg_moves.emplace_back(dst, src, kWasmI64);
+ reg_moves.emplace_back(ecx, amount, kWasmI32);
+ assm->ParallelRegisterMove(VectorOf(reg_moves));
// Do the actual shift.
(assm->*emit_shift)(dst.high_gp(), dst.low_gp());
@@ -1063,25 +1120,41 @@ void LiftoffAssembler::emit_f32_neg(DoubleRegister dst, DoubleRegister src) {
}
}
-void LiftoffAssembler::emit_f32_ceil(DoubleRegister dst, DoubleRegister src) {
- REQUIRE_CPU_FEATURE(SSE4_1);
- roundss(dst, src, kRoundUp);
+bool LiftoffAssembler::emit_f32_ceil(DoubleRegister dst, DoubleRegister src) {
+ if (CpuFeatures::IsSupported(SSE4_1)) {
+ CpuFeatureScope feature(this, SSE4_1);
+ roundss(dst, src, kRoundUp);
+ return true;
+ }
+ return false;
}
-void LiftoffAssembler::emit_f32_floor(DoubleRegister dst, DoubleRegister src) {
- REQUIRE_CPU_FEATURE(SSE4_1);
- roundss(dst, src, kRoundDown);
+bool LiftoffAssembler::emit_f32_floor(DoubleRegister dst, DoubleRegister src) {
+ if (CpuFeatures::IsSupported(SSE4_1)) {
+ CpuFeatureScope feature(this, SSE4_1);
+ roundss(dst, src, kRoundDown);
+ return true;
+ }
+ return false;
}
-void LiftoffAssembler::emit_f32_trunc(DoubleRegister dst, DoubleRegister src) {
- REQUIRE_CPU_FEATURE(SSE4_1);
- roundss(dst, src, kRoundToZero);
+bool LiftoffAssembler::emit_f32_trunc(DoubleRegister dst, DoubleRegister src) {
+ if (CpuFeatures::IsSupported(SSE4_1)) {
+ CpuFeatureScope feature(this, SSE4_1);
+ roundss(dst, src, kRoundToZero);
+ return true;
+ }
+ return false;
}
-void LiftoffAssembler::emit_f32_nearest_int(DoubleRegister dst,
+bool LiftoffAssembler::emit_f32_nearest_int(DoubleRegister dst,
DoubleRegister src) {
- REQUIRE_CPU_FEATURE(SSE4_1);
- roundss(dst, src, kRoundToNearest);
+ if (CpuFeatures::IsSupported(SSE4_1)) {
+ CpuFeatureScope feature(this, SSE4_1);
+ roundss(dst, src, kRoundToNearest);
+ return true;
+ }
+ return false;
}
void LiftoffAssembler::emit_f32_sqrt(DoubleRegister dst, DoubleRegister src) {
@@ -1239,7 +1312,8 @@ inline void ConvertFloatToIntAndBack(LiftoffAssembler* assm, Register dst,
assm->Cvtsi2sd(converted_back, dst);
} else { // f64 -> u32
assm->Cvttsd2ui(dst, src, liftoff::kScratchDoubleReg);
- assm->Cvtui2sd(converted_back, dst);
+ assm->Cvtui2sd(converted_back, dst,
+ assm->GetUnusedRegister(kGpReg, pinned).gp());
}
} else { // f32
if (std::is_signed<dst_type>::value) { // f32 -> i32
@@ -1346,9 +1420,12 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
case kExprF64SConvertI32:
Cvtsi2sd(dst.fp(), src.gp());
return true;
- case kExprF64UConvertI32:
- Cvtui2sd(dst.fp(), src.gp());
+ case kExprF64UConvertI32: {
+ LiftoffRegList pinned = LiftoffRegList::ForRegs(dst, src);
+ Register scratch = GetUnusedRegister(kGpReg, pinned).gp();
+ Cvtui2sd(dst.fp(), src.gp(), scratch);
return true;
+ }
case kExprF64ConvertF32:
cvtss2sd(dst.fp(), src.fp());
return true;
@@ -1366,7 +1443,9 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
}
void LiftoffAssembler::emit_i32_signextend_i8(Register dst, Register src) {
- movsx_b(dst, src);
+ Register byte_reg = liftoff::GetTmpByteRegister(this, src);
+ if (byte_reg != src) mov(byte_reg, src);
+ movsx_b(dst, byte_reg);
}
void LiftoffAssembler::emit_i32_signextend_i16(Register dst, Register src) {
@@ -1375,7 +1454,9 @@ void LiftoffAssembler::emit_i32_signextend_i16(Register dst, Register src) {
void LiftoffAssembler::emit_i64_signextend_i8(LiftoffRegister dst,
LiftoffRegister src) {
- movsx_b(dst.low_gp(), src.low_gp());
+ Register byte_reg = liftoff::GetTmpByteRegister(this, src.low_gp());
+ if (byte_reg != src.low_gp()) mov(byte_reg, src.low_gp());
+ movsx_b(dst.low_gp(), byte_reg);
liftoff::SignExtendI32ToI64(this, dst);
}
@@ -1416,16 +1497,6 @@ void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label,
namespace liftoff {
-// Get a temporary byte register, using {candidate} if possible.
-// Might spill, but always keeps status flags intact.
-inline Register GetTmpByteRegister(LiftoffAssembler* assm, Register candidate) {
- if (candidate.is_byte_register()) return candidate;
- LiftoffRegList pinned = LiftoffRegList::ForRegs(candidate);
- // {GetUnusedRegister()} may insert move instructions to spill registers to
- // the stack. This is OK because {mov} does not change the status flags.
- return assm->GetUnusedRegister(liftoff::kByteRegs, pinned).gp();
-}
-
// Setcc into dst register, given a scratch byte register (might be the same as
// dst). Never spills.
inline void setcc_32_no_spill(LiftoffAssembler* assm, Condition cond,
@@ -1606,8 +1677,9 @@ void LiftoffAssembler::PopRegisters(LiftoffRegList regs) {
}
void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
- DCHECK_LT(num_stack_slots, (1 << 16) / kPointerSize); // 16 bit immediate
- ret(static_cast<int>(num_stack_slots * kPointerSize));
+ DCHECK_LT(num_stack_slots,
+ (1 << 16) / kSystemPointerSize); // 16 bit immediate
+ ret(static_cast<int>(num_stack_slots * kSystemPointerSize));
}
void LiftoffAssembler::CallC(wasm::FunctionSig* sig,
@@ -1695,10 +1767,9 @@ void LiftoffStackSlots::Construct() {
case LiftoffAssembler::VarState::kStack:
if (src.type() == kWasmF64) {
DCHECK_EQ(kLowWord, slot.half_);
- asm_->push(liftoff::GetHalfStackSlot(2 * slot.src_index_ - 1));
+ asm_->push(liftoff::GetHalfStackSlot(slot.src_index_, kHighWord));
}
- asm_->push(liftoff::GetHalfStackSlot(2 * slot.src_index_ -
- (slot.half_ == kLowWord ? 0 : 1)));
+ asm_->push(liftoff::GetHalfStackSlot(slot.src_index_, slot.half_));
break;
case LiftoffAssembler::VarState::kRegister:
if (src.type() == kWasmI64) {
diff --git a/deps/v8/src/wasm/baseline/liftoff-assembler-defs.h b/deps/v8/src/wasm/baseline/liftoff-assembler-defs.h
index c8d8dab1d9..d8ce32182c 100644
--- a/deps/v8/src/wasm/baseline/liftoff-assembler-defs.h
+++ b/deps/v8/src/wasm/baseline/liftoff-assembler-defs.h
@@ -15,24 +15,20 @@ namespace wasm {
#if V8_TARGET_ARCH_IA32
constexpr RegList kLiftoffAssemblerGpCacheRegs =
- Register::ListOf<eax, ecx, edx, ebx, esi, edi>();
+ Register::ListOf<eax, ecx, edx, esi, edi>();
// Omit xmm7, which is the kScratchDoubleReg.
constexpr RegList kLiftoffAssemblerFpCacheRegs =
DoubleRegister::ListOf<xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6>();
-constexpr Register kNoParamRegister = edi;
-
#elif V8_TARGET_ARCH_X64
constexpr RegList kLiftoffAssemblerGpCacheRegs =
- Register::ListOf<rax, rcx, rdx, rbx, rsi, rdi>();
+ Register::ListOf<rax, rcx, rdx, rbx, rsi, rdi, r9>();
constexpr RegList kLiftoffAssemblerFpCacheRegs =
DoubleRegister::ListOf<xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7>();
-constexpr Register kNoParamRegister = r8;
-
#elif V8_TARGET_ARCH_MIPS
constexpr RegList kLiftoffAssemblerGpCacheRegs =
@@ -42,8 +38,6 @@ constexpr RegList kLiftoffAssemblerFpCacheRegs =
DoubleRegister::ListOf<f0, f2, f4, f6, f8, f10, f12, f14, f16, f18, f20,
f22, f24>();
-constexpr Register kNoParamRegister = t0;
-
#elif V8_TARGET_ARCH_MIPS64
constexpr RegList kLiftoffAssemblerGpCacheRegs =
@@ -53,7 +47,16 @@ constexpr RegList kLiftoffAssemblerFpCacheRegs =
DoubleRegister::ListOf<f0, f2, f4, f6, f8, f10, f12, f14, f16, f18, f20,
f22, f24, f26>();
-constexpr Register kNoParamRegister = t0;
+#elif V8_TARGET_ARCH_ARM
+
+// r7: cp, r10: root, r11: fp, r12: ip, r13: sp, r14: lr, r15: pc.
+constexpr RegList kLiftoffAssemblerGpCacheRegs =
+ Register::ListOf<r0, r1, r2, r3, r4, r5, r6, r8, r9>();
+
+// d13: zero, d14-d15: scratch
+constexpr RegList kLiftoffAssemblerFpCacheRegs =
+ LowDwVfpRegister::ListOf<d0, d1, d2, d3, d4, d5, d6, d7, d8, d9, d10, d11,
+ d12>();
#elif V8_TARGET_ARCH_ARM64
@@ -69,18 +72,12 @@ constexpr RegList kLiftoffAssemblerFpCacheRegs =
d13, d14, d16, d17, d18, d19, d20, d21, d22, d23, d24,
d25, d26, d27, d28, d29>();
-constexpr Register kNoParamRegister = x28;
-
#else
constexpr RegList kLiftoffAssemblerGpCacheRegs = 0xff;
constexpr RegList kLiftoffAssemblerFpCacheRegs = 0xff;
-// This should be an allocatable, general purpose register
-// that is not used for parameters, see {wasm-linkage.cc}.
-constexpr Register kNoParamRegister = Register::no_reg();
-
#endif
#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64
diff --git a/deps/v8/src/wasm/baseline/liftoff-assembler.cc b/deps/v8/src/wasm/baseline/liftoff-assembler.cc
index 63cc7344b3..e7415e2079 100644
--- a/deps/v8/src/wasm/baseline/liftoff-assembler.cc
+++ b/deps/v8/src/wasm/baseline/liftoff-assembler.cc
@@ -7,10 +7,13 @@
#include <sstream>
#include "src/assembler-inl.h"
+#include "src/base/optional.h"
#include "src/compiler/linkage.h"
#include "src/compiler/wasm-compiler.h"
#include "src/macro-assembler-inl.h"
+#include "src/ostreams.h"
#include "src/wasm/function-body-decoder-impl.h"
+#include "src/wasm/wasm-linkage.h"
#include "src/wasm/wasm-opcodes.h"
namespace v8 {
@@ -30,47 +33,44 @@ namespace {
class StackTransferRecipe {
struct RegisterMove {
- LiftoffRegister dst;
LiftoffRegister src;
ValueType type;
- constexpr RegisterMove(LiftoffRegister dst, LiftoffRegister src,
- ValueType type)
- : dst(dst), src(src), type(type) {}
+ constexpr RegisterMove(LiftoffRegister src, ValueType type)
+ : src(src), type(type) {}
};
+
struct RegisterLoad {
enum LoadKind : uint8_t {
- kConstant, // load a constant value into a register.
- kStack, // fill a register from a stack slot.
- kHalfStack // fill one half of a register pair from half a stack slot.
+ kConstant, // load a constant value into a register.
+ kStack, // fill a register from a stack slot.
+ kLowHalfStack, // fill a register from the low half of a stack slot.
+ kHighHalfStack // fill a register from the high half of a stack slot.
};
- LiftoffRegister dst;
LoadKind kind;
ValueType type;
int32_t value; // i32 constant value or stack index, depending on kind.
// Named constructors.
- static RegisterLoad Const(LiftoffRegister dst, WasmValue constant) {
+ static RegisterLoad Const(WasmValue constant) {
if (constant.type() == kWasmI32) {
- return {dst, kConstant, kWasmI32, constant.to_i32()};
+ return {kConstant, kWasmI32, constant.to_i32()};
}
DCHECK_EQ(kWasmI64, constant.type());
DCHECK_EQ(constant.to_i32_unchecked(), constant.to_i64_unchecked());
- return {dst, kConstant, kWasmI64, constant.to_i32_unchecked()};
+ return {kConstant, kWasmI64, constant.to_i32_unchecked()};
}
- static RegisterLoad Stack(LiftoffRegister dst, int32_t stack_index,
- ValueType type) {
- return {dst, kStack, type, stack_index};
+ static RegisterLoad Stack(int32_t stack_index, ValueType type) {
+ return {kStack, type, stack_index};
}
- static RegisterLoad HalfStack(LiftoffRegister dst,
- int32_t half_stack_index) {
- return {dst, kHalfStack, kWasmI32, half_stack_index};
+ static RegisterLoad HalfStack(int32_t stack_index, RegPairHalf half) {
+ return {half == kLowWord ? kLowHalfStack : kHighHalfStack, kWasmI32,
+ stack_index};
}
private:
- RegisterLoad(LiftoffRegister dst, LoadKind kind, ValueType type,
- int32_t value)
- : dst(dst), kind(kind), type(type), value(value) {}
+ RegisterLoad(LoadKind kind, ValueType type, int32_t value)
+ : kind(kind), type(type), value(value) {}
};
public:
@@ -80,81 +80,18 @@ class StackTransferRecipe {
void Execute() {
// First, execute register moves. Then load constants and stack values into
// registers.
-
- if ((move_dst_regs_ & move_src_regs_).is_empty()) {
- // No overlap in src and dst registers. Just execute the moves in any
- // order.
- for (RegisterMove& rm : register_moves_) {
- asm_->Move(rm.dst, rm.src, rm.type);
- }
- register_moves_.clear();
- } else {
- // Keep use counters of src registers.
- uint32_t src_reg_use_count[kAfterMaxLiftoffRegCode] = {0};
- for (RegisterMove& rm : register_moves_) {
- ++src_reg_use_count[rm.src.liftoff_code()];
- }
- // Now repeatedly iterate the list of register moves, and execute those
- // whose dst register does not appear as src any more. The remaining moves
- // are compacted during this iteration.
- // If no more moves can be executed (because of a cycle), spill one
- // register to the stack, add a RegisterLoad to reload it later, and
- // continue.
- uint32_t next_spill_slot = asm_->cache_state()->stack_height();
- while (!register_moves_.empty()) {
- int executed_moves = 0;
- for (auto& rm : register_moves_) {
- if (src_reg_use_count[rm.dst.liftoff_code()] == 0) {
- asm_->Move(rm.dst, rm.src, rm.type);
- ++executed_moves;
- DCHECK_LT(0, src_reg_use_count[rm.src.liftoff_code()]);
- --src_reg_use_count[rm.src.liftoff_code()];
- } else if (executed_moves) {
- // Compaction: Move not-executed moves to the beginning of the list.
- (&rm)[-executed_moves] = rm;
- }
- }
- if (executed_moves == 0) {
- // There is a cycle. Spill one register, then continue.
- // TODO(clemensh): Use an unused register if available.
- RegisterMove& rm = register_moves_.back();
- LiftoffRegister spill_reg = rm.src;
- asm_->Spill(next_spill_slot, spill_reg, rm.type);
- // Remember to reload into the destination register later.
- LoadStackSlot(register_moves_.back().dst, next_spill_slot, rm.type);
- DCHECK_EQ(1, src_reg_use_count[spill_reg.liftoff_code()]);
- src_reg_use_count[spill_reg.liftoff_code()] = 0;
- ++next_spill_slot;
- executed_moves = 1;
- }
- register_moves_.erase(register_moves_.end() - executed_moves,
- register_moves_.end());
- }
- }
-
- for (RegisterLoad& rl : register_loads_) {
- switch (rl.kind) {
- case RegisterLoad::kConstant:
- asm_->LoadConstant(rl.dst, rl.type == kWasmI64
- ? WasmValue(int64_t{rl.value})
- : WasmValue(int32_t{rl.value}));
- break;
- case RegisterLoad::kStack:
- asm_->Fill(rl.dst, rl.value, rl.type);
- break;
- case RegisterLoad::kHalfStack:
- // As half of a register pair, {rl.dst} must be a gp register.
- asm_->FillI64Half(rl.dst.gp(), rl.value);
- break;
- }
- }
- register_loads_.clear();
+ ExecuteMoves();
+ DCHECK(move_dst_regs_.is_empty());
+ ExecuteLoads();
+ DCHECK(load_dst_regs_.is_empty());
}
void TransferStackSlot(const LiftoffAssembler::CacheState& dst_state,
- uint32_t dst_index, uint32_t src_index) {
+ uint32_t dst_index,
+ const LiftoffAssembler::CacheState& src_state,
+ uint32_t src_index) {
const VarState& dst = dst_state.stack_state[dst_index];
- const VarState& src = __ cache_state()->stack_state[src_index];
+ const VarState& src = src_state.stack_state[src_index];
DCHECK_EQ(dst.type(), src.type());
switch (dst.loc()) {
case VarState::kStack:
@@ -206,7 +143,7 @@ class StackTransferRecipe {
DCHECK_EQ(kWasmI64, src.type());
switch (src.loc()) {
case VarState::kStack:
- LoadI64HalfStackSlot(dst, 2 * index - (half == kLowWord ? 0 : 1));
+ LoadI64HalfStackSlot(dst, index, half);
break;
case VarState::kRegister: {
LiftoffRegister src_half =
@@ -234,99 +171,302 @@ class StackTransferRecipe {
MoveRegister(dst.high(), src.high(), kWasmI32);
return;
}
- DCHECK(!move_dst_regs_.has(dst));
+ if (move_dst_regs_.has(dst)) {
+ DCHECK_EQ(register_move(dst)->src, src);
+ // Non-fp registers can only occur with the exact same type.
+ DCHECK_IMPLIES(!dst.is_fp(), register_move(dst)->type == type);
+ // It can happen that one fp register holds both the f32 zero and the f64
+ // zero, as the initial value for local variables. Move the value as f64
+ // in that case.
+ if (type == kWasmF64) register_move(dst)->type = kWasmF64;
+ return;
+ }
move_dst_regs_.set(dst);
- move_src_regs_.set(src);
- register_moves_.emplace_back(dst, src, type);
+ ++*src_reg_use_count(src);
+ *register_move(dst) = {src, type};
}
void LoadConstant(LiftoffRegister dst, WasmValue value) {
- register_loads_.push_back(RegisterLoad::Const(dst, value));
+ DCHECK(!load_dst_regs_.has(dst));
+ load_dst_regs_.set(dst);
+ if (dst.is_pair()) {
+ DCHECK_EQ(kWasmI64, value.type());
+ int64_t i64 = value.to_i64();
+ *register_load(dst.low()) =
+ RegisterLoad::Const(WasmValue(static_cast<int32_t>(i64)));
+ *register_load(dst.high()) =
+ RegisterLoad::Const(WasmValue(static_cast<int32_t>(i64 >> 32)));
+ } else {
+ *register_load(dst) = RegisterLoad::Const(value);
+ }
}
void LoadStackSlot(LiftoffRegister dst, uint32_t stack_index,
ValueType type) {
- register_loads_.push_back(RegisterLoad::Stack(dst, stack_index, type));
+ if (load_dst_regs_.has(dst)) {
+ // It can happen that we spilled the same register to different stack
+      // It can happen that we spilled the same register to different stack
+      // slots, and then reload it later into the same dst register.
+      // In that case, it is enough to load one of the stack slots.
+ }
+ load_dst_regs_.set(dst);
+ if (dst.is_pair()) {
+ DCHECK_EQ(kWasmI64, type);
+ *register_load(dst.low()) =
+ RegisterLoad::HalfStack(stack_index, kLowWord);
+ *register_load(dst.high()) =
+ RegisterLoad::HalfStack(stack_index, kHighWord);
+ } else {
+ *register_load(dst) = RegisterLoad::Stack(stack_index, type);
+ }
}
- void LoadI64HalfStackSlot(LiftoffRegister dst, uint32_t half_stack_index) {
- register_loads_.push_back(RegisterLoad::HalfStack(dst, half_stack_index));
+ void LoadI64HalfStackSlot(LiftoffRegister dst, uint32_t stack_index,
+ RegPairHalf half) {
+ if (load_dst_regs_.has(dst)) {
+      // It can happen that we spilled the same register to different stack
+      // slots, and then reload it later into the same dst register.
+      // In that case, it is enough to load one of the stack slots.
+ return;
+ }
+ load_dst_regs_.set(dst);
+ *register_load(dst) = RegisterLoad::HalfStack(stack_index, half);
}
private:
- // TODO(clemensh): Avoid unconditionally allocating on the heap.
- std::vector<RegisterMove> register_moves_;
- std::vector<RegisterLoad> register_loads_;
+ using MovesStorage =
+ std::aligned_storage<kAfterMaxLiftoffRegCode * sizeof(RegisterMove),
+ alignof(RegisterMove)>::type;
+ using LoadsStorage =
+ std::aligned_storage<kAfterMaxLiftoffRegCode * sizeof(RegisterLoad),
+ alignof(RegisterLoad)>::type;
+
+ ASSERT_TRIVIALLY_COPYABLE(RegisterMove);
+ ASSERT_TRIVIALLY_COPYABLE(RegisterLoad);
+
+ MovesStorage register_moves_; // uninitialized
+ LoadsStorage register_loads_; // uninitialized
+ int src_reg_use_count_[kAfterMaxLiftoffRegCode] = {0};
LiftoffRegList move_dst_regs_;
- LiftoffRegList move_src_regs_;
+ LiftoffRegList load_dst_regs_;
LiftoffAssembler* const asm_;
+
+ RegisterMove* register_move(LiftoffRegister reg) {
+ return reinterpret_cast<RegisterMove*>(&register_moves_) +
+ reg.liftoff_code();
+ }
+ RegisterLoad* register_load(LiftoffRegister reg) {
+ return reinterpret_cast<RegisterLoad*>(&register_loads_) +
+ reg.liftoff_code();
+ }
+ int* src_reg_use_count(LiftoffRegister reg) {
+ return src_reg_use_count_ + reg.liftoff_code();
+ }
+
+ void ExecuteMove(LiftoffRegister dst) {
+ RegisterMove* move = register_move(dst);
+ DCHECK_EQ(0, *src_reg_use_count(dst));
+ asm_->Move(dst, move->src, move->type);
+ ClearExecutedMove(dst);
+ }
+
+ void ClearExecutedMove(LiftoffRegister dst) {
+ DCHECK(move_dst_regs_.has(dst));
+ move_dst_regs_.clear(dst);
+ RegisterMove* move = register_move(dst);
+ DCHECK_LT(0, *src_reg_use_count(move->src));
+ if (--*src_reg_use_count(move->src)) return;
+ // src count dropped to zero. If this is a destination register, execute
+ // that move now.
+ if (!move_dst_regs_.has(move->src)) return;
+ ExecuteMove(move->src);
+ }
+
+ void ExecuteMoves() {
+ // Execute all moves whose {dst} is not being used as src in another move.
+ // If any src count drops to zero, also (transitively) execute the
+ // corresponding move to that register.
+ for (LiftoffRegister dst : move_dst_regs_) {
+ // Check if already handled via transitivity in {ClearExecutedMove}.
+ if (!move_dst_regs_.has(dst)) continue;
+ if (*src_reg_use_count(dst)) continue;
+ ExecuteMove(dst);
+ }
+
+ // All remaining moves are parts of a cycle. Just spill the first one, then
+ // process all remaining moves in that cycle. Repeat for all cycles.
+ uint32_t next_spill_slot = asm_->cache_state()->stack_height();
+ while (!move_dst_regs_.is_empty()) {
+ // TODO(clemensh): Use an unused register if available.
+ LiftoffRegister dst = move_dst_regs_.GetFirstRegSet();
+ RegisterMove* move = register_move(dst);
+ LiftoffRegister spill_reg = move->src;
+ asm_->Spill(next_spill_slot, spill_reg, move->type);
+ // Remember to reload into the destination register later.
+ LoadStackSlot(dst, next_spill_slot, move->type);
+ ++next_spill_slot;
+ ClearExecutedMove(dst);
+ }
+ }
+
+ void ExecuteLoads() {
+ for (LiftoffRegister dst : load_dst_regs_) {
+ RegisterLoad* load = register_load(dst);
+ switch (load->kind) {
+ case RegisterLoad::kConstant:
+ asm_->LoadConstant(dst, load->type == kWasmI64
+ ? WasmValue(int64_t{load->value})
+ : WasmValue(int32_t{load->value}));
+ break;
+ case RegisterLoad::kStack:
+ asm_->Fill(dst, load->value, load->type);
+ break;
+ case RegisterLoad::kLowHalfStack:
+ // Half of a register pair; {dst} must be a gp register.
+ asm_->FillI64Half(dst.gp(), load->value, kLowWord);
+ break;
+ case RegisterLoad::kHighHalfStack:
+ // Half of a register pair; {dst} must be a gp register.
+ asm_->FillI64Half(dst.gp(), load->value, kHighWord);
+ break;
+ }
+ }
+ load_dst_regs_ = {};
+ }
+
+ DISALLOW_COPY_AND_ASSIGN(StackTransferRecipe);
};
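
The recipe above resolves a parallel move: every destination register has exactly one source, a source may feed several destinations, and cycles (e.g. a swap) are broken by spilling one source to a stack slot and reloading it afterwards. A toy model of the same algorithm, with plain int register codes and printf in place of emitted code:

    #include <cstdio>
    #include <map>

    struct Resolver {
      std::map<int, int> moves;     // dst -> src (one move per destination)
      std::map<int, int> src_uses;  // how often a register is used as a source

      void Add(int dst, int src) { moves[dst] = src; ++src_uses[src]; }

      // Execute every move whose destination is no longer needed as a source;
      // iterating to a fixpoint gives the transitive behavior of
      // {ClearExecutedMove} above.
      void DrainEager() {
        for (bool progress = true; progress;) {
          progress = false;
          for (auto it = moves.begin(); it != moves.end();) {
            if (src_uses[it->first] == 0) {
              std::printf("move r%d <- r%d\n", it->first, it->second);
              --src_uses[it->second];
              it = moves.erase(it);
              progress = true;
            } else {
              ++it;
            }
          }
        }
      }

      void Execute() {
        DrainEager();
        while (!moves.empty()) {  // only cycles remain; break one per iteration
          auto [dst, src] = *moves.begin();
          std::printf("spill r%d; reload into r%d at the end\n", src, dst);
          --src_uses[src];
          moves.erase(moves.begin());
          DrainEager();  // the rest of this cycle is now acyclic
        }
      }
    };

    int main() {
      Resolver r;
      r.Add(1, 2);  // swap: r1 <- r2 ...
      r.Add(2, 1);  // ... and r2 <- r1
      r.Add(3, 1);  // plus an acyclic user of r1
      r.Execute();
    }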
+class RegisterReuseMap {
+ public:
+ void Add(LiftoffRegister src, LiftoffRegister dst) {
+ if (auto previous = Lookup(src)) {
+ DCHECK_EQ(previous, dst);
+ return;
+ }
+ map_.emplace_back(src);
+ map_.emplace_back(dst);
+ }
+
+ base::Optional<LiftoffRegister> Lookup(LiftoffRegister src) {
+ for (auto it = map_.begin(), end = map_.end(); it != end; it += 2) {
+ if (it->is_pair() == src.is_pair() && *it == src) return *(it + 1);
+ }
+ return {};
+ }
+
+ private:
+ // {map_} holds pairs of <src, dst>.
+ base::SmallVector<LiftoffRegister, 8> map_;
+};
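
{map_} above is a flat sequence scanned linearly; for the handful of reuses that occur during a merge, this beats a node-based map, and the small-vector inline storage avoids heap allocation entirely in the common case. A sketch with int register codes in place of LiftoffRegister:

    #include <optional>
    #include <utility>
    #include <vector>

    class ReuseMap {
     public:
      // Record that {src} was copied to {dst}; a source may only ever be
      // paired with one destination.
      void Add(int src, int dst) {
        if (Lookup(src)) return;
        map_.emplace_back(src, dst);
      }
      std::optional<int> Lookup(int src) const {
        for (const auto& entry : map_) {
          if (entry.first == src) return entry.second;
        }
        return std::nullopt;
      }
     private:
      std::vector<std::pair<int, int>> map_;  // a SmallVector in the real code
    };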
+
+enum MergeKeepStackSlots : bool {
+ kKeepStackSlots = true,
+ kTurnStackSlotsIntoRegisters = false
+};
+enum MergeAllowConstants : bool {
+ kConstantsAllowed = true,
+ kConstantsNotAllowed = false
+};
+enum ReuseRegisters : bool {
+ kReuseRegisters = true,
+ kNoReuseRegisters = false
+};
+void InitMergeRegion(LiftoffAssembler::CacheState* state,
+ const VarState* source, VarState* target, uint32_t count,
+ MergeKeepStackSlots keep_stack_slots,
+ MergeAllowConstants allow_constants,
+ ReuseRegisters reuse_registers, LiftoffRegList used_regs) {
+ RegisterReuseMap register_reuse_map;
+ for (const VarState* source_end = source + count; source < source_end;
+ ++source, ++target) {
+ if ((source->is_stack() && keep_stack_slots) ||
+ (source->is_const() && allow_constants)) {
+ *target = *source;
+ continue;
+ }
+ base::Optional<LiftoffRegister> reg;
+ // First try: Keep the same register, if it's free.
+ if (source->is_reg() && state->is_free(source->reg())) {
+ reg = source->reg();
+ }
+ // Second try: Use the same register we used before (if we reuse registers).
+ if (!reg && reuse_registers) {
+ reg = register_reuse_map.Lookup(source->reg());
+ }
+ // Third try: Use any free register.
+ RegClass rc = reg_class_for(source->type());
+ if (!reg && state->has_unused_register(rc, used_regs)) {
+ reg = state->unused_register(rc, used_regs);
+ }
+ if (!reg) {
+ // No free register; make this a stack slot.
+ *target = VarState(source->type());
+ continue;
+ }
+ if (reuse_registers) register_reuse_map.Add(source->reg(), *reg);
+ state->inc_used(*reg);
+ *target = VarState(source->type(), *reg);
+ }
+}
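
For each slot, the selection above tries sources in a fixed order. A condensed sketch of just that decision, with int register codes and the surrounding cache state abstracted into parameters (all names hypothetical):

    #include <optional>
    #include <set>

    // 1) keep the source's own register if it is still free; 2) reuse the
    // register already picked for this source, so duplicate registers in the
    // source get duplicate registers in the target; 3) take any unused
    // register; 4) otherwise fall back to a stack slot (nullopt).
    std::optional<int> PickRegister(std::optional<int> src_reg,
                                    bool src_reg_free,
                                    std::optional<int> previously_chosen,
                                    const std::set<int>& free_regs) {
      if (src_reg && src_reg_free) return src_reg;
      if (previously_chosen) return previously_chosen;
      if (!free_regs.empty()) return *free_regs.begin();
      return std::nullopt;
    }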
+
} // namespace
// TODO(clemensh): Don't copy the full parent state (this makes us N^2).
void LiftoffAssembler::CacheState::InitMerge(const CacheState& source,
uint32_t num_locals,
- uint32_t arity) {
+ uint32_t arity,
+ uint32_t stack_depth) {
+ // |------locals------|---(in between)----|--(discarded)--|----merge----|
+ // <-- num_locals --> <-- stack_depth -->^stack_base <-- arity -->
+
+ uint32_t stack_base = stack_depth + num_locals;
+ uint32_t target_height = stack_base + arity;
+ uint32_t discarded = source.stack_height() - target_height;
DCHECK(stack_state.empty());
+
DCHECK_GE(source.stack_height(), stack_base);
- stack_state.resize(stack_base + arity, VarState(kWasmStmt));
-
- // |------locals------|--(in between)--|--(discarded)--|----merge----|
- // <-- num_locals --> ^stack_base <-- arity -->
-
- // First, initialize merge slots and locals. Keep them in the registers which
- // are being used in {source}, but avoid using a register multiple times. Use
- // unused registers where necessary and possible.
- for (int range = 0; range < 2; ++range) {
- auto src_idx = range ? 0 : source.stack_state.size() - arity;
- auto src_end = range ? num_locals : source.stack_state.size();
- auto dst_idx = range ? 0 : stack_state.size() - arity;
- for (; src_idx < src_end; ++src_idx, ++dst_idx) {
- auto& dst = stack_state[dst_idx];
- auto& src = source.stack_state[src_idx];
- // Just initialize to any register; will be overwritten before use.
- LiftoffRegister reg = kGpCacheRegList.GetFirstRegSet();
- RegClass rc = src.is_reg() ? src.reg_class() : reg_class_for(src.type());
- if (src.is_reg() && is_free(src.reg())) {
- reg = src.reg();
- } else if (has_unused_register(rc)) {
- reg = unused_register(rc);
- } else {
- // Make this a stack slot.
- dst = VarState(src.type());
- continue;
- }
- dst = VarState(src.type(), reg);
- inc_used(reg);
- }
+ stack_state.resize_no_init(target_height);
+
+ const VarState* source_begin = source.stack_state.data();
+ VarState* target_begin = stack_state.data();
+
+ // Try to keep locals and the merge region in their registers. Registers used
+ // multiple times need to be copied to another free register. Compute the list
+ // of used registers.
+ LiftoffRegList used_regs;
+ for (auto& src : VectorOf(source_begin, num_locals)) {
+ if (src.is_reg()) used_regs.set(src.reg());
+ }
+ for (auto& src : VectorOf(source_begin + stack_base + discarded, arity)) {
+ if (src.is_reg()) used_regs.set(src.reg());
}
+
+ // Initialize the merge region. If this region moves, try to turn stack slots
+ // into registers since we need to load the value anyway.
+ MergeKeepStackSlots keep_merge_stack_slots =
+ discarded == 0 ? kKeepStackSlots : kTurnStackSlotsIntoRegisters;
+ InitMergeRegion(this, source_begin + stack_base + discarded,
+ target_begin + stack_base, arity, keep_merge_stack_slots,
+ kConstantsNotAllowed, kNoReuseRegisters, used_regs);
+
+ // Initialize the locals region. Here, stack slots stay stack slots (because
+ // they do not move). Try to keep values in their registers, but avoid duplicates.
+ InitMergeRegion(this, source_begin, target_begin, num_locals, kKeepStackSlots,
+ kConstantsNotAllowed, kNoReuseRegisters, used_regs);
+ // Sanity check: All the {used_regs} are really in use now.
+ DCHECK_EQ(used_regs, used_registers & used_regs);
+
// Last, initialize the section in between. Here, constants are allowed, but
// registers which are already used for the merge region or locals must be
- // spilled.
- for (uint32_t i = num_locals; i < stack_base; ++i) {
- auto& dst = stack_state[i];
- auto& src = source.stack_state[i];
- if (src.is_reg()) {
- if (is_used(src.reg())) {
- // Make this a stack slot.
- dst = VarState(src.type());
- } else {
- dst = VarState(src.type(), src.reg());
- inc_used(src.reg());
- }
- } else if (src.is_const()) {
- dst = src;
- } else {
- DCHECK(src.is_stack());
- // Make this a stack slot.
- dst = VarState(src.type());
- }
- }
- last_spilled_regs = source.last_spilled_regs;
+ // moved to other registers or spilled. If a register appears twice in the
+ // source region, make sure to use the same register twice in the target region.
+ InitMergeRegion(this, source_begin + num_locals, target_begin + num_locals,
+ stack_depth, kKeepStackSlots, kConstantsAllowed,
+ kReuseRegisters, used_regs);
}
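
A worked instance of the layout arithmetic above, with made-up sizes (3 locals, 4 in-between values, a source stack of height 10, merge arity 2):

    #include <cassert>
    #include <cstdint>

    int main() {
      uint32_t num_locals = 3, stack_depth = 4, arity = 2;
      uint32_t source_height = 10;
      uint32_t stack_base = stack_depth + num_locals;      // 7
      uint32_t target_height = stack_base + arity;         // 9
      uint32_t discarded = source_height - target_height;  // 1 dropped value
      assert(stack_base == 7 && target_height == 9 && discarded == 1);
      // The merge region is copied from source slots [8, 10) into target
      // slots [7, 9); locals and the in-between region keep their indices.
    }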
-void LiftoffAssembler::CacheState::Steal(CacheState& source) {
+void LiftoffAssembler::CacheState::Steal(const CacheState& source) {
// Just use the move assignment operator.
*this = std::move(source);
}
@@ -346,9 +486,9 @@ constexpr AssemblerOptions DefaultLiftoffOptions() {
// TODO(clemensh): Provide a reasonably sized buffer, based on wasm function
// size.
-LiftoffAssembler::LiftoffAssembler()
- : TurboAssembler(nullptr, DefaultLiftoffOptions(), nullptr, 0,
- CodeObjectRequired::kNo) {
+LiftoffAssembler::LiftoffAssembler(std::unique_ptr<AssemblerBuffer> buffer)
+ : TurboAssembler(nullptr, DefaultLiftoffOptions(), CodeObjectRequired::kNo,
+ std::move(buffer)) {
set_abort_hard(true); // Avoid calls to Abort.
}
@@ -383,18 +523,20 @@ LiftoffRegister LiftoffAssembler::PopToRegister(LiftoffRegList pinned) {
UNREACHABLE();
}
-void LiftoffAssembler::MergeFullStackWith(CacheState& target) {
- DCHECK_EQ(cache_state_.stack_height(), target.stack_height());
+void LiftoffAssembler::MergeFullStackWith(const CacheState& target,
+ const CacheState& source) {
+ DCHECK_EQ(source.stack_height(), target.stack_height());
// TODO(clemensh): Reuse the same StackTransferRecipe object to save some
// allocations.
StackTransferRecipe transfers(this);
- for (uint32_t i = 0, e = cache_state_.stack_height(); i < e; ++i) {
- transfers.TransferStackSlot(target, i, i);
+ for (uint32_t i = 0, e = source.stack_height(); i < e; ++i) {
+ transfers.TransferStackSlot(target, i, source, i);
}
}
-void LiftoffAssembler::MergeStackWith(CacheState& target, uint32_t arity) {
- // Before: ----------------|------ pop_count -----|--- arity ---|
+void LiftoffAssembler::MergeStackWith(const CacheState& target,
+ uint32_t arity) {
+ // Before: ----------------|----- (discarded) ----|--- arity ---|
// ^target_stack_height ^stack_base ^stack_height
// After: ----|-- arity --|
// ^ ^target_stack_height
@@ -407,10 +549,11 @@ void LiftoffAssembler::MergeStackWith(CacheState& target, uint32_t arity) {
uint32_t target_stack_base = target_stack_height - arity;
StackTransferRecipe transfers(this);
for (uint32_t i = 0; i < target_stack_base; ++i) {
- transfers.TransferStackSlot(target, i, i);
+ transfers.TransferStackSlot(target, i, cache_state_, i);
}
for (uint32_t i = 0; i < arity; ++i) {
- transfers.TransferStackSlot(target, target_stack_base + i, stack_base + i);
+ transfers.TransferStackSlot(target, target_stack_base + i, cache_state_,
+ stack_base + i);
}
}
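
The same arithmetic for MergeStackWith, again with made-up numbers (current stack height 9, target height 6, arity 2):

    #include <cassert>
    #include <cstdint>

    int main() {
      uint32_t stack_height = 9, target_stack_height = 6, arity = 2;
      uint32_t stack_base = stack_height - arity;                // 7
      uint32_t target_stack_base = target_stack_height - arity;  // 4
      assert(stack_base == 7 && target_stack_base == 4);
      // Slots [0, 4) transfer index to index; the two branch operands move
      // from source slots 7 and 8 into target slots 4 and 5; source slots
      // 4..6 are discarded.
    }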
@@ -449,7 +592,7 @@ void LiftoffAssembler::SpillAllRegisters() {
void LiftoffAssembler::PrepareCall(FunctionSig* sig,
compiler::CallDescriptor* call_descriptor,
Register* target,
- LiftoffRegister* target_instance) {
+ Register* target_instance) {
uint32_t num_params = static_cast<uint32_t>(sig->parameter_count());
// Input 0 is the call target.
constexpr size_t kInputShift = 1;
@@ -472,10 +615,12 @@ void LiftoffAssembler::PrepareCall(FunctionSig* sig,
compiler::LinkageLocation instance_loc =
call_descriptor->GetInputLocation(kInputShift);
DCHECK(instance_loc.IsRegister() && !instance_loc.IsAnyRegister());
- LiftoffRegister instance_reg(Register::from_code(instance_loc.AsRegister()));
+ Register instance_reg = Register::from_code(instance_loc.AsRegister());
param_regs.set(instance_reg);
if (target_instance && *target_instance != instance_reg) {
- stack_transfers.MoveRegister(instance_reg, *target_instance, kWasmIntPtr);
+ stack_transfers.MoveRegister(LiftoffRegister(instance_reg),
+ LiftoffRegister(*target_instance),
+ kWasmIntPtr);
}
// Now move all parameter values into the right slot for the call.
@@ -504,7 +649,18 @@ void LiftoffAssembler::PrepareCall(FunctionSig* sig,
if (loc.IsRegister()) {
DCHECK(!loc.IsAnyRegister());
RegClass rc = is_pair ? kGpReg : reg_class_for(type);
- LiftoffRegister reg = LiftoffRegister::from_code(rc, loc.AsRegister());
+ int reg_code = loc.AsRegister();
+#if V8_TARGET_ARCH_ARM
+ // Liftoff assumes a one-to-one mapping between float registers and
+ // double registers, and so does not distinguish between f32 and f64
+ // registers. The f32 register code must therefore be halved in order to
+ // pass the f64 code to Liftoff.
+ DCHECK_IMPLIES(type == kWasmF32, (reg_code % 2) == 0);
+ LiftoffRegister reg = LiftoffRegister::from_code(
+ rc, (type == kWasmF32) ? (reg_code / 2) : reg_code);
+#else
+ LiftoffRegister reg = LiftoffRegister::from_code(rc, reg_code);
+#endif
param_regs.set(reg);
if (is_pair) {
stack_transfers.LoadI64HalfIntoRegister(reg, slot, stack_idx, half);
@@ -543,15 +699,14 @@ void LiftoffAssembler::PrepareCall(FunctionSig* sig,
stack_transfers.Execute();
// Pop parameters from the value stack.
- auto stack_end = cache_state_.stack_state.end();
- cache_state_.stack_state.erase(stack_end - num_params, stack_end);
+ cache_state_.stack_state.pop_back(num_params);
// Reset register use counters.
cache_state_.reset_used_registers();
// Reload the instance from the stack.
if (!target_instance) {
- FillInstanceInto(instance_reg.gp());
+ FillInstanceInto(instance_reg);
}
}
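
The ARM special case above relies on the architectural aliasing of VFP registers: s0/s1 overlay d0, s2/s3 overlay d1, and so on. Since Liftoff only tracks double registers, an f32 value placed in s2n by the call descriptor is referred to as dn. A sketch of that mapping (a hypothetical helper, not a V8 function):

    #include <cassert>

    int LiftoffFpRegCode(bool is_f32, int descriptor_code) {
      if (!is_f32) return descriptor_code;  // f64 codes pass through
      assert(descriptor_code % 2 == 0);     // only even s-registers occur here
      return descriptor_code / 2;           // s2n aliases dn
    }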
@@ -564,6 +719,11 @@ void LiftoffAssembler::FinishCall(FunctionSig* sig,
const bool need_pair = kNeedI64RegPair && return_type == kWasmI64;
DCHECK_EQ(need_pair ? 2 : 1, call_descriptor->ReturnCount());
RegClass rc = need_pair ? kGpReg : reg_class_for(return_type);
+#if V8_TARGET_ARCH_ARM
+ // If the return register was not d0 for f32, the code value would have to
+ // be halved as is done for the parameter registers.
+ DCHECK_EQ(call_descriptor->GetReturnLocation(0).AsRegister(), 0);
+#endif
LiftoffRegister return_reg = LiftoffRegister::from_code(
rc, call_descriptor->GetReturnLocation(0).AsRegister());
DCHECK(GetCacheRegList(rc).has(return_reg));
@@ -594,7 +754,7 @@ void LiftoffAssembler::Move(LiftoffRegister dst, LiftoffRegister src,
}
void LiftoffAssembler::ParallelRegisterMove(
- std::initializer_list<ParallelRegisterMoveTuple> tuples) {
+ Vector<ParallelRegisterMoveTuple> tuples) {
StackTransferRecipe stack_transfers(this);
for (auto tuple : tuples) {
if (tuple.dst == tuple.src) continue;
@@ -602,6 +762,23 @@ void LiftoffAssembler::ParallelRegisterMove(
}
}
+void LiftoffAssembler::MoveToReturnRegisters(FunctionSig* sig) {
+ // We do not support multi-value yet.
+ DCHECK_EQ(1, sig->return_count());
+ ValueType return_type = sig->GetReturn(0);
+ StackTransferRecipe stack_transfers(this);
+ LiftoffRegister return_reg =
+ needs_reg_pair(return_type)
+ ? LiftoffRegister::ForPair(kGpReturnRegisters[0],
+ kGpReturnRegisters[1])
+ : reg_class_for(return_type) == kGpReg
+ ? LiftoffRegister(kGpReturnRegisters[0])
+ : LiftoffRegister(kFpReturnRegisters[0]);
+ stack_transfers.LoadIntoRegister(return_reg, cache_state_.stack_state.back(),
+ cache_state_.stack_height() - 1);
+}
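
The nested ternary above picks the single return register. A flattened sketch of the same decision, with placeholder names for the three outcomes:

    enum class ReturnReg { kGpPair, kGp, kFp };

    // On 32-bit targets an i64 result occupies a pair of gp registers
    // (e.g. {r0, r1}); otherwise the first gp or fp return register is used.
    ReturnReg PickReturnReg(bool needs_reg_pair, bool is_gp_class) {
      if (needs_reg_pair) return ReturnReg::kGpPair;
      return is_gp_class ? ReturnReg::kGp : ReturnReg::kFp;
    }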
+
+#ifdef ENABLE_SLOW_DCHECKS
bool LiftoffAssembler::ValidateCacheState() const {
uint32_t register_use_count[kAfterMaxLiftoffRegCode] = {0};
LiftoffRegList used_regs;
@@ -626,9 +803,10 @@ bool LiftoffAssembler::ValidateCacheState() const {
<< PrintCollection(register_use_count) << "\n";
os << "found: used_regs " << cache_state_.used_registers << ", counts "
<< PrintCollection(cache_state_.register_use_count) << "\n";
- os << "Use --trace-liftoff to debug.";
+ os << "Use --trace-wasm-decoder and --trace-liftoff to debug.";
FATAL("%s", os.str().c_str());
}
+#endif
LiftoffRegister LiftoffAssembler::SpillOneRegister(LiftoffRegList candidates,
LiftoffRegList pinned) {
diff --git a/deps/v8/src/wasm/baseline/liftoff-assembler.h b/deps/v8/src/wasm/baseline/liftoff-assembler.h
index 673aa4125f..3ff60a42ab 100644
--- a/deps/v8/src/wasm/baseline/liftoff-assembler.h
+++ b/deps/v8/src/wasm/baseline/liftoff-assembler.h
@@ -9,6 +9,7 @@
#include <memory>
#include "src/base/bits.h"
+#include "src/base/small-vector.h"
#include "src/frames.h"
#include "src/macro-assembler.h"
#include "src/wasm/baseline/liftoff-assembler-defs.h"
@@ -35,7 +36,7 @@ class LiftoffAssembler : public TurboAssembler {
static constexpr uint32_t kStackSlotSize = 8;
static constexpr ValueType kWasmIntPtr =
- kPointerSize == 8 ? kWasmI64 : kWasmI32;
+ kSystemPointerSize == 8 ? kWasmI64 : kWasmI32;
class VarState {
public:
@@ -113,21 +114,18 @@ class LiftoffAssembler : public TurboAssembler {
struct CacheState {
// Allow default construction, move construction, and move assignment.
CacheState() = default;
- CacheState(CacheState&&) = default;
- CacheState& operator=(CacheState&&) = default;
+ CacheState(CacheState&&) V8_NOEXCEPT = default;
+ CacheState& operator=(CacheState&&) V8_NOEXCEPT = default;
- // TODO(clemensh): Improve memory management here; avoid std::vector.
- std::vector<VarState> stack_state;
+ base::SmallVector<VarState, 8> stack_state;
LiftoffRegList used_registers;
uint32_t register_use_count[kAfterMaxLiftoffRegCode] = {0};
LiftoffRegList last_spilled_regs;
- // TODO(clemensh): Remove stack_base; use ControlBase::stack_depth.
- uint32_t stack_base = 0;
bool has_unused_register(RegClass rc, LiftoffRegList pinned = {}) const {
if (kNeedI64RegPair && rc == kGpRegPair) {
LiftoffRegList available_regs =
- kGpCacheRegList & ~used_registers & ~pinned;
+ kGpCacheRegList.MaskOut(used_registers).MaskOut(pinned);
return available_regs.GetNumRegsSet() >= 2;
}
DCHECK(rc == kGpReg || rc == kFpReg);
@@ -137,7 +135,8 @@ class LiftoffAssembler : public TurboAssembler {
bool has_unused_register(LiftoffRegList candidates,
LiftoffRegList pinned = {}) const {
- LiftoffRegList available_regs = candidates & ~used_registers & ~pinned;
+ LiftoffRegList available_regs =
+ candidates.MaskOut(used_registers).MaskOut(pinned);
return !available_regs.is_empty();
}
@@ -155,7 +154,8 @@ class LiftoffAssembler : public TurboAssembler {
LiftoffRegister unused_register(LiftoffRegList candidates,
LiftoffRegList pinned = {}) const {
- LiftoffRegList available_regs = candidates & ~used_registers & ~pinned;
+ LiftoffRegList available_regs =
+ candidates.MaskOut(used_registers).MaskOut(pinned);
return available_regs.GetFirstRegSet();
}
@@ -230,9 +230,9 @@ class LiftoffAssembler : public TurboAssembler {
// TODO(clemensh): Don't copy the full parent state (this makes us N^2).
void InitMerge(const CacheState& source, uint32_t num_locals,
- uint32_t arity);
+ uint32_t arity, uint32_t stack_depth);
- void Steal(CacheState& source);
+ void Steal(const CacheState& source);
void Split(const CacheState& source);
@@ -242,12 +242,12 @@ class LiftoffAssembler : public TurboAssembler {
private:
// Make the copy assignment operator private (to be used from {Split()}).
- CacheState& operator=(const CacheState&) = default;
+ CacheState& operator=(const CacheState&) V8_NOEXCEPT = default;
// Disallow copy construction.
CacheState(const CacheState&) = delete;
};
- LiftoffAssembler();
+ explicit LiftoffAssembler(std::unique_ptr<AssemblerBuffer>);
~LiftoffAssembler() override;
LiftoffRegister PopToRegister(LiftoffRegList pinned = {});
@@ -298,8 +298,8 @@ class LiftoffAssembler : public TurboAssembler {
return SpillOneRegister(candidates, pinned);
}
- void MergeFullStackWith(CacheState&);
- void MergeStackWith(CacheState&, uint32_t arity);
+ void MergeFullStackWith(const CacheState& target, const CacheState& source);
+ void MergeStackWith(const CacheState& target, uint32_t arity);
void Spill(uint32_t index);
void SpillLocals();
@@ -316,7 +316,7 @@ class LiftoffAssembler : public TurboAssembler {
// register, or {no_reg} if target was spilled to the stack.
void PrepareCall(FunctionSig*, compiler::CallDescriptor*,
Register* target = nullptr,
- LiftoffRegister* target_instance = nullptr);
+ Register* target_instance = nullptr);
// Process return values of the call.
void FinishCall(FunctionSig*, compiler::CallDescriptor*);
@@ -330,11 +330,18 @@ class LiftoffAssembler : public TurboAssembler {
LiftoffRegister dst;
LiftoffRegister src;
ValueType type;
+ template <typename Dst, typename Src>
+ ParallelRegisterMoveTuple(Dst dst, Src src, ValueType type)
+ : dst(dst), src(src), type(type) {}
};
- void ParallelRegisterMove(std::initializer_list<ParallelRegisterMoveTuple>);
+ void ParallelRegisterMove(Vector<ParallelRegisterMoveTuple>);
+ void MoveToReturnRegisters(FunctionSig*);
+
+#ifdef ENABLE_SLOW_DCHECKS
// Validate that the register use counts reflect the state of the cache.
bool ValidateCacheState() const;
+#endif
////////////////////////////////////
// Platform-specific part. //
@@ -352,8 +359,12 @@ class LiftoffAssembler : public TurboAssembler {
inline void LoadConstant(LiftoffRegister, WasmValue,
RelocInfo::Mode rmode = RelocInfo::NONE);
inline void LoadFromInstance(Register dst, uint32_t offset, int size);
+ inline void LoadTaggedPointerFromInstance(Register dst, uint32_t offset);
inline void SpillInstance(Register instance);
inline void FillInstanceInto(Register dst);
+ inline void LoadTaggedPointer(Register dst, Register src_addr,
+ Register offset_reg, uint32_t offset_imm,
+ LiftoffRegList pinned);
inline void Load(LiftoffRegister dst, Register src_addr, Register offset_reg,
uint32_t offset_imm, LoadType type, LiftoffRegList pinned,
uint32_t* protected_load_pc = nullptr,
@@ -373,9 +384,8 @@ class LiftoffAssembler : public TurboAssembler {
inline void Spill(uint32_t index, WasmValue);
inline void Fill(LiftoffRegister, uint32_t index, ValueType);
// Only used on 32-bit systems: Fill a register from a "half stack slot", i.e.
- // 4 bytes on the stack holding half of a 64-bit value. The two half_indexes
- // corresponding to slot {index} are {2*index} and {2*index-1}.
- inline void FillI64Half(Register, uint32_t half_index);
+ // 4 bytes on the stack holding half of a 64-bit value.
+ inline void FillI64Half(Register, uint32_t index, RegPairHalf);
// i32 binops.
inline void emit_i32_add(Register dst, Register lhs, Register rhs);
@@ -440,7 +450,7 @@ class LiftoffAssembler : public TurboAssembler {
inline void emit_i32_to_intptr(Register dst, Register src);
inline void emit_ptrsize_add(Register dst, Register lhs, Register rhs) {
- if (kPointerSize == 8) {
+ if (kSystemPointerSize == 8) {
emit_i64_add(LiftoffRegister(dst), LiftoffRegister(lhs),
LiftoffRegister(rhs));
} else {
@@ -448,7 +458,7 @@ class LiftoffAssembler : public TurboAssembler {
}
}
inline void emit_ptrsize_sub(Register dst, Register lhs, Register rhs) {
- if (kPointerSize == 8) {
+ if (kSystemPointerSize == 8) {
emit_i64_sub(LiftoffRegister(dst), LiftoffRegister(lhs),
LiftoffRegister(rhs));
} else {
@@ -456,7 +466,7 @@ class LiftoffAssembler : public TurboAssembler {
}
}
inline void emit_ptrsize_and(Register dst, Register lhs, Register rhs) {
- if (kPointerSize == 8) {
+ if (kSystemPointerSize == 8) {
emit_i64_and(LiftoffRegister(dst), LiftoffRegister(lhs),
LiftoffRegister(rhs));
} else {
@@ -464,7 +474,7 @@ class LiftoffAssembler : public TurboAssembler {
}
}
inline void emit_ptrsize_shr(Register dst, Register src, int amount) {
- if (kPointerSize == 8) {
+ if (kSystemPointerSize == 8) {
emit_i64_shr(LiftoffRegister(dst), LiftoffRegister(src), amount);
} else {
emit_i32_shr(dst, src, amount);
@@ -490,10 +500,10 @@ class LiftoffAssembler : public TurboAssembler {
// f32 unops.
inline void emit_f32_abs(DoubleRegister dst, DoubleRegister src);
inline void emit_f32_neg(DoubleRegister dst, DoubleRegister src);
- inline void emit_f32_ceil(DoubleRegister dst, DoubleRegister src);
- inline void emit_f32_floor(DoubleRegister dst, DoubleRegister src);
- inline void emit_f32_trunc(DoubleRegister dst, DoubleRegister src);
- inline void emit_f32_nearest_int(DoubleRegister dst, DoubleRegister src);
+ inline bool emit_f32_ceil(DoubleRegister dst, DoubleRegister src);
+ inline bool emit_f32_floor(DoubleRegister dst, DoubleRegister src);
+ inline bool emit_f32_trunc(DoubleRegister dst, DoubleRegister src);
+ inline bool emit_f32_nearest_int(DoubleRegister dst, DoubleRegister src);
inline void emit_f32_sqrt(DoubleRegister dst, DoubleRegister src);
// f64 binops.
@@ -706,7 +716,7 @@ class LiftoffStackSlots {
private:
struct Slot {
// Allow move construction.
- Slot(Slot&&) = default;
+ Slot(Slot&&) V8_NOEXCEPT = default;
Slot(const LiftoffAssembler::VarState& src, uint32_t src_index,
RegPairHalf half)
: src_(src), src_index_(src_index), half_(half) {}
@@ -718,8 +728,10 @@ class LiftoffStackSlots {
RegPairHalf half_;
};
- std::vector<Slot> slots_;
+ base::SmallVector<Slot, 8> slots_;
LiftoffAssembler* const asm_;
+
+ DISALLOW_COPY_AND_ASSIGN(LiftoffStackSlots);
};
} // namespace wasm
diff --git a/deps/v8/src/wasm/baseline/liftoff-compiler.cc b/deps/v8/src/wasm/baseline/liftoff-compiler.cc
index d77e7cde4a..8c5203479e 100644
--- a/deps/v8/src/wasm/baseline/liftoff-compiler.cc
+++ b/deps/v8/src/wasm/baseline/liftoff-compiler.cc
@@ -10,8 +10,13 @@
#include "src/compiler/linkage.h"
#include "src/compiler/wasm-compiler.h"
#include "src/counters.h"
+#include "src/interface-descriptors.h"
+#include "src/log.h"
#include "src/macro-assembler-inl.h"
+#include "src/objects/smi.h"
+#include "src/ostreams.h"
#include "src/tracing/trace-event.h"
+#include "src/utils.h"
#include "src/wasm/baseline/liftoff-assembler.h"
#include "src/wasm/function-body-decoder-impl.h"
#include "src/wasm/function-compiler.h"
@@ -39,7 +44,7 @@ namespace {
if (FLAG_trace_liftoff) PrintF("[liftoff] " __VA_ARGS__); \
} while (false)
-#define WASM_INSTANCE_OBJECT_OFFSET(name) \
+#define WASM_INSTANCE_OBJECT_FIELD_OFFSET(name) \
ObjectAccess::ToTagged(WasmInstanceObject::k##name##Offset)
template <int expected_size, int actual_size>
@@ -49,14 +54,19 @@ struct assert_field_size {
static constexpr int size = actual_size;
};
-#define WASM_INSTANCE_OBJECT_SIZE(name) \
- (WasmInstanceObject::k##name##OffsetEnd - \
- WasmInstanceObject::k##name##Offset + 1) // NOLINT(whitespace/indent)
+#define WASM_INSTANCE_OBJECT_FIELD_SIZE(name) \
+ FIELD_SIZE(WasmInstanceObject::k##name##Offset)
-#define LOAD_INSTANCE_FIELD(dst, name, load_size) \
- __ LoadFromInstance( \
- dst.gp(), WASM_INSTANCE_OBJECT_OFFSET(name), \
- assert_field_size<WASM_INSTANCE_OBJECT_SIZE(name), load_size>::size);
+#define LOAD_INSTANCE_FIELD(dst, name, load_size) \
+ __ LoadFromInstance(dst, WASM_INSTANCE_OBJECT_FIELD_OFFSET(name), \
+ assert_field_size<WASM_INSTANCE_OBJECT_FIELD_SIZE(name), \
+ load_size>::size);
+
+#define LOAD_TAGGED_PTR_INSTANCE_FIELD(dst, name) \
+ static_assert(WASM_INSTANCE_OBJECT_FIELD_SIZE(name) == kTaggedSize, \
+ "field in WasmInstance does not have the expected size"); \
+ __ LoadTaggedPointerFromInstance(dst, \
+ WASM_INSTANCE_OBJECT_FIELD_OFFSET(name));
#ifdef DEBUG
#define DEBUG_CODE_COMMENT(str) \
@@ -68,7 +78,7 @@ struct assert_field_size {
#endif
constexpr LoadType::LoadTypeValue kPointerLoadType =
- kPointerSize == 8 ? LoadType::kI64Load : LoadType::kI32Load;
+ kSystemPointerSize == 8 ? LoadType::kI64Load : LoadType::kI32Load;
#if V8_TARGET_ARCH_ARM64
// On ARM64, the Assembler keeps track of pointers to Labels to resolve
@@ -99,12 +109,15 @@ class MovableLabel {
compiler::CallDescriptor* GetLoweredCallDescriptor(
Zone* zone, compiler::CallDescriptor* call_desc) {
- return kPointerSize == 4 ? compiler::GetI32WasmCallDescriptor(zone, call_desc)
- : call_desc;
+ return kSystemPointerSize == 4
+ ? compiler::GetI32WasmCallDescriptor(zone, call_desc)
+ : call_desc;
}
-constexpr ValueType kTypesArr_ilfd[] = {kWasmI32, kWasmI64, kWasmF32, kWasmF64};
-constexpr Vector<const ValueType> kTypes_ilfd = ArrayVector(kTypesArr_ilfd);
+constexpr ValueType kSupportedTypesArr[] = {kWasmI32, kWasmI64, kWasmF32,
+ kWasmF64};
+constexpr Vector<const ValueType> kSupportedTypes =
+ ArrayVector(kSupportedTypesArr);
class LiftoffCompiler {
public:
@@ -118,12 +131,16 @@ class LiftoffCompiler {
LiftoffAssembler::CacheState state;
};
- struct Control : public ControlWithNamedConstructors<Control, Value> {
- MOVE_ONLY_WITH_DEFAULT_CONSTRUCTORS(Control);
-
+ struct Control : public ControlBase<Value> {
std::unique_ptr<ElseState> else_state;
LiftoffAssembler::CacheState label_state;
MovableLabel label;
+
+ MOVE_ONLY_NO_DEFAULT_CONSTRUCTOR(Control);
+
+ template <typename... Args>
+ explicit Control(Args&&... args) V8_NOEXCEPT
+ : ControlBase(std::forward<Args>(args)...) {}
};
using FullDecoder = WasmFullDecoder<validate, LiftoffCompiler>;
@@ -147,9 +164,11 @@ class LiftoffCompiler {
}
};
- LiftoffCompiler(compiler::CallDescriptor* call_descriptor, ModuleEnv* env,
- Zone* compilation_zone)
- : descriptor_(
+ LiftoffCompiler(compiler::CallDescriptor* call_descriptor,
+ CompilationEnv* env, Zone* compilation_zone,
+ std::unique_ptr<AssemblerBuffer> buffer)
+ : asm_(std::move(buffer)),
+ descriptor_(
GetLoweredCallDescriptor(compilation_zone, call_descriptor)),
env_(env),
compilation_zone_(compilation_zone),
@@ -178,7 +197,8 @@ class LiftoffCompiler {
void unsupported(FullDecoder* decoder, const char* reason) {
ok_ = false;
TRACE("unsupported: %s\n", reason);
- decoder->errorf(decoder->pc(), "unsupported liftoff operation: %s", reason);
+ decoder->errorf(decoder->pc_offset(), "unsupported liftoff operation: %s",
+ reason);
BindUnboundLabels(decoder);
}
@@ -226,46 +246,20 @@ class LiftoffCompiler {
}
void StartFunction(FullDecoder* decoder) {
- int num_locals = decoder->NumLocals();
+ int num_locals = decoder->num_locals();
__ set_num_locals(num_locals);
for (int i = 0; i < num_locals; ++i) {
__ set_local_type(i, decoder->GetLocalType(i));
}
}
- void CollectReservedRegsForParameters(uint32_t input_idx_start,
- uint32_t num_params,
- LiftoffRegList& param_regs) {
- uint32_t input_idx = input_idx_start;
- for (uint32_t param_idx = 0; param_idx < num_params; ++param_idx) {
- ValueType type = __ local_type(param_idx);
- const int num_lowered_params = 1 + needs_reg_pair(type);
- RegClass rc = num_lowered_params == 1 ? reg_class_for(type) : kGpReg;
-
- for (int pair_idx = 0; pair_idx < num_lowered_params; ++pair_idx) {
- compiler::LinkageLocation param_loc =
- descriptor_->GetInputLocation(input_idx + pair_idx);
- if (param_loc.IsRegister()) {
- DCHECK(!param_loc.IsAnyRegister());
- int reg_code = param_loc.AsRegister();
- RegList cache_regs = rc == kGpReg ? kLiftoffAssemblerGpCacheRegs
- : kLiftoffAssemblerFpCacheRegs;
- if (cache_regs & (1 << reg_code)) {
- LiftoffRegister in_reg = LiftoffRegister::from_code(rc, reg_code);
- param_regs.set(in_reg);
- }
- }
- }
- input_idx += num_lowered_params;
- }
- }
-
// Returns the number of inputs processed (1 or 2).
uint32_t ProcessParameter(ValueType type, uint32_t input_idx) {
const int num_lowered_params = 1 + needs_reg_pair(type);
+ ValueType lowered_type = needs_reg_pair(type) ? kWasmI32 : type;
+ RegClass rc = reg_class_for(lowered_type);
// Initialize to anything, will be set in the loop and used afterwards.
LiftoffRegister reg = kGpCacheRegList.GetFirstRegSet();
- RegClass rc = num_lowered_params == 1 ? reg_class_for(type) : kGpReg;
LiftoffRegList pinned;
for (int pair_idx = 0; pair_idx < num_lowered_params; ++pair_idx) {
compiler::LinkageLocation param_loc =
@@ -275,9 +269,19 @@ class LiftoffCompiler {
if (param_loc.IsRegister()) {
DCHECK(!param_loc.IsAnyRegister());
int reg_code = param_loc.AsRegister();
+#if V8_TARGET_ARCH_ARM
+ // Liftoff assumes a one-to-one mapping between float registers and
+ // double registers, and so does not distinguish between f32 and f64
+ // registers. The f32 register code must therefore be halved in order to
+ // pass the f64 code to Liftoff.
+ DCHECK_IMPLIES(type == kWasmF32, (reg_code % 2) == 0);
+ if (type == kWasmF32) {
+ reg_code /= 2;
+ }
+#endif
RegList cache_regs = rc == kGpReg ? kLiftoffAssemblerGpCacheRegs
: kLiftoffAssemblerFpCacheRegs;
- if (cache_regs & (1 << reg_code)) {
+ if (cache_regs & (1ULL << reg_code)) {
// This is a cache register, just use it.
in_reg = LiftoffRegister::from_code(rc, reg_code);
} else {
@@ -286,14 +290,14 @@ class LiftoffCompiler {
// {LiftoffRegister} can only store cache regs.
in_reg = __ GetUnusedRegister(rc, pinned);
if (rc == kGpReg) {
- __ Move(in_reg.gp(), Register::from_code(reg_code), type);
+ __ Move(in_reg.gp(), Register::from_code(reg_code), lowered_type);
} else {
- __ Move(in_reg.fp(), DoubleRegister::from_code(reg_code), type);
+ __ Move(in_reg.fp(), DoubleRegister::from_code(reg_code),
+ lowered_type);
}
}
} else if (param_loc.IsCallerFrameSlot()) {
in_reg = __ GetUnusedRegister(rc, pinned);
- ValueType lowered_type = num_lowered_params == 1 ? type : kWasmI32;
__ LoadCallerFrameSlot(in_reg, -param_loc.AsCallerFrameSlot(),
lowered_type);
}
@@ -310,15 +314,16 @@ class LiftoffCompiler {
out_of_line_code_.push_back(
OutOfLineCode::StackCheck(position, __ cache_state()->used_registers));
OutOfLineCode& ool = out_of_line_code_.back();
- LiftoffRegister limit_address = __ GetUnusedRegister(kGpReg);
- LOAD_INSTANCE_FIELD(limit_address, StackLimitAddress, kPointerSize);
- __ StackCheck(ool.label.get(), limit_address.gp());
+ Register limit_address = __ GetUnusedRegister(kGpReg).gp();
+ LOAD_INSTANCE_FIELD(limit_address, StackLimitAddress, kSystemPointerSize);
+ __ StackCheck(ool.label.get(), limit_address);
__ bind(ool.continuation.get());
}
void StartFunctionBody(FullDecoder* decoder, Control* block) {
for (uint32_t i = 0; i < __ num_locals(); ++i) {
- if (!CheckSupportedType(decoder, kTypes_ilfd, __ local_type(i), "param"))
+ if (!CheckSupportedType(decoder, kSupportedTypes, __ local_type(i),
+ "param"))
return;
}
@@ -382,7 +387,6 @@ class LiftoffCompiler {
UNIMPLEMENTED();
}
}
- block->label_state.stack_base = __ num_locals();
// The function-prologue stack check is associated with position 0, which
// is never a position of any instruction in the function.
@@ -421,7 +425,7 @@ class LiftoffCompiler {
source_position_table_builder_.AddPosition(
__ pc_offset(), SourcePosition(ool.position), false);
__ CallRuntimeStub(ool.stub);
- safepoint_table_builder_.DefineSafepoint(&asm_, Safepoint::kSimple, 0,
+ safepoint_table_builder_.DefineSafepoint(&asm_, Safepoint::kSimple,
Safepoint::kNoLazyDeopt);
DCHECK_EQ(ool.continuation.get()->is_bound(), is_stack_check);
if (!ool.regs_to_save.is_empty()) __ PopRegisters(ool.regs_to_save);
@@ -457,13 +461,9 @@ class LiftoffCompiler {
DEBUG_CODE_COMMENT(WasmOpcodes::OpcodeName(opcode));
}
- void Block(FullDecoder* decoder, Control* block) {
- block->label_state.stack_base = __ cache_state()->stack_height();
- }
+ void Block(FullDecoder* decoder, Control* block) {}
void Loop(FullDecoder* decoder, Control* loop) {
- loop->label_state.stack_base = __ cache_state()->stack_height();
-
// Before entering a loop, spill all locals to the stack, in order to free
// the cache registers, and to avoid unnecessarily reloading stack values
// into registers at branches.
@@ -485,6 +485,10 @@ class LiftoffCompiler {
unsupported(decoder, "try");
}
+ void Catch(FullDecoder* decoder, Control* block, Value* exception) {
+ unsupported(decoder, "catch");
+ }
+
void If(FullDecoder* decoder, const Value& cond, Control* if_block) {
DCHECK_EQ(if_block, decoder->control_at(0));
DCHECK(if_block->is_if());
@@ -500,31 +504,69 @@ class LiftoffCompiler {
__ emit_cond_jump(kEqual, if_block->else_state->label.get(), kWasmI32,
value);
- if_block->label_state.stack_base = __ cache_state()->stack_height();
// Store the state (after popping the value) for executing the else branch.
if_block->else_state->state.Split(*__ cache_state());
}
void FallThruTo(FullDecoder* decoder, Control* c) {
if (c->end_merge.reached) {
- __ MergeFullStackWith(c->label_state);
- } else if (c->is_onearmed_if()) {
- c->label_state.InitMerge(*__ cache_state(), __ num_locals(),
- c->br_merge()->arity);
- __ MergeFullStackWith(c->label_state);
+ __ MergeFullStackWith(c->label_state, *__ cache_state());
} else {
c->label_state.Split(*__ cache_state());
}
TraceCacheState(decoder);
}
- void PopControl(FullDecoder* decoder, Control* c) {
- if (!c->is_loop() && c->end_merge.reached) {
+ void FinishOneArmedIf(FullDecoder* decoder, Control* c) {
+ DCHECK(c->is_onearmed_if());
+ if (c->end_merge.reached) {
+ // Someone already merged to the end of the if. Merge both arms into that.
+ if (c->reachable()) {
+ // Merge the if state into the end state.
+ __ MergeFullStackWith(c->label_state, *__ cache_state());
+ __ emit_jump(c->label.get());
+ }
+ // Merge the else state into the end state.
+ __ bind(c->else_state->label.get());
+ __ MergeFullStackWith(c->label_state, c->else_state->state);
+ __ cache_state()->Steal(c->label_state);
+ } else if (c->reachable()) {
+ // No merge yet at the end of the if, but we need to create a merge for
+ // both arms of this if. Thus initialize the merge point from the else
+ // state, then merge the if state into that.
+ DCHECK_EQ(0, c->end_merge.arity);
+ c->label_state.InitMerge(c->else_state->state, __ num_locals(), 0,
+ c->stack_depth);
+ __ MergeFullStackWith(c->label_state, *__ cache_state());
+ __ emit_jump(c->label.get());
+ // Merge the else state into the end state.
+ __ bind(c->else_state->label.get());
+ __ MergeFullStackWith(c->label_state, c->else_state->state);
__ cache_state()->Steal(c->label_state);
+ } else {
+ // No merge needed, just continue with the else state.
+ __ bind(c->else_state->label.get());
+ __ cache_state()->Steal(c->else_state->state);
}
- if (!c->label.get()->is_bound()) {
- __ bind(c->label.get());
+ }
+
+ void PopControl(FullDecoder* decoder, Control* c) {
+ if (c->is_loop()) return; // A loop just falls through.
+ if (c->is_onearmed_if()) {
+ // Special handling for one-armed ifs.
+ FinishOneArmedIf(decoder, c);
+ } else if (c->end_merge.reached) {
+ // There is a merge already. Merge our state into that, then continue with
+ // that state.
+ if (c->reachable()) {
+ __ MergeFullStackWith(c->label_state, *__ cache_state());
+ }
+ __ cache_state()->Steal(c->label_state);
+ } else {
+ // No merge, just continue with our current state.
}
+
+ if (!c->label.get()->is_bound()) __ bind(c->label.get());
}
void EndControl(FullDecoder* decoder, Control* c) {}
@@ -626,8 +668,8 @@ class LiftoffCompiler {
__ PushRegister(dst_type, dst);
}
- void UnOp(FullDecoder* decoder, WasmOpcode opcode, FunctionSig*,
- const Value& value, Value* result) {
+ void UnOp(FullDecoder* decoder, WasmOpcode opcode, const Value& value,
+ Value* result) {
#define CASE_I32_UNOP(opcode, fn) \
case WasmOpcode::kExpr##opcode: \
EmitUnOp<kWasmI32, kWasmI32>( \
@@ -672,10 +714,10 @@ class LiftoffCompiler {
CASE_I32_UNOP(I32Ctz, i32_ctz)
CASE_FLOAT_UNOP(F32Abs, F32, f32_abs)
CASE_FLOAT_UNOP(F32Neg, F32, f32_neg)
- CASE_FLOAT_UNOP(F32Ceil, F32, f32_ceil)
- CASE_FLOAT_UNOP(F32Floor, F32, f32_floor)
- CASE_FLOAT_UNOP(F32Trunc, F32, f32_trunc)
- CASE_FLOAT_UNOP(F32NearestInt, F32, f32_nearest_int)
+ CASE_FLOAT_UNOP_WITH_CFALLBACK(F32Ceil, F32, f32_ceil)
+ CASE_FLOAT_UNOP_WITH_CFALLBACK(F32Floor, F32, f32_floor)
+ CASE_FLOAT_UNOP_WITH_CFALLBACK(F32Trunc, F32, f32_trunc)
+ CASE_FLOAT_UNOP_WITH_CFALLBACK(F32NearestInt, F32, f32_nearest_int)
CASE_FLOAT_UNOP(F32Sqrt, F32, f32_sqrt)
CASE_FLOAT_UNOP(F64Abs, F64, f64_abs)
CASE_FLOAT_UNOP(F64Neg, F64, f64_neg)
@@ -780,8 +822,8 @@ class LiftoffCompiler {
}
}
- void BinOp(FullDecoder* decoder, WasmOpcode opcode, FunctionSig*,
- const Value& lhs, const Value& rhs, Value* result) {
+ void BinOp(FullDecoder* decoder, WasmOpcode opcode, const Value& lhs,
+ const Value& rhs, Value* result) {
#define CASE_I32_BINOP(opcode, fn) \
case WasmOpcode::kExpr##opcode: \
return EmitBinOp<kWasmI32, kWasmI32>( \
@@ -1071,30 +1113,19 @@ class LiftoffCompiler {
__ cache_state()->stack_state.pop_back();
}
- void DoReturn(FullDecoder* decoder, Vector<Value> values, bool implicit) {
- if (implicit) {
- DCHECK_EQ(1, decoder->control_depth());
- Control* func_block = decoder->control_at(0);
- __ bind(func_block->label.get());
- __ cache_state()->Steal(func_block->label_state);
- }
- if (!values.is_empty()) {
- if (values.size() > 1) return unsupported(decoder, "multi-return");
- LiftoffRegister reg = __ PopToRegister();
- LiftoffRegister return_reg =
- kNeedI64RegPair && values[0].type == kWasmI64
- ? LiftoffRegister::ForPair(kGpReturnRegisters[0],
- kGpReturnRegisters[1])
- : reg_class_for(values[0].type) == kGpReg
- ? LiftoffRegister(kGpReturnRegisters[0])
- : LiftoffRegister(kFpReturnRegisters[0]);
- if (reg != return_reg) __ Move(return_reg, reg, values[0].type);
- }
+ void ReturnImpl(FullDecoder* decoder) {
+ size_t num_returns = decoder->sig_->return_count();
+ if (num_returns > 1) return unsupported(decoder, "multi-return");
+ if (num_returns > 0) __ MoveToReturnRegisters(decoder->sig_);
__ LeaveFrame(StackFrame::WASM_COMPILED);
__ DropStackSlotsAndRet(
static_cast<uint32_t>(descriptor_->StackParameterCount()));
}
+ void DoReturn(FullDecoder* decoder, Vector<Value> /*values*/) {
+ ReturnImpl(decoder);
+ }
+
void GetLocal(FullDecoder* decoder, Value* result,
const LocalIndexImmediate<validate>& imm) {
auto& slot = __ cache_state()->stack_state[imm.index];
@@ -1168,17 +1199,16 @@ class LiftoffCompiler {
SetLocal(imm.index, true);
}
- LiftoffRegister GetGlobalBaseAndOffset(const WasmGlobal* global,
- LiftoffRegList& pinned,
- uint32_t* offset) {
- LiftoffRegister addr = pinned.set(__ GetUnusedRegister(kGpReg));
+ Register GetGlobalBaseAndOffset(const WasmGlobal* global,
+ LiftoffRegList& pinned, uint32_t* offset) {
+ Register addr = pinned.set(__ GetUnusedRegister(kGpReg)).gp();
if (global->mutability && global->imported) {
- LOAD_INSTANCE_FIELD(addr, ImportedMutableGlobals, kPointerSize);
- __ Load(addr, addr.gp(), no_reg, global->index * sizeof(Address),
- kPointerLoadType, pinned);
+ LOAD_INSTANCE_FIELD(addr, ImportedMutableGlobals, kSystemPointerSize);
+ __ Load(LiftoffRegister(addr), addr, no_reg,
+ global->index * sizeof(Address), kPointerLoadType, pinned);
*offset = 0;
} else {
- LOAD_INSTANCE_FIELD(addr, GlobalsStart, kPointerSize);
+ LOAD_INSTANCE_FIELD(addr, GlobalsStart, kSystemPointerSize);
*offset = global->offset;
}
return addr;
@@ -1187,29 +1217,29 @@ class LiftoffCompiler {
void GetGlobal(FullDecoder* decoder, Value* result,
const GlobalIndexImmediate<validate>& imm) {
const auto* global = &env_->module->globals[imm.index];
- if (!CheckSupportedType(decoder, kTypes_ilfd, global->type, "global"))
+ if (!CheckSupportedType(decoder, kSupportedTypes, global->type, "global"))
return;
LiftoffRegList pinned;
uint32_t offset = 0;
- LiftoffRegister addr = GetGlobalBaseAndOffset(global, pinned, &offset);
+ Register addr = GetGlobalBaseAndOffset(global, pinned, &offset);
LiftoffRegister value =
pinned.set(__ GetUnusedRegister(reg_class_for(global->type), pinned));
LoadType type = LoadType::ForValueType(global->type);
- __ Load(value, addr.gp(), no_reg, offset, type, pinned, nullptr, true);
+ __ Load(value, addr, no_reg, offset, type, pinned, nullptr, true);
__ PushRegister(global->type, value);
}
void SetGlobal(FullDecoder* decoder, const Value& value,
const GlobalIndexImmediate<validate>& imm) {
auto* global = &env_->module->globals[imm.index];
- if (!CheckSupportedType(decoder, kTypes_ilfd, global->type, "global"))
+ if (!CheckSupportedType(decoder, kSupportedTypes, global->type, "global"))
return;
LiftoffRegList pinned;
uint32_t offset = 0;
- LiftoffRegister addr = GetGlobalBaseAndOffset(global, pinned, &offset);
+ Register addr = GetGlobalBaseAndOffset(global, pinned, &offset);
LiftoffRegister reg = pinned.set(__ PopToRegister(pinned));
StoreType type = StoreType::ForValueType(global->type);
- __ Store(addr.gp(), no_reg, offset, reg, type, pinned, nullptr, true);
+ __ Store(addr, no_reg, offset, reg, type, {}, nullptr, true);
}
void Unreachable(FullDecoder* decoder) {
@@ -1244,23 +1274,32 @@ class LiftoffCompiler {
__ bind(&cont);
}
- void Br(Control* target) {
+ void BrImpl(Control* target) {
if (!target->br_merge()->reached) {
target->label_state.InitMerge(*__ cache_state(), __ num_locals(),
- target->br_merge()->arity);
+ target->br_merge()->arity,
+ target->stack_depth);
}
__ MergeStackWith(target->label_state, target->br_merge()->arity);
__ jmp(target->label.get());
}
- void Br(FullDecoder* decoder, Control* target) { Br(target); }
+ void Br(FullDecoder* decoder, Control* target) { BrImpl(target); }
+
+ void BrOrRet(FullDecoder* decoder, uint32_t depth) {
+ if (depth == decoder->control_depth() - 1) {
+ ReturnImpl(decoder);
+ } else {
+ BrImpl(decoder->control_at(depth));
+ }
+ }
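
In wasm, a branch whose depth targets the outermost control frame is a function return, which is why the depth comparison above dispatches between the two paths. A sketch:

    #include <cstdio>

    void BrOrRetSketch(unsigned depth, unsigned control_depth) {
      if (depth == control_depth - 1) {
        std::printf("emit epilogue and ret\n");        // function block
      } else {
        std::printf("merge and jump to depth %u\n", depth);
      }
    }

    int main() {
      BrOrRetSketch(0, 3);  // inner block: plain branch
      BrOrRetSketch(2, 3);  // function block: return
    }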
- void BrIf(FullDecoder* decoder, const Value& cond, Control* target) {
+ void BrIf(FullDecoder* decoder, const Value& cond, uint32_t depth) {
Label cont_false;
Register value = __ PopToRegister().gp();
__ emit_cond_jump(kEqual, &cont_false, kWasmI32, value);
- Br(target);
+ BrOrRet(decoder, depth);
__ bind(&cont_false);
}
@@ -1273,7 +1312,7 @@ class LiftoffCompiler {
__ jmp(label.get());
} else {
__ bind(label.get());
- Br(decoder->control_at(br_depth));
+ BrOrRet(decoder, br_depth);
}
}
@@ -1330,10 +1369,17 @@ class LiftoffCompiler {
DCHECK(!table_iterator.has_next());
}
- void Else(FullDecoder* decoder, Control* if_block) {
- if (if_block->reachable()) __ emit_jump(if_block->label.get());
- __ bind(if_block->else_state->label.get());
- __ cache_state()->Steal(if_block->else_state->state);
+ void Else(FullDecoder* decoder, Control* c) {
+ if (c->reachable()) {
+ if (!c->end_merge.reached) {
+ c->label_state.InitMerge(*__ cache_state(), __ num_locals(),
+ c->end_merge.arity, c->stack_depth);
+ }
+ __ MergeFullStackWith(c->label_state, *__ cache_state());
+ __ emit_jump(c->label.get());
+ }
+ __ bind(c->else_state->label.get());
+ __ cache_state()->Steal(c->else_state->state);
}
Label* AddOutOfLineTrap(WasmCodePosition position,
@@ -1352,8 +1398,8 @@ class LiftoffCompiler {
// (a jump to the trap was generated then); return false otherwise.
bool BoundsCheckMem(FullDecoder* decoder, uint32_t access_size,
uint32_t offset, Register index, LiftoffRegList pinned) {
- const bool statically_oob = access_size > env_->max_memory_size ||
- offset > env_->max_memory_size - access_size;
+ const bool statically_oob =
+ !IsInBounds(offset, access_size, env_->max_memory_size);
if (!statically_oob &&
(FLAG_wasm_no_bounds_checks || env_->use_trap_handler)) {
@@ -1386,10 +1432,10 @@ class LiftoffCompiler {
// compile time. Otherwise, only one check is required (see below).
LiftoffRegister end_offset_reg =
pinned.set(__ GetUnusedRegister(kGpReg, pinned));
- LiftoffRegister mem_size = __ GetUnusedRegister(kGpReg, pinned);
- LOAD_INSTANCE_FIELD(mem_size, MemorySize, kPointerSize);
+ Register mem_size = __ GetUnusedRegister(kGpReg, pinned).gp();
+ LOAD_INSTANCE_FIELD(mem_size, MemorySize, kSystemPointerSize);
- if (kPointerSize == 8) {
+ if (kSystemPointerSize == 8) {
__ LoadConstant(end_offset_reg, WasmValue(end_offset));
} else {
__ LoadConstant(end_offset_reg,
@@ -1399,13 +1445,12 @@ class LiftoffCompiler {
if (end_offset >= env_->min_memory_size) {
__ emit_cond_jump(kUnsignedGreaterEqual, trap_label,
LiftoffAssembler::kWasmIntPtr, end_offset_reg.gp(),
- mem_size.gp());
+ mem_size);
}
// Just reuse the end_offset register for computing the effective size.
LiftoffRegister effective_size_reg = end_offset_reg;
- __ emit_ptrsize_sub(effective_size_reg.gp(), mem_size.gp(),
- end_offset_reg.gp());
+ __ emit_ptrsize_sub(effective_size_reg.gp(), mem_size, end_offset_reg.gp());
__ emit_i32_to_intptr(index, index);
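
The bounds check above has two layers: a static test that rules the access out at compile time, and an emitted runtime comparison against the actual memory size. A sketch of both, assuming IsInBounds has the usual overflow-safe form (the exact helper is defined elsewhere in the tree):

    #include <cstdint>

    // "offset + access_size <= max" evaluated without wraparound.
    bool IsInBoundsSketch(uint64_t offset, uint64_t access_size, uint64_t max) {
      return access_size <= max && offset <= max - access_size;
    }

    // Runtime part: the access of {access_size} bytes at {index + offset} is
    // valid iff end_offset < mem_size and index < mem_size - end_offset.
    bool RuntimeBoundsOk(uint64_t index, uint64_t offset, uint64_t access_size,
                         uint64_t mem_size) {
      uint64_t end_offset = offset + access_size - 1;
      if (end_offset >= mem_size) return false;         // emitted trap
      uint64_t effective_size = mem_size - end_offset;  // reuses the register
      return index < effective_size;
    }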
@@ -1474,33 +1519,34 @@ class LiftoffCompiler {
stack_slots.Construct();
}
- // Set context to zero (Smi::kZero) for the runtime call.
- __ TurboAssembler::Move(kContextRegister, Smi::kZero);
- LiftoffRegister centry(kJavaScriptCallCodeStartRegister);
- LOAD_INSTANCE_FIELD(centry, CEntryStub, kPointerSize);
- __ CallRuntimeWithCEntry(runtime_function, centry.gp());
- safepoint_table_builder_.DefineSafepoint(&asm_, Safepoint::kSimple, 0,
+ // Set context to "no context" for the runtime call.
+ __ TurboAssembler::Move(kContextRegister,
+ Smi::FromInt(Context::kNoContext));
+ Register centry = kJavaScriptCallCodeStartRegister;
+ LOAD_TAGGED_PTR_INSTANCE_FIELD(centry, CEntryStub);
+ __ CallRuntimeWithCEntry(runtime_function, centry);
+ safepoint_table_builder_.DefineSafepoint(&asm_, Safepoint::kSimple,
Safepoint::kNoLazyDeopt);
}
- LiftoffRegister AddMemoryMasking(LiftoffRegister index, uint32_t* offset,
- LiftoffRegList& pinned) {
+ Register AddMemoryMasking(Register index, uint32_t* offset,
+ LiftoffRegList& pinned) {
if (!FLAG_untrusted_code_mitigations || env_->use_trap_handler) {
return index;
}
DEBUG_CODE_COMMENT("Mask memory index");
// Make sure that we can overwrite {index}.
- if (__ cache_state()->is_used(index)) {
- LiftoffRegister old_index = index;
- pinned.clear(old_index);
- index = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
- if (index != old_index) __ Move(index.gp(), old_index.gp(), kWasmI32);
+ if (__ cache_state()->is_used(LiftoffRegister(index))) {
+ Register old_index = index;
+ pinned.clear(LiftoffRegister(old_index));
+ index = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
+ if (index != old_index) __ Move(index, old_index, kWasmI32);
}
- LiftoffRegister tmp = __ GetUnusedRegister(kGpReg, pinned);
- __ LoadConstant(tmp, WasmValue(*offset));
- __ emit_ptrsize_add(index.gp(), index.gp(), tmp.gp());
- LOAD_INSTANCE_FIELD(tmp, MemoryMask, kPointerSize);
- __ emit_ptrsize_and(index.gp(), index.gp(), tmp.gp());
+ Register tmp = __ GetUnusedRegister(kGpReg, pinned).gp();
+ __ LoadConstant(LiftoffRegister(tmp), WasmValue(*offset));
+ __ emit_ptrsize_add(index, index, tmp);
+ LOAD_INSTANCE_FIELD(tmp, MemoryMask, kSystemPointerSize);
+ __ emit_ptrsize_and(index, index, tmp);
*offset = 0;
return index;
}
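
With --untrusted-code-mitigations, the masking above clamps the already-bounds-checked address so that a speculatively mispredicted check still cannot read outside the memory. A sketch of the address computation, assuming the mask stored in the instance is (memory size rounded up to a power of two) minus one:

    #include <cstdint>

    uint64_t MaskedAddress(uint64_t index, uint64_t offset, uint64_t mem_mask) {
      // The static offset is folded into the index first (and then zeroed in
      // the caller, as above), so the mask covers the whole effective address.
      return (index + offset) & mem_mask;
    }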
@@ -1509,22 +1555,22 @@ class LiftoffCompiler {
const MemoryAccessImmediate<validate>& imm,
const Value& index_val, Value* result) {
ValueType value_type = type.value_type();
- if (!CheckSupportedType(decoder, kTypes_ilfd, value_type, "load")) return;
+ if (!CheckSupportedType(decoder, kSupportedTypes, value_type, "load"))
+ return;
LiftoffRegList pinned;
- LiftoffRegister index = pinned.set(__ PopToRegister());
- if (BoundsCheckMem(decoder, type.size(), imm.offset, index.gp(), pinned)) {
+ Register index = pinned.set(__ PopToRegister()).gp();
+ if (BoundsCheckMem(decoder, type.size(), imm.offset, index, pinned)) {
return;
}
uint32_t offset = imm.offset;
index = AddMemoryMasking(index, &offset, pinned);
DEBUG_CODE_COMMENT("Load from memory");
- LiftoffRegister addr = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
- LOAD_INSTANCE_FIELD(addr, MemoryStart, kPointerSize);
+ Register addr = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
+ LOAD_INSTANCE_FIELD(addr, MemoryStart, kSystemPointerSize);
RegClass rc = reg_class_for(value_type);
LiftoffRegister value = pinned.set(__ GetUnusedRegister(rc, pinned));
uint32_t protected_load_pc = 0;
- __ Load(value, addr.gp(), index.gp(), offset, type, pinned,
- &protected_load_pc, true);
+ __ Load(value, addr, index, offset, type, pinned, &protected_load_pc, true);
if (env_->use_trap_handler) {
AddOutOfLineTrap(decoder->position(),
WasmCode::kThrowWasmTrapMemOutOfBounds,
@@ -1533,7 +1579,7 @@ class LiftoffCompiler {
__ PushRegister(value_type, value);
if (FLAG_trace_wasm_memory) {
- TraceMemoryOperation(false, type.mem_type().representation(), index.gp(),
+ TraceMemoryOperation(false, type.mem_type().representation(), index,
offset, decoder->position());
}
}
@@ -1542,20 +1588,23 @@ class LiftoffCompiler {
const MemoryAccessImmediate<validate>& imm,
const Value& index_val, const Value& value_val) {
ValueType value_type = type.value_type();
- if (!CheckSupportedType(decoder, kTypes_ilfd, value_type, "store")) return;
+ if (!CheckSupportedType(decoder, kSupportedTypes, value_type, "store"))
+ return;
LiftoffRegList pinned;
LiftoffRegister value = pinned.set(__ PopToRegister());
- LiftoffRegister index = pinned.set(__ PopToRegister(pinned));
- if (BoundsCheckMem(decoder, type.size(), imm.offset, index.gp(), pinned)) {
+ Register index = pinned.set(__ PopToRegister(pinned)).gp();
+ if (BoundsCheckMem(decoder, type.size(), imm.offset, index, pinned)) {
return;
}
uint32_t offset = imm.offset;
index = AddMemoryMasking(index, &offset, pinned);
DEBUG_CODE_COMMENT("Store to memory");
- LiftoffRegister addr = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
- LOAD_INSTANCE_FIELD(addr, MemoryStart, kPointerSize);
+ Register addr = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
+ LOAD_INSTANCE_FIELD(addr, MemoryStart, kSystemPointerSize);
uint32_t protected_store_pc = 0;
- __ Store(addr.gp(), index.gp(), offset, value, type, pinned,
+ LiftoffRegList outer_pinned;
+ if (FLAG_trace_wasm_memory) outer_pinned.set(index);
+ __ Store(addr, index, offset, value, type, outer_pinned,
&protected_store_pc, true);
if (env_->use_trap_handler) {
AddOutOfLineTrap(decoder->position(),
@@ -1563,19 +1612,19 @@ class LiftoffCompiler {
protected_store_pc);
}
if (FLAG_trace_wasm_memory) {
- TraceMemoryOperation(true, type.mem_rep(), index.gp(), offset,
+ TraceMemoryOperation(true, type.mem_rep(), index, offset,
decoder->position());
}
}
void CurrentMemoryPages(FullDecoder* decoder, Value* result) {
- LiftoffRegister mem_size = __ GetUnusedRegister(kGpReg);
- LOAD_INSTANCE_FIELD(mem_size, MemorySize, kPointerSize);
- __ emit_ptrsize_shr(mem_size.gp(), mem_size.gp(), kWasmPageSizeLog2);
- __ PushRegister(kWasmI32, mem_size);
+ Register mem_size = __ GetUnusedRegister(kGpReg).gp();
+ LOAD_INSTANCE_FIELD(mem_size, MemorySize, kSystemPointerSize);
+ __ emit_ptrsize_shr(mem_size, mem_size, kWasmPageSizeLog2);
+ __ PushRegister(kWasmI32, LiftoffRegister(mem_size));
}
- void GrowMemory(FullDecoder* decoder, const Value& value, Value* result_val) {
+ void MemoryGrow(FullDecoder* decoder, const Value& value, Value* result_val) {
// Pop the input, then spill all cache registers to make the runtime call.
LiftoffRegList pinned;
LiftoffRegister input = pinned.set(__ PopToRegister());
@@ -1587,7 +1636,7 @@ class LiftoffCompiler {
"complex code here otherwise)");
LiftoffRegister result = pinned.set(LiftoffRegister(kGpReturnReg));
- WasmGrowMemoryDescriptor descriptor;
+ WasmMemoryGrowDescriptor descriptor;
DCHECK_EQ(0, descriptor.GetStackParameterCount());
DCHECK_EQ(1, descriptor.GetRegisterParameterCount());
DCHECK_EQ(ValueTypes::MachineTypeFor(kWasmI32),
@@ -1596,8 +1645,8 @@ class LiftoffCompiler {
Register param_reg = descriptor.GetRegisterParameter(0);
if (input.gp() != param_reg) __ Move(param_reg, input.gp(), kWasmI32);
- __ CallRuntimeStub(WasmCode::kWasmGrowMemory);
- safepoint_table_builder_.DefineSafepoint(&asm_, Safepoint::kSimple, 0,
+ __ CallRuntimeStub(WasmCode::kWasmMemoryGrow);
+ safepoint_table_builder_.DefineSafepoint(&asm_, Safepoint::kSimple,
Safepoint::kNoLazyDeopt);
if (kReturnRegister0 != result.gp()) {
@@ -1613,7 +1662,7 @@ class LiftoffCompiler {
if (imm.sig->return_count() > 1)
return unsupported(decoder, "multi-return");
if (imm.sig->return_count() == 1 &&
- !CheckSupportedType(decoder, kTypes_ilfd, imm.sig->GetReturn(0),
+ !CheckSupportedType(decoder, kSupportedTypes, imm.sig->GetReturn(0),
"return"))
return;
@@ -1625,32 +1674,31 @@ class LiftoffCompiler {
if (imm.index < env_->module->num_imported_functions) {
// A direct call to an imported function.
LiftoffRegList pinned;
- LiftoffRegister tmp = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
- LiftoffRegister target = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
+ Register tmp = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
+ Register target = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
- LiftoffRegister imported_targets = tmp;
+ Register imported_targets = tmp;
LOAD_INSTANCE_FIELD(imported_targets, ImportedFunctionTargets,
- kPointerSize);
- __ Load(target, imported_targets.gp(), no_reg,
+ kSystemPointerSize);
+ __ Load(LiftoffRegister(target), imported_targets, no_reg,
imm.index * sizeof(Address), kPointerLoadType, pinned);
- LiftoffRegister imported_instances = tmp;
- LOAD_INSTANCE_FIELD(imported_instances, ImportedFunctionInstances,
- kPointerSize);
- LiftoffRegister target_instance = tmp;
- __ Load(target_instance, imported_instances.gp(), no_reg,
- ObjectAccess::ElementOffsetInTaggedFixedArray(imm.index),
- kPointerLoadType, pinned);
-
- LiftoffRegister* explicit_instance = &target_instance;
- Register target_reg = target.gp();
- __ PrepareCall(imm.sig, call_descriptor, &target_reg, explicit_instance);
+ Register imported_function_refs = tmp;
+ LOAD_TAGGED_PTR_INSTANCE_FIELD(imported_function_refs,
+ ImportedFunctionRefs);
+ Register imported_function_ref = tmp;
+ __ LoadTaggedPointer(
+ imported_function_ref, imported_function_refs, no_reg,
+ ObjectAccess::ElementOffsetInTaggedFixedArray(imm.index), pinned);
+
+ Register* explicit_instance = &imported_function_ref;
+ __ PrepareCall(imm.sig, call_descriptor, &target, explicit_instance);
source_position_table_builder_.AddPosition(
__ pc_offset(), SourcePosition(decoder->position()), false);
- __ CallIndirect(imm.sig, call_descriptor, target_reg);
+ __ CallIndirect(imm.sig, call_descriptor, target);
- safepoint_table_builder_.DefineSafepoint(&asm_, Safepoint::kSimple, 0,
+ safepoint_table_builder_.DefineSafepoint(&asm_, Safepoint::kSimple,
Safepoint::kNoLazyDeopt);
__ FinishCall(imm.sig, call_descriptor);
@@ -1665,7 +1713,7 @@ class LiftoffCompiler {
Address addr = static_cast<Address>(imm.index);
__ CallNativeWasmCode(addr);
- safepoint_table_builder_.DefineSafepoint(&asm_, Safepoint::kSimple, 0,
+ safepoint_table_builder_.DefineSafepoint(&asm_, Safepoint::kSimple,
Safepoint::kNoLazyDeopt);
__ FinishCall(imm.sig, call_descriptor);
@@ -1679,28 +1727,27 @@ class LiftoffCompiler {
return unsupported(decoder, "multi-return");
}
if (imm.sig->return_count() == 1 &&
- !CheckSupportedType(decoder, kTypes_ilfd, imm.sig->GetReturn(0),
+ !CheckSupportedType(decoder, kSupportedTypes, imm.sig->GetReturn(0),
"return")) {
return;
}
// Pop the index.
- LiftoffRegister index = __ PopToRegister();
+ Register index = __ PopToRegister().gp();
// If that register is still in use after popping, move the value to another
// register, because the code below will modify the index register.
- if (__ cache_state()->is_used(index)) {
- LiftoffRegister new_index =
- __ GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(index));
+ if (__ cache_state()->is_used(LiftoffRegister(index))) {
+ Register new_index =
+ __ GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(index)).gp();
__ Move(new_index, index, kWasmI32);
index = new_index;
}
LiftoffRegList pinned = LiftoffRegList::ForRegs(index);
// Get three temporary registers.
- LiftoffRegister table = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
- LiftoffRegister tmp_const =
- pinned.set(__ GetUnusedRegister(kGpReg, pinned));
- LiftoffRegister scratch = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
+ Register table = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
+ Register tmp_const = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
+ Register scratch = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
// Bounds check against the table size.
Label* invalid_func_label = AddOutOfLineTrap(
@@ -1714,67 +1761,71 @@ class LiftoffCompiler {
// {instance->indirect_function_table_size}.
LOAD_INSTANCE_FIELD(tmp_const, IndirectFunctionTableSize, kUInt32Size);
__ emit_cond_jump(kUnsignedGreaterEqual, invalid_func_label, kWasmI32,
- index.gp(), tmp_const.gp());
+ index, tmp_const);
// Mask the index to prevent SSCA.
if (FLAG_untrusted_code_mitigations) {
DEBUG_CODE_COMMENT("Mask indirect call index");
// mask = ((index - size) & ~index) >> 31
// Reuse allocated registers; note: size is still stored in {tmp_const}.
- LiftoffRegister diff = table;
- LiftoffRegister neg_index = tmp_const;
- LiftoffRegister mask = scratch;
+ Register diff = table;
+ Register neg_index = tmp_const;
+ Register mask = scratch;
// 1) diff = index - size
- __ emit_i32_sub(diff.gp(), index.gp(), tmp_const.gp());
+ __ emit_i32_sub(diff, index, tmp_const);
// 2) neg_index = ~index
- __ LoadConstant(neg_index, WasmValue(int32_t{-1}));
- __ emit_i32_xor(neg_index.gp(), neg_index.gp(), index.gp());
+ __ LoadConstant(LiftoffRegister(neg_index), WasmValue(int32_t{-1}));
+ __ emit_i32_xor(neg_index, neg_index, index);
// 3) mask = diff & neg_index
- __ emit_i32_and(mask.gp(), diff.gp(), neg_index.gp());
+ __ emit_i32_and(mask, diff, neg_index);
// 4) mask = mask >> 31
- __ LoadConstant(tmp_const, WasmValue(int32_t{31}));
- __ emit_i32_sar(mask.gp(), mask.gp(), tmp_const.gp(), pinned);
+ __ LoadConstant(LiftoffRegister(tmp_const), WasmValue(int32_t{31}));
+ __ emit_i32_sar(mask, mask, tmp_const, pinned);
// Apply mask.
- __ emit_i32_and(index.gp(), index.gp(), mask.gp());
+ __ emit_i32_and(index, index, mask);
}
DEBUG_CODE_COMMENT("Check indirect call signature");
// Load the signature from {instance->ift_sig_ids[key]}
- LOAD_INSTANCE_FIELD(table, IndirectFunctionTableSigIds, kPointerSize);
- __ LoadConstant(tmp_const,
+ LOAD_INSTANCE_FIELD(table, IndirectFunctionTableSigIds, kSystemPointerSize);
+ __ LoadConstant(LiftoffRegister(tmp_const),
WasmValue(static_cast<uint32_t>(sizeof(uint32_t))));
// TODO(wasm): use an emit_i32_shli() instead of a multiply.
// (currently cannot use shl on ia32/x64 because it clobbers %rcx).
- __ emit_i32_mul(index.gp(), index.gp(), tmp_const.gp());
- __ Load(scratch, table.gp(), index.gp(), 0, LoadType::kI32Load, pinned);
+ __ emit_i32_mul(index, index, tmp_const);
+ __ Load(LiftoffRegister(scratch), table, index, 0, LoadType::kI32Load,
+ pinned);
// Compare against expected signature.
- __ LoadConstant(tmp_const, WasmValue(canonical_sig_num));
+ __ LoadConstant(LiftoffRegister(tmp_const), WasmValue(canonical_sig_num));
Label* sig_mismatch_label = AddOutOfLineTrap(
decoder->position(), WasmCode::kThrowWasmTrapFuncSigMismatch);
__ emit_cond_jump(kUnequal, sig_mismatch_label,
- LiftoffAssembler::kWasmIntPtr, scratch.gp(),
- tmp_const.gp());
+ LiftoffAssembler::kWasmIntPtr, scratch, tmp_const);
DEBUG_CODE_COMMENT("Execute indirect call");
- if (kPointerSize == 8) {
+ if (kSystemPointerSize == 8) {
// {index} has already been multiplied by 4. Multiply by another 2.
- __ LoadConstant(tmp_const, WasmValue(2));
- __ emit_i32_mul(index.gp(), index.gp(), tmp_const.gp());
+ __ LoadConstant(LiftoffRegister(tmp_const), WasmValue(2));
+ __ emit_i32_mul(index, index, tmp_const);
}
- // Load the target from {instance->ift_targets[key]}
- LOAD_INSTANCE_FIELD(table, IndirectFunctionTableTargets, kPointerSize);
- __ Load(scratch, table.gp(), index.gp(), 0, kPointerLoadType, pinned);
-
// Load the instance ref from {instance->ift_refs[key]}
- LOAD_INSTANCE_FIELD(table, IndirectFunctionTableInstances, kPointerSize);
- __ Load(tmp_const, table.gp(), index.gp(),
- ObjectAccess::ElementOffsetInTaggedFixedArray(0), kPointerLoadType,
+ LOAD_TAGGED_PTR_INSTANCE_FIELD(table, IndirectFunctionTableRefs);
+ // {index} has already been multiplied by kSystemPointerSize.
+ STATIC_ASSERT(kTaggedSize == kSystemPointerSize);
+ __ LoadTaggedPointer(tmp_const, table, index,
+ ObjectAccess::ElementOffsetInTaggedFixedArray(0),
+ pinned);
+ Register* explicit_instance = &tmp_const;
+
+ // Load the target from {instance->ift_targets[key]}
+ LOAD_INSTANCE_FIELD(table, IndirectFunctionTableTargets,
+ kSystemPointerSize);
+ __ Load(LiftoffRegister(scratch), table, index, 0, kPointerLoadType,
pinned);
- LiftoffRegister* explicit_instance = &tmp_const;
source_position_table_builder_.AddPosition(
__ pc_offset(), SourcePosition(decoder->position()), false);
@@ -1784,11 +1835,11 @@ class LiftoffCompiler {
call_descriptor =
GetLoweredCallDescriptor(compilation_zone_, call_descriptor);
- Register target = scratch.gp();
+ Register target = scratch;
__ PrepareCall(imm.sig, call_descriptor, &target, explicit_instance);
__ CallIndirect(imm.sig, call_descriptor, target);
- safepoint_table_builder_.DefineSafepoint(&asm_, Safepoint::kSimple, 0,
+ safepoint_table_builder_.DefineSafepoint(&asm_, Safepoint::kSimple,
Safepoint::kNoLazyDeopt);
__ FinishCall(imm.sig, call_descriptor);
@@ -1815,23 +1866,57 @@ class LiftoffCompiler {
unsupported(decoder, "simd");
}
void Throw(FullDecoder* decoder, const ExceptionIndexImmediate<validate>&,
- Control* block, const Vector<Value>& args) {
+ const Vector<Value>& args) {
unsupported(decoder, "throw");
}
- void CatchException(FullDecoder* decoder,
- const ExceptionIndexImmediate<validate>& imm,
- Control* block, Vector<Value> caught_values) {
- unsupported(decoder, "catch");
+ void Rethrow(FullDecoder* decoder, const Value& exception) {
+ unsupported(decoder, "rethrow");
+ }
+ void BrOnException(FullDecoder* decoder, const Value& exception,
+ const ExceptionIndexImmediate<validate>& imm,
+ uint32_t depth, Vector<Value> values) {
+ unsupported(decoder, "br_on_exn");
}
void AtomicOp(FullDecoder* decoder, WasmOpcode opcode, Vector<Value> args,
const MemoryAccessImmediate<validate>& imm, Value* result) {
unsupported(decoder, "atomicop");
}
+ void MemoryInit(FullDecoder* decoder,
+ const MemoryInitImmediate<validate>& imm, const Value& dst,
+ const Value& src, const Value& size) {
+ unsupported(decoder, "memory.init");
+ }
+ void MemoryDrop(FullDecoder* decoder,
+ const MemoryDropImmediate<validate>& imm) {
+ unsupported(decoder, "memory.drop");
+ }
+ void MemoryCopy(FullDecoder* decoder,
+ const MemoryIndexImmediate<validate>& imm, const Value& dst,
+ const Value& src, const Value& size) {
+ unsupported(decoder, "memory.copy");
+ }
+ void MemoryFill(FullDecoder* decoder,
+ const MemoryIndexImmediate<validate>& imm, const Value& dst,
+ const Value& value, const Value& size) {
+ unsupported(decoder, "memory.fill");
+ }
+ void TableInit(FullDecoder* decoder, const TableInitImmediate<validate>& imm,
+ Vector<Value> args) {
+ unsupported(decoder, "table.init");
+ }
+ void TableDrop(FullDecoder* decoder,
+ const TableDropImmediate<validate>& imm) {
+ unsupported(decoder, "table.drop");
+ }
+ void TableCopy(FullDecoder* decoder, const TableIndexImmediate<validate>& imm,
+ Vector<Value> args) {
+ unsupported(decoder, "table.copy");
+ }
private:
LiftoffAssembler asm_;
compiler::CallDescriptor* const descriptor_;
- ModuleEnv* const env_;
+ CompilationEnv* const env_;
bool ok_ = true;
std::vector<OutOfLineCode> out_of_line_code_;
SourcePositionTableBuilder source_position_table_builder_;
@@ -1867,7 +1952,9 @@ class LiftoffCompiler {
} // namespace
-bool LiftoffCompilationUnit::ExecuteCompilation(WasmFeatures* detected) {
+WasmCompilationResult LiftoffCompilationUnit::ExecuteCompilation(
+ CompilationEnv* env, const FunctionBody& func_body, Counters* counters,
+ WasmFeatures* detected) {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"),
"ExecuteLiftoffCompilation");
base::ElapsedTimer compile_timer;
@@ -1876,63 +1963,53 @@ bool LiftoffCompilationUnit::ExecuteCompilation(WasmFeatures* detected) {
}
Zone zone(wasm_unit_->wasm_engine_->allocator(), "LiftoffCompilationZone");
- const WasmModule* module =
- wasm_unit_->env_ ? wasm_unit_->env_->module : nullptr;
- auto call_descriptor =
- compiler::GetWasmCallDescriptor(&zone, wasm_unit_->func_body_.sig);
+ const WasmModule* module = env ? env->module : nullptr;
+ auto call_descriptor = compiler::GetWasmCallDescriptor(&zone, func_body.sig);
base::Optional<TimedHistogramScope> liftoff_compile_time_scope(
- base::in_place, wasm_unit_->counters_->liftoff_compile_time());
+ base::in_place, counters->liftoff_compile_time());
+ std::unique_ptr<wasm::WasmInstructionBuffer> instruction_buffer =
+ wasm::WasmInstructionBuffer::New();
WasmFullDecoder<Decoder::kValidate, LiftoffCompiler> decoder(
- &zone, module, wasm_unit_->native_module_->enabled_features(), detected,
- wasm_unit_->func_body_, call_descriptor, wasm_unit_->env_, &zone);
+ &zone, module, env->enabled_features, detected, func_body,
+ call_descriptor, env, &zone, instruction_buffer->CreateView());
decoder.Decode();
liftoff_compile_time_scope.reset();
LiftoffCompiler* compiler = &decoder.interface();
- if (decoder.failed()) return false; // validation error
+ if (decoder.failed()) return WasmCompilationResult{decoder.error()};
if (!compiler->ok()) {
// Liftoff compilation failed.
- wasm_unit_->counters_->liftoff_unsupported_functions()->Increment();
- return false;
+ counters->liftoff_unsupported_functions()->Increment();
+ return WasmCompilationResult{WasmError{0, "Liftoff bailout"}};
}
- wasm_unit_->counters_->liftoff_compiled_functions()->Increment();
+ counters->liftoff_compiled_functions()->Increment();
if (FLAG_trace_wasm_decode_time) {
double compile_ms = compile_timer.Elapsed().InMillisecondsF();
PrintF(
"wasm-compilation liftoff phase 1 ok: %u bytes, %0.3f ms decode and "
"compile\n",
- static_cast<unsigned>(wasm_unit_->func_body_.end -
- wasm_unit_->func_body_.start),
- compile_ms);
- }
-
- CodeDesc desc;
- compiler->GetCode(&desc);
- OwnedVector<byte> source_positions = compiler->GetSourcePositionTable();
- OwnedVector<trap_handler::ProtectedInstructionData> protected_instructions =
- compiler->GetProtectedInstructions();
- uint32_t frame_slot_count = compiler->GetTotalFrameSlotCount();
- int safepoint_table_offset = compiler->GetSafepointTableOffset();
-
- code_ = wasm_unit_->native_module_->AddCode(
- wasm_unit_->func_index_, desc, frame_slot_count, safepoint_table_offset,
- 0, std::move(protected_instructions), std::move(source_positions),
- WasmCode::kLiftoff);
- wasm_unit_->native_module_->PublishCode(code_);
-
- return true;
-}
+ static_cast<unsigned>(func_body.end - func_body.start), compile_ms);
+ }
+
+ WasmCompilationResult result;
+ compiler->GetCode(&result.code_desc);
+ result.instr_buffer = instruction_buffer->ReleaseBuffer();
+ result.source_positions = compiler->GetSourcePositionTable();
+ result.protected_instructions = compiler->GetProtectedInstructions();
+ result.frame_slot_count = compiler->GetTotalFrameSlotCount();
+ result.safepoint_table_offset = compiler->GetSafepointTableOffset();
-WasmCode* LiftoffCompilationUnit::FinishCompilation(ErrorThrower*) {
- return code_;
+ DCHECK(result.succeeded());
+ return result;
}
#undef __
#undef TRACE
-#undef WASM_INSTANCE_OBJECT_OFFSET
-#undef WASM_INSTANCE_OBJECT_SIZE
+#undef WASM_INSTANCE_OBJECT_FIELD_OFFSET
+#undef WASM_INSTANCE_OBJECT_FIELD_SIZE
#undef LOAD_INSTANCE_FIELD
+#undef LOAD_TAGGED_PTR_INSTANCE_FIELD
#undef DEBUG_CODE_COMMENT
} // namespace wasm
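
The index-masking sequence in the CallIndirect hunk above (mask = ((index - size) & ~index) >> 31) is a branchless clamp: an index that fails the bounds check is forced to zero before it is used to address the signature and target tables, so a mispredicted bounds check cannot speculatively load attacker-controlled table entries. A standalone sketch of the same arithmetic, assuming 32-bit operands and the arithmetic right shift that emit_i32_sar performs:

    #include <cstdint>

    // Returns {index} unchanged when index < size, and 0 otherwise.
    // Table sizes stay far below 2^31, so {diff} has its top bit set exactly
    // when the index is in bounds; {neg_index} additionally zeroes the mask
    // for indices that have their own top bit set.
    uint32_t MaskIndex(uint32_t index, uint32_t size) {
      uint32_t diff = index - size;   // 1) diff = index - size
      uint32_t neg_index = ~index;    // 2) neg_index = ~index
      uint32_t mask = static_cast<uint32_t>(
          static_cast<int32_t>(diff & neg_index) >> 31);  // 3) and 4)
      return index & mask;            // apply mask
    }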
diff --git a/deps/v8/src/wasm/baseline/liftoff-compiler.h b/deps/v8/src/wasm/baseline/liftoff-compiler.h
index c7696cbb56..e1fb79138f 100644
--- a/deps/v8/src/wasm/baseline/liftoff-compiler.h
+++ b/deps/v8/src/wasm/baseline/liftoff-compiler.h
@@ -6,30 +6,32 @@
#define V8_WASM_BASELINE_LIFTOFF_COMPILER_H_
#include "src/base/macros.h"
+#include "src/wasm/function-compiler.h"
namespace v8 {
namespace internal {
+
+class Counters;
+
namespace wasm {
+struct CompilationEnv;
+struct FunctionBody;
+class NativeModule;
struct WasmFeatures;
-class ErrorThrower;
-class WasmCode;
-class WasmCompilationUnit;
class LiftoffCompilationUnit final {
public:
explicit LiftoffCompilationUnit(WasmCompilationUnit* wasm_unit)
: wasm_unit_(wasm_unit) {}
- bool ExecuteCompilation(WasmFeatures* detected);
- WasmCode* FinishCompilation(ErrorThrower*);
+ WasmCompilationResult ExecuteCompilation(CompilationEnv*, const FunctionBody&,
+ Counters*,
+ WasmFeatures* detected_features);
private:
WasmCompilationUnit* const wasm_unit_;
- // Result of compilation:
- WasmCode* code_;
-
DISALLOW_COPY_AND_ASSIGN(LiftoffCompilationUnit);
};
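
With this header change, ExecuteCompilation no longer caches a WasmCode* on the unit; it returns a WasmCompilationResult that carries either the code artifacts (code_desc, source positions, safepoint table offset, ...) or a WasmError. A hypothetical caller shape under that contract; ReportError and Publish are illustrative stand-ins, not V8 functions, and the whole block only makes sense inside V8:

    // Sketch only: type, field, and method names follow the diff above.
    void CompileWithLiftoff(LiftoffCompilationUnit* unit, CompilationEnv* env,
                            const FunctionBody& body, Counters* counters,
                            WasmFeatures* detected) {
      WasmCompilationResult result =
          unit->ExecuteCompilation(env, body, counters, detected);
      if (!result.succeeded()) {
        ReportError(result);  // e.g. forward the WasmError to CompilationState
        return;
      }
      Publish(std::move(result));  // hand code_desc etc. to the NativeModule
    }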
diff --git a/deps/v8/src/wasm/baseline/liftoff-register.h b/deps/v8/src/wasm/baseline/liftoff-register.h
index c3f89eb506..267a005547 100644
--- a/deps/v8/src/wasm/baseline/liftoff-register.h
+++ b/deps/v8/src/wasm/baseline/liftoff-register.h
@@ -16,7 +16,7 @@ namespace v8 {
namespace internal {
namespace wasm {
-static constexpr bool kNeedI64RegPair = kPointerSize == 4;
+static constexpr bool kNeedI64RegPair = kSystemPointerSize == 4;
enum RegClass : uint8_t {
kGpReg,
@@ -26,7 +26,7 @@ enum RegClass : uint8_t {
kNoReg = kGpRegPair + kNeedI64RegPair
};
-enum RegPairHalf : uint8_t { kLowWord, kHighWord };
+enum RegPairHalf : uint8_t { kLowWord = 0, kHighWord = 1 };
static inline constexpr bool needs_reg_pair(ValueType type) {
return kNeedI64RegPair && type == kWasmI64;
@@ -92,7 +92,7 @@ class LiftoffRegister {
DCHECK_EQ(reg, fp());
}
- static LiftoffRegister from_liftoff_code(int code) {
+ static LiftoffRegister from_liftoff_code(uint32_t code) {
DCHECK_LE(0, code);
DCHECK_GT(kAfterMaxLiftoffRegCode, code);
DCHECK_EQ(code, static_cast<storage_t>(code));
@@ -153,7 +153,7 @@ class LiftoffRegister {
return DoubleRegister::from_code(code_ - kAfterMaxLiftoffGpRegCode);
}
- uint32_t liftoff_code() const {
+ int liftoff_code() const {
DCHECK(is_gp() || is_fp());
return code_;
}
@@ -185,17 +185,18 @@ ASSERT_TRIVIALLY_COPYABLE(LiftoffRegister);
inline std::ostream& operator<<(std::ostream& os, LiftoffRegister reg) {
if (reg.is_pair()) {
- return os << "<gp" << reg.low_gp().code() << "+" << reg.high_gp().code()
- << ">";
+ return os << "<" << reg.low_gp() << "+" << reg.high_gp() << ">";
} else if (reg.is_gp()) {
- return os << "gp" << reg.gp().code();
+ return os << reg.gp();
} else {
- return os << "fp" << reg.fp().code();
+ return os << reg.fp();
}
}
class LiftoffRegList {
public:
+ class Iterator;
+
static constexpr bool use_u16 = kAfterMaxLiftoffRegCode <= 16;
static constexpr bool use_u32 = !use_u16 && kAfterMaxLiftoffRegCode <= 32;
using storage_t = std::conditional<
@@ -253,8 +254,8 @@ class LiftoffRegList {
return LiftoffRegList(regs_ & other.regs_);
}
- constexpr LiftoffRegList operator~() const {
- return LiftoffRegList(~regs_ & (kGpMask | kFpMask));
+ constexpr LiftoffRegList operator|(const LiftoffRegList other) const {
+ return LiftoffRegList(regs_ | other.regs_);
}
constexpr bool operator==(const LiftoffRegList other) const {
@@ -266,13 +267,13 @@ class LiftoffRegList {
LiftoffRegister GetFirstRegSet() const {
DCHECK(!is_empty());
- unsigned first_code = base::bits::CountTrailingZeros(regs_);
+ int first_code = base::bits::CountTrailingZeros(regs_);
return LiftoffRegister::from_liftoff_code(first_code);
}
LiftoffRegister GetLastRegSet() const {
DCHECK(!is_empty());
- unsigned last_code =
+ int last_code =
8 * sizeof(regs_) - 1 - base::bits::CountLeadingZeros(regs_);
return LiftoffRegister::from_liftoff_code(last_code);
}
@@ -283,6 +284,12 @@ class LiftoffRegList {
return FromBits(regs_ & ~mask.regs_);
}
+ RegList GetGpList() { return regs_ & kGpMask; }
+ RegList GetFpList() { return (regs_ & kFpMask) >> kAfterMaxLiftoffGpRegCode; }
+
+ inline Iterator begin() const;
+ inline Iterator end() const;
+
static LiftoffRegList FromBits(storage_t bits) {
DCHECK_EQ(bits, bits & (kGpMask | kFpMask));
return LiftoffRegList(bits);
@@ -301,9 +308,6 @@ class LiftoffRegList {
return list;
}
- RegList GetGpList() { return regs_ & kGpMask; }
- RegList GetFpList() { return (regs_ & kFpMask) >> kAfterMaxLiftoffGpRegCode; }
-
private:
storage_t regs_ = 0;
@@ -317,8 +321,32 @@ static constexpr LiftoffRegList kGpCacheRegList =
static constexpr LiftoffRegList kFpCacheRegList =
LiftoffRegList::FromBits<LiftoffRegList::kFpMask>();
+class LiftoffRegList::Iterator {
+ public:
+ LiftoffRegister operator*() { return remaining_.GetFirstRegSet(); }
+ Iterator& operator++() {
+ remaining_.clear(remaining_.GetFirstRegSet());
+ return *this;
+ }
+ bool operator==(Iterator other) { return remaining_ == other.remaining_; }
+ bool operator!=(Iterator other) { return remaining_ != other.remaining_; }
+
+ private:
+ explicit Iterator(LiftoffRegList remaining) : remaining_(remaining) {}
+ friend class LiftoffRegList;
+
+ LiftoffRegList remaining_;
+};
+
+LiftoffRegList::Iterator LiftoffRegList::begin() const {
+ return Iterator{*this};
+}
+LiftoffRegList::Iterator LiftoffRegList::end() const {
+ return Iterator{LiftoffRegList{}};
+}
+
static constexpr LiftoffRegList GetCacheRegList(RegClass rc) {
- return rc == kGpReg ? kGpCacheRegList : kFpCacheRegList;
+ return rc == kFpReg ? kFpCacheRegList : kGpCacheRegList;
}
inline std::ostream& operator<<(std::ostream& os, LiftoffRegList reglist) {
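
The Iterator added to LiftoffRegList above turns the register list, which is just a bit set, into a C++ range: begin() starts from the full set, operator++ clears the lowest set bit, and end() is the empty set, so GetFirstRegSet() can drive a range-based for loop. The same scheme on a bare uint32_t bit set, with a GCC/Clang builtin standing in for base::bits::CountTrailingZeros:

    #include <cstdint>

    class BitSetRange {
     public:
      explicit BitSetRange(uint32_t bits) : bits_(bits) {}
      class Iterator {
       public:
        // Lowest set bit; only dereferenced while the set is non-empty.
        int operator*() const { return __builtin_ctz(bits_); }
        Iterator& operator++() {
          bits_ &= bits_ - 1;  // clear the lowest set bit
          return *this;
        }
        bool operator!=(Iterator other) const { return bits_ != other.bits_; }
       private:
        friend class BitSetRange;
        explicit Iterator(uint32_t bits) : bits_(bits) {}
        uint32_t bits_;
      };
      Iterator begin() const { return Iterator{bits_}; }
      Iterator end() const { return Iterator{0}; }
     private:
      uint32_t bits_;
    };

    // for (int code : BitSetRange{0b101010}) visits codes 1, 3, 5.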
diff --git a/deps/v8/src/wasm/baseline/mips/OWNERS b/deps/v8/src/wasm/baseline/mips/OWNERS
index c653ce404d..b455d9ef29 100644
--- a/deps/v8/src/wasm/baseline/mips/OWNERS
+++ b/deps/v8/src/wasm/baseline/mips/OWNERS
@@ -1,2 +1,3 @@
-ibogosavljevic@wavecomp.com
+arikalo@wavecomp.com
+prudic@wavecomp.com
skovacevic@wavecomp.com
diff --git a/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h b/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h
index cc8170b499..cb66406de4 100644
--- a/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h
+++ b/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h
@@ -34,8 +34,10 @@ inline MemOperand GetStackSlot(uint32_t index) {
return MemOperand(fp, -kFirstStackSlotOffset - offset);
}
-inline MemOperand GetHalfStackSlot(uint32_t half_index) {
- int32_t offset = half_index * (LiftoffAssembler::kStackSlotSize / 2);
+inline MemOperand GetHalfStackSlot(uint32_t index, RegPairHalf half) {
+ int32_t half_offset =
+ half == kLowWord ? 0 : LiftoffAssembler::kStackSlotSize / 2;
+ int32_t offset = index * LiftoffAssembler::kStackSlotSize + half_offset;
return MemOperand(fp, -kFirstStackSlotOffset - offset);
}
@@ -255,9 +257,9 @@ void LiftoffAssembler::PatchPrepareStackFrame(int offset,
// We can't run out of space; just pass anything big enough that the
// assembler will not try to grow the buffer.
constexpr int kAvailableSpace = 256;
- TurboAssembler patching_assembler(nullptr, AssemblerOptions{},
- buffer_ + offset, kAvailableSpace,
- CodeObjectRequired::kNo);
+ TurboAssembler patching_assembler(
+ nullptr, AssemblerOptions{}, CodeObjectRequired::kNo,
+ ExternalAssemblerBuffer(buffer_start_ + offset, kAvailableSpace));
// If {bytes} fits in a signed 16-bit immediate, a single addiu is generated
// and the two nops stay untouched. Otherwise, a lui-ori sequence loads the
// value into a register and, as a third instruction, an addu is generated.
@@ -301,6 +303,11 @@ void LiftoffAssembler::LoadFromInstance(Register dst, uint32_t offset,
lw(dst, MemOperand(dst, offset));
}
+void LiftoffAssembler::LoadTaggedPointerFromInstance(Register dst,
+ uint32_t offset) {
+ LoadFromInstance(dst, offset, kTaggedSize);
+}
+
void LiftoffAssembler::SpillInstance(Register instance) {
sw(instance, liftoff::GetInstanceOperand());
}
@@ -309,6 +316,15 @@ void LiftoffAssembler::FillInstanceInto(Register dst) {
lw(dst, liftoff::GetInstanceOperand());
}
+void LiftoffAssembler::LoadTaggedPointer(Register dst, Register src_addr,
+ Register offset_reg,
+ uint32_t offset_imm,
+ LiftoffRegList pinned) {
+ STATIC_ASSERT(kTaggedSize == kInt32Size);
+ Load(LiftoffRegister(dst), src_addr, offset_reg, offset_imm,
+ LoadType::kI32Load, pinned);
+}
+
void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
Register offset_reg, uint32_t offset_imm,
LoadType type, LiftoffRegList pinned,
@@ -401,16 +417,19 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
StoreType type, LiftoffRegList pinned,
uint32_t* protected_store_pc, bool is_store_mem) {
Register dst = no_reg;
+ MemOperand dst_op = MemOperand(dst_addr, offset_imm);
if (offset_reg != no_reg) {
+ if (is_store_mem) {
+ pinned.set(src);
+ }
dst = GetUnusedRegister(kGpReg, pinned).gp();
emit_ptrsize_add(dst, dst_addr, offset_reg);
+ dst_op = MemOperand(dst, offset_imm);
}
- MemOperand dst_op = (offset_reg != no_reg) ? MemOperand(dst, offset_imm)
- : MemOperand(dst_addr, offset_imm);
#if defined(V8_TARGET_BIG_ENDIAN)
if (is_store_mem) {
- pinned.set(dst_op.rm());
+ pinned = pinned | LiftoffRegList::ForRegs(dst_op.rm(), src);
LiftoffRegister tmp = GetUnusedRegister(src.reg_class(), pinned);
// Save original value.
Move(tmp, src, type.value_type());
@@ -442,15 +461,11 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
TurboAssembler::Usw(src.gp(), dst_op);
break;
case StoreType::kI64Store: {
- MemOperand dst_op =
- (offset_reg != no_reg)
- ? MemOperand(dst, offset_imm + liftoff::kLowWordOffset)
- : MemOperand(dst_addr, offset_imm + liftoff::kLowWordOffset);
- MemOperand dst_op_upper =
- (offset_reg != no_reg)
- ? MemOperand(dst, offset_imm + liftoff::kHighWordOffset)
- : MemOperand(dst_addr, offset_imm + liftoff::kHighWordOffset);
- TurboAssembler::Usw(src.low_gp(), dst_op);
+ MemOperand dst_op_lower(dst_op.rm(),
+ offset_imm + liftoff::kLowWordOffset);
+ MemOperand dst_op_upper(dst_op.rm(),
+ offset_imm + liftoff::kHighWordOffset);
+ TurboAssembler::Usw(src.low_gp(), dst_op_lower);
TurboAssembler::Usw(src.high_gp(), dst_op_upper);
break;
}
@@ -468,7 +483,7 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
uint32_t caller_slot_idx,
ValueType type) {
- int32_t offset = kPointerSize * (caller_slot_idx + 1);
+ int32_t offset = kSystemPointerSize * (caller_slot_idx + 1);
liftoff::Load(this, dst, fp, offset, type);
}
@@ -500,8 +515,8 @@ void LiftoffAssembler::Spill(uint32_t index, LiftoffRegister reg,
sw(reg.gp(), dst);
break;
case kWasmI64:
- sw(reg.low_gp(), dst);
- sw(reg.high_gp(), liftoff::GetHalfStackSlot(2 * index + 1));
+ sw(reg.low_gp(), liftoff::GetHalfStackSlot(index, kLowWord));
+ sw(reg.high_gp(), liftoff::GetHalfStackSlot(index, kHighWord));
break;
case kWasmF32:
swc1(reg.fp(), dst);
@@ -532,8 +547,8 @@ void LiftoffAssembler::Spill(uint32_t index, WasmValue value) {
TurboAssembler::li(tmp.low_gp(), Operand(low_word));
TurboAssembler::li(tmp.high_gp(), Operand(high_word));
- sw(tmp.low_gp(), dst);
- sw(tmp.high_gp(), liftoff::GetHalfStackSlot(2 * index + 1));
+ sw(tmp.low_gp(), liftoff::GetHalfStackSlot(index, kLowWord));
+ sw(tmp.high_gp(), liftoff::GetHalfStackSlot(index, kHighWord));
break;
}
default:
@@ -551,8 +566,8 @@ void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index,
lw(reg.gp(), src);
break;
case kWasmI64:
- lw(reg.low_gp(), src);
- lw(reg.high_gp(), liftoff::GetHalfStackSlot(2 * index + 1));
+ lw(reg.low_gp(), liftoff::GetHalfStackSlot(index, kLowWord));
+ lw(reg.high_gp(), liftoff::GetHalfStackSlot(index, kHighWord));
break;
case kWasmF32:
lwc1(reg.fp(), src);
@@ -565,8 +580,9 @@ void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index,
}
}
-void LiftoffAssembler::FillI64Half(Register reg, uint32_t half_index) {
- lw(reg, liftoff::GetHalfStackSlot(half_index));
+void LiftoffAssembler::FillI64Half(Register reg, uint32_t index,
+ RegPairHalf half) {
+ lw(reg, liftoff::GetHalfStackSlot(index, half));
}
void LiftoffAssembler::emit_i32_mul(Register dst, Register lhs, Register rhs) {
@@ -855,16 +871,21 @@ void LiftoffAssembler::emit_f64_copysign(DoubleRegister dst, DoubleRegister lhs,
void LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister src) { \
instruction(dst, src); \
}
+#define FP_UNOP_RETURN_TRUE(name, instruction) \
+ bool LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister src) { \
+ instruction(dst, src); \
+ return true; \
+ }
FP_BINOP(f32_add, add_s)
FP_BINOP(f32_sub, sub_s)
FP_BINOP(f32_mul, mul_s)
FP_BINOP(f32_div, div_s)
FP_UNOP(f32_abs, abs_s)
-FP_UNOP(f32_ceil, Ceil_s_s)
-FP_UNOP(f32_floor, Floor_s_s)
-FP_UNOP(f32_trunc, Trunc_s_s)
-FP_UNOP(f32_nearest_int, Round_s_s)
+FP_UNOP_RETURN_TRUE(f32_ceil, Ceil_s_s)
+FP_UNOP_RETURN_TRUE(f32_floor, Floor_s_s)
+FP_UNOP_RETURN_TRUE(f32_trunc, Trunc_s_s)
+FP_UNOP_RETURN_TRUE(f32_nearest_int, Round_s_s)
FP_UNOP(f32_sqrt, sqrt_s)
FP_BINOP(f64_add, add_d)
FP_BINOP(f64_sub, sub_d)
@@ -1302,11 +1323,11 @@ void LiftoffAssembler::PushRegisters(LiftoffRegList regs) {
LiftoffRegList gp_regs = regs & kGpCacheRegList;
unsigned num_gp_regs = gp_regs.GetNumRegsSet();
if (num_gp_regs) {
- unsigned offset = num_gp_regs * kPointerSize;
+ unsigned offset = num_gp_regs * kSystemPointerSize;
addiu(sp, sp, -offset);
while (!gp_regs.is_empty()) {
LiftoffRegister reg = gp_regs.GetFirstRegSet();
- offset -= kPointerSize;
+ offset -= kSystemPointerSize;
sw(reg.gp(), MemOperand(sp, offset));
gp_regs.clear(reg);
}
@@ -1343,13 +1364,14 @@ void LiftoffAssembler::PopRegisters(LiftoffRegList regs) {
LiftoffRegister reg = gp_regs.GetLastRegSet();
lw(reg.gp(), MemOperand(sp, gp_offset));
gp_regs.clear(reg);
- gp_offset += kPointerSize;
+ gp_offset += kSystemPointerSize;
}
addiu(sp, sp, gp_offset);
}
void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
- DCHECK_LT(num_stack_slots, (1 << 16) / kPointerSize); // 16 bit immediate
+ DCHECK_LT(num_stack_slots,
+ (1 << 16) / kSystemPointerSize); // 16 bit immediate
TurboAssembler::DropAndRet(static_cast<int>(num_stack_slots));
}
@@ -1434,12 +1456,11 @@ void LiftoffStackSlots::Construct() {
if (src.type() == kWasmF64) {
DCHECK_EQ(kLowWord, slot.half_);
asm_->lw(kScratchReg,
- liftoff::GetHalfStackSlot(2 * slot.src_index_ - 1));
+ liftoff::GetHalfStackSlot(slot.src_index_, kHighWord));
asm_->push(kScratchReg);
}
asm_->lw(kScratchReg,
- liftoff::GetHalfStackSlot(2 * slot.src_index_ +
- (slot.half_ == kLowWord ? 0 : 1)));
+ liftoff::GetHalfStackSlot(slot.src_index_, slot.half_));
asm_->push(kScratchReg);
break;
}
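
The GetHalfStackSlot rewrite above replaces the old half-index arithmetic (2 * index + 0 or 1) with an explicit (index, half) pair, so callers such as Spill, Fill, and FillI64Half name the word they mean instead of recomputing half indices. The offset computation it performs, as a standalone sketch (assuming Liftoff's 8-byte stack slots; kFirstStackSlotOffset is target-specific, so it is passed in here):

    #include <cstdint>

    constexpr int32_t kStackSlotSize = 8;  // holds one i64 / f64
    enum RegPairHalf : uint8_t { kLowWord = 0, kHighWord = 1 };

    // fp-relative offset of one 32-bit half of a spilled i64 slot.
    int32_t HalfSlotOffset(uint32_t index, RegPairHalf half,
                           int32_t first_slot_offset) {
      int32_t half_offset = half == kLowWord ? 0 : kStackSlotSize / 2;
      int32_t offset = index * kStackSlotSize + half_offset;
      return -first_slot_offset - offset;  // spill slots grow down from fp
    }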
diff --git a/deps/v8/src/wasm/baseline/mips64/OWNERS b/deps/v8/src/wasm/baseline/mips64/OWNERS
index c653ce404d..b455d9ef29 100644
--- a/deps/v8/src/wasm/baseline/mips64/OWNERS
+++ b/deps/v8/src/wasm/baseline/mips64/OWNERS
@@ -1,2 +1,3 @@
-ibogosavljevic@wavecomp.com
+arikalo@wavecomp.com
+prudic@wavecomp.com
skovacevic@wavecomp.com
diff --git a/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h b/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
index a2447d8b32..6f9de8189c 100644
--- a/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
+++ b/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
@@ -72,18 +72,18 @@ inline void Store(LiftoffAssembler* assm, Register base, int32_t offset,
inline void push(LiftoffAssembler* assm, LiftoffRegister reg, ValueType type) {
switch (type) {
case kWasmI32:
- assm->daddiu(sp, sp, -kPointerSize);
+ assm->daddiu(sp, sp, -kSystemPointerSize);
assm->sw(reg.gp(), MemOperand(sp, 0));
break;
case kWasmI64:
assm->push(reg.gp());
break;
case kWasmF32:
- assm->daddiu(sp, sp, -kPointerSize);
+ assm->daddiu(sp, sp, -kSystemPointerSize);
assm->swc1(reg.fp(), MemOperand(sp, 0));
break;
case kWasmF64:
- assm->daddiu(sp, sp, -kPointerSize);
+ assm->daddiu(sp, sp, -kSystemPointerSize);
assm->Sdc1(reg.fp(), MemOperand(sp, 0));
break;
default:
@@ -222,9 +222,9 @@ void LiftoffAssembler::PatchPrepareStackFrame(int offset,
// We can't run out of space; just pass anything big enough that the
// assembler will not try to grow the buffer.
constexpr int kAvailableSpace = 256;
- TurboAssembler patching_assembler(nullptr, AssemblerOptions{},
- buffer_ + offset, kAvailableSpace,
- CodeObjectRequired::kNo);
+ TurboAssembler patching_assembler(
+ nullptr, AssemblerOptions{}, CodeObjectRequired::kNo,
+ ExternalAssemblerBuffer(buffer_start_ + offset, kAvailableSpace));
// If {bytes} fits in a signed 16-bit immediate, a single daddiu is generated
// and the two nops stay untouched. Otherwise, a lui-ori sequence loads the
// value into a register and, as a third instruction, a daddu is generated.
@@ -267,6 +267,11 @@ void LiftoffAssembler::LoadFromInstance(Register dst, uint32_t offset,
}
}
+void LiftoffAssembler::LoadTaggedPointerFromInstance(Register dst,
+ uint32_t offset) {
+ LoadFromInstance(dst, offset, kTaggedSize);
+}
+
void LiftoffAssembler::SpillInstance(Register instance) {
sd(instance, liftoff::GetInstanceOperand());
}
@@ -275,6 +280,15 @@ void LiftoffAssembler::FillInstanceInto(Register dst) {
ld(dst, liftoff::GetInstanceOperand());
}
+void LiftoffAssembler::LoadTaggedPointer(Register dst, Register src_addr,
+ Register offset_reg,
+ uint32_t offset_imm,
+ LiftoffRegList pinned) {
+ STATIC_ASSERT(kTaggedSize == kInt64Size);
+ Load(LiftoffRegister(dst), src_addr, offset_reg, offset_imm,
+ LoadType::kI64Load, pinned);
+}
+
void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
Register offset_reg, uint32_t offset_imm,
LoadType type, LiftoffRegList pinned,
@@ -338,12 +352,15 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
StoreType type, LiftoffRegList pinned,
uint32_t* protected_store_pc, bool is_store_mem) {
Register dst = no_reg;
+ MemOperand dst_op = MemOperand(dst_addr, offset_imm);
if (offset_reg != no_reg) {
+ if (is_store_mem) {
+ pinned.set(src);
+ }
dst = GetUnusedRegister(kGpReg, pinned).gp();
emit_ptrsize_add(dst, dst_addr, offset_reg);
+ dst_op = MemOperand(dst, offset_imm);
}
- MemOperand dst_op = (offset_reg != no_reg) ? MemOperand(dst, offset_imm)
- : MemOperand(dst_addr, offset_imm);
#if defined(V8_TARGET_BIG_ENDIAN)
if (is_store_mem) {
@@ -389,7 +406,7 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
uint32_t caller_slot_idx,
ValueType type) {
- MemOperand src(fp, kPointerSize * (caller_slot_idx + 1));
+ MemOperand src(fp, kSystemPointerSize * (caller_slot_idx + 1));
liftoff::Load(this, dst, src, type);
}
@@ -479,7 +496,7 @@ void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index,
}
}
-void LiftoffAssembler::FillI64Half(Register, uint32_t half_index) {
+void LiftoffAssembler::FillI64Half(Register, uint32_t index, RegPairHalf) {
UNREACHABLE();
}
@@ -745,10 +762,10 @@ FP_BINOP(f32_sub, sub_s)
FP_BINOP(f32_mul, mul_s)
FP_BINOP(f32_div, div_s)
FP_UNOP(f32_abs, abs_s)
-FP_UNOP(f32_ceil, Ceil_s_s)
-FP_UNOP(f32_floor, Floor_s_s)
-FP_UNOP(f32_trunc, Trunc_s_s)
-FP_UNOP(f32_nearest_int, Round_s_s)
+FP_UNOP_RETURN_TRUE(f32_ceil, Ceil_s_s)
+FP_UNOP_RETURN_TRUE(f32_floor, Floor_s_s)
+FP_UNOP_RETURN_TRUE(f32_trunc, Trunc_s_s)
+FP_UNOP_RETURN_TRUE(f32_nearest_int, Round_s_s)
FP_UNOP(f32_sqrt, sqrt_s)
FP_BINOP(f64_add, add_d)
FP_BINOP(f64_sub, sub_d)
@@ -1155,11 +1172,11 @@ void LiftoffAssembler::PushRegisters(LiftoffRegList regs) {
LiftoffRegList gp_regs = regs & kGpCacheRegList;
unsigned num_gp_regs = gp_regs.GetNumRegsSet();
if (num_gp_regs) {
- unsigned offset = num_gp_regs * kPointerSize;
+ unsigned offset = num_gp_regs * kSystemPointerSize;
daddiu(sp, sp, -offset);
while (!gp_regs.is_empty()) {
LiftoffRegister reg = gp_regs.GetFirstRegSet();
- offset -= kPointerSize;
+ offset -= kSystemPointerSize;
sd(reg.gp(), MemOperand(sp, offset));
gp_regs.clear(reg);
}
@@ -1196,13 +1213,14 @@ void LiftoffAssembler::PopRegisters(LiftoffRegList regs) {
LiftoffRegister reg = gp_regs.GetLastRegSet();
ld(reg.gp(), MemOperand(sp, gp_offset));
gp_regs.clear(reg);
- gp_offset += kPointerSize;
+ gp_offset += kSystemPointerSize;
}
daddiu(sp, sp, gp_offset);
}
void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
- DCHECK_LT(num_stack_slots, (1 << 16) / kPointerSize); // 16 bit immediate
+ DCHECK_LT(num_stack_slots,
+ (1 << 16) / kSystemPointerSize); // 16 bit immediate
TurboAssembler::DropAndRet(static_cast<int>(num_stack_slots));
}
diff --git a/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h b/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h
index e9dcd419ba..d6c372e80f 100644
--- a/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h
+++ b/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h
@@ -37,6 +37,11 @@ void LiftoffAssembler::LoadFromInstance(Register dst, uint32_t offset,
BAILOUT("LoadFromInstance");
}
+void LiftoffAssembler::LoadTaggedPointerFromInstance(Register dst,
+ uint32_t offset) {
+ BAILOUT("LoadTaggedPointerFromInstance");
+}
+
void LiftoffAssembler::SpillInstance(Register instance) {
BAILOUT("SpillInstance");
}
@@ -45,6 +50,13 @@ void LiftoffAssembler::FillInstanceInto(Register dst) {
BAILOUT("FillInstanceInto");
}
+void LiftoffAssembler::LoadTaggedPointer(Register dst, Register src_addr,
+ Register offset_reg,
+ uint32_t offset_imm,
+ LiftoffRegList pinned) {
+ BAILOUT("LoadTaggedPointer");
+}
+
void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
Register offset_reg, uint32_t offset_imm,
LoadType type, LiftoffRegList pinned,
@@ -93,7 +105,7 @@ void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index,
BAILOUT("Fill");
}
-void LiftoffAssembler::FillI64Half(Register, uint32_t half_index) {
+void LiftoffAssembler::FillI64Half(Register, uint32_t index, RegPairHalf) {
BAILOUT("FillI64Half");
}
@@ -169,10 +181,10 @@ UNIMPLEMENTED_FP_BINOP(f32_max)
UNIMPLEMENTED_FP_BINOP(f32_copysign)
UNIMPLEMENTED_FP_UNOP(f32_abs)
UNIMPLEMENTED_FP_UNOP(f32_neg)
-UNIMPLEMENTED_FP_UNOP(f32_ceil)
-UNIMPLEMENTED_FP_UNOP(f32_floor)
-UNIMPLEMENTED_FP_UNOP(f32_trunc)
-UNIMPLEMENTED_FP_UNOP(f32_nearest_int)
+UNIMPLEMENTED_FP_UNOP_RETURN_TRUE(f32_ceil)
+UNIMPLEMENTED_FP_UNOP_RETURN_TRUE(f32_floor)
+UNIMPLEMENTED_FP_UNOP_RETURN_TRUE(f32_trunc)
+UNIMPLEMENTED_FP_UNOP_RETURN_TRUE(f32_nearest_int)
UNIMPLEMENTED_FP_UNOP(f32_sqrt)
UNIMPLEMENTED_FP_BINOP(f64_add)
UNIMPLEMENTED_FP_BINOP(f64_sub)
diff --git a/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h b/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h
index 970cfe5753..9680d9664f 100644
--- a/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h
+++ b/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h
@@ -37,6 +37,11 @@ void LiftoffAssembler::LoadFromInstance(Register dst, uint32_t offset,
BAILOUT("LoadFromInstance");
}
+void LiftoffAssembler::LoadTaggedPointerFromInstance(Register dst,
+ uint32_t offset) {
+ BAILOUT("LoadTaggedPointerFromInstance");
+}
+
void LiftoffAssembler::SpillInstance(Register instance) {
BAILOUT("SpillInstance");
}
@@ -45,6 +50,13 @@ void LiftoffAssembler::FillInstanceInto(Register dst) {
BAILOUT("FillInstanceInto");
}
+void LiftoffAssembler::LoadTaggedPointer(Register dst, Register src_addr,
+ Register offset_reg,
+ uint32_t offset_imm,
+ LiftoffRegList pinned) {
+ BAILOUT("LoadTaggedPointer");
+}
+
void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
Register offset_reg, uint32_t offset_imm,
LoadType type, LiftoffRegList pinned,
@@ -93,7 +105,7 @@ void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index,
BAILOUT("Fill");
}
-void LiftoffAssembler::FillI64Half(Register, uint32_t half_index) {
+void LiftoffAssembler::FillI64Half(Register, uint32_t index, RegPairHalf) {
BAILOUT("FillI64Half");
}
@@ -169,10 +181,10 @@ UNIMPLEMENTED_FP_BINOP(f32_max)
UNIMPLEMENTED_FP_BINOP(f32_copysign)
UNIMPLEMENTED_FP_UNOP(f32_abs)
UNIMPLEMENTED_FP_UNOP(f32_neg)
-UNIMPLEMENTED_FP_UNOP(f32_ceil)
-UNIMPLEMENTED_FP_UNOP(f32_floor)
-UNIMPLEMENTED_FP_UNOP(f32_trunc)
-UNIMPLEMENTED_FP_UNOP(f32_nearest_int)
+UNIMPLEMENTED_FP_UNOP_RETURN_TRUE(f32_ceil)
+UNIMPLEMENTED_FP_UNOP_RETURN_TRUE(f32_floor)
+UNIMPLEMENTED_FP_UNOP_RETURN_TRUE(f32_trunc)
+UNIMPLEMENTED_FP_UNOP_RETURN_TRUE(f32_nearest_int)
UNIMPLEMENTED_FP_UNOP(f32_sqrt)
UNIMPLEMENTED_FP_BINOP(f64_add)
UNIMPLEMENTED_FP_BINOP(f64_sub)
diff --git a/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h b/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
index 6805e19a76..35a2e855f1 100644
--- a/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
+++ b/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
@@ -112,11 +112,11 @@ inline void push(LiftoffAssembler* assm, LiftoffRegister reg, ValueType type) {
assm->pushq(reg.gp());
break;
case kWasmF32:
- assm->subp(rsp, Immediate(kPointerSize));
+ assm->subp(rsp, Immediate(kSystemPointerSize));
assm->Movss(Operand(rsp, 0), reg.fp());
break;
case kWasmF64:
- assm->subp(rsp, Immediate(kPointerSize));
+ assm->subp(rsp, Immediate(kSystemPointerSize));
assm->Movsd(Operand(rsp, 0), reg.fp());
break;
default:
@@ -146,8 +146,9 @@ void LiftoffAssembler::PatchPrepareStackFrame(int offset,
// We can't run out of space; just pass anything big enough that the
// assembler will not try to grow the buffer.
constexpr int kAvailableSpace = 64;
- Assembler patching_assembler(AssemblerOptions{}, buffer_ + offset,
- kAvailableSpace);
+ Assembler patching_assembler(
+ AssemblerOptions{},
+ ExternalAssemblerBuffer(buffer_start_ + offset, kAvailableSpace));
patching_assembler.sub_sp_32(bytes);
}
@@ -195,6 +196,13 @@ void LiftoffAssembler::LoadFromInstance(Register dst, uint32_t offset,
}
}
+void LiftoffAssembler::LoadTaggedPointerFromInstance(Register dst,
+ uint32_t offset) {
+ DCHECK_LE(offset, kMaxInt);
+ movp(dst, liftoff::GetInstanceOperand());
+ LoadTaggedPointerField(dst, Operand(dst, offset));
+}
+
void LiftoffAssembler::SpillInstance(Register instance) {
movp(liftoff::GetInstanceOperand(), instance);
}
@@ -203,6 +211,17 @@ void LiftoffAssembler::FillInstanceInto(Register dst) {
movp(dst, liftoff::GetInstanceOperand());
}
+void LiftoffAssembler::LoadTaggedPointer(Register dst, Register src_addr,
+ Register offset_reg,
+ uint32_t offset_imm,
+ LiftoffRegList pinned) {
+ if (emit_debug_code() && offset_reg != no_reg) {
+ AssertZeroExtended(offset_reg);
+ }
+ Operand src_op = liftoff::GetMemOp(this, src_addr, offset_reg, offset_imm);
+ LoadTaggedPointerField(dst, src_op);
+}
+
void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
Register offset_reg, uint32_t offset_imm,
LoadType type, LiftoffRegList pinned,
@@ -256,7 +275,7 @@ void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister src,
- StoreType type, LiftoffRegList pinned,
+ StoreType type, LiftoffRegList /* pinned */,
uint32_t* protected_store_pc, bool is_store_mem) {
if (emit_debug_code() && offset_reg != no_reg) {
AssertZeroExtended(offset_reg);
@@ -293,19 +312,22 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
uint32_t caller_slot_idx,
ValueType type) {
- Operand src(rbp, kPointerSize * (caller_slot_idx + 1));
+ Operand src(rbp, kSystemPointerSize * (caller_slot_idx + 1));
liftoff::Load(this, dst, src, type);
}
void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index,
ValueType type) {
DCHECK_NE(dst_index, src_index);
- if (cache_state_.has_unused_register(kGpReg)) {
- Fill(LiftoffRegister{kScratchRegister}, src_index, type);
- Spill(dst_index, LiftoffRegister{kScratchRegister}, type);
+ Operand src = liftoff::GetStackSlot(src_index);
+ Operand dst = liftoff::GetStackSlot(dst_index);
+ if (ValueTypes::ElementSizeLog2Of(type) == 2) {
+ movl(kScratchRegister, src);
+ movl(dst, kScratchRegister);
} else {
- pushq(liftoff::GetStackSlot(src_index));
- popq(liftoff::GetStackSlot(dst_index));
+ DCHECK_EQ(3, ValueTypes::ElementSizeLog2Of(type));
+ movq(kScratchRegister, src);
+ movq(dst, kScratchRegister);
}
}
@@ -400,7 +422,7 @@ void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index,
}
}
-void LiftoffAssembler::FillI64Half(Register, uint32_t half_index) {
+void LiftoffAssembler::FillI64Half(Register, uint32_t index, RegPairHalf) {
UNREACHABLE();
}
@@ -413,12 +435,17 @@ void LiftoffAssembler::emit_i32_add(Register dst, Register lhs, Register rhs) {
}
void LiftoffAssembler::emit_i32_sub(Register dst, Register lhs, Register rhs) {
- if (dst == rhs) {
- negl(dst);
- addl(dst, lhs);
- } else {
+ if (dst != rhs) {
+ // Default path.
if (dst != lhs) movl(dst, lhs);
subl(dst, rhs);
+ } else if (lhs == rhs) {
+ // Degenerate case.
+ xorl(dst, dst);
+ } else {
+ // Emit {dst = lhs + -rhs} if dst == rhs.
+ negl(dst);
+ addl(dst, lhs);
}
}
@@ -933,25 +960,41 @@ void LiftoffAssembler::emit_f32_neg(DoubleRegister dst, DoubleRegister src) {
}
}
-void LiftoffAssembler::emit_f32_ceil(DoubleRegister dst, DoubleRegister src) {
- REQUIRE_CPU_FEATURE(SSE4_1);
- Roundss(dst, src, kRoundUp);
+bool LiftoffAssembler::emit_f32_ceil(DoubleRegister dst, DoubleRegister src) {
+ if (CpuFeatures::IsSupported(SSE4_1)) {
+ CpuFeatureScope feature(this, SSE4_1);
+ Roundss(dst, src, kRoundUp);
+ return true;
+ }
+ return false;
}
-void LiftoffAssembler::emit_f32_floor(DoubleRegister dst, DoubleRegister src) {
- REQUIRE_CPU_FEATURE(SSE4_1);
- Roundss(dst, src, kRoundDown);
+bool LiftoffAssembler::emit_f32_floor(DoubleRegister dst, DoubleRegister src) {
+ if (CpuFeatures::IsSupported(SSE4_1)) {
+ CpuFeatureScope feature(this, SSE4_1);
+ Roundss(dst, src, kRoundDown);
+ return true;
+ }
+ return false;
}
-void LiftoffAssembler::emit_f32_trunc(DoubleRegister dst, DoubleRegister src) {
- REQUIRE_CPU_FEATURE(SSE4_1);
- Roundss(dst, src, kRoundToZero);
+bool LiftoffAssembler::emit_f32_trunc(DoubleRegister dst, DoubleRegister src) {
+ if (CpuFeatures::IsSupported(SSE4_1)) {
+ CpuFeatureScope feature(this, SSE4_1);
+ Roundss(dst, src, kRoundToZero);
+ return true;
+ }
+ return false;
}
-void LiftoffAssembler::emit_f32_nearest_int(DoubleRegister dst,
+bool LiftoffAssembler::emit_f32_nearest_int(DoubleRegister dst,
DoubleRegister src) {
- REQUIRE_CPU_FEATURE(SSE4_1);
- Roundss(dst, src, kRoundToNearest);
+ if (CpuFeatures::IsSupported(SSE4_1)) {
+ CpuFeatureScope feature(this, SSE4_1);
+ Roundss(dst, src, kRoundToNearest);
+ return true;
+ }
+ return false;
}
void LiftoffAssembler::emit_f32_sqrt(DoubleRegister dst, DoubleRegister src) {
@@ -1423,8 +1466,9 @@ void LiftoffAssembler::PopRegisters(LiftoffRegList regs) {
}
void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
- DCHECK_LT(num_stack_slots, (1 << 16) / kPointerSize); // 16 bit immediate
- ret(static_cast<int>(num_stack_slots * kPointerSize));
+ DCHECK_LT(num_stack_slots,
+ (1 << 16) / kSystemPointerSize); // 16 bit immediate
+ ret(static_cast<int>(num_stack_slots * kSystemPointerSize));
}
void LiftoffAssembler::CallC(wasm::FunctionSig* sig,
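
Across the backends above, the f32/f64 rounding emitters change from void to bool: the emitter now reports whether the target could emit the operation at all (x64 needs SSE4.1 for Roundss; MIPS always succeeds via the new *_RETURN_TRUE macros; ppc and s390 still bail out). A false return lets the platform-independent compiler pick a fallback instead of requiring the CPU feature at compile time. A hypothetical caller, with EmitCCallFallback standing in for whatever fallback the compiler actually uses:

    // Sketch only: the emitter signature matches the diff; the fallback
    // helper is illustrative, not a V8 function.
    void EmitF32Ceil(LiftoffAssembler* assm, DoubleRegister dst,
                     DoubleRegister src) {
      if (assm->emit_f32_ceil(dst, src)) return;  // fast path emitted
      EmitCCallFallback(assm, dst, src);          // hypothetical C-call fallback
    }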
diff --git a/deps/v8/src/wasm/compilation-environment.h b/deps/v8/src/wasm/compilation-environment.h
new file mode 100644
index 0000000000..c6bed6c2e4
--- /dev/null
+++ b/deps/v8/src/wasm/compilation-environment.h
@@ -0,0 +1,127 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_WASM_COMPILATION_ENVIRONMENT_H_
+#define V8_WASM_COMPILATION_ENVIRONMENT_H_
+
+#include "src/wasm/wasm-features.h"
+#include "src/wasm/wasm-limits.h"
+#include "src/wasm/wasm-module.h"
+#include "src/wasm/wasm-tier.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+class NativeModule;
+class WasmError;
+
+enum RuntimeExceptionSupport : bool {
+ kRuntimeExceptionSupport = true,
+ kNoRuntimeExceptionSupport = false
+};
+
+enum UseTrapHandler : bool { kUseTrapHandler = true, kNoTrapHandler = false };
+
+enum LowerSimd : bool { kLowerSimd = true, kNoLowerSimd = false };
+
+// The {CompilationEnv} encapsulates the module data that is used during
+// compilation. CompilationEnvs are shareable across multiple compilations.
+struct CompilationEnv {
+ // A pointer to the decoded module's static representation.
+ const WasmModule* const module;
+
+ // True if trap handling should be used in compiled code, rather than
+ // compiling in bounds checks for each memory access.
+ const UseTrapHandler use_trap_handler;
+
+ // If the runtime doesn't support exception propagation,
+ // we won't generate stack checks, and trap handling will also
+ // be generated differently.
+ const RuntimeExceptionSupport runtime_exception_support;
+
+ // The smallest size of any memory that could be used with this module, in
+ // bytes.
+ const uint64_t min_memory_size;
+
+ // The largest size of any memory that could be used with this module, in
+ // bytes.
+ const uint64_t max_memory_size;
+
+ // Features enabled for this compilation.
+ const WasmFeatures enabled_features;
+
+ const LowerSimd lower_simd;
+
+ constexpr CompilationEnv(const WasmModule* module,
+ UseTrapHandler use_trap_handler,
+ RuntimeExceptionSupport runtime_exception_support,
+ const WasmFeatures& enabled_features,
+ LowerSimd lower_simd = kNoLowerSimd)
+ : module(module),
+ use_trap_handler(use_trap_handler),
+ runtime_exception_support(runtime_exception_support),
+ min_memory_size(module ? module->initial_pages * uint64_t{kWasmPageSize}
+ : 0),
+ max_memory_size((module && module->has_maximum_pages
+ ? module->maximum_pages
+ : kV8MaxWasmMemoryPages) *
+ uint64_t{kWasmPageSize}),
+ enabled_features(enabled_features),
+ lower_simd(lower_simd) {}
+};
+
+// The wire bytes are either owned by the StreamingDecoder, or (after streaming)
+// by the NativeModule. This class abstracts over the storage location.
+class WireBytesStorage {
+ public:
+ virtual ~WireBytesStorage() = default;
+ virtual Vector<const uint8_t> GetCode(WireBytesRef) const = 0;
+};
+
+// Callbacks will receive either {kFailedCompilation} or both
+// {kFinishedBaselineCompilation} and {kFinishedTopTierCompilation}, in that
+// order. If tier up is off, both events are delivered right after each other.
+enum class CompilationEvent : uint8_t {
+ kFinishedBaselineCompilation,
+ kFinishedTopTierCompilation,
+ kFailedCompilation,
+
+ // Marker:
+ // After an event >= kFirstFinalEvent, no further events are generated.
+ kFirstFinalEvent = kFinishedTopTierCompilation
+};
+
+// The implementation of {CompilationState} lives in module-compiler.cc.
+// This is the PIMPL interface to that private class.
+class CompilationState {
+ public:
+ using callback_t = std::function<void(CompilationEvent, const WasmError*)>;
+ ~CompilationState();
+
+ void CancelAndWait();
+
+ void SetError(uint32_t func_index, const WasmError& error);
+
+ void SetWireBytesStorage(std::shared_ptr<WireBytesStorage>);
+
+ std::shared_ptr<WireBytesStorage> GetWireBytesStorage() const;
+
+ void AddCallback(callback_t);
+
+ bool failed() const;
+
+ private:
+ friend class NativeModule;
+ friend class WasmCompilationUnit;
+ CompilationState() = delete;
+
+ static std::unique_ptr<CompilationState> New(Isolate*, NativeModule*);
+};
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
+
+#endif // V8_WASM_COMPILATION_ENVIRONMENT_H_
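
The min/max memory computation in the CompilationEnv constructor is the part most worth spelling out: both bounds are in bytes, the maximum falls back to the engine-wide page cap when the module declares none, and the uint64_t{kWasmPageSize} factor keeps the multiplication from overflowing 32 bits. The same computation as a standalone sketch (the kV8MaxWasmMemoryPages value below is an assumption; the real constant lives in wasm-limits.h):

    #include <cstdint>

    constexpr uint64_t kWasmPageSize = 64 * 1024;      // 64 KiB wasm pages
    constexpr uint64_t kV8MaxWasmMemoryPages = 32767;  // assumed engine cap

    struct MemoryBounds { uint64_t min_bytes; uint64_t max_bytes; };

    MemoryBounds ComputeMemoryBounds(uint64_t initial_pages,
                                     bool has_maximum_pages,
                                     uint64_t maximum_pages) {
      uint64_t min = initial_pages * kWasmPageSize;
      uint64_t max = (has_maximum_pages ? maximum_pages
                                        : kV8MaxWasmMemoryPages) *
                     kWasmPageSize;
      return {min, max};
    }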
diff --git a/deps/v8/src/wasm/decoder.h b/deps/v8/src/wasm/decoder.h
index 74955f9ede..2c555bb413 100644
--- a/deps/v8/src/wasm/decoder.h
+++ b/deps/v8/src/wasm/decoder.h
@@ -28,11 +28,8 @@ namespace wasm {
if (FLAG_trace_wasm_decoder && (cond)) PrintF(__VA_ARGS__); \
} while (false)
-// A {DecodeResult} only stores the failure / success status, but no data. Thus
-// we use {nullptr_t} as data value, such that the only valid data stored in
-// this type is a nullptr.
-// Storing {void} would require template specialization.
-using DecodeResult = Result<std::nullptr_t>;
+// A {DecodeResult} only stores the failure / success status, but no data.
+using DecodeResult = VoidResult;
// A helper utility to decode bytes, integers, fields, varints, etc, from
// a buffer of bytes.
@@ -176,30 +173,30 @@ class Decoder {
return true;
}
- void error(const char* msg) { errorf(pc_, "%s", msg); }
+ // Do not inline error methods. This has measurable impact on validation time,
+ // see https://crbug.com/910432.
+ void V8_NOINLINE error(const char* msg) { errorf(pc_offset(), "%s", msg); }
+ void V8_NOINLINE error(const uint8_t* pc, const char* msg) {
+ errorf(pc_offset(pc), "%s", msg);
+ }
+ void V8_NOINLINE error(uint32_t offset, const char* msg) {
+ errorf(offset, "%s", msg);
+ }
- void error(const byte* pc, const char* msg) { errorf(pc, "%s", msg); }
+ void V8_NOINLINE PRINTF_FORMAT(3, 4)
+ errorf(uint32_t offset, const char* format, ...) {
+ va_list args;
+ va_start(args, format);
+ verrorf(offset, format, args);
+ va_end(args);
+ }
- // Sets internal error state.
- void PRINTF_FORMAT(3, 4) errorf(const byte* pc, const char* format, ...) {
- // Only report the first error.
- if (!ok()) return;
-#if DEBUG
- if (FLAG_wasm_break_on_decoder_error) {
- base::OS::DebugBreak();
- }
-#endif
- constexpr int kMaxErrorMsg = 256;
- EmbeddedVector<char, kMaxErrorMsg> buffer;
- va_list arguments;
- va_start(arguments, format);
- int len = VSNPrintF(buffer, format, arguments);
- CHECK_LT(0, len);
- va_end(arguments);
- error_msg_.assign(buffer.start(), len);
- DCHECK_GE(pc, start_);
- error_offset_ = static_cast<uint32_t>(pc - start_) + buffer_offset_;
- onFirstError();
+ void V8_NOINLINE PRINTF_FORMAT(3, 4)
+ errorf(const uint8_t* pc, const char* format, ...) {
+ va_list args;
+ va_start(args, format);
+ verrorf(pc_offset(pc), format, args);
+ va_end(args);
}
// Behavior triggered on first error, overridden in subclasses.
@@ -220,12 +217,11 @@ class Decoder {
// Converts the given value to a {Result}, copying the error if necessary.
template <typename T, typename U = typename std::remove_reference<T>::type>
Result<U> toResult(T&& val) {
- Result<U> result(std::forward<T>(val));
if (failed()) {
- TRACE("Result error: %s\n", error_msg_.c_str());
- result.error(error_offset_, std::move(error_msg_));
+ TRACE("Result error: %s\n", error_.message().c_str());
+ return Result<U>{error_};
}
- return result;
+ return Result<U>{std::forward<T>(val)};
}
// Resets the boundaries of this decoder.
@@ -236,24 +232,30 @@ class Decoder {
pc_ = start;
end_ = end;
buffer_offset_ = buffer_offset;
- error_offset_ = 0;
- error_msg_.clear();
+ error_ = {};
}
void Reset(Vector<const uint8_t> bytes, uint32_t buffer_offset = 0) {
Reset(bytes.begin(), bytes.end(), buffer_offset);
}
- bool ok() const { return error_msg_.empty(); }
+ bool ok() const { return error_.empty(); }
bool failed() const { return !ok(); }
bool more() const { return pc_ < end_; }
+ const WasmError& error() const { return error_; }
const byte* start() const { return start_; }
const byte* pc() const { return pc_; }
- uint32_t position() const { return static_cast<uint32_t>(pc_ - start_); }
- uint32_t pc_offset() const {
- return static_cast<uint32_t>(pc_ - start_) + buffer_offset_;
+ uint32_t V8_INLINE position() const {
+ return static_cast<uint32_t>(pc_ - start_);
}
+ // This needs to be inlined for performance (see https://crbug.com/910432).
+ uint32_t V8_INLINE pc_offset(const uint8_t* pc) const {
+ DCHECK_LE(start_, pc);
+ DCHECK_GE(kMaxUInt32 - buffer_offset_, pc - start_);
+ return static_cast<uint32_t>(pc - start_) + buffer_offset_;
+ }
+ uint32_t pc_offset() const { return pc_offset(pc_); }
uint32_t buffer_offset() const { return buffer_offset_; }
// Takes an offset relative to the module start and returns an offset relative
// to the current buffer of the decoder.
@@ -269,10 +271,25 @@ class Decoder {
const byte* end_;
// The offset of the current buffer in the module. Needed for streaming.
uint32_t buffer_offset_;
- uint32_t error_offset_ = 0;
- std::string error_msg_;
+ WasmError error_;
private:
+ void verrorf(uint32_t offset, const char* format, va_list args) {
+ // Only report the first error.
+ if (!ok()) return;
+#if DEBUG
+ if (FLAG_wasm_break_on_decoder_error) {
+ base::OS::DebugBreak();
+ }
+#endif
+ constexpr int kMaxErrorMsg = 256;
+ EmbeddedVector<char, kMaxErrorMsg> buffer;
+ int len = VSNPrintF(buffer, format, args);
+ CHECK_LT(0, len);
+ error_ = {offset, {buffer.start(), static_cast<size_t>(len)}};
+ onFirstError();
+ }
+
template <typename IntType, bool validate>
inline IntType read_little_endian(const byte* pc, const char* msg) {
if (!validate) {
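
The decoder rewrite above consolidates error_offset_ and error_msg_ into a single WasmError and routes every error/errorf overload through one verrorf; positions are now module-relative byte offsets rather than raw pointers, which is what keeps them meaningful across streaming buffers. The rebasing that pc_offset(pc) performs, sketched:

    #include <cstdint>

    // A buffer-local byte position rebased to a module-relative offset:
    // during streaming the decoder sees one buffer of the module at a time,
    // starting at {buffer_offset} within the module.
    uint32_t ModuleOffset(const uint8_t* buffer_start, const uint8_t* pc,
                          uint32_t buffer_offset) {
      return static_cast<uint32_t>(pc - buffer_start) + buffer_offset;
    }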
diff --git a/deps/v8/src/wasm/function-body-decoder-impl.h b/deps/v8/src/wasm/function-body-decoder-impl.h
index 0e49ee7e97..578a0ff5b7 100644
--- a/deps/v8/src/wasm/function-body-decoder-impl.h
+++ b/deps/v8/src/wasm/function-body-decoder-impl.h
@@ -61,6 +61,9 @@ struct WasmException;
(message)))
#define ATOMIC_OP_LIST(V) \
+ V(AtomicWake, Uint32) \
+ V(I32AtomicWait, Uint32) \
+ V(I64AtomicWait, Uint32) \
V(I32AtomicLoad, Uint32) \
V(I64AtomicLoad, Uint64) \
V(I32AtomicLoad8U, Uint8) \
@@ -127,17 +130,12 @@ struct WasmException;
V(I64AtomicStore16U, Uint16) \
V(I64AtomicStore32U, Uint32)
-template <typename T, typename Allocator>
-Vector<T> vec2vec(std::vector<T, Allocator>& vec) {
- return Vector<T>(vec.data(), vec.size());
-}
-
// Helpers for decoding different kinds of immediates which follow bytecodes.
template <Decoder::ValidateFlag validate>
struct LocalIndexImmediate {
uint32_t index;
ValueType type = kWasmStmt;
- unsigned length;
+ uint32_t length;
inline LocalIndexImmediate(Decoder* decoder, const byte* pc) {
index = decoder->read_u32v<validate>(pc + 1, &length, "local index");
@@ -148,7 +146,7 @@ template <Decoder::ValidateFlag validate>
struct ExceptionIndexImmediate {
uint32_t index;
const WasmException* exception = nullptr;
- unsigned length;
+ uint32_t length;
inline ExceptionIndexImmediate(Decoder* decoder, const byte* pc) {
index = decoder->read_u32v<validate>(pc + 1, &length, "exception index");
@@ -158,7 +156,7 @@ struct ExceptionIndexImmediate {
template <Decoder::ValidateFlag validate>
struct ImmI32Immediate {
int32_t value;
- unsigned length;
+ uint32_t length;
inline ImmI32Immediate(Decoder* decoder, const byte* pc) {
value = decoder->read_i32v<validate>(pc + 1, &length, "immi32");
}
@@ -167,7 +165,7 @@ struct ImmI32Immediate {
template <Decoder::ValidateFlag validate>
struct ImmI64Immediate {
int64_t value;
- unsigned length;
+ uint32_t length;
inline ImmI64Immediate(Decoder* decoder, const byte* pc) {
value = decoder->read_i64v<validate>(pc + 1, &length, "immi64");
}
@@ -176,7 +174,7 @@ struct ImmI64Immediate {
template <Decoder::ValidateFlag validate>
struct ImmF32Immediate {
float value;
- unsigned length = 4;
+ uint32_t length = 4;
inline ImmF32Immediate(Decoder* decoder, const byte* pc) {
// Avoid bit_cast because it might not preserve the signalling bit of a NaN.
uint32_t tmp = decoder->read_u32<validate>(pc + 1, "immf32");
@@ -187,7 +185,7 @@ struct ImmF32Immediate {
template <Decoder::ValidateFlag validate>
struct ImmF64Immediate {
double value;
- unsigned length = 8;
+ uint32_t length = 8;
inline ImmF64Immediate(Decoder* decoder, const byte* pc) {
// Avoid bit_cast because it might not preserve the signalling bit of a NaN.
uint64_t tmp = decoder->read_u64<validate>(pc + 1, "immf64");
@@ -200,7 +198,7 @@ struct GlobalIndexImmediate {
uint32_t index;
ValueType type = kWasmStmt;
const WasmGlobal* global = nullptr;
- unsigned length;
+ uint32_t length;
inline GlobalIndexImmediate(Decoder* decoder, const byte* pc) {
index = decoder->read_u32v<validate>(pc + 1, &length, "global index");
@@ -209,7 +207,7 @@ struct GlobalIndexImmediate {
template <Decoder::ValidateFlag validate>
struct BlockTypeImmediate {
- unsigned length = 1;
+ uint32_t length = 1;
ValueType type = kWasmStmt;
uint32_t sig_index = 0;
FunctionSig* sig = nullptr;
@@ -290,11 +288,11 @@ struct BlockTypeImmediate {
};
template <Decoder::ValidateFlag validate>
-struct BreakDepthImmediate {
+struct BranchDepthImmediate {
uint32_t depth;
- unsigned length;
- inline BreakDepthImmediate(Decoder* decoder, const byte* pc) {
- depth = decoder->read_u32v<validate>(pc + 1, &length, "break depth");
+ uint32_t length;
+ inline BranchDepthImmediate(Decoder* decoder, const byte* pc) {
+ depth = decoder->read_u32v<validate>(pc + 1, &length, "branch depth");
}
};
@@ -303,9 +301,9 @@ struct CallIndirectImmediate {
uint32_t table_index;
uint32_t sig_index;
FunctionSig* sig = nullptr;
- unsigned length = 0;
+ uint32_t length = 0;
inline CallIndirectImmediate(Decoder* decoder, const byte* pc) {
- unsigned len = 0;
+ uint32_t len = 0;
sig_index = decoder->read_u32v<validate>(pc + 1, &len, "signature index");
if (!VALIDATE(decoder->ok())) return;
table_index = decoder->read_u8<validate>(pc + 1 + len, "table index");
@@ -321,7 +319,7 @@ template <Decoder::ValidateFlag validate>
struct CallFunctionImmediate {
uint32_t index;
FunctionSig* sig = nullptr;
- unsigned length;
+ uint32_t length;
inline CallFunctionImmediate(Decoder* decoder, const byte* pc) {
index = decoder->read_u32v<validate>(pc + 1, &length, "function index");
}
@@ -330,7 +328,7 @@ struct CallFunctionImmediate {
template <Decoder::ValidateFlag validate>
struct MemoryIndexImmediate {
uint32_t index;
- unsigned length = 1;
+ uint32_t length = 1;
inline MemoryIndexImmediate(Decoder* decoder, const byte* pc) {
index = decoder->read_u8<validate>(pc + 1, "memory index");
if (!VALIDATE(index == 0)) {
@@ -340,6 +338,18 @@ struct MemoryIndexImmediate {
};
template <Decoder::ValidateFlag validate>
+struct TableIndexImmediate {
+ uint32_t index;
+ uint32_t length = 1;
+ inline TableIndexImmediate(Decoder* decoder, const byte* pc) {
+ index = decoder->read_u8<validate>(pc + 1, "table index");
+ if (!VALIDATE(index == 0)) {
+ decoder->errorf(pc + 1, "expected table index 0, found %u", index);
+ }
+ }
+};
+
+template <Decoder::ValidateFlag validate>
struct BranchTableImmediate {
uint32_t table_count;
const byte* start;
@@ -347,7 +357,7 @@ struct BranchTableImmediate {
inline BranchTableImmediate(Decoder* decoder, const byte* pc) {
DCHECK_EQ(kExprBrTable, decoder->read_u8<validate>(pc, "opcode"));
start = pc + 1;
- unsigned len = 0;
+ uint32_t len = 0;
table_count = decoder->read_u32v<validate>(pc + 1, &len, "table count");
table = pc + 1 + len;
}
@@ -357,12 +367,12 @@ struct BranchTableImmediate {
template <Decoder::ValidateFlag validate>
class BranchTableIterator {
public:
- unsigned cur_index() { return index_; }
+ uint32_t cur_index() { return index_; }
bool has_next() { return VALIDATE(decoder_->ok()) && index_ <= table_count_; }
uint32_t next() {
DCHECK(has_next());
index_++;
- unsigned length;
+ uint32_t length;
uint32_t result =
decoder_->read_u32v<validate>(pc_, &length, "branch table entry");
pc_ += length;
@@ -370,9 +380,9 @@ class BranchTableIterator {
}
// length, including the length of the {BranchTableImmediate}, but not the
// opcode.
- unsigned length() {
+ uint32_t length() {
while (has_next()) next();
- return static_cast<unsigned>(pc_ - start_);
+ return static_cast<uint32_t>(pc_ - start_);
}
const byte* pc() { return pc_; }
@@ -395,10 +405,10 @@ template <Decoder::ValidateFlag validate>
struct MemoryAccessImmediate {
uint32_t alignment;
uint32_t offset;
- unsigned length = 0;
+ uint32_t length = 0;
inline MemoryAccessImmediate(Decoder* decoder, const byte* pc,
uint32_t max_alignment) {
- unsigned alignment_length;
+ uint32_t alignment_length;
alignment =
decoder->read_u32v<validate>(pc + 1, &alignment_length, "alignment");
if (!VALIDATE(alignment <= max_alignment)) {
@@ -408,7 +418,7 @@ struct MemoryAccessImmediate {
max_alignment, alignment);
}
if (!VALIDATE(decoder->ok())) return;
- unsigned offset_length;
+ uint32_t offset_length;
offset = decoder->read_u32v<validate>(pc + 1 + alignment_length,
&offset_length, "offset");
length = alignment_length + offset_length;
@@ -419,7 +429,7 @@ struct MemoryAccessImmediate {
template <Decoder::ValidateFlag validate>
struct SimdLaneImmediate {
uint8_t lane;
- unsigned length = 1;
+ uint32_t length = 1;
inline SimdLaneImmediate(Decoder* decoder, const byte* pc) {
lane = decoder->read_u8<validate>(pc + 2, "lane");
@@ -430,7 +440,7 @@ struct SimdLaneImmediate {
template <Decoder::ValidateFlag validate>
struct SimdShiftImmediate {
uint8_t shift;
- unsigned length = 1;
+ uint32_t length = 1;
inline SimdShiftImmediate(Decoder* decoder, const byte* pc) {
shift = decoder->read_u8<validate>(pc + 2, "shift");
@@ -450,24 +460,73 @@ struct Simd8x16ShuffleImmediate {
}
};
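+// Immediates for bulk-memory operations; {memory.init} and {table.init} carry
+// an extra varint index referencing a data or element segment.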
+template <Decoder::ValidateFlag validate>
+struct MemoryInitImmediate {
+ MemoryIndexImmediate<validate> memory;
+ uint32_t data_segment_index = 0;
+ uint32_t length = 0;
+
+ inline MemoryInitImmediate(Decoder* decoder, const byte* pc)
+ : memory(decoder, pc + 1) {
+ if (!VALIDATE(decoder->ok())) return;
+ uint32_t len = 0;
+ data_segment_index = decoder->read_i32v<validate>(
+ pc + 2 + memory.length, &len, "data segment index");
+ length = memory.length + len;
+ }
+};
+
+template <Decoder::ValidateFlag validate>
+struct MemoryDropImmediate {
+ uint32_t index;
+ uint32_t length;
+
+ inline MemoryDropImmediate(Decoder* decoder, const byte* pc) {
+ index = decoder->read_i32v<validate>(pc + 2, &length, "data segment index");
+ }
+};
+
+template <Decoder::ValidateFlag validate>
+struct TableInitImmediate {
+ TableIndexImmediate<validate> table;
+ uint32_t elem_segment_index = 0;
+ uint32_t length = 0;
+
+ inline TableInitImmediate(Decoder* decoder, const byte* pc)
+ : table(decoder, pc + 1) {
+ if (!VALIDATE(decoder->ok())) return;
+ uint32_t len = 0;
+ elem_segment_index = decoder->read_i32v<validate>(
+ pc + 2 + table.length, &len, "elem segment index");
+ length = table.length + len;
+ }
+};
+
+template <Decoder::ValidateFlag validate>
+struct TableDropImmediate {
+ uint32_t index;
+ uint32_t length;
+
+ inline TableDropImmediate(Decoder* decoder, const byte* pc) {
+ index = decoder->read_i32v<validate>(pc + 2, &length, "elem segment index");
+ }
+};
+
// An entry on the value stack.
struct ValueBase {
- const byte* pc;
- ValueType type;
-
- // Named constructors.
- static ValueBase Unreachable(const byte* pc) { return {pc, kWasmVar}; }
+ const byte* pc = nullptr;
+ ValueType type = kWasmStmt;
- static ValueBase New(const byte* pc, ValueType type) { return {pc, type}; }
+ ValueBase(const byte* pc, ValueType type) : pc(pc), type(type) {}
};
template <typename Value>
struct Merge {
- uint32_t arity;
- union {
+ uint32_t arity = 0;
+ union { // Either multiple values or a single value.
Value* array;
Value first;
- } vals; // Either multiple values or a single value.
+ } vals = {nullptr}; // Initialize {array} with {nullptr}.
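+ // {first} is used iff {arity == 1}; {array} is used for {arity > 1}.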
// Tracks whether this merge was ever reached. Uses precise reachability, like
// Reachability::kReachable.
@@ -487,8 +546,7 @@ enum ControlKind : uint8_t {
kControlBlock,
kControlLoop,
kControlTry,
- kControlTryCatch,
- kControlTryCatchAll
+ kControlTryCatch
};
enum Reachability : uint8_t {
@@ -503,18 +561,24 @@ enum Reachability : uint8_t {
// An entry on the control stack (i.e. if, block, loop, or try).
template <typename Value>
struct ControlBase {
- ControlKind kind;
- uint32_t stack_depth; // stack height at the beginning of the construct.
- const byte* pc;
+ ControlKind kind = kControlBlock;
+ uint32_t stack_depth = 0; // stack height at the beginning of the construct.
+ const uint8_t* pc = nullptr;
Reachability reachability = kReachable;
// Values merged into the start or end of this control construct.
Merge<Value> start_merge;
Merge<Value> end_merge;
- ControlBase() = default;
- ControlBase(ControlKind kind, uint32_t stack_depth, const byte* pc)
- : kind(kind), stack_depth(stack_depth), pc(pc) {}
+ MOVE_ONLY_NO_DEFAULT_CONSTRUCTOR(ControlBase);
+
+ ControlBase(ControlKind kind, uint32_t stack_depth, const uint8_t* pc,
+ Reachability reachability)
+ : kind(kind),
+ stack_depth(stack_depth),
+ pc(pc),
+ reachability(reachability),
+ start_merge(reachability == kReachable) {}
// Check whether the current block is reachable.
bool reachable() const { return reachability == kReachable; }
@@ -537,67 +601,11 @@ struct ControlBase {
bool is_loop() const { return kind == kControlLoop; }
bool is_incomplete_try() const { return kind == kControlTry; }
bool is_try_catch() const { return kind == kControlTryCatch; }
- bool is_try_catchall() const { return kind == kControlTryCatchAll; }
- bool is_try() const {
- return is_incomplete_try() || is_try_catch() || is_try_catchall();
- }
+ bool is_try() const { return is_incomplete_try() || is_try_catch(); }
inline Merge<Value>* br_merge() {
return is_loop() ? &this->start_merge : &this->end_merge;
}
-
- // Named constructors.
- static ControlBase Block(const byte* pc, uint32_t stack_depth) {
- return {kControlBlock, stack_depth, pc};
- }
-
- static ControlBase If(const byte* pc, uint32_t stack_depth) {
- return {kControlIf, stack_depth, pc};
- }
-
- static ControlBase Loop(const byte* pc, uint32_t stack_depth) {
- return {kControlLoop, stack_depth, pc};
- }
-
- static ControlBase Try(const byte* pc, uint32_t stack_depth) {
- return {kControlTry, stack_depth, pc};
- }
-};
-
-#define CONCRETE_NAMED_CONSTRUCTOR(concrete_type, abstract_type, name) \
- template <typename... Args> \
- static concrete_type name(Args&&... args) { \
- concrete_type val; \
- static_cast<abstract_type&>(val) = \
- abstract_type::name(std::forward<Args>(args)...); \
- return val; \
- }
-
-// Provide the default named constructors, which default-initialize the
-// ConcreteType and then initialize the fields of ValueBase correctly.
-// Use like this:
-// struct Value : public ValueWithNamedConstructors<Value> { int new_field; };
-template <typename ConcreteType>
-struct ValueWithNamedConstructors : public ValueBase {
- // Named constructors.
- CONCRETE_NAMED_CONSTRUCTOR(ConcreteType, ValueBase, Unreachable)
- CONCRETE_NAMED_CONSTRUCTOR(ConcreteType, ValueBase, New)
-};
-
-// Provide the default named constructors, which default-initialize the
-// ConcreteType and then initialize the fields of ControlBase correctly.
-// Use like this:
-// struct Control : public ControlWithNamedConstructors<Control, Value> {
-// int my_uninitialized_field;
-// char* other_field = nullptr;
-// };
-template <typename ConcreteType, typename Value>
-struct ControlWithNamedConstructors : public ControlBase<Value> {
- // Named constructors.
- CONCRETE_NAMED_CONSTRUCTOR(ConcreteType, ControlBase<Value>, Block)
- CONCRETE_NAMED_CONSTRUCTOR(ConcreteType, ControlBase<Value>, If)
- CONCRETE_NAMED_CONSTRUCTOR(ConcreteType, ControlBase<Value>, Loop)
- CONCRETE_NAMED_CONSTRUCTOR(ConcreteType, ControlBase<Value>, Try)
};
// This is the list of callback functions that an interface for the
@@ -614,21 +622,22 @@ struct ControlWithNamedConstructors : public ControlBase<Value> {
F(Block, Control* block) \
F(Loop, Control* block) \
F(Try, Control* block) \
+ F(Catch, Control* block, Value* exception) \
F(If, const Value& cond, Control* if_block) \
F(FallThruTo, Control* c) \
F(PopControl, Control* block) \
F(EndControl, Control* block) \
/* Instructions: */ \
- F(UnOp, WasmOpcode opcode, FunctionSig*, const Value& value, Value* result) \
- F(BinOp, WasmOpcode opcode, FunctionSig*, const Value& lhs, \
- const Value& rhs, Value* result) \
+ F(UnOp, WasmOpcode opcode, const Value& value, Value* result) \
+ F(BinOp, WasmOpcode opcode, const Value& lhs, const Value& rhs, \
+ Value* result) \
F(I32Const, Value* result, int32_t value) \
F(I64Const, Value* result, int64_t value) \
F(F32Const, Value* result, float value) \
F(F64Const, Value* result, double value) \
F(RefNull, Value* result) \
F(Drop, const Value& value) \
- F(DoReturn, Vector<Value> values, bool implicit) \
+ F(DoReturn, Vector<Value> values) \
F(GetLocal, Value* result, const LocalIndexImmediate<validate>& imm) \
F(SetLocal, const Value& value, const LocalIndexImmediate<validate>& imm) \
F(TeeLocal, const Value& value, Value* result, \
@@ -639,7 +648,7 @@ struct ControlWithNamedConstructors : public ControlBase<Value> {
F(Select, const Value& cond, const Value& fval, const Value& tval, \
Value* result) \
F(Br, Control* target) \
- F(BrIf, const Value& cond, Control* target) \
+ F(BrIf, const Value& cond, uint32_t depth) \
F(BrTable, const BranchTableImmediate<validate>& imm, const Value& key) \
F(Else, Control* if_block) \
F(LoadMem, LoadType type, const MemoryAccessImmediate<validate>& imm, \
@@ -647,7 +656,7 @@ struct ControlWithNamedConstructors : public ControlBase<Value> {
F(StoreMem, StoreType type, const MemoryAccessImmediate<validate>& imm, \
const Value& index, const Value& value) \
F(CurrentMemoryPages, Value* result) \
- F(GrowMemory, const Value& value, Value* result) \
+ F(MemoryGrow, const Value& value, Value* result) \
F(CallDirect, const CallFunctionImmediate<validate>& imm, \
const Value args[], Value returns[]) \
F(CallIndirect, const Value& index, \
@@ -660,12 +669,24 @@ struct ControlWithNamedConstructors : public ControlBase<Value> {
const Value& input, Value* result) \
F(Simd8x16ShuffleOp, const Simd8x16ShuffleImmediate<validate>& imm, \
const Value& input0, const Value& input1, Value* result) \
- F(Throw, const ExceptionIndexImmediate<validate>&, Control* block, \
+ F(Throw, const ExceptionIndexImmediate<validate>& imm, \
const Vector<Value>& args) \
- F(CatchException, const ExceptionIndexImmediate<validate>& imm, \
- Control* block, Vector<Value> caught_values) \
+ F(Rethrow, const Value& exception) \
+ F(BrOnException, const Value& exception, \
+ const ExceptionIndexImmediate<validate>& imm, uint32_t depth, \
+ Vector<Value> values) \
F(AtomicOp, WasmOpcode opcode, Vector<Value> args, \
- const MemoryAccessImmediate<validate>& imm, Value* result)
+ const MemoryAccessImmediate<validate>& imm, Value* result) \
+ F(MemoryInit, const MemoryInitImmediate<validate>& imm, const Value& dst, \
+ const Value& src, const Value& size) \
+ F(MemoryDrop, const MemoryDropImmediate<validate>& imm) \
+ F(MemoryCopy, const MemoryIndexImmediate<validate>& imm, const Value& dst, \
+ const Value& src, const Value& size) \
+ F(MemoryFill, const MemoryIndexImmediate<validate>& imm, const Value& dst, \
+ const Value& value, const Value& size) \
+ F(TableInit, const TableInitImmediate<validate>& imm, Vector<Value> args) \
+ F(TableDrop, const TableDropImmediate<validate>& imm) \
+ F(TableCopy, const TableIndexImmediate<validate>& imm, Vector<Value> args)
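+
+// Illustrative sketch only (names here are hypothetical, not part of this
+// patch): an interface provides one method per {F(...)} entry above, and the
+// CALL_INTERFACE macros invoke it with the decoder as the leading argument:
+//
+//   struct MyInterface {
+//     void MemoryGrow(FullDecoder* decoder, const Value& value, Value* result);
+//     // ... one method per macro entry ...
+//   };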
// Generic Wasm bytecode decoder with utilities for decoding immediates,
// lengths, etc.
@@ -776,7 +797,7 @@ class WasmDecoder : public Decoder {
// Iteratively process all AST nodes nested inside the loop.
while (pc < decoder->end() && VALIDATE(decoder->ok())) {
WasmOpcode opcode = static_cast<WasmOpcode>(*pc);
- unsigned length = 1;
+ uint32_t length = 1;
switch (opcode) {
case kExprLoop:
case kExprIf:
@@ -796,7 +817,7 @@ class WasmDecoder : public Decoder {
length = 1 + imm.length;
break;
}
- case kExprGrowMemory:
+ case kExprMemoryGrow:
case kExprCallFunction:
case kExprCallIndirect:
// Add instance cache nodes to the assigned set.
@@ -826,16 +847,23 @@ class WasmDecoder : public Decoder {
return true;
}
- inline bool Validate(const byte* pc, ExceptionIndexImmediate<validate>& imm) {
+ inline bool Complete(const byte* pc, ExceptionIndexImmediate<validate>& imm) {
if (!VALIDATE(module_ != nullptr &&
imm.index < module_->exceptions.size())) {
- errorf(pc + 1, "Invalid exception index: %u", imm.index);
return false;
}
imm.exception = &module_->exceptions[imm.index];
return true;
}
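+ // {Complete} resolves the index without reporting an error, so callers that
+ // must stay silent on failure can reuse it; {Validate} also reports.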
+ inline bool Validate(const byte* pc, ExceptionIndexImmediate<validate>& imm) {
+ if (!Complete(pc, imm)) {
+ errorf(pc + 1, "Invalid exception index: %u", imm.index);
+ return false;
+ }
+ return true;
+ }
+
inline bool Validate(const byte* pc, GlobalIndexImmediate<validate>& imm) {
if (!VALIDATE(module_ != nullptr && imm.index < module_->globals.size())) {
errorf(pc + 1, "invalid global index: %u", imm.index);
@@ -884,10 +912,10 @@ class WasmDecoder : public Decoder {
return true;
}
- inline bool Validate(const byte* pc, BreakDepthImmediate<validate>& imm,
+ inline bool Validate(const byte* pc, BranchDepthImmediate<validate>& imm,
size_t control_depth) {
if (!VALIDATE(imm.depth < control_depth)) {
- errorf(pc + 1, "invalid break depth: %u", imm.depth);
+ errorf(pc + 1, "invalid branch depth: %u", imm.depth);
return false;
}
return true;
@@ -996,7 +1024,63 @@ class WasmDecoder : public Decoder {
return true;
}
- static unsigned OpcodeLength(Decoder* decoder, const byte* pc) {
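+ // The following overloads report validation errors relative to {pc_}.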
+ inline bool Validate(MemoryIndexImmediate<validate>& imm) {
+ if (!VALIDATE(module_ != nullptr && module_->has_memory)) {
+ errorf(pc_ + 1, "memory instruction with no memory");
+ return false;
+ }
+ return true;
+ }
+
+ inline bool Validate(MemoryInitImmediate<validate>& imm) {
+ if (!Validate(imm.memory)) return false;
+ if (!VALIDATE(module_ != nullptr &&
+ imm.data_segment_index <
+ module_->num_declared_data_segments)) {
+ errorf(pc_ + 2, "invalid data segment index: %u", imm.data_segment_index);
+ return false;
+ }
+ return true;
+ }
+
+ inline bool Validate(MemoryDropImmediate<validate>& imm) {
+ if (!VALIDATE(module_ != nullptr &&
+ imm.index < module_->num_declared_data_segments)) {
+ errorf(pc_ + 2, "invalid data segment index: %u", imm.index);
+ return false;
+ }
+ return true;
+ }
+
+ inline bool Validate(const byte* pc, TableIndexImmediate<validate>& imm) {
+ if (!VALIDATE(module_ != nullptr && imm.index < module_->tables.size())) {
+ errorf(pc_ + 1, "invalid table index: %u", imm.index);
+ return false;
+ }
+ return true;
+ }
+
+ inline bool Validate(TableInitImmediate<validate>& imm) {
+ if (!Validate(pc_ + 1, imm.table)) return false;
+ if (!VALIDATE(module_ != nullptr &&
+ imm.elem_segment_index < module_->elem_segments.size())) {
+ errorf(pc_ + 2, "invalid element segment index: %u",
+ imm.elem_segment_index);
+ return false;
+ }
+ return true;
+ }
+
+ inline bool Validate(TableDropImmediate<validate>& imm) {
+ if (!VALIDATE(module_ != nullptr &&
+ imm.index < module_->elem_segments.size())) {
+ errorf(pc_ + 2, "invalid element segment index: %u", imm.index);
+ return false;
+ }
+ return true;
+ }
+
+ static uint32_t OpcodeLength(Decoder* decoder, const byte* pc) {
WasmOpcode opcode = static_cast<WasmOpcode>(*pc);
switch (opcode) {
#define DECLARE_OPCODE_CASE(name, opcode, sig) case kExpr##name:
@@ -1009,7 +1093,7 @@ class WasmDecoder : public Decoder {
}
case kExprBr:
case kExprBrIf: {
- BreakDepthImmediate<validate> imm(decoder, pc);
+ BranchDepthImmediate<validate> imm(decoder, pc);
return 1 + imm.length;
}
case kExprSetGlobal:
@@ -1035,12 +1119,18 @@ class WasmDecoder : public Decoder {
return 1 + imm.length;
}
- case kExprThrow:
- case kExprCatch: {
+ case kExprThrow: {
ExceptionIndexImmediate<validate> imm(decoder, pc);
return 1 + imm.length;
}
+ case kExprBrOnExn: {
+ BranchDepthImmediate<validate> imm_br(decoder, pc);
+ if (!VALIDATE(decoder->ok())) return 1 + imm_br.length;
+ ExceptionIndexImmediate<validate> imm_idx(decoder, pc + imm_br.length);
+ return 1 + imm_br.length + imm_idx.length;
+ }
+
case kExprSetLocal:
case kExprTeeLocal:
case kExprGetLocal: {
@@ -1063,7 +1153,7 @@ class WasmDecoder : public Decoder {
case kExprRefNull: {
return 1;
}
- case kExprGrowMemory:
+ case kExprMemoryGrow:
case kExprMemorySize: {
MemoryIndexImmediate<validate> imm(decoder, pc);
return 1 + imm.length;
@@ -1072,10 +1162,55 @@ class WasmDecoder : public Decoder {
return 5;
case kExprF64Const:
return 9;
- case kNumericPrefix:
- return 2;
+ case kNumericPrefix: {
+ byte numeric_index =
+ decoder->read_u8<validate>(pc + 1, "numeric_index");
+ if (!VALIDATE(decoder->ok())) return 2;
+ WasmOpcode opcode =
+ static_cast<WasmOpcode>(kNumericPrefix << 8 | numeric_index);
+ switch (opcode) {
+ case kExprI32SConvertSatF32:
+ case kExprI32UConvertSatF32:
+ case kExprI32SConvertSatF64:
+ case kExprI32UConvertSatF64:
+ case kExprI64SConvertSatF32:
+ case kExprI64UConvertSatF32:
+ case kExprI64SConvertSatF64:
+ case kExprI64UConvertSatF64:
+ return 2;
+ case kExprMemoryInit: {
+ MemoryInitImmediate<validate> imm(decoder, pc);
+ return 2 + imm.length;
+ }
+ case kExprMemoryDrop: {
+ MemoryDropImmediate<validate> imm(decoder, pc);
+ return 2 + imm.length;
+ }
+ case kExprMemoryCopy:
+ case kExprMemoryFill: {
+ MemoryIndexImmediate<validate> imm(decoder, pc + 1);
+ return 2 + imm.length;
+ }
+ case kExprTableInit: {
+ TableInitImmediate<validate> imm(decoder, pc);
+ return 2 + imm.length;
+ }
+ case kExprTableDrop: {
+ TableDropImmediate<validate> imm(decoder, pc);
+ return 2 + imm.length;
+ }
+ case kExprTableCopy: {
+ TableIndexImmediate<validate> imm(decoder, pc + 1);
+ return 2 + imm.length;
+ }
+ default:
+ decoder->error(pc, "invalid numeric opcode");
+ return 2;
+ }
+ }
case kSimdPrefix: {
byte simd_index = decoder->read_u8<validate>(pc + 1, "simd_index");
+ if (!VALIDATE(decoder->ok())) return 2;
WasmOpcode opcode =
static_cast<WasmOpcode>(kSimdPrefix << 8 | simd_index);
switch (opcode) {
@@ -1104,6 +1239,7 @@ class WasmDecoder : public Decoder {
}
case kAtomicPrefix: {
byte atomic_index = decoder->read_u8<validate>(pc + 1, "atomic_index");
+ if (!VALIDATE(decoder->ok())) return 2;
WasmOpcode opcode =
static_cast<WasmOpcode>(kAtomicPrefix << 8 | atomic_index);
switch (opcode) {
@@ -1140,7 +1276,7 @@ class WasmDecoder : public Decoder {
return {2, 0};
FOREACH_LOAD_MEM_OPCODE(DECLARE_OPCODE_CASE)
case kExprTeeLocal:
- case kExprGrowMemory:
+ case kExprMemoryGrow:
return {1, 1};
case kExprSetLocal:
case kExprSetGlobal:
@@ -1148,7 +1284,9 @@ class WasmDecoder : public Decoder {
case kExprBrIf:
case kExprBrTable:
case kExprIf:
+ case kExprRethrow:
return {1, 0};
+ case kExprCatch:
case kExprGetLocal:
case kExprGetGlobal:
case kExprI32Const:
@@ -1170,11 +1308,19 @@ class WasmDecoder : public Decoder {
return {imm.sig->parameter_count() + 1,
imm.sig->return_count()};
}
+ case kExprThrow: {
+ ExceptionIndexImmediate<validate> imm(this, pc);
+ CHECK(Complete(pc, imm));
+ DCHECK_EQ(0, imm.exception->sig->return_count());
+ return {imm.exception->sig->parameter_count(), 0};
+ }
case kExprBr:
case kExprBlock:
case kExprLoop:
case kExprEnd:
case kExprElse:
+ case kExprTry:
+ case kExprBrOnExn:
case kExprNop:
case kExprReturn:
case kExprUnreachable:
@@ -1247,8 +1393,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
local_type_vec_(zone),
stack_(zone),
control_(zone),
- args_(zone),
- last_end_found_(false) {
+ args_(zone) {
this->local_types_ = &local_type_vec_;
}
@@ -1275,24 +1420,16 @@ class WasmFullDecoder : public WasmDecoder<validate> {
DecodeFunctionBody();
if (!this->failed()) CALL_INTERFACE(FinishFunction);
- if (this->failed()) return this->TraceFailed();
-
- if (!control_.empty()) {
- // Generate a better error message depending on whether the unterminated
- // control structure is the function body block or an inner structure.
- if (control_.size() > 1) {
- this->error(control_.back().pc, "unterminated control structure");
- } else {
- this->error("function body must end with \"end\" opcode");
- }
- return TraceFailed();
- }
-
- if (!last_end_found_) {
+ // Generate a better error message depending on whether the unterminated
+ // control structure is the function body block or an inner structure.
+ if (control_.size() > 1) {
+ this->error(control_.back().pc, "unterminated control structure");
+ } else if (control_.size() == 1) {
this->error("function body must end with \"end\" opcode");
- return false;
}
+ if (this->failed()) return this->TraceFailed();
+
if (FLAG_trace_wasm_decode_time) {
double ms = decode_timer.Elapsed().InMillisecondsF();
PrintF("wasm-decode %s (%0.3f ms)\n\n",
@@ -1305,9 +1442,9 @@ class WasmFullDecoder : public WasmDecoder<validate> {
}
bool TraceFailed() {
- TRACE("wasm-error module+%-6d func+%d: %s\n\n", this->error_offset_,
- this->GetBufferRelativeOffset(this->error_offset_),
- this->error_msg_.c_str());
+ TRACE("wasm-error module+%-6d func+%d: %s\n\n", this->error_.offset(),
+ this->GetBufferRelativeOffset(this->error_.offset()),
+ this->error_.message().c_str());
return false;
}
@@ -1318,7 +1455,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
inline Zone* zone() const { return zone_; }
- inline uint32_t NumLocals() {
+ inline uint32_t num_locals() const {
return static_cast<uint32_t>(local_type_vec_.size());
}
@@ -1347,21 +1484,12 @@ class WasmFullDecoder : public WasmDecoder<validate> {
}
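+ // {depth} is 1-based; {stack_value(1)} returns the value on top of the stack.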
inline Value* stack_value(uint32_t depth) {
- DCHECK_GT(stack_.size(), depth);
- return &stack_[stack_.size() - depth - 1];
- }
-
- inline Value& GetMergeValueFromStack(
- Control* c, Merge<Value>* merge, uint32_t i) {
- DCHECK(merge == &c->start_merge || merge == &c->end_merge);
- DCHECK_GT(merge->arity, i);
- DCHECK_GE(stack_.size(), c->stack_depth + merge->arity);
- return stack_[stack_.size() - merge->arity + i];
+ DCHECK_LT(0, depth);
+ DCHECK_GE(stack_.size(), depth);
+ return &*(stack_.end() - depth);
}
private:
- static constexpr size_t kErrorMsgSize = 128;
-
Zone* zone_;
Interface interface_;
@@ -1370,7 +1498,10 @@ class WasmFullDecoder : public WasmDecoder<validate> {
ZoneVector<Value> stack_; // stack of values.
ZoneVector<Control> control_; // stack of blocks, loops, and ifs.
ZoneVector<Value> args_; // parameters of current block or call
- bool last_end_found_;
+
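+ // A {Value} of unknown type {kWasmVar}, used for slots in unreachable code.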
+ static Value UnreachableValue(const uint8_t* pc) {
+ return Value{pc, kWasmVar};
+ }
bool CheckHasMemory() {
if (!VALIDATE(this->module_->has_memory)) {
@@ -1423,17 +1554,18 @@ class WasmFullDecoder : public WasmDecoder<validate> {
// Set up initial function block.
{
- auto* c = PushBlock();
+ auto* c = PushControl(kControlBlock);
InitMerge(&c->start_merge, 0, [](uint32_t) -> Value { UNREACHABLE(); });
InitMerge(&c->end_merge,
static_cast<uint32_t>(this->sig_->return_count()),
- [&] (uint32_t i) {
- return Value::New(this->pc_, this->sig_->GetReturn(i)); });
+ [&](uint32_t i) {
+ return Value{this->pc_, this->sig_->GetReturn(i)};
+ });
CALL_INTERFACE(StartFunctionBody, c);
}
while (this->pc_ < this->end_) { // decoding loop.
- unsigned len = 1;
+ uint32_t len = 1;
WasmOpcode opcode = static_cast<WasmOpcode>(*this->pc_);
CALL_INTERFACE_IF_REACHABLE(NextInstruction, opcode);
@@ -1449,544 +1581,543 @@ class WasmFullDecoder : public WasmDecoder<validate> {
#define TRACE_PART(...)
#endif
- FunctionSig* sig = const_cast<FunctionSig*>(kSimpleOpcodeSigs[opcode]);
- if (sig) {
- BuildSimpleOperator(opcode, sig);
- } else {
- // Complex bytecode.
- switch (opcode) {
- case kExprNop:
- break;
- case kExprBlock: {
- BlockTypeImmediate<validate> imm(this->enabled_, this, this->pc_);
- if (!this->Validate(imm)) break;
- PopArgs(imm.sig);
- auto* block = PushBlock();
- SetBlockType(block, imm);
- CALL_INTERFACE_IF_REACHABLE(Block, block);
- PushMergeValues(block, &block->start_merge);
- len = 1 + imm.length;
+ switch (opcode) {
+#define BUILD_SIMPLE_OPCODE(op, _, sig) \
+ case kExpr##op: \
+ BuildSimpleOperator_##sig(opcode); \
+ break;
+ FOREACH_SIMPLE_OPCODE(BUILD_SIMPLE_OPCODE)
+#undef BUILD_SIMPLE_OPCODE
+ case kExprNop:
+ break;
+ case kExprBlock: {
+ BlockTypeImmediate<validate> imm(this->enabled_, this, this->pc_);
+ if (!this->Validate(imm)) break;
+ PopArgs(imm.sig);
+ auto* block = PushControl(kControlBlock);
+ SetBlockType(block, imm);
+ CALL_INTERFACE_IF_REACHABLE(Block, block);
+ PushMergeValues(block, &block->start_merge);
+ len = 1 + imm.length;
+ break;
+ }
+ case kExprRethrow: {
+ CHECK_PROTOTYPE_OPCODE(eh);
+ auto exception = Pop(0, kWasmExceptRef);
+ CALL_INTERFACE_IF_REACHABLE(Rethrow, exception);
+ EndControl();
+ break;
+ }
+ case kExprThrow: {
+ CHECK_PROTOTYPE_OPCODE(eh);
+ ExceptionIndexImmediate<validate> imm(this, this->pc_);
+ len = 1 + imm.length;
+ if (!this->Validate(this->pc_, imm)) break;
+ PopArgs(imm.exception->ToFunctionSig());
+ CALL_INTERFACE_IF_REACHABLE(Throw, imm, VectorOf(args_));
+ EndControl();
+ break;
+ }
+ case kExprTry: {
+ CHECK_PROTOTYPE_OPCODE(eh);
+ BlockTypeImmediate<validate> imm(this->enabled_, this, this->pc_);
+ if (!this->Validate(imm)) break;
+ PopArgs(imm.sig);
+ auto* try_block = PushControl(kControlTry);
+ SetBlockType(try_block, imm);
+ len = 1 + imm.length;
+ CALL_INTERFACE_IF_REACHABLE(Try, try_block);
+ PushMergeValues(try_block, &try_block->start_merge);
+ break;
+ }
+ case kExprCatch: {
+ CHECK_PROTOTYPE_OPCODE(eh);
+ if (!VALIDATE(!control_.empty())) {
+ this->error("catch does not match any try");
break;
}
- case kExprRethrow: {
- // TODO(kschimpf): Implement.
- CHECK_PROTOTYPE_OPCODE(eh);
- OPCODE_ERROR(opcode, "not implemented yet");
+ Control* c = &control_.back();
+ if (!VALIDATE(c->is_try())) {
+ this->error("catch does not match any try");
break;
}
- case kExprThrow: {
- CHECK_PROTOTYPE_OPCODE(eh);
- ExceptionIndexImmediate<validate> imm(this, this->pc_);
- len = 1 + imm.length;
- if (!this->Validate(this->pc_, imm)) break;
- PopArgs(imm.exception->ToFunctionSig());
- CALL_INTERFACE_IF_REACHABLE(Throw, imm, &control_.back(),
- vec2vec(args_));
- EndControl();
+ if (!VALIDATE(c->is_incomplete_try())) {
+ this->error("catch already present for try");
break;
}
- case kExprTry: {
- CHECK_PROTOTYPE_OPCODE(eh);
- BlockTypeImmediate<validate> imm(this->enabled_, this, this->pc_);
- if (!this->Validate(imm)) break;
- PopArgs(imm.sig);
- auto* try_block = PushTry();
- SetBlockType(try_block, imm);
- len = 1 + imm.length;
- CALL_INTERFACE_IF_REACHABLE(Try, try_block);
- PushMergeValues(try_block, &try_block->start_merge);
- break;
+ c->kind = kControlTryCatch;
+ FallThruTo(c);
+ stack_.erase(stack_.begin() + c->stack_depth, stack_.end());
+ c->reachability = control_at(1)->innerReachability();
+ auto* exception = Push(kWasmExceptRef);
+ CALL_INTERFACE_IF_PARENT_REACHABLE(Catch, c, exception);
+ break;
+ }
+ case kExprBrOnExn: {
+ CHECK_PROTOTYPE_OPCODE(eh);
+ BranchDepthImmediate<validate> imm_br(this, this->pc_);
+ if (!this->Validate(this->pc_, imm_br, control_.size())) break;
+ ExceptionIndexImmediate<validate> imm_idx(this,
+ this->pc_ + imm_br.length);
+ if (!this->Validate(this->pc_ + imm_br.length, imm_idx)) break;
+ Control* c = control_at(imm_br.depth);
+ auto exception = Pop(0, kWasmExceptRef);
+ const WasmExceptionSig* sig = imm_idx.exception->sig;
+ size_t value_count = sig->parameter_count();
+ // TODO(mstarzinger): This operand stack mutation is an ugly hack to
+ // make both type checking here as well as environment merging in the
+ // graph builder interface work out of the box. We should introduce
+ // special handling for both and do minimal/no stack mutation here.
+ for (size_t i = 0; i < value_count; ++i) Push(sig->GetParam(i));
+ Vector<Value> values(stack_.data() + c->stack_depth, value_count);
+ if (!TypeCheckBranch(c)) break;
+ if (control_.back().reachable()) {
+ CALL_INTERFACE(BrOnException, exception, imm_idx, imm_br.depth,
+ values);
+ c->br_merge()->reached = true;
}
- case kExprCatch: {
- // TODO(kschimpf): Fix to use type signature of exception.
- CHECK_PROTOTYPE_OPCODE(eh);
- ExceptionIndexImmediate<validate> imm(this, this->pc_);
- len = 1 + imm.length;
-
- if (!this->Validate(this->pc_, imm)) break;
-
- if (!VALIDATE(!control_.empty())) {
- this->error("catch does not match any try");
- break;
- }
-
- Control* c = &control_.back();
- if (!VALIDATE(c->is_try())) {
- this->error("catch does not match any try");
- break;
- }
-
- if (!VALIDATE(c->is_incomplete_try())) {
- OPCODE_ERROR(opcode, "multiple catch blocks not implemented");
- break;
- }
- c->kind = kControlTryCatch;
- FallThruTo(c);
- stack_.resize(c->stack_depth);
- const WasmExceptionSig* sig = imm.exception->sig;
- for (size_t i = 0, e = sig->parameter_count(); i < e; ++i) {
- Push(sig->GetParam(i));
- }
- Vector<Value> values(stack_.data() + c->stack_depth,
- sig->parameter_count());
- CALL_INTERFACE_IF_PARENT_REACHABLE(CatchException, imm, c, values);
- c->reachability = control_at(1)->innerReachability();
+ len = 1 + imm_br.length + imm_idx.length;
+ for (size_t i = 0; i < value_count; ++i) Pop();
+ auto* pexception = Push(kWasmExceptRef);
+ *pexception = exception;
+ break;
+ }
+ case kExprLoop: {
+ BlockTypeImmediate<validate> imm(this->enabled_, this, this->pc_);
+ if (!this->Validate(imm)) break;
+ PopArgs(imm.sig);
+ auto* block = PushControl(kControlLoop);
+ SetBlockType(&control_.back(), imm);
+ len = 1 + imm.length;
+ CALL_INTERFACE_IF_REACHABLE(Loop, block);
+ PushMergeValues(block, &block->start_merge);
+ break;
+ }
+ case kExprIf: {
+ BlockTypeImmediate<validate> imm(this->enabled_, this, this->pc_);
+ if (!this->Validate(imm)) break;
+ auto cond = Pop(0, kWasmI32);
+ PopArgs(imm.sig);
+ if (!VALIDATE(this->ok())) break;
+ auto* if_block = PushControl(kControlIf);
+ SetBlockType(if_block, imm);
+ CALL_INTERFACE_IF_REACHABLE(If, cond, if_block);
+ len = 1 + imm.length;
+ PushMergeValues(if_block, &if_block->start_merge);
+ break;
+ }
+ case kExprElse: {
+ if (!VALIDATE(!control_.empty())) {
+ this->error("else does not match any if");
break;
}
- case kExprCatchAll: {
- CHECK_PROTOTYPE_OPCODE(eh);
- if (!VALIDATE(!control_.empty())) {
- this->error("catch-all does not match any try");
- break;
- }
- Control* c = &control_.back();
- if (!VALIDATE(c->is_try())) {
- this->error("catch-all does not match any try");
- break;
- }
- if (!VALIDATE(!c->is_try_catchall())) {
- this->error("catch-all already present for try");
- break;
- }
- c->kind = kControlTryCatchAll;
- // TODO(mstarzinger): Implement control flow for catch-all.
+ Control* c = &control_.back();
+ if (!VALIDATE(c->is_if())) {
+ this->error(this->pc_, "else does not match an if");
break;
}
- case kExprLoop: {
- BlockTypeImmediate<validate> imm(this->enabled_, this, this->pc_);
- if (!this->Validate(imm)) break;
- PopArgs(imm.sig);
- auto* block = PushLoop();
- SetBlockType(&control_.back(), imm);
- len = 1 + imm.length;
- CALL_INTERFACE_IF_REACHABLE(Loop, block);
- PushMergeValues(block, &block->start_merge);
+ if (c->is_if_else()) {
+ this->error(this->pc_, "else already present for if");
break;
}
- case kExprIf: {
- BlockTypeImmediate<validate> imm(this->enabled_, this, this->pc_);
- if (!this->Validate(imm)) break;
- auto cond = Pop(0, kWasmI32);
- PopArgs(imm.sig);
- if (!VALIDATE(this->ok())) break;
- auto* if_block = PushIf();
- SetBlockType(if_block, imm);
- CALL_INTERFACE_IF_REACHABLE(If, cond, if_block);
- len = 1 + imm.length;
- PushMergeValues(if_block, &if_block->start_merge);
+ if (!TypeCheckFallThru(c)) break;
+ c->kind = kControlIfElse;
+ CALL_INTERFACE_IF_PARENT_REACHABLE(Else, c);
+ if (c->reachable()) c->end_merge.reached = true;
+ PushMergeValues(c, &c->start_merge);
+ c->reachability = control_at(1)->innerReachability();
+ break;
+ }
+ case kExprEnd: {
+ if (!VALIDATE(!control_.empty())) {
+ this->error("end does not match any if, try, or block");
break;
}
- case kExprElse: {
- if (!VALIDATE(!control_.empty())) {
- this->error("else does not match any if");
- break;
- }
- Control* c = &control_.back();
- if (!VALIDATE(c->is_if())) {
- this->error(this->pc_, "else does not match an if");
- break;
- }
- if (c->is_if_else()) {
- this->error(this->pc_, "else already present for if");
- break;
- }
- FallThruTo(c);
- c->kind = kControlIfElse;
- CALL_INTERFACE_IF_PARENT_REACHABLE(Else, c);
- PushMergeValues(c, &c->start_merge);
- c->reachability = control_at(1)->innerReachability();
+ Control* c = &control_.back();
+ if (!VALIDATE(!c->is_incomplete_try())) {
+ this->error(this->pc_, "missing catch or catch-all in try");
break;
}
- case kExprEnd: {
- if (!VALIDATE(!control_.empty())) {
- this->error("end does not match any if, try, or block");
- return;
- }
- Control* c = &control_.back();
- if (!VALIDATE(!c->is_incomplete_try())) {
- this->error(this->pc_, "missing catch or catch-all in try");
+ if (c->is_onearmed_if()) {
+ if (!VALIDATE(c->end_merge.arity == c->start_merge.arity)) {
+ this->error(
+ c->pc,
+ "start-arity and end-arity of one-armed if must match");
break;
}
- if (c->is_onearmed_if()) {
- // Emulate empty else arm.
- FallThruTo(c);
- if (this->failed()) break;
- CALL_INTERFACE_IF_PARENT_REACHABLE(Else, c);
- PushMergeValues(c, &c->start_merge);
- c->reachability = control_at(1)->innerReachability();
- }
-
- FallThruTo(c);
- // A loop just leaves the values on the stack.
- if (!c->is_loop()) PushMergeValues(c, &c->end_merge);
-
- if (control_.size() == 1) {
- // If at the last (implicit) control, check we are at end.
- if (!VALIDATE(this->pc_ + 1 == this->end_)) {
- this->error(this->pc_ + 1, "trailing code after function end");
- break;
- }
- last_end_found_ = true;
- // The result of the block is the return value.
- TRACE_PART("\n" TRACE_INST_FORMAT, startrel(this->pc_),
- "(implicit) return");
- DoReturn(c, true);
- }
-
- PopControl(c);
- break;
- }
- case kExprSelect: {
- auto cond = Pop(2, kWasmI32);
- auto fval = Pop();
- auto tval = Pop(0, fval.type);
- auto* result = Push(tval.type == kWasmVar ? fval.type : tval.type);
- CALL_INTERFACE_IF_REACHABLE(Select, cond, fval, tval, result);
- break;
- }
- case kExprBr: {
- BreakDepthImmediate<validate> imm(this, this->pc_);
- if (!this->Validate(this->pc_, imm, control_.size())) break;
- Control* c = control_at(imm.depth);
- if (!TypeCheckBreak(c)) break;
- if (control_.back().reachable()) {
- CALL_INTERFACE(Br, c);
- c->br_merge()->reached = true;
- }
- len = 1 + imm.length;
- EndControl();
- break;
}
- case kExprBrIf: {
- BreakDepthImmediate<validate> imm(this, this->pc_);
- auto cond = Pop(0, kWasmI32);
- if (this->failed()) break;
- if (!this->Validate(this->pc_, imm, control_.size())) break;
- Control* c = control_at(imm.depth);
- if (!TypeCheckBreak(c)) break;
- if (control_.back().reachable()) {
- CALL_INTERFACE(BrIf, cond, c);
- c->br_merge()->reached = true;
- }
- len = 1 + imm.length;
- break;
- }
- case kExprBrTable: {
- BranchTableImmediate<validate> imm(this, this->pc_);
- BranchTableIterator<validate> iterator(this, imm);
- auto key = Pop(0, kWasmI32);
- if (this->failed()) break;
- if (!this->Validate(this->pc_, imm, control_.size())) break;
- uint32_t br_arity = 0;
- std::vector<bool> br_targets(control_.size());
- while (iterator.has_next()) {
- const uint32_t i = iterator.cur_index();
- const byte* pos = iterator.pc();
- uint32_t target = iterator.next();
- if (!VALIDATE(target < control_.size())) {
- this->errorf(pos,
- "improper branch in br_table target %u (depth %u)",
- i, target);
- break;
- }
- // Avoid redundant break target checks.
- if (br_targets[target]) continue;
- br_targets[target] = true;
- // Check that label types match up.
- Control* c = control_at(target);
- uint32_t arity = c->br_merge()->arity;
- if (i == 0) {
- br_arity = arity;
- } else if (!VALIDATE(br_arity == arity)) {
- this->errorf(pos,
- "inconsistent arity in br_table target %u"
- " (previous was %u, this one %u)",
- i, br_arity, arity);
- }
- if (!TypeCheckBreak(c)) break;
- }
- if (this->failed()) break;
- if (control_.back().reachable()) {
- CALL_INTERFACE(BrTable, imm, key);
+ if (!TypeCheckFallThru(c)) break;
- for (uint32_t depth = control_depth(); depth-- > 0;) {
- if (!br_targets[depth]) continue;
- control_at(depth)->br_merge()->reached = true;
- }
+ if (control_.size() == 1) {
+ // If at the last (implicit) control, check we are at end.
+ if (!VALIDATE(this->pc_ + 1 == this->end_)) {
+ this->error(this->pc_ + 1, "trailing code after function end");
+ break;
}
-
- len = 1 + iterator.length();
- EndControl();
- break;
- }
- case kExprReturn: {
- DoReturn(&control_.back(), false);
- break;
- }
- case kExprUnreachable: {
- CALL_INTERFACE_IF_REACHABLE(Unreachable);
- EndControl();
- break;
- }
- case kExprI32Const: {
- ImmI32Immediate<validate> imm(this, this->pc_);
- auto* value = Push(kWasmI32);
- CALL_INTERFACE_IF_REACHABLE(I32Const, value, imm.value);
- len = 1 + imm.length;
- break;
- }
- case kExprI64Const: {
- ImmI64Immediate<validate> imm(this, this->pc_);
- auto* value = Push(kWasmI64);
- CALL_INTERFACE_IF_REACHABLE(I64Const, value, imm.value);
- len = 1 + imm.length;
- break;
- }
- case kExprF32Const: {
- ImmF32Immediate<validate> imm(this, this->pc_);
- auto* value = Push(kWasmF32);
- CALL_INTERFACE_IF_REACHABLE(F32Const, value, imm.value);
- len = 1 + imm.length;
- break;
- }
- case kExprF64Const: {
- ImmF64Immediate<validate> imm(this, this->pc_);
- auto* value = Push(kWasmF64);
- CALL_INTERFACE_IF_REACHABLE(F64Const, value, imm.value);
- len = 1 + imm.length;
- break;
- }
- case kExprRefNull: {
- CHECK_PROTOTYPE_OPCODE(anyref);
- auto* value = Push(kWasmAnyRef);
- CALL_INTERFACE_IF_REACHABLE(RefNull, value);
- len = 1;
- break;
- }
- case kExprGetLocal: {
- LocalIndexImmediate<validate> imm(this, this->pc_);
- if (!this->Validate(this->pc_, imm)) break;
- auto* value = Push(imm.type);
- CALL_INTERFACE_IF_REACHABLE(GetLocal, value, imm);
- len = 1 + imm.length;
- break;
- }
- case kExprSetLocal: {
- LocalIndexImmediate<validate> imm(this, this->pc_);
- if (!this->Validate(this->pc_, imm)) break;
- auto value = Pop(0, local_type_vec_[imm.index]);
- CALL_INTERFACE_IF_REACHABLE(SetLocal, value, imm);
- len = 1 + imm.length;
+ // The result of the block is the return value.
+ TRACE_PART("\n" TRACE_INST_FORMAT, startrel(this->pc_),
+ "(implicit) return");
+ DoReturn();
+ control_.clear();
break;
}
- case kExprTeeLocal: {
- LocalIndexImmediate<validate> imm(this, this->pc_);
- if (!this->Validate(this->pc_, imm)) break;
- auto value = Pop(0, local_type_vec_[imm.index]);
- auto* result = Push(value.type);
- CALL_INTERFACE_IF_REACHABLE(TeeLocal, value, result, imm);
- len = 1 + imm.length;
- break;
- }
- case kExprDrop: {
- auto value = Pop();
- CALL_INTERFACE_IF_REACHABLE(Drop, value);
- break;
+
+ PopControl(c);
+ break;
+ }
+ case kExprSelect: {
+ auto cond = Pop(2, kWasmI32);
+ auto fval = Pop();
+ auto tval = Pop(0, fval.type);
+ auto* result = Push(tval.type == kWasmVar ? fval.type : tval.type);
+ CALL_INTERFACE_IF_REACHABLE(Select, cond, fval, tval, result);
+ break;
+ }
+ case kExprBr: {
+ BranchDepthImmediate<validate> imm(this, this->pc_);
+ if (!this->Validate(this->pc_, imm, control_.size())) break;
+ Control* c = control_at(imm.depth);
+ if (!TypeCheckBranch(c)) break;
+ if (imm.depth == control_.size() - 1) {
+ DoReturn();
+ } else if (control_.back().reachable()) {
+ CALL_INTERFACE(Br, c);
+ c->br_merge()->reached = true;
}
- case kExprGetGlobal: {
- GlobalIndexImmediate<validate> imm(this, this->pc_);
- len = 1 + imm.length;
- if (!this->Validate(this->pc_, imm)) break;
- auto* result = Push(imm.type);
- CALL_INTERFACE_IF_REACHABLE(GetGlobal, result, imm);
- break;
+ len = 1 + imm.length;
+ EndControl();
+ break;
+ }
+ case kExprBrIf: {
+ BranchDepthImmediate<validate> imm(this, this->pc_);
+ auto cond = Pop(0, kWasmI32);
+ if (this->failed()) break;
+ if (!this->Validate(this->pc_, imm, control_.size())) break;
+ Control* c = control_at(imm.depth);
+ if (!TypeCheckBranch(c)) break;
+ if (control_.back().reachable()) {
+ CALL_INTERFACE(BrIf, cond, imm.depth);
+ c->br_merge()->reached = true;
}
- case kExprSetGlobal: {
- GlobalIndexImmediate<validate> imm(this, this->pc_);
- len = 1 + imm.length;
- if (!this->Validate(this->pc_, imm)) break;
- if (!VALIDATE(imm.global->mutability)) {
- this->errorf(this->pc_, "immutable global #%u cannot be assigned",
- imm.index);
+ len = 1 + imm.length;
+ break;
+ }
+ case kExprBrTable: {
+ BranchTableImmediate<validate> imm(this, this->pc_);
+ BranchTableIterator<validate> iterator(this, imm);
+ auto key = Pop(0, kWasmI32);
+ if (this->failed()) break;
+ if (!this->Validate(this->pc_, imm, control_.size())) break;
+ uint32_t br_arity = 0;
+ std::vector<bool> br_targets(control_.size());
+ while (iterator.has_next()) {
+ const uint32_t i = iterator.cur_index();
+ const byte* pos = iterator.pc();
+ uint32_t target = iterator.next();
+ if (!VALIDATE(target < control_.size())) {
+ this->errorf(pos,
+ "improper branch in br_table target %u (depth %u)",
+ i, target);
break;
}
- auto value = Pop(0, imm.type);
- CALL_INTERFACE_IF_REACHABLE(SetGlobal, value, imm);
- break;
- }
- case kExprI32LoadMem8S:
- len = 1 + DecodeLoadMem(LoadType::kI32Load8S);
- break;
- case kExprI32LoadMem8U:
- len = 1 + DecodeLoadMem(LoadType::kI32Load8U);
- break;
- case kExprI32LoadMem16S:
- len = 1 + DecodeLoadMem(LoadType::kI32Load16S);
- break;
- case kExprI32LoadMem16U:
- len = 1 + DecodeLoadMem(LoadType::kI32Load16U);
- break;
- case kExprI32LoadMem:
- len = 1 + DecodeLoadMem(LoadType::kI32Load);
- break;
- case kExprI64LoadMem8S:
- len = 1 + DecodeLoadMem(LoadType::kI64Load8S);
- break;
- case kExprI64LoadMem8U:
- len = 1 + DecodeLoadMem(LoadType::kI64Load8U);
- break;
- case kExprI64LoadMem16S:
- len = 1 + DecodeLoadMem(LoadType::kI64Load16S);
- break;
- case kExprI64LoadMem16U:
- len = 1 + DecodeLoadMem(LoadType::kI64Load16U);
- break;
- case kExprI64LoadMem32S:
- len = 1 + DecodeLoadMem(LoadType::kI64Load32S);
- break;
- case kExprI64LoadMem32U:
- len = 1 + DecodeLoadMem(LoadType::kI64Load32U);
- break;
- case kExprI64LoadMem:
- len = 1 + DecodeLoadMem(LoadType::kI64Load);
- break;
- case kExprF32LoadMem:
- len = 1 + DecodeLoadMem(LoadType::kF32Load);
- break;
- case kExprF64LoadMem:
- len = 1 + DecodeLoadMem(LoadType::kF64Load);
- break;
- case kExprI32StoreMem8:
- len = 1 + DecodeStoreMem(StoreType::kI32Store8);
- break;
- case kExprI32StoreMem16:
- len = 1 + DecodeStoreMem(StoreType::kI32Store16);
- break;
- case kExprI32StoreMem:
- len = 1 + DecodeStoreMem(StoreType::kI32Store);
- break;
- case kExprI64StoreMem8:
- len = 1 + DecodeStoreMem(StoreType::kI64Store8);
- break;
- case kExprI64StoreMem16:
- len = 1 + DecodeStoreMem(StoreType::kI64Store16);
- break;
- case kExprI64StoreMem32:
- len = 1 + DecodeStoreMem(StoreType::kI64Store32);
- break;
- case kExprI64StoreMem:
- len = 1 + DecodeStoreMem(StoreType::kI64Store);
- break;
- case kExprF32StoreMem:
- len = 1 + DecodeStoreMem(StoreType::kF32Store);
- break;
- case kExprF64StoreMem:
- len = 1 + DecodeStoreMem(StoreType::kF64Store);
- break;
- case kExprGrowMemory: {
- if (!CheckHasMemory()) break;
- MemoryIndexImmediate<validate> imm(this, this->pc_);
- len = 1 + imm.length;
- DCHECK_NOT_NULL(this->module_);
- if (!VALIDATE(this->module_->origin == kWasmOrigin)) {
- this->error("grow_memory is not supported for asmjs modules");
- break;
+ // Avoid redundant branch target checks.
+ if (br_targets[target]) continue;
+ br_targets[target] = true;
+ // Check that label types match up.
+ Control* c = control_at(target);
+ uint32_t arity = c->br_merge()->arity;
+ if (i == 0) {
+ br_arity = arity;
+ } else if (!VALIDATE(br_arity == arity)) {
+ this->errorf(pos,
+ "inconsistent arity in br_table target %u"
+ " (previous was %u, this one %u)",
+ i, br_arity, arity);
}
- auto value = Pop(0, kWasmI32);
- auto* result = Push(kWasmI32);
- CALL_INTERFACE_IF_REACHABLE(GrowMemory, value, result);
- break;
+ if (!TypeCheckBranch(c)) break;
}
- case kExprMemorySize: {
- if (!CheckHasMemory()) break;
- MemoryIndexImmediate<validate> imm(this, this->pc_);
- auto* result = Push(kWasmI32);
- len = 1 + imm.length;
- CALL_INTERFACE_IF_REACHABLE(CurrentMemoryPages, result);
- break;
+ if (this->failed()) break;
+
+ if (control_.back().reachable()) {
+ CALL_INTERFACE(BrTable, imm, key);
+
+ for (uint32_t depth = control_depth(); depth-- > 0;) {
+ if (!br_targets[depth]) continue;
+ control_at(depth)->br_merge()->reached = true;
+ }
}
- case kExprCallFunction: {
- CallFunctionImmediate<validate> imm(this, this->pc_);
- len = 1 + imm.length;
- if (!this->Validate(this->pc_, imm)) break;
- // TODO(clemensh): Better memory management.
- PopArgs(imm.sig);
- auto* returns = PushReturns(imm.sig);
- CALL_INTERFACE_IF_REACHABLE(CallDirect, imm, args_.data(), returns);
+
+ len = 1 + iterator.length();
+ EndControl();
+ break;
+ }
+ case kExprReturn: {
+ if (!TypeCheckReturn()) break;
+ DoReturn();
+ EndControl();
+ break;
+ }
+ case kExprUnreachable: {
+ CALL_INTERFACE_IF_REACHABLE(Unreachable);
+ EndControl();
+ break;
+ }
+ case kExprI32Const: {
+ ImmI32Immediate<validate> imm(this, this->pc_);
+ auto* value = Push(kWasmI32);
+ CALL_INTERFACE_IF_REACHABLE(I32Const, value, imm.value);
+ len = 1 + imm.length;
+ break;
+ }
+ case kExprI64Const: {
+ ImmI64Immediate<validate> imm(this, this->pc_);
+ auto* value = Push(kWasmI64);
+ CALL_INTERFACE_IF_REACHABLE(I64Const, value, imm.value);
+ len = 1 + imm.length;
+ break;
+ }
+ case kExprF32Const: {
+ ImmF32Immediate<validate> imm(this, this->pc_);
+ auto* value = Push(kWasmF32);
+ CALL_INTERFACE_IF_REACHABLE(F32Const, value, imm.value);
+ len = 1 + imm.length;
+ break;
+ }
+ case kExprF64Const: {
+ ImmF64Immediate<validate> imm(this, this->pc_);
+ auto* value = Push(kWasmF64);
+ CALL_INTERFACE_IF_REACHABLE(F64Const, value, imm.value);
+ len = 1 + imm.length;
+ break;
+ }
+ case kExprRefNull: {
+ CHECK_PROTOTYPE_OPCODE(anyref);
+ auto* value = Push(kWasmAnyRef);
+ CALL_INTERFACE_IF_REACHABLE(RefNull, value);
+ len = 1;
+ break;
+ }
+ case kExprGetLocal: {
+ LocalIndexImmediate<validate> imm(this, this->pc_);
+ if (!this->Validate(this->pc_, imm)) break;
+ auto* value = Push(imm.type);
+ CALL_INTERFACE_IF_REACHABLE(GetLocal, value, imm);
+ len = 1 + imm.length;
+ break;
+ }
+ case kExprSetLocal: {
+ LocalIndexImmediate<validate> imm(this, this->pc_);
+ if (!this->Validate(this->pc_, imm)) break;
+ auto value = Pop(0, local_type_vec_[imm.index]);
+ CALL_INTERFACE_IF_REACHABLE(SetLocal, value, imm);
+ len = 1 + imm.length;
+ break;
+ }
+ case kExprTeeLocal: {
+ LocalIndexImmediate<validate> imm(this, this->pc_);
+ if (!this->Validate(this->pc_, imm)) break;
+ auto value = Pop(0, local_type_vec_[imm.index]);
+ auto* result = Push(value.type);
+ CALL_INTERFACE_IF_REACHABLE(TeeLocal, value, result, imm);
+ len = 1 + imm.length;
+ break;
+ }
+ case kExprDrop: {
+ auto value = Pop();
+ CALL_INTERFACE_IF_REACHABLE(Drop, value);
+ break;
+ }
+ case kExprGetGlobal: {
+ GlobalIndexImmediate<validate> imm(this, this->pc_);
+ len = 1 + imm.length;
+ if (!this->Validate(this->pc_, imm)) break;
+ auto* result = Push(imm.type);
+ CALL_INTERFACE_IF_REACHABLE(GetGlobal, result, imm);
+ break;
+ }
+ case kExprSetGlobal: {
+ GlobalIndexImmediate<validate> imm(this, this->pc_);
+ len = 1 + imm.length;
+ if (!this->Validate(this->pc_, imm)) break;
+ if (!VALIDATE(imm.global->mutability)) {
+ this->errorf(this->pc_, "immutable global #%u cannot be assigned",
+ imm.index);
break;
}
- case kExprCallIndirect: {
- CallIndirectImmediate<validate> imm(this, this->pc_);
- len = 1 + imm.length;
- if (!this->Validate(this->pc_, imm)) break;
- auto index = Pop(0, kWasmI32);
- PopArgs(imm.sig);
- auto* returns = PushReturns(imm.sig);
- CALL_INTERFACE_IF_REACHABLE(CallIndirect, index, imm, args_.data(),
- returns);
+ auto value = Pop(0, imm.type);
+ CALL_INTERFACE_IF_REACHABLE(SetGlobal, value, imm);
+ break;
+ }
+ case kExprI32LoadMem8S:
+ len = 1 + DecodeLoadMem(LoadType::kI32Load8S);
+ break;
+ case kExprI32LoadMem8U:
+ len = 1 + DecodeLoadMem(LoadType::kI32Load8U);
+ break;
+ case kExprI32LoadMem16S:
+ len = 1 + DecodeLoadMem(LoadType::kI32Load16S);
+ break;
+ case kExprI32LoadMem16U:
+ len = 1 + DecodeLoadMem(LoadType::kI32Load16U);
+ break;
+ case kExprI32LoadMem:
+ len = 1 + DecodeLoadMem(LoadType::kI32Load);
+ break;
+ case kExprI64LoadMem8S:
+ len = 1 + DecodeLoadMem(LoadType::kI64Load8S);
+ break;
+ case kExprI64LoadMem8U:
+ len = 1 + DecodeLoadMem(LoadType::kI64Load8U);
+ break;
+ case kExprI64LoadMem16S:
+ len = 1 + DecodeLoadMem(LoadType::kI64Load16S);
+ break;
+ case kExprI64LoadMem16U:
+ len = 1 + DecodeLoadMem(LoadType::kI64Load16U);
+ break;
+ case kExprI64LoadMem32S:
+ len = 1 + DecodeLoadMem(LoadType::kI64Load32S);
+ break;
+ case kExprI64LoadMem32U:
+ len = 1 + DecodeLoadMem(LoadType::kI64Load32U);
+ break;
+ case kExprI64LoadMem:
+ len = 1 + DecodeLoadMem(LoadType::kI64Load);
+ break;
+ case kExprF32LoadMem:
+ len = 1 + DecodeLoadMem(LoadType::kF32Load);
+ break;
+ case kExprF64LoadMem:
+ len = 1 + DecodeLoadMem(LoadType::kF64Load);
+ break;
+ case kExprI32StoreMem8:
+ len = 1 + DecodeStoreMem(StoreType::kI32Store8);
+ break;
+ case kExprI32StoreMem16:
+ len = 1 + DecodeStoreMem(StoreType::kI32Store16);
+ break;
+ case kExprI32StoreMem:
+ len = 1 + DecodeStoreMem(StoreType::kI32Store);
+ break;
+ case kExprI64StoreMem8:
+ len = 1 + DecodeStoreMem(StoreType::kI64Store8);
+ break;
+ case kExprI64StoreMem16:
+ len = 1 + DecodeStoreMem(StoreType::kI64Store16);
+ break;
+ case kExprI64StoreMem32:
+ len = 1 + DecodeStoreMem(StoreType::kI64Store32);
+ break;
+ case kExprI64StoreMem:
+ len = 1 + DecodeStoreMem(StoreType::kI64Store);
+ break;
+ case kExprF32StoreMem:
+ len = 1 + DecodeStoreMem(StoreType::kF32Store);
+ break;
+ case kExprF64StoreMem:
+ len = 1 + DecodeStoreMem(StoreType::kF64Store);
+ break;
+ case kExprMemoryGrow: {
+ if (!CheckHasMemory()) break;
+ MemoryIndexImmediate<validate> imm(this, this->pc_);
+ len = 1 + imm.length;
+ DCHECK_NOT_NULL(this->module_);
+ if (!VALIDATE(this->module_->origin == kWasmOrigin)) {
+ this->error("grow_memory is not supported for asmjs modules");
break;
}
- case kNumericPrefix: {
+ auto value = Pop(0, kWasmI32);
+ auto* result = Push(kWasmI32);
+ CALL_INTERFACE_IF_REACHABLE(MemoryGrow, value, result);
+ break;
+ }
+ case kExprMemorySize: {
+ if (!CheckHasMemory()) break;
+ MemoryIndexImmediate<validate> imm(this, this->pc_);
+ auto* result = Push(kWasmI32);
+ len = 1 + imm.length;
+ CALL_INTERFACE_IF_REACHABLE(CurrentMemoryPages, result);
+ break;
+ }
+ case kExprCallFunction: {
+ CallFunctionImmediate<validate> imm(this, this->pc_);
+ len = 1 + imm.length;
+ if (!this->Validate(this->pc_, imm)) break;
+ // TODO(clemensh): Better memory management.
+ PopArgs(imm.sig);
+ auto* returns = PushReturns(imm.sig);
+ CALL_INTERFACE_IF_REACHABLE(CallDirect, imm, args_.data(), returns);
+ break;
+ }
+ case kExprCallIndirect: {
+ CallIndirectImmediate<validate> imm(this, this->pc_);
+ len = 1 + imm.length;
+ if (!this->Validate(this->pc_, imm)) break;
+ auto index = Pop(0, kWasmI32);
+ PopArgs(imm.sig);
+ auto* returns = PushReturns(imm.sig);
+ CALL_INTERFACE_IF_REACHABLE(CallIndirect, index, imm, args_.data(),
+ returns);
+ break;
+ }
+ case kNumericPrefix: {
+ ++len;
+ byte numeric_index =
+ this->template read_u8<validate>(this->pc_ + 1, "numeric index");
+ opcode = static_cast<WasmOpcode>(opcode << 8 | numeric_index);
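+ // Numeric opcodes below {kExprMemoryInit} are the saturating
+ // float-to-int conversions; the remainder are bulk-memory operations.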
+ if (opcode < kExprMemoryInit) {
CHECK_PROTOTYPE_OPCODE(sat_f2i_conversions);
- ++len;
- byte numeric_index = this->template read_u8<validate>(
- this->pc_ + 1, "numeric index");
- opcode = static_cast<WasmOpcode>(opcode << 8 | numeric_index);
- TRACE_PART(TRACE_INST_FORMAT, startrel(this->pc_),
- WasmOpcodes::OpcodeName(opcode));
- sig = WasmOpcodes::Signature(opcode);
- if (sig == nullptr) {
- this->errorf(this->pc_, "Unrecognized numeric opcode: %x\n",
- opcode);
- return;
- }
- BuildSimpleOperator(opcode, sig);
- break;
- }
- case kSimdPrefix: {
- CHECK_PROTOTYPE_OPCODE(simd);
- len++;
- byte simd_index =
- this->template read_u8<validate>(this->pc_ + 1, "simd index");
- opcode = static_cast<WasmOpcode>(opcode << 8 | simd_index);
- TRACE_PART(TRACE_INST_FORMAT, startrel(this->pc_),
- WasmOpcodes::OpcodeName(opcode));
- len += DecodeSimdOpcode(opcode);
- break;
- }
- case kAtomicPrefix: {
- CHECK_PROTOTYPE_OPCODE(threads);
- if (!CheckHasSharedMemory()) break;
- len++;
- byte atomic_index =
- this->template read_u8<validate>(this->pc_ + 1, "atomic index");
- opcode = static_cast<WasmOpcode>(opcode << 8 | atomic_index);
- TRACE_PART(TRACE_INST_FORMAT, startrel(this->pc_),
- WasmOpcodes::OpcodeName(opcode));
- len += DecodeAtomicOpcode(opcode);
- break;
+ } else {
+ CHECK_PROTOTYPE_OPCODE(bulk_memory);
}
+ TRACE_PART(TRACE_INST_FORMAT, startrel(this->pc_),
+ WasmOpcodes::OpcodeName(opcode));
+ len += DecodeNumericOpcode(opcode);
+ break;
+ }
+ case kSimdPrefix: {
+ CHECK_PROTOTYPE_OPCODE(simd);
+ len++;
+ byte simd_index =
+ this->template read_u8<validate>(this->pc_ + 1, "simd index");
+ opcode = static_cast<WasmOpcode>(opcode << 8 | simd_index);
+ TRACE_PART(TRACE_INST_FORMAT, startrel(this->pc_),
+ WasmOpcodes::OpcodeName(opcode));
+ len += DecodeSimdOpcode(opcode);
+ break;
+ }
+ case kAtomicPrefix: {
+ CHECK_PROTOTYPE_OPCODE(threads);
+ if (!CheckHasSharedMemory()) break;
+ len++;
+ byte atomic_index =
+ this->template read_u8<validate>(this->pc_ + 1, "atomic index");
+ opcode = static_cast<WasmOpcode>(opcode << 8 | atomic_index);
+ TRACE_PART(TRACE_INST_FORMAT, startrel(this->pc_),
+ WasmOpcodes::OpcodeName(opcode));
+ len += DecodeAtomicOpcode(opcode);
+ break;
+ }
// Note that prototype opcodes are not handled in the fastpath
// above this switch, to avoid checking a feature flag.
#define SIMPLE_PROTOTYPE_CASE(name, opc, sig) \
case kExpr##name: /* fallthrough */
- FOREACH_SIMPLE_PROTOTYPE_OPCODE(SIMPLE_PROTOTYPE_CASE)
+ FOREACH_SIMPLE_PROTOTYPE_OPCODE(SIMPLE_PROTOTYPE_CASE)
#undef SIMPLE_PROTOTYPE_CASE
- BuildSimplePrototypeOperator(opcode);
- break;
- default: {
- // Deal with special asmjs opcodes.
- if (this->module_ != nullptr &&
- this->module_->origin == kAsmJsOrigin) {
- sig = WasmOpcodes::AsmjsSignature(opcode);
- if (sig) {
- BuildSimpleOperator(opcode, sig);
- }
- } else {
- this->error("Invalid opcode");
- return;
+ BuildSimplePrototypeOperator(opcode);
+ break;
+ default: {
+ // Deal with special asmjs opcodes.
+ if (this->module_ != nullptr &&
+ this->module_->origin == kAsmJsOrigin) {
+ FunctionSig* sig = WasmOpcodes::AsmjsSignature(opcode);
+ if (sig) {
+ BuildSimpleOperator(opcode, sig);
}
+ } else {
+ this->error("Invalid opcode");
+ return;
}
}
}
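For orientation in the hunk above: numeric, SIMD, and atomic opcodes are encoded as a one-byte prefix followed by a sub-opcode byte, and the decoder folds the two bytes into a single 16-bit opcode value before dispatching. Below is a minimal standalone sketch of that folding; the prefix constant and the trailing assertion are illustrative assumptions, not V8 declarations.

#include <cstdint>

// Illustrative prefix byte; the real constants live in wasm-opcodes.h.
constexpr uint8_t kIllustrativeNumericPrefix = 0xfc;

// Fold a prefix byte and the sub-opcode byte that follows it into one
// opcode value, mirroring `opcode << 8 | numeric_index` in the hunk above.
constexpr uint16_t MakePrefixedOpcode(uint8_t prefix, uint8_t sub_opcode) {
  return static_cast<uint16_t>(static_cast<uint16_t>(prefix) << 8 | sub_opcode);
}

static_assert(MakePrefixedOpcode(kIllustrativeNumericPrefix, 0x00) == 0xfc00,
              "prefix byte occupies the high byte of the combined opcode");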
@@ -2063,7 +2194,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
void EndControl() {
DCHECK(!control_.empty());
auto* current = &control_.back();
- stack_.resize(current->stack_depth);
+ stack_.erase(stack_.begin() + current->stack_depth, stack_.end());
CALL_INTERFACE_IF_REACHABLE(EndControl, current);
current->reachability = kUnreachable;
}
@@ -2075,7 +2206,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
merge->vals.first = get_val(0);
} else if (arity > 1) {
merge->vals.array = zone_->NewArray<Value>(arity);
- for (unsigned i = 0; i < arity; i++) {
+ for (uint32_t i = 0; i < arity; i++) {
merge->vals.array[i] = get_val(i);
}
}
@@ -2086,7 +2217,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
const byte* pc = this->pc_;
Value* args = this->args_.data();
InitMerge(&c->end_merge, imm.out_arity(), [pc, &imm](uint32_t i) {
- return Value::New(pc, imm.out_type(i));
+ return Value{pc, imm.out_type(i)};
});
InitMerge(&c->start_merge, imm.in_arity(),
[args](uint32_t i) { return args[i]; });
@@ -2095,7 +2226,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
// Pops arguments as required by signature into {args_}.
V8_INLINE void PopArgs(FunctionSig* sig) {
int count = sig ? static_cast<int>(sig->parameter_count()) : 0;
- args_.resize(count);
+ args_.resize(count, UnreachableValue(nullptr));
for (int i = count - 1; i >= 0; --i) {
args_[i] = Pop(i, sig->GetParam(i));
}
@@ -2106,38 +2237,26 @@ class WasmFullDecoder : public WasmDecoder<validate> {
return sig->return_count() == 0 ? kWasmStmt : sig->GetReturn();
}
- Control* PushControl(Control&& new_control) {
+ Control* PushControl(ControlKind kind) {
Reachability reachability =
control_.empty() ? kReachable : control_.back().innerReachability();
- control_.emplace_back(std::move(new_control));
- Control* c = &control_.back();
- c->reachability = reachability;
- c->start_merge.reached = c->reachable();
- return c;
- }
-
- Control* PushBlock() {
- return PushControl(Control::Block(this->pc_, stack_size()));
- }
- Control* PushLoop() {
- return PushControl(Control::Loop(this->pc_, stack_size()));
- }
- Control* PushIf() {
- return PushControl(Control::If(this->pc_, stack_size()));
- }
- Control* PushTry() {
- // current_catch_ = static_cast<int32_t>(control_.size() - 1);
- return PushControl(Control::Try(this->pc_, stack_size()));
+ control_.emplace_back(kind, stack_size(), this->pc_, reachability);
+ return &control_.back();
}
void PopControl(Control* c) {
DCHECK_EQ(c, &control_.back());
CALL_INTERFACE_IF_PARENT_REACHABLE(PopControl, c);
- bool reached = c->end_merge.reached;
+
+ // A loop just leaves the values on the stack.
+ if (!c->is_loop()) PushMergeValues(c, &c->end_merge);
+
+ bool parent_reached =
+ c->reachable() || c->end_merge.reached || c->is_onearmed_if();
control_.pop_back();
// If the parent block was reachable before, but the popped control does not
- // return to here, this block becomes indirectly unreachable.
- if (!control_.empty() && !reached && control_.back().reachable()) {
+ // return to here, this block becomes "spec only reachable".
+ if (!parent_reached && control_.back().reachable()) {
control_.back().reachability = kSpecOnlyReachable;
}
}
@@ -2162,7 +2281,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
return imm.length;
}
- unsigned SimdExtractLane(WasmOpcode opcode, ValueType type) {
+ uint32_t SimdExtractLane(WasmOpcode opcode, ValueType type) {
SimdLaneImmediate<validate> imm(this, this->pc_);
if (this->Validate(this->pc_, opcode, imm)) {
Value inputs[] = {Pop(0, kWasmS128)};
@@ -2173,10 +2292,11 @@ class WasmFullDecoder : public WasmDecoder<validate> {
return imm.length;
}
- unsigned SimdReplaceLane(WasmOpcode opcode, ValueType type) {
+ uint32_t SimdReplaceLane(WasmOpcode opcode, ValueType type) {
SimdLaneImmediate<validate> imm(this, this->pc_);
if (this->Validate(this->pc_, opcode, imm)) {
- Value inputs[2];
+ Value inputs[2] = {UnreachableValue(this->pc_),
+ UnreachableValue(this->pc_)};
inputs[1] = Pop(1, type);
inputs[0] = Pop(0, kWasmS128);
auto* result = Push(kWasmS128);
@@ -2186,7 +2306,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
return imm.length;
}
- unsigned SimdShiftOp(WasmOpcode opcode) {
+ uint32_t SimdShiftOp(WasmOpcode opcode) {
SimdShiftImmediate<validate> imm(this, this->pc_);
if (this->Validate(this->pc_, opcode, imm)) {
auto input = Pop(0, kWasmS128);
@@ -2196,7 +2316,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
return imm.length;
}
- unsigned Simd8x16ShuffleOp() {
+ uint32_t Simd8x16ShuffleOp() {
Simd8x16ShuffleImmediate<validate> imm(this, this->pc_);
if (this->Validate(this->pc_, imm)) {
auto input1 = Pop(1, kWasmS128);
@@ -2208,8 +2328,8 @@ class WasmFullDecoder : public WasmDecoder<validate> {
return 16;
}
- unsigned DecodeSimdOpcode(WasmOpcode opcode) {
- unsigned len = 0;
+ uint32_t DecodeSimdOpcode(WasmOpcode opcode) {
+ uint32_t len = 0;
switch (opcode) {
case kExprF32x4ExtractLane: {
len = SimdExtractLane(opcode, kWasmF32);
@@ -2262,14 +2382,14 @@ class WasmFullDecoder : public WasmDecoder<validate> {
PopArgs(sig);
auto* results =
sig->return_count() == 0 ? nullptr : Push(GetReturnType(sig));
- CALL_INTERFACE_IF_REACHABLE(SimdOp, opcode, vec2vec(args_), results);
+ CALL_INTERFACE_IF_REACHABLE(SimdOp, opcode, VectorOf(args_), results);
}
}
return len;
}
- unsigned DecodeAtomicOpcode(WasmOpcode opcode) {
- unsigned len = 0;
+ uint32_t DecodeAtomicOpcode(WasmOpcode opcode) {
+ uint32_t len = 0;
ValueType ret_type;
FunctionSig* sig = WasmOpcodes::Signature(opcode);
if (sig != nullptr) {
@@ -2300,7 +2420,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
len += imm.length;
PopArgs(sig);
auto result = ret_type == kWasmStmt ? nullptr : Push(GetReturnType(sig));
- CALL_INTERFACE_IF_REACHABLE(AtomicOp, opcode, vec2vec(args_), imm,
+ CALL_INTERFACE_IF_REACHABLE(AtomicOp, opcode, VectorOf(args_), imm,
result);
} else {
this->error("invalid atomic opcode");
@@ -2308,36 +2428,116 @@ class WasmFullDecoder : public WasmDecoder<validate> {
return len;
}
- void DoReturn(Control* c, bool implicit) {
- int return_count = static_cast<int>(this->sig_->return_count());
- args_.resize(return_count);
-
- // Pop return values off the stack in reverse order.
- for (int i = return_count - 1; i >= 0; --i) {
- args_[i] = Pop(i, this->sig_->GetReturn(i));
+ unsigned DecodeNumericOpcode(WasmOpcode opcode) {
+ unsigned len = 0;
+ FunctionSig* sig = WasmOpcodes::Signature(opcode);
+ if (sig != nullptr) {
+ switch (opcode) {
+ case kExprI32SConvertSatF32:
+ case kExprI32UConvertSatF32:
+ case kExprI32SConvertSatF64:
+ case kExprI32UConvertSatF64:
+ case kExprI64SConvertSatF32:
+ case kExprI64UConvertSatF32:
+ case kExprI64SConvertSatF64:
+ case kExprI64UConvertSatF64:
+ BuildSimpleOperator(opcode, sig);
+ break;
+ case kExprMemoryInit: {
+ MemoryInitImmediate<validate> imm(this, this->pc_);
+ if (!this->Validate(imm)) break;
+ len += imm.length;
+ auto size = Pop(2, sig->GetParam(2));
+ auto src = Pop(1, sig->GetParam(1));
+ auto dst = Pop(0, sig->GetParam(0));
+ CALL_INTERFACE_IF_REACHABLE(MemoryInit, imm, dst, src, size);
+ break;
+ }
+ case kExprMemoryDrop: {
+ MemoryDropImmediate<validate> imm(this, this->pc_);
+ if (!this->Validate(imm)) break;
+ len += imm.length;
+ CALL_INTERFACE_IF_REACHABLE(MemoryDrop, imm);
+ break;
+ }
+ case kExprMemoryCopy: {
+ MemoryIndexImmediate<validate> imm(this, this->pc_ + 1);
+ if (!this->Validate(imm)) break;
+ len += imm.length;
+ auto size = Pop(2, sig->GetParam(2));
+ auto src = Pop(1, sig->GetParam(1));
+ auto dst = Pop(0, sig->GetParam(0));
+ CALL_INTERFACE_IF_REACHABLE(MemoryCopy, imm, dst, src, size);
+ break;
+ }
+ case kExprMemoryFill: {
+ MemoryIndexImmediate<validate> imm(this, this->pc_ + 1);
+ if (!this->Validate(imm)) break;
+ len += imm.length;
+ auto size = Pop(2, sig->GetParam(2));
+ auto value = Pop(1, sig->GetParam(1));
+ auto dst = Pop(0, sig->GetParam(0));
+ CALL_INTERFACE_IF_REACHABLE(MemoryFill, imm, dst, value, size);
+ break;
+ }
+ case kExprTableInit: {
+ TableInitImmediate<validate> imm(this, this->pc_);
+ if (!this->Validate(imm)) break;
+ len += imm.length;
+ PopArgs(sig);
+ CALL_INTERFACE_IF_REACHABLE(TableInit, imm, VectorOf(args_));
+ break;
+ }
+ case kExprTableDrop: {
+ TableDropImmediate<validate> imm(this, this->pc_);
+ if (!this->Validate(imm)) break;
+ len += imm.length;
+ CALL_INTERFACE_IF_REACHABLE(TableDrop, imm);
+ break;
+ }
+ case kExprTableCopy: {
+ TableIndexImmediate<validate> imm(this, this->pc_ + 1);
+ if (!this->Validate(this->pc_ + 1, imm)) break;
+ len += imm.length;
+ PopArgs(sig);
+ CALL_INTERFACE_IF_REACHABLE(TableCopy, imm, VectorOf(args_));
+ break;
+ }
+ default:
+ this->error("invalid numeric opcode");
+ break;
+ }
+ } else {
+ this->error("invalid numeric opcode");
}
+ return len;
+ }
- // Simulate that an implicit return morally comes after the current block.
- if (implicit && c->end_merge.reached) c->reachability = kReachable;
- CALL_INTERFACE_IF_REACHABLE(DoReturn, vec2vec(args_), implicit);
+ void DoReturn() {
+ size_t return_count = this->sig_->return_count();
+ DCHECK_GE(stack_.size(), return_count);
+ Vector<Value> return_values =
+ return_count == 0
+ ? Vector<Value>{}
+ : Vector<Value>{&*(stack_.end() - return_count), return_count};
- EndControl();
+ CALL_INTERFACE_IF_REACHABLE(DoReturn, return_values);
}
inline Value* Push(ValueType type) {
DCHECK_NE(kWasmStmt, type);
- stack_.push_back(Value::New(this->pc_, type));
+ stack_.emplace_back(this->pc_, type);
return &stack_.back();
}
void PushMergeValues(Control* c, Merge<Value>* merge) {
DCHECK_EQ(c, &control_.back());
DCHECK(merge == &c->start_merge || merge == &c->end_merge);
- stack_.resize(c->stack_depth);
+ stack_.erase(stack_.begin() + c->stack_depth, stack_.end());
if (merge->arity == 1) {
stack_.push_back(merge->vals.first);
} else {
- for (unsigned i = 0; i < merge->arity; i++) {
+ for (uint32_t i = 0; i < merge->arity; i++) {
stack_.push_back(merge->vals.array[i]);
}
}
@@ -2354,7 +2554,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
return stack_.data() + old_size;
}
- Value Pop(int index, ValueType expected) {
+ V8_INLINE Value Pop(int index, ValueType expected) {
auto val = Pop();
if (!VALIDATE(val.type == expected || val.type == kWasmVar ||
expected == kWasmVar)) {
@@ -2366,7 +2566,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
return val;
}
- Value Pop() {
+ V8_INLINE Value Pop() {
DCHECK(!control_.empty());
uint32_t limit = control_.back().stack_depth;
if (stack_.size() <= limit) {
@@ -2375,7 +2575,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
this->errorf(this->pc_, "%s found empty stack",
SafeOpcodeNameAt(this->pc_));
}
- return Value::Unreachable(this->pc_);
+ return UnreachableValue(this->pc_);
}
auto val = stack_.back();
stack_.pop_back();
@@ -2396,22 +2596,24 @@ class WasmFullDecoder : public WasmDecoder<validate> {
bool TypeCheckMergeValues(Control* c, Merge<Value>* merge) {
DCHECK(merge == &c->start_merge || merge == &c->end_merge);
DCHECK_GE(stack_.size(), c->stack_depth + merge->arity);
+ // The computation of {stack_values} is only valid if {merge->arity} is >0.
+ DCHECK_LT(0, merge->arity);
+ Value* stack_values = &*(stack_.end() - merge->arity);
// Typecheck the topmost {merge->arity} values on the stack.
for (uint32_t i = 0; i < merge->arity; ++i) {
- auto& val = GetMergeValueFromStack(c, merge, i);
- auto& old = (*merge)[i];
- if (val.type != old.type) {
- // If {val.type} is polymorphic, which results from unreachable, make
- // it more specific by using the merge value's expected type.
- // If it is not polymorphic, this is a type error.
- if (!VALIDATE(val.type == kWasmVar)) {
- this->errorf(
- this->pc_, "type error in merge[%u] (expected %s, got %s)", i,
- ValueTypes::TypeName(old.type), ValueTypes::TypeName(val.type));
- return false;
- }
- val.type = old.type;
+ Value& val = stack_values[i];
+ Value& old = (*merge)[i];
+ if (val.type == old.type) continue;
+ // If {val.type} is polymorphic, which results from unreachable, make
+ // it more specific by using the merge value's expected type.
+ // If it is not polymorphic, this is a type error.
+ if (!VALIDATE(val.type == kWasmVar)) {
+ this->errorf(this->pc_, "type error in merge[%u] (expected %s, got %s)",
+ i, ValueTypes::TypeName(old.type),
+ ValueTypes::TypeName(val.type));
+ return false;
}
+ val.type = old.type;
}
return true;
@@ -2431,13 +2633,15 @@ class WasmFullDecoder : public WasmDecoder<validate> {
expected, startrel(c->pc), actual);
return false;
}
+ if (expected == 0) return true; // Fast path.
return TypeCheckMergeValues(c, &c->end_merge);
}
- bool TypeCheckBreak(Control* c) {
- // Breaks must have at least the number of values expected; can have more.
+ bool TypeCheckBranch(Control* c) {
+ // Branches must have at least the number of values expected; can have more.
uint32_t expected = c->br_merge()->arity;
+ if (expected == 0) return true; // Fast path.
DCHECK_GE(stack_.size(), control_.back().stack_depth);
uint32_t actual =
static_cast<uint32_t>(stack_.size()) - control_.back().stack_depth;
@@ -2450,6 +2654,42 @@ class WasmFullDecoder : public WasmDecoder<validate> {
return TypeCheckMergeValues(c, c->br_merge());
}
+ bool TypeCheckReturn() {
+ // Returns must have at least the number of values expected; can have more.
+ uint32_t num_returns = static_cast<uint32_t>(this->sig_->return_count());
+ DCHECK_GE(stack_.size(), control_.back().stack_depth);
+ uint32_t actual =
+ static_cast<uint32_t>(stack_.size()) - control_.back().stack_depth;
+ if (!InsertUnreachablesIfNecessary(num_returns, actual)) {
+ this->errorf(this->pc_,
+ "expected %u elements on the stack for return, found %u",
+ num_returns, actual);
+ return false;
+ }
+
+ // Typecheck the topmost {num_returns} values on the stack.
+ if (num_returns == 0) return true;
+ // This line requires num_returns > 0.
+ Value* stack_values = &*(stack_.end() - num_returns);
+ for (uint32_t i = 0; i < num_returns; ++i) {
+ auto& val = stack_values[i];
+ ValueType expected_type = this->sig_->GetReturn(i);
+ if (val.type == expected_type) continue;
+ // If {val.type} is polymorphic, which results from unreachable,
+ // make it more specific by using the return's expected type.
+ // If it is not polymorphic, this is a type error.
+ if (!VALIDATE(val.type == kWasmVar)) {
+ this->errorf(this->pc_,
+ "type error in return[%u] (expected %s, got %s)", i,
+ ValueTypes::TypeName(expected_type),
+ ValueTypes::TypeName(val.type));
+ return false;
+ }
+ val.type = expected_type;
+ }
+ return true;
+ }
+
inline bool InsertUnreachablesIfNecessary(uint32_t expected,
uint32_t actual) {
if (V8_LIKELY(actual >= expected)) {
@@ -2464,13 +2704,13 @@ class WasmFullDecoder : public WasmDecoder<validate> {
// unreachable, insert unreachable values below the actual values.
// This simplifies {TypeCheckMergeValues}.
auto pos = stack_.begin() + (stack_.size() - actual);
- stack_.insert(pos, (expected - actual), Value::Unreachable(this->pc_));
+ stack_.insert(pos, expected - actual, UnreachableValue(this->pc_));
return true;
}
void onFirstError() override {
this->end_ = this->pc_; // Terminate decoding loop.
- TRACE(" !%s\n", this->error_msg_.c_str());
+ TRACE(" !%s\n", this->error_.message().c_str());
CALL_INTERFACE(OnFirstError);
}
@@ -2485,13 +2725,13 @@ class WasmFullDecoder : public WasmDecoder<validate> {
BuildSimpleOperator(opcode, sig);
}
- inline void BuildSimpleOperator(WasmOpcode opcode, FunctionSig* sig) {
+ void BuildSimpleOperator(WasmOpcode opcode, FunctionSig* sig) {
switch (sig->parameter_count()) {
case 1: {
auto val = Pop(0, sig->GetParam(0));
auto* ret =
sig->return_count() == 0 ? nullptr : Push(sig->GetReturn(0));
- CALL_INTERFACE_IF_REACHABLE(UnOp, opcode, sig, val, ret);
+ CALL_INTERFACE_IF_REACHABLE(UnOp, opcode, val, ret);
break;
}
case 2: {
@@ -2499,13 +2739,35 @@ class WasmFullDecoder : public WasmDecoder<validate> {
auto lval = Pop(0, sig->GetParam(0));
auto* ret =
sig->return_count() == 0 ? nullptr : Push(sig->GetReturn(0));
- CALL_INTERFACE_IF_REACHABLE(BinOp, opcode, sig, lval, rval, ret);
+ CALL_INTERFACE_IF_REACHABLE(BinOp, opcode, lval, rval, ret);
break;
}
default:
UNREACHABLE();
}
}
+
+ void BuildSimpleOperator(WasmOpcode opcode, ValueType return_type,
+ ValueType arg_type) {
+ auto val = Pop(0, arg_type);
+ auto* ret = return_type == kWasmStmt ? nullptr : Push(return_type);
+ CALL_INTERFACE_IF_REACHABLE(UnOp, opcode, val, ret);
+ }
+
+ void BuildSimpleOperator(WasmOpcode opcode, ValueType return_type,
+ ValueType lhs_type, ValueType rhs_type) {
+ auto rval = Pop(1, rhs_type);
+ auto lval = Pop(0, lhs_type);
+ auto* ret = return_type == kWasmStmt ? nullptr : Push(return_type);
+ CALL_INTERFACE_IF_REACHABLE(BinOp, opcode, lval, rval, ret);
+ }
+
+#define DEFINE_SIMPLE_SIG_OPERATOR(sig, ...) \
+ void BuildSimpleOperator_##sig(WasmOpcode opcode) { \
+ BuildSimpleOperator(opcode, __VA_ARGS__); \
+ }
+ FOREACH_SIGNATURE(DEFINE_SIMPLE_SIG_OPERATOR)
+#undef DEFINE_SIMPLE_SIG_OPERATOR
};
#undef CALL_INTERFACE
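A recurring invariant in the Pop() and type-check changes above: values produced in unreachable code carry the polymorphic type kWasmVar and satisfy any expected type, which is what lets the decoder keep validating past an unreachable point. A self-contained sketch of that rule, with simplified stand-in types rather than the decoder's real Value class:

#include <cassert>
#include <vector>

enum SimpleType { kI32, kI64, kVar };  // kVar: polymorphic, from unreachable code

struct StackValue {
  SimpleType type;
};

// Pop with an expected type: an empty stack in unreachable code synthesizes
// a polymorphic value, and a polymorphic value matches any expectation,
// mirroring the `val.type == kWasmVar || expected == kWasmVar` check above.
StackValue PopChecked(std::vector<StackValue>* stack, SimpleType expected) {
  if (stack->empty()) return StackValue{kVar};
  StackValue val = stack->back();
  stack->pop_back();
  assert(val.type == expected || val.type == kVar || expected == kVar);
  return val;
}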
diff --git a/deps/v8/src/wasm/function-body-decoder.cc b/deps/v8/src/wasm/function-body-decoder.cc
index 2c5ea465cc..27cbe10b7e 100644
--- a/deps/v8/src/wasm/function-body-decoder.cc
+++ b/deps/v8/src/wasm/function-body-decoder.cc
@@ -2,17 +2,14 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/signature.h"
+#include "src/wasm/function-body-decoder.h"
-#include "src/base/platform/elapsed-timer.h"
-#include "src/compiler/wasm-compiler.h"
#include "src/flags.h"
#include "src/handles.h"
#include "src/objects-inl.h"
#include "src/ostreams.h"
#include "src/wasm/decoder.h"
#include "src/wasm/function-body-decoder-impl.h"
-#include "src/wasm/function-body-decoder.h"
#include "src/wasm/wasm-limits.h"
#include "src/wasm/wasm-linkage.h"
#include "src/wasm/wasm-module.h"
@@ -22,788 +19,6 @@ namespace v8 {
namespace internal {
namespace wasm {
-namespace {
-
-// An SsaEnv environment carries the current local variable renaming
-// as well as the current effect and control dependency in the TF graph.
-// It maintains a control state that tracks whether the environment
-// is reachable, has reached a control end, or has been merged.
-struct SsaEnv {
- enum State { kControlEnd, kUnreachable, kReached, kMerged };
-
- State state;
- TFNode* control;
- TFNode* effect;
- compiler::WasmInstanceCacheNodes instance_cache;
- TFNode** locals;
-
- bool reached() const { return state >= kReached; }
- void Kill(State new_state = kControlEnd) {
- state = new_state;
- locals = nullptr;
- control = nullptr;
- effect = nullptr;
- instance_cache = {};
- }
- void SetNotMerged() {
- if (state == kMerged) state = kReached;
- }
-};
-
-#define BUILD(func, ...) \
- ([&] { \
- DCHECK(ssa_env_->reached()); \
- DCHECK(decoder->ok()); \
- return CheckForException(decoder, builder_->func(__VA_ARGS__)); \
- })()
-
-constexpr uint32_t kNullCatch = static_cast<uint32_t>(-1);
-
-class WasmGraphBuildingInterface {
- public:
- static constexpr Decoder::ValidateFlag validate = Decoder::kValidate;
- using FullDecoder = WasmFullDecoder<validate, WasmGraphBuildingInterface>;
-
- struct Value : public ValueWithNamedConstructors<Value> {
- TFNode* node;
- };
-
- struct TryInfo : public ZoneObject {
- SsaEnv* catch_env;
- TFNode* exception = nullptr;
-
- explicit TryInfo(SsaEnv* c) : catch_env(c) {}
- };
-
- struct Control : public ControlWithNamedConstructors<Control, Value> {
- SsaEnv* end_env; // end environment for the construct.
- SsaEnv* false_env; // false environment (only for if).
- TryInfo* try_info; // information used for compiling try statements.
- int32_t previous_catch; // previous Control (on the stack) with a catch.
- };
-
- explicit WasmGraphBuildingInterface(TFBuilder* builder) : builder_(builder) {}
-
- void StartFunction(FullDecoder* decoder) {
- SsaEnv* ssa_env =
- reinterpret_cast<SsaEnv*>(decoder->zone()->New(sizeof(SsaEnv)));
- uint32_t num_locals = decoder->NumLocals();
- uint32_t env_count = num_locals;
- size_t size = sizeof(TFNode*) * env_count;
- ssa_env->state = SsaEnv::kReached;
- ssa_env->locals =
- size > 0 ? reinterpret_cast<TFNode**>(decoder->zone()->New(size))
- : nullptr;
-
-    // The first '+ 1' is needed by the TF Start node, the second '+ 1' is
-    // for the instance parameter.
- TFNode* start = builder_->Start(
- static_cast<int>(decoder->sig_->parameter_count() + 1 + 1));
- ssa_env->effect = start;
- ssa_env->control = start;
- // Initialize effect and control before initializing the locals default
- // values (which might require instance loads) or loading the context.
- builder_->set_effect_ptr(&ssa_env->effect);
- builder_->set_control_ptr(&ssa_env->control);
- // Initialize the instance parameter (index 0).
- builder_->set_instance_node(builder_->Param(kWasmInstanceParameterIndex));
- // Initialize local variables. Parameters are shifted by 1 because of the
-    // instance parameter.
- uint32_t index = 0;
- for (; index < decoder->sig_->parameter_count(); ++index) {
- ssa_env->locals[index] = builder_->Param(index + 1);
- }
- while (index < num_locals) {
- ValueType type = decoder->GetLocalType(index);
- TFNode* node = DefaultValue(type);
- while (index < num_locals && decoder->GetLocalType(index) == type) {
- // Do a whole run of like-typed locals at a time.
- ssa_env->locals[index++] = node;
- }
- }
- LoadContextIntoSsa(ssa_env);
- SetEnv(ssa_env);
- }
-
-  // Reload the instance cache entries into the SSA environment.
- void LoadContextIntoSsa(SsaEnv* ssa_env) {
- if (!ssa_env || !ssa_env->reached()) return;
- builder_->InitInstanceCache(&ssa_env->instance_cache);
- }
-
- void StartFunctionBody(FullDecoder* decoder, Control* block) {
- SsaEnv* break_env = ssa_env_;
- SetEnv(Steal(decoder->zone(), break_env));
- block->end_env = break_env;
- }
-
- void FinishFunction(FullDecoder*) { builder_->PatchInStackCheckIfNeeded(); }
-
- void OnFirstError(FullDecoder*) {}
-
- void NextInstruction(FullDecoder*, WasmOpcode) {}
-
- void Block(FullDecoder* decoder, Control* block) {
- // The break environment is the outer environment.
- block->end_env = ssa_env_;
- SetEnv(Steal(decoder->zone(), ssa_env_));
- }
-
- void Loop(FullDecoder* decoder, Control* block) {
- SsaEnv* finish_try_env = Steal(decoder->zone(), ssa_env_);
- block->end_env = finish_try_env;
- // The continue environment is the inner environment.
- SetEnv(PrepareForLoop(decoder, finish_try_env));
- ssa_env_->SetNotMerged();
- if (!decoder->ok()) return;
- // Wrap input merge into phis.
- for (unsigned i = 0; i < block->start_merge.arity; ++i) {
- Value& val = block->start_merge[i];
- val.node = builder_->Phi(val.type, 1, &val.node, block->end_env->control);
- }
- }
-
- void Try(FullDecoder* decoder, Control* block) {
- SsaEnv* outer_env = ssa_env_;
- SsaEnv* catch_env = Split(decoder, outer_env);
-    // Mark the catch environment as unreachable, since it is only accessible
-    // through catch unwinding (i.e. landing pads).
- catch_env->state = SsaEnv::kUnreachable;
- SsaEnv* try_env = Steal(decoder->zone(), outer_env);
- SetEnv(try_env);
- TryInfo* try_info = new (decoder->zone()) TryInfo(catch_env);
- block->end_env = outer_env;
- block->try_info = try_info;
- block->previous_catch = current_catch_;
- current_catch_ = static_cast<int32_t>(decoder->control_depth() - 1);
- }
-
- void If(FullDecoder* decoder, const Value& cond, Control* if_block) {
- TFNode* if_true = nullptr;
- TFNode* if_false = nullptr;
- if (ssa_env_->reached()) {
- BUILD(BranchNoHint, cond.node, &if_true, &if_false);
- }
- SsaEnv* end_env = ssa_env_;
- SsaEnv* false_env = Split(decoder, ssa_env_);
- false_env->control = if_false;
- SsaEnv* true_env = Steal(decoder->zone(), ssa_env_);
- true_env->control = if_true;
- if_block->end_env = end_env;
- if_block->false_env = false_env;
- SetEnv(true_env);
- }
-
- void FallThruTo(FullDecoder* decoder, Control* c) {
- DCHECK(!c->is_loop());
- MergeValuesInto(decoder, c, &c->end_merge);
- }
-
- void PopControl(FullDecoder* decoder, Control* block) {
- if (!block->is_loop()) SetEnv(block->end_env);
- }
-
- void EndControl(FullDecoder* decoder, Control* block) { ssa_env_->Kill(); }
-
- void UnOp(FullDecoder* decoder, WasmOpcode opcode, FunctionSig* sig,
- const Value& value, Value* result) {
- result->node = BUILD(Unop, opcode, value.node, decoder->position());
- }
-
- void BinOp(FullDecoder* decoder, WasmOpcode opcode, FunctionSig* sig,
- const Value& lhs, const Value& rhs, Value* result) {
- auto node = BUILD(Binop, opcode, lhs.node, rhs.node, decoder->position());
- if (result) result->node = node;
- }
-
- void I32Const(FullDecoder* decoder, Value* result, int32_t value) {
- result->node = builder_->Int32Constant(value);
- }
-
- void I64Const(FullDecoder* decoder, Value* result, int64_t value) {
- result->node = builder_->Int64Constant(value);
- }
-
- void F32Const(FullDecoder* decoder, Value* result, float value) {
- result->node = builder_->Float32Constant(value);
- }
-
- void F64Const(FullDecoder* decoder, Value* result, double value) {
- result->node = builder_->Float64Constant(value);
- }
-
- void RefNull(FullDecoder* decoder, Value* result) {
- result->node = builder_->RefNull();
- }
-
- void Drop(FullDecoder* decoder, const Value& value) {}
-
- void DoReturn(FullDecoder* decoder, Vector<Value> values, bool implicit) {
- if (implicit) {
- DCHECK_EQ(1, decoder->control_depth());
- SetEnv(decoder->control_at(0)->end_env);
- }
- size_t num_values = values.size();
- TFNode** buffer = GetNodes(values);
- for (size_t i = 0; i < num_values; ++i) {
- buffer[i] = values[i].node;
- }
- BUILD(Return, static_cast<unsigned>(values.size()), buffer);
- }
-
- void GetLocal(FullDecoder* decoder, Value* result,
- const LocalIndexImmediate<validate>& imm) {
- if (!ssa_env_->locals) return; // unreachable
- result->node = ssa_env_->locals[imm.index];
- }
-
- void SetLocal(FullDecoder* decoder, const Value& value,
- const LocalIndexImmediate<validate>& imm) {
- if (!ssa_env_->locals) return; // unreachable
- ssa_env_->locals[imm.index] = value.node;
- }
-
- void TeeLocal(FullDecoder* decoder, const Value& value, Value* result,
- const LocalIndexImmediate<validate>& imm) {
- result->node = value.node;
- if (!ssa_env_->locals) return; // unreachable
- ssa_env_->locals[imm.index] = value.node;
- }
-
- void GetGlobal(FullDecoder* decoder, Value* result,
- const GlobalIndexImmediate<validate>& imm) {
- result->node = BUILD(GetGlobal, imm.index);
- }
-
- void SetGlobal(FullDecoder* decoder, const Value& value,
- const GlobalIndexImmediate<validate>& imm) {
- BUILD(SetGlobal, imm.index, value.node);
- }
-
- void Unreachable(FullDecoder* decoder) {
- BUILD(Unreachable, decoder->position());
- }
-
- void Select(FullDecoder* decoder, const Value& cond, const Value& fval,
- const Value& tval, Value* result) {
- TFNode* controls[2];
- BUILD(BranchNoHint, cond.node, &controls[0], &controls[1]);
- TFNode* merge = BUILD(Merge, 2, controls);
- TFNode* vals[2] = {tval.node, fval.node};
- TFNode* phi = BUILD(Phi, tval.type, 2, vals, merge);
- result->node = phi;
- ssa_env_->control = merge;
- }
-
- void Br(FullDecoder* decoder, Control* target) {
- MergeValuesInto(decoder, target, target->br_merge());
- }
-
- void BrIf(FullDecoder* decoder, const Value& cond, Control* target) {
- SsaEnv* fenv = ssa_env_;
- SsaEnv* tenv = Split(decoder, fenv);
- fenv->SetNotMerged();
- BUILD(BranchNoHint, cond.node, &tenv->control, &fenv->control);
- ssa_env_ = tenv;
- Br(decoder, target);
- ssa_env_ = fenv;
- }
-
- void BrTable(FullDecoder* decoder, const BranchTableImmediate<validate>& imm,
- const Value& key) {
- if (imm.table_count == 0) {
- // Only a default target. Do the equivalent of br.
- uint32_t target = BranchTableIterator<validate>(decoder, imm).next();
- Br(decoder, decoder->control_at(target));
- return;
- }
-
- SsaEnv* break_env = ssa_env_;
- // Build branches to the various blocks based on the table.
- TFNode* sw = BUILD(Switch, imm.table_count + 1, key.node);
-
- SsaEnv* copy = Steal(decoder->zone(), break_env);
- ssa_env_ = copy;
- BranchTableIterator<validate> iterator(decoder, imm);
- while (iterator.has_next()) {
- uint32_t i = iterator.cur_index();
- uint32_t target = iterator.next();
- ssa_env_ = Split(decoder, copy);
- ssa_env_->control =
- (i == imm.table_count) ? BUILD(IfDefault, sw) : BUILD(IfValue, i, sw);
- Br(decoder, decoder->control_at(target));
- }
- DCHECK(decoder->ok());
- ssa_env_ = break_env;
- }
-
- void Else(FullDecoder* decoder, Control* if_block) {
- SetEnv(if_block->false_env);
- }
-
- void LoadMem(FullDecoder* decoder, LoadType type,
- const MemoryAccessImmediate<validate>& imm, const Value& index,
- Value* result) {
- result->node =
- BUILD(LoadMem, type.value_type(), type.mem_type(), index.node,
- imm.offset, imm.alignment, decoder->position());
- }
-
- void StoreMem(FullDecoder* decoder, StoreType type,
- const MemoryAccessImmediate<validate>& imm, const Value& index,
- const Value& value) {
- BUILD(StoreMem, type.mem_rep(), index.node, imm.offset, imm.alignment,
- value.node, decoder->position(), type.value_type());
- }
-
- void CurrentMemoryPages(FullDecoder* decoder, Value* result) {
- result->node = BUILD(CurrentMemoryPages);
- }
-
- void GrowMemory(FullDecoder* decoder, const Value& value, Value* result) {
- result->node = BUILD(GrowMemory, value.node);
- // Always reload the instance cache after growing memory.
- LoadContextIntoSsa(ssa_env_);
- }
-
- void CallDirect(FullDecoder* decoder,
- const CallFunctionImmediate<validate>& imm,
- const Value args[], Value returns[]) {
- DoCall(decoder, nullptr, imm.sig, imm.index, args, returns);
- }
-
- void CallIndirect(FullDecoder* decoder, const Value& index,
- const CallIndirectImmediate<validate>& imm,
- const Value args[], Value returns[]) {
- DoCall(decoder, index.node, imm.sig, imm.sig_index, args, returns);
- }
-
- void SimdOp(FullDecoder* decoder, WasmOpcode opcode, Vector<Value> args,
- Value* result) {
- TFNode** inputs = GetNodes(args);
- TFNode* node = BUILD(SimdOp, opcode, inputs);
- if (result) result->node = node;
- }
-
- void SimdLaneOp(FullDecoder* decoder, WasmOpcode opcode,
- const SimdLaneImmediate<validate> imm, Vector<Value> inputs,
- Value* result) {
- TFNode** nodes = GetNodes(inputs);
- result->node = BUILD(SimdLaneOp, opcode, imm.lane, nodes);
- }
-
- void SimdShiftOp(FullDecoder* decoder, WasmOpcode opcode,
- const SimdShiftImmediate<validate> imm, const Value& input,
- Value* result) {
- TFNode* inputs[] = {input.node};
- result->node = BUILD(SimdShiftOp, opcode, imm.shift, inputs);
- }
-
- void Simd8x16ShuffleOp(FullDecoder* decoder,
- const Simd8x16ShuffleImmediate<validate>& imm,
- const Value& input0, const Value& input1,
- Value* result) {
- TFNode* input_nodes[] = {input0.node, input1.node};
- result->node = BUILD(Simd8x16ShuffleOp, imm.shuffle, input_nodes);
- }
-
- TFNode* GetExceptionTag(FullDecoder* decoder,
- const ExceptionIndexImmediate<validate>& imm) {
- // TODO(kschimpf): Need to get runtime exception tag values. This
- // code only handles non-imported/exported exceptions.
- return BUILD(Int32Constant, imm.index);
- }
-
- void Throw(FullDecoder* decoder, const ExceptionIndexImmediate<validate>& imm,
- Control* block, const Vector<Value>& value_args) {
- int count = value_args.length();
- ZoneVector<TFNode*> args(count, decoder->zone());
- for (int i = 0; i < count; ++i) {
- args[i] = value_args[i].node;
- }
- BUILD(Throw, imm.index, imm.exception, vec2vec(args));
- Unreachable(decoder);
- EndControl(decoder, block);
- }
-
- void CatchException(FullDecoder* decoder,
- const ExceptionIndexImmediate<validate>& imm,
- Control* block, Vector<Value> values) {
- DCHECK(block->is_try_catch());
- TFNode* exception = block->try_info->exception;
- current_catch_ = block->previous_catch;
- SsaEnv* catch_env = block->try_info->catch_env;
- SetEnv(catch_env);
-
-    // The catch block is unreachable if the try block contains no possible
-    // throws. We only build a landing pad if some node in the try block can
-    // (possibly) throw. Otherwise the catch environments below remain empty.
- DCHECK_EQ(exception != nullptr, ssa_env_->reached());
-
- TFNode* if_catch = nullptr;
- TFNode* if_no_catch = nullptr;
- if (exception != nullptr) {
- // Get the exception tag and see if it matches the expected one.
- TFNode* caught_tag = BUILD(GetExceptionTag, exception);
- TFNode* exception_tag = BUILD(LoadExceptionTagFromTable, imm.index);
- TFNode* compare = BUILD(ExceptionTagEqual, caught_tag, exception_tag);
- BUILD(BranchNoHint, compare, &if_catch, &if_no_catch);
- }
-
- SsaEnv* if_no_catch_env = Split(decoder, ssa_env_);
- if_no_catch_env->control = if_no_catch;
- SsaEnv* if_catch_env = Steal(decoder->zone(), ssa_env_);
- if_catch_env->control = if_catch;
-
- SetEnv(if_no_catch_env);
- if (exception != nullptr) {
- // TODO(kschimpf): Generalize to allow more catches. Will force
- // moving no_catch code to END opcode.
- BUILD(Rethrow, exception);
- Unreachable(decoder);
- EndControl(decoder, block);
- }
-
- SetEnv(if_catch_env);
- if (exception != nullptr) {
- // TODO(kschimpf): Can't use BUILD() here, GetExceptionValues() returns
- // TFNode** rather than TFNode*. Fix to add landing pads.
- TFNode** caught_values =
- builder_->GetExceptionValues(exception, imm.exception);
- for (size_t i = 0, e = values.size(); i < e; ++i) {
- values[i].node = caught_values[i];
- }
- }
- }
-
- void AtomicOp(FullDecoder* decoder, WasmOpcode opcode, Vector<Value> args,
- const MemoryAccessImmediate<validate>& imm, Value* result) {
- TFNode** inputs = GetNodes(args);
- TFNode* node = BUILD(AtomicOp, opcode, inputs, imm.alignment, imm.offset,
- decoder->position());
- if (result) result->node = node;
- }
-
- private:
- SsaEnv* ssa_env_;
- TFBuilder* builder_;
- uint32_t current_catch_ = kNullCatch;
-
- TryInfo* current_try_info(FullDecoder* decoder) {
- return decoder->control_at(decoder->control_depth() - 1 - current_catch_)
- ->try_info;
- }
-
- TFNode** GetNodes(Value* values, size_t count) {
- TFNode** nodes = builder_->Buffer(count);
- for (size_t i = 0; i < count; ++i) {
- nodes[i] = values[i].node;
- }
- return nodes;
- }
-
- TFNode** GetNodes(Vector<Value> values) {
- return GetNodes(values.start(), values.size());
- }
-
- void SetEnv(SsaEnv* env) {
-#if DEBUG
- if (FLAG_trace_wasm_decoder) {
- char state = 'X';
- if (env) {
- switch (env->state) {
- case SsaEnv::kReached:
- state = 'R';
- break;
- case SsaEnv::kUnreachable:
- state = 'U';
- break;
- case SsaEnv::kMerged:
- state = 'M';
- break;
- case SsaEnv::kControlEnd:
- state = 'E';
- break;
- }
- }
- PrintF("{set_env = %p, state = %c", static_cast<void*>(env), state);
- if (env && env->control) {
- PrintF(", control = ");
- compiler::WasmGraphBuilder::PrintDebugName(env->control);
- }
- PrintF("}\n");
- }
-#endif
- ssa_env_ = env;
- // TODO(wasm): combine the control and effect pointers with instance cache.
- builder_->set_control_ptr(&env->control);
- builder_->set_effect_ptr(&env->effect);
- builder_->set_instance_cache(&env->instance_cache);
- }
-
- TFNode* CheckForException(FullDecoder* decoder, TFNode* node) {
- if (node == nullptr) return nullptr;
-
- const bool inside_try_scope = current_catch_ != kNullCatch;
-
- if (!inside_try_scope) return node;
-
- TFNode* if_success = nullptr;
- TFNode* if_exception = nullptr;
- if (!builder_->ThrowsException(node, &if_success, &if_exception)) {
- return node;
- }
-
- SsaEnv* success_env = Steal(decoder->zone(), ssa_env_);
- success_env->control = if_success;
-
- SsaEnv* exception_env = Split(decoder, success_env);
- exception_env->control = if_exception;
- TryInfo* try_info = current_try_info(decoder);
- Goto(decoder, exception_env, try_info->catch_env);
- TFNode* exception = try_info->exception;
- if (exception == nullptr) {
- DCHECK_EQ(SsaEnv::kReached, try_info->catch_env->state);
- try_info->exception = if_exception;
- } else {
- DCHECK_EQ(SsaEnv::kMerged, try_info->catch_env->state);
- try_info->exception = builder_->CreateOrMergeIntoPhi(
- MachineRepresentation::kWord32, try_info->catch_env->control,
- try_info->exception, if_exception);
- }
-
- SetEnv(success_env);
- return node;
- }
-
- TFNode* DefaultValue(ValueType type) {
- switch (type) {
- case kWasmI32:
- return builder_->Int32Constant(0);
- case kWasmI64:
- return builder_->Int64Constant(0);
- case kWasmF32:
- return builder_->Float32Constant(0);
- case kWasmF64:
- return builder_->Float64Constant(0);
- case kWasmS128:
- return builder_->S128Zero();
- case kWasmAnyRef:
- case kWasmExceptRef:
- return builder_->RefNull();
- default:
- UNREACHABLE();
- }
- }
-
- void MergeValuesInto(FullDecoder* decoder, Control* c, Merge<Value>* merge) {
- DCHECK(merge == &c->start_merge || merge == &c->end_merge);
- if (!ssa_env_->reached()) return;
-
- SsaEnv* target = c->end_env;
- const bool first = target->state == SsaEnv::kUnreachable;
- Goto(decoder, ssa_env_, target);
-
- uint32_t avail =
- decoder->stack_size() - decoder->control_at(0)->stack_depth;
- uint32_t start = avail >= merge->arity ? 0 : merge->arity - avail;
- for (uint32_t i = start; i < merge->arity; ++i) {
- auto& val = decoder->GetMergeValueFromStack(c, merge, i);
- auto& old = (*merge)[i];
- DCHECK_NOT_NULL(val.node);
- DCHECK(val.type == old.type || val.type == kWasmVar);
- old.node = first ? val.node
- : builder_->CreateOrMergeIntoPhi(
- ValueTypes::MachineRepresentationFor(old.type),
- target->control, old.node, val.node);
- }
- }
-
- void Goto(FullDecoder* decoder, SsaEnv* from, SsaEnv* to) {
- DCHECK_NOT_NULL(to);
- if (!from->reached()) return;
- switch (to->state) {
- case SsaEnv::kUnreachable: { // Overwrite destination.
- to->state = SsaEnv::kReached;
- to->locals = from->locals;
- to->control = from->control;
- to->effect = from->effect;
- to->instance_cache = from->instance_cache;
- break;
- }
- case SsaEnv::kReached: { // Create a new merge.
- to->state = SsaEnv::kMerged;
- // Merge control.
- TFNode* controls[] = {to->control, from->control};
- TFNode* merge = builder_->Merge(2, controls);
- to->control = merge;
- // Merge effects.
- if (from->effect != to->effect) {
- TFNode* effects[] = {to->effect, from->effect, merge};
- to->effect = builder_->EffectPhi(2, effects, merge);
- }
- // Merge SSA values.
- for (int i = decoder->NumLocals() - 1; i >= 0; i--) {
- TFNode* a = to->locals[i];
- TFNode* b = from->locals[i];
- if (a != b) {
- TFNode* vals[] = {a, b};
- to->locals[i] =
- builder_->Phi(decoder->GetLocalType(i), 2, vals, merge);
- }
- }
- // Start a new merge from the instance cache.
- builder_->NewInstanceCacheMerge(&to->instance_cache,
- &from->instance_cache, merge);
- break;
- }
- case SsaEnv::kMerged: {
- TFNode* merge = to->control;
- // Extend the existing merge control node.
- builder_->AppendToMerge(merge, from->control);
- // Merge effects.
- to->effect = builder_->CreateOrMergeIntoEffectPhi(merge, to->effect,
- from->effect);
- // Merge locals.
- for (int i = decoder->NumLocals() - 1; i >= 0; i--) {
- to->locals[i] = builder_->CreateOrMergeIntoPhi(
- ValueTypes::MachineRepresentationFor(decoder->GetLocalType(i)),
- merge, to->locals[i], from->locals[i]);
- }
- // Merge the instance caches.
- builder_->MergeInstanceCacheInto(&to->instance_cache,
- &from->instance_cache, merge);
- break;
- }
- default:
- UNREACHABLE();
- }
- return from->Kill();
- }
-
- SsaEnv* PrepareForLoop(FullDecoder* decoder, SsaEnv* env) {
- if (!env->reached()) return Split(decoder, env);
- env->state = SsaEnv::kMerged;
-
- env->control = builder_->Loop(env->control);
- env->effect = builder_->EffectPhi(1, &env->effect, env->control);
- builder_->Terminate(env->effect, env->control);
- // The '+ 1' here is to be able to set the instance cache as assigned.
- BitVector* assigned = WasmDecoder<validate>::AnalyzeLoopAssignment(
- decoder, decoder->pc(), decoder->total_locals() + 1, decoder->zone());
- if (decoder->failed()) return env;
- if (assigned != nullptr) {
- // Only introduce phis for variables assigned in this loop.
- int instance_cache_index = decoder->total_locals();
- for (int i = decoder->NumLocals() - 1; i >= 0; i--) {
- if (!assigned->Contains(i)) continue;
- env->locals[i] = builder_->Phi(decoder->GetLocalType(i), 1,
- &env->locals[i], env->control);
- }
- // Introduce phis for instance cache pointers if necessary.
- if (assigned->Contains(instance_cache_index)) {
- builder_->PrepareInstanceCacheForLoop(&env->instance_cache,
- env->control);
- }
-
- SsaEnv* loop_body_env = Split(decoder, env);
- builder_->StackCheck(decoder->position(), &(loop_body_env->effect),
- &(loop_body_env->control));
- return loop_body_env;
- }
-
- // Conservatively introduce phis for all local variables.
- for (int i = decoder->NumLocals() - 1; i >= 0; i--) {
- env->locals[i] = builder_->Phi(decoder->GetLocalType(i), 1,
- &env->locals[i], env->control);
- }
-
- // Conservatively introduce phis for instance cache.
- builder_->PrepareInstanceCacheForLoop(&env->instance_cache, env->control);
-
- SsaEnv* loop_body_env = Split(decoder, env);
- builder_->StackCheck(decoder->position(), &loop_body_env->effect,
- &loop_body_env->control);
- return loop_body_env;
- }
-
- // Create a complete copy of {from}.
- SsaEnv* Split(FullDecoder* decoder, SsaEnv* from) {
- DCHECK_NOT_NULL(from);
- SsaEnv* result =
- reinterpret_cast<SsaEnv*>(decoder->zone()->New(sizeof(SsaEnv)));
- size_t size = sizeof(TFNode*) * decoder->NumLocals();
- result->control = from->control;
- result->effect = from->effect;
-
- if (from->reached()) {
- result->state = SsaEnv::kReached;
- result->locals =
- size > 0 ? reinterpret_cast<TFNode**>(decoder->zone()->New(size))
- : nullptr;
- memcpy(result->locals, from->locals, size);
- result->instance_cache = from->instance_cache;
- } else {
- result->state = SsaEnv::kUnreachable;
- result->locals = nullptr;
- result->instance_cache = {};
- }
-
- return result;
- }
-
- // Create a copy of {from} that steals its state and leaves {from}
- // unreachable.
- SsaEnv* Steal(Zone* zone, SsaEnv* from) {
- DCHECK_NOT_NULL(from);
- if (!from->reached()) return UnreachableEnv(zone);
- SsaEnv* result = reinterpret_cast<SsaEnv*>(zone->New(sizeof(SsaEnv)));
- result->state = SsaEnv::kReached;
- result->locals = from->locals;
- result->control = from->control;
- result->effect = from->effect;
- result->instance_cache = from->instance_cache;
- from->Kill(SsaEnv::kUnreachable);
- return result;
- }
-
- // Create an unreachable environment.
- SsaEnv* UnreachableEnv(Zone* zone) {
- SsaEnv* result = reinterpret_cast<SsaEnv*>(zone->New(sizeof(SsaEnv)));
- result->state = SsaEnv::kUnreachable;
- result->control = nullptr;
- result->effect = nullptr;
- result->locals = nullptr;
- result->instance_cache = {};
- return result;
- }
-
- void DoCall(FullDecoder* decoder, TFNode* index_node, FunctionSig* sig,
- uint32_t index, const Value args[], Value returns[]) {
- int param_count = static_cast<int>(sig->parameter_count());
- TFNode** arg_nodes = builder_->Buffer(param_count + 1);
- TFNode** return_nodes = nullptr;
- arg_nodes[0] = index_node;
- for (int i = 0; i < param_count; ++i) {
- arg_nodes[i + 1] = args[i].node;
- }
- if (index_node) {
- BUILD(CallIndirect, index, arg_nodes, &return_nodes, decoder->position());
- } else {
- BUILD(CallDirect, index, arg_nodes, &return_nodes, decoder->position());
- }
- int return_count = static_cast<int>(sig->return_count());
- for (int i = 0; i < return_count; ++i) {
- returns[i].node = return_nodes[i];
- }
- // The invoked function could have used grow_memory, so we need to
- // reload mem_size and mem_start.
- LoadContextIntoSsa(ssa_env_);
- }
-};
-
-} // namespace
-
bool DecodeLocalDecls(const WasmFeatures& enabled, BodyLocalDecls* decls,
const byte* start, const byte* end) {
Decoder decoder(start, end);
@@ -838,24 +53,6 @@ DecodeResult VerifyWasmCode(AccountingAllocator* allocator,
return decoder.toResult(nullptr);
}
-DecodeResult BuildTFGraph(AccountingAllocator* allocator,
- const WasmFeatures& enabled,
- const wasm::WasmModule* module, TFBuilder* builder,
- WasmFeatures* detected, FunctionBody& body,
- compiler::NodeOriginTable* node_origins) {
- Zone zone(allocator, ZONE_NAME);
- WasmFullDecoder<Decoder::kValidate, WasmGraphBuildingInterface> decoder(
- &zone, module, enabled, detected, body, builder);
- if (node_origins) {
- builder->AddBytecodePositionDecorator(node_origins, &decoder);
- }
- decoder.Decode();
- if (node_origins) {
- builder->RemoveBytecodePositionDecorator();
- }
- return decoder.toResult(nullptr);
-}
-
unsigned OpcodeLength(const byte* pc, const byte* end) {
Decoder decoder(pc, end);
return WasmDecoder<Decoder::kNoValidate>::OpcodeLength(&decoder, pc);
@@ -919,7 +116,7 @@ bool PrintRawWasmCode(AccountingAllocator* allocator, const FunctionBody& body,
BodyLocalDecls decls(&zone);
BytecodeIterator i(body.start, body.end, &decls);
if (body.start != i.pc() && print_locals == kPrintLocals) {
- os << "// locals: ";
+ os << "// locals:";
if (!decls.type_list.empty()) {
ValueType type = decls.type_list[0];
uint32_t count = 0;
@@ -932,6 +129,7 @@ bool PrintRawWasmCode(AccountingAllocator* allocator, const FunctionBody& body,
count = 1;
}
}
+ os << " " << count << " " << ValueTypes::TypeName(type);
}
os << std::endl;
if (line_numbers) line_numbers->push_back(kNoByteCode);
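The locals hunk above fixes a dropped final run: local declarations print as runs of consecutive same-typed locals, and the added line flushes the last accumulated run after the loop ends. A hedged sketch of the same run-length grouping, using plain strings in place of ValueType:

#include <cstdint>
#include <iostream>
#include <string>
#include <vector>

// Print a type list as runs of "<count> <type>", e.g. " 2 i32 1 f64",
// including the final flush after the loop that the hunk above adds.
void PrintLocalRuns(const std::vector<std::string>& types) {
  if (types.empty()) return;
  std::string current = types[0];
  uint32_t count = 0;
  for (const std::string& type : types) {
    if (type == current) {
      ++count;
    } else {
      std::cout << " " << count << " " << current;
      current = type;
      count = 1;
    }
  }
  std::cout << " " << count << " " << current;  // flush the last run
}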
@@ -955,7 +153,9 @@ bool PrintRawWasmCode(AccountingAllocator* allocator, const FunctionBody& body,
WasmOpcode opcode = i.current();
if (line_numbers) line_numbers->push_back(i.position());
- if (opcode == kExprElse) control_depth--;
+ if (opcode == kExprElse || opcode == kExprCatch) {
+ control_depth--;
+ }
int num_whitespaces = control_depth < 32 ? 2 * control_depth : 64;
@@ -995,6 +195,7 @@ bool PrintRawWasmCode(AccountingAllocator* allocator, const FunctionBody& body,
switch (opcode) {
case kExprElse:
+ case kExprCatch:
os << " // @" << i.pc_offset();
control_depth++;
break;
@@ -1006,7 +207,7 @@ bool PrintRawWasmCode(AccountingAllocator* allocator, const FunctionBody& body,
i.pc());
os << " // @" << i.pc_offset();
if (decoder.Complete(imm)) {
- for (unsigned i = 0; i < imm.out_arity(); i++) {
+ for (uint32_t i = 0; i < imm.out_arity(); i++) {
os << " " << ValueTypes::TypeName(imm.out_type(i));
}
}
@@ -1018,12 +219,12 @@ bool PrintRawWasmCode(AccountingAllocator* allocator, const FunctionBody& body,
control_depth--;
break;
case kExprBr: {
- BreakDepthImmediate<Decoder::kNoValidate> imm(&i, i.pc());
+ BranchDepthImmediate<Decoder::kNoValidate> imm(&i, i.pc());
os << " // depth=" << imm.depth;
break;
}
case kExprBrIf: {
- BreakDepthImmediate<Decoder::kNoValidate> imm(&i, i.pc());
+ BranchDepthImmediate<Decoder::kNoValidate> imm(&i, i.pc());
os << " // depth=" << imm.depth;
break;
}
@@ -1066,8 +267,6 @@ BitVector* AnalyzeLoopAssignmentForTesting(Zone* zone, size_t num_locals,
&decoder, start, static_cast<uint32_t>(num_locals), zone);
}
-#undef BUILD
-
} // namespace wasm
} // namespace internal
} // namespace v8
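One idea from the removed WasmGraphBuildingInterface worth carrying to its new home in the compiler directory: a branch target's SSA environment moves through three states as incoming control edges arrive, and Goto() picked its action from that state. The sketch below models only the state transitions, with placeholder types instead of TurboFan nodes.

// Mirrors the kUnreachable -> kReached -> kMerged progression that the
// removed Goto() switched over.
enum class MergeState { kUnreachable, kReached, kMerged };

struct MergeTarget {
  MergeState state = MergeState::kUnreachable;
  int incoming_edges = 0;
};

void AddIncomingEdge(MergeTarget* target) {
  ++target->incoming_edges;
  switch (target->state) {
    case MergeState::kUnreachable:
      // First edge: simply overwrite the target's control/effect/locals.
      target->state = MergeState::kReached;
      break;
    case MergeState::kReached:
      // Second edge: build a two-way merge and introduce phis.
      target->state = MergeState::kMerged;
      break;
    case MergeState::kMerged:
      // Later edges: append to the existing merge and extend the phis.
      break;
  }
}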
diff --git a/deps/v8/src/wasm/function-body-decoder.h b/deps/v8/src/wasm/function-body-decoder.h
index 13a3ae2d0c..5564dcd969 100644
--- a/deps/v8/src/wasm/function-body-decoder.h
+++ b/deps/v8/src/wasm/function-body-decoder.h
@@ -8,7 +8,6 @@
#include "src/base/compiler-specific.h"
#include "src/base/iterator.h"
#include "src/globals.h"
-#include "src/signature.h"
#include "src/wasm/decoder.h"
#include "src/wasm/wasm-opcodes.h"
#include "src/wasm/wasm-result.h"
@@ -18,18 +17,11 @@ namespace internal {
class BitVector; // forward declaration
-namespace compiler { // external declarations from compiler.
-class NodeOriginTable;
-class WasmGraphBuilder;
-}
-
namespace wasm {
struct WasmModule; // forward declaration of module interface.
struct WasmFeatures;
-typedef compiler::WasmGraphBuilder TFBuilder;
-
// A wrapper around the signature and bytes of a function.
struct FunctionBody {
FunctionSig* sig; // function signature
@@ -48,11 +40,6 @@ V8_EXPORT_PRIVATE DecodeResult VerifyWasmCode(AccountingAllocator* allocator,
WasmFeatures* detected,
FunctionBody& body);
-DecodeResult BuildTFGraph(AccountingAllocator* allocator,
- const WasmFeatures& enabled, const WasmModule* module,
- TFBuilder* builder, WasmFeatures* detected,
- FunctionBody& body,
- compiler::NodeOriginTable* node_origins);
enum PrintLocals { kPrintLocals, kOmitLocals };
V8_EXPORT_PRIVATE
bool PrintRawWasmCode(AccountingAllocator* allocator, const FunctionBody& body,
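The function-compiler.cc diff below introduces WasmInstructionBuffer as a PIMPL facade: the public class carries no state, and each method reinterpret_casts `this` to the hidden implementation type. A generic sketch of the trick follows, with made-up names (Facade/HiddenImpl); the real version is in the hunk underneath.

#include <memory>

class Facade {
 public:
  static std::unique_ptr<Facade> New();
  ~Facade();
  void DoWork();

 private:
  Facade() = delete;  // only ever created via the cast in New()
};

// In the .cc file: the real implementation the facade's storage actually is.
class HiddenImpl {
 public:
  void DoWork() { /* real logic lives here */ }
};

static HiddenImpl* Impl(Facade* facade) {
  return reinterpret_cast<HiddenImpl*>(facade);
}

std::unique_ptr<Facade> Facade::New() {
  return std::unique_ptr<Facade>(reinterpret_cast<Facade*>(new HiddenImpl()));
}

// Forward explicitly: the static type (Facade) and the dynamic type
// (HiddenImpl) differ, so the destructor must call through the cast.
Facade::~Facade() { Impl(this)->~HiddenImpl(); }

void Facade::DoWork() { Impl(this)->DoWork(); }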
diff --git a/deps/v8/src/wasm/function-compiler.cc b/deps/v8/src/wasm/function-compiler.cc
index 4cec770ecc..c166683bee 100644
--- a/deps/v8/src/wasm/function-compiler.cc
+++ b/deps/v8/src/wasm/function-compiler.cc
@@ -16,8 +16,8 @@ namespace wasm {
namespace {
-const char* GetExecutionTierAsString(ExecutionTier mode) {
- switch (mode) {
+const char* GetExecutionTierAsString(ExecutionTier tier) {
+ switch (tier) {
case ExecutionTier::kBaseline:
return "liftoff";
case ExecutionTier::kOptimized:
@@ -28,103 +28,194 @@ const char* GetExecutionTierAsString(ExecutionTier mode) {
UNREACHABLE();
}
-void RecordStats(const WasmCode* code, Counters* counters) {
- counters->wasm_generated_code_size()->Increment(
- static_cast<int>(code->instructions().size()));
- counters->wasm_reloc_size()->Increment(
- static_cast<int>(code->reloc_info().size()));
+class WasmInstructionBufferImpl {
+ public:
+ class View : public AssemblerBuffer {
+ public:
+ View(Vector<uint8_t> buffer, WasmInstructionBufferImpl* holder)
+ : buffer_(buffer), holder_(holder) {}
+
+ ~View() override {
+ if (buffer_.start() == holder_->old_buffer_.start()) {
+ DCHECK_EQ(buffer_.size(), holder_->old_buffer_.size());
+ holder_->old_buffer_ = {};
+ }
+ }
+
+ byte* start() const override { return buffer_.start(); }
+
+ int size() const override { return static_cast<int>(buffer_.size()); }
+
+ std::unique_ptr<AssemblerBuffer> Grow(int new_size) override {
+ // If we grow, we must be the current buffer of {holder_}.
+ DCHECK_EQ(buffer_.start(), holder_->buffer_.start());
+ DCHECK_EQ(buffer_.size(), holder_->buffer_.size());
+ DCHECK_NULL(holder_->old_buffer_);
+
+ DCHECK_LT(size(), new_size);
+
+ holder_->old_buffer_ = std::move(holder_->buffer_);
+ holder_->buffer_ = OwnedVector<uint8_t>::New(new_size);
+ return base::make_unique<View>(holder_->buffer_.as_vector(), holder_);
+ }
+
+ private:
+ const Vector<uint8_t> buffer_;
+ WasmInstructionBufferImpl* const holder_;
+ };
+
+ std::unique_ptr<AssemblerBuffer> CreateView() {
+ DCHECK_NOT_NULL(buffer_);
+ return base::make_unique<View>(buffer_.as_vector(), this);
+ }
+
+ std::unique_ptr<uint8_t[]> ReleaseBuffer() {
+ DCHECK_NULL(old_buffer_);
+ DCHECK_NOT_NULL(buffer_);
+ return buffer_.ReleaseData();
+ }
+
+ bool released() const { return buffer_ == nullptr; }
+
+ private:
+ // The current buffer used to emit code.
+ OwnedVector<uint8_t> buffer_ =
+ OwnedVector<uint8_t>::New(AssemblerBase::kMinimalBufferSize);
+
+  // While the buffer is being grown, we also need to keep the old buffer
+  // alive temporarily.
+ OwnedVector<uint8_t> old_buffer_;
+};
+
+WasmInstructionBufferImpl* Impl(WasmInstructionBuffer* buf) {
+ return reinterpret_cast<WasmInstructionBufferImpl*>(buf);
}
} // namespace
+// PIMPL interface WasmInstructionBuffer for WasmInstructionBufferImpl
+WasmInstructionBuffer::~WasmInstructionBuffer() {
+ Impl(this)->~WasmInstructionBufferImpl();
+}
+
+std::unique_ptr<AssemblerBuffer> WasmInstructionBuffer::CreateView() {
+ return Impl(this)->CreateView();
+}
+
+std::unique_ptr<uint8_t[]> WasmInstructionBuffer::ReleaseBuffer() {
+ return Impl(this)->ReleaseBuffer();
+}
+
// static
-ExecutionTier WasmCompilationUnit::GetDefaultExecutionTier() {
- return FLAG_liftoff ? ExecutionTier::kBaseline : ExecutionTier::kOptimized;
+std::unique_ptr<WasmInstructionBuffer> WasmInstructionBuffer::New() {
+ return std::unique_ptr<WasmInstructionBuffer>{
+ reinterpret_cast<WasmInstructionBuffer*>(
+ new WasmInstructionBufferImpl())};
}
+// End of PIMPL interface WasmInstructionBuffer for WasmInstructionBufferImpl
-WasmCompilationUnit::WasmCompilationUnit(WasmEngine* wasm_engine,
- ModuleEnv* env,
- NativeModule* native_module,
- FunctionBody body, int index,
- Counters* counters, ExecutionTier mode)
- : env_(env),
- wasm_engine_(wasm_engine),
- func_body_(body),
- counters_(counters),
- func_index_(index),
- native_module_(native_module),
- mode_(mode) {
- DCHECK_GE(index, env->module->num_imported_functions);
- DCHECK_LT(index, env->module->functions.size());
- // Always disable Liftoff for asm.js, for two reasons:
- // 1) asm-specific opcodes are not implemented, and
- // 2) tier-up does not work with lazy compilation.
- if (env->module->origin == kAsmJsOrigin) mode = ExecutionTier::kOptimized;
+// static
+ExecutionTier WasmCompilationUnit::GetDefaultExecutionTier(
+ const WasmModule* module) {
+ return FLAG_liftoff && module->origin == kWasmOrigin
+ ? ExecutionTier::kBaseline
+ : ExecutionTier::kOptimized;
+}
+
+WasmCompilationUnit::WasmCompilationUnit(WasmEngine* wasm_engine, int index,
+ ExecutionTier tier)
+ : wasm_engine_(wasm_engine), func_index_(index), tier_(tier) {
if (V8_UNLIKELY(FLAG_wasm_tier_mask_for_testing) && index < 32 &&
(FLAG_wasm_tier_mask_for_testing & (1 << index))) {
- mode = ExecutionTier::kOptimized;
+ tier = ExecutionTier::kOptimized;
}
- SwitchMode(mode);
+ SwitchTier(tier);
}
// Declared here such that {LiftoffCompilationUnit} and
// {TurbofanWasmCompilationUnit} can be opaque in the header file.
WasmCompilationUnit::~WasmCompilationUnit() = default;
-void WasmCompilationUnit::ExecuteCompilation(WasmFeatures* detected) {
- auto size_histogram = SELECT_WASM_COUNTER(counters_, env_->module->origin,
- wasm, function_size_bytes);
- size_histogram->AddSample(
- static_cast<int>(func_body_.end - func_body_.start));
- auto timed_histogram = SELECT_WASM_COUNTER(counters_, env_->module->origin,
+WasmCompilationResult WasmCompilationUnit::ExecuteCompilation(
+ CompilationEnv* env,
+ const std::shared_ptr<WireBytesStorage>& wire_bytes_storage,
+ Counters* counters, WasmFeatures* detected) {
+ auto* func = &env->module->functions[func_index_];
+ Vector<const uint8_t> code = wire_bytes_storage->GetCode(func->code);
+ wasm::FunctionBody func_body{func->sig, func->code.offset(), code.start(),
+ code.end()};
+
+ auto size_histogram = SELECT_WASM_COUNTER(counters, env->module->origin, wasm,
+ function_size_bytes);
+ size_histogram->AddSample(static_cast<int>(func_body.end - func_body.start));
+ auto timed_histogram = SELECT_WASM_COUNTER(counters, env->module->origin,
wasm_compile, function_time);
TimedHistogramScope wasm_compile_function_time_scope(timed_histogram);
if (FLAG_trace_wasm_compiler) {
PrintF("Compiling wasm function %d with %s\n\n", func_index_,
- GetExecutionTierAsString(mode_));
+ GetExecutionTierAsString(tier_));
}
- switch (mode_) {
+ WasmCompilationResult result;
+ switch (tier_) {
case ExecutionTier::kBaseline:
- if (liftoff_unit_->ExecuteCompilation(detected)) break;
+ result =
+ liftoff_unit_->ExecuteCompilation(env, func_body, counters, detected);
+ if (result.succeeded()) break;
// Otherwise, fall back to turbofan.
- SwitchMode(ExecutionTier::kOptimized);
+ SwitchTier(ExecutionTier::kOptimized);
+ // TODO(wasm): We could actually stop or remove the tiering unit for this
+ // function to avoid compiling it twice with TurboFan.
V8_FALLTHROUGH;
case ExecutionTier::kOptimized:
- turbofan_unit_->ExecuteCompilation(detected);
+ result = turbofan_unit_->ExecuteCompilation(env, func_body, counters,
+ detected);
break;
case ExecutionTier::kInterpreter:
UNREACHABLE(); // TODO(titzer): compile interpreter entry stub.
}
-}
-WasmCode* WasmCompilationUnit::FinishCompilation(ErrorThrower* thrower) {
- WasmCode* ret;
- switch (mode_) {
- case ExecutionTier::kBaseline:
- ret = liftoff_unit_->FinishCompilation(thrower);
- break;
- case ExecutionTier::kOptimized:
- ret = turbofan_unit_->FinishCompilation(thrower);
- break;
- case ExecutionTier::kInterpreter:
- UNREACHABLE(); // TODO(titzer): finish interpreter entry stub.
+ if (result.succeeded()) {
+ counters->wasm_generated_code_size()->Increment(
+ result.code_desc.instr_size);
+ counters->wasm_reloc_size()->Increment(result.code_desc.reloc_size);
}
- if (ret == nullptr) {
- thrower->RuntimeError("Error finalizing code.");
- } else {
- RecordStats(ret, counters_);
+
+ return result;
+}
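// A minimal sketch of the resulting two-phase flow (hypothetical helper,
// mirroring {CompileWasmFunction} below): compilation produces a standalone
// result without touching the NativeModule; publishing is a separate step.
void CompileAndPublish(WasmCompilationUnit* unit, NativeModule* native_module,
                       CompilationEnv* env, Counters* counters,
                       WasmFeatures* detected) {
  WasmCompilationResult result = unit->ExecuteCompilation(
      env, native_module->compilation_state()->GetWireBytesStorage(), counters,
      detected);
  unit->Publish(std::move(result), native_module);
}
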
+
+WasmCode* WasmCompilationUnit::Publish(WasmCompilationResult result,
+ NativeModule* native_module) {
+ if (!result.succeeded()) {
+ native_module->compilation_state()->SetError(func_index_,
+ std::move(result.error));
+ return nullptr;
}
- return ret;
+
+  // {tier_} now holds the tier that was actually executed, which can differ
+  // from the tier that was initially requested (see {SwitchTier}).
+ DCHECK(result.succeeded());
+ WasmCode::Tier code_tier = tier_ == ExecutionTier::kBaseline
+ ? WasmCode::kLiftoff
+ : WasmCode::kTurbofan;
+ DCHECK_EQ(result.code_desc.buffer, result.instr_buffer.get());
+ WasmCode* code = native_module->AddCode(
+ func_index_, result.code_desc, result.frame_slot_count,
+ result.safepoint_table_offset, result.handler_table_offset,
+ std::move(result.protected_instructions),
+ std::move(result.source_positions), WasmCode::kFunction, code_tier);
+ // TODO(clemensh): Merge this into {AddCode}?
+ native_module->PublishCode(code);
+ return code;
}
-void WasmCompilationUnit::SwitchMode(ExecutionTier new_mode) {
+void WasmCompilationUnit::SwitchTier(ExecutionTier new_tier) {
  // This method is called from the constructor, where neither
-  // {liftoff_unit_} nor {turbofan_unit_} are set, or to switch mode from
+  // {liftoff_unit_} nor {turbofan_unit_} is set, or to switch tier from
  // kBaseline to kOptimized, in which case {liftoff_unit_} is already set.
- mode_ = new_mode;
- switch (new_mode) {
+ tier_ = new_tier;
+ switch (new_tier) {
case ExecutionTier::kBaseline:
DCHECK(!turbofan_unit_);
DCHECK(!liftoff_unit_);
@@ -142,20 +233,22 @@ void WasmCompilationUnit::SwitchMode(ExecutionTier new_mode) {
}
// static
-WasmCode* WasmCompilationUnit::CompileWasmFunction(
- Isolate* isolate, NativeModule* native_module, WasmFeatures* detected,
- ErrorThrower* thrower, ModuleEnv* env, const WasmFunction* function,
- ExecutionTier mode) {
+void WasmCompilationUnit::CompileWasmFunction(Isolate* isolate,
+ NativeModule* native_module,
+ WasmFeatures* detected,
+ const WasmFunction* function,
+ ExecutionTier tier) {
ModuleWireBytes wire_bytes(native_module->wire_bytes());
FunctionBody function_body{function->sig, function->code.offset(),
wire_bytes.start() + function->code.offset(),
wire_bytes.start() + function->code.end_offset()};
- WasmCompilationUnit unit(isolate->wasm_engine(), env, native_module,
- function_body,
- function->func_index, isolate->counters(), mode);
- unit.ExecuteCompilation(detected);
- return unit.FinishCompilation(thrower);
+ WasmCompilationUnit unit(isolate->wasm_engine(), function->func_index, tier);
+ CompilationEnv env = native_module->CreateCompilationEnv();
+ WasmCompilationResult result = unit.ExecuteCompilation(
+ &env, native_module->compilation_state()->GetWireBytesStorage(),
+ isolate->counters(), detected);
+ unit.Publish(std::move(result), native_module);
}
} // namespace wasm
diff --git a/deps/v8/src/wasm/function-compiler.h b/deps/v8/src/wasm/function-compiler.h
index b821f1f64d..8f235a5d1c 100644
--- a/deps/v8/src/wasm/function-compiler.h
+++ b/deps/v8/src/wasm/function-compiler.h
@@ -5,6 +5,8 @@
#ifndef V8_WASM_FUNCTION_COMPILER_H_
#define V8_WASM_FUNCTION_COMPILER_H_
+#include "src/trap-handler/trap-handler.h"
+#include "src/wasm/compilation-environment.h"
#include "src/wasm/function-body-decoder.h"
#include "src/wasm/wasm-limits.h"
#include "src/wasm/wasm-module.h"
@@ -13,112 +15,99 @@
namespace v8 {
namespace internal {
+class AssemblerBuffer;
class Counters;
namespace compiler {
+class Pipeline;
class TurbofanWasmCompilationUnit;
} // namespace compiler
namespace wasm {
class LiftoffCompilationUnit;
-struct ModuleWireBytes;
class NativeModule;
class WasmCode;
+class WasmCompilationUnit;
class WasmEngine;
struct WasmFunction;
-enum RuntimeExceptionSupport : bool {
- kRuntimeExceptionSupport = true,
- kNoRuntimeExceptionSupport = false
+class WasmInstructionBuffer final {
+ public:
+ ~WasmInstructionBuffer();
+ std::unique_ptr<AssemblerBuffer> CreateView();
+ std::unique_ptr<uint8_t[]> ReleaseBuffer();
+
+ static std::unique_ptr<WasmInstructionBuffer> New();
+
+ private:
+ WasmInstructionBuffer() = delete;
+ DISALLOW_COPY_AND_ASSIGN(WasmInstructionBuffer);
};
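
// Usage sketch for the opaque buffer above (assembler elided): callers hand
// {CreateView()} results to an assembler, then move the raw bytes into a
// WasmCompilationResult via {ReleaseBuffer()}.
std::unique_ptr<uint8_t[]> AssembleIntoOwnedBytes() {
  std::unique_ptr<WasmInstructionBuffer> buffer = WasmInstructionBuffer::New();
  std::unique_ptr<AssemblerBuffer> view = buffer->CreateView();
  // ... emit code through an assembler constructed over {view} ...
  (void)view;  // Unused in this sketch.
  return buffer->ReleaseBuffer();
}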
-enum UseTrapHandler : bool { kUseTrapHandler = true, kNoTrapHandler = false };
-
-enum LowerSimd : bool { kLowerSimd = true, kNoLowerSimd = false };
-
-// The {ModuleEnv} encapsulates the module data that is used during compilation.
-// ModuleEnvs are shareable across multiple compilations.
-struct ModuleEnv {
- // A pointer to the decoded module's static representation.
- const WasmModule* const module;
-
- // True if trap handling should be used in compiled code, rather than
- // compiling in bounds checks for each memory access.
- const UseTrapHandler use_trap_handler;
-
- // If the runtime doesn't support exception propagation,
- // we won't generate stack checks, and trap handling will also
- // be generated differently.
- const RuntimeExceptionSupport runtime_exception_support;
-
- // The smallest size of any memory that could be used with this module, in
- // bytes.
- const uint64_t min_memory_size;
-
- // The largest size of any memory that could be used with this module, in
- // bytes.
- const uint64_t max_memory_size;
-
- const LowerSimd lower_simd;
-
- constexpr ModuleEnv(const WasmModule* module, UseTrapHandler use_trap_handler,
- RuntimeExceptionSupport runtime_exception_support,
- LowerSimd lower_simd = kNoLowerSimd)
- : module(module),
- use_trap_handler(use_trap_handler),
- runtime_exception_support(runtime_exception_support),
- min_memory_size(module ? module->initial_pages * uint64_t{kWasmPageSize}
- : 0),
- max_memory_size(module && module->has_maximum_pages
- ? (module->maximum_pages * uint64_t{kWasmPageSize})
- : kSpecMaxWasmMemoryBytes),
- lower_simd(lower_simd) {}
+struct WasmCompilationResult {
+ public:
+ MOVE_ONLY_WITH_DEFAULT_CONSTRUCTORS(WasmCompilationResult);
+
+ explicit WasmCompilationResult(WasmError error) : error(std::move(error)) {}
+
+ bool succeeded() const {
+ DCHECK_EQ(code_desc.buffer != nullptr, error.empty());
+ return error.empty();
+ }
+ operator bool() const { return succeeded(); }
+
+ CodeDesc code_desc;
+ std::unique_ptr<uint8_t[]> instr_buffer;
+ uint32_t frame_slot_count = 0;
+ size_t safepoint_table_offset = 0;
+ size_t handler_table_offset = 0;
+ OwnedVector<byte> source_positions;
+ OwnedVector<trap_handler::ProtectedInstructionData> protected_instructions;
+
+ WasmError error;
};
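
// A small consumption sketch (assuming {result} came from a compilation
// unit): the implicit bool conversion is shorthand for {succeeded()}, and a
// failed result carries its diagnosis in {error} instead of code.
void InspectResult(const WasmCompilationResult& result) {
  if (!result) {
    // Failure: {result.error} holds the byte offset and message.
    return;
  }
  // Success: {result.code_desc} plus the owned {instr_buffer} describe the
  // generated machine code, ready to be handed to {Publish()}.
}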
class WasmCompilationUnit final {
public:
- static ExecutionTier GetDefaultExecutionTier();
+ static ExecutionTier GetDefaultExecutionTier(const WasmModule*);
// If constructing from a background thread, pass in a Counters*, and ensure
// that the Counters live at least as long as this compilation unit (which
  // typically means holding a std::shared_ptr<Counters>).
// If used exclusively from a foreground thread, Isolate::counters() may be
// used by callers to pass Counters.
- WasmCompilationUnit(WasmEngine* wasm_engine, ModuleEnv*, NativeModule*,
- FunctionBody, int index, Counters*,
- ExecutionTier = GetDefaultExecutionTier());
+ WasmCompilationUnit(WasmEngine*, int index, ExecutionTier);
~WasmCompilationUnit();
- void ExecuteCompilation(WasmFeatures* detected);
- WasmCode* FinishCompilation(ErrorThrower* thrower);
+ WasmCompilationResult ExecuteCompilation(
+ CompilationEnv*, const std::shared_ptr<WireBytesStorage>&, Counters*,
+ WasmFeatures* detected);
- static WasmCode* CompileWasmFunction(
- Isolate* isolate, NativeModule* native_module, WasmFeatures* detected,
- ErrorThrower* thrower, ModuleEnv* env, const WasmFunction* function,
- ExecutionTier = GetDefaultExecutionTier());
+ WasmCode* Publish(WasmCompilationResult, NativeModule*);
- NativeModule* native_module() const { return native_module_; }
- ExecutionTier mode() const { return mode_; }
+ ExecutionTier tier() const { return tier_; }
+
+ static void CompileWasmFunction(Isolate*, NativeModule*,
+ WasmFeatures* detected, const WasmFunction*,
+ ExecutionTier);
private:
friend class LiftoffCompilationUnit;
friend class compiler::TurbofanWasmCompilationUnit;
- ModuleEnv* env_;
- WasmEngine* wasm_engine_;
- FunctionBody func_body_;
- Counters* counters_;
- int func_index_;
- NativeModule* native_module_;
- ExecutionTier mode_;
- // LiftoffCompilationUnit, set if {mode_ == kLiftoff}.
+ WasmEngine* const wasm_engine_;
+ const int func_index_;
+ ExecutionTier tier_;
+ WasmCode* result_ = nullptr;
+
+ // LiftoffCompilationUnit, set if {tier_ == kLiftoff}.
std::unique_ptr<LiftoffCompilationUnit> liftoff_unit_;
- // TurbofanWasmCompilationUnit, set if {mode_ == kTurbofan}.
+ // TurbofanWasmCompilationUnit, set if {tier_ == kTurbofan}.
std::unique_ptr<compiler::TurbofanWasmCompilationUnit> turbofan_unit_;
- void SwitchMode(ExecutionTier new_mode);
+ void SwitchTier(ExecutionTier new_tier);
DISALLOW_COPY_AND_ASSIGN(WasmCompilationUnit);
};
diff --git a/deps/v8/src/wasm/graph-builder-interface.cc b/deps/v8/src/wasm/graph-builder-interface.cc
new file mode 100644
index 0000000000..ac297662c8
--- /dev/null
+++ b/deps/v8/src/wasm/graph-builder-interface.cc
@@ -0,0 +1,879 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/wasm/graph-builder-interface.h"
+
+#include "src/compiler/wasm-compiler.h"
+#include "src/flags.h"
+#include "src/handles.h"
+#include "src/objects-inl.h"
+#include "src/ostreams.h"
+#include "src/wasm/decoder.h"
+#include "src/wasm/function-body-decoder-impl.h"
+#include "src/wasm/function-body-decoder.h"
+#include "src/wasm/wasm-limits.h"
+#include "src/wasm/wasm-linkage.h"
+#include "src/wasm/wasm-module.h"
+#include "src/wasm/wasm-opcodes.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+namespace {
+
+// An SsaEnv environment carries the current local variable renaming
+// as well as the current effect and control dependency in the TF graph.
+// It maintains a control state that tracks whether the environment
+// is reachable, has reached a control end, or has been merged.
+struct SsaEnv {
+ enum State { kControlEnd, kUnreachable, kReached, kMerged };
+
+ State state;
+ TFNode* control;
+ TFNode* effect;
+ compiler::WasmInstanceCacheNodes instance_cache;
+ TFNode** locals;
+
+ void Kill(State new_state = kControlEnd) {
+ state = new_state;
+ locals = nullptr;
+ control = nullptr;
+ effect = nullptr;
+ instance_cache = {};
+ }
+ void SetNotMerged() {
+ if (state == kMerged) state = kReached;
+ }
+};
+
+#define BUILD(func, ...) \
+ ([&] { \
+ DCHECK(decoder->ok()); \
+ return CheckForException(decoder, builder_->func(__VA_ARGS__)); \
+ })()
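// Expansion example for the macro above: BUILD(Unop, opcode, input, pos)
// becomes an immediately-invoked lambda, roughly
//   DCHECK(decoder->ok());
//   return CheckForException(decoder, builder_->Unop(opcode, input, pos));
// so every graph-building call made inside a try scope gets an exceptional
// control edge wired to the enclosing landing pad.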
+
+constexpr uint32_t kNullCatch = static_cast<uint32_t>(-1);
+
+class WasmGraphBuildingInterface {
+ public:
+ static constexpr Decoder::ValidateFlag validate = Decoder::kValidate;
+ using FullDecoder = WasmFullDecoder<validate, WasmGraphBuildingInterface>;
+
+ struct Value : public ValueBase {
+ TFNode* node = nullptr;
+
+ template <typename... Args>
+ explicit Value(Args&&... args) V8_NOEXCEPT
+ : ValueBase(std::forward<Args>(args)...) {}
+ };
+
+ struct TryInfo : public ZoneObject {
+ SsaEnv* catch_env;
+ TFNode* exception = nullptr;
+
+ bool might_throw() const { return exception != nullptr; }
+
+ MOVE_ONLY_NO_DEFAULT_CONSTRUCTOR(TryInfo);
+
+ explicit TryInfo(SsaEnv* c) : catch_env(c) {}
+ };
+
+ struct Control : public ControlBase<Value> {
+ SsaEnv* end_env = nullptr; // end environment for the construct.
+ SsaEnv* false_env = nullptr; // false environment (only for if).
+ TryInfo* try_info = nullptr; // information about try statements.
+ int32_t previous_catch = -1; // previous Control with a catch.
+
+ MOVE_ONLY_NO_DEFAULT_CONSTRUCTOR(Control);
+
+ template <typename... Args>
+ explicit Control(Args&&... args) V8_NOEXCEPT
+ : ControlBase(std::forward<Args>(args)...) {}
+ };
+
+ explicit WasmGraphBuildingInterface(compiler::WasmGraphBuilder* builder)
+ : builder_(builder) {}
+
+ void StartFunction(FullDecoder* decoder) {
+ SsaEnv* ssa_env =
+ reinterpret_cast<SsaEnv*>(decoder->zone()->New(sizeof(SsaEnv)));
+ uint32_t num_locals = decoder->num_locals();
+ uint32_t env_count = num_locals;
+ size_t size = sizeof(TFNode*) * env_count;
+ ssa_env->state = SsaEnv::kReached;
+ ssa_env->locals =
+ size > 0 ? reinterpret_cast<TFNode**>(decoder->zone()->New(size))
+ : nullptr;
+
+    // The first '+ 1' is needed by the TF Start node, the second '+ 1' is
+    // for the instance parameter.
+ TFNode* start = builder_->Start(
+ static_cast<int>(decoder->sig_->parameter_count() + 1 + 1));
+ ssa_env->effect = start;
+ ssa_env->control = start;
+ // Initialize effect and control before initializing the locals default
+ // values (which might require instance loads) or loading the context.
+ builder_->set_effect_ptr(&ssa_env->effect);
+ builder_->set_control_ptr(&ssa_env->control);
+ // Initialize the instance parameter (index 0).
+ builder_->set_instance_node(builder_->Param(kWasmInstanceParameterIndex));
+    // Initialize local variables. Parameters are shifted by 1 because of
+    // the instance parameter.
+ uint32_t index = 0;
+ for (; index < decoder->sig_->parameter_count(); ++index) {
+ ssa_env->locals[index] = builder_->Param(index + 1);
+ }
+ while (index < num_locals) {
+ ValueType type = decoder->GetLocalType(index);
+ TFNode* node = DefaultValue(type);
+ while (index < num_locals && decoder->GetLocalType(index) == type) {
+ // Do a whole run of like-typed locals at a time.
+ ssa_env->locals[index++] = node;
+ }
+ }
+ LoadContextIntoSsa(ssa_env);
+ SetEnv(ssa_env);
+ }
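  // Index bookkeeping example for the code above (hypothetical signature):
  // for a wasm function of type (i32, f64) -> i32,
  // Param(kWasmInstanceParameterIndex) is the instance, so
  // locals[0] = Param(1) and locals[1] = Param(2); any further declared
  // locals start as DefaultValue() of their declared type.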
+
+  // Reload the instance cache entries into the SSA environment.
+ void LoadContextIntoSsa(SsaEnv* ssa_env) {
+ if (ssa_env) builder_->InitInstanceCache(&ssa_env->instance_cache);
+ }
+
+ void StartFunctionBody(FullDecoder* decoder, Control* block) {}
+
+ void FinishFunction(FullDecoder*) { builder_->PatchInStackCheckIfNeeded(); }
+
+ void OnFirstError(FullDecoder*) {}
+
+ void NextInstruction(FullDecoder*, WasmOpcode) {}
+
+ void Block(FullDecoder* decoder, Control* block) {
+ // The branch environment is the outer environment.
+ block->end_env = ssa_env_;
+ SetEnv(Steal(decoder->zone(), ssa_env_));
+ }
+
+ void Loop(FullDecoder* decoder, Control* block) {
+ SsaEnv* finish_try_env = Steal(decoder->zone(), ssa_env_);
+ block->end_env = finish_try_env;
+ // The continue environment is the inner environment.
+ SetEnv(PrepareForLoop(decoder, finish_try_env));
+ ssa_env_->SetNotMerged();
+ if (!decoder->ok()) return;
+ // Wrap input merge into phis.
+ for (uint32_t i = 0; i < block->start_merge.arity; ++i) {
+ Value& val = block->start_merge[i];
+ val.node = builder_->Phi(val.type, 1, &val.node, block->end_env->control);
+ }
+ }
+
+ void Try(FullDecoder* decoder, Control* block) {
+ SsaEnv* outer_env = ssa_env_;
+ SsaEnv* catch_env = Split(decoder, outer_env);
+    // Mark the catch environment as unreachable, since it is only accessible
+    // through catch unwinding (i.e. landing pads).
+ catch_env->state = SsaEnv::kUnreachable;
+ SsaEnv* try_env = Steal(decoder->zone(), outer_env);
+ SetEnv(try_env);
+ TryInfo* try_info = new (decoder->zone()) TryInfo(catch_env);
+ block->end_env = outer_env;
+ block->try_info = try_info;
+ block->previous_catch = current_catch_;
+ current_catch_ = static_cast<int32_t>(decoder->control_depth() - 1);
+ }
+
+ void If(FullDecoder* decoder, const Value& cond, Control* if_block) {
+ TFNode* if_true = nullptr;
+ TFNode* if_false = nullptr;
+ BUILD(BranchNoHint, cond.node, &if_true, &if_false);
+ SsaEnv* end_env = ssa_env_;
+ SsaEnv* false_env = Split(decoder, ssa_env_);
+ false_env->control = if_false;
+ SsaEnv* true_env = Steal(decoder->zone(), ssa_env_);
+ true_env->control = if_true;
+ if_block->end_env = end_env;
+ if_block->false_env = false_env;
+ SetEnv(true_env);
+ }
+
+ void FallThruTo(FullDecoder* decoder, Control* c) {
+ DCHECK(!c->is_loop());
+ MergeValuesInto(decoder, c, &c->end_merge);
+ }
+
+ void PopControl(FullDecoder* decoder, Control* block) {
+ // A loop just continues with the end environment. There is no merge.
+ if (block->is_loop()) return;
+ // Any other block falls through to the parent block.
+ if (block->reachable()) FallThruTo(decoder, block);
+ if (block->is_onearmed_if()) {
+ // Merge the else branch into the end merge.
+ SetEnv(block->false_env);
+ MergeValuesInto(decoder, block, &block->end_merge);
+ }
+ // Now continue with the merged environment.
+ SetEnv(block->end_env);
+ }
+
+ void EndControl(FullDecoder* decoder, Control* block) { ssa_env_->Kill(); }
+
+ void UnOp(FullDecoder* decoder, WasmOpcode opcode, const Value& value,
+ Value* result) {
+ result->node = BUILD(Unop, opcode, value.node, decoder->position());
+ }
+
+ void BinOp(FullDecoder* decoder, WasmOpcode opcode, const Value& lhs,
+ const Value& rhs, Value* result) {
+ auto node = BUILD(Binop, opcode, lhs.node, rhs.node, decoder->position());
+ if (result) result->node = node;
+ }
+
+ void I32Const(FullDecoder* decoder, Value* result, int32_t value) {
+ result->node = builder_->Int32Constant(value);
+ }
+
+ void I64Const(FullDecoder* decoder, Value* result, int64_t value) {
+ result->node = builder_->Int64Constant(value);
+ }
+
+ void F32Const(FullDecoder* decoder, Value* result, float value) {
+ result->node = builder_->Float32Constant(value);
+ }
+
+ void F64Const(FullDecoder* decoder, Value* result, double value) {
+ result->node = builder_->Float64Constant(value);
+ }
+
+ void RefNull(FullDecoder* decoder, Value* result) {
+ result->node = builder_->RefNull();
+ }
+
+ void Drop(FullDecoder* decoder, const Value& value) {}
+
+ void DoReturn(FullDecoder* decoder, Vector<Value> values) {
+ TFNode** nodes = GetNodes(values);
+ BUILD(Return, static_cast<uint32_t>(values.size()), nodes);
+ }
+
+ void GetLocal(FullDecoder* decoder, Value* result,
+ const LocalIndexImmediate<validate>& imm) {
+ if (!ssa_env_->locals) return; // unreachable
+ result->node = ssa_env_->locals[imm.index];
+ }
+
+ void SetLocal(FullDecoder* decoder, const Value& value,
+ const LocalIndexImmediate<validate>& imm) {
+ if (!ssa_env_->locals) return; // unreachable
+ ssa_env_->locals[imm.index] = value.node;
+ }
+
+ void TeeLocal(FullDecoder* decoder, const Value& value, Value* result,
+ const LocalIndexImmediate<validate>& imm) {
+ result->node = value.node;
+ if (!ssa_env_->locals) return; // unreachable
+ ssa_env_->locals[imm.index] = value.node;
+ }
+
+ void GetGlobal(FullDecoder* decoder, Value* result,
+ const GlobalIndexImmediate<validate>& imm) {
+ result->node = BUILD(GetGlobal, imm.index);
+ }
+
+ void SetGlobal(FullDecoder* decoder, const Value& value,
+ const GlobalIndexImmediate<validate>& imm) {
+ BUILD(SetGlobal, imm.index, value.node);
+ }
+
+ void Unreachable(FullDecoder* decoder) {
+ BUILD(Unreachable, decoder->position());
+ }
+
+ void Select(FullDecoder* decoder, const Value& cond, const Value& fval,
+ const Value& tval, Value* result) {
+ TFNode* controls[2];
+ BUILD(BranchNoHint, cond.node, &controls[0], &controls[1]);
+ TFNode* merge = BUILD(Merge, 2, controls);
+ TFNode* vals[2] = {tval.node, fval.node};
+ TFNode* phi = BUILD(Phi, tval.type, 2, vals, merge);
+ result->node = phi;
+ ssa_env_->control = merge;
+ }
+
+ void BrOrRet(FullDecoder* decoder, uint32_t depth) {
+ if (depth == decoder->control_depth() - 1) {
+ uint32_t ret_count = static_cast<uint32_t>(decoder->sig_->return_count());
+ TFNode** values =
+ ret_count == 0 ? nullptr
+ : GetNodes(decoder->stack_value(ret_count), ret_count);
+ BUILD(Return, ret_count, values);
+ } else {
+ Br(decoder, decoder->control_at(depth));
+ }
+ }
+
+ void Br(FullDecoder* decoder, Control* target) {
+ MergeValuesInto(decoder, target, target->br_merge());
+ }
+
+ void BrIf(FullDecoder* decoder, const Value& cond, uint32_t depth) {
+ SsaEnv* fenv = ssa_env_;
+ SsaEnv* tenv = Split(decoder, fenv);
+ fenv->SetNotMerged();
+ BUILD(BranchNoHint, cond.node, &tenv->control, &fenv->control);
+ SetEnv(tenv);
+ BrOrRet(decoder, depth);
+ SetEnv(fenv);
+ }
+
+ void BrTable(FullDecoder* decoder, const BranchTableImmediate<validate>& imm,
+ const Value& key) {
+ if (imm.table_count == 0) {
+ // Only a default target. Do the equivalent of br.
+ uint32_t target = BranchTableIterator<validate>(decoder, imm).next();
+ BrOrRet(decoder, target);
+ return;
+ }
+
+ SsaEnv* branch_env = ssa_env_;
+ // Build branches to the various blocks based on the table.
+ TFNode* sw = BUILD(Switch, imm.table_count + 1, key.node);
+
+ SsaEnv* copy = Steal(decoder->zone(), branch_env);
+ SetEnv(copy);
+ BranchTableIterator<validate> iterator(decoder, imm);
+ while (iterator.has_next()) {
+ uint32_t i = iterator.cur_index();
+ uint32_t target = iterator.next();
+ SetEnv(Split(decoder, copy));
+ ssa_env_->control =
+ (i == imm.table_count) ? BUILD(IfDefault, sw) : BUILD(IfValue, i, sw);
+ BrOrRet(decoder, target);
+ }
+ DCHECK(decoder->ok());
+ SetEnv(branch_env);
+ }
+
+ void Else(FullDecoder* decoder, Control* if_block) {
+ if (if_block->reachable()) {
+ // Merge the if branch into the end merge.
+ MergeValuesInto(decoder, if_block, &if_block->end_merge);
+ }
+ SetEnv(if_block->false_env);
+ }
+
+ void LoadMem(FullDecoder* decoder, LoadType type,
+ const MemoryAccessImmediate<validate>& imm, const Value& index,
+ Value* result) {
+ result->node =
+ BUILD(LoadMem, type.value_type(), type.mem_type(), index.node,
+ imm.offset, imm.alignment, decoder->position());
+ }
+
+ void StoreMem(FullDecoder* decoder, StoreType type,
+ const MemoryAccessImmediate<validate>& imm, const Value& index,
+ const Value& value) {
+ BUILD(StoreMem, type.mem_rep(), index.node, imm.offset, imm.alignment,
+ value.node, decoder->position(), type.value_type());
+ }
+
+ void CurrentMemoryPages(FullDecoder* decoder, Value* result) {
+ result->node = BUILD(CurrentMemoryPages);
+ }
+
+ void MemoryGrow(FullDecoder* decoder, const Value& value, Value* result) {
+ result->node = BUILD(MemoryGrow, value.node);
+ // Always reload the instance cache after growing memory.
+ LoadContextIntoSsa(ssa_env_);
+ }
+
+ void CallDirect(FullDecoder* decoder,
+ const CallFunctionImmediate<validate>& imm,
+ const Value args[], Value returns[]) {
+ DoCall(decoder, nullptr, imm.sig, imm.index, args, returns);
+ }
+
+ void CallIndirect(FullDecoder* decoder, const Value& index,
+ const CallIndirectImmediate<validate>& imm,
+ const Value args[], Value returns[]) {
+ DoCall(decoder, index.node, imm.sig, imm.sig_index, args, returns);
+ }
+
+ void SimdOp(FullDecoder* decoder, WasmOpcode opcode, Vector<Value> args,
+ Value* result) {
+ TFNode** inputs = GetNodes(args);
+ TFNode* node = BUILD(SimdOp, opcode, inputs);
+ if (result) result->node = node;
+ }
+
+ void SimdLaneOp(FullDecoder* decoder, WasmOpcode opcode,
+ const SimdLaneImmediate<validate> imm, Vector<Value> inputs,
+ Value* result) {
+ TFNode** nodes = GetNodes(inputs);
+ result->node = BUILD(SimdLaneOp, opcode, imm.lane, nodes);
+ }
+
+ void SimdShiftOp(FullDecoder* decoder, WasmOpcode opcode,
+ const SimdShiftImmediate<validate> imm, const Value& input,
+ Value* result) {
+ TFNode* inputs[] = {input.node};
+ result->node = BUILD(SimdShiftOp, opcode, imm.shift, inputs);
+ }
+
+ void Simd8x16ShuffleOp(FullDecoder* decoder,
+ const Simd8x16ShuffleImmediate<validate>& imm,
+ const Value& input0, const Value& input1,
+ Value* result) {
+ TFNode* input_nodes[] = {input0.node, input1.node};
+ result->node = BUILD(Simd8x16ShuffleOp, imm.shuffle, input_nodes);
+ }
+
+ void Throw(FullDecoder* decoder, const ExceptionIndexImmediate<validate>& imm,
+ const Vector<Value>& value_args) {
+ int count = value_args.length();
+ ZoneVector<TFNode*> args(count, decoder->zone());
+ for (int i = 0; i < count; ++i) {
+ args[i] = value_args[i].node;
+ }
+ BUILD(Throw, imm.index, imm.exception, VectorOf(args));
+ builder_->TerminateThrow(ssa_env_->effect, ssa_env_->control);
+ }
+
+ void Rethrow(FullDecoder* decoder, const Value& exception) {
+ BUILD(Rethrow, exception.node);
+ builder_->TerminateThrow(ssa_env_->effect, ssa_env_->control);
+ }
+
+ void BrOnException(FullDecoder* decoder, const Value& exception,
+ const ExceptionIndexImmediate<validate>& imm,
+ uint32_t depth, Vector<Value> values) {
+ TFNode* if_match = nullptr;
+ TFNode* if_no_match = nullptr;
+
+ // Get the exception tag and see if it matches the expected one.
+ TFNode* caught_tag = BUILD(GetExceptionTag, exception.node);
+ TFNode* exception_tag = BUILD(LoadExceptionTagFromTable, imm.index);
+ TFNode* compare = BUILD(ExceptionTagEqual, caught_tag, exception_tag);
+ BUILD(BranchNoHint, compare, &if_match, &if_no_match);
+ SsaEnv* if_no_match_env = Split(decoder, ssa_env_);
+ SsaEnv* if_match_env = Steal(decoder->zone(), ssa_env_);
+ if_no_match_env->control = if_no_match;
+ if_match_env->control = if_match;
+
+ // If the tags match we extract the values from the exception object and
+ // push them onto the operand stack using the passed {values} vector.
+ SetEnv(if_match_env);
+ // TODO(mstarzinger): Can't use BUILD() here, GetExceptionValues() returns
+ // TFNode** rather than TFNode*. Fix to add landing pads.
+ TFNode** caught_values =
+ builder_->GetExceptionValues(exception.node, imm.exception);
+ for (size_t i = 0, e = values.size(); i < e; ++i) {
+ values[i].node = caught_values[i];
+ }
+ BrOrRet(decoder, depth);
+
+ // If the tags don't match we fall-through here.
+ SetEnv(if_no_match_env);
+ }
+
+ void Catch(FullDecoder* decoder, Control* block, Value* exception) {
+ DCHECK(block->is_try_catch());
+
+ current_catch_ = block->previous_catch; // Pop try scope.
+
+    // The catch block is unreachable if nothing in the try block can throw.
+    // We only build a landing pad if some node in the try block can
+ // (possibly) throw. Otherwise the catch environments remain empty.
+ if (!block->try_info->might_throw()) {
+ block->reachability = kSpecOnlyReachable;
+ return;
+ }
+
+ SetEnv(block->try_info->catch_env);
+ DCHECK_NOT_NULL(block->try_info->exception);
+ exception->node = block->try_info->exception;
+ }
+
+ void AtomicOp(FullDecoder* decoder, WasmOpcode opcode, Vector<Value> args,
+ const MemoryAccessImmediate<validate>& imm, Value* result) {
+ TFNode** inputs = GetNodes(args);
+ TFNode* node = BUILD(AtomicOp, opcode, inputs, imm.alignment, imm.offset,
+ decoder->position());
+ if (result) result->node = node;
+ }
+
+ void MemoryInit(FullDecoder* decoder,
+ const MemoryInitImmediate<validate>& imm, const Value& dst,
+ const Value& src, const Value& size) {
+ BUILD(MemoryInit, imm.data_segment_index, dst.node, src.node, size.node,
+ decoder->position());
+ }
+ void MemoryDrop(FullDecoder* decoder,
+ const MemoryDropImmediate<validate>& imm) {
+ BUILD(MemoryDrop, imm.index, decoder->position());
+ }
+ void MemoryCopy(FullDecoder* decoder,
+ const MemoryIndexImmediate<validate>& imm, const Value& dst,
+ const Value& src, const Value& size) {
+ BUILD(MemoryCopy, dst.node, src.node, size.node, decoder->position());
+ }
+ void MemoryFill(FullDecoder* decoder,
+ const MemoryIndexImmediate<validate>& imm, const Value& dst,
+ const Value& value, const Value& size) {
+ BUILD(MemoryFill, dst.node, value.node, size.node, decoder->position());
+ }
+ void TableInit(FullDecoder* decoder, const TableInitImmediate<validate>& imm,
+ Vector<Value> args) {
+ BUILD(TableInit, imm.table.index, imm.elem_segment_index, args[0].node,
+ args[1].node, args[2].node, decoder->position());
+ }
+ void TableDrop(FullDecoder* decoder,
+ const TableDropImmediate<validate>& imm) {
+ BUILD(TableDrop, imm.index, decoder->position());
+ }
+ void TableCopy(FullDecoder* decoder, const TableIndexImmediate<validate>& imm,
+ Vector<Value> args) {
+ BUILD(TableCopy, imm.index, args[0].node, args[1].node, args[2].node,
+ decoder->position());
+ }
+
+ private:
+ SsaEnv* ssa_env_;
+ compiler::WasmGraphBuilder* builder_;
+ uint32_t current_catch_ = kNullCatch;
+
+ TryInfo* current_try_info(FullDecoder* decoder) {
+ return decoder->control_at(decoder->control_depth() - 1 - current_catch_)
+ ->try_info;
+ }
+
+ TFNode** GetNodes(Value* values, size_t count) {
+ TFNode** nodes = builder_->Buffer(count);
+ for (size_t i = 0; i < count; ++i) {
+ nodes[i] = values[i].node;
+ }
+ return nodes;
+ }
+
+ TFNode** GetNodes(Vector<Value> values) {
+ return GetNodes(values.start(), values.size());
+ }
+
+ void SetEnv(SsaEnv* env) {
+#if DEBUG
+ if (FLAG_trace_wasm_decoder) {
+ char state = 'X';
+ if (env) {
+ switch (env->state) {
+ case SsaEnv::kReached:
+ state = 'R';
+ break;
+ case SsaEnv::kUnreachable:
+ state = 'U';
+ break;
+ case SsaEnv::kMerged:
+ state = 'M';
+ break;
+ case SsaEnv::kControlEnd:
+ state = 'E';
+ break;
+ }
+ }
+ PrintF("{set_env = %p, state = %c", static_cast<void*>(env), state);
+ if (env && env->control) {
+ PrintF(", control = ");
+ compiler::WasmGraphBuilder::PrintDebugName(env->control);
+ }
+ PrintF("}\n");
+ }
+#endif
+ ssa_env_ = env;
+ // TODO(wasm): combine the control and effect pointers with instance cache.
+ builder_->set_control_ptr(&env->control);
+ builder_->set_effect_ptr(&env->effect);
+ builder_->set_instance_cache(&env->instance_cache);
+ }
+
+ TFNode* CheckForException(FullDecoder* decoder, TFNode* node) {
+ if (node == nullptr) return nullptr;
+
+ const bool inside_try_scope = current_catch_ != kNullCatch;
+
+ if (!inside_try_scope) return node;
+
+ TFNode* if_success = nullptr;
+ TFNode* if_exception = nullptr;
+ if (!builder_->ThrowsException(node, &if_success, &if_exception)) {
+ return node;
+ }
+
+ SsaEnv* success_env = Steal(decoder->zone(), ssa_env_);
+ success_env->control = if_success;
+
+ SsaEnv* exception_env = Split(decoder, success_env);
+ exception_env->control = if_exception;
+ TryInfo* try_info = current_try_info(decoder);
+ Goto(decoder, exception_env, try_info->catch_env);
+ TFNode* exception = try_info->exception;
+ if (exception == nullptr) {
+ DCHECK_EQ(SsaEnv::kReached, try_info->catch_env->state);
+ try_info->exception = if_exception;
+ } else {
+ DCHECK_EQ(SsaEnv::kMerged, try_info->catch_env->state);
+ try_info->exception = builder_->CreateOrMergeIntoPhi(
+ MachineRepresentation::kWord32, try_info->catch_env->control,
+ try_info->exception, if_exception);
+ }
+
+ SetEnv(success_env);
+ return node;
+ }
+
+ TFNode* DefaultValue(ValueType type) {
+ switch (type) {
+ case kWasmI32:
+ return builder_->Int32Constant(0);
+ case kWasmI64:
+ return builder_->Int64Constant(0);
+ case kWasmF32:
+ return builder_->Float32Constant(0);
+ case kWasmF64:
+ return builder_->Float64Constant(0);
+ case kWasmS128:
+ return builder_->S128Zero();
+ case kWasmAnyRef:
+ case kWasmExceptRef:
+ return builder_->RefNull();
+ default:
+ UNREACHABLE();
+ }
+ }
+
+ void MergeValuesInto(FullDecoder* decoder, Control* c, Merge<Value>* merge) {
+ DCHECK(merge == &c->start_merge || merge == &c->end_merge);
+
+ SsaEnv* target = c->end_env;
+ const bool first = target->state == SsaEnv::kUnreachable;
+ Goto(decoder, ssa_env_, target);
+
+ if (merge->arity == 0) return;
+
+ uint32_t avail =
+ decoder->stack_size() - decoder->control_at(0)->stack_depth;
+ DCHECK_GE(avail, merge->arity);
+ uint32_t start = avail >= merge->arity ? 0 : merge->arity - avail;
+ Value* stack_values = decoder->stack_value(merge->arity);
+ for (uint32_t i = start; i < merge->arity; ++i) {
+ Value& val = stack_values[i];
+ Value& old = (*merge)[i];
+ DCHECK_NOT_NULL(val.node);
+ DCHECK(val.type == old.type || val.type == kWasmVar);
+ old.node = first ? val.node
+ : builder_->CreateOrMergeIntoPhi(
+ ValueTypes::MachineRepresentationFor(old.type),
+ target->control, old.node, val.node);
+ }
+ }
+
+ void Goto(FullDecoder* decoder, SsaEnv* from, SsaEnv* to) {
+ DCHECK_NOT_NULL(to);
+ switch (to->state) {
+ case SsaEnv::kUnreachable: { // Overwrite destination.
+ to->state = SsaEnv::kReached;
+ to->locals = from->locals;
+ to->control = from->control;
+ to->effect = from->effect;
+ to->instance_cache = from->instance_cache;
+ break;
+ }
+ case SsaEnv::kReached: { // Create a new merge.
+ to->state = SsaEnv::kMerged;
+ // Merge control.
+ TFNode* controls[] = {to->control, from->control};
+ TFNode* merge = builder_->Merge(2, controls);
+ to->control = merge;
+ // Merge effects.
+ if (from->effect != to->effect) {
+ TFNode* effects[] = {to->effect, from->effect, merge};
+ to->effect = builder_->EffectPhi(2, effects, merge);
+ }
+ // Merge SSA values.
+ for (int i = decoder->num_locals() - 1; i >= 0; i--) {
+ TFNode* a = to->locals[i];
+ TFNode* b = from->locals[i];
+ if (a != b) {
+ TFNode* vals[] = {a, b};
+ to->locals[i] =
+ builder_->Phi(decoder->GetLocalType(i), 2, vals, merge);
+ }
+ }
+ // Start a new merge from the instance cache.
+ builder_->NewInstanceCacheMerge(&to->instance_cache,
+ &from->instance_cache, merge);
+ break;
+ }
+ case SsaEnv::kMerged: {
+ TFNode* merge = to->control;
+ // Extend the existing merge control node.
+ builder_->AppendToMerge(merge, from->control);
+ // Merge effects.
+ to->effect = builder_->CreateOrMergeIntoEffectPhi(merge, to->effect,
+ from->effect);
+ // Merge locals.
+ for (int i = decoder->num_locals() - 1; i >= 0; i--) {
+ to->locals[i] = builder_->CreateOrMergeIntoPhi(
+ ValueTypes::MachineRepresentationFor(decoder->GetLocalType(i)),
+ merge, to->locals[i], from->locals[i]);
+ }
+ // Merge the instance caches.
+ builder_->MergeInstanceCacheInto(&to->instance_cache,
+ &from->instance_cache, merge);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+ return from->Kill();
+ }
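  // Worked example for the three cases above: the first branch into a fresh
  // (kUnreachable) target simply adopts the source's state; a second branch
  // builds a two-input Merge plus EffectPhi/Phi nodes (kMerged); every
  // further branch extends that merge in place via AppendToMerge and
  // CreateOrMergeIntoPhi.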
+
+ SsaEnv* PrepareForLoop(FullDecoder* decoder, SsaEnv* env) {
+ env->state = SsaEnv::kMerged;
+
+ env->control = builder_->Loop(env->control);
+ env->effect = builder_->EffectPhi(1, &env->effect, env->control);
+ builder_->TerminateLoop(env->effect, env->control);
+    // The '+ 1' adds an extra slot used to mark the instance cache as
+    // assigned.
+ BitVector* assigned = WasmDecoder<validate>::AnalyzeLoopAssignment(
+ decoder, decoder->pc(), decoder->total_locals() + 1, decoder->zone());
+ if (decoder->failed()) return env;
+ if (assigned != nullptr) {
+ // Only introduce phis for variables assigned in this loop.
+ int instance_cache_index = decoder->total_locals();
+ for (int i = decoder->num_locals() - 1; i >= 0; i--) {
+ if (!assigned->Contains(i)) continue;
+ env->locals[i] = builder_->Phi(decoder->GetLocalType(i), 1,
+ &env->locals[i], env->control);
+ }
+ // Introduce phis for instance cache pointers if necessary.
+ if (assigned->Contains(instance_cache_index)) {
+ builder_->PrepareInstanceCacheForLoop(&env->instance_cache,
+ env->control);
+ }
+
+ SsaEnv* loop_body_env = Split(decoder, env);
+ builder_->StackCheck(decoder->position(), &(loop_body_env->effect),
+ &(loop_body_env->control));
+ return loop_body_env;
+ }
+
+ // Conservatively introduce phis for all local variables.
+ for (int i = decoder->num_locals() - 1; i >= 0; i--) {
+ env->locals[i] = builder_->Phi(decoder->GetLocalType(i), 1,
+ &env->locals[i], env->control);
+ }
+
+ // Conservatively introduce phis for instance cache.
+ builder_->PrepareInstanceCacheForLoop(&env->instance_cache, env->control);
+
+ SsaEnv* loop_body_env = Split(decoder, env);
+ builder_->StackCheck(decoder->position(), &loop_body_env->effect,
+ &loop_body_env->control);
+ return loop_body_env;
+ }
+
+ // Create a complete copy of {from}.
+ SsaEnv* Split(FullDecoder* decoder, SsaEnv* from) {
+ DCHECK_NOT_NULL(from);
+ SsaEnv* result =
+ reinterpret_cast<SsaEnv*>(decoder->zone()->New(sizeof(SsaEnv)));
+ size_t size = sizeof(TFNode*) * decoder->num_locals();
+ result->control = from->control;
+ result->effect = from->effect;
+
+ result->state = SsaEnv::kReached;
+ result->locals =
+ size > 0 ? reinterpret_cast<TFNode**>(decoder->zone()->New(size))
+ : nullptr;
+ memcpy(result->locals, from->locals, size);
+ result->instance_cache = from->instance_cache;
+
+ return result;
+ }
+
+ // Create a copy of {from} that steals its state and leaves {from}
+ // unreachable.
+ SsaEnv* Steal(Zone* zone, SsaEnv* from) {
+ DCHECK_NOT_NULL(from);
+ SsaEnv* result = reinterpret_cast<SsaEnv*>(zone->New(sizeof(SsaEnv)));
+ result->state = SsaEnv::kReached;
+ result->locals = from->locals;
+ result->control = from->control;
+ result->effect = from->effect;
+ result->instance_cache = from->instance_cache;
+ from->Kill(SsaEnv::kUnreachable);
+ return result;
+ }
+
+ // Create an unreachable environment.
+ SsaEnv* UnreachableEnv(Zone* zone) {
+ SsaEnv* result = reinterpret_cast<SsaEnv*>(zone->New(sizeof(SsaEnv)));
+ result->state = SsaEnv::kUnreachable;
+ result->control = nullptr;
+ result->effect = nullptr;
+ result->locals = nullptr;
+ result->instance_cache = {};
+ return result;
+ }
+
+ void DoCall(FullDecoder* decoder, TFNode* index_node, FunctionSig* sig,
+ uint32_t index, const Value args[], Value returns[]) {
+ int param_count = static_cast<int>(sig->parameter_count());
+ TFNode** arg_nodes = builder_->Buffer(param_count + 1);
+ TFNode** return_nodes = nullptr;
+ arg_nodes[0] = index_node;
+ for (int i = 0; i < param_count; ++i) {
+ arg_nodes[i + 1] = args[i].node;
+ }
+ if (index_node) {
+ BUILD(CallIndirect, index, arg_nodes, &return_nodes, decoder->position());
+ } else {
+ BUILD(CallDirect, index, arg_nodes, &return_nodes, decoder->position());
+ }
+ int return_count = static_cast<int>(sig->return_count());
+ for (int i = 0; i < return_count; ++i) {
+ returns[i].node = return_nodes[i];
+ }
+ // The invoked function could have used grow_memory, so we need to
+ // reload mem_size and mem_start.
+ LoadContextIntoSsa(ssa_env_);
+ }
+};
+
+} // namespace
+
+DecodeResult BuildTFGraph(AccountingAllocator* allocator,
+ const WasmFeatures& enabled,
+ const wasm::WasmModule* module,
+ compiler::WasmGraphBuilder* builder,
+ WasmFeatures* detected, const FunctionBody& body,
+ compiler::NodeOriginTable* node_origins) {
+ Zone zone(allocator, ZONE_NAME);
+ WasmFullDecoder<Decoder::kValidate, WasmGraphBuildingInterface> decoder(
+ &zone, module, enabled, detected, body, builder);
+ if (node_origins) {
+ builder->AddBytecodePositionDecorator(node_origins, &decoder);
+ }
+ decoder.Decode();
+ if (node_origins) {
+ builder->RemoveBytecodePositionDecorator();
+ }
+ return decoder.toResult(nullptr);
+}
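
// Caller-side sketch (hypothetical wrapper {TryBuildGraph}): TurboFan drives
// the full decoder through this entry point and checks the DecodeResult.
bool TryBuildGraph(AccountingAllocator* allocator, const WasmFeatures& enabled,
                   const WasmModule* module,
                   compiler::WasmGraphBuilder* builder, WasmFeatures* detected,
                   const FunctionBody& body) {
  DecodeResult result = BuildTFGraph(allocator, enabled, module, builder,
                                     detected, body, /*node_origins=*/nullptr);
  return result.ok();
}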
+
+#undef BUILD
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/wasm/graph-builder-interface.h b/deps/v8/src/wasm/graph-builder-interface.h
new file mode 100644
index 0000000000..53885fef38
--- /dev/null
+++ b/deps/v8/src/wasm/graph-builder-interface.h
@@ -0,0 +1,36 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_WASM_GRAPH_BUILDER_INTERFACE_H_
+#define V8_WASM_GRAPH_BUILDER_INTERFACE_H_
+
+#include "src/wasm/decoder.h"
+#include "src/wasm/wasm-opcodes.h"
+#include "src/wasm/wasm-result.h"
+
+namespace v8 {
+namespace internal {
+
+namespace compiler { // external declarations from compiler.
+class NodeOriginTable;
+class WasmGraphBuilder;
+} // namespace compiler
+
+namespace wasm {
+
+struct FunctionBody;
+struct WasmModule;
+struct WasmFeatures;
+
+DecodeResult BuildTFGraph(AccountingAllocator* allocator,
+ const WasmFeatures& enabled, const WasmModule* module,
+ compiler::WasmGraphBuilder* builder,
+ WasmFeatures* detected, const FunctionBody& body,
+ compiler::NodeOriginTable* node_origins);
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
+
+#endif // V8_WASM_GRAPH_BUILDER_INTERFACE_H_
diff --git a/deps/v8/src/wasm/js-to-wasm-wrapper-cache-inl.h b/deps/v8/src/wasm/js-to-wasm-wrapper-cache-inl.h
new file mode 100644
index 0000000000..c50183d33e
--- /dev/null
+++ b/deps/v8/src/wasm/js-to-wasm-wrapper-cache-inl.h
@@ -0,0 +1,41 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_WASM_JS_TO_WASM_WRAPPER_CACHE_INL_H_
+#define V8_WASM_JS_TO_WASM_WRAPPER_CACHE_INL_H_
+
+#include "src/compiler/wasm-compiler.h"
+#include "src/counters.h"
+#include "src/wasm/value-type.h"
+#include "src/wasm/wasm-code-manager.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+class JSToWasmWrapperCache {
+ public:
+ Handle<Code> GetOrCompileJSToWasmWrapper(Isolate* isolate, FunctionSig* sig,
+ bool is_import) {
+ std::pair<bool, FunctionSig> key(is_import, *sig);
+ Handle<Code>& cached = cache_[key];
+ if (cached.is_null()) {
+ cached = compiler::CompileJSToWasmWrapper(isolate, sig, is_import)
+ .ToHandleChecked();
+ }
+ return cached;
+ }
+
+ private:
+ // We generate different code for calling imports than calling wasm functions
+ // in this module. Both are cached separately.
+ using CacheKey = std::pair<bool, FunctionSig>;
+ std::unordered_map<CacheKey, Handle<Code>, base::hash<CacheKey>> cache_;
+};
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
+
+#endif // V8_WASM_JS_TO_WASM_WRAPPER_CACHE_INL_H_
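
// Usage sketch for the cache above (hypothetical isolate and signature): two
// lookups with the same (is_import, *sig) key compile the wrapper only once.
Handle<Code> WrapperForExport(Isolate* isolate, FunctionSig* sig) {
  JSToWasmWrapperCache cache;
  Handle<Code> first = cache.GetOrCompileJSToWasmWrapper(isolate, sig, false);
  Handle<Code> again = cache.GetOrCompileJSToWasmWrapper(isolate, sig, false);
  DCHECK(first.is_identical_to(again));  // Second lookup hit the cache.
  return again;
}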
diff --git a/deps/v8/src/wasm/jump-table-assembler.cc b/deps/v8/src/wasm/jump-table-assembler.cc
index 029044c005..462e3a4b5a 100644
--- a/deps/v8/src/wasm/jump-table-assembler.cc
+++ b/deps/v8/src/wasm/jump-table-assembler.cc
@@ -53,7 +53,7 @@ void JumpTableAssembler::NopBytes(int bytes) {
#elif V8_TARGET_ARCH_IA32
void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
Address lazy_compile_target) {
- mov(edi, func_index); // 5 bytes
+ mov(kWasmCompileLazyFuncIndexRegister, func_index); // 5 bytes
jmp(lazy_compile_target, RelocInfo::NONE); // 5 bytes
}
@@ -69,10 +69,10 @@ void JumpTableAssembler::NopBytes(int bytes) {
#elif V8_TARGET_ARCH_ARM
void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
Address lazy_compile_target) {
- // Load function index to r4.
+ // Load function index to a register.
// This generates [movw, movt] on ARMv7 and later, [ldr, constant pool marker,
// constant] on ARMv6.
- Move32BitImmediate(r4, Operand(func_index));
+ Move32BitImmediate(kWasmCompileLazyFuncIndexRegister, Operand(func_index));
// EmitJumpSlot emits either [b], [movw, movt, mov] (ARMv7+), or [ldr,
// constant].
// In total, this is <=5 instructions on all architectures.
@@ -99,7 +99,7 @@ void JumpTableAssembler::NopBytes(int bytes) {
#elif V8_TARGET_ARCH_ARM64
void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
Address lazy_compile_target) {
- Mov(w8, func_index); // max. 2 instr
+ Mov(kWasmCompileLazyFuncIndexRegister.W(), func_index); // max. 2 instr
Jump(lazy_compile_target, RelocInfo::NONE); // 1 instr
}
@@ -122,7 +122,7 @@ void JumpTableAssembler::NopBytes(int bytes) {
void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
Address lazy_compile_target) {
// Load function index to r7. 6 bytes
- lgfi(r7, Operand(func_index));
+ lgfi(kWasmCompileLazyFuncIndexRegister, Operand(func_index));
// Jump to {lazy_compile_target}. 6 bytes or 12 bytes
mov(r1, Operand(lazy_compile_target));
b(r1); // 2 bytes
@@ -144,7 +144,7 @@ void JumpTableAssembler::NopBytes(int bytes) {
#elif V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
Address lazy_compile_target) {
- li(t0, func_index); // max. 2 instr
+ li(kWasmCompileLazyFuncIndexRegister, func_index); // max. 2 instr
// Jump produces max. 4 instructions for 32-bit platform
// and max. 6 instructions for 64-bit platform.
Jump(lazy_compile_target, RelocInfo::NONE);
@@ -165,8 +165,8 @@ void JumpTableAssembler::NopBytes(int bytes) {
#elif V8_TARGET_ARCH_PPC
void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
Address lazy_compile_target) {
- // Load function index to r8. max 5 instrs
- mov(r15, Operand(func_index));
+  // Load function index to a register. max 5 instrs
+ mov(kWasmCompileLazyFuncIndexRegister, Operand(func_index));
// Jump to {lazy_compile_target}. max 5 instrs
mov(r0, Operand(lazy_compile_target));
mtctr(r0);
diff --git a/deps/v8/src/wasm/jump-table-assembler.h b/deps/v8/src/wasm/jump-table-assembler.h
index a83a7d5b21..68fe596660 100644
--- a/deps/v8/src/wasm/jump-table-assembler.h
+++ b/deps/v8/src/wasm/jump-table-assembler.h
@@ -84,8 +84,9 @@ class JumpTableAssembler : public TurboAssembler {
// Instantiate a {JumpTableAssembler} for patching.
explicit JumpTableAssembler(Address slot_addr, int size = 256)
: TurboAssembler(nullptr, JumpTableAssemblerOptions(),
- reinterpret_cast<void*>(slot_addr), size,
- CodeObjectRequired::kNo) {}
+ CodeObjectRequired::kNo,
+ ExternalAssemblerBuffer(
+ reinterpret_cast<uint8_t*>(slot_addr), size)) {}
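
// Patching sketch (hypothetical call site): with ExternalAssemblerBuffer the
// assembler writes directly into the live jump-table memory at {slot_addr}
// instead of assembling into an internal buffer and copying afterwards.
void PatchLazyCompileSlot(Address slot_addr, uint32_t func_index,
                          Address lazy_compile_target) {
  JumpTableAssembler jtasm(slot_addr);
  jtasm.EmitLazyCompileJumpSlot(func_index, lazy_compile_target);
}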
// To allow concurrent patching of the jump table entries, we need to ensure
// that the instruction containing the call target does not cross cache-line
diff --git a/deps/v8/src/wasm/module-compiler.cc b/deps/v8/src/wasm/module-compiler.cc
index 61ec6bc32a..bdceb0b73b 100644
--- a/deps/v8/src/wasm/module-compiler.cc
+++ b/deps/v8/src/wasm/module-compiler.cc
@@ -6,6 +6,7 @@
#include "src/api.h"
#include "src/asmjs/asm-js.h"
+#include "src/base/enum-set.h"
#include "src/base/optional.h"
#include "src/base/template-utils.h"
#include "src/base/utils/random-number-generator.h"
@@ -13,22 +14,21 @@
#include "src/counters.h"
#include "src/identity-map.h"
#include "src/property-descriptor.h"
+#include "src/task-utils.h"
#include "src/tracing/trace-event.h"
#include "src/trap-handler/trap-handler.h"
+#include "src/wasm/js-to-wasm-wrapper-cache-inl.h"
#include "src/wasm/module-decoder.h"
#include "src/wasm/streaming-decoder.h"
#include "src/wasm/wasm-code-manager.h"
#include "src/wasm/wasm-engine.h"
+#include "src/wasm/wasm-import-wrapper-cache-inl.h"
#include "src/wasm/wasm-js.h"
#include "src/wasm/wasm-limits.h"
#include "src/wasm/wasm-memory.h"
#include "src/wasm/wasm-objects-inl.h"
#include "src/wasm/wasm-result.h"
-
-#define TRACE(...) \
- do { \
- if (FLAG_trace_wasm_instances) PrintF(__VA_ARGS__); \
- } while (false)
+#include "src/wasm/wasm-serialization.h"
#define TRACE_COMPILE(...) \
do { \
@@ -49,32 +49,32 @@ namespace v8 {
namespace internal {
namespace wasm {
-enum class CompilationEvent : uint8_t {
- kFinishedBaselineCompilation,
- kFinishedTopTierCompilation,
- kFailedCompilation
-};
+namespace {
enum class CompileMode : uint8_t { kRegular, kTiering };
-// The CompilationState keeps track of the compilation state of the
+// The {CompilationStateImpl} keeps track of the compilation state of the
// owning NativeModule, i.e. which functions are left to be compiled.
// It contains a task manager to allow parallel and asynchronous background
// compilation of functions.
-class CompilationState {
+// Its public interface, {CompilationState}, lives in compilation-environment.h.
+class CompilationStateImpl {
public:
- CompilationState(internal::Isolate*, const ModuleEnv&);
- ~CompilationState();
+ CompilationStateImpl(internal::Isolate*, NativeModule*);
+ ~CompilationStateImpl();
+
+ // Cancel all background compilation and wait for all tasks to finish. Call
+ // this before destructing this object.
+ void CancelAndWait();
  // Set the number of compilation units expected to be executed. Needs to be
// set before {AddCompilationUnits} is run, which triggers background
// compilation.
void SetNumberOfFunctionsToCompile(size_t num_functions);
- // Set the callback function to be called on compilation events. Needs to be
+ // Add the callback function to be called on compilation events. Needs to be
// set before {AddCompilationUnits} is run.
- void SetCallback(
- std::function<void(CompilationEvent, ErrorThrower*)> callback);
+ void AddCallback(CompilationState::callback_t);
// Inserts new functions to compile and kicks off compilation.
void AddCompilationUnits(
@@ -85,13 +85,12 @@ class CompilationState {
bool HasCompilationUnitToFinish();
- void OnError(ErrorThrower* thrower);
- void OnFinishedUnit();
- void ScheduleUnitForFinishing(std::unique_ptr<WasmCompilationUnit> unit,
- ExecutionTier mode);
+ void OnFinishedUnit(ExecutionTier, WasmCode*);
+ void ReportDetectedFeatures(const WasmFeatures& detected);
void OnBackgroundTaskStopped(const WasmFeatures& detected);
void PublishDetectedFeatures(Isolate* isolate, const WasmFeatures& detected);
+ void RestartBackgroundCompileTask();
void RestartBackgroundTasks(size_t max = std::numeric_limits<size_t>::max());
// Only one foreground thread (finisher) is allowed to run at a time.
// {SetFinisherIsRunning} returns whether the flag changed its state.
@@ -100,41 +99,137 @@ class CompilationState {
void Abort();
+ void SetError(uint32_t func_index, const WasmError& error);
+
Isolate* isolate() const { return isolate_; }
bool failed() const {
- base::LockGuard<base::Mutex> guard(&mutex_);
- return failed_;
+ return compile_error_.load(std::memory_order_relaxed) != nullptr;
}
bool baseline_compilation_finished() const {
- return baseline_compilation_finished_;
+ base::MutexGuard guard(&mutex_);
+ return outstanding_baseline_units_ == 0 ||
+ (compile_mode_ == CompileMode::kTiering &&
+ outstanding_tiering_units_ == 0);
}
- WasmEngine* wasm_engine() const { return wasm_engine_; }
CompileMode compile_mode() const { return compile_mode_; }
- ModuleEnv* module_env() { return &module_env_; }
WasmFeatures* detected_features() { return &detected_features_; }
+ // Call {GetCompileError} from foreground threads only, since we access
+ // NativeModule::wire_bytes, which is set from the foreground thread once the
+ // stream has finished.
+ WasmError GetCompileError() {
+ CompilationError* error = compile_error_.load(std::memory_order_acquire);
+ DCHECK_NOT_NULL(error);
+ std::ostringstream error_msg;
+ error_msg << "Compiling wasm function \"";
+ wasm::ModuleWireBytes wire_bytes(native_module_->wire_bytes());
+ wasm::WireBytesRef name_ref = native_module_->module()->LookupFunctionName(
+ wire_bytes, error->func_index);
+ if (name_ref.is_set()) {
+ wasm::WasmName name = wire_bytes.GetNameOrNull(name_ref);
+ error_msg.write(name.start(), name.length());
+ } else {
+ error_msg << "wasm-function[" << error->func_index << "]";
+ }
+ error_msg << "\" failed: " << error->error.message();
+ return WasmError{error->error.offset(), error_msg.str()};
+ }
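  // Example of the resulting message (assumed nameless function 17): the
  // returned WasmError keeps the original byte offset, and the text becomes
  //   Compiling wasm function "wasm-function[17]" failed: <original message>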
+
+ void SetWireBytesStorage(
+ std::shared_ptr<WireBytesStorage> wire_bytes_storage) {
+ base::MutexGuard guard(&mutex_);
+ wire_bytes_storage_ = wire_bytes_storage;
+ }
+
+ std::shared_ptr<WireBytesStorage> GetWireBytesStorage() const {
+ base::MutexGuard guard(&mutex_);
+ DCHECK_NOT_NULL(wire_bytes_storage_);
+ return wire_bytes_storage_;
+ }
+
private:
- void NotifyOnEvent(CompilationEvent event, ErrorThrower* thrower);
+ struct CompilationError {
+ uint32_t const func_index;
+ WasmError const error;
+ CompilationError(uint32_t func_index, WasmError error)
+ : func_index(func_index), error(std::move(error)) {}
+ };
+
+ class LogCodesTask : public CancelableTask {
+ public:
+ LogCodesTask(CancelableTaskManager* manager,
+ CompilationStateImpl* compilation_state, Isolate* isolate)
+ : CancelableTask(manager),
+ compilation_state_(compilation_state),
+ isolate_(isolate) {
+ // This task should only be created if we should actually log code.
+ DCHECK(WasmCode::ShouldBeLogged(isolate));
+ }
+
+ // Hold the compilation state {mutex_} when calling this method.
+ void AddCode(WasmCode* code) { code_to_log_.push_back(code); }
+
+ void RunInternal() override {
+ // Remove this task from the {CompilationStateImpl}. The next compilation
+ // that finishes will allocate and schedule a new task.
+ {
+ base::MutexGuard guard(&compilation_state_->mutex_);
+ DCHECK_EQ(this, compilation_state_->log_codes_task_);
+ compilation_state_->log_codes_task_ = nullptr;
+ }
+ // If by now we shouldn't log code any more, don't log it.
+ if (!WasmCode::ShouldBeLogged(isolate_)) return;
+ for (WasmCode* code : code_to_log_) {
+ code->LogCode(isolate_);
+ }
+ }
+
+ private:
+ CompilationStateImpl* const compilation_state_;
+ Isolate* const isolate_;
+ std::vector<WasmCode*> code_to_log_;
+ };
+
+ class FreeCallbacksTask : public CancelableTask {
+ public:
+ explicit FreeCallbacksTask(CompilationStateImpl* comp_state)
+ : CancelableTask(&comp_state->foreground_task_manager_),
+ compilation_state_(comp_state) {}
+
+ void RunInternal() override { compilation_state_->callbacks_.clear(); }
+
+ private:
+ CompilationStateImpl* const compilation_state_;
+ };
+
+ void NotifyOnEvent(CompilationEvent event, const WasmError* error);
std::vector<std::unique_ptr<WasmCompilationUnit>>& finish_units() {
- return baseline_compilation_finished_ ? tiering_finish_units_
- : baseline_finish_units_;
+ return baseline_compilation_finished() ? tiering_finish_units_
+ : baseline_finish_units_;
}
- // TODO(7423): Get rid of the Isolate field to make sure the CompilationState
- // can be shared across multiple Isolates.
+ // TODO(mstarzinger): Get rid of the Isolate field to make sure the
+ // {CompilationStateImpl} can be shared across multiple Isolates.
Isolate* const isolate_;
- WasmEngine* const wasm_engine_;
- // TODO(clemensh): Remove ModuleEnv, generate it when needed.
- ModuleEnv module_env_;
+ NativeModule* const native_module_;
const CompileMode compile_mode_;
- bool baseline_compilation_finished_ = false;
-
- // This mutex protects all information of this CompilationState which is being
- // accessed concurrently.
+ // Store the value of {WasmCode::ShouldBeLogged()} at creation time of the
+ // compilation state.
+ // TODO(wasm): We might lose log events if logging is enabled while
+ // compilation is running.
+ bool const should_log_code_;
+
+ // Compilation error, atomically updated, but at most once (nullptr -> error).
+ // Uses acquire-release semantics (acquire on load, release on update).
+ // For checking whether an error is set, relaxed semantics can be used.
+ std::atomic<CompilationError*> compile_error_{nullptr};
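  //
  // A sketch of the intended update protocol (assumed shape of {SetError},
  // declared above): compare-exchange from nullptr so only the first error
  // ever wins, and losing threads discard their allocation.
  //
  //   auto* new_error = new CompilationError(func_index, error);
  //   CompilationError* expected = nullptr;
  //   if (!compile_error_.compare_exchange_strong(expected, new_error,
  //                                               std::memory_order_acq_rel)) {
  //     delete new_error;  // Another thread stored its error first.
  //   }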
+
+ // This mutex protects all information of this {CompilationStateImpl} which is
+ // being accessed concurrently.
mutable base::Mutex mutex_;
//////////////////////////////////////////////////////////////////////////////
@@ -144,7 +239,6 @@ class CompilationState {
std::vector<std::unique_ptr<WasmCompilationUnit>> tiering_compilation_units_;
bool finisher_is_running_ = false;
- bool failed_ = false;
size_t num_background_tasks_ = 0;
std::vector<std::unique_ptr<WasmCompilationUnit>> baseline_finish_units_;
@@ -154,190 +248,90 @@ class CompilationState {
// as a module is being compiled.
WasmFeatures detected_features_ = kNoWasmFeatures;
+ // The foreground task to log finished wasm code. Is {nullptr} if no such task
+ // is currently scheduled.
+ LogCodesTask* log_codes_task_ = nullptr;
+
+ // Abstraction over the storage of the wire bytes. Held in a shared_ptr so
+ // that background compilation jobs can keep the storage alive while
+ // compiling.
+ std::shared_ptr<WireBytesStorage> wire_bytes_storage_;
+
+ size_t outstanding_baseline_units_ = 0;
+ size_t outstanding_tiering_units_ = 0;
+
// End of fields protected by {mutex_}.
//////////////////////////////////////////////////////////////////////////////
- // Callback function to be called on compilation events.
- std::function<void(CompilationEvent, ErrorThrower*)> callback_;
+ // Callback functions to be called on compilation events. Only accessible from
+ // the foreground thread.
+ std::vector<CompilationState::callback_t> callbacks_;
+
+ // Remember whether {Abort()} was called. When set from the foreground this
+ // ensures no more callbacks will be called afterwards. No guarantees when set
+ // from the background. Only needs to be atomic so that it can be set from
+ // foreground and background.
+ std::atomic<bool> aborted_{false};
CancelableTaskManager background_task_manager_;
CancelableTaskManager foreground_task_manager_;
std::shared_ptr<v8::TaskRunner> foreground_task_runner_;
const size_t max_background_tasks_ = 0;
-
- size_t outstanding_units_ = 0;
- size_t num_tiering_units_ = 0;
};
-namespace {
-
void UpdateFeatureUseCounts(Isolate* isolate, const WasmFeatures& detected) {
if (detected.threads) {
isolate->CountUsage(v8::Isolate::UseCounterFeature::kWasmThreadOpcodes);
}
}
-class JSToWasmWrapperCache {
- public:
- Handle<Code> GetOrCompileJSToWasmWrapper(Isolate* isolate, FunctionSig* sig,
- bool is_import) {
- std::pair<bool, FunctionSig> key(is_import, *sig);
- Handle<Code>& cached = cache_[key];
- if (cached.is_null()) {
- cached = compiler::CompileJSToWasmWrapper(isolate, sig, is_import)
- .ToHandleChecked();
- }
- return cached;
- }
-
- private:
-  // We generate different code for calling imports than for calling wasm
-  // functions in this module; both are cached separately.
- using CacheKey = std::pair<bool, FunctionSig>;
- std::unordered_map<CacheKey, Handle<Code>, base::hash<CacheKey>> cache_;
-};
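The removed {JSToWasmWrapperCache} is a plain memoization cache: the key is everything that changes the generated code (import-ness plus signature), and each wrapper is compiled at most once. A generic sketch of the same shape, with the signature collapsed to an integer id for brevity:

#include <functional>
#include <unordered_map>
#include <utility>

struct KeyHash {
  size_t operator()(const std::pair<bool, int>& key) const {
    return std::hash<int>()(key.second * 2 + key.first);
  }
};

class WrapperCache {
 public:
  int GetOrCompile(bool is_import, int sig_id) {
    auto key = std::make_pair(is_import, sig_id);
    auto it = cache_.find(key);
    if (it != cache_.end()) return it->second;  // hit: reuse
    int code = Compile(is_import, sig_id);      // miss: compile once
    cache_.emplace(key, code);
    return code;
  }

 private:
  // Stand-in for the real wrapper compilation.
  int Compile(bool is_import, int sig_id) { return sig_id * 2 + is_import; }

  std::unordered_map<std::pair<bool, int>, int, KeyHash> cache_;
};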
-
-// A helper class to simplify instantiating a module from a module object.
-// It closes over the {Isolate}, the {ErrorThrower}, etc.
-class InstanceBuilder {
- public:
- InstanceBuilder(Isolate* isolate, ErrorThrower* thrower,
- Handle<WasmModuleObject> module_object,
- MaybeHandle<JSReceiver> ffi,
- MaybeHandle<JSArrayBuffer> memory);
-
- // Build an instance, in all of its glory.
- MaybeHandle<WasmInstanceObject> Build();
- // Run the start function, if any.
- bool ExecuteStartFunction();
-
- private:
- // Represents the initialized state of a table.
- struct TableInstance {
- Handle<WasmTableObject> table_object; // WebAssembly.Table instance
- Handle<FixedArray> js_wrappers; // JSFunctions exported
- size_t table_size;
- };
-
- // A pre-evaluated value to use in import binding.
- struct SanitizedImport {
- Handle<String> module_name;
- Handle<String> import_name;
- Handle<Object> value;
- };
-
- Isolate* isolate_;
- const WasmFeatures enabled_;
- const WasmModule* const module_;
- ErrorThrower* thrower_;
- Handle<WasmModuleObject> module_object_;
- MaybeHandle<JSReceiver> ffi_;
- MaybeHandle<JSArrayBuffer> memory_;
- Handle<JSArrayBuffer> globals_;
- std::vector<TableInstance> table_instances_;
- std::vector<Handle<JSFunction>> js_wrappers_;
- std::vector<Handle<WasmExceptionObject>> exception_wrappers_;
- Handle<WasmExportedFunction> start_function_;
- JSToWasmWrapperCache js_to_wasm_cache_;
- std::vector<SanitizedImport> sanitized_imports_;
-
- UseTrapHandler use_trap_handler() const {
- return module_object_->native_module()->use_trap_handler() ? kUseTrapHandler
- : kNoTrapHandler;
- }
-
-// Helper routines to print out errors with imports.
-#define ERROR_THROWER_WITH_MESSAGE(TYPE) \
- void Report##TYPE(const char* error, uint32_t index, \
- Handle<String> module_name, Handle<String> import_name) { \
- thrower_->TYPE("Import #%d module=\"%s\" function=\"%s\" error: %s", \
- index, module_name->ToCString().get(), \
- import_name->ToCString().get(), error); \
- } \
- \
- MaybeHandle<Object> Report##TYPE(const char* error, uint32_t index, \
- Handle<String> module_name) { \
- thrower_->TYPE("Import #%d module=\"%s\" error: %s", index, \
- module_name->ToCString().get(), error); \
- return MaybeHandle<Object>(); \
- }
-
- ERROR_THROWER_WITH_MESSAGE(LinkError)
- ERROR_THROWER_WITH_MESSAGE(TypeError)
-
-#undef ERROR_THROWER_WITH_MESSAGE
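The removed macro stamps out one pair of {Report*} overloads per error kind, differing only in the {ErrorThrower} method they forward to. A compilable sketch of the same stamping technique, with deliberately simplified signatures:

#include <cstdint>
#include <cstdio>

#define ERROR_REPORTER(TYPE)                                      \
  void Report##TYPE(const char* error, uint32_t index) {          \
    std::printf(#TYPE ": import #%u error: %s\n", index, error);  \
  }

ERROR_REPORTER(LinkError)
ERROR_REPORTER(TypeError)

#undef ERROR_REPORTER

int main() {
  ReportLinkError("function import requires a callable", 0);
  ReportTypeError("module is not an object or function", 1);
}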
-
- // Look up an import value in the {ffi_} object.
- MaybeHandle<Object> LookupImport(uint32_t index, Handle<String> module_name,
- Handle<String> import_name);
-
- // Look up an import value in the {ffi_} object specifically for linking an
- // asm.js module. This only performs non-observable lookups, which allows
- // falling back to JavaScript proper (and hence re-executing all lookups) if
- // module instantiation fails.
- MaybeHandle<Object> LookupImportAsm(uint32_t index,
- Handle<String> import_name);
-
- uint32_t EvalUint32InitExpr(const WasmInitExpr& expr);
-
- // Load data segments into the memory.
- void LoadDataSegments(Handle<WasmInstanceObject> instance);
-
- void WriteGlobalValue(const WasmGlobal& global, double value);
- void WriteGlobalValue(const WasmGlobal& global,
- Handle<WasmGlobalObject> value);
-
- void SanitizeImports();
-
- // Find the imported memory buffer if there is one. This is used to see if we
- // need to recompile with bounds checks before creating the instance.
- MaybeHandle<JSArrayBuffer> FindImportedMemoryBuffer() const;
-
- // Process the imports, including functions, tables, globals, and memory, in
- // order, loading them from the {ffi_} object. Returns the number of imported
- // functions.
- int ProcessImports(Handle<WasmInstanceObject> instance);
+CompilationStateImpl* Impl(CompilationState* compilation_state) {
+ return reinterpret_cast<CompilationStateImpl*>(compilation_state);
+}
+const CompilationStateImpl* Impl(const CompilationState* compilation_state) {
+ return reinterpret_cast<const CompilationStateImpl*>(compilation_state);
+}
- template <typename T>
- T* GetRawGlobalPtr(const WasmGlobal& global);
+} // namespace
- // Process initialization of globals.
- void InitGlobals();
+//////////////////////////////////////////////////////
+// PIMPL implementation of {CompilationState}.
- // Allocate memory for a module instance as a new JSArrayBuffer.
- Handle<JSArrayBuffer> AllocateMemory(uint32_t num_pages);
+CompilationState::~CompilationState() { Impl(this)->~CompilationStateImpl(); }
- bool NeedsWrappers() const;
+void CompilationState::CancelAndWait() { Impl(this)->CancelAndWait(); }
- // Process the exports, creating wrappers for functions, tables, memories,
- // and globals.
- void ProcessExports(Handle<WasmInstanceObject> instance);
+void CompilationState::SetError(uint32_t func_index, const WasmError& error) {
+ Impl(this)->SetError(func_index, error);
+}
- void InitializeTables(Handle<WasmInstanceObject> instance);
+void CompilationState::SetWireBytesStorage(
+ std::shared_ptr<WireBytesStorage> wire_bytes_storage) {
+ Impl(this)->SetWireBytesStorage(std::move(wire_bytes_storage));
+}
- void LoadTableSegments(Handle<WasmInstanceObject> instance);
+std::shared_ptr<WireBytesStorage> CompilationState::GetWireBytesStorage()
+ const {
+ return Impl(this)->GetWireBytesStorage();
+}
- // Creates new exception tags for all exceptions. Note that some tags might
- // already exist if they were imported, those tags will be re-used.
- void InitializeExceptions(Handle<WasmInstanceObject> instance);
-};
+void CompilationState::AddCallback(CompilationState::callback_t callback) {
+ return Impl(this)->AddCallback(std::move(callback));
+}
-} // namespace
+bool CompilationState::failed() const { return Impl(this)->failed(); }
-MaybeHandle<WasmInstanceObject> InstantiateToInstanceObject(
- Isolate* isolate, ErrorThrower* thrower,
- Handle<WasmModuleObject> module_object, MaybeHandle<JSReceiver> imports,
- MaybeHandle<JSArrayBuffer> memory) {
- InstanceBuilder builder(isolate, thrower, module_object, imports, memory);
- auto instance = builder.Build();
- if (!instance.is_null() && builder.ExecuteStartFunction()) {
- return instance;
- }
- DCHECK(isolate->has_pending_exception() || thrower->error());
- return {};
+// static
+std::unique_ptr<CompilationState> CompilationState::New(
+ Isolate* isolate, NativeModule* native_module) {
+ return std::unique_ptr<CompilationState>(reinterpret_cast<CompilationState*>(
+ new CompilationStateImpl(isolate, native_module)));
}
+// End of PIMPL implementation of {CompilationState}.
+//////////////////////////////////////////////////////
+
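The forwarding functions above implement a cast-based PIMPL: {CompilationState} declares no fields, and the allocation behind every {CompilationState*} is really a {CompilationStateImpl}. A stripped-down sketch of the idiom with hypothetical {Widget}/{WidgetImpl} names:

#include <memory>

class WidgetImpl {
 public:
  void DoWork() { ++work_done_; }

 private:
  int work_done_ = 0;
};

// Public handle type: no fields of its own, only forwarding methods.
class Widget {
 public:
  static std::unique_ptr<Widget> New() {
    return std::unique_ptr<Widget>(
        reinterpret_cast<Widget*>(new WidgetImpl()));
  }
  ~Widget() { Impl()->~WidgetImpl(); }  // run the real destructor
  // Plain global delete: {Widget} and {WidgetImpl} have different sizes,
  // so sized delete must be avoided.
  static void operator delete(void* p) { ::operator delete(p); }
  void DoWork() { Impl()->DoWork(); }

 private:
  WidgetImpl* Impl() { return reinterpret_cast<WidgetImpl*>(this); }
};

int main() {
  std::unique_ptr<Widget> w = Widget::New();
  w->DoWork();
}

This keeps implementation details out of the public surface without paying for an extra pointer field per object.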
WasmCode* LazyCompileFunction(Isolate* isolate, NativeModule* native_module,
int func_index) {
base::ElapsedTimer compilation_timer;
@@ -345,32 +339,33 @@ WasmCode* LazyCompileFunction(Isolate* isolate, NativeModule* native_module,
compilation_timer.Start();
- ModuleEnv* module_env = native_module->compilation_state()->module_env();
-
TRACE_LAZY("Compiling wasm-function#%d.\n", func_index);
const uint8_t* module_start = native_module->wire_bytes().start();
- const WasmFunction* func = &module_env->module->functions[func_index];
- FunctionBody body{func->sig, func->code.offset(),
- module_start + func->code.offset(),
- module_start + func->code.end_offset()};
-
- ErrorThrower thrower(isolate, "WasmLazyCompile");
- WasmCompilationUnit unit(isolate->wasm_engine(), module_env, native_module,
- body, func_index, isolate->counters());
- unit.ExecuteCompilation(
- native_module->compilation_state()->detected_features());
- WasmCode* wasm_code = unit.FinishCompilation(&thrower);
-
- if (WasmCode::ShouldBeLogged(isolate)) wasm_code->LogCode(isolate);
-
- // If there is a pending error, something really went wrong. The module was
- // verified before starting execution with lazy compilation.
+ const WasmFunction* func = &native_module->module()->functions[func_index];
+ FunctionBody func_body{func->sig, func->code.offset(),
+ module_start + func->code.offset(),
+ module_start + func->code.end_offset()};
+
+ ExecutionTier tier =
+ WasmCompilationUnit::GetDefaultExecutionTier(native_module->module());
+ WasmCompilationUnit unit(isolate->wasm_engine(), func_index, tier);
+ CompilationEnv env = native_module->CreateCompilationEnv();
+ WasmCompilationResult result = unit.ExecuteCompilation(
+ &env, native_module->compilation_state()->GetWireBytesStorage(),
+ isolate->counters(),
+ Impl(native_module->compilation_state())->detected_features());
+ WasmCode* code = unit.Publish(std::move(result), native_module);
+
+ // During lazy compilation, we should never get compilation errors. The module
+ // was verified before starting execution with lazy compilation.
// This might be OOM, but then we cannot continue execution anyway.
// TODO(clemensh): According to the spec, we can actually skip validation at
// module creation time, and return a function that always traps here.
- CHECK(!thrower.error());
+ CHECK(!native_module->compilation_state()->failed());
+
+ if (WasmCode::ShouldBeLogged(isolate)) code->LogCode(isolate);
int64_t func_size =
static_cast<int64_t>(func->code.end_offset() - func->code.offset());
@@ -383,7 +378,7 @@ WasmCode* LazyCompileFunction(Isolate* isolate, NativeModule* native_module,
compilation_time != 0 ? static_cast<int>(func_size / compilation_time)
: 0);
- return wasm_code;
+ return code;
}
Address CompileLazy(Isolate* isolate, NativeModule* native_module,
@@ -402,28 +397,30 @@ Address CompileLazy(Isolate* isolate, NativeModule* native_module,
return result->instruction_start();
}
-// The CompilationUnitBuilder builds compilation units and stores them in an
+namespace {
+
+// The {CompilationUnitBuilder} builds compilation units and stores them in an
// internal buffer. The buffer is moved into the working queue of the
-// CompilationState when {Commit} is called.
+// {CompilationStateImpl} when {Commit} is called.
class CompilationUnitBuilder {
public:
- explicit CompilationUnitBuilder(NativeModule* native_module)
+ explicit CompilationUnitBuilder(NativeModule* native_module,
+ WasmEngine* wasm_engine)
: native_module_(native_module),
- compilation_state_(native_module->compilation_state()) {}
+ wasm_engine_(wasm_engine),
+ default_tier_(WasmCompilationUnit::GetDefaultExecutionTier(
+ native_module->module())) {}
- void AddUnit(const WasmFunction* function, uint32_t buffer_offset,
- Vector<const uint8_t> bytes) {
- switch (compilation_state_->compile_mode()) {
+ void AddUnit(uint32_t func_index) {
+ switch (compilation_state()->compile_mode()) {
case CompileMode::kTiering:
- tiering_units_.emplace_back(CreateUnit(function, buffer_offset, bytes,
- ExecutionTier::kOptimized));
- baseline_units_.emplace_back(CreateUnit(function, buffer_offset, bytes,
- ExecutionTier::kBaseline));
+ tiering_units_.emplace_back(
+ CreateUnit(func_index, ExecutionTier::kOptimized));
+ baseline_units_.emplace_back(
+ CreateUnit(func_index, ExecutionTier::kBaseline));
return;
case CompileMode::kRegular:
- baseline_units_.emplace_back(
- CreateUnit(function, buffer_offset, bytes,
- WasmCompilationUnit::GetDefaultExecutionTier()));
+ baseline_units_.emplace_back(CreateUnit(func_index, default_tier_));
return;
}
UNREACHABLE();
@@ -431,7 +428,7 @@ class CompilationUnitBuilder {
bool Commit() {
if (baseline_units_.empty() && tiering_units_.empty()) return false;
- compilation_state_->AddCompilationUnits(baseline_units_, tiering_units_);
+ compilation_state()->AddCompilationUnits(baseline_units_, tiering_units_);
Clear();
return true;
}
@@ -442,130 +439,88 @@ class CompilationUnitBuilder {
}
private:
- std::unique_ptr<WasmCompilationUnit> CreateUnit(const WasmFunction* function,
- uint32_t buffer_offset,
- Vector<const uint8_t> bytes,
- ExecutionTier mode) {
- return base::make_unique<WasmCompilationUnit>(
- compilation_state_->wasm_engine(), compilation_state_->module_env(),
- native_module_,
- FunctionBody{function->sig, buffer_offset, bytes.begin(), bytes.end()},
- function->func_index,
- compilation_state_->isolate()->async_counters().get(), mode);
- }
-
- NativeModule* native_module_;
- CompilationState* compilation_state_;
+ std::unique_ptr<WasmCompilationUnit> CreateUnit(uint32_t func_index,
+ ExecutionTier tier) {
+ return base::make_unique<WasmCompilationUnit>(wasm_engine_, func_index,
+ tier);
+ }
+
+ CompilationStateImpl* compilation_state() const {
+ return Impl(native_module_->compilation_state());
+ }
+
+ NativeModule* const native_module_;
+ WasmEngine* const wasm_engine_;
+ const ExecutionTier default_tier_;
std::vector<std::unique_ptr<WasmCompilationUnit>> baseline_units_;
std::vector<std::unique_ptr<WasmCompilationUnit>> tiering_units_;
};
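{CompilationUnitBuilder} batches units locally and publishes them with a single {Commit()}, so the shared queue's lock is taken once per batch instead of once per function. A minimal sketch of that buffering shape (stand-in types):

#include <mutex>
#include <vector>

struct SharedQueue {
  std::mutex mu;
  std::vector<int> units;  // stand-in for compilation units
};

class UnitBatchBuilder {
 public:
  explicit UnitBatchBuilder(SharedQueue* queue) : queue_(queue) {}

  void AddUnit(int unit) { pending_.push_back(unit); }  // no lock needed

  // Moves the whole local buffer into the shared queue under one lock.
  bool Commit() {
    if (pending_.empty()) return false;
    {
      std::lock_guard<std::mutex> lock(queue_->mu);
      queue_->units.insert(queue_->units.end(), pending_.begin(),
                           pending_.end());
    }
    pending_.clear();
    return true;
  }

 private:
  SharedQueue* const queue_;
  std::vector<int> pending_;
};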
-namespace {
bool compile_lazy(const WasmModule* module) {
return FLAG_wasm_lazy_compilation ||
(FLAG_asm_wasm_lazy_compilation && module->origin == kAsmJsOrigin);
}
-byte* raw_buffer_ptr(MaybeHandle<JSArrayBuffer> buffer, int offset) {
- return static_cast<byte*>(buffer.ToHandleChecked()->backing_store()) + offset;
-}
-
-void RecordStats(const Code* code, Counters* counters) {
+void RecordStats(const Code code, Counters* counters) {
counters->wasm_generated_code_size()->Increment(code->body_size());
counters->wasm_reloc_size()->Increment(code->relocation_info()->length());
}
-bool in_bounds(uint32_t offset, size_t size, size_t upper) {
- return offset + size <= upper && offset + size >= offset;
-}
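The second comparison in the removed {in_bounds} helper is an overflow guard: if {offset + size} wraps around, the sum ends up smaller than {offset} and the check fails. An equivalent formulation that avoids the wrap entirely by subtracting instead of adding:

#include <cstddef>
#include <cstdint>

bool in_bounds(uint32_t offset, size_t size, size_t upper) {
  // Checking size <= upper first guarantees upper - size cannot underflow.
  return size <= upper && offset <= upper - size;
}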
-
-using WasmInstanceMap =
- IdentityMap<Handle<WasmInstanceObject>, FreeStoreAllocationPolicy>;
-
double MonotonicallyIncreasingTimeInMs() {
return V8::GetCurrentPlatform()->MonotonicallyIncreasingTime() *
base::Time::kMillisecondsPerSecond;
}
-ModuleEnv CreateDefaultModuleEnv(const WasmModule* module,
- bool allow_trap_handler = true) {
- UseTrapHandler use_trap_handler =
- trap_handler::IsTrapHandlerEnabled() && allow_trap_handler
- ? kUseTrapHandler
- : kNoTrapHandler;
- return ModuleEnv(module, use_trap_handler, kRuntimeExceptionSupport);
-}
-
// Run by each compilation task and by the main thread (i.e. in both
// foreground and background threads). The no_finisher_callback is called
// within the result_mutex_ lock when no finishing task is running, i.e. when
// the finisher_is_running_ flag is not set.
-bool FetchAndExecuteCompilationUnit(CompilationState* compilation_state,
- WasmFeatures* detected) {
+bool FetchAndExecuteCompilationUnit(CompilationEnv* env,
+ NativeModule* native_module,
+ CompilationStateImpl* compilation_state,
+ WasmFeatures* detected,
+ Counters* counters) {
DisallowHeapAccess no_heap_access;
std::unique_ptr<WasmCompilationUnit> unit =
compilation_state->GetNextCompilationUnit();
if (unit == nullptr) return false;
- // TODO(kimanh): We need to find out in which mode the unit
- // should be compiled in before compiling it, as it might fallback
- // to Turbofan if it cannot be compiled using Liftoff. This can be removed
- // later as soon as Liftoff can compile any function. Then, we can directly
- // access {unit->mode()} within {ScheduleUnitForFinishing()}.
- ExecutionTier mode = unit->mode();
- unit->ExecuteCompilation(detected);
- compilation_state->ScheduleUnitForFinishing(std::move(unit), mode);
+ // Get the tier before starting compilation, as compilation can switch tiers
+ // if baseline bails out.
+ ExecutionTier tier = unit->tier();
+ WasmCompilationResult result = unit->ExecuteCompilation(
+ env, compilation_state->GetWireBytesStorage(), counters, detected);
+
+ WasmCode* code = unit->Publish(std::move(result), native_module);
+ compilation_state->OnFinishedUnit(tier, code);
return true;
}
-void InitializeCompilationUnits(NativeModule* native_module) {
+void InitializeCompilationUnits(NativeModule* native_module,
+ WasmEngine* wasm_engine) {
ModuleWireBytes wire_bytes(native_module->wire_bytes());
const WasmModule* module = native_module->module();
- CompilationUnitBuilder builder(native_module);
+ CompilationUnitBuilder builder(native_module, wasm_engine);
uint32_t start = module->num_imported_functions;
uint32_t end = start + module->num_declared_functions;
for (uint32_t i = start; i < end; ++i) {
- const WasmFunction* func = &module->functions[i];
- uint32_t buffer_offset = func->code.offset();
- Vector<const uint8_t> bytes(wire_bytes.start() + func->code.offset(),
- func->code.end_offset() - func->code.offset());
-
- DCHECK_NOT_NULL(native_module);
- builder.AddUnit(func, buffer_offset, bytes);
+ builder.AddUnit(i);
}
builder.Commit();
}
-void FinishCompilationUnits(CompilationState* compilation_state,
- ErrorThrower* thrower) {
+void FinishCompilationUnits(CompilationStateImpl* compilation_state) {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"), "FinishCompilationUnits");
- while (true) {
- if (compilation_state->failed()) break;
+ while (!compilation_state->failed()) {
std::unique_ptr<WasmCompilationUnit> unit =
compilation_state->GetNextExecutedUnit();
if (unit == nullptr) break;
- WasmCode* result = unit->FinishCompilation(thrower);
-
- if (thrower->error()) {
- compilation_state->Abort();
- break;
- }
-
- // Update the compilation state.
- compilation_state->OnFinishedUnit();
- DCHECK_IMPLIES(result == nullptr, thrower->error());
- if (result == nullptr) break;
- }
- if (!compilation_state->failed()) {
- compilation_state->RestartBackgroundTasks();
}
}
-void CompileInParallel(Isolate* isolate, NativeModule* native_module,
- Handle<WasmModuleObject> module_object,
- ErrorThrower* thrower) {
+void CompileInParallel(Isolate* isolate, NativeModule* native_module) {
// Data structures for the parallel compilation.
//-----------------------------------------------------------------------
@@ -575,12 +530,8 @@ void CompileInParallel(Isolate* isolate, NativeModule* native_module,
// {compilation_state}. By adding units to the {compilation_state}, new
// {BackgroundCompileTasks} instances are spawned which run on
// the background threads.
- // 2.a) The background threads and the main thread pick one compilation
- // unit at a time and execute the parallel phase of the compilation
- // unit. After finishing the execution of the parallel phase, the
- // result is enqueued in {baseline_finish_units_}.
- // 2.b) If {baseline_finish_units_} contains a compilation unit, the main
- // thread dequeues it and finishes the compilation.
+ // 2) The background threads and the main thread pick one compilation unit at
+ // a time and execute the parallel phase of the compilation unit.
// 3) After the parallel phase of all compilation units has started, the
// main thread continues to finish all compilation units as long as
// baseline-compilation units are left to be processed.
@@ -592,7 +543,8 @@ void CompileInParallel(Isolate* isolate, NativeModule* native_module,
// use the node cache.
CanonicalHandleScope canonical(isolate);
- CompilationState* compilation_state = native_module->compilation_state();
+ CompilationStateImpl* compilation_state =
+ Impl(native_module->compilation_state());
// Make sure that no foreground task is spawned for finishing
// the compilation units. This foreground thread will be
// responsible for finishing compilation.
@@ -606,23 +558,19 @@ void CompileInParallel(Isolate* isolate, NativeModule* native_module,
// {compilation_state}. By adding units to the {compilation_state}, new
// {BackgroundCompileTask} instances are spawned which run on
// background threads.
- InitializeCompilationUnits(native_module);
-
- // 2.a) The background threads and the main thread pick one compilation
- // unit at a time and execute the parallel phase of the compilation
- // unit. After finishing the execution of the parallel phase, the
- // result is enqueued in {baseline_finish_units_}.
- // The foreground task bypasses waiting on memory threshold, because
- // its results will immediately be converted to code (below).
+ InitializeCompilationUnits(native_module, isolate->wasm_engine());
+
+ // 2) The background threads and the main thread pick one compilation unit at
+ // a time and execute the parallel phase of the compilation unit.
WasmFeatures detected_features;
- while (
- FetchAndExecuteCompilationUnit(compilation_state, &detected_features) &&
- !compilation_state->baseline_compilation_finished()) {
- // 2.b) If {baseline_finish_units_} contains a compilation unit, the main
- // thread dequeues it and finishes the compilation unit. Compilation
- // units are finished concurrently to the background threads to save
- // memory.
- FinishCompilationUnits(compilation_state, thrower);
+ CompilationEnv env = native_module->CreateCompilationEnv();
+ while (FetchAndExecuteCompilationUnit(&env, native_module, compilation_state,
+ &detected_features,
+ isolate->counters()) &&
+ !compilation_state->baseline_compilation_finished()) {
+ // TODO(clemensh): Refactor ownership of the AsyncCompileJob and remove
+ // this.
+ FinishCompilationUnits(compilation_state);
if (compilation_state->failed()) break;
}
@@ -633,7 +581,7 @@ void CompileInParallel(Isolate* isolate, NativeModule* native_module,
// baseline compilation units are left to be processed. If compilation
// already failed, all background tasks have already been canceled
// in {FinishCompilationUnits}, and there are no units to finish.
- FinishCompilationUnits(compilation_state, thrower);
+ FinishCompilationUnits(compilation_state);
if (compilation_state->baseline_compilation_finished()) break;
}
@@ -647,28 +595,27 @@ void CompileInParallel(Isolate* isolate, NativeModule* native_module,
if (!compilation_state->failed() &&
compilation_state->compile_mode() == CompileMode::kTiering) {
compilation_state->SetFinisherIsRunning(false);
- compilation_state->RestartBackgroundTasks();
}
}
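Note how the loop above has the main thread pull from the same unit queue as the background workers until baseline compilation finishes, so compilation completes even with zero worker threads. A self-contained sketch of that cooperative scheme (stand-in types, not the V8 classes):

#include <mutex>
#include <queue>
#include <thread>
#include <vector>

std::mutex mu;
std::queue<int> units;  // stand-in for baseline compilation units

bool FetchAndExecuteOne() {
  int unit;
  {
    std::lock_guard<std::mutex> lock(mu);
    if (units.empty()) return false;
    unit = units.front();
    units.pop();
  }
  volatile int sink = unit * unit;  // "compile" the unit
  (void)sink;
  return true;
}

int main() {
  for (int i = 0; i < 1000; ++i) units.push(i);
  std::vector<std::thread> workers;
  for (int i = 0; i < 4; ++i) {
    workers.emplace_back([] { while (FetchAndExecuteOne()) {} });
  }
  // The main thread helps drain the queue instead of blocking on the workers.
  while (FetchAndExecuteOne()) {}
  for (auto& t : workers) t.join();
}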
void CompileSequentially(Isolate* isolate, NativeModule* native_module,
- ModuleEnv* module_env, ErrorThrower* thrower) {
+ ErrorThrower* thrower) {
DCHECK(!thrower->error());
ModuleWireBytes wire_bytes(native_module->wire_bytes());
- const WasmModule* module = module_env->module;
+ const WasmModule* module = native_module->module();
WasmFeatures detected = kNoWasmFeatures;
- for (uint32_t i = 0; i < module->functions.size(); ++i) {
- const WasmFunction& func = module->functions[i];
+ auto* comp_state = Impl(native_module->compilation_state());
+ ExecutionTier tier =
+ WasmCompilationUnit::GetDefaultExecutionTier(native_module->module());
+ for (const WasmFunction& func : module->functions) {
if (func.imported) continue; // Imports are compiled at instantiation time.
// Compile the function.
- WasmCode* code = WasmCompilationUnit::CompileWasmFunction(
- isolate, native_module, &detected, thrower, module_env, &func);
- if (code == nullptr) {
- TruncatedUserString<> name(wire_bytes.GetNameOrNull(&func, module));
- thrower->CompileError("Compilation of #%d:%.*s failed.", i, name.length(),
- name.start());
+ WasmCompilationUnit::CompileWasmFunction(isolate, native_module, &detected,
+ &func, tier);
+ if (comp_state->failed()) {
+ thrower->CompileFailed(comp_state->GetCompileError());
break;
}
}
@@ -691,9 +638,8 @@ void ValidateSequentially(Isolate* isolate, NativeModule* native_module,
base + func.code.end_offset()};
DecodeResult result;
{
- auto time_counter =
- SELECT_WASM_COUNTER(isolate->async_counters(), module->origin,
- wasm_decode, function_time);
+ auto time_counter = SELECT_WASM_COUNTER(
+ isolate->counters(), module->origin, wasm_decode, function_time);
TimedHistogramScope wasm_decode_function_time_scope(time_counter);
WasmFeatures detected;
@@ -705,16 +651,16 @@ void ValidateSequentially(Isolate* isolate, NativeModule* native_module,
TruncatedUserString<> name(wire_bytes.GetNameOrNull(&func, module));
thrower->CompileError("Compiling function #%d:%.*s failed: %s @+%u", i,
name.length(), name.start(),
- result.error_msg().c_str(), result.error_offset());
+ result.error().message().c_str(),
+ result.error().offset());
break;
}
}
}
void CompileNativeModule(Isolate* isolate, ErrorThrower* thrower,
- Handle<WasmModuleObject> module_object,
- const WasmModule* wasm_module, ModuleEnv* env) {
- NativeModule* const native_module = module_object->native_module();
+ const WasmModule* wasm_module,
+ NativeModule* native_module) {
ModuleWireBytes wire_bytes(native_module->wire_bytes());
if (compile_lazy(wasm_module)) {
@@ -739,11 +685,14 @@ void CompileNativeModule(Isolate* isolate, ErrorThrower* thrower,
V8::GetCurrentPlatform()->NumberOfWorkerThreads() > 0;
if (compile_parallel) {
- CompileInParallel(isolate, native_module, module_object, thrower);
+ CompileInParallel(isolate, native_module);
} else {
- CompileSequentially(isolate, native_module, env, thrower);
+ CompileSequentially(isolate, native_module, thrower);
+ }
+ auto* compilation_state = Impl(native_module->compilation_state());
+ if (compilation_state->failed()) {
+ thrower->CompileFailed(compilation_state->GetCompileError());
}
- if (thrower->error()) return;
}
}
@@ -751,7 +700,7 @@ void CompileNativeModule(Isolate* isolate, ErrorThrower* thrower,
// the NativeModule, the code table, etc.).
class FinishCompileTask : public CancelableTask {
public:
- explicit FinishCompileTask(CompilationState* compilation_state,
+ explicit FinishCompileTask(CompilationStateImpl* compilation_state,
CancelableTaskManager* task_manager)
: CancelableTask(task_manager), compilation_state_(compilation_state) {}
@@ -759,7 +708,7 @@ class FinishCompileTask : public CancelableTask {
Isolate* isolate = compilation_state_->isolate();
HandleScope scope(isolate);
SaveContext saved_context(isolate);
- isolate->set_context(nullptr);
+ isolate->set_context(Context());
TRACE_COMPILE("(4a) Finishing compilation units...\n");
if (compilation_state_->failed()) {
@@ -787,30 +736,7 @@ class FinishCompileTask : public CancelableTask {
break;
}
- ErrorThrower thrower(compilation_state_->isolate(), "AsyncCompile");
- WasmCode* result = unit->FinishCompilation(&thrower);
-
- if (thrower.error()) {
- DCHECK_NULL(result);
- compilation_state_->OnError(&thrower);
- compilation_state_->SetFinisherIsRunning(false);
- thrower.Reset();
- break;
- }
-
- if (compilation_state_->baseline_compilation_finished()) {
- // If Liftoff compilation finishes it will directly start executing.
- // As soon as we have Turbofan-compiled code available, it will
- // directly be used by Liftoff-compiled code via the jump table.
- DCHECK_EQ(CompileMode::kTiering, compilation_state_->compile_mode());
- DCHECK(!result->is_liftoff());
-
- if (WasmCode::ShouldBeLogged(isolate)) result->LogCode(isolate);
- }
-
- // Update the compilation state, and possibly notify
- // threads waiting for events.
- compilation_state_->OnFinishedUnit();
+ if (compilation_state_->failed()) break;
if (deadline < MonotonicallyIncreasingTimeInMs()) {
// We reached the deadline. We reschedule this task and return
@@ -823,40 +749,55 @@ class FinishCompileTask : public CancelableTask {
}
private:
- CompilationState* compilation_state_;
+ CompilationStateImpl* compilation_state_;
};
// The runnable task that performs compilations in the background.
class BackgroundCompileTask : public CancelableTask {
public:
- explicit BackgroundCompileTask(CompilationState* compilation_state,
- CancelableTaskManager* task_manager)
- : CancelableTask(task_manager), compilation_state_(compilation_state) {}
+ explicit BackgroundCompileTask(CancelableTaskManager* task_manager,
+ NativeModule* native_module,
+ Counters* counters)
+ : CancelableTask(task_manager),
+ native_module_(native_module),
+ counters_(counters) {}
void RunInternal() override {
TRACE_COMPILE("(3b) Compiling...\n");
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"),
+ "BackgroundCompileTask::RunInternal");
// The number of currently running background tasks is reduced in
// {OnBackgroundTaskStopped}.
- while (!compilation_state_->failed()) {
- if (!FetchAndExecuteCompilationUnit(compilation_state_,
- &detected_features_)) {
+ CompilationEnv env = native_module_->CreateCompilationEnv();
+ auto* compilation_state = Impl(native_module_->compilation_state());
+ WasmFeatures detected_features = kNoWasmFeatures;
+ double deadline = MonotonicallyIncreasingTimeInMs() + 50.0;
+ while (!compilation_state->failed()) {
+ if (!FetchAndExecuteCompilationUnit(&env, native_module_,
+ compilation_state, &detected_features,
+ counters_)) {
break;
}
+ if (deadline < MonotonicallyIncreasingTimeInMs()) {
+ compilation_state->ReportDetectedFeatures(detected_features);
+ compilation_state->RestartBackgroundCompileTask();
+ return;
+ }
}
- compilation_state_->OnBackgroundTaskStopped(detected_features_);
+ compilation_state->OnBackgroundTaskStopped(detected_features);
}
private:
- CompilationState* compilation_state_;
- WasmFeatures detected_features_ = kNoWasmFeatures;
+ NativeModule* const native_module_;
+ Counters* const counters_;
};
+
} // namespace
-MaybeHandle<WasmModuleObject> CompileToModuleObject(
+std::unique_ptr<NativeModule> CompileToNativeModule(
Isolate* isolate, const WasmFeatures& enabled, ErrorThrower* thrower,
std::shared_ptr<const WasmModule> module, const ModuleWireBytes& wire_bytes,
- Handle<Script> asm_js_script,
- Vector<const byte> asm_js_offset_table_bytes) {
+ Handle<FixedArray>* export_wrappers_out) {
const WasmModule* wasm_module = module.get();
TimedHistogramScope wasm_compile_module_time_scope(SELECT_WASM_COUNTER(
isolate->counters(), wasm_module->origin, wasm_compile, module_time));
@@ -865,1317 +806,45 @@ MaybeHandle<WasmModuleObject> CompileToModuleObject(
if (wasm_module->has_shared_memory) {
isolate->CountUsage(v8::Isolate::UseCounterFeature::kWasmSharedMemory);
}
+ int export_wrapper_size = static_cast<int>(module->num_exported_functions);
- // TODO(6792): No longer needed once WebAssembly code is off heap. Use
- // base::Optional to be able to close the scope before notifying the debugger.
- base::Optional<CodeSpaceMemoryModificationScope> modification_scope(
- base::in_place_t(), isolate->heap());
-
- // Create heap objects for script, module bytes and asm.js offset table to
- // be stored in the module object.
- Handle<Script> script;
- Handle<ByteArray> asm_js_offset_table;
- if (asm_js_script.is_null()) {
- script = CreateWasmScript(isolate, wire_bytes, wasm_module->source_map_url);
- } else {
- script = asm_js_script;
- asm_js_offset_table =
- isolate->factory()->NewByteArray(asm_js_offset_table_bytes.length());
- asm_js_offset_table->copy_in(0, asm_js_offset_table_bytes.start(),
- asm_js_offset_table_bytes.length());
- }
// TODO(wasm): only save the sections necessary to deserialize a
// {WasmModule}. E.g. function bodies could be omitted.
OwnedVector<uint8_t> wire_bytes_copy =
OwnedVector<uint8_t>::Of(wire_bytes.module_bytes());
- // Create the module object.
- // TODO(clemensh): For the same module (same bytes / same hash), we should
- // only have one WasmModuleObject. Otherwise, we might only set
- // breakpoints on a (potentially empty) subset of the instances.
- ModuleEnv env = CreateDefaultModuleEnv(wasm_module);
-
- // Create the compiled module object and populate with compiled functions
- // and information needed at instantiation time. This object needs to be
- // serializable. Instantiation may occur off a deserialized version of this
- // object.
- Handle<WasmModuleObject> module_object = WasmModuleObject::New(
- isolate, enabled, std::move(module), env, std::move(wire_bytes_copy),
- script, asm_js_offset_table);
- CompileNativeModule(isolate, thrower, module_object, wasm_module, &env);
+ // Create and compile the native module.
+ size_t code_size_estimate =
+ wasm::WasmCodeManager::EstimateNativeModuleCodeSize(module.get());
+
+ // Create a new {NativeModule} first.
+ auto native_module = isolate->wasm_engine()->code_manager()->NewNativeModule(
+ isolate, enabled, code_size_estimate,
+ wasm::NativeModule::kCanAllocateMoreMemory, std::move(module));
+ native_module->SetWireBytes(std::move(wire_bytes_copy));
+ native_module->SetRuntimeStubs(isolate);
+
+ CompileNativeModule(isolate, thrower, wasm_module, native_module.get());
if (thrower->error()) return {};
// Compile JS->wasm wrappers for exported functions.
- CompileJsToWasmWrappers(isolate, module_object);
-
- // If we created a wasm script, finish it now and make it public to the
- // debugger.
- if (asm_js_script.is_null()) {
- // Close the CodeSpaceMemoryModificationScope before calling into the
- // debugger.
- modification_scope.reset();
- isolate->debug()->OnAfterCompile(script);
- }
+ *export_wrappers_out =
+ isolate->factory()->NewFixedArray(export_wrapper_size, TENURED);
+ CompileJsToWasmWrappers(isolate, native_module->module(),
+ *export_wrappers_out);
// Log the code within the generated module for profiling.
- module_object->native_module()->LogWasmCodes(isolate);
+ native_module->LogWasmCodes(isolate);
- return module_object;
+ return native_module;
}
-InstanceBuilder::InstanceBuilder(Isolate* isolate, ErrorThrower* thrower,
- Handle<WasmModuleObject> module_object,
- MaybeHandle<JSReceiver> ffi,
- MaybeHandle<JSArrayBuffer> memory)
- : isolate_(isolate),
- enabled_(module_object->native_module()->enabled_features()),
- module_(module_object->module()),
- thrower_(thrower),
- module_object_(module_object),
- ffi_(ffi),
- memory_(memory) {
- sanitized_imports_.reserve(module_->import_table.size());
-}
-
-// Build an instance, in all of its glory.
-MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"), "InstanceBuilder::Build");
- // Check that an imports argument was provided, if the module requires it.
- // No point in continuing otherwise.
- if (!module_->import_table.empty() && ffi_.is_null()) {
- thrower_->TypeError(
- "Imports argument must be present and must be an object");
- return {};
- }
-
- SanitizeImports();
- if (thrower_->error()) return {};
-
- // TODO(6792): No longer needed once WebAssembly code is off heap.
- CodeSpaceMemoryModificationScope modification_scope(isolate_->heap());
- // From here on, we expect the build pipeline to run without exiting to JS.
- DisallowJavascriptExecution no_js(isolate_);
- // Record build time into correct bucket, then build instance.
- TimedHistogramScope wasm_instantiate_module_time_scope(SELECT_WASM_COUNTER(
- isolate_->counters(), module_->origin, wasm_instantiate, module_time));
-
- //--------------------------------------------------------------------------
- // Allocate the memory array buffer.
- //--------------------------------------------------------------------------
- // We allocate the memory buffer before cloning or reusing the compiled module
- // so we will know whether we need to recompile with bounds checks.
- uint32_t initial_pages = module_->initial_pages;
- auto initial_pages_counter = SELECT_WASM_COUNTER(
- isolate_->counters(), module_->origin, wasm, min_mem_pages_count);
- initial_pages_counter->AddSample(initial_pages);
- // Asm.js has memory_ already set at this point, so we don't want to
- // overwrite it.
- if (memory_.is_null()) {
- memory_ = FindImportedMemoryBuffer();
- }
- if (!memory_.is_null()) {
-    // Set the externally passed ArrayBuffer non-neuterable.
- Handle<JSArrayBuffer> memory = memory_.ToHandleChecked();
- memory->set_is_neuterable(false);
-
- DCHECK_IMPLIES(use_trap_handler(), module_->origin == kAsmJsOrigin ||
- memory->is_wasm_memory() ||
- memory->backing_store() == nullptr);
- } else if (initial_pages > 0 || use_trap_handler()) {
- // We need to unconditionally create a guard region if using trap handlers,
- // even when the size is zero to prevent null-dereference issues
- // (e.g. https://crbug.com/769637).
- // Allocate memory if the initial size is more than 0 pages.
- memory_ = AllocateMemory(initial_pages);
- if (memory_.is_null()) {
- // failed to allocate memory
- DCHECK(isolate_->has_pending_exception() || thrower_->error());
- return {};
- }
- }
-
- //--------------------------------------------------------------------------
- // Recompile module if using trap handlers but could not get guarded memory
- //--------------------------------------------------------------------------
- if (module_->origin == kWasmOrigin && use_trap_handler()) {
- // Make sure the memory has suitable guard regions.
- WasmMemoryTracker* const memory_tracker =
- isolate_->wasm_engine()->memory_tracker();
-
- if (!memory_tracker->HasFullGuardRegions(
- memory_.ToHandleChecked()->backing_store())) {
- if (!FLAG_wasm_trap_handler_fallback) {
- thrower_->LinkError(
- "Provided memory is lacking guard regions but fallback was "
- "disabled.");
- return {};
- }
-
- TRACE("Recompiling module without bounds checks\n");
- constexpr bool allow_trap_handler = false;
- ModuleEnv env = CreateDefaultModuleEnv(module_, allow_trap_handler);
- // Disable trap handlers on this native module.
- NativeModule* native_module = module_object_->native_module();
- native_module->DisableTrapHandler();
-
- // Recompile all functions in this native module.
- ErrorThrower thrower(isolate_, "recompile");
- CompileNativeModule(isolate_, &thrower, module_object_, module_, &env);
- if (thrower.error()) {
- return {};
- }
- DCHECK(!native_module->use_trap_handler());
- }
- }
-
- //--------------------------------------------------------------------------
- // Create the WebAssembly.Instance object.
- //--------------------------------------------------------------------------
- NativeModule* native_module = module_object_->native_module();
- TRACE("New module instantiation for %p\n", native_module);
- Handle<WasmInstanceObject> instance =
- WasmInstanceObject::New(isolate_, module_object_);
- NativeModuleModificationScope native_modification_scope(native_module);
-
- //--------------------------------------------------------------------------
- // Set up the globals for the new instance.
- //--------------------------------------------------------------------------
- uint32_t globals_buffer_size = module_->globals_buffer_size;
- if (globals_buffer_size > 0) {
- void* backing_store =
- isolate_->array_buffer_allocator()->Allocate(globals_buffer_size);
- if (backing_store == nullptr) {
- thrower_->RangeError("Out of memory: wasm globals");
- return {};
- }
- globals_ =
- isolate_->factory()->NewJSArrayBuffer(SharedFlag::kNotShared, TENURED);
- constexpr bool is_external = false;
- constexpr bool is_wasm_memory = false;
- JSArrayBuffer::Setup(globals_, isolate_, is_external, backing_store,
- globals_buffer_size, SharedFlag::kNotShared,
- is_wasm_memory);
- if (globals_.is_null()) {
- thrower_->RangeError("Out of memory: wasm globals");
- return {};
- }
- instance->set_globals_start(
- reinterpret_cast<byte*>(globals_->backing_store()));
- instance->set_globals_buffer(*globals_);
- }
-
- //--------------------------------------------------------------------------
- // Set up the array of references to imported globals' array buffers.
- //--------------------------------------------------------------------------
- if (module_->num_imported_mutable_globals > 0) {
- // TODO(binji): This allocates one slot for each mutable global, which is
- // more than required if multiple globals are imported from the same
- // module.
- Handle<FixedArray> buffers_array = isolate_->factory()->NewFixedArray(
- module_->num_imported_mutable_globals, TENURED);
- instance->set_imported_mutable_globals_buffers(*buffers_array);
- }
-
- //--------------------------------------------------------------------------
- // Set up the exception table used for exception tag checks.
- //--------------------------------------------------------------------------
- int exceptions_count = static_cast<int>(module_->exceptions.size());
- if (exceptions_count > 0) {
- Handle<FixedArray> exception_table =
- isolate_->factory()->NewFixedArray(exceptions_count, TENURED);
- instance->set_exceptions_table(*exception_table);
- exception_wrappers_.resize(exceptions_count);
- }
-
- //--------------------------------------------------------------------------
- // Reserve the metadata for indirect function tables.
- //--------------------------------------------------------------------------
- int table_count = static_cast<int>(module_->tables.size());
- table_instances_.resize(table_count);
-
- //--------------------------------------------------------------------------
- // Process the imports for the module.
- //--------------------------------------------------------------------------
- int num_imported_functions = ProcessImports(instance);
- if (num_imported_functions < 0) return {};
-
- //--------------------------------------------------------------------------
- // Process the initialization for the module's globals.
- //--------------------------------------------------------------------------
- InitGlobals();
-
- //--------------------------------------------------------------------------
- // Initialize the indirect tables.
- //--------------------------------------------------------------------------
- if (table_count > 0) {
- InitializeTables(instance);
- }
-
- //--------------------------------------------------------------------------
- // Initialize the exceptions table.
- //--------------------------------------------------------------------------
- if (exceptions_count > 0) {
- InitializeExceptions(instance);
- }
-
- //--------------------------------------------------------------------------
- // Create the WebAssembly.Memory object.
- //--------------------------------------------------------------------------
- if (module_->has_memory) {
- if (!instance->has_memory_object()) {
- // No memory object exists. Create one.
- Handle<WasmMemoryObject> memory_object = WasmMemoryObject::New(
- isolate_, memory_,
- module_->maximum_pages != 0 ? module_->maximum_pages : -1);
- instance->set_memory_object(*memory_object);
- }
-
- // Add the instance object to the list of instances for this memory.
- Handle<WasmMemoryObject> memory_object(instance->memory_object(), isolate_);
- WasmMemoryObject::AddInstance(isolate_, memory_object, instance);
-
- if (!memory_.is_null()) {
- // Double-check the {memory} array buffer matches the instance.
- Handle<JSArrayBuffer> memory = memory_.ToHandleChecked();
- CHECK_EQ(instance->memory_size(), memory->byte_length());
- CHECK_EQ(instance->memory_start(), memory->backing_store());
- }
- }
-
- //--------------------------------------------------------------------------
- // Check that indirect function table segments are within bounds.
- //--------------------------------------------------------------------------
- for (const WasmTableInit& table_init : module_->table_inits) {
- DCHECK(table_init.table_index < table_instances_.size());
- uint32_t base = EvalUint32InitExpr(table_init.offset);
- size_t table_size = table_instances_[table_init.table_index].table_size;
- if (!in_bounds(base, table_init.entries.size(), table_size)) {
- thrower_->LinkError("table initializer is out of bounds");
- return {};
- }
- }
-
- //--------------------------------------------------------------------------
- // Check that memory segments are within bounds.
- //--------------------------------------------------------------------------
- for (const WasmDataSegment& seg : module_->data_segments) {
- uint32_t base = EvalUint32InitExpr(seg.dest_addr);
- if (!in_bounds(base, seg.source.length(), instance->memory_size())) {
- thrower_->LinkError("data segment is out of bounds");
- return {};
- }
- }
-
- //--------------------------------------------------------------------------
- // Set up the exports object for the new instance.
- //--------------------------------------------------------------------------
- ProcessExports(instance);
- if (thrower_->error()) return {};
-
- //--------------------------------------------------------------------------
- // Initialize the indirect function tables.
- //--------------------------------------------------------------------------
- if (table_count > 0) {
- LoadTableSegments(instance);
- }
-
- //--------------------------------------------------------------------------
- // Initialize the memory by loading data segments.
- //--------------------------------------------------------------------------
- if (module_->data_segments.size() > 0) {
- LoadDataSegments(instance);
- }
-
- //--------------------------------------------------------------------------
- // Debugging support.
- //--------------------------------------------------------------------------
- // Set all breakpoints that were set on the shared module.
- WasmModuleObject::SetBreakpointsOnNewInstance(module_object_, instance);
-
- if (FLAG_wasm_interpret_all && module_->origin == kWasmOrigin) {
- Handle<WasmDebugInfo> debug_info =
- WasmInstanceObject::GetOrCreateDebugInfo(instance);
- std::vector<int> func_indexes;
- for (int func_index = num_imported_functions,
- num_wasm_functions = static_cast<int>(module_->functions.size());
- func_index < num_wasm_functions; ++func_index) {
- func_indexes.push_back(func_index);
- }
- WasmDebugInfo::RedirectToInterpreter(
- debug_info, Vector<int>(func_indexes.data(),
- static_cast<int>(func_indexes.size())));
- }
-
- //--------------------------------------------------------------------------
- // Create a wrapper for the start function.
- //--------------------------------------------------------------------------
- if (module_->start_function_index >= 0) {
- int start_index = module_->start_function_index;
- auto& function = module_->functions[start_index];
- Handle<Code> wrapper_code = js_to_wasm_cache_.GetOrCompileJSToWasmWrapper(
- isolate_, function.sig, function.imported);
- // TODO(clemensh): Don't generate an exported function for the start
- // function. Use CWasmEntry instead.
- start_function_ = WasmExportedFunction::New(
- isolate_, instance, MaybeHandle<String>(), start_index,
- static_cast<int>(function.sig->parameter_count()), wrapper_code);
- }
-
- DCHECK(!isolate_->has_pending_exception());
- TRACE("Successfully built instance for module %p\n",
- module_object_->native_module());
- return instance;
-}
-
-bool InstanceBuilder::ExecuteStartFunction() {
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"),
- "InstanceBuilder::ExecuteStartFunction");
- if (start_function_.is_null()) return true; // No start function.
-
- HandleScope scope(isolate_);
- // Call the JS function.
- Handle<Object> undefined = isolate_->factory()->undefined_value();
- MaybeHandle<Object> retval =
- Execution::Call(isolate_, start_function_, undefined, 0, nullptr);
-
- if (retval.is_null()) {
- DCHECK(isolate_->has_pending_exception());
- return false;
- }
- return true;
-}
-
-// Look up an import value in the {ffi_} object.
-MaybeHandle<Object> InstanceBuilder::LookupImport(uint32_t index,
- Handle<String> module_name,
- Handle<String> import_name) {
- // We pre-validated in the js-api layer that the ffi object is present, and
- // a JSObject, if the module has imports.
- DCHECK(!ffi_.is_null());
-
- // Look up the module first.
- MaybeHandle<Object> result = Object::GetPropertyOrElement(
- isolate_, ffi_.ToHandleChecked(), module_name);
- if (result.is_null()) {
- return ReportTypeError("module not found", index, module_name);
- }
-
- Handle<Object> module = result.ToHandleChecked();
-
- // Look up the value in the module.
- if (!module->IsJSReceiver()) {
- return ReportTypeError("module is not an object or function", index,
- module_name);
- }
-
- result = Object::GetPropertyOrElement(isolate_, module, import_name);
- if (result.is_null()) {
- ReportLinkError("import not found", index, module_name, import_name);
- return MaybeHandle<JSFunction>();
- }
-
- return result;
-}
-
-// Look up an import value in the {ffi_} object specifically for linking an
-// asm.js module. This only performs non-observable lookups, which allows
-// falling back to JavaScript proper (and hence re-executing all lookups) if
-// module instantiation fails.
-MaybeHandle<Object> InstanceBuilder::LookupImportAsm(
- uint32_t index, Handle<String> import_name) {
- // Check that a foreign function interface object was provided.
- if (ffi_.is_null()) {
- return ReportLinkError("missing imports object", index, import_name);
- }
-
- // Perform lookup of the given {import_name} without causing any observable
- // side-effect. We only accept accesses that resolve to data properties,
- // which is indicated by the asm.js spec in section 7 ("Linking") as well.
- Handle<Object> result;
- LookupIterator it = LookupIterator::PropertyOrElement(
- isolate_, ffi_.ToHandleChecked(), import_name);
- switch (it.state()) {
- case LookupIterator::ACCESS_CHECK:
- case LookupIterator::INTEGER_INDEXED_EXOTIC:
- case LookupIterator::INTERCEPTOR:
- case LookupIterator::JSPROXY:
- case LookupIterator::ACCESSOR:
- case LookupIterator::TRANSITION:
- return ReportLinkError("not a data property", index, import_name);
- case LookupIterator::NOT_FOUND:
-      // Accepting missing properties as undefined does not cause any
-      // observable difference from JavaScript semantics, so we are lenient.
- result = isolate_->factory()->undefined_value();
- break;
- case LookupIterator::DATA:
- result = it.GetDataValue();
- break;
- }
-
- return result;
-}
-
-uint32_t InstanceBuilder::EvalUint32InitExpr(const WasmInitExpr& expr) {
- switch (expr.kind) {
- case WasmInitExpr::kI32Const:
- return expr.val.i32_const;
- case WasmInitExpr::kGlobalIndex: {
- uint32_t offset = module_->globals[expr.val.global_index].offset;
- return ReadLittleEndianValue<uint32_t>(
- reinterpret_cast<Address>(raw_buffer_ptr(globals_, offset)));
- }
- default:
- UNREACHABLE();
- }
-}
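The removed {EvalUint32InitExpr} shows the two forms a wasm segment offset can take: an immediate {i32.const} or a reference to an already-initialized imported global. A stand-alone sketch of the same evaluator over a stand-in expression type:

#include <cstdint>
#include <vector>

struct InitExpr {
  enum Kind { kI32Const, kGlobalIndex } kind;
  uint32_t value;  // the constant, or the global index, depending on {kind}
};

uint32_t EvalUint32InitExpr(const InitExpr& expr,
                            const std::vector<uint32_t>& globals) {
  switch (expr.kind) {
    case InitExpr::kI32Const:
      return expr.value;
    case InitExpr::kGlobalIndex:
      return globals[expr.value];  // global must already be initialized
  }
  return 0;  // unreachable for well-formed input
}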
-
-// Load data segments into the memory.
-void InstanceBuilder::LoadDataSegments(Handle<WasmInstanceObject> instance) {
- Vector<const uint8_t> wire_bytes =
- module_object_->native_module()->wire_bytes();
- for (const WasmDataSegment& segment : module_->data_segments) {
- uint32_t source_size = segment.source.length();
- // Segments of size == 0 are just nops.
- if (source_size == 0) continue;
- uint32_t dest_offset = EvalUint32InitExpr(segment.dest_addr);
- DCHECK(in_bounds(dest_offset, source_size, instance->memory_size()));
- byte* dest = instance->memory_start() + dest_offset;
- const byte* src = wire_bytes.start() + segment.source.offset();
- memcpy(dest, src, source_size);
- }
-}
-
-void InstanceBuilder::WriteGlobalValue(const WasmGlobal& global, double num) {
- TRACE("init [globals_start=%p + %u] = %lf, type = %s\n",
- reinterpret_cast<void*>(raw_buffer_ptr(globals_, 0)), global.offset,
- num, ValueTypes::TypeName(global.type));
- switch (global.type) {
- case kWasmI32:
- WriteLittleEndianValue<int32_t>(GetRawGlobalPtr<int32_t>(global),
- static_cast<int32_t>(num));
- break;
- case kWasmI64:
- // TODO(titzer): initialization of imported i64 globals.
- UNREACHABLE();
- break;
- case kWasmF32:
- WriteLittleEndianValue<float>(GetRawGlobalPtr<float>(global),
- static_cast<float>(num));
- break;
- case kWasmF64:
- WriteLittleEndianValue<double>(GetRawGlobalPtr<double>(global),
- static_cast<double>(num));
- break;
- default:
- UNREACHABLE();
- }
-}
-
-void InstanceBuilder::WriteGlobalValue(const WasmGlobal& global,
- Handle<WasmGlobalObject> value) {
- TRACE("init [globals_start=%p + %u] = ",
- reinterpret_cast<void*>(raw_buffer_ptr(globals_, 0)), global.offset);
- switch (global.type) {
- case kWasmI32: {
- int32_t num = value->GetI32();
- WriteLittleEndianValue<int32_t>(GetRawGlobalPtr<int32_t>(global), num);
- TRACE("%d", num);
- break;
- }
- case kWasmI64: {
- int64_t num = value->GetI64();
- WriteLittleEndianValue<int64_t>(GetRawGlobalPtr<int64_t>(global), num);
- TRACE("%" PRId64, num);
- break;
- }
- case kWasmF32: {
- float num = value->GetF32();
- WriteLittleEndianValue<float>(GetRawGlobalPtr<float>(global), num);
- TRACE("%f", num);
- break;
- }
- case kWasmF64: {
- double num = value->GetF64();
- WriteLittleEndianValue<double>(GetRawGlobalPtr<double>(global), num);
- TRACE("%lf", num);
- break;
- }
- default:
- UNREACHABLE();
- }
- TRACE(", type = %s (from WebAssembly.Global)\n",
- ValueTypes::TypeName(global.type));
-}
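Both {WriteGlobalValue} overloads above go through {WriteLittleEndianValue} because wasm globals have a fixed little-endian layout regardless of host byte order. A portable sketch of such a store for unsigned integral types (hypothetical helper name):

#include <cstddef>
#include <cstdint>
#include <type_traits>

// Writes {value} byte by byte, least significant byte first, producing a
// little-endian encoding on any host.
template <typename T>
void WriteLittleEndian(uint8_t* dst, T value) {
  static_assert(std::is_unsigned<T>::value, "use an unsigned stand-in type");
  for (size_t i = 0; i < sizeof(T); ++i) {
    dst[i] = static_cast<uint8_t>(value >> (8 * i));
  }
}

Floating-point values would first be bit-cast to the correspondingly sized unsigned integer.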
-
-void InstanceBuilder::SanitizeImports() {
- Vector<const uint8_t> wire_bytes =
- module_object_->native_module()->wire_bytes();
- for (size_t index = 0; index < module_->import_table.size(); ++index) {
- const WasmImport& import = module_->import_table[index];
-
- Handle<String> module_name;
- MaybeHandle<String> maybe_module_name =
- WasmModuleObject::ExtractUtf8StringFromModuleBytes(isolate_, wire_bytes,
- import.module_name);
- if (!maybe_module_name.ToHandle(&module_name)) {
- thrower_->LinkError("Could not resolve module name for import %zu",
- index);
- return;
- }
-
- Handle<String> import_name;
- MaybeHandle<String> maybe_import_name =
- WasmModuleObject::ExtractUtf8StringFromModuleBytes(isolate_, wire_bytes,
- import.field_name);
- if (!maybe_import_name.ToHandle(&import_name)) {
- thrower_->LinkError("Could not resolve import name for import %zu",
- index);
- return;
- }
-
- int int_index = static_cast<int>(index);
- MaybeHandle<Object> result =
- module_->origin == kAsmJsOrigin
- ? LookupImportAsm(int_index, import_name)
- : LookupImport(int_index, module_name, import_name);
- if (thrower_->error()) {
- thrower_->LinkError("Could not find value for import %zu", index);
- return;
- }
- Handle<Object> value = result.ToHandleChecked();
- sanitized_imports_.push_back({module_name, import_name, value});
- }
-}
-
-MaybeHandle<JSArrayBuffer> InstanceBuilder::FindImportedMemoryBuffer() const {
- DCHECK_EQ(module_->import_table.size(), sanitized_imports_.size());
- for (size_t index = 0; index < module_->import_table.size(); index++) {
- const WasmImport& import = module_->import_table[index];
-
- if (import.kind == kExternalMemory) {
- const auto& value = sanitized_imports_[index].value;
- if (!value->IsWasmMemoryObject()) {
- return {};
- }
- auto memory = Handle<WasmMemoryObject>::cast(value);
- Handle<JSArrayBuffer> buffer(memory->array_buffer(), isolate_);
- return buffer;
- }
- }
- return {};
-}
-
-// Process the imports, including functions, tables, globals, and memory, in
-// order, loading them from the {ffi_} object. Returns the number of imported
-// functions.
-int InstanceBuilder::ProcessImports(Handle<WasmInstanceObject> instance) {
- int num_imported_functions = 0;
- int num_imported_tables = 0;
- int num_imported_mutable_globals = 0;
-
- DCHECK_EQ(module_->import_table.size(), sanitized_imports_.size());
- int num_imports = static_cast<int>(module_->import_table.size());
- NativeModule* native_module = instance->module_object()->native_module();
- for (int index = 0; index < num_imports; ++index) {
- const WasmImport& import = module_->import_table[index];
-
- Handle<String> module_name = sanitized_imports_[index].module_name;
- Handle<String> import_name = sanitized_imports_[index].import_name;
- Handle<Object> value = sanitized_imports_[index].value;
-
- switch (import.kind) {
- case kExternalFunction: {
- // Function imports must be callable.
- if (!value->IsCallable()) {
- ReportLinkError("function import requires a callable", index,
- module_name, import_name);
- return -1;
- }
- uint32_t func_index = import.index;
- DCHECK_EQ(num_imported_functions, func_index);
- auto js_receiver = Handle<JSReceiver>::cast(value);
- FunctionSig* expected_sig = module_->functions[func_index].sig;
- auto kind = compiler::GetWasmImportCallKind(js_receiver, expected_sig);
- switch (kind) {
- case compiler::WasmImportCallKind::kLinkError:
- ReportLinkError(
- "imported function does not match the expected type", index,
- module_name, import_name);
- return -1;
- case compiler::WasmImportCallKind::kWasmToWasm: {
- // The imported function is a WASM function from another instance.
- auto imported_function = Handle<WasmExportedFunction>::cast(value);
- Handle<WasmInstanceObject> imported_instance(
- imported_function->instance(), isolate_);
- // The import reference is the instance object itself.
- Address imported_target = imported_function->GetWasmCallTarget();
- ImportedFunctionEntry entry(instance, func_index);
- entry.set_wasm_to_wasm(*imported_instance, imported_target);
- break;
- }
- default: {
- // The imported function is a callable.
- Handle<Code> wrapper_code =
- compiler::CompileWasmImportCallWrapper(
- isolate_, kind, expected_sig, func_index, module_->origin,
- use_trap_handler())
- .ToHandleChecked();
- RecordStats(*wrapper_code, isolate_->counters());
-
- WasmCode* wasm_code =
- native_module->AddImportWrapper(wrapper_code, func_index);
- ImportedFunctionEntry entry(instance, func_index);
- entry.set_wasm_to_js(*js_receiver, wasm_code);
- break;
- }
- }
- num_imported_functions++;
- break;
- }
- case kExternalTable: {
- if (!value->IsWasmTableObject()) {
- ReportLinkError("table import requires a WebAssembly.Table", index,
- module_name, import_name);
- return -1;
- }
- uint32_t table_num = import.index;
- DCHECK_EQ(table_num, num_imported_tables);
- const WasmTable& table = module_->tables[table_num];
- TableInstance& table_instance = table_instances_[table_num];
- table_instance.table_object = Handle<WasmTableObject>::cast(value);
- instance->set_table_object(*table_instance.table_object);
- table_instance.js_wrappers = Handle<FixedArray>(
- table_instance.table_object->functions(), isolate_);
-
- int imported_table_size = table_instance.js_wrappers->length();
- if (imported_table_size < static_cast<int>(table.initial_size)) {
- thrower_->LinkError(
- "table import %d is smaller than initial %d, got %u", index,
- table.initial_size, imported_table_size);
- return -1;
- }
-
- if (table.has_maximum_size) {
- int64_t imported_maximum_size =
- table_instance.table_object->maximum_length()->Number();
- if (imported_maximum_size < 0) {
- thrower_->LinkError(
- "table import %d has no maximum length, expected %d", index,
- table.maximum_size);
- return -1;
- }
- if (imported_maximum_size > table.maximum_size) {
- thrower_->LinkError(
- " table import %d has a larger maximum size %" PRIx64
- " than the module's declared maximum %u",
- index, imported_maximum_size, table.maximum_size);
- return -1;
- }
- }
-
- // Allocate a new dispatch table.
- if (!instance->has_indirect_function_table()) {
- WasmInstanceObject::EnsureIndirectFunctionTableWithMinimumSize(
- instance, imported_table_size);
- table_instances_[table_num].table_size = imported_table_size;
- }
- // Initialize the dispatch table with the (foreign) JS functions
- // that are already in the table.
- for (int i = 0; i < imported_table_size; ++i) {
- Handle<Object> val(table_instance.js_wrappers->get(i), isolate_);
-          // TODO(mtrofin): this is the same logic as WasmTableObject::Set:
-          // insert into the local table a wrapper from the other module, and
-          // add a reference to the owning instance of the other module.
- if (!val->IsJSFunction()) continue;
- if (!WasmExportedFunction::IsWasmExportedFunction(*val)) {
- thrower_->LinkError("table import %d[%d] is not a wasm function",
- index, i);
- return -1;
- }
- // Look up the signature's canonical id. If there is no canonical
- // id, then the signature does not appear at all in this module,
- // so putting {-1} in the table will cause checks to always fail.
- auto target = Handle<WasmExportedFunction>::cast(val);
- Handle<WasmInstanceObject> imported_instance =
- handle(target->instance(), isolate_);
- Address exported_call_target = target->GetWasmCallTarget();
- FunctionSig* sig = imported_instance->module()
- ->functions[target->function_index()]
- .sig;
- IndirectFunctionTableEntry(instance, i)
- .set(module_->signature_map.Find(*sig), *imported_instance,
- exported_call_target);
- }
- num_imported_tables++;
- break;
- }
- case kExternalMemory: {
- // Validation should have failed if more than one memory object was
- // provided.
- DCHECK(!instance->has_memory_object());
- if (!value->IsWasmMemoryObject()) {
- ReportLinkError("memory import must be a WebAssembly.Memory object",
- index, module_name, import_name);
- return -1;
- }
- auto memory = Handle<WasmMemoryObject>::cast(value);
- instance->set_memory_object(*memory);
- Handle<JSArrayBuffer> buffer(memory->array_buffer(), isolate_);
- // memory_ should have already been assigned in Build().
- DCHECK_EQ(*memory_.ToHandleChecked(), *buffer);
- uint32_t imported_cur_pages =
- static_cast<uint32_t>(buffer->byte_length() / kWasmPageSize);
- if (imported_cur_pages < module_->initial_pages) {
- thrower_->LinkError(
- "memory import %d is smaller than initial %u, got %u", index,
- module_->initial_pages, imported_cur_pages);
-        return -1;
-      }
- int32_t imported_maximum_pages = memory->maximum_pages();
- if (module_->has_maximum_pages) {
- if (imported_maximum_pages < 0) {
- thrower_->LinkError(
- "memory import %d has no maximum limit, expected at most %u",
-              index, module_->maximum_pages);
- return -1;
- }
- if (static_cast<uint32_t>(imported_maximum_pages) >
- module_->maximum_pages) {
- thrower_->LinkError(
- "memory import %d has a larger maximum size %u than the "
- "module's declared maximum %u",
- index, imported_maximum_pages, module_->maximum_pages);
- return -1;
- }
- }
- if (module_->has_shared_memory != buffer->is_shared()) {
- thrower_->LinkError(
- "mismatch in shared state of memory, declared = %d, imported = "
- "%d",
- module_->has_shared_memory, buffer->is_shared());
- return -1;
- }
-
- break;
- }
- case kExternalGlobal: {
- // Immutable global imports are converted to numbers and written into
- // the {globals_} array buffer.
- //
- // Mutable global imports instead have their backing array buffers
- // referenced by this instance, and store the address of the imported
- // global in the {imported_mutable_globals_} array.
- const WasmGlobal& global = module_->globals[import.index];
-
- // The mutable-global proposal allows importing i64 values, but only if
- // they are passed as a WebAssembly.Global object.
- if (global.type == kWasmI64 &&
- !(enabled_.mut_global && value->IsWasmGlobalObject())) {
- ReportLinkError("global import cannot have type i64", index,
- module_name, import_name);
- return -1;
- }
- if (module_->origin == kAsmJsOrigin) {
- // Accepting {JSFunction} on top of just primitive values here is a
- // workaround to support legacy asm.js code with broken binding. Note
-          // that using {NaN} (or Smi::kZero) here matches what the observable
-          // conversion via {ToPrimitive} would produce as well.
-          // TODO(mstarzinger): This is still observable if
-          // Function.prototype.valueOf or friends are patched; we might need
-          // to check for that as well.
- if (value->IsJSFunction()) value = isolate_->factory()->nan_value();
- if (value->IsPrimitive() && !value->IsSymbol()) {
- if (global.type == kWasmI32) {
- value = Object::ToInt32(isolate_, value).ToHandleChecked();
- } else {
- value = Object::ToNumber(isolate_, value).ToHandleChecked();
- }
- }
- }
- if (enabled_.mut_global) {
- if (value->IsWasmGlobalObject()) {
- auto global_object = Handle<WasmGlobalObject>::cast(value);
- if (global_object->type() != global.type) {
- ReportLinkError(
- "imported global does not match the expected type", index,
- module_name, import_name);
- return -1;
- }
- if (global_object->is_mutable() != global.mutability) {
- ReportLinkError(
- "imported global does not match the expected mutability",
- index, module_name, import_name);
- return -1;
- }
- if (global.mutability) {
- Handle<JSArrayBuffer> buffer(global_object->array_buffer(),
- isolate_);
- int index = num_imported_mutable_globals++;
- instance->imported_mutable_globals_buffers()->set(index, *buffer);
- // It is safe in this case to store the raw pointer to the buffer
- // since the backing store of the JSArrayBuffer will not be
- // relocated.
- instance->imported_mutable_globals()[index] =
- reinterpret_cast<Address>(
- raw_buffer_ptr(buffer, global_object->offset()));
- } else {
- WriteGlobalValue(global, global_object);
- }
- } else if (value->IsNumber()) {
- if (global.mutability) {
- ReportLinkError(
- "imported mutable global must be a WebAssembly.Global object",
- index, module_name, import_name);
- return -1;
- }
- WriteGlobalValue(global, value->Number());
- } else {
- ReportLinkError(
- "global import must be a number or WebAssembly.Global object",
- index, module_name, import_name);
- return -1;
- }
- } else {
- if (value->IsNumber()) {
- WriteGlobalValue(global, value->Number());
- } else {
- ReportLinkError("global import must be a number", index,
- module_name, import_name);
- return -1;
- }
- }
- break;
- }
- case kExternalException: {
- if (!value->IsWasmExceptionObject()) {
- ReportLinkError("exception import requires a WebAssembly.Exception",
- index, module_name, import_name);
- return -1;
- }
- Handle<WasmExceptionObject> imported_exception =
- Handle<WasmExceptionObject>::cast(value);
- if (!imported_exception->IsSignatureEqual(
- module_->exceptions[import.index].sig)) {
- ReportLinkError("imported exception does not match the expected type",
- index, module_name, import_name);
- return -1;
- }
- Object* exception_tag = imported_exception->exception_tag();
- DCHECK(instance->exceptions_table()->get(import.index)->IsUndefined());
- instance->exceptions_table()->set(import.index, exception_tag);
- exception_wrappers_[import.index] = imported_exception;
- break;
- }
- default:
- UNREACHABLE();
- break;
- }
- }
-
- DCHECK_EQ(module_->num_imported_mutable_globals,
- num_imported_mutable_globals);
-
- return num_imported_functions;
-}
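
The most involved branch above is the function-import path, which splits on compiler::GetWasmImportCallKind: an incompatible callable is a link error, a re-exported wasm function is wired directly to its callee's call target, and any other callable goes through a compiled wasm-to-JS wrapper. A condensed sketch of that three-way decision (hypothetical names, not the real API):

    enum class ImportCallKind { kLinkError, kWasmToWasm, kWasmToJS };

    bool InstallFunctionImport(ImportCallKind kind) {
      switch (kind) {
        case ImportCallKind::kLinkError:
          return false;  // ReportLinkError(...) in the code above
        case ImportCallKind::kWasmToWasm:
          // entry.set_wasm_to_wasm(imported_instance, imported_target);
          return true;
        case ImportCallKind::kWasmToJS:
          // compile a wrapper, then entry.set_wasm_to_js(callable, wrapper);
          return true;
      }
      return false;  // unreachable
    }
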
-
-template <typename T>
-T* InstanceBuilder::GetRawGlobalPtr(const WasmGlobal& global) {
- return reinterpret_cast<T*>(raw_buffer_ptr(globals_, global.offset));
-}
-
-// Process initialization of globals.
-void InstanceBuilder::InitGlobals() {
- for (auto global : module_->globals) {
- if (global.mutability && global.imported) {
- continue;
- }
-
- switch (global.init.kind) {
- case WasmInitExpr::kI32Const:
- WriteLittleEndianValue<int32_t>(GetRawGlobalPtr<int32_t>(global),
- global.init.val.i32_const);
- break;
- case WasmInitExpr::kI64Const:
- WriteLittleEndianValue<int64_t>(GetRawGlobalPtr<int64_t>(global),
- global.init.val.i64_const);
- break;
- case WasmInitExpr::kF32Const:
- WriteLittleEndianValue<float>(GetRawGlobalPtr<float>(global),
- global.init.val.f32_const);
- break;
- case WasmInitExpr::kF64Const:
- WriteLittleEndianValue<double>(GetRawGlobalPtr<double>(global),
- global.init.val.f64_const);
- break;
- case WasmInitExpr::kGlobalIndex: {
- // Initialize with another global.
- uint32_t new_offset = global.offset;
- uint32_t old_offset =
- module_->globals[global.init.val.global_index].offset;
- TRACE("init [globals+%u] = [globals+%d]\n", global.offset, old_offset);
- size_t size = (global.type == kWasmI64 || global.type == kWasmF64)
- ? sizeof(double)
- : sizeof(int32_t);
- memcpy(raw_buffer_ptr(globals_, new_offset),
- raw_buffer_ptr(globals_, old_offset), size);
- break;
- }
- case WasmInitExpr::kNone:
- // Happens with imported globals.
- break;
- default:
- UNREACHABLE();
- break;
- }
- }
-}
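
All non-imported globals live in one little-endian backing buffer: constant initializers store at the global's byte offset, and a kGlobalIndex initializer copies another global's slot. A standalone stand-in for the write helper, assuming a little-endian host (the real WriteLittleEndianValue additionally byte-swaps on big-endian targets):

    #include <cstdint>
    #include <cstring>

    // Store {value} at byte offset {offset} of the globals backing buffer.
    template <typename T>
    void WriteGlobalSlot(uint8_t* globals, uint32_t offset, T value) {
      std::memcpy(globals + offset, &value, sizeof(T));
    }

    // Example: an i32 constant initializer at offset 0, an f64 at offset 8.
    //   WriteGlobalSlot<int32_t>(globals, 0, 42);
    //   WriteGlobalSlot<double>(globals, 8, 3.14);
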
-
-// Allocate memory for a module instance as a new JSArrayBuffer.
-Handle<JSArrayBuffer> InstanceBuilder::AllocateMemory(uint32_t num_pages) {
- if (num_pages > FLAG_wasm_max_mem_pages) {
- thrower_->RangeError("Out of memory: wasm memory too large");
- return Handle<JSArrayBuffer>::null();
- }
- const bool is_shared_memory = module_->has_shared_memory && enabled_.threads;
- i::SharedFlag shared_flag =
- is_shared_memory ? i::SharedFlag::kShared : i::SharedFlag::kNotShared;
- Handle<JSArrayBuffer> mem_buffer;
- if (!NewArrayBuffer(isolate_, num_pages * kWasmPageSize, shared_flag)
- .ToHandle(&mem_buffer)) {
- thrower_->RangeError("Out of memory: wasm memory");
- }
- return mem_buffer;
-}
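
Wasm memory is sized in 64 KiB pages, and the page count is bounded before the byte length is computed, so the multiplication cannot overflow. A minimal sketch of that sizing rule ({max_mem_pages} stands in for the --wasm-max-mem-pages limit):

    #include <cstdint>

    constexpr uint64_t kWasmPageSize = 64 * 1024;  // one wasm page is 64 KiB

    // Reject over-large memories before computing the byte length.
    bool ComputeMemoryByteLength(uint32_t num_pages, uint32_t max_mem_pages,
                                 uint64_t* out_bytes) {
      if (num_pages > max_mem_pages) return false;  // RangeError above
      *out_bytes = uint64_t{num_pages} * kWasmPageSize;
      return true;
    }
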
-
-bool InstanceBuilder::NeedsWrappers() const {
- if (module_->num_exported_functions > 0) return true;
- for (auto& table_instance : table_instances_) {
- if (!table_instance.js_wrappers.is_null()) return true;
- }
- for (auto& table : module_->tables) {
- if (table.exported) return true;
- }
- return false;
-}
-
-// Process the exports, creating wrappers for functions, tables, memories,
-// globals, and exceptions.
-void InstanceBuilder::ProcessExports(Handle<WasmInstanceObject> instance) {
- Handle<FixedArray> export_wrappers(module_object_->export_wrappers(),
- isolate_);
- if (NeedsWrappers()) {
- // Fill the table to cache the exported JSFunction wrappers.
- js_wrappers_.insert(js_wrappers_.begin(), module_->functions.size(),
- Handle<JSFunction>::null());
-
- // If an imported WebAssembly function gets exported, the exported function
-    // has to be identical to the imported function. Therefore, we put all
- // imported WebAssembly functions into the js_wrappers_ list.
- for (int index = 0, end = static_cast<int>(module_->import_table.size());
- index < end; ++index) {
- const WasmImport& import = module_->import_table[index];
- if (import.kind == kExternalFunction) {
- Handle<Object> value = sanitized_imports_[index].value;
- if (WasmExportedFunction::IsWasmExportedFunction(*value)) {
- js_wrappers_[import.index] = Handle<JSFunction>::cast(value);
- }
- }
- }
- }
-
- Handle<JSObject> exports_object;
- bool is_asm_js = false;
- switch (module_->origin) {
- case kWasmOrigin: {
- // Create the "exports" object.
- exports_object = isolate_->factory()->NewJSObjectWithNullProto();
- break;
- }
- case kAsmJsOrigin: {
- Handle<JSFunction> object_function = Handle<JSFunction>(
- isolate_->native_context()->object_function(), isolate_);
- exports_object = isolate_->factory()->NewJSObject(object_function);
- is_asm_js = true;
- break;
- }
- default:
- UNREACHABLE();
- }
- instance->set_exports_object(*exports_object);
-
- Handle<String> single_function_name =
- isolate_->factory()->InternalizeUtf8String(AsmJs::kSingleFunctionName);
-
- PropertyDescriptor desc;
- desc.set_writable(is_asm_js);
- desc.set_enumerable(true);
- desc.set_configurable(is_asm_js);
-
- // Process each export in the export table.
- int export_index = 0; // Index into {export_wrappers}.
- for (const WasmExport& exp : module_->export_table) {
- Handle<String> name = WasmModuleObject::ExtractUtf8StringFromModuleBytes(
- isolate_, module_object_, exp.name)
- .ToHandleChecked();
- Handle<JSObject> export_to;
- if (is_asm_js && exp.kind == kExternalFunction &&
- String::Equals(isolate_, name, single_function_name)) {
- export_to = instance;
- } else {
- export_to = exports_object;
- }
-
- switch (exp.kind) {
- case kExternalFunction: {
- // Wrap and export the code as a JSFunction.
- const WasmFunction& function = module_->functions[exp.index];
- Handle<JSFunction> js_function = js_wrappers_[exp.index];
- if (js_function.is_null()) {
- // Wrap the exported code as a JSFunction.
- Handle<Code> export_code =
- export_wrappers->GetValueChecked<Code>(isolate_, export_index);
- MaybeHandle<String> func_name;
- if (is_asm_js) {
- // For modules arising from asm.js, honor the names section.
- WireBytesRef func_name_ref = module_->LookupFunctionName(
- module_object_->native_module()->wire_bytes(),
- function.func_index);
- func_name = WasmModuleObject::ExtractUtf8StringFromModuleBytes(
- isolate_, module_object_, func_name_ref)
- .ToHandleChecked();
- }
- js_function = WasmExportedFunction::New(
- isolate_, instance, func_name, function.func_index,
- static_cast<int>(function.sig->parameter_count()), export_code);
- js_wrappers_[exp.index] = js_function;
- }
- desc.set_value(js_function);
- export_index++;
- break;
- }
- case kExternalTable: {
- // Export a table as a WebAssembly.Table object.
- TableInstance& table_instance = table_instances_[exp.index];
- const WasmTable& table = module_->tables[exp.index];
- if (table_instance.table_object.is_null()) {
- uint32_t maximum = table.has_maximum_size ? table.maximum_size
- : FLAG_wasm_max_table_size;
- table_instance.table_object =
- WasmTableObject::New(isolate_, table.initial_size, maximum,
- &table_instance.js_wrappers);
- }
- desc.set_value(table_instance.table_object);
- break;
- }
- case kExternalMemory: {
- // Export the memory as a WebAssembly.Memory object. A WasmMemoryObject
- // should already be available if the module has memory, since we always
-      // create or import it when building a WasmInstanceObject.
- DCHECK(instance->has_memory_object());
- desc.set_value(
- Handle<WasmMemoryObject>(instance->memory_object(), isolate_));
- break;
- }
- case kExternalGlobal: {
- const WasmGlobal& global = module_->globals[exp.index];
- if (enabled_.mut_global) {
- Handle<JSArrayBuffer> buffer;
- uint32_t offset;
-
- if (global.mutability && global.imported) {
- Handle<FixedArray> buffers_array(
- instance->imported_mutable_globals_buffers(), isolate_);
- buffer = buffers_array->GetValueChecked<JSArrayBuffer>(
- isolate_, global.index);
- Address global_addr =
- instance->imported_mutable_globals()[global.index];
-
- size_t buffer_size = buffer->byte_length();
- Address backing_store =
- reinterpret_cast<Address>(buffer->backing_store());
- CHECK(global_addr >= backing_store &&
- global_addr < backing_store + buffer_size);
- offset = static_cast<uint32_t>(global_addr - backing_store);
- } else {
- buffer = handle(instance->globals_buffer(), isolate_);
- offset = global.offset;
- }
-
- // Since the global's array buffer is always provided, allocation
- // should never fail.
- Handle<WasmGlobalObject> global_obj =
- WasmGlobalObject::New(isolate_, buffer, global.type, offset,
- global.mutability)
- .ToHandleChecked();
- desc.set_value(global_obj);
- } else {
- // Export the value of the global variable as a number.
- double num = 0;
- switch (global.type) {
- case kWasmI32:
- num = ReadLittleEndianValue<int32_t>(
- GetRawGlobalPtr<int32_t>(global));
- break;
- case kWasmF32:
- num =
- ReadLittleEndianValue<float>(GetRawGlobalPtr<float>(global));
- break;
- case kWasmF64:
- num = ReadLittleEndianValue<double>(
- GetRawGlobalPtr<double>(global));
- break;
- case kWasmI64:
- thrower_->LinkError(
- "export of globals of type I64 is not allowed.");
- return;
- default:
- UNREACHABLE();
- }
- desc.set_value(isolate_->factory()->NewNumber(num));
- }
- break;
- }
- case kExternalException: {
- const WasmException& exception = module_->exceptions[exp.index];
- Handle<WasmExceptionObject> wrapper = exception_wrappers_[exp.index];
- if (wrapper.is_null()) {
- Handle<HeapObject> exception_tag(
- HeapObject::cast(instance->exceptions_table()->get(exp.index)),
- isolate_);
- wrapper =
- WasmExceptionObject::New(isolate_, exception.sig, exception_tag);
- exception_wrappers_[exp.index] = wrapper;
- }
- desc.set_value(wrapper);
- break;
- }
- default:
- UNREACHABLE();
- break;
- }
-
- v8::Maybe<bool> status = JSReceiver::DefineOwnProperty(
- isolate_, export_to, name, &desc, kThrowOnError);
- if (!status.IsJust()) {
- TruncatedUserString<> trunc_name(name->GetCharVector<uint8_t>());
- thrower_->LinkError("export of %.*s failed.", trunc_name.length(),
- trunc_name.start());
- return;
- }
- }
- DCHECK_EQ(export_index, export_wrappers->length());
-
- if (module_->origin == kWasmOrigin) {
- v8::Maybe<bool> success =
- JSReceiver::SetIntegrityLevel(exports_object, FROZEN, kDontThrow);
- DCHECK(success.FromMaybe(false));
- USE(success);
- }
-}
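
Note how one PropertyDescriptor is reused for every export, with its flags keyed on the module origin: asm.js exports stay writable and configurable for legacy code, while pure wasm exports are read-only and the exports object is additionally frozen afterwards. A toy model of that flag choice (hypothetical struct, not V8's PropertyDescriptor):

    struct PropertyFlags {
      bool writable;
      bool enumerable;
      bool configurable;
    };

    PropertyFlags ExportPropertyFlags(bool is_asm_js) {
      return {/*writable=*/is_asm_js, /*enumerable=*/true,
              /*configurable=*/is_asm_js};
    }
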
-
-void InstanceBuilder::InitializeTables(Handle<WasmInstanceObject> instance) {
- size_t table_count = module_->tables.size();
- for (size_t index = 0; index < table_count; ++index) {
- const WasmTable& table = module_->tables[index];
- TableInstance& table_instance = table_instances_[index];
-
- if (!instance->has_indirect_function_table() &&
- table.type == kWasmAnyFunc) {
- WasmInstanceObject::EnsureIndirectFunctionTableWithMinimumSize(
- instance, table.initial_size);
- table_instance.table_size = table.initial_size;
- }
- }
-}
-
-void InstanceBuilder::LoadTableSegments(Handle<WasmInstanceObject> instance) {
- NativeModule* native_module = module_object_->native_module();
- for (auto& table_init : module_->table_inits) {
- uint32_t base = EvalUint32InitExpr(table_init.offset);
- uint32_t num_entries = static_cast<uint32_t>(table_init.entries.size());
- uint32_t index = table_init.table_index;
- TableInstance& table_instance = table_instances_[index];
- DCHECK(in_bounds(base, num_entries, table_instance.table_size));
- for (uint32_t i = 0; i < num_entries; ++i) {
- uint32_t func_index = table_init.entries[i];
- const WasmFunction* function = &module_->functions[func_index];
- int table_index = static_cast<int>(i + base);
-
- // Update the local dispatch table first.
- uint32_t sig_id = module_->signature_ids[function->sig_index];
- Handle<WasmInstanceObject> target_instance = instance;
- Address call_target;
- const bool is_import = func_index < module_->num_imported_functions;
- if (is_import) {
- // For imported calls, take target instance and address from the
- // import table.
- ImportedFunctionEntry entry(instance, func_index);
- target_instance = handle(entry.instance(), isolate_);
- call_target = entry.target();
- } else {
- call_target = native_module->GetCallTargetForFunction(func_index);
- }
- IndirectFunctionTableEntry(instance, table_index)
- .set(sig_id, *target_instance, call_target);
-
- if (!table_instance.table_object.is_null()) {
- // Update the table object's other dispatch tables.
- if (js_wrappers_[func_index].is_null()) {
- // No JSFunction entry yet exists for this function. Create one.
-          // TODO(titzer): We compile JS->wasm wrappers for functions that are
-          // not exported but are in an exported table. This should be done
- // at module compile time and cached instead.
-
- Handle<Code> wrapper_code =
- js_to_wasm_cache_.GetOrCompileJSToWasmWrapper(
- isolate_, function->sig, function->imported);
- MaybeHandle<String> func_name;
- if (module_->origin == kAsmJsOrigin) {
- // For modules arising from asm.js, honor the names section.
- WireBytesRef func_name_ref = module_->LookupFunctionName(
- native_module->wire_bytes(), func_index);
- func_name = WasmModuleObject::ExtractUtf8StringFromModuleBytes(
- isolate_, module_object_, func_name_ref)
- .ToHandleChecked();
- }
- Handle<WasmExportedFunction> js_function = WasmExportedFunction::New(
- isolate_, instance, func_name, func_index,
- static_cast<int>(function->sig->parameter_count()), wrapper_code);
- js_wrappers_[func_index] = js_function;
- }
- table_instance.js_wrappers->set(table_index, *js_wrappers_[func_index]);
- // UpdateDispatchTables() updates all other dispatch tables, since
- // we have not yet added the dispatch table we are currently building.
- WasmTableObject::UpdateDispatchTables(
- isolate_, table_instance.table_object, table_index, function->sig,
- target_instance, call_target);
- }
- }
- }
-
- int table_count = static_cast<int>(module_->tables.size());
- for (int index = 0; index < table_count; ++index) {
- TableInstance& table_instance = table_instances_[index];
-
- // Add the new dispatch table at the end to avoid redundant lookups.
- if (!table_instance.table_object.is_null()) {
- WasmTableObject::AddDispatchTable(isolate_, table_instance.table_object,
- instance, index);
- }
- }
-}
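
Each segment fills entries [base, base + num_entries) of the table, so the in_bounds DCHECK above must be written overflow-safely, since {base} comes from an evaluated init expression. The usual formulation:

    #include <cstdint>

    // Segment entries [base, base + count) must fit in a table of
    // {table_size} slots, written so that base + count cannot wrap around.
    bool InBounds(uint32_t base, uint32_t count, uint32_t table_size) {
      return count <= table_size && base <= table_size - count;
    }
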
-
-void InstanceBuilder::InitializeExceptions(
- Handle<WasmInstanceObject> instance) {
- Handle<FixedArray> exceptions_table(instance->exceptions_table(), isolate_);
- for (int index = 0; index < exceptions_table->length(); ++index) {
- if (!exceptions_table->get(index)->IsUndefined(isolate_)) continue;
-    // TODO(mstarzinger): Tags provide an object identity for each exception;
-    // using {JSObject} here is a gigantic hack, and we should use a dedicated
-    // object with a much lighter footprint for this purpose.
- Handle<HeapObject> exception_tag =
- isolate_->factory()->NewJSObjectWithNullProto();
- exceptions_table->set(index, *exception_tag);
- }
+void CompileNativeModuleWithExplicitBoundsChecks(Isolate* isolate,
+ ErrorThrower* thrower,
+ const WasmModule* wasm_module,
+ NativeModule* native_module) {
+ native_module->DisableTrapHandler();
+ CompileNativeModule(isolate, thrower, wasm_module, native_module);
}
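
Disabling the trap handler means generated code compares every memory access against the current memory size, instead of letting a fault in a guard region signal the out-of-bounds condition. An illustrative sketch of the semantics (ordinary C++, not generated code):

    #include <cstdint>
    #include <cstring>

    // Compare before every access; return false where generated code with
    // explicit bounds checks would trap with "memory access out of bounds".
    bool LoadU32(const uint8_t* mem, uint64_t mem_size, uint32_t index,
                 uint32_t* out) {
      if (uint64_t{index} + sizeof(uint32_t) > mem_size) return false;
      std::memcpy(out, mem + index, sizeof(uint32_t));
      return true;
    }
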
AsyncCompileJob::AsyncCompileJob(
@@ -2184,7 +853,6 @@ AsyncCompileJob::AsyncCompileJob(
std::shared_ptr<CompilationResultResolver> resolver)
: isolate_(isolate),
enabled_features_(enabled),
- async_counters_(isolate->async_counters()),
bytes_copy_(std::move(bytes_copy)),
wire_bytes_(bytes_copy_.get(), bytes_copy_.get() + length),
resolver_(std::move(resolver)) {
@@ -2199,7 +867,7 @@ AsyncCompileJob::AsyncCompileJob(
}
void AsyncCompileJob::Start() {
- DoAsync<DecodeModule>(); // --
+ DoAsync<DecodeModule>(isolate_->counters()); // --
}
void AsyncCompileJob::Abort() {
@@ -2218,8 +886,8 @@ class AsyncStreamingProcessor final : public StreamingProcessor {
bool ProcessSection(SectionCode section_code, Vector<const uint8_t> bytes,
uint32_t offset) override;
- bool ProcessCodeSectionHeader(size_t functions_count,
- uint32_t offset) override;
+ bool ProcessCodeSectionHeader(size_t functions_count, uint32_t offset,
+ std::shared_ptr<WireBytesStorage>) override;
bool ProcessFunctionBody(Vector<const uint8_t> bytes,
uint32_t offset) override;
@@ -2228,13 +896,16 @@ class AsyncStreamingProcessor final : public StreamingProcessor {
void OnFinishedStream(OwnedVector<uint8_t> bytes) override;
- void OnError(DecodeResult result) override;
+ void OnError(const WasmError&) override;
void OnAbort() override;
+ bool Deserialize(Vector<const uint8_t> wire_bytes,
+ Vector<const uint8_t> module_bytes) override;
+
private:
// Finishes the AsyncCompileJob with an error.
- void FinishAsyncCompileJobWithError(ResultBase result);
+ void FinishAsyncCompileJobWithError(const WasmError&);
void CommitCompilationUnits();
@@ -2253,7 +924,11 @@ std::shared_ptr<StreamingDecoder> AsyncCompileJob::CreateStreamingDecoder() {
AsyncCompileJob::~AsyncCompileJob() {
background_task_manager_.CancelAndWait();
- if (native_module_) native_module_->compilation_state()->Abort();
+  // If the runtime objects were not created yet, then initial compilation has
+  // not finished yet; in that case we can abort compilation.
+ if (native_module_ && module_object_.is_null()) {
+ Impl(native_module_->compilation_state())->Abort();
+ }
// Tell the streaming decoder that the AsyncCompileJob is not available
// anymore.
// TODO(ahaas): Is this notification really necessary? Check
@@ -2263,10 +938,61 @@ AsyncCompileJob::~AsyncCompileJob() {
for (auto d : deferred_handles_) delete d;
}
+void AsyncCompileJob::CreateNativeModule(
+ std::shared_ptr<const WasmModule> module) {
+ // Embedder usage count for declared shared memories.
+ if (module->has_shared_memory) {
+ isolate_->CountUsage(v8::Isolate::UseCounterFeature::kWasmSharedMemory);
+ }
+
+ // TODO(wasm): Improve efficiency of storing module wire bytes. Only store
+  // relevant sections, not function bodies.
+
+ // Create the module object and populate with compiled functions and
+ // information needed at instantiation time.
+ // TODO(clemensh): For the same module (same bytes / same hash), we should
+ // only have one {WasmModuleObject}. Otherwise, we might only set
+ // breakpoints on a (potentially empty) subset of the instances.
+ // Create the module object.
+
+ size_t code_size_estimate =
+ wasm::WasmCodeManager::EstimateNativeModuleCodeSize(module.get());
+ native_module_ = isolate_->wasm_engine()->code_manager()->NewNativeModule(
+ isolate_, enabled_features_, code_size_estimate,
+ wasm::NativeModule::kCanAllocateMoreMemory, std::move(module));
+ native_module_->SetWireBytes({std::move(bytes_copy_), wire_bytes_.length()});
+ native_module_->SetRuntimeStubs(isolate_);
+
+ if (stream_) stream_->NotifyNativeModuleCreated(native_module_);
+}
+
+void AsyncCompileJob::PrepareRuntimeObjects() {
+ // Create heap objects for script and module bytes to be stored in the
+ // module object. Asm.js is not compiled asynchronously.
+ const WasmModule* module = native_module_->module();
+ Handle<Script> script =
+ CreateWasmScript(isolate_, wire_bytes_, module->source_map_url);
+
+ size_t code_size_estimate =
+ wasm::WasmCodeManager::EstimateNativeModuleCodeSize(module);
+ module_object_ = WasmModuleObject::New(isolate_, native_module_, script,
+ code_size_estimate);
+
+ {
+ DeferredHandleScope deferred(isolate_);
+ module_object_ = handle(*module_object_, isolate_);
+ deferred_handles_.push_back(deferred.Detach());
+ }
+}
+
// This function assumes that it is executed in a HandleScope, and that a
// context is set on the isolate.
void AsyncCompileJob::FinishCompile() {
- DCHECK_NOT_NULL(isolate_->context());
+ bool is_after_deserialization = !module_object_.is_null();
+ if (!is_after_deserialization) {
+ PrepareRuntimeObjects();
+ }
+ DCHECK(!isolate_->context().is_null());
// Finish the wasm script now and make it public to the debugger.
Handle<Script> script(module_object_->script(), isolate_);
if (script->type() == Script::TYPE_WASM &&
@@ -2277,16 +1003,19 @@ void AsyncCompileJob::FinishCompile() {
}
isolate_->debug()->OnAfterCompile(script);
- // Log the code within the generated module for profiling.
- native_module_->LogWasmCodes(isolate_);
-
// We can only update the feature counts once the entire compile is done.
- auto compilation_state = native_module_->compilation_state();
+ auto compilation_state =
+ Impl(module_object_->native_module()->compilation_state());
compilation_state->PublishDetectedFeatures(
isolate_, *compilation_state->detected_features());
- // TODO(wasm): compiling wrappers should be made async as well.
- DoSync<CompileWrappers>();
+ // TODO(bbudge) Allow deserialization without wrapper compilation, so we can
+ // just compile wrappers here.
+ if (!is_after_deserialization) {
+ // TODO(wasm): compiling wrappers should be made async.
+ CompileWrappers();
+ }
+ FinishModule();
}
void AsyncCompileJob::AsyncCompileFailed(Handle<Object> error_reason) {
@@ -2300,27 +1029,83 @@ void AsyncCompileJob::AsyncCompileSucceeded(Handle<WasmModuleObject> result) {
resolver_->OnCompilationSucceeded(result);
}
+class AsyncCompileJob::CompilationStateCallback {
+ public:
+ explicit CompilationStateCallback(AsyncCompileJob* job) : job_(job) {}
+
+ void operator()(CompilationEvent event, const WasmError* error) {
+ // This callback is only being called from a foreground task.
+ switch (event) {
+ case CompilationEvent::kFinishedBaselineCompilation:
+ DCHECK(!last_event_.has_value());
+ if (job_->DecrementAndCheckFinisherCount()) {
+ SaveContext saved_context(job_->isolate());
+ job_->isolate()->set_context(*job_->native_context_);
+ job_->FinishCompile();
+ }
+ break;
+ case CompilationEvent::kFinishedTopTierCompilation:
+ DCHECK_EQ(CompilationEvent::kFinishedBaselineCompilation, last_event_);
+ // This callback should not react to top tier finished callbacks, since
+        // the job might already be gone by then.
+ break;
+ case CompilationEvent::kFailedCompilation:
+ DCHECK(!last_event_.has_value());
+ DCHECK_NOT_NULL(error);
+ // Tier-up compilation should not fail if baseline compilation
+ // did not fail.
+ DCHECK(!Impl(job_->native_module_->compilation_state())
+ ->baseline_compilation_finished());
+
+ {
+ SaveContext saved_context(job_->isolate());
+ job_->isolate()->set_context(*job_->native_context_);
+ ErrorThrower thrower(job_->isolate(), "AsyncCompilation");
+ thrower.CompileFailed(nullptr, *error);
+ Handle<Object> error = thrower.Reify();
+
+ DeferredHandleScope deferred(job_->isolate());
+ error = handle(*error, job_->isolate());
+ job_->deferred_handles_.push_back(deferred.Detach());
+
+ job_->DoSync<CompileFailed, kUseExistingForegroundTask>(error);
+ }
+
+ break;
+ default:
+ UNREACHABLE();
+ }
+#ifdef DEBUG
+ last_event_ = event;
+#endif
+ }
+
+ private:
+ AsyncCompileJob* job_;
+#ifdef DEBUG
+ base::Optional<CompilationEvent> last_event_;
+#endif
+};
+
// A closure to run a compilation step (either as foreground or background
// task) and schedule the next step(s), if any.
class AsyncCompileJob::CompileStep {
public:
virtual ~CompileStep() = default;
- void Run(bool on_foreground) {
+ void Run(AsyncCompileJob* job, bool on_foreground) {
if (on_foreground) {
- HandleScope scope(job_->isolate_);
- SaveContext saved_context(job_->isolate_);
- job_->isolate_->set_context(*job_->native_context_);
- RunInForeground();
+ HandleScope scope(job->isolate_);
+ SaveContext saved_context(job->isolate_);
+ job->isolate_->set_context(*job->native_context_);
+ RunInForeground(job);
} else {
- RunInBackground();
+ RunInBackground(job);
}
}
- virtual void RunInForeground() { UNREACHABLE(); }
- virtual void RunInBackground() { UNREACHABLE(); }
-
- AsyncCompileJob* job_ = nullptr;
+ virtual void RunInForeground(AsyncCompileJob*) { UNREACHABLE(); }
+ virtual void RunInBackground(AsyncCompileJob*) { UNREACHABLE(); }
};
class AsyncCompileJob::CompileTask : public CancelableTask {
@@ -2342,7 +1127,7 @@ class AsyncCompileJob::CompileTask : public CancelableTask {
void RunInternal() final {
if (!job_) return;
if (on_foreground_) ResetPendingForegroundTask();
- job_->step_->Run(on_foreground_);
+ job_->step_->Run(job_, on_foreground_);
// After execution, reset {job_} such that we don't try to reset the pending
// foreground task when the task is deleted.
job_ = nullptr;
@@ -2398,9 +1183,12 @@ void AsyncCompileJob::StartBackgroundTask() {
}
}
-template <typename Step, typename... Args>
+template <typename Step,
+ AsyncCompileJob::UseExistingForegroundTask use_existing_fg_task,
+ typename... Args>
void AsyncCompileJob::DoSync(Args&&... args) {
NextStep<Step>(std::forward<Args>(args)...);
+ if (use_existing_fg_task && pending_foreground_task_ != nullptr) return;
StartForegroundTask();
}
@@ -2419,7 +1207,6 @@ void AsyncCompileJob::DoAsync(Args&&... args) {
template <typename Step, typename... Args>
void AsyncCompileJob::NextStep(Args&&... args) {
step_.reset(new Step(std::forward<Args>(args)...));
- step_->job_ = this;
}
//==========================================================================
@@ -2427,27 +1214,33 @@ void AsyncCompileJob::NextStep(Args&&... args) {
//==========================================================================
class AsyncCompileJob::DecodeModule : public AsyncCompileJob::CompileStep {
public:
- void RunInBackground() override {
+ explicit DecodeModule(Counters* counters) : counters_(counters) {}
+
+ void RunInBackground(AsyncCompileJob* job) override {
ModuleResult result;
{
DisallowHandleAllocation no_handle;
DisallowHeapAllocation no_allocation;
// Decode the module bytes.
TRACE_COMPILE("(1) Decoding module...\n");
- result =
- DecodeWasmModule(job_->enabled_features_, job_->wire_bytes_.start(),
- job_->wire_bytes_.end(), false, kWasmOrigin,
- job_->async_counters().get(),
- job_->isolate()->wasm_engine()->allocator());
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"),
+ "AsyncCompileJob::DecodeModule");
+ result = DecodeWasmModule(
+ job->enabled_features_, job->wire_bytes_.start(),
+ job->wire_bytes_.end(), false, kWasmOrigin, counters_,
+ job->isolate()->wasm_engine()->allocator());
}
if (result.failed()) {
// Decoding failure; reject the promise and clean up.
- job_->DoSync<DecodeFail>(std::move(result));
+ job->DoSync<DecodeFail>(std::move(result).error());
} else {
// Decode passed.
- job_->DoSync<PrepareAndStartCompile>(std::move(result.val), true);
+ job->DoSync<PrepareAndStartCompile>(std::move(result).value(), true);
}
}
+
+ private:
+ Counters* const counters_;
};
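
The {std::move(result).error()} and {std::move(result).value()} calls rely on ref-qualified accessors, so the decoded module is moved out of the result instead of being copied. A simplified sketch of that idiom (not the real ModuleResult):

    #include <utility>

    // Calling value() on an rvalue moves the payload out; on an lvalue it
    // only borrows it.
    template <typename T>
    class Result {
     public:
      explicit Result(T value) : value_(std::move(value)) {}
      const T& value() const& { return value_; }    // lvalue: borrow
      T&& value() && { return std::move(value_); }  // rvalue: move out

     private:
      T value_;
    };
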
//==========================================================================
@@ -2455,16 +1248,17 @@ class AsyncCompileJob::DecodeModule : public AsyncCompileJob::CompileStep {
//==========================================================================
class AsyncCompileJob::DecodeFail : public CompileStep {
public:
- explicit DecodeFail(ModuleResult result) : result_(std::move(result)) {}
+ explicit DecodeFail(WasmError error) : error_(std::move(error)) {}
private:
- ModuleResult result_;
- void RunInForeground() override {
+ WasmError error_;
+
+ void RunInForeground(AsyncCompileJob* job) override {
TRACE_COMPILE("(1b) Decoding failed.\n");
- ErrorThrower thrower(job_->isolate_, "AsyncCompile");
- thrower.CompileFailed("Wasm decoding failed", result_);
+ ErrorThrower thrower(job->isolate_, "AsyncCompile");
+ thrower.CompileFailed("Wasm decoding failed", error_);
// {job_} is deleted in AsyncCompileFailed, therefore the {return}.
- return job_->AsyncCompileFailed(thrower.Reify());
+ return job->AsyncCompileFailed(thrower.Reify());
}
};
@@ -2481,108 +1275,27 @@ class AsyncCompileJob::PrepareAndStartCompile : public CompileStep {
std::shared_ptr<const WasmModule> module_;
bool start_compilation_;
- void RunInForeground() override {
+ void RunInForeground(AsyncCompileJob* job) override {
TRACE_COMPILE("(2) Prepare and start compile...\n");
// Make sure all compilation tasks stopped running. Decoding (async step)
// is done.
- job_->background_task_manager_.CancelAndWait();
-
- // Embedder usage count for declared shared memories.
- if (module_->has_shared_memory) {
- job_->isolate_->CountUsage(
- v8::Isolate::UseCounterFeature::kWasmSharedMemory);
- }
+ job->background_task_manager_.CancelAndWait();
- // Create heap objects for script and module bytes to be stored in the
- // module object. Asm.js is not compiled asynchronously.
- const WasmModule* module = module_.get();
- Handle<Script> script = CreateWasmScript(job_->isolate_, job_->wire_bytes_,
- module->source_map_url);
- Handle<ByteArray> asm_js_offset_table;
-
- ModuleEnv env = CreateDefaultModuleEnv(module);
- // TODO(wasm): Improve efficiency of storing module wire bytes. Only store
- // relevant sections, not function bodies
-
- // Create the module object and populate with compiled functions and
- // information needed at instantiation time.
- // TODO(clemensh): For the same module (same bytes / same hash), we should
- // only have one {WasmModuleObject}. Otherwise, we might only set
- // breakpoints on a (potentially empty) subset of the instances.
- // Create the module object.
- job_->module_object_ = WasmModuleObject::New(
- job_->isolate_, job_->enabled_features_, module_, env,
- {std::move(job_->bytes_copy_), job_->wire_bytes_.length()}, script,
- asm_js_offset_table);
- job_->native_module_ = job_->module_object_->native_module();
+ job->CreateNativeModule(module_);
- {
- DeferredHandleScope deferred(job_->isolate_);
- job_->module_object_ = handle(*job_->module_object_, job_->isolate_);
- job_->deferred_handles_.push_back(deferred.Detach());
- }
size_t num_functions =
- module->functions.size() - module->num_imported_functions;
+ module_->functions.size() - module_->num_imported_functions;
if (num_functions == 0) {
- // Tiering has nothing to do if module is empty.
- job_->tiering_completed_ = true;
-
// Degenerate case of an empty module.
- job_->FinishCompile();
+ job->FinishCompile();
return;
}
- CompilationState* compilation_state =
- job_->native_module_->compilation_state();
- {
- // Instance field {job_} cannot be captured by copy, therefore
- // we need to add a local helper variable {job}. We want to
- // capture the {job} pointer by copy, as it otherwise is dependent
- // on the current step we are in.
- AsyncCompileJob* job = job_;
- compilation_state->SetCallback(
- [job](CompilationEvent event, ErrorThrower* thrower) {
- // Callback is called from a foreground thread.
- switch (event) {
- case CompilationEvent::kFinishedBaselineCompilation:
- if (job->DecrementAndCheckFinisherCount()) {
- SaveContext saved_context(job->isolate());
- job->isolate()->set_context(*job->native_context_);
- job->FinishCompile();
- }
- return;
- case CompilationEvent::kFinishedTopTierCompilation:
- // If a foreground task or a finisher is pending, we rely on
- // FinishModule to remove the job.
- if (job->pending_foreground_task_ ||
- job->outstanding_finishers_.load() > 0) {
- job->tiering_completed_ = true;
- return;
- }
- job->isolate_->wasm_engine()->RemoveCompileJob(job);
- return;
- case CompilationEvent::kFailedCompilation: {
- // Tier-up compilation should not fail if baseline compilation
- // did not fail.
- DCHECK(!job->native_module_->compilation_state()
- ->baseline_compilation_finished());
-
- SaveContext saved_context(job->isolate());
- job->isolate()->set_context(*job->native_context_);
- Handle<Object> error = thrower->Reify();
-
- DeferredHandleScope deferred(job->isolate());
- error = handle(*error, job->isolate());
- job->deferred_handles_.push_back(deferred.Detach());
- job->DoSync<CompileFailed>(error);
- return;
- }
- }
- UNREACHABLE();
- });
- }
+ CompilationStateImpl* compilation_state =
+ Impl(job->native_module_->compilation_state());
+ compilation_state->AddCallback(CompilationStateCallback{job});
if (start_compilation_) {
// TODO(ahaas): Try to remove the {start_compilation_} check when
// streaming decoding is done in the background. If
@@ -2590,9 +1303,10 @@ class AsyncCompileJob::PrepareAndStartCompile : public CompileStep {
// then DoAsync would do the same as NextStep already.
compilation_state->SetNumberOfFunctionsToCompile(
- module->num_declared_functions);
+ module_->num_declared_functions);
// Add compilation units and kick off compilation.
- InitializeCompilationUnits(job_->native_module_);
+ InitializeCompilationUnits(job->native_module_.get(),
+ job->isolate()->wasm_engine());
}
}
};
@@ -2605,92 +1319,70 @@ class AsyncCompileJob::CompileFailed : public CompileStep {
explicit CompileFailed(Handle<Object> error_reason)
: error_reason_(error_reason) {}
- void RunInForeground() override {
+ void RunInForeground(AsyncCompileJob* job) override {
TRACE_COMPILE("(4b) Compilation Failed...\n");
- return job_->AsyncCompileFailed(error_reason_);
+ return job->AsyncCompileFailed(error_reason_);
}
private:
Handle<Object> error_reason_;
};
-//==========================================================================
-// Step 5 (sync): Compile JS->wasm wrappers.
-//==========================================================================
-class AsyncCompileJob::CompileWrappers : public CompileStep {
+void AsyncCompileJob::CompileWrappers() {
// TODO(wasm): Compile all wrappers here, including the start function wrapper
// and the wrappers for the function table elements.
- void RunInForeground() override {
- TRACE_COMPILE("(5) Compile wrappers...\n");
- // TODO(6792): No longer needed once WebAssembly code is off heap.
- CodeSpaceMemoryModificationScope modification_scope(job_->isolate_->heap());
- // Compile JS->wasm wrappers for exported functions.
- CompileJsToWasmWrappers(job_->isolate_, job_->module_object_);
- job_->DoSync<FinishModule>();
- }
-};
+ TRACE_COMPILE("(5) Compile wrappers...\n");
+ // Compile JS->wasm wrappers for exported functions.
+ CompileJsToWasmWrappers(isolate_, module_object_->native_module()->module(),
+ handle(module_object_->export_wrappers(), isolate_));
+}
-//==========================================================================
-// Step 6 (sync): Finish the module and resolve the promise.
-//==========================================================================
-class AsyncCompileJob::FinishModule : public CompileStep {
- void RunInForeground() override {
- TRACE_COMPILE("(6) Finish module...\n");
- job_->AsyncCompileSucceeded(job_->module_object_);
-
- size_t num_functions = job_->native_module_->num_functions() -
- job_->native_module_->num_imported_functions();
- if (job_->native_module_->compilation_state()->compile_mode() ==
- CompileMode::kRegular ||
- num_functions == 0) {
- // If we do not tier up, the async compile job is done here and
- // can be deleted.
- job_->isolate_->wasm_engine()->RemoveCompileJob(job_);
- return;
- }
- // If background tiering compilation finished before we resolved the
- // promise, switch to patching now. Otherwise, patching will be scheduled
- // by a callback.
- DCHECK_EQ(CompileMode::kTiering,
- job_->native_module_->compilation_state()->compile_mode());
- if (job_->tiering_completed_) {
- job_->isolate_->wasm_engine()->RemoveCompileJob(job_);
- }
+void AsyncCompileJob::FinishModule() {
+ TRACE_COMPILE("(6) Finish module...\n");
+ AsyncCompileSucceeded(module_object_);
+
+ size_t num_functions = native_module_->num_functions() -
+ native_module_->num_imported_functions();
+ auto* compilation_state = Impl(native_module_->compilation_state());
+ if (compilation_state->compile_mode() == CompileMode::kRegular ||
+ num_functions == 0) {
+ // If we do not tier up, the async compile job is done here and
+ // can be deleted.
+ isolate_->wasm_engine()->RemoveCompileJob(this);
+ return;
}
-};
+ DCHECK_EQ(CompileMode::kTiering, compilation_state->compile_mode());
+ if (compilation_state->baseline_compilation_finished()) {
+ isolate_->wasm_engine()->RemoveCompileJob(this);
+ }
+}
AsyncStreamingProcessor::AsyncStreamingProcessor(AsyncCompileJob* job)
: decoder_(job->enabled_features_),
job_(job),
compilation_unit_builder_(nullptr) {}
-void AsyncStreamingProcessor::FinishAsyncCompileJobWithError(ResultBase error) {
+void AsyncStreamingProcessor::FinishAsyncCompileJobWithError(
+ const WasmError& error) {
+ DCHECK(error.has_error());
// Make sure all background tasks stopped executing before we change the state
// of the AsyncCompileJob to DecodeFail.
job_->background_task_manager_.CancelAndWait();
- // Create a ModuleResult from the result we got as parameter. Since there was
- // no error, we don't have to provide a real wasm module to the ModuleResult.
- ModuleResult result(nullptr);
- result.MoveErrorFrom(error);
-
// Check if there is already a CompiledModule, in which case we have to clean
- // up the CompilationState as well.
+ // up the CompilationStateImpl as well.
if (job_->native_module_) {
- job_->native_module_->compilation_state()->Abort();
+ Impl(job_->native_module_->compilation_state())->Abort();
- if (job_->pending_foreground_task_ == nullptr) {
- job_->DoSync<AsyncCompileJob::DecodeFail>(std::move(result));
- } else {
- job_->NextStep<AsyncCompileJob::DecodeFail>(std::move(result));
- }
+ job_->DoSync<AsyncCompileJob::DecodeFail,
+ AsyncCompileJob::kUseExistingForegroundTask>(error);
// Clear the {compilation_unit_builder_} if it exists. This is needed
// because there is a check in the destructor of the
// {CompilationUnitBuilder} that it is empty.
if (compilation_unit_builder_) compilation_unit_builder_->Clear();
} else {
- job_->DoSync<AsyncCompileJob::DecodeFail>(std::move(result));
+ job_->DoSync<AsyncCompileJob::DecodeFail>(error);
}
}
@@ -2698,11 +1390,11 @@ void AsyncStreamingProcessor::FinishAsyncCompileJobWithError(ResultBase error) {
bool AsyncStreamingProcessor::ProcessModuleHeader(Vector<const uint8_t> bytes,
uint32_t offset) {
TRACE_STREAMING("Process module header...\n");
- decoder_.StartDecoding(job_->async_counters().get(),
+ decoder_.StartDecoding(job_->isolate()->counters(),
job_->isolate()->wasm_engine()->allocator());
decoder_.DecodeModuleHeader(bytes, offset);
if (!decoder_.ok()) {
- FinishAsyncCompileJobWithError(decoder_.FinishDecoding(false));
+ FinishAsyncCompileJobWithError(decoder_.FinishDecoding(false).error());
return false;
}
return true;
@@ -2734,35 +1426,38 @@ bool AsyncStreamingProcessor::ProcessSection(SectionCode section_code,
constexpr bool verify_functions = false;
decoder_.DecodeSection(section_code, bytes, offset, verify_functions);
if (!decoder_.ok()) {
- FinishAsyncCompileJobWithError(decoder_.FinishDecoding(false));
+ FinishAsyncCompileJobWithError(decoder_.FinishDecoding(false).error());
return false;
}
return true;
}
// Start the code section.
-bool AsyncStreamingProcessor::ProcessCodeSectionHeader(size_t functions_count,
- uint32_t offset) {
+bool AsyncStreamingProcessor::ProcessCodeSectionHeader(
+ size_t functions_count, uint32_t offset,
+ std::shared_ptr<WireBytesStorage> wire_bytes_storage) {
TRACE_STREAMING("Start the code section with %zu functions...\n",
functions_count);
if (!decoder_.CheckFunctionsCount(static_cast<uint32_t>(functions_count),
offset)) {
- FinishAsyncCompileJobWithError(decoder_.FinishDecoding(false));
+ FinishAsyncCompileJobWithError(decoder_.FinishDecoding(false).error());
return false;
}
// Execute the PrepareAndStartCompile step immediately and not in a separate
// task.
job_->DoImmediately<AsyncCompileJob::PrepareAndStartCompile>(
decoder_.shared_module(), false);
+ job_->native_module_->compilation_state()->SetWireBytesStorage(
+ std::move(wire_bytes_storage));
- job_->native_module_->compilation_state()->SetNumberOfFunctionsToCompile(
- functions_count);
+ auto* compilation_state = Impl(job_->native_module_->compilation_state());
+ compilation_state->SetNumberOfFunctionsToCompile(functions_count);
// Set outstanding_finishers_ to 2, because both the AsyncCompileJob and the
// AsyncStreamingProcessor have to finish.
job_->outstanding_finishers_.store(2);
- compilation_unit_builder_.reset(
- new CompilationUnitBuilder(job_->native_module_));
+ compilation_unit_builder_.reset(new CompilationUnitBuilder(
+ job_->native_module_.get(), job_->isolate()->wasm_engine()));
return true;
}
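
{outstanding_finishers_} starts at 2 because two parties must check in before the module can be finished: the streaming processor (once the last bytes arrive) and the compilation callback (once baseline compilation is done). A sketch of the handshake behind DecrementAndCheckFinisherCount (assumed shape, simplified):

    #include <atomic>

    // fetch_sub returns the previous value, so whichever party observes 1
    // was last to check in and gets to finish the module.
    std::atomic<int> outstanding_finishers{2};

    bool DecrementAndCheckFinisherCount() {
      return outstanding_finishers.fetch_sub(1) == 1;
    }
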
@@ -2775,8 +1470,7 @@ bool AsyncStreamingProcessor::ProcessFunctionBody(Vector<const uint8_t> bytes,
next_function_, static_cast<uint32_t>(bytes.length()), offset, false);
uint32_t index = next_function_ + decoder_.module()->num_imported_functions;
- const WasmFunction* func = &decoder_.module()->functions[index];
- compilation_unit_builder_->AddUnit(func, offset, bytes);
+ compilation_unit_builder_->AddUnit(index);
++next_function_;
// This method always succeeds. The return value is necessary to comply with
// the StreamingProcessor interface.
@@ -2797,32 +1491,35 @@ void AsyncStreamingProcessor::OnFinishedChunk() {
void AsyncStreamingProcessor::OnFinishedStream(OwnedVector<uint8_t> bytes) {
TRACE_STREAMING("Finish stream...\n");
ModuleResult result = decoder_.FinishDecoding(false);
- DCHECK(result.ok());
+ if (result.failed()) {
+ FinishAsyncCompileJobWithError(result.error());
+ return;
+ }
+ // We have to open a HandleScope and prepare the Context for
+ // CreateNativeModule, PrepareRuntimeObjects and FinishCompile as this is a
+ // callback from the embedder.
+ HandleScope scope(job_->isolate_);
+ SaveContext saved_context(job_->isolate_);
+ job_->isolate_->set_context(*job_->native_context_);
+
bool needs_finish = job_->DecrementAndCheckFinisherCount();
if (job_->native_module_ == nullptr) {
- // We are processing a WebAssembly module without code section. We need to
- // prepare compilation first before we can finish it.
- // {PrepareAndStartCompile} will call {FinishCompile} by itself if there
- // is no code section.
+ // We are processing a WebAssembly module without code section. Create the
+ // runtime objects now (would otherwise happen in {PrepareAndStartCompile}).
+ job_->CreateNativeModule(std::move(result).value());
DCHECK(needs_finish);
- needs_finish = false;
- job_->DoImmediately<AsyncCompileJob::PrepareAndStartCompile>(result.val,
- true);
}
job_->wire_bytes_ = ModuleWireBytes(bytes.as_vector());
- job_->native_module_->set_wire_bytes(std::move(bytes));
+ job_->native_module_->SetWireBytes(std::move(bytes));
if (needs_finish) {
- HandleScope scope(job_->isolate_);
- SaveContext saved_context(job_->isolate_);
- job_->isolate_->set_context(*job_->native_context_);
job_->FinishCompile();
}
}
// Report an error detected in the StreamingDecoder.
-void AsyncStreamingProcessor::OnError(DecodeResult result) {
+void AsyncStreamingProcessor::OnError(const WasmError& error) {
TRACE_STREAMING("Stream error...\n");
- FinishAsyncCompileJobWithError(std::move(result));
+ FinishAsyncCompileJobWithError(error);
}
void AsyncStreamingProcessor::OnAbort() {
@@ -2830,29 +1527,41 @@ void AsyncStreamingProcessor::OnAbort() {
job_->Abort();
}
-void CompilationStateDeleter::operator()(
- CompilationState* compilation_state) const {
- delete compilation_state;
-}
+bool AsyncStreamingProcessor::Deserialize(Vector<const uint8_t> module_bytes,
+ Vector<const uint8_t> wire_bytes) {
+ // DeserializeNativeModule and FinishCompile assume that they are executed in
+ // a HandleScope, and that a context is set on the isolate.
+ HandleScope scope(job_->isolate_);
+ SaveContext saved_context(job_->isolate_);
+ job_->isolate_->set_context(*job_->native_context_);
-std::unique_ptr<CompilationState, CompilationStateDeleter> NewCompilationState(
- Isolate* isolate, const ModuleEnv& env) {
- return std::unique_ptr<CompilationState, CompilationStateDeleter>(
- new CompilationState(isolate, env));
-}
+ MaybeHandle<WasmModuleObject> result =
+ DeserializeNativeModule(job_->isolate_, module_bytes, wire_bytes);
+ if (result.is_null()) return false;
-ModuleEnv* GetModuleEnv(CompilationState* compilation_state) {
- return compilation_state->module_env();
+ job_->module_object_ = result.ToHandleChecked();
+ {
+ DeferredHandleScope deferred(job_->isolate_);
+ job_->module_object_ = handle(*job_->module_object_, job_->isolate_);
+ job_->deferred_handles_.push_back(deferred.Detach());
+ }
+ job_->native_module_ = job_->module_object_->shared_native_module();
+ auto owned_wire_bytes = OwnedVector<uint8_t>::Of(wire_bytes);
+ job_->wire_bytes_ = ModuleWireBytes(owned_wire_bytes.as_vector());
+ job_->native_module_->SetWireBytes(std::move(owned_wire_bytes));
+ job_->FinishCompile();
+ return true;
}
-CompilationState::CompilationState(internal::Isolate* isolate,
- const ModuleEnv& env)
+CompilationStateImpl::CompilationStateImpl(internal::Isolate* isolate,
+ NativeModule* native_module)
: isolate_(isolate),
- wasm_engine_(isolate->wasm_engine()),
- module_env_(env),
- compile_mode_(FLAG_wasm_tier_up && env.module->origin == kWasmOrigin
+ native_module_(native_module),
+ compile_mode_(FLAG_wasm_tier_up &&
+ native_module->module()->origin == kWasmOrigin
? CompileMode::kTiering
: CompileMode::kRegular),
+ should_log_code_(WasmCode::ShouldBeLogged(isolate)),
max_background_tasks_(std::max(
1, std::min(FLAG_wasm_num_compilation_tasks,
V8::GetCurrentPlatform()->NumberOfWorkerThreads()))) {
@@ -2861,36 +1570,41 @@ CompilationState::CompilationState(internal::Isolate* isolate,
foreground_task_runner_ = platform->GetForegroundTaskRunner(v8_isolate);
}
-CompilationState::~CompilationState() {
+CompilationStateImpl::~CompilationStateImpl() {
+ DCHECK(background_task_manager_.canceled());
+ DCHECK(foreground_task_manager_.canceled());
+ CompilationError* error = compile_error_.load(std::memory_order_acquire);
+ if (error != nullptr) delete error;
+}
+
+void CompilationStateImpl::CancelAndWait() {
background_task_manager_.CancelAndWait();
foreground_task_manager_.CancelAndWait();
}
-void CompilationState::SetNumberOfFunctionsToCompile(size_t num_functions) {
+void CompilationStateImpl::SetNumberOfFunctionsToCompile(size_t num_functions) {
DCHECK(!failed());
- outstanding_units_ = num_functions;
+ base::MutexGuard guard(&mutex_);
+ outstanding_baseline_units_ = num_functions;
if (compile_mode_ == CompileMode::kTiering) {
- outstanding_units_ += num_functions;
- num_tiering_units_ = num_functions;
+ outstanding_tiering_units_ = num_functions;
}
}
-void CompilationState::SetCallback(
- std::function<void(CompilationEvent, ErrorThrower*)> callback) {
- DCHECK_NULL(callback_);
- callback_ = std::move(callback);
+void CompilationStateImpl::AddCallback(CompilationState::callback_t callback) {
+ callbacks_.emplace_back(std::move(callback));
}
-void CompilationState::AddCompilationUnits(
+void CompilationStateImpl::AddCompilationUnits(
std::vector<std::unique_ptr<WasmCompilationUnit>>& baseline_units,
std::vector<std::unique_ptr<WasmCompilationUnit>>& tiering_units) {
{
- base::LockGuard<base::Mutex> guard(&mutex_);
+ base::MutexGuard guard(&mutex_);
if (compile_mode_ == CompileMode::kTiering) {
DCHECK_EQ(baseline_units.size(), tiering_units.size());
- DCHECK_EQ(tiering_units.back()->mode(), ExecutionTier::kOptimized);
+ DCHECK_EQ(tiering_units.back()->tier(), ExecutionTier::kOptimized);
tiering_compilation_units_.insert(
tiering_compilation_units_.end(),
std::make_move_iterator(tiering_units.begin()),
@@ -2909,8 +1623,8 @@ void CompilationState::AddCompilationUnits(
}
std::unique_ptr<WasmCompilationUnit>
-CompilationState::GetNextCompilationUnit() {
- base::LockGuard<base::Mutex> guard(&mutex_);
+CompilationStateImpl::GetNextCompilationUnit() {
+ base::MutexGuard guard(&mutex_);
std::vector<std::unique_ptr<WasmCompilationUnit>>& units =
baseline_compilation_units_.empty() ? tiering_compilation_units_
@@ -2925,97 +1639,134 @@ CompilationState::GetNextCompilationUnit() {
return std::unique_ptr<WasmCompilationUnit>();
}
-std::unique_ptr<WasmCompilationUnit> CompilationState::GetNextExecutedUnit() {
- base::LockGuard<base::Mutex> guard(&mutex_);
+std::unique_ptr<WasmCompilationUnit>
+CompilationStateImpl::GetNextExecutedUnit() {
std::vector<std::unique_ptr<WasmCompilationUnit>>& units = finish_units();
+ base::MutexGuard guard(&mutex_);
if (units.empty()) return {};
std::unique_ptr<WasmCompilationUnit> ret = std::move(units.back());
units.pop_back();
return ret;
}
-bool CompilationState::HasCompilationUnitToFinish() {
- base::LockGuard<base::Mutex> guard(&mutex_);
+bool CompilationStateImpl::HasCompilationUnitToFinish() {
return !finish_units().empty();
}
-void CompilationState::OnError(ErrorThrower* thrower) {
- Abort();
- DCHECK(thrower->error());
- NotifyOnEvent(CompilationEvent::kFailedCompilation, thrower);
-}
+void CompilationStateImpl::OnFinishedUnit(ExecutionTier tier, WasmCode* code) {
+ // This mutex guarantees that events happen in the right order.
+ base::MutexGuard guard(&mutex_);
-void CompilationState::OnFinishedUnit() {
- DCHECK_GT(outstanding_units_, 0);
- --outstanding_units_;
+ if (failed()) return;
- if (outstanding_units_ == 0) {
- background_task_manager_.CancelAndWait();
- baseline_compilation_finished_ = true;
+ // If we are *not* compiling in tiering mode, then all units are counted as
+ // baseline units.
+ bool is_tiering_mode = compile_mode_ == CompileMode::kTiering;
+ bool is_tiering_unit = is_tiering_mode && tier == ExecutionTier::kOptimized;
- DCHECK(compile_mode_ == CompileMode::kRegular ||
- compile_mode_ == CompileMode::kTiering);
- NotifyOnEvent(compile_mode_ == CompileMode::kRegular
- ? CompilationEvent::kFinishedBaselineCompilation
- : CompilationEvent::kFinishedTopTierCompilation,
- nullptr);
+ // Sanity check: If we are not in tiering mode, there cannot be outstanding
+ // tiering units.
+ DCHECK_IMPLIES(!is_tiering_mode, outstanding_tiering_units_ == 0);
- } else if (outstanding_units_ == num_tiering_units_) {
- DCHECK_EQ(compile_mode_, CompileMode::kTiering);
- baseline_compilation_finished_ = true;
+ // Bitset of events to deliver.
+ base::EnumSet<CompilationEvent> events;
- // TODO(wasm): For streaming compilation, we want to start top tier
- // compilation before all functions have been compiled with Liftoff, e.g.
- // in the case when all received functions have been compiled with Liftoff
- // and we are waiting for new functions to compile.
+ if (is_tiering_unit) {
+ DCHECK_LT(0, outstanding_tiering_units_);
+ --outstanding_tiering_units_;
+ if (outstanding_tiering_units_ == 0) {
+ // If baseline compilation has not finished yet, then also trigger
+ // {kFinishedBaselineCompilation}.
+ if (outstanding_baseline_units_ > 0) {
+ events.Add(CompilationEvent::kFinishedBaselineCompilation);
+ }
+ events.Add(CompilationEvent::kFinishedTopTierCompilation);
+ }
+ } else {
+ DCHECK_LT(0, outstanding_baseline_units_);
+ --outstanding_baseline_units_;
+ if (outstanding_baseline_units_ == 0) {
+ events.Add(CompilationEvent::kFinishedBaselineCompilation);
+ // If we are not tiering, then we also trigger the "top tier finished"
+ // event when baseline compilation is finished.
+ if (!is_tiering_mode) {
+ events.Add(CompilationEvent::kFinishedTopTierCompilation);
+ }
+ }
+ }
+
+ if (!events.empty()) {
+ auto notify_events = [this, events] {
+ for (auto event : {CompilationEvent::kFinishedBaselineCompilation,
+ CompilationEvent::kFinishedTopTierCompilation}) {
+ if (!events.contains(event)) continue;
+ NotifyOnEvent(event, nullptr);
+ }
+ };
+ foreground_task_runner_->PostTask(
+ MakeCancelableTask(&foreground_task_manager_, notify_events));
+ }
- // If we are in {kRegular} mode, {num_tiering_units_} is 0, therefore
- // this case is already caught by the previous check.
- NotifyOnEvent(CompilationEvent::kFinishedBaselineCompilation, nullptr);
- RestartBackgroundTasks();
+ if (should_log_code_ && code != nullptr) {
+ if (log_codes_task_ == nullptr) {
+ auto new_task = base::make_unique<LogCodesTask>(&foreground_task_manager_,
+ this, isolate_);
+ log_codes_task_ = new_task.get();
+ foreground_task_runner_->PostTask(std::move(new_task));
+ }
+ log_codes_task_->AddCode(code);
}
}
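The counter-to-event bookkeeping above is easiest to see in isolation. A minimal standalone sketch of the same logic, with illustrative names rather than the V8 types:

    #include <cstddef>
    #include <vector>

    enum class Event { kFinishedBaseline, kFinishedTopTier };

    struct Counters {
      bool tiering_mode;
      size_t baseline_units;
      size_t tiering_units;
    };

    // Returns the events to deliver after one unit of the given tier finished.
    // Pre: the corresponding counter is non-zero (mirrors the DCHECKs above).
    std::vector<Event> OnUnitFinished(Counters& c, bool is_top_tier_unit) {
      std::vector<Event> events;
      if (c.tiering_mode && is_top_tier_unit) {
        if (--c.tiering_units == 0) {
          // Baseline may still be running; then both events fire together.
          if (c.baseline_units > 0) events.push_back(Event::kFinishedBaseline);
          events.push_back(Event::kFinishedTopTier);
        }
      } else {
        if (--c.baseline_units == 0) {
          events.push_back(Event::kFinishedBaseline);
          // Without tiering, finishing baseline also finishes the top tier.
          if (!c.tiering_mode) events.push_back(Event::kFinishedTopTier);
        }
      }
      return events;
    }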
-void CompilationState::ScheduleUnitForFinishing(
- std::unique_ptr<WasmCompilationUnit> unit, ExecutionTier mode) {
- base::LockGuard<base::Mutex> guard(&mutex_);
- if (compile_mode_ == CompileMode::kTiering &&
- mode == ExecutionTier::kOptimized) {
- tiering_finish_units_.push_back(std::move(unit));
- } else {
- baseline_finish_units_.push_back(std::move(unit));
+void CompilationStateImpl::RestartBackgroundCompileTask() {
+ auto task = base::make_unique<BackgroundCompileTask>(
+ &background_task_manager_, native_module_, isolate_->counters());
+
+ // If --wasm-num-compilation-tasks=0 is passed, only spawn foreground
+ // tasks. This is used to make timing deterministic.
+ if (FLAG_wasm_num_compilation_tasks == 0) {
+ foreground_task_runner_->PostTask(std::move(task));
+ return;
}
- if (!finisher_is_running_ && !failed_) {
- ScheduleFinisherTask();
- // We set the flag here so that not more than one finisher is started.
- finisher_is_running_ = true;
+ if (baseline_compilation_finished()) {
+ V8::GetCurrentPlatform()->CallLowPriorityTaskOnWorkerThread(
+ std::move(task));
+ } else {
+ V8::GetCurrentPlatform()->CallOnWorkerThread(std::move(task));
}
}
-void CompilationState::OnBackgroundTaskStopped(const WasmFeatures& detected) {
- base::LockGuard<base::Mutex> guard(&mutex_);
+void CompilationStateImpl::ReportDetectedFeatures(
+ const WasmFeatures& detected) {
+ base::MutexGuard guard(&mutex_);
+ UnionFeaturesInto(&detected_features_, detected);
+}
+
+void CompilationStateImpl::OnBackgroundTaskStopped(
+ const WasmFeatures& detected) {
+ base::MutexGuard guard(&mutex_);
DCHECK_LE(1, num_background_tasks_);
--num_background_tasks_;
UnionFeaturesInto(&detected_features_, detected);
}
-void CompilationState::PublishDetectedFeatures(Isolate* isolate,
- const WasmFeatures& detected) {
+void CompilationStateImpl::PublishDetectedFeatures(
+ Isolate* isolate, const WasmFeatures& detected) {
// Notifying the isolate of the feature counts must take place under
// the mutex, because even if we have finished baseline compilation,
// tiering compilations may still occur in the background.
- base::LockGuard<base::Mutex> guard(&mutex_);
+ base::MutexGuard guard(&mutex_);
UnionFeaturesInto(&detected_features_, detected);
UpdateFeatureUseCounts(isolate, detected_features_);
}
-void CompilationState::RestartBackgroundTasks(size_t max) {
+void CompilationStateImpl::RestartBackgroundTasks(size_t max) {
size_t num_restart;
{
- base::LockGuard<base::Mutex> guard(&mutex_);
+ base::MutexGuard guard(&mutex_);
// No need to restart tasks if compilation already failed.
- if (failed_) return;
+ if (failed()) return;
DCHECK_LE(num_background_tasks_, max_background_tasks_);
if (num_background_tasks_ == max_background_tasks_) return;
@@ -3027,51 +1778,77 @@ void CompilationState::RestartBackgroundTasks(size_t max) {
}
for (; num_restart > 0; --num_restart) {
- auto task = base::make_unique<BackgroundCompileTask>(
- this, &background_task_manager_);
-
- // If --wasm-num-compilation-tasks=0 is passed, do only spawn foreground
- // tasks. This is used to make timing deterministic.
- if (FLAG_wasm_num_compilation_tasks > 0) {
- V8::GetCurrentPlatform()->CallOnWorkerThread(std::move(task));
- } else {
- foreground_task_runner_->PostTask(std::move(task));
- }
+ RestartBackgroundCompileTask();
}
}
-bool CompilationState::SetFinisherIsRunning(bool value) {
- base::LockGuard<base::Mutex> guard(&mutex_);
+bool CompilationStateImpl::SetFinisherIsRunning(bool value) {
+ base::MutexGuard guard(&mutex_);
if (finisher_is_running_ == value) return false;
finisher_is_running_ = value;
return true;
}
-void CompilationState::ScheduleFinisherTask() {
+void CompilationStateImpl::ScheduleFinisherTask() {
foreground_task_runner_->PostTask(
base::make_unique<FinishCompileTask>(this, &foreground_task_manager_));
}
-void CompilationState::Abort() {
- {
- base::LockGuard<base::Mutex> guard(&mutex_);
- failed_ = true;
- }
+void CompilationStateImpl::Abort() {
+ SetError(0, WasmError{0, "Compilation aborted"});
background_task_manager_.CancelAndWait();
+ // No more callbacks after abort. Don't free the std::function objects here,
+ // since destroying them might clear references in the embedder, and that is
+ // only allowed on the main thread.
+ aborted_.store(true);
+ if (!callbacks_.empty()) {
+ foreground_task_runner_->PostTask(
+ base::make_unique<FreeCallbacksTask>(this));
+ }
+}
+
+void CompilationStateImpl::SetError(uint32_t func_index,
+ const WasmError& error) {
+ DCHECK(error.has_error());
+ std::unique_ptr<CompilationError> compile_error =
+ base::make_unique<CompilationError>(func_index, error);
+ CompilationError* expected = nullptr;
+ bool set = compile_error_.compare_exchange_strong(
+ expected, compile_error.get(), std::memory_order_acq_rel);
+ // Ignore all but the first error. If the previous value is not nullptr, just
+ // return (and free the allocated error).
+ if (!set) return;
+ // If set successfully, give up ownership.
+ compile_error.release();
+ // Schedule a foreground task to call the callback and notify users about the
+ // compile error.
+ foreground_task_runner_->PostTask(
+ MakeCancelableTask(&foreground_task_manager_, [this] {
+ WasmError error = GetCompileError();
+ NotifyOnEvent(CompilationEvent::kFailedCompilation, &error);
+ }));
}
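The compare_exchange_strong above implements a first-error-wins policy that is safe to call from any compilation thread. A minimal sketch of the same pattern using only the standard library (types and names are illustrative):

    #include <atomic>
    #include <memory>
    #include <string>

    struct Error { int func_index; std::string message; };

    std::atomic<Error*> g_first_error{nullptr};

    // Returns true if this call installed the error. Losers of the race keep
    // ownership in the unique_ptr, so their error is freed automatically.
    bool SetFirstError(int func_index, std::string message) {
      std::unique_ptr<Error> err(new Error{func_index, std::move(message)});
      Error* expected = nullptr;
      if (g_first_error.compare_exchange_strong(expected, err.get(),
                                                std::memory_order_acq_rel)) {
        err.release();  // Ownership now lives in g_first_error.
        return true;
      }
      return false;
    }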
-void CompilationState::NotifyOnEvent(CompilationEvent event,
- ErrorThrower* thrower) {
- if (callback_) callback_(event, thrower);
+void CompilationStateImpl::NotifyOnEvent(CompilationEvent event,
+ const WasmError* error) {
+ if (aborted_.load()) return;
+ HandleScope scope(isolate_);
+ for (auto& callback : callbacks_) callback(event, error);
+ // If no more events are expected after this one, clear the callbacks to free
+ // memory. We can safely do this here, as this method is only called from
+ // foreground tasks.
+ if (event >= CompilationEvent::kFirstFinalEvent) callbacks_.clear();
}
-void CompileJsToWasmWrappers(Isolate* isolate,
- Handle<WasmModuleObject> module_object) {
+void CompileJsToWasmWrappers(Isolate* isolate, const WasmModule* module,
+ Handle<FixedArray> export_wrappers) {
JSToWasmWrapperCache js_to_wasm_cache;
int wrapper_index = 0;
- Handle<FixedArray> export_wrappers(module_object->export_wrappers(), isolate);
- NativeModule* native_module = module_object->native_module();
- const WasmModule* module = native_module->module();
+
+ // TODO(6792): Wrappers below are allocated with {Factory::NewCode}. As an
+ // optimization we keep the code space unlocked to avoid repeated unlocking
+ // because many such wrappers are allocated in sequence below.
+ CodeSpaceMemoryModificationScope modification_scope(isolate->heap());
for (auto exp : module->export_table) {
if (exp.kind != kExternalFunction) continue;
auto& function = module->functions[exp.index];
@@ -3101,8 +1878,7 @@ Handle<Script> CreateWasmScript(Isolate* isolate,
int name_chars = SNPrintF(ArrayVector(buffer), "wasm-%08x", hash);
DCHECK(name_chars >= 0 && name_chars < kBufferSize);
MaybeHandle<String> name_str = isolate->factory()->NewStringFromOneByte(
- Vector<const uint8_t>(reinterpret_cast<uint8_t*>(buffer), name_chars),
- TENURED);
+ VectorOf(reinterpret_cast<uint8_t*>(buffer), name_chars), TENURED);
script->set_name(*name_str.ToHandleChecked());
if (source_map_url.size() != 0) {
@@ -3117,7 +1893,6 @@ Handle<Script> CreateWasmScript(Isolate* isolate,
} // namespace internal
} // namespace v8
-#undef TRACE
#undef TRACE_COMPILE
#undef TRACE_STREAMING
#undef TRACE_LAZY
diff --git a/deps/v8/src/wasm/module-compiler.h b/deps/v8/src/wasm/module-compiler.h
index f108a5f939..7f860ac036 100644
--- a/deps/v8/src/wasm/module-compiler.h
+++ b/deps/v8/src/wasm/module-compiler.h
@@ -11,6 +11,7 @@
#include "src/cancelable-task.h"
#include "src/globals.h"
+#include "src/wasm/compilation-environment.h"
#include "src/wasm/wasm-features.h"
#include "src/wasm/wasm-module.h"
@@ -28,39 +29,27 @@ class Vector;
namespace wasm {
+struct CompilationEnv;
class CompilationResultResolver;
-class CompilationState;
class ErrorThrower;
class ModuleCompiler;
class NativeModule;
class WasmCode;
-struct ModuleEnv;
struct WasmModule;
-struct CompilationStateDeleter {
- void operator()(CompilationState* compilation_state) const;
-};
-
-// Wrapper to create a CompilationState exists in order to avoid having
-// the CompilationState in the header file.
-std::unique_ptr<CompilationState, CompilationStateDeleter> NewCompilationState(
- Isolate* isolate, const ModuleEnv& env);
-
-ModuleEnv* GetModuleEnv(CompilationState* compilation_state);
-
-MaybeHandle<WasmModuleObject> CompileToModuleObject(
+std::unique_ptr<NativeModule> CompileToNativeModule(
Isolate* isolate, const WasmFeatures& enabled, ErrorThrower* thrower,
std::shared_ptr<const WasmModule> module, const ModuleWireBytes& wire_bytes,
- Handle<Script> asm_js_script, Vector<const byte> asm_js_offset_table_bytes);
+ Handle<FixedArray>* export_wrappers_out);
-MaybeHandle<WasmInstanceObject> InstantiateToInstanceObject(
- Isolate* isolate, ErrorThrower* thrower,
- Handle<WasmModuleObject> module_object, MaybeHandle<JSReceiver> imports,
- MaybeHandle<JSArrayBuffer> memory);
+void CompileNativeModuleWithExplicitBoundsChecks(Isolate* isolate,
+ ErrorThrower* thrower,
+ const WasmModule* wasm_module,
+ NativeModule* native_module);
V8_EXPORT_PRIVATE
-void CompileJsToWasmWrappers(Isolate* isolate,
- Handle<WasmModuleObject> module_object);
+void CompileJsToWasmWrappers(Isolate* isolate, const WasmModule* module,
+ Handle<FixedArray> export_wrappers);
V8_EXPORT_PRIVATE Handle<Script> CreateWasmScript(
Isolate* isolate, const ModuleWireBytes& wire_bytes,
@@ -97,19 +86,25 @@ class AsyncCompileJob {
private:
class CompileTask;
class CompileStep;
+ class CompilationStateCallback;
// States of the AsyncCompileJob.
class DecodeModule; // Step 1 (async)
class DecodeFail; // Step 1b (sync)
class PrepareAndStartCompile; // Step 2 (sync)
class CompileFailed; // Step 4b (sync)
- class CompileWrappers; // Step 5 (sync)
- class FinishModule; // Step 6 (sync)
- const std::shared_ptr<Counters>& async_counters() const {
- return async_counters_;
+ friend class AsyncStreamingProcessor;
+
+ // Decrements the number of outstanding finishers. The last caller of this
+ // function should finish the asynchronous compilation, see the comment on
+ // {outstanding_finishers_}.
+ V8_WARN_UNUSED_RESULT bool DecrementAndCheckFinisherCount() {
+ return outstanding_finishers_.fetch_sub(1) == 1;
}
- Counters* counters() const { return async_counters().get(); }
+
+ void CreateNativeModule(std::shared_ptr<const WasmModule> module);
+ void PrepareRuntimeObjects();
void FinishCompile();
@@ -117,14 +112,26 @@ class AsyncCompileJob {
void AsyncCompileSucceeded(Handle<WasmModuleObject> result);
+ void CompileWrappers();
+
+ void FinishModule();
+
void StartForegroundTask();
void ExecuteForegroundTaskImmediately();
void StartBackgroundTask();
+ enum UseExistingForegroundTask : bool {
+ kUseExistingForegroundTask = true,
+ kAssertNoExistingForegroundTask = false
+ };
// Switches to the compilation step {Step} and starts a foreground task to
- // execute it.
- template <typename Step, typename... Args>
+ // execute it. Most of the time we know that there cannot be a running
+ // foreground task. If there might be one, then pass
+ // kUseExistingForegroundTask to avoid spawning a second one.
+ template <typename Step,
+ UseExistingForegroundTask = kAssertNoExistingForegroundTask,
+ typename... Args>
void DoSync(Args&&... args);
// Switches to the compilation step {Step} and immediately executes that step.
@@ -141,11 +148,8 @@ class AsyncCompileJob {
template <typename Step, typename... Args>
void NextStep(Args&&... args);
- friend class AsyncStreamingProcessor;
-
- Isolate* isolate_;
+ Isolate* const isolate_;
const WasmFeatures enabled_features_;
- const std::shared_ptr<Counters> async_counters_;
// Copy of the module wire bytes, moved into the {native_module_} on its
// creation.
std::unique_ptr<byte[]> bytes_copy_;
@@ -153,11 +157,11 @@ class AsyncCompileJob {
// {native_module_}).
ModuleWireBytes wire_bytes_;
Handle<Context> native_context_;
- std::shared_ptr<CompilationResultResolver> resolver_;
+ const std::shared_ptr<CompilationResultResolver> resolver_;
std::vector<DeferredHandles*> deferred_handles_;
Handle<WasmModuleObject> module_object_;
- NativeModule* native_module_ = nullptr;
+ std::shared_ptr<NativeModule> native_module_;
std::unique_ptr<CompileStep> step_;
CancelableTaskManager background_task_manager_;
@@ -169,13 +173,6 @@ class AsyncCompileJob {
// compilation can be finished.
std::atomic<int32_t> outstanding_finishers_{1};
- // Decrements the number of outstanding finishers. The last caller of this
- // function should finish the asynchronous compilation, see the comment on
- // {outstanding_finishers_}.
- V8_WARN_UNUSED_RESULT bool DecrementAndCheckFinisherCount() {
- return outstanding_finishers_.fetch_sub(1) == 1;
- }
-
// A reference to a pending foreground task, or {nullptr} if none is pending.
CompileTask* pending_foreground_task_ = nullptr;
@@ -184,9 +181,8 @@ class AsyncCompileJob {
// compilation. The AsyncCompileJob does not actively use the
// StreamingDecoder.
std::shared_ptr<StreamingDecoder> stream_;
-
- bool tiering_completed_ = false;
};
+
} // namespace wasm
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/wasm/module-decoder.cc b/deps/v8/src/wasm/module-decoder.cc
index 6b0a3d6485..25a6633178 100644
--- a/deps/v8/src/wasm/module-decoder.cc
+++ b/deps/v8/src/wasm/module-decoder.cc
@@ -9,7 +9,6 @@
#include "src/base/template-utils.h"
#include "src/counters.h"
#include "src/flags.h"
-#include "src/macro-assembler.h"
#include "src/objects-inl.h"
#include "src/ostreams.h"
#include "src/v8.h"
@@ -31,8 +30,6 @@ namespace {
constexpr char kNameString[] = "name";
constexpr char kSourceMappingURLString[] = "sourceMappingURL";
-constexpr char kExceptionString[] = "exception";
-constexpr char kUnknownString[] = "<unknown>";
template <size_t N>
constexpr size_t num_chars(const char (&)[N]) {
@@ -83,14 +80,16 @@ const char* SectionName(SectionCode code) {
return "Element";
case kDataSectionCode:
return "Data";
+ case kExceptionSectionCode:
+ return "Exception";
+ case kDataCountSectionCode:
+ return "DataCount";
case kNameSectionCode:
return kNameString;
case kSourceMappingURLSectionCode:
return kSourceMappingURLString;
- case kExceptionSectionCode:
- return kExceptionString;
default:
- return kUnknownString;
+ return "<unknown>";
}
}
@@ -333,6 +332,30 @@ class ModuleDecoderImpl : public Decoder {
#undef BYTES
}
+ bool CheckSectionOrder(SectionCode section_code,
+ SectionCode prev_section_code,
+ SectionCode next_section_code) {
+ if (next_ordered_section_ > next_section_code) {
+ errorf(pc(), "The %s section must appear before the %s section",
+ SectionName(section_code), SectionName(next_section_code));
+ return false;
+ }
+ if (next_ordered_section_ <= prev_section_code) {
+ next_ordered_section_ = prev_section_code + 1;
+ }
+ return true;
+ }
+
+ bool CheckUnorderedSection(SectionCode section_code) {
+ if (has_seen_unordered_section(section_code)) {
+ errorf(pc(), "Multiple %s sections not allowed",
+ SectionName(section_code));
+ return false;
+ }
+ set_seen_unordered_section(section_code);
+ return true;
+ }
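Concretely, the Exception section is pinned between the Global and Export sections via CheckSectionOrder(kExceptionSectionCode, kGlobalSectionCode, kExportSectionCode): decoding fails if next_ordered_section_ has already advanced past kExportSectionCode, and otherwise next_ordered_section_ is bumped past kGlobalSectionCode so no earlier ordered section may follow. The DataCount section is pinned between the Element and Code sections the same way.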
+
void DecodeSection(SectionCode section_code, Vector<const uint8_t> bytes,
uint32_t offset, bool verify_functions = true) {
if (failed()) return;
@@ -351,20 +374,17 @@ class ModuleDecoderImpl : public Decoder {
switch (section_code) {
case kUnknownSectionCode:
break;
- case kExceptionSectionCode:
- // Note: kExceptionSectionCode > kExportSectionCode, but must appear
- // before the export (and code) section, as well as after the import
- // section. Hence, treat it as a special case.
- if (seen_unordered_sections_ & (1 << kExceptionSectionCode)) {
- errorf(pc(), "Multiple exception sections not allowed");
+ case kDataCountSectionCode:
+ if (!CheckUnorderedSection(section_code)) return;
+ if (!CheckSectionOrder(section_code, kElementSectionCode,
+ kCodeSectionCode))
return;
- } else if (next_ordered_section_ > kExportSectionCode) {
- errorf(pc(), "Exception section must appear before export section");
+ break;
+ case kExceptionSectionCode:
+ if (!CheckUnorderedSection(section_code)) return;
+ if (!CheckSectionOrder(section_code, kGlobalSectionCode,
+ kExportSectionCode))
return;
- } else if (next_ordered_section_ < kImportSectionCode) {
- next_ordered_section_ = kImportSectionCode + 1;
- }
- seen_unordered_sections_ |= 1 << kExceptionSectionCode;
break;
case kSourceMappingURLSectionCode:
// sourceMappingURL is a custom section and currently can occur anywhere
@@ -422,6 +442,13 @@ class ModuleDecoderImpl : public Decoder {
case kSourceMappingURLSectionCode:
DecodeSourceMappingURLSection();
break;
+ case kDataCountSectionCode:
+ if (enabled_features_.bulk_memory) {
+ DecodeDataCountSection();
+ } else {
+ errorf(pc(), "unexpected section: %s", SectionName(section_code));
+ }
+ break;
case kExceptionSectionCode:
if (enabled_features_.eh) {
DecodeExceptionSection();
@@ -520,7 +547,7 @@ class ModuleDecoderImpl : public Decoder {
if (!AddMemory(module_.get())) break;
uint8_t flags = validate_memory_flags(&module_->has_shared_memory);
consume_resizable_limits(
- "memory", "pages", FLAG_wasm_max_mem_pages,
+ "memory", "pages", kSpecMaxWasmMemoryPages,
&module_->initial_pages, &module_->has_maximum_pages,
kSpecMaxWasmMemoryPages, &module_->maximum_pages, flags);
break;
@@ -534,11 +561,7 @@ class ModuleDecoderImpl : public Decoder {
global->type = consume_value_type();
global->mutability = consume_mutability();
if (global->mutability) {
- if (enabled_features_.mut_global) {
- module_->num_imported_mutable_globals++;
- } else {
- error("mutable globals cannot be imported");
- }
+ module_->num_imported_mutable_globals++;
}
break;
}
@@ -549,8 +572,10 @@ class ModuleDecoderImpl : public Decoder {
break;
}
import->index = static_cast<uint32_t>(module_->exceptions.size());
- module_->exceptions.emplace_back(
- consume_exception_sig(module_->signature_zone.get()));
+ WasmExceptionSig* exception_sig = nullptr;
+ consume_exception_attribute(); // Attribute ignored for now.
+ consume_exception_sig_index(module_.get(), &exception_sig);
+ module_->exceptions.emplace_back(exception_sig);
break;
}
default:
@@ -612,7 +637,7 @@ class ModuleDecoderImpl : public Decoder {
if (!AddMemory(module_.get())) break;
uint8_t flags = validate_memory_flags(&module_->has_shared_memory);
consume_resizable_limits(
- "memory", "pages", FLAG_wasm_max_mem_pages, &module_->initial_pages,
+ "memory", "pages", kSpecMaxWasmMemoryPages, &module_->initial_pages,
&module_->has_maximum_pages, kSpecMaxWasmMemoryPages,
&module_->maximum_pages, flags);
}
@@ -680,9 +705,6 @@ class ModuleDecoderImpl : public Decoder {
WasmGlobal* global = nullptr;
exp->index = consume_global_index(module_.get(), &global);
if (global) {
- if (!enabled_features_.mut_global && global->mutability) {
- error("mutable globals cannot be exported");
- }
global->exported = true;
}
break;
@@ -751,24 +773,35 @@ class ModuleDecoderImpl : public Decoder {
}
for (uint32_t i = 0; ok() && i < element_count; ++i) {
const byte* pos = pc();
- uint32_t table_index = consume_u32v("table index");
- if (!enabled_features_.anyref && table_index != 0) {
- errorf(pos, "illegal table index %u != 0", table_index);
- }
- if (table_index >= module_->tables.size()) {
- errorf(pos, "out of bounds table index %u", table_index);
- break;
- }
- if (module_->tables[table_index].type != kWasmAnyFunc) {
- errorf(pos, "Invalid element segment. Table %u is not of type AnyFunc",
- table_index);
- break;
+
+ bool is_active;
+ uint32_t table_index;
+ WasmInitExpr offset;
+ consume_segment_header("table index", &is_active, &table_index, &offset);
+ if (failed()) return;
+
+ if (is_active) {
+ if (table_index >= module_->tables.size()) {
+ errorf(pos, "out of bounds table index %u", table_index);
+ break;
+ }
+ if (module_->tables[table_index].type != kWasmAnyFunc) {
+ errorf(pos,
+ "Invalid element segment. Table %u is not of type AnyFunc",
+ table_index);
+ break;
+ }
}
- WasmInitExpr offset = consume_init_expr(module_.get(), kWasmI32);
+
uint32_t num_elem =
consume_count("number of elements", kV8MaxWasmTableEntries);
- module_->table_inits.emplace_back(table_index, offset);
- WasmTableInit* init = &module_->table_inits.back();
+ if (is_active) {
+ module_->elem_segments.emplace_back(table_index, offset);
+ } else {
+ module_->elem_segments.emplace_back();
+ }
+
+ WasmElemSegment* init = &module_->elem_segments.back();
for (uint32_t j = 0; j < num_elem; j++) {
WasmFunction* func = nullptr;
uint32_t index = consume_func_index(module_.get(), &func);
@@ -822,31 +855,66 @@ class ModuleDecoderImpl : public Decoder {
}
}
+ bool CheckDataSegmentsCount(uint32_t data_segments_count) {
+ if (has_seen_unordered_section(kDataCountSectionCode) &&
+ data_segments_count != module_->num_declared_data_segments) {
+ errorf(pc(), "data segments count %u mismatch (%u expected)",
+ data_segments_count, module_->num_declared_data_segments);
+ return false;
+ }
+ return true;
+ }
+
void DecodeDataSection() {
uint32_t data_segments_count =
consume_count("data segments count", kV8MaxWasmDataSegments);
+ if (!CheckDataSegmentsCount(data_segments_count)) return;
+
module_->data_segments.reserve(data_segments_count);
for (uint32_t i = 0; ok() && i < data_segments_count; ++i) {
+ const byte* pos = pc();
if (!module_->has_memory) {
error("cannot load data without memory");
break;
}
TRACE("DecodeDataSegment[%d] module+%d\n", i,
static_cast<int>(pc_ - start_));
- module_->data_segments.push_back({
- WasmInitExpr(), // dest_addr
- {0, 0} // source
- });
+
+ bool is_active;
+ uint32_t memory_index;
+ WasmInitExpr dest_addr;
+ consume_segment_header("memory index", &is_active, &memory_index,
+ &dest_addr);
+ if (failed()) break;
+
+ if (is_active && memory_index != 0) {
+ errorf(pos, "illegal memory index %u != 0", memory_index);
+ break;
+ }
+
+ uint32_t source_length = consume_u32v("source size");
+ uint32_t source_offset = pc_offset();
+
+ if (is_active) {
+ module_->data_segments.emplace_back(dest_addr);
+ } else {
+ module_->data_segments.emplace_back();
+ }
+
WasmDataSegment* segment = &module_->data_segments.back();
- DecodeDataSegmentInModule(module_.get(), segment);
+
+ consume_bytes(source_length, "segment data");
+ if (failed()) break;
+
+ segment->source = {source_offset, source_length};
}
}
void DecodeNameSection() {
// TODO(titzer): find a way to report name errors as warnings.
// ignore all but the first occurrence of name section.
- if (!(seen_unordered_sections_ & (1 << kNameSectionCode))) {
- seen_unordered_sections_ |= 1 << kNameSectionCode;
+ if (!has_seen_unordered_section(kNameSectionCode)) {
+ set_seen_unordered_section(kNameSectionCode);
// Use an inner decoder so that errors don't fail the outer decoder.
Decoder inner(start_, pc_, end_, buffer_offset_);
// Decode all name subsections.
@@ -876,35 +944,65 @@ class ModuleDecoderImpl : public Decoder {
Decoder inner(start_, pc_, end_, buffer_offset_);
WireBytesRef url = wasm::consume_string(inner, true, "module name");
if (inner.ok() &&
- !(seen_unordered_sections_ & (1 << kSourceMappingURLSectionCode))) {
+ !has_seen_unordered_section(kSourceMappingURLSectionCode)) {
const byte* url_start =
inner.start() + inner.GetBufferRelativeOffset(url.offset());
module_->source_map_url.assign(reinterpret_cast<const char*>(url_start),
url.length());
- seen_unordered_sections_ |= 1 << kSourceMappingURLSectionCode;
+ set_seen_unordered_section(kSourceMappingURLSectionCode);
}
consume_bytes(static_cast<uint32_t>(end_ - start_), nullptr);
}
+ void DecodeDataCountSection() {
+ module_->num_declared_data_segments =
+ consume_count("data segments count", kV8MaxWasmDataSegments);
+ }
+
void DecodeExceptionSection() {
uint32_t exception_count =
consume_count("exception count", kV8MaxWasmExceptions);
for (uint32_t i = 0; ok() && i < exception_count; ++i) {
- TRACE("DecodeExceptionSignature[%d] module+%d\n", i,
+ TRACE("DecodeException[%d] module+%d\n", i,
static_cast<int>(pc_ - start_));
- module_->exceptions.emplace_back(
- consume_exception_sig(module_->signature_zone.get()));
+ WasmExceptionSig* exception_sig = nullptr;
+ consume_exception_attribute(); // Attribute ignored for now.
+ consume_exception_sig_index(module_.get(), &exception_sig);
+ module_->exceptions.emplace_back(exception_sig);
+ }
+ }
+
+ bool CheckMismatchedCounts() {
+ // The declared vs. defined function count is normally checked when
+ // decoding the code section, but we have to check it here too in case the
+ // code section is absent.
+ if (module_->num_declared_functions != 0) {
+ DCHECK_LT(module_->num_imported_functions, module_->functions.size());
+ // We know that the code section has been decoded if the first
+ // non-imported function has its code set.
+ if (!module_->functions[module_->num_imported_functions].code.is_set()) {
+ errorf(pc(), "function count is %u, but code section is absent",
+ module_->num_declared_functions);
+ return false;
+ }
+ }
+ // Perform a similar check for the DataCount and Data sections, where data
+ // segments are declared but the Data section is absent.
+ if (!CheckDataSegmentsCount(
+ static_cast<uint32_t>(module_->data_segments.size()))) {
+ return false;
}
+ return true;
}
ModuleResult FinishDecoding(bool verify_functions = true) {
- if (ok()) {
+ if (ok() && CheckMismatchedCounts()) {
CalculateGlobalOffsets(module_.get());
}
ModuleResult result = toResult(std::move(module_));
- if (verify_functions && result.ok()) {
- // Copy error code and location.
- result.MoveErrorFrom(intermediate_result_);
+ if (verify_functions && result.ok() && intermediate_error_.has_error()) {
+ // Copy error message and location.
+ return ModuleResult{std::move(intermediate_error_)};
}
return result;
}
@@ -915,7 +1013,7 @@ class ModuleDecoderImpl : public Decoder {
StartDecoding(counters, allocator);
uint32_t offset = 0;
Vector<const byte> orig_bytes(start(), end() - start());
- DecodeModuleHeader(Vector<const uint8_t>(start(), end() - start()), offset);
+ DecodeModuleHeader(VectorOf(start(), end() - start()), offset);
if (failed()) {
return FinishDecoding(verify_functions);
}
@@ -959,10 +1057,11 @@ class ModuleDecoderImpl : public Decoder {
VerifyFunctionBody(zone->allocator(), 0, wire_bytes, module,
function.get());
- FunctionResult result(std::move(function));
- // Copy error code and location.
- result.MoveErrorFrom(intermediate_result_);
- return result;
+ if (intermediate_error_.has_error()) {
+ return FunctionResult{std::move(intermediate_error_)};
+ }
+
+ return FunctionResult(std::move(function));
}
// Decodes a single function signature at {start}.
@@ -1006,9 +1105,17 @@ class ModuleDecoderImpl : public Decoder {
sizeof(ModuleDecoderImpl::seen_unordered_sections_) >
kLastKnownModuleSection,
"not enough bits");
- Result<bool> intermediate_result_;
+ WasmError intermediate_error_;
ModuleOrigin origin_;
+ bool has_seen_unordered_section(SectionCode section_code) {
+ return seen_unordered_sections_ & (1 << section_code);
+ }
+
+ void set_seen_unordered_section(SectionCode section_code) {
+ seen_unordered_sections_ |= 1 << section_code;
+ }
+
uint32_t off(const byte* ptr) {
return static_cast<uint32_t>(ptr - start_) + buffer_offset_;
}
@@ -1064,38 +1171,29 @@ class ModuleDecoderImpl : public Decoder {
}
// Decodes a single data segment entry inside a module starting at {pc_}.
- void DecodeDataSegmentInModule(WasmModule* module, WasmDataSegment* segment) {
- expect_u8("linear memory index", 0);
- segment->dest_addr = consume_init_expr(module, kWasmI32);
- uint32_t source_length = consume_u32v("source size");
- uint32_t source_offset = pc_offset();
-
- consume_bytes(source_length, "segment data");
- if (failed()) return;
-
- segment->source = {source_offset, source_length};
- }
// Calculate individual global offsets and total size of globals table.
void CalculateGlobalOffsets(WasmModule* module) {
- uint32_t offset = 0;
+ uint32_t untagged_offset = 0;
+ uint32_t tagged_offset = 0;
uint32_t num_imported_mutable_globals = 0;
- if (module->globals.size() == 0) {
- module->globals_buffer_size = 0;
- return;
- }
for (WasmGlobal& global : module->globals) {
- byte size = ValueTypes::MemSize(ValueTypes::MachineTypeFor(global.type));
if (global.mutability && global.imported) {
- DCHECK(enabled_features_.mut_global);
global.index = num_imported_mutable_globals++;
+ } else if (global.type == ValueType::kWasmAnyRef) {
+ global.offset = tagged_offset;
+ // All entries in the tagged_globals_buffer have size 1.
+ tagged_offset++;
} else {
- offset = (offset + size - 1) & ~(size - 1); // align
- global.offset = offset;
- offset += size;
+ byte size =
+ ValueTypes::MemSize(ValueTypes::MachineTypeFor(global.type));
+ untagged_offset = (untagged_offset + size - 1) & ~(size - 1); // align
+ global.offset = untagged_offset;
+ untagged_offset += size;
}
}
- module->globals_buffer_size = offset;
+ module->untagged_globals_buffer_size = untagged_offset;
+ module->tagged_globals_buffer_size = tagged_offset;
}
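(Worked example: an i32 global, size 4, at untagged_offset 0 stays at offset 0 and advances the offset to 4; a following f64 global, size 8, is aligned to (4 + 8 - 1) & ~7 = 8 and advances the offset to 16. AnyRef globals instead take one slot each in the separate tagged buffer, so their offsets are simple indices.)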
// Verifies the body (code) of a given function.
@@ -1124,16 +1222,14 @@ class ModuleDecoderImpl : public Decoder {
&unused_detected_features, body);
}
- if (result.failed()) {
+ // If the decode failed and this is the first error, set error code and
+ // location.
+ if (result.failed() && intermediate_error_.empty()) {
// Wrap the error message from the function decoder.
- std::ostringstream wrapped;
- wrapped << "in function " << func_name << ": " << result.error_msg();
- result.error(result.error_offset(), wrapped.str());
-
- // Set error code and location, if this is the first error.
- if (intermediate_result_.ok()) {
- intermediate_result_.MoveErrorFrom(result);
- }
+ std::ostringstream error_msg;
+ error_msg << "in function " << func_name << ": "
+ << result.error().message();
+ intermediate_error_ = WasmError{result.error().offset(), error_msg.str()};
}
}
@@ -1150,6 +1246,17 @@ class ModuleDecoderImpl : public Decoder {
return sig_index;
}
+ uint32_t consume_exception_sig_index(WasmModule* module, FunctionSig** sig) {
+ const byte* pos = pc_;
+ uint32_t sig_index = consume_sig_index(module, sig);
+ if (*sig && (*sig)->return_count() != 0) {
+ errorf(pos, "exception signature %u has non-void return", sig_index);
+ *sig = nullptr;
+ return 0;
+ }
+ return sig_index;
+ }
+
uint32_t consume_count(const char* name, size_t maximum) {
const byte* p = pc_;
uint32_t count = consume_u32v(name);
@@ -1268,7 +1375,7 @@ class ModuleDecoderImpl : public Decoder {
const byte* pos = pc();
uint8_t opcode = consume_u8("opcode");
WasmInitExpr expr;
- unsigned len = 0;
+ uint32_t len = 0;
switch (opcode) {
case kExprGetGlobal: {
GlobalIndexImmediate<Decoder::kValidate> imm(this, pc() - 1);
@@ -1408,19 +1515,7 @@ class ModuleDecoderImpl : public Decoder {
}
FunctionSig* consume_sig(Zone* zone) {
- constexpr bool has_return_values = true;
- return consume_sig_internal(zone, has_return_values);
- }
-
- WasmExceptionSig* consume_exception_sig(Zone* zone) {
- constexpr bool has_return_values = true;
- return consume_sig_internal(zone, !has_return_values);
- }
-
- private:
- FunctionSig* consume_sig_internal(Zone* zone, bool has_return_values) {
- if (has_return_values && !expect_u8("type form", kWasmFunctionTypeCode))
- return nullptr;
+ if (!expect_u8("type form", kWasmFunctionTypeCode)) return nullptr;
// parse parameter types
uint32_t param_count =
consume_count("param count", kV8MaxWasmFunctionParams);
@@ -1431,18 +1526,15 @@ class ModuleDecoderImpl : public Decoder {
params.push_back(param);
}
std::vector<ValueType> returns;
- uint32_t return_count = 0;
- if (has_return_values) {
- // parse return types
- const size_t max_return_count = enabled_features_.mv
- ? kV8MaxWasmFunctionMultiReturns
- : kV8MaxWasmFunctionReturns;
- return_count = consume_count("return count", max_return_count);
- if (failed()) return nullptr;
- for (uint32_t i = 0; ok() && i < return_count; ++i) {
- ValueType ret = consume_value_type();
- returns.push_back(ret);
- }
+ // parse return types
+ const size_t max_return_count = enabled_features_.mv
+ ? kV8MaxWasmFunctionMultiReturns
+ : kV8MaxWasmFunctionReturns;
+ uint32_t return_count = consume_count("return count", max_return_count);
+ if (failed()) return nullptr;
+ for (uint32_t i = 0; ok() && i < return_count; ++i) {
+ ValueType ret = consume_value_type();
+ returns.push_back(ret);
}
if (failed()) return nullptr;
@@ -1455,6 +1547,66 @@ class ModuleDecoderImpl : public Decoder {
return new (zone) FunctionSig(return_count, param_count, buffer);
}
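For reference, the byte sequence consume_sig reads for a signature such as (i32, i32) -> i32 looks like this (standard Wasm binary encoding; the array itself is illustrative):

    #include <cstdint>

    // Bytes consumed by consume_sig for the signature (i32, i32) -> i32.
    const uint8_t kI32PairToI32Sig[] = {
        0x60,        // type form: function (kWasmFunctionTypeCode)
        0x02,        // param count
        0x7f, 0x7f,  // i32, i32
        0x01,        // return count
        0x7f,        // i32
    };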
+
+ // Consume the attribute field of an exception.
+ uint32_t consume_exception_attribute() {
+ const byte* pos = pc_;
+ uint32_t attribute = consume_u32v("exception attribute");
+ if (attribute != kExceptionAttribute) {
+ errorf(pos, "exception attribute %u not supported", attribute);
+ return 0;
+ }
+ return attribute;
+ }
+
+ void consume_segment_header(const char* name, bool* is_active,
+ uint32_t* index, WasmInitExpr* offset) {
+ const byte* pos = pc();
+ // In the MVP, this is a table or memory index field that must be 0, but
+ // we've repurposed it as a flags field in the bulk memory proposal.
+ uint32_t flags;
+ if (enabled_features_.bulk_memory) {
+ flags = consume_u32v("flags");
+ if (failed()) return;
+ } else {
+ flags = consume_u32v(name);
+ if (failed()) return;
+
+ if (flags != 0) {
+ errorf(pos, "illegal %s %u != 0", name, flags);
+ return;
+ }
+ }
+
+ bool read_index;
+ bool read_offset;
+ if (flags == SegmentFlags::kActiveNoIndex) {
+ *is_active = true;
+ read_index = false;
+ read_offset = true;
+ } else if (flags == SegmentFlags::kPassive) {
+ *is_active = false;
+ read_index = false;
+ read_offset = false;
+ } else if (flags == SegmentFlags::kActiveWithIndex) {
+ *is_active = true;
+ read_index = true;
+ read_offset = true;
+ } else {
+ errorf(pos, "illegal flag value %u. Must be 0, 1, or 2", flags);
+ return;
+ }
+
+ if (read_index) {
+ *index = consume_u32v(name);
+ } else {
+ *index = 0;
+ }
+
+ if (read_offset) {
+ *offset = consume_init_expr(module_.get(), kWasmI32);
+ }
+ }
};
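To summarize the flag handling in consume_segment_header: flags == 0 (SegmentFlags::kActiveNoIndex) means an active segment with an implicit index of 0 followed by an offset init-expression; flags == 1 (kPassive) means a passive segment with neither index nor offset; flags == 2 (kActiveWithIndex) means an active segment with an explicit index followed by an offset init-expression. When the bulk memory feature is disabled, only the MVP value 0 is accepted.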
ModuleResult DecodeWasmModule(const WasmFeatures& enabled,
@@ -1466,9 +1618,11 @@ ModuleResult DecodeWasmModule(const WasmFeatures& enabled,
SELECT_WASM_COUNTER(counters, origin, wasm_decode, module_time);
TimedHistogramScope wasm_decode_module_time_scope(counter);
size_t size = module_end - module_start;
- if (module_start > module_end) return ModuleResult::Error("start > end");
- if (size >= kV8MaxWasmModuleSize)
- return ModuleResult::Error("size > maximum module size: %zu", size);
+ CHECK_LE(module_start, module_end);
+ if (size >= kV8MaxWasmModuleSize) {
+ return ModuleResult{WasmError{0, "size > maximum module size (%zu): %zu",
+ kV8MaxWasmModuleSize, size}};
+ }
// TODO(bradnelson): Improve histogram handling of size_t.
auto size_counter =
SELECT_WASM_COUNTER(counters, origin, wasm, module_size_bytes);
@@ -1486,7 +1640,7 @@ ModuleResult DecodeWasmModule(const WasmFeatures& enabled,
auto peak_counter = SELECT_WASM_COUNTER(counters, origin, wasm_decode,
module_peak_memory_bytes);
peak_counter->AddSample(
- static_cast<int>(result.val->signature_zone->allocation_size()));
+ static_cast<int>(result.value()->signature_zone->allocation_size()));
}
return result;
}
@@ -1580,14 +1734,16 @@ FunctionResult DecodeWasmFunctionForTesting(
const WasmModule* module, const byte* function_start,
const byte* function_end, Counters* counters) {
size_t size = function_end - function_start;
- if (function_start > function_end)
- return FunctionResult::Error("start > end");
+ CHECK_LE(function_start, function_end);
auto size_histogram = SELECT_WASM_COUNTER(counters, module->origin, wasm,
function_size_bytes);
// TODO(bradnelson): Improve histogram handling of ptrdiff_t.
size_histogram->AddSample(static_cast<int>(size));
- if (size > kV8MaxWasmFunctionSize)
- return FunctionResult::Error("size > maximum function size: %zu", size);
+ if (size > kV8MaxWasmFunctionSize) {
+ return FunctionResult{WasmError{0,
+ "size > maximum function size (%zu): %zu",
+ kV8MaxWasmFunctionSize, size}};
+ }
ModuleDecoderImpl decoder(enabled, function_start, function_end, kWasmOrigin);
decoder.SetCounters(counters);
return decoder.DecodeSingleFunction(zone, wire_bytes, module,
@@ -1601,7 +1757,7 @@ AsmJsOffsetsResult DecodeAsmJsOffsets(const byte* tables_start,
Decoder decoder(tables_start, tables_end);
uint32_t functions_count = decoder.consume_u32v("functions count");
// Reserve space for the entries, taking care of invalid input.
- if (functions_count < static_cast<unsigned>(tables_end - tables_start)) {
+ if (functions_count < static_cast<uint32_t>(tables_end - tables_start)) {
table.reserve(functions_count);
}
diff --git a/deps/v8/src/wasm/module-decoder.h b/deps/v8/src/wasm/module-decoder.h
index f190a12844..95c449640c 100644
--- a/deps/v8/src/wasm/module-decoder.h
+++ b/deps/v8/src/wasm/module-decoder.h
@@ -19,7 +19,7 @@ class Counters;
namespace wasm {
-struct ModuleEnv;
+struct CompilationEnv;
inline bool IsValidSectionCode(uint8_t byte) {
return kTypeSectionCode <= byte && byte <= kLastKnownModuleSection;
diff --git a/deps/v8/src/wasm/module-instantiate.cc b/deps/v8/src/wasm/module-instantiate.cc
new file mode 100644
index 0000000000..04c0f3cf44
--- /dev/null
+++ b/deps/v8/src/wasm/module-instantiate.cc
@@ -0,0 +1,1537 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/wasm/module-instantiate.h"
+#include "src/asmjs/asm-js.h"
+#include "src/property-descriptor.h"
+#include "src/utils.h"
+#include "src/wasm/js-to-wasm-wrapper-cache-inl.h"
+#include "src/wasm/module-compiler.h"
+#include "src/wasm/wasm-import-wrapper-cache-inl.h"
+#include "src/wasm/wasm-module.h"
+#include "src/wasm/wasm-objects-inl.h"
+
+#define TRACE(...) \
+ do { \
+ if (FLAG_trace_wasm_instances) PrintF(__VA_ARGS__); \
+ } while (false)
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+namespace {
+byte* raw_buffer_ptr(MaybeHandle<JSArrayBuffer> buffer, int offset) {
+ return static_cast<byte*>(buffer.ToHandleChecked()->backing_store()) + offset;
+}
+} // namespace
+
+// A helper class to simplify instantiating a module from a module object.
+// It closes over the {Isolate}, the {ErrorThrower}, etc.
+class InstanceBuilder {
+ public:
+ InstanceBuilder(Isolate* isolate, ErrorThrower* thrower,
+ Handle<WasmModuleObject> module_object,
+ MaybeHandle<JSReceiver> ffi,
+ MaybeHandle<JSArrayBuffer> memory);
+
+ // Build an instance, in all of its glory.
+ MaybeHandle<WasmInstanceObject> Build();
+ // Run the start function, if any.
+ bool ExecuteStartFunction();
+
+ private:
+ // Represents the initialized state of a table.
+ struct TableInstance {
+ Handle<WasmTableObject> table_object; // WebAssembly.Table instance
+ Handle<FixedArray> js_wrappers; // JSFunctions exported
+ size_t table_size;
+ };
+
+ // A pre-evaluated value to use in import binding.
+ struct SanitizedImport {
+ Handle<String> module_name;
+ Handle<String> import_name;
+ Handle<Object> value;
+ };
+
+ Isolate* isolate_;
+ const WasmFeatures enabled_;
+ const WasmModule* const module_;
+ ErrorThrower* thrower_;
+ Handle<WasmModuleObject> module_object_;
+ MaybeHandle<JSReceiver> ffi_;
+ MaybeHandle<JSArrayBuffer> memory_;
+ Handle<JSArrayBuffer> untagged_globals_;
+ Handle<FixedArray> tagged_globals_;
+ std::vector<TableInstance> table_instances_;
+ std::vector<Handle<JSFunction>> js_wrappers_;
+ std::vector<Handle<WasmExceptionObject>> exception_wrappers_;
+ Handle<WasmExportedFunction> start_function_;
+ JSToWasmWrapperCache js_to_wasm_cache_;
+ std::vector<SanitizedImport> sanitized_imports_;
+
+ UseTrapHandler use_trap_handler() const {
+ return module_object_->native_module()->use_trap_handler() ? kUseTrapHandler
+ : kNoTrapHandler;
+ }
+
+// Helper routines to print out errors with imports.
+#define ERROR_THROWER_WITH_MESSAGE(TYPE) \
+ void Report##TYPE(const char* error, uint32_t index, \
+ Handle<String> module_name, Handle<String> import_name) { \
+ thrower_->TYPE("Import #%d module=\"%s\" function=\"%s\" error: %s", \
+ index, module_name->ToCString().get(), \
+ import_name->ToCString().get(), error); \
+ } \
+ \
+ MaybeHandle<Object> Report##TYPE(const char* error, uint32_t index, \
+ Handle<String> module_name) { \
+ thrower_->TYPE("Import #%d module=\"%s\" error: %s", index, \
+ module_name->ToCString().get(), error); \
+ return MaybeHandle<Object>(); \
+ }
+
+ ERROR_THROWER_WITH_MESSAGE(LinkError)
+ ERROR_THROWER_WITH_MESSAGE(TypeError)
+
+#undef ERROR_THROWER_WITH_MESSAGE
+
+ // Look up an import value in the {ffi_} object.
+ MaybeHandle<Object> LookupImport(uint32_t index, Handle<String> module_name,
+ Handle<String> import_name);
+
+ // Look up an import value in the {ffi_} object specifically for linking an
+ // asm.js module. This only performs non-observable lookups, which allows
+ // falling back to JavaScript proper (and hence re-executing all lookups) if
+ // module instantiation fails.
+ MaybeHandle<Object> LookupImportAsm(uint32_t index,
+ Handle<String> import_name);
+
+ uint32_t EvalUint32InitExpr(const WasmInitExpr& expr);
+
+ // Load data segments into the memory.
+ void LoadDataSegments(Handle<WasmInstanceObject> instance);
+
+ void WriteGlobalValue(const WasmGlobal& global, double value);
+ void WriteGlobalValue(const WasmGlobal& global,
+ Handle<WasmGlobalObject> value);
+
+ void WriteGlobalAnyRef(const WasmGlobal& global, Handle<Object> value);
+
+ void SanitizeImports();
+
+ // Find the imported memory buffer if there is one. This is used to see if we
+ // need to recompile with bounds checks before creating the instance.
+ MaybeHandle<JSArrayBuffer> FindImportedMemoryBuffer() const;
+
+ // Processes a single imported function.
+ bool ProcessImportedFunction(Handle<WasmInstanceObject> instance,
+ int import_index, int func_index,
+ Handle<String> module_name,
+ Handle<String> import_name,
+ Handle<Object> value);
+
+ // Process a single imported table.
+ bool ProcessImportedTable(Handle<WasmInstanceObject> instance,
+ int import_index, int table_index,
+ Handle<String> module_name,
+ Handle<String> import_name, Handle<Object> value);
+
+ // Process a single imported memory.
+ bool ProcessImportedMemory(Handle<WasmInstanceObject> instance,
+ int import_index, Handle<String> module_name,
+ Handle<String> import_name, Handle<Object> value);
+
+ // Process a single imported global.
+ bool ProcessImportedGlobal(Handle<WasmInstanceObject> instance,
+ int import_index, int global_index,
+ Handle<String> module_name,
+ Handle<String> import_name, Handle<Object> value);
+
+ // Process a single imported WasmGlobalObject.
+ bool ProcessImportedWasmGlobalObject(Handle<WasmInstanceObject> instance,
+ int import_index,
+ Handle<String> module_name,
+ Handle<String> import_name,
+ const WasmGlobal& global,
+ Handle<WasmGlobalObject> global_object);
+
+ // Process the imports, including functions, tables, globals, and memory, in
+ // order, loading them from the {ffi_} object. Returns the number of imported
+ // functions.
+ int ProcessImports(Handle<WasmInstanceObject> instance);
+
+ template <typename T>
+ T* GetRawGlobalPtr(const WasmGlobal& global);
+
+ // Process initialization of globals.
+ void InitGlobals();
+
+ // Allocate memory for a module instance as a new JSArrayBuffer.
+ Handle<JSArrayBuffer> AllocateMemory(uint32_t num_pages);
+
+ bool NeedsWrappers() const;
+
+ // Process the exports, creating wrappers for functions, tables, memories,
+ // and globals.
+ void ProcessExports(Handle<WasmInstanceObject> instance);
+
+ void InitializeTables(Handle<WasmInstanceObject> instance);
+
+ void LoadTableSegments(Handle<WasmInstanceObject> instance);
+
+ // Creates new exception tags for all exceptions. Note that some tags might
+ // already exist if they were imported; those tags will be re-used.
+ void InitializeExceptions(Handle<WasmInstanceObject> instance);
+};
+
+MaybeHandle<WasmInstanceObject> InstantiateToInstanceObject(
+ Isolate* isolate, ErrorThrower* thrower,
+ Handle<WasmModuleObject> module_object, MaybeHandle<JSReceiver> imports,
+ MaybeHandle<JSArrayBuffer> memory) {
+ InstanceBuilder builder(isolate, thrower, module_object, imports, memory);
+ auto instance = builder.Build();
+ if (!instance.is_null() && builder.ExecuteStartFunction()) {
+ return instance;
+ }
+ DCHECK(isolate->has_pending_exception() || thrower->error());
+ return {};
+}
+
+InstanceBuilder::InstanceBuilder(Isolate* isolate, ErrorThrower* thrower,
+ Handle<WasmModuleObject> module_object,
+ MaybeHandle<JSReceiver> ffi,
+ MaybeHandle<JSArrayBuffer> memory)
+ : isolate_(isolate),
+ enabled_(module_object->native_module()->enabled_features()),
+ module_(module_object->module()),
+ thrower_(thrower),
+ module_object_(module_object),
+ ffi_(ffi),
+ memory_(memory) {
+ sanitized_imports_.reserve(module_->import_table.size());
+}
+
+// Build an instance, in all of its glory.
+MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"), "InstanceBuilder::Build");
+ // Check that an imports argument was provided, if the module requires it.
+ // No point in continuing otherwise.
+ if (!module_->import_table.empty() && ffi_.is_null()) {
+ thrower_->TypeError(
+ "Imports argument must be present and must be an object");
+ return {};
+ }
+
+ SanitizeImports();
+ if (thrower_->error()) return {};
+
+ // TODO(6792): No longer needed once WebAssembly code is off heap.
+ CodeSpaceMemoryModificationScope modification_scope(isolate_->heap());
+ // From here on, we expect the build pipeline to run without exiting to JS.
+ DisallowJavascriptExecution no_js(isolate_);
+ // Record build time into correct bucket, then build instance.
+ TimedHistogramScope wasm_instantiate_module_time_scope(SELECT_WASM_COUNTER(
+ isolate_->counters(), module_->origin, wasm_instantiate, module_time));
+
+ //--------------------------------------------------------------------------
+ // Allocate the memory array buffer.
+ //--------------------------------------------------------------------------
+ // We allocate the memory buffer before cloning or reusing the compiled module
+ // so we will know whether we need to recompile with bounds checks.
+ uint32_t initial_pages = module_->initial_pages;
+ auto initial_pages_counter = SELECT_WASM_COUNTER(
+ isolate_->counters(), module_->origin, wasm, min_mem_pages_count);
+ initial_pages_counter->AddSample(initial_pages);
+ // Asm.js has memory_ already set at this point, so we don't want to
+ // overwrite it.
+ if (memory_.is_null()) {
+ memory_ = FindImportedMemoryBuffer();
+ }
+ if (!memory_.is_null()) {
+ // Mark an externally passed ArrayBuffer as non-detachable.
+ Handle<JSArrayBuffer> memory = memory_.ToHandleChecked();
+ memory->set_is_detachable(false);
+
+ DCHECK_IMPLIES(use_trap_handler(), module_->origin == kAsmJsOrigin ||
+ memory->is_wasm_memory() ||
+ memory->backing_store() == nullptr);
+ } else if (initial_pages > 0 || use_trap_handler()) {
+ // We need to unconditionally create a guard region if using trap handlers,
+ // even when the size is zero, to prevent null-dereference issues
+ // (e.g. https://crbug.com/769637).
+ // Allocate memory if the initial size is more than 0 pages.
+ memory_ = AllocateMemory(initial_pages);
+ if (memory_.is_null()) {
+ // failed to allocate memory
+ DCHECK(isolate_->has_pending_exception() || thrower_->error());
+ return {};
+ }
+ }
+
+ //--------------------------------------------------------------------------
+ // Recompile module if using trap handlers but could not get guarded memory
+ //--------------------------------------------------------------------------
+ if (module_->origin == kWasmOrigin && use_trap_handler()) {
+ // Make sure the memory has suitable guard regions.
+ WasmMemoryTracker* const memory_tracker =
+ isolate_->wasm_engine()->memory_tracker();
+
+ if (!memory_tracker->HasFullGuardRegions(
+ memory_.ToHandleChecked()->backing_store())) {
+ if (!FLAG_wasm_trap_handler_fallback) {
+ thrower_->LinkError(
+ "Provided memory is lacking guard regions but fallback was "
+ "disabled.");
+ return {};
+ }
+
+ TRACE("Recompiling module without bounds checks\n");
+ ErrorThrower thrower(isolate_, "recompile");
+ auto native_module = module_object_->native_module();
+ CompileNativeModuleWithExplicitBoundsChecks(isolate_, &thrower, module_,
+ native_module);
+ if (thrower.error()) {
+ return {};
+ }
+ DCHECK(!native_module->use_trap_handler());
+ }
+ }
+
+ //--------------------------------------------------------------------------
+ // Create the WebAssembly.Instance object.
+ //--------------------------------------------------------------------------
+ NativeModule* native_module = module_object_->native_module();
+ TRACE("New module instantiation for %p\n", native_module);
+ Handle<WasmInstanceObject> instance =
+ WasmInstanceObject::New(isolate_, module_object_);
+ NativeModuleModificationScope native_modification_scope(native_module);
+
+ //--------------------------------------------------------------------------
+ // Set up the globals for the new instance.
+ //--------------------------------------------------------------------------
+ uint32_t untagged_globals_buffer_size = module_->untagged_globals_buffer_size;
+ if (untagged_globals_buffer_size > 0) {
+ void* backing_store = isolate_->array_buffer_allocator()->Allocate(
+ untagged_globals_buffer_size);
+ if (backing_store == nullptr) {
+ thrower_->RangeError("Out of memory: wasm globals");
+ return {};
+ }
+ untagged_globals_ =
+ isolate_->factory()->NewJSArrayBuffer(SharedFlag::kNotShared, TENURED);
+ constexpr bool is_external = false;
+ constexpr bool is_wasm_memory = false;
+ JSArrayBuffer::Setup(untagged_globals_, isolate_, is_external,
+ backing_store, untagged_globals_buffer_size,
+ SharedFlag::kNotShared, is_wasm_memory);
+ if (untagged_globals_.is_null()) {
+ thrower_->RangeError("Out of memory: wasm globals");
+ return {};
+ }
+ instance->set_globals_start(
+ reinterpret_cast<byte*>(untagged_globals_->backing_store()));
+ instance->set_untagged_globals_buffer(*untagged_globals_);
+ }
+
+ uint32_t tagged_globals_buffer_size = module_->tagged_globals_buffer_size;
+ if (tagged_globals_buffer_size > 0) {
+ tagged_globals_ = isolate_->factory()->NewFixedArray(
+ static_cast<int>(tagged_globals_buffer_size));
+ instance->set_tagged_globals_buffer(*tagged_globals_);
+ }
+
+ //--------------------------------------------------------------------------
+ // Set up the array of references to imported globals' array buffers.
+ //--------------------------------------------------------------------------
+ if (module_->num_imported_mutable_globals > 0) {
+ // TODO(binji): This allocates one slot for each mutable global, which is
+ // more than required if multiple globals are imported from the same
+ // module.
+ Handle<FixedArray> buffers_array = isolate_->factory()->NewFixedArray(
+ module_->num_imported_mutable_globals, TENURED);
+ instance->set_imported_mutable_globals_buffers(*buffers_array);
+ }
+
+ //--------------------------------------------------------------------------
+ // Set up the exception table used for exception tag checks.
+ //--------------------------------------------------------------------------
+ int exceptions_count = static_cast<int>(module_->exceptions.size());
+ if (exceptions_count > 0) {
+ Handle<FixedArray> exception_table =
+ isolate_->factory()->NewFixedArray(exceptions_count, TENURED);
+ instance->set_exceptions_table(*exception_table);
+ exception_wrappers_.resize(exceptions_count);
+ }
+
+ //--------------------------------------------------------------------------
+ // Reserve the metadata for indirect function tables.
+ //--------------------------------------------------------------------------
+ int table_count = static_cast<int>(module_->tables.size());
+ table_instances_.resize(table_count);
+
+ //--------------------------------------------------------------------------
+ // Process the imports for the module.
+ //--------------------------------------------------------------------------
+ int num_imported_functions = ProcessImports(instance);
+ if (num_imported_functions < 0) return {};
+
+ //--------------------------------------------------------------------------
+ // Process the initialization for the module's globals.
+ //--------------------------------------------------------------------------
+ InitGlobals();
+
+ //--------------------------------------------------------------------------
+ // Initialize the indirect tables.
+ //--------------------------------------------------------------------------
+ if (table_count > 0) {
+ InitializeTables(instance);
+ }
+
+ //--------------------------------------------------------------------------
+ // Initialize the exceptions table.
+ //--------------------------------------------------------------------------
+ if (exceptions_count > 0) {
+ InitializeExceptions(instance);
+ }
+
+ //--------------------------------------------------------------------------
+ // Create the WebAssembly.Memory object.
+ //--------------------------------------------------------------------------
+ if (module_->has_memory) {
+ if (!instance->has_memory_object()) {
+ // No memory object exists. Create one.
+ Handle<WasmMemoryObject> memory_object = WasmMemoryObject::New(
+ isolate_, memory_,
+ module_->maximum_pages != 0 ? module_->maximum_pages : -1);
+ instance->set_memory_object(*memory_object);
+ }
+
+ // Add the instance object to the list of instances for this memory.
+ Handle<WasmMemoryObject> memory_object(instance->memory_object(), isolate_);
+ WasmMemoryObject::AddInstance(isolate_, memory_object, instance);
+
+ if (!memory_.is_null()) {
+ // Double-check the {memory} array buffer matches the instance.
+ Handle<JSArrayBuffer> memory = memory_.ToHandleChecked();
+ CHECK_EQ(instance->memory_size(), memory->byte_length());
+ CHECK_EQ(instance->memory_start(), memory->backing_store());
+ }
+ }
+
+ //--------------------------------------------------------------------------
+ // Check that indirect function table segments are within bounds.
+ //--------------------------------------------------------------------------
+ for (const WasmElemSegment& elem_segment : module_->elem_segments) {
+ if (!elem_segment.active) continue;
+ DCHECK(elem_segment.table_index < table_instances_.size());
+ uint32_t base = EvalUint32InitExpr(elem_segment.offset);
+ size_t table_size = table_instances_[elem_segment.table_index].table_size;
+ if (!IsInBounds(base, elem_segment.entries.size(), table_size)) {
+ thrower_->LinkError("table initializer is out of bounds");
+ return {};
+ }
+ }
+
+ //--------------------------------------------------------------------------
+ // Check that memory segments are within bounds.
+ //--------------------------------------------------------------------------
+ for (const WasmDataSegment& seg : module_->data_segments) {
+ if (!seg.active) continue;
+ uint32_t base = EvalUint32InitExpr(seg.dest_addr);
+ if (!IsInBounds(base, seg.source.length(), instance->memory_size())) {
+ thrower_->LinkError("data segment is out of bounds");
+ return {};
+ }
+ }
+
+ //--------------------------------------------------------------------------
+ // Set up the exports object for the new instance.
+ //--------------------------------------------------------------------------
+ ProcessExports(instance);
+ if (thrower_->error()) return {};
+
+ //--------------------------------------------------------------------------
+ // Initialize the indirect function tables.
+ //--------------------------------------------------------------------------
+ if (table_count > 0) {
+ LoadTableSegments(instance);
+ }
+
+ //--------------------------------------------------------------------------
+ // Initialize the memory by loading data segments.
+ //--------------------------------------------------------------------------
+ if (module_->data_segments.size() > 0) {
+ LoadDataSegments(instance);
+ }
+
+ //--------------------------------------------------------------------------
+ // Debugging support.
+ //--------------------------------------------------------------------------
+ // Set all breakpoints that were set on the shared module.
+ WasmModuleObject::SetBreakpointsOnNewInstance(module_object_, instance);
+
+ if (FLAG_wasm_interpret_all && module_->origin == kWasmOrigin) {
+ Handle<WasmDebugInfo> debug_info =
+ WasmInstanceObject::GetOrCreateDebugInfo(instance);
+ std::vector<int> func_indexes;
+ for (int func_index = num_imported_functions,
+ num_wasm_functions = static_cast<int>(module_->functions.size());
+ func_index < num_wasm_functions; ++func_index) {
+ func_indexes.push_back(func_index);
+ }
+ WasmDebugInfo::RedirectToInterpreter(debug_info, VectorOf(func_indexes));
+ }
+
+ //--------------------------------------------------------------------------
+ // Create a wrapper for the start function.
+ //--------------------------------------------------------------------------
+ if (module_->start_function_index >= 0) {
+ int start_index = module_->start_function_index;
+ auto& function = module_->functions[start_index];
+ Handle<Code> wrapper_code = js_to_wasm_cache_.GetOrCompileJSToWasmWrapper(
+ isolate_, function.sig, function.imported);
+ // TODO(clemensh): Don't generate an exported function for the start
+ // function. Use CWasmEntry instead.
+ start_function_ = WasmExportedFunction::New(
+ isolate_, instance, MaybeHandle<String>(), start_index,
+ static_cast<int>(function.sig->parameter_count()), wrapper_code);
+ }
+
+ DCHECK(!isolate_->has_pending_exception());
+ TRACE("Successfully built instance for module %p\n",
+ module_object_->native_module());
+ return instance;
+}
+
+bool InstanceBuilder::ExecuteStartFunction() {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"),
+ "InstanceBuilder::ExecuteStartFunction");
+ if (start_function_.is_null()) return true; // No start function.
+
+ HandleScope scope(isolate_);
+ // Call the JS function.
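+ // A wasm start function has signature [] -> [], so the call takes zero
+ // arguments and an undefined receiver.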
+ Handle<Object> undefined = isolate_->factory()->undefined_value();
+ MaybeHandle<Object> retval =
+ Execution::Call(isolate_, start_function_, undefined, 0, nullptr);
+
+ if (retval.is_null()) {
+ DCHECK(isolate_->has_pending_exception());
+ return false;
+ }
+ return true;
+}
+
+// Look up an import value in the {ffi_} object.
+MaybeHandle<Object> InstanceBuilder::LookupImport(uint32_t index,
+ Handle<String> module_name,
+ Handle<String> import_name) {
+ // We pre-validated in the js-api layer that the ffi object is present and
+ // is a JSObject, if the module has imports.
+ DCHECK(!ffi_.is_null());
+
+ // Look up the module first.
+ MaybeHandle<Object> result = Object::GetPropertyOrElement(
+ isolate_, ffi_.ToHandleChecked(), module_name);
+ if (result.is_null()) {
+ return ReportTypeError("module not found", index, module_name);
+ }
+
+ Handle<Object> module = result.ToHandleChecked();
+
+ // Look up the value in the module.
+ if (!module->IsJSReceiver()) {
+ return ReportTypeError("module is not an object or function", index,
+ module_name);
+ }
+
+ result = Object::GetPropertyOrElement(isolate_, module, import_name);
+ if (result.is_null()) {
+ ReportLinkError("import not found", index, module_name, import_name);
+ return MaybeHandle<JSFunction>();
+ }
+
+ return result;
+}
+
+// Look up an import value in the {ffi_} object specifically for linking an
+// asm.js module. This only performs non-observable lookups, which allows
+// falling back to JavaScript proper (and hence re-executing all lookups) if
+// module instantiation fails.
+MaybeHandle<Object> InstanceBuilder::LookupImportAsm(
+ uint32_t index, Handle<String> import_name) {
+ // Check that a foreign function interface object was provided.
+ if (ffi_.is_null()) {
+ return ReportLinkError("missing imports object", index, import_name);
+ }
+
+ // Perform lookup of the given {import_name} without causing any observable
+ // side-effect. We only accept accesses that resolve to data properties,
+ // which is indicated by the asm.js spec in section 7 ("Linking") as well.
+ Handle<Object> result;
+ LookupIterator it = LookupIterator::PropertyOrElement(
+ isolate_, ffi_.ToHandleChecked(), import_name);
+ switch (it.state()) {
+ case LookupIterator::ACCESS_CHECK:
+ case LookupIterator::INTEGER_INDEXED_EXOTIC:
+ case LookupIterator::INTERCEPTOR:
+ case LookupIterator::JSPROXY:
+ case LookupIterator::ACCESSOR:
+ case LookupIterator::TRANSITION:
+ return ReportLinkError("not a data property", index, import_name);
+ case LookupIterator::NOT_FOUND:
+ // Accepting missing properties as undefined does not cause any
+ // observable difference from JavaScript semantics, so we are lenient.
+ result = isolate_->factory()->undefined_value();
+ break;
+ case LookupIterator::DATA:
+ result = it.GetDataValue();
+ break;
+ }
+
+ return result;
+}
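+// For example, {ffi.sin = Math.sin} resolves through the DATA case above,
+// while getter- or proxy-backed properties hit one of the other states and
+// fail the link, keeping the lookup free of observable side effects.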
+
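+// Evaluate an init expression whose result is used as a u32, i.e. a table or
+// memory segment offset. Only {i32.const} and {global.get} of an i32 global
+// can occur here; in the latter case the global's value has already been
+// written by {ProcessImports} or {InitGlobals}.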
+uint32_t InstanceBuilder::EvalUint32InitExpr(const WasmInitExpr& expr) {
+ switch (expr.kind) {
+ case WasmInitExpr::kI32Const:
+ return expr.val.i32_const;
+ case WasmInitExpr::kGlobalIndex: {
+ uint32_t offset = module_->globals[expr.val.global_index].offset;
+ return ReadLittleEndianValue<uint32_t>(
+ reinterpret_cast<Address>(raw_buffer_ptr(untagged_globals_, offset)));
+ }
+ default:
+ UNREACHABLE();
+ }
+}
+
+// Load data segments into the memory.
+void InstanceBuilder::LoadDataSegments(Handle<WasmInstanceObject> instance) {
+ Vector<const uint8_t> wire_bytes =
+ module_object_->native_module()->wire_bytes();
+ for (const WasmDataSegment& segment : module_->data_segments) {
+ uint32_t source_size = segment.source.length();
+ // Segments of size == 0 are just nops.
+ if (source_size == 0) continue;
+ // Passive segments are not copied during instantiation.
+ if (!segment.active) continue;
+ uint32_t dest_offset = EvalUint32InitExpr(segment.dest_addr);
+ DCHECK(IsInBounds(dest_offset, source_size, instance->memory_size()));
+ byte* dest = instance->memory_start() + dest_offset;
+ const byte* src = wire_bytes.start() + segment.source.offset();
+ memcpy(dest, src, source_size);
+ }
+}
+
+void InstanceBuilder::WriteGlobalValue(const WasmGlobal& global, double num) {
+ TRACE("init [globals_start=%p + %u] = %lf, type = %s\n",
+ reinterpret_cast<void*>(raw_buffer_ptr(untagged_globals_, 0)),
+ global.offset, num, ValueTypes::TypeName(global.type));
+ switch (global.type) {
+ case kWasmI32:
+ WriteLittleEndianValue<int32_t>(GetRawGlobalPtr<int32_t>(global),
+ static_cast<int32_t>(num));
+ break;
+ case kWasmI64:
+ WriteLittleEndianValue<int64_t>(GetRawGlobalPtr<int64_t>(global),
+ static_cast<int64_t>(num));
+ break;
+ case kWasmF32:
+ WriteLittleEndianValue<float>(GetRawGlobalPtr<float>(global),
+ static_cast<float>(num));
+ break;
+ case kWasmF64:
+ WriteLittleEndianValue<double>(GetRawGlobalPtr<double>(global),
+ static_cast<double>(num));
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+void InstanceBuilder::WriteGlobalValue(const WasmGlobal& global,
+ Handle<WasmGlobalObject> value) {
+ TRACE("init [globals_start=%p + %u] = ",
+ reinterpret_cast<void*>(raw_buffer_ptr(untagged_globals_, 0)),
+ global.offset);
+ switch (global.type) {
+ case kWasmI32: {
+ int32_t num = value->GetI32();
+ WriteLittleEndianValue<int32_t>(GetRawGlobalPtr<int32_t>(global), num);
+ TRACE("%d", num);
+ break;
+ }
+ case kWasmI64: {
+ int64_t num = value->GetI64();
+ WriteLittleEndianValue<int64_t>(GetRawGlobalPtr<int64_t>(global), num);
+ TRACE("%" PRId64, num);
+ break;
+ }
+ case kWasmF32: {
+ float num = value->GetF32();
+ WriteLittleEndianValue<float>(GetRawGlobalPtr<float>(global), num);
+ TRACE("%f", num);
+ break;
+ }
+ case kWasmF64: {
+ double num = value->GetF64();
+ WriteLittleEndianValue<double>(GetRawGlobalPtr<double>(global), num);
+ TRACE("%lf", num);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+ TRACE(", type = %s (from WebAssembly.Global)\n",
+ ValueTypes::TypeName(global.type));
+}
+
+void InstanceBuilder::WriteGlobalAnyRef(const WasmGlobal& global,
+ Handle<Object> value) {
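+ // Unlike the initializing stores in {InitGlobals}, {value} can be an
+ // arbitrary heap object provided by the embedder, so keep the full write
+ // barrier here.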
+ tagged_globals_->set(global.offset, *value, UPDATE_WRITE_BARRIER);
+}
+
+void InstanceBuilder::SanitizeImports() {
+ Vector<const uint8_t> wire_bytes =
+ module_object_->native_module()->wire_bytes();
+ for (size_t index = 0; index < module_->import_table.size(); ++index) {
+ const WasmImport& import = module_->import_table[index];
+
+ Handle<String> module_name;
+ MaybeHandle<String> maybe_module_name =
+ WasmModuleObject::ExtractUtf8StringFromModuleBytes(isolate_, wire_bytes,
+ import.module_name);
+ if (!maybe_module_name.ToHandle(&module_name)) {
+ thrower_->LinkError("Could not resolve module name for import %zu",
+ index);
+ return;
+ }
+
+ Handle<String> import_name;
+ MaybeHandle<String> maybe_import_name =
+ WasmModuleObject::ExtractUtf8StringFromModuleBytes(isolate_, wire_bytes,
+ import.field_name);
+ if (!maybe_import_name.ToHandle(&import_name)) {
+ thrower_->LinkError("Could not resolve import name for import %zu",
+ index);
+ return;
+ }
+
+ int int_index = static_cast<int>(index);
+ MaybeHandle<Object> result =
+ module_->origin == kAsmJsOrigin
+ ? LookupImportAsm(int_index, import_name)
+ : LookupImport(int_index, module_name, import_name);
+ if (thrower_->error()) {
+ thrower_->LinkError("Could not find value for import %zu", index);
+ return;
+ }
+ Handle<Object> value = result.ToHandleChecked();
+ sanitized_imports_.push_back({module_name, import_name, value});
+ }
+}
+
+MaybeHandle<JSArrayBuffer> InstanceBuilder::FindImportedMemoryBuffer() const {
+ DCHECK_EQ(module_->import_table.size(), sanitized_imports_.size());
+ for (size_t index = 0; index < module_->import_table.size(); index++) {
+ const WasmImport& import = module_->import_table[index];
+
+ if (import.kind == kExternalMemory) {
+ const auto& value = sanitized_imports_[index].value;
+ if (!value->IsWasmMemoryObject()) {
+ return {};
+ }
+ auto memory = Handle<WasmMemoryObject>::cast(value);
+ Handle<JSArrayBuffer> buffer(memory->array_buffer(), isolate_);
+ return buffer;
+ }
+ }
+ return {};
+}
+
+bool InstanceBuilder::ProcessImportedFunction(
+ Handle<WasmInstanceObject> instance, int import_index, int func_index,
+ Handle<String> module_name, Handle<String> import_name,
+ Handle<Object> value) {
+ // Function imports must be callable.
+ if (!value->IsCallable()) {
+ ReportLinkError("function import requires a callable", import_index,
+ module_name, import_name);
+ return false;
+ }
+ auto js_receiver = Handle<JSReceiver>::cast(value);
+ FunctionSig* expected_sig = module_->functions[func_index].sig;
+ auto kind = compiler::GetWasmImportCallKind(js_receiver, expected_sig,
+ enabled_.bigint);
+ switch (kind) {
+ case compiler::WasmImportCallKind::kLinkError:
+ ReportLinkError("imported function does not match the expected type",
+ import_index, module_name, import_name);
+ return false;
+ case compiler::WasmImportCallKind::kWasmToWasm: {
+ // The imported function is a WASM function from another instance.
+ auto imported_function = Handle<WasmExportedFunction>::cast(value);
+ Handle<WasmInstanceObject> imported_instance(
+ imported_function->instance(), isolate_);
+ // The import reference is the instance object itself.
+ Address imported_target = imported_function->GetWasmCallTarget();
+ ImportedFunctionEntry entry(instance, func_index);
+ entry.SetWasmToWasm(*imported_instance, imported_target);
+ break;
+ }
+ default: {
+ // The imported function is a callable.
+ NativeModule* native_module = instance->module_object()->native_module();
+ WasmCode* wasm_code = native_module->import_wrapper_cache()->GetOrCompile(
+ isolate_->wasm_engine(), isolate_->counters(), kind, expected_sig);
+ ImportedFunctionEntry entry(instance, func_index);
+ if (wasm_code->kind() == WasmCode::kWasmToJsWrapper) {
+ // Wasm to JS wrappers are treated specially in the import table.
+ entry.SetWasmToJs(isolate_, js_receiver, wasm_code);
+ } else {
+ // Wasm math intrinsics are compiled as regular Wasm functions.
+ DCHECK(kind >= compiler::WasmImportCallKind::kFirstMathIntrinsic &&
+ kind <= compiler::WasmImportCallKind::kLastMathIntrinsic);
+ entry.SetWasmToWasm(*instance, wasm_code->instruction_start());
+ }
+ break;
+ }
+ }
+ return true;
+}
+
+bool InstanceBuilder::ProcessImportedTable(Handle<WasmInstanceObject> instance,
+ int import_index, int table_index,
+ Handle<String> module_name,
+ Handle<String> import_name,
+ Handle<Object> value) {
+ if (!value->IsWasmTableObject()) {
+ ReportLinkError("table import requires a WebAssembly.Table", import_index,
+ module_name, import_name);
+ return false;
+ }
+ const WasmTable& table = module_->tables[table_index];
+ TableInstance& table_instance = table_instances_[table_index];
+ table_instance.table_object = Handle<WasmTableObject>::cast(value);
+ instance->set_table_object(*table_instance.table_object);
+ table_instance.js_wrappers =
+ Handle<FixedArray>(table_instance.table_object->functions(), isolate_);
+
+ int imported_table_size = table_instance.js_wrappers->length();
+ if (imported_table_size < static_cast<int>(table.initial_size)) {
+ thrower_->LinkError("table import %d is smaller than initial %d, got %u",
+ import_index, table.initial_size, imported_table_size);
+ return false;
+ }
+
+ if (table.has_maximum_size) {
+ int64_t imported_maximum_size =
+ table_instance.table_object->maximum_length()->Number();
+ if (imported_maximum_size < 0) {
+ thrower_->LinkError("table import %d has no maximum length, expected %d",
+ import_index, table.maximum_size);
+ return false;
+ }
+ if (imported_maximum_size > table.maximum_size) {
+ thrower_->LinkError("table import %d has a larger maximum size %" PRIx64
+ " than the module's declared maximum %u",
+ import_index, imported_maximum_size,
+ table.maximum_size);
+ return false;
+ }
+ }
+
+ // Allocate a new dispatch table.
+ if (!instance->has_indirect_function_table()) {
+ WasmInstanceObject::EnsureIndirectFunctionTableWithMinimumSize(
+ instance, imported_table_size);
+ table_instances_[table_index].table_size = imported_table_size;
+ }
+ // Initialize the dispatch table with the (foreign) JS functions
+ // that are already in the table.
+ for (int i = 0; i < imported_table_size; ++i) {
+ Handle<Object> val(table_instance.js_wrappers->get(i), isolate_);
+ // TODO(mtrofin): this is the same logic as WasmTableObject::Set:
+ // insert in the local table a wrapper from the other module, and add
+ // a reference to the owning instance of the other module.
+ if (!val->IsJSFunction()) continue;
+ if (!WasmExportedFunction::IsWasmExportedFunction(*val)) {
+ thrower_->LinkError("table import %d[%d] is not a wasm function",
+ import_index, i);
+ return false;
+ }
+ auto target_func = Handle<WasmExportedFunction>::cast(val);
+ Handle<WasmInstanceObject> target_instance =
+ handle(target_func->instance(), isolate_);
+ // Look up the signature's canonical id. If there is no canonical
+ // id, then the signature does not appear at all in this module,
+ // so putting {-1} in the table will cause checks to always fail.
+ FunctionSig* sig = target_func->sig();
+ IndirectFunctionTableEntry(instance, i)
+ .Set(module_->signature_map.Find(*sig), target_instance,
+ target_func->function_index());
+ }
+ return true;
+}
+
+bool InstanceBuilder::ProcessImportedMemory(Handle<WasmInstanceObject> instance,
+ int import_index,
+ Handle<String> module_name,
+ Handle<String> import_name,
+ Handle<Object> value) {
+ // Validation should have failed if more than one memory object was
+ // provided.
+ DCHECK(!instance->has_memory_object());
+ if (!value->IsWasmMemoryObject()) {
+ ReportLinkError("memory import must be a WebAssembly.Memory object",
+ import_index, module_name, import_name);
+ return false;
+ }
+ auto memory = Handle<WasmMemoryObject>::cast(value);
+ instance->set_memory_object(*memory);
+ Handle<JSArrayBuffer> buffer(memory->array_buffer(), isolate_);
+ // memory_ should have already been assigned in Build().
+ DCHECK_EQ(*memory_.ToHandleChecked(), *buffer);
+ uint32_t imported_cur_pages =
+ static_cast<uint32_t>(buffer->byte_length() / kWasmPageSize);
+ if (imported_cur_pages < module_->initial_pages) {
+ thrower_->LinkError("memory import %d is smaller than initial %u, got %u",
+ import_index, module_->initial_pages,
+ imported_cur_pages);
+ return false;
+ }
+ int32_t imported_maximum_pages = memory->maximum_pages();
+ if (module_->has_maximum_pages) {
+ if (imported_maximum_pages < 0) {
+ thrower_->LinkError(
+ "memory import %d has no maximum limit, expected at most %u",
+ import_index, module_->maximum_pages);
+ return false;
+ }
+ if (static_cast<uint32_t>(imported_maximum_pages) >
+ module_->maximum_pages) {
+ thrower_->LinkError(
+ "memory import %d has a larger maximum size %u than the "
+ "module's declared maximum %u",
+ import_index, imported_maximum_pages, module_->maximum_pages);
+ return false;
+ }
+ }
+ if (module_->has_shared_memory != buffer->is_shared()) {
+ thrower_->LinkError(
+ "mismatch in shared state of memory, declared = %d, imported = %d",
+ module_->has_shared_memory, buffer->is_shared());
+ return false;
+ }
+
+ return true;
+}
+
+bool InstanceBuilder::ProcessImportedWasmGlobalObject(
+ Handle<WasmInstanceObject> instance, int import_index,
+ Handle<String> module_name, Handle<String> import_name,
+ const WasmGlobal& global, Handle<WasmGlobalObject> global_object) {
+ if (global_object->type() != global.type) {
+ ReportLinkError("imported global does not match the expected type",
+ import_index, module_name, import_name);
+ return false;
+ }
+ if (global_object->is_mutable() != global.mutability) {
+ ReportLinkError("imported global does not match the expected mutability",
+ import_index, module_name, import_name);
+ return false;
+ }
+ if (global.mutability) {
+ DCHECK_LT(global.index, module_->num_imported_mutable_globals);
+ Handle<Object> buffer;
+ Address address_or_offset;
+ if (global.type == kWasmAnyRef) {
+ static_assert(sizeof(global_object->offset()) <= sizeof(Address),
+ "The offset into the globals buffer does not fit into "
+ "the imported_mutable_globals array");
+ buffer = handle(global_object->tagged_buffer(), isolate_);
+ // For anyref globals we use a relative offset, not an absolute address.
+ address_or_offset = static_cast<Address>(global_object->offset());
+ } else {
+ buffer = handle(global_object->untagged_buffer(), isolate_);
+ // It is safe in this case to store the raw pointer to the buffer
+ // since the backing store of the JSArrayBuffer will not be
+ // relocated.
+ address_or_offset = reinterpret_cast<Address>(raw_buffer_ptr(
+ Handle<JSArrayBuffer>::cast(buffer), global_object->offset()));
+ }
+ instance->imported_mutable_globals_buffers()->set(global.index, *buffer);
+ instance->imported_mutable_globals()[global.index] = address_or_offset;
+ return true;
+ }
+
+ WriteGlobalValue(global, global_object);
+ return true;
+}
+
+bool InstanceBuilder::ProcessImportedGlobal(Handle<WasmInstanceObject> instance,
+ int import_index, int global_index,
+ Handle<String> module_name,
+ Handle<String> import_name,
+ Handle<Object> value) {
+ // Immutable global imports are converted to numbers and written into
+ // the {untagged_globals_} array buffer.
+ //
+ // Mutable global imports instead have their backing array buffers
+ // referenced by this instance, and store the address of the imported
+ // global in the {imported_mutable_globals_} array.
+ const WasmGlobal& global = module_->globals[global_index];
+
+ // The mutable-global proposal allows importing i64 values, but only if
+ // they are passed as a WebAssembly.Global object.
+ //
+ // However, the bigint proposal allows importing constant i64 values
+ // even when they are not passed as a WebAssembly.Global object.
+ if (global.type == kWasmI64 && !enabled_.bigint &&
+ !value->IsWasmGlobalObject()) {
+ ReportLinkError("global import cannot have type i64", import_index,
+ module_name, import_name);
+ return false;
+ }
+ if (module_->origin == kAsmJsOrigin) {
+ // Accepting {JSFunction} on top of just primitive values here is a
+ // workaround to support legacy asm.js code with broken bindings. Note
+ // that using {NaN} (or Smi::kZero) here is what the observable
+ // conversion via {ToPrimitive} would produce as well.
+ // TODO(mstarzinger): This is still observable if Function.prototype.valueOf
+ // or friends are patched; we might need to check for that as well.
+ if (value->IsJSFunction()) value = isolate_->factory()->nan_value();
+ if (value->IsPrimitive() && !value->IsSymbol()) {
+ if (global.type == kWasmI32) {
+ value = Object::ToInt32(isolate_, value).ToHandleChecked();
+ } else {
+ value = Object::ToNumber(isolate_, value).ToHandleChecked();
+ }
+ }
+ }
+
+ if (value->IsWasmGlobalObject()) {
+ auto global_object = Handle<WasmGlobalObject>::cast(value);
+ return ProcessImportedWasmGlobalObject(instance, import_index, module_name,
+ import_name, global, global_object);
+ }
+
+ if (global.mutability) {
+ ReportLinkError(
+ "imported mutable global must be a WebAssembly.Global object",
+ import_index, module_name, import_name);
+ return false;
+ }
+
+ if (global.type == ValueType::kWasmAnyRef) {
+ WriteGlobalAnyRef(global, value);
+ return true;
+ }
+
+ if (value->IsNumber()) {
+ WriteGlobalValue(global, value->Number());
+ return true;
+ }
+
+ if (enabled_.bigint && global.type == kWasmI64) {
+ Handle<BigInt> bigint;
+
+ if (!BigInt::FromObject(isolate_, value).ToHandle(&bigint)) {
+ return false;
+ }
+ WriteGlobalValue(global, bigint->AsInt64());
+ return true;
+ }
+
+ ReportLinkError("global import must be a number or WebAssembly.Global object",
+ import_index, module_name, import_name);
+ return false;
+}
+
+// Process the imports, including functions, tables, globals, and memory, in
+// order, loading them from the {ffi_} object. Returns the number of imported
+// functions.
+int InstanceBuilder::ProcessImports(Handle<WasmInstanceObject> instance) {
+ int num_imported_functions = 0;
+ int num_imported_tables = 0;
+
+ DCHECK_EQ(module_->import_table.size(), sanitized_imports_.size());
+ int num_imports = static_cast<int>(module_->import_table.size());
+ for (int index = 0; index < num_imports; ++index) {
+ const WasmImport& import = module_->import_table[index];
+
+ Handle<String> module_name = sanitized_imports_[index].module_name;
+ Handle<String> import_name = sanitized_imports_[index].import_name;
+ Handle<Object> value = sanitized_imports_[index].value;
+
+ switch (import.kind) {
+ case kExternalFunction: {
+ uint32_t func_index = import.index;
+ DCHECK_EQ(num_imported_functions, func_index);
+ if (!ProcessImportedFunction(instance, index, func_index, module_name,
+ import_name, value)) {
+ return -1;
+ }
+ num_imported_functions++;
+ break;
+ }
+ case kExternalTable: {
+ uint32_t table_index = import.index;
+ DCHECK_EQ(table_index, num_imported_tables);
+ if (!ProcessImportedTable(instance, index, table_index, module_name,
+ import_name, value)) {
+ return -1;
+ }
+ num_imported_tables++;
+ break;
+ }
+ case kExternalMemory: {
+ if (!ProcessImportedMemory(instance, index, module_name, import_name,
+ value)) {
+ return -1;
+ }
+ break;
+ }
+ case kExternalGlobal: {
+ if (!ProcessImportedGlobal(instance, index, import.index, module_name,
+ import_name, value)) {
+ return -1;
+ }
+ break;
+ }
+ case kExternalException: {
+ if (!value->IsWasmExceptionObject()) {
+ ReportLinkError("exception import requires a WebAssembly.Exception",
+ index, module_name, import_name);
+ return -1;
+ }
+ Handle<WasmExceptionObject> imported_exception =
+ Handle<WasmExceptionObject>::cast(value);
+ if (!imported_exception->IsSignatureEqual(
+ module_->exceptions[import.index].sig)) {
+ ReportLinkError("imported exception does not match the expected type",
+ index, module_name, import_name);
+ return -1;
+ }
+ Object exception_tag = imported_exception->exception_tag();
+ DCHECK(instance->exceptions_table()->get(import.index)->IsUndefined());
+ instance->exceptions_table()->set(import.index, exception_tag);
+ exception_wrappers_[import.index] = imported_exception;
+ break;
+ }
+ default:
+ UNREACHABLE();
+ break;
+ }
+ }
+ return num_imported_functions;
+}
+
+template <typename T>
+T* InstanceBuilder::GetRawGlobalPtr(const WasmGlobal& global) {
+ return reinterpret_cast<T*>(raw_buffer_ptr(untagged_globals_, global.offset));
+}
+
+// Process initialization of globals.
+void InstanceBuilder::InitGlobals() {
+ for (auto global : module_->globals) {
+ if (global.mutability && global.imported) {
+ continue;
+ }
+
+ switch (global.init.kind) {
+ case WasmInitExpr::kI32Const:
+ WriteLittleEndianValue<int32_t>(GetRawGlobalPtr<int32_t>(global),
+ global.init.val.i32_const);
+ break;
+ case WasmInitExpr::kI64Const:
+ WriteLittleEndianValue<int64_t>(GetRawGlobalPtr<int64_t>(global),
+ global.init.val.i64_const);
+ break;
+ case WasmInitExpr::kF32Const:
+ WriteLittleEndianValue<float>(GetRawGlobalPtr<float>(global),
+ global.init.val.f32_const);
+ break;
+ case WasmInitExpr::kF64Const:
+ WriteLittleEndianValue<double>(GetRawGlobalPtr<double>(global),
+ global.init.val.f64_const);
+ break;
+ case WasmInitExpr::kAnyRefConst:
+ DCHECK(enabled_.anyref);
+ if (global.imported) break; // We already initialized imported globals.
+
+ tagged_globals_->set(global.offset,
+ ReadOnlyRoots(isolate_).null_value(),
+ SKIP_WRITE_BARRIER);
+ break;
+ case WasmInitExpr::kGlobalIndex: {
+ if (global.type == ValueType::kWasmAnyRef) {
+ DCHECK(enabled_.anyref);
+ int other_offset =
+ module_->globals[global.init.val.global_index].offset;
+
+ tagged_globals_->set(global.offset,
+ tagged_globals_->get(other_offset),
+ SKIP_WRITE_BARRIER);
+ }
+ // Initialize with another global.
+ uint32_t new_offset = global.offset;
+ uint32_t old_offset =
+ module_->globals[global.init.val.global_index].offset;
+ TRACE("init [globals+%u] = [globals+%d]\n", global.offset, old_offset);
+ size_t size = (global.type == kWasmI64 || global.type == kWasmF64)
+ ? sizeof(double)
+ : sizeof(int32_t);
+ memcpy(raw_buffer_ptr(untagged_globals_, new_offset),
+ raw_buffer_ptr(untagged_globals_, old_offset), size);
+ break;
+ }
+ case WasmInitExpr::kNone:
+ // Happens with imported globals.
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ }
+}
+
+// Allocate memory for a module instance as a new JSArrayBuffer.
+Handle<JSArrayBuffer> InstanceBuilder::AllocateMemory(uint32_t num_pages) {
+ if (num_pages > max_mem_pages()) {
+ thrower_->RangeError("Out of memory: wasm memory too large");
+ return Handle<JSArrayBuffer>::null();
+ }
+ const bool is_shared_memory = module_->has_shared_memory && enabled_.threads;
+ SharedFlag shared_flag =
+ is_shared_memory ? SharedFlag::kShared : SharedFlag::kNotShared;
+ Handle<JSArrayBuffer> mem_buffer;
+ if (!NewArrayBuffer(isolate_, num_pages * kWasmPageSize, shared_flag)
+ .ToHandle(&mem_buffer)) {
+ thrower_->RangeError("Out of memory: wasm memory");
+ }
+ return mem_buffer;
+}
+
+bool InstanceBuilder::NeedsWrappers() const {
+ if (module_->num_exported_functions > 0) return true;
+ for (auto& table_instance : table_instances_) {
+ if (!table_instance.js_wrappers.is_null()) return true;
+ }
+ for (auto& table : module_->tables) {
+ if (table.exported) return true;
+ }
+ return false;
+}
+
+// Process the exports, creating wrappers for functions, tables, memories,
+// globals, and exceptions.
+void InstanceBuilder::ProcessExports(Handle<WasmInstanceObject> instance) {
+ Handle<FixedArray> export_wrappers(module_object_->export_wrappers(),
+ isolate_);
+ if (NeedsWrappers()) {
+ // Fill the table to cache the exported JSFunction wrappers.
+ js_wrappers_.insert(js_wrappers_.begin(), module_->functions.size(),
+ Handle<JSFunction>::null());
+
+ // If an imported WebAssembly function gets exported, the exported function
+ // has to be identical to the imported function. Therefore we put all
+ // imported WebAssembly functions into the js_wrappers_ list.
+ for (int index = 0, end = static_cast<int>(module_->import_table.size());
+ index < end; ++index) {
+ const WasmImport& import = module_->import_table[index];
+ if (import.kind == kExternalFunction) {
+ Handle<Object> value = sanitized_imports_[index].value;
+ if (WasmExportedFunction::IsWasmExportedFunction(*value)) {
+ js_wrappers_[import.index] = Handle<JSFunction>::cast(value);
+ }
+ }
+ }
+ }
+
+ Handle<JSObject> exports_object;
+ bool is_asm_js = false;
+ switch (module_->origin) {
+ case kWasmOrigin: {
+ // Create the "exports" object.
+ exports_object = isolate_->factory()->NewJSObjectWithNullProto();
+ break;
+ }
+ case kAsmJsOrigin: {
+ Handle<JSFunction> object_function = Handle<JSFunction>(
+ isolate_->native_context()->object_function(), isolate_);
+ exports_object = isolate_->factory()->NewJSObject(object_function);
+ is_asm_js = true;
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+ instance->set_exports_object(*exports_object);
+
+ Handle<String> single_function_name =
+ isolate_->factory()->InternalizeUtf8String(AsmJs::kSingleFunctionName);
+
+ PropertyDescriptor desc;
+ desc.set_writable(is_asm_js);
+ desc.set_enumerable(true);
+ desc.set_configurable(is_asm_js);
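+ // asm.js exports stay writable and configurable to match regular JS object
+ // semantics; pure wasm exports are read-only, and the exports object is
+ // additionally frozen at the end of this function.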
+
+ // Process each export in the export table.
+ int export_index = 0; // Index into {export_wrappers}.
+ for (const WasmExport& exp : module_->export_table) {
+ Handle<String> name = WasmModuleObject::ExtractUtf8StringFromModuleBytes(
+ isolate_, module_object_, exp.name)
+ .ToHandleChecked();
+ Handle<JSObject> export_to;
+ if (is_asm_js && exp.kind == kExternalFunction &&
+ String::Equals(isolate_, name, single_function_name)) {
+ export_to = instance;
+ } else {
+ export_to = exports_object;
+ }
+
+ switch (exp.kind) {
+ case kExternalFunction: {
+ // Wrap and export the code as a JSFunction.
+ const WasmFunction& function = module_->functions[exp.index];
+ Handle<JSFunction> js_function = js_wrappers_[exp.index];
+ if (js_function.is_null()) {
+ // Wrap the exported code as a JSFunction.
+ Handle<Code> export_code =
+ export_wrappers->GetValueChecked<Code>(isolate_, export_index);
+ MaybeHandle<String> func_name;
+ if (is_asm_js) {
+ // For modules arising from asm.js, honor the names section.
+ WireBytesRef func_name_ref = module_->LookupFunctionName(
+ ModuleWireBytes(module_object_->native_module()->wire_bytes()),
+ function.func_index);
+ func_name = WasmModuleObject::ExtractUtf8StringFromModuleBytes(
+ isolate_, module_object_, func_name_ref)
+ .ToHandleChecked();
+ }
+ js_function = WasmExportedFunction::New(
+ isolate_, instance, func_name, function.func_index,
+ static_cast<int>(function.sig->parameter_count()), export_code);
+ js_wrappers_[exp.index] = js_function;
+ }
+ desc.set_value(js_function);
+ export_index++;
+ break;
+ }
+ case kExternalTable: {
+ // Export a table as a WebAssembly.Table object.
+ TableInstance& table_instance = table_instances_[exp.index];
+ const WasmTable& table = module_->tables[exp.index];
+ if (table_instance.table_object.is_null()) {
+ uint32_t maximum = table.has_maximum_size ? table.maximum_size
+ : FLAG_wasm_max_table_size;
+ table_instance.table_object =
+ WasmTableObject::New(isolate_, table.initial_size, maximum,
+ &table_instance.js_wrappers);
+ }
+ instance->set_table_object(*table_instance.table_object);
+ desc.set_value(table_instance.table_object);
+ break;
+ }
+ case kExternalMemory: {
+ // Export the memory as a WebAssembly.Memory object. A WasmMemoryObject
+ // should already be available if the module has memory, since we always
+ // create or import it when building a WasmInstanceObject.
+ DCHECK(instance->has_memory_object());
+ desc.set_value(
+ Handle<WasmMemoryObject>(instance->memory_object(), isolate_));
+ break;
+ }
+ case kExternalGlobal: {
+ const WasmGlobal& global = module_->globals[exp.index];
+ Handle<JSArrayBuffer> untagged_buffer;
+ Handle<FixedArray> tagged_buffer;
+ uint32_t offset;
+
+ if (global.mutability && global.imported) {
+ Handle<FixedArray> buffers_array(
+ instance->imported_mutable_globals_buffers(), isolate_);
+ if (global.type == kWasmAnyRef) {
+ tagged_buffer = buffers_array->GetValueChecked<FixedArray>(
+ isolate_, global.index);
+ // For anyref globals we store the relative offset in the
+ // imported_mutable_globals array instead of an absolute address.
+ Address addr = instance->imported_mutable_globals()[global.index];
+ DCHECK_LE(addr, static_cast<Address>(
+ std::numeric_limits<uint32_t>::max()));
+ offset = static_cast<uint32_t>(addr);
+ } else {
+ untagged_buffer = buffers_array->GetValueChecked<JSArrayBuffer>(
+ isolate_, global.index);
+ Address global_addr =
+ instance->imported_mutable_globals()[global.index];
+
+ size_t buffer_size = untagged_buffer->byte_length();
+ Address backing_store =
+ reinterpret_cast<Address>(untagged_buffer->backing_store());
+ CHECK(global_addr >= backing_store &&
+ global_addr < backing_store + buffer_size);
+ offset = static_cast<uint32_t>(global_addr - backing_store);
+ }
+ } else {
+ if (global.type == kWasmAnyRef) {
+ tagged_buffer = handle(instance->tagged_globals_buffer(), isolate_);
+ } else {
+ untagged_buffer =
+ handle(instance->untagged_globals_buffer(), isolate_);
+ }
+ offset = global.offset;
+ }
+
+ // Since the global's backing buffer (tagged or untagged) is always
+ // provided here, allocation should never fail.
+ Handle<WasmGlobalObject> global_obj =
+ WasmGlobalObject::New(isolate_, untagged_buffer, tagged_buffer,
+ global.type, offset, global.mutability)
+ .ToHandleChecked();
+ desc.set_value(global_obj);
+ break;
+ }
+ case kExternalException: {
+ const WasmException& exception = module_->exceptions[exp.index];
+ Handle<WasmExceptionObject> wrapper = exception_wrappers_[exp.index];
+ if (wrapper.is_null()) {
+ Handle<HeapObject> exception_tag(
+ HeapObject::cast(instance->exceptions_table()->get(exp.index)),
+ isolate_);
+ wrapper =
+ WasmExceptionObject::New(isolate_, exception.sig, exception_tag);
+ exception_wrappers_[exp.index] = wrapper;
+ }
+ desc.set_value(wrapper);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ break;
+ }
+
+ v8::Maybe<bool> status = JSReceiver::DefineOwnProperty(
+ isolate_, export_to, name, &desc, kThrowOnError);
+ if (!status.IsJust()) {
+ DisallowHeapAllocation no_gc;
+ TruncatedUserString<> trunc_name(name->GetCharVector<uint8_t>(no_gc));
+ thrower_->LinkError("export of %.*s failed.", trunc_name.length(),
+ trunc_name.start());
+ return;
+ }
+ }
+ DCHECK_EQ(export_index, export_wrappers->length());
+
+ if (module_->origin == kWasmOrigin) {
+ v8::Maybe<bool> success =
+ JSReceiver::SetIntegrityLevel(exports_object, FROZEN, kDontThrow);
+ DCHECK(success.FromMaybe(false));
+ USE(success);
+ }
+}
+
+void InstanceBuilder::InitializeTables(Handle<WasmInstanceObject> instance) {
+ size_t table_count = module_->tables.size();
+ for (size_t index = 0; index < table_count; ++index) {
+ const WasmTable& table = module_->tables[index];
+ TableInstance& table_instance = table_instances_[index];
+
+ if (!instance->has_indirect_function_table() &&
+ table.type == kWasmAnyFunc) {
+ WasmInstanceObject::EnsureIndirectFunctionTableWithMinimumSize(
+ instance, table.initial_size);
+ table_instance.table_size = table.initial_size;
+ }
+ }
+}
+
+void InstanceBuilder::LoadTableSegments(Handle<WasmInstanceObject> instance) {
+ NativeModule* native_module = module_object_->native_module();
+ for (auto& elem_segment : module_->elem_segments) {
+ // Passive segments are not copied during instantiation.
+ if (!elem_segment.active) continue;
+
+ uint32_t base = EvalUint32InitExpr(elem_segment.offset);
+ uint32_t num_entries = static_cast<uint32_t>(elem_segment.entries.size());
+ uint32_t index = elem_segment.table_index;
+ TableInstance& table_instance = table_instances_[index];
+ DCHECK(IsInBounds(base, num_entries, table_instance.table_size));
+ for (uint32_t i = 0; i < num_entries; ++i) {
+ uint32_t func_index = elem_segment.entries[i];
+ const WasmFunction* function = &module_->functions[func_index];
+ int table_index = static_cast<int>(i + base);
+
+ // Update the local dispatch table first.
+ uint32_t sig_id = module_->signature_ids[function->sig_index];
+ IndirectFunctionTableEntry(instance, table_index)
+ .Set(sig_id, instance, func_index);
+
+ if (!table_instance.table_object.is_null()) {
+ // Update the table object's other dispatch tables.
+ if (js_wrappers_[func_index].is_null()) {
+ // No JSFunction entry yet exists for this function. Create one.
+ // TODO(titzer): We compile JS->wasm wrappers for functions that are
+ // not exported but are in an exported table. This should be done
+ // at module compile time and cached instead.
+
+ Handle<Code> wrapper_code =
+ js_to_wasm_cache_.GetOrCompileJSToWasmWrapper(
+ isolate_, function->sig, function->imported);
+ MaybeHandle<String> func_name;
+ if (module_->origin == kAsmJsOrigin) {
+ // For modules arising from asm.js, honor the names section.
+ WireBytesRef func_name_ref = module_->LookupFunctionName(
+ ModuleWireBytes(native_module->wire_bytes()), func_index);
+ func_name = WasmModuleObject::ExtractUtf8StringFromModuleBytes(
+ isolate_, module_object_, func_name_ref)
+ .ToHandleChecked();
+ }
+ Handle<WasmExportedFunction> js_function = WasmExportedFunction::New(
+ isolate_, instance, func_name, func_index,
+ static_cast<int>(function->sig->parameter_count()), wrapper_code);
+ js_wrappers_[func_index] = js_function;
+ }
+ table_instance.js_wrappers->set(table_index, *js_wrappers_[func_index]);
+ // UpdateDispatchTables() updates all other dispatch tables, since
+ // we have not yet added the dispatch table we are currently building.
+ WasmTableObject::UpdateDispatchTables(
+ isolate_, table_instance.table_object, table_index, function->sig,
+ instance, func_index);
+ }
+ }
+ }
+
+ int table_count = static_cast<int>(module_->tables.size());
+ for (int index = 0; index < table_count; ++index) {
+ TableInstance& table_instance = table_instances_[index];
+
+ // Add the new dispatch table at the end to avoid redundant lookups.
+ if (!table_instance.table_object.is_null()) {
+ WasmTableObject::AddDispatchTable(isolate_, table_instance.table_object,
+ instance, index);
+ }
+ }
+}
+
+void InstanceBuilder::InitializeExceptions(
+ Handle<WasmInstanceObject> instance) {
+ Handle<FixedArray> exceptions_table(instance->exceptions_table(), isolate_);
+ for (int index = 0; index < exceptions_table->length(); ++index) {
+ if (!exceptions_table->get(index)->IsUndefined(isolate_)) continue;
+ Handle<WasmExceptionTag> exception_tag =
+ WasmExceptionTag::New(isolate_, index);
+ exceptions_table->set(index, *exception_tag);
+ }
+}
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
+
+#undef TRACE
diff --git a/deps/v8/src/wasm/module-instantiate.h b/deps/v8/src/wasm/module-instantiate.h
new file mode 100644
index 0000000000..15393969b9
--- /dev/null
+++ b/deps/v8/src/wasm/module-instantiate.h
@@ -0,0 +1,35 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_WASM_MODULE_INSTANTIATE_H_
+#define V8_WASM_MODULE_INSTANTIATE_H_
+
+namespace v8 {
+namespace internal {
+
+class Isolate;
+class JSArrayBuffer;
+class JSReceiver;
+class WasmModuleObject;
+class WasmInstanceObject;
+
+template <typename T>
+class Handle;
+template <typename T>
+class MaybeHandle;
+
+namespace wasm {
+
+class ErrorThrower;
+
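+// Instantiate {module_object} with the given {imports} and optional {memory}.
+// On failure, an error has been reported through {thrower} and an empty
+// handle is returned.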
+MaybeHandle<WasmInstanceObject> InstantiateToInstanceObject(
+ Isolate* isolate, ErrorThrower* thrower,
+ Handle<WasmModuleObject> module_object, MaybeHandle<JSReceiver> imports,
+ MaybeHandle<JSArrayBuffer> memory);
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
+
+#endif // V8_WASM_MODULE_INSTANTIATE_H_
diff --git a/deps/v8/src/wasm/signature-map.cc b/deps/v8/src/wasm/signature-map.cc
index dca3b1ef89..5f494aca62 100644
--- a/deps/v8/src/wasm/signature-map.cc
+++ b/deps/v8/src/wasm/signature-map.cc
@@ -13,22 +13,18 @@ namespace wasm {
uint32_t SignatureMap::FindOrInsert(const FunctionSig& sig) {
CHECK(!frozen_);
auto pos = map_.find(sig);
- if (pos != map_.end()) {
- return pos->second;
- } else {
- uint32_t index = next_++;
- map_[sig] = index;
- return index;
- }
+ if (pos != map_.end()) return pos->second;
+ // Indexes are returned as int32_t, thus check against their limit.
+ CHECK_GE(kMaxInt, map_.size());
+ uint32_t index = static_cast<uint32_t>(map_.size());
+ map_.insert(std::make_pair(sig, index));
+ return index;
}
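+// The canonical indices handed out by {FindOrInsert} are what indirect-call
+// dispatch compares at runtime. {Find} returns -1 for signatures that never
+// occur in this module, so such calls always fail the signature check.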
int32_t SignatureMap::Find(const FunctionSig& sig) const {
auto pos = map_.find(sig);
- if (pos != map_.end()) {
- return static_cast<int32_t>(pos->second);
- } else {
- return -1;
- }
+ if (pos == map_.end()) return -1;
+ return static_cast<int32_t>(pos->second);
}
} // namespace wasm
diff --git a/deps/v8/src/wasm/signature-map.h b/deps/v8/src/wasm/signature-map.h
index 5ed66976d5..04c6b2efa5 100644
--- a/deps/v8/src/wasm/signature-map.h
+++ b/deps/v8/src/wasm/signature-map.h
@@ -15,8 +15,6 @@ namespace internal {
namespace wasm {
-using FunctionSig = Signature<ValueType>;
-
// A signature map canonicalizes signatures into a range of indices so that
// two different {FunctionSig} instances with the same contents map to the
// same index.
@@ -37,7 +35,6 @@ class V8_EXPORT_PRIVATE SignatureMap {
void Freeze() { frozen_ = true; }
private:
- uint32_t next_ = 0;
bool frozen_ = false;
std::unordered_map<FunctionSig, uint32_t, base::hash<FunctionSig>> map_;
};
diff --git a/deps/v8/src/wasm/streaming-decoder.cc b/deps/v8/src/wasm/streaming-decoder.cc
index 15ced2316b..1896178c48 100644
--- a/deps/v8/src/wasm/streaming-decoder.cc
+++ b/deps/v8/src/wasm/streaming-decoder.cc
@@ -12,6 +12,7 @@
#include "src/wasm/decoder.h"
#include "src/wasm/leb-helper.h"
#include "src/wasm/module-decoder.h"
+#include "src/wasm/wasm-code-manager.h"
#include "src/wasm/wasm-limits.h"
#include "src/wasm/wasm-objects.h"
#include "src/wasm/wasm-result.h"
@@ -26,14 +27,21 @@ namespace internal {
namespace wasm {
void StreamingDecoder::OnBytesReceived(Vector<const uint8_t> bytes) {
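+ // While compiled module bytes are attached ({deserializing()}), only
+ // buffer the incoming wire bytes; if deserialization fails later, {Finish}
+ // replays them through the regular decoding path below.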
+ if (deserializing()) {
+ wire_bytes_for_deserializing_.insert(wire_bytes_for_deserializing_.end(),
+ bytes.begin(), bytes.end());
+ return;
+ }
+
TRACE_STREAMING("OnBytesReceived(%zu bytes)\n", bytes.size());
+
size_t current = 0;
while (ok() && current < bytes.size()) {
size_t num_bytes =
state_->ReadBytes(this, bytes.SubVector(current, bytes.size()));
current += num_bytes;
module_offset_ += num_bytes;
- if (state_->is_finished()) {
+ if (state_->offset() == state_->buffer().size()) {
state_ = state_->Next(this);
}
}
@@ -45,17 +53,28 @@ void StreamingDecoder::OnBytesReceived(Vector<const uint8_t> bytes) {
size_t StreamingDecoder::DecodingState::ReadBytes(StreamingDecoder* streaming,
Vector<const uint8_t> bytes) {
- size_t num_bytes = std::min(bytes.size(), remaining());
+ Vector<uint8_t> remaining_buf = buffer() + offset();
+ size_t num_bytes = std::min(bytes.size(), remaining_buf.size());
TRACE_STREAMING("ReadBytes(%zu bytes)\n", num_bytes);
- memcpy(buffer() + offset(), &bytes.first(), num_bytes);
+ memcpy(remaining_buf.start(), &bytes.first(), num_bytes);
set_offset(offset() + num_bytes);
return num_bytes;
}
void StreamingDecoder::Finish() {
TRACE_STREAMING("Finish\n");
- if (!ok()) {
- return;
+ if (!ok()) return;
+
+ if (deserializing()) {
+ Vector<const uint8_t> wire_bytes = VectorOf(wire_bytes_for_deserializing_);
+ // Try to deserialize the module from wire bytes and module bytes.
+ if (processor_->Deserialize(compiled_module_bytes_, wire_bytes)) return;
+
+ // Deserialization failed. Restart decoding using |wire_bytes|.
+ compiled_module_bytes_ = {};
+ DCHECK(!deserializing());
+ OnBytesReceived(wire_bytes);
+ // The decoder has received all wire bytes; fall through and finish.
}
if (!state_->is_finishing_allowed()) {
@@ -73,9 +92,9 @@ void StreamingDecoder::Finish() {
memcpy(cursor, module_header, arraysize(module_header));
cursor += arraysize(module_header);
}
- for (auto&& buffer : section_buffers_) {
+ for (const auto& buffer : section_buffers_) {
DCHECK_LE(cursor - bytes.start() + buffer->length(), total_size_);
- memcpy(cursor, buffer->bytes(), buffer->length());
+ memcpy(cursor, buffer->bytes().start(), buffer->length());
cursor += buffer->length();
}
processor_->OnFinishedStream(std::move(bytes));
@@ -83,10 +102,59 @@ void StreamingDecoder::Finish() {
void StreamingDecoder::Abort() {
TRACE_STREAMING("Abort\n");
- if (ok()) {
- ok_ = false;
- processor_->OnAbort();
+ if (!ok()) return; // Failed already.
+ processor_->OnAbort();
+ Fail();
+}
+
+void StreamingDecoder::SetModuleCompiledCallback(
+ ModuleCompiledCallback callback) {
+ DCHECK_NULL(module_compiled_callback_);
+ module_compiled_callback_ = callback;
+}
+
+bool StreamingDecoder::SetCompiledModuleBytes(
+ Vector<const uint8_t> compiled_module_bytes) {
+ compiled_module_bytes_ = compiled_module_bytes;
+ return true;
+}
+
+namespace {
+
+class TopTierCompiledCallback {
+ public:
+ TopTierCompiledCallback(std::shared_ptr<NativeModule> native_module,
+ StreamingDecoder::ModuleCompiledCallback callback)
+ : native_module_(std::move(native_module)),
+ callback_(std::move(callback)) {}
+
+ void operator()(CompilationEvent event, const WasmError* error) const {
+ if (event != CompilationEvent::kFinishedTopTierCompilation) return;
+ DCHECK_NULL(error);
+ callback_(native_module_);
+#ifdef DEBUG
+ DCHECK(!called_);
+ called_ = true;
+#endif
}
+
+ private:
+ const std::shared_ptr<NativeModule> native_module_;
+ const StreamingDecoder::ModuleCompiledCallback callback_;
+#ifdef DEBUG
+ mutable bool called_ = false;
+#endif
+};
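+// Note: the callback owns a {std::shared_ptr} to the {NativeModule}, which
+// keeps the module alive for as long as the callback stays registered with
+// the compilation state.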
+
+} // namespace
+
+void StreamingDecoder::NotifyNativeModuleCreated(
+ const std::shared_ptr<NativeModule>& native_module) {
+ if (!module_compiled_callback_) return;
+ auto* comp_state = native_module->compilation_state();
+ comp_state->AddCallback(TopTierCompiledCallback{
+ std::move(native_module), std::move(module_compiled_callback_)});
+ module_compiled_callback_ = {};
}
// An abstract class to share code among the states which decode VarInts. This
@@ -97,9 +165,7 @@ class StreamingDecoder::DecodeVarInt32 : public DecodingState {
explicit DecodeVarInt32(size_t max_value, const char* field_name)
: max_value_(max_value), field_name_(field_name) {}
- uint8_t* buffer() override { return byte_buffer_; }
-
- size_t size() const override { return kMaxVarInt32Size; }
+ Vector<uint8_t> buffer() override { return ArrayVector(byte_buffer_); }
size_t ReadBytes(StreamingDecoder* streaming,
Vector<const uint8_t> bytes) override;
@@ -113,16 +179,15 @@ class StreamingDecoder::DecodeVarInt32 : public DecodingState {
uint8_t byte_buffer_[kMaxVarInt32Size];
// The maximum valid value decoded in this state. {Next} returns an error if
// this value is exceeded.
- size_t max_value_;
- const char* field_name_;
+ const size_t max_value_;
+ const char* const field_name_;
size_t value_ = 0;
size_t bytes_consumed_ = 0;
};
class StreamingDecoder::DecodeModuleHeader : public DecodingState {
public:
- size_t size() const override { return kModuleHeaderSize; }
- uint8_t* buffer() override { return byte_buffer_; }
+ Vector<uint8_t> buffer() override { return ArrayVector(byte_buffer_); }
std::unique_ptr<DecodingState> Next(StreamingDecoder* streaming) override;
@@ -140,8 +205,7 @@ class StreamingDecoder::DecodeSectionID : public DecodingState {
explicit DecodeSectionID(uint32_t module_offset)
: module_offset_(module_offset) {}
- size_t size() const override { return 1; }
- uint8_t* buffer() override { return &id_; }
+ Vector<uint8_t> buffer() override { return {&id_, 1}; }
bool is_finishing_allowed() const override { return true; }
std::unique_ptr<DecodingState> Next(StreamingDecoder* streaming) override;
@@ -149,7 +213,7 @@ class StreamingDecoder::DecodeSectionID : public DecodingState {
private:
uint8_t id_ = 0;
// The start offset of this section in the module.
- uint32_t module_offset_;
+ const uint32_t module_offset_;
};
class StreamingDecoder::DecodeSectionLength : public DecodeVarInt32 {
@@ -163,9 +227,9 @@ class StreamingDecoder::DecodeSectionLength : public DecodeVarInt32 {
StreamingDecoder* streaming) override;
private:
- uint8_t section_id_;
+ const uint8_t section_id_;
// The start offset of this section in the module.
- uint32_t module_offset_;
+ const uint32_t module_offset_;
};
class StreamingDecoder::DecodeSectionPayload : public DecodingState {
@@ -173,16 +237,12 @@ class StreamingDecoder::DecodeSectionPayload : public DecodingState {
explicit DecodeSectionPayload(SectionBuffer* section_buffer)
: section_buffer_(section_buffer) {}
- size_t size() const override { return section_buffer_->payload_length(); }
-
- uint8_t* buffer() override {
- return section_buffer_->bytes() + section_buffer_->payload_offset();
- }
+ Vector<uint8_t> buffer() override { return section_buffer_->payload(); }
std::unique_ptr<DecodingState> Next(StreamingDecoder* streaming) override;
private:
- SectionBuffer* section_buffer_;
+ SectionBuffer* const section_buffer_;
};
class StreamingDecoder::DecodeNumberOfFunctions : public DecodeVarInt32 {
@@ -195,7 +255,7 @@ class StreamingDecoder::DecodeNumberOfFunctions : public DecodeVarInt32 {
StreamingDecoder* streaming) override;
private:
- SectionBuffer* section_buffer_;
+ SectionBuffer* const section_buffer_;
};
class StreamingDecoder::DecodeFunctionLength : public DecodeVarInt32 {
@@ -215,73 +275,76 @@ class StreamingDecoder::DecodeFunctionLength : public DecodeVarInt32 {
StreamingDecoder* streaming) override;
private:
- SectionBuffer* section_buffer_;
- size_t buffer_offset_;
- size_t num_remaining_functions_;
+ SectionBuffer* const section_buffer_;
+ const size_t buffer_offset_;
+ const size_t num_remaining_functions_;
};
class StreamingDecoder::DecodeFunctionBody : public DecodingState {
public:
explicit DecodeFunctionBody(SectionBuffer* section_buffer,
- size_t buffer_offset, size_t function_length,
+ size_t buffer_offset, size_t function_body_length,
size_t num_remaining_functions,
uint32_t module_offset)
: section_buffer_(section_buffer),
buffer_offset_(buffer_offset),
- size_(function_length),
+ function_body_length_(function_body_length),
num_remaining_functions_(num_remaining_functions),
module_offset_(module_offset) {}
- size_t size() const override { return size_; }
-
- uint8_t* buffer() override {
- return section_buffer_->bytes() + buffer_offset_;
+ Vector<uint8_t> buffer() override {
+ Vector<uint8_t> remaining_buffer =
+ section_buffer_->bytes() + buffer_offset_;
+ return remaining_buffer.SubVector(0, function_body_length_);
}
std::unique_ptr<DecodingState> Next(StreamingDecoder* streaming) override;
private:
- SectionBuffer* section_buffer_;
- size_t buffer_offset_;
- size_t size_;
- size_t num_remaining_functions_;
- uint32_t module_offset_;
+ SectionBuffer* const section_buffer_;
+ const size_t buffer_offset_;
+ const size_t function_body_length_;
+ const size_t num_remaining_functions_;
+ const uint32_t module_offset_;
};
size_t StreamingDecoder::DecodeVarInt32::ReadBytes(
StreamingDecoder* streaming, Vector<const uint8_t> bytes) {
- size_t bytes_read = std::min(bytes.size(), remaining());
+ Vector<uint8_t> buf = buffer();
+ Vector<uint8_t> remaining_buf = buf + offset();
+ size_t new_bytes = std::min(bytes.size(), remaining_buf.size());
TRACE_STREAMING("ReadBytes of a VarInt\n");
- memcpy(buffer() + offset(), &bytes.first(), bytes_read);
- Decoder decoder(buffer(), buffer() + offset() + bytes_read,
- streaming->module_offset());
+ memcpy(remaining_buf.start(), &bytes.first(), new_bytes);
+ buf.Truncate(offset() + new_bytes);
+ Decoder decoder(buf, streaming->module_offset());
value_ = decoder.consume_u32v(field_name_);
// The number of bytes we actually needed to read.
- DCHECK_GT(decoder.pc(), buffer());
- bytes_consumed_ = static_cast<size_t>(decoder.pc() - buffer());
+ DCHECK_GT(decoder.pc(), buffer().start());
+ bytes_consumed_ = static_cast<size_t>(decoder.pc() - buf.start());
TRACE_STREAMING(" ==> %zu bytes consumed\n", bytes_consumed_);
if (decoder.failed()) {
- if (offset() + bytes_read == size()) {
+ if (new_bytes == remaining_buf.size()) {
// We only report an error if we read all bytes.
- streaming->Error(decoder.toResult(nullptr));
+ streaming->Error(decoder.error());
}
- set_offset(offset() + bytes_read);
- return bytes_read;
- } else {
- DCHECK_GT(bytes_consumed_, offset());
- size_t result = bytes_consumed_ - offset();
- // We read all the bytes we needed.
- set_offset(size());
- return result;
+ set_offset(offset() + new_bytes);
+ return new_bytes;
}
+
+ // We read all the bytes we needed.
+ DCHECK_GT(bytes_consumed_, offset());
+ new_bytes = bytes_consumed_ - offset();
+ // Set the offset to the buffer size to signal that we are at the end of this
+ // section.
+ set_offset(buffer().size());
+ return new_bytes;
}
std::unique_ptr<StreamingDecoder::DecodingState>
StreamingDecoder::DecodeVarInt32::Next(StreamingDecoder* streaming) {
- if (!streaming->ok()) {
- return nullptr;
- }
+ if (!streaming->ok()) return nullptr;
+
if (value_ > max_value_) {
std::ostringstream oss;
oss << "function size > maximum function size: " << value_ << " < "
@@ -296,10 +359,8 @@ std::unique_ptr<StreamingDecoder::DecodingState>
StreamingDecoder::DecodeModuleHeader::Next(StreamingDecoder* streaming) {
TRACE_STREAMING("DecodeModuleHeader\n");
streaming->ProcessModuleHeader();
- if (streaming->ok()) {
- return base::make_unique<DecodeSectionID>(streaming->module_offset());
- }
- return nullptr;
+ if (!streaming->ok()) return nullptr;
+ return base::make_unique<DecodeSectionID>(streaming->module_offset());
}
std::unique_ptr<StreamingDecoder::DecodingState>
@@ -313,30 +374,25 @@ std::unique_ptr<StreamingDecoder::DecodingState>
StreamingDecoder::DecodeSectionLength::NextWithValue(
StreamingDecoder* streaming) {
TRACE_STREAMING("DecodeSectionLength(%zu)\n", value_);
- SectionBuffer* buf = streaming->CreateNewBuffer(
- module_offset_, section_id_, value_,
- Vector<const uint8_t>(buffer(), static_cast<int>(bytes_consumed_)));
+ SectionBuffer* buf =
+ streaming->CreateNewBuffer(module_offset_, section_id_, value_,
+ buffer().SubVector(0, bytes_consumed_));
if (!buf) return nullptr;
if (value_ == 0) {
if (section_id_ == SectionCode::kCodeSectionCode) {
return streaming->Error("Code section cannot have size 0");
- } else {
- streaming->ProcessSection(buf);
- if (streaming->ok()) {
- // There is no payload, we go to the next section immediately.
- return base::make_unique<DecodeSectionID>(streaming->module_offset_);
- } else {
- return nullptr;
- }
}
+ streaming->ProcessSection(buf);
+ if (!streaming->ok()) return nullptr;
+ // There is no payload, so we go to the next section immediately.
+ return base::make_unique<DecodeSectionID>(streaming->module_offset_);
} else {
if (section_id_ == SectionCode::kCodeSectionCode) {
// We reached the code section. All functions of the code section are put
// into the same SectionBuffer.
return base::make_unique<DecodeNumberOfFunctions>(buf);
- } else {
- return base::make_unique<DecodeSectionPayload>(buf);
}
+ return base::make_unique<DecodeSectionPayload>(buf);
}
}
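
DecodeSectionID and DecodeSectionLength together consume the standard wasm section framing: one section-id byte followed by a LEB128 payload length. A hedged, non-streaming sketch of the same framing (reusing the DecodeU32LEB helper sketched above; <cstdio> assumed):

// Walk the section headers of a complete wasm byte buffer.
void WalkSections(const uint8_t* p, const uint8_t* end) {
  p += 8;  // skip the module header: "\0asm" magic plus 4-byte version
  while (p < end) {
    uint8_t id = *p++;  // id 0 marks a custom ("unknown") section
    auto [payload_len, used] = DecodeU32LEB(p, static_cast<size_t>(end - p));
    if (used == 0) return;  // malformed length
    p += used;
    if (static_cast<size_t>(end - p) < payload_len) return;  // truncated
    std::printf("section id=%u, payload %u bytes\n", id, payload_len);
    p += payload_len;  // the streaming decoder instead enters per-section states
  }
}
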
@@ -344,10 +400,8 @@ std::unique_ptr<StreamingDecoder::DecodingState>
StreamingDecoder::DecodeSectionPayload::Next(StreamingDecoder* streaming) {
TRACE_STREAMING("DecodeSectionPayload\n");
streaming->ProcessSection(section_buffer_);
- if (streaming->ok()) {
- return base::make_unique<DecodeSectionID>(streaming->module_offset());
- }
- return nullptr;
+ if (!streaming->ok()) return nullptr;
+ return base::make_unique<DecodeSectionID>(streaming->module_offset());
}
std::unique_ptr<StreamingDecoder::DecodingState>
@@ -355,26 +409,25 @@ StreamingDecoder::DecodeNumberOfFunctions::NextWithValue(
StreamingDecoder* streaming) {
TRACE_STREAMING("DecodeNumberOfFunctions(%zu)\n", value_);
// Copy the bytes we read into the section buffer.
- if (section_buffer_->payload_length() >= bytes_consumed_) {
- memcpy(section_buffer_->bytes() + section_buffer_->payload_offset(),
- buffer(), bytes_consumed_);
- } else {
+ Vector<uint8_t> payload_buf = section_buffer_->payload();
+ if (payload_buf.size() < bytes_consumed_) {
return streaming->Error("Invalid code section length");
}
+ memcpy(payload_buf.start(), buffer().start(), bytes_consumed_);
// {value} is the number of functions.
- if (value_ > 0) {
- streaming->StartCodeSection(value_);
- if (!streaming->ok()) return nullptr;
- return base::make_unique<DecodeFunctionLength>(
- section_buffer_, section_buffer_->payload_offset() + bytes_consumed_,
- value_);
- } else {
- if (section_buffer_->payload_length() != bytes_consumed_) {
+ if (value_ == 0) {
+ if (payload_buf.size() != bytes_consumed_) {
return streaming->Error("not all code section bytes were consumed");
}
return base::make_unique<DecodeSectionID>(streaming->module_offset());
}
+
+ streaming->StartCodeSection(value_, streaming->section_buffers_.back());
+ if (!streaming->ok()) return nullptr;
+ return base::make_unique<DecodeFunctionLength>(
+ section_buffer_, section_buffer_->payload_offset() + bytes_consumed_,
+ value_);
}
std::unique_ptr<StreamingDecoder::DecodingState>
@@ -382,20 +435,17 @@ StreamingDecoder::DecodeFunctionLength::NextWithValue(
StreamingDecoder* streaming) {
TRACE_STREAMING("DecodeFunctionLength(%zu)\n", value_);
// Copy the bytes we consumed into the section buffer.
- if (section_buffer_->length() >= buffer_offset_ + bytes_consumed_) {
- memcpy(section_buffer_->bytes() + buffer_offset_, buffer(),
- bytes_consumed_);
- } else {
+ Vector<uint8_t> fun_length_buffer = section_buffer_->bytes() + buffer_offset_;
+ if (fun_length_buffer.size() < bytes_consumed_) {
return streaming->Error("Invalid code section length");
}
+ memcpy(fun_length_buffer.start(), buffer().start(), bytes_consumed_);
// {value} is the length of the function.
- if (value_ == 0) {
- return streaming->Error("Invalid function length (0)");
- } else if (buffer_offset_ + bytes_consumed_ + value_ >
- section_buffer_->length()) {
- streaming->Error("not enough code section bytes");
- return nullptr;
+ if (value_ == 0) return streaming->Error("Invalid function length (0)");
+
+ if (buffer_offset_ + bytes_consumed_ + value_ > section_buffer_->length()) {
+ return streaming->Error("not enough code section bytes");
}
return base::make_unique<DecodeFunctionBody>(
@@ -406,21 +456,19 @@ StreamingDecoder::DecodeFunctionLength::NextWithValue(
std::unique_ptr<StreamingDecoder::DecodingState>
StreamingDecoder::DecodeFunctionBody::Next(StreamingDecoder* streaming) {
TRACE_STREAMING("DecodeFunctionBody\n");
- streaming->ProcessFunctionBody(
- Vector<const uint8_t>(buffer(), static_cast<int>(size())),
- module_offset_);
- if (!streaming->ok()) {
- return nullptr;
+ streaming->ProcessFunctionBody(buffer(), module_offset_);
+ if (!streaming->ok()) return nullptr;
+
+ size_t end_offset = buffer_offset_ + function_body_length_;
+ if (num_remaining_functions_ > 0) {
+ return base::make_unique<DecodeFunctionLength>(section_buffer_, end_offset,
+ num_remaining_functions_);
}
- if (num_remaining_functions_ != 0) {
- return base::make_unique<DecodeFunctionLength>(
- section_buffer_, buffer_offset_ + size(), num_remaining_functions_);
- } else {
- if (buffer_offset_ + size() != section_buffer_->length()) {
- return streaming->Error("not all code section bytes were used");
- }
- return base::make_unique<DecodeSectionID>(streaming->module_offset());
+ // We just read the last function body. Continue with the next section.
+ if (end_offset != section_buffer_->length()) {
+ return streaming->Error("not all code section bytes were used");
}
+ return base::make_unique<DecodeSectionID>(streaming->module_offset());
}
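
The DecodeNumberOfFunctions, DecodeFunctionLength and DecodeFunctionBody states above decode the code-section payload, whose layout is count:u32v followed by count pairs of (size:u32v, body:size bytes). A minimal non-streaming equivalent, mapped to the same error strings (again assuming the DecodeU32LEB sketch from earlier):

void WalkCodeSection(const uint8_t* p, const uint8_t* end) {
  auto [count, n] = DecodeU32LEB(p, static_cast<size_t>(end - p));
  if (n == 0) return;  // malformed function count
  p += n;
  for (uint32_t i = 0; i < count; ++i) {
    auto [body_len, m] = DecodeU32LEB(p, static_cast<size_t>(end - p));
    if (m == 0) return;         // malformed length
    if (body_len == 0) return;  // "Invalid function length (0)"
    p += m;
    if (static_cast<size_t>(end - p) < body_len) {
      return;  // "not enough code section bytes"
    }
    // ProcessFunctionBody({p, body_len}, module_offset) would run here.
    p += body_len;
  }
  if (p != end) return;  // "not all code section bytes were used"
}
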
StreamingDecoder::StreamingDecoder(
@@ -428,6 +476,23 @@ StreamingDecoder::StreamingDecoder(
: processor_(std::move(processor)),
// A module always starts with a module header.
state_(new DecodeModuleHeader()) {}
+
+StreamingDecoder::SectionBuffer* StreamingDecoder::CreateNewBuffer(
+ uint32_t module_offset, uint8_t section_id, size_t length,
+ Vector<const uint8_t> length_bytes) {
+ // Check the order of sections. Unknown sections can appear at any position.
+ if (section_id != kUnknownSectionCode) {
+ if (section_id < next_section_id_) {
+ Error("Unexpected section");
+ return nullptr;
+ }
+ next_section_id_ = section_id + 1;
+ }
+ section_buffers_.emplace_back(std::make_shared<SectionBuffer>(
+ module_offset, section_id, length, length_bytes));
+ return section_buffers_.back().get();
+}
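
The rule enforced here: ids of known sections must strictly increase over the module, while custom sections (id 0, kUnknownSectionCode) may appear at any position. The check in isolation, as a sketch:

// {next_expected} starts at kFirstSectionInModule.
bool CheckSectionOrder(uint8_t id, uint8_t* next_expected) {
  if (id == 0) return true;               // custom section: any position
  if (id < *next_expected) return false;  // -> Error("Unexpected section")
  *next_expected = static_cast<uint8_t>(id + 1);
  return true;
}
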
+
} // namespace wasm
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/wasm/streaming-decoder.h b/deps/v8/src/wasm/streaming-decoder.h
index e14c32daf3..d4e3ff7d14 100644
--- a/deps/v8/src/wasm/streaming-decoder.h
+++ b/deps/v8/src/wasm/streaming-decoder.h
@@ -5,14 +5,19 @@
#ifndef V8_WASM_STREAMING_DECODER_H_
#define V8_WASM_STREAMING_DECODER_H_
+#include <memory>
#include <vector>
-#include "src/isolate.h"
-#include "src/wasm/module-decoder.h"
-#include "src/wasm/wasm-objects.h"
+
+#include "src/base/macros.h"
+#include "src/vector.h"
+#include "src/wasm/compilation-environment.h"
+#include "src/wasm/wasm-constants.h"
+#include "src/wasm/wasm-result.h"
namespace v8 {
namespace internal {
namespace wasm {
+class NativeModule;
// This class is an interface for the StreamingDecoder to start the processing
// of the incoming module bytes.
@@ -31,8 +36,8 @@ class V8_EXPORT_PRIVATE StreamingProcessor {
// Process the start of the code section. Returns true if the processing
// finished successfully and the decoding should continue.
- virtual bool ProcessCodeSectionHeader(size_t num_functions,
- uint32_t offset) = 0;
+ virtual bool ProcessCodeSectionHeader(size_t num_functions, uint32_t offset,
+ std::shared_ptr<WireBytesStorage>) = 0;
// Process a function body. Returns true if the processing finished
// successfully and the decoding should continue.
@@ -46,9 +51,13 @@ class V8_EXPORT_PRIVATE StreamingProcessor {
// empty array is passed.
virtual void OnFinishedStream(OwnedVector<uint8_t> bytes) = 0;
// Report an error detected in the StreamingDecoder.
- virtual void OnError(DecodeResult result) = 0;
+ virtual void OnError(const WasmError&) = 0;
// Report the abortion of the stream.
virtual void OnAbort() = 0;
+
+ // Attempt to deserialize the module. Supports embedder caching.
+ virtual bool Deserialize(Vector<const uint8_t> module_bytes,
+ Vector<const uint8_t> wire_bytes) = 0;
};
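
For orientation, a hypothetical minimal implementation of the updated interface. It covers only the virtuals visible in this diff (the ProcessModuleHeader and ProcessSection signatures are inferred from their call sites in streaming-decoder.cc; any pure virtuals not shown in this excerpt would still need overriding):

class NullProcessor : public StreamingProcessor {
 public:
  bool ProcessModuleHeader(Vector<const uint8_t>, uint32_t) override {
    return true;  // keep decoding
  }
  bool ProcessSection(SectionCode, Vector<const uint8_t>, uint32_t) override {
    return true;
  }
  bool ProcessCodeSectionHeader(size_t, uint32_t,
                                std::shared_ptr<WireBytesStorage>) override {
    return true;
  }
  bool ProcessFunctionBody(Vector<const uint8_t>, uint32_t) override {
    return true;
  }
  void OnFinishedStream(OwnedVector<uint8_t>) override {}
  void OnError(const WasmError&) override {}
  void OnAbort() override {}
  bool Deserialize(Vector<const uint8_t>, Vector<const uint8_t>) override {
    return false;  // no cached module to offer
  }
};
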
// The StreamingDecoder takes a sequence of byte arrays, each received by a call
@@ -67,11 +76,18 @@ class V8_EXPORT_PRIVATE StreamingDecoder {
// Notify the StreamingDecoder that compilation ended and the
// StreamingProcessor should not be called anymore.
- void NotifyCompilationEnded() {
- // We set {ok_} to false to turn all future calls to the StreamingDecoder
- // into no-ops.
- ok_ = false;
- }
+ void NotifyCompilationEnded() { Fail(); }
+
+ // Caching support.
+ // Sets the callback that is called after the module is fully compiled.
+ using ModuleCompiledCallback =
+ std::function<void(const std::shared_ptr<NativeModule>&)>;
+ void SetModuleCompiledCallback(ModuleCompiledCallback callback);
+ // Passes previously compiled module bytes from the embedder's cache.
+ bool SetCompiledModuleBytes(Vector<const uint8_t> compiled_module_bytes);
+
+ void NotifyNativeModuleCreated(
+ const std::shared_ptr<NativeModule>& native_module);
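
A hypothetical embedder-side wiring of the three caching hooks above (MyCache and its Store method are invented for illustration):

void WireUpCache(StreamingDecoder* decoder, MyCache* cache,
                 Vector<const uint8_t> cached_module_bytes) {
  // Offer previously serialized bytes; the decoder may then deserialize
  // instead of recompiling.
  if (!cached_module_bytes.is_empty()) {
    decoder->SetCompiledModuleBytes(cached_module_bytes);
  }
  // Persist the finished module for the next run.
  decoder->SetModuleCompiledCallback(
      [cache](const std::shared_ptr<NativeModule>& native_module) {
        cache->Store(native_module);  // hypothetical embedder API
      });
}
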
private:
// TODO(ahaas): Put the whole private state of the StreamingDecoder into the
@@ -80,7 +96,7 @@ class V8_EXPORT_PRIVATE StreamingDecoder {
// The SectionBuffer is the data object for the content of a single section.
// It stores all bytes of the section (including section id and section
// length), and the offset where the actual payload starts.
- class SectionBuffer {
+ class SectionBuffer : public WireBytesStorage {
public:
// id: The section id.
// payload_length: The length of the payload.
@@ -89,32 +105,34 @@ class V8_EXPORT_PRIVATE StreamingDecoder {
Vector<const uint8_t> length_bytes)
: // ID + length + payload
module_offset_(module_offset),
- length_(1 + length_bytes.length() + payload_length),
- bytes_(new uint8_t[length_]),
+ bytes_(OwnedVector<uint8_t>::New(1 + length_bytes.length() +
+ payload_length)),
payload_offset_(1 + length_bytes.length()) {
- bytes_[0] = id;
- memcpy(bytes_.get() + 1, &length_bytes.first(), length_bytes.length());
+ bytes_.start()[0] = id;
+ memcpy(bytes_.start() + 1, &length_bytes.first(), length_bytes.length());
}
SectionCode section_code() const {
- return static_cast<SectionCode>(bytes_[0]);
+ return static_cast<SectionCode>(bytes_.start()[0]);
+ }
+
+ Vector<const uint8_t> GetCode(WireBytesRef ref) const final {
+ DCHECK_LE(module_offset_, ref.offset());
+ uint32_t offset_in_code_buffer = ref.offset() - module_offset_;
+ return bytes().SubVector(offset_in_code_buffer,
+ offset_in_code_buffer + ref.length());
}
uint32_t module_offset() const { return module_offset_; }
- uint8_t* bytes() const { return bytes_.get(); }
- size_t length() const { return length_; }
+ Vector<uint8_t> bytes() const { return bytes_.as_vector(); }
+ Vector<uint8_t> payload() const { return bytes() + payload_offset_; }
+ size_t length() const { return bytes_.size(); }
size_t payload_offset() const { return payload_offset_; }
- size_t payload_length() const { return length_ - payload_offset_; }
- Vector<const uint8_t> payload() const {
- return Vector<const uint8_t>(bytes() + payload_offset(),
- payload_length());
- }
private:
- uint32_t module_offset_;
- size_t length_;
- std::unique_ptr<uint8_t[]> bytes_;
- size_t payload_offset_;
+ const uint32_t module_offset_;
+ const OwnedVector<uint8_t> bytes_;
+ const size_t payload_offset_;
};
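
To make the GetCode() arithmetic above concrete:

// If this SectionBuffer starts at module_offset_ == 100 and a function's
// code is referenced as WireBytesRef{offset: 120, length: 8}, then
// offset_in_code_buffer == 120 - 100 == 20, and GetCode returns
// bytes().SubVector(20, 28).
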
// The decoding of a stream of wasm module bytes is organized in states. Each
@@ -155,16 +173,11 @@ class V8_EXPORT_PRIVATE StreamingDecoder {
// Returns the next state of the streaming decoding.
virtual std::unique_ptr<DecodingState> Next(
StreamingDecoder* streaming) = 0;
- // The number of bytes to be received.
- virtual size_t size() const = 0;
// The buffer to store the received bytes.
- virtual uint8_t* buffer() = 0;
+ virtual Vector<uint8_t> buffer() = 0;
// The number of bytes which were already received.
size_t offset() const { return offset_; }
void set_offset(size_t value) { offset_ = value; }
- // The number of bytes which are still needed.
- size_t remaining() const { return size() - offset(); }
- bool is_finished() const { return offset() == size(); }
// A flag to indicate if finishing the streaming decoder is allowed without
// error.
virtual bool is_finishing_allowed() const { return false; }
@@ -185,82 +198,79 @@ class V8_EXPORT_PRIVATE StreamingDecoder {
class DecodeFunctionBody;
// Creates a buffer for the next section of the module.
- SectionBuffer* CreateNewBuffer(uint32_t module_offset, uint8_t id,
+ SectionBuffer* CreateNewBuffer(uint32_t module_offset, uint8_t section_id,
size_t length,
- Vector<const uint8_t> length_bytes) {
- // Check the order of sections. Unknown sections can appear at any position.
- if (id != kUnknownSectionCode) {
- if (id < next_section_id_) {
- Error("Unexpected section");
- return nullptr;
- }
- next_section_id_ = id + 1;
- }
- section_buffers_.emplace_back(
- new SectionBuffer(module_offset, id, length, length_bytes));
- return section_buffers_.back().get();
- }
+ Vector<const uint8_t> length_bytes);
- std::unique_ptr<DecodingState> Error(DecodeResult result) {
- if (ok_) processor_->OnError(std::move(result));
- ok_ = false;
+ std::unique_ptr<DecodingState> Error(const WasmError& error) {
+ if (ok()) processor_->OnError(error);
+ Fail();
return std::unique_ptr<DecodingState>(nullptr);
}
std::unique_ptr<DecodingState> Error(std::string message) {
- DecodeResult result(nullptr);
- result.error(module_offset_ - 1, std::move(message));
- return Error(std::move(result));
+ return Error(WasmError{module_offset_ - 1, std::move(message)});
}
void ProcessModuleHeader() {
- if (!ok_) return;
- if (!processor_->ProcessModuleHeader(
- Vector<const uint8_t>(state_->buffer(),
- static_cast<int>(state_->size())),
- 0)) {
- ok_ = false;
- }
+ if (!ok()) return;
+ if (!processor_->ProcessModuleHeader(state_->buffer(), 0)) Fail();
}
void ProcessSection(SectionBuffer* buffer) {
- if (!ok_) return;
+ if (!ok()) return;
if (!processor_->ProcessSection(
buffer->section_code(), buffer->payload(),
buffer->module_offset() +
static_cast<uint32_t>(buffer->payload_offset()))) {
- ok_ = false;
+ Fail();
}
}
- void StartCodeSection(size_t num_functions) {
- if (!ok_) return;
+ void StartCodeSection(size_t num_functions,
+ std::shared_ptr<WireBytesStorage> wire_bytes_storage) {
+ if (!ok()) return;
// The offset passed to {ProcessCodeSectionHeader} is an error offset and
// not the start offset of a buffer. Therefore we need the -1 here.
if (!processor_->ProcessCodeSectionHeader(num_functions,
- module_offset() - 1)) {
- ok_ = false;
+ module_offset() - 1,
+ std::move(wire_bytes_storage))) {
+ Fail();
}
}
void ProcessFunctionBody(Vector<const uint8_t> bytes,
uint32_t module_offset) {
- if (!ok_) return;
- if (!processor_->ProcessFunctionBody(bytes, module_offset)) ok_ = false;
+ if (!ok()) return;
+ if (!processor_->ProcessFunctionBody(bytes, module_offset)) Fail();
}
- bool ok() const { return ok_; }
+ void Fail() {
+ // We reset the {processor_} field to represent failure. This also ensures
+ // that we do not accidentally call further methods on the processor after
+ // failure.
+ processor_.reset();
+ }
+
+ bool ok() const { return processor_ != nullptr; }
uint32_t module_offset() const { return module_offset_; }
+ bool deserializing() const { return !compiled_module_bytes_.is_empty(); }
+
std::unique_ptr<StreamingProcessor> processor_;
- bool ok_ = true;
std::unique_ptr<DecodingState> state_;
- std::vector<std::unique_ptr<SectionBuffer>> section_buffers_;
+ std::vector<std::shared_ptr<SectionBuffer>> section_buffers_;
uint32_t module_offset_ = 0;
size_t total_size_ = 0;
uint8_t next_section_id_ = kFirstSectionInModule;
+ // Caching support.
+ ModuleCompiledCallback module_compiled_callback_ = nullptr;
+ // We need wire bytes in an array for deserializing cached modules.
+ std::vector<uint8_t> wire_bytes_for_deserializing_;
+ Vector<const uint8_t> compiled_module_bytes_;
+
DISALLOW_COPY_AND_ASSIGN(StreamingDecoder);
};
diff --git a/deps/v8/src/wasm/value-type.h b/deps/v8/src/wasm/value-type.h
index d34bc4bca9..02e9c79bd2 100644
--- a/deps/v8/src/wasm/value-type.h
+++ b/deps/v8/src/wasm/value-type.h
@@ -10,6 +10,10 @@
namespace v8 {
namespace internal {
+
+template <typename T>
+class Signature;
+
namespace wasm {
enum ValueType : uint8_t {
@@ -25,6 +29,8 @@ enum ValueType : uint8_t {
kWasmVar,
};
+using FunctionSig = Signature<ValueType>;
+
inline size_t hash_value(ValueType type) { return static_cast<size_t>(type); }
// TODO(clemensh): Compute memtype and size from ValueType once we have c++14
@@ -263,6 +269,8 @@ class V8_EXPORT_PRIVATE ValueTypes {
case kWasmF64:
return MachineRepresentation::kFloat64;
case kWasmAnyRef:
+ case kWasmAnyFunc:
+ case kWasmExceptRef:
return MachineRepresentation::kTaggedPointer;
case kWasmS128:
return MachineRepresentation::kSimd128;
@@ -306,6 +314,8 @@ class V8_EXPORT_PRIVATE ValueTypes {
return 'd';
case kWasmAnyRef:
return 'r';
+ case kWasmAnyFunc:
+ return 'a';
case kWasmS128:
return 's';
case kWasmStmt:
@@ -328,7 +338,11 @@ class V8_EXPORT_PRIVATE ValueTypes {
case kWasmF64:
return "f64";
case kWasmAnyRef:
- return "ref";
+ return "anyref";
+ case kWasmAnyFunc:
+ return "anyfunc";
+ case kWasmExceptRef:
+ return "exn";
case kWasmS128:
return "s128";
case kWasmStmt:
diff --git a/deps/v8/src/wasm/wasm-code-manager.cc b/deps/v8/src/wasm/wasm-code-manager.cc
index 6495421b8f..f55508c7a6 100644
--- a/deps/v8/src/wasm/wasm-code-manager.cc
+++ b/deps/v8/src/wasm/wasm-code-manager.cc
@@ -10,14 +10,17 @@
#include "src/base/adapters.h"
#include "src/base/macros.h"
#include "src/base/platform/platform.h"
-#include "src/codegen.h"
#include "src/disassembler.h"
#include "src/globals.h"
+#include "src/log.h"
#include "src/macro-assembler-inl.h"
#include "src/macro-assembler.h"
#include "src/objects-inl.h"
+#include "src/ostreams.h"
+#include "src/wasm/compilation-environment.h"
#include "src/wasm/function-compiler.h"
#include "src/wasm/jump-table-assembler.h"
+#include "src/wasm/wasm-import-wrapper-cache-inl.h"
#include "src/wasm/wasm-module.h"
#include "src/wasm/wasm-objects-inl.h"
#include "src/wasm/wasm-objects.h"
@@ -103,13 +106,20 @@ base::AddressRegion DisjointAllocationPool::Allocate(size_t size) {
Address WasmCode::constant_pool() const {
if (FLAG_enable_embedded_constant_pool) {
- if (constant_pool_offset_ < instructions().size()) {
+ if (constant_pool_offset_ < code_comments_offset_) {
return instruction_start() + constant_pool_offset_;
}
}
return kNullAddress;
}
+Address WasmCode::code_comments() const {
+ if (code_comments_offset_ < unpadded_binary_size_) {
+ return instruction_start() + code_comments_offset_;
+ }
+ return kNullAddress;
+}
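
The offsets used here imply the following layout of a WasmCode instruction area (consistent with the DCHECKs added to the WasmCode constructor in wasm-code-manager.h below; the relative order of the safepoint and handler tables follows the usual CodeDesc layout, and each region may be empty):

//   +------------------+ 0
//   | instructions     |
//   +------------------+ safepoint_table_offset_
//   | safepoint table  |
//   +------------------+ handler_table_offset_
//   | handler table    |
//   +------------------+ constant_pool_offset_
//   | constant pool    |
//   +------------------+ code_comments_offset_
//   | code comments    |
//   +------------------+ unpadded_binary_size_
//   | padding          |
//   +------------------+ instructions().size()
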
+
size_t WasmCode::trap_handler_index() const {
CHECK(HasTrapHandlerIndex());
return static_cast<size_t>(trap_handler_index_);
@@ -148,11 +158,11 @@ void WasmCode::LogCode(Isolate* isolate) const {
ModuleWireBytes wire_bytes(native_module()->wire_bytes());
// TODO(herhut): Allow logging code without an on-heap round-trip of the name.
- ModuleEnv* module_env = GetModuleEnv(native_module()->compilation_state());
WireBytesRef name_ref =
- module_env->module->LookupFunctionName(wire_bytes, index());
+ native_module()->module()->LookupFunctionName(wire_bytes, index());
WasmName name_vec = wire_bytes.GetNameOrNull(name_ref);
if (!name_vec.is_empty()) {
+ HandleScope scope(isolate);
MaybeHandle<String> maybe_name = isolate->factory()->NewStringFromUtf8(
Vector<const char>::cast(name_vec));
Handle<String> name;
@@ -168,7 +178,8 @@ void WasmCode::LogCode(Isolate* isolate) const {
{cname.get(), static_cast<size_t>(name_length)}));
} else {
EmbeddedVector<char, 32> generated_name;
- SNPrintF(generated_name, "wasm-function[%d]", index());
+ int length = SNPrintF(generated_name, "wasm-function[%d]", index());
+ generated_name.Truncate(length);
PROFILE(isolate, CodeCreateEvent(CodeEventListener::FUNCTION_TAG, this,
generated_name));
}
@@ -223,9 +234,7 @@ void WasmCode::Validate() const {
CHECK(contains(target));
break;
}
- case RelocInfo::JS_TO_WASM_CALL:
case RelocInfo::EXTERNAL_REFERENCE:
- case RelocInfo::COMMENT:
case RelocInfo::CONST_POOL:
case RelocInfo::VENEER_POOL:
// These are OK to appear.
@@ -237,6 +246,14 @@ void WasmCode::Validate() const {
#endif
}
+void WasmCode::MaybePrint(const char* name) const {
+ // Determines whether flags want this code to be printed.
+ if ((FLAG_print_wasm_code && kind() == kFunction) ||
+ (FLAG_print_wasm_stub_code && kind() != kFunction) || FLAG_print_code) {
+ Print(name);
+ }
+}
+
void WasmCode::Print(const char* name) const {
StdoutStream os;
os << "--- WebAssembly code ---\n";
@@ -250,12 +267,13 @@ void WasmCode::Disassemble(const char* name, std::ostream& os,
if (!IsAnonymous()) os << "index: " << index() << "\n";
os << "kind: " << GetWasmCodeKindAsString(kind_) << "\n";
os << "compiler: " << (is_liftoff() ? "Liftoff" : "TurboFan") << "\n";
- size_t body_size = instructions().size();
- os << "Body (size = " << body_size << ")\n";
+ size_t padding = instructions().size() - unpadded_binary_size_;
+ os << "Body (size = " << instructions().size() << " = "
+ << unpadded_binary_size_ << " + " << padding << " padding)\n";
#ifdef ENABLE_DISASSEMBLER
- size_t instruction_size = body_size;
- if (constant_pool_offset_ && constant_pool_offset_ < instruction_size) {
+ size_t instruction_size = unpadded_binary_size_;
+ if (constant_pool_offset_ < instruction_size) {
instruction_size = constant_pool_offset_;
}
if (safepoint_table_offset_ && safepoint_table_offset_ < instruction_size) {
@@ -299,12 +317,40 @@ void WasmCode::Disassemble(const char* name, std::ostream& os,
os << "\n";
}
+ if (safepoint_table_offset_ > 0) {
+ SafepointTable table(instruction_start(), safepoint_table_offset_,
+ stack_slots_);
+ os << "Safepoints (size = " << table.size() << ")\n";
+ for (uint32_t i = 0; i < table.length(); i++) {
+ uintptr_t pc_offset = table.GetPcOffset(i);
+ os << reinterpret_cast<const void*>(instruction_start() + pc_offset);
+ os << std::setw(6) << std::hex << pc_offset << " " << std::dec;
+ table.PrintEntry(i, os);
+ os << " (sp -> fp)";
+ SafepointEntry entry = table.GetEntry(i);
+ if (entry.trampoline_pc() != -1) {
+ os << " trampoline: " << std::hex << entry.trampoline_pc() << std::dec;
+ }
+ if (entry.has_deoptimization_index()) {
+ os << " deopt: " << std::setw(6) << entry.deoptimization_index();
+ }
+ os << "\n";
+ }
+ os << "\n";
+ }
+
os << "RelocInfo (size = " << reloc_info_.size() << ")\n";
for (RelocIterator it(instructions(), reloc_info(), constant_pool());
!it.done(); it.next()) {
it.rinfo()->Print(nullptr, os);
}
os << "\n";
+
+ if (code_comments_offset() < unpadded_binary_size_) {
+ Address code_comments = reinterpret_cast<Address>(instructions().start() +
+ code_comments_offset());
+ PrintCodeCommentsSection(os, code_comments);
+ }
#endif // ENABLE_DISASSEMBLER
}
@@ -337,16 +383,17 @@ WasmCode::~WasmCode() {
NativeModule::NativeModule(Isolate* isolate, const WasmFeatures& enabled,
bool can_request_more, VirtualMemory code_space,
WasmCodeManager* code_manager,
- std::shared_ptr<const WasmModule> module,
- const ModuleEnv& env)
+ std::shared_ptr<const WasmModule> module)
: enabled_features_(enabled),
module_(std::move(module)),
- compilation_state_(NewCompilationState(isolate, env)),
+ compilation_state_(CompilationState::New(isolate, this)),
+ import_wrapper_cache_(std::unique_ptr<WasmImportWrapperCache>(
+ new WasmImportWrapperCache(this))),
free_code_space_(code_space.region()),
- wasm_code_manager_(code_manager),
+ code_manager_(code_manager),
can_request_more_memory_(can_request_more),
- use_trap_handler_(env.use_trap_handler) {
- DCHECK_EQ(module_.get(), env.module);
+ use_trap_handler_(trap_handler::IsTrapHandlerEnabled() ? kUseTrapHandler
+ : kNoTrapHandler) {
DCHECK_NOT_NULL(module_);
owned_code_space_.emplace_back(std::move(code_space));
owned_code_.reserve(num_functions());
@@ -382,10 +429,16 @@ void NativeModule::LogWasmCodes(Isolate* isolate) {
}
}
+CompilationEnv NativeModule::CreateCompilationEnv() const {
+ return {module(), use_trap_handler_, kRuntimeExceptionSupport,
+ enabled_features_};
+}
+
WasmCode* NativeModule::AddOwnedCode(
uint32_t index, Vector<const byte> instructions, uint32_t stack_slots,
size_t safepoint_table_offset, size_t handler_table_offset,
- size_t constant_pool_offset,
+ size_t constant_pool_offset, size_t code_comments_offset,
+ size_t unpadded_binary_size,
OwnedVector<trap_handler::ProtectedInstructionData> protected_instructions,
OwnedVector<const byte> reloc_info,
OwnedVector<const byte> source_position_table, WasmCode::Kind kind,
@@ -394,14 +447,14 @@ WasmCode* NativeModule::AddOwnedCode(
{
// Both allocation and insertion in owned_code_ happen in the same critical
// section, thus ensuring owned_code_'s elements are rarely if ever moved.
- base::LockGuard<base::Mutex> lock(&allocation_mutex_);
+ base::MutexGuard lock(&allocation_mutex_);
Vector<byte> executable_buffer = AllocateForCode(instructions.size());
// Ownership will be transferred to {owned_code_} below.
- code = new WasmCode(this, index, executable_buffer, stack_slots,
- safepoint_table_offset, handler_table_offset,
- constant_pool_offset, std::move(protected_instructions),
- std::move(reloc_info), std::move(source_position_table),
- kind, tier);
+ code = new WasmCode(
+ this, index, executable_buffer, stack_slots, safepoint_table_offset,
+ handler_table_offset, constant_pool_offset, code_comments_offset,
+ unpadded_binary_size, std::move(protected_instructions),
+ std::move(reloc_info), std::move(source_position_table), kind, tier);
if (owned_code_.empty() ||
code->instruction_start() > owned_code_.back()->instruction_start()) {
@@ -424,23 +477,6 @@ WasmCode* NativeModule::AddOwnedCode(
return code;
}
-WasmCode* NativeModule::AddImportWrapper(Handle<Code> code, uint32_t index) {
- // TODO(wasm): Adding instance-specific wasm-to-js wrappers as owned code to
- // this NativeModule is a memory leak until the whole NativeModule dies.
- WasmCode* ret = AddAnonymousCode(code, WasmCode::kWasmToJsWrapper);
- DCHECK_LT(index, module_->num_imported_functions);
- ret->index_ = index;
- return ret;
-}
-
-WasmCode* NativeModule::AddInterpreterEntry(Handle<Code> code, uint32_t index) {
- WasmCode* ret = AddAnonymousCode(code, WasmCode::kInterpreterEntry);
- ret->index_ = index;
- base::LockGuard<base::Mutex> lock(&allocation_mutex_);
- InstallCode(ret);
- return ret;
-}
-
WasmCode* NativeModule::AddCodeForTesting(Handle<Code> code) {
WasmCode* ret = AddAnonymousCode(code, WasmCode::kFunction);
return ret;
@@ -463,6 +499,7 @@ void NativeModule::SetLazyBuiltin(Handle<Code> code) {
}
void NativeModule::SetRuntimeStubs(Isolate* isolate) {
+ HandleScope scope(isolate);
DCHECK_NULL(runtime_stub_table_[0]); // Only called once.
#define COPY_BUILTIN(Name) \
runtime_stub_table_[WasmCode::k##Name] = \
@@ -501,6 +538,8 @@ WasmCode* NativeModule::AddAnonymousCode(Handle<Code> code, WasmCode::Kind kind,
safepoint_table_offset, // safepoint_table_offset
code->handler_table_offset(), // handler_table_offset
code->constant_pool_offset(), // constant_pool_offset
+ code->code_comments_offset(), // code_comments_offset
+ instructions.size(), // unpadded_binary_size
{}, // protected_instructions
std::move(reloc_info), // reloc_info
std::move(source_pos), // source positions
@@ -532,7 +571,7 @@ WasmCode* NativeModule::AddAnonymousCode(Handle<Code> code, WasmCode::Kind kind,
// made while iterating over the RelocInfo above.
Assembler::FlushICache(ret->instructions().start(),
ret->instructions().size());
- if (FLAG_print_code || FLAG_print_wasm_code) ret->Print(name);
+ ret->MaybePrint(name);
ret->Validate();
return ret;
}
@@ -541,16 +580,18 @@ WasmCode* NativeModule::AddCode(
uint32_t index, const CodeDesc& desc, uint32_t stack_slots,
size_t safepoint_table_offset, size_t handler_table_offset,
OwnedVector<trap_handler::ProtectedInstructionData> protected_instructions,
- OwnedVector<const byte> source_pos_table, WasmCode::Tier tier) {
+ OwnedVector<const byte> source_pos_table, WasmCode::Kind kind,
+ WasmCode::Tier tier) {
OwnedVector<byte> reloc_info = OwnedVector<byte>::New(desc.reloc_size);
memcpy(reloc_info.start(), desc.buffer + desc.buffer_size - desc.reloc_size,
desc.reloc_size);
- WasmCode* ret =
- AddOwnedCode(index, {desc.buffer, static_cast<size_t>(desc.instr_size)},
- stack_slots, safepoint_table_offset, handler_table_offset,
- desc.instr_size - desc.constant_pool_size,
- std::move(protected_instructions), std::move(reloc_info),
- std::move(source_pos_table), WasmCode::kFunction, tier);
+
+ WasmCode* ret = AddOwnedCode(
+ index, {desc.buffer, static_cast<size_t>(desc.instr_size)}, stack_slots,
+ safepoint_table_offset, handler_table_offset, desc.constant_pool_offset(),
+ desc.code_comments_offset(), desc.instr_size,
+ std::move(protected_instructions), std::move(reloc_info),
+ std::move(source_pos_table), kind, tier);
// Apply the relocation delta by iterating over the RelocInfo.
intptr_t delta = ret->instructions().start() - desc.buffer;
@@ -581,7 +622,7 @@ WasmCode* NativeModule::AddCode(
// made while iterating over the RelocInfo above.
Assembler::FlushICache(ret->instructions().start(),
ret->instructions().size());
- if (FLAG_print_code || FLAG_print_wasm_code) ret->Print();
+ ret->MaybePrint();
ret->Validate();
return ret;
}
@@ -589,20 +630,22 @@ WasmCode* NativeModule::AddCode(
WasmCode* NativeModule::AddDeserializedCode(
uint32_t index, Vector<const byte> instructions, uint32_t stack_slots,
size_t safepoint_table_offset, size_t handler_table_offset,
- size_t constant_pool_offset,
+ size_t constant_pool_offset, size_t code_comments_offset,
+ size_t unpadded_binary_size,
OwnedVector<trap_handler::ProtectedInstructionData> protected_instructions,
OwnedVector<const byte> reloc_info,
OwnedVector<const byte> source_position_table, WasmCode::Tier tier) {
WasmCode* code =
AddOwnedCode(index, instructions, stack_slots, safepoint_table_offset,
handler_table_offset, constant_pool_offset,
+ code_comments_offset, unpadded_binary_size,
std::move(protected_instructions), std::move(reloc_info),
std::move(source_position_table), WasmCode::kFunction, tier);
if (!code->protected_instructions_.is_empty()) {
code->RegisterTrapHandlerData();
}
- base::LockGuard<base::Mutex> lock(&allocation_mutex_);
+ base::MutexGuard lock(&allocation_mutex_);
InstallCode(code);
// Note: we do not flush the i-cache here, since the code needs to be
// relocated anyway. The caller is responsible for flushing the i-cache later.
@@ -610,21 +653,27 @@ WasmCode* NativeModule::AddDeserializedCode(
}
void NativeModule::PublishCode(WasmCode* code) {
- base::LockGuard<base::Mutex> lock(&allocation_mutex_);
+ base::MutexGuard lock(&allocation_mutex_);
// Skip publishing code if there is an active redirection to the interpreter
// for the given function index, in order to preserve the redirection.
- if (has_code(code->index()) &&
- this->code(code->index())->kind() == WasmCode::kInterpreterEntry) {
- return;
- }
+ if (has_interpreter_redirection(code->index())) return;
+
if (!code->protected_instructions_.is_empty()) {
code->RegisterTrapHandlerData();
}
InstallCode(code);
}
+void NativeModule::PublishInterpreterEntry(WasmCode* code,
+ uint32_t func_index) {
+ code->index_ = func_index;
+ base::MutexGuard lock(&allocation_mutex_);
+ InstallCode(code);
+ SetInterpreterRedirection(func_index);
+}
+
std::vector<WasmCode*> NativeModule::SnapshotCodeTable() const {
- base::LockGuard<base::Mutex> lock(&allocation_mutex_);
+ base::MutexGuard lock(&allocation_mutex_);
std::vector<WasmCode*> result;
result.reserve(code_table().size());
for (WasmCode* code : code_table()) result.push_back(code);
@@ -640,9 +689,11 @@ WasmCode* NativeModule::CreateEmptyJumpTable(uint32_t num_wasm_functions) {
return AddOwnedCode(WasmCode::kAnonymousFuncIndex, // index
instructions.as_vector(), // instructions
0, // stack_slots
- 0, // safepoint_table_offset
- 0, // handler_table_offset
- 0, // constant_pool_offset
+ instructions.size(), // safepoint_table_offset
+ instructions.size(), // handler_table_offset
+ instructions.size(), // constant_pool_offset
+ instructions.size(), // code_comments_offset
+ instructions.size(), // unpadded_binary_size
{}, // protected_instructions
{}, // reloc_info
{}, // source_pos
@@ -654,8 +705,10 @@ void NativeModule::InstallCode(WasmCode* code) {
DCHECK_LT(code->index(), num_functions());
DCHECK_LE(module_->num_imported_functions, code->index());
- // Update code table.
- code_table_[code->index() - module_->num_imported_functions] = code;
+ // Update code table, except for interpreter entries.
+ if (code->kind() != WasmCode::kInterpreterEntry) {
+ code_table_[code->index() - module_->num_imported_functions] = code;
+ }
// Patch jump table.
uint32_t slot_idx = code->index() - module_->num_imported_functions;
@@ -669,8 +722,8 @@ Vector<byte> NativeModule::AllocateForCode(size_t size) {
v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
// This happens under a lock assumed by the caller.
size = RoundUp(size, kCodeAlignment);
- base::AddressRegion mem = free_code_space_.Allocate(size);
- if (mem.is_empty()) {
+ base::AddressRegion code_space = free_code_space_.Allocate(size);
+ if (code_space.is_empty()) {
if (!can_request_more_memory_) {
V8::FatalProcessOutOfMemory(nullptr,
"NativeModule::AllocateForCode reservation");
@@ -681,24 +734,24 @@ Vector<byte> NativeModule::AllocateForCode(size_t size) {
: owned_code_space_.back().end();
VirtualMemory new_mem =
- wasm_code_manager_->TryAllocate(size, reinterpret_cast<void*>(hint));
+ code_manager_->TryAllocate(size, reinterpret_cast<void*>(hint));
if (!new_mem.IsReserved()) {
V8::FatalProcessOutOfMemory(nullptr,
"NativeModule::AllocateForCode reservation");
UNREACHABLE();
}
- wasm_code_manager_->AssignRanges(new_mem.address(), new_mem.end(), this);
+ code_manager_->AssignRanges(new_mem.address(), new_mem.end(), this);
free_code_space_.Merge(new_mem.region());
owned_code_space_.emplace_back(std::move(new_mem));
- mem = free_code_space_.Allocate(size);
- DCHECK(!mem.is_empty());
+ code_space = free_code_space_.Allocate(size);
+ DCHECK(!code_space.is_empty());
}
const Address page_size = page_allocator->AllocatePageSize();
- Address commit_start = RoundUp(mem.begin(), page_size);
- Address commit_end = RoundUp(mem.end(), page_size);
- // {commit_start} will be either mem.start or the start of the next page.
- // {commit_end} will be the start of the page after the one in which
+ Address commit_start = RoundUp(code_space.begin(), page_size);
+ Address commit_end = RoundUp(code_space.end(), page_size);
+ // {commit_start} will be either code_space.start or the start of the next
+ // page. {commit_end} will be the start of the page after the one in which
// the allocation ends.
// We start from an aligned start, and we know we allocated vmem in
// page multiples.
@@ -720,7 +773,7 @@ Vector<byte> NativeModule::AllocateForCode(size_t size) {
Address start = std::max(commit_start, vmem.address());
Address end = std::min(commit_end, vmem.end());
size_t commit_size = static_cast<size_t>(end - start);
- if (!wasm_code_manager_->Commit(start, commit_size)) {
+ if (!code_manager_->Commit(start, commit_size)) {
V8::FatalProcessOutOfMemory(nullptr,
"NativeModule::AllocateForCode commit");
UNREACHABLE();
@@ -732,21 +785,49 @@ Vector<byte> NativeModule::AllocateForCode(size_t size) {
if (commit_start >= commit_end) break;
}
#else
- if (!wasm_code_manager_->Commit(commit_start, commit_end - commit_start)) {
+ if (!code_manager_->Commit(commit_start, commit_end - commit_start)) {
V8::FatalProcessOutOfMemory(nullptr,
"NativeModule::AllocateForCode commit");
UNREACHABLE();
}
#endif
}
- DCHECK(IsAligned(mem.begin(), kCodeAlignment));
- allocated_code_space_.Merge(mem);
- TRACE_HEAP("Code alloc for %p: %" PRIxPTR ",+%zu\n", this, mem.begin(), size);
- return {reinterpret_cast<byte*>(mem.begin()), mem.size()};
+ DCHECK(IsAligned(code_space.begin(), kCodeAlignment));
+ allocated_code_space_.Merge(code_space);
+ TRACE_HEAP("Code alloc for %p: %" PRIxPTR ",+%zu\n", this, code_space.begin(),
+ size);
+ return {reinterpret_cast<byte*>(code_space.begin()), code_space.size()};
+}
+
+namespace {
+class NativeModuleWireBytesStorage final : public WireBytesStorage {
+ public:
+ explicit NativeModuleWireBytesStorage(
+ std::shared_ptr<OwnedVector<const uint8_t>> wire_bytes)
+ : wire_bytes_(std::move(wire_bytes)) {}
+
+ Vector<const uint8_t> GetCode(WireBytesRef ref) const final {
+ return wire_bytes_->as_vector().SubVector(ref.offset(), ref.end_offset());
+ }
+
+ private:
+ const std::shared_ptr<OwnedVector<const uint8_t>> wire_bytes_;
+};
+} // namespace
+
+void NativeModule::SetWireBytes(OwnedVector<const uint8_t> wire_bytes) {
+ auto shared_wire_bytes =
+ std::make_shared<OwnedVector<const uint8_t>>(std::move(wire_bytes));
+ wire_bytes_ = shared_wire_bytes;
+ if (!shared_wire_bytes->is_empty()) {
+ compilation_state_->SetWireBytesStorage(
+ std::make_shared<NativeModuleWireBytesStorage>(
+ std::move(shared_wire_bytes)));
+ }
}
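
The shared_ptr indirection is what keeps the bytes alive for background work: each background compile task holds a WireBytesStorage, and the storage holds the byte buffer, so the buffer outlives even a NativeModule torn down mid-compile. In outline:

// Ownership chain established by SetWireBytes (arrows are shared_ptr refs):
//   NativeModule::wire_bytes_ ------------------------------+
//                                                           v
//   background task -> NativeModuleWireBytesStorage -> OwnedVector bytes
// The buffer is freed only when the last holder lets go.
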
WasmCode* NativeModule::Lookup(Address pc) const {
- base::LockGuard<base::Mutex> lock(&allocation_mutex_);
+ base::MutexGuard lock(&allocation_mutex_);
if (owned_code_.empty()) return nullptr;
auto iter = std::upper_bound(owned_code_.begin(), owned_code_.end(), pc,
WasmCodeUniquePtrComparator());
@@ -783,7 +864,7 @@ uint32_t NativeModule::GetFunctionIndexFromJumpTableSlot(
void NativeModule::DisableTrapHandler() {
// Switch {use_trap_handler_} from true to false.
DCHECK(use_trap_handler_);
- use_trap_handler_ = false;
+ use_trap_handler_ = kNoTrapHandler;
// Clear the code table (just to increase the chances to hit an error if we
// forget to re-add all code).
@@ -796,25 +877,30 @@ void NativeModule::DisableTrapHandler() {
NativeModule::~NativeModule() {
TRACE_HEAP("Deleting native module: %p\n", reinterpret_cast<void*>(this));
- compilation_state_.reset(); // Cancels tasks, needs to be done first.
- wasm_code_manager_->FreeNativeModule(this);
+ // Cancel all background compilation before resetting any field of the
+ // NativeModule or freeing anything.
+ compilation_state_->CancelAndWait();
+ code_manager_->FreeNativeModule(this);
}
WasmCodeManager::WasmCodeManager(WasmMemoryTracker* memory_tracker,
size_t max_committed)
: memory_tracker_(memory_tracker),
- remaining_uncommitted_code_space_(max_committed) {
+ remaining_uncommitted_code_space_(max_committed),
+ critical_uncommitted_code_space_(max_committed / 2) {
DCHECK_LE(max_committed, kMaxWasmCodeMemory);
}
bool WasmCodeManager::Commit(Address start, size_t size) {
+ // TODO(v8:8462) Remove eager commit once perf supports remapping.
+ if (FLAG_perf_prof) return true;
DCHECK(IsAligned(start, AllocatePageSize()));
DCHECK(IsAligned(size, AllocatePageSize()));
// Reserve the size. Use a CAS loop to avoid underflow on
// {remaining_uncommitted_code_space_}. Temporary underflow would allow
// concurrent threads to over-commit.
+ size_t old_value = remaining_uncommitted_code_space_.load();
while (true) {
- size_t old_value = remaining_uncommitted_code_space_.load();
if (old_value < size) return false;
if (remaining_uncommitted_code_space_.compare_exchange_weak(
old_value, old_value - size)) {
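
Hoisting the load out of the loop works because compare_exchange_weak, on failure, writes the freshly observed value back into old_value. The reservation pattern in isolation, as a compilable sketch:

#include <atomic>
#include <cstddef>

std::atomic<size_t> remaining{1024 * 1024};

// Reserve {size} bytes without ever letting {remaining} underflow, even
// with concurrent callers.
bool Reserve(size_t size) {
  size_t old_value = remaining.load();
  while (true) {
    if (old_value < size) return false;  // budget exhausted
    // On failure, {old_value} is refreshed by compare_exchange_weak itself.
    if (remaining.compare_exchange_weak(old_value, old_value - size)) {
      return true;
    }
  }
}
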
@@ -841,13 +927,13 @@ bool WasmCodeManager::Commit(Address start, size_t size) {
void WasmCodeManager::AssignRanges(Address start, Address end,
NativeModule* native_module) {
- base::LockGuard<base::Mutex> lock(&native_modules_mutex_);
+ base::MutexGuard lock(&native_modules_mutex_);
lookup_map_.insert(std::make_pair(start, std::make_pair(end, native_module)));
}
void WasmCodeManager::AssignRangesAndAddModule(Address start, Address end,
NativeModule* native_module) {
- base::LockGuard<base::Mutex> lock(&native_modules_mutex_);
+ base::MutexGuard lock(&native_modules_mutex_);
lookup_map_.insert(std::make_pair(start, std::make_pair(end, native_module)));
native_modules_.emplace(native_module);
}
@@ -871,11 +957,17 @@ VirtualMemory WasmCodeManager::TryAllocate(size_t size, void* hint) {
TRACE_HEAP("VMem alloc: %p:%p (%zu)\n",
reinterpret_cast<void*>(mem.address()),
reinterpret_cast<void*>(mem.end()), mem.size());
+
+ // TODO(v8:8462) Remove eager commit once perf supports remapping.
+ if (FLAG_perf_prof) {
+ SetPermissions(GetPlatformPageAllocator(), mem.address(), mem.size(),
+ PageAllocator::kReadWriteExecute);
+ }
return mem;
}
void WasmCodeManager::SampleModuleSizes(Isolate* isolate) const {
- base::LockGuard<base::Mutex> lock(&native_modules_mutex_);
+ base::MutexGuard lock(&native_modules_mutex_);
for (NativeModule* native_module : native_modules_) {
int code_size =
static_cast<int>(native_module->committed_code_space_.load() / MB);
@@ -883,6 +975,11 @@ void WasmCodeManager::SampleModuleSizes(Isolate* isolate) const {
}
}
+void WasmCodeManager::SetMaxCommittedMemoryForTesting(size_t limit) {
+ remaining_uncommitted_code_space_.store(limit);
+ critical_uncommitted_code_space_.store(limit / 2);
+}
+
namespace {
void ModuleSamplingCallback(v8::Isolate* v8_isolate, v8::GCType type,
@@ -900,57 +997,62 @@ void WasmCodeManager::InstallSamplingGCCallback(Isolate* isolate) {
}
// static
-size_t WasmCodeManager::EstimateNativeModuleSize(const WasmModule* module) {
+size_t WasmCodeManager::EstimateNativeModuleCodeSize(const WasmModule* module) {
constexpr size_t kCodeSizeMultiplier = 4;
- constexpr size_t kImportSize = 32 * kPointerSize;
-
- uint32_t num_wasm_functions = module->num_declared_functions;
-
- size_t estimate =
- AllocatePageSize() /* TODO(titzer): 1 page spot bonus */ +
- sizeof(NativeModule) +
- (sizeof(WasmCode*) * num_wasm_functions /* code table size */) +
- (sizeof(WasmCode) * num_wasm_functions /* code object size */) +
- (kImportSize * module->num_imported_functions /* import size */) +
- (JumpTableAssembler::SizeForNumberOfSlots(num_wasm_functions));
+ constexpr size_t kCodeOverhead = 32; // for prologue, stack check, ...
+ constexpr size_t kStaticCodeSize = 512; // runtime stubs, ...
+ constexpr size_t kImportSize = 64 * kSystemPointerSize;
+ size_t estimate = kStaticCodeSize;
for (auto& function : module->functions) {
- estimate += kCodeSizeMultiplier * function.code.length();
+ estimate += kCodeOverhead + kCodeSizeMultiplier * function.code.length();
}
+ estimate +=
+ JumpTableAssembler::SizeForNumberOfSlots(module->num_declared_functions);
+ estimate += kImportSize * module->num_imported_functions;
return estimate;
}
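
A worked instance of the estimate, assuming a 64-bit target (kSystemPointerSize == 8) and leaving the architecture-dependent jump table term symbolic:

// Module with 2 imported functions and 3 declared functions whose bodies
// are 10, 50 and 200 wire bytes:
//   kStaticCodeSize                           =  512
//   (32 + 4*10) + (32 + 4*50) + (32 + 4*200)  =   72 + 232 + 832 = 1136
//   kImportSize * 2 = 64 * 8 * 2              = 1024
//   total                                     = 2672 bytes
//                    + JumpTableAssembler::SizeForNumberOfSlots(3)
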
-bool WasmCodeManager::ShouldForceCriticalMemoryPressureNotification() {
- base::LockGuard<base::Mutex> lock(&native_modules_mutex_);
- // TODO(titzer): we force a critical memory pressure notification
- // when the code space is almost exhausted, but only upon the next module
- // creation. This is only for one isolate, and it should really do this for
- // all isolates, at the point of commit.
- constexpr size_t kCriticalThreshold = 32 * 1024 * 1024;
- return native_modules_.size() > 1 &&
- remaining_uncommitted_code_space_.load() < kCriticalThreshold;
+// static
+size_t WasmCodeManager::EstimateNativeModuleNonCodeSize(
+ const WasmModule* module) {
+ size_t wasm_module_estimate = EstimateStoredSize(module);
+
+ uint32_t num_wasm_functions = module->num_declared_functions;
+
+ // TODO(wasm): Include wire bytes size.
+ size_t native_module_estimate =
+ sizeof(NativeModule) + /* NativeModule struct */
+ (sizeof(WasmCode*) * num_wasm_functions) + /* code table size */
+ (sizeof(WasmCode) * num_wasm_functions); /* code object size */
+
+ return wasm_module_estimate + native_module_estimate;
}
std::unique_ptr<NativeModule> WasmCodeManager::NewNativeModule(
- Isolate* isolate, const WasmFeatures& enabled, size_t memory_estimate,
- bool can_request_more, std::shared_ptr<const WasmModule> module,
- const ModuleEnv& env) {
- if (ShouldForceCriticalMemoryPressureNotification()) {
+ Isolate* isolate, const WasmFeatures& enabled, size_t code_size_estimate,
+ bool can_request_more, std::shared_ptr<const WasmModule> module) {
+ DCHECK_EQ(this, isolate->wasm_engine()->code_manager());
+ if (remaining_uncommitted_code_space_.load() <
+ critical_uncommitted_code_space_.load()) {
(reinterpret_cast<v8::Isolate*>(isolate))
->MemoryPressureNotification(MemoryPressureLevel::kCritical);
+ critical_uncommitted_code_space_.store(
+ remaining_uncommitted_code_space_.load() / 2);
}
// If the code must be contiguous, reserve enough address space up front.
- size_t vmem_size = kRequiresCodeRange ? kMaxWasmCodeMemory : memory_estimate;
- // Try up to three times; getting rid of dead JSArrayBuffer allocations might
+ size_t code_vmem_size =
+ kRequiresCodeRange ? kMaxWasmCodeMemory : code_size_estimate;
+ // Try up to two times; getting rid of dead JSArrayBuffer allocations might
// require two GCs because the first GC may be incremental and may have
// floating garbage.
static constexpr int kAllocationRetries = 2;
- VirtualMemory mem;
+ VirtualMemory code_space;
for (int retries = 0;; ++retries) {
- mem = TryAllocate(vmem_size);
- if (mem.IsReserved()) break;
+ code_space = TryAllocate(code_vmem_size);
+ if (code_space.IsReserved()) break;
if (retries == kAllocationRetries) {
V8::FatalProcessOutOfMemory(isolate, "WasmCodeManager::NewNativeModule");
UNREACHABLE();
@@ -960,13 +1062,13 @@ std::unique_ptr<NativeModule> WasmCodeManager::NewNativeModule(
true);
}
- Address start = mem.address();
- size_t size = mem.size();
- Address end = mem.end();
- std::unique_ptr<NativeModule> ret(
- new NativeModule(isolate, enabled, can_request_more, std::move(mem), this,
- std::move(module), env));
- TRACE_HEAP("New NativeModule %p: Mem: %" PRIuPTR ",+%zu\n", this, start,
+ Address start = code_space.address();
+ size_t size = code_space.size();
+ Address end = code_space.end();
+ std::unique_ptr<NativeModule> ret(new NativeModule(
+ isolate, enabled, can_request_more, std::move(code_space),
+ isolate->wasm_engine()->code_manager(), std::move(module)));
+ TRACE_HEAP("New NativeModule %p: Mem: %" PRIuPTR ",+%zu\n", ret.get(), start,
size);
AssignRangesAndAddModule(start, end, ret.get());
return ret;
@@ -1022,18 +1124,18 @@ bool NativeModule::SetExecutable(bool executable) {
}
void WasmCodeManager::FreeNativeModule(NativeModule* native_module) {
- base::LockGuard<base::Mutex> lock(&native_modules_mutex_);
+ base::MutexGuard lock(&native_modules_mutex_);
DCHECK_EQ(1, native_modules_.count(native_module));
native_modules_.erase(native_module);
- TRACE_HEAP("Freeing NativeModule %p\n", this);
- for (auto& mem : native_module->owned_code_space_) {
- DCHECK(mem.IsReserved());
- TRACE_HEAP("VMem Release: %" PRIxPTR ":%" PRIxPTR " (%zu)\n", mem.address(),
- mem.end(), mem.size());
- lookup_map_.erase(mem.address());
- memory_tracker_->ReleaseReservation(mem.size());
- mem.Free();
- DCHECK(!mem.IsReserved());
+ TRACE_HEAP("Freeing NativeModule %p\n", native_module);
+ for (auto& code_space : native_module->owned_code_space_) {
+ DCHECK(code_space.IsReserved());
+ TRACE_HEAP("VMem Release: %" PRIxPTR ":%" PRIxPTR " (%zu)\n",
+ code_space.address(), code_space.end(), code_space.size());
+ lookup_map_.erase(code_space.address());
+ memory_tracker_->ReleaseReservation(code_space.size());
+ code_space.Free();
+ DCHECK(!code_space.IsReserved());
}
native_module->owned_code_space_.clear();
@@ -1045,7 +1147,7 @@ void WasmCodeManager::FreeNativeModule(NativeModule* native_module) {
}
NativeModule* WasmCodeManager::LookupNativeModule(Address pc) const {
- base::LockGuard<base::Mutex> lock(&native_modules_mutex_);
+ base::MutexGuard lock(&native_modules_mutex_);
if (lookup_map_.empty()) return nullptr;
auto iter = lookup_map_.upper_bound(pc);
diff --git a/deps/v8/src/wasm/wasm-code-manager.h b/deps/v8/src/wasm/wasm-code-manager.h
index 65156b7457..4247350ceb 100644
--- a/deps/v8/src/wasm/wasm-code-manager.h
+++ b/deps/v8/src/wasm/wasm-code-manager.h
@@ -16,7 +16,7 @@
#include "src/handles.h"
#include "src/trap-handler/trap-handler.h"
#include "src/vector.h"
-#include "src/wasm/module-compiler.h"
+#include "src/wasm/compilation-environment.h"
#include "src/wasm/wasm-features.h"
#include "src/wasm/wasm-limits.h"
@@ -30,7 +30,9 @@ namespace wasm {
class NativeModule;
class WasmCodeManager;
+class WasmEngine;
class WasmMemoryTracker;
+class WasmImportWrapperCache;
struct WasmModule;
// Sorted, disjoint and non-overlapping memory regions. A region is of the
@@ -43,8 +45,9 @@ class V8_EXPORT_PRIVATE DisjointAllocationPool final {
explicit DisjointAllocationPool(base::AddressRegion region)
: regions_({region}) {}
- DisjointAllocationPool(DisjointAllocationPool&& other) = default;
- DisjointAllocationPool& operator=(DisjointAllocationPool&& other) = default;
+ DisjointAllocationPool(DisjointAllocationPool&& other) V8_NOEXCEPT = default;
+ DisjointAllocationPool& operator=(DisjointAllocationPool&& other)
+ V8_NOEXCEPT = default;
// Merge the parameter region into this object while preserving ordering of
// the regions. The assumption is that the passed parameter is not
@@ -62,7 +65,7 @@ class V8_EXPORT_PRIVATE DisjointAllocationPool final {
private:
std::list<base::AddressRegion> regions_;
- DISALLOW_COPY_AND_ASSIGN(DisjointAllocationPool)
+ DISALLOW_COPY_AND_ASSIGN(DisjointAllocationPool);
};
class V8_EXPORT_PRIVATE WasmCode final {
@@ -111,9 +114,12 @@ class V8_EXPORT_PRIVATE WasmCode final {
NativeModule* native_module() const { return native_module_; }
Tier tier() const { return tier_; }
Address constant_pool() const;
+ Address code_comments() const;
size_t constant_pool_offset() const { return constant_pool_offset_; }
size_t safepoint_table_offset() const { return safepoint_table_offset_; }
size_t handler_table_offset() const { return handler_table_offset_; }
+ size_t code_comments_offset() const { return code_comments_offset_; }
+ size_t unpadded_binary_size() const { return unpadded_binary_size_; }
uint32_t stack_slots() const { return stack_slots_; }
bool is_liftoff() const { return tier_ == kLiftoff; }
bool contains(Address pc) const {
@@ -130,6 +136,7 @@ class V8_EXPORT_PRIVATE WasmCode final {
void Validate() const;
void Print(const char* name = nullptr) const;
+ void MaybePrint(const char* name = nullptr) const;
void Disassemble(const char* name, std::ostream& os,
Address current_pc = kNullAddress) const;
@@ -140,13 +147,17 @@ class V8_EXPORT_PRIVATE WasmCode final {
enum FlushICache : bool { kFlushICache = true, kNoFlushICache = false };
+ static constexpr uint32_t kAnonymousFuncIndex = 0xffffffff;
+ STATIC_ASSERT(kAnonymousFuncIndex > kV8MaxWasmFunctions);
+
private:
friend class NativeModule;
WasmCode(NativeModule* native_module, uint32_t index,
Vector<byte> instructions, uint32_t stack_slots,
size_t safepoint_table_offset, size_t handler_table_offset,
- size_t constant_pool_offset,
+ size_t constant_pool_offset, size_t code_comments_offset,
+ size_t unpadded_binary_size,
OwnedVector<trap_handler::ProtectedInstructionData>
protected_instructions,
OwnedVector<const byte> reloc_info,
@@ -161,11 +172,14 @@ class V8_EXPORT_PRIVATE WasmCode final {
stack_slots_(stack_slots),
safepoint_table_offset_(safepoint_table_offset),
handler_table_offset_(handler_table_offset),
+ code_comments_offset_(code_comments_offset),
+ unpadded_binary_size_(unpadded_binary_size),
protected_instructions_(std::move(protected_instructions)),
tier_(tier) {
- DCHECK_LE(safepoint_table_offset, instructions.size());
- DCHECK_LE(constant_pool_offset, instructions.size());
- DCHECK_LE(handler_table_offset, instructions.size());
+ DCHECK_LE(safepoint_table_offset, unpadded_binary_size);
+ DCHECK_LE(handler_table_offset, unpadded_binary_size);
+ DCHECK_LE(code_comments_offset, unpadded_binary_size);
+ DCHECK_LE(constant_pool_offset, unpadded_binary_size);
}
// Code objects that have been registered with the global trap handler within
@@ -178,9 +192,6 @@ class V8_EXPORT_PRIVATE WasmCode final {
// trap_handler_index.
void RegisterTrapHandlerData();
- static constexpr uint32_t kAnonymousFuncIndex = 0xffffffff;
- STATIC_ASSERT(kAnonymousFuncIndex > kV8MaxWasmFunctions);
-
Vector<byte> instructions_;
OwnedVector<const byte> reloc_info_;
OwnedVector<const byte> source_position_table_;
@@ -194,6 +205,8 @@ class V8_EXPORT_PRIVATE WasmCode final {
// conversions.
size_t safepoint_table_offset_ = 0;
size_t handler_table_offset_ = 0;
+ size_t code_comments_offset_ = 0;
+ size_t unpadded_binary_size_ = 0;
intptr_t trap_handler_index_ = -1;
OwnedVector<trap_handler::ProtectedInstructionData> protected_instructions_;
Tier tier_;
@@ -219,27 +232,18 @@ class V8_EXPORT_PRIVATE NativeModule final {
OwnedVector<trap_handler::ProtectedInstructionData>
protected_instructions,
OwnedVector<const byte> source_position_table,
- WasmCode::Tier tier);
+ WasmCode::Kind kind, WasmCode::Tier tier);
WasmCode* AddDeserializedCode(
uint32_t index, Vector<const byte> instructions, uint32_t stack_slots,
size_t safepoint_table_offset, size_t handler_table_offset,
- size_t constant_pool_offset,
+ size_t constant_pool_offset, size_t code_comments_offset,
+ size_t unpadded_binary_size,
OwnedVector<trap_handler::ProtectedInstructionData>
protected_instructions,
OwnedVector<const byte> reloc_info,
OwnedVector<const byte> source_position_table, WasmCode::Tier tier);
- // Add an import wrapper for wasm-to-JS transitions. This method copies over
- // JS-allocated code, because we compile wrappers using a different pipeline.
- WasmCode* AddImportWrapper(Handle<Code> code, uint32_t index);
-
- // Add an interpreter entry. For the same reason as AddImportWrapper, we
- // currently compile these using a different pipeline and we can't get a
- // CodeDesc here. When adding interpreter wrappers, we do not insert them in
- // the code_table, however, we let them self-identify as the {index} function.
- WasmCode* AddInterpreterEntry(Handle<Code> code, uint32_t index);
-
// Adds anonymous code for testing purposes.
WasmCode* AddCodeForTesting(Handle<Code> code);
@@ -259,6 +263,11 @@ class V8_EXPORT_PRIVATE NativeModule final {
// threads executing the old code.
void PublishCode(WasmCode* code);
+ // Switch a function to an interpreter entry wrapper. When adding interpreter
+ // wrappers, we do not insert them in the code_table; however, we let them
+ // self-identify as the {index} function.
+ void PublishInterpreterEntry(WasmCode* code, uint32_t index);
+
// Creates a snapshot of the current state of the code table. This is useful
// to get a consistent view of the table (e.g. used by the serializer).
std::vector<WasmCode*> SnapshotCodeTable() const;
@@ -317,24 +326,31 @@ class V8_EXPORT_PRIVATE NativeModule final {
CompilationState* compilation_state() { return compilation_state_.get(); }
+ // Create a {CompilationEnv} object for compilation. Only valid as long as
+ // this {NativeModule} is alive.
+ CompilationEnv CreateCompilationEnv() const;
+
uint32_t num_functions() const {
return module_->num_declared_functions + module_->num_imported_functions;
}
uint32_t num_imported_functions() const {
return module_->num_imported_functions;
}
- bool use_trap_handler() const { return use_trap_handler_; }
+ UseTrapHandler use_trap_handler() const { return use_trap_handler_; }
void set_lazy_compile_frozen(bool frozen) { lazy_compile_frozen_ = frozen; }
bool lazy_compile_frozen() const { return lazy_compile_frozen_; }
- Vector<const byte> wire_bytes() const { return wire_bytes_.as_vector(); }
- void set_wire_bytes(OwnedVector<const byte> wire_bytes) {
- wire_bytes_ = std::move(wire_bytes);
- }
+ Vector<const uint8_t> wire_bytes() const { return wire_bytes_->as_vector(); }
const WasmModule* module() const { return module_.get(); }
- WasmCodeManager* code_manager() const { return wasm_code_manager_; }
+ size_t committed_code_space() const { return committed_code_space_.load(); }
+
+ void SetWireBytes(OwnedVector<const uint8_t> wire_bytes);
WasmCode* Lookup(Address) const;
+ WasmImportWrapperCache* import_wrapper_cache() const {
+ return import_wrapper_cache_.get();
+ }
+
~NativeModule();
const WasmFeatures& enabled_features() const { return enabled_features_; }
@@ -347,7 +363,7 @@ class V8_EXPORT_PRIVATE NativeModule final {
NativeModule(Isolate* isolate, const WasmFeatures& enabled_features,
bool can_request_more, VirtualMemory code_space,
WasmCodeManager* code_manager,
- std::shared_ptr<const WasmModule> module, const ModuleEnv& env);
+ std::shared_ptr<const WasmModule> module);
WasmCode* AddAnonymousCode(Handle<Code>, WasmCode::Kind kind,
const char* name = nullptr);
@@ -356,12 +372,14 @@ class V8_EXPORT_PRIVATE NativeModule final {
// Primitive for adding code to the native module. All code added to a native
// module is owned by that module. Various callers get to decide on how the
- // code is obtained (CodeDesc vs, as a point in time, Code*), the kind,
+ // code is obtained (CodeDesc vs, as a point in time, Code), the kind,
// whether it has an index or is anonymous, etc.
WasmCode* AddOwnedCode(uint32_t index, Vector<const byte> instructions,
uint32_t stack_slots, size_t safepoint_table_offset,
size_t handler_table_offset,
size_t constant_pool_offset,
+ size_t code_comments_offset,
+ size_t unpadded_binary_size,
OwnedVector<trap_handler::ProtectedInstructionData>,
OwnedVector<const byte> reloc_info,
OwnedVector<const byte> source_position_table,
@@ -376,6 +394,30 @@ class V8_EXPORT_PRIVATE NativeModule final {
return {code_table_.get(), module_->num_declared_functions};
}
+ // Hold the {allocation_mutex_} when calling this method.
+ bool has_interpreter_redirection(uint32_t func_index) {
+ DCHECK_LT(func_index, num_functions());
+ DCHECK_LE(module_->num_imported_functions, func_index);
+ if (!interpreter_redirections_) return false;
+ uint32_t bitset_idx = func_index - module_->num_imported_functions;
+ uint8_t byte = interpreter_redirections_[bitset_idx / kBitsPerByte];
+ return byte & (1 << (bitset_idx % kBitsPerByte));
+ }
+
+ // Hold the {mutex_} when calling this method.
+ void SetInterpreterRedirection(uint32_t func_index) {
+ DCHECK_LT(func_index, num_functions());
+ DCHECK_LE(module_->num_imported_functions, func_index);
+ if (!interpreter_redirections_) {
+ interpreter_redirections_.reset(
+ new uint8_t[RoundUp<kBitsPerByte>(module_->num_declared_functions) /
+ kBitsPerByte]);
+ }
+ uint32_t bitset_idx = func_index - module_->num_imported_functions;
+ uint8_t& byte = interpreter_redirections_[bitset_idx / kBitsPerByte];
+ byte |= 1 << (bitset_idx % kBitsPerByte);
+ }
+
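Note: the two helpers above implement a lazily allocated bitset with one bit per declared function. {func_index} is first rebased by the number of imported functions, then split into a byte index and a bit offset. A minimal standalone sketch of the same arithmetic, assuming kBitsPerByte == 8 (names mirror the diff, but this is not the V8 implementation):

#include <cassert>
#include <cstdint>
#include <memory>

class RedirectionBitset {
 public:
  RedirectionBitset(uint32_t num_imported, uint32_t num_declared)
      : num_imported_(num_imported), num_declared_(num_declared) {}

  void Set(uint32_t func_index) {
    assert(func_index >= num_imported_);
    if (!bits_) {
      // Lazily allocate one bit per declared function, rounded up to whole
      // bytes; the trailing () value-initializes (zeroes) the array.
      bits_.reset(new uint8_t[(num_declared_ + 7) / 8]());
    }
    uint32_t idx = func_index - num_imported_;
    bits_[idx / 8] |= static_cast<uint8_t>(1u << (idx % 8));
  }

  bool IsSet(uint32_t func_index) const {
    if (!bits_) return false;  // No function has been redirected yet.
    uint32_t idx = func_index - num_imported_;
    return (bits_[idx / 8] >> (idx % 8)) & 1;
  }

 private:
  uint32_t num_imported_;
  uint32_t num_declared_;
  std::unique_ptr<uint8_t[]> bits_;
};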
// Features enabled for this module. We keep a copy of the features that
// were enabled at the time of the creation of this native module,
// to be consistent across asynchronous compilations later.
@@ -385,7 +427,9 @@ class V8_EXPORT_PRIVATE NativeModule final {
// AsyncCompileJob).
std::shared_ptr<const WasmModule> module_;
- OwnedVector<const byte> wire_bytes_;
+ // Wire bytes, held in a shared_ptr so they can be kept alive by the
+ // {WireBytesStorage}, held by background compile tasks.
+ std::shared_ptr<OwnedVector<const uint8_t>> wire_bytes_;
WasmCode* runtime_stub_table_[WasmCode::kRuntimeStubCount] = {nullptr};
@@ -395,7 +439,10 @@ class V8_EXPORT_PRIVATE NativeModule final {
// The compilation state keeps track of compilation tasks for this module.
// Note that its destructor blocks until all tasks are finished/aborted and
// hence needs to be destructed first when this native module dies.
- std::unique_ptr<CompilationState, CompilationStateDeleter> compilation_state_;
+ std::unique_ptr<CompilationState> compilation_state_;
+
+ // A cache of the import wrappers, keyed on the kind and signature.
+ std::unique_ptr<WasmImportWrapperCache> import_wrapper_cache_;
// This mutex protects concurrent calls to {AddCode} and friends.
mutable base::Mutex allocation_mutex_;
@@ -409,6 +456,10 @@ class V8_EXPORT_PRIVATE NativeModule final {
std::unique_ptr<WasmCode* []> code_table_;
+ // Null if no redirections exist, otherwise a bitset over all functions in
+ // this module marking those functions that have been redirected.
+ std::unique_ptr<uint8_t[]> interpreter_redirections_;
+
DisjointAllocationPool free_code_space_;
DisjointAllocationPool allocated_code_space_;
std::list<VirtualMemory> owned_code_space_;
@@ -416,11 +467,11 @@ class V8_EXPORT_PRIVATE NativeModule final {
// End of fields protected by {allocation_mutex_}.
//////////////////////////////////////////////////////////////////////////////
- WasmCodeManager* wasm_code_manager_;
+ WasmCodeManager* const code_manager_;
std::atomic<size_t> committed_code_space_{0};
int modification_scope_depth_ = 0;
bool can_request_more_memory_;
- bool use_trap_handler_ = false;
+ UseTrapHandler use_trap_handler_ = kNoTrapHandler;
bool is_executable_ = false;
bool lazy_compile_frozen_ = false;
@@ -439,8 +490,8 @@ class V8_EXPORT_PRIVATE WasmCodeManager final {
// TODO(titzer): isolate is only required here for CompilationState.
std::unique_ptr<NativeModule> NewNativeModule(
Isolate* isolate, const WasmFeatures& enabled_features,
- size_t memory_estimate, bool can_request_more,
- std::shared_ptr<const WasmModule> module, const ModuleEnv& env);
+ size_t code_size_estimate, bool can_request_more,
+ std::shared_ptr<const WasmModule> module);
NativeModule* LookupNativeModule(Address pc) const;
WasmCode* LookupCode(Address pc) const;
@@ -449,12 +500,15 @@ class V8_EXPORT_PRIVATE WasmCodeManager final {
// Add a sample of all module sizes.
void SampleModuleSizes(Isolate* isolate) const;
+ void SetMaxCommittedMemoryForTesting(size_t limit);
+
// TODO(v8:7424): For now we sample module sizes in a GC callback. This will
// bias samples towards apps with high memory pressure. We should switch to
// using sampling based on regular intervals independent of the GC.
static void InstallSamplingGCCallback(Isolate* isolate);
- static size_t EstimateNativeModuleSize(const WasmModule* module);
+ static size_t EstimateNativeModuleCodeSize(const WasmModule* module);
+ static size_t EstimateNativeModuleNonCodeSize(const WasmModule* module);
private:
friend class NativeModule;
@@ -469,10 +523,14 @@ class V8_EXPORT_PRIVATE WasmCodeManager final {
void FreeNativeModule(NativeModule*);
void AssignRanges(Address start, Address end, NativeModule*);
void AssignRangesAndAddModule(Address start, Address end, NativeModule*);
- bool ShouldForceCriticalMemoryPressureNotification();
WasmMemoryTracker* const memory_tracker_;
std::atomic<size_t> remaining_uncommitted_code_space_;
+ // If the remaining uncommitted code space falls below
+ // {critical_uncommitted_code_space_}, then we trigger a GC before creating
+ // the next module. This value is initialized to 50% of the available code
+ // space on creation and after each GC.
+ std::atomic<size_t> critical_uncommitted_code_space_;
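Note: the comment above documents the policy but not the consumer; a hedged sketch of how such a threshold might be consulted before reserving space for a new module (the function below is illustrative, not code from this diff):

#include <atomic>
#include <cstddef>

std::atomic<size_t> remaining_uncommitted{0};
std::atomic<size_t> critical_uncommitted{0};

// Illustrative check before creating a module: if remaining space has fallen
// below the critical mark, request a GC and move the mark to 50% of what is
// left, per the policy described in the comment above.
bool ShouldTriggerGCBeforeNewModule() {
  size_t remaining = remaining_uncommitted.load();
  if (remaining >= critical_uncommitted.load()) return false;
  critical_uncommitted.store(remaining / 2);
  return true;
}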
mutable base::Mutex native_modules_mutex_;
//////////////////////////////////////////////////////////////////////////////
diff --git a/deps/v8/src/wasm/wasm-constants.h b/deps/v8/src/wasm/wasm-constants.h
index 8e1f508979..668b08eba9 100644
--- a/deps/v8/src/wasm/wasm-constants.h
+++ b/deps/v8/src/wasm/wasm-constants.h
@@ -50,6 +50,13 @@ enum MemoryFlags : uint8_t {
kSharedAndMaximum = 3
};
+// Flags for data and element segments.
+enum SegmentFlags : uint8_t {
+ kActiveNoIndex = 0, // Active segment with a memory/table index of zero.
+ kPassive = 1, // Passive segment.
+ kActiveWithIndex = 2, // Active segment with a given memory/table index.
+};
+
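Note: a hedged sketch of how a module decoder might branch on these flag values; the helper, struct, and error handling are illustrative rather than V8's actual decoder:

#include <cstdint>
#include <stdexcept>

enum SegmentFlags : uint8_t {
  kActiveNoIndex = 0,    // Active segment, memory/table index implicitly 0.
  kPassive = 1,          // Passive segment, applied later by an instruction.
  kActiveWithIndex = 2,  // Active segment, explicit index immediate follows.
};

struct SegmentHeader {
  bool active;
  bool has_explicit_index;
};

// Illustrative mapping from the flag byte to decoding behavior.
SegmentHeader DecodeSegmentFlags(uint8_t flags) {
  switch (flags) {
    case kActiveNoIndex:
      return {true, false};
    case kPassive:
      return {false, false};
    case kActiveWithIndex:
      return {true, true};  // An index immediate follows in the byte stream.
    default:
      throw std::runtime_error("invalid segment flags");
  }
}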
// Binary encoding of sections identifiers.
enum SectionCode : int8_t {
kUnknownSectionCode = 0, // code for unknown sections
@@ -64,14 +71,19 @@ enum SectionCode : int8_t {
kElementSectionCode = 9, // Elements section
kCodeSectionCode = 10, // Function code
kDataSectionCode = 11, // Data segments
- kNameSectionCode = 12, // Name section (encoded as a string)
- kExceptionSectionCode = 13, // Exception section
- kSourceMappingURLSectionCode = 14, // Source Map URL section
+ kExceptionSectionCode = 12, // Exception section
+ kDataCountSectionCode = 13, // Number of data segments
+
+ // The following sections are custom sections, and are identified using a
+ // string rather than an integer. Their enumeration values are not guaranteed
+ // to be consistent.
+ kNameSectionCode, // Name section (encoded as a string)
+ kSourceMappingURLSectionCode, // Source Map URL section
// Helper values
kFirstSectionInModule = kTypeSectionCode,
kLastKnownModuleSection = kSourceMappingURLSectionCode,
- kFirstUnorderedSection = kNameSectionCode,
+ kFirstUnorderedSection = kExceptionSectionCode,
};
// Binary encoding of name section kinds.
@@ -85,6 +97,8 @@ static_assert(kWasmPageSize == size_t{1} << kWasmPageSizeLog2, "consistency");
using WasmCodePosition = int;
constexpr WasmCodePosition kNoCodePosition = -1;
+constexpr uint32_t kExceptionAttribute = 0;
+
} // namespace wasm
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/wasm/wasm-debug.cc b/deps/v8/src/wasm/wasm-debug.cc
index 425681a5e1..98619b5c14 100644
--- a/deps/v8/src/wasm/wasm-debug.cc
+++ b/deps/v8/src/wasm/wasm-debug.cc
@@ -31,9 +31,9 @@ namespace {
template <bool internal, typename... Args>
Handle<String> PrintFToOneByteString(Isolate* isolate, const char* format,
Args... args) {
- // Maximum length of a formatted value name ("param#%d", "local#%d",
- // "global#%d").
- constexpr int kMaxStrLen = 18;
+ // Maximum length of a formatted value name ("arg#%d", "local#%d",
+ // "global#%d", i32 constants, i64 constants), including null character.
+ static constexpr int kMaxStrLen = 21;
EmbeddedVector<char, kMaxStrLen> value;
int len = SNPrintF(value, format, args...);
CHECK(len > 0 && len < value.length());
@@ -49,11 +49,13 @@ Handle<Object> WasmValueToValueObject(Isolate* isolate, WasmValue value) {
if (Smi::IsValid(value.to<int32_t>()))
return handle(Smi::FromInt(value.to<int32_t>()), isolate);
return PrintFToOneByteString<false>(isolate, "%d", value.to<int32_t>());
- case kWasmI64:
- if (Smi::IsValid(value.to<int64_t>()))
- return handle(Smi::FromIntptr(value.to<int64_t>()), isolate);
- return PrintFToOneByteString<false>(isolate, "%" PRId64,
- value.to<int64_t>());
+ case kWasmI64: {
+ int64_t i64 = value.to<int64_t>();
+ int32_t i32 = static_cast<int32_t>(i64);
+ if (i32 == i64 && Smi::IsValid(i32))
+ return handle(Smi::FromIntptr(i32), isolate);
+ return PrintFToOneByteString<false>(isolate, "%" PRId64, i64);
+ }
case kWasmF32:
return isolate->factory()->NewNumber(value.to<float>());
case kWasmF64:
@@ -129,12 +131,12 @@ class InterpreterHandle {
return {frame_base, frame_limit};
}
- static Vector<const byte> GetBytes(WasmDebugInfo* debug_info) {
+ static ModuleWireBytes GetBytes(WasmDebugInfo debug_info) {
// Return raw pointer into heap. The WasmInterpreter will make its own copy
// of this data anyway, and there is no heap allocation in-between.
NativeModule* native_module =
debug_info->wasm_instance()->module_object()->native_module();
- return native_module->wire_bytes();
+ return ModuleWireBytes{native_module->wire_bytes()};
}
public:
@@ -209,10 +211,10 @@ class InterpreterHandle {
finished = true;
break;
case WasmInterpreter::State::TRAPPED: {
- int message_id =
+ MessageTemplate message_id =
WasmOpcodes::TrapReasonToMessageId(thread->GetTrapReason());
- Handle<Object> exception = isolate_->factory()->NewWasmRuntimeError(
- static_cast<MessageTemplate::Template>(message_id));
+ Handle<Object> exception =
+ isolate_->factory()->NewWasmRuntimeError(message_id);
isolate_->Throw(*exception);
// Handle this exception. Return without trying to read back the
// return value.
@@ -412,7 +414,7 @@ class InterpreterHandle {
isolate_->factory()->NewJSObjectWithNullProto();
if (instance->has_memory_object()) {
Handle<String> name = isolate_->factory()->InternalizeOneByteString(
- STATIC_CHAR_VECTOR("memory"));
+ StaticCharVector("memory"));
Handle<JSArrayBuffer> memory_buffer(
instance->memory_object()->array_buffer(), isolate_);
Handle<JSTypedArray> uint8_array = isolate_->factory()->NewJSTypedArray(
@@ -439,7 +441,7 @@ class InterpreterHandle {
isolate_->factory()->NewJSObjectWithNullProto();
Handle<String> locals_name =
isolate_->factory()->InternalizeOneByteString(
- STATIC_CHAR_VECTOR("locals"));
+ StaticCharVector("locals"));
JSObject::SetOwnPropertyIgnoreAttributes(local_scope_object, locals_name,
locals_obj, NONE)
.Assert();
@@ -468,7 +470,7 @@ class InterpreterHandle {
Handle<JSObject> stack_obj =
isolate_->factory()->NewJSObjectWithNullProto();
Handle<String> stack_name = isolate_->factory()->InternalizeOneByteString(
- STATIC_CHAR_VECTOR("stack"));
+ StaticCharVector("stack"));
JSObject::SetOwnPropertyIgnoreAttributes(local_scope_object, stack_name,
stack_obj, NONE)
.Assert();
@@ -539,14 +541,14 @@ wasm::InterpreterHandle* GetOrCreateInterpreterHandle(
return Handle<Managed<wasm::InterpreterHandle>>::cast(handle)->raw();
}
-wasm::InterpreterHandle* GetInterpreterHandle(WasmDebugInfo* debug_info) {
- Object* handle_obj = debug_info->interpreter_handle();
+wasm::InterpreterHandle* GetInterpreterHandle(WasmDebugInfo debug_info) {
+ Object handle_obj = debug_info->interpreter_handle();
DCHECK(!handle_obj->IsUndefined());
return Managed<wasm::InterpreterHandle>::cast(handle_obj)->raw();
}
-wasm::InterpreterHandle* GetInterpreterHandleOrNull(WasmDebugInfo* debug_info) {
- Object* handle_obj = debug_info->interpreter_handle();
+wasm::InterpreterHandle* GetInterpreterHandleOrNull(WasmDebugInfo debug_info) {
+ Object handle_obj = debug_info->interpreter_handle();
if (handle_obj->IsUndefined()) return nullptr;
return Managed<wasm::InterpreterHandle>::cast(handle_obj)->raw();
}
@@ -617,9 +619,7 @@ void WasmDebugInfo::RedirectToInterpreter(Handle<WasmDebugInfo> debug_info,
instance->module_object()->native_module();
const wasm::WasmModule* module = instance->module();
- // We may modify js wrappers, as well as wasm functions. Hence the 2
- // modification scopes.
- CodeSpaceMemoryModificationScope modification_scope(isolate->heap());
+ // We may modify the wasm jump table.
wasm::NativeModuleModificationScope native_module_modification_scope(
native_module);
@@ -628,10 +628,10 @@ void WasmDebugInfo::RedirectToInterpreter(Handle<WasmDebugInfo> debug_info,
DCHECK_GT(module->functions.size(), func_index);
if (!interpreted_functions->get(func_index)->IsUndefined(isolate)) continue;
- MaybeHandle<Code> new_code = compiler::CompileWasmInterpreterEntry(
- isolate, func_index, module->functions[func_index].sig);
- const wasm::WasmCode* wasm_new_code = native_module->AddInterpreterEntry(
- new_code.ToHandleChecked(), func_index);
+ wasm::WasmCode* wasm_new_code = compiler::CompileWasmInterpreterEntry(
+ isolate->wasm_engine(), native_module, func_index,
+ module->functions[func_index].sig);
+ native_module->PublishInterpreterEntry(wasm_new_code, func_index);
Handle<Foreign> foreign_holder = isolate->factory()->NewForeign(
wasm_new_code->instruction_start(), TENURED);
interpreted_functions->set(func_index, *foreign_holder);
@@ -639,7 +639,7 @@ void WasmDebugInfo::RedirectToInterpreter(Handle<WasmDebugInfo> debug_info,
}
void WasmDebugInfo::PrepareStep(StepAction step_action) {
- GetInterpreterHandle(this)->PrepareStep(step_action);
+ GetInterpreterHandle(*this)->PrepareStep(step_action);
}
// static
@@ -656,20 +656,20 @@ bool WasmDebugInfo::RunInterpreter(Isolate* isolate,
std::vector<std::pair<uint32_t, int>> WasmDebugInfo::GetInterpretedStack(
Address frame_pointer) {
- return GetInterpreterHandle(this)->GetInterpretedStack(frame_pointer);
+ return GetInterpreterHandle(*this)->GetInterpretedStack(frame_pointer);
}
wasm::WasmInterpreter::FramePtr WasmDebugInfo::GetInterpretedFrame(
Address frame_pointer, int idx) {
- return GetInterpreterHandle(this)->GetInterpretedFrame(frame_pointer, idx);
+ return GetInterpreterHandle(*this)->GetInterpretedFrame(frame_pointer, idx);
}
void WasmDebugInfo::Unwind(Address frame_pointer) {
- return GetInterpreterHandle(this)->Unwind(frame_pointer);
+ return GetInterpreterHandle(*this)->Unwind(frame_pointer);
}
uint64_t WasmDebugInfo::NumInterpretedCalls() {
- auto* handle = GetInterpreterHandleOrNull(this);
+ auto* handle = GetInterpreterHandleOrNull(*this);
return handle ? handle->NumInterpretedCalls() : 0;
}
@@ -730,7 +730,7 @@ Handle<JSFunction> WasmDebugInfo::GetCWasmEntry(
function_data->set_jump_table_offset(-1);
function_data->set_function_index(-1);
Handle<String> name = isolate->factory()->InternalizeOneByteString(
- STATIC_CHAR_VECTOR("c-wasm-entry"));
+ StaticCharVector("c-wasm-entry"));
NewFunctionArgs args = NewFunctionArgs::ForWasm(
name, function_data, isolate->sloppy_function_map());
Handle<JSFunction> new_entry = isolate->factory()->NewFunction(args);
diff --git a/deps/v8/src/wasm/wasm-engine.cc b/deps/v8/src/wasm/wasm-engine.cc
index dc78797365..d948157a12 100644
--- a/deps/v8/src/wasm/wasm-engine.cc
+++ b/deps/v8/src/wasm/wasm-engine.cc
@@ -7,10 +7,13 @@
#include "src/code-tracer.h"
#include "src/compilation-statistics.h"
#include "src/objects-inl.h"
+#include "src/objects/heap-number.h"
#include "src/objects/js-promise.h"
+#include "src/ostreams.h"
#include "src/wasm/function-compiler.h"
#include "src/wasm/module-compiler.h"
#include "src/wasm/module-decoder.h"
+#include "src/wasm/module-instantiate.h"
#include "src/wasm/streaming-decoder.h"
#include "src/wasm/wasm-objects-inl.h"
@@ -19,7 +22,7 @@ namespace internal {
namespace wasm {
WasmEngine::WasmEngine()
- : code_manager_(&memory_tracker_, kMaxWasmCodeMemory) {}
+ : code_manager_(&memory_tracker_, FLAG_wasm_max_code_space * MB) {}
WasmEngine::~WasmEngine() {
// All AsyncCompileJobs have been canceled.
@@ -38,20 +41,50 @@ bool WasmEngine::SyncValidate(Isolate* isolate, const WasmFeatures& enabled,
return result.ok();
}
-MaybeHandle<WasmModuleObject> WasmEngine::SyncCompileTranslatedAsmJs(
+MaybeHandle<AsmWasmData> WasmEngine::SyncCompileTranslatedAsmJs(
Isolate* isolate, ErrorThrower* thrower, const ModuleWireBytes& bytes,
- Handle<Script> asm_js_script,
- Vector<const byte> asm_js_offset_table_bytes) {
+ Vector<const byte> asm_js_offset_table_bytes,
+ Handle<HeapNumber> uses_bitset) {
ModuleResult result =
DecodeWasmModule(kAsmjsWasmFeatures, bytes.start(), bytes.end(), false,
kAsmJsOrigin, isolate->counters(), allocator());
CHECK(!result.failed());
// Transfer ownership of the WasmModule to the {Managed<WasmModule>} generated
- // in {CompileToModuleObject}.
- return CompileToModuleObject(isolate, kAsmjsWasmFeatures, thrower,
- std::move(result.val), bytes, asm_js_script,
- asm_js_offset_table_bytes);
+ // in {CompileToNativeModule}.
+ Handle<FixedArray> export_wrappers;
+ std::unique_ptr<NativeModule> native_module =
+ CompileToNativeModule(isolate, kAsmjsWasmFeatures, thrower,
+ std::move(result).value(), bytes, &export_wrappers);
+ if (!native_module) return {};
+
+ // Create heap objects for asm.js offset table to be stored in the module
+ // object.
+ Handle<ByteArray> asm_js_offset_table =
+ isolate->factory()->NewByteArray(asm_js_offset_table_bytes.length());
+ asm_js_offset_table->copy_in(0, asm_js_offset_table_bytes.start(),
+ asm_js_offset_table_bytes.length());
+
+ return AsmWasmData::New(isolate, std::move(native_module), export_wrappers,
+ asm_js_offset_table, uses_bitset);
+}
+
+Handle<WasmModuleObject> WasmEngine::FinalizeTranslatedAsmJs(
+ Isolate* isolate, Handle<AsmWasmData> asm_wasm_data,
+ Handle<Script> script) {
+ std::shared_ptr<NativeModule> native_module =
+ asm_wasm_data->managed_native_module()->get();
+ Handle<FixedArray> export_wrappers =
+ handle(asm_wasm_data->export_wrappers(), isolate);
+ size_t code_size_estimate =
+ wasm::WasmCodeManager::EstimateNativeModuleCodeSize(
+ native_module->module());
+
+ Handle<WasmModuleObject> module_object =
+ WasmModuleObject::New(isolate, std::move(native_module), script,
+ export_wrappers, code_size_estimate);
+ module_object->set_asm_js_offset_table(asm_wasm_data->asm_js_offset_table());
+ return module_object;
}
MaybeHandle<WasmModuleObject> WasmEngine::SyncCompile(
@@ -61,14 +94,40 @@ MaybeHandle<WasmModuleObject> WasmEngine::SyncCompile(
DecodeWasmModule(enabled, bytes.start(), bytes.end(), false, kWasmOrigin,
isolate->counters(), allocator());
if (result.failed()) {
- thrower->CompileFailed("Wasm decoding failed", result);
+ thrower->CompileFailed("Wasm decoding failed", result.error());
return {};
}
// Transfer ownership of the WasmModule to the {Managed<WasmModule>} generated
// in {CompileToModuleObject}.
- return CompileToModuleObject(isolate, enabled, thrower, std::move(result.val),
- bytes, Handle<Script>(), Vector<const byte>());
+ Handle<FixedArray> export_wrappers;
+ std::unique_ptr<NativeModule> native_module =
+ CompileToNativeModule(isolate, enabled, thrower,
+ std::move(result).value(), bytes, &export_wrappers);
+ if (!native_module) return {};
+
+ Handle<Script> script =
+ CreateWasmScript(isolate, bytes, native_module->module()->source_map_url);
+ size_t code_size_estimate =
+ wasm::WasmCodeManager::EstimateNativeModuleCodeSize(
+ native_module->module());
+
+ // Create the module object.
+ // TODO(clemensh): For the same module (same bytes / same hash), we should
+ // only have one WasmModuleObject. Otherwise, we might only set
+ // breakpoints on a (potentially empty) subset of the instances.
+
+ // Create the compiled module object and populate with compiled functions
+ // and information needed at instantiation time. This object needs to be
+ // serializable. Instantiation may occur off a deserialized version of this
+ // object.
+ Handle<WasmModuleObject> module_object =
+ WasmModuleObject::New(isolate, std::move(native_module), script,
+ export_wrappers, code_size_estimate);
+
+ // Finish the Wasm script now and make it public to the debugger.
+ isolate->debug()->OnAfterCompile(script);
+ return module_object;
}
MaybeHandle<WasmInstanceObject> WasmEngine::SyncInstantiate(
@@ -170,43 +229,36 @@ std::shared_ptr<StreamingDecoder> WasmEngine::StartStreamingCompilation(
return job->CreateStreamingDecoder();
}
-bool WasmEngine::CompileFunction(Isolate* isolate, NativeModule* native_module,
+void WasmEngine::CompileFunction(Isolate* isolate, NativeModule* native_module,
uint32_t function_index, ExecutionTier tier) {
- ErrorThrower thrower(isolate, "Manually requested tier up");
// Note we assume that "one-off" compilations can discard detected features.
WasmFeatures detected = kNoWasmFeatures;
- WasmCode* ret = WasmCompilationUnit::CompileWasmFunction(
- isolate, native_module, &detected, &thrower,
- GetModuleEnv(native_module->compilation_state()),
+ WasmCompilationUnit::CompileWasmFunction(
+ isolate, native_module, &detected,
&native_module->module()->functions[function_index], tier);
- return ret != nullptr;
}
std::shared_ptr<NativeModule> WasmEngine::ExportNativeModule(
Handle<WasmModuleObject> module_object) {
- return module_object->managed_native_module()->get();
+ return module_object->shared_native_module();
}
Handle<WasmModuleObject> WasmEngine::ImportNativeModule(
Isolate* isolate, std::shared_ptr<NativeModule> shared_module) {
- CHECK_EQ(code_manager(), shared_module->code_manager());
- Vector<const byte> wire_bytes = shared_module->wire_bytes();
+ ModuleWireBytes wire_bytes(shared_module->wire_bytes());
const WasmModule* module = shared_module->module();
Handle<Script> script =
CreateWasmScript(isolate, wire_bytes, module->source_map_url);
- Handle<WasmModuleObject> module_object =
- WasmModuleObject::New(isolate, std::move(shared_module), script);
-
- // TODO(6792): Wrappers below might be cloned using {Factory::CopyCode}.
- // This requires unlocking the code space here. This should eventually be
- // moved into the allocator.
- CodeSpaceMemoryModificationScope modification_scope(isolate->heap());
- CompileJsToWasmWrappers(isolate, module_object);
+ size_t code_size = shared_module->committed_code_space();
+ Handle<WasmModuleObject> module_object = WasmModuleObject::New(
+ isolate, std::move(shared_module), script, code_size);
+ CompileJsToWasmWrappers(isolate, module_object->native_module()->module(),
+ handle(module_object->export_wrappers(), isolate));
return module_object;
}
CompilationStatistics* WasmEngine::GetOrCreateTurboStatistics() {
- base::LockGuard<base::Mutex> guard(&mutex_);
+ base::MutexGuard guard(&mutex_);
if (compilation_stats_ == nullptr) {
compilation_stats_.reset(new CompilationStatistics());
}
@@ -214,7 +266,7 @@ CompilationStatistics* WasmEngine::GetOrCreateTurboStatistics() {
}
void WasmEngine::DumpAndResetTurboStatistics() {
- base::LockGuard<base::Mutex> guard(&mutex_);
+ base::MutexGuard guard(&mutex_);
if (compilation_stats_ != nullptr) {
StdoutStream os;
os << AsPrintableStatistics{*compilation_stats_.get(), false} << std::endl;
@@ -223,7 +275,7 @@ void WasmEngine::DumpAndResetTurboStatistics() {
}
CodeTracer* WasmEngine::GetCodeTracer() {
- base::LockGuard<base::Mutex> guard(&mutex_);
+ base::MutexGuard guard(&mutex_);
if (code_tracer_ == nullptr) code_tracer_.reset(new CodeTracer(-1));
return code_tracer_.get();
}
@@ -236,14 +288,14 @@ AsyncCompileJob* WasmEngine::CreateAsyncCompileJob(
new AsyncCompileJob(isolate, enabled, std::move(bytes_copy), length,
context, std::move(resolver));
// Pass ownership to the unique_ptr in {jobs_}.
- base::LockGuard<base::Mutex> guard(&mutex_);
+ base::MutexGuard guard(&mutex_);
jobs_[job] = std::unique_ptr<AsyncCompileJob>(job);
return job;
}
std::unique_ptr<AsyncCompileJob> WasmEngine::RemoveCompileJob(
AsyncCompileJob* job) {
- base::LockGuard<base::Mutex> guard(&mutex_);
+ base::MutexGuard guard(&mutex_);
auto item = jobs_.find(job);
DCHECK(item != jobs_.end());
std::unique_ptr<AsyncCompileJob> result = std::move(item->second);
@@ -252,7 +304,7 @@ std::unique_ptr<AsyncCompileJob> WasmEngine::RemoveCompileJob(
}
bool WasmEngine::HasRunningCompileJob(Isolate* isolate) {
- base::LockGuard<base::Mutex> guard(&mutex_);
+ base::MutexGuard guard(&mutex_);
DCHECK_EQ(1, isolates_.count(isolate));
for (auto& entry : jobs_) {
if (entry.first->isolate() == isolate) return true;
@@ -261,7 +313,7 @@ bool WasmEngine::HasRunningCompileJob(Isolate* isolate) {
}
void WasmEngine::DeleteCompileJobsOnIsolate(Isolate* isolate) {
- base::LockGuard<base::Mutex> guard(&mutex_);
+ base::MutexGuard guard(&mutex_);
DCHECK_EQ(1, isolates_.count(isolate));
for (auto it = jobs_.begin(); it != jobs_.end();) {
if (it->first->isolate() == isolate) {
@@ -273,51 +325,46 @@ void WasmEngine::DeleteCompileJobsOnIsolate(Isolate* isolate) {
}
void WasmEngine::AddIsolate(Isolate* isolate) {
- base::LockGuard<base::Mutex> guard(&mutex_);
+ base::MutexGuard guard(&mutex_);
DCHECK_EQ(0, isolates_.count(isolate));
isolates_.insert(isolate);
}
void WasmEngine::RemoveIsolate(Isolate* isolate) {
- base::LockGuard<base::Mutex> guard(&mutex_);
+ base::MutexGuard guard(&mutex_);
DCHECK_EQ(1, isolates_.count(isolate));
isolates_.erase(isolate);
}
namespace {
-struct WasmEnginePointerConstructTrait final {
- static void Construct(void* raw_ptr) {
- auto engine_ptr = reinterpret_cast<std::shared_ptr<WasmEngine>*>(raw_ptr);
- *engine_ptr = std::shared_ptr<WasmEngine>();
- }
-};
-
-// Holds the global shared pointer to the single {WasmEngine} that is intended
-// to be shared among Isolates within the same process. The {LazyStaticInstance}
-// here is required because {std::shared_ptr} has a non-trivial initializer.
-base::LazyStaticInstance<std::shared_ptr<WasmEngine>,
- WasmEnginePointerConstructTrait>::type
- global_wasm_engine;
+DEFINE_LAZY_LEAKY_OBJECT_GETTER(std::shared_ptr<WasmEngine>,
+ GetSharedWasmEngine);
} // namespace
// static
void WasmEngine::InitializeOncePerProcess() {
if (!FLAG_wasm_shared_engine) return;
- global_wasm_engine.Pointer()->reset(new WasmEngine());
+ *GetSharedWasmEngine() = std::make_shared<WasmEngine>();
}
// static
void WasmEngine::GlobalTearDown() {
if (!FLAG_wasm_shared_engine) return;
- global_wasm_engine.Pointer()->reset();
+ GetSharedWasmEngine()->reset();
}
// static
std::shared_ptr<WasmEngine> WasmEngine::GetWasmEngine() {
- if (FLAG_wasm_shared_engine) return global_wasm_engine.Get();
- return std::shared_ptr<WasmEngine>(new WasmEngine());
+ if (FLAG_wasm_shared_engine) return *GetSharedWasmEngine();
+ return std::make_shared<WasmEngine>();
+}
+
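Note: DEFINE_LAZY_LEAKY_OBJECT_GETTER presumably expands to a function-local static that is constructed on first use and deliberately never destroyed, which avoids destructor-ordering problems at process shutdown. A sketch of that pattern under this assumption:

#include <memory>

class WasmEngineStub {};

// Sketch: construct on first use, leak on purpose so no destructor runs at
// process exit. Function-local statics are initialized thread-safely in
// C++11 and later.
std::shared_ptr<WasmEngineStub>* GetSharedEngine() {
  static auto* engine = new std::shared_ptr<WasmEngineStub>();
  return engine;
}

InitializeOncePerProcess then assigns through the returned pointer and GlobalTearDown resets it, matching the calls in the diff.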
+// {max_mem_pages} is declared in wasm-limits.h.
+uint32_t max_mem_pages() {
+ STATIC_ASSERT(kV8MaxWasmMemoryPages <= kMaxUInt32);
+ return std::min(uint32_t{kV8MaxWasmMemoryPages}, FLAG_wasm_max_mem_pages);
}
} // namespace wasm
diff --git a/deps/v8/src/wasm/wasm-engine.h b/deps/v8/src/wasm/wasm-engine.h
index 4f4cddb550..4aa9331268 100644
--- a/deps/v8/src/wasm/wasm-engine.h
+++ b/deps/v8/src/wasm/wasm-engine.h
@@ -16,16 +16,19 @@
namespace v8 {
namespace internal {
+class AsmWasmData;
class CodeTracer;
class CompilationStatistics;
-class WasmModuleObject;
+class HeapNumber;
class WasmInstanceObject;
+class WasmModuleObject;
namespace wasm {
+class AsyncCompileJob;
class ErrorThrower;
-struct WasmFeatures;
struct ModuleWireBytes;
+struct WasmFeatures;
class V8_EXPORT_PRIVATE CompilationResultResolver {
public:
@@ -55,10 +58,13 @@ class V8_EXPORT_PRIVATE WasmEngine {
// Synchronously compiles the given bytes that represent a translated
// asm.js module.
- MaybeHandle<WasmModuleObject> SyncCompileTranslatedAsmJs(
+ MaybeHandle<AsmWasmData> SyncCompileTranslatedAsmJs(
Isolate* isolate, ErrorThrower* thrower, const ModuleWireBytes& bytes,
- Handle<Script> asm_js_script,
- Vector<const byte> asm_js_offset_table_bytes);
+ Vector<const byte> asm_js_offset_table_bytes,
+ Handle<HeapNumber> uses_bitset);
+ Handle<WasmModuleObject> FinalizeTranslatedAsmJs(
+ Isolate* isolate, Handle<AsmWasmData> asm_wasm_data,
+ Handle<Script> script);
// Synchronously compiles the given bytes that represent an encoded WASM
// module.
@@ -93,10 +99,10 @@ class V8_EXPORT_PRIVATE WasmEngine {
Isolate* isolate, const WasmFeatures& enabled, Handle<Context> context,
std::shared_ptr<CompilationResultResolver> resolver);
- // Compiles the function with the given index at a specific compilation tier
- // and returns true on success, false (and pending exception) otherwise. This
- // is mostly used for testing to force a function into a specific tier.
- bool CompileFunction(Isolate* isolate, NativeModule* native_module,
+ // Compiles the function with the given index at a specific compilation tier.
+ // Errors are stored internally in the CompilationState.
+ // This is mostly used for testing to force a function into a specific tier.
+ void CompileFunction(Isolate* isolate, NativeModule* native_module,
uint32_t function_index, ExecutionTier tier);
// Exports the sharable parts of the given module object so that they can be
diff --git a/deps/v8/src/wasm/wasm-external-refs.cc b/deps/v8/src/wasm/wasm-external-refs.cc
index 0317bb7bf5..9fc3b707c4 100644
--- a/deps/v8/src/wasm/wasm-external-refs.cc
+++ b/deps/v8/src/wasm/wasm-external-refs.cc
@@ -10,6 +10,7 @@
#include "include/v8config.h"
#include "src/base/bits.h"
+#include "src/memcopy.h"
#include "src/utils.h"
#include "src/v8memory.h"
#include "src/wasm/wasm-external-refs.h"
@@ -232,13 +233,13 @@ uint32_t word64_popcnt_wrapper(Address data) {
uint32_t word32_rol_wrapper(Address data) {
uint32_t input = ReadUnalignedValue<uint32_t>(data);
uint32_t shift = ReadUnalignedValue<uint32_t>(data + sizeof(input)) & 31;
- return (input << shift) | (input >> (32 - shift));
+ return (input << shift) | (input >> ((32 - shift) & 31));
}
uint32_t word32_ror_wrapper(Address data) {
uint32_t input = ReadUnalignedValue<uint32_t>(data);
uint32_t shift = ReadUnalignedValue<uint32_t>(data + sizeof(input)) & 31;
- return (input >> shift) | (input << (32 - shift));
+ return (input >> shift) | (input << ((32 - shift) & 31));
}
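Note: the added masking matters when shift == 0: `input >> (32 - 0)` shifts a 32-bit value by 32, which is undefined behavior in C++, while `(32 - 0) & 31 == 0` turns the second shift into a well-defined no-op and leaves the rotation result unchanged for every shift amount. A quick exhaustive sanity check of the identity:

#include <cassert>
#include <cstdint>

uint32_t RolMasked(uint32_t input, uint32_t shift) {
  shift &= 31;
  return (input << shift) | (input >> ((32 - shift) & 31));
}

// Reference rotate that special-cases shift == 0 instead of masking.
uint32_t RolReference(uint32_t input, uint32_t shift) {
  shift &= 31;
  if (shift == 0) return input;
  return (input << shift) | (input >> (32 - shift));
}

int main() {
  for (uint32_t shift = 0; shift < 64; ++shift) {
    assert(RolMasked(0x12345678u, shift) == RolReference(0x12345678u, shift));
  }
}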
void float64_pow_wrapper(Address data) {
@@ -247,6 +248,14 @@ void float64_pow_wrapper(Address data) {
WriteUnalignedValue<double>(data, Pow(x, y));
}
+void memory_copy_wrapper(Address dst, Address src, uint32_t size) {
+ MemMove(reinterpret_cast<void*>(dst), reinterpret_cast<void*>(src), size);
+}
+
+void memory_fill_wrapper(Address dst, uint32_t value, uint32_t size) {
+ memset(reinterpret_cast<void*>(dst), value, size);
+}
+
static WasmTrapCallbackForTesting wasm_trap_callback_for_testing = nullptr;
void set_trap_callback_for_testing(WasmTrapCallbackForTesting callback) {
diff --git a/deps/v8/src/wasm/wasm-external-refs.h b/deps/v8/src/wasm/wasm-external-refs.h
index fc116b7fd8..64a6653277 100644
--- a/deps/v8/src/wasm/wasm-external-refs.h
+++ b/deps/v8/src/wasm/wasm-external-refs.h
@@ -67,6 +67,10 @@ uint32_t word32_ror_wrapper(Address data);
void float64_pow_wrapper(Address data);
+void memory_copy_wrapper(Address dst, Address src, uint32_t size);
+
+void memory_fill_wrapper(Address dst, uint32_t value, uint32_t size);
+
typedef void (*WasmTrapCallbackForTesting)();
void set_trap_callback_for_testing(WasmTrapCallbackForTesting callback);
diff --git a/deps/v8/src/wasm/wasm-feature-flags.h b/deps/v8/src/wasm/wasm-feature-flags.h
index ec8aa8ba0c..711c747d8e 100644
--- a/deps/v8/src/wasm/wasm-feature-flags.h
+++ b/deps/v8/src/wasm/wasm-feature-flags.h
@@ -21,6 +21,8 @@
SEPARATOR \
V(anyref, "anyref opcodes", false) \
SEPARATOR \
- V(mut_global, "import/export mutable global support", true)
+ V(bigint, "JS BigInt support", false) \
+ SEPARATOR \
+ V(bulk_memory, "bulk memory opcodes", false)
#endif // V8_WASM_WASM_FEATURE_FLAGS_H_
diff --git a/deps/v8/src/wasm/wasm-import-wrapper-cache-inl.h b/deps/v8/src/wasm/wasm-import-wrapper-cache-inl.h
new file mode 100644
index 0000000000..290df24898
--- /dev/null
+++ b/deps/v8/src/wasm/wasm-import-wrapper-cache-inl.h
@@ -0,0 +1,52 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_WASM_WASM_IMPORT_WRAPPER_CACHE_INL_H_
+#define V8_WASM_WASM_IMPORT_WRAPPER_CACHE_INL_H_
+
+#include "src/compiler/wasm-compiler.h"
+#include "src/counters.h"
+#include "src/wasm/value-type.h"
+#include "src/wasm/wasm-code-manager.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+// Implements a cache for import wrappers.
+class WasmImportWrapperCache {
+ public:
+ WasmCode* GetOrCompile(WasmEngine* wasm_engine, Counters* counters,
+ compiler::WasmImportCallKind kind, FunctionSig* sig) {
+ base::MutexGuard lock(&mutex_);
+ CacheKey key(static_cast<uint8_t>(kind), *sig);
+ WasmCode*& cached = entry_map_[key];
+ if (cached == nullptr) {
+ // TODO(wasm): no need to hold the lock while compiling an import wrapper.
+ bool source_positions = native_module_->module()->origin == kAsmJsOrigin;
+ cached = compiler::CompileWasmImportCallWrapper(
+ wasm_engine, native_module_, kind, sig, source_positions);
+ counters->wasm_generated_code_size()->Increment(
+ cached->instructions().length());
+ counters->wasm_reloc_size()->Increment(cached->reloc_info().length());
+ }
+ return cached;
+ }
+
+ private:
+ friend class NativeModule;
+ mutable base::Mutex mutex_;
+ NativeModule* native_module_;
+ using CacheKey = std::pair<uint8_t, FunctionSig>;
+ std::unordered_map<CacheKey, WasmCode*, base::hash<CacheKey>> entry_map_;
+
+ explicit WasmImportWrapperCache(NativeModule* native_module)
+ : native_module_(native_module) {}
+};
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
+
+#endif // V8_WASM_WASM_IMPORT_WRAPPER_CACHE_INL_H_
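Note: GetOrCompile leans on operator[]: looking up a missing key default-inserts a null WasmCode* and yields a reference to the slot, so the compile-and-assign runs exactly once per (kind, signature) key while the mutex is held. The same idiom in miniature (unrelated to V8; the TODO above notes that holding the lock across compilation is a known cost):

#include <cstdio>
#include <mutex>
#include <string>
#include <unordered_map>

std::mutex mu;
std::unordered_map<std::string, int*> cache;

int* GetOrCreate(const std::string& key) {
  std::lock_guard<std::mutex> lock(mu);
  int*& cached = cache[key];  // Default-inserts nullptr if key is missing.
  if (cached == nullptr) {
    cached = new int(42);     // Expensive work runs only on a cache miss.
    std::printf("compiled %s\n", key.c_str());
  }
  return cached;              // Later calls return the cached entry.
}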
diff --git a/deps/v8/src/wasm/wasm-interpreter.cc b/deps/v8/src/wasm/wasm-interpreter.cc
index e724e73078..8e75ad233f 100644
--- a/deps/v8/src/wasm/wasm-interpreter.cc
+++ b/deps/v8/src/wasm/wasm-interpreter.cc
@@ -8,6 +8,7 @@
#include "src/wasm/wasm-interpreter.h"
#include "src/assembler-inl.h"
+#include "src/base/overflowing-math.h"
#include "src/boxed-float.h"
#include "src/compiler/wasm-compiler.h"
#include "src/conversions.h"
@@ -289,23 +290,19 @@ inline int64_t ExecuteI64ShrS(int64_t a, int64_t b, TrapReason* trap) {
}
inline uint32_t ExecuteI32Ror(uint32_t a, uint32_t b, TrapReason* trap) {
- uint32_t shift = (b & 0x1F);
- return (a >> shift) | (a << (32 - shift));
+ return (a >> (b & 0x1F)) | (a << ((32 - b) & 0x1F));
}
inline uint32_t ExecuteI32Rol(uint32_t a, uint32_t b, TrapReason* trap) {
- uint32_t shift = (b & 0x1F);
- return (a << shift) | (a >> (32 - shift));
+ return (a << (b & 0x1F)) | (a >> ((32 - b) & 0x1F));
}
inline uint64_t ExecuteI64Ror(uint64_t a, uint64_t b, TrapReason* trap) {
- uint32_t shift = (b & 0x3F);
- return (a >> shift) | (a << (64 - shift));
+ return (a >> (b & 0x3F)) | (a << ((64 - b) & 0x3F));
}
inline uint64_t ExecuteI64Rol(uint64_t a, uint64_t b, TrapReason* trap) {
- uint32_t shift = (b & 0x3F);
- return (a << shift) | (a >> (64 - shift));
+ return (a << (b & 0x3F)) | (a >> ((64 - b) & 0x3F));
}
inline float ExecuteF32Min(float a, float b, TrapReason* trap) {
@@ -858,14 +855,14 @@ class SideTable : public ZoneObject {
break;
}
case kExprBr: {
- BreakDepthImmediate<Decoder::kNoValidate> imm(&i, i.pc());
+ BranchDepthImmediate<Decoder::kNoValidate> imm(&i, i.pc());
TRACE("control @%u: Br[depth=%u]\n", i.pc_offset(), imm.depth);
Control* c = &control_stack[control_stack.size() - imm.depth - 1];
if (!unreachable) c->end_label->Ref(i.pc(), stack_height);
break;
}
case kExprBrIf: {
- BreakDepthImmediate<Decoder::kNoValidate> imm(&i, i.pc());
+ BranchDepthImmediate<Decoder::kNoValidate> imm(&i, i.pc());
TRACE("control @%u: BrIf[depth=%u]\n", i.pc_offset(), imm.depth);
Control* c = &control_stack[control_stack.size() - imm.depth - 1];
if (!unreachable) c->end_label->Ref(i.pc(), stack_height);
@@ -1244,7 +1241,7 @@ class ThreadImpl {
pc_t pc;
sp_t sp;
size_t fp;
- unsigned arity;
+ uint32_t arity;
};
friend class InterpretedFrameImpl;
@@ -1405,14 +1402,18 @@ class ThreadImpl {
template <typename mtype>
inline Address BoundsCheckMem(uint32_t offset, uint32_t index) {
- size_t mem_size = instance_object_->memory_size();
- if (sizeof(mtype) > mem_size) return kNullAddress;
- if (offset > (mem_size - sizeof(mtype))) return kNullAddress;
- if (index > (mem_size - sizeof(mtype) - offset)) return kNullAddress;
+ uint32_t effective_index = offset + index;
+ if (effective_index < index) {
+ return kNullAddress; // wraparound => oob
+ }
+ if (!IsInBounds(effective_index, sizeof(mtype),
+ instance_object_->memory_size())) {
+ return kNullAddress; // oob
+ }
// Compute the effective address of the access, making sure to condition
// the index even in the in-bounds case.
return reinterpret_cast<Address>(instance_object_->memory_start()) +
- offset + (index & instance_object_->memory_mask());
+ (effective_index & instance_object_->memory_mask());
}
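Note: the rewritten bounds check first detects 32-bit wraparound in offset + index (if the unsigned sum is smaller than an operand, it overflowed) and then performs a single in-bounds test. A standalone sketch of the same logic, with IsInBounds reconstructed from its apparent meaning:

#include <cstddef>
#include <cstdint>

// Sketch of IsInBounds as assumed here: [index, index + size) within mem_size.
bool IsInBoundsSketch(uint32_t index, size_t size, size_t mem_size) {
  return size <= mem_size && index <= mem_size - size;
}

// Returns false (out of bounds) instead of kNullAddress for simplicity.
bool AccessOk(uint32_t offset, uint32_t index, size_t access_size,
              size_t mem_size) {
  uint32_t effective_index = offset + index;  // May wrap modulo 2^32.
  if (effective_index < index) return false;  // Wraparound => out of bounds.
  return IsInBoundsSketch(effective_index, access_size, mem_size);
}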
template <typename ctype, typename mtype>
@@ -1737,9 +1738,9 @@ class ThreadImpl {
BINOP_CASE(F32x4Mul, f32x4, float4, 4, a * b)
BINOP_CASE(F32x4Min, f32x4, float4, 4, a < b ? a : b)
BINOP_CASE(F32x4Max, f32x4, float4, 4, a > b ? a : b)
- BINOP_CASE(I32x4Add, i32x4, int4, 4, a + b)
- BINOP_CASE(I32x4Sub, i32x4, int4, 4, a - b)
- BINOP_CASE(I32x4Mul, i32x4, int4, 4, a * b)
+ BINOP_CASE(I32x4Add, i32x4, int4, 4, base::AddWithWraparound(a, b))
+ BINOP_CASE(I32x4Sub, i32x4, int4, 4, base::SubWithWraparound(a, b))
+ BINOP_CASE(I32x4Mul, i32x4, int4, 4, base::MulWithWraparound(a, b))
BINOP_CASE(I32x4MinS, i32x4, int4, 4, a < b ? a : b)
BINOP_CASE(I32x4MinU, i32x4, int4, 4,
static_cast<uint32_t>(a) < static_cast<uint32_t>(b) ? a : b)
@@ -1749,9 +1750,9 @@ class ThreadImpl {
BINOP_CASE(S128And, i32x4, int4, 4, a & b)
BINOP_CASE(S128Or, i32x4, int4, 4, a | b)
BINOP_CASE(S128Xor, i32x4, int4, 4, a ^ b)
- BINOP_CASE(I16x8Add, i16x8, int8, 8, a + b)
- BINOP_CASE(I16x8Sub, i16x8, int8, 8, a - b)
- BINOP_CASE(I16x8Mul, i16x8, int8, 8, a * b)
+ BINOP_CASE(I16x8Add, i16x8, int8, 8, base::AddWithWraparound(a, b))
+ BINOP_CASE(I16x8Sub, i16x8, int8, 8, base::SubWithWraparound(a, b))
+ BINOP_CASE(I16x8Mul, i16x8, int8, 8, base::MulWithWraparound(a, b))
BINOP_CASE(I16x8MinS, i16x8, int8, 8, a < b ? a : b)
BINOP_CASE(I16x8MinU, i16x8, int8, 8,
static_cast<uint16_t>(a) < static_cast<uint16_t>(b) ? a : b)
@@ -1762,9 +1763,9 @@ class ThreadImpl {
BINOP_CASE(I16x8AddSaturateU, i16x8, int8, 8, SaturateAdd<uint16_t>(a, b))
BINOP_CASE(I16x8SubSaturateS, i16x8, int8, 8, SaturateSub<int16_t>(a, b))
BINOP_CASE(I16x8SubSaturateU, i16x8, int8, 8, SaturateSub<uint16_t>(a, b))
- BINOP_CASE(I8x16Add, i8x16, int16, 16, a + b)
- BINOP_CASE(I8x16Sub, i8x16, int16, 16, a - b)
- BINOP_CASE(I8x16Mul, i8x16, int16, 16, a * b)
+ BINOP_CASE(I8x16Add, i8x16, int16, 16, base::AddWithWraparound(a, b))
+ BINOP_CASE(I8x16Sub, i8x16, int16, 16, base::SubWithWraparound(a, b))
+ BINOP_CASE(I8x16Mul, i8x16, int16, 16, base::MulWithWraparound(a, b))
BINOP_CASE(I8x16MinS, i8x16, int16, 16, a < b ? a : b)
BINOP_CASE(I8x16MinU, i8x16, int16, 16,
static_cast<uint8_t>(a) < static_cast<uint8_t>(b) ? a : b)
@@ -1792,12 +1793,12 @@ class ThreadImpl {
}
UNOP_CASE(F32x4Abs, f32x4, float4, 4, std::abs(a))
UNOP_CASE(F32x4Neg, f32x4, float4, 4, -a)
- UNOP_CASE(F32x4RecipApprox, f32x4, float4, 4, 1.0f / a)
- UNOP_CASE(F32x4RecipSqrtApprox, f32x4, float4, 4, 1.0f / std::sqrt(a))
- UNOP_CASE(I32x4Neg, i32x4, int4, 4, -a)
+ UNOP_CASE(F32x4RecipApprox, f32x4, float4, 4, base::Recip(a))
+ UNOP_CASE(F32x4RecipSqrtApprox, f32x4, float4, 4, base::RecipSqrt(a))
+ UNOP_CASE(I32x4Neg, i32x4, int4, 4, base::NegateWithWraparound(a))
UNOP_CASE(S128Not, i32x4, int4, 4, ~a)
- UNOP_CASE(I16x8Neg, i16x8, int8, 8, -a)
- UNOP_CASE(I8x16Neg, i8x16, int16, 16, -a)
+ UNOP_CASE(I16x8Neg, i16x8, int8, 8, base::NegateWithWraparound(a))
+ UNOP_CASE(I8x16Neg, i8x16, int16, 16, base::NegateWithWraparound(a))
#undef UNOP_CASE
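Note: the base:: wraparound helpers presumably route signed arithmetic through unsigned types, since signed overflow is undefined behavior in C++ while unsigned arithmetic wraps modulo 2^N; sketches under that assumption (the real helpers in src/base/overflowing-math.h may differ):

#include <cstdint>

// The narrowing cast back to int32_t is implementation-defined before C++20
// but behaves as two's complement on all platforms V8 targets.
int32_t AddWithWraparoundSketch(int32_t a, int32_t b) {
  return static_cast<int32_t>(static_cast<uint32_t>(a) +
                              static_cast<uint32_t>(b));
}

int32_t NegateWithWraparoundSketch(int32_t a) {
  // Negating INT32_MIN overflows in signed arithmetic; unsigned wraps.
  return static_cast<int32_t>(0u - static_cast<uint32_t>(a));
}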
#define CMPOP_CASE(op, name, stype, out_stype, count, expr) \
case kExpr##op: { \
@@ -2191,13 +2192,15 @@ class ThreadImpl {
break;
}
case kExprBr: {
- BreakDepthImmediate<Decoder::kNoValidate> imm(&decoder, code->at(pc));
+ BranchDepthImmediate<Decoder::kNoValidate> imm(&decoder,
+ code->at(pc));
len = DoBreak(code, pc, imm.depth);
TRACE(" br => @%zu\n", pc + len);
break;
}
case kExprBrIf: {
- BreakDepthImmediate<Decoder::kNoValidate> imm(&decoder, code->at(pc));
+ BranchDepthImmediate<Decoder::kNoValidate> imm(&decoder,
+ code->at(pc));
WasmValue cond = Pop();
bool is_true = cond.to<uint32_t>() != 0;
if (is_true) {
@@ -2475,7 +2478,7 @@ class ThreadImpl {
ASMJS_STORE_CASE(F32AsmjsStoreMem, float, float);
ASMJS_STORE_CASE(F64AsmjsStoreMem, double, double);
#undef ASMJS_STORE_CASE
- case kExprGrowMemory: {
+ case kExprMemoryGrow: {
MemoryIndexImmediate<Decoder::kNoValidate> imm(&decoder,
code->at(pc));
uint32_t delta_pages = Pop().to<uint32_t>();
@@ -2710,11 +2713,15 @@ class ThreadImpl {
return {ExternalCallResult::EXTERNAL_RETURNED};
}
- ExternalCallResult CallExternalWasmFunction(
- Isolate* isolate, Handle<WasmInstanceObject> instance,
- const WasmCode* code, FunctionSig* sig) {
+ ExternalCallResult CallExternalWasmFunction(Isolate* isolate,
+ Handle<Object> object_ref,
+ const WasmCode* code,
+ FunctionSig* sig) {
+ wasm::WasmFeatures enabled_features =
+ wasm::WasmFeaturesFromIsolate(isolate);
+
if (code->kind() == WasmCode::kWasmToJsWrapper &&
- !IsJSCompatibleSignature(sig)) {
+ !IsJSCompatibleSignature(sig, enabled_features.bigint)) {
isolate->Throw(*isolate->factory()->NewTypeError(
MessageTemplate::kWasmTrapTypeError));
return TryHandleException(isolate);
@@ -2770,18 +2777,17 @@ class ThreadImpl {
// Wrap the arg_buffer and the code target data pointers in handles. As
// these are aligned pointers, to the GC it will look like Smis.
- Handle<Object> arg_buffer_obj(reinterpret_cast<Object*>(arg_buffer.data()),
- isolate);
+ Handle<Object> arg_buffer_obj(
+ Object(reinterpret_cast<Address>(arg_buffer.data())), isolate);
DCHECK(!arg_buffer_obj->IsHeapObject());
- Handle<Object> code_entry_obj(
- reinterpret_cast<Object*>(code->instruction_start()), isolate);
+ Handle<Object> code_entry_obj(Object(code->instruction_start()), isolate);
DCHECK(!code_entry_obj->IsHeapObject());
static_assert(compiler::CWasmEntryParameters::kNumParameters == 3,
"code below needs adaption");
Handle<Object> args[compiler::CWasmEntryParameters::kNumParameters];
args[compiler::CWasmEntryParameters::kCodeEntry] = code_entry_obj;
- args[compiler::CWasmEntryParameters::kWasmInstance] = instance;
+ args[compiler::CWasmEntryParameters::kObjectRef] = object_ref;
args[compiler::CWasmEntryParameters::kArgumentsBuffer] = arg_buffer_obj;
Handle<Object> receiver = isolate->factory()->undefined_value();
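Note: the "look like Smis" comment relies on V8's pointer tagging: Smis carry a clear low bit and heap pointers a set one, so any 2-byte-aligned C++ pointer has a Smi-shaped bit pattern and the GC will not try to trace it. A small illustration of the invariant (the tag scheme is stated here as an assumption):

#include <cassert>
#include <cstdint>

int main() {
  // Assumed tagging scheme: low bit 1 = heap object, low bit 0 = Smi.
  constexpr uintptr_t kHeapObjectTagMask = 1;

  alignas(8) static uint64_t buffer[4];  // Any >= 2-byte alignment works.
  uintptr_t raw = reinterpret_cast<uintptr_t>(&buffer[0]);

  // The aligned address has a clear low bit, so a GC scanning it reads it
  // as a Smi and never follows it as a heap pointer.
  assert((raw & kHeapObjectTagMask) == 0);
}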
@@ -2792,9 +2798,9 @@ class ThreadImpl {
maybe_retval.is_null() ? " with exception" : "");
if (maybe_retval.is_null()) {
- // JSEntryStub may through a stack overflow before we actually get to wasm
- // code or back to the interpreter, meaning the thread-in-wasm flag won't
- // be cleared.
+ // JSEntry may throw a stack overflow before we actually get to wasm code
+ // or back to the interpreter, meaning the thread-in-wasm flag won't be
+ // cleared.
if (trap_handler::IsThreadInWasm()) {
trap_handler::ClearThreadInWasm();
}
@@ -2844,19 +2850,18 @@ class ThreadImpl {
}
ExternalCallResult CallImportedFunction(uint32_t function_index) {
+ DCHECK_GT(module()->num_imported_functions, function_index);
// Use a new HandleScope to avoid leaking / accumulating handles in the
// outer scope.
Isolate* isolate = instance_object_->GetIsolate();
HandleScope handle_scope(isolate);
- DCHECK_GT(module()->num_imported_functions, function_index);
- Handle<WasmInstanceObject> instance;
ImportedFunctionEntry entry(instance_object_, function_index);
- instance = handle(entry.instance(), isolate);
+ Handle<Object> object_ref(entry.object_ref(), isolate);
WasmCode* code =
GetTargetCode(isolate->wasm_engine()->code_manager(), entry.target());
- FunctionSig* sig = codemap()->module()->functions[function_index].sig;
- return CallExternalWasmFunction(isolate, instance, code, sig);
+ FunctionSig* sig = module()->functions[function_index].sig;
+ return CallExternalWasmFunction(isolate, object_ref, code, sig);
}
ExternalCallResult CallIndirectFunction(uint32_t table_index,
@@ -2900,28 +2905,20 @@ class ThreadImpl {
return {ExternalCallResult::SIGNATURE_MISMATCH};
}
- Handle<WasmInstanceObject> instance = handle(entry.instance(), isolate);
- WasmCode* code =
- GetTargetCode(isolate->wasm_engine()->code_manager(), entry.target());
-
- // Call either an internal or external WASM function.
HandleScope scope(isolate);
FunctionSig* signature = module()->signatures[sig_index];
+ Handle<Object> object_ref = handle(entry.object_ref(), isolate);
+ WasmCode* code =
+ GetTargetCode(isolate->wasm_engine()->code_manager(), entry.target());
- if (code->kind() == WasmCode::kFunction) {
- if (!instance_object_.is_identical_to(instance)) {
- // Cross instance call.
- return CallExternalWasmFunction(isolate, instance, code, signature);
- }
- return {ExternalCallResult::INTERNAL, codemap()->GetCode(code->index())};
+ if (!object_ref->IsWasmInstanceObject() || /* call to an import */
+ !instance_object_.is_identical_to(object_ref) /* cross-instance */) {
+ return CallExternalWasmFunction(isolate, object_ref, code, signature);
}
- // Call to external function.
- if (code->kind() == WasmCode::kInterpreterEntry ||
- code->kind() == WasmCode::kWasmToJsWrapper) {
- return CallExternalWasmFunction(isolate, instance, code, signature);
- }
- return {ExternalCallResult::INVALID_FUNC};
+ DCHECK(code->kind() == WasmCode::kInterpreterEntry ||
+ code->kind() == WasmCode::kFunction);
+ return {ExternalCallResult::INTERNAL, codemap()->GetCode(code->index())};
}
inline Activation current_activation() {
@@ -3101,8 +3098,8 @@ class WasmInterpreterInternals : public ZoneObject {
namespace {
void NopFinalizer(const v8::WeakCallbackInfo<void>& data) {
- Object** global_handle_location =
- reinterpret_cast<Object**>(data.GetParameter());
+ Address* global_handle_location =
+ reinterpret_cast<Address*>(data.GetParameter());
GlobalHandles::Destroy(global_handle_location);
}
@@ -3110,8 +3107,7 @@ Handle<WasmInstanceObject> MakeWeak(
Isolate* isolate, Handle<WasmInstanceObject> instance_object) {
Handle<WasmInstanceObject> weak_instance =
isolate->global_handles()->Create<WasmInstanceObject>(*instance_object);
- Object** global_handle_location =
- Handle<Object>::cast(weak_instance).location();
+ Address* global_handle_location = weak_instance.location();
GlobalHandles::MakeWeak(global_handle_location, global_handle_location,
&NopFinalizer, v8::WeakCallbackType::kParameter);
return weak_instance;
diff --git a/deps/v8/src/wasm/wasm-js.cc b/deps/v8/src/wasm/wasm-js.cc
index 314db914ed..4ad7d49076 100644
--- a/deps/v8/src/wasm/wasm-js.cc
+++ b/deps/v8/src/wasm/wasm-js.cc
@@ -4,10 +4,13 @@
#include "src/wasm/wasm-js.h"
+#include <string>
+
#include "src/api-inl.h"
#include "src/api-natives.h"
#include "src/assert-scope.h"
#include "src/ast/ast.h"
+#include "src/base/overflowing-math.h"
#include "src/execution.h"
#include "src/handles.h"
#include "src/heap/factory.h"
@@ -22,6 +25,7 @@
#include "src/wasm/wasm-limits.h"
#include "src/wasm/wasm-memory.h"
#include "src/wasm/wasm-objects-inl.h"
+#include "src/wasm/wasm-serialization.h"
using v8::internal::wasm::ErrorThrower;
@@ -41,7 +45,7 @@ class WasmStreaming::WasmStreamingImpl {
}
void OnBytesReceived(const uint8_t* bytes, size_t size) {
- streaming_decoder_->OnBytesReceived(i::Vector<const uint8_t>(bytes, size));
+ streaming_decoder_->OnBytesReceived(i::VectorOf(bytes, size));
}
void Finish() { streaming_decoder_->Finish(); }
@@ -58,6 +62,20 @@ class WasmStreaming::WasmStreamingImpl {
Utils::OpenHandle(*exception.ToLocalChecked()));
}
+ bool SetCompiledModuleBytes(const uint8_t* bytes, size_t size) {
+ if (!i::wasm::IsSupportedVersion({bytes, size})) return false;
+ return streaming_decoder_->SetCompiledModuleBytes({bytes, size});
+ }
+
+ void SetClient(std::shared_ptr<Client> client) {
+ // There are no other event notifications, so just pass the client to the
+ // decoder.
+ // Wrap the client with a callback here so we can also wrap the result.
+ streaming_decoder_->SetModuleCompiledCallback(
+ [client](const std::shared_ptr<i::wasm::NativeModule>& native_module) {
+ client->OnModuleCompiled(Utils::Convert(native_module));
+ });
+ }
+
private:
Isolate* isolate_ = nullptr;
std::shared_ptr<internal::wasm::StreamingDecoder> streaming_decoder_;
@@ -81,6 +99,14 @@ void WasmStreaming::Abort(MaybeLocal<Value> exception) {
impl_->Abort(exception);
}
+bool WasmStreaming::SetCompiledModuleBytes(const uint8_t* bytes, size_t size) {
+ return impl_->SetCompiledModuleBytes(bytes, size);
+}
+
+void WasmStreaming::SetClient(std::shared_ptr<Client> client) {
+ impl_->SetClient(client);
+}
+
// static
std::shared_ptr<WasmStreaming> WasmStreaming::Unpack(Isolate* isolate,
Local<Value> value) {
@@ -210,10 +236,13 @@ namespace {
class AsyncCompilationResolver : public i::wasm::CompilationResultResolver {
public:
AsyncCompilationResolver(i::Isolate* isolate, i::Handle<i::JSPromise> promise)
- : promise_(isolate->global_handles()->Create(*promise)) {}
+ : promise_(isolate->global_handles()->Create(*promise)) {
+ i::GlobalHandles::AnnotateStrongRetainer(promise_.location(),
+ kGlobalPromiseHandle);
+ }
~AsyncCompilationResolver() override {
- i::GlobalHandles::Destroy(i::Handle<i::Object>::cast(promise_).location());
+ i::GlobalHandles::Destroy(promise_.location());
}
void OnCompilationSucceeded(i::Handle<i::WasmModuleObject> result) override {
@@ -235,10 +264,14 @@ class AsyncCompilationResolver : public i::wasm::CompilationResultResolver {
}
private:
+ static constexpr char kGlobalPromiseHandle[] =
+ "AsyncCompilationResolver::promise_";
bool finished_ = false;
i::Handle<i::JSPromise> promise_;
};
+constexpr char AsyncCompilationResolver::kGlobalPromiseHandle[];
+
// This class resolves the result of WebAssembly.instantiate(module, imports).
// It just places the instantiation result in the supplied {promise}.
class InstantiateModuleResultResolver
@@ -246,10 +279,13 @@ class InstantiateModuleResultResolver
public:
InstantiateModuleResultResolver(i::Isolate* isolate,
i::Handle<i::JSPromise> promise)
- : promise_(isolate->global_handles()->Create(*promise)) {}
+ : promise_(isolate->global_handles()->Create(*promise)) {
+ i::GlobalHandles::AnnotateStrongRetainer(promise_.location(),
+ kGlobalPromiseHandle);
+ }
~InstantiateModuleResultResolver() override {
- i::GlobalHandles::Destroy(i::Handle<i::Object>::cast(promise_).location());
+ i::GlobalHandles::Destroy(promise_.location());
}
void OnInstantiationSucceeded(
@@ -268,9 +304,13 @@ class InstantiateModuleResultResolver
}
private:
+ static constexpr char kGlobalPromiseHandle[] =
+ "InstantiateModuleResultResolver::promise_";
i::Handle<i::JSPromise> promise_;
};
+constexpr char InstantiateModuleResultResolver::kGlobalPromiseHandle[];
+
// This class resolves the result of WebAssembly.instantiate(bytes, imports).
// For that it creates a new {JSObject} which contains both the provided
// {WasmModuleObject} and the resulting {WebAssemblyInstanceObject} itself.
@@ -282,11 +322,16 @@ class InstantiateBytesResultResolver
i::Handle<i::WasmModuleObject> module)
: isolate_(isolate),
promise_(isolate_->global_handles()->Create(*promise)),
- module_(isolate_->global_handles()->Create(*module)) {}
+ module_(isolate_->global_handles()->Create(*module)) {
+ i::GlobalHandles::AnnotateStrongRetainer(promise_.location(),
+ kGlobalPromiseHandle);
+ i::GlobalHandles::AnnotateStrongRetainer(module_.location(),
+ kGlobalModuleHandle);
+ }
~InstantiateBytesResultResolver() override {
- i::GlobalHandles::Destroy(i::Handle<i::Object>::cast(promise_).location());
- i::GlobalHandles::Destroy(i::Handle<i::Object>::cast(module_).location());
+ i::GlobalHandles::Destroy(promise_.location());
+ i::GlobalHandles::Destroy(module_.location());
}
void OnInstantiationSucceeded(
@@ -296,20 +341,14 @@ class InstantiateBytesResultResolver
i::Handle<i::JSObject> result =
isolate_->factory()->NewJSObject(isolate_->object_function());
- const uint8_t* instance_str = reinterpret_cast<const uint8_t*>("instance");
i::Handle<i::String> instance_name =
isolate_->factory()
- ->NewStringFromOneByte(i::Vector<const uint8_t>(
- instance_str,
- i::StrLength(reinterpret_cast<const char*>(instance_str))))
+ ->NewStringFromOneByte(i::StaticCharVector("instance"))
.ToHandleChecked();
- const uint8_t* module_str = reinterpret_cast<const uint8_t*>("module");
i::Handle<i::String> module_name =
isolate_->factory()
- ->NewStringFromOneByte(i::Vector<const uint8_t>(
- module_str,
- i::StrLength(reinterpret_cast<const char*>(module_str))))
+ ->NewStringFromOneByte(i::StaticCharVector("module"))
.ToHandleChecked();
i::JSObject::AddProperty(isolate_, result, instance_name, instance,
@@ -328,11 +367,18 @@ class InstantiateBytesResultResolver
}
private:
+ static constexpr char kGlobalPromiseHandle[] =
+ "InstantiateBytesResultResolver::promise_";
+ static constexpr char kGlobalModuleHandle[] =
+ "InstantiateBytesResultResolver::module_";
i::Isolate* isolate_;
i::Handle<i::JSPromise> promise_;
i::Handle<i::WasmModuleObject> module_;
};
+constexpr char InstantiateBytesResultResolver::kGlobalPromiseHandle[];
+constexpr char InstantiateBytesResultResolver::kGlobalModuleHandle[];
+
// This class is the {CompilationResultResolver} for
// WebAssembly.instantiate(bytes, imports). When compilation finishes,
// {AsyncInstantiate} is started on the compilation result.
@@ -347,14 +393,19 @@ class AsyncInstantiateCompileResultResolver
maybe_imports_(maybe_imports.is_null()
? maybe_imports
: isolate_->global_handles()->Create(
- *maybe_imports.ToHandleChecked())) {}
+ *maybe_imports.ToHandleChecked())) {
+ i::GlobalHandles::AnnotateStrongRetainer(promise_.location(),
+ kGlobalPromiseHandle);
+ if (!maybe_imports_.is_null()) {
+ i::GlobalHandles::AnnotateStrongRetainer(
+ maybe_imports_.ToHandleChecked().location(), kGlobalImportsHandle);
+ }
+ }
~AsyncInstantiateCompileResultResolver() override {
- i::GlobalHandles::Destroy(i::Handle<i::Object>::cast(promise_).location());
+ i::GlobalHandles::Destroy(promise_.location());
if (!maybe_imports_.is_null()) {
- i::GlobalHandles::Destroy(
- i::Handle<i::Object>::cast(maybe_imports_.ToHandleChecked())
- .location());
+ i::GlobalHandles::Destroy(maybe_imports_.ToHandleChecked().location());
}
}
@@ -377,19 +428,63 @@ class AsyncInstantiateCompileResultResolver
}
private:
+ static constexpr char kGlobalPromiseHandle[] =
+ "AsyncInstantiateCompileResultResolver::promise_";
+ static constexpr char kGlobalImportsHandle[] =
+ "AsyncInstantiateCompileResultResolver::module_";
bool finished_ = false;
i::Isolate* isolate_;
i::Handle<i::JSPromise> promise_;
i::MaybeHandle<i::JSReceiver> maybe_imports_;
};
+constexpr char AsyncInstantiateCompileResultResolver::kGlobalPromiseHandle[];
+constexpr char AsyncInstantiateCompileResultResolver::kGlobalImportsHandle[];
+
+std::string ToString(const char* name) { return std::string(name); }
+
+std::string ToString(const i::Handle<i::String> name) {
+ return std::string("Property '") + name->ToCString().get() + "'";
+}
+
+// Web IDL: '[EnforceRange] unsigned long'
+// Previously called ToNonWrappingUint32 in the draft WebAssembly JS spec.
+// https://heycam.github.io/webidl/#EnforceRange
+template <typename T>
+bool EnforceUint32(T argument_name, Local<v8::Value> v, Local<Context> context,
+ ErrorThrower* thrower, uint32_t* res) {
+ double double_number;
+
+ if (!v->NumberValue(context).To(&double_number)) {
+ thrower->TypeError("%s must be convertible to a number",
+ ToString(argument_name).c_str());
+ return false;
+ }
+ if (!std::isfinite(double_number)) {
+ thrower->TypeError("%s must be convertible to a valid number",
+ ToString(argument_name).c_str());
+ return false;
+ }
+ if (double_number < 0) {
+ thrower->TypeError("%s must be non-negative",
+ ToString(argument_name).c_str());
+ return false;
+ }
+ if (double_number > std::numeric_limits<uint32_t>::max()) {
+ thrower->TypeError("%s must be in the unsigned long range",
+ ToString(argument_name).c_str());
+ return false;
+ }
+
+ *res = static_cast<uint32_t>(double_number);
+ return true;
+}
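Note: under [EnforceRange], non-finite, negative, and out-of-range inputs are rejected with a TypeError rather than wrapped; a pure-C++ sketch of the same acceptance rule, with the thrower reduced to a boolean result:

#include <cmath>
#include <cstdint>
#include <limits>

// Sketch of the [EnforceRange] unsigned long conversion from the diff,
// with error reporting replaced by a plain failure return.
bool EnforceUint32Sketch(double number, uint32_t* out) {
  if (!std::isfinite(number)) return false;  // NaN, +/-Infinity rejected.
  if (number < 0) return false;              // Negative values rejected.
  if (number > std::numeric_limits<uint32_t>::max()) return false;
  *out = static_cast<uint32_t>(number);      // Truncates any fraction.
  return true;
}

By contrast, a plain ToUint32-style conversion would wrap -1 to 4294967295; [EnforceRange] rejects such inputs outright.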
} // namespace
// WebAssembly.compile(bytes) -> Promise
void WebAssemblyCompile(const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::Isolate* isolate = args.GetIsolate();
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- MicrotasksScope runs_microtasks(isolate, MicrotasksScope::kRunMicrotasks);
HandleScope scope(isolate);
ScheduledErrorThrower thrower(i_isolate, "WebAssembly.compile()");
@@ -424,7 +519,6 @@ void WebAssemblyCompileStreaming(
const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::Isolate* isolate = args.GetIsolate();
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- MicrotasksScope runs_microtasks(isolate, MicrotasksScope::kRunMicrotasks);
HandleScope scope(isolate);
ScheduledErrorThrower thrower(i_isolate, "WebAssembly.compile()");
Local<Context> context = isolate->GetCurrentContext();
@@ -643,8 +737,6 @@ void WebAssemblyInstance(const v8::FunctionCallbackInfo<v8::Value>& args) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
i_isolate->CountUsage(
v8::Isolate::UseCounterFeature::kWebAssemblyInstantiation);
- MicrotasksScope does_not_run_microtasks(isolate,
- MicrotasksScope::kDoNotRunMicrotasks);
HandleScope scope(args.GetIsolate());
if (i_isolate->wasm_instance_callback()(args)) return;
@@ -675,7 +767,6 @@ void WebAssemblyInstantiateStreaming(
i_isolate->CountUsage(
v8::Isolate::UseCounterFeature::kWebAssemblyInstantiation);
- MicrotasksScope runs_microtasks(isolate, MicrotasksScope::kRunMicrotasks);
HandleScope scope(isolate);
Local<Context> context = isolate->GetCurrentContext();
ScheduledErrorThrower thrower(i_isolate,
@@ -754,7 +845,6 @@ void WebAssemblyInstantiate(const v8::FunctionCallbackInfo<v8::Value>& args) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
i_isolate->CountUsage(
v8::Isolate::UseCounterFeature::kWebAssemblyInstantiation);
- MicrotasksScope runs_microtasks(isolate, MicrotasksScope::kRunMicrotasks);
ScheduledErrorThrower thrower(i_isolate, "WebAssembly Instantiation");
@@ -829,30 +919,74 @@ void WebAssemblyInstantiate(const v8::FunctionCallbackInfo<v8::Value>& args) {
}
bool GetIntegerProperty(v8::Isolate* isolate, ErrorThrower* thrower,
- Local<Context> context, Local<v8::Object> object,
- Local<String> property, int64_t* result,
+ Local<Context> context, v8::Local<v8::Value> value,
+ i::Handle<i::String> property_name, int64_t* result,
int64_t lower_bound, uint64_t upper_bound) {
- v8::MaybeLocal<v8::Value> maybe = object->Get(context, property);
+ uint32_t number;
+ if (!EnforceUint32(property_name, value, context, thrower, &number)) {
+ return false;
+ }
+ if (number < lower_bound) {
+ thrower->RangeError("Property '%s': value %" PRIu32
+ " is below the lower bound %" PRIx64,
+ property_name->ToCString().get(), number, lower_bound);
+ return false;
+ }
+ if (number > upper_bound) {
+ thrower->RangeError("Property '%s': value %" PRIu32
+ " is above the upper bound %" PRIu64,
+ property_name->ToCString().get(), number, upper_bound);
+ return false;
+ }
+
+ *result = static_cast<int64_t>(number);
+ return true;
+}
+
+bool GetRequiredIntegerProperty(v8::Isolate* isolate, ErrorThrower* thrower,
+ Local<Context> context,
+ Local<v8::Object> object,
+ Local<String> property, int64_t* result,
+ int64_t lower_bound, uint64_t upper_bound) {
v8::Local<v8::Value> value;
- if (maybe.ToLocal(&value)) {
- int64_t number;
- if (!value->IntegerValue(context).To(&number)) return false;
- if (number < lower_bound) {
- thrower->RangeError("Property value %" PRId64
- " is below the lower bound %" PRIx64,
- number, lower_bound);
- return false;
- }
- if (number > static_cast<int64_t>(upper_bound)) {
- thrower->RangeError("Property value %" PRId64
- " is above the upper bound %" PRIu64,
- number, upper_bound);
- return false;
- }
- *result = static_cast<int>(number);
+ if (!object->Get(context, property).ToLocal(&value)) {
+ return false;
+ }
+
+ i::Handle<i::String> property_name = v8::Utils::OpenHandle(*property);
+
+ // Web IDL: dictionary presence
+ // https://heycam.github.io/webidl/#dfn-present
+ if (value->IsUndefined()) {
+ thrower->TypeError("Property '%s' is required",
+ property_name->ToCString().get());
+ return false;
+ }
+
+ return GetIntegerProperty(isolate, thrower, context, value, property_name,
+ result, lower_bound, upper_bound);
+}
+
+bool GetOptionalIntegerProperty(v8::Isolate* isolate, ErrorThrower* thrower,
+ Local<Context> context,
+ Local<v8::Object> object,
+ Local<String> property, int64_t* result,
+ int64_t lower_bound, uint64_t upper_bound) {
+ v8::Local<v8::Value> value;
+ if (!object->Get(context, property).ToLocal(&value)) {
+ return false;
+ }
+
+ // Web IDL: dictionary presence
+ // https://heycam.github.io/webidl/#dfn-present
+ if (value->IsUndefined()) {
return true;
}
- return false;
+
+ i::Handle<i::String> property_name = v8::Utils::OpenHandle(*property);
+
+ return GetIntegerProperty(isolate, thrower, context, value, property_name,
+ result, lower_bound, upper_bound);
}
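This split encodes the Web IDL notion of a dictionary member being "present": absence is exactly a lookup that yields undefined. GetRequiredIntegerProperty turns absence into a TypeError, while GetOptionalIntegerProperty succeeds and leaves *result at its caller-supplied default. A minimal sketch of the same pattern over a plain map (names are illustrative, not V8 code):

    #include <cstdint>
    #include <cstdio>
    #include <map>
    #include <string>

    using Dict = std::map<std::string, double>;

    // Absent member (Web IDL "not present") -> error for required lookups.
    bool GetRequired(const Dict& d, const std::string& key, int64_t* result) {
      auto it = d.find(key);
      if (it == d.end()) {
        std::fprintf(stderr, "TypeError: Property '%s' is required\n",
                     key.c_str());
        return false;
      }
      *result = static_cast<int64_t>(it->second);
      return true;
    }

    // Absent member -> success; *result keeps its caller-supplied default.
    bool GetOptional(const Dict& d, const std::string& key, int64_t* result) {
      auto it = d.find(key);
      if (it == d.end()) return true;
      *result = static_cast<int64_t>(it->second);
      return true;
    }

    int main() {
      Dict descriptor{{"initial", 1}};
      int64_t initial = 0, maximum = -1;  // -1 means "no maximum", as above
      if (!GetRequired(descriptor, "initial", &initial)) return 1;
      GetOptional(descriptor, "maximum", &maximum);
      std::printf("initial=%lld maximum=%lld\n",
                  static_cast<long long>(initial),
                  static_cast<long long>(maximum));
    }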
// new WebAssembly.Table(args) -> WebAssembly.Table
@@ -886,27 +1020,23 @@ void WebAssemblyTable(const v8::FunctionCallbackInfo<v8::Value>& args) {
}
// The descriptor's 'initial'.
int64_t initial = 0;
- if (!GetIntegerProperty(isolate, &thrower, context, descriptor,
- v8_str(isolate, "initial"), &initial, 0,
- i::FLAG_wasm_max_table_size)) {
+ if (!GetRequiredIntegerProperty(isolate, &thrower, context, descriptor,
+ v8_str(isolate, "initial"), &initial, 0,
+ i::FLAG_wasm_max_table_size)) {
return;
}
// The descriptor's 'maximum'.
int64_t maximum = -1;
- Local<String> maximum_key = v8_str(isolate, "maximum");
- Maybe<bool> has_maximum = descriptor->Has(context, maximum_key);
-
- if (!has_maximum.IsNothing() && has_maximum.FromJust()) {
- if (!GetIntegerProperty(isolate, &thrower, context, descriptor, maximum_key,
- &maximum, initial,
- i::wasm::kSpecMaxWasmTableSize)) {
- return;
- }
+ if (!GetOptionalIntegerProperty(isolate, &thrower, context, descriptor,
+ v8_str(isolate, "maximum"), &maximum, initial,
+ i::wasm::kSpecMaxWasmTableSize)) {
+ return;
}
i::Handle<i::FixedArray> fixed_array;
- i::Handle<i::JSObject> table_obj = i::WasmTableObject::New(
- i_isolate, static_cast<uint32_t>(initial), maximum, &fixed_array);
+ i::Handle<i::JSObject> table_obj =
+ i::WasmTableObject::New(i_isolate, static_cast<uint32_t>(initial),
+ static_cast<uint32_t>(maximum), &fixed_array);
v8::ReturnValue<v8::Value> return_value = args.GetReturnValue();
return_value.Set(Utils::ToLocal(table_obj));
}
@@ -928,22 +1058,17 @@ void WebAssemblyMemory(const v8::FunctionCallbackInfo<v8::Value>& args) {
Local<v8::Object> descriptor = Local<Object>::Cast(args[0]);
// The descriptor's 'initial'.
int64_t initial = 0;
- if (!GetIntegerProperty(isolate, &thrower, context, descriptor,
- v8_str(isolate, "initial"), &initial, 0,
- i::FLAG_wasm_max_mem_pages)) {
+ if (!GetRequiredIntegerProperty(isolate, &thrower, context, descriptor,
+ v8_str(isolate, "initial"), &initial, 0,
+ i::wasm::max_mem_pages())) {
return;
}
// The descriptor's 'maximum'.
int64_t maximum = -1;
- Local<String> maximum_key = v8_str(isolate, "maximum");
- Maybe<bool> has_maximum = descriptor->Has(context, maximum_key);
-
- if (!has_maximum.IsNothing() && has_maximum.FromJust()) {
- if (!GetIntegerProperty(isolate, &thrower, context, descriptor, maximum_key,
- &maximum, initial,
- i::wasm::kSpecMaxWasmMemoryPages)) {
- return;
- }
+ if (!GetOptionalIntegerProperty(isolate, &thrower, context, descriptor,
+ v8_str(isolate, "maximum"), &maximum, initial,
+ i::wasm::kSpecMaxWasmMemoryPages)) {
+ return;
}
bool is_shared_memory = false;
@@ -985,7 +1110,7 @@ void WebAssemblyMemory(const v8::FunctionCallbackInfo<v8::Value>& args) {
}
}
i::Handle<i::JSObject> memory_obj = i::WasmMemoryObject::New(
- i_isolate, buffer, static_cast<int32_t>(maximum));
+ i_isolate, buffer, static_cast<uint32_t>(maximum));
args.GetReturnValue().Set(Utils::ToLocal(memory_obj));
}
@@ -1032,11 +1157,16 @@ void WebAssemblyGlobal(const v8::FunctionCallbackInfo<v8::Value>& args) {
type = i::wasm::kWasmI32;
} else if (string->StringEquals(v8_str(isolate, "f32"))) {
type = i::wasm::kWasmF32;
+ } else if (string->StringEquals(v8_str(isolate, "i64"))) {
+ type = i::wasm::kWasmI64;
} else if (string->StringEquals(v8_str(isolate, "f64"))) {
type = i::wasm::kWasmF64;
+ } else if (string->StringEquals(v8_str(isolate, "anyref"))) {
+ type = i::wasm::kWasmAnyRef;
} else {
thrower.TypeError(
- "Descriptor property 'value' must be 'i32', 'f32', or 'f64'");
+ "Descriptor property 'value' must be 'i32', 'i64', 'f32', or "
+ "'f64'");
return;
}
}
@@ -1044,7 +1174,8 @@ void WebAssemblyGlobal(const v8::FunctionCallbackInfo<v8::Value>& args) {
const uint32_t offset = 0;
i::MaybeHandle<i::WasmGlobalObject> maybe_global_obj =
i::WasmGlobalObject::New(i_isolate, i::MaybeHandle<i::JSArrayBuffer>(),
- type, offset, is_mutable);
+ i::MaybeHandle<i::FixedArray>(), type, offset,
+ is_mutable);
i::Handle<i::WasmGlobalObject> global_obj;
if (!maybe_global_obj.ToHandle(&global_obj)) {
@@ -1065,6 +1196,22 @@ void WebAssemblyGlobal(const v8::FunctionCallbackInfo<v8::Value>& args) {
global_obj->SetI32(i32_value);
break;
}
+ case i::wasm::kWasmI64: {
+ int64_t i64_value = 0;
+ if (!value->IsUndefined()) {
+ auto enabled_features = i::wasm::WasmFeaturesFromIsolate(i_isolate);
+ if (!enabled_features.bigint) {
+ thrower.TypeError("Can't set the value of i64 WebAssembly.Global");
+ return;
+ }
+
+ v8::Local<v8::BigInt> bigint_value;
+ if (!value->ToBigInt(context).ToLocal(&bigint_value)) return;
+ i64_value = bigint_value->Int64Value();
+ }
+ global_obj->SetI64(i64_value);
+ break;
+ }
case i::wasm::kWasmF32: {
float f32_value = 0;
if (!value->IsUndefined()) {
@@ -1087,6 +1234,17 @@ void WebAssemblyGlobal(const v8::FunctionCallbackInfo<v8::Value>& args) {
global_obj->SetF64(f64_value);
break;
}
+ case i::wasm::kWasmAnyRef: {
+ if (args.Length() < 2) {
+ // When no initial value is provided, we have to use the WebAssembly
+ // default value 'null', and not the JS default value 'undefined'.
+ global_obj->SetAnyRef(
+ handle(i::ReadOnlyRoots(i_isolate).null_value(), i_isolate));
+ break;
+ }
+ global_obj->SetAnyRef(Utils::OpenHandle(*value));
+ break;
+ }
default:
UNREACHABLE();
}
@@ -1151,31 +1309,39 @@ void WebAssemblyTableGrow(const v8::FunctionCallbackInfo<v8::Value>& args) {
Local<Context> context = isolate->GetCurrentContext();
EXTRACT_THIS(receiver, WasmTableObject);
- int64_t grow_by = 0;
- if (!args[0]->IntegerValue(context).To(&grow_by)) return;
+ uint32_t grow_by;
+ if (!EnforceUint32("Argument 0", args[0], context, &thrower, &grow_by)) {
+ return;
+ }
+
i::Handle<i::FixedArray> old_array(receiver->functions(), i_isolate);
- int old_size = old_array->length();
+ uint32_t old_size = static_cast<uint32_t>(old_array->length());
- int64_t max_size64 = receiver->maximum_length()->Number();
- if (max_size64 < 0 || max_size64 > i::FLAG_wasm_max_table_size) {
+ uint64_t max_size64 = receiver->maximum_length()->Number();
+ if (max_size64 > i::FLAG_wasm_max_table_size) {
max_size64 = i::FLAG_wasm_max_table_size;
}
- if (grow_by < 0 || grow_by > max_size64 - old_size) {
- thrower.RangeError(grow_by < 0 ? "trying to shrink table"
- : "maximum table size exceeded");
+ DCHECK_LE(max_size64, std::numeric_limits<uint32_t>::max());
+
+ uint64_t new_size64 =
+ static_cast<uint64_t>(old_size) + static_cast<uint64_t>(grow_by);
+ if (new_size64 > max_size64) {
+ thrower.RangeError("maximum table size exceeded");
return;
}
-
- int new_size = static_cast<int>(old_size + grow_by);
- receiver->Grow(i_isolate, static_cast<uint32_t>(new_size - old_size));
+ uint32_t new_size = static_cast<uint32_t>(new_size64);
if (new_size != old_size) {
+ receiver->Grow(i_isolate, new_size - old_size);
+
i::Handle<i::FixedArray> new_array =
i_isolate->factory()->NewFixedArray(new_size);
- for (int i = 0; i < old_size; ++i) new_array->set(i, old_array->get(i));
- i::Object* null = i::ReadOnlyRoots(i_isolate).null_value();
- for (int i = old_size; i < new_size; ++i) new_array->set(i, null);
+ for (uint32_t i = 0; i < old_size; ++i) {
+ new_array->set(i, old_array->get(i));
+ }
+ i::Object null = i::ReadOnlyRoots(i_isolate).null_value();
+ for (uint32_t i = old_size; i < new_size; ++i) new_array->set(i, null);
receiver->set_functions(*new_array);
}
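Note the overflow discipline in the rewritten grow path: old_size + grow_by is formed in uint64_t before the bound check, so a grow_by close to UINT32_MAX cannot wrap and slip past the maximum. A standalone sketch of the idiom:

    #include <cassert>
    #include <cstdint>
    #include <limits>

    // Returns true and stores the new size if old_size + grow_by stays within
    // max_size; the sum is formed in 64 bits so it cannot wrap in 32 bits.
    bool ComputeGrownSize(uint32_t old_size, uint32_t grow_by,
                          uint32_t max_size, uint32_t* new_size) {
      uint64_t new_size64 = uint64_t{old_size} + uint64_t{grow_by};
      if (new_size64 > max_size) return false;  // "maximum table size exceeded"
      *new_size = static_cast<uint32_t>(new_size64);
      return true;
    }

    int main() {
      uint32_t n;
      assert(ComputeGrownSize(10, 5, 100, &n) && n == 15);
      // A 32-bit addition would wrap 10 + 0xFFFFFFFF to 9 and pass the check.
      assert(!ComputeGrownSize(10, std::numeric_limits<uint32_t>::max(), 100,
                               &n));
    }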
@@ -1193,15 +1359,19 @@ void WebAssemblyTableGet(const v8::FunctionCallbackInfo<v8::Value>& args) {
Local<Context> context = isolate->GetCurrentContext();
EXTRACT_THIS(receiver, WasmTableObject);
i::Handle<i::FixedArray> array(receiver->functions(), i_isolate);
- int64_t i = 0;
- if (!args[0]->IntegerValue(context).To(&i)) return;
+
+ uint32_t index;
+ if (!EnforceUint32("Argument 0", args[0], context, &thrower, &index)) {
+ return;
+ }
+
v8::ReturnValue<v8::Value> return_value = args.GetReturnValue();
- if (i < 0 || i >= array->length()) {
- thrower.RangeError("index out of bounds");
+ if (index >= static_cast<uint32_t>(array->length())) {
+ thrower.RangeError("Index out of bounds");
return;
}
- i::Handle<i::Object> value(array->get(static_cast<int>(i)), i_isolate);
+ i::Handle<i::Object> value(array->get(static_cast<int>(index)), i_isolate);
return_value.Set(Utils::ToLocal(value));
}
@@ -1215,8 +1385,10 @@ void WebAssemblyTableSet(const v8::FunctionCallbackInfo<v8::Value>& args) {
EXTRACT_THIS(receiver, WasmTableObject);
// Parameter 0.
- int64_t index;
- if (!args[0]->IntegerValue(context).To(&index)) return;
+ uint32_t index;
+ if (!EnforceUint32("Argument 0", args[0], context, &thrower, &index)) {
+ return;
+ }
// Parameter 1.
i::Handle<i::Object> value = Utils::OpenHandle(*args[1]);
@@ -1226,12 +1398,12 @@ void WebAssemblyTableSet(const v8::FunctionCallbackInfo<v8::Value>& args) {
return;
}
- if (index < 0 || index >= receiver->functions()->length()) {
+ if (index >= static_cast<uint64_t>(receiver->functions()->length())) {
thrower.RangeError("index out of bounds");
return;
}
- i::WasmTableObject::Set(i_isolate, receiver, static_cast<int32_t>(index),
+ i::WasmTableObject::Set(i_isolate, receiver, index,
value->IsNull(i_isolate)
? i::Handle<i::JSFunction>::null()
: i::Handle<i::JSFunction>::cast(value));
@@ -1246,28 +1418,32 @@ void WebAssemblyMemoryGrow(const v8::FunctionCallbackInfo<v8::Value>& args) {
Local<Context> context = isolate->GetCurrentContext();
EXTRACT_THIS(receiver, WasmMemoryObject);
- int64_t delta_size = 0;
- if (!args[0]->IntegerValue(context).To(&delta_size)) return;
+ uint32_t delta_size;
+ if (!EnforceUint32("Argument 0", args[0], context, &thrower, &delta_size)) {
+ return;
+ }
- int64_t max_size64 = receiver->maximum_pages();
- if (max_size64 < 0 ||
- max_size64 > static_cast<int64_t>(i::FLAG_wasm_max_mem_pages)) {
- max_size64 = i::FLAG_wasm_max_mem_pages;
+ uint64_t max_size64 = receiver->maximum_pages();
+ if (max_size64 > uint64_t{i::wasm::max_mem_pages()}) {
+ max_size64 = i::wasm::max_mem_pages();
}
i::Handle<i::JSArrayBuffer> old_buffer(receiver->array_buffer(), i_isolate);
if (!old_buffer->is_growable()) {
thrower.RangeError("This memory cannot be grown");
return;
}
- int64_t old_size = old_buffer->byte_length() / i::wasm::kWasmPageSize;
- int64_t new_size64 = old_size + delta_size;
- if (delta_size < 0 || max_size64 < new_size64 || new_size64 < old_size) {
- thrower.RangeError(new_size64 < old_size ? "trying to shrink memory"
- : "maximum memory size exceeded");
+
+ DCHECK_LE(max_size64, std::numeric_limits<uint32_t>::max());
+
+ uint64_t old_size64 = old_buffer->byte_length() / i::wasm::kWasmPageSize;
+ uint64_t new_size64 = old_size64 + static_cast<uint64_t>(delta_size);
+
+ if (new_size64 > max_size64) {
+ thrower.RangeError("Maximum memory size exceeded");
return;
}
- int32_t ret = i::WasmMemoryObject::Grow(i_isolate, receiver,
- static_cast<uint32_t>(delta_size));
+
+ int32_t ret = i::WasmMemoryObject::Grow(i_isolate, receiver, delta_size);
if (ret == -1) {
thrower.RangeError("Unable to grow instance memory.");
return;
@@ -1318,15 +1494,26 @@ void WebAssemblyGlobalGetValueCommon(
case i::wasm::kWasmI32:
return_value.Set(receiver->GetI32());
break;
- case i::wasm::kWasmI64:
- thrower.TypeError("Can't get the value of i64 WebAssembly.Global");
+ case i::wasm::kWasmI64: {
+ auto enabled_features = i::wasm::WasmFeaturesFromIsolate(i_isolate);
+ if (enabled_features.bigint) {
+ Local<BigInt> value = BigInt::New(isolate, receiver->GetI64());
+
+ return_value.Set(value);
+ } else {
+ thrower.TypeError("Can't get the value of i64 WebAssembly.Global");
+ }
break;
+ }
case i::wasm::kWasmF32:
return_value.Set(receiver->GetF32());
break;
case i::wasm::kWasmF64:
return_value.Set(receiver->GetF64());
break;
+ case i::wasm::kWasmAnyRef:
+ return_value.Set(Utils::ToLocal(receiver->GetAnyRef()));
+ break;
default:
UNREACHABLE();
}
@@ -1357,6 +1544,10 @@ void WebAssemblyGlobalSetValue(
thrower.TypeError("Can't set the value of an immutable global.");
return;
}
+ if (args[0]->IsUndefined()) {
+ thrower.TypeError("Argument 0: must be a value");
+ return;
+ }
switch (receiver->type()) {
case i::wasm::kWasmI32: {
@@ -1365,9 +1556,17 @@ void WebAssemblyGlobalSetValue(
receiver->SetI32(i32_value);
break;
}
- case i::wasm::kWasmI64:
- thrower.TypeError("Can't set the value of i64 WebAssembly.Global");
+ case i::wasm::kWasmI64: {
+ auto enabled_features = i::wasm::WasmFeaturesFromIsolate(i_isolate);
+ if (enabled_features.bigint) {
+ v8::Local<v8::BigInt> bigint_value;
+ if (!args[0]->ToBigInt(context).ToLocal(&bigint_value)) return;
+ receiver->SetI64(bigint_value->Int64Value());
+ } else {
+ thrower.TypeError("Can't set the value of i64 WebAssembly.Global");
+ }
break;
+ }
case i::wasm::kWasmF32: {
double f64_value = 0;
if (!args[0]->NumberValue(context).To(&f64_value)) return;
@@ -1380,6 +1579,10 @@ void WebAssemblyGlobalSetValue(
receiver->SetF64(f64_value);
break;
}
+ case i::wasm::kWasmAnyRef: {
+ receiver->SetAnyRef(Utils::OpenHandle(*args[0]));
+ break;
+ }
default:
UNREACHABLE();
}
@@ -1417,31 +1620,36 @@ Handle<JSFunction> CreateFunc(Isolate* isolate, Handle<String> name,
Handle<JSFunction> InstallFunc(Isolate* isolate, Handle<JSObject> object,
const char* str, FunctionCallback func,
- int length = 0) {
+ int length = 0,
+ PropertyAttributes attributes = NONE) {
Handle<String> name = v8_str(isolate, str);
Handle<JSFunction> function = CreateFunc(isolate, name, func);
function->shared()->set_length(length);
- PropertyAttributes attributes = static_cast<PropertyAttributes>(DONT_ENUM);
JSObject::AddProperty(isolate, object, name, function, attributes);
return function;
}
+Handle<JSFunction> InstallConstructorFunc(Isolate* isolate,
+ Handle<JSObject> object,
+ const char* str,
+ FunctionCallback func) {
+ return InstallFunc(isolate, object, str, func, 1, DONT_ENUM);
+}
+
Handle<String> GetterName(Isolate* isolate, Handle<String> name) {
return Name::ToFunctionName(isolate, name, isolate->factory()->get_string())
.ToHandleChecked();
}
-void InstallGetter(Isolate* isolate, Handle<JSObject> object,
- const char* str, FunctionCallback func) {
+void InstallGetter(Isolate* isolate, Handle<JSObject> object, const char* str,
+ FunctionCallback func) {
Handle<String> name = v8_str(isolate, str);
Handle<JSFunction> function =
CreateFunc(isolate, GetterName(isolate, name), func);
- v8::PropertyAttribute attributes =
- static_cast<v8::PropertyAttribute>(v8::DontEnum);
Utils::ToLocal(object)->SetAccessorProperty(Utils::ToLocal(name),
Utils::ToLocal(function),
- Local<Function>(), attributes);
+ Local<Function>(), v8::None);
}
Handle<String> SetterName(Isolate* isolate, Handle<String> name) {
@@ -1459,8 +1667,7 @@ void InstallGetterSetter(Isolate* isolate, Handle<JSObject> object,
CreateFunc(isolate, SetterName(isolate, name), setter);
setter_func->shared()->set_length(1);
- v8::PropertyAttribute attributes =
- static_cast<v8::PropertyAttribute>(v8::DontEnum);
+ v8::PropertyAttribute attributes = v8::None;
Utils::ToLocal(object)->SetAccessorProperty(
Utils::ToLocal(name), Utils::ToLocal(getter_func),
@@ -1473,14 +1680,17 @@ void InstallGetterSetter(Isolate* isolate, Handle<JSObject> object,
// object explicitly and ignore implicit receiver.
void SetDummyInstanceTemplate(Isolate* isolate, Handle<JSFunction> fun) {
Handle<ObjectTemplateInfo> instance_template = NewObjectTemplate(isolate);
- fun->shared()->get_api_func_data()->set_instance_template(*instance_template);
+ FunctionTemplateInfo::SetInstanceTemplate(
+ isolate, handle(fun->shared()->get_api_func_data(), isolate),
+ instance_template);
}
+// static
void WasmJs::Install(Isolate* isolate, bool exposed_on_global_object) {
Handle<JSGlobalObject> global = isolate->global_object();
Handle<Context> context(global->native_context(), isolate);
// Install the JS API once only.
- Object* prev = context->get(Context::WASM_MODULE_CONSTRUCTOR_INDEX);
+ Object prev = context->get(Context::WASM_MODULE_CONSTRUCTOR_INDEX);
if (!prev->IsUndefined(isolate)) {
DCHECK(prev->IsJSFunction());
return;
@@ -1495,7 +1705,6 @@ void WasmJs::Install(Isolate* isolate, bool exposed_on_global_object) {
Handle<JSFunction> cons = factory->NewFunction(args);
JSFunction::SetPrototype(cons, isolate->initial_object_prototype());
Handle<JSObject> webassembly = factory->NewJSObject(cons, TENURED);
- PropertyAttributes attributes = static_cast<PropertyAttributes>(DONT_ENUM);
PropertyAttributes ro_attributes =
static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY);
@@ -1514,12 +1723,12 @@ void WasmJs::Install(Isolate* isolate, bool exposed_on_global_object) {
// Expose the API on the global object if configured to do so.
if (exposed_on_global_object) {
- JSObject::AddProperty(isolate, global, name, webassembly, attributes);
+ JSObject::AddProperty(isolate, global, name, webassembly, DONT_ENUM);
}
// Setup Module
Handle<JSFunction> module_constructor =
- InstallFunc(isolate, webassembly, "Module", WebAssemblyModule, 1);
+ InstallConstructorFunc(isolate, webassembly, "Module", WebAssemblyModule);
context->set_wasm_module_constructor(*module_constructor);
SetDummyInstanceTemplate(isolate, module_constructor);
JSFunction::EnsureHasInitialMap(module_constructor);
@@ -1538,8 +1747,8 @@ void WasmJs::Install(Isolate* isolate, bool exposed_on_global_object) {
v8_str(isolate, "WebAssembly.Module"), ro_attributes);
// Setup Instance
- Handle<JSFunction> instance_constructor =
- InstallFunc(isolate, webassembly, "Instance", WebAssemblyInstance, 1);
+ Handle<JSFunction> instance_constructor = InstallConstructorFunc(
+ isolate, webassembly, "Instance", WebAssemblyInstance);
context->set_wasm_instance_constructor(*instance_constructor);
SetDummyInstanceTemplate(isolate, instance_constructor);
JSFunction::EnsureHasInitialMap(instance_constructor);
@@ -1556,7 +1765,7 @@ void WasmJs::Install(Isolate* isolate, bool exposed_on_global_object) {
// Setup Table
Handle<JSFunction> table_constructor =
- InstallFunc(isolate, webassembly, "Table", WebAssemblyTable, 1);
+ InstallConstructorFunc(isolate, webassembly, "Table", WebAssemblyTable);
context->set_wasm_table_constructor(*table_constructor);
SetDummyInstanceTemplate(isolate, table_constructor);
JSFunction::EnsureHasInitialMap(table_constructor);
@@ -1574,7 +1783,7 @@ void WasmJs::Install(Isolate* isolate, bool exposed_on_global_object) {
// Setup Memory
Handle<JSFunction> memory_constructor =
- InstallFunc(isolate, webassembly, "Memory", WebAssemblyMemory, 1);
+ InstallConstructorFunc(isolate, webassembly, "Memory", WebAssemblyMemory);
context->set_wasm_memory_constructor(*memory_constructor);
SetDummyInstanceTemplate(isolate, memory_constructor);
JSFunction::EnsureHasInitialMap(memory_constructor);
@@ -1593,29 +1802,26 @@ void WasmJs::Install(Isolate* isolate, bool exposed_on_global_object) {
auto enabled_features = i::wasm::WasmFeaturesFromFlags();
// Setup Global
- if (enabled_features.mut_global) {
- Handle<JSFunction> global_constructor =
- InstallFunc(isolate, webassembly, "Global", WebAssemblyGlobal, 1);
- context->set_wasm_global_constructor(*global_constructor);
- SetDummyInstanceTemplate(isolate, global_constructor);
- JSFunction::EnsureHasInitialMap(global_constructor);
- Handle<JSObject> global_proto(
- JSObject::cast(global_constructor->instance_prototype()), isolate);
- i::Handle<i::Map> global_map = isolate->factory()->NewMap(
- i::WASM_GLOBAL_TYPE, WasmGlobalObject::kSize);
- JSFunction::SetInitialMap(global_constructor, global_map, global_proto);
- InstallFunc(isolate, global_proto, "valueOf", WebAssemblyGlobalValueOf, 0);
- InstallGetterSetter(isolate, global_proto, "value",
- WebAssemblyGlobalGetValue, WebAssemblyGlobalSetValue);
- JSObject::AddProperty(isolate, global_proto,
- factory->to_string_tag_symbol(),
- v8_str(isolate, "WebAssembly.Global"), ro_attributes);
- }
+ Handle<JSFunction> global_constructor =
+ InstallConstructorFunc(isolate, webassembly, "Global", WebAssemblyGlobal);
+ context->set_wasm_global_constructor(*global_constructor);
+ SetDummyInstanceTemplate(isolate, global_constructor);
+ JSFunction::EnsureHasInitialMap(global_constructor);
+ Handle<JSObject> global_proto(
+ JSObject::cast(global_constructor->instance_prototype()), isolate);
+ i::Handle<i::Map> global_map =
+ isolate->factory()->NewMap(i::WASM_GLOBAL_TYPE, WasmGlobalObject::kSize);
+ JSFunction::SetInitialMap(global_constructor, global_map, global_proto);
+ InstallFunc(isolate, global_proto, "valueOf", WebAssemblyGlobalValueOf, 0);
+ InstallGetterSetter(isolate, global_proto, "value", WebAssemblyGlobalGetValue,
+ WebAssemblyGlobalSetValue);
+ JSObject::AddProperty(isolate, global_proto, factory->to_string_tag_symbol(),
+ v8_str(isolate, "WebAssembly.Global"), ro_attributes);
// Setup Exception
if (enabled_features.eh) {
- Handle<JSFunction> exception_constructor =
- InstallFunc(isolate, webassembly, "Exception", WebAssemblyException, 1);
+ Handle<JSFunction> exception_constructor = InstallConstructorFunc(
+ isolate, webassembly, "Exception", WebAssemblyException);
context->set_wasm_exception_constructor(*exception_constructor);
SetDummyInstanceTemplate(isolate, exception_constructor);
JSFunction::EnsureHasInitialMap(exception_constructor);
@@ -1628,22 +1834,21 @@ void WasmJs::Install(Isolate* isolate, bool exposed_on_global_object) {
}
// Setup errors
- attributes = static_cast<PropertyAttributes>(DONT_ENUM);
Handle<JSFunction> compile_error(
isolate->native_context()->wasm_compile_error_function(), isolate);
JSObject::AddProperty(isolate, webassembly,
isolate->factory()->CompileError_string(),
- compile_error, attributes);
+ compile_error, DONT_ENUM);
Handle<JSFunction> link_error(
isolate->native_context()->wasm_link_error_function(), isolate);
JSObject::AddProperty(isolate, webassembly,
isolate->factory()->LinkError_string(), link_error,
- attributes);
+ DONT_ENUM);
Handle<JSFunction> runtime_error(
isolate->native_context()->wasm_runtime_error_function(), isolate);
JSObject::AddProperty(isolate, webassembly,
isolate->factory()->RuntimeError_string(),
- runtime_error, attributes);
+ runtime_error, DONT_ENUM);
}
#undef ASSIGN
diff --git a/deps/v8/src/wasm/wasm-js.h b/deps/v8/src/wasm/wasm-js.h
index bdcc1f061e..4a60f5d13d 100644
--- a/deps/v8/src/wasm/wasm-js.h
+++ b/deps/v8/src/wasm/wasm-js.h
@@ -10,6 +10,10 @@
namespace v8 {
namespace internal {
+namespace wasm {
+class StreamingDecoder;
+}
+
// Exposes a WebAssembly API to JavaScript through the V8 API.
class WasmJs {
public:
diff --git a/deps/v8/src/wasm/wasm-limits.h b/deps/v8/src/wasm/wasm-limits.h
index db99313e07..0fed6e9628 100644
--- a/deps/v8/src/wasm/wasm-limits.h
+++ b/deps/v8/src/wasm/wasm-limits.h
@@ -27,7 +27,7 @@ constexpr size_t kV8MaxWasmGlobals = 1000000;
constexpr size_t kV8MaxWasmExceptions = 1000000;
constexpr size_t kV8MaxWasmExceptionTypes = 1000000;
constexpr size_t kV8MaxWasmDataSegments = 100000;
-// Don't use this limit directly, but use the value of FLAG_wasm_max_mem_pages.
+// Don't use this limit directly, but use the value of {max_mem_pages()}.
constexpr size_t kV8MaxWasmMemoryPages = 32767; // = ~ 2 GiB
constexpr size_t kV8MaxWasmStringSize = 100000;
constexpr size_t kV8MaxWasmModuleSize = 1024 * 1024 * 1024; // = 1 GiB
@@ -46,17 +46,20 @@ static_assert(kV8MaxWasmMemoryPages <= kSpecMaxWasmMemoryPages,
"v8 should not be more permissive than the spec");
constexpr size_t kSpecMaxWasmTableSize = 0xFFFFFFFFu;
-constexpr uint64_t kV8MaxWasmMemoryBytes =
- kV8MaxWasmMemoryPages * uint64_t{kWasmPageSize};
-
-constexpr uint64_t kSpecMaxWasmMemoryBytes =
- kSpecMaxWasmMemoryPages * uint64_t{kWasmPageSize};
-
constexpr uint64_t kWasmMaxHeapOffset =
static_cast<uint64_t>(
std::numeric_limits<uint32_t>::max()) // maximum base value
+ std::numeric_limits<uint32_t>::max(); // maximum index value
+// Defined in wasm-engine.cc.
+// TODO(wasm): Make this size_t for wasm64. Currently the --wasm-max-mem-pages
+// flag is only uint32_t.
+uint32_t max_mem_pages();
+
+inline uint64_t max_mem_bytes() {
+ return uint64_t{max_mem_pages()} * kWasmPageSize;
+}
+
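max_mem_bytes() widens to uint64_t before multiplying: at the spec maximum of 65536 pages of 64 KiB each, pages * kWasmPageSize is exactly 2^32 and would overflow a 32-bit product. A sketch, assuming the 64 KiB kWasmPageSize used elsewhere in this tree:

    #include <cassert>
    #include <cstdint>

    constexpr uint32_t kWasmPageSize = 64 * 1024;  // 64 KiB, per the wasm spec

    inline uint64_t MaxMemBytes(uint32_t max_pages) {
      return uint64_t{max_pages} * kWasmPageSize;  // widen first, then multiply
    }

    int main() {
      // 65536 pages (the spec maximum) is exactly 4 GiB = 2^32 bytes: one past
      // what uint32_t can represent, so the 64-bit product is required.
      assert(MaxMemBytes(65536) == uint64_t{1} << 32);
      assert(MaxMemBytes(32767) == uint64_t{32767} * kWasmPageSize);  // ~2 GiB
    }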
} // namespace wasm
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/wasm/wasm-linkage.h b/deps/v8/src/wasm/wasm-linkage.h
index 6f022207bf..1761a4cea0 100644
--- a/deps/v8/src/wasm/wasm-linkage.h
+++ b/deps/v8/src/wasm/wasm-linkage.h
@@ -14,11 +14,15 @@ namespace v8 {
namespace internal {
namespace wasm {
+// TODO(wasm): optimize calling conventions to be both closer to C++ (to
+// reduce adapter costs for fast WASM <-> C++ calls) and to be more efficient
+// in general.
+
#if V8_TARGET_ARCH_IA32
// ===========================================================================
// == ia32 ===================================================================
// ===========================================================================
-constexpr Register kGpParamRegisters[] = {esi, eax, edx, ecx, ebx};
+constexpr Register kGpParamRegisters[] = {esi, eax, edx, ecx};
constexpr Register kGpReturnRegisters[] = {eax, edx};
constexpr DoubleRegister kFpParamRegisters[] = {xmm1, xmm2, xmm3,
xmm4, xmm5, xmm6};
@@ -28,7 +32,7 @@ constexpr DoubleRegister kFpReturnRegisters[] = {xmm1, xmm2};
// ===========================================================================
// == x64 ====================================================================
// ===========================================================================
-constexpr Register kGpParamRegisters[] = {rsi, rax, rdx, rcx, rbx, rdi};
+constexpr Register kGpParamRegisters[] = {rsi, rax, rdx, rcx, rbx, r9};
constexpr Register kGpReturnRegisters[] = {rax, rdx};
constexpr DoubleRegister kFpParamRegisters[] = {xmm1, xmm2, xmm3,
xmm4, xmm5, xmm6};
@@ -38,7 +42,7 @@ constexpr DoubleRegister kFpReturnRegisters[] = {xmm1, xmm2};
// ===========================================================================
// == arm ====================================================================
// ===========================================================================
-constexpr Register kGpParamRegisters[] = {r3, r0, r1, r2};
+constexpr Register kGpParamRegisters[] = {r3, r0, r2, r6};
constexpr Register kGpReturnRegisters[] = {r0, r1};
// ARM d-registers must be in ascending order for correct allocation.
constexpr DoubleRegister kFpParamRegisters[] = {d0, d1, d2, d3, d4, d5, d6, d7};
@@ -48,7 +52,7 @@ constexpr DoubleRegister kFpReturnRegisters[] = {d0, d1};
// ===========================================================================
// == arm64 ====================================================================
// ===========================================================================
-constexpr Register kGpParamRegisters[] = {x7, x0, x1, x2, x3, x4, x5, x6};
+constexpr Register kGpParamRegisters[] = {x7, x0, x2, x3, x4, x5, x6};
constexpr Register kGpReturnRegisters[] = {x0, x1};
constexpr DoubleRegister kFpParamRegisters[] = {d0, d1, d2, d3, d4, d5, d6, d7};
constexpr DoubleRegister kFpReturnRegisters[] = {d0, d1};
@@ -57,7 +61,7 @@ constexpr DoubleRegister kFpReturnRegisters[] = {d0, d1};
// ===========================================================================
// == mips ===================================================================
// ===========================================================================
-constexpr Register kGpParamRegisters[] = {a0, a1, a2, a3};
+constexpr Register kGpParamRegisters[] = {a0, a2, a3};
constexpr Register kGpReturnRegisters[] = {v0, v1};
constexpr DoubleRegister kFpParamRegisters[] = {f2, f4, f6, f8, f10, f12, f14};
constexpr DoubleRegister kFpReturnRegisters[] = {f2, f4};
@@ -66,7 +70,7 @@ constexpr DoubleRegister kFpReturnRegisters[] = {f2, f4};
// ===========================================================================
// == mips64 =================================================================
// ===========================================================================
-constexpr Register kGpParamRegisters[] = {a0, a1, a2, a3, a4, a5, a6, a7};
+constexpr Register kGpParamRegisters[] = {a0, a2, a3, a4, a5, a6, a7};
constexpr Register kGpReturnRegisters[] = {v0, v1};
constexpr DoubleRegister kFpParamRegisters[] = {f2, f4, f6, f8, f10, f12, f14};
constexpr DoubleRegister kFpReturnRegisters[] = {f2, f4};
@@ -75,7 +79,7 @@ constexpr DoubleRegister kFpReturnRegisters[] = {f2, f4};
// ===========================================================================
// == ppc & ppc64 ============================================================
// ===========================================================================
-constexpr Register kGpParamRegisters[] = {r10, r3, r4, r5, r6, r7, r8, r9};
+constexpr Register kGpParamRegisters[] = {r10, r3, r5, r6, r7, r8, r9};
constexpr Register kGpReturnRegisters[] = {r3, r4};
constexpr DoubleRegister kFpParamRegisters[] = {d1, d2, d3, d4, d5, d6, d7, d8};
constexpr DoubleRegister kFpReturnRegisters[] = {d1, d2};
@@ -84,7 +88,7 @@ constexpr DoubleRegister kFpReturnRegisters[] = {d1, d2};
// ===========================================================================
// == s390x ==================================================================
// ===========================================================================
-constexpr Register kGpParamRegisters[] = {r6, r2, r3, r4, r5};
+constexpr Register kGpParamRegisters[] = {r6, r2, r4, r5};
constexpr Register kGpReturnRegisters[] = {r2, r3};
constexpr DoubleRegister kFpParamRegisters[] = {d0, d2, d4, d6};
constexpr DoubleRegister kFpReturnRegisters[] = {d0, d2, d4, d6};
@@ -93,7 +97,7 @@ constexpr DoubleRegister kFpReturnRegisters[] = {d0, d2, d4, d6};
// ===========================================================================
// == s390 ===================================================================
// ===========================================================================
-constexpr Register kGpParamRegisters[] = {r6, r2, r3, r4, r5};
+constexpr Register kGpParamRegisters[] = {r6, r2, r4, r5};
constexpr Register kGpReturnRegisters[] = {r2, r3};
constexpr DoubleRegister kFpParamRegisters[] = {d0, d2};
constexpr DoubleRegister kFpReturnRegisters[] = {d0, d2};
@@ -130,9 +134,7 @@ class LinkageAllocator {
#if V8_TARGET_ARCH_ARM
switch (rep) {
case MachineRepresentation::kFloat32:
- return extra_float_reg_ >= 0 ||
- (extra_double_reg_ >= 0 && extra_double_reg_ < 16) ||
- (fp_offset_ < fp_count_ && fp_regs_[fp_offset_].code() < 16);
+ return fp_offset_ < fp_count_ && fp_regs_[fp_offset_].code() < 16;
case MachineRepresentation::kFloat64:
return extra_double_reg_ >= 0 || fp_offset_ < fp_count_;
case MachineRepresentation::kSimd128:
@@ -154,20 +156,12 @@ class LinkageAllocator {
#if V8_TARGET_ARCH_ARM
switch (rep) {
case MachineRepresentation::kFloat32: {
- // Use the extra S-register if there is one.
- if (extra_float_reg_ >= 0) {
- int reg_code = extra_float_reg_;
- extra_float_reg_ = -1;
- return reg_code;
- }
- // Allocate a D-register and split into 2 float registers.
+ // Liftoff uses only even-numbered f32 registers, and encodes them using
+ // the code of the corresponding f64 register. This limits the calling
+ // interface to only using the even-numbered f32 registers.
int d_reg_code = NextFpReg(MachineRepresentation::kFloat64);
DCHECK_GT(16, d_reg_code); // D-registers 16 - 31 can't split.
- int reg_code = d_reg_code * 2;
- // Save the extra S-register.
- DCHECK_EQ(-1, extra_float_reg_);
- extra_float_reg_ = reg_code + 1;
- return reg_code;
+ return d_reg_code * 2;
}
case MachineRepresentation::kFloat64: {
// Use the extra D-register if there is one.
@@ -211,7 +205,7 @@ class LinkageAllocator {
// Stackslots are counted upwards starting from 0 (or the offset set by
// {SetStackOffset}).
int NumStackSlots(MachineRepresentation type) {
- return std::max(1, ElementSizeInBytes(type) / kPointerSize);
+ return std::max(1, ElementSizeInBytes(type) / kSystemPointerSize);
}
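NumStackSlots divides the element size by the pointer size with a floor of one slot: an 8-byte f64 takes two slots on a 32-bit target and one on a 64-bit target, while a 4-byte i32 still occupies a whole slot on either. A sketch of the same computation:

    #include <cassert>

    // Mirrors NumStackSlots above: element size divided by the pointer size,
    // but never fewer than one slot.
    int NumStackSlots(int element_size_bytes, int pointer_size_bytes) {
      int slots = element_size_bytes / pointer_size_bytes;
      return slots > 1 ? slots : 1;
    }

    int main() {
      assert(NumStackSlots(8, 4) == 2);   // f64 on a 32-bit target
      assert(NumStackSlots(8, 8) == 1);   // f64 on a 64-bit target
      assert(NumStackSlots(4, 8) == 1);   // i32 still takes a whole slot
      assert(NumStackSlots(16, 8) == 2);  // 128-bit SIMD on a 64-bit target
    }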
// Stackslots are counted upwards starting from 0 (or the offset set by
@@ -244,10 +238,8 @@ class LinkageAllocator {
const DoubleRegister* const fp_regs_;
#if V8_TARGET_ARCH_ARM
- // ARM FP register aliasing may require splitting or merging double registers.
// Track fragments of registers below fp_offset_ here. There can only be one
- // extra float and double register.
- int extra_float_reg_ = -1;
+ // extra double register.
int extra_double_reg_ = -1;
#endif
diff --git a/deps/v8/src/wasm/wasm-memory.cc b/deps/v8/src/wasm/wasm-memory.cc
index cf18817bb1..b4aee28d78 100644
--- a/deps/v8/src/wasm/wasm-memory.cc
+++ b/deps/v8/src/wasm/wasm-memory.cc
@@ -4,6 +4,7 @@
#include <limits>
+#include "src/counters.h"
#include "src/heap/heap-inl.h"
#include "src/objects-inl.h"
#include "src/objects/js-array-buffer-inl.h"
@@ -153,6 +154,20 @@ WasmMemoryTracker::~WasmMemoryTracker() {
DCHECK_EQ(allocated_address_space_, 0u);
}
+void* WasmMemoryTracker::TryAllocateBackingStoreForTesting(
+ Heap* heap, size_t size, void** allocation_base,
+ size_t* allocation_length) {
+ return TryAllocateBackingStore(this, heap, size, allocation_base,
+ allocation_length);
+}
+
+void WasmMemoryTracker::FreeBackingStoreForTesting(base::AddressRegion memory,
+ void* buffer_start) {
+ ReleaseAllocation(nullptr, buffer_start);
+ CHECK(FreePages(GetPlatformPageAllocator(),
+ reinterpret_cast<void*>(memory.begin()), memory.size()));
+}
+
bool WasmMemoryTracker::ReserveAddressSpace(size_t num_bytes,
ReservationLimit limit) {
size_t reservation_limit =
@@ -179,7 +194,7 @@ void WasmMemoryTracker::RegisterAllocation(Isolate* isolate,
size_t allocation_length,
void* buffer_start,
size_t buffer_length) {
- base::LockGuard<base::Mutex> scope_lock(&mutex_);
+ base::MutexGuard scope_lock(&mutex_);
allocated_address_space_ += allocation_length;
AddAddressSpaceSample(isolate);
@@ -191,7 +206,7 @@ void WasmMemoryTracker::RegisterAllocation(Isolate* isolate,
WasmMemoryTracker::AllocationData WasmMemoryTracker::ReleaseAllocation(
Isolate* isolate, const void* buffer_start) {
- base::LockGuard<base::Mutex> scope_lock(&mutex_);
+ base::MutexGuard scope_lock(&mutex_);
auto find_result = allocations_.find(buffer_start);
CHECK_NE(find_result, allocations_.end());
@@ -216,7 +231,7 @@ WasmMemoryTracker::AllocationData WasmMemoryTracker::ReleaseAllocation(
const WasmMemoryTracker::AllocationData* WasmMemoryTracker::FindAllocationData(
const void* buffer_start) {
- base::LockGuard<base::Mutex> scope_lock(&mutex_);
+ base::MutexGuard scope_lock(&mutex_);
const auto& result = allocations_.find(buffer_start);
if (result != allocations_.end()) {
return &result->second;
@@ -225,12 +240,12 @@ const WasmMemoryTracker::AllocationData* WasmMemoryTracker::FindAllocationData(
}
bool WasmMemoryTracker::IsWasmMemory(const void* buffer_start) {
- base::LockGuard<base::Mutex> scope_lock(&mutex_);
+ base::MutexGuard scope_lock(&mutex_);
return allocations_.find(buffer_start) != allocations_.end();
}
bool WasmMemoryTracker::HasFullGuardRegions(const void* buffer_start) {
- base::LockGuard<base::Mutex> scope_lock(&mutex_);
+ base::MutexGuard scope_lock(&mutex_);
const auto allocation = allocations_.find(buffer_start);
if (allocation == allocations_.end()) {
@@ -270,17 +285,15 @@ Handle<JSArrayBuffer> SetupArrayBuffer(Isolate* isolate, void* backing_store,
constexpr bool is_wasm_memory = true;
JSArrayBuffer::Setup(buffer, isolate, is_external, backing_store, size,
shared, is_wasm_memory);
- buffer->set_is_neuterable(false);
+ buffer->set_is_detachable(false);
buffer->set_is_growable(true);
return buffer;
}
MaybeHandle<JSArrayBuffer> NewArrayBuffer(Isolate* isolate, size_t size,
SharedFlag shared) {
- // Enforce engine-limited maximum allocation size.
- if (size > kV8MaxWasmMemoryBytes) return {};
// Enforce flag-limited maximum allocation size.
- if (size > (FLAG_wasm_max_mem_pages * uint64_t{kWasmPageSize})) return {};
+ if (size > max_mem_bytes()) return {};
WasmMemoryTracker* memory_tracker = isolate->wasm_engine()->memory_tracker();
@@ -310,17 +323,17 @@ MaybeHandle<JSArrayBuffer> NewArrayBuffer(Isolate* isolate, size_t size,
void DetachMemoryBuffer(Isolate* isolate, Handle<JSArrayBuffer> buffer,
bool free_memory) {
if (buffer->is_shared()) return; // Detaching shared buffers is impossible.
- DCHECK(!buffer->is_neuterable());
+ DCHECK(!buffer->is_detachable());
const bool is_external = buffer->is_external();
- DCHECK(!buffer->is_neuterable());
+ DCHECK(!buffer->is_detachable());
if (!is_external) {
buffer->set_is_external(true);
isolate->heap()->UnregisterArrayBuffer(*buffer);
if (free_memory) {
- // We need to free the memory before neutering the buffer because
+ // We need to free the memory before detaching the buffer because
// FreeBackingStore reads buffer->allocation_base(), which is nulled out
- // by Neuter. This means there is a dangling pointer until we neuter the
+ // by Detach. This means there is a dangling pointer until we detach the
// buffer. Since there is no way for the user to directly call
// FreeBackingStore, we can ensure this is safe.
buffer->FreeBackingStoreFromMainThread();
@@ -329,8 +342,8 @@ void DetachMemoryBuffer(Isolate* isolate, Handle<JSArrayBuffer> buffer,
DCHECK(buffer->is_external());
buffer->set_is_wasm_memory(false);
- buffer->set_is_neuterable(true);
- buffer->Neuter();
+ buffer->set_is_detachable(true);
+ buffer->Detach();
}
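The ordering constraint spelled out in the comment above generalizes: Detach() nulls out allocation_base, so the backing store must be freed while that pointer is still readable. A generic sketch of the hazard (plain C++, not the JSArrayBuffer API):

    #include <cassert>
    #include <cstdlib>

    struct Buffer {
      void* allocation_base = nullptr;
      void Detach() { allocation_base = nullptr; }  // drops the only pointer
    };

    void FreeThenDetach(Buffer* b) {
      std::free(b->allocation_base);  // must read allocation_base first
      b->Detach();
    }

    int main() {
      Buffer b;
      b.allocation_base = std::malloc(64);
      FreeThenDetach(&b);  // reversing the two calls would leak the allocation
      assert(b.allocation_base == nullptr);
    }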
} // namespace wasm
diff --git a/deps/v8/src/wasm/wasm-memory.h b/deps/v8/src/wasm/wasm-memory.h
index 5a919fe71c..5fb4554cc2 100644
--- a/deps/v8/src/wasm/wasm-memory.h
+++ b/deps/v8/src/wasm/wasm-memory.h
@@ -66,6 +66,18 @@ class WasmMemoryTracker {
friend WasmMemoryTracker;
};
+ // Allow tests to allocate a backing store the same way as we do it for
+ // WebAssembly memory. This is used in unit tests for the trap handler to
+ // generate the same signals/exceptions for invalid memory accesses as
+ // we would get with WebAssembly memory.
+ V8_EXPORT_PRIVATE void* TryAllocateBackingStoreForTesting(
+ Heap* heap, size_t size, void** allocation_base,
+ size_t* allocation_length);
+
+ // Free memory allocated with TryAllocateBackingStoreForTesting.
+ V8_EXPORT_PRIVATE void FreeBackingStoreForTesting(base::AddressRegion memory,
+ void* buffer_start);
+
// Decreases the amount of reserved address space.
void ReleaseReservation(size_t num_bytes);
diff --git a/deps/v8/src/wasm/wasm-module.cc b/deps/v8/src/wasm/wasm-module.cc
index 7e4621571a..3502a03272 100644
--- a/deps/v8/src/wasm/wasm-module.cc
+++ b/deps/v8/src/wasm/wasm-module.cc
@@ -74,8 +74,8 @@ std::ostream& operator<<(std::ostream& os, const WasmFunctionName& name) {
return os;
}
-WasmModule::WasmModule(std::unique_ptr<Zone> owned)
- : signature_zone(std::move(owned)) {}
+WasmModule::WasmModule(std::unique_ptr<Zone> signature_zone)
+ : signature_zone(std::move(signature_zone)) {}
bool IsWasmCodegenAllowed(Isolate* isolate, Handle<Context> context) {
// TODO(wasm): Once wasm has its own CSP policy, we should introduce a
@@ -260,7 +260,8 @@ Handle<JSArray> GetCustomSections(Isolate* isolate,
thrower->RangeError("out of memory allocating custom section data");
return Handle<JSArray>();
}
- Handle<JSArrayBuffer> buffer = isolate->factory()->NewJSArrayBuffer();
+ Handle<JSArrayBuffer> buffer =
+ isolate->factory()->NewJSArrayBuffer(SharedFlag::kNotShared);
constexpr bool is_external = false;
JSArrayBuffer::Setup(buffer, isolate, is_external, memory, size);
memcpy(memory, wire_bytes.start() + section.payload.offset(),
@@ -312,15 +313,15 @@ inline size_t VectorSize(const std::vector<T>& vector) {
}
} // namespace
-size_t EstimateWasmModuleSize(const WasmModule* module) {
- size_t estimate =
- sizeof(WasmModule) + VectorSize(module->signatures) +
- VectorSize(module->signature_ids) + VectorSize(module->functions) +
- VectorSize(module->data_segments) + VectorSize(module->tables) +
- VectorSize(module->import_table) + VectorSize(module->export_table) +
- VectorSize(module->exceptions) + VectorSize(module->table_inits);
- // TODO(wasm): include names table and wire bytes in size estimate
- return estimate;
+size_t EstimateStoredSize(const WasmModule* module) {
+ return sizeof(WasmModule) + VectorSize(module->globals) +
+ (module->signature_zone ? module->signature_zone->allocation_size()
+ : 0) +
+ VectorSize(module->signatures) + VectorSize(module->signature_ids) +
+ VectorSize(module->functions) + VectorSize(module->data_segments) +
+ VectorSize(module->tables) + VectorSize(module->import_table) +
+ VectorSize(module->export_table) + VectorSize(module->exceptions) +
+ VectorSize(module->elem_segments);
}
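EstimateStoredSize sums the struct itself, the signature zone, and the out-of-line storage behind each vector. A sketch of such an estimate; the exact accounting inside the VectorSize helper is an assumption here:

    #include <cstddef>
    #include <cstdio>
    #include <vector>

    // Assumed shape of the VectorSize helper: bytes of out-of-line storage a
    // vector currently reserves. The sizeof(std::vector) header itself is
    // counted once by the caller via sizeof(Module).
    template <typename T>
    std::size_t VectorSize(const std::vector<T>& vector) {
      return sizeof(T) * vector.capacity();
    }

    struct Module {  // stand-in for WasmModule
      std::vector<int> functions;
      std::vector<double> globals;
    };

    std::size_t EstimateStoredSize(const Module* module) {
      return sizeof(Module) + VectorSize(module->functions) +
             VectorSize(module->globals);
    }

    int main() {
      Module m;
      m.functions.resize(10);
      m.globals.resize(3);
      std::printf("~%zu bytes\n", EstimateStoredSize(&m));
    }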
} // namespace wasm
} // namespace internal
diff --git a/deps/v8/src/wasm/wasm-module.h b/deps/v8/src/wasm/wasm-module.h
index d188cf59e1..75f6e98ca5 100644
--- a/deps/v8/src/wasm/wasm-module.h
+++ b/deps/v8/src/wasm/wasm-module.h
@@ -84,8 +84,16 @@ struct WasmException {
// Static representation of a wasm data segment.
struct WasmDataSegment {
+ // Construct an active segment.
+ explicit WasmDataSegment(WasmInitExpr dest_addr)
+ : dest_addr(dest_addr), active(true) {}
+
+ // Construct a passive segment, which has no dest_addr.
+ WasmDataSegment() : active(false) {}
+
WasmInitExpr dest_addr; // destination memory address of the data.
WireBytesRef source; // start offset in the module bytes.
+ bool active = true; // true if copied automatically during instantiation.
};
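The two constructors make the active/passive distinction explicit at construction time: an active segment carries a destination address and is copied into memory during instantiation, while a passive segment (from the bulk-memory proposal) waits for an explicit memory.init. A usage sketch following the same pattern, with WasmInitExpr reduced to a plain offset for illustration:

    #include <cassert>
    #include <cstdint>

    struct DataSegment {
      // Active segment: copied automatically during instantiation.
      explicit DataSegment(uint32_t dest_addr)
          : dest_addr(dest_addr), active(true) {}
      // Passive segment: no destination; applied later by memory.init.
      DataSegment() : active(false) {}

      uint32_t dest_addr = 0;  // stand-in for WasmInitExpr
      bool active;
    };

    int main() {
      DataSegment active_seg(0x100);  // segment with an offset expression
      DataSegment passive_seg;        // segment with no offset expression
      assert(active_seg.active && active_seg.dest_addr == 0x100);
      assert(!passive_seg.active);
    }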
// Static representation of a wasm indirect call table.
@@ -101,16 +109,21 @@ struct WasmTable {
bool exported = false; // true if exported.
};
-// Static representation of how to initialize a table.
-struct WasmTableInit {
- MOVE_ONLY_NO_DEFAULT_CONSTRUCTOR(WasmTableInit);
+// Static representation of wasm element segment (table initializer).
+struct WasmElemSegment {
+ MOVE_ONLY_NO_DEFAULT_CONSTRUCTOR(WasmElemSegment);
+
+ // Construct an active segment.
+ WasmElemSegment(uint32_t table_index, WasmInitExpr offset)
+ : table_index(table_index), offset(offset), active(true) {}
- WasmTableInit(uint32_t table_index, WasmInitExpr offset)
- : table_index(table_index), offset(offset) {}
+ // Construct a passive segment, which has no table index or offset.
+ WasmElemSegment() : table_index(0), active(false) {}
uint32_t table_index;
WasmInitExpr offset;
std::vector<uint32_t> entries;
+ bool active; // true if copied automatically during instantiation.
};
// Static representation of a wasm import.
@@ -152,11 +165,13 @@ struct V8_EXPORT_PRIVATE WasmModule {
std::vector<WasmGlobal> globals;
// Size of the buffer required for all globals that are not imported and
// mutable.
- uint32_t globals_buffer_size = 0;
+ uint32_t untagged_globals_buffer_size = 0;
+ uint32_t tagged_globals_buffer_size = 0;
uint32_t num_imported_mutable_globals = 0;
uint32_t num_imported_functions = 0;
uint32_t num_declared_functions = 0; // excluding imported
uint32_t num_exported_functions = 0;
+ uint32_t num_declared_data_segments = 0; // From the DataCount section.
WireBytesRef name = {0, 0};
std::vector<FunctionSig*> signatures; // by signature index
std::vector<uint32_t> signature_ids; // by signature index
@@ -166,7 +181,7 @@ struct V8_EXPORT_PRIVATE WasmModule {
std::vector<WasmImport> import_table;
std::vector<WasmExport> export_table;
std::vector<WasmException> exceptions;
- std::vector<WasmTableInit> table_inits;
+ std::vector<WasmElemSegment> elem_segments;
SignatureMap signature_map; // canonicalizing map for signature indexes.
ModuleOrigin origin = kWasmOrigin; // origin of the module
@@ -174,21 +189,21 @@ struct V8_EXPORT_PRIVATE WasmModule {
function_names;
std::string source_map_url;
- explicit WasmModule(std::unique_ptr<Zone> owned = nullptr);
+ explicit WasmModule(std::unique_ptr<Zone> signature_zone = nullptr);
WireBytesRef LookupFunctionName(const ModuleWireBytes& wire_bytes,
uint32_t function_index) const;
void AddFunctionNameForTesting(int function_index, WireBytesRef name);
};
-size_t EstimateWasmModuleSize(const WasmModule* module);
+size_t EstimateStoredSize(const WasmModule* module);
// Interface to the storage (wire bytes) of a wasm module.
// It is illegal for anyone receiving a ModuleWireBytes to store pointers based
// on module_bytes, as this storage is only guaranteed to be alive as long as
// this struct is alive.
struct V8_EXPORT_PRIVATE ModuleWireBytes {
- ModuleWireBytes(Vector<const byte> module_bytes)
+ explicit ModuleWireBytes(Vector<const byte> module_bytes)
: module_bytes_(module_bytes) {}
ModuleWireBytes(const byte* start, const byte* end)
: module_bytes_(start, static_cast<int>(end - start)) {
diff --git a/deps/v8/src/wasm/wasm-objects-inl.h b/deps/v8/src/wasm/wasm-objects-inl.h
index 0144b8af5b..9adcc94f12 100644
--- a/deps/v8/src/wasm/wasm-objects-inl.h
+++ b/deps/v8/src/wasm/wasm-objects-inl.h
@@ -9,8 +9,11 @@
#include "src/contexts-inl.h"
#include "src/heap/heap-inl.h"
+#include "src/objects/foreign-inl.h"
+#include "src/objects/heap-number-inl.h"
#include "src/objects/js-array-buffer-inl.h"
#include "src/objects/managed.h"
+#include "src/objects/oddball-inl.h"
#include "src/v8memory.h"
#include "src/wasm/wasm-code-manager.h"
#include "src/wasm/wasm-module.h"
@@ -21,18 +24,33 @@
namespace v8 {
namespace internal {
+OBJECT_CONSTRUCTORS_IMPL(WasmExceptionObject, JSObject)
+OBJECT_CONSTRUCTORS_IMPL(WasmExceptionTag, Struct)
+OBJECT_CONSTRUCTORS_IMPL(WasmExportedFunctionData, Struct)
+OBJECT_CONSTRUCTORS_IMPL(WasmDebugInfo, Struct)
+OBJECT_CONSTRUCTORS_IMPL(WasmGlobalObject, JSObject)
+OBJECT_CONSTRUCTORS_IMPL(WasmInstanceObject, JSObject)
+OBJECT_CONSTRUCTORS_IMPL(WasmMemoryObject, JSObject)
+OBJECT_CONSTRUCTORS_IMPL(WasmModuleObject, JSObject)
+OBJECT_CONSTRUCTORS_IMPL(WasmTableObject, JSObject)
+OBJECT_CONSTRUCTORS_IMPL(AsmWasmData, Struct)
+
+NEVER_READ_ONLY_SPACE_IMPL(WasmDebugInfo)
+
CAST_ACCESSOR(WasmDebugInfo)
CAST_ACCESSOR(WasmExceptionObject)
+CAST_ACCESSOR(WasmExceptionTag)
CAST_ACCESSOR(WasmExportedFunctionData)
CAST_ACCESSOR(WasmGlobalObject)
CAST_ACCESSOR(WasmInstanceObject)
CAST_ACCESSOR(WasmMemoryObject)
CAST_ACCESSOR(WasmModuleObject)
CAST_ACCESSOR(WasmTableObject)
+CAST_ACCESSOR(AsmWasmData)
#define OPTIONAL_ACCESSORS(holder, name, type, offset) \
bool holder::has_##name() { \
- return !READ_FIELD(this, offset)->IsUndefined(); \
+ return !READ_FIELD(*this, offset)->IsUndefined(); \
} \
ACCESSORS(holder, name, type, offset)
@@ -64,6 +82,10 @@ OPTIONAL_ACCESSORS(WasmModuleObject, breakpoint_infos, FixedArray,
wasm::NativeModule* WasmModuleObject::native_module() const {
return managed_native_module()->raw();
}
+std::shared_ptr<wasm::NativeModule> WasmModuleObject::shared_native_module()
+ const {
+ return managed_native_module()->get();
+}
const wasm::WasmModule* WasmModuleObject::module() const {
// TODO(clemensh): Remove this helper (inline in callers).
return native_module()->module();
@@ -90,7 +112,9 @@ SMI_ACCESSORS(WasmMemoryObject, maximum_pages, kMaximumPagesOffset)
OPTIONAL_ACCESSORS(WasmMemoryObject, instances, WeakArrayList, kInstancesOffset)
// WasmGlobalObject
-ACCESSORS(WasmGlobalObject, array_buffer, JSArrayBuffer, kArrayBufferOffset)
+ACCESSORS(WasmGlobalObject, untagged_buffer, JSArrayBuffer,
+ kUntaggedBufferOffset)
+ACCESSORS(WasmGlobalObject, tagged_buffer, FixedArray, kTaggedBufferOffset)
SMI_ACCESSORS(WasmGlobalObject, offset, kOffsetOffset)
SMI_ACCESSORS(WasmGlobalObject, flags, kFlagsOffset)
BIT_FIELD_ACCESSORS(WasmGlobalObject, flags, type, WasmGlobalObject::TypeBits)
@@ -102,8 +126,9 @@ int WasmGlobalObject::type_size() const {
}
Address WasmGlobalObject::address() const {
- DCHECK_LE(offset() + type_size(), array_buffer()->byte_length());
- return Address(array_buffer()->backing_store()) + offset();
+ DCHECK_NE(type(), wasm::kWasmAnyRef);
+ DCHECK_LE(offset() + type_size(), untagged_buffer()->byte_length());
+ return Address(untagged_buffer()->backing_store()) + offset();
}
int32_t WasmGlobalObject::GetI32() {
@@ -122,6 +147,11 @@ double WasmGlobalObject::GetF64() {
return ReadLittleEndianValue<double>(address());
}
+Handle<Object> WasmGlobalObject::GetAnyRef() {
+ DCHECK_EQ(type(), wasm::kWasmAnyRef);
+ return handle(tagged_buffer()->get(offset()), GetIsolate());
+}
+
void WasmGlobalObject::SetI32(int32_t value) {
WriteLittleEndianValue<int32_t>(address(), value);
}
@@ -138,12 +168,17 @@ void WasmGlobalObject::SetF64(double value) {
WriteLittleEndianValue<double>(address(), value);
}
+void WasmGlobalObject::SetAnyRef(Handle<Object> value) {
+ DCHECK_EQ(type(), wasm::kWasmAnyRef);
+ tagged_buffer()->set(offset(), *value);
+}
+
// WasmInstanceObject
PRIMITIVE_ACCESSORS(WasmInstanceObject, memory_start, byte*, kMemoryStartOffset)
PRIMITIVE_ACCESSORS(WasmInstanceObject, memory_size, size_t, kMemorySizeOffset)
PRIMITIVE_ACCESSORS(WasmInstanceObject, memory_mask, size_t, kMemoryMaskOffset)
-PRIMITIVE_ACCESSORS(WasmInstanceObject, roots_array_address, Address,
- kRootsArrayAddressOffset)
+PRIMITIVE_ACCESSORS(WasmInstanceObject, isolate_root, Address,
+ kIsolateRootOffset)
PRIMITIVE_ACCESSORS(WasmInstanceObject, stack_limit_address, Address,
kStackLimitAddressOffset)
PRIMITIVE_ACCESSORS(WasmInstanceObject, real_stack_limit_address, Address,
@@ -162,6 +197,14 @@ PRIMITIVE_ACCESSORS(WasmInstanceObject, indirect_function_table_targets,
Address*, kIndirectFunctionTableTargetsOffset)
PRIMITIVE_ACCESSORS(WasmInstanceObject, jump_table_start, Address,
kJumpTableStartOffset)
+PRIMITIVE_ACCESSORS(WasmInstanceObject, data_segment_starts, Address*,
+ kDataSegmentStartsOffset)
+PRIMITIVE_ACCESSORS(WasmInstanceObject, data_segment_sizes, uint32_t*,
+ kDataSegmentSizesOffset)
+PRIMITIVE_ACCESSORS(WasmInstanceObject, dropped_data_segments, byte*,
+ kDroppedDataSegmentsOffset)
+PRIMITIVE_ACCESSORS(WasmInstanceObject, dropped_elem_segments, byte*,
+ kDroppedElemSegmentsOffset)
ACCESSORS(WasmInstanceObject, module_object, WasmModuleObject,
kModuleObjectOffset)
@@ -169,20 +212,20 @@ ACCESSORS(WasmInstanceObject, exports_object, JSObject, kExportsObjectOffset)
ACCESSORS(WasmInstanceObject, native_context, Context, kNativeContextOffset)
OPTIONAL_ACCESSORS(WasmInstanceObject, memory_object, WasmMemoryObject,
kMemoryObjectOffset)
-OPTIONAL_ACCESSORS(WasmInstanceObject, globals_buffer, JSArrayBuffer,
- kGlobalsBufferOffset)
+OPTIONAL_ACCESSORS(WasmInstanceObject, untagged_globals_buffer, JSArrayBuffer,
+ kUntaggedGlobalsBufferOffset)
+OPTIONAL_ACCESSORS(WasmInstanceObject, tagged_globals_buffer, FixedArray,
+ kTaggedGlobalsBufferOffset)
OPTIONAL_ACCESSORS(WasmInstanceObject, imported_mutable_globals_buffers,
FixedArray, kImportedMutableGlobalsBuffersOffset)
OPTIONAL_ACCESSORS(WasmInstanceObject, debug_info, WasmDebugInfo,
kDebugInfoOffset)
OPTIONAL_ACCESSORS(WasmInstanceObject, table_object, WasmTableObject,
kTableObjectOffset)
-ACCESSORS(WasmInstanceObject, imported_function_instances, FixedArray,
- kImportedFunctionInstancesOffset)
-ACCESSORS(WasmInstanceObject, imported_function_callables, FixedArray,
- kImportedFunctionCallablesOffset)
-OPTIONAL_ACCESSORS(WasmInstanceObject, indirect_function_table_instances,
- FixedArray, kIndirectFunctionTableInstancesOffset)
+ACCESSORS(WasmInstanceObject, imported_function_refs, FixedArray,
+ kImportedFunctionRefsOffset)
+OPTIONAL_ACCESSORS(WasmInstanceObject, indirect_function_table_refs, FixedArray,
+ kIndirectFunctionTableRefsOffset)
OPTIONAL_ACCESSORS(WasmInstanceObject, managed_native_allocations, Foreign,
kManagedNativeAllocationsOffset)
OPTIONAL_ACCESSORS(WasmInstanceObject, exceptions_table, FixedArray,
@@ -195,6 +238,14 @@ inline bool WasmInstanceObject::has_indirect_function_table() {
return indirect_function_table_sig_ids() != nullptr;
}
+void WasmInstanceObject::clear_padding() {
+ if (FIELD_SIZE(kOptionalPaddingOffset) != 0) {
+ DCHECK_EQ(4, FIELD_SIZE(kOptionalPaddingOffset));
+ memset(reinterpret_cast<void*>(address() + kOptionalPaddingOffset), 0,
+ FIELD_SIZE(kOptionalPaddingOffset));
+ }
+}
+
IndirectFunctionTableEntry::IndirectFunctionTableEntry(
Handle<WasmInstanceObject> instance, int index)
: instance_(instance), index_(index) {
@@ -214,6 +265,12 @@ ACCESSORS(WasmExceptionObject, serialized_signature, PodArray<wasm::ValueType>,
kSerializedSignatureOffset)
ACCESSORS(WasmExceptionObject, exception_tag, HeapObject, kExceptionTagOffset)
+// WasmExportedFunction
+WasmExportedFunction::WasmExportedFunction(Address ptr) : JSFunction(ptr) {
+ SLOW_DCHECK(IsWasmExportedFunction(*this));
+}
+CAST_ACCESSOR(WasmExportedFunction)
+
// WasmExportedFunctionData
ACCESSORS(WasmExportedFunctionData, wrapper_code, Code, kWrapperCodeOffset)
ACCESSORS(WasmExportedFunctionData, instance, WasmInstanceObject,
@@ -242,6 +299,16 @@ uint32_t WasmTableObject::current_length() { return functions()->length(); }
bool WasmMemoryObject::has_maximum_pages() { return maximum_pages() >= 0; }
+// WasmExceptionTag
+SMI_ACCESSORS(WasmExceptionTag, index, kIndexOffset)
+
+// AsmWasmData
+ACCESSORS(AsmWasmData, managed_native_module, Managed<wasm::NativeModule>,
+ kManagedNativeModuleOffset)
+ACCESSORS(AsmWasmData, export_wrappers, FixedArray, kExportWrappersOffset)
+ACCESSORS(AsmWasmData, asm_js_offset_table, ByteArray, kAsmJsOffsetTableOffset)
+ACCESSORS(AsmWasmData, uses_bitset, HeapNumber, kUsesBitsetOffset)
+
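The ACCESSORS/PRIMITIVE_ACCESSORS/SMI_ACCESSORS invocations above generate offset-based getters and setters (the real macros live in src/objects/object-macros.h and additionally go through tagged-field readers and write barriers). As a rough standalone model only, an offset-based primitive accessor could expand to something like:

#include <cstdint>
#include <cstring>

// Illustrative only: a "heap object" modeled as a raw byte payload, with
// accessors that read/write a field at a fixed byte offset, loosely like
// what PRIMITIVE_ACCESSORS(holder, name, type, offset) generates.
class FakeHeapObject {
 public:
  explicit FakeHeapObject(char* payload) : payload_(payload) {}

#define FAKE_PRIMITIVE_ACCESSORS(name, type, offset)        \
  type name() const {                                       \
    type value;                                             \
    std::memcpy(&value, payload_ + (offset), sizeof(type)); \
    return value;                                           \
  }                                                         \
  void set_##name(type value) {                             \
    std::memcpy(payload_ + (offset), &value, sizeof(type)); \
  }

  FAKE_PRIMITIVE_ACCESSORS(indirect_function_table_size, uint32_t, 0)
#undef FAKE_PRIMITIVE_ACCESSORS

 private:
  char* payload_;
};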
#include "src/objects/object-macros-undef.h"
} // namespace internal
diff --git a/deps/v8/src/wasm/wasm-objects.cc b/deps/v8/src/wasm/wasm-objects.cc
index 9d0e20ab2b..392ddd4ca8 100644
--- a/deps/v8/src/wasm/wasm-objects.cc
+++ b/deps/v8/src/wasm/wasm-objects.cc
@@ -9,10 +9,12 @@
#include "src/base/iterator.h"
#include "src/code-factory.h"
#include "src/compiler/wasm-compiler.h"
+#include "src/counters.h"
#include "src/debug/debug-interface.h"
#include "src/objects-inl.h"
#include "src/objects/debug-objects-inl.h"
#include "src/objects/shared-function-info.h"
+#include "src/objects/struct-inl.h"
#include "src/trap-handler/trap-handler.h"
#include "src/wasm/jump-table-assembler.h"
#include "src/wasm/module-compiler.h"
@@ -64,13 +66,24 @@ class WasmInstanceNativeAllocations {
// Allocates initial native storage for a given instance.
WasmInstanceNativeAllocations(Handle<WasmInstanceObject> instance,
size_t num_imported_functions,
- size_t num_imported_mutable_globals) {
+ size_t num_imported_mutable_globals,
+ size_t num_data_segments,
+ size_t num_elem_segments) {
SET(instance, imported_function_targets,
reinterpret_cast<Address*>(
calloc(num_imported_functions, sizeof(Address))));
SET(instance, imported_mutable_globals,
reinterpret_cast<Address*>(
calloc(num_imported_mutable_globals, sizeof(Address))));
+ SET(instance, data_segment_starts,
+ reinterpret_cast<Address*>(calloc(num_data_segments, sizeof(Address))));
+ SET(instance, data_segment_sizes,
+ reinterpret_cast<uint32_t*>(
+ calloc(num_data_segments, sizeof(uint32_t))));
+ SET(instance, dropped_data_segments,
+ reinterpret_cast<uint8_t*>(calloc(num_data_segments, sizeof(uint8_t))));
+ SET(instance, dropped_elem_segments,
+ reinterpret_cast<uint8_t*>(calloc(num_elem_segments, sizeof(uint8_t))));
}
~WasmInstanceNativeAllocations() {
::free(indirect_function_table_sig_ids_);
@@ -81,6 +94,14 @@ class WasmInstanceNativeAllocations {
imported_function_targets_ = nullptr;
::free(imported_mutable_globals_);
imported_mutable_globals_ = nullptr;
+ ::free(data_segment_starts_);
+ data_segment_starts_ = nullptr;
+ ::free(data_segment_sizes_);
+ data_segment_sizes_ = nullptr;
+ ::free(dropped_data_segments_);
+ dropped_data_segments_ = nullptr;
+ ::free(dropped_elem_segments_);
+ dropped_elem_segments_ = nullptr;
}
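Each array freed above was calloc'ed in the constructor, so the destructor and the pointer-nulling are pure manual ownership. A sketch of an equivalent RAII formulation, shown only as an alternative style for comparison, not what this class does:

#include <cstddef>
#include <cstdlib>
#include <memory>

struct FreeDeleter {
  void operator()(void* p) const { std::free(p); }
};

template <typename T>
using CArray = std::unique_ptr<T[], FreeDeleter>;

// Zero-initialized native array that frees itself; the owning class then
// needs no hand-written destructor code for these members.
template <typename T>
CArray<T> AllocateZeroed(size_t count) {
  return CArray<T>(static_cast<T*>(std::calloc(count, sizeof(T))));
}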
// Resizes the indirect function table.
void resize_indirect_function_table(Isolate* isolate,
@@ -89,7 +110,7 @@ class WasmInstanceNativeAllocations {
uint32_t old_size = instance->indirect_function_table_size();
void* new_sig_ids = nullptr;
void* new_targets = nullptr;
- Handle<FixedArray> new_instances;
+ Handle<FixedArray> new_refs;
if (indirect_function_table_sig_ids_) {
// Reallocate the old storage.
new_sig_ids = realloc(indirect_function_table_sig_ids_,
@@ -97,16 +118,14 @@ class WasmInstanceNativeAllocations {
new_targets =
realloc(indirect_function_table_targets_, new_size * sizeof(Address));
- Handle<FixedArray> old(instance->indirect_function_table_instances(),
- isolate);
- new_instances = isolate->factory()->CopyFixedArrayAndGrow(
+ Handle<FixedArray> old(instance->indirect_function_table_refs(), isolate);
+ new_refs = isolate->factory()->CopyFixedArrayAndGrow(
old, static_cast<int>(new_size - old_size));
} else {
// Allocate new storage.
new_sig_ids = malloc(new_size * sizeof(uint32_t));
new_targets = malloc(new_size * sizeof(Address));
- new_instances =
- isolate->factory()->NewFixedArray(static_cast<int>(new_size));
+ new_refs = isolate->factory()->NewFixedArray(static_cast<int>(new_size));
}
// Initialize new entries.
instance->set_indirect_function_table_size(new_size);
@@ -115,7 +134,7 @@ class WasmInstanceNativeAllocations {
SET(instance, indirect_function_table_targets,
reinterpret_cast<Address*>(new_targets));
- instance->set_indirect_function_table_instances(*new_instances);
+ instance->set_indirect_function_table_refs(*new_refs);
for (uint32_t j = old_size; j < new_size; j++) {
IndirectFunctionTableEntry(instance, static_cast<int>(j)).clear();
}
@@ -124,22 +143,29 @@ class WasmInstanceNativeAllocations {
Address* indirect_function_table_targets_ = nullptr;
Address* imported_function_targets_ = nullptr;
Address* imported_mutable_globals_ = nullptr;
+ Address* data_segment_starts_ = nullptr;
+ uint32_t* data_segment_sizes_ = nullptr;
+ uint8_t* dropped_data_segments_ = nullptr;
+ uint8_t* dropped_elem_segments_ = nullptr;
#undef SET
};
size_t EstimateNativeAllocationsSize(const WasmModule* module) {
- size_t estimate = sizeof(WasmInstanceNativeAllocations) +
- (1 * kPointerSize * module->num_imported_mutable_globals) +
- (2 * kPointerSize * module->num_imported_functions);
+ size_t estimate =
+ sizeof(WasmInstanceNativeAllocations) +
+ (1 * kSystemPointerSize * module->num_imported_mutable_globals) +
+ (2 * kSystemPointerSize * module->num_imported_functions) +
+ ((kSystemPointerSize + sizeof(uint32_t) + sizeof(uint8_t)) *
+ module->num_declared_data_segments);
for (auto& table : module->tables) {
- estimate += 3 * kPointerSize * table.initial_size;
+ estimate += 3 * kSystemPointerSize * table.initial_size;
}
return estimate;
}
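To make the estimate concrete, here is the arithmetic for a hypothetical 64-bit build (kSystemPointerSize == 8) and a made-up module with 3 imported functions, 2 imported mutable globals, 4 declared data segments, and one table with initial size 10; kBaseSize stands in for sizeof(WasmInstanceNativeAllocations):

#include <cstddef>
#include <cstdint>

constexpr size_t kSystemPointerSize = 8;  // assumption: 64-bit build
constexpr size_t kBaseSize = 64;          // stand-in for the sizeof() term

// Mirrors EstimateNativeAllocationsSize above.
constexpr size_t Estimate(size_t imported_funcs, size_t imported_mut_globals,
                          size_t data_segments, size_t table_initial) {
  return kBaseSize +
         1 * kSystemPointerSize * imported_mut_globals +
         2 * kSystemPointerSize * imported_funcs +
         (kSystemPointerSize + sizeof(uint32_t) + sizeof(uint8_t)) *
             data_segments +
         3 * kSystemPointerSize * table_initial;
}

// 64 + 8*2 + 16*3 + 13*4 + 24*10 = 64 + 16 + 48 + 52 + 240 = 420 bytes.
static_assert(Estimate(3, 2, 4, 10) == 420, "worked example");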
WasmInstanceNativeAllocations* GetNativeAllocations(
- WasmInstanceObject* instance) {
- return reinterpret_cast<Managed<WasmInstanceNativeAllocations>*>(
+ WasmInstanceObject instance) {
+ return Managed<WasmInstanceNativeAllocations>::cast(
instance->managed_native_allocations())
->raw();
}
@@ -177,25 +203,21 @@ enum DispatchTableElements : int {
// static
Handle<WasmModuleObject> WasmModuleObject::New(
Isolate* isolate, const wasm::WasmFeatures& enabled,
- std::shared_ptr<const wasm::WasmModule> shared_module, wasm::ModuleEnv& env,
+ std::shared_ptr<const wasm::WasmModule> shared_module,
OwnedVector<const uint8_t> wire_bytes, Handle<Script> script,
Handle<ByteArray> asm_js_offset_table) {
- DCHECK_EQ(shared_module.get(), env.module);
-
// Create a new {NativeModule} first.
- size_t native_memory_estimate =
- isolate->wasm_engine()->code_manager()->EstimateNativeModuleSize(
- env.module);
+ size_t code_size_estimate =
+ wasm::WasmCodeManager::EstimateNativeModuleCodeSize(shared_module.get());
auto native_module = isolate->wasm_engine()->code_manager()->NewNativeModule(
- isolate, enabled, native_memory_estimate,
- wasm::NativeModule::kCanAllocateMoreMemory, std::move(shared_module),
- env);
- native_module->set_wire_bytes(std::move(wire_bytes));
+ isolate, enabled, code_size_estimate,
+ wasm::NativeModule::kCanAllocateMoreMemory, std::move(shared_module));
+ native_module->SetWireBytes(std::move(wire_bytes));
native_module->SetRuntimeStubs(isolate);
// Delegate to the shared {WasmModuleObject::New} allocator.
Handle<WasmModuleObject> module_object =
- New(isolate, std::move(native_module), script);
+ New(isolate, std::move(native_module), script, code_size_estimate);
if (!asm_js_offset_table.is_null()) {
module_object->set_asm_js_offset_table(*asm_js_offset_table);
}
@@ -205,19 +227,27 @@ Handle<WasmModuleObject> WasmModuleObject::New(
// static
Handle<WasmModuleObject> WasmModuleObject::New(
Isolate* isolate, std::shared_ptr<wasm::NativeModule> native_module,
- Handle<Script> script) {
- int export_wrapper_size =
- static_cast<int>(native_module->module()->num_exported_functions);
+ Handle<Script> script, size_t code_size_estimate) {
+ const WasmModule* module = native_module->module();
+ int export_wrapper_size = static_cast<int>(module->num_exported_functions);
Handle<FixedArray> export_wrappers =
isolate->factory()->NewFixedArray(export_wrapper_size, TENURED);
+ return New(isolate, std::move(native_module), script, export_wrappers,
+ code_size_estimate);
+}
+
+// static
+Handle<WasmModuleObject> WasmModuleObject::New(
+ Isolate* isolate, std::shared_ptr<wasm::NativeModule> native_module,
+ Handle<Script> script, Handle<FixedArray> export_wrappers,
+ size_t code_size_estimate) {
+ const WasmModule* module = native_module->module();
// Use the given shared {NativeModule}, but increase its reference count by
// allocating a new {Managed<T>} that the {WasmModuleObject} references.
- size_t native_memory_estimate =
- isolate->wasm_engine()->code_manager()->EstimateNativeModuleSize(
- native_module->module());
size_t memory_estimate =
- EstimateWasmModuleSize(native_module->module()) + native_memory_estimate;
+ code_size_estimate +
+ wasm::WasmCodeManager::EstimateNativeModuleNonCodeSize(module);
Handle<Managed<wasm::NativeModule>> managed_native_module =
Managed<wasm::NativeModule>::FromSharedPtr(isolate, memory_estimate,
std::move(native_module));
@@ -259,7 +289,7 @@ bool WasmModuleObject::SetBreakPoint(Handle<WasmModuleObject> module_object,
Handle<WeakArrayList> weak_instance_list(module_object->weak_instance_list(),
isolate);
for (int i = 0; i < weak_instance_list->length(); ++i) {
- MaybeObject* maybe_instance = weak_instance_list->Get(i);
+ MaybeObject maybe_instance = weak_instance_list->Get(i);
if (maybe_instance->IsWeak()) {
Handle<WasmInstanceObject> instance(
WasmInstanceObject::cast(maybe_instance->GetHeapObjectAssumeWeak()),
@@ -275,7 +305,7 @@ bool WasmModuleObject::SetBreakPoint(Handle<WasmModuleObject> module_object,
namespace {
-int GetBreakpointPos(Isolate* isolate, Object* break_point_info_or_undef) {
+int GetBreakpointPos(Isolate* isolate, Object break_point_info_or_undef) {
if (break_point_info_or_undef->IsUndefined(isolate)) return kMaxInt;
return BreakPointInfo::cast(break_point_info_or_undef)->source_position();
}
@@ -291,7 +321,7 @@ int FindBreakpointInfoInsertPos(Isolate* isolate,
int right = breakpoint_infos->length(); // exclusive
while (right - left > 1) {
int mid = left + (right - left) / 2;
- Object* mid_obj = breakpoint_infos->get(mid);
+ Object mid_obj = breakpoint_infos->get(mid);
if (GetBreakpointPos(isolate, mid_obj) <= position) {
left = mid;
} else {
@@ -346,7 +376,7 @@ void WasmModuleObject::AddBreakpoint(Handle<WasmModuleObject> module_object,
// Move elements [insert_pos, ...] up by one.
for (int i = breakpoint_infos->length() - 1; i >= insert_pos; --i) {
- Object* entry = breakpoint_infos->get(i);
+ Object entry = breakpoint_infos->get(i);
if (entry->IsUndefined(isolate)) continue;
new_breakpoint_infos->set(i + 1, entry);
}
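FindBreakpointInfoInsertPos above bisects the sorted breakpoint array, and AddBreakpoint then shifts the tail up by one slot to make room. A standalone sketch of the same insert-into-sorted-array scheme over a plain vector (V8 itself works on a FixedArray of BreakPointInfo objects):

#include <vector>

// Returns the first index whose stored position exceeds `position`, i.e.
// the slot at which to insert while keeping the vector sorted.
int FindInsertPos(const std::vector<int>& positions, int position) {
  int left = 0;                                    // inclusive
  int right = static_cast<int>(positions.size());  // exclusive
  while (left < right) {
    int mid = left + (right - left) / 2;
    if (positions[mid] <= position) {
      left = mid + 1;
    } else {
      right = mid;
    }
  }
  return left;
}

void InsertSorted(std::vector<int>* positions, int position) {
  int pos = FindInsertPos(*positions, position);
  // insert() shifts the tail up by one, like the copy loop above.
  positions->insert(positions->begin() + pos, position);
}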
@@ -414,25 +444,24 @@ Handle<ByteArray> GetDecodedAsmJsOffsetTable(
DCHECK(table_type == Encoded || table_type == Decoded);
if (table_type == Decoded) return offset_table;
- wasm::AsmJsOffsetsResult asm_offsets;
+ wasm::AsmJsOffsets asm_offsets;
{
DisallowHeapAllocation no_gc;
byte* bytes_start = offset_table->GetDataStartAddress();
byte* bytes_end = reinterpret_cast<byte*>(
reinterpret_cast<Address>(bytes_start) + offset_table->length() - 1);
- asm_offsets = wasm::DecodeAsmJsOffsets(bytes_start, bytes_end);
+ asm_offsets = wasm::DecodeAsmJsOffsets(bytes_start, bytes_end).value();
}
  // Wasm bytes must be valid and must contain an asm.js offset table.
- DCHECK(asm_offsets.ok());
- DCHECK_GE(kMaxInt, asm_offsets.val.size());
- int num_functions = static_cast<int>(asm_offsets.val.size());
+ DCHECK_GE(kMaxInt, asm_offsets.size());
+ int num_functions = static_cast<int>(asm_offsets.size());
int num_imported_functions =
static_cast<int>(module_object->module()->num_imported_functions);
DCHECK_EQ(module_object->module()->functions.size(),
static_cast<size_t>(num_functions) + num_imported_functions);
int num_entries = 0;
for (int func = 0; func < num_functions; ++func) {
- size_t new_size = asm_offsets.val[func].size();
+ size_t new_size = asm_offsets[func].size();
DCHECK_LE(new_size, static_cast<size_t>(kMaxInt) - num_entries);
num_entries += static_cast<int>(new_size);
}
@@ -449,8 +478,7 @@ Handle<ByteArray> GetDecodedAsmJsOffsetTable(
const std::vector<WasmFunction>& wasm_funs =
module_object->module()->functions;
for (int func = 0; func < num_functions; ++func) {
- std::vector<wasm::AsmJsOffsetEntry>& func_asm_offsets =
- asm_offsets.val[func];
+ std::vector<wasm::AsmJsOffsetEntry>& func_asm_offsets = asm_offsets[func];
if (func_asm_offsets.empty()) continue;
int func_offset = wasm_funs[num_imported_functions + func].code.offset();
for (wasm::AsmJsOffsetEntry& e : func_asm_offsets) {
@@ -520,7 +548,7 @@ v8::debug::WasmDisassembly WasmModuleObject::DisassembleFunction(
static_cast<uint32_t>(func_index) >= module()->functions.size())
return {};
- Vector<const byte> wire_bytes = native_module()->wire_bytes();
+ wasm::ModuleWireBytes wire_bytes(native_module()->wire_bytes());
std::ostringstream disassembly_os;
v8::debug::WasmDisassembly::OffsetTable offset_table;
@@ -747,7 +775,7 @@ bool WasmModuleObject::GetPositionInfo(uint32_t position,
}
Handle<WasmTableObject> WasmTableObject::New(Isolate* isolate, uint32_t initial,
- int64_t maximum,
+ uint32_t maximum,
Handle<FixedArray>* js_functions) {
Handle<JSFunction> table_ctor(
isolate->native_context()->wasm_table_constructor(), isolate);
@@ -755,13 +783,12 @@ Handle<WasmTableObject> WasmTableObject::New(Isolate* isolate, uint32_t initial,
isolate->factory()->NewJSObject(table_ctor));
*js_functions = isolate->factory()->NewFixedArray(initial);
- Object* null = ReadOnlyRoots(isolate).null_value();
+ Object null = ReadOnlyRoots(isolate).null_value();
for (int i = 0; i < static_cast<int>(initial); ++i) {
(*js_functions)->set(i, null);
}
table_obj->set_functions(**js_functions);
- DCHECK_EQ(maximum, static_cast<int>(maximum));
- Handle<Object> max = isolate->factory()->NewNumber(maximum);
+ Handle<Object> max = isolate->factory()->NewNumberFromUint(maximum);
table_obj->set_maximum_length(*max);
table_obj->set_dispatch_tables(ReadOnlyRoots(isolate).empty_fixed_array());
@@ -817,7 +844,7 @@ void WasmTableObject::Grow(Isolate* isolate, uint32_t count) {
}
void WasmTableObject::Set(Isolate* isolate, Handle<WasmTableObject> table,
- int32_t table_index, Handle<JSFunction> function) {
+ uint32_t table_index, Handle<JSFunction> function) {
Handle<FixedArray> array(table->functions(), isolate);
if (function.is_null()) {
ClearDispatchTables(isolate, table, table_index); // Degenerate case.
@@ -825,26 +852,23 @@ void WasmTableObject::Set(Isolate* isolate, Handle<WasmTableObject> table,
return;
}
- // TODO(titzer): Change this to MaybeHandle<WasmExportedFunction>
- DCHECK(WasmExportedFunction::IsWasmExportedFunction(*function));
auto exported_function = Handle<WasmExportedFunction>::cast(function);
- Handle<WasmInstanceObject> other_instance(exported_function->instance(),
- isolate);
+ Handle<WasmInstanceObject> target_instance(exported_function->instance(),
+ isolate);
int func_index = exported_function->function_index();
- auto* wasm_function = &other_instance->module()->functions[func_index];
+ auto* wasm_function = &target_instance->module()->functions[func_index];
DCHECK_NOT_NULL(wasm_function);
DCHECK_NOT_NULL(wasm_function->sig);
- Address call_target = exported_function->GetWasmCallTarget();
UpdateDispatchTables(isolate, table, table_index, wasm_function->sig,
handle(exported_function->instance(), isolate),
- call_target);
+ func_index);
array->set(table_index, *function);
}
void WasmTableObject::UpdateDispatchTables(
Isolate* isolate, Handle<WasmTableObject> table, int table_index,
- wasm::FunctionSig* sig, Handle<WasmInstanceObject> from_instance,
- Address call_target) {
+ wasm::FunctionSig* sig, Handle<WasmInstanceObject> target_instance,
+ int target_func_index) {
// We simply need to update the IFTs for each instance that imports
// this table.
Handle<FixedArray> dispatch_tables(table->dispatch_tables(), isolate);
@@ -852,15 +876,15 @@ void WasmTableObject::UpdateDispatchTables(
for (int i = 0; i < dispatch_tables->length();
i += kDispatchTableNumElements) {
- Handle<WasmInstanceObject> to_instance(
+ Handle<WasmInstanceObject> instance(
WasmInstanceObject::cast(
dispatch_tables->get(i + kDispatchTableInstanceOffset)),
isolate);
// Note that {SignatureMap::Find} may return {-1} if the signature is
// not found; it will simply never match any check.
- auto sig_id = to_instance->module()->signature_map.Find(*sig);
- IndirectFunctionTableEntry(to_instance, table_index)
- .set(sig_id, *from_instance, call_target);
+ auto sig_id = instance->module()->signature_map.Find(*sig);
+ IndirectFunctionTableEntry(instance, table_index)
+ .Set(sig_id, target_instance, target_func_index);
}
}
@@ -881,25 +905,12 @@ void WasmTableObject::ClearDispatchTables(Isolate* isolate,
}
namespace {
-MaybeHandle<JSArrayBuffer> GrowMemoryBuffer(Isolate* isolate,
+MaybeHandle<JSArrayBuffer> MemoryGrowBuffer(Isolate* isolate,
Handle<JSArrayBuffer> old_buffer,
- uint32_t pages,
- uint32_t maximum_pages) {
- if (!old_buffer->is_growable()) return {};
- void* old_mem_start = old_buffer->backing_store();
+ size_t new_size) {
+ CHECK_EQ(0, new_size % wasm::kWasmPageSize);
size_t old_size = old_buffer->byte_length();
- CHECK_GE(wasm::kV8MaxWasmMemoryBytes, old_size);
- CHECK_EQ(0, old_size % wasm::kWasmPageSize);
- size_t old_pages = old_size / wasm::kWasmPageSize;
- if (old_pages > maximum_pages || // already reached maximum
- (pages > maximum_pages - old_pages) || // exceeds remaining
- (pages > FLAG_wasm_max_mem_pages - old_pages)) { // exceeds limit
- return {};
- }
- size_t new_size =
- static_cast<size_t>(old_pages + pages) * wasm::kWasmPageSize;
- CHECK_GE(wasm::kV8MaxWasmMemoryBytes, new_size);
-
+ void* old_mem_start = old_buffer->backing_store();
// Reusing the backing store from externalized buffers causes problems with
// Blink's array buffers. The connection between the two is lost, which can
// lead to Blink not knowing about the other reference to the buffer and
@@ -914,8 +925,9 @@ MaybeHandle<JSArrayBuffer> GrowMemoryBuffer(Isolate* isolate,
new_size, PageAllocator::kReadWrite)) {
return {};
}
+ DCHECK_GE(new_size, old_size);
reinterpret_cast<v8::Isolate*>(isolate)
- ->AdjustAmountOfExternalAllocatedMemory(pages * wasm::kWasmPageSize);
+ ->AdjustAmountOfExternalAllocatedMemory(new_size - old_size);
}
// NOTE: We must allocate a new array buffer here because the spec
// assumes that ArrayBuffers do not change size.
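The accounting change above credits the embedder with the exact byte delta. Since the caller computes new_size as (old_pages + pages) * kWasmPageSize, the delta equals pages * kWasmPageSize, which a quick constexpr check confirms (64 KiB is the wasm page size):

#include <cstddef>

constexpr size_t kWasmPageSize = 64 * 1024;

// Growing a 2-page memory by 3 pages:
constexpr size_t old_size = 2 * kWasmPageSize;  // 131072 bytes
constexpr size_t new_size = 5 * kWasmPageSize;  // 327680 bytes
static_assert(new_size - old_size == 3 * kWasmPageSize,
              "the delta credited to the embedder is exactly the grown bytes");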
@@ -976,7 +988,7 @@ void SetInstanceMemory(Handle<WasmInstanceObject> instance,
Handle<WasmMemoryObject> WasmMemoryObject::New(
Isolate* isolate, MaybeHandle<JSArrayBuffer> maybe_buffer,
- int32_t maximum) {
+ uint32_t maximum) {
// TODO(kschimpf): Do we need to add an argument that defines the
// style of memory the user prefers (with/without trap handling), so
// that the memory will match the style of the compiled wasm module.
@@ -997,11 +1009,6 @@ Handle<WasmMemoryObject> WasmMemoryObject::New(
return memory_obj;
}
-uint32_t WasmMemoryObject::current_pages() {
- return static_cast<uint32_t>(array_buffer()->byte_length() /
- wasm::kWasmPageSize);
-}
-
bool WasmMemoryObject::has_full_guard_region(Isolate* isolate) {
const wasm::WasmMemoryTracker::AllocationData* allocation =
isolate->wasm_engine()->memory_tracker()->FindAllocationData(
@@ -1041,38 +1048,44 @@ void WasmMemoryObject::AddInstance(Isolate* isolate,
SetInstanceMemory(instance, buffer);
}
-void WasmMemoryObject::RemoveInstance(Handle<WasmMemoryObject> memory,
- Handle<WasmInstanceObject> instance) {
- if (memory->has_instances()) {
- memory->instances()->RemoveOne(MaybeObjectHandle::Weak(instance));
- }
-}
-
// static
int32_t WasmMemoryObject::Grow(Isolate* isolate,
Handle<WasmMemoryObject> memory_object,
uint32_t pages) {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"), "GrowMemory");
Handle<JSArrayBuffer> old_buffer(memory_object->array_buffer(), isolate);
if (!old_buffer->is_growable()) return -1;
- size_t old_size = old_buffer->byte_length();
- DCHECK_EQ(0, old_size % wasm::kWasmPageSize);
- Handle<JSArrayBuffer> new_buffer;
- uint32_t maximum_pages = FLAG_wasm_max_mem_pages;
+  // Check the maximum memory size and compute the new size.
+ uint32_t maximum_pages = wasm::max_mem_pages();
if (memory_object->has_maximum_pages()) {
- maximum_pages = Min(FLAG_wasm_max_mem_pages,
- static_cast<uint32_t>(memory_object->maximum_pages()));
+ maximum_pages = std::min(
+ maximum_pages, static_cast<uint32_t>(memory_object->maximum_pages()));
+ }
+ CHECK_GE(wasm::max_mem_pages(), maximum_pages);
+ size_t old_size = old_buffer->byte_length();
+ CHECK_EQ(0, old_size % wasm::kWasmPageSize);
+ size_t old_pages = old_size / wasm::kWasmPageSize;
+ CHECK_GE(wasm::max_mem_pages(), old_pages);
+ if ((pages > maximum_pages - old_pages) || // exceeds remaining
+ (pages > wasm::max_mem_pages() - old_pages)) { // exceeds limit
+ return -1;
}
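Both guards above are phrased as subtractions rather than additions deliberately: old_pages has already been checked against the limits, so maximum_pages - old_pages cannot underflow, whereas old_pages + pages could wrap around for a hostile pages value. A minimal illustration:

#include <cstdint>

// Precondition: old_pages <= maximum_pages (established by the CHECKs above).
bool ExceedsLimit(uint32_t old_pages, uint32_t pages, uint32_t maximum_pages) {
  return pages > maximum_pages - old_pages;  // cannot wrap
}

// A naive `old_pages + pages > maximum_pages` can wrap with uint32_t:
//   old_pages = 4, pages = 0xFFFFFFFF  =>  sum == 3 (mod 2^32),
// so the naive check would wrongly report "within limit".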
- if (!GrowMemoryBuffer(isolate, old_buffer, pages, maximum_pages)
- .ToHandle(&new_buffer)) {
+ size_t new_size =
+ static_cast<size_t>(old_pages + pages) * wasm::kWasmPageSize;
+
+ // Grow the buffer.
+ Handle<JSArrayBuffer> new_buffer;
+ if (!MemoryGrowBuffer(isolate, old_buffer, new_size).ToHandle(&new_buffer)) {
return -1;
}
+ // Update instances if any.
if (memory_object->has_instances()) {
Handle<WeakArrayList> instances(memory_object->instances(), isolate);
for (int i = 0; i < instances->length(); i++) {
- MaybeObject* elem = instances->Get(i);
- HeapObject* heap_object;
+ MaybeObject elem = instances->Get(i);
+ HeapObject heap_object;
if (elem->GetHeapObjectIfWeak(&heap_object)) {
Handle<WasmInstanceObject> instance(
WasmInstanceObject::cast(heap_object), isolate);
@@ -1088,32 +1101,44 @@ int32_t WasmMemoryObject::Grow(Isolate* isolate,
// static
MaybeHandle<WasmGlobalObject> WasmGlobalObject::New(
- Isolate* isolate, MaybeHandle<JSArrayBuffer> maybe_buffer,
- wasm::ValueType type, int32_t offset, bool is_mutable) {
+ Isolate* isolate, MaybeHandle<JSArrayBuffer> maybe_untagged_buffer,
+ MaybeHandle<FixedArray> maybe_tagged_buffer, wasm::ValueType type,
+ int32_t offset, bool is_mutable) {
Handle<JSFunction> global_ctor(
isolate->native_context()->wasm_global_constructor(), isolate);
auto global_obj = Handle<WasmGlobalObject>::cast(
isolate->factory()->NewJSObject(global_ctor));
- uint32_t type_size = wasm::ValueTypes::ElementSizeInBytes(type);
-
- Handle<JSArrayBuffer> buffer;
- if (!maybe_buffer.ToHandle(&buffer)) {
- // If no buffer was provided, create one long enough for the given type.
- buffer =
- isolate->factory()->NewJSArrayBuffer(SharedFlag::kNotShared, TENURED);
-
- const bool initialize = true;
- if (!JSArrayBuffer::SetupAllocatingData(buffer, isolate, type_size,
- initialize)) {
- return {};
+ if (type == wasm::kWasmAnyRef) {
+ DCHECK(maybe_untagged_buffer.is_null());
+ Handle<FixedArray> tagged_buffer;
+ if (!maybe_tagged_buffer.ToHandle(&tagged_buffer)) {
+ // If no buffer was provided, create one.
+ tagged_buffer = isolate->factory()->NewFixedArray(1, TENURED);
+ CHECK_EQ(offset, 0);
+ }
+ global_obj->set_tagged_buffer(*tagged_buffer);
+ } else {
+ DCHECK(maybe_tagged_buffer.is_null());
+ Handle<JSArrayBuffer> untagged_buffer;
+ uint32_t type_size = wasm::ValueTypes::ElementSizeInBytes(type);
+ if (!maybe_untagged_buffer.ToHandle(&untagged_buffer)) {
+ // If no buffer was provided, create one long enough for the given type.
+ untagged_buffer =
+ isolate->factory()->NewJSArrayBuffer(SharedFlag::kNotShared, TENURED);
+
+ const bool initialize = true;
+ if (!JSArrayBuffer::SetupAllocatingData(untagged_buffer, isolate,
+ type_size, initialize)) {
+ return {};
+ }
}
- }
- // Check that the offset is in bounds.
- CHECK_LE(offset + type_size, buffer->byte_length());
+ // Check that the offset is in bounds.
+ CHECK_LE(offset + type_size, untagged_buffer->byte_length());
- global_obj->set_array_buffer(*buffer);
+ global_obj->set_untagged_buffer(*untagged_buffer);
+ }
global_obj->set_flags(0);
global_obj->set_type(type);
global_obj->set_offset(offset);
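The branch above is about GC visibility: an anyref global holds a heap reference and must live in tagged storage the collector traces (a FixedArray), while numeric globals live in a raw JSArrayBuffer the collector never scans. A standalone model of the dichotomy, with stand-in types:

#include <cstdint>
#include <memory>
#include <vector>

struct HeapRef { /* stand-in for a GC-managed reference */ };

struct GlobalStorage {
  // Tagged storage: a (hypothetical) collector traces every slot in here.
  std::vector<std::shared_ptr<HeapRef>> tagged_slots;
  // Untagged storage: raw bytes, invisible to the collector; safe only
  // for numeric values (i32/i64/f32/f64).
  std::vector<uint8_t> untagged_bytes;
};

// A reference-typed global occupies one tagged_slots entry; an f64 global
// occupies 8 bytes of untagged_bytes at its offset.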
@@ -1125,23 +1150,42 @@ MaybeHandle<WasmGlobalObject> WasmGlobalObject::New(
void IndirectFunctionTableEntry::clear() {
instance_->indirect_function_table_sig_ids()[index_] = -1;
instance_->indirect_function_table_targets()[index_] = 0;
- instance_->indirect_function_table_instances()->set(
+ instance_->indirect_function_table_refs()->set(
index_, ReadOnlyRoots(instance_->GetIsolate()).undefined_value());
}
-void IndirectFunctionTableEntry::set(int sig_id, WasmInstanceObject* instance,
- Address call_target) {
- TRACE_IFT("IFT entry %p[%d] = {sig_id=%d, instance=%p, target=%" PRIuPTR
- "}\n",
- *instance_, index_, sig_id, instance, call_target);
+void IndirectFunctionTableEntry::Set(int sig_id,
+ Handle<WasmInstanceObject> target_instance,
+ int target_func_index) {
+ TRACE_IFT(
+ "IFT entry %p[%d] = {sig_id=%d, target_instance=%p, "
+ "target_func_index=%d}\n",
+ reinterpret_cast<void*>(instance_->ptr()), index_, sig_id,
+ reinterpret_cast<void*>(target_instance->ptr()), target_func_index);
+
+ Object ref;
+ Address call_target = 0;
+ if (target_func_index <
+ static_cast<int>(target_instance->module()->num_imported_functions)) {
+ // The function in the target instance was imported. Use its imports table,
+ // which contains a tuple needed by the import wrapper.
+ ImportedFunctionEntry entry(target_instance, target_func_index);
+ ref = entry.object_ref();
+ call_target = entry.target();
+ } else {
+ // The function in the target instance was not imported.
+ ref = *target_instance;
+ call_target = target_instance->GetCallTarget(target_func_index);
+ }
+
+ // Set the signature id, the target, and the receiver ref.
instance_->indirect_function_table_sig_ids()[index_] = sig_id;
instance_->indirect_function_table_targets()[index_] = call_target;
- instance_->indirect_function_table_instances()->set(index_, instance);
+ instance_->indirect_function_table_refs()->set(index_, ref);
}
-WasmInstanceObject* IndirectFunctionTableEntry::instance() {
- return WasmInstanceObject::cast(
- instance_->indirect_function_table_instances()->get(index_));
+Object IndirectFunctionTableEntry::object_ref() {
+ return instance_->indirect_function_table_refs()->get(index_);
}
int IndirectFunctionTableEntry::sig_id() {
@@ -1152,43 +1196,61 @@ Address IndirectFunctionTableEntry::target() {
return instance_->indirect_function_table_targets()[index_];
}
-void ImportedFunctionEntry::set_wasm_to_js(
- JSReceiver* callable, const wasm::WasmCode* wasm_to_js_wrapper) {
- TRACE_IFT("Import callable %p[%d] = {callable=%p, target=%p}\n", *instance_,
- index_, callable, wasm_to_js_wrapper->instructions().start());
+void IndirectFunctionTableEntry::CopyFrom(
+ const IndirectFunctionTableEntry& that) {
+ instance_->indirect_function_table_sig_ids()[index_] =
+ that.instance_->indirect_function_table_sig_ids()[that.index_];
+ instance_->indirect_function_table_targets()[index_] =
+ that.instance_->indirect_function_table_targets()[that.index_];
+ instance_->indirect_function_table_refs()->set(
+ index_, that.instance_->indirect_function_table_refs()->get(that.index_));
+}
+
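Set() above resolves an entry to its ultimate (ref, call_target) pair eagerly, so a call_indirect at runtime only needs a bounds check, a signature compare, and two loads from the parallel arrays. A sketch of that runtime sequence with stand-in types:

#include <cstdint>

using Address = uintptr_t;
struct Ref { /* target instance, or an (instance, callable) tuple */ };

struct IndirectTable {
  uint32_t* sig_ids;  // for the signature check
  Address* targets;   // code entrypoints
  Ref** refs;         // receiver objects, kept alive via the instance
};

// Illustrative call_indirect: report a trap on out-of-bounds or signature
// mismatch, otherwise hand back the stored (ref, target) pair to call.
bool CallIndirect(const IndirectTable& t, uint32_t size, uint32_t index,
                  uint32_t expected_sig, Ref** out_ref, Address* out_target) {
  if (index >= size) return false;                      // table bounds trap
  if (t.sig_ids[index] != expected_sig) return false;   // signature trap
  *out_ref = t.refs[index];
  *out_target = t.targets[index];
  return true;
}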
+void ImportedFunctionEntry::SetWasmToJs(
+ Isolate* isolate, Handle<JSReceiver> callable,
+ const wasm::WasmCode* wasm_to_js_wrapper) {
+ TRACE_IFT("Import callable %p[%d] = {callable=%p, target=%p}\n",
+ reinterpret_cast<void*>(instance_->ptr()), index_,
+ reinterpret_cast<void*>(callable->ptr()),
+ wasm_to_js_wrapper->instructions().start());
DCHECK_EQ(wasm::WasmCode::kWasmToJsWrapper, wasm_to_js_wrapper->kind());
- instance_->imported_function_instances()->set(index_, *instance_);
- instance_->imported_function_callables()->set(index_, callable);
+ Handle<Tuple2> tuple =
+ isolate->factory()->NewTuple2(instance_, callable, TENURED);
+ instance_->imported_function_refs()->set(index_, *tuple);
instance_->imported_function_targets()[index_] =
wasm_to_js_wrapper->instruction_start();
}
-void ImportedFunctionEntry::set_wasm_to_wasm(WasmInstanceObject* instance,
- Address call_target) {
+void ImportedFunctionEntry::SetWasmToWasm(WasmInstanceObject instance,
+ Address call_target) {
TRACE_IFT("Import WASM %p[%d] = {instance=%p, target=%" PRIuPTR "}\n",
- *instance_, index_, instance, call_target);
- instance_->imported_function_instances()->set(index_, instance);
- instance_->imported_function_callables()->set(
- index_, instance_->GetReadOnlyRoots().undefined_value());
+ reinterpret_cast<void*>(instance_->ptr()), index_,
+ reinterpret_cast<void*>(instance->ptr()), call_target);
+ instance_->imported_function_refs()->set(index_, instance);
instance_->imported_function_targets()[index_] = call_target;
}
-WasmInstanceObject* ImportedFunctionEntry::instance() {
- return WasmInstanceObject::cast(
- instance_->imported_function_instances()->get(index_));
+WasmInstanceObject ImportedFunctionEntry::instance() {
+ // The imported reference entry is either a target instance or a tuple
+ // of this instance and the target callable.
+ Object value = instance_->imported_function_refs()->get(index_);
+ if (value->IsWasmInstanceObject()) {
+ return WasmInstanceObject::cast(value);
+ }
+ Tuple2 tuple = Tuple2::cast(value);
+ return WasmInstanceObject::cast(tuple->value1());
}
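instance() above shows the encoding trick: a wasm-to-wasm import stores the target instance directly, while a wasm-to-JS import stores a Tuple2 of (instance, callable), so the stored object's type discriminates the two cases. A standalone model using std::variant:

#include <variant>

struct Instance {};
struct Callable {};
struct Tuple2 { Instance* instance; Callable* callable; };

using ImportRef = std::variant<Instance*, Tuple2>;

Instance* GetInstance(const ImportRef& ref) {
  if (auto* direct = std::get_if<Instance*>(&ref)) return *direct;  // wasm-to-wasm
  return std::get<Tuple2>(ref).instance;                            // wasm-to-JS
}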
-JSReceiver* ImportedFunctionEntry::callable() {
- return JSReceiver::cast(
- instance_->imported_function_callables()->get(index_));
+JSReceiver ImportedFunctionEntry::callable() {
+ return JSReceiver::cast(Tuple2::cast(object_ref())->value2());
}
-Address ImportedFunctionEntry::target() {
- return instance_->imported_function_targets()[index_];
+Object ImportedFunctionEntry::object_ref() {
+ return instance_->imported_function_refs()->get(index_);
}
-bool ImportedFunctionEntry::is_js_receiver_entry() {
- return instance_->imported_function_callables()->get(index_)->IsJSReceiver();
+Address ImportedFunctionEntry::target() {
+ return instance_->imported_function_targets()[index_];
}
bool WasmInstanceObject::EnsureIndirectFunctionTableWithMinimumSize(
@@ -1205,7 +1267,7 @@ bool WasmInstanceObject::EnsureIndirectFunctionTableWithMinimumSize(
}
void WasmInstanceObject::SetRawMemory(byte* mem_start, size_t mem_size) {
- CHECK_LE(mem_size, wasm::kV8MaxWasmMemoryBytes);
+ CHECK_LE(mem_size, wasm::max_mem_bytes());
#if V8_HOST_ARCH_64_BIT
uint64_t mem_mask64 = base::bits::RoundUpToPowerOfTwo64(mem_size) - 1;
set_memory_start(mem_start);
@@ -1247,32 +1309,30 @@ Handle<WasmInstanceObject> WasmInstanceObject::New(
isolate->factory()->NewJSObject(instance_cons, TENURED);
Handle<WasmInstanceObject> instance(
- reinterpret_cast<WasmInstanceObject*>(*instance_object), isolate);
+ WasmInstanceObject::cast(*instance_object), isolate);
+ instance->clear_padding();
// Initialize the imported function arrays.
auto module = module_object->module();
auto num_imported_functions = module->num_imported_functions;
auto num_imported_mutable_globals = module->num_imported_mutable_globals;
+ auto num_data_segments = module->num_declared_data_segments;
size_t native_allocations_size = EstimateNativeAllocationsSize(module);
auto native_allocations = Managed<WasmInstanceNativeAllocations>::Allocate(
isolate, native_allocations_size, instance, num_imported_functions,
- num_imported_mutable_globals);
+ num_imported_mutable_globals, num_data_segments,
+ module->elem_segments.size());
instance->set_managed_native_allocations(*native_allocations);
- Handle<FixedArray> imported_function_instances =
- isolate->factory()->NewFixedArray(num_imported_functions);
- instance->set_imported_function_instances(*imported_function_instances);
-
- Handle<FixedArray> imported_function_callables =
+ Handle<FixedArray> imported_function_refs =
isolate->factory()->NewFixedArray(num_imported_functions);
- instance->set_imported_function_callables(*imported_function_callables);
+ instance->set_imported_function_refs(*imported_function_refs);
Handle<Code> centry_stub = CodeFactory::CEntry(isolate);
instance->set_centry_stub(*centry_stub);
instance->SetRawMemory(nullptr, 0);
- instance->set_roots_array_address(
- reinterpret_cast<Address>(isolate->heap()->roots_array_start()));
+ instance->set_isolate_root(isolate->isolate_root());
instance->set_stack_limit_address(
isolate->stack_guard()->address_of_jslimit());
instance->set_real_stack_limit_address(
@@ -1296,9 +1356,54 @@ Handle<WasmInstanceObject> WasmInstanceObject::New(
isolate, weak_instance_list, MaybeObjectHandle::Weak(instance));
module_object->set_weak_instance_list(*weak_instance_list);
+ InitDataSegmentArrays(instance, module_object);
+ InitElemSegmentArrays(instance, module_object);
+
return instance;
}
+// static
+void WasmInstanceObject::InitDataSegmentArrays(
+ Handle<WasmInstanceObject> instance,
+ Handle<WasmModuleObject> module_object) {
+ auto module = module_object->module();
+ auto wire_bytes = module_object->native_module()->wire_bytes();
+ auto num_data_segments = module->num_declared_data_segments;
+ // The number of declared data segments will be zero if there is no DataCount
+ // section. These arrays will not be allocated nor initialized in that case,
+  // section. In that case these arrays are neither allocated nor initialized,
+  // since they cannot be used (the validator checks the number of declared
+  // data segments when validating the memory.init and memory.drop
+  // instructions).
+ num_data_segments == module->data_segments.size());
+ for (size_t i = 0; i < num_data_segments; ++i) {
+ const wasm::WasmDataSegment& segment = module->data_segments[i];
+    // Mark active segments as already dropped, since memory.init behaves
+    // the same on a dropped passive segment as on an active segment.
+ instance->dropped_data_segments()[i] = segment.active ? 1 : 0;
+
+ // Initialize the pointer and size of passive segments.
+ instance->data_segment_starts()[i] =
+ reinterpret_cast<Address>(&wire_bytes[segment.source.offset()]);
+ instance->data_segment_sizes()[i] = segment.source.length();
+ }
+}
+
+void WasmInstanceObject::InitElemSegmentArrays(
+ Handle<WasmInstanceObject> instance,
+ Handle<WasmModuleObject> module_object) {
+ auto module = module_object->module();
+ auto num_elem_segments = module->elem_segments.size();
+ for (size_t i = 0; i < num_elem_segments; ++i) {
+ const wasm::WasmElemSegment& segment = module->elem_segments[i];
+    // Mark active segments as already dropped, since table.init behaves
+    // the same on a dropped passive segment as on an active segment.
+ instance->dropped_elem_segments()[i] = segment.active ? 1 : 0;
+ }
+}
+
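A worked example of the drop flags set above, for a hypothetical module with one active and two passive data segments, under the bulk-memory semantics this code targets:

// Hypothetical module: segment 0 active, segments 1 and 2 passive.
//
//   index  active  dropped_data_segments[i] after instantiation
//     0     yes    1   (an active segment behaves as already dropped)
//     1     no     0
//     2     no     0
//
// memory.init then needs only one flag check at runtime: it traps on
// segment 0 (active, hence "dropped") and on any passive segment whose
// flag was set to 1 by a prior memory.drop.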
Address WasmInstanceObject::GetCallTarget(uint32_t func_index) {
wasm::NativeModule* native_module = module_object()->native_module();
if (func_index < native_module->num_imported_functions()) {
@@ -1307,6 +1412,71 @@ Address WasmInstanceObject::GetCallTarget(uint32_t func_index) {
return native_module->GetCallTargetForFunction(func_index);
}
+namespace {
+void CopyTableEntriesImpl(Handle<WasmInstanceObject> instance, uint32_t dst,
+ uint32_t src, uint32_t count) {
+ DCHECK(IsInBounds(dst, count, instance->indirect_function_table_size()));
+ if (src < dst) {
+ for (uint32_t i = count; i > 0; i--) {
+ auto to_entry = IndirectFunctionTableEntry(instance, dst + i - 1);
+ auto from_entry = IndirectFunctionTableEntry(instance, src + i - 1);
+ to_entry.CopyFrom(from_entry);
+ }
+ } else {
+ for (uint32_t i = 0; i < count; i++) {
+ auto to_entry = IndirectFunctionTableEntry(instance, dst + i);
+ auto from_entry = IndirectFunctionTableEntry(instance, src + i);
+ to_entry.CopyFrom(from_entry);
+ }
+ }
+}
+} // namespace
+
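CopyTableEntriesImpl chooses its iteration direction from the relative order of src and dst so that overlapping ranges copy correctly, exactly the memmove rule. A standalone sketch:

#include <cstdint>
#include <vector>

// Copies count entries from src to dst within the same table, choosing the
// iteration direction so an overlapping destination never reads a slot it
// has already overwritten (memmove semantics).
void CopyEntries(std::vector<int>* table, uint32_t dst, uint32_t src,
                 uint32_t count) {
  if (src < dst) {
    for (uint32_t i = count; i > 0; i--) {  // copy backwards
      (*table)[dst + i - 1] = (*table)[src + i - 1];
    }
  } else {
    for (uint32_t i = 0; i < count; i++) {  // copy forwards
      (*table)[dst + i] = (*table)[src + i];
    }
  }
}
// Example: table = {a,b,c,_,_}, dst=2, src=0, count=3 copies backwards,
// yielding {a,b,a,b,c}; a forward copy would have clobbered slot 2 first.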
+// static
+bool WasmInstanceObject::CopyTableEntries(Isolate* isolate,
+ Handle<WasmInstanceObject> instance,
+ uint32_t table_index, uint32_t dst,
+ uint32_t src, uint32_t count) {
+ CHECK_EQ(0, table_index); // TODO(titzer): multiple tables in TableCopy
+ if (count == 0) return true; // no-op
+ auto max = instance->indirect_function_table_size();
+ if (!IsInBounds(dst, count, max)) return false;
+ if (!IsInBounds(src, count, max)) return false;
+ if (dst == src) return true; // no-op
+
+ if (!instance->has_table_object()) {
+ // No table object, only need to update this instance.
+ CopyTableEntriesImpl(instance, dst, src, count);
+ return true;
+ }
+
+ Handle<WasmTableObject> table =
+ Handle<WasmTableObject>(instance->table_object(), isolate);
+ // Broadcast table copy operation to all instances that import this table.
+ Handle<FixedArray> dispatch_tables(table->dispatch_tables(), isolate);
+ for (int i = 0; i < dispatch_tables->length();
+ i += kDispatchTableNumElements) {
+ Handle<WasmInstanceObject> target_instance(
+ WasmInstanceObject::cast(
+ dispatch_tables->get(i + kDispatchTableInstanceOffset)),
+ isolate);
+ CopyTableEntriesImpl(target_instance, dst, src, count);
+ }
+
+ // Copy the function entries.
+ Handle<FixedArray> functions(table->functions(), isolate);
+ if (src < dst) {
+ for (uint32_t i = count; i > 0; i--) {
+ functions->set(dst + i - 1, functions->get(src + i - 1));
+ }
+ } else {
+ for (uint32_t i = 0; i < count; i++) {
+ functions->set(dst + i, functions->get(src + i));
+ }
+ }
+ return true;
+}
+
// static
Handle<WasmExceptionObject> WasmExceptionObject::New(
Isolate* isolate, const wasm::FunctionSig* sig,
@@ -1347,20 +1517,15 @@ bool WasmExceptionObject::IsSignatureEqual(const wasm::FunctionSig* sig) {
return true;
}
-bool WasmExportedFunction::IsWasmExportedFunction(Object* object) {
+bool WasmExportedFunction::IsWasmExportedFunction(Object object) {
if (!object->IsJSFunction()) return false;
- JSFunction* js_function = JSFunction::cast(object);
+ JSFunction js_function = JSFunction::cast(object);
if (Code::JS_TO_WASM_FUNCTION != js_function->code()->kind()) return false;
DCHECK(js_function->shared()->HasWasmExportedFunctionData());
return true;
}
-WasmExportedFunction* WasmExportedFunction::cast(Object* object) {
- DCHECK(IsWasmExportedFunction(object));
- return reinterpret_cast<WasmExportedFunction*>(object);
-}
-
-WasmInstanceObject* WasmExportedFunction::instance() {
+WasmInstanceObject WasmExportedFunction::instance() {
return shared()->wasm_exported_function_data()->instance();
}
@@ -1413,6 +1578,37 @@ Address WasmExportedFunction::GetWasmCallTarget() {
return instance()->GetCallTarget(function_index());
}
+wasm::FunctionSig* WasmExportedFunction::sig() {
+ return instance()->module()->functions[function_index()].sig;
+}
+
+Handle<WasmExceptionTag> WasmExceptionTag::New(Isolate* isolate, int index) {
+ Handle<WasmExceptionTag> result = Handle<WasmExceptionTag>::cast(
+ isolate->factory()->NewStruct(WASM_EXCEPTION_TAG_TYPE, TENURED));
+ result->set_index(index);
+ return result;
+}
+
+Handle<AsmWasmData> AsmWasmData::New(
+ Isolate* isolate, std::shared_ptr<wasm::NativeModule> native_module,
+ Handle<FixedArray> export_wrappers, Handle<ByteArray> asm_js_offset_table,
+ Handle<HeapNumber> uses_bitset) {
+ const WasmModule* module = native_module->module();
+ size_t memory_estimate =
+ wasm::WasmCodeManager::EstimateNativeModuleCodeSize(module) +
+ wasm::WasmCodeManager::EstimateNativeModuleNonCodeSize(module);
+ Handle<Managed<wasm::NativeModule>> managed_native_module =
+ Managed<wasm::NativeModule>::FromSharedPtr(isolate, memory_estimate,
+ std::move(native_module));
+ Handle<AsmWasmData> result = Handle<AsmWasmData>::cast(
+ isolate->factory()->NewStruct(ASM_WASM_DATA_TYPE, TENURED));
+ result->set_managed_native_module(*managed_native_module);
+ result->set_export_wrappers(*export_wrappers);
+ result->set_asm_js_offset_table(*asm_js_offset_table);
+ result->set_uses_bitset(*uses_bitset);
+ return result;
+}
+
#undef TRACE
#undef TRACE_IFT
} // namespace internal
diff --git a/deps/v8/src/wasm/wasm-objects.h b/deps/v8/src/wasm/wasm-objects.h
index 084b70489c..84aeb8972d 100644
--- a/deps/v8/src/wasm/wasm-objects.h
+++ b/deps/v8/src/wasm/wasm-objects.h
@@ -20,17 +20,16 @@
namespace v8 {
namespace internal {
namespace wasm {
+struct CompilationEnv;
class InterpretedFrame;
struct InterpretedFrameDeleter;
class NativeModule;
-struct ModuleEnv;
+class SignatureMap;
class WasmCode;
+struct WasmFeatures;
+class WasmInterpreter;
struct WasmModule;
-class SignatureMap;
class WireBytesRef;
-class WasmInterpreter;
-using FunctionSig = Signature<ValueType>;
-struct WasmFeatures;
} // namespace wasm
class BreakPoint;
@@ -38,6 +37,7 @@ class JSArrayBuffer;
class SeqOneByteString;
class WasmDebugInfo;
class WasmInstanceObject;
+class WasmModuleObject;
template <class CppType>
class Managed;
@@ -46,19 +46,24 @@ class Managed;
V8_INLINE bool has_##name(); \
DECL_ACCESSORS(name, type)
-// An entry in an indirect function table (IFT).
-// Each entry in the IFT has the following fields:
-// - instance = target instance
-// - sig_id = signature id of function
-// - target = entrypoint to wasm code for the function, or wasm-to-js wrapper
+// A helper for an entry in an indirect function table (IFT).
+// The underlying storage in the instance is used by generated code to
+// call functions indirectly at runtime.
+// Each entry has the following fields:
+// - object = target instance for a WASM function, or a tuple for an import
+// - sig_id = signature id of function
+// - target = entrypoint to WASM code or import wrapper code
class IndirectFunctionTableEntry {
public:
inline IndirectFunctionTableEntry(Handle<WasmInstanceObject>, int index);
void clear();
- void set(int sig_id, WasmInstanceObject* instance, Address call_target);
+ void Set(int sig_id, Handle<WasmInstanceObject> target_instance,
+ int target_func_index);
+
+ void CopyFrom(const IndirectFunctionTableEntry& that);
- WasmInstanceObject* instance();
+ Object object_ref();
int sig_id();
Address target();
@@ -67,32 +72,31 @@ class IndirectFunctionTableEntry {
int const index_;
};
-// An entry for an imported function.
-// (note this is not called a "table" since it is not dynamically indexed).
-// The imported function entries are used to call imported functions.
-// For each imported function there is an entry which is either:
-// - an imported JSReceiver, which has fields
-// - instance = importing instance
-// - receiver = JSReceiver, either a JS function or other callable
-// - target = pointer to wasm-to-js wrapper code entrypoint
-// - an imported wasm function from another instance, which has fields
-// - instance = target instance
-// - target = entrypoint for the function
+// A helper for an entry for an imported function, indexed statically.
+// The underlying storage in the instance is used by generated code to
+// call imported functions at runtime.
+// Each entry is either:
+// - WASM to JS, which has fields
+// - object = a Tuple2 of the importing instance and the callable
+// - target = entrypoint to import wrapper code
+// - WASM to WASM, which has fields
+// - object = target instance
+// - target = entrypoint for the function
class ImportedFunctionEntry {
public:
inline ImportedFunctionEntry(Handle<WasmInstanceObject>, int index);
- // Initialize this entry as a {JSReceiver} call.
- void set_wasm_to_js(JSReceiver* callable,
- const wasm::WasmCode* wasm_to_js_wrapper);
+ // Initialize this entry as a WASM to JS call. This accepts the isolate as a
+ // parameter, since it must allocate a tuple.
+ void SetWasmToJs(Isolate*, Handle<JSReceiver> callable,
+ const wasm::WasmCode* wasm_to_js_wrapper);
// Initialize this entry as a WASM to WASM call.
- void set_wasm_to_wasm(WasmInstanceObject* target_instance,
- Address call_target);
+ void SetWasmToWasm(WasmInstanceObject target_instance, Address call_target);
- WasmInstanceObject* instance();
- JSReceiver* callable();
+ WasmInstanceObject instance();
+ JSReceiver callable();
+ Object object_ref();
Address target();
- bool is_js_receiver_entry();
private:
Handle<WasmInstanceObject> const instance_;
@@ -111,6 +115,7 @@ class WasmModuleObject : public JSObject {
DECL_OPTIONAL_ACCESSORS(asm_js_offset_table, ByteArray)
DECL_OPTIONAL_ACCESSORS(breakpoint_infos, FixedArray)
inline wasm::NativeModule* native_module() const;
+ inline std::shared_ptr<wasm::NativeModule> shared_native_module() const;
inline const wasm::WasmModule* module() const;
inline void reset_breakpoint_infos();
@@ -119,13 +124,13 @@ class WasmModuleObject : public JSObject {
DECL_VERIFIER(WasmModuleObject)
// Layout description.
-#define WASM_MODULE_OBJECT_FIELDS(V) \
- V(kNativeModuleOffset, kPointerSize) \
- V(kExportWrappersOffset, kPointerSize) \
- V(kScriptOffset, kPointerSize) \
- V(kWeakInstanceListOffset, kPointerSize) \
- V(kAsmJsOffsetTableOffset, kPointerSize) \
- V(kBreakPointInfosOffset, kPointerSize) \
+#define WASM_MODULE_OBJECT_FIELDS(V) \
+ V(kNativeModuleOffset, kTaggedSize) \
+ V(kExportWrappersOffset, kTaggedSize) \
+ V(kScriptOffset, kTaggedSize) \
+ V(kWeakInstanceListOffset, kTaggedSize) \
+ V(kAsmJsOffsetTableOffset, kTaggedSize) \
+ V(kBreakPointInfosOffset, kTaggedSize) \
V(kSize, 0)
DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
@@ -135,7 +140,7 @@ class WasmModuleObject : public JSObject {
// Creates a new {WasmModuleObject} with a new {NativeModule} underneath.
static Handle<WasmModuleObject> New(
Isolate* isolate, const wasm::WasmFeatures& enabled,
- std::shared_ptr<const wasm::WasmModule> module, wasm::ModuleEnv& env,
+ std::shared_ptr<const wasm::WasmModule> module,
OwnedVector<const uint8_t> wire_bytes, Handle<Script> script,
Handle<ByteArray> asm_js_offset_table);
@@ -143,7 +148,11 @@ class WasmModuleObject : public JSObject {
// reference counted and might be shared between multiple Isolates.
static Handle<WasmModuleObject> New(
Isolate* isolate, std::shared_ptr<wasm::NativeModule> native_module,
- Handle<Script> script);
+ Handle<Script> script, size_t code_size_estimate);
+ static Handle<WasmModuleObject> New(
+ Isolate* isolate, std::shared_ptr<wasm::NativeModule> native_module,
+ Handle<Script> script, Handle<FixedArray> export_wrappers,
+ size_t code_size_estimate);
// Set a breakpoint on the given byte position inside the given module.
// This will affect all live and future instances of the module.
@@ -233,6 +242,8 @@ class WasmModuleObject : public JSObject {
static MaybeHandle<FixedArray> CheckBreakPoints(Isolate*,
Handle<WasmModuleObject>,
int position);
+
+ OBJECT_CONSTRUCTORS(WasmModuleObject, JSObject)
};
// Representation of a WebAssembly.Table JavaScript-level object.
@@ -246,10 +257,10 @@ class WasmTableObject : public JSObject {
DECL_ACCESSORS(dispatch_tables, FixedArray)
// Layout description.
-#define WASM_TABLE_OBJECT_FIELDS(V) \
- V(kFunctionsOffset, kPointerSize) \
- V(kMaximumLengthOffset, kPointerSize) \
- V(kDispatchTablesOffset, kPointerSize) \
+#define WASM_TABLE_OBJECT_FIELDS(V) \
+ V(kFunctionsOffset, kTaggedSize) \
+ V(kMaximumLengthOffset, kTaggedSize) \
+ V(kDispatchTablesOffset, kTaggedSize) \
V(kSize, 0)
DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize, WASM_TABLE_OBJECT_FIELDS)
@@ -259,23 +270,25 @@ class WasmTableObject : public JSObject {
void Grow(Isolate* isolate, uint32_t count);
static Handle<WasmTableObject> New(Isolate* isolate, uint32_t initial,
- int64_t maximum,
+ uint32_t maximum,
Handle<FixedArray>* js_functions);
static void AddDispatchTable(Isolate* isolate, Handle<WasmTableObject> table,
Handle<WasmInstanceObject> instance,
int table_index);
static void Set(Isolate* isolate, Handle<WasmTableObject> table,
- int32_t index, Handle<JSFunction> function);
+ uint32_t index, Handle<JSFunction> function);
static void UpdateDispatchTables(Isolate* isolate,
Handle<WasmTableObject> table,
int table_index, wasm::FunctionSig* sig,
- Handle<WasmInstanceObject> from_instance,
- Address call_target);
+ Handle<WasmInstanceObject> target_instance,
+ int target_func_index);
static void ClearDispatchTables(Isolate* isolate,
Handle<WasmTableObject> table, int index);
+
+ OBJECT_CONSTRUCTORS(WasmTableObject, JSObject)
};
// Representation of a WebAssembly.Memory JavaScript-level object.
@@ -288,10 +301,10 @@ class WasmMemoryObject : public JSObject {
DECL_OPTIONAL_ACCESSORS(instances, WeakArrayList)
// Layout description.
-#define WASM_MEMORY_OBJECT_FIELDS(V) \
- V(kArrayBufferOffset, kPointerSize) \
- V(kMaximumPagesOffset, kPointerSize) \
- V(kInstancesOffset, kPointerSize) \
+#define WASM_MEMORY_OBJECT_FIELDS(V) \
+ V(kArrayBufferOffset, kTaggedSize) \
+ V(kMaximumPagesOffset, kTaggedSize) \
+ V(kInstancesOffset, kTaggedSize) \
V(kSize, 0)
DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
@@ -301,10 +314,6 @@ class WasmMemoryObject : public JSObject {
// Add an instance to the internal (weak) list.
static void AddInstance(Isolate* isolate, Handle<WasmMemoryObject> memory,
Handle<WasmInstanceObject> object);
- // Remove an instance from the internal (weak) list.
- static void RemoveInstance(Handle<WasmMemoryObject> memory,
- Handle<WasmInstanceObject> object);
- uint32_t current_pages();
inline bool has_maximum_pages();
// Return whether the underlying backing store has guard regions large enough
@@ -312,9 +321,11 @@ class WasmMemoryObject : public JSObject {
bool has_full_guard_region(Isolate* isolate);
V8_EXPORT_PRIVATE static Handle<WasmMemoryObject> New(
- Isolate* isolate, MaybeHandle<JSArrayBuffer> buffer, int32_t maximum);
+ Isolate* isolate, MaybeHandle<JSArrayBuffer> buffer, uint32_t maximum);
static int32_t Grow(Isolate*, Handle<WasmMemoryObject>, uint32_t pages);
+
+ OBJECT_CONSTRUCTORS(WasmMemoryObject, JSObject)
};
// Representation of a WebAssembly.Global JavaScript-level object.
@@ -322,7 +333,8 @@ class WasmGlobalObject : public JSObject {
public:
DECL_CAST(WasmGlobalObject)
- DECL_ACCESSORS(array_buffer, JSArrayBuffer)
+ DECL_ACCESSORS(untagged_buffer, JSArrayBuffer)
+ DECL_ACCESSORS(tagged_buffer, FixedArray)
DECL_INT32_ACCESSORS(offset)
DECL_INT_ACCESSORS(flags)
DECL_PRIMITIVE_ACCESSORS(type, wasm::ValueType)
@@ -337,10 +349,11 @@ class WasmGlobalObject : public JSObject {
#undef WASM_GLOBAL_OBJECT_FLAGS_BIT_FIELDS
// Layout description.
-#define WASM_GLOBAL_OBJECT_FIELDS(V) \
- V(kArrayBufferOffset, kPointerSize) \
- V(kOffsetOffset, kPointerSize) \
- V(kFlagsOffset, kPointerSize) \
+#define WASM_GLOBAL_OBJECT_FIELDS(V) \
+ V(kUntaggedBufferOffset, kTaggedSize) \
+ V(kTaggedBufferOffset, kTaggedSize) \
+ V(kOffsetOffset, kTaggedSize) \
+ V(kFlagsOffset, kTaggedSize) \
V(kSize, 0)
DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
@@ -348,7 +361,8 @@ class WasmGlobalObject : public JSObject {
#undef WASM_GLOBAL_OBJECT_FIELDS
V8_EXPORT_PRIVATE static MaybeHandle<WasmGlobalObject> New(
- Isolate* isolate, MaybeHandle<JSArrayBuffer> buffer, wasm::ValueType type,
+ Isolate* isolate, MaybeHandle<JSArrayBuffer> maybe_untagged_buffer,
+ MaybeHandle<FixedArray> maybe_tagged_buffer, wasm::ValueType type,
int32_t offset, bool is_mutable);
inline int type_size() const;
@@ -357,17 +371,21 @@ class WasmGlobalObject : public JSObject {
inline int64_t GetI64();
inline float GetF32();
inline double GetF64();
+ inline Handle<Object> GetAnyRef();
inline void SetI32(int32_t value);
inline void SetI64(int64_t value);
inline void SetF32(float value);
inline void SetF64(double value);
+ inline void SetAnyRef(Handle<Object> value);
private:
// This function returns the address of the global's data in the
// JSArrayBuffer. This buffer may be allocated on-heap, in which case it may
// not have a fixed address.
inline Address address() const;
+
+ OBJECT_CONSTRUCTORS(WasmGlobalObject, JSObject)
};
// Representation of a WebAssembly.Instance JavaScript-level object.
@@ -379,13 +397,13 @@ class WasmInstanceObject : public JSObject {
DECL_ACCESSORS(exports_object, JSObject)
DECL_ACCESSORS(native_context, Context)
DECL_OPTIONAL_ACCESSORS(memory_object, WasmMemoryObject)
- DECL_OPTIONAL_ACCESSORS(globals_buffer, JSArrayBuffer)
+ DECL_OPTIONAL_ACCESSORS(untagged_globals_buffer, JSArrayBuffer)
+ DECL_OPTIONAL_ACCESSORS(tagged_globals_buffer, FixedArray)
DECL_OPTIONAL_ACCESSORS(imported_mutable_globals_buffers, FixedArray)
DECL_OPTIONAL_ACCESSORS(debug_info, WasmDebugInfo)
DECL_OPTIONAL_ACCESSORS(table_object, WasmTableObject)
- DECL_ACCESSORS(imported_function_instances, FixedArray)
- DECL_ACCESSORS(imported_function_callables, FixedArray)
- DECL_OPTIONAL_ACCESSORS(indirect_function_table_instances, FixedArray)
+ DECL_ACCESSORS(imported_function_refs, FixedArray)
+ DECL_OPTIONAL_ACCESSORS(indirect_function_table_refs, FixedArray)
DECL_OPTIONAL_ACCESSORS(managed_native_allocations, Foreign)
DECL_OPTIONAL_ACCESSORS(exceptions_table, FixedArray)
DECL_ACCESSORS(undefined_value, Oddball)
@@ -394,7 +412,7 @@ class WasmInstanceObject : public JSObject {
DECL_PRIMITIVE_ACCESSORS(memory_start, byte*)
DECL_PRIMITIVE_ACCESSORS(memory_size, size_t)
DECL_PRIMITIVE_ACCESSORS(memory_mask, size_t)
- DECL_PRIMITIVE_ACCESSORS(roots_array_address, Address)
+ DECL_PRIMITIVE_ACCESSORS(isolate_root, Address)
DECL_PRIMITIVE_ACCESSORS(stack_limit_address, Address)
DECL_PRIMITIVE_ACCESSORS(real_stack_limit_address, Address)
DECL_PRIMITIVE_ACCESSORS(imported_function_targets, Address*)
@@ -404,50 +422,68 @@ class WasmInstanceObject : public JSObject {
DECL_PRIMITIVE_ACCESSORS(indirect_function_table_sig_ids, uint32_t*)
DECL_PRIMITIVE_ACCESSORS(indirect_function_table_targets, Address*)
DECL_PRIMITIVE_ACCESSORS(jump_table_start, Address)
+ DECL_PRIMITIVE_ACCESSORS(data_segment_starts, Address*)
+ DECL_PRIMITIVE_ACCESSORS(data_segment_sizes, uint32_t*)
+ DECL_PRIMITIVE_ACCESSORS(dropped_data_segments, byte*)
+ DECL_PRIMITIVE_ACCESSORS(dropped_elem_segments, byte*)
+
+ V8_INLINE void clear_padding();
// Dispatched behavior.
DECL_PRINTER(WasmInstanceObject)
DECL_VERIFIER(WasmInstanceObject)
// Layout description.
-#define WASM_INSTANCE_OBJECT_FIELDS(V) \
- V(kModuleObjectOffset, kPointerSize) \
- V(kExportsObjectOffset, kPointerSize) \
- V(kNativeContextOffset, kPointerSize) \
- V(kMemoryObjectOffset, kPointerSize) \
- V(kGlobalsBufferOffset, kPointerSize) \
- V(kImportedMutableGlobalsBuffersOffset, kPointerSize) \
- V(kDebugInfoOffset, kPointerSize) \
- V(kTableObjectOffset, kPointerSize) \
- V(kImportedFunctionInstancesOffset, kPointerSize) \
- V(kImportedFunctionCallablesOffset, kPointerSize) \
- V(kIndirectFunctionTableInstancesOffset, kPointerSize) \
- V(kManagedNativeAllocationsOffset, kPointerSize) \
- V(kExceptionsTableOffset, kPointerSize) \
- V(kUndefinedValueOffset, kPointerSize) \
- V(kNullValueOffset, kPointerSize) \
- V(kCEntryStubOffset, kPointerSize) \
- V(kFirstUntaggedOffset, 0) /* marker */ \
- V(kMemoryStartOffset, kPointerSize) /* untagged */ \
- V(kMemorySizeOffset, kSizetSize) /* untagged */ \
- V(kMemoryMaskOffset, kSizetSize) /* untagged */ \
- V(kRootsArrayAddressOffset, kPointerSize) /* untagged */ \
- V(kStackLimitAddressOffset, kPointerSize) /* untagged */ \
- V(kRealStackLimitAddressOffset, kPointerSize) /* untagged */ \
- V(kImportedFunctionTargetsOffset, kPointerSize) /* untagged */ \
- V(kGlobalsStartOffset, kPointerSize) /* untagged */ \
- V(kImportedMutableGlobalsOffset, kPointerSize) /* untagged */ \
- V(kIndirectFunctionTableSigIdsOffset, kPointerSize) /* untagged */ \
- V(kIndirectFunctionTableTargetsOffset, kPointerSize) /* untagged */ \
- V(kJumpTableStartOffset, kPointerSize) /* untagged */ \
- V(kIndirectFunctionTableSizeOffset, kUInt32Size) /* untagged */ \
- V(k64BitArchPaddingOffset, kPointerSize - kUInt32Size) /* padding */ \
+#define WASM_INSTANCE_OBJECT_FIELDS(V) \
+ /* Tagged values. */ \
+ V(kModuleObjectOffset, kTaggedSize) \
+ V(kExportsObjectOffset, kTaggedSize) \
+ V(kNativeContextOffset, kTaggedSize) \
+ V(kMemoryObjectOffset, kTaggedSize) \
+ V(kUntaggedGlobalsBufferOffset, kTaggedSize) \
+ V(kTaggedGlobalsBufferOffset, kTaggedSize) \
+ V(kImportedMutableGlobalsBuffersOffset, kTaggedSize) \
+ V(kDebugInfoOffset, kTaggedSize) \
+ V(kTableObjectOffset, kTaggedSize) \
+ V(kImportedFunctionRefsOffset, kTaggedSize) \
+ V(kIndirectFunctionTableRefsOffset, kTaggedSize) \
+ V(kManagedNativeAllocationsOffset, kTaggedSize) \
+ V(kExceptionsTableOffset, kTaggedSize) \
+ V(kUndefinedValueOffset, kTaggedSize) \
+ V(kNullValueOffset, kTaggedSize) \
+ V(kCEntryStubOffset, kTaggedSize) \
+ V(kEndOfTaggedFieldsOffset, 0) \
+ /* Raw data. */ \
+ V(kIndirectFunctionTableSizeOffset, kUInt32Size) \
+ /* Optional padding to align system pointer size fields */ \
+ V(kOptionalPaddingOffset, POINTER_SIZE_PADDING(kOptionalPaddingOffset)) \
+ V(kFirstSystemPointerFieldOffset, 0) \
+ V(kMemoryStartOffset, kSystemPointerSize) \
+ V(kMemorySizeOffset, kSizetSize) \
+ V(kMemoryMaskOffset, kSizetSize) \
+ V(kIsolateRootOffset, kSystemPointerSize) \
+ V(kStackLimitAddressOffset, kSystemPointerSize) \
+ V(kRealStackLimitAddressOffset, kSystemPointerSize) \
+ V(kImportedFunctionTargetsOffset, kSystemPointerSize) \
+ V(kGlobalsStartOffset, kSystemPointerSize) \
+ V(kImportedMutableGlobalsOffset, kSystemPointerSize) \
+ V(kIndirectFunctionTableSigIdsOffset, kSystemPointerSize) \
+ V(kIndirectFunctionTableTargetsOffset, kSystemPointerSize) \
+ V(kJumpTableStartOffset, kSystemPointerSize) \
+ V(kDataSegmentStartsOffset, kSystemPointerSize) \
+ V(kDataSegmentSizesOffset, kSystemPointerSize) \
+ V(kDroppedDataSegmentsOffset, kSystemPointerSize) \
+ V(kDroppedElemSegmentsOffset, kSystemPointerSize) \
+ /* Header size. */ \
V(kSize, 0)
DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
WASM_INSTANCE_OBJECT_FIELDS)
#undef WASM_INSTANCE_OBJECT_FIELDS
+ STATIC_ASSERT(IsAligned(kFirstSystemPointerFieldOffset, kSystemPointerSize));
+ STATIC_ASSERT(IsAligned(kSize, kTaggedSize));
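// A self-contained sketch of the offset-list technique used above: each
// V(name, size) entry becomes an enum constant holding the running sum of all
// earlier sizes, so zero-sized entries act as pure markers. This mirrors the
// observable behavior of DEFINE_FIELD_OFFSET_CONSTANTS (an assumption, not
// its literal definition); every kDemo* name below is invented.
#include <cstdio>

constexpr int kDemoHeaderSize = 16;  // stand-in for JSObject::kHeaderSize

#define DEMO_FIELDS(V)                           \
  V(kDemoTaggedOffset, 8) /* a tagged field */   \
  V(kDemoEndOfTaggedOffset, 0) /* marker */      \
  V(kDemoRawWordOffset, 8) /* a raw pointer */   \
  V(kDemoSize, 0) /* total object size */

enum {
  DEMO_FIELDS_StartOffset = kDemoHeaderSize - 1,
#define DEMO_DEF(name, size) name, name##End = name + (size)-1,
  DEMO_FIELDS(DEMO_DEF)
#undef DEMO_DEF
};

int main() {
  // The zero-sized marker aliases the next field's offset; kDemoSize is the
  // total size of the demo object.
  std::printf("%d %d %d %d\n", kDemoTaggedOffset, kDemoEndOfTaggedOffset,
              kDemoRawWordOffset, kDemoSize);  // prints: 16 24 24 32
}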
+
V8_EXPORT_PRIVATE const wasm::WasmModule* module();
static bool EnsureIndirectFunctionTableWithMinimumSize(
@@ -465,8 +501,22 @@ class WasmInstanceObject : public JSObject {
Address GetCallTarget(uint32_t func_index);
+ // Copies table entries. Returns {false} if the ranges are out-of-bounds.
+ static bool CopyTableEntries(Isolate* isolate,
+ Handle<WasmInstanceObject> instance,
+ uint32_t table_index, uint32_t dst, uint32_t src,
+ uint32_t count) V8_WARN_UNUSED_RESULT;
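  // Usage sketch (illustrative; {isolate}, {instance} and the index variables
  // are assumed to be in scope, and the trap helper name is hypothetical):
  //   if (!WasmInstanceObject::CopyTableEntries(isolate, instance, table_index,
  //                                             dst, src, count)) {
  //     return ThrowTableOutOfBoundsError();  // hypothetical helper
  //   }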
+
// Iterates all fields in the object except the untagged fields.
class BodyDescriptor;
+
+ OBJECT_CONSTRUCTORS(WasmInstanceObject, JSObject)
+
+ private:
+ static void InitDataSegmentArrays(Handle<WasmInstanceObject>,
+ Handle<WasmModuleObject>);
+ static void InitElemSegmentArrays(Handle<WasmInstanceObject>,
+ Handle<WasmModuleObject>);
};
// Representation of WebAssembly.Exception JavaScript-level object.
@@ -478,9 +528,9 @@ class WasmExceptionObject : public JSObject {
DECL_ACCESSORS(exception_tag, HeapObject)
// Layout description.
-#define WASM_EXCEPTION_OBJECT_FIELDS(V) \
- V(kSerializedSignatureOffset, kPointerSize) \
- V(kExceptionTagOffset, kPointerSize) \
+#define WASM_EXCEPTION_OBJECT_FIELDS(V) \
+ V(kSerializedSignatureOffset, kTaggedSize) \
+ V(kExceptionTagOffset, kTaggedSize) \
V(kSize, 0)
DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
@@ -494,16 +544,17 @@ class WasmExceptionObject : public JSObject {
static Handle<WasmExceptionObject> New(Isolate* isolate,
const wasm::FunctionSig* sig,
Handle<HeapObject> exception_tag);
+
+ OBJECT_CONSTRUCTORS(WasmExceptionObject, JSObject)
};
// A WASM function that is wrapped and exported to JavaScript.
class WasmExportedFunction : public JSFunction {
public:
- WasmInstanceObject* instance();
+ WasmInstanceObject instance();
V8_EXPORT_PRIVATE int function_index();
- V8_EXPORT_PRIVATE static WasmExportedFunction* cast(Object* object);
- static bool IsWasmExportedFunction(Object* object);
+ V8_EXPORT_PRIVATE static bool IsWasmExportedFunction(Object object);
static Handle<WasmExportedFunction> New(Isolate* isolate,
Handle<WasmInstanceObject> instance,
@@ -512,6 +563,11 @@ class WasmExportedFunction : public JSFunction {
Handle<Code> export_wrapper);
Address GetWasmCallTarget();
+
+ wasm::FunctionSig* sig();
+
+ DECL_CAST(WasmExportedFunction)
+ OBJECT_CONSTRUCTORS(WasmExportedFunction, JSFunction)
};
// Information for a WasmExportedFunction which is referenced as the function
@@ -531,20 +587,23 @@ class WasmExportedFunctionData : public Struct {
DECL_VERIFIER(WasmExportedFunctionData)
// Layout description.
-#define WASM_EXPORTED_FUNCTION_DATA_FIELDS(V) \
- V(kWrapperCodeOffset, kPointerSize) \
- V(kInstanceOffset, kPointerSize) \
- V(kJumpTableOffsetOffset, kPointerSize) /* Smi */ \
- V(kFunctionIndexOffset, kPointerSize) /* Smi */ \
+#define WASM_EXPORTED_FUNCTION_DATA_FIELDS(V) \
+ V(kWrapperCodeOffset, kTaggedSize) \
+ V(kInstanceOffset, kTaggedSize) \
+ V(kJumpTableOffsetOffset, kTaggedSize) /* Smi */ \
+ V(kFunctionIndexOffset, kTaggedSize) /* Smi */ \
V(kSize, 0)
DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize,
WASM_EXPORTED_FUNCTION_DATA_FIELDS)
#undef WASM_EXPORTED_FUNCTION_DATA_FIELDS
+
+ OBJECT_CONSTRUCTORS(WasmExportedFunctionData, Struct)
};
-class WasmDebugInfo : public Struct, public NeverReadOnlySpaceObject {
+class WasmDebugInfo : public Struct {
public:
+ NEVER_READ_ONLY_SPACE
DECL_ACCESSORS(wasm_instance, WasmInstanceObject)
DECL_ACCESSORS(interpreter_handle, Object); // Foreign or undefined
DECL_ACCESSORS(interpreted_functions, FixedArray);
@@ -559,13 +618,13 @@ class WasmDebugInfo : public Struct, public NeverReadOnlySpaceObject {
DECL_VERIFIER(WasmDebugInfo)
// Layout description.
-#define WASM_DEBUG_INFO_FIELDS(V) \
- V(kInstanceOffset, kPointerSize) \
- V(kInterpreterHandleOffset, kPointerSize) \
- V(kInterpretedFunctionsOffset, kPointerSize) \
- V(kLocalsNamesOffset, kPointerSize) \
- V(kCWasmEntriesOffset, kPointerSize) \
- V(kCWasmEntryMapOffset, kPointerSize) \
+#define WASM_DEBUG_INFO_FIELDS(V) \
+ V(kInstanceOffset, kTaggedSize) \
+ V(kInterpreterHandleOffset, kTaggedSize) \
+ V(kInterpretedFunctionsOffset, kTaggedSize) \
+ V(kLocalsNamesOffset, kTaggedSize) \
+ V(kCWasmEntriesOffset, kTaggedSize) \
+ V(kCWasmEntryMapOffset, kTaggedSize) \
V(kSize, 0)
DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize, WASM_DEBUG_INFO_FIELDS)
@@ -636,6 +695,68 @@ class WasmDebugInfo : public Struct, public NeverReadOnlySpaceObject {
static Handle<JSFunction> GetCWasmEntry(Handle<WasmDebugInfo>,
wasm::FunctionSig*);
+
+ OBJECT_CONSTRUCTORS(WasmDebugInfo, Struct)
+};
+
+// Tags provide an object identity for each exception defined in a wasm module
+// header. They are referenced by the following fields:
+// - {WasmExceptionObject::exception_tag} : The tag of the exception object.
+// - {WasmInstanceObject::exceptions_table}: List of tags used by an instance.
+class WasmExceptionTag : public Struct {
+ public:
+ static Handle<WasmExceptionTag> New(Isolate* isolate, int index);
+
+ // Note that this index is only useful for debugging purposes, and it is not
+ // unique across modules. The GC, however, does not allow objects without at
+ // least one field, so this also serves as a padding field for now.
+ DECL_INT_ACCESSORS(index);
+
+ DECL_CAST(WasmExceptionTag)
+ DECL_PRINTER(WasmExceptionTag)
+ DECL_VERIFIER(WasmExceptionTag)
+
+// Layout description.
+#define WASM_EXCEPTION_TAG_FIELDS(V) \
+ V(kIndexOffset, kTaggedSize) \
+ /* Total size. */ \
+ V(kSize, 0)
+
+ DEFINE_FIELD_OFFSET_CONSTANTS(Struct::kHeaderSize, WASM_EXCEPTION_TAG_FIELDS)
+#undef WASM_EXCEPTION_TAG_FIELDS
+
+ OBJECT_CONSTRUCTORS(WasmExceptionTag, Struct)
+};
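// Matching sketch (pseudo-code, an assumption drawn from the comment above):
// because a tag carries identity rather than data, deciding whether a caught
// exception belongs to a given handler compares tag objects, never {index}:
//
//   handled = (tag_of(caught_exception) == expected_tag);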
+
+class AsmWasmData : public Struct {
+ public:
+ static Handle<AsmWasmData> New(
+ Isolate* isolate, std::shared_ptr<wasm::NativeModule> native_module,
+ Handle<FixedArray> export_wrappers, Handle<ByteArray> asm_js_offset_table,
+ Handle<HeapNumber> uses_bitset);
+
+ DECL_ACCESSORS(managed_native_module, Managed<wasm::NativeModule>)
+ DECL_ACCESSORS(export_wrappers, FixedArray)
+ DECL_ACCESSORS(asm_js_offset_table, ByteArray)
+ DECL_ACCESSORS(uses_bitset, HeapNumber)
+
+ DECL_CAST(AsmWasmData)
+ DECL_PRINTER(AsmWasmData)
+ DECL_VERIFIER(AsmWasmData)
+
+// Layout description.
+#define ASM_WASM_DATA_FIELDS(V) \
+ V(kManagedNativeModuleOffset, kTaggedSize) \
+ V(kExportWrappersOffset, kTaggedSize) \
+ V(kAsmJsOffsetTableOffset, kTaggedSize) \
+ V(kUsesBitsetOffset, kTaggedSize) \
+ /* Total size. */ \
+ V(kSize, 0)
+
+ DEFINE_FIELD_OFFSET_CONSTANTS(Struct::kHeaderSize, ASM_WASM_DATA_FIELDS)
+#undef ASM_WASM_DATA_FIELDS
+
+ OBJECT_CONSTRUCTORS(AsmWasmData, Struct)
};
#undef DECL_OPTIONAL_ACCESSORS
diff --git a/deps/v8/src/wasm/wasm-opcodes.cc b/deps/v8/src/wasm/wasm-opcodes.cc
index 650cb629f6..c8dfcf50e6 100644
--- a/deps/v8/src/wasm/wasm-opcodes.cc
+++ b/deps/v8/src/wasm/wasm-opcodes.cc
@@ -110,12 +110,6 @@ const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
CASE_I32_OP(ConvertI64, "wrap/i64")
CASE_CONVERT_OP(Convert, INT, F32, "f32", "trunc")
CASE_CONVERT_OP(Convert, INT, F64, "f64", "trunc")
- // TODO(kschimpf): Simplify after filling in other saturating operations.
- CASE_CONVERT_SAT_OP(Convert, I32, F32, "f32", "trunc")
- CASE_CONVERT_SAT_OP(Convert, I32, F64, "f64", "trunc")
- CASE_CONVERT_SAT_OP(Convert, I64, F32, "f32", "trunc")
- CASE_CONVERT_SAT_OP(Convert, I64, F64, "f64", "trunc")
-
CASE_CONVERT_OP(Convert, I64, I32, "i32", "extend")
CASE_CONVERT_OP(Convert, F32, I32, "i32", "convert")
CASE_CONVERT_OP(Convert, F32, I64, "i64", "convert")
@@ -151,8 +145,8 @@ const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
CASE_OP(GetGlobal, "get_global")
CASE_OP(SetGlobal, "set_global")
CASE_ALL_OP(Const, "const")
- CASE_OP(MemorySize, "current_memory")
- CASE_OP(GrowMemory, "grow_memory")
+ CASE_OP(MemorySize, "memory.size")
+ CASE_OP(MemoryGrow, "memory.grow")
CASE_ALL_OP(LoadMem, "load")
CASE_SIGN_OP(INT, LoadMem8, "load8")
CASE_SIGN_OP(INT, LoadMem16, "load16")
@@ -164,12 +158,12 @@ const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
CASE_I64_OP(StoreMem32, "store32")
CASE_S128_OP(StoreMem, "store128")
- // Non-standard opcodes.
+ // Exception handling opcodes.
CASE_OP(Try, "try")
+ CASE_OP(Catch, "catch")
CASE_OP(Throw, "throw")
CASE_OP(Rethrow, "rethrow")
- CASE_OP(Catch, "catch")
- CASE_OP(CatchAll, "catch_all")
+ CASE_OP(BrOnExn, "br_on_exn")
// asm.js-only opcodes.
CASE_F64_OP(Acos, "acos")
@@ -198,6 +192,19 @@ const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
CASE_I32_OP(AsmjsSConvertF64, "asmjs_convert_s/f64")
CASE_I32_OP(AsmjsUConvertF64, "asmjs_convert_u/f64")
+ // Numeric opcodes.
+ CASE_CONVERT_SAT_OP(Convert, I32, F32, "f32", "trunc")
+ CASE_CONVERT_SAT_OP(Convert, I32, F64, "f64", "trunc")
+ CASE_CONVERT_SAT_OP(Convert, I64, F32, "f32", "trunc")
+ CASE_CONVERT_SAT_OP(Convert, I64, F64, "f64", "trunc")
+ CASE_OP(MemoryInit, "memory.init")
+ CASE_OP(MemoryDrop, "memory.drop")
+ CASE_OP(MemoryCopy, "memory.copy")
+ CASE_OP(MemoryFill, "memory.fill")
+ CASE_OP(TableInit, "table.init")
+ CASE_OP(TableDrop, "table.drop")
+ CASE_OP(TableCopy, "table.copy")
+
// SIMD opcodes.
CASE_SIMD_OP(Splat, "splat")
CASE_SIMD_OP(Neg, "neg")
@@ -256,6 +263,8 @@ const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
CASE_S1x16_OP(AllTrue, "all_true")
// Atomic operations.
+ CASE_OP(AtomicWake, "atomic_wake")
+ CASE_INT_OP(AtomicWait, "atomic_wait")
CASE_UNSIGNED_ALL_OP(AtomicLoad, "atomic_load")
CASE_UNSIGNED_ALL_OP(AtomicStore, "atomic_store")
CASE_UNSIGNED_ALL_OP(AtomicAdd, "atomic_add")
@@ -372,11 +381,18 @@ std::ostream& operator<<(std::ostream& os, const FunctionSig& sig) {
return os;
}
-bool IsJSCompatibleSignature(const FunctionSig* sig) {
+bool IsJSCompatibleSignature(const FunctionSig* sig, bool has_bigint_feature) {
+ if (sig->return_count() > 1) {
+ return false;
+ }
for (auto type : sig->all()) {
- if (type == kWasmI64 || type == kWasmS128) return false;
+ if (!has_bigint_feature && type == kWasmI64) {
+ return false;
+ }
+
+ if (type == kWasmS128) return false;
}
- return sig->return_count() <= 1;
+ return true;
}
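// Worked examples of the rule above:
//   i32 -> i32            : always JS-compatible.
//   i64 -> i64            : JS-compatible only when {has_bigint_feature}.
//   s128 -> (no results)  : never JS-compatible (no JS representation).
//   (no params) -> i32,i32: never JS-compatible (more than one return value).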
namespace {
@@ -459,20 +475,8 @@ constexpr std::array<WasmOpcodeSig, 256> kAtomicExprSigTable =
constexpr std::array<WasmOpcodeSig, 256> kNumericExprSigTable =
base::make_array<256>(GetNumericOpcodeSigIndex{});
-// Computes a direct pointer to a cached signature for a simple opcode.
-struct GetSimpleOpcodeSig {
- constexpr const FunctionSig* operator()(byte opcode) const {
-#define CASE(name, opc, sig) opcode == opc ? &kSig_##sig:
- return FOREACH_SIMPLE_OPCODE(CASE) nullptr;
-#undef CASE
- }
-};
-
} // namespace
-const std::array<const FunctionSig*, 256> kSimpleOpcodeSigs =
- base::make_array<256>(GetSimpleOpcodeSig{});
-
FunctionSig* WasmOpcodes::Signature(WasmOpcode opcode) {
switch (opcode >> 8) {
case 0:
@@ -506,7 +510,7 @@ constexpr uint8_t StoreType::kStoreSizeLog2[];
constexpr ValueType StoreType::kValueType[];
constexpr MachineRepresentation StoreType::kMemRep[];
-int WasmOpcodes::TrapReasonToMessageId(TrapReason reason) {
+MessageTemplate WasmOpcodes::TrapReasonToMessageId(TrapReason reason) {
switch (reason) {
#define TRAPREASON_TO_MESSAGE(name) \
case k##name: \
@@ -519,7 +523,7 @@ int WasmOpcodes::TrapReasonToMessageId(TrapReason reason) {
}
const char* WasmOpcodes::TrapReasonMessage(TrapReason reason) {
- return MessageTemplate::TemplateString(TrapReasonToMessageId(reason));
+ return MessageFormatter::TemplateString(TrapReasonToMessageId(reason));
}
} // namespace wasm
} // namespace internal
diff --git a/deps/v8/src/wasm/wasm-opcodes.h b/deps/v8/src/wasm/wasm-opcodes.h
index 238873228f..b4ed83474f 100644
--- a/deps/v8/src/wasm/wasm-opcodes.h
+++ b/deps/v8/src/wasm/wasm-opcodes.h
@@ -6,38 +6,35 @@
#define V8_WASM_WASM_OPCODES_H_
#include "src/globals.h"
+#include "src/message-template.h"
#include "src/wasm/value-type.h"
#include "src/wasm/wasm-constants.h"
namespace v8 {
namespace internal {
-template <typename T>
-class Signature;
-
namespace wasm {
-using FunctionSig = Signature<ValueType>;
std::ostream& operator<<(std::ostream& os, const FunctionSig& function);
-bool IsJSCompatibleSignature(const FunctionSig* sig);
+bool IsJSCompatibleSignature(const FunctionSig* sig, bool has_bigint_feature);
// Control expressions and blocks.
-#define FOREACH_CONTROL_OPCODE(V) \
- V(Unreachable, 0x00, _) \
- V(Nop, 0x01, _) \
- V(Block, 0x02, _) \
- V(Loop, 0x03, _) \
- V(If, 0x004, _) \
- V(Else, 0x05, _) \
- V(Try, 0x06, _ /* eh_prototype */) \
- V(Catch, 0x07, _ /* eh_prototype */) \
- V(Throw, 0x08, _ /* eh_prototype */) \
- V(Rethrow, 0x09, _ /* eh_prototype */) \
- V(CatchAll, 0x0a, _ /* eh prototype */) \
- V(End, 0x0b, _) \
- V(Br, 0x0c, _) \
- V(BrIf, 0x0d, _) \
- V(BrTable, 0x0e, _) \
+#define FOREACH_CONTROL_OPCODE(V) \
+ V(Unreachable, 0x00, _) \
+ V(Nop, 0x01, _) \
+ V(Block, 0x02, _) \
+ V(Loop, 0x03, _) \
+ V(If, 0x04, _) \
+ V(Else, 0x05, _) \
+ V(Try, 0x06, _ /* eh_prototype */) \
+ V(Catch, 0x07, _ /* eh_prototype */) \
+ V(Throw, 0x08, _ /* eh_prototype */) \
+ V(Rethrow, 0x09, _ /* eh_prototype */) \
+ V(BrOnExn, 0x0a, _ /* eh prototype */) \
+ V(End, 0x0b, _) \
+ V(Br, 0x0c, _) \
+ V(BrIf, 0x0d, _) \
+ V(BrTable, 0x0e, _) \
V(Return, 0x0f, _)
// Constants, locals, globals, and calls.
@@ -89,7 +86,7 @@ bool IsJSCompatibleSignature(const FunctionSig* sig);
// Miscellaneous memory expressions
#define FOREACH_MISC_MEM_OPCODE(V) \
V(MemorySize, 0x3f, i_v) \
- V(GrowMemory, 0x40, i_i)
+ V(MemoryGrow, 0x40, i_i)
// Expressions with signatures.
#define FOREACH_SIMPLE_OPCODE(V) \
@@ -259,150 +256,150 @@ bool IsJSCompatibleSignature(const FunctionSig* sig);
V(I32AsmjsSConvertF64, 0xe5, i_d) \
V(I32AsmjsUConvertF64, 0xe6, i_d)
+#define FOREACH_SIMD_MEM_OPCODE(V) \
+ V(S128LoadMem, 0xfd00, s_i) \
+ V(S128StoreMem, 0xfd01, v_is)
+
+#define FOREACH_SIMD_MASK_OPERAND_OPCODE(V) V(S8x16Shuffle, 0xfd03, s_ss)
+
#define FOREACH_SIMD_0_OPERAND_OPCODE(V) \
- V(F32x4Splat, 0xfd00, s_f) \
- V(F32x4Abs, 0xfd03, s_s) \
- V(F32x4Neg, 0xfd04, s_s) \
- V(F32x4RecipApprox, 0xfd06, s_s) \
- V(F32x4RecipSqrtApprox, 0xfd07, s_s) \
- V(F32x4Add, 0xfd08, s_ss) \
- V(F32x4AddHoriz, 0xfdb9, s_ss) \
- V(F32x4Sub, 0xfd09, s_ss) \
- V(F32x4Mul, 0xfd0a, s_ss) \
- V(F32x4Min, 0xfd0c, s_ss) \
- V(F32x4Max, 0xfd0d, s_ss) \
- V(F32x4Eq, 0xfd10, s_ss) \
- V(F32x4Ne, 0xfd11, s_ss) \
- V(F32x4Lt, 0xfd12, s_ss) \
- V(F32x4Le, 0xfd13, s_ss) \
- V(F32x4Gt, 0xfd14, s_ss) \
- V(F32x4Ge, 0xfd15, s_ss) \
- V(F32x4SConvertI32x4, 0xfd19, s_s) \
- V(F32x4UConvertI32x4, 0xfd1a, s_s) \
- V(I32x4Splat, 0xfd1b, s_i) \
- V(I32x4Neg, 0xfd1e, s_s) \
- V(I32x4Add, 0xfd1f, s_ss) \
- V(I32x4AddHoriz, 0xfdba, s_ss) \
- V(I32x4Sub, 0xfd20, s_ss) \
- V(I32x4Mul, 0xfd21, s_ss) \
- V(I32x4MinS, 0xfd22, s_ss) \
- V(I32x4MaxS, 0xfd23, s_ss) \
- V(I32x4Eq, 0xfd26, s_ss) \
- V(I32x4Ne, 0xfd27, s_ss) \
- V(I32x4LtS, 0xfd28, s_ss) \
- V(I32x4LeS, 0xfd29, s_ss) \
- V(I32x4GtS, 0xfd2a, s_ss) \
- V(I32x4GeS, 0xfd2b, s_ss) \
- V(I32x4SConvertF32x4, 0xfd2f, s_s) \
- V(I32x4UConvertF32x4, 0xfd37, s_s) \
- V(I32x4SConvertI16x8Low, 0xfd94, s_s) \
- V(I32x4SConvertI16x8High, 0xfd95, s_s) \
- V(I32x4UConvertI16x8Low, 0xfd96, s_s) \
- V(I32x4UConvertI16x8High, 0xfd97, s_s) \
- V(I32x4MinU, 0xfd30, s_ss) \
- V(I32x4MaxU, 0xfd31, s_ss) \
- V(I32x4LtU, 0xfd33, s_ss) \
- V(I32x4LeU, 0xfd34, s_ss) \
- V(I32x4GtU, 0xfd35, s_ss) \
- V(I32x4GeU, 0xfd36, s_ss) \
- V(I16x8Splat, 0xfd38, s_i) \
- V(I16x8Neg, 0xfd3b, s_s) \
- V(I16x8Add, 0xfd3c, s_ss) \
- V(I16x8AddSaturateS, 0xfd3d, s_ss) \
- V(I16x8AddHoriz, 0xfdbb, s_ss) \
- V(I16x8Sub, 0xfd3e, s_ss) \
- V(I16x8SubSaturateS, 0xfd3f, s_ss) \
- V(I16x8Mul, 0xfd40, s_ss) \
- V(I16x8MinS, 0xfd41, s_ss) \
- V(I16x8MaxS, 0xfd42, s_ss) \
- V(I16x8Eq, 0xfd45, s_ss) \
- V(I16x8Ne, 0xfd46, s_ss) \
- V(I16x8LtS, 0xfd47, s_ss) \
- V(I16x8LeS, 0xfd48, s_ss) \
- V(I16x8GtS, 0xfd49, s_ss) \
- V(I16x8GeS, 0xfd4a, s_ss) \
- V(I16x8AddSaturateU, 0xfd4e, s_ss) \
- V(I16x8SubSaturateU, 0xfd4f, s_ss) \
- V(I16x8MinU, 0xfd50, s_ss) \
- V(I16x8MaxU, 0xfd51, s_ss) \
- V(I16x8LtU, 0xfd53, s_ss) \
- V(I16x8LeU, 0xfd54, s_ss) \
- V(I16x8GtU, 0xfd55, s_ss) \
- V(I16x8GeU, 0xfd56, s_ss) \
- V(I16x8SConvertI32x4, 0xfd98, s_ss) \
- V(I16x8UConvertI32x4, 0xfd99, s_ss) \
- V(I16x8SConvertI8x16Low, 0xfd9a, s_s) \
- V(I16x8SConvertI8x16High, 0xfd9b, s_s) \
- V(I16x8UConvertI8x16Low, 0xfd9c, s_s) \
- V(I16x8UConvertI8x16High, 0xfd9d, s_s) \
- V(I8x16Splat, 0xfd57, s_i) \
- V(I8x16Neg, 0xfd5a, s_s) \
- V(I8x16Add, 0xfd5b, s_ss) \
- V(I8x16AddSaturateS, 0xfd5c, s_ss) \
- V(I8x16Sub, 0xfd5d, s_ss) \
- V(I8x16SubSaturateS, 0xfd5e, s_ss) \
- V(I8x16Mul, 0xfd5f, s_ss) \
- V(I8x16MinS, 0xfd60, s_ss) \
- V(I8x16MaxS, 0xfd61, s_ss) \
- V(I8x16Eq, 0xfd64, s_ss) \
- V(I8x16Ne, 0xfd65, s_ss) \
- V(I8x16LtS, 0xfd66, s_ss) \
- V(I8x16LeS, 0xfd67, s_ss) \
- V(I8x16GtS, 0xfd68, s_ss) \
- V(I8x16GeS, 0xfd69, s_ss) \
- V(I8x16AddSaturateU, 0xfd6d, s_ss) \
- V(I8x16SubSaturateU, 0xfd6e, s_ss) \
- V(I8x16MinU, 0xfd6f, s_ss) \
- V(I8x16MaxU, 0xfd70, s_ss) \
- V(I8x16LtU, 0xfd72, s_ss) \
- V(I8x16LeU, 0xfd73, s_ss) \
- V(I8x16GtU, 0xfd74, s_ss) \
- V(I8x16GeU, 0xfd75, s_ss) \
- V(I8x16SConvertI16x8, 0xfd9e, s_ss) \
- V(I8x16UConvertI16x8, 0xfd9f, s_ss) \
- V(S128And, 0xfd76, s_ss) \
- V(S128Or, 0xfd77, s_ss) \
- V(S128Xor, 0xfd78, s_ss) \
- V(S128Not, 0xfd79, s_s) \
- V(S128Select, 0xfd2c, s_sss) \
- V(S1x4AnyTrue, 0xfd84, i_s) \
- V(S1x4AllTrue, 0xfd85, i_s) \
- V(S1x8AnyTrue, 0xfd8a, i_s) \
- V(S1x8AllTrue, 0xfd8b, i_s) \
- V(S1x16AnyTrue, 0xfd90, i_s) \
- V(S1x16AllTrue, 0xfd91, i_s)
+ V(I8x16Splat, 0xfd04, s_i) \
+ V(I16x8Splat, 0xfd08, s_i) \
+ V(I32x4Splat, 0xfd0c, s_i) \
+ V(F32x4Splat, 0xfd12, s_f) \
+ V(I8x16Eq, 0xfd18, s_ss) \
+ V(I8x16Ne, 0xfd19, s_ss) \
+ V(I8x16LtS, 0xfd1a, s_ss) \
+ V(I8x16LtU, 0xfd1b, s_ss) \
+ V(I8x16GtS, 0xfd1c, s_ss) \
+ V(I8x16GtU, 0xfd1d, s_ss) \
+ V(I8x16LeS, 0xfd1e, s_ss) \
+ V(I8x16LeU, 0xfd1f, s_ss) \
+ V(I8x16GeS, 0xfd20, s_ss) \
+ V(I8x16GeU, 0xfd21, s_ss) \
+ V(I16x8Eq, 0xfd22, s_ss) \
+ V(I16x8Ne, 0xfd23, s_ss) \
+ V(I16x8LtS, 0xfd24, s_ss) \
+ V(I16x8LtU, 0xfd25, s_ss) \
+ V(I16x8GtS, 0xfd26, s_ss) \
+ V(I16x8GtU, 0xfd27, s_ss) \
+ V(I16x8LeS, 0xfd28, s_ss) \
+ V(I16x8LeU, 0xfd29, s_ss) \
+ V(I16x8GeS, 0xfd2a, s_ss) \
+ V(I16x8GeU, 0xfd2b, s_ss) \
+ V(I32x4Eq, 0xfd2c, s_ss) \
+ V(I32x4Ne, 0xfd2d, s_ss) \
+ V(I32x4LtS, 0xfd2e, s_ss) \
+ V(I32x4LtU, 0xfd2f, s_ss) \
+ V(I32x4GtS, 0xfd30, s_ss) \
+ V(I32x4GtU, 0xfd31, s_ss) \
+ V(I32x4LeS, 0xfd32, s_ss) \
+ V(I32x4LeU, 0xfd33, s_ss) \
+ V(I32x4GeS, 0xfd34, s_ss) \
+ V(I32x4GeU, 0xfd35, s_ss) \
+ V(F32x4Eq, 0xfd40, s_ss) \
+ V(F32x4Ne, 0xfd41, s_ss) \
+ V(F32x4Lt, 0xfd42, s_ss) \
+ V(F32x4Gt, 0xfd43, s_ss) \
+ V(F32x4Le, 0xfd44, s_ss) \
+ V(F32x4Ge, 0xfd45, s_ss) \
+ V(S128Not, 0xfd4c, s_s) \
+ V(S128And, 0xfd4d, s_ss) \
+ V(S128Or, 0xfd4e, s_ss) \
+ V(S128Xor, 0xfd4f, s_ss) \
+ V(S128Select, 0xfd50, s_sss) \
+ V(I8x16Neg, 0xfd51, s_s) \
+ V(S1x16AnyTrue, 0xfd52, i_s) \
+ V(S1x16AllTrue, 0xfd53, i_s) \
+ V(I8x16Add, 0xfd57, s_ss) \
+ V(I8x16AddSaturateS, 0xfd58, s_ss) \
+ V(I8x16AddSaturateU, 0xfd59, s_ss) \
+ V(I8x16Sub, 0xfd5a, s_ss) \
+ V(I8x16SubSaturateS, 0xfd5b, s_ss) \
+ V(I8x16SubSaturateU, 0xfd5c, s_ss) \
+ V(I8x16Mul, 0xfd5d, s_ss) \
+ V(I8x16MinS, 0xfd5e, s_ss) \
+ V(I8x16MinU, 0xfd5f, s_ss) \
+ V(I8x16MaxS, 0xfd60, s_ss) \
+ V(I8x16MaxU, 0xfd61, s_ss) \
+ V(I16x8Neg, 0xfd62, s_s) \
+ V(S1x8AnyTrue, 0xfd63, i_s) \
+ V(S1x8AllTrue, 0xfd64, i_s) \
+ V(I16x8Add, 0xfd68, s_ss) \
+ V(I16x8AddSaturateS, 0xfd69, s_ss) \
+ V(I16x8AddSaturateU, 0xfd6a, s_ss) \
+ V(I16x8Sub, 0xfd6b, s_ss) \
+ V(I16x8SubSaturateS, 0xfd6c, s_ss) \
+ V(I16x8SubSaturateU, 0xfd6d, s_ss) \
+ V(I16x8Mul, 0xfd6e, s_ss) \
+ V(I16x8MinS, 0xfd6f, s_ss) \
+ V(I16x8MinU, 0xfd70, s_ss) \
+ V(I16x8MaxS, 0xfd71, s_ss) \
+ V(I16x8MaxU, 0xfd72, s_ss) \
+ V(I32x4Neg, 0xfd73, s_s) \
+ V(S1x4AnyTrue, 0xfd74, i_s) \
+ V(S1x4AllTrue, 0xfd75, i_s) \
+ V(I32x4Add, 0xfd79, s_ss) \
+ V(I32x4Sub, 0xfd7c, s_ss) \
+ V(I32x4Mul, 0xfd7f, s_ss) \
+ V(I32x4MinS, 0xfd80, s_ss) \
+ V(I32x4MinU, 0xfd81, s_ss) \
+ V(I32x4MaxS, 0xfd82, s_ss) \
+ V(I32x4MaxU, 0xfd83, s_ss) \
+ V(F32x4Abs, 0xfd95, s_s) \
+ V(F32x4Neg, 0xfd96, s_s) \
+ V(F32x4RecipApprox, 0xfd98, s_s) \
+ V(F32x4RecipSqrtApprox, 0xfd99, s_s) \
+ V(F32x4Add, 0xfd9a, s_ss) \
+ V(F32x4Sub, 0xfd9b, s_ss) \
+ V(F32x4Mul, 0xfd9c, s_ss) \
+ V(F32x4Min, 0xfd9e, s_ss) \
+ V(F32x4Max, 0xfd9f, s_ss) \
+ V(I32x4SConvertF32x4, 0xfdab, s_s) \
+ V(I32x4UConvertF32x4, 0xfdac, s_s) \
+ V(F32x4SConvertI32x4, 0xfdaf, s_s) \
+ V(F32x4UConvertI32x4, 0xfdb0, s_s) \
+ V(I8x16SConvertI16x8, 0xfdb1, s_ss) \
+ V(I8x16UConvertI16x8, 0xfdb2, s_ss) \
+ V(I16x8SConvertI32x4, 0xfdb3, s_ss) \
+ V(I16x8UConvertI32x4, 0xfdb4, s_ss) \
+ V(I16x8SConvertI8x16Low, 0xfdb5, s_s) \
+ V(I16x8SConvertI8x16High, 0xfdb6, s_s) \
+ V(I16x8UConvertI8x16Low, 0xfdb7, s_s) \
+ V(I16x8UConvertI8x16High, 0xfdb8, s_s) \
+ V(I32x4SConvertI16x8Low, 0xfdb9, s_s) \
+ V(I32x4SConvertI16x8High, 0xfdba, s_s) \
+ V(I32x4UConvertI16x8Low, 0xfdbb, s_s) \
+ V(I32x4UConvertI16x8High, 0xfdbc, s_s) \
+ V(I16x8AddHoriz, 0xfdbd, s_ss) \
+ V(I32x4AddHoriz, 0xfdbe, s_ss) \
+ V(F32x4AddHoriz, 0xfdbf, s_ss)
#define FOREACH_SIMD_1_OPERAND_1_PARAM_OPCODE(V) \
- V(F32x4ExtractLane, 0xfd01, _) \
- V(I32x4ExtractLane, 0xfd1c, _) \
- V(I32x4Shl, 0xfd24, _) \
- V(I32x4ShrS, 0xfd25, _) \
- V(I32x4ShrU, 0xfd32, _) \
- V(I16x8ExtractLane, 0xfd39, _) \
- V(I16x8Shl, 0xfd43, _) \
- V(I16x8ShrS, 0xfd44, _) \
- V(I16x8ShrU, 0xfd52, _) \
- V(I8x16ExtractLane, 0xfd58, _) \
- V(I8x16Shl, 0xfd62, _) \
- V(I8x16ShrS, 0xfd63, _) \
- V(I8x16ShrU, 0xfd71, _)
+ V(I8x16ExtractLane, 0xfd05, _) \
+ V(I16x8ExtractLane, 0xfd09, _) \
+ V(I32x4ExtractLane, 0xfd0d, _) \
+ V(F32x4ExtractLane, 0xfd13, _) \
+ V(I8x16Shl, 0xfd54, _) \
+ V(I8x16ShrS, 0xfd55, _) \
+ V(I8x16ShrU, 0xfd56, _) \
+ V(I16x8Shl, 0xfd65, _) \
+ V(I16x8ShrS, 0xfd66, _) \
+ V(I16x8ShrU, 0xfd67, _) \
+ V(I32x4Shl, 0xfd76, _) \
+ V(I32x4ShrS, 0xfd77, _) \
+ V(I32x4ShrU, 0xfd78, _)
#define FOREACH_SIMD_1_OPERAND_2_PARAM_OPCODE(V) \
- V(F32x4ReplaceLane, 0xfd02, _) \
- V(I32x4ReplaceLane, 0xfd1d, _) \
- V(I16x8ReplaceLane, 0xfd3a, _) \
- V(I8x16ReplaceLane, 0xfd59, _)
+ V(I8x16ReplaceLane, 0xfd07, _) \
+ V(I16x8ReplaceLane, 0xfd0b, _) \
+ V(I32x4ReplaceLane, 0xfd0e, _) \
+ V(F32x4ReplaceLane, 0xfd14, _)
#define FOREACH_SIMD_1_OPERAND_OPCODE(V) \
FOREACH_SIMD_1_OPERAND_1_PARAM_OPCODE(V) \
FOREACH_SIMD_1_OPERAND_2_PARAM_OPCODE(V)
-#define FOREACH_SIMD_MASK_OPERAND_OPCODE(V) V(S8x16Shuffle, 0xfd6b, s_ss)
-
-#define FOREACH_SIMD_MEM_OPCODE(V) \
- V(S128LoadMem, 0xfd80, s_i) \
- V(S128StoreMem, 0xfd81, v_is)
-
#define FOREACH_NUMERIC_OPCODE(V) \
V(I32SConvertSatF32, 0xfc00, i_f) \
V(I32UConvertSatF32, 0xfc01, i_f) \
@@ -411,9 +408,19 @@ bool IsJSCompatibleSignature(const FunctionSig* sig);
V(I64SConvertSatF32, 0xfc04, l_f) \
V(I64UConvertSatF32, 0xfc05, l_f) \
V(I64SConvertSatF64, 0xfc06, l_d) \
- V(I64UConvertSatF64, 0xfc07, l_d)
+ V(I64UConvertSatF64, 0xfc07, l_d) \
+ V(MemoryInit, 0xfc08, v_iii) \
+ V(MemoryDrop, 0xfc09, v_v) \
+ V(MemoryCopy, 0xfc0a, v_iii) \
+ V(MemoryFill, 0xfc0b, v_iii) \
+ V(TableInit, 0xfc0c, v_iii) \
+ V(TableDrop, 0xfc0d, v_v) \
+ V(TableCopy, 0xfc0e, v_iii)
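// Operand sketch for the new bulk entries: the three i32 parameters of the
// v_iii signatures are (destination, source or fill value, length); which
// memory segment or table is affected travels as an immediate rather than on
// the operand stack (an assumption consistent with the v_v drop entries,
// which take no stack operands at all).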
#define FOREACH_ATOMIC_OPCODE(V) \
+ V(AtomicWake, 0xfe00, i_ii) \
+ V(I32AtomicWait, 0xfe01, i_iil) \
+ V(I64AtomicWait, 0xfe02, i_ill) \
V(I32AtomicLoad, 0xfe10, i_i) \
V(I64AtomicLoad, 0xfe11, l_i) \
V(I32AtomicLoad8U, 0xfe12, i_i) \
@@ -496,41 +503,45 @@ bool IsJSCompatibleSignature(const FunctionSig* sig);
FOREACH_NUMERIC_OPCODE(V)
// All signatures.
-#define FOREACH_SIGNATURE(V) \
- FOREACH_SIMD_SIGNATURE(V) \
- V(i_ii, kWasmI32, kWasmI32, kWasmI32) \
- V(i_i, kWasmI32, kWasmI32) \
- V(i_v, kWasmI32) \
- V(i_ff, kWasmI32, kWasmF32, kWasmF32) \
- V(i_f, kWasmI32, kWasmF32) \
- V(i_dd, kWasmI32, kWasmF64, kWasmF64) \
- V(i_d, kWasmI32, kWasmF64) \
- V(i_l, kWasmI32, kWasmI64) \
- V(l_ll, kWasmI64, kWasmI64, kWasmI64) \
- V(i_ll, kWasmI32, kWasmI64, kWasmI64) \
- V(l_l, kWasmI64, kWasmI64) \
- V(l_i, kWasmI64, kWasmI32) \
- V(l_f, kWasmI64, kWasmF32) \
- V(l_d, kWasmI64, kWasmF64) \
- V(f_ff, kWasmF32, kWasmF32, kWasmF32) \
- V(f_f, kWasmF32, kWasmF32) \
- V(f_d, kWasmF32, kWasmF64) \
- V(f_i, kWasmF32, kWasmI32) \
- V(f_l, kWasmF32, kWasmI64) \
- V(d_dd, kWasmF64, kWasmF64, kWasmF64) \
- V(d_d, kWasmF64, kWasmF64) \
- V(d_f, kWasmF64, kWasmF32) \
- V(d_i, kWasmF64, kWasmI32) \
- V(d_l, kWasmF64, kWasmI64) \
- V(v_ii, kWasmStmt, kWasmI32, kWasmI32) \
- V(v_id, kWasmStmt, kWasmI32, kWasmF64) \
- V(d_id, kWasmF64, kWasmI32, kWasmF64) \
- V(v_if, kWasmStmt, kWasmI32, kWasmF32) \
- V(f_if, kWasmF32, kWasmI32, kWasmF32) \
- V(v_il, kWasmStmt, kWasmI32, kWasmI64) \
- V(l_il, kWasmI64, kWasmI32, kWasmI64) \
- V(i_iii, kWasmI32, kWasmI32, kWasmI32, kWasmI32) \
- V(l_ill, kWasmI64, kWasmI32, kWasmI64, kWasmI64) \
+#define FOREACH_SIGNATURE(V) \
+ FOREACH_SIMD_SIGNATURE(V) \
+ V(v_v, kWasmStmt) \
+ V(i_ii, kWasmI32, kWasmI32, kWasmI32) \
+ V(i_i, kWasmI32, kWasmI32) \
+ V(i_v, kWasmI32) \
+ V(i_ff, kWasmI32, kWasmF32, kWasmF32) \
+ V(i_f, kWasmI32, kWasmF32) \
+ V(i_dd, kWasmI32, kWasmF64, kWasmF64) \
+ V(i_d, kWasmI32, kWasmF64) \
+ V(i_l, kWasmI32, kWasmI64) \
+ V(l_ll, kWasmI64, kWasmI64, kWasmI64) \
+ V(i_ll, kWasmI32, kWasmI64, kWasmI64) \
+ V(l_l, kWasmI64, kWasmI64) \
+ V(l_i, kWasmI64, kWasmI32) \
+ V(l_f, kWasmI64, kWasmF32) \
+ V(l_d, kWasmI64, kWasmF64) \
+ V(f_ff, kWasmF32, kWasmF32, kWasmF32) \
+ V(f_f, kWasmF32, kWasmF32) \
+ V(f_d, kWasmF32, kWasmF64) \
+ V(f_i, kWasmF32, kWasmI32) \
+ V(f_l, kWasmF32, kWasmI64) \
+ V(d_dd, kWasmF64, kWasmF64, kWasmF64) \
+ V(d_d, kWasmF64, kWasmF64) \
+ V(d_f, kWasmF64, kWasmF32) \
+ V(d_i, kWasmF64, kWasmI32) \
+ V(d_l, kWasmF64, kWasmI64) \
+ V(v_ii, kWasmStmt, kWasmI32, kWasmI32) \
+ V(v_id, kWasmStmt, kWasmI32, kWasmF64) \
+ V(d_id, kWasmF64, kWasmI32, kWasmF64) \
+ V(v_if, kWasmStmt, kWasmI32, kWasmF32) \
+ V(f_if, kWasmF32, kWasmI32, kWasmF32) \
+ V(v_il, kWasmStmt, kWasmI32, kWasmI64) \
+ V(l_il, kWasmI64, kWasmI32, kWasmI64) \
+ V(v_iii, kWasmStmt, kWasmI32, kWasmI32, kWasmI32) \
+ V(i_iii, kWasmI32, kWasmI32, kWasmI32, kWasmI32) \
+ V(l_ill, kWasmI64, kWasmI32, kWasmI64, kWasmI64) \
+ V(i_iil, kWasmI32, kWasmI32, kWasmI32, kWasmI64) \
+ V(i_ill, kWasmI32, kWasmI32, kWasmI64, kWasmI64) \
V(i_r, kWasmI32, kWasmAnyRef)
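// Reading the shorthand above: the letters before '_' name the return type and
// the letters after it the parameters, where i = kWasmI32, l = kWasmI64,
// f = kWasmF32, d = kWasmF64, s = kWasmS128, r = kWasmAnyRef and v = no value
// (kWasmStmt). Each entry defines a cached signature object named after it,
// e.g. kSig_i_iil: returns i32, takes (i32, i32, i64), used by I32AtomicWait.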
#define FOREACH_SIMD_SIGNATURE(V) \
@@ -565,8 +576,6 @@ enum TrapReason {
#undef DECLARE_ENUM
};
-extern const std::array<const FunctionSig*, 256> kSimpleOpcodeSigs;
-
// A collection of opcode-related static methods.
class V8_EXPORT_PRIVATE WasmOpcodes {
public:
@@ -581,7 +590,7 @@ class V8_EXPORT_PRIVATE WasmOpcodes {
// this one in the current block are dead. Returns false for |end|.
static bool IsUnconditionalJump(WasmOpcode opcode);
- static int TrapReasonToMessageId(TrapReason reason);
+ static MessageTemplate TrapReasonToMessageId(TrapReason reason);
static const char* TrapReasonMessage(TrapReason reason);
};
diff --git a/deps/v8/src/wasm/wasm-result.cc b/deps/v8/src/wasm/wasm-result.cc
index 1bcad13030..80b7b4a6ad 100644
--- a/deps/v8/src/wasm/wasm-result.cc
+++ b/deps/v8/src/wasm/wasm-result.cc
@@ -49,18 +49,11 @@ void PrintFToString(std::string& str, size_t str_offset, const char* format,
} // namespace
-void ResultBase::error(uint32_t offset, std::string error_msg) {
- // The error message must not be empty, otherwise Result::failed() will be
- // false.
- DCHECK(!error_msg.empty());
- error_offset_ = offset;
- error_msg_ = std::move(error_msg);
-}
-
-void ResultBase::verror(const char* format, va_list args) {
- VPrintFToString(error_msg_, 0, format, args);
- // Assign default message such that ok() and failed() work.
- if (error_msg_.empty() == 0) error_msg_.assign("Error");
+// static
+std::string WasmError::FormatError(const char* format, va_list args) {
+ std::string result;
+ VPrintFToString(result, 0, format, args);
+ return result;
}
void ErrorThrower::Format(ErrorType type, const char* format, va_list args) {
@@ -133,9 +126,9 @@ Handle<Object> ErrorThrower::Reify() {
constructor = isolate_->wasm_runtime_error_function();
break;
}
- Vector<const char> msg_vec(error_msg_.data(), error_msg_.size());
- Handle<String> message =
- isolate_->factory()->NewStringFromUtf8(msg_vec).ToHandleChecked();
+ Handle<String> message = isolate_->factory()
+ ->NewStringFromUtf8(VectorOf(error_msg_))
+ .ToHandleChecked();
Reset();
return isolate_->factory()->NewError(constructor, message);
}
diff --git a/deps/v8/src/wasm/wasm-result.h b/deps/v8/src/wasm/wasm-result.h
index 694a8b7f76..824e838ae2 100644
--- a/deps/v8/src/wasm/wasm-result.h
+++ b/deps/v8/src/wasm/wasm-result.h
@@ -12,84 +12,98 @@
#include "src/utils.h"
#include "src/globals.h"
-#include "src/handles.h"
namespace v8 {
namespace internal {
class Isolate;
+template <typename T>
+class Handle;
namespace wasm {
-// Base class for Result<T>.
-class V8_EXPORT_PRIVATE ResultBase {
- protected:
- ResultBase() = default;
-
- ResultBase& operator=(ResultBase&& other) V8_NOEXCEPT = default;
-
+class V8_EXPORT_PRIVATE WasmError {
public:
- ResultBase(ResultBase&& other) V8_NOEXCEPT
- : error_offset_(other.error_offset_),
- error_msg_(std::move(other.error_msg_)) {}
+ WasmError() = default;
- void error(uint32_t offset, std::string error_msg);
+ WasmError(uint32_t offset, std::string message)
+ : offset_(offset), message_(std::move(message)) {
+ // The error message must not be empty, otherwise {empty()} would be true.
+ DCHECK(!message_.empty());
+ }
- void PRINTF_FORMAT(2, 3) error(const char* format, ...) {
+ PRINTF_FORMAT(3, 4)
+ WasmError(uint32_t offset, const char* format, ...) : offset_(offset) {
va_list args;
va_start(args, format);
- verror(format, args);
+ message_ = FormatError(format, args);
va_end(args);
+ // The error message must not be empty, otherwise {empty()} would be true.
+ DCHECK(!message_.empty());
}
- void PRINTF_FORMAT(2, 0) verror(const char* format, va_list args);
-
- void MoveErrorFrom(ResultBase& that) {
- error_offset_ = that.error_offset_;
- // Use {swap()} + {clear()} instead of move assign, as {that} might still
- // be used afterwards.
- error_msg_.swap(that.error_msg_);
- that.error_msg_.clear();
- }
+ bool empty() const { return message_.empty(); }
+ bool has_error() const { return !message_.empty(); }
- bool ok() const { return error_msg_.empty(); }
- bool failed() const { return !ok(); }
+ uint32_t offset() const { return offset_; }
+ const std::string& message() const& { return message_; }
+ std::string&& message() && { return std::move(message_); }
- uint32_t error_offset() const { return error_offset_; }
- const std::string& error_msg() const { return error_msg_; }
+ protected:
+ static std::string FormatError(const char* format, va_list args);
private:
- uint32_t error_offset_ = 0;
- std::string error_msg_;
+ uint32_t offset_ = 0;
+ std::string message_;
};
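// Construction sketch (illustrative values only):
//   WasmError error(/*offset=*/17, "expected %u bytes, fewer found", 4u);
//   // error.has_error() is true, error.offset() is 17, and
//   // error.message() is "expected 4 bytes, fewer found".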
-// The overall result of decoding a function or a module.
+// Either a result of type T, or a WasmError.
template <typename T>
-class Result : public ResultBase {
+class Result {
public:
Result() = default;
template <typename S>
- explicit Result(S&& value) : val(std::forward<S>(value)) {}
+ explicit Result(S&& value) : value_(std::forward<S>(value)) {}
template <typename S>
- Result(Result<S>&& other) V8_NOEXCEPT : ResultBase(std::move(other)),
- val(std::move(other.val)) {}
+ Result(Result<S>&& other) V8_NOEXCEPT : value_(std::move(other.value_)),
+ error_(std::move(other.error_)) {}
- Result& operator=(Result&& other) V8_NOEXCEPT = default;
+ explicit Result(WasmError error) : error_(std::move(error)) {}
- static Result<T> PRINTF_FORMAT(1, 2) Error(const char* format, ...) {
- va_list args;
- va_start(args, format);
- Result<T> result;
- result.verror(format, args);
- va_end(args);
- return result;
+ template <typename S>
+ Result& operator=(Result<S>&& other) V8_NOEXCEPT {
+ value_ = std::move(other.value_);
+ error_ = std::move(other.error_);
+ return *this;
}
- T val = T{};
+ bool ok() const { return error_.empty(); }
+ bool failed() const { return error_.has_error(); }
+ const WasmError& error() const& { return error_; }
+ WasmError&& error() && { return std::move(error_); }
+
+ // Accessor for the value. Returns a const reference if {this} is an l-value
+ // or const, and an r-value reference if {this} is an r-value. This allows
+ // extracting non-copyable values like {std::unique_ptr} via
+ // {std::move(result).value()}.
+ const T& value() const & {
+ DCHECK(ok());
+ return value_;
+ }
+ T&& value() && {
+ DCHECK(ok());
+ return std::move(value_);
+ }
private:
+ template <typename S>
+ friend class Result;
+
+ T value_ = T{};
+ WasmError error_;
+
DISALLOW_COPY_AND_ASSIGN(Result);
};
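// Extraction sketch for the ref-qualified accessors above (caller-side code,
// assuming a Result holding a move-only value):
//   Result<std::unique_ptr<int>> result(std::make_unique<int>(42));
//   if (result.ok()) {
//     std::unique_ptr<int> value = std::move(result).value();  // moves out
//   }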
@@ -108,11 +122,15 @@ class V8_EXPORT_PRIVATE ErrorThrower {
PRINTF_FORMAT(2, 3) void LinkError(const char* fmt, ...);
PRINTF_FORMAT(2, 3) void RuntimeError(const char* fmt, ...);
- template <typename T>
- void CompileFailed(const char* error, Result<T>& result) {
- DCHECK(result.failed());
- CompileError("%s: %s @+%u", error, result.error_msg().c_str(),
- result.error_offset());
+ void CompileFailed(const char* context, const WasmError& error) {
+ DCHECK(error.has_error());
+ CompileError("%s: %s @+%u", context, error.message().c_str(),
+ error.offset());
+ }
+
+ void CompileFailed(const WasmError& error) {
+ DCHECK(error.has_error());
+ CompileError("%s @+%u", error.message().c_str(), error.offset());
}
// Create and return exception object.
@@ -149,12 +167,17 @@ class V8_EXPORT_PRIVATE ErrorThrower {
ErrorType error_type_ = kNone;
std::string error_msg_;
- DISALLOW_COPY_AND_ASSIGN(ErrorThrower);
// ErrorThrower should always be stack-allocated, since it constitutes a scope
// (things happen in the destructor).
DISALLOW_NEW_AND_DELETE();
+ DISALLOW_COPY_AND_ASSIGN(ErrorThrower);
};
+// Use {nullptr_t} as the data value to indicate that this stores only the
+// error and no result value (the only valid value is {nullptr}).
+// [Storing {void} would require template specialization.]
+using VoidResult = Result<std::nullptr_t>;
+
} // namespace wasm
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/wasm/wasm-serialization.cc b/deps/v8/src/wasm/wasm-serialization.cc
index b676a5b61b..a167b81cbd 100644
--- a/deps/v8/src/wasm/wasm-serialization.cc
+++ b/deps/v8/src/wasm/wasm-serialization.cc
@@ -8,6 +8,7 @@
#include "src/external-reference-table.h"
#include "src/objects-inl.h"
#include "src/objects.h"
+#include "src/ostreams.h"
#include "src/snapshot/code-serializer.h"
#include "src/snapshot/serializer-common.h"
#include "src/utils.h"
@@ -119,9 +120,8 @@ class Reader {
constexpr size_t kVersionSize = 4 * sizeof(uint32_t);
-void WriteVersion(Isolate* isolate, Writer* writer) {
- writer->Write(SerializedData::ComputeMagicNumber(
- isolate->heap()->external_reference_table()));
+void WriteVersion(Writer* writer) {
+ writer->Write(SerializedData::kMagicNumber);
writer->Write(Version::Hash());
writer->Write(static_cast<uint32_t>(CpuFeatures::SupportedFeatures()));
writer->Write(FlagList::Hash());
@@ -134,6 +134,7 @@ void WriteVersion(Isolate* isolate, Writer* writer) {
// Other platforms simply require accessing the target address.
void SetWasmCalleeTag(RelocInfo* rinfo, uint32_t tag) {
#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_IA32
+ DCHECK(rinfo->HasTargetAddressAddress());
*(reinterpret_cast<uint32_t*>(rinfo->target_address_address())) = tag;
#elif V8_TARGET_ARCH_ARM64
Instruction* instr = reinterpret_cast<Instruction*>(rinfo->pc());
@@ -191,6 +192,8 @@ constexpr size_t kCodeHeaderSize =
sizeof(size_t) + // offset of constant pool
sizeof(size_t) + // offset of safepoint table
sizeof(size_t) + // offset of handler table
+ sizeof(size_t) + // offset of code comments
+ sizeof(size_t) + // unpadded binary size
sizeof(uint32_t) + // stack slots
sizeof(size_t) + // code size
sizeof(size_t) + // reloc size
@@ -198,13 +201,68 @@ constexpr size_t kCodeHeaderSize =
sizeof(size_t) + // protected instructions size
sizeof(WasmCode::Tier); // tier
+// A list of all isolate-independent external references. This is used to
+// create a tag from the Address of an external reference and vice versa.
+class ExternalReferenceList {
+ public:
+ uint32_t tag_from_address(Address ext_ref_address) const {
+ auto tag_addr_less_than = [this](uint32_t tag, Address searched_addr) {
+ return external_reference_by_tag_[tag] < searched_addr;
+ };
+ auto it = std::lower_bound(std::begin(tags_ordered_by_address_),
+ std::end(tags_ordered_by_address_),
+ ext_ref_address, tag_addr_less_than);
+ DCHECK_NE(std::end(tags_ordered_by_address_), it);
+ uint32_t tag = *it;
+ DCHECK_EQ(address_from_tag(tag), ext_ref_address);
+ return tag;
+ }
+
+ Address address_from_tag(uint32_t tag) const {
+ DCHECK_GT(kNumExternalReferences, tag);
+ return external_reference_by_tag_[tag];
+ }
+
+ static const ExternalReferenceList& Get() {
+ static ExternalReferenceList list; // Lazily initialized.
+ return list;
+ }
+
+ private:
+ // Private constructor. There will only be a single instance of this object.
+ ExternalReferenceList() {
+ for (uint32_t i = 0; i < kNumExternalReferences; ++i) {
+ tags_ordered_by_address_[i] = i;
+ }
+ auto addr_by_tag_less_than = [this](uint32_t a, uint32_t b) {
+ return external_reference_by_tag_[a] < external_reference_by_tag_[b];
+ };
+ std::sort(std::begin(tags_ordered_by_address_),
+ std::end(tags_ordered_by_address_), addr_by_tag_less_than);
+ }
+
+#define COUNT_EXTERNAL_REFERENCE(name, desc) +1
+ static constexpr uint32_t kNumExternalReferences =
+ EXTERNAL_REFERENCE_LIST(COUNT_EXTERNAL_REFERENCE);
+#undef COUNT_EXTERNAL_REFERENCE
+
+#define EXT_REF_ADDR(name, desc) ExternalReference::name().address(),
+ Address external_reference_by_tag_[kNumExternalReferences] = {
+ EXTERNAL_REFERENCE_LIST(EXT_REF_ADDR)};
+#undef EXT_REF_ADDR
+ uint32_t tags_ordered_by_address_[kNumExternalReferences];
+ DISALLOW_COPY_AND_ASSIGN(ExternalReferenceList);
+};
+
+static_assert(std::is_trivially_destructible<ExternalReferenceList>::value,
+ "static destructors not allowed");
+
} // namespace
class V8_EXPORT_PRIVATE NativeModuleSerializer {
public:
NativeModuleSerializer() = delete;
- NativeModuleSerializer(Isolate*, const NativeModule*,
- Vector<WasmCode* const>);
+ NativeModuleSerializer(const NativeModule*, Vector<WasmCode* const>);
size_t Measure() const;
bool Write(Writer* writer);
@@ -214,26 +272,19 @@ class V8_EXPORT_PRIVATE NativeModuleSerializer {
void WriteHeader(Writer* writer);
void WriteCode(const WasmCode*, Writer* writer);
- Isolate* const isolate_;
const NativeModule* const native_module_;
Vector<WasmCode* const> code_table_;
bool write_called_;
// Reverse lookup tables for embedded addresses.
std::map<Address, uint32_t> wasm_stub_targets_lookup_;
- std::map<Address, uint32_t> reference_table_lookup_;
DISALLOW_COPY_AND_ASSIGN(NativeModuleSerializer);
};
NativeModuleSerializer::NativeModuleSerializer(
- Isolate* isolate, const NativeModule* module,
- Vector<WasmCode* const> code_table)
- : isolate_(isolate),
- native_module_(module),
- code_table_(code_table),
- write_called_(false) {
- DCHECK_NOT_NULL(isolate_);
+ const NativeModule* module, Vector<WasmCode* const> code_table)
+ : native_module_(module), code_table_(code_table), write_called_(false) {
DCHECK_NOT_NULL(native_module_);
// TODO(mtrofin): persist the export wrappers. Ideally, we'd only persist
// the unique ones, i.e. the cache.
@@ -243,11 +294,6 @@ NativeModuleSerializer::NativeModuleSerializer(
->instruction_start();
wasm_stub_targets_lookup_.insert(std::make_pair(addr, i));
}
- ExternalReferenceTable* table = isolate_->heap()->external_reference_table();
- for (uint32_t i = 0; i < table->size(); ++i) {
- Address addr = table->address(i);
- reference_table_lookup_.insert(std::make_pair(addr, i));
- }
}
size_t NativeModuleSerializer::MeasureCode(const WasmCode* code) const {
@@ -268,6 +314,9 @@ size_t NativeModuleSerializer::Measure() const {
}
void NativeModuleSerializer::WriteHeader(Writer* writer) {
+ // TODO(eholk): We need to properly preserve the flag whether the trap
+ // handler was used or not when serializing.
+
writer->Write(native_module_->num_functions());
writer->Write(native_module_->num_imported_functions());
}
@@ -283,6 +332,8 @@ void NativeModuleSerializer::WriteCode(const WasmCode* code, Writer* writer) {
writer->Write(code->constant_pool_offset());
writer->Write(code->safepoint_table_offset());
writer->Write(code->handler_table_offset());
+ writer->Write(code->code_comments_offset());
+ writer->Write(code->unpadded_binary_size());
writer->Write(code->stack_slots());
writer->Write(code->instructions().size());
writer->Write(code->reloc_info().size());
@@ -340,10 +391,9 @@ void NativeModuleSerializer::WriteCode(const WasmCode* code, Writer* writer) {
} break;
case RelocInfo::EXTERNAL_REFERENCE: {
Address orig_target = orig_iter.rinfo()->target_external_reference();
- auto ref_iter = reference_table_lookup_.find(orig_target);
- DCHECK(ref_iter != reference_table_lookup_.end());
- uint32_t tag = ref_iter->second;
- SetWasmCalleeTag(iter.rinfo(), tag);
+ uint32_t ext_ref_tag =
+ ExternalReferenceList::Get().tag_from_address(orig_target);
+ SetWasmCalleeTag(iter.rinfo(), ext_ref_tag);
} break;
case RelocInfo::INTERNAL_REFERENCE:
case RelocInfo::INTERNAL_REFERENCE_ENCODED: {
@@ -374,25 +424,22 @@ bool NativeModuleSerializer::Write(Writer* writer) {
return true;
}
-WasmSerializer::WasmSerializer(Isolate* isolate, NativeModule* native_module)
- : isolate_(isolate),
- native_module_(native_module),
+WasmSerializer::WasmSerializer(NativeModule* native_module)
+ : native_module_(native_module),
code_table_(native_module->SnapshotCodeTable()) {}
size_t WasmSerializer::GetSerializedNativeModuleSize() const {
- Vector<WasmCode* const> code_table(code_table_.data(), code_table_.size());
- NativeModuleSerializer serializer(isolate_, native_module_, code_table);
+ NativeModuleSerializer serializer(native_module_, VectorOf(code_table_));
return kVersionSize + serializer.Measure();
}
bool WasmSerializer::SerializeNativeModule(Vector<byte> buffer) const {
- Vector<WasmCode* const> code_table(code_table_.data(), code_table_.size());
- NativeModuleSerializer serializer(isolate_, native_module_, code_table);
+ NativeModuleSerializer serializer(native_module_, VectorOf(code_table_));
size_t measured_size = kVersionSize + serializer.Measure();
if (buffer.size() < measured_size) return false;
Writer writer(buffer);
- WriteVersion(isolate_, &writer);
+ WriteVersion(&writer);
if (!serializer.Write(&writer)) return false;
DCHECK_EQ(measured_size, writer.bytes_written());
@@ -402,7 +449,7 @@ bool WasmSerializer::SerializeNativeModule(Vector<byte> buffer) const {
class V8_EXPORT_PRIVATE NativeModuleDeserializer {
public:
NativeModuleDeserializer() = delete;
- NativeModuleDeserializer(Isolate*, NativeModule*);
+ explicit NativeModuleDeserializer(NativeModule*);
bool Read(Reader* reader);
@@ -410,16 +457,14 @@ class V8_EXPORT_PRIVATE NativeModuleDeserializer {
bool ReadHeader(Reader* reader);
bool ReadCode(uint32_t fn_index, Reader* reader);
- Isolate* const isolate_;
NativeModule* const native_module_;
bool read_called_;
DISALLOW_COPY_AND_ASSIGN(NativeModuleDeserializer);
};
-NativeModuleDeserializer::NativeModuleDeserializer(Isolate* isolate,
- NativeModule* native_module)
- : isolate_(isolate), native_module_(native_module), read_called_(false) {}
+NativeModuleDeserializer::NativeModuleDeserializer(NativeModule* native_module)
+ : native_module_(native_module), read_called_(false) {}
bool NativeModuleDeserializer::Read(Reader* reader) {
DCHECK(!read_called_);
@@ -447,6 +492,8 @@ bool NativeModuleDeserializer::ReadCode(uint32_t fn_index, Reader* reader) {
size_t constant_pool_offset = reader->Read<size_t>();
size_t safepoint_table_offset = reader->Read<size_t>();
size_t handler_table_offset = reader->Read<size_t>();
+ size_t code_comment_offset = reader->Read<size_t>();
+ size_t unpadded_binary_size = reader->Read<size_t>();
uint32_t stack_slot_count = reader->Read<uint32_t>();
size_t code_size = reader->Read<size_t>();
size_t reloc_size = reader->Read<size_t>();
@@ -468,9 +515,9 @@ bool NativeModuleDeserializer::ReadCode(uint32_t fn_index, Reader* reader) {
WasmCode* code = native_module_->AddDeserializedCode(
fn_index, code_buffer, stack_slot_count, safepoint_table_offset,
- handler_table_offset, constant_pool_offset,
- std::move(protected_instructions), std::move(reloc_info),
- std::move(source_pos), tier);
+ handler_table_offset, constant_pool_offset, code_comment_offset,
+ unpadded_binary_size, std::move(protected_instructions),
+ std::move(reloc_info), std::move(source_pos), tier);
// Relocate the code.
int mask = RelocInfo::ModeMask(RelocInfo::WASM_CALL) |
@@ -501,8 +548,7 @@ bool NativeModuleDeserializer::ReadCode(uint32_t fn_index, Reader* reader) {
}
case RelocInfo::EXTERNAL_REFERENCE: {
uint32_t tag = GetWasmCalleeTag(iter.rinfo());
- Address address =
- isolate_->heap()->external_reference_table()->address(tag);
+ Address address = ExternalReferenceList::Get().address_from_tag(tag);
iter.rinfo()->set_target_external_reference(address, SKIP_ICACHE_FLUSH);
break;
}
@@ -519,7 +565,7 @@ bool NativeModuleDeserializer::ReadCode(uint32_t fn_index, Reader* reader) {
}
}
- if (FLAG_print_code || FLAG_print_wasm_code) code->Print();
+ code->MaybePrint();
code->Validate();
// Finally, flush the icache for that code.
@@ -529,60 +575,50 @@ bool NativeModuleDeserializer::ReadCode(uint32_t fn_index, Reader* reader) {
return true;
}
-bool IsSupportedVersion(Isolate* isolate, Vector<const byte> version) {
+bool IsSupportedVersion(Vector<const byte> version) {
if (version.size() < kVersionSize) return false;
byte current_version[kVersionSize];
Writer writer({current_version, kVersionSize});
- WriteVersion(isolate, &writer);
+ WriteVersion(&writer);
return memcmp(version.start(), current_version, kVersionSize) == 0;
}
MaybeHandle<WasmModuleObject> DeserializeNativeModule(
- Isolate* isolate, Vector<const byte> data, Vector<const byte> wire_bytes) {
- if (!IsWasmCodegenAllowed(isolate, isolate->native_context())) {
- return {};
- }
- if (!IsSupportedVersion(isolate, data)) {
- return {};
- }
+ Isolate* isolate, Vector<const byte> data,
+ Vector<const byte> wire_bytes_vec) {
+ if (!IsWasmCodegenAllowed(isolate, isolate->native_context())) return {};
+ if (!IsSupportedVersion(data)) return {};
+
+ ModuleWireBytes wire_bytes(wire_bytes_vec);
// TODO(titzer): module features should be part of the serialization format.
WasmFeatures enabled_features = WasmFeaturesFromIsolate(isolate);
ModuleResult decode_result = DecodeWasmModule(
enabled_features, wire_bytes.start(), wire_bytes.end(), false,
i::wasm::kWasmOrigin, isolate->counters(), isolate->allocator());
- if (!decode_result.ok()) return {};
- CHECK_NOT_NULL(decode_result.val);
- WasmModule* module = decode_result.val.get();
+ if (decode_result.failed()) return {};
+ CHECK_NOT_NULL(decode_result.value());
+ WasmModule* module = decode_result.value().get();
Handle<Script> script =
CreateWasmScript(isolate, wire_bytes, module->source_map_url);
- // TODO(eholk): We need to properly preserve the flag whether the trap
- // handler was used or not when serializing.
- UseTrapHandler use_trap_handler =
- trap_handler::IsTrapHandlerEnabled() ? kUseTrapHandler : kNoTrapHandler;
- ModuleEnv env(module, use_trap_handler,
- RuntimeExceptionSupport::kRuntimeExceptionSupport);
-
- OwnedVector<uint8_t> wire_bytes_copy = OwnedVector<uint8_t>::Of(wire_bytes);
+ OwnedVector<uint8_t> wire_bytes_copy =
+ OwnedVector<uint8_t>::Of(wire_bytes_vec);
Handle<WasmModuleObject> module_object = WasmModuleObject::New(
- isolate, enabled_features, std::move(decode_result.val), env,
+ isolate, enabled_features, std::move(decode_result).value(),
std::move(wire_bytes_copy), script, Handle<ByteArray>::null());
NativeModule* native_module = module_object->native_module();
if (FLAG_wasm_lazy_compilation) {
native_module->SetLazyBuiltin(BUILTIN_CODE(isolate, WasmCompileLazy));
}
- NativeModuleDeserializer deserializer(isolate, native_module);
+ NativeModuleDeserializer deserializer(native_module);
Reader reader(data + kVersionSize);
if (!deserializer.Read(&reader)) return {};
- // TODO(6792): Wrappers below might be cloned using {Factory::CopyCode}. This
- // requires unlocking the code space here. This should eventually be moved
- // into the allocator.
- CodeSpaceMemoryModificationScope modification_scope(isolate->heap());
- CompileJsToWasmWrappers(isolate, module_object);
+ CompileJsToWasmWrappers(isolate, native_module->module(),
+ handle(module_object->export_wrappers(), isolate));
// Log the code within the generated module for profiling.
native_module->LogWasmCodes(isolate);
diff --git a/deps/v8/src/wasm/wasm-serialization.h b/deps/v8/src/wasm/wasm-serialization.h
index 93f79a59de..eaa1ee7ffe 100644
--- a/deps/v8/src/wasm/wasm-serialization.h
+++ b/deps/v8/src/wasm/wasm-serialization.h
@@ -16,7 +16,7 @@ namespace wasm {
// the module after that won't affect the serialized result.
class WasmSerializer {
public:
- WasmSerializer(Isolate* isolate, NativeModule* native_module);
+ explicit WasmSerializer(NativeModule* native_module);
// Measure the required buffer size needed for serialization.
size_t GetSerializedNativeModuleSize() const;
@@ -26,16 +26,15 @@ class WasmSerializer {
bool SerializeNativeModule(Vector<byte> buffer) const;
private:
- Isolate* isolate_;
NativeModule* native_module_;
std::vector<WasmCode*> code_table_;
};
// Support for deserializing WebAssembly {NativeModule} objects.
// Checks the version header of the data against the current version.
-bool IsSupportedVersion(Isolate* isolate, Vector<const byte> data);
+bool IsSupportedVersion(Vector<const byte> data);
-// Deserializes the given data to create a compiled Wasm module.
+// Deserializes the given data to create a Wasm module object.
MaybeHandle<WasmModuleObject> DeserializeNativeModule(
Isolate* isolate, Vector<const byte> data, Vector<const byte> wire_bytes);
diff --git a/deps/v8/src/wasm/wasm-text.cc b/deps/v8/src/wasm/wasm-text.cc
index 9885f18ce1..1bd0b0ce89 100644
--- a/deps/v8/src/wasm/wasm-text.cc
+++ b/deps/v8/src/wasm/wasm-text.cc
@@ -81,7 +81,9 @@ void PrintWasmText(const WasmModule* module, const ModuleWireBytes& wire_bytes,
for (; i.has_next(); i.next()) {
WasmOpcode opcode = i.current();
- if (opcode == kExprElse || opcode == kExprEnd) --control_depth;
+ if (opcode == kExprElse || opcode == kExprCatch || opcode == kExprEnd) {
+ --control_depth;
+ }
DCHECK_LE(0, control_depth);
const int kMaxIndentation = 64;
@@ -113,12 +115,21 @@ void PrintWasmText(const WasmModule* module, const ModuleWireBytes& wire_bytes,
}
case kExprBr:
case kExprBrIf: {
- BreakDepthImmediate<Decoder::kNoValidate> imm(&i, i.pc());
+ BranchDepthImmediate<Decoder::kNoValidate> imm(&i, i.pc());
os << WasmOpcodes::OpcodeName(opcode) << ' ' << imm.depth;
break;
}
+ case kExprBrOnExn: {
+ BranchDepthImmediate<Decoder::kNoValidate> imm_br(&i, i.pc());
+ ExceptionIndexImmediate<Decoder::kNoValidate> imm_idx(
+ &i, i.pc() + imm_br.length);
+ os << WasmOpcodes::OpcodeName(opcode) << ' ' << imm_br.depth << ' '
+ << imm_idx.index;
+ break;
+ }
case kExprElse:
- os << "else";
+ case kExprCatch:
+ os << WasmOpcodes::OpcodeName(opcode);
control_depth++;
break;
case kExprEnd:
@@ -149,8 +160,7 @@ void PrintWasmText(const WasmModule* module, const ModuleWireBytes& wire_bytes,
os << WasmOpcodes::OpcodeName(opcode) << ' ' << imm.index;
break;
}
- case kExprThrow:
- case kExprCatch: {
+ case kExprThrow: {
ExceptionIndexImmediate<Decoder::kNoValidate> imm(&i, i.pc());
os << WasmOpcodes::OpcodeName(opcode) << ' ' << imm.index;
break;
@@ -188,7 +198,7 @@ void PrintWasmText(const WasmModule* module, const ModuleWireBytes& wire_bytes,
case kExprNop:
case kExprReturn:
case kExprMemorySize:
- case kExprGrowMemory:
+ case kExprMemoryGrow:
case kExprDrop:
case kExprSelect:
os << WasmOpcodes::OpcodeName(opcode);
diff --git a/deps/v8/src/x64/assembler-x64-inl.h b/deps/v8/src/x64/assembler-x64-inl.h
index 4c6cce5482..0dcd84da79 100644
--- a/deps/v8/src/x64/assembler-x64-inl.h
+++ b/deps/v8/src/x64/assembler-x64-inl.h
@@ -256,9 +256,10 @@ Address Assembler::target_address_from_return_address(Address pc) {
}
void Assembler::deserialization_set_special_target_at(
- Address instruction_payload, Code* code, Address target) {
+ Address instruction_payload, Code code, Address target) {
set_target_address_at(instruction_payload,
- code ? code->constant_pool() : kNullAddress, target);
+ !code.is_null() ? code->constant_pool() : kNullAddress,
+ target);
}
int Assembler::deserialization_special_target_size(
@@ -310,13 +311,13 @@ int RelocInfo::target_address_size() {
if (IsCodedSpecially()) {
return Assembler::kSpecialTargetSize;
} else {
- return kPointerSize;
+ return kSystemPointerSize;
}
}
-HeapObject* RelocInfo::target_object() {
+HeapObject RelocInfo::target_object() {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- return HeapObject::cast(Memory<Object*>(pc_));
+ return HeapObject::cast(Object(Memory<Address>(pc_)));
}
Handle<HeapObject> RelocInfo::target_object_handle(Assembler* origin) {
@@ -353,20 +354,19 @@ Address RelocInfo::target_internal_reference_address() {
return pc_;
}
-void RelocInfo::set_target_object(Heap* heap, HeapObject* target,
+void RelocInfo::set_target_object(Heap* heap, HeapObject target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- Memory<Object*>(pc_) = target;
+ Memory<Address>(pc_) = target->ptr();
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
Assembler::FlushICache(pc_, sizeof(Address));
}
- if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != nullptr) {
+ if (write_barrier_mode == UPDATE_WRITE_BARRIER && !host().is_null()) {
WriteBarrierForCode(host(), this, target);
}
}
-
Address RelocInfo::target_runtime_entry(Assembler* origin) {
DCHECK(IsRuntimeEntry(rmode_));
return origin->runtime_entry_at(pc_);
diff --git a/deps/v8/src/x64/assembler-x64.cc b/deps/v8/src/x64/assembler-x64.cc
index e52c35a532..5cf944a697 100644
--- a/deps/v8/src/x64/assembler-x64.cc
+++ b/deps/v8/src/x64/assembler-x64.cc
@@ -18,7 +18,6 @@
#include "src/assembler-inl.h"
#include "src/base/bits.h"
#include "src/base/cpu.h"
-#include "src/code-stubs.h"
#include "src/deoptimizer.h"
#include "src/macro-assembler.h"
#include "src/string-constants.h"
@@ -32,9 +31,10 @@ namespace internal {
namespace {
-#if !V8_LIBC_MSVCRT
-
-V8_INLINE uint64_t _xgetbv(unsigned int xcr) {
+V8_INLINE uint64_t xgetbv(unsigned int xcr) {
+#if V8_LIBC_MSVCRT
+ return _xgetbv(xcr);
+#else
unsigned eax, edx;
// Check xgetbv; this uses a .byte sequence instead of the instruction
// directly because older assemblers do not include support for xgetbv and
@@ -42,13 +42,9 @@ V8_INLINE uint64_t _xgetbv(unsigned int xcr) {
// used.
__asm__ volatile(".byte 0x0F, 0x01, 0xD0" : "=a"(eax), "=d"(edx) : "c"(xcr));
return static_cast<uint64_t>(eax) | (static_cast<uint64_t>(edx) << 32);
+#endif
}
-#define _XCR_XFEATURE_ENABLED_MASK 0
-
-#endif // !V8_LIBC_MSVCRT
-
-
bool OSHasAVXSupport() {
#if V8_OS_MACOSX
// Mac OS X up to 10.9 has a bug where AVX transitions were indeed being
@@ -68,7 +64,7 @@ bool OSHasAVXSupport() {
if (kernel_version_major <= 13) return false;
#endif // V8_OS_MACOSX
// Check whether OS claims to support AVX.
- uint64_t feature_mask = _xgetbv(_XCR_XFEATURE_ENABLED_MASK);
+ uint64_t feature_mask = xgetbv(0); // XCR_XFEATURE_ENABLED_MASK
return (feature_mask & 0x6) == 0x6;
}
@@ -128,20 +124,6 @@ void CpuFeatures::PrintFeatures() {
// -----------------------------------------------------------------------------
// Implementation of RelocInfo
-void RelocInfo::set_js_to_wasm_address(Address address,
- ICacheFlushMode icache_flush_mode) {
- DCHECK_EQ(rmode_, JS_TO_WASM_CALL);
- Memory<Address>(pc_) = address;
- if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
- Assembler::FlushICache(pc_, sizeof(Address));
- }
-}
-
-Address RelocInfo::js_to_wasm_address() const {
- DCHECK_EQ(rmode_, JS_TO_WASM_CALL);
- return Memory<Address>(pc_);
-}
-
uint32_t RelocInfo::wasm_call_tag() const {
DCHECK(rmode_ == WASM_CALL || rmode_ == WASM_STUB_CALL);
return Memory<uint32_t>(pc_);
@@ -341,7 +323,7 @@ bool Operand::AddressUsesRegister(Register reg) const {
void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
DCHECK_IMPLIES(isolate == nullptr, heap_object_requests_.empty());
for (auto& request : heap_object_requests_) {
- Address pc = reinterpret_cast<Address>(buffer_) + request.offset();
+ Address pc = reinterpret_cast<Address>(buffer_start_) + request.offset();
switch (request.kind()) {
case HeapObjectRequest::kHeapNumber: {
Handle<HeapNumber> object =
@@ -349,11 +331,6 @@ void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
Memory<Handle<Object>>(pc) = object;
break;
}
- case HeapObjectRequest::kCodeStub: {
- request.code_stub()->set_isolate(isolate);
- UpdateCodeTarget(Memory<int32_t>(pc), request.code_stub()->GetCode());
- break;
- }
case HeapObjectRequest::kStringConstant: {
const StringConstantBase* str = request.string();
CHECK_NOT_NULL(str);
@@ -449,18 +426,11 @@ bool Assembler::UseConstPoolFor(RelocInfo::Mode rmode) {
// -----------------------------------------------------------------------------
// Implementation of Assembler.
-Assembler::Assembler(const AssemblerOptions& options, void* buffer,
- int buffer_size)
- : AssemblerBase(options, buffer, buffer_size), constpool_(this) {
-// Clear the buffer in debug mode unless it was provided by the
-// caller in which case we can't be sure it's okay to overwrite
-// existing code in it.
-#ifdef DEBUG
- if (own_buffer_) ZapCode(reinterpret_cast<Address>(buffer_), buffer_size_);
-#endif
-
+Assembler::Assembler(const AssemblerOptions& options,
+ std::unique_ptr<AssemblerBuffer> buffer)
+ : AssemblerBase(options, std::move(buffer)), constpool_(this) {
ReserveCodeTargetSpace(100);
- reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);
+ reloc_info_writer.Reposition(buffer_start_ + buffer_->size(), pc_);
if (CpuFeatures::IsSupported(SSE4_1)) {
EnableCpuFeature(SSSE3);
}
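The constructor no longer takes a raw (buffer, buffer_size) pair guarded by an own_buffer_ flag; ownership now lives in an AssemblerBuffer object that the assembler can ask to grow. A simplified model of that interface, assuming only the members this patch actually uses (start(), size(), Grow()):

    #include <cstdint>
    #include <memory>

    // Grow() returns a larger replacement buffer; copying the already-emitted
    // code into it remains the assembler's job (see GrowBuffer below).
    class AssemblerBuffer {
     public:
      virtual ~AssemblerBuffer() = default;
      virtual uint8_t* start() = 0;
      virtual int size() const = 0;
      virtual std::unique_ptr<AssemblerBuffer> Grow(int new_size) = 0;
    };

    class OwnedBuffer final : public AssemblerBuffer {
     public:
      explicit OwnedBuffer(int size) : data_(new uint8_t[size]), size_(size) {}
      uint8_t* start() override { return data_.get(); }
      int size() const override { return size_; }
      std::unique_ptr<AssemblerBuffer> Grow(int new_size) override {
        return std::make_unique<OwnedBuffer>(new_size);
      }
     private:
      std::unique_ptr<uint8_t[]> data_;
      int size_;
    };

An externally provided buffer would implement Grow() as a fatal error, which is how the old "if (!own_buffer_) FATAL(...)" branch disappears from GrowBuffer below.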
@@ -470,6 +440,8 @@ void Assembler::GetCode(Isolate* isolate, CodeDesc* desc) {
PatchConstPool();
DCHECK(constpool_.IsEmpty());
+ int code_comments_size = WriteCodeComments();
+
// At this point overflow() may be true, but the gap ensures
// that we are still not overlapping instructions and relocation info.
DCHECK(pc_ <= reloc_info_writer.pos()); // No overlap.
@@ -477,17 +449,20 @@ void Assembler::GetCode(Isolate* isolate, CodeDesc* desc) {
AllocateAndInstallRequestedHeapObjects(isolate);
// Set up code descriptor.
- desc->buffer = buffer_;
- desc->buffer_size = buffer_size_;
+ desc->buffer = buffer_start_;
+ desc->buffer_size = buffer_->size();
desc->instr_size = pc_offset();
DCHECK_GT(desc->instr_size, 0); // Zero-size code objects upset the system.
- desc->reloc_size =
- static_cast<int>((buffer_ + buffer_size_) - reloc_info_writer.pos());
+ desc->reloc_size = static_cast<int>((buffer_start_ + desc->buffer_size) -
+ reloc_info_writer.pos());
desc->origin = this;
desc->constant_pool_size = 0;
desc->unwinding_info_size = 0;
desc->unwinding_info = nullptr;
+ desc->code_comments_size = code_comments_size;
+}
+void Assembler::FinalizeJumpOptimizationInfo() {
// Collection stage
auto jump_opt = jump_optimization_info();
if (jump_opt && jump_opt->is_collecting()) {
@@ -512,7 +487,6 @@ void Assembler::GetCode(Isolate* isolate, CodeDesc* desc) {
}
}
-
void Assembler::Align(int m) {
DCHECK(base::bits::IsPowerOfTwo(m));
int delta = (m - (pc_offset() & (m - 1))) & (m - 1);
@@ -543,7 +517,7 @@ void Assembler::bind_to(Label* L, int pos) {
while (next != current) {
if (current >= 4 && long_at(current - 4) == 0) {
// Absolute address.
- intptr_t imm64 = reinterpret_cast<intptr_t>(buffer_ + pos);
+ intptr_t imm64 = reinterpret_cast<intptr_t>(buffer_start_ + pos);
*reinterpret_cast<intptr_t*>(addr_at(current - 4)) = imm64;
internal_reference_positions_.push_back(current - 4);
} else {
@@ -557,7 +531,7 @@ void Assembler::bind_to(Label* L, int pos) {
// Fix up last fixup on linked list.
if (current >= 4 && long_at(current - 4) == 0) {
// Absolute address.
- intptr_t imm64 = reinterpret_cast<intptr_t>(buffer_ + pos);
+ intptr_t imm64 = reinterpret_cast<intptr_t>(buffer_start_ + pos);
*reinterpret_cast<intptr_t*>(addr_at(current - 4)) = imm64;
internal_reference_positions_.push_back(current - 4);
} else {
@@ -621,50 +595,41 @@ bool Assembler::is_optimizable_farjmp(int idx) {
void Assembler::GrowBuffer() {
DCHECK(buffer_overflow());
- if (!own_buffer_) FATAL("external code buffer is too small");
// Compute new buffer size.
- CodeDesc desc; // the new buffer
- desc.buffer_size = 2 * buffer_size_;
+ DCHECK_EQ(buffer_start_, buffer_->start());
+ int old_size = buffer_->size();
+ int new_size = 2 * old_size;
// Some internal data structures overflow for very large buffers,
// they must ensure that kMaximalBufferSize is not too large.
- if (desc.buffer_size > kMaximalBufferSize) {
+ if (new_size > kMaximalBufferSize) {
V8::FatalProcessOutOfMemory(nullptr, "Assembler::GrowBuffer");
}
// Set up new buffer.
- desc.buffer = NewArray<byte>(desc.buffer_size);
- desc.origin = this;
- desc.instr_size = pc_offset();
- desc.reloc_size =
- static_cast<int>((buffer_ + buffer_size_) - (reloc_info_writer.pos()));
-
- // Clear the buffer in debug mode. Use 'int3' instructions to make
- // sure to get into problems if we ever run uninitialized code.
-#ifdef DEBUG
- ZapCode(reinterpret_cast<Address>(desc.buffer), desc.buffer_size);
-#endif
+ std::unique_ptr<AssemblerBuffer> new_buffer = buffer_->Grow(new_size);
+ DCHECK_EQ(new_size, new_buffer->size());
+ byte* new_start = new_buffer->start();
// Copy the data.
- intptr_t pc_delta = desc.buffer - buffer_;
- intptr_t rc_delta = (desc.buffer + desc.buffer_size) -
- (buffer_ + buffer_size_);
- MemMove(desc.buffer, buffer_, desc.instr_size);
+ intptr_t pc_delta = new_start - buffer_start_;
+ intptr_t rc_delta = (new_start + new_size) - (buffer_start_ + old_size);
+ size_t reloc_size = (buffer_start_ + old_size) - reloc_info_writer.pos();
+ MemMove(new_start, buffer_start_, pc_offset());
MemMove(rc_delta + reloc_info_writer.pos(), reloc_info_writer.pos(),
- desc.reloc_size);
+ reloc_size);
// Switch buffers.
- DeleteArray(buffer_);
- buffer_ = desc.buffer;
- buffer_size_ = desc.buffer_size;
+ buffer_ = std::move(new_buffer);
+ buffer_start_ = new_start;
pc_ += pc_delta;
reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
reloc_info_writer.last_pc() + pc_delta);
// Relocate internal references.
for (auto pos : internal_reference_positions_) {
- intptr_t* p = reinterpret_cast<intptr_t*>(buffer_ + pos);
+ intptr_t* p = reinterpret_cast<intptr_t*>(buffer_start_ + pos);
*p += pc_delta;
}
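GrowBuffer now doubles the current size, asks the buffer object for a replacement, and copies the code to the front and the relocation info to the back of the new buffer. The subtle step is the final loop: any absolute address the code embeds about itself must be shifted by the distance the code moved. A standalone sketch of just that fix-up (reloc-info handling omitted):

    #include <cstdint>
    #include <cstring>
    #include <vector>

    void GrowCodeBuffer(std::vector<uint8_t>& buffer, int pc_offset,
                        const std::vector<int>& internal_reference_positions) {
      std::vector<uint8_t> grown(buffer.size() * 2);
      // Same delta the code moves by (new_start - buffer_start_ above).
      intptr_t pc_delta = grown.data() - buffer.data();
      std::memcpy(grown.data(), buffer.data(), pc_offset);
      for (int pos : internal_reference_positions) {
        intptr_t* p = reinterpret_cast<intptr_t*>(grown.data() + pos);
        *p += pc_delta;  // e.g. an absolute address emitted for a bound label
      }
      buffer.swap(grown);
    }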
@@ -1125,16 +1090,6 @@ void Assembler::call(Address entry, RelocInfo::Mode rmode) {
emit_runtime_entry(entry, rmode);
}
-void Assembler::call(CodeStub* stub) {
- EnsureSpace ensure_space(this);
- // 1110 1000 #32-bit disp.
- emit(0xE8);
- RequestHeapObject(HeapObjectRequest(stub));
- RecordRelocInfo(RelocInfo::CODE_TARGET);
- int code_target_index = AddCodeTarget(Handle<Code>());
- emitl(code_target_index);
-}
-
void Assembler::call(Handle<Code> target, RelocInfo::Mode rmode) {
DCHECK(RelocInfo::IsCodeTarget(rmode));
EnsureSpace ensure_space(this);
@@ -1683,12 +1638,12 @@ void Assembler::emit_lea(Register dst, Operand src, int size) {
void Assembler::load_rax(Address value, RelocInfo::Mode mode) {
EnsureSpace ensure_space(this);
- if (kPointerSize == kInt64Size) {
+ if (kSystemPointerSize == kInt64Size) {
emit(0x48); // REX.W
emit(0xA1);
emitp(value, mode);
} else {
- DCHECK_EQ(kPointerSize, kInt32Size);
+ DCHECK_EQ(kSystemPointerSize, kInt32Size);
emit(0xA1);
emitp(value, mode);
// In 64-bit mode, need to zero extend the operand to 8 bytes.
@@ -1832,11 +1787,11 @@ void Assembler::movp(Register dst, Address value, RelocInfo::Mode rmode) {
if (constpool_.TryRecordEntry(value, rmode)) {
// Emit rip-relative move with offset = 0
Label label;
- emit_mov(dst, Operand(&label, 0), kPointerSize);
+ emit_mov(dst, Operand(&label, 0), kSystemPointerSize);
bind(&label);
} else {
EnsureSpace ensure_space(this);
- emit_rex(dst, kPointerSize);
+ emit_rex(dst, kSystemPointerSize);
emit(0xB8 | dst.low_bits());
emitp(value, rmode);
}
@@ -1844,7 +1799,7 @@ void Assembler::movp(Register dst, Address value, RelocInfo::Mode rmode) {
void Assembler::movp_heap_number(Register dst, double value) {
EnsureSpace ensure_space(this);
- emit_rex(dst, kPointerSize);
+ emit_rex(dst, kSystemPointerSize);
emit(0xB8 | dst.low_bits());
RequestHeapObject(HeapObjectRequest(value));
emitp(0, RelocInfo::EMBEDDED_OBJECT);
@@ -1852,7 +1807,7 @@ void Assembler::movp_heap_number(Register dst, double value) {
void Assembler::movp_string(Register dst, const StringConstantBase* str) {
EnsureSpace ensure_space(this);
- emit_rex(dst, kPointerSize);
+ emit_rex(dst, kSystemPointerSize);
emit(0xB8 | dst.low_bits());
RequestHeapObject(HeapObjectRequest(str));
emitp(0, RelocInfo::EMBEDDED_OBJECT);
@@ -1862,7 +1817,7 @@ void Assembler::movq(Register dst, int64_t value, RelocInfo::Mode rmode) {
if (constpool_.TryRecordEntry(value, rmode)) {
// Emit rip-relative move with offset = 0
Label label;
- emit_mov(dst, Operand(&label, 0), kPointerSize);
+ emit_mov(dst, Operand(&label, 0), kInt64Size);
bind(&label);
} else {
EnsureSpace ensure_space(this);
@@ -2356,12 +2311,12 @@ void Assembler::emit_xchg(Register dst, Operand src, int size) {
void Assembler::store_rax(Address dst, RelocInfo::Mode mode) {
EnsureSpace ensure_space(this);
- if (kPointerSize == kInt64Size) {
+ if (kSystemPointerSize == kInt64Size) {
emit(0x48); // REX.W
emit(0xA3);
emitp(dst, mode);
} else {
- DCHECK_EQ(kPointerSize, kInt32Size);
+ DCHECK_EQ(kSystemPointerSize, kInt32Size);
emit(0xA3);
emitp(dst, mode);
// In 64-bit mode, need to zero extend the operand to 8 bytes.
@@ -4901,6 +4856,16 @@ void Assembler::pshufhw(XMMRegister dst, XMMRegister src, uint8_t shuffle) {
emit(shuffle);
}
+void Assembler::pshufhw(XMMRegister dst, Operand src, uint8_t shuffle) {
+ EnsureSpace ensure_space(this);
+ emit(0xF3);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x70);
+ emit_sse_operand(dst, src);
+ emit(shuffle);
+}
+
void Assembler::pshuflw(XMMRegister dst, XMMRegister src, uint8_t shuffle) {
EnsureSpace ensure_space(this);
emit(0xF2);
@@ -4911,6 +4876,16 @@ void Assembler::pshuflw(XMMRegister dst, XMMRegister src, uint8_t shuffle) {
emit(shuffle);
}
+void Assembler::pshuflw(XMMRegister dst, Operand src, uint8_t shuffle) {
+ EnsureSpace ensure_space(this);
+ emit(0xF2);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x70);
+ emit_sse_operand(dst, src);
+ emit(shuffle);
+}
+
void Assembler::pshufd(XMMRegister dst, XMMRegister src, uint8_t shuffle) {
EnsureSpace ensure_space(this);
emit(0x66);
@@ -4981,7 +4956,7 @@ void Assembler::dq(Label* label) {
EnsureSpace ensure_space(this);
if (label->is_bound()) {
internal_reference_positions_.push_back(pc_offset());
- emitp(reinterpret_cast<Address>(buffer_) + label->pos(),
+ emitp(reinterpret_cast<Address>(buffer_start_) + label->pos(),
RelocInfo::INTERNAL_REFERENCE);
} else {
RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
@@ -5003,7 +4978,7 @@ void Assembler::dq(Label* label) {
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
if (!ShouldRecordRelocInfo(rmode)) return;
- RelocInfo rinfo(reinterpret_cast<Address>(pc_), rmode, data, nullptr);
+ RelocInfo rinfo(reinterpret_cast<Address>(pc_), rmode, data, Code());
reloc_info_writer.Write(&rinfo);
}
@@ -5020,16 +4995,10 @@ bool RelocInfo::IsCodedSpecially() {
return (1 << rmode_) & kApplyMask;
}
-
bool RelocInfo::IsInConstantPool() {
return false;
}
-int RelocInfo::GetDeoptimizationId(Isolate* isolate, DeoptimizeKind kind) {
- DCHECK(IsRuntimeEntry(rmode_));
- return Deoptimizer::GetDeoptimizationId(isolate, target_address(), kind);
-}
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/x64/assembler-x64.h b/deps/v8/src/x64/assembler-x64.h
index a0af3a3509..4f16dc0fd3 100644
--- a/deps/v8/src/x64/assembler-x64.h
+++ b/deps/v8/src/x64/assembler-x64.h
@@ -38,11 +38,14 @@
#define V8_X64_ASSEMBLER_X64_H_
#include <deque>
-#include <forward_list>
+#include <map>
#include <vector>
#include "src/assembler.h"
+#include "src/label.h"
+#include "src/objects/smi.h"
#include "src/x64/constants-x64.h"
+#include "src/x64/register-x64.h"
#include "src/x64/sse-instr.h"
namespace v8 {
@@ -50,177 +53,6 @@ namespace internal {
// Utility functions
-#define GENERAL_REGISTERS(V) \
- V(rax) \
- V(rcx) \
- V(rdx) \
- V(rbx) \
- V(rsp) \
- V(rbp) \
- V(rsi) \
- V(rdi) \
- V(r8) \
- V(r9) \
- V(r10) \
- V(r11) \
- V(r12) \
- V(r13) \
- V(r14) \
- V(r15)
-
-#define ALLOCATABLE_GENERAL_REGISTERS(V) \
- V(rax) \
- V(rbx) \
- V(rdx) \
- V(rcx) \
- V(rsi) \
- V(rdi) \
- V(r8) \
- V(r9) \
- V(r11) \
- V(r12) \
- V(r14) \
- V(r15)
-
-enum RegisterCode {
-#define REGISTER_CODE(R) kRegCode_##R,
- GENERAL_REGISTERS(REGISTER_CODE)
-#undef REGISTER_CODE
- kRegAfterLast
-};
-
-class Register : public RegisterBase<Register, kRegAfterLast> {
- public:
- bool is_byte_register() const { return reg_code_ <= 3; }
- // Return the high bit of the register code as a 0 or 1. Used often
- // when constructing the REX prefix byte.
- int high_bit() const { return reg_code_ >> 3; }
- // Return the 3 low bits of the register code. Used when encoding registers
- // in modR/M, SIB, and opcode bytes.
- int low_bits() const { return reg_code_ & 0x7; }
-
- private:
- friend class RegisterBase<Register, kRegAfterLast>;
- explicit constexpr Register(int code) : RegisterBase(code) {}
-};
-
-ASSERT_TRIVIALLY_COPYABLE(Register);
-static_assert(sizeof(Register) == sizeof(int),
- "Register can efficiently be passed by value");
-
-#define DECLARE_REGISTER(R) \
- constexpr Register R = Register::from_code<kRegCode_##R>();
-GENERAL_REGISTERS(DECLARE_REGISTER)
-#undef DECLARE_REGISTER
-constexpr Register no_reg = Register::no_reg();
-
-constexpr int kNumRegs = 16;
-
-constexpr RegList kJSCallerSaved =
- Register::ListOf<rax, rcx, rdx,
- rbx, // used as a caller-saved register in JavaScript code
- rdi // callee function
- >();
-
-constexpr int kNumJSCallerSaved = 5;
-
-// Number of registers for which space is reserved in safepoints.
-constexpr int kNumSafepointRegisters = 16;
-
-#ifdef _WIN64
- // Windows calling convention
-constexpr Register arg_reg_1 = rcx;
-constexpr Register arg_reg_2 = rdx;
-constexpr Register arg_reg_3 = r8;
-constexpr Register arg_reg_4 = r9;
-#else
- // AMD64 calling convention
-constexpr Register arg_reg_1 = rdi;
-constexpr Register arg_reg_2 = rsi;
-constexpr Register arg_reg_3 = rdx;
-constexpr Register arg_reg_4 = rcx;
-#endif // _WIN64
-
-
-#define DOUBLE_REGISTERS(V) \
- V(xmm0) \
- V(xmm1) \
- V(xmm2) \
- V(xmm3) \
- V(xmm4) \
- V(xmm5) \
- V(xmm6) \
- V(xmm7) \
- V(xmm8) \
- V(xmm9) \
- V(xmm10) \
- V(xmm11) \
- V(xmm12) \
- V(xmm13) \
- V(xmm14) \
- V(xmm15)
-
-#define FLOAT_REGISTERS DOUBLE_REGISTERS
-#define SIMD128_REGISTERS DOUBLE_REGISTERS
-
-#define ALLOCATABLE_DOUBLE_REGISTERS(V) \
- V(xmm0) \
- V(xmm1) \
- V(xmm2) \
- V(xmm3) \
- V(xmm4) \
- V(xmm5) \
- V(xmm6) \
- V(xmm7) \
- V(xmm8) \
- V(xmm9) \
- V(xmm10) \
- V(xmm11) \
- V(xmm12) \
- V(xmm13) \
- V(xmm14)
-
-constexpr bool kPadArguments = false;
-constexpr bool kSimpleFPAliasing = true;
-constexpr bool kSimdMaskRegisters = false;
-
-enum DoubleRegisterCode {
-#define REGISTER_CODE(R) kDoubleCode_##R,
- DOUBLE_REGISTERS(REGISTER_CODE)
-#undef REGISTER_CODE
- kDoubleAfterLast
-};
-
-class XMMRegister : public RegisterBase<XMMRegister, kDoubleAfterLast> {
- public:
- // Return the high bit of the register code as a 0 or 1. Used often
- // when constructing the REX prefix byte.
- int high_bit() const { return reg_code_ >> 3; }
- // Return the 3 low bits of the register code. Used when encoding registers
- // in modR/M, SIB, and opcode bytes.
- int low_bits() const { return reg_code_ & 0x7; }
-
- private:
- friend class RegisterBase<XMMRegister, kDoubleAfterLast>;
- explicit constexpr XMMRegister(int code) : RegisterBase(code) {}
-};
-
-ASSERT_TRIVIALLY_COPYABLE(XMMRegister);
-static_assert(sizeof(XMMRegister) == sizeof(int),
- "XMMRegister can efficiently be passed by value");
-
-typedef XMMRegister FloatRegister;
-
-typedef XMMRegister DoubleRegister;
-
-typedef XMMRegister Simd128Register;
-
-#define DECLARE_REGISTER(R) \
- constexpr DoubleRegister R = DoubleRegister::from_code<kDoubleCode_##R>();
-DOUBLE_REGISTERS(DECLARE_REGISTER)
-#undef DECLARE_REGISTER
-constexpr DoubleRegister no_dreg = DoubleRegister::no_reg();
-
enum Condition {
// any value < 0 is considered no_condition
no_condition = -1,
@@ -282,8 +114,8 @@ class Immediate {
explicit constexpr Immediate(int32_t value) : value_(value) {}
explicit constexpr Immediate(int32_t value, RelocInfo::Mode rmode)
: value_(value), rmode_(rmode) {}
- explicit Immediate(Smi* value)
- : value_(static_cast<int32_t>(reinterpret_cast<intptr_t>(value))) {
+ explicit Immediate(Smi value)
+ : value_(static_cast<int32_t>(static_cast<intptr_t>(value.ptr()))) {
DCHECK(SmiValuesAre31Bits()); // Only available for 31-bit SMI.
}
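Immediate(Smi*) becomes Immediate(Smi): the Smi travels by value and its tagged bits are read through ptr(). Truncating those bits to 32 is only lossless when Smis are 31-bit, which is exactly what the DCHECK guards. A standalone model, simplified to the classic low-bit-zero tagging scheme:

    #include <cassert>
    #include <cstdint>

    class Smi {
     public:
      static Smi FromInt(int32_t v) {
        return Smi(static_cast<intptr_t>(v) << 1);  // tag: low bit is zero
      }
      intptr_t ptr() const { return bits_; }        // tagged representation
     private:
      explicit Smi(intptr_t bits) : bits_(bits) {}
      intptr_t bits_;
    };

    int main() {
      Smi s = Smi::FromInt(-42);
      // What the new constructor stores: the tagged bits, truncated to 32.
      int32_t imm = static_cast<int32_t>(s.ptr());
      assert((imm >> 1) == -42);  // recoverable because the Smi fit in 31 bits
    }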
@@ -294,7 +126,7 @@ class Immediate {
friend class Assembler;
};
ASSERT_TRIVIALLY_COPYABLE(Immediate);
-static_assert(sizeof(Immediate) <= kPointerSize,
+static_assert(sizeof(Immediate) <= kSystemPointerSize,
"Immediate must be small enough to pass it by value");
// -----------------------------------------------------------------------------
@@ -306,10 +138,11 @@ enum ScaleFactor : int8_t {
times_4 = 2,
times_8 = 3,
times_int_size = times_4,
- times_pointer_size = (kPointerSize == 8) ? times_8 : times_4
+ times_pointer_size = (kSystemPointerSize == 8) ? times_8 : times_4,
+ times_tagged_size = (kTaggedSize == 8) ? times_8 : times_4,
};
-class Operand {
+class V8_EXPORT_PRIVATE Operand {
public:
struct Data {
byte rex = 0;
@@ -340,7 +173,7 @@ class Operand {
// [rip + disp/r]
explicit Operand(Label* label, int addend = 0);
- Operand(const Operand&) = default;
+ Operand(const Operand&) V8_NOEXCEPT = default;
// Checks whether either base or index register is the given register.
// Does not check the "reg" part of the Operand.
@@ -359,7 +192,7 @@ class Operand {
const Data data_;
};
ASSERT_TRIVIALLY_COPYABLE(Operand);
-static_assert(sizeof(Operand) <= 2 * kPointerSize,
+static_assert(sizeof(Operand) <= 2 * kSystemPointerSize,
"Operand must be small enough to pass it by value");
#define ASSEMBLER_INSTRUCTION_LIST(V) \
@@ -386,8 +219,8 @@ static_assert(sizeof(Operand) <= 2 * kPointerSize,
V(xchg) \
V(xor)
-// Shift instructions on operands/registers with kPointerSize, kInt32Size and
-// kInt64Size.
+// Shift instructions on operands/registers with kSystemPointerSize, kInt32Size
+// and kInt64Size.
#define SHIFT_INSTRUCTION_LIST(V) \
V(rol, 0x0) \
V(ror, 0x1) \
@@ -478,15 +311,9 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// for a detailed comment on the layout (globals.h).
//
// If the provided buffer is nullptr, the assembler allocates and grows its
- // own buffer, and buffer_size determines the initial buffer size. The buffer
- // is owned by the assembler and deallocated upon destruction of the
- // assembler.
- //
- // If the provided buffer is not nullptr, the assembler uses the provided
- // buffer for code generation and assumes its size to be buffer_size. If the
- // buffer is too small, a fatal error occurs. No deallocation of the buffer is
- // done upon destruction of the assembler.
- Assembler(const AssemblerOptions& options, void* buffer, int buffer_size);
+ // own buffer. Otherwise it takes ownership of the provided buffer.
+ explicit Assembler(const AssemblerOptions&,
+ std::unique_ptr<AssemblerBuffer> = {});
~Assembler() override = default;
// GetCode emits any pending (non-emitted) code and fills the descriptor
@@ -494,6 +321,8 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// Assembler functions are invoked in between GetCode() calls.
void GetCode(Isolate* isolate, CodeDesc* desc);
+ void FinalizeJumpOptimizationInfo();
+
// Read/Modify the code target in the relative branch/call instruction at pc.
// On the x64 architecture, we use relative jumps with a 32-bit displacement
// to jump to other Code objects in the Code space in the heap.
@@ -514,7 +343,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// This sets the branch destination (which is in the instruction on x64).
// This is for calls and branches within generated code.
inline static void deserialization_set_special_target_at(
- Address instruction_payload, Code* code, Address target);
+ Address instruction_payload, Code code, Address target);
// Get the size of the special target encoded at 'instruction_payload'.
inline static int deserialization_special_target_size(
@@ -533,17 +362,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// Distance between the address of the code target in the call instruction
// and the return address pushed on the stack.
static constexpr int kCallTargetAddressOffset = 4; // 32-bit displacement.
- // The length of call(kScratchRegister).
- static constexpr int kCallScratchRegisterInstructionLength = 3;
- // The length of call(Immediate32).
- static constexpr int kShortCallInstructionLength = 5;
- // The length of movq(kScratchRegister, address).
- static constexpr int kMoveAddressIntoScratchRegisterInstructionLength =
- 2 + kPointerSize;
- // The length of movq(kScratchRegister, address) and call(kScratchRegister).
- static constexpr int kCallSequenceLength =
- kMoveAddressIntoScratchRegisterInstructionLength +
- kCallScratchRegisterInstructionLength;
// One byte opcode for test eax,0xXXXXXXXX.
static constexpr byte kTestEaxByte = 0xA9;
@@ -582,52 +400,76 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// - Instructions on 64-bit (quadword) operands/registers use 'q'.
// - Instructions on operands/registers with pointer size use 'p'.
- STATIC_ASSERT(kPointerSize == kInt64Size || kPointerSize == kInt32Size);
-
-#define DECLARE_INSTRUCTION(instruction) \
- template<class P1> \
- void instruction##p(P1 p1) { \
- emit_##instruction(p1, kPointerSize); \
- } \
- \
- template<class P1> \
- void instruction##l(P1 p1) { \
- emit_##instruction(p1, kInt32Size); \
- } \
- \
- template<class P1> \
- void instruction##q(P1 p1) { \
- emit_##instruction(p1, kInt64Size); \
- } \
- \
- template<class P1, class P2> \
- void instruction##p(P1 p1, P2 p2) { \
- emit_##instruction(p1, p2, kPointerSize); \
- } \
- \
- template<class P1, class P2> \
- void instruction##l(P1 p1, P2 p2) { \
- emit_##instruction(p1, p2, kInt32Size); \
- } \
- \
- template<class P1, class P2> \
- void instruction##q(P1 p1, P2 p2) { \
- emit_##instruction(p1, p2, kInt64Size); \
- } \
- \
- template<class P1, class P2, class P3> \
- void instruction##p(P1 p1, P2 p2, P3 p3) { \
- emit_##instruction(p1, p2, p3, kPointerSize); \
- } \
- \
- template<class P1, class P2, class P3> \
- void instruction##l(P1 p1, P2 p2, P3 p3) { \
- emit_##instruction(p1, p2, p3, kInt32Size); \
- } \
- \
- template<class P1, class P2, class P3> \
- void instruction##q(P1 p1, P2 p2, P3 p3) { \
- emit_##instruction(p1, p2, p3, kInt64Size); \
+ STATIC_ASSERT(kSystemPointerSize == kInt64Size ||
+ kSystemPointerSize == kInt32Size);
+
+#define DECLARE_INSTRUCTION(instruction) \
+ template <class P1> \
+ void instruction##p(P1 p1) { \
+ emit_##instruction(p1, kSystemPointerSize); \
+ } \
+ \
+ template <class P1> \
+ void instruction##_tagged(P1 p1) { \
+ STATIC_ASSERT(kTaggedSize == kSystemPointerSize); \
+ /* TODO(ishell): change to kTaggedSize */ \
+ emit_##instruction(p1, COMPRESS_POINTERS_BOOL ? kInt32Size : kTaggedSize); \
+ } \
+ \
+ template <class P1> \
+ void instruction##l(P1 p1) { \
+ emit_##instruction(p1, kInt32Size); \
+ } \
+ \
+ template <class P1> \
+ void instruction##q(P1 p1) { \
+ emit_##instruction(p1, kInt64Size); \
+ } \
+ \
+ template <class P1, class P2> \
+ void instruction##p(P1 p1, P2 p2) { \
+ emit_##instruction(p1, p2, kSystemPointerSize); \
+ } \
+ \
+ template <class P1, class P2> \
+ void instruction##_tagged(P1 p1, P2 p2) { \
+ STATIC_ASSERT(kTaggedSize == kSystemPointerSize); \
+ /* TODO(ishell): change to kTaggedSize */ \
+ emit_##instruction(p1, p2, \
+ COMPRESS_POINTERS_BOOL ? kInt32Size : kTaggedSize); \
+ } \
+ \
+ template <class P1, class P2> \
+ void instruction##l(P1 p1, P2 p2) { \
+ emit_##instruction(p1, p2, kInt32Size); \
+ } \
+ \
+ template <class P1, class P2> \
+ void instruction##q(P1 p1, P2 p2) { \
+ emit_##instruction(p1, p2, kInt64Size); \
+ } \
+ \
+ template <class P1, class P2, class P3> \
+ void instruction##p(P1 p1, P2 p2, P3 p3) { \
+ emit_##instruction(p1, p2, p3, kSystemPointerSize); \
+ } \
+ \
+ template <class P1, class P2, class P3> \
+ void instruction##_tagged(P1 p1, P2 p2, P3 p3) { \
+ STATIC_ASSERT(kTaggedSize == kSystemPointerSize); \
+ /* TODO(ishell): change to kTaggedSize */ \
+ emit_##instruction(p1, p2, p3, \
+ COMPRESS_POINTERS_BOOL ? kInt32Size : kTaggedSize); \
+ } \
+ \
+ template <class P1, class P2, class P3> \
+ void instruction##l(P1 p1, P2 p2, P3 p3) { \
+ emit_##instruction(p1, p2, p3, kInt32Size); \
+ } \
+ \
+ template <class P1, class P2, class P3> \
+ void instruction##q(P1 p1, P2 p2, P3 p3) { \
+ emit_##instruction(p1, p2, p3, kInt64Size); \
}
ASSEMBLER_INSTRUCTION_LIST(DECLARE_INSTRUCTION)
#undef DECLARE_INSTRUCTION
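The notable addition in this macro is the _tagged family: operations on tagged (on-heap) values choose their operand width at compile time, 32-bit when pointer compression is enabled and tagged-size otherwise. The dispatch, reduced to a compilable sketch with stand-in constants for the build flags:

    #include <cstdio>

    constexpr bool kCompressPointers = true;  // stand-in for COMPRESS_POINTERS_BOOL
    constexpr int kInt32Size = 4;
    constexpr int kInt64Size = 8;
    constexpr int kSystemPointerSize = 8;
    constexpr int kTaggedSize = kSystemPointerSize;  // asserted equal in the macro

    void emit_add(int size) { std::printf("add, operand size %d\n", size); }

    void addq() { emit_add(kInt64Size); }          // always 64-bit
    void addl() { emit_add(kInt32Size); }          // always 32-bit
    void addp() { emit_add(kSystemPointerSize); }  // pointer-sized
    void add_tagged() {                            // sized for tagged values
      emit_add(kCompressPointers ? kInt32Size : kTaggedSize);
    }

    int main() { add_tagged(); }  // emits a 32-bit add under compression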
@@ -711,7 +553,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void repmovsb();
void repmovsw();
- void repmovsp() { emit_repmovs(kPointerSize); }
+ void repmovsp() { emit_repmovs(kSystemPointerSize); }
void repmovsl() { emit_repmovs(kInt32Size); }
void repmovsq() { emit_repmovs(kInt64Size); }
@@ -790,41 +632,45 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// Multiply rax by src, put the result in rdx:rax.
void mulq(Register src);
-#define DECLARE_SHIFT_INSTRUCTION(instruction, subcode) \
- void instruction##p(Register dst, Immediate imm8) { \
- shift(dst, imm8, subcode, kPointerSize); \
- } \
- \
- void instruction##l(Register dst, Immediate imm8) { \
- shift(dst, imm8, subcode, kInt32Size); \
- } \
- \
- void instruction##q(Register dst, Immediate imm8) { \
- shift(dst, imm8, subcode, kInt64Size); \
- } \
- \
- void instruction##p(Operand dst, Immediate imm8) { \
- shift(dst, imm8, subcode, kPointerSize); \
- } \
- \
- void instruction##l(Operand dst, Immediate imm8) { \
- shift(dst, imm8, subcode, kInt32Size); \
- } \
- \
- void instruction##q(Operand dst, Immediate imm8) { \
- shift(dst, imm8, subcode, kInt64Size); \
- } \
- \
- void instruction##p_cl(Register dst) { shift(dst, subcode, kPointerSize); } \
- \
- void instruction##l_cl(Register dst) { shift(dst, subcode, kInt32Size); } \
- \
- void instruction##q_cl(Register dst) { shift(dst, subcode, kInt64Size); } \
- \
- void instruction##p_cl(Operand dst) { shift(dst, subcode, kPointerSize); } \
- \
- void instruction##l_cl(Operand dst) { shift(dst, subcode, kInt32Size); } \
- \
+#define DECLARE_SHIFT_INSTRUCTION(instruction, subcode) \
+ void instruction##p(Register dst, Immediate imm8) { \
+ shift(dst, imm8, subcode, kSystemPointerSize); \
+ } \
+ \
+ void instruction##l(Register dst, Immediate imm8) { \
+ shift(dst, imm8, subcode, kInt32Size); \
+ } \
+ \
+ void instruction##q(Register dst, Immediate imm8) { \
+ shift(dst, imm8, subcode, kInt64Size); \
+ } \
+ \
+ void instruction##p(Operand dst, Immediate imm8) { \
+ shift(dst, imm8, subcode, kSystemPointerSize); \
+ } \
+ \
+ void instruction##l(Operand dst, Immediate imm8) { \
+ shift(dst, imm8, subcode, kInt32Size); \
+ } \
+ \
+ void instruction##q(Operand dst, Immediate imm8) { \
+ shift(dst, imm8, subcode, kInt64Size); \
+ } \
+ \
+ void instruction##p_cl(Register dst) { \
+ shift(dst, subcode, kSystemPointerSize); \
+ } \
+ \
+ void instruction##l_cl(Register dst) { shift(dst, subcode, kInt32Size); } \
+ \
+ void instruction##q_cl(Register dst) { shift(dst, subcode, kInt64Size); } \
+ \
+ void instruction##p_cl(Operand dst) { \
+ shift(dst, subcode, kSystemPointerSize); \
+ } \
+ \
+ void instruction##l_cl(Operand dst) { shift(dst, subcode, kInt32Size); } \
+ \
void instruction##q_cl(Operand dst) { shift(dst, subcode, kInt64Size); }
SHIFT_INSTRUCTION_LIST(DECLARE_SHIFT_INSTRUCTION)
#undef DECLARE_SHIFT_INSTRUCTION
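DECLARE_SHIFT_INSTRUCTION is an X-macro: SHIFT_INSTRUCTION_LIST supplies mnemonic/subcode pairs, and one macro invocation stamps out every size-suffixed immediate and CL-register variant. The hunk above only reflows the macro; the technique itself, reduced to a compilable toy:

    #include <cstdio>

    // Mnemonic/subcode list, as in SHIFT_INSTRUCTION_LIST.
    #define SHIFT_LIST(V) V(rol, 0x0) V(ror, 0x1) V(shl, 0x4) V(shr, 0x5)

    struct Emitter {
      void shift(int subcode, int size) {
        std::printf("emit shift subcode=%d size=%d\n", subcode, size);
      }
    // Each list entry declares its 32- and 64-bit forms.
    #define DECLARE_SHIFT(name, subcode)      \
      void name##l() { shift(subcode, 4); }   \
      void name##q() { shift(subcode, 8); }
      SHIFT_LIST(DECLARE_SHIFT)
    #undef DECLARE_SHIFT
    };

    int main() {
      Emitter e;
      e.shlq();  // expands from V(shl, 0x4)
      e.rorl();  // expands from V(ror, 0x1)
    }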
@@ -911,7 +757,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void call(Address entry, RelocInfo::Mode rmode);
void near_call(Address entry, RelocInfo::Mode rmode);
void near_jmp(Address entry, RelocInfo::Mode rmode);
- void call(CodeStub* stub);
void call(Handle<Code> target,
RelocInfo::Mode rmode = RelocInfo::CODE_TARGET);
@@ -1301,7 +1146,9 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void pshufd(XMMRegister dst, XMMRegister src, uint8_t shuffle);
void pshufd(XMMRegister dst, Operand src, uint8_t shuffle);
void pshufhw(XMMRegister dst, XMMRegister src, uint8_t shuffle);
+ void pshufhw(XMMRegister dst, Operand src, uint8_t shuffle);
void pshuflw(XMMRegister dst, XMMRegister src, uint8_t shuffle);
+ void pshuflw(XMMRegister dst, Operand src, uint8_t shuffle);
void cvtdq2ps(XMMRegister dst, XMMRegister src);
void cvtdq2ps(XMMRegister dst, Operand src);
@@ -1914,21 +1761,11 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
return pc_offset() - label->pos();
}
- // Record a comment relocation entry that can be used by a disassembler.
- // Use --code-comments to enable.
- void RecordComment(const char* msg);
-
// Record a deoptimization reason that can be used by a log or cpu profiler.
// Use --trace-deopt to enable.
void RecordDeoptReason(DeoptimizeReason reason, SourcePosition position,
int id);
- void PatchConstantPoolAccessInstruction(int pc_offset, int offset,
- ConstantPoolEntry::Access access,
- ConstantPoolEntry::Type type) {
- // No embedded constant pool support.
- UNREACHABLE();
- }
// Writes a single word of data in the code stream.
// Used for inline tables, e.g., jump-tables.
@@ -1961,15 +1798,15 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// Avoid overflows for displacements etc.
static constexpr int kMaximalBufferSize = 512 * MB;
- byte byte_at(int pos) { return buffer_[pos]; }
- void set_byte_at(int pos, byte value) { buffer_[pos] = value; }
+ byte byte_at(int pos) { return buffer_start_[pos]; }
+ void set_byte_at(int pos, byte value) { buffer_start_[pos] = value; }
protected:
// Call near indirect
void call(Operand operand);
private:
- byte* addr_at(int pos) { return buffer_ + pos; }
+ byte* addr_at(int pos) { return buffer_start_ + pos; }
uint32_t long_at(int pos) {
return *reinterpret_cast<uint32_t*>(addr_at(pos));
}
@@ -2410,6 +2247,8 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void AllocateAndInstallRequestedHeapObjects(Isolate* isolate);
+ int WriteCodeComments();
+
friend class EnsureSpace;
friend class RegExpMacroAssemblerX64;
diff --git a/deps/v8/src/x64/code-stubs-x64.cc b/deps/v8/src/x64/code-stubs-x64.cc
deleted file mode 100644
index d13181eea3..0000000000
--- a/deps/v8/src/x64/code-stubs-x64.cc
+++ /dev/null
@@ -1,581 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#if V8_TARGET_ARCH_X64
-
-#include "src/api-arguments-inl.h"
-#include "src/bootstrapper.h"
-#include "src/code-stubs.h"
-#include "src/counters.h"
-#include "src/double.h"
-#include "src/frame-constants.h"
-#include "src/frames.h"
-#include "src/heap/heap-inl.h"
-#include "src/ic/ic.h"
-#include "src/ic/stub-cache.h"
-#include "src/isolate.h"
-#include "src/objects-inl.h"
-#include "src/objects/api-callbacks.h"
-#include "src/objects/regexp-match-info.h"
-#include "src/regexp/jsregexp.h"
-#include "src/regexp/regexp-macro-assembler.h"
-#include "src/runtime/runtime.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm)
-
-void JSEntryStub::Generate(MacroAssembler* masm) {
- Label invoke, handler_entry, exit;
- Label not_outermost_js, not_outermost_js_2;
-
- ProfileEntryHookStub::MaybeCallEntryHook(masm);
-
- { // NOLINT. Scope block confuses linter.
- NoRootArrayScope uninitialized_root_register(masm);
- // Set up frame.
- __ pushq(rbp);
- __ movp(rbp, rsp);
-
- // Push the stack frame type.
- __ Push(Immediate(StackFrame::TypeToMarker(type()))); // context slot
- ExternalReference context_address =
- ExternalReference::Create(IsolateAddressId::kContextAddress, isolate());
- __ Load(kScratchRegister, context_address);
- __ Push(kScratchRegister); // context
- // Save callee-saved registers (X64/X32/Win64 calling conventions).
- __ pushq(r12);
- __ pushq(r13);
- __ pushq(r14);
- __ pushq(r15);
-#ifdef _WIN64
- __ pushq(rdi); // Only callee save in Win64 ABI, argument in AMD64 ABI.
- __ pushq(rsi); // Only callee save in Win64 ABI, argument in AMD64 ABI.
-#endif
- __ pushq(rbx);
-
-#ifdef _WIN64
- // On Win64 XMM6-XMM15 are callee-save
- __ subp(rsp, Immediate(EntryFrameConstants::kXMMRegistersBlockSize));
- __ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 0), xmm6);
- __ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 1), xmm7);
- __ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 2), xmm8);
- __ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 3), xmm9);
- __ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 4), xmm10);
- __ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 5), xmm11);
- __ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 6), xmm12);
- __ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 7), xmm13);
- __ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 8), xmm14);
- __ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 9), xmm15);
-#endif
-
- __ InitializeRootRegister();
- }
-
- // Save copies of the top frame descriptor on the stack.
- ExternalReference c_entry_fp =
- ExternalReference::Create(IsolateAddressId::kCEntryFPAddress, isolate());
- {
- Operand c_entry_fp_operand = masm->ExternalOperand(c_entry_fp);
- __ Push(c_entry_fp_operand);
- }
-
- // If this is the outermost JS call, set js_entry_sp value.
- ExternalReference js_entry_sp =
- ExternalReference::Create(IsolateAddressId::kJSEntrySPAddress, isolate());
- __ Load(rax, js_entry_sp);
- __ testp(rax, rax);
- __ j(not_zero, &not_outermost_js);
- __ Push(Immediate(StackFrame::OUTERMOST_JSENTRY_FRAME));
- __ movp(rax, rbp);
- __ Store(js_entry_sp, rax);
- Label cont;
- __ jmp(&cont);
- __ bind(&not_outermost_js);
- __ Push(Immediate(StackFrame::INNER_JSENTRY_FRAME));
- __ bind(&cont);
-
- // Jump to a faked try block that does the invoke, with a faked catch
- // block that sets the pending exception.
- __ jmp(&invoke);
- __ bind(&handler_entry);
- handler_offset_ = handler_entry.pos();
- // Caught exception: Store result (exception) in the pending exception
- // field in the JSEnv and return a failure sentinel.
- ExternalReference pending_exception = ExternalReference::Create(
- IsolateAddressId::kPendingExceptionAddress, isolate());
- __ Store(pending_exception, rax);
- __ LoadRoot(rax, RootIndex::kException);
- __ jmp(&exit);
-
- // Invoke: Link this frame into the handler chain.
- __ bind(&invoke);
- __ PushStackHandler();
-
- // Invoke the function by calling through JS entry trampoline builtin and
- // pop the faked function when we return. We load the address from an
- // external reference instead of inlining the call target address directly
- // in the code, because the builtin stubs may not have been generated yet
- // at the time this code is generated.
- __ Call(EntryTrampoline(), RelocInfo::CODE_TARGET);
-
- // Unlink this frame from the handler chain.
- __ PopStackHandler();
-
- __ bind(&exit);
- // Check if the current stack frame is marked as the outermost JS frame.
- __ Pop(rbx);
- __ cmpp(rbx, Immediate(StackFrame::OUTERMOST_JSENTRY_FRAME));
- __ j(not_equal, &not_outermost_js_2);
- __ Move(kScratchRegister, js_entry_sp);
- __ movp(Operand(kScratchRegister, 0), Immediate(0));
- __ bind(&not_outermost_js_2);
-
- // Restore the top frame descriptor from the stack.
- { Operand c_entry_fp_operand = masm->ExternalOperand(c_entry_fp);
- __ Pop(c_entry_fp_operand);
- }
-
- // Restore callee-saved registers (X64 conventions).
-#ifdef _WIN64
- // On Win64 XMM6-XMM15 are callee-save
- __ movdqu(xmm6, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 0));
- __ movdqu(xmm7, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 1));
- __ movdqu(xmm8, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 2));
- __ movdqu(xmm9, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 3));
- __ movdqu(xmm10, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 4));
- __ movdqu(xmm11, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 5));
- __ movdqu(xmm12, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 6));
- __ movdqu(xmm13, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 7));
- __ movdqu(xmm14, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 8));
- __ movdqu(xmm15, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 9));
- __ addp(rsp, Immediate(EntryFrameConstants::kXMMRegistersBlockSize));
-#endif
-
- __ popq(rbx);
-#ifdef _WIN64
- // Callee-saved in Win64 ABI, arguments/volatile in AMD64 ABI.
- __ popq(rsi);
- __ popq(rdi);
-#endif
- __ popq(r15);
- __ popq(r14);
- __ popq(r13);
- __ popq(r12);
- __ addp(rsp, Immediate(2 * kPointerSize)); // remove markers
-
- // Restore frame pointer and return.
- __ popq(rbp);
- __ ret(0);
-}
-
-void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
- if (masm->isolate()->function_entry_hook() != nullptr) {
- ProfileEntryHookStub stub(masm->isolate());
- masm->CallStub(&stub);
- }
-}
-
-void ProfileEntryHookStub::MaybeCallEntryHookDelayed(TurboAssembler* tasm,
- Zone* zone) {
- if (tasm->isolate()->function_entry_hook() != nullptr) {
- tasm->CallStubDelayed(new (zone) ProfileEntryHookStub(nullptr));
- }
-}
-
-void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
- // This stub can be called from essentially anywhere, so it needs to save
- // all volatile and callee-save registers.
- const size_t kNumSavedRegisters = 2;
- __ pushq(arg_reg_1);
- __ pushq(arg_reg_2);
-
- // Calculate the original stack pointer and store it in the second arg.
- __ leap(arg_reg_2,
- Operand(rsp, kNumSavedRegisters * kRegisterSize + kPCOnStackSize));
-
- // Calculate the function address to the first arg.
- __ movp(arg_reg_1, Operand(rsp, kNumSavedRegisters * kRegisterSize));
- __ subp(arg_reg_1, Immediate(Assembler::kShortCallInstructionLength));
-
- // Save the remainder of the volatile registers.
- masm->PushCallerSaved(kSaveFPRegs, arg_reg_1, arg_reg_2);
-
- // Call the entry hook function.
- __ Move(rax, FUNCTION_ADDR(isolate()->function_entry_hook()),
- RelocInfo::NONE);
-
- AllowExternalCallThatCantCauseGC scope(masm);
-
- const int kArgumentCount = 2;
- __ PrepareCallCFunction(kArgumentCount);
- __ CallCFunction(rax, kArgumentCount);
-
- // Restore volatile regs.
- masm->PopCallerSaved(kSaveFPRegs, arg_reg_1, arg_reg_2);
- __ popq(arg_reg_2);
- __ popq(arg_reg_1);
-
- __ Ret();
-}
-
-static int Offset(ExternalReference ref0, ExternalReference ref1) {
- int64_t offset = (ref0.address() - ref1.address());
- // Check that fits into int.
- DCHECK(static_cast<int>(offset) == offset);
- return static_cast<int>(offset);
-}
-
-// Prepares the stack for the arguments (aligns and so on). The WIN64 calling
-// convention requires putting the pointer to the return value slot into rcx
-// (rcx must be preserved until CallApiFunctionAndReturn). Clobbers rax.
-// Allocates arg_stack_space * kPointerSize inside the exit frame (not GCed),
-// accessible via StackSpaceOperand.
-static void PrepareCallApiFunction(MacroAssembler* masm, int arg_stack_space) {
- __ EnterApiExitFrame(arg_stack_space);
-}
-
-
-// Calls an API function. Allocates HandleScope, extracts returned value
-// from handle and propagates exceptions. Clobbers r14, r15, rbx and
-// caller-save registers. Restores context. On return removes
-// stack_space * kPointerSize (GCed).
-static void CallApiFunctionAndReturn(MacroAssembler* masm,
- Register function_address,
- ExternalReference thunk_ref,
- Register thunk_last_arg, int stack_space,
- Operand* stack_space_operand,
- Operand return_value_operand) {
- Label prologue;
- Label promote_scheduled_exception;
- Label delete_allocated_handles;
- Label leave_exit_frame;
- Label write_back;
-
- Isolate* isolate = masm->isolate();
- Factory* factory = isolate->factory();
- ExternalReference next_address =
- ExternalReference::handle_scope_next_address(isolate);
- const int kNextOffset = 0;
- const int kLimitOffset = Offset(
- ExternalReference::handle_scope_limit_address(isolate), next_address);
- const int kLevelOffset = Offset(
- ExternalReference::handle_scope_level_address(isolate), next_address);
- ExternalReference scheduled_exception_address =
- ExternalReference::scheduled_exception_address(isolate);
-
- DCHECK(rdx == function_address || r8 == function_address);
- // Allocate HandleScope in callee-save registers.
- Register prev_next_address_reg = r14;
- Register prev_limit_reg = rbx;
- Register base_reg = r15;
- __ Move(base_reg, next_address);
- __ movp(prev_next_address_reg, Operand(base_reg, kNextOffset));
- __ movp(prev_limit_reg, Operand(base_reg, kLimitOffset));
- __ addl(Operand(base_reg, kLevelOffset), Immediate(1));
-
- if (FLAG_log_timer_events) {
- FrameScope frame(masm, StackFrame::MANUAL);
- __ PushSafepointRegisters();
- __ PrepareCallCFunction(1);
- __ LoadAddress(arg_reg_1, ExternalReference::isolate_address(isolate));
- __ CallCFunction(ExternalReference::log_enter_external_function(), 1);
- __ PopSafepointRegisters();
- }
-
- Label profiler_disabled;
- Label end_profiler_check;
- __ Move(rax, ExternalReference::is_profiling_address(isolate));
- __ cmpb(Operand(rax, 0), Immediate(0));
- __ j(zero, &profiler_disabled);
-
- // Third parameter is the address of the actual getter function.
- __ Move(thunk_last_arg, function_address);
- __ Move(rax, thunk_ref);
- __ jmp(&end_profiler_check);
-
- __ bind(&profiler_disabled);
- // Call the api function!
- __ Move(rax, function_address);
-
- __ bind(&end_profiler_check);
-
- // Call the api function!
- __ call(rax);
-
- if (FLAG_log_timer_events) {
- FrameScope frame(masm, StackFrame::MANUAL);
- __ PushSafepointRegisters();
- __ PrepareCallCFunction(1);
- __ LoadAddress(arg_reg_1, ExternalReference::isolate_address(isolate));
- __ CallCFunction(ExternalReference::log_leave_external_function(), 1);
- __ PopSafepointRegisters();
- }
-
- // Load the value from ReturnValue
- __ movp(rax, return_value_operand);
- __ bind(&prologue);
-
- // No more valid handles (the result handle was the last one). Restore
- // previous handle scope.
- __ subl(Operand(base_reg, kLevelOffset), Immediate(1));
- __ movp(Operand(base_reg, kNextOffset), prev_next_address_reg);
- __ cmpp(prev_limit_reg, Operand(base_reg, kLimitOffset));
- __ j(not_equal, &delete_allocated_handles);
-
- // Leave the API exit frame.
- __ bind(&leave_exit_frame);
- if (stack_space_operand != nullptr) {
- __ movp(rbx, *stack_space_operand);
- }
- __ LeaveApiExitFrame();
-
- // Check if the function scheduled an exception.
- __ Move(rdi, scheduled_exception_address);
- __ Cmp(Operand(rdi, 0), factory->the_hole_value());
- __ j(not_equal, &promote_scheduled_exception);
-
-#if DEBUG
- // Check if the function returned a valid JavaScript value.
- Label ok;
- Register return_value = rax;
- Register map = rcx;
-
- __ JumpIfSmi(return_value, &ok, Label::kNear);
- __ movp(map, FieldOperand(return_value, HeapObject::kMapOffset));
-
- __ CmpInstanceType(map, LAST_NAME_TYPE);
- __ j(below_equal, &ok, Label::kNear);
-
- __ CmpInstanceType(map, FIRST_JS_RECEIVER_TYPE);
- __ j(above_equal, &ok, Label::kNear);
-
- __ CompareRoot(map, RootIndex::kHeapNumberMap);
- __ j(equal, &ok, Label::kNear);
-
- __ CompareRoot(return_value, RootIndex::kUndefinedValue);
- __ j(equal, &ok, Label::kNear);
-
- __ CompareRoot(return_value, RootIndex::kTrueValue);
- __ j(equal, &ok, Label::kNear);
-
- __ CompareRoot(return_value, RootIndex::kFalseValue);
- __ j(equal, &ok, Label::kNear);
-
- __ CompareRoot(return_value, RootIndex::kNullValue);
- __ j(equal, &ok, Label::kNear);
-
- __ Abort(AbortReason::kAPICallReturnedInvalidObject);
-
- __ bind(&ok);
-#endif
-
- if (stack_space_operand != nullptr) {
- DCHECK_EQ(stack_space, 0);
- __ PopReturnAddressTo(rcx);
- __ addq(rsp, rbx);
- __ jmp(rcx);
- } else {
- __ ret(stack_space * kPointerSize);
- }
-
- // Re-throw by promoting a scheduled exception.
- __ bind(&promote_scheduled_exception);
- __ TailCallRuntime(Runtime::kPromoteScheduledException);
-
- // HandleScope limit has changed. Delete allocated extensions.
- __ bind(&delete_allocated_handles);
- __ movp(Operand(base_reg, kLimitOffset), prev_limit_reg);
- __ movp(prev_limit_reg, rax);
- __ LoadAddress(arg_reg_1, ExternalReference::isolate_address(isolate));
- __ LoadAddress(rax, ExternalReference::delete_handle_scope_extensions());
- __ call(rax);
- __ movp(rax, prev_limit_reg);
- __ jmp(&leave_exit_frame);
-}
-
-void CallApiCallbackStub::Generate(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- rbx : call_data
- // -- rcx : holder
- // -- rdx : api_function_address
- // -- rsi : context
- // -- rax : number of arguments if argc is a register
- // -- rsp[0] : return address
- // -- rsp[8] : last argument
- // -- ...
- // -- rsp[argc * 8] : first argument
- // -- rsp[(argc + 1) * 8] : receiver
- // -----------------------------------
-
- Register call_data = rbx;
- Register holder = rcx;
- Register api_function_address = rdx;
- Register return_address = r8;
-
- typedef FunctionCallbackArguments FCA;
-
- STATIC_ASSERT(FCA::kArgsLength == 6);
- STATIC_ASSERT(FCA::kNewTargetIndex == 5);
- STATIC_ASSERT(FCA::kDataIndex == 4);
- STATIC_ASSERT(FCA::kReturnValueOffset == 3);
- STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
- STATIC_ASSERT(FCA::kIsolateIndex == 1);
- STATIC_ASSERT(FCA::kHolderIndex == 0);
-
- __ PopReturnAddressTo(return_address);
-
- // new target
- __ PushRoot(RootIndex::kUndefinedValue);
-
- // call data
- __ Push(call_data);
-
- // return value
- __ PushRoot(RootIndex::kUndefinedValue);
- // return value default
- __ PushRoot(RootIndex::kUndefinedValue);
- // isolate
- Register scratch = call_data;
- __ Move(scratch, ExternalReference::isolate_address(masm->isolate()));
- __ Push(scratch);
- // holder
- __ Push(holder);
-
- int argc = this->argc();
-
- __ movp(scratch, rsp);
- // Push return address back on stack.
- __ PushReturnAddressFrom(return_address);
-
- // Allocate the v8::Arguments structure in the arguments' space since
- // it's not controlled by GC.
- const int kApiStackSpace = 3;
-
- PrepareCallApiFunction(masm, kApiStackSpace);
-
- // FunctionCallbackInfo::implicit_args_.
- __ movp(StackSpaceOperand(0), scratch);
- __ addp(scratch, Immediate((argc + FCA::kArgsLength - 1) * kPointerSize));
- // FunctionCallbackInfo::values_.
- __ movp(StackSpaceOperand(1), scratch);
- // FunctionCallbackInfo::length_.
- __ Set(StackSpaceOperand(2), argc);
-
-#if defined(__MINGW64__) || defined(_WIN64)
- Register arguments_arg = rcx;
- Register callback_arg = rdx;
-#else
- Register arguments_arg = rdi;
- Register callback_arg = rsi;
-#endif
-
- // It's okay if api_function_address == callback_arg
- // but not arguments_arg
- DCHECK(api_function_address != arguments_arg);
-
- // v8::InvocationCallback's argument.
- __ leap(arguments_arg, StackSpaceOperand(0));
-
- ExternalReference thunk_ref = ExternalReference::invoke_function_callback();
-
- // Accessor for FunctionCallbackInfo and first js arg.
- StackArgumentsAccessor args_from_rbp(rbp, FCA::kArgsLength + 1,
- ARGUMENTS_DONT_CONTAIN_RECEIVER);
- Operand return_value_operand = args_from_rbp.GetArgumentOperand(
- FCA::kArgsLength - FCA::kReturnValueOffset);
- const int stack_space = argc + FCA::kArgsLength + 1;
- Operand* stack_space_operand = nullptr;
- CallApiFunctionAndReturn(masm, api_function_address, thunk_ref, callback_arg,
- stack_space, stack_space_operand,
- return_value_operand);
-}
-
-
-void CallApiGetterStub::Generate(MacroAssembler* masm) {
-#if defined(__MINGW64__) || defined(_WIN64)
- Register getter_arg = r8;
- Register accessor_info_arg = rdx;
- Register name_arg = rcx;
-#else
- Register getter_arg = rdx;
- Register accessor_info_arg = rsi;
- Register name_arg = rdi;
-#endif
- Register api_function_address = r8;
- Register receiver = ApiGetterDescriptor::ReceiverRegister();
- Register holder = ApiGetterDescriptor::HolderRegister();
- Register callback = ApiGetterDescriptor::CallbackRegister();
- Register scratch = rax;
- DCHECK(!AreAliased(receiver, holder, callback, scratch));
-
- // Build v8::PropertyCallbackInfo::args_ array on the stack and push property
- // name below the exit frame to make GC aware of them.
- STATIC_ASSERT(PropertyCallbackArguments::kShouldThrowOnErrorIndex == 0);
- STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 1);
- STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 2);
- STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 3);
- STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 4);
- STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 5);
- STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 6);
- STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 7);
-
- // Insert additional parameters into the stack frame above return address.
- __ PopReturnAddressTo(scratch);
- __ Push(receiver);
- __ Push(FieldOperand(callback, AccessorInfo::kDataOffset));
- __ LoadRoot(kScratchRegister, RootIndex::kUndefinedValue);
- __ Push(kScratchRegister); // return value
- __ Push(kScratchRegister); // return value default
- __ PushAddress(ExternalReference::isolate_address(isolate()));
- __ Push(holder);
- __ Push(Smi::kZero); // should_throw_on_error -> false
- __ Push(FieldOperand(callback, AccessorInfo::kNameOffset));
- __ PushReturnAddressFrom(scratch);
-
- // v8::PropertyCallbackInfo::args_ array and name handle.
- const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
-
- // Allocate v8::PropertyCallbackInfo in non-GCed stack space.
- const int kArgStackSpace = 1;
-
- // Load address of v8::PropertyAccessorInfo::args_ array.
- __ leap(scratch, Operand(rsp, 2 * kPointerSize));
-
- PrepareCallApiFunction(masm, kArgStackSpace);
- // Create v8::PropertyCallbackInfo object on the stack and initialize
- // it's args_ field.
- Operand info_object = StackSpaceOperand(0);
- __ movp(info_object, scratch);
-
- __ leap(name_arg, Operand(scratch, -kPointerSize));
- // The context register (rsi) has been saved in PrepareCallApiFunction and
- // could be used to pass arguments.
- __ leap(accessor_info_arg, info_object);
-
- ExternalReference thunk_ref =
- ExternalReference::invoke_accessor_getter_callback();
-
- // It's okay if api_function_address == getter_arg
- // but not accessor_info_arg or name_arg
- DCHECK(api_function_address != accessor_info_arg);
- DCHECK(api_function_address != name_arg);
- __ movp(scratch, FieldOperand(callback, AccessorInfo::kJsGetterOffset));
- __ movp(api_function_address,
- FieldOperand(scratch, Foreign::kForeignAddressOffset));
-
- // +3 is to skip prolog, return address and name handle.
- Operand return_value_operand(
- rbp, (PropertyCallbackArguments::kReturnValueOffset + 3) * kPointerSize);
- CallApiFunctionAndReturn(masm, api_function_address, thunk_ref, getter_arg,
- kStackUnwindSpace, nullptr, return_value_operand);
-}
-
-#undef __
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_TARGET_ARCH_X64
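Much of the deleted CallApiFunctionAndReturn above is hand-rolled HandleScope bookkeeping: save the scope's next and limit fields and bump level on entry, restore them after the API call, and fall into a slow path only if limit moved, i.e. the callee allocated scope extensions. The same protocol in plain C++ (a model of the protocol, not V8's actual types):

    #include <cstdint>

    struct HandleScopeData {
      uintptr_t* next = nullptr;
      uintptr_t* limit = nullptr;
      int level = 0;
    };

    void DeleteExtensions(HandleScopeData*) { /* free extra handle blocks */ }

    template <typename Fn>
    uintptr_t CallWithHandleScope(HandleScopeData* data, Fn api_call) {
      uintptr_t* prev_next = data->next;   // save
      uintptr_t* prev_limit = data->limit;
      data->level++;
      uintptr_t result = api_call();
      data->level--;                       // restore
      data->next = prev_next;
      if (data->limit != prev_limit) {     // the callee grew the scope
        data->limit = prev_limit;
        DeleteExtensions(data);            // slow path, as in the stub
      }
      return result;
    }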
diff --git a/deps/v8/src/x64/codegen-x64.cc b/deps/v8/src/x64/codegen-x64.cc
deleted file mode 100644
index bced4a0fd3..0000000000
--- a/deps/v8/src/x64/codegen-x64.cc
+++ /dev/null
@@ -1,45 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#if V8_TARGET_ARCH_X64
-
-#include "src/codegen.h"
-#include "src/macro-assembler.h"
-#include "src/x64/assembler-x64-inl.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ masm.
-
-UnaryMathFunction CreateSqrtFunction() {
- v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
- size_t allocated = 0;
- byte* buffer = AllocatePage(page_allocator,
- page_allocator->GetRandomMmapAddr(), &allocated);
- if (buffer == nullptr) return nullptr;
-
- MacroAssembler masm(AssemblerOptions{}, buffer, static_cast<int>(allocated));
-
- // xmm0: raw double input.
- // Move double input into registers.
- __ Sqrtsd(xmm0, xmm0);
- __ Ret();
-
- CodeDesc desc;
- masm.GetCode(nullptr, &desc);
- DCHECK(!RelocInfo::RequiresRelocationAfterCodegen(desc));
-
- Assembler::FlushICache(buffer, allocated);
- CHECK(SetPermissions(page_allocator, buffer, allocated,
- PageAllocator::kReadExecute));
- return FUNCTION_CAST<UnaryMathFunction>(buffer);
-}
-
-#undef __
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_TARGET_ARCH_X64
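The deleted CreateSqrtFunction is a compact example of the JIT lifecycle: allocate a writable page, assemble code into it, flush the instruction cache, flip the page to read+execute, and cast the buffer to a function pointer. The same lifecycle sketched without the MacroAssembler, POSIX x86-64 only (on x86 the instruction cache is coherent, so the flush is a no-op):

    #include <cstdint>
    #include <cstring>
    #include <sys/mman.h>

    using UnaryMathFunction = double (*)(double);

    UnaryMathFunction CreateSqrtFunction() {
      // sqrtsd xmm0, xmm0 ; ret  (argument and result both live in xmm0)
      static const uint8_t kCode[] = {0xF2, 0x0F, 0x51, 0xC0, 0xC3};
      void* page = mmap(nullptr, 4096, PROT_READ | PROT_WRITE,
                        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      if (page == MAP_FAILED) return nullptr;
      std::memcpy(page, kCode, sizeof(kCode));
      mprotect(page, 4096, PROT_READ | PROT_EXEC);  // W^X: drop write, add exec
      return reinterpret_cast<UnaryMathFunction>(page);
    }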
diff --git a/deps/v8/src/x64/cpu-x64.cc b/deps/v8/src/x64/cpu-x64.cc
index f98862b001..07e00023ff 100644
--- a/deps/v8/src/x64/cpu-x64.cc
+++ b/deps/v8/src/x64/cpu-x64.cc
@@ -10,8 +10,7 @@
#if V8_TARGET_ARCH_X64
-#include "src/assembler.h"
-#include "src/macro-assembler.h"
+#include "src/cpu-features.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/x64/deoptimizer-x64.cc b/deps/v8/src/x64/deoptimizer-x64.cc
index 481a47c164..a600c329ce 100644
--- a/deps/v8/src/x64/deoptimizer-x64.cc
+++ b/deps/v8/src/x64/deoptimizer-x64.cc
@@ -5,6 +5,7 @@
#if V8_TARGET_ARCH_X64
#include "src/deoptimizer.h"
+#include "src/macro-assembler.h"
#include "src/objects-inl.h"
#include "src/register-configuration.h"
#include "src/safepoint-table.h"
@@ -12,15 +13,12 @@
namespace v8 {
namespace internal {
-const int Deoptimizer::table_entry_size_ = 5;
+#define __ masm->
-#define __ masm()->
-
-void Deoptimizer::TableEntryGenerator::Generate() {
- Label deopt_table_entry;
- __ bind(&deopt_table_entry);
-
- GeneratePrologue();
+void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
+ Isolate* isolate,
+ DeoptimizeKind deopt_kind) {
+ NoRootArrayScope no_root_array(masm);
// Save all general purpose registers before messing with them.
const int kNumberOfRegisters = Register::kNumRegisters;
@@ -57,7 +55,7 @@ void Deoptimizer::TableEntryGenerator::Generate() {
kNumberOfRegisters * kRegisterSize + kDoubleRegsSize + kFloatRegsSize;
__ Store(
- ExternalReference::Create(IsolateAddressId::kCEntryFPAddress, isolate()),
+ ExternalReference::Create(IsolateAddressId::kCEntryFPAddress, isolate),
rbp);
// We use this to keep the value of the fifth argument temporarily.
@@ -65,29 +63,13 @@ void Deoptimizer::TableEntryGenerator::Generate() {
// this on linux), since it is another parameter passing register on windows.
Register arg5 = r11;
- // Get the bailout id from the stack.
- __ movp(rax, Operand(rsp, kSavedRegistersAreaSize));
-
- // address of deoptimization table
- __ leap(rdx, Operand(&deopt_table_entry));
-
- // rax = deopt_entry - deopt_table_entry - 5
- __ subp(rax, rdx);
- __ subl(rax, Immediate(5));
-
- // rax /= 5
- __ movl(rbx, Immediate(0xcccccccd));
- __ imulq(rax, rbx);
- __ shrq(rax, Immediate(0x22));
-
- // bailout id
- __ movl(arg_reg_3, rax);
+  // The bailout id is passed by the caller in r13.
+ __ movp(arg_reg_3, r13);
// Get the address of the location in the code object
// and compute the fp-to-sp delta in register arg5.
- __ movp(arg_reg_4, Operand(rsp, kSavedRegistersAreaSize + 1 * kRegisterSize));
- __ leap(arg5, Operand(rsp, kSavedRegistersAreaSize + 1 * kRegisterSize +
- kPCOnStackSize));
+ __ movp(arg_reg_4, Operand(rsp, kSavedRegistersAreaSize));
+ __ leap(arg5, Operand(rsp, kSavedRegistersAreaSize + kPCOnStackSize));
__ subp(arg5, rbp);
__ negp(arg5);
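The removed prologue recovered the bailout id from the return address: the deopt table was a run of 5-byte call instructions (table_entry_size_ = 5), so entry i returns to table_start + 5 * (i + 1), giving i = (ret_addr - table_start - 5) / 5. The division by 5 used the multiply-and-shift trick: 0xCCCCCCCD is ceil(2^34 / 5), and shrq by 0x22 shifts 34 bits. The new scheme simply passes the id in r13. The constant checks out:

    #include <cassert>
    #include <cstdint>

    // floor(x / 5) for any 32-bit x via multiply-high: 0xCCCCCCCD ~ 2^34 / 5.
    uint32_t DivBy5(uint32_t x) {
      return static_cast<uint32_t>(
          (static_cast<uint64_t>(x) * 0xCCCCCCCDull) >> 34);
    }

    int main() {
      for (uint64_t x = 0; x <= 0xFFFFFFFFull; x += 12345)
        assert(DivBy5(static_cast<uint32_t>(x)) == static_cast<uint32_t>(x) / 5);
    }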
@@ -101,21 +83,22 @@ void Deoptimizer::TableEntryGenerator::Generate() {
__ movp(rax, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
__ bind(&context_check);
__ movp(arg_reg_1, rax);
- __ Set(arg_reg_2, static_cast<int>(deopt_kind()));
+ __ Set(arg_reg_2, static_cast<int>(deopt_kind));
// Args 3 and 4 are already in the right registers.
// On windows put the arguments on the stack (PrepareCallCFunction
// has created space for this). On linux pass the arguments in r8 and r9.
#ifdef _WIN64
__ movq(Operand(rsp, 4 * kRegisterSize), arg5);
- __ LoadAddress(arg5, ExternalReference::isolate_address(isolate()));
+ __ LoadAddress(arg5, ExternalReference::isolate_address(isolate));
__ movq(Operand(rsp, 5 * kRegisterSize), arg5);
#else
__ movp(r8, arg5);
- __ LoadAddress(r9, ExternalReference::isolate_address(isolate()));
+ __ LoadAddress(r9, ExternalReference::isolate_address(isolate));
#endif
- { AllowExternalCallThatCantCauseGC scope(masm());
+ {
+ AllowExternalCallThatCantCauseGC scope(masm);
__ CallCFunction(ExternalReference::new_deoptimizer_function(), 6);
}
// Preserve deoptimizer object in register rax and get the input
@@ -124,7 +107,7 @@ void Deoptimizer::TableEntryGenerator::Generate() {
// Fill in the input registers.
for (int i = kNumberOfRegisters -1; i >= 0; i--) {
- int offset = (i * kPointerSize) + FrameDescription::registers_offset();
+ int offset = (i * kRegisterSize) + FrameDescription::registers_offset();
__ PopQuad(Operand(rbx, offset));
}
@@ -145,8 +128,8 @@ void Deoptimizer::TableEntryGenerator::Generate() {
__ popq(Operand(rbx, dst_offset));
}
- // Remove the bailout id and return address from the stack.
- __ addp(rsp, Immediate(1 * kRegisterSize + kPCOnStackSize));
+ // Remove the return address from the stack.
+ __ addp(rsp, Immediate(kPCOnStackSize));
// Compute a pointer to the unwinding limit in register rcx; that is
// the first stack slot not part of the input frame.
@@ -171,9 +154,9 @@ void Deoptimizer::TableEntryGenerator::Generate() {
__ pushq(rax);
__ PrepareCallCFunction(2);
__ movp(arg_reg_1, rax);
- __ LoadAddress(arg_reg_2, ExternalReference::isolate_address(isolate()));
+ __ LoadAddress(arg_reg_2, ExternalReference::isolate_address(isolate));
{
- AllowExternalCallThatCantCauseGC scope(masm());
+ AllowExternalCallThatCantCauseGC scope(masm);
__ CallCFunction(ExternalReference::compute_output_frames_function(), 2);
}
__ popq(rax);
@@ -200,7 +183,7 @@ void Deoptimizer::TableEntryGenerator::Generate() {
__ bind(&inner_loop_header);
__ testp(rcx, rcx);
__ j(not_zero, &inner_push_loop);
- __ addp(rax, Immediate(kPointerSize));
+ __ addp(rax, Immediate(kSystemPointerSize));
__ bind(&outer_loop_header);
__ cmpp(rax, rdx);
__ j(below, &outer_push_loop);
@@ -218,7 +201,7 @@ void Deoptimizer::TableEntryGenerator::Generate() {
// Push the registers from the last output frame.
for (int i = 0; i < kNumberOfRegisters; i++) {
- int offset = (i * kPointerSize) + FrameDescription::registers_offset();
+ int offset = (i * kRegisterSize) + FrameDescription::registers_offset();
__ PushQuad(Operand(rbx, offset));
}
@@ -234,41 +217,25 @@ void Deoptimizer::TableEntryGenerator::Generate() {
__ popq(r);
}
- // Set up the roots register.
- __ InitializeRootRegister();
-
// Return to the continuation point.
__ ret(0);
}
-
-void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
- // Create a sequence of deoptimization entries.
- Label done;
- for (int i = 0; i < count(); i++) {
- int start = masm()->pc_offset();
- USE(start);
- __ call(&done);
- DCHECK(masm()->pc_offset() - start == table_entry_size_);
- }
- __ bind(&done);
-}
-
bool Deoptimizer::PadTopOfStackRegister() { return false; }
void FrameDescription::SetCallerPc(unsigned offset, intptr_t value) {
- if (kPCOnStackSize == 2 * kPointerSize) {
+ if (kPCOnStackSize == 2 * kSystemPointerSize) {
// Zero out the high 32 bits of the PC for the x32 port.
- SetFrameSlot(offset + kPointerSize, 0);
+ SetFrameSlot(offset + kSystemPointerSize, 0);
}
SetFrameSlot(offset, value);
}
void FrameDescription::SetCallerFp(unsigned offset, intptr_t value) {
- if (kFPOnStackSize == 2 * kPointerSize) {
+ if (kFPOnStackSize == 2 * kSystemPointerSize) {
// Zero out the high 32 bits of the FP for the x32 port.
- SetFrameSlot(offset + kPointerSize, 0);
+ SetFrameSlot(offset + kSystemPointerSize, 0);
}
SetFrameSlot(offset, value);
}
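The deleted table-entry code above is worth a second look: each deoptimization entry was a 5-byte call, so the stub recovered the bailout id as (offset - 5) / 5, and the movl/imulq/shrq triple is an inlined unsigned division by 5 through the fixed-point reciprocal 0xCCCCCCCD == ceil(2^34 / 5). A minimal standalone C++ model of that arithmetic, offered as a sketch rather than V8 code:

    #include <cassert>
    #include <cstdint>

    // floor(x / 5) == (x * 0xCCCCCCCD) >> 34 for every 32-bit x: the
    // reciprocal overshoots 2^34 / 5 by only 1/5, so the rounding error
    // never reaches the next integer in this range.
    uint32_t DivideBy5(uint32_t x) {
      return static_cast<uint32_t>(
          (static_cast<uint64_t>(x) * 0xCCCCCCCDull) >> 34);
    }

    int main() {
      for (uint64_t x = 0; x <= 0xFFFFFFFFull; x += 977) {
        assert(DivideBy5(static_cast<uint32_t>(x)) ==
               static_cast<uint32_t>(x) / 5);
      }
    }

The replacement scheme drops the table entirely; see CallForDeoptimization at the end of the macro-assembler-x64.cc hunks below.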
diff --git a/deps/v8/src/x64/disasm-x64.cc b/deps/v8/src/x64/disasm-x64.cc
index 4b57221571..1282fd8d7e 100644
--- a/deps/v8/src/x64/disasm-x64.cc
+++ b/deps/v8/src/x64/disasm-x64.cc
@@ -12,7 +12,7 @@
#include "src/base/lazy-instance.h"
#include "src/base/v8-fallthrough.h"
#include "src/disasm.h"
-#include "src/macro-assembler.h"
+#include "src/x64/register-x64.h"
#include "src/x64/sse-instr.h"
namespace disasm {
@@ -250,10 +250,9 @@ void InstructionTable::AddJumpConditionalShort() {
}
}
-
-static v8::base::LazyInstance<InstructionTable>::type instruction_table =
- LAZY_INSTANCE_INITIALIZER;
-
+namespace {
+DEFINE_LAZY_LEAKY_OBJECT_GETTER(InstructionTable, GetInstructionTable);
+}  // namespace
static const InstructionDesc cmov_instructions[16] = {
{"cmovo", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false},
@@ -296,7 +295,7 @@ class DisassemblerX64 {
vex_byte1_(0),
vex_byte2_(0),
byte_size_operand_(false),
- instruction_table_(instruction_table.Pointer()) {
+ instruction_table_(GetInstructionTable()) {
tmp_buffer_[0] = '\0';
}
@@ -580,7 +579,7 @@ int DisassemblerX64::PrintRightOperandHelper(
disp < 0 ? -disp : disp);
if (rm == i::kRootRegister.code()) {
// For root-relative accesses, try to append a description.
- TryAppendRootRelativeName(i::kRootRegisterBias + disp);
+ TryAppendRootRelativeName(disp);
}
return (mod == 2) ? 5 : 2;
}
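DEFINE_LAZY_LEAKY_OBJECT_GETTER replaces the base::LazyInstance plumbing with a getter that builds the InstructionTable on first use and never destroys it. A rough stand-in for the idiom, assuming nothing about the macro's actual expansion:

    // Thread-safe since C++11 (magic statics); intentionally leaked so the
    // table remains valid during late, out-of-order process shutdown.
    template <typename T>
    T* GetLeakySingleton() {
      static T* const instance = new T();
      return instance;
    }

    // Mirrors the constructor change above:
    //   instruction_table_(GetLeakySingleton<InstructionTable>())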
diff --git a/deps/v8/src/x64/eh-frame-x64.cc b/deps/v8/src/x64/eh-frame-x64.cc
index ec4fc11289..45f758a774 100644
--- a/deps/v8/src/x64/eh-frame-x64.cc
+++ b/deps/v8/src/x64/eh-frame-x64.cc
@@ -21,9 +21,9 @@ void EhFrameWriter::WriteReturnAddressRegisterCode() {
}
void EhFrameWriter::WriteInitialStateInCie() {
- SetBaseAddressRegisterAndOffset(rsp, kPointerSize);
+ SetBaseAddressRegisterAndOffset(rsp, kSystemPointerSize);
// x64 rip (r16) has no associated Register instance.
- RecordRegisterSavedToStack(kRipDwarfCode, -kPointerSize);
+ RecordRegisterSavedToStack(kRipDwarfCode, -kSystemPointerSize);
}
// static
diff --git a/deps/v8/src/x64/frame-constants-x64.cc b/deps/v8/src/x64/frame-constants-x64.cc
index 553d3ef665..9780bb2d7a 100644
--- a/deps/v8/src/x64/frame-constants-x64.cc
+++ b/deps/v8/src/x64/frame-constants-x64.cc
@@ -4,12 +4,11 @@
#if V8_TARGET_ARCH_X64
-#include "src/assembler.h"
+#include "src/x64/frame-constants-x64.h"
+
#include "src/frame-constants.h"
#include "src/x64/assembler-x64-inl.h"
-#include "src/x64/assembler-x64.h"
-#include "src/x64/frame-constants-x64.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/x64/frame-constants-x64.h b/deps/v8/src/x64/frame-constants-x64.h
index 5658aaebea..21f51a096d 100644
--- a/deps/v8/src/x64/frame-constants-x64.h
+++ b/deps/v8/src/x64/frame-constants-x64.h
@@ -18,13 +18,26 @@ class EntryFrameConstants : public AllStatic {
static constexpr int kXMMRegisterSize = 16;
static constexpr int kXMMRegistersBlockSize =
kXMMRegisterSize * kCalleeSaveXMMRegisters;
+
+ // This is the offset to where JSEntry pushes the current value of
+ // Isolate::c_entry_fp onto the stack.
+ // On x64, there are 7 pushq() and 3 Push() calls between setting up rbp and
+ // pushing the c_entry_fp, plus we manually allocate kXMMRegistersBlockSize
+ // bytes on the stack.
static constexpr int kCallerFPOffset =
- -3 * kPointerSize + -7 * kRegisterSize - kXMMRegistersBlockSize;
+ -3 * kSystemPointerSize + -7 * kRegisterSize - kXMMRegistersBlockSize;
+
+ // Stack offsets for arguments passed to JSEntry.
+ static constexpr int kArgcOffset = 6 * kSystemPointerSize;
+ static constexpr int kArgvOffset = 7 * kSystemPointerSize;
#else
- // We have 3 Push and 5 pushq in the JSEntryStub::GenerateBody.
- static constexpr int kCallerFPOffset = -3 * kPointerSize + -5 * kRegisterSize;
+ // This is the offset to where JSEntry pushes the current value of
+ // Isolate::c_entry_fp onto the stack.
+ // On x64, there are 5 pushq() and 3 Push() calls between setting up rbp and
+ // pushing the c_entry_fp.
+ static constexpr int kCallerFPOffset =
+ -3 * kSystemPointerSize + -5 * kRegisterSize;
#endif
- static constexpr int kArgvOffset = 6 * kPointerSize;
};
class ExitFrameConstants : public TypedFrameConstants {
@@ -33,7 +46,7 @@ class ExitFrameConstants : public TypedFrameConstants {
static constexpr int kCodeOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(1);
DEFINE_TYPED_FRAME_SIZES(2);
- static constexpr int kCallerFPOffset = +0 * kPointerSize;
+ static constexpr int kCallerFPOffset = +0 * kSystemPointerSize;
static constexpr int kCallerPCOffset = kFPOnStackSize;
// FP-relative displacement of the caller's SP. It points just
@@ -52,7 +65,7 @@ class WasmCompileLazyFrameConstants : public TypedFrameConstants {
static constexpr int kWasmInstanceOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(0);
static constexpr int kFixedFrameSizeFromFp =
TypedFrameConstants::kFixedFrameSizeFromFp +
- kNumberOfSavedGpParamRegs * kPointerSize +
+ kNumberOfSavedGpParamRegs * kSystemPointerSize +
kNumberOfSavedFpParamRegs * kSimd128Size;
};
@@ -66,8 +79,8 @@ class JavaScriptFrameConstants : public AllStatic {
StandardFrameConstants::kFunctionOffset;
// Caller SP-relative.
- static constexpr int kParam0Offset = -2 * kPointerSize;
- static constexpr int kReceiverOffset = -1 * kPointerSize;
+ static constexpr int kParam0Offset = -2 * kSystemPointerSize;
+ static constexpr int kReceiverOffset = -1 * kSystemPointerSize;
};
} // namespace internal
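The new comments make the offset derivation checkable: every Push lands one kSystemPointerSize and every pushq one kRegisterSize further below the saved frame pointer, plus any manually reserved block. A compile-time restatement, using local stand-ins for the V8 constants:

    constexpr int kSystemPointerSize = 8;  // stand-in
    constexpr int kRegisterSize = 8;       // stand-in

    constexpr int CallerFPOffset(int num_push, int num_pushq, int extra_bytes) {
      return -num_push * kSystemPointerSize - num_pushq * kRegisterSize -
             extra_bytes;
    }

    static_assert(CallerFPOffset(3, 5, 0) == -64, "non-Windows JSEntry layout");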
diff --git a/deps/v8/src/x64/interface-descriptors-x64.cc b/deps/v8/src/x64/interface-descriptors-x64.cc
index 0115fcf75d..3e14f1e407 100644
--- a/deps/v8/src/x64/interface-descriptors-x64.cc
+++ b/deps/v8/src/x64/interface-descriptors-x64.cc
@@ -6,6 +6,8 @@
#include "src/interface-descriptors.h"
+#include "src/frames.h"
+
namespace v8 {
namespace internal {
@@ -72,12 +74,6 @@ void TypeofDescriptor::InitializePlatformSpecific(
// static
const Register TypeConversionDescriptor::ArgumentRegister() { return rax; }
-void CallFunctionDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {rdi};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
void CallTrampolineDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// rax : number of arguments
@@ -209,10 +205,9 @@ void ArgumentsAdaptorDescriptor::InitializePlatformSpecific(
void ApiCallbackDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
- JavaScriptFrame::context_register(), // callee context
- rbx, // call_data
- rcx, // holder
- rdx, // api_function_address
+ JavaScriptFrame::context_register(), // kTargetContext
+ rdx, // kApiFunctionAddress
+ rcx, // kArgc
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
@@ -264,6 +259,12 @@ void FrameDropperTrampolineDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+void RunMicrotasksEntryDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {arg_reg_1, arg_reg_2};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/x64/macro-assembler-x64.cc b/deps/v8/src/x64/macro-assembler-x64.cc
index 16dc893ab7..1955e80f79 100644
--- a/deps/v8/src/x64/macro-assembler-x64.cc
+++ b/deps/v8/src/x64/macro-assembler-x64.cc
@@ -10,21 +10,25 @@
#include "src/bootstrapper.h"
#include "src/callable.h"
#include "src/code-factory.h"
-#include "src/code-stubs.h"
#include "src/counters.h"
#include "src/debug/debug.h"
#include "src/external-reference-table.h"
#include "src/frames-inl.h"
#include "src/globals.h"
-#include "src/heap/heap-inl.h"
-#include "src/instruction-stream.h"
+#include "src/macro-assembler.h"
#include "src/objects-inl.h"
+#include "src/objects/smi.h"
#include "src/register-configuration.h"
+#include "src/snapshot/embedded-data.h"
#include "src/snapshot/snapshot.h"
#include "src/string-constants.h"
#include "src/x64/assembler-x64.h"
-#include "src/x64/macro-assembler-x64.h" // Cannot be the first include.
+// Satisfy cpplint check, but don't include platform-specific header. It is
+// included recursively via macro-assembler.h.
+#if 0
+#include "src/x64/macro-assembler-x64.h"
+#endif
namespace v8 {
namespace internal {
@@ -37,18 +41,19 @@ Operand StackArgumentsAccessor::GetArgumentOperand(int index) {
displacement_to_last_argument += extra_displacement_to_last_argument_;
if (argument_count_reg_ == no_reg) {
// argument[0] is at base_reg_ + displacement_to_last_argument +
- // (argument_count_immediate_ + receiver - 1) * kPointerSize.
+ // (argument_count_immediate_ + receiver - 1) * kSystemPointerSize.
DCHECK_GT(argument_count_immediate_ + receiver, 0);
- return Operand(
- base_reg_,
- displacement_to_last_argument +
- (argument_count_immediate_ + receiver - 1 - index) * kPointerSize);
+ return Operand(base_reg_,
+ displacement_to_last_argument +
+ (argument_count_immediate_ + receiver - 1 - index) *
+ kSystemPointerSize);
} else {
// argument[0] is at base_reg_ + displacement_to_last_argument +
- // argument_count_reg_ * times_pointer_size + (receiver - 1) * kPointerSize.
- return Operand(
- base_reg_, argument_count_reg_, times_pointer_size,
- displacement_to_last_argument + (receiver - 1 - index) * kPointerSize);
+ // argument_count_reg_ * times_pointer_size + (receiver - 1) *
+ // kSystemPointerSize.
+ return Operand(base_reg_, argument_count_reg_, times_pointer_size,
+ displacement_to_last_argument +
+ (receiver - 1 - index) * kSystemPointerSize);
}
}
@@ -65,82 +70,47 @@ StackArgumentsAccessor::StackArgumentsAccessor(
extra_displacement_to_last_argument_(
extra_displacement_to_last_argument) {}
-MacroAssembler::MacroAssembler(Isolate* isolate,
- const AssemblerOptions& options, void* buffer,
- int size, CodeObjectRequired create_code_object)
- : TurboAssembler(isolate, options, buffer, size, create_code_object) {
- if (create_code_object == CodeObjectRequired::kYes) {
- // Unlike TurboAssembler, which can be used off the main thread and may not
- // allocate, macro assembler creates its own copy of the self-reference
- // marker in order to disambiguate between self-references during nested
- // code generation (e.g.: codegen of the current object triggers stub
- // compilation through CodeStub::GetCode()).
- code_object_ = Handle<HeapObject>::New(
- *isolate->factory()->NewSelfReferenceMarker(), isolate);
- }
-}
-
-static const int64_t kInvalidRootRegisterDelta = -1;
-
-int64_t TurboAssembler::RootRegisterDelta(ExternalReference other) {
- if (predictable_code_size() &&
- (other.address() < reinterpret_cast<Address>(isolate()) ||
- other.address() >= reinterpret_cast<Address>(isolate() + 1))) {
- return kInvalidRootRegisterDelta;
- }
- return RootRegisterOffsetForExternalReference(isolate(), other);
-}
-
void MacroAssembler::Load(Register destination, ExternalReference source) {
if (root_array_available_ && options().enable_root_array_delta_access) {
- int64_t delta = RootRegisterDelta(source);
- if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
+ intptr_t delta = RootRegisterOffsetForExternalReference(isolate(), source);
+ if (is_int32(delta)) {
movp(destination, Operand(kRootRegister, static_cast<int32_t>(delta)));
return;
}
}
// Safe code.
- if (FLAG_embedded_builtins) {
- if (root_array_available_ && options().isolate_independent_code) {
- IndirectLoadExternalReference(kScratchRegister, source);
- movp(destination, Operand(kScratchRegister, 0));
- return;
- }
- }
- if (destination == rax) {
+ if (destination == rax && !options().isolate_independent_code) {
load_rax(source);
} else {
- Move(kScratchRegister, source);
- movp(destination, Operand(kScratchRegister, 0));
+ movp(destination, ExternalReferenceAsOperand(source));
}
}
void MacroAssembler::Store(ExternalReference destination, Register source) {
if (root_array_available_ && options().enable_root_array_delta_access) {
- int64_t delta = RootRegisterDelta(destination);
- if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
+ intptr_t delta =
+ RootRegisterOffsetForExternalReference(isolate(), destination);
+ if (is_int32(delta)) {
movp(Operand(kRootRegister, static_cast<int32_t>(delta)), source);
return;
}
}
// Safe code.
- if (source == rax) {
+ if (source == rax && !options().isolate_independent_code) {
store_rax(destination);
} else {
- Move(kScratchRegister, destination);
- movp(Operand(kScratchRegister, 0), source);
+ movp(ExternalReferenceAsOperand(destination), source);
}
}
void TurboAssembler::LoadFromConstantsTable(Register destination,
int constant_index) {
- DCHECK(isolate()->heap()->RootCanBeTreatedAsConstant(
- RootIndex::kBuiltinsConstantsTable));
+ DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kBuiltinsConstantsTable));
LoadRoot(destination, RootIndex::kBuiltinsConstantsTable);
- movp(destination,
- FieldOperand(destination,
- FixedArray::kHeaderSize + constant_index * kPointerSize));
+ LoadTaggedPointerField(
+ destination,
+ FieldOperand(destination, FixedArray::OffsetOfElementAt(constant_index)));
}
void TurboAssembler::LoadRootRegisterOffset(Register destination,
@@ -160,8 +130,8 @@ void TurboAssembler::LoadRootRelative(Register destination, int32_t offset) {
void TurboAssembler::LoadAddress(Register destination,
ExternalReference source) {
if (root_array_available_ && options().enable_root_array_delta_access) {
- int64_t delta = RootRegisterDelta(source);
- if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
+ intptr_t delta = RootRegisterOffsetForExternalReference(isolate(), source);
+ if (is_int32(delta)) {
leap(destination, Operand(kRootRegister, static_cast<int32_t>(delta)));
return;
}
@@ -176,15 +146,32 @@ void TurboAssembler::LoadAddress(Register destination,
Move(destination, source);
}
-Operand TurboAssembler::ExternalOperand(ExternalReference target,
- Register scratch) {
+Operand TurboAssembler::ExternalReferenceAsOperand(ExternalReference reference,
+ Register scratch) {
if (root_array_available_ && options().enable_root_array_delta_access) {
- int64_t delta = RootRegisterDelta(target);
- if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
+ int64_t delta =
+ RootRegisterOffsetForExternalReference(isolate(), reference);
+ if (is_int32(delta)) {
return Operand(kRootRegister, static_cast<int32_t>(delta));
}
}
- Move(scratch, target);
+ if (root_array_available_ && options().isolate_independent_code) {
+ if (IsAddressableThroughRootRegister(isolate(), reference)) {
+ // Some external references can be efficiently loaded as an offset from
+ // kRootRegister.
+ intptr_t offset =
+ RootRegisterOffsetForExternalReference(isolate(), reference);
+ CHECK(is_int32(offset));
+ return Operand(kRootRegister, static_cast<int32_t>(offset));
+ } else {
+ // Otherwise, do a memory load from the external reference table.
+ movp(scratch, Operand(kRootRegister,
+ RootRegisterOffsetForExternalReferenceTableEntry(
+ isolate(), reference)));
+ return Operand(scratch, 0);
+ }
+ }
+ Move(scratch, reference);
return Operand(scratch, 0);
}
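ExternalReferenceAsOperand folds three addressing strategies into one helper. A toy model of the decision ladder, with invented names standing in for the real predicates:

    #include <cstdint>

    enum class Access { kRootRelative, kTableLoadThenDeref, kMaterialize };

    // Same check order as the assembler code above.
    Access Choose(int64_t root_delta, bool delta_access_enabled,
                  bool isolate_independent, bool addressable_via_root) {
      bool fits = root_delta >= INT32_MIN && root_delta <= INT32_MAX;
      if (delta_access_enabled && fits)
        return Access::kRootRelative;        // Operand(kRootRegister, delta)
      if (isolate_independent && addressable_via_root)
        return Access::kRootRelative;        // same form, CHECKed offset
      if (isolate_independent)
        return Access::kTableLoadThenDeref;  // load table entry, deref scratch
      return Access::kMaterialize;           // Move(scratch, ref); (scratch, 0)
    }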
@@ -195,24 +182,180 @@ void MacroAssembler::PushAddress(ExternalReference source) {
void TurboAssembler::LoadRoot(Register destination, RootIndex index) {
DCHECK(root_array_available_);
- movp(destination, Operand(kRootRegister, RootRegisterOffset(index)));
+ movp(destination,
+ Operand(kRootRegister, RootRegisterOffsetForRootIndex(index)));
}
void MacroAssembler::PushRoot(RootIndex index) {
DCHECK(root_array_available_);
- Push(Operand(kRootRegister, RootRegisterOffset(index)));
+ Push(Operand(kRootRegister, RootRegisterOffsetForRootIndex(index)));
}
void TurboAssembler::CompareRoot(Register with, RootIndex index) {
DCHECK(root_array_available_);
- cmpp(with, Operand(kRootRegister, RootRegisterOffset(index)));
+ if (IsInRange(index, RootIndex::kFirstStrongOrReadOnlyRoot,
+ RootIndex::kLastStrongOrReadOnlyRoot)) {
+ cmp_tagged(with,
+ Operand(kRootRegister, RootRegisterOffsetForRootIndex(index)));
+ } else {
+ // Some smi roots contain system pointer size values like stack limits.
+ cmpp(with, Operand(kRootRegister, RootRegisterOffsetForRootIndex(index)));
+ }
}
void TurboAssembler::CompareRoot(Operand with, RootIndex index) {
DCHECK(root_array_available_);
DCHECK(!with.AddressUsesRegister(kScratchRegister));
LoadRoot(kScratchRegister, index);
- cmpp(with, kScratchRegister);
+ if (IsInRange(index, RootIndex::kFirstStrongOrReadOnlyRoot,
+ RootIndex::kLastStrongOrReadOnlyRoot)) {
+ cmp_tagged(with, kScratchRegister);
+ } else {
+ // Some smi roots contain system pointer size values like stack limits.
+ cmpp(with, kScratchRegister);
+ }
+}
+
+void TurboAssembler::LoadTaggedPointerField(Register destination,
+ Operand field_operand,
+ Register scratch_for_debug) {
+#ifdef V8_COMPRESS_POINTERS
+ DecompressTaggedPointer(destination, field_operand, scratch_for_debug);
+#else
+ movp(destination, field_operand);
+#endif
+}
+
+void TurboAssembler::LoadAnyTaggedField(Register destination,
+ Operand field_operand, Register scratch,
+ Register scratch_for_debug) {
+#ifdef V8_COMPRESS_POINTERS
+ DecompressAnyTagged(destination, field_operand, scratch, scratch_for_debug);
+#else
+ movp(destination, field_operand);
+#endif
+}
+
+void TurboAssembler::PushTaggedPointerField(Operand field_operand,
+ Register scratch,
+ Register scratch_for_debug) {
+#ifdef V8_COMPRESS_POINTERS
+ DCHECK(!AreAliased(scratch, scratch_for_debug));
+ DCHECK(!field_operand.AddressUsesRegister(scratch));
+ DCHECK(!field_operand.AddressUsesRegister(scratch_for_debug));
+ DecompressTaggedPointer(scratch, field_operand, scratch_for_debug);
+ Push(scratch);
+#else
+ Push(field_operand);
+#endif
+}
+
+void TurboAssembler::PushTaggedAnyField(Operand field_operand,
+ Register scratch1, Register scratch2,
+ Register scratch_for_debug) {
+#ifdef V8_COMPRESS_POINTERS
+ DCHECK(!AreAliased(scratch1, scratch2, scratch_for_debug));
+ DCHECK(!field_operand.AddressUsesRegister(scratch1));
+ DCHECK(!field_operand.AddressUsesRegister(scratch2));
+ DCHECK(!field_operand.AddressUsesRegister(scratch_for_debug));
+ DecompressAnyTagged(scratch1, field_operand, scratch2, scratch_for_debug);
+ Push(scratch1);
+#else
+ Push(field_operand);
+#endif
+}
+
+void TurboAssembler::SmiUntagField(Register dst, Operand src) {
+ SmiUntag(dst, src);
+}
+
+void TurboAssembler::StoreTaggedField(Operand dst_field_operand,
+ Immediate value) {
+ movp(dst_field_operand, value);
+}
+
+void TurboAssembler::StoreTaggedField(Operand dst_field_operand,
+ Register value) {
+ movp(dst_field_operand, value);
+}
+
+void TurboAssembler::DecompressTaggedSigned(Register destination,
+ Operand field_operand,
+ Register scratch_for_debug) {
+ DCHECK(!AreAliased(destination, scratch_for_debug));
+ RecordComment("[ DecompressTaggedSigned");
+ if (DEBUG_BOOL && scratch_for_debug.is_valid()) {
+ Register expected_value = scratch_for_debug;
+ movq(expected_value, field_operand);
+ movsxlq(destination, expected_value);
+ Label check_passed;
+ cmpq(destination, expected_value);
+ j(equal, &check_passed);
+ RecordComment("DecompressTaggedSigned failed");
+ int3();
+ bind(&check_passed);
+ } else {
+ movsxlq(destination, field_operand);
+ }
+ RecordComment("]");
+}
+
+void TurboAssembler::DecompressTaggedPointer(Register destination,
+ Operand field_operand,
+ Register scratch_for_debug) {
+ DCHECK(!AreAliased(destination, scratch_for_debug));
+ RecordComment("[ DecompressTaggedPointer");
+ if (DEBUG_BOOL && scratch_for_debug.is_valid()) {
+ Register expected_value = scratch_for_debug;
+ movq(expected_value, field_operand);
+ movsxlq(destination, expected_value);
+ addq(destination, kRootRegister);
+ Label check_passed;
+ cmpq(destination, expected_value);
+ j(equal, &check_passed);
+ RecordComment("DecompressTaggedPointer failed");
+ int3();
+ bind(&check_passed);
+ } else {
+ movsxlq(destination, field_operand);
+ addq(destination, kRootRegister);
+ }
+ RecordComment("]");
+}
+
+void TurboAssembler::DecompressAnyTagged(Register destination,
+ Operand field_operand,
+ Register scratch,
+ Register scratch_for_debug) {
+ DCHECK(!AreAliased(destination, scratch, scratch_for_debug));
+ RecordComment("[ DecompressAnyTagged");
+ Register expected_value = scratch_for_debug;
+ if (DEBUG_BOOL && expected_value.is_valid()) {
+ movq(expected_value, field_operand);
+ movsxlq(destination, expected_value);
+ } else {
+ movsxlq(destination, field_operand);
+ }
+ // Branchlessly compute |masked_root|:
+ // masked_root = HAS_SMI_TAG(destination) ? 0 : kRootRegister;
+ STATIC_ASSERT((kSmiTagSize == 1) && (kSmiTag < 32));
+ Register masked_root = scratch;
+ movl(masked_root, destination);
+ andl(masked_root, Immediate(kSmiTagMask));
+ negq(masked_root);
+ andq(masked_root, kRootRegister);
+ // Now this add operation will either leave the value unchanged if it is a smi
+ // or add the isolate root if it is a heap object.
+ addq(destination, masked_root);
+ if (DEBUG_BOOL && expected_value.is_valid()) {
+ Label check_passed;
+ cmpq(destination, expected_value);
+ j(equal, &check_passed);
+ RecordComment("Decompression failed: Tagged");
+ int3();
+ bind(&check_passed);
+ }
+ RecordComment("]");
}
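DecompressAnyTagged avoids branching on the tag bit: negating the low bit yields an all-ones mask exactly for heap objects (tag 1, smi tag 0), so the masked root add is a no-op for smis. A scalar C++ model, with stand-ins for the V8 constants:

    #include <cstdint>

    uint64_t DecompressAnyTagged(uint32_t compressed, uint64_t isolate_root) {
      constexpr uint64_t kSmiTagMask = 1;
      // movsxlq: sign-extend the 32-bit compressed value.
      uint64_t value = static_cast<uint64_t>(
          static_cast<int64_t>(static_cast<int32_t>(compressed)));
      // negq + andq: 0 for a smi, isolate_root for a heap object.
      uint64_t masked_root = (0 - (value & kSmiTagMask)) & isolate_root;
      return value + masked_root;
    }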
void MacroAssembler::RecordWriteField(Register object, int offset,
@@ -230,13 +373,13 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
}
// Although the object register is tagged, the offset is relative to the start
- // of the object, so so offset must be a multiple of kPointerSize.
- DCHECK(IsAligned(offset, kPointerSize));
+ // of the object, so the offset must be a multiple of kTaggedSize.
+ DCHECK(IsAligned(offset, kTaggedSize));
leap(dst, FieldOperand(object, offset));
if (emit_debug_code()) {
Label ok;
- testb(dst, Immediate(kPointerSize - 1));
+ testb(dst, Immediate(kTaggedSize - 1));
j(zero, &ok, Label::kNear);
int3();
bind(&ok);
@@ -276,20 +419,39 @@ void TurboAssembler::RestoreRegisters(RegList registers) {
void TurboAssembler::CallRecordWriteStub(
Register object, Register address,
RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode) {
- Callable const callable =
- Builtins::CallableFor(isolate(), Builtins::kRecordWrite);
- RegList registers = callable.descriptor().allocatable_registers();
+ CallRecordWriteStub(
+ object, address, remembered_set_action, fp_mode,
+ isolate()->builtins()->builtin_handle(Builtins::kRecordWrite),
+ kNullAddress);
+}
+
+void TurboAssembler::CallRecordWriteStub(
+ Register object, Register address,
+ RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode,
+ Address wasm_target) {
+ CallRecordWriteStub(object, address, remembered_set_action, fp_mode,
+ Handle<Code>::null(), wasm_target);
+}
+
+void TurboAssembler::CallRecordWriteStub(
+ Register object, Register address,
+ RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode,
+ Handle<Code> code_target, Address wasm_target) {
+ DCHECK_NE(code_target.is_null(), wasm_target == kNullAddress);
+
+ RecordWriteDescriptor descriptor;
+ RegList registers = descriptor.allocatable_registers();
SaveRegisters(registers);
- Register object_parameter(callable.descriptor().GetRegisterParameter(
- RecordWriteDescriptor::kObject));
+ Register object_parameter(
+ descriptor.GetRegisterParameter(RecordWriteDescriptor::kObject));
Register slot_parameter(
- callable.descriptor().GetRegisterParameter(RecordWriteDescriptor::kSlot));
- Register remembered_set_parameter(callable.descriptor().GetRegisterParameter(
- RecordWriteDescriptor::kRememberedSet));
- Register fp_mode_parameter(callable.descriptor().GetRegisterParameter(
- RecordWriteDescriptor::kFPMode));
+ descriptor.GetRegisterParameter(RecordWriteDescriptor::kSlot));
+ Register remembered_set_parameter(
+ descriptor.GetRegisterParameter(RecordWriteDescriptor::kRememberedSet));
+ Register fp_mode_parameter(
+ descriptor.GetRegisterParameter(RecordWriteDescriptor::kFPMode));
// Prepare argument registers for calling RecordWrite
// slot_parameter <= address
@@ -310,15 +472,20 @@ void TurboAssembler::CallRecordWriteStub(
xchgq(slot_parameter, object_parameter);
}
- Smi* smi_rsa = Smi::FromEnum(remembered_set_action);
- Smi* smi_fm = Smi::FromEnum(fp_mode);
+ Smi smi_rsa = Smi::FromEnum(remembered_set_action);
+ Smi smi_fm = Smi::FromEnum(fp_mode);
Move(remembered_set_parameter, smi_rsa);
if (smi_rsa != smi_fm) {
Move(fp_mode_parameter, smi_fm);
} else {
movq(fp_mode_parameter, remembered_set_parameter);
}
- Call(callable.code(), RelocInfo::CODE_TARGET);
+ if (code_target.is_null()) {
+ // Use {near_call} for direct Wasm call within a module.
+ near_call(wasm_target, RelocInfo::WASM_STUB_CALL);
+ } else {
+ Call(code_target, RelocInfo::CODE_TARGET);
+ }
RestoreRegisters(registers);
}
@@ -339,7 +506,7 @@ void MacroAssembler::RecordWrite(Register object, Register address,
if (emit_debug_code()) {
Label ok;
- cmpp(value, Operand(address, 0));
+ cmp_tagged(value, Operand(address, 0));
j(equal, &ok, Label::kNear);
int3();
bind(&ok);
@@ -401,7 +568,7 @@ void TurboAssembler::Check(Condition cc, AbortReason reason) {
void TurboAssembler::CheckStackAlignment() {
int frame_alignment = base::OS::ActivationFrameAlignment();
int frame_alignment_mask = frame_alignment - 1;
- if (frame_alignment > kPointerSize) {
+ if (frame_alignment > kSystemPointerSize) {
DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
Label alignment_as_expected;
testp(rsp, Immediate(frame_alignment_mask));
@@ -449,25 +616,6 @@ void TurboAssembler::Abort(AbortReason reason) {
int3();
}
-void TurboAssembler::CallStubDelayed(CodeStub* stub) {
- DCHECK(AllowThisStubCall(stub)); // Calls are not allowed in some stubs
- call(stub);
-}
-
-void MacroAssembler::CallStub(CodeStub* stub) {
- DCHECK(AllowThisStubCall(stub)); // Calls are not allowed in some stubs
- Call(stub->GetCode(), RelocInfo::CODE_TARGET);
-}
-
-
-void MacroAssembler::TailCallStub(CodeStub* stub) {
- Jump(stub->GetCode(), RelocInfo::CODE_TARGET);
-}
-
-bool TurboAssembler::AllowThisStubCall(CodeStub* stub) {
- return has_frame() || !stub->SometimesSetsUpAFrame();
-}
-
void TurboAssembler::CallRuntimeWithCEntry(Runtime::FunctionId fid,
Register centry) {
const Runtime::Function* f = Runtime::FunctionForId(fid);
@@ -478,8 +626,8 @@ void TurboAssembler::CallRuntimeWithCEntry(Runtime::FunctionId fid,
Set(rax, f->nargs);
LoadAddress(rbx, ExternalReference::Create(f));
DCHECK(!AreAliased(centry, rax, rbx));
- addp(rcx, Immediate(Code::kHeaderSize - kHeapObjectTag));
- Call(centry);
+ DCHECK(centry == rcx);
+ CallCodeObject(centry);
}
void MacroAssembler::CallRuntime(const Runtime::Function* f,
@@ -542,7 +690,7 @@ int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
for (int i = 0; i < kNumberOfSavedRegs; i++) {
Register reg = saved_regs[i];
if (reg != exclusion1 && reg != exclusion2 && reg != exclusion3) {
- bytes += kPointerSize;
+ bytes += kSystemPointerSize;
}
}
@@ -564,7 +712,7 @@ int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
Register reg = saved_regs[i];
if (reg != exclusion1 && reg != exclusion2 && reg != exclusion3) {
pushq(reg);
- bytes += kPointerSize;
+ bytes += kSystemPointerSize;
}
}
@@ -599,7 +747,7 @@ int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
Register reg = saved_regs[i];
if (reg != exclusion1 && reg != exclusion2 && reg != exclusion3) {
popq(reg);
- bytes += kPointerSize;
+ bytes += kSystemPointerSize;
}
}
@@ -978,7 +1126,7 @@ void TurboAssembler::Set(Register dst, int64_t x) {
}
void TurboAssembler::Set(Operand dst, intptr_t x) {
- if (kPointerSize == kInt64Size) {
+ if (kSystemPointerSize == kInt64Size) {
if (is_int32(x)) {
movp(dst, Immediate(static_cast<int32_t>(x)));
} else {
@@ -994,7 +1142,7 @@ void TurboAssembler::Set(Operand dst, intptr_t x) {
// ----------------------------------------------------------------------------
// Smi tagging, untagging and tag detection.
-Register TurboAssembler::GetSmiConstant(Smi* source) {
+Register TurboAssembler::GetSmiConstant(Smi source) {
STATIC_ASSERT(kSmiTag == 0);
int value = source->value();
if (value == 0) {
@@ -1005,13 +1153,13 @@ Register TurboAssembler::GetSmiConstant(Smi* source) {
return kScratchRegister;
}
-void TurboAssembler::Move(Register dst, Smi* source) {
+void TurboAssembler::Move(Register dst, Smi source) {
STATIC_ASSERT(kSmiTag == 0);
int value = source->value();
if (value == 0) {
xorl(dst, dst);
} else {
- Move(dst, reinterpret_cast<Address>(source), RelocInfo::NONE);
+ Move(dst, source.ptr(), RelocInfo::NONE);
}
}
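Move(dst, Smi) can write source.ptr() directly because a smi's entire encoded word is just the payload shifted into place. Under the default x64 settings assumed here (kSmiTag == 0, kSmiTagSize == 1, kSmiShiftSize == 31), that is a plain shift by 32:

    #include <cstdint>

    constexpr uint64_t SmiPtr(int32_t value) {
      // kSmiTagSize + kSmiShiftSize == 32 in this configuration.
      return static_cast<uint64_t>(static_cast<int64_t>(value)) << 32;
    }

    static_assert(SmiPtr(0) == 0, "zero smi is all zero bits, hence the xorl");
    static_assert(SmiPtr(-1) == 0xFFFFFFFF00000000ull, "payload sign-extends");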
@@ -1061,36 +1209,34 @@ void MacroAssembler::SmiCompare(Register smi1, Register smi2) {
cmpp(smi1, smi2);
}
-
-void MacroAssembler::SmiCompare(Register dst, Smi* src) {
+void MacroAssembler::SmiCompare(Register dst, Smi src) {
AssertSmi(dst);
Cmp(dst, src);
}
-
-void MacroAssembler::Cmp(Register dst, Smi* src) {
+void MacroAssembler::Cmp(Register dst, Smi src) {
DCHECK_NE(dst, kScratchRegister);
if (src->value() == 0) {
- testp(dst, dst);
+ test_tagged(dst, dst);
} else {
Register constant_reg = GetSmiConstant(src);
- cmpp(dst, constant_reg);
+ cmp_tagged(dst, constant_reg);
}
}
void MacroAssembler::SmiCompare(Register dst, Operand src) {
AssertSmi(dst);
AssertSmi(src);
- cmpp(dst, src);
+ cmp_tagged(dst, src);
}
void MacroAssembler::SmiCompare(Operand dst, Register src) {
AssertSmi(dst);
AssertSmi(src);
- cmpp(dst, src);
+ cmp_tagged(dst, src);
}
-void MacroAssembler::SmiCompare(Operand dst, Smi* src) {
+void MacroAssembler::SmiCompare(Operand dst, Smi src) {
AssertSmi(dst);
if (SmiValuesAre32Bits()) {
cmpl(Operand(dst, kSmiShift / kBitsPerByte), Immediate(src->value()));
@@ -1100,11 +1246,11 @@ void MacroAssembler::SmiCompare(Operand dst, Smi* src) {
}
}
-void MacroAssembler::Cmp(Operand dst, Smi* src) {
+void MacroAssembler::Cmp(Operand dst, Smi src) {
// The Operand cannot use the smi register.
Register smi_reg = GetSmiConstant(src);
DCHECK(!dst.AddressUsesRegister(smi_reg));
- cmpp(dst, smi_reg);
+ cmp_tagged(dst, smi_reg);
}
@@ -1139,14 +1285,14 @@ void MacroAssembler::JumpIfNotSmi(Operand src, Label* on_not_smi,
j(NegateCondition(smi), on_not_smi, near_jump);
}
-void MacroAssembler::SmiAddConstant(Operand dst, Smi* constant) {
+void MacroAssembler::SmiAddConstant(Operand dst, Smi constant) {
if (constant->value() != 0) {
if (SmiValuesAre32Bits()) {
addl(Operand(dst, kSmiShift / kBitsPerByte),
Immediate(constant->value()));
} else {
DCHECK(SmiValuesAre31Bits());
- if (kPointerSize == kInt64Size) {
+ if (kSystemPointerSize == kInt64Size) {
// Sign-extend value after addition
movl(kScratchRegister, dst);
addl(kScratchRegister, Immediate(constant));
@@ -1179,7 +1325,7 @@ SmiIndex MacroAssembler::SmiToIndex(Register dst,
} else {
DCHECK(SmiValuesAre31Bits());
if (dst != src) {
- movp(dst, src);
+ mov_tagged(dst, src);
}
// We have to sign extend the index register to 64-bit as the SMI might
// be negative.
@@ -1196,15 +1342,15 @@ SmiIndex MacroAssembler::SmiToIndex(Register dst,
}
}
-void TurboAssembler::Push(Smi* source) {
- intptr_t smi = reinterpret_cast<intptr_t>(source);
+void TurboAssembler::Push(Smi source) {
+ intptr_t smi = static_cast<intptr_t>(source.ptr());
if (is_int32(smi)) {
Push(Immediate(static_cast<int32_t>(smi)));
return;
}
int first_byte_set = base::bits::CountTrailingZeros64(smi) / 8;
int last_byte_set = (63 - base::bits::CountLeadingZeros64(smi)) / 8;
- if (first_byte_set == last_byte_set && kPointerSize == kInt64Size) {
+ if (first_byte_set == last_byte_set && kSystemPointerSize == kInt64Size) {
// This sequence has only 7 bytes, compared to the 12 bytes below.
Push(Immediate(0));
movb(Operand(rsp, first_byte_set),
@@ -1279,23 +1425,23 @@ void TurboAssembler::Move(XMMRegister dst, uint64_t src) {
// ----------------------------------------------------------------------------
void MacroAssembler::Absps(XMMRegister dst) {
- Andps(dst,
- ExternalOperand(ExternalReference::address_of_float_abs_constant()));
+ Andps(dst, ExternalReferenceAsOperand(
+ ExternalReference::address_of_float_abs_constant()));
}
void MacroAssembler::Negps(XMMRegister dst) {
- Xorps(dst,
- ExternalOperand(ExternalReference::address_of_float_neg_constant()));
+ Xorps(dst, ExternalReferenceAsOperand(
+ ExternalReference::address_of_float_neg_constant()));
}
void MacroAssembler::Abspd(XMMRegister dst) {
- Andps(dst,
- ExternalOperand(ExternalReference::address_of_double_abs_constant()));
+ Andps(dst, ExternalReferenceAsOperand(
+ ExternalReference::address_of_double_abs_constant()));
}
void MacroAssembler::Negpd(XMMRegister dst) {
- Xorps(dst,
- ExternalOperand(ExternalReference::address_of_double_neg_constant()));
+ Xorps(dst, ExternalReferenceAsOperand(
+ ExternalReference::address_of_double_neg_constant()));
}
void MacroAssembler::Cmp(Register dst, Handle<Object> source) {
@@ -1304,7 +1450,7 @@ void MacroAssembler::Cmp(Register dst, Handle<Object> source) {
Cmp(dst, Smi::cast(*source));
} else {
Move(kScratchRegister, Handle<HeapObject>::cast(source));
- cmpp(dst, kScratchRegister);
+ cmp_tagged(dst, kScratchRegister);
}
}
@@ -1314,7 +1460,7 @@ void MacroAssembler::Cmp(Operand dst, Handle<Object> source) {
Cmp(dst, Smi::cast(*source));
} else {
Move(kScratchRegister, Handle<HeapObject>::cast(source));
- cmpp(dst, kScratchRegister);
+ cmp_tagged(dst, kScratchRegister);
}
}
@@ -1348,7 +1494,7 @@ void TurboAssembler::MoveStringConstant(Register result,
void MacroAssembler::Drop(int stack_elements) {
if (stack_elements > 0) {
- addp(rsp, Immediate(stack_elements * kPointerSize));
+ addp(rsp, Immediate(stack_elements * kSystemPointerSize));
}
}
@@ -1356,7 +1502,7 @@ void MacroAssembler::Drop(int stack_elements) {
void MacroAssembler::DropUnderReturnAddress(int stack_elements,
Register scratch) {
DCHECK_GT(stack_elements, 0);
- if (kPointerSize == kInt64Size && stack_elements == 1) {
+ if (kSystemPointerSize == kInt64Size && stack_elements == 1) {
popq(MemOperand(rsp, 0));
return;
}
@@ -1367,7 +1513,7 @@ void MacroAssembler::DropUnderReturnAddress(int stack_elements,
}
void TurboAssembler::Push(Register src) {
- if (kPointerSize == kInt64Size) {
+ if (kSystemPointerSize == kInt64Size) {
pushq(src);
} else {
// x32 uses 64-bit push for rbp in the prologue.
@@ -1378,7 +1524,7 @@ void TurboAssembler::Push(Register src) {
}
void TurboAssembler::Push(Operand src) {
- if (kPointerSize == kInt64Size) {
+ if (kSystemPointerSize == kInt64Size) {
pushq(src);
} else {
movp(kScratchRegister, src);
@@ -1388,7 +1534,7 @@ void TurboAssembler::Push(Operand src) {
}
void MacroAssembler::PushQuad(Operand src) {
- if (kPointerSize == kInt64Size) {
+ if (kSystemPointerSize == kInt64Size) {
pushq(src);
} else {
movp(kScratchRegister, src);
@@ -1397,7 +1543,7 @@ void MacroAssembler::PushQuad(Operand src) {
}
void TurboAssembler::Push(Immediate value) {
- if (kPointerSize == kInt64Size) {
+ if (kSystemPointerSize == kInt64Size) {
pushq(value);
} else {
leal(rsp, Operand(rsp, -4));
@@ -1407,7 +1553,7 @@ void TurboAssembler::Push(Immediate value) {
void MacroAssembler::PushImm32(int32_t imm32) {
- if (kPointerSize == kInt64Size) {
+ if (kSystemPointerSize == kInt64Size) {
pushq_imm32(imm32);
} else {
leal(rsp, Operand(rsp, -4));
@@ -1417,7 +1563,7 @@ void MacroAssembler::PushImm32(int32_t imm32) {
void MacroAssembler::Pop(Register dst) {
- if (kPointerSize == kInt64Size) {
+ if (kSystemPointerSize == kInt64Size) {
popq(dst);
} else {
// x32 uses 64-bit pop for rbp in the epilogue.
@@ -1428,7 +1574,7 @@ void MacroAssembler::Pop(Register dst) {
}
void MacroAssembler::Pop(Operand dst) {
- if (kPointerSize == kInt64Size) {
+ if (kSystemPointerSize == kInt64Size) {
popq(dst);
} else {
Register scratch = dst.AddressUsesRegister(kScratchRegister)
@@ -1444,7 +1590,7 @@ void MacroAssembler::Pop(Operand dst) {
}
void MacroAssembler::PopQuad(Operand dst) {
- if (kPointerSize == kInt64Size) {
+ if (kSystemPointerSize == kInt64Size) {
popq(dst);
} else {
popq(kScratchRegister);
@@ -1458,7 +1604,7 @@ void TurboAssembler::Jump(ExternalReference ext) {
}
void TurboAssembler::Jump(Operand op) {
- if (kPointerSize == kInt64Size) {
+ if (kSystemPointerSize == kInt64Size) {
jmp(op);
} else {
movp(kScratchRegister, op);
@@ -1473,29 +1619,17 @@ void TurboAssembler::Jump(Address destination, RelocInfo::Mode rmode) {
void TurboAssembler::Jump(Handle<Code> code_object, RelocInfo::Mode rmode,
Condition cc) {
-// TODO(X64): Inline this
-if (FLAG_embedded_builtins) {
- if (root_array_available_ && options().isolate_independent_code &&
- !Builtins::IsIsolateIndependentBuiltin(*code_object)) {
- // Calls to embedded targets are initially generated as standard
- // pc-relative calls below. When creating the embedded blob, call offsets
- // are patched up to point directly to the off-heap instruction start.
- // Note: It is safe to dereference code_object above since code generation
- // for builtins and code stubs happens on the main thread.
- Label skip;
- if (cc != always) {
- if (cc == never) return;
- j(NegateCondition(cc), &skip, Label::kNear);
- }
- IndirectLoadConstant(kScratchRegister, code_object);
- leap(kScratchRegister, FieldOperand(kScratchRegister, Code::kHeaderSize));
- jmp(kScratchRegister);
- bind(&skip);
- return;
- } else if (options().inline_offheap_trampolines) {
+ DCHECK_IMPLIES(options().isolate_independent_code,
+ Builtins::IsIsolateIndependentBuiltin(*code_object));
+ if (options().inline_offheap_trampolines) {
int builtin_index = Builtins::kNoBuiltinId;
if (isolate()->builtins()->IsBuiltinHandle(code_object, &builtin_index) &&
Builtins::IsIsolateIndependent(builtin_index)) {
+ Label skip;
+ if (cc != always) {
+ if (cc == never) return;
+ j(NegateCondition(cc), &skip, Label::kNear);
+ }
// Inline the trampoline.
RecordCommentForOffHeapTrampoline(builtin_index);
CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
@@ -1503,11 +1637,11 @@ if (FLAG_embedded_builtins) {
Address entry = d.InstructionStartOfBuiltin(builtin_index);
Move(kScratchRegister, entry, RelocInfo::OFF_HEAP_TARGET);
jmp(kScratchRegister);
+ bind(&skip);
return;
}
}
-}
-j(cc, code_object, rmode);
+ j(cc, code_object, rmode);
}
void MacroAssembler::JumpToInstructionStream(Address entry) {
@@ -1521,7 +1655,7 @@ void TurboAssembler::Call(ExternalReference ext) {
}
void TurboAssembler::Call(Operand op) {
- if (kPointerSize == kInt64Size && !CpuFeatures::IsSupported(ATOM)) {
+ if (kSystemPointerSize == kInt64Size && !CpuFeatures::IsSupported(ATOM)) {
call(op);
} else {
movp(kScratchRegister, op);
@@ -1535,37 +1669,103 @@ void TurboAssembler::Call(Address destination, RelocInfo::Mode rmode) {
}
void TurboAssembler::Call(Handle<Code> code_object, RelocInfo::Mode rmode) {
- if (FLAG_embedded_builtins) {
- if (root_array_available_ && options().isolate_independent_code &&
- !Builtins::IsIsolateIndependentBuiltin(*code_object)) {
- // Calls to embedded targets are initially generated as standard
- // pc-relative calls below. When creating the embedded blob, call offsets
- // are patched up to point directly to the off-heap instruction start.
- // Note: It is safe to dereference code_object above since code generation
- // for builtins and code stubs happens on the main thread.
- IndirectLoadConstant(kScratchRegister, code_object);
- leap(kScratchRegister, FieldOperand(kScratchRegister, Code::kHeaderSize));
+ DCHECK_IMPLIES(options().isolate_independent_code,
+ Builtins::IsIsolateIndependentBuiltin(*code_object));
+ if (options().inline_offheap_trampolines) {
+ int builtin_index = Builtins::kNoBuiltinId;
+ if (isolate()->builtins()->IsBuiltinHandle(code_object, &builtin_index) &&
+ Builtins::IsIsolateIndependent(builtin_index)) {
+ // Inline the trampoline.
+ RecordCommentForOffHeapTrampoline(builtin_index);
+ CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
+ EmbeddedData d = EmbeddedData::FromBlob();
+ Address entry = d.InstructionStartOfBuiltin(builtin_index);
+ Move(kScratchRegister, entry, RelocInfo::OFF_HEAP_TARGET);
call(kScratchRegister);
return;
- } else if (options().inline_offheap_trampolines) {
- int builtin_index = Builtins::kNoBuiltinId;
- if (isolate()->builtins()->IsBuiltinHandle(code_object, &builtin_index) &&
- Builtins::IsIsolateIndependent(builtin_index)) {
- // Inline the trampoline.
- RecordCommentForOffHeapTrampoline(builtin_index);
- CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
- EmbeddedData d = EmbeddedData::FromBlob();
- Address entry = d.InstructionStartOfBuiltin(builtin_index);
- Move(kScratchRegister, entry, RelocInfo::OFF_HEAP_TARGET);
- call(kScratchRegister);
- return;
- }
}
}
DCHECK(RelocInfo::IsCodeTarget(rmode));
call(code_object, rmode);
}
+void TurboAssembler::CallBuiltinPointer(Register builtin_pointer) {
+#if defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH)
+ STATIC_ASSERT(kSystemPointerSize == 8);
+ STATIC_ASSERT(kSmiShiftSize == 0);
+ STATIC_ASSERT(kSmiTagSize == 1);
+ STATIC_ASSERT(kSmiTag == 0);
+
+ // The builtin_pointer register contains the builtin index as a Smi.
+ // Untagging is folded into the indexing operand below (we use times_4 instead
+ // of times_8 since smis are already shifted by one).
+ Call(Operand(kRootRegister, builtin_pointer, times_4,
+ IsolateData::builtin_entry_table_offset()));
+#else  // V8_COMPRESS_POINTERS || V8_31BIT_SMIS_ON_64BIT_ARCH
+ STATIC_ASSERT(kSystemPointerSize == 8);
+ STATIC_ASSERT(kSmiShiftSize == 31);
+ STATIC_ASSERT(kSmiTagSize == 1);
+ STATIC_ASSERT(kSmiTag == 0);
+
+ // The builtin_pointer register contains the builtin index as a Smi.
+ SmiUntag(builtin_pointer, builtin_pointer);
+ Call(Operand(kRootRegister, builtin_pointer, times_8,
+ IsolateData::builtin_entry_table_offset()));
+#endif // defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH)
+}
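In the second branch the smi carries a 31-bit shift and must be untagged before scaling by 8; in the first (compressed / 31-bit-smi) branch the index is only shifted by the one tag bit, so the untag folds into the addressing scale. A one-line restatement of that fold:

    #include <cstdint>

    // With 31-bit smis the register holds (index << 1); scaling by 4 gives
    // (index << 1) * 4 == index * 8, exactly one 8-byte table entry.
    constexpr uint64_t EntryOffset(uint64_t smi_encoded_index) {
      return smi_encoded_index * 4;
    }

    static_assert(EntryOffset(7u << 1) == 7u * 8, "untag folded into the scale");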
+
+void TurboAssembler::LoadCodeObjectEntry(Register destination,
+ Register code_object) {
+ // Code objects are called differently depending on whether we are generating
+ // builtin code (which will later be embedded into the binary) or compiling
+ // user JS code at runtime.
+ // * Builtin code runs in --jitless mode and thus must not call into on-heap
+ // Code targets. Instead, we dispatch through the builtins entry table.
+ // * Codegen at runtime does not have this restriction and we can use the
+ // shorter, branchless instruction sequence. The assumption here is that
+ // targets are usually generated code and not builtin Code objects.
+
+ if (options().isolate_independent_code) {
+ DCHECK(root_array_available());
+ Label if_code_is_builtin, out;
+
+ // Check whether the Code object is a builtin. If so, call its (off-heap)
+ // entry point directly without going through the (on-heap) trampoline.
+ // Otherwise, just call the Code object as always.
+ cmpl(FieldOperand(code_object, Code::kBuiltinIndexOffset),
+ Immediate(Builtins::kNoBuiltinId));
+ j(not_equal, &if_code_is_builtin);
+
+ // A non-builtin Code object, the entry point is at
+ // Code::raw_instruction_start().
+ Move(destination, code_object);
+ addp(destination, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ jmp(&out);
+
+ // A builtin Code object, the entry point is loaded from the builtin entry
+ // table.
+ bind(&if_code_is_builtin);
+ movl(destination, FieldOperand(code_object, Code::kBuiltinIndexOffset));
+ movp(destination, Operand(kRootRegister, destination, times_pointer_size,
+ IsolateData::builtin_entry_table_offset()));
+
+ bind(&out);
+ } else {
+ Move(destination, code_object);
+ addp(destination, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ }
+}
+
+void TurboAssembler::CallCodeObject(Register code_object) {
+ LoadCodeObjectEntry(code_object, code_object);
+ call(code_object);
+}
+
+void TurboAssembler::JumpCodeObject(Register code_object) {
+ LoadCodeObjectEntry(code_object, code_object);
+ jmp(code_object);
+}
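LoadCodeObjectEntry's isolate-independent path reduces to one comparison and two loads. A toy C++ restatement; the constants and layout here are stand-ins, not the runtime's API:

    #include <cstdint>

    constexpr int kNoBuiltinId = -1;   // stand-in for Builtins::kNoBuiltinId
    constexpr int kHeaderSize = 0x40;  // stand-in for Code::kHeaderSize
    constexpr int kHeapObjectTag = 1;

    uint64_t CodeEntry(uint64_t tagged_code_ptr, int builtin_index,
                       const uint64_t* builtin_entry_table) {
      if (builtin_index == kNoBuiltinId) {
        // Regular code object: the entry point follows the header.
        return tagged_code_ptr + kHeaderSize - kHeapObjectTag;
      }
      // Builtin: dispatch through the off-heap entry table.
      return builtin_entry_table[builtin_index];
    }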
+
void TurboAssembler::RetpolineCall(Register reg) {
Label setup_return, setup_target, inner_indirect_branch, capture_spec;
@@ -1636,7 +1836,6 @@ void TurboAssembler::Pinsrd(XMMRegister dst, Register src, int8_t imm8) {
}
void TurboAssembler::Pinsrd(XMMRegister dst, Operand src, int8_t imm8) {
- DCHECK(imm8 == 0 || imm8 == 1);
if (CpuFeatures::IsSupported(SSE4_1)) {
CpuFeatureScope sse_scope(this, SSE4_1);
pinsrd(dst, src, imm8);
@@ -1816,16 +2015,16 @@ void MacroAssembler::Pushad() {
Push(r15);
STATIC_ASSERT(12 == kNumSafepointSavedRegisters);
// Use lea for symmetry with Popad.
- int sp_delta =
- (kNumSafepointRegisters - kNumSafepointSavedRegisters) * kPointerSize;
+ int sp_delta = (kNumSafepointRegisters - kNumSafepointSavedRegisters) *
+ kSystemPointerSize;
leap(rsp, Operand(rsp, -sp_delta));
}
void MacroAssembler::Popad() {
// Popad must not change the flags, so use lea instead of addq.
- int sp_delta =
- (kNumSafepointRegisters - kNumSafepointSavedRegisters) * kPointerSize;
+ int sp_delta = (kNumSafepointRegisters - kNumSafepointSavedRegisters) *
+ kSystemPointerSize;
leap(rsp, Operand(rsp, sp_delta));
Pop(r15);
Pop(r14);
@@ -1866,7 +2065,7 @@ MacroAssembler::kSafepointPushRegisterIndices[Register::kNumRegisters] = {
void MacroAssembler::PushStackHandler() {
// Adjust this code if not the case.
- STATIC_ASSERT(StackHandlerConstants::kSize == 2 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kSize == 2 * kSystemPointerSize);
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
Push(Immediate(0)); // Padding.
@@ -1874,10 +2073,10 @@ void MacroAssembler::PushStackHandler() {
// Link the current handler as the next handler.
ExternalReference handler_address =
ExternalReference::Create(IsolateAddressId::kHandlerAddress, isolate());
- Push(ExternalOperand(handler_address));
+ Push(ExternalReferenceAsOperand(handler_address));
// Set this new handler as the current one.
- movp(ExternalOperand(handler_address), rsp);
+ movp(ExternalReferenceAsOperand(handler_address), rsp);
}
@@ -1885,8 +2084,8 @@ void MacroAssembler::PopStackHandler() {
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
ExternalReference handler_address =
ExternalReference::Create(IsolateAddressId::kHandlerAddress, isolate());
- Pop(ExternalOperand(handler_address));
- addp(rsp, Immediate(StackHandlerConstants::kSize - kPointerSize));
+ Pop(ExternalReferenceAsOperand(handler_address));
+ addp(rsp, Immediate(StackHandlerConstants::kSize - kSystemPointerSize));
}
void TurboAssembler::Ret() { ret(0); }
@@ -1905,7 +2104,8 @@ void TurboAssembler::Ret(int bytes_dropped, Register scratch) {
void MacroAssembler::CmpObjectType(Register heap_object,
InstanceType type,
Register map) {
- movp(map, FieldOperand(heap_object, HeapObject::kMapOffset));
+ LoadTaggedPointerField(map,
+ FieldOperand(heap_object, HeapObject::kMapOffset));
CmpInstanceType(map, type);
}
@@ -1961,7 +2161,8 @@ void MacroAssembler::AssertConstructor(Register object) {
testb(object, Immediate(kSmiTagMask));
Check(not_equal, AbortReason::kOperandIsASmiAndNotAConstructor);
Push(object);
- movq(object, FieldOperand(object, HeapObject::kMapOffset));
+ LoadTaggedPointerField(object,
+ FieldOperand(object, HeapObject::kMapOffset));
testb(FieldOperand(object, Map::kBitFieldOffset),
Immediate(Map::IsConstructorBit::kMask));
Pop(object);
@@ -2000,13 +2201,17 @@ void MacroAssembler::AssertGeneratorObject(Register object) {
// Load map
Register map = object;
Push(object);
- movp(map, FieldOperand(object, HeapObject::kMapOffset));
+ LoadTaggedPointerField(map, FieldOperand(object, HeapObject::kMapOffset));
Label do_check;
// Check if JSGeneratorObject
CmpInstanceType(map, JS_GENERATOR_OBJECT_TYPE);
j(equal, &do_check);
+ // Check if JSAsyncFunctionObject
+ CmpInstanceType(map, JS_ASYNC_FUNCTION_OBJECT_TYPE);
+ j(equal, &do_check);
+
// Check if JSAsyncGeneratorObject
CmpInstanceType(map, JS_ASYNC_GENERATOR_OBJECT_TYPE);
@@ -2029,17 +2234,17 @@ void MacroAssembler::AssertUndefinedOrAllocationSite(Register object) {
}
void MacroAssembler::LoadWeakValue(Register in_out, Label* target_if_cleared) {
- cmpp(in_out, Immediate(kClearedWeakHeapObject));
+ cmpl(in_out, Immediate(kClearedWeakHeapObjectLower32));
j(equal, target_if_cleared);
- andp(in_out, Immediate(~kWeakHeapObjectMask));
+ andp(in_out, Immediate(~static_cast<int32_t>(kWeakHeapObjectMask)));
}
void MacroAssembler::IncrementCounter(StatsCounter* counter, int value) {
DCHECK_GT(value, 0);
if (FLAG_native_code_counters && counter->Enabled()) {
Operand counter_operand =
- ExternalOperand(ExternalReference::Create(counter));
+ ExternalReferenceAsOperand(ExternalReference::Create(counter));
if (value == 1) {
incl(counter_operand);
} else {
@@ -2053,7 +2258,7 @@ void MacroAssembler::DecrementCounter(StatsCounter* counter, int value) {
DCHECK_GT(value, 0);
if (FLAG_native_code_counters && counter->Enabled()) {
Operand counter_operand =
- ExternalOperand(ExternalReference::Create(counter));
+ ExternalReferenceAsOperand(ExternalReference::Create(counter));
if (value == 1) {
decl(counter_operand);
} else {
@@ -2096,9 +2301,10 @@ void TurboAssembler::PrepareForTailCall(const ParameterCount& callee_args_count,
leap(new_sp_reg, Operand(rbp, caller_args_count_reg, times_pointer_size,
StandardFrameConstants::kCallerPCOffset));
} else {
- leap(new_sp_reg, Operand(rbp, caller_args_count_reg, times_pointer_size,
- StandardFrameConstants::kCallerPCOffset -
- callee_args_count.immediate() * kPointerSize));
+ leap(new_sp_reg,
+ Operand(rbp, caller_args_count_reg, times_pointer_size,
+ StandardFrameConstants::kCallerPCOffset -
+ callee_args_count.immediate() * kSystemPointerSize));
}
if (FLAG_debug_code) {
@@ -2145,7 +2351,8 @@ void TurboAssembler::PrepareForTailCall(const ParameterCount& callee_args_count,
void MacroAssembler::InvokeFunction(Register function, Register new_target,
const ParameterCount& actual,
InvokeFlag flag) {
- movp(rbx, FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
+ LoadTaggedPointerField(
+ rbx, FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
movzxwq(rbx,
FieldOperand(rbx, SharedFunctionInfo::kFormalParameterCountOffset));
@@ -2158,7 +2365,8 @@ void MacroAssembler::InvokeFunction(Register function, Register new_target,
const ParameterCount& actual,
InvokeFlag flag) {
DCHECK(function == rdi);
- movp(rsi, FieldOperand(function, JSFunction::kContextOffset));
+ LoadTaggedPointerField(rsi,
+ FieldOperand(function, JSFunction::kContextOffset));
InvokeFunctionCode(rdi, new_target, expected, actual, flag);
}
@@ -2188,13 +2396,13 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
// allow recompilation to take effect without changing any of the
// call sites.
static_assert(kJavaScriptCallCodeStartRegister == rcx, "ABI mismatch");
- movp(rcx, FieldOperand(function, JSFunction::kCodeOffset));
- addp(rcx, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ LoadTaggedPointerField(rcx,
+ FieldOperand(function, JSFunction::kCodeOffset));
if (flag == CALL_FUNCTION) {
- call(rcx);
+ CallCodeObject(rcx);
} else {
DCHECK(flag == JUMP_FUNCTION);
- jmp(rcx);
+ JumpCodeObject(rcx);
}
bind(&done);
}
@@ -2268,7 +2476,8 @@ void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
Label skip_hook;
ExternalReference debug_hook_active =
ExternalReference::debug_hook_on_function_call_address(isolate());
- Operand debug_hook_active_operand = ExternalOperand(debug_hook_active);
+ Operand debug_hook_active_operand =
+ ExternalReferenceAsOperand(debug_hook_active);
cmpb(debug_hook_active_operand, Immediate(0));
j(equal, &skip_hook);
@@ -2336,23 +2545,6 @@ void TurboAssembler::LeaveFrame(StackFrame::Type type) {
popq(rbp);
}
-void MacroAssembler::EnterBuiltinFrame(Register context, Register target,
- Register argc) {
- Push(rbp);
- Move(rbp, rsp);
- Push(context);
- Push(target);
- Push(argc);
-}
-
-void MacroAssembler::LeaveBuiltinFrame(Register context, Register target,
- Register argc) {
- Pop(argc);
- Pop(target);
- Pop(context);
- leave();
-}
-
void MacroAssembler::EnterExitFramePrologue(bool save_rax,
StackFrame::Type frame_type) {
DCHECK(frame_type == StackFrame::EXIT ||
@@ -2363,13 +2555,13 @@ void MacroAssembler::EnterExitFramePrologue(bool save_rax,
DCHECK_EQ(kFPOnStackSize + kPCOnStackSize,
ExitFrameConstants::kCallerSPDisplacement);
DCHECK_EQ(kFPOnStackSize, ExitFrameConstants::kCallerPCOffset);
- DCHECK_EQ(0 * kPointerSize, ExitFrameConstants::kCallerFPOffset);
+ DCHECK_EQ(0 * kSystemPointerSize, ExitFrameConstants::kCallerFPOffset);
pushq(rbp);
movp(rbp, rsp);
// Reserve room for entry stack pointer and push the code object.
Push(Immediate(StackFrame::TypeToMarker(frame_type)));
- DCHECK_EQ(-2 * kPointerSize, ExitFrameConstants::kSPOffset);
+ DCHECK_EQ(-2 * kSystemPointerSize, ExitFrameConstants::kSPOffset);
Push(Immediate(0)); // Saved entry sp, patched before call.
Move(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT);
Push(kScratchRegister); // Accessed from ExitFrame::code_slot.
@@ -2430,7 +2622,7 @@ void MacroAssembler::EnterExitFrame(int arg_stack_space, bool save_doubles,
// Set up argv in callee-saved register r15. It is reused in LeaveExitFrame,
// so it must be retained across the C-call.
- int offset = StandardFrameConstants::kCallerSPOffset - kPointerSize;
+ int offset = StandardFrameConstants::kCallerSPOffset - kSystemPointerSize;
leap(r15, Operand(rbp, r14, times_pointer_size, offset));
EnterExitFrameEpilogue(arg_stack_space, save_doubles);
@@ -2459,11 +2651,11 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles, bool pop_arguments) {
if (pop_arguments) {
// Get the return address from the stack and restore the frame pointer.
movp(rcx, Operand(rbp, kFPOnStackSize));
- movp(rbp, Operand(rbp, 0 * kPointerSize));
+ movp(rbp, Operand(rbp, 0 * kSystemPointerSize));
// Drop everything up to and including the arguments and the receiver
// from the caller stack.
- leap(rsp, Operand(r15, 1 * kPointerSize));
+ leap(rsp, Operand(r15, 1 * kSystemPointerSize));
PushReturnAddressFrom(rcx);
} else {
@@ -2485,7 +2677,7 @@ void MacroAssembler::LeaveExitFrameEpilogue() {
// Restore current context from top and clear it in debug mode.
ExternalReference context_address =
ExternalReference::Create(IsolateAddressId::kContextAddress, isolate());
- Operand context_operand = ExternalOperand(context_address);
+ Operand context_operand = ExternalReferenceAsOperand(context_address);
movp(rsi, context_operand);
#ifdef DEBUG
movp(context_operand, Immediate(Context::kInvalidContext));
@@ -2494,7 +2686,7 @@ void MacroAssembler::LeaveExitFrameEpilogue() {
// Clear the top frame.
ExternalReference c_entry_fp_address =
ExternalReference::Create(IsolateAddressId::kCEntryFPAddress, isolate());
- Operand c_entry_fp_operand = ExternalOperand(c_entry_fp_address);
+ Operand c_entry_fp_operand = ExternalReferenceAsOperand(c_entry_fp_address);
movp(c_entry_fp_operand, Immediate(0));
}
@@ -2507,8 +2699,8 @@ static const int kRegisterPassedArguments = 6;
void MacroAssembler::LoadNativeContextSlot(int index, Register dst) {
- movp(dst, NativeContextOperand());
- movp(dst, ContextOperand(dst, index));
+ LoadTaggedPointerField(dst, NativeContextOperand());
+ LoadTaggedPointerField(dst, ContextOperand(dst, index));
}
@@ -2559,7 +2751,30 @@ void TurboAssembler::CallCFunction(Register function, int num_arguments) {
CheckStackAlignment();
}
+ // Save the frame pointer and PC so that the stack layout remains iterable,
+ // even without an ExitFrame which normally exists between JS and C frames.
+ if (isolate() != nullptr) {
+ Label get_pc;
+ DCHECK(!AreAliased(kScratchRegister, function));
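+ // leaq of a label that is bound immediately below materializes the
+ // current program counter, without needing a call/pop sequence.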
+ leaq(kScratchRegister, Operand(&get_pc, 0));
+ bind(&get_pc);
+ movp(ExternalReferenceAsOperand(
+ ExternalReference::fast_c_call_caller_pc_address(isolate())),
+ kScratchRegister);
+ movp(ExternalReferenceAsOperand(
+ ExternalReference::fast_c_call_caller_fp_address(isolate())),
+ rbp);
+ }
+
call(function);
+
+ if (isolate() != nullptr) {
+ // We don't unset the PC; the FP is the source of truth.
+ movp(ExternalReferenceAsOperand(
+ ExternalReference::fast_c_call_caller_fp_address(isolate())),
+ Immediate(0));
+ }
+
DCHECK_NE(base::OS::ActivationFrameAlignment(), 0);
DCHECK_GE(num_arguments, 0);
int argument_slots_on_stack =
@@ -2599,6 +2814,13 @@ void TurboAssembler::ResetSpeculationPoisonRegister() {
Set(kSpeculationPoisonRegister, -1);
}
+void TurboAssembler::CallForDeoptimization(Address target, int deopt_id) {
+ NoRootArrayScope no_root_array(this);
+ // Save the deopt id in r13 (we don't need the roots array from now on).
+ movp(r13, Immediate(deopt_id));
+ call(target, RelocInfo::RUNTIME_ENTRY);
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/x64/macro-assembler-x64.h b/deps/v8/src/x64/macro-assembler-x64.h
index 25c488ad35..cfd040a5c3 100644
--- a/deps/v8/src/x64/macro-assembler-x64.h
+++ b/deps/v8/src/x64/macro-assembler-x64.h
@@ -2,51 +2,22 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#ifndef INCLUDED_FROM_MACRO_ASSEMBLER_H
+#error This header must be included via macro-assembler.h
+#endif
+
#ifndef V8_X64_MACRO_ASSEMBLER_X64_H_
#define V8_X64_MACRO_ASSEMBLER_X64_H_
#include "src/bailout-reason.h"
#include "src/base/flags.h"
+#include "src/contexts.h"
#include "src/globals.h"
-#include "src/turbo-assembler.h"
#include "src/x64/assembler-x64.h"
namespace v8 {
namespace internal {
-// Give alias names to registers for calling conventions.
-constexpr Register kReturnRegister0 = rax;
-constexpr Register kReturnRegister1 = rdx;
-constexpr Register kReturnRegister2 = r8;
-constexpr Register kJSFunctionRegister = rdi;
-constexpr Register kContextRegister = rsi;
-constexpr Register kAllocateSizeRegister = rdx;
-constexpr Register kSpeculationPoisonRegister = r12;
-constexpr Register kInterpreterAccumulatorRegister = rax;
-constexpr Register kInterpreterBytecodeOffsetRegister = r9;
-constexpr Register kInterpreterBytecodeArrayRegister = r14;
-constexpr Register kInterpreterDispatchTableRegister = r15;
-
-constexpr Register kJavaScriptCallArgCountRegister = rax;
-constexpr Register kJavaScriptCallCodeStartRegister = rcx;
-constexpr Register kJavaScriptCallTargetRegister = kJSFunctionRegister;
-constexpr Register kJavaScriptCallNewTargetRegister = rdx;
-constexpr Register kJavaScriptCallExtraArg1Register = rbx;
-
-constexpr Register kRuntimeCallFunctionRegister = rbx;
-constexpr Register kRuntimeCallArgCountRegister = rax;
-constexpr Register kRuntimeCallArgvRegister = r15;
-constexpr Register kWasmInstanceRegister = rsi;
-
-// Default scratch register used by MacroAssembler (and other code that needs
-// a spare register). The register isn't callee save, and not used by the
-// function calling convention.
-constexpr Register kScratchRegister = r10;
-constexpr XMMRegister kScratchDoubleReg = xmm15;
-constexpr Register kRootRegister = r13; // callee save
-
-constexpr Register kOffHeapTrampolineRegister = kScratchRegister;
-
// Convenience for platform-independent signatures.
typedef Operand MemOperand;
@@ -116,14 +87,9 @@ class StackArgumentsAccessor {
class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
public:
- TurboAssembler(const AssemblerOptions& options, void* buffer, int buffer_size)
- : TurboAssemblerBase(options, buffer, buffer_size) {}
-
- TurboAssembler(Isolate* isolate, const AssemblerOptions& options,
- void* buffer, int buffer_size,
- CodeObjectRequired create_code_object)
- : TurboAssemblerBase(isolate, options, buffer, buffer_size,
- create_code_object) {}
+ template <typename... Args>
+ explicit TurboAssembler(Args&&... args)
+ : TurboAssemblerBase(std::forward<Args>(args)...) {}
template <typename Dst, typename... Args>
struct AvxHelper {
@@ -229,7 +195,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void Push(Register src);
void Push(Operand src);
void Push(Immediate value);
- void Push(Smi* smi);
+ void Push(Smi smi);
void Push(Handle<HeapObject> source);
// Before calling a C-function from generated code, align arguments on stack.
@@ -325,9 +291,9 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
j(less, dest);
}
- void Move(Register dst, Smi* source);
+ void Move(Register dst, Smi source);
- void Move(Operand dst, Smi* source) {
+ void Move(Operand dst, Smi source) {
Register constant = GetSmiConstant(source);
movp(dst, constant);
}
@@ -377,8 +343,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// isn't changed.
// If the operand is used more than once, use a scratch register
// that is guaranteed not to be clobbered.
- Operand ExternalOperand(ExternalReference reference,
- Register scratch = kScratchRegister);
+ Operand ExternalReferenceAsOperand(ExternalReference reference,
+ Register scratch = kScratchRegister);
void Call(Register reg) { call(reg); }
void Call(Operand op);
@@ -387,6 +353,12 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void Call(ExternalReference ext);
void Call(Label* target) { call(target); }
+ void CallBuiltinPointer(Register builtin_pointer) override;
+
+ void LoadCodeObjectEntry(Register destination, Register code_object) override;
+ void CallCodeObject(Register code_object) override;
+ void JumpCodeObject(Register code_object) override;
+
void RetpolineCall(Register reg);
void RetpolineCall(Address destination, RelocInfo::Mode rmode);
@@ -398,11 +370,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void RetpolineJump(Register reg);
- void CallForDeoptimization(Address target, int deopt_id,
- RelocInfo::Mode rmode) {
- USE(deopt_id);
- call(target, rmode);
- }
+ void CallForDeoptimization(Address target, int deopt_id);
// Non-SSE2 instructions.
void Pextrd(Register dst, XMMRegister src, int8_t imm8);
@@ -453,22 +421,13 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
Register caller_args_count_reg, Register scratch0,
Register scratch1);
- inline bool AllowThisStubCall(CodeStub* stub);
-
- // Call a code stub. This expects {stub} to be zone-allocated, as it does not
- // trigger generation of the stub's code object but instead files a
- // HeapObjectRequest that will be fulfilled after code assembly.
- void CallStubDelayed(CodeStub* stub);
-
// Call a runtime routine. This expects {centry} to contain a fitting CEntry
// builtin for the target runtime function and uses an indirect call.
void CallRuntimeWithCEntry(Runtime::FunctionId fid, Register centry);
void InitializeRootRegister() {
- ExternalReference roots_array_start =
- ExternalReference::roots_array_start(isolate());
- Move(kRootRegister, roots_array_start);
- addp(kRootRegister, Immediate(kRootRegisterBias));
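+ // kRootRegister now points directly at the isolate root; no
+ // kRootRegisterBias needs to be added on top.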
+ ExternalReference isolate_root = ExternalReference::isolate_root(isolate());
+ Move(kRootRegister, isolate_root);
}
void SaveRegisters(RegList registers);
@@ -477,6 +436,9 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void CallRecordWriteStub(Register object, Register address,
RememberedSetAction remembered_set_action,
SaveFPRegsMode fp_mode);
+ void CallRecordWriteStub(Register object, Register address,
+ RememberedSetAction remembered_set_action,
+ SaveFPRegsMode fp_mode, Address wasm_target);
void MoveNumber(Register dst, double value);
void MoveNonSmi(Register dst, double value);
@@ -510,37 +472,78 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void ResetSpeculationPoisonRegister();
+ // ---------------------------------------------------------------------------
+ // Pointer compression support
+
+ // TODO(ishell): remove |scratch_for_debug| once pointer compression works.
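+ //
+ // With pointer compression enabled, a tagged field holds a 32-bit value:
+ // a compressed heap-object pointer is decompressed by re-adding the
+ // isolate root (kRootRegister), while a compressed smi is simply extended
+ // back to full word width.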
+
+ // Loads a field containing a HeapObject and decompresses it if pointer
+ // compression is enabled.
+ void LoadTaggedPointerField(Register destination, Operand field_operand,
+ Register scratch_for_debug = no_reg);
+
+ // Loads a field containing any tagged value and decompresses it if necessary.
+ // When pointer compression is enabled, uses |scratch| to decompress the
+ // value.
+ void LoadAnyTaggedField(Register destination, Operand field_operand,
+ Register scratch,
+ Register scratch_for_debug = no_reg);
+
+ // Loads a field containing a HeapObject, decompresses it if necessary and
+ // pushes full pointer to the stack. When pointer compression is enabled,
+ // uses |scratch| to decompress the value.
+ void PushTaggedPointerField(Operand field_operand, Register scratch,
+ Register scratch_for_debug = no_reg);
+
+ // Loads a field containing any tagged value, decompresses it if necessary and
+ // pushes the full pointer to the stack. When pointer compression is enabled,
+ // uses |scratch1| and |scratch2| to decompress the value.
+ void PushTaggedAnyField(Operand field_operand, Register scratch1,
+ Register scratch2,
+ Register scratch_for_debug = no_reg);
+
+ // Loads a field containing smi value and untags it.
+ void SmiUntagField(Register dst, Operand src);
+
+ // Compresses and stores tagged value to given on-heap location.
+ // TODO(ishell): drop once mov_tagged() can be used.
+ void StoreTaggedField(Operand dst_field_operand, Immediate immediate);
+ void StoreTaggedField(Operand dst_field_operand, Register value);
+
+ void DecompressTaggedSigned(Register destination, Operand field_operand,
+ Register scratch_for_debug);
+ void DecompressTaggedPointer(Register destination, Operand field_operand,
+ Register scratch_for_debug);
+ void DecompressAnyTagged(Register destination, Operand field_operand,
+ Register scratch, Register scratch_for_debug);
+
protected:
static const int kSmiShift = kSmiTagSize + kSmiShiftSize;
int smi_count = 0;
int heap_object_count = 0;
- int64_t RootRegisterDelta(ExternalReference other);
-
// Returns a register holding the smi value. The register MUST NOT be
// modified. It may be the "smi 1 constant" register.
- Register GetSmiConstant(Smi* value);
+ Register GetSmiConstant(Smi value);
+
+ void CallRecordWriteStub(Register object, Register address,
+ RememberedSetAction remembered_set_action,
+ SaveFPRegsMode fp_mode, Handle<Code> code_target,
+ Address wasm_target);
};
// MacroAssembler implements a collection of frequently used macros.
-class MacroAssembler : public TurboAssembler {
+class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
public:
- MacroAssembler(const AssemblerOptions& options, void* buffer, int size)
- : TurboAssembler(options, buffer, size) {}
-
- MacroAssembler(Isolate* isolate, void* buffer, int size,
- CodeObjectRequired create_code_object)
- : MacroAssembler(isolate, AssemblerOptions::Default(isolate), buffer,
- size, create_code_object) {}
-
- MacroAssembler(Isolate* isolate, const AssemblerOptions& options,
- void* buffer, int size, CodeObjectRequired create_code_object);
+ template <typename... Args>
+ explicit MacroAssembler(Args&&... args)
+ : TurboAssembler(std::forward<Args>(args)...) {}
// Loads and stores the value of an external reference.
// Special case code for load and store to take advantage of
// load_rax/store_rax if possible/necessary.
// For other operations, just use:
- // Operand operand = ExternalOperand(extref);
+ // Operand operand = ExternalReferenceAsOperand(extref);
// operation(operand, ..);
void Load(Register destination, ExternalReference source);
void Store(ExternalReference destination, Register source);
@@ -578,9 +581,8 @@ class MacroAssembler : public TurboAssembler {
j(not_equal, if_not_equal, if_not_equal_distance);
}
-
-// ---------------------------------------------------------------------------
-// GC Support
+ // ---------------------------------------------------------------------------
+ // GC Support
// Notify the garbage collector that we wrote a pointer into an object.
// |object| is the object being stored into, |value| is the object being
@@ -611,13 +613,14 @@ class MacroAssembler : public TurboAssembler {
// sets up the number of arguments in register rdi and the pointer
// to the first argument in register rsi.
//
- // Allocates arg_stack_space * kPointerSize memory (not GCed) on the stack
- // accessible via StackSpaceOperand.
+ // Allocates arg_stack_space * kSystemPointerSize memory (not GCed) on the
+ // stack accessible via StackSpaceOperand.
void EnterExitFrame(int arg_stack_space = 0, bool save_doubles = false,
StackFrame::Type frame_type = StackFrame::EXIT);
- // Enter specific kind of exit frame. Allocates arg_stack_space * kPointerSize
- // memory (not GCed) on the stack accessible via StackSpaceOperand.
+ // Enter specific kind of exit frame. Allocates
+ // (arg_stack_space * kSystemPointerSize) memory (not GCed) on the stack
+ // accessible via StackSpaceOperand.
void EnterApiExitFrame(int arg_stack_space);
// Leave the current exit frame. Expects/provides the return value in
@@ -664,10 +667,10 @@ class MacroAssembler : public TurboAssembler {
// Simple comparison of smis. Both sides must be known smis to use these,
// otherwise use Cmp.
void SmiCompare(Register smi1, Register smi2);
- void SmiCompare(Register dst, Smi* src);
+ void SmiCompare(Register dst, Smi src);
void SmiCompare(Register dst, Operand src);
void SmiCompare(Operand dst, Register src);
- void SmiCompare(Operand dst, Smi* src);
+ void SmiCompare(Operand dst, Smi src);
// Functions performing a check on a known or potential smi. Returns
// a condition that is satisfied if the check is successful.
@@ -691,15 +694,15 @@ class MacroAssembler : public TurboAssembler {
// Add an integer constant to a tagged smi, giving a tagged smi as result.
// No overflow testing on the result is done.
- void SmiAddConstant(Operand dst, Smi* constant);
+ void SmiAddConstant(Operand dst, Smi constant);
// Specialized operations
// Converts, if necessary, a smi to a combination of number and
// multiplier to be used as a scaled index.
// The src register contains a *positive* smi value. The shift is the
- // power of two to multiply the index value by (e.g.
- // to index by smi-value * kPointerSize, pass the smi and kPointerSizeLog2).
+ // power of two to multiply the index value by (e.g. to index by
+ // smi-value * kSystemPointerSize, pass the smi and kSystemPointerSizeLog2).
// The returned index register may be either src or dst, depending
// on what is most efficient. If src and dst are different registers,
// src is always unchanged.
@@ -714,8 +717,8 @@ class MacroAssembler : public TurboAssembler {
void Cmp(Register dst, Handle<Object> source);
void Cmp(Operand dst, Handle<Object> source);
- void Cmp(Register dst, Smi* src);
- void Cmp(Operand dst, Smi* src);
+ void Cmp(Register dst, Smi src);
+ void Cmp(Operand dst, Smi src);
// Emit code to discard a non-negative number of pointer-sized elements
// from the stack, clobbering only the rsp register.
@@ -820,14 +823,6 @@ class MacroAssembler : public TurboAssembler {
// ---------------------------------------------------------------------------
// Runtime calls
- // Call a code stub.
- // The code object is generated immediately, in contrast to
- // TurboAssembler::CallStubDelayed.
- void CallStub(CodeStub* stub);
-
- // Tail call a code stub (jump).
- void TailCallStub(CodeStub* stub);
-
// Call a runtime routine.
void CallRuntime(const Runtime::Function* f,
int num_arguments,
@@ -869,9 +864,6 @@ class MacroAssembler : public TurboAssembler {
return SafepointRegisterStackIndex(reg.code());
}
- void EnterBuiltinFrame(Register context, Register target, Register argc);
- void LeaveBuiltinFrame(Register context, Register target, Register argc);
-
private:
// Order general registers are pushed by Pushad.
// rax, rcx, rdx, rbx, rsi, rdi, r8, r9, r11, r12, r14, r15.
@@ -886,8 +878,8 @@ class MacroAssembler : public TurboAssembler {
void EnterExitFramePrologue(bool save_rax, StackFrame::Type frame_type);
- // Allocates arg_stack_space * kPointerSize memory (not GCed) on the stack
- // accessible via StackSpaceOperand.
+ // Allocates arg_stack_space * kSystemPointerSize memory (not GCed) on the
+ // stack accessible via StackSpaceOperand.
void EnterExitFrameEpilogue(int arg_stack_space, bool save_doubles);
void LeaveExitFrameEpilogue();
@@ -907,6 +899,8 @@ class MacroAssembler : public TurboAssembler {
// Needs access to SafepointRegisterStackIndex for compiled frame
// traversal.
friend class StandardFrame;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(MacroAssembler);
};
// -----------------------------------------------------------------------------
@@ -946,9 +940,9 @@ inline Operand NativeContextOperand() {
inline Operand StackSpaceOperand(int index) {
#ifdef _WIN64
const int kShaddowSpace = 4;
- return Operand(rsp, (index + kShaddowSpace) * kPointerSize);
+ return Operand(rsp, (index + kShaddowSpace) * kSystemPointerSize);
#else
- return Operand(rsp, index * kPointerSize);
+ return Operand(rsp, index * kSystemPointerSize);
#endif
}
diff --git a/deps/v8/src/x64/register-x64.h b/deps/v8/src/x64/register-x64.h
new file mode 100644
index 0000000000..cd7614744d
--- /dev/null
+++ b/deps/v8/src/x64/register-x64.h
@@ -0,0 +1,224 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_X64_REGISTER_X64_H_
+#define V8_X64_REGISTER_X64_H_
+
+#include "src/register.h"
+#include "src/reglist.h"
+
+namespace v8 {
+namespace internal {
+
+#define GENERAL_REGISTERS(V) \
+ V(rax) \
+ V(rcx) \
+ V(rdx) \
+ V(rbx) \
+ V(rsp) \
+ V(rbp) \
+ V(rsi) \
+ V(rdi) \
+ V(r8) \
+ V(r9) \
+ V(r10) \
+ V(r11) \
+ V(r12) \
+ V(r13) \
+ V(r14) \
+ V(r15)
+
+#define ALLOCATABLE_GENERAL_REGISTERS(V) \
+ V(rax) \
+ V(rbx) \
+ V(rdx) \
+ V(rcx) \
+ V(rsi) \
+ V(rdi) \
+ V(r8) \
+ V(r9) \
+ V(r11) \
+ V(r12) \
+ V(r14) \
+ V(r15)
+
+enum RegisterCode {
+#define REGISTER_CODE(R) kRegCode_##R,
+ GENERAL_REGISTERS(REGISTER_CODE)
+#undef REGISTER_CODE
+ kRegAfterLast
+};
+
+class Register : public RegisterBase<Register, kRegAfterLast> {
+ public:
+ bool is_byte_register() const { return reg_code_ <= 3; }
+ // Return the high bit of the register code as a 0 or 1. Used often
+ // when constructing the REX prefix byte.
+ int high_bit() const { return reg_code_ >> 3; }
+ // Return the 3 low bits of the register code. Used when encoding registers
+ // in modR/M, SIB, and opcode bytes.
+ int low_bits() const { return reg_code_ & 0x7; }
+
+ private:
+ friend class RegisterBase<Register, kRegAfterLast>;
+ explicit constexpr Register(int code) : RegisterBase(code) {}
+};
+
+ASSERT_TRIVIALLY_COPYABLE(Register);
+static_assert(sizeof(Register) == sizeof(int),
+ "Register can efficiently be passed by value");
+
+#define DECLARE_REGISTER(R) \
+ constexpr Register R = Register::from_code<kRegCode_##R>();
+GENERAL_REGISTERS(DECLARE_REGISTER)
+#undef DECLARE_REGISTER
+constexpr Register no_reg = Register::no_reg();
+
+constexpr int kNumRegs = 16;
+
+constexpr RegList kJSCallerSaved =
+ Register::ListOf<rax, rcx, rdx,
+ rbx, // used as a caller-saved register in JavaScript code
+ rdi // callee function
+ >();
+
+constexpr int kNumJSCallerSaved = 5;
+
+// Number of registers for which space is reserved in safepoints.
+constexpr int kNumSafepointRegisters = 16;
+
+#ifdef _WIN64
+// Windows calling convention
+constexpr Register arg_reg_1 = rcx;
+constexpr Register arg_reg_2 = rdx;
+constexpr Register arg_reg_3 = r8;
+constexpr Register arg_reg_4 = r9;
+#else
+// AMD64 calling convention
+constexpr Register arg_reg_1 = rdi;
+constexpr Register arg_reg_2 = rsi;
+constexpr Register arg_reg_3 = rdx;
+constexpr Register arg_reg_4 = rcx;
+#endif // _WIN64
+
+#define DOUBLE_REGISTERS(V) \
+ V(xmm0) \
+ V(xmm1) \
+ V(xmm2) \
+ V(xmm3) \
+ V(xmm4) \
+ V(xmm5) \
+ V(xmm6) \
+ V(xmm7) \
+ V(xmm8) \
+ V(xmm9) \
+ V(xmm10) \
+ V(xmm11) \
+ V(xmm12) \
+ V(xmm13) \
+ V(xmm14) \
+ V(xmm15)
+
+#define FLOAT_REGISTERS DOUBLE_REGISTERS
+#define SIMD128_REGISTERS DOUBLE_REGISTERS
+
+#define ALLOCATABLE_DOUBLE_REGISTERS(V) \
+ V(xmm0) \
+ V(xmm1) \
+ V(xmm2) \
+ V(xmm3) \
+ V(xmm4) \
+ V(xmm5) \
+ V(xmm6) \
+ V(xmm7) \
+ V(xmm8) \
+ V(xmm9) \
+ V(xmm10) \
+ V(xmm11) \
+ V(xmm12) \
+ V(xmm13) \
+ V(xmm14)
+
+constexpr bool kPadArguments = false;
+constexpr bool kSimpleFPAliasing = true;
+constexpr bool kSimdMaskRegisters = false;
+
+enum DoubleRegisterCode {
+#define REGISTER_CODE(R) kDoubleCode_##R,
+ DOUBLE_REGISTERS(REGISTER_CODE)
+#undef REGISTER_CODE
+ kDoubleAfterLast
+};
+
+class XMMRegister : public RegisterBase<XMMRegister, kDoubleAfterLast> {
+ public:
+ // Return the high bit of the register code as a 0 or 1. Used often
+ // when constructing the REX prefix byte.
+ int high_bit() const { return reg_code_ >> 3; }
+ // Return the 3 low bits of the register code. Used when encoding registers
+ // in modR/M, SIB, and opcode bytes.
+ int low_bits() const { return reg_code_ & 0x7; }
+
+ private:
+ friend class RegisterBase<XMMRegister, kDoubleAfterLast>;
+ explicit constexpr XMMRegister(int code) : RegisterBase(code) {}
+};
+
+ASSERT_TRIVIALLY_COPYABLE(XMMRegister);
+static_assert(sizeof(XMMRegister) == sizeof(int),
+ "XMMRegister can efficiently be passed by value");
+
+typedef XMMRegister FloatRegister;
+
+typedef XMMRegister DoubleRegister;
+
+typedef XMMRegister Simd128Register;
+
+#define DECLARE_REGISTER(R) \
+ constexpr DoubleRegister R = DoubleRegister::from_code<kDoubleCode_##R>();
+DOUBLE_REGISTERS(DECLARE_REGISTER)
+#undef DECLARE_REGISTER
+constexpr DoubleRegister no_dreg = DoubleRegister::no_reg();
+
+// Define {RegisterName} methods for the register types.
+DEFINE_REGISTER_NAMES(Register, GENERAL_REGISTERS)
+DEFINE_REGISTER_NAMES(XMMRegister, DOUBLE_REGISTERS)
+
+// Give alias names to registers for calling conventions.
+constexpr Register kReturnRegister0 = rax;
+constexpr Register kReturnRegister1 = rdx;
+constexpr Register kReturnRegister2 = r8;
+constexpr Register kJSFunctionRegister = rdi;
+constexpr Register kContextRegister = rsi;
+constexpr Register kAllocateSizeRegister = rdx;
+constexpr Register kSpeculationPoisonRegister = r12;
+constexpr Register kInterpreterAccumulatorRegister = rax;
+constexpr Register kInterpreterBytecodeOffsetRegister = r9;
+constexpr Register kInterpreterBytecodeArrayRegister = r14;
+constexpr Register kInterpreterDispatchTableRegister = r15;
+
+constexpr Register kJavaScriptCallArgCountRegister = rax;
+constexpr Register kJavaScriptCallCodeStartRegister = rcx;
+constexpr Register kJavaScriptCallTargetRegister = kJSFunctionRegister;
+constexpr Register kJavaScriptCallNewTargetRegister = rdx;
+constexpr Register kJavaScriptCallExtraArg1Register = rbx;
+
+constexpr Register kRuntimeCallFunctionRegister = rbx;
+constexpr Register kRuntimeCallArgCountRegister = rax;
+constexpr Register kRuntimeCallArgvRegister = r15;
+constexpr Register kWasmInstanceRegister = rsi;
+
+// Default scratch register used by MacroAssembler (and other code that needs
+// a spare register). The register isn't callee save, and not used by the
+// function calling convention.
+constexpr Register kScratchRegister = r10;
+constexpr XMMRegister kScratchDoubleReg = xmm15;
+constexpr Register kRootRegister = r13; // callee save
+
+constexpr Register kOffHeapTrampolineRegister = kScratchRegister;
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_X64_REGISTER_X64_H_
diff --git a/deps/v8/src/x64/simulator-x64.cc b/deps/v8/src/x64/simulator-x64.cc
deleted file mode 100644
index 4797ae91bb..0000000000
--- a/deps/v8/src/x64/simulator-x64.cc
+++ /dev/null
@@ -1,7 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/x64/simulator-x64.h"
-
-// Since there is no simulator for the x64 architecture this file is empty.
diff --git a/deps/v8/src/x64/simulator-x64.h b/deps/v8/src/x64/simulator-x64.h
deleted file mode 100644
index ce9f3592dc..0000000000
--- a/deps/v8/src/x64/simulator-x64.h
+++ /dev/null
@@ -1,10 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_X64_SIMULATOR_X64_H_
-#define V8_X64_SIMULATOR_X64_H_
-
-// Since there is no simulator for the x64 architecture this file is empty.
-
-#endif // V8_X64_SIMULATOR_X64_H_
diff --git a/deps/v8/src/zone/accounting-allocator.cc b/deps/v8/src/zone/accounting-allocator.cc
index 8ef141b4c1..37ebcf0dd4 100644
--- a/deps/v8/src/zone/accounting-allocator.cc
+++ b/deps/v8/src/zone/accounting-allocator.cc
@@ -43,7 +43,7 @@ void AccountingAllocator::ConfigureSegmentPool(const size_t max_pool_size) {
(size_t(1) << kMinSegmentSizePower);
size_t fits_fully = max_pool_size / full_size;
- base::LockGuard<base::Mutex> lock_guard(&unused_segments_mutex_);
+ base::MutexGuard lock_guard(&unused_segments_mutex_);
// We assume few zones (less than 'fits_fully' many) to be active at the same
// time. When zones grow regularly, they will keep requesting segments of
@@ -138,7 +138,7 @@ Segment* AccountingAllocator::GetSegmentFromPool(size_t requested_size) {
Segment* segment;
{
- base::LockGuard<base::Mutex> lock_guard(&unused_segments_mutex_);
+ base::MutexGuard lock_guard(&unused_segments_mutex_);
segment = unused_segments_heads_[power];
@@ -173,7 +173,7 @@ bool AccountingAllocator::AddSegmentToPool(Segment* segment) {
power -= kMinSegmentSizePower;
{
- base::LockGuard<base::Mutex> lock_guard(&unused_segments_mutex_);
+ base::MutexGuard lock_guard(&unused_segments_mutex_);
if (unused_segments_sizes_[power] >= unused_segments_max_sizes_[power]) {
return false;
@@ -189,7 +189,7 @@ bool AccountingAllocator::AddSegmentToPool(Segment* segment) {
}
void AccountingAllocator::ClearPool() {
- base::LockGuard<base::Mutex> lock_guard(&unused_segments_mutex_);
+ base::MutexGuard lock_guard(&unused_segments_mutex_);
for (size_t power = 0; power <= kMaxSegmentSizePower - kMinSegmentSizePower;
power++) {
diff --git a/deps/v8/src/zone/zone-allocator.h b/deps/v8/src/zone/zone-allocator.h
index b3ce2473b6..56d8ea09ef 100644
--- a/deps/v8/src/zone/zone-allocator.h
+++ b/deps/v8/src/zone/zone-allocator.h
@@ -30,11 +30,9 @@ class ZoneAllocator {
// MSVS unfortunately requires the default constructor to be defined.
ZoneAllocator() : ZoneAllocator(nullptr) { UNREACHABLE(); }
#endif
- explicit ZoneAllocator(Zone* zone) throw() : zone_(zone) {}
- explicit ZoneAllocator(const ZoneAllocator& other) throw()
- : ZoneAllocator<T>(other.zone_) {}
+ explicit ZoneAllocator(Zone* zone) : zone_(zone) {}
template <typename U>
- ZoneAllocator(const ZoneAllocator<U>& other) throw()
+ ZoneAllocator(const ZoneAllocator<U>& other) V8_NOEXCEPT
: ZoneAllocator<T>(other.zone_) {}
template <typename U>
friend class ZoneAllocator;
@@ -48,7 +46,7 @@ class ZoneAllocator {
void deallocate(T* p, size_t) { /* noop for Zones */
}
- size_t max_size() const throw() {
+ size_t max_size() const {
return std::numeric_limits<int>::max() / sizeof(T);
}
template <typename U, typename... Args>
@@ -93,13 +91,12 @@ class RecyclingZoneAllocator : public ZoneAllocator<T> {
UNREACHABLE();
}
#endif
- explicit RecyclingZoneAllocator(Zone* zone) throw()
+ explicit RecyclingZoneAllocator(Zone* zone)
: ZoneAllocator<T>(zone), free_list_(nullptr) {}
- explicit RecyclingZoneAllocator(const RecyclingZoneAllocator& other) throw()
- : ZoneAllocator<T>(other), free_list_(nullptr) {}
template <typename U>
- RecyclingZoneAllocator(const RecyclingZoneAllocator<U>& other) throw()
- : ZoneAllocator<T>(other), free_list_(nullptr) {}
+ RecyclingZoneAllocator(const RecyclingZoneAllocator<U>& other) V8_NOEXCEPT
+ : ZoneAllocator<T>(other),
+ free_list_(nullptr) {}
template <typename U>
friend class RecyclingZoneAllocator;
diff --git a/deps/v8/src/zone/zone-chunk-list.h b/deps/v8/src/zone/zone-chunk-list.h
index 049e8f52a9..fe32e48c0b 100644
--- a/deps/v8/src/zone/zone-chunk-list.h
+++ b/deps/v8/src/zone/zone-chunk-list.h
@@ -6,7 +6,7 @@
#include "src/base/iterator.h"
#include "src/globals.h"
-#include "src/utils.h"
+#include "src/memcopy.h"
#include "src/zone/zone.h"
#ifndef V8_ZONE_ZONE_CHUNK_LIST_H_
diff --git a/deps/v8/src/zone/zone-handle-set.h b/deps/v8/src/zone/zone-handle-set.h
index c5297902d8..ccf7411268 100644
--- a/deps/v8/src/zone/zone-handle-set.h
+++ b/deps/v8/src/zone/zone-handle-set.h
@@ -17,8 +17,8 @@ class ZoneHandleSet final {
public:
ZoneHandleSet() : data_(kEmptyTag) {}
explicit ZoneHandleSet(Handle<T> handle)
- : data_(bit_cast<intptr_t>(handle.address()) | kSingletonTag) {
- DCHECK(IsAligned(bit_cast<intptr_t>(handle.address()), kPointerAlignment));
+ : data_(handle.address() | kSingletonTag) {
+ DCHECK(IsAligned(handle.address(), kPointerAlignment));
}
bool is_empty() const { return data_ == kEmptyTag; }
@@ -41,10 +41,10 @@ class ZoneHandleSet final {
Handle<T> operator[](size_t i) const { return at(i); }
void insert(Handle<T> handle, Zone* zone) {
- T** const value = bit_cast<T**>(handle.address());
- DCHECK(IsAligned(bit_cast<intptr_t>(value), kPointerAlignment));
+ Address* const value = reinterpret_cast<Address*>(handle.address());
+ DCHECK(IsAligned(reinterpret_cast<Address>(value), kPointerAlignment));
if ((data_ & kTagMask) == kEmptyTag) {
- data_ = bit_cast<intptr_t>(value) | kSingletonTag;
+ data_ = reinterpret_cast<Address>(value) | kSingletonTag;
} else if ((data_ & kTagMask) == kSingletonTag) {
if (singleton() == value) return;
List* list = new (zone->New(sizeof(List))) List(zone);
@@ -55,8 +55,8 @@ class ZoneHandleSet final {
list->push_back(value);
list->push_back(singleton());
}
- DCHECK(IsAligned(bit_cast<intptr_t>(list), kPointerAlignment));
- data_ = bit_cast<intptr_t>(list) | kListTag;
+ DCHECK(IsAligned(reinterpret_cast<Address>(list), kPointerAlignment));
+ data_ = reinterpret_cast<Address>(list) | kListTag;
} else {
DCHECK_EQ(kListTag, data_ & kTagMask);
List const* const old_list = list();
@@ -76,8 +76,8 @@ class ZoneHandleSet final {
new_list->push_back(old_list->at(i));
}
DCHECK_EQ(old_list->size() + 1, new_list->size());
- DCHECK(IsAligned(bit_cast<intptr_t>(new_list), kPointerAlignment));
- data_ = bit_cast<intptr_t>(new_list) | kListTag;
+ DCHECK(IsAligned(reinterpret_cast<Address>(new_list), kPointerAlignment));
+ data_ = reinterpret_cast<Address>(new_list) | kListTag;
}
}
@@ -105,12 +105,13 @@ class ZoneHandleSet final {
bool contains(Handle<T> other) const {
if (data_ == kEmptyTag) return false;
+ Address* other_address = reinterpret_cast<Address*>(other.address());
if ((data_ & kTagMask) == kSingletonTag) {
- return singleton() == bit_cast<T**>(other.address());
+ return singleton() == other_address;
}
DCHECK_EQ(kListTag, data_ & kTagMask);
- return std::find(list()->begin(), list()->end(),
- bit_cast<T**>(other.address())) != list()->end();
+ return std::find(list()->begin(), list()->end(), other_address) !=
+ list()->end();
}
void remove(Handle<T> handle, Zone* zone) {
@@ -156,19 +157,19 @@ class ZoneHandleSet final {
inline const_iterator end() const;
private:
- typedef ZoneVector<T**> List;
+ typedef ZoneVector<Address*> List;
List const* list() const {
DCHECK_EQ(kListTag, data_ & kTagMask);
- return bit_cast<List const*>(data_ - kListTag);
+ return reinterpret_cast<List const*>(data_ - kListTag);
}
- T** singleton() const {
+ Address* singleton() const {
DCHECK_EQ(kSingletonTag, data_ & kTagMask);
- return bit_cast<T**>(data_ - kSingletonTag);
+ return reinterpret_cast<Address*>(data_ - kSingletonTag);
}
- enum Tag : intptr_t {
+ enum Tag : Address {
kSingletonTag = 0,
kEmptyTag = 1,
kListTag = 2,
@@ -177,7 +178,7 @@ class ZoneHandleSet final {
STATIC_ASSERT(kTagMask < kPointerAlignment);
- intptr_t data_;
+ Address data_;
};
template <typename T>
diff --git a/deps/v8/src/zone/zone-list-inl.h b/deps/v8/src/zone/zone-list-inl.h
index d90c9a28fe..0eebdcc212 100644
--- a/deps/v8/src/zone/zone-list-inl.h
+++ b/deps/v8/src/zone/zone-list-inl.h
@@ -9,7 +9,7 @@
#include "src/base/macros.h"
#include "src/base/platform/platform.h"
-#include "src/utils.h"
+#include "src/memcopy.h"
namespace v8 {
namespace internal {
@@ -129,14 +129,6 @@ void ZoneList<T>::Iterate(Visitor* visitor) {
}
template <typename T>
-bool ZoneList<T>::Contains(const T& elm) const {
- for (int i = 0; i < length_; i++) {
- if (data_[i] == elm) return true;
- }
- return false;
-}
-
-template <typename T>
template <typename CompareFunction>
void ZoneList<T>::Sort(CompareFunction cmp) {
ToVector().Sort(cmp, 0, length_);
diff --git a/deps/v8/src/zone/zone.cc b/deps/v8/src/zone/zone.cc
index a851f6797a..2b836f3778 100644
--- a/deps/v8/src/zone/zone.cc
+++ b/deps/v8/src/zone/zone.cc
@@ -48,7 +48,7 @@ Zone::~Zone() {
DCHECK_EQ(segment_bytes_allocated_, 0);
}
-void* Zone::New(size_t size) {
+void* Zone::AsanNew(size_t size) {
CHECK(!sealed_);
// Round up the requested size to fit the alignment.
@@ -58,7 +58,7 @@ void* Zone::New(size_t size) {
Address result = position_;
const size_t size_with_redzone = size + kASanRedzoneBytes;
- DCHECK(limit_ >= position_);
+ DCHECK_LE(position_, limit_);
if (size_with_redzone > limit_ - position_) {
result = NewExpand(size_with_redzone);
} else {
@@ -71,8 +71,7 @@ void* Zone::New(size_t size) {
kASanRedzoneBytes);
// Check that the result has the proper alignment and return it.
- DCHECK(IsAddressAligned(result, kAlignmentInBytes, 0));
- allocation_size_ += size;
+ DCHECK(IsAligned(result, kAlignmentInBytes));
return reinterpret_cast<void*>(result);
}
@@ -122,6 +121,8 @@ Address Zone::NewExpand(size_t size) {
DCHECK_EQ(size, RoundDown(size, kAlignmentInBytes));
DCHECK(limit_ - position_ < size);
+ // Fold the bytes already used in the current segment, if any, into
+ // allocation_size_ before a new segment is allocated.
+ allocation_size_ = allocation_size();
// Compute the new segment size. We use a 'high water mark'
// strategy, where we increase the segment size every time we expand
// except that we employ a maximum segment size when we delete. This
diff --git a/deps/v8/src/zone/zone.h b/deps/v8/src/zone/zone.h
index 5fcc25b350..8fb7d1fc74 100644
--- a/deps/v8/src/zone/zone.h
+++ b/deps/v8/src/zone/zone.h
@@ -47,7 +47,21 @@ class V8_EXPORT_PRIVATE Zone final {
// Allocate 'size' bytes of memory in the Zone; expands the Zone by
// allocating new segments of memory on demand using malloc().
- void* New(size_t size);
+ void* New(size_t size) {
+#ifdef V8_USE_ADDRESS_SANITIZER
+ return AsanNew(size);
+#else
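+ // Fast path: bump-pointer allocation from the current segment; fall
+ // back to NewExpand() only when the segment is exhausted.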
+ size = RoundUp(size, kAlignmentInBytes);
+ Address result = position_;
+ if (V8_UNLIKELY(size > limit_ - position_)) {
+ result = NewExpand(size);
+ } else {
+ position_ += size;
+ }
+ return reinterpret_cast<void*>(result);
+#endif
+ }
+ void* AsanNew(size_t size);
template <typename T>
T* NewArray(size_t length) {
@@ -70,7 +84,10 @@ class V8_EXPORT_PRIVATE Zone final {
const char* name() const { return name_; }
- size_t allocation_size() const { return allocation_size_; }
+ size_t allocation_size() const {
+ size_t extra = segment_head_ ? position_ - segment_head_->start() : 0;
+ return allocation_size_ + extra;
+ }
AccountingAllocator* allocator() const { return allocator_; }
@@ -208,6 +225,9 @@ class ZoneList final {
V8_INLINE int capacity() const { return capacity_; }
Vector<T> ToVector() const { return Vector<T>(data_, length_); }
+ Vector<T> ToVector(int start, int length) const {
+ return Vector<T>(data_ + start, Min(length_ - start, length));
+ }
Vector<const T> ToConstVector() const {
return Vector<const T>(data_, length_);
@@ -257,7 +277,12 @@ class ZoneList final {
// Drops all but the first 'pos' elements from the list.
V8_INLINE void Rewind(int pos);
- inline bool Contains(const T& elm) const;
+ inline bool Contains(const T& elm) const {
+ for (int i = 0; i < length_; i++) {
+ if (data_[i] == elm) return true;
+ }
+ return false;
+ }
// Iterate through all list entries, starting at index 0.
template <class Visitor>
@@ -301,6 +326,65 @@ class ZoneList final {
template <typename T>
using ZonePtrList = ZoneList<T*>;
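+
+// ScopedPtrList gives a scope a window into a shared growable buffer:
+// pointers pushed while the scope is alive are dropped again by the
+// destructor (via Rewind), unless they are handed to the enclosing scope
+// with MergeInto or copied into a ZonePtrList with CopyTo.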
+template <typename T>
+class ScopedPtrList final {
+ public:
+ explicit ScopedPtrList(std::vector<void*>* buffer)
+ : buffer_(*buffer), start_(buffer->size()), end_(buffer->size()) {}
+
+ ~ScopedPtrList() { Rewind(); }
+
+ void Rewind() {
+ DCHECK_EQ(buffer_.size(), end_);
+ buffer_.resize(start_);
+ end_ = start_;
+ }
+
+ void MergeInto(ScopedPtrList* parent) {
+ DCHECK_EQ(parent->end_, start_);
+ parent->end_ = end_;
+ start_ = end_;
+ DCHECK_EQ(0, length());
+ }
+
+ int length() const { return static_cast<int>(end_ - start_); }
+ T* at(int i) const {
+ size_t index = start_ + i;
+ DCHECK_LE(start_, index);
+ DCHECK_LT(index, buffer_.size());
+ return reinterpret_cast<T*>(buffer_[index]);
+ }
+
+ void CopyTo(ZonePtrList<T>* target, Zone* zone) const {
+ DCHECK_LE(end_, buffer_.size());
+ // Make sure we don't reference absent elements below.
+ if (length() == 0) return;
+ target->Initialize(length(), zone);
+ T** data = reinterpret_cast<T**>(&buffer_[start_]);
+ target->AddAll(Vector<T*>(data, length()), zone);
+ }
+
+ void Add(T* value) {
+ DCHECK_EQ(buffer_.size(), end_);
+ buffer_.push_back(value);
+ ++end_;
+ }
+
+ void AddAll(const ZonePtrList<T>& list) {
+ DCHECK_EQ(buffer_.size(), end_);
+ buffer_.reserve(buffer_.size() + list.length());
+ for (int i = 0; i < list.length(); i++) {
+ buffer_.push_back(list.at(i));
+ }
+ end_ += list.length();
+ }
+
+ private:
+ std::vector<void*>& buffer_;
+ size_t start_;
+ size_t end_;
+};
+
// ZoneThreadedList is a special variant of the ThreadedList that can be put
// into a Zone.
template <typename T, typename TLTraits = base::ThreadedListTraits<T>>
diff --git a/deps/v8/test/BUILD.gn b/deps/v8/test/BUILD.gn
index 45e1b34032..70c8b51fa3 100644
--- a/deps/v8/test/BUILD.gn
+++ b/deps/v8/test/BUILD.gn
@@ -16,6 +16,7 @@ group("gn_all") {
"mozilla:v8_mozilla",
"preparser:v8_preparser",
"test262:v8_test262",
+ "wasm-js:v8_wasm_js",
"wasm-spec-tests:v8_wasm_spec_tests",
"webkit:v8_webkit",
]
@@ -80,6 +81,7 @@ group("v8_bot_default") {
"mkgrokdump:mkgrokdump",
"preparser:v8_preparser",
"unittests:unittests",
+ "wasm-js:v8_wasm_js",
"wasm-spec-tests:v8_wasm_spec_tests",
"webkit:v8_webkit",
]
@@ -99,6 +101,7 @@ group("v8_default") {
"mkgrokdump:mkgrokdump",
"preparser:v8_preparser",
"unittests:unittests",
+ "wasm-js:v8_wasm_js",
"wasm-spec-tests:v8_wasm_spec_tests",
]
}
diff --git a/deps/v8/test/benchmarks/csuite/README.md b/deps/v8/test/benchmarks/csuite/README.md
new file mode 100644
index 0000000000..726133820e
--- /dev/null
+++ b/deps/v8/test/benchmarks/csuite/README.md
@@ -0,0 +1,43 @@
+# CSuite: Local benchmarking help for V8 performance analysis
+
+CSuite helps you make N averaged runs of a benchmark, then compare with
+a different binary and/or different flags. It knows about the "classic"
+benchmarks of SunSpider, Kraken and Octane, which are still useful for
+investigating peak performance scenarios. The default number of runs per
+suite is:
+
+ * SunSpider - 100 runs
+ * Kraken - 80 runs
+ * Octane - 10 runs
+
+# Usage
+
+Say you want to see how much optimization buys you:
+
+ ./csuite.py kraken baseline ~/src/v8/out/d8 -x="--noopt"
+ ./csuite.py kraken compare ~/src/v8/out/d8
+
+
+Suppose you are comparing two binaries, and want a quick look at results.
+Normally, Octane should have about 10 runs, but 3 will only take a few
+minutes:
+
+ ./csuite.py -r 3 octane baseline ~/src/v8/out-master/d8
+ ./csuite.py -r 3 octane compare ~/src/v8/out-mine/d8
+
+You can run from any place:
+
+ ../../somewhere-strange/csuite.py sunspider baseline ./d8
+ ../../somewhere-strange/csuite.py sunspider compare ./d8-better
+
+Note that all output files are created in the directory where you run
+from. A `_benchmark_runner_data` directory will be created to store run
+output, along with a `_results` directory for the scores.
+
+For more detailed documentation, see:
+
+ ./csuite.py --help
+
+Output from the runners is captured into files and cached, so you can cancel
+and resume multi-hour benchmark runs with minimal loss of data/time. The -f
+flag forces re-running even if these cached files still exist.
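+
+CSuite is a thin wrapper over `benchmark.py` and `compare-baseline.py`; when
+you need more control over the exact run command, the runner can also be
+invoked directly. A minimal sketch, assuming a d8 build at `out/d8`:
+
+    ./benchmark.py -c "out/d8 run.js" -r 10 -v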
diff --git a/deps/v8/test/benchmarks/csuite/benchmark.py b/deps/v8/test/benchmarks/csuite/benchmark.py
new file mode 100755
index 0000000000..fee08c8157
--- /dev/null
+++ b/deps/v8/test/benchmarks/csuite/benchmark.py
@@ -0,0 +1,220 @@
+#!/usr/bin/python
+# Copyright 2018 the V8 project authors. All rights reserved.
+
+'''
+python %prog -c <command> [options]
+
+Local benchmark runner.
+The -c option is mandatory.
+'''
+
+import math
+from optparse import OptionParser
+import os
+import re
+import subprocess
+import sys
+import time
+
+def GeometricMean(numbers):
+ log = sum([math.log(n) for n in numbers])
+ return math.pow(math.e, log / len(numbers))
+
+
+class BenchmarkSuite(object):
+
+ def __init__(self, name):
+ self.name = name
+ self.results = {}
+ self.tests = []
+ self.avgresult = {}
+ self.sigmaresult = {}
+ self.numresult = {}
+ self.kClassicScoreSuites = ["SunSpider", "Kraken"]
+ self.kGeometricScoreSuites = ["Octane"]
+
+
+ def RecordResult(self, test, result):
+ if test not in self.tests:
+ self.tests += [test]
+ self.results[test] = []
+ self.results[test] += [int(result)]
+
+ def ThrowAwayWorstResult(self, results):
+ if len(results) <= 1: return
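+ # results is sorted ascending by ProcessResults. SunSpider and Kraken
+ # report times (lower is better), so the worst result is the largest
+ # and is popped from the end; Octane reports scores (higher is better),
+ # so the worst result is the smallest and is dropped from the front.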
+ if self.name in self.kClassicScoreSuites:
+ results.pop()
+ elif self.name in self.kGeometricScoreSuites:
+ del results[0]
+
+ def ProcessResults(self, opts):
+ for test in self.tests:
+ results = self.results[test]
+ results.sort()
+ self.ThrowAwayWorstResult(results)
+ mean = sum(results) * 1.0 / len(results)
+ self.avgresult[test] = mean
+ sigma_divisor = len(results) - 1
+ if sigma_divisor == 0:
+ sigma_divisor = 1
+ self.sigmaresult[test] = math.sqrt(
+ sum((x - mean) ** 2 for x in results) / sigma_divisor)
+ self.numresult[test] = len(results)
+ if opts.verbose:
+ if test not in ["Octane"]:
+ print("%s,%.1f,%.2f,%d" %
+ (test, self.avgresult[test],
+ self.sigmaresult[test], self.numresult[test]))
+
+ def ComputeScoreGeneric(self):
+ self.score = 0
+ self.sigma = 0
+ for test in self.tests:
+ self.score += self.avgresult[test]
+ self.sigma += self.sigmaresult[test]
+ self.num = self.numresult[test]
+
+ def ComputeScoreV8Octane(self, name):
+ # The score for the run is stored with the form
+ # "Octane-octane2.1(Score): <score>"
+ found_name = ''
+ for s in self.avgresult.keys():
+ if re.search("^Octane", s):
+ found_name = s
+ break
+
+ self.score = self.avgresult[found_name]
+ self.sigma = 0
+ for test in self.tests:
+ self.sigma += self.sigmaresult[test]
+ self.num = self.numresult[test]
+ self.sigma /= len(self.tests)
+
+ def ComputeScore(self):
+ if self.name in self.kClassicScoreSuites:
+ self.ComputeScoreGeneric()
+ elif self.name in self.kGeometricScoreSuites:
+ self.ComputeScoreV8Octane(self.name)
+ else:
+ print "Don't know how to compute score for suite: '%s'" % self.name
+
+ def IsBetterThan(self, other):
+ if self.name in self.kClassicScoreSuites:
+ return self.score < other.score
+ elif self.name in self.kGeometricScoreSuites:
+ return self.score > other.score
+ else:
+ print "Don't know how to compare score for suite: '%s'" % self.name
+
+
+class BenchmarkRunner(object):
+ def __init__(self, args, current_directory, opts):
+ self.best = {}
+ self.second_best = {}
+ self.args = args
+ self.opts = opts
+ self.current_directory = current_directory
+ self.outdir = os.path.join(opts.cachedir, "_benchmark_runner_data")
+
+ def Run(self):
+ if not os.path.exists(self.outdir):
+ os.mkdir(self.outdir)
+
+ self.RunCommand()
+ # Figure out the suite from the command line (heuristic) or the current
+ # working directory.
+ teststr = self.opts.command.lower() + " " + self.current_directory.lower()
+ if teststr.find('octane') >= 0:
+ suite = 'Octane'
+ elif teststr.find('sunspider') >= 0:
+ suite = 'SunSpider'
+ elif teststr.find('kraken') >= 0:
+ suite = 'Kraken'
+ else:
+ suite = 'Generic'
+
+ self.ProcessOutput(suite)
+
+ def RunCommand(self):
+ for i in range(self.opts.runs):
+ outfile = "%s/out.%d.txt" % (self.outdir, i)
+ if os.path.exists(outfile) and not self.opts.force:
+ continue
+ print "run #%d" % i
+ cmdline = "%s > %s" % (self.opts.command, outfile)
+ subprocess.call(cmdline, shell=True)
+ time.sleep(self.opts.sleep)
+
+ def ProcessLine(self, line):
+ # Octane prints this separator line just before the final score.
+ if line == "----":
+ return (None, None)
+
+ # Kraken or Sunspider?
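+ # e.g. "ai-astar(RunTime): 530 ms." (test name and value illustrative)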
+ g = re.match("(?P<test_name>\w+(-\w+)*)\(RunTime\): (?P<score>\d+) ms\.", \
+ line)
+ if g == None:
+ # Octane?
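+ # e.g. "Richards: 26418", or for the overall score line matched next,
+ # "Score (version 9): 38451" (values illustrative)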
+ g = re.match("(?P<test_name>\w+): (?P<score>\d+)", line)
+ if g == None:
+ g = re.match("Score \(version [0-9]+\): (?P<score>\d+)", line)
+ if g != None:
+ return ('Octane', g.group('score'))
+ else:
+ # Generic?
+ g = re.match("(?P<test_name>\w+)\W+(?P<score>\d+)", line)
+ if g == None:
+ return (None, None)
+ return (g.group('test_name'), g.group('score'))
+
+ def ProcessOutput(self, suitename):
+ suite = BenchmarkSuite(suitename)
+ for i in range(self.opts.runs):
+ outfile = "%s/out.%d.txt" % (self.outdir, i)
+ with open(outfile, 'r') as f:
+ for line in f:
+ (test, result) = self.ProcessLine(line)
+ if test != None:
+ suite.RecordResult(test, result)
+
+ suite.ProcessResults(self.opts)
+ suite.ComputeScore()
+ print ("%s,%.1f,%.2f,%d " %
+ (suite.name, suite.score, suite.sigma, suite.num)),
+ if self.opts.verbose:
+ print ""
+ print ""
+
+
+if __name__ == '__main__':
+ parser = OptionParser(usage=__doc__)
+ parser.add_option("-c", "--command", dest="command",
+ help="Command to run the test suite.")
+ parser.add_option("-r", "--runs", dest="runs", default=4,
+ help="Number of runs")
+ parser.add_option("-v", "--verbose", dest="verbose", action="store_true",
+ default=False, help="Print results for each test")
+ parser.add_option("-f", "--force", dest="force", action="store_true",
+ default=False,
+ help="Force re-run even if output files exist")
+ parser.add_option("-z", "--sleep", dest="sleep", default=0,
+ help="Number of seconds to sleep between runs")
+ parser.add_option("-d", "--run-directory", dest="cachedir",
+ help="Directory where a cache directory will be created")
+ (opts, args) = parser.parse_args()
+ opts.runs = int(opts.runs)
+ opts.sleep = int(opts.sleep)
+
+ if not opts.command:
+ print "You must specify the command to run (-c). Aborting."
+ sys.exit(1)
+
+ cachedir = os.path.abspath(os.getcwd())
+ if not opts.cachedir:
+ opts.cachedir = cachedir
+ if not os.path.exists(opts.cachedir):
+ print "Directory " + opts.cachedir + " is not valid. Aborting."
+ sys.exit(1)
+
+ br = BenchmarkRunner(args, os.getcwd(), opts)
+ br.Run()
diff --git a/deps/v8/test/benchmarks/csuite/compare-baseline.py b/deps/v8/test/benchmarks/csuite/compare-baseline.py
new file mode 100755
index 0000000000..36ebd2cf0e
--- /dev/null
+++ b/deps/v8/test/benchmarks/csuite/compare-baseline.py
@@ -0,0 +1,264 @@
+#!/usr/bin/python
+# Copyright 2018 the V8 project authors. All rights reserved.
+
+'''
+python %prog [options] [baseline_files]
+
+Compare benchmark results from the benchmark runner against one or
+more baselines. You can either pipe the result of the benchmark
+runner directly into this script or specify the results file with
+the -f option.
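+
+A typical pipeline (paths and file names are illustrative) might be:
+
+  benchmark.py -c "out/d8 run.js" | compare-baseline.py baseline.csv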
+'''
+
+import csv
+import math
+from optparse import OptionParser
+import os
+import sys
+
+PERCENT_CONSIDERED_SIGNIFICANT = 0.5
+PROBABILITY_CONSIDERED_SIGNIFICANT = 0.02
+PROBABILITY_CONSIDERED_MEANINGLESS = 0.05
+
+RESET_SEQ = "\033[0m"
+RED_SEQ = "\033[31m"
+GREEN_SEQ = "\033[32m"
+BLUE_SEQ = "\033[34m"
+BOLD_SEQ = "\033[1m"
+
+v8_benchmarks = ["V8", "Octane", "Richards", "DeltaBlue", "Crypto",
+ "EarleyBoyer", "RayTrace", "RegExp", "Splay", "SplayLatency",
+ "NavierStokes", "PdfJS", "Mandreel", "MandreelLatency",
+ "Gameboy", "CodeLoad", "Box2D", "zlib", "Typescript"]
+
+suite_names = ["V8", "Octane", "Kraken-Orig", "Kraken-Once", "Kraken",
+ "SunSpider", "SunSpider-Once", "SunSpider-Orig"]
+
+def ColorText(opts, text):
+ if opts.no_color:
+ result = text.replace("$RESET", "")
+ result = result.replace("$BLUE", "")
+ result = result.replace("$RED", "")
+ result = result.replace("$GREEN", "")
+ result = result.replace("$BOLD", "")
+ else:
+ if opts.html:
+ result = text.replace("$RESET", "</font></b>")
+ result = result.replace("$BLUE", "<font COLOR=\"0000DD\">")
+ result = result.replace("$RED", "<font COLOR=\"DD0000\">")
+ result = result.replace("$GREEN", "<font COLOR=\"00DD00\">")
+ result = result.replace("$BOLD", "<b>")
+ else:
+ result = text.replace("$RESET", RESET_SEQ)
+ result = result.replace("$BLUE", BLUE_SEQ)
+ result = result.replace("$RED", RED_SEQ)
+ result = result.replace("$GREEN", GREEN_SEQ)
+ result = result.replace("$BOLD", BOLD_SEQ)
+ return result
+
+def NormalizedSigmaToString(normalized_sigma):
+ assert normalized_sigma >= 0
+ if normalized_sigma < PROBABILITY_CONSIDERED_SIGNIFICANT:
+ return "|"
+ return "S"
+
+def ComputeZ(baseline_avg, baseline_sigma, mean, n):
+ if baseline_sigma == 0:
+ return 1000.0
+ return abs((mean - baseline_avg) / (baseline_sigma / math.sqrt(n)))
+
+# Values from http://www.fourmilab.ch/rpkp/experiments/analysis/zCalc.html
+def ComputeProbability(z):
+ if z > 2.575829: # p 0.005: two sided < 0.01
+ return 0
+ if z > 2.326348: # p 0.010
+ return 0.01
+ if z > 2.170091: # p 0.015
+ return 0.02
+ if z > 2.053749: # p 0.020
+ return 0.03
+ if z > 1.959964: # p 0.025: two sided < 0.05
+ return 0.04
+ if z > 1.880793: # p 0.030
+ return 0.05
+ if z > 1.811910: # p 0.035
+ return 0.06
+ if z > 1.750686: # p 0.040
+ return 0.07
+ if z > 1.695397: # p 0.045
+ return 0.08
+ if z > 1.644853: # p 0.050: two sided < 0.10
+ return 0.09
+ if z > 1.281551: # p 0.100: two sided < 0.20
+ return 0.10
+ return 0.20 # two sided p >= 0.20
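+
+# Worked example: a baseline of 1000 +/- 20 over n = 10 runs and a new mean
+# of 1015 give z = |1015 - 1000| / (20 / sqrt(10)) ~= 2.37, so
+# ComputeProbability(z) returns 0.01 and the change counts as significant.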
+
+def PercentColor(change_percent, flakyness):
+ result = ""
+ if change_percent >= PERCENT_CONSIDERED_SIGNIFICANT:
+ result = "$GREEN"
+ elif change_percent <= -PERCENT_CONSIDERED_SIGNIFICANT:
+ result = "$RED"
+ else:
+ return ""
+ if flakyness < PROBABILITY_CONSIDERED_SIGNIFICANT:
+ result += "$BOLD"
+ elif flakyness > PROBABILITY_CONSIDERED_MEANINGLESS:
+ result = ""
+ return result
+
+def ProcessOneResultLine(opts, suite, testname, time, sigma, num, baselines):
+ time = float(time)
+ sigma = float(sigma)
+ num = int(num)
+ if testname in suite_names:
+ base_color = "$BOLD"
+ else:
+ base_color = ""
+ if opts.html:
+ line_out = ("<tr><td>%s%s$RESET</td><td>%s%8.1f$RESET</td>" %
+ (base_color, testname, base_color, time))
+ else:
+ sigma_string = NormalizedSigmaToString(sigma / time)
+ line_out = ("%s%40s$RESET: %s%8.1f$RESET %s" %
+ (base_color, testname, base_color, time, sigma_string))
+ for baseline in baselines:
+ raw_score = ""
+ compare_score = ""
+ found = False
+ if suite in baseline[1]:
+ baseline_results = baseline[1][suite]
+ for item in baseline_results:
+ if testname == item[0]:
+ found = True
+ raw_score_num = float(item[1])
+ raw_sigma_num = float(item[2])
+ raw_score = "%7.1f" % raw_score_num
+ compare_num = 0
+ compare_score = ""
+ percent_color = ""
+ if testname in v8_benchmarks:
+ compare_num = 100*time/raw_score_num - 100
+ else:
+ compare_num = 100*raw_score_num/time - 100
+ if abs(compare_num) > 0.1:
+ compare_score = "%3.1f" % (compare_num)
+ z = ComputeZ(raw_score_num, raw_sigma_num, time, num)
+ p = ComputeProbability(z)
+ percent_color = PercentColor(compare_num, p)
+ sigma_string = NormalizedSigmaToString(raw_sigma_num / raw_score_num)
+ if opts.html:
+ format_string = "<td>%s%8s$RESET</td><td>%s%6s$RESET</td>"
+ else:
+ format_string = " %s%8s$RESET %s %s%6s$RESET |"
+ line_out += (format_string %
+ (base_color, raw_score, sigma_string,
+ percent_color, compare_score))
+ if not found:
+ if opts.html:
+ line_out += "<td></td><td></td>"
+ else:
+ line_out += "| | "
+ if opts.html:
+ line_out += "</tr>"
+ print(ColorText(opts, line_out))
+
+def PrintSeparator(opts, baselines, big):
+ if not opts.html:
+ if big:
+ separator = "==================================================="
+ else:
+ separator = "---------------------------------------------------"
+ for baseline in baselines:
+ if big:
+ separator += "+==========+========"
+ else:
+ separator += "+----------+--------"
+ separator += "+"
+ print(separator)
+
+def ProcessResults(opts, results, baselines):
+ for suite in suite_names:
+ if suite in results:
+ for result in results[suite]:
+ ProcessOneResultLine(opts, suite, result[0], result[1], result[2],
+ result[3], baselines)
+ PrintSeparator(opts, baselines, False)
+
+def ProcessFile(file_path):
+ file_reader = csv.reader(open(file_path, 'rb'), delimiter=',')
+ benchmark_results = {}
+ current_rows = []
+ for row in file_reader:
+ if len(row) > 1:
+ current_rows.append(row)
+ for suite in suite_names:
+ if row[0] == suite:
+ benchmark_results[row[0]] = current_rows
+ current_rows = []
+ return benchmark_results
+
+def ProcessStdIn():
+ benchmark_results = {}
+ current_rows = []
+ for line_in in sys.stdin:
+ line_in = line_in.rstrip()
+ row = line_in.split(",")
+ if len(row) > 1:
+ current_rows.append(row)
+ for suite in suite_names:
+ if row[0] == suite:
+ benchmark_results[row[0]] = current_rows
+ current_rows = []
+ return benchmark_results
+
+def CompareFiles(opts, args):
+ results = []
+ baselines = []
+ for file_path in args:
+ baseline = ProcessFile(file_path)
+ baselines.append((os.path.basename(file_path), baseline))
+ if opts.html:
+ header = "<tr><th>benchmark</th><th>score</th>"
+ else:
+ header = "%40s: %8s " % ("benchmark", "score")
+ for baseline in baselines:
+ (baseline_name, baseline_results) = baseline
+ if opts.html:
+ header += ("<th>%s</th><th>%s</th>") % (baseline_name[0:7], "%")
+ else:
+ header += "| %8s | %6s " % (baseline_name[0:7], "%")
+ if opts.html:
+ header += "</tr>\n"
+ else:
+ header += "|"
+ print(header)
+ PrintSeparator(opts, baselines, True)
+ if opts.filename:
+ results = ProcessFile(opts.filename)
+ else:
+ results = ProcessStdIn()
+ ProcessResults(opts, results, baselines)
+
+if __name__ == '__main__':
+ parser = OptionParser(usage=__doc__)
+ parser.add_option("-f", "--filename", dest="filename",
+ help="Specifies the filename for the results to "\
+"compare to the baselines rather than reading from stdin.")
+ parser.add_option("-b", "--baselines", dest="baselines",
+ help="Specifies a directory of baseline files to "\
+"compare against.")
+ parser.add_option("-n", "--no-color", action="store_true",
+ dest="no_color", default=False,
+ help="Generates output without escape codes that "\
+"add color highlights.")
+ parser.add_option("--html", action="store_true",
+ dest="html", default=False,
+ help="Generates output as a HTML table ")
+ (opts, args) = parser.parse_args()
+ if opts.baselines:
+ args.extend(map(lambda x: (opts.baselines + "/" + x),
+ (os.listdir(opts.baselines))))
+ args = reversed(sorted(args))
+ CompareFiles(opts, args)
diff --git a/deps/v8/test/benchmarks/csuite/csuite.py b/deps/v8/test/benchmarks/csuite/csuite.py
new file mode 100755
index 0000000000..25b447efb8
--- /dev/null
+++ b/deps/v8/test/benchmarks/csuite/csuite.py
@@ -0,0 +1,154 @@
+#!/usr/bin/python
+# Copyright 2018 the V8 project authors. All rights reserved.
+'''
+C S u i t e because who can remember?
+-----------------------------------------------------------------------------
+python csuite.py [options] <benchmark> <mode> <d8 path>
+
+Arguments
+ benchmark: one of octane, sunspider or kraken.
+ mode: baseline or compare.
+ d8 path: a valid path to the d8 executable you want to use.
+
+CSuite is a wrapper around benchmark.py and compare-baseline.py, old
+friends in the d8 benchmarking world. Unlike those tools, it can be
+run in any directory. It's also opinionated about which benchmarks it
+will run, currently SunSpider, Octane and Kraken. Furthermore, it
+runs the versions we pull into ./test/benchmarks/data.
+
+Examples:
+
+Say you want to see how much optimization buys you:
+ ./csuite.py kraken baseline ~/src/v8/out/d8 -x="--noopt"
+ ./csuite.py kraken compare ~/src/v8/out/d8
+
+Suppose you are comparing two binaries, quick n' dirty style:
+ ./csuite.py -r 3 octane baseline ~/src/v8/out-master/d8
+ ./csuite.py -r 3 octane compare ~/src/v8/out-mine/d8
+
+You can run from any place:
+ ../../somewhere-strange/csuite.py sunspider baseline ./d8
+ ../../somewhere-strange/csuite.py sunspider compare ./d8-better
+'''
+
+import os
+from optparse import OptionParser
+import subprocess
+import sys
+
+if __name__ == '__main__':
+ parser = OptionParser(usage=__doc__)
+ parser.add_option("-r", "--runs", dest="runs",
+ help="Override the default number of runs for the benchmark.")
+ parser.add_option("-x", "--extra-arguments", dest="extra_args",
+ help="Pass these extra arguments to d8.")
+ parser.add_option("-v", "--verbose", action="store_true", dest="verbose",
+ help="See more output about what magic csuite is doing.")
+ (opts, args) = parser.parse_args()
+
+ if len(args) < 3:
+    print 'Not enough arguments. Aborting.'
+ sys.exit(1)
+
+ suite = args[0]
+ mode = args[1]
+
+ if suite not in ['octane', 'sunspider', 'kraken']:
+ print 'Suite must be octane, sunspider or kraken. Aborting.'
+ sys.exit(1)
+
+ if mode != 'baseline' and mode != 'compare':
+    print 'Mode must be baseline or compare. Aborting.'
+ sys.exit(1)
+
+ # Set up paths.
+ d8_path = os.path.abspath(args[2])
+ if not os.path.exists(d8_path):
+ print d8_path + " is not valid."
+ sys.exit(1)
+
+ csuite_path = os.path.dirname(os.path.abspath(__file__))
+ if not os.path.exists(csuite_path):
+ print "The csuite directory is invalid."
+ sys.exit(1)
+
+ benchmark_py_path = os.path.join(csuite_path, "benchmark.py")
+ if not os.path.exists(benchmark_py_path):
+ print "Unable to find benchmark.py in " + output_path_base \
+ + ". Aborting."
+ sys.exit(1)
+
+ compare_baseline_py_path = os.path.join(csuite_path,
+ "compare-baseline.py")
+
+ if not os.path.exists(compare_baseline_py_path):
+ print "Unable to find compare-baseline.py in " + output_path_base \
+ + ". Aborting."
+ sys.exit(1)
+
+ benchmark_path = os.path.abspath(os.path.join(csuite_path, "../data"))
+ if not os.path.exists(benchmark_path):
+ print "I can't find the benchmark data directory. Aborting."
+ sys.exit(1)
+
+  # Build the string of extra arguments (-x/--extra-arguments) to pass to d8.
+ extra_args = ""
+ if opts.extra_args:
+ extra_args = opts.extra_args
+
+ if suite == "octane":
+ runs = 10
+ suite_path = os.path.join(benchmark_path, "octane")
+ cmd = "run.js"
+ elif suite == "kraken":
+ runs = 80
+ suite_path = os.path.join(benchmark_path, "kraken")
+ cmd = os.path.join(csuite_path, "run-kraken.js")
+ else:
+ runs = 100
+ suite_path = os.path.join(benchmark_path, "sunspider")
+ cmd = os.path.join(csuite_path, "sunspider-standalone-driver.js")
+
+ if opts.runs:
+ if (float(opts.runs) / runs) < 0.6:
+ print "Normally, %s requires %d runs to get stable results." \
+ % (suite, runs)
+ runs = int(opts.runs)
+
+ if opts.verbose:
+ print "Running and averaging %s %d times." % (suite, runs)
+
+  # Ensure the output directory is set up.
+ output_path_base = os.path.abspath(os.getcwd())
+ output_path = os.path.join(output_path_base, "_results")
+ output_file = os.path.join(output_path, "master")
+ if not os.path.exists(output_path):
+ if opts.verbose:
+ print "Creating directory %s." % output_path
+ os.mkdir(output_path)
+
+ if opts.verbose:
+ print "Working directory for runs is %s." % suite_path
+
+ inner_command = " -c \"%s --expose-gc %s %s \"" \
+ % (d8_path, extra_args, cmd)
+ if opts.verbose:
+ print "calling d8 like so: %s." % inner_command
+
+ cmdline_base = "python %s %s -fv -r %d -d %s" \
+ % (benchmark_py_path, inner_command, runs, output_path_base)
+
+ if mode == "baseline":
+ cmdline = "%s > %s" % (cmdline_base, output_file)
+ else:
+ cmdline = "%s | %s %s" \
+ % (cmdline_base, compare_baseline_py_path, output_file)
+
+ if opts.verbose:
+ print "Spawning subprocess: %s." % cmdline
+ return_code = subprocess.call(cmdline, shell=True, cwd=suite_path)
+ if return_code < 0:
+ print "Error return code: %d." % return_code
+ if mode == "baseline":
+ print "Wrote %s." % output_file
+ print "Run %s again with compare mode to see results." % suite
diff --git a/deps/v8/test/mjsunit/regress/regress-85177.js b/deps/v8/test/benchmarks/csuite/run-kraken.js
index aa938f5e24..f3c9b25597 100644
--- a/deps/v8/test/mjsunit/regress/regress-85177.js
+++ b/deps/v8/test/benchmarks/csuite/run-kraken.js
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2018 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -25,40 +25,39 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --allow-natives-syntax
+// This file was copied from the output of the
+// http://hg.mozilla.org/projects/kraken/sunspider script.
-gW=gH=175;
-g=[];
+var benchmarkPath = "";
+var tests = [ "ai-astar", "audio-beat-detection", "audio-dft", "audio-fft", "audio-oscillator", "imaging-gaussian-blur", "imaging-darkroom", "imaging-desaturate", "json-parse-financial", "json-stringify-tinderbox", "stanford-crypto-aes", "stanford-crypto-ccm", "stanford-crypto-pbkdf2", "stanford-crypto-sha256-iterative" ];
+var categories = [ "ai", "audio", "imaging", "json", "stanford" ];
+var results = new Array();
-for(var n=0; n<gW; n++){
- var l=[];
- for(var p=0; p<gH; p++){
- l.push(1)
- }
- g.push(l)
-}
+var time = 0;
+var times = [];
+
+times.length = tests.length;
-function k(a,b){
- if(a<0||b<0||a>=gW||b>=gH)
- return 0;
- return g[a][b];
+for (var krakenCounter = 0; krakenCounter < tests.length; krakenCounter++) {
+ var testBase = benchmarkPath + tests[krakenCounter];
+ var testName = testBase + ".js";
+ var testData = testBase + "-data.js";
+ // load test data
+ load(testData);
+ var startTime = new Date;
+ load(testName);
+ times[krakenCounter] = new Date() - startTime;
+ gc();
}
-function f(){
- for(var a=[],f=0; f<gW; f++){
- var b=[];
- for(var h=0; h<gH; h++){
- var e=0;
- for(var i=-1; i<=1; i++)
- for(var j=-1; j<=1; j++)
- e+=k(f+i,h+j);
- e=k(f,h)==1?1:0;
- b.push(e)
- }
- a.push(b)
- }
+function recordResults(tests, times)
+{
+ var output = "";
+
+  for (var j = 0; j < tests.length; j++) {
+ output += tests[j] + '-orig(RunTime): ' + times[j] + ' ms.\n';
+ }
+ print(output);
}
-f();
-%OptimizeFunctionOnNextCall(f);
-f();
+recordResults(tests, times);
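
For each test, the driver above loads the test's -data.js file, times the main
script via Date arithmetic, and forces a gc() between tests. recordResults
then prints one line per test in the form the harness scrapes; e.g. (the time
is made up):

    ai-astar-orig(RunTime): 131 ms.
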
diff --git a/deps/v8/test/benchmarks/csuite/sunspider-standalone-driver.js b/deps/v8/test/benchmarks/csuite/sunspider-standalone-driver.js
new file mode 100644
index 0000000000..d9b767a267
--- /dev/null
+++ b/deps/v8/test/benchmarks/csuite/sunspider-standalone-driver.js
@@ -0,0 +1,75 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+/*
+ * Copyright (C) 2007 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+var suitePath = "sunspider-0.9.1";
+var tests = [ "3d-cube", "3d-morph", "3d-raytrace",
+ "access-binary-trees", "access-fannkuch",
+ "access-nbody", "access-nsieve",
+ "bitops-3bit-bits-in-byte", "bitops-bits-in-byte",
+ "bitops-bitwise-and", "bitops-nsieve-bits",
+ "controlflow-recursive", "crypto-aes",
+ "crypto-md5", "crypto-sha1", "date-format-tofte",
+ "date-format-xparb", "math-cordic", "math-partial-sums",
+ "math-spectral-norm", "regexp-dna", "string-base64",
+ "string-fasta", "string-tagcloud", "string-unpack-code",
+ "string-validate-input" ];
+var categories = [ "3d", "access", "bitops", "controlflow", "crypto",
+ "date", "math", "regexp", "string" ];
+
+var results = new Array();
+
+(function(){
+
+var time = 0;
+var times = [];
+times.length = tests.length;
+
+for (var j = 0; j < tests.length; j++) {
+ var testName = tests[j] + ".js";
+ var startTime = new Date;
+ if (testName.indexOf('parse-only') >= 0)
+ checkSyntax(testName);
+ else
+ load(testName);
+ times[j] = new Date() - startTime;
+ gc();
+}
+
+function recordResults(tests, times)
+{
+ var output = "";
+ // Changed original output to match test infrastructure.
+  for (var j = 0; j < tests.length; j++) {
+ output += tests[j] + '-sunspider(RunTime): ' +
+ Math.max(times[j], 1) + ' ms.\n';
+ }
+
+ print(output);
+}
+
+recordResults(tests, times);
+
+})();
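
As with the Kraken driver, output is one line per test in the format the test
infrastructure parses, with each time clamped to at least 1 ms; e.g.
(illustrative):

    3d-cube-sunspider(RunTime): 12 ms.
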
diff --git a/deps/v8/test/cctest/BUILD.gn b/deps/v8/test/cctest/BUILD.gn
index df37a3d4c5..9c18ce5806 100644
--- a/deps/v8/test/cctest/BUILD.gn
+++ b/deps/v8/test/cctest/BUILD.gn
@@ -97,7 +97,6 @@ v8_source_set("cctest_sources") {
"compiler/test-run-native-calls.cc",
"compiler/test-run-retpoline.cc",
"compiler/test-run-stackcheck.cc",
- "compiler/test-run-stubs.cc",
"compiler/test-run-tail-calls.cc",
"compiler/test-run-unwinding-info.cc",
"compiler/test-run-variables.cc",
@@ -180,6 +179,7 @@ v8_source_set("cctest_sources") {
"test-double.cc",
"test-dtoa.cc",
"test-elements-kind.cc",
+ "test-factory.cc",
"test-fast-dtoa.cc",
"test-feedback-vector.cc",
"test-feedback-vector.h",
@@ -192,11 +192,12 @@ v8_source_set("cctest_sources") {
"test-hashcode.cc",
"test-hashmap.cc",
"test-heap-profiler.cc",
+ "test-icache.cc",
"test-identity-map.cc",
"test-inobject-slack-tracking.cc",
"test-inspector.cc",
"test-intl.cc",
- "test-isolate-independent-builtins.cc",
+ "test-js-weak-refs.cc",
"test-liveedit.cc",
"test-lockers.cc",
"test-log.cc",
@@ -229,6 +230,7 @@ v8_source_set("cctest_sources") {
"test-unbound-queue.cc",
"test-unboxed-doubles.cc",
"test-unscopables-hidden-prototype.cc",
+ "test-unwinder.cc",
"test-usecounters.cc",
"test-utils.cc",
"test-version.cc",
@@ -255,6 +257,7 @@ v8_source_set("cctest_sources") {
"wasm/test-streaming-compilation.cc",
"wasm/test-wasm-breakpoints.cc",
"wasm/test-wasm-codegen.cc",
+ "wasm/test-wasm-import-wrapper-cache.cc",
"wasm/test-wasm-interpreter-entry.cc",
"wasm/test-wasm-serialization.cc",
"wasm/test-wasm-shared-engine.cc",
@@ -270,10 +273,8 @@ v8_source_set("cctest_sources") {
# TODO(mostynb@opera.com): figure out the jumbo issues with these source
# files, and include them in jumbo compilation units.
"interpreter/bytecode-expectations-printer.cc",
- "interpreter/bytecode-expectations-printer.h",
"interpreter/test-bytecode-generator.cc",
"test-api.cc",
- "test-api.h",
]
}
@@ -282,9 +283,6 @@ v8_source_set("cctest_sources") {
"assembler-helper-arm.cc",
"assembler-helper-arm.h",
"test-assembler-arm.cc",
- "test-code-stubs-arm.cc",
- "test-code-stubs.cc",
- "test-code-stubs.h",
"test-disasm-arm.cc",
"test-macro-assembler-arm.cc",
"test-poison-disasm-arm.cc",
@@ -293,9 +291,6 @@ v8_source_set("cctest_sources") {
} else if (v8_current_cpu == "arm64") {
sources += [ ### gcmole(arch:arm64) ###
"test-assembler-arm64.cc",
- "test-code-stubs-arm64.cc",
- "test-code-stubs.cc",
- "test-code-stubs.h",
"test-disasm-arm64.cc",
"test-fuzz-arm64.cc",
"test-javascript-arm64.cc",
@@ -307,54 +302,36 @@ v8_source_set("cctest_sources") {
} else if (v8_current_cpu == "x86") {
sources += [ ### gcmole(arch:ia32) ###
"test-assembler-ia32.cc",
- "test-code-stubs-ia32.cc",
- "test-code-stubs.cc",
- "test-code-stubs.h",
"test-disasm-ia32.cc",
"test-log-stack-tracer.cc",
]
} else if (v8_current_cpu == "mips") {
sources += [ ### gcmole(arch:mips) ###
"test-assembler-mips.cc",
- "test-code-stubs-mips.cc",
- "test-code-stubs.cc",
- "test-code-stubs.h",
"test-disasm-mips.cc",
"test-macro-assembler-mips.cc",
]
} else if (v8_current_cpu == "mipsel") {
sources += [ ### gcmole(arch:mipsel) ###
"test-assembler-mips.cc",
- "test-code-stubs-mips.cc",
- "test-code-stubs.cc",
- "test-code-stubs.h",
"test-disasm-mips.cc",
"test-macro-assembler-mips.cc",
]
} else if (v8_current_cpu == "mips64") {
sources += [ ### gcmole(arch:mips64) ###
"test-assembler-mips64.cc",
- "test-code-stubs-mips64.cc",
- "test-code-stubs.cc",
- "test-code-stubs.h",
"test-disasm-mips64.cc",
"test-macro-assembler-mips64.cc",
]
} else if (v8_current_cpu == "mips64el") {
sources += [ ### gcmole(arch:mips64el) ###
"test-assembler-mips64.cc",
- "test-code-stubs-mips64.cc",
- "test-code-stubs.cc",
- "test-code-stubs.h",
"test-disasm-mips64.cc",
"test-macro-assembler-mips64.cc",
]
} else if (v8_current_cpu == "x64") {
sources += [ ### gcmole(arch:x64) ###
"test-assembler-x64.cc",
- "test-code-stubs-x64.cc",
- "test-code-stubs.cc",
- "test-code-stubs.h",
"test-disasm-x64.cc",
"test-log-stack-tracer.cc",
"test-macro-assembler-x64.cc",
@@ -362,15 +339,11 @@ v8_source_set("cctest_sources") {
} else if (v8_current_cpu == "ppc" || v8_current_cpu == "ppc64") {
sources += [ ### gcmole(arch:ppc) ###
"test-assembler-ppc.cc",
- "test-code-stubs.cc",
- "test-code-stubs.h",
"test-disasm-ppc.cc",
]
} else if (v8_current_cpu == "s390" || v8_current_cpu == "s390x") {
sources += [ ### gcmole(arch:s390) ###
"test-assembler-s390.cc",
- "test-code-stubs.cc",
- "test-code-stubs.h",
"test-disasm-s390.cc",
]
}
@@ -418,7 +391,9 @@ v8_source_set("cctest_sources") {
v8_current_cpu == "mipsel" || v8_current_cpu == "mipsel64") {
# Disable fmadd/fmsub so that expected results match generated code in
# RunFloat64MulAndFloat64Add1 and friends.
- cflags += [ "-ffp-contract=off" ]
+ if (!is_win) {
+ cflags += [ "-ffp-contract=off" ]
+ }
}
if (is_win) {
@@ -437,12 +412,6 @@ action("resources") {
script = "../../tools/js2c.py"
- # The script depends on this other script, this rule causes a rebuild if it
- # changes.
- inputs = [
- "../../tools/jsmin.py",
- ]
-
# NOSORT
sources = [
"../../tools/splaytree.js",
@@ -486,3 +455,12 @@ v8_executable("generate-bytecode-expectations") {
"//build/win:default_exe_manifest",
]
}
+
+# Target to generate all .cc files.
+group("v8_generated_cc_files") {
+ testonly = true
+
+ deps = [
+ ":resources",
+ ]
+}
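
With the group target added above, all generated .cc files can presumably be
produced in one step with a plain ninja invocation (the output directory name
below is illustrative):

    ninja -C out/x64.release v8_generated_cc_files
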
diff --git a/deps/v8/test/cctest/OWNERS b/deps/v8/test/cctest/OWNERS
index 43d91faf84..30fc172657 100644
--- a/deps/v8/test/cctest/OWNERS
+++ b/deps/v8/test/cctest/OWNERS
@@ -1,4 +1,5 @@
-per-file *-mips*=ibogosavljevic@wavecomp.com
+per-file *-mips*=arikalo@wavecomp.com
+per-file *-mips*=prudic@wavecomp.com
per-file *-mips*=skovacevic@wavecomp.com
per-file *-ppc*=dstence@us.ibm.com
per-file *-ppc*=joransiu@ca.ibm.com
diff --git a/deps/v8/test/cctest/assembler-helper-arm.cc b/deps/v8/test/cctest/assembler-helper-arm.cc
index b2533d1430..bb3ed9eb4c 100644
--- a/deps/v8/test/cctest/assembler-helper-arm.cc
+++ b/deps/v8/test/cctest/assembler-helper-arm.cc
@@ -14,7 +14,7 @@ namespace internal {
Handle<Code> AssembleCodeImpl(std::function<void(Assembler&)> assemble) {
Isolate* isolate = CcTest::i_isolate();
- Assembler assm(AssemblerOptions{}, nullptr, 0);
+ Assembler assm(AssemblerOptions{});
assemble(assm);
assm.bx(lr);
diff --git a/deps/v8/test/cctest/assembler-helper-arm.h b/deps/v8/test/cctest/assembler-helper-arm.h
index c0b0cf8255..1f7c0ff9ad 100644
--- a/deps/v8/test/cctest/assembler-helper-arm.h
+++ b/deps/v8/test/cctest/assembler-helper-arm.h
@@ -15,11 +15,11 @@ namespace internal {
// TODO(arm): Refine these signatures per test case, they can have arbitrary
// return and argument types and arbitrary number of arguments.
-using F_iiiii = Object*(int x, int p1, int p2, int p3, int p4);
-using F_piiii = Object*(void* p0, int p1, int p2, int p3, int p4);
-using F_ppiii = Object*(void* p0, void* p1, int p2, int p3, int p4);
-using F_pppii = Object*(void* p0, void* p1, void* p2, int p3, int p4);
-using F_ippii = Object*(int p0, void* p1, void* p2, int p3, int p4);
+using F_iiiii = void*(int x, int p1, int p2, int p3, int p4);
+using F_piiii = void*(void* p0, int p1, int p2, int p3, int p4);
+using F_ppiii = void*(void* p0, void* p1, int p2, int p3, int p4);
+using F_pppii = void*(void* p0, void* p1, void* p2, int p3, int p4);
+using F_ippii = void*(int p0, void* p1, void* p2, int p3, int p4);
Handle<Code> AssembleCodeImpl(std::function<void(Assembler&)> assemble);
diff --git a/deps/v8/test/cctest/cctest.cc b/deps/v8/test/cctest/cctest.cc
index 1b1eeb5d41..ee03a66ea3 100644
--- a/deps/v8/test/cctest/cctest.cc
+++ b/deps/v8/test/cctest/cctest.cc
@@ -160,21 +160,21 @@ void CcTest::TearDown() {
if (isolate_ != nullptr) isolate_->Dispose();
}
-v8::Local<v8::Context> CcTest::NewContext(CcTestExtensionFlags extensions,
+v8::Local<v8::Context> CcTest::NewContext(CcTestExtensionFlags extension_flags,
v8::Isolate* isolate) {
- const char* extension_names[kMaxExtensions];
- int extension_count = 0;
- #define CHECK_EXTENSION_FLAG(Name, Id) \
- if (extensions.Contains(Name##_ID)) extension_names[extension_count++] = Id;
- EXTENSION_LIST(CHECK_EXTENSION_FLAG)
- #undef CHECK_EXTENSION_FLAG
- v8::ExtensionConfiguration config(extension_count, extension_names);
- v8::Local<v8::Context> context = v8::Context::New(isolate, &config);
- CHECK(!context.IsEmpty());
- return context;
+ const char* extension_names[kMaxExtensions];
+ int extension_count = 0;
+ for (int i = 0; i < kMaxExtensions; ++i) {
+ if (!extension_flags.contains(static_cast<CcTestExtensionId>(i))) continue;
+ extension_names[extension_count] = kExtensionName[i];
+ ++extension_count;
+ }
+ v8::ExtensionConfiguration config(extension_count, extension_names);
+ v8::Local<v8::Context> context = v8::Context::New(isolate, &config);
+ CHECK(!context.IsEmpty());
+ return context;
}
-
void CcTest::DisableAutomaticDispose() {
CHECK_EQ(kUninitialized, initialization_state_);
disable_automatic_dispose_ = true;
diff --git a/deps/v8/test/cctest/cctest.h b/deps/v8/test/cctest/cctest.h
index 9d4af5af3f..3c99721760 100644
--- a/deps/v8/test/cctest/cctest.h
+++ b/deps/v8/test/cctest/cctest.h
@@ -32,13 +32,13 @@
#include "include/libplatform/libplatform.h"
#include "include/v8-platform.h"
+#include "src/base/enum-set.h"
#include "src/debug/debug-interface.h"
#include "src/flags.h"
#include "src/heap/factory.h"
#include "src/isolate.h"
#include "src/objects.h"
#include "src/register-configuration.h"
-#include "src/utils.h"
#include "src/v8.h"
#include "src/zone/accounting-allocator.h"
@@ -51,12 +51,7 @@ class RandomNumberGenerator;
namespace internal {
-#if defined(V8_TARGET_ARCH_IA32) && defined(V8_EMBEDDED_BUILTINS)
-// TODO(v8:6666): Fold into Default config once root is fully supported.
-const auto GetRegConfig = RegisterConfiguration::PreserveRootIA32;
-#else
const auto GetRegConfig = RegisterConfiguration::Default;
-#endif
class HandleScope;
class Zone;
@@ -93,20 +88,15 @@ class Zone;
V(TRACE_EXTENSION, "v8/trace")
#define DEFINE_EXTENSION_ID(Name, Ident) Name##_ID,
-enum CcTestExtensionIds {
- EXTENSION_LIST(DEFINE_EXTENSION_ID)
- kMaxExtensions
-};
+enum CcTestExtensionId { EXTENSION_LIST(DEFINE_EXTENSION_ID) kMaxExtensions };
#undef DEFINE_EXTENSION_ID
-typedef v8::internal::EnumSet<CcTestExtensionIds> CcTestExtensionFlags;
-#define DEFINE_EXTENSION_FLAG(Name, Ident) \
- static const CcTestExtensionFlags Name(1 << Name##_ID);
- static const CcTestExtensionFlags NO_EXTENSIONS(0);
- static const CcTestExtensionFlags ALL_EXTENSIONS((1 << kMaxExtensions) - 1);
- EXTENSION_LIST(DEFINE_EXTENSION_FLAG)
-#undef DEFINE_EXTENSION_FLAG
+using CcTestExtensionFlags = v8::base::EnumSet<CcTestExtensionId>;
+#define DEFINE_EXTENSION_NAME(Name, Ident) Ident,
+static constexpr const char* kExtensionName[kMaxExtensions] = {
+ EXTENSION_LIST(DEFINE_EXTENSION_NAME)};
+#undef DEFINE_EXTENSION_NAME
class CcTest {
public:
@@ -166,7 +156,11 @@ class CcTest {
// Helper function to configure a context.
// Must be in a HandleScope.
static v8::Local<v8::Context> NewContext(
- CcTestExtensionFlags extensions,
+ v8::Isolate* isolate = CcTest::isolate()) {
+ return NewContext({}, isolate);
+ }
+ static v8::Local<v8::Context> NewContext(
+ CcTestExtensionFlags extension_flags,
v8::Isolate* isolate = CcTest::isolate());
static void TearDown();
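
With CcTestExtensionFlags now a v8::base::EnumSet, call sites can build flag
sets from initializer lists instead of the old hand-rolled bitmask constants.
A sketch of what a test body might look like under the new API (the extension
IDs are generated from EXTENSION_LIST, e.g. TRACE_EXTENSION_ID; this call site
is hypothetical):

    // A context with no extensions, then one with the trace extension.
    v8::Local<v8::Context> plain = CcTest::NewContext();
    v8::Local<v8::Context> traced =
        CcTest::NewContext({TRACE_EXTENSION_ID}, CcTest::isolate());
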
diff --git a/deps/v8/test/cctest/cctest.status b/deps/v8/test/cctest/cctest.status
index feaf2eb327..b05848d07e 100644
--- a/deps/v8/test/cctest/cctest.status
+++ b/deps/v8/test/cctest/cctest.status
@@ -34,7 +34,7 @@
   # This test is so detailed in its look at the literals array, I can't
# maintain it until the CL is done.
- 'test-heap-profiler/AllocationSitesAreVisible': [FAIL],
+ 'test-heap-profiler/AllocationSitesAreVisible': [FAIL, ['lite_mode == True', SKIP]],
# These tests always fail. They are here to test test.py. If
# they don't fail then test.py has failed.
@@ -75,7 +75,7 @@
# BUG(5193). The cpu profiler tests are notoriously flaky.
'test-profile-generator/RecordStackTraceAtStartProfiling': [SKIP],
'test-cpu-profiler/CollectCpuProfile': [SKIP],
- 'test-cpu-profiler/CollectCpuProfileCallerLineNumbers': [FAIL, PASS],
+ 'test-cpu-profiler/CollectCpuProfileCallerLineNumbers': [SKIP],
'test-cpu-profiler/CollectCpuProfileSamples': [SKIP],
'test-cpu-profiler/CollectDeoptEvents': [SKIP],
'test-cpu-profiler/CpuProfileDeepStack': [SKIP],
@@ -93,12 +93,6 @@
'test-cpu-profiler/TracingCpuProfiler': [SKIP],
'test-sampler/LibSamplerCollectSample': [SKIP],
- # BUG(v8:8209). Flaky
- 'test-cpu-profiler/Issue1398': [SKIP],
-
- # BUG(7702). Flaky data race and other test failures.
- 'test-cpu-profiler/MultipleProfilers': [SKIP],
-
# BUG(7202). The test is flaky.
'test-cpu-profiler/NativeFrameStackTrace': [SKIP],
@@ -112,13 +106,6 @@
'test-func-name-inference/UpperCaseClass': [FAIL],
'test-func-name-inference/LowerCaseClass': [FAIL],
- # Bug(5784). StubCache tests need to be redesigned, as a) they don't work
- # in the new (ignition + turbofan) pipeline environment, and b) they are
- # stymied by a move of code stubs into builtins.
- 'test-api/PrimaryStubCache': [SKIP],
- 'test-api/SecondaryStubCache': [SKIP],
- 'test-api/AccessCheckInIC': [SKIP],
-
# BUG(3742).
'test-mark-compact/MarkCompactCollector': [PASS, ['arch==arm', NO_VARIANTS]],
@@ -128,6 +115,9 @@
'test-serialize/SnapshotCreatorNoExternalReferencesCustomFail1': [FAIL],
'test-serialize/SnapshotCreatorNoExternalReferencesCustomFail2': [FAIL],
+ # BUG(v8:8296). Flaky OOM test.
+ 'test-heap/OutOfMemorySmallObjects': [SKIP],
+
############################################################################
# Slow tests.
'test-debug/CallFunctionInDebugger': [PASS, ['mode == debug', SLOW]],
@@ -167,7 +157,7 @@
'test-api/Threading*': [SKIP],
}], # 'arch == arm64 and simulator_run'
-['arch == arm64 and mode == debug and simulator_run', {
+['arch == arm64 and (mode == debug) and simulator_run', {
# Pass but take too long with the simulator in debug mode.
'test-api/ExternalDoubleArray': [SKIP],
@@ -176,7 +166,22 @@
'test-api/ExternalFloatArray': [SKIP],
'test-api/Float32Array': [SKIP],
'test-api/Float64Array': [SKIP],
-}], # 'arch == arm64 and mode == debug and simulator_run'
+ 'test-api/Uint8Array': [SKIP],
+}], # 'arch == arm64 and (mode == debug) and simulator_run'
+
+##############################################################################
+['variant == nooptimization and (arch == arm or arch == arm64) and simulator_run', {
+ # Slow tests: https://crbug.com/v8/7783
+ 'test-cpu-profiler/MultipleIsolates': [SKIP],
+
+  # Slow but tolerable tests.
+ 'test-api/FixedFloat64Array': [PASS, SLOW],
+ 'test-api/FixedUint16Array': [PASS, SLOW],
+ 'test-api/Int8Array': [PASS, SLOW],
+ 'test-api/SharedFloat64Array': [PASS, SLOW],
+ 'test-api/SharedUint16Array': [PASS, SLOW],
+ 'test-api/Uint16Array': [PASS, SLOW],
+}], # variant == nooptimization and (arch == arm or arch == arm64) and simulator_run
##############################################################################
# TODO(ahaas): Port multiple return values to ARM, MIPS, S390 and PPC
@@ -188,9 +193,6 @@
# Skip tests not suitable for ASAN.
'test-assembler-x64/AssemblerX64XchglOperations': [SKIP],
- # BUG(v8:5193): Flaky.
- 'test-cpu-profiler/TickEvents': [SKIP],
-
# BUG(v8:7587): Strange breakage on Mac.
'test-log-stack-tracer/PureJSStackTrace': [SKIP],
}], # 'asan == True'
@@ -203,8 +205,6 @@
##############################################################################
['tsan == True', {
- # BUG(v8:6133).
- 'test-cpu-profiler/TickEvents': [SKIP],
# BUG(v8:6924). The test allocates a lot of memory.
'test-api/NewStringRangeError': [PASS, NO_VARIANTS],
}], # 'tsan == True'
@@ -223,10 +223,6 @@
# https://crbug.com/v8/7763
'test-lockers/ExtensionsRegistration': [SKIP],
}], # 'no_snap == True'
-['no_snap == False', {
- # FunctionEntryHooks require bootstrapping from scratch.
- 'test-api/SetFunctionEntryHook': [SKIP],
-}], # 'no_snap == False'
##############################################################################
# TODO(machenbach): Fix application of '*'. Nosnap windows needs a separate
@@ -259,9 +255,6 @@
# BUG(5920): Flaky crash.
'test-serialize/PartialSerializerContext': [PASS, ['arch == x64 and mode == debug', SKIP]],
-
- # BUG(v8:8220). Flaky
- 'test-log/LogAll': [SKIP],
}], # 'system == windows'
##############################################################################
@@ -352,7 +345,7 @@
}], # '(arch == mipsel or arch == mips64el or arch == mips or arch == mips64) and not simd_mips'
##############################################################################
-['arch == mipsel or arch == mips64el or arch == mips or arch == mips64', {
+['arch == mips or arch == mips64', {
# TODO(mips-team): Implement I64Atomic operations on MIPS
'test-run-wasm-atomics64/*': [SKIP],
}], # 'arch == mips or arch == mips64'
@@ -384,6 +377,12 @@
}], # 'system == android'
##############################################################################
+['system != android and arch in [arm, arm64] and not simulator_run', {
+ # Consumes too much memory on ODROIDs in debug mode and optimize_for_size.
+ 'test-code-generator/FuzzAssemble*': [PASS, ['(mode == debug) and optimize_for_size', SKIP]],
+}], # 'system != android and arch in [arm, arm64] and not simulator_run'
+
+##############################################################################
['system == aix and arch == ppc64', {
# BUG 2857
@@ -429,6 +428,12 @@
}], # variant == stress_incremental_marking
##############################################################################
+# The test relies on deterministic compilation.
+['variant == stress_background_compile', {
+ 'test-compiler/DecideToPretenureDuringCompilation': [SKIP],
+}], # variant == stress_background_compile
+
+##############################################################################
['variant == no_wasm_traps', {
'test-accessors/*': [SKIP],
'test-api-interceptors/*': [SKIP],
@@ -453,4 +458,90 @@
'test-dtoa/*': [SKIP],
}], # variant == no_wasm_traps
+##############################################################################
+# The stack unwinder API is only supported on x64.
+['arch != x64', {
+ 'test-unwinder/*': [SKIP]
+}],
+
+##############################################################################
+['lite_mode', {
+
+ # TODO(8394): First execution events don't work in lite_mode. Enable this after
+ # we fix the lite mode to track the first execution.
+ 'test-log/LogFunctionEvents': [SKIP],
+
+ # Skip tests for weak references in feedback vector.
+ 'test-weak-references/WeakReferencesBasic': [SKIP],
+ 'test-weak-references/WeakReferencesOldToOld': [SKIP],
+ 'test-weak-references/WeakReferencesOldToNew': [SKIP],
+ 'test-weak-references/WeakReferencesOldToNewScavenged': [SKIP],
+ 'test-weak-references/WeakReferencesOldToCleared': [SKIP],
+ 'test-weak-references/ObjectMovesBeforeClearingWeakField': [SKIP],
+ 'test-weak-references/ObjectWithWeakFieldDies': [SKIP],
+ 'test-weak-references/ObjectWithWeakReferencePromoted': [SKIP],
+ 'test-weak-references/ObjectWithClearedWeakReferencePromoted': [SKIP],
+ 'test-weak-references/WeakReferenceWriteBarrier': [SKIP],
+ 'test-heap-profiler/WeakReference': [SKIP],
+
+ # Skip compiler tests that need optimizer to be enabled.
+ 'test-run-intrinsics/*': [SKIP],
+ 'test-run-jsbranches/*': [SKIP],
+ 'test-run-jscalls/*': [SKIP],
+ 'test-run-jsexceptions/*': [SKIP],
+ 'test-run-jsobjects/*': [SKIP],
+ 'test-run-jsops/*': [SKIP],
+ 'test-run-load-store/*': [SKIP],
+ 'test-run-machops/*': [SKIP],
+ 'test-run-native-calls/*': [SKIP],
+ 'test-run-retpoline/*': [SKIP],
+ 'test-run-stackcheck/*': [SKIP],
+ 'test-run-tail-calls/*': [SKIP],
+ 'test-run-unwinding-info/*': [SKIP],
+ 'test-run-variables/*': [SKIP],
+ 'test-run-bytecode-graph-builder/*': [SKIP],
+ 'test-run-deopt/*': [SKIP],
+ 'test-js-context-specialization/*': [SKIP],
+ 'test-code-assembler/*': [SKIP],
+ 'test-code-stub-assembler/*': [SKIP],
+ 'test-accessor-assembler/*': [SKIP],
+ 'test-torque/*': [SKIP],
+ 'test-code-generator/*': [SKIP],
+
+ # Slow tests
+ 'test-heap/IncrementalMarkingStepMakesBigProgressWithLargeObjects': [SKIP],
+
+ # TODO(v8:7777): Re-enable once wasm is supported in jitless mode.
+ 'test-c-wasm-entry/*': [SKIP],
+ 'test-jump-table-assembler/*': [SKIP],
+ 'test-run-wasm-64/*': [SKIP],
+ 'test-run-wasm-asmjs/*': [SKIP],
+ 'test-run-wasm-atomics64/*': [SKIP],
+ 'test-run-wasm-atomics/*': [SKIP],
+ 'test-run-wasm/*': [SKIP],
+ 'test-run-wasm-interpreter/*': [SKIP],
+ 'test-run-wasm-js/*': [SKIP],
+ 'test-run-wasm-module/*': [SKIP],
+ 'test-run-wasm-sign-extension/*': [SKIP],
+ 'test-run-wasm-simd/*': [SKIP],
+ 'test-streaming-compilation/*': [SKIP],
+ 'test-wasm-breakpoints/*': [SKIP],
+ 'test-wasm-codegen/*': [SKIP],
+ 'test-wasm-import-wrapper-cache/*': [SKIP],
+ 'test-wasm-interpreter-entry/*': [SKIP],
+ 'test-wasm-serialization/*': [SKIP],
+ 'test-wasm-shared-engine/*': [SKIP],
+ 'test-wasm-stack/*': [SKIP],
+ 'test-wasm-trap-position/*': [SKIP],
+ 'wasm-run-utils/*': [SKIP],
+
+ # Tests that generate code at runtime.
+ 'codegen-tester/*': [SKIP],
+ 'test-assembler-*': [SKIP],
+ 'test-basic-block-profiler/*': [SKIP],
+ 'test-branch-combine/*': [SKIP],
+ 'test-multiple-return/*': [SKIP],
+ 'test-run-calls-to-external-references/*': [SKIP],
+}], # lite_mode
+
]
diff --git a/deps/v8/test/cctest/compiler/c-signature.h b/deps/v8/test/cctest/compiler/c-signature.h
index 0aea6e938b..f7d08ec899 100644
--- a/deps/v8/test/cctest/compiler/c-signature.h
+++ b/deps/v8/test/cctest/compiler/c-signature.h
@@ -29,8 +29,8 @@ namespace compiler {
template <typename T>
inline constexpr MachineType MachineTypeForC() {
- static_assert(std::is_convertible<T, Object*>::value,
- "all non-specialized types must be convertible to Object*");
+ static_assert(std::is_convertible<T, Object>::value,
+ "all non-specialized types must be convertible to Object");
return MachineType::AnyTagged();
}
@@ -114,7 +114,7 @@ typedef CSignatureOf<int32_t, int32_t, int32_t> CSignature_i_ii;
typedef CSignatureOf<uint32_t, uint32_t, uint32_t> CSignature_u_uu;
typedef CSignatureOf<float, float, float> CSignature_f_ff;
typedef CSignatureOf<double, double, double> CSignature_d_dd;
-typedef CSignatureOf<Object*, Object*, Object*> CSignature_o_oo;
+typedef CSignatureOf<Object, Object, Object> CSignature_o_oo;
} // namespace compiler
} // namespace internal
diff --git a/deps/v8/test/cctest/compiler/call-tester.h b/deps/v8/test/cctest/compiler/call-tester.h
index 4bf06a9ba3..4bca79625c 100644
--- a/deps/v8/test/cctest/compiler/call-tester.h
+++ b/deps/v8/test/cctest/compiler/call-tester.h
@@ -40,6 +40,15 @@ class CallHelper {
Isolate* isolate_;
};
+template <>
+template <typename... Params>
+Object CallHelper<Object>::Call(Params... args) {
+ CSignature::VerifyParams<Params...>(csig_);
+ Address entry = Generate();
+ auto fn = GeneratedCode<Address, Params...>::FromAddress(isolate_, entry);
+ return Object(fn.Call(args...));
+}
+
// A call helper that calls the given code object assuming C calling convention.
template <typename T>
class CodeRunner : public CallHelper<T> {
diff --git a/deps/v8/test/cctest/compiler/code-assembler-tester.h b/deps/v8/test/cctest/compiler/code-assembler-tester.h
index 7c88998f8a..6707e2ba13 100644
--- a/deps/v8/test/cctest/compiler/code-assembler-tester.h
+++ b/deps/v8/test/cctest/compiler/code-assembler-tester.h
@@ -46,7 +46,7 @@ class CodeAssemblerTester {
: zone_(isolate->allocator(), ZONE_NAME),
scope_(isolate),
state_(isolate, &zone_, call_descriptor, Code::STUB, name,
- PoisoningMitigationLevel::kDontPoison, 0, -1) {}
+ PoisoningMitigationLevel::kDontPoison, Builtins::kNoBuiltinId) {}
CodeAssemblerState* state() { return &state_; }
@@ -56,11 +56,13 @@ class CodeAssemblerTester {
}
Handle<Code> GenerateCode() {
- return CodeAssembler::GenerateCode(
- &state_, AssemblerOptions::Default(scope_.isolate()));
+ return GenerateCode(AssemblerOptions::Default(scope_.isolate()));
}
Handle<Code> GenerateCode(const AssemblerOptions& options) {
+ if (state_.InsideBlock()) {
+ CodeAssembler(&state_).Unreachable();
+ }
return CodeAssembler::GenerateCode(&state_, options);
}
diff --git a/deps/v8/test/cctest/compiler/codegen-tester.cc b/deps/v8/test/cctest/compiler/codegen-tester.cc
index f66385a92e..0aff318211 100644
--- a/deps/v8/test/cctest/compiler/codegen-tester.cc
+++ b/deps/v8/test/cctest/compiler/codegen-tester.cc
@@ -3,6 +3,8 @@
// found in the LICENSE file.
#include "test/cctest/compiler/codegen-tester.h"
+
+#include "src/base/overflowing-math.h"
#include "src/objects-inl.h"
#include "test/cctest/cctest.h"
#include "test/cctest/compiler/value-helper.h"
@@ -381,7 +383,7 @@ void RunSmiConstant(int32_t v) {
// TODO(dcarney): on x64 Smis are generated with the SmiConstantRegister
#if !V8_TARGET_ARCH_X64
if (Smi::IsValid(v)) {
- RawMachineAssemblerTester<Object*> m;
+ RawMachineAssemblerTester<Object> m;
m.Return(m.NumberConstant(v));
CHECK_EQ(Smi::FromInt(v), m.Call());
}
@@ -390,14 +392,14 @@ void RunSmiConstant(int32_t v) {
void RunNumberConstant(double v) {
- RawMachineAssemblerTester<Object*> m;
+ RawMachineAssemblerTester<Object> m;
#if V8_TARGET_ARCH_X64
// TODO(dcarney): on x64 Smis are generated with the SmiConstantRegister
Handle<Object> number = m.isolate()->factory()->NewNumber(v);
if (number->IsSmi()) return;
#endif
m.Return(m.NumberConstant(v));
- Object* result = m.Call();
+ Object result = m.Call();
m.CheckNumber(v, result);
}
@@ -419,11 +421,12 @@ TEST(RunInt32Constants) {
TEST(RunSmiConstants) {
- for (int32_t i = 1; i < Smi::kMaxValue && i != 0; i = i << 1) {
+ for (int32_t i = 1; i < Smi::kMaxValue && i != 0;
+ i = base::ShlWithWraparound(i, 1)) {
RunSmiConstant(i);
- RunSmiConstant(3 * i);
- RunSmiConstant(5 * i);
- RunSmiConstant(-i);
+ RunSmiConstant(base::MulWithWraparound(3, i));
+ RunSmiConstant(base::MulWithWraparound(5, i));
+ RunSmiConstant(base::NegateWithWraparound(i));
RunSmiConstant(i | 1);
RunSmiConstant(i | 3);
}
@@ -444,9 +447,10 @@ TEST(RunNumberConstants) {
FOR_INT32_INPUTS(i) { RunNumberConstant(*i); }
}
- for (int32_t i = 1; i < Smi::kMaxValue && i != 0; i = i << 1) {
+ for (int32_t i = 1; i < Smi::kMaxValue && i != 0;
+ i = base::ShlWithWraparound(i, 1)) {
RunNumberConstant(i);
- RunNumberConstant(-i);
+ RunNumberConstant(base::NegateWithWraparound(i));
RunNumberConstant(i | 1);
RunNumberConstant(i | 3);
}
@@ -458,24 +462,25 @@ TEST(RunNumberConstants) {
TEST(RunEmptyString) {
- RawMachineAssemblerTester<Object*> m;
+ RawMachineAssemblerTester<Object> m;
m.Return(m.StringConstant("empty"));
m.CheckString("empty", m.Call());
}
TEST(RunHeapConstant) {
- RawMachineAssemblerTester<Object*> m;
+ RawMachineAssemblerTester<Object> m;
m.Return(m.StringConstant("empty"));
m.CheckString("empty", m.Call());
}
TEST(RunHeapNumberConstant) {
- RawMachineAssemblerTester<HeapObject*> m;
+ RawMachineAssemblerTester<void*> m;
Handle<HeapObject> number = m.isolate()->factory()->NewHeapNumber(100.5);
m.Return(m.HeapConstant(number));
- HeapObject* result = m.Call();
+ HeapObject result =
+ HeapObject::cast(Object(reinterpret_cast<Address>(m.Call())));
CHECK_EQ(result, *number);
}
@@ -575,6 +580,20 @@ TEST(RunBinopTester) {
#if V8_TARGET_ARCH_64_BIT
// TODO(ahaas): run int64 tests on all platforms when supported.
+
+namespace {
+
+int64_t Add4(int64_t a, int64_t b, int64_t c, int64_t d) {
+ // Operate on uint64_t values to avoid undefined behavior.
+ return static_cast<int64_t>(
+ static_cast<uint64_t>(a) + static_cast<uint64_t>(b) +
+ static_cast<uint64_t>(c) + static_cast<uint64_t>(d));
+}
+
+int64_t Add3(int64_t a, int64_t b, int64_t c) { return Add4(a, b, c, 0); }
+
+} // namespace
+
TEST(RunBufferedRawMachineAssemblerTesterTester) {
{
BufferedRawMachineAssemblerTester<int64_t> m;
@@ -592,8 +611,8 @@ TEST(RunBufferedRawMachineAssemblerTesterTester) {
m.Return(m.Int64Add(m.Parameter(0), m.Parameter(1)));
FOR_INT64_INPUTS(i) {
FOR_INT64_INPUTS(j) {
- CHECK_EQ(*i + *j, m.Call(*i, *j));
- CHECK_EQ(*j + *i, m.Call(*j, *i));
+ CHECK_EQ(base::AddWithWraparound(*i, *j), m.Call(*i, *j));
+ CHECK_EQ(base::AddWithWraparound(*j, *i), m.Call(*j, *i));
}
}
}
@@ -604,9 +623,9 @@ TEST(RunBufferedRawMachineAssemblerTesterTester) {
m.Int64Add(m.Int64Add(m.Parameter(0), m.Parameter(1)), m.Parameter(2)));
FOR_INT64_INPUTS(i) {
FOR_INT64_INPUTS(j) {
- CHECK_EQ(*i + *i + *j, m.Call(*i, *i, *j));
- CHECK_EQ(*i + *j + *i, m.Call(*i, *j, *i));
- CHECK_EQ(*j + *i + *i, m.Call(*j, *i, *i));
+ CHECK_EQ(Add3(*i, *i, *j), m.Call(*i, *i, *j));
+ CHECK_EQ(Add3(*i, *j, *i), m.Call(*i, *j, *i));
+ CHECK_EQ(Add3(*j, *i, *i), m.Call(*j, *i, *i));
}
}
}
@@ -619,10 +638,10 @@ TEST(RunBufferedRawMachineAssemblerTesterTester) {
m.Parameter(3)));
FOR_INT64_INPUTS(i) {
FOR_INT64_INPUTS(j) {
- CHECK_EQ(*i + *i + *i + *j, m.Call(*i, *i, *i, *j));
- CHECK_EQ(*i + *i + *j + *i, m.Call(*i, *i, *j, *i));
- CHECK_EQ(*i + *j + *i + *i, m.Call(*i, *j, *i, *i));
- CHECK_EQ(*j + *i + *i + *i, m.Call(*j, *i, *i, *i));
+ CHECK_EQ(Add4(*i, *i, *i, *j), m.Call(*i, *i, *i, *j));
+ CHECK_EQ(Add4(*i, *i, *j, *i), m.Call(*i, *i, *j, *i));
+ CHECK_EQ(Add4(*i, *j, *i, *i), m.Call(*i, *j, *i, *i));
+ CHECK_EQ(Add4(*j, *i, *i, *i), m.Call(*j, *i, *i, *i));
}
}
}
@@ -658,10 +677,10 @@ TEST(RunBufferedRawMachineAssemblerTesterTester) {
FOR_INT64_INPUTS(i) {
FOR_INT64_INPUTS(j) {
m.Call(*i, *j);
- CHECK_EQ(*i + *j, result);
+ CHECK_EQ(base::AddWithWraparound(*i, *j), result);
m.Call(*j, *i);
- CHECK_EQ(*j + *i, result);
+ CHECK_EQ(base::AddWithWraparound(*j, *i), result);
}
}
}
@@ -677,13 +696,13 @@ TEST(RunBufferedRawMachineAssemblerTesterTester) {
FOR_INT64_INPUTS(i) {
FOR_INT64_INPUTS(j) {
m.Call(*i, *i, *j);
- CHECK_EQ(*i + *i + *j, result);
+ CHECK_EQ(Add3(*i, *i, *j), result);
m.Call(*i, *j, *i);
- CHECK_EQ(*i + *j + *i, result);
+ CHECK_EQ(Add3(*i, *j, *i), result);
m.Call(*j, *i, *i);
- CHECK_EQ(*j + *i + *i, result);
+ CHECK_EQ(Add3(*j, *i, *i), result);
}
}
}
@@ -702,16 +721,16 @@ TEST(RunBufferedRawMachineAssemblerTesterTester) {
FOR_INT64_INPUTS(i) {
FOR_INT64_INPUTS(j) {
m.Call(*i, *i, *i, *j);
- CHECK_EQ(*i + *i + *i + *j, result);
+ CHECK_EQ(Add4(*i, *i, *i, *j), result);
m.Call(*i, *i, *j, *i);
- CHECK_EQ(*i + *i + *j + *i, result);
+ CHECK_EQ(Add4(*i, *i, *j, *i), result);
m.Call(*i, *j, *i, *i);
- CHECK_EQ(*i + *j + *i + *i, result);
+ CHECK_EQ(Add4(*i, *j, *i, *i), result);
m.Call(*j, *i, *i, *i);
- CHECK_EQ(*j + *i + *i + *i, result);
+ CHECK_EQ(Add4(*j, *i, *i, *i), result);
}
}
}
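
The base::AddWithWraparound / MulWithWraparound / ShlWithWraparound /
NegateWithWraparound calls introduced above (from src/base/overflowing-math.h)
replace plain signed operators, whose overflow is undefined behavior in C++.
The Add4 helper earlier in this file shows the underlying trick; a
one-function sketch of the same idea (sketch only, not the real
implementation):

    int32_t AddWrap(int32_t a, int32_t b) {
      // Unsigned addition wraps mod 2^32 by definition, so there is no UB;
      // converting back gives the expected two's-complement result.
      return static_cast<int32_t>(static_cast<uint32_t>(a) +
                                  static_cast<uint32_t>(b));
    }
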
diff --git a/deps/v8/test/cctest/compiler/codegen-tester.h b/deps/v8/test/cctest/compiler/codegen-tester.h
index f9fbd4af3a..dc35a6b928 100644
--- a/deps/v8/test/cctest/compiler/codegen-tester.h
+++ b/deps/v8/test/cctest/compiler/codegen-tester.h
@@ -5,7 +5,7 @@
#ifndef V8_CCTEST_COMPILER_CODEGEN_TESTER_H_
#define V8_CCTEST_COMPILER_CODEGEN_TESTER_H_
-#include "src/compiler/instruction-selector.h"
+#include "src/compiler/backend/instruction-selector.h"
#include "src/compiler/pipeline.h"
#include "src/compiler/raw-machine-assembler.h"
#include "src/optimized-compilation-info.h"
@@ -59,11 +59,11 @@ class RawMachineAssemblerTester : public HandleAndZoneScope,
~RawMachineAssemblerTester() override = default;
- void CheckNumber(double expected, Object* number) {
+ void CheckNumber(double expected, Object number) {
CHECK(this->isolate()->factory()->NewNumber(expected)->SameValue(number));
}
- void CheckString(const char* expected, Object* string) {
+ void CheckString(const char* expected, Object string) {
CHECK(
this->isolate()->factory()->InternalizeUtf8String(expected)->SameValue(
string));
diff --git a/deps/v8/test/cctest/compiler/function-tester.cc b/deps/v8/test/cctest/compiler/function-tester.cc
index 86678606d4..bb23d0644a 100644
--- a/deps/v8/test/cctest/compiler/function-tester.cc
+++ b/deps/v8/test/cctest/compiler/function-tester.cc
@@ -5,6 +5,7 @@
#include "test/cctest/compiler/function-tester.h"
#include "src/api-inl.h"
+#include "src/assembler.h"
#include "src/compiler.h"
#include "src/compiler/linkage.h"
#include "src/compiler/pipeline.h"
@@ -131,7 +132,7 @@ Handle<Object> FunctionTester::false_value() {
Handle<JSFunction> FunctionTester::ForMachineGraph(Graph* graph,
int param_count) {
- JSFunction* p = nullptr;
+ JSFunction p;
{ // because of the implicit handle scope of FunctionTester.
FunctionTester f(graph, param_count);
p = *f.function;
@@ -142,6 +143,11 @@ Handle<JSFunction> FunctionTester::ForMachineGraph(Graph* graph,
Handle<JSFunction> FunctionTester::Compile(Handle<JSFunction> function) {
Handle<SharedFunctionInfo> shared(function->shared(), isolate);
+ IsCompiledScope is_compiled_scope(shared->is_compiled_scope());
+ CHECK(is_compiled_scope.is_compiled() ||
+ Compiler::Compile(function, Compiler::CLEAR_EXCEPTION,
+ &is_compiled_scope));
+
Zone zone(isolate->allocator(), ZONE_NAME);
OptimizedCompilationInfo info(&zone, isolate, shared, function);
@@ -149,14 +155,12 @@ Handle<JSFunction> FunctionTester::Compile(Handle<JSFunction> function) {
info.MarkAsInliningEnabled();
}
- CHECK(function->is_compiled() ||
- Compiler::Compile(function, Compiler::CLEAR_EXCEPTION));
CHECK(info.shared_info()->HasBytecodeArray());
JSFunction::EnsureFeedbackVector(function);
Handle<Code> code =
Pipeline::GenerateCodeForTesting(&info, isolate).ToHandleChecked();
- info.context()->native_context()->AddOptimizedCode(*code);
+ info.native_context()->AddOptimizedCode(*code);
function->set_code(*code);
return function;
}
diff --git a/deps/v8/test/cctest/compiler/graph-builder-tester.h b/deps/v8/test/cctest/compiler/graph-builder-tester.h
index e0045979d4..4fe0fc9292 100644
--- a/deps/v8/test/cctest/compiler/graph-builder-tester.h
+++ b/deps/v8/test/cctest/compiler/graph-builder-tester.h
@@ -5,8 +5,9 @@
#ifndef V8_CCTEST_COMPILER_GRAPH_BUILDER_TESTER_H_
#define V8_CCTEST_COMPILER_GRAPH_BUILDER_TESTER_H_
+#include "src/assembler.h"
+#include "src/compiler/backend/instruction-selector.h"
#include "src/compiler/common-operator.h"
-#include "src/compiler/instruction-selector.h"
#include "src/compiler/linkage.h"
#include "src/compiler/machine-operator.h"
#include "src/compiler/operator-properties.h"
@@ -96,8 +97,9 @@ class GraphBuilderTester : public HandleAndZoneScope,
Node* PointerConstant(void* value) {
intptr_t intptr_value = reinterpret_cast<intptr_t>(value);
- return kPointerSize == 8 ? NewNode(common()->Int64Constant(intptr_value))
- : Int32Constant(static_cast<int>(intptr_value));
+ return kSystemPointerSize == 8
+ ? NewNode(common()->Int64Constant(intptr_value))
+ : Int32Constant(static_cast<int>(intptr_value));
}
Node* Int32Constant(int32_t value) {
return NewNode(common()->Int32Constant(value));
@@ -222,7 +224,7 @@ class GraphBuilderTester : public HandleAndZoneScope,
if (has_control) ++input_count_with_deps;
if (has_effect) ++input_count_with_deps;
Node** buffer = zone()->template NewArray<Node*>(input_count_with_deps);
- memcpy(buffer, value_inputs, kPointerSize * value_input_count);
+ memcpy(buffer, value_inputs, kSystemPointerSize * value_input_count);
Node** current_input = buffer + value_input_count;
if (has_effect) {
*current_input++ = effect_;
diff --git a/deps/v8/test/cctest/compiler/test-basic-block-profiler.cc b/deps/v8/test/cctest/compiler/test-basic-block-profiler.cc
index f961021913..0414532002 100644
--- a/deps/v8/test/cctest/compiler/test-basic-block-profiler.cc
+++ b/deps/v8/test/cctest/compiler/test-basic-block-profiler.cc
@@ -48,13 +48,13 @@ TEST(ProfileDiamond) {
m.GenerateCode();
{
- uint32_t expected[] = {0, 0, 0, 0};
+ uint32_t expected[] = {0, 0, 0, 0, 0, 0};
m.Expect(arraysize(expected), expected);
}
m.Call(0);
{
- uint32_t expected[] = {1, 1, 0, 1};
+ uint32_t expected[] = {1, 1, 1, 0, 0, 1};
m.Expect(arraysize(expected), expected);
}
@@ -62,13 +62,13 @@ TEST(ProfileDiamond) {
m.Call(1);
{
- uint32_t expected[] = {1, 0, 1, 1};
+ uint32_t expected[] = {1, 0, 0, 1, 1, 1};
m.Expect(arraysize(expected), expected);
}
m.Call(0);
{
- uint32_t expected[] = {2, 1, 1, 2};
+ uint32_t expected[] = {2, 1, 1, 1, 1, 2};
m.Expect(arraysize(expected), expected);
}
}
@@ -94,7 +94,7 @@ TEST(ProfileLoop) {
m.GenerateCode();
{
- uint32_t expected[] = {0, 0, 0, 0};
+ uint32_t expected[] = {0, 0, 0, 0, 0, 0};
m.Expect(arraysize(expected), expected);
}
@@ -102,7 +102,7 @@ TEST(ProfileLoop) {
for (size_t i = 0; i < arraysize(runs); i++) {
m.ResetCounts();
CHECK_EQ(1, m.Call(static_cast<int>(runs[i])));
- uint32_t expected[] = {1, runs[i] + 1, runs[i], 1};
+ uint32_t expected[] = {1, runs[i] + 1, runs[i], runs[i], 1, 1};
m.Expect(arraysize(expected), expected);
}
}
diff --git a/deps/v8/test/cctest/compiler/test-branch-combine.cc b/deps/v8/test/cctest/compiler/test-branch-combine.cc
index 090a0f23cd..46240aa9b1 100644
--- a/deps/v8/test/cctest/compiler/test-branch-combine.cc
+++ b/deps/v8/test/cctest/compiler/test-branch-combine.cc
@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/base/overflowing-math.h"
#include "src/objects-inl.h"
#include "test/cctest/cctest.h"
#include "test/cctest/compiler/codegen-tester.h"
@@ -501,7 +502,8 @@ TEST(BranchCombineInt32AddLessThanZero) {
FOR_INT32_INPUTS(j) {
int32_t a = *i;
int32_t b = *j;
- int32_t expect = (a + b < 0) ? t_constant : f_constant;
+ int32_t expect =
+ (base::AddWithWraparound(a, b) < 0) ? t_constant : f_constant;
CHECK_EQ(expect, m.Call(a, b));
}
}
@@ -529,7 +531,8 @@ TEST(BranchCombineInt32AddGreaterThanOrEqualZero) {
FOR_INT32_INPUTS(j) {
int32_t a = *i;
int32_t b = *j;
- int32_t expect = (a + b >= 0) ? t_constant : f_constant;
+ int32_t expect =
+ (base::AddWithWraparound(a, b) >= 0) ? t_constant : f_constant;
CHECK_EQ(expect, m.Call(a, b));
}
}
@@ -557,7 +560,8 @@ TEST(BranchCombineInt32ZeroGreaterThanAdd) {
FOR_INT32_INPUTS(j) {
int32_t a = *i;
int32_t b = *j;
- int32_t expect = (0 > a + b) ? t_constant : f_constant;
+ int32_t expect =
+ (0 > base::AddWithWraparound(a, b)) ? t_constant : f_constant;
CHECK_EQ(expect, m.Call(a, b));
}
}
@@ -585,7 +589,8 @@ TEST(BranchCombineInt32ZeroLessThanOrEqualAdd) {
FOR_INT32_INPUTS(j) {
int32_t a = *i;
int32_t b = *j;
- int32_t expect = (0 <= a + b) ? t_constant : f_constant;
+ int32_t expect =
+ (0 <= base::AddWithWraparound(a, b)) ? t_constant : f_constant;
CHECK_EQ(expect, m.Call(a, b));
}
}
@@ -609,8 +614,8 @@ TEST(BranchCombineUint32AddLessThanOrEqualZero) {
m.Bind(&blockb);
m.Return(m.Int32Constant(f_constant));
- FOR_INT32_INPUTS(i) {
- FOR_INT32_INPUTS(j) {
+ FOR_UINT32_INPUTS(i) {
+ FOR_UINT32_INPUTS(j) {
uint32_t a = *i;
uint32_t b = *j;
int32_t expect = (a + b <= 0) ? t_constant : f_constant;
@@ -637,8 +642,8 @@ TEST(BranchCombineUint32AddGreaterThanZero) {
m.Bind(&blockb);
m.Return(m.Int32Constant(f_constant));
- FOR_INT32_INPUTS(i) {
- FOR_INT32_INPUTS(j) {
+ FOR_UINT32_INPUTS(i) {
+ FOR_UINT32_INPUTS(j) {
uint32_t a = *i;
uint32_t b = *j;
int32_t expect = (a + b > 0) ? t_constant : f_constant;
@@ -665,8 +670,8 @@ TEST(BranchCombineUint32ZeroGreaterThanOrEqualAdd) {
m.Bind(&blockb);
m.Return(m.Int32Constant(f_constant));
- FOR_INT32_INPUTS(i) {
- FOR_INT32_INPUTS(j) {
+ FOR_UINT32_INPUTS(i) {
+ FOR_UINT32_INPUTS(j) {
uint32_t a = *i;
uint32_t b = *j;
int32_t expect = (0 >= a + b) ? t_constant : f_constant;
@@ -693,8 +698,8 @@ TEST(BranchCombineUint32ZeroLessThanAdd) {
m.Bind(&blockb);
m.Return(m.Int32Constant(f_constant));
- FOR_INT32_INPUTS(i) {
- FOR_INT32_INPUTS(j) {
+ FOR_UINT32_INPUTS(i) {
+ FOR_UINT32_INPUTS(j) {
uint32_t a = *i;
uint32_t b = *j;
int32_t expect = (0 < a + b) ? t_constant : f_constant;
diff --git a/deps/v8/test/cctest/compiler/test-code-assembler.cc b/deps/v8/test/cctest/compiler/test-code-assembler.cc
index a2243e6edd..3b83d422d2 100644
--- a/deps/v8/test/cctest/compiler/test-code-assembler.cc
+++ b/deps/v8/test/cctest/compiler/test-code-assembler.cc
@@ -8,6 +8,7 @@
#include "src/compiler/opcodes.h"
#include "src/isolate.h"
#include "src/objects-inl.h"
+#include "src/objects/heap-number-inl.h"
#include "test/cctest/compiler/code-assembler-tester.h"
#include "test/cctest/compiler/function-tester.h"
@@ -68,8 +69,7 @@ TEST(SimpleIntPtrReturn) {
m.IntPtrConstant(reinterpret_cast<intptr_t>(&test))));
FunctionTester ft(asm_tester.GenerateCode());
MaybeHandle<Object> result = ft.Call();
- CHECK_EQ(reinterpret_cast<intptr_t>(&test),
- reinterpret_cast<intptr_t>(*result.ToHandleChecked()));
+ CHECK_EQ(reinterpret_cast<Address>(&test), result.ToHandleChecked()->ptr());
}
TEST(SimpleDoubleReturn) {
@@ -560,6 +560,50 @@ TEST(GotoIfExceptionMultiple) {
CHECK(constructor->SameValue(*isolate->type_error_function()));
}
+TEST(ExceptionHandler) {
+ Isolate* isolate(CcTest::InitIsolateOnce());
+ const int kNumParams = 0;
+ CodeAssemblerTester asm_tester(isolate, kNumParams);
+ CodeAssembler m(asm_tester.state());
+
+ CodeAssembler::TVariable<Object> var(m.SmiConstant(0), &m);
+ Label exception(&m, {&var}, Label::kDeferred);
+ {
+ CodeAssemblerScopedExceptionHandler handler(&m, &exception, &var);
+ Node* context = m.HeapConstant(Handle<Context>(isolate->native_context()));
+ m.CallRuntime(Runtime::kThrow, context, m.SmiConstant(2));
+ }
+ m.Return(m.SmiConstant(1));
+
+ m.Bind(&exception);
+ m.Return(var.value());
+
+ FunctionTester ft(asm_tester.GenerateCode(), kNumParams);
+ CHECK_EQ(2, ft.CallChecked<Smi>()->value());
+}
+
+TEST(TestCodeAssemblerCodeComment) {
+ i::FLAG_code_comments = true;
+ Isolate* isolate(CcTest::InitIsolateOnce());
+ const int kNumParams = 0;
+ CodeAssemblerTester asm_tester(isolate, kNumParams);
+ CodeAssembler m(asm_tester.state());
+
+ m.Comment("Comment1");
+ m.Return(m.SmiConstant(1));
+
+ Handle<Code> code = asm_tester.GenerateCode();
+ CHECK_NE(code->code_comments(), kNullAddress);
+ CodeCommentsIterator it(code->code_comments());
+ CHECK(it.HasCurrent());
+ bool found_comment = false;
+ while (it.HasCurrent()) {
+ if (strcmp(it.GetComment(), "Comment1") == 0) found_comment = true;
+ it.Next();
+ }
+ CHECK(found_comment);
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/cctest/compiler/test-code-generator.cc b/deps/v8/test/cctest/compiler/test-code-generator.cc
index 8bf29dca69..6125ef4bdb 100644
--- a/deps/v8/test/cctest/compiler/test-code-generator.cc
+++ b/deps/v8/test/cctest/compiler/test-code-generator.cc
@@ -5,12 +5,13 @@
#include "src/assembler-inl.h"
#include "src/base/utils/random-number-generator.h"
#include "src/code-stub-assembler.h"
-#include "src/codegen.h"
-#include "src/compiler/code-generator.h"
-#include "src/compiler/instruction.h"
+#include "src/compiler/backend/code-generator.h"
+#include "src/compiler/backend/instruction.h"
#include "src/compiler/linkage.h"
#include "src/isolate.h"
#include "src/objects-inl.h"
+#include "src/objects/heap-number-inl.h"
+#include "src/objects/smi.h"
#include "src/optimized-compilation-info.h"
#include "test/cctest/cctest.h"
@@ -129,7 +130,7 @@ Handle<Code> BuildSetupFunction(Isolate* isolate,
__ Int32Constant(0));
for (int lane = 0; lane < 4; lane++) {
TNode<Int32T> lane_value = __ LoadAndUntagToWord32FixedArrayElement(
- element, __ IntPtrConstant(lane));
+ __ CAST(element), __ IntPtrConstant(lane));
vector = tester.raw_assembler_for_testing()->AddNode(
tester.raw_assembler_for_testing()->machine()->I32x4ReplaceLane(
lane),
@@ -156,11 +157,11 @@ Handle<Code> BuildSetupFunction(Isolate* isolate,
// ~~~
// FixedArray teardown(CodeObject* /* unused */, FixedArray result,
// // Tagged registers.
-// Object* r0, Object* r1, ...,
+// Object r0, Object r1, ...,
// // FP registers.
// Float32 s0, Float64 d1, ...,
// // Mixed stack slots.
-// Float64 mem0, Object* mem1, Float32 mem2, ...) {
+// Float64 mem0, Object mem1, Float32 mem2, ...) {
// result[0] = r0;
// result[1] = r1;
// ...
@@ -256,7 +257,7 @@ void PrintStateValue(std::ostream& os, Isolate* isolate, Handle<Object> value,
os << value->Number();
break;
case MachineRepresentation::kSimd128: {
- FixedArray* vector = FixedArray::cast(*value);
+ FixedArray vector = FixedArray::cast(*value);
os << "[";
for (int lane = 0; lane < 4; lane++) {
os << Smi::cast(*vector->GetValueChecked<Smi>(isolate, lane))->value();
@@ -361,7 +362,7 @@ class TestEnvironment : public HandleAndZoneScope {
public:
// These constants may be tuned to experiment with different environments.
-#if defined(V8_TARGET_ARCH_IA32) && defined(V8_EMBEDDED_BUILTINS)
+#ifdef V8_TARGET_ARCH_IA32
static constexpr int kGeneralRegisterCount = 3;
#else
static constexpr int kGeneralRegisterCount = 4;
@@ -380,19 +381,12 @@ class TestEnvironment : public HandleAndZoneScope {
static constexpr int kDoubleConstantCount = 4;
TestEnvironment()
- : blocks_(1, main_zone()),
+ : blocks_(1, NewBlock(main_zone(), RpoNumber::FromInt(0)), main_zone()),
code_(main_isolate(), main_zone(), &blocks_),
rng_(CcTest::random_number_generator()),
supported_reps_({MachineRepresentation::kTagged,
MachineRepresentation::kFloat32,
MachineRepresentation::kFloat64}) {
- // Create and initialize a single empty block in blocks_.
- InstructionBlock* block = new (main_zone()) InstructionBlock(
- main_zone(), RpoNumber::FromInt(0), RpoNumber::Invalid(),
- RpoNumber::Invalid(), false, false);
- block->set_ao_number(RpoNumber::FromInt(0));
- blocks_[0] = block;
-
stack_slot_count_ =
kTaggedSlotCount + kFloat32SlotCount + kFloat64SlotCount;
if (TestSimd128Moves()) {
@@ -404,11 +398,11 @@ class TestEnvironment : public HandleAndZoneScope {
// ~~~
// FixedArray f(CodeObject* teardown, FixedArray preallocated_result,
// // Tagged registers.
- // Object*, Object*, ...,
+ // Object, Object, ...,
// // FP registers.
// Float32, Float64, Simd128, ...,
// // Mixed stack slots.
- // Float64, Object*, Float32, Simd128, ...);
+ // Float64, Object, Float32, Simd128, ...);
// ~~~
LocationSignature::Builder test_signature(
main_zone(), 1,
@@ -539,8 +533,8 @@ class TestEnvironment : public HandleAndZoneScope {
// differentiate between a pointer to a HeapNumber and a integer. For this
// reason, we make sure all integers are Smis, including constants.
for (int i = 0; i < kSmiConstantCount; i++) {
- intptr_t smi_value = reinterpret_cast<intptr_t>(
- Smi::FromInt(rng_->NextInt(Smi::kMaxValue)));
+ intptr_t smi_value = static_cast<intptr_t>(
+ Smi::FromInt(rng_->NextInt(Smi::kMaxValue)).ptr());
Constant constant = kPointerSize == 8
? Constant(static_cast<int64_t>(smi_value))
: Constant(static_cast<int32_t>(smi_value));
@@ -733,15 +727,13 @@ class TestEnvironment : public HandleAndZoneScope {
switch (constant.type()) {
case Constant::kInt32:
constant_value =
- Handle<Smi>(reinterpret_cast<Smi*>(
- static_cast<intptr_t>(constant.ToInt32())),
+ Handle<Smi>(Smi(static_cast<Address>(
+ static_cast<intptr_t>(constant.ToInt32()))),
main_isolate());
break;
case Constant::kInt64:
- constant_value =
- Handle<Smi>(reinterpret_cast<Smi*>(
- static_cast<intptr_t>(constant.ToInt64())),
- main_isolate());
+ constant_value = Handle<Smi>(
+ Smi(static_cast<Address>(constant.ToInt64())), main_isolate());
break;
case Constant::kFloat32:
constant_value = main_isolate()->factory()->NewHeapNumber(
@@ -824,7 +816,7 @@ class TestEnvironment : public HandleAndZoneScope {
Handle<Smi> expected_lane =
FixedArray::cast(*expected)->GetValueChecked<Smi>(main_isolate(),
lane);
- if (!actual_lane->StrictEquals(*expected_lane)) {
+ if (*actual_lane != *expected_lane) {
return false;
}
}
@@ -926,6 +918,11 @@ class TestEnvironment : public HandleAndZoneScope {
return allocated_constants_[rep][index];
}
+ static InstructionBlock* NewBlock(Zone* zone, RpoNumber rpo) {
+ return new (zone) InstructionBlock(zone, rpo, RpoNumber::Invalid(),
+ RpoNumber::Invalid(), false, false);
+ }
+
v8::base::RandomNumberGenerator* rng() const { return rng_; }
InstructionSequence* code() { return &code_; }
CallDescriptor* test_descriptor() { return test_descriptor_; }
diff --git a/deps/v8/test/cctest/compiler/test-gap-resolver.cc b/deps/v8/test/cctest/compiler/test-gap-resolver.cc
index 504c01cb09..85dd389287 100644
--- a/deps/v8/test/cctest/compiler/test-gap-resolver.cc
+++ b/deps/v8/test/cctest/compiler/test-gap-resolver.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/compiler/gap-resolver.h"
+#include "src/compiler/backend/gap-resolver.h"
#include "src/base/utils/random-number-generator.h"
#include "test/cctest/cctest.h"
@@ -165,14 +165,12 @@ class InterpreterState {
friend std::ostream& operator<<(std::ostream& os,
const InterpreterState& is) {
- for (OperandMap::const_iterator it = is.values_.begin();
- it != is.values_.end(); ++it) {
- if (it != is.values_.begin()) os << " ";
- InstructionOperand source = FromKey(it->second);
- InstructionOperand destination = FromKey(it->first);
- MoveOperands mo(source, destination);
- PrintableMoveOperands pmo = {GetRegConfig(), &mo};
- os << pmo;
+ const char* space = "";
+ for (auto& value : is.values_) {
+ InstructionOperand source = FromKey(value.second);
+ InstructionOperand destination = FromKey(value.first);
+ os << space << MoveOperands{source, destination};
+ space = " ";
}
return os;
}
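
The rewritten operator<< above swaps explicit iterator bookkeeping for the empty-then-space separator idiom. The same pattern in isolation:

#include <iostream>
#include <vector>

// Prints elements space-separated, with no leading or trailing space: the
// separator is empty before the first element and " " afterwards.
void PrintSpaceSeparated(const std::vector<int>& values) {
  const char* space = "";
  for (int value : values) {
    std::cout << space << value;
    space = " ";
  }
}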
@@ -314,9 +312,9 @@ class ParallelMoveCreator : public HandleAndZoneScope {
UNREACHABLE();
}
- // min(num_alloctable_general_registers for each arch) == 6 from
+  // min(num_allocatable_general_registers for each arch) == 5 from
// assembler-ia32.h
- const int kMaxIndex = 6;
+ const int kMaxIndex = 5;
const int kMaxIndices = kMaxIndex + 1;
// Non-FP slots shouldn't overlap FP slots.
diff --git a/deps/v8/test/cctest/compiler/test-instruction-scheduler.cc b/deps/v8/test/cctest/compiler/test-instruction-scheduler.cc
index 468961c010..f80718e05e 100644
--- a/deps/v8/test/cctest/compiler/test-instruction-scheduler.cc
+++ b/deps/v8/test/cctest/compiler/test-instruction-scheduler.cc
@@ -2,9 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/compiler/instruction-scheduler.h"
-#include "src/compiler/instruction-selector-impl.h"
-#include "src/compiler/instruction.h"
+#include "src/compiler/backend/instruction-scheduler.h"
+#include "src/compiler/backend/instruction-selector-impl.h"
+#include "src/compiler/backend/instruction.h"
#include "test/cctest/cctest.h"
@@ -14,13 +14,11 @@ namespace compiler {
// Create InstructionBlocks with a single block.
InstructionBlocks* CreateSingleBlock(Zone* zone) {
- InstructionBlocks* blocks = zone->NewArray<InstructionBlocks>(1);
- new (blocks) InstructionBlocks(1, nullptr, zone);
InstructionBlock* block = new (zone)
InstructionBlock(zone, RpoNumber::FromInt(0), RpoNumber::Invalid(),
RpoNumber::Invalid(), false, false);
- block->set_ao_number(RpoNumber::FromInt(0));
- (*blocks)[0] = block;
+ InstructionBlocks* blocks = zone->NewArray<InstructionBlocks>(1);
+ new (blocks) InstructionBlocks(1, block, zone);
return blocks;
}
diff --git a/deps/v8/test/cctest/compiler/test-instruction.cc b/deps/v8/test/cctest/compiler/test-instruction.cc
index 1140ef9113..a806cd857f 100644
--- a/deps/v8/test/cctest/compiler/test-instruction.cc
+++ b/deps/v8/test/cctest/compiler/test-instruction.cc
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/compiler/code-generator.h"
+#include "src/compiler/backend/code-generator.h"
+#include "src/compiler/backend/instruction.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/graph.h"
-#include "src/compiler/instruction.h"
#include "src/compiler/linkage.h"
#include "src/compiler/machine-operator.h"
#include "src/compiler/node.h"
diff --git a/deps/v8/test/cctest/compiler/test-js-context-specialization.cc b/deps/v8/test/cctest/compiler/test-js-context-specialization.cc
index 7938c50069..5e6e3b3cc2 100644
--- a/deps/v8/test/cctest/compiler/test-js-context-specialization.cc
+++ b/deps/v8/test/cctest/compiler/test-js-context-specialization.cc
@@ -82,7 +82,7 @@ void ContextSpecializationTester::CheckContextInputAndDepthChanges(
Node* new_context = NodeProperties::GetContextInput(r.replacement());
CHECK_EQ(IrOpcode::kHeapConstant, new_context->opcode());
HeapObjectMatcher match(new_context);
- CHECK_EQ(*match.Value(), *expected_new_context_object);
+ CHECK_EQ(Context::cast(*match.Value()), *expected_new_context_object);
ContextAccess new_access = ContextAccessOf(r.replacement()->op());
CHECK_EQ(new_access.depth(), expected_new_depth);
@@ -108,11 +108,6 @@ void ContextSpecializationTester::CheckContextInputAndDepthChanges(
static const int slot_index = Context::NATIVE_CONTEXT_INDEX;
TEST(ReduceJSLoadContext0) {
- // TODO(neis): The native context below does not have all the fields
- // initialized that the heap broker wants to serialize.
- bool concurrent_compiler_frontend = FLAG_concurrent_compiler_frontend;
- FLAG_concurrent_compiler_frontend = false;
-
ContextSpecializationTester t(Nothing<OuterContext>());
Node* start = t.graph()->NewNode(t.common()->Start(0));
@@ -158,7 +153,7 @@ TEST(ReduceJSLoadContext0) {
Node* new_context_input = NodeProperties::GetContextInput(r.replacement());
CHECK_EQ(IrOpcode::kHeapConstant, new_context_input->opcode());
HeapObjectMatcher match(new_context_input);
- CHECK_EQ(*native, *match.Value());
+ CHECK_EQ(*native, Context::cast(*match.Value()));
ContextAccess access = ContextAccessOf(r.replacement()->op());
CHECK_EQ(Context::GLOBAL_EVAL_FUN_INDEX, static_cast<int>(access.index()));
CHECK_EQ(0, static_cast<int>(access.depth()));
@@ -177,8 +172,6 @@ TEST(ReduceJSLoadContext0) {
CHECK(match.HasValue());
CHECK_EQ(*expected, *match.Value());
}
-
- FLAG_concurrent_compiler_frontend = concurrent_compiler_frontend;
}
TEST(ReduceJSLoadContext1) {
@@ -256,11 +249,6 @@ TEST(ReduceJSLoadContext2) {
// context2 <-- context1 <-- context0 (= HeapConstant(context_object1))
// context_object1 <~~ context_object0
- // TODO(neis): The native context below does not have all the fields
- // initialized that the heap broker wants to serialize.
- bool concurrent_compiler_frontend = FLAG_concurrent_compiler_frontend;
- FLAG_concurrent_compiler_frontend = false;
-
ContextSpecializationTester t(Nothing<OuterContext>());
Node* start = t.graph()->NewNode(t.common()->Start(0));
@@ -331,8 +319,6 @@ TEST(ReduceJSLoadContext2) {
t.javascript()->LoadContext(3, slot_index, true), context2, start);
t.CheckChangesToValue(load, slot_value0);
}
-
- FLAG_concurrent_compiler_frontend = concurrent_compiler_frontend;
}
TEST(ReduceJSLoadContext3) {
@@ -342,11 +328,6 @@ TEST(ReduceJSLoadContext3) {
// context_object2 from ReduceJSLoadContext2 for this, so almost all test
// expectations are the same as in ReduceJSLoadContext2.
- // TODO(neis): The native context below does not have all the fields
- // initialized that the heap broker wants to serialize.
- bool concurrent_compiler_frontend = FLAG_concurrent_compiler_frontend;
- FLAG_concurrent_compiler_frontend = false;
-
HandleAndZoneScope handle_zone_scope;
auto factory = handle_zone_scope.main_isolate()->factory();
@@ -421,16 +402,9 @@ TEST(ReduceJSLoadContext3) {
t.javascript()->LoadContext(3, slot_index, true), context2, start);
t.CheckChangesToValue(load, slot_value0);
}
-
- FLAG_concurrent_compiler_frontend = concurrent_compiler_frontend;
}
TEST(ReduceJSStoreContext0) {
- // TODO(neis): The native context below does not have all the fields
- // initialized that the heap broker wants to serialize.
- bool concurrent_compiler_frontend = FLAG_concurrent_compiler_frontend;
- FLAG_concurrent_compiler_frontend = false;
-
ContextSpecializationTester t(Nothing<OuterContext>());
Node* start = t.graph()->NewNode(t.common()->Start(0));
@@ -484,14 +458,12 @@ TEST(ReduceJSStoreContext0) {
Node* new_context_input = NodeProperties::GetContextInput(r.replacement());
CHECK_EQ(IrOpcode::kHeapConstant, new_context_input->opcode());
HeapObjectMatcher match(new_context_input);
- CHECK_EQ(*native, *match.Value());
+ CHECK_EQ(*native, Context::cast(*match.Value()));
ContextAccess access = ContextAccessOf(r.replacement()->op());
CHECK_EQ(Context::GLOBAL_EVAL_FUN_INDEX, static_cast<int>(access.index()));
CHECK_EQ(0, static_cast<int>(access.depth()));
CHECK_EQ(false, access.immutable());
}
-
- FLAG_concurrent_compiler_frontend = concurrent_compiler_frontend;
}
TEST(ReduceJSStoreContext1) {
@@ -539,11 +511,6 @@ TEST(ReduceJSStoreContext1) {
}
TEST(ReduceJSStoreContext2) {
- // TODO(neis): The native context below does not have all the fields
- // initialized that the heap broker wants to serialize.
- bool concurrent_compiler_frontend = FLAG_concurrent_compiler_frontend;
- FLAG_concurrent_compiler_frontend = false;
-
ContextSpecializationTester t(Nothing<OuterContext>());
Node* start = t.graph()->NewNode(t.common()->Start(0));
@@ -594,16 +561,9 @@ TEST(ReduceJSStoreContext2) {
context2, context2, start, start);
t.CheckContextInputAndDepthChanges(store, context_object0, 0);
}
-
- FLAG_concurrent_compiler_frontend = concurrent_compiler_frontend;
}
TEST(ReduceJSStoreContext3) {
- // TODO(neis): The native context below does not have all the fields
- // initialized that the heap broker wants to serialize.
- bool concurrent_compiler_frontend = FLAG_concurrent_compiler_frontend;
- FLAG_concurrent_compiler_frontend = false;
-
HandleAndZoneScope handle_zone_scope;
auto factory = handle_zone_scope.main_isolate()->factory();
@@ -658,8 +618,6 @@ TEST(ReduceJSStoreContext3) {
context2, context2, start, start);
t.CheckContextInputAndDepthChanges(store, context_object0, 0);
}
-
- FLAG_concurrent_compiler_frontend = concurrent_compiler_frontend;
}
TEST(SpecializeJSFunction_ToConstant1) {
diff --git a/deps/v8/test/cctest/compiler/test-jump-threading.cc b/deps/v8/test/cctest/compiler/test-jump-threading.cc
index 52309f41e0..994fea0868 100644
--- a/deps/v8/test/cctest/compiler/test-jump-threading.cc
+++ b/deps/v8/test/cctest/compiler/test-jump-threading.cc
@@ -2,9 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/compiler/instruction-codes.h"
-#include "src/compiler/instruction.h"
-#include "src/compiler/jump-threading.h"
+#include "src/compiler/backend/instruction-codes.h"
+#include "src/compiler/backend/instruction.h"
+#include "src/compiler/backend/jump-threading.h"
#include "src/source-position.h"
#include "test/cctest/cctest.h"
@@ -613,6 +613,7 @@ TEST(FwPermuted_diamond) { RunAllPermutations<4>(RunPermutedDiamond); }
void ApplyForwarding(TestCode& code, int size, int* forward) {
+ code.sequence_.RecomputeAssemblyOrderForTesting();
ZoneVector<RpoNumber> vector(code.main_zone());
for (int i = 0; i < size; i++) {
vector.push_back(RpoNumber::FromInt(forward[i]));
diff --git a/deps/v8/test/cctest/compiler/test-linkage.cc b/deps/v8/test/cctest/compiler/test-linkage.cc
index 38c5d17b6b..b8e9479675 100644
--- a/deps/v8/test/cctest/compiler/test-linkage.cc
+++ b/deps/v8/test/cctest/compiler/test-linkage.cc
@@ -4,7 +4,6 @@
#include "src/api-inl.h"
#include "src/code-factory.h"
-#include "src/code-stubs.h"
#include "src/compiler.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/graph.h"
diff --git a/deps/v8/test/cctest/compiler/test-machine-operator-reducer.cc b/deps/v8/test/cctest/compiler/test-machine-operator-reducer.cc
index df18062acf..073891a52b 100644
--- a/deps/v8/test/cctest/compiler/test-machine-operator-reducer.cc
+++ b/deps/v8/test/cctest/compiler/test-machine-operator-reducer.cc
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/base/overflowing-math.h"
#include "src/base/utils/random-number-generator.h"
-#include "src/codegen.h"
#include "src/compiler/js-graph.h"
#include "src/compiler/machine-operator-reducer.h"
#include "src/compiler/operator-properties.h"
@@ -85,7 +85,8 @@ class ReducerTester : public HandleAndZoneScope {
graph(main_zone()),
javascript(main_zone()),
jsgraph(isolate, &graph, &common, &javascript, nullptr, &machine),
- maxuint32(Constant<int32_t>(kMaxUInt32)) {
+ maxuint32(Constant<int32_t>(kMaxUInt32)),
+ graph_reducer(main_zone(), &graph, jsgraph.Dead()) {
Node* s = graph.NewNode(common.Start(num_parameters));
graph.SetStart(s);
}
@@ -99,6 +100,7 @@ class ReducerTester : public HandleAndZoneScope {
JSOperatorBuilder javascript;
JSGraph jsgraph;
Node* maxuint32;
+ GraphReducer graph_reducer;
template <typename T>
Node* Constant(volatile T value) {
@@ -123,7 +125,7 @@ class ReducerTester : public HandleAndZoneScope {
void CheckFoldBinop(volatile T expect, Node* a, Node* b) {
CHECK(binop);
Node* n = CreateBinopNode(a, b);
- MachineOperatorReducer reducer(&jsgraph);
+ MachineOperatorReducer reducer(&graph_reducer, &jsgraph);
Reduction reduction = reducer.Reduce(n);
CHECK(reduction.Changed());
CHECK_NE(n, reduction.replacement());
@@ -143,7 +145,7 @@ class ReducerTester : public HandleAndZoneScope {
void CheckBinop(Node* expect, Node* a, Node* b) {
CHECK(binop);
Node* n = CreateBinopNode(a, b);
- MachineOperatorReducer reducer(&jsgraph);
+ MachineOperatorReducer reducer(&graph_reducer, &jsgraph);
Reduction reduction = reducer.Reduce(n);
CHECK(reduction.Changed());
CHECK_EQ(expect, reduction.replacement());
@@ -155,7 +157,7 @@ class ReducerTester : public HandleAndZoneScope {
Node* right) {
CHECK(binop);
Node* n = CreateBinopNode(left, right);
- MachineOperatorReducer reducer(&jsgraph);
+ MachineOperatorReducer reducer(&graph_reducer, &jsgraph);
Reduction reduction = reducer.Reduce(n);
CHECK(reduction.Changed());
CHECK_EQ(binop, reduction.replacement()->op());
@@ -170,7 +172,7 @@ class ReducerTester : public HandleAndZoneScope {
Node* right_expect, Node* left, Node* right) {
CHECK(binop);
Node* n = CreateBinopNode(left, right);
- MachineOperatorReducer reducer(&jsgraph);
+ MachineOperatorReducer reducer(&graph_reducer, &jsgraph);
Reduction r = reducer.Reduce(n);
CHECK(r.Changed());
CHECK_EQ(op_expect->opcode(), r.replacement()->op()->opcode());
@@ -185,7 +187,7 @@ class ReducerTester : public HandleAndZoneScope {
volatile T right_expect, Node* left, Node* right) {
CHECK(binop);
Node* n = CreateBinopNode(left, right);
- MachineOperatorReducer reducer(&jsgraph);
+ MachineOperatorReducer reducer(&graph_reducer, &jsgraph);
Reduction r = reducer.Reduce(n);
CHECK(r.Changed());
CHECK_EQ(op_expect->opcode(), r.replacement()->op()->opcode());
@@ -204,7 +206,7 @@ class ReducerTester : public HandleAndZoneScope {
Node* k = Constant<T>(constant);
{
Node* n = CreateBinopNode(k, p);
- MachineOperatorReducer reducer(&jsgraph);
+ MachineOperatorReducer reducer(&graph_reducer, &jsgraph);
Reduction reduction = reducer.Reduce(n);
CHECK(!reduction.Changed() || reduction.replacement() == n);
CHECK_EQ(p, n->InputAt(0));
@@ -212,7 +214,7 @@ class ReducerTester : public HandleAndZoneScope {
}
{
Node* n = CreateBinopNode(p, k);
- MachineOperatorReducer reducer(&jsgraph);
+ MachineOperatorReducer reducer(&graph_reducer, &jsgraph);
Reduction reduction = reducer.Reduce(n);
CHECK(!reduction.Changed());
CHECK_EQ(p, n->InputAt(0));
@@ -228,7 +230,7 @@ class ReducerTester : public HandleAndZoneScope {
Node* p = Parameter();
Node* k = Constant<T>(constant);
Node* n = CreateBinopNode(k, p);
- MachineOperatorReducer reducer(&jsgraph);
+ MachineOperatorReducer reducer(&graph_reducer, &jsgraph);
Reduction reduction = reducer.Reduce(n);
CHECK(!reduction.Changed());
CHECK_EQ(k, n->InputAt(0));
@@ -503,7 +505,7 @@ TEST(ReduceInt32Add) {
FOR_INT32_INPUTS(pl) {
FOR_INT32_INPUTS(pr) {
int32_t x = *pl, y = *pr;
- R.CheckFoldBinop<int32_t>(x + y, x, y); // TODO(titzer): signed overflow
+ R.CheckFoldBinop<int32_t>(base::AddWithWraparound(x, y), x, y);
}
}
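
base::AddWithWraparound and its siblings from the newly included src/base/overflowing-math.h exist because signed overflow is undefined behavior in C++, while the machine instructions under test wrap in two's complement. A simplified sketch of the trick such helpers rely on (the Sub, Mul, and Negate variants follow the same shape):

#include <type_traits>

// Unsigned arithmetic is defined to wrap, so convert, operate, convert back.
template <typename T>
T AddWithWraparound(T a, T b) {
  static_assert(std::is_signed<T>::value, "intended for signed integers");
  using U = typename std::make_unsigned<T>::type;
  return static_cast<T>(static_cast<U>(a) + static_cast<U>(b));
}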
@@ -524,7 +526,7 @@ TEST(ReduceInt64Add) {
FOR_INT64_INPUTS(pl) {
FOR_INT64_INPUTS(pr) {
int64_t x = *pl, y = *pr;
- R.CheckFoldBinop<int64_t>(x + y, x, y);
+ R.CheckFoldBinop<int64_t>(base::AddWithWraparound(x, y), x, y);
}
}
@@ -543,7 +545,7 @@ TEST(ReduceInt32Sub) {
FOR_INT32_INPUTS(pl) {
FOR_INT32_INPUTS(pr) {
int32_t x = *pl, y = *pr;
- R.CheckFoldBinop<int32_t>(x - y, x, y);
+ R.CheckFoldBinop<int32_t>(base::SubWithWraparound(x, y), x, y);
}
}
@@ -562,7 +564,7 @@ TEST(ReduceInt64Sub) {
FOR_INT64_INPUTS(pl) {
FOR_INT64_INPUTS(pr) {
int64_t x = *pl, y = *pr;
- R.CheckFoldBinop<int64_t>(x - y, x, y);
+ R.CheckFoldBinop<int64_t>(base::SubWithWraparound(x, y), x, y);
}
}
@@ -587,7 +589,7 @@ TEST(ReduceInt32Mul) {
FOR_INT32_INPUTS(pl) {
FOR_INT32_INPUTS(pr) {
int32_t x = *pl, y = *pr;
- R.CheckFoldBinop<int32_t>(x * y, x, y); // TODO(titzer): signed overflow
+ R.CheckFoldBinop<int32_t>(base::MulWithWraparound(x, y), x, y);
}
}
@@ -626,7 +628,8 @@ TEST(ReduceInt32Div) {
FOR_INT32_INPUTS(pr) {
int32_t x = *pl, y = *pr;
if (y == 0) continue; // TODO(titzer): test / 0
- int32_t r = y == -1 ? -x : x / y; // INT_MIN / -1 may explode in C
+ int32_t r = y == -1 ? base::NegateWithWraparound(x)
+ : x / y; // INT_MIN / -1 may explode in C
R.CheckFoldBinop<int32_t>(r, x, y);
}
}
@@ -823,7 +826,7 @@ TEST(ReduceLoadStore) {
index, R.graph.start(), R.graph.start());
{
- MachineOperatorReducer reducer(&R.jsgraph);
+ MachineOperatorReducer reducer(&R.graph_reducer, &R.jsgraph);
Reduction reduction = reducer.Reduce(load);
CHECK(!reduction.Changed()); // loads should not be reduced.
}
@@ -833,7 +836,7 @@ TEST(ReduceLoadStore) {
R.graph.NewNode(R.machine.Store(StoreRepresentation(
MachineRepresentation::kWord32, kNoWriteBarrier)),
base, index, load, load, R.graph.start());
- MachineOperatorReducer reducer(&R.jsgraph);
+ MachineOperatorReducer reducer(&R.graph_reducer, &R.jsgraph);
Reduction reduction = reducer.Reduce(store);
CHECK(!reduction.Changed()); // stores should not be reduced.
}
diff --git a/deps/v8/test/cctest/compiler/test-multiple-return.cc b/deps/v8/test/cctest/compiler/test-multiple-return.cc
index dccdbd9b92..bf5e829509 100644
--- a/deps/v8/test/cctest/compiler/test-multiple-return.cc
+++ b/deps/v8/test/cctest/compiler/test-multiple-return.cc
@@ -9,7 +9,6 @@
#include "src/assembler.h"
#include "src/base/bits.h"
-#include "src/codegen.h"
#include "src/compiler.h"
#include "src/compiler/linkage.h"
#include "src/compiler/wasm-compiler.h"
@@ -123,15 +122,11 @@ std::unique_ptr<wasm::NativeModule> AllocateNativeModule(Isolate* isolate,
size_t code_size) {
std::shared_ptr<wasm::WasmModule> module(new wasm::WasmModule());
module->num_declared_functions = 1;
- wasm::ModuleEnv env(
- module.get(), wasm::UseTrapHandler::kNoTrapHandler,
- wasm::RuntimeExceptionSupport::kNoRuntimeExceptionSupport);
// We have to add the code object to a NativeModule, because the
// WasmCallDescriptor assumes that code is on the native heap and not
// within a code object.
return isolate->wasm_engine()->code_manager()->NewNativeModule(
- isolate, wasm::kAllWasmFeatures, code_size, false, std::move(module),
- env);
+ isolate, wasm::kAllWasmFeatures, code_size, false, std::move(module));
}
void TestReturnMultipleValues(MachineType type) {
diff --git a/deps/v8/test/cctest/compiler/test-representation-change.cc b/deps/v8/test/cctest/compiler/test-representation-change.cc
index c334ecb383..f4218467f7 100644
--- a/deps/v8/test/cctest/compiler/test-representation-change.cc
+++ b/deps/v8/test/cctest/compiler/test-representation-change.cc
@@ -71,7 +71,7 @@ class RepresentationChangerTester : public HandleAndZoneScope,
CHECK_FLOAT_EQ(expected, fval);
}
- void CheckHeapConstant(Node* n, HeapObject* expected) {
+ void CheckHeapConstant(Node* n, HeapObject expected) {
HeapObjectMatcher m(n);
CHECK(m.HasValue());
CHECK_EQ(expected, *m.Value());
@@ -204,6 +204,15 @@ TEST(ToFloat64_constant) {
UseInfo(MachineRepresentation::kFloat64, Truncation::None()));
r.CheckFloat64Constant(c, i);
}
+
+ {
+ Node* n = r.jsgraph()->Constant(0);
+ Node* use = r.Return(n);
+ Node* c = r.changer()->GetRepresentationFor(
+ n, MachineRepresentation::kWord64, Type::Range(0, 0, r.zone()), use,
+ UseInfo(MachineRepresentation::kFloat64, Truncation::None()));
+ r.CheckFloat64Constant(c, 0);
+ }
}
@@ -280,7 +289,7 @@ TEST(ToInt64_constant) {
Node* n = r.jsgraph()->Constant(*i);
Node* use = r.Return(n);
Node* c = r.changer()->GetRepresentationFor(
- n, MachineRepresentation::kTagged, TypeCache::Get().kSafeInteger, use,
+ n, MachineRepresentation::kTagged, TypeCache::Get()->kSafeInteger, use,
UseInfo(MachineRepresentation::kWord64, Truncation::None()));
r.CheckInt64Constant(c, *i);
}
@@ -299,7 +308,8 @@ static void CheckChange(IrOpcode::Value expected, MachineRepresentation from,
CHECK_EQ(expected, c->opcode());
CHECK_EQ(n, c->InputAt(0));
- if (expected == IrOpcode::kCheckedFloat64ToInt32) {
+ if (expected == IrOpcode::kCheckedFloat64ToInt32 ||
+ expected == IrOpcode::kCheckedFloat64ToInt64) {
CheckForMinusZeroMode mode =
from_type.Maybe(Type::MinusZero())
? use_info.minus_zero_check()
@@ -316,13 +326,13 @@ static void CheckChange(IrOpcode::Value expected, MachineRepresentation from,
static void CheckTwoChanges(IrOpcode::Value expected2,
IrOpcode::Value expected1,
MachineRepresentation from, Type from_type,
- MachineRepresentation to) {
+ MachineRepresentation to, UseInfo use_info) {
RepresentationChangerTester r;
Node* n = r.Parameter();
Node* use = r.Return(n);
- Node* c1 = r.changer()->GetRepresentationFor(n, from, from_type, use,
- UseInfo(to, Truncation::None()));
+ Node* c1 =
+ r.changer()->GetRepresentationFor(n, from, from_type, use, use_info);
CHECK_NE(c1, n);
CHECK_EQ(expected1, c1->opcode());
@@ -332,6 +342,14 @@ static void CheckTwoChanges(IrOpcode::Value expected2,
CHECK_EQ(n, c2->InputAt(0));
}
+static void CheckTwoChanges(IrOpcode::Value expected2,
+ IrOpcode::Value expected1,
+ MachineRepresentation from, Type from_type,
+ MachineRepresentation to) {
+ CheckTwoChanges(expected2, expected1, from, from_type, to,
+ UseInfo(to, Truncation::None()));
+}
+
static void CheckChange(IrOpcode::Value expected, MachineRepresentation from,
Type from_type, MachineRepresentation to,
UseInfo use_info) {
@@ -349,13 +367,13 @@ static void CheckChange(IrOpcode::Value expected, MachineRepresentation from,
TEST(Word64) {
CheckChange(IrOpcode::kChangeInt32ToInt64, MachineRepresentation::kWord8,
- TypeCache::Get().kInt8, MachineRepresentation::kWord64);
+ TypeCache::Get()->kInt8, MachineRepresentation::kWord64);
CheckChange(IrOpcode::kChangeUint32ToUint64, MachineRepresentation::kWord8,
- TypeCache::Get().kUint8, MachineRepresentation::kWord64);
+ TypeCache::Get()->kUint8, MachineRepresentation::kWord64);
CheckChange(IrOpcode::kChangeInt32ToInt64, MachineRepresentation::kWord16,
- TypeCache::Get().kInt16, MachineRepresentation::kWord64);
+ TypeCache::Get()->kInt16, MachineRepresentation::kWord64);
CheckChange(IrOpcode::kChangeUint32ToUint64, MachineRepresentation::kWord16,
- TypeCache::Get().kUint16, MachineRepresentation::kWord64);
+ TypeCache::Get()->kUint16, MachineRepresentation::kWord64);
CheckChange(IrOpcode::kChangeInt32ToInt64, MachineRepresentation::kWord32,
Type::Signed32(), MachineRepresentation::kWord64);
CheckChange(IrOpcode::kChangeUint32ToUint64, MachineRepresentation::kWord32,
@@ -366,15 +384,15 @@ TEST(Word64) {
CheckChange(IrOpcode::kTruncateInt64ToInt32, MachineRepresentation::kWord64,
Type::Unsigned32(), MachineRepresentation::kWord32);
CheckChange(IrOpcode::kTruncateInt64ToInt32, MachineRepresentation::kWord64,
- TypeCache::Get().kSafeInteger, MachineRepresentation::kWord32,
+ TypeCache::Get()->kSafeInteger, MachineRepresentation::kWord32,
UseInfo::TruncatingWord32());
CheckChange(
IrOpcode::kCheckedInt64ToInt32, MachineRepresentation::kWord64,
- TypeCache::Get().kSafeInteger, MachineRepresentation::kWord32,
+ TypeCache::Get()->kSafeInteger, MachineRepresentation::kWord32,
UseInfo::CheckedSigned32AsWord32(kIdentifyZeros, VectorSlotPair()));
CheckChange(
IrOpcode::kCheckedUint64ToInt32, MachineRepresentation::kWord64,
- TypeCache::Get().kPositiveSafeInteger, MachineRepresentation::kWord32,
+ TypeCache::Get()->kPositiveSafeInteger, MachineRepresentation::kWord32,
UseInfo::CheckedSigned32AsWord32(kIdentifyZeros, VectorSlotPair()));
CheckChange(IrOpcode::kChangeFloat64ToInt64, MachineRepresentation::kFloat64,
@@ -382,18 +400,22 @@ TEST(Word64) {
CheckChange(IrOpcode::kChangeFloat64ToInt64, MachineRepresentation::kFloat64,
Type::Unsigned32(), MachineRepresentation::kWord64);
CheckChange(IrOpcode::kChangeFloat64ToInt64, MachineRepresentation::kFloat64,
- TypeCache::Get().kSafeInteger, MachineRepresentation::kWord64);
+ TypeCache::Get()->kSafeInteger, MachineRepresentation::kWord64);
CheckChange(IrOpcode::kChangeFloat64ToInt64, MachineRepresentation::kFloat64,
- TypeCache::Get().kInt64, MachineRepresentation::kWord64);
+ TypeCache::Get()->kInt64, MachineRepresentation::kWord64);
CheckChange(IrOpcode::kChangeFloat64ToUint64, MachineRepresentation::kFloat64,
- TypeCache::Get().kUint64, MachineRepresentation::kWord64);
+ TypeCache::Get()->kUint64, MachineRepresentation::kWord64);
+ CheckChange(
+ IrOpcode::kCheckedFloat64ToInt64, MachineRepresentation::kFloat64,
+ Type::Number(), MachineRepresentation::kWord64,
+ UseInfo::CheckedSigned64AsWord64(kIdentifyZeros, VectorSlotPair()));
CheckChange(IrOpcode::kChangeInt64ToFloat64, MachineRepresentation::kWord64,
Type::Signed32(), MachineRepresentation::kFloat64);
CheckChange(IrOpcode::kChangeInt64ToFloat64, MachineRepresentation::kWord64,
Type::Unsigned32(), MachineRepresentation::kFloat64);
CheckChange(IrOpcode::kChangeInt64ToFloat64, MachineRepresentation::kWord64,
- TypeCache::Get().kSafeInteger, MachineRepresentation::kFloat64);
+ TypeCache::Get()->kSafeInteger, MachineRepresentation::kFloat64);
CheckTwoChanges(IrOpcode::kChangeFloat32ToFloat64,
IrOpcode::kChangeFloat64ToInt64,
@@ -405,12 +427,17 @@ TEST(Word64) {
MachineRepresentation::kWord64);
CheckTwoChanges(IrOpcode::kChangeFloat32ToFloat64,
IrOpcode::kChangeFloat64ToInt64,
- MachineRepresentation::kFloat32, TypeCache::Get().kInt64,
+ MachineRepresentation::kFloat32, TypeCache::Get()->kInt64,
MachineRepresentation::kWord64);
CheckTwoChanges(IrOpcode::kChangeFloat32ToFloat64,
IrOpcode::kChangeFloat64ToUint64,
- MachineRepresentation::kFloat32, TypeCache::Get().kUint64,
+ MachineRepresentation::kFloat32, TypeCache::Get()->kUint64,
MachineRepresentation::kWord64);
+ CheckTwoChanges(
+ IrOpcode::kChangeFloat32ToFloat64, IrOpcode::kCheckedFloat64ToInt64,
+ MachineRepresentation::kFloat32, Type::Number(),
+ MachineRepresentation::kWord64,
+ UseInfo::CheckedSigned64AsWord64(kIdentifyZeros, VectorSlotPair()));
CheckTwoChanges(IrOpcode::kChangeInt64ToFloat64,
IrOpcode::kTruncateFloat64ToFloat32,
@@ -422,12 +449,20 @@ TEST(Word64) {
CheckChange(IrOpcode::kChangeTaggedToInt64, MachineRepresentation::kTagged,
Type::Unsigned32(), MachineRepresentation::kWord64);
CheckChange(IrOpcode::kChangeTaggedToInt64, MachineRepresentation::kTagged,
- TypeCache::Get().kSafeInteger, MachineRepresentation::kWord64);
+ TypeCache::Get()->kSafeInteger, MachineRepresentation::kWord64);
CheckChange(IrOpcode::kChangeTaggedToInt64, MachineRepresentation::kTagged,
- TypeCache::Get().kInt64, MachineRepresentation::kWord64);
+ TypeCache::Get()->kInt64, MachineRepresentation::kWord64);
CheckChange(IrOpcode::kChangeTaggedSignedToInt64,
MachineRepresentation::kTaggedSigned, Type::SignedSmall(),
MachineRepresentation::kWord64);
+ CheckChange(
+ IrOpcode::kCheckedTaggedToInt64, MachineRepresentation::kTagged,
+ Type::Number(), MachineRepresentation::kWord64,
+ UseInfo::CheckedSigned64AsWord64(kIdentifyZeros, VectorSlotPair()));
+ CheckChange(
+ IrOpcode::kCheckedTaggedToInt64, MachineRepresentation::kTaggedPointer,
+ Type::Number(), MachineRepresentation::kWord64,
+ UseInfo::CheckedSigned64AsWord64(kIdentifyZeros, VectorSlotPair()));
CheckTwoChanges(IrOpcode::kTruncateInt64ToInt32,
IrOpcode::kChangeInt31ToTaggedSigned,
@@ -442,9 +477,9 @@ TEST(Word64) {
MachineRepresentation::kWord64, Type::Unsigned32(),
MachineRepresentation::kTagged);
CheckChange(IrOpcode::kChangeInt64ToTagged, MachineRepresentation::kWord64,
- TypeCache::Get().kSafeInteger, MachineRepresentation::kTagged);
+ TypeCache::Get()->kSafeInteger, MachineRepresentation::kTagged);
CheckChange(IrOpcode::kChangeUint64ToTagged, MachineRepresentation::kWord64,
- TypeCache::Get().kPositiveSafeInteger,
+ TypeCache::Get()->kPositiveSafeInteger,
MachineRepresentation::kTagged);
CheckTwoChanges(IrOpcode::kTruncateInt64ToInt32,
@@ -458,19 +493,19 @@ TEST(Word64) {
MachineRepresentation::kTaggedSigned);
}
CheckChange(IrOpcode::kCheckedInt64ToTaggedSigned,
- MachineRepresentation::kWord64, TypeCache::Get().kSafeInteger,
+ MachineRepresentation::kWord64, TypeCache::Get()->kSafeInteger,
MachineRepresentation::kTaggedSigned,
UseInfo::CheckedSignedSmallAsTaggedSigned(VectorSlotPair()));
CheckChange(IrOpcode::kCheckedUint64ToTaggedSigned,
MachineRepresentation::kWord64,
- TypeCache::Get().kPositiveSafeInteger,
+ TypeCache::Get()->kPositiveSafeInteger,
MachineRepresentation::kTaggedSigned,
UseInfo::CheckedSignedSmallAsTaggedSigned(VectorSlotPair()));
- CheckTwoChanges(IrOpcode::kChangeInt64ToFloat64,
- IrOpcode::kChangeFloat64ToTaggedPointer,
- MachineRepresentation::kWord64, TypeCache::Get().kSafeInteger,
- MachineRepresentation::kTaggedPointer);
+ CheckTwoChanges(
+ IrOpcode::kChangeInt64ToFloat64, IrOpcode::kChangeFloat64ToTaggedPointer,
+ MachineRepresentation::kWord64, TypeCache::Get()->kSafeInteger,
+ MachineRepresentation::kTaggedPointer);
}
TEST(SingleChanges) {
@@ -589,6 +624,11 @@ TEST(SignednessInWord32) {
IrOpcode::kTruncateFloat64ToWord32,
MachineRepresentation::kFloat32, Type::Number(),
MachineRepresentation::kWord32);
+
+ CheckChange(
+ IrOpcode::kCheckedUint32ToInt32, MachineRepresentation::kWord32,
+ Type::Unsigned32(),
+ UseInfo::CheckedSigned32AsWord32(kIdentifyZeros, VectorSlotPair()));
}
static void TestMinusZeroCheck(IrOpcode::Value expected, Type from_type) {
diff --git a/deps/v8/test/cctest/compiler/test-run-bytecode-graph-builder.cc b/deps/v8/test/cctest/compiler/test-run-bytecode-graph-builder.cc
index 681669f334..775ffadfd4 100644
--- a/deps/v8/test/cctest/compiler/test-run-bytecode-graph-builder.cc
+++ b/deps/v8/test/cctest/compiler/test-run-bytecode-graph-builder.cc
@@ -2703,37 +2703,6 @@ void TestJumpWithConstantsAndWideConstants(size_t shard) {
SHARD_TEST_BY_4(JumpWithConstantsAndWideConstants)
-TEST(BytecodeGraphBuilderDoExpressions) {
- bool old_flag = FLAG_harmony_do_expressions;
- FLAG_harmony_do_expressions = true;
- HandleAndZoneScope scope;
- Isolate* isolate = scope.main_isolate();
- Factory* factory = isolate->factory();
- ExpectedSnippet<0> snippets[] = {
- {"var a = do {}; return a;", {factory->undefined_value()}},
- {"var a = do { var x = 100; }; return a;", {factory->undefined_value()}},
- {"var a = do { var x = 100; }; return a;", {factory->undefined_value()}},
- {"var a = do { var x = 100; x++; }; return a;",
- {handle(Smi::FromInt(100), isolate)}},
- {"var i = 0; for (; i < 5;) { i = do { if (i == 3) { break; }; i + 1; }};"
- "return i;",
- {handle(Smi::FromInt(3), isolate)}},
- };
-
- for (size_t i = 0; i < arraysize(snippets); i++) {
- ScopedVector<char> script(1024);
- SNPrintF(script, "function %s() { %s }\n%s();", kFunctionName,
- snippets[i].code_snippet, kFunctionName);
-
- BytecodeGraphTester tester(isolate, script.start());
- auto callable = tester.GetCallable<>();
- Handle<Object> return_value = callable().ToHandleChecked();
- CHECK(return_value->SameValue(*snippets[i].return_value()));
- }
-
- FLAG_harmony_do_expressions = old_flag;
-}
-
TEST(BytecodeGraphBuilderWithStatement) {
HandleAndZoneScope scope;
Isolate* isolate = scope.main_isolate();
diff --git a/deps/v8/test/cctest/compiler/test-run-load-store.cc b/deps/v8/test/cctest/compiler/test-run-load-store.cc
index a9c7ed0587..ffee5310d2 100644
--- a/deps/v8/test/cctest/compiler/test-run-load-store.cc
+++ b/deps/v8/test/cctest/compiler/test-run-load-store.cc
@@ -7,8 +7,8 @@
#include <limits>
#include "src/base/bits.h"
+#include "src/base/overflowing-math.h"
#include "src/base/utils/random-number-generator.h"
-#include "src/codegen.h"
#include "src/objects-inl.h"
#include "test/cctest/cctest.h"
#include "test/cctest/compiler/codegen-tester.h"
@@ -25,6 +25,14 @@ enum TestAlignment {
kUnaligned,
};
+#if V8_TARGET_LITTLE_ENDIAN
+#define LSB(addr, bytes) addr
+#elif V8_TARGET_BIG_ENDIAN
+#define LSB(addr, bytes) reinterpret_cast<byte*>(addr + 1) - (bytes)
+#else
+#error "Unknown Architecture"
+#endif
+
// This is America!
#define A_BILLION 1000000000ULL
#define A_GIG (1024ULL * 1024ULL * 1024ULL)
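
The LSB macro, now hoisted to the top of the file, captures where the low-order bytes of a wider value live: at the start of the object on little-endian targets, at the end on big-endian ones. A standalone illustration:

#include <cstdint>
#include <cstring>

int main() {
  uint64_t value = 0x1122334455667788ULL;
  unsigned char bytes[sizeof(value)];
  std::memcpy(bytes, &value, sizeof(value));
  // Little-endian: bytes[0] == 0x88, so LSB(addr, n) is addr itself.
  // Big-endian:    bytes[7] == 0x88, so LSB(addr, n) is addr + sizeof - n.
  return 0;
}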
@@ -82,7 +90,8 @@ void RunLoadStoreFloat32Offset(TestAlignment t) {
float p2 = 0.0f; // and stores directly into this location.
FOR_INT32_INPUTS(i) {
- int32_t magic = 0x2342AABB + *i * 3;
+ int32_t magic =
+ base::AddWithWraparound(0x2342AABB, base::MulWithWraparound(*i, 3));
RawMachineAssemblerTester<int32_t> m;
int32_t offset = *i;
byte* from = reinterpret_cast<byte*>(&p1) - offset;
@@ -119,7 +128,8 @@ void RunLoadStoreFloat64Offset(TestAlignment t) {
double p2 = 0; // and stores directly into this location.
FOR_INT32_INPUTS(i) {
- int32_t magic = 0x2342AABB + *i * 3;
+ int32_t magic =
+ base::AddWithWraparound(0x2342AABB, base::MulWithWraparound(*i, 3));
RawMachineAssemblerTester<int32_t> m;
int32_t offset = *i;
byte* from = reinterpret_cast<byte*>(&p1) - offset;
@@ -178,22 +188,61 @@ TEST(RunUnalignedLoadStoreFloat64Offset) {
}
namespace {
-template <typename Type>
-void RunLoadImmIndex(MachineType rep, TestAlignment t) {
- const int kNumElems = 3;
- Type buffer[kNumElems];
- // initialize the buffer with some raw data.
- byte* raw = reinterpret_cast<byte*>(buffer);
- for (size_t i = 0; i < sizeof(buffer); i++) {
- raw[i] = static_cast<byte>((i + sizeof(buffer)) ^ 0xAA);
+// Initializes the buffer with some raw data respecting the requested
+// representation of the values.
+template <typename CType>
+void InitBuffer(CType* buffer, size_t length, MachineType rep) {
+ const size_t kBufferSize = sizeof(CType) * length;
+ if (!rep.IsTagged()) {
+ byte* raw = reinterpret_cast<byte*>(buffer);
+ for (size_t i = 0; i < kBufferSize; i++) {
+ raw[i] = static_cast<byte>((i + kBufferSize) ^ 0xAA);
+ }
+ return;
+ }
+
+ // Tagged field loads require values to be properly tagged because of
+  // pointer decompression that may be happening during load.
+ Isolate* isolate = CcTest::InitIsolateOnce();
+ Smi* smi_view = reinterpret_cast<Smi*>(&buffer[0]);
+ if (rep.IsTaggedSigned()) {
+ for (size_t i = 0; i < length; i++) {
+ smi_view[i] = Smi::FromInt(static_cast<int>(i + kBufferSize) ^ 0xABCDEF0);
+ }
+ } else {
+ memcpy(&buffer[0], &isolate->roots_table(), kBufferSize);
+ if (!rep.IsTaggedPointer()) {
+ // Also add some Smis if we are checking AnyTagged case.
+ for (size_t i = 0; i < length / 2; i++) {
+ smi_view[i] =
+ Smi::FromInt(static_cast<int>(i + kBufferSize) ^ 0xABCDEF0);
+ }
+ }
}
+}
+
+template <typename CType>
+void RunLoadImmIndex(MachineType rep, TestAlignment t) {
+ const int kNumElems = 16;
+ CType buffer[kNumElems];
+
+ InitBuffer(buffer, kNumElems, rep);
// Test with various large and small offsets.
for (int offset = -1; offset <= 200000; offset *= -5) {
for (int i = 0; i < kNumElems; i++) {
- BufferedRawMachineAssemblerTester<Type> m;
- Node* base = m.PointerConstant(buffer - offset);
+ BufferedRawMachineAssemblerTester<CType> m;
+ void* base_pointer = &buffer[0] - offset;
+#ifdef V8_COMPRESS_POINTERS
+ if (rep.IsTagged()) {
+ // When pointer compression is enabled then we need to access only
+ // the lower 32-bit of the tagged value while the buffer contains
+ // full 64-bit values.
+ base_pointer = LSB(base_pointer, kPointerSize / 2);
+ }
+#endif
+ Node* base = m.PointerConstant(base_pointer);
Node* index = m.Int32Constant((offset + i) * sizeof(buffer[0]));
if (t == TestAlignment::kAligned) {
m.Return(m.Load(rep, base, index));
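
The V8_COMPRESS_POINTERS branch above encodes the invariant the comment describes: with pointer compression, a tagged load reads only the low 32 bits of each 64-bit buffer slot, so on big-endian targets the base address must be biased toward the slot's tail. A hedged sketch of such a read (BIG_ENDIAN_TARGET is an illustrative stand-in, not a real V8 define):

#include <cstdint>
#include <cstring>

uint32_t LoadCompressedTagged(const void* slot) {
  uint32_t compressed;
#if defined(BIG_ENDIAN_TARGET)
  // The low-order half sits at the end of the 64-bit slot on big-endian.
  std::memcpy(&compressed, static_cast<const char*>(slot) + 4,
              sizeof(compressed));
#else
  std::memcpy(&compressed, slot, sizeof(compressed));
#endif
  return compressed;
}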
@@ -203,82 +252,91 @@ void RunLoadImmIndex(MachineType rep, TestAlignment t) {
UNREACHABLE();
}
- volatile Type expected = buffer[i];
- volatile Type actual = m.Call();
- CHECK_EQ(expected, actual);
+ CHECK_EQ(buffer[i], m.Call());
}
}
}
template <typename CType>
+CType NullValue() {
+ return CType{0};
+}
+
+template <>
+HeapObject NullValue<HeapObject>() {
+ return HeapObject();
+}
+
+template <typename CType>
void RunLoadStore(MachineType rep, TestAlignment t) {
- const int kNumElems = 4;
- CType buffer[kNumElems];
+ const int kNumElems = 16;
+ CType in_buffer[kNumElems];
+ CType out_buffer[kNumElems];
+
+ InitBuffer(in_buffer, kNumElems, rep);
for (int32_t x = 0; x < kNumElems; x++) {
int32_t y = kNumElems - x - 1;
- // initialize the buffer with raw data.
- byte* raw = reinterpret_cast<byte*>(buffer);
- for (size_t i = 0; i < sizeof(buffer); i++) {
- raw[i] = static_cast<byte>((i + sizeof(buffer)) ^ 0xAA);
- }
RawMachineAssemblerTester<int32_t> m;
int32_t OK = 0x29000 + x;
- Node* base = m.PointerConstant(buffer);
- Node* index0 = m.IntPtrConstant(x * sizeof(buffer[0]));
- Node* index1 = m.IntPtrConstant(y * sizeof(buffer[0]));
+ Node* in_base = m.PointerConstant(in_buffer);
+ Node* in_index = m.IntPtrConstant(x * sizeof(CType));
+ Node* out_base = m.PointerConstant(out_buffer);
+ Node* out_index = m.IntPtrConstant(y * sizeof(CType));
if (t == TestAlignment::kAligned) {
- Node* load = m.Load(rep, base, index0);
- m.Store(rep.representation(), base, index1, load, kNoWriteBarrier);
+ Node* load = m.Load(rep, in_base, in_index);
+ m.Store(rep.representation(), out_base, out_index, load, kNoWriteBarrier);
} else if (t == TestAlignment::kUnaligned) {
- Node* load = m.UnalignedLoad(rep, base, index0);
- m.UnalignedStore(rep.representation(), base, index1, load);
+ Node* load = m.UnalignedLoad(rep, in_base, in_index);
+ m.UnalignedStore(rep.representation(), out_base, out_index, load);
}
m.Return(m.Int32Constant(OK));
- CHECK(buffer[x] != buffer[y]);
+ memset(out_buffer, 0, sizeof(out_buffer));
+ CHECK_NE(in_buffer[x], out_buffer[y]);
CHECK_EQ(OK, m.Call());
- CHECK(buffer[x] == buffer[y]);
+ CHECK_EQ(in_buffer[x], out_buffer[y]);
+ for (int32_t z = 0; z < kNumElems; z++) {
+ if (z != y) CHECK_EQ(NullValue<CType>(), out_buffer[z]);
+ }
}
}
template <typename CType>
void RunUnalignedLoadStoreUnalignedAccess(MachineType rep) {
CType in, out;
- CType in_buffer[2];
- CType out_buffer[2];
- byte* raw;
-
- for (int x = 0; x < static_cast<int>(sizeof(CType)); x++) {
- int y = sizeof(CType) - x;
+ byte in_buffer[2 * sizeof(CType)];
+ byte out_buffer[2 * sizeof(CType)];
- raw = reinterpret_cast<byte*>(&in);
- for (size_t i = 0; i < sizeof(CType); i++) {
- raw[i] = static_cast<byte>((i + sizeof(CType)) ^ 0xAA);
- }
+ InitBuffer(&in, 1, rep);
- raw = reinterpret_cast<byte*>(in_buffer);
- MemCopy(raw + x, &in, sizeof(CType));
+ for (int x = 0; x < static_cast<int>(sizeof(CType)); x++) {
+ // Direct write to &in_buffer[x] may cause unaligned access in C++ code so
+ // we use MemCopy() to handle that.
+ MemCopy(&in_buffer[x], &in, sizeof(CType));
- RawMachineAssemblerTester<int32_t> m;
- int32_t OK = 0x29000 + x;
+ for (int y = 0; y < static_cast<int>(sizeof(CType)); y++) {
+ RawMachineAssemblerTester<int32_t> m;
+ int32_t OK = 0x29000 + x;
- Node* base0 = m.PointerConstant(in_buffer);
- Node* base1 = m.PointerConstant(out_buffer);
- Node* index0 = m.IntPtrConstant(x);
- Node* index1 = m.IntPtrConstant(y);
- Node* load = m.UnalignedLoad(rep, base0, index0);
- m.UnalignedStore(rep.representation(), base1, index1, load);
+ Node* in_base = m.PointerConstant(in_buffer);
+ Node* in_index = m.IntPtrConstant(x);
+ Node* load = m.UnalignedLoad(rep, in_base, in_index);
- m.Return(m.Int32Constant(OK));
+ Node* out_base = m.PointerConstant(out_buffer);
+ Node* out_index = m.IntPtrConstant(y);
+ m.UnalignedStore(rep.representation(), out_base, out_index, load);
- CHECK_EQ(OK, m.Call());
+ m.Return(m.Int32Constant(OK));
- raw = reinterpret_cast<byte*>(&out_buffer);
- MemCopy(&out, raw + y, sizeof(CType));
- CHECK(in == out);
+ CHECK_EQ(OK, m.Call());
+ // Direct read of &out_buffer[y] may cause unaligned access in C++ code
+ // so we use MemCopy() to handle that.
+ MemCopy(&out, &out_buffer[y], sizeof(CType));
+ CHECK_EQ(in, out);
+ }
}
}
} // namespace
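
MemCopy() shows up in the rewritten loops above because dereferencing a pointer that is not suitably aligned for its type is undefined behavior in C++; copying the bytes is always valid, and compilers lower it to a single load wherever the hardware allows. The pattern in isolation:

#include <cstdint>
#include <cstring>

// Reads a uint32_t from an arbitrarily aligned byte address without UB.
uint32_t LoadUnaligned32(const unsigned char* p) {
  uint32_t v;
  std::memcpy(&v, p, sizeof(v));
  return v;
}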
@@ -290,7 +348,11 @@ TEST(RunLoadImmIndex) {
RunLoadImmIndex<uint16_t>(MachineType::Uint16(), TestAlignment::kAligned);
RunLoadImmIndex<int32_t>(MachineType::Int32(), TestAlignment::kAligned);
RunLoadImmIndex<uint32_t>(MachineType::Uint32(), TestAlignment::kAligned);
- RunLoadImmIndex<int32_t*>(MachineType::AnyTagged(), TestAlignment::kAligned);
+ RunLoadImmIndex<void*>(MachineType::Pointer(), TestAlignment::kAligned);
+ RunLoadImmIndex<Smi>(MachineType::TaggedSigned(), TestAlignment::kAligned);
+ RunLoadImmIndex<HeapObject>(MachineType::TaggedPointer(),
+ TestAlignment::kAligned);
+ RunLoadImmIndex<Object>(MachineType::AnyTagged(), TestAlignment::kAligned);
RunLoadImmIndex<float>(MachineType::Float32(), TestAlignment::kAligned);
RunLoadImmIndex<double>(MachineType::Float64(), TestAlignment::kAligned);
#if V8_TARGET_ARCH_64_BIT
@@ -304,8 +366,11 @@ TEST(RunUnalignedLoadImmIndex) {
RunLoadImmIndex<uint16_t>(MachineType::Uint16(), TestAlignment::kUnaligned);
RunLoadImmIndex<int32_t>(MachineType::Int32(), TestAlignment::kUnaligned);
RunLoadImmIndex<uint32_t>(MachineType::Uint32(), TestAlignment::kUnaligned);
- RunLoadImmIndex<int32_t*>(MachineType::AnyTagged(),
- TestAlignment::kUnaligned);
+ RunLoadImmIndex<void*>(MachineType::Pointer(), TestAlignment::kUnaligned);
+ RunLoadImmIndex<Smi>(MachineType::TaggedSigned(), TestAlignment::kUnaligned);
+ RunLoadImmIndex<HeapObject>(MachineType::TaggedPointer(),
+ TestAlignment::kUnaligned);
+ RunLoadImmIndex<Object>(MachineType::AnyTagged(), TestAlignment::kUnaligned);
RunLoadImmIndex<float>(MachineType::Float32(), TestAlignment::kUnaligned);
RunLoadImmIndex<double>(MachineType::Float64(), TestAlignment::kUnaligned);
#if V8_TARGET_ARCH_64_BIT
@@ -321,7 +386,11 @@ TEST(RunLoadStore) {
RunLoadStore<uint16_t>(MachineType::Uint16(), TestAlignment::kAligned);
RunLoadStore<int32_t>(MachineType::Int32(), TestAlignment::kAligned);
RunLoadStore<uint32_t>(MachineType::Uint32(), TestAlignment::kAligned);
- RunLoadStore<void*>(MachineType::AnyTagged(), TestAlignment::kAligned);
+ RunLoadStore<void*>(MachineType::Pointer(), TestAlignment::kAligned);
+ RunLoadStore<Smi>(MachineType::TaggedSigned(), TestAlignment::kAligned);
+ RunLoadStore<HeapObject>(MachineType::TaggedPointer(),
+ TestAlignment::kAligned);
+ RunLoadStore<Object>(MachineType::AnyTagged(), TestAlignment::kAligned);
RunLoadStore<float>(MachineType::Float32(), TestAlignment::kAligned);
RunLoadStore<double>(MachineType::Float64(), TestAlignment::kAligned);
#if V8_TARGET_ARCH_64_BIT
@@ -334,7 +403,11 @@ TEST(RunUnalignedLoadStore) {
RunLoadStore<uint16_t>(MachineType::Uint16(), TestAlignment::kUnaligned);
RunLoadStore<int32_t>(MachineType::Int32(), TestAlignment::kUnaligned);
RunLoadStore<uint32_t>(MachineType::Uint32(), TestAlignment::kUnaligned);
- RunLoadStore<void*>(MachineType::AnyTagged(), TestAlignment::kUnaligned);
+ RunLoadStore<void*>(MachineType::Pointer(), TestAlignment::kUnaligned);
+ RunLoadStore<Smi>(MachineType::TaggedSigned(), TestAlignment::kUnaligned);
+ RunLoadStore<HeapObject>(MachineType::TaggedPointer(),
+ TestAlignment::kUnaligned);
+ RunLoadStore<Object>(MachineType::AnyTagged(), TestAlignment::kUnaligned);
RunLoadStore<float>(MachineType::Float32(), TestAlignment::kUnaligned);
RunLoadStore<double>(MachineType::Float64(), TestAlignment::kUnaligned);
#if V8_TARGET_ARCH_64_BIT
@@ -347,7 +420,11 @@ TEST(RunUnalignedLoadStoreUnalignedAccess) {
RunUnalignedLoadStoreUnalignedAccess<uint16_t>(MachineType::Uint16());
RunUnalignedLoadStoreUnalignedAccess<int32_t>(MachineType::Int32());
RunUnalignedLoadStoreUnalignedAccess<uint32_t>(MachineType::Uint32());
- RunUnalignedLoadStoreUnalignedAccess<void*>(MachineType::AnyTagged());
+ RunUnalignedLoadStoreUnalignedAccess<void*>(MachineType::Pointer());
+ RunUnalignedLoadStoreUnalignedAccess<Smi>(MachineType::TaggedSigned());
+ RunUnalignedLoadStoreUnalignedAccess<HeapObject>(
+ MachineType::TaggedPointer());
+ RunUnalignedLoadStoreUnalignedAccess<Object>(MachineType::AnyTagged());
RunUnalignedLoadStoreUnalignedAccess<float>(MachineType::Float32());
RunUnalignedLoadStoreUnalignedAccess<double>(MachineType::Float64());
#if V8_TARGET_ARCH_64_BIT
@@ -355,14 +432,6 @@ TEST(RunUnalignedLoadStoreUnalignedAccess) {
#endif
}
-#if V8_TARGET_LITTLE_ENDIAN
-#define LSB(addr, bytes) addr
-#elif V8_TARGET_BIG_ENDIAN
-#define LSB(addr, bytes) reinterpret_cast<byte*>(addr + 1) - bytes
-#else
-#error "Unknown Architecture"
-#endif
-
namespace {
void RunLoadStoreSignExtend32(TestAlignment t) {
int32_t buffer[4];
@@ -608,6 +677,10 @@ TEST(RunUnalignedLoadStoreTruncation) {
LoadStoreTruncation<int16_t>(MachineType::Int16(), TestAlignment::kUnaligned);
}
+#undef LSB
+#undef A_BILLION
+#undef A_GIG
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/cctest/compiler/test-run-machops.cc b/deps/v8/test/cctest/compiler/test-run-machops.cc
index 419d1b0699..782e9b51b8 100644
--- a/deps/v8/test/cctest/compiler/test-run-machops.cc
+++ b/deps/v8/test/cctest/compiler/test-run-machops.cc
@@ -8,9 +8,9 @@
#include "src/base/bits.h"
#include "src/base/ieee754.h"
+#include "src/base/overflowing-math.h"
#include "src/base/utils/random-number-generator.h"
#include "src/boxed-float.h"
-#include "src/codegen.h"
#include "src/objects-inl.h"
#include "src/utils.h"
#include "test/cctest/cctest.h"
@@ -848,7 +848,7 @@ TEST(RunDiamondPhiConst) {
TEST(RunDiamondPhiNumber) {
- RawMachineAssemblerTester<Object*> m(MachineType::Int32());
+ RawMachineAssemblerTester<Object> m(MachineType::Int32());
double false_val = -11.1;
double true_val = 200.1;
Node* true_node = m.NumberConstant(true_val);
@@ -861,7 +861,7 @@ TEST(RunDiamondPhiNumber) {
TEST(RunDiamondPhiString) {
- RawMachineAssemblerTester<Object*> m(MachineType::Int32());
+ RawMachineAssemblerTester<Object> m(MachineType::Int32());
const char* false_val = "false";
const char* true_val = "true";
Node* true_node = m.StringConstant(true_val);
@@ -2058,7 +2058,7 @@ TEST(RunInt32MulP) {
bt.AddReturn(m.Int32Mul(bt.param0, bt.param1));
FOR_INT32_INPUTS(i) {
FOR_INT32_INPUTS(j) {
- int expected = static_cast<int32_t>(*i * *j);
+ int expected = base::MulWithWraparound(*i, *j);
CHECK_EQ(expected, bt.call(*i, *j));
}
}
@@ -2125,7 +2125,8 @@ TEST(RunInt32MulAndInt32AddP) {
m.Int32Mul(m.Parameter(0), m.Int32Constant(p1))));
FOR_INT32_INPUTS(k) {
int32_t p2 = *k;
- int expected = p0 + static_cast<int32_t>(p1 * p2);
+ int expected =
+ base::AddWithWraparound(p0, base::MulWithWraparound(p1, p2));
CHECK_EQ(expected, m.Call(p2));
}
}
@@ -2142,7 +2143,8 @@ TEST(RunInt32MulAndInt32AddP) {
int32_t p0 = *i;
int32_t p1 = *j;
int32_t p2 = *k;
- int expected = p0 + static_cast<int32_t>(p1 * p2);
+ int expected =
+ base::AddWithWraparound(p0, base::MulWithWraparound(p1, p2));
CHECK_EQ(expected, m.Call(p0, p1, p2));
}
}
@@ -2159,7 +2161,8 @@ TEST(RunInt32MulAndInt32AddP) {
int32_t p0 = *i;
int32_t p1 = *j;
int32_t p2 = *k;
- int expected = static_cast<int32_t>(p0 * p1) + p2;
+ int expected =
+ base::AddWithWraparound(base::MulWithWraparound(p0, p1), p2);
CHECK_EQ(expected, m.Call(p0, p1, p2));
}
}
@@ -2175,7 +2178,8 @@ TEST(RunInt32MulAndInt32AddP) {
FOR_INT32_INPUTS(k) {
int32_t p0 = *j;
int32_t p1 = *k;
- int expected = *i + static_cast<int32_t>(p0 * p1);
+ int expected =
+ base::AddWithWraparound(*i, base::MulWithWraparound(p0, p1));
CHECK_EQ(expected, bt.call(p0, p1));
}
}
@@ -2187,24 +2191,24 @@ TEST(RunInt32MulAndInt32AddP) {
TEST(RunInt32MulAndInt32SubP) {
{
RawMachineAssemblerTester<int32_t> m(
- MachineType::Uint32(), MachineType::Int32(), MachineType::Int32());
+ MachineType::Int32(), MachineType::Int32(), MachineType::Int32());
m.Return(
m.Int32Sub(m.Parameter(0), m.Int32Mul(m.Parameter(1), m.Parameter(2))));
- FOR_UINT32_INPUTS(i) {
+ FOR_INT32_INPUTS(i) {
FOR_INT32_INPUTS(j) {
FOR_INT32_INPUTS(k) {
- uint32_t p0 = *i;
+ int32_t p0 = *i;
int32_t p1 = *j;
int32_t p2 = *k;
- // Use uint32_t because signed overflow is UB in C.
- int expected = p0 - static_cast<uint32_t>(p1 * p2);
+ int expected =
+ base::SubWithWraparound(p0, base::MulWithWraparound(p1, p2));
CHECK_EQ(expected, m.Call(p0, p1, p2));
}
}
}
}
{
- FOR_UINT32_INPUTS(i) {
+ FOR_INT32_INPUTS(i) {
RawMachineAssemblerTester<int32_t> m;
Int32BinopTester bt(&m);
bt.AddReturn(
@@ -2213,8 +2217,8 @@ TEST(RunInt32MulAndInt32SubP) {
FOR_INT32_INPUTS(k) {
int32_t p0 = *j;
int32_t p1 = *k;
- // Use uint32_t because signed overflow is UB in C.
- int expected = *i - static_cast<uint32_t>(p0 * p1);
+ int expected =
+ base::SubWithWraparound(*i, base::MulWithWraparound(p0, p1));
CHECK_EQ(expected, bt.call(p0, p1));
}
}
@@ -2262,7 +2266,8 @@ TEST(RunInt32DivP) {
int p0 = *i;
int p1 = *j;
if (p1 != 0 && (static_cast<uint32_t>(p0) != 0x80000000 || p1 != -1)) {
- int expected = static_cast<int32_t>(p0 + (p0 / p1));
+ int expected =
+ static_cast<int32_t>(base::AddWithWraparound(p0, (p0 / p1)));
CHECK_EQ(expected, bt.call(p0, p1));
}
}
@@ -2330,7 +2335,8 @@ TEST(RunInt32ModP) {
int p0 = *i;
int p1 = *j;
if (p1 != 0 && (static_cast<uint32_t>(p0) != 0x80000000 || p1 != -1)) {
- int expected = static_cast<int32_t>(p0 + (p0 % p1));
+ int expected =
+ static_cast<int32_t>(base::AddWithWraparound(p0, (p0 % p1)));
CHECK_EQ(expected, bt.call(p0, p1));
}
}
@@ -3463,7 +3469,7 @@ TEST(RunInt32NegP) {
RawMachineAssemblerTester<int32_t> m(MachineType::Int32());
m.Return(m.Int32Neg(m.Parameter(0)));
FOR_INT32_INPUTS(i) {
- int expected = -*i;
+ int expected = base::NegateWithWraparound(*i);
CHECK_EQ(expected, m.Call(*i));
}
}
@@ -3676,7 +3682,9 @@ TEST(RunFloat32Div) {
m.Return(m.Float32Div(m.Parameter(0), m.Parameter(1)));
FOR_FLOAT32_INPUTS(i) {
- FOR_FLOAT32_INPUTS(j) { CHECK_FLOAT_EQ(*i / *j, m.Call(*i, *j)); }
+ FOR_FLOAT32_INPUTS(j) {
+ CHECK_FLOAT_EQ(base::Divide(*i, *j), m.Call(*i, *j));
+ }
}
}
@@ -3725,7 +3733,9 @@ TEST(RunFloat64Div) {
m.Return(m.Float64Div(m.Parameter(0), m.Parameter(1)));
FOR_FLOAT64_INPUTS(i) {
- FOR_FLOAT64_INPUTS(j) { CHECK_DOUBLE_EQ(*i / *j, m.Call(*i, *j)); }
+ FOR_FLOAT64_INPUTS(j) {
+ CHECK_DOUBLE_EQ(base::Divide(*i, *j), m.Call(*i, *j));
+ }
}
}
@@ -4056,7 +4066,9 @@ TEST(RunFloat32DivP) {
bt.AddReturn(m.Float32Div(bt.param0, bt.param1));
FOR_FLOAT32_INPUTS(pl) {
- FOR_FLOAT32_INPUTS(pr) { CHECK_FLOAT_EQ(*pl / *pr, bt.call(*pl, *pr)); }
+ FOR_FLOAT32_INPUTS(pr) {
+ CHECK_FLOAT_EQ(base::Divide(*pl, *pr), bt.call(*pl, *pr));
+ }
}
}
@@ -4068,7 +4080,9 @@ TEST(RunFloat64DivP) {
bt.AddReturn(m.Float64Div(bt.param0, bt.param1));
FOR_FLOAT64_INPUTS(pl) {
- FOR_FLOAT64_INPUTS(pr) { CHECK_DOUBLE_EQ(*pl / *pr, bt.call(*pl, *pr)); }
+ FOR_FLOAT64_INPUTS(pr) {
+ CHECK_DOUBLE_EQ(base::Divide(*pl, *pr), bt.call(*pl, *pr));
+ }
}
}
@@ -4714,7 +4728,7 @@ TEST(RunRefDiamond) {
const int magic = 99644;
Handle<String> rexpected =
CcTest::i_isolate()->factory()->InternalizeUtf8String("A");
- String* buffer;
+ String buffer;
RawMachineLabel blocka, blockb, end;
Node* k1 = m.StringConstant("A");
@@ -4743,7 +4757,7 @@ TEST(RunDoubleRefDiamond) {
double dconstant = 99.99;
Handle<String> rexpected =
CcTest::i_isolate()->factory()->InternalizeUtf8String("AX");
- String* rbuffer;
+ String rbuffer;
RawMachineLabel blocka, blockb, end;
Node* d1 = m.Float64Constant(dconstant);
@@ -4778,7 +4792,7 @@ TEST(RunDoubleRefDoubleDiamond) {
double dconstant = 99.997;
Handle<String> rexpected =
CcTest::i_isolate()->factory()->InternalizeUtf8String("AD");
- String* rbuffer;
+ String rbuffer;
RawMachineLabel blocka, blockb, mid, blockd, blocke, end;
Node* d1 = m.Float64Constant(dconstant);
@@ -5250,7 +5264,7 @@ TEST(RunSpillConstantsAndParameters) {
Node* accs[kInputSize];
Node* acc = m.Int32Constant(0);
for (int i = 0; i < kInputSize; i++) {
- csts[i] = m.Int32Constant(static_cast<int32_t>(kBase + i));
+ csts[i] = m.Int32Constant(base::AddWithWraparound(kBase, i));
}
for (int i = 0; i < kInputSize; i++) {
acc = m.Int32Add(acc, csts[i]);
@@ -5262,9 +5276,9 @@ TEST(RunSpillConstantsAndParameters) {
m.Return(m.Int32Add(acc, m.Int32Add(m.Parameter(0), m.Parameter(1))));
FOR_INT32_INPUTS(i) {
FOR_INT32_INPUTS(j) {
- int32_t expected = *i + *j;
+ int32_t expected = base::AddWithWraparound(*i, *j);
for (int k = 0; k < kInputSize; k++) {
- expected += kBase + k;
+ expected = base::AddWithWraparound(expected, kBase + k);
}
CHECK_EQ(expected, m.Call(*i, *j));
expected = 0;
@@ -5278,7 +5292,7 @@ TEST(RunSpillConstantsAndParameters) {
TEST(RunNewSpaceConstantsInPhi) {
- RawMachineAssemblerTester<Object*> m(MachineType::Int32());
+ RawMachineAssemblerTester<Object> m(MachineType::Int32());
Isolate* isolate = CcTest::i_isolate();
Handle<HeapNumber> true_val = isolate->factory()->NewHeapNumber(11.2);
@@ -6238,17 +6252,15 @@ int32_t foo0() { return kMagicFoo0; }
int32_t foo1(int32_t x) { return x; }
+int32_t foo2(int32_t x, int32_t y) { return base::SubWithWraparound(x, y); }
-int32_t foo2(int32_t x, int32_t y) { return x - y; }
-
-
-int32_t foo8(int32_t a, int32_t b, int32_t c, int32_t d, int32_t e, int32_t f,
- int32_t g, int32_t h) {
+uint32_t foo8(uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e,
+ uint32_t f, uint32_t g, uint32_t h) {
return a + b + c + d + e + f + g + h;
}
-int32_t foo9(int32_t a, int32_t b, int32_t c, int32_t d, int32_t e, int32_t f,
- int32_t g, int32_t h, int32_t i) {
+uint32_t foo9(uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e,
+ uint32_t f, uint32_t g, uint32_t h, uint32_t i) {
return a + b + c + d + e + f + g + h + i;
}
@@ -6289,7 +6301,7 @@ TEST(RunCallCFunction2) {
int32_t const x = *i;
FOR_INT32_INPUTS(j) {
int32_t const y = *j;
- CHECK_EQ(x - y, m.Call(x, y));
+ CHECK_EQ(base::SubWithWraparound(x, y), m.Call(x, y));
}
}
}
@@ -6307,7 +6319,7 @@ TEST(RunCallCFunction8) {
function, param, param, param, param, param, param, param, param));
FOR_INT32_INPUTS(i) {
int32_t const x = *i;
- CHECK_EQ(x * 8, m.Call(x));
+ CHECK_EQ(base::MulWithWraparound(x, 8), m.Call(x));
}
}
@@ -6331,7 +6343,8 @@ TEST(RunCallCFunction9) {
m.Int32Add(param, m.Int32Constant(8))));
FOR_INT32_INPUTS(i) {
int32_t const x = *i;
- CHECK_EQ(x * 9 + 36, m.Call(x));
+ CHECK_EQ(base::AddWithWraparound(base::MulWithWraparound(x, 9), 36),
+ m.Call(x));
}
}
#endif // USE_SIMULATOR
diff --git a/deps/v8/test/cctest/compiler/test-run-native-calls.cc b/deps/v8/test/cctest/compiler/test-run-native-calls.cc
index 2ddaa1bc07..19c6abb8fc 100644
--- a/deps/v8/test/cctest/compiler/test-run-native-calls.cc
+++ b/deps/v8/test/cctest/compiler/test-run-native-calls.cc
@@ -5,7 +5,7 @@
#include <vector>
#include "src/assembler.h"
-#include "src/codegen.h"
+#include "src/base/overflowing-math.h"
#include "src/compiler/linkage.h"
#include "src/compiler/raw-machine-assembler.h"
#include "src/machine-type.h"
@@ -1061,7 +1061,7 @@ void MixedParamTest(int start) {
Handle<Code> wrapper = Handle<Code>::null();
int32_t expected_ret;
char bytes[kDoubleSize];
- V8_ALIGNED(8) char output[kDoubleSize];
+ alignas(8) char output[kDoubleSize];
int expected_size = 0;
CSignatureOf<int32_t> csig;
{
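
The alignas(8) in the hunk above is the standard C++11 spelling of the guarantee the removed V8_ALIGNED macro provided; a minimal standalone use:

// Guarantees the array starts on an 8-byte boundary, so a double can be
// written into it without misalignment.
alignas(8) char output[sizeof(double)];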
@@ -1101,7 +1101,8 @@ void MixedParamTest(int start) {
CHECK_NOT_NULL(konst);
inputs[input_count++] = konst;
- constant += 0x1010101010101010;
+ const int64_t kIncrement = 0x1010101010101010;
+ constant = base::AddWithWraparound(constant, kIncrement);
}
Node* call = raw.CallN(desc, input_count, inputs);
diff --git a/deps/v8/test/cctest/compiler/test-run-retpoline.cc b/deps/v8/test/cctest/compiler/test-run-retpoline.cc
index 3bbab4265f..24080bc573 100644
--- a/deps/v8/test/cctest/compiler/test-run-retpoline.cc
+++ b/deps/v8/test/cctest/compiler/test-run-retpoline.cc
@@ -4,6 +4,7 @@
#include "src/assembler-inl.h"
#include "src/code-stub-assembler.h"
+#include "src/macro-assembler.h"
#include "test/cctest/cctest.h"
#include "test/cctest/compiler/code-assembler-tester.h"
diff --git a/deps/v8/test/cctest/compiler/test-run-stubs.cc b/deps/v8/test/cctest/compiler/test-run-stubs.cc
deleted file mode 100644
index 9c76f22b99..0000000000
--- a/deps/v8/test/cctest/compiler/test-run-stubs.cc
+++ /dev/null
@@ -1,240 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/bootstrapper.h"
-#include "src/callable.h"
-#include "src/code-stubs.h"
-#include "src/compiler/common-operator.h"
-#include "src/compiler/graph.h"
-#include "src/compiler/js-graph.h"
-#include "src/compiler/js-operator.h"
-#include "src/compiler/linkage.h"
-#include "src/compiler/machine-operator.h"
-#include "src/compiler/pipeline.h"
-#include "src/objects-inl.h"
-#include "src/objects/js-array-inl.h"
-#include "src/optimized-compilation-info.h"
-#include "test/cctest/compiler/function-tester.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-class StubTester {
- public:
- StubTester(Zone* zone, CodeStub* stub)
- : zone_(zone),
- info_(ArrayVector("test"), zone, Code::STUB),
- interface_descriptor_(stub->GetCallInterfaceDescriptor()),
- descriptor_(Linkage::GetStubCallDescriptor(
- zone, interface_descriptor_, stub->GetStackParameterCount(),
- CallDescriptor::kNoFlags, Operator::kNoProperties)),
- graph_(zone_),
- common_(zone_),
- tester_(InitializeFunctionTester(stub->GetCode()),
- GetParameterCountWithContext()) {}
-
- StubTester(Isolate* isolate, Zone* zone, Builtins::Name name)
- : zone_(zone),
- info_(ArrayVector("test"), zone, Code::STUB),
- interface_descriptor_(
- Builtins::CallableFor(isolate, name).descriptor()),
- descriptor_(Linkage::GetStubCallDescriptor(
- zone, interface_descriptor_,
- interface_descriptor_.GetStackParameterCount(),
- CallDescriptor::kNoFlags, Operator::kNoProperties)),
- graph_(zone_),
- common_(zone_),
- tester_(InitializeFunctionTester(
- Handle<Code>(isolate->builtins()->builtin(name), isolate)),
- GetParameterCountWithContext()) {}
-
- template <typename... Args>
- Handle<Object> Call(Args... args) {
- DCHECK_EQ(interface_descriptor_.GetParameterCount(), sizeof...(args));
- MaybeHandle<Object> result =
- tester_
- .Call(args...,
- Handle<HeapObject>(tester_.function->context(), ft().isolate))
- .ToHandleChecked();
- return result.ToHandleChecked();
- }
-
- FunctionTester& ft() { return tester_; }
-
- private:
- Graph* InitializeFunctionTester(Handle<Code> stub) {
- // Add target, effect and control.
- int node_count = GetParameterCountWithContext() + 3;
- // Add extra inputs for the JSFunction parameter and the receiver (which for
- // the tester is always undefined) to the start node.
- Node* start =
- graph_.NewNode(common_.Start(GetParameterCountWithContext() + 2));
- Node** node_array = zone_->NewArray<Node*>(node_count);
- node_array[0] = graph_.NewNode(common_.HeapConstant(stub));
- for (int i = 0; i < GetParameterCountWithContext(); ++i) {
- CHECK(IsAnyTagged(descriptor_->GetParameterType(i).representation()));
- node_array[i + 1] = graph_.NewNode(common_.Parameter(i + 1), start);
- }
- node_array[node_count - 2] = start;
- node_array[node_count - 1] = start;
- Node* call =
- graph_.NewNode(common_.Call(descriptor_), node_count, &node_array[0]);
-
- Node* zero = graph_.NewNode(common_.Int32Constant(0));
- Node* ret = graph_.NewNode(common_.Return(), zero, call, call, start);
- Node* end = graph_.NewNode(common_.End(1), ret);
- graph_.SetStart(start);
- graph_.SetEnd(end);
- return &graph_;
- }
-
- int GetParameterCountWithContext() {
- return interface_descriptor_.GetParameterCount() + 1;
- }
-
- Zone* zone_;
- OptimizedCompilationInfo info_;
- CallInterfaceDescriptor interface_descriptor_;
- CallDescriptor* descriptor_;
- Graph graph_;
- CommonOperatorBuilder common_;
- FunctionTester tester_;
-};
-
-TEST(RunStringWrapperLengthStub) {
- HandleAndZoneScope scope;
- Isolate* isolate = scope.main_isolate();
- Zone* zone = scope.main_zone();
-
- StubTester tester(isolate, zone, Builtins::kLoadIC_StringWrapperLength);
-
- // Actually call through to the stub, verifying its result.
- const char* testString = "Und das Lamm schrie HURZ!";
- Handle<Object> receiverArg =
- Object::ToObject(isolate, tester.ft().Val(testString)).ToHandleChecked();
- Handle<Object> nameArg = tester.ft().Val("length");
- Handle<Object> slot = tester.ft().Val(0.0);
- Handle<Object> vector = tester.ft().Val(0.0);
- Handle<Object> result = tester.Call(receiverArg, nameArg, slot, vector);
- CHECK_EQ(static_cast<int>(strlen(testString)), Smi::ToInt(*result));
-}
-
-TEST(RunArrayExtractStubSimple) {
- HandleAndZoneScope scope;
- Isolate* isolate = scope.main_isolate();
- Zone* zone = scope.main_zone();
-
- StubTester tester(isolate, zone, Builtins::kExtractFastJSArray);
-
- // Actually call through to the stub, verifying its result.
- Handle<JSArray> source_array = isolate->factory()->NewJSArray(
- PACKED_ELEMENTS, 5, 10, INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
- static_cast<FixedArray*>(source_array->elements())->set(0, Smi::FromInt(5));
- static_cast<FixedArray*>(source_array->elements())->set(1, Smi::FromInt(4));
- static_cast<FixedArray*>(source_array->elements())->set(2, Smi::FromInt(3));
- static_cast<FixedArray*>(source_array->elements())->set(3, Smi::FromInt(2));
- static_cast<FixedArray*>(source_array->elements())->set(4, Smi::FromInt(1));
- Handle<JSArray> result = Handle<JSArray>::cast(
- tester.Call(source_array, Handle<Smi>(Smi::FromInt(0), isolate),
- Handle<Smi>(Smi::FromInt(5), isolate)));
- CHECK_NE(*source_array, *result);
- CHECK_EQ(result->GetElementsKind(), PACKED_ELEMENTS);
- CHECK_EQ(static_cast<FixedArray*>(result->elements())->get(0),
- Smi::FromInt(5));
- CHECK_EQ(static_cast<FixedArray*>(result->elements())->get(1),
- Smi::FromInt(4));
- CHECK_EQ(static_cast<FixedArray*>(result->elements())->get(2),
- Smi::FromInt(3));
- CHECK_EQ(static_cast<FixedArray*>(result->elements())->get(3),
- Smi::FromInt(2));
- CHECK_EQ(static_cast<FixedArray*>(result->elements())->get(4),
- Smi::FromInt(1));
-}
-
-TEST(RunArrayExtractDoubleStubSimple) {
- HandleAndZoneScope scope;
- Isolate* isolate = scope.main_isolate();
- Zone* zone = scope.main_zone();
-
- StubTester tester(isolate, zone, Builtins::kExtractFastJSArray);
-
- // Actually call through to the stub, verifying its result.
- Handle<JSArray> source_array = isolate->factory()->NewJSArray(
- PACKED_DOUBLE_ELEMENTS, 5, 10, INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
- static_cast<FixedDoubleArray*>(source_array->elements())->set(0, 5);
- static_cast<FixedDoubleArray*>(source_array->elements())->set(1, 4);
- static_cast<FixedDoubleArray*>(source_array->elements())->set(2, 3);
- static_cast<FixedDoubleArray*>(source_array->elements())->set(3, 2);
- static_cast<FixedDoubleArray*>(source_array->elements())->set(4, 1);
- Handle<JSArray> result = Handle<JSArray>::cast(
- tester.Call(source_array, Handle<Smi>(Smi::FromInt(0), isolate),
- Handle<Smi>(Smi::FromInt(5), isolate)));
- CHECK_NE(*source_array, *result);
- CHECK_EQ(result->GetElementsKind(), PACKED_DOUBLE_ELEMENTS);
- CHECK_EQ(static_cast<FixedDoubleArray*>(result->elements())->get_scalar(0),
- 5);
- CHECK_EQ(static_cast<FixedDoubleArray*>(result->elements())->get_scalar(1),
- 4);
- CHECK_EQ(static_cast<FixedDoubleArray*>(result->elements())->get_scalar(2),
- 3);
- CHECK_EQ(static_cast<FixedDoubleArray*>(result->elements())->get_scalar(3),
- 2);
- CHECK_EQ(static_cast<FixedDoubleArray*>(result->elements())->get_scalar(4),
- 1);
-}
-
-TEST(RunArrayExtractStubTooBigForNewSpace) {
- HandleAndZoneScope scope;
- Isolate* isolate = scope.main_isolate();
- Zone* zone = scope.main_zone();
-
- StubTester tester(isolate, zone, Builtins::kExtractFastJSArray);
-
- // Actually call through to the stub, verifying its result.
- Handle<JSArray> source_array = isolate->factory()->NewJSArray(
- PACKED_ELEMENTS, 500000, 500000, INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
- for (int i = 0; i < 500000; ++i) {
- static_cast<FixedArray*>(source_array->elements())->set(i, Smi::FromInt(i));
- }
- Handle<JSArray> result = Handle<JSArray>::cast(
- tester.Call(source_array, Handle<Smi>(Smi::FromInt(0), isolate),
- Handle<Smi>(Smi::FromInt(500000), isolate)));
- CHECK_NE(*source_array, *result);
- CHECK_EQ(result->GetElementsKind(), PACKED_ELEMENTS);
- for (int i = 0; i < 500000; ++i) {
- CHECK_EQ(static_cast<FixedArray*>(source_array->elements())->get(i),
- static_cast<FixedArray*>(result->elements())->get(i));
- }
-}
-
-TEST(RunArrayExtractDoubleStubTooBigForNewSpace) {
- HandleAndZoneScope scope;
- Isolate* isolate = scope.main_isolate();
- Zone* zone = scope.main_zone();
-
- StubTester tester(isolate, zone, Builtins::kExtractFastJSArray);
-
- // Actually call through to the stub, verifying its result.
- Handle<JSArray> source_array = isolate->factory()->NewJSArray(
- PACKED_DOUBLE_ELEMENTS, 500000, 500000,
- INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE, TENURED);
- for (int i = 0; i < 500000; ++i) {
- static_cast<FixedDoubleArray*>(source_array->elements())->set(i, i);
- }
- Handle<JSArray> result = Handle<JSArray>::cast(
- tester.Call(source_array, Handle<Smi>(Smi::FromInt(0), isolate),
- Handle<Smi>(Smi::FromInt(500000), isolate)));
- CHECK_NE(*source_array, *result);
- CHECK_EQ(result->GetElementsKind(), PACKED_DOUBLE_ELEMENTS);
- for (int i = 0; i < 500000; ++i) {
- CHECK_EQ(
- static_cast<FixedDoubleArray*>(source_array->elements())->get_scalar(i),
- static_cast<FixedDoubleArray*>(result->elements())->get_scalar(i));
- }
-}
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/test/cctest/compiler/test-run-tail-calls.cc b/deps/v8/test/cctest/compiler/test-run-tail-calls.cc
index b57b4fcbac..b0ca000a02 100644
--- a/deps/v8/test/cctest/compiler/test-run-tail-calls.cc
+++ b/deps/v8/test/cctest/compiler/test-run-tail-calls.cc
@@ -5,6 +5,7 @@
#include "src/assembler-inl.h"
#include "src/base/utils/random-number-generator.h"
#include "src/code-stub-assembler.h"
+#include "src/macro-assembler.h"
#include "test/cctest/cctest.h"
#include "test/cctest/compiler/code-assembler-tester.h"
diff --git a/deps/v8/test/cctest/compiler/test-run-unwinding-info.cc b/deps/v8/test/cctest/compiler/test-run-unwinding-info.cc
index e50fcd90cd..5ecc501c2e 100644
--- a/deps/v8/test/cctest/compiler/test-run-unwinding-info.cc
+++ b/deps/v8/test/cctest/compiler/test-run-unwinding-info.cc
@@ -9,7 +9,6 @@
#include "src/flags.h"
#include "src/objects-inl.h"
#include "src/objects.h"
-#include "src/unicode-cache.h"
#include "test/cctest/compiler/function-tester.h"
namespace v8 {
diff --git a/deps/v8/test/cctest/compiler/value-helper.h b/deps/v8/test/cctest/compiler/value-helper.h
index 8e652ec3b5..45750e7e28 100644
--- a/deps/v8/test/cctest/compiler/value-helper.h
+++ b/deps/v8/test/cctest/compiler/value-helper.h
@@ -48,7 +48,7 @@ class ValueHelper {
CHECK_EQ(expected, OpParameter<int32_t>(node->op()));
}
- void CheckHeapConstant(HeapObject* expected, Node* node) {
+ void CheckHeapConstant(HeapObject expected, Node* node) {
CHECK_EQ(IrOpcode::kHeapConstant, node->opcode());
CHECK_EQ(expected, *HeapConstantOf(node->op()));
}
diff --git a/deps/v8/test/cctest/heap/heap-tester.h b/deps/v8/test/cctest/heap/heap-tester.h
index a2e860a8a3..c53b9aeccb 100644
--- a/deps/v8/test/cctest/heap/heap-tester.h
+++ b/deps/v8/test/cctest/heap/heap-tester.h
@@ -6,6 +6,7 @@
#define HEAP_HEAP_TESTER_H_
#include "src/heap/spaces.h"
+#include "src/objects/fixed-array.h"
// Tests that should have access to private methods of {v8::internal::Heap}.
// Those tests need to be defined using HEAP_TEST(Name) { ... }.
@@ -29,6 +30,8 @@
V(TestNewSpaceRefsInCopiedCode) \
V(GCFlags) \
V(MarkCompactCollector) \
+ V(MarkCompactEpochCounter) \
+ V(MemoryReducerActivationForSmallHeaps) \
V(NoPromotion) \
V(NumberStringCacheSize) \
V(ObjectGroups) \
@@ -87,7 +90,7 @@ class HeapTester {
// test-invalidated-slots.cc
static Page* AllocateByteArraysOnPage(Heap* heap,
- std::vector<ByteArray*>* byte_arrays);
+ std::vector<ByteArray>* byte_arrays);
// test-api.cc
static void ResetWeakHandle(bool global_gc);
diff --git a/deps/v8/test/cctest/heap/heap-utils.cc b/deps/v8/test/cctest/heap/heap-utils.cc
index 8f70847c9a..084bf6ef1b 100644
--- a/deps/v8/test/cctest/heap/heap-utils.cc
+++ b/deps/v8/test/cctest/heap/heap-utils.cc
@@ -26,7 +26,7 @@ void SealCurrentObjects(Heap* heap) {
}
int FixedArrayLenFromSize(int size) {
- return (size - FixedArray::kHeaderSize) / kPointerSize;
+ return (size - FixedArray::kHeaderSize) / kTaggedSize;
}
std::vector<Handle<FixedArray>> FillOldSpacePageWithFixedArrays(Heap* heap,
@@ -36,25 +36,32 @@ std::vector<Handle<FixedArray>> FillOldSpacePageWithFixedArrays(Heap* heap,
Isolate* isolate = heap->isolate();
const int kArraySize = 128;
const int kArrayLen = heap::FixedArrayLenFromSize(kArraySize);
- CHECK_EQ(Page::kAllocatableMemory % kArraySize, 0);
Handle<FixedArray> array;
- for (int allocated = 0; allocated != (Page::kAllocatableMemory - remainder);
- allocated += array->Size()) {
- if (allocated == (Page::kAllocatableMemory - kArraySize)) {
- array = isolate->factory()->NewFixedArray(
- heap::FixedArrayLenFromSize(kArraySize - remainder), TENURED);
- CHECK_EQ(kArraySize - remainder, array->Size());
+ int allocated = 0;
+ do {
+ if (allocated + kArraySize * 2 >
+ static_cast<int>(MemoryChunkLayout::AllocatableMemoryInDataPage())) {
+ int size =
+ kArraySize * 2 -
+ ((allocated + kArraySize * 2) -
+ static_cast<int>(MemoryChunkLayout::AllocatableMemoryInDataPage())) -
+ remainder;
+ int last_array_len = heap::FixedArrayLenFromSize(size);
+ array = isolate->factory()->NewFixedArray(last_array_len, TENURED);
+ CHECK_EQ(size, array->Size());
+ allocated += array->Size() + remainder;
} else {
array = isolate->factory()->NewFixedArray(kArrayLen, TENURED);
+ allocated += array->Size();
CHECK_EQ(kArraySize, array->Size());
}
if (handles.empty()) {
// Check that allocations started on a new page.
- CHECK_EQ(array->address(),
- Page::FromAddress(array->address())->area_start());
+ CHECK_EQ(array->address(), Page::FromHeapObject(*array)->area_start());
}
handles.push_back(array);
- }
+ } while (allocated <
+ static_cast<int>(MemoryChunkLayout::AllocatableMemoryInDataPage()));
return handles;
}
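The rewritten loop above drops the assumption that the allocatable page area is a multiple of kArraySize: it allocates 128-byte arrays until fewer than two would fit, then sizes one last array so the page is consumed exactly, minus the requested remainder. It leans on the length computation fixed at the top of this file; a hedged sketch with illustrative constants (the real FixedArray header size and kTaggedSize depend on the build configuration):

// Assumed for illustration only: a 16-byte FixedArray header and 4-byte
// tagged slots, as under pointer compression.
constexpr int kHeaderSizeSketch = 16;
constexpr int kTaggedSizeSketch = 4;

constexpr int FixedArrayLenFromSizeSketch(int size) {
  return (size - kHeaderSizeSketch) / kTaggedSizeSketch;
}
static_assert(FixedArrayLenFromSizeSketch(128) == 28,
              "a 128-byte array holds 28 tagged slots under these assumptions");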
@@ -85,7 +92,7 @@ std::vector<Handle<FixedArray>> CreatePadding(Heap* heap, int padding_size,
length = FixedArrayLenFromSize(allocate_memory);
if (length <= 0) {
// Not enough room to create another fixed array. Let's create a filler.
- if (free_memory > (2 * kPointerSize)) {
+ if (free_memory > (2 * kTaggedSize)) {
heap->CreateFillerObjectAt(
*heap->old_space()->allocation_top_address(), free_memory,
ClearRecordedSlots::kNo);
diff --git a/deps/v8/test/cctest/heap/test-alloc.cc b/deps/v8/test/cctest/heap/test-alloc.cc
index a669233b3a..89bb25b56c 100644
--- a/deps/v8/test/cctest/heap/test-alloc.cc
+++ b/deps/v8/test/cctest/heap/test-alloc.cc
@@ -49,7 +49,7 @@ Handle<Object> HeapTester::TestAllocateAfterFailures() {
Heap* heap = CcTest::heap();
int size = FixedArray::SizeFor(100);
// New space.
- HeapObject* obj = heap->AllocateRaw(size, NEW_SPACE).ToObjectChecked();
+ HeapObject obj = heap->AllocateRaw(size, NEW_SPACE).ToObjectChecked();
// In order to pass heap verification on Isolate teardown, mark the
// allocated area as a filler.
heap->CreateFillerObjectAt(obj->address(), size, ClearRecordedSlots::kNo);
@@ -150,7 +150,7 @@ TEST(StressJS) {
Descriptor d = Descriptor::AccessorConstant(
Handle<Name>(Name::cast(foreign->name()), isolate), foreign, attrs);
- map->AppendDescriptor(&d);
+ map->AppendDescriptor(isolate, &d);
// Add the Foo constructor to the global object.
CHECK(env->Global()
diff --git a/deps/v8/test/cctest/heap/test-array-buffer-tracker.cc b/deps/v8/test/cctest/heap/test-array-buffer-tracker.cc
index 272c8831fd..daeccca777 100644
--- a/deps/v8/test/cctest/heap/test-array-buffer-tracker.cc
+++ b/deps/v8/test/cctest/heap/test-array-buffer-tracker.cc
@@ -16,7 +16,7 @@ namespace {
typedef i::LocalArrayBufferTracker LocalTracker;
-bool IsTracked(i::JSArrayBuffer* buf) {
+bool IsTracked(i::JSArrayBuffer buf) {
return i::ArrayBufferTracker::IsTracked(buf);
}
@@ -36,7 +36,7 @@ TEST(ArrayBuffer_OnlyMC) {
v8::Isolate* isolate = env->GetIsolate();
Heap* heap = reinterpret_cast<Isolate*>(isolate)->heap();
- JSArrayBuffer* raw_ab = nullptr;
+ JSArrayBuffer raw_ab;
{
v8::HandleScope handle_scope(isolate);
Local<v8::ArrayBuffer> ab = v8::ArrayBuffer::New(isolate, 100);
@@ -48,7 +48,7 @@ TEST(ArrayBuffer_OnlyMC) {
CHECK(IsTracked(*buf));
raw_ab = *buf;
// Prohibit page from being released.
- Page::FromAddress(buf->address())->MarkNeverEvacuate();
+ Page::FromHeapObject(*buf)->MarkNeverEvacuate();
}
// 2 GCs are needed because we promote to old space as live, meaning that
// we will survive one GC.
@@ -64,7 +64,7 @@ TEST(ArrayBuffer_OnlyScavenge) {
v8::Isolate* isolate = env->GetIsolate();
Heap* heap = reinterpret_cast<Isolate*>(isolate)->heap();
- JSArrayBuffer* raw_ab = nullptr;
+ JSArrayBuffer raw_ab;
{
v8::HandleScope handle_scope(isolate);
Local<v8::ArrayBuffer> ab = v8::ArrayBuffer::New(isolate, 100);
@@ -78,7 +78,7 @@ TEST(ArrayBuffer_OnlyScavenge) {
CHECK(IsTracked(*buf));
raw_ab = *buf;
// Prohibit page from being released.
- Page::FromAddress(buf->address())->MarkNeverEvacuate();
+ Page::FromHeapObject(*buf)->MarkNeverEvacuate();
}
// 2 GCs are needed because we promote to old space as live, meaning that
// we will survive one GC.
@@ -94,7 +94,7 @@ TEST(ArrayBuffer_ScavengeAndMC) {
v8::Isolate* isolate = env->GetIsolate();
Heap* heap = reinterpret_cast<Isolate*>(isolate)->heap();
- JSArrayBuffer* raw_ab = nullptr;
+ JSArrayBuffer raw_ab;
{
v8::HandleScope handle_scope(isolate);
Local<v8::ArrayBuffer> ab = v8::ArrayBuffer::New(isolate, 100);
@@ -110,7 +110,7 @@ TEST(ArrayBuffer_ScavengeAndMC) {
CHECK(IsTracked(*buf));
raw_ab = *buf;
// Prohibit page from being released.
- Page::FromAddress(buf->address())->MarkNeverEvacuate();
+ Page::FromHeapObject(*buf)->MarkNeverEvacuate();
}
// 2 GCs are needed because we promote to old space as live, meaning that
// we will survive one GC.
@@ -136,13 +136,13 @@ TEST(ArrayBuffer_Compaction) {
heap::GcAndSweep(heap, NEW_SPACE);
heap::GcAndSweep(heap, NEW_SPACE);
- Page* page_before_gc = Page::FromAddress(buf1->address());
+ Page* page_before_gc = Page::FromHeapObject(*buf1);
heap::ForceEvacuationCandidate(page_before_gc);
CHECK(IsTracked(*buf1));
CcTest::CollectAllGarbage();
- Page* page_after_gc = Page::FromAddress(buf1->address());
+ Page* page_after_gc = Page::FromHeapObject(*buf1);
CHECK(IsTracked(*buf1));
CHECK_NE(page_before_gc, page_after_gc);
@@ -208,7 +208,7 @@ TEST(ArrayBuffer_NonLivePromotion) {
v8::Isolate* isolate = env->GetIsolate();
Heap* heap = reinterpret_cast<Isolate*>(isolate)->heap();
- JSArrayBuffer* raw_ab = nullptr;
+ JSArrayBuffer raw_ab;
{
v8::HandleScope handle_scope(isolate);
Handle<FixedArray> root =
@@ -229,7 +229,7 @@ TEST(ArrayBuffer_NonLivePromotion) {
root->set(0, ReadOnlyRoots(heap).undefined_value());
heap::SimulateIncrementalMarking(heap, true);
// Prohibit page from being released.
- Page::FromAddress(raw_ab->address())->MarkNeverEvacuate();
+ Page::FromHeapObject(raw_ab)->MarkNeverEvacuate();
heap::GcAndSweep(heap, OLD_SPACE);
CHECK(!IsTracked(raw_ab));
}
@@ -245,7 +245,7 @@ TEST(ArrayBuffer_LivePromotion) {
v8::Isolate* isolate = env->GetIsolate();
Heap* heap = reinterpret_cast<Isolate*>(isolate)->heap();
- JSArrayBuffer* raw_ab = nullptr;
+ JSArrayBuffer raw_ab;
{
v8::HandleScope handle_scope(isolate);
Handle<FixedArray> root =
@@ -265,7 +265,7 @@ TEST(ArrayBuffer_LivePromotion) {
raw_ab = JSArrayBuffer::cast(root->get(0));
root->set(0, ReadOnlyRoots(heap).undefined_value());
// Prohibit page from being released.
- Page::FromAddress(raw_ab->address())->MarkNeverEvacuate();
+ Page::FromHeapObject(raw_ab)->MarkNeverEvacuate();
heap::GcAndSweep(heap, OLD_SPACE);
CHECK(IsTracked(raw_ab));
}
@@ -291,7 +291,7 @@ TEST(ArrayBuffer_SemiSpaceCopyThenPagePromotion) {
Local<v8::ArrayBuffer> ab = v8::ArrayBuffer::New(isolate, 100);
Handle<JSArrayBuffer> buf = v8::Utils::OpenHandle(*ab);
root->set(0, *buf); // Buffer that should be promoted as live.
- Page::FromAddress(buf->address())->MarkNeverEvacuate();
+ Page::FromHeapObject(*buf)->MarkNeverEvacuate();
}
std::vector<Handle<FixedArray>> handles;
// Make the whole page transition from new->old, getting the buffers
@@ -331,8 +331,7 @@ UNINITIALIZED_TEST(ArrayBuffer_SemiSpaceCopyMultipleTasks) {
heap::FillCurrentPage(heap->new_space());
Local<v8::ArrayBuffer> ab2 = v8::ArrayBuffer::New(isolate, 100);
Handle<JSArrayBuffer> buf2 = v8::Utils::OpenHandle(*ab2);
- CHECK_NE(Page::FromAddress(buf1->address()),
- Page::FromAddress(buf2->address()));
+ CHECK_NE(Page::FromHeapObject(*buf1), Page::FromHeapObject(*buf2));
heap::GcAndSweep(heap, OLD_SPACE);
}
isolate->Dispose();
@@ -403,7 +402,7 @@ TEST(ArrayBuffer_ExternalBackingStoreSizeIncreasesMarkCompact) {
heap::GcAndSweep(heap, NEW_SPACE);
heap::GcAndSweep(heap, NEW_SPACE);
- Page* page_before_gc = Page::FromAddress(buf1->address());
+ Page* page_before_gc = Page::FromHeapObject(*buf1);
heap::ForceEvacuationCandidate(page_before_gc);
CHECK(IsTracked(*buf1));
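Throughout this file and the compaction tests that follow, Page::FromAddress(obj->address()) consistently becomes Page::FromHeapObject(*obj), tracking the migration of HeapObject from a C++ pointer to a value type. Both spellings resolve the page containing an object by masking its address down to the page boundary; a hypothetical sketch of that masking (the page size and mask below are assumptions, not V8's actual constants):

#include <cstdint>

constexpr uintptr_t kPageSizeSketch = uintptr_t{1} << 18;  // 256 KB, illustrative
constexpr uintptr_t kPageAlignmentMaskSketch = kPageSizeSketch - 1;

uintptr_t PageStartSketch(uintptr_t object_address) {
  return object_address & ~kPageAlignmentMaskSketch;  // clear the in-page offset
}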
diff --git a/deps/v8/test/cctest/heap/test-compaction.cc b/deps/v8/test/cctest/heap/test-compaction.cc
index fec41c738a..9fb989482c 100644
--- a/deps/v8/test/cctest/heap/test-compaction.cc
+++ b/deps/v8/test/cctest/heap/test-compaction.cc
@@ -32,8 +32,8 @@ void CheckInvariantsOfAbortedPage(Page* page) {
void CheckAllObjectsOnPage(std::vector<Handle<FixedArray>>& handles,
Page* page) {
- for (auto& fixed_array : handles) {
- CHECK(Page::FromAddress(fixed_array->address()) == page);
+ for (Handle<FixedArray> fixed_array : handles) {
+ CHECK(Page::FromHeapObject(*fixed_array) == page);
}
}
@@ -59,10 +59,12 @@ HEAP_TEST(CompactionFullAbortedPage) {
{
HandleScope scope2(isolate);
CHECK(heap->old_space()->Expand());
- auto compaction_page_handles =
- heap::CreatePadding(heap, Page::kAllocatableMemory, TENURED);
+ auto compaction_page_handles = heap::CreatePadding(
+ heap,
+ static_cast<int>(MemoryChunkLayout::AllocatableMemoryInDataPage()),
+ TENURED);
Page* to_be_aborted_page =
- Page::FromAddress(compaction_page_handles.front()->address());
+ Page::FromHeapObject(*compaction_page_handles.front());
to_be_aborted_page->SetFlag(
MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING);
CheckAllObjectsOnPage(compaction_page_handles, to_be_aborted_page);
@@ -74,7 +76,7 @@ HEAP_TEST(CompactionFullAbortedPage) {
// Check that all handles still point to the same page, i.e., compaction
// has been aborted on the page.
for (Handle<FixedArray> object : compaction_page_handles) {
- CHECK_EQ(to_be_aborted_page, Page::FromAddress(object->address()));
+ CHECK_EQ(to_be_aborted_page, Page::FromHeapObject(*object));
}
CheckInvariantsOfAbortedPage(to_be_aborted_page);
}
@@ -93,7 +95,9 @@ HEAP_TEST(CompactionPartiallyAbortedPage) {
FLAG_manual_evacuation_candidates_selection = true;
const int objects_per_page = 10;
- const int object_size = Page::kAllocatableMemory / objects_per_page;
+ const int object_size =
+ static_cast<int>(MemoryChunkLayout::AllocatableMemoryInDataPage()) /
+ objects_per_page;
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
@@ -109,9 +113,11 @@ HEAP_TEST(CompactionPartiallyAbortedPage) {
// properly adjusted).
CHECK(heap->old_space()->Expand());
auto compaction_page_handles = heap::CreatePadding(
- heap, Page::kAllocatableMemory, TENURED, object_size);
+ heap,
+ static_cast<int>(MemoryChunkLayout::AllocatableMemoryInDataPage()),
+ TENURED, object_size);
Page* to_be_aborted_page =
- Page::FromAddress(compaction_page_handles.front()->address());
+ Page::FromHeapObject(*compaction_page_handles.front());
to_be_aborted_page->SetFlag(
MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING);
CheckAllObjectsOnPage(compaction_page_handles, to_be_aborted_page);
@@ -137,12 +143,12 @@ HEAP_TEST(CompactionPartiallyAbortedPage) {
// Once compaction has been aborted, all following objects still have
// to be on the initial page.
CHECK(!migration_aborted ||
- (Page::FromAddress(object->address()) == to_be_aborted_page));
- if (Page::FromAddress(object->address()) == to_be_aborted_page) {
+ (Page::FromHeapObject(*object) == to_be_aborted_page));
+ if (Page::FromHeapObject(*object) == to_be_aborted_page) {
// This object has not been migrated.
migration_aborted = true;
} else {
- CHECK_EQ(Page::FromAddress(object->address()), page_to_fill);
+ CHECK_EQ(Page::FromHeapObject(*object), page_to_fill);
}
}
// Check that we actually created a scenario with a partially aborted
@@ -168,7 +174,9 @@ HEAP_TEST(CompactionPartiallyAbortedPageIntraAbortedPointers) {
FLAG_manual_evacuation_candidates_selection = true;
const int objects_per_page = 10;
- const int object_size = Page::kAllocatableMemory / objects_per_page;
+ const int object_size =
+ static_cast<int>(MemoryChunkLayout::AllocatableMemoryInDataPage()) /
+ objects_per_page;
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
@@ -187,10 +195,13 @@ HEAP_TEST(CompactionPartiallyAbortedPageIntraAbortedPointers) {
// properly adjusted).
CHECK(heap->old_space()->Expand());
std::vector<Handle<FixedArray>> compaction_page_handles =
- heap::CreatePadding(heap, Page::kAllocatableMemory, TENURED,
- object_size);
+ heap::CreatePadding(
+ heap,
+ static_cast<int>(
+ MemoryChunkLayout::AllocatableMemoryInDataPage()),
+ TENURED, object_size);
to_be_aborted_page =
- Page::FromAddress(compaction_page_handles.front()->address());
+ Page::FromHeapObject(*compaction_page_handles.front());
to_be_aborted_page->SetFlag(
MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING);
for (size_t i = compaction_page_handles.size() - 1; i > 0; i--) {
@@ -208,8 +219,7 @@ HEAP_TEST(CompactionPartiallyAbortedPageIntraAbortedPointers) {
int used_memory = object_size * num_objects;
std::vector<Handle<FixedArray>> page_to_fill_handles =
heap::CreatePadding(heap, used_memory, TENURED, object_size);
- Page* page_to_fill =
- Page::FromAddress(page_to_fill_handles.front()->address());
+ Page* page_to_fill = Page::FromHeapObject(*page_to_fill_handles.front());
heap->set_force_oom(true);
CcTest::CollectAllGarbage();
@@ -223,13 +233,12 @@ HEAP_TEST(CompactionPartiallyAbortedPageIntraAbortedPointers) {
current =
Handle<FixedArray>(FixedArray::cast(current->get(0)), isolate);
CHECK(current->IsFixedArray());
- if (Page::FromAddress(current->address()) != to_be_aborted_page) {
+ if (Page::FromHeapObject(*current) != to_be_aborted_page) {
in_place = false;
}
bool on_aborted_page =
- Page::FromAddress(current->address()) == to_be_aborted_page;
- bool on_fill_page =
- Page::FromAddress(current->address()) == page_to_fill;
+ Page::FromHeapObject(*current) == to_be_aborted_page;
+ bool on_fill_page = Page::FromHeapObject(*current) == page_to_fill;
CHECK((in_place && on_aborted_page) || (!in_place && on_fill_page));
}
// Check that we at least migrated one object, as otherwise the test would
@@ -257,7 +266,9 @@ HEAP_TEST(CompactionPartiallyAbortedPageWithStoreBufferEntries) {
FLAG_manual_evacuation_candidates_selection = true;
const int objects_per_page = 10;
- const int object_size = Page::kAllocatableMemory / objects_per_page;
+ const int object_size =
+ static_cast<int>(MemoryChunkLayout::AllocatableMemoryInDataPage()) /
+ objects_per_page;
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
@@ -275,11 +286,13 @@ HEAP_TEST(CompactionPartiallyAbortedPageWithStoreBufferEntries) {
// properly adjusted).
CHECK(heap->old_space()->Expand());
auto compaction_page_handles = heap::CreatePadding(
- heap, Page::kAllocatableMemory, TENURED, object_size);
+ heap,
+ static_cast<int>(MemoryChunkLayout::AllocatableMemoryInDataPage()),
+ TENURED, object_size);
// Sanity check that we have enough space for linking up arrays.
CHECK_GE(compaction_page_handles.front()->length(), 2);
to_be_aborted_page =
- Page::FromAddress(compaction_page_handles.front()->address());
+ Page::FromHeapObject(*compaction_page_handles.front());
to_be_aborted_page->SetFlag(
MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING);
@@ -303,8 +316,7 @@ HEAP_TEST(CompactionPartiallyAbortedPageWithStoreBufferEntries) {
int used_memory = object_size * num_objects;
std::vector<Handle<FixedArray>> page_to_fill_handles =
heap::CreatePadding(heap, used_memory, TENURED, object_size);
- Page* page_to_fill =
- Page::FromAddress(page_to_fill_handles.front()->address());
+ Page* page_to_fill = Page::FromHeapObject(*page_to_fill_handles.front());
heap->set_force_oom(true);
CcTest::CollectAllGarbage();
@@ -319,13 +331,12 @@ HEAP_TEST(CompactionPartiallyAbortedPageWithStoreBufferEntries) {
Handle<FixedArray>(FixedArray::cast(current->get(0)), isolate);
CHECK(!Heap::InNewSpace(*current));
CHECK(current->IsFixedArray());
- if (Page::FromAddress(current->address()) != to_be_aborted_page) {
+ if (Page::FromHeapObject(*current) != to_be_aborted_page) {
in_place = false;
}
bool on_aborted_page =
- Page::FromAddress(current->address()) == to_be_aborted_page;
- bool on_fill_page =
- Page::FromAddress(current->address()) == page_to_fill;
+ Page::FromHeapObject(*current) == to_be_aborted_page;
+ bool on_fill_page = Page::FromHeapObject(*current) == page_to_fill;
CHECK((in_place && on_aborted_page) || (!in_place && on_fill_page));
}
// Check that we at least migrated one object, as otherwise the test would
@@ -338,10 +349,10 @@ HEAP_TEST(CompactionPartiallyAbortedPageWithStoreBufferEntries) {
isolate->factory()->NewFixedArray(10, NOT_TENURED);
// Create a broken address that looks like a tagged pointer to a new space
// object.
- Address broken_address = holder->address() + 2 * kPointerSize + 1;
+ Address broken_address = holder->address() + 2 * kTaggedSize + 1;
// Convert it to a vector to create a string from it.
Vector<const uint8_t> string_to_broken_addresss(
- reinterpret_cast<const uint8_t*>(&broken_address), kPointerSize);
+ reinterpret_cast<const uint8_t*>(&broken_address), kTaggedSize);
Handle<String> string;
do {
@@ -353,7 +364,7 @@ HEAP_TEST(CompactionPartiallyAbortedPageWithStoreBufferEntries) {
string = isolate->factory()
->NewStringFromOneByte(string_to_broken_addresss, TENURED)
.ToHandleChecked();
- } while (Page::FromAddress(string->address()) != to_be_aborted_page);
+ } while (Page::FromHeapObject(*string) != to_be_aborted_page);
// If store buffer entries are not properly filtered/reset for aborted
// pages we have now a broken address at an object slot in old space and
diff --git a/deps/v8/test/cctest/heap/test-concurrent-marking.cc b/deps/v8/test/cctest/heap/test-concurrent-marking.cc
index d49ccf6213..57a5842850 100644
--- a/deps/v8/test/cctest/heap/test-concurrent-marking.cc
+++ b/deps/v8/test/cctest/heap/test-concurrent-marking.cc
@@ -19,7 +19,7 @@ namespace internal {
namespace heap {
void PublishSegment(ConcurrentMarking::MarkingWorklist* worklist,
- HeapObject* object) {
+ HeapObject object) {
for (size_t i = 0; i <= ConcurrentMarking::MarkingWorklist::kSegmentCapacity;
i++) {
worklist->Push(0, object);
@@ -38,11 +38,11 @@ TEST(ConcurrentMarking) {
collector->EnsureSweepingCompleted();
}
- ConcurrentMarking::MarkingWorklist shared, bailout, on_hold;
+ ConcurrentMarking::MarkingWorklist shared, on_hold;
ConcurrentMarking::EmbedderTracingWorklist embedder_objects;
WeakObjects weak_objects;
ConcurrentMarking* concurrent_marking = new ConcurrentMarking(
- heap, &shared, &bailout, &on_hold, &weak_objects, &embedder_objects);
+ heap, &shared, &on_hold, &weak_objects, &embedder_objects);
PublishSegment(&shared, ReadOnlyRoots(heap).undefined_value());
concurrent_marking->ScheduleTasks();
concurrent_marking->Stop(
@@ -61,11 +61,11 @@ TEST(ConcurrentMarkingReschedule) {
collector->EnsureSweepingCompleted();
}
- ConcurrentMarking::MarkingWorklist shared, bailout, on_hold;
+ ConcurrentMarking::MarkingWorklist shared, on_hold;
ConcurrentMarking::EmbedderTracingWorklist embedder_objects;
WeakObjects weak_objects;
ConcurrentMarking* concurrent_marking = new ConcurrentMarking(
- heap, &shared, &bailout, &on_hold, &weak_objects, &embedder_objects);
+ heap, &shared, &on_hold, &weak_objects, &embedder_objects);
PublishSegment(&shared, ReadOnlyRoots(heap).undefined_value());
concurrent_marking->ScheduleTasks();
concurrent_marking->Stop(
@@ -88,11 +88,11 @@ TEST(ConcurrentMarkingPreemptAndReschedule) {
collector->EnsureSweepingCompleted();
}
- ConcurrentMarking::MarkingWorklist shared, bailout, on_hold;
+ ConcurrentMarking::MarkingWorklist shared, on_hold;
ConcurrentMarking::EmbedderTracingWorklist embedder_objects;
WeakObjects weak_objects;
ConcurrentMarking* concurrent_marking = new ConcurrentMarking(
- heap, &shared, &bailout, &on_hold, &weak_objects, &embedder_objects);
+ heap, &shared, &on_hold, &weak_objects, &embedder_objects);
for (int i = 0; i < 5000; i++)
PublishSegment(&shared, ReadOnlyRoots(heap).undefined_value());
concurrent_marking->ScheduleTasks();
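Two things change in these hunks: the bailout worklist is gone from ConcurrentMarking's constructor, and PublishSegment keeps pushing kSegmentCapacity + 1 objects (the loop runs i <= kSegmentCapacity) so the task-local segment overflows and at least one full segment is published where concurrent marker tasks can steal it. A hedged sketch of that overflow-publishes behavior (capacity and layout are illustrative):

#include <cstddef>
#include <vector>

constexpr std::size_t kSegmentCapacitySketch = 64;

struct SegmentedWorklistSketch {
  std::vector<int> local;                // per-task segment being filled
  std::vector<std::vector<int>> shared;  // globally visible, stealable pool

  void Push(int object) {
    if (local.size() == kSegmentCapacitySketch) {
      shared.push_back(std::move(local));  // full segment: publish it first
      local.clear();
    }
    local.push_back(object);
  }
};
// Capacity pushes only fill the local segment; the (capacity + 1)-th push is
// what forces a publish, matching the <= bound in PublishSegment above.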
diff --git a/deps/v8/test/cctest/heap/test-embedder-tracing.cc b/deps/v8/test/cctest/heap/test-embedder-tracing.cc
index d54ffcf377..5134392886 100644
--- a/deps/v8/test/cctest/heap/test-embedder-tracing.cc
+++ b/deps/v8/test/cctest/heap/test-embedder-tracing.cc
@@ -49,15 +49,16 @@ class TestEmbedderHeapTracer final : public v8::EmbedderHeapTracer {
to_register_with_v8_.push_back(persistent);
}
- bool AdvanceTracing(double deadline_in_ms,
- AdvanceTracingActions actions) final {
+ bool AdvanceTracing(double deadline_in_ms) final {
for (auto persistent : to_register_with_v8_) {
persistent->RegisterExternalReference(isolate_);
}
to_register_with_v8_.clear();
- return false;
+ return true;
}
+ bool IsTracingDone() final { return to_register_with_v8_.empty(); }
+
void TracePrologue() final {}
void TraceEpilogue() final {}
void AbortTracing() final {}
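The EmbedderHeapTracer change visible here splits the old AdvanceTracing(deadline, actions) into a simpler pair: AdvanceTracing(deadline_in_ms) now returns whether the embedder finished the work it was asked to do, and the new IsTracingDone() reports whether any marking work remains. A stub showing the revised contract as this test uses it (a sketch, not the real API surface):

class TracerContractSketch {
 public:
  // Return true when the requested tracing work was completed; a real
  // embedder would stop early once deadline_in_ms is exhausted.
  bool AdvanceTracing(double deadline_in_ms) {
    pending_ = 0;  // this stub drains everything at once
    return true;
  }
  // Polled by the collector to decide whether another step is needed.
  bool IsTracingDone() { return pending_ == 0; }

 private:
  int pending_ = 0;
};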
diff --git a/deps/v8/test/cctest/heap/test-external-string-tracker.cc b/deps/v8/test/cctest/heap/test-external-string-tracker.cc
index cfade38da7..36a9391307 100644
--- a/deps/v8/test/cctest/heap/test-external-string-tracker.cc
+++ b/deps/v8/test/cctest/heap/test-external-string-tracker.cc
@@ -112,7 +112,7 @@ TEST(ExternalString_ExternalBackingStoreSizeIncreasesMarkCompact) {
isolate, new TestOneByteResource(i::StrDup(TEST_STR))).ToLocalChecked();
v8::internal::Handle<v8::internal::String> esh = v8::Utils::OpenHandle(*es);
- Page* page_before_gc = Page::FromAddress(esh->address());
+ Page* page_before_gc = Page::FromHeapObject(*esh);
heap::ForceEvacuationCandidate(page_before_gc);
CcTest::CollectAllGarbage();
diff --git a/deps/v8/test/cctest/heap/test-heap.cc b/deps/v8/test/cctest/heap/test-heap.cc
index 8c6a3c446c..c7c1d93f87 100644
--- a/deps/v8/test/cctest/heap/test-heap.cc
+++ b/deps/v8/test/cctest/heap/test-heap.cc
@@ -30,7 +30,6 @@
#include "src/api-inl.h"
#include "src/assembler-inl.h"
-#include "src/code-stubs.h"
#include "src/compilation-cache.h"
#include "src/debug/debug.h"
#include "src/deoptimizer.h"
@@ -47,9 +46,12 @@
#include "src/ic/ic.h"
#include "src/macro-assembler-inl.h"
#include "src/objects-inl.h"
+#include "src/objects/heap-number-inl.h"
#include "src/objects/js-array-inl.h"
#include "src/objects/js-collection-inl.h"
#include "src/objects/managed.h"
+#include "src/objects/slots.h"
+#include "src/ostreams.h"
#include "src/regexp/jsregexp.h"
#include "src/snapshot/snapshot.h"
#include "src/transitions.h"
@@ -67,7 +69,7 @@ namespace heap {
static const int kPretenureCreationCount =
AllocationSite::kPretenureMinimumCreated + 1;
-static void CheckMap(Map* map, int type, int instance_size) {
+static void CheckMap(Map map, int type, int instance_size) {
CHECK(map->IsHeapObject());
#ifdef DEBUG
CHECK(CcTest::heap()->Contains(map));
@@ -144,17 +146,16 @@ TEST(InitialObjects) {
*v8::Utils::OpenHandle(*CompileRun("Object.prototype")));
}
-static void CheckOddball(Isolate* isolate, Object* obj, const char* string) {
+static void CheckOddball(Isolate* isolate, Object obj, const char* string) {
CHECK(obj->IsOddball());
Handle<Object> handle(obj, isolate);
- Object* print_string = *Object::ToString(isolate, handle).ToHandleChecked();
+ Object print_string = *Object::ToString(isolate, handle).ToHandleChecked();
CHECK(String::cast(print_string)->IsUtf8EqualTo(CStrVector(string)));
}
-
static void CheckSmi(Isolate* isolate, int value, const char* string) {
Handle<Object> handle(Smi::FromInt(value), isolate);
- Object* print_string = *Object::ToString(isolate, handle).ToHandleChecked();
+ Object print_string = *Object::ToString(isolate, handle).ToHandleChecked();
CHECK(String::cast(print_string)->IsUtf8EqualTo(CStrVector(string)));
}
@@ -190,8 +191,8 @@ HEAP_TEST(TestNewSpaceRefsInCopiedCode) {
CHECK(Heap::InNewSpace(*value));
i::byte buffer[i::Assembler::kMinimalBufferSize];
- MacroAssembler masm(isolate, buffer, sizeof(buffer),
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler masm(isolate, v8::internal::CodeObjectRequired::kYes,
+ ExternalAssemblerBuffer(buffer, sizeof(buffer)));
// Add a new-space reference to the code.
masm.Push(value);
@@ -215,7 +216,7 @@ static void CheckFindCodeObject(Isolate* isolate) {
// Test FindCodeObject
#define __ assm.
- Assembler assm(AssemblerOptions{}, nullptr, 0);
+ Assembler assm(AssemblerOptions{});
__ nop(); // supported on all architectures
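Both assembler constructions in this hunk and the previous one adopt the new buffer-injection style: Assembler(AssemblerOptions{}) lets the assembler own a growable buffer, while MacroAssembler(..., ExternalAssemblerBuffer(buffer, size)) wraps caller-owned storage the assembler must neither grow nor free. A simplified sketch of the wrapper idea (names and types are illustrative, not V8's declarations):

#include <cstdint>

struct ExternalBufferSketch {
  uint8_t* start;  // caller-owned; the assembler never reallocates or frees it
  int size;
};

ExternalBufferSketch MakeExternalBufferSketch(void* start, int size) {
  return {static_cast<uint8_t*>(start), size};
}

This mirrors the test above, which stack-allocates the byte buffer and hands it in by value.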
@@ -225,19 +226,19 @@ static void CheckFindCodeObject(Isolate* isolate) {
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
CHECK(code->IsCode());
- HeapObject* obj = HeapObject::cast(*code);
+ HeapObject obj = HeapObject::cast(*code);
Address obj_addr = obj->address();
- for (int i = 0; i < obj->Size(); i += kPointerSize) {
- Object* found = isolate->FindCodeObject(obj_addr + i);
+ for (int i = 0; i < obj->Size(); i += kTaggedSize) {
+ Object found = isolate->FindCodeObject(obj_addr + i);
CHECK_EQ(*code, found);
}
Handle<Code> copy =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
- HeapObject* obj_copy = HeapObject::cast(*copy);
- Object* not_right = isolate->FindCodeObject(obj_copy->address() +
- obj_copy->Size() / 2);
+ HeapObject obj_copy = HeapObject::cast(*copy);
+ Object not_right =
+ isolate->FindCodeObject(obj_copy->address() + obj_copy->Size() / 2);
CHECK(not_right != *code);
}
@@ -247,7 +248,7 @@ TEST(HandleNull) {
Isolate* isolate = CcTest::i_isolate();
HandleScope outer_scope(isolate);
LocalContext context;
- Handle<Object> n(static_cast<Object*>(nullptr), isolate);
+ Handle<Object> n(Object(0), isolate);
CHECK(!n.is_null());
}
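HandleNull's new spelling, Handle<Object> n(Object(0), isolate), reflects the pointer-to-value migration running through this whole patch: Object and HeapObject are now thin value types wrapping the tagged word, so a zero object is written Object(0) rather than a nullptr C++ pointer. Note that the test still expects !n.is_null(): handle null-ness is about whether the handle has a slot at all, not about the word stored in it. A minimal sketch of the value representation (illustrative only):

#include <cstdint>

class TaggedValueSketch {
 public:
  explicit constexpr TaggedValueSketch(uintptr_t ptr = 0) : ptr_(ptr) {}
  constexpr bool is_null() const { return ptr_ == 0; }
  constexpr uintptr_t ptr() const { return ptr_; }

 private:
  uintptr_t ptr_;  // the tagged word itself, not a C++ pointer
};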
@@ -367,16 +368,15 @@ TEST(GarbageCollection) {
HandleScope inner_scope(isolate);
// Allocate a function and keep it in global object's property.
Handle<JSFunction> function = factory->NewFunctionForTest(name);
- JSReceiver::SetProperty(isolate, global, name, function,
- LanguageMode::kSloppy)
+ Object::SetProperty(isolate, global, name, function, LanguageMode::kSloppy)
.Check();
// Allocate an object. Unrooted after leaving the scope.
Handle<JSObject> obj = factory->NewJSObject(function);
- JSReceiver::SetProperty(isolate, obj, prop_name, twenty_three,
- LanguageMode::kSloppy)
+ Object::SetProperty(isolate, obj, prop_name, twenty_three,
+ LanguageMode::kSloppy)
.Check();
- JSReceiver::SetProperty(isolate, obj, prop_namex, twenty_four,
- LanguageMode::kSloppy)
+ Object::SetProperty(isolate, obj, prop_namex, twenty_four,
+ LanguageMode::kSloppy)
.Check();
CHECK_EQ(Smi::FromInt(23),
@@ -399,11 +399,10 @@ TEST(GarbageCollection) {
HandleScope inner_scope(isolate);
// Allocate another object, make it reachable from global.
Handle<JSObject> obj = factory->NewJSObject(function);
- JSReceiver::SetProperty(isolate, global, obj_name, obj,
- LanguageMode::kSloppy)
+ Object::SetProperty(isolate, global, obj_name, obj, LanguageMode::kSloppy)
.Check();
- JSReceiver::SetProperty(isolate, obj, prop_name, twenty_three,
- LanguageMode::kSloppy)
+ Object::SetProperty(isolate, obj, prop_name, twenty_three,
+ LanguageMode::kSloppy)
.Check();
}
@@ -565,14 +564,15 @@ TEST(WeakGlobalUnmodifiedApiHandlesScavenge) {
HandleScope scope(isolate);
// Create an Api object that is unmodified.
- auto function = FunctionTemplate::New(context->GetIsolate())
- ->GetFunction(context.local())
- .ToLocalChecked();
- auto i = function->NewInstance(context.local()).ToLocalChecked();
+ Local<v8::Function> function = FunctionTemplate::New(context->GetIsolate())
+ ->GetFunction(context.local())
+ .ToLocalChecked();
+ Local<v8::Object> i =
+ function->NewInstance(context.local()).ToLocalChecked();
Handle<Object> u = factory->NewNumber(1.12344);
h1 = global_handles->Create(*u);
- h2 = global_handles->Create(*(reinterpret_cast<internal::Object**>(*i)));
+ h2 = global_handles->Create(*(reinterpret_cast<internal::Address*>(*i)));
}
std::pair<Handle<Object>*, int> handle_and_id(&h2, 1234);
@@ -613,7 +613,7 @@ TEST(WeakGlobalApiHandleModifiedMapScavenge) {
function_template->GetFunction(context.local()).ToLocalChecked();
auto i = function->NewInstance(context.local()).ToLocalChecked();
- h1 = global_handles->Create(*(reinterpret_cast<internal::Object**>(*i)));
+ h1 = global_handles->Create(*(reinterpret_cast<internal::Address*>(*i)));
}
std::pair<Handle<Object>*, int> handle_and_id(&h1, 1234);
@@ -657,7 +657,7 @@ TEST(WeakGlobalApiHandleWithElementsScavenge) {
function_template->GetFunction(context.local()).ToLocalChecked();
auto i = function->NewInstance(context.local()).ToLocalChecked();
- h1 = global_handles->Create(*(reinterpret_cast<internal::Object**>(*i)));
+ h1 = global_handles->Create(*(reinterpret_cast<internal::Address*>(*i)));
}
std::pair<Handle<Object>*, int> handle_and_id(&h1, 1234);
@@ -794,11 +794,11 @@ TEST(BytecodeArray) {
CHECK_EQ(array->get(i), kRawBytes[i]);
}
- FixedArray* old_constant_pool_address = *constant_pool;
+ FixedArray old_constant_pool_address = *constant_pool;
// Perform a full garbage collection and force the constant pool to be on an
// evacuation candidate.
- Page* evac_page = Page::FromAddress(constant_pool->address());
+ Page* evac_page = Page::FromHeapObject(*constant_pool);
heap::ForceEvacuationCandidate(evac_page);
CcTest::CollectAllGarbage();
@@ -944,14 +944,14 @@ TEST(FunctionAllocation) {
Handle<String> prop_name = factory->InternalizeUtf8String("theSlot");
Handle<JSObject> obj = factory->NewJSObject(function);
- JSReceiver::SetProperty(isolate, obj, prop_name, twenty_three,
- LanguageMode::kSloppy)
+ Object::SetProperty(isolate, obj, prop_name, twenty_three,
+ LanguageMode::kSloppy)
.Check();
CHECK_EQ(Smi::FromInt(23),
*Object::GetProperty(isolate, obj, prop_name).ToHandleChecked());
// Check that we can add properties to function objects.
- JSReceiver::SetProperty(isolate, function, prop_name, twenty_four,
- LanguageMode::kSloppy)
+ Object::SetProperty(isolate, function, prop_name, twenty_four,
+ LanguageMode::kSloppy)
.Check();
CHECK_EQ(
Smi::FromInt(24),
@@ -983,8 +983,7 @@ TEST(ObjectProperties) {
CHECK(Just(false) == JSReceiver::HasOwnProperty(obj, first));
// add first
- JSReceiver::SetProperty(isolate, obj, first, one, LanguageMode::kSloppy)
- .Check();
+ Object::SetProperty(isolate, obj, first, one, LanguageMode::kSloppy).Check();
CHECK(Just(true) == JSReceiver::HasOwnProperty(obj, first));
// delete first
@@ -993,10 +992,8 @@ TEST(ObjectProperties) {
CHECK(Just(false) == JSReceiver::HasOwnProperty(obj, first));
// add first and then second
- JSReceiver::SetProperty(isolate, obj, first, one, LanguageMode::kSloppy)
- .Check();
- JSReceiver::SetProperty(isolate, obj, second, two, LanguageMode::kSloppy)
- .Check();
+ Object::SetProperty(isolate, obj, first, one, LanguageMode::kSloppy).Check();
+ Object::SetProperty(isolate, obj, second, two, LanguageMode::kSloppy).Check();
CHECK(Just(true) == JSReceiver::HasOwnProperty(obj, first));
CHECK(Just(true) == JSReceiver::HasOwnProperty(obj, second));
@@ -1010,10 +1007,8 @@ TEST(ObjectProperties) {
CHECK(Just(false) == JSReceiver::HasOwnProperty(obj, second));
// add first and then second
- JSReceiver::SetProperty(isolate, obj, first, one, LanguageMode::kSloppy)
- .Check();
- JSReceiver::SetProperty(isolate, obj, second, two, LanguageMode::kSloppy)
- .Check();
+ Object::SetProperty(isolate, obj, first, one, LanguageMode::kSloppy).Check();
+ Object::SetProperty(isolate, obj, second, two, LanguageMode::kSloppy).Check();
CHECK(Just(true) == JSReceiver::HasOwnProperty(obj, first));
CHECK(Just(true) == JSReceiver::HasOwnProperty(obj, second));
@@ -1029,14 +1024,14 @@ TEST(ObjectProperties) {
// check string and internalized string match
const char* string1 = "fisk";
Handle<String> s1 = factory->NewStringFromAsciiChecked(string1);
- JSReceiver::SetProperty(isolate, obj, s1, one, LanguageMode::kSloppy).Check();
+ Object::SetProperty(isolate, obj, s1, one, LanguageMode::kSloppy).Check();
Handle<String> s1_string = factory->InternalizeUtf8String(string1);
CHECK(Just(true) == JSReceiver::HasOwnProperty(obj, s1_string));
// check internalized string and string match
const char* string2 = "fugl";
Handle<String> s2_string = factory->InternalizeUtf8String(string2);
- JSReceiver::SetProperty(isolate, obj, s2_string, one, LanguageMode::kSloppy)
+ Object::SetProperty(isolate, obj, s2_string, one, LanguageMode::kSloppy)
.Check();
Handle<String> s2 = factory->NewStringFromAsciiChecked(string2);
CHECK(Just(true) == JSReceiver::HasOwnProperty(obj, s2));
@@ -1058,8 +1053,8 @@ TEST(JSObjectMaps) {
// Set a property
Handle<Smi> twenty_three(Smi::FromInt(23), isolate);
- JSReceiver::SetProperty(isolate, obj, prop_name, twenty_three,
- LanguageMode::kSloppy)
+ Object::SetProperty(isolate, obj, prop_name, twenty_three,
+ LanguageMode::kSloppy)
.Check();
CHECK_EQ(Smi::FromInt(23),
*Object::GetProperty(isolate, obj, prop_name).ToHandleChecked());
@@ -1095,8 +1090,7 @@ TEST(JSArray) {
CHECK(array->HasSmiOrObjectElements());
// array[length] = name.
- JSReceiver::SetElement(isolate, array, 0, name, LanguageMode::kSloppy)
- .Check();
+ Object::SetElement(isolate, array, 0, name, LanguageMode::kSloppy).Check();
CHECK_EQ(Smi::FromInt(1), array->length());
element = i::Object::GetElement(isolate, array, 0).ToHandleChecked();
CHECK_EQ(*element, *name);
@@ -1110,8 +1104,7 @@ TEST(JSArray) {
CHECK(array->HasDictionaryElements()); // Must be in slow mode.
// array[length] = name.
- JSReceiver::SetElement(isolate, array, int_length, name,
- LanguageMode::kSloppy)
+ Object::SetElement(isolate, array, int_length, name, LanguageMode::kSloppy)
.Check();
uint32_t new_int_length = 0;
CHECK(array->length()->ToArrayIndex(&new_int_length));
@@ -1143,14 +1136,11 @@ TEST(JSObjectCopy) {
Handle<Smi> one(Smi::FromInt(1), isolate);
Handle<Smi> two(Smi::FromInt(2), isolate);
- JSReceiver::SetProperty(isolate, obj, first, one, LanguageMode::kSloppy)
- .Check();
- JSReceiver::SetProperty(isolate, obj, second, two, LanguageMode::kSloppy)
- .Check();
+ Object::SetProperty(isolate, obj, first, one, LanguageMode::kSloppy).Check();
+ Object::SetProperty(isolate, obj, second, two, LanguageMode::kSloppy).Check();
- JSReceiver::SetElement(isolate, obj, 0, first, LanguageMode::kSloppy).Check();
- JSReceiver::SetElement(isolate, obj, 1, second, LanguageMode::kSloppy)
- .Check();
+ Object::SetElement(isolate, obj, 0, first, LanguageMode::kSloppy).Check();
+ Object::SetElement(isolate, obj, 1, second, LanguageMode::kSloppy).Check();
// Make the clone.
Handle<Object> value1, value2;
@@ -1172,15 +1162,13 @@ TEST(JSObjectCopy) {
CHECK_EQ(*value1, *value2);
// Flip the values.
- JSReceiver::SetProperty(isolate, clone, first, two, LanguageMode::kSloppy)
+ Object::SetProperty(isolate, clone, first, two, LanguageMode::kSloppy)
.Check();
- JSReceiver::SetProperty(isolate, clone, second, one, LanguageMode::kSloppy)
+ Object::SetProperty(isolate, clone, second, one, LanguageMode::kSloppy)
.Check();
- JSReceiver::SetElement(isolate, clone, 0, second, LanguageMode::kSloppy)
- .Check();
- JSReceiver::SetElement(isolate, clone, 1, first, LanguageMode::kSloppy)
- .Check();
+ Object::SetElement(isolate, clone, 0, second, LanguageMode::kSloppy).Check();
+ Object::SetElement(isolate, clone, 1, first, LanguageMode::kSloppy).Check();
value1 = Object::GetElement(isolate, obj, 1).ToHandleChecked();
value2 = Object::GetElement(isolate, clone, 0).ToHandleChecked();
@@ -1242,7 +1230,7 @@ static int ObjectsFoundInHeap(Heap* heap, Handle<Object> objs[], int size) {
// Count the number of objects found in the heap.
int found_count = 0;
HeapIterator iterator(heap);
- for (HeapObject* obj = iterator.next(); obj != nullptr;
+ for (HeapObject obj = iterator.next(); !obj.is_null();
obj = iterator.next()) {
for (int i = 0; i < size; i++) {
if (*objs[i] == obj) {
@@ -1290,6 +1278,147 @@ TEST(Iteration) {
CHECK_EQ(objs_count, ObjectsFoundInHeap(CcTest::heap(), objs, objs_count));
}
+TEST(TestBytecodeFlushing) {
+#ifndef V8_LITE_MODE
+ FLAG_opt = false;
+ FLAG_always_opt = false;
+ i::FLAG_optimize_for_size = false;
+#endif // V8_LITE_MODE
+ i::FLAG_flush_bytecode = true;
+ i::FLAG_allow_natives_syntax = true;
+
+ CcTest::InitializeVM();
+ v8::Isolate* isolate = CcTest::isolate();
+ Isolate* i_isolate = CcTest::i_isolate();
+ Factory* factory = i_isolate->factory();
+
+ {
+ v8::HandleScope scope(isolate);
+ v8::Context::New(isolate)->Enter();
+ const char* source =
+ "function foo() {"
+ " var x = 42;"
+ " var y = 42;"
+ " var z = x + y;"
+ "};"
+ "foo()";
+ Handle<String> foo_name = factory->InternalizeUtf8String("foo");
+
+ // This compile will add the code to the compilation cache.
+ {
+ v8::HandleScope scope(isolate);
+ CompileRun(source);
+ }
+
+ // Check function is compiled.
+ Handle<Object> func_value =
+ Object::GetProperty(i_isolate, i_isolate->global_object(), foo_name)
+ .ToHandleChecked();
+ CHECK(func_value->IsJSFunction());
+ Handle<JSFunction> function = Handle<JSFunction>::cast(func_value);
+ CHECK(function->shared()->is_compiled());
+
+ // The code will survive at least two GCs.
+ CcTest::CollectAllGarbage();
+ CcTest::CollectAllGarbage();
+ CHECK(function->shared()->is_compiled());
+
+ // Simulate several GCs that use full marking.
+ const int kAgingThreshold = 6;
+ for (int i = 0; i < kAgingThreshold; i++) {
+ CcTest::CollectAllGarbage();
+ }
+
+ // foo should no longer be in the compilation cache
+ CHECK(!function->shared()->is_compiled());
+ CHECK(!function->is_compiled());
+ // Call foo to get it recompiled.
+ CompileRun("foo()");
+ CHECK(function->shared()->is_compiled());
+ CHECK(function->is_compiled());
+ }
+}
+
+#ifndef V8_LITE_MODE
+
+TEST(TestOptimizeAfterBytecodeFlushingCandidate) {
+ FLAG_opt = true;
+ FLAG_always_opt = false;
+ i::FLAG_optimize_for_size = false;
+ i::FLAG_incremental_marking = true;
+ i::FLAG_flush_bytecode = true;
+ i::FLAG_allow_natives_syntax = true;
+
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ Factory* factory = isolate->factory();
+ v8::HandleScope scope(CcTest::isolate());
+ const char* source =
+ "function foo() {"
+ " var x = 42;"
+ " var y = 42;"
+ " var z = x + y;"
+ "};"
+ "foo()";
+ Handle<String> foo_name = factory->InternalizeUtf8String("foo");
+
+ // This compile will add the code to the compilation cache.
+ {
+ v8::HandleScope scope(CcTest::isolate());
+ CompileRun(source);
+ }
+
+ // Check function is compiled.
+ Handle<Object> func_value =
+ Object::GetProperty(isolate, isolate->global_object(), foo_name)
+ .ToHandleChecked();
+ CHECK(func_value->IsJSFunction());
+ Handle<JSFunction> function = Handle<JSFunction>::cast(func_value);
+ CHECK(function->shared()->is_compiled());
+
+ // The code will survive at least two GCs.
+ CcTest::CollectAllGarbage();
+ CcTest::CollectAllGarbage();
+ CHECK(function->shared()->is_compiled());
+
+ // Simulate several GCs that use incremental marking.
+ const int kAgingThreshold = 6;
+ for (int i = 0; i < kAgingThreshold; i++) {
+ heap::SimulateIncrementalMarking(CcTest::heap());
+ CcTest::CollectAllGarbage();
+ }
+ CHECK(!function->shared()->is_compiled());
+ CHECK(!function->is_compiled());
+
+ // This compile will compile the function again.
+ {
+ v8::HandleScope scope(CcTest::isolate());
+ CompileRun("foo();");
+ }
+
+ // Simulate several GCs that use incremental marking but make sure
+ // the loop breaks once the function is enqueued as a candidate.
+ for (int i = 0; i < kAgingThreshold; i++) {
+ heap::SimulateIncrementalMarking(CcTest::heap());
+ if (function->shared()->GetBytecodeArray()->IsOld()) break;
+ CcTest::CollectAllGarbage();
+ }
+
+ // Force optimization while incremental marking is active and while
+ // the function is enqueued as a candidate.
+ {
+ v8::HandleScope scope(CcTest::isolate());
+ CompileRun("%OptimizeFunctionOnNextCall(foo); foo();");
+ }
+
+ // Simulate one final GC and make sure the candidate wasn't flushed.
+ CcTest::CollectAllGarbage();
+ CHECK(function->shared()->is_compiled());
+ CHECK(function->is_compiled());
+}
+
+#endif // V8_LITE_MODE
+
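The two tests added above exercise bytecode flushing end to end: repeated full GCs age a function's bytecode until the collector drops it (is_compiled() flips to false), calling the function recompiles it lazily, and the second test additionally verifies that a function already enqueued as a flush candidate can still be optimized. A hedged sketch of the aging model the tests assume (the threshold and fields are illustrative):

struct BytecodeAgeSketch {
  static constexpr int kFlushThreshold = 6;  // mirrors kAgingThreshold above
  int age = 0;
  void SurvivedFullGC() { ++age; }  // bumped once per full GC cycle
  void NoteExecuted() { age = 0; }  // running the function resets the age
  bool IsOld() const { return age >= kFlushThreshold; }  // eligible to flush
};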
TEST(TestUseOfIncrementalBarrierOnCompileLazy) {
if (!FLAG_incremental_marking) return;
// Turn off always_opt because it interferes with running the built-in for
@@ -1421,7 +1550,7 @@ static void OptimizeEmptyFunction(const char* name) {
// Count the number of native contexts in the weak list of native contexts.
int CountNativeContexts() {
int count = 0;
- Object* object = CcTest::heap()->native_contexts_list();
+ Object object = CcTest::heap()->native_contexts_list();
while (!object->IsUndefined(CcTest::i_isolate())) {
count++;
object = Context::cast(object)->next_context_link();
@@ -1506,8 +1635,8 @@ TEST(TestInternalWeakLists) {
// Dispose the native contexts one by one.
for (int i = 0; i < kNumTestContexts; i++) {
// TODO(dcarney): is there a better way to do this?
- i::Object** unsafe = reinterpret_cast<i::Object**>(*ctx[i]);
- *unsafe = ReadOnlyRoots(CcTest::heap()).undefined_value();
+ i::Address* unsafe = reinterpret_cast<i::Address*>(*ctx[i]);
+ *unsafe = ReadOnlyRoots(CcTest::heap()).undefined_value()->ptr();
ctx[i].Clear();
// Scavenge treats these references as strong.
@@ -1626,7 +1755,7 @@ HEAP_TEST(TestSizeOfObjects) {
TEST(TestAlignmentCalculations) {
// Maximum fill amounts are consistent.
- int maximum_double_misalignment = kDoubleSize - kPointerSize;
+ int maximum_double_misalignment = kDoubleSize - kTaggedSize;
int max_word_fill = Heap::GetMaximumFillToAlign(kWordAligned);
CHECK_EQ(0, max_word_fill);
int max_double_fill = Heap::GetMaximumFillToAlign(kDoubleAligned);
@@ -1640,35 +1769,33 @@ TEST(TestAlignmentCalculations) {
// Word alignment never requires fill.
fill = Heap::GetFillToAlign(base, kWordAligned);
CHECK_EQ(0, fill);
- fill = Heap::GetFillToAlign(base + kPointerSize, kWordAligned);
+ fill = Heap::GetFillToAlign(base + kTaggedSize, kWordAligned);
CHECK_EQ(0, fill);
// No fill is required when address is double aligned.
fill = Heap::GetFillToAlign(base, kDoubleAligned);
CHECK_EQ(0, fill);
// Fill is required if address is not double aligned.
- fill = Heap::GetFillToAlign(base + kPointerSize, kDoubleAligned);
+ fill = Heap::GetFillToAlign(base + kTaggedSize, kDoubleAligned);
CHECK_EQ(maximum_double_misalignment, fill);
// kDoubleUnaligned has the opposite fill amounts.
fill = Heap::GetFillToAlign(base, kDoubleUnaligned);
CHECK_EQ(maximum_double_misalignment, fill);
- fill = Heap::GetFillToAlign(base + kPointerSize, kDoubleUnaligned);
+ fill = Heap::GetFillToAlign(base + kTaggedSize, kDoubleUnaligned);
CHECK_EQ(0, fill);
}
-
-static HeapObject* NewSpaceAllocateAligned(int size,
- AllocationAlignment alignment) {
+static HeapObject NewSpaceAllocateAligned(int size,
+ AllocationAlignment alignment) {
Heap* heap = CcTest::heap();
AllocationResult allocation =
heap->new_space()->AllocateRawAligned(size, alignment);
- HeapObject* obj = nullptr;
+ HeapObject obj;
allocation.To(&obj);
heap->CreateFillerObjectAt(obj->address(), size, ClearRecordedSlots::kNo);
return obj;
}
-
// Get new space allocation into the desired alignment.
static Address AlignNewSpace(AllocationAlignment alignment, int offset) {
Address* top_addr = CcTest::heap()->new_space()->allocation_top_address();
@@ -1681,61 +1808,58 @@ static Address AlignNewSpace(AllocationAlignment alignment, int offset) {
TEST(TestAlignedAllocation) {
- // Double misalignment is 4 on 32-bit platforms, 0 on 64-bit ones.
- const intptr_t double_misalignment = kDoubleSize - kPointerSize;
+ // Double misalignment is 4 on 32-bit platforms or when pointer compression
+ // is enabled, 0 on 64-bit ones when pointer compression is disabled.
+ const intptr_t double_misalignment = kDoubleSize - kTaggedSize;
Address* top_addr = CcTest::heap()->new_space()->allocation_top_address();
Address start;
- HeapObject* obj;
- HeapObject* filler;
+ HeapObject obj;
+ HeapObject filler;
if (double_misalignment) {
// Allocate a pointer sized object that must be double aligned at an
// aligned address.
start = AlignNewSpace(kDoubleAligned, 0);
- obj = NewSpaceAllocateAligned(kPointerSize, kDoubleAligned);
- CHECK(IsAddressAligned(obj->address(), kDoubleAlignment));
+ obj = NewSpaceAllocateAligned(kTaggedSize, kDoubleAligned);
+ CHECK(IsAligned(obj->address(), kDoubleAlignment));
// There is no filler.
- CHECK_EQ(kPointerSize, *top_addr - start);
+ CHECK_EQ(kTaggedSize, *top_addr - start);
// Allocate a second pointer sized object that must be double aligned at an
// unaligned address.
- start = AlignNewSpace(kDoubleAligned, kPointerSize);
- obj = NewSpaceAllocateAligned(kPointerSize, kDoubleAligned);
- CHECK(IsAddressAligned(obj->address(), kDoubleAlignment));
+ start = AlignNewSpace(kDoubleAligned, kTaggedSize);
+ obj = NewSpaceAllocateAligned(kTaggedSize, kDoubleAligned);
+ CHECK(IsAligned(obj->address(), kDoubleAlignment));
// There is a filler object before the object.
filler = HeapObject::FromAddress(start);
- CHECK(obj != filler && filler->IsFiller() &&
- filler->Size() == kPointerSize);
- CHECK_EQ(kPointerSize + double_misalignment, *top_addr - start);
+ CHECK(obj != filler && filler->IsFiller() && filler->Size() == kTaggedSize);
+ CHECK_EQ(kTaggedSize + double_misalignment, *top_addr - start);
// Similarly for kDoubleUnaligned.
start = AlignNewSpace(kDoubleUnaligned, 0);
- obj = NewSpaceAllocateAligned(kPointerSize, kDoubleUnaligned);
- CHECK(IsAddressAligned(obj->address(), kDoubleAlignment, kPointerSize));
- CHECK_EQ(kPointerSize, *top_addr - start);
- start = AlignNewSpace(kDoubleUnaligned, kPointerSize);
- obj = NewSpaceAllocateAligned(kPointerSize, kDoubleUnaligned);
- CHECK(IsAddressAligned(obj->address(), kDoubleAlignment, kPointerSize));
+ obj = NewSpaceAllocateAligned(kTaggedSize, kDoubleUnaligned);
+ CHECK(IsAligned(obj->address() + kTaggedSize, kDoubleAlignment));
+ CHECK_EQ(kTaggedSize, *top_addr - start);
+ start = AlignNewSpace(kDoubleUnaligned, kTaggedSize);
+ obj = NewSpaceAllocateAligned(kTaggedSize, kDoubleUnaligned);
+ CHECK(IsAligned(obj->address() + kTaggedSize, kDoubleAlignment));
// There is a filler object before the object.
filler = HeapObject::FromAddress(start);
- CHECK(obj != filler && filler->IsFiller() &&
- filler->Size() == kPointerSize);
- CHECK_EQ(kPointerSize + double_misalignment, *top_addr - start);
+ CHECK(obj != filler && filler->IsFiller() && filler->Size() == kTaggedSize);
+ CHECK_EQ(kTaggedSize + double_misalignment, *top_addr - start);
}
}
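// Why the constant changed, as a minimal sketch: with pointer compression
// a tagged slot is 4 bytes even on 64-bit targets, so
// kDoubleSize - kTaggedSize is 4 (fill may be needed) instead of 0
// (doubles always naturally aligned). Assuming both constants are
// constexpr, the invariant can be pinned at compile time:
static_assert(kDoubleSize - kTaggedSize == 0 || kDoubleSize - kTaggedSize == 4,
              "double misalignment is either 0 or one tagged slot");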
-
-static HeapObject* OldSpaceAllocateAligned(int size,
- AllocationAlignment alignment) {
+static HeapObject OldSpaceAllocateAligned(int size,
+ AllocationAlignment alignment) {
Heap* heap = CcTest::heap();
AllocationResult allocation =
heap->old_space()->AllocateRawAligned(size, alignment);
- HeapObject* obj = nullptr;
+ HeapObject obj;
allocation.To(&obj);
heap->CreateFillerObjectAt(obj->address(), size, ClearRecordedSlots::kNo);
return obj;
}
-
// Get old space allocation into the desired alignment.
static Address AlignOldSpace(AllocationAlignment alignment, int offset) {
Address* top_addr = CcTest::heap()->old_space()->allocation_top_address();
@@ -1759,45 +1883,43 @@ TEST(TestAlignedOverAllocation) {
// page and empty free list.
heap::AbandonCurrentlyFreeMemory(heap->old_space());
// Allocate a dummy object to properly set up the linear allocation info.
- AllocationResult dummy =
- heap->old_space()->AllocateRawUnaligned(kPointerSize);
+ AllocationResult dummy = heap->old_space()->AllocateRawUnaligned(kTaggedSize);
CHECK(!dummy.IsRetry());
- heap->CreateFillerObjectAt(dummy.ToObjectChecked()->address(), kPointerSize,
+ heap->CreateFillerObjectAt(dummy.ToObjectChecked()->address(), kTaggedSize,
ClearRecordedSlots::kNo);
- // Double misalignment is 4 on 32-bit platforms, 0 on 64-bit ones.
- const intptr_t double_misalignment = kDoubleSize - kPointerSize;
+ // Double misalignment is 4 on 32-bit platforms or when pointer compression
+ // is enabled, 0 on 64-bit ones when pointer compression is disabled.
+ const intptr_t double_misalignment = kDoubleSize - kTaggedSize;
Address start;
- HeapObject* obj;
- HeapObject* filler;
+ HeapObject obj;
+ HeapObject filler;
if (double_misalignment) {
start = AlignOldSpace(kDoubleAligned, 0);
- obj = OldSpaceAllocateAligned(kPointerSize, kDoubleAligned);
+ obj = OldSpaceAllocateAligned(kTaggedSize, kDoubleAligned);
// The object is aligned.
- CHECK(IsAddressAligned(obj->address(), kDoubleAlignment));
+ CHECK(IsAligned(obj->address(), kDoubleAlignment));
// Try the opposite alignment case.
- start = AlignOldSpace(kDoubleAligned, kPointerSize);
- obj = OldSpaceAllocateAligned(kPointerSize, kDoubleAligned);
- CHECK(IsAddressAligned(obj->address(), kDoubleAlignment));
+ start = AlignOldSpace(kDoubleAligned, kTaggedSize);
+ obj = OldSpaceAllocateAligned(kTaggedSize, kDoubleAligned);
+ CHECK(IsAligned(obj->address(), kDoubleAlignment));
filler = HeapObject::FromAddress(start);
CHECK(obj != filler);
CHECK(filler->IsFiller());
- CHECK_EQ(kPointerSize, filler->Size());
- CHECK(obj != filler && filler->IsFiller() &&
- filler->Size() == kPointerSize);
+ CHECK_EQ(kTaggedSize, filler->Size());
+ CHECK(obj != filler && filler->IsFiller() && filler->Size() == kTaggedSize);
// Similarly for kDoubleUnaligned.
start = AlignOldSpace(kDoubleUnaligned, 0);
- obj = OldSpaceAllocateAligned(kPointerSize, kDoubleUnaligned);
+ obj = OldSpaceAllocateAligned(kTaggedSize, kDoubleUnaligned);
// The object is aligned.
- CHECK(IsAddressAligned(obj->address(), kDoubleAlignment, kPointerSize));
+ CHECK(IsAligned(obj->address() + kTaggedSize, kDoubleAlignment));
// Try the opposite alignment case.
- start = AlignOldSpace(kDoubleUnaligned, kPointerSize);
- obj = OldSpaceAllocateAligned(kPointerSize, kDoubleUnaligned);
- CHECK(IsAddressAligned(obj->address(), kDoubleAlignment, kPointerSize));
+ start = AlignOldSpace(kDoubleUnaligned, kTaggedSize);
+ obj = OldSpaceAllocateAligned(kTaggedSize, kDoubleUnaligned);
+ CHECK(IsAligned(obj->address() + kTaggedSize, kDoubleAlignment));
filler = HeapObject::FromAddress(start);
- CHECK(obj != filler && filler->IsFiller() &&
- filler->Size() == kPointerSize);
+ CHECK(obj != filler && filler->IsFiller() && filler->Size() == kTaggedSize);
}
}
@@ -1807,7 +1929,7 @@ TEST(TestSizeOfObjectsVsHeapIteratorPrecision) {
HeapIterator iterator(CcTest::heap());
intptr_t size_of_objects_1 = CcTest::heap()->SizeOfObjects();
intptr_t size_of_objects_2 = 0;
- for (HeapObject* obj = iterator.next(); obj != nullptr;
+ for (HeapObject obj = iterator.next(); !obj.is_null();
obj = iterator.next()) {
if (!obj->IsFreeSpace()) {
size_of_objects_2 += obj->Size();
@@ -1920,7 +2042,7 @@ TEST(CollectingAllAvailableGarbageShrinksNewSpace) {
static int NumberOfGlobalObjects() {
int count = 0;
HeapIterator iterator(CcTest::heap());
- for (HeapObject* obj = iterator.next(); obj != nullptr;
+ for (HeapObject obj = iterator.next(); !obj.is_null();
obj = iterator.next()) {
if (obj->IsJSGlobalObject()) count++;
}
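// The iteration protocol used throughout this file after the migration,
// as a minimal sketch: HeapIterator::next() returns HeapObject by value,
// and a null object (is_null()) replaces nullptr as the end marker.
HeapIterator it(CcTest::heap());
for (HeapObject o = it.next(); !o.is_null(); o = it.next()) {
  // Visit o; any predicate (IsScript(), IsJSGlobalObject(), ...) works.
}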
@@ -2497,8 +2619,7 @@ TEST(OptimizedPretenuringMixedInObjectProperties) {
CHECK_EQ(1.1, o->RawFastDoublePropertyAt(idx2));
}
- JSObject* inner_object =
- reinterpret_cast<JSObject*>(o->RawFastPropertyAt(idx1));
+ JSObject inner_object = JSObject::cast(o->RawFastPropertyAt(idx1));
CHECK(CcTest::heap()->InOldSpace(inner_object));
if (!inner_object->IsUnboxedDoubleField(idx1)) {
CHECK(CcTest::heap()->InOldSpace(inner_object->RawFastPropertyAt(idx1)));
@@ -2779,7 +2900,7 @@ TEST(OptimizedAllocationArrayLiterals) {
CHECK(Heap::InNewSpace(o->elements()));
}
-static int CountMapTransitions(i::Isolate* isolate, Map* map) {
+static int CountMapTransitions(i::Isolate* isolate, Map map) {
DisallowHeapAllocation no_gc;
return TransitionsAccessor(isolate, map, &no_gc).NumberOfTransitions();
}
@@ -2862,8 +2983,8 @@ static void AddPropertyTo(
FLAG_gc_global = true;
FLAG_retain_maps_for_n_gc = 0;
CcTest::heap()->set_allocation_timeout(gc_count);
- JSReceiver::SetProperty(isolate, object, prop_name, twenty_three,
- LanguageMode::kSloppy)
+ Object::SetProperty(isolate, object, prop_name, twenty_three,
+ LanguageMode::kSloppy)
.Check();
}
@@ -2963,8 +3084,10 @@ TEST(ReleaseOverReservedPages) {
if (FLAG_never_compact) return;
FLAG_trace_gc = true;
// The optimizer can allocate stuff, messing up the test.
+#ifndef V8_LITE_MODE
FLAG_opt = false;
FLAG_always_opt = false;
+#endif // V8_LITE_MODE
// - Parallel compaction increases fragmentation, depending on how existing
// memory is distributed. Since this is non-deterministic because of
// concurrent sweeping, we disable it for this test.
@@ -3065,6 +3188,7 @@ TEST(PrintSharedFunctionInfo) {
TEST(IncrementalMarkingPreservesMonomorphicCallIC) {
+ if (!FLAG_use_ic) return;
if (!FLAG_incremental_marking) return;
if (FLAG_always_opt) return;
CcTest::InitializeVM();
@@ -3119,6 +3243,7 @@ static void CheckVectorIC(Handle<JSFunction> f, int slot_index,
}
TEST(IncrementalMarkingPreservesMonomorphicConstructor) {
+ if (FLAG_lite_mode) return;
if (!FLAG_incremental_marking) return;
if (FLAG_always_opt) return;
CcTest::InitializeVM();
@@ -3143,6 +3268,7 @@ TEST(IncrementalMarkingPreservesMonomorphicConstructor) {
}
TEST(IncrementalMarkingPreservesMonomorphicIC) {
+ if (!FLAG_use_ic) return;
if (!FLAG_incremental_marking) return;
if (FLAG_always_opt) return;
CcTest::InitializeVM();
@@ -3165,6 +3291,7 @@ TEST(IncrementalMarkingPreservesMonomorphicIC) {
}
TEST(IncrementalMarkingPreservesPolymorphicIC) {
+ if (!FLAG_use_ic) return;
if (!FLAG_incremental_marking) return;
if (FLAG_always_opt) return;
CcTest::InitializeVM();
@@ -3203,6 +3330,7 @@ TEST(IncrementalMarkingPreservesPolymorphicIC) {
}
TEST(ContextDisposeDoesntClearPolymorphicIC) {
+ if (!FLAG_use_ic) return;
if (!FLAG_incremental_marking) return;
if (FLAG_always_opt) return;
CcTest::InitializeVM();
@@ -3305,7 +3433,10 @@ UNINITIALIZED_TEST(ReleaseStackTraceData) {
// See: https://codereview.chromium.org/181833004/
return;
}
- FLAG_use_ic = false; // ICs retain objects.
+#ifndef V8_LITE_MODE
+ // ICs retain objects.
+ FLAG_use_ic = false;
+#endif // V8_LITE_MODE
FLAG_concurrent_recompilation = false;
v8::Isolate::CreateParams create_params;
create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
@@ -3359,7 +3490,9 @@ UNINITIALIZED_TEST(ReleaseStackTraceData) {
TEST(Regress169928) {
FLAG_allow_natives_syntax = true;
+#ifndef V8_LITE_MODE
FLAG_opt = false;
+#endif // V8_LITE_MODE
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
LocalContext env;
@@ -3404,7 +3537,7 @@ TEST(Regress169928) {
heap::AllocateAllButNBytes(
CcTest::heap()->new_space(),
- JSArray::kSize + AllocationMemento::kSize + kPointerSize);
+ JSArray::kSize + AllocationMemento::kSize + kTaggedSize);
Handle<JSArray> array =
factory->NewJSArrayWithElements(array_data, PACKED_SMI_ELEMENTS);
@@ -3414,14 +3547,14 @@ TEST(Regress169928) {
// We need a filler the size of an AllocationMemento object, plus an extra
// fill pointer value.
- HeapObject* obj = nullptr;
+ HeapObject obj;
AllocationResult allocation =
CcTest::heap()->new_space()->AllocateRawUnaligned(
- AllocationMemento::kSize + kPointerSize);
+ AllocationMemento::kSize + kTaggedSize);
CHECK(allocation.To(&obj));
Address addr_obj = obj->address();
CcTest::heap()->CreateFillerObjectAt(addr_obj,
- AllocationMemento::kSize + kPointerSize,
+ AllocationMemento::kSize + kTaggedSize,
ClearRecordedSlots::kNo);
// Give the array a name, making sure not to allocate strings.
@@ -3450,9 +3583,9 @@ TEST(LargeObjectSlotRecording) {
// Create an object on an evacuation candidate.
heap::SimulateFullSpace(heap->old_space());
Handle<FixedArray> lit = isolate->factory()->NewFixedArray(4, TENURED);
- Page* evac_page = Page::FromAddress(lit->address());
+ Page* evac_page = Page::FromHeapObject(*lit);
heap::ForceEvacuationCandidate(evac_page);
- FixedArray* old_location = *lit;
+ FixedArray old_location = *lit;
// Allocate a large object.
int size = Max(1000000, kMaxRegularHeapObjectSize + KB);
@@ -3485,8 +3618,8 @@ TEST(LargeObjectSlotRecording) {
class DummyVisitor : public RootVisitor {
public:
- void VisitRootPointers(Root root, const char* description, Object** start,
- Object** end) override {}
+ void VisitRootPointers(Root root, const char* description,
+ FullObjectSlot start, FullObjectSlot end) override {}
};
@@ -3565,9 +3698,8 @@ TEST(DisableInlineAllocation) {
static int AllocationSitesCount(Heap* heap) {
int count = 0;
- for (Object* site = heap->allocation_sites_list();
- site->IsAllocationSite();) {
- AllocationSite* cur = AllocationSite::cast(site);
+ for (Object site = heap->allocation_sites_list(); site->IsAllocationSite();) {
+ AllocationSite cur = AllocationSite::cast(site);
CHECK(cur->HasWeakNext());
site = cur->weak_next();
count++;
@@ -3577,11 +3709,11 @@ static int AllocationSitesCount(Heap* heap) {
static int SlimAllocationSiteCount(Heap* heap) {
int count = 0;
- for (Object* weak_list = heap->allocation_sites_list();
+ for (Object weak_list = heap->allocation_sites_list();
weak_list->IsAllocationSite();) {
- AllocationSite* weak_cur = AllocationSite::cast(weak_list);
- for (Object* site = weak_cur->nested_site(); site->IsAllocationSite();) {
- AllocationSite* cur = AllocationSite::cast(site);
+ AllocationSite weak_cur = AllocationSite::cast(weak_list);
+ for (Object site = weak_cur->nested_site(); site->IsAllocationSite();) {
+ AllocationSite cur = AllocationSite::cast(site);
CHECK(!cur->HasWeakNext());
site = cur->nested_site();
count++;
@@ -3629,7 +3761,7 @@ TEST(EnsureAllocationSiteDependentCodesProcessed) {
.ToLocalChecked())));
int dependency_group_count = 0;
- DependentCode* dependency = site->dependent_code();
+ DependentCode dependency = site->dependent_code();
while (dependency != ReadOnlyRoots(heap).empty_weak_fixed_array()) {
CHECK(dependency->group() ==
DependentCode::kAllocationSiteTransitionChangedGroup ||
@@ -3637,7 +3769,7 @@ TEST(EnsureAllocationSiteDependentCodesProcessed) {
DependentCode::kAllocationSiteTenuringChangedGroup);
CHECK_EQ(1, dependency->count());
CHECK(dependency->object_at(0)->IsWeak());
- Code* function_bar =
+ Code function_bar =
Code::cast(dependency->object_at(0)->GetHeapObjectAssumeWeak());
CHECK_EQ(bar_handle->code(), function_bar);
dependency = dependency->next_link();
@@ -3674,6 +3806,8 @@ void CheckNumberOfAllocations(Heap* heap, const char* source,
}
TEST(AllocationSiteCreation) {
+ // No feedback vectors and hence no allocation sites.
+ if (FLAG_lite_mode) return;
FLAG_always_opt = false;
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
@@ -3848,6 +3982,7 @@ TEST(CellsInOptimizedCodeAreWeak) {
}
CHECK(code->marked_for_deoptimization());
+ CHECK(code->embedded_objects_cleared());
}
@@ -3890,6 +4025,7 @@ TEST(ObjectsInOptimizedCodeAreWeak) {
}
CHECK(code->marked_for_deoptimization());
+ CHECK(code->embedded_objects_cleared());
}
TEST(NewSpaceObjectsInOptimizedCode) {
@@ -3950,6 +4086,52 @@ TEST(NewSpaceObjectsInOptimizedCode) {
}
CHECK(code->marked_for_deoptimization());
+ CHECK(code->embedded_objects_cleared());
+}
+
+TEST(ObjectsInEagerlyDeoptimizedCodeAreWeak) {
+ if (FLAG_always_opt || !FLAG_opt) return;
+ FLAG_allow_natives_syntax = true;
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ v8::internal::Heap* heap = CcTest::heap();
+
+ if (!isolate->use_optimizer()) return;
+ HandleScope outer_scope(heap->isolate());
+ Handle<Code> code;
+ {
+ LocalContext context;
+ HandleScope scope(heap->isolate());
+
+ CompileRun(
+ "function bar() {"
+ " return foo(1);"
+ "};"
+ "function foo(x) { with (x) { return 1 + x; } };"
+ "%NeverOptimizeFunction(foo);"
+ "bar();"
+ "bar();"
+ "bar();"
+ "%OptimizeFunctionOnNextCall(bar);"
+ "bar();"
+ "%DeoptimizeFunction(bar);");
+
+ Handle<JSFunction> bar = Handle<JSFunction>::cast(v8::Utils::OpenHandle(
+ *v8::Local<v8::Function>::Cast(CcTest::global()
+ ->Get(context.local(), v8_str("bar"))
+ .ToLocalChecked())));
+ code = scope.CloseAndEscape(Handle<Code>(bar->code(), isolate));
+ }
+
+ CHECK(code->marked_for_deoptimization());
+
+  // Now make sure that a GC gets rid of the function.
+ for (int i = 0; i < 4; i++) {
+ CcTest::CollectAllGarbage();
+ }
+
+ CHECK(code->marked_for_deoptimization());
+ CHECK(code->embedded_objects_cleared());
}
static Handle<JSFunction> OptimizeDummyFunction(v8::Isolate* isolate,
@@ -3969,8 +4151,7 @@ static Handle<JSFunction> OptimizeDummyFunction(v8::Isolate* isolate,
return fun;
}
-
-static int GetCodeChainLength(Code* code) {
+static int GetCodeChainLength(Code code) {
int result = 0;
while (code->next_code_link()->IsCode()) {
result++;
@@ -4037,8 +4218,8 @@ TEST(NextCodeLinkInCodeDataContainerIsCleared) {
static Handle<Code> DummyOptimizedCode(Isolate* isolate) {
i::byte buffer[i::Assembler::kMinimalBufferSize];
- MacroAssembler masm(isolate, buffer, sizeof(buffer),
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler masm(isolate, v8::internal::CodeObjectRequired::kYes,
+ ExternalAssemblerBuffer(buffer, sizeof(buffer)));
CodeDesc desc;
masm.Push(isolate->factory()->undefined_value());
masm.Push(isolate->factory()->undefined_value());
@@ -4089,6 +4270,7 @@ static void ClearWeakIC(
TEST(WeakFunctionInConstructor) {
+ if (FLAG_lite_mode) return;
if (FLAG_always_opt) return;
FLAG_stress_compaction = false;
FLAG_stress_incremental_marking = false;
@@ -4132,13 +4314,13 @@ TEST(WeakFunctionInConstructor) {
Handle<FeedbackVector> feedback_vector =
Handle<FeedbackVector>(createObj->feedback_vector(), CcTest::i_isolate());
for (int i = 0; i < 20; i++) {
- MaybeObject* slot_value = feedback_vector->Get(FeedbackSlot(0));
+ MaybeObject slot_value = feedback_vector->Get(FeedbackSlot(0));
CHECK(slot_value->IsWeakOrCleared());
if (slot_value->IsCleared()) break;
CcTest::CollectAllGarbage();
}
- MaybeObject* slot_value = feedback_vector->Get(FeedbackSlot(0));
+ MaybeObject slot_value = feedback_vector->Get(FeedbackSlot(0));
CHECK(slot_value->IsCleared());
CompileRun(
"function coat() { this.x = 6; }"
@@ -4334,13 +4516,14 @@ Handle<JSFunction> GetFunctionByName(Isolate* isolate, const char* name) {
void CheckIC(Handle<JSFunction> function, int slot_index,
InlineCacheState state) {
- FeedbackVector* vector = function->feedback_vector();
+ FeedbackVector vector = function->feedback_vector();
FeedbackSlot slot(slot_index);
FeedbackNexus nexus(vector, slot);
CHECK_EQ(nexus.StateFromFeedback(), state);
}
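// A minimal usage sketch of CheckIC above, assuming slot 0 holds the
// relevant feedback: FeedbackVector is now a value type, so the nexus is
// built from a copy rather than a pointer.
CheckIC(function, 0, MONOMORPHIC);  // function: hypothetical Handle<JSFunction>.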
TEST(MonomorphicStaysMonomorphicAfterGC) {
+ if (!FLAG_use_ic) return;
if (FLAG_always_opt) return;
ManualGCScope manual_gc_scope;
CcTest::InitializeVM();
@@ -4374,6 +4557,7 @@ TEST(MonomorphicStaysMonomorphicAfterGC) {
TEST(PolymorphicStaysPolymorphicAfterGC) {
+ if (!FLAG_use_ic) return;
if (FLAG_always_opt) return;
ManualGCScope manual_gc_scope;
CcTest::InitializeVM();
@@ -4508,7 +4692,7 @@ HEAP_TEST(Regress538257) {
heap->CanExpandOldGeneration(old_space->AreaSize());
i++) {
objects[i] = i_isolate->factory()->NewFixedArray(kFixedArrayLen, TENURED);
- heap::ForceEvacuationCandidate(Page::FromAddress(objects[i]->address()));
+ heap::ForceEvacuationCandidate(Page::FromHeapObject(*objects[i]));
}
heap::SimulateFullSpace(old_space);
CcTest::CollectAllGarbage();
@@ -4560,7 +4744,7 @@ TEST(Regress507979) {
// way the filler object shares the mark bits with the following live object.
o1->Shrink(isolate, kFixedArrayLen - 1);
- for (HeapObject* obj = it.next(); obj != nullptr; obj = it.next()) {
+ for (HeapObject obj = it.next(); !obj.is_null(); obj = it.next()) {
// Let's not optimize the loop away.
CHECK_NE(obj->address(), kNullAddress);
}
@@ -4590,14 +4774,15 @@ TEST(Regress388880) {
// Allocate padding objects in old pointer space so that the object
// allocated afterwards ends at the end of the page.
heap::SimulateFullSpace(heap->old_space());
- size_t padding_size = desired_offset - Page::kObjectStartOffset;
+ size_t padding_size =
+ desired_offset - MemoryChunkLayout::ObjectStartOffsetInDataPage();
heap::CreatePadding(heap, static_cast<int>(padding_size), TENURED);
Handle<JSObject> o = factory->NewJSObjectFromMap(map1, TENURED);
o->set_raw_properties_or_hash(*factory->empty_fixed_array());
// Ensure that the object allocated where we need it.
- Page* page = Page::FromAddress(o->address());
+ Page* page = Page::FromHeapObject(*o);
CHECK_EQ(desired_offset, page->Offset(o->address()));
// Now we have an object right at the end of the page.
@@ -4640,9 +4825,8 @@ TEST(Regress3631) {
// Incrementally mark the backing store.
Handle<JSReceiver> obj =
v8::Utils::OpenHandle(*v8::Local<v8::Object>::Cast(result));
- Handle<JSWeakCollection> weak_map(reinterpret_cast<JSWeakCollection*>(*obj),
- isolate);
- HeapObject* weak_map_table = HeapObject::cast(weak_map->table());
+ Handle<JSWeakCollection> weak_map(JSWeakCollection::cast(*obj), isolate);
+ HeapObject weak_map_table = HeapObject::cast(weak_map->table());
IncrementalMarking::MarkingState* marking_state = marking->marking_state();
while (!marking_state->IsBlack(weak_map_table) && !marking->IsStopped()) {
marking->Step(MB, IncrementalMarking::NO_GC_VIA_STACK_GUARD,
@@ -4671,7 +4855,7 @@ TEST(Regress442710) {
Handle<JSArray> array = factory->NewJSArray(2);
Handle<String> name = factory->InternalizeUtf8String("testArray");
- JSReceiver::SetProperty(isolate, global, name, array, LanguageMode::kSloppy)
+ Object::SetProperty(isolate, global, name, array, LanguageMode::kSloppy)
.Check();
CompileRun("testArray[0] = 1; testArray[1] = 2; testArray.shift();");
CcTest::CollectGarbage(OLD_SPACE);
@@ -4764,16 +4948,6 @@ TEST(MapRetaining) {
CheckMapRetainingFor(7);
}
-TEST(WritableVsImmortalRoots) {
- for (RootIndex root_index = RootIndex::kFirstRoot;
- root_index <= RootIndex::kLastRoot; ++root_index) {
- bool writable = Heap::RootCanBeWrittenAfterInitialization(root_index);
- bool immortal = Heap::RootIsImmortalImmovable(root_index);
- // A root value can be writable, immortal, or neither, but not both.
- CHECK(!immortal || !writable);
- }
-}
-
TEST(PreprocessStackTrace) {
// Do not automatically trigger early GC.
FLAG_gc_interval = -1;
@@ -4807,88 +4981,14 @@ TEST(PreprocessStackTrace) {
}
-static bool utils_has_been_collected = false;
-
-static void UtilsHasBeenCollected(
- const v8::WeakCallbackInfo<v8::Persistent<v8::Object>>& data) {
- utils_has_been_collected = true;
- data.GetParameter()->Reset();
-}
-
-
-TEST(BootstrappingExports) {
- // Expose utils object and delete it to observe that it is indeed
- // being garbage-collected.
- FLAG_expose_natives_as = "utils";
- CcTest::InitializeVM();
- v8::Isolate* isolate = CcTest::isolate();
- LocalContext env;
-
- if (Snapshot::HasContextSnapshot(CcTest::i_isolate(), 0)) return;
-
- utils_has_been_collected = false;
-
- v8::Persistent<v8::Object> utils;
-
- {
- v8::HandleScope scope(isolate);
- v8::Local<v8::String> name = v8_str("utils");
- utils.Reset(isolate, CcTest::global()
- ->Get(env.local(), name)
- .ToLocalChecked()
- ->ToObject(env.local())
- .ToLocalChecked());
- CHECK(CcTest::global()->Delete(env.local(), name).FromJust());
- }
-
- utils.SetWeak(&utils, UtilsHasBeenCollected,
- v8::WeakCallbackType::kParameter);
-
- CcTest::CollectAllAvailableGarbage();
-
- CHECK(utils_has_been_collected);
-}
-
-
-TEST(Regress1878) {
- FLAG_allow_natives_syntax = true;
- CcTest::InitializeVM();
- v8::Isolate* isolate = CcTest::isolate();
- v8::HandleScope scope(isolate);
- v8::Local<v8::Function> constructor = v8::Utils::CallableToLocal(
- CcTest::i_isolate()->internal_array_function());
- LocalContext env;
- CHECK(CcTest::global()
- ->Set(env.local(), v8_str("InternalArray"), constructor)
- .FromJust());
-
- v8::TryCatch try_catch(isolate);
-
- CompileRun(
- "var a = Array();"
- "for (var i = 0; i < 1000; i++) {"
- " var ai = new InternalArray(10000);"
- " if (%HaveSameMap(ai, a)) throw Error();"
- " if (!%HasObjectElements(ai)) throw Error();"
- "}"
- "for (var i = 0; i < 1000; i++) {"
- " var ai = new InternalArray(10000);"
- " if (%HaveSameMap(ai, a)) throw Error();"
- " if (!%HasObjectElements(ai)) throw Error();"
- "}");
-
- CHECK(!try_catch.HasCaught());
-}
-
-
void AllocateInSpace(Isolate* isolate, size_t bytes, AllocationSpace space) {
CHECK_LE(FixedArray::kHeaderSize, bytes);
- CHECK_EQ(0, bytes % kPointerSize);
+ CHECK(IsAligned(bytes, kTaggedSize));
Factory* factory = isolate->factory();
HandleScope scope(isolate);
AlwaysAllocateScope always_allocate(isolate);
int elements =
- static_cast<int>((bytes - FixedArray::kHeaderSize) / kPointerSize);
+ static_cast<int>((bytes - FixedArray::kHeaderSize) / kTaggedSize);
Handle<FixedArray> array = factory->NewFixedArray(
elements, space == NEW_SPACE ? NOT_TENURED : TENURED);
CHECK((space == NEW_SPACE) == Heap::InNewSpace(*array));
@@ -4960,8 +5060,8 @@ TEST(OldSpaceAllocationCounter) {
static void CheckLeak(const v8::FunctionCallbackInfo<v8::Value>& args) {
Isolate* isolate = CcTest::i_isolate();
- Object* message =
- *reinterpret_cast<Object**>(isolate->pending_message_obj_address());
+ Object message(
+ *reinterpret_cast<Address*>(isolate->pending_message_obj_address()));
CHECK(message->IsTheHole(isolate));
}
@@ -5060,14 +5160,17 @@ TEST(ScriptIterator) {
int script_count = 0;
{
HeapIterator it(heap);
- for (HeapObject* obj = it.next(); obj != nullptr; obj = it.next()) {
+ for (HeapObject obj = it.next(); !obj.is_null(); obj = it.next()) {
if (obj->IsScript()) script_count++;
}
}
{
Script::Iterator iterator(isolate);
- while (iterator.Next()) script_count--;
+ for (Script script = iterator.Next(); !script.is_null();
+ script = iterator.Next()) {
+ script_count--;
+ }
}
CHECK_EQ(0, script_count);
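// A minimal sketch of the updated iterator contract, assuming
// Script::Iterator::Next() returns a null Script once exhausted (the old
// protocol returned a value tested against nullptr):
Script::Iterator it(isolate);
for (Script s = it.Next(); !s.is_null(); s = it.Next()) {
  // Each live Script on the heap is visited exactly once.
}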
@@ -5087,7 +5190,7 @@ TEST(SharedFunctionInfoIterator) {
int sfi_count = 0;
{
HeapIterator it(heap);
- for (HeapObject* obj = it.next(); obj != nullptr; obj = it.next()) {
+ for (HeapObject obj = it.next(); !obj.is_null(); obj = it.next()) {
if (!obj->IsSharedFunctionInfo()) continue;
sfi_count++;
}
@@ -5095,7 +5198,7 @@ TEST(SharedFunctionInfoIterator) {
{
SharedFunctionInfo::GlobalIterator iterator(isolate);
- while (iterator.Next()) sfi_count--;
+ while (!iterator.Next().is_null()) sfi_count--;
}
CHECK_EQ(0, sfi_count);
@@ -5108,7 +5211,7 @@ AllocationResult HeapTester::AllocateByteArrayForTest(Heap* heap, int length,
DCHECK(length >= 0 && length <= ByteArray::kMaxLength);
int size = ByteArray::SizeFor(length);
AllocationSpace space = heap->SelectSpace(pretenure);
- HeapObject* result = nullptr;
+ HeapObject result;
{
AllocationResult allocation = heap->AllocateRaw(size, space);
if (!allocation.To(&result)) return allocation;
@@ -5132,7 +5235,7 @@ HEAP_TEST(Regress587004) {
Isolate* isolate = CcTest::i_isolate();
Factory* factory = isolate->factory();
const int N =
- (kMaxRegularHeapObjectSize - FixedArray::kHeaderSize) / kPointerSize;
+ (kMaxRegularHeapObjectSize - FixedArray::kHeaderSize) / kTaggedSize;
Handle<FixedArray> array = factory->NewFixedArray(N, TENURED);
CHECK(heap->old_space()->Contains(*array));
Handle<Object> number = factory->NewHeapNumber(1.0);
@@ -5144,7 +5247,7 @@ HEAP_TEST(Regress587004) {
heap::SimulateFullSpace(heap->old_space());
heap->RightTrimFixedArray(*array, N - 1);
heap->mark_compact_collector()->EnsureSweepingCompleted();
- ByteArray* byte_array;
+ ByteArray byte_array;
const int M = 256;
// Don't allow old space expansion. The test works without this flag too,
// but becomes very slow.
@@ -5175,7 +5278,7 @@ HEAP_TEST(Regress589413) {
Factory* factory = isolate->factory();
// Fill the new space with byte arrays with elements looking like pointers.
const int M = 256;
- ByteArray* byte_array;
+ ByteArray byte_array;
while (AllocateByteArrayForTest(heap, M, NOT_TENURED).To(&byte_array)) {
for (int j = 0; j < M; j++) {
byte_array->set(j, 0x31);
@@ -5188,14 +5291,14 @@ HEAP_TEST(Regress589413) {
// This number is close to large free list category threshold.
const int N = 0x3EEE;
{
- std::vector<FixedArray*> arrays;
+ std::vector<FixedArray> arrays;
std::set<Page*> pages;
- FixedArray* array;
+ FixedArray array;
// Fill all pages with fixed arrays.
heap->set_force_oom(true);
while (AllocateFixedArrayForTest(heap, N, TENURED).To(&array)) {
arrays.push_back(array);
- pages.insert(Page::FromAddress(array->address()));
+ pages.insert(Page::FromHeapObject(array));
// Add the array in root set.
handle(array, isolate);
}
@@ -5203,7 +5306,7 @@ HEAP_TEST(Regress589413) {
heap->set_force_oom(false);
while (AllocateFixedArrayForTest(heap, N, TENURED).To(&array)) {
arrays.push_back(array);
- pages.insert(Page::FromAddress(array->address()));
+ pages.insert(Page::FromHeapObject(array));
// Add the array in root set.
handle(array, isolate);
// Do not expand anymore.
@@ -5214,7 +5317,7 @@ HEAP_TEST(Regress589413) {
{
AlwaysAllocateScope always_allocate(isolate);
Handle<HeapObject> ec_obj = factory->NewFixedArray(5000, TENURED);
- Page* ec_page = Page::FromAddress(ec_obj->address());
+ Page* ec_page = Page::FromHeapObject(*ec_obj);
heap::ForceEvacuationCandidate(ec_page);
// Make all arrays point to evacuation candidate so that
// slots are recorded for them.
@@ -5246,7 +5349,7 @@ TEST(Regress598319) {
Heap* heap = CcTest::heap();
Isolate* isolate = heap->isolate();
- const int kNumberOfObjects = kMaxRegularHeapObjectSize / kPointerSize;
+ const int kNumberOfObjects = kMaxRegularHeapObjectSize / kTaggedSize;
struct Arr {
Arr(Isolate* isolate, int number_of_objects) {
@@ -5255,7 +5358,7 @@ TEST(Regress598319) {
// Temporary scope to avoid getting any other objects into the root set.
v8::HandleScope scope(CcTest::isolate());
Handle<FixedArray> tmp =
- isolate->factory()->NewFixedArray(number_of_objects);
+ isolate->factory()->NewFixedArray(number_of_objects, TENURED);
root->set(0, *tmp);
for (int i = 0; i < get()->length(); i++) {
tmp = isolate->factory()->NewFixedArray(100, TENURED);
@@ -5264,7 +5367,7 @@ TEST(Regress598319) {
}
}
- FixedArray* get() { return FixedArray::cast(root->get(0)); }
+ FixedArray get() { return FixedArray::cast(root->get(0)); }
Handle<FixedArray> root;
} arr(isolate, kNumberOfObjects);
@@ -5286,7 +5389,7 @@ TEST(Regress598319) {
IncrementalMarking::MarkingState* marking_state = marking->marking_state();
CHECK(marking_state->IsWhite(arr.get()));
for (int i = 0; i < arr.get()->length(); i++) {
- HeapObject* arr_value = HeapObject::cast(arr.get()->get(i));
+ HeapObject arr_value = HeapObject::cast(arr.get()->get(i));
CHECK(marking_state->IsWhite(arr_value));
}
@@ -5300,7 +5403,7 @@ TEST(Regress598319) {
// Check that we have not marked the interesting array during root scanning.
for (int i = 0; i < arr.get()->length(); i++) {
- HeapObject* arr_value = HeapObject::cast(arr.get()->get(i));
+ HeapObject arr_value = HeapObject::cast(arr.get()->get(i));
CHECK(marking_state->IsWhite(arr_value));
}
@@ -5336,7 +5439,7 @@ TEST(Regress598319) {
// All objects need to be black after marking. If a white object crossed the
// progress bar, we would fail here.
for (int i = 0; i < arr.get()->length(); i++) {
- HeapObject* arr_value = HeapObject::cast(arr.get()->get(i));
+ HeapObject arr_value = HeapObject::cast(arr.get()->get(i));
CHECK(marking_state->IsBlack(arr_value));
}
}
@@ -5368,7 +5471,7 @@ TEST(Regress609761) {
CcTest::InitializeVM();
v8::HandleScope scope(CcTest::isolate());
Heap* heap = CcTest::heap();
- int length = kMaxRegularHeapObjectSize / kPointerSize + 1;
+ int length = kMaxRegularHeapObjectSize / kTaggedSize + 1;
Handle<FixedArray> array = ShrinkArrayAndCheckSize(heap, length);
CHECK(heap->lo_space()->Contains(*array));
}
@@ -5383,7 +5486,6 @@ TEST(LiveBytes) {
TEST(Regress615489) {
if (!FLAG_incremental_marking) return;
- FLAG_black_allocation = true;
CcTest::InitializeVM();
v8::HandleScope scope(CcTest::isolate());
Heap* heap = CcTest::heap();
@@ -5454,7 +5556,7 @@ TEST(Regress631969) {
heap::SimulateFullSpace(heap->old_space());
Handle<String> s1 = factory->NewStringFromStaticChars("123456789", TENURED);
Handle<String> s2 = factory->NewStringFromStaticChars("01234", TENURED);
- heap::ForceEvacuationCandidate(Page::FromAddress(s1->address()));
+ heap::ForceEvacuationCandidate(Page::FromHeapObject(*s1));
heap::SimulateIncrementalMarking(heap, false);
@@ -5487,7 +5589,6 @@ TEST(Regress631969) {
TEST(LeftTrimFixedArrayInBlackArea) {
if (!FLAG_incremental_marking) return;
- FLAG_black_allocation = true;
CcTest::InitializeVM();
v8::HandleScope scope(CcTest::isolate());
Heap* heap = CcTest::heap();
@@ -5519,7 +5620,7 @@ TEST(LeftTrimFixedArrayInBlackArea) {
// Now left trim the allocated black area. A filler has to be installed
// for the trimmed area and all mark bits of the trimmed area have to be
// cleared.
- FixedArrayBase* trimmed = heap->LeftTrimFixedArray(*array, 10);
+ FixedArrayBase trimmed = heap->LeftTrimFixedArray(*array, 10);
CHECK(marking_state->IsBlack(trimmed));
heap::GcAndSweep(heap, OLD_SPACE);
@@ -5527,7 +5628,6 @@ TEST(LeftTrimFixedArrayInBlackArea) {
TEST(ContinuousLeftTrimFixedArrayInBlackArea) {
if (!FLAG_incremental_marking) return;
- FLAG_black_allocation = true;
CcTest::InitializeVM();
v8::HandleScope scope(CcTest::isolate());
Heap* heap = CcTest::heap();
@@ -5564,13 +5664,13 @@ TEST(ContinuousLeftTrimFixedArrayInBlackArea) {
page->AddressToMarkbitIndex(end_address)));
CHECK(heap->old_space()->Contains(*array));
- FixedArrayBase* previous = *array;
- FixedArrayBase* trimmed;
+ FixedArrayBase previous = *array;
+ FixedArrayBase trimmed;
// First trim in one word steps.
for (int i = 0; i < 10; i++) {
trimmed = heap->LeftTrimFixedArray(previous, 1);
- HeapObject* filler = HeapObject::FromAddress(previous->address());
+ HeapObject filler = HeapObject::FromAddress(previous->address());
CHECK(filler->IsFiller());
CHECK(marking_state->IsBlack(trimmed));
CHECK(marking_state->IsBlack(previous));
@@ -5581,7 +5681,7 @@ TEST(ContinuousLeftTrimFixedArrayInBlackArea) {
for (int i = 2; i <= 3; i++) {
for (int j = 0; j < 10; j++) {
trimmed = heap->LeftTrimFixedArray(previous, i);
- HeapObject* filler = HeapObject::FromAddress(previous->address());
+ HeapObject filler = HeapObject::FromAddress(previous->address());
CHECK(filler->IsFiller());
CHECK(marking_state->IsBlack(trimmed));
CHECK(marking_state->IsBlack(previous));
@@ -5594,7 +5694,6 @@ TEST(ContinuousLeftTrimFixedArrayInBlackArea) {
TEST(ContinuousRightTrimFixedArrayInBlackArea) {
if (!FLAG_incremental_marking) return;
- FLAG_black_allocation = true;
CcTest::InitializeVM();
v8::HandleScope scope(CcTest::isolate());
Heap* heap = CcTest::heap();
@@ -5634,19 +5733,19 @@ TEST(ContinuousRightTrimFixedArrayInBlackArea) {
CHECK(heap->old_space()->Contains(*array));
// Trim it once by one word to make checking for white marking color uniform.
- Address previous = end_address - kPointerSize;
+ Address previous = end_address - kTaggedSize;
isolate->heap()->RightTrimFixedArray(*array, 1);
- HeapObject* filler = HeapObject::FromAddress(previous);
+ HeapObject filler = HeapObject::FromAddress(previous);
CHECK(filler->IsFiller());
CHECK(marking_state->IsImpossible(filler));
// Trim 10 times by one, two, and three words.
for (int i = 1; i <= 3; i++) {
for (int j = 0; j < 10; j++) {
- previous -= kPointerSize * i;
+ previous -= kTaggedSize * i;
isolate->heap()->RightTrimFixedArray(*array, i);
- HeapObject* filler = HeapObject::FromAddress(previous);
+ HeapObject filler = HeapObject::FromAddress(previous);
CHECK(filler->IsFiller());
CHECK(marking_state->IsWhite(filler));
}
@@ -5674,21 +5773,18 @@ TEST(Regress618958) {
!heap->incremental_marking()->IsStopped()));
}
-TEST(YoungGenerationLargeObjectAllocation) {
+TEST(YoungGenerationLargeObjectAllocationScavenge) {
if (FLAG_minor_mc) return;
FLAG_young_generation_large_objects = true;
CcTest::InitializeVM();
v8::HandleScope scope(CcTest::isolate());
Heap* heap = CcTest::heap();
Isolate* isolate = heap->isolate();
+ if (!isolate->serializer_enabled()) return;
- Handle<FixedArray> array = isolate->factory()->NewFixedArray(200000);
- MemoryChunk* chunk = MemoryChunk::FromAddress(array->address());
- CHECK_EQ(LO_SPACE, chunk->owner()->identity());
- CHECK(!chunk->IsFlagSet(MemoryChunk::IN_TO_SPACE));
-
- Handle<FixedArray> array_small = isolate->factory()->NewFixedArray(20000);
- chunk = MemoryChunk::FromAddress(array_small->address());
+ // TODO(hpayer): Update the test as soon as we have a tenure limit for LO.
+ Handle<FixedArray> array_small = isolate->factory()->NewFixedArray(200000);
+ MemoryChunk* chunk = MemoryChunk::FromHeapObject(*array_small);
CHECK_EQ(NEW_LO_SPACE, chunk->owner()->identity());
CHECK(chunk->IsFlagSet(MemoryChunk::IN_TO_SPACE));
@@ -5699,21 +5795,78 @@ TEST(YoungGenerationLargeObjectAllocation) {
// After the first young generation GC array_small will be in the old
// generation large object space.
- chunk = MemoryChunk::FromAddress(array_small->address());
+ chunk = MemoryChunk::FromHeapObject(*array_small);
CHECK_EQ(LO_SPACE, chunk->owner()->identity());
CHECK(!chunk->IsFlagSet(MemoryChunk::IN_TO_SPACE));
CcTest::CollectAllAvailableGarbage();
}
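// A minimal sketch of the lifecycle this test pins down (array_small as
// above), assuming young large objects start in NEW_LO_SPACE with
// IN_TO_SPACE set: the first young GC promotes them, after which the
// chunk reports LO_SPACE and the to-space flag is gone.
MemoryChunk* c = MemoryChunk::FromHeapObject(*array_small);
CHECK_EQ(LO_SPACE, c->owner()->identity());
CHECK(!c->IsFlagSet(MemoryChunk::IN_TO_SPACE));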
+TEST(YoungGenerationLargeObjectAllocationMarkCompact) {
+ if (FLAG_minor_mc) return;
+ FLAG_young_generation_large_objects = true;
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
+ Heap* heap = CcTest::heap();
+ Isolate* isolate = heap->isolate();
+ if (!isolate->serializer_enabled()) return;
+
+ // TODO(hpayer): Update the test as soon as we have a tenure limit for LO.
+ Handle<FixedArray> array_small = isolate->factory()->NewFixedArray(200000);
+ MemoryChunk* chunk = MemoryChunk::FromHeapObject(*array_small);
+ CHECK_EQ(NEW_LO_SPACE, chunk->owner()->identity());
+ CHECK(chunk->IsFlagSet(MemoryChunk::IN_TO_SPACE));
+
+ Handle<Object> number = isolate->factory()->NewHeapNumber(123.456);
+ array_small->set(0, *number);
+
+ CcTest::CollectGarbage(OLD_SPACE);
+
+ // After the first full GC array_small will be in the old generation
+ // large object space.
+ chunk = MemoryChunk::FromHeapObject(*array_small);
+ CHECK_EQ(LO_SPACE, chunk->owner()->identity());
+ CHECK(!chunk->IsFlagSet(MemoryChunk::IN_TO_SPACE));
+
+ CcTest::CollectAllAvailableGarbage();
+}
+
+TEST(YoungGenerationLargeObjectAllocationReleaseScavenger) {
+ if (FLAG_minor_mc) return;
+ FLAG_young_generation_large_objects = true;
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
+ Heap* heap = CcTest::heap();
+ Isolate* isolate = heap->isolate();
+ if (!isolate->serializer_enabled()) return;
+
+ {
+ HandleScope scope(isolate);
+ for (int i = 0; i < 10; i++) {
+ Handle<FixedArray> array_small = isolate->factory()->NewFixedArray(20000);
+ MemoryChunk* chunk = MemoryChunk::FromHeapObject(*array_small);
+ CHECK_EQ(NEW_LO_SPACE, chunk->owner()->identity());
+ CHECK(chunk->IsFlagSet(MemoryChunk::IN_TO_SPACE));
+ }
+ }
+
+ CcTest::CollectGarbage(NEW_SPACE);
+ CHECK(isolate->heap()->new_lo_space()->IsEmpty());
+ CHECK_EQ(0, isolate->heap()->new_lo_space()->Size());
+ CHECK_EQ(0, isolate->heap()->new_lo_space()->SizeOfObjects());
+ CHECK(isolate->heap()->lo_space()->IsEmpty());
+ CHECK_EQ(0, isolate->heap()->lo_space()->Size());
+ CHECK_EQ(0, isolate->heap()->lo_space()->SizeOfObjects());
+}
+
TEST(UncommitUnusedLargeObjectMemory) {
CcTest::InitializeVM();
v8::HandleScope scope(CcTest::isolate());
Heap* heap = CcTest::heap();
Isolate* isolate = heap->isolate();
- Handle<FixedArray> array = isolate->factory()->NewFixedArray(200000);
- MemoryChunk* chunk = MemoryChunk::FromAddress(array->address());
+ Handle<FixedArray> array = isolate->factory()->NewFixedArray(200000, TENURED);
+ MemoryChunk* chunk = MemoryChunk::FromHeapObject(*array);
CHECK(chunk->owner()->identity() == LO_SPACE);
intptr_t size_before = array->Size();
@@ -5735,71 +5888,71 @@ TEST(RememberedSetRemoveRange) {
Heap* heap = CcTest::heap();
Isolate* isolate = heap->isolate();
- Handle<FixedArray> array = isolate->factory()->NewFixedArray(Page::kPageSize /
- kPointerSize);
- MemoryChunk* chunk = MemoryChunk::FromAddress(array->address());
+ Handle<FixedArray> array =
+ isolate->factory()->NewFixedArray(Page::kPageSize / kTaggedSize, TENURED);
+ MemoryChunk* chunk = MemoryChunk::FromHeapObject(*array);
CHECK(chunk->owner()->identity() == LO_SPACE);
Address start = array->address();
// Maps slot to boolean indicator of whether the slot should be in the set.
std::map<Address, bool> slots;
slots[start + 0] = true;
- slots[start + kPointerSize] = true;
- slots[start + Page::kPageSize - kPointerSize] = true;
+ slots[start + kTaggedSize] = true;
+ slots[start + Page::kPageSize - kTaggedSize] = true;
slots[start + Page::kPageSize] = true;
- slots[start + Page::kPageSize + kPointerSize] = true;
- slots[chunk->area_end() - kPointerSize] = true;
+ slots[start + Page::kPageSize + kTaggedSize] = true;
+ slots[chunk->area_end() - kTaggedSize] = true;
for (auto x : slots) {
RememberedSet<OLD_TO_NEW>::Insert(chunk, x.first);
}
RememberedSet<OLD_TO_NEW>::Iterate(chunk,
- [&slots](Address addr) {
- CHECK(slots[addr]);
+ [&slots](MaybeObjectSlot slot) {
+ CHECK(slots[slot.address()]);
return KEEP_SLOT;
},
SlotSet::PREFREE_EMPTY_BUCKETS);
- RememberedSet<OLD_TO_NEW>::RemoveRange(chunk, start, start + kPointerSize,
+ RememberedSet<OLD_TO_NEW>::RemoveRange(chunk, start, start + kTaggedSize,
SlotSet::FREE_EMPTY_BUCKETS);
slots[start] = false;
RememberedSet<OLD_TO_NEW>::Iterate(chunk,
- [&slots](Address addr) {
- CHECK(slots[addr]);
+ [&slots](MaybeObjectSlot slot) {
+ CHECK(slots[slot.address()]);
return KEEP_SLOT;
},
SlotSet::PREFREE_EMPTY_BUCKETS);
- RememberedSet<OLD_TO_NEW>::RemoveRange(chunk, start + kPointerSize,
+ RememberedSet<OLD_TO_NEW>::RemoveRange(chunk, start + kTaggedSize,
start + Page::kPageSize,
SlotSet::FREE_EMPTY_BUCKETS);
- slots[start + kPointerSize] = false;
- slots[start + Page::kPageSize - kPointerSize] = false;
+ slots[start + kTaggedSize] = false;
+ slots[start + Page::kPageSize - kTaggedSize] = false;
RememberedSet<OLD_TO_NEW>::Iterate(chunk,
- [&slots](Address addr) {
- CHECK(slots[addr]);
+ [&slots](MaybeObjectSlot slot) {
+ CHECK(slots[slot.address()]);
return KEEP_SLOT;
},
SlotSet::PREFREE_EMPTY_BUCKETS);
RememberedSet<OLD_TO_NEW>::RemoveRange(chunk, start,
- start + Page::kPageSize + kPointerSize,
+ start + Page::kPageSize + kTaggedSize,
SlotSet::FREE_EMPTY_BUCKETS);
slots[start + Page::kPageSize] = false;
RememberedSet<OLD_TO_NEW>::Iterate(chunk,
- [&slots](Address addr) {
- CHECK(slots[addr]);
+ [&slots](MaybeObjectSlot slot) {
+ CHECK(slots[slot.address()]);
return KEEP_SLOT;
},
SlotSet::PREFREE_EMPTY_BUCKETS);
- RememberedSet<OLD_TO_NEW>::RemoveRange(
- chunk, chunk->area_end() - kPointerSize, chunk->area_end(),
- SlotSet::FREE_EMPTY_BUCKETS);
- slots[chunk->area_end() - kPointerSize] = false;
+ RememberedSet<OLD_TO_NEW>::RemoveRange(chunk, chunk->area_end() - kTaggedSize,
+ chunk->area_end(),
+ SlotSet::FREE_EMPTY_BUCKETS);
+ slots[chunk->area_end() - kTaggedSize] = false;
RememberedSet<OLD_TO_NEW>::Iterate(chunk,
- [&slots](Address addr) {
- CHECK(slots[addr]);
+ [&slots](MaybeObjectSlot slot) {
+ CHECK(slots[slot.address()]);
return KEEP_SLOT;
},
SlotSet::PREFREE_EMPTY_BUCKETS);
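// A minimal sketch of the new callback shape, assuming MaybeObjectSlot
// exposes its raw location via address(): Iterate callbacks now receive a
// typed slot instead of a bare Address, and still return KEEP_SLOT or
// REMOVE_SLOT.
auto keep_if_expected = [&slots](MaybeObjectSlot slot) {
  return slots[slot.address()] ? KEEP_SLOT : REMOVE_SLOT;
};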
@@ -5822,12 +5975,13 @@ HEAP_TEST(Regress670675) {
if (marking->IsStopped()) {
marking->Start(i::GarbageCollectionReason::kTesting);
}
- size_t array_length = Page::kPageSize / kPointerSize + 100;
+ size_t array_length = Page::kPageSize / kTaggedSize + 100;
size_t n = heap->OldGenerationSpaceAvailable() / array_length;
for (size_t i = 0; i < n + 40; i++) {
{
HandleScope inner_scope(isolate);
- isolate->factory()->NewFixedArray(static_cast<int>(array_length));
+ isolate->factory()->NewFixedArray(static_cast<int>(array_length),
+ TENURED);
}
if (marking->IsStopped()) break;
double deadline = heap->MonotonicallyIncreasingTimeInMs() + 1;
@@ -5839,7 +5993,7 @@ HEAP_TEST(Regress670675) {
namespace {
Handle<Code> GenerateDummyImmovableCode(Isolate* isolate) {
- Assembler assm(AssemblerOptions{}, nullptr, 0);
+ Assembler assm(AssemblerOptions{});
const int kNumberOfNops = 1 << 10;
for (int i = 0; i < kNumberOfNops; i++) {
@@ -5875,8 +6029,8 @@ HEAP_TEST(Regress5831) {
Handle<Code> code = GenerateDummyImmovableCode(isolate);
array = FixedArray::SetAndGrow(isolate, array, i, code);
CHECK(heap->code_space()->Contains(code->address()) ||
- heap->lo_space()->Contains(*code));
- if (heap->lo_space()->Contains(*code)) {
+ heap->code_lo_space()->Contains(*code));
+ if (heap->code_lo_space()->Contains(*code)) {
overflowed_into_lospace = true;
break;
}
@@ -5893,7 +6047,7 @@ HEAP_TEST(Regress5831) {
CHECK(!heap->code_space()->first_page()->Contains(code->address()));
// Ensure it's not in large object space.
- MemoryChunk* chunk = MemoryChunk::FromAddress(code->address());
+ MemoryChunk* chunk = MemoryChunk::FromHeapObject(*code);
CHECK(chunk->owner()->identity() != LO_SPACE);
CHECK(chunk->NeverEvacuate());
}
@@ -5918,7 +6072,7 @@ TEST(Regress6800) {
}
CcTest::CollectGarbage(NEW_SPACE);
CHECK_EQ(0, RememberedSet<OLD_TO_NEW>::NumberOfPreFreedEmptyBuckets(
- MemoryChunk::FromAddress(root->address())));
+ MemoryChunk::FromHeapObject(*root)));
}
TEST(Regress6800LargeObject) {
@@ -5926,7 +6080,7 @@ TEST(Regress6800LargeObject) {
Isolate* isolate = CcTest::i_isolate();
HandleScope handle_scope(isolate);
- const int kRootLength = i::kMaxRegularHeapObjectSize / kPointerSize;
+ const int kRootLength = i::kMaxRegularHeapObjectSize / kTaggedSize;
Handle<FixedArray> root =
isolate->factory()->NewFixedArray(kRootLength, TENURED);
CcTest::heap()->lo_space()->Contains(*root);
@@ -5942,13 +6096,12 @@ TEST(Regress6800LargeObject) {
}
CcTest::CollectGarbage(OLD_SPACE);
CHECK_EQ(0, RememberedSet<OLD_TO_NEW>::NumberOfPreFreedEmptyBuckets(
- MemoryChunk::FromAddress(root->address())));
+ MemoryChunk::FromHeapObject(*root)));
}
HEAP_TEST(RegressMissingWriteBarrierInAllocate) {
if (!FLAG_incremental_marking) return;
ManualGCScope manual_gc_scope;
- FLAG_black_allocation = true;
CcTest::InitializeVM();
v8::HandleScope scope(CcTest::isolate());
Heap* heap = CcTest::heap();
@@ -5977,6 +6130,24 @@ HEAP_TEST(RegressMissingWriteBarrierInAllocate) {
CHECK(object->map()->IsMap());
}
+HEAP_TEST(MarkCompactEpochCounter) {
+ ManualGCScope manual_gc_scope;
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
+ Heap* heap = CcTest::heap();
+ unsigned epoch0 = heap->mark_compact_collector()->epoch();
+ CcTest::CollectGarbage(OLD_SPACE);
+ unsigned epoch1 = heap->mark_compact_collector()->epoch();
+ CHECK_EQ(epoch0 + 1, epoch1);
+ heap::SimulateIncrementalMarking(heap, true);
+ CcTest::CollectGarbage(OLD_SPACE);
+ unsigned epoch2 = heap->mark_compact_collector()->epoch();
+ CHECK_EQ(epoch1 + 1, epoch2);
+ CcTest::CollectGarbage(NEW_SPACE);
+ unsigned epoch3 = heap->mark_compact_collector()->epoch();
+ CHECK_EQ(epoch2, epoch3);
+}
+
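// The invariant MarkCompactEpochCounter pins down, as a minimal sketch
// (assuming an initialized heap): every full mark-compact bumps the
// collector epoch by exactly one, while scavenges leave it unchanged.
unsigned before = heap->mark_compact_collector()->epoch();
CcTest::CollectGarbage(OLD_SPACE);  // Full GC: epoch + 1.
CcTest::CollectGarbage(NEW_SPACE);  // Scavenge: epoch unchanged.
CHECK_EQ(before + 1, heap->mark_compact_collector()->epoch());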
UNINITIALIZED_TEST(ReinitializeStringHashSeed) {
// Enable rehashing and create an isolate and context.
i::FLAG_rehash_snapshot = true;
@@ -6120,6 +6291,8 @@ struct OutOfMemoryState {
size_t old_generation_capacity_at_oom;
size_t memory_allocator_size_at_oom;
size_t new_space_capacity_at_oom;
+ size_t current_heap_limit;
+ size_t initial_heap_limit;
};
size_t NearHeapLimitCallback(void* raw_state, size_t current_heap_limit,
@@ -6130,15 +6303,18 @@ size_t NearHeapLimitCallback(void* raw_state, size_t current_heap_limit,
state->old_generation_capacity_at_oom = heap->OldGenerationCapacity();
state->memory_allocator_size_at_oom = heap->memory_allocator()->Size();
state->new_space_capacity_at_oom = heap->new_space()->Capacity();
+ state->current_heap_limit = current_heap_limit;
+ state->initial_heap_limit = initial_heap_limit;
return initial_heap_limit + 100 * MB;
}
size_t MemoryAllocatorSizeFromHeapCapacity(size_t capacity) {
// Size to capacity factor.
- double factor = Page::kPageSize * 1.0 / Page::kAllocatableMemory;
+ double factor =
+ Page::kPageSize * 1.0 / MemoryChunkLayout::AllocatableMemoryInDataPage();
// Some tables (e.g. deoptimization table) are allocated directly with the
// memory allocator. Allow some slack to account for them.
- size_t slack = 1 * MB;
+ size_t slack = 5 * MB;
return static_cast<size_t>(capacity * factor) + slack;
}
@@ -6210,6 +6386,44 @@ UNINITIALIZED_TEST(OutOfMemoryLargeObjects) {
reinterpret_cast<v8::Isolate*>(isolate)->Dispose();
}
+UNINITIALIZED_TEST(RestoreHeapLimit) {
+ if (FLAG_stress_incremental_marking) return;
+#ifdef VERIFY_HEAP
+ if (FLAG_verify_heap) return;
+#endif
+ ManualGCScope manual_gc_scope;
+ const size_t kOldGenerationLimit = 300 * MB;
+ FLAG_max_old_space_size = kOldGenerationLimit / MB;
+ v8::Isolate::CreateParams create_params;
+ create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
+ Isolate* isolate =
+ reinterpret_cast<Isolate*>(v8::Isolate::New(create_params));
+ Heap* heap = isolate->heap();
+ Factory* factory = isolate->factory();
+ OutOfMemoryState state;
+ state.heap = heap;
+ state.oom_triggered = false;
+ heap->AddNearHeapLimitCallback(NearHeapLimitCallback, &state);
+ heap->AutomaticallyRestoreInitialHeapLimit(0.5);
+ const int kFixedArrayLength = 1000000;
+ {
+ HandleScope handle_scope(isolate);
+ while (!state.oom_triggered) {
+ factory->NewFixedArray(kFixedArrayLength);
+ }
+ }
+ heap->MemoryPressureNotification(MemoryPressureLevel::kCritical, true);
+ state.oom_triggered = false;
+ {
+ HandleScope handle_scope(isolate);
+ while (!state.oom_triggered) {
+ factory->NewFixedArray(kFixedArrayLength);
+ }
+ }
+ CHECK_EQ(state.current_heap_limit, state.initial_heap_limit);
+ reinterpret_cast<v8::Isolate*>(isolate)->Dispose();
+}
+
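// A minimal sketch of an embedder near-heap-limit callback matching the
// signature used above; ExtendOnce is a hypothetical name. Returning a
// larger value raises the limit, and AutomaticallyRestoreInitialHeapLimit(0.5)
// drops it back to the initial limit once usage falls below half of it.
size_t ExtendOnce(void* data, size_t current_limit, size_t initial_limit) {
  return initial_limit + 100 * MB;  // One-off 100 MB extension.
}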
void HeapTester::UncommitFromSpace(Heap* heap) {
heap->UncommitFromSpace();
heap->memory_allocator()->unmapper()->EnsureUnmappingCompleted();
@@ -6240,6 +6454,64 @@ TEST(Regress8014) {
CHECK_LE(heap->ms_count(), ms_count + 10);
}
+TEST(Regress8617) {
+ ManualGCScope manual_gc_scope;
+ FLAG_manual_evacuation_candidates_selection = true;
+ LocalContext env;
+ Isolate* isolate = CcTest::i_isolate();
+ Heap* heap = isolate->heap();
+ HandleScope scope(isolate);
+ heap::SimulateFullSpace(heap->old_space());
+ // Step 1. Create a function and ensure that it is in the old space.
+ Handle<Object> foo =
+ v8::Utils::OpenHandle(*CompileRun("function foo() { return 42; };"
+ "foo;"));
+ if (heap->InNewSpace(*foo)) {
+ CcTest::CollectGarbage(NEW_SPACE);
+ CcTest::CollectGarbage(NEW_SPACE);
+ }
+ // Step 2. Create an object with a reference to foo in the descriptor array.
+ CompileRun(
+ "var obj = {};"
+ "obj.method = foo;"
+ "obj;");
+ // Step 3. Make sure that foo moves during Mark-Compact.
+ Page* ec_page = Page::FromAddress(foo->ptr());
+ heap::ForceEvacuationCandidate(ec_page);
+ // Step 4. Start incremental marking.
+ heap::SimulateIncrementalMarking(heap, false);
+ CHECK(ec_page->IsEvacuationCandidate());
+ // Step 5. Install a new descriptor array on the map of the object.
+ // This runs the marking barrier for the descriptor array.
+ // In the bad case it sets the number of marked descriptors but does not
+ // change the color of the descriptor array.
+ CompileRun("obj.bar = 10;");
+ // Step 6. Promote the descriptor array to old space. During promotion
+ // the Scavenger will not record the slot of foo in the descriptor array.
+ CcTest::CollectGarbage(NEW_SPACE);
+ CcTest::CollectGarbage(NEW_SPACE);
+ // Step 7. Complete the Mark-Compact.
+ CcTest::CollectAllGarbage();
+ // Step 8. Use the descriptor for foo, which contains a stale pointer.
+ CompileRun("obj.method()");
+}
+
+HEAP_TEST(MemoryReducerActivationForSmallHeaps) {
+ ManualGCScope manual_gc_scope;
+ LocalContext env;
+ Isolate* isolate = CcTest::i_isolate();
+ Heap* heap = isolate->heap();
+ CHECK_EQ(heap->memory_reducer()->state_.action, MemoryReducer::Action::kDone);
+ HandleScope scope(isolate);
+ const size_t kActivationThreshold = 1 * MB;
+ size_t initial_capacity = heap->OldGenerationCapacity();
+ while (heap->OldGenerationCapacity() <
+ initial_capacity + kActivationThreshold) {
+ isolate->factory()->NewFixedArray(1 * KB, TENURED);
+ }
+ CHECK_EQ(heap->memory_reducer()->state_.action, MemoryReducer::Action::kWait);
+}
+
} // namespace heap
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/cctest/heap/test-invalidated-slots.cc b/deps/v8/test/cctest/heap/test-invalidated-slots.cc
index 9162573dd9..897f4d0242 100644
--- a/deps/v8/test/cctest/heap/test-invalidated-slots.cc
+++ b/deps/v8/test/cctest/heap/test-invalidated-slots.cc
@@ -19,7 +19,7 @@ namespace internal {
namespace heap {
Page* HeapTester::AllocateByteArraysOnPage(
- Heap* heap, std::vector<ByteArray*>* byte_arrays) {
+ Heap* heap, std::vector<ByteArray>* byte_arrays) {
PauseAllocationObserversScope pause_observers(heap);
const int kLength = 256 - ByteArray::kHeaderSize;
const int kSize = ByteArray::SizeFor(kLength);
@@ -31,16 +31,15 @@ Page* HeapTester::AllocateByteArraysOnPage(
{
AlwaysAllocateScope always_allocate(isolate);
heap::SimulateFullSpace(old_space);
- ByteArray* byte_array;
+ ByteArray byte_array;
CHECK(AllocateByteArrayForTest(heap, kLength, TENURED).To(&byte_array));
byte_arrays->push_back(byte_array);
- page = Page::FromAddress(byte_array->address());
- CHECK_EQ(page->area_size() % kSize, 0u);
+ page = Page::FromHeapObject(byte_array);
size_t n = page->area_size() / kSize;
for (size_t i = 1; i < n; i++) {
CHECK(AllocateByteArrayForTest(heap, kLength, TENURED).To(&byte_array));
byte_arrays->push_back(byte_array);
- CHECK_EQ(page, Page::FromAddress(byte_array->address()));
+ CHECK_EQ(page, Page::FromHeapObject(byte_array));
}
}
CHECK_NULL(page->invalidated_slots());
@@ -50,13 +49,13 @@ Page* HeapTester::AllocateByteArraysOnPage(
HEAP_TEST(InvalidatedSlotsNoInvalidatedRanges) {
CcTest::InitializeVM();
Heap* heap = CcTest::heap();
- std::vector<ByteArray*> byte_arrays;
+ std::vector<ByteArray> byte_arrays;
Page* page = AllocateByteArraysOnPage(heap, &byte_arrays);
InvalidatedSlotsFilter filter(page);
- for (auto byte_array : byte_arrays) {
+ for (ByteArray byte_array : byte_arrays) {
Address start = byte_array->address() + ByteArray::kHeaderSize;
Address end = byte_array->address() + byte_array->Size();
- for (Address addr = start; addr < end; addr += kPointerSize) {
+ for (Address addr = start; addr < end; addr += kTaggedSize) {
CHECK(filter.IsValid(addr));
}
}
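// What the filter checks, as a minimal sketch, assuming the page-level
// registration helper used elsewhere in this file: registering an object
// as invalidated flips IsValid() for exactly its own slot range.
page->RegisterObjectWithInvalidatedSlots(byte_array, byte_array->Size());
InvalidatedSlotsFilter f(page);
CHECK(!f.IsValid(byte_array->address() + ByteArray::kHeaderSize));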
@@ -65,7 +64,7 @@ HEAP_TEST(InvalidatedSlotsNoInvalidatedRanges) {
HEAP_TEST(InvalidatedSlotsSomeInvalidatedRanges) {
CcTest::InitializeVM();
Heap* heap = CcTest::heap();
- std::vector<ByteArray*> byte_arrays;
+ std::vector<ByteArray> byte_arrays;
Page* page = AllocateByteArraysOnPage(heap, &byte_arrays);
// Register every second byte array as invalidated.
for (size_t i = 0; i < byte_arrays.size(); i += 2) {
@@ -74,10 +73,10 @@ HEAP_TEST(InvalidatedSlotsSomeInvalidatedRanges) {
}
InvalidatedSlotsFilter filter(page);
for (size_t i = 0; i < byte_arrays.size(); i++) {
- ByteArray* byte_array = byte_arrays[i];
+ ByteArray byte_array = byte_arrays[i];
Address start = byte_array->address() + ByteArray::kHeaderSize;
Address end = byte_array->address() + byte_array->Size();
- for (Address addr = start; addr < end; addr += kPointerSize) {
+ for (Address addr = start; addr < end; addr += kTaggedSize) {
if (i % 2 == 0) {
CHECK(!filter.IsValid(addr));
} else {
@@ -90,7 +89,7 @@ HEAP_TEST(InvalidatedSlotsSomeInvalidatedRanges) {
HEAP_TEST(InvalidatedSlotsAllInvalidatedRanges) {
CcTest::InitializeVM();
Heap* heap = CcTest::heap();
- std::vector<ByteArray*> byte_arrays;
+ std::vector<ByteArray> byte_arrays;
Page* page = AllocateByteArraysOnPage(heap, &byte_arrays);
// Register all the byte arrays as invalidated.
for (size_t i = 0; i < byte_arrays.size(); i++) {
@@ -99,10 +98,10 @@ HEAP_TEST(InvalidatedSlotsAllInvalidatedRanges) {
}
InvalidatedSlotsFilter filter(page);
for (size_t i = 0; i < byte_arrays.size(); i++) {
- ByteArray* byte_array = byte_arrays[i];
+ ByteArray byte_array = byte_arrays[i];
Address start = byte_array->address() + ByteArray::kHeaderSize;
Address end = byte_array->address() + byte_array->Size();
- for (Address addr = start; addr < end; addr += kPointerSize) {
+ for (Address addr = start; addr < end; addr += kTaggedSize) {
CHECK(!filter.IsValid(addr));
}
}
@@ -112,7 +111,7 @@ HEAP_TEST(InvalidatedSlotsAfterTrimming) {
ManualGCScope manual_gc_scope;
CcTest::InitializeVM();
Heap* heap = CcTest::heap();
- std::vector<ByteArray*> byte_arrays;
+ std::vector<ByteArray> byte_arrays;
Page* page = AllocateByteArraysOnPage(heap, &byte_arrays);
// Register all the byte arrays as invalidated.
for (size_t i = 0; i < byte_arrays.size(); i++) {
@@ -123,11 +122,11 @@ HEAP_TEST(InvalidatedSlotsAfterTrimming) {
// considered invalid if the old space page was swept.
InvalidatedSlotsFilter filter(page);
for (size_t i = 0; i < byte_arrays.size(); i++) {
- ByteArray* byte_array = byte_arrays[i];
+ ByteArray byte_array = byte_arrays[i];
Address start = byte_array->address() + ByteArray::kHeaderSize;
Address end = byte_array->address() + byte_array->Size();
heap->RightTrimFixedArray(byte_array, byte_array->length());
- for (Address addr = start; addr < end; addr += kPointerSize) {
+ for (Address addr = start; addr < end; addr += kTaggedSize) {
CHECK_EQ(filter.IsValid(addr), page->SweepingDone());
}
}
@@ -137,7 +136,7 @@ HEAP_TEST(InvalidatedSlotsEvacuationCandidate) {
ManualGCScope manual_gc_scope;
CcTest::InitializeVM();
Heap* heap = CcTest::heap();
- std::vector<ByteArray*> byte_arrays;
+ std::vector<ByteArray> byte_arrays;
Page* page = AllocateByteArraysOnPage(heap, &byte_arrays);
page->MarkEvacuationCandidate();
// Register all the byte arrays as invalidated.
@@ -150,10 +149,10 @@ HEAP_TEST(InvalidatedSlotsEvacuationCandidate) {
// All slots must still be valid.
InvalidatedSlotsFilter filter(page);
for (size_t i = 0; i < byte_arrays.size(); i++) {
- ByteArray* byte_array = byte_arrays[i];
+ ByteArray byte_array = byte_arrays[i];
Address start = byte_array->address() + ByteArray::kHeaderSize;
Address end = byte_array->address() + byte_array->Size();
- for (Address addr = start; addr < end; addr += kPointerSize) {
+ for (Address addr = start; addr < end; addr += kTaggedSize) {
CHECK(filter.IsValid(addr));
}
}
@@ -162,7 +161,7 @@ HEAP_TEST(InvalidatedSlotsEvacuationCandidate) {
HEAP_TEST(InvalidatedSlotsResetObjectRegression) {
CcTest::InitializeVM();
Heap* heap = CcTest::heap();
- std::vector<ByteArray*> byte_arrays;
+ std::vector<ByteArray> byte_arrays;
Page* page = AllocateByteArraysOnPage(heap, &byte_arrays);
// Ensure that the first array has a smaller size than the rest.
heap->RightTrimFixedArray(byte_arrays[0], byte_arrays[0]->length() - 8);
@@ -174,10 +173,10 @@ HEAP_TEST(InvalidatedSlotsResetObjectRegression) {
// All slots must still be invalid.
InvalidatedSlotsFilter filter(page);
for (size_t i = 0; i < byte_arrays.size(); i++) {
- ByteArray* byte_array = byte_arrays[i];
+ ByteArray byte_array = byte_arrays[i];
Address start = byte_array->address() + ByteArray::kHeaderSize;
Address end = byte_array->address() + byte_array->Size();
- for (Address addr = start; addr < end; addr += kPointerSize) {
+ for (Address addr = start; addr < end; addr += kTaggedSize) {
CHECK(!filter.IsValid(addr));
}
}
@@ -247,8 +246,8 @@ HEAP_TEST(InvalidatedSlotsRightTrimLargeFixedArray) {
Handle<FixedArray> trimmed;
{
AlwaysAllocateScope always_allocate(isolate);
- trimmed =
- factory->NewFixedArray(kMaxRegularHeapObjectSize / kPointerSize + 100);
+ trimmed = factory->NewFixedArray(
+ kMaxRegularHeapObjectSize / kTaggedSize + 100, TENURED);
DCHECK(MemoryChunk::FromHeapObject(*trimmed)->InLargeObjectSpace());
}
heap::SimulateIncrementalMarking(heap);
@@ -331,27 +330,24 @@ HEAP_TEST(InvalidatedSlotsFastToSlow) {
// Start incremental marking.
heap::SimulateIncrementalMarking(heap);
// Set properties to point to the evacuation candidate.
- JSReceiver::SetProperty(isolate, obj, prop_name1, evacuated,
- LanguageMode::kSloppy)
+ Object::SetProperty(isolate, obj, prop_name1, evacuated,
+ LanguageMode::kSloppy)
.Check();
- JSReceiver::SetProperty(isolate, obj, prop_name2, evacuated,
- LanguageMode::kSloppy)
+ Object::SetProperty(isolate, obj, prop_name2, evacuated,
+ LanguageMode::kSloppy)
.Check();
- JSReceiver::SetProperty(isolate, obj, prop_name3, evacuated,
- LanguageMode::kSloppy)
+ Object::SetProperty(isolate, obj, prop_name3, evacuated,
+ LanguageMode::kSloppy)
.Check();
{
HandleScope scope(isolate);
Handle<HeapObject> dead = factory->NewFixedArray(1);
- JSReceiver::SetProperty(isolate, obj, prop_name1, dead,
- LanguageMode::kSloppy)
+ Object::SetProperty(isolate, obj, prop_name1, dead, LanguageMode::kSloppy)
.Check();
- JSReceiver::SetProperty(isolate, obj, prop_name2, dead,
- LanguageMode::kSloppy)
+ Object::SetProperty(isolate, obj, prop_name2, dead, LanguageMode::kSloppy)
.Check();
- JSReceiver::SetProperty(isolate, obj, prop_name3, dead,
- LanguageMode::kSloppy)
+ Object::SetProperty(isolate, obj, prop_name3, dead, LanguageMode::kSloppy)
.Check();
Handle<Map> map(obj->map(), isolate);
Handle<Map> normalized_map =
diff --git a/deps/v8/test/cctest/heap/test-lab.cc b/deps/v8/test/cctest/heap/test-lab.cc
index ae0bfd969a..94f652e037 100644
--- a/deps/v8/test/cctest/heap/test-lab.cc
+++ b/deps/v8/test/cctest/heap/test-lab.cc
@@ -26,7 +26,7 @@ static void VerifyIterable(v8::internal::Address base,
v8::internal::Address limit,
std::vector<intptr_t> expected_size) {
CHECK_LE(base, limit);
- HeapObject* object = nullptr;
+ HeapObject object;
size_t counter = 0;
while (base < limit) {
object = HeapObject::FromAddress(base);
@@ -42,7 +42,7 @@ static void VerifyIterable(v8::internal::Address base,
static bool AllocateFromLab(Heap* heap, LocalAllocationBuffer* lab,
intptr_t size_in_bytes,
AllocationAlignment alignment = kWordAligned) {
- HeapObject* obj;
+ HeapObject obj;
AllocationResult result =
lab->AllocateRawAligned(static_cast<int>(size_in_bytes), alignment);
if (result.To(&obj)) {
@@ -63,7 +63,6 @@ TEST(InvalidLab) {
TEST(UnusedLabImplicitClose) {
CcTest::InitializeVM();
Heap* heap = CcTest::heap();
- heap->root(RootIndex::kOnePointerFillerMap);
const int kLabSize = 4 * KB;
Address base = AllocateLabBackingStore(heap, kLabSize);
Address limit = base + kLabSize;
diff --git a/deps/v8/test/cctest/heap/test-mark-compact.cc b/deps/v8/test/cctest/heap/test-mark-compact.cc
index 1e21f92a65..4f141af7a5 100644
--- a/deps/v8/test/cctest/heap/test-mark-compact.cc
+++ b/deps/v8/test/cctest/heap/test-mark-compact.cc
@@ -100,7 +100,7 @@ HEAP_TEST(NoPromotion) {
// allocation failure.
AllocationResult HeapTester::AllocateMapForTest(Isolate* isolate) {
Heap* heap = isolate->heap();
- HeapObject* obj;
+ HeapObject obj;
AllocationResult alloc = heap->AllocateRaw(Map::kSize, MAP_SPACE);
if (!alloc.To(&obj)) return alloc;
obj->set_map_after_allocation(ReadOnlyRoots(heap).meta_map(),
@@ -117,17 +117,17 @@ AllocationResult HeapTester::AllocateFixedArrayForTest(
DCHECK(length >= 0 && length <= FixedArray::kMaxLength);
int size = FixedArray::SizeFor(length);
AllocationSpace space = heap->SelectSpace(pretenure);
- HeapObject* obj;
+ HeapObject obj;
{
AllocationResult result = heap->AllocateRaw(size, space);
if (!result.To(&obj)) return result;
}
obj->set_map_after_allocation(ReadOnlyRoots(heap).fixed_array_map(),
SKIP_WRITE_BARRIER);
- FixedArray* array = FixedArray::cast(obj);
+ FixedArray array = FixedArray::cast(obj);
array->set_length(length);
- MemsetPointer(array->data_start(), ReadOnlyRoots(heap).undefined_value(),
- length);
+ MemsetTagged(array->data_start(), ReadOnlyRoots(heap).undefined_value(),
+ length);
return array;
}
@@ -165,8 +165,8 @@ HEAP_TEST(MarkCompactCollector) {
// allocate some garbage
Handle<String> func_name = factory->InternalizeUtf8String("theFunction");
Handle<JSFunction> function = factory->NewFunctionForTest(func_name);
- JSReceiver::SetProperty(isolate, global, func_name, function,
- LanguageMode::kSloppy)
+ Object::SetProperty(isolate, global, func_name, function,
+ LanguageMode::kSloppy)
.Check();
factory->NewJSObject(function);
@@ -184,13 +184,12 @@ HEAP_TEST(MarkCompactCollector) {
Handle<JSObject> obj = factory->NewJSObject(function);
Handle<String> obj_name = factory->InternalizeUtf8String("theObject");
- JSReceiver::SetProperty(isolate, global, obj_name, obj,
- LanguageMode::kSloppy)
+ Object::SetProperty(isolate, global, obj_name, obj, LanguageMode::kSloppy)
.Check();
Handle<String> prop_name = factory->InternalizeUtf8String("theSlot");
Handle<Smi> twenty_three(Smi::FromInt(23), isolate);
- JSReceiver::SetProperty(isolate, obj, prop_name, twenty_three,
- LanguageMode::kSloppy)
+ Object::SetProperty(isolate, obj, prop_name, twenty_three,
+ LanguageMode::kSloppy)
.Check();
}
@@ -367,7 +366,7 @@ TEST(Regress5829) {
Address old_end = array->address() + array->Size();
// Right trim the array without clearing the mark bits.
array->set_length(9);
- heap->CreateFillerObjectAt(old_end - kPointerSize, kPointerSize,
+ heap->CreateFillerObjectAt(old_end - kTaggedSize, kTaggedSize,
ClearRecordedSlots::kNo);
heap->old_space()->FreeLinearAllocationArea();
Page* page = Page::FromAddress(array->address());
diff --git a/deps/v8/test/cctest/heap/test-page-promotion.cc b/deps/v8/test/cctest/heap/test-page-promotion.cc
index 97f50203f7..2db538d484 100644
--- a/deps/v8/test/cctest/heap/test-page-promotion.cc
+++ b/deps/v8/test/cctest/heap/test-page-promotion.cc
@@ -15,6 +15,9 @@ namespace v8 {
namespace internal {
namespace heap {
+// Tests don't work when --optimize-for-size is set.
+#ifndef V8_LITE_MODE
+
namespace {
v8::Isolate* NewIsolateForPagePromotion(int min_semi_space_size = 8,
@@ -42,7 +45,8 @@ v8::Isolate* NewIsolateForPagePromotion(int min_semi_space_size = 8,
Page* FindLastPageInNewSpace(std::vector<Handle<FixedArray>>& handles) {
for (auto rit = handles.rbegin(); rit != handles.rend(); ++rit) {
- Page* candidate = Page::FromAddress((*rit)->address());
+ // One deref gets the Handle, the second deref gets the FixedArray.
+ Page* candidate = Page::FromHeapObject(**rit);
if (candidate->InNewSpace()) return candidate;
}
return nullptr;
@@ -73,8 +77,9 @@ UNINITIALIZED_TEST(PagePromotion_NewToOld) {
// To perform a sanity check on live bytes we need to mark the heap.
heap::SimulateIncrementalMarking(heap, true);
// Sanity check that the page meets the requirements for promotion.
- const int threshold_bytes =
- FLAG_page_promotion_threshold * Page::kAllocatableMemory / 100;
+ const int threshold_bytes = static_cast<int>(
+ FLAG_page_promotion_threshold *
+ MemoryChunkLayout::AllocatableMemoryInDataPage() / 100);
CHECK_GE(heap->incremental_marking()->marking_state()->live_bytes(
to_be_promoted_page),
threshold_bytes);
@@ -107,7 +112,7 @@ UNINITIALIZED_TEST(PagePromotion_NewToNew) {
// Last object in handles should definitely be on a page that does not
// contain the age mark, thus qualifying for moving.
Handle<FixedArray> last_object = handles.back();
- Page* to_be_promoted_page = Page::FromAddress(last_object->address());
+ Page* to_be_promoted_page = Page::FromHeapObject(*last_object);
CHECK(!to_be_promoted_page->Contains(heap->new_space()->age_mark()));
CHECK(to_be_promoted_page->Contains(last_object->address()));
CHECK(heap->new_space()->ToSpaceContainsSlow(last_object->address()));
@@ -144,7 +149,7 @@ UNINITIALIZED_TEST(PagePromotion_NewToNewJSArrayBuffer) {
// First object in handles should be on the same page as the allocated
// JSArrayBuffer.
Handle<FixedArray> first_object = handles.front();
- Page* to_be_promoted_page = Page::FromAddress(first_object->address());
+ Page* to_be_promoted_page = Page::FromHeapObject(*first_object);
CHECK(!to_be_promoted_page->Contains(heap->new_space()->age_mark()));
CHECK(to_be_promoted_page->Contains(first_object->address()));
CHECK(to_be_promoted_page->Contains(buffer->address()));
@@ -186,7 +191,7 @@ UNINITIALIZED_TEST(PagePromotion_NewToOldJSArrayBuffer) {
// First object in handles should be on the same page as the allocated
// JSArrayBuffer.
Handle<FixedArray> first_object = handles.front();
- Page* to_be_promoted_page = Page::FromAddress(first_object->address());
+ Page* to_be_promoted_page = Page::FromHeapObject(*first_object);
CHECK(!to_be_promoted_page->Contains(heap->new_space()->age_mark()));
CHECK(to_be_promoted_page->Contains(first_object->address()));
CHECK(to_be_promoted_page->Contains(buffer->address()));
@@ -223,7 +228,7 @@ UNINITIALIZED_HEAP_TEST(Regress658718) {
// Last object in handles should definitely be on a page that does not
// contain the age mark, thus qualifying for moving.
Handle<FixedArray> last_object = handles.back();
- Page* to_be_promoted_page = Page::FromAddress(last_object->address());
+ Page* to_be_promoted_page = Page::FromHeapObject(*last_object);
CHECK(!to_be_promoted_page->Contains(heap->new_space()->age_mark()));
CHECK(to_be_promoted_page->Contains(last_object->address()));
CHECK(heap->new_space()->ToSpaceContainsSlow(last_object->address()));
@@ -241,6 +246,8 @@ UNINITIALIZED_HEAP_TEST(Regress658718) {
isolate->Dispose();
}
+#endif // V8_LITE_MODE
+
} // namespace heap
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/cctest/heap/test-spaces.cc b/deps/v8/test/cctest/heap/test-spaces.cc
index e03d8229b3..8219c1487d 100644
--- a/deps/v8/test/cctest/heap/test-spaces.cc
+++ b/deps/v8/test/cctest/heap/test-spaces.cc
@@ -32,6 +32,7 @@
#include "src/heap/factory.h"
#include "src/heap/spaces-inl.h"
#include "src/objects-inl.h"
+#include "src/objects/free-space.h"
#include "src/snapshot/snapshot.h"
#include "test/cctest/cctest.h"
#include "test/cctest/heap/heap-tester.h"
@@ -98,19 +99,19 @@ static void VerifyMemoryChunk(Isolate* isolate, Heap* heap,
v8::PageAllocator* page_allocator =
memory_allocator->page_allocator(executable);
- size_t header_size = (executable == EXECUTABLE)
- ? MemoryAllocator::CodePageGuardStartOffset()
- : MemoryChunk::kObjectStartOffset;
+ size_t allocatable_memory_area_offset =
+ MemoryChunkLayout::ObjectStartOffsetInMemoryChunk(space->identity());
size_t guard_size =
- (executable == EXECUTABLE) ? MemoryAllocator::CodePageGuardSize() : 0;
+ (executable == EXECUTABLE) ? MemoryChunkLayout::CodePageGuardSize() : 0;
MemoryChunk* memory_chunk = memory_allocator->AllocateChunk(
reserve_area_size, commit_area_size, executable, space);
size_t reserved_size =
((executable == EXECUTABLE))
- ? RoundUp(header_size + guard_size + reserve_area_size + guard_size,
- page_allocator->CommitPageSize())
- : RoundUp(header_size + reserve_area_size,
+ ? allocatable_memory_area_offset +
+ RoundUp(reserve_area_size, page_allocator->CommitPageSize()) +
+ guard_size
+ : RoundUp(allocatable_memory_area_offset + reserve_area_size,
page_allocator->CommitPageSize());
CHECK(memory_chunk->size() == reserved_size);
CHECK(memory_chunk->area_start() <
@@ -213,6 +214,44 @@ TEST(MemoryAllocator) {
delete memory_allocator;
}
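+
+// Sanity checks for MemoryAllocator::ComputeDiscardMemoryArea. Judging by
+// the expectations below, it returns the page-aligned sub-region of a free
+// block that can be discarded once FreeSpace::kSize bytes are reserved for
+// the filler header, or an empty region when no whole commit page fits.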
+TEST(ComputeDiscardMemoryAreas) {
+ base::AddressRegion memory_area;
+ size_t page_size = MemoryAllocator::GetCommitPageSize();
+ size_t free_header_size = FreeSpace::kSize;
+
+ memory_area = MemoryAllocator::ComputeDiscardMemoryArea(0, 0);
+ CHECK_EQ(memory_area.begin(), 0);
+ CHECK_EQ(memory_area.size(), 0);
+
+ memory_area = MemoryAllocator::ComputeDiscardMemoryArea(
+ 0, page_size + free_header_size);
+ CHECK_EQ(memory_area.begin(), 0);
+ CHECK_EQ(memory_area.size(), 0);
+
+ memory_area = MemoryAllocator::ComputeDiscardMemoryArea(
+ page_size - free_header_size, page_size + free_header_size);
+ CHECK_EQ(memory_area.begin(), page_size);
+ CHECK_EQ(memory_area.size(), page_size);
+
+ memory_area = MemoryAllocator::ComputeDiscardMemoryArea(page_size, page_size);
+ CHECK_EQ(memory_area.begin(), 0);
+ CHECK_EQ(memory_area.size(), 0);
+
+ memory_area = MemoryAllocator::ComputeDiscardMemoryArea(
+ page_size / 2, page_size + page_size / 2);
+ CHECK_EQ(memory_area.begin(), page_size);
+ CHECK_EQ(memory_area.size(), page_size);
+
+ memory_area = MemoryAllocator::ComputeDiscardMemoryArea(
+ page_size / 2, page_size + page_size / 4);
+ CHECK_EQ(memory_area.begin(), 0);
+ CHECK_EQ(memory_area.size(), 0);
+
+ memory_area =
+ MemoryAllocator::ComputeDiscardMemoryArea(page_size / 2, page_size * 3);
+ CHECK_EQ(memory_area.begin(), page_size);
+ CHECK_EQ(memory_area.size(), page_size * 2);
+}
TEST(NewSpace) {
Isolate* isolate = CcTest::i_isolate();
@@ -269,10 +308,10 @@ TEST(LargeObjectSpace) {
int lo_size = Page::kPageSize;
- Object* obj = lo->AllocateRaw(lo_size, NOT_EXECUTABLE).ToObjectChecked();
+ Object obj = lo->AllocateRaw(lo_size).ToObjectChecked();
CHECK(obj->IsHeapObject());
- HeapObject* ho = HeapObject::cast(obj);
+ HeapObject ho = HeapObject::cast(obj);
CHECK(lo->Contains(HeapObject::cast(obj)));
@@ -281,18 +320,15 @@ TEST(LargeObjectSpace) {
CHECK(lo->Contains(ho));
while (true) {
- size_t available = lo->Available();
- { AllocationResult allocation = lo->AllocateRaw(lo_size, NOT_EXECUTABLE);
+ {
+ AllocationResult allocation = lo->AllocateRaw(lo_size);
if (allocation.IsRetry()) break;
}
- // The available value is conservative such that it may report
- // zero prior to heap exhaustion.
- CHECK(lo->Available() < available || available == 0);
}
CHECK(!lo->IsEmpty());
- CHECK(lo->AllocateRaw(lo_size, NOT_EXECUTABLE).IsRetry());
+ CHECK(lo->AllocateRaw(lo_size).IsRetry());
}
#ifndef DEBUG
@@ -359,30 +395,30 @@ TEST(SizeOfInitialHeap) {
}
#endif // DEBUG
-static HeapObject* AllocateUnaligned(NewSpace* space, int size) {
+static HeapObject AllocateUnaligned(NewSpace* space, int size) {
AllocationResult allocation = space->AllocateRawUnaligned(size);
CHECK(!allocation.IsRetry());
- HeapObject* filler = nullptr;
+ HeapObject filler;
CHECK(allocation.To(&filler));
space->heap()->CreateFillerObjectAt(filler->address(), size,
ClearRecordedSlots::kNo);
return filler;
}
-static HeapObject* AllocateUnaligned(PagedSpace* space, int size) {
+static HeapObject AllocateUnaligned(PagedSpace* space, int size) {
AllocationResult allocation = space->AllocateRaw(size, kDoubleUnaligned);
CHECK(!allocation.IsRetry());
- HeapObject* filler = nullptr;
+ HeapObject filler;
CHECK(allocation.To(&filler));
space->heap()->CreateFillerObjectAt(filler->address(), size,
ClearRecordedSlots::kNo);
return filler;
}
-static HeapObject* AllocateUnaligned(LargeObjectSpace* space, int size) {
- AllocationResult allocation = space->AllocateRaw(size, EXECUTABLE);
+static HeapObject AllocateUnaligned(LargeObjectSpace* space, int size) {
+ AllocationResult allocation = space->AllocateRaw(size);
CHECK(!allocation.IsRetry());
- HeapObject* filler = nullptr;
+ HeapObject filler;
CHECK(allocation.To(&filler));
return filler;
}
@@ -544,7 +580,7 @@ HEAP_TEST(Regress777177) {
AlwaysAllocateScope always_allocate(isolate);
heap::SimulateFullSpace(old_space);
AllocationResult result = old_space->AllocateRaw(filler_size, kWordAligned);
- HeapObject* obj = result.ToObjectChecked();
+ HeapObject obj = result.ToObjectChecked();
heap->CreateFillerObjectAt(obj->address(), filler_size,
ClearRecordedSlots::kNo);
}
@@ -554,7 +590,7 @@ HEAP_TEST(Regress777177) {
// top_on_previous_step_ to the next page.
AllocationResult result =
old_space->AllocateRaw(max_object_size, kWordAligned);
- HeapObject* obj = result.ToObjectChecked();
+ HeapObject obj = result.ToObjectChecked();
// Simulate allocation folding moving the top pointer back.
old_space->SetTopAndLimit(obj->address(), old_space->limit());
}
@@ -562,7 +598,7 @@ HEAP_TEST(Regress777177) {
{
// This triggers the assert in crbug.com/777177.
AllocationResult result = old_space->AllocateRaw(filler_size, kWordAligned);
- HeapObject* obj = result.ToObjectChecked();
+ HeapObject obj = result.ToObjectChecked();
heap->CreateFillerObjectAt(obj->address(), filler_size,
ClearRecordedSlots::kNo);
}
@@ -581,7 +617,7 @@ HEAP_TEST(Regress791582) {
int until_page_end = static_cast<int>(new_space->limit() - new_space->top());
- if (until_page_end % kPointerSize != 0) {
+ if (!IsAligned(until_page_end, kTaggedSize)) {
// The test works only if the allocation area size is a multiple of the
// pointer size. This is usually the case unless some allocation observer
// is already active (e.g. incremental marking observer).
@@ -594,7 +630,7 @@ HEAP_TEST(Regress791582) {
{
AllocationResult result =
new_space->AllocateRaw(until_page_end, kWordAligned);
- HeapObject* obj = result.ToObjectChecked();
+ HeapObject obj = result.ToObjectChecked();
heap->CreateFillerObjectAt(obj->address(), until_page_end,
ClearRecordedSlots::kNo);
// Simulate allocation folding moving the top pointer back.
@@ -604,7 +640,7 @@ HEAP_TEST(Regress791582) {
{
// This triggers the assert in crbug.com/791582.
AllocationResult result = new_space->AllocateRaw(256, kWordAligned);
- HeapObject* obj = result.ToObjectChecked();
+ HeapObject obj = result.ToObjectChecked();
heap->CreateFillerObjectAt(obj->address(), 256, ClearRecordedSlots::kNo);
}
new_space->RemoveAllocationObserver(&observer);
@@ -621,20 +657,20 @@ TEST(ShrinkPageToHighWaterMarkFreeSpaceEnd) {
// Prepare a page that only contains a single object and a trailing FreeSpace
// filler.
Handle<FixedArray> array = isolate->factory()->NewFixedArray(128, TENURED);
- Page* page = Page::FromAddress(array->address());
+ Page* page = Page::FromHeapObject(*array);
// Reset space so high water mark is consistent.
PagedSpace* old_space = CcTest::heap()->old_space();
old_space->FreeLinearAllocationArea();
old_space->ResetFreeList();
- HeapObject* filler =
- HeapObject::FromAddress(array->address() + array->Size());
+ HeapObject filler = HeapObject::FromAddress(array->address() + array->Size());
CHECK(filler->IsFreeSpace());
size_t shrunk = old_space->ShrinkPageToHighWaterMark(page);
- size_t should_have_shrunk =
- RoundDown(static_cast<size_t>(Page::kAllocatableMemory - array->Size()),
- CommitPageSize());
+ size_t should_have_shrunk = RoundDown(
+ static_cast<size_t>(MemoryChunkLayout::AllocatableMemoryInDataPage() -
+ array->Size()),
+ CommitPageSize());
CHECK_EQ(should_have_shrunk, shrunk);
}
@@ -648,7 +684,7 @@ TEST(ShrinkPageToHighWaterMarkNoFiller) {
std::vector<Handle<FixedArray>> arrays =
heap::FillOldSpacePageWithFixedArrays(CcTest::heap(), kFillerSize);
Handle<FixedArray> array = arrays.back();
- Page* page = Page::FromAddress(array->address());
+ Page* page = Page::FromHeapObject(*array);
CHECK_EQ(page->area_end(), array->address() + array->Size() + kFillerSize);
// Reset space so high water mark and fillers are consistent.
@@ -667,11 +703,11 @@ TEST(ShrinkPageToHighWaterMarkOneWordFiller) {
heap::SealCurrentObjects(CcTest::heap());
- const int kFillerSize = kPointerSize;
+ const int kFillerSize = kTaggedSize;
std::vector<Handle<FixedArray>> arrays =
heap::FillOldSpacePageWithFixedArrays(CcTest::heap(), kFillerSize);
Handle<FixedArray> array = arrays.back();
- Page* page = Page::FromAddress(array->address());
+ Page* page = Page::FromHeapObject(*array);
CHECK_EQ(page->area_end(), array->address() + array->Size() + kFillerSize);
// Reset space so high water mark and fillers are consistent.
@@ -679,8 +715,7 @@ TEST(ShrinkPageToHighWaterMarkOneWordFiller) {
old_space->FreeLinearAllocationArea();
old_space->ResetFreeList();
- HeapObject* filler =
- HeapObject::FromAddress(array->address() + array->Size());
+ HeapObject filler = HeapObject::FromAddress(array->address() + array->Size());
CHECK_EQ(filler->map(),
ReadOnlyRoots(CcTest::heap()).one_pointer_filler_map());
@@ -695,11 +730,11 @@ TEST(ShrinkPageToHighWaterMarkTwoWordFiller) {
heap::SealCurrentObjects(CcTest::heap());
- const int kFillerSize = 2 * kPointerSize;
+ const int kFillerSize = 2 * kTaggedSize;
std::vector<Handle<FixedArray>> arrays =
heap::FillOldSpacePageWithFixedArrays(CcTest::heap(), kFillerSize);
Handle<FixedArray> array = arrays.back();
- Page* page = Page::FromAddress(array->address());
+ Page* page = Page::FromHeapObject(*array);
CHECK_EQ(page->area_end(), array->address() + array->Size() + kFillerSize);
// Reset space so high water mark and fillers are consistent.
@@ -707,8 +742,7 @@ TEST(ShrinkPageToHighWaterMarkTwoWordFiller) {
old_space->FreeLinearAllocationArea();
old_space->ResetFreeList();
- HeapObject* filler =
- HeapObject::FromAddress(array->address() + array->Size());
+ HeapObject filler = HeapObject::FromAddress(array->address() + array->Size());
CHECK_EQ(filler->map(),
ReadOnlyRoots(CcTest::heap()).two_pointer_filler_map());
diff --git a/deps/v8/test/cctest/heap/test-weak-references.cc b/deps/v8/test/cctest/heap/test-weak-references.cc
index bbe4776b93..8a2ad3c184 100644
--- a/deps/v8/test/cctest/heap/test-weak-references.cc
+++ b/deps/v8/test/cctest/heap/test-weak-references.cc
@@ -6,6 +6,7 @@
#include "src/assembler-inl.h"
#include "src/heap/factory.h"
#include "src/isolate.h"
+#include "src/objects/smi.h"
#include "test/cctest/cctest.h"
#include "test/cctest/heap/heap-tester.h"
#include "test/cctest/heap/heap-utils.h"
@@ -42,7 +43,7 @@ TEST(WeakReferencesBasic) {
CreateFeedbackVectorForTest(CcTest::isolate(), factory);
CHECK(Heap::InNewSpace(*fv));
- MaybeObject* code_object = fv->optimized_code_weak_or_smi();
+ MaybeObject code_object = fv->optimized_code_weak_or_smi();
CHECK(code_object->IsSmi());
CcTest::CollectAllGarbage();
CHECK(Heap::InNewSpace(*fv));
@@ -52,7 +53,7 @@ TEST(WeakReferencesBasic) {
HandleScope inner_scope(isolate);
// Create a new Code.
- Assembler assm(AssemblerOptions{}, nullptr, 0);
+ Assembler assm(AssemblerOptions{});
assm.nop(); // supported on all architectures
CodeDesc desc;
assm.GetCode(isolate, &desc);
@@ -61,7 +62,7 @@ TEST(WeakReferencesBasic) {
CHECK(code->IsCode());
fv->set_optimized_code_weak_or_smi(HeapObjectReference::Weak(*code));
- HeapObject* code_heap_object;
+ HeapObject code_heap_object;
CHECK(fv->optimized_code_weak_or_smi()->GetHeapObjectIfWeak(
&code_heap_object));
CHECK_EQ(*code, code_heap_object);
@@ -97,12 +98,12 @@ TEST(WeakReferencesOldToOld) {
CHECK(heap->InOldSpace(*fixed_array));
fv->set_optimized_code_weak_or_smi(HeapObjectReference::Weak(*fixed_array));
- Page* page_before_gc = Page::FromAddress(fixed_array->address());
+ Page* page_before_gc = Page::FromHeapObject(*fixed_array);
heap::ForceEvacuationCandidate(page_before_gc);
CcTest::CollectAllGarbage();
CHECK(heap->InOldSpace(*fixed_array));
- HeapObject* heap_object;
+ HeapObject heap_object;
CHECK(fv->optimized_code_weak_or_smi()->GetHeapObjectIfWeak(&heap_object));
CHECK_EQ(heap_object, *fixed_array);
}
@@ -127,7 +128,7 @@ TEST(WeakReferencesOldToNew) {
CcTest::CollectAllGarbage();
- HeapObject* heap_object;
+ HeapObject heap_object;
CHECK(fv->optimized_code_weak_or_smi()->GetHeapObjectIfWeak(&heap_object));
CHECK_EQ(heap_object, *fixed_array);
}
@@ -152,7 +153,7 @@ TEST(WeakReferencesOldToNewScavenged) {
CcTest::CollectGarbage(NEW_SPACE);
- HeapObject* heap_object;
+ HeapObject heap_object;
CHECK(fv->optimized_code_weak_or_smi()->GetHeapObjectIfWeak(&heap_object));
CHECK_EQ(heap_object, *fixed_array);
}
@@ -171,7 +172,8 @@ TEST(WeakReferencesOldToCleared) {
Handle<FeedbackVector> fv =
CreateFeedbackVectorForTest(CcTest::isolate(), factory, TENURED);
CHECK(heap->InOldSpace(*fv));
- fv->set_optimized_code_weak_or_smi(HeapObjectReference::ClearedValue());
+ fv->set_optimized_code_weak_or_smi(
+ HeapObjectReference::ClearedValue(isolate));
CcTest::CollectAllGarbage();
CHECK(fv->optimized_code_weak_or_smi()->IsCleared());
@@ -191,7 +193,7 @@ TEST(ObjectMovesBeforeClearingWeakField) {
Handle<FeedbackVector> fv =
CreateFeedbackVectorForTest(CcTest::isolate(), factory);
CHECK(Heap::InNewSpace(*fv));
- FeedbackVector* fv_location = *fv;
+ FeedbackVector fv_location = *fv;
{
HandleScope inner_scope(isolate);
// Create a new FixedArray which the FeedbackVector will point to.
@@ -208,7 +210,7 @@ TEST(ObjectMovesBeforeClearingWeakField) {
// Scavenger will move *fv.
CcTest::CollectGarbage(NEW_SPACE);
- FeedbackVector* new_fv_location = *fv;
+ FeedbackVector new_fv_location = *fv;
CHECK_NE(fv_location, new_fv_location);
CHECK(fv->optimized_code_weak_or_smi()->IsWeak());
@@ -276,7 +278,7 @@ TEST(ObjectWithWeakReferencePromoted) {
CHECK(heap->InOldSpace(*fv));
CHECK(heap->InOldSpace(*fixed_array));
- HeapObject* heap_object;
+ HeapObject heap_object;
CHECK(fv->optimized_code_weak_or_smi()->GetHeapObjectIfWeak(&heap_object));
CHECK_EQ(heap_object, *fixed_array);
}
@@ -292,7 +294,8 @@ TEST(ObjectWithClearedWeakReferencePromoted) {
CreateFeedbackVectorForTest(CcTest::isolate(), factory);
CHECK(Heap::InNewSpace(*fv));
- fv->set_optimized_code_weak_or_smi(HeapObjectReference::ClearedValue());
+ fv->set_optimized_code_weak_or_smi(
+ HeapObjectReference::ClearedValue(isolate));
CcTest::CollectGarbage(NEW_SPACE);
CHECK(Heap::InNewSpace(*fv));
@@ -374,7 +377,7 @@ TEST(WeakArraysBasic) {
CHECK(Heap::InNewSpace(*array));
for (int i = 0; i < length; ++i) {
- HeapObject* heap_object;
+ HeapObject heap_object;
CHECK(array->Get(i)->GetHeapObjectIfStrong(&heap_object));
CHECK_EQ(heap_object, ReadOnlyRoots(heap).undefined_value());
}
@@ -406,7 +409,7 @@ TEST(WeakArraysBasic) {
// TODO(marja): update this when/if we do handle weak references in the new
// space.
CcTest::CollectGarbage(NEW_SPACE);
- HeapObject* heap_object;
+ HeapObject heap_object;
CHECK(array->Get(0)->GetHeapObjectIfWeak(&heap_object));
CHECK_EQ(Smi::cast(FixedArray::cast(heap_object)->get(0))->value(), 2016);
CHECK(array->Get(1)->GetHeapObjectIfWeak(&heap_object));
@@ -481,13 +484,13 @@ TEST(WeakArrayListBasic) {
CHECK(Heap::InNewSpace(*array));
CHECK_EQ(array->Get(0), HeapObjectReference::Weak(*index0));
- CHECK_EQ(Smi::ToInt(array->Get(1)->cast<Smi>()), 1);
+ CHECK_EQ(array->Get(1).ToSmi().value(), 1);
CHECK_EQ(array->Get(2), HeapObjectReference::Weak(*index2));
- CHECK_EQ(Smi::ToInt(array->Get(3)->cast<Smi>()), 3);
+ CHECK_EQ(array->Get(3).ToSmi().value(), 3);
CHECK_EQ(array->Get(4), HeapObjectReference::Weak(*index4));
- CHECK_EQ(Smi::ToInt(array->Get(5)->cast<Smi>()), 5);
+ CHECK_EQ(array->Get(5).ToSmi().value(), 5);
CHECK_EQ(array->Get(6), HeapObjectReference::Weak(*index6));
array = inner_scope.CloseAndEscape(array);
@@ -500,39 +503,39 @@ TEST(WeakArrayListBasic) {
// TODO(marja): update this when/if we do handle weak references in the new
// space.
CcTest::CollectGarbage(NEW_SPACE);
- HeapObject* heap_object;
+ HeapObject heap_object;
CHECK_EQ(array->length(), 8);
CHECK(array->Get(0)->GetHeapObjectIfWeak(&heap_object));
CHECK_EQ(Smi::cast(FixedArray::cast(heap_object)->get(0))->value(), 2016);
- CHECK_EQ(Smi::ToInt(array->Get(1)->cast<Smi>()), 1);
+ CHECK_EQ(array->Get(1).ToSmi().value(), 1);
CHECK(array->Get(2)->GetHeapObjectIfWeak(&heap_object));
CHECK_EQ(Smi::cast(FixedArray::cast(heap_object)->get(0))->value(), 2017);
- CHECK_EQ(Smi::ToInt(array->Get(3)->cast<Smi>()), 3);
+ CHECK_EQ(array->Get(3).ToSmi().value(), 3);
CHECK(array->Get(4)->GetHeapObjectIfWeak(&heap_object));
CHECK_EQ(Smi::cast(FixedArray::cast(heap_object)->get(0))->value(), 2018);
- CHECK_EQ(Smi::ToInt(array->Get(5)->cast<Smi>()), 5);
+ CHECK_EQ(array->Get(5).ToSmi().value(), 5);
CHECK(array->Get(6)->GetHeapObjectIfWeak(&heap_object));
CHECK_EQ(Smi::cast(FixedArray::cast(heap_object)->get(0))->value(), 2019);
- CHECK_EQ(Smi::ToInt(array->Get(7)->cast<Smi>()), 7);
+ CHECK_EQ(array->Get(7).ToSmi().value(), 7);
CcTest::CollectAllGarbage();
CHECK(heap->InOldSpace(*array));
CHECK_EQ(array->length(), 8);
CHECK(array->Get(0)->IsCleared());
- CHECK_EQ(Smi::ToInt(array->Get(1)->cast<Smi>()), 1);
+ CHECK_EQ(array->Get(1).ToSmi().value(), 1);
CHECK(array->Get(2)->GetHeapObjectIfWeak(&heap_object));
CHECK_EQ(Smi::cast(FixedArray::cast(heap_object)->get(0))->value(), 2017);
- CHECK_EQ(Smi::ToInt(array->Get(3)->cast<Smi>()), 3);
+ CHECK_EQ(array->Get(3).ToSmi().value(), 3);
CHECK(array->Get(4)->IsCleared());
- CHECK_EQ(Smi::ToInt(array->Get(5)->cast<Smi>()), 5);
+ CHECK_EQ(array->Get(5).ToSmi().value(), 5);
CHECK(array->Get(6)->IsCleared());
- CHECK_EQ(Smi::ToInt(array->Get(7)->cast<Smi>()), 7);
+ CHECK_EQ(array->Get(7).ToSmi().value(), 7);
}
TEST(WeakArrayListRemove) {
@@ -709,9 +712,9 @@ TEST(PrototypeUsersBasic) {
namespace {
-HeapObject* saved_heap_object = nullptr;
+HeapObject saved_heap_object;
-static void TestCompactCallback(HeapObject* value, int old_index,
+static void TestCompactCallback(HeapObject value, int old_index,
int new_index) {
saved_heap_object = value;
CHECK_EQ(old_index, 2);
@@ -756,7 +759,7 @@ TEST(PrototypeUsersCompacted) {
CHECK(array->Get(3)->IsCleared());
CHECK_EQ(array->length(), 3 + PrototypeUsers::kFirstIndex);
- WeakArrayList* new_array =
+ WeakArrayList new_array =
PrototypeUsers::Compact(array, heap, TestCompactCallback);
CHECK_EQ(new_array->length(), 1 + PrototypeUsers::kFirstIndex);
CHECK_EQ(saved_heap_object, *live_map);
diff --git a/deps/v8/test/cctest/interpreter/bytecode-expectations-printer.cc b/deps/v8/test/cctest/interpreter/bytecode-expectations-printer.cc
index a048e82e62..7e1d6329c8 100644
--- a/deps/v8/test/cctest/interpreter/bytecode-expectations-printer.cc
+++ b/deps/v8/test/cctest/interpreter/bytecode-expectations-printer.cc
@@ -18,7 +18,9 @@
#include "src/interpreter/interpreter-intrinsics.h"
#include "src/interpreter/interpreter.h"
#include "src/objects-inl.h"
+#include "src/objects/heap-number-inl.h"
#include "src/objects/module-inl.h"
+#include "src/ostreams.h"
#include "src/runtime/runtime.h"
#include "src/source-position-table.h"
#include "test/cctest/cctest.h"
@@ -275,7 +277,7 @@ void BytecodeExpectationsPrinter::PrintSourcePosition(
}
void BytecodeExpectationsPrinter::PrintV8String(std::ostream& stream,
- i::String* string) const {
+ i::String string) const {
stream << '"';
for (int i = 0, length = string->length(); i < length; ++i) {
stream << i::AsEscapedUC16ForJSON(string->Get(i));
@@ -332,7 +334,7 @@ void BytecodeExpectationsPrinter::PrintBytecodeSequence(
}
void BytecodeExpectationsPrinter::PrintConstantPool(
- std::ostream& stream, i::FixedArray* constant_pool) const {
+ std::ostream& stream, i::FixedArray constant_pool) const {
stream << "constant pool: [\n";
int num_constants = constant_pool->length();
if (num_constants > 0) {
diff --git a/deps/v8/test/cctest/interpreter/bytecode-expectations-printer.h b/deps/v8/test/cctest/interpreter/bytecode-expectations-printer.h
index 1d1bc437d0..53793c1751 100644
--- a/deps/v8/test/cctest/interpreter/bytecode-expectations-printer.h
+++ b/deps/v8/test/cctest/interpreter/bytecode-expectations-printer.h
@@ -73,7 +73,7 @@ class BytecodeExpectationsPrinter final {
SourcePositionTableIterator& source_iterator,
int bytecode_offset) const;
void PrintV8String(std::ostream& stream, // NOLINT
- i::String* string) const;
+ i::String string) const;
void PrintConstant(std::ostream& stream, // NOLINT
i::Handle<i::Object> constant) const;
void PrintFrameSize(std::ostream& stream, // NOLINT
@@ -81,7 +81,7 @@ class BytecodeExpectationsPrinter final {
void PrintBytecodeSequence(std::ostream& stream, // NOLINT
i::Handle<i::BytecodeArray> bytecode_array) const;
void PrintConstantPool(std::ostream& stream, // NOLINT
- i::FixedArray* constant_pool) const;
+ i::FixedArray constant_pool) const;
void PrintCodeSnippet(std::ostream& stream, // NOLINT
const std::string& body) const;
void PrintBytecodeArray(std::ostream& stream, // NOLINT
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ArrayLiterals.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ArrayLiterals.golden
index 31272f1c29..9392d60181 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ArrayLiterals.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ArrayLiterals.golden
@@ -143,7 +143,7 @@ snippet: "
"
frame size: 8
parameter count: 1
-bytecode array length: 86
+bytecode array length: 84
bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(CreateArrayLiteral), U8(0), U8(0), U8(37),
@@ -152,28 +152,27 @@ bytecodes: [
B(Star), R(2),
B(LdaConstant), U8(2),
/* 67 S> */ B(Star), R(1),
- B(LdaNamedProperty), R(0), U8(3), U8(5),
- B(Star), R(7),
- B(CallProperty0), R(7), R(0), U8(7),
- B(Mov), R(0), R(6),
+ B(LdaNamedProperty), R(0), U8(3), U8(2),
+ B(Star), R(6),
+ B(CallProperty0), R(6), R(0), U8(4),
+ B(Mov), R(0), R(5),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
- B(Star), R(5),
- B(LdaNamedProperty), R(5), U8(4), U8(9),
B(Star), R(4),
- B(CallProperty0), R(4), R(5), U8(11),
+ B(LdaNamedProperty), R(4), U8(4), U8(6),
B(Star), R(3),
+ B(CallProperty0), R(3), R(4), U8(15),
+ B(Star), R(7),
B(JumpIfJSReceiver), U8(7),
- B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(3), U8(1),
- B(LdaNamedProperty), R(3), U8(5), U8(13),
- B(JumpIfToBooleanTrue), U8(21),
- B(LdaNamedProperty), R(3), U8(6), U8(15),
- B(Star), R(3),
- B(StaInArrayLiteral), R(2), R(1), U8(3),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(7), U8(1),
+ B(LdaNamedProperty), R(7), U8(5), U8(17),
+ B(JumpIfToBooleanTrue), U8(19),
+ B(LdaNamedProperty), R(7), U8(6), U8(8),
+ B(StaInArrayLiteral), R(2), R(1), U8(13),
B(Ldar), R(1),
- B(Inc), U8(2),
+ B(Inc), U8(12),
B(Star), R(1),
- B(JumpLoop), U8(35), I8(0),
+ B(JumpLoop), U8(33), I8(0),
B(Ldar), R(2),
/* 71 S> */ B(Return),
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/AsyncGenerators.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/AsyncGenerators.golden
index f3ddec23a0..b0d3e93003 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/AsyncGenerators.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/AsyncGenerators.golden
@@ -14,7 +14,7 @@ snippet: "
"
frame size: 8
parameter count: 1
-bytecode array length: 189
+bytecode array length: 190
bytecodes: [
B(SwitchOnGeneratorState), R(0), U8(0), U8(2),
B(Mov), R(closure), R(1),
@@ -32,14 +32,14 @@ bytecodes: [
B(SwitchOnSmiNoFeedback), U8(2), U8(2), I8(0),
B(Ldar), R(5),
/* 17 E> */ B(Throw),
- B(LdaZero),
+ B(LdaSmi), I8(1),
B(Star), R(1),
B(Mov), R(5), R(2),
B(Jump), U8(95),
B(LdaUndefined),
B(Star), R(6),
B(Mov), R(0), R(5),
- B(CallJSRuntime), U8(%async_generator_await_uncaught), R(5), U8(2),
+ B(InvokeIntrinsic), U8(Runtime::k_AsyncGeneratorAwaitUncaught), R(5), U8(2),
B(SuspendGenerator), R(0), R(0), U8(5), U8(1),
B(ResumeGenerator), R(0), R(0), U8(5),
B(Star), R(5),
@@ -50,10 +50,10 @@ bytecodes: [
B(JumpIfTrue), U8(5),
B(Ldar), R(5),
B(ReThrow),
- B(LdaZero),
+ B(LdaSmi), I8(1),
B(Star), R(1),
B(Mov), R(5), R(2),
- B(Jump), U8(52),
+ B(Jump), U8(51),
B(Jump), U8(36),
B(Star), R(5),
B(CreateCatchContext), R(5), U8(4),
@@ -68,15 +68,15 @@ bytecodes: [
B(InvokeIntrinsic), U8(Runtime::k_AsyncGeneratorReject), R(6), U8(2),
B(PopContext), R(5),
B(Star), R(2),
- B(LdaSmi), I8(1),
+ B(LdaSmi), I8(2),
B(Star), R(1),
- B(Jump), U8(16),
+ B(Jump), U8(15),
B(LdaSmi), I8(-1),
B(Star), R(2),
B(Star), R(1),
- B(Jump), U8(8),
+ B(Jump), U8(7),
B(Star), R(2),
- B(LdaSmi), I8(2),
+ B(LdaZero),
B(Star), R(1),
B(LdaTheHole),
B(SetPendingMessage),
@@ -87,6 +87,8 @@ bytecodes: [
B(Ldar), R(1),
B(SwitchOnSmiNoFeedback), U8(5), U8(3), I8(0),
B(Jump), U8(22),
+ B(Ldar), R(2),
+ B(ReThrow),
B(LdaTrue),
B(Star), R(6),
B(Mov), R(0), R(4),
@@ -95,24 +97,22 @@ bytecodes: [
/* 22 S> */ B(Return),
B(Ldar), R(2),
/* 22 S> */ B(Return),
- B(Ldar), R(2),
- B(ReThrow),
B(LdaUndefined),
/* 22 S> */ B(Return),
]
constant pool: [
Smi [30],
- Smi [70],
- Smi [15],
+ Smi [71],
+ Smi [16],
Smi [7],
SCOPE_INFO_TYPE,
Smi [6],
- Smi [20],
+ Smi [9],
Smi [23],
]
handlers: [
- [20, 134, 142],
- [23, 98, 100],
+ [20, 136, 144],
+ [23, 100, 102],
]
---
@@ -122,7 +122,7 @@ snippet: "
"
frame size: 8
parameter count: 1
-bytecode array length: 233
+bytecode array length: 235
bytecodes: [
B(SwitchOnGeneratorState), R(0), U8(0), U8(3),
B(Mov), R(closure), R(1),
@@ -140,10 +140,10 @@ bytecodes: [
B(SwitchOnSmiNoFeedback), U8(3), U8(2), I8(0),
B(Ldar), R(5),
/* 17 E> */ B(Throw),
- B(LdaZero),
+ B(LdaSmi), I8(1),
B(Star), R(1),
B(Mov), R(5), R(2),
- B(Jump), U8(139),
+ B(Jump), U8(140),
/* 22 S> */ B(LdaSmi), I8(42),
B(Star), R(6),
B(LdaFalse),
@@ -157,14 +157,14 @@ bytecodes: [
B(SwitchOnSmiNoFeedback), U8(5), U8(2), I8(0),
B(Ldar), R(5),
/* 22 E> */ B(Throw),
- B(LdaZero),
+ B(LdaSmi), I8(1),
B(Star), R(1),
B(Mov), R(5), R(2),
B(Jump), U8(95),
B(LdaUndefined),
B(Star), R(6),
B(Mov), R(0), R(5),
- B(CallJSRuntime), U8(%async_generator_await_uncaught), R(5), U8(2),
+ B(InvokeIntrinsic), U8(Runtime::k_AsyncGeneratorAwaitUncaught), R(5), U8(2),
B(SuspendGenerator), R(0), R(0), U8(5), U8(2),
B(ResumeGenerator), R(0), R(0), U8(5),
B(Star), R(5),
@@ -175,10 +175,10 @@ bytecodes: [
B(JumpIfTrue), U8(5),
B(Ldar), R(5),
B(ReThrow),
- B(LdaZero),
+ B(LdaSmi), I8(1),
B(Star), R(1),
B(Mov), R(5), R(2),
- B(Jump), U8(52),
+ B(Jump), U8(51),
B(Jump), U8(36),
B(Star), R(5),
B(CreateCatchContext), R(5), U8(7),
@@ -193,15 +193,15 @@ bytecodes: [
B(InvokeIntrinsic), U8(Runtime::k_AsyncGeneratorReject), R(6), U8(2),
B(PopContext), R(5),
B(Star), R(2),
- B(LdaSmi), I8(1),
+ B(LdaSmi), I8(2),
B(Star), R(1),
- B(Jump), U8(16),
+ B(Jump), U8(15),
B(LdaSmi), I8(-1),
B(Star), R(2),
B(Star), R(1),
- B(Jump), U8(8),
+ B(Jump), U8(7),
B(Star), R(2),
- B(LdaSmi), I8(2),
+ B(LdaZero),
B(Star), R(1),
B(LdaTheHole),
B(SetPendingMessage),
@@ -212,6 +212,8 @@ bytecodes: [
B(Ldar), R(1),
B(SwitchOnSmiNoFeedback), U8(8), U8(3), I8(0),
B(Jump), U8(22),
+ B(Ldar), R(2),
+ B(ReThrow),
B(LdaTrue),
B(Star), R(6),
B(Mov), R(0), R(4),
@@ -220,27 +222,25 @@ bytecodes: [
/* 31 S> */ B(Return),
B(Ldar), R(2),
/* 31 S> */ B(Return),
- B(Ldar), R(2),
- B(ReThrow),
B(LdaUndefined),
/* 31 S> */ B(Return),
]
constant pool: [
Smi [30],
- Smi [74],
- Smi [114],
- Smi [15],
+ Smi [75],
+ Smi [116],
+ Smi [16],
Smi [7],
- Smi [15],
+ Smi [16],
Smi [7],
SCOPE_INFO_TYPE,
Smi [6],
- Smi [20],
+ Smi [9],
Smi [23],
]
handlers: [
- [20, 178, 186],
- [23, 142, 144],
+ [20, 181, 189],
+ [23, 145, 147],
]
---
@@ -248,248 +248,215 @@ snippet: "
async function* f() { for (let x of [42]) yield x }
f();
"
-frame size: 22
+frame size: 20
parameter count: 1
-bytecode array length: 490
+bytecode array length: 416
bytecodes: [
- B(SwitchOnGeneratorState), R(0), U8(0), U8(3),
- B(Mov), R(closure), R(11),
- B(Mov), R(this), R(12),
- B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(11), U8(2),
- B(Star), R(0),
+ B(SwitchOnGeneratorState), R(2), U8(0), U8(3),
+ B(Mov), R(closure), R(4),
+ B(Mov), R(this), R(5),
+ B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(4), U8(2),
+ B(Star), R(2),
/* 17 E> */ B(StackCheck),
- B(Mov), R(context), R(13),
- B(Mov), R(context), R(14),
- B(Ldar), R(0),
- /* 17 E> */ B(SuspendGenerator), R(0), R(0), U8(15), U8(0),
- B(ResumeGenerator), R(0), R(0), U8(15),
- B(Star), R(15),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
+ B(Mov), R(context), R(6),
+ B(Mov), R(context), R(7),
+ B(Ldar), R(2),
+ /* 17 E> */ B(SuspendGenerator), R(2), R(0), U8(8), U8(0),
+ B(ResumeGenerator), R(2), R(0), U8(8),
+ B(Star), R(8),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(2), U8(1),
B(SwitchOnSmiNoFeedback), U8(3), U8(2), I8(0),
- B(Ldar), R(15),
+ B(Ldar), R(8),
/* 17 E> */ B(Throw),
- B(LdaZero),
- B(Star), R(11),
- B(Mov), R(15), R(12),
- B(JumpConstant), U8(18),
- B(LdaZero),
- B(Star), R(7),
- B(Mov), R(context), R(17),
- B(Mov), R(context), R(18),
+ B(LdaSmi), I8(1),
+ B(Star), R(4),
+ B(Mov), R(8), R(5),
+ B(JumpConstant), U8(17),
/* 36 S> */ B(CreateArrayLiteral), U8(5), U8(0), U8(37),
- B(Star), R(19),
- B(LdaNamedProperty), R(19), U8(6), U8(1),
- B(Star), R(20),
- B(CallProperty0), R(20), R(19), U8(3),
+ B(Star), R(10),
+ B(LdaNamedProperty), R(10), U8(6), U8(1),
+ B(Star), R(11),
+ B(CallProperty0), R(11), R(10), U8(3),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
- B(Star), R(4),
- /* 36 E> */ B(LdaNamedProperty), R(4), U8(7), U8(5),
- B(Star), R(5),
- /* 31 S> */ B(CallProperty0), R(5), R(4), U8(7),
- B(Star), R(6),
- /* 31 E> */ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(6), U8(1),
- B(ToBooleanLogicalNot),
- B(JumpIfFalse), U8(7),
- B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(6), U8(1),
- B(LdaNamedProperty), R(6), U8(8), U8(9),
- B(JumpIfToBooleanTrue), U8(68),
- B(LdaNamedProperty), R(6), U8(9), U8(11),
+ B(Star), R(9),
+ B(LdaNamedProperty), R(9), U8(7), U8(5),
B(Star), R(8),
- B(LdaSmi), I8(2),
- B(Star), R(7),
- B(Mov), R(8), R(3),
+ B(LdaFalse),
+ B(Star), R(12),
+ B(Mov), R(context), R(15),
+ B(LdaTrue),
+ B(Star), R(12),
+ /* 31 S> */ B(CallProperty0), R(8), R(9), U8(7),
+ B(Star), R(16),
+ B(JumpIfJSReceiver), U8(7),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(16), U8(1),
+ B(LdaNamedProperty), R(16), U8(8), U8(9),
+ B(JumpIfToBooleanTrue), U8(67),
+ B(LdaNamedProperty), R(16), U8(9), U8(11),
+ B(Star), R(16),
+ B(LdaFalse),
+ B(Star), R(12),
+ B(Mov), R(16), R(3),
/* 22 E> */ B(StackCheck),
- B(Mov), R(3), R(1),
+ /* 31 S> */ B(Mov), R(3), R(0),
/* 42 S> */ B(LdaFalse),
- B(Star), R(21),
- B(Mov), R(0), R(19),
- B(Mov), R(1), R(20),
- B(InvokeIntrinsic), U8(Runtime::k_AsyncGeneratorYield), R(19), U8(3),
- /* 42 E> */ B(SuspendGenerator), R(0), R(0), U8(19), U8(1),
- B(ResumeGenerator), R(0), R(0), U8(19),
B(Star), R(19),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
+ B(Mov), R(2), R(17),
+ B(Mov), R(0), R(18),
+ B(InvokeIntrinsic), U8(Runtime::k_AsyncGeneratorYield), R(17), U8(3),
+ /* 42 E> */ B(SuspendGenerator), R(2), R(0), U8(17), U8(1),
+ B(ResumeGenerator), R(2), R(0), U8(17),
+ B(Star), R(17),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(2), U8(1),
B(SwitchOnSmiNoFeedback), U8(10), U8(2), I8(0),
- B(Ldar), R(19),
+ B(Ldar), R(17),
/* 42 E> */ B(Throw),
- B(LdaZero),
- B(Star), R(15),
- B(Mov), R(19), R(16),
- B(Jump), U8(59),
- B(LdaZero),
- B(Star), R(7),
- B(JumpLoop), U8(87), I8(0),
- B(Jump), U8(37),
- B(Star), R(19),
- B(CreateCatchContext), R(19), U8(12),
- B(Star), R(18),
- B(LdaTheHole),
- B(SetPendingMessage),
- B(Ldar), R(18),
- B(PushContext), R(19),
- B(LdaSmi), I8(2),
- B(TestEqualStrict), R(7), U8(13),
- B(JumpIfFalse), U8(6),
B(LdaSmi), I8(1),
- B(Star), R(7),
- B(LdaImmutableCurrentContextSlot), U8(4),
- B(Star), R(20),
- B(CallRuntime), U16(Runtime::kReThrow), R(20), U8(1),
- B(PopContext), R(19),
+ B(Star), R(13),
+ B(Mov), R(17), R(14),
+ B(Jump), U8(20),
+ B(Ldar), R(17),
+ B(JumpLoop), U8(84), I8(0),
B(LdaSmi), I8(-1),
- B(Star), R(16),
- B(Star), R(15),
- B(Jump), U8(8),
- B(Star), R(16),
- B(LdaSmi), I8(1),
- B(Star), R(15),
+ B(Star), R(14),
+ B(Star), R(13),
+ B(Jump), U8(7),
+ B(Star), R(14),
+ B(LdaZero),
+ B(Star), R(13),
B(LdaTheHole),
B(SetPendingMessage),
+ B(Star), R(15),
+ B(Ldar), R(12),
+ B(JumpIfToBooleanTrue), U8(60),
+ B(LdaNamedProperty), R(9), U8(12), U8(13),
B(Star), R(17),
- B(LdaZero),
- B(TestEqualStrict), R(7), U8(14),
- B(JumpIfTrue), U8(90),
- B(LdaNamedProperty), R(4), U8(13), U8(15),
- B(Star), R(9),
- B(TestUndetectable),
- B(JumpIfFalse), U8(4),
- B(Jump), U8(79),
- B(LdaSmi), I8(1),
- B(TestEqualStrict), R(7), U8(17),
- B(JumpIfFalse), U8(47),
- B(Ldar), R(9),
+ B(JumpIfUndefined), U8(52),
+ B(JumpIfNull), U8(50),
B(TestTypeOf), U8(6),
- B(JumpIfFalse), U8(4),
- B(Jump), U8(18),
+ B(JumpIfTrue), U8(18),
B(Wide), B(LdaSmi), I16(154),
B(Star), R(18),
- B(LdaConstant), U8(14),
+ B(LdaConstant), U8(13),
B(Star), R(19),
B(CallRuntime), U16(Runtime::kNewTypeError), R(18), U8(2),
B(Throw),
B(Mov), R(context), R(18),
- B(Mov), R(9), R(19),
- B(Mov), R(4), R(20),
- B(InvokeIntrinsic), U8(Runtime::k_Call), R(19), U8(2),
- B(Jump), U8(6),
- B(LdaTheHole),
- B(SetPendingMessage),
+ B(CallProperty0), R(17), R(9), U8(15),
+ B(JumpIfJSReceiver), U8(21),
+ B(Star), R(19),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(19), U8(1),
+ B(Jump), U8(12),
+ B(Star), R(18),
+ B(LdaZero),
+ B(TestReferenceEqual), R(13),
+ B(JumpIfTrue), U8(5),
B(Ldar), R(18),
- B(Jump), U8(27),
- B(Mov), R(9), R(18),
- B(Mov), R(4), R(19),
- B(InvokeIntrinsic), U8(Runtime::k_Call), R(18), U8(2),
- B(Star), R(10),
- B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(10), U8(1),
- B(JumpIfToBooleanFalse), U8(4),
- B(Jump), U8(7),
- B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(10), U8(1),
- B(Ldar), R(17),
- B(SetPendingMessage),
+ B(ReThrow),
B(Ldar), R(15),
- B(SwitchOnSmiNoFeedback), U8(15), U8(2), I8(0),
- B(Jump), U8(13),
- B(LdaZero),
- B(Star), R(11),
- B(Mov), R(16), R(12),
- B(Jump), U8(98),
- B(Ldar), R(16),
+ B(SetPendingMessage),
+ B(Ldar), R(13),
+ B(SwitchOnSmiNoFeedback), U8(14), U8(2), I8(0),
+ B(Jump), U8(14),
+ B(Ldar), R(14),
B(ReThrow),
+ B(LdaSmi), I8(1),
+ B(Star), R(4),
+ B(Mov), R(14), R(5),
+ B(Jump), U8(95),
B(LdaUndefined),
- B(Star), R(16),
- B(Mov), R(0), R(15),
- B(CallJSRuntime), U8(%async_generator_await_uncaught), R(15), U8(2),
- B(SuspendGenerator), R(0), R(0), U8(15), U8(2),
- B(ResumeGenerator), R(0), R(0), U8(15),
- B(Star), R(15),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
- B(Star), R(16),
+ B(Star), R(9),
+ B(Mov), R(2), R(8),
+ B(InvokeIntrinsic), U8(Runtime::k_AsyncGeneratorAwaitUncaught), R(8), U8(2),
+ B(SuspendGenerator), R(2), R(0), U8(8), U8(2),
+ B(ResumeGenerator), R(2), R(0), U8(8),
+ B(Star), R(8),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(2), U8(1),
+ B(Star), R(9),
B(LdaZero),
- B(TestReferenceEqual), R(16),
+ B(TestReferenceEqual), R(9),
B(JumpIfTrue), U8(5),
- B(Ldar), R(15),
+ B(Ldar), R(8),
B(ReThrow),
- B(LdaZero),
- B(Star), R(11),
- B(Mov), R(15), R(12),
- B(Jump), U8(52),
+ B(LdaSmi), I8(1),
+ B(Star), R(4),
+ B(Mov), R(8), R(5),
+ B(Jump), U8(51),
B(Jump), U8(36),
- B(Star), R(15),
- B(CreateCatchContext), R(15), U8(17),
- B(Star), R(14),
+ B(Star), R(8),
+ B(CreateCatchContext), R(8), U8(16),
+ B(Star), R(7),
B(LdaTheHole),
B(SetPendingMessage),
- B(Ldar), R(14),
- B(PushContext), R(15),
+ B(Ldar), R(7),
+ B(PushContext), R(8),
B(LdaImmutableCurrentContextSlot), U8(4),
- B(Star), R(17),
- B(Mov), R(0), R(16),
- B(InvokeIntrinsic), U8(Runtime::k_AsyncGeneratorReject), R(16), U8(2),
- B(PopContext), R(15),
- B(Star), R(12),
- B(LdaSmi), I8(1),
- B(Star), R(11),
- B(Jump), U8(16),
- B(LdaSmi), I8(-1),
- B(Star), R(12),
- B(Star), R(11),
- B(Jump), U8(8),
- B(Star), R(12),
+ B(Star), R(10),
+ B(Mov), R(2), R(9),
+ B(InvokeIntrinsic), U8(Runtime::k_AsyncGeneratorReject), R(9), U8(2),
+ B(PopContext), R(8),
+ B(Star), R(5),
B(LdaSmi), I8(2),
- B(Star), R(11),
+ B(Star), R(4),
+ B(Jump), U8(15),
+ B(LdaSmi), I8(-1),
+ B(Star), R(5),
+ B(Star), R(4),
+ B(Jump), U8(7),
+ B(Star), R(5),
+ B(LdaZero),
+ B(Star), R(4),
B(LdaTheHole),
B(SetPendingMessage),
- B(Star), R(13),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorClose), R(0), U8(1),
- B(Ldar), R(13),
+ B(Star), R(6),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorClose), R(2), U8(1),
+ B(Ldar), R(6),
B(SetPendingMessage),
- B(Ldar), R(11),
- B(SwitchOnSmiNoFeedback), U8(19), U8(3), I8(0),
+ B(Ldar), R(4),
+ B(SwitchOnSmiNoFeedback), U8(18), U8(3), I8(0),
B(Jump), U8(22),
+ B(Ldar), R(5),
+ B(ReThrow),
B(LdaTrue),
- B(Star), R(16),
- B(Mov), R(0), R(14),
- B(Mov), R(12), R(15),
- B(InvokeIntrinsic), U8(Runtime::k_AsyncGeneratorResolve), R(14), U8(3),
+ B(Star), R(9),
+ B(Mov), R(2), R(7),
+ B(Mov), R(5), R(8),
+ B(InvokeIntrinsic), U8(Runtime::k_AsyncGeneratorResolve), R(7), U8(3),
/* 50 S> */ B(Return),
- B(Ldar), R(12),
+ B(Ldar), R(5),
/* 50 S> */ B(Return),
- B(Ldar), R(12),
- B(ReThrow),
B(LdaUndefined),
/* 50 S> */ B(Return),
]
constant pool: [
Smi [30],
- Smi [154],
- Smi [371],
- Smi [15],
+ Smi [149],
+ Smi [297],
+ Smi [16],
Smi [7],
ARRAY_BOILERPLATE_DESCRIPTION_TYPE,
SYMBOL_TYPE,
ONE_BYTE_INTERNALIZED_STRING_TYPE ["next"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["done"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["value"],
- Smi [15],
+ Smi [16],
Smi [7],
- SCOPE_INFO_TYPE,
ONE_BYTE_INTERNALIZED_STRING_TYPE ["return"],
ONE_BYTE_INTERNALIZED_STRING_TYPE [""],
Smi [6],
- Smi [14],
+ Smi [9],
SCOPE_INFO_TYPE,
- Smi [396],
+ Smi [321],
Smi [6],
- Smi [20],
+ Smi [9],
Smi [23],
]
handlers: [
- [20, 435, 443],
- [23, 399, 401],
- [61, 222, 230],
- [64, 185, 187],
- [291, 301, 303],
+ [20, 362, 370],
+ [23, 326, 328],
+ [93, 180, 188],
+ [234, 247, 249],
]
---
@@ -500,7 +467,7 @@ snippet: "
"
frame size: 17
parameter count: 1
-bytecode array length: 479
+bytecode array length: 482
bytecodes: [
B(SwitchOnGeneratorState), R(0), U8(0), U8(5),
B(Mov), R(closure), R(1),
@@ -518,13 +485,13 @@ bytecodes: [
B(SwitchOnSmiNoFeedback), U8(5), U8(2), I8(0),
B(Ldar), R(5),
/* 44 E> */ B(Throw),
- B(LdaZero),
+ B(LdaSmi), I8(1),
B(Star), R(1),
B(Mov), R(5), R(2),
B(JumpConstant), U8(18),
/* 49 S> */ B(LdaGlobal), U8(7), U8(0),
- B(Star), R(12),
- /* 56 E> */ B(CallUndefinedReceiver0), R(12), U8(2),
+ B(Star), R(9),
+ /* 56 E> */ B(CallUndefinedReceiver0), R(9), U8(2),
B(Star), R(10),
B(LdaNamedProperty), R(10), U8(8), U8(4),
B(JumpIfUndefined), U8(17),
@@ -548,14 +515,14 @@ bytecodes: [
B(Ldar), R(6),
B(SwitchOnSmiNoFeedback), U8(11), U8(2), I8(1),
B(CallProperty1), R(9), R(7), R(8), U8(14),
- B(Jump), U8(110),
+ B(Jump), U8(111),
B(LdaNamedProperty), R(7), U8(13), U8(16),
B(JumpIfUndefined), U8(13),
B(JumpIfNull), U8(11),
B(Star), R(12),
B(CallProperty1), R(12), R(7), R(8), U8(18),
- B(Jump), U8(93),
- B(LdaZero),
+ B(Jump), U8(94),
+ B(LdaSmi), I8(1),
B(Star), R(1),
B(Mov), R(8), R(2),
B(JumpConstant), U8(19),
@@ -573,7 +540,7 @@ bytecodes: [
B(Jump), U8(2),
B(Star), R(13),
B(Mov), R(0), R(12),
- B(CallJSRuntime), U8(%async_generator_await_uncaught), R(12), U8(2),
+ B(InvokeIntrinsic), U8(Runtime::k_AsyncGeneratorAwaitUncaught), R(12), U8(2),
/* 49 E> */ B(SuspendGenerator), R(0), R(0), U8(12), U8(1),
B(ResumeGenerator), R(0), R(0), U8(12),
B(Star), R(12),
@@ -591,7 +558,7 @@ bytecodes: [
B(CallRuntime), U16(Runtime::kThrowThrowMethodMissing), R(0), U8(0),
B(Star), R(13),
B(Mov), R(0), R(12),
- B(CallJSRuntime), U8(%async_generator_await_uncaught), R(12), U8(2),
+ B(InvokeIntrinsic), U8(Runtime::k_AsyncGeneratorAwaitUncaught), R(12), U8(2),
/* 49 E> */ B(SuspendGenerator), R(0), R(0), U8(12), U8(2),
B(ResumeGenerator), R(0), R(0), U8(12),
B(Star), R(12),
@@ -619,20 +586,20 @@ bytecodes: [
B(Star), R(8),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
B(Star), R(6),
- B(JumpLoop), U8(206), I8(0),
+ B(JumpLoop), U8(207), I8(0),
B(LdaNamedProperty), R(5), U8(16), U8(32),
B(Star), R(7),
B(LdaSmi), I8(1),
B(TestReferenceEqual), R(6),
- B(JumpIfFalse), U8(10),
- B(LdaZero),
+ B(JumpIfFalse), U8(11),
+ B(LdaSmi), I8(1),
B(Star), R(1),
B(Mov), R(7), R(2),
B(Jump), U8(95),
B(LdaUndefined),
B(Star), R(6),
B(Mov), R(0), R(5),
- B(CallJSRuntime), U8(%async_generator_await_uncaught), R(5), U8(2),
+ B(InvokeIntrinsic), U8(Runtime::k_AsyncGeneratorAwaitUncaught), R(5), U8(2),
B(SuspendGenerator), R(0), R(0), U8(5), U8(4),
B(ResumeGenerator), R(0), R(0), U8(5),
B(Star), R(5),
@@ -643,10 +610,10 @@ bytecodes: [
B(JumpIfTrue), U8(5),
B(Ldar), R(5),
B(ReThrow),
- B(LdaZero),
+ B(LdaSmi), I8(1),
B(Star), R(1),
B(Mov), R(5), R(2),
- B(Jump), U8(52),
+ B(Jump), U8(51),
B(Jump), U8(36),
B(Star), R(5),
B(CreateCatchContext), R(5), U8(17),
@@ -661,15 +628,15 @@ bytecodes: [
B(InvokeIntrinsic), U8(Runtime::k_AsyncGeneratorReject), R(6), U8(2),
B(PopContext), R(5),
B(Star), R(2),
- B(LdaSmi), I8(1),
+ B(LdaSmi), I8(2),
B(Star), R(1),
- B(Jump), U8(16),
+ B(Jump), U8(15),
B(LdaSmi), I8(-1),
B(Star), R(2),
B(Star), R(1),
- B(Jump), U8(8),
+ B(Jump), U8(7),
B(Star), R(2),
- B(LdaSmi), I8(2),
+ B(LdaZero),
B(Star), R(1),
B(LdaTheHole),
B(SetPendingMessage),
@@ -680,6 +647,8 @@ bytecodes: [
B(Ldar), R(1),
B(SwitchOnSmiNoFeedback), U8(20), U8(3), I8(0),
B(Jump), U8(22),
+ B(Ldar), R(2),
+ B(ReThrow),
B(LdaTrue),
B(Star), R(6),
B(Mov), R(0), R(4),
@@ -688,38 +657,36 @@ bytecodes: [
/* 60 S> */ B(Return),
B(Ldar), R(2),
/* 60 S> */ B(Return),
- B(Ldar), R(2),
- B(ReThrow),
B(LdaUndefined),
/* 60 S> */ B(Return),
]
constant pool: [
Smi [30],
- Smi [201],
- Smi [251],
- Smi [310],
- Smi [360],
- Smi [15],
+ Smi [203],
+ Smi [253],
+ Smi [312],
+ Smi [363],
+ Smi [16],
Smi [7],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["g"],
SYMBOL_TYPE,
SYMBOL_TYPE,
ONE_BYTE_INTERNALIZED_STRING_TYPE ["next"],
Smi [11],
- Smi [36],
+ Smi [37],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["return"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["throw"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["done"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["value"],
SCOPE_INFO_TYPE,
- Smi [385],
- Smi [286],
+ Smi [387],
+ Smi [287],
Smi [6],
- Smi [20],
+ Smi [9],
Smi [23],
]
handlers: [
- [20, 424, 432],
- [23, 388, 390],
+ [20, 428, 436],
+ [23, 392, 394],
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/BasicLoops.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/BasicLoops.golden
index 630d103b60..6bbc4d11ba 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/BasicLoops.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/BasicLoops.golden
@@ -708,7 +708,7 @@ bytecodes: [
B(Star), R(0),
/* 73 S> */ B(LdaSmi), I8(1),
/* 73 E> */ B(StaCurrentContextSlot), U8(4),
- B(Mov), R(0), R(2),
+ /* 102 S> */ B(Mov), R(0), R(2),
/* 106 S> */ B(LdaCurrentContextSlot), U8(4),
B(JumpIfToBooleanFalse), U8(6),
/* 113 S> */ B(PopContext), R(3),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/BreakableBlocks.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/BreakableBlocks.golden
index 318cc2d651..179ac8071a 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/BreakableBlocks.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/BreakableBlocks.golden
@@ -112,7 +112,7 @@ bytecodes: [
B(Star), R(0),
/* 53 S> */ B(LdaSmi), I8(10),
/* 53 E> */ B(StaCurrentContextSlot), U8(4),
- B(Mov), R(0), R(1),
+ /* 85 S> */ B(Mov), R(0), R(1),
B(Ldar), R(0),
/* 88 S> */ B(Jump), U8(2),
B(PopContext), R(2),
@@ -158,7 +158,7 @@ bytecodes: [
B(Star), R(0),
/* 76 S> */ B(LdaSmi), I8(2),
/* 76 E> */ B(StaCurrentContextSlot), U8(4),
- B(Mov), R(0), R(1),
+ /* 113 S> */ B(Mov), R(0), R(1),
/* 118 S> */ B(LdaCurrentContextSlot), U8(4),
B(JumpIfToBooleanFalse), U8(6),
/* 125 S> */ B(PopContext), R(3),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/CallAndSpread.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/CallAndSpread.golden
index 4c27e3a8d4..33bd5434b4 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/CallAndSpread.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/CallAndSpread.golden
@@ -67,7 +67,7 @@ snippet: "
"
frame size: 10
parameter count: 1
-bytecode array length: 109
+bytecode array length: 107
bytecodes: [
/* 30 E> */ B(StackCheck),
/* 34 S> */ B(LdaGlobal), U8(0), U8(0),
@@ -78,32 +78,31 @@ bytecodes: [
B(Star), R(4),
B(LdaConstant), U8(3),
B(Star), R(3),
- /* 49 S> */ B(CreateArrayLiteral), U8(4), U8(8), U8(37),
+ /* 49 S> */ B(CreateArrayLiteral), U8(4), U8(5), U8(37),
+ B(Star), R(7),
+ B(LdaNamedProperty), R(7), U8(5), U8(6),
B(Star), R(8),
- B(LdaNamedProperty), R(8), U8(5), U8(9),
- B(Star), R(9),
- B(CallProperty0), R(9), R(8), U8(11),
+ B(CallProperty0), R(8), R(7), U8(8),
B(Mov), R(0), R(2),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
- B(Star), R(7),
- B(LdaNamedProperty), R(7), U8(6), U8(13),
B(Star), R(6),
- B(CallProperty0), R(6), R(7), U8(15),
+ B(LdaNamedProperty), R(6), U8(6), U8(10),
B(Star), R(5),
+ B(CallProperty0), R(5), R(6), U8(19),
+ B(Star), R(9),
B(JumpIfJSReceiver), U8(7),
- B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(5), U8(1),
- B(LdaNamedProperty), R(5), U8(7), U8(17),
- B(JumpIfToBooleanTrue), U8(21),
- B(LdaNamedProperty), R(5), U8(8), U8(19),
- B(Star), R(5),
- B(StaInArrayLiteral), R(4), R(3), U8(6),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(9), U8(1),
+ B(LdaNamedProperty), R(9), U8(7), U8(21),
+ B(JumpIfToBooleanTrue), U8(19),
+ B(LdaNamedProperty), R(9), U8(8), U8(12),
+ B(StaInArrayLiteral), R(4), R(3), U8(17),
B(Ldar), R(3),
- B(Inc), U8(5),
+ B(Inc), U8(16),
B(Star), R(3),
- B(JumpLoop), U8(35), I8(0),
+ B(JumpLoop), U8(33), I8(0),
B(LdaSmi), I8(4),
- B(StaInArrayLiteral), R(4), R(3), U8(6),
+ B(StaInArrayLiteral), R(4), R(3), U8(17),
B(Mov), R(4), R(3),
B(CallJSRuntime), U8(%reflect_apply), R(1), U8(3),
B(LdaUndefined),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ClassDeclarations.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ClassDeclarations.golden
index c56e29436e..27911a41c2 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ClassDeclarations.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ClassDeclarations.golden
@@ -12,28 +12,32 @@ snippet: "
speak() { console.log(this.name + ' is speaking.'); }
}
"
-frame size: 7
+frame size: 8
parameter count: 1
-bytecode array length: 38
+bytecode array length: 44
bytecodes: [
/* 30 E> */ B(StackCheck),
+ B(CreateBlockContext), U8(0),
+ B(PushContext), R(2),
B(LdaTheHole),
- B(Star), R(5),
- B(CreateClosure), U8(1), U8(0), U8(2),
- B(Star), R(2),
- B(LdaConstant), U8(0),
- B(Star), R(3),
- B(CreateClosure), U8(2), U8(1), U8(2),
B(Star), R(6),
- B(Mov), R(2), R(4),
- B(CallRuntime), U16(Runtime::kDefineClass), R(3), U8(4),
+ B(CreateClosure), U8(2), U8(0), U8(2),
B(Star), R(3),
- B(Mov), R(4), R(0),
+ B(LdaConstant), U8(1),
+ B(Star), R(4),
+ B(CreateClosure), U8(3), U8(1), U8(2),
+ B(Star), R(7),
+ B(Mov), R(3), R(5),
+ B(CallRuntime), U16(Runtime::kDefineClass), R(4), U8(4),
+ B(Star), R(4),
+ B(Mov), R(5), R(0),
+ B(PopContext), R(2),
B(Mov), R(0), R(1),
B(LdaUndefined),
/* 149 S> */ B(Return),
]
constant pool: [
+ SCOPE_INFO_TYPE,
FIXED_ARRAY_TYPE,
SHARED_FUNCTION_INFO_TYPE,
SHARED_FUNCTION_INFO_TYPE,
@@ -48,28 +52,32 @@ snippet: "
speak() { console.log(this.name + ' is speaking.'); }
}
"
-frame size: 7
+frame size: 8
parameter count: 1
-bytecode array length: 38
+bytecode array length: 44
bytecodes: [
/* 30 E> */ B(StackCheck),
+ B(CreateBlockContext), U8(0),
+ B(PushContext), R(2),
B(LdaTheHole),
- B(Star), R(5),
- B(CreateClosure), U8(1), U8(0), U8(2),
- B(Star), R(2),
- B(LdaConstant), U8(0),
- B(Star), R(3),
- B(CreateClosure), U8(2), U8(1), U8(2),
B(Star), R(6),
- B(Mov), R(2), R(4),
- B(CallRuntime), U16(Runtime::kDefineClass), R(3), U8(4),
+ B(CreateClosure), U8(2), U8(0), U8(2),
B(Star), R(3),
- B(Mov), R(4), R(0),
+ B(LdaConstant), U8(1),
+ B(Star), R(4),
+ B(CreateClosure), U8(3), U8(1), U8(2),
+ B(Star), R(7),
+ B(Mov), R(3), R(5),
+ B(CallRuntime), U16(Runtime::kDefineClass), R(4), U8(4),
+ B(Star), R(4),
+ B(Mov), R(5), R(0),
+ B(PopContext), R(2),
B(Mov), R(0), R(1),
B(LdaUndefined),
/* 149 S> */ B(Return),
]
constant pool: [
+ SCOPE_INFO_TYPE,
FIXED_ARRAY_TYPE,
SHARED_FUNCTION_INFO_TYPE,
SHARED_FUNCTION_INFO_TYPE,
@@ -86,9 +94,9 @@ snippet: "
static [n1]() { return n1; }
}
"
-frame size: 11
+frame size: 12
parameter count: 1
-bytecode array length: 77
+bytecode array length: 87
bytecodes: [
B(CreateFunctionContext), U8(0), U8(2),
B(PushContext), R(2),
@@ -97,28 +105,31 @@ bytecodes: [
/* 43 E> */ B(StaCurrentContextSlot), U8(4),
/* 57 S> */ B(LdaConstant), U8(2),
/* 57 E> */ B(StaCurrentContextSlot), U8(5),
+ B(CreateBlockContext), U8(3),
+ B(PushContext), R(3),
B(LdaTheHole),
- B(Star), R(6),
- B(CreateClosure), U8(4), U8(0), U8(2),
- B(Star), R(3),
- B(LdaConstant), U8(3),
+ B(Star), R(7),
+ B(CreateClosure), U8(5), U8(0), U8(2),
B(Star), R(4),
- /* 75 S> */ B(LdaImmutableCurrentContextSlot), U8(4),
- B(ToName), R(7),
- B(CreateClosure), U8(5), U8(1), U8(2),
- B(Star), R(8),
- /* 106 S> */ B(LdaImmutableCurrentContextSlot), U8(5),
- B(ToName), R(9),
- B(LdaConstant), U8(6),
- B(TestEqualStrict), R(9), U8(2),
- B(Mov), R(3), R(5),
+ B(LdaConstant), U8(4),
+ B(Star), R(5),
+ /* 75 S> */ B(LdaImmutableContextSlot), R(3), U8(4), U8(0),
+ B(ToName), R(8),
+ B(CreateClosure), U8(6), U8(1), U8(2),
+ B(Star), R(9),
+ /* 106 S> */ B(LdaImmutableContextSlot), R(3), U8(5), U8(0),
+ B(ToName), R(10),
+ B(LdaConstant), U8(7),
+ B(TestEqualStrict), R(10), U8(2),
+ B(Mov), R(4), R(6),
B(JumpIfFalse), U8(7),
B(CallRuntime), U16(Runtime::kThrowStaticPrototypeError), R(0), U8(0),
- B(CreateClosure), U8(7), U8(3), U8(2),
- B(Star), R(10),
- B(CallRuntime), U16(Runtime::kDefineClass), R(4), U8(7),
- B(Star), R(4),
- B(Mov), R(3), R(0),
+ B(CreateClosure), U8(8), U8(3), U8(2),
+ B(Star), R(11),
+ B(CallRuntime), U16(Runtime::kDefineClass), R(5), U8(7),
+ B(Star), R(5),
+ B(Mov), R(4), R(0),
+ B(PopContext), R(3),
B(Mov), R(0), R(1),
B(LdaUndefined),
/* 129 S> */ B(Return),
@@ -127,6 +138,7 @@ constant pool: [
SCOPE_INFO_TYPE,
ONE_BYTE_INTERNALIZED_STRING_TYPE ["a"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["b"],
+ SCOPE_INFO_TYPE,
FIXED_ARRAY_TYPE,
SHARED_FUNCTION_INFO_TYPE,
SHARED_FUNCTION_INFO_TYPE,
@@ -142,32 +154,36 @@ snippet: "
class C { constructor() { count++; }}
return new C();
"
-frame size: 7
+frame size: 8
parameter count: 1
-bytecode array length: 46
+bytecode array length: 52
bytecodes: [
B(CreateFunctionContext), U8(0), U8(1),
B(PushContext), R(2),
/* 30 E> */ B(StackCheck),
/* 46 S> */ B(LdaZero),
/* 46 E> */ B(StaCurrentContextSlot), U8(4),
+ B(CreateBlockContext), U8(1),
+ B(PushContext), R(3),
B(LdaTheHole),
- B(Star), R(6),
- B(CreateClosure), U8(2), U8(0), U8(2),
- B(Star), R(3),
- B(LdaConstant), U8(1),
- B(Star), R(4),
- B(Mov), R(3), R(5),
- B(CallRuntime), U16(Runtime::kDefineClass), R(4), U8(3),
+ B(Star), R(7),
+ B(CreateClosure), U8(3), U8(0), U8(2),
B(Star), R(4),
- B(Mov), R(5), R(0),
+ B(LdaConstant), U8(2),
+ B(Star), R(5),
+ B(Mov), R(4), R(6),
+ B(CallRuntime), U16(Runtime::kDefineClass), R(5), U8(3),
+ B(Star), R(5),
+ B(Mov), R(6), R(0),
+ B(PopContext), R(3),
B(Mov), R(0), R(1),
/* 87 S> */ B(Ldar), R(1),
- /* 94 E> */ B(Construct), R(3), R(0), U8(0), U8(1),
+ /* 94 E> */ B(Construct), R(1), R(0), U8(0), U8(1),
/* 102 S> */ B(Return),
]
constant pool: [
SCOPE_INFO_TYPE,
+ SCOPE_INFO_TYPE,
FIXED_ARRAY_TYPE,
SHARED_FUNCTION_INFO_TYPE,
]
@@ -179,39 +195,47 @@ snippet: "
(class {})
class E { static name () {}}
"
-frame size: 7
+frame size: 8
parameter count: 1
-bytecode array length: 61
+bytecode array length: 73
bytecodes: [
/* 30 E> */ B(StackCheck),
- /* 34 S> */ B(LdaTheHole),
- B(Star), R(5),
- B(CreateClosure), U8(1), U8(0), U8(2),
- B(Star), R(2),
- B(LdaConstant), U8(0),
- B(Star), R(3),
- B(Mov), R(2), R(4),
- B(CallRuntime), U16(Runtime::kDefineClass), R(3), U8(3),
- B(Star), R(3),
+ /* 34 S> */ B(CreateBlockContext), U8(0),
+ B(PushContext), R(2),
B(LdaTheHole),
- B(Star), R(5),
- B(CreateClosure), U8(3), U8(1), U8(2),
- B(Star), R(2),
- B(LdaConstant), U8(2),
+ B(Star), R(6),
+ B(CreateClosure), U8(2), U8(0), U8(2),
B(Star), R(3),
- B(CreateClosure), U8(4), U8(2), U8(2),
+ B(LdaConstant), U8(1),
+ B(Star), R(4),
+ B(Mov), R(3), R(5),
+ B(CallRuntime), U16(Runtime::kDefineClass), R(4), U8(3),
+ B(Star), R(4),
+ B(PopContext), R(2),
+ B(CreateBlockContext), U8(3),
+ B(PushContext), R(2),
+ B(LdaTheHole),
B(Star), R(6),
- B(Mov), R(2), R(4),
- B(CallRuntime), U16(Runtime::kDefineClass), R(3), U8(4),
+ B(CreateClosure), U8(5), U8(1), U8(2),
B(Star), R(3),
- B(Mov), R(4), R(0),
+ B(LdaConstant), U8(4),
+ B(Star), R(4),
+ B(CreateClosure), U8(6), U8(2), U8(2),
+ B(Star), R(7),
+ B(Mov), R(3), R(5),
+ B(CallRuntime), U16(Runtime::kDefineClass), R(4), U8(4),
+ B(Star), R(4),
+ B(Mov), R(5), R(0),
+ B(PopContext), R(2),
B(Mov), R(0), R(1),
B(LdaUndefined),
/* 74 S> */ B(Return),
]
constant pool: [
+ SCOPE_INFO_TYPE,
FIXED_ARRAY_TYPE,
SHARED_FUNCTION_INFO_TYPE,
+ SCOPE_INFO_TYPE,
FIXED_ARRAY_TYPE,
SHARED_FUNCTION_INFO_TYPE,
SHARED_FUNCTION_INFO_TYPE,
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/CompoundExpressions.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/CompoundExpressions.golden
index 311dd164d8..b5d9f60681 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/CompoundExpressions.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/CompoundExpressions.golden
@@ -51,16 +51,16 @@ handlers: [
snippet: "
var a = { val: 2 }; a.name *= 2;
"
-frame size: 2
+frame size: 1
parameter count: 1
-bytecode array length: 22
+bytecode array length: 20
bytecodes: [
/* 30 E> */ B(StackCheck),
- /* 42 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41), R(1),
- B(Mov), R(1), R(0),
- /* 54 S> */ B(LdaNamedProperty), R(1), U8(1), U8(1),
+ /* 42 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41),
+ B(Star), R(0),
+ /* 54 S> */ B(LdaNamedProperty), R(0), U8(1), U8(1),
B(MulSmi), I8(2), U8(3),
- /* 61 E> */ B(StaNamedProperty), R(1), U8(1), U8(4),
+ /* 61 E> */ B(StaNamedProperty), R(0), U8(1), U8(4),
B(LdaUndefined),
/* 67 S> */ B(Return),
]
@@ -77,16 +77,16 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 25
+bytecode array length: 23
bytecodes: [
/* 30 E> */ B(StackCheck),
- /* 42 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41), R(1),
- B(Mov), R(1), R(0),
+ /* 42 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41),
+ B(Star), R(0),
/* 52 S> */ B(LdaSmi), I8(1),
B(Star), R(2),
- B(LdaKeyedProperty), R(1), U8(1),
+ B(LdaKeyedProperty), R(0), U8(1),
B(BitwiseXorSmi), I8(2), U8(3),
- /* 57 E> */ B(StaKeyedProperty), R(1), R(2), U8(4),
+ /* 57 E> */ B(StaKeyedProperty), R(0), R(2), U8(4),
B(LdaUndefined),
/* 63 S> */ B(Return),
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/CountOperators.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/CountOperators.golden
index 2c2a3784d4..38af7fc364 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/CountOperators.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/CountOperators.golden
@@ -97,17 +97,17 @@ snippet: "
"
frame size: 4
parameter count: 1
-bytecode array length: 28
+bytecode array length: 26
bytecodes: [
/* 30 E> */ B(StackCheck),
- /* 42 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41), R(1),
- B(Mov), R(1), R(0),
- /* 54 S> */ B(LdaNamedProperty), R(1), U8(1), U8(1),
+ /* 42 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41),
+ B(Star), R(0),
+ /* 54 S> */ B(LdaNamedProperty), R(0), U8(1), U8(1),
B(ToNumeric), U8(3),
B(Star), R(2),
B(Inc), U8(3),
B(Star), R(3),
- /* 66 E> */ B(StaNamedProperty), R(1), U8(1), U8(4),
+ /* 66 E> */ B(StaNamedProperty), R(0), U8(1), U8(4),
B(Ldar), R(2),
/* 69 S> */ B(Return),
]
@@ -124,15 +124,15 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 24
+bytecode array length: 22
bytecodes: [
/* 30 E> */ B(StackCheck),
- /* 42 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41), R(1),
- B(Mov), R(1), R(0),
- /* 54 S> */ B(LdaNamedProperty), R(1), U8(1), U8(1),
+ /* 42 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41),
+ B(Star), R(0),
+ /* 54 S> */ B(LdaNamedProperty), R(0), U8(1), U8(1),
B(Dec), U8(3),
B(Star), R(2),
- /* 65 E> */ B(StaNamedProperty), R(1), U8(1), U8(4),
+ /* 65 E> */ B(StaNamedProperty), R(0), U8(1), U8(4),
B(Ldar), R(2),
/* 69 S> */ B(Return),
]
@@ -149,20 +149,20 @@ snippet: "
"
frame size: 6
parameter count: 1
-bytecode array length: 33
+bytecode array length: 31
bytecodes: [
/* 30 E> */ B(StackCheck),
/* 45 S> */ B(LdaConstant), U8(0),
B(Star), R(0),
- /* 60 S> */ B(CreateObjectLiteral), U8(1), U8(0), U8(41), R(2),
- B(Mov), R(2), R(1),
+ /* 60 S> */ B(CreateObjectLiteral), U8(1), U8(0), U8(41),
+ B(Star), R(1),
/* 72 S> */ B(Ldar), R(0),
- /* 81 E> */ B(LdaKeyedProperty), R(2), U8(1),
+ /* 81 E> */ B(LdaKeyedProperty), R(1), U8(1),
B(ToNumeric), U8(3),
B(Star), R(4),
B(Dec), U8(3),
B(Star), R(5),
- /* 86 E> */ B(StaKeyedProperty), R(2), R(0), U8(4),
+ /* 86 E> */ B(StaKeyedProperty), R(1), R(0), U8(4),
B(Ldar), R(4),
/* 89 S> */ B(Return),
]
@@ -179,18 +179,18 @@ snippet: "
"
frame size: 5
parameter count: 1
-bytecode array length: 29
+bytecode array length: 27
bytecodes: [
/* 30 E> */ B(StackCheck),
/* 45 S> */ B(LdaConstant), U8(0),
B(Star), R(0),
- /* 60 S> */ B(CreateObjectLiteral), U8(1), U8(0), U8(41), R(2),
- B(Mov), R(2), R(1),
+ /* 60 S> */ B(CreateObjectLiteral), U8(1), U8(0), U8(41),
+ B(Star), R(1),
/* 72 S> */ B(Ldar), R(0),
- /* 83 E> */ B(LdaKeyedProperty), R(2), U8(1),
+ /* 83 E> */ B(LdaKeyedProperty), R(1), U8(1),
B(Inc), U8(3),
B(Star), R(4),
- /* 87 E> */ B(StaKeyedProperty), R(2), R(0), U8(4),
+ /* 87 E> */ B(StaKeyedProperty), R(1), R(0), U8(4),
B(Ldar), R(4),
/* 89 S> */ B(Return),
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/CreateRestParameter.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/CreateRestParameter.golden
index 1d79f8e7e2..6973d1166a 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/CreateRestParameter.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/CreateRestParameter.golden
@@ -16,9 +16,9 @@ parameter count: 1
bytecode array length: 7
bytecodes: [
B(CreateRestParameter),
- B(Star), R(0),
- /* 10 E> */ B(StackCheck),
B(Star), R(1),
+ /* 10 E> */ B(StackCheck),
+ /* 22 S> */ B(Star), R(0),
/* 42 S> */ B(Return),
]
constant pool: [
@@ -36,11 +36,11 @@ parameter count: 2
bytecode array length: 13
bytecodes: [
B(CreateRestParameter),
- B(Star), R(0),
+ B(Star), R(2),
/* 10 E> */ B(StackCheck),
- B(Mov), R(arg0), R(1),
- B(Mov), R(0), R(2),
- /* 29 S> */ B(Ldar), R(2),
+ /* 12 S> */ B(Mov), R(arg0), R(0),
+ /* 25 S> */ B(Mov), R(2), R(1),
+ /* 29 S> */ B(Ldar), R(1),
/* 45 S> */ B(Return),
]
constant pool: [
@@ -58,12 +58,12 @@ parameter count: 2
bytecode array length: 15
bytecodes: [
B(CreateRestParameter),
- B(Star), R(0),
+ B(Star), R(2),
/* 10 E> */ B(StackCheck),
- B(Mov), R(arg0), R(1),
- B(Mov), R(0), R(2),
+ /* 12 S> */ B(Mov), R(arg0), R(0),
+ /* 25 S> */ B(Mov), R(2), R(1),
/* 29 S> */ B(LdaZero),
- /* 44 E> */ B(LdaKeyedProperty), R(2), U8(0),
+ /* 44 E> */ B(LdaKeyedProperty), R(1), U8(0),
/* 48 S> */ B(Return),
]
constant pool: [
@@ -83,12 +83,12 @@ bytecodes: [
B(CreateUnmappedArguments),
B(Star), R(3),
B(CreateRestParameter),
- B(Star), R(0),
+ B(Star), R(2),
/* 10 E> */ B(StackCheck),
- B(Mov), R(arg0), R(1),
- B(Mov), R(0), R(2),
+ /* 12 S> */ B(Mov), R(arg0), R(0),
+ /* 25 S> */ B(Mov), R(2), R(1),
/* 29 S> */ B(LdaZero),
- /* 44 E> */ B(LdaKeyedProperty), R(2), U8(1),
+ /* 44 E> */ B(LdaKeyedProperty), R(1), U8(1),
B(Star), R(4),
B(LdaZero),
/* 59 E> */ B(LdaKeyedProperty), R(3), U8(3),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/Delete.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/Delete.golden
index a7e48a2a5c..06de86b362 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/Delete.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/Delete.golden
@@ -9,15 +9,15 @@ wrap: yes
snippet: "
var a = {x:13, y:14}; return delete a.x;
"
-frame size: 2
+frame size: 1
parameter count: 1
-bytecode array length: 14
+bytecode array length: 12
bytecodes: [
/* 30 E> */ B(StackCheck),
- /* 42 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41), R(1),
- B(Mov), R(1), R(0),
+ /* 42 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41),
+ B(Star), R(0),
/* 56 S> */ B(LdaConstant), U8(1),
- B(DeletePropertySloppy), R(1),
+ B(DeletePropertySloppy), R(0),
/* 74 S> */ B(Return),
]
constant pool: [
@@ -31,15 +31,15 @@ handlers: [
snippet: "
'use strict'; var a = {x:13, y:14}; return delete a.x;
"
-frame size: 2
+frame size: 1
parameter count: 1
-bytecode array length: 14
+bytecode array length: 12
bytecodes: [
/* 30 E> */ B(StackCheck),
- /* 56 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41), R(1),
- B(Mov), R(1), R(0),
+ /* 56 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41),
+ B(Star), R(0),
/* 70 S> */ B(LdaConstant), U8(1),
- B(DeletePropertyStrict), R(1),
+ B(DeletePropertyStrict), R(0),
/* 88 S> */ B(Return),
]
constant pool: [
@@ -53,15 +53,15 @@ handlers: [
snippet: "
var a = {1:13, 2:14}; return delete a[2];
"
-frame size: 2
+frame size: 1
parameter count: 1
-bytecode array length: 14
+bytecode array length: 12
bytecodes: [
/* 30 E> */ B(StackCheck),
- /* 42 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41), R(1),
- B(Mov), R(1), R(0),
+ /* 42 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41),
+ B(Star), R(0),
/* 56 S> */ B(LdaSmi), I8(2),
- B(DeletePropertySloppy), R(1),
+ B(DeletePropertySloppy), R(0),
/* 75 S> */ B(Return),
]
constant pool: [
@@ -98,13 +98,12 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 28
+bytecode array length: 25
bytecodes: [
B(CreateFunctionContext), U8(0), U8(1),
B(PushContext), R(0),
/* 30 E> */ B(StackCheck),
- /* 56 S> */ B(CreateObjectLiteral), U8(1), U8(0), U8(41), R(1),
- B(Ldar), R(1),
+ /* 56 S> */ B(CreateObjectLiteral), U8(1), U8(0), U8(41),
/* 56 E> */ B(StaCurrentContextSlot), U8(4),
/* 64 S> */ B(CreateClosure), U8(2), U8(1), U8(2),
/* 93 S> */ B(LdaImmutableCurrentContextSlot), U8(4),
@@ -138,3 +137,20 @@ constant pool: [
handlers: [
]
+---
+snippet: "
+ return delete this;
+"
+frame size: 0
+parameter count: 1
+bytecode array length: 3
+bytecodes: [
+ /* 30 E> */ B(StackCheck),
+ /* 34 S> */ B(LdaTrue),
+ /* 53 S> */ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
+
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/DestructuringAssignment.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/DestructuringAssignment.golden
new file mode 100644
index 0000000000..3a2ea7d5d8
--- /dev/null
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/DestructuringAssignment.golden
@@ -0,0 +1,467 @@
+#
+# Autogenerated by generate-bytecode-expectations.
+#
+
+---
+wrap: yes
+
+---
+snippet: "
+ var x, a = [0,1,2,3];
+ [x] = a;
+"
+frame size: 15
+parameter count: 1
+bytecode array length: 178
+bytecodes: [
+ /* 30 E> */ B(StackCheck),
+ /* 45 S> */ B(CreateArrayLiteral), U8(0), U8(0), U8(37),
+ B(Star), R(1),
+ /* 60 S> */ B(LdaNamedProperty), R(1), U8(1), U8(1),
+ B(Star), R(6),
+ B(CallProperty0), R(6), R(1), U8(3),
+ B(Mov), R(1), R(5),
+ B(Mov), R(1), R(2),
+ B(JumpIfJSReceiver), U8(7),
+ B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
+ B(Star), R(4),
+ B(LdaNamedProperty), R(4), U8(2), U8(5),
+ B(Star), R(3),
+ B(LdaFalse),
+ B(Star), R(7),
+ B(Mov), R(context), R(10),
+ /* 57 S> */ B(Ldar), R(7),
+ B(JumpIfToBooleanTrue), U8(37),
+ B(LdaTrue),
+ B(Star), R(7),
+ B(CallProperty0), R(3), R(4), U8(11),
+ B(Star), R(11),
+ B(JumpIfJSReceiver), U8(7),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(11), U8(1),
+ B(LdaNamedProperty), R(11), U8(3), U8(9),
+ B(JumpIfToBooleanTrue), U8(15),
+ B(LdaNamedProperty), R(11), U8(4), U8(7),
+ B(Star), R(11),
+ B(LdaFalse),
+ B(Star), R(7),
+ B(Ldar), R(11),
+ B(Jump), U8(3),
+ B(LdaUndefined),
+ B(Star), R(0),
+ B(LdaSmi), I8(-1),
+ B(Star), R(9),
+ B(Star), R(8),
+ B(Jump), U8(7),
+ B(Star), R(9),
+ B(LdaZero),
+ B(Star), R(8),
+ B(LdaTheHole),
+ B(SetPendingMessage),
+ B(Star), R(10),
+ B(Ldar), R(7),
+ B(JumpIfToBooleanTrue), U8(60),
+ B(LdaNamedProperty), R(4), U8(5), U8(13),
+ B(Star), R(12),
+ B(JumpIfUndefined), U8(52),
+ B(JumpIfNull), U8(50),
+ B(TestTypeOf), U8(6),
+ B(JumpIfTrue), U8(18),
+ B(Wide), B(LdaSmi), I16(154),
+ B(Star), R(13),
+ B(LdaConstant), U8(6),
+ B(Star), R(14),
+ B(CallRuntime), U16(Runtime::kNewTypeError), R(13), U8(2),
+ B(Throw),
+ B(Mov), R(context), R(13),
+ B(CallProperty0), R(12), R(4), U8(15),
+ B(JumpIfJSReceiver), U8(21),
+ B(Star), R(14),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(14), U8(1),
+ B(Jump), U8(12),
+ B(Star), R(13),
+ B(LdaZero),
+ B(TestReferenceEqual), R(8),
+ B(JumpIfTrue), U8(5),
+ B(Ldar), R(13),
+ B(ReThrow),
+ B(Ldar), R(10),
+ B(SetPendingMessage),
+ B(LdaZero),
+ B(TestReferenceEqual), R(8),
+ B(JumpIfFalse), U8(5),
+ B(Ldar), R(9),
+ B(ReThrow),
+ B(LdaUndefined),
+ /* 65 S> */ B(Return),
+]
+constant pool: [
+ ARRAY_BOILERPLATE_DESCRIPTION_TYPE,
+ SYMBOL_TYPE,
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["next"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["done"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["value"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["return"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE [""],
+]
+handlers: [
+ [44, 86, 94],
+ [140, 153, 155],
+]
+
+---
+snippet: "
+ var x, y, a = [0,1,2,3];
+ [,x,...y] = a;
+"
+frame size: 16
+parameter count: 1
+bytecode array length: 266
+bytecodes: [
+ /* 30 E> */ B(StackCheck),
+ /* 48 S> */ B(CreateArrayLiteral), U8(0), U8(0), U8(37),
+ B(Star), R(2),
+ /* 69 S> */ B(LdaNamedProperty), R(2), U8(1), U8(1),
+ B(Star), R(7),
+ B(CallProperty0), R(7), R(2), U8(3),
+ B(Mov), R(2), R(6),
+ B(Mov), R(2), R(3),
+ B(JumpIfJSReceiver), U8(7),
+ B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
+ B(Star), R(5),
+ B(LdaNamedProperty), R(5), U8(2), U8(5),
+ B(Star), R(4),
+ B(LdaFalse),
+ B(Star), R(8),
+ B(Mov), R(context), R(11),
+ B(Ldar), R(8),
+ B(JumpIfToBooleanTrue), U8(35),
+ B(LdaTrue),
+ B(Star), R(8),
+ B(CallProperty0), R(4), R(5), U8(11),
+ B(Star), R(12),
+ B(JumpIfJSReceiver), U8(7),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(12), U8(1),
+ B(LdaNamedProperty), R(12), U8(3), U8(9),
+ B(JumpIfToBooleanTrue), U8(13),
+ B(LdaNamedProperty), R(12), U8(4), U8(7),
+ B(Star), R(12),
+ B(LdaFalse),
+ B(Star), R(8),
+ B(Ldar), R(12),
+ /* 61 S> */ B(Ldar), R(8),
+ B(JumpIfToBooleanTrue), U8(37),
+ B(LdaTrue),
+ B(Star), R(8),
+ B(CallProperty0), R(4), R(5), U8(13),
+ B(Star), R(12),
+ B(JumpIfJSReceiver), U8(7),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(12), U8(1),
+ B(LdaNamedProperty), R(12), U8(3), U8(9),
+ B(JumpIfToBooleanTrue), U8(15),
+ B(LdaNamedProperty), R(12), U8(4), U8(7),
+ B(Star), R(12),
+ B(LdaFalse),
+ B(Star), R(8),
+ B(Ldar), R(12),
+ B(Jump), U8(3),
+ B(LdaUndefined),
+ B(Star), R(0),
+ /* 63 S> */ B(CreateEmptyArrayLiteral), U8(15),
+ B(Star), R(13),
+ B(LdaZero),
+ B(Star), R(14),
+ B(LdaTrue),
+ B(Star), R(8),
+ B(CallProperty0), R(4), R(5), U8(19),
+ B(Star), R(12),
+ B(JumpIfJSReceiver), U8(7),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(12), U8(1),
+ B(LdaNamedProperty), R(12), U8(3), U8(21),
+ B(JumpIfToBooleanTrue), U8(19),
+ B(LdaNamedProperty), R(12), U8(4), U8(7),
+ B(StaInArrayLiteral), R(13), R(14), U8(16),
+ B(Ldar), R(14),
+ B(Inc), U8(18),
+ B(Star), R(14),
+ B(JumpLoop), U8(33), I8(0),
+ B(Mov), R(13), R(1),
+ B(Ldar), R(1),
+ B(LdaSmi), I8(-1),
+ B(Star), R(10),
+ B(Star), R(9),
+ B(Jump), U8(7),
+ B(Star), R(10),
+ B(LdaZero),
+ B(Star), R(9),
+ B(LdaTheHole),
+ B(SetPendingMessage),
+ B(Star), R(11),
+ B(Ldar), R(8),
+ B(JumpIfToBooleanTrue), U8(60),
+ B(LdaNamedProperty), R(5), U8(5), U8(23),
+ B(Star), R(13),
+ B(JumpIfUndefined), U8(52),
+ B(JumpIfNull), U8(50),
+ B(TestTypeOf), U8(6),
+ B(JumpIfTrue), U8(18),
+ B(Wide), B(LdaSmi), I16(154),
+ B(Star), R(14),
+ B(LdaConstant), U8(6),
+ B(Star), R(15),
+ B(CallRuntime), U16(Runtime::kNewTypeError), R(14), U8(2),
+ B(Throw),
+ B(Mov), R(context), R(14),
+ B(CallProperty0), R(13), R(5), U8(25),
+ B(JumpIfJSReceiver), U8(21),
+ B(Star), R(15),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(15), U8(1),
+ B(Jump), U8(12),
+ B(Star), R(14),
+ B(LdaZero),
+ B(TestReferenceEqual), R(9),
+ B(JumpIfTrue), U8(5),
+ B(Ldar), R(14),
+ B(ReThrow),
+ B(Ldar), R(11),
+ B(SetPendingMessage),
+ B(LdaZero),
+ B(TestReferenceEqual), R(9),
+ B(JumpIfFalse), U8(5),
+ B(Ldar), R(10),
+ B(ReThrow),
+ B(LdaUndefined),
+ /* 74 S> */ B(Return),
+]
+constant pool: [
+ ARRAY_BOILERPLATE_DESCRIPTION_TYPE,
+ SYMBOL_TYPE,
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["next"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["done"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["value"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["return"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE [""],
+]
+handlers: [
+ [44, 174, 182],
+ [228, 241, 243],
+]
+
+---
+snippet: "
+ var x={}, y, a = [0];
+ [x.foo,y=4] = a;
+"
+frame size: 17
+parameter count: 1
+bytecode array length: 229
+bytecodes: [
+ /* 30 E> */ B(StackCheck),
+ /* 40 S> */ B(CreateEmptyObjectLiteral),
+ B(Star), R(0),
+ /* 51 S> */ B(CreateArrayLiteral), U8(0), U8(0), U8(37),
+ B(Star), R(2),
+ /* 68 S> */ B(LdaNamedProperty), R(2), U8(1), U8(1),
+ B(Star), R(7),
+ B(CallProperty0), R(7), R(2), U8(3),
+ B(Mov), R(2), R(6),
+ B(Mov), R(2), R(3),
+ B(JumpIfJSReceiver), U8(7),
+ B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
+ B(Star), R(5),
+ B(LdaNamedProperty), R(5), U8(2), U8(5),
+ B(Star), R(4),
+ B(LdaFalse),
+ B(Star), R(8),
+ B(Mov), R(context), R(11),
+ /* 59 S> */ B(Ldar), R(8),
+ B(Mov), R(0), R(13),
+ B(JumpIfToBooleanTrue), U8(37),
+ B(LdaTrue),
+ B(Star), R(8),
+ B(CallProperty0), R(4), R(5), U8(11),
+ B(Star), R(12),
+ B(JumpIfJSReceiver), U8(7),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(12), U8(1),
+ B(LdaNamedProperty), R(12), U8(3), U8(9),
+ B(JumpIfToBooleanTrue), U8(15),
+ B(LdaNamedProperty), R(12), U8(4), U8(7),
+ B(Star), R(12),
+ B(LdaFalse),
+ B(Star), R(8),
+ B(Ldar), R(12),
+ B(Jump), U8(3),
+ B(LdaUndefined),
+ B(StaNamedProperty), R(13), U8(5), U8(13),
+ /* 63 S> */ B(Ldar), R(8),
+ B(JumpIfToBooleanTrue), U8(37),
+ B(LdaTrue),
+ B(Star), R(8),
+ B(CallProperty0), R(4), R(5), U8(15),
+ B(Star), R(12),
+ B(JumpIfJSReceiver), U8(7),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(12), U8(1),
+ B(LdaNamedProperty), R(12), U8(3), U8(9),
+ B(JumpIfToBooleanTrue), U8(15),
+ B(LdaNamedProperty), R(12), U8(4), U8(7),
+ B(Star), R(12),
+ B(LdaFalse),
+ B(Star), R(8),
+ B(Ldar), R(12),
+ B(JumpIfNotUndefined), U8(4),
+ B(LdaSmi), I8(4),
+ B(Star), R(1),
+ B(LdaSmi), I8(-1),
+ B(Star), R(10),
+ B(Star), R(9),
+ B(Jump), U8(7),
+ B(Star), R(10),
+ B(LdaZero),
+ B(Star), R(9),
+ B(LdaTheHole),
+ B(SetPendingMessage),
+ B(Star), R(11),
+ B(Ldar), R(8),
+ B(JumpIfToBooleanTrue), U8(60),
+ B(LdaNamedProperty), R(5), U8(6), U8(17),
+ B(Star), R(14),
+ B(JumpIfUndefined), U8(52),
+ B(JumpIfNull), U8(50),
+ B(TestTypeOf), U8(6),
+ B(JumpIfTrue), U8(18),
+ B(Wide), B(LdaSmi), I16(154),
+ B(Star), R(15),
+ B(LdaConstant), U8(7),
+ B(Star), R(16),
+ B(CallRuntime), U16(Runtime::kNewTypeError), R(15), U8(2),
+ B(Throw),
+ B(Mov), R(context), R(15),
+ B(CallProperty0), R(14), R(5), U8(19),
+ B(JumpIfJSReceiver), U8(21),
+ B(Star), R(16),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(16), U8(1),
+ B(Jump), U8(12),
+ B(Star), R(15),
+ B(LdaZero),
+ B(TestReferenceEqual), R(9),
+ B(JumpIfTrue), U8(5),
+ B(Ldar), R(15),
+ B(ReThrow),
+ B(Ldar), R(11),
+ B(SetPendingMessage),
+ B(LdaZero),
+ B(TestReferenceEqual), R(9),
+ B(JumpIfFalse), U8(5),
+ B(Ldar), R(10),
+ B(ReThrow),
+ B(LdaUndefined),
+ /* 73 S> */ B(Return),
+]
+constant pool: [
+ ARRAY_BOILERPLATE_DESCRIPTION_TYPE,
+ SYMBOL_TYPE,
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["next"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["done"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["value"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["foo"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["return"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE [""],
+]
+handlers: [
+ [47, 137, 145],
+ [191, 204, 206],
+]
+
+---
+snippet: "
+ var x, a = {x:1};
+ ({x} = a);
+"
+frame size: 3
+parameter count: 1
+bytecode array length: 26
+bytecodes: [
+ /* 30 E> */ B(StackCheck),
+ /* 45 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41),
+ B(Star), R(1),
+ /* 52 S> */ B(JumpIfNull), U8(4),
+ B(JumpIfNotUndefined), U8(7),
+ /* 53 E> */ B(CallRuntime), U16(Runtime::kThrowPatternAssignmentNonCoercible), R(0), U8(0),
+ B(Star), R(2),
+ /* 54 S> */ B(LdaNamedProperty), R(2), U8(1), U8(1),
+ B(Star), R(0),
+ B(LdaUndefined),
+ /* 63 S> */ B(Return),
+]
+constant pool: [
+ OBJECT_BOILERPLATE_DESCRIPTION_TYPE,
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["x"],
+]
+handlers: [
+]
+
+---
+snippet: "
+ var x={}, a = {y:1};
+ ({y:x.foo} = a);
+"
+frame size: 3
+parameter count: 1
+bytecode array length: 31
+bytecodes: [
+ /* 30 E> */ B(StackCheck),
+ /* 40 S> */ B(CreateEmptyObjectLiteral),
+ B(Star), R(0),
+ /* 48 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41),
+ B(Star), R(1),
+ /* 55 S> */ B(JumpIfNull), U8(4),
+ B(JumpIfNotUndefined), U8(7),
+ /* 56 E> */ B(CallRuntime), U16(Runtime::kThrowPatternAssignmentNonCoercible), R(0), U8(0),
+ /* 61 S> */ B(Star), R(2),
+ B(LdaNamedProperty), R(2), U8(1), U8(1),
+ B(StaNamedProperty), R(0), U8(2), U8(3),
+ B(LdaUndefined),
+ /* 72 S> */ B(Return),
+]
+constant pool: [
+ OBJECT_BOILERPLATE_DESCRIPTION_TYPE,
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["y"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["foo"],
+]
+handlers: [
+]
+
+---
+snippet: "
+ var x, a = {y:1, w:2, v:3};
+ ({x=0,...y} = a);
+"
+frame size: 4
+parameter count: 1
+bytecode array length: 41
+bytecodes: [
+ /* 30 E> */ B(StackCheck),
+ /* 45 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41),
+ B(Star), R(1),
+ /* 62 S> */ B(JumpIfNull), U8(4),
+ B(JumpIfNotUndefined), U8(7),
+ /* 63 E> */ B(CallRuntime), U16(Runtime::kThrowPatternAssignmentNonCoercible), R(0), U8(0),
+ B(Star), R(2),
+ /* 64 S> */ B(LdaConstant), U8(1),
+ B(Star), R(3),
+ B(LdaNamedProperty), R(2), U8(1), U8(1),
+ B(JumpIfNotUndefined), U8(3),
+ B(LdaZero),
+ B(Star), R(0),
+ /* 71 S> */ B(CallRuntime), U16(Runtime::kCopyDataPropertiesWithExcludedProperties), R(2), U8(2),
+ B(StaGlobal), U8(2), U8(3),
+ B(LdaUndefined),
+ /* 80 S> */ B(Return),
+]
+constant pool: [
+ OBJECT_BOILERPLATE_DESCRIPTION_TYPE,
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["x"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["y"],
+]
+handlers: [
+]
+
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/DoExpression.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/DoExpression.golden
deleted file mode 100644
index 44912a714e..0000000000
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/DoExpression.golden
+++ /dev/null
@@ -1,71 +0,0 @@
-#
-# Autogenerated by generate-bytecode-expectations.
-#
-
----
-wrap: yes
-do expressions: yes
-
----
-snippet: "
- var a = do { }; return a;
-"
-frame size: 2
-parameter count: 1
-bytecode array length: 7
-bytecodes: [
- /* 30 E> */ B(StackCheck),
- /* 42 S> */ B(Mov), R(0), R(1),
- /* 50 S> */ B(Ldar), R(1),
- /* 59 S> */ B(Return),
-]
-constant pool: [
-]
-handlers: [
-]
-
----
-snippet: "
- var a = do { var x = 100; }; return a;
-"
-frame size: 3
-parameter count: 1
-bytecode array length: 11
-bytecodes: [
- /* 30 E> */ B(StackCheck),
- /* 55 S> */ B(LdaSmi), I8(100),
- B(Star), R(0),
- /* 42 S> */ B(LdaUndefined),
- B(Star), R(1),
- B(Star), R(2),
- /* 72 S> */ B(Return),
-]
-constant pool: [
-]
-handlers: [
-]
-
----
-snippet: "
- while(true) { var a = 10; a = do { ++a; break; }; a = 20; }
-"
-frame size: 2
-parameter count: 1
-bytecode array length: 16
-bytecodes: [
- /* 30 E> */ B(StackCheck),
- /* 34 E> */ B(StackCheck),
- /* 56 S> */ B(LdaSmi), I8(10),
- B(Star), R(0),
- /* 69 S> */ B(Inc), U8(0),
- B(Star), R(0),
- B(Star), R(1),
- /* 74 S> */ B(Jump), U8(2),
- B(LdaUndefined),
- /* 94 S> */ B(Return),
-]
-constant pool: [
-]
-handlers: [
-]
-
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ForAwaitOf.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ForAwaitOf.golden
index 098130c480..3c89cfed30 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ForAwaitOf.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ForAwaitOf.golden
@@ -14,253 +14,167 @@ snippet: "
}
f();
"
-frame size: 23
+frame size: 21
parameter count: 1
-bytecode array length: 518
+bytecode array length: 329
bytecodes: [
- B(SwitchOnGeneratorState), R(3), U8(0), U8(3),
- B(Mov), R(closure), R(12),
- B(Mov), R(this), R(13),
- B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(12), U8(2),
- B(Star), R(3),
+ B(SwitchOnGeneratorState), R(2), U8(0), U8(2),
+ B(Mov), R(closure), R(4),
+ B(Mov), R(this), R(5),
+ B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionEnter), R(4), U8(2),
+ B(Star), R(2),
/* 16 E> */ B(StackCheck),
- B(CallJSRuntime), U8(%async_function_promise_create), R(0), U8(0),
- B(Star), R(0),
- B(Mov), R(context), R(14),
- B(Mov), R(context), R(15),
- B(LdaZero),
- B(Star), R(8),
- B(Mov), R(context), R(18),
- B(Mov), R(context), R(19),
- /* 43 S> */ B(CreateArrayLiteral), U8(3), U8(0), U8(37),
- B(Star), R(20),
- B(LdaNamedProperty), R(20), U8(4), U8(1),
+ B(Mov), R(context), R(4),
+ /* 43 S> */ B(CreateArrayLiteral), U8(2), U8(0), U8(37),
+ B(Star), R(7),
+ B(LdaNamedProperty), R(7), U8(3), U8(1),
B(JumpIfUndefined), U8(17),
B(JumpIfNull), U8(15),
- B(Star), R(21),
- B(CallProperty0), R(21), R(20), U8(3),
+ B(Star), R(8),
+ B(CallProperty0), R(8), R(7), U8(3),
B(JumpIfJSReceiver), U8(23),
B(CallRuntime), U16(Runtime::kThrowSymbolAsyncIteratorInvalid), R(0), U8(0),
- B(LdaNamedProperty), R(20), U8(5), U8(5),
- B(Star), R(21),
- B(CallProperty0), R(21), R(20), U8(7),
- B(Star), R(21),
- B(InvokeIntrinsic), U8(Runtime::k_CreateAsyncFromSyncIterator), R(21), U8(1),
- B(Star), R(5),
- /* 43 E> */ B(LdaNamedProperty), R(5), U8(6), U8(9),
+ B(LdaNamedProperty), R(7), U8(4), U8(5),
+ B(Star), R(8),
+ B(CallProperty0), R(8), R(7), U8(7),
+ B(Star), R(8),
+ B(InvokeIntrinsic), U8(Runtime::k_CreateAsyncFromSyncIterator), R(8), U8(1),
B(Star), R(6),
- /* 40 S> */ B(CallProperty0), R(6), R(5), U8(11),
- B(Star), R(21),
- B(Mov), R(3), R(20),
- B(Mov), R(0), R(22),
- B(CallJSRuntime), U8(%async_function_await_uncaught), R(20), U8(3),
- /* 40 E> */ B(SuspendGenerator), R(3), R(0), U8(20), U8(0),
- B(ResumeGenerator), R(3), R(0), U8(20),
- B(Star), R(20),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(3), U8(1),
- B(Star), R(21),
+ B(LdaNamedProperty), R(6), U8(5), U8(9),
+ B(Star), R(5),
+ B(LdaFalse),
+ B(Star), R(9),
+ B(Mov), R(context), R(12),
+ B(LdaTrue),
+ B(Star), R(9),
+ /* 38 S> */ B(CallProperty0), R(5), R(6), U8(11),
+ B(Star), R(15),
+ B(Mov), R(2), R(14),
+ B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionAwaitUncaught), R(14), U8(2),
+ B(SuspendGenerator), R(2), R(0), U8(14), U8(0),
+ B(ResumeGenerator), R(2), R(0), U8(14),
+ B(Star), R(14),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(2), U8(1),
+ B(Star), R(15),
B(LdaZero),
- B(TestReferenceEqual), R(21),
+ B(TestReferenceEqual), R(15),
B(JumpIfTrue), U8(5),
- B(Ldar), R(20),
+ B(Ldar), R(14),
B(ReThrow),
- B(Mov), R(20), R(7),
- /* 40 E> */ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(20), U8(1),
- B(ToBooleanLogicalNot),
- B(JumpIfFalse), U8(7),
- B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(7), U8(1),
- B(LdaNamedProperty), R(7), U8(7), U8(13),
- B(JumpIfToBooleanTrue), U8(25),
- B(LdaNamedProperty), R(7), U8(8), U8(15),
+ B(Ldar), R(14),
+ B(Mov), R(14), R(13),
+ B(JumpIfJSReceiver), U8(7),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(13), U8(1),
+ B(LdaNamedProperty), R(13), U8(6), U8(13),
+ B(JumpIfToBooleanTrue), U8(23),
+ B(LdaNamedProperty), R(13), U8(7), U8(15),
+ B(Star), R(13),
+ B(LdaFalse),
B(Star), R(9),
- B(LdaSmi), I8(2),
- B(Star), R(8),
- B(Mov), R(9), R(4),
+ B(Mov), R(13), R(3),
/* 23 E> */ B(StackCheck),
- B(Mov), R(4), R(1),
- B(LdaZero),
- B(Star), R(8),
- B(JumpLoop), U8(82), I8(0),
- B(Jump), U8(37),
- B(Star), R(20),
- B(CreateCatchContext), R(20), U8(9),
- B(Star), R(19),
- B(LdaTheHole),
- B(SetPendingMessage),
- B(Ldar), R(19),
- B(PushContext), R(20),
- B(LdaSmi), I8(2),
- B(TestEqualStrict), R(8), U8(17),
- B(JumpIfFalse), U8(6),
- B(LdaSmi), I8(1),
- B(Star), R(8),
- B(LdaImmutableCurrentContextSlot), U8(4),
- B(Star), R(21),
- B(CallRuntime), U16(Runtime::kReThrow), R(21), U8(1),
- B(PopContext), R(20),
+ /* 38 S> */ B(Mov), R(3), R(0),
+ B(Ldar), R(13),
+ B(JumpLoop), U8(77), I8(0),
B(LdaSmi), I8(-1),
- B(Star), R(17),
- B(Star), R(16),
+ B(Star), R(11),
+ B(Star), R(10),
B(Jump), U8(7),
- B(Star), R(17),
- B(LdaZero),
- B(Star), R(16),
- B(LdaTheHole),
- B(SetPendingMessage),
- B(Star), R(18),
+ B(Star), R(11),
B(LdaZero),
- B(TestEqualStrict), R(8), U8(18),
- B(JumpIfTrue), U8(167),
- B(LdaNamedProperty), R(5), U8(10), U8(19),
B(Star), R(10),
- B(TestUndetectable),
- B(JumpIfFalse), U8(4),
- B(Jump), U8(156),
- B(LdaSmi), I8(1),
- B(TestEqualStrict), R(8), U8(21),
- B(JumpIfFalse), U8(86),
- B(Ldar), R(10),
+ B(LdaTheHole),
+ /* 38 E> */ B(SetPendingMessage),
+ B(Star), R(12),
+ B(Ldar), R(9),
+ B(JumpIfToBooleanTrue), U8(96),
+ B(LdaNamedProperty), R(6), U8(8), U8(17),
+ B(Star), R(16),
+ B(JumpIfUndefined), U8(88),
+ B(JumpIfNull), U8(86),
B(TestTypeOf), U8(6),
- B(JumpIfFalse), U8(4),
- B(Jump), U8(18),
+ B(JumpIfTrue), U8(18),
B(Wide), B(LdaSmi), I16(154),
- B(Star), R(19),
- B(LdaConstant), U8(11),
- B(Star), R(20),
- B(CallRuntime), U16(Runtime::kNewTypeError), R(19), U8(2),
+ B(Star), R(17),
+ B(LdaConstant), U8(9),
+ B(Star), R(18),
+ B(CallRuntime), U16(Runtime::kNewTypeError), R(17), U8(2),
B(Throw),
- B(Mov), R(context), R(19),
- B(Mov), R(10), R(20),
- B(Mov), R(5), R(21),
- B(InvokeIntrinsic), U8(Runtime::k_Call), R(20), U8(2),
- B(Star), R(21),
- B(Mov), R(3), R(20),
- B(Mov), R(0), R(22),
- B(CallJSRuntime), U8(%async_function_await_caught), R(20), U8(3),
- B(SuspendGenerator), R(3), R(0), U8(20), U8(1),
- B(ResumeGenerator), R(3), R(0), U8(20),
- B(Star), R(20),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(3), U8(1),
- B(Star), R(21),
+ B(Mov), R(context), R(17),
+ B(CallProperty0), R(16), R(6), U8(19),
+ B(Star), R(19),
+ B(Mov), R(2), R(18),
+ B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionAwaitUncaught), R(18), U8(2),
+ B(SuspendGenerator), R(2), R(0), U8(18), U8(1),
+ B(ResumeGenerator), R(2), R(0), U8(18),
+ B(Star), R(18),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(2), U8(1),
+ B(Star), R(19),
B(LdaZero),
- B(TestReferenceEqual), R(21),
+ B(TestReferenceEqual), R(19),
B(JumpIfTrue), U8(5),
- B(Ldar), R(20),
+ B(Ldar), R(18),
B(ReThrow),
- B(Ldar), R(20),
- B(Jump), U8(6),
- B(LdaTheHole),
- B(SetPendingMessage),
- B(Ldar), R(19),
- B(Jump), U8(65),
- B(Mov), R(10), R(19),
- B(Mov), R(5), R(20),
- B(InvokeIntrinsic), U8(Runtime::k_Call), R(19), U8(2),
- B(Star), R(20),
- B(Mov), R(3), R(19),
- B(Mov), R(0), R(21),
- B(CallJSRuntime), U8(%async_function_await_uncaught), R(19), U8(3),
- B(SuspendGenerator), R(3), R(0), U8(19), U8(2),
- B(ResumeGenerator), R(3), R(0), U8(19),
- B(Star), R(19),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(3), U8(1),
+ B(Ldar), R(18),
+ B(JumpIfJSReceiver), U8(21),
B(Star), R(20),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(20), U8(1),
+ B(Jump), U8(12),
+ B(Star), R(17),
B(LdaZero),
- B(TestReferenceEqual), R(20),
+ B(TestReferenceEqual), R(10),
B(JumpIfTrue), U8(5),
- B(Ldar), R(19),
+ B(Ldar), R(17),
B(ReThrow),
- B(Mov), R(19), R(11),
- B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(19), U8(1),
- B(JumpIfToBooleanFalse), U8(4),
- B(Jump), U8(7),
- B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(11), U8(1),
- B(Ldar), R(18),
+ B(Ldar), R(12),
B(SetPendingMessage),
B(LdaZero),
- B(TestReferenceEqual), R(16),
+ B(TestReferenceEqual), R(10),
B(JumpIfFalse), U8(5),
- B(Ldar), R(17),
+ B(Ldar), R(11),
B(ReThrow),
B(LdaUndefined),
- B(Star), R(13),
- B(LdaZero),
- B(Star), R(12),
- B(Jump), U8(56),
- B(Jump), U8(40),
- B(Star), R(16),
- B(CreateCatchContext), R(16), U8(12),
- B(Star), R(15),
+ B(Star), R(6),
+ B(LdaTrue),
+ B(Star), R(7),
+ B(Mov), R(2), R(5),
+ B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionResolve), R(5), U8(3),
+ /* 57 S> */ B(Return),
+ B(Jump), U8(30),
+ B(Star), R(5),
+ B(CreateCatchContext), R(5), U8(10),
+ B(Star), R(4),
B(LdaTheHole),
B(SetPendingMessage),
- B(Ldar), R(15),
- B(PushContext), R(16),
+ B(Ldar), R(4),
+ B(PushContext), R(5),
B(LdaImmutableCurrentContextSlot), U8(4),
- B(Star), R(18),
- B(LdaFalse),
- B(Star), R(19),
- B(Mov), R(0), R(17),
- B(InvokeIntrinsic), U8(Runtime::k_RejectPromise), R(17), U8(3),
- B(PopContext), R(16),
- B(LdaSmi), I8(1),
- B(Star), R(12),
- B(Mov), R(0), R(13),
- B(Jump), U8(16),
- B(LdaSmi), I8(-1),
- B(Star), R(13),
- B(Star), R(12),
- B(Jump), U8(8),
- B(Star), R(13),
- B(LdaSmi), I8(2),
- B(Star), R(12),
- B(LdaTheHole),
- B(SetPendingMessage),
- B(Star), R(14),
+ B(Star), R(7),
B(LdaTrue),
- B(Star), R(16),
- B(Mov), R(0), R(15),
- B(CallJSRuntime), U8(%async_function_promise_release), R(15), U8(2),
- B(Ldar), R(14),
- B(SetPendingMessage),
- B(Ldar), R(12),
- B(SwitchOnSmiNoFeedback), U8(13), U8(3), I8(0),
- B(Jump), U8(21),
- B(Mov), R(0), R(15),
- B(Mov), R(13), R(16),
- B(InvokeIntrinsic), U8(Runtime::k_ResolvePromise), R(15), U8(2),
- B(Ldar), R(0),
- /* 57 S> */ B(Return),
- B(Ldar), R(13),
+ B(Star), R(8),
+ B(Mov), R(2), R(6),
+ B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionReject), R(6), U8(3),
/* 57 S> */ B(Return),
- B(Ldar), R(13),
- B(ReThrow),
B(LdaUndefined),
/* 57 S> */ B(Return),
]
constant pool: [
- Smi [110],
- Smi [306],
- Smi [363],
+ Smi [98],
+ Smi [229],
ARRAY_BOILERPLATE_DESCRIPTION_TYPE,
SYMBOL_TYPE,
SYMBOL_TYPE,
ONE_BYTE_INTERNALIZED_STRING_TYPE ["next"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["done"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["value"],
- SCOPE_INFO_TYPE,
ONE_BYTE_INTERNALIZED_STRING_TYPE ["return"],
ONE_BYTE_INTERNALIZED_STRING_TYPE [""],
SCOPE_INFO_TYPE,
- Smi [6],
- Smi [19],
- Smi [22],
]
handlers: [
- [26, 458, 466],
- [29, 418, 420],
- [35, 211, 219],
- [38, 174, 176],
- [279, 328, 330],
+ [20, 297, 299],
+ [77, 157, 165],
+ [211, 260, 262],
]
---
@@ -270,260 +184,177 @@ snippet: "
}
f();
"
-frame size: 23
+frame size: 21
parameter count: 1
-bytecode array length: 532
+bytecode array length: 350
bytecodes: [
- B(SwitchOnGeneratorState), R(3), U8(0), U8(3),
- B(Mov), R(closure), R(12),
- B(Mov), R(this), R(13),
- B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(12), U8(2),
- B(Star), R(3),
+ B(SwitchOnGeneratorState), R(2), U8(0), U8(2),
+ B(Mov), R(closure), R(4),
+ B(Mov), R(this), R(5),
+ B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionEnter), R(4), U8(2),
+ B(Star), R(2),
/* 16 E> */ B(StackCheck),
- B(CallJSRuntime), U8(%async_function_promise_create), R(0), U8(0),
- B(Star), R(0),
- B(Mov), R(context), R(14),
- B(Mov), R(context), R(15),
- B(LdaZero),
- B(Star), R(8),
- B(Mov), R(context), R(18),
- B(Mov), R(context), R(19),
- /* 43 S> */ B(CreateArrayLiteral), U8(3), U8(0), U8(37),
- B(Star), R(20),
- B(LdaNamedProperty), R(20), U8(4), U8(1),
+ B(Mov), R(context), R(4),
+ /* 43 S> */ B(CreateArrayLiteral), U8(2), U8(0), U8(37),
+ B(Star), R(7),
+ B(LdaNamedProperty), R(7), U8(3), U8(1),
B(JumpIfUndefined), U8(17),
B(JumpIfNull), U8(15),
- B(Star), R(21),
- B(CallProperty0), R(21), R(20), U8(3),
+ B(Star), R(8),
+ B(CallProperty0), R(8), R(7), U8(3),
B(JumpIfJSReceiver), U8(23),
B(CallRuntime), U16(Runtime::kThrowSymbolAsyncIteratorInvalid), R(0), U8(0),
- B(LdaNamedProperty), R(20), U8(5), U8(5),
- B(Star), R(21),
- B(CallProperty0), R(21), R(20), U8(7),
- B(Star), R(21),
- B(InvokeIntrinsic), U8(Runtime::k_CreateAsyncFromSyncIterator), R(21), U8(1),
- B(Star), R(5),
- /* 43 E> */ B(LdaNamedProperty), R(5), U8(6), U8(9),
+ B(LdaNamedProperty), R(7), U8(4), U8(5),
+ B(Star), R(8),
+ B(CallProperty0), R(8), R(7), U8(7),
+ B(Star), R(8),
+ B(InvokeIntrinsic), U8(Runtime::k_CreateAsyncFromSyncIterator), R(8), U8(1),
B(Star), R(6),
- /* 40 S> */ B(CallProperty0), R(6), R(5), U8(11),
- B(Star), R(21),
- B(Mov), R(3), R(20),
- B(Mov), R(0), R(22),
- B(CallJSRuntime), U8(%async_function_await_uncaught), R(20), U8(3),
- /* 40 E> */ B(SuspendGenerator), R(3), R(0), U8(20), U8(0),
- B(ResumeGenerator), R(3), R(0), U8(20),
- B(Star), R(20),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(3), U8(1),
- B(Star), R(21),
+ B(LdaNamedProperty), R(6), U8(5), U8(9),
+ B(Star), R(5),
+ B(LdaFalse),
+ B(Star), R(9),
+ B(Mov), R(context), R(12),
+ B(LdaTrue),
+ B(Star), R(9),
+ /* 38 S> */ B(CallProperty0), R(5), R(6), U8(11),
+ B(Star), R(15),
+ B(Mov), R(2), R(14),
+ B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionAwaitUncaught), R(14), U8(2),
+ B(SuspendGenerator), R(2), R(0), U8(14), U8(0),
+ B(ResumeGenerator), R(2), R(0), U8(14),
+ B(Star), R(14),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(2), U8(1),
+ B(Star), R(15),
B(LdaZero),
- B(TestReferenceEqual), R(21),
+ B(TestReferenceEqual), R(15),
B(JumpIfTrue), U8(5),
- B(Ldar), R(20),
+ B(Ldar), R(14),
B(ReThrow),
- B(Mov), R(20), R(7),
- /* 40 E> */ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(20), U8(1),
- B(ToBooleanLogicalNot),
- B(JumpIfFalse), U8(7),
- B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(7), U8(1),
- B(LdaNamedProperty), R(7), U8(7), U8(13),
+ B(Ldar), R(14),
+ B(Mov), R(14), R(13),
+ B(JumpIfJSReceiver), U8(7),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(13), U8(1),
+ B(LdaNamedProperty), R(13), U8(6), U8(13),
B(JumpIfToBooleanTrue), U8(27),
- B(LdaNamedProperty), R(7), U8(8), U8(15),
+ B(LdaNamedProperty), R(13), U8(7), U8(15),
+ B(Star), R(13),
+ B(LdaFalse),
B(Star), R(9),
- B(LdaSmi), I8(2),
- B(Star), R(8),
- B(Mov), R(9), R(4),
+ B(Mov), R(13), R(3),
/* 23 E> */ B(StackCheck),
- B(Mov), R(4), R(1),
- /* 56 S> */ B(LdaZero),
- B(Star), R(16),
- B(Mov), R(9), R(17),
- B(Jump), U8(53),
- B(Jump), U8(37),
- B(Star), R(20),
- B(CreateCatchContext), R(20), U8(9),
- B(Star), R(19),
- B(LdaTheHole),
- B(SetPendingMessage),
- B(Ldar), R(19),
- B(PushContext), R(20),
- B(LdaSmi), I8(2),
- B(TestEqualStrict), R(8), U8(17),
- B(JumpIfFalse), U8(6),
- B(LdaSmi), I8(1),
- B(Star), R(8),
- B(LdaImmutableCurrentContextSlot), U8(4),
- B(Star), R(21),
- B(CallRuntime), U16(Runtime::kReThrow), R(21), U8(1),
- B(PopContext), R(20),
+ /* 38 S> */ B(Mov), R(3), R(0),
+ /* 56 S> */ B(LdaSmi), I8(1),
+ B(Mov), R(13), R(11),
+ B(Star), R(10),
+ B(Jump), U8(15),
B(LdaSmi), I8(-1),
- B(Star), R(17),
- B(Star), R(16),
- B(Jump), U8(8),
- B(Star), R(17),
- B(LdaSmi), I8(1),
- B(Star), R(16),
- B(LdaTheHole),
- B(SetPendingMessage),
- B(Star), R(18),
+ B(Star), R(11),
+ B(Star), R(10),
+ B(Jump), U8(7),
+ B(Star), R(11),
B(LdaZero),
- B(TestEqualStrict), R(8), U8(18),
- B(JumpIfTrue), U8(167),
- B(LdaNamedProperty), R(5), U8(10), U8(19),
B(Star), R(10),
- B(TestUndetectable),
- B(JumpIfFalse), U8(4),
- B(Jump), U8(156),
- B(LdaSmi), I8(1),
- B(TestEqualStrict), R(8), U8(21),
- B(JumpIfFalse), U8(86),
- B(Ldar), R(10),
+ B(LdaTheHole),
+ B(SetPendingMessage),
+ B(Star), R(12),
+ B(Ldar), R(9),
+ B(JumpIfToBooleanTrue), U8(96),
+ B(LdaNamedProperty), R(6), U8(8), U8(17),
+ B(Star), R(16),
+ B(JumpIfUndefined), U8(88),
+ B(JumpIfNull), U8(86),
B(TestTypeOf), U8(6),
- B(JumpIfFalse), U8(4),
- B(Jump), U8(18),
+ B(JumpIfTrue), U8(18),
B(Wide), B(LdaSmi), I16(154),
- B(Star), R(19),
- B(LdaConstant), U8(11),
- B(Star), R(20),
- B(CallRuntime), U16(Runtime::kNewTypeError), R(19), U8(2),
+ B(Star), R(17),
+ B(LdaConstant), U8(9),
+ B(Star), R(18),
+ B(CallRuntime), U16(Runtime::kNewTypeError), R(17), U8(2),
B(Throw),
- B(Mov), R(context), R(19),
- B(Mov), R(10), R(20),
- B(Mov), R(5), R(21),
- B(InvokeIntrinsic), U8(Runtime::k_Call), R(20), U8(2),
- B(Star), R(21),
- B(Mov), R(3), R(20),
- B(Mov), R(0), R(22),
- B(CallJSRuntime), U8(%async_function_await_caught), R(20), U8(3),
- B(SuspendGenerator), R(3), R(0), U8(20), U8(1),
- B(ResumeGenerator), R(3), R(0), U8(20),
- B(Star), R(20),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(3), U8(1),
- B(Star), R(21),
+ B(Mov), R(context), R(17),
+ B(CallProperty0), R(16), R(6), U8(19),
+ B(Star), R(19),
+ B(Mov), R(2), R(18),
+ B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionAwaitUncaught), R(18), U8(2),
+ B(SuspendGenerator), R(2), R(0), U8(18), U8(1),
+ B(ResumeGenerator), R(2), R(0), U8(18),
+ B(Star), R(18),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(2), U8(1),
+ B(Star), R(19),
B(LdaZero),
- B(TestReferenceEqual), R(21),
+ B(TestReferenceEqual), R(19),
B(JumpIfTrue), U8(5),
- B(Ldar), R(20),
+ B(Ldar), R(18),
B(ReThrow),
- B(Ldar), R(20),
- B(Jump), U8(6),
- B(LdaTheHole),
- B(SetPendingMessage),
- B(Ldar), R(19),
- B(Jump), U8(65),
- B(Mov), R(10), R(19),
- B(Mov), R(5), R(20),
- B(InvokeIntrinsic), U8(Runtime::k_Call), R(19), U8(2),
- B(Star), R(20),
- B(Mov), R(3), R(19),
- B(Mov), R(0), R(21),
- B(CallJSRuntime), U8(%async_function_await_uncaught), R(19), U8(3),
- B(SuspendGenerator), R(3), R(0), U8(19), U8(2),
- B(ResumeGenerator), R(3), R(0), U8(19),
- B(Star), R(19),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(3), U8(1),
+ B(Ldar), R(18),
+ B(JumpIfJSReceiver), U8(21),
B(Star), R(20),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(20), U8(1),
+ B(Jump), U8(12),
+ B(Star), R(17),
B(LdaZero),
- B(TestReferenceEqual), R(20),
+ B(TestReferenceEqual), R(10),
B(JumpIfTrue), U8(5),
- B(Ldar), R(19),
+ B(Ldar), R(17),
B(ReThrow),
- B(Mov), R(19), R(11),
- B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(19), U8(1),
- B(JumpIfToBooleanFalse), U8(4),
- B(Jump), U8(7),
- B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(11), U8(1),
- B(Ldar), R(18),
+ B(Ldar), R(12),
B(SetPendingMessage),
- B(Ldar), R(16),
- B(SwitchOnSmiNoFeedback), U8(12), U8(2), I8(0),
- B(Jump), U8(13),
- B(LdaZero),
- B(Star), R(12),
- B(Mov), R(17), R(13),
- B(Jump), U8(67),
- B(Ldar), R(17),
+ B(Ldar), R(10),
+ B(SwitchOnSmiNoFeedback), U8(10), U8(2), I8(0),
+ B(Jump), U8(19),
+ B(Ldar), R(11),
B(ReThrow),
+ B(LdaTrue),
+ B(Star), R(18),
+ B(Mov), R(2), R(16),
+ B(Mov), R(11), R(17),
+ B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionResolve), R(16), U8(3),
+ /* 68 S> */ B(Return),
B(LdaUndefined),
- B(Star), R(13),
- B(LdaZero),
- B(Star), R(12),
- B(Jump), U8(56),
- B(Jump), U8(40),
- B(Star), R(16),
- B(CreateCatchContext), R(16), U8(14),
- B(Star), R(15),
+ B(Star), R(6),
+ B(LdaTrue),
+ B(Star), R(7),
+ B(Mov), R(2), R(5),
+ B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionResolve), R(5), U8(3),
+ /* 68 S> */ B(Return),
+ B(Jump), U8(30),
+ B(Star), R(5),
+ B(CreateCatchContext), R(5), U8(12),
+ B(Star), R(4),
B(LdaTheHole),
B(SetPendingMessage),
- B(Ldar), R(15),
- B(PushContext), R(16),
+ B(Ldar), R(4),
+ B(PushContext), R(5),
B(LdaImmutableCurrentContextSlot), U8(4),
- B(Star), R(18),
- B(LdaFalse),
- B(Star), R(19),
- B(Mov), R(0), R(17),
- B(InvokeIntrinsic), U8(Runtime::k_RejectPromise), R(17), U8(3),
- B(PopContext), R(16),
- B(LdaSmi), I8(1),
- B(Star), R(12),
- B(Mov), R(0), R(13),
- B(Jump), U8(16),
- B(LdaSmi), I8(-1),
- B(Star), R(13),
- B(Star), R(12),
- B(Jump), U8(8),
- B(Star), R(13),
- B(LdaSmi), I8(2),
- B(Star), R(12),
- B(LdaTheHole),
- B(SetPendingMessage),
- B(Star), R(14),
+ B(Star), R(7),
B(LdaTrue),
- B(Star), R(16),
- B(Mov), R(0), R(15),
- B(CallJSRuntime), U8(%async_function_promise_release), R(15), U8(2),
- B(Ldar), R(14),
- B(SetPendingMessage),
- B(Ldar), R(12),
- B(SwitchOnSmiNoFeedback), U8(15), U8(3), I8(0),
- B(Jump), U8(21),
- B(Mov), R(0), R(15),
- B(Mov), R(13), R(16),
- B(InvokeIntrinsic), U8(Runtime::k_ResolvePromise), R(15), U8(2),
- B(Ldar), R(0),
- /* 68 S> */ B(Return),
- B(Ldar), R(13),
+ B(Star), R(8),
+ B(Mov), R(2), R(6),
+ B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionReject), R(6), U8(3),
/* 68 S> */ B(Return),
- B(Ldar), R(13),
- B(ReThrow),
B(LdaUndefined),
/* 68 S> */ B(Return),
]
constant pool: [
- Smi [110],
- Smi [309],
- Smi [366],
+ Smi [98],
+ Smi [233],
ARRAY_BOILERPLATE_DESCRIPTION_TYPE,
SYMBOL_TYPE,
SYMBOL_TYPE,
ONE_BYTE_INTERNALIZED_STRING_TYPE ["next"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["done"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["value"],
- SCOPE_INFO_TYPE,
ONE_BYTE_INTERNALIZED_STRING_TYPE ["return"],
ONE_BYTE_INTERNALIZED_STRING_TYPE [""],
Smi [6],
- Smi [14],
+ Smi [9],
SCOPE_INFO_TYPE,
- Smi [6],
- Smi [19],
- Smi [22],
]
handlers: [
- [26, 472, 480],
- [29, 432, 434],
- [35, 213, 221],
- [38, 176, 178],
- [282, 331, 333],
+ [20, 318, 320],
+ [77, 161, 169],
+ [215, 264, 266],
]
---
@@ -536,261 +367,174 @@ snippet: "
}
f();
"
-frame size: 23
+frame size: 21
parameter count: 1
-bytecode array length: 536
+bytecode array length: 345
bytecodes: [
- B(SwitchOnGeneratorState), R(3), U8(0), U8(3),
- B(Mov), R(closure), R(12),
- B(Mov), R(this), R(13),
- B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(12), U8(2),
- B(Star), R(3),
+ B(SwitchOnGeneratorState), R(2), U8(0), U8(2),
+ B(Mov), R(closure), R(4),
+ B(Mov), R(this), R(5),
+ B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionEnter), R(4), U8(2),
+ B(Star), R(2),
/* 16 E> */ B(StackCheck),
- B(CallJSRuntime), U8(%async_function_promise_create), R(0), U8(0),
- B(Star), R(0),
- B(Mov), R(context), R(14),
- B(Mov), R(context), R(15),
- B(LdaZero),
- B(Star), R(8),
- B(Mov), R(context), R(18),
- B(Mov), R(context), R(19),
- /* 43 S> */ B(CreateArrayLiteral), U8(3), U8(0), U8(37),
- B(Star), R(20),
- B(LdaNamedProperty), R(20), U8(4), U8(1),
+ B(Mov), R(context), R(4),
+ /* 43 S> */ B(CreateArrayLiteral), U8(2), U8(0), U8(37),
+ B(Star), R(7),
+ B(LdaNamedProperty), R(7), U8(3), U8(1),
B(JumpIfUndefined), U8(17),
B(JumpIfNull), U8(15),
- B(Star), R(21),
- B(CallProperty0), R(21), R(20), U8(3),
+ B(Star), R(8),
+ B(CallProperty0), R(8), R(7), U8(3),
B(JumpIfJSReceiver), U8(23),
B(CallRuntime), U16(Runtime::kThrowSymbolAsyncIteratorInvalid), R(0), U8(0),
- B(LdaNamedProperty), R(20), U8(5), U8(5),
- B(Star), R(21),
- B(CallProperty0), R(21), R(20), U8(7),
- B(Star), R(21),
- B(InvokeIntrinsic), U8(Runtime::k_CreateAsyncFromSyncIterator), R(21), U8(1),
- B(Star), R(5),
- /* 43 E> */ B(LdaNamedProperty), R(5), U8(6), U8(9),
+ B(LdaNamedProperty), R(7), U8(4), U8(5),
+ B(Star), R(8),
+ B(CallProperty0), R(8), R(7), U8(7),
+ B(Star), R(8),
+ B(InvokeIntrinsic), U8(Runtime::k_CreateAsyncFromSyncIterator), R(8), U8(1),
B(Star), R(6),
- /* 40 S> */ B(CallProperty0), R(6), R(5), U8(11),
- B(Star), R(21),
- B(Mov), R(3), R(20),
- B(Mov), R(0), R(22),
- B(CallJSRuntime), U8(%async_function_await_uncaught), R(20), U8(3),
- /* 40 E> */ B(SuspendGenerator), R(3), R(0), U8(20), U8(0),
- B(ResumeGenerator), R(3), R(0), U8(20),
- B(Star), R(20),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(3), U8(1),
- B(Star), R(21),
+ B(LdaNamedProperty), R(6), U8(5), U8(9),
+ B(Star), R(5),
+ B(LdaFalse),
+ B(Star), R(9),
+ B(Mov), R(context), R(12),
+ B(LdaTrue),
+ B(Star), R(9),
+ /* 38 S> */ B(CallProperty0), R(5), R(6), U8(11),
+ B(Star), R(15),
+ B(Mov), R(2), R(14),
+ B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionAwaitUncaught), R(14), U8(2),
+ B(SuspendGenerator), R(2), R(0), U8(14), U8(0),
+ B(ResumeGenerator), R(2), R(0), U8(14),
+ B(Star), R(14),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(2), U8(1),
+ B(Star), R(15),
B(LdaZero),
- B(TestReferenceEqual), R(21),
+ B(TestReferenceEqual), R(15),
B(JumpIfTrue), U8(5),
- B(Ldar), R(20),
+ B(Ldar), R(14),
B(ReThrow),
- B(Mov), R(20), R(7),
- /* 40 E> */ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(20), U8(1),
- B(ToBooleanLogicalNot),
- B(JumpIfFalse), U8(7),
- B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(7), U8(1),
- B(LdaNamedProperty), R(7), U8(7), U8(13),
- B(JumpIfToBooleanTrue), U8(43),
- B(LdaNamedProperty), R(7), U8(8), U8(15),
+ B(Ldar), R(14),
+ B(Mov), R(14), R(13),
+ B(JumpIfJSReceiver), U8(7),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(13), U8(1),
+ B(LdaNamedProperty), R(13), U8(6), U8(13),
+ B(JumpIfToBooleanTrue), U8(39),
+ B(LdaNamedProperty), R(13), U8(7), U8(15),
+ B(Star), R(13),
+ B(LdaFalse),
B(Star), R(9),
- B(LdaSmi), I8(2),
- B(Star), R(8),
- B(Mov), R(9), R(4),
+ B(Mov), R(13), R(3),
/* 23 E> */ B(StackCheck),
- B(Mov), R(4), R(1),
+ /* 38 S> */ B(Mov), R(3), R(0),
/* 63 S> */ B(LdaSmi), I8(10),
- /* 69 E> */ B(TestEqual), R(1), U8(17),
+ /* 69 E> */ B(TestEqual), R(0), U8(17),
B(JumpIfFalse), U8(4),
- /* 76 S> */ B(Jump), U8(14),
+ /* 76 S> */ B(Jump), U8(11),
/* 90 S> */ B(LdaSmi), I8(20),
- /* 96 E> */ B(TestEqual), R(1), U8(18),
+ /* 96 E> */ B(TestEqual), R(0), U8(18),
B(JumpIfFalse), U8(4),
- /* 103 S> */ B(Jump), U8(8),
- B(LdaZero),
- B(Star), R(8),
- B(JumpLoop), U8(100), I8(0),
- B(Jump), U8(37),
- B(Star), R(20),
- B(CreateCatchContext), R(20), U8(9),
- B(Star), R(19),
- B(LdaTheHole),
- B(SetPendingMessage),
- B(Ldar), R(19),
- B(PushContext), R(20),
- B(LdaSmi), I8(2),
- B(TestEqualStrict), R(8), U8(19),
- B(JumpIfFalse), U8(6),
- B(LdaSmi), I8(1),
- B(Star), R(8),
- B(LdaImmutableCurrentContextSlot), U8(4),
- B(Star), R(21),
- B(CallRuntime), U16(Runtime::kReThrow), R(21), U8(1),
- B(PopContext), R(20),
+ /* 103 S> */ B(Jump), U8(5),
+ B(JumpLoop), U8(93), I8(0),
B(LdaSmi), I8(-1),
- B(Star), R(17),
- B(Star), R(16),
+ B(Star), R(11),
+ B(Star), R(10),
B(Jump), U8(7),
- B(Star), R(17),
+ B(Star), R(11),
B(LdaZero),
- B(Star), R(16),
+ B(Star), R(10),
B(LdaTheHole),
B(SetPendingMessage),
- B(Star), R(18),
- B(LdaZero),
- B(TestEqualStrict), R(8), U8(20),
- B(JumpIfTrue), U8(167),
- B(LdaNamedProperty), R(5), U8(10), U8(21),
- B(Star), R(10),
- B(TestUndetectable),
- B(JumpIfFalse), U8(4),
- B(Jump), U8(156),
- B(LdaSmi), I8(1),
- B(TestEqualStrict), R(8), U8(23),
- B(JumpIfFalse), U8(86),
- B(Ldar), R(10),
+ B(Star), R(12),
+ B(Ldar), R(9),
+ B(JumpIfToBooleanTrue), U8(96),
+ B(LdaNamedProperty), R(6), U8(8), U8(19),
+ B(Star), R(16),
+ B(JumpIfUndefined), U8(88),
+ B(JumpIfNull), U8(86),
B(TestTypeOf), U8(6),
- B(JumpIfFalse), U8(4),
- B(Jump), U8(18),
+ B(JumpIfTrue), U8(18),
B(Wide), B(LdaSmi), I16(154),
- B(Star), R(19),
- B(LdaConstant), U8(11),
- B(Star), R(20),
- B(CallRuntime), U16(Runtime::kNewTypeError), R(19), U8(2),
+ B(Star), R(17),
+ B(LdaConstant), U8(9),
+ B(Star), R(18),
+ B(CallRuntime), U16(Runtime::kNewTypeError), R(17), U8(2),
B(Throw),
- B(Mov), R(context), R(19),
- B(Mov), R(10), R(20),
- B(Mov), R(5), R(21),
- B(InvokeIntrinsic), U8(Runtime::k_Call), R(20), U8(2),
- B(Star), R(21),
- B(Mov), R(3), R(20),
- B(Mov), R(0), R(22),
- B(CallJSRuntime), U8(%async_function_await_caught), R(20), U8(3),
- B(SuspendGenerator), R(3), R(0), U8(20), U8(1),
- B(ResumeGenerator), R(3), R(0), U8(20),
- B(Star), R(20),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(3), U8(1),
- B(Star), R(21),
+ B(Mov), R(context), R(17),
+ B(CallProperty0), R(16), R(6), U8(21),
+ B(Star), R(19),
+ B(Mov), R(2), R(18),
+ B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionAwaitUncaught), R(18), U8(2),
+ B(SuspendGenerator), R(2), R(0), U8(18), U8(1),
+ B(ResumeGenerator), R(2), R(0), U8(18),
+ B(Star), R(18),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(2), U8(1),
+ B(Star), R(19),
B(LdaZero),
- B(TestReferenceEqual), R(21),
+ B(TestReferenceEqual), R(19),
B(JumpIfTrue), U8(5),
- B(Ldar), R(20),
+ B(Ldar), R(18),
B(ReThrow),
- B(Ldar), R(20),
- B(Jump), U8(6),
- B(LdaTheHole),
- B(SetPendingMessage),
- B(Ldar), R(19),
- B(Jump), U8(65),
- B(Mov), R(10), R(19),
- B(Mov), R(5), R(20),
- B(InvokeIntrinsic), U8(Runtime::k_Call), R(19), U8(2),
- B(Star), R(20),
- B(Mov), R(3), R(19),
- B(Mov), R(0), R(21),
- B(CallJSRuntime), U8(%async_function_await_uncaught), R(19), U8(3),
- B(SuspendGenerator), R(3), R(0), U8(19), U8(2),
- B(ResumeGenerator), R(3), R(0), U8(19),
- B(Star), R(19),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(3), U8(1),
+ B(Ldar), R(18),
+ B(JumpIfJSReceiver), U8(21),
B(Star), R(20),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(20), U8(1),
+ B(Jump), U8(12),
+ B(Star), R(17),
B(LdaZero),
- B(TestReferenceEqual), R(20),
+ B(TestReferenceEqual), R(10),
B(JumpIfTrue), U8(5),
- B(Ldar), R(19),
+ B(Ldar), R(17),
B(ReThrow),
- B(Mov), R(19), R(11),
- B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(19), U8(1),
- B(JumpIfToBooleanFalse), U8(4),
- B(Jump), U8(7),
- B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(11), U8(1),
- B(Ldar), R(18),
+ B(Ldar), R(12),
B(SetPendingMessage),
B(LdaZero),
- B(TestReferenceEqual), R(16),
+ B(TestReferenceEqual), R(10),
B(JumpIfFalse), U8(5),
- B(Ldar), R(17),
+ B(Ldar), R(11),
B(ReThrow),
B(LdaUndefined),
- B(Star), R(13),
- B(LdaZero),
- B(Star), R(12),
- B(Jump), U8(56),
- B(Jump), U8(40),
- B(Star), R(16),
- B(CreateCatchContext), R(16), U8(12),
- B(Star), R(15),
+ B(Star), R(6),
+ B(LdaTrue),
+ B(Star), R(7),
+ B(Mov), R(2), R(5),
+ B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionResolve), R(5), U8(3),
+ /* 114 S> */ B(Return),
+ B(Jump), U8(30),
+ B(Star), R(5),
+ B(CreateCatchContext), R(5), U8(10),
+ B(Star), R(4),
B(LdaTheHole),
B(SetPendingMessage),
- B(Ldar), R(15),
- B(PushContext), R(16),
+ B(Ldar), R(4),
+ B(PushContext), R(5),
B(LdaImmutableCurrentContextSlot), U8(4),
- B(Star), R(18),
- B(LdaFalse),
- B(Star), R(19),
- B(Mov), R(0), R(17),
- B(InvokeIntrinsic), U8(Runtime::k_RejectPromise), R(17), U8(3),
- B(PopContext), R(16),
- B(LdaSmi), I8(1),
- B(Star), R(12),
- B(Mov), R(0), R(13),
- B(Jump), U8(16),
- B(LdaSmi), I8(-1),
- B(Star), R(13),
- B(Star), R(12),
- B(Jump), U8(8),
- B(Star), R(13),
- B(LdaSmi), I8(2),
- B(Star), R(12),
- B(LdaTheHole),
- B(SetPendingMessage),
- B(Star), R(14),
+ B(Star), R(7),
B(LdaTrue),
- B(Star), R(16),
- B(Mov), R(0), R(15),
- B(CallJSRuntime), U8(%async_function_promise_release), R(15), U8(2),
- B(Ldar), R(14),
- B(SetPendingMessage),
- B(Ldar), R(12),
- B(SwitchOnSmiNoFeedback), U8(13), U8(3), I8(0),
- B(Jump), U8(21),
- B(Mov), R(0), R(15),
- B(Mov), R(13), R(16),
- B(InvokeIntrinsic), U8(Runtime::k_ResolvePromise), R(15), U8(2),
- B(Ldar), R(0),
- /* 114 S> */ B(Return),
- B(Ldar), R(13),
+ B(Star), R(8),
+ B(Mov), R(2), R(6),
+ B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionReject), R(6), U8(3),
/* 114 S> */ B(Return),
- B(Ldar), R(13),
- B(ReThrow),
B(LdaUndefined),
/* 114 S> */ B(Return),
]
constant pool: [
- Smi [110],
- Smi [324],
- Smi [381],
+ Smi [98],
+ Smi [245],
ARRAY_BOILERPLATE_DESCRIPTION_TYPE,
SYMBOL_TYPE,
SYMBOL_TYPE,
ONE_BYTE_INTERNALIZED_STRING_TYPE ["next"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["done"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["value"],
- SCOPE_INFO_TYPE,
ONE_BYTE_INTERNALIZED_STRING_TYPE ["return"],
ONE_BYTE_INTERNALIZED_STRING_TYPE [""],
SCOPE_INFO_TYPE,
- Smi [6],
- Smi [19],
- Smi [22],
]
handlers: [
- [26, 476, 484],
- [29, 436, 438],
- [35, 229, 237],
- [38, 192, 194],
- [297, 346, 348],
+ [20, 313, 315],
+ [77, 173, 181],
+ [227, 276, 278],
]
---
@@ -801,179 +545,123 @@ snippet: "
}
f();
"
-frame size: 20
+frame size: 16
parameter count: 1
-bytecode array length: 392
+bytecode array length: 265
bytecodes: [
- /* 16 E> */ B(StackCheck),
- B(CallJSRuntime), U8(%async_function_promise_create), R(0), U8(0),
+ B(Mov), R(closure), R(2),
+ B(Mov), R(this), R(3),
+ B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionEnter), R(2), U8(2),
B(Star), R(0),
- B(Mov), R(context), R(12),
- B(Mov), R(context), R(13),
- /* 31 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41), R(14),
- B(Mov), R(14), R(2),
- B(LdaZero),
- B(Star), R(6),
- B(Mov), R(context), R(16),
- B(Mov), R(context), R(17),
+ /* 16 E> */ B(StackCheck),
+ B(Mov), R(context), R(2),
+ /* 31 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41),
+ B(Star), R(1),
/* 68 S> */ B(CreateArrayLiteral), U8(1), U8(1), U8(37),
- B(Star), R(18),
- B(LdaNamedProperty), R(18), U8(2), U8(2),
- B(Star), R(19),
- B(CallProperty0), R(19), R(18), U8(4),
+ B(Star), R(5),
+ B(LdaNamedProperty), R(5), U8(2), U8(2),
+ B(Star), R(6),
+ B(CallProperty0), R(6), R(5), U8(4),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
- B(Star), R(3),
- /* 68 E> */ B(LdaNamedProperty), R(3), U8(3), U8(6),
B(Star), R(4),
- /* 59 S> */ B(CallProperty0), R(4), R(3), U8(8),
- B(Star), R(5),
- /* 59 E> */ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(5), U8(1),
- B(ToBooleanLogicalNot),
- B(JumpIfFalse), U8(7),
- B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(5), U8(1),
- B(LdaNamedProperty), R(5), U8(4), U8(10),
- B(JumpIfToBooleanTrue), U8(30),
- /* 58 E> */ B(LdaNamedProperty), R(5), U8(5), U8(12),
+ B(LdaNamedProperty), R(4), U8(3), U8(6),
+ B(Star), R(3),
+ B(LdaFalse),
B(Star), R(7),
- B(LdaSmi), I8(2),
- B(Star), R(6),
- B(Ldar), R(7),
- B(StaNamedProperty), R(2), U8(6), U8(14),
+ B(Mov), R(context), R(10),
+ B(LdaTrue),
+ B(Star), R(7),
+ /* 59 S> */ B(CallProperty0), R(3), R(4), U8(8),
+ B(Star), R(11),
+ B(JumpIfJSReceiver), U8(7),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(11), U8(1),
+ B(LdaNamedProperty), R(11), U8(4), U8(10),
+ B(JumpIfToBooleanTrue), U8(33),
+ B(LdaNamedProperty), R(11), U8(5), U8(12),
+ B(Star), R(11),
+ B(LdaFalse),
+ B(Star), R(7),
+ B(Ldar), R(11),
+ /* 58 E> */ B(StaNamedProperty), R(1), U8(6), U8(14),
/* 53 E> */ B(StackCheck),
- /* 87 S> */ B(LdaNamedProperty), R(2), U8(6), U8(16),
- B(Star), R(15),
- B(LdaZero),
- B(Star), R(14),
- B(Jump), U8(53),
- B(Jump), U8(37),
- B(Star), R(18),
- B(CreateCatchContext), R(18), U8(7),
- B(Star), R(17),
- B(LdaTheHole),
- B(SetPendingMessage),
- B(Ldar), R(17),
- B(PushContext), R(18),
- B(LdaSmi), I8(2),
- B(TestEqualStrict), R(6), U8(18),
- B(JumpIfFalse), U8(6),
+ /* 87 S> */ B(LdaNamedProperty), R(1), U8(6), U8(16),
+ B(Star), R(9),
B(LdaSmi), I8(1),
- B(Star), R(6),
- B(LdaImmutableCurrentContextSlot), U8(4),
- B(Star), R(19),
- B(CallRuntime), U16(Runtime::kReThrow), R(19), U8(1),
- B(PopContext), R(18),
+ B(Star), R(8),
+ B(Mov), R(1), R(12),
+ B(Jump), U8(15),
B(LdaSmi), I8(-1),
- B(Star), R(15),
- B(Star), R(14),
- B(Jump), U8(8),
- B(Star), R(15),
- B(LdaSmi), I8(1),
- B(Star), R(14),
- B(LdaTheHole),
- B(SetPendingMessage),
- B(Star), R(16),
+ B(Star), R(9),
+ B(Star), R(8),
+ B(Jump), U8(7),
+ B(Star), R(9),
B(LdaZero),
- B(TestEqualStrict), R(6), U8(19),
- B(JumpIfTrue), U8(90),
- B(LdaNamedProperty), R(3), U8(8), U8(20),
B(Star), R(8),
- B(TestUndetectable),
- B(JumpIfFalse), U8(4),
- B(Jump), U8(79),
- B(LdaSmi), I8(1),
- B(TestEqualStrict), R(6), U8(22),
- B(JumpIfFalse), U8(47),
- B(Ldar), R(8),
+ B(LdaTheHole),
+ B(SetPendingMessage),
+ B(Star), R(10),
+ B(Ldar), R(7),
+ B(JumpIfToBooleanTrue), U8(60),
+ B(LdaNamedProperty), R(4), U8(7), U8(18),
+ B(Star), R(13),
+ B(JumpIfUndefined), U8(52),
+ B(JumpIfNull), U8(50),
B(TestTypeOf), U8(6),
- B(JumpIfFalse), U8(4),
- B(Jump), U8(18),
+ B(JumpIfTrue), U8(18),
B(Wide), B(LdaSmi), I16(154),
- B(Star), R(17),
- B(LdaConstant), U8(9),
- B(Star), R(18),
- B(CallRuntime), U16(Runtime::kNewTypeError), R(17), U8(2),
+ B(Star), R(14),
+ B(LdaConstant), U8(8),
+ B(Star), R(15),
+ B(CallRuntime), U16(Runtime::kNewTypeError), R(14), U8(2),
B(Throw),
- B(Mov), R(context), R(17),
- B(Mov), R(8), R(18),
- B(Mov), R(3), R(19),
- B(InvokeIntrinsic), U8(Runtime::k_Call), R(18), U8(2),
- B(Jump), U8(6),
- B(LdaTheHole),
- B(SetPendingMessage),
- B(Ldar), R(17),
- B(Jump), U8(27),
- B(Mov), R(8), R(17),
- B(Mov), R(3), R(18),
- B(InvokeIntrinsic), U8(Runtime::k_Call), R(17), U8(2),
- B(Star), R(9),
- B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(9), U8(1),
- B(JumpIfToBooleanFalse), U8(4),
- B(Jump), U8(7),
- B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(9), U8(1),
- B(Ldar), R(16),
- B(SetPendingMessage),
- B(Ldar), R(14),
- B(SwitchOnSmiNoFeedback), U8(10), U8(2), I8(0),
- B(Jump), U8(13),
+ B(Mov), R(context), R(14),
+ B(CallProperty0), R(13), R(4), U8(20),
+ B(JumpIfJSReceiver), U8(21),
+ B(Star), R(15),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(15), U8(1),
+ B(Jump), U8(12),
+ B(Star), R(14),
B(LdaZero),
- B(Star), R(10),
- B(Mov), R(15), R(11),
- B(Jump), U8(67),
- B(Ldar), R(15),
+ B(TestReferenceEqual), R(8),
+ B(JumpIfTrue), U8(5),
+ B(Ldar), R(14),
B(ReThrow),
- B(LdaUndefined),
- B(Star), R(11),
- B(LdaZero),
- B(Star), R(10),
- B(Jump), U8(56),
- B(Jump), U8(40),
- B(Star), R(14),
- B(CreateCatchContext), R(14), U8(12),
- B(Star), R(13),
- B(LdaTheHole),
+ B(Ldar), R(10),
B(SetPendingMessage),
- B(Ldar), R(13),
- B(PushContext), R(14),
- B(LdaImmutableCurrentContextSlot), U8(4),
- B(Star), R(16),
+ B(Ldar), R(8),
+ B(SwitchOnSmiNoFeedback), U8(9), U8(2), I8(0),
+ B(Jump), U8(19),
+ B(Ldar), R(9),
+ B(ReThrow),
B(LdaFalse),
- B(Star), R(17),
- B(Mov), R(0), R(15),
- B(InvokeIntrinsic), U8(Runtime::k_RejectPromise), R(15), U8(3),
- B(PopContext), R(14),
- B(LdaSmi), I8(1),
- B(Star), R(10),
- B(Mov), R(0), R(11),
- B(Jump), U8(16),
- B(LdaSmi), I8(-1),
- B(Star), R(11),
- B(Star), R(10),
- B(Jump), U8(8),
- B(Star), R(11),
- B(LdaSmi), I8(2),
- B(Star), R(10),
+ B(Star), R(15),
+ B(Mov), R(0), R(13),
+ B(Mov), R(9), R(14),
+ B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionResolve), R(13), U8(3),
+ /* 96 S> */ B(Return),
+ B(LdaUndefined),
+ B(Star), R(4),
+ B(LdaFalse),
+ B(Star), R(5),
+ B(Mov), R(0), R(3),
+ B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionResolve), R(3), U8(3),
+ /* 96 S> */ B(Return),
+ B(Jump), U8(30),
+ B(Star), R(3),
+ B(CreateCatchContext), R(3), U8(11),
+ B(Star), R(2),
B(LdaTheHole),
B(SetPendingMessage),
- B(Star), R(12),
+ B(Ldar), R(2),
+ B(PushContext), R(3),
+ B(LdaImmutableCurrentContextSlot), U8(4),
+ B(Star), R(5),
B(LdaFalse),
- B(Star), R(14),
- B(Mov), R(0), R(13),
- B(CallJSRuntime), U8(%async_function_promise_release), R(13), U8(2),
- B(Ldar), R(12),
- B(SetPendingMessage),
- B(Ldar), R(10),
- B(SwitchOnSmiNoFeedback), U8(13), U8(3), I8(0),
- B(Jump), U8(21),
- B(Mov), R(0), R(13),
- B(Mov), R(11), R(14),
- B(InvokeIntrinsic), U8(Runtime::k_ResolvePromise), R(13), U8(2),
- B(Ldar), R(0),
- /* 96 S> */ B(Return),
- B(Ldar), R(11),
+ B(Star), R(6),
+ B(Mov), R(0), R(4),
+ B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionReject), R(4), U8(3),
/* 96 S> */ B(Return),
- B(Ldar), R(11),
- B(ReThrow),
B(LdaUndefined),
/* 96 S> */ B(Return),
]
@@ -985,21 +673,15 @@ constant pool: [
ONE_BYTE_INTERNALIZED_STRING_TYPE ["done"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["value"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["a"],
- SCOPE_INFO_TYPE,
ONE_BYTE_INTERNALIZED_STRING_TYPE ["return"],
ONE_BYTE_INTERNALIZED_STRING_TYPE [""],
Smi [6],
- Smi [14],
+ Smi [9],
SCOPE_INFO_TYPE,
- Smi [6],
- Smi [19],
- Smi [22],
]
handlers: [
- [10, 332, 340],
- [13, 292, 294],
- [27, 150, 158],
- [30, 113, 115],
- [219, 229, 231],
+ [16, 233, 235],
+ [59, 112, 120],
+ [166, 179, 181],
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ForIn.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ForIn.golden
index 73b72d4d20..8c24e461cd 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ForIn.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ForIn.golden
@@ -79,9 +79,9 @@ bytecodes: [
B(JumpIfFalse), U8(22),
B(ForInNext), R(3), R(7), R(4), U8(0),
B(JumpIfUndefined), U8(8),
- B(Star), R(1),
- /* 54 E> */ B(StackCheck),
B(Star), R(2),
+ /* 54 E> */ B(StackCheck),
+ /* 63 S> */ B(Star), R(1),
/* 82 S> */ B(Return),
B(ForInStep), R(7),
B(Star), R(7),
@@ -119,10 +119,10 @@ bytecodes: [
B(JumpIfFalse), U8(31),
B(ForInNext), R(3), R(7), R(4), U8(0),
B(JumpIfUndefined), U8(17),
- B(Star), R(1),
- /* 45 E> */ B(StackCheck),
B(Star), R(2),
- /* 70 S> */ B(Ldar), R(1),
+ /* 45 E> */ B(StackCheck),
+ /* 54 S> */ B(Star), R(1),
+ /* 70 S> */ B(Ldar), R(2),
/* 75 E> */ B(Add), R(0), U8(2),
B(Mov), R(0), R(8),
B(Star), R(0),
@@ -148,11 +148,11 @@ snippet: "
"
frame size: 7
parameter count: 1
-bytecode array length: 87
+bytecode array length: 85
bytecodes: [
/* 30 E> */ B(StackCheck),
- /* 42 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41), R(1),
- B(Mov), R(1), R(0),
+ /* 42 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41),
+ B(Star), R(0),
/* 77 S> */ B(CreateArrayLiteral), U8(1), U8(2), U8(37),
B(JumpIfUndefined), U8(72),
B(JumpIfNull), U8(70),
@@ -167,7 +167,7 @@ bytecodes: [
B(JumpIfUndefined), U8(41),
B(Star), R(6),
B(Ldar), R(6),
- /* 67 E> */ B(StaNamedProperty), R(0), U8(2), U8(3),
+ /* 68 E> */ B(StaNamedProperty), R(0), U8(2), U8(3),
/* 62 E> */ B(StackCheck),
/* 100 S> */ B(LdaNamedProperty), R(0), U8(2), U8(5),
B(Star), R(6),
@@ -223,7 +223,7 @@ bytecodes: [
B(LdaZero),
B(Star), R(8),
B(Ldar), R(6),
- /* 64 E> */ B(StaKeyedProperty), R(0), R(8), U8(3),
+ /* 65 E> */ B(StaKeyedProperty), R(0), R(8), U8(3),
/* 59 E> */ B(StackCheck),
/* 83 S> */ B(LdaSmi), I8(3),
/* 91 E> */ B(LdaKeyedProperty), R(0), U8(5),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOf.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOf.golden
index b43429e008..67f5c389e6 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOf.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOf.golden
@@ -9,111 +9,83 @@ wrap: yes
snippet: "
for (var p of [0, 1, 2]) {}
"
-frame size: 15
+frame size: 14
parameter count: 1
-bytecode array length: 245
+bytecode array length: 173
bytecodes: [
/* 30 E> */ B(StackCheck),
- B(LdaZero),
- B(Star), R(5),
- B(Mov), R(context), R(11),
- B(Mov), R(context), R(12),
/* 48 S> */ B(CreateArrayLiteral), U8(0), U8(0), U8(37),
- B(Star), R(13),
- B(LdaNamedProperty), R(13), U8(1), U8(1),
- B(Star), R(14),
- B(CallProperty0), R(14), R(13), U8(3),
+ B(Star), R(4),
+ B(LdaNamedProperty), R(4), U8(1), U8(1),
+ B(Star), R(5),
+ B(CallProperty0), R(5), R(4), U8(3),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
- B(Star), R(2),
- /* 48 E> */ B(LdaNamedProperty), R(2), U8(2), U8(5),
B(Star), R(3),
- /* 43 S> */ B(CallProperty0), R(3), R(2), U8(7),
- B(Star), R(4),
- /* 43 E> */ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(4), U8(1),
- B(ToBooleanLogicalNot),
- B(JumpIfFalse), U8(7),
- B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(4), U8(1),
- B(LdaNamedProperty), R(4), U8(3), U8(9),
- B(JumpIfToBooleanTrue), U8(25),
- B(LdaNamedProperty), R(4), U8(4), U8(11),
+ B(LdaNamedProperty), R(3), U8(2), U8(5),
+ B(Star), R(2),
+ B(LdaFalse),
B(Star), R(6),
- B(LdaSmi), I8(2),
- B(Star), R(5),
- B(Mov), R(6), R(0),
+ B(Mov), R(context), R(9),
+ B(LdaTrue),
+ B(Star), R(6),
+ /* 43 S> */ B(CallProperty0), R(2), R(3), U8(7),
+ B(Star), R(10),
+ B(JumpIfJSReceiver), U8(7),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(10), U8(1),
+ B(LdaNamedProperty), R(10), U8(3), U8(9),
+ B(JumpIfToBooleanTrue), U8(23),
+ B(LdaNamedProperty), R(10), U8(4), U8(11),
+ B(Star), R(10),
+ B(LdaFalse),
+ B(Star), R(6),
+ B(Mov), R(10), R(1),
/* 34 E> */ B(StackCheck),
- B(Mov), R(0), R(1),
- B(LdaZero),
- B(Star), R(5),
- B(JumpLoop), U8(44), I8(0),
- B(Jump), U8(33),
- B(Star), R(13),
- B(CreateCatchContext), R(13), U8(5),
- B(PushContext), R(13),
- B(Star), R(12),
- B(LdaSmi), I8(2),
- B(TestEqualStrict), R(5), U8(13),
- B(JumpIfFalse), U8(6),
- B(LdaSmi), I8(1),
- B(Star), R(5),
- B(LdaImmutableCurrentContextSlot), U8(4),
- B(Star), R(14),
- B(CallRuntime), U16(Runtime::kReThrow), R(14), U8(1),
- B(PopContext), R(13),
+ /* 43 S> */ B(Mov), R(1), R(0),
+ B(Ldar), R(10),
+ B(JumpLoop), U8(40), I8(0),
B(LdaSmi), I8(-1),
- B(Star), R(10),
- B(Star), R(9),
+ B(Star), R(8),
+ B(Star), R(7),
B(Jump), U8(7),
- B(Star), R(10),
+ B(Star), R(8),
B(LdaZero),
- B(Star), R(9),
+ B(Star), R(7),
B(LdaTheHole),
- B(SetPendingMessage),
+ /* 43 E> */ B(SetPendingMessage),
+ B(Star), R(9),
+ B(Ldar), R(6),
+ B(JumpIfToBooleanTrue), U8(60),
+ B(LdaNamedProperty), R(3), U8(5), U8(13),
B(Star), R(11),
- B(LdaZero),
- B(TestEqualStrict), R(5), U8(14),
- B(JumpIfTrue), U8(90),
- B(LdaNamedProperty), R(2), U8(6), U8(15),
- B(Star), R(7),
- B(TestUndetectable),
- B(JumpIfFalse), U8(4),
- B(Jump), U8(79),
- B(LdaSmi), I8(1),
- B(TestEqualStrict), R(5), U8(17),
- B(JumpIfFalse), U8(47),
- B(Ldar), R(7),
+ B(JumpIfUndefined), U8(52),
+ B(JumpIfNull), U8(50),
B(TestTypeOf), U8(6),
- B(JumpIfFalse), U8(4),
- B(Jump), U8(18),
+ B(JumpIfTrue), U8(18),
B(Wide), B(LdaSmi), I16(154),
B(Star), R(12),
- B(LdaConstant), U8(7),
+ B(LdaConstant), U8(6),
B(Star), R(13),
B(CallRuntime), U16(Runtime::kNewTypeError), R(12), U8(2),
B(Throw),
B(Mov), R(context), R(12),
- B(Mov), R(7), R(13),
- B(Mov), R(2), R(14),
- B(InvokeIntrinsic), U8(Runtime::k_Call), R(13), U8(2),
- B(Jump), U8(6),
- B(LdaTheHole),
- B(SetPendingMessage),
+ B(CallProperty0), R(11), R(3), U8(15),
+ B(JumpIfJSReceiver), U8(21),
+ B(Star), R(13),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(13), U8(1),
+ B(Jump), U8(12),
+ B(Star), R(12),
+ B(LdaZero),
+ B(TestReferenceEqual), R(7),
+ B(JumpIfTrue), U8(5),
B(Ldar), R(12),
- B(Jump), U8(27),
- B(Mov), R(7), R(12),
- B(Mov), R(2), R(13),
- B(InvokeIntrinsic), U8(Runtime::k_Call), R(12), U8(2),
- B(Star), R(8),
- B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(8), U8(1),
- B(JumpIfToBooleanFalse), U8(4),
- B(Jump), U8(7),
- B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(8), U8(1),
- B(Ldar), R(11),
+ B(ReThrow),
+ B(Ldar), R(9),
B(SetPendingMessage),
B(LdaZero),
- B(TestReferenceEqual), R(9),
+ B(TestReferenceEqual), R(7),
B(JumpIfFalse), U8(5),
- B(Ldar), R(10),
+ B(Ldar), R(8),
B(ReThrow),
B(LdaUndefined),
/* 62 S> */ B(Return),
@@ -124,14 +96,12 @@ constant pool: [
ONE_BYTE_INTERNALIZED_STRING_TYPE ["next"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["done"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["value"],
- SCOPE_INFO_TYPE,
ONE_BYTE_INTERNALIZED_STRING_TYPE ["return"],
ONE_BYTE_INTERNALIZED_STRING_TYPE [""],
]
handlers: [
- [7, 121, 129],
- [10, 88, 90],
- [189, 199, 201],
+ [38, 81, 89],
+ [135, 148, 150],
]
---
@@ -139,116 +109,89 @@ snippet: "
var x = 'potatoes';
for (var p of x) { return p; }
"
-frame size: 16
+frame size: 15
parameter count: 1
-bytecode array length: 255
+bytecode array length: 184
bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaConstant), U8(0),
B(Star), R(0),
- B(LdaZero),
- B(Star), R(6),
- B(Mov), R(context), R(12),
- B(Mov), R(context), R(13),
/* 68 S> */ B(LdaNamedProperty), R(0), U8(1), U8(0),
- B(Star), R(15),
- B(CallProperty0), R(15), R(0), U8(2),
- B(Mov), R(0), R(14),
+ B(Star), R(6),
+ B(CallProperty0), R(6), R(0), U8(2),
+ B(Mov), R(0), R(5),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
- B(Star), R(3),
- /* 68 E> */ B(LdaNamedProperty), R(3), U8(2), U8(4),
B(Star), R(4),
- /* 63 S> */ B(CallProperty0), R(4), R(3), U8(6),
- B(Star), R(5),
- /* 63 E> */ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(5), U8(1),
- B(ToBooleanLogicalNot),
- B(JumpIfFalse), U8(7),
- B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(5), U8(1),
- B(LdaNamedProperty), R(5), U8(3), U8(8),
+ B(LdaNamedProperty), R(4), U8(2), U8(4),
+ B(Star), R(3),
+ B(LdaFalse),
+ B(Star), R(7),
+ B(Mov), R(context), R(10),
+ B(LdaTrue),
+ B(Star), R(7),
+ /* 63 S> */ B(CallProperty0), R(3), R(4), U8(6),
+ B(Star), R(11),
+ B(JumpIfJSReceiver), U8(7),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(11), U8(1),
+ B(LdaNamedProperty), R(11), U8(3), U8(8),
B(JumpIfToBooleanTrue), U8(27),
- B(LdaNamedProperty), R(5), U8(4), U8(10),
+ B(LdaNamedProperty), R(11), U8(4), U8(10),
+ B(Star), R(11),
+ B(LdaFalse),
B(Star), R(7),
- B(LdaSmi), I8(2),
- B(Star), R(6),
- B(Mov), R(7), R(1),
+ B(Mov), R(11), R(2),
/* 54 E> */ B(StackCheck),
- B(Mov), R(1), R(2),
- /* 73 S> */ B(LdaZero),
- B(Star), R(10),
- B(Mov), R(7), R(11),
- B(Jump), U8(49),
- B(Jump), U8(33),
- B(Star), R(14),
- B(CreateCatchContext), R(14), U8(5),
- B(PushContext), R(14),
- B(Star), R(13),
- B(LdaSmi), I8(2),
- B(TestEqualStrict), R(6), U8(12),
- B(JumpIfFalse), U8(6),
- B(LdaSmi), I8(1),
- B(Star), R(6),
- B(LdaImmutableCurrentContextSlot), U8(4),
- B(Star), R(15),
- B(CallRuntime), U16(Runtime::kReThrow), R(15), U8(1),
- B(PopContext), R(14),
+ /* 63 S> */ B(Mov), R(2), R(1),
+ /* 73 S> */ B(LdaSmi), I8(1),
+ B(Mov), R(11), R(9),
+ B(Star), R(8),
+ B(Jump), U8(15),
B(LdaSmi), I8(-1),
- B(Star), R(11),
- B(Star), R(10),
- B(Jump), U8(8),
- B(Star), R(11),
- B(LdaSmi), I8(1),
- B(Star), R(10),
+ B(Star), R(9),
+ B(Star), R(8),
+ B(Jump), U8(7),
+ B(Star), R(9),
+ B(LdaZero),
+ B(Star), R(8),
B(LdaTheHole),
B(SetPendingMessage),
+ B(Star), R(10),
+ B(Ldar), R(7),
+ B(JumpIfToBooleanTrue), U8(60),
+ B(LdaNamedProperty), R(4), U8(5), U8(12),
B(Star), R(12),
- B(LdaZero),
- B(TestEqualStrict), R(6), U8(13),
- B(JumpIfTrue), U8(90),
- B(LdaNamedProperty), R(3), U8(6), U8(14),
- B(Star), R(8),
- B(TestUndetectable),
- B(JumpIfFalse), U8(4),
- B(Jump), U8(79),
- B(LdaSmi), I8(1),
- B(TestEqualStrict), R(6), U8(16),
- B(JumpIfFalse), U8(47),
- B(Ldar), R(8),
+ B(JumpIfUndefined), U8(52),
+ B(JumpIfNull), U8(50),
B(TestTypeOf), U8(6),
- B(JumpIfFalse), U8(4),
- B(Jump), U8(18),
+ B(JumpIfTrue), U8(18),
B(Wide), B(LdaSmi), I16(154),
B(Star), R(13),
- B(LdaConstant), U8(7),
+ B(LdaConstant), U8(6),
B(Star), R(14),
B(CallRuntime), U16(Runtime::kNewTypeError), R(13), U8(2),
B(Throw),
B(Mov), R(context), R(13),
- B(Mov), R(8), R(14),
- B(Mov), R(3), R(15),
- B(InvokeIntrinsic), U8(Runtime::k_Call), R(14), U8(2),
- B(Jump), U8(6),
- B(LdaTheHole),
- B(SetPendingMessage),
+ B(CallProperty0), R(12), R(4), U8(14),
+ B(JumpIfJSReceiver), U8(21),
+ B(Star), R(14),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(14), U8(1),
+ B(Jump), U8(12),
+ B(Star), R(13),
+ B(LdaZero),
+ B(TestReferenceEqual), R(8),
+ B(JumpIfTrue), U8(5),
B(Ldar), R(13),
- B(Jump), U8(27),
- B(Mov), R(8), R(13),
- B(Mov), R(3), R(14),
- B(InvokeIntrinsic), U8(Runtime::k_Call), R(13), U8(2),
- B(Star), R(9),
- B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(9), U8(1),
- B(JumpIfToBooleanFalse), U8(4),
- B(Jump), U8(7),
- B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(9), U8(1),
- B(Ldar), R(12),
- B(SetPendingMessage),
+ B(ReThrow),
B(Ldar), R(10),
- B(SwitchOnSmiNoFeedback), U8(8), U8(2), I8(0),
+ B(SetPendingMessage),
+ B(Ldar), R(8),
+ B(SwitchOnSmiNoFeedback), U8(7), U8(2), I8(0),
B(Jump), U8(8),
- B(Ldar), R(11),
- /* 85 S> */ B(Return),
- B(Ldar), R(11),
+ B(Ldar), R(9),
B(ReThrow),
+ B(Ldar), R(9),
+ /* 85 S> */ B(Return),
B(LdaUndefined),
/* 85 S> */ B(Return),
]
@@ -258,16 +201,14 @@ constant pool: [
ONE_BYTE_INTERNALIZED_STRING_TYPE ["next"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["done"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["value"],
- SCOPE_INFO_TYPE,
ONE_BYTE_INTERNALIZED_STRING_TYPE ["return"],
ONE_BYTE_INTERNALIZED_STRING_TYPE [""],
Smi [6],
Smi [9],
]
handlers: [
- [11, 124, 132],
- [14, 91, 93],
- [193, 203, 205],
+ [39, 86, 94],
+ [140, 153, 155],
]
---
@@ -277,119 +218,90 @@ snippet: "
if (x == 20) break;
}
"
-frame size: 15
+frame size: 14
parameter count: 1
-bytecode array length: 263
+bytecode array length: 189
bytecodes: [
/* 30 E> */ B(StackCheck),
- B(LdaZero),
- B(Star), R(5),
- B(Mov), R(context), R(11),
- B(Mov), R(context), R(12),
/* 48 S> */ B(CreateArrayLiteral), U8(0), U8(0), U8(37),
- B(Star), R(13),
- B(LdaNamedProperty), R(13), U8(1), U8(1),
- B(Star), R(14),
- B(CallProperty0), R(14), R(13), U8(3),
+ B(Star), R(4),
+ B(LdaNamedProperty), R(4), U8(1), U8(1),
+ B(Star), R(5),
+ B(CallProperty0), R(5), R(4), U8(3),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
- B(Star), R(2),
- /* 48 E> */ B(LdaNamedProperty), R(2), U8(2), U8(5),
B(Star), R(3),
- /* 43 S> */ B(CallProperty0), R(3), R(2), U8(7),
- B(Star), R(4),
- /* 43 E> */ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(4), U8(1),
- B(ToBooleanLogicalNot),
- B(JumpIfFalse), U8(7),
- B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(4), U8(1),
- B(LdaNamedProperty), R(4), U8(3), U8(9),
- B(JumpIfToBooleanTrue), U8(43),
- B(LdaNamedProperty), R(4), U8(4), U8(11),
+ B(LdaNamedProperty), R(3), U8(2), U8(5),
+ B(Star), R(2),
+ B(LdaFalse),
B(Star), R(6),
- B(LdaSmi), I8(2),
- B(Star), R(5),
- B(Mov), R(6), R(0),
+ B(Mov), R(context), R(9),
+ B(LdaTrue),
+ B(Star), R(6),
+ /* 43 S> */ B(CallProperty0), R(2), R(3), U8(7),
+ B(Star), R(10),
+ B(JumpIfJSReceiver), U8(7),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(10), U8(1),
+ B(LdaNamedProperty), R(10), U8(3), U8(9),
+ B(JumpIfToBooleanTrue), U8(39),
+ B(LdaNamedProperty), R(10), U8(4), U8(11),
+ B(Star), R(10),
+ B(LdaFalse),
+ B(Star), R(6),
+ B(Mov), R(10), R(1),
/* 34 E> */ B(StackCheck),
- B(Mov), R(0), R(1),
+ /* 43 S> */ B(Mov), R(1), R(0),
/* 66 S> */ B(LdaSmi), I8(10),
- /* 72 E> */ B(TestEqual), R(1), U8(13),
+ /* 72 E> */ B(TestEqual), R(0), U8(13),
B(JumpIfFalse), U8(4),
- /* 79 S> */ B(Jump), U8(14),
+ /* 79 S> */ B(Jump), U8(11),
/* 91 S> */ B(LdaSmi), I8(20),
- /* 97 E> */ B(TestEqual), R(1), U8(14),
+ /* 97 E> */ B(TestEqual), R(0), U8(14),
B(JumpIfFalse), U8(4),
- /* 104 S> */ B(Jump), U8(8),
- B(LdaZero),
- B(Star), R(5),
- B(JumpLoop), U8(62), I8(0),
- B(Jump), U8(33),
- B(Star), R(13),
- B(CreateCatchContext), R(13), U8(5),
- B(PushContext), R(13),
- B(Star), R(12),
- B(LdaSmi), I8(2),
- B(TestEqualStrict), R(5), U8(15),
- B(JumpIfFalse), U8(6),
- B(LdaSmi), I8(1),
- B(Star), R(5),
- B(LdaImmutableCurrentContextSlot), U8(4),
- B(Star), R(14),
- B(CallRuntime), U16(Runtime::kReThrow), R(14), U8(1),
- B(PopContext), R(13),
+ /* 104 S> */ B(Jump), U8(5),
+ B(JumpLoop), U8(56), I8(0),
B(LdaSmi), I8(-1),
- B(Star), R(10),
- B(Star), R(9),
+ B(Star), R(8),
+ B(Star), R(7),
B(Jump), U8(7),
- B(Star), R(10),
+ B(Star), R(8),
B(LdaZero),
- B(Star), R(9),
+ B(Star), R(7),
B(LdaTheHole),
B(SetPendingMessage),
+ B(Star), R(9),
+ B(Ldar), R(6),
+ B(JumpIfToBooleanTrue), U8(60),
+ B(LdaNamedProperty), R(3), U8(5), U8(15),
B(Star), R(11),
- B(LdaZero),
- B(TestEqualStrict), R(5), U8(16),
- B(JumpIfTrue), U8(90),
- B(LdaNamedProperty), R(2), U8(6), U8(17),
- B(Star), R(7),
- B(TestUndetectable),
- B(JumpIfFalse), U8(4),
- B(Jump), U8(79),
- B(LdaSmi), I8(1),
- B(TestEqualStrict), R(5), U8(19),
- B(JumpIfFalse), U8(47),
- B(Ldar), R(7),
+ B(JumpIfUndefined), U8(52),
+ B(JumpIfNull), U8(50),
B(TestTypeOf), U8(6),
- B(JumpIfFalse), U8(4),
- B(Jump), U8(18),
+ B(JumpIfTrue), U8(18),
B(Wide), B(LdaSmi), I16(154),
B(Star), R(12),
- B(LdaConstant), U8(7),
+ B(LdaConstant), U8(6),
B(Star), R(13),
B(CallRuntime), U16(Runtime::kNewTypeError), R(12), U8(2),
B(Throw),
B(Mov), R(context), R(12),
- B(Mov), R(7), R(13),
- B(Mov), R(2), R(14),
- B(InvokeIntrinsic), U8(Runtime::k_Call), R(13), U8(2),
- B(Jump), U8(6),
- B(LdaTheHole),
- B(SetPendingMessage),
+ B(CallProperty0), R(11), R(3), U8(17),
+ B(JumpIfJSReceiver), U8(21),
+ B(Star), R(13),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(13), U8(1),
+ B(Jump), U8(12),
+ B(Star), R(12),
+ B(LdaZero),
+ B(TestReferenceEqual), R(7),
+ B(JumpIfTrue), U8(5),
B(Ldar), R(12),
- B(Jump), U8(27),
- B(Mov), R(7), R(12),
- B(Mov), R(2), R(13),
- B(InvokeIntrinsic), U8(Runtime::k_Call), R(12), U8(2),
- B(Star), R(8),
- B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(8), U8(1),
- B(JumpIfToBooleanFalse), U8(4),
- B(Jump), U8(7),
- B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(8), U8(1),
- B(Ldar), R(11),
+ B(ReThrow),
+ B(Ldar), R(9),
B(SetPendingMessage),
B(LdaZero),
- B(TestReferenceEqual), R(9),
+ B(TestReferenceEqual), R(7),
B(JumpIfFalse), U8(5),
- B(Ldar), R(10),
+ B(Ldar), R(8),
B(ReThrow),
B(LdaUndefined),
/* 113 S> */ B(Return),
@@ -400,14 +312,12 @@ constant pool: [
ONE_BYTE_INTERNALIZED_STRING_TYPE ["next"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["done"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["value"],
- SCOPE_INFO_TYPE,
ONE_BYTE_INTERNALIZED_STRING_TYPE ["return"],
ONE_BYTE_INTERNALIZED_STRING_TYPE [""],
]
handlers: [
- [7, 139, 147],
- [10, 106, 108],
- [207, 217, 219],
+ [38, 97, 105],
+ [151, 164, 166],
]
---
@@ -417,116 +327,90 @@ snippet: "
"
frame size: 14
parameter count: 1
-bytecode array length: 265
+bytecode array length: 195
bytecodes: [
/* 30 E> */ B(StackCheck),
- /* 42 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41), R(8),
- B(Mov), R(8), R(0),
- B(LdaZero),
- B(Star), R(4),
- B(Mov), R(context), R(10),
- B(Mov), R(context), R(11),
+ /* 42 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41),
+ B(Star), R(0),
/* 77 S> */ B(CreateArrayLiteral), U8(1), U8(1), U8(37),
- B(Star), R(12),
- B(LdaNamedProperty), R(12), U8(2), U8(2),
- B(Star), R(13),
- B(CallProperty0), R(13), R(12), U8(4),
+ B(Star), R(3),
+ B(LdaNamedProperty), R(3), U8(2), U8(2),
+ B(Star), R(4),
+ B(CallProperty0), R(4), R(3), U8(4),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
- B(Star), R(1),
- /* 77 E> */ B(LdaNamedProperty), R(1), U8(3), U8(6),
B(Star), R(2),
- /* 68 S> */ B(CallProperty0), R(2), R(1), U8(8),
- B(Star), R(3),
- /* 68 E> */ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(3), U8(1),
- B(ToBooleanLogicalNot),
- B(JumpIfFalse), U8(7),
- B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(3), U8(1),
- B(LdaNamedProperty), R(3), U8(4), U8(10),
- B(JumpIfToBooleanTrue), U8(30),
- /* 67 E> */ B(LdaNamedProperty), R(3), U8(5), U8(12),
+ B(LdaNamedProperty), R(2), U8(3), U8(6),
+ B(Star), R(1),
+ B(LdaFalse),
B(Star), R(5),
- B(LdaSmi), I8(2),
- B(Star), R(4),
- B(Ldar), R(5),
- B(StaNamedProperty), R(0), U8(6), U8(14),
+ B(Mov), R(context), R(8),
+ B(LdaTrue),
+ B(Star), R(5),
+ /* 68 S> */ B(CallProperty0), R(1), R(2), U8(8),
+ B(Star), R(9),
+ B(JumpIfJSReceiver), U8(7),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(9), U8(1),
+ B(LdaNamedProperty), R(9), U8(4), U8(10),
+ B(JumpIfToBooleanTrue), U8(33),
+ B(LdaNamedProperty), R(9), U8(5), U8(12),
+ B(Star), R(9),
+ B(LdaFalse),
+ B(Star), R(5),
+ B(Ldar), R(9),
+ /* 67 E> */ B(StaNamedProperty), R(0), U8(6), U8(14),
/* 62 E> */ B(StackCheck),
/* 96 S> */ B(LdaNamedProperty), R(0), U8(6), U8(16),
- B(Star), R(9),
- B(LdaZero),
- B(Star), R(8),
- B(Jump), U8(49),
- B(Jump), U8(33),
- B(Star), R(12),
- B(CreateCatchContext), R(12), U8(7),
- B(PushContext), R(12),
- B(Star), R(11),
- B(LdaSmi), I8(2),
- B(TestEqualStrict), R(4), U8(18),
- B(JumpIfFalse), U8(6),
+ B(Star), R(7),
B(LdaSmi), I8(1),
- B(Star), R(4),
- B(LdaImmutableCurrentContextSlot), U8(4),
- B(Star), R(13),
- B(CallRuntime), U16(Runtime::kReThrow), R(13), U8(1),
- B(PopContext), R(12),
+ B(Star), R(6),
+ B(Mov), R(0), R(10),
+ B(Jump), U8(15),
B(LdaSmi), I8(-1),
- B(Star), R(9),
- B(Star), R(8),
- B(Jump), U8(8),
- B(Star), R(9),
- B(LdaSmi), I8(1),
- B(Star), R(8),
- B(LdaTheHole),
- B(SetPendingMessage),
- B(Star), R(10),
+ B(Star), R(7),
+ B(Star), R(6),
+ B(Jump), U8(7),
+ B(Star), R(7),
B(LdaZero),
- B(TestEqualStrict), R(4), U8(19),
- B(JumpIfTrue), U8(90),
- B(LdaNamedProperty), R(1), U8(8), U8(20),
B(Star), R(6),
- B(TestUndetectable),
- B(JumpIfFalse), U8(4),
- B(Jump), U8(79),
- B(LdaSmi), I8(1),
- B(TestEqualStrict), R(4), U8(22),
- B(JumpIfFalse), U8(47),
- B(Ldar), R(6),
+ B(LdaTheHole),
+ B(SetPendingMessage),
+ B(Star), R(8),
+ B(Ldar), R(5),
+ B(JumpIfToBooleanTrue), U8(60),
+ B(LdaNamedProperty), R(2), U8(7), U8(18),
+ B(Star), R(11),
+ B(JumpIfUndefined), U8(52),
+ B(JumpIfNull), U8(50),
B(TestTypeOf), U8(6),
- B(JumpIfFalse), U8(4),
- B(Jump), U8(18),
+ B(JumpIfTrue), U8(18),
B(Wide), B(LdaSmi), I16(154),
- B(Star), R(11),
- B(LdaConstant), U8(9),
B(Star), R(12),
- B(CallRuntime), U16(Runtime::kNewTypeError), R(11), U8(2),
+ B(LdaConstant), U8(8),
+ B(Star), R(13),
+ B(CallRuntime), U16(Runtime::kNewTypeError), R(12), U8(2),
B(Throw),
- B(Mov), R(context), R(11),
- B(Mov), R(6), R(12),
- B(Mov), R(1), R(13),
- B(InvokeIntrinsic), U8(Runtime::k_Call), R(12), U8(2),
- B(Jump), U8(6),
- B(LdaTheHole),
- B(SetPendingMessage),
- B(Ldar), R(11),
- B(Jump), U8(27),
- B(Mov), R(6), R(11),
- B(Mov), R(1), R(12),
- B(InvokeIntrinsic), U8(Runtime::k_Call), R(11), U8(2),
- B(Star), R(7),
- B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(7), U8(1),
- B(JumpIfToBooleanFalse), U8(4),
- B(Jump), U8(7),
- B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(7), U8(1),
- B(Ldar), R(10),
- B(SetPendingMessage),
+ B(Mov), R(context), R(12),
+ B(CallProperty0), R(11), R(2), U8(20),
+ B(JumpIfJSReceiver), U8(21),
+ B(Star), R(13),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(13), U8(1),
+ B(Jump), U8(12),
+ B(Star), R(12),
+ B(LdaZero),
+ B(TestReferenceEqual), R(6),
+ B(JumpIfTrue), U8(5),
+ B(Ldar), R(12),
+ B(ReThrow),
B(Ldar), R(8),
- B(SwitchOnSmiNoFeedback), U8(10), U8(2), I8(0),
+ B(SetPendingMessage),
+ B(Ldar), R(6),
+ B(SwitchOnSmiNoFeedback), U8(9), U8(2), I8(0),
B(Jump), U8(8),
- B(Ldar), R(9),
- /* 105 S> */ B(Return),
- B(Ldar), R(9),
+ B(Ldar), R(7),
B(ReThrow),
+ B(Ldar), R(7),
+ /* 105 S> */ B(Return),
B(LdaUndefined),
/* 105 S> */ B(Return),
]
@@ -538,15 +422,13 @@ constant pool: [
ONE_BYTE_INTERNALIZED_STRING_TYPE ["done"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["value"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["a"],
- SCOPE_INFO_TYPE,
ONE_BYTE_INTERNALIZED_STRING_TYPE ["return"],
ONE_BYTE_INTERNALIZED_STRING_TYPE [""],
Smi [6],
Smi [9],
]
handlers: [
- [15, 134, 142],
- [18, 101, 103],
- [203, 213, 215],
+ [44, 97, 105],
+ [151, 164, 166],
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOfLoop.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOfLoop.golden
index 9755e0af17..33cccfc896 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOfLoop.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOfLoop.golden
@@ -13,111 +13,83 @@ snippet: "
}
f([1, 2, 3]);
"
-frame size: 17
+frame size: 16
parameter count: 2
-bytecode array length: 245
+bytecode array length: 173
bytecodes: [
/* 10 E> */ B(StackCheck),
- B(LdaZero),
- B(Star), R(7),
- B(Mov), R(context), R(13),
- B(Mov), R(context), R(14),
/* 34 S> */ B(LdaNamedProperty), R(arg0), U8(0), U8(0),
- B(Star), R(16),
- B(CallProperty0), R(16), R(arg0), U8(2),
- B(Mov), R(arg0), R(15),
+ B(Star), R(7),
+ B(CallProperty0), R(7), R(arg0), U8(2),
+ B(Mov), R(arg0), R(6),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
- B(Star), R(4),
- /* 34 E> */ B(LdaNamedProperty), R(4), U8(1), U8(4),
B(Star), R(5),
- /* 29 S> */ B(CallProperty0), R(5), R(4), U8(6),
- B(Star), R(6),
- /* 29 E> */ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(6), U8(1),
- B(ToBooleanLogicalNot),
- B(JumpIfFalse), U8(7),
- B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(6), U8(1),
- B(LdaNamedProperty), R(6), U8(2), U8(8),
- B(JumpIfToBooleanTrue), U8(28),
- B(LdaNamedProperty), R(6), U8(3), U8(10),
+ B(LdaNamedProperty), R(5), U8(1), U8(4),
+ B(Star), R(4),
+ B(LdaFalse),
B(Star), R(8),
- B(LdaSmi), I8(2),
- B(Star), R(7),
- B(Mov), R(8), R(3),
+ B(Mov), R(context), R(11),
+ B(LdaTrue),
+ B(Star), R(8),
+ /* 29 S> */ B(CallProperty0), R(4), R(5), U8(6),
+ B(Star), R(12),
+ B(JumpIfJSReceiver), U8(7),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(12), U8(1),
+ B(LdaNamedProperty), R(12), U8(2), U8(8),
+ B(JumpIfToBooleanTrue), U8(26),
+ B(LdaNamedProperty), R(12), U8(3), U8(10),
+ B(Star), R(12),
+ B(LdaFalse),
+ B(Star), R(8),
+ B(Mov), R(12), R(3),
/* 20 E> */ B(StackCheck),
- B(Mov), R(3), R(1),
+ /* 29 S> */ B(Mov), R(3), R(1),
/* 49 S> */ B(Mov), R(1), R(0),
- B(LdaZero),
- B(Star), R(7),
- B(JumpLoop), U8(47), I8(0),
- B(Jump), U8(33),
- B(Star), R(15),
- /* 49 E> */ B(CreateCatchContext), R(15), U8(4),
- B(PushContext), R(15),
- B(Star), R(14),
- B(LdaSmi), I8(2),
- B(TestEqualStrict), R(7), U8(12),
- B(JumpIfFalse), U8(6),
- B(LdaSmi), I8(1),
- B(Star), R(7),
- B(LdaImmutableCurrentContextSlot), U8(4),
- B(Star), R(16),
- B(CallRuntime), U16(Runtime::kReThrow), R(16), U8(1),
- B(PopContext), R(15),
+ B(Ldar), R(12),
+ B(JumpLoop), U8(43), I8(0),
B(LdaSmi), I8(-1),
- B(Star), R(12),
- B(Star), R(11),
+ B(Star), R(10),
+ B(Star), R(9),
B(Jump), U8(7),
- B(Star), R(12),
+ B(Star), R(10),
B(LdaZero),
- B(Star), R(11),
+ B(Star), R(9),
B(LdaTheHole),
- B(SetPendingMessage),
+ /* 49 E> */ B(SetPendingMessage),
+ B(Star), R(11),
+ B(Ldar), R(8),
+ B(JumpIfToBooleanTrue), U8(60),
+ B(LdaNamedProperty), R(5), U8(4), U8(12),
B(Star), R(13),
- B(LdaZero),
- B(TestEqualStrict), R(7), U8(13),
- B(JumpIfTrue), U8(90),
- B(LdaNamedProperty), R(4), U8(5), U8(14),
- B(Star), R(9),
- B(TestUndetectable),
- B(JumpIfFalse), U8(4),
- B(Jump), U8(79),
- B(LdaSmi), I8(1),
- B(TestEqualStrict), R(7), U8(16),
- B(JumpIfFalse), U8(47),
- B(Ldar), R(9),
+ B(JumpIfUndefined), U8(52),
+ B(JumpIfNull), U8(50),
B(TestTypeOf), U8(6),
- B(JumpIfFalse), U8(4),
- B(Jump), U8(18),
+ B(JumpIfTrue), U8(18),
B(Wide), B(LdaSmi), I16(154),
B(Star), R(14),
- B(LdaConstant), U8(6),
+ B(LdaConstant), U8(5),
B(Star), R(15),
B(CallRuntime), U16(Runtime::kNewTypeError), R(14), U8(2),
B(Throw),
B(Mov), R(context), R(14),
- B(Mov), R(9), R(15),
- B(Mov), R(4), R(16),
- B(InvokeIntrinsic), U8(Runtime::k_Call), R(15), U8(2),
- B(Jump), U8(6),
- B(LdaTheHole),
- B(SetPendingMessage),
+ B(CallProperty0), R(13), R(5), U8(14),
+ B(JumpIfJSReceiver), U8(21),
+ B(Star), R(15),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(15), U8(1),
+ B(Jump), U8(12),
+ B(Star), R(14),
+ B(LdaZero),
+ B(TestReferenceEqual), R(9),
+ B(JumpIfTrue), U8(5),
B(Ldar), R(14),
- B(Jump), U8(27),
- B(Mov), R(9), R(14),
- B(Mov), R(4), R(15),
- B(InvokeIntrinsic), U8(Runtime::k_Call), R(14), U8(2),
- B(Star), R(10),
- B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(10), U8(1),
- B(JumpIfToBooleanFalse), U8(4),
- B(Jump), U8(7),
- B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(10), U8(1),
- B(Ldar), R(13),
+ B(ReThrow),
+ B(Ldar), R(11),
B(SetPendingMessage),
B(LdaZero),
- B(TestReferenceEqual), R(11),
+ B(TestReferenceEqual), R(9),
B(JumpIfFalse), U8(5),
- B(Ldar), R(12),
+ B(Ldar), R(10),
B(ReThrow),
B(LdaUndefined),
/* 54 S> */ B(Return),
@@ -127,14 +99,12 @@ constant pool: [
ONE_BYTE_INTERNALIZED_STRING_TYPE ["next"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["done"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["value"],
- SCOPE_INFO_TYPE,
ONE_BYTE_INTERNALIZED_STRING_TYPE ["return"],
ONE_BYTE_INTERNALIZED_STRING_TYPE [""],
]
handlers: [
- [7, 121, 129],
- [10, 88, 90],
- [189, 199, 201],
+ [35, 81, 89],
+ [135, 148, 150],
]
---
@@ -144,149 +114,121 @@ snippet: "
}
f([1, 2, 3]);
"
-frame size: 24
+frame size: 22
parameter count: 2
-bytecode array length: 325
+bytecode array length: 254
bytecodes: [
B(CreateFunctionContext), U8(0), U8(4),
- B(PushContext), R(9),
+ B(PushContext), R(2),
B(Ldar), R(this),
B(StaCurrentContextSlot), U8(5),
B(Ldar), R(arg0),
B(StaCurrentContextSlot), U8(4),
B(CreateMappedArguments),
B(StaCurrentContextSlot), U8(7),
- B(Ldar), R(8),
+ B(Ldar), R(1),
B(StaCurrentContextSlot), U8(6),
/* 10 E> */ B(StackCheck),
B(CreateBlockContext), U8(1),
- B(PushContext), R(10),
+ B(PushContext), R(3),
B(LdaTheHole),
B(StaCurrentContextSlot), U8(4),
- B(LdaZero),
- B(Star), R(4),
- B(Mov), R(context), R(13),
- B(Mov), R(context), R(14),
- /* 34 S> */ B(LdaContextSlot), R(10), U8(4), U8(0),
- B(Star), R(15),
- B(LdaNamedProperty), R(15), U8(2), U8(0),
- B(Star), R(16),
- B(CallProperty0), R(16), R(15), U8(2),
+ /* 34 S> */ B(LdaContextSlot), R(3), U8(4), U8(0),
+ B(Star), R(6),
+ B(LdaNamedProperty), R(6), U8(2), U8(0),
+ B(Star), R(7),
+ B(CallProperty0), R(7), R(6), U8(2),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
- B(Star), R(1),
- /* 34 E> */ B(LdaNamedProperty), R(1), U8(3), U8(4),
- B(Star), R(2),
- /* 29 S> */ B(CallProperty0), R(2), R(1), U8(6),
- B(Star), R(3),
- /* 29 E> */ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(3), U8(1),
- B(ToBooleanLogicalNot),
- B(JumpIfFalse), U8(7),
- B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(3), U8(1),
- B(LdaNamedProperty), R(3), U8(4), U8(8),
- B(JumpIfToBooleanTrue), U8(76),
- B(LdaNamedProperty), R(3), U8(5), U8(10),
B(Star), R(5),
- B(LdaSmi), I8(2),
+ B(LdaNamedProperty), R(5), U8(3), U8(4),
B(Star), R(4),
- B(Mov), R(5), R(0),
+ B(LdaFalse),
+ B(Star), R(8),
+ B(Mov), R(context), R(11),
+ B(LdaTrue),
+ B(Star), R(8),
+ /* 29 S> */ B(CallProperty0), R(4), R(5), U8(6),
+ B(Star), R(12),
+ B(JumpIfJSReceiver), U8(7),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(12), U8(1),
+ B(LdaNamedProperty), R(12), U8(4), U8(8),
+ B(JumpIfToBooleanTrue), U8(75),
+ B(LdaNamedProperty), R(12), U8(5), U8(10),
+ B(Star), R(12),
+ B(LdaFalse),
+ B(Star), R(8),
+ B(Mov), R(12), R(0),
/* 20 E> */ B(StackCheck),
B(CreateBlockContext), U8(6),
- B(PushContext), R(15),
+ B(PushContext), R(13),
B(LdaTheHole),
B(StaCurrentContextSlot), U8(4),
- B(Ldar), R(5),
- B(StaCurrentContextSlot), U8(4),
+ /* 29 S> */ B(Ldar), R(0),
+ /* 29 E> */ B(StaCurrentContextSlot), U8(4),
/* 41 S> */ B(LdaLookupGlobalSlot), U8(7), U8(12), U8(3),
- B(Star), R(16),
+ B(Star), R(14),
B(LdaConstant), U8(8),
- B(Star), R(17),
+ B(Star), R(15),
B(LdaZero),
- B(Star), R(21),
+ B(Star), R(19),
B(LdaSmi), I8(37),
- B(Star), R(22),
+ B(Star), R(20),
B(LdaSmi), I8(41),
- B(Star), R(23),
- B(Mov), R(16), R(18),
- B(Mov), R(17), R(19),
- B(Mov), R(closure), R(20),
- B(CallRuntime), U16(Runtime::kResolvePossiblyDirectEval), R(18), U8(6),
- B(Star), R(16),
- /* 41 E> */ B(CallUndefinedReceiver1), R(16), R(17), U8(14),
- B(PopContext), R(15),
- B(LdaZero),
- B(Star), R(4),
- B(JumpLoop), U8(95), I8(0),
- B(Jump), U8(33),
- B(Star), R(15),
- B(CreateCatchContext), R(15), U8(9),
- B(PushContext), R(15),
+ B(Star), R(21),
+ B(Mov), R(14), R(16),
+ B(Mov), R(15), R(17),
+ B(Mov), R(closure), R(18),
+ B(CallRuntime), U16(Runtime::kResolvePossiblyDirectEval), R(16), U8(6),
B(Star), R(14),
- B(LdaSmi), I8(2),
- B(TestEqualStrict), R(4), U8(16),
- B(JumpIfFalse), U8(6),
- B(LdaSmi), I8(1),
- B(Star), R(4),
- B(LdaImmutableCurrentContextSlot), U8(4),
- B(Star), R(16),
- B(CallRuntime), U16(Runtime::kReThrow), R(16), U8(1),
- B(PopContext), R(15),
+ /* 41 E> */ B(CallUndefinedReceiver1), R(14), R(15), U8(14),
+ B(PopContext), R(13),
+ B(Mov), R(0), R(12),
+ B(JumpLoop), U8(92), I8(0),
B(LdaSmi), I8(-1),
- B(Star), R(12),
- B(Star), R(11),
+ B(Star), R(10),
+ B(Star), R(9),
B(Jump), U8(7),
- B(Star), R(12),
+ B(Star), R(10),
B(LdaZero),
- B(Star), R(11),
+ B(Star), R(9),
B(LdaTheHole),
B(SetPendingMessage),
- B(Star), R(13),
- B(LdaZero),
- B(TestEqualStrict), R(4), U8(17),
- B(JumpIfTrue), U8(90),
- B(LdaNamedProperty), R(1), U8(10), U8(18),
- B(Star), R(6),
- B(TestUndetectable),
- B(JumpIfFalse), U8(4),
- B(Jump), U8(79),
- B(LdaSmi), I8(1),
- B(TestEqualStrict), R(4), U8(20),
- B(JumpIfFalse), U8(47),
- B(Ldar), R(6),
+ B(Star), R(11),
+ B(Ldar), R(8),
+ B(JumpIfToBooleanTrue), U8(60),
+ B(LdaNamedProperty), R(5), U8(9), U8(16),
+ B(Star), R(14),
+ B(JumpIfUndefined), U8(52),
+ B(JumpIfNull), U8(50),
B(TestTypeOf), U8(6),
- B(JumpIfFalse), U8(4),
- B(Jump), U8(18),
+ B(JumpIfTrue), U8(18),
B(Wide), B(LdaSmi), I16(154),
- B(Star), R(14),
- B(LdaConstant), U8(11),
B(Star), R(15),
- B(CallRuntime), U16(Runtime::kNewTypeError), R(14), U8(2),
+ B(LdaConstant), U8(10),
+ B(Star), R(16),
+ B(CallRuntime), U16(Runtime::kNewTypeError), R(15), U8(2),
B(Throw),
- B(Mov), R(context), R(14),
- B(Mov), R(6), R(15),
- B(Mov), R(1), R(16),
- B(InvokeIntrinsic), U8(Runtime::k_Call), R(15), U8(2),
- B(Jump), U8(6),
- B(LdaTheHole),
- B(SetPendingMessage),
- B(Ldar), R(14),
- B(Jump), U8(27),
- B(Mov), R(6), R(14),
- B(Mov), R(1), R(15),
- B(InvokeIntrinsic), U8(Runtime::k_Call), R(14), U8(2),
- B(Star), R(7),
- B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(7), U8(1),
- B(JumpIfToBooleanFalse), U8(4),
- B(Jump), U8(7),
- B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(7), U8(1),
- B(Ldar), R(13),
+ B(Mov), R(context), R(15),
+ B(CallProperty0), R(14), R(5), U8(18),
+ B(JumpIfJSReceiver), U8(21),
+ B(Star), R(16),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(16), U8(1),
+ B(Jump), U8(12),
+ B(Star), R(15),
+ B(LdaZero),
+ B(TestReferenceEqual), R(9),
+ B(JumpIfTrue), U8(5),
+ B(Ldar), R(15),
+ B(ReThrow),
+ B(Ldar), R(11),
B(SetPendingMessage),
B(LdaZero),
- B(TestReferenceEqual), R(11),
+ B(TestReferenceEqual), R(9),
B(JumpIfFalse), U8(5),
- B(Ldar), R(12),
+ B(Ldar), R(10),
B(ReThrow),
- B(PopContext), R(10),
+ B(PopContext), R(3),
B(LdaUndefined),
/* 54 S> */ B(Return),
]
@@ -300,14 +242,12 @@ constant pool: [
SCOPE_INFO_TYPE,
ONE_BYTE_INTERNALIZED_STRING_TYPE ["eval"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["1"],
- SCOPE_INFO_TYPE,
ONE_BYTE_INTERNALIZED_STRING_TYPE ["return"],
ONE_BYTE_INTERNALIZED_STRING_TYPE [""],
]
handlers: [
- [34, 199, 207],
- [37, 166, 168],
- [267, 277, 279],
+ [65, 160, 168],
+ [214, 227, 229],
]
---
@@ -319,117 +259,89 @@ snippet: "
"
frame size: 15
parameter count: 2
-bytecode array length: 261
+bytecode array length: 190
bytecodes: [
/* 10 E> */ B(StackCheck),
- B(LdaZero),
- B(Star), R(5),
- B(Mov), R(context), R(11),
- B(Mov), R(context), R(12),
/* 34 S> */ B(LdaNamedProperty), R(arg0), U8(0), U8(0),
- B(Star), R(14),
- B(CallProperty0), R(14), R(arg0), U8(2),
- B(Mov), R(arg0), R(13),
+ B(Star), R(5),
+ B(CallProperty0), R(5), R(arg0), U8(2),
+ B(Mov), R(arg0), R(4),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
- B(Star), R(2),
- /* 34 E> */ B(LdaNamedProperty), R(2), U8(1), U8(4),
B(Star), R(3),
- /* 29 S> */ B(CallProperty0), R(3), R(2), U8(6),
- B(Star), R(4),
- /* 29 E> */ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(4), U8(1),
- B(ToBooleanLogicalNot),
- B(JumpIfFalse), U8(7),
- B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(4), U8(1),
- B(LdaNamedProperty), R(4), U8(2), U8(8),
- B(JumpIfToBooleanTrue), U8(44),
- B(LdaNamedProperty), R(4), U8(3), U8(10),
+ B(LdaNamedProperty), R(3), U8(1), U8(4),
+ B(Star), R(2),
+ B(LdaFalse),
B(Star), R(6),
- B(LdaSmi), I8(2),
- B(Star), R(5),
- B(Mov), R(6), R(1),
+ B(Mov), R(context), R(9),
+ B(LdaTrue),
+ B(Star), R(6),
+ /* 29 S> */ B(CallProperty0), R(2), R(3), U8(6),
+ B(Star), R(10),
+ B(JumpIfJSReceiver), U8(7),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(10), U8(1),
+ B(LdaNamedProperty), R(10), U8(2), U8(8),
+ B(JumpIfToBooleanTrue), U8(43),
+ B(LdaNamedProperty), R(10), U8(3), U8(10),
+ B(Star), R(10),
+ B(LdaFalse),
+ B(Star), R(6),
+ B(Mov), R(10), R(1),
/* 20 E> */ B(StackCheck),
B(CreateBlockContext), U8(4),
- B(PushContext), R(13),
+ B(PushContext), R(11),
B(LdaTheHole),
B(StaCurrentContextSlot), U8(4),
- B(Ldar), R(6),
- B(StaCurrentContextSlot), U8(4),
+ /* 29 S> */ B(Ldar), R(1),
+ /* 29 E> */ B(StaCurrentContextSlot), U8(4),
/* 41 S> */ B(CreateClosure), U8(5), U8(12), U8(2),
- B(Star), R(14),
- /* 67 E> */ B(CallUndefinedReceiver0), R(14), U8(13),
- B(PopContext), R(13),
- B(LdaZero),
- B(Star), R(5),
- B(JumpLoop), U8(63), I8(0),
- B(Jump), U8(33),
- B(Star), R(13),
- B(CreateCatchContext), R(13), U8(6),
- B(PushContext), R(13),
B(Star), R(12),
- B(LdaSmi), I8(2),
- B(TestEqualStrict), R(5), U8(15),
- B(JumpIfFalse), U8(6),
- B(LdaSmi), I8(1),
- B(Star), R(5),
- B(LdaImmutableCurrentContextSlot), U8(4),
- B(Star), R(14),
- B(CallRuntime), U16(Runtime::kReThrow), R(14), U8(1),
- B(PopContext), R(13),
+ /* 67 E> */ B(CallUndefinedReceiver0), R(12), U8(13),
+ B(PopContext), R(11),
+ B(Mov), R(1), R(10),
+ B(JumpLoop), U8(60), I8(0),
B(LdaSmi), I8(-1),
- B(Star), R(10),
- B(Star), R(9),
+ B(Star), R(8),
+ B(Star), R(7),
B(Jump), U8(7),
- B(Star), R(10),
+ B(Star), R(8),
B(LdaZero),
- B(Star), R(9),
+ B(Star), R(7),
B(LdaTheHole),
B(SetPendingMessage),
- B(Star), R(11),
- B(LdaZero),
- B(TestEqualStrict), R(5), U8(16),
- B(JumpIfTrue), U8(90),
- B(LdaNamedProperty), R(2), U8(7), U8(17),
- B(Star), R(7),
- B(TestUndetectable),
- B(JumpIfFalse), U8(4),
- B(Jump), U8(79),
- B(LdaSmi), I8(1),
- B(TestEqualStrict), R(5), U8(19),
- B(JumpIfFalse), U8(47),
- B(Ldar), R(7),
+ B(Star), R(9),
+ B(Ldar), R(6),
+ B(JumpIfToBooleanTrue), U8(60),
+ B(LdaNamedProperty), R(3), U8(6), U8(15),
+ B(Star), R(12),
+ B(JumpIfUndefined), U8(52),
+ B(JumpIfNull), U8(50),
B(TestTypeOf), U8(6),
- B(JumpIfFalse), U8(4),
- B(Jump), U8(18),
+ B(JumpIfTrue), U8(18),
B(Wide), B(LdaSmi), I16(154),
- B(Star), R(12),
- B(LdaConstant), U8(8),
B(Star), R(13),
- B(CallRuntime), U16(Runtime::kNewTypeError), R(12), U8(2),
+ B(LdaConstant), U8(7),
+ B(Star), R(14),
+ B(CallRuntime), U16(Runtime::kNewTypeError), R(13), U8(2),
B(Throw),
- B(Mov), R(context), R(12),
- B(Mov), R(7), R(13),
- B(Mov), R(2), R(14),
- B(InvokeIntrinsic), U8(Runtime::k_Call), R(13), U8(2),
- B(Jump), U8(6),
- B(LdaTheHole),
- B(SetPendingMessage),
- B(Ldar), R(12),
- B(Jump), U8(27),
- B(Mov), R(7), R(12),
- B(Mov), R(2), R(13),
- B(InvokeIntrinsic), U8(Runtime::k_Call), R(12), U8(2),
- B(Star), R(8),
- B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(8), U8(1),
- B(JumpIfToBooleanFalse), U8(4),
- B(Jump), U8(7),
- B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(8), U8(1),
- B(Ldar), R(11),
+ B(Mov), R(context), R(13),
+ B(CallProperty0), R(12), R(3), U8(17),
+ B(JumpIfJSReceiver), U8(21),
+ B(Star), R(14),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(14), U8(1),
+ B(Jump), U8(12),
+ B(Star), R(13),
+ B(LdaZero),
+ B(TestReferenceEqual), R(7),
+ B(JumpIfTrue), U8(5),
+ B(Ldar), R(13),
+ B(ReThrow),
+ B(Ldar), R(9),
B(SetPendingMessage),
B(LdaZero),
- B(TestReferenceEqual), R(9),
+ B(TestReferenceEqual), R(7),
B(JumpIfFalse), U8(5),
- B(Ldar), R(10),
+ B(Ldar), R(8),
B(ReThrow),
B(LdaUndefined),
/* 73 S> */ B(Return),
@@ -441,14 +353,12 @@ constant pool: [
ONE_BYTE_INTERNALIZED_STRING_TYPE ["value"],
SCOPE_INFO_TYPE,
SHARED_FUNCTION_INFO_TYPE,
- SCOPE_INFO_TYPE,
ONE_BYTE_INTERNALIZED_STRING_TYPE ["return"],
ONE_BYTE_INTERNALIZED_STRING_TYPE [""],
]
handlers: [
- [7, 137, 145],
- [10, 104, 106],
- [205, 215, 217],
+ [35, 98, 106],
+ [152, 165, 167],
]
---
@@ -458,127 +368,92 @@ snippet: "
}
f([{ x: 0, y: 3 }, { x: 1, y: 9 }, { x: -12, y: 17 }]);
"
-frame size: 20
+frame size: 18
parameter count: 2
-bytecode array length: 283
+bytecode array length: 197
bytecodes: [
/* 10 E> */ B(StackCheck),
- B(LdaZero),
- B(Star), R(10),
- B(Mov), R(context), R(16),
- B(Mov), R(context), R(17),
/* 41 S> */ B(LdaNamedProperty), R(arg0), U8(0), U8(0),
- B(Star), R(19),
- B(CallProperty0), R(19), R(arg0), U8(2),
- B(Mov), R(arg0), R(18),
+ B(Star), R(9),
+ B(CallProperty0), R(9), R(arg0), U8(2),
+ B(Mov), R(arg0), R(8),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
B(Star), R(7),
- /* 41 E> */ B(LdaNamedProperty), R(7), U8(1), U8(4),
- B(Star), R(8),
- /* 36 S> */ B(CallProperty0), R(8), R(7), U8(6),
- B(Star), R(9),
- /* 36 E> */ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(9), U8(1),
- B(ToBooleanLogicalNot),
- B(JumpIfFalse), U8(7),
- B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(9), U8(1),
- B(LdaNamedProperty), R(9), U8(2), U8(8),
- B(JumpIfToBooleanTrue), U8(66),
- B(LdaNamedProperty), R(9), U8(3), U8(10),
- B(Star), R(11),
- B(LdaSmi), I8(2),
+ B(LdaNamedProperty), R(7), U8(1), U8(4),
+ B(Star), R(6),
+ B(LdaFalse),
+ B(Star), R(10),
+ B(Mov), R(context), R(13),
+ B(LdaTrue),
+ B(Star), R(10),
+ /* 36 S> */ B(CallProperty0), R(6), R(7), U8(6),
+ B(Star), R(14),
+ B(JumpIfJSReceiver), U8(7),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(14), U8(1),
+ B(LdaNamedProperty), R(14), U8(2), U8(8),
+ B(JumpIfToBooleanTrue), U8(50),
+ B(LdaNamedProperty), R(14), U8(3), U8(10),
+ B(Star), R(14),
+ B(LdaFalse),
B(Star), R(10),
- B(Mov), R(11), R(5),
+ B(Mov), R(14), R(5),
/* 20 E> */ B(StackCheck),
- B(Mov), R(5), R(6),
- B(Ldar), R(6),
- B(JumpIfUndefined), U8(6),
- B(Ldar), R(6),
- B(JumpIfNotNull), U8(16),
- B(LdaSmi), I8(81),
- B(Star), R(18),
- B(LdaConstant), U8(4),
- B(Star), R(19),
- B(CallRuntime), U16(Runtime::kNewTypeError), R(18), U8(2),
- /* 31 E> */ B(Throw),
- /* 31 S> */ B(LdaNamedProperty), R(6), U8(4), U8(12),
+ /* 36 S> */ B(Ldar), R(14),
+ B(JumpIfNull), U8(4),
+ B(JumpIfNotUndefined), U8(7),
+ /* 29 E> */ B(CallRuntime), U16(Runtime::kThrowPatternAssignmentNonCoercible), R(0), U8(0),
+ B(Star), R(15),
+ /* 31 S> */ B(LdaNamedProperty), R(15), U8(4), U8(12),
B(Star), R(1),
- /* 34 S> */ B(LdaNamedProperty), R(6), U8(5), U8(14),
+ /* 34 S> */ B(LdaNamedProperty), R(15), U8(5), U8(14),
B(Star), R(2),
/* 56 S> */ B(Ldar), R(2),
/* 58 E> */ B(Add), R(1), U8(16),
B(Star), R(0),
- B(LdaZero),
- B(Star), R(10),
- B(JumpLoop), U8(85), I8(0),
- B(Jump), U8(33),
- B(Star), R(18),
- /* 56 E> */ B(CreateCatchContext), R(18), U8(6),
- B(PushContext), R(18),
- B(Star), R(17),
- B(LdaSmi), I8(2),
- B(TestEqualStrict), R(10), U8(17),
- B(JumpIfFalse), U8(6),
- B(LdaSmi), I8(1),
- B(Star), R(10),
- B(LdaImmutableCurrentContextSlot), U8(4),
- B(Star), R(19),
- B(CallRuntime), U16(Runtime::kReThrow), R(19), U8(1),
- B(PopContext), R(18),
+ B(JumpLoop), U8(67), I8(0),
B(LdaSmi), I8(-1),
- B(Star), R(15),
- B(Star), R(14),
+ B(Star), R(12),
+ B(Star), R(11),
B(Jump), U8(7),
- B(Star), R(15),
+ B(Star), R(12),
B(LdaZero),
- B(Star), R(14),
+ B(Star), R(11),
B(LdaTheHole),
- B(SetPendingMessage),
- B(Star), R(16),
- B(LdaZero),
- B(TestEqualStrict), R(10), U8(18),
- B(JumpIfTrue), U8(90),
- B(LdaNamedProperty), R(7), U8(7), U8(19),
- B(Star), R(12),
- B(TestUndetectable),
- B(JumpIfFalse), U8(4),
- B(Jump), U8(79),
- B(LdaSmi), I8(1),
- B(TestEqualStrict), R(10), U8(21),
- B(JumpIfFalse), U8(47),
- B(Ldar), R(12),
+ /* 56 E> */ B(SetPendingMessage),
+ B(Star), R(13),
+ B(Ldar), R(10),
+ B(JumpIfToBooleanTrue), U8(60),
+ B(LdaNamedProperty), R(7), U8(6), U8(17),
+ B(Star), R(15),
+ B(JumpIfUndefined), U8(52),
+ B(JumpIfNull), U8(50),
B(TestTypeOf), U8(6),
- B(JumpIfFalse), U8(4),
- B(Jump), U8(18),
+ B(JumpIfTrue), U8(18),
B(Wide), B(LdaSmi), I16(154),
+ B(Star), R(16),
+ B(LdaConstant), U8(7),
B(Star), R(17),
- B(LdaConstant), U8(8),
- B(Star), R(18),
- B(CallRuntime), U16(Runtime::kNewTypeError), R(17), U8(2),
+ B(CallRuntime), U16(Runtime::kNewTypeError), R(16), U8(2),
B(Throw),
- B(Mov), R(context), R(17),
- B(Mov), R(12), R(18),
- B(Mov), R(7), R(19),
- B(InvokeIntrinsic), U8(Runtime::k_Call), R(18), U8(2),
- B(Jump), U8(6),
- B(LdaTheHole),
- B(SetPendingMessage),
- B(Ldar), R(17),
- B(Jump), U8(27),
- B(Mov), R(12), R(17),
- B(Mov), R(7), R(18),
- B(InvokeIntrinsic), U8(Runtime::k_Call), R(17), U8(2),
- B(Star), R(13),
- B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(13), U8(1),
- B(JumpIfToBooleanFalse), U8(4),
- B(Jump), U8(7),
- B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(13), U8(1),
+ B(Mov), R(context), R(16),
+ B(CallProperty0), R(15), R(7), U8(19),
+ B(JumpIfJSReceiver), U8(21),
+ B(Star), R(17),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(17), U8(1),
+ B(Jump), U8(12),
+ B(Star), R(16),
+ B(LdaZero),
+ B(TestReferenceEqual), R(11),
+ B(JumpIfTrue), U8(5),
B(Ldar), R(16),
+ B(ReThrow),
+ B(Ldar), R(13),
B(SetPendingMessage),
B(LdaZero),
- B(TestReferenceEqual), R(14),
+ B(TestReferenceEqual), R(11),
B(JumpIfFalse), U8(5),
- B(Ldar), R(15),
+ B(Ldar), R(12),
B(ReThrow),
B(LdaUndefined),
/* 65 S> */ B(Return),
@@ -590,14 +465,12 @@ constant pool: [
ONE_BYTE_INTERNALIZED_STRING_TYPE ["value"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["x"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["y"],
- SCOPE_INFO_TYPE,
ONE_BYTE_INTERNALIZED_STRING_TYPE ["return"],
ONE_BYTE_INTERNALIZED_STRING_TYPE [""],
]
handlers: [
- [7, 159, 167],
- [10, 126, 128],
- [227, 237, 239],
+ [35, 105, 113],
+ [159, 172, 174],
]
---
@@ -607,125 +480,97 @@ snippet: "
}
f([1, 2, 3]);
"
-frame size: 18
+frame size: 17
parameter count: 2
-bytecode array length: 286
+bytecode array length: 214
bytecodes: [
B(SwitchOnGeneratorState), R(3), U8(0), U8(1),
- B(Mov), R(closure), R(12),
- B(Mov), R(this), R(13),
- B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(12), U8(2),
+ B(Mov), R(closure), R(5),
+ B(Mov), R(this), R(6),
+ B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(5), U8(2),
B(Star), R(3),
/* 11 E> */ B(StackCheck),
- /* 11 E> */ B(SuspendGenerator), R(3), R(0), U8(12), U8(0),
- B(ResumeGenerator), R(3), R(0), U8(12),
- B(Star), R(12),
+ /* 11 E> */ B(SuspendGenerator), R(3), R(0), U8(5), U8(0),
+ B(ResumeGenerator), R(3), R(0), U8(5),
+ B(Star), R(5),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(3), U8(1),
B(SwitchOnSmiNoFeedback), U8(1), U8(2), I8(0),
- B(Ldar), R(12),
+ B(Ldar), R(5),
/* 11 E> */ B(Throw),
- B(Ldar), R(12),
+ B(Ldar), R(5),
/* 55 S> */ B(Return),
- B(LdaZero),
- B(Star), R(8),
- B(Mov), R(context), R(14),
- B(Mov), R(context), R(15),
/* 35 S> */ B(LdaNamedProperty), R(arg0), U8(3), U8(0),
- B(Star), R(17),
- B(CallProperty0), R(17), R(arg0), U8(2),
- B(Mov), R(arg0), R(16),
+ B(Star), R(8),
+ B(CallProperty0), R(8), R(arg0), U8(2),
+ B(Mov), R(arg0), R(7),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
- B(Star), R(5),
- /* 35 E> */ B(LdaNamedProperty), R(5), U8(4), U8(4),
B(Star), R(6),
- /* 30 S> */ B(CallProperty0), R(6), R(5), U8(6),
- B(Star), R(7),
- /* 30 E> */ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(7), U8(1),
- B(ToBooleanLogicalNot),
- B(JumpIfFalse), U8(7),
- B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(7), U8(1),
- B(LdaNamedProperty), R(7), U8(5), U8(8),
- B(JumpIfToBooleanTrue), U8(28),
- B(LdaNamedProperty), R(7), U8(6), U8(10),
+ B(LdaNamedProperty), R(6), U8(4), U8(4),
+ B(Star), R(5),
+ B(LdaFalse),
B(Star), R(9),
- B(LdaSmi), I8(2),
- B(Star), R(8),
- B(Mov), R(9), R(4),
+ B(Mov), R(context), R(12),
+ B(LdaTrue),
+ B(Star), R(9),
+ /* 30 S> */ B(CallProperty0), R(5), R(6), U8(6),
+ B(Star), R(13),
+ B(JumpIfJSReceiver), U8(7),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(13), U8(1),
+ B(LdaNamedProperty), R(13), U8(5), U8(8),
+ B(JumpIfToBooleanTrue), U8(26),
+ B(LdaNamedProperty), R(13), U8(6), U8(10),
+ B(Star), R(13),
+ B(LdaFalse),
+ B(Star), R(9),
+ B(Mov), R(13), R(4),
/* 21 E> */ B(StackCheck),
- B(Mov), R(4), R(1),
+ /* 30 S> */ B(Mov), R(4), R(1),
/* 50 S> */ B(Mov), R(1), R(0),
- B(LdaZero),
- B(Star), R(8),
- B(JumpLoop), U8(47), I8(0),
- B(Jump), U8(33),
- B(Star), R(16),
- /* 50 E> */ B(CreateCatchContext), R(16), U8(7),
- B(PushContext), R(16),
- B(Star), R(15),
- B(LdaSmi), I8(2),
- B(TestEqualStrict), R(8), U8(12),
- B(JumpIfFalse), U8(6),
- B(LdaSmi), I8(1),
- B(Star), R(8),
- B(LdaImmutableCurrentContextSlot), U8(4),
- B(Star), R(17),
- B(CallRuntime), U16(Runtime::kReThrow), R(17), U8(1),
- B(PopContext), R(16),
+ B(Ldar), R(13),
+ B(JumpLoop), U8(43), I8(0),
B(LdaSmi), I8(-1),
- B(Star), R(13),
- B(Star), R(12),
+ B(Star), R(11),
+ B(Star), R(10),
B(Jump), U8(7),
- B(Star), R(13),
+ B(Star), R(11),
B(LdaZero),
- B(Star), R(12),
+ B(Star), R(10),
B(LdaTheHole),
- B(SetPendingMessage),
+ /* 50 E> */ B(SetPendingMessage),
+ B(Star), R(12),
+ B(Ldar), R(9),
+ B(JumpIfToBooleanTrue), U8(60),
+ B(LdaNamedProperty), R(6), U8(7), U8(12),
B(Star), R(14),
- B(LdaZero),
- B(TestEqualStrict), R(8), U8(13),
- B(JumpIfTrue), U8(90),
- B(LdaNamedProperty), R(5), U8(8), U8(14),
- B(Star), R(10),
- B(TestUndetectable),
- B(JumpIfFalse), U8(4),
- B(Jump), U8(79),
- B(LdaSmi), I8(1),
- B(TestEqualStrict), R(8), U8(16),
- B(JumpIfFalse), U8(47),
- B(Ldar), R(10),
+ B(JumpIfUndefined), U8(52),
+ B(JumpIfNull), U8(50),
B(TestTypeOf), U8(6),
- B(JumpIfFalse), U8(4),
- B(Jump), U8(18),
+ B(JumpIfTrue), U8(18),
B(Wide), B(LdaSmi), I16(154),
B(Star), R(15),
- B(LdaConstant), U8(9),
+ B(LdaConstant), U8(8),
B(Star), R(16),
B(CallRuntime), U16(Runtime::kNewTypeError), R(15), U8(2),
B(Throw),
B(Mov), R(context), R(15),
- B(Mov), R(10), R(16),
- B(Mov), R(5), R(17),
- B(InvokeIntrinsic), U8(Runtime::k_Call), R(16), U8(2),
- B(Jump), U8(6),
- B(LdaTheHole),
- B(SetPendingMessage),
+ B(CallProperty0), R(14), R(6), U8(14),
+ B(JumpIfJSReceiver), U8(21),
+ B(Star), R(16),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(16), U8(1),
+ B(Jump), U8(12),
+ B(Star), R(15),
+ B(LdaZero),
+ B(TestReferenceEqual), R(10),
+ B(JumpIfTrue), U8(5),
B(Ldar), R(15),
- B(Jump), U8(27),
- B(Mov), R(10), R(15),
- B(Mov), R(5), R(16),
- B(InvokeIntrinsic), U8(Runtime::k_Call), R(15), U8(2),
- B(Star), R(11),
- B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(11), U8(1),
- B(JumpIfToBooleanFalse), U8(4),
- B(Jump), U8(7),
- B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(11), U8(1),
- B(Ldar), R(14),
+ B(ReThrow),
+ B(Ldar), R(12),
B(SetPendingMessage),
B(LdaZero),
- B(TestReferenceEqual), R(12),
+ B(TestReferenceEqual), R(10),
B(JumpIfFalse), U8(5),
- B(Ldar), R(13),
+ B(Ldar), R(11),
B(ReThrow),
B(LdaUndefined),
/* 55 S> */ B(Return),
@@ -738,14 +583,12 @@ constant pool: [
ONE_BYTE_INTERNALIZED_STRING_TYPE ["next"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["done"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["value"],
- SCOPE_INFO_TYPE,
ONE_BYTE_INTERNALIZED_STRING_TYPE ["return"],
ONE_BYTE_INTERNALIZED_STRING_TYPE [""],
]
handlers: [
- [48, 162, 170],
- [51, 129, 131],
- [230, 240, 242],
+ [76, 122, 130],
+ [176, 189, 191],
]
---
@@ -755,166 +598,136 @@ snippet: "
}
f([1, 2, 3]);
"
-frame size: 17
+frame size: 16
parameter count: 2
-bytecode array length: 330
+bytecode array length: 258
bytecodes: [
B(SwitchOnGeneratorState), R(2), U8(0), U8(2),
- B(Mov), R(closure), R(11),
- B(Mov), R(this), R(12),
- B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(11), U8(2),
+ B(Mov), R(closure), R(4),
+ B(Mov), R(this), R(5),
+ B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(4), U8(2),
B(Star), R(2),
/* 11 E> */ B(StackCheck),
- /* 11 E> */ B(SuspendGenerator), R(2), R(0), U8(11), U8(0),
- B(ResumeGenerator), R(2), R(0), U8(11),
- B(Star), R(11),
+ /* 11 E> */ B(SuspendGenerator), R(2), R(0), U8(4), U8(0),
+ B(ResumeGenerator), R(2), R(0), U8(4),
+ B(Star), R(4),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(2), U8(1),
B(SwitchOnSmiNoFeedback), U8(2), U8(2), I8(0),
- B(Ldar), R(11),
+ B(Ldar), R(4),
/* 11 E> */ B(Throw),
- B(Ldar), R(11),
+ B(Ldar), R(4),
/* 49 S> */ B(Return),
- B(LdaZero),
- B(Star), R(7),
- B(Mov), R(context), R(13),
- B(Mov), R(context), R(14),
/* 35 S> */ B(LdaNamedProperty), R(arg0), U8(4), U8(0),
- B(Star), R(16),
- B(CallProperty0), R(16), R(arg0), U8(2),
- B(Mov), R(arg0), R(15),
+ B(Star), R(7),
+ B(CallProperty0), R(7), R(arg0), U8(2),
+ B(Mov), R(arg0), R(6),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
- B(Star), R(4),
- /* 35 E> */ B(LdaNamedProperty), R(4), U8(5), U8(4),
B(Star), R(5),
- /* 30 S> */ B(CallProperty0), R(5), R(4), U8(6),
- B(Star), R(6),
- /* 30 E> */ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(6), U8(1),
- B(ToBooleanLogicalNot),
- B(JumpIfFalse), U8(7),
- B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(6), U8(1),
- B(LdaNamedProperty), R(6), U8(6), U8(8),
- B(JumpIfToBooleanTrue), U8(65),
- B(LdaNamedProperty), R(6), U8(7), U8(10),
+ B(LdaNamedProperty), R(5), U8(5), U8(4),
+ B(Star), R(4),
+ B(LdaFalse),
B(Star), R(8),
- B(LdaSmi), I8(2),
- B(Star), R(7),
- B(Mov), R(8), R(3),
+ B(Mov), R(context), R(11),
+ B(LdaTrue),
+ B(Star), R(8),
+ /* 30 S> */ B(CallProperty0), R(4), R(5), U8(6),
+ B(Star), R(12),
+ B(JumpIfJSReceiver), U8(7),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(12), U8(1),
+ B(LdaNamedProperty), R(12), U8(6), U8(8),
+ B(JumpIfToBooleanTrue), U8(64),
+ B(LdaNamedProperty), R(12), U8(7), U8(10),
+ B(Star), R(12),
+ B(LdaFalse),
+ B(Star), R(8),
+ B(Mov), R(12), R(3),
/* 21 E> */ B(StackCheck),
- B(Mov), R(3), R(0),
+ /* 30 S> */ B(Mov), R(3), R(0),
/* 40 S> */ B(LdaFalse),
- B(Star), R(16),
- B(Mov), R(0), R(15),
- B(InvokeIntrinsic), U8(Runtime::k_CreateIterResultObject), R(15), U8(2),
- /* 40 E> */ B(SuspendGenerator), R(2), R(0), U8(15), U8(1),
- B(ResumeGenerator), R(2), R(0), U8(15),
- B(Star), R(15),
+ B(Star), R(14),
+ B(Mov), R(0), R(13),
+ B(InvokeIntrinsic), U8(Runtime::k_CreateIterResultObject), R(13), U8(2),
+ /* 40 E> */ B(SuspendGenerator), R(2), R(0), U8(13), U8(1),
+ B(ResumeGenerator), R(2), R(0), U8(13),
+ B(Star), R(13),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(2), U8(1),
B(SwitchOnSmiNoFeedback), U8(8), U8(2), I8(0),
- B(Ldar), R(15),
+ B(Ldar), R(13),
/* 40 E> */ B(Throw),
- B(LdaZero),
- B(Star), R(11),
- B(Mov), R(15), R(12),
- B(Jump), U8(55),
- B(LdaZero),
- B(Star), R(7),
- B(JumpLoop), U8(84), I8(0),
- B(Jump), U8(33),
- B(Star), R(15),
- B(CreateCatchContext), R(15), U8(10),
- B(PushContext), R(15),
- B(Star), R(14),
- B(LdaSmi), I8(2),
- B(TestEqualStrict), R(7), U8(12),
- B(JumpIfFalse), U8(6),
B(LdaSmi), I8(1),
- B(Star), R(7),
- B(LdaImmutableCurrentContextSlot), U8(4),
- B(Star), R(16),
- B(CallRuntime), U16(Runtime::kReThrow), R(16), U8(1),
- B(PopContext), R(15),
+ B(Star), R(9),
+ B(Mov), R(13), R(10),
+ B(Jump), U8(20),
+ B(Ldar), R(13),
+ B(JumpLoop), U8(81), I8(0),
B(LdaSmi), I8(-1),
- B(Star), R(12),
- B(Star), R(11),
- B(Jump), U8(8),
- B(Star), R(12),
- B(LdaSmi), I8(1),
- B(Star), R(11),
+ B(Star), R(10),
+ B(Star), R(9),
+ B(Jump), U8(7),
+ B(Star), R(10),
+ B(LdaZero),
+ B(Star), R(9),
B(LdaTheHole),
B(SetPendingMessage),
+ B(Star), R(11),
+ B(Ldar), R(8),
+ B(JumpIfToBooleanTrue), U8(60),
+ B(LdaNamedProperty), R(5), U8(10), U8(12),
B(Star), R(13),
- B(LdaZero),
- B(TestEqualStrict), R(7), U8(13),
- B(JumpIfTrue), U8(90),
- B(LdaNamedProperty), R(4), U8(11), U8(14),
- B(Star), R(9),
- B(TestUndetectable),
- B(JumpIfFalse), U8(4),
- B(Jump), U8(79),
- B(LdaSmi), I8(1),
- B(TestEqualStrict), R(7), U8(16),
- B(JumpIfFalse), U8(47),
- B(Ldar), R(9),
+ B(JumpIfUndefined), U8(52),
+ B(JumpIfNull), U8(50),
B(TestTypeOf), U8(6),
- B(JumpIfFalse), U8(4),
- B(Jump), U8(18),
+ B(JumpIfTrue), U8(18),
B(Wide), B(LdaSmi), I16(154),
B(Star), R(14),
- B(LdaConstant), U8(12),
+ B(LdaConstant), U8(11),
B(Star), R(15),
B(CallRuntime), U16(Runtime::kNewTypeError), R(14), U8(2),
B(Throw),
B(Mov), R(context), R(14),
- B(Mov), R(9), R(15),
- B(Mov), R(4), R(16),
- B(InvokeIntrinsic), U8(Runtime::k_Call), R(15), U8(2),
- B(Jump), U8(6),
- B(LdaTheHole),
- B(SetPendingMessage),
+ B(CallProperty0), R(13), R(5), U8(14),
+ B(JumpIfJSReceiver), U8(21),
+ B(Star), R(15),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(15), U8(1),
+ B(Jump), U8(12),
+ B(Star), R(14),
+ B(LdaZero),
+ B(TestReferenceEqual), R(9),
+ B(JumpIfTrue), U8(5),
B(Ldar), R(14),
- B(Jump), U8(27),
- B(Mov), R(9), R(14),
- B(Mov), R(4), R(15),
- B(InvokeIntrinsic), U8(Runtime::k_Call), R(14), U8(2),
- B(Star), R(10),
- B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(10), U8(1),
- B(JumpIfToBooleanFalse), U8(4),
- B(Jump), U8(7),
- B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(10), U8(1),
- B(Ldar), R(13),
- B(SetPendingMessage),
+ B(ReThrow),
B(Ldar), R(11),
- B(SwitchOnSmiNoFeedback), U8(13), U8(2), I8(0),
+ B(SetPendingMessage),
+ B(Ldar), R(9),
+ B(SwitchOnSmiNoFeedback), U8(12), U8(2), I8(0),
B(Jump), U8(8),
- B(Ldar), R(12),
- /* 49 S> */ B(Return),
- B(Ldar), R(12),
+ B(Ldar), R(10),
B(ReThrow),
+ B(Ldar), R(10),
+ /* 49 S> */ B(Return),
B(LdaUndefined),
/* 49 S> */ B(Return),
]
constant pool: [
Smi [22],
- Smi [135],
+ Smi [129],
Smi [10],
Smi [7],
SYMBOL_TYPE,
ONE_BYTE_INTERNALIZED_STRING_TYPE ["next"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["done"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["value"],
- Smi [15],
+ Smi [16],
Smi [7],
- SCOPE_INFO_TYPE,
ONE_BYTE_INTERNALIZED_STRING_TYPE ["return"],
ONE_BYTE_INTERNALIZED_STRING_TYPE [""],
Smi [6],
Smi [9],
]
handlers: [
- [48, 199, 207],
- [51, 166, 168],
- [268, 278, 280],
+ [76, 160, 168],
+ [214, 227, 229],
]
---
@@ -924,171 +737,111 @@ snippet: "
}
f([1, 2, 3]);
"
-frame size: 23
+frame size: 18
parameter count: 2
-bytecode array length: 367
+bytecode array length: 232
bytecodes: [
+ B(Mov), R(closure), R(5),
+ B(Mov), R(this), R(6),
+ B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionEnter), R(5), U8(2),
+ B(Star), R(3),
/* 16 E> */ B(StackCheck),
- B(CallJSRuntime), U8(%async_function_promise_create), R(0), U8(0),
- B(Star), R(0),
- B(Mov), R(context), R(15),
- B(Mov), R(context), R(16),
- B(LdaZero),
- B(Star), R(9),
- B(Mov), R(context), R(19),
- B(Mov), R(context), R(20),
+ B(Mov), R(context), R(5),
/* 40 S> */ B(LdaNamedProperty), R(arg0), U8(0), U8(0),
- B(Star), R(22),
- B(CallProperty0), R(22), R(arg0), U8(2),
- B(Mov), R(arg0), R(21),
+ B(Star), R(9),
+ B(CallProperty0), R(9), R(arg0), U8(2),
+ B(Mov), R(arg0), R(8),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
- B(Star), R(6),
- /* 40 E> */ B(LdaNamedProperty), R(6), U8(1), U8(4),
B(Star), R(7),
- /* 35 S> */ B(CallProperty0), R(7), R(6), U8(6),
- B(Star), R(8),
- /* 35 E> */ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(8), U8(1),
- B(ToBooleanLogicalNot),
- B(JumpIfFalse), U8(7),
- B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(8), U8(1),
- B(LdaNamedProperty), R(8), U8(2), U8(8),
- B(JumpIfToBooleanTrue), U8(28),
- B(LdaNamedProperty), R(8), U8(3), U8(10),
+ B(LdaNamedProperty), R(7), U8(1), U8(4),
+ B(Star), R(6),
+ B(LdaFalse),
B(Star), R(10),
- B(LdaSmi), I8(2),
- B(Star), R(9),
- B(Mov), R(10), R(5),
+ B(Mov), R(context), R(13),
+ B(LdaTrue),
+ B(Star), R(10),
+ /* 35 S> */ B(CallProperty0), R(6), R(7), U8(6),
+ B(Star), R(14),
+ B(JumpIfJSReceiver), U8(7),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(14), U8(1),
+ B(LdaNamedProperty), R(14), U8(2), U8(8),
+ B(JumpIfToBooleanTrue), U8(26),
+ B(LdaNamedProperty), R(14), U8(3), U8(10),
+ B(Star), R(14),
+ B(LdaFalse),
+ B(Star), R(10),
+ B(Mov), R(14), R(4),
/* 26 E> */ B(StackCheck),
- B(Mov), R(5), R(2),
- /* 55 S> */ B(Mov), R(2), R(1),
- B(LdaZero),
- B(Star), R(9),
- B(JumpLoop), U8(47), I8(0),
- B(Jump), U8(37),
- B(Star), R(21),
- /* 55 E> */ B(CreateCatchContext), R(21), U8(4),
- B(Star), R(20),
- B(LdaTheHole),
- B(SetPendingMessage),
- B(Ldar), R(20),
- B(PushContext), R(21),
- B(LdaSmi), I8(2),
- B(TestEqualStrict), R(9), U8(12),
- B(JumpIfFalse), U8(6),
- B(LdaSmi), I8(1),
- B(Star), R(9),
- B(LdaImmutableCurrentContextSlot), U8(4),
- B(Star), R(22),
- B(CallRuntime), U16(Runtime::kReThrow), R(22), U8(1),
- B(PopContext), R(21),
+ /* 35 S> */ B(Mov), R(4), R(1),
+ /* 55 S> */ B(Mov), R(1), R(0),
+ B(Ldar), R(14),
+ B(JumpLoop), U8(43), I8(0),
B(LdaSmi), I8(-1),
- B(Star), R(18),
- B(Star), R(17),
+ B(Star), R(12),
+ B(Star), R(11),
B(Jump), U8(7),
- B(Star), R(18),
- B(LdaZero),
- B(Star), R(17),
- B(LdaTheHole),
- B(SetPendingMessage),
- B(Star), R(19),
+ B(Star), R(12),
B(LdaZero),
- B(TestEqualStrict), R(9), U8(13),
- B(JumpIfTrue), U8(90),
- B(LdaNamedProperty), R(6), U8(5), U8(14),
B(Star), R(11),
- B(TestUndetectable),
- B(JumpIfFalse), U8(4),
- B(Jump), U8(79),
- B(LdaSmi), I8(1),
- B(TestEqualStrict), R(9), U8(16),
- B(JumpIfFalse), U8(47),
- B(Ldar), R(11),
+ B(LdaTheHole),
+ /* 55 E> */ B(SetPendingMessage),
+ B(Star), R(13),
+ B(Ldar), R(10),
+ B(JumpIfToBooleanTrue), U8(60),
+ B(LdaNamedProperty), R(7), U8(4), U8(12),
+ B(Star), R(15),
+ B(JumpIfUndefined), U8(52),
+ B(JumpIfNull), U8(50),
B(TestTypeOf), U8(6),
- B(JumpIfFalse), U8(4),
- B(Jump), U8(18),
+ B(JumpIfTrue), U8(18),
B(Wide), B(LdaSmi), I16(154),
- B(Star), R(20),
- B(LdaConstant), U8(6),
- B(Star), R(21),
- B(CallRuntime), U16(Runtime::kNewTypeError), R(20), U8(2),
+ B(Star), R(16),
+ B(LdaConstant), U8(5),
+ B(Star), R(17),
+ B(CallRuntime), U16(Runtime::kNewTypeError), R(16), U8(2),
B(Throw),
- B(Mov), R(context), R(20),
- B(Mov), R(11), R(21),
- B(Mov), R(6), R(22),
- B(InvokeIntrinsic), U8(Runtime::k_Call), R(21), U8(2),
- B(Jump), U8(6),
- B(LdaTheHole),
- B(SetPendingMessage),
- B(Ldar), R(20),
- B(Jump), U8(27),
- B(Mov), R(11), R(20),
- B(Mov), R(6), R(21),
- B(InvokeIntrinsic), U8(Runtime::k_Call), R(20), U8(2),
- B(Star), R(12),
- B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(12), U8(1),
- B(JumpIfToBooleanFalse), U8(4),
- B(Jump), U8(7),
- B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(12), U8(1),
- B(Ldar), R(19),
+ B(Mov), R(context), R(16),
+ B(CallProperty0), R(15), R(7), U8(14),
+ B(JumpIfJSReceiver), U8(21),
+ B(Star), R(17),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(17), U8(1),
+ B(Jump), U8(12),
+ B(Star), R(16),
+ B(LdaZero),
+ B(TestReferenceEqual), R(11),
+ B(JumpIfTrue), U8(5),
+ B(Ldar), R(16),
+ B(ReThrow),
+ B(Ldar), R(13),
B(SetPendingMessage),
B(LdaZero),
- B(TestReferenceEqual), R(17),
+ B(TestReferenceEqual), R(11),
B(JumpIfFalse), U8(5),
- B(Ldar), R(18),
+ B(Ldar), R(12),
B(ReThrow),
B(LdaUndefined),
- B(Star), R(14),
- B(LdaZero),
- B(Star), R(13),
- B(Jump), U8(56),
- B(Jump), U8(40),
- B(Star), R(17),
- B(CreateCatchContext), R(17), U8(7),
- B(Star), R(16),
- B(LdaTheHole),
- B(SetPendingMessage),
- B(Ldar), R(16),
- B(PushContext), R(17),
- B(LdaImmutableCurrentContextSlot), U8(4),
- B(Star), R(19),
+ B(Star), R(7),
B(LdaFalse),
- B(Star), R(20),
- B(Mov), R(0), R(18),
- B(InvokeIntrinsic), U8(Runtime::k_RejectPromise), R(18), U8(3),
- B(PopContext), R(17),
- B(LdaSmi), I8(1),
- B(Star), R(13),
- B(Mov), R(0), R(14),
- B(Jump), U8(16),
- B(LdaSmi), I8(-1),
- B(Star), R(14),
- B(Star), R(13),
- B(Jump), U8(8),
- B(Star), R(14),
- B(LdaSmi), I8(2),
- B(Star), R(13),
+ B(Star), R(8),
+ B(Mov), R(3), R(6),
+ B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionResolve), R(6), U8(3),
+ /* 60 S> */ B(Return),
+ B(Jump), U8(30),
+ B(Star), R(6),
+ B(CreateCatchContext), R(6), U8(6),
+ B(Star), R(5),
B(LdaTheHole),
B(SetPendingMessage),
- B(Star), R(15),
+ B(Ldar), R(5),
+ B(PushContext), R(6),
+ B(LdaImmutableCurrentContextSlot), U8(4),
+ B(Star), R(8),
B(LdaFalse),
- B(Star), R(17),
- B(Mov), R(0), R(16),
- B(CallJSRuntime), U8(%async_function_promise_release), R(16), U8(2),
- B(Ldar), R(15),
- B(SetPendingMessage),
- B(Ldar), R(13),
- B(SwitchOnSmiNoFeedback), U8(8), U8(3), I8(0),
- B(Jump), U8(21),
- B(Mov), R(0), R(16),
- B(Mov), R(14), R(17),
- B(InvokeIntrinsic), U8(Runtime::k_ResolvePromise), R(16), U8(2),
- B(Ldar), R(0),
- /* 60 S> */ B(Return),
- B(Ldar), R(14),
+ B(Star), R(9),
+ B(Mov), R(3), R(7),
+ B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionReject), R(7), U8(3),
/* 60 S> */ B(Return),
- B(Ldar), R(14),
- B(ReThrow),
B(LdaUndefined),
/* 60 S> */ B(Return),
]
@@ -1097,20 +850,14 @@ constant pool: [
ONE_BYTE_INTERNALIZED_STRING_TYPE ["next"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["done"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["value"],
- SCOPE_INFO_TYPE,
ONE_BYTE_INTERNALIZED_STRING_TYPE ["return"],
ONE_BYTE_INTERNALIZED_STRING_TYPE [""],
SCOPE_INFO_TYPE,
- Smi [6],
- Smi [19],
- Smi [22],
]
handlers: [
- [10, 307, 315],
- [13, 267, 269],
- [19, 137, 145],
- [22, 100, 102],
- [205, 215, 217],
+ [16, 200, 202],
+ [50, 96, 104],
+ [150, 163, 165],
]
---
@@ -1120,211 +867,140 @@ snippet: "
}
f([1, 2, 3]);
"
-frame size: 23
+frame size: 17
parameter count: 2
-bytecode array length: 418
+bytecode array length: 268
bytecodes: [
- B(SwitchOnGeneratorState), R(3), U8(0), U8(1),
- B(Mov), R(closure), R(12),
- B(Mov), R(this), R(13),
- B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(12), U8(2),
- B(Star), R(3),
+ B(SwitchOnGeneratorState), R(2), U8(0), U8(1),
+ B(Mov), R(closure), R(4),
+ B(Mov), R(this), R(5),
+ B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionEnter), R(4), U8(2),
+ B(Star), R(2),
/* 16 E> */ B(StackCheck),
- B(CallJSRuntime), U8(%async_function_promise_create), R(0), U8(0),
- B(Star), R(0),
- B(Mov), R(context), R(14),
- B(Mov), R(context), R(15),
- B(LdaZero),
- B(Star), R(8),
- B(Mov), R(context), R(18),
- B(Mov), R(context), R(19),
+ B(Mov), R(context), R(4),
/* 40 S> */ B(LdaNamedProperty), R(arg0), U8(1), U8(0),
- B(Star), R(21),
- B(CallProperty0), R(21), R(arg0), U8(2),
- B(Mov), R(arg0), R(20),
+ B(Star), R(8),
+ B(CallProperty0), R(8), R(arg0), U8(2),
+ B(Mov), R(arg0), R(7),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
- B(Star), R(5),
- /* 40 E> */ B(LdaNamedProperty), R(5), U8(2), U8(4),
B(Star), R(6),
- /* 35 S> */ B(CallProperty0), R(6), R(5), U8(6),
- B(Star), R(7),
- /* 35 E> */ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(7), U8(1),
- B(ToBooleanLogicalNot),
- B(JumpIfFalse), U8(7),
- B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(7), U8(1),
- B(LdaNamedProperty), R(7), U8(3), U8(8),
- B(JumpIfToBooleanTrue), U8(63),
- B(LdaNamedProperty), R(7), U8(4), U8(10),
+ B(LdaNamedProperty), R(6), U8(2), U8(4),
+ B(Star), R(5),
+ B(LdaFalse),
B(Star), R(9),
- B(LdaSmi), I8(2),
- B(Star), R(8),
- B(Mov), R(9), R(4),
+ B(Mov), R(context), R(12),
+ B(LdaTrue),
+ B(Star), R(9),
+ /* 35 S> */ B(CallProperty0), R(5), R(6), U8(6),
+ B(Star), R(13),
+ B(JumpIfJSReceiver), U8(7),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(13), U8(1),
+ B(LdaNamedProperty), R(13), U8(3), U8(8),
+ B(JumpIfToBooleanTrue), U8(58),
+ B(LdaNamedProperty), R(13), U8(4), U8(10),
+ B(Star), R(13),
+ B(LdaFalse),
+ B(Star), R(9),
+ B(Mov), R(13), R(3),
/* 26 E> */ B(StackCheck),
- B(Mov), R(4), R(1),
- /* 45 S> */ B(Mov), R(3), R(20),
- B(Mov), R(1), R(21),
- B(Mov), R(0), R(22),
- B(CallJSRuntime), U8(%async_function_await_uncaught), R(20), U8(3),
- /* 45 E> */ B(SuspendGenerator), R(3), R(0), U8(20), U8(0),
- B(ResumeGenerator), R(3), R(0), U8(20),
- B(Star), R(20),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(3), U8(1),
- B(Star), R(21),
+ /* 35 S> */ B(Mov), R(3), R(0),
+ /* 45 S> */ B(Mov), R(2), R(14),
+ B(Mov), R(0), R(15),
+ B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionAwaitUncaught), R(14), U8(2),
+ /* 45 E> */ B(SuspendGenerator), R(2), R(0), U8(14), U8(0),
+ B(ResumeGenerator), R(2), R(0), U8(14),
+ B(Star), R(14),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(2), U8(1),
+ B(Star), R(15),
B(LdaZero),
- B(TestReferenceEqual), R(21),
+ B(TestReferenceEqual), R(15),
B(JumpIfTrue), U8(5),
- B(Ldar), R(20),
+ B(Ldar), R(14),
B(ReThrow),
- B(LdaZero),
- B(Star), R(8),
- B(JumpLoop), U8(82), I8(0),
- B(Jump), U8(37),
- B(Star), R(20),
- B(CreateCatchContext), R(20), U8(5),
- B(Star), R(19),
- B(LdaTheHole),
- B(SetPendingMessage),
- B(Ldar), R(19),
- B(PushContext), R(20),
- B(LdaSmi), I8(2),
- B(TestEqualStrict), R(8), U8(12),
- B(JumpIfFalse), U8(6),
- B(LdaSmi), I8(1),
- B(Star), R(8),
- B(LdaImmutableCurrentContextSlot), U8(4),
- B(Star), R(21),
- B(CallRuntime), U16(Runtime::kReThrow), R(21), U8(1),
- B(PopContext), R(20),
+ B(Ldar), R(14),
+ B(JumpLoop), U8(75), I8(0),
B(LdaSmi), I8(-1),
- B(Star), R(17),
- B(Star), R(16),
+ B(Star), R(11),
+ B(Star), R(10),
B(Jump), U8(7),
- B(Star), R(17),
+ B(Star), R(11),
B(LdaZero),
- B(Star), R(16),
+ B(Star), R(10),
B(LdaTheHole),
B(SetPendingMessage),
- B(Star), R(18),
- B(LdaZero),
- B(TestEqualStrict), R(8), U8(13),
- B(JumpIfTrue), U8(90),
- B(LdaNamedProperty), R(5), U8(6), U8(14),
- B(Star), R(10),
- B(TestUndetectable),
- B(JumpIfFalse), U8(4),
- B(Jump), U8(79),
- B(LdaSmi), I8(1),
- B(TestEqualStrict), R(8), U8(16),
- B(JumpIfFalse), U8(47),
- B(Ldar), R(10),
+ B(Star), R(12),
+ B(Ldar), R(9),
+ B(JumpIfToBooleanTrue), U8(60),
+ B(LdaNamedProperty), R(6), U8(5), U8(12),
+ B(Star), R(14),
+ B(JumpIfUndefined), U8(52),
+ B(JumpIfNull), U8(50),
B(TestTypeOf), U8(6),
- B(JumpIfFalse), U8(4),
- B(Jump), U8(18),
+ B(JumpIfTrue), U8(18),
B(Wide), B(LdaSmi), I16(154),
- B(Star), R(19),
- B(LdaConstant), U8(7),
- B(Star), R(20),
- B(CallRuntime), U16(Runtime::kNewTypeError), R(19), U8(2),
+ B(Star), R(15),
+ B(LdaConstant), U8(6),
+ B(Star), R(16),
+ B(CallRuntime), U16(Runtime::kNewTypeError), R(15), U8(2),
B(Throw),
- B(Mov), R(context), R(19),
- B(Mov), R(10), R(20),
- B(Mov), R(5), R(21),
- B(InvokeIntrinsic), U8(Runtime::k_Call), R(20), U8(2),
- B(Jump), U8(6),
- B(LdaTheHole),
- B(SetPendingMessage),
- B(Ldar), R(19),
- B(Jump), U8(27),
- B(Mov), R(10), R(19),
- B(Mov), R(5), R(20),
- B(InvokeIntrinsic), U8(Runtime::k_Call), R(19), U8(2),
- B(Star), R(11),
- B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(11), U8(1),
- B(JumpIfToBooleanFalse), U8(4),
- B(Jump), U8(7),
- B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(11), U8(1),
- B(Ldar), R(18),
+ B(Mov), R(context), R(15),
+ B(CallProperty0), R(14), R(6), U8(14),
+ B(JumpIfJSReceiver), U8(21),
+ B(Star), R(16),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(16), U8(1),
+ B(Jump), U8(12),
+ B(Star), R(15),
+ B(LdaZero),
+ B(TestReferenceEqual), R(10),
+ B(JumpIfTrue), U8(5),
+ B(Ldar), R(15),
+ B(ReThrow),
+ B(Ldar), R(12),
B(SetPendingMessage),
B(LdaZero),
- B(TestReferenceEqual), R(16),
+ B(TestReferenceEqual), R(10),
B(JumpIfFalse), U8(5),
- B(Ldar), R(17),
+ B(Ldar), R(11),
B(ReThrow),
B(LdaUndefined),
- B(Star), R(13),
- B(LdaZero),
- B(Star), R(12),
- B(Jump), U8(56),
- B(Jump), U8(40),
- B(Star), R(16),
- B(CreateCatchContext), R(16), U8(8),
- B(Star), R(15),
+ B(Star), R(6),
+ B(LdaTrue),
+ B(Star), R(7),
+ B(Mov), R(2), R(5),
+ B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionResolve), R(5), U8(3),
+ /* 54 S> */ B(Return),
+ B(Jump), U8(30),
+ B(Star), R(5),
+ B(CreateCatchContext), R(5), U8(7),
+ B(Star), R(4),
B(LdaTheHole),
B(SetPendingMessage),
- B(Ldar), R(15),
- B(PushContext), R(16),
+ B(Ldar), R(4),
+ B(PushContext), R(5),
B(LdaImmutableCurrentContextSlot), U8(4),
- B(Star), R(18),
- B(LdaFalse),
- B(Star), R(19),
- B(Mov), R(0), R(17),
- B(InvokeIntrinsic), U8(Runtime::k_RejectPromise), R(17), U8(3),
- B(PopContext), R(16),
- B(LdaSmi), I8(1),
- B(Star), R(12),
- B(Mov), R(0), R(13),
- B(Jump), U8(16),
- B(LdaSmi), I8(-1),
- B(Star), R(13),
- B(Star), R(12),
- B(Jump), U8(8),
- B(Star), R(13),
- B(LdaSmi), I8(2),
- B(Star), R(12),
- B(LdaTheHole),
- B(SetPendingMessage),
- B(Star), R(14),
+ B(Star), R(7),
B(LdaTrue),
- B(Star), R(16),
- B(Mov), R(0), R(15),
- B(CallJSRuntime), U8(%async_function_promise_release), R(15), U8(2),
- B(Ldar), R(14),
- B(SetPendingMessage),
- B(Ldar), R(12),
- B(SwitchOnSmiNoFeedback), U8(9), U8(3), I8(0),
- B(Jump), U8(21),
- B(Mov), R(0), R(15),
- B(Mov), R(13), R(16),
- B(InvokeIntrinsic), U8(Runtime::k_ResolvePromise), R(15), U8(2),
- B(Ldar), R(0),
- /* 54 S> */ B(Return),
- B(Ldar), R(13),
+ B(Star), R(8),
+ B(Mov), R(2), R(6),
+ B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionReject), R(6), U8(3),
/* 54 S> */ B(Return),
- B(Ldar), R(13),
- B(ReThrow),
B(LdaUndefined),
/* 54 S> */ B(Return),
]
constant pool: [
- Smi [125],
+ Smi [107],
SYMBOL_TYPE,
ONE_BYTE_INTERNALIZED_STRING_TYPE ["next"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["done"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["value"],
- SCOPE_INFO_TYPE,
ONE_BYTE_INTERNALIZED_STRING_TYPE ["return"],
ONE_BYTE_INTERNALIZED_STRING_TYPE [""],
SCOPE_INFO_TYPE,
- Smi [6],
- Smi [19],
- Smi [22],
]
handlers: [
- [26, 358, 366],
- [29, 318, 320],
- [35, 188, 196],
- [38, 151, 153],
- [256, 266, 268],
+ [20, 236, 238],
+ [54, 132, 140],
+ [186, 199, 201],
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/GenerateTestUndetectable.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/GenerateTestUndetectable.golden
index d5f7a8f5bf..02f8f49ece 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/GenerateTestUndetectable.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/GenerateTestUndetectable.golden
@@ -12,13 +12,13 @@ snippet: "
if (obj_a == null) { b = 20;}
return b;
"
-frame size: 3
+frame size: 2
parameter count: 1
-bytecode array length: 25
+bytecode array length: 23
bytecodes: [
/* 30 E> */ B(StackCheck),
- /* 46 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41), R(2),
- B(Mov), R(2), R(0),
+ /* 46 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41),
+ B(Star), R(0),
/* 63 S> */ B(LdaSmi), I8(10),
B(Star), R(1),
/* 67 S> */ B(Ldar), R(0),
@@ -42,13 +42,13 @@ snippet: "
if (obj_a == undefined) { b = 20;}
return b;
"
-frame size: 3
+frame size: 2
parameter count: 1
-bytecode array length: 25
+bytecode array length: 23
bytecodes: [
/* 30 E> */ B(StackCheck),
- /* 46 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41), R(2),
- B(Mov), R(2), R(0),
+ /* 46 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41),
+ B(Star), R(0),
/* 63 S> */ B(LdaSmi), I8(10),
B(Star), R(1),
/* 67 S> */ B(Ldar), R(0),
@@ -72,13 +72,13 @@ snippet: "
if (obj_a != null) { b = 20;}
return b;
"
-frame size: 3
+frame size: 2
parameter count: 1
-bytecode array length: 25
+bytecode array length: 23
bytecodes: [
/* 30 E> */ B(StackCheck),
- /* 46 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41), R(2),
- B(Mov), R(2), R(0),
+ /* 46 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41),
+ B(Star), R(0),
/* 63 S> */ B(LdaSmi), I8(10),
B(Star), R(1),
/* 67 S> */ B(Ldar), R(0),
@@ -102,13 +102,13 @@ snippet: "
if (obj_a != undefined) { b = 20;}
return b;
"
-frame size: 3
+frame size: 2
parameter count: 1
-bytecode array length: 25
+bytecode array length: 23
bytecodes: [
/* 30 E> */ B(StackCheck),
- /* 46 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41), R(2),
- B(Mov), R(2), R(0),
+ /* 46 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41),
+ B(Star), R(0),
/* 63 S> */ B(LdaSmi), I8(10),
B(Star), R(1),
/* 67 S> */ B(Ldar), R(0),
@@ -132,13 +132,13 @@ snippet: "
if (obj_a === null) { b = 20;}
return b;
"
-frame size: 3
+frame size: 2
parameter count: 1
-bytecode array length: 24
+bytecode array length: 22
bytecodes: [
/* 30 E> */ B(StackCheck),
- /* 46 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41), R(2),
- B(Mov), R(2), R(0),
+ /* 46 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41),
+ B(Star), R(0),
/* 63 S> */ B(LdaSmi), I8(10),
B(Star), R(1),
/* 67 S> */ B(Ldar), R(0),
@@ -161,13 +161,13 @@ snippet: "
if (obj_a === undefined) { b = 20;}
return b;
"
-frame size: 3
+frame size: 2
parameter count: 1
-bytecode array length: 24
+bytecode array length: 22
bytecodes: [
/* 30 E> */ B(StackCheck),
- /* 46 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41), R(2),
- B(Mov), R(2), R(0),
+ /* 46 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41),
+ B(Star), R(0),
/* 63 S> */ B(LdaSmi), I8(10),
B(Star), R(1),
/* 67 S> */ B(Ldar), R(0),
@@ -190,13 +190,13 @@ snippet: "
if (obj_a !== null) { b = 20;}
return b;
"
-frame size: 3
+frame size: 2
parameter count: 1
-bytecode array length: 24
+bytecode array length: 22
bytecodes: [
/* 30 E> */ B(StackCheck),
- /* 46 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41), R(2),
- B(Mov), R(2), R(0),
+ /* 46 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41),
+ B(Star), R(0),
/* 63 S> */ B(LdaSmi), I8(10),
B(Star), R(1),
/* 67 S> */ B(Ldar), R(0),
@@ -219,13 +219,13 @@ snippet: "
if (obj_a !== undefined) { b = 20;}
return b;
"
-frame size: 3
+frame size: 2
parameter count: 1
-bytecode array length: 24
+bytecode array length: 22
bytecodes: [
/* 30 E> */ B(StackCheck),
- /* 46 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41), R(2),
- B(Mov), R(2), R(0),
+ /* 46 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41),
+ B(Star), R(0),
/* 63 S> */ B(LdaSmi), I8(10),
B(Star), R(1),
/* 67 S> */ B(Ldar), R(0),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/Generators.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/Generators.golden
index df054bd5b2..f6520129c1 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/Generators.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/Generators.golden
@@ -98,149 +98,121 @@ snippet: "
function* f() { for (let x of [42]) yield x }
f();
"
-frame size: 17
+frame size: 16
parameter count: 1
-bytecode array length: 333
+bytecode array length: 261
bytecodes: [
B(SwitchOnGeneratorState), R(2), U8(0), U8(2),
- B(Mov), R(closure), R(11),
- B(Mov), R(this), R(12),
- B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(11), U8(2),
+ B(Mov), R(closure), R(4),
+ B(Mov), R(this), R(5),
+ B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(4), U8(2),
B(Star), R(2),
/* 11 E> */ B(StackCheck),
- /* 11 E> */ B(SuspendGenerator), R(2), R(0), U8(11), U8(0),
- B(ResumeGenerator), R(2), R(0), U8(11),
- B(Star), R(11),
+ /* 11 E> */ B(SuspendGenerator), R(2), R(0), U8(4), U8(0),
+ B(ResumeGenerator), R(2), R(0), U8(4),
+ B(Star), R(4),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(2), U8(1),
B(SwitchOnSmiNoFeedback), U8(2), U8(2), I8(0),
- B(Ldar), R(11),
+ B(Ldar), R(4),
/* 11 E> */ B(Throw),
- B(Ldar), R(11),
+ B(Ldar), R(4),
/* 44 S> */ B(Return),
- B(LdaZero),
- B(Star), R(7),
- B(Mov), R(context), R(13),
- B(Mov), R(context), R(14),
/* 30 S> */ B(CreateArrayLiteral), U8(4), U8(0), U8(37),
- B(Star), R(15),
- B(LdaNamedProperty), R(15), U8(5), U8(1),
- B(Star), R(16),
- B(CallProperty0), R(16), R(15), U8(3),
+ B(Star), R(6),
+ B(LdaNamedProperty), R(6), U8(5), U8(1),
+ B(Star), R(7),
+ B(CallProperty0), R(7), R(6), U8(3),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
- B(Star), R(4),
- /* 30 E> */ B(LdaNamedProperty), R(4), U8(6), U8(5),
B(Star), R(5),
- /* 25 S> */ B(CallProperty0), R(5), R(4), U8(7),
- B(Star), R(6),
- /* 25 E> */ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(6), U8(1),
- B(ToBooleanLogicalNot),
- B(JumpIfFalse), U8(7),
- B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(6), U8(1),
- B(LdaNamedProperty), R(6), U8(7), U8(9),
- B(JumpIfToBooleanTrue), U8(65),
- B(LdaNamedProperty), R(6), U8(8), U8(11),
+ B(LdaNamedProperty), R(5), U8(6), U8(5),
+ B(Star), R(4),
+ B(LdaFalse),
B(Star), R(8),
- B(LdaSmi), I8(2),
- B(Star), R(7),
- B(Mov), R(8), R(3),
+ B(Mov), R(context), R(11),
+ B(LdaTrue),
+ B(Star), R(8),
+ /* 25 S> */ B(CallProperty0), R(4), R(5), U8(7),
+ B(Star), R(12),
+ B(JumpIfJSReceiver), U8(7),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(12), U8(1),
+ B(LdaNamedProperty), R(12), U8(7), U8(9),
+ B(JumpIfToBooleanTrue), U8(64),
+ B(LdaNamedProperty), R(12), U8(8), U8(11),
+ B(Star), R(12),
+ B(LdaFalse),
+ B(Star), R(8),
+ B(Mov), R(12), R(3),
/* 16 E> */ B(StackCheck),
- B(Mov), R(3), R(0),
+ /* 25 S> */ B(Mov), R(3), R(0),
/* 36 S> */ B(LdaFalse),
- B(Star), R(16),
- B(Mov), R(0), R(15),
- B(InvokeIntrinsic), U8(Runtime::k_CreateIterResultObject), R(15), U8(2),
- /* 36 E> */ B(SuspendGenerator), R(2), R(0), U8(15), U8(1),
- B(ResumeGenerator), R(2), R(0), U8(15),
- B(Star), R(15),
+ B(Star), R(14),
+ B(Mov), R(0), R(13),
+ B(InvokeIntrinsic), U8(Runtime::k_CreateIterResultObject), R(13), U8(2),
+ /* 36 E> */ B(SuspendGenerator), R(2), R(0), U8(13), U8(1),
+ B(ResumeGenerator), R(2), R(0), U8(13),
+ B(Star), R(13),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(2), U8(1),
B(SwitchOnSmiNoFeedback), U8(9), U8(2), I8(0),
- B(Ldar), R(15),
+ B(Ldar), R(13),
/* 36 E> */ B(Throw),
- B(LdaZero),
- B(Star), R(11),
- B(Mov), R(15), R(12),
- B(Jump), U8(55),
- B(LdaZero),
- B(Star), R(7),
- B(JumpLoop), U8(84), I8(0),
- B(Jump), U8(33),
- B(Star), R(15),
- B(CreateCatchContext), R(15), U8(11),
- B(PushContext), R(15),
- B(Star), R(14),
- B(LdaSmi), I8(2),
- B(TestEqualStrict), R(7), U8(13),
- B(JumpIfFalse), U8(6),
B(LdaSmi), I8(1),
- B(Star), R(7),
- B(LdaImmutableCurrentContextSlot), U8(4),
- B(Star), R(16),
- B(CallRuntime), U16(Runtime::kReThrow), R(16), U8(1),
- B(PopContext), R(15),
+ B(Star), R(9),
+ B(Mov), R(13), R(10),
+ B(Jump), U8(20),
+ B(Ldar), R(13),
+ B(JumpLoop), U8(81), I8(0),
B(LdaSmi), I8(-1),
- B(Star), R(12),
- B(Star), R(11),
- B(Jump), U8(8),
- B(Star), R(12),
- B(LdaSmi), I8(1),
- B(Star), R(11),
+ B(Star), R(10),
+ B(Star), R(9),
+ B(Jump), U8(7),
+ B(Star), R(10),
+ B(LdaZero),
+ B(Star), R(9),
B(LdaTheHole),
B(SetPendingMessage),
+ B(Star), R(11),
+ B(Ldar), R(8),
+ B(JumpIfToBooleanTrue), U8(60),
+ B(LdaNamedProperty), R(5), U8(11), U8(13),
B(Star), R(13),
- B(LdaZero),
- B(TestEqualStrict), R(7), U8(14),
- B(JumpIfTrue), U8(90),
- B(LdaNamedProperty), R(4), U8(12), U8(15),
- B(Star), R(9),
- B(TestUndetectable),
- B(JumpIfFalse), U8(4),
- B(Jump), U8(79),
- B(LdaSmi), I8(1),
- B(TestEqualStrict), R(7), U8(17),
- B(JumpIfFalse), U8(47),
- B(Ldar), R(9),
+ B(JumpIfUndefined), U8(52),
+ B(JumpIfNull), U8(50),
B(TestTypeOf), U8(6),
- B(JumpIfFalse), U8(4),
- B(Jump), U8(18),
+ B(JumpIfTrue), U8(18),
B(Wide), B(LdaSmi), I16(154),
B(Star), R(14),
- B(LdaConstant), U8(13),
+ B(LdaConstant), U8(12),
B(Star), R(15),
B(CallRuntime), U16(Runtime::kNewTypeError), R(14), U8(2),
B(Throw),
B(Mov), R(context), R(14),
- B(Mov), R(9), R(15),
- B(Mov), R(4), R(16),
- B(InvokeIntrinsic), U8(Runtime::k_Call), R(15), U8(2),
- B(Jump), U8(6),
- B(LdaTheHole),
- B(SetPendingMessage),
+ B(CallProperty0), R(13), R(5), U8(15),
+ B(JumpIfJSReceiver), U8(21),
+ B(Star), R(15),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(15), U8(1),
+ B(Jump), U8(12),
+ B(Star), R(14),
+ B(LdaZero),
+ B(TestReferenceEqual), R(9),
+ B(JumpIfTrue), U8(5),
B(Ldar), R(14),
- B(Jump), U8(27),
- B(Mov), R(9), R(14),
- B(Mov), R(4), R(15),
- B(InvokeIntrinsic), U8(Runtime::k_Call), R(14), U8(2),
- B(Star), R(10),
- B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(10), U8(1),
- B(JumpIfToBooleanFalse), U8(4),
- B(Jump), U8(7),
- B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(10), U8(1),
- B(Ldar), R(13),
- B(SetPendingMessage),
+ B(ReThrow),
B(Ldar), R(11),
- B(SwitchOnSmiNoFeedback), U8(14), U8(2), I8(0),
+ B(SetPendingMessage),
+ B(Ldar), R(9),
+ B(SwitchOnSmiNoFeedback), U8(13), U8(2), I8(0),
B(Jump), U8(8),
- B(Ldar), R(12),
- /* 44 S> */ B(Return),
- B(Ldar), R(12),
+ B(Ldar), R(10),
B(ReThrow),
+ B(Ldar), R(10),
+ /* 44 S> */ B(Return),
B(LdaUndefined),
/* 44 S> */ B(Return),
]
constant pool: [
Smi [22],
- Smi [138],
+ Smi [132],
Smi [10],
Smi [7],
ARRAY_BOILERPLATE_DESCRIPTION_TYPE,
@@ -248,18 +220,16 @@ constant pool: [
ONE_BYTE_INTERNALIZED_STRING_TYPE ["next"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["done"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["value"],
- Smi [15],
+ Smi [16],
Smi [7],
- SCOPE_INFO_TYPE,
ONE_BYTE_INTERNALIZED_STRING_TYPE ["return"],
ONE_BYTE_INTERNALIZED_STRING_TYPE [""],
Smi [6],
Smi [9],
]
handlers: [
- [48, 202, 210],
- [51, 169, 171],
- [271, 281, 283],
+ [79, 163, 171],
+ [217, 230, 232],
]
---
@@ -288,8 +258,8 @@ bytecodes: [
B(Ldar), R(1),
/* 54 S> */ B(Return),
/* 43 S> */ B(LdaGlobal), U8(4), U8(0),
- B(Star), R(8),
- /* 50 E> */ B(CallUndefinedReceiver0), R(8), U8(2),
+ B(Star), R(5),
+ /* 50 E> */ B(CallUndefinedReceiver0), R(5), U8(2),
B(Star), R(6),
B(LdaNamedProperty), R(6), U8(5), U8(4),
B(Star), R(7),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/IIFEWithOneshotOpt.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/IIFEWithOneshotOpt.golden
index a9f03b2c28..9bffe3bf79 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/IIFEWithOneshotOpt.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/IIFEWithOneshotOpt.golden
@@ -565,9 +565,9 @@ snippet: "
}
"
-frame size: 3
+frame size: 2
parameter count: 1
-bytecode array length: 46
+bytecode array length: 43
bytecodes: [
B(CreateMappedArguments),
B(Star), R(0),
@@ -577,17 +577,15 @@ bytecodes: [
/* 111 S> */ B(LdaGlobal), U8(0), U8(2),
B(Star), R(1),
B(LdaSmi), I8(3),
- /* 115 E> */ B(StaNamedPropertyNoFeedback), R(1), U8(1), U8(0),
+ /* 115 E> */ B(StaNamedProperty), R(1), U8(1), U8(4),
/* 130 S> */ B(LdaGlobal), U8(0), U8(2),
B(Star), R(1),
B(LdaSmi), I8(4),
- /* 134 E> */ B(StaNamedPropertyNoFeedback), R(1), U8(2), U8(0),
- /* 149 S> */ B(LdaUndefined),
- B(Star), R(2),
- B(LdaGlobal), U8(3), U8(4),
+ /* 134 E> */ B(StaNamedProperty), R(1), U8(2), U8(6),
+ /* 149 S> */ B(LdaGlobal), U8(3), U8(8),
B(Star), R(1),
- /* 149 E> */ B(CallNoFeedback), R(1), R(2), U8(1),
- /* 182 S> */ B(LdaNamedPropertyNoFeedback), R(0), U8(4),
+ /* 149 E> */ B(CallUndefinedReceiver0), R(1), U8(10),
+ /* 182 S> */ B(LdaNamedProperty), R(0), U8(4), U8(12),
/* 189 S> */ B(Return),
]
constant pool: [
@@ -649,3 +647,164 @@ constant pool: [
handlers: [
]
+---
+snippet: "
+
+ var f = function(l) { l.a = 3; return l; };
+ f({});
+ f;
+
+"
+frame size: 0
+parameter count: 2
+bytecode array length: 10
+bytecodes: [
+ /* 25 E> */ B(StackCheck),
+ /* 32 S> */ B(LdaSmi), I8(3),
+ /* 36 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(0),
+ /* 41 S> */ B(Ldar), R(arg0),
+ /* 50 S> */ B(Return),
+]
+constant pool: [
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["a"],
+]
+handlers: [
+]
+
+---
+snippet: "
+
+ var f = (function(l) { l.a = 3; return l; });
+ f;
+
+"
+frame size: 0
+parameter count: 2
+bytecode array length: 10
+bytecodes: [
+ /* 26 E> */ B(StackCheck),
+ /* 33 S> */ B(LdaSmi), I8(3),
+ /* 37 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(0),
+ /* 42 S> */ B(Ldar), R(arg0),
+ /* 51 S> */ B(Return),
+]
+constant pool: [
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["a"],
+]
+handlers: [
+]
+
+---
+snippet: "
+
+ var f = (function foo(l) { l.a = 3; return l; });
+ f;
+
+"
+frame size: 0
+parameter count: 2
+bytecode array length: 10
+bytecodes: [
+ /* 30 E> */ B(StackCheck),
+ /* 37 S> */ B(LdaSmi), I8(3),
+ /* 41 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(0),
+ /* 46 S> */ B(Ldar), R(arg0),
+ /* 55 S> */ B(Return),
+]
+constant pool: [
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["a"],
+]
+handlers: [
+]
+
+---
+snippet: "
+
+ var f = function foo(l) { l.a = 3; return l; };
+ f({});
+ f;
+
+"
+frame size: 0
+parameter count: 2
+bytecode array length: 10
+bytecodes: [
+ /* 29 E> */ B(StackCheck),
+ /* 36 S> */ B(LdaSmi), I8(3),
+ /* 40 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(0),
+ /* 45 S> */ B(Ldar), R(arg0),
+ /* 54 S> */ B(Return),
+]
+constant pool: [
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["a"],
+]
+handlers: [
+]
+
+---
+snippet: "
+
+ l = {};
+ var f = (function foo(l) { l.a = 3; return arguments.callee; })(l);
+ f;
+
+"
+frame size: 3
+parameter count: 2
+bytecode array length: 27
+bytecodes: [
+ B(CreateFunctionContext), U8(0), U8(1),
+ B(PushContext), R(1),
+ B(Ldar), R(arg0),
+ B(StaCurrentContextSlot), U8(4),
+ B(CreateMappedArguments),
+ B(Star), R(0),
+ /* 46 E> */ B(StackCheck),
+ /* 53 S> */ B(LdaCurrentContextSlot), U8(4),
+ B(Star), R(2),
+ B(LdaSmi), I8(3),
+ /* 57 E> */ B(StaNamedPropertyNoFeedback), R(2), U8(1), U8(0),
+ /* 79 S> */ B(LdaNamedPropertyNoFeedback), R(0), U8(2),
+ /* 86 S> */ B(Return),
+]
+constant pool: [
+ SCOPE_INFO_TYPE,
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["a"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["callee"],
+]
+handlers: [
+]
+
+---
+snippet: "
+
+ var f = (function foo(l) { l.a = 3; return arguments.callee; })({});
+ f;
+
+"
+frame size: 3
+parameter count: 2
+bytecode array length: 27
+bytecodes: [
+ B(CreateFunctionContext), U8(0), U8(1),
+ B(PushContext), R(1),
+ B(Ldar), R(arg0),
+ B(StaCurrentContextSlot), U8(4),
+ B(CreateMappedArguments),
+ B(Star), R(0),
+ /* 30 E> */ B(StackCheck),
+ /* 37 S> */ B(LdaCurrentContextSlot), U8(4),
+ B(Star), R(2),
+ B(LdaSmi), I8(3),
+ /* 41 E> */ B(StaNamedPropertyNoFeedback), R(2), U8(1), U8(0),
+ /* 63 S> */ B(LdaNamedPropertyNoFeedback), R(0), U8(2),
+ /* 70 S> */ B(Return),
+]
+constant pool: [
+ SCOPE_INFO_TYPE,
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["a"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["callee"],
+]
+handlers: [
+]
+
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/IIFEWithoutOneshotOpt.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/IIFEWithoutOneshotOpt.golden
index efe9078eea..54a3fa4917 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/IIFEWithoutOneshotOpt.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/IIFEWithoutOneshotOpt.golden
@@ -68,13 +68,12 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 68
+bytecode array length: 65
bytecodes: [
B(CreateMappedArguments),
B(Star), R(0),
/* 16 E> */ B(StackCheck),
- /* 29 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41), R(1),
- B(Ldar), R(1),
+ /* 29 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41),
/* 31 E> */ B(StaGlobal), U8(1), U8(1),
/* 93 S> */ B(LdaGlobal), U8(1), U8(3),
B(Star), R(1),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/NewAndSpread.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/NewAndSpread.golden
index 056f9d7b84..aa89a500db 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/NewAndSpread.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/NewAndSpread.golden
@@ -10,30 +10,34 @@ snippet: "
class A { constructor(...args) { this.args = args; } }
new A(...[1, 2, 3]);
"
-frame size: 6
+frame size: 7
parameter count: 1
-bytecode array length: 45
+bytecode array length: 51
bytecodes: [
/* 30 E> */ B(StackCheck),
+ B(CreateBlockContext), U8(0),
+ B(PushContext), R(2),
B(LdaTheHole),
- B(Star), R(5),
- B(CreateClosure), U8(1), U8(0), U8(2),
- B(Star), R(2),
- B(LdaConstant), U8(0),
- B(Star), R(3),
- B(Mov), R(2), R(4),
- B(CallRuntime), U16(Runtime::kDefineClass), R(3), U8(3),
+ B(Star), R(6),
+ B(CreateClosure), U8(2), U8(0), U8(2),
B(Star), R(3),
- B(Mov), R(4), R(0),
+ B(LdaConstant), U8(1),
+ B(Star), R(4),
+ B(Mov), R(3), R(5),
+ B(CallRuntime), U16(Runtime::kDefineClass), R(4), U8(3),
+ B(Star), R(4),
+ B(Mov), R(5), R(0),
+ B(PopContext), R(2),
B(Mov), R(0), R(1),
- /* 89 S> */ B(CreateArrayLiteral), U8(2), U8(1), U8(37),
+ /* 89 S> */ B(CreateArrayLiteral), U8(3), U8(1), U8(37),
B(Star), R(3),
B(Ldar), R(1),
- /* 89 E> */ B(ConstructWithSpread), R(2), R(3), U8(1), U8(2),
+ /* 89 E> */ B(ConstructWithSpread), R(1), R(3), U8(1), U8(2),
B(LdaUndefined),
/* 110 S> */ B(Return),
]
constant pool: [
+ SCOPE_INFO_TYPE,
FIXED_ARRAY_TYPE,
SHARED_FUNCTION_INFO_TYPE,
ARRAY_BOILERPLATE_DESCRIPTION_TYPE,
@@ -46,32 +50,36 @@ snippet: "
class A { constructor(...args) { this.args = args; } }
new A(0, ...[1, 2, 3]);
"
-frame size: 6
+frame size: 7
parameter count: 1
-bytecode array length: 48
+bytecode array length: 54
bytecodes: [
/* 30 E> */ B(StackCheck),
+ B(CreateBlockContext), U8(0),
+ B(PushContext), R(2),
B(LdaTheHole),
- B(Star), R(5),
- B(CreateClosure), U8(1), U8(0), U8(2),
- B(Star), R(2),
- B(LdaConstant), U8(0),
- B(Star), R(3),
- B(Mov), R(2), R(4),
- B(CallRuntime), U16(Runtime::kDefineClass), R(3), U8(3),
+ B(Star), R(6),
+ B(CreateClosure), U8(2), U8(0), U8(2),
B(Star), R(3),
- B(Mov), R(4), R(0),
+ B(LdaConstant), U8(1),
+ B(Star), R(4),
+ B(Mov), R(3), R(5),
+ B(CallRuntime), U16(Runtime::kDefineClass), R(4), U8(3),
+ B(Star), R(4),
+ B(Mov), R(5), R(0),
+ B(PopContext), R(2),
B(Mov), R(0), R(1),
/* 89 S> */ B(LdaZero),
B(Star), R(3),
- B(CreateArrayLiteral), U8(2), U8(1), U8(37),
+ B(CreateArrayLiteral), U8(3), U8(1), U8(37),
B(Star), R(4),
B(Ldar), R(1),
- /* 89 E> */ B(ConstructWithSpread), R(2), R(3), U8(2), U8(2),
+ /* 89 E> */ B(ConstructWithSpread), R(1), R(3), U8(2), U8(2),
B(LdaUndefined),
/* 113 S> */ B(Return),
]
constant pool: [
+ SCOPE_INFO_TYPE,
FIXED_ARRAY_TYPE,
SHARED_FUNCTION_INFO_TYPE,
ARRAY_BOILERPLATE_DESCRIPTION_TYPE,
@@ -86,55 +94,59 @@ snippet: "
"
frame size: 10
parameter count: 1
-bytecode array length: 124
+bytecode array length: 131
bytecodes: [
/* 30 E> */ B(StackCheck),
+ B(CreateBlockContext), U8(0),
+ B(PushContext), R(2),
B(LdaTheHole),
- B(Star), R(5),
- B(CreateClosure), U8(1), U8(0), U8(2),
- B(Star), R(2),
- B(LdaConstant), U8(0),
- B(Star), R(3),
- B(Mov), R(2), R(4),
- B(CallRuntime), U16(Runtime::kDefineClass), R(3), U8(3),
+ B(Star), R(6),
+ B(CreateClosure), U8(2), U8(0), U8(2),
B(Star), R(3),
- B(Mov), R(4), R(0),
+ B(LdaConstant), U8(1),
+ B(Star), R(4),
+ B(Mov), R(3), R(5),
+ B(CallRuntime), U16(Runtime::kDefineClass), R(4), U8(3),
+ B(Star), R(4),
+ B(Mov), R(5), R(0),
+ B(PopContext), R(2),
B(Mov), R(0), R(1),
- /* 89 S> */ B(CreateArrayLiteral), U8(2), U8(1), U8(37),
+ /* 89 S> */ B(CreateArrayLiteral), U8(3), U8(1), U8(37),
B(Star), R(4),
- B(LdaConstant), U8(3),
+ B(LdaConstant), U8(4),
B(Star), R(3),
- /* 101 S> */ B(CreateArrayLiteral), U8(4), U8(5), U8(37),
+ /* 101 S> */ B(CreateArrayLiteral), U8(5), U8(2), U8(37),
+ B(Star), R(7),
+ B(LdaNamedProperty), R(7), U8(6), U8(3),
B(Star), R(8),
- B(LdaNamedProperty), R(8), U8(5), U8(6),
- B(Star), R(9),
- B(CallProperty0), R(9), R(8), U8(8),
+ B(CallProperty0), R(8), R(7), U8(5),
+ B(Mov), R(5), R(2),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
- B(Star), R(7),
- B(LdaNamedProperty), R(7), U8(6), U8(10),
B(Star), R(6),
- B(CallProperty0), R(6), R(7), U8(12),
+ B(LdaNamedProperty), R(6), U8(7), U8(7),
B(Star), R(5),
+ B(CallProperty0), R(5), R(6), U8(16),
+ B(Star), R(9),
B(JumpIfJSReceiver), U8(7),
- B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(5), U8(1),
- B(LdaNamedProperty), R(5), U8(7), U8(14),
- B(JumpIfToBooleanTrue), U8(21),
- B(LdaNamedProperty), R(5), U8(8), U8(16),
- B(Star), R(5),
- B(StaInArrayLiteral), R(4), R(3), U8(3),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(9), U8(1),
+ B(LdaNamedProperty), R(9), U8(8), U8(18),
+ B(JumpIfToBooleanTrue), U8(19),
+ B(LdaNamedProperty), R(9), U8(9), U8(9),
+ B(StaInArrayLiteral), R(4), R(3), U8(14),
B(Ldar), R(3),
- B(Inc), U8(2),
+ B(Inc), U8(13),
B(Star), R(3),
- B(JumpLoop), U8(35), I8(0),
+ B(JumpLoop), U8(33), I8(0),
B(LdaSmi), I8(4),
- B(StaInArrayLiteral), R(4), R(3), U8(3),
+ B(StaInArrayLiteral), R(4), R(3), U8(14),
B(Mov), R(4), R(3),
B(CallJSRuntime), U8(%reflect_construct), R(2), U8(2),
B(LdaUndefined),
/* 116 S> */ B(Return),
]
constant pool: [
+ SCOPE_INFO_TYPE,
FIXED_ARRAY_TYPE,
SHARED_FUNCTION_INFO_TYPE,
ARRAY_BOILERPLATE_DESCRIPTION_TYPE,
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ObjectLiterals.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ObjectLiterals.golden
index a1f4d78f7c..9070a36805 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ObjectLiterals.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ObjectLiterals.golden
@@ -26,13 +26,12 @@ handlers: [
snippet: "
return { name: 'string', val: 9.2 };
"
-frame size: 1
+frame size: 0
parameter count: 1
-bytecode array length: 9
+bytecode array length: 6
bytecodes: [
/* 30 E> */ B(StackCheck),
- /* 34 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41), R(0),
- B(Ldar), R(0),
+ /* 34 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41),
/* 70 S> */ B(Return),
]
constant pool: [
@@ -47,12 +46,14 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 17
+bytecode array length: 20
bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), I8(1),
B(Star), R(0),
- /* 45 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41), R(1),
+ /* 45 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41),
+ B(Star), R(1),
+ B(Ldar), R(0),
/* 75 E> */ B(StaNamedOwnProperty), R(1), U8(1), U8(1),
B(Ldar), R(1),
/* 79 S> */ B(Return),
@@ -70,12 +71,14 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 20
+bytecode array length: 23
bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), I8(1),
B(Star), R(0),
- /* 45 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41), R(1),
+ /* 45 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41),
+ B(Star), R(1),
+ B(Ldar), R(0),
/* 69 E> */ B(AddSmi), I8(1), U8(1),
B(StaNamedOwnProperty), R(1), U8(1), U8(2),
B(Ldar), R(1),
@@ -94,10 +97,11 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 17
+bytecode array length: 18
bytecodes: [
/* 30 E> */ B(StackCheck),
- /* 34 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41), R(0),
+ /* 34 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41),
+ B(Star), R(0),
/* 49 E> */ B(CreateClosure), U8(1), U8(1), U8(2),
B(StaNamedOwnProperty), R(0), U8(2), U8(2),
B(Ldar), R(0),
@@ -117,10 +121,11 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 17
+bytecode array length: 18
bytecodes: [
/* 30 E> */ B(StackCheck),
- /* 34 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41), R(0),
+ /* 34 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41),
+ B(Star), R(0),
/* 43 E> */ B(CreateClosure), U8(1), U8(1), U8(2),
B(StaNamedOwnProperty), R(0), U8(2), U8(2),
B(Ldar), R(0),
@@ -140,10 +145,11 @@ snippet: "
"
frame size: 6
parameter count: 1
-bytecode array length: 33
+bytecode array length: 34
bytecodes: [
/* 30 E> */ B(StackCheck),
- /* 34 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41), R(0),
+ /* 34 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41),
+ B(Star), R(0),
B(LdaConstant), U8(1),
B(Star), R(2),
B(CreateClosure), U8(2), U8(1), U8(2),
@@ -171,10 +177,11 @@ snippet: "
"
frame size: 6
parameter count: 1
-bytecode array length: 36
+bytecode array length: 37
bytecodes: [
/* 30 E> */ B(StackCheck),
- /* 34 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41), R(0),
+ /* 34 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41),
+ B(Star), R(0),
B(LdaConstant), U8(1),
B(Star), R(2),
B(CreateClosure), U8(2), U8(1), U8(2),
@@ -203,10 +210,11 @@ snippet: "
"
frame size: 6
parameter count: 1
-bytecode array length: 33
+bytecode array length: 34
bytecodes: [
/* 30 E> */ B(StackCheck),
- /* 34 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41), R(0),
+ /* 34 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41),
+ B(Star), R(0),
B(LdaConstant), U8(1),
B(Star), R(2),
B(LdaNull),
@@ -234,12 +242,13 @@ snippet: "
"
frame size: 6
parameter count: 1
-bytecode array length: 31
+bytecode array length: 32
bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), I8(1),
B(Star), R(0),
- /* 45 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41), R(1),
+ /* 45 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41),
+ B(Star), R(1),
B(LdaSmi), I8(1),
B(Star), R(3),
B(LdaZero),
@@ -260,13 +269,12 @@ handlers: [
snippet: "
return { __proto__: null };
"
-frame size: 1
+frame size: 0
parameter count: 1
-bytecode array length: 9
+bytecode array length: 6
bytecodes: [
/* 30 E> */ B(StackCheck),
- /* 34 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(57), R(0),
- B(Ldar), R(0),
+ /* 34 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(57),
/* 61 S> */ B(Return),
]
constant pool: [
@@ -281,12 +289,14 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 22
+bytecode array length: 25
bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaConstant), U8(0),
B(Star), R(0),
- /* 50 S> */ B(CreateObjectLiteral), U8(1), U8(0), U8(41), R(1),
+ /* 50 S> */ B(CreateObjectLiteral), U8(1), U8(0), U8(41),
+ B(Star), R(1),
+ B(Ldar), R(0),
/* 60 E> */ B(ToName), R(2),
B(LdaSmi), I8(1),
/* 64 E> */ B(StaDataPropertyInLiteral), R(1), R(2), U8(0), U8(1),
@@ -306,12 +316,14 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 28
+bytecode array length: 31
bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaConstant), U8(0),
B(Star), R(0),
- /* 50 S> */ B(CreateObjectLiteral), U8(1), U8(0), U8(41), R(1),
+ /* 50 S> */ B(CreateObjectLiteral), U8(1), U8(0), U8(41),
+ B(Star), R(1),
+ B(Ldar), R(0),
/* 64 E> */ B(StaNamedOwnProperty), R(1), U8(2), U8(1),
B(Ldar), R(0),
/* 68 E> */ B(ToName), R(2),
@@ -334,12 +346,14 @@ snippet: "
"
frame size: 4
parameter count: 1
-bytecode array length: 33
+bytecode array length: 36
bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaConstant), U8(0),
B(Star), R(0),
- /* 50 S> */ B(CreateObjectLiteral), U8(1), U8(0), U8(41), R(1),
+ /* 50 S> */ B(CreateObjectLiteral), U8(1), U8(0), U8(41),
+ B(Star), R(1),
+ B(Ldar), R(0),
/* 60 E> */ B(ToName), R(2),
B(LdaSmi), I8(1),
/* 64 E> */ B(StaDataPropertyInLiteral), R(1), R(2), U8(0), U8(1),
@@ -363,12 +377,14 @@ snippet: "
"
frame size: 6
parameter count: 1
-bytecode array length: 61
+bytecode array length: 64
bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaConstant), U8(0),
B(Star), R(0),
- /* 50 S> */ B(CreateObjectLiteral), U8(1), U8(0), U8(41), R(1),
+ /* 50 S> */ B(CreateObjectLiteral), U8(1), U8(0), U8(41),
+ B(Star), R(1),
+ B(Ldar), R(0),
/* 60 E> */ B(ToName), R(2),
B(LdaConstant), U8(2),
/* 64 E> */ B(StaDataPropertyInLiteral), R(1), R(2), U8(0), U8(1),
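
The pattern behind all of these golden-file deltas is that CreateObjectLiteral no longer takes a destination register operand: the literal is left in the accumulator, and an explicit Star materializes it into a register only where one is needed (with a matching Ldar where the stored value must be reloaded). A minimal sketch of the byte-count arithmetic, using made-up opcode names rather than V8's real encoder:

#include <cstdint>
#include <cstdio>
#include <vector>

enum Op : uint8_t { kCreateObjLitReg, kCreateObjLitAcc, kStar, kLdar };

int main() {
  // Old form: CreateObjectLiteral <const> <slot> <flags> <dst>       (5 bytes)
  std::vector<uint8_t> old_form = {kCreateObjLitReg, 0, 0, 41, 1};
  // New form: CreateObjectLiteral <const> <slot> <flags>; Star; Ldar (8 bytes)
  std::vector<uint8_t> new_form = {kCreateObjLitAcc, 0, 0, 41, kStar, 1, kLdar, 0};
  // 5 -> 8 bytes matches the +3 deltas above; when the result is consumed
  // directly from the accumulator (StaGlobal, ToObject, Return), the Star
  // and Ldar are dropped and the new form is 3 bytes shorter instead.
  std::printf("old: %zu bytes, new: %zu bytes\n", old_form.size(), new_form.size());
}
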
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateClassFields.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateClassFields.golden
index f19a0f0c73..71a7119326 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateClassFields.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateClassFields.golden
@@ -23,9 +23,9 @@ snippet: "
new B;
}
"
-frame size: 9
+frame size: 10
parameter count: 1
-bytecode array length: 127
+bytecode array length: 143
bytecodes: [
/* 30 E> */ B(StackCheck),
B(CreateBlockContext), U8(0),
@@ -38,36 +38,44 @@ bytecodes: [
B(Star), R(5),
B(LdaConstant), U8(1),
B(Star), R(6),
- B(CallRuntime), U16(Runtime::kCreatePrivateFieldSymbol), R(0), U8(0),
+ B(LdaConstant), U8(3),
+ B(Star), R(9),
+ B(LdaConstant), U8(3),
+ B(Star), R(9),
+ B(CallRuntime), U16(Runtime::kCreatePrivateNameSymbol), R(9), U8(1),
B(StaCurrentContextSlot), U8(4),
B(Mov), R(5), R(7),
B(CallRuntime), U16(Runtime::kDefineClass), R(6), U8(3),
B(Star), R(6),
B(Mov), R(7), R(1),
- B(CreateClosure), U8(3), U8(1), U8(2),
+ B(CreateClosure), U8(4), U8(1), U8(2),
B(Star), R(7),
- B(StaNamedProperty), R(5), U8(4), U8(2),
+ B(StaNamedProperty), R(5), U8(5), U8(2),
B(PopContext), R(4),
B(Mov), R(1), R(2),
- /* 38 E> */ B(CreateBlockContext), U8(5),
+ /* 38 E> */ B(CreateBlockContext), U8(6),
B(PushContext), R(4),
B(LdaTheHole),
B(StaCurrentContextSlot), U8(4),
B(LdaTheHole),
B(Star), R(8),
- B(CreateClosure), U8(7), U8(4), U8(2),
+ B(CreateClosure), U8(8), U8(4), U8(2),
B(Star), R(5),
- B(LdaConstant), U8(6),
+ B(LdaConstant), U8(7),
B(Star), R(6),
- B(CallRuntime), U16(Runtime::kCreatePrivateFieldSymbol), R(0), U8(0),
+ B(LdaConstant), U8(3),
+ B(Star), R(9),
+ B(LdaConstant), U8(3),
+ B(Star), R(9),
+ B(CallRuntime), U16(Runtime::kCreatePrivateNameSymbol), R(9), U8(1),
B(StaCurrentContextSlot), U8(4),
B(Mov), R(5), R(7),
B(CallRuntime), U16(Runtime::kDefineClass), R(6), U8(3),
B(Star), R(6),
B(Mov), R(7), R(0),
- B(CreateClosure), U8(8), U8(5), U8(2),
+ B(CreateClosure), U8(9), U8(5), U8(2),
B(Star), R(7),
- B(StaNamedProperty), R(5), U8(4), U8(6),
+ B(StaNamedProperty), R(5), U8(5), U8(6),
B(PopContext), R(4),
B(Mov), R(0), R(3),
/* 136 S> */ B(Ldar), R(1),
@@ -81,6 +89,7 @@ constant pool: [
SCOPE_INFO_TYPE,
FIXED_ARRAY_TYPE,
SHARED_FUNCTION_INFO_TYPE,
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["#a"],
SHARED_FUNCTION_INFO_TYPE,
SYMBOL_TYPE,
SCOPE_INFO_TYPE,
@@ -128,7 +137,7 @@ snippet: "
"
frame size: 15
parameter count: 1
-bytecode array length: 257
+bytecode array length: 289
bytecodes: [
/* 30 E> */ B(StackCheck),
B(CreateBlockContext), U8(0),
@@ -148,19 +157,23 @@ bytecodes: [
B(Star), R(7),
B(LdaConstant), U8(1),
B(Star), R(8),
- B(CallRuntime), U16(Runtime::kCreatePrivateFieldSymbol), R(0), U8(0),
+ B(LdaConstant), U8(5),
+ B(Star), R(11),
+ B(LdaConstant), U8(5),
+ B(Star), R(11),
+ B(CallRuntime), U16(Runtime::kCreatePrivateNameSymbol), R(11), U8(1),
B(StaCurrentContextSlot), U8(4),
B(Mov), R(7), R(9),
B(Mov), R(13), R(10),
B(CallRuntime), U16(Runtime::kDefineClass), R(8), U8(3),
B(Star), R(8),
B(Mov), R(9), R(2),
- B(CreateClosure), U8(5), U8(2), U8(2),
+ B(CreateClosure), U8(6), U8(2), U8(2),
B(Star), R(9),
- B(StaNamedProperty), R(7), U8(6), U8(3),
+ B(StaNamedProperty), R(7), U8(7), U8(3),
B(PopContext), R(6),
B(Mov), R(2), R(3),
- /* 38 E> */ B(CreateBlockContext), U8(7),
+ /* 38 E> */ B(CreateBlockContext), U8(8),
B(PushContext), R(6),
B(LdaTheHole),
B(StaCurrentContextSlot), U8(4),
@@ -168,53 +181,65 @@ bytecodes: [
B(StaCurrentContextSlot), U8(5),
B(LdaTheHole),
B(Star), R(14),
- B(CreateClosure), U8(10), U8(5), U8(2),
+ B(CreateClosure), U8(11), U8(5), U8(2),
B(Star), R(11),
- B(LdaConstant), U8(9),
+ B(LdaConstant), U8(10),
B(Star), R(12),
B(Mov), R(11), R(13),
B(CallRuntime), U16(Runtime::kDefineClass), R(12), U8(3),
B(Star), R(12),
- B(CreateClosure), U8(11), U8(6), U8(2),
+ B(CreateClosure), U8(12), U8(6), U8(2),
B(Star), R(7),
- B(LdaConstant), U8(8),
+ B(LdaConstant), U8(9),
B(Star), R(8),
- B(CallRuntime), U16(Runtime::kCreatePrivateFieldSymbol), R(0), U8(0),
+ B(LdaConstant), U8(5),
+ B(Star), R(11),
+ B(LdaConstant), U8(5),
+ B(Star), R(11),
+ B(CallRuntime), U16(Runtime::kCreatePrivateNameSymbol), R(11), U8(1),
B(StaCurrentContextSlot), U8(4),
- B(CallRuntime), U16(Runtime::kCreatePrivateFieldSymbol), R(0), U8(0),
+ B(LdaConstant), U8(13),
+ B(Star), R(11),
+ B(LdaConstant), U8(13),
+ B(Star), R(11),
+ B(CallRuntime), U16(Runtime::kCreatePrivateNameSymbol), R(11), U8(1),
B(StaCurrentContextSlot), U8(5),
- B(CreateClosure), U8(12), U8(7), U8(2),
+ B(CreateClosure), U8(14), U8(7), U8(2),
B(Star), R(11),
- B(CreateClosure), U8(13), U8(8), U8(2),
+ B(CreateClosure), U8(15), U8(8), U8(2),
B(Star), R(12),
B(Mov), R(7), R(9),
B(Mov), R(13), R(10),
B(CallRuntime), U16(Runtime::kDefineClass), R(8), U8(5),
B(Star), R(8),
B(Mov), R(9), R(1),
- B(CreateClosure), U8(14), U8(9), U8(2),
+ B(CreateClosure), U8(16), U8(9), U8(2),
B(Star), R(9),
- B(StaNamedProperty), R(7), U8(6), U8(10),
+ B(StaNamedProperty), R(7), U8(7), U8(10),
B(PopContext), R(6),
B(Mov), R(1), R(4),
- /* 140 E> */ B(CreateBlockContext), U8(15),
+ /* 140 E> */ B(CreateBlockContext), U8(17),
B(PushContext), R(6),
B(LdaTheHole),
B(StaCurrentContextSlot), U8(4),
- /* 356 E> */ B(CreateClosure), U8(17), U8(12), U8(2),
+ /* 356 E> */ B(CreateClosure), U8(19), U8(12), U8(2),
B(Star), R(7),
- B(LdaConstant), U8(16),
+ B(LdaConstant), U8(18),
B(Star), R(8),
- B(CallRuntime), U16(Runtime::kCreatePrivateFieldSymbol), R(0), U8(0),
+ B(LdaConstant), U8(5),
+ B(Star), R(11),
+ B(LdaConstant), U8(5),
+ B(Star), R(11),
+ B(CallRuntime), U16(Runtime::kCreatePrivateNameSymbol), R(11), U8(1),
B(StaCurrentContextSlot), U8(4),
B(Mov), R(7), R(9),
B(Mov), R(1), R(10),
B(CallRuntime), U16(Runtime::kDefineClass), R(8), U8(3),
B(Star), R(8),
B(Mov), R(9), R(0),
- B(CreateClosure), U8(18), U8(13), U8(2),
+ B(CreateClosure), U8(20), U8(13), U8(2),
B(Star), R(9),
- B(StaNamedProperty), R(7), U8(6), U8(14),
+ B(StaNamedProperty), R(7), U8(7), U8(14),
B(PopContext), R(6),
B(Mov), R(0), R(5),
/* 430 S> */ B(Ldar), R(2),
@@ -232,6 +257,7 @@ constant pool: [
FIXED_ARRAY_TYPE,
SHARED_FUNCTION_INFO_TYPE,
SHARED_FUNCTION_INFO_TYPE,
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["#a"],
SHARED_FUNCTION_INFO_TYPE,
SYMBOL_TYPE,
SCOPE_INFO_TYPE,
@@ -239,6 +265,7 @@ constant pool: [
FIXED_ARRAY_TYPE,
SHARED_FUNCTION_INFO_TYPE,
SHARED_FUNCTION_INFO_TYPE,
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["#b"],
SHARED_FUNCTION_INFO_TYPE,
SHARED_FUNCTION_INFO_TYPE,
SHARED_FUNCTION_INFO_TYPE,
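
The switch from Runtime::kCreatePrivateFieldSymbol (zero arguments) to Runtime::kCreatePrivateNameSymbol (one argument) is why the constant pools above gain ONE_BYTE_INTERNALIZED_STRING_TYPE entries such as ["#a"]: the private symbol now carries a printable description. A hypothetical model, not V8's classes, of what the description buys:

#include <iostream>
#include <string>
#include <utility>

struct PrivateNameSymbol {
  std::string description;  // e.g. "#a"; absent under the old scheme
};

PrivateNameSymbol CreatePrivateNameSymbol(std::string desc) {
  return PrivateNameSymbol{std::move(desc)};
}

int main() {
  PrivateNameSymbol sym = CreatePrivateNameSymbol("#a");
  // Error messages about a missing private field can now name the field:
  std::cout << "TypeError: object has no field " << sym.description << '\n';
}
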
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyLoadStoreWithoutOneShot.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyLoadStoreWithoutOneShot.golden
index 4b249ea15f..fe75e8a344 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyLoadStoreWithoutOneShot.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyLoadStoreWithoutOneShot.golden
@@ -21,11 +21,10 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 74
+bytecode array length: 71
bytecodes: [
/* 0 E> */ B(StackCheck),
- /* 7 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41), R(1),
- B(Ldar), R(1),
+ /* 7 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41),
/* 9 E> */ B(StaGlobal), U8(1), U8(1),
/* 66 S> */ B(LdaGlobal), U8(1), U8(4),
B(Star), R(1),
@@ -77,11 +76,10 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 70
+bytecode array length: 67
bytecodes: [
/* 0 E> */ B(StackCheck),
- /* 7 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41), R(1),
- B(Ldar), R(1),
+ /* 7 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41),
/* 9 E> */ B(StaGlobal), U8(1), U8(1),
/* 65 S> */ B(LdaGlobal), U8(1), U8(3),
B(Star), R(1),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/StandardForLoop.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/StandardForLoop.golden
index 849f7beba3..34e5de5443 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/StandardForLoop.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/StandardForLoop.golden
@@ -218,26 +218,19 @@ snippet: "
}
f();
"
-frame size: 6
+frame size: 4
parameter count: 1
-bytecode array length: 68
+bytecode array length: 53
bytecodes: [
/* 10 E> */ B(StackCheck),
- B(CreateObjectLiteral), U8(0), U8(0), U8(41), R(4),
- B(Mov), R(4), R(3),
- B(Ldar), R(3),
- B(JumpIfUndefined), U8(6),
- B(Ldar), R(3),
- B(JumpIfNotNull), U8(16),
- B(LdaSmi), I8(81),
- B(Star), R(4),
- B(LdaConstant), U8(1),
- B(Star), R(5),
- B(CallRuntime), U16(Runtime::kNewTypeError), R(4), U8(2),
- /* 28 E> */ B(Throw),
- /* 37 S> */ B(LdaNamedProperty), R(3), U8(1), U8(1),
+ /* 37 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41),
+ B(JumpIfNull), U8(4),
+ B(JumpIfNotUndefined), U8(7),
+ /* 26 E> */ B(CallRuntime), U16(Runtime::kThrowPatternAssignmentNonCoercible), R(0), U8(0),
+ B(Star), R(3),
+ /* 28 S> */ B(LdaNamedProperty), R(3), U8(1), U8(1),
B(Star), R(1),
- /* 37 S> */ B(LdaNamedProperty), R(3), U8(2), U8(3),
+ /* 31 S> */ B(LdaNamedProperty), R(3), U8(2), U8(3),
B(Star), R(2),
/* 55 S> */ B(LdaZero),
/* 55 E> */ B(TestGreaterThan), R(2), U8(5),
@@ -379,90 +372,57 @@ snippet: "
}
f();
"
-frame size: 12
+frame size: 8
parameter count: 1
-bytecode array length: 144
+bytecode array length: 85
bytecodes: [
+ B(Mov), R(closure), R(3),
+ B(Mov), R(this), R(4),
+ B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionEnter), R(3), U8(2),
+ B(Star), R(2),
/* 16 E> */ B(StackCheck),
- B(CallJSRuntime), U8(%async_function_promise_create), R(0), U8(0),
- B(Star), R(0),
- B(Mov), R(context), R(6),
- B(Mov), R(context), R(7),
+ B(Mov), R(context), R(3),
/* 36 S> */ B(LdaZero),
- B(Star), R(2),
+ B(Star), R(1),
/* 41 S> */ B(LdaSmi), I8(10),
- /* 41 E> */ B(TestLessThan), R(2), U8(0),
+ /* 41 E> */ B(TestLessThan), R(1), U8(0),
B(JumpIfFalse), U8(15),
/* 23 E> */ B(StackCheck),
- /* 62 S> */ B(Mov), R(2), R(1),
- /* 49 S> */ B(Ldar), R(1),
+ /* 62 S> */ B(Mov), R(1), R(0),
+ /* 49 S> */ B(Ldar), R(0),
B(Inc), U8(1),
- B(Star), R(2),
+ B(Star), R(1),
B(JumpLoop), U8(17), I8(0),
B(LdaUndefined),
B(Star), R(5),
- B(LdaZero),
- B(Star), R(4),
- B(Jump), U8(56),
- B(Jump), U8(40),
- B(Star), R(8),
- /* 49 E> */ B(CreateCatchContext), R(8), U8(0),
- B(Star), R(7),
- B(LdaTheHole),
- B(SetPendingMessage),
- B(Ldar), R(7),
- B(PushContext), R(8),
- B(LdaImmutableCurrentContextSlot), U8(4),
- B(Star), R(10),
B(LdaFalse),
- B(Star), R(11),
- B(Mov), R(0), R(9),
- B(InvokeIntrinsic), U8(Runtime::k_RejectPromise), R(9), U8(3),
- B(PopContext), R(8),
- B(LdaSmi), I8(1),
- B(Star), R(4),
- B(Mov), R(0), R(5),
- B(Jump), U8(16),
- B(LdaSmi), I8(-1),
- B(Star), R(5),
- B(Star), R(4),
- B(Jump), U8(8),
- B(Star), R(5),
- B(LdaSmi), I8(2),
+ B(Star), R(6),
+ B(Mov), R(2), R(4),
+ /* 49 E> */ B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionResolve), R(4), U8(3),
+ /* 67 S> */ B(Return),
+ B(Jump), U8(30),
B(Star), R(4),
+ B(CreateCatchContext), R(4), U8(0),
+ B(Star), R(3),
B(LdaTheHole),
B(SetPendingMessage),
+ B(Ldar), R(3),
+ B(PushContext), R(4),
+ B(LdaImmutableCurrentContextSlot), U8(4),
B(Star), R(6),
B(LdaFalse),
- B(Star), R(8),
- B(Mov), R(0), R(7),
- B(CallJSRuntime), U8(%async_function_promise_release), R(7), U8(2),
- B(Ldar), R(6),
- B(SetPendingMessage),
- B(Ldar), R(4),
- B(SwitchOnSmiNoFeedback), U8(1), U8(3), I8(0),
- B(Jump), U8(21),
- B(Mov), R(0), R(7),
- B(Mov), R(5), R(8),
- B(InvokeIntrinsic), U8(Runtime::k_ResolvePromise), R(7), U8(2),
- B(Ldar), R(0),
- /* 67 S> */ B(Return),
- B(Ldar), R(5),
+ B(Star), R(7),
+ B(Mov), R(2), R(5),
+ B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionReject), R(5), U8(3),
/* 67 S> */ B(Return),
- B(Ldar), R(5),
- B(ReThrow),
B(LdaUndefined),
/* 67 S> */ B(Return),
]
constant pool: [
SCOPE_INFO_TYPE,
- Smi [6],
- Smi [19],
- Smi [22],
]
handlers: [
- [10, 84, 92],
- [13, 44, 46],
+ [16, 53, 55],
]
---
@@ -472,108 +432,70 @@ snippet: "
}
f();
"
-frame size: 11
+frame size: 7
parameter count: 1
-bytecode array length: 195
+bytecode array length: 121
bytecodes: [
- B(SwitchOnGeneratorState), R(2), U8(0), U8(1),
- B(Mov), R(closure), R(3),
- B(Mov), R(this), R(4),
- B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(3), U8(2),
- B(Star), R(2),
+ B(SwitchOnGeneratorState), R(1), U8(0), U8(1),
+ B(Mov), R(closure), R(2),
+ B(Mov), R(this), R(3),
+ B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionEnter), R(2), U8(2),
+ B(Star), R(1),
/* 16 E> */ B(StackCheck),
- B(CallJSRuntime), U8(%async_function_promise_create), R(0), U8(0),
- B(Star), R(0),
- B(Mov), R(context), R(5),
- B(Mov), R(context), R(6),
+ B(Mov), R(context), R(2),
/* 36 S> */ B(LdaZero),
- B(Star), R(1),
+ B(Star), R(0),
/* 41 S> */ B(LdaSmi), I8(10),
- /* 41 E> */ B(TestLessThan), R(1), U8(0),
- B(JumpIfFalse), U8(50),
+ /* 41 E> */ B(TestLessThan), R(0), U8(0),
+ B(JumpIfFalse), U8(47),
/* 23 E> */ B(StackCheck),
- /* 52 S> */ B(Mov), R(2), R(7),
- B(Mov), R(1), R(8),
- B(Mov), R(0), R(9),
- B(CallJSRuntime), U8(%async_function_await_uncaught), R(7), U8(3),
- /* 52 E> */ B(SuspendGenerator), R(2), R(0), U8(7), U8(0),
- B(ResumeGenerator), R(2), R(0), U8(7),
- B(Star), R(7),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(2), U8(1),
- B(Star), R(8),
+ /* 52 S> */ B(Mov), R(1), R(3),
+ B(Mov), R(0), R(4),
+ B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionAwaitUncaught), R(3), U8(2),
+ /* 52 E> */ B(SuspendGenerator), R(1), R(0), U8(3), U8(0),
+ B(ResumeGenerator), R(1), R(0), U8(3),
+ B(Star), R(3),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(1), U8(1),
+ B(Star), R(4),
B(LdaZero),
- B(TestReferenceEqual), R(8),
+ B(TestReferenceEqual), R(4),
B(JumpIfTrue), U8(5),
- B(Ldar), R(7),
+ B(Ldar), R(3),
B(ReThrow),
- /* 49 S> */ B(Ldar), R(1),
+ /* 49 S> */ B(Ldar), R(0),
B(Inc), U8(1),
- B(Star), R(1),
- B(JumpLoop), U8(52), I8(0),
+ B(Star), R(0),
+ B(JumpLoop), U8(49), I8(0),
B(LdaUndefined),
B(Star), R(4),
- B(LdaZero),
+ B(LdaTrue),
+ B(Star), R(5),
+ B(Mov), R(1), R(3),
+ /* 49 E> */ B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionResolve), R(3), U8(3),
+ /* 61 S> */ B(Return),
+ B(Jump), U8(30),
B(Star), R(3),
- B(Jump), U8(56),
- B(Jump), U8(40),
- B(Star), R(7),
- /* 49 E> */ B(CreateCatchContext), R(7), U8(1),
- B(Star), R(6),
+ B(CreateCatchContext), R(3), U8(1),
+ B(Star), R(2),
B(LdaTheHole),
B(SetPendingMessage),
- B(Ldar), R(6),
- B(PushContext), R(7),
+ B(Ldar), R(2),
+ B(PushContext), R(3),
B(LdaImmutableCurrentContextSlot), U8(4),
- B(Star), R(9),
- B(LdaFalse),
- B(Star), R(10),
- B(Mov), R(0), R(8),
- B(InvokeIntrinsic), U8(Runtime::k_RejectPromise), R(8), U8(3),
- B(PopContext), R(7),
- B(LdaSmi), I8(1),
- B(Star), R(3),
- B(Mov), R(0), R(4),
- B(Jump), U8(16),
- B(LdaSmi), I8(-1),
- B(Star), R(4),
- B(Star), R(3),
- B(Jump), U8(8),
- B(Star), R(4),
- B(LdaSmi), I8(2),
- B(Star), R(3),
- B(LdaTheHole),
- B(SetPendingMessage),
B(Star), R(5),
B(LdaTrue),
- B(Star), R(7),
- B(Mov), R(0), R(6),
- B(CallJSRuntime), U8(%async_function_promise_release), R(6), U8(2),
- B(Ldar), R(5),
- B(SetPendingMessage),
- B(Ldar), R(3),
- B(SwitchOnSmiNoFeedback), U8(2), U8(3), I8(0),
- B(Jump), U8(21),
- B(Mov), R(0), R(6),
- B(Mov), R(4), R(7),
- B(InvokeIntrinsic), U8(Runtime::k_ResolvePromise), R(6), U8(2),
- B(Ldar), R(0),
- /* 61 S> */ B(Return),
- B(Ldar), R(4),
+ B(Star), R(6),
+ B(Mov), R(1), R(4),
+ B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionReject), R(4), U8(3),
/* 61 S> */ B(Return),
- B(Ldar), R(4),
- B(ReThrow),
B(LdaUndefined),
/* 61 S> */ B(Return),
]
constant pool: [
- Smi [58],
+ Smi [46],
SCOPE_INFO_TYPE,
- Smi [6],
- Smi [19],
- Smi [22],
]
handlers: [
- [26, 135, 143],
- [29, 95, 97],
+ [20, 89, 91],
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/SuperCallAndSpread.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/SuperCallAndSpread.golden
index 42238ac049..a84807e5fc 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/SuperCallAndSpread.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/SuperCallAndSpread.golden
@@ -54,16 +54,16 @@ parameter count: 1
bytecode array length: 40
bytecodes: [
B(CreateRestParameter),
- B(Star), R(2),
+ B(Star), R(3),
B(Mov), R(closure), R(1),
/* 128 E> */ B(StackCheck),
- B(Mov), R(2), R(3),
+ /* 136 S> */ B(Mov), R(3), R(2),
/* 140 S> */ B(Ldar), R(closure),
B(GetSuperConstructor), R(5),
B(LdaSmi), I8(1),
B(Star), R(6),
B(Ldar), R(0),
- B(Mov), R(2), R(7),
+ B(Mov), R(3), R(7),
/* 140 E> */ B(ConstructWithSpread), R(5), R(6), U8(2), U8(0),
B(Star), R(8),
B(Ldar), R(this),
@@ -93,13 +93,13 @@ snippet: "
"
frame size: 13
parameter count: 1
-bytecode array length: 130
+bytecode array length: 128
bytecodes: [
B(CreateRestParameter),
- B(Star), R(2),
+ B(Star), R(3),
B(Mov), R(closure), R(1),
/* 128 E> */ B(StackCheck),
- B(Mov), R(2), R(3),
+ /* 136 S> */ B(Mov), R(3), R(2),
/* 140 S> */ B(Ldar), R(closure),
B(GetSuperConstructor), R(5),
B(CreateEmptyArrayLiteral), U8(0),
@@ -111,29 +111,28 @@ bytecodes: [
B(Ldar), R(6),
B(Inc), U8(3),
/* 152 S> */ B(Star), R(6),
- B(LdaNamedProperty), R(2), U8(0), U8(4),
- B(Star), R(12),
- B(CallProperty0), R(12), R(2), U8(6),
- B(Mov), R(2), R(11),
+ B(LdaNamedProperty), R(3), U8(0), U8(4),
+ B(Star), R(11),
+ B(CallProperty0), R(11), R(3), U8(6),
+ B(Mov), R(3), R(10),
B(Mov), R(1), R(4),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
- B(Star), R(10),
- B(LdaNamedProperty), R(10), U8(1), U8(8),
B(Star), R(9),
- B(CallProperty0), R(9), R(10), U8(10),
+ B(LdaNamedProperty), R(9), U8(1), U8(8),
B(Star), R(8),
+ B(CallProperty0), R(8), R(9), U8(14),
+ B(Star), R(12),
B(JumpIfJSReceiver), U8(7),
- B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(8), U8(1),
- B(LdaNamedProperty), R(8), U8(2), U8(12),
- B(JumpIfToBooleanTrue), U8(21),
- B(LdaNamedProperty), R(8), U8(3), U8(14),
- B(Star), R(8),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(12), U8(1),
+ B(LdaNamedProperty), R(12), U8(2), U8(16),
+ B(JumpIfToBooleanTrue), U8(19),
+ B(LdaNamedProperty), R(12), U8(3), U8(10),
B(StaInArrayLiteral), R(7), R(6), U8(1),
B(Ldar), R(6),
B(Inc), U8(3),
B(Star), R(6),
- B(JumpLoop), U8(35), I8(0),
+ B(JumpLoop), U8(33), I8(0),
B(LdaSmi), I8(1),
B(StaInArrayLiteral), R(7), R(6), U8(1),
B(Mov), R(5), R(6),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/TopLevelObjectLiterals.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/TopLevelObjectLiterals.golden
index e4fa07d2a0..5fc0eef86d 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/TopLevelObjectLiterals.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/TopLevelObjectLiterals.golden
@@ -12,7 +12,7 @@ snippet: "
"
frame size: 4
parameter count: 1
-bytecode array length: 36
+bytecode array length: 37
bytecodes: [
B(LdaConstant), U8(0),
B(Star), R(1),
@@ -21,7 +21,8 @@ bytecodes: [
B(Mov), R(closure), R(3),
B(CallRuntime), U16(Runtime::kDeclareGlobals), R(1), U8(3),
/* 0 E> */ B(StackCheck),
- /* 8 S> */ B(CreateObjectLiteral), U8(1), U8(2), U8(41), R(1),
+ /* 8 S> */ B(CreateObjectLiteral), U8(1), U8(2), U8(41),
+ B(Star), R(1),
/* 16 E> */ B(CreateClosure), U8(2), U8(3), U8(0),
B(StaNamedOwnProperty), R(1), U8(3), U8(4),
B(Ldar), R(1),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/WithStatement.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/WithStatement.golden
index 09dfa7dbdb..0b44c07300 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/WithStatement.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/WithStatement.golden
@@ -11,11 +11,10 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 18
+bytecode array length: 15
bytecodes: [
/* 30 E> */ B(StackCheck),
- /* 34 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41), R(0),
- B(Ldar), R(0),
+ /* 34 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41),
B(ToObject), R(0),
B(CreateWithContext), R(0), U8(1),
B(PushContext), R(0),
diff --git a/deps/v8/test/cctest/interpreter/generate-bytecode-expectations.cc b/deps/v8/test/cctest/interpreter/generate-bytecode-expectations.cc
index ef0f616528..99ab4cd8c0 100644
--- a/deps/v8/test/cctest/interpreter/generate-bytecode-expectations.cc
+++ b/deps/v8/test/cctest/interpreter/generate-bytecode-expectations.cc
@@ -44,10 +44,10 @@ class ProgramOptions final {
top_level_(false),
print_callee_(false),
oneshot_opt_(false),
- do_expressions_(false),
async_iteration_(false),
public_fields_(false),
private_fields_(false),
+ private_methods_(false),
static_fields_(false),
verbose_(false) {}
@@ -68,10 +68,10 @@ class ProgramOptions final {
bool top_level() const { return top_level_; }
bool print_callee() const { return print_callee_; }
bool oneshot_opt() const { return oneshot_opt_; }
- bool do_expressions() const { return do_expressions_; }
bool async_iteration() const { return async_iteration_; }
bool public_fields() const { return public_fields_; }
bool private_fields() const { return private_fields_; }
+ bool private_methods() const { return private_methods_; }
bool static_fields() const { return static_fields_; }
bool verbose() const { return verbose_; }
bool suppress_runtime_errors() const { return rebaseline_ && !verbose_; }
@@ -90,10 +90,10 @@ class ProgramOptions final {
bool top_level_;
bool print_callee_;
bool oneshot_opt_;
- bool do_expressions_;
bool async_iteration_;
bool public_fields_;
bool private_fields_;
+ bool private_methods_;
bool static_fields_;
bool verbose_;
std::vector<std::string> input_filenames_;
@@ -184,14 +184,14 @@ ProgramOptions ProgramOptions::FromCommandLine(int argc, char** argv) {
options.print_callee_ = true;
} else if (strcmp(argv[i], "--disable-oneshot-opt") == 0) {
options.oneshot_opt_ = false;
- } else if (strcmp(argv[i], "--do-expressions") == 0) {
- options.do_expressions_ = true;
} else if (strcmp(argv[i], "--async-iteration") == 0) {
options.async_iteration_ = true;
} else if (strcmp(argv[i], "--public-fields") == 0) {
options.public_fields_ = true;
} else if (strcmp(argv[i], "--private-fields") == 0) {
options.private_fields_ = true;
+ } else if (strcmp(argv[i], "--private-methods") == 0) {
+ options.private_methods_ = true;
} else if (strcmp(argv[i], "--static-fields") == 0) {
options.static_fields_ = true;
} else if (strcmp(argv[i], "--verbose") == 0) {
@@ -300,14 +300,14 @@ void ProgramOptions::UpdateFromHeader(std::istream& stream) {
print_callee_ = ParseBoolean(line.c_str() + strlen(kPrintCallee));
} else if (line.compare(0, strlen(kOneshotOpt), kOneshotOpt) == 0) {
oneshot_opt_ = ParseBoolean(line.c_str() + strlen(kOneshotOpt));
- } else if (line.compare(0, 16, "do expressions: ") == 0) {
- do_expressions_ = ParseBoolean(line.c_str() + 16);
} else if (line.compare(0, 17, "async iteration: ") == 0) {
async_iteration_ = ParseBoolean(line.c_str() + 17);
} else if (line.compare(0, 15, "public fields: ") == 0) {
public_fields_ = ParseBoolean(line.c_str() + 15);
} else if (line.compare(0, 16, "private fields: ") == 0) {
private_fields_ = ParseBoolean(line.c_str() + 16);
+  } else if (line.compare(0, 17, "private methods: ") == 0) {
+    private_methods_ = ParseBoolean(line.c_str() + 17);
} else if (line.compare(0, 15, "static fields: ") == 0) {
static_fields_ = ParseBoolean(line.c_str() + 15);
} else if (line == "---") {
@@ -333,10 +333,10 @@ void ProgramOptions::PrintHeader(std::ostream& stream) const { // NOLINT
if (top_level_) stream << "\ntop level: yes";
if (print_callee_) stream << "\nprint callee: yes";
if (oneshot_opt_) stream << "\noneshot opt: yes";
- if (do_expressions_) stream << "\ndo expressions: yes";
if (async_iteration_) stream << "\nasync iteration: yes";
if (public_fields_) stream << "\npublic fields: yes";
if (private_fields_) stream << "\nprivate fields: yes";
+ if (private_methods_) stream << "\nprivate methods: yes";
if (static_fields_) stream << "\nstatic fields: yes";
stream << "\n\n";
@@ -446,9 +446,9 @@ void GenerateExpectationsFile(std::ostream& stream, // NOLINT
printer.set_test_function_name(options.test_function_name());
}
- if (options.do_expressions()) i::FLAG_harmony_do_expressions = true;
if (options.public_fields()) i::FLAG_harmony_public_fields = true;
if (options.private_fields()) i::FLAG_harmony_private_fields = true;
+ if (options.private_methods()) i::FLAG_harmony_private_methods = true;
if (options.static_fields()) i::FLAG_harmony_static_fields = true;
stream << "#\n# Autogenerated by generate-bytecode-expectations.\n#\n\n";
@@ -457,9 +457,9 @@ void GenerateExpectationsFile(std::ostream& stream, // NOLINT
printer.PrintExpectation(stream, snippet);
}
- i::FLAG_harmony_do_expressions = false;
i::FLAG_harmony_public_fields = false;
i::FLAG_harmony_private_fields = false;
+ i::FLAG_harmony_private_methods = false;
i::FLAG_harmony_static_fields = false;
}
@@ -509,9 +509,9 @@ void PrintUsage(const char* exec_path) {
" --test-function-name=foo "
"Specify the name of the test function.\n"
" --top-level Process top level code, not the top-level function.\n"
- " --do-expressions Enable harmony_do_expressions flag.\n"
" --public-fields Enable harmony_public_fields flag.\n"
" --private-fields Enable harmony_private_fields flag.\n"
+ " --private-methods Enable harmony_private_methods flag.\n"
" --static-fields Enable harmony_static_fields flag.\n"
" --output=file.name\n"
" Specify the output file. If not specified, output goes to "
diff --git a/deps/v8/test/cctest/interpreter/interpreter-tester.cc b/deps/v8/test/cctest/interpreter/interpreter-tester.cc
index 855e01e786..a361a98a52 100644
--- a/deps/v8/test/cctest/interpreter/interpreter-tester.cc
+++ b/deps/v8/test/cctest/interpreter/interpreter-tester.cc
@@ -30,8 +30,10 @@ InterpreterTester::InterpreterTester(
InterpreterTester::InterpreterTester(
Isolate* isolate, Handle<BytecodeArray> bytecode,
MaybeHandle<FeedbackMetadata> feedback_metadata, const char* filter)
- : InterpreterTester(isolate, nullptr, bytecode, feedback_metadata, filter) {
-}
+ : InterpreterTester(
+ isolate, nullptr, bytecode,
+ FLAG_lite_mode ? MaybeHandle<FeedbackMetadata>() : feedback_metadata,
+ filter) {}
InterpreterTester::InterpreterTester(Isolate* isolate, const char* source,
const char* filter)
diff --git a/deps/v8/test/cctest/interpreter/interpreter-tester.h b/deps/v8/test/cctest/interpreter/interpreter-tester.h
index d670252242..a768908998 100644
--- a/deps/v8/test/cctest/interpreter/interpreter-tester.h
+++ b/deps/v8/test/cctest/interpreter/interpreter-tester.h
@@ -12,6 +12,7 @@
#include "src/handles.h"
#include "src/interpreter/bytecode-array-builder.h"
#include "src/interpreter/interpreter.h"
+#include "src/objects/feedback-cell.h"
#include "test/cctest/cctest.h"
#include "test/cctest/test-feedback-vector.h"
@@ -42,7 +43,7 @@ class InterpreterCallable {
return CallInterpreter(isolate_, function_, args...);
}
- FeedbackVector* vector() const { return function_->feedback_vector(); }
+ FeedbackVector vector() const { return function_->feedback_vector(); }
private:
Isolate* isolate_;
@@ -88,6 +89,8 @@ class InterpreterTester {
return RegisterList(first_reg_index, register_count);
}
+ inline bool HasFeedbackMetadata() { return !feedback_metadata_.is_null(); }
+
private:
Isolate* isolate_;
const char* source_;
@@ -121,8 +124,8 @@ class InterpreterTester {
if (!bytecode_.is_null()) {
function->shared()->set_function_data(*bytecode_.ToHandleChecked());
}
- if (!feedback_metadata_.is_null()) {
- function->set_feedback_cell(isolate_->heap()->many_closures_cell());
+ if (HasFeedbackMetadata()) {
+ function->set_raw_feedback_cell(isolate_->heap()->many_closures_cell());
// Set the raw feedback metadata to circumvent checks that we are not
// overwriting existing metadata.
function->shared()->set_raw_outer_scope_info_or_feedback_metadata(
diff --git a/deps/v8/test/cctest/interpreter/test-bytecode-generator.cc b/deps/v8/test/cctest/interpreter/test-bytecode-generator.cc
index e81b0cf981..fb56d5d98a 100644
--- a/deps/v8/test/cctest/interpreter/test-bytecode-generator.cc
+++ b/deps/v8/test/cctest/interpreter/test-bytecode-generator.cc
@@ -157,13 +157,14 @@ bool CompareTexts(const std::string& generated, const std::string& expected) {
std::string expected_line;
// Line number does not include golden file header.
int line_number = 0;
+ bool strings_match = true;
do {
std::getline(generated_stream, generated_line);
std::getline(expected_stream, expected_line);
if (!generated_stream.good() && !expected_stream.good()) {
- return true;
+ return strings_match;
}
if (!generated_stream.good()) {
@@ -182,7 +183,7 @@ bool CompareTexts(const std::string& generated, const std::string& expected) {
std::cerr << "Inputs differ at line " << line_number << "\n";
std::cerr << " Generated: '" << generated_line << "'\n";
std::cerr << " Expected: '" << expected_line << "'\n";
- return false;
+ strings_match = false;
}
line_number++;
} while (true);
@@ -663,6 +664,7 @@ TEST(IIFEWithOneshotOpt) {
return arguments.callee;
})();
)",
+ // TODO(rmcilroy): Make this function produce one-shot code.
R"(
var t = 0;
function f2() {};
@@ -690,6 +692,34 @@ TEST(IIFEWithOneshotOpt) {
}
f();
)",
+ R"(
+ var f = function(l) { l.a = 3; return l; };
+ f({});
+ f;
+ )",
+ // No one-shot opt for top-level functions enclosed in parentheses
+ R"(
+ var f = (function(l) { l.a = 3; return l; });
+ f;
+ )",
+ R"(
+ var f = (function foo(l) { l.a = 3; return l; });
+ f;
+ )",
+ R"(
+ var f = function foo(l) { l.a = 3; return l; };
+ f({});
+ f;
+ )",
+ R"(
+ l = {};
+ var f = (function foo(l) { l.a = 3; return arguments.callee; })(l);
+ f;
+ )",
+ R"(
+ var f = (function foo(l) { l.a = 3; return arguments.callee; })({});
+ f;
+ )",
};
CHECK(CompareTexts(BuildActual(printer, snippets),
LoadGolden("IIFEWithOneshotOpt.golden")));
@@ -1469,6 +1499,8 @@ TEST(Delete) {
"return delete a[1];\n",
"return delete 'test';\n",
+
+ "return delete this;\n",
};
CHECK(CompareTexts(BuildActual(printer, snippets),
@@ -2242,6 +2274,33 @@ TEST(AssignmentsInBinaryExpression) {
LoadGolden("AssignmentsInBinaryExpression.golden")));
}
+TEST(DestructuringAssignment) {
+ InitializedIgnitionHandleScope scope;
+ BytecodeExpectationsPrinter printer(CcTest::isolate());
+ const char* snippets[] = {
+ "var x, a = [0,1,2,3];\n"
+ "[x] = a;\n",
+
+ "var x, y, a = [0,1,2,3];\n"
+ "[,x,...y] = a;\n",
+
+ "var x={}, y, a = [0];\n"
+ "[x.foo,y=4] = a;\n",
+
+ "var x, a = {x:1};\n"
+ "({x} = a);\n",
+
+ "var x={}, a = {y:1};\n"
+ "({y:x.foo} = a);\n",
+
+ "var x, a = {y:1, w:2, v:3};\n"
+ "({x=0,...y} = a);\n",
+ };
+
+ CHECK(CompareTexts(BuildActual(printer, snippets),
+ LoadGolden("DestructuringAssignment.golden")));
+}
+
TEST(Eval) {
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
@@ -2476,26 +2535,6 @@ TEST(LetVariableContextSlot) {
LoadGolden("LetVariableContextSlot.golden")));
}
-TEST(DoExpression) {
- bool old_flag = FLAG_harmony_do_expressions;
- FLAG_harmony_do_expressions = true;
-
- InitializedIgnitionHandleScope scope;
- BytecodeExpectationsPrinter printer(CcTest::isolate());
- const char* snippets[] = {
- "var a = do { }; return a;\n",
-
- "var a = do { var x = 100; }; return a;\n",
-
- "while(true) { var a = 10; a = do { ++a; break; }; a = 20; }\n",
- };
-
- CHECK(CompareTexts(BuildActual(printer, snippets),
- LoadGolden("DoExpression.golden")));
-
- FLAG_harmony_do_expressions = old_flag;
-}
-
TEST(WithStatement) {
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
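
The CompareTexts change above switches the harness from bail-on-first-mismatch to reporting every differing line before returning a single verdict, so one rebaseline run shows all golden-file drift at once. A standalone sketch of the same pattern (not the harness itself):

#include <iostream>
#include <sstream>
#include <string>

bool CompareAllLines(const std::string& generated, const std::string& expected) {
  std::istringstream gen(generated), exp(expected);
  std::string g, e;
  bool match = true;
  for (int line = 0;; line++) {
    bool more_gen = static_cast<bool>(std::getline(gen, g));
    bool more_exp = static_cast<bool>(std::getline(exp, e));
    if (!more_gen && !more_exp) return match;  // verdict only at the end
    if (more_gen != more_exp) return false;    // one side ran out early
    if (g != e) {
      std::cerr << "line " << line << ": '" << g << "' vs '" << e << "'\n";
      match = false;  // record the mismatch but keep scanning
    }
  }
}

int main() {
  // Prints two mismatch reports, then the overall verdict false (0).
  std::cout << CompareAllLines("a\nb\nc", "a\nX\nY") << '\n';
}
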
diff --git a/deps/v8/test/cctest/interpreter/test-interpreter.cc b/deps/v8/test/cctest/interpreter/test-interpreter.cc
index 65eee6f778..9ec0c99ce1 100644
--- a/deps/v8/test/cctest/interpreter/test-interpreter.cc
+++ b/deps/v8/test/cctest/interpreter/test-interpreter.cc
@@ -15,7 +15,8 @@
#include "src/interpreter/bytecode-label.h"
#include "src/interpreter/interpreter.h"
#include "src/objects-inl.h"
-#include "src/unicode-cache.h"
+#include "src/objects/heap-number-inl.h"
+#include "src/objects/smi.h"
#include "test/cctest/cctest.h"
#include "test/cctest/interpreter/interpreter-tester.h"
#include "test/cctest/test-feedback-vector.h"
@@ -240,7 +241,7 @@ static double BinaryOpC(Token::Value op, double lhs, double rhs) {
case Token::Value::DIV:
return lhs / rhs;
case Token::Value::MOD:
- return std::fmod(lhs, rhs);
+ return Modulo(lhs, rhs);
case Token::Value::BIT_OR:
return (v8::internal::DoubleToInt32(lhs) |
v8::internal::DoubleToInt32(rhs));
@@ -421,10 +422,11 @@ TEST(InterpreterBinaryOpsBigInt) {
auto callable = tester.GetCallable<>();
Handle<Object> return_value = callable().ToHandleChecked();
CHECK(return_value->IsBigInt());
- MaybeObject* feedback = callable.vector()->Get(slot);
- CHECK(feedback->IsSmi());
- CHECK_EQ(BinaryOperationFeedback::kBigInt,
- feedback->cast<Smi>()->value());
+ if (tester.HasFeedbackMetadata()) {
+ MaybeObject feedback = callable.vector()->Get(slot);
+ CHECK(feedback->IsSmi());
+ CHECK_EQ(BinaryOperationFeedback::kBigInt, feedback->ToSmi().value());
+ }
}
}
}
@@ -542,9 +544,11 @@ TEST(InterpreterStringAdd) {
Handle<Object> return_value = callable().ToHandleChecked();
CHECK(return_value->SameValue(*test_cases[i].expected_value));
- MaybeObject* feedback = callable.vector()->Get(slot);
- CHECK(feedback->IsSmi());
- CHECK_EQ(test_cases[i].expected_feedback, feedback->cast<Smi>()->value());
+ if (tester.HasFeedbackMetadata()) {
+ MaybeObject feedback = callable.vector()->Get(slot);
+ CHECK(feedback->IsSmi());
+ CHECK_EQ(test_cases[i].expected_feedback, feedback->ToSmi().value());
+ }
}
}
@@ -623,6 +627,8 @@ TEST(InterpreterParameter8) {
}
TEST(InterpreterBinaryOpTypeFeedback) {
+ if (FLAG_lite_mode) return;
+
HandleAndZoneScope handles;
i::Isolate* isolate = handles.main_isolate();
Zone* zone = handles.main_zone();
@@ -747,14 +753,16 @@ TEST(InterpreterBinaryOpTypeFeedback) {
auto callable = tester.GetCallable<>();
Handle<Object> return_val = callable().ToHandleChecked();
- MaybeObject* feedback0 = callable.vector()->Get(slot0);
+ MaybeObject feedback0 = callable.vector()->Get(slot0);
CHECK(feedback0->IsSmi());
- CHECK_EQ(test_case.feedback, feedback0->cast<Smi>()->value());
+ CHECK_EQ(test_case.feedback, feedback0->ToSmi().value());
CHECK(Object::Equals(isolate, test_case.result, return_val).ToChecked());
}
}
TEST(InterpreterBinaryOpSmiTypeFeedback) {
+ if (FLAG_lite_mode) return;
+
HandleAndZoneScope handles;
i::Isolate* isolate = handles.main_isolate();
Zone* zone = handles.main_zone();
@@ -794,7 +802,7 @@ TEST(InterpreterBinaryOpSmiTypeFeedback) {
isolate->factory()->NewHeapNumber(3.1415 - 2.0),
BinaryOperationFeedback::kNumber},
{Token::Value::SUB, LiteralForTest(ast_factory.GetOneByteString("2")), 2,
- Handle<Smi>(Smi::kZero, isolate), BinaryOperationFeedback::kAny},
+ Handle<Smi>(Smi::zero(), isolate), BinaryOperationFeedback::kAny},
// BIT_OR
{Token::Value::BIT_OR, LiteralForTest(4), 1,
Handle<Smi>(Smi::FromInt(5), isolate),
@@ -811,7 +819,7 @@ TEST(InterpreterBinaryOpSmiTypeFeedback) {
{Token::Value::BIT_AND, LiteralForTest(3.1415), 2,
Handle<Smi>(Smi::FromInt(2), isolate), BinaryOperationFeedback::kNumber},
{Token::Value::BIT_AND, LiteralForTest(ast_factory.GetOneByteString("2")),
- 1, Handle<Smi>(Smi::kZero, isolate), BinaryOperationFeedback::kAny},
+ 1, Handle<Smi>(Smi::zero(), isolate), BinaryOperationFeedback::kAny},
// SHL
{Token::Value::SHL, LiteralForTest(3), 1,
Handle<Smi>(Smi::FromInt(6), isolate),
@@ -826,7 +834,7 @@ TEST(InterpreterBinaryOpSmiTypeFeedback) {
Handle<Smi>(Smi::FromInt(1), isolate),
BinaryOperationFeedback::kSignedSmall},
{Token::Value::SAR, LiteralForTest(3.1415), 2,
- Handle<Smi>(Smi::kZero, isolate), BinaryOperationFeedback::kNumber},
+ Handle<Smi>(Smi::zero(), isolate), BinaryOperationFeedback::kNumber},
{Token::Value::SAR, LiteralForTest(ast_factory.GetOneByteString("2")), 1,
Handle<Smi>(Smi::FromInt(1), isolate), BinaryOperationFeedback::kAny}};
@@ -853,14 +861,16 @@ TEST(InterpreterBinaryOpSmiTypeFeedback) {
auto callable = tester.GetCallable<>();
Handle<Object> return_val = callable().ToHandleChecked();
- MaybeObject* feedback0 = callable.vector()->Get(slot0);
+ MaybeObject feedback0 = callable.vector()->Get(slot0);
CHECK(feedback0->IsSmi());
- CHECK_EQ(test_case.feedback, feedback0->cast<Smi>()->value());
+ CHECK_EQ(test_case.feedback, feedback0->ToSmi().value());
CHECK(Object::Equals(isolate, test_case.result, return_val).ToChecked());
}
}
TEST(InterpreterUnaryOpFeedback) {
+ if (FLAG_lite_mode) return;
+
HandleAndZoneScope handles;
i::Isolate* isolate = handles.main_isolate();
Zone* zone = handles.main_zone();
@@ -924,30 +934,31 @@ TEST(InterpreterUnaryOpFeedback) {
test_case.bigint_feedback_value, test_case.any_feedback_value)
.ToHandleChecked();
USE(return_val);
- MaybeObject* feedback0 = callable.vector()->Get(slot0);
+ MaybeObject feedback0 = callable.vector()->Get(slot0);
CHECK(feedback0->IsSmi());
- CHECK_EQ(BinaryOperationFeedback::kSignedSmall,
- feedback0->cast<Smi>()->value());
+ CHECK_EQ(BinaryOperationFeedback::kSignedSmall, feedback0->ToSmi().value());
- MaybeObject* feedback1 = callable.vector()->Get(slot1);
+ MaybeObject feedback1 = callable.vector()->Get(slot1);
CHECK(feedback1->IsSmi());
- CHECK_EQ(BinaryOperationFeedback::kNumber, feedback1->cast<Smi>()->value());
+ CHECK_EQ(BinaryOperationFeedback::kNumber, feedback1->ToSmi().value());
- MaybeObject* feedback2 = callable.vector()->Get(slot2);
+ MaybeObject feedback2 = callable.vector()->Get(slot2);
CHECK(feedback2->IsSmi());
- CHECK_EQ(BinaryOperationFeedback::kNumber, feedback2->cast<Smi>()->value());
+ CHECK_EQ(BinaryOperationFeedback::kNumber, feedback2->ToSmi().value());
- MaybeObject* feedback3 = callable.vector()->Get(slot3);
+ MaybeObject feedback3 = callable.vector()->Get(slot3);
CHECK(feedback3->IsSmi());
- CHECK_EQ(BinaryOperationFeedback::kBigInt, feedback3->cast<Smi>()->value());
+ CHECK_EQ(BinaryOperationFeedback::kBigInt, feedback3->ToSmi().value());
- MaybeObject* feedback4 = callable.vector()->Get(slot4);
+ MaybeObject feedback4 = callable.vector()->Get(slot4);
CHECK(feedback4->IsSmi());
- CHECK_EQ(BinaryOperationFeedback::kAny, feedback4->cast<Smi>()->value());
+ CHECK_EQ(BinaryOperationFeedback::kAny, feedback4->ToSmi().value());
}
}
TEST(InterpreterBitwiseTypeFeedback) {
+ if (FLAG_lite_mode) return;
+
HandleAndZoneScope handles;
i::Isolate* isolate = handles.main_isolate();
Zone* zone = handles.main_zone();
@@ -986,18 +997,17 @@ TEST(InterpreterBitwiseTypeFeedback) {
Handle<Object> return_val =
callable(arg1, arg2, arg3, arg4).ToHandleChecked();
USE(return_val);
- MaybeObject* feedback0 = callable.vector()->Get(slot0);
+ MaybeObject feedback0 = callable.vector()->Get(slot0);
CHECK(feedback0->IsSmi());
- CHECK_EQ(BinaryOperationFeedback::kSignedSmall,
- feedback0->cast<Smi>()->value());
+ CHECK_EQ(BinaryOperationFeedback::kSignedSmall, feedback0->ToSmi().value());
- MaybeObject* feedback1 = callable.vector()->Get(slot1);
+ MaybeObject feedback1 = callable.vector()->Get(slot1);
CHECK(feedback1->IsSmi());
- CHECK_EQ(BinaryOperationFeedback::kNumber, feedback1->cast<Smi>()->value());
+ CHECK_EQ(BinaryOperationFeedback::kNumber, feedback1->ToSmi().value());
- MaybeObject* feedback2 = callable.vector()->Get(slot2);
+ MaybeObject feedback2 = callable.vector()->Get(slot2);
CHECK(feedback2->IsSmi());
- CHECK_EQ(BinaryOperationFeedback::kAny, feedback2->cast<Smi>()->value());
+ CHECK_EQ(BinaryOperationFeedback::kAny, feedback2->ToSmi().value());
}
}
@@ -1519,7 +1529,7 @@ TEST(InterpreterJumps) {
Register reg(0), scratch(1);
BytecodeLabel label[3];
- builder.LoadLiteral(Smi::kZero)
+ builder.LoadLiteral(Smi::zero())
.StoreAccumulatorInRegister(reg)
.Jump(&label[1]);
SetRegister(builder, reg, 1024, scratch).Bind(&label[0]);
@@ -1559,7 +1569,7 @@ TEST(InterpreterConditionalJumps) {
BytecodeLabel label[2];
BytecodeLabel done, done1;
- builder.LoadLiteral(Smi::kZero)
+ builder.LoadLiteral(Smi::zero())
.StoreAccumulatorInRegister(reg)
.LoadFalse()
.JumpIfFalse(ToBooleanMode::kAlreadyBoolean, &label[0]);
@@ -1609,7 +1619,7 @@ TEST(InterpreterConditionalJumps2) {
BytecodeLabel label[2];
BytecodeLabel done, done1;
- builder.LoadLiteral(Smi::kZero)
+ builder.LoadLiteral(Smi::zero())
.StoreAccumulatorInRegister(reg)
.LoadFalse()
.JumpIfFalse(ToBooleanMode::kAlreadyBoolean, &label[0]);
@@ -1654,7 +1664,7 @@ TEST(InterpreterJumpConstantWith16BitOperand) {
Register reg(0), scratch(256);
BytecodeLabel done, fake;
- builder.LoadLiteral(Smi::kZero);
+ builder.LoadLiteral(Smi::zero());
builder.StoreAccumulatorInRegister(reg);
// Consume all 8-bit operands
for (int i = 1; i <= 256; i++) {
@@ -1667,7 +1677,7 @@ TEST(InterpreterJumpConstantWith16BitOperand) {
// Emit more than 16-bit immediate operands worth of code to jump over.
builder.Bind(&fake);
for (int i = 0; i < 6600; i++) {
- builder.LoadLiteral(Smi::kZero); // 1-byte
+ builder.LoadLiteral(Smi::zero()); // 1-byte
builder.BinaryOperation(Token::Value::ADD, scratch,
GetIndex(slot)); // 6-bytes
builder.StoreAccumulatorInRegister(scratch); // 4-bytes
@@ -1709,7 +1719,7 @@ TEST(InterpreterJumpWith32BitOperand) {
Register reg(0);
BytecodeLabel done;
- builder.LoadLiteral(Smi::kZero);
+ builder.LoadLiteral(Smi::zero());
builder.StoreAccumulatorInRegister(reg);
// Consume all 16-bit constant pool entries. Make sure to use doubles so that
// the jump can't re-use an integer.
@@ -1717,7 +1727,7 @@ TEST(InterpreterJumpWith32BitOperand) {
builder.LoadLiteral(i + 0.5);
}
builder.Jump(&done);
- builder.LoadLiteral(Smi::kZero);
+ builder.LoadLiteral(Smi::zero());
builder.Bind(&done);
builder.Return();
@@ -1816,10 +1826,12 @@ TEST(InterpreterSmiComparisons) {
CHECK(return_value->IsBoolean());
CHECK_EQ(return_value->BooleanValue(isolate),
CompareC(comparison, inputs[i], inputs[j]));
- MaybeObject* feedback = callable.vector()->Get(slot);
- CHECK(feedback->IsSmi());
- CHECK_EQ(CompareOperationFeedback::kSignedSmall,
- feedback->cast<Smi>()->value());
+ if (tester.HasFeedbackMetadata()) {
+ MaybeObject feedback = callable.vector()->Get(slot);
+ CHECK(feedback->IsSmi());
+ CHECK_EQ(CompareOperationFeedback::kSignedSmall,
+ feedback->ToSmi().value());
+ }
}
}
}
@@ -1865,10 +1877,12 @@ TEST(InterpreterHeapNumberComparisons) {
CHECK(return_value->IsBoolean());
CHECK_EQ(return_value->BooleanValue(isolate),
CompareC(comparison, inputs[i], inputs[j]));
- MaybeObject* feedback = callable.vector()->Get(slot);
- CHECK(feedback->IsSmi());
- CHECK_EQ(CompareOperationFeedback::kNumber,
- feedback->cast<Smi>()->value());
+ if (tester.HasFeedbackMetadata()) {
+ MaybeObject feedback = callable.vector()->Get(slot);
+ CHECK(feedback->IsSmi());
+ CHECK_EQ(CompareOperationFeedback::kNumber,
+ feedback->ToSmi().value());
+ }
}
}
}
@@ -1908,10 +1922,12 @@ TEST(InterpreterBigIntComparisons) {
auto callable = tester.GetCallable<>();
Handle<Object> return_value = callable().ToHandleChecked();
CHECK(return_value->IsBoolean());
- MaybeObject* feedback = callable.vector()->Get(slot);
- CHECK(feedback->IsSmi());
- CHECK_EQ(CompareOperationFeedback::kBigInt,
- feedback->cast<Smi>()->value());
+ if (tester.HasFeedbackMetadata()) {
+ MaybeObject feedback = callable.vector()->Get(slot);
+ CHECK(feedback->IsSmi());
+ CHECK_EQ(CompareOperationFeedback::kBigInt,
+ feedback->ToSmi().value());
+ }
}
}
}
@@ -1956,13 +1972,15 @@ TEST(InterpreterStringComparisons) {
CHECK(return_value->IsBoolean());
CHECK_EQ(return_value->BooleanValue(isolate),
CompareC(comparison, inputs[i], inputs[j]));
- MaybeObject* feedback = callable.vector()->Get(slot);
- CHECK(feedback->IsSmi());
- int const expected_feedback =
- Token::IsOrderedRelationalCompareOp(comparison)
- ? CompareOperationFeedback::kString
- : CompareOperationFeedback::kInternalizedString;
- CHECK_EQ(expected_feedback, feedback->cast<Smi>()->value());
+ if (tester.HasFeedbackMetadata()) {
+ MaybeObject feedback = callable.vector()->Get(slot);
+ CHECK(feedback->IsSmi());
+ int const expected_feedback =
+ Token::IsOrderedRelationalCompareOp(comparison)
+ ? CompareOperationFeedback::kString
+ : CompareOperationFeedback::kInternalizedString;
+ CHECK_EQ(expected_feedback, feedback->ToSmi().value());
+ }
}
}
}
@@ -1989,8 +2007,6 @@ TEST(InterpreterMixedComparisons) {
// performed.
const char* inputs[] = {"-1.77", "-40.333", "0.01", "55.77e50", "2.01"};
- UnicodeCache unicode_cache;
-
enum WhichSideString { kLhsIsString, kRhsIsString };
enum StringType { kInternalizedStringConstant, kComputedString };
@@ -2006,10 +2022,8 @@ TEST(InterpreterMixedComparisons) {
{kInternalizedStringConstant, kComputedString}) {
const char* lhs_cstr = inputs[i];
const char* rhs_cstr = inputs[j];
- double lhs = StringToDouble(&unicode_cache, lhs_cstr,
- ConversionFlags::NO_FLAGS);
- double rhs = StringToDouble(&unicode_cache, rhs_cstr,
- ConversionFlags::NO_FLAGS);
+ double lhs = StringToDouble(lhs_cstr, ConversionFlags::NO_FLAGS);
+ double rhs = StringToDouble(rhs_cstr, ConversionFlags::NO_FLAGS);
HandleAndZoneScope handles;
Isolate* isolate = handles.main_isolate();
Zone* zone = handles.main_zone();
@@ -2071,11 +2085,13 @@ TEST(InterpreterMixedComparisons) {
CHECK(return_value->IsBoolean());
CHECK_EQ(return_value->BooleanValue(isolate),
CompareC(comparison, lhs, rhs, true));
- MaybeObject* feedback = callable.vector()->Get(slot);
- CHECK(feedback->IsSmi());
- // Comparison with a number and string collects kAny feedback.
- CHECK_EQ(CompareOperationFeedback::kAny,
- feedback->cast<Smi>()->value());
+ if (tester.HasFeedbackMetadata()) {
+ MaybeObject feedback = callable.vector()->Get(slot);
+ CHECK(feedback->IsSmi());
+ // Comparison with a number and string collects kAny feedback.
+ CHECK_EQ(CompareOperationFeedback::kAny,
+ feedback->ToSmi().value());
+ }
}
}
}
@@ -2097,13 +2113,10 @@ TEST(InterpreterStrictNotEqual) {
// Test passing different types.
const char* inputs[] = {"-1.77", "-40.333", "0.01", "55.77e5", "2.01"};
- UnicodeCache unicode_cache;
for (size_t i = 0; i < arraysize(inputs); i++) {
for (size_t j = 0; j < arraysize(inputs); j++) {
- double lhs =
- StringToDouble(&unicode_cache, inputs[i], ConversionFlags::NO_FLAGS);
- double rhs =
- StringToDouble(&unicode_cache, inputs[j], ConversionFlags::NO_FLAGS);
+ double lhs = StringToDouble(inputs[i], ConversionFlags::NO_FLAGS);
+ double rhs = StringToDouble(inputs[j], ConversionFlags::NO_FLAGS);
Handle<Object> lhs_obj = factory->NewNumber(lhs);
Handle<Object> rhs_obj = factory->NewStringFromAsciiChecked(inputs[j]);
@@ -4635,37 +4648,6 @@ TEST(InterpreterWideParametersSummation) {
}
}
-TEST(InterpreterDoExpression) {
- bool old_flag = FLAG_harmony_do_expressions;
- FLAG_harmony_do_expressions = true;
-
- HandleAndZoneScope handles;
- Isolate* isolate = handles.main_isolate();
- Factory* factory = isolate->factory();
-
- std::pair<const char*, Handle<Object>> do_expr[] = {
- {"var a = do {}; return a;", factory->undefined_value()},
- {"var a = do { var x = 100; }; return a;", factory->undefined_value()},
- {"var a = do { var x = 100; }; return a;", factory->undefined_value()},
- {"var a = do { var x = 100; x++; }; return a;",
- handle(Smi::FromInt(100), isolate)},
- {"var i = 0; for (; i < 5;) { i = do { if (i == 3) { break; }; i + 1; }};"
- "return i;",
- handle(Smi::FromInt(3), isolate)},
- };
-
- for (size_t i = 0; i < arraysize(do_expr); i++) {
- std::string source(InterpreterTester::SourceForBody(do_expr[i].first));
- InterpreterTester tester(isolate, source.c_str());
- auto callable = tester.GetCallable<>();
-
- Handle<i::Object> return_value = callable().ToHandleChecked();
- CHECK(return_value->SameValue(*do_expr[i].second));
- }
-
- FLAG_harmony_do_expressions = old_flag;
-}
-
TEST(InterpreterWithStatement) {
HandleAndZoneScope handles;
Isolate* isolate = handles.main_isolate();
@@ -5026,6 +5008,7 @@ TEST(InterpreterGenerators) {
}
}
+#ifndef V8_TARGET_ARCH_ARM
TEST(InterpreterWithNativeStack) {
i::FLAG_interpreted_frames_native_stack = true;
@@ -5039,41 +5022,41 @@ TEST(InterpreterWithNativeStack) {
i::Handle<i::JSFunction> f = i::Handle<i::JSFunction>::cast(o);
CHECK(f->shared()->HasBytecodeArray());
- i::Code* code = f->shared()->GetCode();
+ i::Code code = f->shared()->GetCode();
i::Handle<i::Code> interpreter_entry_trampoline =
BUILTIN_CODE(isolate, InterpreterEntryTrampoline);
CHECK(code->IsCode());
CHECK(code->is_interpreter_trampoline_builtin());
- CHECK_NE(code->InstructionStart(),
- interpreter_entry_trampoline->InstructionStart());
+ CHECK_NE(code->address(), interpreter_entry_trampoline->address());
}
+#endif // V8_TARGET_ARCH_ARM
-TEST(InterpreterGetAndMaybeDeserializeBytecodeHandler) {
+TEST(InterpreterGetBytecodeHandler) {
HandleAndZoneScope handles;
Isolate* isolate = handles.main_isolate();
Interpreter* interpreter = isolate->interpreter();
  // Test that single-width bytecode handlers deserialize correctly.
- Code* wide_handler = interpreter->GetAndMaybeDeserializeBytecodeHandler(
- Bytecode::kWide, OperandScale::kSingle);
+ Code wide_handler =
+ interpreter->GetBytecodeHandler(Bytecode::kWide, OperandScale::kSingle);
CHECK_EQ(wide_handler->builtin_index(), Builtins::kWideHandler);
- Code* add_handler = interpreter->GetAndMaybeDeserializeBytecodeHandler(
- Bytecode::kAdd, OperandScale::kSingle);
+ Code add_handler =
+ interpreter->GetBytecodeHandler(Bytecode::kAdd, OperandScale::kSingle);
CHECK_EQ(add_handler->builtin_index(), Builtins::kAddHandler);
  // Test that double-width bytecode handlers deserialize correctly, including
// an illegal bytecode handler since there is no Wide.Wide handler.
- Code* wide_wide_handler = interpreter->GetAndMaybeDeserializeBytecodeHandler(
- Bytecode::kWide, OperandScale::kDouble);
+ Code wide_wide_handler =
+ interpreter->GetBytecodeHandler(Bytecode::kWide, OperandScale::kDouble);
CHECK_EQ(wide_wide_handler->builtin_index(), Builtins::kIllegalHandler);
- Code* add_wide_handler = interpreter->GetAndMaybeDeserializeBytecodeHandler(
- Bytecode::kAdd, OperandScale::kDouble);
+ Code add_wide_handler =
+ interpreter->GetBytecodeHandler(Bytecode::kAdd, OperandScale::kDouble);
CHECK_EQ(add_wide_handler->builtin_index(), Builtins::kAddWideHandler);
}
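
Two mechanical migrations dominate this file. First, MaybeObject and Code become value types, copied tagged words rather than heap pointers, which is why feedback->cast<Smi>()->value() becomes feedback->ToSmi().value(). Second, every feedback assertion is guarded by tester.HasFeedbackMetadata() or an early FLAG_lite_mode return, since lite mode allocates no feedback vectors. A toy model of the value-type half, with invented names and a one-bit tag chosen purely for illustration (V8's actual tagging differs):

#include <cassert>
#include <cstdint>

// Invented stand-in for a tagged value: one word passed by value, where a
// cleared low bit marks a small-integer (Smi) payload.
class TaggedWord {
 public:
  static TaggedWord FromSmi(intptr_t v) {
    return TaggedWord(static_cast<uintptr_t>(v) << 1);
  }
  bool IsSmi() const { return (bits_ & 1) == 0; }
  intptr_t ToSmiValue() const {
    assert(IsSmi());
    return static_cast<intptr_t>(bits_) >> 1;
  }

 private:
  explicit TaggedWord(uintptr_t bits) : bits_(bits) {}
  uintptr_t bits_;  // copying the object copies the tag word, no indirection
};

int main() {
  TaggedWord feedback = TaggedWord::FromSmi(7);  // returned by value
  assert(feedback.IsSmi() && feedback.ToSmiValue() == 7);
}
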
diff --git a/deps/v8/test/cctest/libsampler/test-sampler.cc b/deps/v8/test/cctest/libsampler/test-sampler.cc
index 462da988e4..e00861ddcf 100644
--- a/deps/v8/test/cctest/libsampler/test-sampler.cc
+++ b/deps/v8/test/cctest/libsampler/test-sampler.cc
@@ -26,7 +26,7 @@ class TestSamplingThread : public base::Thread {
// Implement Thread::Run().
void Run() override {
- while (sampler_->IsProfiling()) {
+ while (sampler_->IsActive()) {
sampler_->DoSample();
base::OS::Sleep(base::TimeDelta::FromMilliseconds(1));
}
@@ -73,23 +73,17 @@ static void RunSampler(v8::Local<v8::Context> env,
v8::Local<v8::Value> argv[], int argc,
unsigned min_js_samples = 0,
unsigned min_external_samples = 0) {
- Sampler::SetUp();
- TestSampler* sampler = new TestSampler(env->GetIsolate());
- TestSamplingThread* thread = new TestSamplingThread(sampler);
- sampler->IncreaseProfilingDepth();
- sampler->Start();
- sampler->StartCountingSamples();
- thread->StartSynchronously();
+ TestSampler sampler(env->GetIsolate());
+ TestSamplingThread thread(&sampler);
+ sampler.Start();
+ sampler.StartCountingSamples();
+ thread.StartSynchronously();
do {
function->Call(env, env->Global(), argc, argv).ToLocalChecked();
- } while (sampler->js_sample_count() < min_js_samples ||
- sampler->external_sample_count() < min_external_samples);
- sampler->Stop();
- sampler->DecreaseProfilingDepth();
- thread->Join();
- delete thread;
- delete sampler;
- Sampler::TearDown();
+ } while (sampler.js_sample_count() < min_js_samples ||
+ sampler.external_sample_count() < min_external_samples);
+ sampler.Stop();
+ thread.Join();
}
} // namespace
@@ -137,5 +131,88 @@ TEST(LibSamplerCollectSample) {
RunSampler(env.local(), function, args, arraysize(args), 100, 100);
}
+#ifdef USE_SIGNALS
+
+class CountingSampler : public Sampler {
+ public:
+ explicit CountingSampler(Isolate* isolate) : Sampler(isolate) {}
+
+ void SampleStack(const v8::RegisterState& regs) override { sample_count_++; }
+
+ int sample_count() { return sample_count_; }
+ void set_active(bool active) { SetActive(active); }
+
+ private:
+ int sample_count_ = 0;
+};
+
+TEST(SamplerManager_AddRemoveSampler) {
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+
+ SamplerManager* manager = SamplerManager::instance();
+ CountingSampler sampler1(isolate);
+ sampler1.set_active(true);
+ CHECK_EQ(0, sampler1.sample_count());
+
+ manager->AddSampler(&sampler1);
+
+ RegisterState state;
+ manager->DoSample(state);
+ CHECK_EQ(1, sampler1.sample_count());
+
+ sampler1.set_active(true);
+ manager->RemoveSampler(&sampler1);
+ sampler1.set_active(false);
+
+ manager->DoSample(state);
+ CHECK_EQ(1, sampler1.sample_count());
+}
+
+TEST(SamplerManager_DoesNotReAdd) {
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+
+ // Add the same sampler twice, but check we only get one sample for it.
+ SamplerManager* manager = SamplerManager::instance();
+ CountingSampler sampler1(isolate);
+ sampler1.set_active(true);
+ manager->AddSampler(&sampler1);
+ manager->AddSampler(&sampler1);
+
+ RegisterState state;
+ manager->DoSample(state);
+ CHECK_EQ(1, sampler1.sample_count());
+ sampler1.set_active(false);
+}
+
+TEST(AtomicGuard_GetNonBlockingSuccess) {
+ std::atomic_bool atomic{false};
+ {
+ AtomicGuard guard(&atomic, false);
+ CHECK(guard.is_success());
+
+ AtomicGuard guard2(&atomic, false);
+ CHECK(!guard2.is_success());
+ }
+ AtomicGuard guard(&atomic, false);
+ CHECK(guard.is_success());
+}
+
+TEST(AtomicGuard_GetBlockingSuccess) {
+ std::atomic_bool atomic{false};
+ {
+ AtomicGuard guard(&atomic);
+ CHECK(guard.is_success());
+
+ AtomicGuard guard2(&atomic, false);
+ CHECK(!guard2.is_success());
+ }
+ AtomicGuard guard(&atomic);
+ CHECK(guard.is_success());
+}
+
+#endif // USE_SIGNALS
+
} // namespace sampler
} // namespace v8
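
The new AtomicGuard tests pin down RAII try-lock semantics over a std::atomic_bool: a blocking guard spins until it wins the flag, a non-blocking one records failure in is_success(), and the destructor releases the flag only if it was acquired. A plausible implementation consistent with those tests (an assumption, not the class from src/libsampler):

#include <atomic>
#include <cassert>

class AtomicGuard {
 public:
  explicit AtomicGuard(std::atomic_bool* flag, bool blocking = true)
      : flag_(flag) {
    bool expected = false;
    while (!(success_ = flag_->compare_exchange_strong(expected, true))) {
      if (!blocking) break;  // non-blocking: report failure via is_success()
      expected = false;      // reset for the next spin
    }
  }
  ~AtomicGuard() {
    if (success_) flag_->store(false);  // release only what we acquired
  }
  bool is_success() const { return success_; }

 private:
  std::atomic_bool* const flag_;
  bool success_ = false;
};

int main() {
  std::atomic_bool flag{false};
  AtomicGuard outer(&flag);
  assert(outer.is_success());
  AtomicGuard inner(&flag, false);  // flag already held, must not block
  assert(!inner.is_success());
}
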
diff --git a/deps/v8/test/cctest/parsing/test-parse-decision.cc b/deps/v8/test/cctest/parsing/test-parse-decision.cc
index f44a9e4b82..c0c4b2cd7e 100644
--- a/deps/v8/test/cctest/parsing/test-parse-decision.cc
+++ b/deps/v8/test/cctest/parsing/test-parse-decision.cc
@@ -33,7 +33,8 @@ void GetTopLevelFunctionInfo(
SharedFunctionInfo::ScriptIterator iterator(
toplevel_fn->GetIsolate(), Script::cast(toplevel_fn->shared()->script()));
- while (SharedFunctionInfo* shared = iterator.Next()) {
+ for (SharedFunctionInfo shared = iterator.Next(); !shared.is_null();
+ shared = iterator.Next()) {
std::unique_ptr<char[]> name = String::cast(shared->Name())->ToCString();
is_compiled->insert(std::make_pair(name.get(), shared->is_compiled()));
}
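
The loop rewrite above falls out of ScriptIterator::Next() returning a value type: "while (SharedFunctionInfo* shared = iterator.Next())" relied on pointer-to-bool conversion, and the value-type equivalent tests is_null() explicitly. A minimal analogue with invented types:

#include <iostream>
#include <vector>

// Invented value-type handle whose "no more items" state is an explicit
// null flag, mirroring the SharedFunctionInfo migration above.
struct Handle {
  int value = 0;
  bool null = true;
  bool is_null() const { return null; }
};

struct ScriptIterator {
  std::vector<int> items{1, 2, 3};
  size_t pos = 0;
  Handle Next() {
    if (pos == items.size()) return Handle{};  // null handle ends iteration
    return Handle{items[pos++], false};
  }
};

int main() {
  ScriptIterator it;
  // A value type cannot seed "while (T* p = ...)", so the loop becomes:
  for (Handle h = it.Next(); !h.is_null(); h = it.Next())
    std::cout << h.value << '\n';
}
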
diff --git a/deps/v8/test/cctest/parsing/test-preparser.cc b/deps/v8/test/cctest/parsing/test-preparser.cc
index ecea6f6134..4d2aba768c 100644
--- a/deps/v8/test/cctest/parsing/test-preparser.cc
+++ b/deps/v8/test/cctest/parsing/test-preparser.cc
@@ -8,8 +8,8 @@
#include "src/objects-inl.h"
#include "src/parsing/parse-info.h"
#include "src/parsing/parsing.h"
-#include "src/parsing/preparsed-scope-data-impl.h"
-#include "src/parsing/preparsed-scope-data.h"
+#include "src/parsing/preparse-data-impl.h"
+#include "src/parsing/preparse-data.h"
#include "test/cctest/cctest.h"
#include "test/cctest/scope-test-helper.h"
@@ -34,14 +34,11 @@ enum class Bailout { BAILOUT_IF_OUTER_SLOPPY, NO };
} // namespace
TEST(PreParserScopeAnalysis) {
- i::FLAG_lazy_inner_functions = true;
- i::FLAG_preparser_scope_analysis = true;
- i::FLAG_aggressive_lazy_inner_functions = true;
i::Isolate* isolate = CcTest::i_isolate();
i::Factory* factory = isolate->factory();
LocalContext env;
- struct {
+ struct Outer {
const char* code;
bool strict_outer;
bool strict_test_function;
@@ -691,34 +688,27 @@ TEST(PreParserScopeAnalysis) {
[] { i::FLAG_harmony_private_fields = false; }},
};
- for (unsigned outer_ix = 0; outer_ix < arraysize(outers); ++outer_ix) {
- for (unsigned inner_ix = 0; inner_ix < arraysize(inners); ++inner_ix) {
- if (outers[outer_ix].strict_outer &&
- (inners[inner_ix].skip & SKIP_STRICT_OUTER)) {
- continue;
- }
- if (outers[outer_ix].strict_test_function &&
- (inners[inner_ix].skip & SKIP_STRICT_FUNCTION)) {
- continue;
- }
- if (outers[outer_ix].arrow && (inners[inner_ix].skip & SKIP_ARROW)) {
+ for (unsigned i = 0; i < arraysize(outers); ++i) {
+ struct Outer outer = outers[i];
+ for (unsigned j = 0; j < arraysize(inners); ++j) {
+ struct Inner inner = inners[j];
+ if (outer.strict_outer && (inner.skip & SKIP_STRICT_OUTER)) continue;
+ if (outer.strict_test_function && (inner.skip & SKIP_STRICT_FUNCTION)) {
continue;
}
+ if (outer.arrow && (inner.skip & SKIP_ARROW)) continue;
- const char* code = outers[outer_ix].code;
+ const char* code = outer.code;
int code_len = Utf8LengthHelper(code);
- int params_len = Utf8LengthHelper(inners[inner_ix].params);
- int source_len = Utf8LengthHelper(inners[inner_ix].source);
+ int params_len = Utf8LengthHelper(inner.params);
+ int source_len = Utf8LengthHelper(inner.source);
int len = code_len + params_len + source_len;
- if (inners[inner_ix].prologue != nullptr) {
- inners[inner_ix].prologue();
- }
+ if (inner.prologue != nullptr) inner.prologue();
i::ScopedVector<char> program(len + 1);
- i::SNPrintF(program, code, inners[inner_ix].params,
- inners[inner_ix].source);
+ i::SNPrintF(program, code, inner.params, inner.source);
i::HandleScope scope(isolate);
@@ -733,23 +723,22 @@ TEST(PreParserScopeAnalysis) {
i::Handle<i::JSFunction> f = i::Handle<i::JSFunction>::cast(o);
i::Handle<i::SharedFunctionInfo> shared = i::handle(f->shared(), isolate);
- if (inners[inner_ix].bailout == Bailout::BAILOUT_IF_OUTER_SLOPPY &&
- !outers[outer_ix].strict_outer) {
- CHECK(!shared->HasUncompiledDataWithPreParsedScope());
+ if (inner.bailout == Bailout::BAILOUT_IF_OUTER_SLOPPY &&
+ !outer.strict_outer) {
+ CHECK(!shared->HasUncompiledDataWithPreparseData());
continue;
}
- CHECK(shared->HasUncompiledDataWithPreParsedScope());
- i::Handle<i::PreParsedScopeData> produced_data_on_heap(
- shared->uncompiled_data_with_pre_parsed_scope()
- ->pre_parsed_scope_data(),
+ CHECK(shared->HasUncompiledDataWithPreparseData());
+ i::Handle<i::PreparseData> produced_data_on_heap(
+ shared->uncompiled_data_with_preparse_data()->preparse_data(),
isolate);
// Parse the lazy function using the scope data.
i::ParseInfo using_scope_data(isolate, shared);
using_scope_data.set_lazy_compile();
- using_scope_data.set_consumed_preparsed_scope_data(
- i::ConsumedPreParsedScopeData::For(isolate, produced_data_on_heap));
+ using_scope_data.set_consumed_preparse_data(
+ i::ConsumedPreparseData::For(isolate, produced_data_on_heap));
CHECK(i::parsing::ParseFunction(&using_scope_data, shared, isolate));
// Verify that we skipped at least one function inside that scope.
@@ -780,11 +769,9 @@ TEST(PreParserScopeAnalysis) {
// scope data (and skipping functions), and when parsing without.
i::ScopeTestHelper::CompareScopes(
scope_without_skipped_functions, scope_with_skipped_functions,
- inners[inner_ix].precise_maybe_assigned == PreciseMaybeAssigned::YES);
+ inner.precise_maybe_assigned == PreciseMaybeAssigned::YES);
- if (inners[inner_ix].epilogue != nullptr) {
- inners[inner_ix].epilogue();
- }
+ if (inner.epilogue != nullptr) inner.epilogue();
}
}
}
@@ -793,7 +780,6 @@ TEST(PreParserScopeAnalysis) {
// https://bugs.chromium.org/p/chromium/issues/detail?id=753896. Should not
// crash.
TEST(Regress753896) {
- i::FLAG_preparser_scope_analysis = true;
i::Isolate* isolate = CcTest::i_isolate();
i::Factory* factory = isolate->factory();
i::HandleScope scope(isolate);
@@ -815,16 +801,24 @@ TEST(ProducingAndConsumingByteData) {
LocalContext env;
i::Zone zone(isolate->allocator(), ZONE_NAME);
- i::PreParsedScopeDataBuilder::ByteData bytes(&zone);
+ std::vector<uint8_t> buffer;
+ i::PreparseDataBuilder::ByteData bytes;
+ bytes.Start(&buffer);
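+ // The builder writes into |buffer| until Finalize() moves the bytes into
+ // the zone.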
// Write some data.
+#ifdef DEBUG
bytes.WriteUint32(1983); // This will be overwritten.
- bytes.WriteUint32(2147483647);
+#else
+ bytes.WriteVarint32(1983);
+#endif
+ bytes.WriteVarint32(2147483647);
bytes.WriteUint8(4);
bytes.WriteUint8(255);
- bytes.WriteUint32(0);
+ bytes.WriteVarint32(0);
bytes.WriteUint8(0);
#ifdef DEBUG
- bytes.OverwriteFirstUint32(2017);
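+ // The first uint32 slot is patched with the byte count written so far.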
+ bytes.SaveCurrentSizeAtFirstUint32();
+ int saved_size = 21;
+ CHECK_EQ(buffer.size(), saved_size);
#endif
bytes.WriteUint8(100);
// Write quarter bytes between uint8s and uint32s to verify they're stored
@@ -835,74 +829,129 @@ TEST(ProducingAndConsumingByteData) {
bytes.WriteQuarter(1);
bytes.WriteQuarter(0);
bytes.WriteUint8(50);
+
bytes.WriteQuarter(0);
bytes.WriteQuarter(1);
bytes.WriteQuarter(2);
- bytes.WriteUint32(50);
+ bytes.WriteQuarter(3);
+ bytes.WriteVarint32(50);
+
+ // Write a few more quarters followed by a varint.
+ bytes.WriteQuarter(0);
+ bytes.WriteQuarter(1);
+ bytes.WriteQuarter(2);
+ bytes.WriteVarint32(0xff);
+
// End with a lonely quarter.
bytes.WriteQuarter(2);
+#ifdef DEBUG
+ CHECK_EQ(buffer.size(), 42);
+#else
+ CHECK_EQ(buffer.size(), 21);
+#endif
+
+ // Copy the buffer for sanity checks later on.
+ std::vector<uint8_t> copied_buffer(buffer);
+
+ // Move the data from the temporary buffer into the zone for later
+ // serialization.
+ bytes.Finalize(&zone);
+ CHECK_EQ(buffer.size(), 0);
+ CHECK_LT(0, copied_buffer.size());
+
{
- // Serialize as a ZoneConsumedPreParsedScopeData, and read back data.
- i::ZonePreParsedScopeData zone_serialized(&zone, bytes.begin(), bytes.end(),
- 0);
- i::ZoneConsumedPreParsedScopeData::ByteData bytes_for_reading;
- i::ZoneVectorWrapper wrapper(zone_serialized.byte_data());
- i::ZoneConsumedPreParsedScopeData::ByteData::ReadingScope reading_scope(
- &bytes_for_reading, &wrapper);
+ // Serialize as a ZoneConsumedPreparseData, and read back data.
+ i::ZonePreparseData* data_in_zone = bytes.CopyToZone(&zone, 0);
+ i::ZoneConsumedPreparseData::ByteData bytes_for_reading;
+ i::ZoneVectorWrapper wrapper(data_in_zone->byte_data());
+ i::ZoneConsumedPreparseData::ByteData::ReadingScope reading_scope(
+ &bytes_for_reading, wrapper);
+
+ for (int i = 0; i < static_cast<int>(copied_buffer.size()); i++) {
+ CHECK_EQ(copied_buffer.at(i), wrapper.get(i));
+ }
#ifdef DEBUG
- CHECK_EQ(bytes_for_reading.ReadUint32(), 2017);
+ CHECK_EQ(bytes_for_reading.ReadUint32(), saved_size);
#else
- CHECK_EQ(bytes_for_reading.ReadUint32(), 1983);
+ CHECK_EQ(bytes_for_reading.ReadVarint32(), 1983);
#endif
- CHECK_EQ(bytes_for_reading.ReadUint32(), 2147483647);
+ CHECK_EQ(bytes_for_reading.ReadVarint32(), 2147483647);
CHECK_EQ(bytes_for_reading.ReadUint8(), 4);
CHECK_EQ(bytes_for_reading.ReadUint8(), 255);
- CHECK_EQ(bytes_for_reading.ReadUint32(), 0);
+ CHECK_EQ(bytes_for_reading.ReadVarint32(), 0);
CHECK_EQ(bytes_for_reading.ReadUint8(), 0);
CHECK_EQ(bytes_for_reading.ReadUint8(), 100);
+
CHECK_EQ(bytes_for_reading.ReadQuarter(), 3);
CHECK_EQ(bytes_for_reading.ReadQuarter(), 0);
CHECK_EQ(bytes_for_reading.ReadQuarter(), 2);
CHECK_EQ(bytes_for_reading.ReadQuarter(), 1);
CHECK_EQ(bytes_for_reading.ReadQuarter(), 0);
CHECK_EQ(bytes_for_reading.ReadUint8(), 50);
+
CHECK_EQ(bytes_for_reading.ReadQuarter(), 0);
CHECK_EQ(bytes_for_reading.ReadQuarter(), 1);
CHECK_EQ(bytes_for_reading.ReadQuarter(), 2);
- CHECK_EQ(bytes_for_reading.ReadUint32(), 50);
+ CHECK_EQ(bytes_for_reading.ReadQuarter(), 3);
+ CHECK_EQ(bytes_for_reading.ReadVarint32(), 50);
+
+ CHECK_EQ(bytes_for_reading.ReadQuarter(), 0);
+ CHECK_EQ(bytes_for_reading.ReadQuarter(), 1);
+ CHECK_EQ(bytes_for_reading.ReadQuarter(), 2);
+ CHECK_EQ(bytes_for_reading.ReadVarint32(), 0xff);
+
CHECK_EQ(bytes_for_reading.ReadQuarter(), 2);
+ // We should have consumed all data at this point.
+ CHECK(!bytes_for_reading.HasRemainingBytes(1));
}
{
- // Serialize as an OnHeapConsumedPreParsedScopeData, and read back data.
- i::Handle<i::PodArray<uint8_t>> data_on_heap = bytes.Serialize(isolate);
- i::OnHeapConsumedPreParsedScopeData::ByteData bytes_for_reading;
- i::OnHeapConsumedPreParsedScopeData::ByteData::ReadingScope reading_scope(
+ // Serialize as an OnHeapConsumedPreparseData, and read back data.
+ i::Handle<i::PreparseData> data_on_heap = bytes.CopyToHeap(isolate, 0);
+ CHECK_EQ(copied_buffer.size(), data_on_heap->data_length());
+ CHECK_EQ(data_on_heap->children_length(), 0);
+ i::OnHeapConsumedPreparseData::ByteData bytes_for_reading;
+ i::OnHeapConsumedPreparseData::ByteData::ReadingScope reading_scope(
&bytes_for_reading, *data_on_heap);
+ for (int i = 0; i < static_cast<int>(copied_buffer.size()); i++) {
+ CHECK_EQ(copied_buffer[i], data_on_heap->get(i));
+ }
+
#ifdef DEBUG
- CHECK_EQ(bytes_for_reading.ReadUint32(), 2017);
+ CHECK_EQ(bytes_for_reading.ReadUint32(), saved_size);
#else
- CHECK_EQ(bytes_for_reading.ReadUint32(), 1983);
+ CHECK_EQ(bytes_for_reading.ReadVarint32(), 1983);
#endif
- CHECK_EQ(bytes_for_reading.ReadUint32(), 2147483647);
+ CHECK_EQ(bytes_for_reading.ReadVarint32(), 2147483647);
CHECK_EQ(bytes_for_reading.ReadUint8(), 4);
CHECK_EQ(bytes_for_reading.ReadUint8(), 255);
- CHECK_EQ(bytes_for_reading.ReadUint32(), 0);
+ CHECK_EQ(bytes_for_reading.ReadVarint32(), 0);
CHECK_EQ(bytes_for_reading.ReadUint8(), 0);
CHECK_EQ(bytes_for_reading.ReadUint8(), 100);
+
CHECK_EQ(bytes_for_reading.ReadQuarter(), 3);
CHECK_EQ(bytes_for_reading.ReadQuarter(), 0);
CHECK_EQ(bytes_for_reading.ReadQuarter(), 2);
CHECK_EQ(bytes_for_reading.ReadQuarter(), 1);
CHECK_EQ(bytes_for_reading.ReadQuarter(), 0);
CHECK_EQ(bytes_for_reading.ReadUint8(), 50);
+
+ CHECK_EQ(bytes_for_reading.ReadQuarter(), 0);
+ CHECK_EQ(bytes_for_reading.ReadQuarter(), 1);
+ CHECK_EQ(bytes_for_reading.ReadQuarter(), 2);
+ CHECK_EQ(bytes_for_reading.ReadQuarter(), 3);
+ CHECK_EQ(bytes_for_reading.ReadVarint32(), 50);
+
CHECK_EQ(bytes_for_reading.ReadQuarter(), 0);
CHECK_EQ(bytes_for_reading.ReadQuarter(), 1);
CHECK_EQ(bytes_for_reading.ReadQuarter(), 2);
- CHECK_EQ(bytes_for_reading.ReadUint32(), 50);
+ CHECK_EQ(bytes_for_reading.ReadVarint32(), 0xff);
+
CHECK_EQ(bytes_for_reading.ReadQuarter(), 2);
+ // We should have consumed all data at this point.
+ CHECK(!bytes_for_reading.HasRemainingBytes(1));
}
}
diff --git a/deps/v8/test/cctest/parsing/test-scanner-streams.cc b/deps/v8/test/cctest/parsing/test-scanner-streams.cc
index bb05231f08..ef3d0f7df8 100644
--- a/deps/v8/test/cctest/parsing/test-scanner-streams.cc
+++ b/deps/v8/test/cctest/parsing/test-scanner-streams.cc
@@ -751,7 +751,7 @@ TEST(RelocatingCharacterStream) {
CHECK_EQ('a', two_byte_string_stream->Advance());
CHECK_EQ('b', two_byte_string_stream->Advance());
CHECK_EQ(size_t{2}, two_byte_string_stream->pos());
- i::String* raw = *two_byte_string;
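+ // Keep an unhandlified reference so the GC move below is observable.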
+ i::String raw = *two_byte_string;
i_isolate->heap()->CollectGarbage(i::NEW_SPACE,
i::GarbageCollectionReason::kUnknown);
// GC moved the string.
diff --git a/deps/v8/test/cctest/parsing/test-scanner.cc b/deps/v8/test/cctest/parsing/test-scanner.cc
index 56fe0ed83a..df1153793b 100644
--- a/deps/v8/test/cctest/parsing/test-scanner.cc
+++ b/deps/v8/test/cctest/parsing/test-scanner.cc
@@ -9,7 +9,6 @@
#include "src/objects-inl.h"
#include "src/parsing/scanner-character-streams.h"
#include "src/parsing/scanner.h"
-#include "src/unicode-cache.h"
#include "test/cctest/cctest.h"
namespace v8 {
@@ -22,11 +21,9 @@ const char src_simple[] = "function foo() { var x = 2 * a() + b; }";
struct ScannerTestHelper {
ScannerTestHelper() = default;
ScannerTestHelper(ScannerTestHelper&& other) V8_NOEXCEPT
- : unicode_cache(std::move(other.unicode_cache)),
- stream(std::move(other.stream)),
+ : stream(std::move(other.stream)),
scanner(std::move(other.scanner)) {}
- std::unique_ptr<UnicodeCache> unicode_cache;
std::unique_ptr<Utf16CharacterStream> stream;
std::unique_ptr<Scanner> scanner;
@@ -36,10 +33,9 @@ struct ScannerTestHelper {
ScannerTestHelper make_scanner(const char* src) {
ScannerTestHelper helper;
- helper.unicode_cache = std::unique_ptr<UnicodeCache>(new UnicodeCache);
helper.stream = ScannerStream::ForTesting(src);
- helper.scanner = std::unique_ptr<Scanner>(
- new Scanner(helper.unicode_cache.get(), helper.stream.get(), false));
+ helper.scanner =
+ std::unique_ptr<Scanner>(new Scanner(helper.stream.get(), false));
helper.scanner->Initialize();
return helper;
}
@@ -74,7 +70,7 @@ TEST(Bookmarks) {
for (size_t i = 0; i < std::min(bookmark_pos + 10, tokens.size()); i++) {
if (i == bookmark_pos) {
- bookmark.Set();
+ bookmark.Set(scanner->peek_location().beg_pos);
}
CHECK_TOK(tokens[i], scanner->Next());
}
@@ -107,29 +103,5 @@ TEST(AllThePushbacks) {
}
}
-TEST(ContextualKeywordTokens) {
- auto scanner = make_scanner("function of get bla");
-
- // function (regular keyword)
- scanner->Next();
- CHECK_TOK(Token::FUNCTION, scanner->current_token());
- CHECK_TOK(Token::UNINITIALIZED, scanner->current_contextual_token());
-
- // of (contextual keyword)
- scanner->Next();
- CHECK_TOK(Token::IDENTIFIER, scanner->current_token());
- CHECK_TOK(Token::OF, scanner->current_contextual_token());
-
- // get (contextual keyword)
- scanner->Next();
- CHECK_TOK(Token::IDENTIFIER, scanner->current_token());
- CHECK_TOK(Token::GET, scanner->current_contextual_token());
-
- // bla (identifier, not any sort of keyword)
- scanner->Next();
- CHECK_TOK(Token::IDENTIFIER, scanner->current_token());
- CHECK_TOK(Token::UNINITIALIZED, scanner->current_contextual_token());
-}
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/cctest/profiler-extension.cc b/deps/v8/test/cctest/profiler-extension.cc
index 28fb146036..780d182c36 100644
--- a/deps/v8/test/cctest/profiler-extension.cc
+++ b/deps/v8/test/cctest/profiler-extension.cc
@@ -42,15 +42,14 @@ const char* ProfilerExtension::kSource =
v8::Local<v8::FunctionTemplate> ProfilerExtension::GetNativeFunctionTemplate(
v8::Isolate* isolate, v8::Local<v8::String> name) {
- v8::Local<v8::Context> context = isolate->GetCurrentContext();
- if (name->Equals(context, v8_str(isolate, "startProfiling")).FromJust()) {
+ if (name->StrictEquals(v8_str(isolate, "startProfiling"))) {
return v8::FunctionTemplate::New(isolate,
ProfilerExtension::StartProfiling);
}
- if (name->Equals(context, v8_str(isolate, "stopProfiling")).FromJust()) {
+ if (name->StrictEquals(v8_str(isolate, "stopProfiling"))) {
return v8::FunctionTemplate::New(isolate, ProfilerExtension::StopProfiling);
}
- if (name->Equals(context, v8_str(isolate, "collectSample")).FromJust()) {
+ if (name->StrictEquals(v8_str(isolate, "collectSample"))) {
return v8::FunctionTemplate::New(isolate, ProfilerExtension::CollectSample);
}
UNREACHABLE();
diff --git a/deps/v8/test/cctest/scope-test-helper.h b/deps/v8/test/cctest/scope-test-helper.h
index 8dd49970a1..a10d8af96f 100644
--- a/deps/v8/test/cctest/scope-test-helper.h
+++ b/deps/v8/test/cctest/scope-test-helper.h
@@ -24,7 +24,7 @@ class ScopeTestHelper {
baseline->AsDeclarationScope()->function_kind() ==
scope->AsDeclarationScope()->function_kind());
- if (!PreParsedScopeDataBuilder::ScopeNeedsData(baseline)) {
+ if (!PreparseDataBuilder::ScopeNeedsData(baseline)) {
return;
}
diff --git a/deps/v8/test/cctest/test-accessor-assembler.cc b/deps/v8/test/cctest/test-accessor-assembler.cc
index 963277bd33..8096b82b90 100644
--- a/deps/v8/test/cctest/test-accessor-assembler.cc
+++ b/deps/v8/test/cctest/test-accessor-assembler.cc
@@ -8,6 +8,7 @@
#include "src/ic/accessor-assembler.h"
#include "src/ic/stub-cache.h"
#include "src/objects-inl.h"
+#include "src/objects/smi.h"
#include "test/cctest/compiler/code-assembler-tester.h"
#include "test/cctest/compiler/function-tester.h"
@@ -58,7 +59,7 @@ void TestStubCacheOffsetCalculation(StubCache::Table table) {
};
Handle<Map> maps[] = {
- Handle<Map>(nullptr, isolate),
+ Handle<Map>(Map(), isolate),
factory->cell_map(),
Map::Create(isolate, 0),
factory->meta_map(),
@@ -88,7 +89,7 @@ void TestStubCacheOffsetCalculation(StubCache::Table table) {
}
Handle<Object> result = ft.Call(name, map).ToHandleChecked();
- Smi* expected = Smi::FromInt(expected_result & Smi::kMaxValue);
+ Smi expected = Smi::FromInt(expected_result & Smi::kMaxValue);
CHECK_EQ(expected, Smi::cast(*result));
}
}
@@ -227,8 +228,8 @@ TEST(TryProbeStubCache) {
int index = rand_gen.NextInt();
Handle<Name> name = names[index % names.size()];
Handle<JSObject> receiver = receivers[index % receivers.size()];
- MaybeObject* handler = stub_cache.Get(*name, receiver->map());
- if (handler == nullptr) {
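+ // An empty stub cache entry is now signalled by a null address.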
+ MaybeObject handler = stub_cache.Get(*name, receiver->map());
+ if (handler.ptr() == kNullAddress) {
queried_non_existing = true;
} else {
queried_existing = true;
@@ -243,8 +244,8 @@ TEST(TryProbeStubCache) {
int index2 = rand_gen.NextInt();
Handle<Name> name = names[index1 % names.size()];
Handle<JSObject> receiver = receivers[index2 % receivers.size()];
- MaybeObject* handler = stub_cache.Get(*name, receiver->map());
- if (handler == nullptr) {
+ MaybeObject handler = stub_cache.Get(*name, receiver->map());
+ if (handler.ptr() == kNullAddress) {
queried_non_existing = true;
} else {
queried_existing = true;
diff --git a/deps/v8/test/cctest/test-accessors.cc b/deps/v8/test/cctest/test-accessors.cc
index a6a02ba762..d769c1ebd7 100644
--- a/deps/v8/test/cctest/test-accessors.cc
+++ b/deps/v8/test/cctest/test-accessors.cc
@@ -538,7 +538,7 @@ static void StackCheck(Local<String> name,
for (int i = 0; !iter.done(); i++) {
i::StackFrame* frame = iter.frame();
CHECK(i != 0 || (frame->type() == i::StackFrame::EXIT));
- i::Code* code = frame->LookupCode();
+ i::Code code = frame->LookupCode();
CHECK(code->IsCode());
CHECK(code->contains(frame->pc()));
iter.Advance();
diff --git a/deps/v8/test/cctest/test-api-interceptors.cc b/deps/v8/test/cctest/test-api-interceptors.cc
index 3604af020f..2ca473dea7 100644
--- a/deps/v8/test/cctest/test-api-interceptors.cc
+++ b/deps/v8/test/cctest/test-api-interceptors.cc
@@ -2955,8 +2955,7 @@ void SloppyArgsIndexedPropertyEnumerator(
// Have to populate the handle manually, as it's not Cast-able.
i::Handle<i::JSReceiver> o =
v8::Utils::OpenHandle<Object, i::JSReceiver>(result);
- i::Handle<i::JSArray> array(reinterpret_cast<i::JSArray*>(*o),
- o->GetIsolate());
+ i::Handle<i::JSArray> array(i::JSArray::unchecked_cast(*o), o->GetIsolate());
info.GetReturnValue().Set(v8::Utils::ToLocal(array));
}
diff --git a/deps/v8/test/cctest/test-api.cc b/deps/v8/test/cctest/test-api.cc
index 70763547ea..18dfe1c629 100644
--- a/deps/v8/test/cctest/test-api.cc
+++ b/deps/v8/test/cctest/test-api.cc
@@ -40,8 +40,8 @@
#include "include/v8-util.h"
#include "src/api-inl.h"
#include "src/arguments.h"
+#include "src/base/overflowing-math.h"
#include "src/base/platform/platform.h"
-#include "src/code-stubs.h"
#include "src/compilation-cache.h"
#include "src/debug/debug.h"
#include "src/execution.h"
@@ -1263,7 +1263,7 @@ THREADED_PROFILED_TEST(FastReturnValues) {
};
for (size_t i = 0; i < arraysize(int_values); i++) {
for (int modifier = -1; modifier <= 1; modifier++) {
- int int_value = int_values[i] + modifier;
+ int int_value = v8::base::AddWithWraparound(int_values[i], modifier);
// check int32_t
fast_return_value_int32 = int_value;
value = TestFastReturnValues<int32_t>();
@@ -2760,7 +2760,8 @@ TEST(InternalFieldsSubclassing) {
.FromJust());
// Create various levels of subclasses to stress instance size calculation.
const int kMaxNofProperties =
- i::JSObject::kMaxInObjectProperties - nof_embedder_fields;
+ i::JSObject::kMaxInObjectProperties -
+ nof_embedder_fields * i::kEmbedderDataSlotSizeInTaggedSlots;
// Select only a few values to speed up the test.
int sizes[] = {0,
1,
@@ -2869,7 +2870,7 @@ THREADED_TEST(GlobalObjectHasRealIndexedProperty) {
static void CheckAlignedPointerInInternalField(Local<v8::Object> obj,
void* value) {
- CHECK_EQ(0, static_cast<int>(reinterpret_cast<uintptr_t>(value) & 0x1));
+ CHECK(HAS_SMI_TAG(reinterpret_cast<i::Address>(value)));
obj->SetAlignedPointerInInternalField(0, value);
CcTest::CollectAllGarbage();
CHECK_EQ(value, obj->GetAlignedPointerFromInternalField(0));
@@ -3069,7 +3070,7 @@ void GlobalProxyIdentityHash(bool set_in_js) {
int32_t hash1;
if (set_in_js) {
CompileRun("var m = new Set(); m.add(global);");
- i::Object* original_hash = i_global_proxy->GetHash();
+ i::Object original_hash = i_global_proxy->GetHash();
CHECK(original_hash->IsSmi());
hash1 = i::Smi::ToInt(original_hash);
} else {
@@ -3765,8 +3766,7 @@ THREADED_TEST(ArrayBuffer_External) {
CHECK_EQ(0xDD, result->Int32Value(env.local()).FromJust());
}
-
-THREADED_TEST(ArrayBuffer_DisableNeuter) {
+THREADED_TEST(ArrayBuffer_DisableDetach) {
LocalContext env;
v8::Isolate* isolate = env->GetIsolate();
v8::HandleScope handle_scope(isolate);
@@ -3775,29 +3775,26 @@ THREADED_TEST(ArrayBuffer_DisableNeuter) {
memset(my_data.start(), 0, 100);
Local<v8::ArrayBuffer> ab =
v8::ArrayBuffer::New(isolate, my_data.start(), 100);
- CHECK(ab->IsNeuterable());
+ CHECK(ab->IsDetachable());
i::Handle<i::JSArrayBuffer> buf = v8::Utils::OpenHandle(*ab);
- buf->set_is_neuterable(false);
+ buf->set_is_detachable(false);
- CHECK(!ab->IsNeuterable());
+ CHECK(!ab->IsDetachable());
}
-
-static void CheckDataViewIsNeutered(v8::Local<v8::DataView> dv) {
+static void CheckDataViewIsDetached(v8::Local<v8::DataView> dv) {
CHECK_EQ(0, static_cast<int>(dv->ByteLength()));
CHECK_EQ(0, static_cast<int>(dv->ByteOffset()));
}
-
-static void CheckIsNeutered(v8::Local<v8::TypedArray> ta) {
+static void CheckIsDetached(v8::Local<v8::TypedArray> ta) {
CHECK_EQ(0, static_cast<int>(ta->ByteLength()));
CHECK_EQ(0, static_cast<int>(ta->Length()));
CHECK_EQ(0, static_cast<int>(ta->ByteOffset()));
}
-
-static void CheckIsTypedArrayVarNeutered(const char* name) {
+static void CheckIsTypedArrayVarDetached(const char* name) {
i::ScopedVector<char> source(1024);
i::SNPrintF(source,
"%s.byteLength == 0 && %s.byteOffset == 0 && %s.length == 0",
@@ -3805,10 +3802,9 @@ static void CheckIsTypedArrayVarNeutered(const char* name) {
CHECK(CompileRun(source.start())->IsTrue());
v8::Local<v8::TypedArray> ta =
v8::Local<v8::TypedArray>::Cast(CompileRun(name));
- CheckIsNeutered(ta);
+ CheckIsDetached(ta);
}
-
template <typename TypedArray, int kElementSize>
static Local<TypedArray> CreateAndCheck(Local<v8::ArrayBuffer> ab,
int byteOffset, int length) {
@@ -3820,8 +3816,7 @@ static Local<TypedArray> CreateAndCheck(Local<v8::ArrayBuffer> ab,
return ta;
}
-
-THREADED_TEST(ArrayBuffer_NeuteringApi) {
+THREADED_TEST(ArrayBuffer_DetachingApi) {
LocalContext env;
v8::Isolate* isolate = env->GetIsolate();
v8::HandleScope handle_scope(isolate);
@@ -3856,22 +3851,21 @@ THREADED_TEST(ArrayBuffer_NeuteringApi) {
CHECK_EQ(1023, static_cast<int>(dv->ByteLength()));
ScopedArrayBufferContents contents(buffer->Externalize());
- buffer->Neuter();
+ buffer->Detach();
CHECK_EQ(0, static_cast<int>(buffer->ByteLength()));
- CheckIsNeutered(u8a);
- CheckIsNeutered(u8c);
- CheckIsNeutered(i8a);
- CheckIsNeutered(u16a);
- CheckIsNeutered(i16a);
- CheckIsNeutered(u32a);
- CheckIsNeutered(i32a);
- CheckIsNeutered(f32a);
- CheckIsNeutered(f64a);
- CheckDataViewIsNeutered(dv);
-}
-
-
-THREADED_TEST(ArrayBuffer_NeuteringScript) {
+ CheckIsDetached(u8a);
+ CheckIsDetached(u8c);
+ CheckIsDetached(i8a);
+ CheckIsDetached(u16a);
+ CheckIsDetached(i16a);
+ CheckIsDetached(u32a);
+ CheckIsDetached(i32a);
+ CheckIsDetached(f32a);
+ CheckIsDetached(f64a);
+ CheckDataViewIsDetached(dv);
+}
+
+THREADED_TEST(ArrayBuffer_DetachingScript) {
LocalContext env;
v8::Isolate* isolate = env->GetIsolate();
v8::HandleScope handle_scope(isolate);
@@ -3895,22 +3889,22 @@ THREADED_TEST(ArrayBuffer_NeuteringScript) {
v8::Local<v8::DataView> dv = v8::Local<v8::DataView>::Cast(CompileRun("dv"));
ScopedArrayBufferContents contents(ab->Externalize());
- ab->Neuter();
+ ab->Detach();
CHECK_EQ(0, static_cast<int>(ab->ByteLength()));
CHECK_EQ(0, v8_run_int32value(v8_compile("ab.byteLength")));
- CheckIsTypedArrayVarNeutered("u8a");
- CheckIsTypedArrayVarNeutered("u8c");
- CheckIsTypedArrayVarNeutered("i8a");
- CheckIsTypedArrayVarNeutered("u16a");
- CheckIsTypedArrayVarNeutered("i16a");
- CheckIsTypedArrayVarNeutered("u32a");
- CheckIsTypedArrayVarNeutered("i32a");
- CheckIsTypedArrayVarNeutered("f32a");
- CheckIsTypedArrayVarNeutered("f64a");
+ CheckIsTypedArrayVarDetached("u8a");
+ CheckIsTypedArrayVarDetached("u8c");
+ CheckIsTypedArrayVarDetached("i8a");
+ CheckIsTypedArrayVarDetached("u16a");
+ CheckIsTypedArrayVarDetached("i16a");
+ CheckIsTypedArrayVarDetached("u32a");
+ CheckIsTypedArrayVarDetached("i32a");
+ CheckIsTypedArrayVarDetached("f32a");
+ CheckIsTypedArrayVarDetached("f64a");
CHECK(CompileRun("dv.byteLength == 0 && dv.byteOffset == 0")->IsTrue());
- CheckDataViewIsNeutered(dv);
+ CheckDataViewIsDetached(dv);
}
THREADED_TEST(ArrayBuffer_AllocationInformation) {
@@ -4287,7 +4281,7 @@ THREADED_TEST(ResettingGlobalHandle) {
}
v8::internal::GlobalHandles* global_handles =
reinterpret_cast<v8::internal::Isolate*>(isolate)->global_handles();
- int initial_handle_count = global_handles->global_handles_count();
+ size_t initial_handle_count = global_handles->handles_count();
{
v8::HandleScope scope(isolate);
CHECK_EQ(3, v8::Local<String>::New(isolate, global)->Length());
@@ -4296,13 +4290,13 @@ THREADED_TEST(ResettingGlobalHandle) {
v8::HandleScope scope(isolate);
global.Reset(isolate, v8_str("longer"));
}
- CHECK_EQ(global_handles->global_handles_count(), initial_handle_count);
+ CHECK_EQ(global_handles->handles_count(), initial_handle_count);
{
v8::HandleScope scope(isolate);
CHECK_EQ(6, v8::Local<String>::New(isolate, global)->Length());
}
global.Reset();
- CHECK_EQ(global_handles->global_handles_count(), initial_handle_count - 1);
+ CHECK_EQ(global_handles->handles_count(), initial_handle_count - 1);
}
@@ -4315,7 +4309,7 @@ THREADED_TEST(ResettingGlobalHandleToEmpty) {
}
v8::internal::GlobalHandles* global_handles =
reinterpret_cast<v8::internal::Isolate*>(isolate)->global_handles();
- int initial_handle_count = global_handles->global_handles_count();
+ size_t initial_handle_count = global_handles->handles_count();
{
v8::HandleScope scope(isolate);
CHECK_EQ(3, v8::Local<String>::New(isolate, global)->Length());
@@ -4326,7 +4320,7 @@ THREADED_TEST(ResettingGlobalHandleToEmpty) {
global.Reset(isolate, empty);
}
CHECK(global.IsEmpty());
- CHECK_EQ(global_handles->global_handles_count(), initial_handle_count - 1);
+ CHECK_EQ(global_handles->handles_count(), initial_handle_count - 1);
}
@@ -4353,17 +4347,16 @@ THREADED_TEST(Global) {
}
v8::internal::GlobalHandles* global_handles =
reinterpret_cast<v8::internal::Isolate*>(isolate)->global_handles();
- int initial_handle_count = global_handles->global_handles_count();
+ size_t initial_handle_count = global_handles->handles_count();
{
v8::Global<String> unique(isolate, global);
- CHECK_EQ(initial_handle_count + 1, global_handles->global_handles_count());
+ CHECK_EQ(initial_handle_count + 1, global_handles->handles_count());
// Test assignment via Pass
{
v8::Global<String> copy = unique.Pass();
CHECK(unique.IsEmpty());
CHECK(copy == global);
- CHECK_EQ(initial_handle_count + 1,
- global_handles->global_handles_count());
+ CHECK_EQ(initial_handle_count + 1, global_handles->handles_count());
unique = copy.Pass();
}
// Test ctor via Pass
@@ -4371,8 +4364,7 @@ THREADED_TEST(Global) {
v8::Global<String> copy(unique.Pass());
CHECK(unique.IsEmpty());
CHECK(copy == global);
- CHECK_EQ(initial_handle_count + 1,
- global_handles->global_handles_count());
+ CHECK_EQ(initial_handle_count + 1, global_handles->handles_count());
unique = copy.Pass();
}
// Test pass through function call
@@ -4380,19 +4372,18 @@ THREADED_TEST(Global) {
v8::Global<String> copy = PassUnique(unique.Pass());
CHECK(unique.IsEmpty());
CHECK(copy == global);
- CHECK_EQ(initial_handle_count + 1,
- global_handles->global_handles_count());
+ CHECK_EQ(initial_handle_count + 1, global_handles->handles_count());
unique = copy.Pass();
}
- CHECK_EQ(initial_handle_count + 1, global_handles->global_handles_count());
+ CHECK_EQ(initial_handle_count + 1, global_handles->handles_count());
}
// Test pass from function call
{
v8::Global<String> unique = ReturnUnique(isolate, global);
CHECK(unique == global);
- CHECK_EQ(initial_handle_count + 1, global_handles->global_handles_count());
+ CHECK_EQ(initial_handle_count + 1, global_handles->handles_count());
}
- CHECK_EQ(initial_handle_count, global_handles->global_handles_count());
+ CHECK_EQ(initial_handle_count, global_handles->handles_count());
global.Reset();
}
@@ -4586,7 +4577,7 @@ void TestGlobalValueMap() {
Map map(isolate);
v8::internal::GlobalHandles* global_handles =
reinterpret_cast<v8::internal::Isolate*>(isolate)->global_handles();
- int initial_handle_count = global_handles->global_handles_count();
+ size_t initial_handle_count = global_handles->handles_count();
CHECK_EQ(0, static_cast<int>(map.Size()));
{
HandleScope scope(isolate);
@@ -4619,14 +4610,14 @@ void TestGlobalValueMap() {
CHECK(expected2->Equals(env.local(), ref.NewLocal(isolate)).FromJust());
}
}
- CHECK_EQ(initial_handle_count + 1, global_handles->global_handles_count());
+ CHECK_EQ(initial_handle_count + 1, global_handles->handles_count());
if (map.IsWeak()) {
CcTest::PreciseCollectAllGarbage();
} else {
map.Clear();
}
CHECK_EQ(0, static_cast<int>(map.Size()));
- CHECK_EQ(initial_handle_count, global_handles->global_handles_count());
+ CHECK_EQ(initial_handle_count, global_handles->handles_count());
{
HandleScope scope(isolate);
Local<v8::Object> value = NewObjectForIntKey(isolate, templ, 9);
@@ -4634,7 +4625,7 @@ void TestGlobalValueMap() {
map.Clear();
}
CHECK_EQ(0, static_cast<int>(map.Size()));
- CHECK_EQ(initial_handle_count, global_handles->global_handles_count());
+ CHECK_EQ(initial_handle_count, global_handles->handles_count());
}
} // namespace
@@ -4656,7 +4647,7 @@ TEST(PersistentValueVector) {
v8::Isolate* isolate = env->GetIsolate();
v8::internal::GlobalHandles* global_handles =
reinterpret_cast<v8::internal::Isolate*>(isolate)->global_handles();
- int handle_count = global_handles->global_handles_count();
+ size_t handle_count = global_handles->handles_count();
HandleScope scope(isolate);
v8::PersistentValueVector<v8::Object> vector(isolate);
@@ -4685,12 +4676,12 @@ TEST(PersistentValueVector) {
CHECK(obj1->Equals(env.local(), vector.Get(4)).FromJust());
CHECK(obj2->Equals(env.local(), vector.Get(1)).FromJust());
- CHECK_EQ(5 + handle_count, global_handles->global_handles_count());
+ CHECK_EQ(5 + handle_count, global_handles->handles_count());
vector.Clear();
CHECK(vector.IsEmpty());
CHECK_EQ(0, static_cast<int>(vector.Size()));
- CHECK_EQ(handle_count, global_handles->global_handles_count());
+ CHECK_EQ(handle_count, global_handles->handles_count());
}
@@ -7269,7 +7260,63 @@ THREADED_TEST(ExtensibleOnUndetectable) {
ExpectBoolean("undetectable.y == undefined", true);
}
+THREADED_TEST(ConstructCallWithUndetectable) {
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
+
+ Local<v8::FunctionTemplate> desc = v8::FunctionTemplate::New(isolate);
+ desc->InstanceTemplate()->MarkAsUndetectable(); // undetectable
+ desc->InstanceTemplate()->SetCallAsFunctionHandler(ReturnThis); // callable
+
+ Local<v8::Object> obj = desc->GetFunction(env.local())
+ .ToLocalChecked()
+ ->NewInstance(env.local())
+ .ToLocalChecked();
+ CHECK(
+ env->Global()->Set(env.local(), v8_str("undetectable"), obj).FromJust());
+
+ // An undetectable object cannot be called as a constructor.
+ v8::TryCatch try_catch(env->GetIsolate());
+ CHECK(CompileRun("new undetectable()").IsEmpty());
+ CHECK(try_catch.HasCaught());
+ String::Utf8Value exception_value(env->GetIsolate(), try_catch.Exception());
+ CHECK_EQ(0, strcmp("TypeError: undetectable is not a constructor",
+ *exception_value));
+}
+
+static int increment_callback_counter = 0;
+
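+// Construct-call handler: bumps the counter and stores it on the new target.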
+static void IncrementCounterConstructCallback(
+ const v8::FunctionCallbackInfo<v8::Value>& args) {
+ increment_callback_counter++;
+ CHECK(Local<Object>::Cast(args.NewTarget())
+ ->Set(args.GetIsolate()->GetCurrentContext(), v8_str("counter"),
+ v8_num(increment_callback_counter))
+ .FromJust());
+ args.GetReturnValue().Set(args.NewTarget());
+}
+
+THREADED_TEST(SetCallAsFunctionHandlerConstructor) {
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
+
+ Local<v8::FunctionTemplate> desc = v8::FunctionTemplate::New(isolate);
+ desc->InstanceTemplate()->SetCallAsFunctionHandler(
+ IncrementCounterConstructCallback); // callable
+
+ Local<v8::Object> obj = desc->GetFunction(env.local())
+ .ToLocalChecked()
+ ->NewInstance(env.local())
+ .ToLocalChecked();
+ CHECK(env->Global()->Set(env.local(), v8_str("Counter"), obj).FromJust());
+ ExpectInt32("(new Counter()).counter", 1);
+ CHECK_EQ(1, increment_callback_counter);
+ ExpectInt32("(new Counter()).counter", 2);
+ CHECK_EQ(2, increment_callback_counter);
+}
// The point of this test is type checking. We run it only so compilers
// don't complain about an unused function.
TEST(PersistentHandles) {
@@ -7688,15 +7735,13 @@ static int lookup_count = 0;
v8::Local<v8::FunctionTemplate> FunctionExtension::GetNativeFunctionTemplate(
v8::Isolate* isolate, v8::Local<String> name) {
lookup_count++;
- if (name->Equals(isolate->GetCurrentContext(), v8_str("A")).FromJust()) {
+ if (name->StrictEquals(v8_str("A"))) {
return v8::FunctionTemplate::New(isolate, CallFun,
v8::Integer::New(isolate, 8));
- } else if (name->Equals(isolate->GetCurrentContext(), v8_str("B"))
- .FromJust()) {
+ } else if (name->StrictEquals(v8_str("B"))) {
return v8::FunctionTemplate::New(isolate, CallFun,
v8::Integer::New(isolate, 7));
- } else if (name->Equals(isolate->GetCurrentContext(), v8_str("C"))
- .FromJust()) {
+ } else if (name->StrictEquals(v8_str("C"))) {
return v8::FunctionTemplate::New(isolate, CallFun,
v8::Integer::New(isolate, 6));
} else {
@@ -8406,6 +8451,13 @@ THREADED_TEST(StringWrite) {
int charlen;
memset(utf8buf, 0x1, 1000);
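+ // The empty string writes only the NUL terminator: len 1, charlen 0.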
+ len = v8::String::Empty(isolate)->WriteUtf8(isolate, utf8buf, sizeof(utf8buf),
+ &charlen);
+ CHECK_EQ(1, len);
+ CHECK_EQ(0, charlen);
+ CHECK_EQ(0, strcmp(utf8buf, ""));
+
+ memset(utf8buf, 0x1, 1000);
len = str2->WriteUtf8(isolate, utf8buf, sizeof(utf8buf), &charlen);
CHECK_EQ(9, len);
CHECK_EQ(5, charlen);
@@ -8959,13 +9011,13 @@ THREADED_TEST(ToArrayIndex) {
static v8::MaybeLocal<Value> PrepareStackTrace42(v8::Local<Context> context,
v8::Local<Value> error,
- v8::Local<StackTrace> trace) {
+ v8::Local<Array> trace) {
return v8::Number::New(context->GetIsolate(), 42);
}
-static v8::MaybeLocal<Value> PrepareStackTraceThrow(
- v8::Local<Context> context, v8::Local<Value> error,
- v8::Local<StackTrace> trace) {
+static v8::MaybeLocal<Value> PrepareStackTraceThrow(v8::Local<Context> context,
+ v8::Local<Value> error,
+ v8::Local<Array> trace) {
v8::Isolate* isolate = context->GetIsolate();
v8::Local<String> message = v8_str("42");
isolate->ThrowException(v8::Exception::Error(message));
@@ -11806,15 +11858,10 @@ THREADED_TEST(CallAsFunction) {
CHECK(!try_catch.HasCaught());
CHECK_EQ(17, value->Int32Value(context.local()).FromJust());
- // Check that the call-as-function handler cannot be called through
- // new.
+ // Check that the call-as-function handler can be called through new.
value = CompileRun("new obj(43)");
- CHECK(value.IsEmpty());
- CHECK(try_catch.HasCaught());
- String::Utf8Value exception_value(isolate, try_catch.Exception());
- CHECK_EQ(0,
- strcmp("TypeError: obj is not a constructor", *exception_value));
- try_catch.Reset();
+ CHECK(!try_catch.HasCaught());
+ CHECK_EQ(-43, value->Int32Value(context.local()).FromJust());
// Check that the call-as-function handler can be called through
// the API.
@@ -13488,11 +13535,11 @@ TEST(CallHandlerAsFunctionHasNoSideEffectNotSupported) {
CHECK(v8::debug::EvaluateGlobal(isolate, v8_str("obj()"), true).IsEmpty());
// Side-effect-free version is not supported.
- i::FunctionTemplateInfo* cons = i::FunctionTemplateInfo::cast(
+ i::FunctionTemplateInfo cons = i::FunctionTemplateInfo::cast(
v8::Utils::OpenHandle(*templ)->constructor());
i::Heap* heap = reinterpret_cast<i::Isolate*>(isolate)->heap();
- i::CallHandlerInfo* handler_info =
- i::CallHandlerInfo::cast(cons->instance_call_handler());
+ i::CallHandlerInfo handler_info =
+ i::CallHandlerInfo::cast(cons->GetInstanceCallHandler());
CHECK(!handler_info->IsSideEffectFreeCallHandlerInfo());
handler_info->set_map(
i::ReadOnlyRoots(heap).side_effect_free_call_handler_info_map());
@@ -14190,14 +14237,16 @@ THREADED_TEST(LockUnlockLock) {
static int GetGlobalObjectsCount() {
int count = 0;
i::HeapIterator it(CcTest::heap());
- for (i::HeapObject* object = it.next(); object != nullptr; object = it.next())
+ for (i::HeapObject object = it.next(); !object.is_null();
+ object = it.next()) {
if (object->IsJSGlobalObject()) {
- i::JSGlobalObject* g = i::JSGlobalObject::cast(object);
+ i::JSGlobalObject g = i::JSGlobalObject::cast(object);
// Skip dummy global object.
if (g->global_dictionary()->NumberOfElements() != 0) {
count++;
}
}
+ }
return count;
}
@@ -14259,7 +14308,7 @@ TEST(CopyablePersistent) {
v8::Isolate* isolate = context->GetIsolate();
i::GlobalHandles* globals =
reinterpret_cast<i::Isolate*>(isolate)->global_handles();
- int initial_handles = globals->global_handles_count();
+ size_t initial_handles = globals->handles_count();
typedef v8::Persistent<v8::Object, v8::CopyablePersistentTraits<v8::Object> >
CopyableObject;
{
@@ -14268,17 +14317,17 @@ TEST(CopyablePersistent) {
v8::HandleScope scope(isolate);
handle1.Reset(isolate, v8::Object::New(isolate));
}
- CHECK_EQ(initial_handles + 1, globals->global_handles_count());
+ CHECK_EQ(initial_handles + 1, globals->handles_count());
CopyableObject handle2;
handle2 = handle1;
CHECK(handle1 == handle2);
- CHECK_EQ(initial_handles + 2, globals->global_handles_count());
+ CHECK_EQ(initial_handles + 2, globals->handles_count());
CopyableObject handle3(handle2);
CHECK(handle1 == handle3);
- CHECK_EQ(initial_handles + 3, globals->global_handles_count());
+ CHECK_EQ(initial_handles + 3, globals->handles_count());
}
// Verify autodispose
- CHECK_EQ(initial_handles, globals->global_handles_count());
+ CHECK_EQ(initial_handles, globals->handles_count());
}
@@ -14294,7 +14343,7 @@ TEST(WeakCallbackApi) {
v8::Isolate* isolate = context->GetIsolate();
i::GlobalHandles* globals =
reinterpret_cast<i::Isolate*>(isolate)->global_handles();
- int initial_handles = globals->global_handles_count();
+ size_t initial_handles = globals->handles_count();
{
v8::HandleScope scope(isolate);
v8::Local<v8::Object> obj = v8::Object::New(isolate);
@@ -14308,7 +14357,7 @@ TEST(WeakCallbackApi) {
}
CcTest::PreciseCollectAllGarbage();
// Verify disposed.
- CHECK_EQ(initial_handles, globals->global_handles_count());
+ CHECK_EQ(initial_handles, globals->handles_count());
}
@@ -14469,353 +14518,6 @@ THREADED_TEST(NestedHandleScopeAndContexts) {
env->Exit();
}
-
-struct SymbolInfo {
- size_t id;
- size_t size;
- std::string name;
-};
-
-
-class SetFunctionEntryHookTest {
- public:
- SetFunctionEntryHookTest() {
- CHECK_NULL(instance_);
- instance_ = this;
- }
- ~SetFunctionEntryHookTest() {
- CHECK(instance_ == this);
- instance_ = nullptr;
- }
- void Reset() {
- symbols_.clear();
- symbol_locations_.clear();
- invocations_.clear();
- }
- void RunTest();
- void OnJitEvent(const v8::JitCodeEvent* event);
- static void JitEvent(const v8::JitCodeEvent* event) {
- CHECK_NOT_NULL(instance_);
- instance_->OnJitEvent(event);
- }
-
- void OnEntryHook(uintptr_t function,
- uintptr_t return_addr_location);
- static void EntryHook(uintptr_t function,
- uintptr_t return_addr_location) {
- CHECK_NOT_NULL(instance_);
- instance_->OnEntryHook(function, return_addr_location);
- }
-
- static void RuntimeCallback(const v8::FunctionCallbackInfo<v8::Value>& args) {
- CHECK_NOT_NULL(instance_);
- args.GetReturnValue().Set(v8_num(42));
- }
- void RunLoopInNewEnv(v8::Isolate* isolate);
-
- // Records addr as location of symbol.
- void InsertSymbolAt(i::Address addr, SymbolInfo* symbol);
-
- // Finds the symbol containing addr
- SymbolInfo* FindSymbolForAddr(i::Address addr);
- // Returns the number of invocations where the caller name contains
- // \p caller_name and the function name contains \p function_name.
- int CountInvocations(const char* caller_name,
- const char* function_name);
-
- i::Handle<i::JSFunction> foo_func_;
- i::Handle<i::JSFunction> bar_func_;
-
- typedef std::map<size_t, SymbolInfo> SymbolMap;
- typedef std::map<i::Address, SymbolInfo*> SymbolLocationMap;
- typedef std::map<std::pair<SymbolInfo*, SymbolInfo*>, int> InvocationMap;
- SymbolMap symbols_;
- SymbolLocationMap symbol_locations_;
- InvocationMap invocations_;
-
- static SetFunctionEntryHookTest* instance_;
-};
-SetFunctionEntryHookTest* SetFunctionEntryHookTest::instance_ = nullptr;
-
-// Returns true if addr is in the range [start, start+len).
-static bool Overlaps(i::Address start, size_t len, i::Address addr) {
- if (start <= addr && start + len > addr)
- return true;
-
- return false;
-}
-
-void SetFunctionEntryHookTest::InsertSymbolAt(i::Address addr,
- SymbolInfo* symbol) {
- // Insert the symbol at the new location.
- SymbolLocationMap::iterator it =
- symbol_locations_.insert(std::make_pair(addr, symbol)).first;
- // Now erase symbols to the left and right that overlap this one.
- while (it != symbol_locations_.begin()) {
- SymbolLocationMap::iterator left = it;
- --left;
- if (!Overlaps(left->first, left->second->size, addr))
- break;
- symbol_locations_.erase(left);
- }
-
- // Now erase symbols to the left and right that overlap this one.
- while (true) {
- SymbolLocationMap::iterator right = it;
- ++right;
- if (right == symbol_locations_.end())
- break;
- if (!Overlaps(addr, symbol->size, right->first))
- break;
- symbol_locations_.erase(right);
- }
-}
-
-
-void SetFunctionEntryHookTest::OnJitEvent(const v8::JitCodeEvent* event) {
- switch (event->type) {
- case v8::JitCodeEvent::CODE_ADDED: {
- CHECK_NOT_NULL(event->code_start);
- CHECK_NE(0, static_cast<int>(event->code_len));
- CHECK_NOT_NULL(event->name.str);
- size_t symbol_id = symbols_.size();
-
- // Record the new symbol.
- SymbolInfo& info = symbols_[symbol_id];
- info.id = symbol_id;
- info.size = event->code_len;
- info.name.assign(event->name.str, event->name.str + event->name.len);
-
- // And record its location.
- InsertSymbolAt(reinterpret_cast<i::Address>(event->code_start), &info);
- }
- break;
-
- case v8::JitCodeEvent::CODE_MOVED: {
- // We would like to never see code move that we haven't seen before,
- // but the code creation event does not happen until the line endings
- // have been calculated (this is so that we can report the line in the
- // script at which the function source is found, see
- // Compiler::RecordFunctionCompilation) and the line endings
- // calculations can cause a GC, which can move the newly created code
- // before its existence can be logged.
- SymbolLocationMap::iterator it(
- symbol_locations_.find(
- reinterpret_cast<i::Address>(event->code_start)));
- if (it != symbol_locations_.end()) {
- // Found a symbol at this location, move it.
- SymbolInfo* info = it->second;
- symbol_locations_.erase(it);
- InsertSymbolAt(reinterpret_cast<i::Address>(event->new_code_start),
- info);
- }
- }
- break;
- default:
- break;
- }
-}
-
-void SetFunctionEntryHookTest::OnEntryHook(
- uintptr_t function, uintptr_t return_addr_location) {
- // Get the function's code object.
- i::Code* function_code =
- i::Code::GetCodeFromTargetAddress(static_cast<i::Address>(function));
- CHECK_NOT_NULL(function_code);
-
- // Then try and look up the caller's code object.
- i::Address caller = *reinterpret_cast<i::Address*>(return_addr_location);
-
- // Count the invocation.
- SymbolInfo* caller_symbol = FindSymbolForAddr(caller);
- SymbolInfo* function_symbol =
- FindSymbolForAddr(static_cast<i::Address>(function));
- ++invocations_[std::make_pair(caller_symbol, function_symbol)];
-
- if (!bar_func_.is_null() && function_code == bar_func_->code()) {
- // Check that we have a symbol for the "bar" function at the right location.
- SymbolLocationMap::iterator it(
- symbol_locations_.find(function_code->raw_instruction_start()));
- CHECK(it != symbol_locations_.end());
- }
-
- if (!foo_func_.is_null() && function_code == foo_func_->code()) {
- // Check that we have a symbol for "foo" at the right location.
- SymbolLocationMap::iterator it(
- symbol_locations_.find(function_code->raw_instruction_start()));
- CHECK(it != symbol_locations_.end());
- }
-}
-
-
-SymbolInfo* SetFunctionEntryHookTest::FindSymbolForAddr(i::Address addr) {
- SymbolLocationMap::iterator it(symbol_locations_.lower_bound(addr));
- // Do we have a direct hit on a symbol?
- if (it != symbol_locations_.end()) {
- if (it->first == addr)
- return it->second;
- }
-
- // If not a direct hit, it'll have to be the previous symbol.
- if (it == symbol_locations_.begin()) return nullptr;
-
- --it;
- size_t offs = addr - it->first;
- if (offs < it->second->size)
- return it->second;
-
- return nullptr;
-}
-
-
-int SetFunctionEntryHookTest::CountInvocations(
- const char* caller_name, const char* function_name) {
- InvocationMap::iterator it(invocations_.begin());
- int invocations = 0;
- for (; it != invocations_.end(); ++it) {
- SymbolInfo* caller = it->first.first;
- SymbolInfo* function = it->first.second;
-
- // Filter out non-matching functions.
- if (function_name != nullptr) {
- if (function->name.find(function_name) == std::string::npos)
- continue;
- }
-
- // Filter out non-matching callers.
- if (caller_name != nullptr) {
- if (caller == nullptr) continue;
- if (caller->name.find(caller_name) == std::string::npos)
- continue;
- }
-
- // It matches; add the invocation count to the tally.
- invocations += it->second;
- }
-
- return invocations;
-}
-
-void SetFunctionEntryHookTest::RunLoopInNewEnv(v8::Isolate* isolate) {
- v8::HandleScope outer(isolate);
- v8::Local<Context> env = Context::New(isolate);
- env->Enter();
-
- Local<ObjectTemplate> t = ObjectTemplate::New(isolate);
- t->Set(v8_str("asdf"), v8::FunctionTemplate::New(isolate, RuntimeCallback));
- CHECK(env->Global()
- ->Set(env, v8_str("obj"), t->NewInstance(env).ToLocalChecked())
- .FromJust());
-
- const char* script =
- "function bar() {\n"
- " var sum = 0;\n"
- " for (i = 0; i < 100; ++i)\n"
- " sum = foo(i);\n"
- " return sum;\n"
- "}\n"
- "function foo(i) { return i * i; }\n"
- "// Invoke on the runtime function.\n"
- "obj.asdf()";
- CompileRun(script);
- bar_func_ = i::Handle<i::JSFunction>::cast(v8::Utils::OpenHandle(
- *env->Global()->Get(env, v8_str("bar")).ToLocalChecked()));
- CHECK(!bar_func_.is_null());
-
- foo_func_ = i::Handle<i::JSFunction>::cast(v8::Utils::OpenHandle(
- *env->Global()->Get(env, v8_str("foo")).ToLocalChecked()));
- CHECK(!foo_func_.is_null());
-
- v8::Local<v8::Value> value = CompileRun("bar();");
- CHECK(value->IsNumber());
- CHECK_EQ(9801.0, v8::Number::Cast(*value)->Value());
-
- // Test the optimized codegen path.
- value = CompileRun("%OptimizeFunctionOnNextCall(foo);"
- "bar();");
- CHECK(value->IsNumber());
- CHECK_EQ(9801.0, v8::Number::Cast(*value)->Value());
-
- env->Exit();
-}
-
-
-void SetFunctionEntryHookTest::RunTest() {
- // Work in a new isolate throughout.
- v8::Isolate::CreateParams create_params;
- create_params.entry_hook = EntryHook;
- create_params.code_event_handler = JitEvent;
- create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
- v8::Isolate* isolate = v8::Isolate::New(create_params);
-
- {
- v8::Isolate::Scope scope(isolate);
-
- RunLoopInNewEnv(isolate);
-
- // Check the expected invocation counts.
- if (i::FLAG_always_opt) {
- CHECK_EQ(2, CountInvocations(nullptr, "bar"));
- CHECK_EQ(200, CountInvocations("bar", "foo"));
- CHECK_EQ(200, CountInvocations(nullptr, "foo"));
- } else if (i::FLAG_opt) {
- // For ignition we don't see the actual functions being called, instead
- // we see the InterpreterEntryTrampoline at least 102 times
- // (100 unoptimized calls to foo, and 2 calls to bar).
- CHECK_LE(102, CountInvocations(nullptr, "InterpreterEntryTrampoline"));
- // We should also see the calls to the optimized function foo.
- CHECK_EQ(100, CountInvocations(nullptr, "foo"));
- } else {
- // For ignition without an optimizing compiler, we should only see the
- // InterpreterEntryTrampoline.
- // (200 unoptimized calls to foo, and 2 calls to bar).
- CHECK_LE(202, CountInvocations(nullptr, "InterpreterEntryTrampoline"));
- }
-
- // Verify that we have an entry hook on some specific stubs and builtins.
- CHECK_NE(0, CountInvocations(
- nullptr,
- "CEntry_Return1_DontSaveFPRegs_ArgvOnStack_NoBuiltinExit"));
- CHECK_NE(0, CountInvocations(
- nullptr,
- "CEntry_Return1_DontSaveFPRegs_ArgvOnStack_BuiltinExit"));
- CHECK_NE(0, CountInvocations(nullptr, "JSEntryStub"));
- CHECK_NE(0, CountInvocations(nullptr, "JSEntryTrampoline"));
- }
- isolate->Dispose();
-
- Reset();
-
- // Make sure a second isolate is unaffected by the previous entry hook.
- create_params = v8::Isolate::CreateParams();
- create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
- isolate = v8::Isolate::New(create_params);
- {
- v8::Isolate::Scope scope(isolate);
-
- // Reset the entry count to zero and set the entry hook.
- RunLoopInNewEnv(isolate);
-
- // We should record no invocations in this isolate.
- CHECK_EQ(0, static_cast<int>(invocations_.size()));
- }
-
- isolate->Dispose();
-}
-
-
-TEST(SetFunctionEntryHook) {
- // FunctionEntryHook does not work well with experimental natives.
- // Experimental natives are compiled during snapshot deserialization.
- // This test breaks because InstallGetter (function from snapshot that
- // only gets called from experimental natives) is compiled with entry hooks.
- i::FLAG_allow_natives_syntax = true;
- i::FLAG_turbo_inlining = false;
-
- SetFunctionEntryHookTest test;
- test.RunTest();
-}
-
static v8::base::HashMap* code_map = nullptr;
static v8::base::HashMap* jitcode_line_info = nullptr;
static int saw_bar = 0;
@@ -15015,9 +14717,9 @@ UNINITIALIZED_TEST(SetJitCodeEventHandler) {
.ToLocalChecked())));
i::PagedSpace* foo_owning_space = reinterpret_cast<i::PagedSpace*>(
- i::Page::FromAddress(foo->abstract_code()->address())->owner());
+ i::Page::FromHeapObject(foo->abstract_code())->owner());
i::PagedSpace* bar_owning_space = reinterpret_cast<i::PagedSpace*>(
- i::Page::FromAddress(bar->abstract_code()->address())->owner());
+ i::Page::FromHeapObject(bar->abstract_code())->owner());
CHECK_EQ(foo_owning_space, bar_owning_space);
i::heap::SimulateFullSpace(foo_owning_space);
@@ -15843,8 +15545,7 @@ class UC16VectorResource : public v8::String::ExternalStringResource {
i::Vector<const i::uc16> data_;
};
-
-static void MorphAString(i::String* string,
+static void MorphAString(i::String string,
OneByteVectorResource* one_byte_resource,
UC16VectorResource* uc16_resource) {
i::Isolate* isolate = CcTest::i_isolate();
@@ -15855,8 +15556,7 @@ static void MorphAString(i::String* string,
CHECK(string->map() == roots.external_one_byte_string_map());
// Morph external string to be TwoByte string.
string->set_map(roots.external_string_map());
- i::ExternalTwoByteString* morphed =
- i::ExternalTwoByteString::cast(string);
+ i::ExternalTwoByteString morphed = i::ExternalTwoByteString::cast(string);
CcTest::heap()->UpdateExternalString(morphed, string->length(), 0);
morphed->SetResource(isolate, uc16_resource);
} else {
@@ -15864,13 +15564,12 @@ static void MorphAString(i::String* string,
CHECK(string->map() == roots.external_string_map());
// Morph external string to be one-byte string.
string->set_map(roots.external_one_byte_string_map());
- i::ExternalOneByteString* morphed = i::ExternalOneByteString::cast(string);
+ i::ExternalOneByteString morphed = i::ExternalOneByteString::cast(string);
CcTest::heap()->UpdateExternalString(morphed, string->length(), 0);
morphed->SetResource(isolate, one_byte_resource);
}
}
-
// Test that we can still flatten a string if the components it is built up
// from have been turned into 16 bit strings in the mean time.
THREADED_TEST(MorphCompositeStringTest) {
@@ -15907,8 +15606,8 @@ THREADED_TEST(MorphCompositeStringTest) {
CHECK(lhs->IsOneByte());
CHECK(rhs->IsOneByte());
- i::String* ilhs = *v8::Utils::OpenHandle(*lhs);
- i::String* irhs = *v8::Utils::OpenHandle(*rhs);
+ i::String ilhs = *v8::Utils::OpenHandle(*lhs);
+ i::String irhs = *v8::Utils::OpenHandle(*rhs);
MorphAString(ilhs, &one_byte_resource, &uc16_resource);
MorphAString(irhs, &one_byte_resource, &uc16_resource);
@@ -16612,7 +16311,7 @@ TEST(DefineProperty) {
THREADED_TEST(GetCurrentContextWhenNotInContext) {
i::Isolate* isolate = CcTest::i_isolate();
CHECK_NOT_NULL(isolate);
- CHECK_NULL(isolate->context());
+ CHECK(isolate->context().is_null());
v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate);
v8::HandleScope scope(v8_isolate);
// The following should not crash, but return an empty handle.
@@ -16660,7 +16359,7 @@ static void CheckElementValue(i::Isolate* isolate,
int expected,
i::Handle<i::Object> obj,
int offset) {
- i::Object* element =
+ i::Object element =
*i::Object::GetElement(isolate, obj, offset).ToHandleChecked();
CHECK_EQ(expected, i::Smi::ToInt(element));
}
@@ -16910,35 +16609,44 @@ static void ObjectWithExternalArrayTestHelper(Local<Context> context,
array->set(i, static_cast<ElementType>(i));
}
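+ // Temporarily enable natives syntax so the snippets below can force
+ // optimization via %OptimizeFunctionOnNextCall; the flag is restored below.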
+ bool old_natives_flag_sentry = i::FLAG_allow_natives_syntax;
+ i::FLAG_allow_natives_syntax = true;
+
// Test complex assignments
- result = CompileRun("function ee_op_test_complex_func(sum) {"
- " for (var i = 0; i < 40; ++i) {"
- " sum += (ext_array[i] += 1);"
- " sum += (ext_array[i] -= 1);"
- " } "
- " return sum;"
- "}"
- "sum=0;"
- "for (var i=0;i<10000;++i) {"
- " sum=ee_op_test_complex_func(sum);"
- "}"
- "sum;");
- CHECK_EQ(16000000, result->Int32Value(context).FromJust());
+ result = CompileRun(
+ "function ee_op_test_complex_func(sum) {"
+ " for (var i = 0; i < 40; ++i) {"
+ " sum += (ext_array[i] += 1);"
+ " sum += (ext_array[i] -= 1);"
+ " } "
+ " return sum;"
+ "}"
+ "sum=0;"
+ "sum=ee_op_test_complex_func(sum);"
+ "sum=ee_op_test_complex_func(sum);"
+ "%OptimizeFunctionOnNextCall(ee_op_test_complex_func);"
+ "sum=ee_op_test_complex_func(sum);"
+ "sum;");
+ CHECK_EQ(4800, result->Int32Value(context).FromJust());
// Test count operations
- result = CompileRun("function ee_op_test_count_func(sum) {"
- " for (var i = 0; i < 40; ++i) {"
- " sum += (++ext_array[i]);"
- " sum += (--ext_array[i]);"
- " } "
- " return sum;"
- "}"
- "sum=0;"
- "for (var i=0;i<10000;++i) {"
- " sum=ee_op_test_count_func(sum);"
- "}"
- "sum;");
- CHECK_EQ(16000000, result->Int32Value(context).FromJust());
+ result = CompileRun(
+ "function ee_op_test_count_func(sum) {"
+ " for (var i = 0; i < 40; ++i) {"
+ " sum += (++ext_array[i]);"
+ " sum += (--ext_array[i]);"
+ " } "
+ " return sum;"
+ "}"
+ "sum=0;"
+ "sum=ee_op_test_count_func(sum);"
+ "sum=ee_op_test_count_func(sum);"
+ "%OptimizeFunctionOnNextCall(ee_op_test_count_func);"
+ "sum=ee_op_test_count_func(sum);"
+ "sum;");
+ CHECK_EQ(4800, result->Int32Value(context).FromJust());
+
+ i::FLAG_allow_natives_syntax = old_natives_flag_sentry;
result = CompileRun("ext_array[3] = 33;"
"delete ext_array[3];"
@@ -17732,7 +17440,7 @@ TEST(ErrorLevelWarning) {
for (size_t i = 0; i < arraysize(levels); i++) {
i::MessageLocation location(script, 0, 0);
i::Handle<i::String> msg(i_isolate->factory()->InternalizeOneByteString(
- STATIC_CHAR_VECTOR("test")));
+ i::StaticCharVector("test")));
i::Handle<i::JSMessageObject> message =
i::MessageHandler::MakeMessageObject(
i_isolate, i::MessageTemplate::kAsmJsInvalid, &location, msg,
@@ -18458,6 +18166,34 @@ TEST(PromiseRejectIsSharedCrossOrigin) {
CHECK(promise_reject_is_shared_cross_origin);
}
+TEST(PromiseRejectMarkAsHandled) {
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
+
+ isolate->SetPromiseRejectCallback(PromiseRejectCallback);
+
+ ResetPromiseStates();
+
+ // Create promise p0.
+ CompileRun(
+ "var reject; \n"
+ "var p0 = new Promise( \n"
+ " function(res, rej) { \n"
+ " reject = rej; \n"
+ " } \n"
+ "); \n");
+ CHECK(!GetPromise("p0")->HasHandler());
+ CHECK_EQ(0, promise_reject_counter);
+ CHECK_EQ(0, promise_revoke_counter);
+ GetPromise("p0")->MarkAsHandled();
+
+  // Reject p0. promise_reject_counter shouldn't be incremented because
+  // p0 is already marked as handled.
+ CompileRun("reject('ppp');");
+ CHECK_EQ(0, promise_reject_counter);
+ CHECK_EQ(0, promise_revoke_counter);
+}
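
For reference, a minimal embedder-side sketch of the pattern this test checks (assumes a live context handle and the cctest v8_str helper; not a prescribed recipe): marking a promise as handled before rejecting it suppresses the reject notification.

    v8::Local<v8::Promise::Resolver> resolver =
        v8::Promise::Resolver::New(context).ToLocalChecked();
    v8::Local<v8::Promise> promise = resolver->GetPromise();
    promise->MarkAsHandled();  // Opt out of unhandled-rejection callbacks.
    resolver->Reject(context, v8::Exception::Error(v8_str("boom"))).FromJust();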
void PromiseRejectCallbackConstructError(
v8::PromiseRejectMessage reject_message) {
v8::Local<v8::Context> context = CcTest::isolate()->GetCurrentContext();
@@ -19261,7 +18997,7 @@ TEST(GetHeapSpaceStatistics) {
// Force allocation in LO_SPACE so that every space has non-zero size.
v8::internal::Isolate* i_isolate =
reinterpret_cast<v8::internal::Isolate*>(isolate);
- auto unused = i_isolate->factory()->TryNewFixedArray(512 * 1024);
+ auto unused = i_isolate->factory()->TryNewFixedArray(512 * 1024, i::TENURED);
USE(unused);
isolate->GetHeapStatistics(&heap_statistics);
@@ -19276,7 +19012,8 @@ TEST(GetHeapSpaceStatistics) {
v8::HeapSpaceStatistics space_statistics;
isolate->GetHeapSpaceStatistics(&space_statistics, i);
CHECK_NOT_NULL(space_statistics.space_name());
- if (strcmp(space_statistics.space_name(), "new_large_object_space") == 0) {
+ if (strcmp(space_statistics.space_name(), "new_large_object_space") == 0 ||
+ strcmp(space_statistics.space_name(), "code_large_object_space") == 0) {
continue;
}
CHECK_GT(space_statistics.space_size(), 0u);
@@ -19287,6 +19024,8 @@ TEST(GetHeapSpaceStatistics) {
CHECK_GT(space_statistics.physical_space_size(), 0u);
total_physical_size += space_statistics.physical_space_size();
}
+ total_available_size += CcTest::heap()->memory_allocator()->Available();
+
CHECK_EQ(total_size, heap_statistics.total_heap_size());
CHECK_EQ(total_used_size, heap_statistics.used_heap_size());
CHECK_EQ(total_available_size, heap_statistics.total_available_size());
@@ -20615,7 +20354,7 @@ THREADED_TEST(TwoByteStringInOneByteCons) {
// one-byte characters). This is a valid sequence of steps, and it can
// happen in real pages.
CHECK(string->IsOneByteRepresentation());
- i::ConsString* cons = i::ConsString::cast(*string);
+ i::ConsString cons = i::ConsString::cast(*string);
CHECK_EQ(0, cons->second()->length());
CHECK(cons->first()->IsTwoByteRepresentation());
}
@@ -20881,7 +20620,6 @@ TEST(IsolateNewDispose) {
CHECK_NOT_NULL(isolate);
CHECK(current_isolate != isolate);
CHECK(current_isolate == CcTest::isolate());
- CHECK(isolate->GetArrayBufferAllocator() == CcTest::array_buffer_allocator());
isolate->SetFatalErrorHandler(StoringErrorCallback);
last_location = last_message = nullptr;
@@ -21175,7 +20913,6 @@ TEST(IsolateDifferentContexts) {
class InitDefaultIsolateThread : public v8::base::Thread {
public:
enum TestCase {
- SetResourceConstraints,
SetFatalHandler,
SetCounterFunction,
SetCreateHistogramFunction,
@@ -21190,22 +20927,9 @@ class InitDefaultIsolateThread : public v8::base::Thread {
void Run() override {
v8::Isolate::CreateParams create_params;
create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
- switch (testCase_) {
- case SetResourceConstraints: {
- create_params.constraints.set_max_semi_space_size_in_kb(1024);
- create_params.constraints.set_max_old_space_size(8);
- break;
- }
- default:
- break;
- }
v8::Isolate* isolate = v8::Isolate::New(create_params);
isolate->Enter();
switch (testCase_) {
- case SetResourceConstraints:
- // Already handled in pre-Isolate-creation block.
- break;
-
case SetFatalHandler:
isolate->SetFatalErrorHandler(nullptr);
break;
@@ -21242,10 +20966,6 @@ static void InitializeTestHelper(InitDefaultIsolateThread::TestCase testCase) {
CHECK(thread.result());
}
-TEST(InitializeDefaultIsolateOnSecondaryThread_ResourceConstraints) {
- InitializeTestHelper(InitDefaultIsolateThread::SetResourceConstraints);
-}
-
TEST(InitializeDefaultIsolateOnSecondaryThread_FatalHandler) {
InitializeTestHelper(InitDefaultIsolateThread::SetFatalHandler);
}
@@ -21444,7 +21164,17 @@ TEST(PersistentHandleInNewSpaceVisitor) {
CHECK_EQ(42, object2.WrapperClassId());
Visitor42 visitor(&object2);
+#if __clang__
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wdeprecated"
+#endif
+ // VisitHandlesForPartialDependence is marked deprecated. This test will be
+ // removed with the API method.
isolate->VisitHandlesForPartialDependence(&visitor);
+#if __clang__
+#pragma clang diagnostic pop
+#endif
+
CHECK_EQ(1, visitor.counter_);
object1.Reset();
@@ -22111,9 +21841,8 @@ THREADED_TEST(ReadOnlyIndexedProperties) {
.FromJust());
}
-
-static int CountLiveMapsInMapCache(i::Context* context) {
- i::WeakFixedArray* map_cache = i::WeakFixedArray::cast(context->map_cache());
+static int CountLiveMapsInMapCache(i::Context context) {
+ i::WeakFixedArray map_cache = i::WeakFixedArray::cast(context->map_cache());
int length = map_cache->length();
int count = 0;
for (int i = 0; i < length; i++) {
@@ -22747,7 +22476,6 @@ static void Regress808911_MicrotaskCallback(void* data) {
// So here we expect "current context" to be context1 and
// "entered or microtask context" to be context2.
v8::Isolate* isolate = static_cast<v8::Isolate*>(data);
- CHECK(isolate->GetCurrentContext() != isolate->GetEnteredContext());
CHECK(isolate->GetCurrentContext() !=
isolate->GetEnteredOrMicrotaskContext());
}
@@ -22757,7 +22485,6 @@ static void Regress808911_CurrentContextWrapper(
// So here we expect "current context" to be context1 and
// "entered or microtask context" to be context2.
v8::Isolate* isolate = info.GetIsolate();
- CHECK(isolate->GetCurrentContext() != isolate->GetEnteredContext());
CHECK(isolate->GetCurrentContext() !=
isolate->GetEnteredOrMicrotaskContext());
isolate->EnqueueMicrotask(Regress808911_MicrotaskCallback, isolate);
@@ -22921,130 +22648,6 @@ TEST(ScopedMicrotasks) {
namespace {
-int probes_counter = 0;
-int misses_counter = 0;
-int updates_counter = 0;
-
-int* LookupCounter(const char* name) {
- if (strcmp(name, "c:V8.MegamorphicStubCacheProbes") == 0) {
- return &probes_counter;
- } else if (strcmp(name, "c:V8.MegamorphicStubCacheMisses") == 0) {
- return &misses_counter;
- } else if (strcmp(name, "c:V8.MegamorphicStubCacheUpdates") == 0) {
- return &updates_counter;
- }
- return nullptr;
-}
-
-template <typename Stub, typename... Args>
-void Recompile(Args... args) {
- Stub stub(args...);
- stub.DeleteStubFromCacheForTesting();
- stub.GetCode();
-}
-
-void RecompileICStubs() {
- // BUG(5784): We had a list of IC stubs here to recompile. These are now
- // builtins and we can't compile them again (easily). Bug 5784 tracks
- // our progress in finding another way to do this.
-}
-
-} // namespace
-
-#ifdef ENABLE_DISASSEMBLER
-// FLAG_test_primary_stub_cache and FLAG_test_secondary_stub_cache are read
-// only when ENABLE_DISASSEMBLER is not defined.
-
-namespace {
-
-const char* kMegamorphicTestProgram =
- "function CreateClass(name) {\n"
- " var src = \n"
- // Disable constant tracking of "a" field by assigning different Smi values
- // twice to ease megamorphic probes counting.
- " ` function ${name}() { this.a = 0; this.a = 1; };` +\n"
- " ` ${name}.prototype.foo = function() {};` +\n"
- " ` ${name};\\n`;\n"
- " return (0, eval)(src);\n"
- "}\n"
- "function trigger_ics(obj, v) {\n"
- " obj.foo();\n"
- " obj.a = v;\n"
- "};\n"
- "var objs = [];\n"
- "for (var i = 0; i < 50; i++) {\n"
- " var Class = CreateClass('Class' + i);\n"
- " var obj = new Class();\n"
- " objs.push(obj);\n"
- "}\n"
- "for (var i = 0; i < 1000; i++) {\n"
- " for (var obj of objs) {\n"
- " trigger_ics(obj, 1);\n"
- " }\n"
- "}\n";
-
-void TestStubCache(bool primary) {
- i::FLAG_native_code_counters = true;
- if (primary) {
- i::FLAG_test_primary_stub_cache = true;
- } else {
- i::FLAG_test_secondary_stub_cache = true;
- }
- i::FLAG_opt = false;
-
- v8::Isolate::CreateParams create_params;
- create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
- create_params.counter_lookup_callback = LookupCounter;
- v8::Isolate* isolate = v8::Isolate::New(create_params);
-
- {
- v8::Isolate::Scope isolate_scope(isolate);
- LocalContext env(isolate);
- v8::HandleScope scope(isolate);
-
- // Enforce recompilation of IC stubs that access megamorphic stub cache
- // to respect enabled native code counters and stub cache test flags.
- RecompileICStubs();
-
- int initial_probes = probes_counter;
- int initial_misses = misses_counter;
- int initial_updates = updates_counter;
- CompileRun(kMegamorphicTestProgram);
- int probes = probes_counter - initial_probes;
- int misses = misses_counter - initial_misses;
- int updates = updates_counter - initial_updates;
- const int kClassesCount = 50;
- const int kIterationsCount = 1000;
- const int kICKinds = 2; // LoadIC and StoreIC
- CHECK_LE(kClassesCount * kICKinds, updates);
- // Check that updates and misses counts are bounded.
- // If there are too many updates then most likely the stub cache does not
- // work properly.
- CHECK_LE(updates, kClassesCount * 2 * kICKinds);
- CHECK_LE(kICKinds, misses);
- CHECK_LE(misses, kClassesCount * 2 * kICKinds);
- // 2 is for PREMONOMORPHIC and MONOMORPHIC states,
- // 4 is for POLYMORPHIC states,
- // and all the others probes are for MEGAMORPHIC state.
- CHECK_EQ((kIterationsCount * kClassesCount - 2 - 4) * kICKinds, probes);
- }
- isolate->Dispose();
-}
-
-} // namespace
-
-UNINITIALIZED_TEST(PrimaryStubCache) {
- TestStubCache(true);
-}
-
-UNINITIALIZED_TEST(SecondaryStubCache) {
- TestStubCache(false);
-}
-
-#endif // ENABLE_DISASSEMBLER
-
-namespace {
-
void AssertCowElements(bool expected, const char* source) {
Local<Value> object = CompileRun(source);
i::Handle<i::JSObject> array =
@@ -23908,7 +23511,6 @@ TEST(AccessCheckThrows) {
CheckCorrectThrow("has_own_property(other, 'x')");
CheckCorrectThrow("%GetProperty(other, 'x')");
CheckCorrectThrow("%SetKeyedProperty(other, 'x', 'foo', 0)");
- CheckCorrectThrow("%AddNamedProperty(other, 'x', 'foo', 1)");
CheckCorrectThrow("%SetNamedProperty(other, 'y', 'foo', 1)");
STATIC_ASSERT(static_cast<int>(i::LanguageMode::kSloppy) == 0);
STATIC_ASSERT(static_cast<int>(i::LanguageMode::kStrict) == 1);
@@ -23928,110 +23530,6 @@ TEST(AccessCheckThrows) {
isolate->SetFailedAccessCheckCallbackFunction(nullptr);
}
-TEST(AccessCheckInIC) {
- i::FLAG_native_code_counters = true;
- i::FLAG_opt = false;
-
- v8::Isolate::CreateParams create_params;
- create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
- create_params.counter_lookup_callback = LookupCounter;
- v8::Isolate* isolate = v8::Isolate::New(create_params);
-
- {
- v8::Isolate::Scope isolate_scope(isolate);
- LocalContext env(isolate);
- v8::HandleScope scope(isolate);
-
- // Enforce recompilation of IC stubs that access megamorphic stub cache
- // to respect enabled native code counters and stub cache test flags.
- RecompileICStubs();
-
- // Create an ObjectTemplate for global objects and install access
- // check callbacks that will block access.
- v8::Local<v8::ObjectTemplate> global_template =
- v8::ObjectTemplate::New(isolate);
- global_template->SetAccessCheckCallback(AccessCounter);
-
- // Create a context and set an x property on its global object.
- LocalContext context0(isolate, nullptr, global_template);
- v8::Local<v8::Object> global0 = context0->Global();
- CHECK(global0->Set(context0.local(), v8_str("x"), global0).FromJust());
-
- // Create a context with a different security token so that the
- // failed access check callback will be called on each access.
- LocalContext context1(isolate, nullptr, global_template);
- CHECK(context1->Global()
- ->Set(context1.local(), v8_str("other"), global0)
- .FromJust());
-
- // Set different security tokens.
- Local<Value> token0 = v8_str("token0");
- context0.local()->SetSecurityToken(token0);
- context1.local()->SetSecurityToken(v8_str("token1"));
-
- int initial_probes = probes_counter;
- int initial_misses = misses_counter;
- int initial_updates = updates_counter;
- access_count = 0;
-
- // Create megamorphic load ic with a handler for "global0.x" compiled for
- // context0.
- CompileRun(context0.local(),
- "Number(1).__proto__.x = null;\n"
- "String(1).__proto__.x = null;\n"
- "function get0(o) { return o.x; };\n"
- "get0({x:1});\n" // premonomorphic
- "get0({x:1,a:0});\n" // monomorphic
- "get0({x:1,b:0});\n" // polymorphic
- "get0('str');\n"
- "get0(1.1);\n"
- "get0(this);\n" // megamorphic
- "");
- CHECK_EQ(0, probes_counter - initial_probes);
- CHECK_EQ(0, misses_counter - initial_misses);
- CHECK_EQ(5, updates_counter - initial_updates);
-
- // Create megamorphic load ic in context1.
- CompileRun(context1.local(),
- "function get1(o) { return o.x; };\n"
- "get1({x:1});\n" // premonomorphic
- "get1({x:1,a:0});\n" // monomorphic
- "get1({x:1,b:0});\n" // polymorphic
- "get1({x:1,c:0});\n"
- "get1({x:1,d:0});\n"
- "get1({x:1,e:0});\n" // megamorphic
- "");
- CHECK_EQ(0, access_count);
- CHECK_EQ(0, probes_counter - initial_probes);
- CHECK_EQ(0, misses_counter - initial_misses);
- CHECK_EQ(10, updates_counter - initial_updates);
-
- // Feed the |other| to the load ic and ensure that it doesn't pick the
- // handler for "global0.x" compiled for context0 from the megamorphic
- // cache but create another handler for "global0.x" compiled for context1
- // and ensure the access check callback is triggered.
- CompileRun(context1.local(), "get1(other)");
- CHECK_EQ(1, access_count); // Access check callback must be triggered.
-
- // Feed the primitive objects to the load ic and ensure that it doesn't
- // pick handlers for primitive maps from the megamorphic stub cache even
- // if the security token matches.
- context1.local()->SetSecurityToken(token0);
- CHECK(CompileRun(context1.local(), "get1(1.1)")
- .ToLocalChecked()
- ->IsUndefined());
- CHECK(CompileRun(context1.local(), "get1('str')")
- .ToLocalChecked()
- ->IsUndefined());
-
- CHECK_EQ(1, access_count); // Access check callback must be triggered.
- CHECK_EQ(3, probes_counter - initial_probes);
- CHECK_EQ(0, misses_counter - initial_misses);
- CHECK_EQ(13, updates_counter - initial_updates);
- }
- isolate->Dispose();
-}
-
class RequestInterruptTestBase {
public:
RequestInterruptTestBase()
@@ -24433,6 +23931,143 @@ THREADED_TEST(FunctionNew) {
CHECK(v8::Integer::New(isolate, 17)->Equals(env.local(), result2).FromJust());
}
+namespace {
+
+void Verify(v8::Isolate* isolate, Local<v8::Object> obj) {
+#if VERIFY_HEAP
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ i::Handle<i::JSReceiver> i_obj = v8::Utils::OpenHandle(*obj);
+ i_obj->ObjectVerify(i_isolate);
+#endif
+}
+
+} // namespace
+
+THREADED_TEST(ObjectNew) {
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
+ {
+ // Verify that Object::New(null) produces an object with a null
+ // [[Prototype]].
+ Local<v8::Object> obj =
+ v8::Object::New(isolate, v8::Null(isolate), nullptr, nullptr, 0);
+ CHECK(obj->GetPrototype()->IsNull());
+ Verify(isolate, obj);
+ Local<Array> keys = obj->GetOwnPropertyNames(env.local()).ToLocalChecked();
+ CHECK_EQ(0, keys->Length());
+ }
+ {
+ // Verify that Object::New(proto) produces an object with
+    // proto as its [[Prototype]].
+ Local<v8::Object> proto = v8::Object::New(isolate);
+ Local<v8::Object> obj =
+ v8::Object::New(isolate, proto, nullptr, nullptr, 0);
+ Verify(isolate, obj);
+ CHECK(obj->GetPrototype()->SameValue(proto));
+ }
+ {
+ // Verify that the properties are installed correctly.
+ Local<v8::Name> names[3] = {v8_str("a"), v8_str("b"), v8_str("c")};
+ Local<v8::Value> values[3] = {v8_num(1), v8_num(2), v8_num(3)};
+ Local<v8::Object> obj = v8::Object::New(isolate, v8::Null(isolate), names,
+ values, arraysize(values));
+ Verify(isolate, obj);
+ Local<Array> keys = obj->GetOwnPropertyNames(env.local()).ToLocalChecked();
+ CHECK_EQ(arraysize(names), keys->Length());
+ for (uint32_t i = 0; i < arraysize(names); ++i) {
+ CHECK(names[i]->SameValue(keys->Get(env.local(), i).ToLocalChecked()));
+ CHECK(values[i]->SameValue(
+ obj->Get(env.local(), names[i]).ToLocalChecked()));
+ }
+ }
+ {
+ // Same as above, but with non-null prototype.
+ Local<v8::Object> proto = v8::Object::New(isolate);
+ Local<v8::Name> names[3] = {v8_str("x"), v8_str("y"), v8_str("z")};
+ Local<v8::Value> values[3] = {v8_num(1), v8_num(2), v8_num(3)};
+ Local<v8::Object> obj =
+ v8::Object::New(isolate, proto, names, values, arraysize(values));
+ CHECK(obj->GetPrototype()->SameValue(proto));
+ Verify(isolate, obj);
+ Local<Array> keys = obj->GetOwnPropertyNames(env.local()).ToLocalChecked();
+ CHECK_EQ(arraysize(names), keys->Length());
+ for (uint32_t i = 0; i < arraysize(names); ++i) {
+ CHECK(names[i]->SameValue(keys->Get(env.local(), i).ToLocalChecked()));
+ CHECK(values[i]->SameValue(
+ obj->Get(env.local(), names[i]).ToLocalChecked()));
+ }
+ }
+ {
+ // This has to work with duplicate names too.
+ Local<v8::Name> names[3] = {v8_str("a"), v8_str("a"), v8_str("a")};
+ Local<v8::Value> values[3] = {v8_num(1), v8_num(2), v8_num(3)};
+ Local<v8::Object> obj = v8::Object::New(isolate, v8::Null(isolate), names,
+ values, arraysize(values));
+ Verify(isolate, obj);
+ Local<Array> keys = obj->GetOwnPropertyNames(env.local()).ToLocalChecked();
+ CHECK_EQ(1, keys->Length());
+ CHECK(v8_str("a")->SameValue(keys->Get(env.local(), 0).ToLocalChecked()));
+ CHECK(v8_num(3)->SameValue(
+ obj->Get(env.local(), v8_str("a")).ToLocalChecked()));
+ }
+ {
+ // This has to work with array indices too.
+ Local<v8::Name> names[2] = {v8_str("0"), v8_str("1")};
+ Local<v8::Value> values[2] = {v8_num(0), v8_num(1)};
+ Local<v8::Object> obj = v8::Object::New(isolate, v8::Null(isolate), names,
+ values, arraysize(values));
+ Verify(isolate, obj);
+ Local<Array> keys = obj->GetOwnPropertyNames(env.local()).ToLocalChecked();
+ CHECK_EQ(arraysize(names), keys->Length());
+ for (uint32_t i = 0; i < arraysize(names); ++i) {
+ CHECK(v8::Number::New(isolate, i)
+ ->SameValue(keys->Get(env.local(), i).ToLocalChecked()));
+ CHECK(values[i]->SameValue(obj->Get(env.local(), i).ToLocalChecked()));
+ }
+ }
+ {
+ // This has to work with mixed array indices / property names too.
+ Local<v8::Name> names[2] = {v8_str("0"), v8_str("x")};
+ Local<v8::Value> values[2] = {v8_num(42), v8_num(24)};
+ Local<v8::Object> obj = v8::Object::New(isolate, v8::Null(isolate), names,
+ values, arraysize(values));
+ Verify(isolate, obj);
+ Local<Array> keys = obj->GetOwnPropertyNames(env.local()).ToLocalChecked();
+ CHECK_EQ(arraysize(names), keys->Length());
+ // 0 -> 42
+ CHECK(v8_num(0)->SameValue(keys->Get(env.local(), 0).ToLocalChecked()));
+ CHECK(
+ values[0]->SameValue(obj->Get(env.local(), names[0]).ToLocalChecked()));
+ // "x" -> 24
+ CHECK(v8_str("x")->SameValue(keys->Get(env.local(), 1).ToLocalChecked()));
+ CHECK(
+ values[1]->SameValue(obj->Get(env.local(), names[1]).ToLocalChecked()));
+ }
+ {
+    // Verify that this also works for several thousand (10 * 1024) properties.
+ size_t const kLength = 10 * 1024;
+ Local<v8::Name> names[kLength];
+ Local<v8::Value> values[kLength];
+ for (size_t i = 0; i < arraysize(names); ++i) {
+ std::ostringstream ost;
+ ost << "a" << i;
+ names[i] = v8_str(ost.str().c_str());
+ values[i] = v8_num(static_cast<double>(i));
+ }
+ Local<v8::Object> obj = v8::Object::New(isolate, v8::Null(isolate), names,
+ values, arraysize(names));
+ Verify(isolate, obj);
+ Local<Array> keys = obj->GetOwnPropertyNames(env.local()).ToLocalChecked();
+ CHECK_EQ(arraysize(names), keys->Length());
+ for (uint32_t i = 0; i < arraysize(names); ++i) {
+ CHECK(names[i]->SameValue(keys->Get(env.local(), i).ToLocalChecked()));
+ CHECK(values[i]->SameValue(
+ obj->Get(env.local(), names[i]).ToLocalChecked()));
+ }
+ }
+}
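
A hedged sketch of the Object::New overload these cases exercise, distilled from the checks above: a prototype (or null) plus parallel name/value arrays, where later duplicates win and integer-like names become array indices.

    v8::Local<v8::Name> ks[2] = {v8_str("id"), v8_str("count")};
    v8::Local<v8::Value> vs[2] = {v8_num(7), v8_num(42)};
    v8::Local<v8::Object> dict =
        v8::Object::New(isolate, v8::Null(isolate), ks, vs, arraysize(ks));
    // dict has a null [[Prototype]] and exactly the two properties above.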
+
TEST(EscapableHandleScope) {
HandleScope outer_scope(CcTest::isolate());
LocalContext context;
@@ -25074,7 +24709,7 @@ TEST(Promises) {
CHECK(r->IsPromise());
}
-
+// Promise.Then(on_fulfilled)
TEST(PromiseThen) {
LocalContext context;
v8::Isolate* isolate = context->GetIsolate();
@@ -25171,6 +24806,121 @@ TEST(PromiseThen) {
.FromJust());
}
+// Promise.Then(on_fulfilled, on_rejected)
+TEST(PromiseThen2) {
+ LocalContext context;
+ v8::Isolate* isolate = context->GetIsolate();
+ isolate->SetMicrotasksPolicy(v8::MicrotasksPolicy::kExplicit);
+ v8::HandleScope scope(isolate);
+ Local<Object> global = context->Global();
+
+ // Creation.
+ Local<v8::Promise::Resolver> pr =
+ v8::Promise::Resolver::New(context.local()).ToLocalChecked();
+ Local<v8::Promise> p = pr->GetPromise();
+
+ CHECK(p->IsPromise());
+
+ pr->Resolve(context.local(), v8::Integer::New(isolate, 1)).FromJust();
+
+ // Chaining non-pending promises.
+ CompileRun(
+ "var x1 = 0;\n"
+ "var x2 = 0;\n"
+ "function f1(x) { x1 = x; return x+1 };\n"
+ "function f2(x) { x2 = x; return x+1 };\n"
+ "function f3(x) { throw x + 100 };\n");
+ Local<Function> f1 = Local<Function>::Cast(
+ global->Get(context.local(), v8_str("f1")).ToLocalChecked());
+ Local<Function> f2 = Local<Function>::Cast(
+ global->Get(context.local(), v8_str("f2")).ToLocalChecked());
+ Local<Function> f3 = Local<Function>::Cast(
+ global->Get(context.local(), v8_str("f3")).ToLocalChecked());
+
+ // Then
+ CompileRun("x1 = x2 = 0;");
+ Local<v8::Promise> a = p->Then(context.local(), f1, f2).ToLocalChecked();
+ CHECK_EQ(0, global->Get(context.local(), v8_str("x1"))
+ .ToLocalChecked()
+ ->Int32Value(context.local())
+ .FromJust());
+ isolate->RunMicrotasks();
+ CHECK_EQ(1, global->Get(context.local(), v8_str("x1"))
+ .ToLocalChecked()
+ ->Int32Value(context.local())
+ .FromJust());
+ CHECK_EQ(0, global->Get(context.local(), v8_str("x2"))
+ .ToLocalChecked()
+ ->Int32Value(context.local())
+ .FromJust());
+
+ Local<v8::Promise> b = a->Then(context.local(), f3, f2).ToLocalChecked();
+ isolate->RunMicrotasks();
+ CHECK_EQ(1, global->Get(context.local(), v8_str("x1"))
+ .ToLocalChecked()
+ ->Int32Value(context.local())
+ .FromJust());
+ CHECK_EQ(0, global->Get(context.local(), v8_str("x2"))
+ .ToLocalChecked()
+ ->Int32Value(context.local())
+ .FromJust());
+
+ Local<v8::Promise> c = b->Then(context.local(), f1, f2).ToLocalChecked();
+ isolate->RunMicrotasks();
+ CHECK_EQ(1, global->Get(context.local(), v8_str("x1"))
+ .ToLocalChecked()
+ ->Int32Value(context.local())
+ .FromJust());
+ CHECK_EQ(102, global->Get(context.local(), v8_str("x2"))
+ .ToLocalChecked()
+ ->Int32Value(context.local())
+ .FromJust());
+
+ v8::Local<v8::Promise> d = c->Then(context.local(), f1, f2).ToLocalChecked();
+ isolate->RunMicrotasks();
+ CHECK_EQ(103, global->Get(context.local(), v8_str("x1"))
+ .ToLocalChecked()
+ ->Int32Value(context.local())
+ .FromJust());
+ CHECK_EQ(102, global->Get(context.local(), v8_str("x2"))
+ .ToLocalChecked()
+ ->Int32Value(context.local())
+ .FromJust());
+
+ v8::Local<v8::Promise> e = d->Then(context.local(), f3, f2).ToLocalChecked();
+ isolate->RunMicrotasks();
+ CHECK_EQ(103, global->Get(context.local(), v8_str("x1"))
+ .ToLocalChecked()
+ ->Int32Value(context.local())
+ .FromJust());
+ CHECK_EQ(102, global->Get(context.local(), v8_str("x2"))
+ .ToLocalChecked()
+ ->Int32Value(context.local())
+ .FromJust());
+
+ v8::Local<v8::Promise> f = e->Then(context.local(), f1, f3).ToLocalChecked();
+ isolate->RunMicrotasks();
+ CHECK_EQ(103, global->Get(context.local(), v8_str("x1"))
+ .ToLocalChecked()
+ ->Int32Value(context.local())
+ .FromJust());
+ CHECK_EQ(102, global->Get(context.local(), v8_str("x2"))
+ .ToLocalChecked()
+ ->Int32Value(context.local())
+ .FromJust());
+
+ f->Then(context.local(), f1, f2).ToLocalChecked();
+ isolate->RunMicrotasks();
+ CHECK_EQ(103, global->Get(context.local(), v8_str("x1"))
+ .ToLocalChecked()
+ ->Int32Value(context.local())
+ .FromJust());
+ CHECK_EQ(304, global->Get(context.local(), v8_str("x2"))
+ .ToLocalChecked()
+ ->Int32Value(context.local())
+ .FromJust());
+}
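
In miniature, the contract the chain above verifies (a sketch reusing the test's f1 and f3 handlers, not additional coverage): on_fulfilled runs for fulfillments, on_rejected for rejections, and a throw inside either handler rejects the derived promise.

    v8::Local<v8::Promise> derived =
        p->Then(context.local(), f1, f3).ToLocalChecked();
    isolate->RunMicrotasks();  // Policy is kExplicit: drain on demand.
    // p is already fulfilled, so f1 ran and f3 did not; a rejected promise
    // would invoke f3 instead.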
+
TEST(PromiseStateAndValue) {
LocalContext context;
v8::Isolate* isolate = context->GetIsolate();
@@ -25262,7 +25012,6 @@ TEST(DisallowJavascriptExecutionScope) {
CompileRun("2+2");
}
-
TEST(AllowJavascriptExecutionScope) {
LocalContext context;
v8::Isolate* isolate = context->GetIsolate();
@@ -25276,7 +25025,6 @@ TEST(AllowJavascriptExecutionScope) {
}
}
-
TEST(ThrowOnJavascriptExecution) {
LocalContext context;
v8::Isolate* isolate = context->GetIsolate();
@@ -25288,6 +25036,41 @@ TEST(ThrowOnJavascriptExecution) {
CHECK(try_catch.HasCaught());
}
+namespace {
+
+class MockPlatform : public TestPlatform {
+ public:
+ MockPlatform() : old_platform_(i::V8::GetCurrentPlatform()) {
+ // Now that it's completely constructed, make this the current platform.
+ i::V8::SetPlatformForTesting(this);
+ }
+ ~MockPlatform() override { i::V8::SetPlatformForTesting(old_platform_); }
+
+ bool dump_without_crashing_called() const {
+ return dump_without_crashing_called_;
+ }
+
+ void DumpWithoutCrashing() override { dump_without_crashing_called_ = true; }
+
+ private:
+ v8::Platform* old_platform_;
+ bool dump_without_crashing_called_ = false;
+};
+
+} // namespace
+
+TEST(DumpOnJavascriptExecution) {
+ MockPlatform platform;
+
+ LocalContext context;
+ v8::Isolate* isolate = context->GetIsolate();
+ v8::HandleScope scope(isolate);
+ v8::Isolate::DisallowJavascriptExecutionScope throw_js(
+ isolate, v8::Isolate::DisallowJavascriptExecutionScope::DUMP_ON_FAILURE);
+ CHECK(!platform.dump_without_crashing_called());
+ CompileRun("1+1");
+ CHECK(platform.dump_without_crashing_called());
+}
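
For context, a short sketch of the scope's other failure modes (the enum also offers THROW_ON_FAILURE and CRASH_ON_FAILURE; only DUMP_ON_FAILURE routes through Platform::DumpWithoutCrashing as mocked above). This mirrors the ThrowOnJavascriptExecution test earlier in the file.

    v8::Isolate::DisallowJavascriptExecutionScope no_js(
        isolate,
        v8::Isolate::DisallowJavascriptExecutionScope::THROW_ON_FAILURE);
    v8::TryCatch try_catch(isolate);
    CompileRun("1+1");             // No JS executes; an exception is thrown.
    CHECK(try_catch.HasCaught());  // Catchable, unlike CRASH_ON_FAILURE.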
TEST(Regress354123) {
LocalContext current;
@@ -26532,7 +26315,8 @@ TEST(StringConcatOverflow) {
CHECK(!try_catch.HasCaught());
}
-TEST(TurboAsmDisablesNeuter) {
+TEST(TurboAsmDisablesDetach) {
+#ifndef V8_LITE_MODE
i::FLAG_opt = true;
i::FLAG_allow_natives_syntax = true;
v8::V8::Initialize();
@@ -26552,7 +26336,7 @@ TEST(TurboAsmDisablesNeuter) {
"buffer";
v8::Local<v8::ArrayBuffer> result = CompileRun(load).As<v8::ArrayBuffer>();
- CHECK(!result->IsNeuterable());
+ CHECK(!result->IsDetachable());
const char* store =
"function Module(stdlib, foreign, heap) {"
@@ -26568,10 +26352,10 @@ TEST(TurboAsmDisablesNeuter) {
"buffer";
result = CompileRun(store).As<v8::ArrayBuffer>();
- CHECK(!result->IsNeuterable());
+ CHECK(!result->IsDetachable());
+#endif // V8_LITE_MODE
}
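
Design note: optimized asm.js code may embed the heap buffer's address directly, so V8 pins such buffers and reports them as non-detachable rather than risk dangling accesses after a detach. The V8_LITE_MODE guard reflects that lite mode ships without the optimizing compiler this test exercises.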
-
TEST(ClassPrototypeCreationContext) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope handle_scope(isolate);
@@ -26751,42 +26535,6 @@ TEST(ExtrasBindingObject) {
}
-TEST(ExperimentalExtras) {
- i::FLAG_experimental_extras = true;
-
- v8::Isolate* isolate = CcTest::isolate();
- v8::HandleScope handle_scope(isolate);
- LocalContext env;
-
- // standalone.gypi ensures we include the test-experimental-extra.js file,
- // which should export the tested functions.
- v8::Local<v8::Object> binding = env->GetExtrasBindingObject();
-
- auto func =
- binding->Get(env.local(), v8_str("testExperimentalExtraShouldReturnTen"))
- .ToLocalChecked()
- .As<v8::Function>();
- auto undefined = v8::Undefined(isolate);
- auto result = func->Call(env.local(), undefined, 0, {})
- .ToLocalChecked()
- .As<v8::Number>();
- CHECK_EQ(10, result->Int32Value(env.local()).FromJust());
-
- v8::Local<v8::FunctionTemplate> runtimeFunction =
- v8::FunctionTemplate::New(isolate, ExtrasBindingTestRuntimeFunction);
- binding->Set(env.local(), v8_str("runtime"),
- runtimeFunction->GetFunction(env.local()).ToLocalChecked())
- .FromJust();
- func = binding->Get(env.local(),
- v8_str("testExperimentalExtraShouldCallToRuntime"))
- .ToLocalChecked()
- .As<v8::Function>();
- result = func->Call(env.local(), undefined, 0, {})
- .ToLocalChecked()
- .As<v8::Number>();
- CHECK_EQ(7, result->Int32Value(env.local()).FromJust());
-}
-
TEST(ExtrasCreatePromise) {
i::FLAG_allow_natives_syntax = true;
LocalContext context;
@@ -26974,6 +26722,11 @@ TEST(ExtrasUtilsObject) {
.ToLocalChecked()
.As<v8::Boolean>();
CHECK_EQ(false, thenable_is_promise->Value());
+
+ auto uncurry_this = result->Get(env.local(), v8_str("uncurryThis"))
+ .ToLocalChecked()
+ .As<v8::Boolean>();
+ CHECK_EQ(true, uncurry_this->Value());
}
@@ -28055,7 +27808,7 @@ void CheckContexts(v8::Isolate* isolate) {
->GetEmbedderData(1)
.As<v8::Integer>()
->Value());
- CHECK_EQ(EnteredContext, isolate->GetEnteredContext()
+ CHECK_EQ(EnteredContext, isolate->GetEnteredOrMicrotaskContext()
->GetEmbedderData(1)
.As<v8::Integer>()
->Value());
@@ -28354,8 +28107,7 @@ bool wasm_streaming_data_got_collected = false;
void WasmStreamingTestFinalizer(const v8::WeakCallbackInfo<void>& data) {
CHECK(!wasm_streaming_data_got_collected);
wasm_streaming_data_got_collected = true;
- i::JSObject** p = reinterpret_cast<i::JSObject**>(data.GetParameter());
- i::GlobalHandles::Destroy(reinterpret_cast<i::Object**>(p));
+ i::GlobalHandles::Destroy(reinterpret_cast<i::Address*>(data.GetParameter()));
}
void WasmStreamingCallbackTestCallbackIsCalled(
@@ -28484,7 +28236,7 @@ struct AtomicsWaitCallbackInfo {
Local<v8::SharedArrayBuffer> expected_sab;
v8::Isolate::AtomicsWaitEvent expected_event;
double expected_timeout;
- int32_t expected_value;
+ int64_t expected_value;
size_t expected_offset;
size_t ncalls = 0;
@@ -28506,7 +28258,7 @@ class StopAtomicsWaitThread : public v8::base::Thread {
void AtomicsWaitCallbackForTesting(
v8::Isolate::AtomicsWaitEvent event, Local<v8::SharedArrayBuffer> sab,
- size_t offset_in_bytes, int32_t value, double timeout_in_ms,
+ size_t offset_in_bytes, int64_t value, double timeout_in_ms,
v8::Isolate::AtomicsWaitWakeHandle* wake_handle, void* data) {
AtomicsWaitCallbackInfo* info = static_cast<AtomicsWaitCallbackInfo*>(data);
info->ncalls++;
@@ -28828,18 +28580,19 @@ TEST(TestSetWasmThreadsEnabledCallback) {
CHECK(i_isolate->AreWasmThreadsEnabled(i_context));
}
-TEST(TestGetEmbeddedCodeRange) {
+TEST(TestGetUnwindState) {
LocalContext env;
v8::Isolate* isolate = env->GetIsolate();
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- v8::MemoryRange builtins_range = isolate->GetEmbeddedCodeRange();
+ v8::UnwindState unwind_state = isolate->GetUnwindState();
+ v8::MemoryRange builtins_range = unwind_state.embedded_code_range;
// Check that each off-heap builtin is within the builtins code range.
if (i::FLAG_embedded_builtins) {
for (int id = 0; id < i::Builtins::builtin_count; id++) {
if (!i::Builtins::IsIsolateIndependent(id)) continue;
- i::Code* builtin = i_isolate->builtins()->builtin(id);
+ i::Code builtin = i_isolate->builtins()->builtin(id);
i::Address start = builtin->InstructionStart();
i::Address end = start + builtin->InstructionSize();
@@ -28852,9 +28605,46 @@ TEST(TestGetEmbeddedCodeRange) {
CHECK_EQ(nullptr, builtins_range.start);
CHECK_EQ(0, builtins_range.length_in_bytes);
}
+
+ v8::JSEntryStub js_entry_stub = unwind_state.js_entry_stub;
+
+ CHECK_EQ(
+ i_isolate->heap()->builtin(i::Builtins::kJSEntry)->InstructionStart(),
+ reinterpret_cast<i::Address>(js_entry_stub.code.start));
}
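
A hedged sketch of how a sampling profiler might consume this state (the helper name is hypothetical): a PC that falls inside embedded_code_range belongs to V8's embedded builtins, which an embedder-side unwinder has to treat specially.

    bool PcIsInEmbeddedBuiltins(const v8::UnwindState& state, const void* pc) {
      const uint8_t* start =
          static_cast<const uint8_t*>(state.embedded_code_range.start);
      const uint8_t* p = static_cast<const uint8_t*>(pc);
      return p >= start &&
             p < start + state.embedded_code_range.length_in_bytes;
    }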
-TEST(PreviewSetKeysIteratorEntriesWithDeleted) {
+TEST(MicrotaskContextShouldBeNativeContext) {
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
+
+ auto callback = [](const v8::FunctionCallbackInfo<v8::Value>& info) {
+ v8::Isolate* isolate = info.GetIsolate();
+ v8::HandleScope scope(isolate);
+ i::Handle<i::Context> context =
+ v8::Utils::OpenHandle(*isolate->GetEnteredOrMicrotaskContext());
+
+ CHECK(context->IsNativeContext());
+ info.GetReturnValue().SetUndefined();
+ };
+
+ Local<v8::FunctionTemplate> desc = v8::FunctionTemplate::New(isolate);
+ desc->InstanceTemplate()->SetCallAsFunctionHandler(callback);
+ Local<v8::Object> obj = desc->GetFunction(env.local())
+ .ToLocalChecked()
+ ->NewInstance(env.local())
+ .ToLocalChecked();
+
+ CHECK(env->Global()->Set(env.local(), v8_str("callback"), obj).FromJust());
+ CompileRun(
+ "with({}){(async ()=>{"
+ " await 42;"
+ "})().then(callback);}");
+
+ isolate->RunMicrotasks();
+}
+
+TEST(PreviewSetIteratorEntriesWithDeleted) {
LocalContext env;
v8::HandleScope handle_scope(env->GetIsolate());
v8::Local<v8::Context> context = env.local();
@@ -28953,142 +28743,7 @@ TEST(PreviewSetKeysIteratorEntriesWithDeleted) {
}
}
-TEST(PreviewSetValuesIteratorEntriesWithDeleted) {
- LocalContext env;
- v8::HandleScope handle_scope(env->GetIsolate());
- v8::Local<v8::Context> context = env.local();
-
- {
- // Create set, delete entry, create iterator, preview.
- v8::Local<v8::Object> iterator =
- CompileRun("var set = new Set([1,2,3]); set.delete(1); set.values()")
- ->ToObject(context)
- .ToLocalChecked();
- bool is_key;
- v8::Local<v8::Array> entries =
- iterator->PreviewEntries(&is_key).ToLocalChecked();
- CHECK(!is_key);
- CHECK_EQ(2, entries->Length());
- CHECK_EQ(2, entries->Get(context, 0)
- .ToLocalChecked()
- ->Int32Value(context)
- .FromJust());
- CHECK_EQ(3, entries->Get(context, 1)
- .ToLocalChecked()
- ->Int32Value(context)
- .FromJust());
- }
- {
- // Create set, create iterator, delete entry, preview.
- v8::Local<v8::Object> iterator =
- CompileRun("var set = new Set([1,2,3]); set.values()")
- ->ToObject(context)
- .ToLocalChecked();
- CompileRun("set.delete(1);");
- bool is_key;
- v8::Local<v8::Array> entries =
- iterator->PreviewEntries(&is_key).ToLocalChecked();
- CHECK(!is_key);
- CHECK_EQ(2, entries->Length());
- CHECK_EQ(2, entries->Get(context, 0)
- .ToLocalChecked()
- ->Int32Value(context)
- .FromJust());
- CHECK_EQ(3, entries->Get(context, 1)
- .ToLocalChecked()
- ->Int32Value(context)
- .FromJust());
- }
- {
- // Create set, create iterator, delete entry, iterate, preview.
- v8::Local<v8::Object> iterator =
- CompileRun("var set = new Set([1,2,3]); var it = set.values(); it")
- ->ToObject(context)
- .ToLocalChecked();
- CompileRun("set.delete(1); it.next();");
- bool is_key;
- v8::Local<v8::Array> entries =
- iterator->PreviewEntries(&is_key).ToLocalChecked();
- CHECK(!is_key);
- CHECK_EQ(1, entries->Length());
- CHECK_EQ(3, entries->Get(context, 0)
- .ToLocalChecked()
- ->Int32Value(context)
- .FromJust());
- }
- {
- // Create set, create iterator, delete entry, iterate until empty, preview.
- v8::Local<v8::Object> iterator =
- CompileRun("var set = new Set([1,2,3]); var it = set.values(); it")
- ->ToObject(context)
- .ToLocalChecked();
- CompileRun("set.delete(1); it.next(); it.next();");
- bool is_key;
- v8::Local<v8::Array> entries =
- iterator->PreviewEntries(&is_key).ToLocalChecked();
- CHECK(!is_key);
- CHECK_EQ(0, entries->Length());
- }
- {
- // Create set, create iterator, delete entry, iterate, trigger rehash,
- // preview.
- v8::Local<v8::Object> iterator =
- CompileRun("var set = new Set([1,2,3]); var it = set.values(); it")
- ->ToObject(context)
- .ToLocalChecked();
- CompileRun("set.delete(1); it.next();");
- CompileRun("for (var i = 4; i < 20; i++) set.add(i);");
- bool is_key;
- v8::Local<v8::Array> entries =
- iterator->PreviewEntries(&is_key).ToLocalChecked();
- CHECK(!is_key);
- CHECK_EQ(17, entries->Length());
- for (uint32_t i = 0; i < 17; i++) {
- CHECK_EQ(i + 3, entries->Get(context, i)
- .ToLocalChecked()
- ->Int32Value(context)
- .FromJust());
- }
- }
-}
-
-TEST(PreviewMapEntriesIteratorEntries) {
- LocalContext env;
- v8::HandleScope handle_scope(env->GetIsolate());
- v8::Local<v8::Context> context = env.local();
- {
- // Create set, delete entry, create entries iterator, preview.
- v8::Local<v8::Object> iterator =
- CompileRun("var set = new Set([1,2,3]); set.delete(2); set.entries()")
- ->ToObject(context)
- .ToLocalChecked();
- bool is_key;
- v8::Local<v8::Array> entries =
- iterator->PreviewEntries(&is_key).ToLocalChecked();
- CHECK(is_key);
- CHECK_EQ(4, entries->Length());
- uint32_t first = entries->Get(context, 0)
- .ToLocalChecked()
- ->Int32Value(context)
- .FromJust();
- uint32_t second = entries->Get(context, 2)
- .ToLocalChecked()
- ->Int32Value(context)
- .FromJust();
- CHECK_EQ(1, first);
- CHECK_EQ(3, second);
- CHECK_EQ(first, entries->Get(context, 1)
- .ToLocalChecked()
- ->Int32Value(context)
- .FromJust());
- CHECK_EQ(second, entries->Get(context, 3)
- .ToLocalChecked()
- ->Int32Value(context)
- .FromJust());
- }
-}
-
-TEST(PreviewMapValuesIteratorEntriesWithDeleted) {
+TEST(PreviewMapIteratorEntriesWithDeleted) {
LocalContext env;
v8::HandleScope handle_scope(env->GetIsolate());
v8::Local<v8::Context> context = env.local();
@@ -29203,93 +28858,123 @@ TEST(PreviewMapValuesIteratorEntriesWithDeleted) {
}
}
-TEST(PreviewMapKeysIteratorEntriesWithDeleted) {
- LocalContext env;
- v8::HandleScope handle_scope(env->GetIsolate());
- v8::Local<v8::Context> context = env.local();
+namespace {
+static v8::Isolate* isolate_1;
+static v8::Isolate* isolate_2;
+v8::Persistent<v8::Context> context_1;
+v8::Persistent<v8::Context> context_2;
+
+static void CallIsolate1(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ v8::Isolate::Scope isolate_scope(isolate_1);
+ v8::HandleScope handle_scope(isolate_1);
+ v8::Local<v8::Context> context =
+ v8::Local<v8::Context>::New(isolate_1, context_1);
+ v8::Context::Scope context_scope(context);
+ CompileRun("f1() //# sourceURL=isolate1b");
+}
+
+static void CallIsolate2(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ v8::Isolate::Scope isolate_scope(isolate_2);
+ v8::HandleScope handle_scope(isolate_2);
+ v8::Local<v8::Context> context =
+ v8::Local<v8::Context>::New(isolate_2, context_2);
+ v8::Context::Scope context_scope(context);
+ reinterpret_cast<i::Isolate*>(isolate_2)->heap()->CollectAllGarbage(
+ i::Heap::kNoGCFlags, i::GarbageCollectionReason::kTesting,
+ v8::kGCCallbackFlagForced);
+ CompileRun("f2() //# sourceURL=isolate2b");
+}
+
+} // anonymous namespace
+
+UNINITIALIZED_TEST(NestedIsolates) {
+#ifdef VERIFY_HEAP
+ i::FLAG_verify_heap = true;
+#endif // VERIFY_HEAP
+ // Create two isolates and set up C++ functions via function templates that
+ // call into the other isolate. Recurse a few times, trigger GC along the way,
+ // and finally capture a stack trace. Check that the stack trace only includes
+ // frames from its own isolate.
+ v8::Isolate::CreateParams create_params;
+ create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
+ isolate_1 = v8::Isolate::New(create_params);
+ isolate_2 = v8::Isolate::New(create_params);
{
- // Create map, delete entry, create iterator, preview.
- v8::Local<v8::Object> iterator = CompileRun(
- "var map = new Map();"
- "var key = 1; map.set(key, {});"
- "map.set(2, {}); map.set(3, {});"
- "map.delete(key);"
- "map.keys()")
- ->ToObject(context)
- .ToLocalChecked();
- bool is_key;
- v8::Local<v8::Array> entries =
- iterator->PreviewEntries(&is_key).ToLocalChecked();
- CHECK(!is_key);
- CHECK_EQ(2, entries->Length());
- CHECK_EQ(2, entries->Get(context, 0)
- .ToLocalChecked()
- ->Int32Value(context)
- .FromJust());
- CHECK_EQ(3, entries->Get(context, 1)
- .ToLocalChecked()
- ->Int32Value(context)
- .FromJust());
- }
- {
- // Create map, create iterator, delete entry, preview.
- v8::Local<v8::Object> iterator = CompileRun(
- "var map = new Map();"
- "var key = 1; map.set(key, {});"
- "map.set(2, {}); map.set(3, {});"
- "map.keys()")
- ->ToObject(context)
- .ToLocalChecked();
- CompileRun("map.delete(key);");
- bool is_key;
- v8::Local<v8::Array> entries =
- iterator->PreviewEntries(&is_key).ToLocalChecked();
- CHECK(!is_key);
- CHECK_EQ(2, entries->Length());
- CHECK_EQ(2, entries->Get(context, 0)
- .ToLocalChecked()
- ->Int32Value(context)
- .FromJust());
- CHECK_EQ(3, entries->Get(context, 1)
- .ToLocalChecked()
- ->Int32Value(context)
- .FromJust());
- }
- {
- // Create map, create iterator, delete entry, iterate, preview.
- v8::Local<v8::Object> iterator = CompileRun(
- "var map = new Map();"
- "var key = 1; map.set(key, {});"
- "map.set(2, {}); map.set(3, {});"
- "var it = map.keys(); it")
- ->ToObject(context)
- .ToLocalChecked();
- CompileRun("map.delete(key); it.next();");
- bool is_key;
- v8::Local<v8::Array> entries =
- iterator->PreviewEntries(&is_key).ToLocalChecked();
- CHECK(!is_key);
- CHECK_EQ(1, entries->Length());
- CHECK_EQ(3, entries->Get(context, 0)
- .ToLocalChecked()
- ->Int32Value(context)
- .FromJust());
+ v8::Isolate::Scope isolate_scope(isolate_1);
+ v8::HandleScope handle_scope(isolate_1);
+
+ v8::Local<v8::Context> context = v8::Context::New(isolate_1);
+ v8::Context::Scope context_scope(context);
+
+ v8::Local<v8::FunctionTemplate> fun_templ =
+ v8::FunctionTemplate::New(isolate_1, CallIsolate2);
+ fun_templ->SetClassName(v8_str(isolate_1, "call_isolate_2"));
+ Local<Function> fun = fun_templ->GetFunction(context).ToLocalChecked();
+ CHECK(context->Global()
+ ->Set(context, v8_str(isolate_1, "call_isolate_2"), fun)
+ .FromJust());
+ CompileRun(
+ "let c = 0;"
+ "function f1() {"
+ " c++;"
+ " return call_isolate_2();"
+ "} //# sourceURL=isolate1a");
+ context_1.Reset(isolate_1, context);
}
+
{
- // Create map, create iterator, delete entry, iterate until empty, preview.
- v8::Local<v8::Object> iterator = CompileRun(
- "var map = new Map();"
- "var key = 1; map.set(key, {});"
- "map.set(2, {}); map.set(3, {});"
- "var it = map.keys(); it")
- ->ToObject(context)
- .ToLocalChecked();
- CompileRun("map.delete(key); it.next(); it.next();");
- bool is_key;
- v8::Local<v8::Array> entries =
- iterator->PreviewEntries(&is_key).ToLocalChecked();
- CHECK(!is_key);
- CHECK_EQ(0, entries->Length());
+ v8::Isolate::Scope isolate_scope(isolate_2);
+ v8::HandleScope handle_scope(isolate_2);
+
+ v8::Local<v8::Context> context = v8::Context::New(isolate_2);
+ v8::Context::Scope context_scope(context);
+
+ v8::Local<v8::FunctionTemplate> fun_templ =
+ v8::FunctionTemplate::New(isolate_2, CallIsolate1);
+ fun_templ->SetClassName(v8_str(isolate_2, "call_isolate_1"));
+ Local<Function> fun = fun_templ->GetFunction(context).ToLocalChecked();
+
+ CHECK(context->Global()
+ ->Set(context, v8_str(isolate_2, "call_isolate_1"), fun)
+ .FromJust());
+ CompileRun(
+ "let c = 4;"
+ "let result = undefined;"
+ "function f2() {"
+ " if (c-- > 0) return call_isolate_1();"
+ " else result = new Error().stack;"
+ "} //# sourceURL=isolate2a");
+ context_2.Reset(isolate_2, context);
+
+ v8::Local<v8::String> result =
+ CompileRun("f2(); result //# sourceURL=isolate2c")
+ ->ToString(context)
+ .ToLocalChecked();
+ v8::Local<v8::String> expectation = v8_str(isolate_2,
+ "Error\n"
+ " at f2 (isolate2a:1:104)\n"
+ " at isolate2b:1:1\n"
+ " at f2 (isolate2a:1:71)\n"
+ " at isolate2b:1:1\n"
+ " at f2 (isolate2a:1:71)\n"
+ " at isolate2b:1:1\n"
+ " at f2 (isolate2a:1:71)\n"
+ " at isolate2b:1:1\n"
+ " at f2 (isolate2a:1:71)\n"
+ " at isolate2c:1:1");
+ CHECK(result->StrictEquals(expectation));
+ }
+
+ {
+ v8::Isolate::Scope isolate_scope(isolate_1);
+ v8::HandleScope handle_scope(isolate_1);
+ v8::Local<v8::Context> context =
+ v8::Local<v8::Context>::New(isolate_1, context_1);
+ v8::Context::Scope context_scope(context);
+ ExpectInt32("c", 4);
}
+
+ isolate_1->Dispose();
+ isolate_2->Dispose();
}
diff --git a/deps/v8/test/cctest/test-api.h b/deps/v8/test/cctest/test-api.h
index 3887792373..37d858f5f4 100644
--- a/deps/v8/test/cctest/test-api.h
+++ b/deps/v8/test/cctest/test-api.h
@@ -15,7 +15,7 @@
template <typename T>
static void CheckReturnValue(const T& t, i::Address callback) {
v8::ReturnValue<v8::Value> rv = t.GetReturnValue();
- i::Object** o = *reinterpret_cast<i::Object***>(&rv);
+ i::FullObjectSlot o(*reinterpret_cast<i::Address*>(&rv));
CHECK_EQ(CcTest::isolate(), t.GetIsolate());
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(t.GetIsolate());
CHECK_EQ(t.GetIsolate(), rv.GetIsolate());
diff --git a/deps/v8/test/cctest/test-assembler-arm.cc b/deps/v8/test/cctest/test-assembler-arm.cc
index 7e1bb402fc..d66027b5fd 100644
--- a/deps/v8/test/cctest/test-assembler-arm.cc
+++ b/deps/v8/test/cctest/test-assembler-arm.cc
@@ -53,7 +53,7 @@ TEST(0) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- Assembler assm(AssemblerOptions{}, nullptr, 0);
+ Assembler assm(AssemblerOptions{});
__ add(r0, r0, Operand(r1));
__ mov(pc, Operand(lr));
@@ -78,7 +78,7 @@ TEST(1) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- Assembler assm(AssemblerOptions{}, nullptr, 0);
+ Assembler assm(AssemblerOptions{});
Label L, C;
__ mov(r1, Operand(r0));
@@ -114,7 +114,7 @@ TEST(2) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- Assembler assm(AssemblerOptions{}, nullptr, 0);
+ Assembler assm(AssemblerOptions{});
Label L, C;
__ mov(r1, Operand(r0));
@@ -166,7 +166,7 @@ TEST(3) {
} T;
T t;
- Assembler assm(AssemblerOptions{}, nullptr, 0);
+ Assembler assm(AssemblerOptions{});
Label L, C;
__ mov(ip, Operand(sp));
@@ -235,7 +235,7 @@ TEST(4) {
// Create a function that accepts &t, and loads, manipulates, and stores
// the doubles and floats.
- Assembler assm(AssemblerOptions{}, nullptr, 0);
+ Assembler assm(AssemblerOptions{});
Label L, C;
if (CpuFeatures::IsSupported(VFPv3)) {
@@ -367,7 +367,7 @@ TEST(5) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- Assembler assm(AssemblerOptions{}, nullptr, 0);
+ Assembler assm(AssemblerOptions{});
if (CpuFeatures::IsSupported(ARMv7)) {
CpuFeatureScope scope(&assm, ARMv7);
@@ -401,7 +401,7 @@ TEST(6) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- Assembler assm(AssemblerOptions{}, nullptr, 0);
+ Assembler assm(AssemblerOptions{});
__ usat(r1, 8, Operand(r0)); // Sat 0xFFFF to 0-255 = 0xFF.
__ usat(r2, 12, Operand(r0, ASR, 9)); // Sat (0xFFFF>>9) to 0-4095 = 0x7F.
@@ -438,7 +438,7 @@ static void TestRoundingMode(VCVTTypes types,
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- Assembler assm(AssemblerOptions{}, nullptr, 0);
+ Assembler assm(AssemblerOptions{});
Label wrong_exception;
@@ -636,7 +636,7 @@ TEST(8) {
// Create a function that uses vldm/vstm to move some double and
// single precision values around in memory.
- Assembler assm(AssemblerOptions{}, nullptr, 0);
+ Assembler assm(AssemblerOptions{});
__ mov(ip, Operand(sp));
__ stm(db_w, sp, r4.bit() | fp.bit() | lr.bit());
@@ -741,7 +741,7 @@ TEST(9) {
// Create a function that uses vldm/vstm to move some double and
// single precision values around in memory.
- Assembler assm(AssemblerOptions{}, nullptr, 0);
+ Assembler assm(AssemblerOptions{});
__ mov(ip, Operand(sp));
__ stm(db_w, sp, r4.bit() | fp.bit() | lr.bit());
@@ -850,7 +850,7 @@ TEST(10) {
// Create a function that uses vldm/vstm to move some double and
// single precision values around in memory.
- Assembler assm(AssemblerOptions{}, nullptr, 0);
+ Assembler assm(AssemblerOptions{});
__ mov(ip, Operand(sp));
__ stm(db_w, sp, r4.bit() | fp.bit() | lr.bit());
@@ -940,7 +940,7 @@ TEST(11) {
i.a = 0xABCD0001;
i.b = 0xABCD0000;
- Assembler assm(AssemblerOptions{}, nullptr, 0);
+ Assembler assm(AssemblerOptions{});
// Test HeapObject untagging.
__ ldr(r1, MemOperand(r0, offsetof(I, a)));
@@ -992,7 +992,7 @@ TEST(12) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- Assembler assm(AssemblerOptions{}, nullptr, 0);
+ Assembler assm(AssemblerOptions{});
Label target;
__ b(eq, &target);
__ b(ne, &target);
@@ -1028,7 +1028,7 @@ TEST(13) {
// Create a function that accepts &t, and loads, manipulates, and stores
// the doubles and floats.
- Assembler assm(AssemblerOptions{}, nullptr, 0);
+ Assembler assm(AssemblerOptions{});
Label L, C;
if (CpuFeatures::IsSupported(VFPv3)) {
@@ -1142,9 +1142,9 @@ TEST(14) {
T t;
// Create a function that makes the four basic operations.
- Assembler assm(AssemblerOptions{}, nullptr, 0);
+ Assembler assm(AssemblerOptions{});
- // Ensure FPSCR state (as JSEntryStub does).
+ // Ensure FPSCR state (as JSEntry does).
Label fpscr_done;
__ vmrs(r1);
__ tst(r1, Operand(kVFPDefaultNaNModeControlBit));
@@ -1321,7 +1321,7 @@ TEST(15) {
// Create a function that accepts &t, and loads, manipulates, and stores
// the doubles, floats, and SIMD values.
- Assembler assm(AssemblerOptions{}, nullptr, 0);
+ Assembler assm(AssemblerOptions{});
if (CpuFeatures::IsSupported(NEON)) {
CpuFeatureScope scope(&assm, NEON);
@@ -2296,7 +2296,7 @@ TEST(16) {
// Create a function that accepts &t, and loads, manipulates, and stores
// the doubles and floats.
- Assembler assm(AssemblerOptions{}, nullptr, 0);
+ Assembler assm(AssemblerOptions{});
__ stm(db_w, sp, r4.bit() | lr.bit());
@@ -2356,7 +2356,7 @@ TEST(17) {
HandleScope scope(isolate);
// Generate a code segment that will be longer than 2^24 bytes.
- Assembler assm(AssemblerOptions{}, nullptr, 0);
+ Assembler assm(AssemblerOptions{});
for (size_t i = 0; i < 1 << 23 ; ++i) { // 2^23
__ nop();
}
@@ -2379,7 +2379,7 @@ TEST(sdiv) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- Assembler assm(AssemblerOptions{}, nullptr, 0);
+ Assembler assm(AssemblerOptions{});
struct T {
int32_t dividend;
@@ -2439,7 +2439,7 @@ TEST(udiv) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- Assembler assm(AssemblerOptions{}, nullptr, 0);
+ Assembler assm(AssemblerOptions{});
struct T {
uint32_t dividend;
@@ -2485,7 +2485,7 @@ TEST(smmla) {
Isolate* const isolate = CcTest::i_isolate();
HandleScope scope(isolate);
RandomNumberGenerator* const rng = isolate->random_number_generator();
- Assembler assm(AssemblerOptions{}, nullptr, 0);
+ Assembler assm(AssemblerOptions{});
__ smmla(r1, r1, r2, r3);
__ str(r1, MemOperand(r0));
__ bx(lr);
@@ -2510,7 +2510,7 @@ TEST(smmul) {
Isolate* const isolate = CcTest::i_isolate();
HandleScope scope(isolate);
RandomNumberGenerator* const rng = isolate->random_number_generator();
- Assembler assm(AssemblerOptions{}, nullptr, 0);
+ Assembler assm(AssemblerOptions{});
__ smmul(r1, r1, r2);
__ str(r1, MemOperand(r0));
__ bx(lr);
@@ -2535,7 +2535,7 @@ TEST(sxtb) {
Isolate* const isolate = CcTest::i_isolate();
HandleScope scope(isolate);
RandomNumberGenerator* const rng = isolate->random_number_generator();
- Assembler assm(AssemblerOptions{}, nullptr, 0);
+ Assembler assm(AssemblerOptions{});
__ sxtb(r1, r1);
__ str(r1, MemOperand(r0));
__ bx(lr);
@@ -2560,7 +2560,7 @@ TEST(sxtab) {
Isolate* const isolate = CcTest::i_isolate();
HandleScope scope(isolate);
RandomNumberGenerator* const rng = isolate->random_number_generator();
- Assembler assm(AssemblerOptions{}, nullptr, 0);
+ Assembler assm(AssemblerOptions{});
__ sxtab(r1, r2, r1);
__ str(r1, MemOperand(r0));
__ bx(lr);
@@ -2585,7 +2585,7 @@ TEST(sxth) {
Isolate* const isolate = CcTest::i_isolate();
HandleScope scope(isolate);
RandomNumberGenerator* const rng = isolate->random_number_generator();
- Assembler assm(AssemblerOptions{}, nullptr, 0);
+ Assembler assm(AssemblerOptions{});
__ sxth(r1, r1);
__ str(r1, MemOperand(r0));
__ bx(lr);
@@ -2610,7 +2610,7 @@ TEST(sxtah) {
Isolate* const isolate = CcTest::i_isolate();
HandleScope scope(isolate);
RandomNumberGenerator* const rng = isolate->random_number_generator();
- Assembler assm(AssemblerOptions{}, nullptr, 0);
+ Assembler assm(AssemblerOptions{});
__ sxtah(r1, r2, r1);
__ str(r1, MemOperand(r0));
__ bx(lr);
@@ -2635,7 +2635,7 @@ TEST(uxtb) {
Isolate* const isolate = CcTest::i_isolate();
HandleScope scope(isolate);
RandomNumberGenerator* const rng = isolate->random_number_generator();
- Assembler assm(AssemblerOptions{}, nullptr, 0);
+ Assembler assm(AssemblerOptions{});
__ uxtb(r1, r1);
__ str(r1, MemOperand(r0));
__ bx(lr);
@@ -2660,7 +2660,7 @@ TEST(uxtab) {
Isolate* const isolate = CcTest::i_isolate();
HandleScope scope(isolate);
RandomNumberGenerator* const rng = isolate->random_number_generator();
- Assembler assm(AssemblerOptions{}, nullptr, 0);
+ Assembler assm(AssemblerOptions{});
__ uxtab(r1, r2, r1);
__ str(r1, MemOperand(r0));
__ bx(lr);
@@ -2685,7 +2685,7 @@ TEST(uxth) {
Isolate* const isolate = CcTest::i_isolate();
HandleScope scope(isolate);
RandomNumberGenerator* const rng = isolate->random_number_generator();
- Assembler assm(AssemblerOptions{}, nullptr, 0);
+ Assembler assm(AssemblerOptions{});
__ uxth(r1, r1);
__ str(r1, MemOperand(r0));
__ bx(lr);
@@ -2710,7 +2710,7 @@ TEST(uxtah) {
Isolate* const isolate = CcTest::i_isolate();
HandleScope scope(isolate);
RandomNumberGenerator* const rng = isolate->random_number_generator();
- Assembler assm(AssemblerOptions{}, nullptr, 0);
+ Assembler assm(AssemblerOptions{});
__ uxtah(r1, r2, r1);
__ str(r1, MemOperand(r0));
__ bx(lr);
@@ -2739,7 +2739,7 @@ TEST(rbit) {
CcTest::InitializeVM();
Isolate* const isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- Assembler assm(AssemblerOptions{}, nullptr, 0);
+ Assembler assm(AssemblerOptions{});
if (CpuFeatures::IsSupported(ARMv7)) {
CpuFeatureScope scope(&assm, ARMv7);
@@ -2785,7 +2785,7 @@ TEST(code_relative_offset) {
Handle<HeapObject> code_object(ReadOnlyRoots(isolate).self_reference_marker(),
isolate);
- Assembler assm(AssemblerOptions{}, nullptr, 0);
+ Assembler assm(AssemblerOptions{});
Label start, target_away, target_faraway;
@@ -2852,7 +2852,7 @@ TEST(msr_mrs) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- Assembler assm(AssemblerOptions{}, nullptr, 0);
+ Assembler assm(AssemblerOptions{});
// Create a helper function:
// void TestMsrMrs(uint32_t nzcv,
@@ -2934,7 +2934,7 @@ TEST(ARMv8_float32_vrintX) {
// Create a function that accepts &t, and loads, manipulates, and stores
// the floats.
- Assembler assm(AssemblerOptions{}, nullptr, 0);
+ Assembler assm(AssemblerOptions{});
Label L, C;
@@ -3036,7 +3036,7 @@ TEST(ARMv8_vrintX) {
// Create a function that accepts &t, and loads, manipulates, and stores
// the doubles and floats.
- Assembler assm(AssemblerOptions{}, nullptr, 0);
+ Assembler assm(AssemblerOptions{});
Label L, C;
@@ -3125,7 +3125,7 @@ TEST(ARMv8_vsel) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- Assembler assm(AssemblerOptions{}, nullptr, 0);
+ Assembler assm(AssemblerOptions{});
// Used to indicate whether a condition passed or failed.
static constexpr float kResultPass = 1.0f;
@@ -3275,7 +3275,7 @@ TEST(ARMv8_vminmax_f64) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- Assembler assm(AssemblerOptions{}, nullptr, 0);
+ Assembler assm(AssemblerOptions{});
struct Inputs {
double left_;
@@ -3355,7 +3355,7 @@ TEST(ARMv8_vminmax_f32) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- Assembler assm(AssemblerOptions{}, nullptr, 0);
+ Assembler assm(AssemblerOptions{});
struct Inputs {
float left_;
@@ -3530,7 +3530,7 @@ TEST(macro_float_minmax_f64) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, nullptr, 0, CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, CodeObjectRequired::kYes);
struct Inputs {
double left_;
@@ -3595,7 +3595,7 @@ TEST(macro_float_minmax_f32) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, nullptr, 0, CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, CodeObjectRequired::kYes);
struct Inputs {
float left_;
@@ -3667,7 +3667,7 @@ TEST(unaligned_loads) {
} T;
T t;
- Assembler assm(AssemblerOptions{}, nullptr, 0);
+ Assembler assm(AssemblerOptions{});
__ ldrh(ip, MemOperand(r1, r2));
__ str(ip, MemOperand(r0, offsetof(T, ldrh)));
__ ldrsh(ip, MemOperand(r1, r2));
@@ -3714,7 +3714,7 @@ TEST(unaligned_stores) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- Assembler assm(AssemblerOptions{}, nullptr, 0);
+ Assembler assm(AssemblerOptions{});
__ strh(r3, MemOperand(r0, r2));
__ str(r3, MemOperand(r1, r2));
__ bx(lr);
@@ -3768,7 +3768,7 @@ TEST(vswp) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- Assembler assm(AssemblerOptions{}, nullptr, 0);
+ Assembler assm(AssemblerOptions{});
typedef struct {
uint64_t vswp_d0;
@@ -3850,7 +3850,7 @@ TEST(regress4292_b) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- Assembler assm(AssemblerOptions{}, nullptr, 0);
+ Assembler assm(AssemblerOptions{});
Label end;
__ mov(r0, Operand(isolate->factory()->infinity_value()));
for (int i = 0; i < 1020; ++i) {
@@ -3865,7 +3865,7 @@ TEST(regress4292_bl) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- Assembler assm(AssemblerOptions{}, nullptr, 0);
+ Assembler assm(AssemblerOptions{});
Label end;
__ mov(r0, Operand(isolate->factory()->infinity_value()));
for (int i = 0; i < 1020; ++i) {
@@ -3880,7 +3880,7 @@ TEST(regress4292_blx) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- Assembler assm(AssemblerOptions{}, nullptr, 0);
+ Assembler assm(AssemblerOptions{});
Label end;
__ mov(r0, Operand(isolate->factory()->infinity_value()));
for (int i = 0; i < 1020; ++i) {
@@ -3895,7 +3895,7 @@ TEST(regress4292_CheckConstPool) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- Assembler assm(AssemblerOptions{}, nullptr, 0);
+ Assembler assm(AssemblerOptions{});
__ mov(r0, Operand(isolate->factory()->infinity_value()));
__ BlockConstPoolFor(1019);
for (int i = 0; i < 1019; ++i) __ nop();
@@ -3907,7 +3907,7 @@ TEST(use_scratch_register_scope) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- Assembler assm(AssemblerOptions{}, nullptr, 0);
+ Assembler assm(AssemblerOptions{});
// The assembler should have ip as a scratch by default.
CHECK_EQ(*assm.GetScratchRegisterList(), ip.bit());
@@ -3929,7 +3929,7 @@ TEST(use_scratch_vfp_register_scope) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- Assembler assm(AssemblerOptions{}, nullptr, 0);
+ Assembler assm(AssemblerOptions{});
VfpRegList orig_scratches = *assm.GetScratchVfpRegisterList();
@@ -4025,7 +4025,7 @@ TEST(split_add_immediate) {
HandleScope scope(isolate);
{
- Assembler assm(AssemblerOptions{}, nullptr, 0);
+ Assembler assm(AssemblerOptions{});
__ mov(r1, r0);
// Re-use the destination as a scratch.
__ add(r0, r1, Operand(0x12345678));
@@ -4046,7 +4046,7 @@ TEST(split_add_immediate) {
}
{
- Assembler assm(AssemblerOptions{}, nullptr, 0);
+ Assembler assm(AssemblerOptions{});
// Use ip as a scratch.
__ add(r0, r0, Operand(0x12345678));
__ blx(lr);
@@ -4066,7 +4066,7 @@ TEST(split_add_immediate) {
}
{
- Assembler assm(AssemblerOptions{}, nullptr, 0);
+ Assembler assm(AssemblerOptions{});
UseScratchRegisterScope temps(&assm);
Register reserved = temps.Acquire();
USE(reserved);
diff --git a/deps/v8/test/cctest/test-assembler-arm64.cc b/deps/v8/test/cctest/test-assembler-arm64.cc
index f2ca5c01e5..a500c9cb51 100644
--- a/deps/v8/test/cctest/test-assembler-arm64.cc
+++ b/deps/v8/test/cctest/test-assembler-arm64.cc
@@ -66,8 +66,6 @@ namespace internal {
// RUN();
//
// CHECK_EQUAL_64(1, x0);
-//
-// TEARDOWN();
// }
//
// Within a START ... END block all registers but sp can be modified. sp has to
@@ -119,21 +117,22 @@ static void InitializeVM() {
#ifdef USE_SIMULATOR
// Run tests with the simulator.
-#define SETUP_SIZE(buf_size) \
- Isolate* isolate = CcTest::i_isolate(); \
- HandleScope scope(isolate); \
- CHECK_NOT_NULL(isolate); \
- byte* buf = new byte[buf_size]; \
- MacroAssembler masm(isolate, buf, buf_size, \
- v8::internal::CodeObjectRequired::kYes); \
- Decoder<DispatchingDecoderVisitor>* decoder = \
- new Decoder<DispatchingDecoderVisitor>(); \
- Simulator simulator(decoder); \
- PrintDisassembler* pdis = nullptr; \
- RegisterDump core; \
- if (i::FLAG_trace_sim) { \
- pdis = new PrintDisassembler(stdout); \
- decoder->PrependVisitor(pdis); \
+#define SETUP_SIZE(buf_size) \
+ Isolate* isolate = CcTest::i_isolate(); \
+ HandleScope scope(isolate); \
+ CHECK_NOT_NULL(isolate); \
+ std::unique_ptr<byte[]> owned_buf{new byte[buf_size]}; \
+ byte* buf = owned_buf.get(); \
+ MacroAssembler masm(isolate, v8::internal::CodeObjectRequired::kYes, \
+ ExternalAssemblerBuffer(buf, buf_size)); \
+ Decoder<DispatchingDecoderVisitor>* decoder = \
+ new Decoder<DispatchingDecoderVisitor>(); \
+ Simulator simulator(decoder); \
+ std::unique_ptr<PrintDisassembler> pdis; \
+ RegisterDump core; \
+ if (i::FLAG_trace_sim) { \
+ pdis.reset(new PrintDisassembler(stdout)); \
+ decoder->PrependVisitor(pdis.get()); \
}
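The simulator flavour of SETUP_SIZE now states ownership with std::unique_ptr: the code buffer and the optional PrintDisassembler are freed automatically when the enclosing test scope ends, which is why the TEARDOWN macro is deleted below and every TEARDOWN(); call site disappears from the tests. A standalone sketch of the same RAII pattern, with hypothetical names:

    #include <cstddef>
    #include <cstdio>
    #include <memory>

    struct DisassemblerSketch {  // stand-in for the real PrintDisassembler
      ~DisassemblerSketch() { std::puts("disassembler freed"); }
    };

    void ScopeSketch(bool trace, std::size_t buf_size) {
      std::unique_ptr<unsigned char[]> owned_buf{new unsigned char[buf_size]};
      unsigned char* buf = owned_buf.get();  // non-owning alias, as in SETUP_SIZE
      std::unique_ptr<DisassemblerSketch> pdis;  // stays empty unless tracing
      if (trace) pdis.reset(new DisassemblerSketch());
      (void)buf;
    }  // owned_buf and pdis are released here; no explicit TEARDOWN needed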
// Reset the assembler and simulator, so that instructions can be generated,
@@ -165,24 +164,21 @@ static void InitializeVM() {
__ Ret(); \
__ GetCode(masm.isolate(), nullptr);
-#define TEARDOWN() \
- delete pdis; \
- delete[] buf;
-
#else // ifdef USE_SIMULATOR.
// Run the test on real hardware or models.
-#define SETUP_SIZE(buf_size) \
- Isolate* isolate = CcTest::i_isolate(); \
- HandleScope scope(isolate); \
- CHECK_NOT_NULL(isolate); \
- size_t allocated; \
- byte* buf = AllocateAssemblerBuffer(&allocated, buf_size); \
- MacroAssembler masm(isolate, buf, static_cast<int>(allocated), \
- v8::internal::CodeObjectRequired::kYes); \
+#define SETUP_SIZE(buf_size) \
+ Isolate* isolate = CcTest::i_isolate(); \
+ HandleScope scope(isolate); \
+ CHECK_NOT_NULL(isolate); \
+ auto owned_buf = AllocateAssemblerBuffer(buf_size); \
+ MacroAssembler masm(isolate, v8::internal::CodeObjectRequired::kYes, \
+ owned_buf->CreateView()); \
+ uint8_t* buf = owned_buf->start(); \
+ USE(buf); \
RegisterDump core;
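On real hardware, the macro swaps the AllocateAssemblerBuffer(&allocated, buf_size) / FreePages pairing for a single owning object whose MakeWritable()/MakeExecutable() methods toggle page protection around code generation and execution. The real helper lives in the cctest support code; a hypothetical Linux-style sketch of an object with that interface (error handling omitted for brevity):

    #include <cstddef>
    #include <sys/mman.h>

    class TestCodeBufferSketch {
     public:
      explicit TestCodeBufferSketch(std::size_t size) : size_(size) {
        start_ = static_cast<unsigned char*>(
            mmap(nullptr, size_, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0));
      }
      ~TestCodeBufferSketch() { munmap(start_, size_); }
      unsigned char* start() const { return start_; }
      void MakeWritable() { mprotect(start_, size_, PROT_READ | PROT_WRITE); }
      void MakeExecutable() { mprotect(start_, size_, PROT_READ | PROT_EXEC); }
     private:
      unsigned char* start_;
      std::size_t size_;
    };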
#define RESET() \
- MakeAssemblerBufferWritable(buf, allocated); \
+ owned_buf->MakeWritable(); \
__ Reset(); \
/* Reset the machine state (like simulator.ResetState()). */ \
__ Msr(NZCV, xzr); \
@@ -195,12 +191,11 @@ static void InitializeVM() {
RESET(); \
START_AFTER_RESET();
-#define RUN() \
- MakeAssemblerBufferExecutable(buf, allocated); \
- { \
- void (*test_function)(void); \
- memcpy(&test_function, &buf, sizeof(buf)); \
- test_function(); \
+#define RUN() \
+ owned_buf->MakeExecutable(); \
+ { \
+ auto* test_function = bit_cast<void (*)()>(buf); \
+ test_function(); \
}
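RUN() used to type-pun the buffer pointer into a function pointer by memcpy-ing the bytes of one into the other; bit_cast performs the identical memcpy behind a size-checked template, which keeps the punning legal while making the intent explicit. A sketch of what the call does (v8::internal::bit_cast is assumed to have this shape; C++20's std::bit_cast is the standardized equivalent):

    #include <cstring>

    template <class To, class From>
    To bit_cast_sketch(const From& from) {
      static_assert(sizeof(To) == sizeof(From), "bit_cast needs equal sizes");
      To to;
      std::memcpy(&to, &from, sizeof(to));  // the memcpy RUN() wrote inline
      return to;
    }

    void RunSketch(unsigned char* buf) {
      auto* test_function = bit_cast_sketch<void (*)()>(buf);
      test_function();  // jump to the first generated instruction
    }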
#define END() \
@@ -209,9 +204,6 @@ static void InitializeVM() {
__ Ret(); \
__ GetCode(masm.isolate(), nullptr);
-#define TEARDOWN() \
- CHECK(v8::internal::FreePages(GetPlatformPageAllocator(), buf, allocated));
-
#endif // ifdef USE_SIMULATOR.
#define CHECK_EQUAL_NZCV(expected) \
@@ -244,7 +236,6 @@ static void InitializeVM() {
#define CHECK_CONSTANT_POOL_SIZE(expected) ((void)0)
#endif
-
TEST(stack_ops) {
INIT_V8();
SETUP();
@@ -291,11 +282,8 @@ TEST(stack_ops) {
CHECK_EQUAL_64(0x1FFF, x3);
CHECK_EQUAL_64(0xFFFFFFF8, x4);
CHECK_EQUAL_64(0xFFFFFFF8, x5);
-
- TEARDOWN();
}
-
TEST(mvn) {
INIT_V8();
SETUP();
@@ -337,11 +325,8 @@ TEST(mvn) {
CHECK_EQUAL_64(0xFFFFFFFFFFFF0007UL, x13);
CHECK_EQUAL_64(0xFFFFFFFFFFFE000FUL, x14);
CHECK_EQUAL_64(0xFFFFFFFFFFFE000FUL, x15);
-
- TEARDOWN();
}
-
TEST(mov) {
INIT_V8();
SETUP();
@@ -354,9 +339,9 @@ TEST(mov) {
__ Mov(x0, 0x0123456789ABCDEFL);
- __ movz(x1, 0xABCDL << 16);
- __ movk(x2, 0xABCDL << 32);
- __ movn(x3, 0xABCDL << 48);
+ __ movz(x1, 0xABCDLL << 16);
+ __ movk(x2, 0xABCDLL << 32);
+ __ movn(x3, 0xABCDLL << 48);
__ Mov(x4, 0x0123456789ABCDEFL);
__ Mov(x5, x4);
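The L-to-LL suffix changes here (and in the add_sub_wide_imm, adc_sbc_shift, and register_bit hunks later in this file) are a portability fix: on LLP64 targets such as Windows, long is only 32 bits wide, so 0xABCDL << 32 or 0xABCDL << 48 shifts by at least the width of the type, which is undefined behaviour. long long is at least 64 bits on every target, so the LL constants keep the shifts in range:

    #include <cstdint>

    // LLP64 (e.g. MSVC on Windows): long is 32 bits, long long is 64 bits.
    static_assert(sizeof(long long) >= 8, "LL literals span at least 64 bits");

    // With a plain L suffix this shift exceeds the width of a 32-bit long on
    // LLP64 targets (undefined behaviour); with LL it is defined everywhere.
    constexpr uint64_t kWellDefined = 0xABCDLL << 32;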
@@ -418,11 +403,8 @@ TEST(mov) {
CHECK_EQUAL_64(0x00007FF8, x25);
CHECK_EQUAL_64(0x000000000000FFF0UL, x26);
CHECK_EQUAL_64(0x000000000001FFE0UL, x27);
-
- TEARDOWN();
}
-
TEST(mov_imm_w) {
INIT_V8();
SETUP();
@@ -452,11 +434,8 @@ TEST(mov_imm_w) {
CHECK_EQUAL_64(0x80000000L, x7);
CHECK_EQUAL_64(0xFFFF0000L, x8);
CHECK_EQUAL_32(kWMinInt, w9);
-
- TEARDOWN();
}
-
TEST(mov_imm_x) {
INIT_V8();
SETUP();
@@ -519,11 +498,8 @@ TEST(mov_imm_x) {
CHECK_EQUAL_64(0x123456789ABCDEF0L, x26);
CHECK_EQUAL_64(0xFFFF000000000001L, x27);
CHECK_EQUAL_64(0x8000FFFF00000000L, x28);
-
- TEARDOWN();
}
-
TEST(orr) {
INIT_V8();
SETUP();
@@ -556,11 +532,8 @@ TEST(orr) {
CHECK_EQUAL_64(0x0FF00000000FF0F0L, x9);
CHECK_EQUAL_64(0xF0FF, x10);
CHECK_EQUAL_64(0xF0000000F000F0F0L, x11);
-
- TEARDOWN();
}
-
TEST(orr_extend) {
INIT_V8();
SETUP();
@@ -588,11 +561,8 @@ TEST(orr_extend) {
CHECK_EQUAL_64(0xFFFFFFFFFFFF0101UL, x11);
CHECK_EQUAL_64(0xFFFFFFFE00020201UL, x12);
CHECK_EQUAL_64(0x0000000400040401UL, x13);
-
- TEARDOWN();
}
-
TEST(bitwise_wide_imm) {
INIT_V8();
SETUP();
@@ -616,11 +586,8 @@ TEST(bitwise_wide_imm) {
CHECK_EQUAL_64(0xF0FBFDFFUL, x11);
CHECK_EQUAL_32(kWMinInt, w12);
CHECK_EQUAL_32(kWMinInt, w13);
-
- TEARDOWN();
}
-
TEST(orn) {
INIT_V8();
SETUP();
@@ -653,11 +620,8 @@ TEST(orn) {
CHECK_EQUAL_64(0xFF00FFFFFFFFFFFFL, x9);
CHECK_EQUAL_64(0xFFFFF0F0, x10);
CHECK_EQUAL_64(0xFFFF0000FFFFF0F0L, x11);
-
- TEARDOWN();
}
-
TEST(orn_extend) {
INIT_V8();
SETUP();
@@ -685,11 +649,8 @@ TEST(orn_extend) {
CHECK_EQUAL_64(0x0000FEFD, x11);
CHECK_EQUAL_64(0x00000001FFFDFDFBUL, x12);
CHECK_EQUAL_64(0xFFFFFFFBFFFBFBF7UL, x13);
-
- TEARDOWN();
}
-
TEST(and_) {
INIT_V8();
SETUP();
@@ -722,11 +683,8 @@ TEST(and_) {
CHECK_EQUAL_64(0x00000000, x9);
CHECK_EQUAL_64(0x0000FF00, x10);
CHECK_EQUAL_64(0x000000F0, x11);
-
- TEARDOWN();
}
-
TEST(and_extend) {
INIT_V8();
SETUP();
@@ -754,11 +712,8 @@ TEST(and_extend) {
CHECK_EQUAL_64(0xFFFFFFFFFFFF0102UL, x11);
CHECK_EQUAL_64(0xFFFFFFFE00020204UL, x12);
CHECK_EQUAL_64(0x0000000400040408UL, x13);
-
- TEARDOWN();
}
-
TEST(ands) {
INIT_V8();
SETUP();
@@ -814,11 +769,8 @@ TEST(ands) {
CHECK_EQUAL_NZCV(NFlag);
CHECK_EQUAL_64(0x80000000, x0);
-
- TEARDOWN();
}
-
TEST(bic) {
INIT_V8();
SETUP();
@@ -863,11 +815,8 @@ TEST(bic) {
CHECK_EQUAL_64(0x0000FEF0, x11);
CHECK_EQUAL_64(0x543210, x21);
-
- TEARDOWN();
}
-
TEST(bic_extend) {
INIT_V8();
SETUP();
@@ -895,11 +844,8 @@ TEST(bic_extend) {
CHECK_EQUAL_64(0x0000FEFD, x11);
CHECK_EQUAL_64(0x00000001FFFDFDFBUL, x12);
CHECK_EQUAL_64(0xFFFFFFFBFFFBFBF7UL, x13);
-
- TEARDOWN();
}
-
TEST(bics) {
INIT_V8();
SETUP();
@@ -954,11 +900,8 @@ TEST(bics) {
CHECK_EQUAL_NZCV(ZFlag);
CHECK_EQUAL_64(0x00000000, x0);
-
- TEARDOWN();
}
-
TEST(eor) {
INIT_V8();
SETUP();
@@ -991,11 +934,8 @@ TEST(eor) {
CHECK_EQUAL_64(0x00000FF00000FFFFL, x9);
CHECK_EQUAL_64(0xFF0000F0, x10);
CHECK_EQUAL_64(0xFF00FF00FF0000F0L, x11);
-
- TEARDOWN();
}
-
TEST(eor_extend) {
INIT_V8();
SETUP();
@@ -1023,11 +963,8 @@ TEST(eor_extend) {
CHECK_EQUAL_64(0xEEEEEEEEEEEE1013UL, x11);
CHECK_EQUAL_64(0xEEEEEEEF11131315UL, x12);
CHECK_EQUAL_64(0x1111111511151519UL, x13);
-
- TEARDOWN();
}
-
TEST(eon) {
INIT_V8();
SETUP();
@@ -1060,11 +997,8 @@ TEST(eon) {
CHECK_EQUAL_64(0xFFFFF00FFFFF0000L, x9);
CHECK_EQUAL_64(0xFC3F03CF, x10);
CHECK_EQUAL_64(0xFFFFEFFFFFFF100FL, x11);
-
- TEARDOWN();
}
-
TEST(eon_extend) {
INIT_V8();
SETUP();
@@ -1092,11 +1026,8 @@ TEST(eon_extend) {
CHECK_EQUAL_64(0x111111111111EFECUL, x11);
CHECK_EQUAL_64(0x11111110EEECECEAUL, x12);
CHECK_EQUAL_64(0xEEEEEEEAEEEAEAE6UL, x13);
-
- TEARDOWN();
}
-
TEST(mul) {
INIT_V8();
SETUP();
@@ -1149,11 +1080,8 @@ TEST(mul) {
CHECK_EQUAL_64(0xFFFFFFFF00000001UL, x21);
CHECK_EQUAL_64(0xFFFFFFFF, x22);
CHECK_EQUAL_64(0xFFFFFFFFFFFFFFFFUL, x23);
-
- TEARDOWN();
}
-
static void SmullHelper(int64_t expected, int64_t a, int64_t b) {
SETUP();
START();
@@ -1163,10 +1091,8 @@ static void SmullHelper(int64_t expected, int64_t a, int64_t b) {
END();
RUN();
CHECK_EQUAL_64(expected, x2);
- TEARDOWN();
}
-
TEST(smull) {
INIT_V8();
SmullHelper(0, 0, 0);
@@ -1177,7 +1103,6 @@ TEST(smull) {
SmullHelper(0x0000000080000000, 0x00010000, 0x00008000);
}
-
TEST(madd) {
INIT_V8();
SETUP();
@@ -1243,11 +1168,8 @@ TEST(madd) {
CHECK_EQUAL_64(0xFFFFFFFE00000002UL, x25);
CHECK_EQUAL_64(0, x26);
CHECK_EQUAL_64(0, x27);
-
- TEARDOWN();
}
-
TEST(msub) {
INIT_V8();
SETUP();
@@ -1313,11 +1235,8 @@ TEST(msub) {
CHECK_EQUAL_64(0x200000000UL, x25);
CHECK_EQUAL_64(0x1FFFFFFFEUL, x26);
CHECK_EQUAL_64(0xFFFFFFFFFFFFFFFEUL, x27);
-
- TEARDOWN();
}
-
TEST(smulh) {
INIT_V8();
SETUP();
@@ -1362,11 +1281,8 @@ TEST(smulh) {
CHECK_EQUAL_64(0x1C71C71C71C71C71UL, x9);
CHECK_EQUAL_64(0xE38E38E38E38E38EUL, x10);
CHECK_EQUAL_64(0x1C71C71C71C71C72UL, x11);
-
- TEARDOWN();
}
-
TEST(smaddl_umaddl) {
INIT_V8();
SETUP();
@@ -1398,11 +1314,8 @@ TEST(smaddl_umaddl) {
CHECK_EQUAL_64(0xFFFFFFFE00000005UL, x14);
CHECK_EQUAL_64(0xFFFFFFFE00000005UL, x15);
CHECK_EQUAL_64(0x1, x22);
-
- TEARDOWN();
}
-
TEST(smsubl_umsubl) {
INIT_V8();
SETUP();
@@ -1434,11 +1347,8 @@ TEST(smsubl_umsubl) {
CHECK_EQUAL_64(0x200000003UL, x14);
CHECK_EQUAL_64(0x200000003UL, x15);
CHECK_EQUAL_64(0x3FFFFFFFFUL, x22);
-
- TEARDOWN();
}
-
TEST(div) {
INIT_V8();
SETUP();
@@ -1517,11 +1427,8 @@ TEST(div) {
CHECK_EQUAL_64(0, x19);
CHECK_EQUAL_64(0, x20);
CHECK_EQUAL_64(0, x21);
-
- TEARDOWN();
}
-
TEST(rbit_rev) {
INIT_V8();
SETUP();
@@ -1546,11 +1453,8 @@ TEST(rbit_rev) {
CHECK_EQUAL_64(0x10325476, x4);
CHECK_EQUAL_64(0x98BADCFE10325476UL, x5);
CHECK_EQUAL_64(0x1032547698BADCFEUL, x6);
-
- TEARDOWN();
}
-
TEST(clz_cls) {
INIT_V8();
SETUP();
@@ -1587,11 +1491,8 @@ TEST(clz_cls) {
CHECK_EQUAL_64(8, x9);
CHECK_EQUAL_64(31, x10);
CHECK_EQUAL_64(63, x11);
-
- TEARDOWN();
}
-
TEST(label) {
INIT_V8();
SETUP();
@@ -1626,11 +1527,8 @@ TEST(label) {
CHECK_EQUAL_64(0x1, x0);
CHECK_EQUAL_64(0x1, x1);
-
- TEARDOWN();
}
-
TEST(branch_at_start) {
INIT_V8();
SETUP();
@@ -1659,10 +1557,8 @@ TEST(branch_at_start) {
RUN();
CHECK_EQUAL_64(0x1, x0);
- TEARDOWN();
}
-
TEST(adr) {
INIT_V8();
SETUP();
@@ -1704,11 +1600,8 @@ TEST(adr) {
CHECK_EQUAL_64(0x0, x0);
CHECK_EQUAL_64(0x0, x1);
-
- TEARDOWN();
}
-
TEST(adr_far) {
INIT_V8();
@@ -1769,11 +1662,8 @@ TEST(adr_far) {
RUN();
CHECK_EQUAL_64(0xF, x0);
-
- TEARDOWN();
}
-
TEST(branch_cond) {
INIT_V8();
SETUP();
@@ -1859,11 +1749,8 @@ TEST(branch_cond) {
RUN();
CHECK_EQUAL_64(0x1, x0);
-
- TEARDOWN();
}
-
TEST(branch_to_reg) {
INIT_V8();
SETUP();
@@ -1908,11 +1795,8 @@ TEST(branch_to_reg) {
CHECK_EQUAL_64(core.xreg(3) + kInstrSize, x0);
CHECK_EQUAL_64(42, x1);
CHECK_EQUAL_64(84, x2);
-
- TEARDOWN();
}
-
TEST(compare_branch) {
INIT_V8();
SETUP();
@@ -1981,11 +1865,8 @@ TEST(compare_branch) {
CHECK_EQUAL_64(0, x3);
CHECK_EQUAL_64(1, x4);
CHECK_EQUAL_64(0, x5);
-
- TEARDOWN();
}
-
TEST(test_branch) {
INIT_V8();
SETUP();
@@ -2032,11 +1913,8 @@ TEST(test_branch) {
CHECK_EQUAL_64(0, x1);
CHECK_EQUAL_64(1, x2);
CHECK_EQUAL_64(0, x3);
-
- TEARDOWN();
}
-
TEST(far_branch_backward) {
INIT_V8();
@@ -2106,11 +1984,8 @@ TEST(far_branch_backward) {
CHECK_EQUAL_64(0x7, x0);
CHECK_EQUAL_64(0x1, x1);
-
- TEARDOWN();
}
-
TEST(far_branch_simple_veneer) {
INIT_V8();
@@ -2176,11 +2051,8 @@ TEST(far_branch_simple_veneer) {
CHECK_EQUAL_64(0x7, x0);
CHECK_EQUAL_64(0x1, x1);
-
- TEARDOWN();
}
-
TEST(far_branch_veneer_link_chain) {
INIT_V8();
@@ -2271,11 +2143,8 @@ TEST(far_branch_veneer_link_chain) {
CHECK_EQUAL_64(0x7, x0);
CHECK_EQUAL_64(0x1, x1);
-
- TEARDOWN();
}
-
TEST(far_branch_veneer_broken_link_chain) {
INIT_V8();
@@ -2361,11 +2230,8 @@ TEST(far_branch_veneer_broken_link_chain) {
CHECK_EQUAL_64(0x3, x0);
CHECK_EQUAL_64(0x1, x1);
-
- TEARDOWN();
}
-
TEST(branch_type) {
INIT_V8();
@@ -2418,11 +2284,8 @@ TEST(branch_type) {
RUN();
CHECK_EQUAL_64(0x0, x0);
-
- TEARDOWN();
}
-
TEST(ldr_str_offset) {
INIT_V8();
SETUP();
@@ -2461,11 +2324,8 @@ TEST(ldr_str_offset) {
CHECK_EQUAL_64(0x765400, dst[4]);
CHECK_EQUAL_64(src_base, x17);
CHECK_EQUAL_64(dst_base, x18);
-
- TEARDOWN();
}
-
TEST(ldr_str_wide) {
INIT_V8();
SETUP();
@@ -2510,8 +2370,6 @@ TEST(ldr_str_wide) {
CHECK_EQUAL_32(6144, dst[6144]);
CHECK_EQUAL_64(src_base + 6144 * sizeof(src[0]), x26);
CHECK_EQUAL_64(dst_base + 6144 * sizeof(dst[0]), x27);
-
- TEARDOWN();
}
TEST(ldr_str_preindex) {
@@ -2568,8 +2426,6 @@ TEST(ldr_str_preindex) {
CHECK_EQUAL_64(dst_base + 25, x24);
CHECK_EQUAL_64(src_base + 3, x25);
CHECK_EQUAL_64(dst_base + 41, x26);
-
- TEARDOWN();
}
TEST(ldr_str_postindex) {
@@ -2626,8 +2482,6 @@ TEST(ldr_str_postindex) {
CHECK_EQUAL_64(dst_base + 30, x24);
CHECK_EQUAL_64(src_base, x25);
CHECK_EQUAL_64(dst_base, x26);
-
- TEARDOWN();
}
TEST(load_signed) {
@@ -2663,8 +2517,6 @@ TEST(load_signed) {
CHECK_EQUAL_64(0x0000000000007F7FUL, x7);
CHECK_EQUAL_64(0xFFFFFFFF80008080UL, x8);
CHECK_EQUAL_64(0x000000007FFF7F7FUL, x9);
-
- TEARDOWN();
}
TEST(load_store_regoffset) {
@@ -2710,8 +2562,6 @@ TEST(load_store_regoffset) {
CHECK_EQUAL_32(2, dst[1]);
CHECK_EQUAL_32(3, dst[2]);
CHECK_EQUAL_32(3, dst[3]);
-
- TEARDOWN();
}
TEST(load_store_float) {
@@ -2752,8 +2602,6 @@ TEST(load_store_float) {
CHECK_EQUAL_64(dst_base + 2 * sizeof(dst[0]), x20);
CHECK_EQUAL_64(src_base + 2 * sizeof(src[0]), x21);
CHECK_EQUAL_64(dst_base, x22);
-
- TEARDOWN();
}
TEST(load_store_double) {
@@ -2794,8 +2642,6 @@ TEST(load_store_double) {
CHECK_EQUAL_64(dst_base + 2 * sizeof(dst[0]), x20);
CHECK_EQUAL_64(src_base + 2 * sizeof(src[0]), x21);
CHECK_EQUAL_64(dst_base, x22);
-
- TEARDOWN();
}
TEST(load_store_b) {
@@ -2836,8 +2682,6 @@ TEST(load_store_b) {
CHECK_EQUAL_64(dst_base + 2 * sizeof(dst[0]), x20);
CHECK_EQUAL_64(src_base + 2 * sizeof(src[0]), x21);
CHECK_EQUAL_64(dst_base, x22);
-
- TEARDOWN();
}
TEST(load_store_h) {
@@ -2878,8 +2722,6 @@ TEST(load_store_h) {
CHECK_EQUAL_64(dst_base + 2 * sizeof(dst[0]), x20);
CHECK_EQUAL_64(src_base + 2 * sizeof(src[0]), x21);
CHECK_EQUAL_64(dst_base, x22);
-
- TEARDOWN();
}
TEST(load_store_q) {
@@ -2928,8 +2770,6 @@ TEST(load_store_q) {
CHECK_EQUAL_64(dst_base + 32, x20);
CHECK_EQUAL_64(src_base + 32, x21);
CHECK_EQUAL_64(dst_base, x22);
-
- TEARDOWN();
}
TEST(neon_ld1_d) {
@@ -2978,8 +2818,6 @@ TEST(neon_ld1_d) {
CHECK_EQUAL_128(0, 0x14131211100F0E0D, q21);
CHECK_EQUAL_128(0, 0x1C1B1A1918171615, q22);
CHECK_EQUAL_128(0, 0x24232221201F1E1D, q23);
-
- TEARDOWN();
}
TEST(neon_ld1_d_postindex) {
@@ -3038,8 +2876,6 @@ TEST(neon_ld1_d_postindex) {
CHECK_EQUAL_64(src_base + 3 + 32, x20);
CHECK_EQUAL_64(src_base + 4 + 32, x21);
CHECK_EQUAL_64(src_base + 5 + 32, x22);
-
- TEARDOWN();
}
TEST(neon_ld1_q) {
@@ -3081,8 +2917,6 @@ TEST(neon_ld1_q) {
CHECK_EQUAL_128(0x232221201F1E1D1C, 0x1B1A191817161514, q31);
CHECK_EQUAL_128(0x333231302F2E2D2C, 0x2B2A292827262524, q0);
CHECK_EQUAL_128(0x434241403F3E3D3C, 0x3B3A393837363534, q1);
-
- TEARDOWN();
}
TEST(neon_ld1_q_postindex) {
@@ -3132,8 +2966,6 @@ TEST(neon_ld1_q_postindex) {
CHECK_EQUAL_64(src_base + 2 + 48, x19);
CHECK_EQUAL_64(src_base + 3 + 64, x20);
CHECK_EQUAL_64(src_base + 4 + 64, x21);
-
- TEARDOWN();
}
TEST(neon_ld1_lane) {
@@ -3196,8 +3028,6 @@ TEST(neon_ld1_lane) {
CHECK_EQUAL_128(0x0F0E0D0C0B0A0908, 0x0100050403020100, q5);
CHECK_EQUAL_128(0x0F0E0D0C03020100, 0x0706050403020100, q6);
CHECK_EQUAL_128(0x0706050403020100, 0x0706050403020100, q7);
-
- TEARDOWN();
}
TEST(neon_ld2_d) {
@@ -3231,8 +3061,6 @@ TEST(neon_ld2_d) {
CHECK_EQUAL_128(0, 0x11100D0C09080504, q7);
CHECK_EQUAL_128(0, 0x0E0D0C0B06050403, q31);
CHECK_EQUAL_128(0, 0x1211100F0A090807, q0);
-
- TEARDOWN();
}
TEST(neon_ld2_d_postindex) {
@@ -3276,8 +3104,6 @@ TEST(neon_ld2_d_postindex) {
CHECK_EQUAL_64(src_base + 2 + 16, x19);
CHECK_EQUAL_64(src_base + 3 + 16, x20);
CHECK_EQUAL_64(src_base + 4 + 16, x21);
-
- TEARDOWN();
}
TEST(neon_ld2_q) {
@@ -3315,8 +3141,6 @@ TEST(neon_ld2_q) {
CHECK_EQUAL_128(0x2221201F1A191817, 0x1211100F0A090807, q17);
CHECK_EQUAL_128(0x1B1A191817161514, 0x0B0A090807060504, q31);
CHECK_EQUAL_128(0x232221201F1E1D1C, 0x131211100F0E0D0C, q0);
-
- TEARDOWN();
}
TEST(neon_ld2_q_postindex) {
@@ -3361,8 +3185,6 @@ TEST(neon_ld2_q_postindex) {
CHECK_EQUAL_64(src_base + 2 + 32, x19);
CHECK_EQUAL_64(src_base + 3 + 32, x20);
CHECK_EQUAL_64(src_base + 4 + 32, x21);
-
- TEARDOWN();
}
TEST(neon_ld2_lane) {
@@ -3441,8 +3263,6 @@ TEST(neon_ld2_lane) {
CHECK_EQUAL_128(0x1F1E1D1C07060504, 0x1716151413121110, q13);
CHECK_EQUAL_128(0x0706050403020100, 0x0706050403020100, q14);
CHECK_EQUAL_128(0x0F0E0D0C0B0A0908, 0x1716151413121110, q15);
-
- TEARDOWN();
}
TEST(neon_ld2_lane_postindex) {
@@ -3536,8 +3356,6 @@ TEST(neon_ld2_lane_postindex) {
CHECK_EQUAL_64(src_base + 2, x22);
CHECK_EQUAL_64(src_base + 3, x23);
CHECK_EQUAL_64(src_base + 4, x24);
-
- TEARDOWN();
}
TEST(neon_ld2_alllanes) {
@@ -3584,8 +3402,6 @@ TEST(neon_ld2_alllanes) {
CHECK_EQUAL_128(0x11100F0E11100F0E, 0x11100F0E11100F0E, q11);
CHECK_EQUAL_128(0x1918171615141312, 0x1918171615141312, q12);
CHECK_EQUAL_128(0x21201F1E1D1C1B1A, 0x21201F1E1D1C1B1A, q13);
-
- TEARDOWN();
}
TEST(neon_ld2_alllanes_postindex) {
@@ -3627,8 +3443,6 @@ TEST(neon_ld2_alllanes_postindex) {
CHECK_EQUAL_128(0x1918171615141312, 0x1918171615141312, q12);
CHECK_EQUAL_128(0x21201F1E1D1C1B1A, 0x21201F1E1D1C1B1A, q13);
CHECK_EQUAL_64(src_base + 34, x17);
-
- TEARDOWN();
}
TEST(neon_ld3_d) {
@@ -3666,8 +3480,6 @@ TEST(neon_ld3_d) {
CHECK_EQUAL_128(0, 0x1211100F06050403, q31);
CHECK_EQUAL_128(0, 0x161514130A090807, q0);
CHECK_EQUAL_128(0, 0x1A1918170E0D0C0B, q1);
-
- TEARDOWN();
}
TEST(neon_ld3_d_postindex) {
@@ -3717,8 +3529,6 @@ TEST(neon_ld3_d_postindex) {
CHECK_EQUAL_64(src_base + 2 + 24, x19);
CHECK_EQUAL_64(src_base + 3 + 24, x20);
CHECK_EQUAL_64(src_base + 4 + 24, x21);
-
- TEARDOWN();
}
TEST(neon_ld3_q) {
@@ -3761,8 +3571,6 @@ TEST(neon_ld3_q) {
CHECK_EQUAL_128(0x232221201F1E1D1C, 0x0B0A090807060504, q31);
CHECK_EQUAL_128(0x2B2A292827262524, 0x131211100F0E0D0C, q0);
CHECK_EQUAL_128(0x333231302F2E2D2C, 0x1B1A191817161514, q1);
-
- TEARDOWN();
}
TEST(neon_ld3_q_postindex) {
@@ -3813,8 +3621,6 @@ TEST(neon_ld3_q_postindex) {
CHECK_EQUAL_64(src_base + 2 + 48, x19);
CHECK_EQUAL_64(src_base + 3 + 48, x20);
CHECK_EQUAL_64(src_base + 4 + 48, x21);
-
- TEARDOWN();
}
TEST(neon_ld3_lane) {
@@ -3899,8 +3705,6 @@ TEST(neon_ld3_lane) {
CHECK_EQUAL_128(0x0F0E0D0C0B0A0908, 0x0100050403020100, q15);
CHECK_EQUAL_128(0x1F1E1D1C1B1A1918, 0x0302151413121110, q16);
CHECK_EQUAL_128(0x2F2E2D2C2B2A2928, 0x0504252423222120, q17);
-
- TEARDOWN();
}
TEST(neon_ld3_lane_postindex) {
@@ -4006,8 +3810,6 @@ TEST(neon_ld3_lane_postindex) {
CHECK_EQUAL_64(src_base + 2, x22);
CHECK_EQUAL_64(src_base + 3, x23);
CHECK_EQUAL_64(src_base + 4, x24);
-
- TEARDOWN();
}
TEST(neon_ld3_alllanes) {
@@ -4061,8 +3863,6 @@ TEST(neon_ld3_alllanes) {
CHECK_EQUAL_128(0x201F1E1D1C1B1A19, 0x201F1E1D1C1B1A19, q18);
CHECK_EQUAL_128(0x2827262524232221, 0x2827262524232221, q19);
CHECK_EQUAL_128(0x302F2E2D2C2B2A29, 0x302F2E2D2C2B2A29, q20);
-
- TEARDOWN();
}
TEST(neon_ld3_alllanes_postindex) {
@@ -4112,8 +3912,6 @@ TEST(neon_ld3_alllanes_postindex) {
CHECK_EQUAL_128(0x201F1E1D1C1B1A19, 0x201F1E1D1C1B1A19, q18);
CHECK_EQUAL_128(0x2827262524232221, 0x2827262524232221, q19);
CHECK_EQUAL_128(0x302F2E2D2C2B2A29, 0x302F2E2D2C2B2A29, q20);
-
- TEARDOWN();
}
TEST(neon_ld4_d) {
@@ -4155,8 +3953,6 @@ TEST(neon_ld4_d) {
CHECK_EQUAL_128(0, 0x1A1918170A090807, q31);
CHECK_EQUAL_128(0, 0x1E1D1C1B0E0D0C0B, q0);
CHECK_EQUAL_128(0, 0x2221201F1211100F, q1);
-
- TEARDOWN();
}
TEST(neon_ld4_d_postindex) {
@@ -4216,7 +4012,6 @@ TEST(neon_ld4_d_postindex) {
CHECK_EQUAL_64(src_base + 2 + 32, x19);
CHECK_EQUAL_64(src_base + 3 + 32, x20);
CHECK_EQUAL_64(src_base + 4 + 32, x21);
- TEARDOWN();
}
TEST(neon_ld4_q) {
@@ -4264,7 +4059,6 @@ TEST(neon_ld4_q) {
CHECK_EQUAL_128(0x333231302F2E2D2C, 0x131211100F0E0D0C, q19);
CHECK_EQUAL_128(0x3B3A393837363534, 0x1B1A191817161514, q20);
CHECK_EQUAL_128(0x434241403F3E3D3C, 0x232221201F1E1D1C, q21);
- TEARDOWN();
}
TEST(neon_ld4_q_postindex) {
@@ -4325,8 +4119,6 @@ TEST(neon_ld4_q_postindex) {
CHECK_EQUAL_64(src_base + 2 + 64, x19);
CHECK_EQUAL_64(src_base + 3 + 64, x20);
CHECK_EQUAL_64(src_base + 4 + 64, x21);
-
- TEARDOWN();
}
TEST(neon_ld4_lane) {
@@ -4432,8 +4224,6 @@ TEST(neon_ld4_lane) {
CHECK_EQUAL_128(0x0F0E0D0C0B0A0908, 0x1716151413121110, q29);
CHECK_EQUAL_128(0x1716151413121110, 0x2726252423222120, q30);
CHECK_EQUAL_128(0x1F1E1D1C1B1A1918, 0x3736353433323130, q31);
-
- TEARDOWN();
}
TEST(neon_ld4_lane_postindex) {
@@ -4558,8 +4348,6 @@ TEST(neon_ld4_lane_postindex) {
CHECK_EQUAL_64(src_base + 2, x22);
CHECK_EQUAL_64(src_base + 3, x23);
CHECK_EQUAL_64(src_base + 4, x24);
-
- TEARDOWN();
}
TEST(neon_ld4_alllanes) {
@@ -4621,8 +4409,6 @@ TEST(neon_ld4_alllanes) {
CHECK_EQUAL_128(0x2F2E2D2C2B2A2928, 0x2F2E2D2C2B2A2928, q25);
CHECK_EQUAL_128(0x3736353433323130, 0x3736353433323130, q26);
CHECK_EQUAL_128(0x3F3E3D3C3B3A3938, 0x3F3E3D3C3B3A3938, q27);
-
- TEARDOWN();
}
TEST(neon_ld4_alllanes_postindex) {
@@ -4687,8 +4473,6 @@ TEST(neon_ld4_alllanes_postindex) {
CHECK_EQUAL_128(0x3736353433323130, 0x3736353433323130, q26);
CHECK_EQUAL_128(0x3F3E3D3C3B3A3938, 0x3F3E3D3C3B3A3938, q27);
CHECK_EQUAL_64(src_base + 64, x17);
-
- TEARDOWN();
}
TEST(neon_st1_lane) {
@@ -4738,8 +4522,6 @@ TEST(neon_st1_lane) {
CHECK_EQUAL_128(0x0100030205040706, 0x09080B0A0D0C0F0E, q2);
CHECK_EQUAL_128(0x0302010007060504, 0x0B0A09080F0E0D0C, q3);
CHECK_EQUAL_128(0x0706050403020100, 0x0F0E0D0C0B0A0908, q4);
-
- TEARDOWN();
}
TEST(neon_st2_lane) {
@@ -4831,8 +4613,6 @@ TEST(neon_st2_lane) {
CHECK_EQUAL_128(0x18191A1B1C1D1E1F, 0x08090A0B0C0D0E0F, q23);
CHECK_EQUAL_128(0x1011121314151617, 0x0001020304050607, q22);
CHECK_EQUAL_128(0x18191A1B1C1D1E1F, 0x08090A0B0C0D0E0F, q23);
-
- TEARDOWN();
}
TEST(neon_st3_lane) {
@@ -4930,8 +4710,6 @@ TEST(neon_st3_lane) {
CHECK_EQUAL_128(0x0405060720212223, 0x1011121300010203, q26);
CHECK_EQUAL_128(0x18191A1B08090A0B, 0x2425262714151617, q27);
CHECK_EQUAL_128(0x2C2D2E2F1C1D1E1F, 0x0C0D0E0F28292A2B, q28);
-
- TEARDOWN();
}
TEST(neon_st4_lane) {
@@ -5013,8 +4791,6 @@ TEST(neon_st4_lane) {
CHECK_EQUAL_128(0x28292A2B2C2D2E2F, 0x28292A2B2C2D2E2F, q25);
CHECK_EQUAL_128(0x1011121314151617, 0x0001020304050607, q26);
CHECK_EQUAL_128(0x2021222324252627, 0x2021222324252627, q27);
-
- TEARDOWN();
}
TEST(neon_ld1_lane_postindex) {
@@ -5091,8 +4867,6 @@ TEST(neon_ld1_lane_postindex) {
CHECK_EQUAL_64(src_base + 2, x22);
CHECK_EQUAL_64(src_base + 3, x23);
CHECK_EQUAL_64(src_base + 4, x24);
-
- TEARDOWN();
}
TEST(neon_st1_lane_postindex) {
@@ -5138,8 +4912,6 @@ TEST(neon_st1_lane_postindex) {
CHECK_EQUAL_128(0x0100030205040706, 0x09080B0A0D0C0F0E, q2);
CHECK_EQUAL_128(0x0302010007060504, 0x0B0A09080F0E0D0C, q3);
CHECK_EQUAL_128(0x0706050403020100, 0x0F0E0D0C0B0A0908, q4);
-
- TEARDOWN();
}
TEST(neon_ld1_alllanes) {
@@ -5181,8 +4953,6 @@ TEST(neon_ld1_alllanes) {
CHECK_EQUAL_128(0x0908070609080706, 0x0908070609080706, q5);
CHECK_EQUAL_128(0, 0x0E0D0C0B0A090807, q6);
CHECK_EQUAL_128(0x0F0E0D0C0B0A0908, 0x0F0E0D0C0B0A0908, q7);
-
- TEARDOWN();
}
TEST(neon_ld1_alllanes_postindex) {
@@ -5217,8 +4987,6 @@ TEST(neon_ld1_alllanes_postindex) {
CHECK_EQUAL_128(0x0A0908070A090807, 0x0A0908070A090807, q5);
CHECK_EQUAL_128(0x1211100F0E0D0C0B, 0x1211100F0E0D0C0B, q6);
CHECK_EQUAL_64(src_base + 19, x17);
-
- TEARDOWN();
}
TEST(neon_st1_d) {
@@ -5274,8 +5042,6 @@ TEST(neon_st1_d) {
CHECK_EQUAL_128(0x3736353433323130, 0x2726252423222120, q22);
CHECK_EQUAL_128(0x1716151413121110, 0x0706050403020100, q23);
CHECK_EQUAL_128(0x3736353433323130, 0x2726252423222120, q24);
-
- TEARDOWN();
}
TEST(neon_st1_d_postindex) {
@@ -5333,8 +5099,6 @@ TEST(neon_st1_d_postindex) {
CHECK_EQUAL_128(0x3736353433323130, 0x2726252423222120, q22);
CHECK_EQUAL_128(0x1716151413121110, 0x0706050403020100, q23);
CHECK_EQUAL_128(0x3736353433323130, 0x2726252423222120, q24);
-
- TEARDOWN();
}
TEST(neon_st1_q) {
@@ -5385,8 +5149,6 @@ TEST(neon_st1_q) {
CHECK_EQUAL_128(0x1F1E1D1C1B1A1918, 0x1716151413121110, q23);
CHECK_EQUAL_128(0x2F2E2D2C2B2A2928, 0x2726252423222120, q24);
CHECK_EQUAL_128(0x3F3E3D3C3B3A3938, 0x3736353433323130, q25);
-
- TEARDOWN();
}
TEST(neon_st1_q_postindex) {
@@ -5443,8 +5205,6 @@ TEST(neon_st1_q_postindex) {
CHECK_EQUAL_128(0x1F1E1D1C1B1A1918, 0x1716151413121110, q23);
CHECK_EQUAL_128(0x2F2E2D2C2B2A2928, 0x2726252423222120, q24);
CHECK_EQUAL_128(0x3F3E3D3C3B3A3938, 0x3736353433323130, q25);
-
- TEARDOWN();
}
TEST(neon_st2_d) {
@@ -5483,8 +5243,6 @@ TEST(neon_st2_d) {
CHECK_EQUAL_128(0x0504131203021110, 0x0100151413121110, q1);
CHECK_EQUAL_128(0x1615140706050413, 0x1211100302010014, q2);
CHECK_EQUAL_128(0x3F3E3D3C3B3A3938, 0x3736353433323117, q3);
-
- TEARDOWN();
}
TEST(neon_st2_d_postindex) {
@@ -5520,8 +5278,6 @@ TEST(neon_st2_d_postindex) {
CHECK_EQUAL_128(0x1405041312030211, 0x1001000211011000, q0);
CHECK_EQUAL_128(0x0605041312111003, 0x0201001716070615, q1);
CHECK_EQUAL_128(0x2F2E2D2C2B2A2928, 0x2726251716151407, q2);
-
- TEARDOWN();
}
TEST(neon_st2_q) {
@@ -5562,7 +5318,6 @@ TEST(neon_st2_q) {
CHECK_EQUAL_128(0x01000B0A19180908, 0x1716070615140504, q1);
CHECK_EQUAL_128(0x1716151413121110, 0x0706050403020100, q2);
CHECK_EQUAL_128(0x1F1E1D1C1B1A1918, 0x0F0E0D0C0B0A0908, q3);
- TEARDOWN();
}
TEST(neon_st2_q_postindex) {
@@ -5603,8 +5358,6 @@ TEST(neon_st2_q_postindex) {
CHECK_EQUAL_128(0x0504030201001003, 0x0201001F1E0F0E1D, q2);
CHECK_EQUAL_128(0x0D0C0B0A09081716, 0x1514131211100706, q3);
CHECK_EQUAL_128(0x4F4E4D4C4B4A1F1E, 0x1D1C1B1A19180F0E, q4);
-
- TEARDOWN();
}
TEST(neon_st3_d) {
@@ -5640,8 +5393,6 @@ TEST(neon_st3_d) {
CHECK_EQUAL_128(0x2221201312111003, 0x0201000100201000, q0);
CHECK_EQUAL_128(0x1F1E1D2726252417, 0x1615140706050423, q1);
-
- TEARDOWN();
}
TEST(neon_st3_d_postindex) {
@@ -5680,8 +5431,6 @@ TEST(neon_st3_d_postindex) {
CHECK_EQUAL_128(0x0201002726171607, 0x0625241514050423, q1);
CHECK_EQUAL_128(0x1615140706050423, 0x2221201312111003, q2);
CHECK_EQUAL_128(0x3F3E3D3C3B3A3938, 0x3736352726252417, q3);
-
- TEARDOWN();
}
TEST(neon_st3_q) {
@@ -5727,8 +5476,6 @@ TEST(neon_st3_q) {
CHECK_EQUAL_128(0x0827262524232221, 0x2017161514131211, q3);
CHECK_EQUAL_128(0x281F1E1D1C1B1A19, 0x180F0E0D0C0B0A09, q4);
CHECK_EQUAL_128(0x5F5E5D5C5B5A5958, 0x572F2E2D2C2B2A29, q5);
-
- TEARDOWN();
}
TEST(neon_st3_q_postindex) {
@@ -5774,8 +5521,6 @@ TEST(neon_st3_q_postindex) {
CHECK_EQUAL_128(0x2524232221201716, 0x1514131211100706, q4);
CHECK_EQUAL_128(0x1D1C1B1A19180F0E, 0x0D0C0B0A09082726, q5);
CHECK_EQUAL_128(0x6F6E6D6C6B6A2F2E, 0x2D2C2B2A29281F1E, q6);
-
- TEARDOWN();
}
TEST(neon_st4_d) {
@@ -5816,8 +5561,6 @@ TEST(neon_st4_d) {
CHECK_EQUAL_128(0x1003020100322322, 0X1312030231302120, q1);
CHECK_EQUAL_128(0x1407060504333231, 0X3023222120131211, q2);
CHECK_EQUAL_128(0x3F3E3D3C3B373635, 0x3427262524171615, q3);
-
- TEARDOWN();
}
TEST(neon_st4_d_postindex) {
@@ -5861,8 +5604,6 @@ TEST(neon_st4_d_postindex) {
CHECK_EQUAL_128(0x2221201312111003, 0x0201003736272617, q2);
CHECK_EQUAL_128(0x2625241716151407, 0x0605043332313023, q3);
CHECK_EQUAL_128(0x4F4E4D4C4B4A4948, 0x4746453736353427, q4);
-
- TEARDOWN();
}
TEST(neon_st4_q) {
@@ -5912,8 +5653,6 @@ TEST(neon_st4_q) {
CHECK_EQUAL_128(0x180F0E0D0C0B0A09, 0x0837363534333231, q4);
CHECK_EQUAL_128(0x382F2E2D2C2B2A29, 0x281F1E1D1C1B1A19, q5);
CHECK_EQUAL_128(0x6F6E6D6C6B6A6968, 0x673F3E3D3C3B3A39, q6);
-
- TEARDOWN();
}
TEST(neon_st4_q_postindex) {
@@ -5967,8 +5706,6 @@ TEST(neon_st4_q_postindex) {
CHECK_EQUAL_128(0x0D0C0B0A09083736, 0x3534333231302726, q6);
CHECK_EQUAL_128(0x2D2C2B2A29281F1E, 0x1D1C1B1A19180F0E, q7);
CHECK_EQUAL_128(0x8F8E8D8C8B8A3F3E, 0x3D3C3B3A39382F2E, q8);
-
- TEARDOWN();
}
TEST(neon_destructive_minmaxp) {
@@ -6033,8 +5770,6 @@ TEST(neon_destructive_minmaxp) {
CHECK_EQUAL_128(0, 0x1111111133333333, q29);
CHECK_EQUAL_128(0, 0x1111111133333333, q30);
CHECK_EQUAL_128(0, 0x3333333333333333, q31);
-
- TEARDOWN();
}
TEST(neon_destructive_tbl) {
@@ -6085,8 +5820,6 @@ TEST(neon_destructive_tbl) {
CHECK_EQUAL_128(0xA0000000D4D5D6C7, 0xC8C9BABBBCADAEAF, q21);
CHECK_EQUAL_128(0xA0000000D4D5D6C7, 0xC8C9BABBBCADAEAF, q22);
CHECK_EQUAL_128(0x0F000000C4C5C6B7, 0xB8B9AAABAC424100, q26);
-
- TEARDOWN();
}
TEST(neon_destructive_tbx) {
@@ -6137,8 +5870,6 @@ TEST(neon_destructive_tbx) {
CHECK_EQUAL_128(0xA0414243D4D5D6C7, 0xC8C9BABBBCADAEAF, q21);
CHECK_EQUAL_128(0xA0AEADACD4D5D6C7, 0xC8C9BABBBCADAEAF, q22);
CHECK_EQUAL_128(0x0F414243C4C5C6B7, 0xB8B9AAABAC424100, q26);
-
- TEARDOWN();
}
TEST(neon_destructive_fcvtl) {
@@ -6175,11 +5906,8 @@ TEST(neon_destructive_fcvtl) {
CHECK_EQUAL_128(0x400000003F800000, 0x3F80000040000000, q21);
CHECK_EQUAL_128(0xC0000000BF800000, 0xBF800000C0000000, q22);
CHECK_EQUAL_128(0x400000003F800000, 0x3F80000040000000, q23);
-
- TEARDOWN();
}
-
TEST(ldp_stp_float) {
INIT_V8();
SETUP();
@@ -6205,11 +5933,8 @@ TEST(ldp_stp_float) {
CHECK_EQUAL_FP32(1.0, dst[2]);
CHECK_EQUAL_64(src_base + 2 * sizeof(src[0]), x16);
CHECK_EQUAL_64(dst_base + sizeof(dst[1]), x17);
-
- TEARDOWN();
}
-
TEST(ldp_stp_double) {
INIT_V8();
SETUP();
@@ -6235,8 +5960,6 @@ TEST(ldp_stp_double) {
CHECK_EQUAL_FP64(1.0, dst[2]);
CHECK_EQUAL_64(src_base + 2 * sizeof(src[0]), x16);
CHECK_EQUAL_64(dst_base + sizeof(dst[1]), x17);
-
- TEARDOWN();
}
TEST(ldp_stp_quad) {
@@ -6267,8 +5990,6 @@ TEST(ldp_stp_quad) {
CHECK_EQUAL_64(0xAAAAAAAA55555555, dst[5]);
CHECK_EQUAL_64(src_base + 4 * sizeof(src[0]), x16);
CHECK_EQUAL_64(dst_base + 2 * sizeof(dst[1]), x17);
-
- TEARDOWN();
}
TEST(ldp_stp_offset) {
@@ -6321,11 +6042,8 @@ TEST(ldp_stp_offset) {
CHECK_EQUAL_64(dst_base, x17);
CHECK_EQUAL_64(src_base + 24, x18);
CHECK_EQUAL_64(dst_base + 56, x19);
-
- TEARDOWN();
}
-
TEST(ldp_stp_offset_wide) {
INIT_V8();
SETUP();
@@ -6379,11 +6097,8 @@ TEST(ldp_stp_offset_wide) {
CHECK_EQUAL_64(dst_base - base_offset, x21);
CHECK_EQUAL_64(src_base + base_offset + 24, x18);
CHECK_EQUAL_64(dst_base + base_offset + 56, x19);
-
- TEARDOWN();
}
-
TEST(ldp_stp_preindex) {
INIT_V8();
SETUP();
@@ -6434,11 +6149,8 @@ TEST(ldp_stp_preindex) {
CHECK_EQUAL_64(dst_base + 4, x20);
CHECK_EQUAL_64(src_base + 8, x21);
CHECK_EQUAL_64(dst_base + 24, x22);
-
- TEARDOWN();
}
-
TEST(ldp_stp_preindex_wide) {
INIT_V8();
SETUP();
@@ -6497,11 +6209,8 @@ TEST(ldp_stp_preindex_wide) {
CHECK_EQUAL_64(dst_base + 4, x20);
CHECK_EQUAL_64(src_base + 8, x21);
CHECK_EQUAL_64(dst_base + 24, x22);
-
- TEARDOWN();
}
-
TEST(ldp_stp_postindex) {
INIT_V8();
SETUP();
@@ -6552,11 +6261,8 @@ TEST(ldp_stp_postindex) {
CHECK_EQUAL_64(dst_base + 4, x20);
CHECK_EQUAL_64(src_base + 8, x21);
CHECK_EQUAL_64(dst_base + 24, x22);
-
- TEARDOWN();
}
-
TEST(ldp_stp_postindex_wide) {
INIT_V8();
SETUP();
@@ -6615,11 +6321,8 @@ TEST(ldp_stp_postindex_wide) {
CHECK_EQUAL_64(dst_base - base_offset + 4, x20);
CHECK_EQUAL_64(src_base + base_offset + 8, x21);
CHECK_EQUAL_64(dst_base - base_offset + 24, x22);
-
- TEARDOWN();
}
-
TEST(ldp_sign_extend) {
INIT_V8();
SETUP();
@@ -6636,11 +6339,8 @@ TEST(ldp_sign_extend) {
CHECK_EQUAL_64(0xFFFFFFFF80000000UL, x0);
CHECK_EQUAL_64(0x000000007FFFFFFFUL, x1);
-
- TEARDOWN();
}
-
TEST(ldur_stur) {
INIT_V8();
SETUP();
@@ -6681,8 +6381,6 @@ TEST(ldur_stur) {
CHECK_EQUAL_64(dst_base, x18);
CHECK_EQUAL_64(src_base + 16, x19);
CHECK_EQUAL_64(dst_base + 32, x20);
-
- TEARDOWN();
}
namespace {
@@ -6719,8 +6417,6 @@ TEST(ldr_pcrel_large_offset) {
CHECK_EQUAL_64(0x1234567890ABCDEFUL, x1);
CHECK_EQUAL_64(0x1234567890ABCDEFUL, x2);
-
- TEARDOWN();
}
TEST(ldr_literal) {
@@ -6735,8 +6431,6 @@ TEST(ldr_literal) {
RUN();
CHECK_EQUAL_64(0x1234567890ABCDEFUL, x2);
-
- TEARDOWN();
}
#ifdef DEBUG
@@ -6769,7 +6463,11 @@ static void LdrLiteralRangeHelper(size_t range, LiteralPoolEmitOutcome outcome,
// can be handled by this test.
CHECK_LE(code_size, range);
- auto PoolSizeAt = [pool_entries](int pc_offset) {
+#if defined(_M_ARM64) && !defined(__clang__)
+ auto PoolSizeAt = [pool_entries, kEntrySize](int pc_offset) {
+#else
+ auto PoolSizeAt = [](int pc_offset) {
+#endif
// To determine padding, consider the size of the prologue of the pool,
// and the jump around the pool, which we always need.
size_t prologue_size = 2 * kInstrSize + kInstrSize;
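The #if branch looks like a workaround for a front-end quirk rather than a semantic change: standard C++ lets a lambda read a constant-initialized const local without capturing it (the read is not an odr-use), but the guard suggests MSVC proper targeting ARM64 insists on an explicit capture, while clang-cl and other compilers accept the captureless form. A sketch of the two spellings (the MSVC/ARM64 behaviour is inferred from the guard above, not verified here):

    void CaptureSketch() {
      const int kEntrySizeSketch = 8;  // constant-initialized const local
      // Standard C++: no capture needed, since the read is not an odr-use.
      auto portable = [](int n) { return n * kEntrySizeSketch; };
      // Spelling used under _M_ARM64 without __clang__: capture explicitly.
      auto msvc_arm64 = [kEntrySizeSketch](int n) {
        return n * kEntrySizeSketch;
      };
      (void)portable(1);
      (void)msvc_arm64(1);
    }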
@@ -6826,8 +6524,6 @@ static void LdrLiteralRangeHelper(size_t range, LiteralPoolEmitOutcome outcome,
CHECK_EQUAL_64(0xABCDEF1234567890UL, x1);
CHECK_EQUAL_64(0x34567890ABCDEF12UL, x4);
CHECK_EQUAL_64(0xABCDEF0123456789UL, x5);
-
- TEARDOWN();
}
TEST(ldr_literal_range_max_dist_emission_1) {
@@ -6910,11 +6606,8 @@ TEST(add_sub_imm) {
CHECK_EQUAL_32(0x1000, w25);
CHECK_EQUAL_32(0x111, w26);
CHECK_EQUAL_32(0xFFFFFFFF, w27);
-
- TEARDOWN();
}
-
TEST(add_sub_wide_imm) {
INIT_V8();
SETUP();
@@ -6947,13 +6640,10 @@ TEST(add_sub_wide_imm) {
CHECK_EQUAL_32(kWMinInt, w18);
CHECK_EQUAL_32(kWMinInt, w19);
- CHECK_EQUAL_64(-0x1234567890ABCDEFUL, x20);
+ CHECK_EQUAL_64(-0x1234567890ABCDEFLL, x20);
CHECK_EQUAL_32(-0x12345678, w21);
-
- TEARDOWN();
}
-
TEST(add_sub_shifted) {
INIT_V8();
SETUP();
@@ -7002,11 +6692,8 @@ TEST(add_sub_shifted) {
CHECK_EQUAL_64(0x00765432, x25);
CHECK_EQUAL_64(0x10765432, x26);
CHECK_EQUAL_64(0x10FEDCBA98765432L, x27);
-
- TEARDOWN();
}
-
TEST(add_sub_extended) {
INIT_V8();
SETUP();
@@ -7073,11 +6760,8 @@ TEST(add_sub_extended) {
CHECK_EQUAL_64(0xFFFFFFFC4D5E6F78L, x29);
CHECK_EQUAL_64(256, x30);
-
- TEARDOWN();
}
-
TEST(add_sub_negative) {
INIT_V8();
SETUP();
@@ -7119,11 +6803,8 @@ TEST(add_sub_negative) {
CHECK_EQUAL_32(0x11223400, w21);
CHECK_EQUAL_32(402000, w22);
-
- TEARDOWN();
}
-
TEST(add_sub_zero) {
INIT_V8();
SETUP();
@@ -7157,8 +6838,6 @@ TEST(add_sub_zero) {
CHECK_EQUAL_64(0, x0);
CHECK_EQUAL_64(0, x1);
CHECK_EQUAL_64(0, x2);
-
- TEARDOWN();
}
TEST(preshift_immediates) {
@@ -7227,8 +6906,6 @@ TEST(preshift_immediates) {
CHECK_EQUAL_64(0x207F0, x13);
CHECK_EQUAL_64(0x1F7F0, x14);
CHECK_EQUAL_64(0x11100, x15);
-
- TEARDOWN();
}
TEST(claim_drop_zero) {
@@ -7256,11 +6933,8 @@ TEST(claim_drop_zero) {
END();
RUN();
-
- TEARDOWN();
}
-
TEST(neg) {
INIT_V8();
SETUP();
@@ -7305,11 +6979,8 @@ TEST(neg) {
CHECK_EQUAL_64(0x0000000000019088UL, x12);
CHECK_EQUAL_64(0x65432110, x13);
CHECK_EQUAL_64(0x0000000765432110UL, x14);
-
- TEARDOWN();
}
-
template <typename T, typename Op>
static void AdcsSbcsHelper(Op op, T left, T right, int carry, T expected,
StatusFlags expected_flags) {
@@ -7335,11 +7006,8 @@ static void AdcsSbcsHelper(Op op, T left, T right, int carry, T expected,
CHECK_EQUAL_64(right, right_reg.X());
CHECK_EQUAL_64(expected, result_reg.X());
CHECK_EQUAL_NZCV(expected_flags);
-
- TEARDOWN();
}
-
TEST(adcs_sbcs_x) {
INIT_V8();
uint64_t inputs[] = {
@@ -7509,7 +7177,6 @@ TEST(adcs_sbcs_x) {
}
}
-
TEST(adcs_sbcs_w) {
INIT_V8();
uint32_t inputs[] = {
@@ -7678,7 +7345,6 @@ TEST(adcs_sbcs_w) {
}
}
-
TEST(adc_sbc_shift) {
INIT_V8();
SETUP();
@@ -7724,7 +7390,7 @@ TEST(adc_sbc_shift) {
RUN();
CHECK_EQUAL_64(0xFFFFFFFFFFFFFFFFL, x5);
- CHECK_EQUAL_64(1L << 60, x6);
+ CHECK_EQUAL_64(1LL << 60, x6);
CHECK_EQUAL_64(0xF0123456789ABCDDL, x7);
CHECK_EQUAL_64(0x0111111111111110L, x8);
CHECK_EQUAL_64(0x1222222222222221L, x9);
@@ -7735,22 +7401,19 @@ TEST(adc_sbc_shift) {
CHECK_EQUAL_32(0x91111110, w13);
CHECK_EQUAL_32(0x9A222221, w14);
- CHECK_EQUAL_64(0xFFFFFFFFFFFFFFFFL + 1, x18);
- CHECK_EQUAL_64((1L << 60) + 1, x19);
+ CHECK_EQUAL_64(0xFFFFFFFFFFFFFFFFLL + 1, x18);
+ CHECK_EQUAL_64((1LL << 60) + 1, x19);
CHECK_EQUAL_64(0xF0123456789ABCDDL + 1, x20);
CHECK_EQUAL_64(0x0111111111111110L + 1, x21);
CHECK_EQUAL_64(0x1222222222222221L + 1, x22);
- CHECK_EQUAL_32(0xFFFFFFFF + 1, w23);
+ CHECK_EQUAL_32(0xFFFFFFFFULL + 1, w23);
CHECK_EQUAL_32((1 << 30) + 1, w24);
CHECK_EQUAL_32(0xF89ABCDD + 1, w25);
CHECK_EQUAL_32(0x91111110 + 1, w26);
CHECK_EQUAL_32(0x9A222221 + 1, w27);
-
- TEARDOWN();
}
-
TEST(adc_sbc_extend) {
INIT_V8();
SETUP();
@@ -7840,11 +7503,8 @@ TEST(adc_sbc_extend) {
RUN();
CHECK_EQUAL_NZCV(NVFlag);
-
- TEARDOWN();
}
-
TEST(adc_sbc_wide_imm) {
INIT_V8();
SETUP();
@@ -7888,11 +7548,8 @@ TEST(adc_sbc_wide_imm) {
CHECK_EQUAL_64(1, x21);
CHECK_EQUAL_64(0x100000000UL, x22);
CHECK_EQUAL_64(0x10000, x23);
-
- TEARDOWN();
}
-
TEST(flags) {
INIT_V8();
SETUP();
@@ -8038,11 +7695,8 @@ TEST(flags) {
RUN();
CHECK_EQUAL_NZCV(ZCFlag);
-
- TEARDOWN();
}
-
TEST(cmp_shift) {
INIT_V8();
SETUP();
@@ -8094,11 +7748,8 @@ TEST(cmp_shift) {
CHECK_EQUAL_32(ZCFlag, w5);
CHECK_EQUAL_32(ZCFlag, w6);
CHECK_EQUAL_32(ZCFlag, w7);
-
- TEARDOWN();
}
-
TEST(cmp_extend) {
INIT_V8();
SETUP();
@@ -8147,11 +7798,8 @@ TEST(cmp_extend) {
CHECK_EQUAL_32(ZCFlag, w5);
CHECK_EQUAL_32(NCFlag, w6);
CHECK_EQUAL_32(ZCFlag, w7);
-
- TEARDOWN();
}
-
TEST(ccmp) {
INIT_V8();
SETUP();
@@ -8191,11 +7839,8 @@ TEST(ccmp) {
CHECK_EQUAL_32(NZCVFlag, w3);
CHECK_EQUAL_32(ZCFlag, w4);
CHECK_EQUAL_32(ZCFlag, w5);
-
- TEARDOWN();
}
-
TEST(ccmp_wide_imm) {
INIT_V8();
SETUP();
@@ -8216,11 +7861,8 @@ TEST(ccmp_wide_imm) {
CHECK_EQUAL_32(NFlag, w0);
CHECK_EQUAL_32(NoFlag, w1);
-
- TEARDOWN();
}
-
TEST(ccmp_shift_extend) {
INIT_V8();
SETUP();
@@ -8260,11 +7902,8 @@ TEST(ccmp_shift_extend) {
CHECK_EQUAL_32(ZCFlag, w2);
CHECK_EQUAL_32(NCFlag, w3);
CHECK_EQUAL_32(NZCVFlag, w4);
-
- TEARDOWN();
}
-
TEST(csel) {
INIT_V8();
SETUP();
@@ -8330,11 +7969,8 @@ TEST(csel) {
CHECK_EQUAL_64(0x0000001F0000001FUL, x25);
CHECK_EQUAL_64(0x0000001F0000001FUL, x26);
CHECK_EQUAL_64(0, x27);
-
- TEARDOWN();
}
-
TEST(csel_imm) {
INIT_V8();
SETUP();
@@ -8384,11 +8020,8 @@ TEST(csel_imm) {
CHECK_EQUAL_64(-1, x13);
CHECK_EQUAL_64(0x4000000000000000UL, x14);
CHECK_EQUAL_64(0x8000000000000000UL, x15);
-
- TEARDOWN();
}
-
TEST(lslv) {
INIT_V8();
SETUP();
@@ -8437,11 +8070,8 @@ TEST(lslv) {
CHECK_EQUAL_32(value << (shift[3] & 31), w25);
CHECK_EQUAL_32(value << (shift[4] & 31), w26);
CHECK_EQUAL_32(value << (shift[5] & 31), w27);
-
- TEARDOWN();
}
-
TEST(lsrv) {
INIT_V8();
SETUP();
@@ -8492,11 +8122,8 @@ TEST(lsrv) {
CHECK_EQUAL_32(value >> (shift[3] & 31), w25);
CHECK_EQUAL_32(value >> (shift[4] & 31), w26);
CHECK_EQUAL_32(value >> (shift[5] & 31), w27);
-
- TEARDOWN();
}
-
TEST(asrv) {
INIT_V8();
SETUP();
@@ -8547,11 +8174,8 @@ TEST(asrv) {
CHECK_EQUAL_32(value32 >> (shift[3] & 31), w25);
CHECK_EQUAL_32(value32 >> (shift[4] & 31), w26);
CHECK_EQUAL_32(value32 >> (shift[5] & 31), w27);
-
- TEARDOWN();
}
-
TEST(rorv) {
INIT_V8();
SETUP();
@@ -8600,11 +8224,8 @@ TEST(rorv) {
CHECK_EQUAL_32(0xCDEF89AB, w25);
CHECK_EQUAL_32(0xABCDEF89, w26);
CHECK_EQUAL_32(0xF89ABCDE, w27);
-
- TEARDOWN();
}
-
TEST(bfm) {
INIT_V8();
SETUP();
@@ -8640,11 +8261,8 @@ TEST(bfm) {
CHECK_EQUAL_64(0x8888888888EF8888L, x12);
CHECK_EQUAL_64(0x88888888888888ABL, x13);
-
- TEARDOWN();
}
-
TEST(sbfm) {
INIT_V8();
SETUP();
@@ -8702,11 +8320,8 @@ TEST(sbfm) {
CHECK_EQUAL_64(0x3210, x27);
CHECK_EQUAL_64(0xFFFFFFFF89ABCDEFL, x28);
CHECK_EQUAL_64(0x76543210, x29);
-
- TEARDOWN();
}
-
TEST(ubfm) {
INIT_V8();
SETUP();
@@ -8759,11 +8374,8 @@ TEST(ubfm) {
CHECK_EQUAL_64(0xEFL, x20);
CHECK_EQUAL_64(0xCDEFL, x21);
CHECK_EQUAL_64(0x89ABCDEFL, x22);
-
- TEARDOWN();
}
-
TEST(extr) {
INIT_V8();
SETUP();
@@ -8797,11 +8409,8 @@ TEST(extr) {
CHECK_EQUAL_64(0x13579BDF, x23);
CHECK_EQUAL_64(0x7F6E5D4C3B2A1908UL, x24);
CHECK_EQUAL_64(0x02468ACF13579BDEUL, x25);
-
- TEARDOWN();
}
-
TEST(fmov_imm) {
INIT_V8();
SETUP();
@@ -8827,11 +8436,8 @@ TEST(fmov_imm) {
CHECK_EQUAL_FP64(0.0, d4);
CHECK_EQUAL_FP32(kFP32PositiveInfinity, s5);
CHECK_EQUAL_FP64(kFP64NegativeInfinity, d6);
-
- TEARDOWN();
}
-
TEST(fmov_reg) {
INIT_V8();
SETUP();
@@ -8858,11 +8464,8 @@ TEST(fmov_reg) {
CHECK_EQUAL_FP64(-13.0, d2);
CHECK_EQUAL_FP64(-13.0, d4);
CHECK_EQUAL_FP32(bit_cast<float>(0x89ABCDEF), s6);
-
- TEARDOWN();
}
-
TEST(fadd) {
INIT_V8();
SETUP();
@@ -8915,11 +8518,8 @@ TEST(fadd) {
CHECK_EQUAL_FP64(kFP64NegativeInfinity, d11);
CHECK_EQUAL_FP64(kFP64DefaultNaN, d12);
CHECK_EQUAL_FP64(kFP64DefaultNaN, d13);
-
- TEARDOWN();
}
-
TEST(fsub) {
INIT_V8();
SETUP();
@@ -8972,11 +8572,8 @@ TEST(fsub) {
CHECK_EQUAL_FP64(kFP64PositiveInfinity, d11);
CHECK_EQUAL_FP64(kFP64DefaultNaN, d12);
CHECK_EQUAL_FP64(kFP64DefaultNaN, d13);
-
- TEARDOWN();
}
-
TEST(fmul) {
INIT_V8();
SETUP();
@@ -9030,8 +8627,6 @@ TEST(fmul) {
CHECK_EQUAL_FP64(kFP64PositiveInfinity, d11);
CHECK_EQUAL_FP64(kFP64DefaultNaN, d12);
CHECK_EQUAL_FP64(kFP64DefaultNaN, d13);
-
- TEARDOWN();
}
@@ -9056,11 +8651,8 @@ static void FmaddFmsubHelper(double n, double m, double a,
CHECK_EQUAL_FP64(fmsub, d29);
CHECK_EQUAL_FP64(fnmadd, d30);
CHECK_EQUAL_FP64(fnmsub, d31);
-
- TEARDOWN();
}
-
TEST(fmadd_fmsub_double) {
INIT_V8();
@@ -9102,7 +8694,6 @@ TEST(fmadd_fmsub_double) {
kFP64NegativeInfinity); // -inf + (-inf * 1) = -inf
}
-
static void FmaddFmsubHelper(float n, float m, float a,
float fmadd, float fmsub,
float fnmadd, float fnmsub) {
@@ -9124,11 +8715,8 @@ static void FmaddFmsubHelper(float n, float m, float a,
CHECK_EQUAL_FP32(fmsub, s29);
CHECK_EQUAL_FP32(fnmadd, s30);
CHECK_EQUAL_FP32(fnmsub, s31);
-
- TEARDOWN();
}
-
TEST(fmadd_fmsub_float) {
INIT_V8();
// It's hard to check the result of fused operations because the only way to
@@ -9169,7 +8757,6 @@ TEST(fmadd_fmsub_float) {
kFP32NegativeInfinity); // -inf + (-inf * 1) = -inf
}
-
TEST(fmadd_fmsub_double_nans) {
INIT_V8();
// Make sure that NaN propagation works correctly.
@@ -9252,7 +8839,6 @@ TEST(fmadd_fmsub_double_nans) {
kFP64DefaultNaN, kFP64DefaultNaN);
}
-
TEST(fmadd_fmsub_float_nans) {
INIT_V8();
// Make sure that NaN propagation works correctly.
@@ -9335,7 +8921,6 @@ TEST(fmadd_fmsub_float_nans) {
kFP32DefaultNaN, kFP32DefaultNaN);
}
-
TEST(fdiv) {
INIT_V8();
SETUP();
@@ -9389,8 +8974,6 @@ TEST(fdiv) {
CHECK_EQUAL_FP64(-0.0, d11);
CHECK_EQUAL_FP64(kFP64DefaultNaN, d12);
CHECK_EQUAL_FP64(kFP64DefaultNaN, d13);
-
- TEARDOWN();
}
@@ -9495,11 +9078,8 @@ static void FminFmaxDoubleHelper(double n, double m, double min, double max,
CHECK_EQUAL_FP64(max, d29);
CHECK_EQUAL_FP64(minnm, d30);
CHECK_EQUAL_FP64(maxnm, d31);
-
- TEARDOWN();
}
-
TEST(fmax_fmin_d) {
INIT_V8();
// Use non-standard NaNs to check that the payload bits are preserved.
@@ -9560,7 +9140,6 @@ TEST(fmax_fmin_d) {
}
}
-
static void FminFmaxFloatHelper(float n, float m, float min, float max,
float minnm, float maxnm) {
SETUP();
@@ -9580,11 +9159,8 @@ static void FminFmaxFloatHelper(float n, float m, float min, float max,
CHECK_EQUAL_FP32(max, s29);
CHECK_EQUAL_FP32(minnm, s30);
CHECK_EQUAL_FP32(maxnm, s31);
-
- TEARDOWN();
}
-
TEST(fmax_fmin_s) {
INIT_V8();
// Use non-standard NaNs to check that the payload bits are preserved.
@@ -9645,7 +9221,6 @@ TEST(fmax_fmin_s) {
}
}
-
TEST(fccmp) {
INIT_V8();
SETUP();
@@ -9709,11 +9284,8 @@ TEST(fccmp) {
CHECK_EQUAL_32(NFlag, w7);
CHECK_EQUAL_32(ZCFlag, w8);
CHECK_EQUAL_32(ZCFlag, w9);
-
- TEARDOWN();
}
-
TEST(fcmp) {
INIT_V8();
SETUP();
@@ -9792,11 +9364,8 @@ TEST(fcmp) {
CHECK_EQUAL_32(CVFlag, w14);
CHECK_EQUAL_32(ZCFlag, w15);
CHECK_EQUAL_32(NFlag, w16);
-
- TEARDOWN();
}
-
TEST(fcsel) {
INIT_V8();
SETUP();
@@ -9825,11 +9394,8 @@ TEST(fcsel) {
CHECK_EQUAL_FP64(4.0, d3);
CHECK_EQUAL_FP32(1.0, s4);
CHECK_EQUAL_FP64(3.0, d5);
-
- TEARDOWN();
}
-
TEST(fneg) {
INIT_V8();
SETUP();
@@ -9870,11 +9436,8 @@ TEST(fneg) {
CHECK_EQUAL_FP64(0.0, d9);
CHECK_EQUAL_FP64(kFP64NegativeInfinity, d10);
CHECK_EQUAL_FP64(kFP64PositiveInfinity, d11);
-
- TEARDOWN();
}
-
TEST(fabs) {
INIT_V8();
SETUP();
@@ -9907,11 +9470,8 @@ TEST(fabs) {
CHECK_EQUAL_FP64(1.0, d5);
CHECK_EQUAL_FP64(0.0, d6);
CHECK_EQUAL_FP64(kFP64PositiveInfinity, d7);
-
- TEARDOWN();
}
-
TEST(fsqrt) {
INIT_V8();
SETUP();
@@ -9964,11 +9524,8 @@ TEST(fsqrt) {
CHECK_EQUAL_FP64(-0.0, d11);
CHECK_EQUAL_FP64(kFP32PositiveInfinity, d12);
CHECK_EQUAL_FP64(kFP64DefaultNaN, d13);
-
- TEARDOWN();
}
-
TEST(frinta) {
INIT_V8();
SETUP();
@@ -10053,11 +9610,8 @@ TEST(frinta) {
CHECK_EQUAL_FP64(0.0, d21);
CHECK_EQUAL_FP64(-0.0, d22);
CHECK_EQUAL_FP64(-0.0, d23);
-
- TEARDOWN();
}
-
TEST(frintm) {
INIT_V8();
SETUP();
@@ -10142,11 +9696,8 @@ TEST(frintm) {
CHECK_EQUAL_FP64(0.0, d21);
CHECK_EQUAL_FP64(-0.0, d22);
CHECK_EQUAL_FP64(-1.0, d23);
-
- TEARDOWN();
}
-
TEST(frintn) {
INIT_V8();
SETUP();
@@ -10231,11 +9782,8 @@ TEST(frintn) {
CHECK_EQUAL_FP64(0.0, d21);
CHECK_EQUAL_FP64(-0.0, d22);
CHECK_EQUAL_FP64(-0.0, d23);
-
- TEARDOWN();
}
-
TEST(frintp) {
INIT_V8();
SETUP();
@@ -10320,11 +9868,8 @@ TEST(frintp) {
CHECK_EQUAL_FP64(0.0, d21);
CHECK_EQUAL_FP64(-0.0, d22);
CHECK_EQUAL_FP64(-0.0, d23);
-
- TEARDOWN();
}
-
TEST(frintz) {
INIT_V8();
SETUP();
@@ -10403,11 +9948,8 @@ TEST(frintz) {
CHECK_EQUAL_FP64(kFP64NegativeInfinity, d19);
CHECK_EQUAL_FP64(0.0, d20);
CHECK_EQUAL_FP64(-0.0, d21);
-
- TEARDOWN();
}
-
TEST(fcvt_ds) {
INIT_V8();
SETUP();
@@ -10470,11 +10012,8 @@ TEST(fcvt_ds) {
// - The low-order bits that haven't already been assigned are set to 0.
CHECK_EQUAL_FP64(bit_cast<double>(0x7FF82468A0000000), d13);
CHECK_EQUAL_FP64(bit_cast<double>(0x7FF82468A0000000), d14);
-
- TEARDOWN();
}
-
TEST(fcvt_sd) {
INIT_V8();
// There are a huge number of corner-cases to check, so this test iterates
@@ -10588,11 +10127,9 @@ TEST(fcvt_sd) {
RUN();
CHECK_EQUAL_FP32(expected, s20);
CHECK_EQUAL_FP32(-expected, s21);
- TEARDOWN();
}
}
-
TEST(fcvtas) {
INIT_V8();
SETUP();
@@ -10693,11 +10230,8 @@ TEST(fcvtas) {
CHECK_EQUAL_64(0x8000000000000000UL, x28);
CHECK_EQUAL_64(0x7FFFFFFFFFFFFC00UL, x29);
CHECK_EQUAL_64(0x8000000000000400UL, x30);
-
- TEARDOWN();
}
-
TEST(fcvtau) {
INIT_V8();
SETUP();
@@ -10793,11 +10327,8 @@ TEST(fcvtau) {
CHECK_EQUAL_64(0, x28);
CHECK_EQUAL_64(0xFFFFFFFFFFFFF800UL, x29);
CHECK_EQUAL_64(0xFFFFFFFF, x30);
-
- TEARDOWN();
}
-
TEST(fcvtms) {
INIT_V8();
SETUP();
@@ -10898,11 +10429,8 @@ TEST(fcvtms) {
CHECK_EQUAL_64(0x8000000000000000UL, x28);
CHECK_EQUAL_64(0x7FFFFFFFFFFFFC00UL, x29);
CHECK_EQUAL_64(0x8000000000000400UL, x30);
-
- TEARDOWN();
}
-
TEST(fcvtmu) {
INIT_V8();
SETUP();
@@ -11001,11 +10529,8 @@ TEST(fcvtmu) {
CHECK_EQUAL_64(0x0UL, x28);
CHECK_EQUAL_64(0x7FFFFFFFFFFFFC00UL, x29);
CHECK_EQUAL_64(0x0UL, x30);
-
- TEARDOWN();
}
-
TEST(fcvtns) {
INIT_V8();
SETUP();
@@ -11106,11 +10631,8 @@ TEST(fcvtns) {
// CHECK_EQUAL_64(0x8000000000000000UL, x28);
CHECK_EQUAL_64(0x7FFFFFFFFFFFFC00UL, x29);
CHECK_EQUAL_64(0x8000000000000400UL, x30);
-
- TEARDOWN();
}
-
TEST(fcvtnu) {
INIT_V8();
SETUP();
@@ -11206,11 +10728,8 @@ TEST(fcvtnu) {
// CHECK_EQUAL_64(0, x28);
CHECK_EQUAL_64(0xFFFFFFFFFFFFF800UL, x29);
CHECK_EQUAL_64(0xFFFFFFFF, x30);
-
- TEARDOWN();
}
-
TEST(fcvtzs) {
INIT_V8();
SETUP();
@@ -11311,11 +10830,8 @@ TEST(fcvtzs) {
CHECK_EQUAL_64(0x8000000000000000UL, x28);
CHECK_EQUAL_64(0x7FFFFFFFFFFFFC00UL, x29);
CHECK_EQUAL_64(0x8000000000000400UL, x30);
-
- TEARDOWN();
}
-
TEST(fcvtzu) {
INIT_V8();
SETUP();
@@ -11414,8 +10930,6 @@ TEST(fcvtzu) {
CHECK_EQUAL_64(0x0UL, x28);
CHECK_EQUAL_64(0x7FFFFFFFFFFFFC00UL, x29);
CHECK_EQUAL_64(0x0UL, x30);
-
- TEARDOWN();
}
@@ -11512,11 +11026,8 @@ static void TestUScvtfHelper(uint64_t in,
CHECK_EQUAL_FP64(expected_scvtf, results_scvtf_x[fbits]);
CHECK_EQUAL_FP64(expected_ucvtf, results_ucvtf_x[fbits]);
}
-
- TEARDOWN();
}
-
TEST(scvtf_ucvtf_double) {
INIT_V8();
// Simple conversions of positive numbers which require no rounding; the
@@ -11582,7 +11093,6 @@ TEST(scvtf_ucvtf_double) {
TestUScvtfHelper(0xFFFFFFFFFFFFFFFF, 0xBFF0000000000000, 0x43F0000000000000);
}
-
// The same as TestUScvtfHelper, but convert to floats.
static void TestUScvtf32Helper(uint64_t in,
uint32_t expected_scvtf_bits,
@@ -11667,11 +11177,8 @@ static void TestUScvtf32Helper(uint64_t in,
CHECK_EQUAL_FP32(expected_scvtf, results_scvtf_x[fbits]);
CHECK_EQUAL_FP32(expected_ucvtf, results_ucvtf_x[fbits]);
}
-
- TEARDOWN();
}
-
TEST(scvtf_ucvtf_float) {
INIT_V8();
// Simple conversions of positive numbers which require no rounding; the
@@ -11740,7 +11247,6 @@ TEST(scvtf_ucvtf_float) {
TestUScvtf32Helper(0xFFFFFFFFFFFFFFFF, 0xBF800000, 0x5F800000);
}
-
TEST(system_mrs) {
INIT_V8();
SETUP();
@@ -11776,11 +11282,8 @@ TEST(system_mrs) {
// FPCR
// The default FPCR on Linux-based platforms is 0.
CHECK_EQUAL_32(0, w6);
-
- TEARDOWN();
}
-
TEST(system_msr) {
INIT_V8();
// All FPCR fields that must be implemented: AHP, DN, FZ, RMode
@@ -11848,8 +11351,6 @@ TEST(system_msr) {
CHECK_EQUAL_64(fpcr_core, x8);
CHECK_EQUAL_64(fpcr_core, x9);
CHECK_EQUAL_64(0, x10);
-
- TEARDOWN();
}
TEST(system) {
@@ -11867,11 +11368,8 @@ TEST(system) {
CHECK_EQUAL_REGISTERS(before);
CHECK_EQUAL_NZCV(before.flags_nzcv());
-
- TEARDOWN();
}
-
TEST(zero_dest) {
INIT_V8();
SETUP();
@@ -11934,11 +11432,8 @@ TEST(zero_dest) {
CHECK_EQUAL_REGISTERS(before);
CHECK_EQUAL_NZCV(before.flags_nzcv());
-
- TEARDOWN();
}
-
TEST(zero_dest_setflags) {
INIT_V8();
SETUP();
@@ -11998,29 +11493,26 @@ TEST(zero_dest_setflags) {
RUN();
CHECK_EQUAL_REGISTERS(before);
-
- TEARDOWN();
}
-
TEST(register_bit) {
// No code generation takes place in this test, so there is no need for
// setup and teardown.
// Simple tests.
- CHECK(x0.bit() == (1UL << 0));
- CHECK(x1.bit() == (1UL << 1));
- CHECK(x10.bit() == (1UL << 10));
+ CHECK(x0.bit() == (1ULL << 0));
+ CHECK(x1.bit() == (1ULL << 1));
+ CHECK(x10.bit() == (1ULL << 10));
// AAPCS64 definitions.
- CHECK(fp.bit() == (1UL << kFramePointerRegCode));
- CHECK(lr.bit() == (1UL << kLinkRegCode));
+ CHECK(fp.bit() == (1ULL << kFramePointerRegCode));
+ CHECK(lr.bit() == (1ULL << kLinkRegCode));
// Fixed (hardware) definitions.
- CHECK(xzr.bit() == (1UL << kZeroRegCode));
+ CHECK(xzr.bit() == (1ULL << kZeroRegCode));
// Internal ABI definitions.
- CHECK(sp.bit() == (1UL << kSPRegInternalCode));
+ CHECK(sp.bit() == (1ULL << kSPRegInternalCode));
CHECK(sp.bit() != xzr.bit());
// xn.bit() == wn.bit() at all times, for the same n.
@@ -12031,7 +11523,6 @@ TEST(register_bit) {
CHECK(sp.bit() == wsp.bit());
}
-
TEST(peek_poke_simple) {
INIT_V8();
SETUP();
@@ -12094,11 +11585,8 @@ TEST(peek_poke_simple) {
CHECK_EQUAL_64((literal_base * 2) & 0xFFFFFFFF, x11);
CHECK_EQUAL_64((literal_base * 3) & 0xFFFFFFFF, x12);
CHECK_EQUAL_64((literal_base * 4) & 0xFFFFFFFF, x13);
-
- TEARDOWN();
}
-
TEST(peek_poke_unaligned) {
INIT_V8();
SETUP();
@@ -12174,11 +11662,8 @@ TEST(peek_poke_unaligned) {
CHECK_EQUAL_64((literal_base * 1) & 0xFFFFFFFF, x10);
CHECK_EQUAL_64((literal_base * 2) & 0xFFFFFFFF, x11);
CHECK_EQUAL_64((literal_base * 3) & 0xFFFFFFFF, x12);
-
- TEARDOWN();
}
-
TEST(peek_poke_endianness) {
INIT_V8();
SETUP();
@@ -12224,11 +11709,8 @@ TEST(peek_poke_endianness) {
CHECK_EQUAL_64(x1_expected, x1);
CHECK_EQUAL_64(x4_expected, x4);
CHECK_EQUAL_64(x5_expected, x5);
-
- TEARDOWN();
}
-
TEST(peek_poke_mixed) {
INIT_V8();
SETUP();
@@ -12288,11 +11770,8 @@ TEST(peek_poke_mixed) {
CHECK_EQUAL_64(x3_expected, x3);
CHECK_EQUAL_64(x6_expected, x6);
CHECK_EQUAL_64(x7_expected, x7);
-
- TEARDOWN();
}
-
// This enum is used only as an argument to the push-pop test helpers.
enum PushPopMethod {
// Push or Pop using the Push and Pop methods, with blocks of up to four
@@ -12413,8 +11892,6 @@ static void PushPopSimpleHelper(int reg_count, int reg_size,
CHECK_EQUAL_64(literal_base * i, x[i]);
}
}
-
- TEARDOWN();
}
TEST(push_pop_simple_32) {
@@ -12563,8 +12040,6 @@ static void PushPopFPSimpleHelper(int reg_count, int reg_size,
memcpy(&expected, &literal, sizeof(expected));
CHECK_EQUAL_FP64(expected, d[i]);
}
-
- TEARDOWN();
}
TEST(push_pop_fp_simple_32) {
@@ -12681,8 +12156,6 @@ static void PushPopMixedMethodsHelper(int reg_size) {
CHECK_EQUAL_64(literal_base * 3, x[6]);
CHECK_EQUAL_64(literal_base * 1, x[5]);
CHECK_EQUAL_64(literal_base * 2, x[4]);
-
- TEARDOWN();
}
TEST(push_pop_mixed_methods_64) {
@@ -12772,10 +12245,8 @@ TEST(push_pop) {
CHECK_EQUAL_32(0x00000000U, w27);
CHECK_EQUAL_32(0x22222222U, w28);
CHECK_EQUAL_32(0x33333333U, w29);
- TEARDOWN();
}
-
TEST(push_queued) {
INIT_V8();
SETUP();
@@ -12851,11 +12322,8 @@ TEST(push_queued) {
CHECK_EQUAL_FP32(123403.0, s3);
CHECK_EQUAL_FP32(123404.0, s4);
CHECK_EQUAL_FP32(123405.0, s5);
-
- TEARDOWN();
}
-
TEST(pop_queued) {
INIT_V8();
SETUP();
@@ -12931,8 +12399,6 @@ TEST(pop_queued) {
CHECK_EQUAL_FP32(123403.0, s3);
CHECK_EQUAL_FP32(123404.0, s4);
CHECK_EQUAL_FP32(123405.0, s5);
-
- TEARDOWN();
}
TEST(copy_slots_down) {
@@ -12998,8 +12464,6 @@ TEST(copy_slots_down) {
CHECK_EQUAL_64(ones, x15);
CHECK_EQUAL_64(ones, x0);
-
- TEARDOWN();
}
TEST(copy_slots_up) {
@@ -13060,8 +12524,6 @@ TEST(copy_slots_up) {
CHECK_EQUAL_64(threes, x0);
CHECK_EQUAL_64(twos, x1);
CHECK_EQUAL_64(ones, x2);
-
- TEARDOWN();
}
TEST(copy_double_words_downwards_even) {
@@ -13114,8 +12576,6 @@ TEST(copy_double_words_downwards_even) {
CHECK_EQUAL_64(fours, x6);
CHECK_EQUAL_64(threes, x5);
CHECK_EQUAL_64(fours, x4);
-
- TEARDOWN();
}
TEST(copy_double_words_downwards_odd) {
@@ -13172,8 +12632,6 @@ TEST(copy_double_words_downwards_odd) {
CHECK_EQUAL_64(fours, x6);
CHECK_EQUAL_64(threes, x5);
CHECK_EQUAL_64(fours, x4);
-
- TEARDOWN();
}
TEST(copy_noop) {
@@ -13239,8 +12697,6 @@ TEST(copy_noop) {
CHECK_EQUAL_64(fives, x14);
CHECK_EQUAL_64(fives, x15);
CHECK_EQUAL_64(0, x16);
-
- TEARDOWN();
}
TEST(jump_both_smi) {
@@ -13311,11 +12767,8 @@ TEST(jump_both_smi) {
CHECK_EQUAL_64(0, x5);
CHECK_EQUAL_64(0, x6);
CHECK_EQUAL_64(1, x7);
-
- TEARDOWN();
}
-
TEST(jump_either_smi) {
INIT_V8();
SETUP();
@@ -13384,11 +12837,8 @@ TEST(jump_either_smi) {
CHECK_EQUAL_64(1, x5);
CHECK_EQUAL_64(1, x6);
CHECK_EQUAL_64(1, x7);
-
- TEARDOWN();
}
-
TEST(noreg) {
// This test doesn't generate any code, but it verifies some invariants
// related to NoReg.
@@ -14042,7 +13492,6 @@ TEST(cpureglist_utils_x) {
CHECK(test.IsEmpty());
}
-
TEST(cpureglist_utils_w) {
// This test doesn't generate any code, but it verifies the behaviour of
// the CPURegList utility methods.
@@ -14108,7 +13557,6 @@ TEST(cpureglist_utils_w) {
CHECK(test.IsEmpty());
}
-
TEST(cpureglist_utils_d) {
// This test doesn't generate any code, but it verifies the behaviour of
// the CPURegList utility methods.
@@ -14175,7 +13623,6 @@ TEST(cpureglist_utils_d) {
CHECK(test.IsEmpty());
}
-
TEST(cpureglist_utils_s) {
// This test doesn't generate any code, but it verifies the behaviour of
// the CPURegList utility methods.
@@ -14196,7 +13643,6 @@ TEST(cpureglist_utils_s) {
CHECK(test.IncludesAliasOf(s23));
}
-
TEST(cpureglist_utils_empty) {
// This test doesn't generate any code, but it verifies the behaviour of
// the CPURegList utility methods.
@@ -14230,7 +13676,6 @@ TEST(cpureglist_utils_empty) {
CHECK(fpreg64.IsEmpty());
}
-
TEST(printf) {
INIT_V8();
SETUP_SIZE(BUF_SIZE * 2);
@@ -14313,11 +13758,8 @@ TEST(printf) {
// bytes that were printed. However, the printf_no_preserve test should check
// that, and here we just test that we didn't clobber any registers.
CHECK_EQUAL_REGISTERS(before);
-
- TEARDOWN();
}
-
TEST(printf_no_preserve) {
INIT_V8();
SETUP();
@@ -14419,11 +13861,8 @@ TEST(printf_no_preserve) {
CHECK_EQUAL_64(17, x27);
// w3: 4294967295, s1: 1.234000, x5: 18446744073709551615, d3: 3.456000
CHECK_EQUAL_64(69, x28);
-
- TEARDOWN();
}
-
TEST(blr_lr) {
// A simple test to check that the simulator correctly handles "blr lr".
INIT_V8();
@@ -14449,11 +13888,8 @@ TEST(blr_lr) {
RUN();
CHECK_EQUAL_64(0xC001C0DE, x0);
-
- TEARDOWN();
}
-
TEST(barriers) {
// Generate all supported barriers; this is just a smoke test.
INIT_V8();
@@ -14509,11 +13945,8 @@ TEST(barriers) {
END();
RUN();
-
- TEARDOWN();
}
-
TEST(process_nan_double) {
INIT_V8();
// Make sure that NaN propagation works correctly.
@@ -14585,11 +14018,8 @@ TEST(process_nan_double) {
CHECK_EQUAL_FP64(qn_proc, d15);
CHECK_EQUAL_FP64(qn_proc, d16);
CHECK_EQUAL_FP64(qn_proc, d17);
-
- TEARDOWN();
}
-
TEST(process_nan_float) {
INIT_V8();
// Make sure that NaN propagation works correctly.
@@ -14662,8 +14092,6 @@ TEST(process_nan_float) {
CHECK_EQUAL_FP32(qn_proc, s15);
CHECK_EQUAL_FP32(qn_proc, s16);
CHECK_EQUAL_FP32(qn_proc, s17);
-
- TEARDOWN();
}
@@ -14695,11 +14123,8 @@ static void ProcessNaNsHelper(double n, double m, double expected) {
CHECK_EQUAL_FP64(expected, d5);
CHECK_EQUAL_FP64(expected, d6);
CHECK_EQUAL_FP64(expected, d7);
-
- TEARDOWN();
}
-
TEST(process_nans_double) {
INIT_V8();
// Make sure that NaN propagation works correctly.
@@ -14738,7 +14163,6 @@ TEST(process_nans_double) {
ProcessNaNsHelper(sn, sm, sn_proc);
}
-
static void ProcessNaNsHelper(float n, float m, float expected) {
CHECK(std::isnan(n) || std::isnan(m));
CHECK(std::isnan(expected));
@@ -14767,11 +14191,8 @@ static void ProcessNaNsHelper(float n, float m, float expected) {
CHECK_EQUAL_FP32(expected, s5);
CHECK_EQUAL_FP32(expected, s6);
CHECK_EQUAL_FP32(expected, s7);
-
- TEARDOWN();
}
-
TEST(process_nans_float) {
INIT_V8();
// Make sure that NaN propagation works correctly.
@@ -14810,7 +14231,6 @@ TEST(process_nans_float) {
ProcessNaNsHelper(sn, sm, sn_proc);
}
-
static void DefaultNaNHelper(float n, float m, float a) {
CHECK(std::isnan(n) || std::isnan(m) || std::isnan(a));
@@ -14893,11 +14313,8 @@ static void DefaultNaNHelper(float n, float m, float a) {
CHECK_EQUAL_FP32(kFP32DefaultNaN, s25);
CHECK_EQUAL_FP32(kFP32DefaultNaN, s26);
CHECK_EQUAL_FP32(kFP32DefaultNaN, s27);
-
- TEARDOWN();
}
-
TEST(default_nan_float) {
INIT_V8();
float sn = bit_cast<float>(0x7F951111);
@@ -14939,7 +14356,6 @@ TEST(default_nan_float) {
DefaultNaNHelper(qn, qm, qa);
}
-
static void DefaultNaNHelper(double n, double m, double a) {
CHECK(std::isnan(n) || std::isnan(m) || std::isnan(a));
@@ -15021,11 +14437,8 @@ static void DefaultNaNHelper(double n, double m, double a) {
CHECK_EQUAL_FP64(kFP64DefaultNaN, d25);
CHECK_EQUAL_FP64(kFP64DefaultNaN, d26);
CHECK_EQUAL_FP64(kFP64DefaultNaN, d27);
-
- TEARDOWN();
}
-
TEST(default_nan_double) {
INIT_V8();
double sn = bit_cast<double>(0x7FF5555511111111);
@@ -15067,7 +14480,6 @@ TEST(default_nan_double) {
DefaultNaNHelper(qn, qm, qa);
}
-
TEST(call_no_relocation) {
INIT_V8();
SETUP();
@@ -15097,8 +14509,6 @@ TEST(call_no_relocation) {
RUN();
CHECK_EQUAL_64(1, x0);
-
- TEARDOWN();
}
@@ -15152,8 +14562,6 @@ static void AbsHelperX(int64_t value) {
CHECK_EQUAL_64(expected, x11);
CHECK_EQUAL_64(expected, x12);
CHECK_EQUAL_64(expected, x13);
-
- TEARDOWN();
}
@@ -15209,11 +14617,8 @@ static void AbsHelperW(int32_t value) {
CHECK_EQUAL_32(expected, w11);
CHECK_EQUAL_32(expected, w12);
CHECK_EQUAL_32(expected, w13);
-
- TEARDOWN();
}
-
TEST(abs) {
INIT_V8();
AbsHelperX(0);
@@ -15229,7 +14634,6 @@ TEST(abs) {
AbsHelperW(kWMaxInt);
}
-
TEST(pool_size) {
INIT_V8();
SETUP();
@@ -15277,11 +14681,8 @@ TEST(pool_size) {
}
CHECK_EQ(pool_count, 2);
-
- TEARDOWN();
}
-
TEST(jump_tables_forward) {
// Test jump tables with forward jumps.
const int kNumCases = 512;
@@ -15341,11 +14742,8 @@ TEST(jump_tables_forward) {
for (int i = 0; i < kNumCases; ++i) {
CHECK_EQ(values[i], results[i]);
}
-
- TEARDOWN();
}
-
TEST(jump_tables_backward) {
// Test jump tables with backward jumps.
const int kNumCases = 512;
@@ -15406,11 +14804,8 @@ TEST(jump_tables_backward) {
for (int i = 0; i < kNumCases; ++i) {
CHECK_EQ(values[i], results[i]);
}
-
- TEARDOWN();
}
-
TEST(internal_reference_linked) {
// Test internal references when they are linked in a label chain.
@@ -15447,8 +14842,6 @@ TEST(internal_reference_linked) {
RUN();
CHECK_EQUAL_64(0x1, x0);
-
- TEARDOWN();
}
} // namespace internal
@@ -15464,7 +14857,6 @@ TEST(internal_reference_linked) {
#undef START
#undef RUN
#undef END
-#undef TEARDOWN
#undef CHECK_EQUAL_NZCV
#undef CHECK_EQUAL_REGISTERS
#undef CHECK_EQUAL_32
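The arm64 hunks above consist almost entirely of deleted trailing TEARDOWN() calls, capped by the removal of the matching #undef. A minimal sketch of how such an explicit teardown macro can be retired in favour of RAII; SimulatorHarness here is a hypothetical stand-in, not V8's actual helper:

    // Hedged sketch: the state SETUP()/TEARDOWN() managed by hand is
    // owned by an object whose destructor runs on every exit path.
    class SimulatorHarness {
     public:
      SimulatorHarness() { /* allocate simulator and code buffer */ }
      ~SimulatorHarness() { /* release them; replaces TEARDOWN() */ }
    };

    void SampleTest() {
      SimulatorHarness harness;  // stands in for SETUP()
      // ... emit code, RUN(), CHECK_* the results ...
    }  // no trailing TEARDOWN() needed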
diff --git a/deps/v8/test/cctest/test-assembler-ia32.cc b/deps/v8/test/cctest/test-assembler-ia32.cc
index 562dc5a8ab..781dbfcc10 100644
--- a/deps/v8/test/cctest/test-assembler-ia32.cc
+++ b/deps/v8/test/cctest/test-assembler-ia32.cc
@@ -54,7 +54,8 @@ TEST(AssemblerIa320) {
HandleScope scope(isolate);
v8::internal::byte buffer[256];
- Assembler assm(AssemblerOptions{}, buffer, sizeof buffer);
+ Assembler assm(AssemblerOptions{},
+ ExternalAssemblerBuffer(buffer, sizeof buffer));
__ mov(eax, Operand(esp, 4));
__ add(eax, Operand(esp, 8));
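The recurring two-line change in this file swaps the raw (buffer, size) argument pair for an ExternalAssemblerBuffer wrapper. A before/after sketch using only the constructor shapes visible in this diff:

    v8::internal::byte buffer[256];
    // Old: raw pointer and size passed straight to the constructor.
    //   Assembler assm(AssemblerOptions{}, buffer, sizeof buffer);
    // New: caller-owned storage is wrapped explicitly.
    Assembler assm(AssemblerOptions{},
                   ExternalAssemblerBuffer(buffer, sizeof buffer));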
@@ -81,7 +82,8 @@ TEST(AssemblerIa321) {
HandleScope scope(isolate);
v8::internal::byte buffer[256];
- Assembler assm(AssemblerOptions{}, buffer, sizeof buffer);
+ Assembler assm(AssemblerOptions{},
+ ExternalAssemblerBuffer(buffer, sizeof buffer));
Label L, C;
__ mov(edx, Operand(esp, 4));
@@ -118,7 +120,8 @@ TEST(AssemblerIa322) {
HandleScope scope(isolate);
v8::internal::byte buffer[256];
- Assembler assm(AssemblerOptions{}, buffer, sizeof buffer);
+ Assembler assm(AssemblerOptions{},
+ ExternalAssemblerBuffer(buffer, sizeof buffer));
Label L, C;
__ mov(edx, Operand(esp, 4));
@@ -162,7 +165,8 @@ TEST(AssemblerIa323) {
HandleScope scope(isolate);
v8::internal::byte buffer[256];
- Assembler assm(AssemblerOptions{}, buffer, sizeof buffer);
+ Assembler assm(AssemblerOptions{},
+ ExternalAssemblerBuffer(buffer, sizeof buffer));
__ cvttss2si(eax, Operand(esp, 4));
__ ret(0);
@@ -191,7 +195,8 @@ TEST(AssemblerIa324) {
HandleScope scope(isolate);
v8::internal::byte buffer[256];
- Assembler assm(AssemblerOptions{}, buffer, sizeof buffer);
+ Assembler assm(AssemblerOptions{},
+ ExternalAssemblerBuffer(buffer, sizeof buffer));
__ cvttsd2si(eax, Operand(esp, 4));
__ ret(0);
@@ -218,7 +223,8 @@ TEST(AssemblerIa325) {
HandleScope scope(isolate);
v8::internal::byte buffer[256];
- Assembler assm(AssemblerOptions{}, buffer, sizeof buffer);
+ Assembler assm(AssemblerOptions{},
+ ExternalAssemblerBuffer(buffer, sizeof buffer));
__ mov(eax, Operand(reinterpret_cast<intptr_t>(&baz), RelocInfo::NONE));
__ ret(0);
@@ -241,7 +247,8 @@ TEST(AssemblerIa326) {
Isolate* isolate = reinterpret_cast<Isolate*>(CcTest::isolate());
HandleScope scope(isolate);
v8::internal::byte buffer[256];
- Assembler assm(AssemblerOptions{}, buffer, sizeof buffer);
+ Assembler assm(AssemblerOptions{},
+ ExternalAssemblerBuffer(buffer, sizeof buffer));
__ movsd(xmm0, Operand(esp, 1 * kPointerSize));
__ movsd(xmm1, Operand(esp, 3 * kPointerSize));
@@ -279,7 +286,8 @@ TEST(AssemblerIa328) {
Isolate* isolate = reinterpret_cast<Isolate*>(CcTest::isolate());
HandleScope scope(isolate);
v8::internal::byte buffer[256];
- Assembler assm(AssemblerOptions{}, buffer, sizeof buffer);
+ Assembler assm(AssemblerOptions{},
+ ExternalAssemblerBuffer(buffer, sizeof buffer));
__ mov(eax, Operand(esp, 4));
__ cvtsi2sd(xmm0, eax);
// Copy xmm0 to st(0) using eight bytes of stack.
@@ -308,7 +316,7 @@ TEST(AssemblerIa3210) {
CcTest::InitializeVM();
Isolate* isolate = reinterpret_cast<Isolate*>(CcTest::isolate());
HandleScope scope(isolate);
- Assembler assm(AssemblerOptions{}, nullptr, 0);
+ Assembler assm(AssemblerOptions{});
Label target;
__ j(equal, &target);
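Where a test previously passed (nullptr, 0) to request an internally managed buffer, the new API simply drops those arguments. A sketch limited to the shapes present in the diff; the default presumably allocates a growable internal buffer:

    // Old: explicit "no external buffer" sentinel.
    //   Assembler assm(AssemblerOptions{}, nullptr, 0);
    // New: omit the buffer; the assembler owns its own storage.
    Assembler assm(AssemblerOptions{});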
@@ -323,7 +331,8 @@ TEST(AssemblerMultiByteNop) {
Isolate* isolate = reinterpret_cast<Isolate*>(CcTest::isolate());
HandleScope scope(isolate);
v8::internal::byte buffer[1024];
- Assembler assm(AssemblerOptions{}, buffer, sizeof(buffer));
+ Assembler assm(AssemblerOptions{},
+ ExternalAssemblerBuffer(buffer, sizeof(buffer)));
__ push(ebx);
__ push(ecx);
__ push(edx);
@@ -395,7 +404,8 @@ void DoSSE2(const v8::FunctionCallbackInfo<v8::Value>& args) {
CHECK_EQ(ELEMENT_COUNT, vec->Length());
v8::internal::byte buffer[256];
- Assembler assm(AssemblerOptions{}, buffer, sizeof buffer);
+ Assembler assm(AssemblerOptions{},
+ ExternalAssemblerBuffer(buffer, sizeof buffer));
// Remove the return address from the stack to fix stack frame alignment.
__ pop(ecx);
@@ -477,8 +487,8 @@ TEST(AssemblerIa32Extractps) {
Isolate* isolate = reinterpret_cast<Isolate*>(CcTest::isolate());
HandleScope scope(isolate);
v8::internal::byte buffer[256];
- MacroAssembler assm(isolate, buffer, sizeof(buffer),
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes,
+ ExternalAssemblerBuffer(buffer, sizeof(buffer)));
{ CpuFeatureScope fscope41(&assm, SSE4_1);
__ movsd(xmm1, Operand(esp, 4));
__ extractps(eax, xmm1, 0x1);
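MacroAssembler follows the same migration with its arguments reordered: CodeObjectRequired moves forward and the buffer becomes a trailing, explicitly wrapped parameter. A sketch mirroring the hunk above:

    v8::internal::byte buffer[256];
    // Old:
    //   MacroAssembler assm(isolate, buffer, sizeof(buffer),
    //                       v8::internal::CodeObjectRequired::kYes);
    // New:
    MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes,
                        ExternalAssemblerBuffer(buffer, sizeof(buffer)));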
@@ -509,8 +519,8 @@ TEST(AssemblerIa32SSE) {
Isolate* isolate = reinterpret_cast<Isolate*>(CcTest::isolate());
HandleScope scope(isolate);
v8::internal::byte buffer[256];
- MacroAssembler assm(isolate, buffer, sizeof(buffer),
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes,
+ ExternalAssemblerBuffer(buffer, sizeof(buffer)));
{
__ movss(xmm0, Operand(esp, kPointerSize));
__ movss(xmm1, Operand(esp, 2 * kPointerSize));
@@ -545,8 +555,8 @@ TEST(AssemblerIa32SSE3) {
Isolate* isolate = reinterpret_cast<Isolate*>(CcTest::isolate());
HandleScope scope(isolate);
v8::internal::byte buffer[256];
- MacroAssembler assm(isolate, buffer, sizeof(buffer),
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes,
+ ExternalAssemblerBuffer(buffer, sizeof(buffer)));
{
CpuFeatureScope fscope(&assm, SSE3);
__ movss(xmm0, Operand(esp, kPointerSize));
@@ -579,8 +589,8 @@ TEST(AssemblerX64FMA_sd) {
Isolate* isolate = reinterpret_cast<Isolate*>(CcTest::isolate());
HandleScope scope(isolate);
v8::internal::byte buffer[1024];
- MacroAssembler assm(isolate, buffer, sizeof(buffer),
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes,
+ ExternalAssemblerBuffer(buffer, sizeof(buffer)));
{
CpuFeatureScope fscope(&assm, FMA3);
Label exit;
@@ -808,8 +818,8 @@ TEST(AssemblerX64FMA_ss) {
Isolate* isolate = reinterpret_cast<Isolate*>(CcTest::isolate());
HandleScope scope(isolate);
v8::internal::byte buffer[1024];
- MacroAssembler assm(isolate, buffer, sizeof(buffer),
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes,
+ ExternalAssemblerBuffer(buffer, sizeof(buffer)));
{
CpuFeatureScope fscope(&assm, FMA3);
Label exit;
@@ -1036,8 +1046,8 @@ TEST(AssemblerIa32BMI1) {
Isolate* isolate = reinterpret_cast<Isolate*>(CcTest::isolate());
HandleScope scope(isolate);
v8::internal::byte buffer[1024];
- MacroAssembler assm(isolate, buffer, sizeof(buffer),
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes,
+ ExternalAssemblerBuffer(buffer, sizeof(buffer)));
{
CpuFeatureScope fscope(&assm, BMI1);
Label exit;
@@ -1144,8 +1154,8 @@ TEST(AssemblerIa32LZCNT) {
Isolate* isolate = reinterpret_cast<Isolate*>(CcTest::isolate());
HandleScope scope(isolate);
v8::internal::byte buffer[256];
- MacroAssembler assm(isolate, buffer, sizeof(buffer),
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes,
+ ExternalAssemblerBuffer(buffer, sizeof(buffer)));
{
CpuFeatureScope fscope(&assm, LZCNT);
Label exit;
@@ -1192,8 +1202,8 @@ TEST(AssemblerIa32POPCNT) {
Isolate* isolate = reinterpret_cast<Isolate*>(CcTest::isolate());
HandleScope scope(isolate);
v8::internal::byte buffer[256];
- MacroAssembler assm(isolate, buffer, sizeof(buffer),
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes,
+ ExternalAssemblerBuffer(buffer, sizeof(buffer)));
{
CpuFeatureScope fscope(&assm, POPCNT);
Label exit;
@@ -1240,8 +1250,8 @@ TEST(AssemblerIa32BMI2) {
Isolate* isolate = reinterpret_cast<Isolate*>(CcTest::isolate());
HandleScope scope(isolate);
v8::internal::byte buffer[2048];
- MacroAssembler assm(isolate, buffer, sizeof(buffer),
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes,
+ ExternalAssemblerBuffer(buffer, sizeof(buffer)));
{
CpuFeatureScope fscope(&assm, BMI2);
Label exit;
@@ -1384,7 +1394,7 @@ TEST(AssemblerIa32JumpTables1) {
CcTest::InitializeVM();
Isolate* isolate = reinterpret_cast<Isolate*>(CcTest::isolate());
HandleScope scope(isolate);
- Assembler assm(AssemblerOptions{}, nullptr, 0);
+ Assembler assm(AssemblerOptions{});
const int kNumCases = 512;
int values[kNumCases];
@@ -1431,7 +1441,7 @@ TEST(AssemblerIa32JumpTables2) {
CcTest::InitializeVM();
Isolate* isolate = reinterpret_cast<Isolate*>(CcTest::isolate());
HandleScope scope(isolate);
- Assembler assm(AssemblerOptions{}, nullptr, 0);
+ Assembler assm(AssemblerOptions{});
const int kNumCases = 512;
int values[kNumCases];
@@ -1480,7 +1490,7 @@ TEST(Regress621926) {
CcTest::InitializeVM();
Isolate* isolate = reinterpret_cast<Isolate*>(CcTest::isolate());
HandleScope scope(isolate);
- Assembler assm(AssemblerOptions{}, nullptr, 0);
+ Assembler assm(AssemblerOptions{});
uint16_t a = 42;
diff --git a/deps/v8/test/cctest/test-assembler-mips.cc b/deps/v8/test/cctest/test-assembler-mips.cc
index 4b625c2b53..2f5b13f725 100644
--- a/deps/v8/test/cctest/test-assembler-mips.cc
+++ b/deps/v8/test/cctest/test-assembler-mips.cc
@@ -34,7 +34,6 @@
#include "src/disassembler.h"
#include "src/heap/factory.h"
#include "src/macro-assembler.h"
-#include "src/mips/macro-assembler-mips.h"
#include "src/simulator.h"
#include "test/cctest/cctest.h"
@@ -44,10 +43,10 @@ namespace internal {
// Define these function prototypes to match JSEntryFunction in execution.cc.
// TODO(mips): Refine these signatures per test case.
-typedef Object*(F1)(int x, int p1, int p2, int p3, int p4);
-typedef Object*(F2)(int x, int y, int p2, int p3, int p4);
-typedef Object*(F3)(void* p, int p1, int p2, int p3, int p4);
-typedef Object*(F4)(void* p0, void* p1, int p2, int p3, int p4);
+typedef void*(F1)(int x, int p1, int p2, int p3, int p4);
+typedef void*(F2)(int x, int y, int p2, int p3, int p4);
+typedef void*(F3)(void* p, int p1, int p2, int p3, int p4);
+typedef void*(F4)(void* p0, void* p1, int p2, int p3, int p4);
#define __ assm.
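The entry-point typedefs drop Object* in favour of an opaque void*, consistent with Object no longer being a pointer type in this V8 revision. A hedged sketch of the call-site consequence, using the Handle construction that appears later in this diff:

    typedef void*(F1)(int x, int p1, int p2, int p3, int p4);

    // The raw void* returned by generated code is re-tagged by hand;
    // Address and Object(...) follow the usage shown in this diff.
    auto f = GeneratedCode<F1>::FromCode(*code);
    Handle<Object> result(
        Object(reinterpret_cast<Address>(f.Call(0, 0, 0, 0, 0))), isolate);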
@@ -56,8 +55,7 @@ TEST(MIPS0) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
// Addition.
__ addu(v0, a0, a1);
@@ -79,8 +77,7 @@ TEST(MIPS1) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
Label L, C;
__ mov(a1, a0);
@@ -115,8 +112,7 @@ TEST(MIPS2) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
Label exit, error;
@@ -276,8 +272,7 @@ TEST(MIPS3) {
// Create a function that accepts &t, and loads, manipulates, and stores
// the doubles t.a ... t.f.
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
Label L, C;
// Double precision floating point instructions.
@@ -405,8 +400,7 @@ TEST(MIPS4) {
} T;
T t;
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
Label L, C;
__ Ldc1(f4, MemOperand(a0, offsetof(T, a)));
@@ -474,8 +468,7 @@ TEST(MIPS5) {
} T;
T t;
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
Label L, C;
// Load all structure elements to registers.
@@ -543,7 +536,7 @@ TEST(MIPS6) {
} T;
T t;
- Assembler assm(AssemblerOptions{}, nullptr, 0);
+ Assembler assm(AssemblerOptions{});
Label L, C;
// Basic word load/store.
@@ -623,8 +616,7 @@ TEST(MIPS7) {
// Create a function that accepts &t, and loads, manipulates, and stores
// the doubles t.a ... t.f.
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
Label neither_is_nan, less_than, outa_here;
__ Ldc1(f4, MemOperand(a0, offsetof(T, a)));
@@ -714,8 +706,7 @@ TEST(MIPS8) {
} T;
T t;
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
// Basic word load.
__ lw(t0, MemOperand(a0, offsetof(T, input)) );
@@ -798,8 +789,7 @@ TEST(MIPS9) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
Label exit, exit2, exit3;
__ Branch(&exit, ge, a0, Operand(zero_reg));
@@ -834,8 +824,7 @@ TEST(MIPS10) {
} T;
T t;
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
Label L, C;
if (IsMipsArchVariant(kMips32r1) || IsMipsArchVariant(kLoongson)) return;
@@ -909,7 +898,7 @@ TEST(MIPS11) {
} T;
T t;
- Assembler assm(AssemblerOptions{}, nullptr, 0);
+ Assembler assm(AssemblerOptions{});
// Test all combinations of LWL and vAddr.
__ lw(t0, MemOperand(a0, offsetof(T, reg_init)) );
@@ -1061,8 +1050,7 @@ TEST(MIPS12) {
} T;
T t;
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
__ mov(t6, fp); // Save frame pointer.
__ mov(fp, a0); // Access struct T by fp.
@@ -1150,8 +1138,7 @@ TEST(MIPS13) {
} T;
T t;
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
__ sw(t0, MemOperand(a0, offsetof(T, cvt_small_in)));
__ Cvt_d_uw(f10, t0, f4);
@@ -1228,8 +1215,7 @@ TEST(MIPS14) {
#undef ROUND_STRUCT_ELEMENT
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
// Save FCSR.
__ cfc1(a1, FCSR);
@@ -1334,7 +1320,7 @@ TEST(MIPS15) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- Assembler assm(AssemblerOptions{}, nullptr, 0);
+ Assembler assm(AssemblerOptions{});
Label target;
__ beq(v0, v1, &target);
@@ -1352,8 +1338,7 @@ TEST(seleqz_selnez) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
typedef struct test {
int a;
@@ -1454,8 +1439,7 @@ TEST(min_max) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
struct TestFloat {
double a;
@@ -1539,8 +1523,7 @@ TEST(rint_d) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
typedef struct test_float {
double a;
@@ -1645,8 +1628,7 @@ TEST(sel) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
typedef struct test {
double dd;
@@ -1720,8 +1702,7 @@ TEST(rint_s) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
typedef struct test_float {
float a;
@@ -1825,8 +1806,7 @@ TEST(Cvt_d_uw) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
typedef struct test_struct {
unsigned input;
@@ -1868,8 +1848,7 @@ TEST(mina_maxa) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
const double dnan = std::numeric_limits<double>::quiet_NaN();
const double dinf = std::numeric_limits<double>::infinity();
const double dminf = -std::numeric_limits<double>::infinity();
@@ -1963,8 +1942,7 @@ TEST(trunc_l) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
const double dFPU64InvalidResult = static_cast<double>(kFPU64InvalidResult);
typedef struct test_float {
uint32_t isNaN2008;
@@ -2038,8 +2016,7 @@ TEST(movz_movn) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
typedef struct test_float {
int32_t rt;
@@ -2168,8 +2145,7 @@ TEST(movt_movd) {
test.fcsr = 1 << (24+condition_flags[j]);
}
HandleScope scope(isolate);
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
__ Ldc1(f2, MemOperand(a0, offsetof(TestFloat, srcd)));
__ lwc1(f4, MemOperand(a0, offsetof(TestFloat, srcf)) );
__ lw(t1, MemOperand(a0, offsetof(TestFloat, fcsr)) );
@@ -2222,8 +2198,7 @@ TEST(cvt_w_d) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
typedef struct test_float {
double a;
@@ -2300,8 +2275,7 @@ TEST(trunc_w) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
typedef struct test_float {
uint32_t isNaN2008;
@@ -2371,8 +2345,7 @@ TEST(round_w) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
typedef struct test_float {
uint32_t isNaN2008;
@@ -2442,8 +2415,7 @@ TEST(round_l) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
const double dFPU64InvalidResult = static_cast<double>(kFPU64InvalidResult);
typedef struct test_float {
uint32_t isNaN2008;
@@ -2516,8 +2488,7 @@ TEST(sub) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
typedef struct test_float {
float a;
@@ -2590,8 +2561,7 @@ TEST(sqrt_rsqrt_recip) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
typedef struct test_float {
float a;
@@ -2691,8 +2661,7 @@ TEST(neg) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
typedef struct test_float {
float a;
@@ -2745,8 +2714,7 @@ TEST(mul) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
typedef struct test_float {
float a;
@@ -2805,8 +2773,7 @@ TEST(mov) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
typedef struct test_float {
double a;
@@ -2860,8 +2827,7 @@ TEST(floor_w) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
typedef struct test_float {
uint32_t isNaN2008;
@@ -2932,8 +2898,7 @@ TEST(floor_l) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
const double dFPU64InvalidResult = static_cast<double>(kFPU64InvalidResult);
typedef struct test_float {
uint32_t isNaN2008;
@@ -3005,8 +2970,7 @@ TEST(ceil_w) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
typedef struct test_float {
uint32_t isNaN2008;
@@ -3077,8 +3041,7 @@ TEST(ceil_l) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
const double dFPU64InvalidResult = static_cast<double>(kFPU64InvalidResult);
typedef struct test_float {
uint32_t isNaN2008;
@@ -3151,7 +3114,7 @@ TEST(jump_tables1) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- Assembler assm(AssemblerOptions{}, nullptr, 0);
+ Assembler assm(AssemblerOptions{});
const int kNumCases = 512;
int values[kNumCases];
@@ -3215,7 +3178,7 @@ TEST(jump_tables2) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- Assembler assm(AssemblerOptions{}, nullptr, 0);
+ Assembler assm(AssemblerOptions{});
const int kNumCases = 512;
int values[kNumCases];
@@ -3281,7 +3244,7 @@ TEST(jump_tables3) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- Assembler assm(AssemblerOptions{}, nullptr, 0);
+ Assembler assm(AssemblerOptions{});
const int kNumCases = 256;
Handle<Object> values[kNumCases];
@@ -3290,7 +3253,7 @@ TEST(jump_tables3) {
values[i] = isolate->factory()->NewHeapNumber(value, TENURED);
}
Label labels[kNumCases];
- Object* obj;
+ Object obj;
int32_t imm32;
__ addiu(sp, sp, -4);
@@ -3303,7 +3266,7 @@ TEST(jump_tables3) {
for (int i = 0; i < kNumCases; ++i) {
__ bind(&labels[i]);
obj = *values[i];
- imm32 = reinterpret_cast<intptr_t>(obj);
+ imm32 = obj->ptr();
__ lui(v0, (imm32 >> 16) & 0xFFFF);
__ ori(v0, v0, imm32 & 0xFFFF);
__ b(&done);
@@ -3342,7 +3305,8 @@ TEST(jump_tables3) {
#endif
auto f = GeneratedCode<F1>::FromCode(*code);
for (int i = 0; i < kNumCases; ++i) {
- Handle<Object> result(f.Call(i, 0, 0, 0, 0), isolate);
+ Handle<Object> result(
+ Object(reinterpret_cast<Address>(f.Call(i, 0, 0, 0, 0))), isolate);
#ifdef OBJECT_PRINT
::printf("f(%d) = ", i);
result->Print(std::cout);
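These hunks track the same migration on the jump-table side: Object becomes a tagged value rather than a pointer, and its raw address is read through ptr() instead of a reinterpret_cast. A sketch under that assumption:

    Object obj = *values[i];  // value type, no longer Object*
    // ptr() exposes the raw tagged address; the old code used
    // reinterpret_cast<intptr_t>(obj) on the pointer instead.
    int32_t imm32 = obj->ptr();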
@@ -3368,7 +3332,7 @@ TEST(BITSWAP) {
} T;
T t;
- Assembler assm(AssemblerOptions{}, nullptr, 0);
+ Assembler assm(AssemblerOptions{});
__ lw(a2, MemOperand(a0, offsetof(T, r1)));
__ nop();
@@ -3430,8 +3394,7 @@ TEST(class_fmt) {
// Create a function that accepts &t, and loads, manipulates, and stores
// the doubles t.a ... t.f.
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
__ Ldc1(f4, MemOperand(a0, offsetof(T, dSignalingNan)));
__ class_d(f6, f4);
@@ -3578,8 +3541,7 @@ TEST(ABS) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
typedef struct test_float {
int64_t fir;
@@ -3676,8 +3638,7 @@ TEST(ADD_FMT) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
typedef struct test_float {
double a;
@@ -3747,8 +3708,7 @@ TEST(C_COND_FMT) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
typedef struct test_float {
double dOp1;
@@ -3959,8 +3919,7 @@ TEST(CMP_COND_FMT) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
typedef struct test_float {
double dOp1;
@@ -4176,8 +4135,7 @@ TEST(CVT) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
typedef struct test_float {
float cvt_d_s_in;
@@ -4422,8 +4380,7 @@ TEST(DIV_FMT) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
typedef struct test {
double dOp1;
@@ -4545,8 +4502,7 @@ uint32_t run_align(uint32_t rs_value, uint32_t rt_value, uint8_t bp) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
__ align(v0, a0, a1, bp);
__ jr(ra);
@@ -4601,8 +4557,7 @@ uint32_t run_aluipc(int16_t offset) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
__ aluipc(v0, offset);
__ jr(ra);
@@ -4655,8 +4610,7 @@ uint32_t run_auipc(int16_t offset) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
__ auipc(v0, offset);
__ jr(ra);
@@ -4709,8 +4663,7 @@ uint32_t run_lwpc(int offset) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
// 256k instructions; 2^8k
// addiu t7, t0, 0xFFFF; (0x250FFFFF)
@@ -4787,8 +4740,7 @@ uint32_t run_jic(int16_t offset) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
Label get_program_counter, stop_execution;
__ push(ra);
@@ -4863,8 +4815,7 @@ uint64_t run_beqzc(int32_t value, int32_t offset) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
Label stop_execution;
__ li(v0, 0);
@@ -4968,7 +4919,7 @@ void run_bz_bnz(TestCaseMsaBranch* input, Branch GenerateBranch,
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
CpuFeatureScope fscope(&assm, MIPS_SIMD);
typedef struct {
@@ -5121,8 +5072,7 @@ uint32_t run_jialc(int16_t offset) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
Label main_block, get_program_counter;
__ push(ra);
@@ -5207,8 +5157,7 @@ static uint32_t run_addiupc(int32_t imm19) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
__ addiupc(v0, imm19);
__ jr(ra);
@@ -5261,8 +5210,7 @@ int32_t run_bc(int32_t offset) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
Label continue_1, stop_execution;
__ push(ra);
@@ -5341,8 +5289,7 @@ int32_t run_balc(int32_t offset) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
Label continue_1, stop_execution;
__ push(ra);
@@ -5396,8 +5343,7 @@ uint32_t run_aui(uint32_t rs, uint16_t offset) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
__ li(t0, rs);
__ aui(v0, t0, offset);
@@ -5479,8 +5425,7 @@ uint32_t run_bal(int16_t offset) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
__ mov(t0, ra);
__ bal(offset);  // Equivalent to "BGEZAL zero_reg, offset".
@@ -5535,8 +5480,7 @@ TEST(Trampoline) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
Label done;
size_t nr_calls = kMaxBranchOffset / (2 * kInstrSize) + 2;
@@ -5567,8 +5511,7 @@ void helper_madd_msub_maddf_msubf(F func) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
T x = std::sqrt(static_cast<T>(2.0));
T y = std::sqrt(static_cast<T>(3.0));
@@ -5690,8 +5633,7 @@ uint32_t run_Subu(uint32_t imm, int32_t num_instr) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
Label code_start;
__ bind(&code_start);
@@ -5774,8 +5716,7 @@ TEST(MSA_fill_copy) {
} T;
T t;
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
if (!IsMipsArchVariant(kMips32r6) || !CpuFeatures::IsSupported(MIPS_SIMD))
return;
@@ -5839,8 +5780,7 @@ TEST(MSA_fill_copy_2) {
} T;
T t[2];
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
if (!IsMipsArchVariant(kMips32r6) || !CpuFeatures::IsSupported(MIPS_SIMD))
return;
@@ -5908,8 +5848,7 @@ TEST(MSA_fill_copy_3) {
} T;
T t[2];
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
if (!IsMipsArchVariant(kMips32r6) || !CpuFeatures::IsSupported(MIPS_SIMD))
return;
@@ -5952,8 +5891,7 @@ void run_msa_insert(int32_t rs_value, int n, msa_reg_t* w) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
CpuFeatureScope fscope(&assm, MIPS_SIMD);
__ li(t0, -1);
@@ -6067,8 +6005,7 @@ TEST(MSA_move_v) {
0xA9913868FB819C59}};
for (unsigned i = 0; i < arraysize(t); ++i) {
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
CpuFeatureScope fscope(&assm, MIPS_SIMD);
load_elements_of_vector(assm, &t[i].ws_lo, w0, t0, t1);
@@ -6114,8 +6051,7 @@ void run_msa_sldi(OperFunc GenerateOperation,
uint64_t res[2];
for (unsigned i = 0; i < arraysize(t); ++i) {
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
CpuFeatureScope fscope(&assm, MIPS_SIMD);
load_elements_of_vector(assm, &t[i].ws_lo, w0, t0, t1);
load_elements_of_vector(assm, &t[i].wd_lo, w2, t0, t1);
@@ -6196,8 +6132,7 @@ void run_msa_ctc_cfc(uint32_t value) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
CpuFeatureScope fscope(&assm, MIPS_SIMD);
MSAControlRegister msareg = {kMSACSRRegister};
@@ -6258,8 +6193,7 @@ void run_msa_i8(SecondaryField opcode, uint64_t ws_lo, uint64_t ws_hi,
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
CpuFeatureScope fscope(&assm, MIPS_SIMD);
msa_reg_t res;
uint64_t wd_lo = 0xF35862E13E38F8B0;
@@ -6492,8 +6426,7 @@ uint32_t run_Ins(uint32_t imm, uint32_t source, uint16_t pos, uint16_t size) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
__ li(v0, imm);
__ li(t0, source);
@@ -6543,8 +6476,7 @@ uint32_t run_Ext(uint32_t source, uint16_t pos, uint16_t size) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
__ li(v0, 0xFFFFFFFF);
__ li(t0, source);
@@ -6599,8 +6531,7 @@ void run_msa_i5(struct TestCaseMsaI5* input, bool i5_sign_ext,
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
CpuFeatureScope fscope(&assm, MIPS_SIMD);
msa_reg_t res;
int32_t i5 =
@@ -7023,8 +6954,7 @@ void run_msa_2r(const struct TestCaseMsa2R* input,
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
CpuFeatureScope fscope(&assm, MIPS_SIMD);
msa_reg_t res;
@@ -8071,8 +8001,7 @@ void run_msa_vector(struct TestCaseMsaVector* input,
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
CpuFeatureScope fscope(&assm, MIPS_SIMD);
msa_reg_t res;
@@ -8161,8 +8090,7 @@ void run_msa_bit(struct TestCaseMsaBit* input, InstFunc GenerateInstructionFunc,
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
CpuFeatureScope fscope(&assm, MIPS_SIMD);
msa_reg_t res;
@@ -8638,8 +8566,7 @@ void run_msa_i10(int32_t input, InstFunc GenerateVectorInstructionFunc,
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
CpuFeatureScope fscope(&assm, MIPS_SIMD);
msa_reg_t res;
@@ -8709,8 +8636,7 @@ void run_msa_mi10(InstFunc GenerateVectorInstructionFunc) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
CpuFeatureScope fscope(&assm, MIPS_SIMD);
T in_test_vector[1024];
T out_test_vector[1024];
@@ -8793,8 +8719,7 @@ void run_msa_3r(struct TestCaseMsa3R* input, InstFunc GenerateI5InstructionFunc,
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
CpuFeatureScope fscope(&assm, MIPS_SIMD);
msa_reg_t res;
@@ -9799,7 +9724,7 @@ void run_msa_3rf(const struct TestCaseMsa3RF* input,
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
CpuFeatureScope fscope(&assm, MIPS_SIMD);
msa_reg_t res;
diff --git a/deps/v8/test/cctest/test-assembler-mips64.cc b/deps/v8/test/cctest/test-assembler-mips64.cc
index ebae2e9ed5..aa82b359f2 100644
--- a/deps/v8/test/cctest/test-assembler-mips64.cc
+++ b/deps/v8/test/cctest/test-assembler-mips64.cc
@@ -34,7 +34,6 @@
#include "src/disassembler.h"
#include "src/heap/factory.h"
#include "src/macro-assembler.h"
-#include "src/mips64/macro-assembler-mips64.h"
#include "src/simulator.h"
#include "test/cctest/cctest.h"
@@ -44,11 +43,11 @@ namespace internal {
// Define these function prototypes to match JSEntryFunction in execution.cc.
// TODO(mips64): Refine these signatures per test case.
-typedef Object*(F1)(int x, int p1, int p2, int p3, int p4);
-typedef Object*(F2)(int x, int y, int p2, int p3, int p4);
-typedef Object*(F3)(void* p, int p1, int p2, int p3, int p4);
-typedef Object*(F4)(int64_t x, int64_t y, int64_t p2, int64_t p3, int64_t p4);
-typedef Object*(F5)(void* p0, void* p1, int p2, int p3, int p4);
+typedef void*(F1)(int x, int p1, int p2, int p3, int p4);
+typedef void*(F2)(int x, int y, int p2, int p3, int p4);
+typedef void*(F3)(void* p, int p1, int p2, int p3, int p4);
+typedef void*(F4)(int64_t x, int64_t y, int64_t p2, int64_t p3, int64_t p4);
+typedef void*(F5)(void* p0, void* p1, int p2, int p3, int p4);
#define __ assm.
@@ -57,8 +56,7 @@ TEST(MIPS0) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
// Addition.
__ addu(v0, a0, a1);
@@ -80,8 +78,7 @@ TEST(MIPS1) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
Label L, C;
__ mov(a1, a0);
@@ -116,8 +113,7 @@ TEST(MIPS2) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
Label exit, error;
@@ -286,8 +282,7 @@ TEST(MIPS3) {
// Create a function that accepts &t, and loads, manipulates, and stores
// the doubles t.a ... t.f.
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
Label L, C;
// Double precision floating point instructions.
@@ -410,8 +405,7 @@ TEST(MIPS4) {
} T;
T t;
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
Label L, C;
__ Ldc1(f4, MemOperand(a0, offsetof(T, a)));
@@ -477,8 +471,7 @@ TEST(MIPS5) {
} T;
T t;
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
Label L, C;
// Load all structure elements to registers.
@@ -546,8 +539,7 @@ TEST(MIPS6) {
} T;
T t;
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
Label L, C;
// Basic word load/store.
@@ -625,8 +617,7 @@ TEST(MIPS7) {
// Create a function that accepts &t, and loads, manipulates, and stores
// the doubles t.a ... t.f.
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
Label neither_is_nan, less_than, outa_here;
__ Ldc1(f4, MemOperand(a0, offsetof(T, a)));
@@ -712,8 +703,7 @@ TEST(MIPS8) {
} T;
T t;
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
// Basic word load.
__ Lw(a4, MemOperand(a0, offsetof(T, input)));
@@ -796,8 +786,7 @@ TEST(MIPS9) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
Label exit, exit2, exit3;
__ Branch(&exit, ge, a0, Operand(zero_reg));
@@ -838,8 +827,7 @@ TEST(MIPS10) {
} T;
T t;
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
Label L, C;
if (kArchVariant == kMips64r2) {
@@ -939,8 +927,7 @@ TEST(MIPS11) {
} T;
T t;
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
// Test all combinations of LWL and vAddr.
__ Lw(a4, MemOperand(a0, offsetof(T, reg_init)));
@@ -1091,8 +1078,7 @@ TEST(MIPS12) {
} T;
T t;
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
__ mov(t2, fp); // Save frame pointer.
__ mov(fp, a0); // Access struct T by fp.
@@ -1180,8 +1166,7 @@ TEST(MIPS13) {
} T;
T t;
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
__ Sw(a4, MemOperand(a0, offsetof(T, cvt_small_in)));
__ Cvt_d_uw(f10, a4);
@@ -1258,8 +1243,7 @@ TEST(MIPS14) {
#undef ROUND_STRUCT_ELEMENT
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
// Save FCSR.
__ cfc1(a1, FCSR);
@@ -1363,7 +1347,7 @@ TEST(MIPS15) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- Assembler assm(AssemblerOptions{}, nullptr, 0);
+ Assembler assm(AssemblerOptions{});
Label target;
__ beq(v0, v1, &target);
@@ -1401,8 +1385,7 @@ TEST(MIPS16) {
};
T t;
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
Label L, C;
// Basic 32-bit word load/store, with un-signed data.
@@ -1535,8 +1518,7 @@ TEST(seleqz_selnez) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
typedef struct test {
int a;
@@ -1638,8 +1620,7 @@ TEST(min_max) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
struct TestFloat {
double a;
@@ -1723,8 +1704,7 @@ TEST(rint_d) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
typedef struct test_float {
double a;
@@ -1827,8 +1807,7 @@ TEST(sel) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
typedef struct test {
double dd;
@@ -1902,8 +1881,7 @@ TEST(rint_s) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
typedef struct test_float {
float a;
@@ -2009,8 +1987,7 @@ TEST(mina_maxa) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
const double dnan = std::numeric_limits<double>::quiet_NaN();
const double dinf = std::numeric_limits<double>::infinity();
const double dminf = -std::numeric_limits<double>::infinity();
@@ -2106,8 +2083,7 @@ TEST(trunc_l) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
const double dFPU64InvalidResult = static_cast<double>(kFPU64InvalidResult);
typedef struct test_float {
uint32_t isNaN2008;
@@ -2180,8 +2156,7 @@ TEST(movz_movn) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
typedef struct test_float {
int64_t rt;
@@ -2309,8 +2284,7 @@ TEST(movt_movd) {
test.fcsr = 1 << (24+condition_flags[j]);
}
HandleScope scope(isolate);
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
__ Ldc1(f2, MemOperand(a0, offsetof(TestFloat, srcd)));
__ Lwc1(f4, MemOperand(a0, offsetof(TestFloat, srcf)));
__ Lw(t1, MemOperand(a0, offsetof(TestFloat, fcsr)));
@@ -2364,8 +2338,7 @@ TEST(cvt_w_d) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
typedef struct test_float {
double a;
@@ -2442,8 +2415,7 @@ TEST(trunc_w) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
typedef struct test_float {
uint32_t isNaN2008;
@@ -2513,8 +2485,7 @@ TEST(round_w) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
typedef struct test_float {
uint32_t isNaN2008;
@@ -2583,8 +2554,7 @@ TEST(round_l) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
const double dFPU64InvalidResult = static_cast<double>(kFPU64InvalidResult);
typedef struct test_float {
uint32_t isNaN2008;
@@ -2656,8 +2626,7 @@ TEST(sub) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
typedef struct test_float {
float a;
@@ -2730,8 +2699,7 @@ TEST(sqrt_rsqrt_recip) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
typedef struct test_float {
float a;
@@ -2822,8 +2790,7 @@ TEST(neg) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
typedef struct test_float {
float a;
@@ -2877,8 +2844,7 @@ TEST(mul) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
typedef struct test_float {
float a;
@@ -2937,8 +2903,7 @@ TEST(mov) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
typedef struct test_float {
double a;
@@ -2991,8 +2956,7 @@ TEST(floor_w) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
typedef struct test_float {
uint32_t isNaN2008;
@@ -3062,8 +3026,7 @@ TEST(floor_l) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
const double dFPU64InvalidResult = static_cast<double>(kFPU64InvalidResult);
typedef struct test_float {
uint32_t isNaN2008;
@@ -3134,8 +3097,7 @@ TEST(ceil_w) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
typedef struct test_float {
uint32_t isNaN2008;
@@ -3205,8 +3167,7 @@ TEST(ceil_l) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
const double dFPU64InvalidResult = static_cast<double>(kFPU64InvalidResult);
typedef struct test_float {
uint32_t isNaN2008;
@@ -3278,8 +3239,7 @@ TEST(jump_tables1) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
const int kNumCases = 512;
int values[kNumCases];
@@ -3344,8 +3304,7 @@ TEST(jump_tables2) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
const int kNumCases = 512;
int values[kNumCases];
@@ -3412,8 +3371,7 @@ TEST(jump_tables3) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
const int kNumCases = 512;
Handle<Object> values[kNumCases];
@@ -3422,7 +3380,7 @@ TEST(jump_tables3) {
values[i] = isolate->factory()->NewHeapNumber(value, TENURED);
}
Label labels[kNumCases];
- Object* obj;
+ Object obj;
int64_t imm64;
__ daddiu(sp, sp, -8);
@@ -3436,7 +3394,7 @@ TEST(jump_tables3) {
for (int i = 0; i < kNumCases; ++i) {
__ bind(&labels[i]);
obj = *values[i];
- imm64 = reinterpret_cast<intptr_t>(obj);
+ imm64 = obj->ptr();
__ lui(v0, (imm64 >> 32) & kImm16Mask);
__ ori(v0, v0, (imm64 >> 16) & kImm16Mask);
__ dsll(v0, v0, 16);
@@ -3478,7 +3436,8 @@ TEST(jump_tables3) {
#endif
auto f = GeneratedCode<F1>::FromCode(*code);
for (int i = 0; i < kNumCases; ++i) {
- Handle<Object> result(f.Call(i, 0, 0, 0, 0), isolate);
+ Handle<Object> result(
+ Object(reinterpret_cast<Address>(f.Call(i, 0, 0, 0, 0))), isolate);
#ifdef OBJECT_PRINT
::printf("f(%d) = ", i);
result->Print(std::cout);
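
(The jump_tables3 hunks above track V8 7.3's change of Object from a heap-allocated pointer type to a tagged value class. A minimal before/after sketch of the pattern, reusing only the scaffolding visible in this test; illustrative, not part of the patch:)

  // Before: Object was used through a raw pointer, so its address *was*
  // the pointer value.
  //   Object* obj = *values[i];
  //   int64_t imm64 = reinterpret_cast<intptr_t>(obj);
  //
  // After: Object is a value wrapping a tagged Address. Unwrap it with
  // ptr(), and re-wrap raw results from generated code before making a
  // Handle.
  Object obj = *values[i];
  int64_t imm64 = obj->ptr();
  Handle<Object> result(
      Object(reinterpret_cast<Address>(f.Call(i, 0, 0, 0, 0))), isolate);
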
@@ -3506,8 +3465,7 @@ TEST(BITSWAP) {
} T;
T t;
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
__ Ld(a4, MemOperand(a0, offsetof(T, r1)));
__ nop();
@@ -3597,8 +3555,7 @@ TEST(class_fmt) {
// Create a function that accepts &t, and loads, manipulates, and stores
// the doubles t.a ... t.f.
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
__ Ldc1(f4, MemOperand(a0, offsetof(T, dSignalingNan)));
__ class_d(f6, f4);
@@ -3746,8 +3703,7 @@ TEST(ABS) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
typedef struct test_float {
int64_t fir;
@@ -3845,8 +3801,7 @@ TEST(ADD_FMT) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
typedef struct test_float {
double a;
@@ -3916,8 +3871,7 @@ TEST(C_COND_FMT) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
typedef struct test_float {
double dOp1;
@@ -4128,8 +4082,7 @@ TEST(CMP_COND_FMT) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
typedef struct test_float {
double dOp1;
@@ -4345,8 +4298,7 @@ TEST(CVT) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
typedef struct test_float {
float cvt_d_s_in;
@@ -4543,8 +4495,7 @@ TEST(DIV_FMT) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
typedef struct test {
double dOp1;
@@ -4664,8 +4615,7 @@ uint64_t run_align(uint64_t rs_value, uint64_t rt_value, uint8_t bp) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
__ align(v0, a0, a1, bp);
__ jr(ra);
@@ -4720,8 +4670,7 @@ uint64_t run_dalign(uint64_t rs_value, uint64_t rt_value, uint8_t bp) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
__ dalign(v0, a0, a1, bp);
__ jr(ra);
@@ -4781,8 +4730,7 @@ uint64_t run_aluipc(int16_t offset) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
__ aluipc(v0, offset);
__ jr(ra);
@@ -4835,8 +4783,7 @@ uint64_t run_auipc(int16_t offset) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
__ auipc(v0, offset);
__ jr(ra);
@@ -4889,8 +4836,7 @@ uint64_t run_aui(uint64_t rs, uint16_t offset) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
__ li(t0, rs);
__ aui(v0, t0, offset);
@@ -4914,8 +4860,7 @@ uint64_t run_daui(uint64_t rs, uint16_t offset) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
__ li(t0, rs);
__ daui(v0, t0, offset);
@@ -4939,8 +4884,7 @@ uint64_t run_dahi(uint64_t rs, uint16_t offset) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
__ li(v0, rs);
__ dahi(v0, offset);
@@ -4964,8 +4908,7 @@ uint64_t run_dati(uint64_t rs, uint16_t offset) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
__ li(v0, rs);
__ dati(v0, offset);
@@ -5064,8 +5007,7 @@ TEST(r6_aui_family) {
uint64_t run_li_macro(uint64_t imm, LiFlags mode, int32_t num_instr = 0) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
Label code_start;
__ bind(&code_start);
@@ -5260,8 +5202,7 @@ uint64_t run_lwpc(int offset) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
// 256k instructions; 2^8k
// addiu t3, a4, 0xFFFF; (0x250FFFFF)
@@ -5338,8 +5279,7 @@ uint64_t run_lwupc(int offset) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
// 256k instructions; 2^8k
// addiu t3, a4, 0xFFFF; (0x250FFFFF)
@@ -5416,8 +5356,7 @@ uint64_t run_jic(int16_t offset) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
Label get_program_counter, stop_execution;
__ push(ra);
@@ -5492,8 +5431,7 @@ uint64_t run_beqzc(int32_t value, int32_t offset) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
Label stop_execution;
__ li(v0, 0l);
@@ -5597,7 +5535,7 @@ void run_bz_bnz(TestCaseMsaBranch* input, Branch GenerateBranch,
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
CpuFeatureScope fscope(&assm, MIPS_SIMD);
typedef struct {
@@ -5750,8 +5688,7 @@ uint64_t run_jialc(int16_t offset) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
Label main_block, get_program_counter;
__ push(ra);
@@ -5839,8 +5776,7 @@ uint64_t run_addiupc(int32_t imm19) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
__ addiupc(v0, imm19);
__ jr(ra);
@@ -5893,8 +5829,7 @@ uint64_t run_ldpc(int offset) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
// 256k instructions; 2 * 2^7k = 2^8k
// addiu t3, a4, 0xFFFF; (0x250FFFFF)
@@ -5975,8 +5910,7 @@ int64_t run_bc(int32_t offset) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
Label continue_1, stop_execution;
__ push(ra);
@@ -6055,8 +5989,7 @@ int64_t run_balc(int32_t offset) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
Label continue_1, stop_execution;
__ push(ra);
@@ -6136,8 +6069,7 @@ uint64_t run_dsll(uint64_t rt_value, uint16_t sa_value) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
__ dsll(v0, a0, sa_value);
__ jr(ra);
@@ -6186,8 +6118,7 @@ uint64_t run_bal(int16_t offset) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
__ mov(t0, ra);
  __ bal(offset); // Equivalent to "BGEZAL zero_reg, offset".
@@ -6244,8 +6175,7 @@ TEST(Trampoline) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
Label done;
size_t nr_calls = kMaxBranchOffset / (2 * kInstrSize) + 2;
@@ -6276,8 +6206,7 @@ void helper_madd_msub_maddf_msubf(F func) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
T x = std::sqrt(static_cast<T>(2.0));
T y = std::sqrt(static_cast<T>(3.0));
@@ -6397,8 +6326,7 @@ uint64_t run_Subu(uint64_t imm, int32_t num_instr) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
Label code_start;
__ bind(&code_start);
@@ -6481,8 +6409,7 @@ uint64_t run_Dsubu(uint64_t imm, int32_t num_instr) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
Label code_start;
__ bind(&code_start);
@@ -6579,8 +6506,7 @@ uint64_t run_Dins(uint64_t imm, uint64_t source, uint16_t pos, uint16_t size) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
__ li(v0, imm);
__ li(t0, source);
@@ -6640,8 +6566,7 @@ uint64_t run_Ins(uint64_t imm, uint64_t source, uint16_t pos, uint16_t size) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
__ li(v0, imm);
__ li(t0, source);
@@ -6709,8 +6634,7 @@ uint64_t run_Ext(uint64_t source, uint16_t pos, uint16_t size) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
__ li(v0, 0xFFFFFFFFFFFFFFFF);
__ li(t0, source);
@@ -6768,8 +6692,7 @@ TEST(MSA_fill_copy) {
} T;
T t;
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
if ((kArchVariant != kMips64r6) || !CpuFeatures::IsSupported(MIPS_SIMD))
return;
@@ -6835,8 +6758,7 @@ TEST(MSA_fill_copy_2) {
} T;
T t[2];
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
if ((kArchVariant != kMips64r6) || !CpuFeatures::IsSupported(MIPS_SIMD))
return;
@@ -6896,8 +6818,7 @@ TEST(MSA_fill_copy_3) {
} T;
T t[2];
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
if ((kArchVariant != kMips64r6) || !CpuFeatures::IsSupported(MIPS_SIMD))
return;
@@ -6941,8 +6862,7 @@ void run_msa_insert(int64_t rs_value, int n, msa_reg_t* w) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
CpuFeatureScope fscope(&assm, MIPS_SIMD);
__ li(t0, -1);
@@ -7066,8 +6986,7 @@ void run_msa_ctc_cfc(uint64_t value) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
CpuFeatureScope fscope(&assm, MIPS_SIMD);
MSAControlRegister msareg = {kMSACSRRegister};
@@ -7119,8 +7038,7 @@ TEST(MSA_move_v) {
0xA9913868FB819C59}};
for (unsigned i = 0; i < arraysize(t); ++i) {
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
CpuFeatureScope fscope(&assm, MIPS_SIMD);
load_elements_of_vector(assm, &t[i].ws_lo, w0, t0, t1);
@@ -7166,8 +7084,7 @@ void run_msa_sldi(OperFunc GenerateOperation,
uint64_t res[2];
for (unsigned i = 0; i < arraysize(t); ++i) {
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
CpuFeatureScope fscope(&assm, MIPS_SIMD);
load_elements_of_vector(assm, &t[i].ws_lo, w0, t0, t1);
load_elements_of_vector(assm, &t[i].wd_lo, w2, t0, t1);
@@ -7278,8 +7195,7 @@ void run_msa_i8(SecondaryField opcode, uint64_t ws_lo, uint64_t ws_hi,
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
CpuFeatureScope fscope(&assm, MIPS_SIMD);
msa_reg_t res;
uint64_t wd_lo = 0xF35862E13E38F8B0;
@@ -7527,8 +7443,7 @@ void run_msa_i5(struct TestCaseMsaI5* input, bool i5_sign_ext,
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
CpuFeatureScope fscope(&assm, MIPS_SIMD);
msa_reg_t res;
int32_t i5 =
@@ -7957,8 +7872,7 @@ void run_msa_2r(const struct TestCaseMsa2R* input,
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
CpuFeatureScope fscope(&assm, MIPS_SIMD);
msa_reg_t res;
@@ -9005,8 +8919,7 @@ void run_msa_vector(struct TestCaseMsaVector* input,
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
CpuFeatureScope fscope(&assm, MIPS_SIMD);
msa_reg_t res;
@@ -9095,8 +9008,7 @@ void run_msa_bit(struct TestCaseMsaBit* input, InstFunc GenerateInstructionFunc,
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
CpuFeatureScope fscope(&assm, MIPS_SIMD);
msa_reg_t res;
@@ -9572,8 +9484,7 @@ void run_msa_i10(int32_t input, InstFunc GenerateVectorInstructionFunc,
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
CpuFeatureScope fscope(&assm, MIPS_SIMD);
msa_reg_t res;
@@ -9643,8 +9554,7 @@ void run_msa_mi10(InstFunc GenerateVectorInstructionFunc) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
CpuFeatureScope fscope(&assm, MIPS_SIMD);
T in_test_vector[1024];
T out_test_vector[1024];
@@ -9727,8 +9637,7 @@ void run_msa_3r(struct TestCaseMsa3R* input, InstFunc GenerateI5InstructionFunc,
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
CpuFeatureScope fscope(&assm, MIPS_SIMD);
msa_reg_t res;
@@ -10732,7 +10641,7 @@ void run_msa_3rf(const struct TestCaseMsa3RF* input,
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
CpuFeatureScope fscope(&assm, MIPS_SIMD);
msa_reg_t res;
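
(Nearly every mips64 hunk above is the same mechanical rewrite: the MacroAssembler constructor lost its explicit buffer pointer and size, which in these tests were always nullptr/0 anyway. A minimal sketch of the new form, assuming the usual cctest setup these tests share:)

  // Before:
  //   MacroAssembler assm(isolate, nullptr, 0,
  //                       v8::internal::CodeObjectRequired::kYes);
  // After: the assembler allocates and owns its buffer; only the isolate
  // and the code-object requirement remain.
  CcTest::InitializeVM();
  Isolate* isolate = CcTest::i_isolate();
  HandleScope scope(isolate);
  MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
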
diff --git a/deps/v8/test/cctest/test-assembler-ppc.cc b/deps/v8/test/cctest/test-assembler-ppc.cc
index 08559cd762..196a3d91df 100644
--- a/deps/v8/test/cctest/test-assembler-ppc.cc
+++ b/deps/v8/test/cctest/test-assembler-ppc.cc
@@ -32,17 +32,18 @@
#include "src/ppc/assembler-ppc-inl.h"
#include "src/simulator.h"
#include "test/cctest/cctest.h"
+#include "test/common/assembler-tester.h"
namespace v8 {
namespace internal {
// TODO(ppc): Refine these signatures per test case, they can have arbitrary
// return and argument types and arbitrary number of arguments.
-using F_iiiii = Object*(int x, int p1, int p2, int p3, int p4);
-using F_piiii = Object*(void* p0, int p1, int p2, int p3, int p4);
-using F_ppiii = Object*(void* p0, void* p1, int p2, int p3, int p4);
-using F_pppii = Object*(void* p0, void* p1, void* p2, int p3, int p4);
-using F_ippii = Object*(int p0, void* p1, void* p2, int p3, int p4);
+using F_iiiii = void*(int x, int p1, int p2, int p3, int p4);
+using F_piiii = void*(void* p0, int p1, int p2, int p3, int p4);
+using F_ppiii = void*(void* p0, void* p1, int p2, int p3, int p4);
+using F_pppii = void*(void* p0, void* p1, void* p2, int p3, int p4);
+using F_ippii = void*(int p0, void* p1, void* p2, int p3, int p4);
#define __ assm.
@@ -52,9 +53,7 @@ TEST(0) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- Assembler assm(AssemblerOptions{}, nullptr, 0);
-
- __ function_descriptor();
+ Assembler assm(AssemblerOptions{});
__ add(r3, r3, r4);
__ blr();
@@ -79,11 +78,9 @@ TEST(1) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- Assembler assm(AssemblerOptions{}, nullptr, 0);
+ Assembler assm(AssemblerOptions{});
Label L, C;
- __ function_descriptor();
-
__ mr(r4, r3);
__ li(r3, Operand::Zero());
__ b(&C);
@@ -116,11 +113,9 @@ TEST(2) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- Assembler assm(AssemblerOptions{}, nullptr, 0);
+ Assembler assm(AssemblerOptions{});
Label L, C;
- __ function_descriptor();
-
__ mr(r4, r3);
__ li(r3, Operand(1));
__ b(&C);
@@ -173,11 +168,9 @@ TEST(3) {
} T;
T t;
- Assembler assm(AssemblerOptions{}, nullptr, 0);
+ Assembler assm(AssemblerOptions{});
Label L, C;
- __ function_descriptor();
-
// build a frame
#if V8_TARGET_ARCH_PPC64
__ stdu(sp, MemOperand(sp, -32));
@@ -264,7 +257,7 @@ TEST(4) {
// Create a function that accepts &t, and loads, manipulates, and stores
// the doubles and floats.
- Assembler assm(AssemblerOptions{}, nullptr, 0);
+ Assembler assm(AssemblerOptions{});
Label L, C;
if (CpuFeatures::IsSupported(VFP3)) {
@@ -333,7 +326,7 @@ TEST(4) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Object* code = isolate->heap()->CreateCode(
+ Object code = isolate->heap()->CreateCode(
desc,
Code::STUB,
Handle<Code>())->ToObjectChecked();
@@ -379,7 +372,7 @@ TEST(5) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- Assembler assm(AssemblerOptions{}, nullptr, 0);
+ Assembler assm(AssemblerOptions{});
if (CpuFeatures::IsSupported(ARMv7)) {
CpuFeatures::Scope scope(ARMv7);
@@ -393,7 +386,7 @@ TEST(5) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Object* code = isolate->heap()->CreateCode(
+ Object code = isolate->heap()->CreateCode(
desc,
Code::STUB,
Handle<Code>())->ToObjectChecked();
@@ -415,7 +408,7 @@ TEST(6) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- Assembler assm(AssemblerOptions{}, nullptr, 0);
+ Assembler assm(AssemblerOptions{});
if (CpuFeatures::IsSupported(ARMv7)) {
CpuFeatures::Scope scope(ARMv7);
@@ -428,7 +421,7 @@ TEST(6) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Object* code = isolate->heap()->CreateCode(
+ Object code = isolate->heap()->CreateCode(
desc,
Code::STUB,
Handle<Code>())->ToObjectChecked();
@@ -457,7 +450,7 @@ static void TestRoundingMode(VCVTTypes types,
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- Assembler assm(AssemblerOptions{}, nullptr, 0);
+ Assembler assm(AssemblerOptions{});
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
@@ -503,7 +496,7 @@ static void TestRoundingMode(VCVTTypes types,
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Object* code = isolate->heap()->CreateCode(
+ Object code = isolate->heap()->CreateCode(
desc,
Code::STUB,
Handle<Code>())->ToObjectChecked();
@@ -661,7 +654,7 @@ TEST(8) {
// Create a function that uses vldm/vstm to move some double and
// single precision values around in memory.
- Assembler assm(AssemblerOptions{}, nullptr, 0);
+ Assembler assm(AssemblerOptions{});
if (CpuFeatures::IsSupported(VFP2)) {
CpuFeatures::Scope scope(VFP2);
@@ -690,7 +683,7 @@ TEST(8) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Object* code = isolate->heap()->CreateCode(
+ Object code = isolate->heap()->CreateCode(
desc,
Code::STUB,
Handle<Code>())->ToObjectChecked();
@@ -772,7 +765,7 @@ TEST(9) {
// Create a function that uses vldm/vstm to move some double and
// single precision values around in memory.
- Assembler assm(AssemblerOptions{}, nullptr, 0);
+ Assembler assm(AssemblerOptions{});
if (CpuFeatures::IsSupported(VFP2)) {
CpuFeatures::Scope scope(VFP2);
@@ -805,7 +798,7 @@ TEST(9) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Object* code = isolate->heap()->CreateCode(
+ Object code = isolate->heap()->CreateCode(
desc,
Code::STUB,
Handle<Code>())->ToObjectChecked();
@@ -887,7 +880,7 @@ TEST(10) {
// Create a function that uses vldm/vstm to move some double and
// single precision values around in memory.
- Assembler assm(AssemblerOptions{}, nullptr, 0);
+ Assembler assm(AssemblerOptions{});
if (CpuFeatures::IsSupported(VFP2)) {
CpuFeatures::Scope scope(VFP2);
@@ -916,7 +909,7 @@ TEST(10) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Object* code = isolate->heap()->CreateCode(
+ Object code = isolate->heap()->CreateCode(
desc,
Code::STUB,
Handle<Code>())->ToObjectChecked();
@@ -983,7 +976,7 @@ TEST(11) {
i.a = 0xABCD0001;
i.b = 0xABCD0000;
- Assembler assm(AssemblerOptions{}, nullptr, 0);
+ Assembler assm(AssemblerOptions{});
// Test HeapObject untagging.
__ ldr(r1, MemOperand(r0, offsetof(I, a)));
@@ -1013,7 +1006,7 @@ TEST(11) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Object* code = isolate->heap()->CreateCode(
+ Object code = isolate->heap()->CreateCode(
desc,
Code::STUB,
Handle<Code>())->ToObjectChecked();
@@ -1037,7 +1030,7 @@ TEST(12) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- Assembler assm(AssemblerOptions{}, nullptr, 0);
+ Assembler assm(AssemblerOptions{});
Label target;
__ b(eq, &target);
__ b(ne, &target);
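
(The ppc hunks make two related changes: raw call signatures are retyped from Object* to void*, since generated code now returns a tagged address rather than a heap pointer, and the Assembler is constructed without an explicit buffer. A short sketch combining both, using only calls that appear in the hunks above; the dropped `__ function_descriptor()` calls simply disappear, as in TEST(0) through TEST(3):)

  // Before: using F_iiiii = Object*(int x, int p1, int p2, int p3, int p4);
  using F_iiiii = void*(int x, int p1, int p2, int p3, int p4);

  // Before: Assembler assm(AssemblerOptions{}, nullptr, 0);
  Assembler assm(AssemblerOptions{});
  __ add(r3, r3, r4);
  __ blr();

  CodeDesc desc;
  assm.GetCode(isolate, &desc);
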
diff --git a/deps/v8/test/cctest/test-assembler-s390.cc b/deps/v8/test/cctest/test-assembler-s390.cc
index 76e0ce8d10..447a9c048f 100644
--- a/deps/v8/test/cctest/test-assembler-s390.cc
+++ b/deps/v8/test/cctest/test-assembler-s390.cc
@@ -33,16 +33,17 @@
#include "src/s390/assembler-s390-inl.h"
#include "src/simulator.h"
#include "test/cctest/cctest.h"
+#include "test/common/assembler-tester.h"
namespace v8 {
namespace internal {
// Define these function prototypes to match JSEntryFunction in execution.cc.
// TODO(s390): Refine these signatures per test case.
-using F1 = Object*(int x, int p1, int p2, int p3, int p4);
-using F2 = Object*(int x, int y, int p2, int p3, int p4);
-using F3 = Object*(void* p0, int p1, int p2, int p3, int p4);
-using F4 = Object*(void* p0, void* p1, int p2, int p3, int p4);
+using F1 = void*(int x, int p1, int p2, int p3, int p4);
+using F2 = void*(int x, int y, int p2, int p3, int p4);
+using F3 = void*(void* p0, int p1, int p2, int p3, int p4);
+using F4 = void*(void* p0, void* p1, int p2, int p3, int p4);
#define __ assm.
@@ -52,7 +53,7 @@ TEST(0) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- Assembler assm(AssemblerOptions{}, nullptr, 0);
+ Assembler assm(AssemblerOptions{});
__ lhi(r1, Operand(3)); // test 4-byte instr
__ llilf(r2, Operand(4)); // test 6-byte instr
@@ -79,7 +80,7 @@ TEST(1) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- Assembler assm(AssemblerOptions{}, nullptr, 0);
+ Assembler assm(AssemblerOptions{});
Label L, C;
#if defined(_AIX)
@@ -119,7 +120,7 @@ TEST(2) {
// Create a function that accepts &t, and loads, manipulates, and stores
// the doubles and floats.
- Assembler assm(AssemblerOptions{}, nullptr, 0);
+ Assembler assm(AssemblerOptions{});
Label L, C;
#if defined(_AIX)
@@ -168,7 +169,7 @@ TEST(3) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- Assembler assm(AssemblerOptions{}, nullptr, 0);
+ Assembler assm(AssemblerOptions{});
__ ar(r14, r13);
__ sr(r14, r13);
@@ -222,7 +223,7 @@ TEST(4) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- Assembler assm(AssemblerOptions{}, nullptr, 0);
+ Assembler assm(AssemblerOptions{});
Label L2, L3, L4;
__ chi(r2, Operand(10));
@@ -267,7 +268,7 @@ TEST(5) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, nullptr, 0);
+ Assembler assm(AssemblerOptions{});
__ mov(r2, Operand(0x12345678));
__ ExtractBitRange(r3, r2, 3, 2);
@@ -295,7 +296,7 @@ TEST(6) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, nullptr, 0);
+ Assembler assm(AssemblerOptions{});
Label yes;
@@ -329,7 +330,7 @@ TEST(7) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, nullptr, 0);
+ Assembler assm(AssemblerOptions{});
Label yes;
@@ -361,7 +362,7 @@ TEST(8) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, nullptr, 0);
+ Assembler assm(AssemblerOptions{});
// Zero upper bits of r3/r4
__ llihf(r3, Operand::Zero());
@@ -393,7 +394,7 @@ TEST(9) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, nullptr, 0);
+ Assembler assm(AssemblerOptions{});
__ lzdr(d4);
__ b(r14);
@@ -424,7 +425,7 @@ TEST(10) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- Assembler assm(AssemblerOptions{}, nullptr, 0);
+ Assembler assm(AssemblerOptions{});
Label ok, failed;
@@ -502,7 +503,7 @@ TEST(11) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- Assembler assm(AssemblerOptions{}, nullptr, 0);
+ Assembler assm(AssemblerOptions{});
Label ok, failed, continue1, continue2;
// r1 - operand; r3 - inc / test val
@@ -556,7 +557,7 @@ TEST(12) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- Assembler assm(AssemblerOptions{}, nullptr, 0);
+ Assembler assm(AssemblerOptions{});
Label ok, failed, continue1, continue2;
// r1 - operand; r3 - inc / test val
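
(On s390 the pattern is slightly different: several tests that never needed a code object drop MacroAssembler entirely in favor of a plain Assembler. A minimal sketch of the replacement, based on TEST(0) and TEST(5) above; illustrative only:)

  // Before: MacroAssembler assm(isolate, nullptr, 0);
  // After: a plain Assembler with default options is enough here.
  Assembler assm(AssemblerOptions{});
  __ lhi(r1, Operand(3));    // 4-byte instruction
  __ llilf(r2, Operand(4));  // 6-byte instruction
  __ b(r14);
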
diff --git a/deps/v8/test/cctest/test-assembler-x64.cc b/deps/v8/test/cctest/test-assembler-x64.cc
index a340322bd6..ae23af9f87 100644
--- a/deps/v8/test/cctest/test-assembler-x64.cc
+++ b/deps/v8/test/cctest/test-assembler-x64.cc
@@ -73,9 +73,8 @@ static const Register arg2 = rsi;
TEST(AssemblerX64ReturnOperation) {
CcTest::InitializeVM();
- size_t allocated;
- byte* buffer = AllocateAssemblerBuffer(&allocated);
- Assembler masm(AssemblerOptions{}, buffer, static_cast<int>(allocated));
+ auto buffer = AllocateAssemblerBuffer();
+ Assembler masm(AssemblerOptions{}, buffer->CreateView());
// Assemble a simple function that copies argument 2 and returns it.
__ movq(rax, arg2);
@@ -84,9 +83,9 @@ TEST(AssemblerX64ReturnOperation) {
CodeDesc desc;
masm.GetCode(CcTest::i_isolate(), &desc);
- MakeAssemblerBufferExecutable(buffer, allocated);
+ buffer->MakeExecutable();
// Call the function from C++.
- auto f = GeneratedCode<F2>::FromBuffer(CcTest::i_isolate(), buffer);
+ auto f = GeneratedCode<F2>::FromBuffer(CcTest::i_isolate(), buffer->start());
int result = f.Call(3, 2);
CHECK_EQ(2, result);
}
@@ -94,9 +93,8 @@ TEST(AssemblerX64ReturnOperation) {
TEST(AssemblerX64StackOperations) {
CcTest::InitializeVM();
- size_t allocated;
- byte* buffer = AllocateAssemblerBuffer(&allocated);
- Assembler masm(AssemblerOptions{}, buffer, static_cast<int>(allocated));
+ auto buffer = AllocateAssemblerBuffer();
+ Assembler masm(AssemblerOptions{}, buffer->CreateView());
// Assemble a simple function that copies argument 2 and returns it.
// We compile without stack frame pointers, so the gdb debugger shows
@@ -115,9 +113,9 @@ TEST(AssemblerX64StackOperations) {
CodeDesc desc;
masm.GetCode(CcTest::i_isolate(), &desc);
- MakeAssemblerBufferExecutable(buffer, allocated);
+ buffer->MakeExecutable();
// Call the function from C++.
- auto f = GeneratedCode<F2>::FromBuffer(CcTest::i_isolate(), buffer);
+ auto f = GeneratedCode<F2>::FromBuffer(CcTest::i_isolate(), buffer->start());
int result = f.Call(3, 2);
CHECK_EQ(2, result);
}
@@ -125,9 +123,8 @@ TEST(AssemblerX64StackOperations) {
TEST(AssemblerX64ArithmeticOperations) {
CcTest::InitializeVM();
- size_t allocated;
- byte* buffer = AllocateAssemblerBuffer(&allocated);
- Assembler masm(AssemblerOptions{}, buffer, static_cast<int>(allocated));
+ auto buffer = AllocateAssemblerBuffer();
+ Assembler masm(AssemblerOptions{}, buffer->CreateView());
// Assemble a simple function that adds arguments returning the sum.
__ movq(rax, arg2);
@@ -136,9 +133,9 @@ TEST(AssemblerX64ArithmeticOperations) {
CodeDesc desc;
masm.GetCode(CcTest::i_isolate(), &desc);
- MakeAssemblerBufferExecutable(buffer, allocated);
+ buffer->MakeExecutable();
// Call the function from C++.
- auto f = GeneratedCode<F2>::FromBuffer(CcTest::i_isolate(), buffer);
+ auto f = GeneratedCode<F2>::FromBuffer(CcTest::i_isolate(), buffer->start());
int result = f.Call(3, 2);
CHECK_EQ(5, result);
}
@@ -146,9 +143,8 @@ TEST(AssemblerX64ArithmeticOperations) {
TEST(AssemblerX64CmpbOperation) {
CcTest::InitializeVM();
- size_t allocated;
- byte* buffer = AllocateAssemblerBuffer(&allocated);
- Assembler masm(AssemblerOptions{}, buffer, static_cast<int>(allocated));
+ auto buffer = AllocateAssemblerBuffer();
+ Assembler masm(AssemblerOptions{}, buffer->CreateView());
  // Assemble a function that compares argument bytes, returning 1 if equal, else 0.
// On Windows, it compares rcx with rdx which does not require REX prefix;
@@ -164,9 +160,9 @@ TEST(AssemblerX64CmpbOperation) {
CodeDesc desc;
masm.GetCode(CcTest::i_isolate(), &desc);
- MakeAssemblerBufferExecutable(buffer, allocated);
+ buffer->MakeExecutable();
// Call the function from C++.
- auto f = GeneratedCode<F2>::FromBuffer(CcTest::i_isolate(), buffer);
+ auto f = GeneratedCode<F2>::FromBuffer(CcTest::i_isolate(), buffer->start());
int result = f.Call(0x1002, 0x2002);
CHECK_EQ(1, result);
result = f.Call(0x1002, 0x2003);
@@ -175,9 +171,8 @@ TEST(AssemblerX64CmpbOperation) {
TEST(AssemblerX64ImulOperation) {
CcTest::InitializeVM();
- size_t allocated;
- byte* buffer = AllocateAssemblerBuffer(&allocated);
- Assembler masm(AssemblerOptions{}, buffer, static_cast<int>(allocated));
+ auto buffer = AllocateAssemblerBuffer();
+ Assembler masm(AssemblerOptions{}, buffer->CreateView());
// Assemble a simple function that multiplies arguments returning the high
// word.
@@ -188,9 +183,9 @@ TEST(AssemblerX64ImulOperation) {
CodeDesc desc;
masm.GetCode(CcTest::i_isolate(), &desc);
- MakeAssemblerBufferExecutable(buffer, allocated);
+ buffer->MakeExecutable();
// Call the function from C++.
- auto f = GeneratedCode<F2>::FromBuffer(CcTest::i_isolate(), buffer);
+ auto f = GeneratedCode<F2>::FromBuffer(CcTest::i_isolate(), buffer->start());
int result = f.Call(3, 2);
CHECK_EQ(0, result);
result = f.Call(0x100000000l, 0x100000000l);
@@ -202,9 +197,8 @@ TEST(AssemblerX64ImulOperation) {
TEST(AssemblerX64testbwqOperation) {
CcTest::InitializeVM();
v8::HandleScope scope(CcTest::isolate());
- size_t allocated;
- byte* buffer = AllocateAssemblerBuffer(&allocated);
- Assembler masm(AssemblerOptions{}, buffer, static_cast<int>(allocated));
+ auto buffer = AllocateAssemblerBuffer();
+ Assembler masm(AssemblerOptions{}, buffer->CreateView());
__ pushq(rbx);
__ pushq(rdi);
@@ -359,18 +353,17 @@ TEST(AssemblerX64testbwqOperation) {
CodeDesc desc;
masm.GetCode(CcTest::i_isolate(), &desc);
- MakeAssemblerBufferExecutable(buffer, allocated);
+ buffer->MakeExecutable();
// Call the function from C++.
- auto f = GeneratedCode<F2>::FromBuffer(CcTest::i_isolate(), buffer);
+ auto f = GeneratedCode<F2>::FromBuffer(CcTest::i_isolate(), buffer->start());
int result = f.Call(0, 0);
CHECK_EQ(1, result);
}
TEST(AssemblerX64XchglOperations) {
CcTest::InitializeVM();
- size_t allocated;
- byte* buffer = AllocateAssemblerBuffer(&allocated);
- Assembler masm(AssemblerOptions{}, buffer, static_cast<int>(allocated));
+ auto buffer = AllocateAssemblerBuffer();
+ Assembler masm(AssemblerOptions{}, buffer->CreateView());
__ movq(rax, Operand(arg1, 0));
__ movq(r11, Operand(arg2, 0));
@@ -381,11 +374,11 @@ TEST(AssemblerX64XchglOperations) {
CodeDesc desc;
masm.GetCode(CcTest::i_isolate(), &desc);
- MakeAssemblerBufferExecutable(buffer, allocated);
+ buffer->MakeExecutable();
// Call the function from C++.
uint64_t left = V8_2PART_UINT64_C(0x10000000, 20000000);
uint64_t right = V8_2PART_UINT64_C(0x30000000, 40000000);
- auto f = GeneratedCode<F4>::FromBuffer(CcTest::i_isolate(), buffer);
+ auto f = GeneratedCode<F4>::FromBuffer(CcTest::i_isolate(), buffer->start());
uint64_t result = f.Call(&left, &right);
CHECK_EQ(V8_2PART_UINT64_C(0x00000000, 40000000), left);
CHECK_EQ(V8_2PART_UINT64_C(0x00000000, 20000000), right);
@@ -395,9 +388,8 @@ TEST(AssemblerX64XchglOperations) {
TEST(AssemblerX64OrlOperations) {
CcTest::InitializeVM();
- size_t allocated;
- byte* buffer = AllocateAssemblerBuffer(&allocated);
- Assembler masm(AssemblerOptions{}, buffer, static_cast<int>(allocated));
+ auto buffer = AllocateAssemblerBuffer();
+ Assembler masm(AssemblerOptions{}, buffer->CreateView());
__ movq(rax, Operand(arg2, 0));
__ orl(Operand(arg1, 0), rax);
@@ -405,11 +397,11 @@ TEST(AssemblerX64OrlOperations) {
CodeDesc desc;
masm.GetCode(CcTest::i_isolate(), &desc);
- MakeAssemblerBufferExecutable(buffer, allocated);
+ buffer->MakeExecutable();
// Call the function from C++.
uint64_t left = V8_2PART_UINT64_C(0x10000000, 20000000);
uint64_t right = V8_2PART_UINT64_C(0x30000000, 40000000);
- auto f = GeneratedCode<F4>::FromBuffer(CcTest::i_isolate(), buffer);
+ auto f = GeneratedCode<F4>::FromBuffer(CcTest::i_isolate(), buffer->start());
uint64_t result = f.Call(&left, &right);
CHECK_EQ(V8_2PART_UINT64_C(0x10000000, 60000000), left);
USE(result);
@@ -418,9 +410,8 @@ TEST(AssemblerX64OrlOperations) {
TEST(AssemblerX64RollOperations) {
CcTest::InitializeVM();
- size_t allocated;
- byte* buffer = AllocateAssemblerBuffer(&allocated);
- Assembler masm(AssemblerOptions{}, buffer, static_cast<int>(allocated));
+ auto buffer = AllocateAssemblerBuffer();
+ Assembler masm(AssemblerOptions{}, buffer->CreateView());
__ movq(rax, arg1);
__ roll(rax, Immediate(1));
@@ -428,10 +419,10 @@ TEST(AssemblerX64RollOperations) {
CodeDesc desc;
masm.GetCode(CcTest::i_isolate(), &desc);
- MakeAssemblerBufferExecutable(buffer, allocated);
+ buffer->MakeExecutable();
// Call the function from C++.
uint64_t src = V8_2PART_UINT64_C(0x10000000, C0000000);
- auto f = GeneratedCode<F5>::FromBuffer(CcTest::i_isolate(), buffer);
+ auto f = GeneratedCode<F5>::FromBuffer(CcTest::i_isolate(), buffer->start());
uint64_t result = f.Call(src);
CHECK_EQ(V8_2PART_UINT64_C(0x00000000, 80000001), result);
}
@@ -439,9 +430,8 @@ TEST(AssemblerX64RollOperations) {
TEST(AssemblerX64SublOperations) {
CcTest::InitializeVM();
- size_t allocated;
- byte* buffer = AllocateAssemblerBuffer(&allocated);
- Assembler masm(AssemblerOptions{}, buffer, static_cast<int>(allocated));
+ auto buffer = AllocateAssemblerBuffer();
+ Assembler masm(AssemblerOptions{}, buffer->CreateView());
__ movq(rax, Operand(arg2, 0));
__ subl(Operand(arg1, 0), rax);
@@ -449,11 +439,11 @@ TEST(AssemblerX64SublOperations) {
CodeDesc desc;
masm.GetCode(CcTest::i_isolate(), &desc);
- MakeAssemblerBufferExecutable(buffer, allocated);
+ buffer->MakeExecutable();
// Call the function from C++.
uint64_t left = V8_2PART_UINT64_C(0x10000000, 20000000);
uint64_t right = V8_2PART_UINT64_C(0x30000000, 40000000);
- auto f = GeneratedCode<F4>::FromBuffer(CcTest::i_isolate(), buffer);
+ auto f = GeneratedCode<F4>::FromBuffer(CcTest::i_isolate(), buffer->start());
uint64_t result = f.Call(&left, &right);
CHECK_EQ(V8_2PART_UINT64_C(0x10000000, E0000000), left);
USE(result);
@@ -462,9 +452,8 @@ TEST(AssemblerX64SublOperations) {
TEST(AssemblerX64TestlOperations) {
CcTest::InitializeVM();
- size_t allocated;
- byte* buffer = AllocateAssemblerBuffer(&allocated);
- Assembler masm(AssemblerOptions{}, buffer, static_cast<int>(allocated));
+ auto buffer = AllocateAssemblerBuffer();
+ Assembler masm(AssemblerOptions{}, buffer->CreateView());
// Set rax with the ZF flag of the testl instruction.
Label done;
@@ -478,11 +467,11 @@ TEST(AssemblerX64TestlOperations) {
CodeDesc desc;
masm.GetCode(CcTest::i_isolate(), &desc);
- MakeAssemblerBufferExecutable(buffer, allocated);
+ buffer->MakeExecutable();
// Call the function from C++.
uint64_t left = V8_2PART_UINT64_C(0x10000000, 20000000);
uint64_t right = V8_2PART_UINT64_C(0x30000000, 00000000);
- auto f = GeneratedCode<F4>::FromBuffer(CcTest::i_isolate(), buffer);
+ auto f = GeneratedCode<F4>::FromBuffer(CcTest::i_isolate(), buffer->start());
uint64_t result = f.Call(&left, &right);
CHECK_EQ(1u, result);
}
@@ -490,9 +479,8 @@ TEST(AssemblerX64TestlOperations) {
TEST(AssemblerX64TestwOperations) {
typedef uint16_t(F)(uint16_t * x);
CcTest::InitializeVM();
- size_t allocated;
- byte* buffer = AllocateAssemblerBuffer(&allocated);
- Assembler masm(AssemblerOptions{}, buffer, static_cast<int>(allocated));
+ auto buffer = AllocateAssemblerBuffer();
+ Assembler masm(AssemblerOptions{}, buffer->CreateView());
  // Set rax with the ZF flag of the testw instruction.
Label done;
@@ -505,19 +493,18 @@ TEST(AssemblerX64TestwOperations) {
CodeDesc desc;
masm.GetCode(CcTest::i_isolate(), &desc);
- MakeAssemblerBufferExecutable(buffer, allocated);
+ buffer->MakeExecutable();
// Call the function from C++.
uint16_t operand = 0x8000;
- auto f = GeneratedCode<F>::FromBuffer(CcTest::i_isolate(), buffer);
+ auto f = GeneratedCode<F>::FromBuffer(CcTest::i_isolate(), buffer->start());
uint16_t result = f.Call(&operand);
CHECK_EQ(1u, result);
}
TEST(AssemblerX64XorlOperations) {
CcTest::InitializeVM();
- size_t allocated;
- byte* buffer = AllocateAssemblerBuffer(&allocated);
- Assembler masm(AssemblerOptions{}, buffer, static_cast<int>(allocated));
+ auto buffer = AllocateAssemblerBuffer();
+ Assembler masm(AssemblerOptions{}, buffer->CreateView());
__ movq(rax, Operand(arg2, 0));
__ xorl(Operand(arg1, 0), rax);
@@ -525,11 +512,11 @@ TEST(AssemblerX64XorlOperations) {
CodeDesc desc;
masm.GetCode(CcTest::i_isolate(), &desc);
- MakeAssemblerBufferExecutable(buffer, allocated);
+ buffer->MakeExecutable();
// Call the function from C++.
uint64_t left = V8_2PART_UINT64_C(0x10000000, 20000000);
uint64_t right = V8_2PART_UINT64_C(0x30000000, 60000000);
- auto f = GeneratedCode<F4>::FromBuffer(CcTest::i_isolate(), buffer);
+ auto f = GeneratedCode<F4>::FromBuffer(CcTest::i_isolate(), buffer->start());
uint64_t result = f.Call(&left, &right);
CHECK_EQ(V8_2PART_UINT64_C(0x10000000, 40000000), left);
USE(result);
@@ -538,9 +525,8 @@ TEST(AssemblerX64XorlOperations) {
TEST(AssemblerX64MemoryOperands) {
CcTest::InitializeVM();
- size_t allocated;
- byte* buffer = AllocateAssemblerBuffer(&allocated);
- Assembler masm(AssemblerOptions{}, buffer, static_cast<int>(allocated));
+ auto buffer = AllocateAssemblerBuffer();
+ Assembler masm(AssemblerOptions{}, buffer->CreateView());
// Assemble a simple function that copies argument 2 and returns it.
__ pushq(rbp);
@@ -561,9 +547,9 @@ TEST(AssemblerX64MemoryOperands) {
CodeDesc desc;
masm.GetCode(CcTest::i_isolate(), &desc);
- MakeAssemblerBufferExecutable(buffer, allocated);
+ buffer->MakeExecutable();
// Call the function from C++.
- auto f = GeneratedCode<F2>::FromBuffer(CcTest::i_isolate(), buffer);
+ auto f = GeneratedCode<F2>::FromBuffer(CcTest::i_isolate(), buffer->start());
int result = f.Call(3, 2);
CHECK_EQ(3, result);
}
@@ -571,9 +557,8 @@ TEST(AssemblerX64MemoryOperands) {
TEST(AssemblerX64ControlFlow) {
CcTest::InitializeVM();
- size_t allocated;
- byte* buffer = AllocateAssemblerBuffer(&allocated);
- Assembler masm(AssemblerOptions{}, buffer, static_cast<int>(allocated));
+ auto buffer = AllocateAssemblerBuffer();
+ Assembler masm(AssemblerOptions{}, buffer->CreateView());
// Assemble a simple function that copies argument 1 and returns it.
__ pushq(rbp);
@@ -589,9 +574,9 @@ TEST(AssemblerX64ControlFlow) {
CodeDesc desc;
masm.GetCode(CcTest::i_isolate(), &desc);
- MakeAssemblerBufferExecutable(buffer, allocated);
+ buffer->MakeExecutable();
// Call the function from C++.
- auto f = GeneratedCode<F2>::FromBuffer(CcTest::i_isolate(), buffer);
+ auto f = GeneratedCode<F2>::FromBuffer(CcTest::i_isolate(), buffer->start());
int result = f.Call(3, 2);
CHECK_EQ(3, result);
}
@@ -599,9 +584,8 @@ TEST(AssemblerX64ControlFlow) {
TEST(AssemblerX64LoopImmediates) {
CcTest::InitializeVM();
- size_t allocated;
- byte* buffer = AllocateAssemblerBuffer(&allocated);
- Assembler masm(AssemblerOptions{}, buffer, static_cast<int>(allocated));
+ auto buffer = AllocateAssemblerBuffer();
+ Assembler masm(AssemblerOptions{}, buffer->CreateView());
// Assemble two loops using rax as counter, and verify the ending counts.
Label Fail;
@@ -639,9 +623,9 @@ TEST(AssemblerX64LoopImmediates) {
CodeDesc desc;
masm.GetCode(CcTest::i_isolate(), &desc);
- MakeAssemblerBufferExecutable(buffer, allocated);
+ buffer->MakeExecutable();
// Call the function from C++.
- auto f = GeneratedCode<F0>::FromBuffer(CcTest::i_isolate(), buffer);
+ auto f = GeneratedCode<F0>::FromBuffer(CcTest::i_isolate(), buffer->start());
int result = f.Call();
CHECK_EQ(1, result);
}
@@ -694,7 +678,7 @@ TEST(AssemblerX64LabelChaining) {
// Test chaining of label usages within instructions (issue 1644).
CcTest::InitializeVM();
v8::HandleScope scope(CcTest::isolate());
- Assembler masm(AssemblerOptions{}, nullptr, 0);
+ Assembler masm(AssemblerOptions{});
Label target;
__ j(equal, &target);
@@ -709,7 +693,8 @@ TEST(AssemblerMultiByteNop) {
v8::HandleScope scope(CcTest::isolate());
byte buffer[1024];
Isolate* isolate = CcTest::i_isolate();
- Assembler masm(AssemblerOptions{}, buffer, sizeof(buffer));
+ Assembler masm(AssemblerOptions{},
+ ExternalAssemblerBuffer(buffer, sizeof(buffer)));
__ pushq(rbx);
__ pushq(rcx);
__ pushq(rdx);
@@ -780,7 +765,8 @@ void DoSSE2(const v8::FunctionCallbackInfo<v8::Value>& args) {
CHECK_EQ(ELEMENT_COUNT, vec->Length());
Isolate* isolate = CcTest::i_isolate();
- Assembler masm(AssemblerOptions{}, buffer, sizeof(buffer));
+ Assembler masm(AssemblerOptions{},
+ ExternalAssemblerBuffer(buffer, sizeof(buffer)));
  // Remove the return address from the stack to fix stack frame alignment.
__ popq(rcx);
@@ -869,7 +855,8 @@ TEST(AssemblerX64Extractps) {
v8::HandleScope scope(CcTest::isolate());
byte buffer[256];
Isolate* isolate = CcTest::i_isolate();
- Assembler masm(AssemblerOptions{}, buffer, sizeof(buffer));
+ Assembler masm(AssemblerOptions{},
+ ExternalAssemblerBuffer(buffer, sizeof(buffer)));
{
CpuFeatureScope fscope2(&masm, SSE4_1);
__ extractps(rax, xmm0, 0x1);
@@ -899,8 +886,8 @@ TEST(AssemblerX64SSE) {
Isolate* isolate = reinterpret_cast<Isolate*>(CcTest::isolate());
HandleScope scope(isolate);
v8::internal::byte buffer[256];
- MacroAssembler masm(isolate, buffer, sizeof(buffer),
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler masm(isolate, v8::internal::CodeObjectRequired::kYes,
+ ExternalAssemblerBuffer(buffer, sizeof(buffer)));
{
  __ shufps(xmm0, xmm0, 0x0); // broadcast first argument
  __ shufps(xmm1, xmm1, 0x0); // broadcast second argument
@@ -933,8 +920,8 @@ TEST(AssemblerX64SSE3) {
Isolate* isolate = reinterpret_cast<Isolate*>(CcTest::isolate());
HandleScope scope(isolate);
v8::internal::byte buffer[256];
- MacroAssembler masm(isolate, buffer, sizeof(buffer),
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler masm(isolate, v8::internal::CodeObjectRequired::kYes,
+ ExternalAssemblerBuffer(buffer, sizeof(buffer)));
{
CpuFeatureScope fscope(&masm, SSE3);
  __ shufps(xmm0, xmm0, 0x0); // broadcast first argument
@@ -965,8 +952,8 @@ TEST(AssemblerX64FMA_sd) {
Isolate* isolate = reinterpret_cast<Isolate*>(CcTest::isolate());
HandleScope scope(isolate);
v8::internal::byte buffer[1024];
- MacroAssembler masm(isolate, buffer, sizeof(buffer),
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler masm(isolate, v8::internal::CodeObjectRequired::kYes,
+ ExternalAssemblerBuffer(buffer, sizeof(buffer)));
{
CpuFeatureScope fscope(&masm, FMA3);
Label exit;
@@ -1191,8 +1178,8 @@ TEST(AssemblerX64FMA_ss) {
Isolate* isolate = reinterpret_cast<Isolate*>(CcTest::isolate());
HandleScope scope(isolate);
v8::internal::byte buffer[1024];
- MacroAssembler masm(isolate, buffer, sizeof(buffer),
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler masm(isolate, v8::internal::CodeObjectRequired::kYes,
+ ExternalAssemblerBuffer(buffer, sizeof(buffer)));
{
CpuFeatureScope fscope(&masm, FMA3);
Label exit;
@@ -1415,7 +1402,8 @@ TEST(AssemblerX64SSE_ss) {
Isolate* isolate = reinterpret_cast<Isolate*>(CcTest::isolate());
HandleScope scope(isolate);
v8::internal::byte buffer[1024];
- Assembler masm(AssemblerOptions{}, buffer, sizeof(buffer));
+ Assembler masm(AssemblerOptions{},
+ ExternalAssemblerBuffer(buffer, sizeof(buffer)));
{
Label exit;
// arguments in xmm0, xmm1 and xmm2
@@ -1493,7 +1481,8 @@ TEST(AssemblerX64AVX_ss) {
Isolate* isolate = reinterpret_cast<Isolate*>(CcTest::isolate());
HandleScope scope(isolate);
v8::internal::byte buffer[1024];
- Assembler masm(AssemblerOptions{}, buffer, sizeof(buffer));
+ Assembler masm(AssemblerOptions{},
+ ExternalAssemblerBuffer(buffer, sizeof(buffer)));
{
CpuFeatureScope avx_scope(&masm, AVX);
Label exit;
@@ -1578,7 +1567,8 @@ TEST(AssemblerX64AVX_sd) {
Isolate* isolate = reinterpret_cast<Isolate*>(CcTest::isolate());
HandleScope scope(isolate);
v8::internal::byte buffer[1024];
- Assembler masm(AssemblerOptions{}, buffer, sizeof(buffer));
+ Assembler masm(AssemblerOptions{},
+ ExternalAssemblerBuffer(buffer, sizeof(buffer)));
{
CpuFeatureScope avx_scope(&masm, AVX);
Label exit;
@@ -1817,8 +1807,8 @@ TEST(AssemblerX64BMI1) {
Isolate* isolate = reinterpret_cast<Isolate*>(CcTest::isolate());
HandleScope scope(isolate);
v8::internal::byte buffer[1024];
- MacroAssembler masm(isolate, buffer, sizeof(buffer),
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler masm(isolate, v8::internal::CodeObjectRequired::kYes,
+ ExternalAssemblerBuffer(buffer, sizeof(buffer)));
{
CpuFeatureScope fscope(&masm, BMI1);
Label exit;
@@ -2007,8 +1997,8 @@ TEST(AssemblerX64LZCNT) {
Isolate* isolate = reinterpret_cast<Isolate*>(CcTest::isolate());
HandleScope scope(isolate);
v8::internal::byte buffer[256];
- MacroAssembler masm(isolate, buffer, sizeof(buffer),
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler masm(isolate, v8::internal::CodeObjectRequired::kYes,
+ ExternalAssemblerBuffer(buffer, sizeof(buffer)));
{
CpuFeatureScope fscope(&masm, LZCNT);
Label exit;
@@ -2067,8 +2057,8 @@ TEST(AssemblerX64POPCNT) {
Isolate* isolate = reinterpret_cast<Isolate*>(CcTest::isolate());
HandleScope scope(isolate);
v8::internal::byte buffer[256];
- MacroAssembler masm(isolate, buffer, sizeof(buffer),
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler masm(isolate, v8::internal::CodeObjectRequired::kYes,
+ ExternalAssemblerBuffer(buffer, sizeof(buffer)));
{
CpuFeatureScope fscope(&masm, POPCNT);
Label exit;
@@ -2127,8 +2117,8 @@ TEST(AssemblerX64BMI2) {
Isolate* isolate = reinterpret_cast<Isolate*>(CcTest::isolate());
HandleScope scope(isolate);
v8::internal::byte buffer[2048];
- MacroAssembler masm(isolate, buffer, sizeof(buffer),
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler masm(isolate, v8::internal::CodeObjectRequired::kYes,
+ ExternalAssemblerBuffer(buffer, sizeof(buffer)));
{
CpuFeatureScope fscope(&masm, BMI2);
Label exit;
@@ -2388,8 +2378,7 @@ TEST(AssemblerX64JumpTables1) {
CcTest::InitializeVM();
Isolate* isolate = reinterpret_cast<Isolate*>(CcTest::isolate());
HandleScope scope(isolate);
- MacroAssembler masm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler masm(isolate, v8::internal::CodeObjectRequired::kYes);
const int kNumCases = 512;
int values[kNumCases];
@@ -2436,8 +2425,7 @@ TEST(AssemblerX64JumpTables2) {
CcTest::InitializeVM();
Isolate* isolate = reinterpret_cast<Isolate*>(CcTest::isolate());
HandleScope scope(isolate);
- MacroAssembler masm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler masm(isolate, v8::internal::CodeObjectRequired::kYes);
const int kNumCases = 512;
int values[kNumCases];
@@ -2481,9 +2469,8 @@ TEST(AssemblerX64JumpTables2) {
TEST(AssemblerX64PslldWithXmm15) {
CcTest::InitializeVM();
- size_t allocated;
- byte* buffer = AllocateAssemblerBuffer(&allocated);
- Assembler masm(AssemblerOptions{}, buffer, static_cast<int>(allocated));
+ auto buffer = AllocateAssemblerBuffer();
+ Assembler masm(AssemblerOptions{}, buffer->CreateView());
__ movq(xmm15, arg1);
__ pslld(xmm15, 1);
@@ -2492,8 +2479,8 @@ TEST(AssemblerX64PslldWithXmm15) {
CodeDesc desc;
masm.GetCode(CcTest::i_isolate(), &desc);
- MakeAssemblerBufferExecutable(buffer, allocated);
- auto f = GeneratedCode<F5>::FromBuffer(CcTest::i_isolate(), buffer);
+ buffer->MakeExecutable();
+ auto f = GeneratedCode<F5>::FromBuffer(CcTest::i_isolate(), buffer->start());
uint64_t result = f.Call(uint64_t{0x1122334455667788});
CHECK_EQ(uint64_t{0x22446688AACCEF10}, result);
}
@@ -2506,8 +2493,8 @@ TEST(AssemblerX64vmovups) {
Isolate* isolate = reinterpret_cast<Isolate*>(CcTest::isolate());
HandleScope scope(isolate);
v8::internal::byte buffer[256];
- MacroAssembler masm(isolate, buffer, sizeof(buffer),
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler masm(isolate, v8::internal::CodeObjectRequired::kYes,
+ ExternalAssemblerBuffer(buffer, sizeof(buffer)));
{
CpuFeatureScope avx_scope(&masm, AVX);
  __ shufps(xmm0, xmm0, 0x0); // broadcast first argument
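
(The x64 hunks replace the raw byte*-plus-size buffer protocol with an owning buffer object, and wrap fixed stack arrays explicitly. A minimal sketch of both forms, assuming this file's usual `__ masm.` shorthand; a sketch of the pattern, not part of the patch:)

  // Heap-allocated buffer: AllocateAssemblerBuffer() now returns an owning
  // object instead of filling in an out-parameter size.
  auto buffer = AllocateAssemblerBuffer();
  Assembler masm(AssemblerOptions{}, buffer->CreateView());
  __ movq(rax, arg2);
  __ ret(0);

  CodeDesc desc;
  masm.GetCode(CcTest::i_isolate(), &desc);
  buffer->MakeExecutable();  // was: MakeAssemblerBufferExecutable(buffer, allocated);
  auto f = GeneratedCode<F2>::FromBuffer(CcTest::i_isolate(), buffer->start());
  CHECK_EQ(2, f.Call(3, 2));

  // Fixed-size stack buffer: wrap it in an ExternalAssemblerBuffer instead
  // of passing (buffer, sizeof(buffer)) directly.
  byte stack_buffer[1024];
  Assembler masm2(AssemblerOptions{},
                  ExternalAssemblerBuffer(stack_buffer, sizeof(stack_buffer)));
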
diff --git a/deps/v8/test/cctest/test-code-layout.cc b/deps/v8/test/cctest/test-code-layout.cc
index 1b7fc4f481..5dbe81cb62 100644
--- a/deps/v8/test/cctest/test-code-layout.cc
+++ b/deps/v8/test/cctest/test-code-layout.cc
@@ -30,6 +30,7 @@ TEST(CodeLayoutWithoutUnwindingInfo) {
code_desc.origin = nullptr;
code_desc.unwinding_info = nullptr;
code_desc.unwinding_info_size = 0;
+ code_desc.code_comments_size = 0;
Handle<Code> code = CcTest::i_isolate()->factory()->NewCode(
code_desc, Code::STUB, Handle<Object>::null());
@@ -68,6 +69,7 @@ TEST(CodeLayoutWithUnwindingInfo) {
code_desc.origin = nullptr;
code_desc.unwinding_info = unwinding_info;
code_desc.unwinding_info_size = unwinding_info_size;
+ code_desc.code_comments_size = 0;
Handle<Code> code = CcTest::i_isolate()->factory()->NewCode(
code_desc, Code::STUB, Handle<Object>::null());
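
(CodeDesc grew a code_comments_size field in this release, so tests that build a descriptor by hand must now initialize it before calling NewCode. A sketch of the initialization order, matching the two hunks above:)

  CodeDesc code_desc;
  // ... instruction/buffer fields filled in as before ...
  code_desc.origin = nullptr;
  code_desc.unwinding_info = nullptr;
  code_desc.unwinding_info_size = 0;
  code_desc.code_comments_size = 0;  // new field; 0 when no code comments are emitted
  Handle<Code> code = CcTest::i_isolate()->factory()->NewCode(
      code_desc, Code::STUB, Handle<Object>::null());
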
diff --git a/deps/v8/test/cctest/test-code-stub-assembler.cc b/deps/v8/test/cctest/test-code-stub-assembler.cc
index 4289efa1d0..0c22f4503b 100644
--- a/deps/v8/test/cctest/test-code-stub-assembler.cc
+++ b/deps/v8/test/cctest/test-code-stub-assembler.cc
@@ -7,6 +7,7 @@
#include "src/api-inl.h"
#include "src/base/utils/random-number-generator.h"
#include "src/builtins/builtins-promise-gen.h"
+#include "src/builtins/builtins-promise.h"
#include "src/builtins/builtins-string-gen.h"
#include "src/char-predicates.h"
#include "src/code-factory.h"
@@ -17,9 +18,12 @@
#include "src/isolate.h"
#include "src/objects-inl.h"
#include "src/objects/hash-table-inl.h"
+#include "src/objects/heap-number-inl.h"
#include "src/objects/js-array-buffer-inl.h"
#include "src/objects/js-array-inl.h"
#include "src/objects/promise-inl.h"
+#include "src/objects/smi.h"
+#include "src/objects/struct-inl.h"
#include "test/cctest/compiler/code-assembler-tester.h"
#include "test/cctest/compiler/function-tester.h"
@@ -345,7 +349,7 @@ TEST(ComputeIntegerHash) {
Handle<Object> result = ft.Call(key).ToHandleChecked();
uint32_t hash = ComputeSeededHash(k, isolate->heap()->HashSeed());
- Smi* expected = Smi::FromInt(hash);
+ Smi expected = Smi::FromInt(hash);
CHECK_EQ(expected, Smi::cast(*result));
}
}
@@ -1541,7 +1545,7 @@ TEST(TryLookupElement) {
CHECK_ABSENT(object, 42);
v8::ArrayBuffer::Contents contents = buffer->Externalize();
- buffer->Neuter();
+ buffer->Detach();
isolate->array_buffer_allocator()->Free(contents.Data(),
contents.ByteLength());
@@ -1710,7 +1714,8 @@ TEST(AllocateNameDictionary) {
// Both dictionaries should be memory equal.
int size =
FixedArrayBase::kHeaderSize + (dict->length() - 1) * kPointerSize;
- CHECK_EQ(0, memcmp(*dict, *result, size));
+ CHECK_EQ(0, memcmp(reinterpret_cast<void*>(dict->ptr()),
+ reinterpret_cast<void*>(result->ptr()), size));
}
}
}
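
With objects now value types rather than raw pointers, memcmp can no longer take *dict directly; the raw address comes from ptr() and is cast to void*. Since both operands carry the same heap-object tag, byte-wise comparison of corresponding fields still holds. The rewritten comparison:

    CHECK_EQ(0, memcmp(reinterpret_cast<void*>(dict->ptr()),
                       reinterpret_cast<void*>(result->ptr()), size));
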
@@ -1786,16 +1791,17 @@ TEST(OneToTwoByteStringCopy) {
isolate->factory()->NewStringFromTwoByte(str).ToHandleChecked();
FunctionTester ft(asm_tester.GenerateCode(), kNumParams);
ft.Call(string1, string2);
- CHECK_EQ(Handle<SeqOneByteString>::cast(string1)->GetChars()[0],
- Handle<SeqTwoByteString>::cast(string2)->GetChars()[0]);
- CHECK_EQ(Handle<SeqOneByteString>::cast(string1)->GetChars()[1],
- Handle<SeqTwoByteString>::cast(string2)->GetChars()[1]);
- CHECK_EQ(Handle<SeqOneByteString>::cast(string1)->GetChars()[2],
- Handle<SeqTwoByteString>::cast(string2)->GetChars()[2]);
- CHECK_EQ(Handle<SeqOneByteString>::cast(string1)->GetChars()[3],
- Handle<SeqTwoByteString>::cast(string2)->GetChars()[3]);
- CHECK_EQ(Handle<SeqOneByteString>::cast(string1)->GetChars()[4],
- Handle<SeqTwoByteString>::cast(string2)->GetChars()[4]);
+ DisallowHeapAllocation no_gc;
+ CHECK_EQ(Handle<SeqOneByteString>::cast(string1)->GetChars(no_gc)[0],
+ Handle<SeqTwoByteString>::cast(string2)->GetChars(no_gc)[0]);
+ CHECK_EQ(Handle<SeqOneByteString>::cast(string1)->GetChars(no_gc)[1],
+ Handle<SeqTwoByteString>::cast(string2)->GetChars(no_gc)[1]);
+ CHECK_EQ(Handle<SeqOneByteString>::cast(string1)->GetChars(no_gc)[2],
+ Handle<SeqTwoByteString>::cast(string2)->GetChars(no_gc)[2]);
+ CHECK_EQ(Handle<SeqOneByteString>::cast(string1)->GetChars(no_gc)[3],
+ Handle<SeqTwoByteString>::cast(string2)->GetChars(no_gc)[3]);
+ CHECK_EQ(Handle<SeqOneByteString>::cast(string1)->GetChars(no_gc)[4],
+ Handle<SeqTwoByteString>::cast(string2)->GetChars(no_gc)[4]);
}
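
SeqOneByteString::GetChars() and SeqTwoByteString::GetChars() now require a DisallowHeapAllocation witness, statically documenting that the returned raw character pointer must not live across a GC. A minimal sketch, assuming the one-byte variant still returns uint8_t*:

    DisallowHeapAllocation no_gc;
    uint8_t* chars = Handle<SeqOneByteString>::cast(string1)->GetChars(no_gc);
    CHECK_EQ(chars[0],
             Handle<SeqTwoByteString>::cast(string2)->GetChars(no_gc)[0]);
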
TEST(OneToOneByteStringCopy) {
@@ -1817,16 +1823,17 @@ TEST(OneToOneByteStringCopy) {
isolate->factory()->NewStringFromOneByte(str).ToHandleChecked();
FunctionTester ft(asm_tester.GenerateCode(), kNumParams);
ft.Call(string1, string2);
- CHECK_EQ(Handle<SeqOneByteString>::cast(string1)->GetChars()[0],
- Handle<SeqOneByteString>::cast(string2)->GetChars()[0]);
- CHECK_EQ(Handle<SeqOneByteString>::cast(string1)->GetChars()[1],
- Handle<SeqOneByteString>::cast(string2)->GetChars()[1]);
- CHECK_EQ(Handle<SeqOneByteString>::cast(string1)->GetChars()[2],
- Handle<SeqOneByteString>::cast(string2)->GetChars()[2]);
- CHECK_EQ(Handle<SeqOneByteString>::cast(string1)->GetChars()[3],
- Handle<SeqOneByteString>::cast(string2)->GetChars()[3]);
- CHECK_EQ(Handle<SeqOneByteString>::cast(string1)->GetChars()[4],
- Handle<SeqOneByteString>::cast(string2)->GetChars()[4]);
+ DisallowHeapAllocation no_gc;
+ CHECK_EQ(Handle<SeqOneByteString>::cast(string1)->GetChars(no_gc)[0],
+ Handle<SeqOneByteString>::cast(string2)->GetChars(no_gc)[0]);
+ CHECK_EQ(Handle<SeqOneByteString>::cast(string1)->GetChars(no_gc)[1],
+ Handle<SeqOneByteString>::cast(string2)->GetChars(no_gc)[1]);
+ CHECK_EQ(Handle<SeqOneByteString>::cast(string1)->GetChars(no_gc)[2],
+ Handle<SeqOneByteString>::cast(string2)->GetChars(no_gc)[2]);
+ CHECK_EQ(Handle<SeqOneByteString>::cast(string1)->GetChars(no_gc)[3],
+ Handle<SeqOneByteString>::cast(string2)->GetChars(no_gc)[3]);
+ CHECK_EQ(Handle<SeqOneByteString>::cast(string1)->GetChars(no_gc)[4],
+ Handle<SeqOneByteString>::cast(string2)->GetChars(no_gc)[4]);
}
TEST(OneToOneByteStringCopyNonZeroStart) {
@@ -1848,13 +1855,14 @@ TEST(OneToOneByteStringCopyNonZeroStart) {
isolate->factory()->NewStringFromOneByte(str).ToHandleChecked();
FunctionTester ft(asm_tester.GenerateCode(), kNumParams);
ft.Call(string1, string2);
- CHECK_EQ(Handle<SeqOneByteString>::cast(string1)->GetChars()[0],
- Handle<SeqOneByteString>::cast(string2)->GetChars()[3]);
- CHECK_EQ(Handle<SeqOneByteString>::cast(string1)->GetChars()[1],
- Handle<SeqOneByteString>::cast(string2)->GetChars()[4]);
- CHECK_EQ(100, Handle<SeqOneByteString>::cast(string2)->GetChars()[0]);
- CHECK_EQ(101, Handle<SeqOneByteString>::cast(string2)->GetChars()[1]);
- CHECK_EQ(102, Handle<SeqOneByteString>::cast(string2)->GetChars()[2]);
+ DisallowHeapAllocation no_gc;
+ CHECK_EQ(Handle<SeqOneByteString>::cast(string1)->GetChars(no_gc)[0],
+ Handle<SeqOneByteString>::cast(string2)->GetChars(no_gc)[3]);
+ CHECK_EQ(Handle<SeqOneByteString>::cast(string1)->GetChars(no_gc)[1],
+ Handle<SeqOneByteString>::cast(string2)->GetChars(no_gc)[4]);
+ CHECK_EQ(100, Handle<SeqOneByteString>::cast(string2)->GetChars(no_gc)[0]);
+ CHECK_EQ(101, Handle<SeqOneByteString>::cast(string2)->GetChars(no_gc)[1]);
+ CHECK_EQ(102, Handle<SeqOneByteString>::cast(string2)->GetChars(no_gc)[2]);
}
TEST(TwoToTwoByteStringCopy) {
@@ -1879,16 +1887,17 @@ TEST(TwoToTwoByteStringCopy) {
isolate->factory()->NewStringFromTwoByte(str2).ToHandleChecked();
FunctionTester ft(asm_tester.GenerateCode(), kNumParams);
ft.Call(string1, string2);
- CHECK_EQ(Handle<SeqTwoByteString>::cast(string1)->GetChars()[0],
- Handle<SeqTwoByteString>::cast(string2)->GetChars()[0]);
- CHECK_EQ(Handle<SeqTwoByteString>::cast(string1)->GetChars()[1],
- Handle<SeqTwoByteString>::cast(string2)->GetChars()[1]);
- CHECK_EQ(Handle<SeqTwoByteString>::cast(string1)->GetChars()[2],
- Handle<SeqTwoByteString>::cast(string2)->GetChars()[2]);
- CHECK_EQ(Handle<SeqTwoByteString>::cast(string1)->GetChars()[3],
- Handle<SeqTwoByteString>::cast(string2)->GetChars()[3]);
- CHECK_EQ(Handle<SeqTwoByteString>::cast(string1)->GetChars()[4],
- Handle<SeqTwoByteString>::cast(string2)->GetChars()[4]);
+ DisallowHeapAllocation no_gc;
+ CHECK_EQ(Handle<SeqTwoByteString>::cast(string1)->GetChars(no_gc)[0],
+ Handle<SeqTwoByteString>::cast(string2)->GetChars(no_gc)[0]);
+ CHECK_EQ(Handle<SeqTwoByteString>::cast(string1)->GetChars(no_gc)[1],
+ Handle<SeqTwoByteString>::cast(string2)->GetChars(no_gc)[1]);
+ CHECK_EQ(Handle<SeqTwoByteString>::cast(string1)->GetChars(no_gc)[2],
+ Handle<SeqTwoByteString>::cast(string2)->GetChars(no_gc)[2]);
+ CHECK_EQ(Handle<SeqTwoByteString>::cast(string1)->GetChars(no_gc)[3],
+ Handle<SeqTwoByteString>::cast(string2)->GetChars(no_gc)[3]);
+ CHECK_EQ(Handle<SeqTwoByteString>::cast(string1)->GetChars(no_gc)[4],
+ Handle<SeqTwoByteString>::cast(string2)->GetChars(no_gc)[4]);
}
TEST(Arguments) {
@@ -1965,8 +1974,8 @@ TNode<Smi> NonConstantSmi(CodeStubAssembler* m, int value) {
m->BIND(&dummy_done);
// Ensure that the above hackery actually created a non-constant SMI.
- Smi* smi_constant;
- CHECK(!m->ToSmiConstant(var.value(), smi_constant));
+ Smi smi_constant;
+ CHECK(!m->ToSmiConstant(var.value(), &smi_constant));
return m->UncheckedCast<Smi>(var.value());
}
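
A knock-on effect of the Smi value-type change: ToSmiConstant's out-parameter is now a Smi* pointing at a stack value rather than a slot for a tagged Smi* pointer. Sketch of the new calling convention:

    Smi smi_constant;  // stack value, not Smi*
    if (m->ToSmiConstant(var.value(), &smi_constant)) {
      // The node folded to a constant; read it via smi_constant->value().
    }
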
@@ -2075,17 +2084,15 @@ class AppendJSArrayCodeStubAssembler : public CodeStubAssembler {
: CodeStubAssembler(state), kind_(kind) {}
void TestAppendJSArrayImpl(Isolate* isolate, CodeAssemblerTester* csa_tester,
- Object* o1, Object* o2, Object* o3, Object* o4,
+ Object o1, Object o2, Object o3, Object o4,
int initial_size, int result_size) {
Handle<JSArray> array = isolate->factory()->NewJSArray(
kind_, 2, initial_size, INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
- JSObject::SetElement(isolate, array, 0,
- Handle<Smi>(Smi::FromInt(1), isolate),
- LanguageMode::kSloppy)
+ Object::SetElement(isolate, array, 0, Handle<Smi>(Smi::FromInt(1), isolate),
+ LanguageMode::kSloppy)
.Check();
- JSObject::SetElement(isolate, array, 1,
- Handle<Smi>(Smi::FromInt(2), isolate),
- LanguageMode::kSloppy)
+ Object::SetElement(isolate, array, 1, Handle<Smi>(Smi::FromInt(2), isolate),
+ LanguageMode::kSloppy)
.Check();
CodeStubArguments args(this, IntPtrConstant(kNumParams));
TVariable<IntPtrT> arg_index(this);
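
SetElement moves from JSObject up to Object in this roll; the argument order and the Maybe-style .Check() idiom are unchanged, as the hunk shows. The rewritten call:

    Object::SetElement(isolate, array, 0,
                       Handle<Smi>(Smi::FromInt(1), isolate),
                       LanguageMode::kSloppy)
        .Check();
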
@@ -2108,8 +2115,8 @@ class AppendJSArrayCodeStubAssembler : public CodeStubAssembler {
CHECK_EQ(kind_, array->GetElementsKind());
CHECK_EQ(result_size, Handle<Smi>::cast(result)->value());
CHECK_EQ(result_size, Smi::ToInt(array->length()));
- Object* obj = *JSObject::GetElement(isolate, array, 2).ToHandleChecked();
- HeapObject* undefined_value = ReadOnlyRoots(isolate).undefined_value();
+ Object obj = *JSObject::GetElement(isolate, array, 2).ToHandleChecked();
+ HeapObject undefined_value = ReadOnlyRoots(isolate).undefined_value();
CHECK_EQ(result_size < 3 ? undefined_value : o1, obj);
obj = *JSObject::GetElement(isolate, array, 3).ToHandleChecked();
CHECK_EQ(result_size < 4 ? undefined_value : o2, obj);
@@ -2119,8 +2126,8 @@ class AppendJSArrayCodeStubAssembler : public CodeStubAssembler {
CHECK_EQ(result_size < 6 ? undefined_value : o4, obj);
}
- static void TestAppendJSArray(Isolate* isolate, ElementsKind kind, Object* o1,
- Object* o2, Object* o3, Object* o4,
+ static void TestAppendJSArray(Isolate* isolate, ElementsKind kind, Object o1,
+ Object o2, Object o3, Object o4,
int initial_size, int result_size) {
CodeAssemblerTester asm_tester(isolate, kNumParams);
AppendJSArrayCodeStubAssembler m(asm_tester.state(), kind);
@@ -2355,9 +2362,9 @@ TEST(CreatePromiseResolvingFunctionsContext) {
CHECK_EQ(isolate->native_context()->scope_info(), context_js->scope_info());
CHECK_EQ(ReadOnlyRoots(isolate).the_hole_value(), context_js->extension());
CHECK_EQ(*isolate->native_context(), context_js->native_context());
- CHECK(context_js->get(PromiseBuiltinsAssembler::kPromiseSlot)->IsJSPromise());
+ CHECK(context_js->get(PromiseBuiltins::kPromiseSlot)->IsJSPromise());
CHECK_EQ(ReadOnlyRoots(isolate).false_value(),
- context_js->get(PromiseBuiltinsAssembler::kDebugEventSlot));
+ context_js->get(PromiseBuiltins::kDebugEventSlot));
}
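
The promise context-slot constants move off the CSA class PromiseBuiltinsAssembler into a plain PromiseBuiltins holder, declared in the new src/builtins/builtins-promise.h include added at the top of this file. The checks now read:

    CHECK(context_js->get(PromiseBuiltins::kPromiseSlot)->IsJSPromise());
    CHECK_EQ(ReadOnlyRoots(isolate).false_value(),
             context_js->get(PromiseBuiltins::kDebugEventSlot));
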
TEST(CreatePromiseResolvingFunctions) {
@@ -2480,7 +2487,7 @@ TEST(AllocateFunctionWithMapAndContext) {
CHECK_EQ(ReadOnlyRoots(isolate).empty_property_array(),
fun->property_array());
CHECK_EQ(ReadOnlyRoots(isolate).empty_fixed_array(), fun->elements());
- CHECK_EQ(isolate->heap()->many_closures_cell(), fun->feedback_cell());
+ CHECK_EQ(isolate->heap()->many_closures_cell(), fun->raw_feedback_cell());
CHECK(!fun->has_prototype_slot());
CHECK_EQ(*isolate->promise_capability_default_resolve_shared_fun(),
fun->shared());
@@ -2515,13 +2522,12 @@ TEST(CreatePromiseGetCapabilitiesExecutorContext) {
ft.Call(isolate->factory()->undefined_value()).ToHandleChecked();
CHECK(result_obj->IsContext());
Handle<Context> context_js = Handle<Context>::cast(result_obj);
- CHECK_EQ(PromiseBuiltinsAssembler::kCapabilitiesContextLength,
- context_js->length());
+ CHECK_EQ(PromiseBuiltins::kCapabilitiesContextLength, context_js->length());
CHECK_EQ(isolate->native_context()->scope_info(), context_js->scope_info());
CHECK_EQ(ReadOnlyRoots(isolate).the_hole_value(), context_js->extension());
CHECK_EQ(*isolate->native_context(), context_js->native_context());
- CHECK(context_js->get(PromiseBuiltinsAssembler::kCapabilitySlot)
- ->IsPromiseCapability());
+ CHECK(
+ context_js->get(PromiseBuiltins::kCapabilitySlot)->IsPromiseCapability());
}
TEST(NewPromiseCapability) {
@@ -2568,10 +2574,8 @@ TEST(NewPromiseCapability) {
CHECK_EQ(isolate->native_context()->scope_info(), context->scope_info());
CHECK_EQ(ReadOnlyRoots(isolate).the_hole_value(), context->extension());
CHECK_EQ(*isolate->native_context(), context->native_context());
- CHECK_EQ(PromiseBuiltinsAssembler::kPromiseContextLength,
- context->length());
- CHECK_EQ(context->get(PromiseBuiltinsAssembler::kPromiseSlot),
- result->promise());
+ CHECK_EQ(PromiseBuiltins::kPromiseContextLength, context->length());
+ CHECK_EQ(context->get(PromiseBuiltins::kPromiseSlot), result->promise());
}
}
@@ -2891,7 +2895,7 @@ TEST(GotoIfNotWhiteSpaceOrLineTerminator) {
for (uc16 c = 0; c < 0xFFFF; c++) {
Handle<Object> expected_value =
- WhiteSpaceOrLineTerminator::Is(c) ? true_value : false_value;
+ IsWhiteSpaceOrLineTerminator(c) ? true_value : false_value;
ft.CheckCall(expected_value, handle(Smi::FromInt(c), isolate));
}
}
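
The character predicate becomes a free function here: WhiteSpaceOrLineTerminator::Is(c) is now IsWhiteSpaceOrLineTerminator(c), presumably matching the style of the src/char-predicates.h include added above. Usage in the loop:

    for (uc16 c = 0; c < 0xFFFF; c++) {
      Handle<Object> expected_value =
          IsWhiteSpaceOrLineTerminator(c) ? true_value : false_value;
      ft.CheckCall(expected_value, handle(Smi::FromInt(c), isolate));
    }
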
@@ -3087,7 +3091,7 @@ TEST(CloneEmptyFixedArray) {
Handle<FixedArray> source(isolate->factory()->empty_fixed_array());
Handle<Object> result_raw = ft.Call(source).ToHandleChecked();
- FixedArray* result(FixedArray::cast(*result_raw));
+ FixedArray result(FixedArray::cast(*result_raw));
CHECK_EQ(0, result->length());
CHECK_EQ(*(isolate->factory()->empty_fixed_array()), result);
}
@@ -3105,7 +3109,7 @@ TEST(CloneFixedArray) {
Handle<FixedArray> source(isolate->factory()->NewFixedArrayWithHoles(5));
source->set(1, Smi::FromInt(1234));
Handle<Object> result_raw = ft.Call(source).ToHandleChecked();
- FixedArray* result(FixedArray::cast(*result_raw));
+ FixedArray result(FixedArray::cast(*result_raw));
CHECK_EQ(5, result->length());
CHECK(result->get(0)->IsTheHole(isolate));
CHECK_EQ(Smi::cast(result->get(1))->value(), 1234);
@@ -3128,7 +3132,7 @@ TEST(CloneFixedArrayCOW) {
source->set(1, Smi::FromInt(1234));
source->set_map(ReadOnlyRoots(isolate).fixed_cow_array_map());
Handle<Object> result_raw = ft.Call(source).ToHandleChecked();
- FixedArray* result(FixedArray::cast(*result_raw));
+ FixedArray result(FixedArray::cast(*result_raw));
CHECK_EQ(*source, result);
}
@@ -3150,7 +3154,7 @@ TEST(ExtractFixedArrayCOWForceCopy) {
source->set(1, Smi::FromInt(1234));
source->set_map(ReadOnlyRoots(isolate).fixed_cow_array_map());
Handle<Object> result_raw = ft.Call(source).ToHandleChecked();
- FixedArray* result(FixedArray::cast(*result_raw));
+ FixedArray result(FixedArray::cast(*result_raw));
CHECK_NE(*source, result);
CHECK_EQ(5, result->length());
CHECK(result->get(0)->IsTheHole(isolate));
@@ -3181,7 +3185,7 @@ TEST(ExtractFixedArraySimple) {
ft.Call(source, Handle<Smi>(Smi::FromInt(1), isolate),
Handle<Smi>(Smi::FromInt(2), isolate))
.ToHandleChecked();
- FixedArray* result(FixedArray::cast(*result_raw));
+ FixedArray result(FixedArray::cast(*result_raw));
CHECK_EQ(2, result->length());
CHECK_EQ(Smi::cast(result->get(0))->value(), 1234);
CHECK(result->get(1)->IsTheHole(isolate));
@@ -3205,7 +3209,7 @@ TEST(ExtractFixedArraySimpleSmiConstant) {
Handle<FixedArray> source(isolate->factory()->NewFixedArrayWithHoles(5));
source->set(1, Smi::FromInt(1234));
Handle<Object> result_raw = ft.Call(source).ToHandleChecked();
- FixedArray* result(FixedArray::cast(*result_raw));
+ FixedArray result(FixedArray::cast(*result_raw));
CHECK_EQ(2, result->length());
CHECK_EQ(Smi::cast(result->get(0))->value(), 1234);
CHECK(result->get(1)->IsTheHole(isolate));
@@ -3229,7 +3233,7 @@ TEST(ExtractFixedArraySimpleIntPtrConstant) {
Handle<FixedArray> source(isolate->factory()->NewFixedArrayWithHoles(5));
source->set(1, Smi::FromInt(1234));
Handle<Object> result_raw = ft.Call(source).ToHandleChecked();
- FixedArray* result(FixedArray::cast(*result_raw));
+ FixedArray result(FixedArray::cast(*result_raw));
CHECK_EQ(2, result->length());
CHECK_EQ(Smi::cast(result->get(0))->value(), 1234);
CHECK(result->get(1)->IsTheHole(isolate));
@@ -3251,7 +3255,7 @@ TEST(ExtractFixedArraySimpleIntPtrConstantNoDoubles) {
Handle<FixedArray> source(isolate->factory()->NewFixedArrayWithHoles(5));
source->set(1, Smi::FromInt(1234));
Handle<Object> result_raw = ft.Call(source).ToHandleChecked();
- FixedArray* result(FixedArray::cast(*result_raw));
+ FixedArray result(FixedArray::cast(*result_raw));
CHECK_EQ(2, result->length());
CHECK_EQ(Smi::cast(result->get(0))->value(), 1234);
CHECK(result->get(1)->IsTheHole(isolate));
@@ -3275,7 +3279,7 @@ TEST(ExtractFixedArraySimpleIntPtrParameters) {
ft.Call(source, Handle<Smi>(Smi::FromInt(1), isolate),
Handle<Smi>(Smi::FromInt(2), isolate))
.ToHandleChecked();
- FixedArray* result(FixedArray::cast(*result_raw));
+ FixedArray result(FixedArray::cast(*result_raw));
CHECK_EQ(2, result->length());
CHECK_EQ(Smi::cast(result->get(0))->value(), 1234);
CHECK(result->get(1)->IsTheHole(isolate));
@@ -3291,7 +3295,7 @@ TEST(ExtractFixedArraySimpleIntPtrParameters) {
ft.Call(source_double, Handle<Smi>(Smi::FromInt(1), isolate),
Handle<Smi>(Smi::FromInt(2), isolate))
.ToHandleChecked();
- FixedDoubleArray* double_result(FixedDoubleArray::cast(*double_result_raw));
+ FixedDoubleArray double_result = FixedDoubleArray::cast(*double_result_raw);
CHECK_EQ(2, double_result->length());
CHECK_EQ(double_result->get_scalar(0), 11);
CHECK_EQ(double_result->get_scalar(1), 12);
@@ -3451,6 +3455,7 @@ TEST(IsDoubleElementsKind) {
}
TEST(TestCallBuiltinInlineTrampoline) {
+ if (!i::FLAG_embedded_builtins) return;
Isolate* isolate(CcTest::InitIsolateOnce());
const int kNumParams = 1;
CodeAssemblerTester asm_tester(isolate, kNumParams);
@@ -3475,6 +3480,7 @@ TEST(TestCallBuiltinInlineTrampoline) {
}
TEST(TestCallBuiltinIndirectLoad) {
+ if (!i::FLAG_embedded_builtins) return;
Isolate* isolate(CcTest::InitIsolateOnce());
const int kNumParams = 1;
CodeAssemblerTester asm_tester(isolate, kNumParams);
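
Both trampoline tests now bail out early when V8 is configured without embedded builtins, presumably because the inline-trampoline and indirect-load paths they exercise only apply in that configuration. The guard pattern (test name from the hunk above):

    TEST(TestCallBuiltinInlineTrampoline) {
      if (!i::FLAG_embedded_builtins) return;  // feature-gated; skip otherwise
      // ...
    }
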
diff --git a/deps/v8/test/cctest/test-code-stubs-arm.cc b/deps/v8/test/cctest/test-code-stubs-arm.cc
deleted file mode 100644
index a5746a7f8e..0000000000
--- a/deps/v8/test/cctest/test-code-stubs-arm.cc
+++ /dev/null
@@ -1,166 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include <stdlib.h>
-
-#include "src/assembler-inl.h"
-#include "src/base/platform/platform.h"
-#include "src/code-stubs.h"
-#include "src/heap/factory.h"
-#include "src/macro-assembler.h"
-#include "src/objects-inl.h"
-#include "src/simulator.h"
-#include "src/v8.h"
-#include "test/cctest/cctest.h"
-#include "test/cctest/test-code-stubs.h"
-#include "test/common/assembler-tester.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ masm.
-
-ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
- Register destination_reg) {
- HandleScope handles(isolate);
-
- size_t allocated;
- byte* buffer = AllocateAssemblerBuffer(&allocated);
- MacroAssembler masm(isolate, buffer, static_cast<int>(allocated),
- v8::internal::CodeObjectRequired::kYes);
-
- Handle<Code> code = BUILTIN_CODE(isolate, DoubleToI);
- Address start = code->InstructionStart();
-
- // Save callee save registers.
- __ Push(r7, r6, r5, r4);
- __ Push(lr);
-
- // For softfp, move the input value into d0.
- if (!masm.use_eabi_hardfloat()) {
- __ vmov(d0, r0, r1);
- }
- // Push the double argument.
- __ sub(sp, sp, Operand(kDoubleSize));
- __ vstr(d0, sp, 0);
-
-  // Save registers to make sure they don't get clobbered.
- int source_reg_offset = kDoubleSize;
- int reg_num = 0;
- for (; reg_num < Register::kNumRegisters; ++reg_num) {
- if (RegisterConfiguration::Default()->IsAllocatableGeneralCode(reg_num)) {
- Register reg = Register::from_code(reg_num);
- if (reg != destination_reg) {
- __ push(reg);
- source_reg_offset += kPointerSize;
- }
- }
- }
-
- // Re-push the double argument.
- __ sub(sp, sp, Operand(kDoubleSize));
- __ vstr(d0, sp, 0);
-
- // Call through to the actual stub
- __ Call(start, RelocInfo::EXTERNAL_REFERENCE);
-
- __ ldr(destination_reg, MemOperand(sp, 0));
- __ add(sp, sp, Operand(kDoubleSize));
-
- // Make sure no registers have been unexpectedly clobbered
- for (--reg_num; reg_num >= 0; --reg_num) {
- if (RegisterConfiguration::Default()->IsAllocatableGeneralCode(reg_num)) {
- Register reg = Register::from_code(reg_num);
- if (reg != destination_reg) {
- __ ldr(ip, MemOperand(sp, 0));
- __ cmp(reg, ip);
- __ Assert(eq, AbortReason::kRegisterWasClobbered);
- __ add(sp, sp, Operand(kPointerSize));
- }
- }
- }
-
- __ add(sp, sp, Operand(kDoubleSize));
-
- if (destination_reg != r0) __ mov(r0, destination_reg);
-
- // Restore callee save registers.
- __ Pop(lr);
- __ Pop(r7, r6, r5, r4);
-
- __ Ret(0);
-
- CodeDesc desc;
- masm.GetCode(isolate, &desc);
- MakeAssemblerBufferExecutable(buffer, allocated);
- return (reinterpret_cast<ConvertDToIFunc>(
- reinterpret_cast<intptr_t>(buffer)));
-}
-
-#undef __
-
-
-static Isolate* GetIsolateFrom(LocalContext* context) {
- return reinterpret_cast<Isolate*>((*context)->GetIsolate());
-}
-
-
-int32_t RunGeneratedCodeCallWrapper(ConvertDToIFunc func,
- double from) {
-#ifdef USE_SIMULATOR
- return Simulator::current(CcTest::i_isolate())
- ->CallFP<int32_t>(FUNCTION_ADDR(func), from, 0);
-#else
- return (*func)(from);
-#endif
-}
-
-
-TEST(ConvertDToI) {
- CcTest::InitializeVM();
- LocalContext context;
- Isolate* isolate = GetIsolateFrom(&context);
- HandleScope scope(isolate);
-
-#if DEBUG
- // Verify that the tests actually work with the C version. In the release
- // code, the compiler optimizes it away because it's all constant, but does it
- // wrong, triggering an assert on gcc.
- RunAllTruncationTests(&ConvertDToICVersion);
-#endif
-
- Register dest_registers[] = {r0, r1, r2, r3, r4, r5, r6, r7};
-
- for (size_t d = 0; d < sizeof(dest_registers) / sizeof(Register); d++) {
- RunAllTruncationTests(
- RunGeneratedCodeCallWrapper,
- MakeConvertDToIFuncTrampoline(isolate, dest_registers[d]));
- }
-}
-
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/test/cctest/test-code-stubs-arm64.cc b/deps/v8/test/cctest/test-code-stubs-arm64.cc
deleted file mode 100644
index cb20931a5d..0000000000
--- a/deps/v8/test/cctest/test-code-stubs-arm64.cc
+++ /dev/null
@@ -1,180 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include <stdlib.h>
-
-#include "src/v8.h"
-
-#include "src/arm64/macro-assembler-arm64-inl.h"
-#include "src/base/platform/platform.h"
-#include "src/code-stubs.h"
-#include "src/heap/factory.h"
-#include "src/macro-assembler.h"
-#include "src/simulator.h"
-#include "test/cctest/cctest.h"
-#include "test/cctest/test-code-stubs.h"
-#include "test/common/assembler-tester.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ masm.
-
-ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
- Register destination_reg) {
- HandleScope handles(isolate);
-
- size_t allocated;
- byte* buffer =
- AllocateAssemblerBuffer(&allocated, 4 * Assembler::kMinimalBufferSize);
- MacroAssembler masm(isolate, buffer, static_cast<int>(allocated),
- v8::internal::CodeObjectRequired::kYes);
-
- Handle<Code> code = BUILTIN_CODE(isolate, DoubleToI);
- Address start = code->InstructionStart();
-
- __ PushCalleeSavedRegisters();
-
- MacroAssembler::PushPopQueue queue(&masm);
-
-  // Save registers to make sure they don't get clobbered.
- int source_reg_offset = kDoubleSize;
- int reg_num = 0;
- for (; reg_num < Register::kNumRegisters; ++reg_num) {
- if (RegisterConfiguration::Default()->IsAllocatableGeneralCode(reg_num)) {
- Register reg = Register::from_code(reg_num);
- queue.Queue(reg);
- source_reg_offset += kPointerSize;
- }
- }
- // Push the double argument. We push a second copy to maintain sp alignment.
- queue.Queue(d0);
- queue.Queue(d0);
-
- queue.PushQueued();
-
- // Call through to the actual stub.
- __ IndirectCall(start, RelocInfo::CODE_TARGET);
- __ Peek(destination_reg, 0);
-
- __ Drop(2, kDoubleSize);
-
- // Make sure no registers have been unexpectedly clobbered.
- {
- const RegisterConfiguration* config(RegisterConfiguration::Default());
- int allocatable_register_count =
- config->num_allocatable_general_registers();
- UseScratchRegisterScope temps(&masm);
- Register temp0 = temps.AcquireX();
- Register temp1 = temps.AcquireX();
- for (int i = allocatable_register_count - 1; i > 0; i -= 2) {
- int code0 = config->GetAllocatableGeneralCode(i);
- int code1 = config->GetAllocatableGeneralCode(i - 1);
- Register reg0 = Register::from_code(code0);
- Register reg1 = Register::from_code(code1);
- __ Pop(temp0, temp1);
- if (!reg0.is(destination_reg)) {
- __ Cmp(reg0, temp0);
- __ Assert(eq, AbortReason::kRegisterWasClobbered);
- }
- if (!reg1.is(destination_reg)) {
- __ Cmp(reg1, temp1);
- __ Assert(eq, AbortReason::kRegisterWasClobbered);
- }
- }
-
- if (allocatable_register_count % 2 != 0) {
- int code = config->GetAllocatableGeneralCode(0);
- Register reg = Register::from_code(code);
- __ Pop(temp0, xzr);
- if (!reg.is(destination_reg)) {
- __ Cmp(reg, temp0);
- __ Assert(eq, AbortReason::kRegisterWasClobbered);
- }
- }
- }
-
- if (!destination_reg.is(x0))
- __ Mov(x0, destination_reg);
-
- // Restore callee save registers.
- __ PopCalleeSavedRegisters();
-
- __ Ret();
-
- CodeDesc desc;
- masm.GetCode(isolate, &desc);
- MakeAssemblerBufferExecutable(buffer, allocated);
- return (reinterpret_cast<ConvertDToIFunc>(
- reinterpret_cast<intptr_t>(buffer)));
-}
-
-#undef __
-
-
-static Isolate* GetIsolateFrom(LocalContext* context) {
- return reinterpret_cast<Isolate*>((*context)->GetIsolate());
-}
-
-
-int32_t RunGeneratedCodeCallWrapper(ConvertDToIFunc func,
- double from) {
-#ifdef USE_SIMULATOR
- return Simulator::current(CcTest::i_isolate())
- ->Call<int32_t>(FUNCTION_ADDR(func), from);
-#else
- return (*func)(from);
-#endif
-}
-
-
-TEST(ConvertDToI) {
- CcTest::InitializeVM();
- LocalContext context;
- Isolate* isolate = GetIsolateFrom(&context);
- HandleScope scope(isolate);
-
-#if DEBUG
- // Verify that the tests actually work with the C version. In the release
- // code, the compiler optimizes it away because it's all constant, but does it
- // wrong, triggering an assert on gcc.
- RunAllTruncationTests(&ConvertDToICVersion);
-#endif
-
- Register dest_registers[] = {x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11,
- x12, x13, x14, x15, x18, x19, x20, x21, x22, x23,
- x24};
-
- for (size_t d = 0; d < sizeof(dest_registers) / sizeof(Register); d++) {
- RunAllTruncationTests(
- RunGeneratedCodeCallWrapper,
- MakeConvertDToIFuncTrampoline(isolate, dest_registers[d]));
- }
-}
-
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/test/cctest/test-code-stubs-ia32.cc b/deps/v8/test/cctest/test-code-stubs-ia32.cc
deleted file mode 100644
index d9bfe9bb17..0000000000
--- a/deps/v8/test/cctest/test-code-stubs-ia32.cc
+++ /dev/null
@@ -1,148 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include <stdlib.h>
-
-#include <limits>
-
-#include "src/v8.h"
-
-#include "src/base/platform/platform.h"
-#include "src/code-stubs.h"
-#include "src/heap/factory.h"
-#include "src/macro-assembler.h"
-#include "src/objects-inl.h"
-#include "test/cctest/cctest.h"
-#include "test/cctest/test-code-stubs.h"
-#include "test/common/assembler-tester.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ masm.
-
-ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
- Register destination_reg) {
- HandleScope handles(isolate);
-
- size_t allocated;
- byte* buffer = AllocateAssemblerBuffer(&allocated);
- MacroAssembler masm(isolate, buffer, static_cast<int>(allocated),
- v8::internal::CodeObjectRequired::kYes);
-
- Handle<Code> code = BUILTIN_CODE(isolate, DoubleToI);
- Address start = code->InstructionStart();
-
- __ push(ebx);
- __ push(ecx);
- __ push(edx);
- __ push(esi);
- __ push(edi);
-
- int param_offset = 7 * kPointerSize;
-  // Save registers to make sure they don't get clobbered.
- int reg_num = 0;
- for (; reg_num < Register::kNumRegisters; ++reg_num) {
- if (GetRegConfig()->IsAllocatableGeneralCode(reg_num)) {
- Register reg = Register::from_code(reg_num);
- if (reg != esp && reg != ebp && reg != destination_reg) {
- __ push(reg);
- param_offset += kPointerSize;
- }
- }
- }
-
- // Re-push the double argument
- __ push(MemOperand(esp, param_offset));
- __ push(MemOperand(esp, param_offset));
-
- // Call through to the actual stub
- __ call(start, RelocInfo::EXTERNAL_REFERENCE);
- __ mov(destination_reg, MemOperand(esp, 0));
-
- __ add(esp, Immediate(kDoubleSize));
-
- // Make sure no registers have been unexpectedly clobbered
- for (--reg_num; reg_num >= 0; --reg_num) {
- if (GetRegConfig()->IsAllocatableGeneralCode(reg_num)) {
- Register reg = Register::from_code(reg_num);
- if (reg != esp && reg != ebp && reg != destination_reg) {
- __ cmp(reg, MemOperand(esp, 0));
- __ Assert(equal, AbortReason::kRegisterWasClobbered);
- __ add(esp, Immediate(kPointerSize));
- }
- }
- }
-
- __ mov(eax, destination_reg);
-
- __ pop(edi);
- __ pop(esi);
- __ pop(edx);
- __ pop(ecx);
- __ pop(ebx);
-
- __ ret(kDoubleSize);
-
- CodeDesc desc;
- masm.GetCode(isolate, &desc);
- MakeAssemblerBufferExecutable(buffer, allocated);
- return reinterpret_cast<ConvertDToIFunc>(
- reinterpret_cast<intptr_t>(buffer));
-}
-
-#undef __
-
-
-static Isolate* GetIsolateFrom(LocalContext* context) {
- return reinterpret_cast<Isolate*>((*context)->GetIsolate());
-}
-
-
-TEST(ConvertDToI) {
- CcTest::InitializeVM();
- LocalContext context;
- Isolate* isolate = GetIsolateFrom(&context);
- HandleScope scope(isolate);
-
-#if DEBUG
- // Verify that the tests actually work with the C version. In the release
- // code, the compiler optimizes it away because it's all constant, but does it
- // wrong, triggering an assert on gcc.
- RunAllTruncationTests(&ConvertDToICVersion);
-#endif
-
- Register dest_registers[] = {eax, ebx, ecx, edx, edi, esi};
-
- for (size_t d = 0; d < sizeof(dest_registers) / sizeof(Register); d++) {
- RunAllTruncationTests(
- MakeConvertDToIFuncTrampoline(isolate, dest_registers[d]));
- }
-}
-
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/test/cctest/test-code-stubs-mips.cc b/deps/v8/test/cctest/test-code-stubs-mips.cc
deleted file mode 100644
index ed1798160b..0000000000
--- a/deps/v8/test/cctest/test-code-stubs-mips.cc
+++ /dev/null
@@ -1,181 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include <stdlib.h>
-
-#include "src/v8.h"
-
-#include "src/base/platform/platform.h"
-#include "src/code-stubs.h"
-#include "src/heap/factory.h"
-#include "src/macro-assembler.h"
-#include "src/mips/constants-mips.h"
-#include "src/objects-inl.h"
-#include "src/register-configuration.h"
-#include "src/simulator.h"
-#include "test/cctest/cctest.h"
-#include "test/cctest/test-code-stubs.h"
-#include "test/common/assembler-tester.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ masm.
-
-ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
- Register destination_reg) {
- HandleScope handles(isolate);
-
- size_t allocated;
- byte* buffer = AllocateAssemblerBuffer(&allocated);
- MacroAssembler masm(isolate, buffer, static_cast<int>(allocated),
- v8::internal::CodeObjectRequired::kYes);
-
- Handle<Code> code = BUILTIN_CODE(isolate, DoubleToI);
- Address start = code->InstructionStart();
-
- // Save callee save registers.
- __ MultiPush(kCalleeSaved | ra.bit());
-
- // Save callee-saved FPU registers.
- __ MultiPushFPU(kCalleeSavedFPU);
- // Set up the reserved register for 0.0.
- __ Move(kDoubleRegZero, 0.0);
-
- // For softfp, move the input value into f12.
- if (IsMipsSoftFloatABI) {
- __ Move(f12, a0, a1);
- }
- // Push the double argument.
- __ Subu(sp, sp, Operand(kDoubleSize));
- __ Sdc1(f12, MemOperand(sp));
-
-  // Save registers to make sure they don't get clobbered.
- int source_reg_offset = kDoubleSize;
- int reg_num = 2;
- for (; reg_num < Register::kNumRegisters; ++reg_num) {
- if (RegisterConfiguration::Default()->IsAllocatableGeneralCode(reg_num)) {
- Register reg = Register::from_code(reg_num);
- if (reg != destination_reg) {
- __ push(reg);
- source_reg_offset += kPointerSize;
- }
- }
- }
-
- // Re-push the double argument.
- __ Subu(sp, sp, Operand(kDoubleSize));
- __ Sdc1(f12, MemOperand(sp));
-
- // Call through to the actual stub
- __ Call(start, RelocInfo::EXTERNAL_REFERENCE);
- __ lw(destination_reg, MemOperand(sp, 0));
-
- __ Addu(sp, sp, Operand(kDoubleSize));
-
- // Make sure no registers have been unexpectedly clobbered
- for (--reg_num; reg_num >= 2; --reg_num) {
- if (RegisterConfiguration::Default()->IsAllocatableGeneralCode(reg_num)) {
- Register reg = Register::from_code(reg_num);
- if (reg != destination_reg) {
- __ lw(at, MemOperand(sp, 0));
- __ Assert(eq, AbortReason::kRegisterWasClobbered, reg, Operand(at));
- __ Addu(sp, sp, Operand(kPointerSize));
- }
- }
- }
-
- __ Addu(sp, sp, Operand(kDoubleSize));
-
- __ Move(v0, destination_reg);
- Label ok;
- __ Branch(&ok, eq, v0, Operand(zero_reg));
- __ bind(&ok);
-
- // Restore callee-saved FPU registers.
- __ MultiPopFPU(kCalleeSavedFPU);
-
- // Restore callee save registers.
- __ MultiPop(kCalleeSaved | ra.bit());
-
- Label ok1;
- __ Branch(&ok1, eq, v0, Operand(zero_reg));
- __ bind(&ok1);
- __ Ret();
-
- CodeDesc desc;
- masm.GetCode(isolate, &desc);
- MakeAssemblerBufferExecutable(buffer, allocated);
- return (reinterpret_cast<ConvertDToIFunc>(
- reinterpret_cast<intptr_t>(buffer)));
-}
-
-#undef __
-
-
-static Isolate* GetIsolateFrom(LocalContext* context) {
- return reinterpret_cast<Isolate*>((*context)->GetIsolate());
-}
-
-
-int32_t RunGeneratedCodeCallWrapper(ConvertDToIFunc func,
- double from) {
-#ifdef USE_SIMULATOR
- Simulator::current(CcTest::i_isolate())
- ->CallFP(FUNCTION_ADDR(func), from, 0.);
- return Simulator::current(CcTest::i_isolate())->get_register(v0.code());
-#else
- return (*func)(from);
-#endif
-}
-
-
-TEST(ConvertDToI) {
- CcTest::InitializeVM();
- LocalContext context;
- Isolate* isolate = GetIsolateFrom(&context);
- HandleScope scope(isolate);
-
-#if DEBUG
- // Verify that the tests actually work with the C version. In the release
- // code, the compiler optimizes it away because it's all constant, but does it
- // wrong, triggering an assert on gcc.
- RunAllTruncationTests(&ConvertDToICVersion);
-#endif
-
- Register dest_registers[] = {
- v0, v1, a0, a1, a2, a3, t0, t1, t2, t3, t4, t5};
-
- for (size_t d = 0; d < sizeof(dest_registers) / sizeof(Register); d++) {
- RunAllTruncationTests(
- RunGeneratedCodeCallWrapper,
- MakeConvertDToIFuncTrampoline(isolate, dest_registers[d]));
- }
-}
-
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/test/cctest/test-code-stubs-mips64.cc b/deps/v8/test/cctest/test-code-stubs-mips64.cc
deleted file mode 100644
index 3518e722c7..0000000000
--- a/deps/v8/test/cctest/test-code-stubs-mips64.cc
+++ /dev/null
@@ -1,179 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include <stdlib.h>
-
-#include "src/v8.h"
-
-#include "src/base/platform/platform.h"
-#include "src/code-stubs.h"
-#include "src/heap/factory.h"
-#include "src/macro-assembler.h"
-#include "src/mips64/constants-mips64.h"
-#include "src/objects-inl.h"
-#include "src/register-configuration.h"
-#include "src/simulator.h"
-#include "test/cctest/cctest.h"
-#include "test/cctest/test-code-stubs.h"
-#include "test/common/assembler-tester.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ masm.
-
-ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
- Register destination_reg) {
- HandleScope handles(isolate);
-
- size_t allocated;
- byte* buffer = AllocateAssemblerBuffer(&allocated);
- MacroAssembler masm(isolate, buffer, static_cast<int>(allocated),
- v8::internal::CodeObjectRequired::kYes);
-
- Handle<Code> code = BUILTIN_CODE(isolate, DoubleToI);
- Address start = code->InstructionStart();
-
- // Save callee save registers.
- __ MultiPush(kCalleeSaved | ra.bit());
-
- // Save callee-saved FPU registers.
- __ MultiPushFPU(kCalleeSavedFPU);
- // Set up the reserved register for 0.0.
- __ Move(kDoubleRegZero, 0.0);
-
- // For softfp, move the input value into f12.
- if (IsMipsSoftFloatABI) {
- __ Move(f12, a0, a1);
- }
- // Push the double argument.
- __ Dsubu(sp, sp, Operand(kDoubleSize));
- __ Sdc1(f12, MemOperand(sp));
-
-  // Save registers to make sure they don't get clobbered.
- int source_reg_offset = kDoubleSize;
- int reg_num = 2;
- const RegisterConfiguration* config = RegisterConfiguration::Default();
- for (; reg_num < config->num_allocatable_general_registers(); ++reg_num) {
- Register reg = Register::from_code(reg_num);
- if (reg != destination_reg) {
- __ push(reg);
- source_reg_offset += kPointerSize;
- }
- }
-
- // Re-push the double argument.
- __ Dsubu(sp, sp, Operand(kDoubleSize));
- __ Sdc1(f12, MemOperand(sp));
-
- // Call through to the actual stub
- __ Call(start, RelocInfo::EXTERNAL_REFERENCE);
- __ Ld(destination_reg, MemOperand(sp, 0));
-
- __ Daddu(sp, sp, Operand(kDoubleSize));
-
- // Make sure no registers have been unexpectedly clobbered
- for (--reg_num; reg_num >= 2; --reg_num) {
- Register reg = Register::from_code(reg_num);
- if (reg != destination_reg) {
- __ Ld(at, MemOperand(sp, 0));
- __ Assert(eq, AbortReason::kRegisterWasClobbered, reg, Operand(at));
- __ Daddu(sp, sp, Operand(kPointerSize));
- }
- }
-
- __ Daddu(sp, sp, Operand(kDoubleSize));
-
- __ Move(v0, destination_reg);
- Label ok;
- __ Branch(&ok, eq, v0, Operand(zero_reg));
- __ bind(&ok);
-
- // Restore callee-saved FPU registers.
- __ MultiPopFPU(kCalleeSavedFPU);
-
- // Restore callee save registers.
- __ MultiPop(kCalleeSaved | ra.bit());
-
- Label ok1;
- __ Branch(&ok1, eq, v0, Operand(zero_reg));
- __ bind(&ok1);
- __ Ret();
-
- CodeDesc desc;
- masm.GetCode(isolate, &desc);
- MakeAssemblerBufferExecutable(buffer, allocated);
- return (reinterpret_cast<ConvertDToIFunc>(
- reinterpret_cast<intptr_t>(buffer)));
-}
-
-#undef __
-
-
-static Isolate* GetIsolateFrom(LocalContext* context) {
- return reinterpret_cast<Isolate*>((*context)->GetIsolate());
-}
-
-
-int32_t RunGeneratedCodeCallWrapper(ConvertDToIFunc func,
- double from) {
-#ifdef USE_SIMULATOR
- Simulator::current(CcTest::i_isolate())
- ->CallFP(FUNCTION_ADDR(func), from, 0.);
- return static_cast<int32_t>(
- Simulator::current(CcTest::i_isolate())->get_register(v0.code()));
-#else
- return (*func)(from);
-#endif
-}
-
-
-TEST(ConvertDToI) {
- CcTest::InitializeVM();
- LocalContext context;
- Isolate* isolate = GetIsolateFrom(&context);
- HandleScope scope(isolate);
-
-#if DEBUG
- // Verify that the tests actually work with the C version. In the release
- // code, the compiler optimizes it away because it's all constant, but does it
- // wrong, triggering an assert on gcc.
- RunAllTruncationTests(&ConvertDToICVersion);
-#endif
-
- Register dest_registers[] = {
- v0, v1, a0, a1, a2, a3, a4, a5, a6, a7, t0, t1};
-
- for (size_t d = 0; d < sizeof(dest_registers) / sizeof(Register); d++) {
- RunAllTruncationTests(
- RunGeneratedCodeCallWrapper,
- MakeConvertDToIFuncTrampoline(isolate, dest_registers[d]));
- }
-}
-
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/test/cctest/test-code-stubs-x64.cc b/deps/v8/test/cctest/test-code-stubs-x64.cc
deleted file mode 100644
index c55b2e2b1e..0000000000
--- a/deps/v8/test/cctest/test-code-stubs-x64.cc
+++ /dev/null
@@ -1,147 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include <stdlib.h>
-
-#include "src/v8.h"
-
-#include "src/base/platform/platform.h"
-#include "src/code-stubs.h"
-#include "src/heap/factory.h"
-#include "src/macro-assembler.h"
-#include "src/objects-inl.h"
-#include "src/register-configuration.h"
-#include "test/cctest/cctest.h"
-#include "test/cctest/test-code-stubs.h"
-#include "test/common/assembler-tester.h"
-
-namespace v8 {
-namespace internal {
-namespace test_code_stubs_x64 {
-
-#define __ masm.
-
-ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
- Register destination_reg) {
- HandleScope handles(isolate);
-
- size_t allocated;
- byte* buffer = AllocateAssemblerBuffer(&allocated);
- MacroAssembler masm(isolate, buffer, static_cast<int>(allocated),
- v8::internal::CodeObjectRequired::kYes);
-
- Handle<Code> code = BUILTIN_CODE(isolate, DoubleToI);
- Address start = code->InstructionStart();
-
- __ pushq(rbx);
- __ pushq(rcx);
- __ pushq(rdx);
- __ pushq(rsi);
- __ pushq(rdi);
-
- const RegisterConfiguration* config = RegisterConfiguration::Default();
-
-  // Save registers to make sure they don't get clobbered.
- int reg_num = 0;
- for (; reg_num < config->num_allocatable_general_registers(); ++reg_num) {
- Register reg =
- Register::from_code(config->GetAllocatableGeneralCode(reg_num));
- if (reg != rsp && reg != rbp && reg != destination_reg) {
- __ pushq(reg);
- }
- }
-
- // Put the double argument into the designated double argument slot.
- __ subq(rsp, Immediate(kDoubleSize));
- __ Movsd(MemOperand(rsp, 0), xmm0);
-
- // Call through to the actual stub
- __ Call(start, RelocInfo::EXTERNAL_REFERENCE);
- __ movl(destination_reg, MemOperand(rsp, 0));
-
- __ addq(rsp, Immediate(kDoubleSize));
-
- // Make sure no registers have been unexpectedly clobbered
- for (--reg_num; reg_num >= 0; --reg_num) {
- Register reg =
- Register::from_code(config->GetAllocatableGeneralCode(reg_num));
- if (reg != rsp && reg != rbp && reg != destination_reg) {
- __ cmpq(reg, MemOperand(rsp, 0));
- __ Assert(equal, AbortReason::kRegisterWasClobbered);
- __ addq(rsp, Immediate(kPointerSize));
- }
- }
-
- __ movq(rax, destination_reg);
-
- __ popq(rdi);
- __ popq(rsi);
- __ popq(rdx);
- __ popq(rcx);
- __ popq(rbx);
-
- __ ret(0);
-
- CodeDesc desc;
- masm.GetCode(isolate, &desc);
- MakeAssemblerBufferExecutable(buffer, allocated);
- return reinterpret_cast<ConvertDToIFunc>(
- reinterpret_cast<intptr_t>(buffer));
-}
-
-#undef __
-
-
-static Isolate* GetIsolateFrom(LocalContext* context) {
- return reinterpret_cast<Isolate*>((*context)->GetIsolate());
-}
-
-
-TEST(ConvertDToI) {
- CcTest::InitializeVM();
- LocalContext context;
- Isolate* isolate = GetIsolateFrom(&context);
- HandleScope scope(isolate);
-
-#if DEBUG
- // Verify that the tests actually work with the C version. In the release
- // code, the compiler optimizes it away because it's all constant, but does it
- // wrong, triggering an assert on gcc.
- RunAllTruncationTests(&ConvertDToICVersion);
-#endif
-
- Register dest_registers[] = {rax, rbx, rcx, rdx, rsi, rdi, r8, r9};
-
- for (size_t d = 0; d < sizeof(dest_registers) / sizeof(Register); d++) {
- RunAllTruncationTests(
- MakeConvertDToIFuncTrampoline(isolate, dest_registers[d]));
- }
-}
-
-} // namespace test_code_stubs_x64
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/test/cctest/test-code-stubs.cc b/deps/v8/test/cctest/test-code-stubs.cc
deleted file mode 100644
index 73489a1bd6..0000000000
--- a/deps/v8/test/cctest/test-code-stubs.cc
+++ /dev/null
@@ -1,204 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include <stdlib.h>
-
-#include <limits>
-
-#include "src/v8.h"
-
-#include "src/base/platform/platform.h"
-#include "src/code-stubs.h"
-#include "src/double.h"
-#include "src/heap/factory.h"
-#include "src/macro-assembler.h"
-#include "src/objects-inl.h"
-#include "test/cctest/cctest.h"
-#include "test/cctest/test-code-stubs.h"
-
-namespace v8 {
-namespace internal {
-
-int STDCALL ConvertDToICVersion(double d) {
-#if defined(V8_TARGET_BIG_ENDIAN)
- const int kExponentIndex = 0;
- const int kMantissaIndex = 1;
-#elif defined(V8_TARGET_LITTLE_ENDIAN)
- const int kExponentIndex = 1;
- const int kMantissaIndex = 0;
-#else
-#error Unsupported endianness
-#endif
- uint32_t u[2];
- memcpy(u, &d, sizeof(d));
- uint32_t exponent_bits = u[kExponentIndex];
- int32_t shifted_mask = static_cast<int32_t>(Double::kExponentMask >> 32);
- int32_t exponent = (((exponent_bits & shifted_mask) >>
- (Double::kPhysicalSignificandSize - 32)) -
- HeapNumber::kExponentBias);
- if (exponent < 0) {
- return 0;
- }
- uint32_t unsigned_exponent = static_cast<uint32_t>(exponent);
- int result = 0;
- uint32_t max_exponent =
- static_cast<uint32_t>(Double::kPhysicalSignificandSize);
- if (unsigned_exponent >= max_exponent) {
- if ((exponent - Double::kPhysicalSignificandSize) < 32) {
- result = u[kMantissaIndex]
- << (exponent - Double::kPhysicalSignificandSize);
- }
- } else {
- uint64_t big_result =
- (bit_cast<uint64_t>(d) & Double::kSignificandMask) | Double::kHiddenBit;
- big_result = big_result >> (Double::kPhysicalSignificandSize - exponent);
- result = static_cast<uint32_t>(big_result);
- }
- if (static_cast<int32_t>(exponent_bits) < 0) {
- return (0 - result);
- } else {
- return result;
- }
-}
-
-
-void RunOneTruncationTestWithTest(ConvertDToICallWrapper callWrapper,
- ConvertDToIFunc func,
- double from,
- int32_t to) {
- int32_t result = (*callWrapper)(func, from);
- CHECK_EQ(to, result);
-}
-
-DISABLE_CFI_ICALL
-int32_t DefaultCallWrapper(ConvertDToIFunc func,
- double from) {
- return (*func)(from);
-}
-
-
-// #define NaN and Infinity so that it's possible to cut-and-paste these tests
-// directly to a .js file and run them.
-#define NaN (std::numeric_limits<double>::quiet_NaN())
-#define Infinity (std::numeric_limits<double>::infinity())
-#define RunOneTruncationTest(p1, p2) \
- RunOneTruncationTestWithTest(callWrapper, func, p1, p2)
-
-
-void RunAllTruncationTests(ConvertDToIFunc func) {
- RunAllTruncationTests(DefaultCallWrapper, func);
-}
-
-
-void RunAllTruncationTests(ConvertDToICallWrapper callWrapper,
- ConvertDToIFunc func) {
- RunOneTruncationTest(0, 0);
- RunOneTruncationTest(0.5, 0);
- RunOneTruncationTest(-0.5, 0);
- RunOneTruncationTest(1.5, 1);
- RunOneTruncationTest(-1.5, -1);
- RunOneTruncationTest(5.5, 5);
- RunOneTruncationTest(-5.0, -5);
- RunOneTruncationTest(NaN, 0);
- RunOneTruncationTest(Infinity, 0);
- RunOneTruncationTest(-NaN, 0);
- RunOneTruncationTest(-Infinity, 0);
- RunOneTruncationTest(4.94065645841e-324, 0);
- RunOneTruncationTest(-4.94065645841e-324, 0);
-
- RunOneTruncationTest(0.9999999999999999, 0);
- RunOneTruncationTest(-0.9999999999999999, 0);
- RunOneTruncationTest(4294967296.0, 0);
- RunOneTruncationTest(-4294967296.0, 0);
- RunOneTruncationTest(9223372036854775000.0, -1024);
- RunOneTruncationTest(-9223372036854775000.0, 1024);
- RunOneTruncationTest(4.5036e+15, 372629504);
- RunOneTruncationTest(-4.5036e+15, -372629504);
-
- RunOneTruncationTest(287524199.5377777, 0x11234567);
- RunOneTruncationTest(-287524199.5377777, -0x11234567);
- RunOneTruncationTest(2300193596.302222, -1994773700);
- RunOneTruncationTest(-2300193596.302222, 1994773700);
- RunOneTruncationTest(4600387192.604444, 305419896);
- RunOneTruncationTest(-4600387192.604444, -305419896);
- RunOneTruncationTest(4823855600872397.0, 1737075661);
- RunOneTruncationTest(-4823855600872397.0, -1737075661);
-
- RunOneTruncationTest(4503603922337791.0, -1);
- RunOneTruncationTest(-4503603922337791.0, 1);
- RunOneTruncationTest(4503601774854143.0, 2147483647);
- RunOneTruncationTest(-4503601774854143.0, -2147483647);
- RunOneTruncationTest(9007207844675582.0, -2);
- RunOneTruncationTest(-9007207844675582.0, 2);
-
- RunOneTruncationTest(2.4178527921507624e+24, -536870912);
- RunOneTruncationTest(-2.4178527921507624e+24, 536870912);
- RunOneTruncationTest(2.417853945072267e+24, -536870912);
- RunOneTruncationTest(-2.417853945072267e+24, 536870912);
-
- RunOneTruncationTest(4.8357055843015248e+24, -1073741824);
- RunOneTruncationTest(-4.8357055843015248e+24, 1073741824);
- RunOneTruncationTest(4.8357078901445341e+24, -1073741824);
- RunOneTruncationTest(-4.8357078901445341e+24, 1073741824);
-
- RunOneTruncationTest(2147483647.0, 2147483647);
- RunOneTruncationTest(-2147483648.0, -2147483647-1);
- RunOneTruncationTest(9.6714111686030497e+24, -2147483647-1);
- RunOneTruncationTest(-9.6714111686030497e+24, -2147483647-1);
- RunOneTruncationTest(9.6714157802890681e+24, -2147483647-1);
- RunOneTruncationTest(-9.6714157802890681e+24, -2147483647-1);
- RunOneTruncationTest(1.9342813113834065e+25, -2147483647-1);
- RunOneTruncationTest(-1.9342813113834065e+25, -2147483647-1);
-
- RunOneTruncationTest(3.868562622766813e+25, 0);
- RunOneTruncationTest(-3.868562622766813e+25, 0);
- RunOneTruncationTest(1.7976931348623157e+308, 0);
- RunOneTruncationTest(-1.7976931348623157e+308, 0);
-}
-
-#undef NaN
-#undef Infinity
-#undef RunOneTruncationTest
-
-
-TEST(CodeStubMajorKeys) {
- CcTest::InitializeVM();
- LocalContext context;
- Isolate* isolate = CcTest::i_isolate();
-
-#define CHECK_STUB(NAME) \
- { \
- HandleScope scope(isolate); \
- NAME##Stub stub_impl(0xABCD, isolate); \
- CodeStub* stub = &stub_impl; \
- CHECK_EQ(stub->MajorKey(), CodeStub::NAME); \
- }
- CODE_STUB_LIST(CHECK_STUB);
-}
-
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/test/cctest/test-code-stubs.h b/deps/v8/test/cctest/test-code-stubs.h
deleted file mode 100644
index 54182d0c45..0000000000
--- a/deps/v8/test/cctest/test-code-stubs.h
+++ /dev/null
@@ -1,59 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_TEST_CODE_STUBS_H_
-#define V8_TEST_CODE_STUBS_H_
-
-namespace v8 {
-namespace internal {
-
-#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X87
-#if __GNUC__
-#define STDCALL __attribute__((stdcall))
-#else
-#define STDCALL __stdcall
-#endif
-#else
-#define STDCALL
-#endif
-
-typedef int32_t STDCALL ConvertDToIFuncType(double input);
-typedef ConvertDToIFuncType* ConvertDToIFunc;
-
-typedef int32_t ConvertDToICallWrapperType(ConvertDToIFunc func, double from);
-typedef ConvertDToICallWrapperType* ConvertDToICallWrapper;
-
-int STDCALL ConvertDToICVersion(double d);
-
-void RunAllTruncationTests(ConvertDToIFunc func);
-void RunAllTruncationTests(ConvertDToICallWrapper callWrapper,
- ConvertDToIFunc func);
-
-} // namespace internal
-} // namespace v8
-
-#endif
diff --git a/deps/v8/test/cctest/test-compiler.cc b/deps/v8/test/cctest/test-compiler.cc
index 76ce276c06..f05056a2de 100644
--- a/deps/v8/test/cctest/test-compiler.cc
+++ b/deps/v8/test/cctest/test-compiler.cc
@@ -31,6 +31,7 @@
#include "src/v8.h"
#include "src/api-inl.h"
+#include "src/compilation-cache.h"
#include "src/compiler.h"
#include "src/disasm.h"
#include "src/heap/factory.h"
@@ -47,8 +48,7 @@ static Handle<Object> GetGlobalProperty(const char* name) {
.ToHandleChecked();
}
-
-static void SetGlobalProperty(const char* name, Object* value) {
+static void SetGlobalProperty(const char* name, Object value) {
Isolate* isolate = CcTest::i_isolate();
Handle<Object> object(value, isolate);
Handle<String> internalized_name =
@@ -59,7 +59,6 @@ static void SetGlobalProperty(const char* name, Object* value) {
.Check();
}
-
static Handle<JSFunction> Compile(const char* source) {
Isolate* isolate = CcTest::i_isolate();
Handle<String> source_code = isolate->factory()->NewStringFromUtf8(
@@ -155,7 +154,7 @@ TEST(Sum) {
TEST(Print) {
v8::HandleScope scope(CcTest::isolate());
- v8::Local<v8::Context> context = CcTest::NewContext(PRINT_EXTENSION);
+ v8::Local<v8::Context> context = CcTest::NewContext({PRINT_EXTENSION_ID});
v8::Context::Scope context_scope(context);
const char* source = "for (n = 0; n < 100; ++n) print(n, 1, 2);";
Handle<JSFunction> fun = Compile(source);
@@ -223,7 +222,7 @@ TEST(C2JSFrames) {
FLAG_expose_gc = true;
v8::HandleScope scope(CcTest::isolate());
v8::Local<v8::Context> context =
- CcTest::NewContext(PRINT_EXTENSION | GC_EXTENSION);
+ CcTest::NewContext({PRINT_EXTENSION_ID, GC_EXTENSION_ID});
v8::Context::Scope context_scope(context);
const char* source = "function foo(a) { gc(), print(a); }";
@@ -241,8 +240,8 @@ TEST(C2JSFrames) {
.ToHandleChecked();
CHECK(fun1->IsJSFunction());
- Handle<Object> argv[] = {isolate->factory()->InternalizeOneByteString(
- STATIC_CHAR_VECTOR("hello"))};
+ Handle<Object> argv[] = {
+ isolate->factory()->InternalizeOneByteString(StaticCharVector("hello"))};
Execution::Call(isolate,
Handle<JSFunction>::cast(fun1),
global,
@@ -315,9 +314,9 @@ TEST(FeedbackVectorPreservedAcrossRecompiles) {
Handle<FeedbackVector> feedback_vector(f->feedback_vector(), f->GetIsolate());
CHECK(!feedback_vector->is_empty());
FeedbackSlot slot_for_a(0);
- MaybeObject* object = feedback_vector->Get(slot_for_a);
+ MaybeObject object = feedback_vector->Get(slot_for_a);
{
- HeapObject* heap_object;
+ HeapObject heap_object;
CHECK(object->GetHeapObjectIfWeak(&heap_object));
CHECK(heap_object->IsJSFunction());
}
@@ -329,7 +328,7 @@ TEST(FeedbackVectorPreservedAcrossRecompiles) {
CHECK(f->IsOptimized());
object = f->feedback_vector()->Get(slot_for_a);
{
- HeapObject* heap_object;
+ HeapObject heap_object;
CHECK(object->GetHeapObjectIfWeak(&heap_object));
CHECK(heap_object->IsJSFunction());
}
@@ -337,7 +336,7 @@ TEST(FeedbackVectorPreservedAcrossRecompiles) {
TEST(FeedbackVectorUnaffectedByScopeChanges) {
- if (i::FLAG_always_opt || !i::FLAG_lazy) {
+ if (i::FLAG_always_opt || !i::FLAG_lazy || i::FLAG_lite_mode) {
return;
}
CcTest::InitializeVM();
@@ -487,8 +486,8 @@ TEST(CompileFunctionInContextArgs) {
v8::Local<v8::Object> ext[1];
ext[0] = v8::Local<v8::Object>::Cast(
env->Global()->Get(env.local(), v8_str("a")).ToLocalChecked());
- v8::ScriptCompiler::Source script_source(v8_str("result = x + b"));
- v8::Local<v8::String> arg = v8_str("b");
+ v8::ScriptCompiler::Source script_source(v8_str("result = x + abc"));
+ v8::Local<v8::String> arg = v8_str("abc");
v8::Local<v8::Function> fun =
v8::ScriptCompiler::CompileFunctionInContext(env.local(), &script_source,
1, &arg, 1, ext)
@@ -498,8 +497,8 @@ TEST(CompileFunctionInContextArgs) {
->ToInt32(env.local())
.ToLocalChecked()
->Value());
- v8::Local<v8::Value> b_value = v8::Number::New(CcTest::isolate(), 42.0);
- fun->Call(env.local(), env->Global(), 1, &b_value).ToLocalChecked();
+ v8::Local<v8::Value> arg_value = v8::Number::New(CcTest::isolate(), 42.0);
+ fun->Call(env.local(), env->Global(), 1, &arg_value).ToLocalChecked();
CHECK(env->Global()->Has(env.local(), v8_str("result")).FromJust());
v8::Local<v8::Value> result =
env->Global()->Get(env.local(), v8_str("result")).ToLocalChecked();
@@ -516,16 +515,17 @@ TEST(CompileFunctionInContextComments) {
v8::Local<v8::Object> ext[1];
ext[0] = v8::Local<v8::Object>::Cast(
env->Global()->Get(env.local(), v8_str("a")).ToLocalChecked());
- v8::ScriptCompiler::Source script_source(
- v8_str("result = /* y + */ x + b // + z"));
- v8::Local<v8::String> arg = v8_str("b");
+ v8::Local<v8::String> source =
+ CompileRun("'result = /* y + */ x + a\\u4e00 // + z'").As<v8::String>();
+ v8::ScriptCompiler::Source script_source(source);
+ v8::Local<v8::String> arg = CompileRun("'a\\u4e00'").As<v8::String>();
v8::Local<v8::Function> fun =
v8::ScriptCompiler::CompileFunctionInContext(env.local(), &script_source,
1, &arg, 1, ext)
.ToLocalChecked();
CHECK(!fun.IsEmpty());
- v8::Local<v8::Value> b_value = v8::Number::New(CcTest::isolate(), 42.0);
- fun->Call(env.local(), env->Global(), 1, &b_value).ToLocalChecked();
+ v8::Local<v8::Value> arg_value = v8::Number::New(CcTest::isolate(), 42.0);
+ fun->Call(env.local(), env->Global(), 1, &arg_value).ToLocalChecked();
CHECK(env->Global()->Has(env.local(), v8_str("result")).FromJust());
v8::Local<v8::Value> result =
env->Global()->Get(env.local(), v8_str("result")).ToLocalChecked();
@@ -771,6 +771,7 @@ TEST(CompileFunctionInContextFunctionToString) {
}
TEST(InvocationCount) {
+ if (FLAG_lite_mode) return;
FLAG_allow_natives_syntax = true;
FLAG_always_opt = false;
CcTest::InitializeVM();
@@ -842,5 +843,182 @@ TEST(DeepEagerCompilation) {
}
}
+TEST(DeepEagerCompilationPeakMemory) {
+ i::FLAG_always_opt = false;
+ CcTest::InitializeVM();
+ LocalContext env;
+ v8::HandleScope scope(CcTest::isolate());
+ v8::Local<v8::String> source = v8_str(
+ "function f() {"
+ " function g1() {"
+ " function h1() {"
+ " function i1() {}"
+ " function i2() {}"
+ " }"
+ " function h2() {"
+ " function i1() {}"
+ " function i2() {}"
+ " }"
+ " }"
+ " function g2() {"
+ " function h1() {"
+ " function i1() {}"
+ " function i2() {}"
+ " }"
+ " function h2() {"
+ " function i1() {}"
+ " function i2() {}"
+ " }"
+ " }"
+ "}");
+ v8::ScriptCompiler::Source script_source(source);
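+  // Disable the compilation cache so each Compile() call below performs a
+  // fresh compile instead of returning a cached result.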
+ CcTest::i_isolate()->compilation_cache()->Disable();
+
+ v8::HeapStatistics heap_statistics;
+ CcTest::isolate()->GetHeapStatistics(&heap_statistics);
+ size_t peak_mem_1 = heap_statistics.peak_malloced_memory();
+ printf("peak memory after init: %8zu\n", peak_mem_1);
+
+ v8::ScriptCompiler::Compile(env.local(), &script_source,
+ v8::ScriptCompiler::kNoCompileOptions)
+ .ToLocalChecked();
+
+ CcTest::isolate()->GetHeapStatistics(&heap_statistics);
+ size_t peak_mem_2 = heap_statistics.peak_malloced_memory();
+ printf("peak memory after lazy compile: %8zu\n", peak_mem_2);
+
+ v8::ScriptCompiler::Compile(env.local(), &script_source,
+ v8::ScriptCompiler::kNoCompileOptions)
+ .ToLocalChecked();
+
+ CcTest::isolate()->GetHeapStatistics(&heap_statistics);
+ size_t peak_mem_3 = heap_statistics.peak_malloced_memory();
+ printf("peak memory after lazy compile: %8zu\n", peak_mem_3);
+
+ v8::ScriptCompiler::Compile(env.local(), &script_source,
+ v8::ScriptCompiler::kEagerCompile)
+ .ToLocalChecked();
+
+ CcTest::isolate()->GetHeapStatistics(&heap_statistics);
+ size_t peak_mem_4 = heap_statistics.peak_malloced_memory();
+ printf("peak memory after eager compile: %8zu\n", peak_mem_4);
+
+ CHECK_LE(peak_mem_1, peak_mem_2);
+ CHECK_EQ(peak_mem_2, peak_mem_3);
+ CHECK_LE(peak_mem_3, peak_mem_4);
+ // Check that eager compilation does not cause significantly higher (+100%)
+ // peak memory than lazy compilation.
+ CHECK_LE(peak_mem_4 - peak_mem_3, peak_mem_3);
+}
+
+// TODO(mslekova): Remove the duplication with test-heap.cc
+static int AllocationSitesCount(Heap* heap) {
+ int count = 0;
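+  // The allocation-site list is threaded through each site's weak_next()
+  // field and ends at the first value that is not an AllocationSite.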
+ for (Object site = heap->allocation_sites_list(); site->IsAllocationSite();) {
+ AllocationSite cur = AllocationSite::cast(site);
+ CHECK(cur->HasWeakNext());
+ site = cur->weak_next();
+ count++;
+ }
+ return count;
+}
+
+// This test simulates a specific race condition: if a GC is triggered just
+// before CompilationDependencies::Commit finishes, it can change the
+// pretenuring decision and thereby cause a deoptimization.
+TEST(DecideToPretenureDuringCompilation) {
+ // The test makes use of optimization and relies on deterministic
+ // compilation.
+ if (!i::FLAG_opt || i::FLAG_always_opt ||
+ i::FLAG_stress_incremental_marking || i::FLAG_optimize_for_size
+#ifdef ENABLE_MINOR_MC
+ || i::FLAG_minor_mc
+#endif
+ )
+ return;
+
+ FLAG_stress_gc_during_compilation = true;
+ FLAG_allow_natives_syntax = true;
+ FLAG_allocation_site_pretenuring = true;
+ FLAG_flush_bytecode = false;
+
+ // We want to trigger exactly 1 optimization.
+ FLAG_use_osr = false;
+
+ // We'll do manual initialization.
+ ManualGCScope manual_gc_scope;
+ v8::Isolate::CreateParams create_params;
+
+ // This setting ensures Heap::MaximumSizeScavenge will return `true`.
+ // We need to initialize the heap with at least 1 page, while keeping the
+ // limit low, to ensure the new space fills even on 32-bit architectures.
+ create_params.constraints.set_max_semi_space_size_in_kb(Page::kPageSize /
+ 1024);
+ create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
+ v8::Isolate* isolate = v8::Isolate::New(create_params);
+
+ isolate->Enter();
+ {
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ Heap* heap = i_isolate->heap();
+ GlobalHandles* global_handles = i_isolate->global_handles();
+ HandleScope handle_scope(i_isolate);
+
+ // The allocation site at the head of the list is ours.
+ Handle<AllocationSite> site;
+ {
+ LocalContext context(isolate);
+ v8::HandleScope scope(context->GetIsolate());
+
+ int count = AllocationSitesCount(heap);
+ CompileRun(
+ "let arr = [];"
+ "function foo(shouldKeep) {"
+ " let local_array = new Array();"
+ " if (shouldKeep) arr.push(local_array);"
+ "}"
+ "function bar(shouldKeep) {"
+ " for (let i = 0; i < 10000; i++) {"
+ " foo(shouldKeep);"
+ " }"
+ "}"
+ "bar();");
+
+ // This number should be >= kPretenureRatio * 10000,
+ // where 10000 is the number of iterations in `bar`,
+ // in order to make the ratio in DigestPretenuringFeedback close to 1.
+ const int memento_found_bump = 8500;
+
+ // One allocation site should have been created.
+ int new_count = AllocationSitesCount(heap);
+ CHECK_EQ(new_count, (count + 1));
+ site = Handle<AllocationSite>::cast(global_handles->Create(
+ AllocationSite::cast(heap->allocation_sites_list())));
+ site->set_memento_found_count(memento_found_bump);
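+    // Artificially bumping the memento count pushes the pretenuring ratio
+    // over the threshold, so the GC forced during compilation flips this
+    // site's decision and invalidates the compilation dependency.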
+
+ CompileRun("%OptimizeFunctionOnNextCall(bar);");
+ CompileRun("bar(true);");
+
+    // The last call should have caused `bar` to bail out of its optimized
+    // code due to a dependency change (the pretenuring decision in this
+    // case). This will cause recompilation.
+
+ // Check `bar` can get optimized again, meaning the compiler state is
+ // recoverable from this point.
+ CompileRun("%OptimizeFunctionOnNextCall(bar);");
+ CompileRun("bar();");
+
+ Handle<Object> foo_obj =
+ JSReceiver::GetProperty(i_isolate, i_isolate->global_object(), "bar")
+ .ToHandleChecked();
+ Handle<JSFunction> bar = Handle<JSFunction>::cast(foo_obj);
+
+ CHECK(bar->IsOptimized());
+ }
+ }
+ isolate->Exit();
+ isolate->Dispose();
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/cctest/test-constantpool.cc b/deps/v8/test/cctest/test-constantpool.cc
index 1215004a83..1e5f98b5b7 100644
--- a/deps/v8/test/cctest/test-constantpool.cc
+++ b/deps/v8/test/cctest/test-constantpool.cc
@@ -6,12 +6,14 @@
#include "src/v8.h"
-#include "src/assembler.h"
+#include "src/constant-pool.h"
#include "test/cctest/cctest.h"
namespace v8 {
namespace internal {
+#if defined(V8_TARGET_ARCH_PPC)
+
const ConstantPoolEntry::Type kPtrType = ConstantPoolEntry::INTPTR;
const ConstantPoolEntry::Type kDblType = ConstantPoolEntry::DOUBLE;
const ConstantPoolEntry::Access kRegAccess = ConstantPoolEntry::REGULAR;
@@ -247,5 +249,7 @@ TEST(ConstantPoolNoSharing) {
CHECK_EQ(access, kOvflAccess);
}
+#endif // defined(V8_TARGET_ARCH_PPC)
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/cctest/test-conversions.cc b/deps/v8/test/cctest/test-conversions.cc
index 97f2880087..88ba562376 100644
--- a/deps/v8/test/cctest/test-conversions.cc
+++ b/deps/v8/test/cctest/test-conversions.cc
@@ -32,7 +32,8 @@
#include "src/heap/factory-inl.h"
#include "src/isolate.h"
#include "src/objects.h"
-#include "src/unicode-cache.h"
+#include "src/objects/heap-number-inl.h"
+#include "src/objects/smi.h"
#include "src/v8.h"
#include "test/cctest/cctest.h"
@@ -40,207 +41,211 @@ namespace v8 {
namespace internal {
TEST(Hex) {
- UnicodeCache uc;
- CHECK_EQ(0.0, StringToDouble(&uc, "0x0", ALLOW_HEX | ALLOW_IMPLICIT_OCTAL));
- CHECK_EQ(0.0, StringToDouble(&uc, "0X0", ALLOW_HEX | ALLOW_IMPLICIT_OCTAL));
- CHECK_EQ(1.0, StringToDouble(&uc, "0x1", ALLOW_HEX | ALLOW_IMPLICIT_OCTAL));
- CHECK_EQ(16.0, StringToDouble(&uc, "0x10", ALLOW_HEX | ALLOW_IMPLICIT_OCTAL));
- CHECK_EQ(255.0,
- StringToDouble(&uc, "0xFF", ALLOW_HEX | ALLOW_IMPLICIT_OCTAL));
- CHECK_EQ(175.0, StringToDouble(&uc, "0xAF",
- ALLOW_HEX | ALLOW_IMPLICIT_OCTAL));
-
- CHECK_EQ(0.0, StringToDouble(&uc, "0x0", ALLOW_HEX));
- CHECK_EQ(0.0, StringToDouble(&uc, "0X0", ALLOW_HEX));
- CHECK_EQ(1.0, StringToDouble(&uc, "0x1", ALLOW_HEX));
- CHECK_EQ(16.0, StringToDouble(&uc, "0x10", ALLOW_HEX));
- CHECK_EQ(255.0, StringToDouble(&uc, "0xFF", ALLOW_HEX));
- CHECK_EQ(175.0, StringToDouble(&uc, "0xAF", ALLOW_HEX));
+ CHECK_EQ(0.0, StringToDouble("0x0", ALLOW_HEX | ALLOW_IMPLICIT_OCTAL));
+ CHECK_EQ(0.0, StringToDouble("0X0", ALLOW_HEX | ALLOW_IMPLICIT_OCTAL));
+ CHECK_EQ(1.0, StringToDouble("0x1", ALLOW_HEX | ALLOW_IMPLICIT_OCTAL));
+ CHECK_EQ(16.0, StringToDouble("0x10", ALLOW_HEX | ALLOW_IMPLICIT_OCTAL));
+ CHECK_EQ(255.0, StringToDouble("0xFF", ALLOW_HEX | ALLOW_IMPLICIT_OCTAL));
+ CHECK_EQ(175.0, StringToDouble("0xAF", ALLOW_HEX | ALLOW_IMPLICIT_OCTAL));
+
+ CHECK_EQ(0.0, StringToDouble("0x0", ALLOW_HEX));
+ CHECK_EQ(0.0, StringToDouble("0X0", ALLOW_HEX));
+ CHECK_EQ(1.0, StringToDouble("0x1", ALLOW_HEX));
+ CHECK_EQ(16.0, StringToDouble("0x10", ALLOW_HEX));
+ CHECK_EQ(255.0, StringToDouble("0xFF", ALLOW_HEX));
+ CHECK_EQ(175.0, StringToDouble("0xAF", ALLOW_HEX));
}
TEST(Octal) {
- UnicodeCache uc;
- CHECK_EQ(0.0, StringToDouble(&uc, "0o0", ALLOW_OCTAL | ALLOW_IMPLICIT_OCTAL));
- CHECK_EQ(0.0, StringToDouble(&uc, "0O0", ALLOW_OCTAL | ALLOW_IMPLICIT_OCTAL));
- CHECK_EQ(1.0, StringToDouble(&uc, "0o1", ALLOW_OCTAL | ALLOW_IMPLICIT_OCTAL));
- CHECK_EQ(7.0, StringToDouble(&uc, "0o7", ALLOW_OCTAL | ALLOW_IMPLICIT_OCTAL));
- CHECK_EQ(8.0, StringToDouble(&uc, "0o10",
- ALLOW_OCTAL | ALLOW_IMPLICIT_OCTAL));
- CHECK_EQ(63.0, StringToDouble(&uc, "0o77",
- ALLOW_OCTAL | ALLOW_IMPLICIT_OCTAL));
-
- CHECK_EQ(0.0, StringToDouble(&uc, "0o0", ALLOW_OCTAL));
- CHECK_EQ(0.0, StringToDouble(&uc, "0O0", ALLOW_OCTAL));
- CHECK_EQ(1.0, StringToDouble(&uc, "0o1", ALLOW_OCTAL));
- CHECK_EQ(7.0, StringToDouble(&uc, "0o7", ALLOW_OCTAL));
- CHECK_EQ(8.0, StringToDouble(&uc, "0o10", ALLOW_OCTAL));
- CHECK_EQ(63.0, StringToDouble(&uc, "0o77", ALLOW_OCTAL));
+ CHECK_EQ(0.0, StringToDouble("0o0", ALLOW_OCTAL | ALLOW_IMPLICIT_OCTAL));
+ CHECK_EQ(0.0, StringToDouble("0O0", ALLOW_OCTAL | ALLOW_IMPLICIT_OCTAL));
+ CHECK_EQ(1.0, StringToDouble("0o1", ALLOW_OCTAL | ALLOW_IMPLICIT_OCTAL));
+ CHECK_EQ(7.0, StringToDouble("0o7", ALLOW_OCTAL | ALLOW_IMPLICIT_OCTAL));
+ CHECK_EQ(8.0, StringToDouble("0o10", ALLOW_OCTAL | ALLOW_IMPLICIT_OCTAL));
+ CHECK_EQ(63.0, StringToDouble("0o77", ALLOW_OCTAL | ALLOW_IMPLICIT_OCTAL));
+
+ CHECK_EQ(0.0, StringToDouble("0o0", ALLOW_OCTAL));
+ CHECK_EQ(0.0, StringToDouble("0O0", ALLOW_OCTAL));
+ CHECK_EQ(1.0, StringToDouble("0o1", ALLOW_OCTAL));
+ CHECK_EQ(7.0, StringToDouble("0o7", ALLOW_OCTAL));
+ CHECK_EQ(8.0, StringToDouble("0o10", ALLOW_OCTAL));
+ CHECK_EQ(63.0, StringToDouble("0o77", ALLOW_OCTAL));
}
TEST(ImplicitOctal) {
- UnicodeCache uc;
- CHECK_EQ(0.0, StringToDouble(&uc, "0", ALLOW_HEX | ALLOW_IMPLICIT_OCTAL));
- CHECK_EQ(0.0, StringToDouble(&uc, "00", ALLOW_HEX | ALLOW_IMPLICIT_OCTAL));
- CHECK_EQ(1.0, StringToDouble(&uc, "01", ALLOW_HEX | ALLOW_IMPLICIT_OCTAL));
- CHECK_EQ(7.0, StringToDouble(&uc, "07", ALLOW_HEX | ALLOW_IMPLICIT_OCTAL));
- CHECK_EQ(8.0, StringToDouble(&uc, "010", ALLOW_HEX | ALLOW_IMPLICIT_OCTAL));
- CHECK_EQ(63.0, StringToDouble(&uc, "077", ALLOW_HEX | ALLOW_IMPLICIT_OCTAL));
-
- CHECK_EQ(0.0, StringToDouble(&uc, "0", ALLOW_HEX));
- CHECK_EQ(0.0, StringToDouble(&uc, "00", ALLOW_HEX));
- CHECK_EQ(1.0, StringToDouble(&uc, "01", ALLOW_HEX));
- CHECK_EQ(7.0, StringToDouble(&uc, "07", ALLOW_HEX));
- CHECK_EQ(10.0, StringToDouble(&uc, "010", ALLOW_HEX));
- CHECK_EQ(77.0, StringToDouble(&uc, "077", ALLOW_HEX));
+ CHECK_EQ(0.0, StringToDouble("0", ALLOW_HEX | ALLOW_IMPLICIT_OCTAL));
+ CHECK_EQ(0.0, StringToDouble("00", ALLOW_HEX | ALLOW_IMPLICIT_OCTAL));
+ CHECK_EQ(1.0, StringToDouble("01", ALLOW_HEX | ALLOW_IMPLICIT_OCTAL));
+ CHECK_EQ(7.0, StringToDouble("07", ALLOW_HEX | ALLOW_IMPLICIT_OCTAL));
+ CHECK_EQ(8.0, StringToDouble("010", ALLOW_HEX | ALLOW_IMPLICIT_OCTAL));
+ CHECK_EQ(63.0, StringToDouble("077", ALLOW_HEX | ALLOW_IMPLICIT_OCTAL));
+
+ CHECK_EQ(0.0, StringToDouble("0", ALLOW_HEX));
+ CHECK_EQ(0.0, StringToDouble("00", ALLOW_HEX));
+ CHECK_EQ(1.0, StringToDouble("01", ALLOW_HEX));
+ CHECK_EQ(7.0, StringToDouble("07", ALLOW_HEX));
+ CHECK_EQ(10.0, StringToDouble("010", ALLOW_HEX));
+ CHECK_EQ(77.0, StringToDouble("077", ALLOW_HEX));
const double x = 010000000000; // Power of 2, no rounding errors.
- CHECK_EQ(x * x * x * x * x, StringToDouble(&uc, "01" "0000000000" "0000000000"
- "0000000000" "0000000000" "0000000000", ALLOW_IMPLICIT_OCTAL));
+ CHECK_EQ(x * x * x * x * x, StringToDouble("01"
+ "0000000000"
+ "0000000000"
+ "0000000000"
+ "0000000000"
+ "0000000000",
+ ALLOW_IMPLICIT_OCTAL));
}
TEST(Binary) {
- UnicodeCache uc;
- CHECK_EQ(0.0, StringToDouble(&uc, "0b0",
- ALLOW_BINARY | ALLOW_IMPLICIT_OCTAL));
- CHECK_EQ(0.0, StringToDouble(&uc, "0B0",
- ALLOW_BINARY | ALLOW_IMPLICIT_OCTAL));
- CHECK_EQ(1.0, StringToDouble(&uc, "0b1",
- ALLOW_BINARY | ALLOW_IMPLICIT_OCTAL));
- CHECK_EQ(2.0, StringToDouble(&uc, "0b10",
- ALLOW_BINARY | ALLOW_IMPLICIT_OCTAL));
- CHECK_EQ(3.0, StringToDouble(&uc, "0b11",
- ALLOW_BINARY | ALLOW_IMPLICIT_OCTAL));
-
- CHECK_EQ(0.0, StringToDouble(&uc, "0b0", ALLOW_BINARY));
- CHECK_EQ(0.0, StringToDouble(&uc, "0B0", ALLOW_BINARY));
- CHECK_EQ(1.0, StringToDouble(&uc, "0b1", ALLOW_BINARY));
- CHECK_EQ(2.0, StringToDouble(&uc, "0b10", ALLOW_BINARY));
- CHECK_EQ(3.0, StringToDouble(&uc, "0b11", ALLOW_BINARY));
+ CHECK_EQ(0.0, StringToDouble("0b0", ALLOW_BINARY | ALLOW_IMPLICIT_OCTAL));
+ CHECK_EQ(0.0, StringToDouble("0B0", ALLOW_BINARY | ALLOW_IMPLICIT_OCTAL));
+ CHECK_EQ(1.0, StringToDouble("0b1", ALLOW_BINARY | ALLOW_IMPLICIT_OCTAL));
+ CHECK_EQ(2.0, StringToDouble("0b10", ALLOW_BINARY | ALLOW_IMPLICIT_OCTAL));
+ CHECK_EQ(3.0, StringToDouble("0b11", ALLOW_BINARY | ALLOW_IMPLICIT_OCTAL));
+
+ CHECK_EQ(0.0, StringToDouble("0b0", ALLOW_BINARY));
+ CHECK_EQ(0.0, StringToDouble("0B0", ALLOW_BINARY));
+ CHECK_EQ(1.0, StringToDouble("0b1", ALLOW_BINARY));
+ CHECK_EQ(2.0, StringToDouble("0b10", ALLOW_BINARY));
+ CHECK_EQ(3.0, StringToDouble("0b11", ALLOW_BINARY));
}
TEST(MalformedOctal) {
- UnicodeCache uc;
- CHECK_EQ(8.0, StringToDouble(&uc, "08", ALLOW_HEX | ALLOW_IMPLICIT_OCTAL));
- CHECK_EQ(81.0, StringToDouble(&uc, "081", ALLOW_HEX | ALLOW_IMPLICIT_OCTAL));
- CHECK_EQ(78.0, StringToDouble(&uc, "078", ALLOW_HEX | ALLOW_IMPLICIT_OCTAL));
-
- CHECK(std::isnan(StringToDouble(&uc, "07.7",
- ALLOW_HEX | ALLOW_IMPLICIT_OCTAL)));
- CHECK(std::isnan(StringToDouble(&uc, "07.8",
- ALLOW_HEX | ALLOW_IMPLICIT_OCTAL)));
- CHECK(std::isnan(StringToDouble(&uc, "07e8",
- ALLOW_HEX | ALLOW_IMPLICIT_OCTAL)));
- CHECK(std::isnan(StringToDouble(&uc, "07e7",
- ALLOW_HEX | ALLOW_IMPLICIT_OCTAL)));
-
- CHECK_EQ(8.7, StringToDouble(&uc, "08.7", ALLOW_HEX | ALLOW_IMPLICIT_OCTAL));
- CHECK_EQ(8e7, StringToDouble(&uc, "08e7", ALLOW_HEX | ALLOW_IMPLICIT_OCTAL));
-
- CHECK_EQ(0.001, StringToDouble(&uc, "0.001",
- ALLOW_HEX | ALLOW_IMPLICIT_OCTAL));
- CHECK_EQ(0.713, StringToDouble(&uc, "0.713",
- ALLOW_HEX | ALLOW_IMPLICIT_OCTAL));
-
- CHECK_EQ(8.0, StringToDouble(&uc, "08", ALLOW_HEX));
- CHECK_EQ(81.0, StringToDouble(&uc, "081", ALLOW_HEX));
- CHECK_EQ(78.0, StringToDouble(&uc, "078", ALLOW_HEX));
-
- CHECK_EQ(7.7, StringToDouble(&uc, "07.7", ALLOW_HEX));
- CHECK_EQ(7.8, StringToDouble(&uc, "07.8", ALLOW_HEX));
- CHECK_EQ(7e8, StringToDouble(&uc, "07e8", ALLOW_HEX));
- CHECK_EQ(7e7, StringToDouble(&uc, "07e7", ALLOW_HEX));
-
- CHECK_EQ(8.7, StringToDouble(&uc, "08.7", ALLOW_HEX));
- CHECK_EQ(8e7, StringToDouble(&uc, "08e7", ALLOW_HEX));
-
- CHECK_EQ(0.001, StringToDouble(&uc, "0.001", ALLOW_HEX));
- CHECK_EQ(0.713, StringToDouble(&uc, "0.713", ALLOW_HEX));
+ CHECK_EQ(8.0, StringToDouble("08", ALLOW_HEX | ALLOW_IMPLICIT_OCTAL));
+ CHECK_EQ(81.0, StringToDouble("081", ALLOW_HEX | ALLOW_IMPLICIT_OCTAL));
+ CHECK_EQ(78.0, StringToDouble("078", ALLOW_HEX | ALLOW_IMPLICIT_OCTAL));
+
+ CHECK(std::isnan(StringToDouble("07.7", ALLOW_HEX | ALLOW_IMPLICIT_OCTAL)));
+ CHECK(std::isnan(StringToDouble("07.8", ALLOW_HEX | ALLOW_IMPLICIT_OCTAL)));
+ CHECK(std::isnan(StringToDouble("07e8", ALLOW_HEX | ALLOW_IMPLICIT_OCTAL)));
+ CHECK(std::isnan(StringToDouble("07e7", ALLOW_HEX | ALLOW_IMPLICIT_OCTAL)));
+
+ CHECK_EQ(8.7, StringToDouble("08.7", ALLOW_HEX | ALLOW_IMPLICIT_OCTAL));
+ CHECK_EQ(8e7, StringToDouble("08e7", ALLOW_HEX | ALLOW_IMPLICIT_OCTAL));
+
+ CHECK_EQ(0.001, StringToDouble("0.001", ALLOW_HEX | ALLOW_IMPLICIT_OCTAL));
+ CHECK_EQ(0.713, StringToDouble("0.713", ALLOW_HEX | ALLOW_IMPLICIT_OCTAL));
+
+ CHECK_EQ(8.0, StringToDouble("08", ALLOW_HEX));
+ CHECK_EQ(81.0, StringToDouble("081", ALLOW_HEX));
+ CHECK_EQ(78.0, StringToDouble("078", ALLOW_HEX));
+
+ CHECK_EQ(7.7, StringToDouble("07.7", ALLOW_HEX));
+ CHECK_EQ(7.8, StringToDouble("07.8", ALLOW_HEX));
+ CHECK_EQ(7e8, StringToDouble("07e8", ALLOW_HEX));
+ CHECK_EQ(7e7, StringToDouble("07e7", ALLOW_HEX));
+
+ CHECK_EQ(8.7, StringToDouble("08.7", ALLOW_HEX));
+ CHECK_EQ(8e7, StringToDouble("08e7", ALLOW_HEX));
+
+ CHECK_EQ(0.001, StringToDouble("0.001", ALLOW_HEX));
+ CHECK_EQ(0.713, StringToDouble("0.713", ALLOW_HEX));
}
TEST(TrailingJunk) {
- UnicodeCache uc;
- CHECK_EQ(8.0, StringToDouble(&uc, "8q", ALLOW_TRAILING_JUNK));
- CHECK_EQ(63.0, StringToDouble(&uc, "077qqq",
- ALLOW_IMPLICIT_OCTAL | ALLOW_TRAILING_JUNK));
- CHECK_EQ(10.0, StringToDouble(&uc, "10e",
- ALLOW_IMPLICIT_OCTAL | ALLOW_TRAILING_JUNK));
- CHECK_EQ(10.0, StringToDouble(&uc, "10e-",
+ CHECK_EQ(8.0, StringToDouble("8q", ALLOW_TRAILING_JUNK));
+ CHECK_EQ(63.0, StringToDouble("077qqq",
ALLOW_IMPLICIT_OCTAL | ALLOW_TRAILING_JUNK));
+ CHECK_EQ(10.0,
+ StringToDouble("10e", ALLOW_IMPLICIT_OCTAL | ALLOW_TRAILING_JUNK));
+ CHECK_EQ(10.0,
+ StringToDouble("10e-", ALLOW_IMPLICIT_OCTAL | ALLOW_TRAILING_JUNK));
}
TEST(NonStrDecimalLiteral) {
- UnicodeCache uc;
- CHECK(std::isnan(StringToDouble(&uc, " ", NO_FLAGS,
- std::numeric_limits<double>::quiet_NaN())));
- CHECK(std::isnan(StringToDouble(&uc, "", NO_FLAGS,
- std::numeric_limits<double>::quiet_NaN())));
- CHECK(std::isnan(StringToDouble(&uc, " ", NO_FLAGS,
- std::numeric_limits<double>::quiet_NaN())));
- CHECK_EQ(0.0, StringToDouble(&uc, "", NO_FLAGS));
- CHECK_EQ(0.0, StringToDouble(&uc, " ", NO_FLAGS));
+ CHECK(std::isnan(
+ StringToDouble(" ", NO_FLAGS, std::numeric_limits<double>::quiet_NaN())));
+ CHECK(std::isnan(
+ StringToDouble("", NO_FLAGS, std::numeric_limits<double>::quiet_NaN())));
+ CHECK(std::isnan(
+ StringToDouble(" ", NO_FLAGS, std::numeric_limits<double>::quiet_NaN())));
+ CHECK_EQ(0.0, StringToDouble("", NO_FLAGS));
+ CHECK_EQ(0.0, StringToDouble(" ", NO_FLAGS));
}
TEST(IntegerStrLiteral) {
- UnicodeCache uc;
- CHECK_EQ(0.0, StringToDouble(&uc, "0.0", NO_FLAGS));
- CHECK_EQ(0.0, StringToDouble(&uc, "0", NO_FLAGS));
- CHECK_EQ(0.0, StringToDouble(&uc, "00", NO_FLAGS));
- CHECK_EQ(0.0, StringToDouble(&uc, "000", NO_FLAGS));
- CHECK_EQ(1.0, StringToDouble(&uc, "1", NO_FLAGS));
- CHECK_EQ(-1.0, StringToDouble(&uc, "-1", NO_FLAGS));
- CHECK_EQ(-1.0, StringToDouble(&uc, " -1 ", NO_FLAGS));
- CHECK_EQ(1.0, StringToDouble(&uc, " +1 ", NO_FLAGS));
- CHECK(std::isnan(StringToDouble(&uc, " - 1 ", NO_FLAGS)));
- CHECK(std::isnan(StringToDouble(&uc, " + 1 ", NO_FLAGS)));
-
- CHECK_EQ(0.0, StringToDouble(&uc, "0e0", ALLOW_HEX | ALLOW_IMPLICIT_OCTAL));
- CHECK_EQ(0.0, StringToDouble(&uc, "0e1", ALLOW_HEX | ALLOW_IMPLICIT_OCTAL));
- CHECK_EQ(0.0, StringToDouble(&uc, "0e-1", ALLOW_HEX | ALLOW_IMPLICIT_OCTAL));
- CHECK_EQ(0.0, StringToDouble(&uc, "0e-100000",
- ALLOW_HEX | ALLOW_IMPLICIT_OCTAL));
- CHECK_EQ(0.0, StringToDouble(&uc, "0e+100000",
- ALLOW_HEX | ALLOW_IMPLICIT_OCTAL));
- CHECK_EQ(0.0, StringToDouble(&uc, "0.", ALLOW_HEX | ALLOW_IMPLICIT_OCTAL));
+ CHECK_EQ(0.0, StringToDouble("0.0", NO_FLAGS));
+ CHECK_EQ(0.0, StringToDouble("0", NO_FLAGS));
+ CHECK_EQ(0.0, StringToDouble("00", NO_FLAGS));
+ CHECK_EQ(0.0, StringToDouble("000", NO_FLAGS));
+ CHECK_EQ(1.0, StringToDouble("1", NO_FLAGS));
+ CHECK_EQ(-1.0, StringToDouble("-1", NO_FLAGS));
+ CHECK_EQ(-1.0, StringToDouble(" -1 ", NO_FLAGS));
+ CHECK_EQ(1.0, StringToDouble(" +1 ", NO_FLAGS));
+ CHECK(std::isnan(StringToDouble(" - 1 ", NO_FLAGS)));
+ CHECK(std::isnan(StringToDouble(" + 1 ", NO_FLAGS)));
+
+ CHECK_EQ(0.0, StringToDouble("0e0", ALLOW_HEX | ALLOW_IMPLICIT_OCTAL));
+ CHECK_EQ(0.0, StringToDouble("0e1", ALLOW_HEX | ALLOW_IMPLICIT_OCTAL));
+ CHECK_EQ(0.0, StringToDouble("0e-1", ALLOW_HEX | ALLOW_IMPLICIT_OCTAL));
+ CHECK_EQ(0.0, StringToDouble("0e-100000", ALLOW_HEX | ALLOW_IMPLICIT_OCTAL));
+ CHECK_EQ(0.0, StringToDouble("0e+100000", ALLOW_HEX | ALLOW_IMPLICIT_OCTAL));
+ CHECK_EQ(0.0, StringToDouble("0.", ALLOW_HEX | ALLOW_IMPLICIT_OCTAL));
}
TEST(LongNumberStr) {
- UnicodeCache uc;
- CHECK_EQ(1e10, StringToDouble(&uc, "1" "0000000000", NO_FLAGS));
- CHECK_EQ(1e20, StringToDouble(&uc, "1" "0000000000" "0000000000", NO_FLAGS));
-
- CHECK_EQ(1e60, StringToDouble(&uc, "1" "0000000000" "0000000000" "0000000000"
- "0000000000" "0000000000" "0000000000", NO_FLAGS));
-
- CHECK_EQ(1e-2, StringToDouble(&uc, "." "0" "1", NO_FLAGS));
- CHECK_EQ(1e-11, StringToDouble(&uc, "." "0000000000" "1", NO_FLAGS));
- CHECK_EQ(1e-21, StringToDouble(&uc, "." "0000000000" "0000000000" "1",
+ CHECK_EQ(1e10, StringToDouble("1"
+ "0000000000",
+ NO_FLAGS));
+ CHECK_EQ(1e20, StringToDouble("1"
+ "0000000000"
+ "0000000000",
+ NO_FLAGS));
+
+ CHECK_EQ(1e60, StringToDouble("1"
+ "0000000000"
+ "0000000000"
+ "0000000000"
+ "0000000000"
+ "0000000000"
+ "0000000000",
+ NO_FLAGS));
+
+ CHECK_EQ(1e-2, StringToDouble("."
+ "0"
+ "1",
+ NO_FLAGS));
+ CHECK_EQ(1e-11, StringToDouble("."
+ "0000000000"
+ "1",
+ NO_FLAGS));
+ CHECK_EQ(1e-21, StringToDouble("."
+ "0000000000"
+ "0000000000"
+ "1",
NO_FLAGS));
- CHECK_EQ(1e-61, StringToDouble(&uc, "." "0000000000" "0000000000" "0000000000"
- "0000000000" "0000000000" "0000000000" "1", NO_FLAGS));
-
+ CHECK_EQ(1e-61, StringToDouble("."
+ "0000000000"
+ "0000000000"
+ "0000000000"
+ "0000000000"
+ "0000000000"
+ "0000000000"
+ "1",
+ NO_FLAGS));
// x = 24414062505131248.0 and y = 24414062505131252.0 are representable in
  // double. Check that z = (x + y) / 2 is rounded to x...
CHECK_EQ(24414062505131248.0,
- StringToDouble(&uc, "24414062505131250.0", NO_FLAGS));
+ StringToDouble("24414062505131250.0", NO_FLAGS));
// ... and z = (x + y) / 2 + delta is rounded to y.
CHECK_EQ(24414062505131252.0,
- StringToDouble(&uc, "24414062505131250.000000001", NO_FLAGS));
+ StringToDouble("24414062505131250.000000001", NO_FLAGS));
}
TEST(MaximumSignificantDigits) {
- UnicodeCache uc;
char num[] =
"4.4501477170144020250819966727949918635852426585926051135169509"
"122872622312493126406953054127118942431783801370080830523154578"
@@ -256,17 +261,16 @@ TEST(MaximumSignificantDigits) {
"847003580761626016356864581135848683152156368691976240370422601"
"6998291015625000000000000000000000000000000000e-308";
- CHECK_EQ(4.4501477170144017780491e-308, StringToDouble(&uc, num, NO_FLAGS));
+ CHECK_EQ(4.4501477170144017780491e-308, StringToDouble(num, NO_FLAGS));
// Changes the result of strtod (at least in glibc implementation).
num[sizeof(num) - 8] = '1';
- CHECK_EQ(4.4501477170144022721148e-308, StringToDouble(&uc, num, NO_FLAGS));
+ CHECK_EQ(4.4501477170144022721148e-308, StringToDouble(num, NO_FLAGS));
}
TEST(MinimumExponent) {
- UnicodeCache uc;
// Same test but with different point-position.
char num[] =
"445014771701440202508199667279499186358524265859260511351695091"
@@ -283,31 +287,29 @@ TEST(MinimumExponent) {
"470035807616260163568645811358486831521563686919762403704226016"
"998291015625000000000000000000000000000000000e-1108";
- CHECK_EQ(4.4501477170144017780491e-308, StringToDouble(&uc, num, NO_FLAGS));
+ CHECK_EQ(4.4501477170144017780491e-308, StringToDouble(num, NO_FLAGS));
// Changes the result of strtod (at least in glibc implementation).
num[sizeof(num) - 8] = '1';
- CHECK_EQ(4.4501477170144022721148e-308, StringToDouble(&uc, num, NO_FLAGS));
+ CHECK_EQ(4.4501477170144022721148e-308, StringToDouble(num, NO_FLAGS));
}
TEST(MaximumExponent) {
- UnicodeCache uc;
char num[] = "0.16e309";
- CHECK_EQ(1.59999999999999997765e+308, StringToDouble(&uc, num, NO_FLAGS));
+ CHECK_EQ(1.59999999999999997765e+308, StringToDouble(num, NO_FLAGS));
}
TEST(ExponentNumberStr) {
- UnicodeCache uc;
- CHECK_EQ(1e1, StringToDouble(&uc, "1e1", NO_FLAGS));
- CHECK_EQ(1e1, StringToDouble(&uc, "1e+1", NO_FLAGS));
- CHECK_EQ(1e-1, StringToDouble(&uc, "1e-1", NO_FLAGS));
- CHECK_EQ(1e100, StringToDouble(&uc, "1e+100", NO_FLAGS));
- CHECK_EQ(1e-100, StringToDouble(&uc, "1e-100", NO_FLAGS));
- CHECK_EQ(1e-106, StringToDouble(&uc, ".000001e-100", NO_FLAGS));
+ CHECK_EQ(1e1, StringToDouble("1e1", NO_FLAGS));
+ CHECK_EQ(1e1, StringToDouble("1e+1", NO_FLAGS));
+ CHECK_EQ(1e-1, StringToDouble("1e-1", NO_FLAGS));
+ CHECK_EQ(1e100, StringToDouble("1e+100", NO_FLAGS));
+ CHECK_EQ(1e-100, StringToDouble("1e-100", NO_FLAGS));
+ CHECK_EQ(1e-106, StringToDouble(".000001e-100", NO_FLAGS));
}
@@ -371,7 +373,7 @@ TEST(BitField64) {
static void CheckNonArrayIndex(bool expected, const char* chars) {
auto isolate = CcTest::i_isolate();
auto string = isolate->factory()->NewStringFromAsciiChecked(chars);
- CHECK_EQ(expected, IsSpecialIndex(isolate->unicode_cache(), *string));
+ CHECK_EQ(expected, IsSpecialIndex(*string));
}
@@ -416,7 +418,7 @@ TEST(NoHandlesForTryNumberToSize) {
size_t result = 0;
{
SealHandleScope no_handles(isolate);
- Smi* smi = Smi::FromInt(1);
+ Smi smi = Smi::FromInt(1);
CHECK(TryNumberToSize(smi, &result));
CHECK_EQ(result, 1u);
}
diff --git a/deps/v8/test/cctest/test-cpu-profiler.cc b/deps/v8/test/cctest/test-cpu-profiler.cc
index e08bec375e..3a3063ed3c 100644
--- a/deps/v8/test/cctest/test-cpu-profiler.cc
+++ b/deps/v8/test/cctest/test-cpu-profiler.cc
@@ -37,6 +37,7 @@
#include "src/base/platform/platform.h"
#include "src/deoptimizer.h"
#include "src/libplatform/default-platform.h"
+#include "src/log.h"
#include "src/objects-inl.h"
#include "src/profiler/cpu-profiler-inl.h"
#include "src/profiler/profiler-listener.h"
@@ -79,7 +80,7 @@ TEST(StartStop) {
CpuProfilesCollection profiles(isolate);
ProfileGenerator generator(&profiles);
std::unique_ptr<ProfilerEventsProcessor> processor(
- new ProfilerEventsProcessor(isolate, &generator,
+ new SamplingEventsProcessor(isolate, &generator,
v8::base::TimeDelta::FromMicroseconds(100)));
processor->Start();
processor->StopSynchronously();
@@ -89,19 +90,20 @@ static void EnqueueTickSampleEvent(ProfilerEventsProcessor* proc,
i::Address frame1,
i::Address frame2 = kNullAddress,
i::Address frame3 = kNullAddress) {
- v8::TickSample* sample = proc->StartTickSample();
- sample->pc = reinterpret_cast<void*>(frame1);
- sample->tos = reinterpret_cast<void*>(frame1);
- sample->frames_count = 0;
+ v8::internal::TickSample sample;
+ sample.pc = reinterpret_cast<void*>(frame1);
+ sample.tos = reinterpret_cast<void*>(frame1);
+ sample.frames_count = 0;
if (frame2 != kNullAddress) {
- sample->stack[0] = reinterpret_cast<void*>(frame2);
- sample->frames_count = 1;
+ sample.stack[0] = reinterpret_cast<void*>(frame2);
+ sample.frames_count = 1;
}
if (frame3 != kNullAddress) {
- sample->stack[1] = reinterpret_cast<void*>(frame3);
- sample->frames_count = 2;
+ sample.stack[1] = reinterpret_cast<void*>(frame3);
+ sample.frames_count = 2;
}
- proc->FinishTickSample();
+ sample.timestamp = base::TimeTicks::HighResolutionNow();
+ proc->AddSample(sample);
}
namespace {
@@ -123,7 +125,7 @@ class TestSetup {
} // namespace
-i::AbstractCode* CreateCode(LocalContext* env) {
+i::AbstractCode CreateCode(LocalContext* env) {
static int counter = 0;
i::EmbeddedVector<char, 256> script;
i::EmbeddedVector<char, 32> name;
@@ -153,19 +155,17 @@ TEST(CodeEvents) {
i::HandleScope scope(isolate);
- i::AbstractCode* aaa_code = CreateCode(&env);
- i::AbstractCode* comment_code = CreateCode(&env);
- i::AbstractCode* comment2_code = CreateCode(&env);
- i::AbstractCode* moved_code = CreateCode(&env);
+ i::AbstractCode aaa_code = CreateCode(&env);
+ i::AbstractCode comment_code = CreateCode(&env);
+ i::AbstractCode comment2_code = CreateCode(&env);
+ i::AbstractCode moved_code = CreateCode(&env);
CpuProfilesCollection* profiles = new CpuProfilesCollection(isolate);
ProfileGenerator* generator = new ProfileGenerator(profiles);
- ProfilerEventsProcessor* processor = new ProfilerEventsProcessor(
+ ProfilerEventsProcessor* processor = new SamplingEventsProcessor(
isolate, generator, v8::base::TimeDelta::FromMicroseconds(100));
- CpuProfiler profiler(isolate, profiles, generator, processor);
- profiles->StartProfiling("", false);
processor->Start();
- ProfilerListener profiler_listener(isolate, &profiler);
+ ProfilerListener profiler_listener(isolate, processor);
isolate->logger()->AddCodeEventListener(&profiler_listener);
// Enqueue code creation events.
@@ -215,19 +215,19 @@ TEST(TickEvents) {
i::Isolate* isolate = CcTest::i_isolate();
i::HandleScope scope(isolate);
- i::AbstractCode* frame1_code = CreateCode(&env);
- i::AbstractCode* frame2_code = CreateCode(&env);
- i::AbstractCode* frame3_code = CreateCode(&env);
+ i::AbstractCode frame1_code = CreateCode(&env);
+ i::AbstractCode frame2_code = CreateCode(&env);
+ i::AbstractCode frame3_code = CreateCode(&env);
CpuProfilesCollection* profiles = new CpuProfilesCollection(isolate);
ProfileGenerator* generator = new ProfileGenerator(profiles);
ProfilerEventsProcessor* processor =
- new ProfilerEventsProcessor(CcTest::i_isolate(), generator,
+ new SamplingEventsProcessor(CcTest::i_isolate(), generator,
v8::base::TimeDelta::FromMicroseconds(100));
CpuProfiler profiler(isolate, profiles, generator, processor);
profiles->StartProfiling("", false);
processor->Start();
- ProfilerListener profiler_listener(isolate, &profiler);
+ ProfilerListener profiler_listener(isolate, processor);
isolate->logger()->AddCodeEventListener(&profiler_listener);
profiler_listener.CodeCreateEvent(i::Logger::BUILTIN_TAG, frame1_code, "bbb");
@@ -286,28 +286,29 @@ TEST(Issue1398) {
i::Isolate* isolate = CcTest::i_isolate();
i::HandleScope scope(isolate);
- i::AbstractCode* code = CreateCode(&env);
+ i::AbstractCode code = CreateCode(&env);
CpuProfilesCollection* profiles = new CpuProfilesCollection(isolate);
ProfileGenerator* generator = new ProfileGenerator(profiles);
ProfilerEventsProcessor* processor =
- new ProfilerEventsProcessor(CcTest::i_isolate(), generator,
+ new SamplingEventsProcessor(CcTest::i_isolate(), generator,
v8::base::TimeDelta::FromMicroseconds(100));
CpuProfiler profiler(isolate, profiles, generator, processor);
profiles->StartProfiling("", false);
processor->Start();
- ProfilerListener profiler_listener(isolate, &profiler);
+ ProfilerListener profiler_listener(isolate, processor);
profiler_listener.CodeCreateEvent(i::Logger::BUILTIN_TAG, code, "bbb");
- v8::TickSample* sample = processor->StartTickSample();
- sample->pc = reinterpret_cast<void*>(code->InstructionStart());
- sample->tos = nullptr;
- sample->frames_count = v8::TickSample::kMaxFramesCount;
- for (unsigned i = 0; i < sample->frames_count; ++i) {
- sample->stack[i] = reinterpret_cast<void*>(code->InstructionStart());
+ v8::internal::TickSample sample;
+ sample.pc = reinterpret_cast<void*>(code->InstructionStart());
+ sample.tos = nullptr;
+ sample.frames_count = v8::TickSample::kMaxFramesCount;
+ for (unsigned i = 0; i < sample.frames_count; ++i) {
+ sample.stack[i] = reinterpret_cast<void*>(code->InstructionStart());
}
- processor->FinishTickSample();
+ sample.timestamp = base::TimeTicks::HighResolutionNow();
+ processor->AddSample(sample);
processor->StopSynchronously();
CpuProfile* profile = profiles->StopProfiling("");
@@ -459,7 +460,9 @@ v8::CpuProfile* ProfilerHelper::Run(v8::Local<v8::Function> function,
v8::internal::CpuProfiler* iprofiler =
reinterpret_cast<v8::internal::CpuProfiler*>(profiler_);
- v8::sampler::Sampler* sampler = iprofiler->processor()->sampler();
+ v8::sampler::Sampler* sampler =
+ reinterpret_cast<i::SamplingEventsProcessor*>(iprofiler->processor())
+ ->sampler();
sampler->StartCountingSamples();
do {
function->Call(context_, context_->Global(), argc, argv).ToLocalChecked();
@@ -1077,7 +1080,7 @@ static const char* bound_function_test_source =
TEST(BoundFunctionCall) {
v8::HandleScope scope(CcTest::isolate());
- v8::Local<v8::Context> env = CcTest::NewContext(PROFILER_EXTENSION);
+ v8::Local<v8::Context> env = CcTest::NewContext({PROFILER_EXTENSION_ID});
v8::Context::Scope context_scope(env);
CompileRun(bound_function_test_source);
@@ -1096,7 +1099,9 @@ TEST(BoundFunctionCall) {
// This tests checks distribution of the samples through the source lines.
static void TickLines(bool optimize) {
- if (!optimize) i::FLAG_opt = false;
+#ifndef V8_LITE_MODE
+ FLAG_opt = optimize;
+#endif // V8_LITE_MODE
CcTest::InitializeVM();
LocalContext env;
i::FLAG_allow_natives_syntax = true;
@@ -1132,24 +1137,24 @@ static void TickLines(bool optimize) {
i::Handle<i::JSFunction> func = i::Handle<i::JSFunction>::cast(
v8::Utils::OpenHandle(*GetFunction(env.local(), func_name)));
- CHECK(func->shared());
- CHECK(func->shared()->abstract_code());
+ CHECK(!func->shared().is_null());
+ CHECK(!func->shared()->abstract_code().is_null());
CHECK(!optimize || func->IsOptimized() ||
!CcTest::i_isolate()->use_optimizer());
- i::AbstractCode* code = func->abstract_code();
- CHECK(code);
+ i::AbstractCode code = func->abstract_code();
+ CHECK(!code.is_null());
i::Address code_address = code->raw_instruction_start();
- CHECK(code_address);
+ CHECK_NE(code_address, kNullAddress);
CpuProfilesCollection* profiles = new CpuProfilesCollection(isolate);
ProfileGenerator* generator = new ProfileGenerator(profiles);
ProfilerEventsProcessor* processor =
- new ProfilerEventsProcessor(CcTest::i_isolate(), generator,
+ new SamplingEventsProcessor(CcTest::i_isolate(), generator,
v8::base::TimeDelta::FromMicroseconds(100));
CpuProfiler profiler(isolate, profiles, generator, processor);
profiles->StartProfiling("", false);
processor->Start();
- ProfilerListener profiler_listener(isolate, &profiler);
+ ProfilerListener profiler_listener(isolate, processor);
// Enqueue code creation events.
i::Handle<i::String> str = factory->NewStringFromAsciiChecked(func_name);
@@ -1340,7 +1345,7 @@ static const char* cpu_profiler_deep_stack_test_source =
// 0 foo 21 #254 no reason
TEST(CpuProfileDeepStack) {
v8::HandleScope scope(CcTest::isolate());
- v8::Local<v8::Context> env = CcTest::NewContext(PROFILER_EXTENSION);
+ v8::Local<v8::Context> env = CcTest::NewContext({PROFILER_EXTENSION_ID});
v8::Context::Scope context_scope(env);
ProfilerHelper helper(env);
@@ -1398,7 +1403,7 @@ static void CallJsFunction(const v8::FunctionCallbackInfo<v8::Value>& info) {
TEST(JsNativeJsSample) {
i::FLAG_allow_natives_syntax = true;
v8::HandleScope scope(CcTest::isolate());
- v8::Local<v8::Context> env = CcTest::NewContext(PROFILER_EXTENSION);
+ v8::Local<v8::Context> env = CcTest::NewContext({PROFILER_EXTENSION_ID});
v8::Context::Scope context_scope(env);
v8::Local<v8::FunctionTemplate> func_template = v8::FunctionTemplate::New(
@@ -1451,7 +1456,7 @@ static const char* js_native_js_runtime_js_test_source =
TEST(JsNativeJsRuntimeJsSample) {
i::FLAG_allow_natives_syntax = true;
v8::HandleScope scope(CcTest::isolate());
- v8::Local<v8::Context> env = CcTest::NewContext(PROFILER_EXTENSION);
+ v8::Local<v8::Context> env = CcTest::NewContext({PROFILER_EXTENSION_ID});
v8::Context::Scope context_scope(env);
v8::Local<v8::FunctionTemplate> func_template = v8::FunctionTemplate::New(
@@ -1508,7 +1513,7 @@ static const char* js_native1_js_native2_js_test_source =
TEST(JsNative1JsNative2JsSample) {
i::FLAG_allow_natives_syntax = true;
v8::HandleScope scope(CcTest::isolate());
- v8::Local<v8::Context> env = CcTest::NewContext(PROFILER_EXTENSION);
+ v8::Local<v8::Context> env = CcTest::NewContext({PROFILER_EXTENSION_ID});
v8::Context::Scope context_scope(env);
v8::Local<v8::Function> func1 =
@@ -1554,7 +1559,7 @@ static void CallCollectSample(const v8::FunctionCallbackInfo<v8::Value>& info) {
TEST(CollectSampleAPI) {
v8::HandleScope scope(CcTest::isolate());
- v8::Local<v8::Context> env = CcTest::NewContext(PROFILER_EXTENSION);
+ v8::Local<v8::Context> env = CcTest::NewContext({PROFILER_EXTENSION_ID});
v8::Context::Scope context_scope(env);
v8::Local<v8::FunctionTemplate> func_template =
@@ -1608,7 +1613,7 @@ static const char* js_native_js_runtime_multiple_test_source =
TEST(JsNativeJsRuntimeJsSampleMultiple) {
i::FLAG_allow_natives_syntax = true;
v8::HandleScope scope(CcTest::isolate());
- v8::Local<v8::Context> env = CcTest::NewContext(PROFILER_EXTENSION);
+ v8::Local<v8::Context> env = CcTest::NewContext({PROFILER_EXTENSION_ID});
v8::Context::Scope context_scope(env);
v8::Local<v8::FunctionTemplate> func_template =
@@ -1673,7 +1678,7 @@ static const char* inlining_test_source =
TEST(Inlining) {
i::FLAG_allow_natives_syntax = true;
v8::HandleScope scope(CcTest::isolate());
- v8::Local<v8::Context> env = CcTest::NewContext(PROFILER_EXTENSION);
+ v8::Local<v8::Context> env = CcTest::NewContext({PROFILER_EXTENSION_ID});
v8::Context::Scope context_scope(env);
ProfilerHelper helper(env);
@@ -1697,6 +1702,135 @@ TEST(Inlining) {
profile->Delete();
}
+static const char* inlining_test_source2 = R"(
+ %NeverOptimizeFunction(action);
+ %NeverOptimizeFunction(start);
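+    // Two warm-up calls gather type feedback before the whole chain is
+    // optimized by the %OptimizeFunctionOnNextCall calls below.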
+ level1();
+ level1();
+ %OptimizeFunctionOnNextCall(level1);
+ %OptimizeFunctionOnNextCall(level2);
+ %OptimizeFunctionOnNextCall(level3);
+ %OptimizeFunctionOnNextCall(level4);
+ level1();
+ function action(n) {
+ var s = 0;
+ for (var i = 0; i < n; ++i) s += i*i*i;
+ return s;
+ }
+ function level4() {
+ action(100);
+ return action(100);
+ }
+ function level3() {
+ const a = level4();
+ const b = level4();
+ return a + b * 1.1;
+ }
+ function level2() {
+ return level3() * 2;
+ }
+ function level1() {
+ action(1);
+ action(200);
+ action(1);
+ return level2();
+ }
+ function start(n) {
+ while (--n)
+ level1();
+ };
+ )";
+
+// The simulator builds are extremely slow. We run them with fewer iterations.
+#ifdef USE_SIMULATOR
+const double load_factor = 0.01;
+#else
+const double load_factor = 1.0;
+#endif
+
+// [Top down]:
+// 0 (root):0 0 #1
+// 13 start:34 6 #3
+// bailed out due to 'Optimization is always disabled'
+// 19 level1:36 6 #4
+// 16 action:29 6 #14
+// bailed out due to 'Optimization is always disabled'
+// 2748 action:30 6 #10
+// bailed out due to 'Optimization is always disabled'
+// 18 action:31 6 #15
+// bailed out due to 'Optimization is always disabled'
+// 0 level2:32 6 #5
+// 0 level3:26 6 #6
+// 12 level4:22 6 #11
+// 1315 action:17 6 #13
+// bailed out due to 'Optimization is always disabled'
+// 1324 action:18 6 #12
+// bailed out due to 'Optimization is always disabled'
+// 16 level4:21 6 #7
+// 1268 action:17 6 #9
+// bailed out due to 'Optimization is always disabled'
+// 1322 action:18 6 #8
+// bailed out due to 'Optimization is always disabled'
+// 2 (program):0 0 #2
+TEST(Inlining2) {
+ FLAG_allow_natives_syntax = true;
+ v8::HandleScope scope(CcTest::isolate());
+ v8::Local<v8::Context> env = CcTest::NewContext({PROFILER_EXTENSION_ID});
+ v8::Context::Scope context_scope(env);
+
+ CompileRun(inlining_test_source2);
+ v8::Local<v8::Function> function = GetFunction(env, "start");
+
+ v8::CpuProfiler* profiler = v8::CpuProfiler::New(CcTest::isolate());
+ v8::Local<v8::String> profile_name = v8_str("inlining");
+ profiler->StartProfiling(profile_name,
+ v8::CpuProfilingMode::kCallerLineNumbers);
+
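+  // Scale the iteration count by load_factor so simulator builds, which run
+  // much slower, still finish in a reasonable time.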
+ v8::Local<v8::Value> args[] = {
+ v8::Integer::New(env->GetIsolate(), 50000 * load_factor)};
+ function->Call(env, env->Global(), arraysize(args), args).ToLocalChecked();
+ v8::CpuProfile* profile = profiler->StopProfiling(profile_name);
+ CHECK(profile);
+
+ // Dump collected profile to have a better diagnostic in case of failure.
+ reinterpret_cast<i::CpuProfile*>(profile)->Print();
+
+ const v8::CpuProfileNode* root = profile->GetTopDownRoot();
+ const v8::CpuProfileNode* start_node = GetChild(env, root, "start");
+
+ NameLinePair l421_a17[] = {{"level1", 36},
+ {"level2", 32},
+ {"level3", 26},
+ {"level4", 21},
+ {"action", 17}};
+ CheckBranch(start_node, l421_a17, arraysize(l421_a17));
+ NameLinePair l422_a17[] = {{"level1", 36},
+ {"level2", 32},
+ {"level3", 26},
+ {"level4", 22},
+ {"action", 17}};
+ CheckBranch(start_node, l422_a17, arraysize(l422_a17));
+
+ NameLinePair l421_a18[] = {{"level1", 36},
+ {"level2", 32},
+ {"level3", 26},
+ {"level4", 21},
+ {"action", 18}};
+ CheckBranch(start_node, l421_a18, arraysize(l421_a18));
+ NameLinePair l422_a18[] = {{"level1", 36},
+ {"level2", 32},
+ {"level3", 26},
+ {"level4", 22},
+ {"action", 18}};
+ CheckBranch(start_node, l422_a18, arraysize(l422_a18));
+
+ NameLinePair action_direct[] = {{"level1", 36}, {"action", 30}};
+ CheckBranch(start_node, action_direct, arraysize(action_direct));
+
+ profile->Delete();
+ profiler->Dispose();
+}
+
// [Top down]:
// 0 (root) #0 1
// 2 (program) #0 2
@@ -1713,13 +1847,13 @@ TEST(IdleTime) {
i::ProfilerEventsProcessor* processor =
reinterpret_cast<i::CpuProfiler*>(cpu_profiler)->processor();
- processor->AddCurrentStack(isolate, true);
+ processor->AddCurrentStack(true);
isolate->SetIdle(true);
for (int i = 0; i < 3; i++) {
- processor->AddCurrentStack(isolate, true);
+ processor->AddCurrentStack(true);
}
isolate->SetIdle(false);
- processor->AddCurrentStack(isolate, true);
+ processor->AddCurrentStack(true);
v8::CpuProfile* profile = cpu_profiler->StopProfiling(profile_name);
CHECK(profile);
@@ -1761,7 +1895,7 @@ static void CheckFunctionDetails(v8::Isolate* isolate,
TEST(FunctionDetails) {
i::FLAG_allow_natives_syntax = true;
v8::HandleScope scope(CcTest::isolate());
- v8::Local<v8::Context> env = CcTest::NewContext(PROFILER_EXTENSION);
+ v8::Local<v8::Context> env = CcTest::NewContext({PROFILER_EXTENSION_ID});
v8::Context::Scope context_scope(env);
ProfilerHelper helper(env);
@@ -1808,7 +1942,7 @@ TEST(FunctionDetailsInlining) {
if (!CcTest::i_isolate()->use_optimizer() || i::FLAG_always_opt) return;
i::FLAG_allow_natives_syntax = true;
v8::HandleScope scope(CcTest::isolate());
- v8::Local<v8::Context> env = CcTest::NewContext(PROFILER_EXTENSION);
+ v8::Local<v8::Context> env = CcTest::NewContext({PROFILER_EXTENSION_ID});
v8::Context::Scope context_scope(env);
ProfilerHelper helper(env);
@@ -1881,12 +2015,12 @@ TEST(FunctionDetailsInlining) {
const v8::CpuProfileNode* beta = FindChild(env, alpha, "beta");
if (!beta) return;
CheckFunctionDetails(env->GetIsolate(), beta, "beta", "script_b",
- script_b->GetUnboundScript()->GetId(), 0, 0);
+ script_b->GetUnboundScript()->GetId(), 1, 14);
}
TEST(DontStopOnFinishedProfileDelete) {
v8::HandleScope scope(CcTest::isolate());
- v8::Local<v8::Context> env = CcTest::NewContext(PROFILER_EXTENSION);
+ v8::Local<v8::Context> env = CcTest::NewContext({PROFILER_EXTENSION_ID});
v8::Context::Scope context_scope(env);
v8::CpuProfiler* profiler = v8::CpuProfiler::New(env->GetIsolate());
@@ -1934,7 +2068,7 @@ TEST(CollectDeoptEvents) {
if (!CcTest::i_isolate()->use_optimizer() || i::FLAG_always_opt) return;
i::FLAG_allow_natives_syntax = true;
v8::HandleScope scope(CcTest::isolate());
- v8::Local<v8::Context> env = CcTest::NewContext(PROFILER_EXTENSION);
+ v8::Local<v8::Context> env = CcTest::NewContext({PROFILER_EXTENSION_ID});
v8::Context::Scope context_scope(env);
ProfilerHelper helper(env);
i::CpuProfiler* iprofiler =
@@ -2067,7 +2201,7 @@ TEST(DeoptAtFirstLevelInlinedSource) {
if (!CcTest::i_isolate()->use_optimizer() || i::FLAG_always_opt) return;
i::FLAG_allow_natives_syntax = true;
v8::HandleScope scope(CcTest::isolate());
- v8::Local<v8::Context> env = CcTest::NewContext(PROFILER_EXTENSION);
+ v8::Local<v8::Context> env = CcTest::NewContext({PROFILER_EXTENSION_ID});
v8::Context::Scope context_scope(env);
ProfilerHelper helper(env);
i::CpuProfiler* iprofiler =
@@ -2137,7 +2271,7 @@ TEST(DeoptAtSecondLevelInlinedSource) {
if (!CcTest::i_isolate()->use_optimizer() || i::FLAG_always_opt) return;
i::FLAG_allow_natives_syntax = true;
v8::HandleScope scope(CcTest::isolate());
- v8::Local<v8::Context> env = CcTest::NewContext(PROFILER_EXTENSION);
+ v8::Local<v8::Context> env = CcTest::NewContext({PROFILER_EXTENSION_ID});
v8::Context::Scope context_scope(env);
ProfilerHelper helper(env);
i::CpuProfiler* iprofiler =
@@ -2212,7 +2346,7 @@ TEST(DeoptUntrackedFunction) {
if (!CcTest::i_isolate()->use_optimizer() || i::FLAG_always_opt) return;
i::FLAG_allow_natives_syntax = true;
v8::HandleScope scope(CcTest::isolate());
- v8::Local<v8::Context> env = CcTest::NewContext(PROFILER_EXTENSION);
+ v8::Local<v8::Context> env = CcTest::NewContext({PROFILER_EXTENSION_ID});
v8::Context::Scope context_scope(env);
ProfilerHelper helper(env);
i::CpuProfiler* iprofiler =
@@ -2423,7 +2557,7 @@ TEST(StaticCollectSampleAPI) {
TEST(CodeEntriesMemoryLeak) {
v8::HandleScope scope(CcTest::isolate());
- v8::Local<v8::Context> env = CcTest::NewContext(PROFILER_EXTENSION);
+ v8::Local<v8::Context> env = CcTest::NewContext({PROFILER_EXTENSION_ID});
v8::Context::Scope context_scope(env);
std::string source = "function start() {}\n";
@@ -2457,7 +2591,7 @@ TEST(NativeFrameStackTrace) {
// v8::internal::StringTable::LookupStringIfExists_NoAllocate native function
// without producing an EXIT frame.
v8::HandleScope scope(CcTest::isolate());
- v8::Local<v8::Context> env = CcTest::NewContext(PROFILER_EXTENSION);
+ v8::Local<v8::Context> env = CcTest::NewContext({PROFILER_EXTENSION_ID});
v8::Context::Scope context_scope(env);
const char* source = R"(
@@ -2503,6 +2637,7 @@ TEST(SourcePositionTable) {
int no_info = v8::CpuProfileNode::kNoLineNumberInfo;
CHECK_EQ(no_info, info.GetSourceLineNumber(std::numeric_limits<int>::min()));
CHECK_EQ(no_info, info.GetSourceLineNumber(0));
+ CHECK_EQ(SourcePosition::kNotInlined, info.GetInliningId(0));
CHECK_EQ(no_info, info.GetSourceLineNumber(1));
CHECK_EQ(no_info, info.GetSourceLineNumber(9));
CHECK_EQ(no_info, info.GetSourceLineNumber(10));
@@ -2511,12 +2646,14 @@ TEST(SourcePositionTable) {
CHECK_EQ(no_info, info.GetSourceLineNumber(20));
CHECK_EQ(no_info, info.GetSourceLineNumber(21));
CHECK_EQ(no_info, info.GetSourceLineNumber(100));
+ CHECK_EQ(SourcePosition::kNotInlined, info.GetInliningId(100));
CHECK_EQ(no_info, info.GetSourceLineNumber(std::numeric_limits<int>::max()));
- info.SetPosition(10, 1);
- info.SetPosition(20, 2);
+ info.SetPosition(10, 1, SourcePosition::kNotInlined);
+ info.SetPosition(20, 2, SourcePosition::kNotInlined);
- // The only valid return values are 1 or 2 - every pc maps to a line number.
+ // The only valid return values are 1 or 2 - every pc maps to a line
+ // number.
CHECK_EQ(1, info.GetSourceLineNumber(std::numeric_limits<int>::min()));
CHECK_EQ(1, info.GetSourceLineNumber(0));
CHECK_EQ(1, info.GetSourceLineNumber(1));
@@ -2524,16 +2661,22 @@ TEST(SourcePositionTable) {
CHECK_EQ(1, info.GetSourceLineNumber(10));
CHECK_EQ(1, info.GetSourceLineNumber(11));
CHECK_EQ(1, info.GetSourceLineNumber(19));
- CHECK_EQ(2, info.GetSourceLineNumber(20));
+ CHECK_EQ(1, info.GetSourceLineNumber(20));
CHECK_EQ(2, info.GetSourceLineNumber(21));
CHECK_EQ(2, info.GetSourceLineNumber(100));
CHECK_EQ(2, info.GetSourceLineNumber(std::numeric_limits<int>::max()));
+ CHECK_EQ(SourcePosition::kNotInlined, info.GetInliningId(0));
+ CHECK_EQ(SourcePosition::kNotInlined, info.GetInliningId(100));
+
// Test SetPosition behavior.
- info.SetPosition(25, 3);
+ info.SetPosition(25, 3, 0);
CHECK_EQ(2, info.GetSourceLineNumber(21));
CHECK_EQ(3, info.GetSourceLineNumber(100));
CHECK_EQ(3, info.GetSourceLineNumber(std::numeric_limits<int>::max()));
+
+ CHECK_EQ(SourcePosition::kNotInlined, info.GetInliningId(21));
+ CHECK_EQ(0, info.GetInliningId(100));
}
TEST(MultipleProfilers) {
@@ -2545,6 +2688,58 @@ TEST(MultipleProfilers) {
profiler2->StopProfiling("2");
}
+void ProfileSomeCode(v8::Isolate* isolate) {
+ v8::Isolate::Scope isolate_scope(isolate);
+ v8::HandleScope scope(isolate);
+ LocalContext context(isolate);
+
+ v8::CpuProfiler* profiler = v8::CpuProfiler::New(isolate);
+
+ v8::Local<v8::String> profile_name = v8_str("1");
+ profiler->StartProfiling(profile_name);
+ const char* source = R"(
+ function foo() {
+ var x = 0;
+ for (var i = 0; i < 1e3; i++) {
+ for (var j = 0; j < 1e3; j++) {
+ x = i * j;
+ }
+ }
+ return x;
+ }
+ foo();
+ )";
+
+ CompileRun(source);
+ profiler->StopProfiling(profile_name);
+ profiler->Dispose();
+}
+
+class IsolateThread : public v8::base::Thread {
+ public:
+ IsolateThread() : Thread(Options("IsolateThread")) {}
+
+ void Run() override {
+ v8::Isolate::CreateParams create_params;
+ create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
+ v8::Isolate* isolate = v8::Isolate::New(create_params);
+ ProfileSomeCode(isolate);
+ isolate->Dispose();
+ }
+};
+
+// Checking for crashes and TSAN issues with multiple isolates profiling.
+TEST(MultipleIsolates) {
+ IsolateThread thread1;
+ IsolateThread thread2;
+
+ thread1.Start();
+ thread2.Start();
+
+ thread1.Join();
+ thread2.Join();
+}
+
int GetSourcePositionEntryCount(i::Isolate* isolate, const char* source) {
i::Handle<i::JSFunction> function = i::Handle<i::JSFunction>::cast(
v8::Utils::OpenHandle(*CompileRun(source)));
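Note: the hunks above cover two profiler changes: CcTest::NewContext now takes a list of extension ids, and the profiler's SourcePositionTable entries carry an inlining id alongside the line number. A minimal sketch of the extended API, using only names exercised by the test above (an illustration of its expectations, not a spec):

    i::SourcePositionTable info;
    // SetPosition(pc_offset, line, inlining_id); kNotInlined marks code
    // that was not inlined from anywhere.
    info.SetPosition(10, 1, SourcePosition::kNotInlined);
    info.SetPosition(25, 3, 0);  // inlining id 0
    CHECK_EQ(1, info.GetSourceLineNumber(10));
    CHECK_EQ(3, info.GetSourceLineNumber(100));  // falls through to the last entry
    CHECK_EQ(SourcePosition::kNotInlined, info.GetInliningId(10));
    CHECK_EQ(0, info.GetInliningId(100));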
diff --git a/deps/v8/test/cctest/test-date.cc b/deps/v8/test/cctest/test-date.cc
index d1d8efe26c..66e5441ed1 100644
--- a/deps/v8/test/cctest/test-date.cc
+++ b/deps/v8/test/cctest/test-date.cc
@@ -193,28 +193,5 @@ TEST(DateParseLegacyUseCounter) {
CHECK_EQ(1, legacy_parse_count);
}
-#ifdef V8_INTL_SUPPORT
-TEST(DateCacheVersion) {
- FLAG_allow_natives_syntax = true;
- v8::Isolate* isolate = CcTest::isolate();
- v8::Isolate::Scope isolate_scope(isolate);
- v8::HandleScope scope(isolate);
- v8::Local<v8::Context> context = v8::Context::New(isolate);
- v8::Context::Scope context_scope(context);
- v8::Local<v8::Number> date_cache_version =
- v8::Local<v8::Number>::Cast(CompileRun("%DateCacheVersion()"));
-
- CHECK(date_cache_version->IsNumber());
- CHECK_EQ(0.0, date_cache_version->NumberValue(context).FromMaybe(-1.0));
-
- v8::Date::DateTimeConfigurationChangeNotification(isolate);
-
- date_cache_version =
- v8::Local<v8::Number>::Cast(CompileRun("%DateCacheVersion()"));
- CHECK(date_cache_version->IsNumber());
- CHECK_EQ(1.0, date_cache_version->NumberValue(context).FromMaybe(-1.0));
-}
-#endif // V8_INTL_SUPPORT
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/cctest/test-debug.cc b/deps/v8/test/cctest/test-debug.cc
index 2e2128e50b..bc9a11a9f1 100644
--- a/deps/v8/test/cctest/test-debug.cc
+++ b/deps/v8/test/cctest/test-debug.cc
@@ -165,7 +165,7 @@ void CheckDebuggerUnloaded() {
// Iterate the heap and check that there are no debugger related objects left.
HeapIterator iterator(CcTest::heap());
- for (HeapObject* obj = iterator.next(); obj != nullptr;
+ for (HeapObject obj = iterator.next(); !obj.is_null();
obj = iterator.next()) {
CHECK(!obj->IsDebugInfo());
}
@@ -2818,6 +2818,21 @@ TEST(DebugBreakInWrappedScript) {
CheckDebuggerUnloaded();
}
+static void EmptyHandler(const v8::FunctionCallbackInfo<v8::Value>& args) {}
+
+TEST(DebugScopeIteratorWithFunctionTemplate) {
+ LocalContext env;
+ v8::HandleScope handle_scope(env->GetIsolate());
+ v8::Isolate* isolate = env->GetIsolate();
+ EnableDebugger(isolate);
+ v8::Local<v8::Function> func =
+ v8::Function::New(env.local(), EmptyHandler).ToLocalChecked();
+ std::unique_ptr<v8::debug::ScopeIterator> iterator =
+ v8::debug::ScopeIterator::CreateForFunction(isolate, func);
+ CHECK(iterator->Done());
+ DisableDebugger(isolate);
+}
+
TEST(DebugBreakWithoutJS) {
i::FLAG_stress_compaction = false;
#ifdef VERIFY_HEAP
@@ -3039,7 +3054,7 @@ TEST(DebugScriptLineEndsAreAscending) {
v8::internal::Script::cast(instances->get(i)), CcTest::i_isolate());
v8::internal::Script::InitLineEnds(script);
- v8::internal::FixedArray* ends =
+ v8::internal::FixedArray ends =
v8::internal::FixedArray::cast(script->line_ends());
CHECK_GT(ends->length(), 0);
@@ -4016,6 +4031,8 @@ UNINITIALIZED_TEST(DebugSetOutOfMemoryListener) {
}
TEST(DebugCoverage) {
+ // Coverage needs feedback vectors.
+ if (i::FLAG_lite_mode) return;
i::FLAG_always_opt = false;
LocalContext env;
v8::Isolate* isolate = env->GetIsolate();
@@ -4069,6 +4086,8 @@ v8::debug::Coverage::ScriptData GetScriptDataAndDeleteCoverage(
} // namespace
TEST(DebugCoverageWithCoverageOutOfScope) {
+ // Coverage needs feedback vectors.
+ if (i::FLAG_lite_mode) return;
i::FLAG_always_opt = false;
LocalContext env;
v8::Isolate* isolate = env->GetIsolate();
@@ -4138,6 +4157,8 @@ v8::debug::Coverage::FunctionData GetFunctionDataAndDeleteCoverage(
} // namespace
TEST(DebugCoverageWithScriptDataOutOfScope) {
+ // Coverage needs feedback vectors.
+ if (i::FLAG_lite_mode) return;
i::FLAG_always_opt = false;
LocalContext env;
v8::Isolate* isolate = env->GetIsolate();
@@ -4163,12 +4184,10 @@ TEST(BuiltinsExceptionPrediction) {
v8::HandleScope handle_scope(isolate);
v8::Context::New(isolate);
- i::Snapshot::EnsureAllBuiltinsAreDeserialized(iisolate);
-
i::Builtins* builtins = iisolate->builtins();
bool fail = false;
for (int i = 0; i < i::Builtins::builtin_count; i++) {
- i::Code* builtin = builtins->builtin(i);
+ i::Code builtin = builtins->builtin(i);
if (builtin->kind() != i::Code::BUILTIN) continue;
auto prediction = builtin->GetBuiltinCatchPrediction();
USE(prediction);
@@ -4213,9 +4232,10 @@ TEST(DebugEvaluateNoSideEffect) {
std::vector<i::Handle<i::JSFunction>> all_functions;
{
i::HeapIterator iterator(isolate->heap());
- while (i::HeapObject* obj = iterator.next()) {
+ for (i::HeapObject obj = iterator.next(); !obj.is_null();
+ obj = iterator.next()) {
if (!obj->IsJSFunction()) continue;
- i::JSFunction* fun = i::JSFunction::cast(obj);
+ i::JSFunction fun = i::JSFunction::cast(obj);
all_functions.emplace_back(fun, isolate);
}
}
@@ -4249,7 +4269,6 @@ i::MaybeHandle<i::Script> FindScript(
UNINITIALIZED_TEST(LoadedAtStartupScripts) {
i::FLAG_expose_gc = true;
- i::FLAG_expose_natives_as = "natives";
v8::Isolate::CreateParams create_params;
create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
@@ -4266,8 +4285,8 @@ UNINITIALIZED_TEST(LoadedAtStartupScripts) {
{
i::DisallowHeapAllocation no_gc;
i::Script::Iterator iterator(i_isolate);
- i::Script* script;
- while ((script = iterator.Next()) != nullptr) {
+ for (i::Script script = iterator.Next(); !script.is_null();
+ script = iterator.Next()) {
if (script->type() == i::Script::TYPE_NATIVE &&
script->name()->IsUndefined(i_isolate)) {
continue;
@@ -4276,17 +4295,12 @@ UNINITIALIZED_TEST(LoadedAtStartupScripts) {
scripts.emplace_back(script, i_isolate);
}
}
- CHECK_EQ(count_by_type[i::Script::TYPE_NATIVE],
- i::Natives::GetBuiltinsCount());
+ CHECK_EQ(count_by_type[i::Script::TYPE_NATIVE], 0);
CHECK_EQ(count_by_type[i::Script::TYPE_EXTENSION], 2);
CHECK_EQ(count_by_type[i::Script::TYPE_NORMAL], 1);
CHECK_EQ(count_by_type[i::Script::TYPE_WASM], 0);
CHECK_EQ(count_by_type[i::Script::TYPE_INSPECTOR], 0);
- i::Handle<i::Script> native_array_script =
- FindScript(i_isolate, scripts, "native array.js").ToHandleChecked();
- CHECK_EQ(native_array_script->type(), i::Script::TYPE_NATIVE);
-
i::Handle<i::Script> gc_script =
FindScript(i_isolate, scripts, "v8/gc").ToHandleChecked();
CHECK_EQ(gc_script->type(), i::Script::TYPE_EXTENSION);
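Note: most of the churn in test-debug.cc is the tagged-value migration: HeapObject*, FixedArray*, Code*, and Script* become small value classes, so "no more objects" is signalled by is_null() instead of nullptr and iteration loops change shape. A sketch of the new idiom, assuming the HeapIterator interface used in the hunks above:

    i::HeapIterator iterator(isolate->heap());
    for (i::HeapObject obj = iterator.next(); !obj.is_null();
         obj = iterator.next()) {
      // obj is passed by value but still dereferences like a pointer.
      if (obj->IsJSFunction()) USE(obj);
    }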
diff --git a/deps/v8/test/cctest/test-decls.cc b/deps/v8/test/cctest/test-decls.cc
index 8b59fe5960..cce41a3738 100644
--- a/deps/v8/test/cctest/test-decls.cc
+++ b/deps/v8/test/cctest/test-decls.cc
@@ -29,8 +29,6 @@
#include "src/v8.h"
-#include "src/heap/heap-inl.h"
-#include "src/heap/heap.h"
#include "test/cctest/cctest.h"
namespace v8 {
diff --git a/deps/v8/test/cctest/test-dictionary.cc b/deps/v8/test/cctest/test-dictionary.cc
index 1101ec06eb..feeaeb4214 100644
--- a/deps/v8/test/cctest/test-dictionary.cc
+++ b/deps/v8/test/cctest/test-dictionary.cc
@@ -34,7 +34,6 @@
#include "src/global-handles.h"
#include "src/heap/factory.h"
#include "src/heap/spaces.h"
-#include "src/macro-assembler.h"
#include "src/objects-inl.h"
#include "src/objects/hash-table-inl.h"
#include "test/cctest/heap/heap-utils.h"
@@ -86,7 +85,7 @@ static void TestHashMap(Handle<HashMap> table) {
CHECK_EQ(table->NumberOfElements(), i + 1);
CHECK_NE(table->FindEntry(isolate, key), HashMap::kNotFound);
CHECK_EQ(table->Lookup(key), *value);
- CHECK(key->GetIdentityHash(isolate)->IsSmi());
+ CHECK(key->GetIdentityHash()->IsSmi());
}
// Keys never added to the map which already have an identity hash
@@ -96,7 +95,7 @@ static void TestHashMap(Handle<HashMap> table) {
CHECK(key->GetOrCreateIdentityHash(isolate)->IsSmi());
CHECK_EQ(table->FindEntry(isolate, key), HashMap::kNotFound);
CHECK_EQ(table->Lookup(key), roots.the_hole_value());
- CHECK(key->GetIdentityHash(isolate)->IsSmi());
+ CHECK(key->GetIdentityHash()->IsSmi());
}
// Keys that don't have an identity hash should not be found and also
@@ -104,7 +103,7 @@ static void TestHashMap(Handle<HashMap> table) {
for (int i = 0; i < 100; i++) {
Handle<JSReceiver> key = factory->NewJSArray(7);
CHECK_EQ(table->Lookup(key), roots.the_hole_value());
- Object* identity_hash = key->GetIdentityHash(isolate);
+ Object identity_hash = key->GetIdentityHash();
CHECK_EQ(roots.undefined_value(), identity_hash);
}
}
@@ -157,7 +156,7 @@ static void TestHashSet(Handle<HashSet> table) {
table = HashSet::Add(isolate, table, key);
CHECK_EQ(table->NumberOfElements(), i + 2);
CHECK(table->Has(isolate, key));
- CHECK(key->GetIdentityHash(isolate)->IsSmi());
+ CHECK(key->GetIdentityHash()->IsSmi());
}
// Keys never added to the map which already have an identity hash
@@ -166,7 +165,7 @@ static void TestHashSet(Handle<HashSet> table) {
Handle<JSReceiver> key = factory->NewJSArray(7);
CHECK(key->GetOrCreateIdentityHash(isolate)->IsSmi());
CHECK(!table->Has(isolate, key));
- CHECK(key->GetIdentityHash(isolate)->IsSmi());
+ CHECK(key->GetIdentityHash()->IsSmi());
}
// Keys that don't have an identity hash should not be found and also
@@ -174,7 +173,7 @@ static void TestHashSet(Handle<HashSet> table) {
for (int i = 0; i < 100; i++) {
Handle<JSReceiver> key = factory->NewJSArray(7);
CHECK(!table->Has(isolate, key));
- Object* identity_hash = key->GetIdentityHash(isolate);
+ Object identity_hash = key->GetIdentityHash();
CHECK_EQ(ReadOnlyRoots(CcTest::heap()).undefined_value(), identity_hash);
}
}
@@ -188,6 +187,9 @@ TEST(HashSet) {
class ObjectHashTableTest: public ObjectHashTable {
public:
+ explicit ObjectHashTableTest(ObjectHashTable o) : ObjectHashTable(o) {}
+ ObjectHashTableTest* operator->() { return this; }
+
void insert(int entry, int key, int value) {
set(EntryToIndex(entry), Smi::FromInt(key));
set(EntryToIndex(entry) + 1, Smi::FromInt(value));
@@ -211,7 +213,7 @@ TEST(HashTableRehash) {
// Test almost filled table.
{
Handle<ObjectHashTable> table = ObjectHashTable::New(isolate, 100);
- ObjectHashTableTest* t = reinterpret_cast<ObjectHashTableTest*>(*table);
+ ObjectHashTableTest t(*table);
int capacity = t->capacity();
for (int i = 0; i < capacity - 1; i++) {
t->insert(i, i * i, i);
@@ -224,7 +226,7 @@ TEST(HashTableRehash) {
// Test half-filled table.
{
Handle<ObjectHashTable> table = ObjectHashTable::New(isolate, 100);
- ObjectHashTableTest* t = reinterpret_cast<ObjectHashTableTest*>(*table);
+ ObjectHashTableTest t(*table);
int capacity = t->capacity();
for (int i = 0; i < capacity / 2; i++) {
t->insert(i, i * i, i);
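Note: the same migration explains the ObjectHashTableTest change above: *table is now a tagged value rather than a pointer, so the old reinterpret_cast trick no longer works. Wrapping the value in the test subclass and giving it an operator-> that returns this keeps every existing t->insert(...) call site compiling unchanged:

    Handle<ObjectHashTable> table = ObjectHashTable::New(isolate, 100);
    ObjectHashTableTest t(*table);  // wrap by value, no reinterpret_cast
    t->insert(0, 1, 2);             // operator->() returns this, so -> still works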
diff --git a/deps/v8/test/cctest/test-disasm-arm.cc b/deps/v8/test/cctest/test-disasm-arm.cc
index 877ae6665f..a3244d37ee 100644
--- a/deps/v8/test/cctest/test-disasm-arm.cc
+++ b/deps/v8/test/cctest/test-disasm-arm.cc
@@ -106,12 +106,13 @@ bool DisassembleAndCompare(byte* begin, UseRegex use_regex,
// Set up V8 to a state where we can at least run the assembler and
// disassembler. Declare the variables and allocate the data structures used
// in the rest of the macros.
-#define SET_UP() \
- CcTest::InitializeVM(); \
- Isolate* isolate = CcTest::i_isolate(); \
- HandleScope scope(isolate); \
- byte* buffer = reinterpret_cast<byte*>(malloc(4 * 1024)); \
- Assembler assm(AssemblerOptions{}, buffer, 4 * 1024); \
+#define SET_UP() \
+ CcTest::InitializeVM(); \
+ Isolate* isolate = CcTest::i_isolate(); \
+ HandleScope scope(isolate); \
+ byte* buffer = reinterpret_cast<byte*>(malloc(4 * 1024)); \
+ Assembler assm(AssemblerOptions{}, \
+ ExternalAssemblerBuffer(buffer, 4 * 1024)); \
bool failure = false;
// This macro assembles one instruction using the preallocated assembler and
@@ -1511,12 +1512,13 @@ static void TestLoadLiteral(byte* buffer, Assembler* assm, bool* failure,
const char *expected_string_template =
(offset >= 0) ?
- "e59f0%03x ldr r0, [pc, #+%d] (addr %p)" :
- "e51f0%03x ldr r0, [pc, #%d] (addr %p)";
+ "e59f0%03x ldr r0, [pc, #+%d] (addr 0x%08" PRIxPTR ")" :
+ "e51f0%03x ldr r0, [pc, #%d] (addr 0x%08" PRIxPTR ")";
char expected_string[80];
snprintf(expected_string, sizeof(expected_string), expected_string_template,
- abs(offset), offset,
- progcounter + Instruction::kPcLoadDelta + offset);
+ abs(offset), offset,
+ reinterpret_cast<uintptr_t>(
+ progcounter + Instruction::kPcLoadDelta + offset));
if (!DisassembleAndCompare(progcounter, kRawString, expected_string)) {
*failure = true;
}
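Note: two independent fixes meet in this file. SET_UP() adopts the new ExternalAssemblerBuffer constructor (see the note after the arm64 diff below), and the expected-string templates stop relying on %p, whose output is implementation-defined (the 0x prefix, width, and case differ between CRTs). A sketch of the portable pattern, with progcounter and offset standing in for the exact values computed above (the real template also adds Instruction::kPcLoadDelta):

    #include <cinttypes>  // for PRIxPTR
    char expected[80];
    snprintf(expected, sizeof(expected),
             "e59f0%03x  ldr r0, [pc, #+%d] (addr 0x%08" PRIxPTR ")",
             abs(offset), offset,
             reinterpret_cast<uintptr_t>(progcounter + offset));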
diff --git a/deps/v8/test/cctest/test-disasm-arm64.cc b/deps/v8/test/cctest/test-disasm-arm64.cc
index 1cc14271a6..20ccf77fd0 100644
--- a/deps/v8/test/cctest/test-disasm-arm64.cc
+++ b/deps/v8/test/cctest/test-disasm-arm64.cc
@@ -28,19 +28,14 @@
#include <stdio.h>
#include <cstring>
-#include "src/v8.h"
-#include "test/cctest/cctest.h"
-
-#include "src/macro-assembler.h"
-
-#include "src/frames-inl.h"
-
#include "src/arm64/assembler-arm64.h"
#include "src/arm64/decoder-arm64-inl.h"
#include "src/arm64/disasm-arm64.h"
-#include "src/arm64/macro-assembler-arm64-inl.h"
-#include "src/arm64/macro-assembler-arm64.h"
#include "src/arm64/utils-arm64.h"
+#include "src/frames-inl.h"
+#include "src/macro-assembler-inl.h"
+#include "src/v8.h"
+#include "test/cctest/cctest.h"
namespace v8 {
namespace internal {
@@ -49,29 +44,31 @@ namespace internal {
#define EXP_SIZE (256)
#define INSTR_SIZE (1024)
-#define SET_UP_MASM() \
- InitializeVM(); \
- Isolate* isolate = CcTest::i_isolate(); \
- HandleScope scope(isolate); \
- byte* buf = static_cast<byte*>(malloc(INSTR_SIZE)); \
- uint32_t encoding = 0; \
- MacroAssembler* assm = new MacroAssembler( \
- isolate, buf, INSTR_SIZE, v8::internal::CodeObjectRequired::kYes); \
- Decoder<DispatchingDecoderVisitor>* decoder = \
- new Decoder<DispatchingDecoderVisitor>(); \
- DisassemblingDecoder* disasm = new DisassemblingDecoder(); \
+#define SET_UP_MASM() \
+ InitializeVM(); \
+ Isolate* isolate = CcTest::i_isolate(); \
+ HandleScope scope(isolate); \
+ byte* buf = static_cast<byte*>(malloc(INSTR_SIZE)); \
+ uint32_t encoding = 0; \
+ MacroAssembler* assm = \
+ new MacroAssembler(isolate, v8::internal::CodeObjectRequired::kYes, \
+ ExternalAssemblerBuffer(buf, INSTR_SIZE)); \
+ Decoder<DispatchingDecoderVisitor>* decoder = \
+ new Decoder<DispatchingDecoderVisitor>(); \
+ DisassemblingDecoder* disasm = new DisassemblingDecoder(); \
decoder->AppendVisitor(disasm)
-#define SET_UP_ASM() \
- InitializeVM(); \
- Isolate* isolate = CcTest::i_isolate(); \
- HandleScope scope(isolate); \
- byte* buf = static_cast<byte*>(malloc(INSTR_SIZE)); \
- uint32_t encoding = 0; \
- Assembler* assm = new Assembler(AssemblerOptions{}, buf, INSTR_SIZE); \
- Decoder<DispatchingDecoderVisitor>* decoder = \
- new Decoder<DispatchingDecoderVisitor>(); \
- DisassemblingDecoder* disasm = new DisassemblingDecoder(); \
+#define SET_UP_ASM() \
+ InitializeVM(); \
+ Isolate* isolate = CcTest::i_isolate(); \
+ HandleScope scope(isolate); \
+ byte* buf = static_cast<byte*>(malloc(INSTR_SIZE)); \
+ uint32_t encoding = 0; \
+ Assembler* assm = new Assembler(AssemblerOptions{}, \
+ ExternalAssemblerBuffer(buf, INSTR_SIZE)); \
+ Decoder<DispatchingDecoderVisitor>* decoder = \
+ new Decoder<DispatchingDecoderVisitor>(); \
+ DisassemblingDecoder* disasm = new DisassemblingDecoder(); \
decoder->AppendVisitor(disasm)
#define COMPARE(ASM, EXP) \
@@ -799,6 +796,13 @@ TEST_(dp_2_source) {
TEST_(adr) {
SET_UP_ASM();
+ char expected[100];
+ snprintf(expected, sizeof(expected), "adr x0, #+0x0 (addr %p)", buf);
+ COMPARE(adr(x0, 0), expected);
+ snprintf(expected, sizeof(expected), "adr x0, #+0x1 (addr %p)", buf + 1);
+ COMPARE(adr(x0, 1), expected);
+ snprintf(expected, sizeof(expected), "adr x0, #-0x1 (addr %p)", buf - 1);
+ COMPARE(adr(x0, -1), expected);
COMPARE_PREFIX(adr(x0, 0), "adr x0, #+0x0");
COMPARE_PREFIX(adr(x1, 1), "adr x1, #+0x1");
COMPARE_PREFIX(adr(x2, -1), "adr x2, #-0x1");
@@ -1887,7 +1891,8 @@ TEST_(debug) {
#else
CHECK(!options.enable_simulator_code);
#endif
- Assembler* assm = new Assembler(options, buf, INSTR_SIZE);
+ Assembler* assm =
+ new Assembler(options, ExternalAssemblerBuffer(buf, INSTR_SIZE));
Decoder<DispatchingDecoderVisitor>* decoder =
new Decoder<DispatchingDecoderVisitor>();
DisassemblingDecoder* disasm = new DisassemblingDecoder();
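Note: the recurring assembler change in this and the following disasm files (ia32, mips, mips64, ppc, s390, x64) is the constructor migration: instead of a raw (buffer, size) pair, Assembler and MacroAssembler now take an assembler-buffer object, with ExternalAssemblerBuffer wrapping caller-owned memory. A minimal before/after sketch; the caller still owns and eventually frees buf:

    byte* buf = static_cast<byte*>(malloc(INSTR_SIZE));
    // Old: Assembler* assm = new Assembler(AssemblerOptions{}, buf, INSTR_SIZE);
    Assembler* assm = new Assembler(AssemblerOptions{},
                                    ExternalAssemblerBuffer(buf, INSTR_SIZE));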
diff --git a/deps/v8/test/cctest/test-disasm-ia32.cc b/deps/v8/test/cctest/test-disasm-ia32.cc
index a24de5656d..2dfb1f5bdb 100644
--- a/deps/v8/test/cctest/test-disasm-ia32.cc
+++ b/deps/v8/test/cctest/test-disasm-ia32.cc
@@ -35,6 +35,7 @@
#include "src/disassembler.h"
#include "src/frames-inl.h"
#include "src/macro-assembler.h"
+#include "src/ostreams.h"
#include "test/cctest/cctest.h"
namespace v8 {
@@ -42,17 +43,15 @@ namespace internal {
#define __ assm.
-static void DummyStaticFunction(Object* result) {
-}
-
+static void DummyStaticFunction(Object result) {}
TEST(DisasmIa320) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
v8::internal::byte buffer[8192];
- Assembler assm(AssemblerOptions{}, buffer, sizeof buffer);
- DummyStaticFunction(nullptr); // just bloody use it (DELETE; debugging)
+ Assembler assm(AssemblerOptions{},
+ ExternalAssemblerBuffer(buffer, sizeof buffer));
// Short immediate instructions
__ adc(eax, 12345678);
__ add(eax, Immediate(12345678));
diff --git a/deps/v8/test/cctest/test-disasm-mips.cc b/deps/v8/test/cctest/test-disasm-mips.cc
index 100eb426a0..5a4f14fe9b 100644
--- a/deps/v8/test/cctest/test-disasm-mips.cc
+++ b/deps/v8/test/cctest/test-disasm-mips.cc
@@ -70,12 +70,13 @@ bool DisassembleAndCompare(byte* pc, const char* compare_string) {
// Set up V8 to a state where we can at least run the assembler and
// disassembler. Declare the variables and allocate the data structures used
// in the rest of the macros.
-#define SET_UP() \
- CcTest::InitializeVM(); \
- Isolate* isolate = CcTest::i_isolate(); \
- HandleScope scope(isolate); \
- byte* buffer = reinterpret_cast<byte*>(malloc(4 * 1024)); \
- Assembler assm(AssemblerOptions{}, buffer, 4 * 1024); \
+#define SET_UP() \
+ CcTest::InitializeVM(); \
+ Isolate* isolate = CcTest::i_isolate(); \
+ HandleScope scope(isolate); \
+ byte* buffer = reinterpret_cast<byte*>(malloc(4 * 1024)); \
+ Assembler assm(AssemblerOptions{}, \
+ ExternalAssemblerBuffer(buffer, 4 * 1024)); \
bool failure = false;
// This macro assembles one instruction using the preallocated assembler and
@@ -1123,11 +1124,11 @@ TEST(madd_msub_maddf_msubf) {
TEST(atomic_load_store) {
SET_UP();
if (IsMipsArchVariant(kMips32r6)) {
- COMPARE(ll(v0, MemOperand(v1, -1)), "7c62ffb6 ll v0, -1(v1)");
- COMPARE(sc(v0, MemOperand(v1, 1)), "7c6200a6 sc v0, 1(v1)");
+ COMPARE(ll(v0, MemOperand(v1, -1)), "7c62ffb6 ll v0, -1(v1)");
+ COMPARE(sc(v0, MemOperand(v1, 1)), "7c6200a6 sc v0, 1(v1)");
} else {
- COMPARE(ll(v0, MemOperand(v1, -1)), "c062ffff ll v0, -1(v1)");
- COMPARE(sc(v0, MemOperand(v1, 1)), "e0620001 sc v0, 1(v1)");
+ COMPARE(ll(v0, MemOperand(v1, -1)), "c062ffff ll v0, -1(v1)");
+ COMPARE(sc(v0, MemOperand(v1, 1)), "e0620001 sc v0, 1(v1)");
}
VERIFY_RUN();
}
diff --git a/deps/v8/test/cctest/test-disasm-mips64.cc b/deps/v8/test/cctest/test-disasm-mips64.cc
index 1b0bdcc270..ad71c1598a 100644
--- a/deps/v8/test/cctest/test-disasm-mips64.cc
+++ b/deps/v8/test/cctest/test-disasm-mips64.cc
@@ -70,12 +70,13 @@ bool DisassembleAndCompare(byte* pc, const char* compare_string) {
// Set up V8 to a state where we can at least run the assembler and
// disassembler. Declare the variables and allocate the data structures used
// in the rest of the macros.
-#define SET_UP() \
- CcTest::InitializeVM(); \
- Isolate* isolate = CcTest::i_isolate(); \
- HandleScope scope(isolate); \
- byte* buffer = reinterpret_cast<byte*>(malloc(4 * 1024)); \
- Assembler assm(AssemblerOptions{}, buffer, 4 * 1024); \
+#define SET_UP() \
+ CcTest::InitializeVM(); \
+ Isolate* isolate = CcTest::i_isolate(); \
+ HandleScope scope(isolate); \
+ byte* buffer = reinterpret_cast<byte*>(malloc(4 * 1024)); \
+ Assembler assm(AssemblerOptions{}, \
+ ExternalAssemblerBuffer(buffer, 4 * 1024)); \
bool failure = false;
// This macro assembles one instruction using the preallocated assembler and
diff --git a/deps/v8/test/cctest/test-disasm-ppc.cc b/deps/v8/test/cctest/test-disasm-ppc.cc
index 3193fee931..b64402b383 100644
--- a/deps/v8/test/cctest/test-disasm-ppc.cc
+++ b/deps/v8/test/cctest/test-disasm-ppc.cc
@@ -67,8 +67,9 @@ bool DisassembleAndCompare(byte* pc, const char* compare_string) {
CcTest::InitializeVM(); \
Isolate* isolate = CcTest::i_isolate(); \
HandleScope scope(isolate); \
- byte* buffer = reinterpret_cast<byte*>(malloc(4 * 1024)); \
- Assembler assm(AssemblerOptions{}, buffer, 4 * 1024); \
+ byte* buffer = reinterpret_cast<byte*>(malloc(4 * 1024)); \
+ Assembler assm(AssemblerOptions{}, \
+ ExternalAssemblerBuffer(buffer, 4 * 1024)); \
bool failure = false;
// This macro assembles one instruction using the preallocated assembler and
diff --git a/deps/v8/test/cctest/test-disasm-s390.cc b/deps/v8/test/cctest/test-disasm-s390.cc
index c575b9566c..8e664f0b03 100644
--- a/deps/v8/test/cctest/test-disasm-s390.cc
+++ b/deps/v8/test/cctest/test-disasm-s390.cc
@@ -62,12 +62,13 @@ bool DisassembleAndCompare(byte* pc, const char* compare_string) {
// Set up V8 to a state where we can at least run the assembler and
// disassembler. Declare the variables and allocate the data structures used
// in the rest of the macros.
-#define SET_UP() \
- CcTest::InitializeVM(); \
- Isolate* isolate = CcTest::i_isolate(); \
- HandleScope scope(isolate); \
- byte* buffer = reinterpret_cast<byte*>(malloc(4 * 1024)); \
- Assembler assm(AssemblerOptions{}, buffer, 4 * 1024); \
+#define SET_UP() \
+ CcTest::InitializeVM(); \
+ Isolate* isolate = CcTest::i_isolate(); \
+ HandleScope scope(isolate); \
+ byte* buffer = reinterpret_cast<byte*>(malloc(4 * 1024)); \
+ Assembler assm(AssemblerOptions{}, \
+ ExternalAssemblerBuffer(buffer, 4 * 1024)); \
bool failure = false;
// This macro assembles one instruction using the preallocated assembler and
@@ -298,5 +299,10 @@ TEST(SixBytes) {
VERIFY_RUN();
}
+#undef SET_UP
+#undef COMPARE
+#undef EMIT_PENDING_LITERALS
+#undef VERIFY_RUN
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/cctest/test-disasm-x64.cc b/deps/v8/test/cctest/test-disasm-x64.cc
index 1e530c3ce2..085fd4be7c 100644
--- a/deps/v8/test/cctest/test-disasm-x64.cc
+++ b/deps/v8/test/cctest/test-disasm-x64.cc
@@ -36,6 +36,7 @@
#include "src/frames-inl.h"
#include "src/macro-assembler.h"
#include "src/objects-inl.h"
+#include "src/ostreams.h"
#include "test/cctest/cctest.h"
namespace v8 {
@@ -43,17 +44,13 @@ namespace internal {
#define __ assm.
-
-static void DummyStaticFunction(Object* result) {
-}
-
TEST(DisasmX64) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
v8::internal::byte buffer[8192];
- Assembler assm(AssemblerOptions{}, buffer, sizeof buffer);
- DummyStaticFunction(nullptr); // just bloody use it (DELETE; debugging)
+ Assembler assm(AssemblerOptions{},
+ ExternalAssemblerBuffer(buffer, sizeof buffer));
// Short immediate instructions
__ addq(rax, Immediate(12345678));
diff --git a/deps/v8/test/cctest/test-elements-kind.cc b/deps/v8/test/cctest/test-elements-kind.cc
index 59252f2ef8..ca382a60c1 100644
--- a/deps/v8/test/cctest/test-elements-kind.cc
+++ b/deps/v8/test/cctest/test-elements-kind.cc
@@ -43,8 +43,8 @@ Handle<String> MakeName(const char* str, int suffix) {
template <typename T, typename M>
bool EQUALS(Isolate* isolate, Handle<T> left, Handle<M> right) {
if (*left == *right) return true;
- return JSObject::Equals(isolate, Handle<Object>::cast(left),
- Handle<Object>::cast(right))
+ return Object::Equals(isolate, Handle<Object>::cast(left),
+ Handle<Object>::cast(right))
.FromJust();
}
@@ -65,6 +65,12 @@ bool EQUALS(Isolate* isolate, T left, Handle<M> right) {
// Tests
//
+TEST(SystemPointerElementsKind) {
+ CHECK_EQ(ElementsKindToShiftSize(SYSTEM_POINTER_ELEMENTS),
+ kSystemPointerSizeLog2);
+ CHECK_EQ(ElementsKindToByteSize(SYSTEM_POINTER_ELEMENTS), kSystemPointerSize);
+}
+
TEST(JSObjectAddingProperties) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
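Note: the new SystemPointerElementsKind test pins the invariant that an elements kind's byte size is two to the power of its shift size. The same relation can be stated at compile time for the constants involved (both are existing V8 constants; this is just an illustration):

    static_assert(kSystemPointerSize == 1 << kSystemPointerSizeLog2,
                  "byte size == 1 << shift size");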
diff --git a/deps/v8/test/cctest/test-experimental-extra.js b/deps/v8/test/cctest/test-experimental-extra.js
deleted file mode 100644
index a29fc7688c..0000000000
--- a/deps/v8/test/cctest/test-experimental-extra.js
+++ /dev/null
@@ -1,14 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-(function (global, binding) {
- 'use strict';
- binding.testExperimentalExtraShouldReturnTen = function () {
- return 10;
- };
-
- binding.testExperimentalExtraShouldCallToRuntime = function() {
- return binding.runtime(3);
- };
-})
diff --git a/deps/v8/test/cctest/test-extra.js b/deps/v8/test/cctest/test-extra.js
index 9b17e401a2..f369108228 100644
--- a/deps/v8/test/cctest/test-extra.js
+++ b/deps/v8/test/cctest/test-extra.js
@@ -80,7 +80,7 @@
const rejectedPromise = v8.createPromise();
v8.rejectPromise(rejectedPromise, apply(function (arg1, arg2) {
return (arg1 === arg2 && arg2 === 'x') ? 3 : -1;
- }, null, new v8.InternalPackedArray('x', 'x')));
+ }, null, ['x', 'x']));
const rejectedButHandledPromise = v8.createPromise();
v8.rejectPromise(rejectedButHandledPromise, 4);
@@ -103,6 +103,10 @@
promiseStateToString(fulfilledPromise) + ' ' +
promiseStateToString(rejectedPromise);
+ const uncurryThis = v8.uncurryThis(function (a, b, c, d, e) {
+ return (this + a + b + c + d + e) === 21;
+ })(1, 2, 3, 4, 5, 6);
+
return {
privateSymbol: v8.createPrivateSymbol('sym'),
fulfilledPromise, // should be fulfilled with 1
@@ -111,7 +115,8 @@
rejectedButHandledPromise, // should be rejected but have a handler
promiseStates, // should be the string "pending fulfilled rejected"
promiseIsPromise: v8.isPromise(fulfilledPromise), // should be true
- thenableIsPromise: v8.isPromise({ then() { } }) // should be false
+ thenableIsPromise: v8.isPromise({ then() { } }), // should be false
+ uncurryThis // should be true
};
};
})
diff --git a/deps/v8/test/cctest/test-factory.cc b/deps/v8/test/cctest/test-factory.cc
new file mode 100644
index 0000000000..a282f4bccd
--- /dev/null
+++ b/deps/v8/test/cctest/test-factory.cc
@@ -0,0 +1,46 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "include/v8.h"
+
+#include "src/handles-inl.h"
+#include "src/isolate.h"
+#include "test/cctest/cctest.h"
+
+namespace v8 {
+namespace internal {
+namespace test_factory {
+
+TEST(Factory_NewCode) {
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate);
+ HandleScope scope(i_isolate);
+
+ // Create a big function that ends up in CODE_LO_SPACE.
+ const int instruction_size = kMaxRegularHeapObjectSize + 1;
+ std::unique_ptr<byte[]> instructions(new byte[instruction_size]);
+
+ CodeDesc desc;
+ desc.buffer = instructions.get();
+ desc.buffer_size = instruction_size;
+ desc.instr_size = instruction_size;
+ desc.reloc_size = 0;
+ desc.constant_pool_size = 0;
+ desc.unwinding_info = nullptr;
+ desc.unwinding_info_size = 0;
+ desc.origin = nullptr;
+ Handle<Object> self_ref;
+ Handle<Code> code =
+ i_isolate->factory()->NewCode(desc, Code::WASM_FUNCTION, self_ref);
+
+ CHECK(i_isolate->heap()->InSpace(*code, CODE_LO_SPACE));
+#if VERIFY_HEAP
+ code->ObjectVerify(i_isolate);
+#endif
+}
+
+} // namespace test_factory
+} // namespace internal
+} // namespace v8
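Note: the new test-factory.cc exists to cover the large-object path: kMaxRegularHeapObjectSize is the biggest object a regular page can hold, so an instruction buffer one byte past it should push Factory::NewCode into CODE_LO_SPACE. A hedged sketch of the size rule the test relies on; the actual decision lives inside the heap allocator:

    // Assumed allocation rule, for illustration only.
    bool NeedsLargeObjectSpace(int object_size) {
      return object_size > kMaxRegularHeapObjectSize;
    }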
diff --git a/deps/v8/test/cctest/test-feedback-vector.cc b/deps/v8/test/cctest/test-feedback-vector.cc
index 80ae82d799..c241ac6b7d 100644
--- a/deps/v8/test/cctest/test-feedback-vector.cc
+++ b/deps/v8/test/cctest/test-feedback-vector.cc
@@ -12,6 +12,7 @@
#include "src/heap/factory.h"
#include "src/macro-assembler.h"
#include "src/objects-inl.h"
+#include "src/objects/feedback-cell-inl.h"
#include "test/cctest/test-feedback-vector.h"
namespace v8 {
@@ -95,7 +96,7 @@ TEST(VectorStructure) {
CHECK_EQ(1,
FeedbackMetadata::GetSlotSize(FeedbackSlotKind::kCreateClosure));
FeedbackSlot slot = helper.slot(1);
- FeedbackCell* cell =
+ FeedbackCell cell =
FeedbackCell::cast(vector->Get(slot)->GetHeapObjectAssumeStrong());
CHECK_EQ(cell->value(), *factory->undefined_value());
}
@@ -158,7 +159,9 @@ TEST(VectorICMetadata) {
TEST(VectorCallICStates) {
+ if (!i::FLAG_use_ic) return;
if (i::FLAG_always_opt) return;
+
CcTest::InitializeVM();
LocalContext context;
v8::HandleScope scope(context->GetIsolate());
@@ -184,7 +187,9 @@ TEST(VectorCallICStates) {
}
TEST(VectorCallFeedback) {
+ if (!i::FLAG_use_ic) return;
if (i::FLAG_always_opt) return;
+
CcTest::InitializeVM();
LocalContext context;
v8::HandleScope scope(context->GetIsolate());
@@ -202,7 +207,7 @@ TEST(VectorCallFeedback) {
FeedbackNexus nexus(feedback_vector, slot);
CHECK_EQ(MONOMORPHIC, nexus.StateFromFeedback());
- HeapObject* heap_object;
+ HeapObject heap_object;
CHECK(nexus.GetFeedback()->GetHeapObjectIfWeak(&heap_object));
CHECK_EQ(*foo, heap_object);
@@ -212,7 +217,9 @@ TEST(VectorCallFeedback) {
}
TEST(VectorCallFeedbackForArray) {
+ if (!i::FLAG_use_ic) return;
if (i::FLAG_always_opt) return;
+
CcTest::InitializeVM();
LocalContext context;
v8::HandleScope scope(context->GetIsolate());
@@ -227,7 +234,7 @@ TEST(VectorCallFeedbackForArray) {
FeedbackNexus nexus(feedback_vector, slot);
CHECK_EQ(MONOMORPHIC, nexus.StateFromFeedback());
- HeapObject* heap_object;
+ HeapObject heap_object;
CHECK(nexus.GetFeedback()->GetHeapObjectIfWeak(&heap_object));
CHECK_EQ(*isolate->array_function(), heap_object);
@@ -247,7 +254,9 @@ size_t GetFeedbackVectorLength(Isolate* isolate, const char* src,
}
TEST(OneShotCallICSlotCount) {
+ if (!i::FLAG_use_ic) return;
if (i::FLAG_always_opt) return;
+
CcTest::InitializeVM();
LocalContext context;
v8::HandleScope scope(context->GetIsolate());
@@ -299,7 +308,9 @@ TEST(OneShotCallICSlotCount) {
}
TEST(VectorCallCounts) {
+ if (!i::FLAG_use_ic) return;
if (i::FLAG_always_opt) return;
+
CcTest::InitializeVM();
LocalContext context;
v8::HandleScope scope(context->GetIsolate());
@@ -328,7 +339,9 @@ TEST(VectorCallCounts) {
}
TEST(VectorConstructCounts) {
+ if (!i::FLAG_use_ic) return;
if (i::FLAG_always_opt) return;
+
CcTest::InitializeVM();
LocalContext context;
v8::HandleScope scope(context->GetIsolate());
@@ -359,7 +372,9 @@ TEST(VectorConstructCounts) {
}
TEST(VectorSpeculationMode) {
+ if (!i::FLAG_use_ic) return;
if (i::FLAG_always_opt) return;
+
CcTest::InitializeVM();
LocalContext context;
v8::HandleScope scope(context->GetIsolate());
@@ -391,7 +406,9 @@ TEST(VectorSpeculationMode) {
}
TEST(VectorLoadICStates) {
+ if (!i::FLAG_use_ic) return;
if (i::FLAG_always_opt) return;
+
CcTest::InitializeVM();
LocalContext context;
v8::HandleScope scope(context->GetIsolate());
@@ -436,7 +453,7 @@ TEST(VectorLoadICStates) {
// Finally driven megamorphic.
CompileRun("f({ blarg: 3, gran: 3, torino: 10, foo: 2 })");
CHECK_EQ(MEGAMORPHIC, nexus.StateFromFeedback());
- CHECK(!nexus.FindFirstMap());
+ CHECK(nexus.FindFirstMap().is_null());
// After a collection, state should not be reset to PREMONOMORPHIC.
CcTest::CollectAllGarbage();
@@ -444,7 +461,9 @@ TEST(VectorLoadICStates) {
}
TEST(VectorLoadGlobalICSlotSharing) {
+ if (!i::FLAG_use_ic) return;
if (i::FLAG_always_opt) return;
+
CcTest::InitializeVM();
LocalContext context;
v8::HandleScope scope(context->GetIsolate());
@@ -479,7 +498,9 @@ TEST(VectorLoadGlobalICSlotSharing) {
TEST(VectorLoadICOnSmi) {
+ if (!i::FLAG_use_ic) return;
if (i::FLAG_always_opt) return;
+
CcTest::InitializeVM();
LocalContext context;
v8::HandleScope scope(context->GetIsolate());
@@ -501,7 +522,7 @@ TEST(VectorLoadICOnSmi) {
CompileRun("f(34)");
CHECK_EQ(MONOMORPHIC, nexus.StateFromFeedback());
// Verify that the monomorphic map is the one we expect.
- Map* number_map = ReadOnlyRoots(heap).heap_number_map();
+ Map number_map = ReadOnlyRoots(heap).heap_number_map();
CHECK_EQ(number_map, nexus.FindFirstMap());
// Now go polymorphic on o.
@@ -537,7 +558,9 @@ TEST(VectorLoadICOnSmi) {
TEST(ReferenceContextAllocatesNoSlots) {
+ if (!i::FLAG_use_ic) return;
if (i::FLAG_always_opt) return;
+
CcTest::InitializeVM();
LocalContext context;
v8::HandleScope scope(context->GetIsolate());
@@ -675,6 +698,7 @@ TEST(ReferenceContextAllocatesNoSlots) {
TEST(VectorStoreICBasic) {
+ if (!i::FLAG_use_ic) return;
if (i::FLAG_always_opt) return;
CcTest::InitializeVM();
@@ -700,6 +724,7 @@ TEST(VectorStoreICBasic) {
}
TEST(StoreOwnIC) {
+ if (!i::FLAG_use_ic) return;
if (i::FLAG_always_opt) return;
CcTest::InitializeVM();
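Note: all of the feedback-vector additions above are the same guard. These tests poke at inline-cache state, which does not exist when ICs are disabled (e.g., when running with --no-use-ic), so each bails out before doing any work. The pattern, applied uniformly:

    TEST(SomeVectorICTest) {           // hypothetical name; the real tests are above
      if (!i::FLAG_use_ic) return;     // no ICs, nothing to assert on
      if (i::FLAG_always_opt) return;  // always-optimized code skews IC state
      // ... test body ...
    }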
diff --git a/deps/v8/test/cctest/test-field-type-tracking.cc b/deps/v8/test/cctest/test-field-type-tracking.cc
index f40dbe83bd..dca13242ba 100644
--- a/deps/v8/test/cctest/test-field-type-tracking.cc
+++ b/deps/v8/test/cctest/test-field-type-tracking.cc
@@ -19,7 +19,10 @@
#include "src/ic/stub-cache.h"
#include "src/macro-assembler.h"
#include "src/objects-inl.h"
+#include "src/objects/heap-number-inl.h"
+#include "src/objects/struct-inl.h"
#include "src/optimized-compilation-info.h"
+#include "src/ostreams.h"
#include "src/property.h"
#include "src/transitions.h"
@@ -78,6 +81,14 @@ static Handle<AccessorPair> CreateAccessorPair(bool with_getter,
return pair;
}
+// Check cached migration target map after Map::Update() and Map::TryUpdate()
+static void CheckMigrationTarget(Isolate* isolate, Map old_map, Map new_map) {
+ Map target = TransitionsAccessor(isolate, handle(old_map, isolate))
+ .GetMigrationTarget();
+ if (target.is_null()) return;
+ CHECK_EQ(new_map, target);
+ CHECK_EQ(Map::TryUpdateSlow(isolate, old_map), target);
+}
class Expectations {
static const int MAX_PROPERTIES = 10;
@@ -256,7 +267,7 @@ class Expectations {
}
}
- bool Check(DescriptorArray* descriptors, int descriptor) const {
+ bool Check(DescriptorArray descriptors, int descriptor) const {
PropertyDetails details = descriptors->GetDetails(descriptor);
if (details.kind() != kinds_[descriptor]) return false;
@@ -269,17 +280,17 @@ class Expectations {
Representation expected_representation = representations_[descriptor];
if (!details.representation().Equals(expected_representation)) return false;
- Object* expected_value = *values_[descriptor];
+ Object expected_value = *values_[descriptor];
if (details.location() == kField) {
if (details.kind() == kData) {
- FieldType* type = descriptors->GetFieldType(descriptor);
+ FieldType type = descriptors->GetFieldType(descriptor);
return FieldType::cast(expected_value) == type;
} else {
// kAccessor
UNREACHABLE();
}
} else {
- Object* value = descriptors->GetStrongValue(descriptor);
+ Object value = descriptors->GetStrongValue(descriptor);
// kDescriptor
if (details.kind() == kData) {
CHECK(!FLAG_track_constant_fields);
@@ -288,20 +299,20 @@ class Expectations {
// kAccessor
if (value == expected_value) return true;
if (!value->IsAccessorPair()) return false;
- AccessorPair* pair = AccessorPair::cast(value);
+ AccessorPair pair = AccessorPair::cast(value);
return pair->Equals(expected_value, *setter_values_[descriptor]);
}
}
UNREACHABLE();
}
- bool Check(Map* map, int expected_nof) const {
+ bool Check(Map map, int expected_nof) const {
CHECK_EQ(elements_kind_, map->elements_kind());
CHECK(number_of_properties_ <= MAX_PROPERTIES);
CHECK_EQ(expected_nof, map->NumberOfOwnDescriptors());
CHECK(!map->is_dictionary_map());
- DescriptorArray* descriptors = map->instance_descriptors();
+ DescriptorArray descriptors = map->instance_descriptors();
CHECK(expected_nof <= number_of_properties_);
for (int i = 0; i < expected_nof; i++) {
if (!Check(descriptors, i)) {
@@ -316,8 +327,7 @@ class Expectations {
return true;
}
- bool Check(Map* map) const { return Check(map, number_of_properties_); }
-
+ bool Check(Map map) const { return Check(map, number_of_properties_); }
//
// Helper methods for initializing expectations and adding properties to
@@ -398,9 +408,9 @@ class Expectations {
heap_type);
Handle<String> name = MakeName("prop", property_index);
- Map* target = TransitionsAccessor(isolate_, map)
- .SearchTransition(*name, kData, attributes);
- CHECK_NOT_NULL(target);
+ Map target = TransitionsAccessor(isolate_, map)
+ .SearchTransition(*name, kData, attributes);
+ CHECK(!target.is_null());
return handle(target, isolate_);
}
@@ -573,7 +583,7 @@ TEST(ReconfigureAccessorToNonExistingDataFieldHeavy) {
// Check that the property contains |value|.
CHECK_EQ(1, obj->map()->NumberOfOwnDescriptors());
FieldIndex index = FieldIndex::ForDescriptor(obj->map(), 0);
- Object* the_value = obj->RawFastPropertyAt(index);
+ Object the_value = obj->RawFastPropertyAt(index);
CHECK(the_value->IsSmi());
CHECK_EQ(42, Smi::ToInt(the_value));
}
@@ -695,9 +705,9 @@ static void TestGeneralizeField(int detach_property_at_index,
{
// Check that all previous maps are not stable.
- Map* tmp = *new_map;
+ Map tmp = *new_map;
while (true) {
- Object* back = tmp->GetBackPointer();
+ Object back = tmp->GetBackPointer();
if (back->IsUndefined(isolate)) break;
tmp = Map::cast(back);
CHECK(!tmp->is_stable());
@@ -707,6 +717,7 @@ static void TestGeneralizeField(int detach_property_at_index,
// Update all deprecated maps and check that they are now the same.
Handle<Map> updated_map = Map::Update(isolate, map);
CHECK_EQ(*new_map, *updated_map);
+ CheckMigrationTarget(isolate, *map, *updated_map);
}
static void TestGeneralizeField(const CRFTData& from, const CRFTData& to,
@@ -967,9 +978,11 @@ TEST(GeneralizeFieldWithAccessorProperties) {
// Update all deprecated maps and check that they are now the same.
Handle<Map> updated_map = Map::Update(isolate, map);
CHECK_EQ(*active_map, *updated_map);
+ CheckMigrationTarget(isolate, *map, *updated_map);
for (int i = 0; i < kPropCount; i++) {
updated_map = Map::Update(isolate, maps[i]);
CHECK_EQ(*active_map, *updated_map);
+ CheckMigrationTarget(isolate, *maps[i], *updated_map);
}
}
@@ -1060,6 +1073,7 @@ static void TestReconfigureDataFieldAttribute_GeneralizeField(
// Update deprecated |map|, it should become |new_map|.
Handle<Map> updated_map = Map::Update(isolate, map);
CHECK_EQ(*new_map, *updated_map);
+ CheckMigrationTarget(isolate, *map, *updated_map);
}
// This test ensures that trivial field generalization (from HeapObject to
@@ -1370,6 +1384,7 @@ struct CheckDeprecated {
// Update deprecated |map|, it should become |new_map|.
Handle<Map> updated_map = Map::Update(isolate, map);
CHECK_EQ(*new_map, *updated_map);
+ CheckMigrationTarget(isolate, *map, *updated_map);
}
};
@@ -1828,13 +1843,14 @@ static void TestReconfigureElementsKind_GeneralizeField(
// Update deprecated |map|, it should become |new_map|.
Handle<Map> updated_map = Map::Update(isolate, map);
CHECK_EQ(*new_map, *updated_map);
+ CheckMigrationTarget(isolate, *map, *updated_map);
// Ensure Map::FindElementsKindTransitionedMap() is able to find the
// transitioned map.
{
MapHandles map_list;
map_list.push_back(updated_map);
- Map* transitioned_map =
+ Map transitioned_map =
map2->FindElementsKindTransitionedMap(isolate, map_list);
CHECK_EQ(*updated_map, transitioned_map);
}
@@ -1932,7 +1948,7 @@ static void TestReconfigureElementsKind_GeneralizeFieldTrivial(
{
MapHandles map_list;
map_list.push_back(updated_map);
- Map* transitioned_map =
+ Map transitioned_map =
map2->FindElementsKindTransitionedMap(isolate, map_list);
CHECK_EQ(*updated_map, transitioned_map);
}
@@ -2187,9 +2203,9 @@ TEST(ReconfigurePropertySplitMapTransitionsOverflow) {
}
Handle<String> name = MakeName("prop", i);
- Map* target = TransitionsAccessor(isolate, map2)
- .SearchTransition(*name, kData, NONE);
- CHECK_NOT_NULL(target);
+ Map target = TransitionsAccessor(isolate, map2)
+ .SearchTransition(*name, kData, NONE);
+ CHECK(!target.is_null());
map2 = handle(target, isolate);
}
@@ -2339,9 +2355,11 @@ static void TestGeneralizeFieldWithSpecialTransition(TestConfig& config,
// Update all deprecated maps and check that they are now the same.
Handle<Map> updated_map = Map::Update(isolate, map);
CHECK_EQ(*active_map, *updated_map);
+ CheckMigrationTarget(isolate, *map, *updated_map);
for (int i = 0; i < kPropCount; i++) {
updated_map = Map::Update(isolate, maps[i]);
CHECK_EQ(*active_map, *updated_map);
+ CheckMigrationTarget(isolate, *maps[i], *updated_map);
}
}
@@ -2623,6 +2641,7 @@ struct FieldGeneralizationChecker {
CHECK_NE(*map1, *map2);
Handle<Map> updated_map = Map::Update(isolate, map1);
CHECK_EQ(*map2, *updated_map);
+ CheckMigrationTarget(isolate, *map1, *updated_map);
expectations2.SetDataField(descriptor_, attributes_, constness_,
representation_, heap_type_);
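Note: CheckMigrationTarget, added near the top of this file and threaded through every generalization test, verifies the migration-target cache: once Map::Update replaces a deprecated map, any target cached in the old map's TransitionsAccessor must be exactly that replacement, and Map::TryUpdateSlow must agree. Typical call site, as used throughout the hunks above:

    Handle<Map> updated_map = Map::Update(isolate, map);
    CHECK_EQ(*new_map, *updated_map);
    CheckMigrationTarget(isolate, *map, *updated_map);  // cached target must agree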
diff --git a/deps/v8/test/cctest/test-flags.cc b/deps/v8/test/cctest/test-flags.cc
index 20db9547e9..e38a61f4b3 100644
--- a/deps/v8/test/cctest/test-flags.cc
+++ b/deps/v8/test/cctest/test-flags.cc
@@ -183,76 +183,12 @@ TEST(Flags6b) {
CHECK_EQ(3, FlagList::SetFlagsFromString(str, StrLength(str)));
}
-
-TEST(FlagsJSArguments1) {
- SetFlagsToDefault();
- int argc = 6;
- const char* argv[] = {"TestJSArgs1",
- "--testing-int-flag", "42",
- "--", "testing-float-flag", "7"};
- CHECK_EQ(0, FlagList::SetFlagsFromCommandLine(&argc,
- const_cast<char **>(argv),
- true));
- CHECK_EQ(42, FLAG_testing_int_flag);
- CHECK_EQ(2.5, FLAG_testing_float_flag);
- CHECK_EQ(2, FLAG_js_arguments.argc);
- CHECK_EQ(0, strcmp(FLAG_js_arguments[0], "testing-float-flag"));
- CHECK_EQ(0, strcmp(FLAG_js_arguments[1], "7"));
- CHECK_EQ(1, argc);
-}
-
-
-TEST(FlagsJSArguments1b) {
- SetFlagsToDefault();
- const char* str = "--testing-int-flag 42 -- testing-float-flag 7";
- CHECK_EQ(0, FlagList::SetFlagsFromString(str, StrLength(str)));
- CHECK_EQ(42, FLAG_testing_int_flag);
- CHECK_EQ(2.5, FLAG_testing_float_flag);
- CHECK_EQ(2, FLAG_js_arguments.argc);
- CHECK_EQ(0, strcmp(FLAG_js_arguments[0], "testing-float-flag"));
- CHECK_EQ(0, strcmp(FLAG_js_arguments[1], "7"));
-}
-
-
-TEST(FlagsJSArguments2) {
- SetFlagsToDefault();
- const char* str = "--testing-int-flag 42 --js-arguments testing-float-flag 7";
- CHECK_EQ(0, FlagList::SetFlagsFromString(str, StrLength(str)));
- CHECK_EQ(42, FLAG_testing_int_flag);
- CHECK_EQ(2.5, FLAG_testing_float_flag);
- CHECK_EQ(2, FLAG_js_arguments.argc);
- CHECK_EQ(0, strcmp(FLAG_js_arguments[0], "testing-float-flag"));
- CHECK_EQ(0, strcmp(FLAG_js_arguments[1], "7"));
-}
-
-
-TEST(FlagsJSArguments3) {
- SetFlagsToDefault();
- const char* str = "--testing-int-flag 42 --js-arguments=testing-float-flag 7";
- CHECK_EQ(0, FlagList::SetFlagsFromString(str, StrLength(str)));
- CHECK_EQ(42, FLAG_testing_int_flag);
- CHECK_EQ(2.5, FLAG_testing_float_flag);
- CHECK_EQ(2, FLAG_js_arguments.argc);
- CHECK_EQ(0, strcmp(FLAG_js_arguments[0], "testing-float-flag"));
- CHECK_EQ(0, strcmp(FLAG_js_arguments[1], "7"));
-}
-
-
-TEST(FlagsJSArguments4) {
- SetFlagsToDefault();
- const char* str = "--testing-int-flag 42 --";
- CHECK_EQ(0, FlagList::SetFlagsFromString(str, StrLength(str)));
- CHECK_EQ(42, FLAG_testing_int_flag);
- CHECK_EQ(0, FLAG_js_arguments.argc);
-}
-
-
TEST(FlagsRemoveIncomplete) {
// Test that processed command line arguments are removed, even
// if the list of arguments ends unexpectedly.
SetFlagsToDefault();
int argc = 3;
- const char* argv[] = {"", "--opt", "--expose-natives-as"};
+ const char* argv[] = {"", "--testing-bool-flag", "--expose-gc-as"};
CHECK_EQ(2, FlagList::SetFlagsFromCommandLine(&argc,
const_cast<char **>(argv),
true));
@@ -260,5 +196,18 @@ TEST(FlagsRemoveIncomplete) {
CHECK_EQ(2, argc);
}
+TEST(FlagsJitlessImplications) {
+ if (FLAG_jitless) {
+ // Double-check implications work as expected. Our implication system is
+ // fairly primitive and can break easily depending on the implication
+ // definition order in flag-definitions.h.
+ CHECK(!FLAG_opt);
+ CHECK(!FLAG_validate_asm);
+ CHECK(FLAG_wasm_interpret_all);
+ CHECK(!FLAG_asm_wasm_lazy_compilation);
+ CHECK(!FLAG_wasm_lazy_compilation);
+ }
+}
+
} // namespace internal
} // namespace v8
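Note: FlagsJitlessImplications documents which flags --jitless is expected to force, guarding against ordering bugs in the implication machinery. For reference, such rules are written with the DEFINE_IMPLICATION macros in src/flag-definitions.h and are processed in definition order; the lines below are a hypothetical sketch of their shape, not the actual definitions:

    // Hypothetical shape of the definitions this test guards:
    DEFINE_NEG_IMPLICATION(jitless, opt)
    DEFINE_NEG_IMPLICATION(jitless, validate_asm)
    DEFINE_IMPLICATION(jitless, wasm_interpret_all)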
diff --git a/deps/v8/test/cctest/test-func-name-inference.cc b/deps/v8/test/cctest/test-func-name-inference.cc
index 61f3ef0eeb..23ac83a953 100644
--- a/deps/v8/test/cctest/test-func-name-inference.cc
+++ b/deps/v8/test/cctest/test-func-name-inference.cc
@@ -71,7 +71,7 @@ static void CheckFunctionName(v8::Local<v8::Script> script,
{
i::DisallowHeapAllocation no_gc;
Vector<const uint8_t> func_pos_str = i::OneByteVector(func_pos_src);
- i::String::FlatContent script_content = script_src->GetFlatContent();
+ i::String::FlatContent script_content = script_src->GetFlatContent(no_gc);
func_pos = SearchString(isolate, script_content.ToOneByteVector(),
func_pos_str, 0);
}
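Note: String::GetFlatContent now takes the DisallowHeapAllocation scope as a witness parameter, turning "do not allocate while the flat content is live" from a convention into something the signature enforces. The call pattern after the change, a sketch built from the names above:

    i::DisallowHeapAllocation no_gc;
    i::String::FlatContent content = script_src->GetFlatContent(no_gc);
    if (content.IsOneByte()) {
      Vector<const uint8_t> chars = content.ToOneByteVector();
      USE(chars);  // only valid to read while no_gc is in scope
    }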
diff --git a/deps/v8/test/cctest/test-fuzz-arm64.cc b/deps/v8/test/cctest/test-fuzz-arm64.cc
index 4345bb5f44..92f917a703 100644
--- a/deps/v8/test/cctest/test-fuzz-arm64.cc
+++ b/deps/v8/test/cctest/test-fuzz-arm64.cc
@@ -29,6 +29,12 @@
#include "src/arm64/decoder-arm64-inl.h"
#include "src/arm64/disasm-arm64.h"
+#if defined(V8_OS_WIN)
+#define RANDGEN() rand()
+#else
+#define RANDGEN() mrand48()
+#endif
+
namespace v8 {
namespace internal {
@@ -37,14 +43,18 @@ TEST(FUZZ_decoder) {
// 43 million = ~1% of the instruction space.
static const int instruction_count = 43 * 1024 * 1024;
+#if defined(V8_OS_WIN)
+ srand(1);
+#else
uint16_t seed[3] = {1, 2, 3};
seed48(seed);
+#endif
Decoder<DispatchingDecoderVisitor> decoder;
Instruction buffer[kInstrSize];
for (int i = 0; i < instruction_count; i++) {
- uint32_t instr = static_cast<uint32_t>(mrand48());
+ uint32_t instr = static_cast<uint32_t>(RANDGEN());
buffer->SetInstructionBits(instr);
decoder.Decode(buffer);
}
@@ -56,8 +66,12 @@ TEST(FUZZ_disasm) {
// 9 million = ~0.2% of the instruction space.
static const int instruction_count = 9 * 1024 * 1024;
+#if defined(V8_OS_WIN)
+ srand(42);
+#else
uint16_t seed[3] = {42, 43, 44};
seed48(seed);
+#endif
Decoder<DispatchingDecoderVisitor> decoder;
DisassemblingDecoder disasm;
@@ -65,7 +79,7 @@ TEST(FUZZ_disasm) {
decoder.AppendVisitor(&disasm);
for (int i = 0; i < instruction_count; i++) {
- uint32_t instr = static_cast<uint32_t>(mrand48());
+ uint32_t instr = static_cast<uint32_t>(RANDGEN());
buffer->SetInstructionBits(instr);
decoder.Decode(buffer);
}
@@ -73,3 +87,5 @@ TEST(FUZZ_disasm) {
} // namespace internal
} // namespace v8
+
+#undef RANDGEN
diff --git a/deps/v8/test/cctest/test-global-handles.cc b/deps/v8/test/cctest/test-global-handles.cc
index ea76faa857..0db56e382a 100644
--- a/deps/v8/test/cctest/test-global-handles.cc
+++ b/deps/v8/test/cctest/test-global-handles.cc
@@ -37,6 +37,10 @@ namespace internal {
namespace {
+void InvokeScavenge() { CcTest::CollectGarbage(i::NEW_SPACE); }
+
+void InvokeMarkSweep() { CcTest::CollectAllGarbage(); }
+
void SimpleCallback(const v8::FunctionCallbackInfo<v8::Value>& info) {
info.GetReturnValue().Set(v8_num(0));
}
@@ -141,7 +145,7 @@ TEST(EternalHandles) {
int indices[kArrayLength];
v8::Eternal<v8::Value> eternals[kArrayLength];
- CHECK_EQ(0, eternal_handles->NumberOfHandles());
+ CHECK_EQ(0, eternal_handles->handles_count());
for (int i = 0; i < kArrayLength; i++) {
indices[i] = -1;
HandleScope scope(isolate);
@@ -180,7 +184,7 @@ TEST(EternalHandles) {
}
}
- CHECK_EQ(2*kArrayLength, eternal_handles->NumberOfHandles());
+ CHECK_EQ(2 * kArrayLength, eternal_handles->handles_count());
// Create an eternal via the constructor
{
@@ -191,7 +195,7 @@ TEST(EternalHandles) {
CHECK(object == eternal.Get(v8_isolate));
}
- CHECK_EQ(2*kArrayLength + 1, eternal_handles->NumberOfHandles());
+ CHECK_EQ(2 * kArrayLength + 1, eternal_handles->handles_count());
}
@@ -272,14 +276,14 @@ TEST(WeakHandleToUnmodifiedJSObjectSurvivesScavenge) {
CcTest::InitializeVM();
WeakHandleTest(
CcTest::isolate(), &ConstructJSObject, [](FlagAndPersistent* fp) {},
- []() { CcTest::CollectGarbage(i::NEW_SPACE); }, SurvivalMode::kSurvives);
+ []() { InvokeScavenge(); }, SurvivalMode::kSurvives);
}
TEST(WeakHandleToUnmodifiedJSObjectDiesOnMarkCompact) {
CcTest::InitializeVM();
WeakHandleTest(
CcTest::isolate(), &ConstructJSObject, [](FlagAndPersistent* fp) {},
- []() { CcTest::CollectGarbage(i::OLD_SPACE); }, SurvivalMode::kDies);
+ []() { InvokeMarkSweep(); }, SurvivalMode::kDies);
}
TEST(WeakHandleToUnmodifiedJSObjectSurvivesMarkCompactWhenInHandle) {
@@ -291,14 +295,14 @@ TEST(WeakHandleToUnmodifiedJSObjectSurvivesMarkCompactWhenInHandle) {
v8::Local<v8::Object>::New(CcTest::isolate(), fp->handle);
USE(handle);
},
- []() { CcTest::CollectGarbage(i::OLD_SPACE); }, SurvivalMode::kSurvives);
+ []() { InvokeMarkSweep(); }, SurvivalMode::kSurvives);
}
TEST(WeakHandleToUnmodifiedJSApiObjectDiesOnScavenge) {
CcTest::InitializeVM();
WeakHandleTest(
CcTest::isolate(), &ConstructJSApiObject, [](FlagAndPersistent* fp) {},
- []() { CcTest::CollectGarbage(i::NEW_SPACE); }, SurvivalMode::kDies);
+ []() { InvokeScavenge(); }, SurvivalMode::kDies);
}
TEST(WeakHandleToUnmodifiedJSApiObjectSurvivesScavengeWhenInHandle) {
@@ -310,14 +314,14 @@ TEST(WeakHandleToUnmodifiedJSApiObjectSurvivesScavengeWhenInHandle) {
v8::Local<v8::Object>::New(CcTest::isolate(), fp->handle);
USE(handle);
},
- []() { CcTest::CollectGarbage(i::NEW_SPACE); }, SurvivalMode::kSurvives);
+ []() { InvokeScavenge(); }, SurvivalMode::kSurvives);
}
TEST(WeakHandleToUnmodifiedJSApiObjectDiesOnMarkCompact) {
CcTest::InitializeVM();
WeakHandleTest(
CcTest::isolate(), &ConstructJSApiObject, [](FlagAndPersistent* fp) {},
- []() { CcTest::CollectGarbage(i::OLD_SPACE); }, SurvivalMode::kDies);
+ []() { InvokeMarkSweep(); }, SurvivalMode::kDies);
}
TEST(WeakHandleToUnmodifiedJSApiObjectSurvivesMarkCompactWhenInHandle) {
@@ -329,23 +333,23 @@ TEST(WeakHandleToUnmodifiedJSApiObjectSurvivesMarkCompactWhenInHandle) {
v8::Local<v8::Object>::New(CcTest::isolate(), fp->handle);
USE(handle);
},
- []() { CcTest::CollectGarbage(i::OLD_SPACE); }, SurvivalMode::kSurvives);
+ []() { InvokeMarkSweep(); }, SurvivalMode::kSurvives);
}
TEST(WeakHandleToActiveUnmodifiedJSApiObjectSurvivesScavenge) {
CcTest::InitializeVM();
- WeakHandleTest(CcTest::isolate(), &ConstructJSApiObject,
- [](FlagAndPersistent* fp) { fp->handle.MarkActive(); },
- []() { CcTest::CollectGarbage(i::NEW_SPACE); },
- SurvivalMode::kSurvives);
+ WeakHandleTest(
+ CcTest::isolate(), &ConstructJSApiObject,
+ [](FlagAndPersistent* fp) { fp->handle.MarkActive(); },
+ []() { InvokeScavenge(); }, SurvivalMode::kSurvives);
}
TEST(WeakHandleToActiveUnmodifiedJSApiObjectDiesOnMarkCompact) {
CcTest::InitializeVM();
- WeakHandleTest(CcTest::isolate(), &ConstructJSApiObject,
- [](FlagAndPersistent* fp) { fp->handle.MarkActive(); },
- []() { CcTest::CollectGarbage(i::OLD_SPACE); },
- SurvivalMode::kDies);
+ WeakHandleTest(
+ CcTest::isolate(), &ConstructJSApiObject,
+ [](FlagAndPersistent* fp) { fp->handle.MarkActive(); },
+ []() { InvokeMarkSweep(); }, SurvivalMode::kDies);
}
TEST(WeakHandleToActiveUnmodifiedJSApiObjectSurvivesMarkCompactWhenInHandle) {
@@ -358,7 +362,29 @@ TEST(WeakHandleToActiveUnmodifiedJSApiObjectSurvivesMarkCompactWhenInHandle) {
v8::Local<v8::Object>::New(CcTest::isolate(), fp->handle);
USE(handle);
},
- []() { CcTest::CollectGarbage(i::OLD_SPACE); }, SurvivalMode::kSurvives);
+ []() { InvokeMarkSweep(); }, SurvivalMode::kSurvives);
+}
+
+TEST(FinalizerOnUnmodifiedJSApiObjectDoesNotCrash) {
+ // See crbug.com/v8/8586.
+ CcTest::InitializeVM();
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ v8::Local<v8::Context> context = v8::Context::New(isolate);
+ v8::Context::Scope context_scope(context);
+
+ FlagAndPersistent fp;
+ // Could use a regular object and MarkIndependent too.
+ ConstructJSApiObject(isolate, context, &fp);
+ fp.handle.SetWeak(&fp, &ResetHandleAndSetFlag,
+ v8::WeakCallbackType::kFinalizer);
+ fp.flag = false;
+ {
+ v8::HandleScope scope(isolate);
+ v8::Local<v8::Object> tmp = v8::Local<v8::Object>::New(isolate, fp.handle);
+ USE(tmp);
+ InvokeScavenge();
+ }
}
namespace {
@@ -391,12 +417,12 @@ TEST(FinalizerResurrectsAndKeepsPhantomAliveOnMarkCompact) {
v8::Global<v8::Object> g1, g2;
ConstructFinalizerPointingPhantomHandle(CcTest::isolate(), &g1, &g2,
ResurrectingFinalizer);
- CcTest::CollectGarbage(i::OLD_SPACE);
+ InvokeMarkSweep();
// Both, g1 and g2, should stay alive as the finalizer resurrects the root
// object that transitively keeps the other one alive.
CHECK(!g1.IsEmpty());
CHECK(!g2.IsEmpty());
- CcTest::CollectGarbage(i::OLD_SPACE);
+ InvokeMarkSweep();
// The finalizer handle is now strong, so it should keep the objects alive.
CHECK(!g1.IsEmpty());
CHECK(!g2.IsEmpty());
@@ -407,12 +433,12 @@ TEST(FinalizerDiesAndKeepsPhantomAliveOnMarkCompact) {
v8::Global<v8::Object> g1, g2;
ConstructFinalizerPointingPhantomHandle(CcTest::isolate(), &g1, &g2,
ResettingFinalizer);
- CcTest::CollectGarbage(i::OLD_SPACE);
+ InvokeMarkSweep();
// Finalizer (g1) dies but the phantom handle (g2) is kept alive for one
// more round as the underlying object only dies on the next GC.
CHECK(g1.IsEmpty());
CHECK(!g2.IsEmpty());
- CcTest::CollectGarbage(i::OLD_SPACE);
+ InvokeMarkSweep();
// Phantom handle dies after one more round.
CHECK(g1.IsEmpty());
CHECK(g2.IsEmpty());
@@ -420,10 +446,6 @@ TEST(FinalizerDiesAndKeepsPhantomAliveOnMarkCompact) {
namespace {
-void InvokeScavenge() { CcTest::CollectGarbage(i::NEW_SPACE); }
-
-void InvokeMarkSweep() { CcTest::CollectAllGarbage(); }
-
void ForceScavenge2(const v8::WeakCallbackInfo<FlagAndPersistent>& data) {
data.GetParameter()->flag = true;
InvokeScavenge();
@@ -505,8 +527,8 @@ TEST(SecondPassPhantomCallbacks) {
fp.flag = false;
fp.handle.SetWeak(&fp, FirstPassCallback, v8::WeakCallbackType::kParameter);
CHECK(!fp.flag);
- CcTest::CollectGarbage(i::OLD_SPACE);
- CcTest::CollectGarbage(i::OLD_SPACE);
+ InvokeMarkSweep();
+ InvokeMarkSweep();
CHECK(fp.flag);
}
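Note: the new regression test (crbug.com/v8/8586) installs a kFinalizer weak callback on an unmodified API object and then scavenges while a local handle still points at it. ResetHandleAndSetFlag is defined earlier in this file; it presumably follows the usual finalizer shape, sketched here as an assumption rather than a quote of the fixture:

    // Sketch of a kFinalizer callback (assumed to match the fixture's helper):
    void ResetHandleAndSetFlag(
        const v8::WeakCallbackInfo<FlagAndPersistent>& data) {
      data.GetParameter()->handle.Reset();  // clearing is typical; leaving the
                                            // handle set makes it strong again
      data.GetParameter()->flag = true;
    }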
diff --git a/deps/v8/test/cctest/test-hashmap.cc b/deps/v8/test/cctest/test-hashmap.cc
index 1d72cfaf1c..4d93fe9bd5 100644
--- a/deps/v8/test/cctest/test-hashmap.cc
+++ b/deps/v8/test/cctest/test-hashmap.cc
@@ -27,6 +27,7 @@
#include <stdlib.h>
+#include "src/base/overflowing-math.h"
#include "src/v8.h"
#include "test/cctest/cctest.h"
@@ -133,7 +134,7 @@ void TestSet(IntKeyHash hash, int size) {
for (uint32_t i = 0; i < n; i++) {
CHECK_EQ(i, static_cast<double>(set.occupancy()));
set.Insert(x);
- x = x * factor + offset;
+ x = base::AddWithWraparound(base::MulWithWraparound(x, factor), offset);
}
CHECK_EQ(n, static_cast<double>(set.occupancy()));
@@ -141,7 +142,7 @@ void TestSet(IntKeyHash hash, int size) {
x = start;
for (uint32_t i = 0; i < n; i++) {
CHECK(set.Present(x));
- x = x * factor + offset;
+ x = base::AddWithWraparound(base::MulWithWraparound(x, factor), offset);
}
CHECK_EQ(n, static_cast<double>(set.occupancy()));
@@ -152,7 +153,7 @@ void TestSet(IntKeyHash hash, int size) {
CHECK(set.Present(x));
set.Remove(x);
CHECK(!set.Present(x));
- x = x * factor + offset;
+ x = base::AddWithWraparound(base::MulWithWraparound(x, factor), offset);
// Verify that the expected values are still there.
int y = start;
@@ -162,7 +163,7 @@ void TestSet(IntKeyHash hash, int size) {
} else {
CHECK(set.Present(y));
}
- y = y * factor + offset;
+ y = base::AddWithWraparound(base::MulWithWraparound(y, factor), offset);
}
}
CHECK_EQ(0u, set.occupancy());
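Note: the hash-stress loop above intentionally overflows int arithmetic (x * factor + offset), which is undefined behavior for signed types, so it now routes through the wraparound helpers from src/base/overflowing-math.h. Their assumed semantics, sketched: do the arithmetic in the unsigned domain, where wraparound is well defined, then cast back:

    // Illustration of the assumed behavior of base::MulWithWraparound;
    // the real helper lives in src/base/overflowing-math.h.
    inline int32_t MulWithWraparoundSketch(int32_t a, int32_t b) {
      return static_cast<int32_t>(static_cast<uint32_t>(a) *
                                  static_cast<uint32_t>(b));
    }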
diff --git a/deps/v8/test/cctest/test-heap-profiler.cc b/deps/v8/test/cctest/test-heap-profiler.cc
index f3c545fd83..84ecc78ff3 100644
--- a/deps/v8/test/cctest/test-heap-profiler.cc
+++ b/deps/v8/test/cctest/test-heap-profiler.cc
@@ -37,6 +37,7 @@
#include "src/api-inl.h"
#include "src/assembler-inl.h"
#include "src/base/hashmap.h"
+#include "src/base/optional.h"
#include "src/collector.h"
#include "src/debug/debug.h"
#include "src/objects-inl.h"
@@ -1194,9 +1195,7 @@ class TestStatsStream : public v8::OutputStream {
entries_size_(0),
intervals_count_(0),
first_interval_index_(-1) { }
- TestStatsStream(const TestStatsStream& stream)
-
- = default;
+ TestStatsStream(const TestStatsStream& stream) V8_NOEXCEPT = default;
~TestStatsStream() override = default;
void EndOfStream() override { ++eos_signaled_; }
WriteResult WriteAsciiChunk(char* buffer, int chars_written) override {
@@ -1898,8 +1897,7 @@ TEST(GetHeapValueForDeletedObject) {
CHECK(heap_profiler->FindObjectById(prop->GetId()).IsEmpty());
}
-
-static int StringCmp(const char* ref, i::String* act) {
+static int StringCmp(const char* ref, i::String act) {
std::unique_ptr<char[]> s_act = act->ToCString();
int result = strcmp(ref, s_act.get());
if (result != 0)
@@ -1932,37 +1930,37 @@ TEST(GetConstructor) {
.As<v8::Object>();
i::Handle<i::JSObject> js_obj1 =
i::Handle<i::JSObject>::cast(v8::Utils::OpenHandle(*obj1));
- CHECK(i::V8HeapExplorer::GetConstructor(*js_obj1));
+ CHECK(!i::V8HeapExplorer::GetConstructor(*js_obj1).is_null());
v8::Local<v8::Object> obj2 = js_global->Get(env.local(), v8_str("obj2"))
.ToLocalChecked()
.As<v8::Object>();
i::Handle<i::JSObject> js_obj2 =
i::Handle<i::JSObject>::cast(v8::Utils::OpenHandle(*obj2));
- CHECK(i::V8HeapExplorer::GetConstructor(*js_obj2));
+ CHECK(!i::V8HeapExplorer::GetConstructor(*js_obj2).is_null());
v8::Local<v8::Object> obj3 = js_global->Get(env.local(), v8_str("obj3"))
.ToLocalChecked()
.As<v8::Object>();
i::Handle<i::JSObject> js_obj3 =
i::Handle<i::JSObject>::cast(v8::Utils::OpenHandle(*obj3));
- CHECK(i::V8HeapExplorer::GetConstructor(*js_obj3));
+ CHECK(!i::V8HeapExplorer::GetConstructor(*js_obj3).is_null());
v8::Local<v8::Object> obj4 = js_global->Get(env.local(), v8_str("obj4"))
.ToLocalChecked()
.As<v8::Object>();
i::Handle<i::JSObject> js_obj4 =
i::Handle<i::JSObject>::cast(v8::Utils::OpenHandle(*obj4));
- CHECK(i::V8HeapExplorer::GetConstructor(*js_obj4));
+ CHECK(!i::V8HeapExplorer::GetConstructor(*js_obj4).is_null());
v8::Local<v8::Object> obj5 = js_global->Get(env.local(), v8_str("obj5"))
.ToLocalChecked()
.As<v8::Object>();
i::Handle<i::JSObject> js_obj5 =
i::Handle<i::JSObject>::cast(v8::Utils::OpenHandle(*obj5));
- CHECK(!i::V8HeapExplorer::GetConstructor(*js_obj5));
+ CHECK(i::V8HeapExplorer::GetConstructor(*js_obj5).is_null());
v8::Local<v8::Object> obj6 = js_global->Get(env.local(), v8_str("obj6"))
.ToLocalChecked()
.As<v8::Object>();
i::Handle<i::JSObject> js_obj6 =
i::Handle<i::JSObject>::cast(v8::Utils::OpenHandle(*obj6));
- CHECK(!i::V8HeapExplorer::GetConstructor(*js_obj6));
+ CHECK(i::V8HeapExplorer::GetConstructor(*js_obj6).is_null());
}
TEST(GetConstructorName) {
@@ -2206,12 +2204,12 @@ TEST(AccessorInfo) {
env->GetIsolate(), map, v8::HeapGraphEdge::kInternal, "descriptors");
CHECK(descriptors);
const v8::HeapGraphNode* length_name = GetProperty(
- env->GetIsolate(), descriptors, v8::HeapGraphEdge::kInternal, "2");
+ env->GetIsolate(), descriptors, v8::HeapGraphEdge::kInternal, "0");
CHECK(length_name);
CHECK_EQ(0, strcmp("length", *v8::String::Utf8Value(env->GetIsolate(),
length_name->GetName())));
const v8::HeapGraphNode* length_accessor = GetProperty(
- env->GetIsolate(), descriptors, v8::HeapGraphEdge::kInternal, "4");
+ env->GetIsolate(), descriptors, v8::HeapGraphEdge::kInternal, "2");
CHECK(length_accessor);
CHECK_EQ(0, strcmp("system / AccessorInfo",
*v8::String::Utf8Value(env->GetIsolate(),
@@ -2397,6 +2395,11 @@ TEST(MapHasDescriptorsAndTransitions) {
TEST(ManyLocalsInSharedContext) {
+ // This test gets very slow with slow asserts (18 minutes instead of 1:30,
+ // as of November 2018).
+#ifdef ENABLE_SLOW_DCHECKS
+ i::FLAG_enable_slow_asserts = false;
+#endif
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
v8::HeapProfiler* heap_profiler = env->GetIsolate()->GetHeapProfiler();
@@ -2443,6 +2446,8 @@ TEST(ManyLocalsInSharedContext) {
TEST(AllocationSitesAreVisible) {
+ if (i::FLAG_lite_mode) return;
+
LocalContext env;
v8::Isolate* isolate = env->GetIsolate();
v8::HandleScope scope(isolate);
@@ -2558,16 +2563,10 @@ TEST(CheckCodeNames) {
const v8::HeapSnapshot* snapshot = heap_profiler->TakeHeapSnapshot();
CHECK(ValidateSnapshot(snapshot));
- const char* stub_path[] = {"::(GC roots)", "::(Strong roots)",
- "code_stubs::", "::(StoreFastElementStub code)"};
- const v8::HeapGraphNode* node = GetNodeByPath(
- env->GetIsolate(), snapshot, stub_path, arraysize(stub_path));
- CHECK(node);
-
const char* builtin_path1[] = {"::(GC roots)", "::(Builtins)",
"::(KeyedLoadIC_Slow builtin)"};
- node = GetNodeByPath(env->GetIsolate(), snapshot, builtin_path1,
- arraysize(builtin_path1));
+ const v8::HeapGraphNode* node = GetNodeByPath(
+ env->GetIsolate(), snapshot, builtin_path1, arraysize(builtin_path1));
CHECK(node);
const char* builtin_path2[] = {"::(GC roots)", "::(Builtins)",
@@ -2692,7 +2691,9 @@ TEST(TrackHeapAllocationsWithInlining) {
const char* names[] = {"", "start", "f_0_0"};
AllocationTraceNode* node = FindNode(tracker, ArrayVector(names));
CHECK(node);
- CHECK_GE(node->allocation_count(), 8u);
+ // In lite mode, feedback and feedback metadata are not allocated.
+ unsigned int num_nodes = (i::FLAG_lite_mode) ? 6 : 8;
+ CHECK_GE(node->allocation_count(), num_nodes);
CHECK_GE(node->allocation_size(), 4 * node->allocation_count());
heap_profiler->StopTrackingHeapObjects();
}
@@ -3656,6 +3657,51 @@ TEST(SamplingHeapProfilerApiAllocation) {
heap_profiler->StopSamplingHeapProfiler();
}
+TEST(SamplingHeapProfilerApiSamples) {
+ v8::HandleScope scope(v8::Isolate::GetCurrent());
+ LocalContext env;
+ v8::HeapProfiler* heap_profiler = env->GetIsolate()->GetHeapProfiler();
+
+ // Suppress randomness to avoid flakiness in tests.
+ v8::internal::FLAG_sampling_heap_profiler_suppress_randomness = true;
+
+ heap_profiler->StartSamplingHeapProfiler(1024);
+
+ size_t count = 8 * 1024;
+ for (size_t i = 0; i < count; ++i) v8::Object::New(env->GetIsolate());
+
+ std::unique_ptr<v8::AllocationProfile> profile(
+ heap_profiler->GetAllocationProfile());
+ CHECK(profile);
+
+ std::vector<v8::AllocationProfile::Node*> nodes_to_visit;
+ std::unordered_set<uint32_t> node_ids;
+ nodes_to_visit.push_back(profile->GetRootNode());
+ while (!nodes_to_visit.empty()) {
+ v8::AllocationProfile::Node* node = nodes_to_visit.back();
+ nodes_to_visit.pop_back();
+ CHECK_LT(0, node->node_id);
+ CHECK_EQ(0, node_ids.count(node->node_id));
+ node_ids.insert(node->node_id);
+ nodes_to_visit.insert(nodes_to_visit.end(), node->children.begin(),
+ node->children.end());
+ }
+
+ size_t total_size = 0;
+ std::unordered_set<uint64_t> samples_set;
+ for (auto& sample : profile->GetSamples()) {
+ total_size += sample.size * sample.count;
+ CHECK_EQ(0, samples_set.count(sample.sample_id));
+ CHECK_EQ(1, node_ids.count(sample.node_id));
+ CHECK_GT(sample.node_id, 0);
+ CHECK_GT(sample.sample_id, 0);
+ samples_set.insert(sample.sample_id);
+ }
+ size_t object_size = total_size / count;
+ CHECK_GE(object_size, sizeof(void*) * 2);
+ heap_profiler->StopSamplingHeapProfiler();
+}
+
TEST(SamplingHeapProfilerLeftTrimming) {
v8::HandleScope scope(v8::Isolate::GetCurrent());
LocalContext env;
@@ -3802,7 +3848,7 @@ TEST(SamplingHeapProfilerSampleDuringDeopt) {
v8::internal::FLAG_sampling_heap_profiler_suppress_randomness = true;
// Small sample interval to force each object to be sampled.
- heap_profiler->StartSamplingHeapProfiler(i::kPointerSize);
+ heap_profiler->StartSamplingHeapProfiler(i::kTaggedSize);
// Lazy deopt from runtime call from inlined callback function.
const char* source =
@@ -3861,7 +3907,7 @@ TEST(WeakReference) {
i::Handle<i::FeedbackVector> fv = factory->NewFeedbackVector(shared_function);
// Create a Code.
- i::Assembler assm(i::AssemblerOptions{}, nullptr, 0);
+ i::Assembler assm(i::AssemblerOptions{});
assm.nop(); // supported on all architectures
i::CodeDesc desc;
assm.GetCode(i_isolate, &desc);
diff --git a/deps/v8/test/cctest/test-icache.cc b/deps/v8/test/cctest/test-icache.cc
new file mode 100644
index 0000000000..c1dde75a93
--- /dev/null
+++ b/deps/v8/test/cctest/test-icache.cc
@@ -0,0 +1,192 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/assembler-inl.h"
+#include "src/handles-inl.h"
+#include "src/macro-assembler-inl.h"
+#include "src/simulator.h"
+#include "test/cctest/cctest.h"
+#include "test/common/assembler-tester.h"
+
+namespace v8 {
+namespace internal {
+namespace test_icache {
+
+using F0 = int(int);
+
+#define __ masm.
+
+static constexpr int kNumInstr = 100;
+static constexpr int kNumIterations = 5;
+static constexpr int kBufferSize = 8 * KB;
+
+static void FloodWithInc(Isolate* isolate, TestingAssemblerBuffer* buffer) {
+ MacroAssembler masm(isolate, CodeObjectRequired::kYes, buffer->CreateView());
+#if V8_TARGET_ARCH_IA32
+ __ mov(eax, Operand(esp, kPointerSize));
+ for (int i = 0; i < kNumInstr; ++i) {
+ __ add(eax, Immediate(1));
+ }
+#elif V8_TARGET_ARCH_X64
+ __ movl(rax, arg_reg_1);
+ for (int i = 0; i < kNumInstr; ++i) {
+ __ addl(rax, Immediate(1));
+ }
+#elif V8_TARGET_ARCH_ARM64
+ for (int i = 0; i < kNumInstr; ++i) {
+ __ Add(x0, x0, Operand(1));
+ }
+#elif V8_TARGET_ARCH_ARM
+ for (int i = 0; i < kNumInstr; ++i) {
+ __ add(r0, r0, Operand(1));
+ }
+#elif V8_TARGET_ARCH_MIPS
+ __ mov(v0, a0);
+ for (int i = 0; i < kNumInstr; ++i) {
+ __ Addu(v0, v0, Operand(1));
+ }
+#elif V8_TARGET_ARCH_MIPS64
+ __ mov(v0, a0);
+ for (int i = 0; i < kNumInstr; ++i) {
+ __ Addu(v0, v0, Operand(1));
+ }
+#elif V8_TARGET_ARCH_PPC
+ for (int i = 0; i < kNumInstr; ++i) {
+ __ addi(r3, r3, Operand(1));
+ }
+#elif V8_TARGET_ARCH_S390
+ for (int i = 0; i < kNumInstr; ++i) {
+ __ agfi(r2, Operand(1));
+ }
+#else
+#error Unsupported architecture
+#endif
+ __ Ret();
+ CodeDesc desc;
+ masm.GetCode(isolate, &desc);
+}
+
+static void FloodWithNop(Isolate* isolate, TestingAssemblerBuffer* buffer) {
+ MacroAssembler masm(isolate, CodeObjectRequired::kYes, buffer->CreateView());
+#if V8_TARGET_ARCH_IA32
+ __ mov(eax, Operand(esp, kPointerSize));
+#elif V8_TARGET_ARCH_X64
+ __ movl(rax, arg_reg_1);
+#elif V8_TARGET_ARCH_MIPS
+ __ mov(v0, a0);
+#elif V8_TARGET_ARCH_MIPS64
+ __ mov(v0, a0);
+#endif
+ for (int i = 0; i < kNumInstr; ++i) {
+ __ nop();
+ }
+ __ Ret();
+ CodeDesc desc;
+ masm.GetCode(isolate, &desc);
+}
+
+// Order of operations for this test case:
+// exec -> perm(RW) -> patch -> flush -> perm(RX) -> exec
+TEST(TestFlushICacheOfWritable) {
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope handles(isolate);
+
+ for (int i = 0; i < kNumIterations; ++i) {
+ auto buffer = AllocateAssemblerBuffer(kBufferSize);
+
+ // Allow calling the function from C++.
+ auto f = GeneratedCode<F0>::FromBuffer(isolate, buffer->start());
+
+ CHECK(SetPermissions(GetPlatformPageAllocator(), buffer->start(),
+ buffer->size(), v8::PageAllocator::kReadWrite));
+ FloodWithInc(isolate, buffer.get());
+ Assembler::FlushICache(buffer->start(), buffer->size());
+ CHECK(SetPermissions(GetPlatformPageAllocator(), buffer->start(),
+ buffer->size(), v8::PageAllocator::kReadExecute));
+ CHECK_EQ(23 + kNumInstr, f.Call(23)); // Call into generated code.
+ CHECK(SetPermissions(GetPlatformPageAllocator(), buffer->start(),
+ buffer->size(), v8::PageAllocator::kReadWrite));
+ FloodWithNop(isolate, buffer.get());
+ Assembler::FlushICache(buffer->start(), buffer->size());
+ CHECK(SetPermissions(GetPlatformPageAllocator(), buffer->start(),
+ buffer->size(), v8::PageAllocator::kReadExecute));
+ CHECK_EQ(23, f.Call(23)); // Call into generated code.
+ }
+}
+
+#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64
+// Note that this order of operations is not supported on ARM32/64 because
+// some older ARM32/64 kernels have a bug that causes cache flush instructions
+// to trigger an access error on non-writable memory.
+// See https://bugs.chromium.org/p/v8/issues/detail?id=8157
+//
+// Also note that this requires {kBufferSize == 8 * KB} to reproduce.
+//
+// The order of operations in V8 is akin to {TestFlushICacheOfWritable} above.
+// It is hence OK to disable the test below on those architectures; only the
+// test case above needs to remain enabled on all architectures.
+#define CONDITIONAL_TEST DISABLED_TEST
+#else
+#define CONDITIONAL_TEST TEST
+#endif
+
+// Order of operations for this test case:
+// exec -> perm(RW) -> patch -> perm(RX) -> flush -> exec
+CONDITIONAL_TEST(TestFlushICacheOfExecutable) {
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope handles(isolate);
+
+ for (int i = 0; i < kNumIterations; ++i) {
+ auto buffer = AllocateAssemblerBuffer(kBufferSize);
+
+ // Allow calling the function from C++.
+ auto f = GeneratedCode<F0>::FromBuffer(isolate, buffer->start());
+
+ CHECK(SetPermissions(GetPlatformPageAllocator(), buffer->start(),
+ buffer->size(), v8::PageAllocator::kReadWrite));
+ FloodWithInc(isolate, buffer.get());
+ CHECK(SetPermissions(GetPlatformPageAllocator(), buffer->start(),
+ buffer->size(), v8::PageAllocator::kReadExecute));
+ Assembler::FlushICache(buffer->start(), buffer->size());
+ CHECK_EQ(23 + kNumInstr, f.Call(23)); // Call into generated code.
+ CHECK(SetPermissions(GetPlatformPageAllocator(), buffer->start(),
+ buffer->size(), v8::PageAllocator::kReadWrite));
+ FloodWithNop(isolate, buffer.get());
+ CHECK(SetPermissions(GetPlatformPageAllocator(), buffer->start(),
+ buffer->size(), v8::PageAllocator::kReadExecute));
+ Assembler::FlushICache(buffer->start(), buffer->size());
+ CHECK_EQ(23, f.Call(23)); // Call into generated code.
+ }
+}
+
+#undef CONDITIONAL_TEST
+
+// Order of operations for this test case:
+// perm(RWX) -> exec -> patch -> flush -> exec
+TEST(TestFlushICacheOfWritableAndExecutable) {
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope handles(isolate);
+
+ for (int i = 0; i < kNumIterations; ++i) {
+ auto buffer = AllocateAssemblerBuffer(kBufferSize);
+
+ // Allow calling the function from C++.
+ auto f = GeneratedCode<F0>::FromBuffer(isolate, buffer->start());
+
+ CHECK(SetPermissions(GetPlatformPageAllocator(), buffer->start(),
+ buffer->size(), v8::PageAllocator::kReadWriteExecute));
+ FloodWithInc(isolate, buffer.get());
+ Assembler::FlushICache(buffer->start(), buffer->size());
+ CHECK_EQ(23 + kNumInstr, f.Call(23)); // Call into generated code.
+ FloodWithNop(isolate, buffer.get());
+ Assembler::FlushICache(buffer->start(), buffer->size());
+ CHECK_EQ(23, f.Call(23)); // Call into generated code.
+ }
+}
+
+#undef __
+
+} // namespace test_icache
+} // namespace internal
+} // namespace v8
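To keep the three new tests straight, the orderings they exercise (taken from their own comments) are:

    TestFlushICacheOfWritable:              exec -> perm(RW) -> patch -> flush -> perm(RX) -> exec
    TestFlushICacheOfExecutable:            exec -> perm(RW) -> patch -> perm(RX) -> flush -> exec  (disabled on ARM32/64)
    TestFlushICacheOfWritableAndExecutable: perm(RWX) -> exec -> patch -> flush -> exec

Only the first ordering, flushing while the page is still writable, is expected to work on every architecture, and it is the one akin to what V8 itself does.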
diff --git a/deps/v8/test/cctest/test-identity-map.cc b/deps/v8/test/cctest/test-identity-map.cc
index 8d388e5033..aa5eb3e5c7 100644
--- a/deps/v8/test/cctest/test-identity-map.cc
+++ b/deps/v8/test/cctest/test-identity-map.cc
@@ -8,6 +8,7 @@
#include "src/identity-map.h"
#include "src/isolate.h"
#include "src/objects.h"
+#include "src/objects/heap-number-inl.h"
#include "src/zone/zone.h"
#include "test/cctest/cctest.h"
@@ -142,8 +143,9 @@ class IdentityMapTester : public HandleAndZoneScope {
void SimulateGCByIncrementingSmisBy(int shift) {
for (int i = 0; i < map.capacity_; i++) {
- if (map.keys_[i]->IsSmi()) {
- map.keys_[i] = Smi::FromInt(Smi::ToInt(map.keys_[i]) + shift);
+ Address key = map.keys_[i];
+ if (!Internals::HasHeapObjectTag(key)) {
+ map.keys_[i] = Internals::IntToSmi(Internals::SmiValue(key) + shift);
}
}
map.gc_counter_ = -1;
@@ -705,7 +707,7 @@ TEST(CanonicalHandleScope) {
for (int i = 0; i < 100; i++) {
smi_handles.push_back(Handle<Object>(Smi::FromInt(i), isolate));
}
- Object** next_handle = isolate->handle_scope_data()->next;
+ Address* next_handle = isolate->handle_scope_data()->next;
for (int i = 0; i < 100; i++) {
Handle<Object> new_smi = Handle<Object>(Smi::FromInt(i), isolate);
Handle<Object> old_smi = smi_handles[i];
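The identity-map test now stores keys as raw Address words and pokes at the Smi tag bits directly. A sketch of the tagging scheme it relies on, shown for the 32-bit layout (on 64-bit V8 the Smi payload lives in the upper 32 bits, so the shift differs):

    #include <cstdint>
    using Address = uintptr_t;

    // Heap objects carry a 1 in the low bit; Smis a 0, with the integer
    // payload stored in the bits above the tag.
    bool HasHeapObjectTag(Address value) { return (value & 1) == 1; }
    Address IntToSmi(intptr_t value) { return static_cast<Address>(value) << 1; }
    intptr_t SmiValue(Address value) { return static_cast<intptr_t>(value) >> 1; }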
diff --git a/deps/v8/test/cctest/test-inobject-slack-tracking.cc b/deps/v8/test/cctest/test-inobject-slack-tracking.cc
index 9255dc04b0..61984afe23 100644
--- a/deps/v8/test/cctest/test-inobject-slack-tracking.cc
+++ b/deps/v8/test/cctest/test-inobject-slack-tracking.cc
@@ -8,7 +8,7 @@
#include "src/api-inl.h"
#include "src/objects-inl.h"
-#include "src/objects.h"
+#include "src/objects/heap-number-inl.h"
#include "src/v8.h"
#include "test/cctest/cctest.h"
@@ -49,10 +49,11 @@ Handle<T> GetLexical(const char* name) {
ScriptContextTable::LookupResult lookup_result;
if (ScriptContextTable::Lookup(isolate, script_contexts, str_name,
&lookup_result)) {
- Handle<Object> result = FixedArray::get(
- *ScriptContextTable::GetContext(isolate, script_contexts,
- lookup_result.context_index),
- lookup_result.slot_index, isolate);
+ Handle<Context> script_context = ScriptContextTable::GetContext(
+ isolate, script_contexts, lookup_result.context_index);
+
+ Handle<Object> result(script_context->get(lookup_result.slot_index),
+ isolate);
return Handle<T>::cast(result);
}
return Handle<T>();
@@ -74,31 +75,27 @@ static inline Handle<T> CompileRunI(const char* script) {
return OpenHandle<T>(CompileRun(script));
}
-
-static Object* GetFieldValue(JSObject* obj, int property_index) {
+static Object GetFieldValue(JSObject obj, int property_index) {
FieldIndex index = FieldIndex::ForPropertyIndex(obj->map(), property_index);
return obj->RawFastPropertyAt(index);
}
-
-static double GetDoubleFieldValue(JSObject* obj, FieldIndex field_index) {
+static double GetDoubleFieldValue(JSObject obj, FieldIndex field_index) {
if (obj->IsUnboxedDoubleField(field_index)) {
return obj->RawFastDoublePropertyAt(field_index);
} else {
- Object* value = obj->RawFastPropertyAt(field_index);
+ Object value = obj->RawFastPropertyAt(field_index);
CHECK(value->IsMutableHeapNumber());
return MutableHeapNumber::cast(value)->value();
}
}
-
-static double GetDoubleFieldValue(JSObject* obj, int property_index) {
+static double GetDoubleFieldValue(JSObject obj, int property_index) {
FieldIndex index = FieldIndex::ForPropertyIndex(obj->map(), property_index);
return GetDoubleFieldValue(obj, index);
}
-
-bool IsObjectShrinkable(JSObject* obj) {
+bool IsObjectShrinkable(JSObject obj) {
Handle<Map> filler_map =
CcTest::i_isolate()->factory()->one_pointer_filler_map();
@@ -114,7 +111,6 @@ bool IsObjectShrinkable(JSObject* obj) {
return true;
}
-
TEST(JSObjectBasic) {
// Avoid eventual completion of in-object slack tracking.
FLAG_always_opt = false;
diff --git a/deps/v8/test/cctest/test-intl.cc b/deps/v8/test/cctest/test-intl.cc
index 3359a3878b..0670340227 100644
--- a/deps/v8/test/cctest/test-intl.cc
+++ b/deps/v8/test/cctest/test-intl.cc
@@ -7,7 +7,14 @@
#include "src/lookup.h"
#include "src/objects-inl.h"
#include "src/objects/intl-objects.h"
+#include "src/objects/js-break-iterator.h"
+#include "src/objects/js-collator.h"
+#include "src/objects/js-date-time-format.h"
+#include "src/objects/js-list-format.h"
#include "src/objects/js-number-format.h"
+#include "src/objects/js-plural-rules.h"
+#include "src/objects/js-relative-time-format.h"
+#include "src/objects/js-segmenter.h"
#include "test/cctest/cctest.h"
namespace v8 {
@@ -211,65 +218,31 @@ TEST(GetBoolOption) {
TEST(GetAvailableLocales) {
std::set<std::string> locales;
- locales = Intl::GetAvailableLocales(ICUService::kBreakIterator);
+ locales = JSV8BreakIterator::GetAvailableLocales();
CHECK(locales.count("en-US"));
CHECK(!locales.count("abcdefg"));
- locales = Intl::GetAvailableLocales(ICUService::kCollator);
+ locales = JSCollator::GetAvailableLocales();
CHECK(locales.count("en-US"));
- locales = Intl::GetAvailableLocales(ICUService::kDateFormat);
+ locales = JSDateTimeFormat::GetAvailableLocales();
CHECK(locales.count("en-US"));
- locales = Intl::GetAvailableLocales(ICUService::kNumberFormat);
+ locales = JSListFormat::GetAvailableLocales();
CHECK(locales.count("en-US"));
- locales = Intl::GetAvailableLocales(ICUService::kPluralRules);
+ locales = JSNumberFormat::GetAvailableLocales();
CHECK(locales.count("en-US"));
- locales = Intl::GetAvailableLocales(ICUService::kRelativeDateTimeFormatter);
+ locales = JSPluralRules::GetAvailableLocales();
CHECK(locales.count("en-US"));
-}
-
-TEST(IsObjectOfType) {
- LocalContext env;
- Isolate* isolate = CcTest::i_isolate();
- v8::Isolate* v8_isolate = env->GetIsolate();
- v8::HandleScope handle_scope(v8_isolate);
-
- Handle<JSObject> obj = isolate->factory()->NewJSObjectWithNullProto();
- Handle<Symbol> marker = isolate->factory()->intl_initialized_marker_symbol();
-
- STATIC_ASSERT(Intl::Type::kNumberFormat == 0);
- Intl::Type types[] = {Intl::Type::kNumberFormat, Intl::Type::kCollator,
- Intl::Type::kDateTimeFormat, Intl::Type::kPluralRules,
- Intl::Type::kBreakIterator, Intl::Type::kLocale};
- for (auto type : types) {
- Handle<Smi> tag =
- Handle<Smi>(Smi::FromInt(static_cast<int>(type)), isolate);
- JSObject::SetProperty(isolate, obj, marker, tag, LanguageMode::kStrict)
- .Assert();
-
- CHECK(Intl::IsObjectOfType(isolate, obj, type));
- }
-
- Handle<Object> tag = isolate->factory()->NewStringFromAsciiChecked("foo");
- JSObject::SetProperty(isolate, obj, marker, tag, LanguageMode::kStrict)
- .Assert();
- CHECK(!Intl::IsObjectOfType(isolate, obj, types[0]));
-
- CHECK(!Intl::IsObjectOfType(isolate, tag, types[0]));
- CHECK(!Intl::IsObjectOfType(isolate, Handle<Smi>(Smi::FromInt(0), isolate),
- types[0]));
+ locales = JSRelativeTimeFormat::GetAvailableLocales();
+ CHECK(locales.count("en-US"));
- // Proxy with target as an initialized object should fail.
- tag = Handle<Smi>(Smi::FromInt(static_cast<int>(types[0])), isolate);
- JSObject::SetProperty(isolate, obj, marker, tag, LanguageMode::kStrict)
- .Assert();
- Handle<JSReceiver> proxy = isolate->factory()->NewJSProxy(
- obj, isolate->factory()->NewJSObjectWithNullProto());
- CHECK(!Intl::IsObjectOfType(isolate, proxy, types[0]));
+ locales = JSSegmenter::GetAvailableLocales();
+ CHECK(locales.count("en-US"));
+ CHECK(!locales.count("abcdefg"));
}
} // namespace internal
diff --git a/deps/v8/test/cctest/test-isolate-independent-builtins.cc b/deps/v8/test/cctest/test-isolate-independent-builtins.cc
deleted file mode 100644
index 4b4babdb37..0000000000
--- a/deps/v8/test/cctest/test-isolate-independent-builtins.cc
+++ /dev/null
@@ -1,206 +0,0 @@
-// Copyright 2018 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "test/cctest/cctest.h"
-
-#include "src/assembler-inl.h"
-#include "src/handles-inl.h"
-#include "src/isolate.h"
-#include "src/macro-assembler-inl.h"
-#include "src/simulator.h"
-#include "src/snapshot/macros.h"
-#include "src/snapshot/snapshot.h"
-#include "test/common/assembler-tester.h"
-
-// To generate the binary files for the test function, enable this section and
-// run GenerateTestFunctionData once on each arch.
-#define GENERATE_TEST_FUNCTION_DATA false
-
-namespace v8 {
-namespace internal {
-namespace test_isolate_independent_builtins {
-
-// V8_CC_MSVC is true for both MSVC and clang on windows. clang can handle
-// __asm__-style inline assembly but MSVC cannot, and thus we need a more
-// precise compiler detection that can distinguish between the two. clang on
-// windows sets both __clang__ and _MSC_VER, MSVC sets only _MSC_VER.
-#if defined(_MSC_VER) && !defined(__clang__)
-#define V8_COMPILER_IS_MSVC
-#endif
-
-#ifndef V8_COMPILER_IS_MSVC
-#if GENERATE_TEST_FUNCTION_DATA
-
-// Arch-specific defines.
-#if V8_TARGET_ARCH_IA32
-#define TEST_FUNCTION_FILE "f-ia32.bin"
-#elif V8_TARGET_ARCH_X64 && _WIN64
-#define TEST_FUNCTION_FILE "f-x64-win.bin"
-#elif V8_TARGET_ARCH_X64
-#define TEST_FUNCTION_FILE "f-x64.bin"
-#elif V8_TARGET_ARCH_ARM64
-#define TEST_FUNCTION_FILE "f-arm64.bin"
-#elif V8_TARGET_ARCH_ARM
-#define TEST_FUNCTION_FILE "f-arm.bin"
-#elif V8_TARGET_ARCH_PPC
-#define TEST_FUNCTION_FILE "f-ppc.bin"
-#elif V8_TARGET_ARCH_MIPS
-#define TEST_FUNCTION_FILE "f-mips.bin"
-#elif V8_TARGET_ARCH_MIPS64
-#define TEST_FUNCTION_FILE "f-mips64.bin"
-#elif V8_TARGET_ARCH_S390
-#define TEST_FUNCTION_FILE "f-s390.bin"
-#else
-#error "Unknown architecture."
-#endif
-
-#define __ masm.
-
-TEST(GenerateTestFunctionData) {
- CcTest::InitializeVM();
- Isolate* isolate = CcTest::i_isolate();
- HandleScope scope(isolate);
-
-#if V8_TARGET_ARCH_IA32
- v8::internal::byte buffer[256];
- Assembler masm(isolate, buffer, sizeof(buffer));
-
- __ mov(eax, Operand(esp, 4));
- __ add(eax, Operand(esp, 8));
- __ ret(0);
-#elif V8_TARGET_ARCH_X64
- size_t allocated;
- byte* buffer = AllocateAssemblerBuffer(&allocated);
- Assembler masm(isolate, buffer, static_cast<int>(allocated));
-
-#ifdef _WIN64
- static const Register arg1 = rcx;
- static const Register arg2 = rdx;
-#else
- static const Register arg1 = rdi;
- static const Register arg2 = rsi;
-#endif
-
- __ movq(rax, arg2);
- __ addq(rax, arg1);
- __ ret(0);
-#elif V8_TARGET_ARCH_ARM64
- MacroAssembler masm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
-
- __ Add(x0, x0, x1);
- __ Ret();
-#elif V8_TARGET_ARCH_ARM
- Assembler masm(isolate, nullptr, 0);
-
- __ add(r0, r0, Operand(r1));
- __ mov(pc, Operand(lr));
-#elif V8_TARGET_ARCH_PPC
- Assembler masm(isolate, nullptr, 0);
-
- __ function_descriptor();
- __ add(r3, r3, r4);
- __ blr();
-#elif V8_TARGET_ARCH_MIPS
- MacroAssembler masm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
-
- __ addu(v0, a0, a1);
- __ jr(ra);
- __ nop();
-#elif V8_TARGET_ARCH_MIPS64
- MacroAssembler masm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
-
- __ addu(v0, a0, a1);
- __ jr(ra);
- __ nop();
-#elif V8_TARGET_ARCH_S390
- Assembler masm(isolate, nullptr, 0);
-
- __ agr(r2, r3);
- __ b(r14);
-#else // Unknown architecture.
-#error "Unknown architecture."
-#endif // Target architecture.
-
- CodeDesc desc;
- masm.GetCode(isolate, &desc);
-
- std::ofstream of(TEST_FUNCTION_FILE, std::ios::out | std::ios::binary);
- of.write(reinterpret_cast<char*>(desc.buffer), desc.instr_size);
-}
-#undef __
-#endif // GENERATE_TEST_FUNCTION_DATA
-
-#if V8_TARGET_ARCH_IA32
-#define FUNCTION_BYTES \
- ".byte 0x8b, 0x44, 0x24, 0x04, 0x03, 0x44, 0x24, 0x08, 0xc3\n"
-#elif V8_TARGET_ARCH_X64 && _WIN64
-#define FUNCTION_BYTES ".byte 0x48, 0x8b, 0xc2, 0x48, 0x03, 0xc1, 0xc3\n"
-#elif V8_TARGET_ARCH_X64
-#define FUNCTION_BYTES ".byte 0x48, 0x8b, 0xc6, 0x48, 0x03, 0xc7, 0xc3\n"
-#elif V8_TARGET_ARCH_ARM64
-#define FUNCTION_BYTES ".byte 0x00, 0x00, 0x01, 0x8b, 0xc0, 0x03, 0x5f, 0xd6\n"
-#elif V8_TARGET_ARCH_ARM
-#define FUNCTION_BYTES ".byte 0x01, 0x00, 0x80, 0xe0, 0x0e, 0xf0, 0xa0, 0xe1\n"
-#elif V8_TARGET_ARCH_PPC
-#if defined(V8_OS_AIX)
-#define FUNCTION_BYTES ".byte 0x7c, 0x64, 0x1a, 0x14, 0x4e, 0x80, 0x00, 0x20\n"
-#else
-#define FUNCTION_BYTES ".byte 0x14, 0x22, 0x63, 0x7c, 0x20, 0x00, 0x80, 0x4e\n"
-#endif
-#elif defined(V8_TARGET_ARCH_MIPS) || defined(V8_TARGET_ARCH_MIPS64)
-#if defined(V8_TARGET_BIG_ENDIAN)
-#define FUNCTION_BYTES \
- ".byte 0x00, 0x85, 0x10, 0x21, 0x03, 0xe0, 0x00, " \
- "0x08, 0x00, 0x00, 0x00, 0x00\n"
-#else
-#define FUNCTION_BYTES \
- ".byte 0x21, 0x10, 0x85, 0x00, 0x08, 0x00, 0xe0, " \
- "0x03, 0x00, 0x00, 0x00, 0x00\n"
-#endif
-#elif V8_TARGET_ARCH_S390
-#define FUNCTION_BYTES \
- ".byte 0xb9, 0x08, 0x00, 0x23, 0x07, 0xfe\n"
-#else
-#error "Unknown architecture."
-#endif
-
-V8_EMBEDDED_RODATA_HEADER(test_string0_bytes)
-__asm__(".byte 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37\n"
- ".byte 0x38, 0x39, 0x0a, 0x00\n");
-extern "C" V8_ALIGNED(16) const char test_string0_bytes[];
-
-V8_EMBEDDED_TEXT_HEADER(test_function0_bytes)
-__asm__(FUNCTION_BYTES);
-extern "C" V8_ALIGNED(16) const char test_function0_bytes[];
-// clang-format on
-
-// A historical note: We use .byte over .incbin since the latter leads to
-// complications involving generation of build-time dependencies. Goma parses
-// #include statements, and clang has -MD/-MMD. Neither recognize .incbin.
-
-TEST(ByteInRodata) {
- CHECK_EQ(0, std::strcmp("0123456789\n", test_string0_bytes));
-}
-
-TEST(ByteInText) {
- CcTest::InitializeVM();
- Isolate* isolate = CcTest::i_isolate();
- auto f = GeneratedCode<int(int, int)>::FromAddress(
- isolate, reinterpret_cast<Address>(&test_function0_bytes[0]));
- CHECK_EQ(7, f.Call(3, 4));
- CHECK_EQ(11, f.Call(5, 6));
-}
-#endif // #ifndef V8_COMPILER_IS_MSVC
-#undef V8_COMPILER_IS_MSVC
-
-#undef FUNCTION_BYTES
-#undef GENERATE_TEST_FUNCTION_DATA
-#undef TEST_FUNCTION_FILE
-
-} // namespace test_isolate_independent_builtins
-} // namespace internal
-} // namespace v8
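For reference, the core trick in the deleted file: pre-assembled machine code is emitted straight into the binary as raw bytes, then called through a C function pointer. A condensed sketch built from the file's own x64 FUNCTION_BYTES (the symbol name and section directive here are illustrative, not from the original):

    __asm__(".text\n"
            ".globl v8_test_add\n"
            "v8_test_add:\n"
            ".byte 0x48, 0x8b, 0xc6\n"   // movq rax, rsi
            ".byte 0x48, 0x03, 0xc7\n"   // addq rax, rdi
            ".byte 0xc3\n");             // ret
    extern "C" int v8_test_add(int, int);
    // v8_test_add(3, 4) == 7, matching the deleted ByteInText test.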
diff --git a/deps/v8/test/cctest/test-js-weak-refs.cc b/deps/v8/test/cctest/test-js-weak-refs.cc
new file mode 100644
index 0000000000..ffa2ba54f0
--- /dev/null
+++ b/deps/v8/test/cctest/test-js-weak-refs.cc
@@ -0,0 +1,477 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/handles-inl.h"
+#include "src/heap/factory-inl.h"
+#include "src/isolate.h"
+#include "src/microtask-queue.h"
+#include "src/objects/js-objects.h"
+#include "src/objects/js-weak-refs-inl.h"
+#include "test/cctest/cctest.h"
+#include "test/cctest/heap/heap-utils.h"
+
+namespace v8 {
+namespace internal {
+
+Handle<JSWeakFactory> ConstructJSWeakFactory(Isolate* isolate) {
+ Factory* factory = isolate->factory();
+ Handle<String> weak_factory_name = factory->WeakFactory_string();
+ Handle<Object> global =
+ handle(isolate->native_context()->global_object(), isolate);
+ Handle<JSFunction> weak_factory_fun = Handle<JSFunction>::cast(
+ Object::GetProperty(isolate, global, weak_factory_name)
+ .ToHandleChecked());
+ auto weak_factory = Handle<JSWeakFactory>::cast(
+ JSObject::New(weak_factory_fun, weak_factory_fun,
+ Handle<AllocationSite>::null())
+ .ToHandleChecked());
+#ifdef VERIFY_HEAP
+ weak_factory->JSWeakFactoryVerify(isolate);
+#endif // VERIFY_HEAP
+ return weak_factory;
+}
+
+Handle<JSWeakRef> ConstructJSWeakRef(Isolate* isolate,
+ Handle<JSReceiver> target) {
+ Factory* factory = isolate->factory();
+ Handle<String> weak_ref_name = factory->WeakRef_string();
+ Handle<Object> global =
+ handle(isolate->native_context()->global_object(), isolate);
+ Handle<JSFunction> weak_ref_fun = Handle<JSFunction>::cast(
+ Object::GetProperty(isolate, global, weak_ref_name).ToHandleChecked());
+ auto weak_ref = Handle<JSWeakRef>::cast(
+ JSObject::New(weak_ref_fun, weak_ref_fun, Handle<AllocationSite>::null())
+ .ToHandleChecked());
+ weak_ref->set_target(*target);
+#ifdef VERIFY_HEAP
+ weak_ref->JSWeakRefVerify(isolate);
+#endif // VERIFY_HEAP
+ return weak_ref;
+}
+
+Handle<JSWeakCell> MakeCell(Isolate* isolate, Handle<JSObject> js_object,
+ Handle<JSWeakFactory> weak_factory) {
+ Handle<Map> weak_cell_map(isolate->native_context()->js_weak_cell_map(),
+ isolate);
+ Handle<JSWeakCell> weak_cell =
+ Handle<JSWeakCell>::cast(isolate->factory()->NewJSObjectFromMap(
+ weak_cell_map, TENURED, Handle<AllocationSite>::null()));
+ weak_cell->set_target(*js_object);
+ weak_factory->AddWeakCell(*weak_cell);
+#ifdef VERIFY_HEAP
+ weak_cell->JSWeakCellVerify(isolate);
+#endif // VERIFY_HEAP
+ return weak_cell;
+}
+
+void NullifyWeakCell(Handle<JSWeakCell> weak_cell, Isolate* isolate) {
+ auto empty_func = [](HeapObject object, ObjectSlot slot, Object target) {};
+ weak_cell->Nullify(isolate, empty_func);
+#ifdef VERIFY_HEAP
+ weak_cell->JSWeakCellVerify(isolate);
+#endif // VERIFY_HEAP
+}
+
+void ClearWeakCell(Handle<JSWeakCell> weak_cell, Isolate* isolate) {
+ weak_cell->Clear(isolate);
+ CHECK(weak_cell->next()->IsUndefined(isolate));
+ CHECK(weak_cell->prev()->IsUndefined(isolate));
+#ifdef VERIFY_HEAP
+ weak_cell->JSWeakCellVerify(isolate);
+#endif // VERIFY_HEAP
+}
+
+TEST(TestJSWeakCellCreation) {
+ FLAG_harmony_weak_refs = true;
+ CcTest::InitializeVM();
+ LocalContext context;
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope outer_scope(isolate);
+ Handle<JSWeakFactory> weak_factory = ConstructJSWeakFactory(isolate);
+ Handle<JSObject> js_object =
+ isolate->factory()->NewJSObject(isolate->object_function());
+
+ // Create JSWeakCell and verify internal data structures.
+ Handle<JSWeakCell> weak_cell1 = MakeCell(isolate, js_object, weak_factory);
+ CHECK(weak_cell1->prev()->IsUndefined(isolate));
+ CHECK(weak_cell1->next()->IsUndefined(isolate));
+
+ CHECK_EQ(weak_factory->active_cells(), *weak_cell1);
+ CHECK(weak_factory->cleared_cells()->IsUndefined(isolate));
+
+ // Create another JSWeakCell and verify internal data structures.
+ Handle<JSWeakCell> weak_cell2 = MakeCell(isolate, js_object, weak_factory);
+ CHECK(weak_cell2->prev()->IsUndefined(isolate));
+ CHECK_EQ(weak_cell2->next(), *weak_cell1);
+ CHECK_EQ(weak_cell1->prev(), *weak_cell2);
+ CHECK(weak_cell1->next()->IsUndefined(isolate));
+
+ CHECK_EQ(weak_factory->active_cells(), *weak_cell2);
+ CHECK(weak_factory->cleared_cells()->IsUndefined(isolate));
+}
+
+TEST(TestJSWeakCellNullify1) {
+ FLAG_harmony_weak_refs = true;
+ CcTest::InitializeVM();
+ LocalContext context;
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope outer_scope(isolate);
+ Handle<JSWeakFactory> weak_factory = ConstructJSWeakFactory(isolate);
+ Handle<JSObject> js_object =
+ isolate->factory()->NewJSObject(isolate->object_function());
+
+ Handle<JSWeakCell> weak_cell1 = MakeCell(isolate, js_object, weak_factory);
+ Handle<JSWeakCell> weak_cell2 = MakeCell(isolate, js_object, weak_factory);
+
+ // Nullify the first JSWeakCell and verify internal data structures.
+ NullifyWeakCell(weak_cell1, isolate);
+ CHECK_EQ(weak_factory->active_cells(), *weak_cell2);
+ CHECK(weak_cell2->prev()->IsUndefined(isolate));
+ CHECK(weak_cell2->next()->IsUndefined(isolate));
+ CHECK_EQ(weak_factory->cleared_cells(), *weak_cell1);
+ CHECK(weak_cell1->prev()->IsUndefined(isolate));
+ CHECK(weak_cell1->next()->IsUndefined(isolate));
+
+ // Nullify the second JSWeakCell and verify internal data structures.
+ NullifyWeakCell(weak_cell2, isolate);
+ CHECK(weak_factory->active_cells()->IsUndefined(isolate));
+ CHECK_EQ(weak_factory->cleared_cells(), *weak_cell2);
+ CHECK_EQ(weak_cell2->next(), *weak_cell1);
+ CHECK(weak_cell2->prev()->IsUndefined(isolate));
+ CHECK_EQ(weak_cell1->prev(), *weak_cell2);
+ CHECK(weak_cell1->next()->IsUndefined(isolate));
+}
+
+TEST(TestJSWeakCellNullify2) {
+ FLAG_harmony_weak_refs = true;
+ CcTest::InitializeVM();
+ LocalContext context;
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope outer_scope(isolate);
+ Handle<JSWeakFactory> weak_factory = ConstructJSWeakFactory(isolate);
+ Handle<JSObject> js_object =
+ isolate->factory()->NewJSObject(isolate->object_function());
+
+ Handle<JSWeakCell> weak_cell1 = MakeCell(isolate, js_object, weak_factory);
+ Handle<JSWeakCell> weak_cell2 = MakeCell(isolate, js_object, weak_factory);
+
+ // Like TestJSWeakCellNullify1 but clear the JSWeakCells in opposite order.
+ NullifyWeakCell(weak_cell2, isolate);
+ CHECK_EQ(weak_factory->active_cells(), *weak_cell1);
+ CHECK(weak_cell1->prev()->IsUndefined(isolate));
+ CHECK(weak_cell1->next()->IsUndefined(isolate));
+ CHECK_EQ(weak_factory->cleared_cells(), *weak_cell2);
+ CHECK(weak_cell2->prev()->IsUndefined(isolate));
+ CHECK(weak_cell2->next()->IsUndefined(isolate));
+
+ NullifyWeakCell(weak_cell1, isolate);
+ CHECK(weak_factory->active_cells()->IsUndefined(isolate));
+ CHECK_EQ(weak_factory->cleared_cells(), *weak_cell1);
+ CHECK_EQ(weak_cell1->next(), *weak_cell2);
+ CHECK(weak_cell1->prev()->IsUndefined(isolate));
+ CHECK_EQ(weak_cell2->prev(), *weak_cell1);
+ CHECK(weak_cell2->next()->IsUndefined(isolate));
+}
+
+TEST(TestJSWeakFactoryPopClearedCell) {
+ FLAG_harmony_weak_refs = true;
+ CcTest::InitializeVM();
+ LocalContext context;
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope outer_scope(isolate);
+ Handle<JSWeakFactory> weak_factory = ConstructJSWeakFactory(isolate);
+ Handle<JSObject> js_object =
+ isolate->factory()->NewJSObject(isolate->object_function());
+
+ Handle<JSWeakCell> weak_cell1 = MakeCell(isolate, js_object, weak_factory);
+ Handle<JSWeakCell> weak_cell2 = MakeCell(isolate, js_object, weak_factory);
+ Handle<JSWeakCell> weak_cell3 = MakeCell(isolate, js_object, weak_factory);
+
+ NullifyWeakCell(weak_cell2, isolate);
+ NullifyWeakCell(weak_cell3, isolate);
+
+ CHECK(weak_factory->NeedsCleanup());
+ JSWeakCell cleared1 = weak_factory->PopClearedCell(isolate);
+ CHECK_EQ(cleared1, *weak_cell3);
+ CHECK(weak_cell3->prev()->IsUndefined(isolate));
+ CHECK(weak_cell3->next()->IsUndefined(isolate));
+
+ CHECK(weak_factory->NeedsCleanup());
+ JSWeakCell cleared2 = weak_factory->PopClearedCell(isolate);
+ CHECK_EQ(cleared2, *weak_cell2);
+ CHECK(weak_cell2->prev()->IsUndefined(isolate));
+ CHECK(weak_cell2->next()->IsUndefined(isolate));
+
+ CHECK(!weak_factory->NeedsCleanup());
+
+ NullifyWeakCell(weak_cell1, isolate);
+
+ CHECK(weak_factory->NeedsCleanup());
+ JSWeakCell cleared3 = weak_factory->PopClearedCell(isolate);
+ CHECK_EQ(cleared3, *weak_cell1);
+ CHECK(weak_cell1->prev()->IsUndefined(isolate));
+ CHECK(weak_cell1->next()->IsUndefined(isolate));
+
+ CHECK(!weak_factory->NeedsCleanup());
+ CHECK(weak_factory->active_cells()->IsUndefined(isolate));
+ CHECK(weak_factory->cleared_cells()->IsUndefined(isolate));
+}
+
+TEST(TestJSWeakCellClearActiveCells) {
+ FLAG_harmony_weak_refs = true;
+ CcTest::InitializeVM();
+ LocalContext context;
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope outer_scope(isolate);
+ Handle<JSWeakFactory> weak_factory = ConstructJSWeakFactory(isolate);
+ Handle<JSObject> js_object =
+ isolate->factory()->NewJSObject(isolate->object_function());
+
+ Handle<JSWeakCell> weak_cell1 = MakeCell(isolate, js_object, weak_factory);
+ Handle<JSWeakCell> weak_cell2 = MakeCell(isolate, js_object, weak_factory);
+ Handle<JSWeakCell> weak_cell3 = MakeCell(isolate, js_object, weak_factory);
+
+ CHECK_EQ(weak_factory->active_cells(), *weak_cell3);
+ CHECK(weak_cell3->prev()->IsUndefined(isolate));
+ CHECK_EQ(weak_cell3->next(), *weak_cell2);
+ CHECK_EQ(weak_cell2->prev(), *weak_cell3);
+ CHECK_EQ(weak_cell2->next(), *weak_cell1);
+ CHECK_EQ(weak_cell1->prev(), *weak_cell2);
+ CHECK(weak_cell1->next()->IsUndefined(isolate));
+
+ // Clear all JSWeakCells in active_cells and verify the consistency of the
+ // active_cells list in all stages.
+ ClearWeakCell(weak_cell2, isolate);
+ CHECK_EQ(weak_factory->active_cells(), *weak_cell3);
+ CHECK(weak_cell3->prev()->IsUndefined(isolate));
+ CHECK_EQ(weak_cell3->next(), *weak_cell1);
+ CHECK_EQ(weak_cell1->prev(), *weak_cell3);
+ CHECK(weak_cell1->next()->IsUndefined(isolate));
+
+ ClearWeakCell(weak_cell3, isolate);
+ CHECK_EQ(weak_factory->active_cells(), *weak_cell1);
+ CHECK(weak_cell1->prev()->IsUndefined(isolate));
+ CHECK(weak_cell1->next()->IsUndefined(isolate));
+
+ ClearWeakCell(weak_cell1, isolate);
+ CHECK(weak_factory->active_cells()->IsUndefined(isolate));
+}
+
+TEST(TestJSWeakCellClearClearedCells) {
+ FLAG_harmony_weak_refs = true;
+ CcTest::InitializeVM();
+ LocalContext context;
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope outer_scope(isolate);
+ Handle<JSWeakFactory> weak_factory = ConstructJSWeakFactory(isolate);
+ Handle<JSObject> js_object =
+ isolate->factory()->NewJSObject(isolate->object_function());
+
+ Handle<JSWeakCell> weak_cell1 = MakeCell(isolate, js_object, weak_factory);
+ Handle<JSWeakCell> weak_cell2 = MakeCell(isolate, js_object, weak_factory);
+ Handle<JSWeakCell> weak_cell3 = MakeCell(isolate, js_object, weak_factory);
+
+ NullifyWeakCell(weak_cell1, isolate);
+ NullifyWeakCell(weak_cell2, isolate);
+ NullifyWeakCell(weak_cell3, isolate);
+
+ CHECK_EQ(weak_factory->cleared_cells(), *weak_cell3);
+ CHECK(weak_cell3->prev()->IsUndefined(isolate));
+ CHECK_EQ(weak_cell3->next(), *weak_cell2);
+ CHECK_EQ(weak_cell2->prev(), *weak_cell3);
+ CHECK_EQ(weak_cell2->next(), *weak_cell1);
+ CHECK_EQ(weak_cell1->prev(), *weak_cell2);
+ CHECK(weak_cell1->next()->IsUndefined(isolate));
+
+ // Clear all JSWeakCells in cleared_cells and verify the consistency of the
+ // cleared_cells list in all stages.
+ ClearWeakCell(weak_cell2, isolate);
+ CHECK_EQ(weak_factory->cleared_cells(), *weak_cell3);
+ CHECK(weak_cell3->prev()->IsUndefined(isolate));
+ CHECK_EQ(weak_cell3->next(), *weak_cell1);
+ CHECK_EQ(weak_cell1->prev(), *weak_cell3);
+ CHECK(weak_cell1->next()->IsUndefined(isolate));
+
+ ClearWeakCell(weak_cell3, isolate);
+ CHECK_EQ(weak_factory->cleared_cells(), *weak_cell1);
+ CHECK(weak_cell1->prev()->IsUndefined(isolate));
+ CHECK(weak_cell1->next()->IsUndefined(isolate));
+
+ ClearWeakCell(weak_cell1, isolate);
+ CHECK(weak_factory->cleared_cells()->IsUndefined(isolate));
+}
+
+TEST(TestJSWeakCellClearTwice) {
+ FLAG_harmony_weak_refs = true;
+ CcTest::InitializeVM();
+ LocalContext context;
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope outer_scope(isolate);
+ Handle<JSWeakFactory> weak_factory = ConstructJSWeakFactory(isolate);
+ Handle<JSObject> js_object =
+ isolate->factory()->NewJSObject(isolate->object_function());
+
+ Handle<JSWeakCell> weak_cell1 = MakeCell(isolate, js_object, weak_factory);
+
+ ClearWeakCell(weak_cell1, isolate);
+ ClearWeakCell(weak_cell1, isolate);
+}
+
+TEST(TestJSWeakCellClearPopped) {
+ FLAG_harmony_weak_refs = true;
+ CcTest::InitializeVM();
+ LocalContext context;
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope outer_scope(isolate);
+ Handle<JSWeakFactory> weak_factory = ConstructJSWeakFactory(isolate);
+ Handle<JSObject> js_object =
+ isolate->factory()->NewJSObject(isolate->object_function());
+
+ Handle<JSWeakCell> weak_cell1 = MakeCell(isolate, js_object, weak_factory);
+ NullifyWeakCell(weak_cell1, isolate);
+ JSWeakCell cleared1 = weak_factory->PopClearedCell(isolate);
+ CHECK_EQ(cleared1, *weak_cell1);
+
+ ClearWeakCell(weak_cell1, isolate);
+}
+
+TEST(TestJSWeakRef) {
+ FLAG_harmony_weak_refs = true;
+ CcTest::InitializeVM();
+ LocalContext context;
+
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope outer_scope(isolate);
+ Handle<JSWeakRef> weak_ref;
+ {
+ HandleScope inner_scope(isolate);
+
+ Handle<JSObject> js_object =
+ isolate->factory()->NewJSObject(isolate->object_function());
+ // This doesn't add the target into the KeepDuringJob set.
+ Handle<JSWeakRef> inner_weak_ref = ConstructJSWeakRef(isolate, js_object);
+
+ CcTest::CollectAllGarbage();
+ CHECK(!inner_weak_ref->target()->IsUndefined(isolate));
+
+ weak_ref = inner_scope.CloseAndEscape(inner_weak_ref);
+ }
+
+ CHECK(!weak_ref->target()->IsUndefined(isolate));
+
+ CcTest::CollectAllGarbage();
+
+ CHECK(weak_ref->target()->IsUndefined(isolate));
+}
+
+TEST(TestJSWeakRefIncrementalMarking) {
+ FLAG_harmony_weak_refs = true;
+ if (!FLAG_incremental_marking) {
+ return;
+ }
+ ManualGCScope manual_gc_scope;
+ CcTest::InitializeVM();
+ LocalContext context;
+
+ Isolate* isolate = CcTest::i_isolate();
+ Heap* heap = isolate->heap();
+ HandleScope outer_scope(isolate);
+ Handle<JSWeakRef> weak_ref;
+ {
+ HandleScope inner_scope(isolate);
+
+ Handle<JSObject> js_object =
+ isolate->factory()->NewJSObject(isolate->object_function());
+ // This doesn't add the target into the KeepDuringJob set.
+ Handle<JSWeakRef> inner_weak_ref = ConstructJSWeakRef(isolate, js_object);
+
+ heap::SimulateIncrementalMarking(heap, true);
+ CcTest::CollectAllGarbage();
+ CHECK(!inner_weak_ref->target()->IsUndefined(isolate));
+
+ weak_ref = inner_scope.CloseAndEscape(inner_weak_ref);
+ }
+
+ CHECK(!weak_ref->target()->IsUndefined(isolate));
+
+ heap::SimulateIncrementalMarking(heap, true);
+ CcTest::CollectAllGarbage();
+
+ CHECK(weak_ref->target()->IsUndefined(isolate));
+}
+
+TEST(TestJSWeakRefKeepDuringJob) {
+ FLAG_harmony_weak_refs = true;
+ CcTest::InitializeVM();
+ LocalContext context;
+
+ Isolate* isolate = CcTest::i_isolate();
+ Heap* heap = isolate->heap();
+ HandleScope outer_scope(isolate);
+ Handle<JSWeakRef> weak_ref;
+ {
+ HandleScope inner_scope(isolate);
+
+ Handle<JSObject> js_object =
+ isolate->factory()->NewJSObject(isolate->object_function());
+ Handle<JSWeakRef> inner_weak_ref = ConstructJSWeakRef(isolate, js_object);
+ heap->AddKeepDuringJobTarget(js_object);
+
+ weak_ref = inner_scope.CloseAndEscape(inner_weak_ref);
+ }
+
+ CHECK(!weak_ref->target()->IsUndefined(isolate));
+
+ CcTest::CollectAllGarbage();
+
+ CHECK(!weak_ref->target()->IsUndefined(isolate));
+
+ // Clears the KeepDuringJob set.
+ isolate->default_microtask_queue()->RunMicrotasks(isolate);
+ CcTest::CollectAllGarbage();
+
+ CHECK(weak_ref->target()->IsUndefined(isolate));
+}
+
+TEST(TestJSWeakRefKeepDuringJobIncrementalMarking) {
+ FLAG_harmony_weak_refs = true;
+ if (!FLAG_incremental_marking) {
+ return;
+ }
+ ManualGCScope manual_gc_scope;
+ CcTest::InitializeVM();
+ LocalContext context;
+
+ Isolate* isolate = CcTest::i_isolate();
+ Heap* heap = isolate->heap();
+ HandleScope outer_scope(isolate);
+ Handle<JSWeakRef> weak_ref;
+ {
+ HandleScope inner_scope(isolate);
+
+ Handle<JSObject> js_object =
+ isolate->factory()->NewJSObject(isolate->object_function());
+ Handle<JSWeakRef> inner_weak_ref = ConstructJSWeakRef(isolate, js_object);
+ heap->AddKeepDuringJobTarget(js_object);
+
+ weak_ref = inner_scope.CloseAndEscape(inner_weak_ref);
+ }
+
+ CHECK(!weak_ref->target()->IsUndefined(isolate));
+
+ heap::SimulateIncrementalMarking(heap, true);
+ CcTest::CollectAllGarbage();
+
+ CHECK(!weak_ref->target()->IsUndefined(isolate));
+
+ // Clears the KeepDuringJob set.
+ isolate->default_microtask_queue()->RunMicrotasks(isolate);
+ heap::SimulateIncrementalMarking(heap, true);
+ CcTest::CollectAllGarbage();
+
+ CHECK(weak_ref->target()->IsUndefined(isolate));
+}
+
+} // namespace internal
+} // namespace v8
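The checks above all verify a simple list discipline: AddWeakCell prepends to active_cells, Nullify moves a cell to the front of cleared_cells, and PopClearedCell pops from that front. As a plain-C++ analogue of the bookkeeping (a hypothetical model, not V8 types; the real cells are heap objects linked via prev/next fields):

    #include <list>

    struct WeakFactoryModel {
      std::list<int> active_cells, cleared_cells;  // front == most recently added

      void AddWeakCell(int cell) { active_cells.push_front(cell); }

      // Called when a cell's target dies: unlink from active, prepend to cleared.
      void Nullify(int cell) {
        active_cells.remove(cell);
        cleared_cells.push_front(cell);
      }

      bool NeedsCleanup() const { return !cleared_cells.empty(); }

      // Hands out cleared cells newest-first, as TestJSWeakFactoryPopClearedCell
      // observes.
      int PopClearedCell() {
        int cell = cleared_cells.front();
        cleared_cells.pop_front();
        return cell;
      }
    };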
diff --git a/deps/v8/test/cctest/test-log-stack-tracer.cc b/deps/v8/test/cctest/test-log-stack-tracer.cc
index ea51a168d7..d49d9eb1d4 100644
--- a/deps/v8/test/cctest/test-log-stack-tracer.cc
+++ b/deps/v8/test/cctest/test-log-stack-tracer.cc
@@ -31,10 +31,8 @@
#include "include/v8-profiler.h"
#include "src/api-inl.h"
-#include "src/code-stubs.h"
#include "src/disassembler.h"
#include "src/isolate.h"
-#include "src/log.h"
#include "src/objects-inl.h"
#include "src/v8.h"
#include "src/vm-state-inl.h"
@@ -44,8 +42,8 @@
namespace v8 {
namespace internal {
-static bool IsAddressWithinFuncCode(JSFunction* function, void* addr) {
- i::AbstractCode* code = function->abstract_code();
+static bool IsAddressWithinFuncCode(JSFunction function, void* addr) {
+ i::AbstractCode code = function->abstract_code();
return code->contains(reinterpret_cast<Address>(addr));
}
@@ -54,7 +52,7 @@ static bool IsAddressWithinFuncCode(v8::Local<v8::Context> context,
v8::Local<v8::Value> func =
context->Global()->Get(context, v8_str(func_name)).ToLocalChecked();
CHECK(func->IsFunction());
- JSFunction* js_func = JSFunction::cast(*v8::Utils::OpenHandle(*func));
+ JSFunction js_func = JSFunction::cast(*v8::Utils::OpenHandle(*func));
return IsAddressWithinFuncCode(js_func, addr);
}
@@ -148,7 +146,7 @@ TEST(CFromJSStackTrace) {
i::TraceExtension::InitTraceEnv(&sample);
v8::HandleScope scope(CcTest::isolate());
- v8::Local<v8::Context> context = CcTest::NewContext(TRACE_EXTENSION);
+ v8::Local<v8::Context> context = CcTest::NewContext({TRACE_EXTENSION_ID});
v8::Context::Scope context_scope(context);
// Create global function JSFuncDoTrace which calls
@@ -197,7 +195,7 @@ TEST(PureJSStackTrace) {
i::TraceExtension::InitTraceEnv(&sample);
v8::HandleScope scope(CcTest::isolate());
- v8::Local<v8::Context> context = CcTest::NewContext(TRACE_EXTENSION);
+ v8::Local<v8::Context> context = CcTest::NewContext({TRACE_EXTENSION_ID});
v8::Context::Scope context_scope(context);
// Create global function JSFuncDoTrace which calls
@@ -267,7 +265,7 @@ TEST(PureCStackTrace) {
TickSample sample;
i::TraceExtension::InitTraceEnv(&sample);
v8::HandleScope scope(CcTest::isolate());
- v8::Local<v8::Context> context = CcTest::NewContext(TRACE_EXTENSION);
+ v8::Local<v8::Context> context = CcTest::NewContext({TRACE_EXTENSION_ID});
v8::Context::Scope context_scope(context);
// Check that sampler doesn't crash
CHECK_EQ(10, CFunc(10));
@@ -276,7 +274,7 @@ TEST(PureCStackTrace) {
TEST(JsEntrySp) {
v8::HandleScope scope(CcTest::isolate());
- v8::Local<v8::Context> context = CcTest::NewContext(TRACE_EXTENSION);
+ v8::Local<v8::Context> context = CcTest::NewContext({TRACE_EXTENSION_ID});
v8::Context::Scope context_scope(context);
CHECK(!i::TraceExtension::GetJsEntrySp());
CompileRun("a = 1; b = a + 1;");
diff --git a/deps/v8/test/cctest/test-log.cc b/deps/v8/test/cctest/test-log.cc
index f7774b7bda..6057db75ee 100644
--- a/deps/v8/test/cctest/test-log.cc
+++ b/deps/v8/test/cctest/test-log.cc
@@ -34,6 +34,7 @@
#include "src/log-utils.h"
#include "src/log.h"
#include "src/objects-inl.h"
+#include "src/ostreams.h"
#include "src/profiler/cpu-profiler.h"
#include "src/snapshot/natives.h"
#include "src/v8.h"
@@ -104,6 +105,11 @@ class ScopedLoggerInitializer {
.ToLocalChecked();
}
+ void PrintLog() {
+ i::StdoutStream os;
+ os << raw_log_ << std::flush;
+ }
+
void StopLogging() {
bool exists = false;
raw_log_ = i::ReadFile(StopLoggingGetTempFile(), &exists, true);
@@ -146,16 +152,19 @@ class ScopedLoggerInitializer {
return true;
}
- std::unordered_set<uintptr_t> ExtractAllAddresses(std::string search_term,
- size_t address_column) {
+ std::unordered_set<uintptr_t> ExtractLogAddresses(std::string search_term,
+ size_t address_column,
+ bool allow_duplicates) {
CHECK_GT(log_.size(), 0);
- std::unordered_set<uintptr_t> result;
- size_t start = 0;
+ // Map each extracted address to its log line.
+ std::unordered_map<uintptr_t, std::string> map;
+ size_t current = 0;
while (true) {
- start = IndexOfLine({search_term}, start);
- if (start == std::string::npos) break;
- std::vector<std::string> columns = Split(log_.at(start), ',');
- ++start; // Skip the found line.
+ current = IndexOfLine({search_term}, current);
+ if (current == std::string::npos) break;
+ std::string current_line = log_.at(current);
+ std::vector<std::string> columns = Split(current_line, ',');
+ ++current; // Skip the found line.
// TODO(crbug.com/v8/8084): These two continue lines should really be
// errors. But on Windows the log is sometimes mysteriously cut off at the
// end. If the cut-off point happens to fall in the address field, the
@@ -164,7 +173,25 @@ class ScopedLoggerInitializer {
uintptr_t address =
strtoll(columns.at(address_column).c_str(), nullptr, 16);
if (address == 0) continue;
- result.insert(address);
+ if (!allow_duplicates) {
+ auto match = map.find(address);
+ // Tolerate the same address with a different log line (the address may be
+ // reused after GC); the same line appearing twice is an error.
+ if (match != map.end() && match->second.compare(current_line) == 0) {
+ for (size_t i = 0; i < current; i++) {
+ printf("%s\n", log_.at(i).c_str());
+ }
+ printf("%zu\n", current);
+ V8_Fatal(__FILE__, __LINE__, "%s, ... %p appeared twice:\n %s",
+ search_term.c_str(), reinterpret_cast<void*>(address),
+ current_line.c_str());
+ }
+ }
+ map.insert({address, current_line});
+ }
+ // Extract all keys.
+ std::unordered_set<uintptr_t> result;
+ for (auto key_value : map) {
+ result.insert(key_value.first);
}
return result;
}
@@ -431,7 +458,7 @@ TEST(EquivalenceOfLoggingAndTraversal) {
" obj.test =\n"
" (function a(j) { return function b() { return j; } })(100);\n"
"})(this);");
- logger.logger()->StopProfiler();
+ logger.logger()->StopProfilerThread();
CcTest::PreciseCollectAllGarbage();
logger.StringEvent("test-logging-done", "");
@@ -506,12 +533,12 @@ TEST(Issue539892) {
explicit FakeCodeEventLogger(i::Isolate* isolate)
: CodeEventLogger(isolate) {}
- void CodeMoveEvent(i::AbstractCode* from, i::AbstractCode* to) override {}
- void CodeDisableOptEvent(i::AbstractCode* code,
- i::SharedFunctionInfo* shared) override {}
+ void CodeMoveEvent(i::AbstractCode from, i::AbstractCode to) override {}
+ void CodeDisableOptEvent(i::AbstractCode code,
+ i::SharedFunctionInfo shared) override {}
private:
- void LogRecordedBuffer(i::AbstractCode* code, i::SharedFunctionInfo* shared,
+ void LogRecordedBuffer(i::AbstractCode code, i::SharedFunctionInfo shared,
const char* name, int length) override {}
void LogRecordedBuffer(const i::wasm::WasmCode* code, const char* name,
int length) override {}
@@ -559,7 +586,7 @@ TEST(LogAll) {
SETUP_FLAGS();
i::FLAG_log_all = true;
i::FLAG_turbo_inlining = false;
- i::FLAG_enable_one_shot_optimization = false;
+ i::FLAG_allow_natives_syntax = true;
v8::Isolate::CreateParams create_params;
create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
v8::Isolate* isolate = v8::Isolate::New(create_params);
@@ -567,12 +594,27 @@ TEST(LogAll) {
{
ScopedLoggerInitializer logger(saved_log, saved_prof, isolate);
- const char* source_text =
- "function testAddFn(a,b) { return a + b };"
- "let result;"
- "for (let i = 0; i < 100000; i++) { result = testAddFn(i, i); };"
- "testAddFn('1', 1);"
- "for (let i = 0; i < 100000; i++) { result = testAddFn('1', i); }";
+ const char* source_text = R"(
+ function testAddFn(a,b) {
+ return a + b
+ };
+ let result;
+
+ // Warm up the ICs.
+ for (let i = 0; i < 100000; i++) {
+ result = testAddFn(i, i);
+ };
+
+ // Enforce optimization.
+ %OptimizeFunctionOnNextCall(testAddFn);
+ result = testAddFn(1, 1);
+
+ // Cause deopt.
+ testAddFn('1', 1)
+ for (let i = 0; i < 100000; i++) {
+ result = testAddFn('1', i);
+ }
+ )";
CompileRun(source_text);
logger.StopLogging();
@@ -584,10 +626,9 @@ TEST(LogAll) {
CHECK(logger.ContainsLine({"code-creation,Script", ":1:1"}));
CHECK(logger.ContainsLine({"api,v8::Script::Run"}));
CHECK(logger.ContainsLine({"code-creation,LazyCompile,", "testAddFn"}));
+
if (i::FLAG_opt && !i::FLAG_always_opt) {
CHECK(logger.ContainsLine({"code-deopt,", "not a Smi"}));
- if (i::FLAG_enable_one_shot_optimization)
- CHECK(logger.ContainsLine({"code-deopt,", "DeoptimizeNow"}));
CHECK(logger.ContainsLine({"timer-event-start", "V8.DeoptimizeCode"}));
CHECK(logger.ContainsLine({"timer-event-end", "V8.DeoptimizeCode"}));
}
@@ -595,6 +636,7 @@ TEST(LogAll) {
isolate->Dispose();
}
+#ifndef V8_TARGET_ARCH_ARM
TEST(LogInterpretedFramesNativeStack) {
SETUP_FLAGS();
i::FLAG_interpreted_frames_native_stack = true;
@@ -617,6 +659,7 @@ TEST(LogInterpretedFramesNativeStack) {
}
isolate->Dispose();
}
+#endif // V8_TARGET_ARCH_ARM
TEST(ExternalCodeEventListener) {
i::FLAG_log = false;
@@ -630,7 +673,7 @@ TEST(ExternalCodeEventListener) {
v8::HandleScope scope(isolate);
v8::Isolate::Scope isolate_scope(isolate);
v8::Local<v8::Context> context = v8::Context::New(isolate);
- context->Enter();
+ v8::Context::Scope context_scope(context);
TestCodeEventHandler code_event_handler(isolate);
@@ -657,12 +700,69 @@ TEST(ExternalCodeEventListener) {
CHECK_GE(code_event_handler.CountLines("LazyCompile",
"testCodeEventListenerAfterStart"),
1);
-
- context->Exit();
}
isolate->Dispose();
}
+TEST(ExternalCodeEventListenerInnerFunctions) {
+ i::FLAG_log = false;
+ i::FLAG_prof = false;
+
+ v8::ScriptCompiler::CachedData* cache;
+ static const char* source_cstring =
+ "(function f1() { return (function f2() {}); })()";
+
+ v8::Isolate::CreateParams create_params;
+ create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
+ v8::Isolate* isolate1 = v8::Isolate::New(create_params);
+ { // Test that we emit the correct code events from eagerly compiling.
+ v8::HandleScope scope(isolate1);
+ v8::Isolate::Scope isolate_scope(isolate1);
+ v8::Local<v8::Context> context = v8::Context::New(isolate1);
+ v8::Context::Scope context_scope(context);
+
+ TestCodeEventHandler code_event_handler(isolate1);
+ code_event_handler.Enable();
+
+ v8::Local<v8::String> source_string = v8_str(source_cstring);
+ v8::ScriptOrigin origin(v8_str("test"));
+ v8::ScriptCompiler::Source source(source_string, origin);
+ v8::Local<v8::UnboundScript> script =
+ v8::ScriptCompiler::CompileUnboundScript(isolate1, &source)
+ .ToLocalChecked();
+ CHECK_EQ(code_event_handler.CountLines("Script", "f1"), 1);
+ CHECK_EQ(code_event_handler.CountLines("Script", "f2"), 1);
+ cache = v8::ScriptCompiler::CreateCodeCache(script);
+ }
+ isolate1->Dispose();
+
+ v8::Isolate* isolate2 = v8::Isolate::New(create_params);
+ { // Test that we emit the correct code events from deserialization.
+ v8::HandleScope scope(isolate2);
+ v8::Isolate::Scope isolate_scope(isolate2);
+ v8::Local<v8::Context> context = v8::Context::New(isolate2);
+ v8::Context::Scope context_scope(context);
+
+ TestCodeEventHandler code_event_handler(isolate2);
+ code_event_handler.Enable();
+
+ v8::Local<v8::String> source_string = v8_str(source_cstring);
+ v8::ScriptOrigin origin(v8_str("test"));
+ v8::ScriptCompiler::Source source(source_string, origin, cache);
+ {
+ i::DisallowCompilation no_compile_expected(
+ reinterpret_cast<i::Isolate*>(isolate2));
+ v8::ScriptCompiler::CompileUnboundScript(
+ isolate2, &source, v8::ScriptCompiler::kConsumeCodeCache)
+ .ToLocalChecked();
+ }
+ CHECK_EQ(code_event_handler.CountLines("Script", "f1"), 1);
+ CHECK_EQ(code_event_handler.CountLines("Script", "f2"), 1);
+ }
+ isolate2->Dispose();
+}
+
+#ifndef V8_TARGET_ARCH_ARM
TEST(ExternalCodeEventListenerWithInterpretedFramesNativeStack) {
i::FLAG_log = false;
i::FLAG_prof = false;
@@ -712,6 +812,7 @@ TEST(ExternalCodeEventListenerWithInterpretedFramesNativeStack) {
}
isolate->Dispose();
}
+#endif // V8_TARGET_ARCH_ARM
TEST(TraceMaps) {
SETUP_FLAGS();
@@ -723,15 +824,25 @@ TEST(TraceMaps) {
ScopedLoggerInitializer logger(saved_log, saved_prof, isolate);
// Try to create many different kinds of maps to make sure the logging won't
// crash. More detailed tests are implemented separately.
- const char* source_text =
- "let a = {};"
- "for (let i = 0; i < 500; i++) { a['p'+i] = i };"
- "class Test { constructor(i) { this.a = 1; this['p'+i] = 1; }};"
- "let t = new Test();"
- "t.b = 1; t.c = 1; t.d = 3;"
- "for (let i = 0; i < 100; i++) { t = new Test(i) };"
- "t.b = {};";
- CompileRun(source_text);
+ const char* source_text = R"(
+ let a = {};
+ for (let i = 0; i < 500; i++) {
+ a['p'+i] = i
+ };
+ class Test {
+ constructor(i) {
+ this.a = 1;
+ this['p'+i] = 1;
+ }
+ };
+ let t = new Test();
+ t.b = 1; t.c = 1; t.d = 3;
+ for (let i = 0; i < 100; i++) {
+ t = new Test(i)
+ };
+ t.b = {};
+ )";
+ CompileRunChecked(isolate, source_text);
logger.StopLogging();
@@ -744,7 +855,58 @@ TEST(TraceMaps) {
isolate->Dispose();
}
-TEST(LogMaps) {
+namespace {
+// Ensure that all Maps found on the heap have a single corresponding map-create
+// and map-details entry in the v8.log.
+void ValidateMapDetailsLogging(v8::Isolate* isolate,
+ ScopedLoggerInitializer* logger) {
+ // map-create might have duplicates if a Map address is reused after a GC.
+ std::unordered_set<uintptr_t> map_create_addresses =
+ logger->ExtractLogAddresses("map-create", 2, true);
+ std::unordered_set<uintptr_t> map_details_addresses =
+ logger->ExtractLogAddresses("map-details", 2, false);
+
+ // Iterate over all maps on the heap.
+ i::Heap* heap = reinterpret_cast<i::Isolate*>(isolate)->heap();
+ i::HeapIterator iterator(heap);
+ i::DisallowHeapAllocation no_gc;
+ size_t i = 0;
+ for (i::HeapObject obj = iterator.next(); !obj.is_null();
+ obj = iterator.next()) {
+ if (!obj->IsMap()) continue;
+ i++;
+ uintptr_t address = obj->ptr();
+ if (map_create_addresses.find(address) == map_create_addresses.end()) {
+ // logger->PrintLog();
+ i::Map::cast(obj)->Print();
+ V8_Fatal(__FILE__, __LINE__,
+ "Map (%p, #%zu) creation not logged during startup with "
+ "--trace-maps!"
+ "\n# Expected Log Line: map-create, ... %p",
+ reinterpret_cast<void*>(obj->ptr()), i,
+ reinterpret_cast<void*>(obj->ptr()));
+ } else if (map_details_addresses.find(address) ==
+ map_details_addresses.end()) {
+ // logger->PrintLog();
+ i::Map::cast(obj)->Print();
+ V8_Fatal(__FILE__, __LINE__,
+ "Map (%p, #%zu) details not logged during startup with "
+ "--trace-maps!"
+ "\n# Expected Log Line: map-details, ... %p",
+ reinterpret_cast<void*>(obj->ptr()), i,
+ reinterpret_cast<void*>(obj->ptr()));
+ }
+ }
+}
+
+} // namespace
+
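ValidateMapDetailsLogging only depends on the column layout of the log: ExtractLogAddresses(prefix, 2, ...) reads the address from field index 2 of each comma-separated line. Illustrative log lines (the timestamp and trailing detail fields are assumptions here; only the prefix and the address column matter to the helper):

  // map-create,<timestamp>,0x3e2d0a280459
  // map-details,<timestamp>,0x3e2d0a280459,<details ...>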
+TEST(LogMapsDetailsStartup) {
+ // Reusing map addresses might cause these tests to fail.
+ if (i::FLAG_gc_global || i::FLAG_stress_compaction ||
+ i::FLAG_stress_incremental_marking) {
+ return;
+ }
// Test that all Map details from Maps in the snapshot are logged properly.
SETUP_FLAGS();
i::FLAG_trace_maps = true;
@@ -754,27 +916,142 @@ TEST(LogMaps) {
{
ScopedLoggerInitializer logger(saved_log, saved_prof, isolate);
logger.StopLogging();
- std::unordered_set<uintptr_t> map_addresses =
- logger.ExtractAllAddresses("map-details", 2);
- i::Heap* heap = reinterpret_cast<i::Isolate*>(isolate)->heap();
- i::HeapIterator iterator(heap);
- i::DisallowHeapAllocation no_gc;
-
- // Iterate over all maps on the heap.
- size_t i = 0;
- for (i::HeapObject* obj = iterator.next(); obj != nullptr;
- obj = iterator.next()) {
- i++;
- if (!obj->IsMap()) continue;
- uintptr_t address = reinterpret_cast<uintptr_t>(obj);
- if (map_addresses.find(address) != map_addresses.end()) continue;
- i::Map::cast(obj)->Print();
- V8_Fatal(__FILE__, __LINE__,
- "Map (%p, #%zu) was not logged during startup with --trace-maps!"
- "\n# Expected Log Line: map_details, ... %p",
- reinterpret_cast<void*>(obj), i, reinterpret_cast<void*>(obj));
+ ValidateMapDetailsLogging(isolate, &logger);
+ }
+
+ i::FLAG_log_function_events = false;
+ isolate->Dispose();
+}
+
+TEST(LogMapsDetailsCode) {
+ // Reusing map addresses might cause these tests to fail.
+ if (i::FLAG_gc_global || i::FLAG_stress_compaction ||
+ i::FLAG_stress_incremental_marking) {
+ return;
+ }
+ SETUP_FLAGS();
+ i::FLAG_retain_maps_for_n_gc = 0xFFFFFFF;
+ i::FLAG_trace_maps = true;
+ v8::Isolate::CreateParams create_params;
+ create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
+ v8::Isolate* isolate = v8::Isolate::New(create_params);
+ const char* source = R"(
+ // Normal properties overflowing into dict-mode.
+ let a = {};
+ for (let i = 0; i < 500; i++) {
+ a['p'+i] = i
+ };
+ // Constructor / initial maps
+ function Constructor(dictElements=false) {
+ this.a = 1;
+ this.b = 2;
+ this.c = 3;
+ if (dictElements) {
+ this[0xFFFFF] = 1;
+ }
+ this.d = 4;
+ this.e = 5;
+ this.f = 5;
+ }
+ // Keep objects and their maps alive to avoid reusing map addresses.
+ let instances = [];
+ let instance;
+ for (let i = 0; i < 500; i++) {
+ instances.push(new Constructor());
+ }
+ // Map deprecation.
+ for (let i = 0; i < 500; i++) {
+ instance = new Constructor();
+ instance.d = 1.1;
+ instances.push(instance);
+ }
+ for (let i = 0; i < 500; i++) {
+ instance = new Constructor();
+ instance.b = 1.1;
+ instances.push(instance);
+ }
+ for (let i = 0; i < 500; i++) {
+ instance = new Constructor();
+ instance.c = Object;
+ instances.push(instance);
}
+ // Create instance with dict-elements.
+ instances.push(new Constructor(true));
+
+ // Class
+ class Test {
+ constructor(i) {
+ this.a = 1;
+ this['p'+i] = 1;
+ }
+ };
+ let t = new Test();
+ t.b = 1; t.c = 1; t.d = 3;
+ for (let i = 0; i < 100; i++) {
+ t = new Test(i);
+ instances.push(t);
+ }
+ t.b = {};
+
+ // Anonymous classes
+ function create(value) {
+ return new class {
+ constructor() {
+ this.value = value;
+ }
+ }
+ }
+ for (let i = 0; i < 100; i++) {
+ instances.push(create(i));
+ };
+
+ // Modifying some prototypes.
+ Array.prototype.helper = () => 1;
+ [1,2,3].helper();
+ )";
+ {
+ ScopedLoggerInitializer logger(saved_log, saved_prof, isolate);
+ CompileRunChecked(isolate, source);
+ logger.StopLogging();
+ ValidateMapDetailsLogging(isolate, &logger);
+ }
+
+ i::FLAG_log_function_events = false;
+ isolate->Dispose();
+}
+
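The "Map deprecation" loops in the source above rely on field representation generalization: the initial map records this.d as a Smi field, so storing a heap number into d on a later instance generalizes the field and deprecates the earlier map, which the validation still expects to find in the log. A distilled version of the trigger (illustrative only, reusing the CompileRunChecked helper from these tests):

  CompileRunChecked(isolate, R"(
    function C() { this.d = 4; }  // initial map: 'd' holds a Smi
    let o1 = new C();
    let o2 = new C();
    o2.d = 1.1;                   // Smi -> Double; the old map is deprecated
  )");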
+TEST(LogMapsDetailsContexts) {
+ // Reusing map addresses might cause these tests to fail.
+ if (i::FLAG_gc_global || i::FLAG_stress_compaction ||
+ i::FLAG_stress_incremental_marking) {
+ return;
}
+ // Test that Maps created in additional contexts are logged properly.
+ SETUP_FLAGS();
+ i::FLAG_trace_maps = true;
+ v8::Isolate::CreateParams create_params;
+ create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
+ v8::Isolate* isolate = v8::Isolate::New(create_params);
+
+ {
+ ScopedLoggerInitializer logger(saved_log, saved_prof, isolate);
+ // Use the default context.
+ CompileRunChecked(isolate, "{a:1}");
+ // Create additional contexts.
+ v8::Local<v8::Context> env1 = v8::Context::New(isolate);
+ env1->Enter();
+ CompileRun(env1, "{b:1}").ToLocalChecked();
+
+ v8::Local<v8::Context> env2 = v8::Context::New(isolate);
+ env2->Enter();
+ CompileRun(env2, "{c:1}").ToLocalChecked();
+ env2->Exit();
+ env1->Exit();
+
+ logger.StopLogging();
+ ValidateMapDetailsLogging(isolate, &logger);
+ }
+
i::FLAG_log_function_events = false;
isolate->Dispose();
}
diff --git a/deps/v8/test/cctest/test-macro-assembler-arm.cc b/deps/v8/test/cctest/test-macro-assembler-arm.cc
index 3f115af416..c1789560fa 100644
--- a/deps/v8/test/cctest/test-macro-assembler-arm.cc
+++ b/deps/v8/test/cctest/test-macro-assembler-arm.cc
@@ -30,6 +30,7 @@
#include "src/assembler-inl.h"
#include "src/macro-assembler.h"
#include "src/objects-inl.h"
+#include "src/ostreams.h"
#include "src/simulator.h"
#include "src/v8.h"
#include "test/cctest/cctest.h"
@@ -43,17 +44,16 @@ using F = void*(int x, int y, int p2, int p3, int p4);
#define __ masm->
-using F3 = Object*(void* p0, int p1, int p2, int p3, int p4);
+using F3 = void*(void* p0, int p1, int p2, int p3, int p4);
using F5 = int(void*, void*, void*, void*, void*);
TEST(LoadAndStoreWithRepresentation) {
Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
- size_t allocated;
- byte* buffer = AllocateAssemblerBuffer(&allocated);
- MacroAssembler assembler(isolate, buffer, static_cast<int>(allocated),
- v8::internal::CodeObjectRequired::kYes);
+ auto buffer = AllocateAssemblerBuffer();
+ MacroAssembler assembler(isolate, v8::internal::CodeObjectRequired::kYes,
+ buffer->CreateView());
MacroAssembler* masm = &assembler; // Create a pointer for the __ macro.
__ sub(sp, sp, Operand(1 * kPointerSize));
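This hunk, and the many like it across the rest of the file set, replaces the raw byte* buffer plus explicit size with an owning buffer object whose view is handed to the assembler. Pieced together from calls that appear verbatim in this diff, the new lifecycle looks like:

  auto buffer = AllocateAssemblerBuffer();  // owns the backing memory
  MacroAssembler masm(isolate, v8::internal::CodeObjectRequired::kYes,
                      buffer->CreateView());  // assembler borrows a view
  // ... emit code via the __ macros ...
  CodeDesc desc;
  masm.GetCode(isolate, &desc);
  buffer->MakeExecutable();  // was MakeAssemblerBufferExecutable(buffer, allocated)
  auto f = GeneratedCode<F0>::FromBuffer(isolate, buffer->start());
  int result = f.Call();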
@@ -140,10 +140,9 @@ TEST(ExtractLane) {
Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
- size_t allocated;
- byte* buffer = AllocateAssemblerBuffer(&allocated);
- MacroAssembler assembler(isolate, buffer, static_cast<int>(allocated),
- v8::internal::CodeObjectRequired::kYes);
+ auto buffer = AllocateAssemblerBuffer();
+ MacroAssembler assembler(isolate, v8::internal::CodeObjectRequired::kYes,
+ buffer->CreateView());
MacroAssembler* masm = &assembler; // Create a pointer for the __ macro.
typedef struct {
@@ -280,10 +279,9 @@ TEST(ReplaceLane) {
Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
- size_t allocated;
- byte* buffer = AllocateAssemblerBuffer(&allocated);
- MacroAssembler assembler(isolate, buffer, static_cast<int>(allocated),
- v8::internal::CodeObjectRequired::kYes);
+ auto buffer = AllocateAssemblerBuffer();
+ MacroAssembler assembler(isolate, v8::internal::CodeObjectRequired::kYes,
+ buffer->CreateView());
MacroAssembler* masm = &assembler; // Create a pointer for the __ macro.
typedef struct {
diff --git a/deps/v8/test/cctest/test-macro-assembler-mips.cc b/deps/v8/test/cctest/test-macro-assembler-mips.cc
index 97ddda12c5..63d0794fc8 100644
--- a/deps/v8/test/cctest/test-macro-assembler-mips.cc
+++ b/deps/v8/test/cctest/test-macro-assembler-mips.cc
@@ -31,8 +31,9 @@
#include "src/api-inl.h"
#include "src/base/utils/random-number-generator.h"
#include "src/macro-assembler.h"
-#include "src/mips/macro-assembler-mips.h"
#include "src/objects-inl.h"
+#include "src/objects/heap-number.h"
+#include "src/objects/js-array-inl.h"
#include "src/simulator.h"
#include "src/v8.h"
#include "test/cctest/cctest.h"
@@ -41,9 +42,9 @@ namespace v8 {
namespace internal {
// TODO(mips): Refine these signatures per test case.
-using F1 = Object*(int x, int p1, int p2, int p3, int p4);
-using F3 = Object*(void* p, int p1, int p2, int p3, int p4);
-using F4 = Object*(void* p0, void* p1, int p2, int p3, int p4);
+using F1 = void*(int x, int p1, int p2, int p3, int p4);
+using F3 = void*(void* p, int p1, int p2, int p3, int p4);
+using F4 = void*(void* p0, void* p1, int p2, int p3, int p4);
#define __ masm->
@@ -62,8 +63,7 @@ TEST(BYTESWAP) {
uint32_t test_values[] = {0x5612FFCD, 0x9D327ACC, 0x781A15C3, 0xFCDE, 0x9F,
0xC81A15C3, 0x80000000, 0xFFFFFFFF, 0x00008000};
- MacroAssembler assembler(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assembler(isolate, v8::internal::CodeObjectRequired::kYes);
MacroAssembler* masm = &assembler;
@@ -112,7 +112,7 @@ static void TestNaN(const char *code) {
// tests checks the case where a x86 NaN value is serialized into the
// snapshot on the simulator during cross compilation.
v8::HandleScope scope(CcTest::isolate());
- v8::Local<v8::Context> context = CcTest::NewContext(PRINT_EXTENSION);
+ v8::Local<v8::Context> context = CcTest::NewContext({PRINT_EXTENSION_ID});
v8::Context::Scope context_scope(context);
v8::Local<v8::Script> script =
@@ -120,9 +120,8 @@ static void TestNaN(const char *code) {
v8::Local<v8::Object> result =
v8::Local<v8::Object>::Cast(script->Run(context).ToLocalChecked());
i::Handle<i::JSReceiver> o = v8::Utils::OpenHandle(*result);
- i::Handle<i::JSArray> array1(reinterpret_cast<i::JSArray*>(*o),
- o->GetIsolate());
- i::FixedDoubleArray* a = i::FixedDoubleArray::cast(array1->elements());
+ i::Handle<i::JSArray> array1(i::JSArray::cast(*o), o->GetIsolate());
+ i::FixedDoubleArray a = i::FixedDoubleArray::cast(array1->elements());
double value = a->get_scalar(0);
CHECK(std::isnan(value) &&
bit_cast<uint64_t>(value) ==
@@ -158,8 +157,7 @@ TEST(jump_tables4) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assembler(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assembler(isolate, v8::internal::CodeObjectRequired::kYes);
MacroAssembler* masm = &assembler;
const int kNumCases = 512;
@@ -221,8 +219,7 @@ TEST(jump_tables5) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assembler(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assembler(isolate, v8::internal::CodeObjectRequired::kYes);
MacroAssembler* masm = &assembler;
const int kNumCases = 512;
@@ -288,8 +285,7 @@ TEST(jump_tables6) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assembler(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assembler(isolate, v8::internal::CodeObjectRequired::kYes);
MacroAssembler* masm = &assembler;
const int kSwitchTableCases = 40;
@@ -370,8 +366,7 @@ TEST(jump_tables6) {
static uint32_t run_lsa(uint32_t rt, uint32_t rs, int8_t sa) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assembler(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assembler(isolate, v8::internal::CodeObjectRequired::kYes);
MacroAssembler* masm = &assembler;
__ Lsa(v0, a0, a1, sa);
@@ -497,8 +492,7 @@ RET_TYPE run_Cvt(IN_TYPE x, Func GenerateConvertInstructionFunc) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
MacroAssembler* masm = &assm;
__ mtc1(a0, f4);
@@ -580,8 +574,7 @@ TEST(OverflowInstructions) {
int32_t jj = *j;
int32_t expected_add, expected_sub, expected_mul;
bool expected_add_ovf, expected_sub_ovf, expected_mul_ovf;
- MacroAssembler assembler(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assembler(isolate, v8::internal::CodeObjectRequired::kYes);
MacroAssembler* masm = &assembler;
__ lw(t0, MemOperand(a0, offsetof(T, lhs)));
@@ -658,8 +651,7 @@ TEST(min_max_nan) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assembler(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assembler(isolate, v8::internal::CodeObjectRequired::kYes);
MacroAssembler* masm = &assembler;
struct TestFloat {
@@ -771,8 +763,7 @@ bool run_Unaligned(char* memory_buffer, int32_t in_offset, int32_t out_offset,
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
MacroAssembler* masm = &assm;
IN_TYPE res;
@@ -1020,8 +1011,7 @@ bool run_Sltu(uint32_t rs, uint32_t rd, Func GenerateSltuInstructionFunc) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
MacroAssembler* masm = &assm;
GenerateSltuInstructionFunc(masm, rd);
@@ -1139,8 +1129,7 @@ TEST(macro_float_minmax_f32) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assembler(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assembler(isolate, v8::internal::CodeObjectRequired::kYes);
MacroAssembler* masm = &assembler;
struct Inputs {
@@ -1281,8 +1270,7 @@ TEST(macro_float_minmax_f64) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assembler(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assembler(isolate, v8::internal::CodeObjectRequired::kYes);
MacroAssembler* masm = &assembler;
struct Inputs {
diff --git a/deps/v8/test/cctest/test-macro-assembler-mips64.cc b/deps/v8/test/cctest/test-macro-assembler-mips64.cc
index b2aea23920..14acf2eb02 100644
--- a/deps/v8/test/cctest/test-macro-assembler-mips64.cc
+++ b/deps/v8/test/cctest/test-macro-assembler-mips64.cc
@@ -33,8 +33,8 @@
#include "src/base/utils/random-number-generator.h"
#include "src/macro-assembler.h"
-#include "src/mips64/macro-assembler-mips64.h"
#include "src/objects-inl.h"
+#include "src/objects/heap-number.h"
#include "src/simulator.h"
namespace v8 {
@@ -42,9 +42,9 @@ namespace internal {
// TODO(mips64): Refine these signatures per test case.
using FV = void*(int64_t x, int64_t y, int p2, int p3, int p4);
-using F1 = Object*(int x, int p1, int p2, int p3, int p4);
-using F3 = Object*(void* p, int p1, int p2, int p3, int p4);
-using F4 = Object*(void* p0, void* p1, int p2, int p3, int p4);
+using F1 = void*(int x, int p1, int p2, int p3, int p4);
+using F3 = void*(void* p, int p1, int p2, int p3, int p4);
+using F4 = void*(void* p0, void* p1, int p2, int p3, int p4);
#define __ masm->
@@ -73,8 +73,7 @@ TEST(BYTESWAP) {
0x0000000080000000,
0x0000000000008000};
- MacroAssembler assembler(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assembler(isolate, v8::internal::CodeObjectRequired::kYes);
MacroAssembler* masm = &assembler;
@@ -147,8 +146,7 @@ TEST(LoadConstants) {
refConstants[i] = ~(mask << i);
}
- MacroAssembler assembler(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assembler(isolate, v8::internal::CodeObjectRequired::kYes);
MacroAssembler* masm = &assembler;
__ mov(a4, a0);
@@ -181,8 +179,7 @@ TEST(LoadAddress) {
Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
- MacroAssembler assembler(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assembler(isolate, v8::internal::CodeObjectRequired::kYes);
MacroAssembler* masm = &assembler;
Label to_jump, skip;
__ mov(a4, a0);
@@ -225,8 +222,7 @@ TEST(jump_tables4) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assembler(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assembler(isolate, v8::internal::CodeObjectRequired::kYes);
MacroAssembler* masm = &assembler;
const int kNumCases = 512;
@@ -288,8 +284,7 @@ TEST(jump_tables5) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assembler(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assembler(isolate, v8::internal::CodeObjectRequired::kYes);
MacroAssembler* masm = &assembler;
const int kNumCases = 512;
@@ -362,8 +357,7 @@ TEST(jump_tables6) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assembler(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assembler(isolate, v8::internal::CodeObjectRequired::kYes);
MacroAssembler* masm = &assembler;
const int kSwitchTableCases = 40;
@@ -445,8 +439,7 @@ TEST(jump_tables6) {
static uint64_t run_lsa(uint32_t rt, uint32_t rs, int8_t sa) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assembler(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assembler(isolate, v8::internal::CodeObjectRequired::kYes);
MacroAssembler* masm = &assembler;
__ Lsa(v0, a0, a1, sa);
@@ -526,8 +519,7 @@ TEST(Lsa) {
static uint64_t run_dlsa(uint64_t rt, uint64_t rs, int8_t sa) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assembler(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assembler(isolate, v8::internal::CodeObjectRequired::kYes);
MacroAssembler* masm = &assembler;
__ Dlsa(v0, a0, a1, sa);
@@ -676,8 +668,7 @@ RET_TYPE run_Cvt(IN_TYPE x, Func GenerateConvertInstructionFunc) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
MacroAssembler* masm = &assm;
GenerateConvertInstructionFunc(masm);
@@ -819,8 +810,7 @@ TEST(OverflowInstructions) {
int32_t jj32 = static_cast<int32_t>(jj);
int32_t expected_mul;
int64_t expected_add_ovf, expected_sub_ovf, expected_mul_ovf;
- MacroAssembler assembler(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assembler(isolate, v8::internal::CodeObjectRequired::kYes);
MacroAssembler* masm = &assembler;
__ ld(t0, MemOperand(a0, offsetof(T, lhs)));
@@ -899,8 +889,7 @@ TEST(min_max_nan) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assembler(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assembler(isolate, v8::internal::CodeObjectRequired::kYes);
MacroAssembler* masm = &assembler;
struct TestFloat {
@@ -1012,8 +1001,7 @@ bool run_Unaligned(char* memory_buffer, int32_t in_offset, int32_t out_offset,
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
MacroAssembler* masm = &assm;
IN_TYPE res;
@@ -1378,8 +1366,7 @@ bool run_Sltu(uint64_t rs, uint64_t rd, Func GenerateSltuInstructionFunc) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
MacroAssembler* masm = &assm;
GenerateSltuInstructionFunc(masm, rd);
@@ -1497,8 +1484,7 @@ TEST(macro_float_minmax_f32) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assembler(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assembler(isolate, v8::internal::CodeObjectRequired::kYes);
MacroAssembler* masm = &assembler;
struct Inputs {
@@ -1639,8 +1625,7 @@ TEST(macro_float_minmax_f64) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assembler(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler assembler(isolate, v8::internal::CodeObjectRequired::kYes);
MacroAssembler* masm = &assembler;
struct Inputs {
diff --git a/deps/v8/test/cctest/test-macro-assembler-x64.cc b/deps/v8/test/cctest/test-macro-assembler-x64.cc
index 49d57aed21..a110ed76aa 100644
--- a/deps/v8/test/cctest/test-macro-assembler-x64.cc
+++ b/deps/v8/test/cctest/test-macro-assembler-x64.cc
@@ -33,6 +33,7 @@
#include "src/heap/factory.h"
#include "src/macro-assembler.h"
#include "src/objects-inl.h"
+#include "src/objects/smi.h"
#include "src/simulator.h"
#include "test/cctest/cctest.h"
#include "test/common/assembler-tester.h"
@@ -76,9 +77,9 @@ TEST(Smi) {
bool is_in_range = number >= Smi::kMinValue && number <= Smi::kMaxValue;
CHECK_EQ(is_in_range, is_valid);
if (is_valid) {
- Smi* smi_from_intptr = Smi::FromIntptr(number);
+ Smi smi_from_intptr = Smi::FromIntptr(number);
if (static_cast<int>(number) == number) { // Is a 32-bit int.
- Smi* smi_from_int = Smi::FromInt(static_cast<int32_t>(number));
+ Smi smi_from_int = Smi::FromInt(static_cast<int32_t>(number));
CHECK_EQ(smi_from_int, smi_from_intptr);
}
int64_t smi_value = smi_from_intptr->value();
@@ -87,11 +88,10 @@ TEST(Smi) {
}
}
-
-static void TestMoveSmi(MacroAssembler* masm, Label* exit, int id, Smi* value) {
+static void TestMoveSmi(MacroAssembler* masm, Label* exit, int id, Smi value) {
__ movl(rax, Immediate(id));
__ Move(rcx, value);
- __ Set(rdx, reinterpret_cast<intptr_t>(value));
+ __ Set(rdx, static_cast<intptr_t>(value.ptr()));
__ cmpq(rcx, rdx);
__ j(not_equal, exit);
}
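Throughout this file Smi changes from a pointer type (Smi*) to a small value type wrapping the tagged word, with the explicit .ptr() accessor replacing reinterpret_cast. The observable behavior at the call sites in these tests, as a sketch (the exact bit layout of the tagged word is an internal detail and not assumed here):

  Smi a = Smi::FromInt(42);          // constructed by value, no heap allocation
  Smi b = Smi::FromIntptr(42);
  CHECK_EQ(a, b);                    // compared by tagged word, not identity
  int64_t v = a->value();            // operator-> still works on the value type
  __ Set(rdx, static_cast<intptr_t>(a.ptr()));  // tagged word as an integer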
@@ -101,15 +101,14 @@ static void TestMoveSmi(MacroAssembler* masm, Label* exit, int id, Smi* value) {
TEST(SmiMove) {
Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
- size_t allocated;
- byte* buffer = AllocateAssemblerBuffer(&allocated);
- MacroAssembler assembler(isolate, buffer, static_cast<int>(allocated),
- v8::internal::CodeObjectRequired::kYes);
+ auto buffer = AllocateAssemblerBuffer();
+ MacroAssembler assembler(isolate, v8::internal::CodeObjectRequired::kYes,
+ buffer->CreateView());
MacroAssembler* masm = &assembler; // Create a pointer for the __ macro.
EntryCode(masm);
Label exit;
- TestMoveSmi(masm, &exit, 1, Smi::kZero);
+ TestMoveSmi(masm, &exit, 1, Smi::zero());
TestMoveSmi(masm, &exit, 2, Smi::FromInt(127));
TestMoveSmi(masm, &exit, 3, Smi::FromInt(128));
TestMoveSmi(masm, &exit, 4, Smi::FromInt(255));
@@ -129,9 +128,9 @@ TEST(SmiMove) {
CodeDesc desc;
masm->GetCode(isolate, &desc);
- MakeAssemblerBufferExecutable(buffer, allocated);
+ buffer->MakeExecutable();
// Call the function from C++.
- auto f = GeneratedCode<F0>::FromBuffer(CcTest::i_isolate(), buffer);
+ auto f = GeneratedCode<F0>::FromBuffer(CcTest::i_isolate(), buffer->start());
int result = f.Call();
CHECK_EQ(0, result);
}
@@ -186,11 +185,9 @@ void TestSmiCompare(MacroAssembler* masm, Label* exit, int id, int x, int y) {
TEST(SmiCompare) {
Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
- size_t allocated;
- byte* buffer =
- AllocateAssemblerBuffer(&allocated, 2 * Assembler::kMinimalBufferSize);
- MacroAssembler assembler(isolate, buffer, static_cast<int>(allocated),
- v8::internal::CodeObjectRequired::kYes);
+ auto buffer = AllocateAssemblerBuffer(2 * Assembler::kMinimalBufferSize);
+ MacroAssembler assembler(isolate, v8::internal::CodeObjectRequired::kYes,
+ buffer->CreateView());
MacroAssembler* masm = &assembler;
EntryCode(masm);
@@ -223,9 +220,9 @@ TEST(SmiCompare) {
CodeDesc desc;
masm->GetCode(isolate, &desc);
- MakeAssemblerBufferExecutable(buffer, allocated);
+ buffer->MakeExecutable();
// Call the function from C++.
- auto f = GeneratedCode<F0>::FromBuffer(CcTest::i_isolate(), buffer);
+ auto f = GeneratedCode<F0>::FromBuffer(CcTest::i_isolate(), buffer->start());
int result = f.Call();
CHECK_EQ(0, result);
}
@@ -233,10 +230,9 @@ TEST(SmiCompare) {
TEST(SmiTag) {
Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
- size_t allocated;
- byte* buffer = AllocateAssemblerBuffer(&allocated);
- MacroAssembler assembler(isolate, buffer, static_cast<int>(allocated),
- v8::internal::CodeObjectRequired::kYes);
+ auto buffer = AllocateAssemblerBuffer();
+ MacroAssembler assembler(isolate, v8::internal::CodeObjectRequired::kYes,
+ buffer->CreateView());
MacroAssembler* masm = &assembler;
EntryCode(masm);
@@ -245,35 +241,35 @@ TEST(SmiTag) {
__ movq(rax, Immediate(1)); // Test number.
__ movq(rcx, Immediate(0));
__ SmiTag(rcx, rcx);
- __ Set(rdx, reinterpret_cast<intptr_t>(Smi::kZero));
+ __ Set(rdx, Smi::kZero.ptr());
__ cmpq(rcx, rdx);
__ j(not_equal, &exit);
__ movq(rax, Immediate(2)); // Test number.
__ movq(rcx, Immediate(1024));
__ SmiTag(rcx, rcx);
- __ Set(rdx, reinterpret_cast<intptr_t>(Smi::FromInt(1024)));
+ __ Set(rdx, Smi::FromInt(1024).ptr());
__ cmpq(rcx, rdx);
__ j(not_equal, &exit);
__ movq(rax, Immediate(3)); // Test number.
__ movq(rcx, Immediate(-1));
__ SmiTag(rcx, rcx);
- __ Set(rdx, reinterpret_cast<intptr_t>(Smi::FromInt(-1)));
+ __ Set(rdx, Smi::FromInt(-1).ptr());
__ cmpq(rcx, rdx);
__ j(not_equal, &exit);
__ movq(rax, Immediate(4)); // Test number.
__ movq(rcx, Immediate(Smi::kMaxValue));
__ SmiTag(rcx, rcx);
- __ Set(rdx, reinterpret_cast<intptr_t>(Smi::FromInt(Smi::kMaxValue)));
+ __ Set(rdx, Smi::FromInt(Smi::kMaxValue).ptr());
__ cmpq(rcx, rdx);
__ j(not_equal, &exit);
__ movq(rax, Immediate(5)); // Test number.
__ movq(rcx, Immediate(Smi::kMinValue));
__ SmiTag(rcx, rcx);
- __ Set(rdx, reinterpret_cast<intptr_t>(Smi::FromInt(Smi::kMinValue)));
+ __ Set(rdx, Smi::FromInt(Smi::kMinValue).ptr());
__ cmpq(rcx, rdx);
__ j(not_equal, &exit);
@@ -282,35 +278,35 @@ TEST(SmiTag) {
__ movq(rax, Immediate(6)); // Test number.
__ movq(rcx, Immediate(0));
__ SmiTag(r8, rcx);
- __ Set(rdx, reinterpret_cast<intptr_t>(Smi::kZero));
+ __ Set(rdx, Smi::zero().ptr());
__ cmpq(r8, rdx);
__ j(not_equal, &exit);
__ movq(rax, Immediate(7)); // Test number.
__ movq(rcx, Immediate(1024));
__ SmiTag(r8, rcx);
- __ Set(rdx, reinterpret_cast<intptr_t>(Smi::FromInt(1024)));
+ __ Set(rdx, Smi::FromInt(1024).ptr());
__ cmpq(r8, rdx);
__ j(not_equal, &exit);
__ movq(rax, Immediate(8)); // Test number.
__ movq(rcx, Immediate(-1));
__ SmiTag(r8, rcx);
- __ Set(rdx, reinterpret_cast<intptr_t>(Smi::FromInt(-1)));
+ __ Set(rdx, Smi::FromInt(-1).ptr());
__ cmpq(r8, rdx);
__ j(not_equal, &exit);
__ movq(rax, Immediate(9)); // Test number.
__ movq(rcx, Immediate(Smi::kMaxValue));
__ SmiTag(r8, rcx);
- __ Set(rdx, reinterpret_cast<intptr_t>(Smi::FromInt(Smi::kMaxValue)));
+ __ Set(rdx, Smi::FromInt(Smi::kMaxValue).ptr());
__ cmpq(r8, rdx);
__ j(not_equal, &exit);
__ movq(rax, Immediate(10)); // Test number.
__ movq(rcx, Immediate(Smi::kMinValue));
__ SmiTag(r8, rcx);
- __ Set(rdx, reinterpret_cast<intptr_t>(Smi::FromInt(Smi::kMinValue)));
+ __ Set(rdx, Smi::FromInt(Smi::kMinValue).ptr());
__ cmpq(r8, rdx);
__ j(not_equal, &exit);
@@ -322,9 +318,9 @@ TEST(SmiTag) {
CodeDesc desc;
masm->GetCode(isolate, &desc);
- MakeAssemblerBufferExecutable(buffer, allocated);
+ buffer->MakeExecutable();
// Call the function from C++.
- auto f = GeneratedCode<F0>::FromBuffer(CcTest::i_isolate(), buffer);
+ auto f = GeneratedCode<F0>::FromBuffer(CcTest::i_isolate(), buffer->start());
int result = f.Call();
CHECK_EQ(0, result);
}
@@ -332,10 +328,9 @@ TEST(SmiTag) {
TEST(SmiCheck) {
Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
- size_t allocated;
- byte* buffer = AllocateAssemblerBuffer(&allocated);
- MacroAssembler assembler(isolate, buffer, static_cast<int>(allocated),
- v8::internal::CodeObjectRequired::kYes);
+ auto buffer = AllocateAssemblerBuffer();
+ MacroAssembler assembler(isolate, v8::internal::CodeObjectRequired::kYes,
+ buffer->CreateView());
MacroAssembler* masm = &assembler;
EntryCode(masm);
@@ -398,9 +393,9 @@ TEST(SmiCheck) {
CodeDesc desc;
masm->GetCode(isolate, &desc);
- MakeAssemblerBufferExecutable(buffer, allocated);
+ buffer->MakeExecutable();
// Call the function from C++.
- auto f = GeneratedCode<F0>::FromBuffer(CcTest::i_isolate(), buffer);
+ auto f = GeneratedCode<F0>::FromBuffer(CcTest::i_isolate(), buffer->start());
int result = f.Call();
CHECK_EQ(0, result);
}
@@ -431,10 +426,9 @@ void TestSmiIndex(MacroAssembler* masm, Label* exit, int id, int x) {
TEST(SmiIndex) {
Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
- size_t allocated;
- byte* buffer = AllocateAssemblerBuffer(&allocated);
- MacroAssembler assembler(isolate, buffer, static_cast<int>(allocated),
- v8::internal::CodeObjectRequired::kYes);
+ auto buffer = AllocateAssemblerBuffer();
+ MacroAssembler assembler(isolate, v8::internal::CodeObjectRequired::kYes,
+ buffer->CreateView());
MacroAssembler* masm = &assembler;
EntryCode(masm);
@@ -453,9 +447,9 @@ TEST(SmiIndex) {
CodeDesc desc;
masm->GetCode(isolate, &desc);
- MakeAssemblerBufferExecutable(buffer, allocated);
+ buffer->MakeExecutable();
// Call the function from C++.
- auto f = GeneratedCode<F0>::FromBuffer(CcTest::i_isolate(), buffer);
+ auto f = GeneratedCode<F0>::FromBuffer(CcTest::i_isolate(), buffer->start());
int result = f.Call();
CHECK_EQ(0, result);
}
@@ -466,10 +460,9 @@ TEST(OperandOffset) {
Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
- size_t allocated;
- byte* buffer = AllocateAssemblerBuffer(&allocated);
- MacroAssembler assembler(isolate, buffer, static_cast<int>(allocated),
- v8::internal::CodeObjectRequired::kYes);
+ auto buffer = AllocateAssemblerBuffer();
+ MacroAssembler assembler(isolate, v8::internal::CodeObjectRequired::kYes,
+ buffer->CreateView());
MacroAssembler* masm = &assembler;
Label exit;
@@ -494,9 +487,9 @@ TEST(OperandOffset) {
// r15 = rsp[3]
// rbx = rsp[5]
// r13 = rsp[7]
- __ leaq(r14, Operand(rsp, 3 * kPointerSize));
- __ leaq(r13, Operand(rbp, -3 * kPointerSize));
- __ leaq(rbx, Operand(rbp, -5 * kPointerSize));
+ __ leaq(r14, Operand(rsp, 3 * kSystemPointerSize));
+ __ leaq(r13, Operand(rbp, -3 * kSystemPointerSize));
+ __ leaq(rbx, Operand(rbp, -5 * kSystemPointerSize));
__ movl(rcx, Immediate(2));
__ Move(r8, reinterpret_cast<Address>(&data[128]), RelocInfo::NONE);
__ movl(rax, Immediate(1));
@@ -511,12 +504,12 @@ TEST(OperandOffset) {
// Test 2.
// Zero to non-zero displacement.
- __ movl(rdx, Operand(sp0, 2 * kPointerSize));
+ __ movl(rdx, Operand(sp0, 2 * kSystemPointerSize));
__ cmpl(rdx, Immediate(0x107));
__ j(not_equal, &exit);
__ incq(rax);
- Operand sp2 = Operand(rsp, 2 * kPointerSize);
+ Operand sp2 = Operand(rsp, 2 * kSystemPointerSize);
// Test 3.
__ movl(rdx, sp2); // Sanity check.
@@ -524,18 +517,18 @@ TEST(OperandOffset) {
__ j(not_equal, &exit);
__ incq(rax);
- __ movl(rdx, Operand(sp2, 2 * kPointerSize));
+ __ movl(rdx, Operand(sp2, 2 * kSystemPointerSize));
__ cmpl(rdx, Immediate(0x105));
__ j(not_equal, &exit);
__ incq(rax);
// Non-zero to zero displacement.
- __ movl(rdx, Operand(sp2, -2 * kPointerSize));
+ __ movl(rdx, Operand(sp2, -2 * kSystemPointerSize));
__ cmpl(rdx, Immediate(0x109));
__ j(not_equal, &exit);
__ incq(rax);
- Operand sp2c2 = Operand(rsp, rcx, times_pointer_size, 2 * kPointerSize);
+ Operand sp2c2 = Operand(rsp, rcx, times_pointer_size, 2 * kSystemPointerSize);
// Test 6.
__ movl(rdx, sp2c2); // Sanity check.
@@ -543,13 +536,13 @@ TEST(OperandOffset) {
__ j(not_equal, &exit);
__ incq(rax);
- __ movl(rdx, Operand(sp2c2, 2 * kPointerSize));
+ __ movl(rdx, Operand(sp2c2, 2 * kSystemPointerSize));
__ cmpl(rdx, Immediate(0x103));
__ j(not_equal, &exit);
__ incq(rax);
// Non-zero to zero displacement.
- __ movl(rdx, Operand(sp2c2, -2 * kPointerSize));
+ __ movl(rdx, Operand(sp2c2, -2 * kSystemPointerSize));
__ cmpl(rdx, Immediate(0x107));
__ j(not_equal, &exit);
__ incq(rax);
@@ -564,12 +557,12 @@ TEST(OperandOffset) {
__ incq(rax);
// Zero to non-zero displacement.
- __ movl(rdx, Operand(bp0, -2 * kPointerSize));
+ __ movl(rdx, Operand(bp0, -2 * kSystemPointerSize));
__ cmpl(rdx, Immediate(0x102));
__ j(not_equal, &exit);
__ incq(rax);
- Operand bp2 = Operand(rbp, -2 * kPointerSize);
+ Operand bp2 = Operand(rbp, -2 * kSystemPointerSize);
// Test 11.
__ movl(rdx, bp2); // Sanity check.
@@ -578,17 +571,18 @@ TEST(OperandOffset) {
__ incq(rax);
// Non-zero to zero displacement.
- __ movl(rdx, Operand(bp2, 2 * kPointerSize));
+ __ movl(rdx, Operand(bp2, 2 * kSystemPointerSize));
__ cmpl(rdx, Immediate(0x100));
__ j(not_equal, &exit);
__ incq(rax);
- __ movl(rdx, Operand(bp2, -2 * kPointerSize));
+ __ movl(rdx, Operand(bp2, -2 * kSystemPointerSize));
__ cmpl(rdx, Immediate(0x104));
__ j(not_equal, &exit);
__ incq(rax);
- Operand bp2c4 = Operand(rbp, rcx, times_pointer_size, -4 * kPointerSize);
+ Operand bp2c4 =
+ Operand(rbp, rcx, times_pointer_size, -4 * kSystemPointerSize);
// Test 14:
__ movl(rdx, bp2c4); // Sanity check.
@@ -596,12 +590,12 @@ TEST(OperandOffset) {
__ j(not_equal, &exit);
__ incq(rax);
- __ movl(rdx, Operand(bp2c4, 2 * kPointerSize));
+ __ movl(rdx, Operand(bp2c4, 2 * kSystemPointerSize));
__ cmpl(rdx, Immediate(0x100));
__ j(not_equal, &exit);
__ incq(rax);
- __ movl(rdx, Operand(bp2c4, -2 * kPointerSize));
+ __ movl(rdx, Operand(bp2c4, -2 * kSystemPointerSize));
__ cmpl(rdx, Immediate(0x104));
__ j(not_equal, &exit);
__ incq(rax);
@@ -614,17 +608,17 @@ TEST(OperandOffset) {
__ j(not_equal, &exit);
__ incq(rax);
- __ movl(rdx, Operand(bx0, 5 * kPointerSize));
+ __ movl(rdx, Operand(bx0, 5 * kSystemPointerSize));
__ cmpl(rdx, Immediate(0x100));
__ j(not_equal, &exit);
__ incq(rax);
- __ movl(rdx, Operand(bx0, -4 * kPointerSize));
+ __ movl(rdx, Operand(bx0, -4 * kSystemPointerSize));
__ cmpl(rdx, Immediate(0x109));
__ j(not_equal, &exit);
__ incq(rax);
- Operand bx2 = Operand(rbx, 2 * kPointerSize);
+ Operand bx2 = Operand(rbx, 2 * kSystemPointerSize);
// Test 20.
__ movl(rdx, bx2); // Sanity check.
@@ -632,18 +626,19 @@ TEST(OperandOffset) {
__ j(not_equal, &exit);
__ incq(rax);
- __ movl(rdx, Operand(bx2, 2 * kPointerSize));
+ __ movl(rdx, Operand(bx2, 2 * kSystemPointerSize));
__ cmpl(rdx, Immediate(0x101));
__ j(not_equal, &exit);
__ incq(rax);
// Non-zero to zero displacement.
- __ movl(rdx, Operand(bx2, -2 * kPointerSize));
+ __ movl(rdx, Operand(bx2, -2 * kSystemPointerSize));
__ cmpl(rdx, Immediate(0x105));
__ j(not_equal, &exit);
__ incq(rax);
- Operand bx2c2 = Operand(rbx, rcx, times_pointer_size, -2 * kPointerSize);
+ Operand bx2c2 =
+ Operand(rbx, rcx, times_pointer_size, -2 * kSystemPointerSize);
// Test 23.
__ movl(rdx, bx2c2); // Sanity check.
@@ -651,12 +646,12 @@ TEST(OperandOffset) {
__ j(not_equal, &exit);
__ incq(rax);
- __ movl(rdx, Operand(bx2c2, 2 * kPointerSize));
+ __ movl(rdx, Operand(bx2c2, 2 * kSystemPointerSize));
__ cmpl(rdx, Immediate(0x103));
__ j(not_equal, &exit);
__ incq(rax);
- __ movl(rdx, Operand(bx2c2, -2 * kPointerSize));
+ __ movl(rdx, Operand(bx2c2, -2 * kSystemPointerSize));
__ cmpl(rdx, Immediate(0x107));
__ j(not_equal, &exit);
__ incq(rax);
@@ -794,7 +789,7 @@ TEST(OperandOffset) {
__ movl(rax, Immediate(0));
__ bind(&exit);
- __ leaq(rsp, Operand(rbp, kPointerSize));
+ __ leaq(rsp, Operand(rbp, kSystemPointerSize));
__ popq(rbp);
__ popq(rbx);
__ popq(r14);
@@ -805,9 +800,9 @@ TEST(OperandOffset) {
CodeDesc desc;
masm->GetCode(isolate, &desc);
- MakeAssemblerBufferExecutable(buffer, allocated);
+ buffer->MakeExecutable();
// Call the function from C++.
- auto f = GeneratedCode<F0>::FromBuffer(CcTest::i_isolate(), buffer);
+ auto f = GeneratedCode<F0>::FromBuffer(CcTest::i_isolate(), buffer->start());
int result = f.Call();
CHECK_EQ(0, result);
}
@@ -816,146 +811,159 @@ TEST(OperandOffset) {
TEST(LoadAndStoreWithRepresentation) {
Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
- size_t allocated;
- byte* buffer = AllocateAssemblerBuffer(&allocated);
- MacroAssembler assembler(isolate, buffer, static_cast<int>(allocated),
- v8::internal::CodeObjectRequired::kYes);
+ auto buffer = AllocateAssemblerBuffer();
+ MacroAssembler assembler(isolate, v8::internal::CodeObjectRequired::kYes,
+ buffer->CreateView());
MacroAssembler* masm = &assembler; // Create a pointer for the __ macro.
EntryCode(masm);
- __ subq(rsp, Immediate(1 * kPointerSize));
+ __ subq(rsp, Immediate(1 * kSystemPointerSize));
Label exit;
// Test 1.
__ movq(rax, Immediate(1)); // Test number.
- __ movq(Operand(rsp, 0 * kPointerSize), Immediate(0));
+ __ movq(Operand(rsp, 0 * kSystemPointerSize), Immediate(0));
__ movq(rcx, Immediate(-1));
- __ Store(Operand(rsp, 0 * kPointerSize), rcx, Representation::UInteger8());
- __ movq(rcx, Operand(rsp, 0 * kPointerSize));
+ __ Store(Operand(rsp, 0 * kSystemPointerSize), rcx,
+ Representation::UInteger8());
+ __ movq(rcx, Operand(rsp, 0 * kSystemPointerSize));
__ movl(rdx, Immediate(255));
__ cmpq(rcx, rdx);
__ j(not_equal, &exit);
- __ Load(rdx, Operand(rsp, 0 * kPointerSize), Representation::UInteger8());
+ __ Load(rdx, Operand(rsp, 0 * kSystemPointerSize),
+ Representation::UInteger8());
__ cmpq(rcx, rdx);
__ j(not_equal, &exit);
// Test 2.
__ movq(rax, Immediate(2)); // Test number.
- __ movq(Operand(rsp, 0 * kPointerSize), Immediate(0));
+ __ movq(Operand(rsp, 0 * kSystemPointerSize), Immediate(0));
__ Set(rcx, V8_2PART_UINT64_C(0xDEADBEAF, 12345678));
- __ Store(Operand(rsp, 0 * kPointerSize), rcx, Representation::Smi());
- __ movq(rcx, Operand(rsp, 0 * kPointerSize));
+ __ Store(Operand(rsp, 0 * kSystemPointerSize), rcx, Representation::Smi());
+ __ movq(rcx, Operand(rsp, 0 * kSystemPointerSize));
__ Set(rdx, V8_2PART_UINT64_C(0xDEADBEAF, 12345678));
__ cmpq(rcx, rdx);
__ j(not_equal, &exit);
- __ Load(rdx, Operand(rsp, 0 * kPointerSize), Representation::Smi());
+ __ Load(rdx, Operand(rsp, 0 * kSystemPointerSize), Representation::Smi());
__ cmpq(rcx, rdx);
__ j(not_equal, &exit);
// Test 3.
__ movq(rax, Immediate(3)); // Test number.
- __ movq(Operand(rsp, 0 * kPointerSize), Immediate(0));
+ __ movq(Operand(rsp, 0 * kSystemPointerSize), Immediate(0));
__ movq(rcx, Immediate(-1));
- __ Store(Operand(rsp, 0 * kPointerSize), rcx, Representation::Integer32());
- __ movq(rcx, Operand(rsp, 0 * kPointerSize));
+ __ Store(Operand(rsp, 0 * kSystemPointerSize), rcx,
+ Representation::Integer32());
+ __ movq(rcx, Operand(rsp, 0 * kSystemPointerSize));
__ movl(rdx, Immediate(-1));
__ cmpq(rcx, rdx);
__ j(not_equal, &exit);
- __ Load(rdx, Operand(rsp, 0 * kPointerSize), Representation::Integer32());
+ __ Load(rdx, Operand(rsp, 0 * kSystemPointerSize),
+ Representation::Integer32());
__ cmpq(rcx, rdx);
__ j(not_equal, &exit);
// Test 4.
__ movq(rax, Immediate(4)); // Test number.
- __ movq(Operand(rsp, 0 * kPointerSize), Immediate(0));
+ __ movq(Operand(rsp, 0 * kSystemPointerSize), Immediate(0));
__ movl(rcx, Immediate(0x44332211));
- __ Store(Operand(rsp, 0 * kPointerSize), rcx, Representation::HeapObject());
- __ movq(rcx, Operand(rsp, 0 * kPointerSize));
+ __ Store(Operand(rsp, 0 * kSystemPointerSize), rcx,
+ Representation::HeapObject());
+ __ movq(rcx, Operand(rsp, 0 * kSystemPointerSize));
__ movl(rdx, Immediate(0x44332211));
__ cmpq(rcx, rdx);
__ j(not_equal, &exit);
- __ Load(rdx, Operand(rsp, 0 * kPointerSize), Representation::HeapObject());
+ __ Load(rdx, Operand(rsp, 0 * kSystemPointerSize),
+ Representation::HeapObject());
__ cmpq(rcx, rdx);
__ j(not_equal, &exit);
// Test 5.
__ movq(rax, Immediate(5)); // Test number.
- __ movq(Operand(rsp, 0 * kPointerSize), Immediate(0));
+ __ movq(Operand(rsp, 0 * kSystemPointerSize), Immediate(0));
__ Set(rcx, V8_2PART_UINT64_C(0x12345678, DEADBEAF));
- __ Store(Operand(rsp, 0 * kPointerSize), rcx, Representation::Tagged());
- __ movq(rcx, Operand(rsp, 0 * kPointerSize));
+ __ Store(Operand(rsp, 0 * kSystemPointerSize), rcx, Representation::Tagged());
+ __ movq(rcx, Operand(rsp, 0 * kSystemPointerSize));
__ Set(rdx, V8_2PART_UINT64_C(0x12345678, DEADBEAF));
__ cmpq(rcx, rdx);
__ j(not_equal, &exit);
- __ Load(rdx, Operand(rsp, 0 * kPointerSize), Representation::Tagged());
+ __ Load(rdx, Operand(rsp, 0 * kSystemPointerSize), Representation::Tagged());
__ cmpq(rcx, rdx);
__ j(not_equal, &exit);
// Test 6.
__ movq(rax, Immediate(6)); // Test number.
- __ movq(Operand(rsp, 0 * kPointerSize), Immediate(0));
+ __ movq(Operand(rsp, 0 * kSystemPointerSize), Immediate(0));
__ Set(rcx, V8_2PART_UINT64_C(0x11223344, 55667788));
- __ Store(Operand(rsp, 0 * kPointerSize), rcx, Representation::External());
- __ movq(rcx, Operand(rsp, 0 * kPointerSize));
+ __ Store(Operand(rsp, 0 * kSystemPointerSize), rcx,
+ Representation::External());
+ __ movq(rcx, Operand(rsp, 0 * kSystemPointerSize));
__ Set(rdx, V8_2PART_UINT64_C(0x11223344, 55667788));
__ cmpq(rcx, rdx);
__ j(not_equal, &exit);
- __ Load(rdx, Operand(rsp, 0 * kPointerSize), Representation::External());
+ __ Load(rdx, Operand(rsp, 0 * kSystemPointerSize),
+ Representation::External());
__ cmpq(rcx, rdx);
__ j(not_equal, &exit);
// Test 7.
__ movq(rax, Immediate(7)); // Test number.
- __ movq(Operand(rsp, 0 * kPointerSize), Immediate(0));
+ __ movq(Operand(rsp, 0 * kSystemPointerSize), Immediate(0));
__ movq(rcx, Immediate(-1));
- __ Store(Operand(rsp, 0 * kPointerSize), rcx, Representation::Integer8());
- __ movq(rcx, Operand(rsp, 0 * kPointerSize));
+ __ Store(Operand(rsp, 0 * kSystemPointerSize), rcx,
+ Representation::Integer8());
+ __ movq(rcx, Operand(rsp, 0 * kSystemPointerSize));
__ movl(rdx, Immediate(255));
__ cmpq(rcx, rdx);
__ j(not_equal, &exit);
- __ Load(rdx, Operand(rsp, 0 * kPointerSize), Representation::Integer8());
+ __ Load(rdx, Operand(rsp, 0 * kSystemPointerSize),
+ Representation::Integer8());
__ movq(rcx, Immediate(-1));
__ cmpq(rcx, rdx);
__ j(not_equal, &exit);
// Test 8.
__ movq(rax, Immediate(8)); // Test number.
- __ movq(Operand(rsp, 0 * kPointerSize), Immediate(0));
+ __ movq(Operand(rsp, 0 * kSystemPointerSize), Immediate(0));
__ movq(rcx, Immediate(-1));
- __ Store(Operand(rsp, 0 * kPointerSize), rcx, Representation::Integer16());
- __ movq(rcx, Operand(rsp, 0 * kPointerSize));
+ __ Store(Operand(rsp, 0 * kSystemPointerSize), rcx,
+ Representation::Integer16());
+ __ movq(rcx, Operand(rsp, 0 * kSystemPointerSize));
__ movl(rdx, Immediate(65535));
__ cmpq(rcx, rdx);
__ j(not_equal, &exit);
- __ Load(rdx, Operand(rsp, 0 * kPointerSize), Representation::Integer16());
+ __ Load(rdx, Operand(rsp, 0 * kSystemPointerSize),
+ Representation::Integer16());
__ movq(rcx, Immediate(-1));
__ cmpq(rcx, rdx);
__ j(not_equal, &exit);
// Test 9.
__ movq(rax, Immediate(9)); // Test number.
- __ movq(Operand(rsp, 0 * kPointerSize), Immediate(0));
+ __ movq(Operand(rsp, 0 * kSystemPointerSize), Immediate(0));
__ movq(rcx, Immediate(-1));
- __ Store(Operand(rsp, 0 * kPointerSize), rcx, Representation::UInteger16());
- __ movq(rcx, Operand(rsp, 0 * kPointerSize));
+ __ Store(Operand(rsp, 0 * kSystemPointerSize), rcx,
+ Representation::UInteger16());
+ __ movq(rcx, Operand(rsp, 0 * kSystemPointerSize));
__ movl(rdx, Immediate(65535));
__ cmpq(rcx, rdx);
__ j(not_equal, &exit);
- __ Load(rdx, Operand(rsp, 0 * kPointerSize), Representation::UInteger16());
+ __ Load(rdx, Operand(rsp, 0 * kSystemPointerSize),
+ Representation::UInteger16());
__ cmpq(rcx, rdx);
__ j(not_equal, &exit);
__ xorq(rax, rax); // Success.
__ bind(&exit);
- __ addq(rsp, Immediate(1 * kPointerSize));
+ __ addq(rsp, Immediate(1 * kSystemPointerSize));
ExitCode(masm);
__ ret(0);
CodeDesc desc;
masm->GetCode(isolate, &desc);
- MakeAssemblerBufferExecutable(buffer, allocated);
+ buffer->MakeExecutable();
// Call the function from C++.
- auto f = GeneratedCode<F0>::FromBuffer(CcTest::i_isolate(), buffer);
+ auto f = GeneratedCode<F0>::FromBuffer(CcTest::i_isolate(), buffer->start());
int result = f.Call();
CHECK_EQ(0, result);
}
@@ -1085,10 +1093,9 @@ void TestFloat64x2Neg(MacroAssembler* masm, Label* exit, double x, double y) {
TEST(SIMDMacros) {
Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
- size_t allocated;
- byte* buffer = AllocateAssemblerBuffer(&allocated);
- MacroAssembler assembler(isolate, buffer, static_cast<int>(allocated),
- v8::internal::CodeObjectRequired::kYes);
+ auto buffer = AllocateAssemblerBuffer();
+ MacroAssembler assembler(isolate, v8::internal::CodeObjectRequired::kYes,
+ buffer->CreateView());
MacroAssembler* masm = &assembler;
EntryCode(masm);
@@ -1107,13 +1114,29 @@ TEST(SIMDMacros) {
CodeDesc desc;
masm->GetCode(isolate, &desc);
- MakeAssemblerBufferExecutable(buffer, allocated);
+ buffer->MakeExecutable();
// Call the function from C++.
- auto f = GeneratedCode<F0>::FromBuffer(CcTest::i_isolate(), buffer);
+ auto f = GeneratedCode<F0>::FromBuffer(CcTest::i_isolate(), buffer->start());
int result = f.Call();
CHECK_EQ(0, result);
}
+TEST(AreAliased) {
+ DCHECK(!AreAliased(rax));
+ DCHECK(!AreAliased(rax, no_reg));
+ DCHECK(!AreAliased(no_reg, rax, no_reg));
+
+ DCHECK(AreAliased(rax, rax));
+ DCHECK(!AreAliased(no_reg, no_reg));
+
+ DCHECK(!AreAliased(rax, rbx, rcx, rdx, no_reg));
+ DCHECK(AreAliased(rax, rbx, rcx, rdx, rax, no_reg));
+
+ // no_regs are allowed in between the registers being checked.
+ DCHECK(!AreAliased(rax, no_reg, rbx, no_reg, rcx, no_reg, rdx, no_reg));
+ DCHECK(AreAliased(rax, no_reg, rbx, no_reg, rcx, no_reg, rdx, rax, no_reg));
+}
+
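The AreAliased assertions above encode two rules: a register may appear at most once, and any number of no_reg placeholders may be interleaved without counting as aliases. Not V8's implementation, but the semantics can be sketched as comparing the count of valid registers with the size of the set of their codes (assumes Register::is_valid() and Register::code(), both standard in this codebase):

  #include <set>
  template <typename... Regs>
  bool AreAliasedSketch(Regs... regs) {
    int valid = 0;
    std::set<int> codes;
    for (Register r : {regs...}) {
      if (!r.is_valid()) continue;  // skip no_reg placeholders
      ++valid;
      codes.insert(r.code());
    }
    return static_cast<int>(codes.size()) != valid;  // duplicate => aliased
  }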
#undef __
} // namespace test_macro_assembler_x64
diff --git a/deps/v8/test/cctest/test-mementos.cc b/deps/v8/test/cctest/test-mementos.cc
index 3690752f13..aa2b23c413 100644
--- a/deps/v8/test/cctest/test-mementos.cc
+++ b/deps/v8/test/cctest/test-mementos.cc
@@ -45,16 +45,17 @@ static void SetUpNewSpaceWithPoisonedMementoAtTop() {
// Allocate a string, the GC may suspect a memento behind the string.
Handle<SeqOneByteString> string =
isolate->factory()->NewRawOneByteString(12).ToHandleChecked();
- CHECK(*string);
+ CHECK(!string->is_null());
// Create an allocation memento behind the string with a garbage allocation
// site pointer.
- AllocationMemento* memento =
- reinterpret_cast<AllocationMemento*>(new_space->top() + kHeapObjectTag);
+ AllocationMemento memento = AllocationMemento::unchecked_cast(
+ Object(new_space->top() + kHeapObjectTag));
memento->set_map_after_allocation(
ReadOnlyRoots(heap).allocation_memento_map(), SKIP_WRITE_BARRIER);
memento->set_allocation_site(
- reinterpret_cast<AllocationSite*>(kHeapObjectTag), SKIP_WRITE_BARRIER);
+ AllocationSite::unchecked_cast(Object(kHeapObjectTag)),
+ SKIP_WRITE_BARRIER);
}
diff --git a/deps/v8/test/cctest/test-object.cc b/deps/v8/test/cctest/test-object.cc
index 3f65691141..7d76b170d9 100644
--- a/deps/v8/test/cctest/test-object.cc
+++ b/deps/v8/test/cctest/test-object.cc
@@ -15,7 +15,7 @@ namespace internal {
static void CheckObject(Isolate* isolate, Handle<Object> obj,
const char* string) {
- Object* print_string = *Object::NoSideEffectsToString(isolate, obj);
+ Object print_string = *Object::NoSideEffectsToString(isolate, obj);
CHECK(String::cast(print_string)->IsUtf8EqualTo(CStrVector(string)));
}
@@ -123,13 +123,13 @@ TEST(EnumCache) {
CHECK_EQ(cc->map()->EnumLength(), kInvalidEnumCacheSentinel);
// Check that the EnumCache is empty.
- CHECK_EQ(a->map()->instance_descriptors()->GetEnumCache(),
+ CHECK_EQ(a->map()->instance_descriptors()->enum_cache(),
*factory->empty_enum_cache());
- CHECK_EQ(b->map()->instance_descriptors()->GetEnumCache(),
+ CHECK_EQ(b->map()->instance_descriptors()->enum_cache(),
*factory->empty_enum_cache());
- CHECK_EQ(c->map()->instance_descriptors()->GetEnumCache(),
+ CHECK_EQ(c->map()->instance_descriptors()->enum_cache(),
*factory->empty_enum_cache());
- CHECK_EQ(cc->map()->instance_descriptors()->GetEnumCache(),
+ CHECK_EQ(cc->map()->instance_descriptors()->enum_cache(),
*factory->empty_enum_cache());
// The EnumCache is shared on the DescriptorArray, creating it on {cc} has no
@@ -141,14 +141,14 @@ TEST(EnumCache) {
CHECK_EQ(c->map()->EnumLength(), kInvalidEnumCacheSentinel);
CHECK_EQ(cc->map()->EnumLength(), 3);
- CHECK_EQ(a->map()->instance_descriptors()->GetEnumCache(),
+ CHECK_EQ(a->map()->instance_descriptors()->enum_cache(),
*factory->empty_enum_cache());
- CHECK_EQ(b->map()->instance_descriptors()->GetEnumCache(),
+ CHECK_EQ(b->map()->instance_descriptors()->enum_cache(),
*factory->empty_enum_cache());
- CHECK_EQ(c->map()->instance_descriptors()->GetEnumCache(),
+ CHECK_EQ(c->map()->instance_descriptors()->enum_cache(),
*factory->empty_enum_cache());
- EnumCache* enum_cache = cc->map()->instance_descriptors()->GetEnumCache();
+ EnumCache enum_cache = cc->map()->instance_descriptors()->enum_cache();
CHECK_NE(enum_cache, *factory->empty_enum_cache());
CHECK_EQ(enum_cache->keys()->length(), 3);
CHECK_EQ(enum_cache->indices()->length(), 3);
@@ -165,14 +165,14 @@ TEST(EnumCache) {
// The enum cache is shared on the descriptor array of maps {a}, {b} and
// {c} only.
- EnumCache* enum_cache = a->map()->instance_descriptors()->GetEnumCache();
+ EnumCache enum_cache = a->map()->instance_descriptors()->enum_cache();
CHECK_NE(enum_cache, *factory->empty_enum_cache());
- CHECK_NE(cc->map()->instance_descriptors()->GetEnumCache(),
+ CHECK_NE(cc->map()->instance_descriptors()->enum_cache(),
*factory->empty_enum_cache());
- CHECK_NE(cc->map()->instance_descriptors()->GetEnumCache(), enum_cache);
- CHECK_EQ(a->map()->instance_descriptors()->GetEnumCache(), enum_cache);
- CHECK_EQ(b->map()->instance_descriptors()->GetEnumCache(), enum_cache);
- CHECK_EQ(c->map()->instance_descriptors()->GetEnumCache(), enum_cache);
+ CHECK_NE(cc->map()->instance_descriptors()->enum_cache(), enum_cache);
+ CHECK_EQ(a->map()->instance_descriptors()->enum_cache(), enum_cache);
+ CHECK_EQ(b->map()->instance_descriptors()->enum_cache(), enum_cache);
+ CHECK_EQ(c->map()->instance_descriptors()->enum_cache(), enum_cache);
CHECK_EQ(enum_cache->keys()->length(), 1);
CHECK_EQ(enum_cache->indices()->length(), 1);
@@ -181,7 +181,7 @@ TEST(EnumCache) {
// Creating the EnumCache for {c} will create a new EnumCache on the shared
// DescriptorArray.
Handle<EnumCache> previous_enum_cache(
- a->map()->instance_descriptors()->GetEnumCache(), a->GetIsolate());
+ a->map()->instance_descriptors()->enum_cache(), a->GetIsolate());
Handle<FixedArray> previous_keys(previous_enum_cache->keys(),
a->GetIsolate());
Handle<FixedArray> previous_indices(previous_enum_cache->indices(),
@@ -193,7 +193,7 @@ TEST(EnumCache) {
CHECK_EQ(c->map()->EnumLength(), 3);
CHECK_EQ(cc->map()->EnumLength(), 3);
- EnumCache* enum_cache = c->map()->instance_descriptors()->GetEnumCache();
+ EnumCache enum_cache = c->map()->instance_descriptors()->enum_cache();
CHECK_NE(enum_cache, *factory->empty_enum_cache());
// The keys and indices caches are updated.
CHECK_EQ(enum_cache, *previous_enum_cache);
@@ -206,20 +206,20 @@ TEST(EnumCache) {
// The enum cache is shared on the descriptor array of maps {a}, {b} and
// {c} only.
- CHECK_NE(cc->map()->instance_descriptors()->GetEnumCache(),
+ CHECK_NE(cc->map()->instance_descriptors()->enum_cache(),
*factory->empty_enum_cache());
- CHECK_NE(cc->map()->instance_descriptors()->GetEnumCache(), enum_cache);
- CHECK_NE(cc->map()->instance_descriptors()->GetEnumCache(),
+ CHECK_NE(cc->map()->instance_descriptors()->enum_cache(), enum_cache);
+ CHECK_NE(cc->map()->instance_descriptors()->enum_cache(),
*previous_enum_cache);
- CHECK_EQ(a->map()->instance_descriptors()->GetEnumCache(), enum_cache);
- CHECK_EQ(b->map()->instance_descriptors()->GetEnumCache(), enum_cache);
- CHECK_EQ(c->map()->instance_descriptors()->GetEnumCache(), enum_cache);
+ CHECK_EQ(a->map()->instance_descriptors()->enum_cache(), enum_cache);
+ CHECK_EQ(b->map()->instance_descriptors()->enum_cache(), enum_cache);
+ CHECK_EQ(c->map()->instance_descriptors()->enum_cache(), enum_cache);
}
// {b} can reuse the existing EnumCache, hence we only need to set the correct
// EnumLength on the map without modifying the cache itself.
previous_enum_cache =
- handle(a->map()->instance_descriptors()->GetEnumCache(), a->GetIsolate());
+ handle(a->map()->instance_descriptors()->enum_cache(), a->GetIsolate());
previous_keys = handle(previous_enum_cache->keys(), a->GetIsolate());
previous_indices = handle(previous_enum_cache->indices(), a->GetIsolate());
CompileRun("var s = 0; for (let key in b) { s += b[key] };");
@@ -229,7 +229,7 @@ TEST(EnumCache) {
CHECK_EQ(c->map()->EnumLength(), 3);
CHECK_EQ(cc->map()->EnumLength(), 3);
- EnumCache* enum_cache = c->map()->instance_descriptors()->GetEnumCache();
+ EnumCache enum_cache = c->map()->instance_descriptors()->enum_cache();
CHECK_NE(enum_cache, *factory->empty_enum_cache());
// The keys and indices caches are not updated.
CHECK_EQ(enum_cache, *previous_enum_cache);
@@ -240,14 +240,14 @@ TEST(EnumCache) {
// The enum cache is shared on the descriptor array of maps {a}, {b} and
// {c} only.
- CHECK_NE(cc->map()->instance_descriptors()->GetEnumCache(),
+ CHECK_NE(cc->map()->instance_descriptors()->enum_cache(),
*factory->empty_enum_cache());
- CHECK_NE(cc->map()->instance_descriptors()->GetEnumCache(), enum_cache);
- CHECK_NE(cc->map()->instance_descriptors()->GetEnumCache(),
+ CHECK_NE(cc->map()->instance_descriptors()->enum_cache(), enum_cache);
+ CHECK_NE(cc->map()->instance_descriptors()->enum_cache(),
*previous_enum_cache);
- CHECK_EQ(a->map()->instance_descriptors()->GetEnumCache(), enum_cache);
- CHECK_EQ(b->map()->instance_descriptors()->GetEnumCache(), enum_cache);
- CHECK_EQ(c->map()->instance_descriptors()->GetEnumCache(), enum_cache);
+ CHECK_EQ(a->map()->instance_descriptors()->enum_cache(), enum_cache);
+ CHECK_EQ(b->map()->instance_descriptors()->enum_cache(), enum_cache);
+ CHECK_EQ(c->map()->instance_descriptors()->enum_cache(), enum_cache);
}
}
diff --git a/deps/v8/test/cctest/test-orderedhashtable.cc b/deps/v8/test/cctest/test-orderedhashtable.cc
index e680d38287..b1f7ae9068 100644
--- a/deps/v8/test/cctest/test-orderedhashtable.cc
+++ b/deps/v8/test/cctest/test-orderedhashtable.cc
@@ -237,8 +237,8 @@ TEST(SmallOrderedHashMapDuplicateHashCode) {
CopyHashCode(key1, key2);
CHECK(!key1->SameValue(*key2));
- Object* hash1 = key1->GetHash();
- Object* hash2 = key2->GetHash();
+ Object hash1 = key1->GetHash();
+ Object hash2 = key2->GetHash();
CHECK_EQ(hash1, hash2);
map = SmallOrderedHashMap::Add(isolate, map, key2, value).ToHandleChecked();
@@ -1271,6 +1271,727 @@ TEST(OrderedHashMapHandlerInsertion) {
CHECK(OrderedHashMap::Is(map));
}
+TEST(OrderedNameDictionaryInsertion) {
+ LocalContext context;
+ Isolate* isolate = GetIsolateFrom(&context);
+ Factory* factory = isolate->factory();
+ HandleScope scope(isolate);
+
+ Handle<OrderedNameDictionary> dict = factory->NewOrderedNameDictionary();
+ Verify(isolate, dict);
+ CHECK_EQ(2, dict->NumberOfBuckets());
+ CHECK_EQ(0, dict->NumberOfElements());
+
+ Handle<String> key1 = isolate->factory()->InternalizeUtf8String("foo");
+ Handle<String> value = isolate->factory()->InternalizeUtf8String("bar");
+ CHECK_EQ(OrderedNameDictionary::kNotFound, dict->FindEntry(isolate, *key1));
+ PropertyDetails details = PropertyDetails::Empty();
+ dict = OrderedNameDictionary::Add(isolate, dict, key1, value, details);
+ Verify(isolate, dict);
+ CHECK_EQ(2, dict->NumberOfBuckets());
+ CHECK_EQ(1, dict->NumberOfElements());
+
+ CHECK_EQ(0, dict->FindEntry(isolate, *key1));
+
+ Handle<Symbol> key2 = factory->NewSymbol();
+ CHECK_EQ(OrderedNameDictionary::kNotFound, dict->FindEntry(isolate, *key2));
+ dict = OrderedNameDictionary::Add(isolate, dict, key2, value, details);
+ Verify(isolate, dict);
+ CHECK_EQ(2, dict->NumberOfBuckets());
+ CHECK_EQ(2, dict->NumberOfElements());
+ CHECK_EQ(0, dict->FindEntry(isolate, *key1));
+ CHECK_EQ(1, dict->FindEntry(isolate, *key2));
+}
+
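+// FindEntry() must locate every previously added key and keep returning the
+// same entry index as further keys are added.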
+TEST(OrderedNameDictionaryFindEntry) {
+ LocalContext context;
+ Isolate* isolate = GetIsolateFrom(&context);
+ Factory* factory = isolate->factory();
+ HandleScope scope(isolate);
+
+ Handle<OrderedNameDictionary> dict = factory->NewOrderedNameDictionary();
+ Verify(isolate, dict);
+ CHECK_EQ(2, dict->NumberOfBuckets());
+ CHECK_EQ(0, dict->NumberOfElements());
+
+ Handle<String> key1 = isolate->factory()->InternalizeUtf8String("foo");
+ Handle<String> value = isolate->factory()->InternalizeUtf8String("bar");
+ PropertyDetails details = PropertyDetails::Empty();
+ dict = OrderedNameDictionary::Add(isolate, dict, key1, value, details);
+ Verify(isolate, dict);
+ CHECK_EQ(2, dict->NumberOfBuckets());
+ CHECK_EQ(1, dict->NumberOfElements());
+
+ int entry = dict->FindEntry(isolate, *key1);
+ CHECK_EQ(entry, 0);
+ CHECK_NE(entry, OrderedNameDictionary::kNotFound);
+
+ Handle<Symbol> key2 = factory->NewSymbol();
+ dict = OrderedNameDictionary::Add(isolate, dict, key2, value, details);
+ Verify(isolate, dict);
+ CHECK_EQ(2, dict->NumberOfBuckets());
+ CHECK_EQ(2, dict->NumberOfElements());
+
+ entry = dict->FindEntry(isolate, *key1);
+ CHECK_NE(entry, OrderedNameDictionary::kNotFound);
+ CHECK_EQ(entry, 0);
+
+ entry = dict->FindEntry(isolate, *key2);
+ CHECK_NE(entry, OrderedNameDictionary::kNotFound);
+ CHECK_EQ(entry, 1);
+}
+
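+// ValueAt() reads the value stored at an entry; ValueAtPut() overwrites it
+// in place without disturbing other entries.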
+TEST(OrderedNameDictionaryValueAtAndValueAtPut) {
+ LocalContext context;
+ Isolate* isolate = GetIsolateFrom(&context);
+ Factory* factory = isolate->factory();
+ HandleScope scope(isolate);
+
+ Handle<OrderedNameDictionary> dict = factory->NewOrderedNameDictionary();
+ Verify(isolate, dict);
+ CHECK_EQ(2, dict->NumberOfBuckets());
+ CHECK_EQ(0, dict->NumberOfElements());
+
+ Handle<String> key1 = isolate->factory()->InternalizeUtf8String("foo");
+ Handle<String> value = isolate->factory()->InternalizeUtf8String("bar");
+ CHECK_EQ(OrderedNameDictionary::kNotFound, dict->FindEntry(isolate, *key1));
+ PropertyDetails details = PropertyDetails::Empty();
+ dict = OrderedNameDictionary::Add(isolate, dict, key1, value, details);
+ Verify(isolate, dict);
+ CHECK_EQ(2, dict->NumberOfBuckets());
+ CHECK_EQ(1, dict->NumberOfElements());
+ CHECK_EQ(0, dict->FindEntry(isolate, *key1));
+
+ int entry = dict->FindEntry(isolate, *key1);
+ Handle<Object> found = handle(dict->ValueAt(entry), isolate);
+ CHECK_EQ(*found, *value);
+
+ // Change the value stored for key1.
+ Handle<String> other_value = isolate->factory()->InternalizeUtf8String("baz");
+ dict->ValueAtPut(entry, *other_value);
+
+ entry = dict->FindEntry(isolate, *key1);
+ found = handle(dict->ValueAt(entry), isolate);
+ CHECK_EQ(*found, *other_value);
+
+ Handle<Symbol> key2 = factory->NewSymbol();
+ CHECK_EQ(OrderedNameDictionary::kNotFound, dict->FindEntry(isolate, *key2));
+ dict = OrderedNameDictionary::Add(isolate, dict, key2, value, details);
+ Verify(isolate, dict);
+ CHECK_EQ(2, dict->NumberOfBuckets());
+ CHECK_EQ(2, dict->NumberOfElements());
+ CHECK_EQ(0, dict->FindEntry(isolate, *key1));
+ CHECK_EQ(1, dict->FindEntry(isolate, *key2));
+
+ entry = dict->FindEntry(isolate, *key1);
+ found = handle(dict->ValueAt(entry), isolate);
+ CHECK_EQ(*found, *other_value);
+
+ entry = dict->FindEntry(isolate, *key2);
+ found = handle(dict->ValueAt(entry), isolate);
+ CHECK_EQ(*found, *value);
+
+ // Change the value stored for key2.
+ dict->ValueAtPut(entry, *other_value);
+
+ entry = dict->FindEntry(isolate, *key1);
+ found = handle(dict->ValueAt(entry), isolate);
+ CHECK_EQ(*found, *other_value);
+}
+
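+// DetailsAt() and DetailsAtPut() round-trip PropertyDetails through an
+// entry, independently of the entry's key and value.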
+TEST(OrderedNameDictionaryDetailsAtAndDetailsAtPut) {
+ LocalContext context;
+ Isolate* isolate = GetIsolateFrom(&context);
+ Factory* factory = isolate->factory();
+ HandleScope scope(isolate);
+
+ Handle<OrderedNameDictionary> dict = factory->NewOrderedNameDictionary();
+ Verify(isolate, dict);
+ CHECK_EQ(2, dict->NumberOfBuckets());
+ CHECK_EQ(0, dict->NumberOfElements());
+
+ Handle<String> key1 = isolate->factory()->InternalizeUtf8String("foo");
+ Handle<String> value = isolate->factory()->InternalizeUtf8String("bar");
+ CHECK_EQ(OrderedNameDictionary::kNotFound, dict->FindEntry(isolate, *key1));
+ PropertyDetails details = PropertyDetails::Empty();
+ dict = OrderedNameDictionary::Add(isolate, dict, key1, value, details);
+ Verify(isolate, dict);
+ CHECK_EQ(2, dict->NumberOfBuckets());
+ CHECK_EQ(1, dict->NumberOfElements());
+ CHECK_EQ(0, dict->FindEntry(isolate, *key1));
+
+ int entry = dict->FindEntry(isolate, *key1);
+ PropertyDetails found = dict->DetailsAt(entry);
+ CHECK_EQ(PropertyDetails::Empty().AsSmi(), found.AsSmi());
+
+ PropertyDetails other =
+ PropertyDetails(kAccessor, READ_ONLY, PropertyCellType::kNoCell);
+ dict->DetailsAtPut(entry, other);
+
+ found = dict->DetailsAt(entry);
+ CHECK_NE(PropertyDetails::Empty().AsSmi(), found.AsSmi());
+ CHECK_EQ(other.AsSmi(), found.AsSmi());
+
+ Handle<Symbol> key2 = factory->NewSymbol();
+ CHECK_EQ(OrderedNameDictionary::kNotFound, dict->FindEntry(isolate, *key2));
+ dict = OrderedNameDictionary::Add(isolate, dict, key2, value, details);
+ Verify(isolate, dict);
+ CHECK_EQ(2, dict->NumberOfBuckets());
+ CHECK_EQ(2, dict->NumberOfElements());
+ CHECK_EQ(0, dict->FindEntry(isolate, *key1));
+ CHECK_EQ(1, dict->FindEntry(isolate, *key2));
+
+ entry = dict->FindEntry(isolate, *key1);
+ found = dict->DetailsAt(entry);
+ CHECK_EQ(other.AsSmi(), found.AsSmi());
+ CHECK_NE(PropertyDetails::Empty().AsSmi(), found.AsSmi());
+
+ entry = dict->FindEntry(isolate, *key2);
+ dict->DetailsAtPut(entry, other);
+
+ found = dict->DetailsAt(entry);
+ CHECK_NE(PropertyDetails::Empty().AsSmi(), found.AsSmi());
+ CHECK_EQ(other.AsSmi(), found.AsSmi());
+}
+
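+// Insertion checks for the small (inline-storage) variant; its Add() returns
+// a MaybeHandle, hence the ToHandleChecked() calls.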
+TEST(SmallOrderedNameDictionaryInsertion) {
+ LocalContext context;
+ Isolate* isolate = GetIsolateFrom(&context);
+ Factory* factory = isolate->factory();
+ HandleScope scope(isolate);
+
+ Handle<SmallOrderedNameDictionary> dict =
+ factory->NewSmallOrderedNameDictionary();
+ Verify(isolate, dict);
+ CHECK_EQ(2, dict->NumberOfBuckets());
+ CHECK_EQ(0, dict->NumberOfElements());
+
+ Handle<String> key1 = isolate->factory()->InternalizeUtf8String("foo");
+ Handle<String> value = isolate->factory()->InternalizeUtf8String("bar");
+ CHECK_EQ(SmallOrderedNameDictionary::kNotFound,
+ dict->FindEntry(isolate, *key1));
+ PropertyDetails details = PropertyDetails::Empty();
+ dict = SmallOrderedNameDictionary::Add(isolate, dict, key1, value, details)
+ .ToHandleChecked();
+ Verify(isolate, dict);
+ CHECK_EQ(2, dict->NumberOfBuckets());
+ CHECK_EQ(1, dict->NumberOfElements());
+ CHECK_EQ(0, dict->FindEntry(isolate, *key1));
+
+ Handle<Symbol> key2 = factory->NewSymbol();
+ CHECK_EQ(SmallOrderedNameDictionary::kNotFound,
+ dict->FindEntry(isolate, *key2));
+ dict = SmallOrderedNameDictionary::Add(isolate, dict, key2, value, details)
+ .ToHandleChecked();
+ Verify(isolate, dict);
+ CHECK_EQ(2, dict->NumberOfBuckets());
+ CHECK_EQ(2, dict->NumberOfElements());
+ CHECK_EQ(0, dict->FindEntry(isolate, *key1));
+ CHECK_EQ(1, dict->FindEntry(isolate, *key2));
+}
+
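+// Fills the small table up to kMaxCapacity; one further Add() must fail by
+// returning an empty MaybeHandle instead of growing.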
+TEST(SmallOrderedNameDictionaryInsertionMax) {
+ LocalContext context;
+ Isolate* isolate = GetIsolateFrom(&context);
+ Factory* factory = isolate->factory();
+ HandleScope scope(isolate);
+ Handle<SmallOrderedNameDictionary> dict =
+ factory->NewSmallOrderedNameDictionary();
+ Handle<String> value = isolate->factory()->InternalizeUtf8String("bar");
+ PropertyDetails details = PropertyDetails::Empty();
+
+ char buf[10];
+ for (int i = 0; i < SmallOrderedNameDictionary::kMaxCapacity; i++) {
+ CHECK_LT(0, snprintf(buf, sizeof(buf), "foo%d", i));
+ Handle<String> key = isolate->factory()->InternalizeUtf8String(buf);
+ dict = SmallOrderedNameDictionary::Add(isolate, dict, key, value, details)
+ .ToHandleChecked();
+ Verify(isolate, dict);
+ }
+
+ CHECK_EQ(SmallOrderedNameDictionary::kMaxCapacity /
+ SmallOrderedNameDictionary::kLoadFactor,
+ dict->NumberOfBuckets());
+ CHECK_EQ(SmallOrderedNameDictionary::kMaxCapacity, dict->NumberOfElements());
+
+ // The table is already at kMaxCapacity, so this Add() must fail.
+ CHECK(SmallOrderedNameDictionary::Add(isolate, dict, value, value, details)
+ .is_null());
+}
+
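+// FindEntry() checks for the small variant, mirroring the large-table test.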
+TEST(SmallOrderedNameDictionaryFindEntry) {
+ LocalContext context;
+ Isolate* isolate = GetIsolateFrom(&context);
+ Factory* factory = isolate->factory();
+ HandleScope scope(isolate);
+
+ Handle<SmallOrderedNameDictionary> dict =
+ factory->NewSmallOrderedNameDictionary();
+ Verify(isolate, dict);
+ CHECK_EQ(2, dict->NumberOfBuckets());
+ CHECK_EQ(0, dict->NumberOfElements());
+
+ Handle<String> key1 = isolate->factory()->InternalizeUtf8String("foo");
+ Handle<String> value = isolate->factory()->InternalizeUtf8String("bar");
+ CHECK_EQ(SmallOrderedNameDictionary::kNotFound,
+ dict->FindEntry(isolate, *key1));
+ PropertyDetails details = PropertyDetails::Empty();
+
+ dict = SmallOrderedNameDictionary::Add(isolate, dict, key1, value, details)
+ .ToHandleChecked();
+ Verify(isolate, dict);
+ CHECK_EQ(2, dict->NumberOfBuckets());
+ CHECK_EQ(1, dict->NumberOfElements());
+ CHECK_EQ(0, dict->FindEntry(isolate, *key1));
+
+ int entry = dict->FindEntry(isolate, *key1);
+ CHECK_NE(entry, OrderedNameDictionary::kNotFound);
+
+ Handle<Symbol> key2 = factory->NewSymbol();
+ CHECK_EQ(SmallOrderedNameDictionary::kNotFound,
+ dict->FindEntry(isolate, *key2));
+ dict = SmallOrderedNameDictionary::Add(isolate, dict, key2, value, details)
+ .ToHandleChecked();
+ Verify(isolate, dict);
+ CHECK_EQ(2, dict->NumberOfBuckets());
+ CHECK_EQ(2, dict->NumberOfElements());
+
+ CHECK_EQ(0, dict->FindEntry(isolate, *key1));
+ CHECK_EQ(1, dict->FindEntry(isolate, *key2));
+}
+
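+// ValueAt()/ValueAtPut() checks for the small variant.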
+TEST(SmallOrderedNameDictionaryValueAtAndValueAtPut) {
+ LocalContext context;
+ Isolate* isolate = GetIsolateFrom(&context);
+ Factory* factory = isolate->factory();
+ HandleScope scope(isolate);
+
+ Handle<SmallOrderedNameDictionary> dict =
+ factory->NewSmallOrderedNameDictionary();
+ Verify(isolate, dict);
+ CHECK_EQ(2, dict->NumberOfBuckets());
+ CHECK_EQ(0, dict->NumberOfElements());
+
+ Handle<String> key1 = isolate->factory()->InternalizeUtf8String("foo");
+ Handle<String> value = isolate->factory()->InternalizeUtf8String("bar");
+ CHECK_EQ(SmallOrderedNameDictionary::kNotFound,
+ dict->FindEntry(isolate, *key1));
+ PropertyDetails details = PropertyDetails::Empty();
+ dict = SmallOrderedNameDictionary::Add(isolate, dict, key1, value, details)
+ .ToHandleChecked();
+ Verify(isolate, dict);
+ CHECK_EQ(2, dict->NumberOfBuckets());
+ CHECK_EQ(1, dict->NumberOfElements());
+ CHECK_EQ(0, dict->FindEntry(isolate, *key1));
+
+ int entry = dict->FindEntry(isolate, *key1);
+ Handle<Object> found = handle(dict->ValueAt(entry), isolate);
+ CHECK_EQ(*found, *value);
+
+ // Change the value stored for key1.
+ Handle<String> other_value = isolate->factory()->InternalizeUtf8String("baz");
+ dict->ValueAtPut(entry, *other_value);
+
+ entry = dict->FindEntry(isolate, *key1);
+ found = handle(dict->ValueAt(entry), isolate);
+ CHECK_EQ(*found, *other_value);
+
+ Handle<Symbol> key2 = factory->NewSymbol();
+ CHECK_EQ(SmallOrderedNameDictionary::kNotFound,
+ dict->FindEntry(isolate, *key2));
+ dict = SmallOrderedNameDictionary::Add(isolate, dict, key2, value, details)
+ .ToHandleChecked();
+ Verify(isolate, dict);
+ CHECK_EQ(2, dict->NumberOfBuckets());
+ CHECK_EQ(2, dict->NumberOfElements());
+ CHECK_EQ(0, dict->FindEntry(isolate, *key1));
+ CHECK_EQ(1, dict->FindEntry(isolate, *key2));
+
+ entry = dict->FindEntry(isolate, *key1);
+ found = handle(dict->ValueAt(entry), isolate);
+ CHECK_EQ(*found, *other_value);
+
+ entry = dict->FindEntry(isolate, *key2);
+ found = handle(dict->ValueAt(entry), isolate);
+ CHECK_EQ(*found, *value);
+
+ // Change the value stored for key2.
+ dict->ValueAtPut(entry, *other_value);
+
+ entry = dict->FindEntry(isolate, *key1);
+ found = handle(dict->ValueAt(entry), isolate);
+ CHECK_EQ(*found, *other_value);
+}
+
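+// DetailsAt()/DetailsAtPut() checks for the small variant.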
+TEST(SmallOrderedNameDictionaryDetailsAtAndDetailsAtPut) {
+ LocalContext context;
+ Isolate* isolate = GetIsolateFrom(&context);
+ Factory* factory = isolate->factory();
+ HandleScope scope(isolate);
+
+ Handle<SmallOrderedNameDictionary> dict =
+ factory->NewSmallOrderedNameDictionary();
+ Verify(isolate, dict);
+ CHECK_EQ(2, dict->NumberOfBuckets());
+ CHECK_EQ(0, dict->NumberOfElements());
+
+ Handle<String> key1 = isolate->factory()->InternalizeUtf8String("foo");
+ Handle<String> value = isolate->factory()->InternalizeUtf8String("bar");
+ CHECK_EQ(SmallOrderedNameDictionary::kNotFound,
+ dict->FindEntry(isolate, *key1));
+ PropertyDetails details = PropertyDetails::Empty();
+ dict = SmallOrderedNameDictionary::Add(isolate, dict, key1, value, details)
+ .ToHandleChecked();
+ Verify(isolate, dict);
+ CHECK_EQ(2, dict->NumberOfBuckets());
+ CHECK_EQ(1, dict->NumberOfElements());
+ CHECK_EQ(0, dict->FindEntry(isolate, *key1));
+
+ int entry = dict->FindEntry(isolate, *key1);
+ PropertyDetails found = dict->DetailsAt(entry);
+ CHECK_EQ(PropertyDetails::Empty().AsSmi(), found.AsSmi());
+
+ PropertyDetails other =
+ PropertyDetails(kAccessor, READ_ONLY, PropertyCellType::kNoCell);
+ dict->DetailsAtPut(entry, other);
+
+ found = dict->DetailsAt(entry);
+ CHECK_NE(PropertyDetails::Empty().AsSmi(), found.AsSmi());
+ CHECK_EQ(other.AsSmi(), found.AsSmi());
+
+ Handle<Symbol> key2 = factory->NewSymbol();
+ CHECK_EQ(SmallOrderedNameDictionary::kNotFound,
+ dict->FindEntry(isolate, *key2));
+ dict = SmallOrderedNameDictionary::Add(isolate, dict, key2, value, details)
+ .ToHandleChecked();
+ Verify(isolate, dict);
+ CHECK_EQ(2, dict->NumberOfBuckets());
+ CHECK_EQ(2, dict->NumberOfElements());
+ CHECK_EQ(0, dict->FindEntry(isolate, *key1));
+ CHECK_EQ(1, dict->FindEntry(isolate, *key2));
+
+ entry = dict->FindEntry(isolate, *key1);
+ found = dict->DetailsAt(entry);
+ CHECK_NE(PropertyDetails::Empty().AsSmi(), found.AsSmi());
+ CHECK_EQ(other.AsSmi(), found.AsSmi());
+
+ entry = dict->FindEntry(isolate, *key2);
+ dict->DetailsAtPut(entry, other);
+
+ found = dict->DetailsAt(entry);
+ CHECK_NE(PropertyDetails::Empty().AsSmi(), found.AsSmi());
+ CHECK_EQ(other.AsSmi(), found.AsSmi());
+}
+
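+// The identity hash installed via SetHash() must be preserved across every
+// growth step of the small table's backing store.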
+TEST(SmallOrderedNameDictionarySetAndMigrateHash) {
+ LocalContext context;
+ Isolate* isolate = GetIsolateFrom(&context);
+ Factory* factory = isolate->factory();
+ HandleScope scope(isolate);
+ Handle<SmallOrderedNameDictionary> dict =
+ factory->NewSmallOrderedNameDictionary();
+ Handle<String> value = isolate->factory()->InternalizeUtf8String("bar");
+ PropertyDetails details = PropertyDetails::Empty();
+
+ CHECK_EQ(PropertyArray::kNoHashSentinel, dict->Hash());
+ dict->SetHash(100);
+ CHECK_EQ(100, dict->Hash());
+
+ char buf[10];
+ for (int i = 0; i < SmallOrderedNameDictionary::kMaxCapacity; i++) {
+ CHECK_LT(0, snprintf(buf, sizeof(buf), "foo%d", i));
+ Handle<String> key = isolate->factory()->InternalizeUtf8String(buf);
+ dict = SmallOrderedNameDictionary::Add(isolate, dict, key, value, details)
+ .ToHandleChecked();
+ Verify(isolate, dict);
+ CHECK_EQ(100, dict->Hash());
+ }
+}
+
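+// The same hash-preservation guarantee for the large table, across the many
+// grow cycles triggered by ~1000 insertions.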
+TEST(OrderedNameDictionarySetAndMigrateHash) {
+ LocalContext context;
+ Isolate* isolate = GetIsolateFrom(&context);
+ Factory* factory = isolate->factory();
+ HandleScope scope(isolate);
+ Handle<OrderedNameDictionary> dict = factory->NewOrderedNameDictionary();
+ Handle<String> value = isolate->factory()->InternalizeUtf8String("bar");
+ PropertyDetails details = PropertyDetails::Empty();
+
+ CHECK_EQ(PropertyArray::kNoHashSentinel, dict->Hash());
+ dict->SetHash(100);
+ CHECK_EQ(100, dict->Hash());
+
+ char buf[10];
+ for (int i = 0; i <= 1024; i++) {
+ CHECK_LT(0, snprintf(buf, sizeof(buf), "foo%d", i));
+ Handle<String> key = isolate->factory()->InternalizeUtf8String(buf);
+ dict = OrderedNameDictionary::Add(isolate, dict, key, value, details);
+ Verify(isolate, dict);
+ CHECK_EQ(100, dict->Hash());
+ }
+}
+
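+// The handler abstracts over both representations: allocation starts with a
+// SmallOrderedNameDictionary and insertion transparently migrates to a large
+// OrderedNameDictionary once the small table can no longer grow.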
+TEST(OrderedNameDictionaryHandlerInsertion) {
+ LocalContext context;
+ Isolate* isolate = GetIsolateFrom(&context);
+ HandleScope scope(isolate);
+
+ Handle<HeapObject> table = OrderedNameDictionaryHandler::Allocate(isolate, 4);
+ CHECK(table->IsSmallOrderedNameDictionary());
+ Verify(isolate, table);
+
+ // Add a new key.
+ Handle<String> value = isolate->factory()->InternalizeUtf8String("bar");
+ Handle<String> key = isolate->factory()->InternalizeUtf8String("foo");
+ PropertyDetails details = PropertyDetails::Empty();
+
+ table =
+ OrderedNameDictionaryHandler::Add(isolate, table, key, value, details);
+ DCHECK(key->IsUniqueName());
+ Verify(isolate, table);
+ CHECK(table->IsSmallOrderedNameDictionary());
+ CHECK_NE(OrderedNameDictionaryHandler::kNotFound,
+ OrderedNameDictionaryHandler::FindEntry(isolate, *table, *key));
+
+ char buf[10];
+ for (int i = 0; i < 1024; i++) {
+ CHECK_LT(0, snprintf(buf, sizeof(buf), "foo%d", i));
+ key = isolate->factory()->InternalizeUtf8String(buf);
+ table =
+ OrderedNameDictionaryHandler::Add(isolate, table, key, value, details);
+ DCHECK(key->IsUniqueName());
+ Verify(isolate, table);
+
+ for (int j = 0; j <= i; j++) {
+ CHECK_LT(0, snprintf(buf, sizeof(buf), "foo%d", j));
+ Handle<Name> key_j = isolate->factory()->InternalizeUtf8String(buf);
+ CHECK_NE(
+ OrderedNameDictionaryHandler::kNotFound,
+ OrderedNameDictionaryHandler::FindEntry(isolate, *table, *key_j));
+ }
+
+ for (int j = i + 1; j < 1024; j++) {
+ CHECK_LT(0, snprintf(buf, sizeof(buf), "foo%d", j));
+ Handle<Name> key_j = isolate->factory()->InternalizeUtf8String(buf);
+ CHECK_EQ(
+ OrderedNameDictionaryHandler::kNotFound,
+ OrderedNameDictionaryHandler::FindEntry(isolate, *table, *key_j));
+ }
+ }
+
+ CHECK(table->IsOrderedNameDictionary());
+}
+
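+// SetEntry() rewrites the key, value and details of an existing entry in
+// place, without changing its index.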
+TEST(OrderedNameDictionarySetEntry) {
+ LocalContext context;
+ Isolate* isolate = GetIsolateFrom(&context);
+ Factory* factory = isolate->factory();
+ HandleScope scope(isolate);
+
+ Handle<OrderedNameDictionary> dict = factory->NewOrderedNameDictionary();
+ Verify(isolate, dict);
+ CHECK_EQ(2, dict->NumberOfBuckets());
+ CHECK_EQ(0, dict->NumberOfElements());
+ CHECK_EQ(0, dict->NumberOfDeletedElements());
+
+ Handle<String> key = factory->InternalizeUtf8String("foo");
+ Handle<String> value = factory->InternalizeUtf8String("bar");
+ CHECK_EQ(OrderedNameDictionary::kNotFound, dict->FindEntry(isolate, *key));
+ PropertyDetails details = PropertyDetails::Empty();
+ dict = OrderedNameDictionary::Add(isolate, dict, key, value, details);
+ Verify(isolate, dict);
+ CHECK_EQ(2, dict->NumberOfBuckets());
+ CHECK_EQ(1, dict->NumberOfElements());
+
+ int entry = dict->FindEntry(isolate, *key);
+ CHECK_EQ(0, entry);
+ Handle<Object> found = handle(dict->ValueAt(entry), isolate);
+ CHECK_EQ(*found, *value);
+
+ // Change the value and property details via SetEntry.
+ Handle<String> other_value = isolate->factory()->InternalizeUtf8String("baz");
+ PropertyDetails other_details =
+ PropertyDetails(kAccessor, READ_ONLY, PropertyCellType::kNoCell);
+ dict->SetEntry(isolate, entry, *key, *other_value, other_details);
+
+ entry = dict->FindEntry(isolate, *key);
+ CHECK_EQ(0, entry);
+ found = handle(dict->ValueAt(entry), isolate);
+ CHECK_EQ(*found, *other_value);
+ found = handle(dict->KeyAt(entry), isolate);
+ CHECK_EQ(*found, *key);
+ PropertyDetails found_details = dict->DetailsAt(entry);
+ CHECK_EQ(found_details.AsSmi(), other_details.AsSmi());
+}
+
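+// SetEntry() check for the small variant.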
+TEST(SmallOrderedNameDictionarySetEntry) {
+ LocalContext context;
+ Isolate* isolate = GetIsolateFrom(&context);
+ Factory* factory = isolate->factory();
+ HandleScope scope(isolate);
+
+ Handle<SmallOrderedNameDictionary> dict =
+ factory->NewSmallOrderedNameDictionary();
+ Verify(isolate, dict);
+ CHECK_EQ(2, dict->NumberOfBuckets());
+ CHECK_EQ(0, dict->NumberOfElements());
+
+ Handle<String> key = factory->InternalizeUtf8String("foo");
+ Handle<String> value = factory->InternalizeUtf8String("bar");
+ CHECK_EQ(SmallOrderedNameDictionary::kNotFound,
+ dict->FindEntry(isolate, *key));
+ PropertyDetails details = PropertyDetails::Empty();
+ dict = SmallOrderedNameDictionary::Add(isolate, dict, key, value, details)
+ .ToHandleChecked();
+ Verify(isolate, dict);
+ CHECK_EQ(2, dict->NumberOfBuckets());
+ CHECK_EQ(1, dict->NumberOfElements());
+ CHECK_EQ(0, dict->NumberOfDeletedElements());
+
+ int entry = dict->FindEntry(isolate, *key);
+ CHECK_EQ(0, entry);
+ Handle<Object> found = handle(dict->ValueAt(entry), isolate);
+ CHECK_EQ(*found, *value);
+
+ // Change the value and property details via SetEntry.
+ Handle<String> other_value = factory->InternalizeUtf8String("baz");
+ PropertyDetails other_details =
+ PropertyDetails(kAccessor, READ_ONLY, PropertyCellType::kNoCell);
+ dict->SetEntry(isolate, entry, *key, *other_value, other_details);
+
+ entry = dict->FindEntry(isolate, *key);
+ CHECK_EQ(0, entry);
+ found = handle(dict->ValueAt(entry), isolate);
+ CHECK_EQ(*found, *other_value);
+ found = handle(dict->KeyAt(entry), isolate);
+ CHECK_EQ(*found, *key);
+ PropertyDetails found_details = dict->DetailsAt(entry);
+ CHECK_EQ(found_details.AsSmi(), other_details.AsSmi());
+}
+
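+// DeleteEntry() removes an entry immediately; inserting 100 keys and then
+// deleting them all verifies that the table grows and shrinks back, leaving
+// no deleted-element slots behind.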
+TEST(OrderedNameDictionaryDeleteEntry) {
+ LocalContext context;
+ Isolate* isolate = GetIsolateFrom(&context);
+ Factory* factory = isolate->factory();
+ HandleScope scope(isolate);
+
+ Handle<OrderedNameDictionary> dict = factory->NewOrderedNameDictionary();
+ Verify(isolate, dict);
+ CHECK_EQ(2, dict->NumberOfBuckets());
+ CHECK_EQ(0, dict->NumberOfElements());
+
+ Handle<String> key = factory->InternalizeUtf8String("foo");
+ Handle<String> value = factory->InternalizeUtf8String("bar");
+ CHECK_EQ(OrderedNameDictionary::kNotFound, dict->FindEntry(isolate, *key));
+ PropertyDetails details = PropertyDetails::Empty();
+ dict = OrderedNameDictionary::Add(isolate, dict, key, value, details);
+ Verify(isolate, dict);
+ CHECK_EQ(2, dict->NumberOfBuckets());
+ CHECK_EQ(1, dict->NumberOfElements());
+ CHECK_EQ(0, dict->NumberOfDeletedElements());
+
+ int entry = dict->FindEntry(isolate, *key);
+ CHECK_EQ(0, entry);
+ dict = OrderedNameDictionary::DeleteEntry(isolate, dict, entry);
+ entry = dict->FindEntry(isolate, *key);
+ CHECK_EQ(OrderedNameDictionary::kNotFound, entry);
+ CHECK_EQ(0, dict->NumberOfElements());
+
+ char buf[10];
+ // Make sure we grow at least once.
+ CHECK_LT(OrderedNameDictionaryHandler::Capacity(*dict), 100);
+ for (int i = 0; i < 100; i++) {
+ CHECK_LT(0, snprintf(buf, sizeof(buf), "foo%d", i));
+ key = factory->InternalizeUtf8String(buf);
+ dict = OrderedNameDictionary::Add(isolate, dict, key, value, details);
+ DCHECK(key->IsUniqueName());
+ Verify(isolate, dict);
+ }
+
+ CHECK_EQ(100, dict->NumberOfElements());
+ // The dictionary has grown past its initial capacity.
+ CHECK_EQ(0, dict->NumberOfDeletedElements());
+
+ for (int i = 0; i < 100; i++) {
+ CHECK_LT(0, snprintf(buf, sizeof(buf), "foo%d", i));
+ key = factory->InternalizeUtf8String(buf);
+ entry = dict->FindEntry(isolate, *key);
+
+ dict = OrderedNameDictionary::DeleteEntry(isolate, dict, entry);
+ Verify(isolate, dict);
+
+ entry = dict->FindEntry(isolate, *key);
+ CHECK_EQ(OrderedNameDictionary::kNotFound, entry);
+ }
+ CHECK_EQ(0, dict->NumberOfElements());
+ // Dictionary has shrunk again.
+ CHECK_EQ(0, dict->NumberOfDeletedElements());
+}
+
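+// DeleteEntry() check for the small variant, growing to kMaxCapacity and
+// deleting every entry again.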
+TEST(SmallOrderedNameDictionaryDeleteEntry) {
+ LocalContext context;
+ Isolate* isolate = GetIsolateFrom(&context);
+ Factory* factory = isolate->factory();
+ HandleScope scope(isolate);
+
+ Handle<SmallOrderedNameDictionary> dict =
+ factory->NewSmallOrderedNameDictionary();
+ Verify(isolate, dict);
+ CHECK_EQ(2, dict->NumberOfBuckets());
+ CHECK_EQ(0, dict->NumberOfElements());
+
+ Handle<String> key = factory->InternalizeUtf8String("foo");
+ Handle<String> value = factory->InternalizeUtf8String("bar");
+ CHECK_EQ(SmallOrderedNameDictionary::kNotFound,
+ dict->FindEntry(isolate, *key));
+ PropertyDetails details = PropertyDetails::Empty();
+ dict = SmallOrderedNameDictionary::Add(isolate, dict, key, value, details)
+ .ToHandleChecked();
+ Verify(isolate, dict);
+ CHECK_EQ(2, dict->NumberOfBuckets());
+ CHECK_EQ(1, dict->NumberOfElements());
+ CHECK_EQ(0, dict->NumberOfDeletedElements());
+
+ int entry = dict->FindEntry(isolate, *key);
+ CHECK_EQ(0, entry);
+ dict = SmallOrderedNameDictionary::DeleteEntry(isolate, dict, entry);
+ entry = dict->FindEntry(isolate, *key);
+ CHECK_EQ(SmallOrderedNameDictionary::kNotFound, entry);
+
+ char buf[10];
+ // Make sure we grow at least once.
+ CHECK_LT(dict->Capacity(), SmallOrderedNameDictionary::kMaxCapacity);
+
+ for (int i = 0; i < SmallOrderedNameDictionary::kMaxCapacity; i++) {
+ CHECK_LT(0, snprintf(buf, sizeof(buf), "foo%d", i));
+ key = factory->InternalizeUtf8String(buf);
+ dict = SmallOrderedNameDictionary::Add(isolate, dict, key, value, details)
+ .ToHandleChecked();
+ DCHECK(key->IsUniqueName());
+ Verify(isolate, dict);
+ }
+
+ CHECK_EQ(SmallOrderedNameDictionary::kMaxCapacity, dict->NumberOfElements());
+ // Dictionary has grown.
+ CHECK_EQ(0, dict->NumberOfDeletedElements());
+
+ for (int i = 0; i < SmallOrderedNameDictionary::kMaxCapacity; i++) {
+ CHECK_LT(0, snprintf(buf, sizeof(buf), "foo%d", i));
+ key = factory->InternalizeUtf8String(buf);
+
+ entry = dict->FindEntry(isolate, *key);
+ dict = SmallOrderedNameDictionary::DeleteEntry(isolate, dict, entry);
+ Verify(isolate, dict);
+
+ entry = dict->FindEntry(isolate, *key);
+ CHECK_EQ(SmallOrderedNameDictionary::kNotFound, entry);
+ }
+
+ CHECK_EQ(0, dict->NumberOfElements());
+ // Dictionary has shrunk.
+ CHECK_EQ(0, dict->NumberOfDeletedElements());
+}
+
} // namespace test_orderedhashtable
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/cctest/test-parsing.cc b/deps/v8/test/cctest/test-parsing.cc
index 942d597ccc..2634c30ec0 100644
--- a/deps/v8/test/cctest/test-parsing.cc
+++ b/deps/v8/test/cctest/test-parsing.cc
@@ -36,6 +36,7 @@
#include "src/api-inl.h"
#include "src/ast/ast-value-factory.h"
#include "src/ast/ast.h"
+#include "src/base/enum-set.h"
#include "src/compiler.h"
#include "src/execution.h"
#include "src/flags.h"
@@ -49,8 +50,6 @@
#include "src/parsing/rewriter.h"
#include "src/parsing/scanner-character-streams.h"
#include "src/parsing/token.h"
-#include "src/unicode-cache.h"
-#include "src/utils.h"
#include "test/cctest/cctest.h"
#include "test/cctest/scope-test-helper.h"
@@ -71,11 +70,21 @@ void MockUseCounterCallback(v8::Isolate* isolate,
} // namespace
-TEST(IsContextualKeyword) {
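+// Reference implementations of the Token::IsX() predicates: each TokenIsX()
+// helper spells the token set out as a switch, and the matching TEST
+// cross-checks it against the real predicate for every token value.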
+bool TokenIsAutoSemicolon(Token::Value token) {
+ switch (token) {
+ case Token::SEMICOLON:
+ case Token::EOS:
+ case Token::RBRACE:
+ return true;
+ default:
+ return false;
+ }
+}
+
+TEST(AutoSemicolonToken) {
for (int i = 0; i < Token::NUM_TOKENS; i++) {
Token::Value token = static_cast<Token::Value>(i);
- CHECK_EQ(Token::TypeForTesting(token) == 'C',
- Token::IsContextualKeyword(token));
+ CHECK_EQ(TokenIsAutoSemicolon(token), Token::IsAutoSemicolon(token));
}
}
@@ -89,7 +98,6 @@ bool TokenIsAnyIdentifier(Token::Value token) {
case Token::STATIC:
case Token::FUTURE_STRICT_RESERVED_WORD:
case Token::ESCAPED_STRICT_RESERVED_WORD:
- case Token::ENUM:
return true;
default:
return false;
@@ -103,8 +111,32 @@ TEST(AnyIdentifierToken) {
}
}
-bool TokenIsIdentifier(Token::Value token, LanguageMode language_mode,
- bool is_generator, bool disallow_await) {
+bool TokenIsCallable(Token::Value token) {
+ switch (token) {
+ case Token::SUPER:
+ case Token::IDENTIFIER:
+ case Token::ASYNC:
+ case Token::AWAIT:
+ case Token::YIELD:
+ case Token::LET:
+ case Token::STATIC:
+ case Token::FUTURE_STRICT_RESERVED_WORD:
+ case Token::ESCAPED_STRICT_RESERVED_WORD:
+ return true;
+ default:
+ return false;
+ }
+}
+
+TEST(CallableToken) {
+ for (int i = 0; i < Token::NUM_TOKENS; i++) {
+ Token::Value token = static_cast<Token::Value>(i);
+ CHECK_EQ(TokenIsCallable(token), Token::IsCallable(token));
+ }
+}
+
+bool TokenIsValidIdentifier(Token::Value token, LanguageMode language_mode,
+ bool is_generator, bool disallow_await) {
switch (token) {
case Token::IDENTIFIER:
case Token::ASYNC:
@@ -124,7 +156,7 @@ bool TokenIsIdentifier(Token::Value token, LanguageMode language_mode,
UNREACHABLE();
}
-TEST(IsIdentifierToken) {
+TEST(IsValidIdentifierToken) {
for (int i = 0; i < Token::NUM_TOKENS; i++) {
Token::Value token = static_cast<Token::Value>(i);
for (size_t raw_language_mode = 0; raw_language_mode < LanguageModeSize;
@@ -133,8 +165,9 @@ TEST(IsIdentifierToken) {
for (int is_generator = 0; is_generator < 2; is_generator++) {
for (int disallow_await = 0; disallow_await < 2; disallow_await++) {
CHECK_EQ(
- TokenIsIdentifier(token, mode, is_generator, disallow_await),
- Token::IsIdentifier(token, mode, is_generator, disallow_await));
+ TokenIsValidIdentifier(token, mode, is_generator, disallow_await),
+ Token::IsValidIdentifier(token, mode, is_generator,
+ disallow_await));
}
}
}
@@ -144,6 +177,7 @@ TEST(IsIdentifierToken) {
bool TokenIsStrictReservedWord(Token::Value token) {
switch (token) {
case Token::LET:
+ case Token::YIELD:
case Token::STATIC:
case Token::FUTURE_STRICT_RESERVED_WORD:
case Token::ESCAPED_STRICT_RESERVED_WORD:
@@ -184,6 +218,7 @@ TEST(IsLiteralToken) {
CHECK_EQ(TokenIsLiteral(token), Token::IsLiteral(token));
}
}
+
bool TokenIsAssignmentOp(Token::Value token) {
switch (token) {
case Token::INIT:
@@ -204,6 +239,18 @@ TEST(AssignmentOp) {
}
}
+bool TokenIsArrowOrAssignmentOp(Token::Value token) {
+ return token == Token::ARROW || TokenIsAssignmentOp(token);
+}
+
+TEST(ArrowOrAssignmentOp) {
+ for (int i = 0; i < Token::NUM_TOKENS; i++) {
+ Token::Value token = static_cast<Token::Value>(i);
+ CHECK_EQ(TokenIsArrowOrAssignmentOp(token),
+ Token::IsArrowOrAssignmentOp(token));
+ }
+}
+
bool TokenIsBinaryOp(Token::Value token) {
switch (token) {
case Token::COMMA:
@@ -331,63 +378,119 @@ TEST(IsUnaryOp) {
}
}
-bool TokenIsCountOp(Token::Value token) {
+bool TokenIsPropertyOrCall(Token::Value token) {
switch (token) {
- case Token::INC:
- case Token::DEC:
+ case Token::TEMPLATE_SPAN:
+ case Token::TEMPLATE_TAIL:
+ case Token::PERIOD:
+ case Token::LBRACK:
+ case Token::LPAREN:
return true;
default:
return false;
}
}
-TEST(IsCountOp) {
+TEST(IsPropertyOrCall) {
for (int i = 0; i < Token::NUM_TOKENS; i++) {
Token::Value token = static_cast<Token::Value>(i);
- CHECK_EQ(TokenIsCountOp(token), Token::IsCountOp(token));
+ CHECK_EQ(TokenIsPropertyOrCall(token), Token::IsPropertyOrCall(token));
}
}
-bool TokenIsShiftOp(Token::Value token) {
+bool TokenIsMember(Token::Value token) {
switch (token) {
- case Token::SHL:
- case Token::SAR:
- case Token::SHR:
+ case Token::TEMPLATE_SPAN:
+ case Token::TEMPLATE_TAIL:
+ case Token::PERIOD:
+ case Token::LBRACK:
return true;
default:
return false;
}
}
-TEST(IsShiftOp) {
+bool TokenIsTemplate(Token::Value token) {
+ switch (token) {
+ case Token::TEMPLATE_SPAN:
+ case Token::TEMPLATE_TAIL:
+ return true;
+ default:
+ return false;
+ }
+}
+
+bool TokenIsProperty(Token::Value token) {
+ switch (token) {
+ case Token::PERIOD:
+ case Token::LBRACK:
+ return true;
+ default:
+ return false;
+ }
+}
+
+TEST(IsMember) {
for (int i = 0; i < Token::NUM_TOKENS; i++) {
Token::Value token = static_cast<Token::Value>(i);
- CHECK_EQ(TokenIsShiftOp(token), Token::IsShiftOp(token));
+ CHECK_EQ(TokenIsMember(token), Token::IsMember(token));
}
}
-bool TokenIsTrivialExpressionToken(Token::Value token) {
+TEST(IsTemplate) {
+ for (int i = 0; i < Token::NUM_TOKENS; i++) {
+ Token::Value token = static_cast<Token::Value>(i);
+ CHECK_EQ(TokenIsTemplate(token), Token::IsTemplate(token));
+ }
+}
+
+TEST(IsProperty) {
+ for (int i = 0; i < Token::NUM_TOKENS; i++) {
+ Token::Value token = static_cast<Token::Value>(i);
+ CHECK_EQ(TokenIsProperty(token), Token::IsProperty(token));
+ }
+}
+
+bool TokenIsCountOp(Token::Value token) {
switch (token) {
- case Token::SMI:
- case Token::NUMBER:
- case Token::BIGINT:
- case Token::NULL_LITERAL:
- case Token::TRUE_LITERAL:
- case Token::FALSE_LITERAL:
- case Token::STRING:
- case Token::IDENTIFIER:
- case Token::THIS:
+ case Token::INC:
+ case Token::DEC:
return true;
default:
return false;
}
}
-TEST(IsTrivialExpressionToken) {
+TEST(IsCountOp) {
+ for (int i = 0; i < Token::NUM_TOKENS; i++) {
+ Token::Value token = static_cast<Token::Value>(i);
+ CHECK_EQ(TokenIsCountOp(token), Token::IsCountOp(token));
+ }
+}
+
+TEST(IsUnaryOrCountOp) {
for (int i = 0; i < Token::NUM_TOKENS; i++) {
Token::Value token = static_cast<Token::Value>(i);
- CHECK_EQ(TokenIsTrivialExpressionToken(token),
- Token::IsTrivialExpressionToken(token));
+ CHECK_EQ(TokenIsUnaryOp(token) || TokenIsCountOp(token),
+ Token::IsUnaryOrCountOp(token));
+ }
+}
+
+bool TokenIsShiftOp(Token::Value token) {
+ switch (token) {
+ case Token::SHL:
+ case Token::SAR:
+ case Token::SHR:
+ return true;
+ default:
+ return false;
+ }
+}
+
+TEST(IsShiftOp) {
+ for (int i = 0; i < Token::NUM_TOKENS; i++) {
+ Token::Value token = static_cast<Token::Value>(i);
+ CHECK_EQ(TokenIsShiftOp(token), Token::IsShiftOp(token));
}
}
@@ -399,12 +502,11 @@ TEST(ScanKeywords) {
static const KeywordToken keywords[] = {
#define KEYWORD(t, s, d) { s, i::Token::t },
- TOKEN_LIST(IGNORE_TOKEN, KEYWORD, IGNORE_TOKEN)
+ TOKEN_LIST(IGNORE_TOKEN, KEYWORD)
#undef KEYWORD
{nullptr, i::Token::IDENTIFIER}};
KeywordToken key_token;
- i::UnicodeCache unicode_cache;
char buffer[32];
for (int i = 0; (key_token = keywords[i]).keyword != nullptr; i++) {
const char* keyword = key_token.keyword;
@@ -412,7 +514,7 @@ TEST(ScanKeywords) {
CHECK(static_cast<int>(sizeof(buffer)) >= length);
{
auto stream = i::ScannerStream::ForTesting(keyword, length);
- i::Scanner scanner(&unicode_cache, stream.get(), false);
+ i::Scanner scanner(stream.get(), false);
scanner.Initialize();
CHECK_EQ(key_token.token, scanner.Next());
CHECK_EQ(i::Token::EOS, scanner.Next());
@@ -420,7 +522,7 @@ TEST(ScanKeywords) {
// Removing characters will make keyword matching fail.
{
auto stream = i::ScannerStream::ForTesting(keyword, length - 1);
- i::Scanner scanner(&unicode_cache, stream.get(), false);
+ i::Scanner scanner(stream.get(), false);
scanner.Initialize();
CHECK_EQ(i::Token::IDENTIFIER, scanner.Next());
CHECK_EQ(i::Token::EOS, scanner.Next());
@@ -431,7 +533,7 @@ TEST(ScanKeywords) {
i::MemMove(buffer, keyword, length);
buffer[length] = chars_to_append[j];
auto stream = i::ScannerStream::ForTesting(buffer, length + 1);
- i::Scanner scanner(&unicode_cache, stream.get(), false);
+ i::Scanner scanner(stream.get(), false);
scanner.Initialize();
CHECK_EQ(i::Token::IDENTIFIER, scanner.Next());
CHECK_EQ(i::Token::EOS, scanner.Next());
@@ -441,7 +543,7 @@ TEST(ScanKeywords) {
i::MemMove(buffer, keyword, length);
buffer[length - 1] = '_';
auto stream = i::ScannerStream::ForTesting(buffer, length);
- i::Scanner scanner(&unicode_cache, stream.get(), false);
+ i::Scanner scanner(stream.get(), false);
scanner.Initialize();
CHECK_EQ(i::Token::IDENTIFIER, scanner.Next());
CHECK_EQ(i::Token::EOS, scanner.Next());
@@ -508,7 +610,7 @@ TEST(ScanHTMLEndComments) {
for (int i = 0; tests[i]; i++) {
const char* source = tests[i];
auto stream = i::ScannerStream::ForTesting(source);
- i::Scanner scanner(i_isolate->unicode_cache(), stream.get(), false);
+ i::Scanner scanner(stream.get(), false);
scanner.Initialize();
i::Zone zone(i_isolate->allocator(), ZONE_NAME);
i::AstValueFactory ast_value_factory(&zone,
@@ -527,7 +629,7 @@ TEST(ScanHTMLEndComments) {
for (int i = 0; fail_tests[i]; i++) {
const char* source = fail_tests[i];
auto stream = i::ScannerStream::ForTesting(source);
- i::Scanner scanner(i_isolate->unicode_cache(), stream.get(), false);
+ i::Scanner scanner(stream.get(), false);
scanner.Initialize();
i::Zone zone(i_isolate->allocator(), ZONE_NAME);
i::AstValueFactory ast_value_factory(&zone,
@@ -541,18 +643,17 @@ TEST(ScanHTMLEndComments) {
i::PreParser::PreParseResult result = preparser.PreParseProgram();
// Even in the case of a syntax error, kPreParseSuccess is returned.
CHECK_EQ(i::PreParser::kPreParseSuccess, result);
- CHECK(pending_error_handler.has_pending_error());
+ CHECK(pending_error_handler.has_pending_error() ||
+ pending_error_handler.has_error_unidentifiable_by_preparser());
}
}
TEST(ScanHtmlComments) {
const char* src = "a <!-- b --> c";
- i::UnicodeCache unicode_cache;
-
// Disallow HTML comments.
{
auto stream = i::ScannerStream::ForTesting(src);
- i::Scanner scanner(&unicode_cache, stream.get(), true);
+ i::Scanner scanner(stream.get(), true);
scanner.Initialize();
CHECK_EQ(i::Token::IDENTIFIER, scanner.Next());
CHECK_EQ(i::Token::ILLEGAL, scanner.Next());
@@ -561,7 +662,7 @@ TEST(ScanHtmlComments) {
// Skip HTML comments:
{
auto stream = i::ScannerStream::ForTesting(src);
- i::Scanner scanner(&unicode_cache, stream.get(), false);
+ i::Scanner scanner(stream.get(), false);
scanner.Initialize();
CHECK_EQ(i::Token::IDENTIFIER, scanner.Next());
CHECK_EQ(i::Token::EOS, scanner.Next());
@@ -600,7 +701,7 @@ TEST(StandAlonePreParser) {
uintptr_t stack_limit = i_isolate->stack_guard()->real_climit();
for (int i = 0; programs[i]; i++) {
auto stream = i::ScannerStream::ForTesting(programs[i]);
- i::Scanner scanner(i_isolate->unicode_cache(), stream.get(), false);
+ i::Scanner scanner(stream.get(), false);
scanner.Initialize();
i::Zone zone(i_isolate->allocator(), ZONE_NAME);
@@ -633,7 +734,7 @@ TEST(StandAlonePreParserNoNatives) {
uintptr_t stack_limit = isolate->stack_guard()->real_climit();
for (int i = 0; programs[i]; i++) {
auto stream = i::ScannerStream::ForTesting(programs[i]);
- i::Scanner scanner(isolate->unicode_cache(), stream.get(), false);
+ i::Scanner scanner(stream.get(), false);
scanner.Initialize();
// Preparser defaults to disallowing natives syntax.
@@ -648,7 +749,8 @@ TEST(StandAlonePreParserNoNatives) {
isolate->logger());
i::PreParser::PreParseResult result = preparser.PreParseProgram();
CHECK_EQ(i::PreParser::kPreParseSuccess, result);
- CHECK(pending_error_handler.has_pending_error());
+ CHECK(pending_error_handler.has_pending_error() ||
+ pending_error_handler.has_error_unidentifiable_by_preparser());
}
}
@@ -668,7 +770,7 @@ TEST(RegressChromium62639) {
// failed in debug mode, and sometimes crashed in release mode.
auto stream = i::ScannerStream::ForTesting(program);
- i::Scanner scanner(CcTest::i_isolate()->unicode_cache(), stream.get(), false);
+ i::Scanner scanner(stream.get(), false);
scanner.Initialize();
i::Zone zone(CcTest::i_isolate()->allocator(), ZONE_NAME);
i::AstValueFactory ast_value_factory(
@@ -682,7 +784,8 @@ TEST(RegressChromium62639) {
i::PreParser::PreParseResult result = preparser.PreParseProgram();
// Even in the case of a syntax error, kPreParseSuccess is returned.
CHECK_EQ(i::PreParser::kPreParseSuccess, result);
- CHECK(pending_error_handler.has_pending_error());
+ CHECK(pending_error_handler.has_pending_error() ||
+ pending_error_handler.has_error_unidentifiable_by_preparser());
}
@@ -701,7 +804,7 @@ TEST(PreParseOverflow) {
uintptr_t stack_limit = isolate->stack_guard()->real_climit();
auto stream = i::ScannerStream::ForTesting(program.get(), kProgramSize);
- i::Scanner scanner(isolate->unicode_cache(), stream.get(), false);
+ i::Scanner scanner(stream.get(), false);
scanner.Initialize();
i::Zone zone(CcTest::i_isolate()->allocator(), ZONE_NAME);
@@ -720,7 +823,7 @@ void TestStreamScanner(i::Utf16CharacterStream* stream,
i::Token::Value* expected_tokens,
int skip_pos = 0, // Zero means not skipping.
int skip_to = 0) {
- i::Scanner scanner(CcTest::i_isolate()->unicode_cache(), stream, false);
+ i::Scanner scanner(stream, false);
scanner.Initialize();
int i = 0;
@@ -798,7 +901,7 @@ TEST(StreamScanner) {
void TestScanRegExp(const char* re_source, const char* expected) {
auto stream = i::ScannerStream::ForTesting(re_source);
i::HandleScope scope(CcTest::i_isolate());
- i::Scanner scanner(CcTest::i_isolate()->unicode_cache(), stream.get(), false);
+ i::Scanner scanner(stream.get(), false);
scanner.Initialize();
i::Token::Value start = scanner.peek();
@@ -814,7 +917,7 @@ void TestScanRegExp(const char* re_source, const char* expected) {
ast_value_factory.Internalize(CcTest::i_isolate());
i::Handle<i::String> val = current_symbol->string();
i::DisallowHeapAllocation no_alloc;
- i::String::FlatContent content = val->GetFlatContent();
+ i::String::FlatContent content = val->GetFlatContent(no_alloc);
CHECK(content.IsOneByte());
i::Vector<const uint8_t> actual = content.ToOneByteVector();
for (int i = 0; i < actual.length(); i++) {
@@ -998,8 +1101,8 @@ TEST(ScopeUsesArgumentsSuperThis) {
if ((source_data[i].expected & THIS) != 0) {
// Currently the is_used() flag is conservative; all variables in a
// script scope are marked as used.
- CHECK(
- scope->Lookup(info.ast_value_factory()->this_string())->is_used());
+ CHECK(scope->LookupForTesting(info.ast_value_factory()->this_string())
+ ->is_used());
}
if (is_sloppy(scope->language_mode())) {
CHECK_EQ((source_data[i].expected & EVAL) != 0,
@@ -1385,7 +1488,6 @@ TEST(DiscardFunctionBody) {
i::parsing::ParseProgram(&info, isolate);
function = info.literal();
CHECK_NOT_NULL(function);
- CHECK_NOT_NULL(function->body());
CHECK_EQ(1, function->body()->length());
i::FunctionLiteral* inner =
function->body()->first()->AsExpressionStatement()->expression()->
@@ -1401,7 +1503,7 @@ TEST(DiscardFunctionBody) {
// TODO(conradw): This path won't be hit until the other test cases can be
// uncommented.
UNREACHABLE();
- CHECK_NOT_NULL(inner->body());
+ CHECK(inner->ShouldEagerCompile());
CHECK_GE(2, inner->body()->length());
i::Expression* exp = inner->body()->at(1)->AsExpressionStatement()->
expression()->AsBinaryOperation()->right();
@@ -1415,7 +1517,7 @@ TEST(DiscardFunctionBody) {
AsFunctionLiteral();
}
}
- CHECK_NULL(fun->body());
+ CHECK(!fun->ShouldEagerCompile());
}
}
@@ -1435,10 +1537,10 @@ enum ParserFlag {
kAllowNatives,
kAllowHarmonyPublicFields,
kAllowHarmonyPrivateFields,
+ kAllowHarmonyPrivateMethods,
kAllowHarmonyStaticFields,
kAllowHarmonyDynamicImport,
kAllowHarmonyImportMeta,
- kAllowHarmonyDoExpressions,
kAllowHarmonyNumericSeparator
};
@@ -1448,38 +1550,38 @@ enum ParserSyncTestResult {
kError
};
-void SetGlobalFlags(i::EnumSet<ParserFlag> flags) {
- i::FLAG_allow_natives_syntax = flags.Contains(kAllowNatives);
- i::FLAG_harmony_public_fields = flags.Contains(kAllowHarmonyPublicFields);
- i::FLAG_harmony_private_fields = flags.Contains(kAllowHarmonyPrivateFields);
- i::FLAG_harmony_static_fields = flags.Contains(kAllowHarmonyStaticFields);
- i::FLAG_harmony_dynamic_import = flags.Contains(kAllowHarmonyDynamicImport);
- i::FLAG_harmony_import_meta = flags.Contains(kAllowHarmonyImportMeta);
- i::FLAG_harmony_do_expressions = flags.Contains(kAllowHarmonyDoExpressions);
+void SetGlobalFlags(base::EnumSet<ParserFlag> flags) {
+ i::FLAG_allow_natives_syntax = flags.contains(kAllowNatives);
+ i::FLAG_harmony_public_fields = flags.contains(kAllowHarmonyPublicFields);
+ i::FLAG_harmony_private_fields = flags.contains(kAllowHarmonyPrivateFields);
+ i::FLAG_harmony_private_methods = flags.contains(kAllowHarmonyPrivateMethods);
+ i::FLAG_harmony_static_fields = flags.contains(kAllowHarmonyStaticFields);
+ i::FLAG_harmony_dynamic_import = flags.contains(kAllowHarmonyDynamicImport);
+ i::FLAG_harmony_import_meta = flags.contains(kAllowHarmonyImportMeta);
i::FLAG_harmony_numeric_separator =
- flags.Contains(kAllowHarmonyNumericSeparator);
+ flags.contains(kAllowHarmonyNumericSeparator);
}
-void SetParserFlags(i::PreParser* parser, i::EnumSet<ParserFlag> flags) {
- parser->set_allow_natives(flags.Contains(kAllowNatives));
+void SetParserFlags(i::PreParser* parser, base::EnumSet<ParserFlag> flags) {
+ parser->set_allow_natives(flags.contains(kAllowNatives));
parser->set_allow_harmony_public_fields(
- flags.Contains(kAllowHarmonyPublicFields));
+ flags.contains(kAllowHarmonyPublicFields));
parser->set_allow_harmony_private_fields(
- flags.Contains(kAllowHarmonyPrivateFields));
+ flags.contains(kAllowHarmonyPrivateFields));
+ parser->set_allow_harmony_private_methods(
+ flags.contains(kAllowHarmonyPrivateMethods));
parser->set_allow_harmony_static_fields(
- flags.Contains(kAllowHarmonyStaticFields));
+ flags.contains(kAllowHarmonyStaticFields));
parser->set_allow_harmony_dynamic_import(
- flags.Contains(kAllowHarmonyDynamicImport));
+ flags.contains(kAllowHarmonyDynamicImport));
parser->set_allow_harmony_import_meta(
- flags.Contains(kAllowHarmonyImportMeta));
- parser->set_allow_harmony_do_expressions(
- flags.Contains(kAllowHarmonyDoExpressions));
+ flags.contains(kAllowHarmonyImportMeta));
parser->set_allow_harmony_numeric_separator(
- flags.Contains(kAllowHarmonyNumericSeparator));
+ flags.contains(kAllowHarmonyNumericSeparator));
}
void TestParserSyncWithFlags(i::Handle<i::String> source,
- i::EnumSet<ParserFlag> flags,
+ base::EnumSet<ParserFlag> flags,
ParserSyncTestResult result,
bool is_module = false, bool test_preparser = true,
bool ignore_error_msg = false) {
@@ -1493,7 +1595,7 @@ void TestParserSyncWithFlags(i::Handle<i::String> source,
if (test_preparser) {
std::unique_ptr<i::Utf16CharacterStream> stream(
i::ScannerStream::For(isolate, source));
- i::Scanner scanner(isolate->unicode_cache(), stream.get(), is_module);
+ i::Scanner scanner(stream.get(), is_module);
i::Zone zone(CcTest::i_isolate()->allocator(), ZONE_NAME);
i::AstValueFactory ast_value_factory(
&zone, CcTest::i_isolate()->ast_string_constants(),
@@ -1511,10 +1613,10 @@ void TestParserSyncWithFlags(i::Handle<i::String> source,
// Parse the data
i::FunctionLiteral* function;
{
+ SetGlobalFlags(flags);
i::Handle<i::Script> script = factory->NewScript(source);
i::ParseInfo info(isolate, script);
- info.set_allow_lazy_parsing(flags.Contains(kAllowLazy));
- SetGlobalFlags(flags);
+ info.set_allow_lazy_parsing(flags.contains(kAllowLazy));
if (is_module) info.set_module();
i::parsing::ParseProgram(&info, isolate);
function = info.literal();
@@ -1541,7 +1643,8 @@ void TestParserSyncWithFlags(i::Handle<i::String> source,
source->ToCString().get(), message_string->ToCString().get());
}
- if (test_preparser && !pending_error_handler.has_pending_error()) {
+ if (test_preparser && !pending_error_handler.has_pending_error() &&
+ !pending_error_handler.has_error_unidentifiable_by_preparser()) {
FATAL(
"Parser failed on:\n"
"\t%s\n"
@@ -1553,7 +1656,7 @@ void TestParserSyncWithFlags(i::Handle<i::String> source,
// Check that preparser and parser produce the same error, except for cases
// where we do not track errors in the preparser.
if (test_preparser && !ignore_error_msg &&
- !pending_error_handler.ErrorUnidentifiableByPreParser()) {
+ !pending_error_handler.has_error_unidentifiable_by_preparser()) {
i::Handle<i::String> preparser_message =
pending_error_handler.FormatErrorMessageForTest(CcTest::i_isolate());
if (!i::String::Equals(isolate, message_string, preparser_message)) {
@@ -1602,7 +1705,7 @@ void TestParserSync(const char* source, const ParserFlag* varying_flags,
->NewStringFromUtf8(Vector<const char>(source, strlen(source)))
.ToHandleChecked();
for (int bits = 0; bits < (1 << varying_flags_length); bits++) {
- i::EnumSet<ParserFlag> flags;
+ base::EnumSet<ParserFlag> flags;
for (size_t flag_index = 0; flag_index < varying_flags_length;
++flag_index) {
if ((bits & (1 << flag_index)) != 0) flags.Add(varying_flags[flag_index]);
@@ -1778,6 +1881,7 @@ void RunParserSyncTest(
context_data[i][0],
statement_data[j],
context_data[i][1]);
+ PrintF("%s\n", program.start());
CHECK_EQ(length, kProgramSize);
TestParserSync(program.start(), flags, flags_len, result,
always_true_flags, always_true_len, always_false_flags,
@@ -2617,29 +2721,6 @@ TEST(OptionalCatchBinding) {
RunParserSyncTest(context_data, statement_data, kSuccess);
}
-TEST(OptionalCatchBindingInDoExpression) {
- // This is an edge case no otherwise hit: a catch scope in a parameter
- // expression which needs its own scope.
- // clang-format off
- const char* context_data[][2] = {
- {"((x = (eval(''), do {", "}))=>{})()"},
- { nullptr, nullptr }
- };
-
- const char* statement_data[] = {
- "try { } catch { }",
- "try { } catch { } finally { }",
- "try { let e; } catch { let e; }",
- "try { let e; } catch { let e; } finally { let e; }",
- nullptr
- };
- // clang-format on
-
- static const ParserFlag do_and_catch_flags[] = {kAllowHarmonyDoExpressions};
- RunParserSyncTest(context_data, statement_data, kSuccess, nullptr, 0,
- do_and_catch_flags, arraysize(do_and_catch_flags));
-}
-
TEST(ErrorsRegexpLiteral) {
const char* context_data[][2] = {{"var r = ", ""}, {nullptr, nullptr}};
@@ -3153,7 +3234,7 @@ TEST(SerializationOfMaybeAssignmentFlag) {
v8::Local<v8::Value> v = CompileRun(src);
i::Handle<i::Object> o = v8::Utils::OpenHandle(*v);
i::Handle<i::JSFunction> f = i::Handle<i::JSFunction>::cast(o);
- i::Context* context = f->context();
+ i::Context context = f->context();
i::AstValueFactory avf(&zone, isolate->ast_string_constants(),
isolate->heap()->HashSeed());
const i::AstRawString* name = avf.GetOneByteString("result");
@@ -3169,7 +3250,7 @@ TEST(SerializationOfMaybeAssignmentFlag) {
CHECK_NOT_NULL(name);
// Get result from h's function context (that is f's context)
- i::Variable* var = s->Lookup(name);
+ i::Variable* var = s->LookupForTesting(name);
CHECK_NOT_NULL(var);
// Maybe assigned should survive deserialization
@@ -3203,7 +3284,7 @@ TEST(IfArgumentsArrayAccessedThenParametersMaybeAssigned) {
v8::Local<v8::Value> v = CompileRun(src);
i::Handle<i::Object> o = v8::Utils::OpenHandle(*v);
i::Handle<i::JSFunction> f = i::Handle<i::JSFunction>::cast(o);
- i::Context* context = f->context();
+ i::Context context = f->context();
i::AstValueFactory avf(&zone, isolate->ast_string_constants(),
isolate->heap()->HashSeed());
const i::AstRawString* name_x = avf.GetOneByteString("x");
@@ -3217,7 +3298,7 @@ TEST(IfArgumentsArrayAccessedThenParametersMaybeAssigned) {
CHECK(s != script_scope);
// Get result from f's function context (that is g's outer context)
- i::Variable* var_x = s->Lookup(name_x);
+ i::Variable* var_x = s->LookupForTesting(name_x);
CHECK_NOT_NULL(var_x);
CHECK_EQ(var_x->maybe_assigned(), i::kMaybeAssigned);
}
@@ -3389,17 +3470,13 @@ TEST(InnerAssignment) {
DCHECK(scope->is_function_scope());
const i::AstRawString* var_name =
info->ast_value_factory()->GetOneByteString("x");
- i::Variable* var = scope->Lookup(var_name);
+ i::Variable* var = scope->LookupForTesting(var_name);
bool expected = outers[i].assigned || inners[j].assigned;
CHECK_NOT_NULL(var);
CHECK(var->is_used() || !expected);
bool is_maybe_assigned = var->maybe_assigned() == i::kMaybeAssigned;
- if (i::FLAG_lazy_inner_functions) {
- CHECK(is_maybe_assigned == expected ||
- (is_maybe_assigned && inners[j].allow_error_in_inner_function));
- } else {
- CHECK_EQ(is_maybe_assigned, expected);
- }
+ CHECK(is_maybe_assigned == expected ||
+ (is_maybe_assigned && inners[j].allow_error_in_inner_function));
}
}
}
@@ -3490,7 +3567,7 @@ TEST(MaybeAssignedParameters) {
CHECK(scope->is_function_scope());
const i::AstRawString* var_name =
info->ast_value_factory()->GetOneByteString("arg");
- i::Variable* var = scope->Lookup(var_name);
+ i::Variable* var = scope->LookupForTesting(var_name);
CHECK(var->is_used() || !assigned);
bool is_maybe_assigned = var->maybe_assigned() == i::kMaybeAssigned;
CHECK_EQ(is_maybe_assigned, assigned);
@@ -3534,7 +3611,7 @@ static void TestMaybeAssigned(Input input, const char* variable, bool module,
scope = i::ScopeTestHelper::FindScope(scope, input.location);
const i::AstRawString* var_name =
info->ast_value_factory()->GetOneByteString(variable);
- var = scope->Lookup(var_name);
+ var = scope->LookupForTesting(var_name);
}
CHECK_NOT_NULL(var);
@@ -3564,8 +3641,16 @@ TEST(MaybeAssignedInsideLoop) {
Input module_and_script_tests[] = {
{true, "for (j=x; j<10; ++j) { foo = j }", top},
{true, "for (j=x; j<10; ++j) { [foo] = [j] }", top},
+ {true, "for (j=x; j<10; ++j) { [[foo]=[42]] = [] }", top},
{true, "for (j=x; j<10; ++j) { var foo = j }", top},
{true, "for (j=x; j<10; ++j) { var [foo] = [j] }", top},
+ {true, "for (j=x; j<10; ++j) { var [[foo]=[42]] = [] }", top},
+ {true, "for (j=x; j<10; ++j) { var foo; foo = j }", top},
+ {true, "for (j=x; j<10; ++j) { var foo; [foo] = [j] }", top},
+ {true, "for (j=x; j<10; ++j) { var foo; [[foo]=[42]] = [] }", top},
+ {true, "for (j=x; j<10; ++j) { let foo; foo = j }", {0}},
+ {true, "for (j=x; j<10; ++j) { let foo; [foo] = [j] }", {0}},
+ {true, "for (j=x; j<10; ++j) { let foo; [[foo]=[42]] = [] }", {0}},
{false, "for (j=x; j<10; ++j) { let foo = j }", {0}},
{false, "for (j=x; j<10; ++j) { let [foo] = [j] }", {0}},
{false, "for (j=x; j<10; ++j) { const foo = j }", {0}},
@@ -3574,8 +3659,16 @@ TEST(MaybeAssignedInsideLoop) {
{true, "for ({j}=x; j<10; ++j) { foo = j }", top},
{true, "for ({j}=x; j<10; ++j) { [foo] = [j] }", top},
+ {true, "for ({j}=x; j<10; ++j) { [[foo]=[42]] = [] }", top},
{true, "for ({j}=x; j<10; ++j) { var foo = j }", top},
{true, "for ({j}=x; j<10; ++j) { var [foo] = [j] }", top},
+ {true, "for ({j}=x; j<10; ++j) { var [[foo]=[42]] = [] }", top},
+ {true, "for ({j}=x; j<10; ++j) { var foo; foo = j }", top},
+ {true, "for ({j}=x; j<10; ++j) { var foo; [foo] = [j] }", top},
+ {true, "for ({j}=x; j<10; ++j) { var foo; [[foo]=[42]] = [] }", top},
+ {true, "for ({j}=x; j<10; ++j) { let foo; foo = j }", {0}},
+ {true, "for ({j}=x; j<10; ++j) { let foo; [foo] = [j] }", {0}},
+ {true, "for ({j}=x; j<10; ++j) { let foo; [[foo]=[42]] = [] }", {0}},
{false, "for ({j}=x; j<10; ++j) { let foo = j }", {0}},
{false, "for ({j}=x; j<10; ++j) { let [foo] = [j] }", {0}},
{false, "for ({j}=x; j<10; ++j) { const foo = j }", {0}},
@@ -3584,8 +3677,16 @@ TEST(MaybeAssignedInsideLoop) {
{true, "for (var j=x; j<10; ++j) { foo = j }", top},
{true, "for (var j=x; j<10; ++j) { [foo] = [j] }", top},
+ {true, "for (var j=x; j<10; ++j) { [[foo]=[42]] = [] }", top},
{true, "for (var j=x; j<10; ++j) { var foo = j }", top},
{true, "for (var j=x; j<10; ++j) { var [foo] = [j] }", top},
+ {true, "for (var j=x; j<10; ++j) { var [[foo]=[42]] = [] }", top},
+ {true, "for (var j=x; j<10; ++j) { var foo; foo = j }", top},
+ {true, "for (var j=x; j<10; ++j) { var foo; [foo] = [j] }", top},
+ {true, "for (var j=x; j<10; ++j) { var foo; [[foo]=[42]] = [] }", top},
+ {true, "for (var j=x; j<10; ++j) { let foo; foo = j }", {0}},
+ {true, "for (var j=x; j<10; ++j) { let foo; [foo] = [j] }", {0}},
+ {true, "for (var j=x; j<10; ++j) { let foo; [[foo]=[42]] = [] }", {0}},
{false, "for (var j=x; j<10; ++j) { let foo = j }", {0}},
{false, "for (var j=x; j<10; ++j) { let [foo] = [j] }", {0}},
{false, "for (var j=x; j<10; ++j) { const foo = j }", {0}},
@@ -3594,8 +3695,16 @@ TEST(MaybeAssignedInsideLoop) {
{true, "for (var {j}=x; j<10; ++j) { foo = j }", top},
{true, "for (var {j}=x; j<10; ++j) { [foo] = [j] }", top},
+ {true, "for (var {j}=x; j<10; ++j) { [[foo]=[42]] = [] }", top},
{true, "for (var {j}=x; j<10; ++j) { var foo = j }", top},
{true, "for (var {j}=x; j<10; ++j) { var [foo] = [j] }", top},
+ {true, "for (var {j}=x; j<10; ++j) { var [[foo]=[42]] = [] }", top},
+ {true, "for (var {j}=x; j<10; ++j) { var foo; foo = j }", top},
+ {true, "for (var {j}=x; j<10; ++j) { var foo; [foo] = [j] }", top},
+ {true, "for (var {j}=x; j<10; ++j) { var foo; [[foo]=[42]] = [] }", top},
+ {true, "for (var {j}=x; j<10; ++j) { let foo; foo = j }", {0}},
+ {true, "for (var {j}=x; j<10; ++j) { let foo; [foo] = [j] }", {0}},
+ {true, "for (var {j}=x; j<10; ++j) { let foo; [[foo]=[42]] = [] }", {0}},
{false, "for (var {j}=x; j<10; ++j) { let foo = j }", {0}},
{false, "for (var {j}=x; j<10; ++j) { let [foo] = [j] }", {0}},
{false, "for (var {j}=x; j<10; ++j) { const foo = j }", {0}},
@@ -3604,112 +3713,202 @@ TEST(MaybeAssignedInsideLoop) {
{true, "for (let j=x; j<10; ++j) { foo = j }", top},
{true, "for (let j=x; j<10; ++j) { [foo] = [j] }", top},
+ {true, "for (let j=x; j<10; ++j) { [[foo]=[42]] = [] }", top},
{true, "for (let j=x; j<10; ++j) { var foo = j }", top},
{true, "for (let j=x; j<10; ++j) { var [foo] = [j] }", top},
+ {true, "for (let j=x; j<10; ++j) { var [[foo]=[42]] = [] }", top},
+ {true, "for (let j=x; j<10; ++j) { var foo; foo = j }", top},
+ {true, "for (let j=x; j<10; ++j) { var foo; [foo] = [j] }", top},
+ {true, "for (let j=x; j<10; ++j) { var foo; [[foo]=[42]] = [] }", top},
+ {true, "for (let j=x; j<10; ++j) { let foo; foo = j }", {0, 0}},
+ {true, "for (let j=x; j<10; ++j) { let foo; [foo] = [j] }", {0, 0}},
+ {true, "for (let j=x; j<10; ++j) { let foo; [[foo]=[42]] = [] }", {0, 0}},
{false, "for (let j=x; j<10; ++j) { let foo = j }", {0, 0}},
- {false, "for (let j=x; j<10; ++j) { let [foo] = [j] }", {0, 0, 0}},
+ {false, "for (let j=x; j<10; ++j) { let [foo] = [j] }", {0, 0}},
{false, "for (let j=x; j<10; ++j) { const foo = j }", {0, 0}},
- {false, "for (let j=x; j<10; ++j) { const [foo] = [j] }", {0, 0, 0}},
+ {false, "for (let j=x; j<10; ++j) { const [foo] = [j] }", {0, 0}},
{false,
"for (let j=x; j<10; ++j) { function foo() {return j} }",
{0, 0, 0}},
{true, "for (let {j}=x; j<10; ++j) { foo = j }", top},
{true, "for (let {j}=x; j<10; ++j) { [foo] = [j] }", top},
+ {true, "for (let {j}=x; j<10; ++j) { [[foo]=[42]] = [] }", top},
{true, "for (let {j}=x; j<10; ++j) { var foo = j }", top},
{true, "for (let {j}=x; j<10; ++j) { var [foo] = [j] }", top},
+ {true, "for (let {j}=x; j<10; ++j) { var [[foo]=[42]] = [] }", top},
+ {true, "for (let {j}=x; j<10; ++j) { var foo; foo = j }", top},
+ {true, "for (let {j}=x; j<10; ++j) { var foo; [foo] = [j] }", top},
+ {true, "for (let {j}=x; j<10; ++j) { var foo; [[foo]=[42]] = [] }", top},
+ {true, "for (let {j}=x; j<10; ++j) { let foo; foo = j }", {0, 0}},
+ {true, "for (let {j}=x; j<10; ++j) { let foo; [foo] = [j] }", {0, 0}},
+ {true,
+ "for (let {j}=x; j<10; ++j) { let foo; [[foo]=[42]] = [] }",
+ {0, 0}},
{false, "for (let {j}=x; j<10; ++j) { let foo = j }", {0, 0}},
- {false, "for (let {j}=x; j<10; ++j) { let [foo] = [j] }", {0, 0, 0}},
+ {false, "for (let {j}=x; j<10; ++j) { let [foo] = [j] }", {0, 0}},
{false, "for (let {j}=x; j<10; ++j) { const foo = j }", {0, 0}},
- {false, "for (let {j}=x; j<10; ++j) { const [foo] = [j] }", {0, 0, 0}},
+ {false, "for (let {j}=x; j<10; ++j) { const [foo] = [j] }", {0, 0}},
{false,
"for (let {j}=x; j<10; ++j) { function foo(){return j} }",
{0, 0, 0}},
{true, "for (j of x) { foo = j }", top},
{true, "for (j of x) { [foo] = [j] }", top},
+ {true, "for (j of x) { [[foo]=[42]] = [] }", top},
{true, "for (j of x) { var foo = j }", top},
{true, "for (j of x) { var [foo] = [j] }", top},
- {false, "for (j of x) { let foo = j }", {1}},
- {false, "for (j of x) { let [foo] = [j] }", {1}},
- {false, "for (j of x) { const foo = j }", {1}},
- {false, "for (j of x) { const [foo] = [j] }", {1}},
- {false, "for (j of x) { function foo() {return j} }", {1}},
+ {true, "for (j of x) { var [[foo]=[42]] = [] }", top},
+ {true, "for (j of x) { var foo; foo = j }", top},
+ {true, "for (j of x) { var foo; [foo] = [j] }", top},
+ {true, "for (j of x) { var foo; [[foo]=[42]] = [] }", top},
+ {true, "for (j of x) { let foo; foo = j }", {0}},
+ {true, "for (j of x) { let foo; [foo] = [j] }", {0}},
+ {true, "for (j of x) { let foo; [[foo]=[42]] = [] }", {0}},
+ {false, "for (j of x) { let foo = j }", {0}},
+ {false, "for (j of x) { let [foo] = [j] }", {0}},
+ {false, "for (j of x) { const foo = j }", {0}},
+ {false, "for (j of x) { const [foo] = [j] }", {0}},
+ {false, "for (j of x) { function foo() {return j} }", {0}},
{true, "for ({j} of x) { foo = j }", top},
{true, "for ({j} of x) { [foo] = [j] }", top},
+ {true, "for ({j} of x) { [[foo]=[42]] = [] }", top},
{true, "for ({j} of x) { var foo = j }", top},
{true, "for ({j} of x) { var [foo] = [j] }", top},
- {false, "for ({j} of x) { let foo = j }", {1}},
- {false, "for ({j} of x) { let [foo] = [j] }", {1}},
- {false, "for ({j} of x) { const foo = j }", {1}},
- {false, "for ({j} of x) { const [foo] = [j] }", {1}},
- {false, "for ({j} of x) { function foo() {return j} }", {1}},
+ {true, "for ({j} of x) { var [[foo]=[42]] = [] }", top},
+ {true, "for ({j} of x) { var foo; foo = j }", top},
+ {true, "for ({j} of x) { var foo; [foo] = [j] }", top},
+ {true, "for ({j} of x) { var foo; [[foo]=[42]] = [] }", top},
+ {true, "for ({j} of x) { let foo; foo = j }", {0}},
+ {true, "for ({j} of x) { let foo; [foo] = [j] }", {0}},
+ {true, "for ({j} of x) { let foo; [[foo]=[42]] = [] }", {0}},
+ {false, "for ({j} of x) { let foo = j }", {0}},
+ {false, "for ({j} of x) { let [foo] = [j] }", {0}},
+ {false, "for ({j} of x) { const foo = j }", {0}},
+ {false, "for ({j} of x) { const [foo] = [j] }", {0}},
+ {false, "for ({j} of x) { function foo() {return j} }", {0}},
{true, "for (var j of x) { foo = j }", top},
{true, "for (var j of x) { [foo] = [j] }", top},
+ {true, "for (var j of x) { [[foo]=[42]] = [] }", top},
{true, "for (var j of x) { var foo = j }", top},
{true, "for (var j of x) { var [foo] = [j] }", top},
- {false, "for (var j of x) { let foo = j }", {1}},
- {false, "for (var j of x) { let [foo] = [j] }", {1}},
- {false, "for (var j of x) { const foo = j }", {1}},
- {false, "for (var j of x) { const [foo] = [j] }", {1}},
- {false, "for (var j of x) { function foo() {return j} }", {1}},
+ {true, "for (var j of x) { var [[foo]=[42]] = [] }", top},
+ {true, "for (var j of x) { var foo; foo = j }", top},
+ {true, "for (var j of x) { var foo; [foo] = [j] }", top},
+ {true, "for (var j of x) { var foo; [[foo]=[42]] = [] }", top},
+ {true, "for (var j of x) { let foo; foo = j }", {0}},
+ {true, "for (var j of x) { let foo; [foo] = [j] }", {0}},
+ {true, "for (var j of x) { let foo; [[foo]=[42]] = [] }", {0}},
+ {false, "for (var j of x) { let foo = j }", {0}},
+ {false, "for (var j of x) { let [foo] = [j] }", {0}},
+ {false, "for (var j of x) { const foo = j }", {0}},
+ {false, "for (var j of x) { const [foo] = [j] }", {0}},
+ {false, "for (var j of x) { function foo() {return j} }", {0}},
{true, "for (var {j} of x) { foo = j }", top},
{true, "for (var {j} of x) { [foo] = [j] }", top},
+ {true, "for (var {j} of x) { [[foo]=[42]] = [] }", top},
{true, "for (var {j} of x) { var foo = j }", top},
{true, "for (var {j} of x) { var [foo] = [j] }", top},
- {false, "for (var {j} of x) { let foo = j }", {1}},
- {false, "for (var {j} of x) { let [foo] = [j] }", {1}},
- {false, "for (var {j} of x) { const foo = j }", {1}},
- {false, "for (var {j} of x) { const [foo] = [j] }", {1}},
- {false, "for (var {j} of x) { function foo() {return j} }", {1}},
+ {true, "for (var {j} of x) { var [[foo]=[42]] = [] }", top},
+ {true, "for (var {j} of x) { var foo; foo = j }", top},
+ {true, "for (var {j} of x) { var foo; [foo] = [j] }", top},
+ {true, "for (var {j} of x) { var foo; [[foo]=[42]] = [] }", top},
+ {true, "for (var {j} of x) { let foo; foo = j }", {0}},
+ {true, "for (var {j} of x) { let foo; [foo] = [j] }", {0}},
+ {true, "for (var {j} of x) { let foo; [[foo]=[42]] = [] }", {0}},
+ {false, "for (var {j} of x) { let foo = j }", {0}},
+ {false, "for (var {j} of x) { let [foo] = [j] }", {0}},
+ {false, "for (var {j} of x) { const foo = j }", {0}},
+ {false, "for (var {j} of x) { const [foo] = [j] }", {0}},
+ {false, "for (var {j} of x) { function foo() {return j} }", {0}},
{true, "for (let j of x) { foo = j }", top},
{true, "for (let j of x) { [foo] = [j] }", top},
+ {true, "for (let j of x) { [[foo]=[42]] = [] }", top},
{true, "for (let j of x) { var foo = j }", top},
{true, "for (let j of x) { var [foo] = [j] }", top},
- {false, "for (let j of x) { let foo = j }", {0, 1, 0}},
- {false, "for (let j of x) { let [foo] = [j] }", {0, 1, 0}},
- {false, "for (let j of x) { const foo = j }", {0, 1, 0}},
- {false, "for (let j of x) { const [foo] = [j] }", {0, 1, 0}},
- {false, "for (let j of x) { function foo() {return j} }", {0, 1, 0}},
+ {true, "for (let j of x) { var [[foo]=[42]] = [] }", top},
+ {true, "for (let j of x) { var foo; foo = j }", top},
+ {true, "for (let j of x) { var foo; [foo] = [j] }", top},
+ {true, "for (let j of x) { var foo; [[foo]=[42]] = [] }", top},
+ {true, "for (let j of x) { let foo; foo = j }", {0, 0, 0}},
+ {true, "for (let j of x) { let foo; [foo] = [j] }", {0, 0, 0}},
+ {true, "for (let j of x) { let foo; [[foo]=[42]] = [] }", {0, 0, 0}},
+ {false, "for (let j of x) { let foo = j }", {0, 0, 0}},
+ {false, "for (let j of x) { let [foo] = [j] }", {0, 0, 0}},
+ {false, "for (let j of x) { const foo = j }", {0, 0, 0}},
+ {false, "for (let j of x) { const [foo] = [j] }", {0, 0, 0}},
+ {false, "for (let j of x) { function foo() {return j} }", {0, 0, 0}},
{true, "for (let {j} of x) { foo = j }", top},
{true, "for (let {j} of x) { [foo] = [j] }", top},
+ {true, "for (let {j} of x) { [[foo]=[42]] = [] }", top},
{true, "for (let {j} of x) { var foo = j }", top},
{true, "for (let {j} of x) { var [foo] = [j] }", top},
- {false, "for (let {j} of x) { let foo = j }", {0, 1, 0}},
- {false, "for (let {j} of x) { let [foo] = [j] }", {0, 1, 0}},
- {false, "for (let {j} of x) { const foo = j }", {0, 1, 0}},
- {false, "for (let {j} of x) { const [foo] = [j] }", {0, 1, 0}},
- {false, "for (let {j} of x) { function foo() {return j} }", {0, 1, 0}},
+ {true, "for (let {j} of x) { var [[foo]=[42]] = [] }", top},
+ {true, "for (let {j} of x) { var foo; foo = j }", top},
+ {true, "for (let {j} of x) { var foo; [foo] = [j] }", top},
+ {true, "for (let {j} of x) { var foo; [[foo]=[42]] = [] }", top},
+ {true, "for (let {j} of x) { let foo; foo = j }", {0, 0, 0}},
+ {true, "for (let {j} of x) { let foo; [foo] = [j] }", {0, 0, 0}},
+ {true, "for (let {j} of x) { let foo; [[foo]=[42]] = [] }", {0, 0, 0}},
+ {false, "for (let {j} of x) { let foo = j }", {0, 0, 0}},
+ {false, "for (let {j} of x) { let [foo] = [j] }", {0, 0, 0}},
+ {false, "for (let {j} of x) { const foo = j }", {0, 0, 0}},
+ {false, "for (let {j} of x) { const [foo] = [j] }", {0, 0, 0}},
+ {false, "for (let {j} of x) { function foo() {return j} }", {0, 0, 0}},
{true, "for (const j of x) { foo = j }", top},
{true, "for (const j of x) { [foo] = [j] }", top},
+ {true, "for (const j of x) { [[foo]=[42]] = [] }", top},
{true, "for (const j of x) { var foo = j }", top},
{true, "for (const j of x) { var [foo] = [j] }", top},
- {false, "for (const j of x) { let foo = j }", {0, 1, 0}},
- {false, "for (const j of x) { let [foo] = [j] }", {0, 1, 0}},
- {false, "for (const j of x) { const foo = j }", {0, 1, 0}},
- {false, "for (const j of x) { const [foo] = [j] }", {0, 1, 0}},
- {false, "for (const j of x) { function foo() {return j} }", {0, 1, 0}},
+ {true, "for (const j of x) { var [[foo]=[42]] = [] }", top},
+ {true, "for (const j of x) { var foo; foo = j }", top},
+ {true, "for (const j of x) { var foo; [foo] = [j] }", top},
+ {true, "for (const j of x) { var foo; [[foo]=[42]] = [] }", top},
+ {true, "for (const j of x) { let foo; foo = j }", {0, 0, 0}},
+ {true, "for (const j of x) { let foo; [foo] = [j] }", {0, 0, 0}},
+ {true, "for (const j of x) { let foo; [[foo]=[42]] = [] }", {0, 0, 0}},
+ {false, "for (const j of x) { let foo = j }", {0, 0, 0}},
+ {false, "for (const j of x) { let [foo] = [j] }", {0, 0, 0}},
+ {false, "for (const j of x) { const foo = j }", {0, 0, 0}},
+ {false, "for (const j of x) { const [foo] = [j] }", {0, 0, 0}},
+ {false, "for (const j of x) { function foo() {return j} }", {0, 0, 0}},
{true, "for (const {j} of x) { foo = j }", top},
{true, "for (const {j} of x) { [foo] = [j] }", top},
+ {true, "for (const {j} of x) { [[foo]=[42]] = [] }", top},
{true, "for (const {j} of x) { var foo = j }", top},
{true, "for (const {j} of x) { var [foo] = [j] }", top},
- {false, "for (const {j} of x) { let foo = j }", {0, 1, 0}},
- {false, "for (const {j} of x) { let [foo] = [j] }", {0, 1, 0}},
- {false, "for (const {j} of x) { const foo = j }", {0, 1, 0}},
- {false, "for (const {j} of x) { const [foo] = [j] }", {0, 1, 0}},
- {false, "for (const {j} of x) { function foo() {return j} }", {0, 1, 0}},
+ {true, "for (const {j} of x) { var [[foo]=[42]] = [] }", top},
+ {true, "for (const {j} of x) { var foo; foo = j }", top},
+ {true, "for (const {j} of x) { var foo; [foo] = [j] }", top},
+ {true, "for (const {j} of x) { var foo; [[foo]=[42]] = [] }", top},
+ {true, "for (const {j} of x) { let foo; foo = j }", {0, 0, 0}},
+ {true, "for (const {j} of x) { let foo; [foo] = [j] }", {0, 0, 0}},
+ {true, "for (const {j} of x) { let foo; [[foo]=[42]] = [] }", {0, 0, 0}},
+ {false, "for (const {j} of x) { let foo = j }", {0, 0, 0}},
+ {false, "for (const {j} of x) { let [foo] = [j] }", {0, 0, 0}},
+ {false, "for (const {j} of x) { const foo = j }", {0, 0, 0}},
+ {false, "for (const {j} of x) { const [foo] = [j] }", {0, 0, 0}},
+ {false, "for (const {j} of x) { function foo() {return j} }", {0, 0, 0}},
{true, "for (j in x) { foo = j }", top},
{true, "for (j in x) { [foo] = [j] }", top},
+ {true, "for (j in x) { [[foo]=[42]] = [] }", top},
{true, "for (j in x) { var foo = j }", top},
{true, "for (j in x) { var [foo] = [j] }", top},
+ {true, "for (j in x) { var [[foo]=[42]] = [] }", top},
+ {true, "for (j in x) { var foo; foo = j }", top},
+ {true, "for (j in x) { var foo; [foo] = [j] }", top},
+ {true, "for (j in x) { var foo; [[foo]=[42]] = [] }", top},
+ {true, "for (j in x) { let foo; foo = j }", {0}},
+ {true, "for (j in x) { let foo; [foo] = [j] }", {0}},
+ {true, "for (j in x) { let foo; [[foo]=[42]] = [] }", {0}},
{false, "for (j in x) { let foo = j }", {0}},
{false, "for (j in x) { let [foo] = [j] }", {0}},
{false, "for (j in x) { const foo = j }", {0}},
@@ -3718,8 +3917,16 @@ TEST(MaybeAssignedInsideLoop) {
{true, "for ({j} in x) { foo = j }", top},
{true, "for ({j} in x) { [foo] = [j] }", top},
+ {true, "for ({j} in x) { [[foo]=[42]] = [] }", top},
{true, "for ({j} in x) { var foo = j }", top},
{true, "for ({j} in x) { var [foo] = [j] }", top},
+ {true, "for ({j} in x) { var [[foo]=[42]] = [] }", top},
+ {true, "for ({j} in x) { var foo; foo = j }", top},
+ {true, "for ({j} in x) { var foo; [foo] = [j] }", top},
+ {true, "for ({j} in x) { var foo; [[foo]=[42]] = [] }", top},
+ {true, "for ({j} in x) { let foo; foo = j }", {0}},
+ {true, "for ({j} in x) { let foo; [foo] = [j] }", {0}},
+ {true, "for ({j} in x) { let foo; [[foo]=[42]] = [] }", {0}},
{false, "for ({j} in x) { let foo = j }", {0}},
{false, "for ({j} in x) { let [foo] = [j] }", {0}},
{false, "for ({j} in x) { const foo = j }", {0}},
@@ -3728,8 +3935,16 @@ TEST(MaybeAssignedInsideLoop) {
{true, "for (var j in x) { foo = j }", top},
{true, "for (var j in x) { [foo] = [j] }", top},
+ {true, "for (var j in x) { [[foo]=[42]] = [] }", top},
{true, "for (var j in x) { var foo = j }", top},
{true, "for (var j in x) { var [foo] = [j] }", top},
+ {true, "for (var j in x) { var [[foo]=[42]] = [] }", top},
+ {true, "for (var j in x) { var foo; foo = j }", top},
+ {true, "for (var j in x) { var foo; [foo] = [j] }", top},
+ {true, "for (var j in x) { var foo; [[foo]=[42]] = [] }", top},
+ {true, "for (var j in x) { let foo; foo = j }", {0}},
+ {true, "for (var j in x) { let foo; [foo] = [j] }", {0}},
+ {true, "for (var j in x) { let foo; [[foo]=[42]] = [] }", {0}},
{false, "for (var j in x) { let foo = j }", {0}},
{false, "for (var j in x) { let [foo] = [j] }", {0}},
{false, "for (var j in x) { const foo = j }", {0}},
@@ -3738,8 +3953,16 @@ TEST(MaybeAssignedInsideLoop) {
{true, "for (var {j} in x) { foo = j }", top},
{true, "for (var {j} in x) { [foo] = [j] }", top},
+ {true, "for (var {j} in x) { [[foo]=[42]] = [] }", top},
{true, "for (var {j} in x) { var foo = j }", top},
{true, "for (var {j} in x) { var [foo] = [j] }", top},
+ {true, "for (var {j} in x) { var [[foo]=[42]] = [] }", top},
+ {true, "for (var {j} in x) { var foo; foo = j }", top},
+ {true, "for (var {j} in x) { var foo; [foo] = [j] }", top},
+ {true, "for (var {j} in x) { var foo; [[foo]=[42]] = [] }", top},
+ {true, "for (var {j} in x) { let foo; foo = j }", {0}},
+ {true, "for (var {j} in x) { let foo; [foo] = [j] }", {0}},
+ {true, "for (var {j} in x) { let foo; [[foo]=[42]] = [] }", {0}},
{false, "for (var {j} in x) { let foo = j }", {0}},
{false, "for (var {j} in x) { let [foo] = [j] }", {0}},
{false, "for (var {j} in x) { const foo = j }", {0}},
@@ -3748,8 +3971,16 @@ TEST(MaybeAssignedInsideLoop) {
{true, "for (let j in x) { foo = j }", top},
{true, "for (let j in x) { [foo] = [j] }", top},
+ {true, "for (let j in x) { [[foo]=[42]] = [] }", top},
{true, "for (let j in x) { var foo = j }", top},
{true, "for (let j in x) { var [foo] = [j] }", top},
+ {true, "for (let j in x) { var [[foo]=[42]] = [] }", top},
+ {true, "for (let j in x) { var foo; foo = j }", top},
+ {true, "for (let j in x) { var foo; [foo] = [j] }", top},
+ {true, "for (let j in x) { var foo; [[foo]=[42]] = [] }", top},
+ {true, "for (let j in x) { let foo; foo = j }", {0, 0, 0}},
+ {true, "for (let j in x) { let foo; [foo] = [j] }", {0, 0, 0}},
+ {true, "for (let j in x) { let foo; [[foo]=[42]] = [] }", {0, 0, 0}},
{false, "for (let j in x) { let foo = j }", {0, 0, 0}},
{false, "for (let j in x) { let [foo] = [j] }", {0, 0, 0}},
{false, "for (let j in x) { const foo = j }", {0, 0, 0}},
@@ -3758,8 +3989,16 @@ TEST(MaybeAssignedInsideLoop) {
{true, "for (let {j} in x) { foo = j }", top},
{true, "for (let {j} in x) { [foo] = [j] }", top},
+ {true, "for (let {j} in x) { [[foo]=[42]] = [] }", top},
{true, "for (let {j} in x) { var foo = j }", top},
{true, "for (let {j} in x) { var [foo] = [j] }", top},
+ {true, "for (let {j} in x) { var [[foo]=[42]] = [] }", top},
+ {true, "for (let {j} in x) { var foo; foo = j }", top},
+ {true, "for (let {j} in x) { var foo; [foo] = [j] }", top},
+ {true, "for (let {j} in x) { var foo; [[foo]=[42]] = [] }", top},
+ {true, "for (let {j} in x) { let foo; foo = j }", {0, 0, 0}},
+ {true, "for (let {j} in x) { let foo; [foo] = [j] }", {0, 0, 0}},
+ {true, "for (let {j} in x) { let foo; [[foo]=[42]] = [] }", {0, 0, 0}},
{false, "for (let {j} in x) { let foo = j }", {0, 0, 0}},
{false, "for (let {j} in x) { let [foo] = [j] }", {0, 0, 0}},
{false, "for (let {j} in x) { const foo = j }", {0, 0, 0}},
@@ -3768,8 +4007,16 @@ TEST(MaybeAssignedInsideLoop) {
{true, "for (const j in x) { foo = j }", top},
{true, "for (const j in x) { [foo] = [j] }", top},
+ {true, "for (const j in x) { [[foo]=[42]] = [] }", top},
{true, "for (const j in x) { var foo = j }", top},
{true, "for (const j in x) { var [foo] = [j] }", top},
+ {true, "for (const j in x) { var [[foo]=[42]] = [] }", top},
+ {true, "for (const j in x) { var foo; foo = j }", top},
+ {true, "for (const j in x) { var foo; [foo] = [j] }", top},
+ {true, "for (const j in x) { var foo; [[foo]=[42]] = [] }", top},
+ {true, "for (const j in x) { let foo; foo = j }", {0, 0, 0}},
+ {true, "for (const j in x) { let foo; [foo] = [j] }", {0, 0, 0}},
+ {true, "for (const j in x) { let foo; [[foo]=[42]] = [] }", {0, 0, 0}},
{false, "for (const j in x) { let foo = j }", {0, 0, 0}},
{false, "for (const j in x) { let [foo] = [j] }", {0, 0, 0}},
{false, "for (const j in x) { const foo = j }", {0, 0, 0}},
@@ -3778,8 +4025,16 @@ TEST(MaybeAssignedInsideLoop) {
{true, "for (const {j} in x) { foo = j }", top},
{true, "for (const {j} in x) { [foo] = [j] }", top},
+ {true, "for (const {j} in x) { [[foo]=[42]] = [] }", top},
{true, "for (const {j} in x) { var foo = j }", top},
{true, "for (const {j} in x) { var [foo] = [j] }", top},
+ {true, "for (const {j} in x) { var [[foo]=[42]] = [] }", top},
+ {true, "for (const {j} in x) { var foo; foo = j }", top},
+ {true, "for (const {j} in x) { var foo; [foo] = [j] }", top},
+ {true, "for (const {j} in x) { var foo; [[foo]=[42]] = [] }", top},
+ {true, "for (const {j} in x) { let foo; foo = j }", {0, 0, 0}},
+ {true, "for (const {j} in x) { let foo; [foo] = [j] }", {0, 0, 0}},
+ {true, "for (const {j} in x) { let foo; [[foo]=[42]] = [] }", {0, 0, 0}},
{false, "for (const {j} in x) { let foo = j }", {0, 0, 0}},
{false, "for (const {j} in x) { let [foo] = [j] }", {0, 0, 0}},
{false, "for (const {j} in x) { const foo = j }", {0, 0, 0}},
@@ -3788,8 +4043,16 @@ TEST(MaybeAssignedInsideLoop) {
{true, "while (j) { foo = j }", top},
{true, "while (j) { [foo] = [j] }", top},
+ {true, "while (j) { [[foo]=[42]] = [] }", top},
{true, "while (j) { var foo = j }", top},
{true, "while (j) { var [foo] = [j] }", top},
+ {true, "while (j) { var [[foo]=[42]] = [] }", top},
+ {true, "while (j) { var foo; foo = j }", top},
+ {true, "while (j) { var foo; [foo] = [j] }", top},
+ {true, "while (j) { var foo; [[foo]=[42]] = [] }", top},
+ {true, "while (j) { let foo; foo = j }", {0}},
+ {true, "while (j) { let foo; [foo] = [j] }", {0}},
+ {true, "while (j) { let foo; [[foo]=[42]] = [] }", {0}},
{false, "while (j) { let foo = j }", {0}},
{false, "while (j) { let [foo] = [j] }", {0}},
{false, "while (j) { const foo = j }", {0}},
@@ -3798,8 +4061,16 @@ TEST(MaybeAssignedInsideLoop) {
{true, "do { foo = j } while (j)", top},
{true, "do { [foo] = [j] } while (j)", top},
+ {true, "do { [[foo]=[42]] = [] } while (j)", top},
{true, "do { var foo = j } while (j)", top},
{true, "do { var [foo] = [j] } while (j)", top},
+ {true, "do { var [[foo]=[42]] = [] } while (j)", top},
+ {true, "do { var foo; foo = j } while (j)", top},
+ {true, "do { var foo; [foo] = [j] } while (j)", top},
+ {true, "do { var foo; [[foo]=[42]] = [] } while (j)", top},
+ {true, "do { let foo; foo = j } while (j)", {0}},
+ {true, "do { let foo; [foo] = [j] } while (j)", {0}},
+ {true, "do { let foo; [[foo]=[42]] = [] } while (j)", {0}},
{false, "do { let foo = j } while (j)", {0}},
{false, "do { let [foo] = [j] } while (j)", {0}},
{false, "do { const foo = j } while (j)", {0}},
@@ -3965,7 +4236,7 @@ TEST(AsmModuleFlag) {
// The asm.js module should be marked as such.
i::Scope* s = DeserializeFunctionScope(isolate, &zone, m, "f");
- CHECK(s->IsAsmModule() && s->AsDeclarationScope()->asm_module());
+ CHECK(s->IsAsmModule() && s->AsDeclarationScope()->is_asm_module());
}
@@ -4979,6 +5250,8 @@ TEST(StaticClassFieldsNoErrors) {
"static 'a' = 0;",
"static 'a';",
+ "static c = [c] = c",
+
// ASI
"static a = 0\n",
"static a = 0\n b",
@@ -5069,6 +5342,8 @@ TEST(ClassFieldsNoErrors) {
"'a' = 0;",
"'a';",
+ "c = [c] = c",
+
// ASI
"a = 0\n",
"a = 0\n b",
@@ -5122,6 +5397,254 @@ TEST(ClassFieldsNoErrors) {
static_flags, arraysize(static_flags));
}
+TEST(PrivateMethodsNoErrors) {
+ // clang-format off
+  // Tests the proposed private class methods syntax.
+ const char* context_data[][2] = {{"(class {", "});"},
+ {"(class extends Base {", "});"},
+ {"class C {", "}"},
+ {"class C extends Base {", "}"},
+ {nullptr, nullptr}};
+ const char* class_body_data[] = {
+ // Basic syntax
+ "#a() { }",
+ "get #a() { }",
+ "set #a(foo) { }",
+ "*#a() { }",
+ "async #a() { }",
+ "async *#a() { }",
+
+ "#a() { } #b() {}",
+ "get #a() { } set #a(foo) {}",
+ "get #a() { } get #b() {} set #a(foo) {}",
+ "get #a() { } get #b() {} set #a(foo) {} set #b(foo) {}",
+ "set #a(foo) { } set #b(foo) {}",
+ "get #a() { } get #b() {}",
+
+ "#a() { } static a() {}",
+ "#a() { } a() {}",
+ "#a() { } a() {} static a() {}",
+ "get #a() { } get a() {} static get a() {}",
+ "set #a(foo) { } set a(foo) {} static set a(foo) {}",
+
+ "#a() { } get #b() {}",
+ "#a() { } async #b() {}",
+ "#a() { } async *#b() {}",
+
+ // With arguments
+ "#a(...args) { }",
+ "#a(a = 1) { }",
+ "get #a() { }",
+ "set #a(a = (...args) => {}) { }",
+
+ // Misc edge cases
+ "#get() {}",
+ "#set() {}",
+ "#yield() {}",
+ "#await() {}",
+ "#async() {}",
+ "#static() {}",
+ "#arguments() {}",
+ "get #yield() {}",
+ "get #await() {}",
+ "get #async() {}",
+ "get #get() {}",
+ "get #static() {}",
+ "get #arguments() {}",
+ "set #yield(test) {}",
+ "set #async(test) {}",
+ "set #await(test) {}",
+ "set #set(test) {}",
+ "set #static(test) {}",
+ "set #arguments(test) {}",
+ "async #yield() {}",
+ "async #async() {}",
+ "async #await() {}",
+ "async #get() {}",
+ "async #set() {}",
+ "async #static() {}",
+ "async #arguments() {}",
+ "*#async() {}",
+ "*#await() {}",
+ "*#yield() {}",
+ "*#get() {}",
+ "*#set() {}",
+ "*#static() {}",
+ "*#arguments() {}",
+ "async *#yield() {}",
+ "async *#async() {}",
+ "async *#await() {}",
+ "async *#get() {}",
+ "async *#set() {}",
+ "async *#static() {}",
+ "async *#arguments() {}",
+ nullptr
+ };
+ // clang-format on
+
+ RunParserSyncTest(context_data, class_body_data, kError);
+
+ static const ParserFlag private_methods[] = {kAllowHarmonyPrivateFields,
+ kAllowHarmonyPrivateMethods};
+ RunParserSyncTest(context_data, class_body_data, kSuccess, nullptr, 0,
+ private_methods, arraysize(private_methods));
+}
+
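
Each of these parser tests crosses every class-body snippet with every context pair and requires the same verdict from lazy and eager parsing. A rough, self-contained sketch of that cross product; the real RunParserSyncTest additionally varies the ParserFlag sets passed to it and compares the preparser's verdict against the full parser's:

    #include <cstdio>
    #include <string>

    // Rough sketch (not V8's RunParserSyncTest) of the cross product these
    // tests run: each body snippet is spliced into each context pair.
    int main() {
      const char* context_data[][2] = {
          {"(class {", "});"}, {"class C {", "}"}, {nullptr, nullptr}};
      const char* class_body_data[] = {"#a() { }", "get #a() { }", nullptr};
      for (int i = 0; context_data[i][0] != nullptr; ++i) {
        for (int j = 0; class_body_data[j] != nullptr; ++j) {
          std::string program = std::string(context_data[i][0]) + " " +
                                class_body_data[j] + " " + context_data[i][1];
          // Each program would be handed to the parser twice, once lazily
          // and once eagerly, expecting identical success or failure.
          std::printf("%s\n", program.c_str());
        }
      }
      return 0;
    }
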
+TEST(PrivateMethodsAndFieldsNoErrors) {
+ // clang-format off
+  // Tests the proposed private class methods syntax in combination with
+  // fields.
+ const char* context_data[][2] = {{"(class {", "});"},
+ {"(class extends Base {", "});"},
+ {"class C {", "}"},
+ {"class C extends Base {", "}"},
+ {nullptr, nullptr}};
+ const char* class_body_data[] = {
+ // Basic syntax
+ "#b;#a() { }",
+ "#b;get #a() { }",
+ "#b;set #a(foo) { }",
+ "#b;*#a() { }",
+ "#b;async #a() { }",
+ "#b;async *#a() { }",
+ "#b = 1;#a() { }",
+ "#b = 1;get #a() { }",
+ "#b = 1;set #a(foo) { }",
+ "#b = 1;*#a() { }",
+ "#b = 1;async #a() { }",
+ "#b = 1;async *#a() { }",
+
+ // With public fields
+ "a;#a() { }",
+ "a;get #a() { }",
+ "a;set #a(foo) { }",
+ "a;*#a() { }",
+ "a;async #a() { }",
+ "a;async *#a() { }",
+ "a = 1;#a() { }",
+ "a = 1;get #a() { }",
+ "a = 1;set #a(foo) { }",
+ "a = 1;*#a() { }",
+ "a = 1;async #a() { }",
+ "a = 1;async *#a() { }",
+
+ // ASI
+ "#a = 0\n #b(){}",
+ "#a\n *#b(){}",
+ "#a = 0\n get #b(){}",
+ "#a\n *#b(){}",
+
+ "b = 0\n #b(){}",
+ "b\n *#b(){}",
+ "b = 0\n get #b(){}",
+ "b\n *#b(){}",
+ nullptr
+ };
+ // clang-format on
+
+ RunParserSyncTest(context_data, class_body_data, kError);
+
+ static const ParserFlag private_methods_and_fields[] = {
+ kAllowHarmonyPrivateFields, kAllowHarmonyPublicFields,
+ kAllowHarmonyPrivateMethods};
+ RunParserSyncTest(context_data, class_body_data, kSuccess, nullptr, 0,
+ private_methods_and_fields,
+ arraysize(private_methods_and_fields));
+}
+
+TEST(PrivateMethodsErrors) {
+ // clang-format off
+  // Tests erroneous uses of the proposed private class methods syntax.
+ const char* context_data[][2] = {{"(class {", "});"},
+ {"(class extends Base {", "});"},
+ {"class C {", "}"},
+ {"class C extends Base {", "}"},
+ {nullptr, nullptr}};
+ const char* class_body_data[] = {
+ "#a() : 0",
+ "#a() =",
+ "#a() => {}",
+ "#a => {}",
+ "*#a() = 0",
+ "*#a() => 0",
+ "*#a() => {}",
+ "get #a()[]",
+ "yield #a()[]",
+ "yield #a => {}",
+ "async #a() = 0",
+ "async #a => {}",
+ "#a(arguments) {}",
+ "set #a(arguments) {}",
+
+ "#['a']() { }",
+ "get #['a']() { }",
+ "set #['a'](foo) { }",
+ "*#['a']() { }",
+ "async #['a']() { }",
+ "async *#['a]() { }",
+
+ // TODO(joyee): check duplicate accessors
+
+ "#a\n#",
+ "#a() c",
+ "#a() #",
+ "#a(arg) c",
+ "#a(arg) #",
+ "#a(arg) #c",
+ "#a#",
+ "#a#b",
+ "#a#b(){}",
+ "#[test](){}",
+
+ "async *#constructor() {}",
+ "*#constructor() {}",
+ "async #constructor() {}",
+ "set #constructor(test) {}",
+ "#constructor() {}",
+ "get #constructor() {}",
+ nullptr
+ };
+ // clang-format on
+
+ RunParserSyncTest(context_data, class_body_data, kError);
+
+ static const ParserFlag private_methods[] = {kAllowHarmonyPrivateFields,
+ kAllowHarmonyPrivateMethods};
+ RunParserSyncTest(context_data, class_body_data, kError, nullptr, 0,
+ private_methods, arraysize(private_methods));
+}
+
+// Tests that private member syntax does not parse outside class bodies.
+TEST(PrivateMembersInNonClassNoErrors) {
+ // clang-format off
+ const char* context_data[][2] = {{"", ""},
+ {"({", "})"},
+ {"'use strict'; ({", "});"},
+ {"function() {", "}"},
+ {"() => {", "}"},
+ {"class C { test() {", "} }"},
+ {nullptr, nullptr}};
+ const char* class_body_data[] = {
+ "#a = 1",
+ "#a = () => {}",
+ "#a",
+ "#a() { }",
+ "get #a() { }",
+ "set #a(foo) { }",
+ "*#a() { }",
+ "async #a() { }",
+ "async *#a() { }",
+ nullptr
+ };
+ // clang-format on
+
+ RunParserSyncTest(context_data, class_body_data, kError);
+
+ static const ParserFlag private_methods[] = {kAllowHarmonyPrivateFields,
+ kAllowHarmonyPrivateMethods};
+ RunParserSyncTest(context_data, class_body_data, kError, nullptr, 0,
+ private_methods, arraysize(private_methods));
+}
+
TEST(PrivateClassFieldsNoErrors) {
// clang-format off
// Tests proposed class fields syntax.
@@ -5222,6 +5745,8 @@ TEST(StaticClassFieldsErrors) {
"static a b",
"static a = 0 b",
+ "static c = [1] = [c]",
+
// ASI requires that the next token is not part of any legal production
"static a = 0\n *b(){}",
"static a = 0\n ['b'](){}",
@@ -5270,6 +5795,8 @@ TEST(ClassFieldsErrors) {
"a b",
"a = 0 b",
+ "c = [1] = [c]",
+
// ASI requires that the next token is not part of any legal production
"a = 0\n *b(){}",
"a = 0\n ['b'](){}",
@@ -5306,6 +5833,10 @@ TEST(PrivateClassFieldsErrors) {
"#async a = 0",
"#async a",
+ "#a; #a",
+ "#a = 1; #a",
+ "#a; #a = 1;",
+
"#constructor",
"#constructor = function() {}",
@@ -5365,7 +5896,7 @@ TEST(PrivateClassFieldsErrors) {
private_fields, arraysize(private_fields));
}
-TEST(PrivateStaticClassFieldsErrors) {
+TEST(PrivateStaticClassFieldsNoErrors) {
// clang-format off
// Tests proposed class fields syntax.
const char* context_data[][2] = {{"(class {", "});"},
@@ -5386,6 +5917,71 @@ TEST(PrivateStaticClassFieldsErrors) {
"static #a; b(){}",
"static #a; *b(){}",
"static #a; ['b'](){}",
+
+ "#prototype",
+ "#prototype = function() {}",
+
+ // ASI
+ "static #a = 0\n",
+ "static #a = 0\n b",
+ "static #a = 0\n #b",
+ "static #a = 0\n b(){}",
+ "static #a\n",
+ "static #a\n b\n",
+ "static #a\n #b\n",
+ "static #a\n b(){}",
+ "static #a\n *b(){}",
+ "static #a\n ['b'](){}",
+
+ "static #a = function t() { arguments; }",
+ "static #a = () => function t() { arguments; }",
+
+ // ASI edge cases
+ "static #a\n get",
+ "static #get\n *a(){}",
+ "static #a\n static",
+
+ // Misc edge cases
+ "static #yield",
+ "static #yield = 0",
+ "static #yield\n a",
+ "static #async;",
+ "static #async = 0;",
+ "static #async",
+ "static #async = 0",
+ "static #async\n a(){}", // a field named async, and a method named a.
+ "static #async\n a",
+ "static #await;",
+ "static #await = 0;",
+ "static #await\n a",
+ nullptr
+ };
+ // clang-format on
+
+ RunParserSyncTest(context_data, class_body_data, kError);
+
+ static const ParserFlag public_static_fields[] = {kAllowHarmonyPublicFields,
+ kAllowHarmonyStaticFields};
+ RunParserSyncTest(context_data, class_body_data, kError, nullptr, 0,
+ public_static_fields, arraysize(public_static_fields));
+
+ static const ParserFlag private_static_fields[] = {
+ kAllowHarmonyPublicFields, kAllowHarmonyStaticFields,
+ kAllowHarmonyPrivateFields};
+ RunParserSyncTest(context_data, class_body_data, kSuccess, nullptr, 0,
+ private_static_fields, arraysize(private_static_fields));
+}
+
+TEST(PrivateStaticClassFieldsErrors) {
+ // clang-format off
+ // Tests proposed class fields syntax.
+ const char* context_data[][2] = {{"(class {", "});"},
+ {"(class extends Base {", "});"},
+ {"class C {", "}"},
+ {"class C extends Base {", "}"},
+ {nullptr, nullptr}};
+ const char* class_body_data[] = {
+ // Basic syntax
"static #['a'] = 0;",
"static #['a'] = 0; b",
"static #['a'] = 0; #b",
@@ -5405,28 +6001,31 @@ TEST(PrivateStaticClassFieldsErrors) {
"static #'a';",
"static # a = 0",
+ "static #get a() { }",
+ "static #set a() { }",
+ "static #*a() { }",
+ "static async #*a() { }",
+
+ "#a = arguments",
+ "#a = () => arguments",
+ "#a = () => { arguments }",
+ "#a = arguments[0]",
+ "#a = delete arguments[0]",
+ "#a = f(arguments)",
+ "#a = () => () => arguments",
+
+ "#a; static #a",
+ "static #a; #a",
+
+ // TODO(joyee): support static private methods
"static #a() { }",
"static get #a() { }",
- "static #get a() { }",
"static set #a() { }",
- "static #set a() { }",
"static *#a() { }",
- "static #*a() { }",
"static async #a() { }",
"static async *#a() { }",
- "static async #*a() { }",
// ASI
- "static #a = 0\n",
- "static #a = 0\n b",
- "static #a = 0\n #b",
- "static #a = 0\n b(){}",
- "static #a\n",
- "static #a\n b\n",
- "static #a\n #b\n",
- "static #a\n b(){}",
- "static #a\n *b(){}",
- "static #a\n ['b'](){}",
"static #['a'] = 0\n",
"static #['a'] = 0\n b",
"static #['a'] = 0\n #b",
@@ -5438,27 +6037,34 @@ TEST(PrivateStaticClassFieldsErrors) {
"static #['a']\n *b(){}",
"static #['a']\n ['b'](){}",
- "static #a = function t() { arguments; }",
- "static #a = () => function t() { arguments; }",
+ // ASI requires a linebreak
+ "static #a b",
+ "static #a = 0 b",
- // ASI edge cases
- "static #a\n get",
- "static #get\n *a(){}",
- "static #a\n static",
+ // ASI requires that the next token is not part of any legal production
+ "static #a = 0\n *b(){}",
+ "static #a = 0\n ['b'](){}",
+
+ "static #a : 0",
+ "static #a =",
+ "static #*a = 0",
+ "static #*a",
+ "static #get a",
+ "static #yield a",
+ "static #async a = 0",
+ "static #async a",
+ "static # a = 0",
- // Misc edge cases
- "static #yield",
- "static #yield = 0",
- "static #yield\n a",
- "static #async;",
- "static #async = 0;",
- "static #async",
- "static #async = 0",
- "static #async\n a(){}", // a field named async, and a method named a.
- "static #async\n a",
- "static #await;",
- "static #await = 0;",
- "static #await\n a",
+ "#constructor",
+ "#constructor = function() {}",
+
+ "foo() { delete this.#a }",
+ "foo() { delete this.x.#a }",
+ "foo() { delete this.x().#a }",
+
+ "foo() { delete f.#a }",
+ "foo() { delete f.x.#a }",
+ "foo() { delete f.x().#a }",
nullptr
};
// clang-format on
@@ -6578,6 +7184,41 @@ TEST(BasicImportExportParsing) {
}
}
+TEST(NamespaceExportParsing) {
+ // clang-format off
+ const char* kSources[] = {
+ "export * as arguments from 'bar'",
+ "export * as await from 'bar'",
+ "export * as default from 'bar'",
+ "export * as enum from 'bar'",
+ "export * as foo from 'bar'",
+ "export * as for from 'bar'",
+ "export * as let from 'bar'",
+ "export * as static from 'bar'",
+ "export * as yield from 'bar'",
+ };
+ // clang-format on
+
+ i::FLAG_harmony_namespace_exports = true;
+ i::Isolate* isolate = CcTest::i_isolate();
+ i::Factory* factory = isolate->factory();
+
+ v8::HandleScope handles(CcTest::isolate());
+ v8::Local<v8::Context> context = v8::Context::New(CcTest::isolate());
+ v8::Context::Scope context_scope(context);
+
+ isolate->stack_guard()->SetStackLimit(i::GetCurrentStackPosition() -
+ 128 * 1024);
+
+ for (unsigned i = 0; i < arraysize(kSources); ++i) {
+ i::Handle<i::String> source =
+ factory->NewStringFromAsciiChecked(kSources[i]);
+ i::Handle<i::Script> script = factory->NewScript(source);
+ i::ParseInfo info(isolate, script);
+ info.set_module();
+ CHECK(i::parsing::ParseProgram(&info, isolate));
+ }
+}
TEST(ImportExportParsingErrors) {
// clang-format off
@@ -6643,6 +7284,13 @@ TEST(ImportExportParsingErrors) {
"import * as x, * as y from 'm.js';",
"import {x}, {y} from 'm.js';",
"import * as x, {y} from 'm.js';",
+
+ "export *;",
+ "export * as;",
+ "export * as foo;",
+ "export * as foo from;",
+ "export * as foo from ';",
+ "export * as ,foo from 'bar'",
};
// clang-format on
@@ -6908,103 +7556,85 @@ TEST(ModuleParsingInternals) {
i::Declaration::List* declarations = module_scope->declarations();
CHECK_EQ(13, declarations->LengthForTest());
- CHECK(declarations->AtForTest(0)->proxy()->raw_name()->IsOneByteEqualTo("x"));
- CHECK(declarations->AtForTest(0)->proxy()->var()->mode() ==
- i::VariableMode::kLet);
- CHECK(declarations->AtForTest(0)->proxy()->var()->binding_needs_init());
- CHECK(declarations->AtForTest(0)->proxy()->var()->location() ==
+ CHECK(declarations->AtForTest(0)->var()->raw_name()->IsOneByteEqualTo("x"));
+ CHECK(declarations->AtForTest(0)->var()->mode() == i::VariableMode::kLet);
+ CHECK(declarations->AtForTest(0)->var()->binding_needs_init());
+ CHECK(declarations->AtForTest(0)->var()->location() ==
i::VariableLocation::MODULE);
- CHECK(declarations->AtForTest(1)->proxy()->raw_name()->IsOneByteEqualTo("z"));
- CHECK(declarations->AtForTest(1)->proxy()->var()->mode() ==
- i::VariableMode::kConst);
- CHECK(declarations->AtForTest(1)->proxy()->var()->binding_needs_init());
- CHECK(declarations->AtForTest(1)->proxy()->var()->location() ==
+ CHECK(declarations->AtForTest(1)->var()->raw_name()->IsOneByteEqualTo("z"));
+ CHECK(declarations->AtForTest(1)->var()->mode() == i::VariableMode::kConst);
+ CHECK(declarations->AtForTest(1)->var()->binding_needs_init());
+ CHECK(declarations->AtForTest(1)->var()->location() ==
i::VariableLocation::MODULE);
- CHECK(declarations->AtForTest(2)->proxy()->raw_name()->IsOneByteEqualTo("n"));
- CHECK(declarations->AtForTest(2)->proxy()->var()->mode() ==
- i::VariableMode::kConst);
- CHECK(declarations->AtForTest(2)->proxy()->var()->binding_needs_init());
- CHECK(declarations->AtForTest(2)->proxy()->var()->location() ==
+ CHECK(declarations->AtForTest(2)->var()->raw_name()->IsOneByteEqualTo("n"));
+ CHECK(declarations->AtForTest(2)->var()->mode() == i::VariableMode::kConst);
+ CHECK(declarations->AtForTest(2)->var()->binding_needs_init());
+ CHECK(declarations->AtForTest(2)->var()->location() ==
i::VariableLocation::MODULE);
- CHECK(
- declarations->AtForTest(3)->proxy()->raw_name()->IsOneByteEqualTo("foo"));
- CHECK(declarations->AtForTest(3)->proxy()->var()->mode() ==
- i::VariableMode::kVar);
- CHECK(!declarations->AtForTest(3)->proxy()->var()->binding_needs_init());
- CHECK(declarations->AtForTest(3)->proxy()->var()->location() ==
+ CHECK(declarations->AtForTest(3)->var()->raw_name()->IsOneByteEqualTo("foo"));
+ CHECK(declarations->AtForTest(3)->var()->mode() == i::VariableMode::kVar);
+ CHECK(!declarations->AtForTest(3)->var()->binding_needs_init());
+ CHECK(declarations->AtForTest(3)->var()->location() ==
i::VariableLocation::MODULE);
- CHECK(
- declarations->AtForTest(4)->proxy()->raw_name()->IsOneByteEqualTo("goo"));
- CHECK(declarations->AtForTest(4)->proxy()->var()->mode() ==
- i::VariableMode::kLet);
- CHECK(!declarations->AtForTest(4)->proxy()->var()->binding_needs_init());
- CHECK(declarations->AtForTest(4)->proxy()->var()->location() ==
+ CHECK(declarations->AtForTest(4)->var()->raw_name()->IsOneByteEqualTo("goo"));
+ CHECK(declarations->AtForTest(4)->var()->mode() == i::VariableMode::kLet);
+ CHECK(!declarations->AtForTest(4)->var()->binding_needs_init());
+ CHECK(declarations->AtForTest(4)->var()->location() ==
i::VariableLocation::MODULE);
- CHECK(
- declarations->AtForTest(5)->proxy()->raw_name()->IsOneByteEqualTo("hoo"));
- CHECK(declarations->AtForTest(5)->proxy()->var()->mode() ==
- i::VariableMode::kLet);
- CHECK(declarations->AtForTest(5)->proxy()->var()->binding_needs_init());
- CHECK(declarations->AtForTest(5)->proxy()->var()->location() ==
+ CHECK(declarations->AtForTest(5)->var()->raw_name()->IsOneByteEqualTo("hoo"));
+ CHECK(declarations->AtForTest(5)->var()->mode() == i::VariableMode::kLet);
+ CHECK(declarations->AtForTest(5)->var()->binding_needs_init());
+ CHECK(declarations->AtForTest(5)->var()->location() ==
i::VariableLocation::MODULE);
- CHECK(
- declarations->AtForTest(6)->proxy()->raw_name()->IsOneByteEqualTo("joo"));
- CHECK(declarations->AtForTest(6)->proxy()->var()->mode() ==
- i::VariableMode::kConst);
- CHECK(declarations->AtForTest(6)->proxy()->var()->binding_needs_init());
- CHECK(declarations->AtForTest(6)->proxy()->var()->location() ==
+ CHECK(declarations->AtForTest(6)->var()->raw_name()->IsOneByteEqualTo("joo"));
+ CHECK(declarations->AtForTest(6)->var()->mode() == i::VariableMode::kConst);
+ CHECK(declarations->AtForTest(6)->var()->binding_needs_init());
+ CHECK(declarations->AtForTest(6)->var()->location() ==
i::VariableLocation::MODULE);
- CHECK(declarations->AtForTest(7)->proxy()->raw_name()->IsOneByteEqualTo(
+ CHECK(declarations->AtForTest(7)->var()->raw_name()->IsOneByteEqualTo(
"*default*"));
- CHECK(declarations->AtForTest(7)->proxy()->var()->mode() ==
- i::VariableMode::kConst);
- CHECK(declarations->AtForTest(7)->proxy()->var()->binding_needs_init());
- CHECK(declarations->AtForTest(7)->proxy()->var()->location() ==
+ CHECK(declarations->AtForTest(7)->var()->mode() == i::VariableMode::kConst);
+ CHECK(declarations->AtForTest(7)->var()->binding_needs_init());
+ CHECK(declarations->AtForTest(7)->var()->location() ==
i::VariableLocation::MODULE);
- CHECK(declarations->AtForTest(8)->proxy()->raw_name()->IsOneByteEqualTo(
+ CHECK(declarations->AtForTest(8)->var()->raw_name()->IsOneByteEqualTo(
"nonexport"));
- CHECK(!declarations->AtForTest(8)->proxy()->var()->binding_needs_init());
- CHECK(declarations->AtForTest(8)->proxy()->var()->location() ==
+ CHECK(!declarations->AtForTest(8)->var()->binding_needs_init());
+ CHECK(declarations->AtForTest(8)->var()->location() ==
i::VariableLocation::LOCAL);
- CHECK(
- declarations->AtForTest(9)->proxy()->raw_name()->IsOneByteEqualTo("mm"));
- CHECK(declarations->AtForTest(9)->proxy()->var()->mode() ==
- i::VariableMode::kConst);
- CHECK(declarations->AtForTest(9)->proxy()->var()->binding_needs_init());
- CHECK(declarations->AtForTest(9)->proxy()->var()->location() ==
+ CHECK(declarations->AtForTest(9)->var()->raw_name()->IsOneByteEqualTo("mm"));
+ CHECK(declarations->AtForTest(9)->var()->mode() == i::VariableMode::kConst);
+ CHECK(declarations->AtForTest(9)->var()->binding_needs_init());
+ CHECK(declarations->AtForTest(9)->var()->location() ==
i::VariableLocation::MODULE);
- CHECK(
- declarations->AtForTest(10)->proxy()->raw_name()->IsOneByteEqualTo("aa"));
- CHECK(declarations->AtForTest(10)->proxy()->var()->mode() ==
- i::VariableMode::kConst);
- CHECK(declarations->AtForTest(10)->proxy()->var()->binding_needs_init());
- CHECK(declarations->AtForTest(10)->proxy()->var()->location() ==
+ CHECK(declarations->AtForTest(10)->var()->raw_name()->IsOneByteEqualTo("aa"));
+ CHECK(declarations->AtForTest(10)->var()->mode() == i::VariableMode::kConst);
+ CHECK(declarations->AtForTest(10)->var()->binding_needs_init());
+ CHECK(declarations->AtForTest(10)->var()->location() ==
i::VariableLocation::MODULE);
- CHECK(declarations->AtForTest(11)->proxy()->raw_name()->IsOneByteEqualTo(
- "loo"));
- CHECK(declarations->AtForTest(11)->proxy()->var()->mode() ==
- i::VariableMode::kConst);
- CHECK(!declarations->AtForTest(11)->proxy()->var()->binding_needs_init());
- CHECK(declarations->AtForTest(11)->proxy()->var()->location() !=
+ CHECK(
+ declarations->AtForTest(11)->var()->raw_name()->IsOneByteEqualTo("loo"));
+ CHECK(declarations->AtForTest(11)->var()->mode() == i::VariableMode::kConst);
+ CHECK(!declarations->AtForTest(11)->var()->binding_needs_init());
+ CHECK(declarations->AtForTest(11)->var()->location() !=
i::VariableLocation::MODULE);
- CHECK(declarations->AtForTest(12)->proxy()->raw_name()->IsOneByteEqualTo(
- "foob"));
- CHECK(declarations->AtForTest(12)->proxy()->var()->mode() ==
- i::VariableMode::kConst);
- CHECK(!declarations->AtForTest(12)->proxy()->var()->binding_needs_init());
- CHECK(declarations->AtForTest(12)->proxy()->var()->location() ==
+ CHECK(
+ declarations->AtForTest(12)->var()->raw_name()->IsOneByteEqualTo("foob"));
+ CHECK(declarations->AtForTest(12)->var()->mode() == i::VariableMode::kConst);
+ CHECK(!declarations->AtForTest(12)->var()->binding_needs_init());
+ CHECK(declarations->AtForTest(12)->var()->location() ==
i::VariableLocation::MODULE);
i::ModuleDescriptor* descriptor = module_scope->module();
@@ -7040,31 +7670,31 @@ TEST(ModuleParsingInternals) {
CHECK_EQ(8u, descriptor->regular_exports().size());
entry = descriptor->regular_exports()
- .find(declarations->AtForTest(3)->proxy()->raw_name())
+ .find(declarations->AtForTest(3)->var()->raw_name())
->second;
CheckEntry(entry, "foo", "foo", nullptr, -1);
entry = descriptor->regular_exports()
- .find(declarations->AtForTest(4)->proxy()->raw_name())
+ .find(declarations->AtForTest(4)->var()->raw_name())
->second;
CheckEntry(entry, "goo", "goo", nullptr, -1);
entry = descriptor->regular_exports()
- .find(declarations->AtForTest(5)->proxy()->raw_name())
+ .find(declarations->AtForTest(5)->var()->raw_name())
->second;
CheckEntry(entry, "hoo", "hoo", nullptr, -1);
entry = descriptor->regular_exports()
- .find(declarations->AtForTest(6)->proxy()->raw_name())
+ .find(declarations->AtForTest(6)->var()->raw_name())
->second;
CheckEntry(entry, "joo", "joo", nullptr, -1);
entry = descriptor->regular_exports()
- .find(declarations->AtForTest(7)->proxy()->raw_name())
+ .find(declarations->AtForTest(7)->var()->raw_name())
->second;
CheckEntry(entry, "default", "*default*", nullptr, -1);
entry = descriptor->regular_exports()
- .find(declarations->AtForTest(12)->proxy()->raw_name())
+ .find(declarations->AtForTest(12)->var()->raw_name())
->second;
CheckEntry(entry, "foob", "foob", nullptr, -1);
// TODO(neis): The next lines are terrible. Find a better way.
- auto name_x = declarations->AtForTest(0)->proxy()->raw_name();
+ auto name_x = declarations->AtForTest(0)->var()->raw_name();
CHECK_EQ(2u, descriptor->regular_exports().count(name_x));
auto it = descriptor->regular_exports().equal_range(name_x).first;
entry = it->second;
@@ -7085,19 +7715,19 @@ TEST(ModuleParsingInternals) {
CHECK_EQ(4u, descriptor->regular_imports().size());
entry = descriptor->regular_imports()
- .find(declarations->AtForTest(1)->proxy()->raw_name())
+ .find(declarations->AtForTest(1)->var()->raw_name())
->second;
CheckEntry(entry, nullptr, "z", "q", 0);
entry = descriptor->regular_imports()
- .find(declarations->AtForTest(2)->proxy()->raw_name())
+ .find(declarations->AtForTest(2)->var()->raw_name())
->second;
CheckEntry(entry, nullptr, "n", "default", 1);
entry = descriptor->regular_imports()
- .find(declarations->AtForTest(9)->proxy()->raw_name())
+ .find(declarations->AtForTest(9)->var()->raw_name())
->second;
CheckEntry(entry, nullptr, "mm", "m", 0);
entry = descriptor->regular_imports()
- .find(declarations->AtForTest(10)->proxy()->raw_name())
+ .find(declarations->AtForTest(10)->var()->raw_name())
->second;
CheckEntry(entry, nullptr, "aa", "aa", 0);
}
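
For reference, CheckEntry(entry, export_name, local_name, import_name, module_request) asserts four properties of a module descriptor entry, with nullptr marking an absent name and -1 an absent module request. An illustrative sketch of that shape; the field names here are assumptions for exposition, not V8's exact layout:

    #include <cstdio>

    // Hypothetical mirror of what CheckEntry() inspects on one entry.
    struct EntrySketch {
      const char* export_name;
      const char* local_name;
      const char* import_name;
      int module_request;
    };

    int main() {
      // Mirrors CheckEntry(entry, "default", "*default*", nullptr, -1): the
      // default export binds the synthetic local "*default*", imports
      // nothing, and therefore carries no module request.
      EntrySketch entry{"default", "*default*", nullptr, -1};
      std::printf("export=%s local=%s request=%d\n", entry.export_name,
                  entry.local_name, entry.module_request);
      return 0;
    }
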
@@ -9664,7 +10294,6 @@ TEST(ArgumentsRedeclaration) {
// Test that lazily parsed inner functions don't result in overly pessimistic
// context allocations.
TEST(NoPessimisticContextAllocation) {
- i::FLAG_lazy_inner_functions = true;
i::Isolate* isolate = CcTest::i_isolate();
i::Factory* factory = isolate->factory();
i::HandleScope scope(isolate);
@@ -9945,7 +10574,7 @@ TEST(NoPessimisticContextAllocation) {
DCHECK(scope->is_function_scope());
const i::AstRawString* var_name =
info.ast_value_factory()->GetOneByteString("my_var");
- i::Variable* var = scope->Lookup(var_name);
+ i::Variable* var = scope->LookupForTesting(var_name);
CHECK_EQ(inners[i].ctxt_allocate,
i::ScopeTestHelper::MustAllocateInContext(var));
}
@@ -10674,6 +11303,7 @@ TEST(PrivateNamesSyntaxError) {
LocalContext env;
auto test = [isolate](const char* program, bool is_lazy) {
+ i::FLAG_harmony_private_fields = true;
i::Factory* const factory = isolate->factory();
i::Handle<i::String> source =
factory->NewStringFromUtf8(i::CStrVector(program)).ToHandleChecked();
@@ -10681,7 +11311,6 @@ TEST(PrivateNamesSyntaxError) {
i::ParseInfo info(isolate, script);
info.set_allow_lazy_parsing(is_lazy);
- i::FLAG_harmony_private_fields = true;
CHECK(i::parsing::ParseProgram(&info, isolate));
CHECK(i::Rewriter::Rewrite(&info));
CHECK(!i::DeclarationScope::Analyze(&info));
@@ -10769,6 +11398,84 @@ TEST(PrivateNamesSyntaxError) {
}
}
+TEST(HashbangSyntax) {
+ const char* context_data[][2] = {
+ {"#!\n", ""},
+ {"#!---IGNORED---\n", ""},
+ {"#!---IGNORED---\r", ""},
+ {"#!---IGNORED---\xE2\x80\xA8", ""}, // <U+2028>
+ {"#!---IGNORED---\xE2\x80\xA9", ""}, // <U+2029>
+ {nullptr, nullptr}};
+
+ const char* data[] = {"function\nFN\n(\n)\n {\n}\nFN();", nullptr};
+
+ i::FLAG_harmony_hashbang = true;
+ RunParserSyncTest(context_data, data, kSuccess);
+ RunParserSyncTest(context_data, data, kSuccess, nullptr, 0, nullptr, 0,
+ nullptr, 0, true);
+
+ i::FLAG_harmony_hashbang = false;
+ RunParserSyncTest(context_data, data, kError);
+ RunParserSyncTest(context_data, data, kError, nullptr, 0, nullptr, 0, nullptr,
+ 0, true);
+}
+
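
The rule these contexts encode: a hashbang comment is recognized only when "#!" forms the very first two code points of the source, and it extends up to the next line terminator (\n, \r, U+2028, or U+2029, as the context list above shows). A simplified sketch of that skip, handling only '\n':

    #include <cstdio>
    #include <string>

    // Simplified model of the hashbang rule: only honored at source offset
    // 0, consuming through the next '\n'. Escaped forms of '#' or '!' (as in
    // the invalid cases below) never count as a hashbang.
    std::string StripHashbang(const std::string& source) {
      if (source.size() >= 2 && source[0] == '#' && source[1] == '!') {
        size_t end = source.find('\n');
        return end == std::string::npos ? "" : source.substr(end + 1);
      }
      return source;  // No hashbang: scan from the start.
    }

    int main() {
      std::printf("%s\n",
                  StripHashbang("#!---IGNORED---\nfunction f(){}").c_str());
      std::printf("%s\n", StripHashbang("function g(){}").c_str());
      return 0;
    }
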
+TEST(HashbangSyntaxErrors) {
+ const char* file_context_data[][2] = {{"", ""}, {nullptr, nullptr}};
+ const char* other_context_data[][2] = {{"/**/", ""},
+ {"//---\n", ""},
+ {";", ""},
+ {"function fn() {", "}"},
+ {"function* fn() {", "}"},
+ {"async function fn() {", "}"},
+ {"async function* fn() {", "}"},
+ {"() => {", "}"},
+ {"() => ", ""},
+ {"function fn(a = ", ") {}"},
+ {"function* fn(a = ", ") {}"},
+ {"async function fn(a = ", ") {}"},
+ {"async function* fn(a = ", ") {}"},
+ {"(a = ", ") => {}"},
+ {"(a = ", ") => a"},
+ {"class k {", "}"},
+ {"[", "]"},
+ {"{", "}"},
+ {"({", "})"},
+ {nullptr, nullptr}};
+
+ const char* invalid_hashbang_data[] = {// Encoded characters are not allowed
+                                         "#\\u0021\n",
+ "#\\u{21}\n",
+ "#\\x21\n",
+ "#\\041\n",
+ "\\u0023!\n",
+ "\\u{23}!\n",
+ "\\x23!\n",
+ "\\043!\n",
+ "\\u0023\\u0021\n",
+
+ "\n#!---IGNORED---\n",
+ " #!---IGNORED---\n",
+ nullptr};
+ const char* hashbang_data[] = {"#!\n", "#!---IGNORED---\n", nullptr};
+
+ auto SyntaxErrorTest = [](const char* context_data[][2], const char* data[]) {
+ i::FLAG_harmony_hashbang = true;
+ RunParserSyncTest(context_data, data, kError);
+ RunParserSyncTest(context_data, data, kError, nullptr, 0, nullptr, 0,
+ nullptr, 0, true);
+
+ i::FLAG_harmony_hashbang = false;
+ RunParserSyncTest(context_data, data, kError);
+ RunParserSyncTest(context_data, data, kError, nullptr, 0, nullptr, 0,
+ nullptr, 0, true);
+ };
+
+ SyntaxErrorTest(file_context_data, invalid_hashbang_data);
+ SyntaxErrorTest(other_context_data, invalid_hashbang_data);
+ SyntaxErrorTest(other_context_data, hashbang_data);
+}
+
} // namespace test_parsing
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/cctest/test-poison-disasm-arm.cc b/deps/v8/test/cctest/test-poison-disasm-arm.cc
index 7a3238eea1..8b883cad4f 100644
--- a/deps/v8/test/cctest/test-poison-disasm-arm.cc
+++ b/deps/v8/test/cctest/test-poison-disasm-arm.cc
@@ -30,6 +30,29 @@ std::string DisassembleFunction(const char* function) {
return os.str();
}
+struct Matchers {
+ std::string start = "0x[0-9a-f]+ +[0-9a-f]+ +[0-9a-f]+ +";
+ std::regex map_load_re =
+ std::regex(start + "ldr r([0-9]+), \\[r([0-9]+), #-1\\]");
+ std::regex load_const_re = std::regex(start + "ldr r([0-9]+), \\[pc, .*");
+ std::regex cmp_re = std::regex(start + "cmp r([0-9]+), r([0-9]+)");
+ std::regex bne_re = std::regex(start + "bne (.*)");
+ std::regex beq_re = std::regex(start + "beq (.*)");
+ std::regex b_re = std::regex(start + "b (.*)");
+ std::regex eorne_re =
+ std::regex(start + "eorne r([0-9]+), r([0-9]+), r([0-9]+)");
+ std::regex eoreq_re =
+ std::regex(start + "eoreq r([0-9]+), r([0-9]+), r([0-9]+)");
+ std::regex csdb_re = std::regex(start + "csdb");
+ std::regex load_field_re =
+ std::regex(start + "ldr r([0-9]+), \\[r([0-9]+), #\\+[0-9]+\\]");
+ std::regex mask_re =
+ std::regex(start + "and r([0-9]+), r([0-9]+), r([0-9]+)");
+ std::regex untag_re = std::regex(start + "mov r([0-9]+), r([0-9]+), asr #1");
+
+ std::string poison_reg = "9";
+};
+
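
The new Matchers struct deduplicates the regexes that both poison tests walk the disassembly with. A self-contained sketch of the matching step, using a made-up disassembly line in the same address/offset/instruction-word/mnemonic format:

    #include <iostream>
    #include <regex>
    #include <string>

    int main() {
      // Same shape as Matchers: a line prefix (address, offset, instruction
      // word) followed by the mnemonic; register numbers are captured.
      std::string start = "0x[0-9a-f]+ +[0-9a-f]+ +[0-9a-f]+ +";
      std::regex cmp_re(start + "cmp r([0-9]+), r([0-9]+)");
      // A fabricated line for illustration, not real disassembler output.
      std::string line = "0x2b8a0f20  20  e1510002  cmp r1, r2";
      std::smatch match;
      if (std::regex_match(line, match, cmp_re)) {
        std::cout << "lhs=r" << match[1] << " rhs=r" << match[2] << "\n";
      }
      return 0;
    }
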
TEST(DisasmPoisonMonomorphicLoad) {
#ifdef ENABLE_DISASSEMBLER
if (i::FLAG_always_opt || !i::FLAG_opt) return;
@@ -47,25 +70,14 @@ TEST(DisasmPoisonMonomorphicLoad) {
"%OptimizeFunctionOnNextCall(mono);"
"mono({ x : 1 });");
- std::string start("0x[0-9a-f]+ +[0-9a-f]+ +[0-9a-f]+ +");
- std::regex map_load_re(start + "ldr r([0-9]+), \\[r([0-9]+), #-1\\]");
- std::regex load_const_re(start + "ldr r([0-9]+), \\[pc, .*");
- std::regex cmp_re(start + "cmp r([0-9]+), r([0-9]+)");
- std::regex bne_re(start + "bne(.*)");
- std::regex eorne_re(start + "eorne r([0-9]+), r([0-9]+), r([0-9]+)");
- std::regex csdb_re(start + "csdb");
- std::regex load_field_re(start +
- "ldr r([0-9]+), \\[r([0-9]+), #\\+[0-9]+\\]");
- std::regex mask_re(start + "and r([0-9]+), r([0-9]+), r([0-9]+)");
-
- std::string poison_reg = "9";
+ Matchers m;
std::smatch match;
std::string line;
std::istringstream reader(DisassembleFunction("mono"));
bool poisoning_sequence_found = false;
while (std::getline(reader, line)) {
- if (std::regex_match(line, match, map_load_re)) {
+ if (std::regex_match(line, match, m.map_load_re)) {
std::string map_reg = match[1];
std::string object_reg = match[2];
// Matches that the property access sequence is instrumented with
@@ -81,34 +93,194 @@ TEST(DisasmPoisonMonomorphicLoad) {
// and r0, r0, r9 ; apply the poison
CHECK(std::getline(reader, line));
- CHECK(std::regex_match(line, match, load_const_re));
+ CHECK(std::regex_match(line, match, m.load_const_re));
+
+ CHECK(std::getline(reader, line));
+ CHECK(std::regex_match(line, match, m.cmp_re));
+ CHECK_EQ(match[1], map_reg);
+
+ CHECK(std::getline(reader, line));
+ CHECK(std::regex_match(line, match, m.bne_re));
+
+ CHECK(std::getline(reader, line));
+ CHECK(std::regex_match(line, match, m.eorne_re));
+ CHECK_EQ(match[1], m.poison_reg);
+ CHECK_EQ(match[2], m.poison_reg);
+ CHECK_EQ(match[3], m.poison_reg);
+
+ CHECK(std::getline(reader, line));
+ CHECK(std::regex_match(line, match, m.csdb_re));
+
+ CHECK(std::getline(reader, line));
+ CHECK(std::regex_match(line, match, m.load_field_re));
+ CHECK_EQ(match[2], object_reg);
+ std::string field_reg = match[1];
+
+ CHECK(std::getline(reader, line));
+ CHECK(std::regex_match(line, match, m.mask_re));
+ CHECK_EQ(match[1], field_reg);
+ CHECK_EQ(match[2], field_reg);
+ CHECK_EQ(match[3], m.poison_reg);
+
+ poisoning_sequence_found = true;
+ break;
+ }
+ }
+
+ CHECK(poisoning_sequence_found);
+#endif // ENABLE_DISASSEMBLER
+}
+
+TEST(DisasmPoisonPolymorphicLoad) {
+#ifdef ENABLE_DISASSEMBLER
+ if (i::FLAG_always_opt || !i::FLAG_opt) return;
+
+ i::FLAG_allow_natives_syntax = true;
+ i::FLAG_untrusted_code_mitigations = true;
+
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
+
+ CompileRun(
+ "function poly(o) { return o.x + 1; };"
+ "let o1 = { x : 1 };"
+ "let o2 = { y : 1 };"
+ "o2.x = 2;"
+ "poly(o1);"
+ "poly(o2);"
+ "poly(o1);"
+ "poly(o2);"
+ "%OptimizeFunctionOnNextCall(poly);"
+ "poly(o1);");
+
+ Matchers m;
+
+ std::smatch match;
+ std::string line;
+ std::istringstream reader(DisassembleFunction("poly"));
+ bool poisoning_sequence_found = false;
+ while (std::getline(reader, line)) {
+ if (std::regex_match(line, match, m.map_load_re)) {
+ std::string map_reg = match[1];
+ std::string object_reg = match[2];
+      // Checks that the property access sequence is instrumented with
+      // poisoning, by matching the following sequence:
+ //
+ // ldr r1, [r0, #-1] ; load map
+ // ldr r2, [pc, #+104] ; load map constant #1
+ // cmp r1, r2 ; compare maps
+ // beq +Lcase1 ; if match, got to the load
+ // eoreq r9, r9, r9 ; update the poison
+ // csdb ; speculation barrier
+ // ldr r1, [r0, #-1] ; load map
+ // ldr r2, [pc, #+304] ; load map constant #2
+ // cmp r1, r2 ; compare maps
+ // bne +Ldeopt ; deopt if different
+ // eorne r9, r9, r9 ; update the poison
+ // csdb ; speculation barrier
+ // ldr r0, [r0, #+11] ; load the field
+ // and r0, r0, r9 ; apply the poison
+ // mov r0, r0, asr #1 ; untag
+ // b +Ldone ; goto merge point
+ // Lcase1:
+ // eorne r9, r9, r9 ; update the poison
+ // csdb ; speculation barrier
+ // ldr r0, [r0, #+3] ; load property backing store
+ // and r0, r0, r9 ; apply the poison
+ // ldr r0, [r0, #+3] ; load the property
+ // and r0, r0, r9 ; apply the poison
+ // Ldone:
+
+ CHECK(std::getline(reader, line));
+ CHECK(std::regex_match(line, match, m.load_const_re));
+
+ CHECK(std::getline(reader, line));
+ CHECK(std::regex_match(line, match, m.cmp_re));
+ CHECK_EQ(match[1], map_reg);
+
+ CHECK(std::getline(reader, line));
+ CHECK(std::regex_match(line, match, m.beq_re));
+
+ CHECK(std::getline(reader, line));
+ CHECK(std::regex_match(line, match, m.eoreq_re));
+ CHECK_EQ(match[1], m.poison_reg);
+ CHECK_EQ(match[2], m.poison_reg);
+ CHECK_EQ(match[3], m.poison_reg);
+
+ CHECK(std::getline(reader, line));
+ CHECK(std::regex_match(line, match, m.csdb_re));
+
+ CHECK(std::getline(reader, line));
+ CHECK(std::regex_match(line, match, m.map_load_re));
+ map_reg = match[1];
+ CHECK_EQ(match[2], object_reg);
+
+ CHECK(std::getline(reader, line));
+ CHECK(std::regex_match(line, match, m.load_const_re));
CHECK(std::getline(reader, line));
- CHECK(std::regex_match(line, match, cmp_re));
+ CHECK(std::regex_match(line, match, m.cmp_re));
CHECK_EQ(match[1], map_reg);
CHECK(std::getline(reader, line));
- CHECK(std::regex_match(line, match, bne_re));
+ CHECK(std::regex_match(line, match, m.bne_re));
CHECK(std::getline(reader, line));
- CHECK(std::regex_match(line, match, eorne_re));
- CHECK_EQ(match[1], poison_reg);
- CHECK_EQ(match[2], poison_reg);
- CHECK_EQ(match[3], poison_reg);
+ CHECK(std::regex_match(line, match, m.eorne_re));
+ CHECK_EQ(match[1], m.poison_reg);
+ CHECK_EQ(match[2], m.poison_reg);
+ CHECK_EQ(match[3], m.poison_reg);
CHECK(std::getline(reader, line));
- CHECK(std::regex_match(line, match, csdb_re));
+ CHECK(std::regex_match(line, match, m.csdb_re));
CHECK(std::getline(reader, line));
- CHECK(std::regex_match(line, match, load_field_re));
+ CHECK(std::regex_match(line, match, m.load_field_re));
CHECK_EQ(match[2], object_reg);
std::string field_reg = match[1];
CHECK(std::getline(reader, line));
- CHECK(std::regex_match(line, match, mask_re));
+ CHECK(std::regex_match(line, match, m.mask_re));
+ CHECK_EQ(match[1], field_reg);
+ CHECK_EQ(match[2], field_reg);
+ CHECK_EQ(match[3], m.poison_reg);
+
+ CHECK(std::getline(reader, line));
+ CHECK(std::regex_match(line, match, m.untag_re));
+
+ CHECK(std::getline(reader, line));
+ CHECK(std::regex_match(line, match, m.b_re));
+
+ CHECK(std::getline(reader, line));
+ CHECK(std::regex_match(line, match, m.eorne_re));
+ CHECK_EQ(match[1], m.poison_reg);
+ CHECK_EQ(match[2], m.poison_reg);
+ CHECK_EQ(match[3], m.poison_reg);
+
+ CHECK(std::getline(reader, line));
+ CHECK(std::regex_match(line, match, m.csdb_re));
+
+ CHECK(std::getline(reader, line));
+ CHECK(std::regex_match(line, match, m.load_field_re));
+ CHECK_EQ(match[2], object_reg);
+ std::string storage_reg = match[1];
+
+ CHECK(std::getline(reader, line));
+ CHECK(std::regex_match(line, match, m.mask_re));
+ CHECK_EQ(match[1], storage_reg);
+ CHECK_EQ(match[2], storage_reg);
+ CHECK_EQ(match[3], m.poison_reg);
+
+ CHECK(std::getline(reader, line));
+ CHECK(std::regex_match(line, match, m.load_field_re));
+ CHECK_EQ(match[2], storage_reg);
+ field_reg = match[1];
+
+ CHECK(std::getline(reader, line));
+ CHECK(std::regex_match(line, match, m.mask_re));
CHECK_EQ(match[1], field_reg);
CHECK_EQ(match[2], field_reg);
- CHECK_EQ(match[3], poison_reg);
+ CHECK_EQ(match[3], m.poison_reg);
poisoning_sequence_found = true;
break;
diff --git a/deps/v8/test/cctest/test-profile-generator.cc b/deps/v8/test/cctest/test-profile-generator.cc
index c4ad1babc5..c9d7f1da68 100644
--- a/deps/v8/test/cctest/test-profile-generator.cc
+++ b/deps/v8/test/cctest/test-profile-generator.cc
@@ -29,6 +29,7 @@
#include "include/v8-profiler.h"
#include "src/api-inl.h"
+#include "src/log.h"
#include "src/objects-inl.h"
#include "src/profiler/cpu-profiler.h"
#include "src/profiler/profile-generator-inl.h"
@@ -538,7 +539,7 @@ TEST(RecordStackTraceAtStartProfiling) {
i::FLAG_turbo_inlining = false;
v8::HandleScope scope(CcTest::isolate());
- v8::Local<v8::Context> env = CcTest::NewContext(PROFILER_EXTENSION);
+ v8::Local<v8::Context> env = CcTest::NewContext({PROFILER_EXTENSION_ID});
v8::Context::Scope context_scope(env);
std::unique_ptr<i::CpuProfiler> iprofiler(
new i::CpuProfiler(CcTest::i_isolate()));
@@ -616,7 +617,7 @@ TEST(ProfileNodeScriptId) {
i::FLAG_turbo_inlining = false;
v8::HandleScope scope(CcTest::isolate());
- v8::Local<v8::Context> env = CcTest::NewContext(PROFILER_EXTENSION);
+ v8::Local<v8::Context> env = CcTest::NewContext({PROFILER_EXTENSION_ID});
v8::Context::Scope context_scope(env);
std::unique_ptr<CpuProfiler> iprofiler(new CpuProfiler(CcTest::i_isolate()));
i::ProfilerExtension::set_profiler(iprofiler.get());
@@ -711,11 +712,12 @@ TEST(LineNumber) {
}
TEST(BailoutReason) {
+#ifndef V8_LITE_MODE
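+  // Lite mode omits the optimizing compiler, so there is no bailout
+  // reason to observe there (hence the guard above).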
i::FLAG_allow_natives_syntax = true;
i::FLAG_always_opt = false;
i::FLAG_opt = true;
v8::HandleScope scope(CcTest::isolate());
- v8::Local<v8::Context> env = CcTest::NewContext(PROFILER_EXTENSION);
+ v8::Local<v8::Context> env = CcTest::NewContext({PROFILER_EXTENSION_ID});
v8::Context::Scope context_scope(env);
std::unique_ptr<CpuProfiler> iprofiler(new CpuProfiler(CcTest::i_isolate()));
i::ProfilerExtension::set_profiler(iprofiler.get());
@@ -751,7 +753,9 @@ TEST(BailoutReason) {
current = PickChild(current, "Debugger");
CHECK(const_cast<v8::CpuProfileNode*>(current));
- CHECK(!strcmp("Optimization disabled for test", current->GetBailoutReason()));
+ CHECK(
+ !strcmp("Optimization is always disabled", current->GetBailoutReason()));
+#endif // V8_LITE_MODE
}
} // namespace test_profile_generator
diff --git a/deps/v8/test/cctest/test-regexp.cc b/deps/v8/test/cctest/test-regexp.cc
index 301aaf2968..950237a105 100644
--- a/deps/v8/test/cctest/test-regexp.cc
+++ b/deps/v8/test/cctest/test-regexp.cc
@@ -30,9 +30,8 @@
#include <sstream>
#include "include/v8.h"
-#include "src/v8.h"
-
#include "src/api-inl.h"
+#include "src/assembler-arch.h"
#include "src/ast/ast.h"
#include "src/char-predicates-inl.h"
#include "src/objects-inl.h"
@@ -44,49 +43,34 @@
#include "src/splay-tree-inl.h"
#include "src/string-stream.h"
#include "src/unicode-inl.h"
+#include "src/v8.h"
#ifdef V8_INTERPRETED_REGEXP
#include "src/regexp/interpreter-irregexp.h"
#else // V8_INTERPRETED_REGEXP
#include "src/macro-assembler.h"
#if V8_TARGET_ARCH_ARM
-#include "src/arm/assembler-arm.h" // NOLINT
-#include "src/arm/macro-assembler-arm.h"
#include "src/regexp/arm/regexp-macro-assembler-arm.h"
#endif
#if V8_TARGET_ARCH_ARM64
-#include "src/arm64/assembler-arm64.h"
-#include "src/arm64/macro-assembler-arm64.h"
#include "src/regexp/arm64/regexp-macro-assembler-arm64.h"
#endif
#if V8_TARGET_ARCH_S390
#include "src/regexp/s390/regexp-macro-assembler-s390.h"
-#include "src/s390/assembler-s390.h"
-#include "src/s390/macro-assembler-s390.h"
#endif
#if V8_TARGET_ARCH_PPC
-#include "src/ppc/assembler-ppc.h"
-#include "src/ppc/macro-assembler-ppc.h"
#include "src/regexp/ppc/regexp-macro-assembler-ppc.h"
#endif
#if V8_TARGET_ARCH_MIPS
-#include "src/mips/assembler-mips.h"
-#include "src/mips/macro-assembler-mips.h"
#include "src/regexp/mips/regexp-macro-assembler-mips.h"
#endif
#if V8_TARGET_ARCH_MIPS64
-#include "src/mips64/assembler-mips64.h"
-#include "src/mips64/macro-assembler-mips64.h"
#include "src/regexp/mips64/regexp-macro-assembler-mips64.h"
#endif
#if V8_TARGET_ARCH_X64
#include "src/regexp/x64/regexp-macro-assembler-x64.h"
-#include "src/x64/assembler-x64.h"
-#include "src/x64/macro-assembler-x64.h"
#endif
#if V8_TARGET_ARCH_IA32
-#include "src/ia32/assembler-ia32.h"
-#include "src/ia32/macro-assembler-ia32.h"
#include "src/regexp/ia32/regexp-macro-assembler-ia32.h"
#endif
#endif // V8_INTERPRETED_REGEXP
@@ -531,7 +515,7 @@ static bool NotDigit(uc16 c) {
static bool IsWhiteSpaceOrLineTerminator(uc16 c) {
// According to ECMA 5.1, 15.10.2.12 the CharacterClassEscape \s includes
// WhiteSpace (7.2) and LineTerminator (7.3) values.
- return v8::internal::WhiteSpaceOrLineTerminator::Is(c);
+ return v8::internal::IsWhiteSpaceOrLineTerminator(c);
}
@@ -792,7 +776,7 @@ class ContextInitializer {
v8::Local<v8::Context> env_;
};
-static ArchRegExpMacroAssembler::Result Execute(Code* code, String* input,
+static ArchRegExpMacroAssembler::Result Execute(Code code, String input,
int start_offset,
Address input_start,
Address input_end,
@@ -802,7 +786,6 @@ static ArchRegExpMacroAssembler::Result Execute(Code* code, String* input,
reinterpret_cast<byte*>(input_end), captures, 0, CcTest::i_isolate());
}
-
TEST(MacroAssemblerNativeSuccess) {
v8::V8::Initialize();
ContextInitializer initializer;
diff --git a/deps/v8/test/cctest/test-roots.cc b/deps/v8/test/cctest/test-roots.cc
index 7dcbe998cd..bb4e09d8a4 100644
--- a/deps/v8/test/cctest/test-roots.cc
+++ b/deps/v8/test/cctest/test-roots.cc
@@ -10,7 +10,7 @@ namespace v8 {
namespace internal {
namespace {
-AllocationSpace GetSpaceFromObject(Object* object) {
+AllocationSpace GetSpaceFromObject(Object object) {
DCHECK(object->IsHeapObject());
return MemoryChunk::FromHeapObject(HeapObject::cast(object))
->owner()
@@ -19,7 +19,7 @@ AllocationSpace GetSpaceFromObject(Object* object) {
} // namespace
#define CHECK_IN_RO_SPACE(type, name, CamelName) \
- HeapObject* name = roots.name(); \
+ HeapObject name = roots.name(); \
CHECK_EQ(RO_SPACE, GetSpaceFromObject(name));
// The following tests check that all the roots accessible via ReadOnlyRoots are
@@ -34,18 +34,24 @@ TEST(TestReadOnlyRoots) {
namespace {
bool IsInitiallyMutable(Factory* factory, Address object_address) {
-// Entries in this list are in STRONG_MUTABLE_ROOT_LIST, but may initially point
-// to objects that in RO_SPACE.
+// Entries in this list are in STRONG_MUTABLE_MOVABLE_ROOT_LIST, but may
+// initially point to objects that are in RO_SPACE.
#define INITIALLY_READ_ONLY_ROOT_LIST(V) \
+ V(api_private_symbol_table) \
+ V(api_symbol_table) \
V(builtins_constants_table) \
+ V(current_microtask) \
V(detached_contexts) \
+ V(dirty_js_weak_factories) \
V(feedback_vectors_for_profiling_tools) \
V(materialized_objects) \
V(noscript_shared_function_infos) \
+ V(public_symbol_table) \
V(retained_maps) \
V(retaining_path_targets) \
V(serialized_global_proxy_sizes) \
- V(serialized_objects)
+ V(serialized_objects) \
+ V(weak_refs_keep_during_job)
#define TEST_CAN_BE_READ_ONLY(name) \
if (factory->name().address() == object_address) return false;
@@ -59,12 +65,13 @@ bool IsInitiallyMutable(Factory* factory, Address object_address) {
// The CHECK_EQ line is there just to ensure that the root is publicly
// accessible from Heap, but ultimately the factory is used as it provides
// handles that have the address in the root table.
-#define CHECK_NOT_IN_RO_SPACE(type, name, CamelName) \
- Handle<Object> name = factory->name(); \
- CHECK_EQ(*name, heap->name()); \
- if (name->IsHeapObject() && IsInitiallyMutable(factory, name.address())) \
- CHECK_NE(RO_SPACE, \
- GetSpaceFromObject(reinterpret_cast<HeapObject*>(*name)));
+#define CHECK_NOT_IN_RO_SPACE(type, name, CamelName) \
+ Handle<Object> name = factory->name(); \
+ CHECK_EQ(*name, heap->name()); \
+ if (name->IsHeapObject() && IsInitiallyMutable(factory, name.address()) && \
+ !name->IsUndefined(CcTest::i_isolate())) { \
+ CHECK_NE(RO_SPACE, GetSpaceFromObject(HeapObject::cast(*name))); \
+ }
// The following tests check that all the roots accessible via public Heap
// accessors are not in RO_SPACE with the exception of the objects listed in
diff --git a/deps/v8/test/cctest/test-serialize.cc b/deps/v8/test/cctest/test-serialize.cc
index 848678d43f..ed1718adde 100644
--- a/deps/v8/test/cctest/test-serialize.cc
+++ b/deps/v8/test/cctest/test-serialize.cc
@@ -38,17 +38,18 @@
#include "src/compiler.h"
#include "src/debug/debug.h"
#include "src/heap/spaces.h"
+#include "src/interpreter/interpreter.h"
#include "src/macro-assembler-inl.h"
#include "src/objects-inl.h"
#include "src/objects/js-array-buffer-inl.h"
#include "src/objects/js-array-inl.h"
+#include "src/objects/js-regexp-inl.h"
#include "src/runtime/runtime.h"
-#include "src/snapshot/builtin-deserializer.h"
-#include "src/snapshot/builtin-serializer.h"
#include "src/snapshot/code-serializer.h"
#include "src/snapshot/natives.h"
#include "src/snapshot/partial-deserializer.h"
#include "src/snapshot/partial-serializer.h"
+#include "src/snapshot/read-only-serializer.h"
#include "src/snapshot/snapshot.h"
#include "src/snapshot/startup-deserializer.h"
#include "src/snapshot/startup-serializer.h"
@@ -61,66 +62,76 @@ namespace internal {
enum CodeCacheType { kLazy, kEager, kAfterExecute };
-void DisableLazyDeserialization() {
- // UNINITIALIZED tests do not set up the isolate sufficiently for lazy
- // deserialization to work.
- FLAG_lazy_deserialization = false;
-}
-
void DisableAlwaysOpt() {
// Isolates prepared for serialization do not optimize. The only exception is
// with the flag --always-opt.
FLAG_always_opt = false;
}
+// A convenience struct to simplify management of the blobs required to
+// deserialize an isolate.
+struct StartupBlobs {
+ Vector<const byte> startup;
+ Vector<const byte> read_only;
-// TestIsolate is used for testing isolate serialization.
-class TestIsolate : public Isolate {
+ void Dispose() {
+ startup.Dispose();
+ read_only.Dispose();
+ }
+};
+
+// TestSerializer is used for testing isolate serialization.
+class TestSerializer {
public:
- static v8::Isolate* NewInitialized() {
+ static v8::Isolate* NewIsolateInitialized() {
const bool kEnableSerializer = true;
const bool kGenerateHeap = true;
- i::Isolate* isolate = new TestIsolate(kEnableSerializer, kGenerateHeap);
- v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate);
+ DisableEmbeddedBlobRefcounting();
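+    // Pairs with the FreeCurrentEmbeddedBlob() calls at the end of each
+    // test: with refcounting disabled, the embedded blob must be released
+    // explicitly (inferred from the pattern used throughout this file).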
+ v8::Isolate* v8_isolate = NewIsolate(kEnableSerializer, kGenerateHeap);
v8::Isolate::Scope isolate_scope(v8_isolate);
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
isolate->Init(nullptr);
isolate->heap()->read_only_space()->ClearStringPaddingIfNeeded();
return v8_isolate;
}
- // Wraps v8::Isolate::New, but with a TestIsolate under the hood.
+
+ static v8::Isolate* NewIsolateFromBlob(StartupBlobs& blobs) {
+ SnapshotData startup_snapshot(blobs.startup);
+ SnapshotData read_only_snapshot(blobs.read_only);
+ StartupDeserializer deserializer(&startup_snapshot, &read_only_snapshot);
+ const bool kEnableSerializer = false;
+ const bool kGenerateHeap = false;
+ v8::Isolate* v8_isolate = NewIsolate(kEnableSerializer, kGenerateHeap);
+ v8::Isolate::Scope isolate_scope(v8_isolate);
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
+ isolate->Init(&deserializer);
+ return v8_isolate;
+ }
+
+ // Wraps v8::Isolate::New, but with a test isolate under the hood.
// Allows flexibility to bootstrap with or without snapshot even when
// the production Isolate class has one or the other behavior baked in.
- static v8::Isolate* New(const v8::Isolate::CreateParams& params) {
+ static v8::Isolate* NewIsolate(const v8::Isolate::CreateParams& params) {
const bool kEnableSerializer = false;
const bool kGenerateHeap = params.snapshot_blob == nullptr;
- i::Isolate* isolate = new TestIsolate(kEnableSerializer, kGenerateHeap);
- v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate);
+ v8::Isolate* v8_isolate = NewIsolate(kEnableSerializer, kGenerateHeap);
v8::Isolate::Initialize(v8_isolate, params);
return v8_isolate;
}
- explicit TestIsolate(bool with_serializer, bool generate_heap) : Isolate() {
- if (with_serializer) enable_serializer();
- set_array_buffer_allocator(CcTest::array_buffer_allocator());
- setup_delegate_ = new SetupIsolateDelegateForTests(generate_heap);
-
- if (FLAG_embedded_builtins) {
- if (generate_heap || clear_embedded_blob_) {
- // We're generating the heap, including new builtins. Act as if we don't
- // have an embedded blob.
- clear_embedded_blob_ = true;
- SetEmbeddedBlob(nullptr, 0);
- }
- }
- }
private:
-  // A sticky flag that ensures the embedded blob remains cleared after it
- // has been cleared once. E.g.: after creating & serializing a complete heap
- // snapshot, future isolates also expect the embedded blob to be cleared.
- static bool clear_embedded_blob_;
-};
+ // Creates an Isolate instance configured for testing.
+ static v8::Isolate* NewIsolate(bool with_serializer, bool generate_heap) {
+ i::Isolate* isolate = i::Isolate::New();
+ v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate);
+
+ if (with_serializer) isolate->enable_serializer();
+ isolate->set_array_buffer_allocator(CcTest::array_buffer_allocator());
+ isolate->setup_delegate_ = new SetupIsolateDelegateForTests(generate_heap);
-bool TestIsolate::clear_embedded_blob_ = false;
+ return v8_isolate;
+ }
+};
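+
+// A usage sketch, mirroring the serialize/deserialize round trip the
+// tests below perform:
+//
+//   v8::Isolate* isolate = TestSerializer::NewIsolateInitialized();
+//   StartupBlobs blobs = Serialize(isolate);
+//   isolate->Dispose();
+//   isolate = TestSerializer::NewIsolateFromBlob(blobs);
+//   /* ... checks on the deserialized isolate ... */
+//   isolate->Dispose();
+//   blobs.Dispose();
+//   FreeCurrentEmbeddedBlob();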
static Vector<const byte> WritePayload(const Vector<const byte>& payload) {
int length = payload.length();
@@ -129,18 +140,6 @@ static Vector<const byte> WritePayload(const Vector<const byte>& payload) {
return Vector<const byte>(const_cast<const byte*>(blob), length);
}
-// A convenience struct to simplify management of the two blobs required to
-// deserialize an isolate.
-struct StartupBlobs {
- Vector<const byte> startup;
- Vector<const byte> builtin;
-
- void Dispose() {
- startup.Dispose();
- builtin.Dispose();
- }
-};
-
namespace {
bool RunExtraCode(v8::Isolate* isolate, v8::Local<v8::Context> context,
@@ -165,9 +164,12 @@ bool RunExtraCode(v8::Isolate* isolate, v8::Local<v8::Context> context,
return true;
}
-v8::StartupData CreateSnapshotDataBlob(const char* embedded_source = nullptr) {
+v8::StartupData CreateSnapshotDataBlob(
+ v8::SnapshotCreator::FunctionCodeHandling function_code_handling,
+ const char* embedded_source) {
// Create a new isolate and a new context from scratch, optionally run
// a script to embed, and serialize to create a snapshot blob.
+ DisableEmbeddedBlobRefcounting();
v8::StartupData result = {nullptr, 0};
{
v8::SnapshotCreator snapshot_creator;
@@ -181,12 +183,16 @@ v8::StartupData CreateSnapshotDataBlob(const char* embedded_source = nullptr) {
}
snapshot_creator.SetDefaultContext(context);
}
- result = snapshot_creator.CreateBlob(
- v8::SnapshotCreator::FunctionCodeHandling::kClear);
+ result = snapshot_creator.CreateBlob(function_code_handling);
}
return result;
}
+v8::StartupData CreateSnapshotDataBlob(const char* embedded_source = nullptr) {
+ return CreateSnapshotDataBlob(
+ v8::SnapshotCreator::FunctionCodeHandling::kClear, embedded_source);
+}
+
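+// Hypothetical call sites, for illustration; both forms are exercised by
+// tests in this file:
+//
+//   v8::StartupData cold = CreateSnapshotDataBlob("function f() { return 42; }");
+//   v8::StartupData kept = CreateSnapshotDataBlob(
+//       v8::SnapshotCreator::FunctionCodeHandling::kKeep, source);
+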
v8::StartupData WarmUpSnapshotDataBlob(v8::StartupData cold_snapshot_blob,
const char* warmup_source) {
CHECK(cold_snapshot_blob.raw_size > 0 && cold_snapshot_blob.data != nullptr);
@@ -237,17 +243,19 @@ static StartupBlobs Serialize(v8::Isolate* isolate) {
Isolate* internal_isolate = reinterpret_cast<Isolate*>(isolate);
internal_isolate->heap()->CollectAllAvailableGarbage(
i::GarbageCollectionReason::kTesting);
- StartupSerializer ser(internal_isolate);
- ser.SerializeStrongReferences();
- i::BuiltinSerializer builtin_serializer(internal_isolate, &ser);
- builtin_serializer.SerializeBuiltinsAndHandlers();
+ ReadOnlySerializer read_only_serializer(internal_isolate);
+ read_only_serializer.SerializeReadOnlyRoots();
+
+ StartupSerializer ser(internal_isolate, &read_only_serializer);
+ ser.SerializeStrongReferences();
ser.SerializeWeakReferencesAndDeferred();
+ read_only_serializer.FinalizeSerialization();
SnapshotData startup_snapshot(&ser);
- BuiltinSnapshotData builtin_snapshot(&builtin_serializer);
+ SnapshotData read_only_snapshot(&read_only_serializer);
return {WritePayload(startup_snapshot.RawData()),
- WritePayload(builtin_snapshot.RawData())};
+ WritePayload(read_only_snapshot.RawData())};
}
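+
+// A minimal sketch of the two-blob ordering established above: the
+// ReadOnlySerializer is created and seeded first, the StartupSerializer
+// layers on top of it, and the read-only side is finalized only after
+// the startup side has serialized its references:
+//
+//   ReadOnlySerializer ro(isolate);
+//   ro.SerializeReadOnlyRoots();
+//   StartupSerializer ser(isolate, &ro);
+//   ser.SerializeStrongReferences();
+//   ser.SerializeWeakReferencesAndDeferred();
+//   ro.FinalizeSerialization();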
@@ -267,24 +275,9 @@ Vector<const uint8_t> ConstructSource(Vector<const uint8_t> head,
source_length);
}
-v8::Isolate* InitializeFromBlob(StartupBlobs& blobs) {
- v8::Isolate* v8_isolate = nullptr;
- {
- SnapshotData startup_snapshot(blobs.startup);
- BuiltinSnapshotData builtin_snapshot(blobs.builtin);
- StartupDeserializer deserializer(&startup_snapshot, &builtin_snapshot);
- const bool kEnableSerializer = false;
- const bool kGenerateHeap = false;
- TestIsolate* isolate = new TestIsolate(kEnableSerializer, kGenerateHeap);
- v8_isolate = reinterpret_cast<v8::Isolate*>(isolate);
- v8::Isolate::Scope isolate_scope(v8_isolate);
- isolate->Init(&deserializer);
- }
- return v8_isolate;
-}
static v8::Isolate* Deserialize(StartupBlobs& blobs) {
- v8::Isolate* isolate = InitializeFromBlob(blobs);
+ v8::Isolate* isolate = TestSerializer::NewIsolateFromBlob(blobs);
CHECK(isolate);
return isolate;
}
@@ -298,11 +291,11 @@ static void SanityCheck(v8::Isolate* v8_isolate) {
#endif
CHECK(isolate->global_object()->IsJSObject());
CHECK(isolate->native_context()->IsContext());
- isolate->factory()->InternalizeOneByteString(STATIC_CHAR_VECTOR("Empty"));
+ isolate->factory()->InternalizeOneByteString(StaticCharVector("Empty"));
}
void TestStartupSerializerOnceImpl() {
- v8::Isolate* isolate = TestIsolate::NewInitialized();
+ v8::Isolate* isolate = TestSerializer::NewIsolateInitialized();
StartupBlobs blobs = Serialize(isolate);
isolate->Dispose();
isolate = Deserialize(blobs);
@@ -317,44 +310,39 @@ void TestStartupSerializerOnceImpl() {
}
isolate->Dispose();
blobs.Dispose();
+ FreeCurrentEmbeddedBlob();
}
UNINITIALIZED_TEST(StartupSerializerOnce) {
- DisableLazyDeserialization();
DisableAlwaysOpt();
TestStartupSerializerOnceImpl();
}
UNINITIALIZED_TEST(StartupSerializerOnce1) {
- DisableLazyDeserialization();
DisableAlwaysOpt();
FLAG_serialization_chunk_size = 1;
TestStartupSerializerOnceImpl();
}
UNINITIALIZED_TEST(StartupSerializerOnce32) {
- DisableLazyDeserialization();
DisableAlwaysOpt();
FLAG_serialization_chunk_size = 32;
TestStartupSerializerOnceImpl();
}
UNINITIALIZED_TEST(StartupSerializerOnce1K) {
- DisableLazyDeserialization();
DisableAlwaysOpt();
FLAG_serialization_chunk_size = 1 * KB;
TestStartupSerializerOnceImpl();
}
UNINITIALIZED_TEST(StartupSerializerOnce4K) {
- DisableLazyDeserialization();
DisableAlwaysOpt();
FLAG_serialization_chunk_size = 4 * KB;
TestStartupSerializerOnceImpl();
}
UNINITIALIZED_TEST(StartupSerializerOnce32K) {
- DisableLazyDeserialization();
DisableAlwaysOpt();
FLAG_serialization_chunk_size = 32 * KB;
TestStartupSerializerOnceImpl();
@@ -408,9 +396,8 @@ UNINITIALIZED_TEST(StartupSerializerRootMapDependencies) {
}
UNINITIALIZED_TEST(StartupSerializerTwice) {
- DisableLazyDeserialization();
DisableAlwaysOpt();
- v8::Isolate* isolate = TestIsolate::NewInitialized();
+ v8::Isolate* isolate = TestSerializer::NewIsolateInitialized();
StartupBlobs blobs1 = Serialize(isolate);
StartupBlobs blobs2 = Serialize(isolate);
isolate->Dispose();
@@ -427,12 +414,12 @@ UNINITIALIZED_TEST(StartupSerializerTwice) {
}
isolate->Dispose();
blobs2.Dispose();
+ FreeCurrentEmbeddedBlob();
}
UNINITIALIZED_TEST(StartupSerializerOnceRunScript) {
- DisableLazyDeserialization();
DisableAlwaysOpt();
- v8::Isolate* isolate = TestIsolate::NewInitialized();
+ v8::Isolate* isolate = TestSerializer::NewIsolateInitialized();
StartupBlobs blobs = Serialize(isolate);
isolate->Dispose();
isolate = Deserialize(blobs);
@@ -453,12 +440,12 @@ UNINITIALIZED_TEST(StartupSerializerOnceRunScript) {
}
isolate->Dispose();
blobs.Dispose();
+ FreeCurrentEmbeddedBlob();
}
UNINITIALIZED_TEST(StartupSerializerTwiceRunScript) {
- DisableLazyDeserialization();
DisableAlwaysOpt();
- v8::Isolate* isolate = TestIsolate::NewInitialized();
+ v8::Isolate* isolate = TestSerializer::NewIsolateInitialized();
StartupBlobs blobs1 = Serialize(isolate);
StartupBlobs blobs2 = Serialize(isolate);
isolate->Dispose();
@@ -480,12 +467,13 @@ UNINITIALIZED_TEST(StartupSerializerTwiceRunScript) {
}
isolate->Dispose();
blobs2.Dispose();
+ FreeCurrentEmbeddedBlob();
}
static void PartiallySerializeContext(Vector<const byte>* startup_blob_out,
- Vector<const byte>* builtin_blob_out,
+ Vector<const byte>* read_only_blob_out,
Vector<const byte>* partial_blob_out) {
- v8::Isolate* v8_isolate = TestIsolate::NewInitialized();
+ v8::Isolate* v8_isolate = TestSerializer::NewIsolateInitialized();
Isolate* isolate = reinterpret_cast<Isolate*>(v8_isolate);
Heap* heap = isolate->heap();
{
@@ -511,12 +499,16 @@ static void PartiallySerializeContext(Vector<const byte>* startup_blob_out,
v8::Local<v8::Context>::New(v8_isolate, env)->Exit();
}
- i::Context* raw_context = i::Context::cast(*v8::Utils::OpenPersistent(env));
+ i::Context raw_context = i::Context::cast(*v8::Utils::OpenPersistent(env));
env.Reset();
+ SnapshotByteSink read_only_sink;
+ ReadOnlySerializer read_only_serializer(isolate);
+ read_only_serializer.SerializeReadOnlyRoots();
+
SnapshotByteSink startup_sink;
- StartupSerializer startup_serializer(isolate);
+ StartupSerializer startup_serializer(isolate, &read_only_serializer);
startup_serializer.SerializeStrongReferences();
SnapshotByteSink partial_sink;
@@ -524,32 +516,30 @@ static void PartiallySerializeContext(Vector<const byte>* startup_blob_out,
v8::SerializeInternalFieldsCallback());
partial_serializer.Serialize(&raw_context, false);
- i::BuiltinSerializer builtin_serializer(isolate, &startup_serializer);
- builtin_serializer.SerializeBuiltinsAndHandlers();
-
startup_serializer.SerializeWeakReferencesAndDeferred();
+ read_only_serializer.FinalizeSerialization();
+
+ SnapshotData read_only_snapshot(&read_only_serializer);
SnapshotData startup_snapshot(&startup_serializer);
- BuiltinSnapshotData builtin_snapshot(&builtin_serializer);
SnapshotData partial_snapshot(&partial_serializer);
*partial_blob_out = WritePayload(partial_snapshot.RawData());
- *builtin_blob_out = WritePayload(builtin_snapshot.RawData());
*startup_blob_out = WritePayload(startup_snapshot.RawData());
+ *read_only_blob_out = WritePayload(read_only_snapshot.RawData());
}
v8_isolate->Dispose();
}
UNINITIALIZED_TEST(PartialSerializerContext) {
- DisableLazyDeserialization();
DisableAlwaysOpt();
Vector<const byte> startup_blob;
- Vector<const byte> builtin_blob;
+ Vector<const byte> read_only_blob;
Vector<const byte> partial_blob;
- PartiallySerializeContext(&startup_blob, &builtin_blob, &partial_blob);
+ PartiallySerializeContext(&startup_blob, &read_only_blob, &partial_blob);
- StartupBlobs blobs = {startup_blob, builtin_blob};
- v8::Isolate* v8_isolate = InitializeFromBlob(blobs);
+ StartupBlobs blobs = {startup_blob, read_only_blob};
+ v8::Isolate* v8_isolate = TestSerializer::NewIsolateFromBlob(blobs);
CHECK(v8_isolate);
{
v8::Isolate::Scope isolate_scope(v8_isolate);
@@ -584,12 +574,14 @@ UNINITIALIZED_TEST(PartialSerializerContext) {
}
v8_isolate->Dispose();
blobs.Dispose();
+ FreeCurrentEmbeddedBlob();
}
static void PartiallySerializeCustomContext(
- Vector<const byte>* startup_blob_out, Vector<const byte>* builtin_blob_out,
+ Vector<const byte>* startup_blob_out,
+ Vector<const byte>* read_only_blob_out,
Vector<const byte>* partial_blob_out) {
- v8::Isolate* v8_isolate = TestIsolate::NewInitialized();
+ v8::Isolate* v8_isolate = TestSerializer::NewIsolateInitialized();
Isolate* isolate = reinterpret_cast<Isolate*>(v8_isolate);
{
v8::Isolate::Scope isolate_scope(v8_isolate);
@@ -613,12 +605,13 @@ static void PartiallySerializeCustomContext(
"var r = Math.random();"
"var c = Math.sin(0) + Math.cos(0);"
"var f = (function(a, b) { return a + b; }).bind(1, 2, 3);"
- "var s = parseInt('12345');");
+ "var s = parseInt('12345');"
+ "var p = 0;"
+ "(async ()=>{ p = await 42; })();");
Vector<const uint8_t> source = ConstructSource(
- STATIC_CHAR_VECTOR("function g() { return [,"),
- STATIC_CHAR_VECTOR("1,"),
- STATIC_CHAR_VECTOR("];} a = g(); b = g(); b.push(1);"), 100000);
+ StaticCharVector("function g() { return [,"), StaticCharVector("1,"),
+ StaticCharVector("];} a = g(); b = g(); b.push(1);"), 100000);
v8::MaybeLocal<v8::String> source_str = v8::String::NewFromOneByte(
v8_isolate, source.start(), v8::NewStringType::kNormal,
source.length());
@@ -635,12 +628,16 @@ static void PartiallySerializeCustomContext(
v8::Local<v8::Context>::New(v8_isolate, env)->Exit();
}
- i::Context* raw_context = i::Context::cast(*v8::Utils::OpenPersistent(env));
+ i::Context raw_context = i::Context::cast(*v8::Utils::OpenPersistent(env));
env.Reset();
+ SnapshotByteSink read_only_sink;
+ ReadOnlySerializer read_only_serializer(isolate);
+ read_only_serializer.SerializeReadOnlyRoots();
+
SnapshotByteSink startup_sink;
- StartupSerializer startup_serializer(isolate);
+ StartupSerializer startup_serializer(isolate, &read_only_serializer);
startup_serializer.SerializeStrongReferences();
SnapshotByteSink partial_sink;
@@ -648,32 +645,31 @@ static void PartiallySerializeCustomContext(
v8::SerializeInternalFieldsCallback());
partial_serializer.Serialize(&raw_context, false);
- i::BuiltinSerializer builtin_serializer(isolate, &startup_serializer);
- builtin_serializer.SerializeBuiltinsAndHandlers();
-
startup_serializer.SerializeWeakReferencesAndDeferred();
+ read_only_serializer.FinalizeSerialization();
+
+ SnapshotData read_only_snapshot(&read_only_serializer);
SnapshotData startup_snapshot(&startup_serializer);
- BuiltinSnapshotData builtin_snapshot(&builtin_serializer);
SnapshotData partial_snapshot(&partial_serializer);
*partial_blob_out = WritePayload(partial_snapshot.RawData());
- *builtin_blob_out = WritePayload(builtin_snapshot.RawData());
*startup_blob_out = WritePayload(startup_snapshot.RawData());
+ *read_only_blob_out = WritePayload(read_only_snapshot.RawData());
}
v8_isolate->Dispose();
}
UNINITIALIZED_TEST(PartialSerializerCustomContext) {
- DisableLazyDeserialization();
DisableAlwaysOpt();
Vector<const byte> startup_blob;
- Vector<const byte> builtin_blob;
+ Vector<const byte> read_only_blob;
Vector<const byte> partial_blob;
- PartiallySerializeCustomContext(&startup_blob, &builtin_blob, &partial_blob);
+ PartiallySerializeCustomContext(&startup_blob, &read_only_blob,
+ &partial_blob);
- StartupBlobs blobs = {startup_blob, builtin_blob};
- v8::Isolate* v8_isolate = InitializeFromBlob(blobs);
+ StartupBlobs blobs = {startup_blob, read_only_blob};
+ v8::Isolate* v8_isolate = TestSerializer::NewIsolateFromBlob(blobs);
CHECK(v8_isolate);
{
v8::Isolate::Scope isolate_scope(v8_isolate);
@@ -740,6 +736,11 @@ UNINITIALIZED_TEST(PartialSerializerCustomContext) {
.ToLocalChecked();
CHECK(s->Equals(v8_isolate->GetCurrentContext(), v8_str("12345"))
.FromJust());
+ v8::Local<v8::String> p = CompileRun("p")
+ ->ToString(v8_isolate->GetCurrentContext())
+ .ToLocalChecked();
+ CHECK(
+ p->Equals(v8_isolate->GetCurrentContext(), v8_str("42")).FromJust());
int a = CompileRun("a.length")
->ToNumber(v8_isolate->GetCurrentContext())
.ToLocalChecked()
@@ -757,9 +758,10 @@ UNINITIALIZED_TEST(PartialSerializerCustomContext) {
}
v8_isolate->Dispose();
blobs.Dispose();
+ FreeCurrentEmbeddedBlob();
}
-TEST(CustomSnapshotDataBlob1) {
+UNINITIALIZED_TEST(CustomSnapshotDataBlob1) {
DisableAlwaysOpt();
const char* source1 = "function f() { return 42; }";
@@ -770,7 +772,7 @@ TEST(CustomSnapshotDataBlob1) {
params1.array_buffer_allocator = CcTest::array_buffer_allocator();
// Test-appropriate equivalent of v8::Isolate::New.
- v8::Isolate* isolate1 = TestIsolate::New(params1);
+ v8::Isolate* isolate1 = TestSerializer::NewIsolate(params1);
{
v8::Isolate::Scope i_scope(isolate1);
v8::HandleScope h_scope(isolate1);
@@ -783,9 +785,161 @@ TEST(CustomSnapshotDataBlob1) {
}
isolate1->Dispose();
delete[] data1.data; // We can dispose of the snapshot blob now.
+ FreeCurrentEmbeddedBlob();
+}
+
+static void UnreachableCallback(const FunctionCallbackInfo<Value>& args) {
+ UNREACHABLE();
+}
+
+UNINITIALIZED_TEST(CustomSnapshotDataBlobOverwriteGlobal) {
+ DisableAlwaysOpt();
+ const char* source1 = "function f() { return 42; }";
+
+ v8::StartupData data1 = CreateSnapshotDataBlob(source1);
+
+ v8::Isolate::CreateParams params1;
+ params1.snapshot_blob = &data1;
+ params1.array_buffer_allocator = CcTest::array_buffer_allocator();
+
+ // Test that the snapshot overwrites the object template when there are
+ // duplicate global properties.
+ v8::Isolate* isolate1 = TestSerializer::NewIsolate(params1);
+ {
+ v8::Isolate::Scope i_scope(isolate1);
+ v8::HandleScope h_scope(isolate1);
+ v8::Local<v8::ObjectTemplate> global_template =
+ v8::ObjectTemplate::New(isolate1);
+ global_template->Set(
+ v8_str("f"), v8::FunctionTemplate::New(isolate1, UnreachableCallback));
+ v8::Local<v8::Context> context =
+ v8::Context::New(isolate1, nullptr, global_template);
+ v8::Context::Scope c_scope(context);
+ v8::Maybe<int32_t> result =
+ CompileRun("f()")->Int32Value(isolate1->GetCurrentContext());
+ CHECK_EQ(42, result.FromJust());
+ }
+ isolate1->Dispose();
+ delete[] data1.data; // We can dispose of the snapshot blob now.
+ FreeCurrentEmbeddedBlob();
+}
+
+UNINITIALIZED_TEST(CustomSnapshotDataBlobStringNotInternalized) {
+ DisableAlwaysOpt();
+ const char* source1 =
+ R"javascript(
+      // String would be internalized if it came from a literal, so create "A"
+ // via a function call.
+ var global = String.fromCharCode(65);
+ function f() { return global; }
+ )javascript";
+
+ v8::StartupData data1 = CreateSnapshotDataBlob(source1);
+
+ v8::Isolate::CreateParams params1;
+ params1.snapshot_blob = &data1;
+ params1.array_buffer_allocator = CcTest::array_buffer_allocator();
+
+ // Test-appropriate equivalent of v8::Isolate::New.
+ v8::Isolate* isolate1 = TestSerializer::NewIsolate(params1);
+ {
+ v8::Isolate::Scope i_scope(isolate1);
+ v8::HandleScope h_scope(isolate1);
+ v8::Local<v8::Context> context = v8::Context::New(isolate1);
+ v8::Context::Scope c_scope(context);
+ v8::Local<v8::Value> result = CompileRun("f()").As<v8::Value>();
+ CHECK(result->IsString());
+ i::String str = *v8::Utils::OpenHandle(*result.As<v8::String>());
+ CHECK_EQ(std::string(str->ToCString().get()), "A");
+ CHECK(!str.IsInternalizedString());
+ CHECK(
+ !reinterpret_cast<i::Isolate*>(isolate1)->heap()->InReadOnlySpace(str));
+ }
+ isolate1->Dispose();
+ delete[] data1.data; // We can dispose of the snapshot blob now.
+ FreeCurrentEmbeddedBlob();
}
-TEST(SnapshotChecksum) {
+#ifndef V8_INTERPRETED_REGEXP
+namespace {
+
+void TestCustomSnapshotDataBlobWithIrregexpCode(
+ v8::SnapshotCreator::FunctionCodeHandling function_code_handling) {
+ DisableAlwaysOpt();
+ const char* source =
+ "var re1 = /\\/\\*[^*]*\\*+([^/*][^*]*\\*+)*\\//;\n"
+ "function f() { return '/* a comment */'.search(re1); }\n"
+ "function g() { return 'not a comment'.search(re1); }\n"
+ "function h() { return '// this is a comment'.search(re1); }\n"
+ "var re2 = /a/;\n"
+ "function i() { return '/* a comment */'.search(re2); }\n"
+ "f(); f(); g(); g(); h(); h(); i(); i();\n";
+
+ v8::StartupData data1 =
+ CreateSnapshotDataBlob(function_code_handling, source);
+
+ v8::Isolate::CreateParams params1;
+ params1.snapshot_blob = &data1;
+ params1.array_buffer_allocator = CcTest::array_buffer_allocator();
+
+ // Test-appropriate equivalent of v8::Isolate::New.
+ v8::Isolate* isolate1 = TestSerializer::NewIsolate(params1);
+ {
+ v8::Isolate::Scope i_scope(isolate1);
+ v8::HandleScope h_scope(isolate1);
+ v8::Local<v8::Context> context = v8::Context::New(isolate1);
+ v8::Context::Scope c_scope(context);
+ {
+ // Check that compiled irregexp code has not been flushed prior to
+ // serialization.
+ i::Handle<i::JSRegExp> re =
+ Utils::OpenHandle(*CompileRun("re1").As<v8::RegExp>());
+ CHECK_EQ(re->HasCompiledCode(),
+ function_code_handling ==
+ v8::SnapshotCreator::FunctionCodeHandling::kKeep);
+ }
+ {
+ v8::Maybe<int32_t> result =
+ CompileRun("f()")->Int32Value(isolate1->GetCurrentContext());
+ CHECK_EQ(0, result.FromJust());
+ }
+ {
+ v8::Maybe<int32_t> result =
+ CompileRun("g()")->Int32Value(isolate1->GetCurrentContext());
+ CHECK_EQ(-1, result.FromJust());
+ }
+ {
+ v8::Maybe<int32_t> result =
+ CompileRun("h()")->Int32Value(isolate1->GetCurrentContext());
+ CHECK_EQ(-1, result.FromJust());
+ }
+ {
+ // Check that ATOM regexp remains valid.
+ i::Handle<i::JSRegExp> re =
+ Utils::OpenHandle(*CompileRun("re2").As<v8::RegExp>());
+ CHECK_EQ(re->TypeTag(), JSRegExp::ATOM);
+ CHECK(!re->HasCompiledCode());
+ }
+ }
+ isolate1->Dispose();
+ delete[] data1.data; // We can dispose of the snapshot blob now.
+ FreeCurrentEmbeddedBlob();
+}
+
+} // namespace
+
+UNINITIALIZED_TEST(CustomSnapshotDataBlobWithIrregexpCodeKeepCode) {
+ TestCustomSnapshotDataBlobWithIrregexpCode(
+ v8::SnapshotCreator::FunctionCodeHandling::kKeep);
+}
+
+UNINITIALIZED_TEST(CustomSnapshotDataBlobWithIrregexpCodeClearCode) {
+ TestCustomSnapshotDataBlobWithIrregexpCode(
+ v8::SnapshotCreator::FunctionCodeHandling::kClear);
+}
+#endif // V8_INTERPRETED_REGEXP
+
+UNINITIALIZED_TEST(SnapshotChecksum) {
DisableAlwaysOpt();
const char* source1 = "function f() { return 42; }";
@@ -794,6 +948,7 @@ TEST(SnapshotChecksum) {
const_cast<char*>(data1.data)[142] = data1.data[142] ^ 4; // Flip a bit.
CHECK(!i::Snapshot::VerifyChecksum(&data1));
delete[] data1.data; // We can dispose of the snapshot blob now.
+ FreeCurrentEmbeddedBlob();
}
struct InternalFieldData {
@@ -802,7 +957,14 @@ struct InternalFieldData {
v8::StartupData SerializeInternalFields(v8::Local<v8::Object> holder, int index,
void* data) {
- CHECK_EQ(reinterpret_cast<void*>(2016), data);
+ if (data == reinterpret_cast<void*>(2000)) {
+    // Used for the SnapshotCreatorTemplates test. We check that none of
+    // the fields have been cleared yet.
+ CHECK_NOT_NULL(holder->GetAlignedPointerFromInternalField(1));
+ } else {
+ CHECK_EQ(reinterpret_cast<void*>(2016), data);
+ }
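+  // Only internal field 1 carries serializable InternalFieldData in these
+  // tests; every other index serializes to an empty payload.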
+ if (index != 1) return {nullptr, 0};
InternalFieldData* embedder_field = static_cast<InternalFieldData*>(
holder->GetAlignedPointerFromInternalField(index));
if (embedder_field == nullptr) return {nullptr, 0};
@@ -842,6 +1004,7 @@ void TypedArrayTestHelper(
const Int32Expectations& after_restore_expectations = Int32Expectations()) {
DisableAlwaysOpt();
i::FLAG_allow_natives_syntax = true;
+ DisableEmbeddedBlobRefcounting();
v8::StartupData blob;
{
v8::SnapshotCreator creator;
@@ -864,7 +1027,7 @@ void TypedArrayTestHelper(
v8::Isolate::CreateParams create_params;
create_params.snapshot_blob = &blob;
create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
- v8::Isolate* isolate = TestIsolate::New(create_params);
+ v8::Isolate* isolate = TestSerializer::NewIsolate(create_params);
{
v8::Isolate::Scope i_scope(isolate);
v8::HandleScope h_scope(isolate);
@@ -883,9 +1046,10 @@ void TypedArrayTestHelper(
}
isolate->Dispose();
delete[] blob.data; // We can dispose of the snapshot blob now.
+ FreeCurrentEmbeddedBlob();
}
-TEST(CustomSnapshotDataBlobWithOffHeapTypedArray) {
+UNINITIALIZED_TEST(CustomSnapshotDataBlobWithOffHeapTypedArray) {
const char* code =
"var x = new Uint8Array(128);"
"x[0] = 12;"
@@ -902,7 +1066,7 @@ TEST(CustomSnapshotDataBlobWithOffHeapTypedArray) {
TypedArrayTestHelper(code, expectations);
}
-TEST(CustomSnapshotDataBlobSharedArrayBuffer) {
+UNINITIALIZED_TEST(CustomSnapshotDataBlobSharedArrayBuffer) {
const char* code =
"var x = new Int32Array([12, 24, 48, 96]);"
"var y = new Uint8Array(x.buffer)";
@@ -927,7 +1091,7 @@ TEST(CustomSnapshotDataBlobSharedArrayBuffer) {
TypedArrayTestHelper(code, expectations);
}
-TEST(CustomSnapshotDataBlobArrayBufferWithOffset) {
+UNINITIALIZED_TEST(CustomSnapshotDataBlobArrayBufferWithOffset) {
const char* code =
"var x = new Int32Array([12, 24, 48, 96]);"
"var y = new Int32Array(x.buffer, 4, 2)";
@@ -946,7 +1110,7 @@ TEST(CustomSnapshotDataBlobArrayBufferWithOffset) {
after_restore_expectations);
}
-TEST(CustomSnapshotDataBlobDataView) {
+UNINITIALIZED_TEST(CustomSnapshotDataBlobDataView) {
const char* code =
"var x = new Int8Array([1, 2, 3, 4]);"
"var v = new DataView(x.buffer)";
@@ -958,15 +1122,16 @@ TEST(CustomSnapshotDataBlobDataView) {
TypedArrayTestHelper(code, expectations);
}
-TEST(CustomSnapshotDataBlobNeuteredArrayBuffer) {
+UNINITIALIZED_TEST(CustomSnapshotDataBlobDetachedArrayBuffer) {
const char* code =
"var x = new Int16Array([12, 24, 48]);"
- "%ArrayBufferNeuter(x.buffer);";
+ "%ArrayBufferDetach(x.buffer);";
Int32Expectations expectations = {std::make_tuple("x.buffer.byteLength", 0),
std::make_tuple("x.length", 0)};
DisableAlwaysOpt();
i::FLAG_allow_natives_syntax = true;
+ DisableEmbeddedBlobRefcounting();
v8::StartupData blob;
{
v8::SnapshotCreator creator;
@@ -989,7 +1154,7 @@ TEST(CustomSnapshotDataBlobNeuteredArrayBuffer) {
v8::Isolate::CreateParams create_params;
create_params.snapshot_blob = &blob;
create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
- v8::Isolate* isolate = TestIsolate::New(create_params);
+ v8::Isolate* isolate = TestSerializer::NewIsolate(create_params);
{
v8::Isolate::Scope i_scope(isolate);
v8::HandleScope h_scope(isolate);
@@ -1005,12 +1170,13 @@ TEST(CustomSnapshotDataBlobNeuteredArrayBuffer) {
CHECK(x->IsTypedArray());
i::Handle<i::JSTypedArray> array =
i::Handle<i::JSTypedArray>::cast(v8::Utils::OpenHandle(*x));
- CHECK(array->WasNeutered());
+ CHECK(array->WasDetached());
CHECK_NULL(
FixedTypedArrayBase::cast(array->elements())->external_pointer());
}
isolate->Dispose();
delete[] blob.data; // We can dispose of the snapshot blob now.
+ FreeCurrentEmbeddedBlob();
}
i::Handle<i::JSArrayBuffer> GetBufferFromTypedArray(
@@ -1023,7 +1189,7 @@ i::Handle<i::JSArrayBuffer> GetBufferFromTypedArray(
return i::handle(i::JSArrayBuffer::cast(view->buffer()), view->GetIsolate());
}
-TEST(CustomSnapshotDataBlobOnOrOffHeapTypedArray) {
+UNINITIALIZED_TEST(CustomSnapshotDataBlobOnOrOffHeapTypedArray) {
const char* code =
"var x = new Uint8Array(8);"
"x[0] = 12;"
@@ -1037,6 +1203,7 @@ TEST(CustomSnapshotDataBlobOnOrOffHeapTypedArray) {
DisableAlwaysOpt();
i::FLAG_allow_natives_syntax = true;
+ DisableEmbeddedBlobRefcounting();
v8::StartupData blob;
{
v8::SnapshotCreator creator;
@@ -1059,7 +1226,7 @@ TEST(CustomSnapshotDataBlobOnOrOffHeapTypedArray) {
v8::Isolate::CreateParams create_params;
create_params.snapshot_blob = &blob;
create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
- v8::Isolate* isolate = TestIsolate::New(create_params);
+ v8::Isolate* isolate = TestSerializer::NewIsolate(create_params);
{
v8::Isolate::Scope i_scope(isolate);
v8::HandleScope h_scope(isolate);
@@ -1085,9 +1252,10 @@ TEST(CustomSnapshotDataBlobOnOrOffHeapTypedArray) {
}
isolate->Dispose();
delete[] blob.data; // We can dispose of the snapshot blob now.
+ FreeCurrentEmbeddedBlob();
}
-TEST(CustomSnapshotDataBlob2) {
+UNINITIALIZED_TEST(CustomSnapshotDataBlob2) {
DisableAlwaysOpt();
const char* source2 =
"function f() { return g() * 2; }"
@@ -1100,7 +1268,7 @@ TEST(CustomSnapshotDataBlob2) {
params2.snapshot_blob = &data2;
params2.array_buffer_allocator = CcTest::array_buffer_allocator();
// Test-appropriate equivalent of v8::Isolate::New.
- v8::Isolate* isolate2 = TestIsolate::New(params2);
+ v8::Isolate* isolate2 = TestSerializer::NewIsolate(params2);
{
v8::Isolate::Scope i_scope(isolate2);
v8::HandleScope h_scope(isolate2);
@@ -1114,6 +1282,7 @@ TEST(CustomSnapshotDataBlob2) {
}
isolate2->Dispose();
delete[] data2.data; // We can dispose of the snapshot blob now.
+ FreeCurrentEmbeddedBlob();
}
static void SerializationFunctionTemplate(
@@ -1121,7 +1290,7 @@ static void SerializationFunctionTemplate(
args.GetReturnValue().Set(args[0]);
}
-TEST(CustomSnapshotDataBlobOutdatedContextWithOverflow) {
+UNINITIALIZED_TEST(CustomSnapshotDataBlobOutdatedContextWithOverflow) {
DisableAlwaysOpt();
const char* source1 =
"var o = {};"
@@ -1144,7 +1313,7 @@ TEST(CustomSnapshotDataBlobOutdatedContextWithOverflow) {
params.array_buffer_allocator = CcTest::array_buffer_allocator();
// Test-appropriate equivalent of v8::Isolate::New.
- v8::Isolate* isolate = TestIsolate::New(params);
+ v8::Isolate* isolate = TestSerializer::NewIsolate(params);
{
v8::Isolate::Scope i_scope(isolate);
v8::HandleScope h_scope(isolate);
@@ -1165,12 +1334,14 @@ TEST(CustomSnapshotDataBlobOutdatedContextWithOverflow) {
}
isolate->Dispose();
delete[] data.data; // We can dispose of the snapshot blob now.
+ FreeCurrentEmbeddedBlob();
}
-TEST(CustomSnapshotDataBlobWithLocker) {
+UNINITIALIZED_TEST(CustomSnapshotDataBlobWithLocker) {
DisableAlwaysOpt();
v8::Isolate::CreateParams create_params;
create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
+ DisableEmbeddedBlobRefcounting();
v8::Isolate* isolate0 = v8::Isolate::New(create_params);
{
v8::Locker locker(isolate0);
@@ -1192,7 +1363,7 @@ TEST(CustomSnapshotDataBlobWithLocker) {
params1.snapshot_blob = &data1;
params1.array_buffer_allocator = CcTest::array_buffer_allocator();
// Test-appropriate equivalent of v8::Isolate::New.
- v8::Isolate* isolate1 = TestIsolate::New(params1);
+ v8::Isolate* isolate1 = TestSerializer::NewIsolate(params1);
{
v8::Locker locker(isolate1);
v8::Isolate::Scope i_scope(isolate1);
@@ -1204,9 +1375,10 @@ TEST(CustomSnapshotDataBlobWithLocker) {
}
isolate1->Dispose();
delete[] data1.data; // We can dispose of the snapshot blob now.
+ FreeCurrentEmbeddedBlob();
}
-TEST(CustomSnapshotDataBlobStackOverflow) {
+UNINITIALIZED_TEST(CustomSnapshotDataBlobStackOverflow) {
DisableAlwaysOpt();
const char* source =
"var a = [0];"
@@ -1225,7 +1397,7 @@ TEST(CustomSnapshotDataBlobStackOverflow) {
params.array_buffer_allocator = CcTest::array_buffer_allocator();
// Test-appropriate equivalent of v8::Isolate::New.
- v8::Isolate* isolate = TestIsolate::New(params);
+ v8::Isolate* isolate = TestSerializer::NewIsolate(params);
{
v8::Isolate::Scope i_scope(isolate);
v8::HandleScope h_scope(isolate);
@@ -1244,6 +1416,7 @@ TEST(CustomSnapshotDataBlobStackOverflow) {
}
isolate->Dispose();
delete[] data.data; // We can dispose of the snapshot blob now.
+ FreeCurrentEmbeddedBlob();
}
bool IsCompiled(const char* name) {
@@ -1253,7 +1426,7 @@ bool IsCompiled(const char* name) {
->is_compiled();
}
-TEST(SnapshotDataBlobWithWarmup) {
+UNINITIALIZED_TEST(SnapshotDataBlobWithWarmup) {
DisableAlwaysOpt();
const char* warmup = "Math.abs(1); Math.random = 1;";
@@ -1266,7 +1439,7 @@ TEST(SnapshotDataBlobWithWarmup) {
params.array_buffer_allocator = CcTest::array_buffer_allocator();
// Test-appropriate equivalent of v8::Isolate::New.
- v8::Isolate* isolate = TestIsolate::New(params);
+ v8::Isolate* isolate = TestSerializer::NewIsolate(params);
{
v8::Isolate::Scope i_scope(isolate);
v8::HandleScope h_scope(isolate);
@@ -1280,9 +1453,10 @@ TEST(SnapshotDataBlobWithWarmup) {
}
isolate->Dispose();
delete[] warm.data;
+ FreeCurrentEmbeddedBlob();
}
-TEST(CustomSnapshotDataBlobWithWarmup) {
+UNINITIALIZED_TEST(CustomSnapshotDataBlobWithWarmup) {
DisableAlwaysOpt();
const char* source =
"function f() { return Math.abs(1); }\n"
@@ -1300,7 +1474,7 @@ TEST(CustomSnapshotDataBlobWithWarmup) {
params.array_buffer_allocator = CcTest::array_buffer_allocator();
// Test-appropriate equivalent of v8::Isolate::New.
- v8::Isolate* isolate = TestIsolate::New(params);
+ v8::Isolate* isolate = TestSerializer::NewIsolate(params);
{
v8::Isolate::Scope i_scope(isolate);
v8::HandleScope h_scope(isolate);
@@ -1317,17 +1491,18 @@ TEST(CustomSnapshotDataBlobWithWarmup) {
}
isolate->Dispose();
delete[] warm.data;
+ FreeCurrentEmbeddedBlob();
}
-TEST(CustomSnapshotDataBlobImmortalImmovableRoots) {
+UNINITIALIZED_TEST(CustomSnapshotDataBlobImmortalImmovableRoots) {
DisableAlwaysOpt();
// Flood the startup snapshot with shared function infos. If they are
// serialized before the immortal immovable root, the root will no longer end
// up on the first page.
Vector<const uint8_t> source =
- ConstructSource(STATIC_CHAR_VECTOR("var a = [];"),
- STATIC_CHAR_VECTOR("a.push(function() {return 7});"),
- STATIC_CHAR_VECTOR("\0"), 10000);
+ ConstructSource(StaticCharVector("var a = [];"),
+ StaticCharVector("a.push(function() {return 7});"),
+ StaticCharVector("\0"), 10000);
v8::StartupData data =
CreateSnapshotDataBlob(reinterpret_cast<const char*>(source.start()));
@@ -1337,7 +1512,7 @@ TEST(CustomSnapshotDataBlobImmortalImmovableRoots) {
params.array_buffer_allocator = CcTest::array_buffer_allocator();
// Test-appropriate equivalent of v8::Isolate::New.
- v8::Isolate* isolate = TestIsolate::New(params);
+ v8::Isolate* isolate = TestSerializer::NewIsolate(params);
{
v8::Isolate::Scope i_scope(isolate);
v8::HandleScope h_scope(isolate);
@@ -1348,6 +1523,7 @@ TEST(CustomSnapshotDataBlobImmortalImmovableRoots) {
isolate->Dispose();
source.Dispose();
delete[] data.data; // We can dispose of the snapshot blob now.
+ FreeCurrentEmbeddedBlob();
}
TEST(TestThatAlwaysSucceeds) {
@@ -1365,7 +1541,7 @@ int CountBuiltins() {
HeapIterator iterator(CcTest::heap());
DisallowHeapAllocation no_allocation;
int counter = 0;
- for (HeapObject* obj = iterator.next(); obj != nullptr;
+ for (HeapObject obj = iterator.next(); !obj.is_null();
obj = iterator.next()) {
if (obj->IsCode() && Code::cast(obj)->kind() == Code::BUILTIN) counter++;
}
@@ -1573,10 +1749,10 @@ TEST(CodeSerializerLargeCodeObject) {
FLAG_always_opt = false;
Vector<const uint8_t> source = ConstructSource(
- STATIC_CHAR_VECTOR("var j=1; if (j == 0) {"),
- STATIC_CHAR_VECTOR(
+ StaticCharVector("var j=1; if (j == 0) {"),
+ StaticCharVector(
"for (let i of Object.prototype) for (let k = 0; k < 0; ++k);"),
- STATIC_CHAR_VECTOR("} j=7; j"), 1200);
+ StaticCharVector("} j=7; j"), 2000);
Handle<String> source_str =
isolate->factory()->NewStringFromOneByte(source).ToHandleChecked();
@@ -1618,7 +1794,6 @@ TEST(CodeSerializerLargeCodeObjectWithIncrementalMarking) {
FLAG_always_opt = false;
const char* filter_flag = "--turbo-filter=NOTHING";
FlagList::SetFlagsFromString(filter_flag, StrLength(filter_flag));
- FLAG_black_allocation = true;
FLAG_manual_evacuation_candidates_selection = true;
LocalContext context;
@@ -1629,9 +1804,9 @@ TEST(CodeSerializerLargeCodeObjectWithIncrementalMarking) {
v8::HandleScope scope(CcTest::isolate());
Vector<const uint8_t> source = ConstructSource(
- STATIC_CHAR_VECTOR("var j=1; if (j == 0) {"),
- STATIC_CHAR_VECTOR("for (var i = 0; i < Object.prototype; i++);"),
- STATIC_CHAR_VECTOR("} j=7; var s = 'happy_hippo'; j"), 20000);
+ StaticCharVector("var j=1; if (j == 0) {"),
+ StaticCharVector("for (var i = 0; i < Object.prototype; i++);"),
+ StaticCharVector("} j=7; var s = 'happy_hippo'; j"), 20000);
Handle<String> source_str =
isolate->factory()->NewStringFromOneByte(source).ToHandleChecked();
@@ -1643,7 +1818,7 @@ TEST(CodeSerializerLargeCodeObjectWithIncrementalMarking) {
heap::SimulateFullSpace(heap->old_space());
moving_object = isolate->factory()->InternalizeString(
isolate->factory()->NewStringFromAsciiChecked("happy_hippo"));
- ec_page = Page::FromAddress(moving_object->address());
+ ec_page = Page::FromHeapObject(*moving_object);
}
Handle<JSObject> global(isolate->context()->global_object(), isolate);
@@ -1699,11 +1874,11 @@ TEST(CodeSerializerLargeStrings) {
v8::HandleScope scope(CcTest::isolate());
Vector<const uint8_t> source_s = ConstructSource(
- STATIC_CHAR_VECTOR("var s = \""), STATIC_CHAR_VECTOR("abcdef"),
- STATIC_CHAR_VECTOR("\";"), 1000000);
+ StaticCharVector("var s = \""), StaticCharVector("abcdef"),
+ StaticCharVector("\";"), 1000000);
Vector<const uint8_t> source_t = ConstructSource(
- STATIC_CHAR_VECTOR("var t = \""), STATIC_CHAR_VECTOR("uvwxyz"),
- STATIC_CHAR_VECTOR("\"; s + t"), 999999);
+ StaticCharVector("var t = \""), StaticCharVector("uvwxyz"),
+ StaticCharVector("\"; s + t"), 999999);
Handle<String> source_str =
f->NewConsString(f->NewStringFromOneByte(source_s).ToHandleChecked(),
f->NewStringFromOneByte(source_t).ToHandleChecked())
@@ -1755,20 +1930,20 @@ TEST(CodeSerializerThreeBigStrings) {
v8::HandleScope scope(CcTest::isolate());
Vector<const uint8_t> source_a =
- ConstructSource(STATIC_CHAR_VECTOR("var a = \""), STATIC_CHAR_VECTOR("a"),
- STATIC_CHAR_VECTOR("\";"), 700000);
+ ConstructSource(StaticCharVector("var a = \""), StaticCharVector("a"),
+ StaticCharVector("\";"), 700000);
Handle<String> source_a_str =
f->NewStringFromOneByte(source_a).ToHandleChecked();
Vector<const uint8_t> source_b =
- ConstructSource(STATIC_CHAR_VECTOR("var b = \""), STATIC_CHAR_VECTOR("b"),
- STATIC_CHAR_VECTOR("\";"), 400000);
+ ConstructSource(StaticCharVector("var b = \""), StaticCharVector("b"),
+ StaticCharVector("\";"), 400000);
Handle<String> source_b_str =
f->NewStringFromOneByte(source_b).ToHandleChecked();
Vector<const uint8_t> source_c =
- ConstructSource(STATIC_CHAR_VECTOR("var c = \""), STATIC_CHAR_VECTOR("c"),
- STATIC_CHAR_VECTOR("\";"), 400000);
+ ConstructSource(StaticCharVector("var c = \""), StaticCharVector("c"),
+ StaticCharVector("\";"), 400000);
Handle<String> source_c_str =
f->NewStringFromOneByte(source_c).ToHandleChecked();
@@ -1814,11 +1989,20 @@ TEST(CodeSerializerThreeBigStrings) {
result_str = CompileRun("b")
->ToString(CcTest::isolate()->GetCurrentContext())
.ToLocalChecked();
+#if V8_HOST_ARCH_PPC
+ CHECK(heap->InSpace(*v8::Utils::OpenHandle(*result_str), LO_SPACE));
+#else
CHECK(heap->InSpace(*v8::Utils::OpenHandle(*result_str), OLD_SPACE));
+#endif
+
result_str = CompileRun("c")
->ToString(CcTest::isolate()->GetCurrentContext())
.ToLocalChecked();
+#if V8_HOST_ARCH_PPC
+ CHECK(heap->InSpace(*v8::Utils::OpenHandle(*result_str), LO_SPACE));
+#else
CHECK(heap->InSpace(*v8::Utils::OpenHandle(*result_str), OLD_SPACE));
+#endif
delete cache;
source_a.Dispose();
@@ -1938,8 +2122,8 @@ TEST(CodeSerializerLargeExternalString) {
// Create a huge external internalized string to use as variable name.
Vector<const uint8_t> string =
- ConstructSource(STATIC_CHAR_VECTOR(""), STATIC_CHAR_VECTOR("abcdef"),
- STATIC_CHAR_VECTOR(""), 999999);
+ ConstructSource(StaticCharVector(""), StaticCharVector("abcdef"),
+ StaticCharVector(""), 999999);
Handle<String> name = f->NewStringFromOneByte(string).ToHandleChecked();
SerializerOneByteResource one_byte_resource(
reinterpret_cast<const char*>(string.start()), string.length());
@@ -2044,7 +2228,8 @@ static bool toplevel_test_code_event_found = false;
static void SerializerCodeEventListener(const v8::JitCodeEvent* event) {
if (event->type == v8::JitCodeEvent::CODE_ADDED &&
- memcmp(event->name.str, "Script:~test", 12) == 0) {
+ (memcmp(event->name.str, "Script:~ test", 13) == 0 ||
+ memcmp(event->name.str, "Script: test", 12) == 0)) {
toplevel_test_code_event_found = true;
}
}
@@ -2067,7 +2252,7 @@ v8::ScriptCompiler::CachedData* CompileRunAndProduceCache(
v8::ScriptCompiler::CompileOptions options;
switch (cacheType) {
case CodeCacheType::kEager:
- options = v8::ScriptCompiler::kProduceFullCodeCache;
+ options = v8::ScriptCompiler::kEagerCompile;
break;
case CodeCacheType::kLazy:
case CodeCacheType::kAfterExecute:
@@ -2101,15 +2286,6 @@ v8::ScriptCompiler::CachedData* CompileRunAndProduceCache(
return cache;
}
-void CheckDeserializedFlag(v8::Local<v8::UnboundScript> script) {
- i::Handle<i::SharedFunctionInfo> sfi = v8::Utils::OpenHandle(*script);
- i::SharedFunctionInfo::ScriptIterator iterator(sfi->GetIsolate(),
- Script::cast(sfi->script()));
- while (SharedFunctionInfo* next = iterator.Next()) {
- CHECK_EQ(next->is_compiled(), next->deserialized());
- }
-}
-
TEST(CodeSerializerIsolates) {
const char* source = "function f() { return 'abc'; }; f() + 'def'";
v8::ScriptCompiler::CachedData* cache = CompileRunAndProduceCache(source);
@@ -2137,7 +2313,6 @@ TEST(CodeSerializerIsolates) {
.ToLocalChecked();
}
CHECK(!cache->rejected);
- CheckDeserializedFlag(script);
v8::Local<v8::Value> result = script->BindToCurrentContext()
->Run(isolate2->GetCurrentContext())
.ToLocalChecked();
@@ -2184,7 +2359,6 @@ TEST(CodeSerializerIsolatesEager) {
.ToLocalChecked();
}
CHECK(!cache->rejected);
- CheckDeserializedFlag(script);
v8::Local<v8::Value> result = script->BindToCurrentContext()
->Run(isolate2->GetCurrentContext())
.ToLocalChecked();
@@ -2200,10 +2374,8 @@ TEST(CodeSerializerIsolatesEager) {
TEST(CodeSerializerAfterExecute) {
// We test that no compilations happen when running this code. Forcing
// to always optimize breaks this test.
- bool prev_opt_value = FLAG_opt;
bool prev_always_opt_value = FLAG_always_opt;
FLAG_always_opt = false;
- FLAG_opt = false;
const char* source = "function f() { return 'abc'; }; f() + 'def'";
v8::ScriptCompiler::CachedData* cache =
CompileRunAndProduceCache(source, CodeCacheType::kAfterExecute);
@@ -2230,11 +2402,10 @@ TEST(CodeSerializerAfterExecute) {
.ToLocalChecked();
}
CHECK(!cache->rejected);
- CheckDeserializedFlag(script);
Handle<SharedFunctionInfo> sfi = v8::Utils::OpenHandle(*script);
CHECK(sfi->HasBytecodeArray());
- BytecodeArray* bytecode = sfi->GetBytecodeArray();
+ BytecodeArray bytecode = sfi->GetBytecodeArray();
CHECK_EQ(bytecode->interrupt_budget(),
interpreter::Interpreter::InterruptBudget());
CHECK_EQ(bytecode->osr_loop_nesting_level(), 0);
@@ -2256,7 +2427,6 @@ TEST(CodeSerializerAfterExecute) {
// Restore the flags.
FLAG_always_opt = prev_always_opt_value;
- FLAG_opt = prev_opt_value;
}
TEST(CodeSerializerFlagChange) {
@@ -2373,7 +2543,6 @@ TEST(CodeSerializerWithHarmonyScoping) {
isolate2, &source, v8::ScriptCompiler::kConsumeCodeCache)
.ToLocalChecked();
}
- CheckDeserializedFlag(script);
v8::Local<v8::Value> result = script->BindToCurrentContext()
->Run(isolate2->GetCurrentContext())
.ToLocalChecked();
@@ -2441,7 +2610,7 @@ TEST(SnapshotCreatorMultipleContexts) {
params.snapshot_blob = &blob;
params.array_buffer_allocator = CcTest::array_buffer_allocator();
// Test-appropriate equivalent of v8::Isolate::New.
- v8::Isolate* isolate = TestIsolate::New(params);
+ v8::Isolate* isolate = TestSerializer::NewIsolate(params);
{
v8::Isolate::Scope isolate_scope(isolate);
{
@@ -2578,7 +2747,7 @@ TEST(SnapshotCreatorExternalReferences) {
params.array_buffer_allocator = CcTest::array_buffer_allocator();
params.external_references = original_external_references;
// Test-appropriate equivalent of v8::Isolate::New.
- v8::Isolate* isolate = TestIsolate::New(params);
+ v8::Isolate* isolate = TestSerializer::NewIsolate(params);
{
v8::Isolate::Scope isolate_scope(isolate);
v8::HandleScope handle_scope(isolate);
@@ -2603,7 +2772,7 @@ TEST(SnapshotCreatorExternalReferences) {
params.array_buffer_allocator = CcTest::array_buffer_allocator();
params.external_references = replaced_external_references;
// Test-appropriate equivalent of v8::Isolate::New.
- v8::Isolate* isolate = TestIsolate::New(params);
+ v8::Isolate* isolate = TestSerializer::NewIsolate(params);
{
v8::Isolate::Scope isolate_scope(isolate);
v8::HandleScope handle_scope(isolate);
@@ -2649,7 +2818,7 @@ TEST(SnapshotCreatorShortExternalReferences) {
params.array_buffer_allocator = CcTest::array_buffer_allocator();
params.external_references = short_external_references;
// Test-appropriate equivalent of v8::Isolate::New.
- v8::Isolate* isolate = TestIsolate::New(params);
+ v8::Isolate* isolate = TestSerializer::NewIsolate(params);
{
v8::Isolate::Scope isolate_scope(isolate);
v8::HandleScope handle_scope(isolate);
@@ -2707,7 +2876,7 @@ TEST(SnapshotCreatorNoExternalReferencesDefault) {
params.array_buffer_allocator = CcTest::array_buffer_allocator();
params.external_references = nullptr;
// Test-appropriate equivalent of v8::Isolate::New.
- v8::Isolate* isolate = TestIsolate::New(params);
+ v8::Isolate* isolate = TestSerializer::NewIsolate(params);
{
v8::Isolate::Scope isolate_scope(isolate);
v8::HandleScope handle_scope(isolate);
@@ -2753,7 +2922,7 @@ TEST(SnapshotCreatorPreparseDataAndNoOuterScope) {
params.snapshot_blob = &blob;
params.array_buffer_allocator = CcTest::array_buffer_allocator();
// Test-appropriate equivalent of v8::Isolate::New.
- v8::Isolate* isolate = TestIsolate::New(params);
+ v8::Isolate* isolate = TestSerializer::NewIsolate(params);
{
v8::Isolate::Scope isolate_scope(isolate);
v8::HandleScope handle_scope(isolate);
@@ -2793,7 +2962,7 @@ TEST(SnapshotCreatorArrayJoinWithKeep) {
params.snapshot_blob = &blob;
params.array_buffer_allocator = CcTest::array_buffer_allocator();
// Test-appropriate equivalent of v8::Isolate::New.
- v8::Isolate* isolate = TestIsolate::New(params);
+ v8::Isolate* isolate = TestSerializer::NewIsolate(params);
{
v8::Isolate::Scope isolate_scope(isolate);
v8::HandleScope handle_scope(isolate);
@@ -2817,7 +2986,7 @@ TEST(SnapshotCreatorNoExternalReferencesCustomFail1) {
params.array_buffer_allocator = CcTest::array_buffer_allocator();
params.external_references = nullptr;
// Test-appropriate equivalent of v8::Isolate::New.
- v8::Isolate* isolate = TestIsolate::New(params);
+ v8::Isolate* isolate = TestSerializer::NewIsolate(params);
{
v8::Isolate::Scope isolate_scope(isolate);
v8::HandleScope handle_scope(isolate);
@@ -2842,7 +3011,7 @@ TEST(SnapshotCreatorNoExternalReferencesCustomFail2) {
params.array_buffer_allocator = CcTest::array_buffer_allocator();
params.external_references = nullptr;
// Test-appropriate equivalent of v8::Isolate::New.
- v8::Isolate* isolate = TestIsolate::New(params);
+ v8::Isolate* isolate = TestSerializer::NewIsolate(params);
{
v8::Isolate::Scope isolate_scope(isolate);
v8::HandleScope handle_scope(isolate);
@@ -2886,8 +3055,8 @@ TEST(SnapshotCreatorTemplates) {
{
InternalFieldData* a1 = new InternalFieldData{11};
- InternalFieldData* b0 = new InternalFieldData{20};
- InternalFieldData* c0 = new InternalFieldData{30};
+ InternalFieldData* b1 = new InternalFieldData{20};
+ InternalFieldData* c1 = new InternalFieldData{30};
v8::SnapshotCreator creator(original_external_references);
v8::Isolate* isolate = creator.GetIsolate();
@@ -2923,19 +3092,23 @@ TEST(SnapshotCreatorTemplates) {
v8::External::New(isolate, nullptr);
v8::Local<v8::External> field_external =
v8::External::New(isolate, &serialized_static_field);
+
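+  // Internal field layout: slot 0 links a -> b -> c, slot 1 holds aligned
+  // data pointers, and slot 2 holds an External (or a plain number on c).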
a->SetInternalField(0, b);
+ b->SetInternalField(0, c);
+
a->SetAlignedPointerInInternalField(1, a1);
- b->SetAlignedPointerInInternalField(0, b0);
- b->SetInternalField(1, c);
- c->SetAlignedPointerInInternalField(0, c0);
- c->SetInternalField(1, null_external);
- c->SetInternalField(2, field_external);
+ b->SetAlignedPointerInInternalField(1, b1);
+ c->SetAlignedPointerInInternalField(1, c1);
+
+ a->SetInternalField(2, null_external);
+ b->SetInternalField(2, field_external);
+ c->SetInternalField(2, v8_num(35));
CHECK(context->Global()->Set(context, v8_str("a"), a).FromJust());
CHECK_EQ(0u,
creator.AddContext(context, v8::SerializeInternalFieldsCallback(
SerializeInternalFields,
- reinterpret_cast<void*>(2016))));
+ reinterpret_cast<void*>(2000))));
CHECK_EQ(0u, creator.AddTemplate(callback));
CHECK_EQ(1u, creator.AddTemplate(global_template));
}
@@ -2943,8 +3116,8 @@ TEST(SnapshotCreatorTemplates) {
creator.CreateBlob(v8::SnapshotCreator::FunctionCodeHandling::kClear);
delete a1;
- delete b0;
- delete c0;
+ delete b1;
+ delete c1;
}
{
@@ -2953,7 +3126,7 @@ TEST(SnapshotCreatorTemplates) {
params.array_buffer_allocator = CcTest::array_buffer_allocator();
params.external_references = original_external_references;
// Test-appropriate equivalent of v8::Isolate::New.
- v8::Isolate* isolate = TestIsolate::New(params);
+ v8::Isolate* isolate = TestSerializer::NewIsolate(params);
{
v8::Isolate::Scope isolate_scope(isolate);
{
@@ -3000,30 +3173,34 @@ TEST(SnapshotCreatorTemplates) {
.ToLocalChecked();
v8::Local<v8::Object> b =
a->GetInternalField(0)->ToObject(context).ToLocalChecked();
+ v8::Local<v8::Object> c =
+ b->GetInternalField(0)->ToObject(context).ToLocalChecked();
+
InternalFieldData* a1 = reinterpret_cast<InternalFieldData*>(
a->GetAlignedPointerFromInternalField(1));
v8::Local<v8::Value> a2 = a->GetInternalField(2);
- InternalFieldData* b0 = reinterpret_cast<InternalFieldData*>(
- b->GetAlignedPointerFromInternalField(0));
- v8::Local<v8::Object> c =
- b->GetInternalField(1)->ToObject(context).ToLocalChecked();
+ InternalFieldData* b1 = reinterpret_cast<InternalFieldData*>(
+ b->GetAlignedPointerFromInternalField(1));
v8::Local<v8::Value> b2 = b->GetInternalField(2);
- InternalFieldData* c0 = reinterpret_cast<InternalFieldData*>(
- c->GetAlignedPointerFromInternalField(0));
- v8::Local<v8::Value> c1 = c->GetInternalField(1);
+ v8::Local<v8::Value> c0 = c->GetInternalField(0);
+ InternalFieldData* c1 = reinterpret_cast<InternalFieldData*>(
+ c->GetAlignedPointerFromInternalField(1));
v8::Local<v8::Value> c2 = c->GetInternalField(2);
+ CHECK(c0->IsUndefined());
+
CHECK_EQ(11u, a1->data);
- CHECK(a2->IsUndefined());
- CHECK_EQ(20u, b0->data);
- CHECK(b2->IsUndefined());
- CHECK_EQ(30u, c0->data);
- CHECK(c1->IsExternal());
- CHECK_NULL(v8::Local<v8::External>::Cast(c1)->Value());
+ CHECK_EQ(20u, b1->data);
+ CHECK_EQ(30u, c1->data);
+
+ CHECK(a2->IsExternal());
+ CHECK_NULL(v8::Local<v8::External>::Cast(a2)->Value());
+ CHECK(b2->IsExternal());
CHECK_EQ(static_cast<void*>(&serialized_static_field),
- v8::Local<v8::External>::Cast(c2)->Value());
+ v8::Local<v8::External>::Cast(b2)->Value());
+ CHECK(c2->IsInt32() && c2->Int32Value(context).FromJust() == 35);
// Accessing out of bounds returns an empty MaybeHandle.
CHECK(v8::ObjectTemplate::FromSnapshot(isolate, 2).IsEmpty());
@@ -3106,7 +3283,7 @@ TEST(SnapshotCreatorAddData) {
params.snapshot_blob = &blob;
params.array_buffer_allocator = CcTest::array_buffer_allocator();
// Test-appropriate equivalent of v8::Isolate::New.
- v8::Isolate* isolate = TestIsolate::New(params);
+ v8::Isolate* isolate = TestSerializer::NewIsolate(params);
{
v8::Isolate::Scope isolate_scope(isolate);
v8::HandleScope handle_scope(isolate);
@@ -3212,7 +3389,7 @@ TEST(SnapshotCreatorAddData) {
params.snapshot_blob = &blob;
params.array_buffer_allocator = CcTest::array_buffer_allocator();
// Test-appropriate equivalent of v8::Isolate::New.
- v8::Isolate* isolate = TestIsolate::New(params);
+ v8::Isolate* isolate = TestSerializer::NewIsolate(params);
{
v8::Isolate::Scope isolate_scope(isolate);
v8::HandleScope handle_scope(isolate);
@@ -3355,7 +3532,7 @@ TEST(SnapshotCreatorIncludeGlobalProxy) {
params.array_buffer_allocator = CcTest::array_buffer_allocator();
params.external_references = original_external_references;
// Test-appropriate equivalent of v8::Isolate::New.
- v8::Isolate* isolate = TestIsolate::New(params);
+ v8::Isolate* isolate = TestSerializer::NewIsolate(params);
{
v8::Isolate::Scope isolate_scope(isolate);
// We can introduce new extensions, which could override functions already
@@ -3460,6 +3637,7 @@ UNINITIALIZED_TEST(ReinitializeHashSeedNotRehashable) {
i::FLAG_rehash_snapshot = true;
i::FLAG_hash_seed = 42;
i::FLAG_allow_natives_syntax = true;
+ DisableEmbeddedBlobRefcounting();
v8::StartupData blob;
{
v8::SnapshotCreator creator;
@@ -3498,6 +3676,7 @@ UNINITIALIZED_TEST(ReinitializeHashSeedNotRehashable) {
}
isolate->Dispose();
delete[] blob.data;
+ FreeCurrentEmbeddedBlob();
}
UNINITIALIZED_TEST(ReinitializeHashSeedRehashable) {
@@ -3505,6 +3684,7 @@ UNINITIALIZED_TEST(ReinitializeHashSeedRehashable) {
i::FLAG_rehash_snapshot = true;
i::FLAG_hash_seed = 42;
i::FLAG_allow_natives_syntax = true;
+ DisableEmbeddedBlobRefcounting();
v8::StartupData blob;
{
v8::SnapshotCreator creator;
@@ -3568,6 +3748,7 @@ UNINITIALIZED_TEST(ReinitializeHashSeedRehashable) {
}
isolate->Dispose();
delete[] blob.data;
+ FreeCurrentEmbeddedBlob();
}
TEST(SerializationStats) {
@@ -3587,12 +3768,12 @@ TEST(SerializationStats) {
}
}
-void CheckSFIsAreWeak(WeakFixedArray* sfis, Isolate* isolate) {
+void CheckSFIsAreWeak(WeakFixedArray sfis, Isolate* isolate) {
CHECK_GT(sfis->length(), 0);
int no_of_weak = 0;
for (int i = 0; i < sfis->length(); ++i) {
- MaybeObject* maybe_object = sfis->Get(i);
- HeapObject* heap_object;
+ MaybeObject maybe_object = sfis->Get(i);
+ HeapObject heap_object;
CHECK(maybe_object->IsWeakOrCleared() ||
(maybe_object->GetHeapObjectIfStrong(&heap_object) &&
heap_object->IsUndefined(isolate)));
@@ -3629,7 +3810,7 @@ TEST(WeakArraySerializizationInSnapshot) {
v8::Isolate::CreateParams create_params;
create_params.snapshot_blob = &blob;
create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
- v8::Isolate* isolate = TestIsolate::New(create_params);
+ v8::Isolate* isolate = TestSerializer::NewIsolate(create_params);
{
v8::Isolate::Scope i_scope(isolate);
v8::HandleScope h_scope(isolate);
@@ -3646,7 +3827,7 @@ TEST(WeakArraySerializizationInSnapshot) {
Handle<JSFunction>::cast(v8::Utils::OpenHandle(*x));
// Verify that the pointers in shared_function_infos are weak.
- WeakFixedArray* sfis =
+ WeakFixedArray sfis =
Script::cast(function->shared()->script())->shared_function_infos();
CheckSFIsAreWeak(sfis, CcTest::i_isolate());
}
@@ -3676,7 +3857,7 @@ TEST(WeakArraySerializationInCodeCache) {
isolate, src, src, cache, v8::ScriptCompiler::kConsumeCodeCache);
// Verify that the pointers in shared_function_infos are weak.
- WeakFixedArray* sfis = Script::cast(copy->script())->shared_function_infos();
+ WeakFixedArray sfis = Script::cast(copy->script())->shared_function_infos();
CheckSFIsAreWeak(sfis, isolate);
delete cache;
diff --git a/deps/v8/test/cctest/test-smi-lexicographic-compare.cc b/deps/v8/test/cctest/test-smi-lexicographic-compare.cc
index 58617fd5c2..7e4f76698f 100644
--- a/deps/v8/test/cctest/test-smi-lexicographic-compare.cc
+++ b/deps/v8/test/cctest/test-smi-lexicographic-compare.cc
@@ -5,6 +5,7 @@
#include <set>
#include "src/objects-inl.h"
+#include "src/objects/smi.h"
#include "src/v8.h"
#include "test/cctest/cctest.h"
@@ -13,7 +14,7 @@ namespace internal {
namespace {
-void AddSigned(std::set<Smi*>& smis, int64_t x) {
+void AddSigned(std::set<Smi>& smis, int64_t x) {
if (!Smi::IsValid(x)) return;
smis.insert(Smi::FromInt(static_cast<int>(x)));
@@ -21,7 +22,7 @@ void AddSigned(std::set<Smi*>& smis, int64_t x) {
}
// Uses std::lexicographical_compare twice to convert the result to -1, 0 or 1.
-int ExpectedCompareResult(Smi* a, Smi* b) {
+int ExpectedCompareResult(Smi a, Smi b) {
std::string str_a = std::to_string(a->value());
std::string str_b = std::to_string(b->value());
bool expected_a_lt_b = std::lexicographical_compare(
@@ -39,8 +40,8 @@ int ExpectedCompareResult(Smi* a, Smi* b) {
}
}
-bool Test(Isolate* isolate, Smi* a, Smi* b) {
- int actual = Smi::LexicographicCompare(isolate, a, b)->value();
+bool Test(Isolate* isolate, Smi a, Smi b) {
+ int actual = Smi(Smi::LexicographicCompare(isolate, a, b)).value();
int expected = ExpectedCompareResult(a, b);
return actual == expected;
@@ -52,7 +53,7 @@ TEST(TestSmiLexicographicCompare) {
Isolate* isolate = CcTest::InitIsolateOnce();
HandleScope scope(isolate);
- std::set<Smi*> smis;
+ std::set<Smi> smis;
for (int64_t xb = 1; xb <= Smi::kMaxValue; xb *= 10) {
for (int64_t xf = 0; xf <= 9; ++xf) {
@@ -68,8 +69,8 @@ TEST(TestSmiLexicographicCompare) {
}
}
- for (Smi* a : smis) {
- for (Smi* b : smis) {
+ for (Smi a : smis) {
+ for (Smi b : smis) {
CHECK(Test(isolate, a, b));
}
}
diff --git a/deps/v8/test/cctest/test-strings.cc b/deps/v8/test/cctest/test-strings.cc
index 9326c347ec..9afde32287 100644
--- a/deps/v8/test/cctest/test-strings.cc
+++ b/deps/v8/test/cctest/test-strings.cc
@@ -35,6 +35,7 @@
#include "src/v8.h"
#include "src/api-inl.h"
+#include "src/base/platform/elapsed-timer.h"
#include "src/heap/factory.h"
#include "src/messages.h"
#include "src/objects-inl.h"
@@ -277,7 +278,7 @@ class ConsStringGenerationData {
int max_leaves_;
// Cached data.
Handle<String> building_blocks_[kNumberOfBuildingBlocks];
- String* empty_string_;
+ String empty_string_;
MyRandomNumberGenerator rng_;
// Stats.
ConsStringStats stats_;
@@ -318,8 +319,7 @@ void ConsStringGenerationData::Reset() {
rng_.init();
}
-
-void AccumulateStats(ConsString* cons_string, ConsStringStats* stats) {
+void AccumulateStats(ConsString cons_string, ConsStringStats* stats) {
int left_length = cons_string->first()->length();
int right_length = cons_string->second()->length();
CHECK(cons_string->length() == left_length + right_length);
@@ -347,7 +347,6 @@ void AccumulateStats(ConsString* cons_string, ConsStringStats* stats) {
}
}
-
void AccumulateStats(Handle<String> cons_string, ConsStringStats* stats) {
DisallowHeapAllocation no_allocation;
if (cons_string->IsConsString()) {
@@ -357,13 +356,12 @@ void AccumulateStats(Handle<String> cons_string, ConsStringStats* stats) {
stats->chars_ += cons_string->length();
}
-
-void AccumulateStatsWithOperator(
- ConsString* cons_string, ConsStringStats* stats) {
+void AccumulateStatsWithOperator(ConsString cons_string,
+ ConsStringStats* stats) {
ConsStringIterator iter(cons_string);
- String* string;
int offset;
- while (nullptr != (string = iter.Next(&offset))) {
+ for (String string = iter.Next(&offset); !string.is_null();
+ string = iter.Next(&offset)) {
// Accumulate stats.
CHECK_EQ(0, offset);
stats->leaves_++;
@@ -371,7 +369,6 @@ void AccumulateStatsWithOperator(
}
}
-
void VerifyConsString(Handle<String> root, ConsStringGenerationData* data) {
// Verify basic data.
CHECK(root->IsConsString());
@@ -640,8 +637,7 @@ TEST(ConsStringWithEmptyFirstFlatten) {
CHECK_EQ(initial_length, flat->length());
}
-static void VerifyCharacterStream(
- String* flat_string, String* cons_string) {
+static void VerifyCharacterStream(String flat_string, String cons_string) {
// We do not want to test ConsString traversal on a flat string.
CHECK(flat_string->IsFlat() && !flat_string->IsConsString());
CHECK(cons_string->IsConsString());
@@ -668,7 +664,6 @@ static void VerifyCharacterStream(
}
}
-
static inline void PrintStats(const ConsStringGenerationData& data) {
#ifdef DEBUG
printf("%s: [%u], %s: [%u], %s: [%u], %s: [%u], %s: [%u], %s: [%u]\n",
@@ -707,10 +702,9 @@ void TestStringCharacterStream(BuildString build, int test_cases) {
cons_string_stats.VerifyEqual(flat_string_stats);
cons_string_stats.VerifyEqual(data.stats_);
VerifyConsString(cons_string, &data);
- String* flat_string_ptr =
- flat_string->IsConsString() ?
- ConsString::cast(*flat_string)->first() :
- *flat_string;
+ String flat_string_ptr = flat_string->IsConsString()
+ ? ConsString::cast(*flat_string)->first()
+ : *flat_string;
VerifyCharacterStream(flat_string_ptr, *cons_string);
}
}
@@ -964,6 +958,109 @@ TEST(Utf8Conversion) {
}
}
+TEST(Utf8ConversionPerf) {
+ // Smoke test for converting strings to utf-8.
+ LocalContext context;
+ v8::HandleScope handle_scope(CcTest::isolate());
+ v8::Local<v8::String> ascii_string =
+ CompileRun("'abc'.repeat(1E6)").As<v8::String>();
+ v8::Local<v8::String> one_byte_string =
+ CompileRun("'\\u0255\\u0254\\u0253'.repeat(1E6)").As<v8::String>();
+ v8::Local<v8::String> two_byte_string =
+ CompileRun("'\\u2255\\u2254\\u2253'.repeat(1E6)").As<v8::String>();
+ v8::Local<v8::String> surrogate_string =
+ CompileRun("'\\u{12345}\\u2244'.repeat(1E6)").As<v8::String>();
+ int size = 1E7;
+ char* buffer = new char[4 * size];
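+  // Each string is written three times: a cold run, a warm repeat, and a run
+  // with the full 4 * size buffer so that no length cut-off applies.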
+ {
+ v8::base::ElapsedTimer timer;
+ timer.Start();
+ ascii_string->WriteUtf8(CcTest::isolate(), buffer, size, nullptr);
+ printf("ascii string %0.3f\n", timer.Elapsed().InMillisecondsF());
+ timer.Stop();
+ }
+ {
+ v8::base::ElapsedTimer timer;
+ timer.Start();
+ ascii_string->WriteUtf8(CcTest::isolate(), buffer, size, nullptr);
+ printf("ascii string %0.3f\n", timer.Elapsed().InMillisecondsF());
+ timer.Stop();
+ }
+ {
+ v8::base::ElapsedTimer timer;
+ timer.Start();
+ ascii_string->WriteUtf8(CcTest::isolate(), buffer, 4 * size, nullptr);
+ printf("ascii string %0.3f\n", timer.Elapsed().InMillisecondsF());
+ timer.Stop();
+ }
+
+ {
+ v8::base::ElapsedTimer timer;
+ timer.Start();
+ one_byte_string->WriteUtf8(CcTest::isolate(), buffer, size, nullptr);
+ printf("one byte string %0.3f\n", timer.Elapsed().InMillisecondsF());
+ timer.Stop();
+ }
+ {
+ v8::base::ElapsedTimer timer;
+ timer.Start();
+ one_byte_string->WriteUtf8(CcTest::isolate(), buffer, size, nullptr);
+ printf("one byte string %0.3f\n", timer.Elapsed().InMillisecondsF());
+ timer.Stop();
+ }
+ {
+ v8::base::ElapsedTimer timer;
+ timer.Start();
+ one_byte_string->WriteUtf8(CcTest::isolate(), buffer, 4 * size, nullptr);
+ printf("one byte string %0.3f\n", timer.Elapsed().InMillisecondsF());
+ timer.Stop();
+ }
+
+ {
+ v8::base::ElapsedTimer timer;
+ timer.Start();
+ two_byte_string->WriteUtf8(CcTest::isolate(), buffer, size, nullptr);
+ printf("two byte string %0.3f\n", timer.Elapsed().InMillisecondsF());
+ timer.Stop();
+ }
+ {
+ v8::base::ElapsedTimer timer;
+ timer.Start();
+ two_byte_string->WriteUtf8(CcTest::isolate(), buffer, size, nullptr);
+ printf("two byte string %0.3f\n", timer.Elapsed().InMillisecondsF());
+ timer.Stop();
+ }
+ {
+ v8::base::ElapsedTimer timer;
+ timer.Start();
+ two_byte_string->WriteUtf8(CcTest::isolate(), buffer, 4 * size, nullptr);
+ printf("two byte string %0.3f\n", timer.Elapsed().InMillisecondsF());
+ timer.Stop();
+ }
+
+ {
+ v8::base::ElapsedTimer timer;
+ timer.Start();
+ surrogate_string->WriteUtf8(CcTest::isolate(), buffer, size, nullptr);
+ printf("surrogate string %0.3f\n", timer.Elapsed().InMillisecondsF());
+ timer.Stop();
+ }
+ {
+ v8::base::ElapsedTimer timer;
+ timer.Start();
+ surrogate_string->WriteUtf8(CcTest::isolate(), buffer, size, nullptr);
+ printf("surrogate string %0.3f\n", timer.Elapsed().InMillisecondsF());
+ timer.Stop();
+ }
+ {
+ v8::base::ElapsedTimer timer;
+ timer.Start();
+ surrogate_string->WriteUtf8(CcTest::isolate(), buffer, 4 * size, nullptr);
+ printf("surrogate string %0.3f\n", timer.Elapsed().InMillisecondsF());
+ timer.Stop();
+ }
+ delete[] buffer;
+}
TEST(ExternalShortStringAdd) {
LocalContext context;
@@ -1071,6 +1168,27 @@ TEST(ExternalShortStringAdd) {
CHECK_EQ(0, CompileRun(source)->Int32Value(context.local()).FromJust());
}
+TEST(ReplaceInvalidUtf8) {
+ LocalContext context;
+ v8::HandleScope handle_scope(CcTest::isolate());
+ v8::Local<v8::String> string = CompileRun("'ab\\ud800cd'").As<v8::String>();
+ char buffer[7];
+ memset(buffer, 0, 7);
+ int chars_written = 0;
+ int size = string->WriteUtf8(CcTest::isolate(), buffer, 7, &chars_written,
+ v8::String::REPLACE_INVALID_UTF8);
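+  // 7 bytes: 'a' and 'b', then U+FFFD (EF BF BD) for the lone surrogate,
+  // then 'c' and 'd'; that is 5 characters written.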
+ CHECK_EQ(7, size);
+ CHECK_EQ(5, chars_written);
+ CHECK_EQ(0, memcmp("\x61\x62\xef\xbf\xbd\x63\x64", buffer, 7));
+
+ memset(buffer, 0, 7);
+ chars_written = 0;
+ size = string->WriteUtf8(CcTest::isolate(), buffer, 6, &chars_written,
+ v8::String::REPLACE_INVALID_UTF8);
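+  // With room for only 6 bytes, the trailing 'd' no longer fits.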
+ CHECK_EQ(6, size);
+ CHECK_EQ(4, chars_written);
+ CHECK_EQ(0, memcmp("\x61\x62\xef\xbf\xbd\x63", buffer, 6));
+}
TEST(JSONStringifySliceMadeExternal) {
if (!FLAG_string_slices) return;
@@ -1310,8 +1428,8 @@ TEST(InternalizeExternal) {
CHECK(!i::Heap::InNewSpace(*string));
CHECK_EQ(
isolate->factory()->string_table()->LookupStringIfExists_NoAllocate(
- isolate, *string),
- Smi::FromInt(ResultSentinel::kNotFound));
+ isolate, string->ptr()),
+ Smi::FromInt(ResultSentinel::kNotFound).ptr());
factory->InternalizeName(string);
CHECK(string->IsExternalString());
CHECK(string->IsInternalizedString());
@@ -1620,7 +1738,7 @@ TEST(FormatMessage) {
Handle<String> arg1 = isolate->factory()->NewStringFromAsciiChecked("arg1");
Handle<String> arg2 = isolate->factory()->NewStringFromAsciiChecked("arg2");
Handle<String> result =
- MessageTemplate::FormatMessage(
+ MessageFormatter::FormatMessage(
isolate, MessageTemplate::kPropertyNotFunction, arg0, arg1, arg2)
.ToHandleChecked();
Handle<String> expected = isolate->factory()->NewStringFromAsciiChecked(
@@ -1819,7 +1937,8 @@ TEST(Regress876759) {
{
Handle<SeqTwoByteString> raw =
factory->NewRawTwoByteString(kLength).ToHandleChecked();
- CopyChars(raw->GetChars(), two_byte_buf, kLength);
+ DisallowHeapAllocation no_gc;
+ CopyChars(raw->GetChars(no_gc), two_byte_buf, kLength);
parent = raw;
}
CHECK(parent->IsTwoByteRepresentation());
@@ -1838,9 +1957,8 @@ TEST(Regress876759) {
CHECK(grandparent->IsOneByteRepresentation());
CHECK(parent->IsTwoByteRepresentation());
CHECK(sliced->IsTwoByteRepresentation());
- // The *Underneath versions return the correct representation.
- CHECK(sliced->IsOneByteRepresentationUnderneath());
- CHECK(!sliced->IsTwoByteRepresentationUnderneath());
+ // The *Underneath version returns the correct representation.
+ CHECK(String::IsOneByteRepresentationUnderneath(*sliced));
}
} // namespace test_strings
diff --git a/deps/v8/test/cctest/test-sync-primitives-arm.cc b/deps/v8/test/cctest/test-sync-primitives-arm.cc
index d3bed61622..e32136b81d 100644
--- a/deps/v8/test/cctest/test-sync-primitives-arm.cc
+++ b/deps/v8/test/cctest/test-sync-primitives-arm.cc
@@ -292,7 +292,7 @@ class MemoryAccessThread : public v8::base::Thread {
Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate_);
{
v8::Isolate::Scope scope(isolate_);
- v8::base::LockGuard<v8::base::Mutex> lock_guard(&mutex_);
+ v8::base::MutexGuard lock_guard(&mutex_);
while (!is_finished_) {
while (!(has_request_ || is_finished_)) {
has_request_cv_.Wait(&mutex_);
@@ -313,7 +313,7 @@ class MemoryAccessThread : public v8::base::Thread {
void NextAndWait(TestData* test_data, MemoryAccess access) {
DCHECK(!has_request_);
- v8::base::LockGuard<v8::base::Mutex> lock_guard(&mutex_);
+ v8::base::MutexGuard lock_guard(&mutex_);
test_data_ = test_data;
access_ = access;
has_request_ = true;
@@ -325,7 +325,7 @@ class MemoryAccessThread : public v8::base::Thread {
}
void Finish() {
- v8::base::LockGuard<v8::base::Mutex> lock_guard(&mutex_);
+ v8::base::MutexGuard lock_guard(&mutex_);
is_finished_ = true;
has_request_cv_.NotifyOne();
}
diff --git a/deps/v8/test/cctest/test-sync-primitives-arm64.cc b/deps/v8/test/cctest/test-sync-primitives-arm64.cc
index e04f44f0f2..3ea282e972 100644
--- a/deps/v8/test/cctest/test-sync-primitives-arm64.cc
+++ b/deps/v8/test/cctest/test-sync-primitives-arm64.cc
@@ -195,8 +195,7 @@ void TestInvalidateExclusiveAccess(TestData initial_data, MemoryAccess access1,
int expected_res, TestData expected_data) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler masm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler masm(isolate, v8::internal::CodeObjectRequired::kYes);
AssembleLoadExcl(&masm, access1, w1, x1);
AssembleMemoryAccess(&masm, access2, w3, w2, x1);
@@ -271,8 +270,7 @@ namespace {
int ExecuteMemoryAccess(Isolate* isolate, TestData* test_data,
MemoryAccess access) {
HandleScope scope(isolate);
- MacroAssembler masm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler masm(isolate, v8::internal::CodeObjectRequired::kYes);
AssembleMemoryAccess(&masm, access, w0, w2, x1);
__ br(lr);
@@ -303,7 +301,7 @@ class MemoryAccessThread : public v8::base::Thread {
Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate_);
{
v8::Isolate::Scope scope(isolate_);
- v8::base::LockGuard<v8::base::Mutex> lock_guard(&mutex_);
+ v8::base::MutexGuard lock_guard(&mutex_);
while (!is_finished_) {
while (!(has_request_ || is_finished_)) {
has_request_cv_.Wait(&mutex_);
@@ -324,7 +322,7 @@ class MemoryAccessThread : public v8::base::Thread {
void NextAndWait(TestData* test_data, MemoryAccess access) {
DCHECK(!has_request_);
- v8::base::LockGuard<v8::base::Mutex> lock_guard(&mutex_);
+ v8::base::MutexGuard lock_guard(&mutex_);
test_data_ = test_data;
access_ = access;
has_request_ = true;
@@ -336,7 +334,7 @@ class MemoryAccessThread : public v8::base::Thread {
}
void Finish() {
- v8::base::LockGuard<v8::base::Mutex> lock_guard(&mutex_);
+ v8::base::MutexGuard lock_guard(&mutex_);
is_finished_ = true;
has_request_cv_.NotifyOne();
}
diff --git a/deps/v8/test/cctest/test-thread-termination.cc b/deps/v8/test/cctest/test-thread-termination.cc
index 16a18c51d0..72d01c2b7b 100644
--- a/deps/v8/test/cctest/test-thread-termination.cc
+++ b/deps/v8/test/cctest/test-thread-termination.cc
@@ -315,6 +315,7 @@ void ReenterAfterTermination(const v8::FunctionCallbackInfo<v8::Value>& args) {
CHECK(try_catch.Exception()->IsNull());
CHECK(try_catch.Message().IsEmpty());
CHECK(!try_catch.CanContinue());
+ CHECK(try_catch.HasTerminated());
CHECK(isolate->IsExecutionTerminating());
script = v8::Local<v8::String>::New(isolate, reenter_script_2);
v8::MaybeLocal<v8::Script> compiled_script =
@@ -357,6 +358,45 @@ TEST(TerminateAndReenterFromThreadItself) {
reenter_script_2.Reset();
}
+TEST(TerminateAndReenterFromThreadItselfWithOuterTryCatch) {
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ v8::Local<v8::ObjectTemplate> global = CreateGlobalTemplate(
+ isolate, TerminateCurrentThread, ReenterAfterTermination);
+ v8::Local<v8::Context> context = v8::Context::New(isolate, nullptr, global);
+ v8::Context::Scope context_scope(context);
+ CHECK(!v8::Isolate::GetCurrent()->IsExecutionTerminating());
+ // Create script strings upfront as it won't work when terminating.
+ reenter_script_1.Reset(isolate, v8_str("function f() {"
+ " var term = true;"
+ " try {"
+ " while(true) {"
+ " if (term) terminate();"
+ " term = false;"
+ " }"
+ " fail();"
+ " } catch(e) {"
+ " fail();"
+ " }"
+ "}"
+ "f()"));
+ reenter_script_2.Reset(isolate, v8_str("function f() { fail(); } f()"));
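+  // The outer TryCatch still observes the termination; the inner JavaScript
+  // try/catch cannot intercept it.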
+ {
+ v8::TryCatch try_catch(isolate);
+ CompileRun("try { loop(); fail(); } catch(e) { fail(); }");
+ CHECK(try_catch.HasCaught());
+ CHECK(try_catch.Exception()->IsNull());
+ CHECK(try_catch.Message().IsEmpty());
+ CHECK(!try_catch.CanContinue());
+ CHECK(try_catch.HasTerminated());
+ CHECK(isolate->IsExecutionTerminating());
+ }
+ CHECK(!isolate->IsExecutionTerminating());
+ // Check we can run JS again after termination.
+ CHECK(CompileRun("function f() { return true; } f()")->IsTrue());
+ reenter_script_1.Reset();
+ reenter_script_2.Reset();
+}
void DoLoopCancelTerminate(const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::TryCatch try_catch(args.GetIsolate());
@@ -746,21 +786,23 @@ TEST(TerminateAndTryCall) {
v8::Local<v8::Context> context = v8::Context::New(isolate, nullptr, global);
v8::Context::Scope context_scope(context);
CHECK(!isolate->IsExecutionTerminating());
- v8::TryCatch try_catch(isolate);
- CHECK(!isolate->IsExecutionTerminating());
- // Terminate execution has been triggered inside TryCall, but re-requested
- // to trigger later.
- CHECK(CompileRun("terminate(); reference_error();").IsEmpty());
- CHECK(try_catch.HasCaught());
- CHECK(!isolate->IsExecutionTerminating());
- v8::Local<v8::Value> value =
- CcTest::global()
- ->Get(isolate->GetCurrentContext(), v8_str("terminate"))
- .ToLocalChecked();
- CHECK(value->IsFunction());
- // The first stack check after terminate has been re-requested fails.
- CHECK(CompileRun("1 + 1").IsEmpty());
- CHECK(!isolate->IsExecutionTerminating());
+ {
+ v8::TryCatch try_catch(isolate);
+ CHECK(!isolate->IsExecutionTerminating());
+ // Terminate execution has been triggered inside TryCall, but re-requested
+ // to trigger later.
+ CHECK(CompileRun("terminate(); reference_error();").IsEmpty());
+ CHECK(try_catch.HasCaught());
+ CHECK(!isolate->IsExecutionTerminating());
+ v8::Local<v8::Value> value =
+ CcTest::global()
+ ->Get(isolate->GetCurrentContext(), v8_str("terminate"))
+ .ToLocalChecked();
+ CHECK(value->IsFunction());
+ // The first stack check after terminate has been re-requested fails.
+ CHECK(CompileRun("1 + 1").IsEmpty());
+ CHECK(isolate->IsExecutionTerminating());
+ }
// V8 then recovers.
v8::Maybe<int32_t> result = CompileRun("2 + 2")->Int32Value(
v8::Isolate::GetCurrent()->GetCurrentContext());
@@ -791,7 +833,7 @@ TEST(TerminateConsole) {
CHECK(!isolate->IsExecutionTerminating());
CHECK(CompileRun("terminate(); console.log(); fail();").IsEmpty());
CHECK(try_catch.HasCaught());
- CHECK(!isolate->IsExecutionTerminating());
+ CHECK(isolate->IsExecutionTerminating());
}
class TerminatorSleeperThread : public v8::base::Thread {
@@ -830,7 +872,7 @@ TEST(TerminateRegExp) {
CHECK(CompileRun("re.test('xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'); fail();")
.IsEmpty());
CHECK(try_catch.HasCaught());
- CHECK(!isolate->IsExecutionTerminating());
+ CHECK(isolate->IsExecutionTerminating());
#endif // V8_INTERPRETED_REGEXP
}
@@ -855,11 +897,11 @@ TEST(TerminateInMicrotask) {
{
v8::Context::Scope context_scope(context2);
CHECK(context2 == isolate->GetCurrentContext());
- CHECK(context2 == isolate->GetEnteredContext());
+ CHECK(context2 == isolate->GetEnteredOrMicrotaskContext());
CHECK(!isolate->IsExecutionTerminating());
isolate->RunMicrotasks();
CHECK(context2 == isolate->GetCurrentContext());
- CHECK(context2 == isolate->GetEnteredContext());
+ CHECK(context2 == isolate->GetEnteredOrMicrotaskContext());
CHECK(try_catch.HasCaught());
CHECK(try_catch.HasTerminated());
}
diff --git a/deps/v8/test/cctest/test-traced-value.cc b/deps/v8/test/cctest/test-traced-value.cc
index 3a33389a3e..101779d242 100644
--- a/deps/v8/test/cctest/test-traced-value.cc
+++ b/deps/v8/test/cctest/test-traced-value.cc
@@ -116,11 +116,27 @@ TEST(Escaping) {
// Cannot use the expected value literal directly in CHECK_EQ
// as it fails to process the # character on Windows.
const char* expected =
- "{\"a\":\"abc\\\"\'\\\\\\\\x\\\"y\'z\\n\\t\\u0017\",\"b\":"
- "\"\\u0001\\u0002\\u0003\\u0004\\u0005\\u0006\\u0007\\u0008\\t\\n\\u000B"
- "\\u000C\\u000D\\u000E\\u000F\\u0010\\u0011\\u0012\\u0013\\u0014\\u0015\\"
- "u0016\\u0017\\u0018\\u0019\\u001A\\u001B\\u001C\\u001D\\u001E\\u001F "
- "!\\\"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\\\]^_`"
- "abcdefghijklmnopqrstuvwxyz{|}~\x7F\"}";
+ R"({"a":"abc\"'\\\\x\"y'z\n\t\u0017","b":"\u0001\u0002\u0003\u0004\u0005)"
+ R"(\u0006\u0007\b\t\n\u000B\f\r\u000E\u000F\u0010\u0011\u0012\u0013)"
+ R"(\u0014\u0015\u0016\u0017\u0018\u0019\u001A\u001B\u001C\u001D\u001E)"
+ R"(\u001F !\"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ)"
+ R"([\\]^_`abcdefghijklmnopqrstuvwxyz{|}~\u007F"})";
+ CHECK_EQ(expected, json);
+}
+
+TEST(Utf8) {
+ const char* string1 = "Люблю тебя, Петра творенье";
+ const char* string2 = "☀\u2600\u26FF";
+ auto value = TracedValue::Create();
+ value->SetString("a", string1);
+ value->SetString("b", string2);
+ // Surrogate pair test. Smile emoji === U+1F601 === \xf0\x9f\x98\x81
+ value->SetString("c", "\U0001F601");
+ std::string json;
+ value->AppendAsTraceFormat(&json);
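+  // The \uXXXX sequences below are C++ universal character names, so the
+  // expected literal holds raw UTF-8 bytes matching the TracedValue output.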
+ const char* expected =
+ "{\"a\":\"\u041B\u044E\u0431\u043B\u044E \u0442\u0435\u0431\u044F, \u041F"
+ "\u0435\u0442\u0440\u0430 \u0442\u0432\u043E\u0440\u0435\u043D\u044C"
+ "\u0435\",\"b\":\"\u2600\u2600\u26FF\",\"c\":\"\xf0\x9f\x98\x81\"}";
CHECK_EQ(expected, json);
}
diff --git a/deps/v8/test/cctest/test-transitions.cc b/deps/v8/test/cctest/test-transitions.cc
index f3658664d9..c4b7451147 100644
--- a/deps/v8/test/cctest/test-transitions.cc
+++ b/deps/v8/test/cctest/test-transitions.cc
@@ -32,16 +32,14 @@ TEST(TransitionArray_SimpleFieldTransitions) {
Handle<Map> map0 = Map::Create(isolate, 0);
Handle<Map> map1 =
- Map::CopyWithField(isolate, map0, name1,
- handle(FieldType::Any(), isolate), attributes,
- PropertyConstness::kMutable, Representation::Tagged(),
- OMIT_TRANSITION)
+ Map::CopyWithField(isolate, map0, name1, FieldType::Any(isolate),
+ attributes, PropertyConstness::kMutable,
+ Representation::Tagged(), OMIT_TRANSITION)
.ToHandleChecked();
Handle<Map> map2 =
- Map::CopyWithField(isolate, map0, name2,
- handle(FieldType::Any(), isolate), attributes,
- PropertyConstness::kMutable, Representation::Tagged(),
- OMIT_TRANSITION)
+ Map::CopyWithField(isolate, map0, name2, FieldType::Any(isolate),
+ attributes, PropertyConstness::kMutable,
+ Representation::Tagged(), OMIT_TRANSITION)
.ToHandleChecked();
CHECK(map0->raw_transitions()->IsSmi());
@@ -68,8 +66,8 @@ TEST(TransitionArray_SimpleFieldTransitions) {
CHECK_EQ(*map2, transitions.SearchTransition(*name2, kData, attributes));
CHECK_EQ(2, transitions.NumberOfTransitions());
for (int i = 0; i < 2; i++) {
- Name* key = transitions.GetKey(i);
- Map* target = transitions.GetTarget(i);
+ Name key = transitions.GetKey(i);
+ Map target = transitions.GetTarget(i);
CHECK((key == *name1 && target == *map1) ||
(key == *name2 && target == *map2));
}
@@ -91,16 +89,14 @@ TEST(TransitionArray_FullFieldTransitions) {
Handle<Map> map0 = Map::Create(isolate, 0);
Handle<Map> map1 =
- Map::CopyWithField(isolate, map0, name1,
- handle(FieldType::Any(), isolate), attributes,
- PropertyConstness::kMutable, Representation::Tagged(),
- OMIT_TRANSITION)
+ Map::CopyWithField(isolate, map0, name1, FieldType::Any(isolate),
+ attributes, PropertyConstness::kMutable,
+ Representation::Tagged(), OMIT_TRANSITION)
.ToHandleChecked();
Handle<Map> map2 =
- Map::CopyWithField(isolate, map0, name2,
- handle(FieldType::Any(), isolate), attributes,
- PropertyConstness::kMutable, Representation::Tagged(),
- OMIT_TRANSITION)
+ Map::CopyWithField(isolate, map0, name2, FieldType::Any(isolate),
+ attributes, PropertyConstness::kMutable,
+ Representation::Tagged(), OMIT_TRANSITION)
.ToHandleChecked();
CHECK(map0->raw_transitions()->IsSmi());
@@ -127,8 +123,8 @@ TEST(TransitionArray_FullFieldTransitions) {
CHECK_EQ(*map2, transitions.SearchTransition(*name2, kData, attributes));
CHECK_EQ(2, transitions.NumberOfTransitions());
for (int i = 0; i < 2; i++) {
- Name* key = transitions.GetKey(i);
- Map* target = transitions.GetTarget(i);
+ Name key = transitions.GetKey(i);
+ Map target = transitions.GetTarget(i);
CHECK((key == *name1 && target == *map1) ||
(key == *name2 && target == *map2));
}
@@ -157,9 +153,8 @@ TEST(TransitionArray_DifferentFieldNames) {
SNPrintF(buffer, "prop%d", i);
Handle<String> name = factory->InternalizeUtf8String(buffer.start());
Handle<Map> map =
- Map::CopyWithField(isolate, map0, name,
- handle(FieldType::Any(), isolate), attributes,
- PropertyConstness::kMutable,
+ Map::CopyWithField(isolate, map0, name, FieldType::Any(isolate),
+ attributes, PropertyConstness::kMutable,
Representation::Tagged(), OMIT_TRANSITION)
.ToHandleChecked();
names[i] = name;
@@ -174,8 +169,8 @@ TEST(TransitionArray_DifferentFieldNames) {
transitions.SearchTransition(*names[i], kData, attributes));
}
for (int i = 0; i < PROPS_COUNT; i++) {
- Name* key = transitions.GetKey(i);
- Map* target = transitions.GetTarget(i);
+ Name key = transitions.GetKey(i);
+ Map target = transitions.GetTarget(i);
for (int j = 0; j < PROPS_COUNT; j++) {
if (*names[i] == key) {
CHECK_EQ(*maps[i], target);
@@ -250,8 +245,7 @@ TEST(TransitionArray_SameFieldNamesDifferentAttributes) {
SNPrintF(buffer, "prop%d", i);
Handle<String> name = factory->InternalizeUtf8String(buffer.start());
Handle<Map> map =
- Map::CopyWithField(isolate, map0, name,
- handle(FieldType::Any(), isolate), NONE,
+ Map::CopyWithField(isolate, map0, name, FieldType::Any(isolate), NONE,
PropertyConstness::kMutable,
Representation::Tagged(), OMIT_TRANSITION)
.ToHandleChecked();
@@ -271,9 +265,8 @@ TEST(TransitionArray_SameFieldNamesDifferentAttributes) {
PropertyAttributes attributes = static_cast<PropertyAttributes>(i);
Handle<Map> map =
- Map::CopyWithField(isolate, map0, name,
- handle(FieldType::Any(), isolate), attributes,
- PropertyConstness::kMutable,
+ Map::CopyWithField(isolate, map0, name, FieldType::Any(isolate),
+ attributes, PropertyConstness::kMutable,
Representation::Tagged(), OMIT_TRANSITION)
.ToHandleChecked();
attr_maps[i] = map;
@@ -291,8 +284,8 @@ TEST(TransitionArray_SameFieldNamesDifferentAttributes) {
// Ensure that info about the other fields is still valid.
CHECK_EQ(PROPS_COUNT + ATTRS_COUNT, transitions.NumberOfTransitions());
for (int i = 0; i < PROPS_COUNT + ATTRS_COUNT; i++) {
- Name* key = transitions.GetKey(i);
- Map* target = transitions.GetTarget(i);
+ Name key = transitions.GetKey(i);
+ Map target = transitions.GetTarget(i);
if (key == *name) {
// Attributes transition.
PropertyAttributes attributes =
diff --git a/deps/v8/test/cctest/test-transitions.h b/deps/v8/test/cctest/test-transitions.h
index 08c5d2cab2..f9def25a56 100644
--- a/deps/v8/test/cctest/test-transitions.h
+++ b/deps/v8/test/cctest/test-transitions.h
@@ -12,7 +12,7 @@ namespace internal {
class TestTransitionsAccessor : public TransitionsAccessor {
public:
- TestTransitionsAccessor(Isolate* isolate, Map* map,
+ TestTransitionsAccessor(Isolate* isolate, Map map,
DisallowHeapAllocation* no_gc)
: TransitionsAccessor(isolate, map, no_gc) {}
TestTransitionsAccessor(Isolate* isolate, Handle<Map> map)
diff --git a/deps/v8/test/cctest/test-unboxed-doubles.cc b/deps/v8/test/cctest/test-unboxed-doubles.cc
index 421407180c..0d1ebe6759 100644
--- a/deps/v8/test/cctest/test-unboxed-doubles.cc
+++ b/deps/v8/test/cctest/test-unboxed-doubles.cc
@@ -9,6 +9,7 @@
#include "src/accessors.h"
#include "src/api-inl.h"
+#include "src/base/overflowing-math.h"
#include "src/compilation-cache.h"
#include "src/execution.h"
#include "src/field-type.h"
@@ -18,9 +19,9 @@
#include "src/heap/spaces.h"
#include "src/ic/ic.h"
#include "src/layout-descriptor.h"
-#include "src/macro-assembler.h"
#include "src/objects-inl.h"
#include "src/objects/api-callbacks.h"
+#include "src/objects/heap-number-inl.h"
#include "src/property.h"
#include "test/cctest/cctest.h"
#include "test/cctest/heap/heap-utils.h"
@@ -36,15 +37,13 @@ namespace test_unboxed_doubles {
// Helper functions.
//
-
static void InitializeVerifiedMapDescriptors(
- Map* map, DescriptorArray* descriptors,
- LayoutDescriptor* layout_descriptor) {
- map->InitializeDescriptors(descriptors, layout_descriptor);
+ Isolate* isolate, Map map, DescriptorArray descriptors,
+ LayoutDescriptor layout_descriptor) {
+ map->InitializeDescriptors(isolate, descriptors, layout_descriptor);
CHECK(layout_descriptor->IsConsistentWithMap(map, true));
}
-
static Handle<String> MakeString(const char* str) {
Isolate* isolate = CcTest::i_isolate();
Factory* factory = isolate->factory();
@@ -68,19 +67,18 @@ Handle<JSObject> GetObject(const char* name) {
.ToLocalChecked())));
}
-
-static double GetDoubleFieldValue(JSObject* obj, FieldIndex field_index) {
+static double GetDoubleFieldValue(JSObject obj, FieldIndex field_index) {
if (obj->IsUnboxedDoubleField(field_index)) {
return obj->RawFastDoublePropertyAt(field_index);
} else {
- Object* value = obj->RawFastPropertyAt(field_index);
+ Object value = obj->RawFastPropertyAt(field_index);
CHECK(value->IsMutableHeapNumber());
return MutableHeapNumber::cast(value)->value();
}
}
-void WriteToField(JSObject* object, int descriptor, Object* value) {
- DescriptorArray* descriptors = object->map()->instance_descriptors();
+void WriteToField(JSObject object, int descriptor, Object value) {
+ DescriptorArray descriptors = object->map()->instance_descriptors();
PropertyDetails details = descriptors->GetDetails(descriptor);
object->WriteToField(descriptor, details, value);
}
@@ -141,7 +139,7 @@ TEST(LayoutDescriptorBasicFast) {
CcTest::InitializeVM();
v8::HandleScope scope(CcTest::isolate());
- LayoutDescriptor* layout_desc = LayoutDescriptor::FastPointerLayout();
+ LayoutDescriptor layout_desc = LayoutDescriptor::FastPointerLayout();
CHECK(!layout_desc->IsSlowLayout());
CHECK(layout_desc->IsFastPointerLayout());
@@ -196,7 +194,8 @@ TEST(LayoutDescriptorBasicSlow) {
LayoutDescriptor::New(isolate, map, descriptors, kPropsCount);
CHECK_EQ(LayoutDescriptor::FastPointerLayout(), *layout_descriptor);
CHECK_EQ(kBitsInSmiLayout, layout_descriptor->capacity());
- InitializeVerifiedMapDescriptors(*map, *descriptors, *layout_descriptor);
+ InitializeVerifiedMapDescriptors(isolate, *map, *descriptors,
+ *layout_descriptor);
}
props[0] = PROP_DOUBLE;
@@ -220,7 +219,8 @@ TEST(LayoutDescriptorBasicSlow) {
for (int i = 1; i < kPropsCount; i++) {
CHECK(layout_descriptor->IsTagged(i));
}
- InitializeVerifiedMapDescriptors(*map, *descriptors, *layout_descriptor);
+ InitializeVerifiedMapDescriptors(isolate, *map, *descriptors,
+ *layout_descriptor);
}
{
@@ -240,14 +240,15 @@ TEST(LayoutDescriptorBasicSlow) {
CHECK(layout_descriptor->IsTagged(i));
}
- InitializeVerifiedMapDescriptors(*map, *descriptors, *layout_descriptor);
+ InitializeVerifiedMapDescriptors(isolate, *map, *descriptors,
+ *layout_descriptor);
// Here we have truly slow layout descriptor, so play with the bits.
CHECK(layout_descriptor->IsTagged(-1));
CHECK(layout_descriptor->IsTagged(-12347));
CHECK(layout_descriptor->IsTagged(15635));
- LayoutDescriptor* layout_desc = *layout_descriptor;
+ LayoutDescriptor layout_desc = *layout_descriptor;
// Play with the bits but leave it in consistent state with map at the end.
for (int i = 1; i < kPropsCount - 1; i++) {
layout_desc = layout_desc->SetTaggedForTesting(i, false);
@@ -268,7 +269,7 @@ static void TestLayoutDescriptorQueries(int layout_descriptor_length,
Handle<LayoutDescriptor> layout_descriptor = LayoutDescriptor::NewForTesting(
CcTest::i_isolate(), layout_descriptor_length);
layout_descriptor_length = layout_descriptor->capacity();
- LayoutDescriptor* layout_desc = *layout_descriptor;
+ LayoutDescriptor layout_desc = *layout_descriptor;
{
// Fill in the layout descriptor.
@@ -328,7 +329,7 @@ static void TestLayoutDescriptorQueries(int layout_descriptor_length,
static void TestLayoutDescriptorQueriesFast(int max_sequence_length) {
{
- LayoutDescriptor* layout_desc = LayoutDescriptor::FastPointerLayout();
+ LayoutDescriptor layout_desc = LayoutDescriptor::FastPointerLayout();
int sequence_length;
for (int i = 0; i < kNumberOfBits; i++) {
CHECK_EQ(true,
@@ -440,7 +441,7 @@ static void TestLayoutDescriptorQueriesSlow(int max_sequence_length) {
int cur = 0;
for (int i = 0; i < kMaxNumberOfDescriptors; i++) {
bit_flip_positions[i] = cur;
- cur = (cur + 1) * 2;
+ cur = base::MulWithWraparound((cur + 1), 2);
}
CHECK_LT(cur, 10000);
bit_flip_positions[kMaxNumberOfDescriptors] = 10000;
@@ -453,7 +454,7 @@ static void TestLayoutDescriptorQueriesSlow(int max_sequence_length) {
int cur = 3;
for (int i = 0; i < kMaxNumberOfDescriptors; i++) {
bit_flip_positions[i] = cur;
- cur = (cur + 1) * 2;
+ cur = base::MulWithWraparound((cur + 1), 2);
}
CHECK_LT(cur, 10000);
bit_flip_positions[kMaxNumberOfDescriptors] = 10000;
@@ -520,7 +521,8 @@ TEST(LayoutDescriptorCreateNewFast) {
layout_descriptor =
LayoutDescriptor::New(isolate, map, descriptors, kPropsCount);
CHECK_EQ(LayoutDescriptor::FastPointerLayout(), *layout_descriptor);
- InitializeVerifiedMapDescriptors(*map, *descriptors, *layout_descriptor);
+ InitializeVerifiedMapDescriptors(isolate, *map, *descriptors,
+ *layout_descriptor);
}
{
@@ -528,7 +530,8 @@ TEST(LayoutDescriptorCreateNewFast) {
layout_descriptor =
LayoutDescriptor::New(isolate, map, descriptors, kPropsCount);
CHECK_EQ(LayoutDescriptor::FastPointerLayout(), *layout_descriptor);
- InitializeVerifiedMapDescriptors(*map, *descriptors, *layout_descriptor);
+ InitializeVerifiedMapDescriptors(isolate, *map, *descriptors,
+ *layout_descriptor);
}
{
@@ -541,7 +544,8 @@ TEST(LayoutDescriptorCreateNewFast) {
CHECK(!layout_descriptor->IsTagged(1));
CHECK(layout_descriptor->IsTagged(2));
CHECK(layout_descriptor->IsTagged(125));
- InitializeVerifiedMapDescriptors(*map, *descriptors, *layout_descriptor);
+ InitializeVerifiedMapDescriptors(isolate, *map, *descriptors,
+ *layout_descriptor);
}
}
@@ -566,7 +570,8 @@ TEST(LayoutDescriptorCreateNewSlow) {
layout_descriptor =
LayoutDescriptor::New(isolate, map, descriptors, kPropsCount);
CHECK_EQ(LayoutDescriptor::FastPointerLayout(), *layout_descriptor);
- InitializeVerifiedMapDescriptors(*map, *descriptors, *layout_descriptor);
+ InitializeVerifiedMapDescriptors(isolate, *map, *descriptors,
+ *layout_descriptor);
}
{
@@ -574,7 +579,8 @@ TEST(LayoutDescriptorCreateNewSlow) {
layout_descriptor =
LayoutDescriptor::New(isolate, map, descriptors, kPropsCount);
CHECK_EQ(LayoutDescriptor::FastPointerLayout(), *layout_descriptor);
- InitializeVerifiedMapDescriptors(*map, *descriptors, *layout_descriptor);
+ InitializeVerifiedMapDescriptors(isolate, *map, *descriptors,
+ *layout_descriptor);
}
{
@@ -587,7 +593,8 @@ TEST(LayoutDescriptorCreateNewSlow) {
CHECK(!layout_descriptor->IsTagged(1));
CHECK(layout_descriptor->IsTagged(2));
CHECK(layout_descriptor->IsTagged(125));
- InitializeVerifiedMapDescriptors(*map, *descriptors, *layout_descriptor);
+ InitializeVerifiedMapDescriptors(isolate, *map, *descriptors,
+ *layout_descriptor);
}
{
@@ -606,13 +613,14 @@ TEST(LayoutDescriptorCreateNewSlow) {
for (int i = inobject_properties; i < kPropsCount; i++) {
CHECK(layout_descriptor->IsTagged(i));
}
- InitializeVerifiedMapDescriptors(*map, *descriptors, *layout_descriptor);
+ InitializeVerifiedMapDescriptors(isolate, *map, *descriptors,
+ *layout_descriptor);
// Now test LayoutDescriptor::cast_gc_safe().
Handle<LayoutDescriptor> layout_descriptor_copy =
LayoutDescriptor::New(isolate, map, descriptors, kPropsCount);
- LayoutDescriptor* layout_desc = *layout_descriptor;
+ LayoutDescriptor layout_desc = *layout_descriptor;
CHECK_EQ(layout_desc, LayoutDescriptor::cast(layout_desc));
CHECK_EQ(layout_desc, LayoutDescriptor::cast_gc_safe(layout_desc));
CHECK(layout_desc->IsSlowLayout());
@@ -640,7 +648,7 @@ static Handle<LayoutDescriptor> TestLayoutDescriptorAppend(
DescriptorArray::Allocate(isolate, 0, kPropsCount);
Handle<Map> map = Map::Create(isolate, inobject_properties);
- map->InitializeDescriptors(*descriptors,
+ map->InitializeDescriptors(isolate, *descriptors,
LayoutDescriptor::FastPointerLayout());
int next_field_offset = 0;
@@ -676,7 +684,7 @@ static Handle<LayoutDescriptor> TestLayoutDescriptorAppend(
}
CHECK(layout_descriptor->IsTagged(next_field_offset));
}
- map->InitializeDescriptors(*descriptors, *layout_descriptor);
+ map->InitializeDescriptors(isolate, *descriptors, *layout_descriptor);
}
Handle<LayoutDescriptor> layout_descriptor(map->layout_descriptor(), isolate);
CHECK(layout_descriptor->IsConsistentWithMap(*map, true));
@@ -789,10 +797,10 @@ static Handle<LayoutDescriptor> TestLayoutDescriptorAppendIfFastOrUseFull(
std::vector<Handle<Map>> maps(descriptors_length);
{
CHECK(last_map->is_stable());
- Map* map = *last_map;
+ Map map = *last_map;
for (int i = 0; i < descriptors_length; i++) {
maps[descriptors_length - 1 - i] = handle(map, isolate);
- Object* maybe_map = map->GetBackPointer();
+ Object maybe_map = map->GetBackPointer();
CHECK(maybe_map->IsMap());
map = Map::cast(maybe_map);
CHECK(!map->is_stable());
@@ -805,7 +813,7 @@ static Handle<LayoutDescriptor> TestLayoutDescriptorAppendIfFastOrUseFull(
for (int i = 0; i < number_of_descriptors; i++) {
PropertyDetails details = descriptors->GetDetails(i);
map = maps[i];
- LayoutDescriptor* layout_desc = map->layout_descriptor();
+ LayoutDescriptor layout_desc = map->layout_descriptor();
if (layout_desc->IsSlowLayout()) {
switched_to_slow_mode = true;
@@ -944,12 +952,12 @@ TEST(Regress436816) {
Handle<Map> map = Map::Create(isolate, kPropsCount);
Handle<LayoutDescriptor> layout_descriptor =
LayoutDescriptor::New(isolate, map, descriptors, kPropsCount);
- map->InitializeDescriptors(*descriptors, *layout_descriptor);
+ map->InitializeDescriptors(isolate, *descriptors, *layout_descriptor);
Handle<JSObject> object = factory->NewJSObjectFromMap(map, TENURED);
Address fake_address = static_cast<Address>(~kHeapObjectTagMask);
- HeapObject* fake_object = HeapObject::FromAddress(fake_address);
+ HeapObject fake_object = HeapObject::FromAddress(fake_address);
CHECK(fake_object->IsHeapObject());
uint64_t boom_value = bit_cast<uint64_t>(fake_object);
@@ -1106,7 +1114,7 @@ TEST(DoScavenge) {
// Construct a double value that looks like a pointer to the new space object
// and store it into the obj.
- Address fake_object = reinterpret_cast<Address>(*temp) + kPointerSize;
+ Address fake_object = temp->ptr() + kPointerSize;
double boom_value = bit_cast<double>(fake_object);
FieldIndex field_index = FieldIndex::ForDescriptor(obj->map(), 0);
@@ -1158,7 +1166,7 @@ TEST(DoScavengeWithIncrementalWriteBarrier) {
// Make sure |obj_value| is placed on an old-space evacuation candidate.
heap::SimulateFullSpace(old_space);
obj_value = factory->NewJSArray(32 * KB, HOLEY_ELEMENTS, TENURED);
- ec_page = Page::FromAddress(obj_value->address());
+ ec_page = Page::FromHeapObject(*obj_value);
}
// Create object in new space.
@@ -1226,7 +1234,8 @@ static void TestLayoutDescriptorHelper(Isolate* isolate,
Handle<LayoutDescriptor> layout_descriptor = LayoutDescriptor::New(
isolate, map, descriptors, descriptors->number_of_descriptors());
- InitializeVerifiedMapDescriptors(*map, *descriptors, *layout_descriptor);
+ InitializeVerifiedMapDescriptors(isolate, *map, *descriptors,
+ *layout_descriptor);
LayoutDescriptorHelper helper(*map);
bool all_fields_tagged = true;
@@ -1459,7 +1468,7 @@ static void TestWriteBarrier(Handle<Map> map, Handle<Map> new_map,
// |boom_value| to the slot that was earlier recorded by write barrier.
JSObject::MigrateToMap(obj, new_map);
- Address fake_object = reinterpret_cast<Address>(*obj_value) + kPointerSize;
+ Address fake_object = obj_value->ptr() + kPointerSize;
uint64_t boom_value = bit_cast<uint64_t>(fake_object);
FieldIndex double_field_index =
@@ -1510,8 +1519,8 @@ static void TestIncrementalWriteBarrier(Handle<Map> map, Handle<Map> new_map,
// Make sure |obj_value| is placed on an old-space evacuation candidate.
heap::SimulateFullSpace(old_space);
obj_value = factory->NewJSArray(32 * KB, HOLEY_ELEMENTS, TENURED);
- ec_page = Page::FromAddress(obj_value->address());
- CHECK_NE(ec_page, Page::FromAddress(obj->address()));
+ ec_page = Page::FromHeapObject(*obj_value);
+ CHECK_NE(ec_page, Page::FromHeapObject(*obj));
}
// Heap is ready, force |ec_page| to become an evacuation candidate and
diff --git a/deps/v8/test/cctest/test-unwinder.cc b/deps/v8/test/cctest/test-unwinder.cc
new file mode 100644
index 0000000000..26b7c99b2f
--- /dev/null
+++ b/deps/v8/test/cctest/test-unwinder.cc
@@ -0,0 +1,543 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "include/v8.h"
+
+#include "src/api-inl.h"
+#include "src/builtins/builtins.h"
+#include "src/isolate.h"
+#include "src/objects/code-inl.h"
+#include "test/cctest/cctest.h"
+
+namespace v8 {
+namespace internal {
+namespace test_unwinder {
+
+static void* unlimited_stack_base = std::numeric_limits<void*>::max();
+
+TEST(Unwind_BadState_Fail) {
+ UnwindState unwind_state; // Fields are initialized to nullptr.
+ RegisterState register_state;
+
+ bool unwound = v8::Unwinder::TryUnwindV8Frames(unwind_state, &register_state,
+ unlimited_stack_base);
+ CHECK(!unwound);
+ // The register state should not change when unwinding fails.
+ CHECK_NULL(register_state.fp);
+ CHECK_NULL(register_state.sp);
+ CHECK_NULL(register_state.pc);
+}
+
+TEST(Unwind_BuiltinPCInMiddle_Success) {
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate);
+
+ UnwindState unwind_state = isolate->GetUnwindState();
+ RegisterState register_state;
+
+ uintptr_t stack[3];
+ void* stack_base = stack + arraysize(stack);
+ stack[0] = reinterpret_cast<uintptr_t>(stack + 2); // saved FP (rbp).
+ stack[1] = 202; // Return address into C++ code.
+ stack[2] = 303; // The SP points here in the caller's frame.
+
+ register_state.sp = stack;
+ register_state.fp = stack;
+
+ // Put the current PC inside of a valid builtin.
+ Code builtin = i_isolate->builtins()->builtin(Builtins::kStringEqual);
+ const uintptr_t offset = 40;
+ CHECK_LT(offset, builtin->InstructionSize());
+ register_state.pc =
+ reinterpret_cast<void*>(builtin->InstructionStart() + offset);
+
+ bool unwound = v8::Unwinder::TryUnwindV8Frames(unwind_state, &register_state,
+ stack_base);
+ CHECK(unwound);
+ CHECK_EQ(reinterpret_cast<void*>(stack + 2), register_state.fp);
+ CHECK_EQ(reinterpret_cast<void*>(stack + 2), register_state.sp);
+ CHECK_EQ(reinterpret_cast<void*>(202), register_state.pc);
+}
+
+// The unwinder should be able to unwind even if we haven't properly set up the
+// current frame, as long as there is another JS frame underneath us (i.e. as
+// long as the PC isn't in JSEntry). This test puts the PC at the start
+// of a JS builtin and creates a fake JSEntry frame before it on the stack. The
+// unwinder should be able to unwind to the C++ frame before the JSEntry frame.
+TEST(Unwind_BuiltinPCAtStart_Success) {
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate);
+
+ UnwindState unwind_state = isolate->GetUnwindState();
+ RegisterState register_state;
+
+ const size_t code_length = 40;
+ uintptr_t code[code_length] = {0};
+ unwind_state.code_range.start = code;
+ unwind_state.code_range.length_in_bytes = code_length * sizeof(uintptr_t);
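+  // The zeroed array stands in for the code range, so return addresses into
+  // "JS code" can be fabricated on the stack below.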
+
+ uintptr_t stack[6];
+ void* stack_base = stack + arraysize(stack);
+ stack[0] = 101;
+ // Return address into JS code. It doesn't matter that this is not actually in
+ // JSEntry, because we only check that for the top frame.
+ stack[1] = reinterpret_cast<uintptr_t>(code + 10);
+ stack[2] = reinterpret_cast<uintptr_t>(stack + 5); // saved FP (rbp).
+ stack[3] = 303; // Return address into C++ code.
+ stack[4] = 404;
+ stack[5] = 505;
+
+ register_state.sp = stack;
+ register_state.fp = stack + 2; // FP to the JSEntry frame.
+
+ // Put the current PC at the start of a valid builtin, so that we are setting
+ // up the frame.
+ Code builtin = i_isolate->builtins()->builtin(Builtins::kStringEqual);
+ register_state.pc = reinterpret_cast<void*>(builtin->InstructionStart());
+
+ bool unwound = v8::Unwinder::TryUnwindV8Frames(unwind_state, &register_state,
+ stack_base);
+
+ CHECK(unwound);
+ CHECK_EQ(reinterpret_cast<void*>(stack + 5), register_state.fp);
+ CHECK_EQ(reinterpret_cast<void*>(stack + 4), register_state.sp);
+ CHECK_EQ(reinterpret_cast<void*>(303), register_state.pc);
+}
+
+const char* foo_source = R"(
+ function foo(a, b) {
+ let x = a * b;
+ let y = x ^ b;
+ let z = y / a;
+ return x + y - z;
+ }
+ foo(1, 2);
+ foo(1, 2);
+ %OptimizeFunctionOnNextCall(foo);
+ foo(1, 2);
+)";
+
+// Check that we can unwind when the pc is within an optimized code object on
+// the V8 heap.
+TEST(Unwind_CodeObjectPCInMiddle_Success) {
+ FLAG_allow_natives_syntax = true;
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate);
+ HandleScope scope(i_isolate);
+
+ UnwindState unwind_state = isolate->GetUnwindState();
+ RegisterState register_state;
+
+ uintptr_t stack[3];
+ void* stack_base = stack + arraysize(stack);
+ stack[0] = reinterpret_cast<uintptr_t>(stack + 2); // saved FP (rbp).
+ stack[1] = 202; // Return address into C++ code.
+ stack[2] = 303; // The SP points here in the caller's frame.
+
+ register_state.sp = stack;
+ register_state.fp = stack;
+
+ // Create an on-heap code object. Make sure we run the function so that it is
+ // compiled and not just marked for lazy compilation.
+ CompileRun(foo_source);
+ v8::Local<v8::Function> local_foo = v8::Local<v8::Function>::Cast(
+ env.local()->Global()->Get(env.local(), v8_str("foo")).ToLocalChecked());
+ Handle<JSFunction> foo =
+ Handle<JSFunction>::cast(v8::Utils::OpenHandle(*local_foo));
+
+ // Put the current PC inside of the created code object.
+ AbstractCode abstract_code = foo->abstract_code();
+ // We don't produce optimized code when run with --no-opt.
+ if (!abstract_code->IsCode() && FLAG_opt == false) return;
+ CHECK(abstract_code->IsCode());
+
+ Code code = abstract_code->GetCode();
+ // We don't want the offset to be too early, or it could point at the
+ // `push rbp` instruction (which is not at the start of the generated code,
+ // because the lazy deopt check happens before frame setup).
+ const uintptr_t offset = code->InstructionSize() - 20;
+ CHECK_LT(offset, code->InstructionSize());
+ Address pc = code->InstructionStart() + offset;
+ register_state.pc = reinterpret_cast<void*>(pc);
+
+ // Check that the created code is within the code range that we get from the
+ // API.
+ Address start = reinterpret_cast<Address>(unwind_state.code_range.start);
+ CHECK(pc >= start && pc < start + unwind_state.code_range.length_in_bytes);
+
+ bool unwound = v8::Unwinder::TryUnwindV8Frames(unwind_state, &register_state,
+ stack_base);
+ CHECK(unwound);
+ CHECK_EQ(reinterpret_cast<void*>(stack + 2), register_state.fp);
+ CHECK_EQ(reinterpret_cast<void*>(stack + 2), register_state.sp);
+ CHECK_EQ(reinterpret_cast<void*>(202), register_state.pc);
+}
+
+// If the PC is within JSEntry but we haven't set up the frame yet, then we
+// cannot unwind.
+TEST(Unwind_JSEntryBeforeFrame_Fail) {
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+
+ UnwindState unwind_state = isolate->GetUnwindState();
+ RegisterState register_state;
+
+ const size_t code_length = 40;
+ uintptr_t code[code_length] = {0};
+ unwind_state.code_range.start = code;
+ unwind_state.code_range.length_in_bytes = code_length * sizeof(uintptr_t);
+
+ // Pretend that it takes 5 instructions to set up the frame in JSEntry.
+ unwind_state.js_entry_stub.code.start = code + 10;
+ unwind_state.js_entry_stub.code.length_in_bytes = 10 * sizeof(uintptr_t);
+
+ uintptr_t stack[10];
+ void* stack_base = stack + arraysize(stack);
+ stack[0] = 101;
+ stack[1] = 111;
+ stack[2] = 121;
+ stack[3] = 131;
+ stack[4] = 141;
+ stack[5] = 151;
+ stack[6] = 100; // Return address into C++ code.
+ stack[7] = 303; // The SP points here in the caller's frame.
+ stack[8] = 404;
+ stack[9] = 505;
+
+ register_state.sp = stack + 5;
+ register_state.fp = stack + 9;
+
+ // Put the current PC inside of JSEntry, before the frame is set up.
+ register_state.pc = code + 12;
+ bool unwound = v8::Unwinder::TryUnwindV8Frames(unwind_state, &register_state,
+ stack_base);
+ CHECK(!unwound);
+ // The register state should not change when unwinding fails.
+ CHECK_EQ(reinterpret_cast<void*>(stack + 9), register_state.fp);
+ CHECK_EQ(reinterpret_cast<void*>(stack + 5), register_state.sp);
+ CHECK_EQ(code + 12, register_state.pc);
+
+ // Change the PC to a few instructions later, after the frame is set up.
+ register_state.pc = code + 16;
+ unwound = v8::Unwinder::TryUnwindV8Frames(unwind_state, &register_state,
+ stack_base);
+ // TODO(petermarshall): More precisely check position within JSEntry rather
+ // than just assuming the frame is unreadable.
+ CHECK(!unwound);
+ // The register state should not change when unwinding fails.
+ CHECK_EQ(reinterpret_cast<void*>(stack + 9), register_state.fp);
+ CHECK_EQ(reinterpret_cast<void*>(stack + 5), register_state.sp);
+ CHECK_EQ(code + 16, register_state.pc);
+}
+
+TEST(Unwind_OneJSFrame_Success) {
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+
+ UnwindState unwind_state = isolate->GetUnwindState();
+ RegisterState register_state;
+
+ // Use a fake code range so that we can initialize it to 0s.
+ const size_t code_length = 40;
+ uintptr_t code[code_length] = {0};
+ unwind_state.code_range.start = code;
+ unwind_state.code_range.length_in_bytes = code_length * sizeof(uintptr_t);
+
+ // Our fake stack has two frames - one C++ frame and one JS frame (on top).
+ // The stack grows from high addresses to low addresses.
+ uintptr_t stack[10];
+ void* stack_base = stack + arraysize(stack);
+ stack[0] = 101;
+ stack[1] = 111;
+ stack[2] = 121;
+ stack[3] = 131;
+ stack[4] = 141;
+ stack[5] = reinterpret_cast<uintptr_t>(stack + 9); // saved FP (rbp).
+ stack[6] = 100; // Return address into C++ code.
+ stack[7] = 303; // The SP points here in the caller's frame.
+ stack[8] = 404;
+ stack[9] = 505;
+
+ register_state.sp = stack;
+ register_state.fp = stack + 5;
+
+ // Put the current PC inside of the code range so it looks valid.
+ register_state.pc = code + 30;
+
+ bool unwound = v8::Unwinder::TryUnwindV8Frames(unwind_state, &register_state,
+ stack_base);
+
+ CHECK(unwound);
+ CHECK_EQ(reinterpret_cast<void*>(stack + 9), register_state.fp);
+ CHECK_EQ(reinterpret_cast<void*>(stack + 7), register_state.sp);
+ CHECK_EQ(reinterpret_cast<void*>(100), register_state.pc);
+}
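+
+// Editor's sketch (not part of the V8 unwinder API): the single
+// frame-pointer step the test above expects, assuming the x64-style layout
+// used throughout this file -- saved FP at [fp], return address one word
+// above it, and the caller's SP just past the return address. The helper
+// name is illustrative only.
+static inline void SketchUnwindOneFrame(void** fp, void** sp, void** pc) {
+  uintptr_t* frame = static_cast<uintptr_t*>(*fp);
+  *fp = reinterpret_cast<void*>(frame[0]);  // Saved FP (caller's rbp).
+  *pc = reinterpret_cast<void*>(frame[1]);  // Return address.
+  *sp = frame + 2;  // SP now points into the caller's frame.
+}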
+
+// Creates a fake stack with two JS frames on top of a C++ frame and checks that
+// the unwinder correctly unwinds past the JS frames and returns the C++ frame's
+// details.
+TEST(Unwind_TwoJSFrames_Success) {
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+
+ UnwindState unwind_state = isolate->GetUnwindState();
+ RegisterState register_state;
+
+ // Use a fake code range so that we can initialize it to 0s.
+ const size_t code_length = 40;
+ uintptr_t code[code_length] = {0};
+ unwind_state.code_range.start = code;
+ unwind_state.code_range.length_in_bytes = code_length * sizeof(uintptr_t);
+
+ // Our fake stack has three frames - one C++ frame and two JS frames (on top).
+ // The stack grows from high addresses to low addresses.
+ uintptr_t stack[10];
+ void* stack_base = stack + arraysize(stack);
+ stack[0] = 101;
+ stack[1] = 111;
+ stack[2] = reinterpret_cast<uintptr_t>(stack + 5); // saved FP (rbp).
+ // The fake return address is in the JS code range.
+ stack[3] = reinterpret_cast<uintptr_t>(code + 10);
+ stack[4] = 141;
+ stack[5] = reinterpret_cast<uintptr_t>(stack + 9); // saved FP (rbp).
+ stack[6] = 100; // Return address into C++ code.
+ stack[7] = 303; // The SP points here in the caller's frame.
+ stack[8] = 404;
+ stack[9] = 505;
+
+ register_state.sp = stack;
+ register_state.fp = stack + 2;
+
+ // Put the current PC inside of the code range so it looks valid.
+ register_state.pc = code + 30;
+
+ bool unwound = v8::Unwinder::TryUnwindV8Frames(unwind_state, &register_state,
+ stack_base);
+
+ CHECK(unwound);
+ CHECK_EQ(reinterpret_cast<void*>(stack + 9), register_state.fp);
+ CHECK_EQ(reinterpret_cast<void*>(stack + 7), register_state.sp);
+ CHECK_EQ(reinterpret_cast<void*>(100), register_state.pc);
+}
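+
+// (Note: the one-frame and two-frame tests above differ in whether a return
+// address on the fake stack lies inside the code range. An in-range address,
+// code + 10, makes the unwinder keep following the FP chain; an out-of-range
+// one, 100, is treated as the C++ caller and unwinding stops there.)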
+
+// If the PC is in JSEntry then the frame might not be set up correctly, meaning
+// we can't unwind the stack properly.
+TEST(Unwind_JSEntry_Fail) {
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate);
+
+ UnwindState unwind_state = isolate->GetUnwindState();
+ RegisterState register_state;
+
+ Code js_entry = i_isolate->heap()->builtin(Builtins::kJSEntry);
+ byte* start = reinterpret_cast<byte*>(js_entry->InstructionStart());
+ register_state.pc = start + 10;
+
+ bool unwound = v8::Unwinder::TryUnwindV8Frames(unwind_state, &register_state,
+ unlimited_stack_base);
+ CHECK(!unwound);
+ // The register state should not change when unwinding fails.
+ CHECK_NULL(register_state.fp);
+ CHECK_NULL(register_state.sp);
+ CHECK_EQ(start + 10, register_state.pc);
+}
+
+TEST(Unwind_StackBounds_Basic) {
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+
+ UnwindState unwind_state = isolate->GetUnwindState();
+ RegisterState register_state;
+
+ const size_t code_length = 10;
+ uintptr_t code[code_length] = {0};
+ unwind_state.code_range.start = code;
+ unwind_state.code_range.length_in_bytes = code_length * sizeof(uintptr_t);
+
+ uintptr_t stack[3];
+ stack[0] = reinterpret_cast<uintptr_t>(stack + 2); // saved FP (rbp).
+ stack[1] = 202; // Return address into C++ code.
+ stack[2] = 303; // The SP points here in the caller's frame.
+
+ register_state.sp = stack;
+ register_state.fp = stack;
+ register_state.pc = code;
+
+ void* wrong_stack_base = reinterpret_cast<void*>(
+ reinterpret_cast<uintptr_t>(stack) - sizeof(uintptr_t));
+ bool unwound = v8::Unwinder::TryUnwindV8Frames(unwind_state, &register_state,
+ wrong_stack_base);
+ CHECK(!unwound);
+
+ // Correct the stack base and unwinding should succeed.
+ void* correct_stack_base = stack + arraysize(stack);
+ unwound = v8::Unwinder::TryUnwindV8Frames(unwind_state, &register_state,
+ correct_stack_base);
+ CHECK(unwound);
+}
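+
+// A hedged sketch of the invariant the test above pins down: every stack
+// slot the unwinder dereferences must lie inside [sp, stack_base).
+// Illustrative only; the real check lives inside TryUnwindV8Frames.
+static inline bool SketchInStackBounds(const void* addr, const void* sp,
+                                       const void* stack_base) {
+  return addr >= sp && addr < stack_base;
+}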
+
+TEST(Unwind_StackBounds_WithUnwinding) {
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+
+ UnwindState unwind_state = isolate->GetUnwindState();
+ RegisterState register_state;
+
+ // Use a fake code range so that we can initialize it to 0s.
+ const size_t code_length = 40;
+ uintptr_t code[code_length] = {0};
+ unwind_state.code_range.start = code;
+ unwind_state.code_range.length_in_bytes = code_length * sizeof(uintptr_t);
+
+ // Our fake stack has two frames - one C++ frame and one JS frame (on top).
+ // The stack grows from high addresses to low addresses.
+ uintptr_t stack[11];
+ void* stack_base = stack + arraysize(stack);
+ stack[0] = 101;
+ stack[1] = 111;
+ stack[2] = 121;
+ stack[3] = 131;
+ stack[4] = 141;
+ stack[5] = reinterpret_cast<uintptr_t>(stack + 9); // saved FP (rbp).
+ stack[6] = reinterpret_cast<uintptr_t>(code + 20); // JS code.
+ stack[7] = 303; // The SP points here in the caller's frame.
+ stack[8] = 404;
+ stack[9] = reinterpret_cast<uintptr_t>(stack) +
+ (12 * sizeof(uintptr_t)); // saved FP (OOB).
+ stack[10] = reinterpret_cast<uintptr_t>(code + 20); // JS code.
+
+ register_state.sp = stack;
+ register_state.fp = stack + 5;
+
+ // Put the current PC inside of the code range so it looks valid.
+ register_state.pc = code + 30;
+
+ // Unwinding will fail because the FP saved at stack[9] points outside of
+ // the stack.
+ bool unwound = v8::Unwinder::TryUnwindV8Frames(unwind_state, &register_state,
+ stack_base);
+ CHECK(!unwound);
+
+ // Change the return address so that it is not in range.
+ stack[10] = 202;
+ unwound = v8::Unwinder::TryUnwindV8Frames(unwind_state, &register_state,
+ stack_base);
+ CHECK(!unwound);
+}
+
+TEST(PCIsInV8_BadState_Fail) {
+ UnwindState unwind_state;
+ void* pc = nullptr;
+
+ CHECK(!v8::Unwinder::PCIsInV8(unwind_state, pc));
+}
+
+TEST(PCIsInV8_ValidStateNullPC_Fail) {
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+
+ UnwindState unwind_state = isolate->GetUnwindState();
+ void* pc = nullptr;
+
+ CHECK(!v8::Unwinder::PCIsInV8(unwind_state, pc));
+}
+
+void TestRangeBoundaries(const UnwindState& unwind_state, byte* range_start,
+ size_t range_length) {
+ void* pc = range_start - 1;
+ CHECK(!v8::Unwinder::PCIsInV8(unwind_state, pc));
+ pc = range_start;
+ CHECK(v8::Unwinder::PCIsInV8(unwind_state, pc));
+ pc = range_start + 1;
+ CHECK(v8::Unwinder::PCIsInV8(unwind_state, pc));
+ pc = range_start + range_length - 1;
+ CHECK(v8::Unwinder::PCIsInV8(unwind_state, pc));
+ pc = range_start + range_length;
+ CHECK(!v8::Unwinder::PCIsInV8(unwind_state, pc));
+ pc = range_start + range_length + 1;
+ CHECK(!v8::Unwinder::PCIsInV8(unwind_state, pc));
+}
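+
+// The boundary checks above amount to half-open range semantics: a PC is
+// "in V8" iff range_start <= pc < range_start + range_length. A minimal
+// sketch of that predicate (illustrative, not the real implementation):
+static inline bool SketchPCInRange(const void* pc, const byte* start,
+                                   size_t length) {
+  return pc >= start && pc < start + length;
+}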
+
+TEST(PCIsInV8_InCodeOrEmbeddedRange) {
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+
+ UnwindState unwind_state = isolate->GetUnwindState();
+
+ byte* code_range_start = const_cast<byte*>(
+ reinterpret_cast<const byte*>(unwind_state.code_range.start));
+ size_t code_range_length = unwind_state.code_range.length_in_bytes;
+ TestRangeBoundaries(unwind_state, code_range_start, code_range_length);
+
+ byte* embedded_range_start = const_cast<byte*>(
+ reinterpret_cast<const byte*>(unwind_state.embedded_code_range.start));
+ size_t embedded_range_length =
+ unwind_state.embedded_code_range.length_in_bytes;
+ TestRangeBoundaries(unwind_state, embedded_range_start,
+ embedded_range_length);
+}
+
+// PCIsInV8 doesn't check if the PC is in JSEntry directly. It's assumed that
+// the CodeRange or EmbeddedCodeRange contains JSEntry.
+TEST(PCIsInV8_InJSEntryRange) {
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate);
+
+ UnwindState unwind_state = isolate->GetUnwindState();
+
+ Code js_entry = i_isolate->heap()->builtin(Builtins::kJSEntry);
+ byte* start = reinterpret_cast<byte*>(js_entry->InstructionStart());
+ size_t length = js_entry->InstructionSize();
+
+ void* pc = start;
+ CHECK(v8::Unwinder::PCIsInV8(unwind_state, pc));
+ pc = start + 1;
+ CHECK(v8::Unwinder::PCIsInV8(unwind_state, pc));
+ pc = start + length - 1;
+ CHECK(v8::Unwinder::PCIsInV8(unwind_state, pc));
+}
+
+// Large code objects can be allocated in large object space (CODE_LO_SPACE).
+// Check that such objects still fall inside the CodeRange.
+TEST(PCIsInV8_LargeCodeObject) {
+ FLAG_allow_natives_syntax = true;
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate);
+ HandleScope scope(i_isolate);
+
+ UnwindState unwind_state = isolate->GetUnwindState();
+
+ // Create a big function that ends up in CODE_LO_SPACE.
+ const int instruction_size = Page::kPageSize + 1;
+ STATIC_ASSERT(instruction_size > kMaxRegularHeapObjectSize);
+ std::unique_ptr<byte[]> instructions(new byte[instruction_size]);
+
+ CodeDesc desc;
+ desc.buffer = instructions.get();
+ desc.buffer_size = instruction_size;
+ desc.instr_size = instruction_size;
+ desc.reloc_size = 0;
+ desc.constant_pool_size = 0;
+ desc.unwinding_info = nullptr;
+ desc.unwinding_info_size = 0;
+ desc.origin = nullptr;
+ Handle<Object> self_ref;
+ Handle<Code> foo_code =
+ i_isolate->factory()->NewCode(desc, Code::WASM_FUNCTION, self_ref);
+
+ CHECK(i_isolate->heap()->InSpace(*foo_code, CODE_LO_SPACE));
+ byte* start = reinterpret_cast<byte*>(foo_code->InstructionStart());
+
+ void* pc = start;
+ CHECK(v8::Unwinder::PCIsInV8(unwind_state, pc));
+}
+
+} // namespace test_unwinder
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/cctest/test-usecounters.cc b/deps/v8/test/cctest/test-usecounters.cc
index 4c6c72a28d..52a24a3046 100644
--- a/deps/v8/test/cctest/test-usecounters.cc
+++ b/deps/v8/test/cctest/test-usecounters.cc
@@ -81,6 +81,40 @@ TEST(AtomicsWakeAndAtomicsNotify) {
CHECK_EQ(1, use_counts[v8::Isolate::kAtomicsNotify]);
}
+TEST(RegExpMatchIsTrueishOnNonJSRegExp) {
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ LocalContext env;
+ int use_counts[v8::Isolate::kUseCounterFeatureCount] = {};
+ global_use_counts = use_counts;
+ CcTest::isolate()->SetUseCounterCallback(MockUseCounterCallback);
+
+ CompileRun("new RegExp(/./); new RegExp('');");
+ CHECK_EQ(0, use_counts[v8::Isolate::kRegExpMatchIsTrueishOnNonJSRegExp]);
+ CHECK_EQ(0, use_counts[v8::Isolate::kRegExpMatchIsFalseishOnJSRegExp]);
+
+ CompileRun("let p = { [Symbol.match]: true }; new RegExp(p);");
+ CHECK_EQ(1, use_counts[v8::Isolate::kRegExpMatchIsTrueishOnNonJSRegExp]);
+ CHECK_EQ(0, use_counts[v8::Isolate::kRegExpMatchIsFalseishOnJSRegExp]);
+}
+
+TEST(RegExpMatchIsFalseishOnJSRegExp) {
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ LocalContext env;
+ int use_counts[v8::Isolate::kUseCounterFeatureCount] = {};
+ global_use_counts = use_counts;
+ CcTest::isolate()->SetUseCounterCallback(MockUseCounterCallback);
+
+ CompileRun("new RegExp(/./); new RegExp('');");
+ CHECK_EQ(0, use_counts[v8::Isolate::kRegExpMatchIsTrueishOnNonJSRegExp]);
+ CHECK_EQ(0, use_counts[v8::Isolate::kRegExpMatchIsFalseishOnJSRegExp]);
+
+ CompileRun("let p = /./; p[Symbol.match] = false; new RegExp(p);");
+ CHECK_EQ(0, use_counts[v8::Isolate::kRegExpMatchIsTrueishOnNonJSRegExp]);
+ CHECK_EQ(1, use_counts[v8::Isolate::kRegExpMatchIsFalseishOnJSRegExp]);
+}
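+
+// Background for the two tests above: the RegExp constructor consults
+// IsRegExp(pattern), which lets a defined @@match value override the object's
+// actual type, so these counters measure how often @@match contradicts the
+// pattern really being a JSRegExp.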
+
} // namespace test_usecounters
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/cctest/test-utils-arm64.cc b/deps/v8/test/cctest/test-utils-arm64.cc
index ac647c5c64..7086a26ec8 100644
--- a/deps/v8/test/cctest/test-utils-arm64.cc
+++ b/deps/v8/test/cctest/test-utils-arm64.cc
@@ -25,14 +25,14 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#include "src/v8.h"
+#include "test/cctest/test-utils-arm64.h"
#include "src/arm64/assembler-arm64-inl.h"
#include "src/arm64/utils-arm64.h"
#include "src/base/template-utils.h"
#include "src/macro-assembler-inl.h"
+#include "src/v8.h"
#include "test/cctest/cctest.h"
-#include "test/cctest/test-utils-arm64.h"
namespace v8 {
namespace internal {
@@ -233,7 +233,7 @@ RegList PopulateRegisterArray(Register* w, Register* x, Register* r,
RegList list = 0;
int i = 0;
for (unsigned n = 0; (n < kNumberOfRegisters) && (i < reg_count); n++) {
- if (((1UL << n) & allowed) != 0) {
+ if (((1ULL << n) & allowed) != 0) {
// Only assign allowed registers.
if (r) {
r[i] = Register::Create(n, reg_size);
@@ -244,7 +244,7 @@ RegList PopulateRegisterArray(Register* w, Register* x, Register* r,
if (w) {
w[i] = Register::Create(n, kWRegSizeInBits);
}
- list |= (1UL << n);
+ list |= (1ULL << n);
i++;
}
}
@@ -259,7 +259,7 @@ RegList PopulateVRegisterArray(VRegister* s, VRegister* d, VRegister* v,
RegList list = 0;
int i = 0;
for (unsigned n = 0; (n < kNumberOfVRegisters) && (i < reg_count); n++) {
- if (((1UL << n) & allowed) != 0) {
+ if (((1ULL << n) & allowed) != 0) {
// Only assign allowed registers.
if (v) {
v[i] = VRegister::Create(n, reg_size);
@@ -270,7 +270,7 @@ RegList PopulateVRegisterArray(VRegister* s, VRegister* d, VRegister* v,
if (s) {
s[i] = VRegister::Create(n, kSRegSizeInBits);
}
- list |= (1UL << n);
+ list |= (1ULL << n);
i++;
}
}
@@ -284,7 +284,7 @@ RegList PopulateVRegisterArray(VRegister* s, VRegister* d, VRegister* v,
void Clobber(MacroAssembler* masm, RegList reg_list, uint64_t const value) {
Register first = NoReg;
for (unsigned i = 0; i < kNumberOfRegisters; i++) {
- if (reg_list & (1UL << i)) {
+ if (reg_list & (1ULL << i)) {
Register xn = Register::Create(i, kXRegSizeInBits);
// We should never write into sp here.
CHECK(!xn.Is(sp));
@@ -307,7 +307,7 @@ void Clobber(MacroAssembler* masm, RegList reg_list, uint64_t const value) {
void ClobberFP(MacroAssembler* masm, RegList reg_list, double const value) {
VRegister first = NoVReg;
for (unsigned i = 0; i < kNumberOfVRegisters; i++) {
- if (reg_list & (1UL << i)) {
+ if (reg_list & (1ULL << i)) {
VRegister dn = VRegister::Create(i, kDRegSizeInBits);
if (!first.IsValid()) {
// This is the first register we've hit, so construct the literal.
diff --git a/deps/v8/test/cctest/test-utils-arm64.h b/deps/v8/test/cctest/test-utils-arm64.h
index e5b91fb280..45970ac8b7 100644
--- a/deps/v8/test/cctest/test-utils-arm64.h
+++ b/deps/v8/test/cctest/test-utils-arm64.h
@@ -28,13 +28,10 @@
#ifndef V8_ARM64_TEST_UTILS_ARM64_H_
#define V8_ARM64_TEST_UTILS_ARM64_H_
-#include "src/v8.h"
-#include "test/cctest/cctest.h"
-
-#include "src/arm64/macro-assembler-arm64.h"
#include "src/arm64/utils-arm64.h"
#include "src/macro-assembler.h"
-
+#include "src/v8.h"
+#include "test/cctest/cctest.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/test/cctest/torque/test-torque.cc b/deps/v8/test/cctest/torque/test-torque.cc
index c339aa4134..0941795bd6 100644
--- a/deps/v8/test/cctest/torque/test-torque.cc
+++ b/deps/v8/test/cctest/torque/test-torque.cc
@@ -4,6 +4,7 @@
#include <cmath>
+#include "src/api-inl.h"
#include "src/base/utils/random-number-generator.h"
#include "src/builtins/builtins-promise-gen.h"
#include "src/builtins/builtins-string-gen.h"
@@ -29,12 +30,19 @@ namespace {
typedef CodeAssemblerLabel Label;
typedef CodeAssemblerVariable Variable;
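+// A small merging subclass: the tests below need both the CSA helpers
+// (SmiConstant, WordEqual, UncheckedCast, ...) and the Torque-generated
+// TestBuiltinsFromDSLAssembler entry points available on one object.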
+class TestTorqueAssembler : public CodeStubAssembler,
+ public TestBuiltinsFromDSLAssembler {
+ public:
+ explicit TestTorqueAssembler(CodeAssemblerState* state)
+ : CodeStubAssembler(state), TestBuiltinsFromDSLAssembler(state) {}
+};
+
} // namespace
TEST(TestConstexpr1) {
Isolate* isolate(CcTest::InitIsolateOnce());
CodeAssemblerTester asm_tester(isolate, 0);
- TestBuiltinsFromDSLAssembler m(asm_tester.state());
+ TestTorqueAssembler m(asm_tester.state());
{
m.TestConstexpr1();
m.Return(m.UndefinedConstant());
@@ -46,7 +54,7 @@ TEST(TestConstexpr1) {
TEST(TestConstexprIf) {
Isolate* isolate(CcTest::InitIsolateOnce());
CodeAssemblerTester asm_tester(isolate, 0);
- TestBuiltinsFromDSLAssembler m(asm_tester.state());
+ TestTorqueAssembler m(asm_tester.state());
{
m.TestConstexprIf();
m.Return(m.UndefinedConstant());
@@ -58,7 +66,7 @@ TEST(TestConstexprIf) {
TEST(TestConstexprReturn) {
Isolate* isolate(CcTest::InitIsolateOnce());
CodeAssemblerTester asm_tester(isolate, 0);
- TestBuiltinsFromDSLAssembler m(asm_tester.state());
+ TestTorqueAssembler m(asm_tester.state());
{
m.TestConstexprReturn();
m.Return(m.UndefinedConstant());
@@ -70,7 +78,7 @@ TEST(TestConstexprReturn) {
TEST(TestGotoLabel) {
Isolate* isolate(CcTest::InitIsolateOnce());
CodeAssemblerTester asm_tester(isolate, 0);
- TestBuiltinsFromDSLAssembler m(asm_tester.state());
+ TestTorqueAssembler m(asm_tester.state());
{ m.Return(m.TestGotoLabel()); }
FunctionTester ft(asm_tester.GenerateCode(), 0);
ft.CheckCall(ft.true_value());
@@ -79,7 +87,7 @@ TEST(TestGotoLabel) {
TEST(TestGotoLabelWithOneParameter) {
Isolate* isolate(CcTest::InitIsolateOnce());
CodeAssemblerTester asm_tester(isolate, 0);
- TestBuiltinsFromDSLAssembler m(asm_tester.state());
+ TestTorqueAssembler m(asm_tester.state());
{ m.Return(m.TestGotoLabelWithOneParameter()); }
FunctionTester ft(asm_tester.GenerateCode(), 0);
ft.CheckCall(ft.true_value());
@@ -88,7 +96,7 @@ TEST(TestGotoLabelWithOneParameter) {
TEST(TestGotoLabelWithTwoParameters) {
Isolate* isolate(CcTest::InitIsolateOnce());
CodeAssemblerTester asm_tester(isolate, 0);
- TestBuiltinsFromDSLAssembler m(asm_tester.state());
+ TestTorqueAssembler m(asm_tester.state());
{ m.Return(m.TestGotoLabelWithTwoParameters()); }
FunctionTester ft(asm_tester.GenerateCode(), 0);
ft.CheckCall(ft.true_value());
@@ -97,7 +105,7 @@ TEST(TestGotoLabelWithTwoParameters) {
TEST(TestPartiallyUnusedLabel) {
Isolate* isolate(CcTest::InitIsolateOnce());
CodeAssemblerTester asm_tester(isolate, 0);
- TestBuiltinsFromDSLAssembler m(asm_tester.state());
+ TestTorqueAssembler m(asm_tester.state());
{ m.Return(m.TestPartiallyUnusedLabel()); }
FunctionTester ft(asm_tester.GenerateCode(), 0);
ft.CheckCall(ft.true_value());
@@ -106,7 +114,7 @@ TEST(TestPartiallyUnusedLabel) {
TEST(TestBuiltinSpecialization) {
Isolate* isolate(CcTest::InitIsolateOnce());
CodeAssemblerTester asm_tester(isolate, 0);
- TestBuiltinsFromDSLAssembler m(asm_tester.state());
+ TestTorqueAssembler m(asm_tester.state());
{
Node* temp = m.SmiConstant(0);
m.TestBuiltinSpecialization(m.UncheckedCast<Context>(temp));
@@ -119,7 +127,7 @@ TEST(TestBuiltinSpecialization) {
TEST(TestMacroSpecialization) {
Isolate* isolate(CcTest::InitIsolateOnce());
CodeAssemblerTester asm_tester(isolate, 0);
- TestBuiltinsFromDSLAssembler m(asm_tester.state());
+ TestTorqueAssembler m(asm_tester.state());
{
m.TestMacroSpecialization();
m.Return(m.UndefinedConstant());
@@ -132,7 +140,7 @@ TEST(TestFunctionPointers) {
Isolate* isolate(CcTest::InitIsolateOnce());
const int kNumParams = 0;
CodeAssemblerTester asm_tester(isolate, kNumParams);
- TestBuiltinsFromDSLAssembler m(asm_tester.state());
+ TestTorqueAssembler m(asm_tester.state());
{
TNode<Context> context =
m.UncheckedCast<Context>(m.Parameter(kNumParams + 2));
@@ -146,7 +154,7 @@ TEST(TestTernaryOperator) {
Isolate* isolate(CcTest::InitIsolateOnce());
const int kNumParams = 1;
CodeAssemblerTester asm_tester(isolate, kNumParams);
- TestBuiltinsFromDSLAssembler m(asm_tester.state());
+ TestTorqueAssembler m(asm_tester.state());
{
TNode<Smi> arg = m.UncheckedCast<Smi>(m.Parameter(0));
m.Return(m.TestTernaryOperator(arg));
@@ -163,7 +171,7 @@ TEST(TestTernaryOperator) {
TEST(TestFunctionPointerToGeneric) {
Isolate* isolate(CcTest::InitIsolateOnce());
CodeAssemblerTester asm_tester(isolate, 0);
- TestBuiltinsFromDSLAssembler m(asm_tester.state());
+ TestTorqueAssembler m(asm_tester.state());
{
Node* temp = m.SmiConstant(0);
m.TestFunctionPointerToGeneric(m.UncheckedCast<Context>(temp));
@@ -176,7 +184,7 @@ TEST(TestFunctionPointerToGeneric) {
TEST(TestUnsafeCast) {
Isolate* isolate(CcTest::InitIsolateOnce());
CodeAssemblerTester asm_tester(isolate, 0);
- TestBuiltinsFromDSLAssembler m(asm_tester.state());
+ TestTorqueAssembler m(asm_tester.state());
{
Node* temp = m.SmiConstant(0);
Node* n = m.SmiConstant(10);
@@ -190,7 +198,7 @@ TEST(TestUnsafeCast) {
TEST(TestHexLiteral) {
Isolate* isolate(CcTest::InitIsolateOnce());
CodeAssemblerTester asm_tester(isolate, 0);
- TestBuiltinsFromDSLAssembler m(asm_tester.state());
+ TestTorqueAssembler m(asm_tester.state());
{
m.TestHexLiteral();
m.Return(m.UndefinedConstant());
@@ -202,7 +210,7 @@ TEST(TestHexLiteral) {
TEST(TestModuleConstBindings) {
Isolate* isolate(CcTest::InitIsolateOnce());
CodeAssemblerTester asm_tester(isolate, 0);
- TestBuiltinsFromDSLAssembler m(asm_tester.state());
+ TestTorqueAssembler m(asm_tester.state());
{
m.TestModuleConstBindings();
m.Return(m.UndefinedConstant());
@@ -214,7 +222,7 @@ TEST(TestModuleConstBindings) {
TEST(TestLocalConstBindings) {
Isolate* isolate(CcTest::InitIsolateOnce());
CodeAssemblerTester asm_tester(isolate, 0);
- TestBuiltinsFromDSLAssembler m(asm_tester.state());
+ TestTorqueAssembler m(asm_tester.state());
{
m.TestLocalConstBindings();
m.Return(m.UndefinedConstant());
@@ -226,7 +234,7 @@ TEST(TestLocalConstBindings) {
TEST(TestForLoop) {
Isolate* isolate(CcTest::InitIsolateOnce());
CodeAssemblerTester asm_tester(isolate, 0);
- TestBuiltinsFromDSLAssembler m(asm_tester.state());
+ TestTorqueAssembler m(asm_tester.state());
{
m.TestForLoop();
m.Return(m.UndefinedConstant());
@@ -236,11 +244,15 @@ TEST(TestForLoop) {
}
TEST(TestTypeswitch) {
- Isolate* isolate(CcTest::InitIsolateOnce());
+ CcTest::InitializeVM();
+ Isolate* isolate(CcTest::i_isolate());
+ i::HandleScope scope(isolate);
+ Handle<Context> context =
+ Utils::OpenHandle(*v8::Isolate::GetCurrent()->GetCurrentContext());
CodeAssemblerTester asm_tester(isolate, 0);
- TestBuiltinsFromDSLAssembler m(asm_tester.state());
+ TestTorqueAssembler m(asm_tester.state());
{
- m.TestTypeswitch();
+ m.TestTypeswitch(m.UncheckedCast<Context>(m.HeapConstant(context)));
m.Return(m.UndefinedConstant());
}
FunctionTester ft(asm_tester.GenerateCode(), 0);
@@ -248,11 +260,15 @@ TEST(TestTypeswitch) {
}
TEST(TestGenericOverload) {
- Isolate* isolate(CcTest::InitIsolateOnce());
+ CcTest::InitializeVM();
+ Isolate* isolate(CcTest::i_isolate());
+ i::HandleScope scope(isolate);
+ Handle<Context> context =
+ Utils::OpenHandle(*v8::Isolate::GetCurrent()->GetCurrentContext());
CodeAssemblerTester asm_tester(isolate, 0);
- TestBuiltinsFromDSLAssembler m(asm_tester.state());
+ TestTorqueAssembler m(asm_tester.state());
{
- m.TestGenericOverload();
+ m.TestGenericOverload(m.UncheckedCast<Context>(m.HeapConstant(context)));
m.Return(m.UndefinedConstant());
}
FunctionTester ft(asm_tester.GenerateCode(), 0);
@@ -262,7 +278,7 @@ TEST(TestGenericOverload) {
TEST(TestLogicalOperators) {
Isolate* isolate(CcTest::InitIsolateOnce());
CodeAssemblerTester asm_tester(isolate, 0);
- TestBuiltinsFromDSLAssembler m(asm_tester.state());
+ TestTorqueAssembler m(asm_tester.state());
{
m.TestLogicalOperators();
m.Return(m.UndefinedConstant());
@@ -274,7 +290,7 @@ TEST(TestLogicalOperators) {
TEST(TestOtherwiseAndLabels) {
Isolate* isolate(CcTest::InitIsolateOnce());
CodeAssemblerTester asm_tester(isolate, 0);
- TestBuiltinsFromDSLAssembler m(asm_tester.state());
+ TestTorqueAssembler m(asm_tester.state());
{
m.TestOtherwiseWithCode1();
m.TestOtherwiseWithCode2();
@@ -286,6 +302,127 @@ TEST(TestOtherwiseAndLabels) {
ft.Call();
}
+TEST(TestCatch1) {
+ CcTest::InitializeVM();
+ Isolate* isolate(CcTest::i_isolate());
+ i::HandleScope scope(isolate);
+ Handle<Context> context =
+ Utils::OpenHandle(*v8::Isolate::GetCurrent()->GetCurrentContext());
+ CodeAssemblerTester asm_tester(isolate, 0);
+ TestTorqueAssembler m(asm_tester.state());
+ {
+ TNode<Smi> result =
+ m.TestCatch1(m.UncheckedCast<Context>(m.HeapConstant(context)));
+ USE(result);
+ CSA_ASSERT(&m, m.WordEqual(result, m.SmiConstant(1)));
+ m.Return(m.UndefinedConstant());
+ }
+ FunctionTester ft(asm_tester.GenerateCode(), 0);
+ ft.Call();
+}
+
+TEST(TestCatch2) {
+ CcTest::InitializeVM();
+ Isolate* isolate(CcTest::i_isolate());
+ i::HandleScope scope(isolate);
+ Handle<Context> context =
+ Utils::OpenHandle(*v8::Isolate::GetCurrent()->GetCurrentContext());
+ CodeAssemblerTester asm_tester(isolate, 0);
+ TestTorqueAssembler m(asm_tester.state());
+ {
+ TNode<Smi> result =
+ m.TestCatch2(m.UncheckedCast<Context>(m.HeapConstant(context)));
+ USE(result);
+ CSA_ASSERT(&m, m.WordEqual(result, m.SmiConstant(2)));
+ m.Return(m.UndefinedConstant());
+ }
+ FunctionTester ft(asm_tester.GenerateCode(), 0);
+ ft.Call();
+}
+
+TEST(TestCatch3) {
+ CcTest::InitializeVM();
+ Isolate* isolate(CcTest::i_isolate());
+ i::HandleScope scope(isolate);
+ Handle<Context> context =
+ Utils::OpenHandle(*v8::Isolate::GetCurrent()->GetCurrentContext());
+ CodeAssemblerTester asm_tester(isolate, 0);
+ TestTorqueAssembler m(asm_tester.state());
+ {
+ TNode<Smi> result =
+ m.TestCatch3(m.UncheckedCast<Context>(m.HeapConstant(context)));
+ USE(result);
+ CSA_ASSERT(&m, m.WordEqual(result, m.SmiConstant(2)));
+ m.Return(m.UndefinedConstant());
+ }
+ FunctionTester ft(asm_tester.GenerateCode(), 0);
+ ft.Call();
+}
+
+TEST(TestLookup) {
+ CcTest::InitializeVM();
+ Isolate* isolate(CcTest::i_isolate());
+ i::HandleScope scope(isolate);
+ Handle<Context> context =
+ Utils::OpenHandle(*v8::Isolate::GetCurrent()->GetCurrentContext());
+ CodeAssemblerTester asm_tester(isolate, 0);
+ TestTorqueAssembler m(asm_tester.state());
+ {
+ m.TestQualifiedAccess(m.UncheckedCast<Context>(m.HeapConstant(context)));
+ m.Return(m.UndefinedConstant());
+ }
+ FunctionTester ft(asm_tester.GenerateCode(), 0);
+ ft.Call();
+}
+
+TEST(TestFrame1) {
+ CcTest::InitializeVM();
+ Isolate* isolate(CcTest::i_isolate());
+ i::HandleScope scope(isolate);
+ Handle<Context> context =
+ Utils::OpenHandle(*v8::Isolate::GetCurrent()->GetCurrentContext());
+ CodeAssemblerTester asm_tester(isolate);
+ TestTorqueAssembler m(asm_tester.state());
+ {
+ m.TestFrame1(m.UncheckedCast<Context>(m.HeapConstant(context)));
+ m.Return(m.UndefinedConstant());
+ }
+ FunctionTester ft(asm_tester.GenerateCode(), 0);
+ ft.Call();
+}
+
+TEST(TestNew) {
+ CcTest::InitializeVM();
+ Isolate* isolate(CcTest::i_isolate());
+ i::HandleScope scope(isolate);
+ Handle<Context> context =
+ Utils::OpenHandle(*v8::Isolate::GetCurrent()->GetCurrentContext());
+ CodeAssemblerTester asm_tester(isolate);
+ TestTorqueAssembler m(asm_tester.state());
+ {
+ m.TestNew(m.UncheckedCast<Context>(m.HeapConstant(context)));
+ m.Return(m.UndefinedConstant());
+ }
+ FunctionTester ft(asm_tester.GenerateCode(), 0);
+ ft.Call();
+}
+
+TEST(TestStructConstructor) {
+ CcTest::InitializeVM();
+ Isolate* isolate(CcTest::i_isolate());
+ i::HandleScope scope(isolate);
+ Handle<Context> context =
+ Utils::OpenHandle(*v8::Isolate::GetCurrent()->GetCurrentContext());
+ CodeAssemblerTester asm_tester(isolate);
+ TestTorqueAssembler m(asm_tester.state());
+ {
+ m.TestStructConstructor(m.UncheckedCast<Context>(m.HeapConstant(context)));
+ m.Return(m.UndefinedConstant());
+ }
+ FunctionTester ft(asm_tester.GenerateCode(), 0);
+ ft.Call();
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/cctest/trace-extension.cc b/deps/v8/test/cctest/trace-extension.cc
index cb44ebde7a..3e2a9192d7 100644
--- a/deps/v8/test/cctest/trace-extension.cc
+++ b/deps/v8/test/cctest/trace-extension.cc
@@ -28,6 +28,7 @@
#include "test/cctest/trace-extension.h"
#include "include/v8-profiler.h"
+#include "src/objects/smi.h"
#include "src/vm-state-inl.h"
#include "test/cctest/cctest.h"
@@ -43,29 +44,24 @@ const char* TraceExtension::kSource =
v8::Local<v8::FunctionTemplate> TraceExtension::GetNativeFunctionTemplate(
v8::Isolate* isolate, v8::Local<v8::String> name) {
- v8::Local<v8::Context> context = isolate->GetCurrentContext();
- if (name->Equals(context, v8::String::NewFromUtf8(isolate, "trace",
- v8::NewStringType::kNormal)
- .ToLocalChecked())
- .FromJust()) {
+ if (name->StrictEquals(
+ v8::String::NewFromUtf8(isolate, "trace", v8::NewStringType::kNormal)
+ .ToLocalChecked())) {
return v8::FunctionTemplate::New(isolate, TraceExtension::Trace);
- } else if (name->Equals(context,
- v8::String::NewFromUtf8(isolate, "js_trace",
- v8::NewStringType::kNormal)
- .ToLocalChecked())
- .FromJust()) {
+ } else if (name->StrictEquals(
+ v8::String::NewFromUtf8(isolate, "js_trace",
+ v8::NewStringType::kNormal)
+ .ToLocalChecked())) {
return v8::FunctionTemplate::New(isolate, TraceExtension::JSTrace);
- } else if (name->Equals(context,
- v8::String::NewFromUtf8(isolate, "js_entry_sp",
- v8::NewStringType::kNormal)
- .ToLocalChecked())
- .FromJust()) {
+ } else if (name->StrictEquals(
+ v8::String::NewFromUtf8(isolate, "js_entry_sp",
+ v8::NewStringType::kNormal)
+ .ToLocalChecked())) {
return v8::FunctionTemplate::New(isolate, TraceExtension::JSEntrySP);
- } else if (name->Equals(context,
- v8::String::NewFromUtf8(isolate, "js_entry_sp_level2",
- v8::NewStringType::kNormal)
- .ToLocalChecked())
- .FromJust()) {
+ } else if (name->StrictEquals(
+ v8::String::NewFromUtf8(isolate, "js_entry_sp_level2",
+ v8::NewStringType::kNormal)
+ .ToLocalChecked())) {
return v8::FunctionTemplate::New(isolate, TraceExtension::JSEntrySPLevel2);
}
UNREACHABLE();
@@ -81,9 +77,9 @@ Address TraceExtension::GetFP(const v8::FunctionCallbackInfo<v8::Value>& args) {
uint64_t kSmiValueMask =
(static_cast<uintptr_t>(1) << (kSmiValueSize - 1)) - 1;
uint64_t low_bits =
- (*reinterpret_cast<Smi**>(*args[0]))->value() & kSmiValueMask;
+ Smi(*reinterpret_cast<Address*>(*args[0]))->value() & kSmiValueMask;
uint64_t high_bits =
- (*reinterpret_cast<Smi**>(*args[1]))->value() & kSmiValueMask;
+ Smi(*reinterpret_cast<Address*>(*args[1]))->value() & kSmiValueMask;
Address fp =
static_cast<Address>((high_bits << (kSmiValueSize - 1)) | low_bits);
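+  // (A 64-bit frame pointer does not fit in a single Smi, so the JS side
+  // passes it as two Smis and the halves are stitched back together above.)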
#else
diff --git a/deps/v8/test/cctest/wasm/OWNERS b/deps/v8/test/cctest/wasm/OWNERS
index 3972f0dd99..dc68b39733 100644
--- a/deps/v8/test/cctest/wasm/OWNERS
+++ b/deps/v8/test/cctest/wasm/OWNERS
@@ -1,7 +1,5 @@
ahaas@chromium.org
-bradnelson@chromium.org
clemensh@chromium.org
-eholk@chromium.org
titzer@chromium.org
# COMPONENT: Blink>JavaScript>WebAssembly
diff --git a/deps/v8/test/cctest/wasm/test-c-wasm-entry.cc b/deps/v8/test/cctest/wasm/test-c-wasm-entry.cc
index ca6662c90c..6c246bc48c 100644
--- a/deps/v8/test/cctest/wasm/test-c-wasm-entry.cc
+++ b/deps/v8/test/cctest/wasm/test-c-wasm-entry.cc
@@ -5,6 +5,7 @@
#include <cstdint>
#include "src/assembler-inl.h"
+#include "src/base/overflowing-math.h"
#include "src/objects-inl.h"
#include "src/wasm/wasm-objects.h"
#include "test/cctest/cctest.h"
@@ -59,11 +60,11 @@ class CWasmEntryArgTester {
WriteToBuffer(reinterpret_cast<Address>(arg_buffer.data()), args...);
Handle<Object> receiver = isolate_->factory()->undefined_value();
- Handle<Object> buffer_obj(reinterpret_cast<Object*>(arg_buffer.data()),
- isolate_);
+ Handle<Object> buffer_obj(
+ Object(reinterpret_cast<Address>(arg_buffer.data())), isolate_);
CHECK(!buffer_obj->IsHeapObject());
- Handle<Object> code_entry_obj(
- reinterpret_cast<Object*>(wasm_code_->instruction_start()), isolate_);
+ Handle<Object> code_entry_obj(Object(wasm_code_->instruction_start()),
+ isolate_);
CHECK(!code_entry_obj->IsHeapObject());
Handle<Object> call_args[]{code_entry_obj,
runner_.builder().instance_object(), buffer_obj};
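+  // (The Object(Address) spelling above reflects this V8 update's migration
+  // from raw Object* pointers to value-typed Object wrappers over a tagged
+  // Address.)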
@@ -104,7 +105,9 @@ TEST(TestCWasmEntryArgPassing_int32) {
CWasmEntryArgTester<int32_t, int32_t> tester(
{// Return 2*<0> + 1.
WASM_I32_ADD(WASM_I32_MUL(WASM_I32V_1(2), WASM_GET_LOCAL(0)), WASM_ONE)},
- [](int32_t a) { return 2 * a + 1; });
+ [](int32_t a) {
+ return base::AddWithWraparound(base::MulWithWraparound(2, a), 1);
+ });
FOR_INT32_INPUTS(v) { tester.CheckCall(*v); }
}
diff --git a/deps/v8/test/cctest/wasm/test-jump-table-assembler.cc b/deps/v8/test/cctest/wasm/test-jump-table-assembler.cc
index 53ee5eedd1..d8d9e0412e 100644
--- a/deps/v8/test/cctest/wasm/test-jump-table-assembler.cc
+++ b/deps/v8/test/cctest/wasm/test-jump-table-assembler.cc
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include <bitset>
+
#include "src/assembler-inl.h"
#include "src/macro-assembler-inl.h"
#include "src/simulator.h"
@@ -30,29 +32,57 @@ namespace {
static volatile int global_stop_bit = 0;
-Address GenerateJumpTableThunk(Address jump_target) {
- size_t allocated;
- byte* buffer;
+constexpr int kJumpTableSlotCount = 128;
+constexpr uint32_t kJumpTableSize =
+ JumpTableAssembler::SizeForNumberOfSlots(kJumpTableSlotCount);
+
#if V8_TARGET_ARCH_ARM64
- // TODO(wasm): Currently {kMaxWasmCodeMemory} limits code sufficiently, so
- // that the jump table only supports {near_call} distances.
- const uintptr_t kThunkAddrMask = (1 << WhichPowerOf2(kMaxWasmCodeMemory)) - 1;
- const int kArbitrarilyChosenRetryCount = 10; // Retry to avoid flakes.
- for (int retry = 0; retry < kArbitrarilyChosenRetryCount; ++retry) {
- Address random_addr = reinterpret_cast<Address>(GetRandomMmapAddr());
- void* address = reinterpret_cast<void*>((jump_target & ~kThunkAddrMask) |
- (random_addr & kThunkAddrMask));
- buffer = AllocateAssemblerBuffer(
- &allocated, AssemblerBase::kMinimalBufferSize, address);
- Address bufferptr = reinterpret_cast<uintptr_t>(buffer);
- if ((bufferptr & ~kThunkAddrMask) == (jump_target & ~kThunkAddrMask)) break;
- }
+constexpr uint32_t kAvailableBufferSlots =
+ (kMaxWasmCodeMemory - kJumpTableSize) / AssemblerBase::kMinimalBufferSize;
+constexpr uint32_t kBufferSlotStartOffset =
+ RoundUp<AssemblerBase::kMinimalBufferSize>(kJumpTableSize);
+#else
+constexpr uint32_t kAvailableBufferSlots = 0;
+#endif
+
+Address GenerateJumpTableThunk(
+ Address jump_target, byte* thunk_slot_buffer,
+ std::bitset<kAvailableBufferSlots>* used_slots,
+ std::vector<std::unique_ptr<TestingAssemblerBuffer>>* thunk_buffers) {
+#if V8_TARGET_ARCH_ARM64
+ // To guarantee that the branch range lies within the near-call range,
+ // generate the thunk in the same (kMaxWasmCodeMemory-sized) buffer as the
+ // jump_target itself.
+ //
+ // Allocate a slot that we haven't already used. This is necessary because
+ // each test iteration expects to generate two unique addresses and we leave
+ // each slot executable (and not writable).
+ base::RandomNumberGenerator* rng =
+ CcTest::i_isolate()->random_number_generator();
+ // Ensure a chance of completion without too much thrashing.
+ DCHECK(used_slots->count() < (used_slots->size() / 2));
+ int buffer_index;
+ do {
+ buffer_index = rng->NextInt(kAvailableBufferSlots);
+ } while (used_slots->test(buffer_index));
+ used_slots->set(buffer_index);
+ byte* buffer =
+ thunk_slot_buffer + buffer_index * AssemblerBase::kMinimalBufferSize;
+
+ DCHECK(TurboAssembler::IsNearCallOffset(
+ (reinterpret_cast<byte*>(jump_target) - buffer) / kInstrSize));
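+  // (On arm64, BL encodes a 26-bit signed word offset, i.e. roughly a
+  // +/-128MB range; keeping the thunks and the jump table inside one
+  // kMaxWasmCodeMemory-sized buffer keeps every branch within that range.)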
+
#else
- buffer = AllocateAssemblerBuffer(
- &allocated, AssemblerBase::kMinimalBufferSize, GetRandomMmapAddr());
+ USE(thunk_slot_buffer);
+ USE(used_slots);
+ thunk_buffers->emplace_back(AllocateAssemblerBuffer(
+ AssemblerBase::kMinimalBufferSize, GetRandomMmapAddr()));
+ byte* buffer = thunk_buffers->back()->start();
#endif
- MacroAssembler masm(nullptr, AssemblerOptions{}, buffer,
- static_cast<int>(allocated), CodeObjectRequired::kNo);
+
+ MacroAssembler masm(
+ nullptr, AssemblerOptions{}, CodeObjectRequired::kNo,
+ ExternalAssemblerBuffer(buffer, AssemblerBase::kMinimalBufferSize));
Label exit;
Register scratch = kReturnRegister0;
@@ -87,7 +117,6 @@ Address GenerateJumpTableThunk(Address jump_target) {
CodeDesc desc;
masm.GetCode(nullptr, &desc);
- MakeAssemblerBufferExecutable(buffer, allocated);
return reinterpret_cast<Address>(buffer);
}
@@ -151,27 +180,46 @@ class JumpTablePatcher : public v8::base::Thread {
// forth between two thunks. If there is a race then chances are high that
// one of the runners is currently executing the jump-table slot.
TEST(JumpTablePatchingStress) {
- constexpr int kJumpTableSlotCount = 128;
constexpr int kNumberOfRunnerThreads = 5;
- size_t allocated;
- byte* buffer = AllocateAssemblerBuffer(
- &allocated,
- JumpTableAssembler::SizeForNumberOfSlots(kJumpTableSlotCount));
+#if V8_TARGET_ARCH_ARM64
+ // We need the branches (from GenerateJumpTableThunk) to be within near-call
+ // range of the jump table slots. The address hint to AllocateAssemblerBuffer
+ // is not reliable enough to guarantee that we can always achieve this with
+ // separate allocations, so for Arm64 we generate all code in a single
+ // kMaxWasmCodeMemory-sized chunk.
+ //
+ // TODO(wasm): Currently {kMaxWasmCodeMemory} limits code sufficiently, so
+ // that the jump table only supports {near_call} distances.
+ STATIC_ASSERT(kMaxWasmCodeMemory >= kJumpTableSize);
+ auto buffer = AllocateAssemblerBuffer(kMaxWasmCodeMemory);
+ byte* thunk_slot_buffer = buffer->start() + kBufferSlotStartOffset;
+#else
+ auto buffer = AllocateAssemblerBuffer(kJumpTableSize);
+ byte* thunk_slot_buffer = nullptr;
+#endif
+ std::bitset<kAvailableBufferSlots> used_thunk_slots;
+ buffer->MakeWritableAndExecutable();
// Iterate through jump-table slots to hammer at different alignments within
// the jump-table, thereby increasing stress for variable-length ISAs.
- Address slot_start = reinterpret_cast<Address>(buffer);
+ Address slot_start = reinterpret_cast<Address>(buffer->start());
for (int slot = 0; slot < kJumpTableSlotCount; ++slot) {
TRACE("Hammering on jump table slot #%d ...\n", slot);
uint32_t slot_offset = JumpTableAssembler::SlotIndexToOffset(slot);
- Address thunk1 = GenerateJumpTableThunk(slot_start + slot_offset);
- Address thunk2 = GenerateJumpTableThunk(slot_start + slot_offset);
+ std::vector<std::unique_ptr<TestingAssemblerBuffer>> thunk_buffers;
+ Address thunk1 =
+ GenerateJumpTableThunk(slot_start + slot_offset, thunk_slot_buffer,
+ &used_thunk_slots, &thunk_buffers);
+ Address thunk2 =
+ GenerateJumpTableThunk(slot_start + slot_offset, thunk_slot_buffer,
+ &used_thunk_slots, &thunk_buffers);
TRACE(" generated thunk1: " V8PRIxPTR_FMT "\n", thunk1);
TRACE(" generated thunk2: " V8PRIxPTR_FMT "\n", thunk2);
JumpTableAssembler::PatchJumpTableSlot(slot_start, slot, thunk1,
WasmCode::kFlushICache);
+ for (auto& buf : thunk_buffers) buf->MakeExecutable();
// Start multiple runner threads and a patcher thread that hammer on the
// same jump-table slot concurrently.
std::list<JumpTableRunner> runners;
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-64.cc b/deps/v8/test/cctest/wasm/test-run-wasm-64.cc
index be45f5bc17..cca0c1b061 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-64.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-64.cc
@@ -8,6 +8,7 @@
#include "src/assembler-inl.h"
#include "src/base/bits.h"
+#include "src/base/overflowing-math.h"
#include "src/objects-inl.h"
#include "test/cctest/cctest.h"
@@ -53,7 +54,9 @@ WASM_EXEC_TEST(I64Add) {
WasmRunner<int64_t, int64_t, int64_t> r(execution_tier);
BUILD(r, WASM_I64_ADD(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
FOR_INT64_INPUTS(i) {
- FOR_INT64_INPUTS(j) { CHECK_EQ(*i + *j, r.Call(*i, *j)); }
+ FOR_INT64_INPUTS(j) {
+ CHECK_EQ(base::AddWithWraparound(*i, *j), r.Call(*i, *j));
+ }
}
}
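+
+// Why the wraparound helpers: signed integer overflow is undefined behavior
+// in C++, while wasm i64 arithmetic is defined to wrap, so the expected
+// values here are computed with base::*WithWraparound rather than raw
+// +, -, *.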
@@ -75,7 +78,9 @@ WASM_EXEC_TEST(I64Sub) {
WasmRunner<int64_t, int64_t, int64_t> r(execution_tier);
BUILD(r, WASM_I64_SUB(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
FOR_INT64_INPUTS(i) {
- FOR_INT64_INPUTS(j) { CHECK_EQ(*i - *j, r.Call(*i, *j)); }
+ FOR_INT64_INPUTS(j) {
+ CHECK_EQ(base::SubWithWraparound(*i, *j), r.Call(*i, *j));
+ }
}
}
@@ -94,7 +99,8 @@ WASM_EXEC_TEST(I64AddUseOnlyLowWord) {
WASM_I64_ADD(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1))));
FOR_INT64_INPUTS(i) {
FOR_INT64_INPUTS(j) {
- CHECK_EQ(static_cast<int32_t>(*i + *j), r.Call(*i, *j));
+ CHECK_EQ(static_cast<int32_t>(base::AddWithWraparound(*i, *j)),
+ r.Call(*i, *j));
}
}
}
@@ -105,7 +111,8 @@ WASM_EXEC_TEST(I64SubUseOnlyLowWord) {
WASM_I64_SUB(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1))));
FOR_INT64_INPUTS(i) {
FOR_INT64_INPUTS(j) {
- CHECK_EQ(static_cast<int32_t>(*i - *j), r.Call(*i, *j));
+ CHECK_EQ(static_cast<int32_t>(base::SubWithWraparound(*i, *j)),
+ r.Call(*i, *j));
}
}
}
@@ -116,7 +123,8 @@ WASM_EXEC_TEST(I64MulUseOnlyLowWord) {
WASM_I64_MUL(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1))));
FOR_INT64_INPUTS(i) {
FOR_INT64_INPUTS(j) {
- CHECK_EQ(static_cast<int32_t>(*i * *j), r.Call(*i, *j));
+ CHECK_EQ(static_cast<int32_t>(base::MulWithWraparound(*i, *j)),
+ r.Call(*i, *j));
}
}
}
@@ -127,7 +135,7 @@ WASM_EXEC_TEST(I64ShlUseOnlyLowWord) {
WASM_I64_SHL(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1))));
FOR_INT64_INPUTS(i) {
FOR_INT64_INPUTS(j) {
- int32_t expected = static_cast<int32_t>((*i) << (*j & 0x3F));
+ int32_t expected = static_cast<int32_t>(base::ShlWithWraparound(*i, *j));
CHECK_EQ(expected, r.Call(*i, *j));
}
}
@@ -1230,7 +1238,8 @@ WASM_EXEC_TEST(I64ReinterpretF64) {
WASM_LOAD_MEM(MachineType::Float64(), WASM_ZERO)));
FOR_INT32_INPUTS(i) {
- int64_t expected = static_cast<int64_t>(*i) * 0x300010001;
+ int64_t expected = base::MulWithWraparound(static_cast<int64_t>(*i),
+ int64_t{0x300010001L});
r.builder().WriteMemory(&memory[0], expected);
CHECK_EQ(expected, r.Call());
}
@@ -1255,7 +1264,8 @@ WASM_EXEC_TEST(F64ReinterpretI64) {
WASM_GET_LOCAL(0));
FOR_INT32_INPUTS(i) {
- int64_t expected = static_cast<int64_t>(*i) * 0x300010001;
+ int64_t expected = base::MulWithWraparound(static_cast<int64_t>(*i),
+ int64_t{0x300010001L});
CHECK_EQ(expected, r.Call(expected));
CHECK_EQ(expected, r.builder().ReadMemory<int64_t>(&memory[0]));
}
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-atomics64.cc b/deps/v8/test/cctest/wasm/test-run-wasm-atomics64.cc
index 570c48d240..624982a117 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-atomics64.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-atomics64.cc
@@ -484,14 +484,14 @@ void RunConvertTest(ExecutionTier execution_tier, WasmOpcode wasm_op,
r.builder().AddMemoryElems<uint64_t>(kWasmPageSize / sizeof(uint64_t));
r.builder().SetHasSharedMemory();
- BUILD(r, WASM_I32_CONVERT_I64(WASM_ATOMICS_BINOP(
- kExprI64AtomicAdd, WASM_ZERO, WASM_GET_LOCAL(0),
- MachineRepresentation::kWord64)));
+ BUILD(r, WASM_I32_CONVERT_I64(
+ WASM_ATOMICS_BINOP(wasm_op, WASM_ZERO, WASM_GET_LOCAL(0),
+ MachineRepresentation::kWord64)));
uint64_t initial = 0x1111222233334444, local = 0x1111111111111111;
r.builder().WriteMemory(&memory[0], initial);
CHECK_EQ(static_cast<uint32_t>(initial), r.Call(local));
- uint64_t expected = Add(initial, local);
+ uint64_t expected = op(initial, local);
CHECK_EQ(expected, r.builder().ReadMemory(&memory[0]));
}
@@ -520,6 +520,104 @@ WASM_EXEC_TEST(I64AtomicConvertCompareExchange) {
CHECK_EQ(expected, r.builder().ReadMemory(&memory[0]));
}
+// The WASM_I64_EQ operation is used here to test that the index node
+// is lowered correctly.
+void RunNonConstIndexTest(ExecutionTier execution_tier, WasmOpcode wasm_op,
+ Uint64BinOp op) {
+ EXPERIMENTAL_FLAG_SCOPE(threads);
+ WasmRunner<uint32_t, uint64_t> r(execution_tier);
+ uint64_t* memory =
+ r.builder().AddMemoryElems<uint64_t>(kWasmPageSize / sizeof(uint64_t));
+ r.builder().SetHasSharedMemory();
+
+ BUILD(r, WASM_I32_CONVERT_I64(WASM_ATOMICS_BINOP(
+ wasm_op, WASM_I64_EQ(WASM_I64V(1), WASM_I64V(0)),
+ WASM_GET_LOCAL(0), MachineRepresentation::kWord32)));
+
+ uint64_t initial = 0x1111222233334444, local = 0x5555666677778888;
+ r.builder().WriteMemory(&memory[0], initial);
+ CHECK_EQ(static_cast<uint32_t>(initial), r.Call(local));
+ CHECK_EQ(static_cast<uint32_t>(op(initial, local)),
+ static_cast<uint32_t>(r.builder().ReadMemory(&memory[0])));
+}
+
+#define TEST_OPERATION(Name) \
+ WASM_EXEC_TEST(I64AtomicConstIndex##Name) { \
+ RunNonConstIndexTest(execution_tier, kExprI64Atomic##Name##32U, Name); \
+ }
+OPERATION_LIST(TEST_OPERATION)
+#undef TEST_OPERATION
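+
+// (OPERATION_LIST expands TEST_OPERATION once per atomic binop -- presumably
+// Add, Sub, And, Or, Xor, Exchange -- so each 32U variant gets its own
+// cctest.)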
+
+WASM_EXEC_TEST(I64AtomicNonConstIndexCompareExchange) {
+ EXPERIMENTAL_FLAG_SCOPE(threads);
+ WasmRunner<uint32_t, uint64_t, uint64_t> r(execution_tier);
+ uint64_t* memory =
+ r.builder().AddMemoryElems<uint64_t>(kWasmPageSize / sizeof(uint64_t));
+ r.builder().SetHasSharedMemory();
+
+ BUILD(r, WASM_I32_CONVERT_I64(WASM_ATOMICS_TERNARY_OP(
+ kExprI64AtomicCompareExchange16U,
+ WASM_I64_EQ(WASM_I64V(1), WASM_I64V(0)), WASM_GET_LOCAL(0),
+ WASM_GET_LOCAL(1), MachineRepresentation::kWord16)));
+
+ uint64_t initial = 4444333322221111, local = 0x9999888877776666;
+ r.builder().WriteMemory(&memory[0], initial);
+ CHECK_EQ(static_cast<uint16_t>(initial), r.Call(initial, local));
+ CHECK_EQ(static_cast<uint16_t>(CompareExchange(initial, initial, local)),
+ static_cast<uint16_t>(r.builder().ReadMemory(&memory[0])));
+}
+
+WASM_EXEC_TEST(I64AtomicNonConstIndexLoad8U) {
+ EXPERIMENTAL_FLAG_SCOPE(threads);
+ WasmRunner<uint32_t> r(execution_tier);
+ r.builder().SetHasSharedMemory();
+ uint64_t* memory =
+ r.builder().AddMemoryElems<uint64_t>(kWasmPageSize / sizeof(uint64_t));
+ BUILD(r, WASM_I32_CONVERT_I64(WASM_ATOMICS_LOAD_OP(
+ kExprI64AtomicLoad8U, WASM_I64_EQ(WASM_I64V(1), WASM_I64V(0)),
+ MachineRepresentation::kWord8)));
+
+ uint64_t expected = 0xffffeeeeddddcccc;
+ r.builder().WriteMemory(&memory[0], expected);
+ CHECK_EQ(static_cast<uint8_t>(expected), r.Call());
+}
+
+WASM_EXEC_TEST(I64AtomicCompareExchangeFail) {
+ EXPERIMENTAL_FLAG_SCOPE(threads);
+ WasmRunner<uint64_t, uint64_t, uint64_t> r(execution_tier);
+ r.builder().SetHasSharedMemory();
+ uint64_t* memory =
+ r.builder().AddMemoryElems<uint64_t>(kWasmPageSize / sizeof(uint64_t));
+ BUILD(r, WASM_ATOMICS_TERNARY_OP(
+ kExprI64AtomicCompareExchange, WASM_I32V_1(0), WASM_GET_LOCAL(0),
+ WASM_GET_LOCAL(1), MachineRepresentation::kWord64));
+
+ uint64_t initial = 0x1111222233334444, local = 0x1111111111111111,
+ test = 0x2222222222222222;
+ r.builder().WriteMemory(&memory[0], initial);
+ CHECK_EQ(initial, r.Call(test, local));
+ // No memory change on a failed compare exchange.
+ CHECK_EQ(initial, r.builder().ReadMemory(&memory[0]));
+}
+
+WASM_EXEC_TEST(I64AtomicCompareExchange32UFail) {
+ EXPERIMENTAL_FLAG_SCOPE(threads);
+ WasmRunner<uint64_t, uint64_t, uint64_t> r(execution_tier);
+ r.builder().SetHasSharedMemory();
+ uint64_t* memory =
+ r.builder().AddMemoryElems<uint64_t>(kWasmPageSize / sizeof(uint64_t));
+ BUILD(r, WASM_ATOMICS_TERNARY_OP(kExprI64AtomicCompareExchange32U,
+ WASM_I32V_1(0), WASM_GET_LOCAL(0),
+ WASM_GET_LOCAL(1),
+ MachineRepresentation::kWord32));
+
+ uint64_t initial = 0x1111222233334444, test = 0xffffffff, local = 0xeeeeeeee;
+ r.builder().WriteMemory(&memory[0], initial);
+ CHECK_EQ(static_cast<uint32_t>(initial), r.Call(test, local));
+ // No memory change on a failed compare exchange.
+ CHECK_EQ(initial, r.builder().ReadMemory(&memory[0]));
+}
+
} // namespace test_run_wasm_atomics_64
} // namespace wasm
} // namespace internal
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-interpreter.cc b/deps/v8/test/cctest/wasm/test-run-wasm-interpreter.cc
index f788cc84b6..c5d0d84e63 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-interpreter.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-interpreter.cc
@@ -291,7 +291,7 @@ TEST(Breakpoint_I32And_disable) {
}
}
-TEST(GrowMemory) {
+TEST(MemoryGrow) {
{
WasmRunner<int32_t, uint32_t> r(ExecutionTier::kInterpreter);
r.builder().AddMemory(kWasmPageSize);
@@ -308,7 +308,7 @@ TEST(GrowMemory) {
}
}
-TEST(GrowMemoryPreservesData) {
+TEST(MemoryGrowPreservesData) {
int32_t index = 16;
int32_t value = 2335;
WasmRunner<int32_t, uint32_t> r(ExecutionTier::kInterpreter);
@@ -320,7 +320,7 @@ TEST(GrowMemoryPreservesData) {
CHECK_EQ(value, r.Call(1));
}
-TEST(GrowMemoryInvalidSize) {
+TEST(MemoryGrowInvalidSize) {
// Grow memory by an invalid amount without initial memory.
WasmRunner<int32_t, uint32_t> r(ExecutionTier::kInterpreter);
r.builder().AddMemory(kWasmPageSize);
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-js.cc b/deps/v8/test/cctest/wasm/test-run-wasm-js.cc
index b9de081c9f..499942464e 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-js.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-js.cc
@@ -9,6 +9,7 @@
#include "src/api-inl.h"
#include "src/assembler-inl.h"
+#include "src/objects/heap-number-inl.h"
#include "test/cctest/cctest.h"
#include "test/cctest/compiler/value-helper.h"
#include "test/cctest/wasm/wasm-run-utils.h"
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-module.cc b/deps/v8/test/cctest/wasm/test-run-wasm-module.cc
index 21d92cbada..2503ec57fd 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-module.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-module.cc
@@ -235,7 +235,7 @@ TEST(MemorySize) {
TEST(Run_WasmModule_MemSize_GrowMem) {
{
- // Initial memory size = 16 + GrowMemory(10)
+ // Initial memory size = 16 + MemoryGrow(10)
static const int kExpectedValue = 26;
TestSignatures sigs;
v8::internal::AccountingAllocator allocator;
@@ -252,7 +252,7 @@ TEST(Run_WasmModule_MemSize_GrowMem) {
Cleanup();
}
-TEST(GrowMemoryZero) {
+TEST(MemoryGrowZero) {
{
// Initial memory size is 16, see wasm-module-builder.cc
static const int kExpectedValue = 16;
@@ -362,7 +362,7 @@ TEST(TestInterruptLoop) {
Cleanup();
}
-TEST(Run_WasmModule_GrowMemoryInIf) {
+TEST(Run_WasmModule_MemoryGrowInIf) {
{
TestSignatures sigs;
v8::internal::AccountingAllocator allocator;
@@ -381,7 +381,7 @@ TEST(Run_WasmModule_GrowMemoryInIf) {
TEST(Run_WasmModule_GrowMemOobOffset) {
{
static const int kPageSize = 0x10000;
- // Initial memory size = 16 + GrowMemory(10)
+ // Initial memory size = 16 + MemoryGrow(10)
static const int index = kPageSize * 17 + 4;
int value = 0xACED;
TestSignatures sigs;
@@ -403,7 +403,7 @@ TEST(Run_WasmModule_GrowMemOobOffset) {
TEST(Run_WasmModule_GrowMemOobFixedIndex) {
{
static const int kPageSize = 0x10000;
- // Initial memory size = 16 + GrowMemory(10)
+ // Initial memory size = 16 + MemoryGrow(10)
static const int index = kPageSize * 26 + 4;
int value = 0xACED;
TestSignatures sigs;
@@ -720,7 +720,7 @@ struct ManuallyExternalizedBuffer {
size_t allocation_length_;
bool const should_free_;
- ManuallyExternalizedBuffer(JSArrayBuffer* buffer, Isolate* isolate)
+ ManuallyExternalizedBuffer(JSArrayBuffer buffer, Isolate* isolate)
: isolate_(isolate),
buffer_(buffer, isolate),
allocation_base_(buffer->allocation_base()),
@@ -770,7 +770,7 @@ TEST(Run_WasmModule_Buffer_Externalized_GrowMem) {
// Grow using the API.
uint32_t result = WasmMemoryObject::Grow(isolate, memory_object, 4);
CHECK_EQ(16, result);
- CHECK(buffer1.buffer_->was_neutered()); // growing always neuters
+ CHECK(buffer1.buffer_->was_detached()); // growing always detaches
CHECK_EQ(0, buffer1.buffer_->byte_length());
CHECK_NE(*buffer1.buffer_, memory_object->array_buffer());
@@ -781,7 +781,7 @@ TEST(Run_WasmModule_Buffer_Externalized_GrowMem) {
// Grow using an internal WASM bytecode.
result = testing::RunWasmModuleForTesting(isolate, instance, 0, nullptr);
CHECK_EQ(26, result);
- CHECK(buffer2.buffer_->was_neutered()); // growing always neuters
+ CHECK(buffer2.buffer_->was_detached()); // growing always detaches
CHECK_EQ(0, buffer2.buffer_->byte_length());
CHECK_NE(*buffer2.buffer_, memory_object->array_buffer());
}
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-simd.cc b/deps/v8/test/cctest/wasm/test-run-wasm-simd.cc
index b0f3dcf8ce..ed8bdf7281 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-simd.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-simd.cc
@@ -2,8 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include <type_traits>
+
#include "src/assembler-inl.h"
#include "src/base/bits.h"
+#include "src/base/overflowing-math.h"
#include "test/cctest/cctest.h"
#include "test/cctest/compiler/value-helper.h"
#include "test/cctest/wasm/wasm-run-utils.h"
@@ -50,22 +53,29 @@ typedef int8_t (*Int8ShiftOp)(int8_t, int);
void RunWasm_##name##_Impl(LowerSimd lower_simd, ExecutionTier execution_tier)
// Generic expected value functions.
-template <typename T>
+template <typename T, typename = typename std::enable_if<
+ std::is_floating_point<T>::value>::type>
T Negate(T a) {
return -a;
}
-template <typename T>
+// For signed integral types, use base::AddWithWraparound.
+template <typename T, typename = typename std::enable_if<
+ std::is_floating_point<T>::value>::type>
T Add(T a, T b) {
return a + b;
}
-template <typename T>
+// For signed integral types, use base::SubWithWraparound.
+template <typename T, typename = typename std::enable_if<
+ std::is_floating_point<T>::value>::type>
T Sub(T a, T b) {
return a - b;
}
-template <typename T>
+// For signed integral types, use base::MulWithWraparound.
+template <typename T, typename = typename std::enable_if<
+ std::is_floating_point<T>::value>::type>
T Mul(T a, T b) {
return a * b;
}
@@ -243,16 +253,6 @@ T Sqrt(T a) {
return std::sqrt(a);
}
-template <typename T>
-T Recip(T a) {
- return 1.0f / a;
-}
-
-template <typename T>
-T RecipSqrt(T a) {
- return 1.0f / std::sqrt(a);
-}
-
} // namespace
#define WASM_SIMD_CHECK_LANE(TYPE, value, LANE_TYPE, lane_value, lane_index) \
@@ -438,10 +438,36 @@ WASM_SIMD_TEST(F32x4ReplaceLane) {
CHECK_EQ(1, r.Call(3.14159f, -1.5f));
}
-#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS || \
- V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_IA32
+// Runs tests of compiled code, using the interpreter as a reference.
+#define WASM_SIMD_COMPILED_TEST(name) \
+ void RunWasm_##name##_Impl(LowerSimd lower_simd, \
+ ExecutionTier execution_tier); \
+ TEST(RunWasm_##name##_turbofan) { \
+ EXPERIMENTAL_FLAG_SCOPE(simd); \
+ RunWasm_##name##_Impl(kNoLowerSimd, ExecutionTier::kOptimized); \
+ } \
+ TEST(RunWasm_##name##_simd_lowered) { \
+ EXPERIMENTAL_FLAG_SCOPE(simd); \
+ RunWasm_##name##_Impl(kLowerSimd, ExecutionTier::kOptimized); \
+ } \
+ void RunWasm_##name##_Impl(LowerSimd lower_simd, ExecutionTier execution_tier)
+
+// The macro below disables the lowering variant for tests of nodes where the
+// SIMD lowering doesn't work correctly. It returns early if the CPU does not
+// support SIMD, as the graph would be implicitly lowered in that case.
+#define WASM_SIMD_TEST_TURBOFAN(name) \
+ void RunWasm_##name##_Impl(LowerSimd lower_simd, \
+ ExecutionTier execution_tier); \
+ TEST(RunWasm_##name##_turbofan) { \
+ if (!CpuFeatures::SupportsWasmSimd128()) return; \
+ EXPERIMENTAL_FLAG_SCOPE(simd); \
+ RunWasm_##name##_Impl(kNoLowerSimd, ExecutionTier::kOptimized); \
+ } \
+ void RunWasm_##name##_Impl(LowerSimd lower_simd, ExecutionTier execution_tier)
+
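// A note on the trailing-body macro pattern used here: an invocation such as
// WASM_SIMD_COMPILED_TEST(Foo) { ...body... } expands to roughly the
// following (sketch; EXPERIMENTAL_FLAG_SCOPE and the lowered variant elided):
//
//   void RunWasm_Foo_Impl(LowerSimd lower_simd, ExecutionTier execution_tier);
//   TEST(RunWasm_Foo_turbofan) {
//     RunWasm_Foo_Impl(kNoLowerSimd, ExecutionTier::kOptimized);
//   }
//   void RunWasm_Foo_Impl(LowerSimd lower_simd, ExecutionTier execution_tier)
//   { ...body... }  // the user-written braces become the Impl definition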
// Tests both signed and unsigned conversion.
-WASM_SIMD_TEST(F32x4ConvertI32x4) {
+// v8:8425 tracks this test being enabled in the interpreter.
+WASM_SIMD_COMPILED_TEST(F32x4ConvertI32x4) {
WasmRunner<int32_t, int32_t, float, float> r(execution_tier, lower_simd);
byte a = 0;
byte expected_signed = 1;
@@ -463,8 +489,6 @@ WASM_SIMD_TEST(F32x4ConvertI32x4) {
static_cast<float>(static_cast<uint32_t>(*i))));
}
}
-#endif // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS ||
- // V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_IA32
void RunF32x4UnOpTest(ExecutionTier execution_tier, LowerSimd lower_simd,
WasmOpcode simd_op, FloatUnOp expected_op,
@@ -498,13 +522,13 @@ WASM_SIMD_TEST(F32x4Neg) {
static const float kApproxError = 0.01f;
WASM_SIMD_TEST(F32x4RecipApprox) {
- RunF32x4UnOpTest(execution_tier, lower_simd, kExprF32x4RecipApprox, Recip,
- kApproxError);
+ RunF32x4UnOpTest(execution_tier, lower_simd, kExprF32x4RecipApprox,
+ base::Recip, kApproxError);
}
WASM_SIMD_TEST(F32x4RecipSqrtApprox) {
RunF32x4UnOpTest(execution_tier, lower_simd, kExprF32x4RecipSqrtApprox,
- RecipSqrt, kApproxError);
+ base::RecipSqrt, kApproxError);
}
void RunF32x4BinOpTest(ExecutionTier execution_tier, LowerSimd lower_simd,
@@ -819,9 +843,6 @@ WASM_SIMD_TEST(I8x16ReplaceLane) {
CHECK_EQ(1, r.Call(1, 2));
}
-#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS || \
- V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_IA32
-
int32_t ConvertToInt(double val, bool unsigned_integer) {
if (std::isnan(val)) return 0;
if (unsigned_integer) {
@@ -900,8 +921,6 @@ WASM_SIMD_TEST(I32x4ConvertI16x8) {
CHECK_EQ(1, r.Call(*i, unpacked_signed, unpacked_unsigned, 0));
}
}
-#endif // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS ||
- // V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_IA32
void RunI32x4UnOpTest(ExecutionTier execution_tier, LowerSimd lower_simd,
WasmOpcode simd_op, Int32UnOp expected_op) {
@@ -917,7 +936,8 @@ void RunI32x4UnOpTest(ExecutionTier execution_tier, LowerSimd lower_simd,
}
WASM_SIMD_TEST(I32x4Neg) {
- RunI32x4UnOpTest(execution_tier, lower_simd, kExprI32x4Neg, Negate);
+ RunI32x4UnOpTest(execution_tier, lower_simd, kExprI32x4Neg,
+ base::NegateWithWraparound);
}
WASM_SIMD_TEST(S128Not) {
@@ -944,15 +964,18 @@ void RunI32x4BinOpTest(ExecutionTier execution_tier, LowerSimd lower_simd,
}
WASM_SIMD_TEST(I32x4Add) {
- RunI32x4BinOpTest(execution_tier, lower_simd, kExprI32x4Add, Add);
+ RunI32x4BinOpTest(execution_tier, lower_simd, kExprI32x4Add,
+ base::AddWithWraparound);
}
WASM_SIMD_TEST(I32x4Sub) {
- RunI32x4BinOpTest(execution_tier, lower_simd, kExprI32x4Sub, Sub);
+ RunI32x4BinOpTest(execution_tier, lower_simd, kExprI32x4Sub,
+ base::SubWithWraparound);
}
WASM_SIMD_TEST(I32x4Mul) {
- RunI32x4BinOpTest(execution_tier, lower_simd, kExprI32x4Mul, Mul);
+ RunI32x4BinOpTest(execution_tier, lower_simd, kExprI32x4Mul,
+ base::MulWithWraparound);
}
WASM_SIMD_TEST(I32x4MinS) {
@@ -1137,7 +1160,8 @@ void RunI16x8UnOpTest(ExecutionTier execution_tier, LowerSimd lower_simd,
}
WASM_SIMD_TEST(I16x8Neg) {
- RunI16x8UnOpTest(execution_tier, lower_simd, kExprI16x8Neg, Negate);
+ RunI16x8UnOpTest(execution_tier, lower_simd, kExprI16x8Neg,
+ base::NegateWithWraparound);
}
// Tests both signed and unsigned conversion from I32x4 (packing).
@@ -1205,7 +1229,8 @@ void RunI16x8BinOpTest(ExecutionTier execution_tier, LowerSimd lower_simd,
}
WASM_SIMD_TEST(I16x8Add) {
- RunI16x8BinOpTest(execution_tier, lower_simd, kExprI16x8Add, Add);
+ RunI16x8BinOpTest(execution_tier, lower_simd, kExprI16x8Add,
+ base::AddWithWraparound);
}
WASM_SIMD_TEST(I16x8AddSaturateS) {
@@ -1214,7 +1239,8 @@ WASM_SIMD_TEST(I16x8AddSaturateS) {
}
WASM_SIMD_TEST(I16x8Sub) {
- RunI16x8BinOpTest(execution_tier, lower_simd, kExprI16x8Sub, Sub);
+ RunI16x8BinOpTest(execution_tier, lower_simd, kExprI16x8Sub,
+ base::SubWithWraparound);
}
WASM_SIMD_TEST(I16x8SubSaturateS) {
@@ -1223,7 +1249,8 @@ WASM_SIMD_TEST(I16x8SubSaturateS) {
}
WASM_SIMD_TEST(I16x8Mul) {
- RunI16x8BinOpTest(execution_tier, lower_simd, kExprI16x8Mul, Mul);
+ RunI16x8BinOpTest(execution_tier, lower_simd, kExprI16x8Mul,
+ base::MulWithWraparound);
}
WASM_SIMD_TEST(I16x8MinS) {
@@ -1363,7 +1390,8 @@ void RunI8x16UnOpTest(ExecutionTier execution_tier, LowerSimd lower_simd,
}
WASM_SIMD_TEST(I8x16Neg) {
- RunI8x16UnOpTest(execution_tier, lower_simd, kExprI8x16Neg, Negate);
+ RunI8x16UnOpTest(execution_tier, lower_simd, kExprI8x16Neg,
+ base::NegateWithWraparound);
}
// Tests both signed and unsigned conversion from I16x8 (packing).
@@ -1433,7 +1461,8 @@ void RunI8x16BinOpTest(ExecutionTier execution_tier, LowerSimd lower_simd,
}
WASM_SIMD_TEST(I8x16Add) {
- RunI8x16BinOpTest(execution_tier, lower_simd, kExprI8x16Add, Add);
+ RunI8x16BinOpTest(execution_tier, lower_simd, kExprI8x16Add,
+ base::AddWithWraparound);
}
WASM_SIMD_TEST(I8x16AddSaturateS) {
@@ -1442,7 +1471,8 @@ WASM_SIMD_TEST(I8x16AddSaturateS) {
}
WASM_SIMD_TEST(I8x16Sub) {
- RunI8x16BinOpTest(execution_tier, lower_simd, kExprI8x16Sub, Sub);
+ RunI8x16BinOpTest(execution_tier, lower_simd, kExprI8x16Sub,
+ base::SubWithWraparound);
}
WASM_SIMD_TEST(I8x16SubSaturateS) {
@@ -1542,13 +1572,10 @@ WASM_SIMD_TEST(I8x16LeU) {
UnsignedLessEqual);
}
-#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS || \
- V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_IA32
WASM_SIMD_TEST(I8x16Mul) {
- RunI8x16BinOpTest(execution_tier, lower_simd, kExprI8x16Mul, Mul);
+ RunI8x16BinOpTest(execution_tier, lower_simd, kExprI8x16Mul,
+ base::MulWithWraparound);
}
-#endif // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS ||
- // V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_IA32
void RunI8x16ShiftOpTest(ExecutionTier execution_tier, LowerSimd lower_simd,
WasmOpcode simd_op, Int8ShiftOp expected_op) {
@@ -1566,8 +1593,6 @@ void RunI8x16ShiftOpTest(ExecutionTier execution_tier, LowerSimd lower_simd,
}
}
-#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS || \
- V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_IA32
WASM_SIMD_TEST(I8x16Shl) {
RunI8x16ShiftOpTest(execution_tier, lower_simd, kExprI8x16Shl,
LogicalShiftLeft);
@@ -1582,14 +1607,12 @@ WASM_SIMD_TEST(I8x16ShrU) {
RunI8x16ShiftOpTest(execution_tier, lower_simd, kExprI8x16ShrU,
LogicalShiftRight);
}
-#endif // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS ||
- // V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_IA32
// Test Select by making a mask where the 0th and 3rd lanes are true and the
// rest false, and comparing for non-equality with zero to convert to a boolean
// vector.
#define WASM_SIMD_SELECT_TEST(format) \
- WASM_SIMD_TEST(S##format##Select) { \
+ WASM_SIMD_TEST_TURBOFAN(S##format##Select) { \
WasmRunner<int32_t, int32_t, int32_t> r(execution_tier, lower_simd); \
byte val1 = 0; \
byte val2 = 1; \
@@ -1610,10 +1633,9 @@ WASM_SIMD_TEST(I8x16ShrU) {
WASM_SET_LOCAL( \
mask, \
WASM_SIMD_SELECT( \
- format, \
+ format, WASM_GET_LOCAL(src1), WASM_GET_LOCAL(src2), \
WASM_SIMD_BINOP(kExprI##format##Ne, WASM_GET_LOCAL(mask), \
- WASM_GET_LOCAL(zero)), \
- WASM_GET_LOCAL(src1), WASM_GET_LOCAL(src2))), \
+ WASM_GET_LOCAL(zero)))), \
WASM_SIMD_CHECK_LANE(I##format, mask, I32, val2, 0), \
WASM_SIMD_CHECK_LANE(I##format, mask, I32, val1, 1), \
WASM_SIMD_CHECK_LANE(I##format, mask, I32, val1, 2), \
@@ -1629,7 +1651,7 @@ WASM_SIMD_SELECT_TEST(8x16)
// Test Select by making a mask where the 0th and 3rd lanes are non-zero and the
// rest 0. The mask is not the result of a comparison op.
#define WASM_SIMD_NON_CANONICAL_SELECT_TEST(format) \
- WASM_SIMD_TEST(S##format##NonCanonicalSelect) { \
+ WASM_SIMD_TEST_TURBOFAN(S##format##NonCanonicalSelect) { \
WasmRunner<int32_t, int32_t, int32_t, int32_t> r(execution_tier, \
lower_simd); \
byte val1 = 0; \
@@ -1649,9 +1671,9 @@ WASM_SIMD_SELECT_TEST(8x16)
1, WASM_GET_LOCAL(zero), WASM_I32V(0xF))), \
WASM_SET_LOCAL(mask, WASM_SIMD_I##format##_REPLACE_LANE( \
2, WASM_GET_LOCAL(mask), WASM_I32V(0xF))), \
- WASM_SET_LOCAL(mask, WASM_SIMD_SELECT(format, WASM_GET_LOCAL(mask), \
- WASM_GET_LOCAL(src1), \
- WASM_GET_LOCAL(src2))), \
+ WASM_SET_LOCAL(mask, WASM_SIMD_SELECT(format, WASM_GET_LOCAL(src1), \
+ WASM_GET_LOCAL(src2), \
+ WASM_GET_LOCAL(mask))), \
WASM_SIMD_CHECK_LANE(I##format, mask, I32, val2, 0), \
WASM_SIMD_CHECK_LANE(I##format, mask, I32, combined, 1), \
WASM_SIMD_CHECK_LANE(I##format, mask, I32, combined, 2), \
@@ -1740,8 +1762,6 @@ void RunShuffleOpTest(ExecutionTier execution_tier, LowerSimd lower_simd,
other_swizzle);
}
-#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS || \
- V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_IA32
#define SHUFFLE_LIST(V) \
V(S128Identity) \
V(S32x4Dup) \
@@ -1939,20 +1959,6 @@ void BuildShuffle(std::vector<Shuffle>& shuffles, std::vector<byte>* buffer) {
for (size_t j = 0; j < arraysize(epilog); ++j) buffer->push_back(epilog[j]);
}
-// Runs tests of compiled code, using the interpreter as a reference.
-#define WASM_SIMD_COMPILED_TEST(name) \
- void RunWasm_##name##_Impl(LowerSimd lower_simd, \
- ExecutionTier execution_tier); \
- TEST(RunWasm_##name##_turbofan) { \
- EXPERIMENTAL_FLAG_SCOPE(simd); \
- RunWasm_##name##_Impl(kNoLowerSimd, ExecutionTier::kOptimized); \
- } \
- TEST(RunWasm_##name##_simd_lowered) { \
- EXPERIMENTAL_FLAG_SCOPE(simd); \
- RunWasm_##name##_Impl(kLowerSimd, ExecutionTier::kOptimized); \
- } \
- void RunWasm_##name##_Impl(LowerSimd lower_simd, ExecutionTier execution_tier)
-
void RunWasmCode(ExecutionTier execution_tier, LowerSimd lower_simd,
const std::vector<byte>& code,
std::array<int8_t, kSimd128Size>* result) {
@@ -2000,8 +2006,6 @@ WASM_SIMD_COMPILED_TEST(S8x16MultiShuffleFuzz) {
}
}
}
-#endif // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS ||
- // V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_IA32
// Boolean unary operations are 'AllTrue' and 'AnyTrue', which return an integer
// result. Use relational ops on numeric vectors to create the boolean vector
@@ -2320,7 +2324,7 @@ WASM_SIMD_TEST(SimdF32x4SetGlobal) {
CHECK_EQ(GetScalar(global, 3), 65.0f);
}
-WASM_SIMD_TEST(SimdLoadStoreLoad) {
+WASM_SIMD_COMPILED_TEST(SimdLoadStoreLoad) {
WasmRunner<int32_t> r(execution_tier, lower_simd);
int32_t* memory =
r.builder().AddMemoryElems<int32_t>(kWasmPageSize / sizeof(int32_t));
@@ -2336,6 +2340,56 @@ WASM_SIMD_TEST(SimdLoadStoreLoad) {
}
}
+#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_IA32
+// V8:8665 - Tracking bug to enable reduction tests in the interpreter,
+// and for SIMD lowering.
+// TODO(gdeepti): Enable these tests for ARM/ARM64
+#define WASM_SIMD_ANYTRUE_TEST(format, lanes, max) \
+ WASM_SIMD_TEST_TURBOFAN(S##format##AnyTrue) { \
+ WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd); \
+ byte simd = r.AllocateLocal(kWasmS128); \
+ BUILD( \
+ r, \
+ WASM_SET_LOCAL(simd, WASM_SIMD_I##format##_SPLAT(WASM_GET_LOCAL(0))), \
+ WASM_SIMD_UNOP(kExprS1x##lanes##AnyTrue, WASM_GET_LOCAL(simd))); \
+ CHECK_EQ(1, r.Call(max)); \
+ CHECK_EQ(1, r.Call(5)); \
+ CHECK_EQ(0, r.Call(0)); \
+ }
+WASM_SIMD_ANYTRUE_TEST(32x4, 4, 0xffffffff);
+WASM_SIMD_ANYTRUE_TEST(16x8, 8, 0xffff);
+WASM_SIMD_ANYTRUE_TEST(8x16, 16, 0xff);
+
+#define WASM_SIMD_ALLTRUE_TEST(format, lanes, max) \
+ WASM_SIMD_TEST_TURBOFAN(S##format##AllTrue) { \
+ WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd); \
+ byte simd = r.AllocateLocal(kWasmS128); \
+ BUILD( \
+ r, \
+ WASM_SET_LOCAL(simd, WASM_SIMD_I##format##_SPLAT(WASM_GET_LOCAL(0))), \
+ WASM_SIMD_UNOP(kExprS1x##lanes##AllTrue, WASM_GET_LOCAL(simd))); \
+ CHECK_EQ(1, r.Call(max)); \
+ CHECK_EQ(0, r.Call(21)); \
+ CHECK_EQ(0, r.Call(0)); \
+ }
+WASM_SIMD_ALLTRUE_TEST(32x4, 4, 0xffffffff);
+WASM_SIMD_ALLTRUE_TEST(16x8, 8, 0xffff);
+WASM_SIMD_ALLTRUE_TEST(8x16, 16, 0xff);
+#endif // V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_IA32
+
+WASM_SIMD_TEST_TURBOFAN(BitSelect) {
+ WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
+ byte simd = r.AllocateLocal(kWasmS128);
+ BUILD(r,
+ WASM_SET_LOCAL(
+ simd,
+ WASM_SIMD_SELECT(32x4, WASM_SIMD_I32x4_SPLAT(WASM_I32V(0x01020304)),
+ WASM_SIMD_I32x4_SPLAT(WASM_I32V(0)),
+ WASM_SIMD_I32x4_SPLAT(WASM_GET_LOCAL(0)))),
+ WASM_SIMD_I32x4_EXTRACT_LANE(0, WASM_GET_LOCAL(simd)));
+ CHECK_EQ(0x01020304, r.Call(0xFFFFFFFF));
+}
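// v128.bitselect takes bits from its first operand where the mask bit is set
// and from its second where it is clear; the WASM_SIMD_SELECT macro now
// passes the mask last, matching that (v1, v2, mask) operand order. A scalar
// model of one 32-bit lane (illustrative sketch, not part of the diff):
uint32_t BitSelectLaneSketch(uint32_t v1, uint32_t v2, uint32_t mask) {
  return (v1 & mask) | (v2 & ~mask);  // an all-ones mask yields v1, as checked
}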
+
#undef WASM_SIMD_TEST
#undef WASM_SIMD_CHECK_LANE
#undef WASM_SIMD_CHECK4
@@ -2376,6 +2430,9 @@ WASM_SIMD_TEST(SimdLoadStoreLoad) {
#undef WASM_SIMD_NON_CANONICAL_SELECT_TEST
#undef WASM_SIMD_COMPILED_TEST
#undef WASM_SIMD_BOOL_REDUCTION_TEST
+#undef WASM_SIMD_TEST_TURBOFAN
+#undef WASM_SIMD_ANYTRUE_TEST
+#undef WASM_SIMD_ALLTRUE_TEST
} // namespace test_run_wasm_simd
} // namespace wasm
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm.cc b/deps/v8/test/cctest/wasm/test-run-wasm.cc
index 0ba12aedd9..7e6ba47448 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm.cc
@@ -7,6 +7,7 @@
#include <string.h>
#include "src/assembler-inl.h"
+#include "src/base/overflowing-math.h"
#include "src/base/platform/elapsed-timer.h"
#include "src/utils.h"
#include "test/cctest/cctest.h"
@@ -84,14 +85,14 @@ WASM_EXEC_TEST(Int32Add_P) {
WasmRunner<int32_t, int32_t> r(execution_tier);
// p0 + 13
BUILD(r, WASM_I32_ADD(WASM_I32V_1(13), WASM_GET_LOCAL(0)));
- FOR_INT32_INPUTS(i) { CHECK_EQ(*i + 13, r.Call(*i)); }
+ FOR_INT32_INPUTS(i) { CHECK_EQ(base::AddWithWraparound(*i, 13), r.Call(*i)); }
}
WASM_EXEC_TEST(Int32Add_P_fallthru) {
WasmRunner<int32_t, int32_t> r(execution_tier);
// p0 + 13
BUILD(r, WASM_I32_ADD(WASM_I32V_1(13), WASM_GET_LOCAL(0)));
- FOR_INT32_INPUTS(i) { CHECK_EQ(*i + 13, r.Call(*i)); }
+ FOR_INT32_INPUTS(i) { CHECK_EQ(base::AddWithWraparound(*i, 13), r.Call(*i)); }
}
static void RunInt32AddTest(ExecutionTier execution_tier, const byte* code,
@@ -190,9 +191,9 @@ static void TestInt32Binop(ExecutionTier execution_tier, WasmOpcode opcode,
[](ctype a, ctype b) -> ctype { return expected; }); \
}
-WASM_I32_BINOP_TEST(Add, int32_t, a + b)
-WASM_I32_BINOP_TEST(Sub, int32_t, a - b)
-WASM_I32_BINOP_TEST(Mul, int32_t, a* b)
+WASM_I32_BINOP_TEST(Add, int32_t, base::AddWithWraparound(a, b))
+WASM_I32_BINOP_TEST(Sub, int32_t, base::SubWithWraparound(a, b))
+WASM_I32_BINOP_TEST(Mul, int32_t, base::MulWithWraparound(a, b))
WASM_I32_BINOP_TEST(DivS, int32_t,
(a == kMinInt && b == -1) || b == 0
? static_cast<int32_t>(0xDEADBEEF)
@@ -206,8 +207,8 @@ WASM_I32_BINOP_TEST(Xor, int32_t, a ^ b)
WASM_I32_BINOP_TEST(Shl, int32_t, a << (b & 0x1F))
WASM_I32_BINOP_TEST(ShrU, uint32_t, a >> (b & 0x1F))
WASM_I32_BINOP_TEST(ShrS, int32_t, a >> (b & 0x1F))
-WASM_I32_BINOP_TEST(Ror, uint32_t, (a >> (b & 0x1F)) | (a << (32 - (b & 0x1F))))
-WASM_I32_BINOP_TEST(Rol, uint32_t, (a << (b & 0x1F)) | (a >> (32 - (b & 0x1F))))
+WASM_I32_BINOP_TEST(Ror, uint32_t, (a >> (b & 0x1F)) | (a << ((32 - b) & 0x1F)))
+WASM_I32_BINOP_TEST(Rol, uint32_t, (a << (b & 0x1F)) | (a >> ((32 - b) & 0x1F)))
WASM_I32_BINOP_TEST(Eq, int32_t, a == b)
WASM_I32_BINOP_TEST(Ne, int32_t, a != b)
WASM_I32_BINOP_TEST(LtS, int32_t, a < b)
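// The Ror/Rol change above fixes latent UB: when b % 32 == 0, the old
// expression shifted by 32 - (b & 0x1F) == 32, which is undefined for a
// 32-bit operand. Masking the complementary count keeps both shift amounts
// in [0, 31] while computing the same rotation. Standalone sketch:
uint32_t RotateRightSketch(uint32_t a, uint32_t b) {
  return (a >> (b & 0x1F)) | (a << ((32 - b) & 0x1F));  // defined for all b
}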
@@ -2839,6 +2840,13 @@ WASM_EXEC_TEST(F32Min) {
}
}
+WASM_EXEC_TEST(F32MinSameValue) {
+ WasmRunner<float, float> r(execution_tier);
+ BUILD(r, WASM_F32_MIN(WASM_GET_LOCAL(0), WASM_GET_LOCAL(0)));
+ float result = r.Call(5.0f);
+ CHECK_FLOAT_EQ(5.0f, result);
+}
+
WASM_EXEC_TEST(F64Min) {
WasmRunner<double, double, double> r(execution_tier);
BUILD(r, WASM_F64_MIN(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
@@ -2848,6 +2856,13 @@ WASM_EXEC_TEST(F64Min) {
}
}
+WASM_EXEC_TEST(F64MinSameValue) {
+ WasmRunner<double, double> r(execution_tier);
+ BUILD(r, WASM_F64_MIN(WASM_GET_LOCAL(0), WASM_GET_LOCAL(0)));
+ double result = r.Call(5.0);
+ CHECK_DOUBLE_EQ(5.0, result);
+}
+
WASM_EXEC_TEST(F32Max) {
WasmRunner<float, float, float> r(execution_tier);
BUILD(r, WASM_F32_MAX(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
@@ -2857,6 +2872,13 @@ WASM_EXEC_TEST(F32Max) {
}
}
+WASM_EXEC_TEST(F32MaxSameValue) {
+ WasmRunner<float, float> r(execution_tier);
+ BUILD(r, WASM_F32_MAX(WASM_GET_LOCAL(0), WASM_GET_LOCAL(0)));
+ float result = r.Call(5.0f);
+ CHECK_FLOAT_EQ(5.0f, result);
+}
+
WASM_EXEC_TEST(F64Max) {
WasmRunner<double, double, double> r(execution_tier);
BUILD(r, WASM_F64_MAX(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
@@ -2869,6 +2891,13 @@ WASM_EXEC_TEST(F64Max) {
}
}
+WASM_EXEC_TEST(F64MaxSameValue) {
+ WasmRunner<double, double> r(execution_tier);
+ BUILD(r, WASM_F64_MAX(WASM_GET_LOCAL(0), WASM_GET_LOCAL(0)));
+ double result = r.Call(5.0);
+ CHECK_DOUBLE_EQ(5.0, result);
+}
+
WASM_EXEC_TEST(I32SConvertF32) {
WasmRunner<int32_t, float> r(execution_tier);
BUILD(r, WASM_I32_SCONVERT_F32(WASM_GET_LOCAL(0)));
@@ -3270,9 +3299,11 @@ WASM_EXEC_TEST(I32SubOnDifferentRegisters) {
}
WASM_EXEC_TEST(I32MulOnDifferentRegisters) {
- BinOpOnDifferentRegisters<int32_t>(
- execution_tier, kWasmI32, ArrayVector(kSome32BitInputs), kExprI32Mul,
- [](int32_t lhs, int32_t rhs, bool* trap) { return lhs * rhs; });
+ BinOpOnDifferentRegisters<int32_t>(execution_tier, kWasmI32,
+ ArrayVector(kSome32BitInputs), kExprI32Mul,
+ [](int32_t lhs, int32_t rhs, bool* trap) {
+ return base::MulWithWraparound(lhs, rhs);
+ });
}
WASM_EXEC_TEST(I32ShlOnDifferentRegisters) {
@@ -3344,9 +3375,11 @@ WASM_EXEC_TEST(I64SubOnDifferentRegisters) {
}
WASM_EXEC_TEST(I64MulOnDifferentRegisters) {
- BinOpOnDifferentRegisters<int64_t>(
- execution_tier, kWasmI64, ArrayVector(kSome64BitInputs), kExprI64Mul,
- [](int64_t lhs, int64_t rhs, bool* trap) { return lhs * rhs; });
+ BinOpOnDifferentRegisters<int64_t>(execution_tier, kWasmI64,
+ ArrayVector(kSome64BitInputs), kExprI64Mul,
+ [](int64_t lhs, int64_t rhs, bool* trap) {
+ return base::MulWithWraparound(lhs, rhs);
+ });
}
WASM_EXEC_TEST(I64ShlOnDifferentRegisters) {
@@ -3437,9 +3470,9 @@ TEST(Liftoff_tier_up) {
memcpy(buffer.get(), sub_code->instructions().start(), sub_size);
desc.buffer = buffer.get();
desc.instr_size = static_cast<int>(sub_size);
- WasmCode* code =
- native_module->AddCode(add.function_index(), desc, 0, 0, 0, {},
- OwnedVector<byte>(), WasmCode::kOther);
+ WasmCode* code = native_module->AddCode(
+ add.function_index(), desc, 0, 0, 0, {}, OwnedVector<byte>(),
+ WasmCode::kFunction, WasmCode::kOther);
native_module->PublishCode(code);
// Second run should now execute {sub}.
diff --git a/deps/v8/test/cctest/wasm/test-streaming-compilation.cc b/deps/v8/test/cctest/wasm/test-streaming-compilation.cc
index 39d7e1a5be..43ba7dfea1 100644
--- a/deps/v8/test/cctest/wasm/test-streaming-compilation.cc
+++ b/deps/v8/test/cctest/wasm/test-streaming-compilation.cc
@@ -4,6 +4,7 @@
#include "src/api-inl.h"
#include "src/objects-inl.h"
+#include "src/objects/managed.h"
#include "src/v8.h"
#include "src/vector.h"
@@ -12,6 +13,9 @@
#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-module-builder.h"
#include "src/wasm/wasm-module.h"
+#include "src/wasm/wasm-objects-inl.h"
+#include "src/wasm/wasm-objects.h"
+#include "src/wasm/wasm-serialization.h"
#include "test/cctest/cctest.h"
@@ -50,7 +54,7 @@ class MockPlatform final : public TestPlatform {
class MockTaskRunner final : public TaskRunner {
public:
void PostTask(std::unique_ptr<v8::Task> task) override {
- tasks_.push_back(std::move(task));
+ tasks_.push(std::move(task));
}
void PostDelayedTask(std::unique_ptr<Task> task,
@@ -66,15 +70,15 @@ class MockPlatform final : public TestPlatform {
void ExecuteTasks() {
while (!tasks_.empty()) {
- std::unique_ptr<Task> task = std::move(tasks_.back());
- tasks_.pop_back();
+ std::unique_ptr<Task> task = std::move(tasks_.front());
+ tasks_.pop();
task->Run();
}
}
private:
    // We do not execute tasks concurrently, so a single queue of tasks suffices.
- std::vector<std::unique_ptr<v8::Task>> tasks_;
+ std::queue<std::unique_ptr<v8::Task>> tasks_;
};
std::shared_ptr<MockTaskRunner> task_runner_;
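// Switching tasks_ from std::vector to std::queue changes the drain order in
// ExecuteTasks from LIFO (back/pop_back) to FIFO (front/pop), matching how a
// real platform task runner dispatches posted tasks. Generic sketch of the
// FIFO drain (hypothetical helper, not part of this diff):
void DrainFifo(std::queue<std::unique_ptr<v8::Task>>* tasks) {
  while (!tasks->empty()) {
    std::unique_ptr<v8::Task> task = std::move(tasks->front());  // oldest first
    tasks->pop();  // pop before Run(), since Run() may post further tasks
    task->Run();
  }
}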
@@ -90,10 +94,15 @@ enum class CompilationState {
class TestResolver : public CompilationResultResolver {
public:
- explicit TestResolver(CompilationState* state) : state_(state) {}
+ TestResolver(CompilationState* state,
+ std::shared_ptr<NativeModule>* native_module)
+ : state_(state), native_module_(native_module) {}
void OnCompilationSucceeded(i::Handle<i::WasmModuleObject> module) override {
*state_ = CompilationState::kFinished;
+ if (!module.is_null()) {
+ *native_module_ = module->shared_native_module();
+ }
}
void OnCompilationFailed(i::Handle<i::Object> error_reason) override {
@@ -102,24 +111,29 @@ class TestResolver : public CompilationResultResolver {
private:
CompilationState* state_;
+ std::shared_ptr<NativeModule>* native_module_;
};
class StreamTester {
public:
- StreamTester() : zone_(&allocator_, "StreamTester") {
+ StreamTester()
+ : zone_(&allocator_, "StreamTester"),
+ internal_scope_(CcTest::i_isolate()) {
v8::Isolate* isolate = CcTest::isolate();
i::Isolate* i_isolate = CcTest::i_isolate();
- i::HandleScope internal_scope(i_isolate);
v8::Local<v8::Context> context = isolate->GetCurrentContext();
stream_ = i_isolate->wasm_engine()->StartStreamingCompilation(
i_isolate, kAllWasmFeatures, v8::Utils::OpenHandle(*context),
- std::make_shared<TestResolver>(&state_));
+ std::make_shared<TestResolver>(&state_, &native_module_));
}
std::shared_ptr<StreamingDecoder> stream() { return stream_; }
+ // Compiled native module, valid after successful compile.
+ std::shared_ptr<NativeModule> native_module() { return native_module_; }
+
// Run all compiler tasks, both foreground and background tasks.
void RunCompilerTasks() {
static_cast<MockPlatform*>(i::V8::GetCurrentPlatform())->ExecuteTasks();
@@ -137,12 +151,18 @@ class StreamTester {
void FinishStream() { stream_->Finish(); }
+ void SetCompiledModuleBytes(const uint8_t* start, size_t length) {
+ stream_->SetCompiledModuleBytes(Vector<const uint8_t>(start, length));
+ }
+
Zone* zone() { return &zone_; }
private:
AccountingAllocator allocator_;
Zone zone_;
+ i::HandleScope internal_scope_;
CompilationState state_ = CompilationState::kPending;
+ std::shared_ptr<NativeModule> native_module_;
std::shared_ptr<StreamingDecoder> stream_;
};
} // namespace
@@ -180,6 +200,27 @@ ZoneBuffer GetValidModuleBytes(Zone* zone) {
return buffer;
}
+// Create the same valid module as above and serialize it to test streaming
+// with compiled module caching.
+ZoneBuffer GetValidCompiledModuleBytes(Zone* zone, ZoneBuffer wire_bytes) {
+ // Use a tester to compile to a NativeModule.
+ StreamTester tester;
+ tester.OnBytesReceived(wire_bytes.begin(), wire_bytes.size());
+ tester.FinishStream();
+ tester.RunCompilerTasks();
+ CHECK(tester.IsPromiseFulfilled());
+ // Serialize the NativeModule.
+ std::shared_ptr<NativeModule> native_module = tester.native_module();
+ CHECK(native_module);
+ i::wasm::WasmSerializer serializer(native_module.get());
+ size_t size = serializer.GetSerializedNativeModuleSize();
+ std::vector<byte> buffer(size);
+ CHECK(serializer.SerializeNativeModule({buffer.data(), size}));
+ ZoneBuffer result(zone, size);
+ result.write(buffer.data(), size);
+ return result;
+}
+
// Test that all bytes arrive before doing any compilation. FinishStream is
// called immediately.
STREAM_TEST(TestAllBytesArriveImmediatelyStreamFinishesFirst) {
@@ -215,7 +256,7 @@ size_t GetFunctionOffset(i::Isolate* isolate, const uint8_t* buffer,
kAllWasmFeatures, buffer, buffer + size, false, ModuleOrigin::kWasmOrigin,
isolate->counters(), isolate->allocator());
CHECK(result.ok());
- const WasmFunction* func = &result.val->functions[1];
+ const WasmFunction* func = &result.value()->functions[1];
return func->code.offset();
}
@@ -1010,6 +1051,107 @@ STREAM_TEST(TestModuleWithErrorAfterDataSection) {
tester.RunCompilerTasks();
CHECK(tester.IsPromiseRejected());
}
+
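// The serialized bytes produced by GetValidCompiledModuleBytes are fed back
// in via SetCompiledModuleBytes, modeling an embedder-provided code cache:
// when the header checks pass, the streaming pipeline deserializes and skips
// compilation; when they fail (e.g. a corrupted header), it falls back to
// compiling the wire bytes, so the promise still resolves. The two tests
// below exercise both paths.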
+// Test that cached bytes work.
+STREAM_TEST(TestDeserializationBypassesCompilation) {
+ StreamTester tester;
+ ZoneBuffer wire_bytes = GetValidModuleBytes(tester.zone());
+ ZoneBuffer module_bytes =
+ GetValidCompiledModuleBytes(tester.zone(), wire_bytes);
+ tester.SetCompiledModuleBytes(module_bytes.begin(), module_bytes.size());
+ tester.OnBytesReceived(wire_bytes.begin(), wire_bytes.size());
+ tester.FinishStream();
+
+ tester.RunCompilerTasks();
+
+ CHECK(tester.IsPromiseFulfilled());
+}
+
+// Test that bad cached bytes don't cause compilation of wire bytes to fail.
+STREAM_TEST(TestDeserializationFails) {
+ StreamTester tester;
+ ZoneBuffer wire_bytes = GetValidModuleBytes(tester.zone());
+ ZoneBuffer module_bytes =
+ GetValidCompiledModuleBytes(tester.zone(), wire_bytes);
+ // Corrupt the header to make deserialization fail.
+ byte first_byte = *module_bytes.begin();
+ module_bytes.patch_u8(0, first_byte + 1);
+ tester.SetCompiledModuleBytes(module_bytes.begin(), module_bytes.size());
+ tester.OnBytesReceived(wire_bytes.begin(), wire_bytes.size());
+ tester.FinishStream();
+
+ tester.RunCompilerTasks();
+
+ CHECK(tester.IsPromiseFulfilled());
+}
+
+// Test that a non-empty function section with a missing code section fails.
+STREAM_TEST(TestFunctionSectionWithoutCodeSection) {
+ StreamTester tester;
+
+ const uint8_t bytes[] = {
+ WASM_MODULE_HEADER, // module header
+ kTypeSectionCode, // section code
+ U32V_1(1 + SIZEOF_SIG_ENTRY_x_x), // section size
+ U32V_1(1), // type count
+ SIG_ENTRY_x_x(kLocalI32, kLocalI32), // signature entry
+ kFunctionSectionCode, // section code
+ U32V_1(1 + 3), // section size
+ U32V_1(3), // functions count
+ 0, // signature index
+ 0, // signature index
+ 0, // signature index
+ };
+
+ tester.OnBytesReceived(bytes, arraysize(bytes));
+ tester.FinishStream();
+
+ tester.RunCompilerTasks();
+
+ CHECK(tester.IsPromiseRejected());
+}
+
+STREAM_TEST(TestSetModuleCompiledCallback) {
+ StreamTester tester;
+ bool callback_called = false;
+ tester.stream()->SetModuleCompiledCallback(
+ [&callback_called](const std::shared_ptr<NativeModule> module) {
+ callback_called = true;
+ });
+
+ uint8_t code[] = {
+ U32V_1(4), // body size
+ U32V_1(0), // locals count
+ kExprGetLocal, 0, kExprEnd // body
+ };
+
+ const uint8_t bytes[] = {
+ WASM_MODULE_HEADER, // module header
+ kTypeSectionCode, // section code
+ U32V_1(1 + SIZEOF_SIG_ENTRY_x_x), // section size
+ U32V_1(1), // type count
+ SIG_ENTRY_x_x(kLocalI32, kLocalI32), // signature entry
+ kFunctionSectionCode, // section code
+ U32V_1(1 + 3), // section size
+ U32V_1(3), // functions count
+ 0, // signature index
+ 0, // signature index
+ 0, // signature index
+ kCodeSectionCode, // section code
+ U32V_1(1 + arraysize(code) * 3), // section size
+ U32V_1(3), // functions count
+ };
+
+ tester.OnBytesReceived(bytes, arraysize(bytes));
+ tester.OnBytesReceived(code, arraysize(code));
+ tester.OnBytesReceived(code, arraysize(code));
+ tester.OnBytesReceived(code, arraysize(code));
+ tester.FinishStream();
+ tester.RunCompilerTasks();
+ CHECK(tester.IsPromiseFulfilled());
+ CHECK(callback_called);
+}
+
#undef STREAM_TEST
} // namespace wasm
diff --git a/deps/v8/test/cctest/wasm/test-wasm-breakpoints.cc b/deps/v8/test/cctest/wasm/test-wasm-breakpoints.cc
index 26e98a1ba4..0ba2b95864 100644
--- a/deps/v8/test/cctest/wasm/test-wasm-breakpoints.cc
+++ b/deps/v8/test/cctest/wasm/test-wasm-breakpoints.cc
@@ -22,7 +22,7 @@ namespace wasm {
namespace {
void CheckLocations(
- WasmModuleObject* module_object, debug::Location start, debug::Location end,
+ WasmModuleObject module_object, debug::Location start, debug::Location end,
std::initializer_list<debug::Location> expected_locations_init) {
std::vector<debug::BreakLocation> locations;
bool success = module_object->GetPossibleBreakpoints(start, end, &locations);
@@ -45,7 +45,7 @@ void CheckLocations(
}
}
-void CheckLocationsFail(WasmModuleObject* module_object, debug::Location start,
+void CheckLocationsFail(WasmModuleObject module_object, debug::Location start,
debug::Location end) {
std::vector<debug::BreakLocation> locations;
bool success = module_object->GetPossibleBreakpoints(start, end, &locations);
@@ -246,8 +246,8 @@ WASM_COMPILED_EXEC_TEST(WasmCollectPossibleBreakpoints) {
BUILD(runner, WASM_NOP, WASM_I32_ADD(WASM_ZERO, WASM_ONE));
- WasmInstanceObject* instance = *runner.builder().instance_object();
- WasmModuleObject* module_object = instance->module_object();
+ WasmInstanceObject instance = *runner.builder().instance_object();
+ WasmModuleObject module_object = instance->module_object();
std::vector<debug::Location> locations;
// Check all locations for function 0.
diff --git a/deps/v8/test/cctest/wasm/test-wasm-codegen.cc b/deps/v8/test/cctest/wasm/test-wasm-codegen.cc
index cbd943ca09..ff4ef80bd6 100644
--- a/deps/v8/test/cctest/wasm/test-wasm-codegen.cc
+++ b/deps/v8/test/cctest/wasm/test-wasm-codegen.cc
@@ -57,13 +57,12 @@ void BuildTrivialModule(Zone* zone, ZoneBuffer* buffer) {
builder->WriteTo(*buffer);
}
-bool TestModule(Isolate* isolate,
- v8::WasmCompiledModule::BufferReference wire_bytes) {
+bool TestModule(Isolate* isolate, v8::MemorySpan<const uint8_t> wire_bytes) {
HandleScope scope(isolate);
- v8::WasmCompiledModule::BufferReference serialized_module(nullptr, 0);
- MaybeLocal<v8::WasmCompiledModule> module =
- v8::WasmCompiledModule::DeserializeOrCompile(
+ v8::MemorySpan<const uint8_t> serialized_module;
+ MaybeLocal<v8::WasmModuleObject> module =
+ v8::WasmModuleObject::DeserializeOrCompile(
reinterpret_cast<v8::Isolate*>(isolate), serialized_module,
wire_bytes);
return !module.IsEmpty();
@@ -76,8 +75,7 @@ TEST(PropertiesOfCodegenCallbacks) {
Zone zone(&allocator, ZONE_NAME);
ZoneBuffer buffer(&zone);
BuildTrivialModule(&zone, &buffer);
- v8::WasmCompiledModule::BufferReference wire_bytes = {buffer.begin(),
- buffer.size()};
+ v8::MemorySpan<const uint8_t> wire_bytes = {buffer.begin(), buffer.size()};
Isolate* isolate = CcTest::InitIsolateOnce();
HandleScope scope(isolate);
testing::SetupIsolateForWasmModule(isolate);
diff --git a/deps/v8/test/cctest/wasm/test-wasm-import-wrapper-cache.cc b/deps/v8/test/cctest/wasm/test-wasm-import-wrapper-cache.cc
new file mode 100644
index 0000000000..ba189a57ca
--- /dev/null
+++ b/deps/v8/test/cctest/wasm/test-wasm-import-wrapper-cache.cc
@@ -0,0 +1,127 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/wasm-compiler.h"
+#include "src/wasm/function-compiler.h"
+#include "src/wasm/wasm-code-manager.h"
+#include "src/wasm/wasm-engine.h"
+#include "src/wasm/wasm-import-wrapper-cache-inl.h"
+#include "src/wasm/wasm-module.h"
+
+#include "test/cctest/cctest.h"
+#include "test/common/wasm/test-signatures.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+namespace test_wasm_import_wrapper_cache {
+
+std::unique_ptr<NativeModule> NewModule(Isolate* isolate) {
+ WasmCodeManager* manager = isolate->wasm_engine()->code_manager();
+ std::shared_ptr<WasmModule> module(new WasmModule);
+ bool can_request_more = false;
+ size_t size = 16384;
+ auto native_module = manager->NewNativeModule(
+ isolate, kAllWasmFeatures, size, can_request_more, std::move(module));
+ native_module->SetRuntimeStubs(isolate);
+ return native_module;
+}
+
+TEST(CacheHit) {
+ Isolate* isolate = CcTest::InitIsolateOnce();
+ auto module = NewModule(isolate);
+ TestSignatures sigs;
+
+ auto kind = compiler::WasmImportCallKind::kJSFunctionArityMatch;
+
+ WasmCode* c1 = module->import_wrapper_cache()->GetOrCompile(
+ isolate->wasm_engine(), isolate->counters(), kind, sigs.i_i());
+
+ CHECK_NOT_NULL(c1);
+ CHECK_EQ(WasmCode::Kind::kWasmToJsWrapper, c1->kind());
+
+ WasmCode* c2 = module->import_wrapper_cache()->GetOrCompile(
+ isolate->wasm_engine(), isolate->counters(), kind, sigs.i_i());
+
+ CHECK_NOT_NULL(c2);
+ CHECK_EQ(c1, c2);
+}
+
+TEST(CacheMissSig) {
+ Isolate* isolate = CcTest::InitIsolateOnce();
+ auto module = NewModule(isolate);
+ TestSignatures sigs;
+
+ auto kind = compiler::WasmImportCallKind::kJSFunctionArityMatch;
+
+ WasmCode* c1 = module->import_wrapper_cache()->GetOrCompile(
+ isolate->wasm_engine(), isolate->counters(), kind, sigs.i_i());
+
+ CHECK_NOT_NULL(c1);
+ CHECK_EQ(WasmCode::Kind::kWasmToJsWrapper, c1->kind());
+
+ WasmCode* c2 = module->import_wrapper_cache()->GetOrCompile(
+ isolate->wasm_engine(), isolate->counters(), kind, sigs.i_ii());
+
+ CHECK_NOT_NULL(c2);
+ CHECK_NE(c1, c2);
+}
+
+TEST(CacheMissKind) {
+ Isolate* isolate = CcTest::InitIsolateOnce();
+ auto module = NewModule(isolate);
+ TestSignatures sigs;
+
+ auto kind1 = compiler::WasmImportCallKind::kJSFunctionArityMatch;
+ auto kind2 = compiler::WasmImportCallKind::kJSFunctionArityMismatch;
+
+ WasmCode* c1 = module->import_wrapper_cache()->GetOrCompile(
+ isolate->wasm_engine(), isolate->counters(), kind1, sigs.i_i());
+
+ CHECK_NOT_NULL(c1);
+ CHECK_EQ(WasmCode::Kind::kWasmToJsWrapper, c1->kind());
+
+ WasmCode* c2 = module->import_wrapper_cache()->GetOrCompile(
+ isolate->wasm_engine(), isolate->counters(), kind2, sigs.i_i());
+
+ CHECK_NOT_NULL(c2);
+ CHECK_NE(c1, c2);
+}
+
+TEST(CacheHitMissSig) {
+ Isolate* isolate = CcTest::InitIsolateOnce();
+ auto module = NewModule(isolate);
+ TestSignatures sigs;
+
+ auto kind = compiler::WasmImportCallKind::kJSFunctionArityMatch;
+
+ WasmCode* c1 = module->import_wrapper_cache()->GetOrCompile(
+ isolate->wasm_engine(), isolate->counters(), kind, sigs.i_i());
+
+ CHECK_NOT_NULL(c1);
+ CHECK_EQ(WasmCode::Kind::kWasmToJsWrapper, c1->kind());
+
+ WasmCode* c2 = module->import_wrapper_cache()->GetOrCompile(
+ isolate->wasm_engine(), isolate->counters(), kind, sigs.i_ii());
+
+ CHECK_NOT_NULL(c2);
+ CHECK_NE(c1, c2);
+
+ WasmCode* c3 = module->import_wrapper_cache()->GetOrCompile(
+ isolate->wasm_engine(), isolate->counters(), kind, sigs.i_i());
+
+ CHECK_NOT_NULL(c3);
+ CHECK_EQ(c1, c3);
+
+ WasmCode* c4 = module->import_wrapper_cache()->GetOrCompile(
+ isolate->wasm_engine(), isolate->counters(), kind, sigs.i_ii());
+
+ CHECK_NOT_NULL(c4);
+ CHECK_EQ(c2, c4);
+}
+
+} // namespace test_wasm_import_wrapper_cache
+} // namespace wasm
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/cctest/wasm/test-wasm-interpreter-entry.cc b/deps/v8/test/cctest/wasm/test-wasm-interpreter-entry.cc
index d927de34ca..1349ce2d17 100644
--- a/deps/v8/test/cctest/wasm/test-wasm-interpreter-entry.cc
+++ b/deps/v8/test/cctest/wasm/test-wasm-interpreter-entry.cc
@@ -5,6 +5,7 @@
#include <cstdint>
#include "src/assembler-inl.h"
+#include "src/base/overflowing-math.h"
#include "src/objects-inl.h"
#include "src/wasm/wasm-objects.h"
#include "test/cctest/cctest.h"
@@ -100,7 +101,9 @@ TEST(TestArgumentPassing_int32) {
WASM_I32_ADD(WASM_I32_MUL(WASM_I32V_1(2), WASM_GET_LOCAL(0)), WASM_ONE)},
{// Call f2 with param <0>.
WASM_GET_LOCAL(0), WASM_CALL_FUNCTION0(f2.function_index())},
- [](int32_t a) { return 2 * a + 1; });
+ [](int32_t a) {
+ return base::AddWithWraparound(base::MulWithWraparound(2, a), 1);
+ });
FOR_INT32_INPUTS(v) { helper.CheckCall(*v); }
}
diff --git a/deps/v8/test/cctest/wasm/test-wasm-serialization.cc b/deps/v8/test/cctest/wasm/test-wasm-serialization.cc
index a2c53ab210..7929b23891 100644
--- a/deps/v8/test/cctest/wasm/test-wasm-serialization.cc
+++ b/deps/v8/test/cctest/wasm/test-wasm-serialization.cc
@@ -68,33 +68,33 @@ class WasmSerializationTest {
void InvalidateVersion() {
uint32_t* slot = reinterpret_cast<uint32_t*>(
- const_cast<uint8_t*>(serialized_bytes_.start) +
+ const_cast<uint8_t*>(serialized_bytes_.data()) +
SerializedCodeData::kVersionHashOffset);
*slot = Version::Hash() + 1;
}
void InvalidateWireBytes() {
- memset(const_cast<uint8_t*>(wire_bytes_.start), 0, wire_bytes_.size / 2);
+ memset(const_cast<uint8_t*>(wire_bytes_.data()), 0, wire_bytes_.size() / 2);
}
void InvalidateLength() {
uint32_t* slot = reinterpret_cast<uint32_t*>(
- const_cast<uint8_t*>(serialized_bytes_.start) +
+ const_cast<uint8_t*>(serialized_bytes_.data()) +
SerializedCodeData::kPayloadLengthOffset);
*slot = 0u;
}
- v8::MaybeLocal<v8::WasmCompiledModule> Deserialize() {
+ v8::MaybeLocal<v8::WasmModuleObject> Deserialize() {
ErrorThrower thrower(current_isolate(), "");
- v8::MaybeLocal<v8::WasmCompiledModule> deserialized =
- v8::WasmCompiledModule::DeserializeOrCompile(
+ v8::MaybeLocal<v8::WasmModuleObject> deserialized =
+ v8::WasmModuleObject::DeserializeOrCompile(
current_isolate_v8(), serialized_bytes_, wire_bytes_);
return deserialized;
}
void DeserializeAndRun() {
ErrorThrower thrower(current_isolate(), "");
- v8::Local<v8::WasmCompiledModule> deserialized_module;
+ v8::Local<v8::WasmModuleObject> deserialized_module;
CHECK(Deserialize().ToLocal(&deserialized_module));
Handle<WasmModuleObject> module_object = Handle<WasmModuleObject>::cast(
v8::Utils::OpenHandle(*deserialized_module));
@@ -102,9 +102,9 @@ class WasmSerializationTest {
DisallowHeapAllocation assume_no_gc;
Vector<const byte> deserialized_module_wire_bytes =
module_object->native_module()->wire_bytes();
- CHECK_EQ(deserialized_module_wire_bytes.size(), wire_bytes_.size);
- CHECK_EQ(memcmp(deserialized_module_wire_bytes.start(), wire_bytes_.start,
- wire_bytes_.size),
+ CHECK_EQ(deserialized_module_wire_bytes.size(), wire_bytes_.size());
+ CHECK_EQ(memcmp(deserialized_module_wire_bytes.start(),
+ wire_bytes_.data(), wire_bytes_.size()),
0);
}
Handle<WasmInstanceObject> instance =
@@ -159,18 +159,20 @@ class WasmSerializationTest {
v8::Utils::ToLocal(Handle<JSObject>::cast(module_object));
CHECK(v8_module_obj->IsWebAssemblyCompiledModule());
- v8::Local<v8::WasmCompiledModule> v8_compiled_module =
- v8_module_obj.As<v8::WasmCompiledModule>();
- v8::WasmCompiledModule::BufferReference uncompiled_bytes =
- v8_compiled_module->GetWasmWireBytesRef();
- uint8_t* bytes_copy = zone()->NewArray<uint8_t>(uncompiled_bytes.size);
- memcpy(bytes_copy, uncompiled_bytes.start, uncompiled_bytes.size);
- wire_bytes_ = {bytes_copy, uncompiled_bytes.size};
+ v8::Local<v8::WasmModuleObject> v8_module_object =
+ v8_module_obj.As<v8::WasmModuleObject>();
+ v8::CompiledWasmModule compiled_module =
+ v8_module_object->GetCompiledModule();
+ v8::MemorySpan<const uint8_t> uncompiled_bytes =
+ compiled_module.GetWireBytesRef();
+ uint8_t* bytes_copy = zone()->NewArray<uint8_t>(uncompiled_bytes.size());
+ memcpy(bytes_copy, uncompiled_bytes.data(), uncompiled_bytes.size());
+ wire_bytes_ = {bytes_copy, uncompiled_bytes.size()};
    // Keep data_ alive until the end.
- data_ = v8_compiled_module->Serialize();
+ data_ = compiled_module.Serialize();
}
- serialized_bytes_ = {data_.first.get(), data_.second};
+ serialized_bytes_ = {data_.buffer.get(), data_.size};
v8::Isolate::CreateParams create_params;
create_params.array_buffer_allocator =
@@ -191,9 +193,9 @@ class WasmSerializationTest {
v8::internal::AccountingAllocator allocator_;
Zone zone_;
- v8::WasmCompiledModule::SerializedModule data_;
- v8::WasmCompiledModule::BufferReference wire_bytes_ = {nullptr, 0};
- v8::WasmCompiledModule::BufferReference serialized_bytes_ = {nullptr, 0};
+ v8::OwnedBuffer data_;
+ v8::MemorySpan<const uint8_t> wire_bytes_ = {nullptr, 0};
+ v8::MemorySpan<const uint8_t> serialized_bytes_ = {nullptr, 0};
v8::Isolate* current_isolate_v8_;
};
@@ -263,7 +265,7 @@ TEST(BlockWasmCodeGenAtDeserialization) {
{
HandleScope scope(test.current_isolate());
test.current_isolate_v8()->SetAllowCodeGenerationFromStringsCallback(False);
- v8::MaybeLocal<v8::WasmCompiledModule> nothing = test.Deserialize();
+ v8::MaybeLocal<v8::WasmModuleObject> nothing = test.Deserialize();
CHECK(nothing.IsEmpty());
}
Cleanup(test.current_isolate());
@@ -283,7 +285,7 @@ void TestTransferrableWasmModules(bool should_share) {
v8::Isolate::CreateParams create_params;
create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
v8::Isolate* from_isolate = v8::Isolate::New(create_params);
- std::vector<v8::WasmCompiledModule::TransferrableModule> store;
+ std::vector<v8::WasmModuleObject::TransferrableModule> store;
std::shared_ptr<NativeModule> original_native_module;
{
v8::HandleScope scope(from_isolate);
@@ -299,11 +301,11 @@ void TestTransferrableWasmModules(bool should_share) {
ModuleWireBytes(buffer.begin(), buffer.end()));
Handle<WasmModuleObject> module_object =
maybe_module_object.ToHandleChecked();
- v8::Local<v8::WasmCompiledModule> v8_module =
- v8::Local<v8::WasmCompiledModule>::Cast(
+ v8::Local<v8::WasmModuleObject> v8_module =
+ v8::Local<v8::WasmModuleObject>::Cast(
v8::Utils::ToLocal(Handle<JSObject>::cast(module_object)));
store.push_back(v8_module->GetTransferrableModule());
- original_native_module = module_object->managed_native_module()->get();
+ original_native_module = module_object->shared_native_module();
}
{
@@ -312,13 +314,13 @@ void TestTransferrableWasmModules(bool should_share) {
v8::HandleScope scope(to_isolate);
LocalContext env(to_isolate);
- v8::MaybeLocal<v8::WasmCompiledModule> transferred_module =
- v8::WasmCompiledModule::FromTransferrableModule(to_isolate, store[0]);
+ v8::MaybeLocal<v8::WasmModuleObject> transferred_module =
+ v8::WasmModuleObject::FromTransferrableModule(to_isolate, store[0]);
CHECK(!transferred_module.IsEmpty());
Handle<WasmModuleObject> module_object = Handle<WasmModuleObject>::cast(
v8::Utils::OpenHandle(*transferred_module.ToLocalChecked()));
std::shared_ptr<NativeModule> transferred_native_module =
- module_object->managed_native_module()->get();
+ module_object->shared_native_module();
bool is_sharing = (original_native_module == transferred_native_module);
CHECK_EQ(should_share, is_sharing);
}
diff --git a/deps/v8/test/cctest/wasm/test-wasm-shared-engine.cc b/deps/v8/test/cctest/wasm/test-wasm-shared-engine.cc
index 5d383bb9c5..a3ca928d84 100644
--- a/deps/v8/test/cctest/wasm/test-wasm-shared-engine.cc
+++ b/deps/v8/test/cctest/wasm/test-wasm-shared-engine.cc
@@ -4,6 +4,7 @@
#include <memory>
+#include "src/microtask-queue.h"
#include "src/objects-inl.h"
#include "src/wasm/function-compiler.h"
#include "src/wasm/wasm-engine.h"
@@ -94,7 +95,7 @@ class SharedEngineIsolate {
}
SharedModule ExportInstance(Handle<WasmInstanceObject> instance) {
- return instance->module_object()->managed_native_module()->get();
+ return instance->module_object()->shared_native_module();
}
int32_t Run(Handle<WasmInstanceObject> instance) {
@@ -146,7 +147,7 @@ class MockInstantiationResolver : public InstantiationResultResolver {
explicit MockInstantiationResolver(Handle<Object>* out_instance)
: out_instance_(out_instance) {}
void OnInstantiationSucceeded(Handle<WasmInstanceObject> result) override {
- *out_instance_->location() = *result;
+ *out_instance_->location() = result->ptr();
}
void OnInstantiationFailed(Handle<Object> error_reason) override {
UNREACHABLE();
@@ -180,7 +181,8 @@ void PumpMessageLoop(SharedEngineIsolate& isolate) {
v8::platform::PumpMessageLoop(i::V8::GetCurrentPlatform(),
isolate.v8_isolate(),
platform::MessageLoopBehavior::kWaitForWork);
- isolate.isolate()->RunMicrotasks();
+ isolate.isolate()->default_microtask_queue()->RunMicrotasks(
+ isolate.isolate());
}
Handle<WasmInstanceObject> CompileAndInstantiateAsync(
@@ -350,11 +352,9 @@ TEST(SharedEngineRunThreadedTierUp) {
threads.emplace_back(&engine, [module](SharedEngineIsolate& isolate) {
HandleScope scope(isolate.isolate());
Handle<WasmInstanceObject> instance = isolate.ImportInstance(module);
- ErrorThrower thrower(isolate.isolate(), "Forced Tier Up");
WasmFeatures detected = kNoWasmFeatures;
WasmCompilationUnit::CompileWasmFunction(
- isolate.isolate(), module.get(), &detected, &thrower,
- GetModuleEnv(module->compilation_state()),
+ isolate.isolate(), module.get(), &detected,
&module->module()->functions[0], ExecutionTier::kOptimized);
CHECK_EQ(23, isolate.Run(instance));
});
diff --git a/deps/v8/test/cctest/wasm/wasm-run-utils.cc b/deps/v8/test/cctest/wasm/wasm-run-utils.cc
index 4ce07089e2..f95760569f 100644
--- a/deps/v8/test/cctest/wasm/wasm-run-utils.cc
+++ b/deps/v8/test/cctest/wasm/wasm-run-utils.cc
@@ -6,6 +6,8 @@
#include "src/assembler-inl.h"
#include "src/code-tracer.h"
+#include "src/wasm/graph-builder-interface.h"
+#include "src/wasm/wasm-import-wrapper-cache-inl.h"
#include "src/wasm/wasm-memory.h"
#include "src/wasm/wasm-objects-inl.h"
@@ -24,7 +26,7 @@ TestingModuleBuilder::TestingModuleBuilder(
runtime_exception_support_(exception_support),
lower_simd_(lower_simd) {
WasmJs::Install(isolate_, true);
- test_module_->globals_buffer_size = kMaxGlobalsSize;
+ test_module_->untagged_globals_buffer_size = kMaxGlobalsSize;
memset(globals_data_, 0, sizeof(globals_data_));
uint32_t maybe_import_index = 0;
@@ -39,20 +41,15 @@ TestingModuleBuilder::TestingModuleBuilder(
instance_object_ = InitInstanceObject();
if (maybe_import) {
- // Manually compile a wasm to JS wrapper and insert it into the instance.
+ // Manually compile an import wrapper and insert it into the instance.
CodeSpaceMemoryModificationScope modification_scope(isolate_->heap());
auto kind = compiler::GetWasmImportCallKind(maybe_import->js_function,
- maybe_import->sig);
- MaybeHandle<Code> code = compiler::CompileWasmImportCallWrapper(
- isolate_, kind, maybe_import->sig, maybe_import_index,
- test_module_->origin,
- trap_handler::IsTrapHandlerEnabled() ? kUseTrapHandler
- : kNoTrapHandler);
- auto wasm_to_js_wrapper = native_module_->AddImportWrapper(
- code.ToHandleChecked(), maybe_import_index);
+ maybe_import->sig, false);
+ auto import_wrapper = native_module_->import_wrapper_cache()->GetOrCompile(
+ isolate_->wasm_engine(), isolate_->counters(), kind, maybe_import->sig);
ImportedFunctionEntry(instance_object_, maybe_import_index)
- .set_wasm_to_js(*maybe_import->js_function, wasm_to_js_wrapper);
+ .SetWasmToJs(isolate_, maybe_import->js_function, import_wrapper);
}
if (tier == ExecutionTier::kInterpreter) {
@@ -170,9 +167,8 @@ void TestingModuleBuilder::PopulateIndirectFunctionTable() {
for (int j = 0; j < table_size; j++) {
WasmFunction& function = test_module_->functions[table.values[j]];
int sig_id = test_module_->signature_map.Find(*function.sig);
- auto target =
- native_module_->GetCallTargetForFunction(function.func_index);
- IndirectFunctionTableEntry(instance, j).set(sig_id, *instance, target);
+ IndirectFunctionTableEntry(instance, j)
+ .Set(sig_id, instance, function.func_index);
}
}
}
@@ -187,15 +183,15 @@ uint32_t TestingModuleBuilder::AddBytes(Vector<const byte> bytes) {
OwnedVector<uint8_t> new_bytes = OwnedVector<uint8_t>::New(new_size);
memcpy(new_bytes.start(), old_bytes.start(), old_size);
memcpy(new_bytes.start() + bytes_offset, bytes.start(), bytes.length());
- native_module_->set_wire_bytes(std::move(new_bytes));
+ native_module_->SetWireBytes(std::move(new_bytes));
return bytes_offset;
}
-ModuleEnv TestingModuleBuilder::CreateModuleEnv() {
+CompilationEnv TestingModuleBuilder::CreateCompilationEnv() {
return {
test_module_ptr_,
trap_handler::IsTrapHandlerEnabled() ? kUseTrapHandler : kNoTrapHandler,
- runtime_exception_support_, lower_simd()};
+ runtime_exception_support_, enabled_features_, lower_simd()};
}
const WasmGlobal* TestingModuleBuilder::AddGlobal(ValueType type) {
@@ -213,9 +209,8 @@ Handle<WasmInstanceObject> TestingModuleBuilder::InitInstanceObject() {
Handle<Script> script =
isolate_->factory()->NewScript(isolate_->factory()->empty_string());
script->set_type(Script::TYPE_WASM);
- ModuleEnv env = CreateModuleEnv();
Handle<WasmModuleObject> module_object =
- WasmModuleObject::New(isolate_, enabled_features_, test_module_, env, {},
+ WasmModuleObject::New(isolate_, enabled_features_, test_module_, {},
script, Handle<ByteArray>::null());
// This method is called when we initialize TestEnvironment. We don't
// have a memory yet, so we won't create it here. We'll update the
@@ -246,9 +241,8 @@ void TestBuildingGraphWithBuilder(compiler::WasmGraphBuilder* builder,
}
#endif
- uint32_t pc = result.error_offset();
- FATAL("Verification failed; pc = +%x, msg = %s", pc,
- result.error_msg().c_str());
+ FATAL("Verification failed; pc = +%x, msg = %s", result.error().offset(),
+ result.error().message().c_str());
}
builder->LowerInt64();
if (!CpuFeatures::SupportsWasmSimd128()) {
@@ -257,7 +251,7 @@ void TestBuildingGraphWithBuilder(compiler::WasmGraphBuilder* builder,
}
void TestBuildingGraph(Zone* zone, compiler::JSGraph* jsgraph,
- ModuleEnv* module, FunctionSig* sig,
+ CompilationEnv* module, FunctionSig* sig,
compiler::SourcePositionTable* source_position_table,
const byte* start, const byte* end) {
compiler::WasmGraphBuilder builder(module, zone, jsgraph, sig,
@@ -345,7 +339,7 @@ Handle<Code> WasmFunctionWrapper::GetWrapperCode() {
auto call_descriptor =
compiler::Linkage::GetSimplifiedCDescriptor(zone(), signature_, true);
- if (kPointerSize == 4) {
+ if (kSystemPointerSize == 4) {
size_t num_params = signature_->parameter_count();
// One additional parameter for the pointer of the return value.
Signature<MachineRepresentation>::Builder rep_builder(zone(), 1,
@@ -415,8 +409,7 @@ void WasmFunctionCompiler::Build(const byte* start, const byte* end) {
->native_module()
->wire_bytes();
- ModuleEnv module_env = builder_->CreateModuleEnv();
- ErrorThrower thrower(isolate(), "WasmFunctionCompiler::Build");
+ CompilationEnv env = builder_->CreateCompilationEnv();
ScopedVector<uint8_t> func_wire_bytes(function_->code.length());
memcpy(func_wire_bytes.start(), wire_bytes.start() + function_->code.offset(),
func_wire_bytes.length());
@@ -425,16 +418,15 @@ void WasmFunctionCompiler::Build(const byte* start, const byte* end) {
func_wire_bytes.start(), func_wire_bytes.end()};
NativeModule* native_module =
builder_->instance_object()->module_object()->native_module();
- WasmCompilationUnit unit(isolate()->wasm_engine(), &module_env, native_module,
- func_body, function_->func_index,
- isolate()->counters(), tier);
+ WasmCompilationUnit unit(isolate()->wasm_engine(), function_->func_index,
+ tier);
WasmFeatures unused_detected_features;
- unit.ExecuteCompilation(&unused_detected_features);
- WasmCode* wasm_code = unit.FinishCompilation(&thrower);
- if (WasmCode::ShouldBeLogged(isolate())) {
- wasm_code->LogCode(isolate());
- }
- CHECK(!thrower.error());
+ WasmCompilationResult result = unit.ExecuteCompilation(
+ &env, native_module->compilation_state()->GetWireBytesStorage(),
+ isolate()->counters(), &unused_detected_features);
+ WasmCode* code = unit.Publish(std::move(result), native_module);
+ DCHECK_NOT_NULL(code);
+ if (WasmCode::ShouldBeLogged(isolate())) code->LogCode(isolate());
}
WasmFunctionCompiler::WasmFunctionCompiler(Zone* zone, FunctionSig* sig,
diff --git a/deps/v8/test/cctest/wasm/wasm-run-utils.h b/deps/v8/test/cctest/wasm/wasm-run-utils.h
index aba43f3a08..af575fff77 100644
--- a/deps/v8/test/cctest/wasm/wasm-run-utils.h
+++ b/deps/v8/test/cctest/wasm/wasm-run-utils.h
@@ -13,7 +13,6 @@
#include <memory>
#include "src/base/utils/random-number-generator.h"
-#include "src/code-stubs.h"
#include "src/compiler/compiler-source-position-table.h"
#include "src/compiler/graph-visualizer.h"
#include "src/compiler/int64-lowering.h"
@@ -208,7 +207,7 @@ class TestingModuleBuilder {
native_module_->SetExecutable(true);
}
- ModuleEnv CreateModuleEnv();
+ CompilationEnv CreateCompilationEnv();
ExecutionTier execution_tier() const { return execution_tier_; }
@@ -224,7 +223,7 @@ class TestingModuleBuilder {
uint32_t global_offset = 0;
byte* mem_start_ = nullptr;
uint32_t mem_size_ = 0;
- V8_ALIGNED(16) byte globals_data_[kMaxGlobalsSize];
+ alignas(16) byte globals_data_[kMaxGlobalsSize];
WasmInterpreter* interpreter_ = nullptr;
ExecutionTier execution_tier_;
Handle<WasmInstanceObject> instance_object_;
@@ -239,7 +238,7 @@ class TestingModuleBuilder {
};
void TestBuildingGraph(Zone* zone, compiler::JSGraph* jsgraph,
- ModuleEnv* module, FunctionSig* sig,
+ CompilationEnv* module, FunctionSig* sig,
compiler::SourcePositionTable* source_position_table,
const byte* start, const byte* end);
@@ -263,11 +262,7 @@ class WasmFunctionWrapper : private compiler::GraphAndBuilders {
intptr_t address = static_cast<intptr_t>(code->instruction_start());
compiler::NodeProperties::ChangeOp(
inner_code_node_,
- kPointerSize == 8
- ? common()->RelocatableInt64Constant(address,
- RelocInfo::JS_TO_WASM_CALL)
- : common()->RelocatableInt32Constant(static_cast<int>(address),
- RelocInfo::JS_TO_WASM_CALL));
+ common()->ExternalConstant(ExternalReference::FromRawAddress(address)));
}
const compiler::Operator* IntPtrConstant(intptr_t value) {
diff --git a/deps/v8/test/common/assembler-tester.h b/deps/v8/test/common/assembler-tester.h
index eca34c5521..5861acd71e 100644
--- a/deps/v8/test/common/assembler-tester.h
+++ b/deps/v8/test/common/assembler-tester.h
@@ -10,41 +10,70 @@
namespace v8 {
namespace internal {
-static inline uint8_t* AllocateAssemblerBuffer(
- size_t* allocated,
- size_t requested = v8::internal::AssemblerBase::kMinimalBufferSize,
- void* address = nullptr) {
- size_t page_size = v8::internal::AllocatePageSize();
- size_t alloc_size = RoundUp(requested, page_size);
- void* result = v8::internal::AllocatePages(
- GetPlatformPageAllocator(), address, alloc_size, page_size,
- v8::PageAllocator::kReadWriteExecute);
- CHECK(result);
- *allocated = alloc_size;
- return static_cast<uint8_t*>(result);
-}
+class TestingAssemblerBuffer : public AssemblerBuffer {
+ public:
+ TestingAssemblerBuffer(size_t requested, void* address) {
+ size_t page_size = v8::internal::AllocatePageSize();
+ size_t alloc_size = RoundUp(requested, page_size);
+ CHECK_GE(kMaxInt, alloc_size);
+ size_ = static_cast<int>(alloc_size);
+ buffer_ = static_cast<byte*>(AllocatePages(GetPlatformPageAllocator(),
+ address, alloc_size, page_size,
+ v8::PageAllocator::kReadWrite));
+ CHECK_NOT_NULL(buffer_);
+ }
-static inline void MakeAssemblerBufferExecutable(uint8_t* buffer,
- size_t allocated) {
- // Flush the instruction cache as part of making the buffer executable.
- // Note: we do this before setting permissions to ReadExecute because on
- // some older Arm64 kernels there is a bug which causes an access error on
- // cache flush instructions to trigger access error on non-writable memory.
- // See https://bugs.chromium.org/p/v8/issues/detail?id=8157
- Assembler::FlushICache(buffer, allocated);
-
- bool result =
- v8::internal::SetPermissions(GetPlatformPageAllocator(), buffer,
- allocated, v8::PageAllocator::kReadExecute);
- CHECK(result);
-}
+ ~TestingAssemblerBuffer() {
+ CHECK(FreePages(GetPlatformPageAllocator(), buffer_, size_));
+ }
+
+ byte* start() const override { return buffer_; }
+
+ int size() const override { return size_; }
+
+ std::unique_ptr<AssemblerBuffer> Grow(int new_size) override {
+ FATAL("Cannot grow TestingAssemblerBuffer");
+ }
+
+ std::unique_ptr<AssemblerBuffer> CreateView() const {
+ return ExternalAssemblerBuffer(buffer_, size_);
+ }
-static inline void MakeAssemblerBufferWritable(uint8_t* buffer,
- size_t allocated) {
- bool result =
- v8::internal::SetPermissions(GetPlatformPageAllocator(), buffer,
- allocated, v8::PageAllocator::kReadWrite);
- CHECK(result);
+ void MakeExecutable() {
+ // Flush the instruction cache as part of making the buffer executable.
+ // Note: we do this before setting permissions to ReadExecute because some
+ // older ARM kernels have a bug that makes cache-flush instructions trigger
+ // an access error on non-writable memory.
+ // See https://bugs.chromium.org/p/v8/issues/detail?id=8157
+ Assembler::FlushICache(buffer_, size_);
+
+ bool result = SetPermissions(GetPlatformPageAllocator(), buffer_, size_,
+ v8::PageAllocator::kReadExecute);
+ CHECK(result);
+ }
+
+ void MakeWritable() {
+ bool result = SetPermissions(GetPlatformPageAllocator(), buffer_, size_,
+ v8::PageAllocator::kReadWrite);
+ CHECK(result);
+ }
+
+ // TODO(wasm): Only needed for the "test-jump-table-assembler.cc" tests.
+ void MakeWritableAndExecutable() {
+ bool result = SetPermissions(GetPlatformPageAllocator(), buffer_, size_,
+ v8::PageAllocator::kReadWriteExecute);
+ CHECK(result);
+ }
+
+ private:
+ byte* buffer_;
+ int size_;
+};
+
+static inline std::unique_ptr<TestingAssemblerBuffer> AllocateAssemblerBuffer(
+ size_t requested = v8::internal::AssemblerBase::kMinimalBufferSize,
+ void* address = nullptr) {
+ return base::make_unique<TestingAssemblerBuffer>(requested, address);
}
} // namespace internal
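The TestingAssemblerBuffer introduced above folds the old allocate/flush/protect helpers into one RAII object: pages come back read-write from the constructor, are released in the destructor, and the permission flips become member calls. A minimal usage sketch, assuming the cctest conventions this patch targets (the emitted code and the function-pointer cast are placeholders, not part of the patch):

  // Sketch only: driving the new buffer from a test body.
  auto buffer = AllocateAssemblerBuffer();  // read-write pages, freed by dtor
  Assembler assm(AssemblerOptions{}, buffer->CreateView());
  // ... emit code into assm ...
  buffer->MakeExecutable();  // flushes the icache first, then flips to RX
  auto fn = reinterpret_cast<int (*)()>(buffer->start());
  int result = fn();
  buffer->MakeWritable();  // back to RW if the test patches the code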
diff --git a/deps/v8/test/common/wasm/test-signatures.h b/deps/v8/test/common/wasm/test-signatures.h
index 1b24a3b3cd..8b47720870 100644
--- a/deps/v8/test/common/wasm/test-signatures.h
+++ b/deps/v8/test/common/wasm/test-signatures.h
@@ -6,14 +6,13 @@
#define TEST_SIGNATURES_H
#include "src/signature.h"
+#include "src/wasm/value-type.h"
#include "src/wasm/wasm-opcodes.h"
namespace v8 {
namespace internal {
namespace wasm {
-typedef Signature<ValueType> FunctionSig;
-
// A helper class with many useful signatures in order to simplify tests.
class TestSignatures {
public:
diff --git a/deps/v8/test/common/wasm/wasm-macro-gen.h b/deps/v8/test/common/wasm/wasm-macro-gen.h
index f722062662..17045ac325 100644
--- a/deps/v8/test/common/wasm/wasm-macro-gen.h
+++ b/deps/v8/test/common/wasm/wasm-macro-gen.h
@@ -17,14 +17,23 @@
#define WASM_MODULE_HEADER U32_LE(kWasmMagic), U32_LE(kWasmVersion)
-#define IMPORT_SIG_INDEX(v) U32V_1(v)
+#define SIG_INDEX(v) U32V_1(v)
#define FUNC_INDEX(v) U32V_1(v)
-#define TABLE_INDEX(v) U32V_1(v)
#define EXCEPTION_INDEX(v) U32V_1(v)
#define NO_NAME U32V_1(0)
-#define NAME_LENGTH(v) U32V_1(v)
#define ENTRY_COUNT(v) U32V_1(v)
+// Segment flags
+#define ACTIVE_NO_INDEX 0
+#define PASSIVE 1
+#define ACTIVE_WITH_INDEX 2
+
+// The table index field in an element segment was repurposed as a flags field.
+// To specify a table index, we have to emit the flag value 2 followed by
+// the table index.
+#define TABLE_INDEX0 U32V_1(ACTIVE_NO_INDEX)
+#define TABLE_INDEX(v) U32V_1(ACTIVE_WITH_INDEX), U32V_1(v)
+
#define ZERO_ALIGNMENT 0
#define ZERO_OFFSET 0
@@ -573,10 +582,25 @@ inline WasmOpcode LoadStoreOpcodeOf(MachineType type, bool store) {
#define WASM_I64_SCONVERT_SAT_F64(x) x, WASM_NUMERIC_OP(kExprI64SConvertSatF64)
#define WASM_I64_UCONVERT_SAT_F64(x) x, WASM_NUMERIC_OP(kExprI64UConvertSatF64)
+#define MEMORY_ZERO 0
+
+#define WASM_MEMORY_INIT(seg, dst, src, size) \
+ dst, src, size, WASM_NUMERIC_OP(kExprMemoryInit), MEMORY_ZERO, U32V_1(seg)
+#define WASM_MEMORY_DROP(seg) WASM_NUMERIC_OP(kExprMemoryDrop), U32V_1(seg)
+#define WASM_MEMORY_COPY(dst, src, size) \
+ dst, src, size, WASM_NUMERIC_OP(kExprMemoryCopy), MEMORY_ZERO
+#define WASM_MEMORY_FILL(dst, val, size) \
+ dst, val, size, WASM_NUMERIC_OP(kExprMemoryFill), MEMORY_ZERO
+#define WASM_TABLE_INIT(seg, dst, src, size) \
+ dst, src, size, WASM_NUMERIC_OP(kExprTableInit), TABLE_ZERO, U32V_1(seg)
+#define WASM_TABLE_DROP(seg) WASM_NUMERIC_OP(kExprTableDrop), U32V_1(seg)
+#define WASM_TABLE_COPY(dst, src, size) \
+ dst, src, size, WASM_NUMERIC_OP(kExprTableCopy), TABLE_ZERO
+
//------------------------------------------------------------------------------
// Memory Operations.
//------------------------------------------------------------------------------
-#define WASM_GROW_MEMORY(x) x, kExprGrowMemory, 0
+#define WASM_GROW_MEMORY(x) x, kExprMemoryGrow, 0
#define WASM_MEMORY_SIZE kExprMemorySize, 0
#define SIG_ENTRY_v_v kWasmFunctionTypeCode, 0, 0
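Since the flags byte replaces the old table-index byte, the element-segment macros above now expand to different leading bytes per segment kind. For illustration (array names are ours; the byte values follow directly from the flag definitions):

  // Sketch: leading bytes emitted by the element-segment macros.
  const byte kActiveDefault[] = {0};     // TABLE_INDEX0: ACTIVE_NO_INDEX
  const byte kPassive[]       = {1};     // PASSIVE: no table index follows
  const byte kActiveTable5[]  = {2, 5};  // TABLE_INDEX(5): flag 2, then index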
diff --git a/deps/v8/test/common/wasm/wasm-module-runner.cc b/deps/v8/test/common/wasm/wasm-module-runner.cc
index 9dfbe6fe1a..15e71b017d 100644
--- a/deps/v8/test/common/wasm/wasm-module-runner.cc
+++ b/deps/v8/test/common/wasm/wasm-module-runner.cc
@@ -7,7 +7,7 @@
#include "src/handles.h"
#include "src/isolate.h"
#include "src/objects-inl.h"
-#include "src/objects.h"
+#include "src/objects/heap-number-inl.h"
#include "src/property-descriptor.h"
#include "src/wasm/module-decoder.h"
#include "src/wasm/wasm-engine.h"
@@ -51,10 +51,10 @@ std::shared_ptr<WasmModule> DecodeWasmModuleForTesting(
if (decoding_result.failed()) {
// Module verification failed. Throw.
thrower->CompileError("DecodeWasmModule failed: %s",
- decoding_result.error_msg().c_str());
+ decoding_result.error().message().c_str());
}
- return std::move(decoding_result.val);
+ return std::move(decoding_result).value();
}
bool InterpretWasmModuleForTesting(Isolate* isolate,
@@ -143,12 +143,16 @@ int32_t CompileAndRunAsmWasmModule(Isolate* isolate, const byte* module_start,
const byte* module_end) {
HandleScope scope(isolate);
ErrorThrower thrower(isolate, "CompileAndRunAsmWasmModule");
- MaybeHandle<WasmModuleObject> module =
+ MaybeHandle<AsmWasmData> data =
isolate->wasm_engine()->SyncCompileTranslatedAsmJs(
isolate, &thrower, ModuleWireBytes(module_start, module_end),
- Handle<Script>::null(), Vector<const byte>());
- DCHECK_EQ(thrower.error(), module.is_null());
- if (module.is_null()) return -1;
+ Vector<const byte>(), Handle<HeapNumber>());
+ DCHECK_EQ(thrower.error(), data.is_null());
+ if (data.is_null()) return -1;
+
+ MaybeHandle<WasmModuleObject> module =
+ isolate->wasm_engine()->FinalizeTranslatedAsmJs(
+ isolate, data.ToHandleChecked(), Handle<Script>::null());
MaybeHandle<WasmInstanceObject> instance =
isolate->wasm_engine()->SyncInstantiate(
@@ -160,10 +164,9 @@ int32_t CompileAndRunAsmWasmModule(Isolate* isolate, const byte* module_start,
return RunWasmModuleForTesting(isolate, instance.ToHandleChecked(), 0,
nullptr);
}
-int32_t InterpretWasmModule(Isolate* isolate,
- Handle<WasmInstanceObject> instance,
- ErrorThrower* thrower, int32_t function_index,
- WasmValue* args, bool* possible_nondeterminism) {
+WasmInterpretationResult InterpretWasmModule(
+ Isolate* isolate, Handle<WasmInstanceObject> instance,
+ int32_t function_index, WasmValue* args) {
// Don't execute more than 16k steps.
constexpr int kMaxNumSteps = 16 * 1024;
@@ -186,17 +189,19 @@ int32_t InterpretWasmModule(Isolate* isolate,
bool stack_overflow = isolate->has_pending_exception();
isolate->clear_pending_exception();
- *possible_nondeterminism = thread->PossibleNondeterminism();
- if (stack_overflow) return 0xDEADBEEF;
+ if (stack_overflow) return WasmInterpretationResult::Stopped();
- if (thread->state() == WasmInterpreter::TRAPPED) return 0xDEADBEEF;
+ if (thread->state() == WasmInterpreter::TRAPPED) {
+ return WasmInterpretationResult::Trapped(thread->PossibleNondeterminism());
+ }
- if (interpreter_result == WasmInterpreter::FINISHED)
- return thread->GetReturnValue().to<int32_t>();
+ if (interpreter_result == WasmInterpreter::FINISHED) {
+ return WasmInterpretationResult::Finished(
+ thread->GetReturnValue().to<int32_t>(),
+ thread->PossibleNondeterminism());
+ }
- thrower->RangeError(
- "Interpreter did not finish execution within its step bound");
- return -1;
+ return WasmInterpretationResult::Stopped();
}
MaybeHandle<WasmExportedFunction> GetExportedFunction(
diff --git a/deps/v8/test/common/wasm/wasm-module-runner.h b/deps/v8/test/common/wasm/wasm-module-runner.h
index 7aa40bc6f1..f3ed508e40 100644
--- a/deps/v8/test/common/wasm/wasm-module-runner.h
+++ b/deps/v8/test/common/wasm/wasm-module-runner.h
@@ -61,13 +61,48 @@ int32_t CompileAndRunWasmModule(Isolate* isolate, const byte* module_start,
MaybeHandle<WasmInstanceObject> CompileAndInstantiateForTesting(
Isolate* isolate, ErrorThrower* thrower, const ModuleWireBytes& bytes);
+class WasmInterpretationResult {
+ public:
+ static WasmInterpretationResult Stopped() { return {kStopped, 0, false}; }
+ static WasmInterpretationResult Trapped(bool possible_nondeterminism) {
+ return {kTrapped, 0, possible_nondeterminism};
+ }
+ static WasmInterpretationResult Finished(int32_t result,
+ bool possible_nondeterminism) {
+ return {kFinished, result, possible_nondeterminism};
+ }
+
+ bool stopped() const { return status_ == kStopped; }
+ bool trapped() const { return status_ == kTrapped; }
+ bool finished() const { return status_ == kFinished; }
+
+ int32_t result() const {
+ DCHECK_EQ(status_, kFinished);
+ return result_;
+ }
+
+ bool possible_nondeterminism() const { return possible_nondeterminism_; }
+
+ private:
+ enum Status { kFinished, kTrapped, kStopped };
+
+ const Status status_;
+ const int32_t result_;
+ const bool possible_nondeterminism_;
+
+ WasmInterpretationResult(Status status, int32_t result,
+ bool possible_nondeterminism)
+ : status_(status),
+ result_(result),
+ possible_nondeterminism_(possible_nondeterminism) {}
+};
+
// Interprets the given module, starting at the function specified by
// {function_index}. The return type of the function has to be int32. The module
// should not have any imports or exports.
-int32_t InterpretWasmModule(Isolate* isolate,
- Handle<WasmInstanceObject> instance,
- ErrorThrower* thrower, int32_t function_index,
- WasmValue* args, bool* possible_nondeterminism);
+WasmInterpretationResult InterpretWasmModule(
+ Isolate* isolate, Handle<WasmInstanceObject> instance,
+ int32_t function_index, WasmValue* args);
// Runs the module instance with arguments.
int32_t RunWasmModuleForTesting(Isolate* isolate,
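With WasmInterpretationResult, callers branch on the interpreter outcome instead of decoding the old 0xDEADBEEF sentinel and out-parameter. A usage sketch, assuming an isolate, an instantiated module, and prepared arguments:

  WasmInterpretationResult r =
      testing::InterpretWasmModule(isolate, instance, 0, args);
  if (r.finished()) {
    int32_t v = r.result();  // result() DCHECKs that the status is kFinished
    // ... compare v against the compiled-code result ...
  } else if (r.trapped()) {
    // ... the compiled code is expected to throw as well ...
  } else {
    // stopped: step bound hit or stack overflow; nothing to compare
  }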
diff --git a/deps/v8/test/debugger/debug/es6/debug-promises/evaluate-across-microtasks.js b/deps/v8/test/debugger/debug/es6/debug-promises/evaluate-across-microtasks.js
index 71b07476d3..ec3555d22b 100644
--- a/deps/v8/test/debugger/debug/es6/debug-promises/evaluate-across-microtasks.js
+++ b/deps/v8/test/debugger/debug/es6/debug-promises/evaluate-across-microtasks.js
@@ -51,7 +51,7 @@ LogX("start")();
// Make sure that the debug event listener was invoked.
assertTrue(listenerComplete);
-%RunMicrotasks();
+%PerformMicrotaskCheckpoint();
var expectation =
[ "[0] debugger", "[1] start", "[1] then 1",
diff --git a/deps/v8/test/debugger/debug/es6/debug-promises/proxy-as-promise.js b/deps/v8/test/debugger/debug/es6/debug-promises/proxy-as-promise.js
index 3bd1fead08..431837ceba 100644
--- a/deps/v8/test/debugger/debug/es6/debug-promises/proxy-as-promise.js
+++ b/deps/v8/test/debugger/debug/es6/debug-promises/proxy-as-promise.js
@@ -39,4 +39,4 @@ function listener(event, exec_state, event_data, data) {}
Debug.setBreakOnUncaughtException();
Debug.setListener(listener);
-%RunMicrotasks();
+%PerformMicrotaskCheckpoint();
diff --git a/deps/v8/test/debugger/debug/es6/debug-promises/reject-in-constructor-opt.js b/deps/v8/test/debugger/debug/es6/debug-promises/reject-in-constructor-opt.js
index 6b7abde111..2b8ebb1c0b 100644
--- a/deps/v8/test/debugger/debug/es6/debug-promises/reject-in-constructor-opt.js
+++ b/deps/v8/test/debugger/debug/es6/debug-promises/reject-in-constructor-opt.js
@@ -40,22 +40,22 @@ function bar(a,b) {
}
foo();
-%RunMicrotasks();
+%PerformMicrotaskCheckpoint();
foo();
-%RunMicrotasks();
+%PerformMicrotaskCheckpoint();
%OptimizeFunctionOnNextCall(foo);
// bar likely gets inlined into foo.
foo();
-%RunMicrotasks();
+%PerformMicrotaskCheckpoint();
%NeverOptimizeFunction(bar);
%OptimizeFunctionOnNextCall(foo);
// bar does not get inlined into foo.
foo();
-%RunMicrotasks();
+%PerformMicrotaskCheckpoint();
assertEquals(0, expected_events);
diff --git a/deps/v8/test/debugger/debug/es6/debug-step-destructuring-bind.js b/deps/v8/test/debugger/debug/es6/debug-step-destructuring-bind.js
index 2e707b994c..8a5eaea8d2 100644
--- a/deps/v8/test/debugger/debug/es6/debug-step-destructuring-bind.js
+++ b/deps/v8/test/debugger/debug/es6/debug-step-destructuring-bind.js
@@ -21,89 +21,89 @@ function listener(event, exec_state, event_data, data) {
} catch (e) {
exception = e;
print(e);
- }
+ } // B34
};
Debug.setListener(listener);
-var id = x => x; // B9 B10 B36 B37
+var id = x => x; // B11 B12 B42 B43
function test() {
debugger; // B0
function fx1([
- a, // B2
- b // B3
- ]) {
- assertEquals([1, 2], [a, b]); // B4
- } // B5
+ a, // B3
+ b // B4
+ ]) { // B2
+ assertEquals([1, 2], [a, b]); // B5
+ } // B6
fx1([1, 2, 3]); // B1
function f2([
- a, // B7
- b = id(3) // B8
- ]) {
- assertEquals([4, 3], [a, b]); // B11
- } // B12
- f2([4]); // B6
+ a, // B9
+ b = id(3) // B10
+ ]) { // B8
+ assertEquals([4, 3], [a, b]); // B13
+ } // B14
+ f2([4]); // B7
function f3({
- x: a, // B14
- y: b // B15
- }) {
- assertEquals([5, 6], [a, b]); // B16
- } // B17
- f3({y: 6, x: 5}); // B13
+ x: a, // B17
+ y: b // B18
+ }) { // B16
+ assertEquals([5, 6], [a, b]); // B19
+ } // B20
+ f3({y: 6, x: 5}); // B15
function f4([
- a, // B19
+ a, // B23
{
- b, // B20
- c, // B21
+ b, // B24
+ c, // B25
}
- ]) {
- assertEquals([2, 4, 6], [a, b, c]); // B22
- } // B23
- f4([2, {c: 6, b: 4}]); // B18
+ ]) { // B22
+ assertEquals([2, 4, 6], [a, b, c]); // B26
+ } // B27
+ f4([2, {c: 6, b: 4}]); // B21
function f5([
{
- a, // B25
- b = 7 // B26
+ a, // B30
+ b = 7 // B31
},
- c = 3 // B27
- ] = [{a:1}]) {
- assertEquals([1, 7, 3], [a, b, c]); // B28
- } // B29
- f5(); // B24
+ c = 3 // B32
+ ] = [{a:1}]) { // B29
+ assertEquals([1, 7, 3], [a, b, c]); // B33
+ } // B34
+ f5(); // B28
- var name = "x"; // B30
+ var name = "x"; // B35
function f6({
- [id(name)]: a, // B34 B35
- b = a // B38
- }) {
- assertEquals([9, 9], [a, b]); // B39
- } // B40
- var o6 = {}; // B31
- o6[name] = 9; // B32
- f6(o6); // B33
+ [id(name)]: a, // B40 B41
+ b = a // B44
+ }) { // B39
+ assertEquals([9, 9], [a, b]); // B45
+ } // B46
+ var o6 = {}; // B36
+ o6[name] = 9; // B37
+ f6(o6); // B38
try {
- throw [3, 4]; // B41
+ throw [3, 4]; // B47
} catch ([
- a, // B42
- b, // B43
- c = 6 // B44
- ]) {
- assertEquals([3, 4, 6], [a, b, c]); // B45
+ a, // B49
+ b, // B50
+ c = 6 // B51
+ ]) { // B48
+ assertEquals([3, 4, 6], [a, b, c]); // B52
}
var {
- x: a,
- y: b = 9
- } = { x: 4 }; // B46
- assertEquals([4, 9], [a, b]); // B47
-} // B48
+ x: a, // B54
+ y: b = 9 // B55
+ } = { x: 4 }; // B53
+ assertEquals([4, 9], [a, b]); // B56
+} // B57
test();
-Debug.setListener(null); // B49
+Debug.setListener(null); // B58
assertNull(exception);
diff --git a/deps/v8/test/debugger/debug/es6/debug-stepin-default-parameters.js b/deps/v8/test/debugger/debug/es6/debug-stepin-default-parameters.js
index 9f8eda5a68..cfc2f77e17 100644
--- a/deps/v8/test/debugger/debug/es6/debug-stepin-default-parameters.js
+++ b/deps/v8/test/debugger/debug/es6/debug-stepin-default-parameters.js
@@ -41,5 +41,5 @@ Debug.setListener(null); // c
assertNull(exception);
assertEquals("default", result);
-assertEquals(["a0","b13","f18b13","d2f18b13","d19f18b13","g14b13","c0"],
+assertEquals(["a0","b13","f31b13","f18b13","d2f18b13","d19f18b13","g14b13","c0"],
log);
diff --git a/deps/v8/test/debugger/debug/es8/async-debug-builtin-predictions.js b/deps/v8/test/debugger/debug/es8/async-debug-builtin-predictions.js
index 70f1b57e7e..bcd990896b 100644
--- a/deps/v8/test/debugger/debug/es8/async-debug-builtin-predictions.js
+++ b/deps/v8/test/debugger/debug/es8/async-debug-builtin-predictions.js
@@ -57,7 +57,7 @@ async function foo() {
foo();
-%RunMicrotasks();
+%PerformMicrotaskCheckpoint();
Debug.setListener(null);
Debug.clearBreakOnException();
diff --git a/deps/v8/test/debugger/debug/es8/async-debug-caught-exception-cases.js b/deps/v8/test/debugger/debug/es8/async-debug-caught-exception-cases.js
index f5c1ed98a8..24cf598439 100644
--- a/deps/v8/test/debugger/debug/es8/async-debug-caught-exception-cases.js
+++ b/deps/v8/test/debugger/debug/es8/async-debug-caught-exception-cases.js
@@ -197,7 +197,7 @@ function runPart(n) {
events = 0;
consumer(producer);
- %RunMicrotasks();
+ %PerformMicrotaskCheckpoint();
Debug.setListener(null);
if (caught) {
diff --git a/deps/v8/test/debugger/debug/es8/async-debug-caught-exception.js b/deps/v8/test/debugger/debug/es8/async-debug-caught-exception.js
index 2feecc067f..de0c4e9c5b 100644
--- a/deps/v8/test/debugger/debug/es8/async-debug-caught-exception.js
+++ b/deps/v8/test/debugger/debug/es8/async-debug-caught-exception.js
@@ -37,7 +37,7 @@ log = [];
Debug.setListener(listener);
Debug.setBreakOnException();
caught_throw();
-%RunMicrotasks();
+%PerformMicrotaskCheckpoint();
Debug.setListener(null);
Debug.clearBreakOnException();
assertEquals(["a"], log);
@@ -48,7 +48,7 @@ log = [];
Debug.setListener(listener);
Debug.setBreakOnUncaughtException();
caught_throw();
-%RunMicrotasks();
+%PerformMicrotaskCheckpoint();
Debug.setListener(null);
Debug.clearBreakOnUncaughtException();
assertEquals([], log);
@@ -69,7 +69,7 @@ log = [];
Debug.setListener(listener);
Debug.setBreakOnException();
caught_reject();
-%RunMicrotasks();
+%PerformMicrotaskCheckpoint();
Debug.setListener(null);
Debug.clearBreakOnException();
assertEquals([], log);
@@ -80,7 +80,7 @@ log = [];
Debug.setListener(listener);
Debug.setBreakOnUncaughtException();
caught_reject();
-%RunMicrotasks();
+%PerformMicrotaskCheckpoint();
Debug.setListener(null);
Debug.clearBreakOnUncaughtException();
assertEquals([], log);
@@ -95,7 +95,7 @@ async function propagate_inner() { return thrower(); }
async function propagate_outer() { return propagate_inner(); }
propagate_outer();
-%RunMicrotasks();
+%PerformMicrotaskCheckpoint();
assertEquals(["a"], log);
assertNull(exception);
@@ -104,7 +104,7 @@ log = [];
async function propagate_await() { await 1; return thrower(); }
async function propagate_await_outer() { return propagate_await(); }
propagate_await_outer();
-%RunMicrotasks();
+%PerformMicrotaskCheckpoint();
assertEquals(["a"], log);
assertNull(exception);
@@ -113,7 +113,7 @@ Debug.setBreakOnUncaughtException();
log = [];
Promise.resolve().then(() => Promise.reject()).catch(() => log.push("d")); // Exception c
-%RunMicrotasks();
+%PerformMicrotaskCheckpoint();
assertEquals(["d"], log);
assertNull(exception);
diff --git a/deps/v8/test/debugger/debug/es8/async-debug-step-abort-at-break.js b/deps/v8/test/debugger/debug/es8/async-debug-step-abort-at-break.js
index 85232b075b..47605cea16 100644
--- a/deps/v8/test/debugger/debug/es8/async-debug-step-abort-at-break.js
+++ b/deps/v8/test/debugger/debug/es8/async-debug-step-abort-at-break.js
@@ -48,6 +48,6 @@ debugger; // B3 StepNext
late_resolve(3); // B4 Continue
-%RunMicrotasks();
+%PerformMicrotaskCheckpoint();
assertEquals(5, step_count);
diff --git a/deps/v8/test/debugger/debug/es8/async-debug-step-continue-at-break.js b/deps/v8/test/debugger/debug/es8/async-debug-step-continue-at-break.js
index a4726bd7c1..79dcad6b1e 100644
--- a/deps/v8/test/debugger/debug/es8/async-debug-step-continue-at-break.js
+++ b/deps/v8/test/debugger/debug/es8/async-debug-step-continue-at-break.js
@@ -37,8 +37,8 @@ async function f() {
a +=
await // B1 StepIn
g(); // B2 StepIn
- return a; // B4 StepNext
-} // B5 Continue
+ return a; // B4 Continue
+}
f();
@@ -48,6 +48,6 @@ debugger; // B3 Continue
late_resolve(3);
-%RunMicrotasks();
+%PerformMicrotaskCheckpoint();
-assertEquals(6, step_count);
+assertEquals(5, step_count);
diff --git a/deps/v8/test/debugger/debug/es8/async-debug-step-in-and-out.js b/deps/v8/test/debugger/debug/es8/async-debug-step-in-and-out.js
index 43fb16fa6f..5d64e5a10e 100644
--- a/deps/v8/test/debugger/debug/es8/async-debug-step-in-and-out.js
+++ b/deps/v8/test/debugger/debug/es8/async-debug-step-in-and-out.js
@@ -37,13 +37,13 @@ async function f() {
a +=
await // B1 StepIn
g();
- return a; // B3 StepNext
-} // B4 Continue
+ return a; // B3 Continue
+}
f();
late_resolve(3);
-%RunMicrotasks();
+%PerformMicrotaskCheckpoint();
-assertEquals(5, step_count);
+assertEquals(4, step_count);
diff --git a/deps/v8/test/debugger/debug/es8/async-debug-step-in-out-out.js b/deps/v8/test/debugger/debug/es8/async-debug-step-in-out-out.js
index c1d8fd71be..7702742f58 100644
--- a/deps/v8/test/debugger/debug/es8/async-debug-step-in-out-out.js
+++ b/deps/v8/test/debugger/debug/es8/async-debug-step-in-out-out.js
@@ -44,6 +44,6 @@ f();
late_resolve(3);
-%RunMicrotasks();
+%PerformMicrotaskCheckpoint();
assertEquals(4, step_count);
diff --git a/deps/v8/test/debugger/debug/es8/async-debug-step-in.js b/deps/v8/test/debugger/debug/es8/async-debug-step-in.js
index c32fa2fedc..fe84e4b9ba 100644
--- a/deps/v8/test/debugger/debug/es8/async-debug-step-in.js
+++ b/deps/v8/test/debugger/debug/es8/async-debug-step-in.js
@@ -37,13 +37,13 @@ async function f() {
a +=
await // B1 StepIn
g();
- return a; // B6 StepIn
-} // B7 Continue
+ return a; // B6 Continue
+}
f().then(value => assertEquals(4, value));
late_resolve(3);
-%RunMicrotasks();
+%PerformMicrotaskCheckpoint();
-assertEquals(8, step_count);
+assertEquals(7, step_count);
diff --git a/deps/v8/test/debugger/debug/es8/async-debug-step-nested.js b/deps/v8/test/debugger/debug/es8/async-debug-step-nested.js
index 79e8dfaaef..74432a6f52 100644
--- a/deps/v8/test/debugger/debug/es8/async-debug-step-nested.js
+++ b/deps/v8/test/debugger/debug/es8/async-debug-step-nested.js
@@ -37,8 +37,8 @@ async function f1() {
a +=
await // B1 StepIn
f2(); // B2 StepIn
- return a; // B5 StepNext
-} // B6 Continue
+ return a; // B5 Continue
+}
async function f2() {
var b = 0 + // B2 StepIn
@@ -51,6 +51,6 @@ f1();
late_resolve(3);
-%RunMicrotasks();
+%PerformMicrotaskCheckpoint();
-assertEquals(7, step_count);
+assertEquals(6, step_count);
diff --git a/deps/v8/test/debugger/debug/es8/async-debug-step-next-constant.js b/deps/v8/test/debugger/debug/es8/async-debug-step-next-constant.js
index 32833acc11..2b432632fa 100644
--- a/deps/v8/test/debugger/debug/es8/async-debug-step-next-constant.js
+++ b/deps/v8/test/debugger/debug/es8/async-debug-step-next-constant.js
@@ -27,11 +27,11 @@ async function f() {
a += // B1 StepNext
await
5;
- return a; // B2 StepNext
-} // B3 Continue
+ return a; // B2 Continue
+}
f();
-%RunMicrotasks();
+%PerformMicrotaskCheckpoint();
-assertEquals(4, step_count);
+assertEquals(3, step_count);
diff --git a/deps/v8/test/debugger/debug/es8/async-debug-step-next.js b/deps/v8/test/debugger/debug/es8/async-debug-step-next.js
index 597afd3876..0fdbd73f05 100644
--- a/deps/v8/test/debugger/debug/es8/async-debug-step-next.js
+++ b/deps/v8/test/debugger/debug/es8/async-debug-step-next.js
@@ -37,13 +37,13 @@ async function f() {
a +=
await // B1 StepNext
g();
- return a; // B2 StepNext
-} // B3 Continue
+ return a; // B2 Continue
+}
f();
late_resolve(3);
-%RunMicrotasks();
+%PerformMicrotaskCheckpoint();
-assertEquals(4, step_count);
+assertEquals(3, step_count);
diff --git a/deps/v8/test/debugger/debug/es8/async-debug-step-out.js b/deps/v8/test/debugger/debug/es8/async-debug-step-out.js
index 3ec6dd3490..d37ce5893e 100644
--- a/deps/v8/test/debugger/debug/es8/async-debug-step-out.js
+++ b/deps/v8/test/debugger/debug/es8/async-debug-step-out.js
@@ -42,6 +42,6 @@ f();
late_resolve(3); // B2 Continue
-%RunMicrotasks();
+%PerformMicrotaskCheckpoint();
assertEquals(3, step_count);
diff --git a/deps/v8/test/debugger/debug/es8/debug-async-break-on-stack.js b/deps/v8/test/debugger/debug/es8/debug-async-break-on-stack.js
index df389f33d9..124cbabb8e 100644
--- a/deps/v8/test/debugger/debug/es8/debug-async-break-on-stack.js
+++ b/deps/v8/test/debugger/debug/es8/debug-async-break-on-stack.js
@@ -21,7 +21,7 @@ function assertEqualsAsync(expected, run, msg) {
assertFalse(hadValue || hadError);
- %RunMicrotasks();
+ %PerformMicrotaskCheckpoint();
if (hadError) throw actual;
@@ -67,7 +67,7 @@ function setbreaks() {
f();
-%RunMicrotasks();
+%PerformMicrotaskCheckpoint();
assertEqualsAsync(2, async () => break_count);
assertEqualsAsync(null, async () => exception);
diff --git a/deps/v8/test/debugger/debug/es8/debug-async-break.js b/deps/v8/test/debugger/debug/es8/debug-async-break.js
index 3e07ba9344..8a2982045a 100644
--- a/deps/v8/test/debugger/debug/es8/debug-async-break.js
+++ b/deps/v8/test/debugger/debug/es8/debug-async-break.js
@@ -21,7 +21,7 @@ function assertEqualsAsync(expected, run, msg) {
assertFalse(hadValue || hadError);
- %RunMicrotasks();
+ %PerformMicrotaskCheckpoint();
if (hadError) throw actual;
@@ -65,7 +65,7 @@ Debug.setBreakPoint(f, 5);
f();
-%RunMicrotasks();
+%PerformMicrotaskCheckpoint();
assertEqualsAsync(3, async () => break_count);
assertEqualsAsync(null, async () => exception);
diff --git a/deps/v8/test/debugger/debug/es8/debug-async-liveedit.js b/deps/v8/test/debugger/debug/es8/debug-async-liveedit.js
index 723a40f478..e85cbe3896 100644
--- a/deps/v8/test/debugger/debug/es8/debug-async-liveedit.js
+++ b/deps/v8/test/debugger/debug/es8/debug-async-liveedit.js
@@ -94,7 +94,7 @@ function patch(fun, from, to) {
assertPromiseValue("Cat", promise);
assertTrue(patch_attempted);
- %RunMicrotasks();
+ %PerformMicrotaskCheckpoint();
// At this point one iterator is live, but closed, so the patch will succeed.
patch(asyncfn, "'Cat'", "'Capybara'");
@@ -143,4 +143,4 @@ function patch(fun, from, to) {
}));
})();
-%RunMicrotasks();
+%PerformMicrotaskCheckpoint();
diff --git a/deps/v8/test/debugger/debug/es8/promise-finally.js b/deps/v8/test/debugger/debug/es8/promise-finally.js
index 6923e64f64..dc7833e6e8 100644
--- a/deps/v8/test/debugger/debug/es8/promise-finally.js
+++ b/deps/v8/test/debugger/debug/es8/promise-finally.js
@@ -35,7 +35,7 @@ Promise.resolve()
.finally(() => thenable)
.catch(e => caughtException = e);
-%RunMicrotasks();
+%PerformMicrotaskCheckpoint();
Debug.setListener(null);
Debug.clearBreakOnException();
diff --git a/deps/v8/test/debugger/debug/lazy-deopt-then-flush-bytecode.js b/deps/v8/test/debugger/debug/lazy-deopt-then-flush-bytecode.js
new file mode 100644
index 0000000000..bc4cd29cee
--- /dev/null
+++ b/deps/v8/test/debugger/debug/lazy-deopt-then-flush-bytecode.js
@@ -0,0 +1,48 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --opt --noalways-opt --stress-flush-bytecode
+// Flags: --expose-gc
+
+Debug = debug.Debug
+
+function foo() {
+ return 44;
+}
+
+function listener(event, exec_state, event_data, data) {
+ if (event != Debug.DebugEvent.Break) return;
+
+ // Optimize foo.
+ %OptimizeFunctionOnNextCall(foo);
+ foo();
+ assertOptimized(foo);
+
+ // Lazily deopt foo, which marks the code for deoptimization and invalidates
+ // the DeoptimizationData, but doesn't unlink the optimized code entry in
+ // foo's JSFunction.
+ %DeoptimizeFunction(foo);
+
+ // Run the GC. Since the DeoptimizationData is now dead, the bytecode
+ // associated with the optimized code is free to be flushed, which also
+  // frees the feedback vector meta-data.
+ gc();
+
+ // Execute foo with side-effect checks, which causes the debugger to call
+ // DeoptimizeFunction on foo. Even though the code is already marked for
+ // deoptimization, this will try to unlink the optimized code from the
+ // feedback vector, which will fail due to the feedback meta-data being
+ // flushed. The deoptimizer should call JSFunction::ResetIfBytecodeFlushed
+ // before trying to do this, which will clear the whole feedback vector and
+ // reset the JSFunction's code entry field to CompileLazy.
+ exec_state.frame(0).evaluate("foo()", true);
+}
+
+// Add the debug event listener.
+Debug.setListener(listener);
+
+function f() {
+ debugger;
+}
+f();
diff --git a/deps/v8/test/debugger/debug/side-effect/debug-evaluate-no-side-effect-builtins.js b/deps/v8/test/debugger/debug/side-effect/debug-evaluate-no-side-effect-builtins.js
index 938461690e..92d534ce58 100644
--- a/deps/v8/test/debugger/debug/side-effect/debug-evaluate-no-side-effect-builtins.js
+++ b/deps/v8/test/debugger/debug/side-effect/debug-evaluate-no-side-effect-builtins.js
@@ -73,8 +73,8 @@ function listener(event, exec_state, event_data, data) {
"flatMap", "forEach", "every", "some", "reduce", "reduceRight", "find",
"filter", "map", "findIndex"
];
- var fails = ["toString", "join", "toLocaleString", "pop", "push", "reverse",
- "shift", "unshift", "splice", "sort", "copyWithin", "fill"];
+ var fails = ["toLocaleString", "pop", "push", "reverse", "shift", "unshift",
+ "splice", "sort", "copyWithin", "fill"];
for (f of Object.getOwnPropertyNames(Array.prototype)) {
if (typeof Array.prototype[f] === "function") {
if (fails.includes(f)) {
@@ -182,6 +182,7 @@ function listener(event, exec_state, event_data, data) {
}
if (f == "normalize") continue;
if (f == "match") continue;
+ if (f == "matchAll") continue;
if (f == "search") continue;
if (f == "split" || f == "replace") {
fail(`'abcd'.${f}(2)`);
diff --git a/deps/v8/test/debugger/debugger.status b/deps/v8/test/debugger/debugger.status
index e85f1bef03..4f86b05769 100644
--- a/deps/v8/test/debugger/debugger.status
+++ b/deps/v8/test/debugger/debugger.status
@@ -45,6 +45,12 @@
}], # variant == stress
##############################################################################
+['variant == stress and (arch == arm or arch == arm64) and simulator_run', {
+ # Slow tests: https://crbug.com/v8/7783
+ 'debug/debug-stepout-scope-part*': [SKIP],
+}], # variant == stress and (arch == arm or arch == arm64) and simulator_run
+
+##############################################################################
['variant == stress_incremental_marking', {
# BUG(chromium:772010).
'debug/debug-*': [PASS, ['system == windows', SKIP]],
@@ -54,14 +60,7 @@
['gc_stress == True', {
# Skip tests not suitable for GC stress.
# Tests taking too long
- 'debug/debug-stepout-scope-part1': [SKIP],
- 'debug/debug-stepout-scope-part2': [SKIP],
- 'debug/debug-stepout-scope-part3': [SKIP],
- 'debug/debug-stepout-scope-part4': [SKIP],
- 'debug/debug-stepout-scope-part5': [SKIP],
- 'debug/debug-stepout-scope-part6': [SKIP],
- 'debug/debug-stepout-scope-part7': [SKIP],
- 'debug/debug-stepout-scope-part8': [SKIP],
+ 'debug/debug-stepout-scope-part*': [SKIP],
# Async function tests taking too long
# https://bugs.chromium.org/p/v8/issues/detail?id=5411
@@ -124,4 +123,11 @@
'debug/debug-liveedit-restart-frame': [SKIP],
}], # 'arch == s390 or arch == s390x'
+##############################################################################
+['lite_mode', {
+ # TODO(v8:7777): Re-enable once wasm is supported in jitless mode.
+ 'debug/wasm/*': [SKIP],
+ 'wasm-*': [SKIP],
+}], # lite_mode
+
]
diff --git a/deps/v8/test/debugger/regress/regress-5610.js b/deps/v8/test/debugger/regress/regress-5610.js
index 7736c040a2..d59b27eb19 100644
--- a/deps/v8/test/debugger/regress/regress-5610.js
+++ b/deps/v8/test/debugger/regress/regress-5610.js
@@ -23,9 +23,10 @@ Debug.setListener(listener);
async function f() {
var a = 1;
- debugger; // B0 StepNext
- print(1); // B1 StepNext
- return a; // B2 StepNext
-} // B3 Continue
+ debugger; // B0 StepNext
+ print(1); // B1 StepNext
+ return a + // B2 StepNext
+ 0; // B3 Continue
+}
f();
diff --git a/deps/v8/test/fuzzer/BUILD.gn b/deps/v8/test/fuzzer/BUILD.gn
index d72cb77a64..5b768ea88c 100644
--- a/deps/v8/test/fuzzer/BUILD.gn
+++ b/deps/v8/test/fuzzer/BUILD.gn
@@ -22,12 +22,5 @@ group("v8_fuzzer") {
"./wasm_async/",
"./wasm_code/",
"./wasm_compile/",
- "./wasm_data_section/",
- "./wasm_function_sigs_section/",
- "./wasm_globals_section/",
- "./wasm_imports_section/",
- "./wasm_memory_section/",
- "./wasm_names_section/",
- "./wasm_types_section/",
]
-}
\ No newline at end of file
+}
diff --git a/deps/v8/test/fuzzer/fuzzer.status b/deps/v8/test/fuzzer/fuzzer.status
index df922bbf4e..30bf257088 100644
--- a/deps/v8/test/fuzzer/fuzzer.status
+++ b/deps/v8/test/fuzzer/fuzzer.status
@@ -4,4 +4,12 @@
[
+##############################################################################
+['lite_mode', {
+ # TODO(v8:7777): Re-enable once wasm is supported in jitless mode.
+ 'multi_return/*': [SKIP],
+ 'wasm_async/*': [SKIP],
+ 'wasm_compile/*': [SKIP],
+}], # lite_mode
+
]
diff --git a/deps/v8/test/fuzzer/multi-return.cc b/deps/v8/test/fuzzer/multi-return.cc
index a7f4ca06ca..30b0020425 100644
--- a/deps/v8/test/fuzzer/multi-return.cc
+++ b/deps/v8/test/fuzzer/multi-return.cc
@@ -5,8 +5,8 @@
#include <cstddef>
#include <cstdint>
+#include "src/compiler/backend/instruction-selector.h"
#include "src/compiler/graph.h"
-#include "src/compiler/instruction-selector.h"
#include "src/compiler/linkage.h"
#include "src/compiler/node.h"
#include "src/compiler/operator.h"
@@ -138,16 +138,12 @@ std::unique_ptr<wasm::NativeModule> AllocateNativeModule(i::Isolate* isolate,
size_t code_size) {
std::shared_ptr<wasm::WasmModule> module(new wasm::WasmModule);
module->num_declared_functions = 1;
- wasm::ModuleEnv env(
- module.get(), wasm::UseTrapHandler::kNoTrapHandler,
- wasm::RuntimeExceptionSupport::kNoRuntimeExceptionSupport);
// We have to add the code object to a NativeModule, because the
// WasmCallDescriptor assumes that code is on the native heap and not
// within a code object.
return isolate->wasm_engine()->code_manager()->NewNativeModule(
- isolate, i::wasm::kAllWasmFeatures, code_size, false, std::move(module),
- env);
+ isolate, i::wasm::kAllWasmFeatures, code_size, false, std::move(module));
}
extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
diff --git a/deps/v8/test/fuzzer/parser.cc b/deps/v8/test/fuzzer/parser.cc
index 12f7a4eed4..01ec69cbda 100644
--- a/deps/v8/test/fuzzer/parser.cc
+++ b/deps/v8/test/fuzzer/parser.cc
@@ -19,7 +19,7 @@
bool IsValidInput(const uint8_t* data, size_t size) {
// Ignore overly long inputs, as they tend to find OOMs or timeouts, not real bugs.
- if (size > 8192) {
+ if (size > 2048) {
return false;
}
diff --git a/deps/v8/test/fuzzer/regexp-builtins.cc b/deps/v8/test/fuzzer/regexp-builtins.cc
index c4ff115d72..02b78b0d68 100644
--- a/deps/v8/test/fuzzer/regexp-builtins.cc
+++ b/deps/v8/test/fuzzer/regexp-builtins.cc
@@ -103,7 +103,7 @@ std::string NaiveEscape(const std::string& input, char escaped_char) {
}
// Disallow trailing backslashes as they mess with our naive source string
// concatenation.
- if (out.back() == '\\') out.back() = '_';
+ if (!out.empty() && out.back() == '\\') out.back() = '_';
return out;
}
diff --git a/deps/v8/test/fuzzer/testcfg.py b/deps/v8/test/fuzzer/testcfg.py
index 8b90d5238c..95085885d5 100644
--- a/deps/v8/test/fuzzer/testcfg.py
+++ b/deps/v8/test/fuzzer/testcfg.py
@@ -15,10 +15,7 @@ class VariantsGenerator(testsuite.VariantsGenerator):
class TestSuite(testsuite.TestSuite):
SUB_TESTS = ( 'json', 'parser', 'regexp_builtins', 'regexp', 'multi_return', 'wasm',
- 'wasm_async', 'wasm_code', 'wasm_compile',
- 'wasm_data_section', 'wasm_function_sigs_section',
- 'wasm_globals_section', 'wasm_imports_section', 'wasm_memory_section',
- 'wasm_names_section', 'wasm_types_section' )
+ 'wasm_async', 'wasm_code', 'wasm_compile')
def ListTests(self):
tests = []
diff --git a/deps/v8/test/fuzzer/wasm-code.cc b/deps/v8/test/fuzzer/wasm-code.cc
index ec9a179945..b159fad3da 100644
--- a/deps/v8/test/fuzzer/wasm-code.cc
+++ b/deps/v8/test/fuzzer/wasm-code.cc
@@ -46,7 +46,8 @@ class WasmCodeFuzzer : public WasmExecutionFuzzer {
};
extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
- return WasmCodeFuzzer().FuzzWasmModule({data, size});
+ WasmCodeFuzzer().FuzzWasmModule({data, size});
+ return 0;
}
} // namespace fuzzer
diff --git a/deps/v8/test/fuzzer/wasm-compile.cc b/deps/v8/test/fuzzer/wasm-compile.cc
index a9f4382cd1..1218bef3d8 100644
--- a/deps/v8/test/fuzzer/wasm-compile.cc
+++ b/deps/v8/test/fuzzer/wasm-compile.cc
@@ -61,14 +61,15 @@ class DataRange {
return split;
}
- template <typename T>
+ template <typename T, size_t max_bytes = sizeof(T)>
T get() {
+ STATIC_ASSERT(max_bytes <= sizeof(T));
// We want to support the case where we have less than sizeof(T) bytes
// remaining in the slice. For example, if we emit an i32 constant, it's
// okay if we don't have a full four bytes available; we'll just use what
// we have. We aren't concerned about endianness because we are generating
// arbitrary expressions.
- const size_t num_bytes = std::min(sizeof(T), data_.size());
+ const size_t num_bytes = std::min(max_bytes, data_.size());
T result = T();
memcpy(&result, data_.start(), num_bytes);
data_ += num_bytes;
@@ -342,6 +343,16 @@ class WasmGenerator {
local_op<wanted_type>(data, kExprTeeLocal);
}
+ template <size_t num_bytes>
+ void i32_const(DataRange& data) {
+ builder_->EmitI32Const(data.get<int32_t, num_bytes>());
+ }
+
+ template <size_t num_bytes>
+ void i64_const(DataRange& data) {
+ builder_->EmitI64Const(data.get<int64_t, num_bytes>());
+ }
+
Var GetRandomGlobal(DataRange& data, bool ensure_mutable) {
uint32_t index;
if (ensure_mutable) {
@@ -507,12 +518,17 @@ void WasmGenerator::Generate<kWasmStmt>(DataRange& data) {
template <>
void WasmGenerator::Generate<kWasmI32>(DataRange& data) {
GeneratorRecursionScope rec_scope(this);
- if (recursion_limit_reached() || data.size() <= sizeof(uint32_t)) {
+ if (recursion_limit_reached() || data.size() <= 1) {
builder_->EmitI32Const(data.get<uint32_t>());
return;
}
constexpr generate_fn alternates[] = {
+ &WasmGenerator::i32_const<1>,
+ &WasmGenerator::i32_const<2>,
+ &WasmGenerator::i32_const<3>,
+ &WasmGenerator::i32_const<4>,
+
&WasmGenerator::sequence<kWasmI32, kWasmStmt>,
&WasmGenerator::sequence<kWasmStmt, kWasmI32>,
&WasmGenerator::sequence<kWasmStmt, kWasmI32, kWasmStmt>,
@@ -598,12 +614,21 @@ void WasmGenerator::Generate<kWasmI32>(DataRange& data) {
template <>
void WasmGenerator::Generate<kWasmI64>(DataRange& data) {
GeneratorRecursionScope rec_scope(this);
- if (recursion_limit_reached() || data.size() <= sizeof(uint64_t)) {
+ if (recursion_limit_reached() || data.size() <= 1) {
builder_->EmitI64Const(data.get<int64_t>());
return;
}
constexpr generate_fn alternates[] = {
+ &WasmGenerator::i64_const<1>,
+ &WasmGenerator::i64_const<2>,
+ &WasmGenerator::i64_const<3>,
+ &WasmGenerator::i64_const<4>,
+ &WasmGenerator::i64_const<5>,
+ &WasmGenerator::i64_const<6>,
+ &WasmGenerator::i64_const<7>,
+ &WasmGenerator::i64_const<8>,
+
&WasmGenerator::sequence<kWasmI64, kWasmStmt>,
&WasmGenerator::sequence<kWasmStmt, kWasmI64>,
&WasmGenerator::sequence<kWasmStmt, kWasmI64, kWasmStmt>,
@@ -720,7 +745,7 @@ void WasmGenerator::Generate<kWasmF64>(DataRange& data) {
void WasmGenerator::grow_memory(DataRange& data) {
Generate<kWasmI32>(data);
- builder_->EmitWithU8(kExprGrowMemory, 0);
+ builder_->EmitWithU8(kExprMemoryGrow, 0);
}
void WasmGenerator::Generate(ValueType type, DataRange& data) {
@@ -824,7 +849,8 @@ class WasmCompileFuzzer : public WasmExecutionFuzzer {
extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
constexpr bool require_valid = true;
- return WasmCompileFuzzer().FuzzWasmModule({data, size}, require_valid);
+ WasmCompileFuzzer().FuzzWasmModule({data, size}, require_valid);
+ return 0;
}
} // namespace fuzzer
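The two-parameter DataRange::get above lets the generator ask for a constant narrower than its value type: get<int32_t, 2>() consumes at most two input bytes and leaves the upper bytes of the value-initialized result zero, biasing the fuzzer toward small constants while spending less input entropy. The core of that read in isolation (a sketch; data/remaining stand in for the Vector slice):

  #include <algorithm>
  #include <cstdint>
  #include <cstring>

  int32_t ReadNarrowedI32(const uint8_t* data, size_t remaining) {
    int32_t result = 0;  // like T result = T() in the template
    const size_t num_bytes = std::min<size_t>(2, remaining);  // max_bytes = 2
    std::memcpy(&result, data, num_bytes);  // endianness deliberately ignored
    return result;
  }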
diff --git a/deps/v8/test/fuzzer/wasm-data-section.cc b/deps/v8/test/fuzzer/wasm-data-section.cc
deleted file mode 100644
index c6a7547767..0000000000
--- a/deps/v8/test/fuzzer/wasm-data-section.cc
+++ /dev/null
@@ -1,11 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/objects-inl.h"
-#include "test/fuzzer/wasm-fuzzer-common.h"
-
-extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
- return v8::internal::wasm::fuzzer::FuzzWasmSection(
- v8::internal::wasm::kDataSectionCode, data, size);
-}
diff --git a/deps/v8/test/fuzzer/wasm-function-sigs-section.cc b/deps/v8/test/fuzzer/wasm-function-sigs-section.cc
deleted file mode 100644
index 2c894d6ee5..0000000000
--- a/deps/v8/test/fuzzer/wasm-function-sigs-section.cc
+++ /dev/null
@@ -1,11 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/objects-inl.h"
-#include "test/fuzzer/wasm-fuzzer-common.h"
-
-extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
- return v8::internal::wasm::fuzzer::FuzzWasmSection(
- v8::internal::wasm::kFunctionSectionCode, data, size);
-}
diff --git a/deps/v8/test/fuzzer/wasm-fuzzer-common.cc b/deps/v8/test/fuzzer/wasm-fuzzer-common.cc
index c253da9cb5..48c94be426 100644
--- a/deps/v8/test/fuzzer/wasm-fuzzer-common.cc
+++ b/deps/v8/test/fuzzer/wasm-fuzzer-common.cc
@@ -4,9 +4,12 @@
#include "test/fuzzer/wasm-fuzzer-common.h"
+#include <ctime>
+
#include "include/v8.h"
#include "src/isolate.h"
#include "src/objects-inl.h"
+#include "src/ostreams.h"
#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-module-builder.h"
#include "src/wasm/wasm-module.h"
@@ -22,49 +25,6 @@ namespace internal {
namespace wasm {
namespace fuzzer {
-static constexpr const char* kNameString = "name";
-static constexpr size_t kNameStringLength = 4;
-
-int FuzzWasmSection(SectionCode section, const uint8_t* data, size_t size) {
- v8_fuzzer::FuzzerSupport* support = v8_fuzzer::FuzzerSupport::Get();
- v8::Isolate* isolate = support->GetIsolate();
- i::Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate);
-
- // Clear any pending exceptions from a prior run.
- i_isolate->clear_pending_exception();
-
- v8::Isolate::Scope isolate_scope(isolate);
- v8::HandleScope handle_scope(isolate);
- v8::Context::Scope context_scope(support->GetContext());
- v8::TryCatch try_catch(isolate);
-
- AccountingAllocator allocator;
- Zone zone(&allocator, ZONE_NAME);
-
- ZoneBuffer buffer(&zone);
- buffer.write_u32(kWasmMagic);
- buffer.write_u32(kWasmVersion);
- if (section == kNameSectionCode) {
- buffer.write_u8(kUnknownSectionCode);
- buffer.write_size(size + kNameStringLength + 1);
- buffer.write_u8(kNameStringLength);
- buffer.write(reinterpret_cast<const uint8_t*>(kNameString),
- kNameStringLength);
- buffer.write(data, size);
- } else {
- buffer.write_u8(section);
- buffer.write_size(size);
- buffer.write(data, size);
- }
-
- ErrorThrower thrower(i_isolate, "decoder");
-
- testing::DecodeWasmModuleForTesting(i_isolate, &thrower, buffer.begin(),
- buffer.end(), kWasmOrigin);
-
- return 0;
-}
-
void InterpretAndExecuteModule(i::Isolate* isolate,
Handle<WasmModuleObject> module_object) {
// We do not instantiate the module if there is a start function, because a
@@ -158,12 +118,23 @@ void GenerateTestCase(Isolate* isolate, ModuleWireBytes wire_bytes,
enabled_features, wire_bytes.start(), wire_bytes.end(), kVerifyFunctions,
ModuleOrigin::kWasmOrigin, isolate->counters(), isolate->allocator());
CHECK(module_res.ok());
- WasmModule* module = module_res.val.get();
+ WasmModule* module = module_res.value().get();
CHECK_NOT_NULL(module);
StdoutStream os;
- os << "// Copyright 2018 the V8 project authors. All rights reserved.\n"
+ tzset();
+ time_t current_time = time(nullptr);
+ struct tm current_localtime;
+#ifdef V8_OS_WIN
+ localtime_s(&current_localtime, &current_time);
+#else
+ localtime_r(&current_time, &current_localtime);
+#endif
+ int year = 1900 + current_localtime.tm_year;
+
+ os << "// Copyright " << year
+ << " the V8 project authors. All rights reserved.\n"
"// Use of this source code is governed by a BSD-style license that "
"can be\n"
"// found in the LICENSE file.\n"
@@ -183,7 +154,7 @@ void GenerateTestCase(Isolate* isolate, ModuleWireBytes wire_bytes,
}
os << ", " << (module->mem_export ? "true" : "false");
if (module->has_shared_memory) {
- os << ", shared";
+ os << ", true";
}
os << ");\n";
}
@@ -193,19 +164,46 @@ void GenerateTestCase(Isolate* isolate, ModuleWireBytes wire_bytes,
<< glob.mutability << ");\n";
}
+ for (const FunctionSig* sig : module->signatures) {
+ os << " builder.addType(makeSig(" << PrintParameters(sig) << ", "
+ << PrintReturns(sig) << "));\n";
+ }
+
Zone tmp_zone(isolate->allocator(), ZONE_NAME);
+ // There currently cannot be more than one table.
+ DCHECK_GE(1, module->tables.size());
+ for (const WasmTable& table : module->tables) {
+ os << " builder.setTableBounds(" << table.initial_size << ", ";
+ if (table.has_maximum_size) {
+ os << table.maximum_size << ");\n";
+ } else {
+ os << "undefined);\n";
+ }
+ }
+ for (const WasmElemSegment& elem_segment : module->elem_segments) {
+ os << " builder.addElementSegment(";
+ switch (elem_segment.offset.kind) {
+ case WasmInitExpr::kGlobalIndex:
+ os << elem_segment.offset.val.global_index << ", true";
+ break;
+ case WasmInitExpr::kI32Const:
+ os << elem_segment.offset.val.i32_const << ", false";
+ break;
+ default:
+ UNREACHABLE();
+ }
+ os << ", " << PrintCollection(elem_segment.entries) << ");\n";
+ }
+
for (const WasmFunction& func : module->functions) {
Vector<const uint8_t> func_code = wire_bytes.GetFunctionBytes(&func);
os << " // Generate function " << (func.func_index + 1) << " (out of "
<< module->functions.size() << ").\n";
- // Generate signature.
- os << " sig" << (func.func_index + 1) << " = makeSig("
- << PrintParameters(func.sig) << ", " << PrintReturns(func.sig) << ");\n";
// Add function.
- os << " builder.addFunction(undefined, sig" << (func.func_index + 1)
- << ")\n";
+ os << " builder.addFunction(undefined, " << func.sig_index
+ << " /* sig */)\n";
// Add locals.
BodyLocalDecls decls(&tmp_zone);
@@ -249,8 +247,13 @@ void GenerateTestCase(Isolate* isolate, ModuleWireBytes wire_bytes,
os << "})();\n";
}
-int WasmExecutionFuzzer::FuzzWasmModule(Vector<const uint8_t> data,
- bool require_valid) {
+void WasmExecutionFuzzer::FuzzWasmModule(Vector<const uint8_t> data,
+ bool require_valid) {
+ // Strictly enforce the input size limit. Note that setting "max_len" on the
+ // fuzzer target is not enough, since different fuzzers are used and not all
+ // respect that limit.
+ if (data.size() > max_input_size()) return;
+
v8_fuzzer::FuzzerSupport* support = v8_fuzzer::FuzzerSupport::Get();
v8::Isolate* isolate = support->GetIsolate();
i::Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate);
@@ -277,7 +280,7 @@ int WasmExecutionFuzzer::FuzzWasmModule(Vector<const uint8_t> data,
if (!data.is_empty()) data += 1;
if (!GenerateModule(i_isolate, &zone, data, buffer, num_args,
interpreter_args, compiler_args)) {
- return 0;
+ return;
}
testing::SetupIsolateForWasmModule(i_isolate);
@@ -309,31 +312,24 @@ int WasmExecutionFuzzer::FuzzWasmModule(Vector<const uint8_t> data,
CHECK_EQ(compiles, validates);
CHECK_IMPLIES(require_valid, validates);
- if (!compiles) return 0;
+ if (!compiles) return;
- int32_t result_interpreter;
- bool possible_nondeterminism = false;
- {
- MaybeHandle<WasmInstanceObject> interpreter_instance =
- i_isolate->wasm_engine()->SyncInstantiate(
- i_isolate, &interpreter_thrower, compiled_module.ToHandleChecked(),
- MaybeHandle<JSReceiver>(), MaybeHandle<JSArrayBuffer>());
+ MaybeHandle<WasmInstanceObject> interpreter_instance =
+ i_isolate->wasm_engine()->SyncInstantiate(
+ i_isolate, &interpreter_thrower, compiled_module.ToHandleChecked(),
+ MaybeHandle<JSReceiver>(), MaybeHandle<JSArrayBuffer>());
- // Ignore instantiation failure.
- if (interpreter_thrower.error()) {
- return 0;
- }
+ // Ignore instantiation failure.
+ if (interpreter_thrower.error()) return;
- result_interpreter = testing::InterpretWasmModule(
- i_isolate, interpreter_instance.ToHandleChecked(), &interpreter_thrower,
- 0, interpreter_args.get(), &possible_nondeterminism);
- }
+ testing::WasmInterpretationResult interpreter_result =
+ testing::InterpretWasmModule(i_isolate,
+ interpreter_instance.ToHandleChecked(), 0,
+ interpreter_args.get());
// Do not execute the generated code if the interpreter did not finish
// within a bounded number of steps.
- if (interpreter_thrower.error()) {
- return 0;
- }
+ if (interpreter_result.stopped()) return;
// The WebAssembly spec allows the sign bit of NaN to be non-deterministic.
// This sign bit can make the difference between an infinite loop and
@@ -341,12 +337,7 @@ int WasmExecutionFuzzer::FuzzWasmModule(Vector<const uint8_t> data,
// the generated code will not go into an infinite loop and cause a timeout in
// Clusterfuzz. Therefore we do not execute the generated code if the result
// may be non-deterministic.
- if (possible_nondeterminism) {
- return 0;
- }
-
- bool expect_exception =
- result_interpreter == static_cast<int32_t>(0xDEADBEEF);
+ if (interpreter_result.possible_nondeterminism()) return;
int32_t result_compiled;
{
@@ -362,17 +353,19 @@ int WasmExecutionFuzzer::FuzzWasmModule(Vector<const uint8_t> data,
"main", num_args, compiler_args.get());
}
- if (expect_exception != i_isolate->has_pending_exception()) {
+ if (interpreter_result.trapped() != i_isolate->has_pending_exception()) {
const char* exception_text[] = {"no exception", "exception"};
- FATAL("interpreter: %s; compiled: %s", exception_text[expect_exception],
+ FATAL("interpreter: %s; compiled: %s",
+ exception_text[interpreter_result.trapped()],
exception_text[i_isolate->has_pending_exception()]);
}
- if (!expect_exception) CHECK_EQ(result_interpreter, result_compiled);
+ if (!interpreter_result.trapped()) {
+ CHECK_EQ(interpreter_result.result(), result_compiled);
+ }
// Cleanup any pending exception.
i_isolate->clear_pending_exception();
- return 0;
}
} // namespace fuzzer
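After this refactoring, FuzzWasmModule reads as a straight-line differential test: interpret first, bail out on anything inconclusive, then compile and compare trap behavior and return values. Condensed shape of the flow (RunCompiledMain is a hypothetical stand-in for the SyncInstantiate/CallWasmFunctionForTesting block):

  auto r = testing::InterpretWasmModule(
      i_isolate, interpreter_instance.ToHandleChecked(), 0,
      interpreter_args.get());
  if (r.stopped()) return;                  // step bound or stack overflow
  if (r.possible_nondeterminism()) return;  // NaN sign bit could diverge
  int32_t compiled = RunCompiledMain();     // hypothetical helper
  CHECK_EQ(r.trapped(), i_isolate->has_pending_exception());
  if (!r.trapped()) CHECK_EQ(r.result(), compiled);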
diff --git a/deps/v8/test/fuzzer/wasm-fuzzer-common.h b/deps/v8/test/fuzzer/wasm-fuzzer-common.h
index 17bc70e91c..34b6e58479 100644
--- a/deps/v8/test/fuzzer/wasm-fuzzer-common.h
+++ b/deps/v8/test/fuzzer/wasm-fuzzer-common.h
@@ -17,8 +17,6 @@ namespace internal {
namespace wasm {
namespace fuzzer {
-int FuzzWasmSection(SectionCode section, const uint8_t* data, size_t size);
-
// First instantiates and interprets the "main" function within module_object if
// possible. If the interpretation finishes within kMaxSteps steps,
// module_object is instantiated again and the compiled "main" function is
@@ -32,7 +30,9 @@ void GenerateTestCase(Isolate* isolate, ModuleWireBytes wire_bytes,
class WasmExecutionFuzzer {
public:
virtual ~WasmExecutionFuzzer() = default;
- int FuzzWasmModule(Vector<const uint8_t> data, bool require_valid = false);
+ void FuzzWasmModule(Vector<const uint8_t> data, bool require_valid = false);
+
+ virtual size_t max_input_size() const { return 512; }
protected:
virtual bool GenerateModule(
diff --git a/deps/v8/test/fuzzer/wasm-globals-section.cc b/deps/v8/test/fuzzer/wasm-globals-section.cc
deleted file mode 100644
index 23a8dcf339..0000000000
--- a/deps/v8/test/fuzzer/wasm-globals-section.cc
+++ /dev/null
@@ -1,11 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/objects-inl.h"
-#include "test/fuzzer/wasm-fuzzer-common.h"
-
-extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
- return v8::internal::wasm::fuzzer::FuzzWasmSection(
- v8::internal::wasm::kGlobalSectionCode, data, size);
-}
diff --git a/deps/v8/test/fuzzer/wasm-imports-section.cc b/deps/v8/test/fuzzer/wasm-imports-section.cc
deleted file mode 100644
index a8f455eeb8..0000000000
--- a/deps/v8/test/fuzzer/wasm-imports-section.cc
+++ /dev/null
@@ -1,11 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/objects-inl.h"
-#include "test/fuzzer/wasm-fuzzer-common.h"
-
-extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
- return v8::internal::wasm::fuzzer::FuzzWasmSection(
- v8::internal::wasm::kImportSectionCode, data, size);
-}
diff --git a/deps/v8/test/fuzzer/wasm-memory-section.cc b/deps/v8/test/fuzzer/wasm-memory-section.cc
deleted file mode 100644
index b80545ae19..0000000000
--- a/deps/v8/test/fuzzer/wasm-memory-section.cc
+++ /dev/null
@@ -1,11 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/objects-inl.h"
-#include "test/fuzzer/wasm-fuzzer-common.h"
-
-extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
- return v8::internal::wasm::fuzzer::FuzzWasmSection(
- v8::internal::wasm::kMemorySectionCode, data, size);
-}
diff --git a/deps/v8/test/fuzzer/wasm-names-section.cc b/deps/v8/test/fuzzer/wasm-names-section.cc
deleted file mode 100644
index 6dec25555c..0000000000
--- a/deps/v8/test/fuzzer/wasm-names-section.cc
+++ /dev/null
@@ -1,12 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/objects-inl.h"
-#include "test/fuzzer/wasm-fuzzer-common.h"
-
-extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
- // TODO(titzer): Names section requires a preceding function section.
- return v8::internal::wasm::fuzzer::FuzzWasmSection(
- v8::internal::wasm::kNameSectionCode, data, size);
-}
diff --git a/deps/v8/test/fuzzer/wasm-types-section.cc b/deps/v8/test/fuzzer/wasm-types-section.cc
deleted file mode 100644
index e3c6b58da7..0000000000
--- a/deps/v8/test/fuzzer/wasm-types-section.cc
+++ /dev/null
@@ -1,11 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/objects-inl.h"
-#include "test/fuzzer/wasm-fuzzer-common.h"
-
-extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
- return v8::internal::wasm::fuzzer::FuzzWasmSection(
- v8::internal::wasm::kTypeSectionCode, data, size);
-}
diff --git a/deps/v8/test/fuzzer/wasm_globals_section/foo b/deps/v8/test/fuzzer/wasm_globals_section/foo
deleted file mode 100644
index e69de29bb2..0000000000
--- a/deps/v8/test/fuzzer/wasm_globals_section/foo
+++ /dev/null
diff --git a/deps/v8/test/fuzzer/wasm_imports_section/foo b/deps/v8/test/fuzzer/wasm_imports_section/foo
deleted file mode 100644
index e69de29bb2..0000000000
--- a/deps/v8/test/fuzzer/wasm_imports_section/foo
+++ /dev/null
diff --git a/deps/v8/test/fuzzer/wasm_memory_section/foo b/deps/v8/test/fuzzer/wasm_memory_section/foo
deleted file mode 100644
index e69de29bb2..0000000000
--- a/deps/v8/test/fuzzer/wasm_memory_section/foo
+++ /dev/null
diff --git a/deps/v8/test/fuzzer/wasm_names_section/foo b/deps/v8/test/fuzzer/wasm_names_section/foo
deleted file mode 100644
index e69de29bb2..0000000000
--- a/deps/v8/test/fuzzer/wasm_names_section/foo
+++ /dev/null
diff --git a/deps/v8/test/fuzzer/wasm_types_section/foo b/deps/v8/test/fuzzer/wasm_types_section/foo
deleted file mode 100644
index e69de29bb2..0000000000
--- a/deps/v8/test/fuzzer/wasm_types_section/foo
+++ /dev/null
diff --git a/deps/v8/test/inspector/BUILD.gn b/deps/v8/test/inspector/BUILD.gn
index fc3d51506f..f83c7d044d 100644
--- a/deps/v8/test/inspector/BUILD.gn
+++ b/deps/v8/test/inspector/BUILD.gn
@@ -44,7 +44,6 @@ v8_executable("inspector-test") {
"sessions/",
"testcfg.py",
"type-profiler/",
- "../../src/inspector/injected-script-source.js",
]
cflags = []
diff --git a/deps/v8/test/inspector/PRESUBMIT.py b/deps/v8/test/inspector/PRESUBMIT.py
deleted file mode 100644
index 9d6c2365ea..0000000000
--- a/deps/v8/test/inspector/PRESUBMIT.py
+++ /dev/null
@@ -1,25 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2018 the V8 project authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-"""
-See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
-for more details about the presubmit API built into gcl.
-"""
-
-
-def PostUploadHook(cl, change, output_api):
- """git cl upload will call this hook after the issue is created/modified.
-
- This hook adds extra try bots to the CL description in order to run layout
- tests in addition to CQ try bots.
- """
- return output_api.EnsureCQIncludeTrybotsAreAdded(
- cl,
- [
- 'master.tryserver.blink:linux_trusty_blink_rel',
- 'luci.chromium.try:linux_chromium_headless_rel',
- ],
- 'Automatically added layout test trybots to run tests on CQ.')
diff --git a/deps/v8/test/inspector/cpu-profiler/coverage-block.js b/deps/v8/test/inspector/cpu-profiler/coverage-block.js
index cba6d7d3d7..565fb5dbc6 100644
--- a/deps/v8/test/inspector/cpu-profiler/coverage-block.js
+++ b/deps/v8/test/inspector/cpu-profiler/coverage-block.js
@@ -3,6 +3,7 @@
// found in the LICENSE file.
// Flags: --allow-natives-syntax --no-always-opt --opt
+// Flags: --no-stress-flush-bytecode
var source =
`
diff --git a/deps/v8/test/inspector/cpu-profiler/coverage.js b/deps/v8/test/inspector/cpu-profiler/coverage.js
index dbad54b6d6..0ea0cceb93 100644
--- a/deps/v8/test/inspector/cpu-profiler/coverage.js
+++ b/deps/v8/test/inspector/cpu-profiler/coverage.js
@@ -3,6 +3,7 @@
// found in the LICENSE file.
// Flags: --allow-natives-syntax --no-always-opt --opt
+// Flags: --no-stress-flush-bytecode
var source =
`
diff --git a/deps/v8/test/inspector/debugger/async-chains-expected.txt b/deps/v8/test/inspector/debugger/async-chains-expected.txt
index e5a57fdb61..8fc641cff2 100644
--- a/deps/v8/test/inspector/debugger/async-chains-expected.txt
+++ b/deps/v8/test/inspector/debugger/async-chains-expected.txt
@@ -42,7 +42,7 @@ userFunction (test.js:1:36)
-- inner async --
runWithRegular (utils.js:2:12)
inner (test.js:2:28)
-runWithRegular (utils.js:21:4)
+(anonymous) (utils.js:21:4)
<external stack>
EmptyName
@@ -88,7 +88,7 @@ userFunction (test.js:1:36)
-- <empty> --
runWithEmptyName (utils.js:6:12)
inner (test.js:2:28)
-runWithRegular (utils.js:21:4)
+(anonymous) (utils.js:21:4)
<external stack>
EmptyStack
@@ -147,6 +147,6 @@ userFunction (test.js:1:36)
External
userFunction (test.js:1:36)
-runWithRegular (utils.js:21:4)
+(anonymous) (utils.js:21:4)
<external stack>
diff --git a/deps/v8/test/inspector/debugger/async-for-await-of-promise-stack-expected.txt b/deps/v8/test/inspector/debugger/async-for-await-of-promise-stack-expected.txt
index 0f8ff54b28..880a982c7c 100644
--- a/deps/v8/test/inspector/debugger/async-for-await-of-promise-stack-expected.txt
+++ b/deps/v8/test/inspector/debugger/async-for-await-of-promise-stack-expected.txt
@@ -4,7 +4,7 @@ Running test: testBasic
Debugger (test.js:10:2)
Basic (test.js:48:4)
-- async function --
-Basic (test.js:47:19)
+Basic (test.js:47:17)
(anonymous) (testBasic.js:0:0)
@@ -26,7 +26,7 @@ Running test: testCaughtReject
Debugger (test.js:10:2)
CaughtReject (test.js:76:4)
-- async function --
-CaughtReject (test.js:72:21)
+CaughtReject (test.js:72:19)
(anonymous) (testCaughtReject.js:0:0)
@@ -34,7 +34,7 @@ Running test: testCaughtThrow
Debugger (test.js:10:2)
CaughtThrow (test.js:86:4)
-- async function --
-CaughtThrow (test.js:82:21)
+CaughtThrow (test.js:82:19)
(anonymous) (testCaughtThrow.js:0:0)
@@ -53,6 +53,6 @@ Running test: testCaughtThrowOnBreak
Debugger (test.js:10:2)
CaughtThrowOnBreak (test.js:124:4)
-- async function --
-CaughtThrowOnBreak (test.js:120:21)
+CaughtThrowOnBreak (test.js:120:19)
(anonymous) (testCaughtThrowOnBreak.js:0:0)
diff --git a/deps/v8/test/inspector/debugger/async-function-step-out-expected.txt b/deps/v8/test/inspector/debugger/async-function-step-out-expected.txt
index 5f4092dc2a..28fae0cbfe 100644
--- a/deps/v8/test/inspector/debugger/async-function-step-out-expected.txt
+++ b/deps/v8/test/inspector/debugger/async-function-step-out-expected.txt
@@ -200,6 +200,11 @@ test (testStepIntoAtReturn.js:144:14)
test (testStepIntoAtReturn.js:143:14)
(anonymous) (:0:0)
+test (testStepIntoAtReturn.js:145:8)
+-- async function --
+test (testStepIntoAtReturn.js:143:14)
+(anonymous) (:0:0)
+
floodWithTimeouts (testStepIntoAtReturn.js:136:15)
-- setTimeout --
floodWithTimeouts (testStepIntoAtReturn.js:137:10)
@@ -207,3 +212,4 @@ floodWithTimeouts (testStepIntoAtReturn.js:137:10)
floodWithTimeouts (testStepIntoAtReturn.js:137:10)
test (testStepIntoAtReturn.js:142:8)
(anonymous) (:0:0)
+
diff --git a/deps/v8/test/inspector/debugger/async-function-step-out-optimized-expected.txt b/deps/v8/test/inspector/debugger/async-function-step-out-optimized-expected.txt
deleted file mode 100644
index b669237ac4..0000000000
--- a/deps/v8/test/inspector/debugger/async-function-step-out-optimized-expected.txt
+++ /dev/null
@@ -1,215 +0,0 @@
-stepOut async function
-
-Running test: testTrivial
-Check that we have proper async stack at return
-bar (testTrivial.js:30:8)
--- async function --
-bar (testTrivial.js:29:22)
-foo (testTrivial.js:25:14)
--- async function --
-foo (testTrivial.js:24:22)
-test (testTrivial.js:20:14)
--- async function --
-test (testTrivial.js:19:22)
-(anonymous) (:0:0)
-
-foo (testTrivial.js:26:6)
--- async function --
-foo (testTrivial.js:24:22)
-test (testTrivial.js:20:14)
--- async function --
-test (testTrivial.js:19:22)
-(anonymous) (:0:0)
-
-test (testTrivial.js:21:6)
--- async function --
-test (testTrivial.js:19:22)
-(anonymous) (:0:0)
-
-
-Running test: testStepOutPrecision
-Check that stepOut go to resumed outer generator
-bar (testStepOutPrecision.js:63:8)
--- async function --
-bar (testStepOutPrecision.js:62:22)
-foo (testStepOutPrecision.js:57:14)
--- async function --
-foo (testStepOutPrecision.js:56:22)
-test (testStepOutPrecision.js:50:14)
--- async function --
-test (testStepOutPrecision.js:49:14)
-(anonymous) (:0:0)
-
-foo (testStepOutPrecision.js:58:8)
--- async function --
-foo (testStepOutPrecision.js:56:22)
-test (testStepOutPrecision.js:50:14)
--- async function --
-test (testStepOutPrecision.js:49:14)
-(anonymous) (:0:0)
-
-test (testStepOutPrecision.js:51:8)
--- async function --
-test (testStepOutPrecision.js:49:14)
-(anonymous) (:0:0)
-
-floodWithTimeouts (testStepOutPrecision.js:42:15)
--- setTimeout --
-floodWithTimeouts (testStepOutPrecision.js:43:10)
--- setTimeout --
-floodWithTimeouts (testStepOutPrecision.js:43:10)
--- setTimeout --
-floodWithTimeouts (testStepOutPrecision.js:43:10)
--- setTimeout --
-floodWithTimeouts (testStepOutPrecision.js:43:10)
-test (testStepOutPrecision.js:48:8)
-(anonymous) (:0:0)
-
-test (testStepOutPrecision.js:52:8)
--- async function --
-test (testStepOutPrecision.js:49:14)
-(anonymous) (:0:0)
-
-floodWithTimeouts (testStepOutPrecision.js:42:15)
--- setTimeout --
-floodWithTimeouts (testStepOutPrecision.js:43:10)
--- setTimeout --
-floodWithTimeouts (testStepOutPrecision.js:43:10)
--- setTimeout --
-floodWithTimeouts (testStepOutPrecision.js:43:10)
--- setTimeout --
-floodWithTimeouts (testStepOutPrecision.js:43:10)
--- setTimeout --
-floodWithTimeouts (testStepOutPrecision.js:43:10)
-test (testStepOutPrecision.js:48:8)
-(anonymous) (:0:0)
-
-
-Running test: testStepIntoAtReturn
-Check that stepInto at return go to resumed outer generator
-bar (testStepIntoAtReturn.js:95:8)
--- async function --
-bar (testStepIntoAtReturn.js:94:22)
-foo (testStepIntoAtReturn.js:90:14)
--- async function --
-foo (testStepIntoAtReturn.js:89:22)
-test (testStepIntoAtReturn.js:84:14)
--- async function --
-test (testStepIntoAtReturn.js:83:14)
-(anonymous) (:0:0)
-
-bar (testStepIntoAtReturn.js:96:6)
--- async function --
-bar (testStepIntoAtReturn.js:94:22)
-foo (testStepIntoAtReturn.js:90:14)
--- async function --
-foo (testStepIntoAtReturn.js:89:22)
-test (testStepIntoAtReturn.js:84:14)
--- async function --
-test (testStepIntoAtReturn.js:83:14)
-(anonymous) (:0:0)
-
-foo (testStepIntoAtReturn.js:91:6)
--- async function --
-foo (testStepIntoAtReturn.js:89:22)
-test (testStepIntoAtReturn.js:84:14)
--- async function --
-test (testStepIntoAtReturn.js:83:14)
-(anonymous) (:0:0)
-
-test (testStepIntoAtReturn.js:85:8)
--- async function --
-test (testStepIntoAtReturn.js:83:14)
-(anonymous) (:0:0)
-
-test (testStepIntoAtReturn.js:86:6)
--- async function --
-test (testStepIntoAtReturn.js:83:14)
-(anonymous) (:0:0)
-
-floodWithTimeouts (testStepIntoAtReturn.js:76:15)
--- setTimeout --
-floodWithTimeouts (testStepIntoAtReturn.js:77:10)
--- setTimeout --
-floodWithTimeouts (testStepIntoAtReturn.js:77:10)
-test (testStepIntoAtReturn.js:82:8)
-(anonymous) (:0:0)
-
-
-Running test: testStepOverAtReturn
-Check that stepOver at return go to resumed outer generator
-bar (testStepIntoAtReturn.js:126:8)
--- async function --
-bar (testStepIntoAtReturn.js:125:22)
-foo (testStepIntoAtReturn.js:121:14)
--- async function --
-foo (testStepIntoAtReturn.js:120:22)
-test (testStepIntoAtReturn.js:115:14)
--- async function --
-test (testStepIntoAtReturn.js:114:14)
-(anonymous) (:0:0)
-
-bar (testStepIntoAtReturn.js:127:6)
--- async function --
-bar (testStepIntoAtReturn.js:125:22)
-foo (testStepIntoAtReturn.js:121:14)
--- async function --
-foo (testStepIntoAtReturn.js:120:22)
-test (testStepIntoAtReturn.js:115:14)
--- async function --
-test (testStepIntoAtReturn.js:114:14)
-(anonymous) (:0:0)
-
-foo (testStepIntoAtReturn.js:122:6)
--- async function --
-foo (testStepIntoAtReturn.js:120:22)
-test (testStepIntoAtReturn.js:115:14)
--- async function --
-test (testStepIntoAtReturn.js:114:14)
-(anonymous) (:0:0)
-
-test (testStepIntoAtReturn.js:116:8)
--- async function --
-test (testStepIntoAtReturn.js:114:14)
-(anonymous) (:0:0)
-
-test (testStepIntoAtReturn.js:117:6)
--- async function --
-test (testStepIntoAtReturn.js:114:14)
-(anonymous) (:0:0)
-
-floodWithTimeouts (testStepIntoAtReturn.js:107:15)
--- setTimeout --
-floodWithTimeouts (testStepIntoAtReturn.js:108:10)
--- setTimeout --
-floodWithTimeouts (testStepIntoAtReturn.js:108:10)
-test (testStepIntoAtReturn.js:113:8)
-(anonymous) (:0:0)
-
-
-Running test: testStepOutFromNotAwaitedCall
-Checks stepOut from not awaited call
-bar (testStepIntoAtReturn.js:160:8)
--- async function --
-bar (testStepIntoAtReturn.js:159:22)
-foo (testStepIntoAtReturn.js:154:8)
--- async function --
-foo (testStepIntoAtReturn.js:153:22)
-test (testStepIntoAtReturn.js:146:14)
--- async function --
-test (testStepIntoAtReturn.js:145:14)
-(anonymous) (:0:0)
-
-test (testStepIntoAtReturn.js:147:8)
--- async function --
-test (testStepIntoAtReturn.js:145:14)
-(anonymous) (:0:0)
-
-floodWithTimeouts (testStepIntoAtReturn.js:138:15)
--- setTimeout --
-floodWithTimeouts (testStepIntoAtReturn.js:139:10)
--- setTimeout --
-floodWithTimeouts (testStepIntoAtReturn.js:139:10)
-test (testStepIntoAtReturn.js:144:8)
-(anonymous) (:0:0)
-
diff --git a/deps/v8/test/inspector/debugger/async-function-step-out-optimized.js b/deps/v8/test/inspector/debugger/async-function-step-out-optimized.js
deleted file mode 100644
index 80b66c7138..0000000000
--- a/deps/v8/test/inspector/debugger/async-function-step-out-optimized.js
+++ /dev/null
@@ -1,187 +0,0 @@
-// Copyright 2018 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --harmony-await-optimization
-
-let {session, contextGroup, Protocol} =
- InspectorTest.start('stepOut async function');
-
-session.setupScriptMap();
-
-Protocol.Runtime.enable();
-
-InspectorTest.runAsyncTestSuite([
- async function testTrivial() {
- InspectorTest.log('Check that we have proper async stack at return');
- contextGroup.addInlineScript(`
- async function test() {
- await Promise.resolve();
- await foo();
- }
-
- async function foo() {
- await Promise.resolve();
- await bar();
- }
-
- async function bar() {
- await Promise.resolve();
- debugger;
- }`, 'testTrivial.js');
- await runTestAndStepAction('stepOut');
- },
-
- async function testStepOutPrecision() {
- InspectorTest.log('Check that stepOut go to resumed outer generator');
- contextGroup.addInlineScript(`
- function wait() {
- return new Promise(resolve => setTimeout(resolve, 0));
- }
- function floodWithTimeouts(a) {
- if (!a.stop)
- setTimeout(floodWithTimeouts.bind(this, a), 0);
- }
-
- async function test() {
- let a = {};
- floodWithTimeouts(a)
- await wait();
- await foo();
- await wait();
- a.stop = true;
- }
-
- async function foo() {
- await Promise.resolve();
- await bar();
- await wait();
- }
-
- async function bar() {
- await Promise.resolve();
- debugger;
- await wait();
- }`, 'testStepOutPrecision.js');
- await runTestAndStepAction('stepOut');
- },
-
- async function testStepIntoAtReturn() {
- InspectorTest.log('Check that stepInto at return go to resumed outer generator');
- contextGroup.addInlineScript(`
- function wait() {
- return new Promise(resolve => setTimeout(resolve, 0));
- }
- function floodWithTimeouts(a) {
- if (!a.stop)
- setTimeout(floodWithTimeouts.bind(this, a), 0);
- }
-
- async function test() {
- let a = {};
- floodWithTimeouts(a)
- await wait();
- await foo();
- a.stop = true;
- }
-
- async function foo() {
- await Promise.resolve();
- await bar();
- }
-
- async function bar() {
- await Promise.resolve();
- debugger;
- }`, 'testStepIntoAtReturn.js');
- await runTestAndStepAction('stepInto');
- },
-
- async function testStepOverAtReturn() {
- InspectorTest.log('Check that stepOver at return go to resumed outer generator');
- contextGroup.addInlineScript(`
- function wait() {
- return new Promise(resolve => setTimeout(resolve, 0));
- }
- function floodWithTimeouts(a) {
- if (!a.stop)
- setTimeout(floodWithTimeouts.bind(this, a), 0);
- }
-
- async function test() {
- let a = {};
- floodWithTimeouts(a)
- await wait();
- await foo();
- a.stop = true;
- }
-
- async function foo() {
- await Promise.resolve();
- await bar();
- }
-
- async function bar() {
- await Promise.resolve();
- debugger;
- }`, 'testStepIntoAtReturn.js');
- await runTestAndStepAction('stepOver');
- },
-
- async function testStepOutFromNotAwaitedCall() {
- InspectorTest.log('Checks stepOut from not awaited call');
- contextGroup.addInlineScript(`
- function wait() {
- return new Promise(resolve => setTimeout(resolve, 0));
- }
- function floodWithTimeouts(a) {
- if (!a.stop)
- setTimeout(floodWithTimeouts.bind(this, a), 0);
- }
-
- async function test() {
- let a = {};
- floodWithTimeouts(a)
- await wait();
- await foo();
- a.stop = true;
- }
-
- async function foo() {
- let a = {};
- floodWithTimeouts(a);
- await Promise.resolve();
- bar();
- a.stop = true;
- }
-
- async function bar() {
- await Promise.resolve();
- debugger;
- }`, 'testStepIntoAtReturn.js');
- await runTestAndStepAction('stepOut');
- }
-
-]);
-
-async function runTestAndStepAction(action) {
- Protocol.Debugger.enable();
- Protocol.Debugger.setAsyncCallStackDepth({maxDepth: 128});
- let finished =
- Protocol.Runtime.evaluate({expression: 'test()', awaitPromise: true})
- .then(() => false);
- while (true) {
- const r = await Promise.race([finished, waitPauseAndDumpStack()]);
- if (!r) break;
- Protocol.Debugger[action]();
- }
- await Protocol.Debugger.disable();
-}
-
-async function waitPauseAndDumpStack() {
- const {params} = await Protocol.Debugger.oncePaused();
- session.logCallFrames(params.callFrames);
- session.logAsyncStackTrace(params.asyncStackTrace);
- InspectorTest.log('');
- return true;
-}
diff --git a/deps/v8/test/inspector/debugger/async-stack-created-frame-expected.txt b/deps/v8/test/inspector/debugger/async-stack-created-frame-expected.txt
index d555fb84de..21e7dc1632 100644
--- a/deps/v8/test/inspector/debugger/async-stack-created-frame-expected.txt
+++ b/deps/v8/test/inspector/debugger/async-stack-created-frame-expected.txt
@@ -80,11 +80,11 @@ thenableJob2 (test.js:64:57)
Running test: testSetTimeouts
foo1 (test.js:10:2)
-setTimeout (test.js:72:25)
+(anonymous) (test.js:72:25)
-- setTimeout --
-setTimeout (test.js:72:6)
+(anonymous) (test.js:72:6)
-- setTimeout --
-setTimeout (test.js:71:4)
+(anonymous) (test.js:71:4)
-- setTimeout --
setTimeouts (test.js:70:2)
(anonymous) (expr.js:0:0)
diff --git a/deps/v8/test/inspector/debugger/async-stack-for-promise-expected.txt b/deps/v8/test/inspector/debugger/async-stack-for-promise-expected.txt
index 14a357659b..86860fdb39 100644
--- a/deps/v8/test/inspector/debugger/async-stack-for-promise-expected.txt
+++ b/deps/v8/test/inspector/debugger/async-stack-for-promise-expected.txt
@@ -101,11 +101,11 @@ foo1 (test.js:156:4)
complex (test.js:202:5)
(anonymous) (testComplex.js:0:0)
-p.then (test.js:207:8)
+(anonymous) (test.js:207:8)
-- Promise.then --
-p.then (test.js:206:8)
+(anonymous) (test.js:206:8)
-- Promise.then --
-setTimeout (test.js:205:6)
+(anonymous) (test.js:205:6)
-- setTimeout --
complex (test.js:204:2)
(anonymous) (testComplex.js:0:0)
diff --git a/deps/v8/test/inspector/debugger/async-stack-load-more-expected.txt b/deps/v8/test/inspector/debugger/async-stack-load-more-expected.txt
index 3213c6973c..80225b1618 100644
--- a/deps/v8/test/inspector/debugger/async-stack-load-more-expected.txt
+++ b/deps/v8/test/inspector/debugger/async-stack-load-more-expected.txt
@@ -1,5 +1,5 @@
Tests super long async stacks.
-callWithAsyncStack (expr.js:0:26)
+(anonymous) (expr.js:0:26)
callWithAsyncStack (utils.js:3:4)
call1 (wrapper.js:0:20)
--Promise.then--
diff --git a/deps/v8/test/inspector/debugger/break-locations-await-expected.txt b/deps/v8/test/inspector/debugger/break-locations-await-expected.txt
index 15304c2073..17008f3550 100644
--- a/deps/v8/test/inspector/debugger/break-locations-await-expected.txt
+++ b/deps/v8/test/inspector/debugger/break-locations-await-expected.txt
@@ -5,7 +5,7 @@ Running test: testBreakLocations
function testFunction() {
async function f1() {
for (let x = |_|0; x |_|< 1; ++|_|x) |_|await x;
- |_|return |_|await Promise.|C|resolve(2);
+ |_|return |_|await Promise.|C|resolve(2);|R|
|R|}
async function f2() {
@@ -16,7 +16,7 @@ function testFunction() {
|_|await Promise.|C|resolve().|C|then(x => x |_|* 2|R|);
let p = |_|Promise.|C|resolve(42);
|_|await p;
- |_|return r;
+ |_|return r;|R|
|R|}
return |C|f2();|R|
@@ -79,10 +79,10 @@ f1 (test.js:4:4)
#return await Promise.resolve(2);
}
-f1 (test.js:5:2)
- return await Promise.resolve(2);
- #}
-
+f1 (test.js:4:36)
+ for (let x = 0; x < 1; ++x) await x;
+ return await Promise.resolve(2);#
+ }
f2 (test.js:8:31)
async function f2() {
@@ -122,10 +122,10 @@ f1 (test.js:4:4)
#return await Promise.resolve(2);
}
-f1 (test.js:5:2)
- return await Promise.resolve(2);
- #}
-
+f1 (test.js:4:36)
+ for (let x = 0; x < 1; ++x) await x;
+ return await Promise.resolve(2);#
+ }
f2 (test.js:9:4)
let r = await f1() + await f1();
@@ -165,10 +165,10 @@ f1 (test.js:4:4)
#return await Promise.resolve(2);
}
-f1 (test.js:5:2)
- return await Promise.resolve(2);
- #}
-
+f1 (test.js:4:36)
+ for (let x = 0; x < 1; ++x) await x;
+ return await Promise.resolve(2);#
+ }
f2 (test.js:10:4)
await f1();
@@ -208,17 +208,17 @@ f1 (test.js:4:4)
#return await Promise.resolve(2);
}
-f1 (test.js:5:2)
- return await Promise.resolve(2);
- #}
-
+f1 (test.js:4:36)
+ for (let x = 0; x < 1; ++x) await x;
+ return await Promise.resolve(2);#
+ }
-f1.then.x (test.js:10:27)
+(anonymous) (test.js:10:27)
await f1();
await f1().then(x => x #* 2);
await [1].map(x => Promise.resolve(x))[0];
-f1.then.x (test.js:10:30)
+(anonymous) (test.js:10:30)
await f1();
await f1().then(x => x * 2#);
await [1].map(x => Promise.resolve(x))[0];
@@ -228,13 +228,13 @@ f2 (test.js:11:4)
#await [1].map(x => Promise.resolve(x))[0];
await Promise.resolve().then(x => x * 2);
-map.x (test.js:11:31)
+(anonymous) (test.js:11:31)
f2 (test.js:11:14)
await f1().then(x => x * 2);
await [1].map(x => Promise.#resolve(x))[0];
await Promise.resolve().then(x => x * 2);
-map.x (test.js:11:41)
+(anonymous) (test.js:11:41)
f2 (test.js:11:14)
await f1().then(x => x * 2);
await [1].map(x => Promise.resolve(x)#)[0];
@@ -260,10 +260,10 @@ f2 (test.js:15:4)
#return r;
}
-f2 (test.js:16:2)
- return r;
- #}
-
+f2 (test.js:15:13)
+ await p;
+ return r;#
+ }
Running test: testStepOver
@@ -314,10 +314,10 @@ f2 (test.js:15:4)
#return r;
}
-f2 (test.js:16:2)
- return r;
- #}
-
+f2 (test.js:15:13)
+ await p;
+ return r;#
+ }
Running test: testStepIntoAfterBreakpoint
diff --git a/deps/v8/test/inspector/debugger/break-locations-var-init-expected.txt b/deps/v8/test/inspector/debugger/break-locations-var-init-expected.txt
index 2242088763..24bda366a9 100644
--- a/deps/v8/test/inspector/debugger/break-locations-var-init-expected.txt
+++ b/deps/v8/test/inspector/debugger/break-locations-var-init-expected.txt
@@ -15,12 +15,12 @@ function testFunction() {
var y = |_|(a = 100);
var z = |_|x + (a = 1) + (a = 2) + (a = 3) + |C|f();
function f() {
- for (let { x, y } = |_|{ x: 0, y: 1 }; y |_|> 0; --|_|y) { let z = |_|x + y; }
+ for (let { |_|x, |_|y } = |_|{ x: 0, y: 1 }; y |_|> 0; --|_|y) { let z = |_|x + y; }
|R|}
var b = obj1.|_|a;
|_|(async function asyncF() {
let r = |_|await Promise.|C|resolve(42);
- |_|return r;
+ |_|return r;|R|
|R|})|C|();
|_|return promise;|R|
}
@@ -89,6 +89,20 @@ testFunction (test.js:10:44)
for (let { x, y } = #{ x: 0, y: 1 }; y > 0; --y) { let z = x + y; }
}
+f (test.js:12:15)
+testFunction (test.js:10:44)
+(anonymous) (expr.js:0:0)
+ function f() {
+ for (let { #x, y } = { x: 0, y: 1 }; y > 0; --y) { let z = x + y; }
+ }
+
+f (test.js:12:18)
+testFunction (test.js:10:44)
+(anonymous) (expr.js:0:0)
+ function f() {
+ for (let { x, #y } = { x: 0, y: 1 }; y > 0; --y) { let z = x + y; }
+ }
+
f (test.js:12:42)
testFunction (test.js:10:44)
(anonymous) (expr.js:0:0)
@@ -148,10 +162,40 @@ asyncF (test.js:17:4)
#return r;
})();
-asyncF (test.js:18:2)
- return r;
- #})();
- return promise;
+asyncF (test.js:17:13)
+ let r = await Promise.resolve(42);
+ return r;#
+ })();
+
+(anonymous) (test.js:4:64)
+ var arr1 = [1];
+ var promise = Promise.resolve(1).then(x => x * 2).then(x => x #/ 2);
+ Promise.resolve(1).then(x => x * 2).then(x => x / 2);
+
+(anonymous) (test.js:4:67)
+ var arr1 = [1];
+ var promise = Promise.resolve(1).then(x => x * 2).then(x => x / 2#);
+ Promise.resolve(1).then(x => x * 2).then(x => x / 2);
+
+(anonymous) (test.js:5:50)
+ var promise = Promise.resolve(1).then(x => x * 2).then(x => x / 2);
+ Promise.resolve(1).then(x => x * 2).then(x => x #/ 2);
+ promise = Promise.resolve(1).then(x => x * 2).then(x => x / 2);
+
+(anonymous) (test.js:5:53)
+ var promise = Promise.resolve(1).then(x => x * 2).then(x => x / 2);
+ Promise.resolve(1).then(x => x * 2).then(x => x / 2#);
+ promise = Promise.resolve(1).then(x => x * 2).then(x => x / 2);
+
+(anonymous) (test.js:6:60)
+ Promise.resolve(1).then(x => x * 2).then(x => x / 2);
+ promise = Promise.resolve(1).then(x => x * 2).then(x => x #/ 2);
+ var a = 1;
+
+(anonymous) (test.js:6:63)
+ Promise.resolve(1).then(x => x * 2).then(x => x / 2);
+ promise = Promise.resolve(1).then(x => x * 2).then(x => x / 2#);
+ var a = 1;
Running test: testStepIntoAfterBreakpoint
diff --git a/deps/v8/test/inspector/debugger/break-locations-var-init-optimized-expected.txt b/deps/v8/test/inspector/debugger/break-locations-var-init-optimized-expected.txt
deleted file mode 100644
index 5ca86ea579..0000000000
--- a/deps/v8/test/inspector/debugger/break-locations-var-init-optimized-expected.txt
+++ /dev/null
@@ -1,200 +0,0 @@
-Tests breakable locations in variable initializations.
-
-Running test: testBreakLocations
-
-Running test: testStepInto
-
-function testFunction() {
- var obj1 = |_|{a : 1};
- var arr1 = |_|[1];
- var promise = |_|Promise.|C|resolve(1).|C|then(x => x |_|* 2|R|).|C|then(x => x |_|/ 2|R|);
- |_|Promise.|C|resolve(1).|C|then(x => x |_|* 2|R|).|C|then(x => x |_|/ 2|R|);
- |_|promise = Promise.|C|resolve(1).|C|then(x => x |_|* 2|R|).|C|then(x => x |_|/ 2|R|);
- var a = |_|1;
- const x = |_|(a = 20);
- var y = |_|(a = 100);
- var z = |_|x + (a = 1) + (a = 2) + (a = 3) + |C|f();
- function f() {
- for (let { x, y } = |_|{ x: 0, y: 1 }; y |_|> 0; --|_|y) { let z = |_|x + y; }
- |R|}
- var b = obj1.|_|a;
- |_|(async function asyncF() {
- let r = |_|await Promise.|C|resolve(42);
- |_|return r;
- |R|})|C|();
- |_|return promise;|R|
-}
-
-(anonymous) (expr.js:0:0)
-
-
-testFunction (test.js:2:13)
-(anonymous) (expr.js:0:0)
-function testFunction() {
- var obj1 = #{a : 1};
- var arr1 = [1];
-
-testFunction (test.js:3:13)
-(anonymous) (expr.js:0:0)
- var obj1 = {a : 1};
- var arr1 = #[1];
- var promise = Promise.resolve(1).then(x => x * 2).then(x => x / 2);
-
-testFunction (test.js:4:16)
-(anonymous) (expr.js:0:0)
- var arr1 = [1];
- var promise = #Promise.resolve(1).then(x => x * 2).then(x => x / 2);
- Promise.resolve(1).then(x => x * 2).then(x => x / 2);
-
-testFunction (test.js:5:2)
-(anonymous) (expr.js:0:0)
- var promise = Promise.resolve(1).then(x => x * 2).then(x => x / 2);
- #Promise.resolve(1).then(x => x * 2).then(x => x / 2);
- promise = Promise.resolve(1).then(x => x * 2).then(x => x / 2);
-
-testFunction (test.js:6:2)
-(anonymous) (expr.js:0:0)
- Promise.resolve(1).then(x => x * 2).then(x => x / 2);
- #promise = Promise.resolve(1).then(x => x * 2).then(x => x / 2);
- var a = 1;
-
-testFunction (test.js:7:10)
-(anonymous) (expr.js:0:0)
- promise = Promise.resolve(1).then(x => x * 2).then(x => x / 2);
- var a = #1;
- const x = (a = 20);
-
-testFunction (test.js:8:12)
-(anonymous) (expr.js:0:0)
- var a = 1;
- const x = #(a = 20);
- var y = (a = 100);
-
-testFunction (test.js:9:10)
-(anonymous) (expr.js:0:0)
- const x = (a = 20);
- var y = #(a = 100);
- var z = x + (a = 1) + (a = 2) + (a = 3) + f();
-
-testFunction (test.js:10:10)
-(anonymous) (expr.js:0:0)
- var y = (a = 100);
- var z = #x + (a = 1) + (a = 2) + (a = 3) + f();
- function f() {
-
-f (test.js:12:24)
-testFunction (test.js:10:44)
-(anonymous) (expr.js:0:0)
- function f() {
- for (let { x, y } = #{ x: 0, y: 1 }; y > 0; --y) { let z = x + y; }
- }
-
-f (test.js:12:42)
-testFunction (test.js:10:44)
-(anonymous) (expr.js:0:0)
- function f() {
- for (let { x, y } = { x: 0, y: 1 }; y #> 0; --y) { let z = x + y; }
- }
-
-f (test.js:12:62)
-testFunction (test.js:10:44)
-(anonymous) (expr.js:0:0)
- function f() {
- for (let { x, y } = { x: 0, y: 1 }; y > 0; --y) { let z = #x + y; }
- }
-
-f (test.js:12:49)
-testFunction (test.js:10:44)
-(anonymous) (expr.js:0:0)
- function f() {
- for (let { x, y } = { x: 0, y: 1 }; y > 0; --#y) { let z = x + y; }
- }
-
-f (test.js:12:42)
-testFunction (test.js:10:44)
-(anonymous) (expr.js:0:0)
- function f() {
- for (let { x, y } = { x: 0, y: 1 }; y #> 0; --y) { let z = x + y; }
- }
-
-f (test.js:13:2)
-testFunction (test.js:10:44)
-(anonymous) (expr.js:0:0)
- for (let { x, y } = { x: 0, y: 1 }; y > 0; --y) { let z = x + y; }
- #}
- var b = obj1.a;
-
-testFunction (test.js:14:15)
-(anonymous) (expr.js:0:0)
- }
- var b = obj1.#a;
- (async function asyncF() {
-
-testFunction (test.js:15:2)
-(anonymous) (expr.js:0:0)
- var b = obj1.a;
- #(async function asyncF() {
- let r = await Promise.resolve(42);
-
-asyncF (test.js:16:12)
-testFunction (test.js:18:4)
-(anonymous) (expr.js:0:0)
- (async function asyncF() {
- let r = #await Promise.resolve(42);
- return r;
-
-asyncF (test.js:17:4)
- let r = await Promise.resolve(42);
- #return r;
- })();
-
-asyncF (test.js:18:2)
- return r;
- #})();
- return promise;
-
-Promise.resolve.then.then.x (test.js:4:64)
- var arr1 = [1];
- var promise = Promise.resolve(1).then(x => x * 2).then(x => x #/ 2);
- Promise.resolve(1).then(x => x * 2).then(x => x / 2);
-
-Promise.resolve.then.then.x (test.js:4:67)
- var arr1 = [1];
- var promise = Promise.resolve(1).then(x => x * 2).then(x => x / 2#);
- Promise.resolve(1).then(x => x * 2).then(x => x / 2);
-
-Promise.resolve.then.then.x (test.js:5:50)
- var promise = Promise.resolve(1).then(x => x * 2).then(x => x / 2);
- Promise.resolve(1).then(x => x * 2).then(x => x #/ 2);
- promise = Promise.resolve(1).then(x => x * 2).then(x => x / 2);
-
-Promise.resolve.then.then.x (test.js:5:53)
- var promise = Promise.resolve(1).then(x => x * 2).then(x => x / 2);
- Promise.resolve(1).then(x => x * 2).then(x => x / 2#);
- promise = Promise.resolve(1).then(x => x * 2).then(x => x / 2);
-
-Promise.resolve.then.then.x (test.js:6:60)
- Promise.resolve(1).then(x => x * 2).then(x => x / 2);
- promise = Promise.resolve(1).then(x => x * 2).then(x => x #/ 2);
- var a = 1;
-
-Promise.resolve.then.then.x (test.js:6:63)
- Promise.resolve(1).then(x => x * 2).then(x => x / 2);
- promise = Promise.resolve(1).then(x => x * 2).then(x => x / 2#);
- var a = 1;
-
-
-Running test: testStepIntoAfterBreakpoint
-testFunction (test.js:10:10)
-(anonymous) (expr.js:0:0)
- var y = (a = 100);
- var z = #x + (a = 1) + (a = 2) + (a = 3) + f();
- function f() {
-
-f (test.js:12:24)
-testFunction (test.js:10:44)
-(anonymous) (expr.js:0:0)
- function f() {
- for (let { x, y } = #{ x: 0, y: 1 }; y > 0; --y) { let z = x + y; }
- }
-
diff --git a/deps/v8/test/inspector/debugger/break-locations-var-init-optimized.js b/deps/v8/test/inspector/debugger/break-locations-var-init-optimized.js
deleted file mode 100644
index f9850606c4..0000000000
--- a/deps/v8/test/inspector/debugger/break-locations-var-init-optimized.js
+++ /dev/null
@@ -1,76 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --harmony-await-optimization
-
-let {session, contextGroup, Protocol} =
- InspectorTest.start('Tests breakable locations in variable initializations.');
-
-let source = `
-function testFunction() {
- var obj1 = {a : 1};
- var arr1 = [1];
- var promise = Promise.resolve(1).then(x => x * 2).then(x => x / 2);
- Promise.resolve(1).then(x => x * 2).then(x => x / 2);
- promise = Promise.resolve(1).then(x => x * 2).then(x => x / 2);
- var a = 1;
- const x = (a = 20);
- var y = (a = 100);
- var z = x + (a = 1) + (a = 2) + (a = 3) + f();
- function f() {
- for (let { x, y } = { x: 0, y: 1 }; y > 0; --y) { let z = x + y; }
- }
- var b = obj1.a;
- (async function asyncF() {
- let r = await Promise.resolve(42);
- return r;
- })();
- return promise;
-}
-//# sourceURL=test.js`;
-
-contextGroup.addScript(source);
-session.setupScriptMap();
-
-InspectorTest.runAsyncTestSuite([
- async function testBreakLocations() {
- Protocol.Debugger.enable();
- let {params:{scriptId}} = await Protocol.Debugger.onceScriptParsed();
- let {result:{locations}} = await Protocol.Debugger.getPossibleBreakpoints({
- start: {lineNumber: 0, columnNumber : 0, scriptId}});
- session.logBreakLocations(locations);
- },
-
- async function testStepInto() {
- Protocol.Debugger.pause();
- let fin = Protocol.Runtime.evaluate({
- expression: 'testFunction()//# sourceURL=expr.js', awaitPromise: true}).then(() => false);
- let result;
- while (result = await Promise.race([fin, Protocol.Debugger.oncePaused()])) {
- let {params:{callFrames}} = result;
- session.logCallFrames(callFrames);
- session.logSourceLocation(callFrames[0].location);
- Protocol.Debugger.stepInto();
- }
- Protocol.Runtime.evaluate({expression: '42'});
- await Protocol.Debugger.oncePaused();
- await Protocol.Debugger.resume();
- },
-
- async function testStepIntoAfterBreakpoint() {
- Protocol.Debugger.setBreakpointByUrl({lineNumber: 10, url: 'test.js'});
- Protocol.Runtime.evaluate({
- expression: 'testFunction()//# sourceURL=expr.js'});
- await awaitPausedAndDump();
- Protocol.Debugger.stepInto();
- await awaitPausedAndDump();
- await Protocol.Debugger.resume();
-
- async function awaitPausedAndDump() {
- let {params:{callFrames}} = await Protocol.Debugger.oncePaused();
- session.logCallFrames(callFrames);
- session.logSourceLocation(callFrames[0].location);
- }
- }
-]);
diff --git a/deps/v8/test/inspector/debugger/class-fields-scopes-expected.txt b/deps/v8/test/inspector/debugger/class-fields-scopes-expected.txt
new file mode 100644
index 0000000000..755be67cba
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/class-fields-scopes-expected.txt
@@ -0,0 +1,977 @@
+Test class fields scopes
+
+Running test: testScopesPaused
+[
+ [0] : {
+ callFrameId : <callFrameId>
+ functionLocation : {
+ columnNumber : 14
+ lineNumber : 2
+ scriptId : <scriptId>
+ }
+ functionName : foo
+ location : {
+ columnNumber : 4
+ lineNumber : 3
+ scriptId : <scriptId>
+ }
+ scopeChain : [
+ [0] : {
+ endLocation : {
+ columnNumber : 3
+ lineNumber : 5
+ scriptId : <scriptId>
+ }
+ name : foo
+ object : {
+ className : Object
+ description : Object
+ objectId : <objectId>
+ type : object
+ }
+ startLocation : {
+ columnNumber : 14
+ lineNumber : 2
+ scriptId : <scriptId>
+ }
+ type : local
+ }
+ [1] : {
+ endLocation : {
+ columnNumber : 1
+ lineNumber : 19
+ scriptId : <scriptId>
+ }
+ name : run
+ object : {
+ className : Object
+ description : Object
+ objectId : <objectId>
+ type : object
+ }
+ startLocation : {
+ columnNumber : 12
+ lineNumber : 1
+ scriptId : <scriptId>
+ }
+ type : closure
+ }
+ [2] : {
+ object : {
+ className : global
+ description : global
+ objectId : <objectId>
+ type : object
+ }
+ type : global
+ }
+ ]
+ this : {
+ className : global
+ description : global
+ objectId : <objectId>
+ type : object
+ }
+ url :
+ }
+ [1] : {
+ callFrameId : <callFrameId>
+ functionLocation : {
+ columnNumber : 12
+ lineNumber : 1
+ scriptId : <scriptId>
+ }
+ functionName : run
+ location : {
+ columnNumber : 5
+ lineNumber : 13
+ scriptId : <scriptId>
+ }
+ scopeChain : [
+ [0] : {
+ endLocation : {
+ columnNumber : 3
+ lineNumber : 15
+ scriptId : <scriptId>
+ }
+ name : run
+ object : {
+ className : Object
+ description : Object
+ objectId : <objectId>
+ type : object
+ }
+ startLocation : {
+ columnNumber : 9
+ lineNumber : 11
+ scriptId : <scriptId>
+ }
+ type : block
+ }
+ [1] : {
+ endLocation : {
+ columnNumber : 1
+ lineNumber : 19
+ scriptId : <scriptId>
+ }
+ name : run
+ object : {
+ className : Object
+ description : Object
+ objectId : <objectId>
+ type : object
+ }
+ startLocation : {
+ columnNumber : 12
+ lineNumber : 1
+ scriptId : <scriptId>
+ }
+ type : local
+ }
+ [2] : {
+ object : {
+ className : global
+ description : global
+ objectId : <objectId>
+ type : object
+ }
+ type : global
+ }
+ ]
+ this : {
+ className : global
+ description : global
+ objectId : <objectId>
+ type : object
+ }
+ url :
+ }
+ [2] : {
+ callFrameId : <callFrameId>
+ functionLocation : {
+ columnNumber : 0
+ lineNumber : 0
+ scriptId : <scriptId>
+ }
+ functionName :
+ location : {
+ columnNumber : 0
+ lineNumber : 0
+ scriptId : <scriptId>
+ }
+ scopeChain : [
+ [0] : {
+ object : {
+ className : global
+ description : global
+ objectId : <objectId>
+ type : object
+ }
+ type : global
+ }
+ ]
+ this : {
+ className : global
+ description : global
+ objectId : <objectId>
+ type : object
+ }
+ url :
+ }
+]
+[
+ [0] : {
+ callFrameId : <callFrameId>
+ functionLocation : {
+ columnNumber : 4
+ lineNumber : 12
+ scriptId : <scriptId>
+ }
+ functionName : <instance_members_initializer>
+ location : {
+ columnNumber : 8
+ lineNumber : 12
+ scriptId : <scriptId>
+ }
+ scopeChain : [
+ ]
+ this : {
+ className : X
+ description : X
+ objectId : <objectId>
+ type : object
+ }
+ url :
+ }
+ [1] : {
+ callFrameId : <callFrameId>
+ functionLocation : {
+ columnNumber : 2
+ lineNumber : 11
+ scriptId : <scriptId>
+ }
+ functionName : X
+ location : {
+ columnNumber : 2
+ lineNumber : 11
+ scriptId : <scriptId>
+ }
+ scopeChain : [
+ [0] : {
+ endLocation : {
+ columnNumber : 2
+ lineNumber : 11
+ scriptId : <scriptId>
+ }
+ name : X
+ object : {
+ className : Object
+ description : Object
+ objectId : <objectId>
+ type : object
+ }
+ startLocation : {
+ columnNumber : 2
+ lineNumber : 11
+ scriptId : <scriptId>
+ }
+ type : local
+ }
+ [1] : {
+ endLocation : {
+ columnNumber : 1
+ lineNumber : 19
+ scriptId : <scriptId>
+ }
+ name : run
+ object : {
+ className : Object
+ description : Object
+ objectId : <objectId>
+ type : object
+ }
+ startLocation : {
+ columnNumber : 12
+ lineNumber : 1
+ scriptId : <scriptId>
+ }
+ type : closure
+ }
+ [2] : {
+ object : {
+ className : global
+ description : global
+ objectId : <objectId>
+ type : object
+ }
+ type : global
+ }
+ ]
+ this : {
+ className : X
+ description : X
+ objectId : <objectId>
+ type : object
+ }
+ url :
+ }
+ [2] : {
+ callFrameId : <callFrameId>
+ functionLocation : {
+ columnNumber : 12
+ lineNumber : 1
+ scriptId : <scriptId>
+ }
+ functionName : run
+ location : {
+ columnNumber : 2
+ lineNumber : 18
+ scriptId : <scriptId>
+ }
+ scopeChain : [
+ [0] : {
+ endLocation : {
+ columnNumber : 1
+ lineNumber : 19
+ scriptId : <scriptId>
+ }
+ name : run
+ object : {
+ className : Object
+ description : Object
+ objectId : <objectId>
+ type : object
+ }
+ startLocation : {
+ columnNumber : 12
+ lineNumber : 1
+ scriptId : <scriptId>
+ }
+ type : local
+ }
+ [1] : {
+ object : {
+ className : global
+ description : global
+ objectId : <objectId>
+ type : object
+ }
+ type : global
+ }
+ ]
+ this : {
+ className : global
+ description : global
+ objectId : <objectId>
+ type : object
+ }
+ url :
+ }
+ [3] : {
+ callFrameId : <callFrameId>
+ functionLocation : {
+ columnNumber : 0
+ lineNumber : 0
+ scriptId : <scriptId>
+ }
+ functionName :
+ location : {
+ columnNumber : 0
+ lineNumber : 0
+ scriptId : <scriptId>
+ }
+ scopeChain : [
+ [0] : {
+ object : {
+ className : global
+ description : global
+ objectId : <objectId>
+ type : object
+ }
+ type : global
+ }
+ ]
+ this : {
+ className : global
+ description : global
+ objectId : <objectId>
+ type : object
+ }
+ url :
+ }
+]
+[
+ [0] : {
+ callFrameId : <callFrameId>
+ functionLocation : {
+ columnNumber : 4
+ lineNumber : 12
+ scriptId : <scriptId>
+ }
+ functionName : <instance_members_initializer>
+ location : {
+ columnNumber : 14
+ lineNumber : 13
+ scriptId : <scriptId>
+ }
+ scopeChain : [
+ ]
+ this : {
+ className : X
+ description : X
+ objectId : <objectId>
+ type : object
+ }
+ url :
+ }
+ [1] : {
+ callFrameId : <callFrameId>
+ functionLocation : {
+ columnNumber : 2
+ lineNumber : 11
+ scriptId : <scriptId>
+ }
+ functionName : X
+ location : {
+ columnNumber : 2
+ lineNumber : 11
+ scriptId : <scriptId>
+ }
+ scopeChain : [
+ [0] : {
+ endLocation : {
+ columnNumber : 2
+ lineNumber : 11
+ scriptId : <scriptId>
+ }
+ name : X
+ object : {
+ className : Object
+ description : Object
+ objectId : <objectId>
+ type : object
+ }
+ startLocation : {
+ columnNumber : 2
+ lineNumber : 11
+ scriptId : <scriptId>
+ }
+ type : local
+ }
+ [1] : {
+ endLocation : {
+ columnNumber : 1
+ lineNumber : 19
+ scriptId : <scriptId>
+ }
+ name : run
+ object : {
+ className : Object
+ description : Object
+ objectId : <objectId>
+ type : object
+ }
+ startLocation : {
+ columnNumber : 12
+ lineNumber : 1
+ scriptId : <scriptId>
+ }
+ type : closure
+ }
+ [2] : {
+ object : {
+ className : global
+ description : global
+ objectId : <objectId>
+ type : object
+ }
+ type : global
+ }
+ ]
+ this : {
+ className : X
+ description : X
+ objectId : <objectId>
+ type : object
+ }
+ url :
+ }
+ [2] : {
+ callFrameId : <callFrameId>
+ functionLocation : {
+ columnNumber : 12
+ lineNumber : 1
+ scriptId : <scriptId>
+ }
+ functionName : run
+ location : {
+ columnNumber : 2
+ lineNumber : 18
+ scriptId : <scriptId>
+ }
+ scopeChain : [
+ [0] : {
+ endLocation : {
+ columnNumber : 1
+ lineNumber : 19
+ scriptId : <scriptId>
+ }
+ name : run
+ object : {
+ className : Object
+ description : Object
+ objectId : <objectId>
+ type : object
+ }
+ startLocation : {
+ columnNumber : 12
+ lineNumber : 1
+ scriptId : <scriptId>
+ }
+ type : local
+ }
+ [1] : {
+ object : {
+ className : global
+ description : global
+ objectId : <objectId>
+ type : object
+ }
+ type : global
+ }
+ ]
+ this : {
+ className : global
+ description : global
+ objectId : <objectId>
+ type : object
+ }
+ url :
+ }
+ [3] : {
+ callFrameId : <callFrameId>
+ functionLocation : {
+ columnNumber : 0
+ lineNumber : 0
+ scriptId : <scriptId>
+ }
+ functionName :
+ location : {
+ columnNumber : 0
+ lineNumber : 0
+ scriptId : <scriptId>
+ }
+ scopeChain : [
+ [0] : {
+ object : {
+ className : global
+ description : global
+ objectId : <objectId>
+ type : object
+ }
+ type : global
+ }
+ ]
+ this : {
+ className : global
+ description : global
+ objectId : <objectId>
+ type : object
+ }
+ url :
+ }
+]
+[
+ [0] : {
+ callFrameId : <callFrameId>
+ functionLocation : {
+ columnNumber : 4
+ lineNumber : 12
+ scriptId : <scriptId>
+ }
+ functionName : <instance_members_initializer>
+ location : {
+ columnNumber : 8
+ lineNumber : 14
+ scriptId : <scriptId>
+ }
+ scopeChain : [
+ ]
+ this : {
+ className : X
+ description : X
+ objectId : <objectId>
+ type : object
+ }
+ url :
+ }
+ [1] : {
+ callFrameId : <callFrameId>
+ functionLocation : {
+ columnNumber : 2
+ lineNumber : 11
+ scriptId : <scriptId>
+ }
+ functionName : X
+ location : {
+ columnNumber : 2
+ lineNumber : 11
+ scriptId : <scriptId>
+ }
+ scopeChain : [
+ [0] : {
+ endLocation : {
+ columnNumber : 2
+ lineNumber : 11
+ scriptId : <scriptId>
+ }
+ name : X
+ object : {
+ className : Object
+ description : Object
+ objectId : <objectId>
+ type : object
+ }
+ startLocation : {
+ columnNumber : 2
+ lineNumber : 11
+ scriptId : <scriptId>
+ }
+ type : local
+ }
+ [1] : {
+ endLocation : {
+ columnNumber : 1
+ lineNumber : 19
+ scriptId : <scriptId>
+ }
+ name : run
+ object : {
+ className : Object
+ description : Object
+ objectId : <objectId>
+ type : object
+ }
+ startLocation : {
+ columnNumber : 12
+ lineNumber : 1
+ scriptId : <scriptId>
+ }
+ type : closure
+ }
+ [2] : {
+ object : {
+ className : global
+ description : global
+ objectId : <objectId>
+ type : object
+ }
+ type : global
+ }
+ ]
+ this : {
+ className : X
+ description : X
+ objectId : <objectId>
+ type : object
+ }
+ url :
+ }
+ [2] : {
+ callFrameId : <callFrameId>
+ functionLocation : {
+ columnNumber : 12
+ lineNumber : 1
+ scriptId : <scriptId>
+ }
+ functionName : run
+ location : {
+ columnNumber : 2
+ lineNumber : 18
+ scriptId : <scriptId>
+ }
+ scopeChain : [
+ [0] : {
+ endLocation : {
+ columnNumber : 1
+ lineNumber : 19
+ scriptId : <scriptId>
+ }
+ name : run
+ object : {
+ className : Object
+ description : Object
+ objectId : <objectId>
+ type : object
+ }
+ startLocation : {
+ columnNumber : 12
+ lineNumber : 1
+ scriptId : <scriptId>
+ }
+ type : local
+ }
+ [1] : {
+ object : {
+ className : global
+ description : global
+ objectId : <objectId>
+ type : object
+ }
+ type : global
+ }
+ ]
+ this : {
+ className : global
+ description : global
+ objectId : <objectId>
+ type : object
+ }
+ url :
+ }
+ [3] : {
+ callFrameId : <callFrameId>
+ functionLocation : {
+ columnNumber : 0
+ lineNumber : 0
+ scriptId : <scriptId>
+ }
+ functionName :
+ location : {
+ columnNumber : 0
+ lineNumber : 0
+ scriptId : <scriptId>
+ }
+ scopeChain : [
+ [0] : {
+ object : {
+ className : global
+ description : global
+ objectId : <objectId>
+ type : object
+ }
+ type : global
+ }
+ ]
+ this : {
+ className : global
+ description : global
+ objectId : <objectId>
+ type : object
+ }
+ url :
+ }
+]
+[
+ [0] : {
+ callFrameId : <callFrameId>
+ functionLocation : {
+ columnNumber : 14
+ lineNumber : 7
+ scriptId : <scriptId>
+ }
+ functionName : bar
+ location : {
+ columnNumber : 4
+ lineNumber : 8
+ scriptId : <scriptId>
+ }
+ scopeChain : [
+ [0] : {
+ endLocation : {
+ columnNumber : 3
+ lineNumber : 9
+ scriptId : <scriptId>
+ }
+ name : bar
+ object : {
+ className : Object
+ description : Object
+ objectId : <objectId>
+ type : object
+ }
+ startLocation : {
+ columnNumber : 14
+ lineNumber : 7
+ scriptId : <scriptId>
+ }
+ type : local
+ }
+ [1] : {
+ endLocation : {
+ columnNumber : 1
+ lineNumber : 19
+ scriptId : <scriptId>
+ }
+ name : run
+ object : {
+ className : Object
+ description : Object
+ objectId : <objectId>
+ type : object
+ }
+ startLocation : {
+ columnNumber : 12
+ lineNumber : 1
+ scriptId : <scriptId>
+ }
+ type : closure
+ }
+ [2] : {
+ object : {
+ className : global
+ description : global
+ objectId : <objectId>
+ type : object
+ }
+ type : global
+ }
+ ]
+ this : {
+ className : global
+ description : global
+ objectId : <objectId>
+ type : object
+ }
+ url :
+ }
+ [1] : {
+ callFrameId : <callFrameId>
+ functionLocation : {
+ columnNumber : 4
+ lineNumber : 12
+ scriptId : <scriptId>
+ }
+ functionName : <instance_members_initializer>
+ location : {
+ columnNumber : 8
+ lineNumber : 14
+ scriptId : <scriptId>
+ }
+ scopeChain : [
+ ]
+ this : {
+ className : X
+ description : X
+ objectId : <objectId>
+ type : object
+ }
+ url :
+ }
+ [2] : {
+ callFrameId : <callFrameId>
+ functionLocation : {
+ columnNumber : 2
+ lineNumber : 11
+ scriptId : <scriptId>
+ }
+ functionName : X
+ location : {
+ columnNumber : 2
+ lineNumber : 11
+ scriptId : <scriptId>
+ }
+ scopeChain : [
+ [0] : {
+ endLocation : {
+ columnNumber : 2
+ lineNumber : 11
+ scriptId : <scriptId>
+ }
+ name : X
+ object : {
+ className : Object
+ description : Object
+ objectId : <objectId>
+ type : object
+ }
+ startLocation : {
+ columnNumber : 2
+ lineNumber : 11
+ scriptId : <scriptId>
+ }
+ type : local
+ }
+ [1] : {
+ endLocation : {
+ columnNumber : 1
+ lineNumber : 19
+ scriptId : <scriptId>
+ }
+ name : run
+ object : {
+ className : Object
+ description : Object
+ objectId : <objectId>
+ type : object
+ }
+ startLocation : {
+ columnNumber : 12
+ lineNumber : 1
+ scriptId : <scriptId>
+ }
+ type : closure
+ }
+ [2] : {
+ object : {
+ className : global
+ description : global
+ objectId : <objectId>
+ type : object
+ }
+ type : global
+ }
+ ]
+ this : {
+ className : X
+ description : X
+ objectId : <objectId>
+ type : object
+ }
+ url :
+ }
+ [3] : {
+ callFrameId : <callFrameId>
+ functionLocation : {
+ columnNumber : 12
+ lineNumber : 1
+ scriptId : <scriptId>
+ }
+ functionName : run
+ location : {
+ columnNumber : 2
+ lineNumber : 18
+ scriptId : <scriptId>
+ }
+ scopeChain : [
+ [0] : {
+ endLocation : {
+ columnNumber : 1
+ lineNumber : 19
+ scriptId : <scriptId>
+ }
+ name : run
+ object : {
+ className : Object
+ description : Object
+ objectId : <objectId>
+ type : object
+ }
+ startLocation : {
+ columnNumber : 12
+ lineNumber : 1
+ scriptId : <scriptId>
+ }
+ type : local
+ }
+ [1] : {
+ object : {
+ className : global
+ description : global
+ objectId : <objectId>
+ type : object
+ }
+ type : global
+ }
+ ]
+ this : {
+ className : global
+ description : global
+ objectId : <objectId>
+ type : object
+ }
+ url :
+ }
+ [4] : {
+ callFrameId : <callFrameId>
+ functionLocation : {
+ columnNumber : 0
+ lineNumber : 0
+ scriptId : <scriptId>
+ }
+ functionName :
+ location : {
+ columnNumber : 0
+ lineNumber : 0
+ scriptId : <scriptId>
+ }
+ scopeChain : [
+ [0] : {
+ object : {
+ className : global
+ description : global
+ objectId : <objectId>
+ type : object
+ }
+ type : global
+ }
+ ]
+ this : {
+ className : global
+ description : global
+ objectId : <objectId>
+ type : object
+ }
+ url :
+ }
+]
diff --git a/deps/v8/test/inspector/debugger/class-fields-scopes.js b/deps/v8/test/inspector/debugger/class-fields-scopes.js
new file mode 100644
index 0000000000..6849e5ebac
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/class-fields-scopes.js
@@ -0,0 +1,80 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --harmony-public-fields --harmony-static-fields
+
+let { session, contextGroup, Protocol } = InspectorTest.start(
+ "Test class fields scopes"
+);
+
+contextGroup.addScript(`
+function run() {
+ function foo() {
+ debugger;
+ return "foo";
+ }
+
+ function bar() {
+ return 3;
+ }
+
+ class X {
+ x = 1;
+ [foo()] = 2;
+ p = bar();
+ }
+
+ debugger;
+ new X;
+}`);
+
+InspectorTest.runAsyncTestSuite([
+ async function testScopesPaused() {
+ Protocol.Debugger.enable();
+ Protocol.Runtime.evaluate({ expression: "run()" });
+
+ let {
+ params: { callFrames: callFrames0 }
+ } = await Protocol.Debugger.oncePaused(); // inside foo()
+ InspectorTest.logMessage(callFrames0);
+
+ Protocol.Debugger.resume();
+
+ await Protocol.Debugger.oncePaused(); // at debugger;
+ Protocol.Debugger.stepOver(); // at debugger;
+
+ await Protocol.Debugger.oncePaused(); // at new X;
+ Protocol.Debugger.stepInto(); // step into initializer_function;
+
+ // at x = 1;
+ let {
+ params: { callFrames: callFrames1 }
+ } = await Protocol.Debugger.oncePaused();
+ InspectorTest.logMessage(callFrames1);
+ Protocol.Debugger.stepOver();
+
+ // at [foo()] = 2;
+ let {
+ params: { callFrames: callFrames2 }
+ } = await Protocol.Debugger.oncePaused();
+ InspectorTest.logMessage(callFrames2);
+ Protocol.Debugger.stepOver();
+
+ // at p = bar();
+ let {
+ params: { callFrames: callFrames3 }
+ } = await Protocol.Debugger.oncePaused();
+ InspectorTest.logMessage(callFrames3);
+ Protocol.Debugger.stepInto();
+
+ // inside bar();
+ let {
+ params: { callFrames: callFrames4 }
+ } = await Protocol.Debugger.oncePaused();
+ InspectorTest.logMessage(callFrames4);
+
+ Protocol.Debugger.resume();
+ Protocol.Debugger.disable();
+ }
+]);
diff --git a/deps/v8/test/inspector/debugger/eval-scopes-expected.txt b/deps/v8/test/inspector/debugger/eval-scopes-expected.txt
index 4c93498c68..71d6618c8e 100644
--- a/deps/v8/test/inspector/debugger/eval-scopes-expected.txt
+++ b/deps/v8/test/inspector/debugger/eval-scopes-expected.txt
@@ -2,12 +2,6 @@ Tests that variables introduced in eval scopes are accessible
{
id : <messageId>
result : {
- internalProperties : [
- [0] : {
- name : [[StableObjectId]]
- value : <StablectObjectId>
- }
- ]
result : [
[0] : {
configurable : true
diff --git a/deps/v8/test/inspector/debugger/for-of-loops-expected.txt b/deps/v8/test/inspector/debugger/for-of-loops-expected.txt
index 74666e9e8e..c742413a6b 100644
--- a/deps/v8/test/inspector/debugger/for-of-loops-expected.txt
+++ b/deps/v8/test/inspector/debugger/for-of-loops-expected.txt
@@ -251,6 +251,12 @@ testFunction (test.js:25:11)
return { value: this.i++, done: false };#
}
+testFunction (test.js:25:11)
+(anonymous) (expr.js:0:0)
+ };
+ for (var #k of iterable) { all.push(k); }
+ iterable.i = 0;
+
testFunction (test.js:25:32)
(anonymous) (expr.js:0:0)
};
@@ -337,6 +343,12 @@ testFunction (test.js:27:11)
return { value: this.i++, done: false };#
}
+testFunction (test.js:27:11)
+(anonymous) (expr.js:0:0)
+ iterable.i = 0;
+ for (let #k of iterable) { all.push(k); }
+}
+
testFunction (test.js:27:32)
(anonymous) (expr.js:0:0)
iterable.i = 0;
diff --git a/deps/v8/test/inspector/debugger/get-possible-breakpoints-expected.txt b/deps/v8/test/inspector/debugger/get-possible-breakpoints-expected.txt
index 87eba6ed85..b8dfd1ce3d 100644
--- a/deps/v8/test/inspector/debugger/get-possible-breakpoints-expected.txt
+++ b/deps/v8/test/inspector/debugger/get-possible-breakpoints-expected.txt
@@ -81,10 +81,10 @@ function foo2() { Promise.resolve().then(() => 42) }
paused in foo1
function foo1() { Promise.resolve().then(() => 42) ^}
function foo2() { Promise.resolve().then(() => 42) }
-paused in Promise.resolve.then
+paused in
function foo1() { Promise.resolve().then(() => ^42) }
function foo2() { Promise.resolve().then(() => 42) }
-paused in Promise.resolve.then
+paused in
function foo1() { Promise.resolve().then(() => 42^) }
function foo2() { Promise.resolve().then(() => 42) }
@@ -148,22 +148,22 @@ debugger; function foo3() { Promise.resolve().then(() => 42) }
function foo4() { Promise.resolve().then(() => 42) };
foo3();
foo4();^
-paused in Promise.resolve.then
+paused in
debugger; function foo3() { Promise.resolve().then(() => ^42) }
function foo4() { Promise.resolve().then(() => 42) };
foo3();
foo4();
-paused in Promise.resolve.then
+paused in
debugger; function foo3() { Promise.resolve().then(() => 42^) }
function foo4() { Promise.resolve().then(() => 42) };
foo3();
foo4();
-paused in Promise.resolve.then
+paused in
debugger; function foo3() { Promise.resolve().then(() => 42) }
function foo4() { Promise.resolve().then(() => ^42) };
foo3();
foo4();
-paused in Promise.resolve.then
+paused in
debugger; function foo3() { Promise.resolve().then(() => 42) }
function foo4() { Promise.resolve().then(() => 42^) };
foo3();
@@ -247,19 +247,19 @@ paused in foo6
function foo5() { Promise.resolve().then(() => 42) }
function foo6() { Promise.resolve().then(() => 42) ^}
-paused in Promise.resolve.then
+paused in
function foo5() { Promise.resolve().then(() => ^42) }
function foo6() { Promise.resolve().then(() => 42) }
-paused in Promise.resolve.then
+paused in
function foo5() { Promise.resolve().then(() => 42^) }
function foo6() { Promise.resolve().then(() => 42) }
-paused in Promise.resolve.then
+paused in
function foo5() { Promise.resolve().then(() => 42) }
function foo6() { Promise.resolve().then(() => ^42) }
-paused in Promise.resolve.then
+paused in
function foo5() { Promise.resolve().then(() => 42) }
function foo6() { Promise.resolve().then(() => 42^) }
diff --git a/deps/v8/test/inspector/debugger/get-possible-breakpoints-master-expected.txt b/deps/v8/test/inspector/debugger/get-possible-breakpoints-master-expected.txt
index e0ba91f4e2..27346bffea 100644
--- a/deps/v8/test/inspector/debugger/get-possible-breakpoints-master-expected.txt
+++ b/deps/v8/test/inspector/debugger/get-possible-breakpoints-master-expected.txt
@@ -235,7 +235,7 @@ async function testPromiseAsyncWithCode() {
|C|nextTest();
|R|}
|C|main();
- |_|return testPromise;
+ |_|return testPromise;|R|
|R|}
function returnFunction() {
@@ -248,7 +248,7 @@ async function testPromiseComplex() {
async function main() {
async function foo() {
|_|await Promise.|C|resolve();
- |_|return 42;
+ |_|return 42;|R|
|R|}
var x = |_|1;
var y = |_|2;
@@ -256,7 +256,7 @@ async function testPromiseComplex() {
|C|nextTest();
|R|}
|C|main();
- |_|return testPromise;
+ |_|return testPromise;|R|
|R|}
function twiceDefined() {
diff --git a/deps/v8/test/inspector/debugger/max-async-call-chain-depth-expected.txt b/deps/v8/test/inspector/debugger/max-async-call-chain-depth-expected.txt
index 0d60dbb6a1..029353b404 100644
--- a/deps/v8/test/inspector/debugger/max-async-call-chain-depth-expected.txt
+++ b/deps/v8/test/inspector/debugger/max-async-call-chain-depth-expected.txt
@@ -46,7 +46,7 @@ Running test: testConsoleTraceWithEmptySync
callFrames : [
[0] : {
columnNumber : 66
- functionName : Promise.then
+ functionName :
lineNumber : 0
scriptId : <scriptId>
url :
diff --git a/deps/v8/test/inspector/debugger/object-preview-internal-properties-expected.txt b/deps/v8/test/inspector/debugger/object-preview-internal-properties-expected.txt
index 5ef8327499..9f265261f8 100644
--- a/deps/v8/test/inspector/debugger/object-preview-internal-properties-expected.txt
+++ b/deps/v8/test/inspector/debugger/object-preview-internal-properties-expected.txt
@@ -26,18 +26,7 @@ expression: Object(Symbol(42))
{
name : [[PrimitiveValue]]
type : symbol
- valuePreview : {
- description : Symbol
- overflow : false
- properties : [
- [0] : {
- name : description
- type : string
- value : 42
- }
- ]
- type : object
- }
+ value : Symbol(42)
}
expression: Object(BigInt(2))
@@ -177,39 +166,27 @@ expression: (new Map([[1,2]])).entries()
}
]
-expression: (new Set([1,2])).entries()
+expression: (new Set([[1,2]])).entries()
[[Entries]]:
[
[0] : {
- key : {
- description : 1
- overflow : false
- properties : [
- ]
- type : number
- }
- value : {
- description : 1
- overflow : false
- properties : [
- ]
- type : number
- }
- }
- [1] : {
- key : {
- description : 2
- overflow : false
- properties : [
- ]
- type : number
- }
value : {
- description : 2
+ description : Array(2)
overflow : false
properties : [
+ [0] : {
+ name : 0
+ type : number
+ value : 1
+ }
+ [1] : {
+ name : 1
+ type : number
+ value : 2
+ }
]
- type : number
+ subtype : array
+ type : object
}
}
]
diff --git a/deps/v8/test/inspector/debugger/object-preview-internal-properties.js b/deps/v8/test/inspector/debugger/object-preview-internal-properties.js
index 5b1cc3b8a2..fc7dabac1a 100644
--- a/deps/v8/test/inspector/debugger/object-preview-internal-properties.js
+++ b/deps/v8/test/inspector/debugger/object-preview-internal-properties.js
@@ -46,7 +46,7 @@ InspectorTest.runTestSuite([
function iteratorObject(next)
{
checkExpression("(new Map([[1,2]])).entries()")
- .then(() => checkExpression("(new Set([1,2])).entries()"))
+ .then(() => checkExpression("(new Set([[1,2]])).entries()"))
.then(next);
},
diff --git a/deps/v8/test/inspector/debugger/schedule-step-into-async-set-timeout-expected.txt b/deps/v8/test/inspector/debugger/pause-on-async-call-set-timeout-expected.txt
index dd7cd64d3c..bb3199f520 100644
--- a/deps/v8/test/inspector/debugger/schedule-step-into-async-set-timeout-expected.txt
+++ b/deps/v8/test/inspector/debugger/pause-on-async-call-set-timeout-expected.txt
@@ -1,4 +1,4 @@
-Checks Debugger.scheduleStepIntoAsync with setTimeout.
+Checks Debugger.pauseOnAsyncCall with setTimeout.
Running test: testSetTimeout
paused at:
diff --git a/deps/v8/test/inspector/debugger/schedule-step-into-async-set-timeout.js b/deps/v8/test/inspector/debugger/pause-on-async-call-set-timeout.js
index f2171a5037..be26bc11de 100644
--- a/deps/v8/test/inspector/debugger/schedule-step-into-async-set-timeout.js
+++ b/deps/v8/test/inspector/debugger/pause-on-async-call-set-timeout.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-let {session, contextGroup, Protocol} = InspectorTest.start('Checks Debugger.scheduleStepIntoAsync with setTimeout.');
+let {session, contextGroup, Protocol} = InspectorTest.start('Checks Debugger.pauseOnAsyncCall with setTimeout.');
session.setupScriptMap();
Protocol.Debugger.enable();
InspectorTest.runAsyncTestSuite([
@@ -11,8 +11,10 @@ InspectorTest.runAsyncTestSuite([
await Protocol.Debugger.oncePaused();
Protocol.Debugger.stepOver();
await waitPauseAndDumpLocation();
- Protocol.Debugger.scheduleStepIntoAsync();
- Protocol.Debugger.stepOver();
+ Protocol.Debugger.stepInto({breakOnAsyncCall: true});
+ let parentStackTraceId = await waitPauseAndDumpLocation();
+ Protocol.Debugger.pauseOnAsyncCall({parentStackTraceId});
+ Protocol.Debugger.resume();
await waitPauseAndDumpLocation();
await Protocol.Debugger.resume();
},
@@ -23,8 +25,10 @@ InspectorTest.runAsyncTestSuite([
await Protocol.Debugger.oncePaused();
Protocol.Debugger.stepOver();
await waitPauseAndDumpLocation();
- Protocol.Debugger.scheduleStepIntoAsync();
- Protocol.Debugger.stepOver();
+ Protocol.Debugger.stepInto({breakOnAsyncCall: true});
+ let parentStackTraceId = await waitPauseAndDumpLocation();
+ Protocol.Debugger.pauseOnAsyncCall({parentStackTraceId});
+ await Protocol.Debugger.resume();
await waitPauseAndDumpLocation();
await Protocol.Debugger.resume();
await waitPauseAndDumpLocation();
@@ -38,8 +42,10 @@ InspectorTest.runAsyncTestSuite([
await Protocol.Debugger.oncePaused();
Protocol.Debugger.stepOver();
await waitPauseAndDumpLocation();
- Protocol.Debugger.scheduleStepIntoAsync();
- Protocol.Debugger.stepOver();
+ Protocol.Debugger.stepInto({breakOnAsyncCall: true});
+ let parentStackTraceId = await waitPauseAndDumpLocation();
+ Protocol.Debugger.pauseOnAsyncCall({parentStackTraceId});
+ Protocol.Debugger.resume();
await waitPauseAndDumpLocation();
await Protocol.Debugger.resume();
await InspectorTest.waitForPendingTasks();
@@ -50,8 +56,10 @@ InspectorTest.runAsyncTestSuite([
await Protocol.Debugger.oncePaused();
Protocol.Debugger.stepOver();
await waitPauseAndDumpLocation();
- Protocol.Debugger.scheduleStepIntoAsync();
- Protocol.Debugger.stepOver();
+ Protocol.Debugger.stepInto({breakOnAsyncCall: true});
+ let parentStackTraceId = await waitPauseAndDumpLocation();
+ Protocol.Debugger.pauseOnAsyncCall({parentStackTraceId});
+ Protocol.Debugger.resume();
await waitPauseAndDumpLocation();
await Protocol.Debugger.resume();
},
@@ -60,7 +68,9 @@ InspectorTest.runAsyncTestSuite([
Protocol.Debugger.pause();
Protocol.Runtime.evaluate({expression: 'setTimeout(() => 42, 0)'});
await waitPauseAndDumpLocation();
- Protocol.Debugger.scheduleStepIntoAsync();
+ Protocol.Debugger.stepInto({breakOnAsyncCall: true});
+ let parentStackTraceId = await waitPauseAndDumpLocation();
+ Protocol.Debugger.pauseOnAsyncCall({parentStackTraceId});
Protocol.Debugger.resume();
await waitPauseAndDumpLocation();
await Protocol.Debugger.resume();
@@ -68,8 +78,11 @@ InspectorTest.runAsyncTestSuite([
]);
async function waitPauseAndDumpLocation() {
- var message = await Protocol.Debugger.oncePaused();
- InspectorTest.log('paused at:');
- await session.logSourceLocation(message.params.callFrames[0].location);
- return message;
+ var {params: {callFrames, asyncCallStackTraceId}} =
+ await Protocol.Debugger.oncePaused();
+ if (!asyncCallStackTraceId) {
+ InspectorTest.log('paused at:');
+ await session.logSourceLocation(callFrames[0].location);
+ }
+ return asyncCallStackTraceId;
}
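For reference, the stepping flow that replaces Debugger.scheduleStepIntoAsync throughout the test above boils down to the following sketch (to run inside one of the async test functions; it uses only protocol methods visible in this diff, and the comments describe the pause semantics this revision appears to implement, inferred from the test rather than documented):

  Protocol.Debugger.stepInto({breakOnAsyncCall: true});
  // The next pause reports an asyncCallStackTraceId for the scheduled task.
  const {params} = await Protocol.Debugger.oncePaused();
  Protocol.Debugger.pauseOnAsyncCall(
      {parentStackTraceId: params.asyncCallStackTraceId});
  Protocol.Debugger.resume();
  // Pauses again once the scheduled async task actually starts running.
  await Protocol.Debugger.oncePaused();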
diff --git a/deps/v8/test/inspector/debugger/pause-on-oom.js b/deps/v8/test/inspector/debugger/pause-on-oom.js
index fe5d61b492..062c53b0d7 100644
--- a/deps/v8/test/inspector/debugger/pause-on-oom.js
+++ b/deps/v8/test/inspector/debugger/pause-on-oom.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --max-old-space-size=8
+// Flags: --max-old-space-size=16
let {session, contextGroup, Protocol} = InspectorTest.start('Check pause on OOM');
diff --git a/deps/v8/test/inspector/debugger/promise-chain-when-limit-hit-expected.txt b/deps/v8/test/inspector/debugger/promise-chain-when-limit-hit-expected.txt
index 549b1a3ddc..ee1689ba7c 100644
--- a/deps/v8/test/inspector/debugger/promise-chain-when-limit-hit-expected.txt
+++ b/deps/v8/test/inspector/debugger/promise-chain-when-limit-hit-expected.txt
@@ -23,7 +23,7 @@ Run expression 'console.trace()' with async chain len: 3
}
[1] : {
columnNumber : 33
- functionName : Promise.resolve.then
+ functionName :
lineNumber : 5
scriptId : <scriptId>
url :
@@ -40,7 +40,7 @@ Run expression 'console.trace()' with async chain len: 3
}
[1] : {
columnNumber : 33
- functionName : Promise.resolve.then
+ functionName :
lineNumber : 5
scriptId : <scriptId>
url :
@@ -58,7 +58,7 @@ Run expression 'console.trace()' with async chain len: 3
}
[1] : {
columnNumber : 33
- functionName : Promise.resolve.then
+ functionName :
lineNumber : 5
scriptId : <scriptId>
url :
@@ -114,7 +114,7 @@ Run expression 'console.trace()' with async chain len: 3
}
[1] : {
columnNumber : 33
- functionName : Promise.resolve.then
+ functionName :
lineNumber : 5
scriptId : <scriptId>
url :
@@ -131,7 +131,7 @@ Run expression 'console.trace()' with async chain len: 3
}
[1] : {
columnNumber : 33
- functionName : Promise.resolve.then
+ functionName :
lineNumber : 5
scriptId : <scriptId>
url :
@@ -149,7 +149,7 @@ Run expression 'console.trace()' with async chain len: 3
}
[1] : {
columnNumber : 33
- functionName : Promise.resolve.then
+ functionName :
lineNumber : 5
scriptId : <scriptId>
url :
@@ -205,7 +205,7 @@ Run expression 'console.trace()' with async chain len: 3
}
[1] : {
columnNumber : 33
- functionName : Promise.resolve.then
+ functionName :
lineNumber : 5
scriptId : <scriptId>
url :
@@ -222,7 +222,7 @@ Run expression 'console.trace()' with async chain len: 3
}
[1] : {
columnNumber : 33
- functionName : Promise.resolve.then
+ functionName :
lineNumber : 5
scriptId : <scriptId>
url :
@@ -240,7 +240,7 @@ Run expression 'console.trace()' with async chain len: 3
}
[1] : {
columnNumber : 33
- functionName : Promise.resolve.then
+ functionName :
lineNumber : 5
scriptId : <scriptId>
url :
@@ -296,7 +296,7 @@ Run expression 'console.trace()' with async chain len: 3
}
[1] : {
columnNumber : 33
- functionName : Promise.resolve.then
+ functionName :
lineNumber : 5
scriptId : <scriptId>
url :
@@ -313,7 +313,7 @@ Run expression 'console.trace()' with async chain len: 3
}
[1] : {
columnNumber : 33
- functionName : Promise.resolve.then
+ functionName :
lineNumber : 5
scriptId : <scriptId>
url :
@@ -331,7 +331,7 @@ Run expression 'console.trace()' with async chain len: 3
}
[1] : {
columnNumber : 33
- functionName : Promise.resolve.then
+ functionName :
lineNumber : 5
scriptId : <scriptId>
url :
@@ -387,7 +387,7 @@ Run expression 'console.trace()' with async chain len: 3
}
[1] : {
columnNumber : 33
- functionName : Promise.resolve.then
+ functionName :
lineNumber : 5
scriptId : <scriptId>
url :
@@ -404,7 +404,7 @@ Run expression 'console.trace()' with async chain len: 3
}
[1] : {
columnNumber : 33
- functionName : Promise.resolve.then
+ functionName :
lineNumber : 5
scriptId : <scriptId>
url :
@@ -422,7 +422,7 @@ Run expression 'console.trace()' with async chain len: 3
}
[1] : {
columnNumber : 33
- functionName : Promise.resolve.then
+ functionName :
lineNumber : 5
scriptId : <scriptId>
url :
@@ -478,7 +478,7 @@ Run expression 'console.trace()' with async chain len: 3
}
[1] : {
columnNumber : 33
- functionName : Promise.resolve.then
+ functionName :
lineNumber : 5
scriptId : <scriptId>
url :
@@ -495,7 +495,7 @@ Run expression 'console.trace()' with async chain len: 3
}
[1] : {
columnNumber : 33
- functionName : Promise.resolve.then
+ functionName :
lineNumber : 5
scriptId : <scriptId>
url :
@@ -513,7 +513,7 @@ Run expression 'console.trace()' with async chain len: 3
}
[1] : {
columnNumber : 33
- functionName : Promise.resolve.then
+ functionName :
lineNumber : 5
scriptId : <scriptId>
url :
diff --git a/deps/v8/test/inspector/debugger/schedule-step-into-async-expected.txt b/deps/v8/test/inspector/debugger/schedule-step-into-async-expected.txt
deleted file mode 100644
index dbdc5fb84f..0000000000
--- a/deps/v8/test/inspector/debugger/schedule-step-into-async-expected.txt
+++ /dev/null
@@ -1,191 +0,0 @@
-Checks Debugger.scheduleStepIntoAsync.
-
-Running test: testScheduleErrors
-paused at:
-function testNoScheduledTask() {
- #debugger;
- return 42;
-
-{
- error : {
- code : -32000
- message : Current scheduled step into async was overriden with new one.
- }
- id : <messageId>
-}
-{
- error : {
- code : -32000
- message : No async tasks were scheduled before pause.
- }
- id : <messageId>
-}
-paused at:
- debugger;
- #return 42;
-}
-
-
-Running test: testSimple
-paused at:
-function testSimple() {
- #debugger;
- Promise.resolve().then(v => v * 2);
-
-paused at:
- debugger;
- #Promise.resolve().then(v => v * 2);
-}
-
-{
- id : <messageId>
- result : {
- }
-}
-paused at:
- debugger;
- Promise.resolve().then(v => v #* 2);
-}
-
-
-Running test: testNotResolvedPromise
-paused at:
- var p = new Promise(resolve => resolveCallback = resolve);
- #debugger;
- p.then(v => v * 2);
-
-paused at:
- debugger;
- p.#then(v => v * 2);
- resolveCallback();
-
-{
- id : <messageId>
- result : {
- }
-}
-paused at:
- debugger;
- p.then(v => v #* 2);
- resolveCallback();
-
-
-Running test: testTwoAsyncTasks
-paused at:
-function testTwoAsyncTasks() {
- #debugger;
- Promise.resolve().then(v => v * 2);
-
-{
- id : <messageId>
- result : {
- }
-}
-paused at:
- debugger;
- Promise.resolve().then(v => v #* 2);
- Promise.resolve().then(v => v * 4);
-
-
-Running test: testTwoTasksAndGoToSecond
-paused at:
-function testTwoAsyncTasks() {
- #debugger;
- Promise.resolve().then(v => v * 2);
-
-paused at:
- debugger;
- #Promise.resolve().then(v => v * 2);
- Promise.resolve().then(v => v * 4);
-
-paused at:
- Promise.resolve().then(v => v * 2);
- #Promise.resolve().then(v => v * 4);
-}
-
-{
- id : <messageId>
- result : {
- }
-}
-paused at:
- Promise.resolve().then(v => v * 2);
- Promise.resolve().then(v => v #* 4);
-}
-
-
-Running test: testTwoAsyncTasksWithBreak
-paused at:
-function testTwoAsyncTasksWithBreak() {
- #debugger;
- Promise.resolve().then(v => v * 2);
-
-paused at:
- debugger;
- #Promise.resolve().then(v => v * 2);
- debugger;
-
-{
- id : <messageId>
- result : {
- }
-}
-paused at:
- Promise.resolve().then(v => v * 2);
- #debugger;
- Promise.resolve().then(v => v * 4);
-
-{
- id : <messageId>
- result : {
- }
-}
-paused at:
- debugger;
- Promise.resolve().then(v => v #* 4);
-}
-
-
-Running test: testPromiseAll
-paused at:
-function testPromiseAll() {
- #debugger;
- Promise.all([ Promise.resolve(), Promise.resolve() ]).then(v => v * 2);
-
-paused at:
- debugger;
- #Promise.all([ Promise.resolve(), Promise.resolve() ]).then(v => v * 2);
-}
-
-{
- id : <messageId>
- result : {
- }
-}
-paused at:
- debugger;
- Promise.all([ Promise.resolve(), Promise.resolve() ]).then(v => v #* 2);
-}
-
-
-Running test: testWithBlackboxedCode
-paused at:
-function testBlackboxedCreatePromise() {
- #debugger;
- createPromise().then(v => v * 2);
-
-paused at:
- debugger;
- #createPromise().then(v => v * 2);
-}
-
-{
- id : <messageId>
- result : {
- }
-}
-paused at:
- debugger;
- createPromise().then(v => v #* 2);
-}
-
diff --git a/deps/v8/test/inspector/debugger/schedule-step-into-async.js b/deps/v8/test/inspector/debugger/schedule-step-into-async.js
deleted file mode 100644
index 132fb8c7c1..0000000000
--- a/deps/v8/test/inspector/debugger/schedule-step-into-async.js
+++ /dev/null
@@ -1,160 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-let {session, contextGroup, Protocol} = InspectorTest.start('Checks Debugger.scheduleStepIntoAsync.');
-
-contextGroup.addScript(`
-function testNoScheduledTask() {
- debugger;
- return 42;
-}
-
-function testSimple() {
- debugger;
- Promise.resolve().then(v => v * 2);
-}
-
-function testNotResolvedPromise() {
- var resolveCallback;
- var p = new Promise(resolve => resolveCallback = resolve);
- debugger;
- p.then(v => v * 2);
- resolveCallback();
-}
-
-function testTwoAsyncTasks() {
- debugger;
- Promise.resolve().then(v => v * 2);
- Promise.resolve().then(v => v * 4);
-}
-
-function testTwoAsyncTasksWithBreak() {
- debugger;
- Promise.resolve().then(v => v * 2);
- debugger;
- Promise.resolve().then(v => v * 4);
-}
-
-function testPromiseAll() {
- debugger;
- Promise.all([ Promise.resolve(), Promise.resolve() ]).then(v => v * 2);
-}
-
-function testBlackboxedCreatePromise() {
- debugger;
- createPromise().then(v => v * 2);
-}
-//# sourceURL=test.js`);
-
-contextGroup.addScript(`
-
-function createPromise() {
- return Promise.resolve().then(v => v * 3).then(v => v * 4);
-}
-
-//# sourceURL=framework.js`)
-
-session.setupScriptMap();
-
-Protocol.Debugger.enable();
-Protocol.Debugger.setAsyncCallStackDepth({maxDepth: 128});
-InspectorTest.runAsyncTestSuite([
- async function testScheduleErrors() {
- Protocol.Runtime.evaluate({ expression: 'testNoScheduledTask()' });
- await waitPauseAndDumpLocation();
- Protocol.Debugger.scheduleStepIntoAsync().then(InspectorTest.logMessage);
- Protocol.Debugger.scheduleStepIntoAsync().then(InspectorTest.logMessage);
- Protocol.Debugger.stepInto();
- await waitPauseAndDumpLocation();
- await Protocol.Debugger.resume();
- },
-
- async function testSimple() {
- Protocol.Runtime.evaluate({ expression: 'testSimple()' });
- await waitPauseAndDumpLocation();
- Protocol.Debugger.stepOver();
- await waitPauseAndDumpLocation();
- Protocol.Debugger.scheduleStepIntoAsync().then(InspectorTest.logMessage);
- Protocol.Debugger.stepInto();
- await waitPauseAndDumpLocation();
- await Protocol.Debugger.resume();
- },
-
- async function testNotResolvedPromise() {
- Protocol.Runtime.evaluate({ expression: 'testNotResolvedPromise()' });
- await waitPauseAndDumpLocation();
- Protocol.Debugger.stepOver();
- await waitPauseAndDumpLocation();
- Protocol.Debugger.scheduleStepIntoAsync().then(InspectorTest.logMessage);
- Protocol.Debugger.stepInto();
- await waitPauseAndDumpLocation();
- await Protocol.Debugger.resume();
- },
-
- async function testTwoAsyncTasks() {
- Protocol.Runtime.evaluate({ expression: 'testTwoAsyncTasks()' });
- await waitPauseAndDumpLocation();
- Protocol.Debugger.scheduleStepIntoAsync().then(InspectorTest.logMessage);
- Protocol.Debugger.resume();
- await waitPauseAndDumpLocation();
- await Protocol.Debugger.resume();
- },
-
- async function testTwoTasksAndGoToSecond() {
- Protocol.Runtime.evaluate({ expression: 'testTwoAsyncTasks()' });
- await waitPauseAndDumpLocation();
- Protocol.Debugger.stepOver();
- await waitPauseAndDumpLocation();
- Protocol.Debugger.stepOver();
- await waitPauseAndDumpLocation();
- Protocol.Debugger.scheduleStepIntoAsync().then(InspectorTest.logMessage);
- Protocol.Debugger.resume();
- await waitPauseAndDumpLocation();
- await Protocol.Debugger.resume();
- },
-
- async function testTwoAsyncTasksWithBreak() {
- Protocol.Runtime.evaluate({ expression: 'testTwoAsyncTasksWithBreak()' });
- await waitPauseAndDumpLocation();
- Protocol.Debugger.stepOver();
- await waitPauseAndDumpLocation();
- Protocol.Debugger.scheduleStepIntoAsync().then(InspectorTest.logMessage);
- Protocol.Debugger.resume();
- await waitPauseAndDumpLocation();
- Protocol.Debugger.scheduleStepIntoAsync().then(InspectorTest.logMessage);
- Protocol.Debugger.resume();
- await waitPauseAndDumpLocation();
- await Protocol.Debugger.resume();
- },
-
- async function testPromiseAll() {
- Protocol.Runtime.evaluate({ expression: 'testPromiseAll()' });
- await waitPauseAndDumpLocation();
- Protocol.Debugger.stepOver();
- await waitPauseAndDumpLocation();
- Protocol.Debugger.scheduleStepIntoAsync().then(InspectorTest.logMessage);
- Protocol.Debugger.stepOver();
- await waitPauseAndDumpLocation();
- await Protocol.Debugger.resume();
- },
-
- async function testWithBlackboxedCode() {
- Protocol.Runtime.evaluate({ expression: 'testBlackboxedCreatePromise()' });
- await waitPauseAndDumpLocation();
- Protocol.Debugger.stepOver();
- await waitPauseAndDumpLocation();
- await Protocol.Debugger.setBlackboxPatterns({patterns: ['framework\.js'] });
- Protocol.Debugger.scheduleStepIntoAsync().then(InspectorTest.logMessage);
- Protocol.Debugger.stepOver();
- await waitPauseAndDumpLocation();
- await Protocol.Debugger.resume();
- }
-]);
-
-async function waitPauseAndDumpLocation() {
- var message = await Protocol.Debugger.oncePaused();
- InspectorTest.log('paused at:');
- session.logSourceLocation(message.params.callFrames[0].location);
- return message;
-}
diff --git a/deps/v8/test/inspector/debugger/scope-skip-variables-with-empty-name-expected.txt b/deps/v8/test/inspector/debugger/scope-skip-variables-with-empty-name-expected.txt
index 6fbe355eff..626f9787c3 100644
--- a/deps/v8/test/inspector/debugger/scope-skip-variables-with-empty-name-expected.txt
+++ b/deps/v8/test/inspector/debugger/scope-skip-variables-with-empty-name-expected.txt
@@ -2,12 +2,6 @@ Tests that scopes do not report variables with empty names
{
id : <messageId>
result : {
- internalProperties : [
- [0] : {
- name : [[StableObjectId]]
- value : <StablectObjectId>
- }
- ]
result : [
[0] : {
configurable : true
diff --git a/deps/v8/test/inspector/debugger/script-origin-stack-expected.txt b/deps/v8/test/inspector/debugger/script-origin-stack-expected.txt
new file mode 100644
index 0000000000..40c7c7d856
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/script-origin-stack-expected.txt
@@ -0,0 +1,33 @@
+Debugger.scriptParsed.stackTrace should contain only one frame
+{
+ method : Debugger.scriptParsed
+ params : {
+ endColumn : 0
+ endLine : 0
+ executionContextId : <executionContextId>
+ hasSourceURL : false
+ hash : 3fb75160ab1f4e4e82675bc4cd924d3481abe278
+ isLiveEdit : false
+ isModule : false
+ length : 0
+ scriptId : <scriptId>
+ sourceMapURL :
+ stackTrace : {
+ callFrames : [
+ [0] : {
+ columnNumber : 17
+ functionName :
+ lineNumber : 0
+ scriptId : <scriptId>
+ url :
+ }
+ ]
+ parentId : {
+ id : <id>
+ }
+ }
+ startColumn : 0
+ startLine : 0
+ url :
+ }
+}
diff --git a/deps/v8/test/inspector/debugger/script-origin-stack.js b/deps/v8/test/inspector/debugger/script-origin-stack.js
new file mode 100644
index 0000000000..96ef1129d8
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/script-origin-stack.js
@@ -0,0 +1,15 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+(async function test(){
+ const {session, contextGroup, Protocol} = InspectorTest.start(
+ 'Debugger.scriptParsed.stackTrace should contain only one frame');
+ const { Debugger, Runtime } = Protocol;
+ await Debugger.enable();
+ Debugger.setAsyncCallStackDepth({ maxDepth: 32 });
+ Runtime.evaluate({ expression: `setTimeout(() => eval(''), 0)` });
+ await Debugger.onceScriptParsed();
+ InspectorTest.logMessage(await Debugger.onceScriptParsed());
+ InspectorTest.completeTest();
+})()
diff --git a/deps/v8/test/inspector/debugger/set-script-source-2-expected.txt b/deps/v8/test/inspector/debugger/set-script-source-2-expected.txt
index 8b052ce813..26d450d020 100644
--- a/deps/v8/test/inspector/debugger/set-script-source-2-expected.txt
+++ b/deps/v8/test/inspector/debugger/set-script-source-2-expected.txt
@@ -26,7 +26,7 @@ return x + b;
---
Break location after LiveEdit:
var x = a;
-var x = #3;
+#var x = 3;
debugger;
stackChanged: true
@@ -62,12 +62,12 @@ var x = 1;
---
Break location after LiveEdit:
function boo() {
-var x = #3;
+#var x = 3;
debugger;
stackChanged: true
Protocol.Debugger.stepInto
-var x = 3;
-#debugger;
-var x = 1;
+function boo() {
+var x = #3;
+debugger;
diff --git a/deps/v8/test/inspector/debugger/step-into-break-on-async-call-expected.txt b/deps/v8/test/inspector/debugger/step-into-break-on-async-call-expected.txt
index 6b5234f13f..5f61c89c1a 100644
--- a/deps/v8/test/inspector/debugger/step-into-break-on-async-call-expected.txt
+++ b/deps/v8/test/inspector/debugger/step-into-break-on-async-call-expected.txt
@@ -3,11 +3,11 @@ Test for Debugger.stepInto with breakOnAsyncCall.
Running test: testSetTimeout
(anonymous) (test.js:0:0)
asyncCallStackTraceId is set
-setTimeout (test.js:0:17)
+(anonymous) (test.js:0:17)
asyncCallStackTraceId is empty
Running test: testPromiseThen
(anonymous) (test.js:0:2)
asyncCallStackTraceId is set
-p.then (test.js:0:13)
+(anonymous) (test.js:0:13)
asyncCallStackTraceId is empty
diff --git a/deps/v8/test/inspector/debugger/step-out-async-await-expected.txt b/deps/v8/test/inspector/debugger/step-out-async-await-expected.txt
index 7b585762b1..83efd83046 100644
--- a/deps/v8/test/inspector/debugger/step-out-async-await-expected.txt
+++ b/deps/v8/test/inspector/debugger/step-out-async-await-expected.txt
@@ -9,9 +9,9 @@ Running test: testStepInto
#return p;
}
- return p;
- #}
- await foo();
+ debugger;
+ return p;#
+ }
await p;
p.then(() => #1);
@@ -27,9 +27,9 @@ Running test: testStepOver
#return p;
}
- return p;
- #}
- await foo();
+ debugger;
+ return p;#
+ }
await p;
p.then(() => #1);
@@ -53,9 +53,9 @@ Running test: testStepOut
#return p;
}
- return p;
- #}
- await foo();
+ debugger;
+ return p;#
+ }
await p;
p.then(() => #1);
diff --git a/deps/v8/test/inspector/debugger/step-snapshot-expected.txt b/deps/v8/test/inspector/debugger/step-snapshot-expected.txt
index f75a184252..0d8d039538 100644
--- a/deps/v8/test/inspector/debugger/step-snapshot-expected.txt
+++ b/deps/v8/test/inspector/debugger/step-snapshot-expected.txt
@@ -11,6 +11,12 @@ paused
}
paused
+function c(f#, ...args) { return f(...args); }
+
+paused
+function c(f, ...args#) { return f(...args); }
+
+paused
function c(f, ...args) { #return f(...args); }
paused
diff --git a/deps/v8/test/inspector/debugger/step-snapshot.js b/deps/v8/test/inspector/debugger/step-snapshot.js
index a4ecbf2f28..f8c5dc05b0 100644
--- a/deps/v8/test/inspector/debugger/step-snapshot.js
+++ b/deps/v8/test/inspector/debugger/step-snapshot.js
@@ -5,6 +5,7 @@
// Embed a user function in the snapshot and step through it.
// Flags: --embed 'function c(f, ...args) { return f(...args); }'
+// Flags: --no-turbo-rewrite-far-jumps
let {session, contextGroup, Protocol} = InspectorTest.start('Tests that stepping works on snapshotted function');
session.setupScriptMap();
diff --git a/deps/v8/test/inspector/debugger/stepping-ignores-injected-script-expected.txt b/deps/v8/test/inspector/debugger/stepping-ignores-injected-script-expected.txt
deleted file mode 100644
index 5a63493dc7..0000000000
--- a/deps/v8/test/inspector/debugger/stepping-ignores-injected-script-expected.txt
+++ /dev/null
@@ -1,2 +0,0 @@
-Tests that stepping ignores injected script
-InjectedSciptSource was not reached
diff --git a/deps/v8/test/inspector/debugger/stepping-ignores-injected-script.js b/deps/v8/test/inspector/debugger/stepping-ignores-injected-script.js
deleted file mode 100644
index 9021664a96..0000000000
--- a/deps/v8/test/inspector/debugger/stepping-ignores-injected-script.js
+++ /dev/null
@@ -1,21 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-let {session, contextGroup, Protocol} = InspectorTest.start('Tests that stepping ignores injected script');
-
-Protocol.Debugger.onPaused(message => {
- let url = session._scriptMap.get(message.params.callFrames[0].location.scriptId).url;
- if (url !== 'test.js') {
- InspectorTest.log('InjectedSciptSource on stack.');
- InspectorTest.completeTest();
- }
- Protocol.Debugger.stepInto();
-});
-
-session.setupScriptMap();
-Protocol.Debugger.enable();
-Protocol.Debugger.pause();
-Protocol.Runtime.evaluate({expression: 'console.log(42)//# sourceURL=test.js'})
- .then(() => InspectorTest.log('InjectedSciptSource was not reached'))
- .then(InspectorTest.completeTest);
diff --git a/deps/v8/test/inspector/debugger/stepping-with-exposed-injected-script-expected.txt b/deps/v8/test/inspector/debugger/stepping-with-exposed-injected-script-expected.txt
deleted file mode 100644
index 65c32c3ec9..0000000000
--- a/deps/v8/test/inspector/debugger/stepping-with-exposed-injected-script-expected.txt
+++ /dev/null
@@ -1,2 +0,0 @@
-Tests that stepping does not ignore injected script when passed a flag
-InjectedSciptSource on stack.
diff --git a/deps/v8/test/inspector/debugger/stepping-with-exposed-injected-script.js b/deps/v8/test/inspector/debugger/stepping-with-exposed-injected-script.js
deleted file mode 100644
index d608137c81..0000000000
--- a/deps/v8/test/inspector/debugger/stepping-with-exposed-injected-script.js
+++ /dev/null
@@ -1,22 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-// Flags: --expose-inspector-scripts
-
-let {session, contextGroup, Protocol} = InspectorTest.start('Tests that stepping does not ignore injected script when passed a flag');
-
-Protocol.Debugger.onPaused(message => {
- let url = session._scriptMap.get(message.params.callFrames[0].location.scriptId).url;
- if (url !== 'test.js') {
- InspectorTest.log('InjectedSciptSource on stack.');
- InspectorTest.completeTest();
- }
- Protocol.Debugger.stepInto();
-});
-
-session.setupScriptMap();
-Protocol.Debugger.enable();
-Protocol.Debugger.pause();
-Protocol.Runtime.evaluate({expression: 'console.log(42)//# sourceURL=test.js'})
- .then(() => InspectorTest.log('InjectedSciptSource was not reached'))
- .then(InspectorTest.completeTest);
diff --git a/deps/v8/test/inspector/debugger/terminate-execution-on-pause-expected.txt b/deps/v8/test/inspector/debugger/terminate-execution-on-pause-expected.txt
index d1a3c7b626..ee7a0db65e 100644
--- a/deps/v8/test/inspector/debugger/terminate-execution-on-pause-expected.txt
+++ b/deps/v8/test/inspector/debugger/terminate-execution-on-pause-expected.txt
@@ -3,3 +3,58 @@ Tests Runtime.terminateExecution on pause
Running test: testTerminateOnDebugger
Running test: testTerminateAtBreakpoint
+{
+ error : {
+ code : -32000
+ message : Execution was terminated
+ }
+ id : <messageId>
+}
+
+Running test: testTerminateRuntimeEvaluate
+{
+ id : <messageId>
+ result : {
+ }
+}
+{
+ error : {
+ code : -32000
+ message : Execution was terminated
+ }
+ id : <messageId>
+}
+{
+ description : 42
+ type : number
+ value : 42
+}
+
+Running test: testTerminateRuntimeEvaluateOnCallFrame
+{
+ id : <messageId>
+ result : {
+ result : {
+ description : 1
+ type : number
+ value : 1
+ }
+ }
+}
+{
+ id : <messageId>
+ result : {
+ }
+}
+{
+ error : {
+ code : -32000
+ message : Execution was terminated
+ }
+ id : <messageId>
+}
+{
+ description : 43
+ type : number
+ value : 43
+}
diff --git a/deps/v8/test/inspector/debugger/terminate-execution-on-pause.js b/deps/v8/test/inspector/debugger/terminate-execution-on-pause.js
index 3f1ea39f02..ffba181400 100644
--- a/deps/v8/test/inspector/debugger/terminate-execution-on-pause.js
+++ b/deps/v8/test/inspector/debugger/terminate-execution-on-pause.js
@@ -22,22 +22,63 @@ callback();
//# sourceURL=test.js`});
await Protocol.Debugger.oncePaused();
const terminated = Protocol.Runtime.terminateExecution();
- Protocol.Debugger.resume();
+ await Protocol.Debugger.resume();
await terminated;
},
async function testTerminateAtBreakpoint() {
Protocol.Debugger.setBreakpointByUrl({url: 'test.js', lineNumber: 2});
- Protocol.Runtime.evaluate({expression: `
+ const result = Protocol.Runtime.evaluate({expression: `
function callback() {
console.log(42);
setTimeout(callback, 0);
}
callback();
-//# sourceURL=test.js`});
+//# sourceURL=test.js`}).then(InspectorTest.logMessage);
await Protocol.Debugger.oncePaused();
const terminated = Protocol.Runtime.terminateExecution();
- Protocol.Debugger.resume();
+ await Protocol.Debugger.resume();
await terminated;
- }
+ await result;
+ },
+
+ async function testTerminateRuntimeEvaluate() {
+ Protocol.Runtime.evaluate({expression: `
+function callback() {
+ debugger;
+ console.log(42);
+ debugger;
+}
+callback();
+//# sourceURL=test.js`});
+ await Protocol.Debugger.oncePaused();
+ await Promise.all([
+ Protocol.Runtime.terminateExecution().then(InspectorTest.logMessage),
+ Protocol.Runtime.evaluate({expression: 'console.log(42)'}).then(InspectorTest.logMessage)
+ ]);
+ await Protocol.Debugger.resume();
+ await Protocol.Debugger.oncePaused();
+ await Protocol.Debugger.resume();
+ },
+
+ async function testTerminateRuntimeEvaluateOnCallFrame() {
+ Protocol.Runtime.evaluate({expression: `
+function callback() {
+ let a = 1;
+ debugger;
+ console.log(43);
+}
+callback();
+//# sourceURL=test.js`});
+ let message = await Protocol.Debugger.oncePaused();
+ let topFrameId = message.params.callFrames[0].callFrameId;
+ await Protocol.Debugger.evaluateOnCallFrame({callFrameId: topFrameId, expression: "a"})
+ .then(InspectorTest.logMessage);
+ await Promise.all([
+ Protocol.Runtime.terminateExecution().then(InspectorTest.logMessage),
+ Protocol.Debugger.evaluateOnCallFrame({callFrameId: topFrameId, expression: "a"})
+ .then(InspectorTest.logMessage)
+ ]);
+ await Protocol.Debugger.resume();
+ },
]);
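Both new tests exercise the same race: with execution paused, Runtime.terminateExecution is issued alongside an evaluation, and the evaluation is expected to come back with an "Execution was terminated" error. Condensed to its core (a sketch assembled from the test bodies above; the expression is a stand-in):

  await Protocol.Debugger.oncePaused();
  await Promise.all([
    Protocol.Runtime.terminateExecution().then(InspectorTest.logMessage),
    // Resolves with an "Execution was terminated" error message.
    Protocol.Runtime.evaluate({expression: 'console.log(42)'})
        .then(InspectorTest.logMessage),
  ]);
  await Protocol.Debugger.resume();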
diff --git a/deps/v8/test/inspector/debugger/wasm-reset-context-group-expected.txt b/deps/v8/test/inspector/debugger/wasm-reset-context-group-expected.txt
new file mode 100644
index 0000000000..a3c2dab8c3
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/wasm-reset-context-group-expected.txt
@@ -0,0 +1,18 @@
+Checks resetting context group with wasm.
+{
+ id : <messageId>
+ result : {
+ result : {
+ description : 3
+ type : number
+ value : 3
+ }
+ }
+}
+{
+ error : {
+ code : -32000
+ message : Cannot find context with specified id
+ }
+ id : <messageId>
+}
diff --git a/deps/v8/test/inspector/debugger/wasm-reset-context-group.js b/deps/v8/test/inspector/debugger/wasm-reset-context-group.js
new file mode 100644
index 0000000000..be347c0736
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/wasm-reset-context-group.js
@@ -0,0 +1,63 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+InspectorTest.log('Checks resetting context group with wasm.');
+
+utils.load('test/mjsunit/wasm/wasm-constants.js');
+utils.load('test/mjsunit/wasm/wasm-module-builder.js');
+
+var builder = new WasmModuleBuilder();
+
+// wasm_func returns its i32 parameter minus one.
+builder.addFunction('wasm_func', kSig_i_i)
+ .addBody([
+ // clang-format off
+ kExprGetLocal, 0,
+ kExprI32Const, 1,
+ kExprI32Sub,
+ // clang-format on
+ ])
+ .exportAs('main');
+
+
+var module_bytes = builder.toArray();
+
+function instantiate(bytes) {
+ var buffer = new ArrayBuffer(bytes.length);
+ var view = new Uint8Array(buffer);
+ for (var i = 0; i < bytes.length; ++i) {
+ view[i] = bytes[i] | 0;
+ }
+
+ var module = new WebAssembly.Module(buffer);
+ // Set global variable.
+ instance = new WebAssembly.Instance(module);
+}
+
+var contextGroup1 = new InspectorTest.ContextGroup();
+var session1 = contextGroup1.connect();
+session1.setupScriptMap();
+
+let contextGroup2 = new InspectorTest.ContextGroup();
+let session2 = contextGroup2.connect();
+session2.setupScriptMap();
+
+(async function test() {
+ await session1.Protocol.Debugger.enable();
+ await session2.Protocol.Debugger.enable();
+
+ session1.Protocol.Runtime.evaluate({
+ expression: `var instance;(${instantiate.toString()})(${JSON.stringify(module_bytes)})`});
+
+ session2.Protocol.Runtime.evaluate({
+ expression: `var instance;(${instantiate.toString()})(${JSON.stringify(module_bytes)})`});
+
+ contextGroup2.reset();
+
+ await session1.Protocol.Debugger.onceScriptParsed(2);
+ InspectorTest.logMessage(await session1.Protocol.Runtime.evaluate({expression: 'instance.exports.main(4)'}));
+ InspectorTest.logMessage(await session2.Protocol.Runtime.evaluate({expression: 'instance.exports.main(5)'}));
+
+ InspectorTest.completeTest();
+})();
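The instantiate helper above copies the module bytes one element at a time into a fresh ArrayBuffer. A typed-array constructor performs the same copy in one step; an equivalent sketch (not part of the patch):

  function instantiateFromBytes(bytes) {
    // new Uint8Array(arrayLike) allocates a buffer and copies the values.
    const module = new WebAssembly.Module(new Uint8Array(bytes));
    return new WebAssembly.Instance(module);
  }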
diff --git a/deps/v8/test/inspector/debugger/wasm-scope-info-expected.txt b/deps/v8/test/inspector/debugger/wasm-scope-info-expected.txt
index 7701468937..f9e900d0d1 100644
--- a/deps/v8/test/inspector/debugger/wasm-scope-info-expected.txt
+++ b/deps/v8/test/inspector/debugger/wasm-scope-info-expected.txt
@@ -10,157 +10,243 @@ Setting breakpoint on line 2 (first instruction)
scriptId : <scriptId>
}
Paused:
-(local i32 f64)
+(local i32 i64 f64)
#i32.const 11
set_local 0
+Scope:
at func (2:2):
- scope (global):
- -- skipped
+ -- skipped globals
- scope (local):
- locals: "i32Arg": 4 (number), "local#1": 0 (number), "unicode☼f64": 0 (number)
+ locals: "i32Arg": 4 (number), "local#1": 0 (number), "i64_local": 0 (number), "unicode☼f64": 0 (number)
stack:
at (anonymous) (0:17):
- scope (global):
- -- skipped
+ -- skipped globals
+
Paused:
i32.const 11
#set_local 0
i32.const 47
+Scope:
at func (3:2):
- scope (global):
- -- skipped
+ -- skipped globals
- scope (local):
- locals: "i32Arg": 4 (number), "local#1": 0 (number), "unicode☼f64": 0 (number)
+ locals: "i32Arg": 4 (number), "local#1": 0 (number), "i64_local": 0 (number), "unicode☼f64": 0 (number)
stack: "0": 11 (number)
at (anonymous) (0:17):
- scope (global):
- -- skipped
+ -- skipped globals
+
Paused:
set_local 0
#i32.const 47
set_local 1
+Scope:
at func (4:2):
- scope (global):
- -- skipped
+ -- skipped globals
- scope (local):
- locals: "i32Arg": 11 (number), "local#1": 0 (number), "unicode☼f64": 0 (number)
+ locals: "i32Arg": 11 (number), "local#1": 0 (number), "i64_local": 0 (number), "unicode☼f64": 0 (number)
stack:
at (anonymous) (0:17):
- scope (global):
- -- skipped
+ -- skipped globals
+
Paused:
i32.const 47
#set_local 1
- i32.const 1
+ i64.const 9223372036854775807
+Scope:
at func (5:2):
- scope (global):
- -- skipped
+ -- skipped globals
- scope (local):
- locals: "i32Arg": 11 (number), "local#1": 0 (number), "unicode☼f64": 0 (number)
+ locals: "i32Arg": 11 (number), "local#1": 0 (number), "i64_local": 0 (number), "unicode☼f64": 0 (number)
stack: "0": 47 (number)
at (anonymous) (0:17):
- scope (global):
- -- skipped
+ -- skipped globals
+
Paused:
set_local 1
+ #i64.const 9223372036854775807
+ set_local 2
+
+Scope:
+at func (6:2):
+ - scope (global):
+ -- skipped globals
+ - scope (local):
+ locals: "i32Arg": 11 (number), "local#1": 47 (number), "i64_local": 0 (number), "unicode☼f64": 0 (number)
+ stack:
+at (anonymous) (0:17):
+ - scope (global):
+ -- skipped globals
+
+Paused:
+ i64.const 9223372036854775807
+ #set_local 2
+ i64.const -9223372036854775808
+
+Scope:
+at func (7:2):
+ - scope (global):
+ -- skipped globals
+ - scope (local):
+ locals: "i32Arg": 11 (number), "local#1": 47 (number), "i64_local": 0 (number), "unicode☼f64": 0 (number)
+ stack: "0": 9223372036854775807 (string)
+at (anonymous) (0:17):
+ - scope (global):
+ -- skipped globals
+
+Paused:
+ set_local 2
+ #i64.const -9223372036854775808
+ set_local 2
+
+Scope:
+at func (8:2):
+ - scope (global):
+ -- skipped globals
+ - scope (local):
+ locals: "i32Arg": 11 (number), "local#1": 47 (number), "i64_local": 9223372036854775807 (string), "unicode☼f64": 0 (number)
+ stack:
+at (anonymous) (0:17):
+ - scope (global):
+ -- skipped globals
+
+Paused:
+ i64.const -9223372036854775808
+ #set_local 2
+ i32.const 1
+
+Scope:
+at func (9:2):
+ - scope (global):
+ -- skipped globals
+ - scope (local):
+ locals: "i32Arg": 11 (number), "local#1": 47 (number), "i64_local": 9223372036854775807 (string), "unicode☼f64": 0 (number)
+ stack: "0": -9223372036854775808 (string)
+at (anonymous) (0:17):
+ - scope (global):
+ -- skipped globals
+
+Paused:
+ set_local 2
#i32.const 1
f64.convert_u/i32
-at func (6:2):
+Scope:
+at func (10:2):
- scope (global):
- -- skipped
+ -- skipped globals
- scope (local):
- locals: "i32Arg": 11 (number), "local#1": 47 (number), "unicode☼f64": 0 (number)
+ locals: "i32Arg": 11 (number), "local#1": 47 (number), "i64_local": -9223372036854775808 (string), "unicode☼f64": 0 (number)
stack:
at (anonymous) (0:17):
- scope (global):
- -- skipped
+ -- skipped globals
+
Paused:
i32.const 1
#f64.convert_u/i32
i32.const 7
-at func (7:2):
+Scope:
+at func (11:2):
- scope (global):
- -- skipped
+ -- skipped globals
- scope (local):
- locals: "i32Arg": 11 (number), "local#1": 47 (number), "unicode☼f64": 0 (number)
+ locals: "i32Arg": 11 (number), "local#1": 47 (number), "i64_local": -9223372036854775808 (string), "unicode☼f64": 0 (number)
stack: "0": 1 (number)
at (anonymous) (0:17):
- scope (global):
- -- skipped
+ -- skipped globals
+
Paused:
f64.convert_u/i32
#i32.const 7
f64.convert_u/i32
-at func (8:2):
+Scope:
+at func (12:2):
- scope (global):
- -- skipped
+ -- skipped globals
- scope (local):
- locals: "i32Arg": 11 (number), "local#1": 47 (number), "unicode☼f64": 0 (number)
+ locals: "i32Arg": 11 (number), "local#1": 47 (number), "i64_local": -9223372036854775808 (string), "unicode☼f64": 0 (number)
stack: "0": 1 (number)
at (anonymous) (0:17):
- scope (global):
- -- skipped
+ -- skipped globals
+
Paused:
i32.const 7
#f64.convert_u/i32
f64.div
-at func (9:2):
+Scope:
+at func (13:2):
- scope (global):
- -- skipped
+ -- skipped globals
- scope (local):
- locals: "i32Arg": 11 (number), "local#1": 47 (number), "unicode☼f64": 0 (number)
+ locals: "i32Arg": 11 (number), "local#1": 47 (number), "i64_local": -9223372036854775808 (string), "unicode☼f64": 0 (number)
stack: "0": 1 (number), "1": 7 (number)
at (anonymous) (0:17):
- scope (global):
- -- skipped
+ -- skipped globals
+
Paused:
f64.convert_u/i32
#f64.div
- set_local 2
+ set_local 3
-at func (10:2):
+Scope:
+at func (14:2):
- scope (global):
- -- skipped
+ -- skipped globals
- scope (local):
- locals: "i32Arg": 11 (number), "local#1": 47 (number), "unicode☼f64": 0 (number)
+ locals: "i32Arg": 11 (number), "local#1": 47 (number), "i64_local": -9223372036854775808 (string), "unicode☼f64": 0 (number)
stack: "0": 1 (number), "1": 7 (number)
at (anonymous) (0:17):
- scope (global):
- -- skipped
+ -- skipped globals
+
Paused:
f64.div
- #set_local 2
+ #set_local 3
end
-at func (11:2):
+Scope:
+at func (15:2):
- scope (global):
- -- skipped
+ -- skipped globals
- scope (local):
- locals: "i32Arg": 11 (number), "local#1": 47 (number), "unicode☼f64": 0 (number)
+ locals: "i32Arg": 11 (number), "local#1": 47 (number), "i64_local": -9223372036854775808 (string), "unicode☼f64": 0 (number)
stack: "0": 0.14285714285714285 (number)
at (anonymous) (0:17):
- scope (global):
- -- skipped
+ -- skipped globals
+
Paused:
- set_local 2
+ set_local 3
#end
-at func (12:0):
+Scope:
+at func (16:0):
- scope (global):
- -- skipped
+ -- skipped globals
- scope (local):
- locals: "i32Arg": 11 (number), "local#1": 47 (number), "unicode☼f64": 0.14285714285714285 (number)
+ locals: "i32Arg": 11 (number), "local#1": 47 (number), "i64_local": -9223372036854775808 (string), "unicode☼f64": 0.14285714285714285 (number)
stack:
at (anonymous) (0:17):
- scope (global):
- -- skipped
+ -- skipped globals
+
exports.main returned. Test finished.
diff --git a/deps/v8/test/inspector/debugger/wasm-scope-info.js b/deps/v8/test/inspector/debugger/wasm-scope-info.js
index 9a20d6a733..290bd58412 100644
--- a/deps/v8/test/inspector/debugger/wasm-scope-info.js
+++ b/deps/v8/test/inspector/debugger/wasm-scope-info.js
@@ -37,15 +37,22 @@ async function instantiateWasm() {
builder.addFunction('func', kSig_v_i)
.addLocals(
- {i32_count: 1, f64_count: 1}, ['i32Arg', undefined, 'unicode☼f64'])
+ {i32_count: 1, i64_count: 1, f64_count: 1},
+ ['i32Arg', undefined, 'i64_local', 'unicode☼f64'])
.addBody([
// Set param 0 to 11.
kExprI32Const, 11, kExprSetLocal, 0,
// Set local 1 to 47.
kExprI32Const, 47, kExprSetLocal, 1,
- // Set local 2 to 1/7.
+ // Set local 2 to 0x7FFFFFFFFFFFFFFF (max i64).
+ kExprI64Const, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,
+ kExprSetLocal, 2,
+ // Set local 2 to 0x8000000000000000 (min i64).
+ kExprI64Const, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x7f,
+ kExprSetLocal, 2,
+ // Set local 3 to 1/7.
kExprI32Const, 1, kExprF64UConvertI32, kExprI32Const, 7,
- kExprF64UConvertI32, kExprF64Div, kExprSetLocal, 2
+ kExprF64UConvertI32, kExprF64Div, kExprSetLocal, 3
])
.exportAs('main');
@@ -98,26 +105,30 @@ async function waitForWasmScript() {
}
}
-async function getValueString(value) {
- if (value.type == 'object') {
- var msg = await Protocol.Runtime.getProperties({objectId: value.objectId});
- printFailure(msg);
- let printProperty = elem => '"' + elem.name + '"' +
- ': ' + elem.value.description + ' (' + elem.value.type + ')';
- return msg.result.result.map(printProperty).join(', ');
+async function getScopeValues(value) {
+ if (value.type != 'object') {
+ InspectorTest.log('Expected object. Found:');
+ InspectorTest.logObject(value);
+ return;
}
- return JSON.stringify(value.value) + ' (' + value.type + ')';
+
+ let msg = await Protocol.Runtime.getProperties({objectId: value.objectId});
+ printFailure(msg);
+ let printProperty = elem => '"' + elem.name + '"' +
+ ': ' + elem.value.value + ' (' + elem.value.type + ')';
+ return msg.result.result.map(printProperty).join(', ');
}
-async function dumpProperties(message) {
+async function dumpScopeProperties(message) {
printFailure(message);
for (var value of message.result.result) {
- var value_str = await getValueString(value.value);
+ var value_str = await getScopeValues(value.value);
InspectorTest.log(' ' + value.name + ': ' + value_str);
}
}
async function dumpScopeChainsOnPause(message) {
+ InspectorTest.log(`Scope:`);
for (var frame of message.params.callFrames) {
var functionName = frame.functionName || '(anonymous)';
var lineNumber = frame.location ? frame.location.lineNumber : frame.lineNumber;
@@ -126,12 +137,13 @@ async function dumpScopeChainsOnPause(message) {
for (var scope of frame.scopeChain) {
InspectorTest.logObject(' - scope (' + scope.type + '):');
if (scope.type == 'global') {
- InspectorTest.logObject(' -- skipped');
- } else {
- var properties = await Protocol.Runtime.getProperties(
- {'objectId': scope.object.objectId});
- await dumpProperties(properties);
+ InspectorTest.logObject(' -- skipped globals');
+ continue;
}
+ var properties = await Protocol.Runtime.getProperties(
+ {'objectId': scope.object.objectId});
+ await dumpScopeProperties(properties);
}
}
+ InspectorTest.log();
}
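The i64 constants added to the wasm body above are spelled out as signed LEB128 byte sequences: 0xff nine times then 0x00 for the maximum i64, and 0x80 nine times then 0x7f for the minimum. A small encoder reproduces those bytes (a reference sketch; the test hard-codes the sequences rather than computing them):

  function encodeSignedLeb128(value) {  // value is a BigInt
    const bytes = [];
    while (true) {
      const byte = Number(value & 0x7fn);
      value >>= 7n;  // BigInt shifts are arithmetic, so the sign is kept
      const done = (value === 0n && (byte & 0x40) === 0) ||
                   (value === -1n && (byte & 0x40) !== 0);
      bytes.push(done ? byte : byte | 0x80);
      if (done) return bytes;
    }
  }
  // encodeSignedLeb128(2n ** 63n - 1n) -> [0xff x9, 0x00]
  // encodeSignedLeb128(-(2n ** 63n))   -> [0x80 x9, 0x7f]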
diff --git a/deps/v8/test/inspector/debugger/wasm-set-breakpoint-expected.txt b/deps/v8/test/inspector/debugger/wasm-set-breakpoint-expected.txt
index 4aa2f97213..09a9395eaa 100644
--- a/deps/v8/test/inspector/debugger/wasm-set-breakpoint-expected.txt
+++ b/deps/v8/test/inspector/debugger/wasm-set-breakpoint-expected.txt
@@ -1,62 +1,58 @@
-Tests stepping through wasm scripts
-Installing code an global variable.
-Calling instantiate function.
-Waiting for two wasm scripts to be parsed.
-Ignoring script with url v8://test/callInstantiate
-Got wasm script: wasm://wasm/wasm-0c10a5fe/wasm-0c10a5fe-0
-Requesting source for wasm://wasm/wasm-0c10a5fe/wasm-0c10a5fe-0...
-Got wasm script: wasm://wasm/wasm-0c10a5fe/wasm-0c10a5fe-1
-Requesting source for wasm://wasm/wasm-0c10a5fe/wasm-0c10a5fe-1...
-func $wasm_A
- nop
- nop
-end
+Tests stepping through wasm scripts.
+Instantiating.
+Waiting for two wasm scripts (ignoring first non-wasm script).
+Source of script wasm://wasm/wasm-0c10a5fe/wasm-0c10a5fe-0:
+1: func $wasm_A
+2: nop
+3: nop
+4: end
-func $wasm_B (param i32)
- loop
- get_local 0
- if
- get_local 0
- i32.const 1
- i32.sub
- set_local 0
- call 0
- br 1
- end
- end
-end
+Source of script wasm://wasm/wasm-0c10a5fe/wasm-0c10a5fe-1:
+1: func $wasm_B (param i32)
+2: loop
+3: get_local 0
+4: if
+5: get_local 0
+6: i32.const 1
+7: i32.sub
+8: set_local 0
+9: call 0
+10: br 1
+11: end
+12: end
+13: end
-Setting breakpoint on line 8 (on the setlocal before the call), url wasm://wasm/wasm-0c10a5fe/wasm-0c10a5fe-1
-Setting breakpoint on line 7 (on the setlocal before the call), url wasm://wasm/wasm-0c10a5fe/wasm-0c10a5fe-1
-Setting breakpoint on line 6 (on the setlocal before the call), url wasm://wasm/wasm-0c10a5fe/wasm-0c10a5fe-1
-Setting breakpoint on line 5 (on the setlocal before the call), url wasm://wasm/wasm-0c10a5fe/wasm-0c10a5fe-1
-Setting breakpoint on line 3 (on the setlocal before the call), url wasm://wasm/wasm-0c10a5fe/wasm-0c10a5fe-1
-Setting breakpoint on line 4 (on the setlocal before the call), url wasm://wasm/wasm-0c10a5fe/wasm-0c10a5fe-1
-3
-4
-5
-6
-7
-8
-3
-4
-5
-6
-7
-8
-3
-4
-5
-6
-7
-8
-3
-4
-5
-6
-7
-8
-3
+Setting breakpoint on line 8 on script wasm://wasm/wasm-0c10a5fe/wasm-0c10a5fe-1
+Setting breakpoint on line 7 on script wasm://wasm/wasm-0c10a5fe/wasm-0c10a5fe-1
+Setting breakpoint on line 6 on script wasm://wasm/wasm-0c10a5fe/wasm-0c10a5fe-1
+Setting breakpoint on line 5 on script wasm://wasm/wasm-0c10a5fe/wasm-0c10a5fe-1
+Setting breakpoint on line 3 on script wasm://wasm/wasm-0c10a5fe/wasm-0c10a5fe-1
+Setting breakpoint on line 4 on script wasm://wasm/wasm-0c10a5fe/wasm-0c10a5fe-1
+Calling main(4)
+Breaking on line 3
+Breaking on line 4
+Breaking on line 5
+Breaking on line 6
+Breaking on line 7
+Breaking on line 8
+Breaking on line 3
+Breaking on line 4
+Breaking on line 5
+Breaking on line 6
+Breaking on line 7
+Breaking on line 8
+Breaking on line 3
+Breaking on line 4
+Breaking on line 5
+Breaking on line 6
+Breaking on line 7
+Breaking on line 8
+Breaking on line 3
+Breaking on line 4
+Breaking on line 5
+Breaking on line 6
+Breaking on line 7
+Breaking on line 8
+Breaking on line 3
exports.main returned!
Finished!
-
diff --git a/deps/v8/test/inspector/debugger/wasm-set-breakpoint.js b/deps/v8/test/inspector/debugger/wasm-set-breakpoint.js
index 828fac3c54..76a831392f 100644
--- a/deps/v8/test/inspector/debugger/wasm-set-breakpoint.js
+++ b/deps/v8/test/inspector/debugger/wasm-set-breakpoint.js
@@ -2,14 +2,15 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-let {session, contextGroup, Protocol} = InspectorTest.start('Tests stepping through wasm scripts');
+const {session, contextGroup, Protocol} =
+ InspectorTest.start('Tests stepping through wasm scripts.');
utils.load('test/mjsunit/wasm/wasm-constants.js');
utils.load('test/mjsunit/wasm/wasm-module-builder.js');
-var builder = new WasmModuleBuilder();
+const builder = new WasmModuleBuilder();
-var func_a_idx =
+const func_a_idx =
builder.addFunction('wasm_A', kSig_v_v).addBody([kExprNop, kExprNop]).index;
// wasm_B calls wasm_A <param0> times.
@@ -31,128 +32,66 @@ builder.addFunction('wasm_B', kSig_v_i)
])
.exportAs('main');
-var module_bytes = builder.toArray();
+const module_bytes = builder.toArray();
function instantiate(bytes) {
- var buffer = new ArrayBuffer(bytes.length);
- var view = new Uint8Array(buffer);
- for (var i = 0; i < bytes.length; ++i) {
+ let buffer = new ArrayBuffer(bytes.length);
+ let view = new Uint8Array(buffer);
+ for (let i = 0; i < bytes.length; ++i) {
view[i] = bytes[i] | 0;
}
- var module = new WebAssembly.Module(buffer);
- // Set global variable.
- instance = new WebAssembly.Instance(module);
+ let module = new WebAssembly.Module(buffer);
+ return new WebAssembly.Instance(module);
}
-var evalWithUrl = (code, url) => Protocol.Runtime.evaluate(
- {'expression': code + '\n//# sourceURL=v8://test/' + url});
+const getResult = msg => msg.result || InspectorTest.logMessage(msg);
-var wasm_B_scriptId;
-var sources = {};
-var urls = {};
-var afterTwoSourcesCallback;
+const evalWithUrl = (code, url) =>
+ Protocol.Runtime
+ .evaluate({'expression': code + '\n//# sourceURL=v8://test/' + url})
+ .then(getResult);
-Protocol.Debugger.onPaused((msg) => {
- var loc = msg.params.callFrames[0].location;
- InspectorTest.log(loc.lineNumber);
+function setBreakpoint(line, script) {
+ InspectorTest.log(
+ 'Setting breakpoint on line ' + line + ' on script ' + script.url);
+ return Protocol.Debugger
+ .setBreakpoint(
+ {'location': {'scriptId': script.scriptId, 'lineNumber': line}})
+ .then(getResult);
+}
+
+Protocol.Debugger.onPaused(pause_msg => {
+ let loc = pause_msg.params.callFrames[0].location;
+ InspectorTest.log('Breaking on line ' + loc.lineNumber);
Protocol.Debugger.resume();
});
-Protocol.Debugger.enable()
- .then(() => InspectorTest.log('Installing code an global variable.'))
- .then(
- () => evalWithUrl('var instance;\n' + instantiate.toString(), 'setup'))
- .then(() => InspectorTest.log('Calling instantiate function.'))
- .then(
- () =>
- (evalWithUrl(
- 'instantiate(' + JSON.stringify(module_bytes) + ')',
- 'callInstantiate'),
- 0))
- .then(waitForTwoWasmScripts)
- .then(
- () => InspectorTest.log(
- 'Setting breakpoint on line 8 (on the setlocal before the call), url ' +
- urls[wasm_B_scriptId]))
- .then(
- () => Protocol.Debugger.setBreakpoint(
- {'location': {'scriptId': wasm_B_scriptId, 'lineNumber': 8}}))
- .then(
- () => InspectorTest.log(
- 'Setting breakpoint on line 7 (on the setlocal before the call), url ' +
- urls[wasm_B_scriptId]))
- .then(
- () => Protocol.Debugger.setBreakpoint(
- {'location': {'scriptId': wasm_B_scriptId, 'lineNumber': 7}}))
- .then(
- () => InspectorTest.log(
- 'Setting breakpoint on line 6 (on the setlocal before the call), url ' +
- urls[wasm_B_scriptId]))
- .then(
- () => Protocol.Debugger.setBreakpoint(
- {'location': {'scriptId': wasm_B_scriptId, 'lineNumber': 6}}))
- .then(
- () => InspectorTest.log(
- 'Setting breakpoint on line 5 (on the setlocal before the call), url ' +
- urls[wasm_B_scriptId]))
- .then(
- () => Protocol.Debugger.setBreakpoint(
- {'location': {'scriptId': wasm_B_scriptId, 'lineNumber': 5}}))
- .then(
- () => InspectorTest.log(
- 'Setting breakpoint on line 3 (on the setlocal before the call), url ' +
- urls[wasm_B_scriptId]))
- .then(
- () => Protocol.Debugger.setBreakpoint(
- {'location': {'scriptId': wasm_B_scriptId, 'lineNumber': 3}}))
- .then(
- () => InspectorTest.log(
- 'Setting breakpoint on line 4 (on the setlocal before the call), url ' +
- urls[wasm_B_scriptId]))
- .then(
- () => Protocol.Debugger.setBreakpoint(
- {'location': {'scriptId': wasm_B_scriptId, 'lineNumber': 4}}))
- .then(printFailure)
- .then(() => evalWithUrl('instance.exports.main(4)', 'runWasm'))
- .then(() => InspectorTest.log('exports.main returned!'))
- .then(() => InspectorTest.log('Finished!'))
- .then(InspectorTest.completeTest);
-
-function printFailure(message) {
- if (!message.result) {
- InspectorTest.logMessage(message);
- }
- return message;
-}
-function waitForTwoWasmScripts() {
- var num = 0;
- InspectorTest.log('Waiting for two wasm scripts to be parsed.');
- var promise = new Promise(fulfill => gotBothSources = fulfill);
- function waitForMore() {
- if (num == 2) return promise;
- Protocol.Debugger.onceScriptParsed()
- .then(handleNewScript)
- .then(waitForMore);
+(async function test() {
+ await Protocol.Debugger.enable();
+ InspectorTest.log('Instantiating.');
+ // Spawn asynchronously:
+ let instantiate_code = 'const instance = (' + instantiate + ')(' +
+ JSON.stringify(module_bytes) + ');';
+ evalWithUrl(instantiate_code, 'instantiate');
+ InspectorTest.log(
+ 'Waiting for two wasm scripts (ignoring first non-wasm script).');
+ const [, {params: wasm_script_a}, {params: wasm_script_b}] =
+ await Protocol.Debugger.onceScriptParsed(3);
+ for (const script of [wasm_script_a, wasm_script_b]) {
+ InspectorTest.log('Source of script ' + script.url + ':');
+ let src_msg =
+ await Protocol.Debugger.getScriptSource({scriptId: script.scriptId});
+ let lines = getResult(src_msg).scriptSource.replace(/\s+$/, '').split('\n');
+ InspectorTest.log(
+ lines.map((line, nr) => (nr + 1) + ': ' + line).join('\n') + '\n');
}
- function handleNewScript(msg) {
- var url = msg.params.url;
- if (!url.startsWith('wasm://')) {
- InspectorTest.log('Ignoring script with url ' + url);
- return;
- }
- num += 1;
- var scriptId = msg.params.scriptId;
- urls[scriptId] = url;
- InspectorTest.log('Got wasm script: ' + url);
- if (url.substr(-2) == '-1') wasm_B_scriptId = scriptId;
- InspectorTest.log('Requesting source for ' + url + '...');
- Protocol.Debugger.getScriptSource({scriptId: scriptId})
- .then(printFailure)
- .then(msg => sources[scriptId] = msg.result.scriptSource)
- .then(InspectorTest.log)
- .then(() => Object.keys(sources).length == 2 ? gotBothSources() : 0);
+ for (const line of [8, 7, 6, 5, 3, 4]) {
+ await setBreakpoint(line, wasm_script_b);
}
- waitForMore();
- return promise;
-}
+ InspectorTest.log('Calling main(4)');
+ await evalWithUrl('instance.exports.main(4)', 'runWasm');
+ InspectorTest.log('exports.main returned!');
+ InspectorTest.log('Finished!');
+ InspectorTest.completeTest();
+})();
diff --git a/deps/v8/test/inspector/heap-profiler/sampling-heap-profiler-expected.txt b/deps/v8/test/inspector/heap-profiler/sampling-heap-profiler-expected.txt
index 2b14f901b6..5186b01928 100644
--- a/deps/v8/test/inspector/heap-profiler/sampling-heap-profiler-expected.txt
+++ b/deps/v8/test/inspector/heap-profiler/sampling-heap-profiler-expected.txt
@@ -4,4 +4,5 @@ Allocated size is zero in the beginning: true
Allocated size is more than 100KB after a chunk is allocated: true
Allocated size increased after one more chunk is allocated: true
Allocated size did not change after stopping: true
+Sample found: true
Successfully finished
diff --git a/deps/v8/test/inspector/heap-profiler/sampling-heap-profiler.js b/deps/v8/test/inspector/heap-profiler/sampling-heap-profiler.js
index 1b82a46fa2..42717631c4 100644
--- a/deps/v8/test/inspector/heap-profiler/sampling-heap-profiler.js
+++ b/deps/v8/test/inspector/heap-profiler/sampling-heap-profiler.js
@@ -38,6 +38,11 @@
const size4 = nodeSize(profile4.result.profile.head);
InspectorTest.log('Allocated size did not change after stopping:', size4 === size3);
+ const sample = profile4.result.profile.samples.find(s => s.size > 400000);
+ const hasSample = hasNode(n => n.id === sample.nodeId && n.callFrame.functionName === 'allocateChunk',
+ profile4.result.profile.head);
+ InspectorTest.log('Sample found: ' + hasSample);
+
InspectorTest.log('Successfully finished');
InspectorTest.completeTest();
@@ -45,4 +50,8 @@
return node.children.reduce((res, child) => res + nodeSize(child),
node.callFrame.functionName === 'allocateChunk' ? node.selfSize : 0);
}
+
+ function hasNode(predicate, node) {
+ return predicate(node) || node.children.some(hasNode.bind(null, predicate));
+ }
})();
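hasNode above is a plain depth-first search over the profile tree; the new assertion uses it to tie the large sample back to an allocateChunk frame. A usage sketch on a hypothetical two-node tree (the shape mirrors profile.head in the test):

  const head = {
    id: 1, callFrame: {functionName: '(root)'},
    children: [{id: 2, callFrame: {functionName: 'allocateChunk'}, children: []}],
  };
  function hasNode(predicate, node) {
    return predicate(node) || node.children.some(hasNode.bind(null, predicate));
  }
  hasNode(n => n.id === 2 && n.callFrame.functionName === 'allocateChunk', head);
  // -> true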
diff --git a/deps/v8/test/inspector/inspector-test.cc b/deps/v8/test/inspector/inspector-test.cc
index d69ee6f4e2..4f5a31d290 100644
--- a/deps/v8/test/inspector/inspector-test.cc
+++ b/deps/v8/test/inspector/inspector-test.cc
@@ -21,6 +21,15 @@
#include "test/inspector/isolate-data.h"
#include "test/inspector/task-runner.h"
+namespace v8 {
+namespace internal {
+
+extern void DisableEmbeddedBlobRefcounting();
+extern void FreeCurrentEmbeddedBlob();
+
+} // namespace internal
+} // namespace v8
+
namespace {
std::vector<TaskRunner*> task_runners;
@@ -312,6 +321,9 @@ class UtilsExtension : public IsolateData::SetupGlobalTask {
v8::FunctionTemplate::New(isolate,
&UtilsExtension::CreateContextGroup));
utils->Set(
+ ToV8String(isolate, "resetContextGroup"),
+ v8::FunctionTemplate::New(isolate, &UtilsExtension::ResetContextGroup));
+ utils->Set(
ToV8String(isolate, "connectSession"),
v8::FunctionTemplate::New(isolate, &UtilsExtension::ConnectSession));
utils->Set(
@@ -526,6 +538,18 @@ class UtilsExtension : public IsolateData::SetupGlobalTask {
v8::Int32::New(args.GetIsolate(), context_group_id));
}
+ static void ResetContextGroup(
+ const v8::FunctionCallbackInfo<v8::Value>& args) {
+ if (args.Length() != 1 || !args[0]->IsInt32()) {
+ fprintf(stderr, "Internal error: resetContextGroup(context_group_id).");
+ Exit();
+ }
+ int context_group_id = args[0].As<v8::Int32>()->Value();
+ RunSyncTask(backend_runner_, [&context_group_id](IsolateData* data) {
+ data->ResetContextGroup(context_group_id);
+ });
+ }
+
static void ConnectSession(const v8::FunctionCallbackInfo<v8::Value>& args) {
if (args.Length() != 3 || !args[0]->IsInt32() || !args[1]->IsString() ||
!args[2]->IsFunction()) {
@@ -1059,6 +1083,7 @@ int main(int argc, char* argv[]) {
v8::V8::SetFlagsFromCommandLine(&argc, argv, true);
v8::V8::InitializeExternalStartupData(argv[0]);
v8::V8::Initialize();
+ i::DisableEmbeddedBlobRefcounting();
v8::base::Semaphore ready_semaphore(0);
@@ -1072,49 +1097,56 @@ int main(int argc, char* argv[]) {
}
}
- IsolateData::SetupGlobalTasks frontend_extensions;
- frontend_extensions.emplace_back(new UtilsExtension());
- TaskRunner frontend_runner(std::move(frontend_extensions), true,
- &ready_semaphore, nullptr, false);
- ready_semaphore.Wait();
+ {
+ IsolateData::SetupGlobalTasks frontend_extensions;
+ frontend_extensions.emplace_back(new UtilsExtension());
+ TaskRunner frontend_runner(std::move(frontend_extensions), true,
+ &ready_semaphore, nullptr, false);
+ ready_semaphore.Wait();
+
+ int frontend_context_group_id = 0;
+ RunSyncTask(&frontend_runner,
+ [&frontend_context_group_id](IsolateData* data) {
+ frontend_context_group_id = data->CreateContextGroup();
+ });
- int frontend_context_group_id = 0;
- RunSyncTask(&frontend_runner,
- [&frontend_context_group_id](IsolateData* data) {
- frontend_context_group_id = data->CreateContextGroup();
- });
-
- IsolateData::SetupGlobalTasks backend_extensions;
- backend_extensions.emplace_back(new SetTimeoutExtension());
- backend_extensions.emplace_back(new InspectorExtension());
- TaskRunner backend_runner(std::move(backend_extensions), false,
- &ready_semaphore,
- startup_data.data ? &startup_data : nullptr, true);
- ready_semaphore.Wait();
- UtilsExtension::set_backend_task_runner(&backend_runner);
+ IsolateData::SetupGlobalTasks backend_extensions;
+ backend_extensions.emplace_back(new SetTimeoutExtension());
+ backend_extensions.emplace_back(new InspectorExtension());
+ TaskRunner backend_runner(
+ std::move(backend_extensions), false, &ready_semaphore,
+ startup_data.data ? &startup_data : nullptr, true);
+ ready_semaphore.Wait();
+ UtilsExtension::set_backend_task_runner(&backend_runner);
+
+ task_runners.push_back(&frontend_runner);
+ task_runners.push_back(&backend_runner);
+
+ for (int i = 1; i < argc; ++i) {
+ // Ignore unknown flags.
+ if (argv[i] == nullptr || argv[i][0] == '-') continue;
+
+ bool exists = false;
+ std::string chars = v8::internal::ReadFile(argv[i], &exists, true);
+ if (!exists) {
+ fprintf(stderr, "Internal error: script file doesn't exist: %s\n",
+ argv[i]);
+ Exit();
+ }
+ frontend_runner.Append(
+ new ExecuteStringTask(chars, frontend_context_group_id));
+ }
- task_runners.push_back(&frontend_runner);
- task_runners.push_back(&backend_runner);
+ frontend_runner.Join();
+ backend_runner.Join();
- for (int i = 1; i < argc; ++i) {
- // Ignore unknown flags.
- if (argv[i] == nullptr || argv[i][0] == '-') continue;
+ UtilsExtension::ClearAllSessions();
+ delete startup_data.data;
- bool exists = false;
- std::string chars = v8::internal::ReadFile(argv[i], &exists, true);
- if (!exists) {
- fprintf(stderr, "Internal error: script file doesn't exists: %s\n",
- argv[i]);
- Exit();
- }
- frontend_runner.Append(
- new ExecuteStringTask(chars, frontend_context_group_id));
+ // TaskRunners go out of scope here, which causes Isolate teardown and all
+ // running background tasks to be properly joined.
}
- frontend_runner.Join();
- backend_runner.Join();
-
- delete startup_data.data;
- UtilsExtension::ClearAllSessions();
+ i::FreeCurrentEmbeddedBlob();
return 0;
}
diff --git a/deps/v8/test/inspector/inspector.status b/deps/v8/test/inspector/inspector.status
index 56a92c0dfc..b4f04ce7b0 100644
--- a/deps/v8/test/inspector/inspector.status
+++ b/deps/v8/test/inspector/inspector.status
@@ -9,12 +9,17 @@
'debugger/wasm-imports': [SKIP],
# https://crbug.com/v8/7932
'runtime/command-line-api-without-side-effects': [SKIP],
+
+ # These tests require optimization, so they cannot run in Lite mode.
+ 'cpu-profiler/coverage-block': [PASS, ['lite_mode == True', SKIP]],
+ 'cpu-profiler/coverage': [PASS, ['lite_mode == True', SKIP]],
+
+ # Bad OOM timing on noembed builds (https://crbug.com/v8/8494).
+ 'debugger/pause-on-oom': [PASS, ['embedded_builtins == False', SKIP]],
}], # ALWAYS
##############################################################################
['system == android', {
- # https://crbug.com/v8/8160
- 'debugger/stepping-with-exposed-injected-script': [FAIL],
# https://crbug.com/v8/8197
'debugger/get-possible-breakpoints-class-fields': [SKIP],
}], # 'system == android'
@@ -30,6 +35,28 @@
}], # variant != default
##############################################################################
+['lite_mode', {
+ # Lite mode does not allocate feedback vectors.
+ 'type-profiler/type-profile-start-stop': [SKIP],
+ 'type-profiler/type-profile': [SKIP],
+ 'type-profiler/type-profile-with-to-string-tag': [SKIP],
+ 'type-profiler/type-profile-with-classes': [SKIP],
+ 'type-profiler/type-profile-disable': [SKIP],
+
+ # TODO(v8:7777): Re-enable once wasm is supported in jitless mode.
+ 'debugger/asm-js-stack': [SKIP],
+ 'debugger/asm-js-breakpoint-before-exec': [SKIP],
+ 'debugger/asm-js-breakpoint-during-exec': [SKIP],
+ 'debugger/wasm-*': [SKIP],
+}], # 'lite_mode'
+
+##############################################################################
+['(arch == arm or arch == arm64) and simulator_run', {
+ # Slow tests: https://crbug.com/v8/7783
+ 'runtime/console-messages-limits': [PASS, NO_VARIANTS, ['mode == debug', SKIP]],
+}], # (arch == arm or arch == arm64) and simulator_run
+
+##############################################################################
['variant == no_wasm_traps', {
'*': [SKIP],
}], # variant == no_wasm_traps
@@ -39,4 +66,11 @@
# Stack manipulations in LiveEdit is not implemented for this arch.
'debugger/set-script-source-stack-padding': [SKIP],
}], # 'arch == s390 or arch == s390x'
+
+##############################################################################
+['no_snap == True', {
+ # https://crbug.com/v8/8521
+ 'debugger/pause-on-oom': [SKIP],
+}],
+
]
diff --git a/deps/v8/test/inspector/isolate-data.cc b/deps/v8/test/inspector/isolate-data.cc
index 15690370cc..2a5f8e1c84 100644
--- a/deps/v8/test/inspector/isolate-data.cc
+++ b/deps/v8/test/inspector/isolate-data.cc
@@ -110,6 +110,10 @@ v8::Local<v8::Context> IsolateData::GetContext(int context_group_id) {
return contexts_[context_group_id].Get(isolate_.get());
}
+void IsolateData::ResetContextGroup(int context_group_id) {
+ inspector_->resetContextGroup(context_group_id);
+}
+
int IsolateData::GetContextGroupId(v8::Local<v8::Context> context) {
return static_cast<int>(
reinterpret_cast<intptr_t>(
@@ -242,7 +246,7 @@ void IsolateData::DumpAsyncTaskStacksStateForTest() {
int IsolateData::HandleMessage(v8::Local<v8::Message> message,
v8::Local<v8::Value> exception) {
v8::Isolate* isolate = v8::Isolate::GetCurrent();
- v8::Local<v8::Context> context = isolate->GetEnteredContext();
+ v8::Local<v8::Context> context = isolate->GetEnteredOrMicrotaskContext();
if (context.IsEmpty()) return 0;
v8_inspector::V8Inspector* inspector =
IsolateData::FromContext(context)->inspector_.get();
@@ -285,7 +289,7 @@ void IsolateData::MessageHandler(v8::Local<v8::Message> message,
// static
void IsolateData::PromiseRejectHandler(v8::PromiseRejectMessage data) {
v8::Isolate* isolate = v8::Isolate::GetCurrent();
- v8::Local<v8::Context> context = isolate->GetEnteredContext();
+ v8::Local<v8::Context> context = isolate->GetEnteredOrMicrotaskContext();
if (context.IsEmpty()) return;
v8::Local<v8::Promise> promise = data.GetPromise();
v8::Local<v8::Private> id_private = v8::Private::ForApi(
diff --git a/deps/v8/test/inspector/isolate-data.h b/deps/v8/test/inspector/isolate-data.h
index d0a263e573..6d68a85776 100644
--- a/deps/v8/test/inspector/isolate-data.h
+++ b/deps/v8/test/inspector/isolate-data.h
@@ -35,6 +35,7 @@ class IsolateData : public v8_inspector::V8InspectorClient {
// Setting things up.
int CreateContextGroup();
+ void ResetContextGroup(int context_group_id);
v8::Local<v8::Context> GetContext(int context_group_id);
int GetContextGroupId(v8::Local<v8::Context> context);
void RegisterModule(v8::Local<v8::Context> context,
diff --git a/deps/v8/test/inspector/protocol-test.js b/deps/v8/test/inspector/protocol-test.js
index d395808b91..ba7c26295c 100644
--- a/deps/v8/test/inspector/protocol-test.js
+++ b/deps/v8/test/inspector/protocol-test.js
@@ -33,32 +33,32 @@ InspectorTest.startDumpingProtocolMessages = function() {
}
InspectorTest.logMessage = function(originalMessage) {
- var message = JSON.parse(JSON.stringify(originalMessage));
- if (message.id)
- message.id = "<messageId>";
-
const nonStableFields = new Set([
'objectId', 'scriptId', 'exceptionId', 'timestamp', 'executionContextId',
'callFrameId', 'breakpointId', 'bindRemoteObjectFunctionId',
- 'formatterObjectId', 'debuggerId'
+ 'formatterObjectId', 'debuggerId', 'bodyGetterId'
]);
- var objects = [ message ];
- while (objects.length) {
- var object = objects.shift();
- if (object && object.name === '[[StableObjectId]]')
- object.value = '<StablectObjectId>';
- for (var key in object) {
- if (nonStableFields.has(key))
- object[key] = `<${key}>`;
- else if (typeof object[key] === "string" && object[key].match(/\d+:\d+:\d+:\d+/))
- object[key] = object[key].substring(0, object[key].lastIndexOf(':')) + ":<scriptId>";
- else if (typeof object[key] === "object")
- objects.push(object[key]);
- }
- }
+ const message = JSON.parse(JSON.stringify(originalMessage, replacer.bind(null, Symbol(), nonStableFields)));
+ if (message.id)
+ message.id = '<messageId>';
InspectorTest.logObject(message);
return originalMessage;
+
+ function replacer(stableIdSymbol, nonStableFields, name, val) {
+ if (nonStableFields.has(name))
+ return `<${name}>`;
+ if (name === 'internalProperties') {
+ const stableId = val.find(prop => prop.name === '[[StableObjectId]]');
+ if (stableId)
+ stableId.value[stableIdSymbol] = true;
+ }
+ if (name === 'parentId')
+ return { id: '<id>' };
+ if (val && val[stableIdSymbol])
+ return '<StablectObjectId>';
+ return val;
+ }
}
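
Editorial note: the rewritten logMessage folds the old breadth-first walk over the parsed message into a JSON.stringify replacer, which visits every key/value pair during serialization. A minimal standalone sketch of the same masking technique (the field names here are illustrative, not the harness's full set):

```js
// Mask non-deterministic protocol fields with a JSON.stringify
// replacer instead of walking the parsed object by hand.
const nonStableFields = new Set(['objectId', 'timestamp']);

function maskUnstable(key, value) {
  // The replacer is invoked for every property during stringification,
  // so one function covers arbitrarily nested messages.
  return nonStableFields.has(key) ? `<${key}>` : value;
}

const message = { id: 7, result: { objectId: '1.2.3', timestamp: Date.now() } };
console.log(JSON.parse(JSON.stringify(message, maskUnstable)));
// -> { id: 7, result: { objectId: '<objectId>', timestamp: '<timestamp>' } }
```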
InspectorTest.logObject = function(object, title) {
@@ -140,16 +140,28 @@ InspectorTest.ContextGroup = class {
return new InspectorTest.Session(this);
}
+ reset() {
+ utils.resetContextGroup(this.id);
+ }
+
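Editorial note: reset() is the JS-side entry point for the utils.resetContextGroup binding added earlier in this patch. A hedged sketch of how an inspector test might use it (InspectorTest, contextGroup and Protocol are harness globals; the expressions are illustrative):

```js
// Hypothetical test: verify that per-group inspector state is dropped.
(async function test() {
  await Protocol.Runtime.evaluate({ expression: 'var scratch = 1' });
  contextGroup.reset(); // forwards to utils.resetContextGroup(this.id)
  // From here on the context group behaves as freshly created.
  InspectorTest.completeTest();
})();
```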
setupInjectedScriptEnvironment(session) {
let scriptSource = '';
- // First define all getters on Object.prototype.
- let injectedScriptSource = utils.read('src/inspector/injected-script-source.js');
- let getterRegex = /\.[a-zA-Z0-9]+/g;
- let match;
- let getters = new Set();
- while (match = getterRegex.exec(injectedScriptSource)) {
- getters.add(match[0].substr(1));
- }
+ let getters = ["length","internalConstructorName","subtype","getProperty",
+ "objectHasOwnProperty","nullifyPrototype","primitiveTypes",
+ "closureTypes","prototype","all","RemoteObject","bind",
+ "PropertyDescriptor","object","get","set","value","configurable",
+ "enumerable","symbol","getPrototypeOf","nativeAccessorDescriptor",
+ "isBuiltin","hasGetter","hasSetter","getOwnPropertyDescriptor",
+ "description","formatAccessorsAsProperties","isOwn","name",
+ "typedArrayProperties","keys","getOwnPropertyNames",
+ "getOwnPropertySymbols","isPrimitiveValue","com","toLowerCase",
+ "ELEMENT","trim","replace","DOCUMENT","size","byteLength","toString",
+ "stack","substr","message","indexOf","key","type","unserializableValue",
+ "objectId","className","preview","proxyTargetValue","customPreview",
+ "CustomPreview","resolve","then","console","error","header","hasBody",
+ "stringify","ObjectPreview","ObjectPreviewType","properties",
+ "ObjectPreviewSubtype","getInternalProperties","wasThrown","indexes",
+ "overflow","valuePreview","entries"];
scriptSource += `(function installSettersAndGetters() {
let defineProperty = Object.defineProperty;
let ObjectPrototype = Object.prototype;
@@ -158,7 +170,7 @@ InspectorTest.ContextGroup = class {
set() { debugger; throw 42; }, get() { debugger; throw 42; },
__proto__: null
});`,
- scriptSource += Array.from(getters).map(getter => `
+ scriptSource += getters.map(getter => `
defineProperty(ObjectPrototype, '${getter}', {
set() { debugger; throw 42; }, get() { debugger; throw 42; },
__proto__: null
@@ -168,8 +180,6 @@ InspectorTest.ContextGroup = class {
if (session) {
InspectorTest.log('WARNING: setupInjectedScriptEnvironment with debug flag for debugging only and should not be landed.');
- InspectorTest.log('WARNING: run test with --expose-inspector-scripts flag to get more details.');
- InspectorTest.log('WARNING: you can additionally comment rjsmin in xxd.py to get unminified injected-script-source.js.');
session.setupScriptMap();
session.Protocol.Debugger.enable();
session.Protocol.Debugger.onPaused(message => {
@@ -401,6 +411,9 @@ InspectorTest.runTestSuite = function(testSuite) {
}
InspectorTest.runAsyncTestSuite = async function(testSuite) {
+ const selected = testSuite.filter(test => test.name.startsWith('f_'));
+ if (selected.length)
+ testSuite = selected;
for (var test of testSuite) {
InspectorTest.log("\nRunning test: " + test.name);
try {
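
Editorial note: the filter added above is a local focus mechanism, comparable to fit/fdescribe in other test runners: while debugging, renaming a test with an f_ prefix makes runAsyncTestSuite execute only the focused tests. A short sketch (the test bodies are illustrative):

```js
// Only f_testSecond runs while any focused test exists; the prefix is
// a debugging aid and is not meant to be committed.
InspectorTest.runAsyncTestSuite([
  async function testFirst() { InspectorTest.log('skipped while focusing'); },
  async function f_testSecond() { InspectorTest.log('focused'); },
]);
```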
diff --git a/deps/v8/test/inspector/runtime/console-messages-limits-expected.txt b/deps/v8/test/inspector/runtime/console-messages-limits-expected.txt
index 3d1cd9f526..73433e3f54 100644
--- a/deps/v8/test/inspector/runtime/console-messages-limits-expected.txt
+++ b/deps/v8/test/inspector/runtime/console-messages-limits-expected.txt
@@ -4,4 +4,4 @@ Running test: testMaxConsoleMessagesCount
Messages reported: 1000
Running test: testMaxConsoleMessagesV8Size
-Messages reported: 3
+Messages reported: 5
diff --git a/deps/v8/test/inspector/runtime/console-methods-expected.txt b/deps/v8/test/inspector/runtime/console-methods-expected.txt
index 45373f9dc7..0bffa5fc7a 100644
--- a/deps/v8/test/inspector/runtime/console-methods-expected.txt
+++ b/deps/v8/test/inspector/runtime/console-methods-expected.txt
@@ -239,6 +239,7 @@ Checks console methods
name : 0
subtype : array
type : object
+ value : Array(2)
valuePreview : {
description : Array(2)
overflow : false
@@ -262,6 +263,7 @@ Checks console methods
name : 1
subtype : array
type : object
+ value : Array(2)
valuePreview : {
description : Array(2)
overflow : false
@@ -328,6 +330,7 @@ Checks console methods
name : 0
subtype : array
type : object
+ value : Array(2)
valuePreview : {
description : Array(2)
overflow : false
@@ -346,6 +349,7 @@ Checks console methods
name : 1
subtype : array
type : object
+ value : Array(2)
valuePreview : {
description : Array(2)
overflow : false
@@ -949,3 +953,166 @@ Checks console methods
type : warning
}
}
+{
+ method : Runtime.consoleAPICalled
+ params : {
+ args : [
+ [0] : {
+ type : string
+ value : foo
+ }
+ ]
+ executionContextId : <executionContextId>
+ stackTrace : {
+ callFrames : [
+ [0] : {
+ columnNumber : 10
+ functionName : testFunction
+ lineNumber : 41
+ scriptId : <scriptId>
+ url : test.js
+ }
+ [1] : {
+ columnNumber : 0
+ functionName :
+ lineNumber : 0
+ scriptId : <scriptId>
+ url :
+ }
+ ]
+ }
+ timestamp : <timestamp>
+ type : table
+ }
+}
+{
+ method : Runtime.consoleAPICalled
+ params : {
+ args : [
+ [0] : {
+ description : 2
+ type : number
+ value : 2
+ }
+ ]
+ executionContextId : <executionContextId>
+ stackTrace : {
+ callFrames : [
+ [0] : {
+ columnNumber : 10
+ functionName : testFunction
+ lineNumber : 42
+ scriptId : <scriptId>
+ url : test.js
+ }
+ [1] : {
+ columnNumber : 0
+ functionName :
+ lineNumber : 0
+ scriptId : <scriptId>
+ url :
+ }
+ ]
+ }
+ timestamp : <timestamp>
+ type : table
+ }
+}
+{
+ method : Runtime.consoleAPICalled
+ params : {
+ args : [
+ [0] : {
+ description : 2n
+ type : bigint
+ unserializableValue : 2n
+ }
+ ]
+ executionContextId : <executionContextId>
+ stackTrace : {
+ callFrames : [
+ [0] : {
+ columnNumber : 10
+ functionName : testFunction
+ lineNumber : 43
+ scriptId : <scriptId>
+ url : test.js
+ }
+ [1] : {
+ columnNumber : 0
+ functionName :
+ lineNumber : 0
+ scriptId : <scriptId>
+ url :
+ }
+ ]
+ }
+ timestamp : <timestamp>
+ type : table
+ }
+}
+{
+ method : Runtime.consoleAPICalled
+ params : {
+ args : [
+ [0] : {
+ description : Symbol(foo)
+ objectId : <objectId>
+ type : symbol
+ }
+ ]
+ executionContextId : <executionContextId>
+ stackTrace : {
+ callFrames : [
+ [0] : {
+ columnNumber : 10
+ functionName : testFunction
+ lineNumber : 44
+ scriptId : <scriptId>
+ url : test.js
+ }
+ [1] : {
+ columnNumber : 0
+ functionName :
+ lineNumber : 0
+ scriptId : <scriptId>
+ url :
+ }
+ ]
+ }
+ timestamp : <timestamp>
+ type : table
+ }
+}
+{
+ method : Runtime.consoleAPICalled
+ params : {
+ args : [
+ [0] : {
+ type : string
+ value : function() {}
+ }
+ ]
+ executionContextId : <executionContextId>
+ stackTrace : {
+ callFrames : [
+ [0] : {
+ columnNumber : 10
+ functionName : testFunction
+ lineNumber : 45
+ scriptId : <scriptId>
+ url : test.js
+ }
+ [1] : {
+ columnNumber : 0
+ functionName :
+ lineNumber : 0
+ scriptId : <scriptId>
+ url :
+ }
+ ]
+ }
+ timestamp : <timestamp>
+ type : table
+ }
+}
diff --git a/deps/v8/test/inspector/runtime/console-methods.js b/deps/v8/test/inspector/runtime/console-methods.js
index b7b0462e15..3b410ad528 100644
--- a/deps/v8/test/inspector/runtime/console-methods.js
+++ b/deps/v8/test/inspector/runtime/console-methods.js
@@ -15,7 +15,7 @@ function testFunction() {
console.dir('dir');
console.dirxml('dirxml');
console.table([[1,2],[3,4]]);
- console.table([[1,2],[3,4]], [1,2]);
+ console.table([[1,2],[3,4]], ['1','2']);
console.trace('trace');
console.trace();
console.group();
@@ -39,6 +39,11 @@ function testFunction() {
console.countReset('default');
console.count();
console.countReset('countReset');
+ console.table('foo');
+ console.table(2);
+ console.table(2n);
+ console.table(Symbol('foo'));
+ console.table(function() {});
}
//# sourceURL=test.js`, 7, 26);
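
Editorial note: the column filter now uses string keys because console.table's optional second argument selects columns by property name; the new trailing calls exercise scalar arguments, which per the expectations above surface as plain consoleAPICalled events of type table rather than structured previews. A runnable illustration:

```js
// console.table(data, columns): columns is an array of property
// names (strings) to keep; other columns are omitted.
const rows = [
  { firstName: 'John', lastName: 'Smith' },
  { firstName: 'Jane', lastName: 'Doe' },
];
console.table(rows, ['firstName']); // only the firstName column
console.table('foo');               // scalar: logged without table structure
```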
diff --git a/deps/v8/test/inspector/runtime/console-table-expected.txt b/deps/v8/test/inspector/runtime/console-table-expected.txt
new file mode 100644
index 0000000000..e00708b587
--- /dev/null
+++ b/deps/v8/test/inspector/runtime/console-table-expected.txt
@@ -0,0 +1,385 @@
+console.table
+{
+ description : Array(3)
+ overflow : false
+ properties : [
+ [0] : {
+ name : 0
+ type : string
+ value : apples
+ }
+ [1] : {
+ name : 1
+ type : string
+ value : oranges
+ }
+ [2] : {
+ name : 2
+ type : string
+ value : bananas
+ }
+ ]
+ subtype : array
+ type : object
+}
+{
+ description : Person
+ overflow : false
+ properties : [
+ [0] : {
+ name : firstName
+ type : string
+ value : John
+ }
+ [1] : {
+ name : lastName
+ type : string
+ value : Smith
+ }
+ ]
+ type : object
+}
+{
+ description : Array(3)
+ overflow : false
+ properties : [
+ [0] : {
+ name : 0
+ subtype : array
+ type : object
+ value : Array(2)
+ valuePreview : {
+ description : Array(2)
+ overflow : false
+ properties : [
+ [0] : {
+ name : 0
+ type : string
+ value : John
+ }
+ [1] : {
+ name : 1
+ type : string
+ value : Smith
+ }
+ ]
+ subtype : array
+ type : object
+ }
+ }
+ [1] : {
+ name : 1
+ subtype : array
+ type : object
+ value : Array(2)
+ valuePreview : {
+ description : Array(2)
+ overflow : false
+ properties : [
+ [0] : {
+ name : 0
+ type : string
+ value : Jane
+ }
+ [1] : {
+ name : 1
+ type : string
+ value : Doe
+ }
+ ]
+ subtype : array
+ type : object
+ }
+ }
+ [2] : {
+ name : 2
+ subtype : array
+ type : object
+ value : Array(2)
+ valuePreview : {
+ description : Array(2)
+ overflow : false
+ properties : [
+ [0] : {
+ name : 0
+ type : string
+ value : Emily
+ }
+ [1] : {
+ name : 1
+ type : string
+ value : Jones
+ }
+ ]
+ subtype : array
+ type : object
+ }
+ }
+ ]
+ subtype : array
+ type : object
+}
+{
+ description : Array(3)
+ overflow : false
+ properties : [
+ [0] : {
+ name : 0
+ type : object
+ value : Person
+ valuePreview : {
+ description : Person
+ overflow : false
+ properties : [
+ [0] : {
+ name : firstName
+ type : string
+ value : John
+ }
+ [1] : {
+ name : lastName
+ type : string
+ value : Smith
+ }
+ ]
+ type : object
+ }
+ }
+ [1] : {
+ name : 1
+ type : object
+ value : Person
+ valuePreview : {
+ description : Person
+ overflow : false
+ properties : [
+ [0] : {
+ name : firstName
+ type : string
+ value : Jane
+ }
+ [1] : {
+ name : lastName
+ type : string
+ value : Doe
+ }
+ ]
+ type : object
+ }
+ }
+ [2] : {
+ name : 2
+ type : object
+ value : Person
+ valuePreview : {
+ description : Person
+ overflow : false
+ properties : [
+ [0] : {
+ name : firstName
+ type : string
+ value : Emily
+ }
+ [1] : {
+ name : lastName
+ type : string
+ value : Jones
+ }
+ ]
+ type : object
+ }
+ }
+ ]
+ subtype : array
+ type : object
+}
+{
+ description : Object
+ overflow : false
+ properties : [
+ [0] : {
+ name : mother
+ type : object
+ value : Person
+ valuePreview : {
+ description : Person
+ overflow : false
+ properties : [
+ [0] : {
+ name : firstName
+ type : string
+ value : Jane
+ }
+ [1] : {
+ name : lastName
+ type : string
+ value : Smith
+ }
+ ]
+ type : object
+ }
+ }
+ [1] : {
+ name : father
+ type : object
+ value : Person
+ valuePreview : {
+ description : Person
+ overflow : false
+ properties : [
+ [0] : {
+ name : firstName
+ type : string
+ value : John
+ }
+ [1] : {
+ name : lastName
+ type : string
+ value : Smith
+ }
+ ]
+ type : object
+ }
+ }
+ [2] : {
+ name : daughter
+ type : object
+ value : Person
+ valuePreview : {
+ description : Person
+ overflow : false
+ properties : [
+ [0] : {
+ name : firstName
+ type : string
+ value : Emily
+ }
+ [1] : {
+ name : lastName
+ type : string
+ value : Smith
+ }
+ ]
+ type : object
+ }
+ }
+ ]
+ type : object
+}
+{
+ description : Array(3)
+ overflow : false
+ properties : [
+ [0] : {
+ name : 0
+ type : object
+ value : Person
+ valuePreview : {
+ description : Person
+ overflow : false
+ properties : [
+ [0] : {
+ name : firstName
+ type : string
+ value : John
+ }
+ ]
+ type : object
+ }
+ }
+ [1] : {
+ name : 1
+ type : object
+ value : Person
+ valuePreview : {
+ description : Person
+ overflow : false
+ properties : [
+ [0] : {
+ name : firstName
+ type : string
+ value : Jane
+ }
+ ]
+ type : object
+ }
+ }
+ [2] : {
+ name : 2
+ type : object
+ value : Person
+ valuePreview : {
+ description : Person
+ overflow : false
+ properties : [
+ [0] : {
+ name : firstName
+ type : string
+ value : Emily
+ }
+ ]
+ type : object
+ }
+ }
+ ]
+ subtype : array
+ type : object
+}
+last value property:
+{
+ name : 1
+ type : string
+ value : b
+}
+last property:
+{
+ name : 998
+ subtype : array
+ type : object
+ value : Array(2)
+ valuePreview : {
+ description : Array(2)
+ overflow : false
+ properties : <ValuePreviewPropertiesArray(2)>
+ subtype : array
+ type : object
+ }
+}
+preview:
+{
+ description : Array(999)
+ overflow : false
+ properties : <PropertiesArray(999)>
+ subtype : array
+ type : object
+}
+
+last value property:
+{
+ name : 1
+ type : string
+ value : b
+}
+last property:
+{
+ name : 999
+ subtype : array
+ type : object
+ value : Array(2)
+ valuePreview : {
+ description : Array(2)
+ overflow : false
+ properties : <ValuePreviewPropertiesArray(2)>
+ subtype : array
+ type : object
+ }
+}
+preview:
+{
+ description : Array(1001)
+ overflow : true
+ properties : <PropertiesArray(1000)>
+ subtype : array
+ type : object
+}
+
diff --git a/deps/v8/test/inspector/runtime/console-table.js b/deps/v8/test/inspector/runtime/console-table.js
new file mode 100644
index 0000000000..70e3548c14
--- /dev/null
+++ b/deps/v8/test/inspector/runtime/console-table.js
@@ -0,0 +1,102 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+const { session, contextGroup, Protocol } =
+ InspectorTest.start('console.table');
+
+(async function test() {
+ Protocol.Runtime.enable();
+ Protocol.Runtime.evaluate({
+ expression: `console.table(['apples', 'oranges', 'bananas'])`
+ });
+ await waitConsoleAPICalledAndDump();
+ Protocol.Runtime.evaluate({
+ expression: `function Person(firstName, lastName) {
+ this.firstName = firstName;
+ this.lastName = lastName;
+ };
+ var me = new Person('John', 'Smith');
+ console.table(me);`
+ });
+ await waitConsoleAPICalledAndDump();
+ Protocol.Runtime.evaluate({
+ expression: `var people = [
+ ['John', 'Smith'], ['Jane', 'Doe'], ['Emily', 'Jones']];
+ console.table(people);`
+ });
+ await waitConsoleAPICalledAndDump();
+ Protocol.Runtime.evaluate({
+ expression: `function Person(firstName, lastName) {
+ this.firstName = firstName;
+ this.lastName = lastName;
+ }
+
+ var john = new Person('John', 'Smith');
+ var jane = new Person('Jane', 'Doe');
+ var emily = new Person('Emily', 'Jones');
+
+ console.table([john, jane, emily]);`
+ });
+ await waitConsoleAPICalledAndDump();
+ Protocol.Runtime.evaluate({
+ expression: `var family = {};
+
+ family.mother = new Person('Jane', 'Smith');
+ family.father = new Person('John', 'Smith');
+ family.daughter = new Person('Emily', 'Smith');
+
+ console.table(family);`
+ });
+ await waitConsoleAPICalledAndDump();
+ Protocol.Runtime.evaluate({
+ expression: `console.table([john, jane, emily], ['firstName'])`
+ });
+ await waitConsoleAPICalledAndDump();
+ Protocol.Runtime.evaluate({
+ expression: `var bigTable = new Array(999);
+ bigTable.fill(['a', 'b']);
+ console.table(bigTable);`
+ });
+ await waitConsoleAPICalledAndDump(true /* concise */);
+ Protocol.Runtime.evaluate({
+ expression: `var bigTable = new Array(1001);
+ bigTable.fill(['a', 'b']);
+ console.table(bigTable);`
+ });
+ await waitConsoleAPICalledAndDump(true /* concise */);
+ InspectorTest.completeTest();
+})()
+
+/**
+ * @param {boolean=} concise
+ */
+async function waitConsoleAPICalledAndDump(concise) {
+ const { params : {
+ args: [ arg ]
+ } } = await Protocol.Runtime.onceConsoleAPICalled();
+ const preview = arg.preview;
+ if (concise)
+ simplifyAndPrintLast(preview);
+ else
+ InspectorTest.logMessage(arg.preview);
+
+ function simplifyAndPrintLast(preview) {
+ if (!Array.isArray(preview.properties))
+ return;
+ const properties = preview.properties;
+ const lastProperty = properties[properties.length - 1];
+ if (lastProperty.valuePreview && lastProperty.valuePreview.properties) {
+ const innerProperties = lastProperty.valuePreview.properties;
+ InspectorTest.logMessage(`last value property:`);
+ InspectorTest.logMessage(innerProperties[innerProperties.length - 1]);
+ lastProperty.valuePreview.properties = `<ValuePreviewPropertiesArray(${innerProperties.length})>`;
+ }
+ InspectorTest.logMessage(`last property:`);
+ InspectorTest.logMessage(lastProperty);
+ preview.properties = `<PropertiesArray(${properties.length})>`;
+ InspectorTest.logMessage(`preview:`);
+ InspectorTest.logMessage(preview);
+ InspectorTest.logMessage(``);
+ }
+}
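
Editorial note: the concise mode in the test above exists because object previews are capped; per the expected output, a 999-row table is delivered in full (overflow: false) while a 1001-row table is truncated to 1000 properties with overflow: true. A small helper in the same spirit (the names are illustrative, not part of the harness):

```js
// Summarize the fields the test asserts on: the preview's overflow
// flag and how many properties actually made it into the preview.
function summarizePreview(preview) {
  return {
    description: preview.description,
    overflow: preview.overflow,
    shown: Array.isArray(preview.properties) ? preview.properties.length : 0,
  };
}

console.log(summarizePreview({
  description: 'Array(1001)',
  overflow: true,
  properties: new Array(1000).fill({}),
}));
// -> { description: 'Array(1001)', overflow: true, shown: 1000 }
```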
diff --git a/deps/v8/test/inspector/runtime/custom-preview-expected.txt b/deps/v8/test/inspector/runtime/custom-preview-expected.txt
new file mode 100644
index 0000000000..f32d899e5d
--- /dev/null
+++ b/deps/v8/test/inspector/runtime/custom-preview-expected.txt
@@ -0,0 +1,250 @@
+RemoteObject.CustomPreview
+Dump custom previews..
+{
+ bodyGetterId : <bodyGetterId>
+ header : ["span",{},"Header formatted by 1 ","a"]
+}
+{
+ id : <messageId>
+ result : {
+ result : {
+ type : object
+ value : [
+ [0] : span
+ [1] : {
+ }
+ [2] : Body formatted by 1
+ [3] : a
+ [4] : [
+ [0] : object
+ [1] : {
+ className : Object
+ description : Object
+ objectId : <objectId>
+ type : object
+ }
+ ]
+ ]
+ }
+ }
+}
+{
+ bodyGetterId : <bodyGetterId>
+ header : ["span",{},"Header formatted by 2 ","b"]
+}
+{
+ id : <messageId>
+ result : {
+ result : {
+ type : object
+ value : [
+ [0] : span
+ [1] : {
+ }
+ [2] : Body formatted by 2
+ [3] : b
+ ]
+ }
+ }
+}
+{
+ bodyGetterId : <bodyGetterId>
+ header : ["span",{},"Header formatted by 1 ","c"]
+}
+{
+ id : <messageId>
+ result : {
+ result : {
+ type : object
+ value : [
+ [0] : span
+ [1] : {
+ }
+ [2] : Body formatted by 1
+ [3] : c
+ [4] : [
+ [0] : object
+ [1] : {
+ className : Object
+ description : Object
+ objectId : <objectId>
+ type : object
+ }
+ ]
+ ]
+ }
+ }
+}
+{
+ header : ["span",{},"Formatter with config ",["object",{"type":"object","className":"Object","description":"Object","objectId":"{\"injectedScriptId\":1,\"id\":10}","customPreview":{"header":"[\"span\",{},\"Header \",\"info: \",\"additional info\"]","bodyGetterId":"{\"injectedScriptId\":1,\"id\":11}"}}]]
+}
+Change formatters order and dump again..
+{
+ bodyGetterId : <bodyGetterId>
+ header : ["span",{},"Header formatted by 1 ","a"]
+}
+{
+ id : <messageId>
+ result : {
+ result : {
+ type : object
+ value : [
+ [0] : span
+ [1] : {
+ }
+ [2] : Body formatted by 1
+ [3] : a
+ [4] : [
+ [0] : object
+ [1] : {
+ className : Object
+ description : Object
+ objectId : <objectId>
+ type : object
+ }
+ ]
+ ]
+ }
+ }
+}
+{
+ bodyGetterId : <bodyGetterId>
+ header : ["span",{},"Header formatted by 2 ","b"]
+}
+{
+ id : <messageId>
+ result : {
+ result : {
+ type : object
+ value : [
+ [0] : span
+ [1] : {
+ }
+ [2] : Body formatted by 2
+ [3] : b
+ ]
+ }
+ }
+}
+{
+ bodyGetterId : <bodyGetterId>
+ header : ["span",{},"Header formatted by 2 ","c"]
+}
+{
+ id : <messageId>
+ result : {
+ result : {
+ type : object
+ value : [
+ [0] : span
+ [1] : {
+ }
+ [2] : Body formatted by 2
+ [3] : c
+ ]
+ }
+ }
+}
+{
+ header : ["span",{},"Formatter with config ",["object",{"type":"object","className":"Object","description":"Object","objectId":"{\"injectedScriptId\":1,\"id\":21}","customPreview":{"header":"[\"span\",{},\"Header \",\"info: \",\"additional info\"]","bodyGetterId":"{\"injectedScriptId\":1,\"id\":22}"}}]]
+}
+Test Runtime.getProperties
+{
+ bodyGetterId : <bodyGetterId>
+ header : ["span",{},"Header formatted by 1 ","a"]
+}
+{
+ id : <messageId>
+ result : {
+ result : {
+ type : object
+ value : [
+ [0] : span
+ [1] : {
+ }
+ [2] : Body formatted by 1
+ [3] : a
+ [4] : [
+ [0] : object
+ [1] : {
+ className : Object
+ description : Object
+ objectId : <objectId>
+ type : object
+ }
+ ]
+ ]
+ }
+ }
+}
+Try to break custom preview..
+{
+ method : Runtime.consoleAPICalled
+ params : {
+ args : [
+ [0] : {
+ type : string
+ value : Custom Formatter Failed: Uncaught 1
+ }
+ ]
+ executionContextId : <executionContextId>
+ timestamp : <timestamp>
+ type : error
+ }
+}
+{
+ method : Runtime.consoleAPICalled
+ params : {
+ args : [
+ [0] : {
+ type : string
+ value : Custom Formatter Failed: Uncaught 1
+ }
+ ]
+ executionContextId : <executionContextId>
+ timestamp : <timestamp>
+ type : error
+ }
+}
+{
+ method : Runtime.consoleAPICalled
+ params : {
+ args : [
+ [0] : {
+ type : string
+ value : Custom Formatter Failed: Uncaught 2
+ }
+ ]
+ executionContextId : <executionContextId>
+ timestamp : <timestamp>
+ type : error
+ }
+}
+{
+ method : Runtime.consoleAPICalled
+ params : {
+ args : [
+ [0] : {
+ type : string
+ value : Custom Formatter Failed: Uncaught 3
+ }
+ ]
+ executionContextId : <executionContextId>
+ timestamp : <timestamp>
+ type : error
+ }
+}
+{
+ method : Runtime.consoleAPICalled
+ params : {
+ args : [
+ [0] : {
+ type : string
+ value : Custom Formatter Failed: Uncaught 4
+ }
+ ]
+ executionContextId : <executionContextId>
+ timestamp : <timestamp>
+ type : error
+ }
+}
diff --git a/deps/v8/test/inspector/runtime/custom-preview.js b/deps/v8/test/inspector/runtime/custom-preview.js
new file mode 100644
index 0000000000..62bb848f26
--- /dev/null
+++ b/deps/v8/test/inspector/runtime/custom-preview.js
@@ -0,0 +1,133 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+const {session, contextGroup, Protocol} =
+ InspectorTest.start('RemoteObject.CustomPreview');
+
+(async function test() {
+ contextGroup.addScript(`
+ var a = {name: 'a'};
+ var b = {name: 'b'};
+ var c = {name: 'c'};
+ a.formattableBy1 = true;
+ b.formattableBy2 = true;
+ c.formattableBy1 = true;
+ c.formattableBy2 = true;
+ var formatter1 = {
+ header: (x) => x.formattableBy1 ? ['span', {}, 'Header formatted by 1 ', x.name] : null,
+ hasBody: () => true,
+ body: (x) => ['span', {}, 'Body formatted by 1 ', x.name, ['object', {object: {}}]]
+ };
+ var formatter2 = {
+ header: (x) => x.formattableBy2 ? ['span', {}, 'Header formatted by 2 ', x.name] : null,
+ hasBody: (x) => true,
+ body: (x) => ['span', {}, 'Body formatted by 2 ', x.name]
+ };
+ var configTest = {};
+ var formatterWithConfig1 = {
+ header: function(x, config) {
+ if (x !== configTest || config)
+ return null;
+ return ['span', {}, 'Formatter with config ', ['object', {'object': x, 'config': {'info': 'additional info'}}]];
+ },
+ hasBody: (x) => false,
+ body: (x) => { throw 'Unreachable'; }
+ }
+ var formatterWithConfig2 = {
+ header: function(x, config) {
+ if (x !== configTest || !config)
+ return null;
+ return ['span', {}, 'Header ', 'info: ', config.info];
+ },
+ hasBody: (x, config) => config && config.info,
+ body: (x, config) => ['span', {}, 'body', 'info: ', config.info]
+ }
+ this.devtoolsFormatters = [formatter1, formatter2, formatterWithConfig1, formatterWithConfig2];
+ `);
+
+ Protocol.Runtime.enable();
+ Protocol.Runtime.setCustomObjectFormatterEnabled({enabled: true});
+
+ Protocol.Runtime.onConsoleAPICalled(m => InspectorTest.logMessage(m));
+ InspectorTest.log('Dump custom previews..');
+ await dumpCustomPreviewForEvaluate(await Protocol.Runtime.evaluate({expression: 'a'}));
+ await dumpCustomPreviewForEvaluate(await Protocol.Runtime.evaluate({expression: 'b'}));
+ await dumpCustomPreviewForEvaluate(await Protocol.Runtime.evaluate({expression: 'c'}));
+ await dumpCustomPreviewForEvaluate(await Protocol.Runtime.evaluate({expression: 'configTest'}));
+ InspectorTest.log('Change formatters order and dump again..');
+ await Protocol.Runtime.evaluate({
+ expression: 'this.devtoolsFormatters = [formatter2, formatter1, formatterWithConfig1, formatterWithConfig2]'
+ });
+ await dumpCustomPreviewForEvaluate(await Protocol.Runtime.evaluate({expression: 'a'}));
+ await dumpCustomPreviewForEvaluate(await Protocol.Runtime.evaluate({expression: 'b'}));
+ await dumpCustomPreviewForEvaluate(await Protocol.Runtime.evaluate({expression: 'c'}));
+ await dumpCustomPreviewForEvaluate(await Protocol.Runtime.evaluate({expression: 'configTest'}));
+
+ InspectorTest.log('Test Runtime.getProperties');
+ const {result:{result:{objectId}}} = await Protocol.Runtime.evaluate({expression: '({a})'});
+ const {result:{result}} = await Protocol.Runtime.getProperties({
+ objectId, ownProperties: true, generatePreview: true});
+ await dumpCustomPreview(result.find(value => value.name === 'a').value);
+
+ InspectorTest.log('Try to break custom preview..');
+ await Protocol.Runtime.evaluate({
+ expression: `Object.defineProperty(this, 'devtoolsFormatters', {
+ get: () => { throw 1; },
+ configurable: true
+ })`
+ });
+ Protocol.Runtime.evaluate({ expression: '({})', generatePreview: true });
+ InspectorTest.logMessage(await Protocol.Runtime.onceConsoleAPICalled());
+
+ await Protocol.Runtime.evaluate({
+ expression: `Object.defineProperty(this, 'devtoolsFormatters', {
+ get: () => {
+ const arr = [1];
+ Object.defineProperty(arr, 0, { get: () => { throw 2; }});
+ return arr;
+ },
+ configurable: true
+ })`
+ });
+ Protocol.Runtime.evaluate({ expression: '({})', generatePreview: true });
+ InspectorTest.logMessage(await Protocol.Runtime.onceConsoleAPICalled());
+
+ await Protocol.Runtime.evaluate({
+ expression: `Object.defineProperty(this, 'devtoolsFormatters', {
+ get: () => [{get header() { throw 3; }}],
+ configurable: true
+ })`
+ });
+ Protocol.Runtime.evaluate({ expression: '({})', generatePreview: true });
+ InspectorTest.logMessage(await Protocol.Runtime.onceConsoleAPICalled());
+
+ await Protocol.Runtime.evaluate({
+ expression: `Object.defineProperty(this, 'devtoolsFormatters', {
+ get: () => [{header: () => { throw 4; }}],
+ configurable: true
+ })`
+ });
+ Protocol.Runtime.evaluate({ expression: '({})', generatePreview: true });
+ InspectorTest.logMessage(await Protocol.Runtime.onceConsoleAPICalled());
+
+ InspectorTest.completeTest();
+})()
+
+function dumpCustomPreviewForEvaluate(result) {
+ return dumpCustomPreview(result.result.result);
+}
+
+async function dumpCustomPreview(result) {
+ const { objectId, customPreview } = result;
+ InspectorTest.logMessage(customPreview);
+ if (customPreview.bodyGetterId) {
+ const body = await Protocol.Runtime.callFunctionOn({
+ objectId,
+ functionDeclaration: 'function(bodyGetter) { return bodyGetter.call(this); }',
+ arguments: [ { objectId: customPreview.bodyGetterId } ],
+ returnByValue: true
+ });
+ InspectorTest.logMessage(body);
+ }
+}
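
Editorial note: the contract this test exercises is the devtoolsFormatters one: header(obj, config) returns a JsonML fragment or null to decline, hasBody gates whether the body getter (surfaced to protocol clients as bodyGetterId) will be invoked, and formatters are tried in array order. A minimal standalone formatter following that shape, installed on the global as the test does via this.devtoolsFormatters:

```js
// Minimal custom formatter: handles objects carrying a string `name`,
// declines everything else so later formatters get a chance.
const nameFormatter = {
  header(x) {
    return x && typeof x.name === 'string'
        ? ['span', {}, 'named: ', x.name] // JsonML fragment
        : null;                           // decline
  },
  hasBody: () => false,
  body() { throw new Error('unreachable: hasBody() returned false'); },
};

globalThis.devtoolsFormatters = [nameFormatter];
```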
diff --git a/deps/v8/test/inspector/runtime/es6-module-expected.txt b/deps/v8/test/inspector/runtime/es6-module-expected.txt
index 051ef6ceae..25ba52e034 100644
--- a/deps/v8/test/inspector/runtime/es6-module-expected.txt
+++ b/deps/v8/test/inspector/runtime/es6-module-expected.txt
@@ -128,12 +128,6 @@ console.log(239)
{
id : <messageId>
result : {
- internalProperties : [
- [0] : {
- name : [[StableObjectId]]
- value : <StablectObjectId>
- }
- ]
result : [
[0] : {
configurable : true
diff --git a/deps/v8/test/inspector/runtime/evaluate-run-microtasks-expected.txt b/deps/v8/test/inspector/runtime/evaluate-run-microtasks-expected.txt
index 080db69d7e..397b5e41db 100644
--- a/deps/v8/test/inspector/runtime/evaluate-run-microtasks-expected.txt
+++ b/deps/v8/test/inspector/runtime/evaluate-run-microtasks-expected.txt
@@ -14,7 +14,7 @@ Tests that microtasks run before the Runtime.evaluate response is sent
callFrames : [
[0] : {
columnNumber : 37
- functionName : Promise.resolve.then
+ functionName :
lineNumber : 0
scriptId : <scriptId>
url :
diff --git a/deps/v8/test/inspector/runtime/evaluate-with-generate-preview.js b/deps/v8/test/inspector/runtime/evaluate-with-generate-preview.js
index 6d6787ab56..22fc49d9bf 100644
--- a/deps/v8/test/inspector/runtime/evaluate-with-generate-preview.js
+++ b/deps/v8/test/inspector/runtime/evaluate-with-generate-preview.js
@@ -61,6 +61,7 @@ Object.defineProperty(parentObj, 'propNotNamedProto', {
get: deterministicNativeFunction,
set: function() {}
});
+inspector.allowAccessorFormatting(parentObj);
var objInheritsGetterProperty = {__proto__: parentObj};
inspector.allowAccessorFormatting(objInheritsGetterProperty);
diff --git a/deps/v8/test/inspector/runtime/exception-thrown-expected.txt b/deps/v8/test/inspector/runtime/exception-thrown-expected.txt
index fb4cf70ae0..7c58a0421e 100644
--- a/deps/v8/test/inspector/runtime/exception-thrown-expected.txt
+++ b/deps/v8/test/inspector/runtime/exception-thrown-expected.txt
@@ -6,16 +6,16 @@ Check that exceptionThrown is supported by test runner.
columnNumber : 2
exception : {
className : Error
- description : Error at setTimeout (<anonymous>:2:9)
+ description : Error at <anonymous>:2:9
objectId : <objectId>
preview : {
- description : Error at setTimeout (<anonymous>:2:9)
+ description : Error at <anonymous>:2:9
overflow : false
properties : [
[0] : {
name : stack
type : string
- value : Error at setTimeout (<anonymous>:2:9)
+ value : Error at <anonymous>:2:9
}
]
subtype : error
@@ -31,7 +31,7 @@ Check that exceptionThrown is supported by test runner.
callFrames : [
[0] : {
columnNumber : 8
- functionName : setTimeout
+ functionName :
lineNumber : 1
scriptId : <scriptId>
url :
@@ -99,7 +99,7 @@ Check that exceptionThrown is supported by test runner.
callFrames : [
[0] : {
columnNumber : 2
- functionName : setTimeout
+ functionName :
lineNumber : 1
scriptId : <scriptId>
url :
diff --git a/deps/v8/test/inspector/runtime/get-properties-expected.txt b/deps/v8/test/inspector/runtime/get-properties-expected.txt
index 5707ffc5af..8b48e65c3b 100644
--- a/deps/v8/test/inspector/runtime/get-properties-expected.txt
+++ b/deps/v8/test/inspector/runtime/get-properties-expected.txt
@@ -5,7 +5,6 @@ Running test: testObject5
foo own string cat
Internal properties
[[PrimitiveValue]] number 5
- [[StableObjectId]]: <stableObjectId>
Running test: testNotOwn
__defineGetter__ inherited function undefined
@@ -24,8 +23,6 @@ Running test: testNotOwn
toLocaleString inherited function undefined
toString inherited function undefined
valueOf inherited function undefined
-Internal properties
- [[StableObjectId]]: <stableObjectId>
Running test: testAccessorsOnly
b own no value, getter, setter
@@ -37,8 +34,6 @@ Running test: testArray
2 own string blue
__proto__ own object undefined
length own number 3
-Internal properties
- [[StableObjectId]]: <stableObjectId>
Running test: testBound
__proto__ own function undefined
@@ -47,19 +42,14 @@ Running test: testBound
Internal properties
[[BoundArgs]] object undefined
[[BoundThis]] object undefined
- [[StableObjectId]]: <stableObjectId>
[[TargetFunction]] function undefined
Running test: testObjectThrowsLength
__proto__ own object undefined
length own no value, getter
-Internal properties
- [[StableObjectId]]: <stableObjectId>
Running test: testTypedArrayWithoutLength
__proto__ own object undefined
-Internal properties
- [[StableObjectId]]: <stableObjectId>
Running test: testArrayBuffer
[[Int8Array]]
@@ -72,8 +62,6 @@ Running test: testArrayBuffer
6 own number 1
7 own number 1
__proto__ own object undefined
-Internal properties
- [[StableObjectId]]: <stableObjectId>
[[Uint8Array]]
0 own number 1
1 own number 1
@@ -84,26 +72,18 @@ Internal properties
6 own number 1
7 own number 1
__proto__ own object undefined
-Internal properties
- [[StableObjectId]]: <stableObjectId>
[[Int16Array]]
0 own number 257
1 own number 257
2 own number 257
3 own number 257
__proto__ own object undefined
-Internal properties
- [[StableObjectId]]: <stableObjectId>
[[Int32Array]]
0 own number 16843009
1 own number 16843009
__proto__ own object undefined
-Internal properties
- [[StableObjectId]]: <stableObjectId>
Running test: testArrayBufferWithBrokenUintCtor
[[Int8Array]] own object undefined
[[Uint8Array]] own object undefined
__proto__ own object undefined
-Internal properties
- [[StableObjectId]]: <stableObjectId>
diff --git a/deps/v8/test/inspector/runtime/get-properties-on-proxy-expected.txt b/deps/v8/test/inspector/runtime/get-properties-on-proxy-expected.txt
index efde782ae3..a0437f4af6 100644
--- a/deps/v8/test/inspector/runtime/get-properties-on-proxy-expected.txt
+++ b/deps/v8/test/inspector/runtime/get-properties-on-proxy-expected.txt
@@ -54,10 +54,6 @@ Testing regular Proxy
value : false
}
}
- [3] : {
- name : [[StableObjectId]]
- value : <StablectObjectId>
- }
]
result : [
]
@@ -118,10 +114,6 @@ Testing revocable Proxy
value : false
}
}
- [3] : {
- name : [[StableObjectId]]
- value : <StablectObjectId>
- }
]
result : [
]
@@ -174,10 +166,6 @@ Testing revocable Proxy
value : true
}
}
- [3] : {
- name : [[StableObjectId]]
- value : <StablectObjectId>
- }
]
result : [
]
diff --git a/deps/v8/test/inspector/runtime/get-properties.js b/deps/v8/test/inspector/runtime/get-properties.js
index 0386fdea6d..422b0f211e 100644
--- a/deps/v8/test/inspector/runtime/get-properties.js
+++ b/deps/v8/test/inspector/runtime/get-properties.js
@@ -94,10 +94,7 @@ async function logGetPropertiesResult(objectId, flags = { ownProperties: true })
for (var i = 0; i < internalPropertyArray.length; i++) {
var p = internalPropertyArray[i];
var v = p.value;
- if (p.name !== '[[StableObjectId]]')
- InspectorTest.log(" " + p.name + " " + v.type + " " + v.value);
- else
- InspectorTest.log(" [[StableObjectId]]: <stableObjectId>");
+ InspectorTest.log(' ' + p.name + ' ' + v.type + ' ' + v.value);
}
}
diff --git a/deps/v8/test/inspector/runtime/internal-properties-entries-expected.txt b/deps/v8/test/inspector/runtime/internal-properties-entries-expected.txt
index 9082e9b266..c78451d5d1 100644
--- a/deps/v8/test/inspector/runtime/internal-properties-entries-expected.txt
+++ b/deps/v8/test/inspector/runtime/internal-properties-entries-expected.txt
@@ -15,12 +15,6 @@ expression: new Map([[1,2],[3,4]])
{
id : <messageId>
result : {
- internalProperties : [
- [0] : {
- name : [[StableObjectId]]
- value : <StablectObjectId>
- }
- ]
result : [
[0] : {
configurable : true
@@ -71,12 +65,6 @@ expression: new Map()
{
id : <messageId>
result : {
- internalProperties : [
- [0] : {
- name : [[StableObjectId]]
- value : <StablectObjectId>
- }
- ]
result : [
[0] : {
configurable : false
@@ -109,12 +97,6 @@ expression: new Map([[1,2],[3,4]]).entries()
{
id : <messageId>
result : {
- internalProperties : [
- [0] : {
- name : [[StableObjectId]]
- value : <StablectObjectId>
- }
- ]
result : [
[0] : {
configurable : true
@@ -169,12 +151,6 @@ expression: it = new Map([[1,2],[3,4]]).entries(); it.next(); it
{
id : <messageId>
result : {
- internalProperties : [
- [0] : {
- name : [[StableObjectId]]
- value : <StablectObjectId>
- }
- ]
result : [
[0] : {
configurable : true
@@ -214,12 +190,6 @@ expression: it = new Map([[1,2],[3,4]]).keys(); it.next(); it
{
id : <messageId>
result : {
- internalProperties : [
- [0] : {
- name : [[StableObjectId]]
- value : <StablectObjectId>
- }
- ]
result : [
[0] : {
configurable : true
@@ -259,12 +229,6 @@ expression: it = new Map([[1,2],[3,4]]).values(); it.next(); it
{
id : <messageId>
result : {
- internalProperties : [
- [0] : {
- name : [[StableObjectId]]
- value : <StablectObjectId>
- }
- ]
result : [
[0] : {
configurable : true
@@ -301,22 +265,112 @@ expression: it = new Map([[1,2],[3,4]]).entries(); it.next(); it.next(); it
{
id : <messageId>
result : {
- internalProperties : [
+ result : [
[0] : {
- name : [[StableObjectId]]
- value : <StablectObjectId>
+ configurable : false
+ enumerable : false
+ isOwn : true
+ name : length
+ value : {
+ description : 0
+ type : number
+ value : 0
+ }
+ writable : true
}
]
+ }
+}
+expression: new Map([[1, undefined], [2, () => 42], [3, /abc/], [4, new Error()]]).entries()
+[
+ [0] : {
+ key : 1
+ }
+ [1] : {
+ key : 2
+ value : {
+ }
+ }
+ [2] : {
+ key : 3
+ value : {
+ }
+ }
+ [3] : {
+ key : 4
+ value : {
+ }
+ }
+]
+{
+ id : <messageId>
+ result : {
result : [
[0] : {
+ configurable : true
+ enumerable : true
+ isOwn : true
+ name : 0
+ value : {
+ className : Object
+ description : {1 => undefined}
+ objectId : <objectId>
+ subtype : internal#entry
+ type : object
+ }
+ writable : true
+ }
+ [1] : {
+ configurable : true
+ enumerable : true
+ isOwn : true
+ name : 1
+ value : {
+ className : Object
+ description : {2 => () => 42}
+ objectId : <objectId>
+ subtype : internal#entry
+ type : object
+ }
+ writable : true
+ }
+ [2] : {
+ configurable : true
+ enumerable : true
+ isOwn : true
+ name : 2
+ value : {
+ className : Object
+ description : {3 => /abc/}
+ objectId : <objectId>
+ subtype : internal#entry
+ type : object
+ }
+ writable : true
+ }
+ [3] : {
+ configurable : true
+ enumerable : true
+ isOwn : true
+ name : 3
+ value : {
+ className : Object
+ description : {4 => Error at <anonymous>:1:57}
+ objectId : <objectId>
+ subtype : internal#entry
+ type : object
+ }
+ writable : true
+ }
+ [4] : {
configurable : false
enumerable : false
isOwn : true
name : length
value : {
- description : 0
+ description : 4
type : number
- value : 0
+ value : 4
}
writable : true
}
@@ -337,12 +391,6 @@ expression: new Set([1,2])
{
id : <messageId>
result : {
- internalProperties : [
- [0] : {
- name : [[StableObjectId]]
- value : <StablectObjectId>
- }
- ]
result : [
[0] : {
configurable : true
@@ -393,12 +441,6 @@ expression: new Set()
{
id : <messageId>
result : {
- internalProperties : [
- [0] : {
- name : [[StableObjectId]]
- value : <StablectObjectId>
- }
- ]
result : [
[0] : {
configurable : false
@@ -429,12 +471,6 @@ expression: new Set([1,2]).values()
{
id : <messageId>
result : {
- internalProperties : [
- [0] : {
- name : [[StableObjectId]]
- value : <StablectObjectId>
- }
- ]
result : [
[0] : {
configurable : true
@@ -488,12 +524,6 @@ expression: it = new Set([1,2]).values(); it.next(); it
{
id : <messageId>
result : {
- internalProperties : [
- [0] : {
- name : [[StableObjectId]]
- value : <StablectObjectId>
- }
- ]
result : [
[0] : {
configurable : true
@@ -533,12 +563,6 @@ expression: it = new Set([1,2]).keys(); it.next(); it
{
id : <messageId>
result : {
- internalProperties : [
- [0] : {
- name : [[StableObjectId]]
- value : <StablectObjectId>
- }
- ]
result : [
[0] : {
configurable : true
@@ -572,19 +596,12 @@ expression: it = new Set([1,2]).keys(); it.next(); it
expression: it = new Set([1,2]).entries(); it.next(); it
[
[0] : {
- key : 2
value : 2
}
]
{
id : <messageId>
result : {
- internalProperties : [
- [0] : {
- name : [[StableObjectId]]
- value : <StablectObjectId>
- }
- ]
result : [
[0] : {
configurable : true
@@ -593,7 +610,7 @@ expression: it = new Set([1,2]).entries(); it.next(); it
name : 0
value : {
className : Object
- description : {2 => 2}
+ description : 2
objectId : <objectId>
subtype : internal#entry
type : object
@@ -621,12 +638,6 @@ expression: it = new Set([1,2]).values(); it.next(); it.next(); it
{
id : <messageId>
result : {
- internalProperties : [
- [0] : {
- name : [[StableObjectId]]
- value : <StablectObjectId>
- }
- ]
result : [
[0] : {
configurable : false
@@ -651,12 +662,6 @@ expression: new WeakMap()
{
id : <messageId>
result : {
- internalProperties : [
- [0] : {
- name : [[StableObjectId]]
- value : <StablectObjectId>
- }
- ]
result : [
[0] : {
configurable : false
@@ -685,12 +690,6 @@ expression: new WeakMap([[{ a: 2 }, 42]])
{
id : <messageId>
result : {
- internalProperties : [
- [0] : {
- name : [[StableObjectId]]
- value : <StablectObjectId>
- }
- ]
result : [
[0] : {
configurable : true
@@ -729,12 +728,6 @@ expression: new WeakSet()
{
id : <messageId>
result : {
- internalProperties : [
- [0] : {
- name : [[StableObjectId]]
- value : <StablectObjectId>
- }
- ]
result : [
[0] : {
configurable : false
@@ -762,12 +755,6 @@ expression: new WeakSet([{a:2}])
{
id : <messageId>
result : {
- internalProperties : [
- [0] : {
- name : [[StableObjectId]]
- value : <StablectObjectId>
- }
- ]
result : [
[0] : {
configurable : true
diff --git a/deps/v8/test/inspector/runtime/internal-properties-entries.js b/deps/v8/test/inspector/runtime/internal-properties-entries.js
index 9555ae5558..d955a19ac0 100644
--- a/deps/v8/test/inspector/runtime/internal-properties-entries.js
+++ b/deps/v8/test/inspector/runtime/internal-properties-entries.js
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+// Flags: --no-stress-flush-bytecode
+
let {session, contextGroup, Protocol} = InspectorTest.start('Checks internal [[Entries]] in Runtime.getProperties output');
Protocol.Runtime.enable();
@@ -19,6 +21,7 @@ InspectorTest.runTestSuite([
.then(() => checkExpression('it = new Map([[1,2],[3,4]]).keys(); it.next(); it'))
.then(() => checkExpression('it = new Map([[1,2],[3,4]]).values(); it.next(); it'))
.then(() => checkExpression('it = new Map([[1,2],[3,4]]).entries(); it.next(); it.next(); it'))
+ .then(() => checkExpression('new Map([[1, undefined], [2, () => 42], [3, /abc/], [4, new Error()]]).entries()'))
.then(next);
},
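
Editorial note: the added expression probes how heterogeneous map entries are previewed; per the expected output, an undefined value is elided from its entry (only { key: 1 } remains), while functions, regexps and error objects appear by description (e.g. {2 => () => 42}). Building the same map standalone:

```js
// The map under test: one entry per value category the preview
// has to handle (undefined, function, regexp, error object).
const m = new Map([[1, undefined], [2, () => 42], [3, /abc/], [4, new Error()]]);
for (const [k, v] of m.entries()) console.log(k, typeof v);
// 1 'undefined', 2 'function', 3 'object', 4 'object'
```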
diff --git a/deps/v8/test/inspector/runtime/internal-properties-expected.txt b/deps/v8/test/inspector/runtime/internal-properties-expected.txt
index c114696eb8..978c104866 100644
--- a/deps/v8/test/inspector/runtime/internal-properties-expected.txt
+++ b/deps/v8/test/inspector/runtime/internal-properties-expected.txt
@@ -7,10 +7,6 @@ expression: (function* foo() { yield 1 })
result : {
internalProperties : [
[0] : {
- name : [[StableObjectId]]
- value : <StablectObjectId>
- }
- [1] : {
name : [[FunctionLocation]]
value : {
description : Object
@@ -23,14 +19,14 @@ expression: (function* foo() { yield 1 })
}
}
}
- [2] : {
+ [1] : {
name : [[IsGenerator]]
value : {
type : boolean
value : true
}
}
- [3] : {
+ [2] : {
name : [[Scopes]]
value : {
className : Array
@@ -51,10 +47,6 @@ expression: (function foo() {})
result : {
internalProperties : [
[0] : {
- name : [[StableObjectId]]
- value : <StablectObjectId>
- }
- [1] : {
name : [[FunctionLocation]]
value : {
description : Object
@@ -67,7 +59,7 @@ expression: (function foo() {})
}
}
}
- [2] : {
+ [1] : {
name : [[Scopes]]
value : {
className : Array
@@ -95,10 +87,6 @@ expression: new Number(239)
value : 239
}
}
- [1] : {
- name : [[StableObjectId]]
- value : <StablectObjectId>
- }
]
}
}
@@ -114,10 +102,6 @@ expression: new Boolean(false)
value : false
}
}
- [1] : {
- name : [[StableObjectId]]
- value : <StablectObjectId>
- }
]
}
}
@@ -133,10 +117,6 @@ expression: new String('abc')
value : abc
}
}
- [1] : {
- name : [[StableObjectId]]
- value : <StablectObjectId>
- }
]
}
}
@@ -153,10 +133,6 @@ expression: Object(Symbol(42))
type : symbol
}
}
- [1] : {
- name : [[StableObjectId]]
- value : <StablectObjectId>
- }
]
}
}
@@ -173,10 +149,6 @@ expression: Object(BigInt(2))
unserializableValue : 2n
}
}
- [1] : {
- name : [[StableObjectId]]
- value : <StablectObjectId>
- }
]
}
}
@@ -202,10 +174,6 @@ expression: Promise.resolve(42)
value : 42
}
}
- [2] : {
- name : [[StableObjectId]]
- value : <StablectObjectId>
- }
]
}
}
@@ -227,10 +195,6 @@ expression: new Promise(() => undefined)
type : undefined
}
}
- [2] : {
- name : [[StableObjectId]]
- value : <StablectObjectId>
- }
]
}
}
@@ -242,13 +206,26 @@ expression: gen1
result : {
internalProperties : [
[0] : {
+ name : [[GeneratorLocation]]
+ value : {
+ description : Object
+ subtype : internal#location
+ type : object
+ value : {
+ columnNumber : 13
+ lineNumber : 8
+ scriptId : <scriptId>
+ }
+ }
+ }
+ [1] : {
name : [[GeneratorStatus]]
value : {
type : string
value : suspended
}
}
- [1] : {
+ [2] : {
name : [[GeneratorFunction]]
value : {
className : GeneratorFunction
@@ -257,7 +234,7 @@ expression: gen1
type : function
}
}
- [2] : {
+ [3] : {
name : [[GeneratorReceiver]]
value : {
className : global
@@ -266,24 +243,7 @@ expression: gen1
type : object
}
}
- [3] : {
- name : [[StableObjectId]]
- value : <StablectObjectId>
- }
[4] : {
- name : [[GeneratorLocation]]
- value : {
- description : Object
- subtype : internal#location
- type : object
- value : {
- columnNumber : 13
- lineNumber : 8
- scriptId : <scriptId>
- }
- }
- }
- [5] : {
name : [[Scopes]]
value : {
className : Array
@@ -302,13 +262,26 @@ expression: gen1.next();gen1
result : {
internalProperties : [
[0] : {
+ name : [[GeneratorLocation]]
+ value : {
+ description : Object
+ subtype : internal#location
+ type : object
+ value : {
+ columnNumber : 2
+ lineNumber : 9
+ scriptId : <scriptId>
+ }
+ }
+ }
+ [1] : {
name : [[GeneratorStatus]]
value : {
type : string
value : suspended
}
}
- [1] : {
+ [2] : {
name : [[GeneratorFunction]]
value : {
className : GeneratorFunction
@@ -317,7 +290,7 @@ expression: gen1.next();gen1
type : function
}
}
- [2] : {
+ [3] : {
name : [[GeneratorReceiver]]
value : {
className : global
@@ -326,24 +299,7 @@ expression: gen1.next();gen1
type : object
}
}
- [3] : {
- name : [[StableObjectId]]
- value : <StablectObjectId>
- }
[4] : {
- name : [[GeneratorLocation]]
- value : {
- description : Object
- subtype : internal#location
- type : object
- value : {
- columnNumber : 2
- lineNumber : 9
- scriptId : <scriptId>
- }
- }
- }
- [5] : {
name : [[Scopes]]
value : {
className : Array
@@ -362,13 +318,26 @@ expression: gen1.next();gen1
result : {
internalProperties : [
[0] : {
+ name : [[GeneratorLocation]]
+ value : {
+ description : Object
+ subtype : internal#location
+ type : object
+ value : {
+ columnNumber : 13
+ lineNumber : 8
+ scriptId : <scriptId>
+ }
+ }
+ }
+ [1] : {
name : [[GeneratorStatus]]
value : {
type : string
value : closed
}
}
- [1] : {
+ [2] : {
name : [[GeneratorFunction]]
value : {
className : GeneratorFunction
@@ -377,7 +346,7 @@ expression: gen1.next();gen1
type : function
}
}
- [2] : {
+ [3] : {
name : [[GeneratorReceiver]]
value : {
className : global
@@ -386,11 +355,17 @@ expression: gen1.next();gen1
type : object
}
}
- [3] : {
- name : [[StableObjectId]]
- value : <StablectObjectId>
- }
- [4] : {
+ ]
+ }
+}
+
+Running test: generatorObjectDebuggerDisabled
+expression: gen2
+{
+ id : <messageId>
+ result : {
+ internalProperties : [
+ [0] : {
name : [[GeneratorLocation]]
value : {
description : Object
@@ -403,24 +378,14 @@ expression: gen1.next();gen1
}
}
}
- ]
- }
-}
-
-Running test: generatorObjectDebuggerDisabled
-expression: gen2
-{
- id : <messageId>
- result : {
- internalProperties : [
- [0] : {
+ [1] : {
name : [[GeneratorStatus]]
value : {
type : string
value : suspended
}
}
- [1] : {
+ [2] : {
name : [[GeneratorFunction]]
value : {
className : GeneratorFunction
@@ -429,7 +394,7 @@ expression: gen2
type : function
}
}
- [2] : {
+ [3] : {
name : [[GeneratorReceiver]]
value : {
className : global
@@ -438,24 +403,7 @@ expression: gen2
type : object
}
}
- [3] : {
- name : [[StableObjectId]]
- value : <StablectObjectId>
- }
[4] : {
- name : [[GeneratorLocation]]
- value : {
- description : Object
- subtype : internal#location
- type : object
- value : {
- columnNumber : 13
- lineNumber : 8
- scriptId : <scriptId>
- }
- }
- }
- [5] : {
name : [[Scopes]]
value : {
className : Array
@@ -474,13 +422,26 @@ expression: gen2.next();gen2
result : {
internalProperties : [
[0] : {
+ name : [[GeneratorLocation]]
+ value : {
+ description : Object
+ subtype : internal#location
+ type : object
+ value : {
+ columnNumber : 2
+ lineNumber : 9
+ scriptId : <scriptId>
+ }
+ }
+ }
+ [1] : {
name : [[GeneratorStatus]]
value : {
type : string
value : suspended
}
}
- [1] : {
+ [2] : {
name : [[GeneratorFunction]]
value : {
className : GeneratorFunction
@@ -489,7 +450,7 @@ expression: gen2.next();gen2
type : function
}
}
- [2] : {
+ [3] : {
name : [[GeneratorReceiver]]
value : {
className : global
@@ -498,24 +459,7 @@ expression: gen2.next();gen2
type : object
}
}
- [3] : {
- name : [[StableObjectId]]
- value : <StablectObjectId>
- }
[4] : {
- name : [[GeneratorLocation]]
- value : {
- description : Object
- subtype : internal#location
- type : object
- value : {
- columnNumber : 2
- lineNumber : 9
- scriptId : <scriptId>
- }
- }
- }
- [5] : {
name : [[Scopes]]
value : {
className : Array
@@ -534,13 +478,26 @@ expression: gen2.next();gen2
result : {
internalProperties : [
[0] : {
+ name : [[GeneratorLocation]]
+ value : {
+ description : Object
+ subtype : internal#location
+ type : object
+ value : {
+ columnNumber : 13
+ lineNumber : 8
+ scriptId : <scriptId>
+ }
+ }
+ }
+ [1] : {
name : [[GeneratorStatus]]
value : {
type : string
value : closed
}
}
- [1] : {
+ [2] : {
name : [[GeneratorFunction]]
value : {
className : GeneratorFunction
@@ -549,7 +506,7 @@ expression: gen2.next();gen2
type : function
}
}
- [2] : {
+ [3] : {
name : [[GeneratorReceiver]]
value : {
className : global
@@ -558,23 +515,6 @@ expression: gen2.next();gen2
type : object
}
}
- [3] : {
- name : [[StableObjectId]]
- value : <StablectObjectId>
- }
- [4] : {
- name : [[GeneratorLocation]]
- value : {
- description : Object
- subtype : internal#location
- type : object
- value : {
- columnNumber : 13
- lineNumber : 8
- scriptId : <scriptId>
- }
- }
- }
]
}
}
@@ -608,10 +548,6 @@ expression: (new Map([[1,2]])).entries()
}
}
[3] : {
- name : [[StableObjectId]]
- value : <StablectObjectId>
- }
- [4] : {
name : [[Entries]]
value : {
className : Array
@@ -652,10 +588,6 @@ expression: (new Set([[1,2]])).entries()
}
}
[3] : {
- name : [[StableObjectId]]
- value : <StablectObjectId>
- }
- [4] : {
name : [[Entries]]
value : {
className : Array
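
For context: the hunks above change no values; they only drop the [[StableObjectId]] internal property from inspector output, so the surviving entries ([[GeneratorLocation]], [[GeneratorStatus]], [[GeneratorFunction]], [[GeneratorReceiver]], [[Scopes]], [[Entries]]) shift down one index. A minimal sketch of how such expectations are produced, assuming the same InspectorTest/Protocol harness the new tests below use; the sketch itself is not part of this patch:

    // Hypothetical driver (not in this commit): dump the internal properties
    // of a suspended generator, mirroring the reindexed expectations above.
    const {Protocol} = InspectorTest.start('Dump generator internal properties.');
    (async function() {
      const {result: {result: {objectId}}} = await Protocol.Runtime.evaluate({
        expression: 'g = (function*(){ yield 42; })(); g'
      });
      // internalProperties now begins at [[GeneratorLocation]] ([0]) because
      // [[StableObjectId]] is no longer reported.
      const {result: {internalProperties}} =
          await Protocol.Runtime.getProperties({objectId, ownProperties: true});
      InspectorTest.logMessage(internalProperties);
      InspectorTest.completeTest();
    })();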
diff --git a/deps/v8/test/inspector/runtime/remote-object-expected.txt b/deps/v8/test/inspector/runtime/remote-object-expected.txt
new file mode 100644
index 0000000000..d05401ddfb
--- /dev/null
+++ b/deps/v8/test/inspector/runtime/remote-object-expected.txt
@@ -0,0 +1,2965 @@
+Tests Runtime.RemoteObject.
+
+Running test: testNull
+'null', returnByValue: false, generatePreview: false
+{
+ result : {
+ subtype : null
+ type : object
+ value : null
+ }
+}
+'null', returnByValue: true, generatePreview: false
+{
+ result : {
+ subtype : null
+ type : object
+ value : null
+ }
+}
+'null', returnByValue: false, generatePreview: true
+{
+ result : {
+ subtype : null
+ type : object
+ value : null
+ }
+}
+
+Running test: testBoolean
+'true', returnByValue: false, generatePreview: false
+{
+ result : {
+ type : boolean
+ value : true
+ }
+}
+'false', returnByValue: false, generatePreview: false
+{
+ result : {
+ type : boolean
+ value : false
+ }
+}
+'true', returnByValue: true, generatePreview: false
+{
+ result : {
+ type : boolean
+ value : true
+ }
+}
+'true', returnByValue: false, generatePreview: true
+{
+ result : {
+ type : boolean
+ value : true
+ }
+}
+
+Running test: testNumber
+'0 / {}', returnByValue: false, generatePreview: false
+{
+ result : {
+ description : NaN
+ type : number
+ unserializableValue : NaN
+ }
+}
+'-0', returnByValue: false, generatePreview: false
+{
+ result : {
+ description : -0
+ type : number
+ unserializableValue : -0
+ }
+}
+'0', returnByValue: false, generatePreview: false
+{
+ result : {
+ description : 0
+ type : number
+ value : 0
+ }
+}
+'1/0', returnByValue: false, generatePreview: false
+{
+ result : {
+ description : Infinity
+ type : number
+ unserializableValue : Infinity
+ }
+}
+'-1/0', returnByValue: false, generatePreview: false
+{
+ result : {
+ description : -Infinity
+ type : number
+ unserializableValue : -Infinity
+ }
+}
+'2.3456', returnByValue: false, generatePreview: false
+{
+ result : {
+ description : 2.3456
+ type : number
+ value : 2.3456
+ }
+}
+'2.3456', returnByValue: true, generatePreview: false
+{
+ result : {
+ description : 2.3456
+ type : number
+ value : 2.3456
+ }
+}
+'1/0', returnByValue: true, generatePreview: false
+{
+ result : {
+ description : Infinity
+ type : number
+ unserializableValue : Infinity
+ }
+}
+'({a: 1/0})', returnByValue: true, generatePreview: false
+{
+ result : {
+ type : object
+ value : {
+ a : null
+ }
+ }
+}
+
+Running test: testUndefined
+'undefined', returnByValue: false, generatePreview: false
+{
+ result : {
+ type : undefined
+ }
+}
+'undefined', returnByValue: true, generatePreview: false
+{
+ result : {
+ type : undefined
+ }
+}
+'({a : undefined})', returnByValue: true, generatePreview: false
+{
+ result : {
+ type : object
+ value : {
+ }
+ }
+}
+'([1, undefined])', returnByValue: true, generatePreview: false
+{
+ result : {
+ type : object
+ value : [
+ [0] : 1
+ [1] : null
+ ]
+ }
+}
+
+Running test: testString
+''Hello!'', returnByValue: false, generatePreview: false
+{
+ result : {
+ type : string
+ value : Hello!
+ }
+}
+''Hello!'', returnByValue: true, generatePreview: false
+{
+ result : {
+ type : string
+ value : Hello!
+ }
+}
+''Hello!'', returnByValue: false, generatePreview: true
+{
+ result : {
+ type : string
+ value : Hello!
+ }
+}
+
+Running test: testSymbol
+'Symbol()', returnByValue: false, generatePreview: true
+{
+ result : {
+ description : Symbol()
+ objectId : <objectId>
+ type : symbol
+ }
+}
+'Symbol(42)', returnByValue: false, generatePreview: true
+{
+ result : {
+ description : Symbol(42)
+ objectId : <objectId>
+ type : symbol
+ }
+}
+'Symbol('abc')', returnByValue: false, generatePreview: true
+{
+ result : {
+ description : Symbol(abc)
+ objectId : <objectId>
+ type : symbol
+ }
+}
+'Symbol('abc')', returnByValue: true, generatePreview: false
+{
+ error : {
+ code : -32000
+ message : Object couldn't be returned by value
+ }
+ id : <messageId>
+}
+
+Running test: testReturnByValue
+Empty object
+'({})', returnByValue: true, generatePreview: false
+{
+ result : {
+ type : object
+ value : {
+ }
+ }
+}
+Object with properties
+'({a:1, b:2})', returnByValue: true, generatePreview: false
+{
+ result : {
+ type : object
+ value : {
+ a : 1
+ b : 2
+ }
+ }
+}
+Object with cycle
+'a = {};a.a = a; a', returnByValue: true, generatePreview: false
+{
+ code : -32000
+ message : Object reference chain is too long
+}
+Function () => 42
+'() => 42', returnByValue: true, generatePreview: false
+{
+ result : {
+ type : function
+ value : {
+ }
+ }
+}
+Symbol(42)
+'Symbol(42)', returnByValue: true, generatePreview: false
+{
+ code : -32000
+ message : Object couldn't be returned by value
+}
+Error object
+'new Error()', returnByValue: true, generatePreview: false
+{
+ result : {
+ type : object
+ value : {
+ }
+ }
+}
+
+Running test: testFunction
+'(() => 42)', returnByValue: false, generatePreview: false
+{
+ result : {
+ className : Function
+ description : () => 42
+ objectId : <objectId>
+ type : function
+ }
+}
+'(function() { return 42 })', returnByValue: false, generatePreview: false
+{
+ result : {
+ className : Function
+ description : function() { return 42 }
+ objectId : <objectId>
+ type : function
+ }
+}
+'(function name() { return 42 })', returnByValue: false, generatePreview: false
+{
+ result : {
+ className : Function
+ description : function name() { return 42 }
+ objectId : <objectId>
+ type : function
+ }
+}
+'(async function asyncName() { return 42 })', returnByValue: false, generatePreview: false
+{
+ result : {
+ className : AsyncFunction
+ description : async function asyncName() { return 42 }
+ objectId : <objectId>
+ type : function
+ }
+}
+'(async () => 42)', returnByValue: false, generatePreview: false
+{
+ result : {
+ className : AsyncFunction
+ description : async () => 42
+ objectId : <objectId>
+ type : function
+ }
+}
+'(function (a) { return a; }).bind(null, 42)', returnByValue: false, generatePreview: false
+{
+ result : {
+ className : Function
+ description : function () { [native code] }
+ objectId : <objectId>
+ type : function
+ }
+}
+'a = (function() { return 42 }); a.b = 2; a', returnByValue: false, generatePreview: true
+{
+ result : {
+ className : Function
+ description : function() { return 42 }
+ objectId : <objectId>
+ type : function
+ }
+}
+'(function() { return 42 })', returnByValue: true, generatePreview: false
+{
+ result : {
+ type : function
+ value : {
+ }
+ }
+}
+'a = (function() { return 42 }); a.b = 2; a', returnByValue: true, generatePreview: false
+{
+ result : {
+ type : function
+ value : {
+ b : 2
+ }
+ }
+}
+
+Running test: testBigInt
+'1n', returnByValue: false, generatePreview: false
+{
+ result : {
+ description : 1n
+ type : bigint
+ unserializableValue : 1n
+ }
+}
+'-5n', returnByValue: false, generatePreview: false
+{
+ result : {
+ description : -5n
+ type : bigint
+ unserializableValue : -5n
+ }
+}
+'1234567890123456789012345678901234567890n', returnByValue: false, generatePreview: false
+{
+ result : {
+ description : 1234567890123456789012345678901234567890n
+ type : bigint
+ unserializableValue : 1234567890123456789012345678901234567890n
+ }
+}
+'-5n', returnByValue: true, generatePreview: false
+{
+ result : {
+ description : -5n
+ type : bigint
+ unserializableValue : -5n
+ }
+}
+'-5n', returnByValue: false, generatePreview: true
+{
+ result : {
+ description : -5n
+ type : bigint
+ unserializableValue : -5n
+ }
+}
+
+Running test: testRegExp
+'/w+/g', returnByValue: false, generatePreview: false
+{
+ result : {
+ className : RegExp
+ description : /w+/g
+ objectId : <objectId>
+ subtype : regexp
+ type : object
+ }
+}
+'/w+/i', returnByValue: false, generatePreview: false
+{
+ result : {
+ className : RegExp
+ description : /w+/i
+ objectId : <objectId>
+ subtype : regexp
+ type : object
+ }
+}
+'/w+/m', returnByValue: false, generatePreview: false
+{
+ result : {
+ className : RegExp
+ description : /w+/m
+ objectId : <objectId>
+ subtype : regexp
+ type : object
+ }
+}
+'/w+/s', returnByValue: false, generatePreview: false
+{
+ result : {
+ className : RegExp
+ description : /w+/s
+ objectId : <objectId>
+ subtype : regexp
+ type : object
+ }
+}
+'/w+/u', returnByValue: false, generatePreview: false
+{
+ result : {
+ className : RegExp
+ description : /w+/u
+ objectId : <objectId>
+ subtype : regexp
+ type : object
+ }
+}
+'/w+/y', returnByValue: false, generatePreview: false
+{
+ result : {
+ className : RegExp
+ description : /w+/y
+ objectId : <objectId>
+ subtype : regexp
+ type : object
+ }
+}
+'/w+/gimsuy', returnByValue: false, generatePreview: false
+{
+ result : {
+ className : RegExp
+ description : /w+/gimsuy
+ objectId : <objectId>
+ subtype : regexp
+ type : object
+ }
+}
+'new RegExp('\w+', 'g')', returnByValue: false, generatePreview: false
+{
+ result : {
+ className : RegExp
+ description : /w+/g
+ objectId : <objectId>
+ subtype : regexp
+ type : object
+ }
+}
+'var re = new RegExp('\w+', 'g');
+ re.prop = 32;
+ re', returnByValue: false, generatePreview: true
+{
+ result : {
+ className : RegExp
+ description : /w+/g
+ objectId : <objectId>
+ preview : {
+ description : /w+/g
+ overflow : false
+ properties : [
+ [0] : {
+ name : prop
+ type : number
+ value : 32
+ }
+ [1] : {
+ name : lastIndex
+ type : number
+ value : 0
+ }
+ ]
+ subtype : regexp
+ type : object
+ }
+ subtype : regexp
+ type : object
+ }
+}
+'var re = new RegExp('\w+', 'g');
+ re.prop = 32;
+ re', returnByValue: true, generatePreview: false
+{
+ result : {
+ type : object
+ value : {
+ prop : 32
+ }
+ }
+}
+
+Running test: testDate
+'new Date('May 18, 1991 03:24:00')', returnByValue: false, generatePreview: true
+{
+ result : {
+ className : Date
+ description : <expected description>
+ objectId : <objectId>
+ preview : {
+ description : <expected description>
+ overflow : false
+ properties : [
+ ]
+ subtype : date
+ type : object
+ }
+ subtype : date
+ type : object
+ }
+}
+'new Date(2018, 9, 31)', returnByValue: false, generatePreview: true
+{
+ result : {
+ className : Date
+ description : <expected description>
+ objectId : <objectId>
+ preview : {
+ description : <expected description>
+ overflow : false
+ properties : [
+ ]
+ subtype : date
+ type : object
+ }
+ subtype : date
+ type : object
+ }
+}
+'a = new Date(2018, 9, 31); a.b = 2; a', returnByValue: false, generatePreview: true
+{
+ result : {
+ className : Date
+ description : <expected description>
+ objectId : <objectId>
+ preview : {
+ description : <expected description>
+ overflow : false
+ properties : [
+ [0] : {
+ name : b
+ type : number
+ value : 2
+ }
+ ]
+ subtype : date
+ type : object
+ }
+ subtype : date
+ type : object
+ }
+}
+
+Running test: testMap
+'new Map()', returnByValue: false, generatePreview: true
+{
+ result : {
+ className : Map
+ description : Map(0)
+ objectId : <objectId>
+ preview : {
+ description : Map(0)
+ overflow : false
+ properties : [
+ ]
+ subtype : map
+ type : object
+ }
+ subtype : map
+ type : object
+ }
+}
+'new Map([[1,2]])', returnByValue: false, generatePreview: true
+{
+ result : {
+ className : Map
+ description : Map(1)
+ objectId : <objectId>
+ preview : {
+ description : Map(1)
+ entries : [
+ [0] : {
+ key : {
+ description : 1
+ overflow : false
+ properties : [
+ ]
+ type : number
+ }
+ value : {
+ description : 2
+ overflow : false
+ properties : [
+ ]
+ type : number
+ }
+ }
+ ]
+ overflow : false
+ properties : [
+ ]
+ subtype : map
+ type : object
+ }
+ subtype : map
+ type : object
+ }
+}
+'a = new Map(); a.set(a, a); a', returnByValue: false, generatePreview: true
+{
+ result : {
+ className : Map
+ description : Map(1)
+ objectId : <objectId>
+ preview : {
+ description : Map(1)
+ entries : [
+ [0] : {
+ key : {
+ description : Map(1)
+ overflow : true
+ properties : [
+ ]
+ subtype : map
+ type : object
+ }
+ value : {
+ description : Map(1)
+ overflow : true
+ properties : [
+ ]
+ subtype : map
+ type : object
+ }
+ }
+ ]
+ overflow : false
+ properties : [
+ ]
+ subtype : map
+ type : object
+ }
+ subtype : map
+ type : object
+ }
+}
+'new Map([['a','b']])', returnByValue: false, generatePreview: false
+{
+ result : {
+ className : Map
+ description : Map(1)
+ objectId : <objectId>
+ subtype : map
+ type : object
+ }
+}
+'({ a: new Map([['a','b']]) })', returnByValue: false, generatePreview: true
+{
+ result : {
+ className : Object
+ description : Object
+ objectId : <objectId>
+ preview : {
+ description : Object
+ overflow : false
+ properties : [
+ [0] : {
+ name : a
+ subtype : map
+ type : object
+ value : Map(1)
+ }
+ ]
+ type : object
+ }
+ type : object
+ }
+}
+'m = new Map([['a', {b: 2}]])
+ m.d = 42;
+ m', returnByValue: false, generatePreview: true
+{
+ result : {
+ className : Map
+ description : Map(1)
+ objectId : <objectId>
+ preview : {
+ description : Map(1)
+ entries : [
+ [0] : {
+ key : {
+ description : a
+ overflow : false
+ properties : [
+ ]
+ type : string
+ }
+ value : {
+ description : Object
+ overflow : false
+ properties : [
+ [0] : {
+ name : b
+ type : number
+ value : 2
+ }
+ ]
+ type : object
+ }
+ }
+ ]
+ overflow : false
+ properties : [
+ [0] : {
+ name : d
+ type : number
+ value : 42
+ }
+ ]
+ subtype : map
+ type : object
+ }
+ subtype : map
+ type : object
+ }
+}
+'m = new Map([['a', {b: 2}]])
+ m.d = 42;
+ m', returnByValue: true, generatePreview: false
+{
+ result : {
+ type : object
+ value : {
+ d : 42
+ }
+ }
+}
+'new Map([['a', {b: 2}]]).values()', returnByValue: false, generatePreview: false
+{
+ result : {
+ className : MapIterator
+ description : MapIterator
+ objectId : <objectId>
+ subtype : iterator
+ type : object
+ }
+}
+'new Map([['a', {b: 2}]]).values()', returnByValue: false, generatePreview: true
+{
+ result : {
+ className : MapIterator
+ description : MapIterator
+ objectId : <objectId>
+ preview : {
+ description : MapIterator
+ entries : [
+ [0] : {
+ value : {
+ description : Object
+ overflow : false
+ properties : [
+ [0] : {
+ name : b
+ type : number
+ value : 2
+ }
+ ]
+ type : object
+ }
+ }
+ ]
+ overflow : false
+ properties : [
+ ]
+ subtype : iterator
+ type : object
+ }
+ subtype : iterator
+ type : object
+ }
+}
+'it = new Map([['a', {b: 2}]]).values(); it.next(); it', returnByValue: false, generatePreview: true
+{
+ result : {
+ className : MapIterator
+ description : MapIterator
+ objectId : <objectId>
+ preview : {
+ description : MapIterator
+ overflow : false
+ properties : [
+ ]
+ subtype : iterator
+ type : object
+ }
+ subtype : iterator
+ type : object
+ }
+}
+'new Map([['a', {b: 2}]]).values()', returnByValue: true, generatePreview: false
+{
+ result : {
+ type : object
+ value : {
+ }
+ }
+}
+'new Map([['a', {b: 2}]]).entries()', returnByValue: false, generatePreview: false
+{
+ result : {
+ className : MapIterator
+ description : MapIterator
+ objectId : <objectId>
+ subtype : iterator
+ type : object
+ }
+}
+'new Map([['a', {b: 2}]]).entries()', returnByValue: false, generatePreview: true
+{
+ result : {
+ className : MapIterator
+ description : MapIterator
+ objectId : <objectId>
+ preview : {
+ description : MapIterator
+ entries : [
+ [0] : {
+ key : {
+ description : a
+ overflow : false
+ properties : [
+ ]
+ type : string
+ }
+ value : {
+ description : Object
+ overflow : false
+ properties : [
+ [0] : {
+ name : b
+ type : number
+ value : 2
+ }
+ ]
+ type : object
+ }
+ }
+ ]
+ overflow : false
+ properties : [
+ ]
+ subtype : iterator
+ type : object
+ }
+ subtype : iterator
+ type : object
+ }
+}
+'it = new Map([['a', {b: 2}]]).entries(); it.next(); it', returnByValue: false, generatePreview: true
+{
+ result : {
+ className : MapIterator
+ description : MapIterator
+ objectId : <objectId>
+ preview : {
+ description : MapIterator
+ overflow : false
+ properties : [
+ ]
+ subtype : iterator
+ type : object
+ }
+ subtype : iterator
+ type : object
+ }
+}
+'new Map([['a', {b: 2}]]).entries()', returnByValue: true, generatePreview: false
+{
+ result : {
+ type : object
+ value : {
+ }
+ }
+}
+
+Running test: testSet
+'new Set([1])', returnByValue: false, generatePreview: true
+{
+ result : {
+ className : Set
+ description : Set(1)
+ objectId : <objectId>
+ preview : {
+ description : Set(1)
+ entries : [
+ [0] : {
+ value : {
+ description : 1
+ overflow : false
+ properties : [
+ ]
+ type : number
+ }
+ }
+ ]
+ overflow : false
+ properties : [
+ ]
+ subtype : set
+ type : object
+ }
+ subtype : set
+ type : object
+ }
+}
+'new Set([1])', returnByValue: true, generatePreview: false
+{
+ result : {
+ type : object
+ value : {
+ }
+ }
+}
+'new Set([1,2,3,4,5,6,7])', returnByValue: false, generatePreview: true
+{
+ result : {
+ className : Set
+ description : Set(7)
+ objectId : <objectId>
+ preview : {
+ description : Set(7)
+ entries : [
+ [0] : {
+ value : {
+ description : 1
+ overflow : false
+ properties : [
+ ]
+ type : number
+ }
+ }
+ [1] : {
+ value : {
+ description : 2
+ overflow : false
+ properties : [
+ ]
+ type : number
+ }
+ }
+ [2] : {
+ value : {
+ description : 3
+ overflow : false
+ properties : [
+ ]
+ type : number
+ }
+ }
+ [3] : {
+ value : {
+ description : 4
+ overflow : false
+ properties : [
+ ]
+ type : number
+ }
+ }
+ [4] : {
+ value : {
+ description : 5
+ overflow : false
+ properties : [
+ ]
+ type : number
+ }
+ }
+ ]
+ overflow : true
+ properties : [
+ ]
+ subtype : set
+ type : object
+ }
+ subtype : set
+ type : object
+ }
+}
+'new Set([1,2,3]).values()', returnByValue: false, generatePreview: true
+{
+ result : {
+ className : SetIterator
+ description : SetIterator
+ objectId : <objectId>
+ preview : {
+ description : SetIterator
+ entries : [
+ [0] : {
+ value : {
+ description : 1
+ overflow : false
+ properties : [
+ ]
+ type : number
+ }
+ }
+ [1] : {
+ value : {
+ description : 2
+ overflow : false
+ properties : [
+ ]
+ type : number
+ }
+ }
+ [2] : {
+ value : {
+ description : 3
+ overflow : false
+ properties : [
+ ]
+ type : number
+ }
+ }
+ ]
+ overflow : false
+ properties : [
+ ]
+ subtype : iterator
+ type : object
+ }
+ subtype : iterator
+ type : object
+ }
+}
+'it = new Set([1,2,3]).values(); it.next(); it', returnByValue: false, generatePreview: true
+{
+ result : {
+ className : SetIterator
+ description : SetIterator
+ objectId : <objectId>
+ preview : {
+ description : SetIterator
+ entries : [
+ [0] : {
+ value : {
+ description : 2
+ overflow : false
+ properties : [
+ ]
+ type : number
+ }
+ }
+ [1] : {
+ value : {
+ description : 3
+ overflow : false
+ properties : [
+ ]
+ type : number
+ }
+ }
+ ]
+ overflow : false
+ properties : [
+ ]
+ subtype : iterator
+ type : object
+ }
+ subtype : iterator
+ type : object
+ }
+}
+
+Running test: testWeakMap
+'new WeakMap()', returnByValue: false, generatePreview: true
+{
+ result : {
+ className : WeakMap
+ description : WeakMap
+ objectId : <objectId>
+ preview : {
+ description : WeakMap
+ overflow : false
+ properties : [
+ ]
+ subtype : weakmap
+ type : object
+ }
+ subtype : weakmap
+ type : object
+ }
+}
+'new WeakMap([[this, 1]])', returnByValue: false, generatePreview: true
+{
+ result : {
+ className : WeakMap
+ description : WeakMap
+ objectId : <objectId>
+ preview : {
+ description : WeakMap
+ entries : [
+ [0] : {
+ key : {
+ description : global
+ overflow : true
+ properties : [
+ [0] : {
+ name : setTimeout
+ type : function
+ value :
+ }
+ [1] : {
+ name : inspector
+ type : object
+ value : Object
+ }
+ [2] : {
+ name : a
+ subtype : map
+ type : object
+ value : Map(1)
+ }
+ [3] : {
+ name : re
+ subtype : regexp
+ type : object
+ value : /w+/g
+ }
+ [4] : {
+ name : m
+ subtype : map
+ type : object
+ value : Map(1)
+ }
+ ]
+ type : object
+ }
+ value : {
+ description : 1
+ overflow : false
+ properties : [
+ ]
+ type : number
+ }
+ }
+ ]
+ overflow : false
+ properties : [
+ ]
+ subtype : weakmap
+ type : object
+ }
+ subtype : weakmap
+ type : object
+ }
+}
+
+Running test: testWeakSet
+'new WeakSet()', returnByValue: false, generatePreview: true
+{
+ result : {
+ className : WeakSet
+ description : WeakSet
+ objectId : <objectId>
+ preview : {
+ description : WeakSet
+ overflow : false
+ properties : [
+ ]
+ subtype : weakset
+ type : object
+ }
+ subtype : weakset
+ type : object
+ }
+}
+'new WeakSet([this])', returnByValue: false, generatePreview: true
+{
+ result : {
+ className : WeakSet
+ description : WeakSet
+ objectId : <objectId>
+ preview : {
+ description : WeakSet
+ entries : [
+ [0] : {
+ value : {
+ description : global
+ overflow : true
+ properties : [
+ [0] : {
+ name : setTimeout
+ type : function
+ value :
+ }
+ [1] : {
+ name : inspector
+ type : object
+ value : Object
+ }
+ [2] : {
+ name : a
+ subtype : map
+ type : object
+ value : Map(1)
+ }
+ [3] : {
+ name : re
+ subtype : regexp
+ type : object
+ value : /w+/g
+ }
+ [4] : {
+ name : m
+ subtype : map
+ type : object
+ value : Map(1)
+ }
+ ]
+ type : object
+ }
+ }
+ ]
+ overflow : false
+ properties : [
+ ]
+ subtype : weakset
+ type : object
+ }
+ subtype : weakset
+ type : object
+ }
+}
+
+Running test: testGenerator
+'g = (function*(){ yield 42; })(); g.a = 2; g', returnByValue: false, generatePreview: true
+{
+ result : {
+ className : Generator
+ description : Generator
+ objectId : <objectId>
+ preview : {
+ description : Generator
+ overflow : false
+ properties : [
+ [0] : {
+ name : [[GeneratorStatus]]
+ type : string
+ value : suspended
+ }
+ [1] : {
+ name : a
+ type : number
+ value : 2
+ }
+ ]
+ subtype : generator
+ type : object
+ }
+ subtype : generator
+ type : object
+ }
+}
+
+Running test: testError
+'new Error()', returnByValue: false, generatePreview: false
+{
+ result : {
+ className : Error
+ description : Error at <anonymous>:1:1
+ objectId : <objectId>
+ subtype : error
+ type : object
+ }
+}
+'new Error('abc')', returnByValue: false, generatePreview: false
+{
+ result : {
+ className : Error
+ description : Error: abc at <anonymous>:1:1
+ objectId : <objectId>
+ subtype : error
+ type : object
+ }
+}
+'new Error('at\nat')', returnByValue: false, generatePreview: false
+{
+ result : {
+ className : Error
+ description : Error: at at at <anonymous>:1:1
+ objectId : <objectId>
+ subtype : error
+ type : object
+ }
+}
+'new Error('preview')', returnByValue: true, generatePreview: false
+{
+ result : {
+ type : object
+ value : {
+ }
+ }
+}
+'new Error('preview')', returnByValue: false, generatePreview: true
+{
+ result : {
+ className : Error
+ description : Error: preview at <anonymous>:1:1
+ objectId : <objectId>
+ preview : {
+ description : Error: preview at <anonymous>:1:1
+ overflow : false
+ properties : [
+ [0] : {
+ name : stack
+ type : string
+ value : Error: preview at <anonymous>:1:1
+ }
+ [1] : {
+ name : message
+ type : string
+ value : preview
+ }
+ ]
+ subtype : error
+ type : object
+ }
+ subtype : error
+ type : object
+ }
+}
+'({a: new Error('preview')})', returnByValue: false, generatePreview: true
+{
+ result : {
+ className : Object
+ description : Object
+ objectId : <objectId>
+ preview : {
+ description : Object
+ overflow : false
+ properties : [
+ [0] : {
+ name : a
+ subtype : error
+ type : object
+ value : Error: preview at <anonymous>:1:6
+ }
+ ]
+ type : object
+ }
+ type : object
+ }
+}
+'a = new Error('preview and a'); a.a = 123; a', returnByValue: false, generatePreview: true
+{
+ result : {
+ className : Error
+ description : Error: preview and a at <anonymous>:1:5
+ objectId : <objectId>
+ preview : {
+ description : Error: preview and a at <anonymous>:1:5
+ overflow : false
+ properties : [
+ [0] : {
+ name : a
+ type : number
+ value : 123
+ }
+ [1] : {
+ name : stack
+ type : string
+ value : Error: preview and a at <anonymous>:1:5
+ }
+ [2] : {
+ name : message
+ type : string
+ value : preview and a
+ }
+ ]
+ subtype : error
+ type : object
+ }
+ subtype : error
+ type : object
+ }
+}
+'a = new Error('preview and a'); a.a = 123; a', returnByValue: true, generatePreview: false
+{
+ result : {
+ type : object
+ value : {
+ a : 123
+ }
+ }
+}
+
+Running test: testCustomError
+'class CustomError extends Error {}; a = new CustomError(); delete a.stack; a', returnByValue: false, generatePreview: false
+{
+ result : {
+ className : CustomError
+ description : CustomError
+ objectId : <objectId>
+ subtype : error
+ type : object
+ }
+}
+
+Running test: testProxy
+'new Proxy({}, {})', returnByValue: false, generatePreview: false
+{
+ result : {
+ className : Object
+ description : Proxy
+ objectId : <objectId>
+ subtype : proxy
+ type : object
+ }
+}
+'new Proxy(new Error(), {})', returnByValue: false, generatePreview: false
+{
+ result : {
+ className : Object
+ description : Proxy
+ objectId : <objectId>
+ subtype : proxy
+ type : object
+ }
+}
+'new Proxy({c: 3}, {d: 4})', returnByValue: true, generatePreview: false
+{
+ result : {
+ type : object
+ value : {
+ }
+ }
+}
+'new Proxy({a: 1}, {b: 2})', returnByValue: false, generatePreview: true
+{
+ result : {
+ className : Object
+ description : Proxy
+ objectId : <objectId>
+ preview : {
+ description : Proxy
+ overflow : false
+ properties : [
+ [0] : {
+ name : a
+ type : number
+ value : 1
+ }
+ ]
+ subtype : proxy
+ type : object
+ }
+ subtype : proxy
+ type : object
+ }
+}
+'({e: new Proxy({a: 1}, {b: 2})})', returnByValue: false, generatePreview: true
+{
+ result : {
+ className : Object
+ description : Object
+ objectId : <objectId>
+ preview : {
+ description : Object
+ overflow : false
+ properties : [
+ [0] : {
+ name : e
+ subtype : proxy
+ type : object
+ value : Proxy
+ }
+ ]
+ type : object
+ }
+ type : object
+ }
+}
+
+Running test: testPromise
+'Promise.resolve(42)', returnByValue: false, generatePreview: false
+{
+ result : {
+ className : Promise
+ description : Promise
+ objectId : <objectId>
+ subtype : promise
+ type : object
+ }
+}
+'Promise.reject(42)', returnByValue: false, generatePreview: false
+{
+ result : {
+ className : Promise
+ description : Promise
+ objectId : <objectId>
+ subtype : promise
+ type : object
+ }
+}
+'(async function(){})()', returnByValue: false, generatePreview: false
+{
+ result : {
+ className : Promise
+ description : Promise
+ objectId : <objectId>
+ subtype : promise
+ type : object
+ }
+}
+'Promise.resolve('a'.repeat(101))', returnByValue: false, generatePreview: true
+{
+ result : {
+ className : Promise
+ description : Promise
+ objectId : <objectId>
+ preview : {
+ description : Promise
+ overflow : false
+ properties : [
+ [0] : {
+ name : [[PromiseStatus]]
+ type : string
+ value : resolved
+ }
+ [1] : {
+ name : [[PromiseValue]]
+ type : string
+ value : aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa…aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
+ }
+ ]
+ subtype : promise
+ type : object
+ }
+ subtype : promise
+ type : object
+ }
+}
+'Promise.reject(42)', returnByValue: false, generatePreview: true
+{
+ result : {
+ className : Promise
+ description : Promise
+ objectId : <objectId>
+ preview : {
+ description : Promise
+ overflow : false
+ properties : [
+ [0] : {
+ name : [[PromiseStatus]]
+ type : string
+ value : rejected
+ }
+ [1] : {
+ name : [[PromiseValue]]
+ type : number
+ value : 42
+ }
+ ]
+ subtype : promise
+ type : object
+ }
+ subtype : promise
+ type : object
+ }
+}
+'new Promise(resolve => this.resolve = resolve)', returnByValue: false, generatePreview: true
+{
+ result : {
+ className : Promise
+ description : Promise
+ objectId : <objectId>
+ preview : {
+ description : Promise
+ overflow : false
+ properties : [
+ [0] : {
+ name : [[PromiseStatus]]
+ type : string
+ value : pending
+ }
+ [1] : {
+ name : [[PromiseValue]]
+ type : undefined
+ value : undefined
+ }
+ ]
+ subtype : promise
+ type : object
+ }
+ subtype : promise
+ type : object
+ }
+}
+'a = Promise.resolve(42); a.b = 2; a', returnByValue: true, generatePreview: false
+{
+ result : {
+ type : object
+ value : {
+ b : 2
+ }
+ }
+}
+'({a: Promise.resolve(42)})', returnByValue: false, generatePreview: true
+{
+ result : {
+ className : Object
+ description : Object
+ objectId : <objectId>
+ preview : {
+ description : Object
+ overflow : false
+ properties : [
+ [0] : {
+ name : a
+ subtype : promise
+ type : object
+ value : Promise
+ }
+ ]
+ type : object
+ }
+ type : object
+ }
+}
+
+Running test: testTypedArray
+'a = new Uint8Array(2); a.b = 2; a', returnByValue: false, generatePreview: true
+{
+ result : {
+ className : Uint8Array
+ description : Uint8Array(2)
+ objectId : <objectId>
+ preview : {
+ description : Uint8Array(2)
+ overflow : false
+ properties : [
+ [0] : {
+ name : 0
+ type : number
+ value : 0
+ }
+ [1] : {
+ name : 1
+ type : number
+ value : 0
+ }
+ [2] : {
+ name : b
+ type : number
+ value : 2
+ }
+ ]
+ subtype : typedarray
+ type : object
+ }
+ subtype : typedarray
+ type : object
+ }
+}
+'new Int32Array(101)', returnByValue: false, generatePreview: true
+{
+ result : {
+ className : Int32Array
+ description : Int32Array(101)
+ objectId : <objectId>
+ preview : {
+ description : Int32Array(101)
+ overflow : true
+ properties : [
+ [0] : {
+ name : 0
+ type : number
+ value : 0
+ }
+ [1] : {
+ name : 1
+ type : number
+ value : 0
+ }
+ [2] : {
+ name : 2
+ type : number
+ value : 0
+ }
+ [3] : {
+ name : 3
+ type : number
+ value : 0
+ }
+ [4] : {
+ name : 4
+ type : number
+ value : 0
+ }
+ [5] : {
+ name : 5
+ type : number
+ value : 0
+ }
+ [6] : {
+ name : 6
+ type : number
+ value : 0
+ }
+ [7] : {
+ name : 7
+ type : number
+ value : 0
+ }
+ [8] : {
+ name : 8
+ type : number
+ value : 0
+ }
+ [9] : {
+ name : 9
+ type : number
+ value : 0
+ }
+ [10] : {
+ name : 10
+ type : number
+ value : 0
+ }
+ [11] : {
+ name : 11
+ type : number
+ value : 0
+ }
+ [12] : {
+ name : 12
+ type : number
+ value : 0
+ }
+ [13] : {
+ name : 13
+ type : number
+ value : 0
+ }
+ [14] : {
+ name : 14
+ type : number
+ value : 0
+ }
+ [15] : {
+ name : 15
+ type : number
+ value : 0
+ }
+ [16] : {
+ name : 16
+ type : number
+ value : 0
+ }
+ [17] : {
+ name : 17
+ type : number
+ value : 0
+ }
+ [18] : {
+ name : 18
+ type : number
+ value : 0
+ }
+ [19] : {
+ name : 19
+ type : number
+ value : 0
+ }
+ [20] : {
+ name : 20
+ type : number
+ value : 0
+ }
+ [21] : {
+ name : 21
+ type : number
+ value : 0
+ }
+ [22] : {
+ name : 22
+ type : number
+ value : 0
+ }
+ [23] : {
+ name : 23
+ type : number
+ value : 0
+ }
+ [24] : {
+ name : 24
+ type : number
+ value : 0
+ }
+ [25] : {
+ name : 25
+ type : number
+ value : 0
+ }
+ [26] : {
+ name : 26
+ type : number
+ value : 0
+ }
+ [27] : {
+ name : 27
+ type : number
+ value : 0
+ }
+ [28] : {
+ name : 28
+ type : number
+ value : 0
+ }
+ [29] : {
+ name : 29
+ type : number
+ value : 0
+ }
+ [30] : {
+ name : 30
+ type : number
+ value : 0
+ }
+ [31] : {
+ name : 31
+ type : number
+ value : 0
+ }
+ [32] : {
+ name : 32
+ type : number
+ value : 0
+ }
+ [33] : {
+ name : 33
+ type : number
+ value : 0
+ }
+ [34] : {
+ name : 34
+ type : number
+ value : 0
+ }
+ [35] : {
+ name : 35
+ type : number
+ value : 0
+ }
+ [36] : {
+ name : 36
+ type : number
+ value : 0
+ }
+ [37] : {
+ name : 37
+ type : number
+ value : 0
+ }
+ [38] : {
+ name : 38
+ type : number
+ value : 0
+ }
+ [39] : {
+ name : 39
+ type : number
+ value : 0
+ }
+ [40] : {
+ name : 40
+ type : number
+ value : 0
+ }
+ [41] : {
+ name : 41
+ type : number
+ value : 0
+ }
+ [42] : {
+ name : 42
+ type : number
+ value : 0
+ }
+ [43] : {
+ name : 43
+ type : number
+ value : 0
+ }
+ [44] : {
+ name : 44
+ type : number
+ value : 0
+ }
+ [45] : {
+ name : 45
+ type : number
+ value : 0
+ }
+ [46] : {
+ name : 46
+ type : number
+ value : 0
+ }
+ [47] : {
+ name : 47
+ type : number
+ value : 0
+ }
+ [48] : {
+ name : 48
+ type : number
+ value : 0
+ }
+ [49] : {
+ name : 49
+ type : number
+ value : 0
+ }
+ [50] : {
+ name : 50
+ type : number
+ value : 0
+ }
+ [51] : {
+ name : 51
+ type : number
+ value : 0
+ }
+ [52] : {
+ name : 52
+ type : number
+ value : 0
+ }
+ [53] : {
+ name : 53
+ type : number
+ value : 0
+ }
+ [54] : {
+ name : 54
+ type : number
+ value : 0
+ }
+ [55] : {
+ name : 55
+ type : number
+ value : 0
+ }
+ [56] : {
+ name : 56
+ type : number
+ value : 0
+ }
+ [57] : {
+ name : 57
+ type : number
+ value : 0
+ }
+ [58] : {
+ name : 58
+ type : number
+ value : 0
+ }
+ [59] : {
+ name : 59
+ type : number
+ value : 0
+ }
+ [60] : {
+ name : 60
+ type : number
+ value : 0
+ }
+ [61] : {
+ name : 61
+ type : number
+ value : 0
+ }
+ [62] : {
+ name : 62
+ type : number
+ value : 0
+ }
+ [63] : {
+ name : 63
+ type : number
+ value : 0
+ }
+ [64] : {
+ name : 64
+ type : number
+ value : 0
+ }
+ [65] : {
+ name : 65
+ type : number
+ value : 0
+ }
+ [66] : {
+ name : 66
+ type : number
+ value : 0
+ }
+ [67] : {
+ name : 67
+ type : number
+ value : 0
+ }
+ [68] : {
+ name : 68
+ type : number
+ value : 0
+ }
+ [69] : {
+ name : 69
+ type : number
+ value : 0
+ }
+ [70] : {
+ name : 70
+ type : number
+ value : 0
+ }
+ [71] : {
+ name : 71
+ type : number
+ value : 0
+ }
+ [72] : {
+ name : 72
+ type : number
+ value : 0
+ }
+ [73] : {
+ name : 73
+ type : number
+ value : 0
+ }
+ [74] : {
+ name : 74
+ type : number
+ value : 0
+ }
+ [75] : {
+ name : 75
+ type : number
+ value : 0
+ }
+ [76] : {
+ name : 76
+ type : number
+ value : 0
+ }
+ [77] : {
+ name : 77
+ type : number
+ value : 0
+ }
+ [78] : {
+ name : 78
+ type : number
+ value : 0
+ }
+ [79] : {
+ name : 79
+ type : number
+ value : 0
+ }
+ [80] : {
+ name : 80
+ type : number
+ value : 0
+ }
+ [81] : {
+ name : 81
+ type : number
+ value : 0
+ }
+ [82] : {
+ name : 82
+ type : number
+ value : 0
+ }
+ [83] : {
+ name : 83
+ type : number
+ value : 0
+ }
+ [84] : {
+ name : 84
+ type : number
+ value : 0
+ }
+ [85] : {
+ name : 85
+ type : number
+ value : 0
+ }
+ [86] : {
+ name : 86
+ type : number
+ value : 0
+ }
+ [87] : {
+ name : 87
+ type : number
+ value : 0
+ }
+ [88] : {
+ name : 88
+ type : number
+ value : 0
+ }
+ [89] : {
+ name : 89
+ type : number
+ value : 0
+ }
+ [90] : {
+ name : 90
+ type : number
+ value : 0
+ }
+ [91] : {
+ name : 91
+ type : number
+ value : 0
+ }
+ [92] : {
+ name : 92
+ type : number
+ value : 0
+ }
+ [93] : {
+ name : 93
+ type : number
+ value : 0
+ }
+ [94] : {
+ name : 94
+ type : number
+ value : 0
+ }
+ [95] : {
+ name : 95
+ type : number
+ value : 0
+ }
+ [96] : {
+ name : 96
+ type : number
+ value : 0
+ }
+ [97] : {
+ name : 97
+ type : number
+ value : 0
+ }
+ [98] : {
+ name : 98
+ type : number
+ value : 0
+ }
+ [99] : {
+ name : 99
+ type : number
+ value : 0
+ }
+ ]
+ subtype : typedarray
+ type : object
+ }
+ subtype : typedarray
+ type : object
+ }
+}
+
+Running test: testArrayBuffer
+'new Uint8Array().buffer', returnByValue: false, generatePreview: true
+{
+ result : {
+ className : ArrayBuffer
+ description : ArrayBuffer(0)
+ objectId : <objectId>
+ preview : {
+ description : ArrayBuffer(0)
+ overflow : false
+ properties : [
+ ]
+ subtype : arraybuffer
+ type : object
+ }
+ subtype : arraybuffer
+ type : object
+ }
+}
+'new Int32Array(100).buffer', returnByValue: false, generatePreview: true
+{
+ result : {
+ className : ArrayBuffer
+ description : ArrayBuffer(400)
+ objectId : <objectId>
+ preview : {
+ description : ArrayBuffer(400)
+ overflow : false
+ properties : [
+ ]
+ subtype : arraybuffer
+ type : object
+ }
+ subtype : arraybuffer
+ type : object
+ }
+}
+
+Running test: testDataView
+'new DataView(new ArrayBuffer(16))', returnByValue: false, generatePreview: true
+{
+ result : {
+ className : DataView
+ description : DataView(16)
+ objectId : <objectId>
+ preview : {
+ description : DataView(16)
+ overflow : false
+ properties : [
+ ]
+ subtype : dataview
+ type : object
+ }
+ subtype : dataview
+ type : object
+ }
+}
+'new DataView(new ArrayBuffer(16), 12, 4)', returnByValue: false, generatePreview: true
+{
+ result : {
+ className : DataView
+ description : DataView(4)
+ objectId : <objectId>
+ preview : {
+ description : DataView(4)
+ overflow : false
+ properties : [
+ ]
+ subtype : dataview
+ type : object
+ }
+ subtype : dataview
+ type : object
+ }
+}
+
+Running test: testArray
+'[]', returnByValue: false, generatePreview: false
+{
+ result : {
+ className : Array
+ description : Array(0)
+ objectId : <objectId>
+ subtype : array
+ type : object
+ }
+}
+'[1,2,3]', returnByValue: false, generatePreview: false
+{
+ result : {
+ className : Array
+ description : Array(3)
+ objectId : <objectId>
+ subtype : array
+ type : object
+ }
+}
+
+Running test: testArrayLike
+'({length: 5, splice: () => []})', returnByValue: false, generatePreview: false
+{
+ result : {
+ className : Object
+ description : Object(5)
+ objectId : <objectId>
+ subtype : array
+ type : object
+ }
+}
+'new (class Foo{constructor() {
+ this.length = 5;
+ this.splice = () => [];
+ }})', returnByValue: false, generatePreview: false
+{
+ result : {
+ className : Foo
+ description : Foo(5)
+ objectId : <objectId>
+ subtype : array
+ type : object
+ }
+}
+'({length: -5, splice: () => []})', returnByValue: false, generatePreview: false
+{
+ result : {
+ className : Object
+ description : Object
+ objectId : <objectId>
+ type : object
+ }
+}
+
+Running test: testOtherObjects
+'({a: 1, b:2})', returnByValue: false, generatePreview: false
+{
+ result : {
+ className : Object
+ description : Object
+ objectId : <objectId>
+ type : object
+ }
+}
+'({a: 1, b:2})', returnByValue: true, generatePreview: false
+{
+ result : {
+ type : object
+ value : {
+ a : 1
+ b : 2
+ }
+ }
+}
+'({a: 1, b:2})', returnByValue: false, generatePreview: true
+{
+ result : {
+ className : Object
+ description : Object
+ objectId : <objectId>
+ preview : {
+ description : Object
+ overflow : false
+ properties : [
+ [0] : {
+ name : a
+ type : number
+ value : 1
+ }
+ [1] : {
+ name : b
+ type : number
+ value : 2
+ }
+ ]
+ type : object
+ }
+ type : object
+ }
+}
+'new (function Foo() { this.a = 5; })', returnByValue: false, generatePreview: false
+{
+ result : {
+ className : Foo
+ description : Foo
+ objectId : <objectId>
+ type : object
+ }
+}
+'new (function Foo() { this.a = [1,2,3]; })', returnByValue: true, generatePreview: false
+{
+ result : {
+ type : object
+ value : {
+ a : [
+ [0] : 1
+ [1] : 2
+ [2] : 3
+ ]
+ }
+ }
+}
+'new (class Bar {})', returnByValue: false, generatePreview: false
+{
+ result : {
+ className : Bar
+ description : Bar
+ objectId : <objectId>
+ type : object
+ }
+}
+'inspector.createObjectWithAccessor('title', true)', returnByValue: false, generatePreview: true
+{
+ id : <messageId>
+ result : {
+ result : {
+ className : Object
+ description : Object
+ objectId : <objectId>
+ preview : {
+ description : Object
+ overflow : false
+ properties : [
+ [0] : {
+ name : title
+ type : accessor
+ }
+ ]
+ type : object
+ }
+ type : object
+ }
+ }
+}
+'inspector.createObjectWithAccessor('title', false)', returnByValue: false, generatePreview: true
+{
+ id : <messageId>
+ result : {
+ result : {
+ className : Object
+ description : Object
+ objectId : <objectId>
+ preview : {
+ description : Object
+ overflow : false
+ properties : [
+ [0] : {
+ name : title
+ type : accessor
+ }
+ ]
+ type : object
+ }
+ type : object
+ }
+ }
+}
+'inspector.createObjectWithAccessor('title', true)', returnByValue: true, generatePreview: false
+{
+ error : {
+ code : -32603
+ message : Internal error
+ }
+ id : <messageId>
+}
+'({get a() { return 42; }})', returnByValue: false, generatePreview: true
+{
+ id : <messageId>
+ result : {
+ result : {
+ className : Object
+ description : Object
+ objectId : <objectId>
+ preview : {
+ description : Object
+ overflow : false
+ properties : [
+ [0] : {
+ name : a
+ type : accessor
+ }
+ ]
+ type : object
+ }
+ type : object
+ }
+ }
+}
+'({set a(v) {}})', returnByValue: false, generatePreview: true
+{
+ id : <messageId>
+ result : {
+ result : {
+ className : Object
+ description : Object
+ objectId : <objectId>
+ preview : {
+ description : Object
+ overflow : false
+ properties : [
+ ]
+ type : object
+ }
+ type : object
+ }
+ }
+}
+'({a: () => 42})', returnByValue: false, generatePreview: true
+{
+ id : <messageId>
+ result : {
+ result : {
+ className : Object
+ description : Object
+ objectId : <objectId>
+ preview : {
+ description : Object
+ overflow : false
+ properties : [
+ [0] : {
+ name : a
+ type : function
+ value :
+ }
+ ]
+ type : object
+ }
+ type : object
+ }
+ }
+}
+'({a: null})', returnByValue: false, generatePreview: true
+{
+ id : <messageId>
+ result : {
+ result : {
+ className : Object
+ description : Object
+ objectId : <objectId>
+ preview : {
+ description : Object
+ overflow : false
+ properties : [
+ [0] : {
+ name : a
+ subtype : null
+ type : object
+ value : null
+ }
+ ]
+ type : object
+ }
+ type : object
+ }
+ }
+}
+'({a: true})', returnByValue: false, generatePreview: true
+{
+ id : <messageId>
+ result : {
+ result : {
+ className : Object
+ description : Object
+ objectId : <objectId>
+ preview : {
+ description : Object
+ overflow : false
+ properties : [
+ [0] : {
+ name : a
+ type : boolean
+ value : true
+ }
+ ]
+ type : object
+ }
+ type : object
+ }
+ }
+}
+'({a1: -Infinity, a2: +Infinity, a3: -0, a4: NaN, a5: 1.23})', returnByValue: false, generatePreview: true
+{
+ id : <messageId>
+ result : {
+ result : {
+ className : Object
+ description : Object
+ objectId : <objectId>
+ preview : {
+ description : Object
+ overflow : false
+ properties : [
+ [0] : {
+ name : a1
+ type : number
+ value : -Infinity
+ }
+ [1] : {
+ name : a2
+ type : number
+ value : Infinity
+ }
+ [2] : {
+ name : a3
+ type : number
+ value : -0
+ }
+ [3] : {
+ name : a4
+ type : number
+ value : NaN
+ }
+ [4] : {
+ name : a5
+ type : number
+ value : 1.23
+ }
+ ]
+ type : object
+ }
+ type : object
+ }
+ }
+}
+'({a1: 1234567890123456789012345678901234567890n})', returnByValue: false, generatePreview: true
+{
+ id : <messageId>
+ result : {
+ result : {
+ className : Object
+ description : Object
+ objectId : <objectId>
+ preview : {
+ description : Object
+ overflow : false
+ properties : [
+ [0] : {
+ name : a1
+ type : bigint
+ value : 1234567890123456789012345678901234567890n
+ }
+ ]
+ type : object
+ }
+ type : object
+ }
+ }
+}
+'({a1: Symbol(42)})', returnByValue: false, generatePreview: true
+{
+ id : <messageId>
+ result : {
+ result : {
+ className : Object
+ description : Object
+ objectId : <objectId>
+ preview : {
+ description : Object
+ overflow : false
+ properties : [
+ [0] : {
+ name : a1
+ type : symbol
+ value : Symbol(42)
+ }
+ ]
+ type : object
+ }
+ type : object
+ }
+ }
+}
+'({a1: /abc/i})', returnByValue: false, generatePreview: true
+{
+ id : <messageId>
+ result : {
+ result : {
+ className : Object
+ description : Object
+ objectId : <objectId>
+ preview : {
+ description : Object
+ overflow : false
+ properties : [
+ [0] : {
+ name : a1
+ subtype : regexp
+ type : object
+ value : /abc/i
+ }
+ ]
+ type : object
+ }
+ type : object
+ }
+ }
+}
+'({a1: () => 42, a2: async () => 42})', returnByValue: false, generatePreview: true
+{
+ id : <messageId>
+ result : {
+ result : {
+ className : Object
+ description : Object
+ objectId : <objectId>
+ preview : {
+ description : Object
+ overflow : false
+ properties : [
+ [0] : {
+ name : a1
+ type : function
+ value :
+ }
+ [1] : {
+ name : a2
+ type : function
+ value :
+ }
+ ]
+ type : object
+ }
+ type : object
+ }
+ }
+}
+'({a1: ({}), a2: new (class Bar{})})', returnByValue: false, generatePreview: true
+{
+ id : <messageId>
+ result : {
+ result : {
+ className : Object
+ description : Object
+ objectId : <objectId>
+ preview : {
+ description : Object
+ overflow : false
+ properties : [
+ [0] : {
+ name : a1
+ type : object
+ value : Object
+ }
+ [1] : {
+ name : a2
+ type : object
+ value : Bar
+ }
+ ]
+ type : object
+ }
+ type : object
+ }
+ }
+}
+'({a1: 'a'.repeat(100), a2: 'a'.repeat(101)})', returnByValue: false, generatePreview: true
+{
+ id : <messageId>
+ result : {
+ result : {
+ className : Object
+ description : Object
+ objectId : <objectId>
+ preview : {
+ description : Object
+ overflow : false
+ properties : [
+ [0] : {
+ name : a1
+ type : string
+ value : aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
+ }
+ [1] : {
+ name : a2
+ type : string
+ value : aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa…aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
+ }
+ ]
+ type : object
+ }
+ type : object
+ }
+ }
+}
+'({a1: 1, a2: 2, a3: 3, a4:4, a5:5, a6: 6})', returnByValue: false, generatePreview: true
+{
+ id : <messageId>
+ result : {
+ result : {
+ className : Object
+ description : Object
+ objectId : <objectId>
+ preview : {
+ description : Object
+ overflow : true
+ properties : [
+ [0] : {
+ name : a1
+ type : number
+ value : 1
+ }
+ [1] : {
+ name : a2
+ type : number
+ value : 2
+ }
+ [2] : {
+ name : a3
+ type : number
+ value : 3
+ }
+ [3] : {
+ name : a4
+ type : number
+ value : 4
+ }
+ [4] : {
+ name : a5
+ type : number
+ value : 5
+ }
+ ]
+ type : object
+ }
+ type : object
+ }
+ }
+}
+'([1,2,3])', returnByValue: false, generatePreview: true
+{
+ id : <messageId>
+ result : {
+ result : {
+ className : Array
+ description : Array(3)
+ objectId : <objectId>
+ preview : {
+ description : Array(3)
+ overflow : false
+ properties : [
+ [0] : {
+ name : 0
+ type : number
+ value : 1
+ }
+ [1] : {
+ name : 1
+ type : number
+ value : 2
+ }
+ [2] : {
+ name : 2
+ type : number
+ value : 3
+ }
+ ]
+ subtype : array
+ type : object
+ }
+ subtype : array
+ type : object
+ }
+ }
+}
+
+Running test: testArray2
+'([1,2,3])', returnByValue: false, generatePreview: false
+{
+ id : <messageId>
+ result : {
+ result : {
+ className : Array
+ description : Array(3)
+ objectId : <objectId>
+ subtype : array
+ type : object
+ }
+ }
+}
+'([1,2,3])', returnByValue: true, generatePreview: false
+{
+ id : <messageId>
+ result : {
+ result : {
+ type : object
+ value : [
+ [0] : 1
+ [1] : 2
+ [2] : 3
+ ]
+ }
+ }
+}
+'([1,2,3])', returnByValue: false, generatePreview: true
+{
+ id : <messageId>
+ result : {
+ result : {
+ className : Array
+ description : Array(3)
+ objectId : <objectId>
+ preview : {
+ description : Array(3)
+ overflow : false
+ properties : [
+ [0] : {
+ name : 0
+ type : number
+ value : 1
+ }
+ [1] : {
+ name : 1
+ type : number
+ value : 2
+ }
+ [2] : {
+ name : 2
+ type : number
+ value : 3
+ }
+ ]
+ subtype : array
+ type : object
+ }
+ subtype : array
+ type : object
+ }
+ }
+}
+'({a: [1,2,3]})', returnByValue: false, generatePreview: true
+{
+ id : <messageId>
+ result : {
+ result : {
+ className : Object
+ description : Object
+ objectId : <objectId>
+ preview : {
+ description : Object
+ overflow : false
+ properties : [
+ [0] : {
+ name : a
+ subtype : array
+ type : object
+ value : Array(3)
+ }
+ ]
+ type : object
+ }
+ type : object
+ }
+ }
+}
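
Each block in the expected file above corresponds to one Runtime.evaluate call in the remote-object.js driver added at the end of this patch; the header line of every block records the expression and the returnByValue/generatePreview flags used. As a hedged illustration of the three modes being exercised (a sketch only, not code from the commit):

    // Sketch: the three evaluation modes the expectations cover.
    // returnByValue serializes the result to plain JSON with no objectId;
    // generatePreview keeps the by-reference result but attaches an
    // abbreviated 'preview'; the default returns only the reference.
    async function show(expression) {
      for (const options of [{}, {returnByValue: true}, {generatePreview: true}]) {
        const {result} = await Protocol.Runtime.evaluate({expression, ...options});
        InspectorTest.logMessage(result);
      }
    }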
diff --git a/deps/v8/test/inspector/runtime/remote-object-get-properties-expected.txt b/deps/v8/test/inspector/runtime/remote-object-get-properties-expected.txt
new file mode 100644
index 0000000000..216639355e
--- /dev/null
+++ b/deps/v8/test/inspector/runtime/remote-object-get-properties-expected.txt
@@ -0,0 +1,138 @@
+Tests Runtime.getProperties.
+
+Running test: testObject
+{
+ id : <messageId>
+ result : {
+ result : [
+ [0] : {
+ configurable : true
+ enumerable : true
+ isOwn : true
+ name : a
+ value : {
+ description : 1
+ type : number
+ value : 1
+ }
+ writable : true
+ }
+ [1] : {
+ configurable : true
+ enumerable : false
+ isOwn : true
+ name : __proto__
+ value : {
+ className : Object
+ description : Object
+ objectId : <objectId>
+ type : object
+ }
+ writable : true
+ }
+ ]
+ }
+}
+{
+ id : <messageId>
+ result : {
+ result : [
+ [0] : {
+ configurable : false
+ enumerable : true
+ isOwn : true
+ name : d
+ value : {
+ description : 42
+ type : number
+ value : 42
+ }
+ writable : false
+ }
+ [1] : {
+ configurable : false
+ enumerable : false
+ isOwn : true
+ name : a
+ value : {
+ description : 42
+ type : number
+ value : 42
+ }
+ writable : false
+ }
+ [2] : {
+ configurable : false
+ enumerable : false
+ isOwn : true
+ name : b
+ value : {
+ description : 42
+ type : number
+ value : 42
+ }
+ writable : true
+ }
+ [3] : {
+ configurable : true
+ enumerable : false
+ isOwn : true
+ name : c
+ value : {
+ description : 42
+ type : number
+ value : 42
+ }
+ writable : false
+ }
+ [4] : {
+ configurable : false
+ enumerable : false
+ get : {
+ className : Function
+ description : () => 42
+ objectId : <objectId>
+ type : function
+ }
+ isOwn : true
+ name : e
+ set : {
+ className : Function
+ description : () => 0
+ objectId : <objectId>
+ type : function
+ }
+ }
+ [5] : {
+ configurable : false
+ enumerable : false
+ isOwn : true
+ name : Symbol(42)
+ symbol : {
+ description : Symbol(42)
+ objectId : <objectId>
+ type : symbol
+ }
+ value : {
+ description : 239
+ type : number
+ value : 239
+ }
+ writable : false
+ }
+ [6] : {
+ configurable : true
+ enumerable : false
+ isOwn : true
+ name : __proto__
+ value : {
+ className : Object
+ description : Object
+ objectId : <objectId>
+ type : object
+ }
+ writable : true
+ }
+ ]
+ }
+}
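
A note on the flags above: the Symbol(42) entry reports configurable, enumerable and writable all false because Object.defineProperty defaults every omitted attribute, as the test file that follows demonstrates. A standalone check, runnable in any engine with no inspector involved:

    // Descriptor attributes omitted from defineProperty default to false.
    const o = {};
    Object.defineProperty(o, 'x', {value: 239});
    console.log(Object.getOwnPropertyDescriptor(o, 'x'));
    // -> { value: 239, writable: false, enumerable: false, configurable: false }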
diff --git a/deps/v8/test/inspector/runtime/remote-object-get-properties.js b/deps/v8/test/inspector/runtime/remote-object-get-properties.js
new file mode 100644
index 0000000000..463539fbfd
--- /dev/null
+++ b/deps/v8/test/inspector/runtime/remote-object-get-properties.js
@@ -0,0 +1,61 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+const {session, contextGroup, Protocol} =
+ InspectorTest.start('Tests Runtime.getProperties.');
+
+InspectorTest.runAsyncTestSuite([
+ async function testObject() {
+ await getProperties('({a: 1})', { ownProperties: true });
+ await getProperties(`(function(){
+ let b = {};
+ Object.defineProperty(b, 'a', {
+ configurable: false,
+ enumerable: false,
+ value: 42,
+ writable: false
+ });
+ Object.defineProperty(b, 'b', {
+ configurable: false,
+ enumerable: false,
+ value: 42,
+ writable: true
+ });
+ Object.defineProperty(b, 'c', {
+ configurable: true,
+ enumerable: false,
+ value: 42,
+ writable: false
+ });
+ Object.defineProperty(b, 'd', {
+ configurable: false,
+ enumerable: true,
+ value: 42,
+ writable: false
+ });
+ Object.defineProperty(b, 'e', {
+ set: () => 0,
+ get: () => 42
+ });
+ Object.defineProperty(b, Symbol(42), {
+ value: 239
+ });
+ return b;
+ })()`, { ownProperties: true });
+ }
+]);
+
+async function getProperties(expression, options) {
+ try {
+ const { result: { result: { objectId } } } =
+ await Protocol.Runtime.evaluate({ expression });
+ const result = await Protocol.Runtime.getProperties({
+ objectId,
+ ownProperties: options.ownProperties
+ });
+ InspectorTest.logMessage(result);
+ } catch (e) {
+ InspectorTest.log(e.stack);
+ }
+}
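
The getProperties helper above forwards only ownProperties. The DevTools protocol's Runtime.getProperties also accepts accessorPropertiesOnly and generatePreview; a hedged sketch of the same helper with that wider option surface (parameter names come from the protocol, not from this patch):

    // Sketch: forward the full Runtime.getProperties option set.
    async function getPropertiesEx(expression, options = {}) {
      const {result: {result: {objectId}}} =
          await Protocol.Runtime.evaluate({expression});
      const result = await Protocol.Runtime.getProperties({
        objectId,
        ownProperties: options.ownProperties,
        accessorPropertiesOnly: options.accessorPropertiesOnly,
        generatePreview: options.generatePreview
      });
      InspectorTest.logMessage(result);
    }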
diff --git a/deps/v8/test/inspector/runtime/remote-object.js b/deps/v8/test/inspector/runtime/remote-object.js
new file mode 100644
index 0000000000..ed35f0eff6
--- /dev/null
+++ b/deps/v8/test/inspector/runtime/remote-object.js
@@ -0,0 +1,635 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+const {session, contextGroup, Protocol} =
+ InspectorTest.start('Tests Runtime.RemoteObject.');
+
+function evaluate(options) {
+ InspectorTest.log(`'${options.expression}', ` +
+ `returnByValue: ${options.returnByValue || false}, ` +
+ `generatePreview: ${options.generatePreview || false}`);
+ return Protocol.Runtime.evaluate(options);
+}
+
+InspectorTest.runAsyncTestSuite([
+ async function testNull() {
+ InspectorTest.logMessage((await evaluate({
+ expression: 'null'
+ })).result);
+ InspectorTest.logMessage((await evaluate({
+ expression: 'null',
+ returnByValue: true
+ })).result);
+ InspectorTest.logMessage((await evaluate({
+ expression: 'null',
+ generatePreview: true
+ })).result);
+ },
+ async function testBoolean() {
+ InspectorTest.logMessage((await evaluate({
+ expression: 'true'
+ })).result);
+ InspectorTest.logMessage((await evaluate({
+ expression: 'false'
+ })).result);
+ InspectorTest.logMessage((await evaluate({
+ expression: 'true',
+ returnByValue: true,
+ })).result);
+ InspectorTest.logMessage((await evaluate({
+ expression: 'true',
+ generatePreview: true,
+ })).result);
+ },
+ async function testNumber() {
+ InspectorTest.logMessage((await evaluate({
+ expression: '0 / {}'
+ })).result);
+ InspectorTest.logMessage((await evaluate({
+ expression: '-0'
+ })).result);
+ InspectorTest.logMessage((await evaluate({
+ expression: '0'
+ })).result);
+ InspectorTest.logMessage((await evaluate({
+ expression: '1/0'
+ })).result);
+ InspectorTest.logMessage((await evaluate({
+ expression: '-1/0'
+ })).result);
+ InspectorTest.logMessage((await evaluate({
+ expression: '2.3456'
+ })).result);
+ InspectorTest.logMessage((await evaluate({
+ expression: '2.3456',
+ returnByValue: true
+ })).result);
+ InspectorTest.logMessage((await evaluate({
+ expression: '1/0',
+ returnByValue: true
+ })).result);
+ InspectorTest.logMessage((await evaluate({
+ expression: '({a: 1/0})',
+ returnByValue: true
+ })).result);
+ },
+ async function testUndefined() {
+ InspectorTest.logMessage((await evaluate({
+ expression: 'undefined'
+ })).result);
+ InspectorTest.logMessage((await evaluate({
+ expression: 'undefined',
+ returnByValue: true
+ })).result);
+ InspectorTest.logMessage((await evaluate({
+ expression: '({a : undefined})',
+ returnByValue: true
+ })).result);
+ InspectorTest.logMessage((await evaluate({
+ expression: '([1, undefined])',
+ returnByValue: true
+ })).result);
+ },
+ async function testString() {
+ InspectorTest.logMessage((await evaluate({
+ expression: '\'Hello!\''
+ })).result);
+ InspectorTest.logMessage((await evaluate({
+ expression: '\'Hello!\'',
+ returnByValue: true
+ })).result);
+ InspectorTest.logMessage((await evaluate({
+ expression: '\'Hello!\'',
+ generatePreview: true
+ })).result);
+ },
+ async function testSymbol() {
+ InspectorTest.logMessage((await evaluate({
+ expression: 'Symbol()',
+ generatePreview: true
+ })).result);
+ InspectorTest.logMessage((await evaluate({
+ expression: 'Symbol(42)',
+ generatePreview: true
+ })).result);
+ InspectorTest.logMessage((await evaluate({
+ expression: `Symbol('abc')`,
+ generatePreview: true
+ })).result);
+ InspectorTest.logMessage((await evaluate({
+ expression: `Symbol('abc')`,
+ returnByValue: true
+ })));
+ },
+ async function testReturnByValue() {
+ InspectorTest.log('Empty object');
+ InspectorTest.logMessage((await evaluate({
+ expression: '({})', returnByValue: true
+ })).result);
+ InspectorTest.log('Object with properties');
+ InspectorTest.logMessage((await evaluate({
+ expression: '({a:1, b:2})', returnByValue: true
+ })).result);
+ InspectorTest.log('Object with cycle');
+ InspectorTest.logMessage((await evaluate({
+ expression: 'a = {};a.a = a; a', returnByValue: true
+ })).error);
+ InspectorTest.log('Function () => 42');
+ InspectorTest.logMessage((await evaluate({
+ expression: '() => 42', returnByValue: true
+ })).result);
+ InspectorTest.log('Symbol(42)');
+ InspectorTest.logMessage((await evaluate({
+ expression: 'Symbol(42)', returnByValue: true
+ })).error);
+ InspectorTest.log('Error object');
+ InspectorTest.logMessage((await evaluate({
+ expression: 'new Error()', returnByValue: true
+ })).result);
+ },
+ async function testFunction() {
+ InspectorTest.logMessage((await evaluate({
+ expression: '(() => 42)'
+ })).result);
+ InspectorTest.logMessage((await evaluate({
+ expression: '(function() { return 42 })'
+ })).result);
+ InspectorTest.logMessage((await evaluate({
+ expression: '(function name() { return 42 })'
+ })).result);
+ InspectorTest.logMessage((await evaluate({
+ expression: '(async function asyncName() { return 42 })'
+ })).result);
+ InspectorTest.logMessage((await evaluate({
+ expression: '(async () => 42)'
+ })).result);
+ InspectorTest.logMessage((await evaluate({
+ expression: '(function (a) { return a; }).bind(null, 42)'
+ })).result);
+ InspectorTest.logMessage((await evaluate({
+ expression: 'a = (function() { return 42 }); a.b = 2; a',
+ generatePreview: true
+ })).result);
+ InspectorTest.logMessage((await evaluate({
+ expression: '(function() { return 42 })',
+ returnByValue: true
+ })).result);
+ InspectorTest.logMessage((await evaluate({
+ expression: 'a = (function() { return 42 }); a.b = 2; a',
+ returnByValue: true
+ })).result);
+ },
+ async function testBigInt() {
+ InspectorTest.logMessage((await evaluate({
+ expression: '1n'
+ })).result);
+ InspectorTest.logMessage((await evaluate({
+ expression: '-5n'
+ })).result);
+ InspectorTest.logMessage((await evaluate({
+ expression: '1234567890123456789012345678901234567890n'
+ })).result);
+ InspectorTest.logMessage((await evaluate({
+ expression: '-5n',
+ returnByValue: true
+ })).result);
+ InspectorTest.logMessage((await evaluate({
+ expression: '-5n',
+ generatePreview: true
+ })).result);
+ },
+ async function testRegExp() {
+ InspectorTest.logMessage((await evaluate({
+ expression: '/\w+/g'
+ })).result);
+ InspectorTest.logMessage((await evaluate({
+ expression: '/\w+/i'
+ })).result);
+ InspectorTest.logMessage((await evaluate({
+ expression: '/\w+/m'
+ })).result);
+ InspectorTest.logMessage((await evaluate({
+ expression: '/\w+/s'
+ })).result);
+ InspectorTest.logMessage((await evaluate({
+ expression: '/\w+/u'
+ })).result);
+ InspectorTest.logMessage((await evaluate({
+ expression: '/\w+/y'
+ })).result);
+ InspectorTest.logMessage((await evaluate({
+ expression: '/\w+/gimsuy'
+ })).result);
+ InspectorTest.logMessage((await evaluate({
+ expression: `new RegExp('\\w+', 'g')`,
+ })).result);
+ InspectorTest.logMessage((await evaluate({
+ expression: `var re = new RegExp('\\w+', 'g');
+ re.prop = 32;
+ re`,
+ generatePreview: true
+ })).result);
+ InspectorTest.logMessage((await evaluate({
+ expression: `var re = new RegExp('\\w+', 'g');
+ re.prop = 32;
+ re`,
+ returnByValue: true
+ })).result);
+ },
+ async function testDate() {
+ let result = (await evaluate({
+ expression: `new Date('May 18, 1991 03:24:00')`,
+ generatePreview: true
+ })).result;
+ if (result.result.description === new Date('May 18, 1991 03:24:00') + '')
+ result.result.description = '<expected description>';
+ if (result.result.preview.description === new Date('May 18, 1991 03:24:00') + '')
+ result.result.preview.description = '<expected description>';
+ InspectorTest.logMessage(result);
+
+ result = (await evaluate({
+ expression: `new Date(2018, 9, 31)`,
+ generatePreview: true
+ })).result;
+ if (result.result.description === new Date(2018, 9, 31) + '')
+ result.result.description = '<expected description>';
+ if (result.result.preview.description === new Date(2018, 9, 31) + '')
+ result.result.preview.description = '<expected description>';
+ InspectorTest.logMessage(result);
+
+ result = (await evaluate({
+ expression: `a = new Date(2018, 9, 31); a.b = 2; a`,
+ generatePreview: true
+ })).result;
+ if (result.result.description === new Date(2018, 9, 31) + '')
+ result.result.description = '<expected description>';
+ if (result.result.preview.description === new Date(2018, 9, 31) + '')
+ result.result.preview.description = '<expected description>';
+ InspectorTest.logMessage(result);
+ },
+ async function testMap() {
+ InspectorTest.logMessage((await evaluate({
+ expression: 'new Map()',
+ generatePreview: true,
+ })).result);
+ InspectorTest.logMessage((await evaluate({
+ expression: 'new Map([[1,2]])',
+ generatePreview: true,
+ })).result);
+ InspectorTest.logMessage((await evaluate({
+ expression: 'a = new Map(); a.set(a, a); a',
+ generatePreview: true,
+ })).result);
+ InspectorTest.logMessage((await evaluate({
+ expression: `new Map([['a','b']])`
+ })).result);
+ InspectorTest.logMessage((await evaluate({
+ expression: `({ a: new Map([['a','b']]) })`,
+ generatePreview: true
+ })).result);
+ InspectorTest.logMessage((await evaluate({
+ expression: `m = new Map([['a', {b: 2}]])
+ m.d = 42;
+ m`,
+ generatePreview: true
+ })).result);
+ InspectorTest.logMessage((await evaluate({
+ expression: `m = new Map([['a', {b: 2}]])
+ m.d = 42;
+ m`,
+ returnByValue: true
+ })).result);
+ InspectorTest.logMessage((await evaluate({
+ expression: `new Map([['a', {b: 2}]]).values()`
+ })).result);
+ InspectorTest.logMessage((await evaluate({
+ expression: `new Map([['a', {b: 2}]]).values()`,
+ generatePreview: true
+ })).result);
+ InspectorTest.logMessage((await evaluate({
+ expression: `it = new Map([['a', {b: 2}]]).values(); it.next(); it`,
+ generatePreview: true
+ })).result);
+ InspectorTest.logMessage((await evaluate({
+ expression: `new Map([['a', {b: 2}]]).values()`,
+ returnByValue: true
+ })).result);
+ InspectorTest.logMessage((await evaluate({
+ expression: `new Map([['a', {b: 2}]]).entries()`
+ })).result);
+ InspectorTest.logMessage((await evaluate({
+ expression: `new Map([['a', {b: 2}]]).entries()`,
+ generatePreview: true
+ })).result);
+ InspectorTest.logMessage((await evaluate({
+ expression: `it = new Map([['a', {b: 2}]]).entries(); it.next(); it`,
+ generatePreview: true
+ })).result);
+ InspectorTest.logMessage((await evaluate({
+ expression: `new Map([['a', {b: 2}]]).entries()`,
+ returnByValue: true
+ })).result);
+ },
+ async function testSet() {
+ InspectorTest.logMessage((await evaluate({
+ expression: 'new Set([1])',
+ generatePreview: true
+ })).result);
+ InspectorTest.logMessage((await evaluate({
+ expression: 'new Set([1])',
+ returnByValue: true
+ })).result);
+ InspectorTest.logMessage((await evaluate({
+ expression: 'new Set([1,2,3,4,5,6,7])',
+ generatePreview: true
+ })).result);
+ InspectorTest.logMessage((await evaluate({
+ expression: 'new Set([1,2,3]).values()',
+ generatePreview: true
+ })).result);
+ InspectorTest.logMessage((await evaluate({
+ expression: 'it = new Set([1,2,3]).values(); it.next(); it',
+ generatePreview: true
+ })).result);
+ },
+ async function testWeakMap() {
+ InspectorTest.logMessage((await evaluate({
+ expression: 'new WeakMap()',
+ generatePreview: true
+ })).result);
+ InspectorTest.logMessage((await evaluate({
+ expression: 'new WeakMap([[this, 1]])',
+ generatePreview: true
+ })).result);
+ },
+ async function testWeakSet() {
+ InspectorTest.logMessage((await evaluate({
+ expression: 'new WeakSet()',
+ generatePreview: true
+ })).result);
+ InspectorTest.logMessage((await evaluate({
+ expression: 'new WeakSet([this])',
+ generatePreview: true
+ })).result);
+ },
+ async function testGenerator() {
+ InspectorTest.logMessage((await evaluate({
+ expression: 'g = (function*(){ yield 42; })(); g.a = 2; g',
+ generatePreview: true
+ })).result);
+ },
+ async function testError() {
+ InspectorTest.logMessage((await evaluate({
+ expression: 'new Error()'
+ })).result);
+ InspectorTest.logMessage((await evaluate({
+ expression: `new Error('abc')`
+ })).result);
+ InspectorTest.logMessage((await evaluate({
+ expression: `new Error('at\\nat')`
+ })).result);
+
+ InspectorTest.logMessage((await evaluate({
+ expression: `new Error('preview')`,
+ returnByValue: true
+ })).result);
+
+ InspectorTest.logMessage((await evaluate({
+ expression: `new Error('preview')`,
+ generatePreview: true
+ })).result);
+
+ InspectorTest.logMessage((await evaluate({
+ expression: `({a: new Error('preview')})`,
+ generatePreview: true
+ })).result);
+
+ InspectorTest.logMessage((await evaluate({
+ expression: `a = new Error('preview and a'); a.a = 123; a`,
+ generatePreview: true
+ })).result);
+ InspectorTest.logMessage((await evaluate({
+ expression: `a = new Error('preview and a'); a.a = 123; a`,
+ returnByValue: true
+ })).result);
+ },
+ async function testCustomError() {
+ InspectorTest.logMessage((await evaluate({
+ expression: `class CustomError extends Error {}; a = new CustomError(); delete a.stack; a`
+ })).result);
+ },
+ async function testProxy() {
+ InspectorTest.logMessage((await evaluate({
+ expression: 'new Proxy({}, {})'
+ })).result);
+ InspectorTest.logMessage((await evaluate({
+ expression: 'new Proxy(new Error(), {})'
+ })).result);
+ InspectorTest.logMessage((await evaluate({
+ expression: 'new Proxy({c: 3}, {d: 4})',
+ returnByValue: true
+ })).result);
+ InspectorTest.logMessage((await evaluate({
+ expression: 'new Proxy({a: 1}, {b: 2})',
+ generatePreview: true
+ })).result);
+ InspectorTest.logMessage((await evaluate({
+ expression: '({e: new Proxy({a: 1}, {b: 2})})',
+ generatePreview: true
+ })).result);
+ },
+ async function testPromise() {
+ InspectorTest.logMessage((await evaluate({
+ expression: 'Promise.resolve(42)'
+ })).result);
+ InspectorTest.logMessage((await evaluate({
+ expression: 'Promise.reject(42)'
+ })).result);
+ InspectorTest.logMessage((await evaluate({
+ expression: '(async function(){})()'
+ })).result);
+ InspectorTest.logMessage((await evaluate({
+ expression: `Promise.resolve('a'.repeat(101))`,
+ generatePreview: true
+ })).result);
+ InspectorTest.logMessage((await evaluate({
+ expression: 'Promise.reject(42)',
+ generatePreview: true
+ })).result);
+ InspectorTest.logMessage((await evaluate({
+ expression: 'new Promise(resolve => this.resolve = resolve)',
+ generatePreview: true
+ })).result);
+ InspectorTest.logMessage((await evaluate({
+ expression: 'a = Promise.resolve(42); a.b = 2; a',
+ returnByValue: true
+ })).result);
+ InspectorTest.logMessage((await evaluate({
+ expression: '({a: Promise.resolve(42)})',
+ generatePreview: true
+ })).result);
+ },
+ async function testTypedArray() {
+ InspectorTest.logMessage((await evaluate({
+ expression: 'a = new Uint8Array(2); a.b = 2; a',
+ generatePreview: true
+ })).result);
+ InspectorTest.logMessage((await evaluate({
+ expression: 'new Int32Array(101)',
+ generatePreview: true
+ })).result);
+ },
+ async function testArrayBuffer() {
+ InspectorTest.logMessage((await evaluate({
+ expression: 'new Uint8Array().buffer',
+ generatePreview: true
+ })).result);
+ InspectorTest.logMessage((await evaluate({
+ expression: 'new Int32Array(100).buffer',
+ generatePreview: true
+ })).result);
+ },
+ async function testDataView() {
+ InspectorTest.logMessage((await evaluate({
+ expression: 'new DataView(new ArrayBuffer(16))',
+ generatePreview: true
+ })).result);
+ InspectorTest.logMessage((await evaluate({
+ expression: 'new DataView(new ArrayBuffer(16), 12, 4)',
+ generatePreview: true
+ })).result);
+ },
+ async function testArray() {
+ InspectorTest.logMessage((await evaluate({
+ expression: '[]'
+ })).result);
+ InspectorTest.logMessage((await evaluate({
+ expression: '[1,2,3]'
+ })).result);
+ },
+ async function testArrayLike() {
+ InspectorTest.logMessage((await evaluate({
+ expression: '({length: 5, splice: () => []})'
+ })).result);
+ InspectorTest.logMessage((await evaluate({
+ expression: `new (class Foo{constructor() {
+ this.length = 5;
+ this.splice = () => [];
+ }})`
+ })).result);
+ InspectorTest.logMessage((await evaluate({
+ expression: '({length: -5, splice: () => []})'
+ })).result);
+ },
+ async function testOtherObjects() {
+ InspectorTest.logMessage((await evaluate({
+ expression: '({a: 1, b:2})'
+ })).result);
+ InspectorTest.logMessage((await evaluate({
+ expression: '({a: 1, b:2})',
+ returnByValue: true
+ })).result);
+ InspectorTest.logMessage((await evaluate({
+ expression: '({a: 1, b:2})',
+ generatePreview: true
+ })).result);
+ InspectorTest.logMessage((await evaluate({
+ expression: 'new (function Foo() { this.a = 5; })'
+ })).result);
+ InspectorTest.logMessage((await evaluate({
+ expression: 'new (function Foo() { this.a = [1,2,3]; })',
+ returnByValue: true
+ })).result);
+ InspectorTest.logMessage((await evaluate({
+ expression: 'new (class Bar {})'
+ })).result);
+ InspectorTest.logMessage((await evaluate({
+ expression: 'inspector.createObjectWithAccessor(\'title\', true)',
+ generatePreview: true
+ })));
+ InspectorTest.logMessage((await evaluate({
+ expression: 'inspector.createObjectWithAccessor(\'title\', false)',
+ generatePreview: true
+ })));
+ // TODO(kozyatinskiy): fix this one.
+ InspectorTest.logMessage((await evaluate({
+ expression: 'inspector.createObjectWithAccessor(\'title\', true)',
+ returnByValue: true
+ })));
+ InspectorTest.logMessage((await evaluate({
+ expression: '({get a() { return 42; }})',
+ generatePreview: true
+ })));
+ InspectorTest.logMessage((await evaluate({
+ expression: '({set a(v) {}})',
+ generatePreview: true
+ })));
+ InspectorTest.logMessage((await evaluate({
+ expression: '({a: () => 42})',
+ generatePreview: true
+ })));
+ InspectorTest.logMessage((await evaluate({
+ expression: '({a: null})',
+ generatePreview: true
+ })));
+ InspectorTest.logMessage((await evaluate({
+ expression: '({a: true})',
+ generatePreview: true
+ })));
+ InspectorTest.logMessage((await evaluate({
+ expression: '({a1: -Infinity, a2: +Infinity, a3: -0, a4: NaN, a5: 1.23})',
+ generatePreview: true
+ })));
+ InspectorTest.logMessage((await evaluate({
+ expression: '({a1: 1234567890123456789012345678901234567890n})',
+ generatePreview: true
+ })));
+ InspectorTest.logMessage((await evaluate({
+ expression: '({a1: Symbol(42)})',
+ generatePreview: true
+ })));
+ InspectorTest.logMessage((await evaluate({
+ expression: '({a1: /abc/i})',
+ generatePreview: true
+ })));
+ InspectorTest.logMessage((await evaluate({
+ expression: '({a1: () => 42, a2: async () => 42})',
+ generatePreview: true
+ })));
+ InspectorTest.logMessage((await evaluate({
+ expression: '({a1: ({}), a2: new (class Bar{})})',
+ generatePreview: true
+ })));
+ InspectorTest.logMessage((await evaluate({
+ expression: `({a1: 'a'.repeat(100), a2: 'a'.repeat(101)})`,
+ generatePreview: true
+ })));
+ InspectorTest.logMessage((await evaluate({
+ expression: `({a1: 1, a2: 2, a3: 3, a4:4, a5:5, a6: 6})`,
+ generatePreview: true
+ })));
+ InspectorTest.logMessage((await evaluate({
+ expression: `([1,2,3])`,
+ generatePreview: true
+ })));
+ },
+
+ async function testArray2() {
+ InspectorTest.logMessage((await evaluate({
+ expression: `([1,2,3])`
+ })));
+ InspectorTest.logMessage((await evaluate({
+ expression: `([1,2,3])`,
+ returnByValue: true
+ })));
+ InspectorTest.logMessage((await evaluate({
+ expression: `([1,2,3])`,
+ generatePreview: true
+ })));
+ InspectorTest.logMessage((await evaluate({
+ expression: `({a: [1,2,3]})`,
+ generatePreview: true
+ })));
+ }
+]);
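For reference, the evaluate() helper called throughout the suite above is defined near the top of remote-object.js, outside the lines shown here. A minimal sketch of the harness pattern, assuming the helper simply forwards its options object to Protocol.Runtime.evaluate (the description string and test body below are illustrative, not part of the patch):

  let {session, contextGroup, Protocol} =
      InspectorTest.start('Checks Runtime.RemoteObject serialization.');

  function evaluate(options) {
    // Forward to the protocol; callers pick apart .result or .error.
    return Protocol.Runtime.evaluate(options);
  }

  InspectorTest.runAsyncTestSuite([
    async function testNumber() {
      InspectorTest.logMessage((await evaluate({expression: '42'})).result);
    },
  ]);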
diff --git a/deps/v8/test/inspector/runtime/runtime-get-properties-and-accessor-expected.txt b/deps/v8/test/inspector/runtime/runtime-get-properties-and-accessor-expected.txt
index 648187c2b4..8789f64ba6 100644
--- a/deps/v8/test/inspector/runtime/runtime-get-properties-and-accessor-expected.txt
+++ b/deps/v8/test/inspector/runtime/runtime-get-properties-and-accessor-expected.txt
@@ -1,11 +1,11 @@
Runtime.getProperties for objects with accessor
title property with getter and setter:
{
- configurable : false
- enumerable : false
+ configurable : true
+ enumerable : true
get : {
className : Function
- description : function nativeGetter() { [native code] }
+ description : function () { [native code] }
objectId : <objectId>
type : function
}
@@ -13,18 +13,18 @@ title property with getter and setter:
name : title
set : {
className : Function
- description : function nativeSetter() { [native code] }
+ description : function () { [native code] }
objectId : <objectId>
type : function
}
}
title property with getter only:
{
- configurable : false
- enumerable : false
+ configurable : true
+ enumerable : true
get : {
className : Function
- description : function nativeGetter() { [native code] }
+ description : function () { [native code] }
objectId : <objectId>
type : function
}
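The expected-output change above tracks a behavioral change: accessors installed by the inspector test helper are now reported as configurable and enumerable, and the native getter/setter functions no longer carry a name in their description. The descriptor shape itself is ordinary JavaScript; a standalone sketch of the equivalent plain-JS accessor (not part of the patch):

  const obj = {};
  Object.defineProperty(obj, 'title', {
    get() { return 'v8'; },
    set(value) {},
    configurable: true,  // matches the new expectations
    enumerable: true,
  });
  // Logs a descriptor with get, set, enumerable: true, configurable: true.
  console.log(Object.getOwnPropertyDescriptor(obj, 'title'));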
diff --git a/deps/v8/test/inspector/runtime/runtime-restore-expected.txt b/deps/v8/test/inspector/runtime/runtime-restore-expected.txt
index d95051bd2d..1810872f0b 100644
--- a/deps/v8/test/inspector/runtime/runtime-restore-expected.txt
+++ b/deps/v8/test/inspector/runtime/runtime-restore-expected.txt
@@ -67,9 +67,7 @@ will reconnect..
[0] : {
className : Object
customPreview : {
- bindRemoteObjectFunctionId : <bindRemoteObjectFunctionId>
- formatterObjectId : <formatterObjectId>
- hasBody : true
+ bodyGetterId : <bodyGetterId>
header : ["span",{},"Header formatted ",42]
}
description : Object
diff --git a/deps/v8/test/inspector/runtime/stable-object-id-expected.txt b/deps/v8/test/inspector/runtime/stable-object-id-expected.txt
deleted file mode 100644
index d4e3fab7ee..0000000000
--- a/deps/v8/test/inspector/runtime/stable-object-id-expected.txt
+++ /dev/null
@@ -1,15 +0,0 @@
-Checks that protocol returns the same RemoteObjectId for the same object
-
-Running test: testGlobal
-Compare global evaluated twice: true
-
-Running test: testObject
-Compare object evaluated twice: true
-
-Running test: testObjectInArray
-Compare first and second element: true
-
-Running test: testObjectOnPause
-Compare global and this: true
-Compare global and global on pause: true
-Compare a and a on pause: true
diff --git a/deps/v8/test/inspector/runtime/stable-object-id.js b/deps/v8/test/inspector/runtime/stable-object-id.js
deleted file mode 100644
index 944bae0d3a..0000000000
--- a/deps/v8/test/inspector/runtime/stable-object-id.js
+++ /dev/null
@@ -1,87 +0,0 @@
-// Copyright 2018 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-let {session, contextGroup, Protocol} = InspectorTest.start(
- 'Checks that protocol returns the same RemoteObjectId for the same object');
-
-InspectorTest.runAsyncTestSuite([
- async function testGlobal() {
- const {result:{result:{objectId:firstId}}} =
- await Protocol.Runtime.evaluate({expression: 'this'});
- const firstStableId = await stableObjectId(firstId);
- const {result:{result:{objectId:secondId}}} =
- await Protocol.Runtime.evaluate({expression: 'this'});
- const secondStableId = await stableObjectId(secondId);
- InspectorTest.log(
- `Compare global evaluated twice: ${firstStableId === secondStableId}`);
- },
-
- async function testObject() {
- const {result:{result:{objectId:firstId}}} =
- await Protocol.Runtime.evaluate({expression: 'this.a = {}, this.a'});
- const firstStableId = await stableObjectId(firstId);
- const {result:{result:{objectId:secondId}}} =
- await Protocol.Runtime.evaluate({expression: 'this.a'});
- const secondStableId = await stableObjectId(secondId);
- InspectorTest.log(
- `Compare object evaluated twice: ${firstStableId === secondStableId}`);
- },
-
- async function testObjectInArray() {
- await Protocol.Runtime.evaluate({expression: 'this.b = [this.a, this.a]'});
- const {result:{result:{objectId:firstId}}} =
- await Protocol.Runtime.evaluate({expression: 'this.b[0]'});
- const firstStableId = await stableObjectId(firstId);
- const {result:{result:{objectId:secondId}}} =
- await Protocol.Runtime.evaluate({expression: 'this.b[1]'});
- const secondStableId = await stableObjectId(secondId);
- InspectorTest.log(
- `Compare first and second element: ${firstStableId === secondStableId}`);
- },
-
- async function testObjectOnPause() {
- const {result:{result:{objectId:globalId}}} =
- await Protocol.Runtime.evaluate({expression: 'this'});
- const globalStableId = await stableObjectId(globalId);
- const {result:{result:{objectId:aId}}} =
- await Protocol.Runtime.evaluate({expression: 'this.a'});
- const aStableId = await stableObjectId(aId);
- await Protocol.Debugger.enable();
- Protocol.Runtime.evaluate({expression: 'debugger'});
- const {params:{callFrames:[topFrame]}} =
- await Protocol.Debugger.oncePaused();
- const topFrameThisStableId = await stableObjectId(topFrame.this.objectId);
- InspectorTest.log(
- `Compare global and this: ${globalStableId === topFrameThisStableId}`);
-
- const {result:{result:{objectId:globalIdOnPause}}} =
- await Protocol.Debugger.evaluateOnCallFrame({
- callFrameId: topFrame.callFrameId,
- expression: 'this'
- });
- const globalStableIdOnPause = await stableObjectId(globalIdOnPause);
- InspectorTest.log(
- `Compare global and global on pause: ${
- globalStableId === globalStableIdOnPause}`);
-
- const {result:{result: props}} = await Protocol.Runtime.getProperties({
- objectId: topFrame.scopeChain[0].object.objectId
- });
- const {value:{objectId: aIdOnPause}} = props.find(prop => prop.name === 'a');
- const aStableIdOnPause = await stableObjectId(aIdOnPause);
- InspectorTest.log(`Compare a and a on pause: ${
- aStableId === aStableIdOnPause}`);
- }
-]);
-
-async function stableObjectId(objectId) {
- const {result:{
- internalProperties: props
- }} = await Protocol.Runtime.getProperties({
- objectId,
- ownProperties: true,
- generatePreview: false
- });
- return props.find(prop => prop.name === '[[StableObjectId]]').value.value;
-}
diff --git a/deps/v8/test/inspector/runtime/terminate-execution-expected.txt b/deps/v8/test/inspector/runtime/terminate-execution-expected.txt
index b64854cf80..24df70ebb6 100644
--- a/deps/v8/test/inspector/runtime/terminate-execution-expected.txt
+++ b/deps/v8/test/inspector/runtime/terminate-execution-expected.txt
@@ -8,7 +8,7 @@ Terminate first evaluation (it forces injected-script-source compilation)
{
error : {
code : -32000
- message : Cannot access specified execution context
+ message : Execution was terminated
}
id : <messageId>
}
diff --git a/deps/v8/test/inspector/sessions/runtime-console-api-called-expected.txt b/deps/v8/test/inspector/sessions/runtime-console-api-called-expected.txt
index 0f679fa7e5..67956cf2ac 100644
--- a/deps/v8/test/inspector/sessions/runtime-console-api-called-expected.txt
+++ b/deps/v8/test/inspector/sessions/runtime-console-api-called-expected.txt
@@ -125,7 +125,7 @@ From session 1
callFrames : [
[0] : {
columnNumber : 25
- functionName : setTimeout
+ functionName :
lineNumber : 0
scriptId : <scriptId>
url :
@@ -151,7 +151,7 @@ From session 2
callFrames : [
[0] : {
columnNumber : 25
- functionName : setTimeout
+ functionName :
lineNumber : 0
scriptId : <scriptId>
url :
@@ -178,7 +178,7 @@ From session 1
callFrames : [
[0] : {
columnNumber : 25
- functionName : setTimeout
+ functionName :
lineNumber : 0
scriptId : <scriptId>
url :
@@ -204,7 +204,7 @@ From session 2
callFrames : [
[0] : {
columnNumber : 25
- functionName : setTimeout
+ functionName :
lineNumber : 0
scriptId : <scriptId>
url :
diff --git a/deps/v8/test/inspector/sessions/runtime-evaluate-exception-expected.txt b/deps/v8/test/inspector/sessions/runtime-evaluate-exception-expected.txt
index e84c770448..f9ac741978 100644
--- a/deps/v8/test/inspector/sessions/runtime-evaluate-exception-expected.txt
+++ b/deps/v8/test/inspector/sessions/runtime-evaluate-exception-expected.txt
@@ -19,7 +19,7 @@ From session 1
callFrames : [
[0] : {
columnNumber : 19
- functionName : setTimeout
+ functionName :
lineNumber : 0
scriptId : <scriptId>
url :
@@ -48,7 +48,7 @@ From session 2
callFrames : [
[0] : {
columnNumber : 19
- functionName : setTimeout
+ functionName :
lineNumber : 0
scriptId : <scriptId>
url :
@@ -78,7 +78,7 @@ From session 1
callFrames : [
[0] : {
columnNumber : 19
- functionName : setTimeout
+ functionName :
lineNumber : 0
scriptId : <scriptId>
url :
@@ -107,7 +107,7 @@ From session 2
callFrames : [
[0] : {
columnNumber : 19
- functionName : setTimeout
+ functionName :
lineNumber : 0
scriptId : <scriptId>
url :
@@ -137,7 +137,7 @@ From session 1
callFrames : [
[0] : {
columnNumber : 40
- functionName : setTimeout
+ functionName :
lineNumber : 0
scriptId : <scriptId>
url :
@@ -166,7 +166,7 @@ From session 2
callFrames : [
[0] : {
columnNumber : 40
- functionName : setTimeout
+ functionName :
lineNumber : 0
scriptId : <scriptId>
url :
@@ -215,7 +215,7 @@ From session 1
callFrames : [
[0] : {
columnNumber : 40
- functionName : setTimeout
+ functionName :
lineNumber : 0
scriptId : <scriptId>
url :
@@ -244,7 +244,7 @@ From session 2
callFrames : [
[0] : {
columnNumber : 40
- functionName : setTimeout
+ functionName :
lineNumber : 0
scriptId : <scriptId>
url :
diff --git a/deps/v8/test/inspector/sessions/runtime-evaluate-exception.js b/deps/v8/test/inspector/sessions/runtime-evaluate-exception.js
index 8f3520fe18..e1b57184a5 100644
--- a/deps/v8/test/inspector/sessions/runtime-evaluate-exception.js
+++ b/deps/v8/test/inspector/sessions/runtime-evaluate-exception.js
@@ -45,14 +45,14 @@ function connect(contextGroup, num) {
await session2.Protocol.Runtime.evaluate({expression: 'var p2; setTimeout(() => { p2 = Promise.reject("error5") }, 0)'});
await InspectorTest.waitForPendingTasks();
InspectorTest.log('Revoking in 2');
- await session2.Protocol.Runtime.evaluate({expression: 'setTimeout(() => { p2.catch() }, 0);'});
+ await session2.Protocol.Runtime.evaluate({expression: 'setTimeout(() => { p2.catch(()=>{}) }, 0);'});
await InspectorTest.waitForPendingTasks();
InspectorTest.log('Rejecting in 1');
await session1.Protocol.Runtime.evaluate({expression: 'var p1; setTimeout(() => { p1 = Promise.reject("error6")} , 0)'});
await InspectorTest.waitForPendingTasks();
InspectorTest.log('Revoking in 1');
- await session1.Protocol.Runtime.evaluate({expression: 'setTimeout(() => { p1.catch() }, 0);'});
+ await session1.Protocol.Runtime.evaluate({expression: 'setTimeout(() => { p1.catch(()=>{}) }, 0);'});
await InspectorTest.waitForPendingTasks();
InspectorTest.completeTest();
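The switch from p.catch() to p.catch(()=>{}) is the substantive fix here: catch() without an argument behaves like then(undefined, undefined), so while it marks the original promise as handled, the derived promise it returns rejects with the same reason and is itself unhandled, and a rejection event is still reported. Attaching a real handler consumes the rejection. In isolation:

  const p = Promise.reject('error5');
  p.catch();         // derived promise re-rejects; still reported as unhandled
  p.catch(() => {}); // real handler; the rejection chain ends handled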
diff --git a/deps/v8/test/inspector/sessions/runtime-remote-object-expected.txt b/deps/v8/test/inspector/sessions/runtime-remote-object-expected.txt
index 7c6e69e05d..a8d0ec0c20 100644
--- a/deps/v8/test/inspector/sessions/runtime-remote-object-expected.txt
+++ b/deps/v8/test/inspector/sessions/runtime-remote-object-expected.txt
@@ -5,12 +5,6 @@ Retrieving properties in 2
{
id : <messageId>
result : {
- internalProperties : [
- [0] : {
- name : [[StableObjectId]]
- value : <StablectObjectId>
- }
- ]
result : [
[0] : {
configurable : true
@@ -44,12 +38,6 @@ Retrieving properties in 1
{
id : <messageId>
result : {
- internalProperties : [
- [0] : {
- name : [[StableObjectId]]
- value : <StablectObjectId>
- }
- ]
result : [
[0] : {
configurable : true
@@ -84,12 +72,6 @@ Retrieving properties in 1
{
id : <messageId>
result : {
- internalProperties : [
- [0] : {
- name : [[StableObjectId]]
- value : <StablectObjectId>
- }
- ]
result : [
[0] : {
configurable : true
diff --git a/deps/v8/test/inspector/testcfg.py b/deps/v8/test/inspector/testcfg.py
index 50b7795dfc..9660147624 100644
--- a/deps/v8/test/inspector/testcfg.py
+++ b/deps/v8/test/inspector/testcfg.py
@@ -60,7 +60,6 @@ class TestCase(testcase.TestCase):
def _get_resources(self):
return [
- os.path.join('src', 'inspector', 'injected-script-source.js'),
os.path.join(
'test', 'inspector', 'debugger', 'resources', 'break-locations.js'),
]
diff --git a/deps/v8/test/inspector/type-profiler/type-profile-disable.js b/deps/v8/test/inspector/type-profiler/type-profile-disable.js
index 38a3c6fa9b..e378c54e58 100644
--- a/deps/v8/test/inspector/type-profiler/type-profile-disable.js
+++ b/deps/v8/test/inspector/type-profiler/type-profile-disable.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --type-profile
-
const source =
`
function g(a, b, c) {
diff --git a/deps/v8/test/inspector/type-profiler/type-profile-start-stop.js b/deps/v8/test/inspector/type-profiler/type-profile-start-stop.js
index be4e0bdfd9..88a2ad13bf 100644
--- a/deps/v8/test/inspector/type-profiler/type-profile-start-stop.js
+++ b/deps/v8/test/inspector/type-profiler/type-profile-start-stop.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --type-profile
-
const source1 =
`
function g(a, b, c) {
diff --git a/deps/v8/test/inspector/type-profiler/type-profile-with-classes.js b/deps/v8/test/inspector/type-profiler/type-profile-with-classes.js
index b697ebfd3f..0e75b7db09 100644
--- a/deps/v8/test/inspector/type-profiler/type-profile-with-classes.js
+++ b/deps/v8/test/inspector/type-profiler/type-profile-with-classes.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --type-profile
-
const source =
`
function f(n) {
diff --git a/deps/v8/test/inspector/type-profiler/type-profile-with-to-string-tag.js b/deps/v8/test/inspector/type-profiler/type-profile-with-to-string-tag.js
index 654f18afc5..d6d5b6d538 100644
--- a/deps/v8/test/inspector/type-profiler/type-profile-with-to-string-tag.js
+++ b/deps/v8/test/inspector/type-profiler/type-profile-with-to-string-tag.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --type-profile
-
const source =
`
function g(a, b, c) {
diff --git a/deps/v8/test/inspector/type-profiler/type-profile.js b/deps/v8/test/inspector/type-profiler/type-profile.js
index e912a4b940..7754b4c6cb 100644
--- a/deps/v8/test/inspector/type-profiler/type-profile.js
+++ b/deps/v8/test/inspector/type-profiler/type-profile.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --type-profile
-
const source =
`
function f(a, b, c) {
diff --git a/deps/v8/test/intl/break-iterator/default-locale.js b/deps/v8/test/intl/break-iterator/default-locale.js
index e1a42a100a..fd379db1c8 100644
--- a/deps/v8/test/intl/break-iterator/default-locale.js
+++ b/deps/v8/test/intl/break-iterator/default-locale.js
@@ -37,9 +37,6 @@ assertFalse(options.locale === 'und');
assertFalse(options.locale === '');
assertFalse(options.locale === undefined);
-// Then check for legitimacy.
-assertLanguageTag(%GetDefaultICULocale(), options.locale);
-
var iteratorNone = new Intl.v8BreakIterator();
assertEquals(options.locale, iteratorNone.resolvedOptions().locale);
diff --git a/deps/v8/test/intl/break-iterator/wellformed-unsupported-locale.js b/deps/v8/test/intl/break-iterator/wellformed-unsupported-locale.js
deleted file mode 100644
index ffa44aef08..0000000000
--- a/deps/v8/test/intl/break-iterator/wellformed-unsupported-locale.js
+++ /dev/null
@@ -1,32 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Passing a well formed but unsupported locale falls back to default.
-
-var iterator = Intl.v8BreakIterator(['xx']);
-
-assertLanguageTag(%GetDefaultICULocale(), iterator.resolvedOptions().locale);
diff --git a/deps/v8/test/intl/collator/check-co-option.js b/deps/v8/test/intl/collator/check-co-option.js
new file mode 100644
index 0000000000..477d00a045
--- /dev/null
+++ b/deps/v8/test/intl/collator/check-co-option.js
@@ -0,0 +1,33 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+let invalid_co = [
+ "invalid",
+ "search",
+ "standard",
+ "abce",
+];
+
+let valid_locales = [
+ "zh-u-co-zhuyin",
+ "zh-u-co-stroke",
+ "ar-u-co-compat",
+ "en-u-co-emoji",
+ "en-u-co-eor",
+ "zh-Hant-u-co-pinyin",
+ "ko-u-co-searchjl",
+ "ja-u-co-unihan",
+];
+
+invalid_co.forEach(function(co) {
+ let col = new Intl.Collator(["en-u-co-" + co]);
+ assertEquals("en", col.resolvedOptions().locale);
+}
+);
+
+valid_locales.forEach(function(l) {
+ let col = new Intl.Collator([l + "-fo-obar"]);
+ assertEquals(l, col.resolvedOptions().locale);
+}
+);
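The pattern these assertions pin down: a collation value that ICU recognizes for the language survives into resolvedOptions().locale, while an unknown value, or one that ECMA-402 forbids as a -u-co- keyword ('standard', 'search'), is dropped together with the rest of the invalid extension. For example:

  // 'zh-u-co-zhuyin' is a valid collation for Chinese and is kept.
  console.log(new Intl.Collator(['zh-u-co-zhuyin']).resolvedOptions().locale);
  // 'invalid' is not a collation value, so the extension is stripped: 'en'.
  console.log(new Intl.Collator(['en-u-co-invalid']).resolvedOptions().locale);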
diff --git a/deps/v8/test/intl/collator/check-kf-option.js b/deps/v8/test/intl/collator/check-kf-option.js
new file mode 100644
index 0000000000..45085c667e
--- /dev/null
+++ b/deps/v8/test/intl/collator/check-kf-option.js
@@ -0,0 +1,36 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+let invalid_kf = [
+ "invalid",
+ "abce",
+ "none",
+ "true",
+];
+
+let valid_kf = [
+ "false",
+ "upper",
+ "lower",
+];
+
+let locales = [
+ "en",
+ "fr",
+];
+
+invalid_kf.forEach(function(kf) {
+ let col = new Intl.Collator(["en-u-kf-" + kf + "-fo-obar"]);
+ assertEquals("en", col.resolvedOptions().locale);
+}
+);
+
+valid_kf.forEach(function(kf) {
+ locales.forEach(function(base) {
+ let l = base + "-u-kf-" + kf;
+ let col = new Intl.Collator([l + "-fo-obar"]);
+ assertEquals(l, col.resolvedOptions().locale);
+ });
+}
+);
diff --git a/deps/v8/test/intl/collator/check-kn-option.js b/deps/v8/test/intl/collator/check-kn-option.js
new file mode 100644
index 0000000000..0e3a82fe26
--- /dev/null
+++ b/deps/v8/test/intl/collator/check-kn-option.js
@@ -0,0 +1,29 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+let invalid_kn = [
+ "invalid",
+ "search",
+ "standard",
+ "abce",
+];
+
+let valid_kn = [
+ ["en-u-kn", true, "en-u-kn"],
+ ["en-u-kn-true", true, "en-u-kn"],
+ ["en-u-kn-false",false, "en-u-kn-false"],
+];
+
+invalid_kn.forEach(function(kn) {
+ let col = new Intl.Collator(["en-u-kn-" + kn]);
+ assertEquals("en", col.resolvedOptions().locale);
+}
+);
+
+valid_kn.forEach(function(l) {
+ let col = new Intl.Collator([l[0] + "-fo-obar"]);
+ assertEquals(l[1], col.resolvedOptions().numeric);
+ assertEquals(l[2], col.resolvedOptions().locale);
+}
+);
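Note the canonicalization encoded in the valid_kn table: a bare -u-kn and an explicit -u-kn-true both enable numeric collation and normalize to en-u-kn (true is the implied default and is elided), while -u-kn-false is preserved verbatim. Equivalently:

  const col = new Intl.Collator(['en-u-kn-true']);
  console.log(col.resolvedOptions().numeric); // true
  console.log(col.resolvedOptions().locale);  // 'en-u-kn'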
diff --git a/deps/v8/test/intl/collator/constructor-order.js b/deps/v8/test/intl/collator/constructor-order.js
new file mode 100644
index 0000000000..2b1de6b866
--- /dev/null
+++ b/deps/v8/test/intl/collator/constructor-order.js
@@ -0,0 +1,30 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Throws only once during construction.
+// Check for all getters to prevent regression.
+// Preserve the order of getter initialization.
+let getCount = 0;
+
+new Intl.Collator(['en-US'], {
+ get usage() {
+ assertEquals(0, getCount++);
+ },
+ get localeMatcher() {
+ assertEquals(1, getCount++);
+ },
+ get numeric() {
+ assertEquals(2, getCount++);
+ },
+ get caseFirst() {
+ assertEquals(3, getCount++);
+ },
+ get sensitivity() {
+ assertEquals(4, getCount++);
+ },
+ get ignorePunctuation() {
+ assertEquals(5, getCount++);
+ },
+});
+assertEquals(6, getCount);
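The probing technique used above generalizes to any consumer of an options bag: define getters that record when they fire, then assert the order. A generic sketch (the recordReads helper is illustrative, not part of the patch):

  function recordReads(names) {
    const seen = [];
    const options = {};
    for (const name of names) {
      Object.defineProperty(options, name, {
        get() { seen.push(name); }  // record the read, return undefined
      });
    }
    return {options, seen};
  }

  const {options, seen} = recordReads(
      ['usage', 'localeMatcher', 'numeric', 'caseFirst',
       'sensitivity', 'ignorePunctuation']);
  new Intl.Collator(['en-US'], options);
  console.log(seen);  // the order in which the constructor read its options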
diff --git a/deps/v8/test/intl/collator/default-locale.js b/deps/v8/test/intl/collator/default-locale.js
index fd964f0620..56435d147f 100644
--- a/deps/v8/test/intl/collator/default-locale.js
+++ b/deps/v8/test/intl/collator/default-locale.js
@@ -37,9 +37,6 @@ assertFalse(options.locale === 'und');
assertFalse(options.locale === '');
assertFalse(options.locale === undefined);
-// Then check for legitimacy.
-assertLanguageTag(%GetDefaultICULocale(), options.locale);
-
var collatorNone = new Intl.Collator();
assertEquals(options.locale, collatorNone.resolvedOptions().locale);
@@ -49,5 +46,4 @@ assertEquals(options.locale, collatorBraket.resolvedOptions().locale);
var collatorWithOptions = new Intl.Collator(undefined, {usage: 'search'});
var locale = collatorWithOptions.resolvedOptions().locale;
-assertLanguageTag(%GetDefaultICULocale(), locale);
assertEquals(locale.indexOf('-co-search'), -1);
diff --git a/deps/v8/test/intl/collator/options.js b/deps/v8/test/intl/collator/options.js
index f03ff2cafc..126bfc0959 100644
--- a/deps/v8/test/intl/collator/options.js
+++ b/deps/v8/test/intl/collator/options.js
@@ -5,14 +5,12 @@
// No locale
var collatorWithOptions = new Intl.Collator(undefined);
var { locale, usage, collation } = collatorWithOptions.resolvedOptions();
-assertLanguageTag(%GetDefaultICULocale(), locale);
assertEquals('sort', usage);
assertEquals('default', collation);
assertEquals(locale.indexOf('-co-search'), -1);
collatorWithOptions = new Intl.Collator(undefined, {usage: 'sort'});
var { locale, usage, collation } = collatorWithOptions.resolvedOptions();
-assertLanguageTag(%GetDefaultICULocale(), locale);
assertEquals('sort', usage);
assertEquals('default', collation);
assertEquals(locale.indexOf('-co-search'), -1);
@@ -21,12 +19,10 @@ collatorWithOptions = new Intl.Collator(undefined, {usage: 'search'});
var { locale, usage, collation } = collatorWithOptions.resolvedOptions();
assertEquals('search', usage);
assertEquals('default', collation);
-assertLanguageTag(%GetDefaultICULocale(), locale);
assertEquals(locale.indexOf('-co-search'), -1);
collatorWithOptions = new Intl.Collator(locale);
var { locale, usage, collation } = collatorWithOptions.resolvedOptions();
-assertLanguageTag(%GetDefaultICULocale(), locale);
assertEquals('sort', usage);
assertEquals('default', collation);
assertEquals(locale.indexOf('-co-search'), -1);
diff --git a/deps/v8/test/intl/collator/wellformed-unsupported-locale.js b/deps/v8/test/intl/collator/wellformed-unsupported-locale.js
deleted file mode 100644
index ad89e3e220..0000000000
--- a/deps/v8/test/intl/collator/wellformed-unsupported-locale.js
+++ /dev/null
@@ -1,32 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Passing a well formed but unsupported locale falls back to default.
-
-var collator = Intl.Collator(['xx']);
-
-assertLanguageTag(%GetDefaultICULocale(), collator.resolvedOptions().locale);
diff --git a/deps/v8/test/intl/date-format/check-ca-option.js b/deps/v8/test/intl/date-format/check-ca-option.js
new file mode 100644
index 0000000000..d27ae44b48
--- /dev/null
+++ b/deps/v8/test/intl/date-format/check-ca-option.js
@@ -0,0 +1,51 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+let invalid_ca = [
+ "invalid",
+ "abce",
+];
+
+// https://www.unicode.org/repos/cldr/tags/latest/common/bcp47/calendar.xml
+let valid_ca = [
+ "buddhist",
+ "chinese",
+ "coptic",
+ "dangi",
+ "ethioaa",
+ "ethiopic",
+ "gregory",
+ "hebrew",
+ "indian",
+ "islamic",
+ "islamic-umalqura",
+ "islamic-tbla",
+ "islamic-civil",
+ "islamic-rgsa",
+ "iso8601",
+ "japanese",
+ "persian",
+ "roc",
+];
+
+let locales = [
+ "en",
+ "ar",
+];
+
+invalid_ca.forEach(function(ca) {
+ let df = new Intl.DateTimeFormat(["en-u-ca-" + ca + "-fo-obar"]);
+ assertEquals("en", df.resolvedOptions().locale);
+}
+);
+
+valid_ca.forEach(function(ca) {
+ locales.forEach(function(base) {
+ let l = base + "-u-ca-" + ca;
+ let df = new Intl.DateTimeFormat([l + "-fo-obar"]);
+ assertEquals(l, df.resolvedOptions().locale);
+ });
+}
+);
diff --git a/deps/v8/test/intl/date-format/check-hc-option.js b/deps/v8/test/intl/date-format/check-hc-option.js
new file mode 100644
index 0000000000..276bfe6a23
--- /dev/null
+++ b/deps/v8/test/intl/date-format/check-hc-option.js
@@ -0,0 +1,41 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+let invalid_hc = [
+ "invalid",
+ "abce",
+ "h10",
+ "h13",
+ "h22",
+ "h25",
+];
+
+// https://www.unicode.org/repos/cldr/tags/latest/common/bcp47/calendar.xml
+let valid_hc = [
+ "h11",
+ "h12",
+ "h23",
+ "h24",
+];
+
+let locales = [
+ "en",
+ "ar",
+];
+
+invalid_hc.forEach(function(hc) {
+ let df = new Intl.DateTimeFormat(["en-u-hc-" + hc + "-fo-obar"]);
+ assertEquals("en", df.resolvedOptions().locale);
+}
+);
+
+valid_hc.forEach(function(hc) {
+ locales.forEach(function(base) {
+ let l = base + "-u-hc-" + hc;
+ let df = new Intl.DateTimeFormat([l + "-fo-obar"]);
+ assertEquals(l, df.resolvedOptions().locale);
+ });
+}
+);
diff --git a/deps/v8/test/intl/date-format/check-nu-option.js b/deps/v8/test/intl/date-format/check-nu-option.js
new file mode 100644
index 0000000000..7d4b4dc927
--- /dev/null
+++ b/deps/v8/test/intl/date-format/check-nu-option.js
@@ -0,0 +1,59 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+let invalid_nu = [
+ "invalid",
+ "abce",
+ "finance",
+ "native",
+ "traditio",
+];
+
+// https://tc39.github.io/ecma402/#table-numbering-system-digits
+let valid_nu = [
+ "arab",
+ "arabext",
+ "bali",
+ "beng",
+ "deva",
+ "fullwide",
+ "gujr",
+ "guru",
+ "hanidec",
+ "khmr",
+ "knda",
+ "laoo",
+ "latn",
+ "limb",
+ "mlym",
+ "mong",
+ "mymr",
+ "orya",
+ "tamldec",
+ "telu",
+ "thai",
+ "tibt",
+];
+
+let locales = [
+ "en",
+ "ar",
+];
+
+invalid_nu.forEach(function(nu) {
+ let df = new Intl.DateTimeFormat(["en-u-nu-" + nu + "-fo-obar"]);
+ assertEquals("en", df.resolvedOptions().locale);
+}
+);
+
+valid_nu.forEach(function(nu) {
+ locales.forEach(function(base) {
+ let l = base + "-u-nu-" + nu;
+ let df = new Intl.DateTimeFormat([l + "-fo-obar"]);
+ assertEquals(l, df.resolvedOptions().locale);
+ });
+}
+);
diff --git a/deps/v8/test/intl/date-format/default-locale.js b/deps/v8/test/intl/date-format/default-locale.js
index 2d79e895b5..ecc85da479 100644
--- a/deps/v8/test/intl/date-format/default-locale.js
+++ b/deps/v8/test/intl/date-format/default-locale.js
@@ -37,8 +37,5 @@ assertFalse(options.locale === 'und');
assertFalse(options.locale === '');
assertFalse(options.locale === undefined);
-// Then check for legitimacy.
-assertLanguageTag(%GetDefaultICULocale(), options.locale);
-
var dtfNone = new Intl.DateTimeFormat();
assertEquals(options.locale, dtfNone.resolvedOptions().locale);
diff --git a/deps/v8/test/intl/date-format/property-override.js b/deps/v8/test/intl/date-format/property-override.js
index a2bc2d9a30..370f82b0c6 100644
--- a/deps/v8/test/intl/date-format/property-override.js
+++ b/deps/v8/test/intl/date-format/property-override.js
@@ -53,10 +53,24 @@ for (var prop in options) {
}
}
+// In the order of Table 6 of
+// ecma402 #sec-intl.datetimeformat.prototype.resolvedoptions
var expectedProperties = [
- 'calendar', 'day', 'era', 'hour12', 'hour', 'locale',
- 'minute', 'month', 'numberingSystem',
- 'second', 'timeZone', 'timeZoneName', 'weekday', 'year'
+ 'locale',
+ 'calendar',
+ 'numberingSystem',
+ 'timeZone',
+ 'hourCycle',
+ 'hour12',
+ 'weekday',
+ 'era',
+ 'year',
+ 'month',
+ 'day',
+ 'hour',
+ 'minute',
+ 'second',
+ 'timeZoneName',
];
assertEquals(expectedProperties.length, properties.length);
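Because resolvedOptions() defines its result properties in the Table 6 order, that order is directly observable from Object.keys; only the components that were actually resolved appear. A quick check (output is illustrative and locale-dependent):

  const dtf = new Intl.DateTimeFormat('en', {hour: 'numeric', minute: 'numeric'});
  console.log(Object.keys(dtf.resolvedOptions()));
  // e.g. ['locale', 'calendar', 'numberingSystem', 'timeZone',
  //       'hourCycle', 'hour12', 'hour', 'minute']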
diff --git a/deps/v8/test/intl/date-format/wellformed-unsupported-locale.js b/deps/v8/test/intl/date-format/wellformed-unsupported-locale.js
deleted file mode 100644
index b812164832..0000000000
--- a/deps/v8/test/intl/date-format/wellformed-unsupported-locale.js
+++ /dev/null
@@ -1,32 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Passing a well formed but unsupported locale falls back to default.
-
-var dtf = Intl.DateTimeFormat(['xx']);
-
-assertLanguageTag(%GetDefaultICULocale(), dtf.resolvedOptions().locale);
diff --git a/deps/v8/test/intl/default_locale.js b/deps/v8/test/intl/default_locale.js
new file mode 100644
index 0000000000..453f5e66ed
--- /dev/null
+++ b/deps/v8/test/intl/default_locale.js
@@ -0,0 +1,17 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Environment Variables: LC_ALL=de
+
+assertEquals("de", (new Intl.Collator([])).resolvedOptions().locale);
+assertEquals("de", (new Intl.Collator(['xx'])).resolvedOptions().locale);
+assertEquals("de", (new Intl.Collator(undefined)).resolvedOptions().locale);
+assertEquals("de", (new Intl.Collator(undefined, {usage: 'sort'})).resolvedOptions().locale);
+assertEquals("de", (new Intl.Collator(undefined, {usage: 'search'})).resolvedOptions().locale);
+assertEquals("de", (new Intl.DateTimeFormat([])).resolvedOptions().locale);
+assertEquals("de", (new Intl.DateTimeFormat(['xx'])).resolvedOptions().locale);
+assertEquals("de", (new Intl.NumberFormat([])).resolvedOptions().locale);
+assertEquals("de", (new Intl.NumberFormat(['xx'])).resolvedOptions().locale);
+assertEquals("de", (new Intl.v8BreakIterator([])).resolvedOptions().locale);
+assertEquals("de", (new Intl.v8BreakIterator(['xx'])).resolvedOptions().locale);
diff --git a/deps/v8/test/intl/general/CanonicalizeLocaleListTakeLocale.js b/deps/v8/test/intl/general/CanonicalizeLocaleListTakeLocale.js
new file mode 100644
index 0000000000..8dcdf70b97
--- /dev/null
+++ b/deps/v8/test/intl/general/CanonicalizeLocaleListTakeLocale.js
@@ -0,0 +1,101 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --harmony-locale
+//
+// Test that NumberFormat accepts an Intl.Locale object as the first
+// parameter, or as an element of the requested-locales array.
+
+let tag = "zh-Hant-TW-u-nu-thai"
+let l = new Intl.Locale(tag);
+
+var nf;
+// Test with String
+assertDoesNotThrow(() => nf = new Intl.NumberFormat(tag));
+assertEquals(tag, nf.resolvedOptions().locale);
+
+// Test with an Array of one String
+assertDoesNotThrow(() => nf = new Intl.NumberFormat([tag]));
+assertEquals(tag, nf.resolvedOptions().locale);
+
+// Test with an Array of two Strings
+assertDoesNotThrow(() => nf = new Intl.NumberFormat([tag, "en"]));
+assertEquals(tag, nf.resolvedOptions().locale);
+
+// Test with a Locale
+assertDoesNotThrow(() => nf = new Intl.NumberFormat(l));
+assertEquals(tag, nf.resolvedOptions().locale);
+
+// Test with an Array of one Locale
+assertDoesNotThrow(() => nf = new Intl.NumberFormat([l]));
+assertEquals(tag, nf.resolvedOptions().locale);
+
+// Test with an Array of one Locale and a String
+assertDoesNotThrow(() => nf = new Intl.NumberFormat([l, "en"]));
+assertEquals(tag, nf.resolvedOptions().locale);
+
+// Test DateTimeFormat
+var df;
+assertDoesNotThrow(() => df = new Intl.DateTimeFormat(tag));
+assertEquals(tag, df.resolvedOptions().locale);
+assertDoesNotThrow(() => df = new Intl.DateTimeFormat([tag]));
+assertEquals(tag, df.resolvedOptions().locale);
+
+// Test RelativeTimeFormat
+var rtf;
+assertDoesNotThrow(() => rtf = new Intl.RelativeTimeFormat(tag));
+assertEquals(tag, rtf.resolvedOptions().locale);
+assertDoesNotThrow(() => rtf = new Intl.RelativeTimeFormat([tag]));
+assertEquals(tag, rtf.resolvedOptions().locale);
+
+// Test ListFormat
+tag = "zh-Hant-TW"
+var lf;
+assertDoesNotThrow(() => lf = new Intl.ListFormat(tag));
+assertEquals(tag, lf.resolvedOptions().locale);
+assertDoesNotThrow(() => lf = new Intl.ListFormat([tag]));
+assertEquals(tag, lf.resolvedOptions().locale);
+
+// Test Collator
+var col;
+assertDoesNotThrow(() => col = new Intl.Collator(tag));
+assertEquals(tag, lf.resolvedOptions().locale);
+assertDoesNotThrow(() => col = new Intl.Collator([tag]));
+assertEquals(tag, lf.resolvedOptions().locale);
+
+// Test that monkey patching does not affect the result.
+
+class MyLocale extends Intl.Locale {
+ constructor(tag, options) {
+ super(tag, options);
+ }
+ toString() {
+ // This should not get called.
+ fail("toString should not be called");
+ }
+}
+
+let myLocale = new MyLocale(tag);
+
+// Test with a Locale
+assertDoesNotThrow(() => nf = new Intl.NumberFormat(myLocale));
+assertEquals(tag, nf.resolvedOptions().locale);
+
+// Test with an Array of one Locale
+assertDoesNotThrow(() => nf = new Intl.NumberFormat([myLocale]));
+assertEquals(tag, nf.resolvedOptions().locale);
+
+var res = Intl.getCanonicalLocales(myLocale);
+assertEquals(1, res.length);
+assertEquals(tag, res[0]);
+
+res = Intl.getCanonicalLocales([myLocale, "fr"]);
+assertEquals(2, res.length);
+assertEquals(tag, res[0]);
+assertEquals("fr", res[1]);
+
+res = Intl.getCanonicalLocales(["fr", myLocale]);
+assertEquals(2, res.length);
+assertEquals("fr", res[0]);
+assertEquals(tag, res[1]);
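The MyLocale.toString override above demonstrates that CanonicalizeLocaleList reads the [[Locale]] internal slot of an Intl.Locale argument rather than stringifying it. The same can be seen without a subclass (sketch; this V8 version still requires --harmony-locale):

  const loc = new Intl.Locale('zh-Hant-TW-u-nu-thai');
  loc.toString = () => { throw new Error('should not be called'); };
  console.log(Intl.getCanonicalLocales(loc)); // ['zh-Hant-TW-u-nu-thai']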
diff --git a/deps/v8/test/intl/general/invalid-locale.js b/deps/v8/test/intl/general/invalid-locale.js
index c85ed83cce..c3dd59ad68 100644
--- a/deps/v8/test/intl/general/invalid-locale.js
+++ b/deps/v8/test/intl/general/invalid-locale.js
@@ -22,18 +22,3 @@ for (let locale of not_so_long_locales) {
assertEquals((new Intl.NumberFormat(locale)).resolvedOptions().numberingSystem,
"latn");
}
-
-// The point of this test is to make sure that there's no ill-effect with too
-// long a locale name. Because, thhere's no provision in the Ecma 402 on the
-// length limit of a locale ID and BCP 47 (RFC 5646 section 2.1). So, it's
-// a spec violation to treat this as invalid. See TODO(jshin) comment
-// in Runtime_CanonicalizeLanguageTag in runtime-intl.cc .
-var overlong_locales = [
- "he-up-a-caiaup-araup-ai-pdu-sp-bs-up-arscna-zeieiaup-araup-arscia-rews-us-up-arscna-zeieiaup-araup-arsciap-arscna-zeieiaup-araup-arscie-u-sp-bs-uaup-arscia",
- "he-up-a-caiaup-araup-ai-pdu-sp-bs-up-arscna-zeieiaup-araup-arscia-rews-us-up-arscna-zeieiaup-araup-arsciap-arscna-zeieiaup-araup-arscie-u-sp-bs-uaup-arscia-xyza",
- "bs-u-nu-bzcu-cab-cabs-avnlubs-avnihu-zcu-cab-cbs-avnllubs-avnihq-zcu-cab-cbs-ubs-avnihu-cabs-flus-xxd-vnluy-abcd",
-];
-
-for (let locale of overlong_locales) {
- assertThrows("var nf = new Intl.NumberFormat('" + locale + "')", RangeError)
-}
diff --git a/deps/v8/test/intl/intl.status b/deps/v8/test/intl/intl.status
index 42807597a0..83e546db76 100644
--- a/deps/v8/test/intl/intl.status
+++ b/deps/v8/test/intl/intl.status
@@ -27,15 +27,12 @@
[
[ALWAYS, {
- # TODO(jochen): The following test is flaky.
+# TODO(jochen): The following test is flaky.
'overrides/caching': [PASS, FAIL],
- # https://bugs.chromium.org/p/v8/issues/detail?id=6891
- 'segmenter/segment': [FAIL],
- 'segmenter/segment-iterator': [FAIL],
- 'segmenter/segment-iterator-following': [FAIL],
- 'segmenter/segment-iterator-next': [FAIL],
- 'segmenter/segment-iterator-preceding': [FAIL],
+ # https://code.google.com/p/v8/issues/detail?id=7481
+ 'collator/check-kf-option': [FAIL],
+ 'collator/check-kn-option': [FAIL],
}], # ALWAYS
['variant == no_wasm_traps', {
@@ -46,14 +43,18 @@
# noi18n cannot turn on ICU backend for Date
'relative-time-format/default-locale-fr-CA': [SKIP],
'relative-time-format/default-locale-pt-BR': [SKIP],
+
+ # Unable to change locale on Windows:
+ 'default_locale': [SKIP],
}], # system == windows'
['system == android', {
# Android's ICU data file does not have the Chinese/Japanese dictionary
# required for the test to pass.
- 'break-iterator/zh-break': [FAIL],
+ 'break-iterator/zh-break': [SKIP],
# Unable to change locale on Android:
- 'relative-time-format/default-locale-fr-CA': [FAIL],
- 'relative-time-format/default-locale-pt-BR': [FAIL],
+ 'relative-time-format/default-locale-fr-CA': [SKIP],
+ 'relative-time-format/default-locale-pt-BR': [SKIP],
+ 'default_locale': [SKIP],
}], # 'system == android'
]
diff --git a/deps/v8/test/intl/list-format/constructor-order.js b/deps/v8/test/intl/list-format/constructor-order.js
new file mode 100644
index 0000000000..97f58436b2
--- /dev/null
+++ b/deps/v8/test/intl/list-format/constructor-order.js
@@ -0,0 +1,21 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Throws only once during construction.
+// Check for all getters to prevent regression.
+// Preserve the order of getter initialization.
+let getCount = 0;
+
+new Intl.ListFormat(['en-US'], {
+ get localeMatcher() {
+ assertEquals(0, getCount++);
+ },
+ get type() {
+ assertEquals(1, getCount++);
+ },
+ get style() {
+ assertEquals(2, getCount++);
+ },
+});
+assertEquals(3, getCount);
diff --git a/deps/v8/test/intl/list-format/constructor.js b/deps/v8/test/intl/list-format/constructor.js
index 33a85fd79f..d730516c9c 100644
--- a/deps/v8/test/intl/list-format/constructor.js
+++ b/deps/v8/test/intl/list-format/constructor.js
@@ -54,8 +54,7 @@ assertDoesNotThrow(
assertDoesNotThrow(
() => new Intl.ListFormat(['sr'], {style: 'short'}));
-assertDoesNotThrow(
- () => new Intl.ListFormat(['sr'], {style: 'narrow'}));
+assertThrows(() => new Intl.ListFormat(['sr'], {style: 'narrow'}), RangeError);
assertThrows(
() => new Intl.ListFormat(['sr'], {style: 'giant'}),
@@ -67,8 +66,9 @@ assertDoesNotThrow(
assertDoesNotThrow(
() => new Intl.ListFormat(['sr'], {type: 'conjunction', style: 'short'}));
-assertDoesNotThrow(
- () => new Intl.ListFormat(['sr'], {type: 'conjunction', style: 'narrow'}));
+assertThrows(
+ () => new Intl.ListFormat(['sr'], {type: 'conjunction', style: 'narrow'}),
+ RangeError);
assertDoesNotThrow(
() => new Intl.ListFormat(['sr'], {type: 'disjunction', style: 'long'}));
@@ -76,8 +76,9 @@ assertDoesNotThrow(
assertDoesNotThrow(
() => new Intl.ListFormat(['sr'], {type: 'disjunction', style: 'short'}));
-assertDoesNotThrow(
- () => new Intl.ListFormat(['sr'], {type: 'disjunction', style: 'narrow'}));
+assertThrows(
+ () => new Intl.ListFormat(['sr'], {type: 'disjunction', style: 'narrow'}),
+ RangeError);
assertDoesNotThrow(
() => new Intl.ListFormat(['sr'], {type: 'unit', style: 'long'}));
@@ -87,22 +88,3 @@ assertDoesNotThrow(
assertDoesNotThrow(
() => new Intl.ListFormat(['sr'], {type: 'unit', style: 'narrow'}));
-
-// Throws only once during construction.
-// Check for all getters to prevent regression.
-// Preserve the order of getter initialization.
-let getCount = 0;
-let style = -1;
-let type = -1;
-
-new Intl.ListFormat(['en-US'], {
- get style() {
- style = ++getCount;
- },
- get type() {
- type = ++getCount;
- }
-});
-
-assertEquals(1, type);
-assertEquals(2, style);
diff --git a/deps/v8/test/intl/list-format/format-en.js b/deps/v8/test/intl/list-format/format-en.js
index 21eb99d06d..d628537990 100644
--- a/deps/v8/test/intl/list-format/format-en.js
+++ b/deps/v8/test/intl/list-format/format-en.js
@@ -14,13 +14,14 @@ let enLongConjunction = new Intl.ListFormat(
["en"], {style: "long", type: 'conjunction'});
assertEquals('', enLongConjunction.format());
- assertEquals('', enLongConjunction.format([]));
+assertEquals('', enLongConjunction.format([]));
assertEquals('a', enLongConjunction.format(['a']));
assertEquals('b', enLongConjunction.format(['b']));
assertEquals('a and b', enLongConjunction.format(['a', 'b']));
assertEquals('a, b, and c', enLongConjunction.format(['a', 'b', 'c']));
assertEquals('a, b, c, and d', enLongConjunction.format(['a', 'b', 'c', 'd']));
-assertEquals('a, b, c, d, and and', enLongConjunction.format(['a', 'b', 'c', 'd', 'and']));
+assertEquals('a, b, c, d, and and',
+ enLongConjunction.format(['a', 'b', 'c', 'd', 'and']));
let enLongDisjunction = new Intl.ListFormat(
["en"], {style: "long", type: 'disjunction'});
@@ -32,7 +33,8 @@ assertEquals('b', enLongDisjunction.format(['b']));
assertEquals('a or b', enLongDisjunction.format(['a', 'b']));
assertEquals('a, b, or c', enLongDisjunction.format(['a', 'b', 'c']));
assertEquals('a, b, c, or d', enLongDisjunction.format(['a', 'b', 'c', 'd']));
-assertEquals('a, b, c, d, or or', enLongDisjunction.format(['a', 'b', 'c', 'd', 'or']));
+assertEquals('a, b, c, d, or or',
+ enLongDisjunction.format(['a', 'b', 'c', 'd', 'or']));
let enLongUnit = new Intl.ListFormat(
["en"], {style: "long", type: 'unit'});
@@ -56,7 +58,8 @@ assertEquals('b', enShortConjunction.format(['b']));
assertEquals('a and b', enShortConjunction.format(['a', 'b']));
assertEquals('a, b, and c', enShortConjunction.format(['a', 'b', 'c']));
assertEquals('a, b, c, and d', enShortConjunction.format(['a', 'b', 'c', 'd']));
-assertEquals('a, b, c, d, and and', enShortConjunction.format(['a', 'b', 'c', 'd', 'and']));
+assertEquals('a, b, c, d, and and',
+ enShortConjunction.format(['a', 'b', 'c', 'd', 'and']));
let enShortDisjunction = new Intl.ListFormat(
["en"], {style: "short", type: 'disjunction'});
@@ -82,30 +85,6 @@ assertEquals('a, b, c', enShortUnit.format(['a', 'b', 'c']));
assertEquals('a, b, c, d', enShortUnit.format(['a', 'b', 'c', 'd']));
assertEquals('a, b, c, d, or', enShortUnit.format(['a', 'b', 'c', 'd', 'or']));
-let enNarrowConjunction = new Intl.ListFormat(
- ["en"], {style: "narrow", type: 'conjunction'});
-
-assertEquals('', enNarrowConjunction.format());
-assertEquals('', enNarrowConjunction.format([]));
-assertEquals('a', enNarrowConjunction.format(['a']));
-assertEquals('b', enNarrowConjunction.format(['b']));
-assertEquals('a and b', enNarrowConjunction.format(['a', 'b']));
-assertEquals('a, b, and c', enNarrowConjunction.format(['a', 'b', 'c']));
-assertEquals('a, b, c, and d', enNarrowConjunction.format(['a', 'b', 'c', 'd']));
-assertEquals('a, b, c, d, and and', enNarrowConjunction.format(['a', 'b', 'c', 'd', 'and']));
-
-let enNarrowDisjunction = new Intl.ListFormat(
- ["en"], {style: "narrow", type: 'disjunction'});
-
-assertEquals('', enNarrowDisjunction.format());
-assertEquals('', enNarrowDisjunction.format([]));
-assertEquals('a', enNarrowDisjunction.format(['a']));
-assertEquals('b', enNarrowDisjunction.format(['b']));
-assertEquals('a or b', enNarrowDisjunction.format(['a', 'b']));
-assertEquals('a, b, or c', enNarrowDisjunction.format(['a', 'b', 'c']));
-assertEquals('a, b, c, or d', enNarrowDisjunction.format(['a', 'b', 'c', 'd']));
-assertEquals('a, b, c, d, or or', enNarrowDisjunction.format(['a', 'b', 'c', 'd', 'or']));
-
let enNarrowUnit = new Intl.ListFormat(
["en"], {style: "narrow", type: 'unit'});
diff --git a/deps/v8/test/intl/list-format/format-to-parts.js b/deps/v8/test/intl/list-format/format-to-parts.js
index 83473b6d0b..64eac823ba 100644
--- a/deps/v8/test/intl/list-format/format-to-parts.js
+++ b/deps/v8/test/intl/list-format/format-to-parts.js
@@ -77,16 +77,22 @@ testFormatter(new Intl.ListFormat());
testFormatter(new Intl.ListFormat(["en"]));
testFormatter(new Intl.ListFormat(["en"], {style: 'long'}));
testFormatter(new Intl.ListFormat(["en"], {style: 'short'}));
-testFormatter(new Intl.ListFormat(["en"], {style: 'narrow'}));
+assertThrows(() => new Intl.ListFormat(["en"], {style: 'narrow'}), RangeError);
testFormatter(new Intl.ListFormat(["en"], {type: 'conjunction'}));
testFormatter(new Intl.ListFormat(["en"], {type: 'disjunction'}));
testFormatter(new Intl.ListFormat(["en"], {type: 'unit'}));
testFormatter(new Intl.ListFormat(["en"], {style: 'long', type: 'conjunction'}));
-testFormatter(new Intl.ListFormat(["en"], {style: 'short', type: 'conjunction'}));
-testFormatter(new Intl.ListFormat(["en"], {style: 'narrow', type: 'conjunction'}));
+testFormatter(
+ new Intl.ListFormat(["en"], {style: 'short', type: 'conjunction'}));
+assertThrows(
+ () => new Intl.ListFormat(
+ ["en"], {style: 'narrow', type: 'conjunction'}), RangeError);
testFormatter(new Intl.ListFormat(["en"], {style: 'long', type: 'disjunction'}));
-testFormatter(new Intl.ListFormat(["en"], {style: 'short', type: 'disjunction'}));
-testFormatter(new Intl.ListFormat(["en"], {style: 'narrow', type: 'disjunction'}));
+testFormatter(
+ new Intl.ListFormat(["en"], {style: 'short', type: 'disjunction'}));
+assertThrows(
+ () => new Intl.ListFormat(
+ ["en"], {style: 'narrow', type: 'disjunction'}), RangeError);
testFormatter(new Intl.ListFormat(["en"], {style: 'long', type: 'unit'}));
testFormatter(new Intl.ListFormat(["en"], {style: 'short', type: 'unit'}));
testFormatter(new Intl.ListFormat(["en"], {style: 'narrow', type: 'unit'}));
diff --git a/deps/v8/test/intl/list-format/format.js b/deps/v8/test/intl/list-format/format.js
index 677cb22496..fef05c38e0 100644
--- a/deps/v8/test/intl/list-format/format.js
+++ b/deps/v8/test/intl/list-format/format.js
@@ -48,16 +48,19 @@ testFormatter(new Intl.ListFormat());
testFormatter(new Intl.ListFormat(["en"]));
testFormatter(new Intl.ListFormat(["en"], {style: 'long'}));
testFormatter(new Intl.ListFormat(["en"], {style: 'short'}));
-testFormatter(new Intl.ListFormat(["en"], {style: 'narrow'}));
+assertThrows(() => new Intl.ListFormat(
+ ["en"], {style: 'narrow'}), RangeError);
testFormatter(new Intl.ListFormat(["en"], {type: 'conjunction'}));
testFormatter(new Intl.ListFormat(["en"], {type: 'disjunction'}));
testFormatter(new Intl.ListFormat(["en"], {type: 'unit'}));
testFormatter(new Intl.ListFormat(["en"], {style: 'long', type: 'conjunction'}));
testFormatter(new Intl.ListFormat(["en"], {style: 'short', type: 'conjunction'}));
-testFormatter(new Intl.ListFormat(["en"], {style: 'narrow', type: 'conjunction'}));
+assertThrows(() => new Intl.ListFormat(
+ ["en"], {style: 'narrow', type: 'conjunction'}), RangeError);
testFormatter(new Intl.ListFormat(["en"], {style: 'long', type: 'disjunction'}));
testFormatter(new Intl.ListFormat(["en"], {style: 'short', type: 'disjunction'}));
-testFormatter(new Intl.ListFormat(["en"], {style: 'narrow', type: 'disjunction'}));
+assertThrows(() => new Intl.ListFormat(
+ ["en"], {style: 'narrow', type: 'disjunction'}), RangeError);
testFormatter(new Intl.ListFormat(["en"], {style: 'long', type: 'unit'}));
testFormatter(new Intl.ListFormat(["en"], {style: 'short', type: 'unit'}));
testFormatter(new Intl.ListFormat(["en"], {style: 'narrow', type: 'unit'}));
diff --git a/deps/v8/test/intl/list-format/resolved-options.js b/deps/v8/test/intl/list-format/resolved-options.js
index 270eb33e45..b5662718e5 100644
--- a/deps/v8/test/intl/list-format/resolved-options.js
+++ b/deps/v8/test/intl/list-format/resolved-options.js
@@ -22,16 +22,6 @@ assertEquals(
.resolvedOptions().type);
assertEquals(
- 'narrow',
- (new Intl.ListFormat(['sr'], {style: 'narrow'}))
- .resolvedOptions().style);
-
-assertEquals(
- 'conjunction',
- (new Intl.ListFormat(['sr'], {style: 'narrow'}))
- .resolvedOptions().type);
-
-assertEquals(
'long',
(new Intl.ListFormat(['sr'], {style: 'long'}))
.resolvedOptions().style);
@@ -72,33 +62,43 @@ assertEquals(
.resolvedOptions().style);
assertEquals(
- 'disjunction',
- (new Intl.ListFormat(['sr'], {style: 'long', type: 'disjunction'}))
+ 'conjunction',
+ (new Intl.ListFormat(['sr'], {style: 'long', type: 'conjunction'}))
.resolvedOptions().type);
assertEquals(
'long',
- (new Intl.ListFormat(['sr'], {style: 'long', type: 'disjunction'}))
+ (new Intl.ListFormat(['sr'], {style: 'long', type: 'conjunction'}))
.resolvedOptions().style);
assertEquals(
- 'disjunction',
- (new Intl.ListFormat(['sr'], {style: 'short', type: 'disjunction'}))
+ 'conjunction',
+ (new Intl.ListFormat(['sr'], {style: 'short', type: 'conjunction'}))
.resolvedOptions().type);
assertEquals(
'short',
- (new Intl.ListFormat(['sr'], {style: 'short', type: 'disjunction'}))
+ (new Intl.ListFormat(['sr'], {style: 'short', type: 'conjunction'}))
.resolvedOptions().style);
assertEquals(
'disjunction',
- (new Intl.ListFormat(['sr'], {style: 'narrow', type: 'disjunction'}))
+ (new Intl.ListFormat(['sr'], {style: 'long', type: 'disjunction'}))
.resolvedOptions().type);
assertEquals(
- 'narrow',
- (new Intl.ListFormat(['sr'], {style: 'narrow', type: 'disjunction'}))
+ 'long',
+ (new Intl.ListFormat(['sr'], {style: 'long', type: 'disjunction'}))
+ .resolvedOptions().style);
+
+assertEquals(
+ 'disjunction',
+ (new Intl.ListFormat(['sr'], {style: 'short', type: 'disjunction'}))
+ .resolvedOptions().type);
+
+assertEquals(
+ 'short',
+ (new Intl.ListFormat(['sr'], {style: 'short', type: 'disjunction'}))
.resolvedOptions().style);
assertEquals(
@@ -147,9 +147,6 @@ assertEquals(
'ar',
(new Intl.ListFormat(['xyz', 'ar'])).resolvedOptions().locale);
-// The following is not working yet because it depend on the getAvailableLocales
-// work in another path set.
-// TODO(ftang): uncomment the following once that patchset is checked in.
-// assertEquals(
-// 'ar',
-// (new Intl.ListFormat(['i-default', 'ar'])).resolvedOptions().locale);
+assertEquals(
+ 'ar',
+ (new Intl.ListFormat(['i-default', 'ar'])).resolvedOptions().locale);
diff --git a/deps/v8/test/intl/locale/locale-canonicalization.js b/deps/v8/test/intl/locale/locale-canonicalization.js
index 5012537277..cc0478fdb6 100644
--- a/deps/v8/test/intl/locale/locale-canonicalization.js
+++ b/deps/v8/test/intl/locale/locale-canonicalization.js
@@ -19,6 +19,6 @@ let locale = new Intl.Locale('sr-cyrl-rs-t-ja-u-ca-islamic-x-whatever', {
});
let expected =
- 'sr-Cyrl-RS-t-ja-u-ca-buddhist-co-phonebk-hc-h23-kf-upper-kn-true-nu-roman-x-whatever';
+ 'sr-Cyrl-RS-t-ja-u-ca-buddhist-co-phonebk-hc-h23-kf-upper-kn-nu-roman-x-whatever';
assertEquals(expected, locale.toString());
diff --git a/deps/v8/test/intl/locale/locale-constructor.js b/deps/v8/test/intl/locale/locale-constructor.js
index 3da9e291be..bf2510553f 100644
--- a/deps/v8/test/intl/locale/locale-constructor.js
+++ b/deps/v8/test/intl/locale/locale-constructor.js
@@ -86,9 +86,7 @@ assertThrows(
}),
Error);
-// These don't throw yet, we need to implement language/script/region
-// override logic first.
-assertDoesNotThrow(
+assertThrows(
() => new Intl.Locale('en-US', {
get language() {
throw new Error('foo');
@@ -96,7 +94,7 @@ assertDoesNotThrow(
}),
Error);
-assertDoesNotThrow(
+assertThrows(
() => new Intl.Locale('en-US', {
get script() {
throw new Error('foo');
@@ -104,7 +102,7 @@ assertDoesNotThrow(
}),
Error);
-assertDoesNotThrow(
+assertThrows(
() => new Intl.Locale('en-US', {
get region() {
throw new Error('foo');
diff --git a/deps/v8/test/intl/locale/property.js b/deps/v8/test/intl/locale/property.js
new file mode 100644
index 0000000000..cbe076842f
--- /dev/null
+++ b/deps/v8/test/intl/locale/property.js
@@ -0,0 +1,22 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-locale
+
+// Make sure that accessing locale property will return undefined instead of
+// crash.
+
+let locale = new Intl.Locale('sr');
+
+assertEquals('sr', locale.toString());
+assertEquals('sr', locale.baseName);
+assertEquals('sr', locale.language);
+assertEquals(undefined, locale.script);
+assertEquals(undefined, locale.region);
+assertEquals(false, locale.numeric);
+assertEquals(undefined, locale.calendar);
+assertEquals(undefined, locale.collation);
+assertEquals(undefined, locale.hourCycle);
+assertEquals(undefined, locale.caseFirst);
+assertEquals(undefined, locale.numberingSystem);
diff --git a/deps/v8/test/intl/number-format/check-nu-option.js b/deps/v8/test/intl/number-format/check-nu-option.js
new file mode 100644
index 0000000000..39c4cbb8cf
--- /dev/null
+++ b/deps/v8/test/intl/number-format/check-nu-option.js
@@ -0,0 +1,59 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+
+let invalid_nu = [
+ "invalid",
+ "abce",
+ "finance",
+ "native",
+ "traditio",
+];
+
+// https://tc39.github.io/ecma402/#table-numbering-system-digits
+let valid_nu = [
+ "arab",
+ "arabext",
+ "bali",
+ "beng",
+ "deva",
+ "fullwide",
+ "gujr",
+ "guru",
+ "hanidec",
+ "khmr",
+ "knda",
+ "laoo",
+ "latn",
+ "limb",
+ "mlym",
+ "mong",
+ "mymr",
+ "orya",
+ "tamldec",
+ "telu",
+ "thai",
+ "tibt",
+];
+
+let locales = [
+ "en",
+ "ar",
+];
+
+
+invalid_nu.forEach(function(nu) {
+ let nf = new Intl.NumberFormat(["en-u-nu-" + nu + "-fo-obar"]);
+ assertEquals("en", nf.resolvedOptions().locale);
+});
+
+valid_nu.forEach(function(nu) {
+ locales.forEach(function(base) {
+ let l = base + "-u-nu-" + nu;
+ let nf = new Intl.NumberFormat([l + "-fo-obar"]);
+ assertEquals(l, nf.resolvedOptions().locale);
+ });
+});
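+
+// Follow-up sketch (assumed behavior, matching regress-8525): a valid
+// -u-nu- value should also surface through resolvedOptions().
+let nf_thai = new Intl.NumberFormat(["en-u-nu-thai"]);
+assertEquals("thai", nf_thai.resolvedOptions().numberingSystem);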
diff --git a/deps/v8/test/intl/number-format/constructor-order.js b/deps/v8/test/intl/number-format/constructor-order.js
new file mode 100644
index 0000000000..db99793bb9
--- /dev/null
+++ b/deps/v8/test/intl/number-format/constructor-order.js
@@ -0,0 +1,42 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Throws only once during construction.
+// Check for all getters to prevent regression.
+// Preserve the order of getter initialization.
+let getCount = 0;
+
+new Intl.NumberFormat(['en-US'], {
+ get localeMatcher() {
+ assertEquals(0, getCount++);
+ },
+ get style() {
+ assertEquals(1, getCount++);
+ },
+ get currency() {
+ assertEquals(2, getCount++);
+ },
+ get currencyDisplay() {
+ assertEquals(3, getCount++);
+ },
+ get minimumIntegerDigits() {
+ assertEquals(4, getCount++);
+ },
+ get minimumFractionDigits() {
+ assertEquals(5, getCount++);
+ },
+ get maximumFractionDigits() {
+ assertEquals(6, getCount++);
+ },
+ get minimumSignificantDigits() {
+ assertEquals(7, getCount++);
+ },
+ get maximumSignificantDigits() {
+ assertEquals(8, getCount++);
+ },
+ get useGrouping() {
+ assertEquals(9, getCount++);
+ },
+});
+assertEquals(10, getCount);
diff --git a/deps/v8/test/intl/number-format/default-locale.js b/deps/v8/test/intl/number-format/default-locale.js
index a24aec2333..7e67176baf 100644
--- a/deps/v8/test/intl/number-format/default-locale.js
+++ b/deps/v8/test/intl/number-format/default-locale.js
@@ -37,8 +37,5 @@ assertFalse(options.locale === 'und');
assertFalse(options.locale === '');
assertFalse(options.locale === undefined);
-// Then check for legitimacy.
-assertLanguageTag(%GetDefaultICULocale(), options.locale);
-
var nfNone = new Intl.NumberFormat();
assertEquals(options.locale, nfNone.resolvedOptions().locale);
diff --git a/deps/v8/test/intl/number-format/wellformed-unsupported-locale.js b/deps/v8/test/intl/number-format/wellformed-unsupported-locale.js
deleted file mode 100644
index c51753928e..0000000000
--- a/deps/v8/test/intl/number-format/wellformed-unsupported-locale.js
+++ /dev/null
@@ -1,32 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Passing a well formed but unsupported locale falls back to default.
-
-var nf = Intl.NumberFormat(['xx']);
-
-assertLanguageTag(%GetDefaultICULocale(), nf.resolvedOptions().locale);
diff --git a/deps/v8/test/intl/plural-rules/constructor-order.js b/deps/v8/test/intl/plural-rules/constructor-order.js
new file mode 100644
index 0000000000..18acdcccbb
--- /dev/null
+++ b/deps/v8/test/intl/plural-rules/constructor-order.js
@@ -0,0 +1,33 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Throws only once during construction.
+// Check for all getters to prevent regression.
+// Preserve the order of getter initialization.
+let getCount = 0;
+
+new Intl.PluralRules(['en-US'], {
+ get localeMatcher() {
+ assertEquals(0, getCount++);
+ },
+ get type() {
+ assertEquals(1, getCount++);
+ },
+ get minimumIntegerDigits() {
+ assertEquals(2, getCount++);
+ },
+ get minimumFractionDigits() {
+ assertEquals(3, getCount++);
+ },
+ get maximumFractionDigits() {
+ assertEquals(4, getCount++);
+ },
+ get minimumSignificantDigits() {
+ assertEquals(5, getCount++);
+ },
+ get maximumSignificantDigits() {
+ assertEquals(6, getCount++);
+ },
+});
+assertEquals(7, getCount);
diff --git a/deps/v8/test/intl/regexp-assert.js b/deps/v8/test/intl/regexp-assert.js
deleted file mode 100644
index 28fafd0125..0000000000
--- a/deps/v8/test/intl/regexp-assert.js
+++ /dev/null
@@ -1,19 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-assertEquals("a", RegExp.$1);
-assertEquals("b", RegExp.$2);
-assertEquals("c", RegExp.$3);
-assertEquals("d", RegExp.$4);
-assertEquals("e", RegExp.$5);
-assertEquals("f", RegExp.$6);
-assertEquals("g", RegExp.$7);
-assertEquals("h", RegExp.$8);
-assertEquals("i", RegExp.$9);
-
-assertEquals("abcdefghij", RegExp.lastMatch);
-assertEquals("j", RegExp.lastParen);
-assertEquals(">>>", RegExp.leftContext);
-assertEquals("<<<", RegExp.rightContext);
-assertEquals(">>>abcdefghij<<<", RegExp.input);
diff --git a/deps/v8/test/intl/regexp-prepare.js b/deps/v8/test/intl/regexp-prepare.js
deleted file mode 100644
index dec84110ed..0000000000
--- a/deps/v8/test/intl/regexp-prepare.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-/(\w)(\w)(\w)(\w)(\w)(\w)(\w)(\w)(\w)(\w)/.exec(">>>abcdefghij<<<");
diff --git a/deps/v8/test/intl/regress-7481.js b/deps/v8/test/intl/regress-7481.js
new file mode 100644
index 0000000000..c3441e35cb
--- /dev/null
+++ b/deps/v8/test/intl/regress-7481.js
@@ -0,0 +1,39 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+assertEquals(
+ "en-u-hc-h11-nu-arab",
+ new Intl.DateTimeFormat(["en-u-hc-h11-nu-arab"]).resolvedOptions().locale
+);
+assertEquals(
+ "en-u-hc-h12-nu-arab",
+ new Intl.DateTimeFormat(["en-u-hc-h12-nu-arab"]).resolvedOptions().locale
+);
+assertEquals(
+ "en-u-hc-h23-nu-arab",
+ new Intl.DateTimeFormat(["en-u-hc-h23-nu-arab"]).resolvedOptions().locale
+);
+assertEquals(
+ "en-u-hc-h24-nu-arab",
+ new Intl.DateTimeFormat(["en-u-hc-h24-nu-arab"]).resolvedOptions().locale
+);
+
+// https://tc39.github.io/ecma402/#sec-intl.datetimeformat-internal-slots
+// An invalid hc value should be removed.
+// [[LocaleData]][locale].hc must be « null, "h11", "h12", "h23", "h24" » for all locale values.
+assertEquals(
+ "en-u-nu-arab",
+ new Intl.DateTimeFormat(["en-u-hc-h10-nu-arab"]).resolvedOptions().locale
+);
+assertEquals(
+ "en-u-nu-arab",
+ new Intl.DateTimeFormat(["en-u-hc-h13-nu-arab"]).resolvedOptions().locale
+);
+assertEquals(
+ "en-u-nu-arab",
+ new Intl.DateTimeFormat(["en-u-hc-h22-nu-arab"]).resolvedOptions().locale
+);
+assertEquals(
+ "en-u-nu-arab",
+ new Intl.DateTimeFormat(["en-u-hc-h25-nu-arab"]).resolvedOptions().locale
+);
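+
+// Positive-case sketch (assumed from the round-trips above): a valid hc
+// value alone, without other extensions, is preserved in the locale.
+assertEquals(
+  "en-u-hc-h11",
+  new Intl.DateTimeFormat(["en-u-hc-h11"]).resolvedOptions().locale
+);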
diff --git a/deps/v8/test/intl/regress-8432.js b/deps/v8/test/intl/regress-8432.js
new file mode 100644
index 0000000000..13083e3c82
--- /dev/null
+++ b/deps/v8/test/intl/regress-8432.js
@@ -0,0 +1,42 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Somehow only ar-SA fails on Android in regress-8413*.js.
+// Split it into this test just for ar-SA.
+// This is likely caused by Android-specific ICU data trimming.
+let locales = [ "ar-SA" ];
+
+// "Table 5: Components of date and time formats" as in
+// https://ecma-international.org/ecma-402/#sec-datetimeformat-abstracts
+let table5 = [
+ ["weekday", ["narrow", "short", "long"]],
+ ["era", ["narrow", "short", "long"]],
+ ["year", ["2-digit", "numeric"]],
+ ["month", ["2-digit", "numeric", "narrow", "short", "long"]],
+ ["day", ["2-digit", "numeric"]],
+ ["hour", ["2-digit", "numeric"]],
+ ["minute", ["2-digit", "numeric"]],
+ ["second", ["2-digit", "numeric"]],
+ ["timeZoneName", ["short", "long"]]
+];
+
+// Test each locale
+for (let loc of locales) {
+ // Test each property in Table 5
+ for (let row of table5) {
+ let prop = row[0];
+ let values = row[1];
+ // Test each value of the property
+ for (let value of values) {
+ let opt = {};
+ opt[prop] = value;
+ let dft = new Intl.DateTimeFormat([loc], opt);
+ let result = dft.resolvedOptions();
+ assertTrue(values.indexOf(result[prop]) >= 0,
+ "Expect new Intl.DateTimeFormat([" + loc + "], {" + prop + ": '" +
+ value + "'}).resolvedOptions()['" + prop + "'] to return one of [" +
+ values + "] but got '" + result[prop] + "'");
+ }
+ }
+}
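+
+// One concrete instance of the loop above, spelled out as a sketch:
+let resolved = new Intl.DateTimeFormat(["ar-SA"], {year: "numeric"})
+    .resolvedOptions();
+assertTrue(["2-digit", "numeric"].indexOf(resolved.year) >= 0);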
diff --git a/deps/v8/test/intl/regress-8469.js b/deps/v8/test/intl/regress-8469.js
new file mode 100644
index 0000000000..7febb51cf9
--- /dev/null
+++ b/deps/v8/test/intl/regress-8469.js
@@ -0,0 +1,87 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// The following tz are NOT impacted by v8:8469
+var some_tz_list = [
+ "ciabj",
+ "ghacc",
+];
+
+// The following tz ARE impacted by v8:8469
+var problem_tz_list = [
+ "etadd",
+ "tzdar",
+ "eheai",
+ "sttms",
+ "arirj",
+ "arrgl",
+ "aruaq",
+ "arluq",
+ "mxpvr",
+ "brbvb",
+ "arbue",
+ "caycb",
+ "brcgr",
+ "cayzs",
+ "crsjo",
+ "caydq",
+ "svsal",
+ "cafne",
+ "caglb",
+ "cagoo",
+ "tcgdt",
+ "ustel",
+ "bolpb",
+ "uslax",
+ "sxphi",
+ "mxmex",
+ "usnyc",
+ "usxul",
+ "usndcnt",
+ "usndnsl",
+ "ttpos",
+ "brpvh",
+ "prsju",
+ "clpuq",
+ "caffs",
+ "cayek",
+ "brrbr",
+ "mxstis",
+ "dosdq",
+ "brsao",
+ "gpsbh",
+ "casjf",
+ "knbas",
+ "lccas",
+ "vistt",
+ "vcsvd",
+ "cayyn",
+ "cathu",
+ "hkhkg",
+ "mykul",
+ "khpnh",
+ "cvrai",
+ "gsgrv",
+ "shshn",
+ "aubhq",
+ "auldh",
+ "imdgs",
+ "smsai",
+ "asppg",
+ "pgpom",
+];
+
+let expectedTimeZone = (new Intl.DateTimeFormat("en"))
+ .resolvedOptions().timeZone;
+
+function testTz(tz) {
+ print(tz);
+ let timeZone = (new Intl.DateTimeFormat("en-u-tz-" + tz))
+ .resolvedOptions().timeZone;
+ assertEquals(expectedTimeZone, timeZone);
+}
+
+// First test some tz not impacted by v8:8469 to ensure testTz is correct.
+for (var tz of some_tz_list) testTz(tz);
+for (var tz of problem_tz_list) testTz(tz);
diff --git a/deps/v8/test/intl/regress-8525.js b/deps/v8/test/intl/regress-8525.js
new file mode 100644
index 0000000000..4d925d73c3
--- /dev/null
+++ b/deps/v8/test/intl/regress-8525.js
@@ -0,0 +1,27 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Test that the numberingSystem is set correctly via -u-nu-.
+let dtf = new Intl.DateTimeFormat(["en-u-ba-rfoo-nu-arab-fo-obar"]);
+assertEquals("arab", dtf.resolvedOptions().numberingSystem);
+assertEquals("en-u-nu-arab", dtf.resolvedOptions().locale);
+
+let nf = new Intl.NumberFormat(["en-u-ba-rfoo-nu-arab-fo-obar"]);
+assertEquals("arab", nf.resolvedOptions().numberingSystem);
+assertEquals("١٢٣", nf.format(123));
+assertEquals("en-u-nu-arab", nf.resolvedOptions().locale);
+
+dtf = new Intl.DateTimeFormat(["en-u-ba-rfoo-nu-thai-fo-obar"]);
+assertEquals("thai", dtf.resolvedOptions().numberingSystem);
+assertEquals("en-u-nu-thai", dtf.resolvedOptions().locale);
+
+nf = new Intl.NumberFormat(["en-u-ba-rfoo-nu-thai-fo-obar"]);
+assertEquals("thai", nf.resolvedOptions().numberingSystem);
+assertEquals("๑๒๓", nf.format(123));
+assertEquals("en-u-nu-thai", nf.resolvedOptions().locale);
+
+nf = new Intl.NumberFormat(["ar-EG-u-nu-latn"]);
+assertEquals("latn", nf.resolvedOptions().numberingSystem);
+assertEquals("123", nf.format(123));
+assertEquals("ar-EG-u-nu-latn", nf.resolvedOptions().locale);
diff --git a/deps/v8/test/intl/regress-8657.js b/deps/v8/test/intl/regress-8657.js
new file mode 100644
index 0000000000..c1c5cea708
--- /dev/null
+++ b/deps/v8/test/intl/regress-8657.js
@@ -0,0 +1,7 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-locale
+
+assertDoesNotThrow(() => new Intl.Locale('und'));
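+
+// Extra sketch: the minimal 'und' tag should also round-trip unchanged.
+assertEquals('und', new Intl.Locale('und').toString());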
diff --git a/deps/v8/test/intl/regress-895942.js b/deps/v8/test/intl/regress-895942.js
new file mode 100644
index 0000000000..88574cf916
--- /dev/null
+++ b/deps/v8/test/intl/regress-895942.js
@@ -0,0 +1,6 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+x = new Intl.DateTimeFormat("en-u-foo-x-u");
+assertEquals('en', x.resolvedOptions().locale);
diff --git a/deps/v8/test/intl/regress-900013.js b/deps/v8/test/intl/regress-900013.js
new file mode 100644
index 0000000000..c7cdc65895
--- /dev/null
+++ b/deps/v8/test/intl/regress-900013.js
@@ -0,0 +1,9 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-intl-segmenter
+
+// Regression test to ensure Intl["SegmentIterator"] is not constructible.
+
+assertThrows(() => new Intl["SegmentIterator"](), TypeError);
diff --git a/deps/v8/test/intl/regress-903566.js b/deps/v8/test/intl/regress-903566.js
new file mode 100644
index 0000000000..9346fa63a8
--- /dev/null
+++ b/deps/v8/test/intl/regress-903566.js
@@ -0,0 +1,32 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Flags: --allow-natives-syntax
+
+assertDoesNotThrow(()=>(new Intl.ListFormat()).format());
+// Intl.getCanonicalLocales() will create a HOLEY_ELEMENTS array
+assertDoesNotThrow(()=>(new Intl.ListFormat()).format(Intl.getCanonicalLocales()));
+assertDoesNotThrow(()=>(new Intl.ListFormat()).format(Intl.getCanonicalLocales(["en","fr"])));
+
+let arr = ["a","b","c"];
+
+// Test while %HasHoleyElements() is false (packed elements).
+assertFalse(%HasHoleyElements(arr));
+assertDoesNotThrow(()=>(new Intl.ListFormat()).format(arr));
+for (var i = 0; i < 10000; i++) {
+ arr.push("xx");
+}
+assertFalse(%HasHoleyElements(arr));
+assertDoesNotThrow(()=>(new Intl.ListFormat()).format(arr));
+
+// Test under %HasHoleyElements(): holes cause a TypeError.
+arr[arr.length + 10] = "x";
+assertTrue(%HasHoleyElements(arr));
+assertFalse(%HasDictionaryElements(arr));
+assertThrows(()=>(new Intl.ListFormat()).format(arr), TypeError);
+
+// Test behavior under %HasDictionaryElements(): holes still cause a TypeError.
+arr = ["a","b","c"];
+arr[arr.length + 100000] = "x";
+assertTrue(%HasDictionaryElements(arr));
+assertThrows(()=>(new Intl.ListFormat()).format(arr), TypeError);
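+
+// Additional sketch (same rationale as above, assumed): deleting an element
+// also creates a hole, so format() should throw a TypeError here too.
+arr = ["a", "b", "c"];
+delete arr[1];
+assertTrue(%HasHoleyElements(arr));
+assertThrows(() => (new Intl.ListFormat()).format(arr), TypeError);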
diff --git a/deps/v8/test/intl/regress-917151.js b/deps/v8/test/intl/regress-917151.js
new file mode 100644
index 0000000000..9b971fe2b9
--- /dev/null
+++ b/deps/v8/test/intl/regress-917151.js
@@ -0,0 +1,11 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Regression test for 917151
+
+assertThrows(
+ () => Number.prototype.toLocaleString.call(
+ -22,
+ "x-6-6-6-6-6-6-6-6-6-6-6-6-6-6-6-6-6-6-6-6-6-6-6-6-6-6-6-6-6-6-6-6-6-6-6-6-6-6-6-6-6-6-6-6-6-6-6-6-6-6-6-6-6-6-6-6-6-6-6-6-6-6-6-6-6-6-6-6-6-6-6-6"),
+  RangeError);
diff --git a/deps/v8/test/intl/regress-925216.js b/deps/v8/test/intl/regress-925216.js
new file mode 100644
index 0000000000..f9683dfc77
--- /dev/null
+++ b/deps/v8/test/intl/regress-925216.js
@@ -0,0 +1,10 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+assertTrue(new Intl.DateTimeFormat(
+ "en", { timeZone: 'UTC', hour: 'numeric'}).resolvedOptions().hour12);
+assertFalse(new Intl.DateTimeFormat(
+ "fr", { timeZone: 'UTC', hour: 'numeric'}).resolvedOptions().hour12);
+assertFalse(new Intl.DateTimeFormat(
+ "de", { timeZone: 'UTC', hour: 'numeric'}).resolvedOptions().hour12);
diff --git a/deps/v8/test/intl/relative-time-format/constructor-order.js b/deps/v8/test/intl/relative-time-format/constructor-order.js
new file mode 100644
index 0000000000..540034dd4d
--- /dev/null
+++ b/deps/v8/test/intl/relative-time-format/constructor-order.js
@@ -0,0 +1,21 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Throws only once during construction.
+// Check for all getters to prevent regression.
+// Preserve the order of getter initialization.
+let getCount = 0;
+
+new Intl.RelativeTimeFormat(['en-US'], {
+ get localeMatcher() {
+ assertEquals(0, getCount++);
+ },
+ get style() {
+ assertEquals(1, getCount++);
+ },
+ get numeric() {
+ assertEquals(2, getCount++);
+ },
+});
+assertEquals(3, getCount);
diff --git a/deps/v8/test/intl/relative-time-format/constructor.js b/deps/v8/test/intl/relative-time-format/constructor.js
index 91d649891d..ba03e1dd70 100644
--- a/deps/v8/test/intl/relative-time-format/constructor.js
+++ b/deps/v8/test/intl/relative-time-format/constructor.js
@@ -77,27 +77,3 @@ assertDoesNotThrow(
assertThrows(
() => new Intl.RelativeTimeFormat('sr', {numeric: 'never'}),
RangeError);
-
-// Throws only once during construction.
-// Check for all getters to prevent regression.
-// Preserve the order of getter initialization.
-let getCount = 0;
-let localeMatcher = -1;
-let style = -1;
-let numeric = -1;
-
-new Intl.RelativeTimeFormat('en-US', {
- get localeMatcher() {
- localeMatcher = ++getCount;
- },
- get style() {
- style = ++getCount;
- },
- get numeric() {
- numeric = ++getCount;
- }
-});
-
-assertEquals(1, localeMatcher);
-assertEquals(2, style);
-assertEquals(3, numeric);
diff --git a/deps/v8/test/intl/relative-time-format/format-to-parts-en.js b/deps/v8/test/intl/relative-time-format/format-to-parts-en.js
index 52a0b885d7..689059f4cd 100644
--- a/deps/v8/test/intl/relative-time-format/format-to-parts-en.js
+++ b/deps/v8/test/intl/relative-time-format/format-to-parts-en.js
@@ -66,3 +66,44 @@ assertEquals('day', parts[0].unit);
assertEquals(2, Object.getOwnPropertyNames(parts[1]).length);
assertEquals('literal', parts[1].type);
assertEquals(' days ago', parts[1].value);
+
+// Test with a non-integer value.
+// Part Idx: 0 1 23 45 6
+assertEquals('in 123,456.78 seconds', longAlways.format(123456.78, 'seconds'));
+parts = longAlways.formatToParts(123456.78, 'seconds');
+assertEquals(7, parts.length);
+// 0: "in "
+assertEquals(2, Object.getOwnPropertyNames(parts[0]).length);
+assertEquals('literal', parts[0].type);
+assertEquals('in ', parts[0].value);
+assertEquals(undefined, parts[0].unit);
+// 1: "123"
+assertEquals(3, Object.getOwnPropertyNames(parts[1]).length);
+assertEquals('integer', parts[1].type);
+assertEquals('123', parts[1].value);
+assertEquals('second', parts[1].unit);
+// 2: ","
+assertEquals(3, Object.getOwnPropertyNames(parts[2]).length);
+assertEquals('group', parts[2].type);
+assertEquals(',', parts[2].value);
+assertEquals('second', parts[2].unit);
+// 3: "456"
+assertEquals(3, Object.getOwnPropertyNames(parts[3]).length);
+assertEquals('integer', parts[3].type);
+assertEquals('456', parts[3].value);
+assertEquals('second', parts[3].unit);
+// 4: "."
+assertEquals(3, Object.getOwnPropertyNames(parts[4]).length);
+assertEquals('decimal', parts[4].type);
+assertEquals('.', parts[4].value);
+assertEquals('second', parts[4].unit);
+// 5: "78"
+assertEquals(3, Object.getOwnPropertyNames(parts[5]).length);
+assertEquals('fraction', parts[5].type);
+assertEquals('78', parts[5].value);
+assertEquals('second', parts[5].unit);
+// 6: " seconds"
+assertEquals(2, Object.getOwnPropertyNames(parts[6]).length);
+assertEquals('literal', parts[6].type);
+assertEquals(' seconds', parts[6].value);
+assertEquals(undefined, parts[6].unit);
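+
+// Compact cross-check (sketch): joining the part values reproduces the
+// string returned by format() above.
+assertEquals('in 123,456.78 seconds', parts.map(p => p.value).join(''));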
diff --git a/deps/v8/test/intl/relative-time-format/resolved-options-nu.js b/deps/v8/test/intl/relative-time-format/resolved-options-nu.js
new file mode 100644
index 0000000000..fb1fa72a93
--- /dev/null
+++ b/deps/v8/test/intl/relative-time-format/resolved-options-nu.js
@@ -0,0 +1,97 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-intl-relative-time-format
+
+// Locales whose default numberingSystem is 'latn'.
+assertEquals(
+ "latn",
+ new Intl.RelativeTimeFormat("ar").resolvedOptions().numberingSystem
+);
+assertEquals(
+ "latn",
+ new Intl.RelativeTimeFormat("en").resolvedOptions().numberingSystem
+);
+assertEquals(
+ "latn",
+ new Intl.RelativeTimeFormat("fr").resolvedOptions().numberingSystem
+);
+assertEquals(
+ "latn",
+ new Intl.RelativeTimeFormat("hi").resolvedOptions().numberingSystem
+);
+assertEquals(
+ "latn",
+ new Intl.RelativeTimeFormat("th").resolvedOptions().numberingSystem
+);
+assertEquals(
+ "latn",
+ new Intl.RelativeTimeFormat("zh-Hant").resolvedOptions().numberingSystem
+);
+
+// Locales whose default numberingSystem is other than 'latn'.
+assertEquals(
+ "arab",
+ new Intl.RelativeTimeFormat("ar-TD").resolvedOptions().numberingSystem
+);
+assertEquals(
+ "arabext",
+ new Intl.RelativeTimeFormat("fa").resolvedOptions().numberingSystem
+);
+assertEquals(
+ "beng",
+ new Intl.RelativeTimeFormat("bn").resolvedOptions().numberingSystem
+);
+
+// Use -u-nu- to switch a locale to another numberingSystem.
+assertEquals(
+ "thai",
+ new Intl.RelativeTimeFormat("en-u-nu-thai").resolvedOptions()
+ .numberingSystem
+);
+assertEquals(
+ "arab",
+ new Intl.RelativeTimeFormat("en-u-nu-arab").resolvedOptions()
+ .numberingSystem
+);
+
+// Locales that default to another system but switch to 'latn' via -u-nu-latn.
+assertEquals(
+ "latn",
+ new Intl.RelativeTimeFormat("fa-u-nu-latn").resolvedOptions()
+ .numberingSystem
+);
+assertEquals(
+ "latn",
+ new Intl.RelativeTimeFormat("ar-TD-u-nu-latn").resolvedOptions()
+ .numberingSystem
+);
+assertEquals(
+ "latn",
+ new Intl.RelativeTimeFormat("fa-u-nu-latn").resolvedOptions()
+ .numberingSystem
+);
+assertEquals(
+ "latn",
+ new Intl.RelativeTimeFormat("bn-u-nu-latn").resolvedOptions()
+ .numberingSystem
+);
+
+// An invalid -u-nu- value falls back to the locale default.
+assertEquals(
+ "latn",
+ new Intl.RelativeTimeFormat("en-u-nu-abcd").resolvedOptions()
+ .numberingSystem
+);
+
+assertEquals(
+ "arabext",
+ new Intl.RelativeTimeFormat("fa-u-nu-abcd").resolvedOptions()
+ .numberingSystem
+);
+assertEquals(
+ "beng",
+ new Intl.RelativeTimeFormat("bn-u-nu-abcd").resolvedOptions()
+ .numberingSystem
+);
diff --git a/deps/v8/test/intl/relative-time-format/resolved-options.js b/deps/v8/test/intl/relative-time-format/resolved-options.js
index d51a58a960..391b83ae0a 100644
--- a/deps/v8/test/intl/relative-time-format/resolved-options.js
+++ b/deps/v8/test/intl/relative-time-format/resolved-options.js
@@ -13,10 +13,15 @@ assertEquals('long', rtf.resolvedOptions().style);
assertEquals('always', rtf.resolvedOptions().numeric);
// contains style, numeric and locale key
-assertEquals(3, Object.getOwnPropertyNames(rtf.resolvedOptions()).length);
+assertEquals(4, Object.getOwnPropertyNames(rtf.resolvedOptions()).length);
// contains style, numeric and locale key
-assertEquals(3, Object.getOwnPropertyNames(new Intl.RelativeTimeFormat('en').resolvedOptions()).length);
+assertEquals(
+ 4,
+ Object.getOwnPropertyNames(
+ new Intl.RelativeTimeFormat("en").resolvedOptions()
+ ).length
+);
assertEquals(
'short',
@@ -154,9 +159,6 @@ assertEquals(
Intl.RelativeTimeFormat.prototype.resolvedOptions.call(receiver), TypeError);
}
-// The following is not working yet because it depend on the getAvailableLocales
-// work in another path set.
-// TODO(ftang): uncomment the following once that patchset is checked in.
-//assertEquals(
-// 'ar',
-// (new Intl.RelativeTimeFormat(['i-default', 'ar'])).resolvedOptions().locale);
+assertEquals(
+ 'ar',
+ (new Intl.RelativeTimeFormat(['i-default', 'ar'])).resolvedOptions().locale);
diff --git a/deps/v8/test/intl/segmenter/check-lb-option.js b/deps/v8/test/intl/segmenter/check-lb-option.js
new file mode 100644
index 0000000000..0e54d18202
--- /dev/null
+++ b/deps/v8/test/intl/segmenter/check-lb-option.js
@@ -0,0 +1,29 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-intl-segmenter
+
+let invalid_lb = [
+ "invalid",
+ "abce",
+ "breakall",
+ "keepall",
+ "none",
+ "standard",
+ "strict",
+ "normal",
+ "loose",
+];
+
+let locales = [
+ "en",
+ "ja",
+ "zh",
+];
+
+invalid_lb.forEach(function(lb) {
+ let df = new Intl.Segmenter(["en-u-lb-" + lb + "-fo-obar"]);
+ assertEquals("en", df.resolvedOptions().locale);
+});
diff --git a/deps/v8/test/intl/segmenter/constructor-order.js b/deps/v8/test/intl/segmenter/constructor-order.js
new file mode 100644
index 0000000000..e43fb9f963
--- /dev/null
+++ b/deps/v8/test/intl/segmenter/constructor-order.js
@@ -0,0 +1,20 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-intl-segmenter
+
+// Throws only once during construction.
+// Check for all getters to prevent regression.
+// Preserve the order of getter initialization.
+let getCount = 0;
+
+new Intl.Segmenter(['en-US'], {
+ get localeMatcher() {
+ assertEquals(0, getCount++);
+ },
+ get granularity() {
+ assertEquals(1, getCount++);
+ },
+});
+assertEquals(2, getCount);
diff --git a/deps/v8/test/intl/segmenter/constructor.js b/deps/v8/test/intl/segmenter/constructor.js
index 655bb100df..6612e1eab6 100644
--- a/deps/v8/test/intl/segmenter/constructor.js
+++ b/deps/v8/test/intl/segmenter/constructor.js
@@ -42,7 +42,7 @@ assertDoesNotThrow(
() => new Intl.Segmenter(["sr"], { granularity: "grapheme" })
);
-assertDoesNotThrow(() => new Intl.Segmenter(["sr"], { granularity: "line" }));
+assertThrows(() => new Intl.Segmenter(["sr"], { granularity: "line" }), RangeError);
assertThrows(
() => new Intl.Segmenter(["sr"], { granularity: "standard" }),
@@ -61,9 +61,8 @@ assertDoesNotThrow(
() => new Intl.Segmenter(["sr"], { lineBreakStyle: "loose" })
);
-assertThrows(
- () => new Intl.Segmenter(["sr"], { lineBreakStyle: "giant" }),
- RangeError
+assertDoesNotThrow(
+ () => new Intl.Segmenter(["sr"], { lineBreakStyle: "giant" })
);
assertDoesNotThrow(
@@ -138,28 +137,28 @@ assertDoesNotThrow(
})
);
-assertDoesNotThrow(
+assertThrows(
() =>
new Intl.Segmenter(["sr"], {
granularity: "line",
lineBreakStyle: "loose"
- })
+ }), RangeError
);
-assertDoesNotThrow(
+assertThrows(
() =>
new Intl.Segmenter(["sr"], {
granularity: "line",
lineBreakStyle: "normal"
- })
+ }), RangeError
);
-assertDoesNotThrow(
+assertThrows(
() =>
new Intl.Segmenter(["sr"], {
granularity: "line",
lineBreakStyle: "strict"
- })
+ }), RangeError
);
// propagate exception from getter
@@ -172,14 +171,13 @@ assertThrows(
}),
TypeError
);
-assertThrows(
+assertDoesNotThrow(
() =>
new Intl.Segmenter(undefined, {
get lineBreakStyle() {
throw new TypeError("");
}
- }),
- TypeError
+ })
);
assertThrows(
() =>
@@ -190,27 +188,3 @@ assertThrows(
}),
TypeError
);
-
-// Throws only once during construction.
-// Check for all getters to prevent regression.
-// Preserve the order of getter initialization.
-let getCount = 0;
-let localeMatcher = -1;
-let lineBreakStyle = -1;
-let granularity = -1;
-
-new Intl.Segmenter(["en-US"], {
- get localeMatcher() {
- localeMatcher = ++getCount;
- },
- get lineBreakStyle() {
- lineBreakStyle = ++getCount;
- },
- get granularity() {
- granularity = ++getCount;
- }
-});
-
-assertEquals(1, localeMatcher);
-assertEquals(2, lineBreakStyle);
-assertEquals(3, granularity);
diff --git a/deps/v8/test/intl/segmenter/resolved-options.js b/deps/v8/test/intl/segmenter/resolved-options.js
deleted file mode 100644
index 2e2a910ddb..0000000000
--- a/deps/v8/test/intl/segmenter/resolved-options.js
+++ /dev/null
@@ -1,299 +0,0 @@
-// Copyright 2018 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --harmony-intl-segmenter
-
-let segmenter = new Intl.Segmenter([], { granularity: "line" });
-// The default lineBreakStyle is 'normal'
-assertEquals("normal", segmenter.resolvedOptions().lineBreakStyle);
-
-segmenter = new Intl.Segmenter();
-assertEquals(undefined, segmenter.resolvedOptions().lineBreakStyle);
-
-// The default granularity is 'grapheme'
-assertEquals("grapheme", segmenter.resolvedOptions().granularity);
-
-assertEquals(
- undefined,
- new Intl.Segmenter(["sr"], { lineBreakStyle: "strict" }).resolvedOptions()
- .lineBreakStyle
-);
-
-assertEquals(
- "grapheme",
- new Intl.Segmenter(["sr"], { lineBreakStyle: "strict" }).resolvedOptions()
- .granularity
-);
-
-assertEquals(
- undefined,
- new Intl.Segmenter(["sr"], { lineBreakStyle: "normal" }).resolvedOptions()
- .lineBreakStyle
-);
-
-assertEquals(
- "grapheme",
- new Intl.Segmenter(["sr"], { lineBreakStyle: "normal" }).resolvedOptions()
- .granularity
-);
-
-assertEquals(
- undefined,
- new Intl.Segmenter(["sr"], { lineBreakStyle: "loose" }).resolvedOptions()
- .lineBreakStyle
-);
-
-assertEquals(
- "grapheme",
- new Intl.Segmenter(["sr"], { lineBreakStyle: "loose" }).resolvedOptions()
- .granularity
-);
-
-assertEquals(
- "word",
- new Intl.Segmenter(["sr"], { granularity: "word" }).resolvedOptions()
- .granularity
-);
-
-assertEquals(
- undefined,
- new Intl.Segmenter(["sr"], { granularity: "word" }).resolvedOptions()
- .lineBreakStyle
-);
-
-assertEquals(
- "grapheme",
- new Intl.Segmenter(["sr"], { granularity: "grapheme" }).resolvedOptions()
- .granularity
-);
-
-assertEquals(
- undefined,
- new Intl.Segmenter(["sr"], { granularity: "grapheme" }).resolvedOptions()
- .lineBreakStyle
-);
-
-assertEquals(
- "sentence",
- new Intl.Segmenter(["sr"], { granularity: "sentence" }).resolvedOptions()
- .granularity
-);
-
-assertEquals(
- undefined,
- new Intl.Segmenter(["sr"], { granularity: "sentence" }).resolvedOptions()
- .lineBreakStyle
-);
-
-assertEquals(
- "line",
- new Intl.Segmenter(["sr"], { granularity: "line" }).resolvedOptions()
- .granularity
-);
-
-assertEquals(
- "normal",
- new Intl.Segmenter(["sr"], { granularity: "line" }).resolvedOptions()
- .lineBreakStyle
-);
-
-assertEquals(
- "grapheme",
- new Intl.Segmenter(["sr"], {
- lineBreakStyle: "loose",
- granularity: "grapheme"
- }).resolvedOptions().granularity
-);
-
-assertEquals(
- undefined,
- new Intl.Segmenter(["sr"], {
- lineBreakStyle: "loose",
- granularity: "grapheme"
- }).resolvedOptions().lineBreakStyle
-);
-
-assertEquals(
- "grapheme",
- new Intl.Segmenter(["sr"], {
- lineBreakStyle: "strict",
- granularity: "grapheme"
- }).resolvedOptions().granularity
-);
-
-assertEquals(
- undefined,
- new Intl.Segmenter(["sr"], {
- lineBreakStyle: "strict",
- granularity: "grapheme"
- }).resolvedOptions().lineBreakStyle
-);
-
-assertEquals(
- "grapheme",
- new Intl.Segmenter(["sr"], {
- lineBreakStyle: "normal",
- granularity: "grapheme"
- }).resolvedOptions().granularity
-);
-
-assertEquals(
- undefined,
- new Intl.Segmenter(["sr"], {
- lineBreakStyle: "normal",
- granularity: "grapheme"
- }).resolvedOptions().lineBreakStyle
-);
-
-assertEquals(
- "word",
- new Intl.Segmenter(["sr"], {
- lineBreakStyle: "loose",
- granularity: "word"
- }).resolvedOptions().granularity
-);
-
-assertEquals(
- undefined,
- new Intl.Segmenter(["sr"], {
- lineBreakStyle: "loose",
- granularity: "word"
- }).resolvedOptions().lineBreakStyle
-);
-
-assertEquals(
- "word",
- new Intl.Segmenter(["sr"], {
- lineBreakStyle: "strict",
- granularity: "word"
- }).resolvedOptions().granularity
-);
-
-assertEquals(
- undefined,
- new Intl.Segmenter(["sr"], {
- lineBreakStyle: "strict",
- granularity: "word"
- }).resolvedOptions().lineBreakStyle
-);
-
-assertEquals(
- "word",
- new Intl.Segmenter(["sr"], {
- lineBreakStyle: "normal",
- granularity: "word"
- }).resolvedOptions().granularity
-);
-
-assertEquals(
- undefined,
- new Intl.Segmenter(["sr"], {
- lineBreakStyle: "normal",
- granularity: "word"
- }).resolvedOptions().lineBreakStyle
-);
-
-assertEquals(
- "sentence",
- new Intl.Segmenter(["sr"], {
- lineBreakStyle: "loose",
- granularity: "sentence"
- }).resolvedOptions().granularity
-);
-
-assertEquals(
- undefined,
- new Intl.Segmenter(["sr"], {
- lineBreakStyle: "loose",
- granularity: "sentence"
- }).resolvedOptions().lineBreakStyle
-);
-
-assertEquals(
- "sentence",
- new Intl.Segmenter(["sr"], {
- lineBreakStyle: "strict",
- granularity: "sentence"
- }).resolvedOptions().granularity
-);
-
-assertEquals(
- undefined,
- new Intl.Segmenter(["sr"], {
- lineBreakStyle: "strict",
- granularity: "sentence"
- }).resolvedOptions().lineBreakStyle
-);
-
-assertEquals(
- "sentence",
- new Intl.Segmenter(["sr"], {
- lineBreakStyle: "normal",
- granularity: "sentence"
- }).resolvedOptions().granularity
-);
-
-assertEquals(
- "normal",
- new Intl.Segmenter(["sr"], {
- lineBreakStyle: "normal",
- granularity: "line"
- }).resolvedOptions().lineBreakStyle
-);
-
-assertEquals(
- "line",
- new Intl.Segmenter(["sr"], {
- lineBreakStyle: "loose",
- granularity: "line"
- }).resolvedOptions().granularity
-);
-
-assertEquals(
- "loose",
- new Intl.Segmenter(["sr"], {
- lineBreakStyle: "loose",
- granularity: "line"
- }).resolvedOptions().lineBreakStyle
-);
-
-assertEquals(
- "line",
- new Intl.Segmenter(["sr"], {
- lineBreakStyle: "strict",
- granularity: "line"
- }).resolvedOptions().granularity
-);
-
-assertEquals(
- "strict",
- new Intl.Segmenter(["sr"], {
- lineBreakStyle: "strict",
- granularity: "line"
- }).resolvedOptions().lineBreakStyle
-);
-
-assertEquals(
- "line",
- new Intl.Segmenter(["sr"], {
- lineBreakStyle: "normal",
- granularity: "line"
- }).resolvedOptions().granularity
-);
-
-assertEquals(
- "normal",
- new Intl.Segmenter(["sr"], {
- lineBreakStyle: "normal",
- granularity: "line"
- }).resolvedOptions().lineBreakStyle
-);
-
-assertEquals("ar", new Intl.Segmenter(["ar"]).resolvedOptions().locale);
-
-assertEquals("ar", new Intl.Segmenter(["ar", "en"]).resolvedOptions().locale);
-
-assertEquals("fr", new Intl.Segmenter(["fr", "en"]).resolvedOptions().locale);
-
-assertEquals("ar", new Intl.Segmenter(["xyz", "ar"]).resolvedOptions().locale);
diff --git a/deps/v8/test/intl/segmenter/segment-grapheme-following.js b/deps/v8/test/intl/segmenter/segment-grapheme-following.js
new file mode 100644
index 0000000000..4cfd8f4aef
--- /dev/null
+++ b/deps/v8/test/intl/segmenter/segment-grapheme-following.js
@@ -0,0 +1,38 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-intl-segmenter
+
+const seg = new Intl.Segmenter([], {granularity: "grapheme"});
+for (const text of [
+ "Hello world!", // English
+ " Hello world! ", // English with space before/after
+ " Hello world? Foo bar!", // English
+ "Jedovatou mambu objevila žena v zahrádkářské kolonii.", // Czech
+ "Việt Nam: Nhất thể hóa sẽ khác Trung Quốc?", // Vietnamese
+ "Σοβαρές ενστάσεις Κομισιόν για τον προϋπολογισμό της Ιταλίας", // Greek
+ "Решение Индии о покупке российских С-400 расценили как вызов США", // Russian
+ "הרופא שהציל נשים והנערה ששועבדה ע", // Hebrew,
+ "ترامب للملك سلمان: أنا جاد للغاية.. عليك دفع المزيد", // Arabic
+ "भारत की एस 400 मिसाइल के मुकाबले पाक की थाड, जानें कौन कितना ताकतवर", // Hindi
+ "ரெட் அலர்ட் எச்சரிக்கை; புதுச்சேரியில் நாளை அரசு விடுமுறை!", // Tamil
+ "'ఉత్తర్వులు అందే వరకు ఓటర్ల తుది జాబితాను వెబ్‌సైట్లో పెట్టవద్దు'", // Telugu
+ "台北》抹黑柯P失敗?朱學恒酸:姚文智氣pupu嗆大老闆", // Chinese
+ "วัดไทรตีระฆังเบาลงช่วงเข้าพรรษา เจ้าอาวาสเผยคนร้องเรียนรับผลกรรมแล้ว", // Thai
+ "九州北部の一部が暴風域に入りました(日直予報士 2018年10月06日) - 日本気象協会 tenki.jp", // Japanese
+ "법원 “다스 지분 처분권·수익권 모두 MB가 보유”", // Korean
+ ]) {
+ const iter = seg.segment(text);
+ let prev = 0;
+ let segments = [];
+ while (!iter.following()) {
+ assertEquals(undefined, iter.breakType);
+ assertTrue(iter.index >= 0);
+ assertTrue(iter.index <= text.length);
+ assertTrue(iter.index > prev);
+ segments.push(text.substring(prev, iter.index));
+ prev = iter.index;
+ }
+ assertEquals(text, segments.join(""));
+}
diff --git a/deps/v8/test/intl/segmenter/segment-grapheme-iterable.js b/deps/v8/test/intl/segmenter/segment-grapheme-iterable.js
new file mode 100644
index 0000000000..cd18590ee1
--- /dev/null
+++ b/deps/v8/test/intl/segmenter/segment-grapheme-iterable.js
@@ -0,0 +1,45 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-intl-segmenter
+
+const seg = new Intl.Segmenter([], {granularity: "grapheme"});
+for (const text of [
+ "Hello world!", // English
+ " Hello world! ", // English with space before/after
+ " Hello world? Foo bar!", // English
+ "Jedovatou mambu objevila žena v zahrádkářské kolonii.", // Czech
+ "Việt Nam: Nhất thể hóa sẽ khác Trung Quốc?", // Vietnamese
+ "Σοβαρές ενστάσεις Κομισιόν για τον προϋπολογισμό της Ιταλίας", // Greek
+ "Решение Индии о покупке российских С-400 расценили как вызов США", // Russian
+ "הרופא שהציל נשים והנערה ששועבדה ע", // Hebrew,
+ "ترامب للملك سلمان: أنا جاد للغاية.. عليك دفع المزيد", // Arabic
+ "भारत की एस 400 मिसाइल के मुकाबले पाक की थाड, जानें कौन कितना ताकतवर", // Hindi
+ "ரெட் அலர்ட் எச்சரிக்கை; புதுச்சேரியில் நாளை அரசு விடுமுறை!", // Tamil
+ "'ఉత్తర్వులు అందే వరకు ఓటర్ల తుది జాబితాను వెబ్‌సైట్లో పెట్టవద్దు'", // Telugu
+ "台北》抹黑柯P失敗?朱學恒酸:姚文智氣pupu嗆大老闆", // Chinese
+ "วัดไทรตีระฆังเบาลงช่วงเข้าพรรษา เจ้าอาวาสเผยคนร้องเรียนรับผลกรรมแล้ว", // Thai
+ "九州北部の一部が暴風域に入りました(日直予報士 2018年10月06日) - 日本気象協会 tenki.jp", // Japanese
+ "법원 “다스 지분 처분권·수익권 모두 MB가 보유”", // Korean
+ ]) {
+ let segments = [];
+  // Create another %SegmentIterator% to compare against the one created
+  // implicitly by the for-of loop.
+ let iter = seg.segment(text);
+ let prev = 0;
+ for (const v of seg.segment(text)) {
+ assertEquals(undefined, v.breakType);
+ assertEquals("string", typeof v.segment);
+ assertTrue(v.segment.length > 0);
+ segments.push(v.segment);
+
+    // Manually advance the iterator.
+ assertFalse(iter.following());
+ assertEquals(iter.breakType, v.breakType);
+ assertEquals(text.substring(prev, iter.index), v.segment);
+ prev = iter.index;
+ }
+ assertTrue(iter.following());
+ assertEquals(text, segments.join(''));
+}
diff --git a/deps/v8/test/intl/segmenter/segment-grapheme-next.js b/deps/v8/test/intl/segmenter/segment-grapheme-next.js
new file mode 100644
index 0000000000..df7f82acff
--- /dev/null
+++ b/deps/v8/test/intl/segmenter/segment-grapheme-next.js
@@ -0,0 +1,40 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-intl-segmenter
+
+const seg = new Intl.Segmenter([], {granularity: "grapheme"});
+for (const text of [
+ "Hello world!", // English
+ " Hello world! ", // English with space before/after
+ " Hello world? Foo bar!", // English
+ "Jedovatou mambu objevila žena v zahrádkářské kolonii.", // Czech
+ "Việt Nam: Nhất thể hóa sẽ khác Trung Quốc?", // Vietnamese
+ "Σοβαρές ενστάσεις Κομισιόν για τον προϋπολογισμό της Ιταλίας", // Greek
+ "Решение Индии о покупке российских С-400 расценили как вызов США", // Russian
+ "הרופא שהציל נשים והנערה ששועבדה ע", // Hebrew,
+ "ترامب للملك سلمان: أنا جاد للغاية.. عليك دفع المزيد", // Arabic
+ "भारत की एस 400 मिसाइल के मुकाबले पाक की थाड, जानें कौन कितना ताकतवर", // Hindi
+ "ரெட் அலர்ட் எச்சரிக்கை; புதுச்சேரியில் நாளை அரசு விடுமுறை!", // Tamil
+ "'ఉత్తర్వులు అందే వరకు ఓటర్ల తుది జాబితాను వెబ్‌సైట్లో పెట్టవద్దు'", // Telugu
+ "台北》抹黑柯P失敗?朱學恒酸:姚文智氣pupu嗆大老闆", // Chinese
+ "วัดไทรตีระฆังเบาลงช่วงเข้าพรรษา เจ้าอาวาสเผยคนร้องเรียนรับผลกรรมแล้ว", // Thai
+ "九州北部の一部が暴風域に入りました(日直予報士 2018年10月06日) - 日本気象協会 tenki.jp", // Japanese
+ "법원 “다스 지분 처분권·수익권 모두 MB가 보유”", // Korean
+ ]) {
+ const iter = seg.segment(text);
+ let segments = [];
+ let oldPos = -1;
+ for (let result = iter.next(); !result.done; result = iter.next()) {
+ const v = result.value;
+ assertEquals(undefined, v.breakType);
+ assertEquals("string", typeof v.segment);
+ assertTrue(v.segment.length > 0);
+ segments.push(v.segment);
+ assertEquals("number", typeof v.index);
+ assertTrue(oldPos < v.index);
+ oldPos = v.index;
+ }
+ assertEquals(text, segments.join(''));
+}
diff --git a/deps/v8/test/intl/segmenter/segment-grapheme-preceding.js b/deps/v8/test/intl/segmenter/segment-grapheme-preceding.js
new file mode 100644
index 0000000000..9ee7b7c811
--- /dev/null
+++ b/deps/v8/test/intl/segmenter/segment-grapheme-preceding.js
@@ -0,0 +1,44 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-intl-segmenter
+
+const seg = new Intl.Segmenter([], {granularity: "grapheme"});
+for (const text of [
+ "Hello world!", // English
+ " Hello world! ", // English with space before/after
+ " Hello world? Foo bar!", // English
+ "Jedovatou mambu objevila žena v zahrádkářské kolonii.", // Czech
+ "Việt Nam: Nhất thể hóa sẽ khác Trung Quốc?", // Vietnamese
+ "Σοβαρές ενστάσεις Κομισιόν για τον προϋπολογισμό της Ιταλίας", // Greek
+ "Решение Индии о покупке российских С-400 расценили как вызов США", // Russian
+ "הרופא שהציל נשים והנערה ששועבדה ע", // Hebrew,
+ "ترامب للملك سلمان: أنا جاد للغاية.. عليك دفع المزيد", // Arabic
+ "भारत की एस 400 मिसाइल के मुकाबले पाक की थाड, जानें कौन कितना ताकतवर", // Hindi
+ "ரெட் அலர்ட் எச்சரிக்கை; புதுச்சேரியில் நாளை அரசு விடுமுறை!", // Tamil
+ "'ఉత్తర్వులు అందే వరకు ఓటర్ల తుది జాబితాను వెబ్‌సైట్లో పెట్టవద్దు'", // Telugu
+ "台北》抹黑柯P失敗?朱學恒酸:姚文智氣pupu嗆大老闆", // Chinese
+ "วัดไทรตีระฆังเบาลงช่วงเข้าพรรษา เจ้าอาวาสเผยคนร้องเรียนรับผลกรรมแล้ว", // Thai
+ "九州北部の一部が暴風域に入りました(日直予報士 2018年10月06日) - 日本気象協会 tenki.jp", // Japanese
+ "법원 “다스 지분 처분권·수익권 모두 MB가 보유”", // Korean
+ ]) {
+ const iter = seg.segment(text);
+ let prev = text.length;
+ let segments = [];
+ iter.preceding(prev);
+ assertEquals(undefined, iter.breakType);
+ assertTrue(iter.index >= 0);
+ assertTrue(iter.index < prev);
+ segments.push(text.substring(iter.index, prev));
+ prev = iter.index;
+ while (!iter.preceding()) {
+ assertEquals(undefined, iter.breakType);
+ assertTrue(iter.index >= 0);
+ assertTrue(iter.index <= text.length);
+ assertTrue(iter.index < prev);
+ segments.push(text.substring(iter.index, prev));
+ prev = iter.index;
+ }
+ assertEquals(text, segments.reverse().join(""));
+}
diff --git a/deps/v8/test/intl/segmenter/segment-grapheme.js b/deps/v8/test/intl/segmenter/segment-grapheme.js
new file mode 100644
index 0000000000..1515f0997d
--- /dev/null
+++ b/deps/v8/test/intl/segmenter/segment-grapheme.js
@@ -0,0 +1,29 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-intl-segmenter
+
+const seg = new Intl.Segmenter([], {granularity: "grapheme"})
+for (const text of [
+ "Hello world!", // English
+ " Hello world! ", // English with space before/after
+ " Hello world? Foo bar!", // English
+ "Jedovatou mambu objevila žena v zahrádkářské kolonii.", // Czech
+ "Việt Nam: Nhất thể hóa sẽ khác Trung Quốc?", // Vietnamese
+ "Σοβαρές ενστάσεις Κομισιόν για τον προϋπολογισμό της Ιταλίας", // Greek
+ "Решение Индии о покупке российских С-400 расценили как вызов США", // Russian
+ "הרופא שהציל נשים והנערה ששועבדה ע", // Hebrew,
+ "ترامب للملك سلمان: أنا جاد للغاية.. عليك دفع المزيد", // Arabic
+ "भारत की एस 400 मिसाइल के मुकाबले पाक की थाड, जानें कौन कितना ताकतवर", // Hindi
+ "ரெட் அலர்ட் எச்சரிக்கை; புதுச்சேரியில் நாளை அரசு விடுமுறை!", // Tamil
+ "'ఉత్తర్వులు అందే వరకు ఓటర్ల తుది జాబితాను వెబ్‌సైట్లో పెట్టవద్దు'", // Telugu
+ "台北》抹黑柯P失敗?朱學恒酸:姚文智氣pupu嗆大老闆", // Chinese
+ "วัดไทรตีระฆังเบาลงช่วงเข้าพรรษา เจ้าอาวาสเผยคนร้องเรียนรับผลกรรมแล้ว", // Thai
+ "九州北部の一部が暴風域に入りました(日直予報士 2018年10月06日) - 日本気象協会 tenki.jp", // Japanese
+ "법원 “다스 지분 처분권·수익권 모두 MB가 보유”", // Korean
+ ]) {
+ const iter = seg.segment(text);
+ assertEquals(undefined, iter.breakType);
+ assertEquals(0, iter.index);
+}
diff --git a/deps/v8/test/intl/segmenter/segment-iterator-breakType.js b/deps/v8/test/intl/segmenter/segment-iterator-breakType.js
new file mode 100644
index 0000000000..ea5f523519
--- /dev/null
+++ b/deps/v8/test/intl/segmenter/segment-iterator-breakType.js
@@ -0,0 +1,11 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-intl-segmenter
+
+const segmenter = new Intl.Segmenter();
+const text = "Hello World, Test 123! Foo Bar. How are you?";
+const iter = segmenter.segment(text);
+
+assertEquals(undefined, iter.breakType);
diff --git a/deps/v8/test/intl/segmenter/segment-iterator-following.js b/deps/v8/test/intl/segmenter/segment-iterator-following.js
index a28d6c31d1..14f6dd16c4 100644
--- a/deps/v8/test/intl/segmenter/segment-iterator-following.js
+++ b/deps/v8/test/intl/segmenter/segment-iterator-following.js
@@ -10,6 +10,15 @@ const iter = segmenter.segment(text);
assertEquals("function", typeof iter.following);
+// ToNumber("ABC") return NaN, ToInteger("ABC") return +0, ToIndex("ABC") return 0
+assertDoesNotThrow(() => iter.following("ABC"));
+// ToNumber(null) return +0, ToInteger(null) return +0, ToIndex(null) return 0
+assertDoesNotThrow(() => iter.following(null));
+// ToNumber(1.4) return 1.4, ToInteger(1.4) return 1, ToIndex(1.4) return 1
+assertDoesNotThrow(() => iter.following(1.4));
+
+assertThrows(() => iter.following(-3), RangeError);
+
// 1.5.3.2 %SegmentIteratorPrototype%.following( [ from ] )
// 3.b If from >= iterator.[[SegmentIteratorString]], throw a RangeError exception.
assertDoesNotThrow(() => iter.following(text.length - 1));
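
The coercion chain asserted above can be condensed into a minimal sketch, assuming the ES2019 ToNumber/ToInteger/ToIndex steps (toIndexSketch is an illustrative name, not part of the patch, and the real ToIndex also has an upper-bound check this sketch omits):

function toIndexSketch(value) {
  const number = Number(value);             // ToNumber: "ABC" -> NaN, null -> +0, 1.4 -> 1.4
  const integer = Math.trunc(number) || 0;  // ToInteger: NaN -> +0, 1.4 -> 1 (|| 0 also folds -0)
  if (integer < 0) throw new RangeError("index out of range");  // e.g. following(-3)
  return integer;                           // "ABC" -> 0, null -> 0, 1.4 -> 1
}
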
diff --git a/deps/v8/test/intl/segmenter/segment-iterator-ownPropertyDescriptor.js b/deps/v8/test/intl/segmenter/segment-iterator-ownPropertyDescriptor.js
new file mode 100644
index 0000000000..3021c81c63
--- /dev/null
+++ b/deps/v8/test/intl/segmenter/segment-iterator-ownPropertyDescriptor.js
@@ -0,0 +1,91 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-intl-segmenter
+
+let seg = new Intl.Segmenter();
+let descriptor = Object.getOwnPropertyDescriptor(
+ Intl.Segmenter, "supportedLocalesOf");
+assertTrue(descriptor.writable);
+assertFalse(descriptor.enumerable);
+assertTrue(descriptor.configurable);
+
+// ecma402 #sec-Intl.Segmenter.prototype
+// Intl.Segmenter.prototype
+// The value of Intl.Segmenter.prototype is %SegmenterPrototype%.
+// This property has the attributes
+// { [[Writable]]: false, [[Enumerable]]: false, [[Configurable]]: false }.
+descriptor = Object.getOwnPropertyDescriptor(Intl.Segmenter, "prototype");
+assertFalse(descriptor.writable);
+assertFalse(descriptor.enumerable);
+assertFalse(descriptor.configurable);
+
+for (let func of ["segment", "resolvedOptions"]) {
+ let descriptor = Object.getOwnPropertyDescriptor(
+ Intl.Segmenter.prototype, func);
+ assertTrue(descriptor.writable);
+ assertFalse(descriptor.enumerable);
+ assertTrue(descriptor.configurable);
+}
+
+let segmentIterator = seg.segment('text');
+let prototype = Object.getPrototypeOf(segmentIterator);
+for (let func of ["next", "following", "preceding"]) {
+ let descriptor = Object.getOwnPropertyDescriptor(prototype, func);
+ assertTrue(descriptor.writable);
+ assertFalse(descriptor.enumerable);
+ assertTrue(descriptor.configurable);
+}
+
+function checkGetterProperty(prototype, property) {
+ let desc = Object.getOwnPropertyDescriptor(prototype, property);
+ assertEquals(`get ${property}`, desc.get.name);
+ assertEquals('function', typeof desc.get);
+ assertEquals(undefined, desc.set);
+ assertFalse(desc.enumerable);
+ assertTrue(desc.configurable);
+}
+
+// Test the descriptor is correct for properties.
+checkGetterProperty(prototype, 'index');
+checkGetterProperty(prototype, 'breakType');
+
+// Test the SegmentIteratorPrototype methods are called with same
+// receiver and won't throw.
+assertDoesNotThrow(() => prototype.next.call(segmentIterator));
+assertDoesNotThrow(() => prototype.following.call(segmentIterator));
+assertDoesNotThrow(() => prototype.preceding.call(segmentIterator));
+
+// Test the SegmentIteratorPrototype methods are called with a different
+// receiver and correctly throw.
+var otherReceivers = [
+ 1, 123.45, undefined, null, "string", true, false,
+ Intl, Intl.Segmenter, Intl.Segmenter.prototype,
+ prototype,
+ new Intl.Segmenter(),
+ new Intl.Collator(),
+ new Intl.DateTimeFormat(),
+ new Intl.NumberFormat(),
+];
+for (let rec of otherReceivers) {
+ assertThrows(() => prototype.next.call(rec), TypeError);
+ assertThrows(() => prototype.following.call(rec), TypeError);
+ assertThrows(() => prototype.preceding.call(rec), TypeError);
+}
+
+// Check the property of the return object of next()
+let nextReturn = segmentIterator.next();
+
+function checkProperty(obj, property) {
+ let desc = Object.getOwnPropertyDescriptor(obj, property);
+ assertTrue(desc.writable);
+ assertTrue(desc.enumerable);
+ assertTrue(desc.configurable);
+}
+
+checkProperty(nextReturn, 'done');
+checkProperty(nextReturn, 'value');
+checkProperty(nextReturn.value, 'segment');
+checkProperty(nextReturn.value, 'breakType');
+checkProperty(nextReturn.value, 'index');
diff --git a/deps/v8/test/intl/segmenter/segment-iterator-position.js b/deps/v8/test/intl/segmenter/segment-iterator-position.js
new file mode 100644
index 0000000000..649076b454
--- /dev/null
+++ b/deps/v8/test/intl/segmenter/segment-iterator-position.js
@@ -0,0 +1,11 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-intl-segmenter
+
+const segmenter = new Intl.Segmenter();
+const text = "Hello World, Test 123! Foo Bar. How are you?";
+const iter = segmenter.segment(text);
+
+assertEquals(0, iter.index);
diff --git a/deps/v8/test/intl/segmenter/segment-iterator-preceding.js b/deps/v8/test/intl/segmenter/segment-iterator-preceding.js
index 07d73376f2..09ba2847cc 100644
--- a/deps/v8/test/intl/segmenter/segment-iterator-preceding.js
+++ b/deps/v8/test/intl/segmenter/segment-iterator-preceding.js
@@ -10,6 +10,15 @@ const iter = segmenter.segment(text);
assertEquals("function", typeof iter.preceding);
+// ToNumber("ABC") return NaN, ToInteger("ABC") return +0, ToIndex("ABC") return 0
+assertThrows(() => iter.preceding("ABC"), RangeError);
+// ToNumber(null) return +0, ToInteger(null) return +0, ToIndex(null) return 0
+assertThrows(() => iter.preceding(null), RangeError);
+assertThrows(() => iter.preceding(-3), RangeError);
+
+// ToNumber(1.4) return 1.4, ToInteger(1.4) return 1, ToIndex(1.4) return 1
+assertDoesNotThrow(() => iter.preceding(1.4));
+
// 1.5.3.3 %SegmentIteratorPrototype%.preceding( [ from ] )
// 3.b If ... from = 0, throw a RangeError exception.
assertThrows(() => iter.preceding(0), RangeError);
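
The asymmetry with following() deserves a one-line contrast; a small sketch of the bound checks, assuming the flagged %SegmentIterator% API in this V8 version:

const it = new Intl.Segmenter().segment("abc");
it.following("ABC");    // ok: ToIndex("ABC") is 0, and following() only rejects from >= length
// it.preceding("ABC")  // would throw RangeError: ToIndex("ABC") is 0, while
//                      // preceding() requires 0 < from <= length
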
diff --git a/deps/v8/test/intl/segmenter/segment-iterator.js b/deps/v8/test/intl/segmenter/segment-iterator.js
index 0d0c31b405..696ffab554 100644
--- a/deps/v8/test/intl/segmenter/segment-iterator.js
+++ b/deps/v8/test/intl/segmenter/segment-iterator.js
@@ -4,10 +4,12 @@
// Flags: --harmony-intl-segmenter
-const segmenter = new Intl.Segmenter();
const text = "Hello World, Test 123! Foo Bar. How are you?";
-const iter = segmenter.segment(text);
+for (const granularity of ["grapheme", "word", "sentence"]) {
+ const segmenter = new Intl.Segmenter("en", { granularity });
+ const iter = segmenter.segment(text);
-assertEquals("number", typeof iter.position);
-assertEquals(0, iter.position);
-assertEquals("strig", typeof iter.breakType);
+ assertEquals("number", typeof iter.index);
+ assertEquals(0, iter.index);
+ assertEquals(undefined, iter.breakType);
+}
diff --git a/deps/v8/test/intl/segmenter/segment-sentence-following.js b/deps/v8/test/intl/segmenter/segment-sentence-following.js
new file mode 100644
index 0000000000..c6b4aa25d6
--- /dev/null
+++ b/deps/v8/test/intl/segmenter/segment-sentence-following.js
@@ -0,0 +1,38 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-intl-segmenter
+
+const seg = new Intl.Segmenter([], {granularity: "sentence"})
+for (const text of [
+ "Hello world!", // English
+ " Hello world! ", // English with space before/after
+ " Hello world? Foo bar!", // English
+ "Jedovatou mambu objevila žena v zahrádkářské kolonii.", // Czech
+ "Việt Nam: Nhất thể hóa sẽ khác Trung Quốc?", // Vietnamese
+ "Σοβαρές ενστάσεις Κομισιόν για τον προϋπολογισμό της Ιταλίας", // Greek
+ "Решение Индии о покупке российских С-400 расценили как вызов США", // Russian
+ "הרופא שהציל נשים והנערה ששועבדה ע", // Hebrew,
+ "ترامب للملك سلمان: أنا جاد للغاية.. عليك دفع المزيد", // Arabic
+ "भारत की एस 400 मिसाइल के मुकाबले पाक की थाड, जानें कौन कितना ताकतवर", // Hindi
+ "ரெட் அலர்ட் எச்சரிக்கை; புதுச்சேரியில் நாளை அரசு விடுமுறை!", // Tamil
+ "'ఉత్తర్వులు అందే వరకు ఓటర్ల తుది జాబితాను వెబ్‌సైట్లో పెట్టవద్దు'", // Telugu
+ "台北》抹黑柯P失敗?朱學恒酸:姚文智氣pupu嗆大老闆", // Chinese
+ "วัดไทรตีระฆังเบาลงช่วงเข้าพรรษา เจ้าอาวาสเผยคนร้องเรียนรับผลกรรมแล้ว", // Thai
+ "九州北部の一部が暴風域に入りました(日直予報士 2018年10月06日) - 日本気象協会 tenki.jp", // Japanese
+ "법원 “다스 지분 처분권·수익권 모두 MB가 보유”", // Korean
+ ]) {
+ const iter = seg.segment(text);
+ let prev = 0;
+ let segments = [];
+ while (!iter.following()) {
+ assertTrue(["sep", "term"].includes(iter.breakType), iter.breakType);
+ assertTrue(iter.index >= 0);
+ assertTrue(iter.index <= text.length);
+ assertTrue(iter.index > prev);
+ segments.push(text.substring(prev, iter.index));
+ prev = iter.index;
+ }
+ assertEquals(text, segments.join(""));
+}
diff --git a/deps/v8/test/intl/segmenter/segment-sentence-iterable.js b/deps/v8/test/intl/segmenter/segment-sentence-iterable.js
new file mode 100644
index 0000000000..a84807bfb0
--- /dev/null
+++ b/deps/v8/test/intl/segmenter/segment-sentence-iterable.js
@@ -0,0 +1,45 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-intl-segmenter
+
+const seg = new Intl.Segmenter([], {granularity: "sentence"})
+for (const text of [
+ "Hello world!", // English
+ " Hello world! ", // English with space before/after
+ " Hello world? Foo bar!", // English
+ "Jedovatou mambu objevila žena v zahrádkářské kolonii.", // Czech
+ "Việt Nam: Nhất thể hóa sẽ khác Trung Quốc?", // Vietnamese
+ "Σοβαρές ενστάσεις Κομισιόν για τον προϋπολογισμό της Ιταλίας", // Greek
+ "Решение Индии о покупке российских С-400 расценили как вызов США", // Russian
+ "הרופא שהציל נשים והנערה ששועבדה ע", // Hebrew,
+ "ترامب للملك سلمان: أنا جاد للغاية.. عليك دفع المزيد", // Arabic
+ "भारत की एस 400 मिसाइल के मुकाबले पाक की थाड, जानें कौन कितना ताकतवर", // Hindi
+ "ரெட் அலர்ட் எச்சரிக்கை; புதுச்சேரியில் நாளை அரசு விடுமுறை!", // Tamil
+ "'ఉత్తర్వులు అందే వరకు ఓటర్ల తుది జాబితాను వెబ్‌సైట్లో పెట్టవద్దు'", // Telugu
+ "台北》抹黑柯P失敗?朱學恒酸:姚文智氣pupu嗆大老闆", // Chinese
+ "วัดไทรตีระฆังเบาลงช่วงเข้าพรรษา เจ้าอาวาสเผยคนร้องเรียนรับผลกรรมแล้ว", // Thai
+ "九州北部の一部が暴風域に入りました(日直予報士 2018年10月06日) - 日本気象協会 tenki.jp", // Japanese
+ "법원 “다스 지분 처분권·수익권 모두 MB가 보유”", // Korean
+ ]) {
+ let segments = [];
+ // Create another %SegmentIterator% to compare its results with those of
+ // the one created in the for-of loop.
+ let iter = seg.segment(text);
+ let prev = 0;
+ for (const v of seg.segment(text)) {
+ assertTrue(["sep", "term"].includes(v.breakType), v.breakType);
+ assertEquals("string", typeof v.segment);
+ assertTrue(v.segment.length > 0);
+ segments.push(v.segment);
+
+ // manually advance the iter.
+ assertFalse(iter.following());
+ assertEquals(iter.breakType, v.breakType);
+ assertEquals(text.substring(prev, iter.index), v.segment);
+ prev = iter.index;
+ }
+ assertTrue(iter.following());
+ assertEquals(text, segments.join(''));
+}
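
The two traversal styles cross-checked above boil down to a short usage sketch (the sample string and its expected split are illustrative, assuming ICU's default sentence rules):

const s = new Intl.Segmenter("en", { granularity: "sentence" });
const parts = [];
for (const { segment } of s.segment("Hi. Bye.")) parts.push(segment);
// parts is ["Hi. ", "Bye."] -- the same slices the manual following() loop
// recovers via text.substring(prev, iter.index)
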
diff --git a/deps/v8/test/intl/segmenter/segment-sentence-next.js b/deps/v8/test/intl/segmenter/segment-sentence-next.js
new file mode 100644
index 0000000000..466eac54d3
--- /dev/null
+++ b/deps/v8/test/intl/segmenter/segment-sentence-next.js
@@ -0,0 +1,40 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-intl-segmenter
+
+const seg = new Intl.Segmenter([], {granularity: "sentence"})
+for (const text of [
+ "Hello world!", // English
+ " Hello world! ", // English with space before/after
+ " Hello world? Foo bar!", // English
+ "Jedovatou mambu objevila žena v zahrádkářské kolonii.", // Czech
+ "Việt Nam: Nhất thể hóa sẽ khác Trung Quốc?", // Vietnamese
+ "Σοβαρές ενστάσεις Κομισιόν για τον προϋπολογισμό της Ιταλίας", // Greek
+ "Решение Индии о покупке российских С-400 расценили как вызов США", // Russian
+ "הרופא שהציל נשים והנערה ששועבדה ע", // Hebrew,
+ "ترامب للملك سلمان: أنا جاد للغاية.. عليك دفع المزيد", // Arabic
+ "भारत की एस 400 मिसाइल के मुकाबले पाक की थाड, जानें कौन कितना ताकतवर", // Hindi
+ "ரெட் அலர்ட் எச்சரிக்கை; புதுச்சேரியில் நாளை அரசு விடுமுறை!", // Tamil
+ "'ఉత్తర్వులు అందే వరకు ఓటర్ల తుది జాబితాను వెబ్‌సైట్లో పెట్టవద్దు'", // Telugu
+ "台北》抹黑柯P失敗?朱學恒酸:姚文智氣pupu嗆大老闆", // Chinese
+ "วัดไทรตีระฆังเบาลงช่วงเข้าพรรษา เจ้าอาวาสเผยคนร้องเรียนรับผลกรรมแล้ว", // Thai
+ "九州北部の一部が暴風域に入りました(日直予報士 2018年10月06日) - 日本気象協会 tenki.jp", // Japanese
+ "법원 “다스 지분 처분권·수익권 모두 MB가 보유”", // Korean
+ ]) {
+ const iter = seg.segment(text);
+ let segments = [];
+ let oldPos = -1;
+ for (let result = iter.next(); !result.done; result = iter.next()) {
+ const v = result.value;
+ assertTrue(["sep", "term"].includes(iter.breakType), iter.breakType);
+ assertEquals("string", typeof v.segment);
+ assertTrue(v.segment.length > 0);
+ segments.push(v.segment);
+ assertEquals("number", typeof v.index);
+ assertTrue(oldPos < v.index);
+ oldPos = v.index;
+ }
+ assertEquals(text, segments.join(''));
+}
diff --git a/deps/v8/test/intl/segmenter/segment-sentence-preceding.js b/deps/v8/test/intl/segmenter/segment-sentence-preceding.js
new file mode 100644
index 0000000000..bbc17eecce
--- /dev/null
+++ b/deps/v8/test/intl/segmenter/segment-sentence-preceding.js
@@ -0,0 +1,44 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-intl-segmenter
+
+const seg = new Intl.Segmenter([], {granularity: "sentence"})
+for (const text of [
+ "Hello world!", // English
+ " Hello world! ", // English with space before/after
+ " Hello world? Foo bar!", // English
+ "Jedovatou mambu objevila žena v zahrádkářské kolonii.", // Czech
+ "Việt Nam: Nhất thể hóa sẽ khác Trung Quốc?", // Vietnamese
+ "Σοβαρές ενστάσεις Κομισιόν για τον προϋπολογισμό της Ιταλίας", // Greek
+ "Решение Индии о покупке российских С-400 расценили как вызов США", // Russian
+ "הרופא שהציל נשים והנערה ששועבדה ע", // Hebrew,
+ "ترامب للملك سلمان: أنا جاد للغاية.. عليك دفع المزيد", // Arabic
+ "भारत की एस 400 मिसाइल के मुकाबले पाक की थाड, जानें कौन कितना ताकतवर", // Hindi
+ "ரெட் அலர்ட் எச்சரிக்கை; புதுச்சேரியில் நாளை அரசு விடுமுறை!", // Tamil
+ "'ఉత్తర్వులు అందే వరకు ఓటర్ల తుది జాబితాను వెబ్‌సైట్లో పెట్టవద్దు'", // Telugu
+ "台北》抹黑柯P失敗?朱學恒酸:姚文智氣pupu嗆大老闆", // Chinese
+ "วัดไทรตีระฆังเบาลงช่วงเข้าพรรษา เจ้าอาวาสเผยคนร้องเรียนรับผลกรรมแล้ว", // Thai
+ "九州北部の一部が暴風域に入りました(日直予報士 2018年10月06日) - 日本気象協会 tenki.jp", // Japanese
+ "법원 “다스 지분 처분권·수익권 모두 MB가 보유”", // Korean
+ ]) {
+ const iter = seg.segment(text);
+ let prev = text.length;
+ let segments = [];
+ iter.preceding(prev);
+ assertTrue(["sep", "term"].includes(iter.breakType), iter.breakType);
+ assertTrue(iter.index >= 0);
+ assertTrue(iter.index < prev);
+ segments.push(text.substring(iter.index, prev));
+ prev = iter.index;
+ while (!iter.preceding()) {
+ assertTrue(["sep", "term"].includes(iter.breakType), iter.breakType);
+ assertTrue(iter.index >= 0);
+ assertTrue(iter.index <= text.length);
+ assertTrue(iter.index < prev);
+ segments.push(text.substring(iter.index, prev));
+ prev = iter.index;
+ }
+ assertEquals(text, segments.reverse().join(""));
+}
diff --git a/deps/v8/test/intl/segmenter/segment-sentence.js b/deps/v8/test/intl/segmenter/segment-sentence.js
new file mode 100644
index 0000000000..a802362e0e
--- /dev/null
+++ b/deps/v8/test/intl/segmenter/segment-sentence.js
@@ -0,0 +1,29 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-intl-segmenter
+
+const seg = new Intl.Segmenter([], {granularity: "sentence"})
+for (const text of [
+ "Hello world!", // English
+ " Hello world! ", // English with space before/after
+ " Hello world? Foo bar!", // English
+ "Jedovatou mambu objevila žena v zahrádkářské kolonii.", // Czech
+ "Việt Nam: Nhất thể hóa sẽ khác Trung Quốc?", // Vietnamese
+ "Σοβαρές ενστάσεις Κομισιόν για τον προϋπολογισμό της Ιταλίας", // Greek
+ "Решение Индии о покупке российских С-400 расценили как вызов США", // Russian
+ "הרופא שהציל נשים והנערה ששועבדה ע", // Hebrew,
+ "ترامب للملك سلمان: أنا جاد للغاية.. عليك دفع المزيد", // Arabic
+ "भारत की एस 400 मिसाइल के मुकाबले पाक की थाड, जानें कौन कितना ताकतवर", // Hindi
+ "ரெட் அலர்ட் எச்சரிக்கை; புதுச்சேரியில் நாளை அரசு விடுமுறை!", // Tamil
+ "'ఉత్తర్వులు అందే వరకు ఓటర్ల తుది జాబితాను వెబ్‌సైట్లో పెట్టవద్దు'", // Telugu
+ "台北》抹黑柯P失敗?朱學恒酸:姚文智氣pupu嗆大老闆", // Chinese
+ "วัดไทรตีระฆังเบาลงช่วงเข้าพรรษา เจ้าอาวาสเผยคนร้องเรียนรับผลกรรมแล้ว", // Thai
+ "九州北部の一部が暴風域に入りました(日直予報士 2018年10月06日) - 日本気象協会 tenki.jp", // Japanese
+ "법원 “다스 지분 처분권·수익권 모두 MB가 보유”", // Korean
+ ]) {
+ const iter = seg.segment(text);
+ assertEquals(undefined, iter.breakType);
+ assertEquals(0, iter.index);
+}
diff --git a/deps/v8/test/intl/segmenter/segment-word-following.js b/deps/v8/test/intl/segmenter/segment-word-following.js
new file mode 100644
index 0000000000..cd0950eff5
--- /dev/null
+++ b/deps/v8/test/intl/segmenter/segment-word-following.js
@@ -0,0 +1,38 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-intl-segmenter
+
+const seg = new Intl.Segmenter([], {granularity: "word"})
+for (const text of [
+ "Hello world!", // English
+ " Hello world! ", // English with space before/after
+ " Hello world? Foo bar!", // English
+ "Jedovatou mambu objevila žena v zahrádkářské kolonii.", // Czech
+ "Việt Nam: Nhất thể hóa sẽ khác Trung Quốc?", // Vietnamese
+ "Σοβαρές ενστάσεις Κομισιόν για τον προϋπολογισμό της Ιταλίας", // Greek
+ "Решение Индии о покупке российских С-400 расценили как вызов США", // Russian
+ "הרופא שהציל נשים והנערה ששועבדה ע", // Hebrew,
+ "ترامب للملك سلمان: أنا جاد للغاية.. عليك دفع المزيد", // Arabic
+ "भारत की एस 400 मिसाइल के मुकाबले पाक की थाड, जानें कौन कितना ताकतवर", // Hindi
+ "ரெட் அலர்ட் எச்சரிக்கை; புதுச்சேரியில் நாளை அரசு விடுமுறை!", // Tamil
+ "'ఉత్తర్వులు అందే వరకు ఓటర్ల తుది జాబితాను వెబ్‌సైట్లో పెట్టవద్దు'", // Telugu
+ "台北》抹黑柯P失敗?朱學恒酸:姚文智氣pupu嗆大老闆", // Chinese
+ "วัดไทรตีระฆังเบาลงช่วงเข้าพรรษา เจ้าอาวาสเผยคนร้องเรียนรับผลกรรมแล้ว", // Thai
+ "九州北部の一部が暴風域に入りました(日直予報士 2018年10月06日) - 日本気象協会 tenki.jp", // Japanese
+ "법원 “다스 지분 처분권·수익권 모두 MB가 보유”", // Korean
+ ]) {
+ const iter = seg.segment(text);
+ let prev = 0;
+ let segments = [];
+ while (!iter.following()) {
+ assertTrue(["word", "none"].includes(iter.breakType), iter.breakType);
+ assertTrue(iter.index >= 0);
+ assertTrue(iter.index <= text.length);
+ assertTrue(iter.index > prev);
+ segments.push(text.substring(prev, iter.index));
+ prev = iter.index;
+ }
+ assertEquals(text, segments.join(""));
+}
diff --git a/deps/v8/test/intl/segmenter/segment-word-iterable.js b/deps/v8/test/intl/segmenter/segment-word-iterable.js
new file mode 100644
index 0000000000..3dab2103c7
--- /dev/null
+++ b/deps/v8/test/intl/segmenter/segment-word-iterable.js
@@ -0,0 +1,45 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-intl-segmenter
+
+const seg = new Intl.Segmenter([], {granularity: "word"})
+for (const text of [
+ "Hello world!", // English
+ " Hello world! ", // English with space before/after
+ " Hello world? Foo bar!", // English
+ "Jedovatou mambu objevila žena v zahrádkářské kolonii.", // Czech
+ "Việt Nam: Nhất thể hóa sẽ khác Trung Quốc?", // Vietnamese
+ "Σοβαρές ενστάσεις Κομισιόν για τον προϋπολογισμό της Ιταλίας", // Greek
+ "Решение Индии о покупке российских С-400 расценили как вызов США", // Russian
+ "הרופא שהציל נשים והנערה ששועבדה ע", // Hebrew,
+ "ترامب للملك سلمان: أنا جاد للغاية.. عليك دفع المزيد", // Arabic
+ "भारत की एस 400 मिसाइल के मुकाबले पाक की थाड, जानें कौन कितना ताकतवर", // Hindi
+ "ரெட் அலர்ட் எச்சரிக்கை; புதுச்சேரியில் நாளை அரசு விடுமுறை!", // Tamil
+ "'ఉత్తర్వులు అందే వరకు ఓటర్ల తుది జాబితాను వెబ్‌సైట్లో పెట్టవద్దు'", // Telugu
+ "台北》抹黑柯P失敗?朱學恒酸:姚文智氣pupu嗆大老闆", // Chinese
+ "วัดไทรตีระฆังเบาลงช่วงเข้าพรรษา เจ้าอาวาสเผยคนร้องเรียนรับผลกรรมแล้ว", // Thai
+ "九州北部の一部が暴風域に入りました(日直予報士 2018年10月06日) - 日本気象協会 tenki.jp", // Japanese
+ "법원 “다스 지분 처분권·수익권 모두 MB가 보유”", // Korean
+ ]) {
+ let segments = [];
+ // Create another %SegmentIterator% to compare its results with those of
+ // the one created in the for-of loop.
+ let iter = seg.segment(text);
+ let prev = 0;
+ for (const v of seg.segment(text)) {
+ assertTrue(["word", "none"].includes(v.breakType), v.breakType);
+ assertEquals("string", typeof v.segment);
+ assertTrue(v.segment.length > 0);
+ segments.push(v.segment);
+
+ // manually advance the iter.
+ assertFalse(iter.following());
+ assertEquals(iter.breakType, v.breakType);
+ assertEquals(text.substring(prev, iter.index), v.segment);
+ prev = iter.index;
+ }
+ assertTrue(iter.following());
+ assertEquals(text, segments.join(''));
+}
diff --git a/deps/v8/test/intl/segmenter/segment-word-next.js b/deps/v8/test/intl/segmenter/segment-word-next.js
new file mode 100644
index 0000000000..54fb40a251
--- /dev/null
+++ b/deps/v8/test/intl/segmenter/segment-word-next.js
@@ -0,0 +1,40 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-intl-segmenter
+
+const seg = new Intl.Segmenter([], {granularity: "word"})
+for (const text of [
+ "Hello world!", // English
+ " Hello world! ", // English with space before/after
+ " Hello world? Foo bar!", // English
+ "Jedovatou mambu objevila žena v zahrádkářské kolonii.", // Czech
+ "Việt Nam: Nhất thể hóa sẽ khác Trung Quốc?", // Vietnamese
+ "Σοβαρές ενστάσεις Κομισιόν για τον προϋπολογισμό της Ιταλίας", // Greek
+ "Решение Индии о покупке российских С-400 расценили как вызов США", // Russian
+ "הרופא שהציל נשים והנערה ששועבדה ע", // Hebrew,
+ "ترامب للملك سلمان: أنا جاد للغاية.. عليك دفع المزيد", // Arabic
+ "भारत की एस 400 मिसाइल के मुकाबले पाक की थाड, जानें कौन कितना ताकतवर", // Hindi
+ "ரெட் அலர்ட் எச்சரிக்கை; புதுச்சேரியில் நாளை அரசு விடுமுறை!", // Tamil
+ "'ఉత్తర్వులు అందే వరకు ఓటర్ల తుది జాబితాను వెబ్‌సైట్లో పెట్టవద్దు'", // Telugu
+ "台北》抹黑柯P失敗?朱學恒酸:姚文智氣pupu嗆大老闆", // Chinese
+ "วัดไทรตีระฆังเบาลงช่วงเข้าพรรษา เจ้าอาวาสเผยคนร้องเรียนรับผลกรรมแล้ว", // Thai
+ "九州北部の一部が暴風域に入りました(日直予報士 2018年10月06日) - 日本気象協会 tenki.jp", // Japanese
+ "법원 “다스 지분 처분권·수익권 모두 MB가 보유”", // Korean
+ ]) {
+ const iter = seg.segment(text);
+ let segments = [];
+ let oldPos = -1;
+ for (let result = iter.next(); !result.done; result = iter.next()) {
+ const v = result.value;
+ assertTrue(["word", "none"].includes(iter.breakType), iter.breakType);
+ assertEquals("string", typeof v.segment);
+ assertTrue(v.segment.length > 0);
+ segments.push(v.segment);
+ assertEquals("number", typeof v.index);
+ assertTrue(oldPos < v.index);
+ oldPos = v.index;
+ }
+ assertEquals(text, segments.join(''));
+}
diff --git a/deps/v8/test/intl/segmenter/segment-word-preceding.js b/deps/v8/test/intl/segmenter/segment-word-preceding.js
new file mode 100644
index 0000000000..7ca5132dfd
--- /dev/null
+++ b/deps/v8/test/intl/segmenter/segment-word-preceding.js
@@ -0,0 +1,44 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-intl-segmenter
+
+const seg = new Intl.Segmenter([], {granularity: "word"})
+for (const text of [
+ "Hello world!", // English
+ " Hello world! ", // English with space before/after
+ " Hello world? Foo bar!", // English
+ "Jedovatou mambu objevila žena v zahrádkářské kolonii.", // Czech
+ "Việt Nam: Nhất thể hóa sẽ khác Trung Quốc?", // Vietnamese
+ "Σοβαρές ενστάσεις Κομισιόν για τον προϋπολογισμό της Ιταλίας", // Greek
+ "Решение Индии о покупке российских С-400 расценили как вызов США", // Russian
+ "הרופא שהציל נשים והנערה ששועבדה ע", // Hebrew,
+ "ترامب للملك سلمان: أنا جاد للغاية.. عليك دفع المزيد", // Arabic
+ "भारत की एस 400 मिसाइल के मुकाबले पाक की थाड, जानें कौन कितना ताकतवर", // Hindi
+ "ரெட் அலர்ட் எச்சரிக்கை; புதுச்சேரியில் நாளை அரசு விடுமுறை!", // Tamil
+ "'ఉత్తర్వులు అందే వరకు ఓటర్ల తుది జాబితాను వెబ్‌సైట్లో పెట్టవద్దు'", // Telugu
+ "台北》抹黑柯P失敗?朱學恒酸:姚文智氣pupu嗆大老闆", // Chinese
+ "วัดไทรตีระฆังเบาลงช่วงเข้าพรรษา เจ้าอาวาสเผยคนร้องเรียนรับผลกรรมแล้ว", // Thai
+ "九州北部の一部が暴風域に入りました(日直予報士 2018年10月06日) - 日本気象協会 tenki.jp", // Japanese
+ "법원 “다스 지분 처분권·수익권 모두 MB가 보유”", // Korean
+ ]) {
+ const iter = seg.segment(text);
+ let prev = text.length;
+ let segments = [];
+ iter.preceding(prev);
+ assertTrue(["word", "none"].includes(iter.breakType), iter.breakType);
+ assertTrue(iter.index >= 0);
+ assertTrue(iter.index < prev);
+ segments.push(text.substring(iter.index, prev));
+ prev = iter.index;
+ while (!iter.preceding()) {
+ assertTrue(["word", "none"].includes(iter.breakType), iter.breakType);
+ assertTrue(iter.index >= 0);
+ assertTrue(iter.index <= text.length);
+ assertTrue(iter.index < prev);
+ segments.push(text.substring(iter.index, prev));
+ prev = iter.index;
+ }
+ assertEquals(text, segments.reverse().join(""));
+}
diff --git a/deps/v8/test/intl/segmenter/segment-word.js b/deps/v8/test/intl/segmenter/segment-word.js
new file mode 100644
index 0000000000..b191a67cb9
--- /dev/null
+++ b/deps/v8/test/intl/segmenter/segment-word.js
@@ -0,0 +1,29 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-intl-segmenter
+
+const seg = new Intl.Segmenter([], {granularity: "word"})
+for (const text of [
+ "Hello world!", // English
+ " Hello world! ", // English with space before/after
+ " Hello world? Foo bar!", // English
+ "Jedovatou mambu objevila žena v zahrádkářské kolonii.", // Czech
+ "Việt Nam: Nhất thể hóa sẽ khác Trung Quốc?", // Vietnamese
+ "Σοβαρές ενστάσεις Κομισιόν για τον προϋπολογισμό της Ιταλίας", // Greek
+ "Решение Индии о покупке российских С-400 расценили как вызов США", // Russian
+ "הרופא שהציל נשים והנערה ששועבדה ע", // Hebrew,
+ "ترامب للملك سلمان: أنا جاد للغاية.. عليك دفع المزيد", // Arabic
+ "भारत की एस 400 मिसाइल के मुकाबले पाक की थाड, जानें कौन कितना ताकतवर", // Hindi
+ "ரெட் அலர்ட் எச்சரிக்கை; புதுச்சேரியில் நாளை அரசு விடுமுறை!", // Tamil
+ "'ఉత్తర్వులు అందే వరకు ఓటర్ల తుది జాబితాను వెబ్‌సైట్లో పెట్టవద్దు'", // Telugu
+ "台北》抹黑柯P失敗?朱學恒酸:姚文智氣pupu嗆大老闆", // Chinese
+ "วัดไทรตีระฆังเบาลงช่วงเข้าพรรษา เจ้าอาวาสเผยคนร้องเรียนรับผลกรรมแล้ว", // Thai
+ "九州北部の一部が暴風域に入りました(日直予報士 2018年10月06日) - 日本気象協会 tenki.jp", // Japanese
+ "법원 “다스 지분 처분권·수익권 모두 MB가 보유”", // Korean
+ ]) {
+ const iter = seg.segment(text);
+ assertEquals(undefined, iter.breakType);
+ assertEquals(0, iter.index);
+}
diff --git a/deps/v8/test/intl/segmenter/segment.js b/deps/v8/test/intl/segmenter/segment.js
index ca17c5040d..4c76b96e54 100644
--- a/deps/v8/test/intl/segmenter/segment.js
+++ b/deps/v8/test/intl/segmenter/segment.js
@@ -5,3 +5,35 @@
// Flags: --harmony-intl-segmenter
assertEquals("function", typeof Intl.Segmenter.prototype.segment);
+assertEquals(1, Intl.Segmenter.prototype.segment.length);
+
+let seg = new Intl.Segmenter("en", {granularity: "word"})
+let res;
+
+// test with 0 args
+assertDoesNotThrow(() => res = seg.segment())
+// test with 1 arg
+assertDoesNotThrow(() => res = seg.segment("hello"))
+assertEquals("hello", res.next().value.segment);
+// test with 2 args
+assertDoesNotThrow(() => res = seg.segment("hello world"))
+assertEquals("hello", res.next().value.segment);
+
+// test with other types
+assertDoesNotThrow(() => res = seg.segment(undefined))
+assertEquals("undefined", res.next().value.segment);
+assertDoesNotThrow(() => res = seg.segment(null))
+assertEquals("null", res.next().value.segment);
+assertDoesNotThrow(() => res = seg.segment(true))
+assertEquals("true", res.next().value.segment);
+assertDoesNotThrow(() => res = seg.segment(false))
+assertEquals("false", res.next().value.segment);
+assertDoesNotThrow(() => res = seg.segment(1234))
+assertEquals("1234", res.next().value.segment);
+assertDoesNotThrow(() => res = seg.segment(3.1415926))
+assertEquals("3.1415926", res.next().value.segment);
+assertDoesNotThrow(() => res = seg.segment(["hello","world"]))
+assertEquals("hello", res.next().value.segment);
+assertDoesNotThrow(() => res = seg.segment({k: 'v'}))
+assertEquals("[", res.next().value.segment);
+assertThrows(() => res = seg.segment(Symbol()), TypeError)
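
All of the type cases above follow from segment() applying ToString to its argument; a compact sketch of that coercion (illustrative, not part of the patch):

`${undefined}`;  // "undefined"
`${null}`;       // "null"
`${{k: "v"}}`;   // "[object Object]" -- hence the first word segment is "["
// `${Symbol()}` // throws TypeError: symbols are the one primitive that
//               // implicit ToString rejects, matching the final assertion
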
diff --git a/deps/v8/test/intl/segmenter/subclassing.js b/deps/v8/test/intl/segmenter/subclassing.js
new file mode 100644
index 0000000000..2bea1e88ce
--- /dev/null
+++ b/deps/v8/test/intl/segmenter/subclassing.js
@@ -0,0 +1,17 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-intl-segmenter
+
+// Test subclassing of Segmenter
+class CustomSegmenter extends Intl.Segmenter {
+ constructor(locales, options) {
+ super(locales, options);
+ this.isCustom = true;
+ }
+}
+
+const seg = new CustomSegmenter("zh");
+assertEquals(true, seg.isCustom, "Custom property");
+assertEquals(Object.getPrototypeOf(seg), CustomSegmenter.prototype, "Prototype");
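
Because the Intl.Segmenter constructor creates its instance from new.target, ordinary class extension composes further; a hypothetical subclass layering behavior on top (names illustrative; print() is d8's builtin):

class LoggingSegmenter extends Intl.Segmenter {
  segment(text) {
    print(`segmenting ${String(text).length} code units`);
    return super.segment(text);
  }
}
new LoggingSegmenter("en", { granularity: "word" }).segment("hi");
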
diff --git a/deps/v8/test/intl/segmenter/supported-locale.js b/deps/v8/test/intl/segmenter/supported-locale.js
index 24825a2759..9197b6a815 100644
--- a/deps/v8/test/intl/segmenter/supported-locale.js
+++ b/deps/v8/test/intl/segmenter/supported-locale.js
@@ -9,14 +9,14 @@ assertEquals(
"Intl.Segmenter.supportedLocalesOf should be a function"
);
-var undef = Intl.Segmenter.supportedLocalesOf();
+const undef = Intl.Segmenter.supportedLocalesOf();
assertEquals([], undef);
-var empty = Intl.Segmenter.supportedLocalesOf([]);
+const empty = Intl.Segmenter.supportedLocalesOf([]);
assertEquals([], empty);
-var strLocale = Intl.Segmenter.supportedLocalesOf("sr");
+const strLocale = Intl.Segmenter.supportedLocalesOf("sr");
assertEquals("sr", strLocale[0]);
-var multiLocale = ["sr-Thai-RS", "de", "zh-CN"];
+const multiLocale = ["sr-Thai-RS", "de", "zh-CN"];
assertEquals(multiLocale, Intl.Segmenter.supportedLocalesOf(multiLocale));
diff --git a/deps/v8/test/intl/testcfg.py b/deps/v8/test/intl/testcfg.py
index 6c5660ea9d..8cf26f1a61 100644
--- a/deps/v8/test/intl/testcfg.py
+++ b/deps/v8/test/intl/testcfg.py
@@ -43,8 +43,7 @@ class TestSuite(testsuite.TestSuite):
files.sort()
for filename in files:
if (filename.endswith(".js") and filename != "assert.js" and
- filename != "utils.js" and filename != "regexp-assert.js" and
- filename != "regexp-prepare.js"):
+ filename != "utils.js"):
fullpath = os.path.join(dirname, filename)
relpath = fullpath[len(self.root) + 1 : -3]
testname = relpath.replace(os.path.sep, "/")
@@ -80,9 +79,7 @@ class TestCase(testcase.D8TestCase):
files = map(lambda f: os.path.join(self.suite.root, f), [
'assert.js',
'utils.js',
- 'regexp-prepare.js',
self.path + self._get_suffix(),
- 'regexp-assert.js',
])
if self._test_config.isolates:
diff --git a/deps/v8/test/js-perf-test/Array/from.js b/deps/v8/test/js-perf-test/Array/from.js
index 06e00a223e..d173c5af20 100644
--- a/deps/v8/test/js-perf-test/Array/from.js
+++ b/deps/v8/test/js-perf-test/Array/from.js
@@ -4,16 +4,26 @@
(() => {
createSuite('MixedFrom', 1000, MixedFrom, MixedFromSetup);
+ createSuite(
+ 'MixedCowNoMapFrom', 1000, MixedCowNoMapFrom, MixedCowNoMapFromSetup);
+ createSuite('MixedNonCowNoMapFrom', 1000, MixedNonCowNoMapFrom,
+ MixedNonCowNoMapFromSetup);
createSuite('SmiFrom', 1000, SmiFrom, SmiFromSetup);
createSuite('SmallSmiFrom', 1000, SmallSmiFrom, SmallSmiFromSetup);
- createSuite('SmiNoMapFrom', 1000, SmiNoMapFrom, SmiNoMapFromSetup);
+ createSuite('SmiCowNoMapFrom', 1000, SmiCowNoMapFrom, SmiCowNoMapFromSetup);
+ createSuite(
+ 'SmiNonCowNoMapFrom', 1000, SmiNonCowNoMapFrom, SmiNonCowNoMapFromSetup);
createSuite(
'SmiNoIteratorFrom', 1000, SmiNoIteratorFrom, SmiNoIteratorFromSetup);
createSuite(
'TransplantedFrom', 1000, TransplantedFrom, TransplantedFromSetup);
createSuite('DoubleFrom', 1000, DoubleFrom, DoubleFromSetup);
+ createSuite('DoubleNoMapFrom', 1000, DoubleNoMapFrom, DoubleNoMapFromSetup);
createSuite('StringFrom', 1000, StringFrom, StringFromSetup);
- createSuite('StringNoMapFrom', 1000, StringNoMapFrom, StringNoMapFromSetup);
+ createSuite(
+ 'StringCowNoMapFrom', 1000, StringCowNoMapFrom, StringCowNoMapFromSetup);
+ createSuite('StringNonCowNoMapFrom', 1000, StringNonCowNoMapFrom,
+ StringNonCowNoMapFromSetup);
function ArrayLike() {}
ArrayLike.from = Array.from;
@@ -22,7 +32,9 @@
var result;
var func
- var smi_array = [
+ // This creates a COW array of smis. COWness does not affect the performance
+ // of Array.from calls with a callback function.
+ var smi_array_Cow = [
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
@@ -30,6 +42,11 @@
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
];
+ // This creates a non-COW array.
+ var smi_array = Array.from(smi_array_Cow);
+ smi_array[0] = 1;
+
+ // This creates an array of doubles. There is no COW array for doubles.
var double_array = [
1.5, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5, 8.5, 9.5, 10.5, //
11.5, 12.5, 13.5, 14.5, 15.5, 16.5, 17.5, 18.5, 19.5, 20.5,
@@ -43,7 +60,8 @@
11.5, 12.5, 13.5, 14.5, 15.5, 16.5, 17.5, 18.5, 19.5, 20.5,
];
- var string_array = [
+ // This creates a COW array of objects.
+ var string_array_Cow = [
'a', 'b', 'c', 'a', 'b', 'c', 'a', 'b', 'c', 'a', 'a', 'b', 'c', 'a', 'b',
'c', 'a', 'b', 'c', 'a', 'a', 'b', 'c', 'a', 'b', 'c', 'a', 'b', 'c', 'a',
'a', 'b', 'c', 'a', 'b', 'c', 'a', 'b', 'c', 'a', 'a', 'b', 'c', 'a', 'b',
@@ -53,7 +71,12 @@
'a', 'b', 'c', 'a', 'b', 'c', 'a', 'b', 'c', 'a',
];
- var mixed_array = [
+ // This creates a non-COW array.
+ var string_array = Array.from(string_array_Cow);
+ string_array[0] = 'a';
+
+ // This creates a COW array of objects.
+ var mixed_array_Cow = [
1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
11, 12, 13, 14, 15, 16, 17, 18, 19, 20, //
1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
@@ -66,13 +89,21 @@
'a', 'b', 'c', 'a', 'b', 'c', 'a', 'b', 'c', 'a',
];
+ // This creates a non-COW array.
+ var mixed_array = Array.from(mixed_array_Cow);
+ mixed_array[0] = 1;
+
// Although these functions have the same code, they are separated for
// clean IC feedback.
function SmallSmiFrom() {
result = Array.from(arg, func);
}
- function SmiNoMapFrom() {
+ function SmiCowNoMapFrom() {
+ result = Array.from(arg);
+ }
+
+ function SmiNonCowNoMapFrom() {
result = Array.from(arg);
}
@@ -92,11 +123,19 @@
result = Array.from(arg, func);
}
+ function DoubleNoMapFrom() {
+ result = Array.from(arg);
+ }
+
function StringFrom() {
result = Array.from(arg, func);
}
- function StringNoMapFrom() {
+ function StringCowNoMapFrom() {
+ result = Array.from(arg);
+ }
+
+ function StringNonCowNoMapFrom() {
result = Array.from(arg);
}
@@ -104,31 +143,44 @@
result = Array.from(arg, func);
}
+ function MixedCowNoMapFrom() {
+ result = Array.from(arg);
+ }
+
+ function MixedNonCowNoMapFrom() {
+ result = Array.from(arg);
+ }
+
function SmallSmiFromSetup() {
func = (v, i) => v + i;
arg = [1, 2, 3];
}
- function SmiNoMapFromSetup() {
+ function SmiCowNoMapFromSetup() {
+ func = undefined;
+ arg = smi_array_Cow;
+ }
+
+ function SmiNonCowNoMapFromSetup() {
func = undefined;
arg = smi_array;
}
function SmiFromSetup() {
func = (v, i) => v + i;
- arg = smi_array;
+ arg = smi_array_Cow;
}
function SmiNoIteratorFromSetup() {
func = (v, i) => v + i;
- array = smi_array;
+ array = smi_array_Cow;
arg = {length: array.length};
Object.assign(arg, array);
}
function TransplantedFromSetup() {
func = (v, i) => v + i;
- arg = smi_array;
+ arg = smi_array_Cow;
}
function DoubleFromSetup() {
@@ -136,19 +188,38 @@
arg = double_array;
}
+ function DoubleNoMapFromSetup() {
+ func = undefined;
+ arg = double_array;
+ }
+
function StringFromSetup() {
func = (v, i) => v + i;
- arg = string_array;
+ arg = string_array_Cow;
}
- function StringNoMapFromSetup() {
+ function StringCowNoMapFromSetup() {
+ func = undefined;
+ arg = string_array_Cow;
+ }
+
+ function StringNonCowNoMapFromSetup() {
func = undefined;
arg = string_array;
}
function MixedFromSetup() {
func = (v, i) => v + i;
- arg = mixed_array;
+ arg = mixed_array_Cow;
+ }
+
+ function MixedCowNoMapFromSetup() {
+ func = undefined;
+ arg = mixed_array_Cow;
}
+ function MixedNonCowNoMapFromSetup() {
+ func = undefined;
+ arg = mixed_array;
+ }
})();
diff --git a/deps/v8/test/js-perf-test/Array/reduce.js b/deps/v8/test/js-perf-test/Array/reduce.js
index 2b9a28f098..3b07969669 100644
--- a/deps/v8/test/js-perf-test/Array/reduce.js
+++ b/deps/v8/test/js-perf-test/Array/reduce.js
@@ -6,7 +6,7 @@
// Make sure we inline the callback, pick up all possible TurboFan
// optimizations.
function RunOptFastReduce(multiple) {
- // Use of variable multiple in the callback function forces
+ // Use of multiple variables in the callback function forces
// context creation without escape analysis.
//
// Also, the arrow function requires inlining based on
diff --git a/deps/v8/test/js-perf-test/ArrayIndexOfIncludesPolymorphic/indexof-includes-polymorphic.js b/deps/v8/test/js-perf-test/ArrayIndexOfIncludesPolymorphic/indexof-includes-polymorphic.js
new file mode 100644
index 0000000000..c79ba71174
--- /dev/null
+++ b/deps/v8/test/js-perf-test/ArrayIndexOfIncludesPolymorphic/indexof-includes-polymorphic.js
@@ -0,0 +1,41 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function benchy(fn, name) {
+ new BenchmarkSuite(name, [1], [
+ new Benchmark(name, true, false, 0, fn),
+ ]);
+}
+
+function forLoop(array, searchValue) {
+ for (let i = 0; i < array.length; ++i) {
+ if (array[i] === searchValue) return true;
+ }
+ return false;
+}
+
+function indexOf(array, searchValue) {
+ return array.indexOf(searchValue) !== -1;
+}
+
+function includes(array, searchValue) {
+ return array.includes(searchValue);
+}
+
+const PACKED = [
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+];
+const HOLEY = new Array(PACKED.length);
+for (let i = 0; i < PACKED.length; ++i)
+ HOLEY[i] = PACKED[i];
+
+function helper(fn) {
+ const SEARCH_VALUE = 15;
+ const result = fn(PACKED, SEARCH_VALUE) && fn(HOLEY, SEARCH_VALUE);
+ return result;
+}
+
+benchy(() => helper(forLoop), 'for loop');
+benchy(() => helper(indexOf), 'Array#indexOf');
+benchy(() => helper(includes), 'Array#includes');
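
What makes this suite "polymorphic" is the elements-kind mix, sketched below under the assumption (an engine detail of this V8 version) that a fully filled holey array does not migrate back to a packed kind:

const packedArr = [1, 2, 3];    // PACKED_SMI_ELEMENTS literal
const holeyArr = new Array(3);  // allocated holey: HOLEY_SMI_ELEMENTS
for (let i = 0; i < 3; ++i) holeyArr[i] = packedArr[i];
// helper() funnels both arrays through one call site, so the
// indexOf/includes feedback stays polymorphic across elements kinds.
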
diff --git a/deps/v8/test/js-perf-test/ArrayIndexOfIncludesPolymorphic/run.js b/deps/v8/test/js-perf-test/ArrayIndexOfIncludesPolymorphic/run.js
new file mode 100644
index 0000000000..3ad11b83e1
--- /dev/null
+++ b/deps/v8/test/js-perf-test/ArrayIndexOfIncludesPolymorphic/run.js
@@ -0,0 +1,23 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+load('../base.js');
+load(arguments[0] + '.js');
+
+function PrintResult(name, result) {
+ print(name + '-ArrayIndexOfIncludesPolymorphic(Score): ' + result);
+}
+
+function PrintStep(name) {}
+
+function PrintError(name, error) {
+ PrintResult(name, error);
+}
+
+BenchmarkSuite.config.doWarmup = undefined;
+BenchmarkSuite.config.doDeterministic = undefined;
+
+BenchmarkSuite.RunSuites({ NotifyResult: PrintResult,
+ NotifyError: PrintError,
+ NotifyStep: PrintStep });
diff --git a/deps/v8/test/js-perf-test/ArrayLiteralInitialSpreadLargeDoubleHoley/run.js b/deps/v8/test/js-perf-test/ArrayLiteralInitialSpreadLargeDoubleHoley/run.js
new file mode 100644
index 0000000000..72b83a3ada
--- /dev/null
+++ b/deps/v8/test/js-perf-test/ArrayLiteralInitialSpreadLargeDoubleHoley/run.js
@@ -0,0 +1,158 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Comparing different copy schemes against spread initial literals.
+// Benchmarks for large holey double arrays.
+
+const largeHoleyArray = new Array(1e5);
+
+for (var i = 0; i < 100; i++) {
+ largeHoleyArray[i] = i + 6.66;
+}
+
+for (var i = 5000; i < 5500; i++) {
+ largeHoleyArray[i] = i + 6.66;
+}
+
+// ----------------------------------------------------------------------------
+// Benchmark: Spread
+// ----------------------------------------------------------------------------
+
+function SpreadLargeHoley() {
+ var newArr = [...largeHoleyArray];
+ // basic sanity check
+ if (newArr.length != largeHoleyArray.length) throw 666;
+ return newArr;
+}
+
+// ----------------------------------------------------------------------------
+// Benchmark: ForLength
+// ----------------------------------------------------------------------------
+
+function ForLengthLargeHoley() {
+ var newArr = new Array(largeHoleyArray.length);
+ for (let i = 0; i < largeHoleyArray.length; i++) {
+ newArr[i] = largeHoleyArray[i];
+ }
+ // basic sanity check
+ if (newArr.length != largeHoleyArray.length) throw 666;
+ return newArr;
+}
+
+// ----------------------------------------------------------------------------
+// Benchmark: ForLengthEmpty
+// ----------------------------------------------------------------------------
+
+function ForLengthEmptyLargeHoley() {
+ var newArr = [];
+ for (let i = 0; i < largeHoleyArray.length; i++) {
+ newArr[i] = largeHoleyArray[i];
+ }
+ // basic sanity check
+ if (newArr.length != largeHoleyArray.length) throw 666;
+ return newArr;
+}
+
+// ----------------------------------------------------------------------------
+// Benchmark: Slice
+// ----------------------------------------------------------------------------
+
+function SliceLargeHoley() {
+ var newArr = largeHoleyArray.slice();
+ // basic sanity check
+ if (newArr.length != largeHoleyArray.length) throw 666;
+ return newArr;
+}
+
+// ----------------------------------------------------------------------------
+// Benchmark: Slice0
+// ----------------------------------------------------------------------------
+
+function Slice0LargeHoley() {
+ var newArr = largeHoleyArray.slice(0);
+ // basic sanity check
+ if (newArr.length != largeHoleyArray.length) throw 666;
+ return newArr;
+}
+
+// ----------------------------------------------------------------------------
+// Benchmark: ConcatReceive
+// ----------------------------------------------------------------------------
+
+function ConcatReceiveLargeHoley() {
+ var newArr = largeHoleyArray.concat();
+ // basic sanity check
+ if (newArr.length != largeHoleyArray.length) throw 666;
+ return newArr;
+}
+
+// ----------------------------------------------------------------------------
+// Benchmark: ConcatArg
+// ----------------------------------------------------------------------------
+
+function ConcatArgLargeHoley() {
+ var newArr = [].concat(largeHoleyArray);
+ // basic sanity check
+ if (newArr.length != largeHoleyArray.length) throw 666;
+ return newArr;
+}
+
+// ----------------------------------------------------------------------------
+// Benchmark: ForOfPush
+// ----------------------------------------------------------------------------
+
+function ForOfPushLargeHoley() {
+ var newArr = [];
+ for (let x of largeHoleyArray) {
+ newArr.push(x)
+ }
+ // basic sanity check
+ if (newArr.length != largeHoleyArray.length) throw 666;
+ return newArr;
+}
+
+// ----------------------------------------------------------------------------
+// Benchmark: MapId
+// ----------------------------------------------------------------------------
+
+function MapIdLargeHoley() {
+ var newArr = largeHoleyArray.map(x => x);
+ // basic sanity check
+ if (newArr.length != largeHoleyArray.length) throw 666;
+ return newArr;
+}
+
+// ----------------------------------------------------------------------------
+// Setup and Run
+// ----------------------------------------------------------------------------
+
+load('../base.js');
+
+var success = true;
+
+function PrintResult(name, result) {
+ print(name + '-ArrayLiteralInitialSpreadLargeDoubleHoley(Score): ' + result);
+}
+
+function PrintError(name, error) {
+ PrintResult('Error: ' + name, error);
+ success = false;
+}
+
+// Run the benchmark for (5 x 100) iterations instead of running for 1 second.
+function CreateBenchmark(name, f) {
+ new BenchmarkSuite(name, [1000], [ new Benchmark(name, false, false, 5, f) ]);
+}
+
+CreateBenchmark('Spread', SpreadLargeHoley);
+CreateBenchmark('ForLength', ForLengthLargeHoley);
+CreateBenchmark('ForLengthEmpty', ForLengthEmptyLargeHoley);
+CreateBenchmark('Slice', SliceLargeHoley);
+CreateBenchmark('Slice0', Slice0LargeHoley);
+CreateBenchmark('ConcatReceive', ConcatReceiveLargeHoley);
+CreateBenchmark('ConcatArg', ConcatArgLargeHoley);
+
+BenchmarkSuite.config.doWarmup = true;
+BenchmarkSuite.config.doDeterministic = true;
+BenchmarkSuite.RunSuites({NotifyResult: PrintResult, NotifyError: PrintError});
diff --git a/deps/v8/test/js-perf-test/ArrayLiteralInitialSpreadLargeDoublePacked/run.js b/deps/v8/test/js-perf-test/ArrayLiteralInitialSpreadLargeDoublePacked/run.js
new file mode 100644
index 0000000000..9b449840ea
--- /dev/null
+++ b/deps/v8/test/js-perf-test/ArrayLiteralInitialSpreadLargeDoublePacked/run.js
@@ -0,0 +1,155 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Comparing different copy schemes against spread initial literals.
+// Benchmarks for large packed double arrays.
+
+var largeArray = Array.from(Array(1e5).keys());
+// TODO(dhai): we should be able to use Array.prototype.map here, but that
+// implementation is currently creating a HOLEY array.
+for (var i = 0; i < 1e5; i++) {
+ largeArray[i] += 6.66;
+}
+
+// ----------------------------------------------------------------------------
+// Benchmark: Spread
+// ----------------------------------------------------------------------------
+
+function SpreadLarge() {
+ var newArr = [...largeArray];
+ // basic sanity check
+ if (newArr.length != largeArray.length) throw 666;
+ return newArr;
+}
+
+// ----------------------------------------------------------------------------
+// Benchmark: ForLength
+// ----------------------------------------------------------------------------
+
+function ForLengthLarge() {
+ var newArr = new Array(largeArray.length);
+ for (let i = 0; i < largeArray.length; i++) {
+ newArr[i] = largeArray[i];
+ }
+ // basic sanity check
+ if (newArr.length != largeArray.length) throw 666;
+ return newArr;
+}
+
+// ----------------------------------------------------------------------------
+// Benchmark: ForLengthEmpty
+// ----------------------------------------------------------------------------
+
+function ForLengthEmptyLarge() {
+ var newArr = [];
+ for (let i = 0; i < largeArray.length; i++) {
+ newArr[i] = largeArray[i];
+ }
+ // basic sanity check
+ if (newArr.length != largeArray.length) throw 666;
+ return newArr;
+}
+
+// ----------------------------------------------------------------------------
+// Benchmark: Slice
+// ----------------------------------------------------------------------------
+
+function SliceLarge() {
+ var newArr = largeArray.slice();
+ // basic sanity check
+ if (newArr.length != largeArray.length) throw 666;
+ return newArr;
+}
+
+// ----------------------------------------------------------------------------
+// Benchmark: Slice0
+// ----------------------------------------------------------------------------
+
+function Slice0Large() {
+ var newArr = largeArray.slice(0);
+ // basic sanity check
+ if (newArr.length != largeArray.length) throw 666;
+ return newArr;
+}
+
+// ----------------------------------------------------------------------------
+// Benchmark: ConcatReceive
+// ----------------------------------------------------------------------------
+
+function ConcatReceiveLarge() {
+ var newArr = largeArray.concat();
+ // basic sanity check
+ if (newArr.length != largeArray.length) throw 666;
+ return newArr;
+}
+
+// ----------------------------------------------------------------------------
+// Benchmark: ConcatArg
+// ----------------------------------------------------------------------------
+
+function ConcatArgLarge() {
+ var newArr = [].concat(largeArray);
+ // basic sanity check
+ if (newArr.length != largeArray.length) throw 666;
+ return newArr;
+}
+
+// ----------------------------------------------------------------------------
+// Benchmark: ForOfPush
+// ----------------------------------------------------------------------------
+
+function ForOfPushLarge() {
+ var newArr = [];
+ for (let x of largeArray) {
+ newArr.push(x)
+ }
+ // basic sanity check
+ if (newArr.length != largeArray.length) throw 666;
+ return newArr;
+}
+
+// ----------------------------------------------------------------------------
+// Benchmark: MapId
+// ----------------------------------------------------------------------------
+
+function MapIdLarge() {
+ var newArr = largeArray.map(x => x);
+ // basic sanity check
+ if (newArr.length != largeArray.length) throw 666;
+ return newArr;
+}
+
+// ----------------------------------------------------------------------------
+// Setup and Run
+// ----------------------------------------------------------------------------
+
+load('../base.js');
+
+var success = true;
+
+function PrintResult(name, result) {
+ print(name + '-ArrayLiteralInitialSpreadLargeDoublePacked(Score): ' + result);
+}
+
+function PrintError(name, error) {
+ PrintResult('Error: ' + name, error);
+ success = false;
+}
+
+// Run the benchmark for (5 x 100) iterations instead of running for 1 second.
+function CreateBenchmark(name, f) {
+ new BenchmarkSuite(name, [1000], [ new Benchmark(name, false, false, 5, f) ]);
+}
+
+CreateBenchmark('Spread', SpreadLarge);
+CreateBenchmark('ForLength', ForLengthLarge);
+CreateBenchmark('ForLengthEmpty', ForLengthEmptyLarge);
+CreateBenchmark('Slice', SliceLarge);
+CreateBenchmark('Slice0', Slice0Large);
+CreateBenchmark('ConcatReceive', ConcatReceiveLarge);
+CreateBenchmark('ConcatArg', ConcatArgLarge);
+
+BenchmarkSuite.config.doWarmup = true;
+BenchmarkSuite.config.doDeterministic = true;
+BenchmarkSuite.RunSuites({NotifyResult: PrintResult, NotifyError: PrintError});
diff --git a/deps/v8/test/js-perf-test/ArrayLiteralInitialSpreadLargePacked/run.js b/deps/v8/test/js-perf-test/ArrayLiteralInitialSpreadLargePacked/run.js
index 38643c6903..dc279784cf 100644
--- a/deps/v8/test/js-perf-test/ArrayLiteralInitialSpreadLargePacked/run.js
+++ b/deps/v8/test/js-perf-test/ArrayLiteralInitialSpreadLargePacked/run.js
@@ -5,8 +5,7 @@
// Comparing different copy schemes against spread initial literals.
// Benchmarks for large packed arrays.
-const largeHoleyArray = new Array(1e5);
-const largeArray = Array.from(largeHoleyArray.keys());
+const largeArray = Array.from(Array(1e5).keys());
// ----------------------------------------------------------------------------
// Benchmark: Spread
diff --git a/deps/v8/test/js-perf-test/ArrayLiteralInitialSpreadLargeSmiMap/run.js b/deps/v8/test/js-perf-test/ArrayLiteralInitialSpreadLargeSmiMap/run.js
new file mode 100644
index 0000000000..2654d53d9c
--- /dev/null
+++ b/deps/v8/test/js-perf-test/ArrayLiteralInitialSpreadLargeSmiMap/run.js
@@ -0,0 +1,94 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Comparing different iterating schemes against spread initial literals.
+// Benchmarks for large smi maps.
+
+var keys = Array.from(Array(1e4).keys());
+var keyValuePairs = keys.map((value) => [value, value + 1]);
+var map = new Map(keyValuePairs);
+
+// ----------------------------------------------------------------------------
+// Benchmark: SpreadKeys
+// ----------------------------------------------------------------------------
+
+function SpreadKeys() {
+ var newArr = [...map.keys()];
+ // basic sanity check
+ if (newArr.length != map.size) throw 666;
+ return newArr;
+}
+
+// ----------------------------------------------------------------------------
+// Benchmark: SpreadValues
+// ----------------------------------------------------------------------------
+
+function SpreadValues() {
+ var newArr = [...map.values()];
+ // basic sanity check
+ if (newArr.length != map.size) throw 666;
+ return newArr;
+}
+
+// ----------------------------------------------------------------------------
+// Benchmark: ForOfKeys
+// ----------------------------------------------------------------------------
+
+function ForOfKeys() {
+ var newArr = new Array(map.size);
+ var i = 0;
+ for (let x of map.keys()) {
+ newArr[i] = x;
+ i++;
+ }
+ if (newArr.length != map.size) throw 666;
+ return newArr;
+}
+
+// ----------------------------------------------------------------------------
+// Benchmark: ForOfValues
+// ----------------------------------------------------------------------------
+
+function ForOfValues() {
+ var newArr = new Array(map.size);
+ var i = 0;
+ for (let x of map.values()) {
+ newArr[i] = x;
+ i++;
+ }
+ if (newArr.length != map.size) throw 666;
+ return newArr;
+}
+
+
+// ----------------------------------------------------------------------------
+// Setup and Run
+// ----------------------------------------------------------------------------
+
+load('../base.js');
+
+var success = true;
+
+function PrintResult(name, result) {
+ print(name + '-ArrayLiteralInitialSpreadLargeSmiMap(Score): ' + result);
+}
+
+function PrintError(name, error) {
+ PrintResult('Error: ' + name, error);
+ success = false;
+}
+
+function CreateBenchmark(name, f) {
+ new BenchmarkSuite(name, [1000], [ new Benchmark(name, false, false, 0, f) ]);
+}
+
+CreateBenchmark('ForOfKeys', ForOfKeys);
+CreateBenchmark('ForOfValues', ForOfValues);
+CreateBenchmark('SpreadKeys', SpreadKeys);
+CreateBenchmark('SpreadValues', SpreadValues);
+
+
+BenchmarkSuite.config.doWarmup = true;
+BenchmarkSuite.config.doDeterministic = false;
+BenchmarkSuite.RunSuites({NotifyResult: PrintResult, NotifyError: PrintError});
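
For reference, both schemes in this suite consume the same MapIterator: spread
drives it implicitly through the iteration protocol, while the for-of variant
preallocates and writes by index. In miniature:

const m = new Map([[1, 2], [3, 4]]);
console.log([...m.keys()]);    // [1, 3]
console.log([...m.values()]);  // [2, 4]
const out = new Array(m.size); // preallocated, as in ForOfKeys
let i = 0;
for (const k of m.keys()) out[i++] = k;
console.log(out);              // [1, 3]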
diff --git a/deps/v8/test/js-perf-test/ArrayLiteralInitialSpreadLargeSmiSet/run.js b/deps/v8/test/js-perf-test/ArrayLiteralInitialSpreadLargeSmiSet/run.js
new file mode 100644
index 0000000000..30a4a1666b
--- /dev/null
+++ b/deps/v8/test/js-perf-test/ArrayLiteralInitialSpreadLargeSmiSet/run.js
@@ -0,0 +1,121 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Comparing different iterating schemes against spread initial literals.
+// Benchmarks for large smi sets.
+
+var keys = Array.from(Array(1e4).keys());
+var set = new Set(keys);
+
+// ----------------------------------------------------------------------------
+// Benchmark: Spread
+// ----------------------------------------------------------------------------
+
+function Spread() {
+ var newArr = [...set];
+ // basic sanity check
+ if (newArr.length != set.size) throw 666;
+ return newArr;
+}
+
+// ----------------------------------------------------------------------------
+// Benchmark: SpreadKeys
+// ----------------------------------------------------------------------------
+
+function SpreadKeys() {
+ var newArr = [...set.keys()];
+ // basic sanity check
+ if (newArr.length != set.size) throw 666;
+ return newArr;
+}
+
+// ----------------------------------------------------------------------------
+// Benchmark: SpreadValues
+// ----------------------------------------------------------------------------
+
+function SpreadValues() {
+ var newArr = [...set.values()];
+ // basic sanity check
+ if (newArr.length != set.size) throw 666;
+ return newArr;
+}
+
+// ----------------------------------------------------------------------------
+// Benchmark: ForOf
+// ----------------------------------------------------------------------------
+
+function ForOf() {
+ var newArr = new Array(set.size);
+ var i = 0;
+ for (let x of set) {
+ newArr[i] = x;
+ i++;
+ }
+ if (newArr.length != set.size) throw 666;
+ return newArr;
+}
+
+// ----------------------------------------------------------------------------
+// Benchmark: ForOfKeys
+// ----------------------------------------------------------------------------
+
+function ForOfKeys() {
+ var newArr = new Array(set.size);
+ var i = 0;
+ for (let x of set.keys()) {
+ newArr[i] = x;
+ i++;
+ }
+ if (newArr.length != set.size) throw 666;
+ return newArr;
+}
+
+// ----------------------------------------------------------------------------
+// Benchmark: ForOfValues
+// ----------------------------------------------------------------------------
+
+function ForOfValues() {
+ var newArr = new Array(set.size);
+ var i = 0;
+ for (let kv of set.values()) {
+ newArr[i] = kv;
+ i++;
+ }
+ if (newArr.length != set.size) throw 666;
+ return newArr;
+}
+
+
+// ----------------------------------------------------------------------------
+// Setup and Run
+// ----------------------------------------------------------------------------
+
+load('../base.js');
+
+var success = true;
+
+function PrintResult(name, result) {
+ print(name + '-ArrayLiteralInitialSpreadLargeSmiSet(Score): ' + result);
+}
+
+function PrintError(name, error) {
+ PrintResult('Error: ' + name, error);
+ success = false;
+}
+
+function CreateBenchmark(name, f) {
+ new BenchmarkSuite(name, [1000], [ new Benchmark(name, false, false, 0, f) ]);
+}
+
+CreateBenchmark('ForOf', ForOf);
+CreateBenchmark('ForOfKeys', ForOfKeys);
+CreateBenchmark('ForOfValues', ForOfValues);
+CreateBenchmark('Spread', Spread);
+CreateBenchmark('SpreadKeys', SpreadKeys);
+CreateBenchmark('SpreadValues', SpreadValues);
+
+
+BenchmarkSuite.config.doWarmup = true;
+BenchmarkSuite.config.doDeterministic = false;
+BenchmarkSuite.RunSuites({NotifyResult: PrintResult, NotifyError: PrintError});
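
One detail that makes these six variants comparable: for Set, keys() and
values() are literally the same function, and spreading the set itself walks
the same values, so every benchmark traverses identical data and differs only
in call shape:

console.log(Set.prototype.keys === Set.prototype.values); // true
const s = new Set([1, 2, 3]);
console.log([...s], [...s.keys()], [...s.values()]);      // all [1, 2, 3]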
diff --git a/deps/v8/test/js-perf-test/ArrayLiteralInitialSpreadSmallDoubleHoley/run.js b/deps/v8/test/js-perf-test/ArrayLiteralInitialSpreadSmallDoubleHoley/run.js
new file mode 100644
index 0000000000..19768b73aa
--- /dev/null
+++ b/deps/v8/test/js-perf-test/ArrayLiteralInitialSpreadSmallDoubleHoley/run.js
@@ -0,0 +1,159 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Comparing different copy schemes against spread initial literals.
+// Benchmarks for small holey double arrays.
+
+const smallHoleyArray = Array(100);
+
+for (var i = 0; i < 10; i++) {
+ smallHoleyArray[i] = i + 6.66;
+}
+for (var i = 90; i < 99; i++) {
+ smallHoleyArray[i] = i + 6.66;
+}
+
+// ----------------------------------------------------------------------------
+// Benchmark: Spread
+// ----------------------------------------------------------------------------
+
+function SpreadSmallHoley() {
+ var newArr = [...smallHoleyArray];
+ // basic sanity check
+ if (newArr.length != smallHoleyArray.length) throw 666;
+ return newArr;
+}
+
+// ----------------------------------------------------------------------------
+// Benchmark: ForLength
+// ----------------------------------------------------------------------------
+
+function ForLengthSmallHoley() {
+ var newArr = new Array(smallHoleyArray.length);
+ for (let i = 0; i < smallHoleyArray.length; i++) {
+ newArr[i] = smallHoleyArray[i];
+ }
+ // basic sanity check
+ if (newArr.length != smallHoleyArray.length) throw 666;
+ return newArr;
+}
+
+// ----------------------------------------------------------------------------
+// Benchmark: ForLengthEmpty
+// ----------------------------------------------------------------------------
+
+function ForLengthEmptySmallHoley() {
+ var newArr = [];
+ for (let i = 0; i < smallHoleyArray.length; i++) {
+ newArr[i] = smallHoleyArray[i];
+ }
+ // basic sanity check
+ if (newArr.length != smallHoleyArray.length) throw 666;
+ return newArr;
+}
+
+// ----------------------------------------------------------------------------
+// Benchmark: Slice
+// ----------------------------------------------------------------------------
+
+function SliceSmallHoley() {
+ var newArr = smallHoleyArray.slice();
+ // basic sanity check
+ if (newArr.length != smallHoleyArray.length) throw 666;
+ return newArr;
+}
+
+// ----------------------------------------------------------------------------
+// Benchmark: Slice0
+// ----------------------------------------------------------------------------
+
+function Slice0SmallHoley() {
+ var newArr = smallHoleyArray.slice(0);
+ // basic sanity check
+ if (newArr.length != smallHoleyArray.length) throw 666;
+ return newArr;
+}
+
+// ----------------------------------------------------------------------------
+// Benchmark: ConcatReceive
+// ----------------------------------------------------------------------------
+
+function ConcatReceiveSmallHoley() {
+ var newArr = smallHoleyArray.concat();
+ // basic sanity check
+ if (newArr.length != smallHoleyArray.length) throw 666;
+ return newArr;
+}
+
+// ----------------------------------------------------------------------------
+// Benchmark: ConcatArg
+// ----------------------------------------------------------------------------
+
+function ConcatArgSmallHoley() {
+ var newArr = [].concat(smallHoleyArray);
+ // basic sanity check
+ if (newArr.length != smallHoleyArray.length) throw 666;
+ return newArr;
+}
+
+// ----------------------------------------------------------------------------
+// Benchmark: ForOfPush
+// ----------------------------------------------------------------------------
+
+function ForOfPushSmallHoley() {
+ var newArr = [];
+ for (let x of smallHoleyArray) {
+ newArr.push(x)
+ }
+ // basic sanity check
+ if (newArr.length != smallHoleyArray.length) throw 666;
+ return newArr;
+}
+
+// ----------------------------------------------------------------------------
+// Benchmark: MapId
+// ----------------------------------------------------------------------------
+
+function MapIdSmallHoley() {
+ var newArr = smallHoleyArray.map(x => x);
+ // basic sanity check
+ if (newArr.length != smallHoleyArray.length) throw 666;
+ return newArr;
+}
+
+
+// ----------------------------------------------------------------------------
+// Setup and Run
+// ----------------------------------------------------------------------------
+
+load('../base.js');
+
+var success = true;
+
+function PrintResult(name, result) {
+ print(name + '-ArrayLiteralInitialSpreadSmallDoubleHoley(Score): ' + result);
+}
+
+function PrintError(name, error) {
+ PrintResult('Error: ' + name, error);
+ success = false;
+}
+
+function CreateBenchmark(name, f) {
+ new BenchmarkSuite(name, [1000], [ new Benchmark(name, false, false, 0, f) ]);
+}
+
+CreateBenchmark('Spread', SpreadSmallHoley);
+CreateBenchmark('ForLength', ForLengthSmallHoley);
+CreateBenchmark('ForLengthEmpty', ForLengthEmptySmallHoley);
+CreateBenchmark('Slice', SliceSmallHoley);
+CreateBenchmark('Slice0', Slice0SmallHoley);
+CreateBenchmark('ConcatReceive', ConcatReceiveSmallHoley);
+CreateBenchmark('ConcatArg', ConcatArgSmallHoley);
+CreateBenchmark('ForOfPush', ForOfPushSmallHoley);
+CreateBenchmark('MapId', MapIdSmallHoley);
+
+BenchmarkSuite.config.doWarmup = true;
+BenchmarkSuite.config.doDeterministic = false;
+BenchmarkSuite.RunSuites({NotifyResult: PrintResult, NotifyError: PrintError});
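
The holey variants exercise genuinely different semantics, not just a
different internal representation: spread and for-of read holes as undefined
and produce a dense copy, while slice and concat preserve the holes. A small
demonstration:

const holey = [, 1, , 2];              // holes at indices 0 and 2
const spread = [...holey];             // [undefined, 1, undefined, 2], dense
const sliced = holey.slice();          // [, 1, , 2], still holey
console.log(0 in spread, 0 in sliced); // true false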
diff --git a/deps/v8/test/js-perf-test/ArrayLiteralInitialSpreadSmallDoublePacked/run.js b/deps/v8/test/js-perf-test/ArrayLiteralInitialSpreadSmallDoublePacked/run.js
new file mode 100644
index 0000000000..88d382c4fc
--- /dev/null
+++ b/deps/v8/test/js-perf-test/ArrayLiteralInitialSpreadSmallDoublePacked/run.js
@@ -0,0 +1,158 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Comparing different copy schemes against spread initial literals.
+// Benchmarks for small packed double arrays.
+
+var smallArray = Array.from(Array(100).keys());
+
+for (var i = 0; i < 100; i++) {
+ smallArray[i] += 6.66;
+}
+
+
+// ----------------------------------------------------------------------------
+// Benchmark: Spread
+// ----------------------------------------------------------------------------
+
+function SpreadSmall() {
+ var newArr = [...smallArray];
+ // basic sanity check
+ if (newArr.length != smallArray.length) throw 666;
+ return newArr;
+}
+
+// ----------------------------------------------------------------------------
+// Benchmark: ForLength
+// ----------------------------------------------------------------------------
+
+function ForLengthSmall() {
+ var newArr = new Array(smallArray.length);
+ for (let i = 0; i < smallArray.length; i++) {
+ newArr[i] = smallArray[i];
+ }
+ // basic sanity check
+ if (newArr.length != smallArray.length) throw 666;
+ return newArr;
+}
+
+// ----------------------------------------------------------------------------
+// Benchmark: ForLengthEmpty
+// ----------------------------------------------------------------------------
+
+function ForLengthEmptySmall() {
+ var newArr = [];
+ for (let i = 0; i < smallArray.length; i++) {
+ newArr[i] = smallArray[i];
+ }
+ // basic sanity check
+ if (newArr.length != smallArray.length) throw 666;
+ return newArr;
+}
+
+// ----------------------------------------------------------------------------
+// Benchmark: Slice
+// ----------------------------------------------------------------------------
+
+function SliceSmall() {
+ var newArr = smallArray.slice();
+ // basic sanity check
+ if (newArr.length != smallArray.length) throw 666;
+ return newArr;
+}
+
+// ----------------------------------------------------------------------------
+// Benchmark: Slice0
+// ----------------------------------------------------------------------------
+
+function Slice0Small() {
+ var newArr = smallArray.slice(0);
+ // basic sanity check
+ if (newArr.length != smallArray.length) throw 666;
+ return newArr;
+}
+
+// ----------------------------------------------------------------------------
+// Benchmark: ConcatReceive
+// ----------------------------------------------------------------------------
+
+function ConcatReceiveSmall() {
+ var newArr = smallArray.concat();
+ // basic sanity check
+ if (newArr.length != smallArray.length) throw 666;
+ return newArr;
+}
+
+// ----------------------------------------------------------------------------
+// Benchmark: ConcatArg
+// ----------------------------------------------------------------------------
+
+function ConcatArgSmall() {
+ var newArr = [].concat(smallArray);
+ // basic sanity check
+ if (newArr.length != smallArray.length) throw 666;
+ return newArr;
+}
+
+// ----------------------------------------------------------------------------
+// Benchmark: ForOfPush
+// ----------------------------------------------------------------------------
+
+function ForOfPushSmall() {
+ var newArr = [];
+ for (let x of smallArray) {
+ newArr.push(x)
+ }
+ // basic sanity check
+ if (newArr.length != smallArray.length) throw 666;
+ return newArr;
+}
+
+// ----------------------------------------------------------------------------
+// Benchmark: MapId
+// ----------------------------------------------------------------------------
+
+function MapIdSmall() {
+ var newArr = smallArray.map(x => x);
+ // basic sanity check
+ if (newArr.length != smallArray.length) throw 666;
+ return newArr;
+}
+
+
+// ----------------------------------------------------------------------------
+// Setup and Run
+// ----------------------------------------------------------------------------
+
+load('../base.js');
+
+var success = true;
+
+function PrintResult(name, result) {
+ print(name + '-ArrayLiteralInitialSpreadSmallDoublePacked(Score): ' + result);
+}
+
+function PrintError(name, error) {
+ PrintResult('Error: ' + name, error);
+ success = false;
+}
+
+function CreateBenchmark(name, f) {
+ new BenchmarkSuite(name, [1000], [ new Benchmark(name, false, false, 0, f) ]);
+}
+
+CreateBenchmark('Spread', SpreadSmall);
+CreateBenchmark('ForLength', ForLengthSmall);
+CreateBenchmark('ForLengthEmpty', ForLengthEmptySmall);
+CreateBenchmark('Slice', SliceSmall);
+CreateBenchmark('Slice0', Slice0Small);
+CreateBenchmark('ConcatReceive', ConcatReceiveSmall);
+CreateBenchmark('ConcatArg', ConcatArgSmall);
+CreateBenchmark('ForOfPush', ForOfPushSmall);
+CreateBenchmark('MapId', MapIdSmall);
+
+
+BenchmarkSuite.config.doWarmup = true;
+BenchmarkSuite.config.doDeterministic = false;
+BenchmarkSuite.RunSuites({NotifyResult: PrintResult, NotifyError: PrintError});
diff --git a/deps/v8/test/js-perf-test/ArrayLiteralInitialSpreadSmallSmiMap/run.js b/deps/v8/test/js-perf-test/ArrayLiteralInitialSpreadSmallSmiMap/run.js
new file mode 100644
index 0000000000..a2f8bad5c0
--- /dev/null
+++ b/deps/v8/test/js-perf-test/ArrayLiteralInitialSpreadSmallSmiMap/run.js
@@ -0,0 +1,93 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Comparing different iterating schemes against spread initial literals.
+// Benchmarks for small smi maps.
+
+var keys = Array.from(Array(50).keys());
+var keyValuePairs = keys.map((value) => [value, value + 1]);
+var map = new Map(keyValuePairs);
+
+// ----------------------------------------------------------------------------
+// Benchmark: SpreadKeys
+// ----------------------------------------------------------------------------
+
+function SpreadKeys() {
+ var newArr = [...map.keys()];
+ // basic sanity check
+ if (newArr.length != map.size) throw 666;
+ return newArr;
+}
+
+// ----------------------------------------------------------------------------
+// Benchmark: SpreadValues
+// ----------------------------------------------------------------------------
+
+function SpreadValues() {
+ var newArr = [...map.values()];
+ // basic sanity check
+ if (newArr.length != map.size) throw 666;
+ return newArr;
+}
+
+// ----------------------------------------------------------------------------
+// Benchmark: ForOfKeys
+// ----------------------------------------------------------------------------
+
+function ForOfKeys() {
+ var newArr = new Array(map.size);
+ var i = 0;
+ for (let x of map.keys()) {
+ newArr[i] = x;
+ i++;
+ }
+ if (newArr.length != map.size) throw 666;
+ return newArr;
+}
+
+// ----------------------------------------------------------------------------
+// Benchmark: ForOfValues
+// ----------------------------------------------------------------------------
+
+function ForOfValues() {
+ var newArr = new Array(map.size);
+ var i = 0;
+ for (let x of map.values()) {
+ newArr[i] = x;
+ i++;
+ }
+ if (newArr.length != map.size) throw 666;
+ return newArr;
+}
+
+// ----------------------------------------------------------------------------
+// Setup and Run
+// ----------------------------------------------------------------------------
+
+load('../base.js');
+
+var success = true;
+
+function PrintResult(name, result) {
+ print(name + '-ArrayLiteralInitialSpreadSmallSmiMap(Score): ' + result);
+}
+
+function PrintError(name, error) {
+ PrintResult('Error: ' + name, error);
+ success = false;
+}
+
+function CreateBenchmark(name, f) {
+ new BenchmarkSuite(name, [1000], [ new Benchmark(name, false, false, 0, f) ]);
+}
+
+CreateBenchmark('ForOfKeys', ForOfKeys);
+CreateBenchmark('ForOfValues', ForOfValues);
+CreateBenchmark('SpreadKeys', SpreadKeys);
+CreateBenchmark('SpreadValues', SpreadValues);
+
+
+BenchmarkSuite.config.doWarmup = true;
+BenchmarkSuite.config.doDeterministic = false;
+BenchmarkSuite.RunSuites({NotifyResult: PrintResult, NotifyError: PrintError});
diff --git a/deps/v8/test/js-perf-test/ArrayLiteralInitialSpreadSmallSmiSet/run.js b/deps/v8/test/js-perf-test/ArrayLiteralInitialSpreadSmallSmiSet/run.js
new file mode 100644
index 0000000000..35e9bcec07
--- /dev/null
+++ b/deps/v8/test/js-perf-test/ArrayLiteralInitialSpreadSmallSmiSet/run.js
@@ -0,0 +1,120 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Comparing different iterating schemes against spread initial literals.
+// Benchmarks for small smi sets.
+
+var keys = Array.from(Array(50).keys());
+var set = new Set(keys);
+
+// ----------------------------------------------------------------------------
+// Benchmark: Spread
+// ----------------------------------------------------------------------------
+
+function Spread() {
+ var newArr = [...set];
+ // basic sanity check
+ if (newArr.length != set.size) throw 666;
+ return newArr;
+}
+
+// ----------------------------------------------------------------------------
+// Benchmark: SpreadKeys
+// ----------------------------------------------------------------------------
+
+function SpreadKeys() {
+ var newArr = [...set.keys()];
+ // basic sanity check
+ if (newArr.length != set.size) throw 666;
+ return newArr;
+}
+
+// ----------------------------------------------------------------------------
+// Benchmark: SpreadValues
+// ----------------------------------------------------------------------------
+
+function SpreadValues() {
+ var newArr = [...set.values()];
+ // basic sanity check
+ if (newArr.length != set.size) throw 666;
+ return newArr;
+}
+
+// ----------------------------------------------------------------------------
+// Benchmark: ForOf
+// ----------------------------------------------------------------------------
+
+function ForOf() {
+ var newArr = new Array(set.size);
+ var i = 0;
+ for (let x of set) {
+ newArr[i] = x;
+ i++;
+ }
+ if (newArr.length != set.size) throw 666;
+ return newArr;
+}
+
+// ----------------------------------------------------------------------------
+// Benchmark: ForOfKeys
+// ----------------------------------------------------------------------------
+
+function ForOfKeys() {
+ var newArr = new Array(set.size);
+ var i = 0;
+ for (let x of set.keys()) {
+ newArr[i] = x;
+ i++;
+ }
+ if (newArr.length != set.size) throw 666;
+ return newArr;
+}
+
+// ----------------------------------------------------------------------------
+// Benchmark: ForOfValues
+// ----------------------------------------------------------------------------
+
+function ForOfValues() {
+ var newArr = new Array(set.size);
+ var i = 0;
+ for (let x of set.values()) {
+ newArr[i] = x;
+ i++;
+ }
+ if (newArr.length != set.size) throw 666;
+ return newArr;
+}
+
+// ----------------------------------------------------------------------------
+// Setup and Run
+// ----------------------------------------------------------------------------
+
+load('../base.js');
+
+var success = true;
+
+function PrintResult(name, result) {
+ print(name + '-ArrayLiteralInitialSpreadSmallSmiSet(Score): ' + result);
+}
+
+function PrintError(name, error) {
+ PrintResult('Error: ' + name, error);
+ success = false;
+}
+
+function CreateBenchmark(name, f) {
+ new BenchmarkSuite(name, [1000], [ new Benchmark(name, false, false, 0, f) ]);
+}
+
+CreateBenchmark('ForOf', ForOf);
+CreateBenchmark('ForOfKeys', ForOfKeys);
+CreateBenchmark('ForOfValues', ForOfValues);
+CreateBenchmark('Spread', Spread);
+CreateBenchmark('SpreadKeys', SpreadKeys);
+CreateBenchmark('SpreadValues', SpreadValues);
+
+
+BenchmarkSuite.config.doWarmup = true;
+BenchmarkSuite.config.doDeterministic = false;
+BenchmarkSuite.RunSuites({NotifyResult: PrintResult, NotifyError: PrintError});
diff --git a/deps/v8/test/js-perf-test/AsyncAwait/baseline-babel-es2017.js b/deps/v8/test/js-perf-test/AsyncAwait/baseline-babel-es2017.js
index 1be3074f3b..37709d739b 100644
--- a/deps/v8/test/js-perf-test/AsyncAwait/baseline-babel-es2017.js
+++ b/deps/v8/test/js-perf-test/AsyncAwait/baseline-babel-es2017.js
@@ -160,10 +160,10 @@ function Setup() {
return a;
})();
- %RunMicrotasks();
+ %PerformMicrotaskCheckpoint();
}
function Basic() {
a();
- %RunMicrotasks();
+ %PerformMicrotaskCheckpoint();
}
diff --git a/deps/v8/test/js-perf-test/AsyncAwait/baseline-naive-promises.js b/deps/v8/test/js-perf-test/AsyncAwait/baseline-naive-promises.js
index f59ae86194..9b9c091a13 100644
--- a/deps/v8/test/js-perf-test/AsyncAwait/baseline-naive-promises.js
+++ b/deps/v8/test/js-perf-test/AsyncAwait/baseline-naive-promises.js
@@ -34,7 +34,7 @@ function Setup() {
b = function b(p) { return p; };
a = function a(p) { return p; };
- %RunMicrotasks();
+ %PerformMicrotaskCheckpoint();
}
function Basic() {
@@ -48,5 +48,5 @@ function Basic() {
.then(c)
.then(b)
.then(a);
- %RunMicrotasks();
+ %PerformMicrotaskCheckpoint();
}
diff --git a/deps/v8/test/js-perf-test/AsyncAwait/native.js b/deps/v8/test/js-perf-test/AsyncAwait/native.js
index 7979d2f4db..e9504c3a19 100644
--- a/deps/v8/test/js-perf-test/AsyncAwait/native.js
+++ b/deps/v8/test/js-perf-test/AsyncAwait/native.js
@@ -34,10 +34,10 @@ function Setup() {
b = async function b() { return c(); };
a = async function a() { return b(); };
- %RunMicrotasks();
+ %PerformMicrotaskCheckpoint();
}
function Basic() {
a();
- %RunMicrotasks();
+ %PerformMicrotaskCheckpoint();
}
diff --git a/deps/v8/test/js-perf-test/Dates/run.js b/deps/v8/test/js-perf-test/Dates/run.js
new file mode 100644
index 0000000000..c57e56990a
--- /dev/null
+++ b/deps/v8/test/js-perf-test/Dates/run.js
@@ -0,0 +1,20 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+load('../base.js');
+load('toLocaleString.js');
+
+function PrintResult(name, result) {
+ console.log(name);
+ console.log(name + '-Dates(Score): ' + result);
+}
+
+function PrintError(name, error) {
+ PrintResult(name, error);
+}
+
+BenchmarkSuite.config.doWarmup = undefined;
+BenchmarkSuite.config.doDeterministic = undefined;
+
+BenchmarkSuite.RunSuites({ NotifyResult: PrintResult,
+ NotifyError: PrintError });
diff --git a/deps/v8/test/js-perf-test/Dates/toLocaleString.js b/deps/v8/test/js-perf-test/Dates/toLocaleString.js
new file mode 100644
index 0000000000..fd89a7cdb8
--- /dev/null
+++ b/deps/v8/test/js-perf-test/Dates/toLocaleString.js
@@ -0,0 +1,20 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+function DateToLocaleDateString() {
+ let d = new Date();
+ d.toLocaleDateString()
+}
+createSuite('toLocaleDateString', 100000, DateToLocaleDateString, ()=>{});
+
+function DateToLocaleString() {
+ let d = new Date();
+ d.toLocaleString()
+}
+createSuite('toLocaleString', 100000, DateToLocaleString, ()=>{});
+
+function DateToLocaleTimeString() {
+ let d = new Date();
+ d.toLocaleTimeString()
+}
+createSuite('toLocaleTimeString', 100000, DateToLocaleTimeString, ()=>{});
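
A hedged reading of what dominates these scores: each
Date.prototype.toLocale*String call builds its locale-formatting machinery
internally, roughly the cost of constructing an Intl.DateTimeFormat per call,
which the explicit API lets application code hoist:

const d = new Date();
d.toLocaleDateString();                 // formatter built per call
const fmt = new Intl.DateTimeFormat();  // hoisted once: the usual fast path
fmt.format(d);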
diff --git a/deps/v8/test/js-perf-test/Inspector/debugger.js b/deps/v8/test/js-perf-test/Inspector/debugger.js
index 8e44aa7710..e9c862563b 100644
--- a/deps/v8/test/js-perf-test/Inspector/debugger.js
+++ b/deps/v8/test/js-perf-test/Inspector/debugger.js
@@ -69,7 +69,7 @@ function AsyncStacksInstrumentation() {
p = p.then(nopCallback);
}
p = p.then(() => done = true);
- while (!done) %RunMicrotasks();
+ while (!done) %PerformMicrotaskCheckpoint();
}
})();
diff --git a/deps/v8/test/js-perf-test/JSTests.json b/deps/v8/test/js-perf-test/JSTests.json
index c2aacb452f..88f39b9fa8 100644
--- a/deps/v8/test/js-perf-test/JSTests.json
+++ b/deps/v8/test/js-perf-test/JSTests.json
@@ -128,6 +128,130 @@
]
},
{
+ "name": "ArrayLiteralInitialSpreadSmallDoublePacked",
+ "path": ["ArrayLiteralInitialSpreadSmallDoublePacked"],
+ "main": "run.js",
+ "resources": [],
+ "results_regexp": "^%s\\-ArrayLiteralInitialSpreadSmallDoublePacked\\(Score\\): (.+)$",
+ "tests": [
+ {"name": "Spread"},
+ {"name": "ForLength"},
+ {"name": "ForLengthEmpty"},
+ {"name": "Slice"},
+ {"name": "Slice0"},
+ {"name": "ConcatReceive"},
+ {"name": "ConcatArg"},
+ {"name": "ForOfPush"},
+ {"name": "MapId"}
+ ]
+ },
+ {
+ "name": "ArrayLiteralInitialSpreadLargeDoublePacked",
+ "path": ["ArrayLiteralInitialSpreadLargeDoublePacked"],
+ "main": "run.js",
+ "resources": [],
+ "results_regexp": "^%s\\-ArrayLiteralInitialSpreadLargeDoublePacked\\(Score\\): (.+)$",
+ "tests": [
+ {"name": "Spread"},
+ {"name": "ForLength"},
+ {"name": "ForLengthEmpty"},
+ {"name": "Slice"},
+ {"name": "Slice0"},
+ {"name": "ConcatReceive"},
+ {"name": "ConcatArg"}
+ ]
+ },
+ {
+ "name": "ArrayLiteralInitialSpreadSmallDoubleHoley",
+ "path": ["ArrayLiteralInitialSpreadSmallDoubleHoley"],
+ "main": "run.js",
+ "resources": [],
+ "results_regexp": "^%s\\-ArrayLiteralInitialSpreadSmallDoubleHoley\\(Score\\): (.+)$",
+ "tests": [
+ {"name": "Spread"},
+ {"name": "ForLength"},
+ {"name": "ForLengthEmpty"},
+ {"name": "Slice"},
+ {"name": "Slice0"},
+ {"name": "ConcatReceive"},
+ {"name": "ConcatArg"},
+ {"name": "ForOfPush"},
+ {"name": "MapId"}
+ ]
+ },
+ {
+ "name": "ArrayLiteralInitialSpreadLargeDoubleHoley",
+ "path": ["ArrayLiteralInitialSpreadLargeDoubleHoley"],
+ "main": "run.js",
+ "resources": [],
+ "results_regexp": "^%s\\-ArrayLiteralInitialSpreadLargeDoubleHoley\\(Score\\): (.+)$",
+ "tests": [
+ {"name": "Spread"},
+ {"name": "ForLength"},
+ {"name": "ForLengthEmpty"},
+ {"name": "Slice"},
+ {"name": "Slice0"},
+ {"name": "ConcatReceive"},
+ {"name": "ConcatArg"}
+ ]
+ },
+ {
+ "name": "ArrayLiteralInitialSpreadSmallSmiMap",
+ "path": ["ArrayLiteralInitialSpreadSmallSmiMap"],
+ "main": "run.js",
+ "resources": [],
+ "results_regexp": "^%s\\-ArrayLiteralInitialSpreadSmallSmiMap\\(Score\\): (.+)$",
+ "tests": [
+ {"name": "ForOfKeys"},
+ {"name": "ForOfValues"},
+ {"name": "SpreadKeys"},
+ {"name": "SpreadValues"}
+ ]
+ },
+ {
+ "name": "ArrayLiteralInitialSpreadLargeSmiMap",
+ "path": ["ArrayLiteralInitialSpreadLargeSmiMap"],
+ "main": "run.js",
+ "resources": [],
+ "results_regexp": "^%s\\-ArrayLiteralInitialSpreadLargeSmiMap\\(Score\\): (.+)$",
+ "tests": [
+ {"name": "ForOfKeys"},
+ {"name": "ForOfValues"},
+ {"name": "SpreadKeys"},
+ {"name": "SpreadValues"}
+ ]
+ },
+ {
+ "name": "ArrayLiteralInitialSpreadSmallSmiSet",
+ "path": ["ArrayLiteralInitialSpreadSmallSmiSet"],
+ "main": "run.js",
+ "resources": [],
+ "results_regexp": "^%s\\-ArrayLiteralInitialSpreadSmallSmiSet\\(Score\\): (.+)$",
+ "tests": [
+ {"name": "ForOf"},
+ {"name": "ForOfKeys"},
+ {"name": "ForOfValues"},
+ {"name": "Spread"},
+ {"name": "SpreadKeys"},
+ {"name": "SpreadValues"}
+ ]
+ },
+ {
+ "name": "ArrayLiteralInitialSpreadLargeSmiSet",
+ "path": ["ArrayLiteralInitialSpreadLargeSmiSet"],
+ "main": "run.js",
+ "resources": [],
+ "results_regexp": "^%s\\-ArrayLiteralInitialSpreadLargeSmiSet\\(Score\\): (.+)$",
+ "tests": [
+ {"name": "ForOf"},
+ {"name": "ForOfKeys"},
+ {"name": "ForOfValues"},
+ {"name": "Spread"},
+ {"name": "SpreadKeys"},
+ {"name": "SpreadValues"}
+ ]
+ },
+ {
"name": "ArrayLiteralSpread",
"path": ["ArrayLiteralSpread"],
"main": "run.js",
@@ -359,6 +483,17 @@
]
},
{
+ "name": "StringLocaleCompare",
+ "main": "run.js",
+ "resources": [ "string-localeCompare.js" ],
+ "test_flags": [ "string-localeCompare" ],
+ "results_regexp": "^%s\\-Strings\\(Score\\): (.+)$",
+ "run_count": 1,
+ "tests": [
+ {"name": "StringLocaleCompare"}
+ ]
+ },
+ {
"name": "StringMatchAll",
"main": "run.js",
"resources": [ "string-matchall.js" ],
@@ -394,6 +529,21 @@
]
},
{
+ "name": "StringStartsWith",
+ "main": "run.js",
+ "resources": [ "string-startswith.js" ],
+ "test_flags": [ "string-startswith" ],
+ "results_regexp": "^%s\\-Strings\\(Score\\): (.+)$",
+ "run_count": 1,
+ "flags": [ "--allow-natives-syntax" ],
+ "tests": [
+ {"name": "DirectStringsDirectSearch"},
+ {"name": "ConsStringsDirectSearch"},
+ {"name": "DirectStringsConsSearch"},
+ {"name": "ConsStringsConsSearch"}
+ ]
+ },
+ {
"name": "StringSubstring",
"main": "run.js",
"resources": [ "string-substring.js" ],
@@ -610,6 +760,42 @@
"test_flags": ["construct-all-typedarrays"]
},
{
+ "name": "JoinBigIntTypes",
+ "main": "run.js",
+ "resources": ["base.js", "join.js", "join-bigint.js"],
+ "test_flags": ["join-bigint"]
+ },
+ {
+ "name": "JoinFloatTypes",
+ "main": "run.js",
+ "resources": ["base.js", "join.js", "join-float.js"],
+ "test_flags": ["join-float"]
+ },
+ {
+ "name": "JoinIntTypes",
+ "main": "run.js",
+ "resources": ["base.js", "join.js", "join-int.js"],
+ "test_flags": ["join-int"]
+ },
+ {
+ "name": "JoinWithSeparatorBigIntTypes",
+ "main": "run.js",
+ "resources": ["base.js", "join.js", "join-sep-bigint.js"],
+ "test_flags": ["join-sep-bigint"]
+ },
+ {
+ "name": "JoinWithSeparatorFloatTypes",
+ "main": "run.js",
+ "resources": ["base.js", "join.js", "join-sep-float.js"],
+ "test_flags": ["join-sep-float"]
+ },
+ {
+ "name": "JoinWithSeparatorIntTypes",
+ "main": "run.js",
+ "resources": ["base.js", "join.js", "join-sep-int.js"],
+ "test_flags": ["join-sep-int"]
+ },
+ {
"name": "SetFromArrayLike",
"main": "run.js",
"resources": ["set-from-arraylike.js"],
@@ -636,37 +822,37 @@
{
"name": "SortIntTypes",
"main": "run.js",
- "resources": ["sort.js", "sort-int.js"],
+ "resources": ["base.js", "sort.js", "sort-int.js"],
"test_flags": ["sort-int"]
},
{
"name": "SortBigIntTypes",
"main": "run.js",
- "resources": ["sort.js", "sort-bigint.js"],
+ "resources": ["base.js", "sort.js", "sort-bigint.js"],
"test_flags": ["sort-bigint"]
},
{
"name": "SortFloatTypes",
"main": "run.js",
- "resources": ["sort.js", "sort-float.js"],
+ "resources": ["base.js", "sort.js", "sort-float.js"],
"test_flags": ["sort-float"]
},
{
"name": "SortCustomCompareFnIntTypes",
"main": "run.js",
- "resources": ["sort.js", "sort-cmpfn-int.js"],
+ "resources": ["base.js", "sort.js", "sort-cmpfn-int.js"],
"test_flags": ["sort-cmpfn-int"]
},
{
"name": "SortCustomCompareFnBigIntTypes",
"main": "run.js",
- "resources": ["sort.js", "sort-cmpfn-bigint.js"],
+ "resources": ["base.js", "sort.js", "sort-cmpfn-bigint.js"],
"test_flags": ["sort-cmpfn-bigint"]
},
{
"name": "SortCustomCompareFnFloatTypes",
"main": "run.js",
- "resources": ["sort.js", "sort-cmpfn-float.js"],
+ "resources": ["base.js", "sort.js", "sort-cmpfn-float.js"],
"test_flags": ["sort-cmpfn-float"]
},
{
@@ -763,13 +949,18 @@
{"name": "SmallMixedArrayOf"},
{"name": "SmiFrom"},
{"name": "SmallSmiFrom"},
- {"name": "SmiNoMapFrom"},
+ {"name": "SmiCowNoMapFrom"},
+ {"name": "SmiNonCowNoMapFrom"},
{"name": "SmiNoIteratorFrom"},
{"name": "TransplantedFrom"},
{"name": "DoubleFrom"},
+ {"name": "DoubleNoMapFrom"},
{"name": "StringFrom"},
- {"name": "StringNoMapFrom"},
+ {"name": "StringCowNoMapFrom"},
+ {"name": "StringNonCowNoMapFrom"},
{"name": "MixedFrom"},
+ {"name": "MixedCowNoMapFrom"},
+ {"name": "MixedNonCowNoMapFrom"},
{"name": "Array.slice(500)"},
{"name": "Array.slice(500,999)"},
{"name": "Array.slice(-500)"},
@@ -787,6 +978,22 @@
]
},
{
+ "name": "ArrayIndexOfIncludesPolymorphic",
+ "path": ["ArrayIndexOfIncludesPolymorphic"],
+ "main": "run.js",
+ "resources": ["indexof-includes-polymorphic.js"],
+ "test_flags": ["indexof-includes-polymorphic"],
+ "results_regexp": "^%s\\-ArrayIndexOfIncludesPolymorphic\\(Score\\): (.+)$",
+ "flags": [
+ "--allow-natives-syntax"
+ ],
+ "tests": [
+ {"name": "for loop"},
+ {"name": "Array#indexOf"},
+ {"name": "Array#includes"}
+ ]
+ },
+ {
"name": "ArraySort",
"path": ["ArraySort"],
"main": "run.js",
@@ -1061,6 +1268,18 @@
]
},
{
+ "name": "Dates",
+ "path": ["Dates"],
+ "main": "run.js",
+ "resources": ["toLocaleString.js"],
+ "results_regexp": "^%s\\-Dates\\(Score\\): (.+)$",
+ "tests": [
+ {"name": "toLocaleDateString"},
+ {"name": "toLocaleString"},
+ {"name": "toLocaleTimeString"}
+ ]
+ },
+ {
"name": "ExpressionDepth",
"path": ["ExpressionDepth"],
"main": "run.js",
@@ -1126,12 +1345,16 @@
"path": ["Numbers"],
"main": "run.js",
"flags": ["--allow-natives-syntax"],
- "resources": [ "toNumber.js"],
+ "resources": [
+ "toNumber.js",
+ "toLocaleString.js"
+ ],
"results_regexp": "^%s\\-Numbers\\(Score\\): (.+)$",
"tests": [
{"name": "Constructor"},
{"name": "UnaryPlus"},
- {"name": "ParseFloat"}
+ {"name": "ParseFloat"},
+ {"name": "toLocaleString"}
]
},
{
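
A note on the JSTests.json contract, assuming the perf runner's conventional
behavior: each test name is substituted for %s in results_regexp and the first
capture group is read as the score, which is why every run.js above prints
lines of the form <name>-<SuiteName>(Score): <value>. Sketch of the assumed
matching step:

const pattern = '^%s\\-Dates\\(Score\\): (.+)$';         // from JSTests.json
const re = new RegExp(pattern.replace('%s', 'toLocaleString'));
const line = 'toLocaleString-Dates(Score): 123';         // printed by run.js
console.log(re.exec(line)[1]);                           // '123'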
diff --git a/deps/v8/test/js-perf-test/Modules/run.js b/deps/v8/test/js-perf-test/Modules/run.js
index e5f91e1aa9..4c038801a9 100644
--- a/deps/v8/test/js-perf-test/Modules/run.js
+++ b/deps/v8/test/js-perf-test/Modules/run.js
@@ -23,21 +23,21 @@ const iterations = 10000;
function BasicExport() {
let success = false;
import("basic-export.js").then(m => { m.bench(); success = true; });
- %RunMicrotasks();
+ %PerformMicrotaskCheckpoint();
if (!success) throw new Error(666);
}
function BasicImport() {
let success = false;
import("basic-import.js").then(m => { m.bench(); success = true; });
- %RunMicrotasks();
+ %PerformMicrotaskCheckpoint();
if (!success) throw new Error(666);
}
function BasicNamespace() {
let success = false;
import("basic-namespace.js").then(m => { m.bench(); success = true; });
- %RunMicrotasks();
+ %PerformMicrotaskCheckpoint();
if (!success) throw new Error(666);
}
diff --git a/deps/v8/test/js-perf-test/Numbers/run.js b/deps/v8/test/js-perf-test/Numbers/run.js
index cdfbf25a70..d48227826a 100644
--- a/deps/v8/test/js-perf-test/Numbers/run.js
+++ b/deps/v8/test/js-perf-test/Numbers/run.js
@@ -3,6 +3,7 @@
// found in the LICENSE file.
load('../base.js');
load('toNumber.js');
+load('toLocaleString.js');
function PrintResult(name, result) {
console.log(name);
diff --git a/deps/v8/test/js-perf-test/Numbers/toLocaleString.js b/deps/v8/test/js-perf-test/Numbers/toLocaleString.js
new file mode 100644
index 0000000000..8dd5a535ad
--- /dev/null
+++ b/deps/v8/test/js-perf-test/Numbers/toLocaleString.js
@@ -0,0 +1,14 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function NumberToLocaleString() {
+ Number(0).toLocaleString()
+ Number(-12).toLocaleString()
+ Number(13).toLocaleString()
+ Number(123456789).toLocaleString()
+ Number(1234567.89).toLocaleString()
+ Number(-123456789).toLocaleString()
+ Number(-1234567.89).toLocaleString()
+}
+createSuite('toLocaleString', 100000, NumberToLocaleString, ()=>{});
diff --git a/deps/v8/test/js-perf-test/RegExp/base.js b/deps/v8/test/js-perf-test/RegExp/base.js
index 31f7206f07..3883b7f939 100644
--- a/deps/v8/test/js-perf-test/RegExp/base.js
+++ b/deps/v8/test/js-perf-test/RegExp/base.js
@@ -5,6 +5,7 @@
function benchName(bench, setup) {
var name = bench.name;
if (setup) name += "/" + setup.name;
+ return name;
}
function slowBenchName(bench, setup) {
@@ -30,7 +31,7 @@ function createBenchmarkSuite(name) {
return new BenchmarkSuite(
name, [1000],
benchmarks.map(([bench, setup]) =>
- new Benchmark(benchName(bench, setup), false, false, 0, bench,
+ new Benchmark(benchName(bench, setup), false, false, 100000, bench,
setup)));
}
diff --git a/deps/v8/test/js-perf-test/StringIterators/string-iterator.js b/deps/v8/test/js-perf-test/StringIterators/string-iterator.js
index 8a7e323b42..e46526447c 100644
--- a/deps/v8/test/js-perf-test/StringIterators/string-iterator.js
+++ b/deps/v8/test/js-perf-test/StringIterators/string-iterator.js
@@ -21,8 +21,8 @@ function Spread_OneByteShort() {
function Spread_OneByteShortTearDown() {
var expected = "A|l|p|h|a|b|e|t|-|S|o|u|p";
- return assert(Array.isArray(result))
- && assertEquals(expected, result.join("|"));
+ assert(Array.isArray(result));
+ assertEquals(expected, result.join("|"));
}
// ----------------------------------------------------------------------------
@@ -44,8 +44,8 @@ function Spread_TwoByteShort() {
function Spread_TwoByteShortTearDown() {
var expected = "\u5FCD|\u8005|\u306E|\u653B|\u6483";
- return assert(Array.isArray(result))
- && assertEquals(expected, result.join("|"));
+ assert(Array.isArray(result));
+ assertEquals(expected, result.join("|"));
}
// ----------------------------------------------------------------------------
@@ -69,8 +69,8 @@ function Spread_WithSurrogatePairsShort() {
function Spread_WithSurrogatePairsShortTearDown() {
var expected =
"\uD83C\uDF1F|\u5FCD|\u8005|\u306E|\u653B|\u6483|\uD83C\uDF1F";
- return assert(Array.isArray(result))
- && assertEquals(expected, result.join("|"));
+ assert(Array.isArray(result));
+ assertEquals(expected, result.join("|"));
}
// ----------------------------------------------------------------------------
@@ -92,7 +92,7 @@ function ForOf_OneByteShort() {
}
function ForOf_OneByteShortTearDown() {
- return assertEquals(string, result);
+ assertEquals(string, result);
}
// ----------------------------------------------------------------------------
@@ -114,7 +114,7 @@ function ForOf_TwoByteShort() {
}
function ForOf_TwoByteShortTearDown() {
- return assertEquals(string, result);
+ assertEquals(string, result);
}
// ----------------------------------------------------------------------------
@@ -137,7 +137,7 @@ function ForOf_WithSurrogatePairsShort() {
}
function ForOf_WithSurrogatePairsShortTearDown() {
- return assertEquals(string, result);
+ assertEquals(string, result);
}
// ----------------------------------------------------------------------------
@@ -159,7 +159,7 @@ function ForOf_OneByteLong() {
}
function ForOf_OneByteLongTearDown() {
- return assertEquals(string, result);
+ assertEquals(string, result);
}
// ----------------------------------------------------------------------------
@@ -181,7 +181,7 @@ function ForOf_TwoByteLong() {
}
function ForOf_TwoByteLongTearDown() {
- return assertEquals(string, result);
+ assertEquals(string, result);
}
// ----------------------------------------------------------------------------
@@ -204,5 +204,5 @@ function ForOf_WithSurrogatePairsLong() {
}
function ForOf_WithSurrogatePairsLongTearDown() {
- return assertEquals(string, result);
+ assertEquals(string, result);
}
diff --git a/deps/v8/test/js-perf-test/Strings/string-localeCompare.js b/deps/v8/test/js-perf-test/Strings/string-localeCompare.js
new file mode 100644
index 0000000000..6c8b609825
--- /dev/null
+++ b/deps/v8/test/js-perf-test/Strings/string-localeCompare.js
@@ -0,0 +1,19 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+new BenchmarkSuite('StringLocaleCompare', [1000000], [
+ new Benchmark('StringLocaleCompare', false, false, 0,
+ StringLocaleCompare),
+]);
+
+function StringLocaleCompare() {
+ var array = ["XYZ", "mno", "abc", "EFG", "ijk", "123", "tuv", "234", "efg"];
+
+ var sum = 0;
+ for (var j = 0; j < array.length; ++j) {
+ sum += "fox".localeCompare(array[j]);
+ }
+
+ return sum;
+}
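
The summing trick works because localeCompare returns a negative number, zero,
or a positive number (the magnitude is unspecified), so adding the results
consumes every comparison without a branch that could be optimized away:

console.log('fox'.localeCompare('abc') > 0); // true: 'fox' collates after
console.log('fox'.localeCompare('fox'));     // 0: equal under the collator
console.log('fox'.localeCompare('mno') < 0); // true: 'fox' collates before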
diff --git a/deps/v8/test/js-perf-test/Strings/string-startswith.js b/deps/v8/test/js-perf-test/Strings/string-startswith.js
new file mode 100644
index 0000000000..4b0379b3a3
--- /dev/null
+++ b/deps/v8/test/js-perf-test/Strings/string-startswith.js
@@ -0,0 +1,79 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function createSuite(name, count, fn) {
+ new BenchmarkSuite(name, [count], [new Benchmark(name, true, false, 0, fn)]);
+}
+
+const inputs = [
+ 'Lorem ipsum dolor sit amet, consectetur adipiscing elit.',
+ 'Integer eu augue suscipit, accumsan ipsum nec, sagittis sem.',
+ 'In vitae pellentesque dolor. Curabitur leo nunc, luctus vitae',
+ 'risus eget, fermentum hendrerit justo.',
+ 'hello'.repeat(1024),
+ 'h',
+ ''
+];
+const firsts = ['I', 'Integer', 'Lorem', 'risus', 'hello'];
+
+function simpleHelper() {
+ let sum = 0;
+ for (const input of inputs) {
+ for (const first of firsts) {
+ sum += input.startsWith(first);
+ }
+ }
+ return sum;
+}
+
+function consInputHelper() {
+ let sum = 0;
+ for (const inputOne of inputs) {
+ for (const inputTwo of inputs) {
+ for (const first of firsts) {
+ // Skip if the length is too small for %ConstructConsString
+ if (inputOne.length + inputTwo.length < 13) break;
+ sum += %ConstructConsString(inputOne, inputTwo).startsWith(first);
+ }
+ }
+ }
+ return sum;
+}
+
+function consFirstHelper() {
+ let sum = 0;
+ for (const input of inputs) {
+ for (const firstOne of firsts) {
+ for (const firstTwo of firsts) {
+ // Skip if the length is too small for %ConstructConsString
+ if (firstOne.length + firstTwo.length < 13) break;
+ sum += input.startsWith(%ConstructConsString(firstOne, firstTwo));
+ }
+ }
+ }
+ return sum;
+}
+
+function doubleConsHelper() {
+ let sum = 0;
+ for (const inputOne of inputs) {
+ for (const inputTwo of inputs) {
+ for (const firstOne of firsts) {
+ for (const firstTwo of firsts) {
+ // Skip if the length is too small for %ConstructConsString
+ if (inputOne.length + inputTwo.length < 13 ||
+     firstOne.length + firstTwo.length < 13) break;
+ sum += %ConstructConsString(inputOne, inputTwo).startsWith(
+     %ConstructConsString(firstOne, firstTwo));
+ }
+ }
+ }
+ }
+ return sum;
+}
+
+createSuite('DirectStringsDirectSearch', 1000, simpleHelper);
+createSuite('ConsStringsDirectSearch', 1000, consInputHelper);
+createSuite('DirectStringsConsSearch', 1000, consFirstHelper);
+createSuite('ConsStringsConsSearch', 1000, doubleConsHelper);
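
Background on the four variants, hedged where it touches V8 internals: when
two strings whose combined length reaches 13 (the threshold mirrored by the
skip checks above) are concatenated, V8 can represent the result as a lazy
ConsString rather than a flat copy, and the %ConstructConsString native forces
that representation so startsWith is measured with flat and cons receivers and
needles in every combination. Observable behavior is identical either way:

const flat = 'Lorem ipsum dolor';
const cons = 'Lorem '.concat('ipsum dolor'); // long enough for a cons string
console.log(flat.startsWith('Lorem'), cons.startsWith('Lorem')); // true true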
diff --git a/deps/v8/test/js-perf-test/TypedArrays/base.js b/deps/v8/test/js-perf-test/TypedArrays/base.js
new file mode 100644
index 0000000000..04f65a6ea6
--- /dev/null
+++ b/deps/v8/test/js-perf-test/TypedArrays/base.js
@@ -0,0 +1,26 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+const typedArrayIntConstructors = [
+ {name: "Uint8", ctor: Uint8Array},
+ {name: "Int8", ctor: Int8Array},
+ {name: "Uint16", ctor: Uint16Array},
+ {name: "Int16", ctor: Int16Array},
+ {name: "Uint32", ctor: Uint32Array},
+ {name: "Int32", ctor: Int32Array},
+ {name: "Uint8Clamped", ctor: Uint8ClampedArray},
+];
+
+const typedArrayFloatConstructors = [
+ {name: "Float32", ctor: Float32Array},
+ {name: "Float64", ctor: Float64Array},
+];
+
+// "ref" builds might not yet have BigInt support, so the benchmark fails
+// gracefully during setup (the constructor will be undefined) instead of
+// failing hard when this file is loaded.
+const typedArrayBigIntConstructors = [
+ {name: "BigUint64", ctor: this["BigUint64Array"]},
+ {name: "BigInt64", ctor: this["BigInt64Array"]}
+];
diff --git a/deps/v8/test/js-perf-test/TypedArrays/join-bigint.js b/deps/v8/test/js-perf-test/TypedArrays/join-bigint.js
new file mode 100644
index 0000000000..8f8bb92dbb
--- /dev/null
+++ b/deps/v8/test/js-perf-test/TypedArrays/join-bigint.js
@@ -0,0 +1,8 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+load('join.js');
+
+new BenchmarkSuite('JoinBigIntTypes', [100],
+ CreateBenchmarks(typedArrayBigIntConstructors, false));
diff --git a/deps/v8/test/js-perf-test/TypedArrays/join-float.js b/deps/v8/test/js-perf-test/TypedArrays/join-float.js
new file mode 100644
index 0000000000..e666bfda34
--- /dev/null
+++ b/deps/v8/test/js-perf-test/TypedArrays/join-float.js
@@ -0,0 +1,8 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+load('join.js');
+
+new BenchmarkSuite('JoinFloatTypes', [100],
+ CreateBenchmarks(typedArrayFloatConstructors, false));
diff --git a/deps/v8/test/js-perf-test/TypedArrays/join-int.js b/deps/v8/test/js-perf-test/TypedArrays/join-int.js
new file mode 100644
index 0000000000..d08dc5f6ce
--- /dev/null
+++ b/deps/v8/test/js-perf-test/TypedArrays/join-int.js
@@ -0,0 +1,8 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+load('join.js');
+
+new BenchmarkSuite('JoinIntTypes', [100],
+ CreateBenchmarks(typedArrayIntConstructors, false));
diff --git a/deps/v8/test/js-perf-test/TypedArrays/join-sep-bigint.js b/deps/v8/test/js-perf-test/TypedArrays/join-sep-bigint.js
new file mode 100644
index 0000000000..34461b9395
--- /dev/null
+++ b/deps/v8/test/js-perf-test/TypedArrays/join-sep-bigint.js
@@ -0,0 +1,8 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+load('join.js');
+
+new BenchmarkSuite('JoinWithSeparatorBigIntTypes', [100],
+ CreateBenchmarks(typedArrayBigIntConstructors, true));
diff --git a/deps/v8/test/js-perf-test/TypedArrays/join-sep-float.js b/deps/v8/test/js-perf-test/TypedArrays/join-sep-float.js
new file mode 100644
index 0000000000..672d2a6ff6
--- /dev/null
+++ b/deps/v8/test/js-perf-test/TypedArrays/join-sep-float.js
@@ -0,0 +1,8 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+load('join.js');
+
+new BenchmarkSuite('JoinWithSeparatorFloatTypes', [100],
+ CreateBenchmarks(typedArrayFloatConstructors, true));
diff --git a/deps/v8/test/js-perf-test/TypedArrays/join-sep-int.js b/deps/v8/test/js-perf-test/TypedArrays/join-sep-int.js
new file mode 100644
index 0000000000..76312990ab
--- /dev/null
+++ b/deps/v8/test/js-perf-test/TypedArrays/join-sep-int.js
@@ -0,0 +1,8 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+load('join.js');
+
+new BenchmarkSuite('JoinWithSeparatorIntTypes', [100],
+ CreateBenchmarks(typedArrayIntConstructors, true));
diff --git a/deps/v8/test/js-perf-test/TypedArrays/join.js b/deps/v8/test/js-perf-test/TypedArrays/join.js
new file mode 100644
index 0000000000..9f090e4331
--- /dev/null
+++ b/deps/v8/test/js-perf-test/TypedArrays/join.js
@@ -0,0 +1,38 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+load('base.js');
+
+function CreateBenchmarks(constructors, withSep) {
+ return constructors.map(({ ctor, name }) =>
+ new Benchmark(`Join${name}`, false, false, 0, CreateJoinFn(withSep),
+ CreateSetupFn(ctor), TearDown)
+ );
+}
+
+const kInitialArray = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16];
+let result;
+let arrayToJoin = [];
+
+function CreateSetupFn(constructor) {
+ return () => {
+ if (constructor == BigUint64Array || constructor == BigInt64Array) {
+ arrayToJoin = constructor.from(kInitialArray, x => BigInt(Math.floor(x)));
+ } else {
+ arrayToJoin = new constructor(kInitialArray);
+ }
+ }
+}
+
+function CreateJoinFn(withSep) {
+ if (withSep) {
+ return () => result = arrayToJoin.join(',');
+ } else {
+ return () => result = arrayToJoin.join('');
+ }
+}
+
+function TearDown() {
+ arrayToJoin = void 0;
+}
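
Why CreateSetupFn special-cases the BigInt constructors: BigInt64Array and
BigUint64Array reject Number elements, so the shared Number-valued
kInitialArray must be converted element by element, while the other
constructors coerce Numbers directly. In isolation:

// new BigInt64Array([1, 2]) throws a TypeError (Number cannot convert);
const big = BigInt64Array.from([1, 2, 3], x => BigInt(x));
console.log(big.join(','));             // "1,2,3"
const ints = new Int32Array([1, 2, 3]); // Numbers coerce fine here
console.log(ints.join(''));             // "123"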
diff --git a/deps/v8/test/js-perf-test/TypedArrays/sort.js b/deps/v8/test/js-perf-test/TypedArrays/sort.js
index c2423994cd..c98ac05154 100644
--- a/deps/v8/test/js-perf-test/TypedArrays/sort.js
+++ b/deps/v8/test/js-perf-test/TypedArrays/sort.js
@@ -2,28 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-let typedArrayIntConstructors = [
- {name: "Uint8", ctor: Uint8Array},
- {name: "Int8", ctor: Int8Array},
- {name: "Uint16", ctor: Uint16Array},
- {name: "Int16", ctor: Int16Array},
- {name: "Uint32", ctor: Uint32Array},
- {name: "Int32", ctor: Int32Array},
- {name: "Uint8Clamped", ctor: Uint8ClampedArray},
-];
-
-let typedArrayFloatConstructors = [
- {name: "Float32", ctor: Float32Array},
- {name: "Float64", ctor: Float64Array},
-];
-
-// "ref" builds might not yet have BigInt support, so the benchmark fails
-// gracefully during setup (the constructor will be undefined), instead of
-// a hard fail when this file is loaded.
-let typedArrayBigIntConstructors = [
- {name: "BigUint64", ctor: this["BigUint64Array"]},
- {name: "BigInt64", ctor: this["BigInt64Array"]}
-];
+load('base.js');
function CreateBenchmarks(constructors, comparefns = []) {
var benchmarks = [];
diff --git a/deps/v8/test/js-perf-test/base.js b/deps/v8/test/js-perf-test/base.js
index ca25789488..66a15c0ff9 100644
--- a/deps/v8/test/js-perf-test/base.js
+++ b/deps/v8/test/js-perf-test/base.js
@@ -105,7 +105,8 @@ BenchmarkSuite.ResetRNG = function() {
Math.random = (function() {
var seed = 49734321;
return function() {
- // Robert Jenkins' 32 bit integer hash function.
+ // Robert Jenkins' 32-bit integer hash function.
+ seed = seed & 0xffffffff;
seed = ((seed + 0x7ed55d16) + (seed << 12)) & 0xffffffff;
seed = ((seed ^ 0xc761c23c) ^ (seed >>> 19)) & 0xffffffff;
seed = ((seed + 0x165667b1) + (seed << 5)) & 0xffffffff;
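
A minimal sketch of the surrounding ResetRNG closure, reconstructed from the
visible hunk; the mixing rounds beyond those shown and the final mapping to
[0, 1) are assumptions about the elided lines. The new masking line clamps
seed back to 32 bits before Jenkins' hash rounds mix it, keeping the
replacement Math.random deterministic:

Math.random = (function() {
  var seed = 49734321;
  return function() {
    seed = seed & 0xffffffff;  // the line this hunk adds
    seed = ((seed + 0x7ed55d16) + (seed << 12)) & 0xffffffff;
    seed = ((seed ^ 0xc761c23c) ^ (seed >>> 19)) & 0xffffffff;
    seed = ((seed + 0x165667b1) + (seed << 5)) & 0xffffffff;
    // ...further mixing rounds elided in this hunk...
    return (seed & 0xfffffff) / 0x10000000; // assumed mapping to [0, 1)
  };
})();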
diff --git a/deps/v8/test/memory/Memory.json b/deps/v8/test/memory/Memory.json
index d5f11e9b2c..085b3d6678 100644
--- a/deps/v8/test/memory/Memory.json
+++ b/deps/v8/test/memory/Memory.json
@@ -20,8 +20,8 @@
"results_regexp": "(\\d+) bytes in \\d+ chunks for startup$"
},
{
- "name": "SnapshotSizeBuiltins",
- "results_regexp": "(\\d+) bytes for builtins$"
+ "name": "SnapshotSizeReadOnly",
+ "results_regexp": "(\\d+) bytes for read-only$"
},
{
"name": "SnapshotSizeContext",
diff --git a/deps/v8/test/fuzzer/wasm_data_section/foo b/deps/v8/test/message/empty.js
index e69de29bb2..e69de29bb2 100644
--- a/deps/v8/test/fuzzer/wasm_data_section/foo
+++ b/deps/v8/test/message/empty.js
diff --git a/deps/v8/test/fuzzer/wasm_function_sigs_section/foo b/deps/v8/test/message/empty.out
index e69de29bb2..e69de29bb2 100644
--- a/deps/v8/test/fuzzer/wasm_function_sigs_section/foo
+++ b/deps/v8/test/message/empty.out
diff --git a/deps/v8/test/message/fail/arrow-formal-parameters.out b/deps/v8/test/message/fail/arrow-formal-parameters.out
index bafcf452e3..e5e90cc1fc 100644
--- a/deps/v8/test/message/fail/arrow-formal-parameters.out
+++ b/deps/v8/test/message/fail/arrow-formal-parameters.out
@@ -1,4 +1,4 @@
*%(basename)s:5: SyntaxError: Duplicate parameter name not allowed in this context
(b, a, a, d) => a
- ^
+ ^
SyntaxError: Duplicate parameter name not allowed in this context
diff --git a/deps/v8/test/message/fail/arrow-param-after-rest-2.out b/deps/v8/test/message/fail/arrow-param-after-rest-2.out
index 27785cfb02..ef17ff8938 100644
--- a/deps/v8/test/message/fail/arrow-param-after-rest-2.out
+++ b/deps/v8/test/message/fail/arrow-param-after-rest-2.out
@@ -1,4 +1,4 @@
*%(basename)s:7: SyntaxError: Rest parameter must be last formal parameter
(w, ...x, y) => 10
- ^
+ ^
SyntaxError: Rest parameter must be last formal parameter
diff --git a/deps/v8/test/message/fail/arrow-param-after-rest.out b/deps/v8/test/message/fail/arrow-param-after-rest.out
index 5b36e43584..5a3310878c 100644
--- a/deps/v8/test/message/fail/arrow-param-after-rest.out
+++ b/deps/v8/test/message/fail/arrow-param-after-rest.out
@@ -1,4 +1,4 @@
*%(basename)s:7: SyntaxError: Rest parameter must be last formal parameter
(...x, y) => 10
- ^
+ ^
SyntaxError: Rest parameter must be last formal parameter
diff --git a/deps/v8/test/message/fail/arrow-two-rest-params.out b/deps/v8/test/message/fail/arrow-two-rest-params.out
index 7147ebcf11..855446ef83 100644
--- a/deps/v8/test/message/fail/arrow-two-rest-params.out
+++ b/deps/v8/test/message/fail/arrow-two-rest-params.out
@@ -1,4 +1,4 @@
*%(basename)s:7: SyntaxError: Rest parameter must be last formal parameter
(w, ...x, ...y) => 10
- ^
+ ^
SyntaxError: Rest parameter must be last formal parameter
diff --git a/deps/v8/test/message/fail/call-async.js b/deps/v8/test/message/fail/call-async.js
new file mode 100644
index 0000000000..139be7f353
--- /dev/null
+++ b/deps/v8/test/message/fail/call-async.js
@@ -0,0 +1,8 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function f(async) {
+ async()
+}
+f()
diff --git a/deps/v8/test/message/fail/call-async.out b/deps/v8/test/message/fail/call-async.out
new file mode 100644
index 0000000000..36bd9cb172
--- /dev/null
+++ b/deps/v8/test/message/fail/call-async.out
@@ -0,0 +1,6 @@
+*%(basename)s:6: TypeError: async is not a function
+ async()
+ ^
+TypeError: async is not a function
+ at f (*%(basename)s:6:3)
+ at *%(basename)s:8:1
diff --git a/deps/v8/test/message/fail/call-await.js b/deps/v8/test/message/fail/call-await.js
new file mode 100644
index 0000000000..fe8c814343
--- /dev/null
+++ b/deps/v8/test/message/fail/call-await.js
@@ -0,0 +1,8 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function f(await) {
+ await()
+}
+f()
diff --git a/deps/v8/test/message/fail/call-await.out b/deps/v8/test/message/fail/call-await.out
new file mode 100644
index 0000000000..ba0ecd051d
--- /dev/null
+++ b/deps/v8/test/message/fail/call-await.out
@@ -0,0 +1,7 @@
+*%(basename)s:6: TypeError: await is not a function
+ await()
+ ^
+TypeError: await is not a function
+ at f (*%(basename)s:6:3)
+ at *%(basename)s:8:1
+
diff --git a/deps/v8/test/mjsunit/regress/regress-408036.js b/deps/v8/test/message/fail/call-let.js
index a4dfade25d..65f06b406d 100644
--- a/deps/v8/test/mjsunit/regress/regress-408036.js
+++ b/deps/v8/test/message/fail/call-let.js
@@ -1,5 +1,8 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
+// Copyright 2018 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

-// Flags: --expose-natives-as 1
+function f(let) {
+ let()
+}
+f()
diff --git a/deps/v8/test/message/fail/call-let.out b/deps/v8/test/message/fail/call-let.out
new file mode 100644
index 0000000000..1ac51f019e
--- /dev/null
+++ b/deps/v8/test/message/fail/call-let.out
@@ -0,0 +1,6 @@
+*%(basename)s:6: TypeError: let is not a function
+ let()
+ ^
+TypeError: let is not a function
+ at f (*%(basename)s:6:3)
+ at *%(basename)s:8:1
diff --git a/deps/v8/test/message/fail/call-static.js b/deps/v8/test/message/fail/call-static.js
new file mode 100644
index 0000000000..77c69e918d
--- /dev/null
+++ b/deps/v8/test/message/fail/call-static.js
@@ -0,0 +1,8 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function f(static) {
+ static()
+}
+f()
diff --git a/deps/v8/test/message/fail/call-static.out b/deps/v8/test/message/fail/call-static.out
new file mode 100644
index 0000000000..bdc1133a56
--- /dev/null
+++ b/deps/v8/test/message/fail/call-static.out
@@ -0,0 +1,6 @@
+*%(basename)s:6: TypeError: static is not a function
+ static()
+ ^
+TypeError: static is not a function
+ at f (*%(basename)s:6:3)
+ at *%(basename)s:8:1
diff --git a/deps/v8/test/message/fail/call-yield.js b/deps/v8/test/message/fail/call-yield.js
new file mode 100644
index 0000000000..e0822ac623
--- /dev/null
+++ b/deps/v8/test/message/fail/call-yield.js
@@ -0,0 +1,8 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function f(yield) {
+ yield()
+}
+f()
diff --git a/deps/v8/test/message/fail/call-yield.out b/deps/v8/test/message/fail/call-yield.out
new file mode 100644
index 0000000000..cb6f82dc72
--- /dev/null
+++ b/deps/v8/test/message/fail/call-yield.out
@@ -0,0 +1,6 @@
+*%(basename)s:6: TypeError: yield is not a function
+ yield()
+ ^
+TypeError: yield is not a function
+ at f (*%(basename)s:6:3)
+ at *%(basename)s:8:1
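Editor's note: the new call-*.js tests depend on `async`, `await`, `let`, `static`, and `yield` being only contextually reserved: in sloppy, non-generator, non-async code they are ordinary identifiers, so the parser accepts them as parameter names and the failure surfaces only at runtime. Roughly:

    function f(yield) {   // legal in sloppy mode; a SyntaxError under "use strict"
      yield()             // here `yield` is just a local binding
    }
    try {
      f()                 // the parameter is undefined, so calling it throws
    } catch (e) {
      console.log(e instanceof TypeError, e.message); // true 'yield is not a function'
    }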
diff --git a/deps/v8/test/message/fail/class-fields-private-source-positions.js b/deps/v8/test/message/fail/class-fields-private-source-positions.js
new file mode 100644
index 0000000000..b4f8093a1f
--- /dev/null
+++ b/deps/v8/test/message/fail/class-fields-private-source-positions.js
@@ -0,0 +1,12 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-private-fields
+
+var o = {};
+class C {
+ #a = 0;
+
+ [o.#a](){}
+}
diff --git a/deps/v8/test/message/fail/class-fields-private-source-positions.out b/deps/v8/test/message/fail/class-fields-private-source-positions.out
new file mode 100644
index 0000000000..cc70fde7b6
--- /dev/null
+++ b/deps/v8/test/message/fail/class-fields-private-source-positions.out
@@ -0,0 +1,5 @@
+*%(basename)s:11: TypeError: Read of private field #a from an object which did not contain the field
+ [o.#a](){}
+ ^
+TypeError: Read of private field #a from an object which did not contain the field
+ at *%(basename)s:11:8
diff --git a/deps/v8/test/mjsunit/mjsunit_suppressions.js b/deps/v8/test/message/fail/class-fields-private-throw-early-2.js
index 1bd466993f..2831d4d91a 100644
--- a/deps/v8/test/mjsunit/mjsunit_suppressions.js
+++ b/deps/v8/test/message/fail/class-fields-private-throw-early-2.js
@@ -1,9 +1,14 @@
// Copyright 2018 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+//
+// Flags: --harmony-private-fields

+class X {
+ #x;
+ constructor() {
+ ({}).#x = 1;
+ }
+}
+
-// This turns all mjsunit asserts into no-ops used for fuzzing.
-(function () {
- failWithMessage = function () {}
-})();
+new X;
diff --git a/deps/v8/test/message/fail/class-fields-private-throw-early-2.out b/deps/v8/test/message/fail/class-fields-private-throw-early-2.out
new file mode 100644
index 0000000000..fdcdfbd414
--- /dev/null
+++ b/deps/v8/test/message/fail/class-fields-private-throw-early-2.out
@@ -0,0 +1,6 @@
+*%(basename)s:10: TypeError: Write of private field #x to an object which did not contain the field
+ ({}).#x = 1;
+ ^
+TypeError: Write of private field #x to an object which did not contain the field
+ at new X (*%(basename)s:10:13)
+ at *%(basename)s:14:1
\ No newline at end of file
diff --git a/deps/v8/test/message/fail/class-fields-private-throw-early.js b/deps/v8/test/message/fail/class-fields-private-throw-early.js
new file mode 100644
index 0000000000..b224a8d04e
--- /dev/null
+++ b/deps/v8/test/message/fail/class-fields-private-throw-early.js
@@ -0,0 +1,11 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --harmony-private-fields
+
+class X {
+ constructor() {
+ this.#x = 1;
+ }
+}
diff --git a/deps/v8/test/message/fail/class-fields-private-throw-early.out b/deps/v8/test/message/fail/class-fields-private-throw-early.out
new file mode 100644
index 0000000000..86395a2ee5
--- /dev/null
+++ b/deps/v8/test/message/fail/class-fields-private-throw-early.out
@@ -0,0 +1,4 @@
+*%(basename)s:9: SyntaxError: Undefined private field #x: must be declared in an enclosing class
+ this.#x = 1;
+ ^
+SyntaxError: Undefined private field #x: must be declared in an enclosing class
\ No newline at end of file
diff --git a/deps/v8/test/message/fail/class-fields-private-throw-read.js b/deps/v8/test/message/fail/class-fields-private-throw-read.js
new file mode 100644
index 0000000000..3be60efdfc
--- /dev/null
+++ b/deps/v8/test/message/fail/class-fields-private-throw-read.js
@@ -0,0 +1,12 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --harmony-private-fields
+
+class X {
+ #x;
+ eq(o) { return this.#x === o.#x; }
+}
+
+new X().eq({});
diff --git a/deps/v8/test/message/fail/class-fields-private-throw-read.out b/deps/v8/test/message/fail/class-fields-private-throw-read.out
new file mode 100644
index 0000000000..4b49cfd354
--- /dev/null
+++ b/deps/v8/test/message/fail/class-fields-private-throw-read.out
@@ -0,0 +1,6 @@
+*%(basename)s:9: TypeError: Read of private field #x from an object which did not contain the field
+ eq(o) { return this.#x === o.#x; }
+ ^
+TypeError: Read of private field #x from an object which did not contain the field
+ at X.eq (*%(basename)s:9:32)
+ at *%(basename)s:12:9
\ No newline at end of file
diff --git a/deps/v8/test/message/fail/class-fields-private-throw-write.js b/deps/v8/test/message/fail/class-fields-private-throw-write.js
new file mode 100644
index 0000000000..93e9c135b9
--- /dev/null
+++ b/deps/v8/test/message/fail/class-fields-private-throw-write.js
@@ -0,0 +1,12 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --harmony-private-fields
+
+class X {
+ #x;
+ setX(o, val) { o.#x = val; }
+}
+
+new X().setX({}, 1);
diff --git a/deps/v8/test/message/fail/class-fields-private-throw-write.out b/deps/v8/test/message/fail/class-fields-private-throw-write.out
new file mode 100644
index 0000000000..e0a11d90bc
--- /dev/null
+++ b/deps/v8/test/message/fail/class-fields-private-throw-write.out
@@ -0,0 +1,6 @@
+*%(basename)s:9: TypeError: Write of private field #x to an object which did not contain the field
+ setX(o, val) { o.#x = val; }
+ ^
+TypeError: Write of private field #x to an object which did not contain the field
+ at X.setX (*%(basename)s:9:23)
+ at *%(basename)s:12:9
\ No newline at end of file
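Editor's note: the private-field tests above pin down the brand-check errors behind --harmony-private-fields: reading or writing `#x` on an object that never had the field installed throws a TypeError, while using `#x` with no enclosing declaration at all is a SyntaxError at parse time. A small sketch of the read path doubling as a brand check (this predates the `#x in o` syntax):

    class X {
      #x = 0;
      static has(o) {
        try { o.#x; return true; }   // read succeeds only on true X instances
        catch (e) { return false; }  // TypeError for any other object
      }
    }
    console.log(X.has(new X())); // true
    console.log(X.has({}));      // false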
diff --git a/deps/v8/test/message/fail/class-fields-throw.out b/deps/v8/test/message/fail/class-fields-throw.out
index f1036fde86..3c347a9529 100644
--- a/deps/v8/test/message/fail/class-fields-throw.out
+++ b/deps/v8/test/message/fail/class-fields-throw.out
@@ -2,6 +2,6 @@
x = foo();
^
ReferenceError: foo is not defined
- at X.<instance_fields_initializer> (*%(basename)s:8:7)
+ at X.<instance_members_initializer> (*%(basename)s:8:7)
at new X (*%(basename)s:7:1)
at *%(basename)s:11:1
\ No newline at end of file
diff --git a/deps/v8/test/message/fail/destructuring-undefined-computed-property.out b/deps/v8/test/message/fail/destructuring-undefined-computed-property.out
index 27baf9a2d6..1dfb19eb69 100644
--- a/deps/v8/test/message/fail/destructuring-undefined-computed-property.out
+++ b/deps/v8/test/message/fail/destructuring-undefined-computed-property.out
@@ -2,4 +2,4 @@
var { [x] : y } = undefined;
^
TypeError: Cannot destructure 'undefined' or 'null'.
- at *%(basename)s:5:19
+ at *%(basename)s:5:5
diff --git a/deps/v8/test/message/fail/destructuring-undefined-number-property.out b/deps/v8/test/message/fail/destructuring-undefined-number-property.out
index 4d63351204..b23889566a 100644
--- a/deps/v8/test/message/fail/destructuring-undefined-number-property.out
+++ b/deps/v8/test/message/fail/destructuring-undefined-number-property.out
@@ -2,4 +2,4 @@
var { 1: x } = undefined;
^
TypeError: Cannot destructure 'undefined' or 'null'.
- at *%(basename)s:5:16
+ at *%(basename)s:5:5
diff --git a/deps/v8/test/message/fail/destructuring-undefined-string-property.out b/deps/v8/test/message/fail/destructuring-undefined-string-property.out
index 1ea1c6b264..238aae974a 100644
--- a/deps/v8/test/message/fail/destructuring-undefined-string-property.out
+++ b/deps/v8/test/message/fail/destructuring-undefined-string-property.out
@@ -2,4 +2,4 @@
var { x } = undefined;
^
TypeError: Cannot destructure property `x` of 'undefined' or 'null'.
- at *%(basename)s:5:13
+ at *%(basename)s:5:5
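Editor's note: these three expectation changes only move the reported source position: destructuring `undefined` or `null` now blames the start of the pattern (column 5) rather than the individual property. The runtime behaviour is unchanged:

    try {
      var { x } = undefined;  // no property can be read off undefined/null
    } catch (e) {
      console.log(e instanceof TypeError); // true
    }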
diff --git a/deps/v8/test/message/fail/directive.js b/deps/v8/test/message/fail/directive.js
new file mode 100644
index 0000000000..217e81ad13
--- /dev/null
+++ b/deps/v8/test/message/fail/directive.js
@@ -0,0 +1,7 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function f([a]) {
+ "use strict";
+}
diff --git a/deps/v8/test/message/fail/directive.out b/deps/v8/test/message/fail/directive.out
new file mode 100644
index 0000000000..981045a8a7
--- /dev/null
+++ b/deps/v8/test/message/fail/directive.out
@@ -0,0 +1,4 @@
+*%(basename)s:6: SyntaxError: Illegal 'use strict' directive in function with non-simple parameter list
+ "use strict";
+ ^^^^^^^^^^^^
+SyntaxError: Illegal 'use strict' directive in function with non-simple parameter list
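Editor's note: directive.js covers the ES2016 rule that a function with a non-simple parameter list (destructuring, defaults, or rest) may not contain its own 'use strict' directive, since the directive would retroactively change how those parameters parse. For example:

    // SyntaxError: Illegal 'use strict' directive in function with
    // non-simple parameter list:
    //   function f([a]) { "use strict"; }

    // Fine: make the enclosing scope strict instead.
    "use strict";
    function g([a]) { return a; }
    console.log(g([1])); // 1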
diff --git a/deps/v8/test/message/fail/get-iterator1.out b/deps/v8/test/message/fail/get-iterator1.out
index 5d01f1cc88..cd7a5a18fe 100644
--- a/deps/v8/test/message/fail/get-iterator1.out
+++ b/deps/v8/test/message/fail/get-iterator1.out
@@ -1,5 +1,5 @@
-*%(basename)s:7: TypeError: nonIterable(...) is not a function or its return value is not iterable
+*%(basename)s:7: TypeError: nonIterable is not a function or its return value is not iterable
[...nonIterable()];
^
-TypeError: nonIterable(...) is not a function or its return value is not iterable
+TypeError: nonIterable is not a function or its return value is not iterable
at *%(basename)s:7:5
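Editor's note: the change above only drops the `(...)` suffix from the message text; the check itself is unchanged: spreading a value with no usable Symbol.iterator throws a TypeError. For instance:

    function nonIterable() { return 42; }  // a plain number is not iterable
    try {
      [...nonIterable()];
    } catch (e) {
      console.log(e instanceof TypeError); // true
    }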
diff --git a/deps/v8/test/message/fail/invalid-spread-2.out b/deps/v8/test/message/fail/invalid-spread-2.out
index 287390a74a..2a7f746c1a 100644
--- a/deps/v8/test/message/fail/invalid-spread-2.out
+++ b/deps/v8/test/message/fail/invalid-spread-2.out
@@ -1,4 +1,4 @@
-*%(basename)s:7: SyntaxError: Unexpected token ...
+*%(basename)s:7: SyntaxError: Rest parameter must be last formal parameter
(x, ...y, z)
- ^^^
-SyntaxError: Unexpected token ...
+ ^
+SyntaxError: Rest parameter must be last formal parameter
diff --git a/deps/v8/test/message/fail/isvar.js b/deps/v8/test/message/fail/isvar.js
deleted file mode 100644
index fedf9d5f68..0000000000
--- a/deps/v8/test/message/fail/isvar.js
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Flags: --allow-natives-syntax
-var x;
-%IS_VAR(x);
-%IS_VAR(x+x);
diff --git a/deps/v8/test/message/fail/isvar.out b/deps/v8/test/message/fail/isvar.out
deleted file mode 100644
index 6d5cca3345..0000000000
--- a/deps/v8/test/message/fail/isvar.out
+++ /dev/null
@@ -1,4 +0,0 @@
-*%(basename)s:31: SyntaxError: builtin %%IS_VAR: not a variable
-%%IS_VAR(x+x);
- ^
-SyntaxError: builtin %%IS_VAR: not a variable
diff --git a/deps/v8/test/message/fail/list-format-style-narrow.js b/deps/v8/test/message/fail/list-format-style-narrow.js
new file mode 100644
index 0000000000..f9af8ff4a7
--- /dev/null
+++ b/deps/v8/test/message/fail/list-format-style-narrow.js
@@ -0,0 +1,7 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --harmony-intl-list-format
+
+new Intl.ListFormat("en", {style: 'narrow'})
diff --git a/deps/v8/test/message/fail/list-format-style-narrow.out b/deps/v8/test/message/fail/list-format-style-narrow.out
new file mode 100644
index 0000000000..b762f8d664
--- /dev/null
+++ b/deps/v8/test/message/fail/list-format-style-narrow.out
@@ -0,0 +1,8 @@
+*%(basename)s:7: RangeError: When style is 'narrow', 'unit' is the only allowed value for the type option.
+new Intl.ListFormat("en", {style: 'narrow'})
+^
+RangeError: When style is 'narrow', 'unit' is the only allowed value for the type option.
+ at new ListFormat (<anonymous>)
+ at *%(basename)s:7:1
+
+
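Editor's note: list-format-style-narrow.js pins down an ECMA-402 constraint: with style 'narrow', 'unit' is the only permitted value for the type option. A sketch of the failing and passing calls, assuming an ICU-enabled build with --harmony-intl-list-format (ListFormat later shipped unflagged):

    // RangeError: narrow style with the default type 'conjunction'.
    // new Intl.ListFormat("en", { style: "narrow" });

    // Allowed: narrow style together with type 'unit'.
    const lf = new Intl.ListFormat("en", { style: "narrow", type: "unit" });
    console.log(lf.format(["3 ft", "7 in"])); // "3 ft 7 in"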
diff --git a/deps/v8/test/message/fail/modules-duplicate-export5.js b/deps/v8/test/message/fail/modules-duplicate-export5.js
new file mode 100644
index 0000000000..82fbca1e01
--- /dev/null
+++ b/deps/v8/test/message/fail/modules-duplicate-export5.js
@@ -0,0 +1,9 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// MODULE
+// Flags: --harmony-namespace-exports
+
+export let foo = 42;
+export * as foo from "./doesnt-even-matter.js";
diff --git a/deps/v8/test/message/fail/modules-duplicate-export5.out b/deps/v8/test/message/fail/modules-duplicate-export5.out
new file mode 100644
index 0000000000..85cd76c68c
--- /dev/null
+++ b/deps/v8/test/message/fail/modules-duplicate-export5.out
@@ -0,0 +1,5 @@
+*%(basename)s:9: SyntaxError: Duplicate export of 'foo'
+export * as foo from "./doesnt-even-matter.js";
+ ^^^
+SyntaxError: Duplicate export of 'foo'
+
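Editor's note: modules-duplicate-export5.js exercises --harmony-namespace-exports (`export * as ns from ...`), which re-exports a module's entire namespace under one name and therefore takes part in the ordinary duplicate-export check. A sketch of the valid form, with hypothetical file names:

    // lib.js
    export const a = 1;

    // index.js: re-export lib's namespace object under the name `lib`.
    export * as lib from "./lib.js";

    // consumer.js
    import { lib } from "./index.js";
    console.log(lib.a); // 1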
diff --git a/deps/v8/test/message/fail/modules-export-illformed-class.js b/deps/v8/test/message/fail/modules-export-illformed-class.js
new file mode 100644
index 0000000000..9a5fc05f63
--- /dev/null
+++ b/deps/v8/test/message/fail/modules-export-illformed-class.js
@@ -0,0 +1,7 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// MODULE
+
+export class foo {[]};
diff --git a/deps/v8/test/message/fail/modules-export-illformed-class.out b/deps/v8/test/message/fail/modules-export-illformed-class.out
new file mode 100644
index 0000000000..cf26e55134
--- /dev/null
+++ b/deps/v8/test/message/fail/modules-export-illformed-class.out
@@ -0,0 +1,5 @@
+*%(basename)s:7: SyntaxError: Unexpected token ]
+export class foo {[]};
+ ^
+SyntaxError: Unexpected token ]
+
diff --git a/deps/v8/test/message/fail/object-rest-assignment-pattern.out b/deps/v8/test/message/fail/object-rest-assignment-pattern.out
index 058bbc065c..ebc5a67b05 100644
--- a/deps/v8/test/message/fail/object-rest-assignment-pattern.out
+++ b/deps/v8/test/message/fail/object-rest-assignment-pattern.out
@@ -1,4 +1,4 @@
*%(basename)s:5: SyntaxError: `...` must be followed by an assignable reference in assignment contexts
({...{}} = {});
- ^
+ ^^
SyntaxError: `...` must be followed by an assignable reference in assignment contexts
diff --git a/deps/v8/test/message/fail/object-rest-binding-pattern.out b/deps/v8/test/message/fail/object-rest-binding-pattern.out
index 43fb7d5fd2..e690e2e2d8 100644
--- a/deps/v8/test/message/fail/object-rest-binding-pattern.out
+++ b/deps/v8/test/message/fail/object-rest-binding-pattern.out
@@ -1,4 +1,4 @@
*%(basename)s:5: SyntaxError: `...` must be followed by an identifier in declaration contexts
let {...{}} = {};
- ^
+ ^^
SyntaxError: `...` must be followed by an identifier in declaration contexts
diff --git a/deps/v8/test/message/fail/param-arrow-redeclaration-as-let.js b/deps/v8/test/message/fail/param-arrow-redeclaration-as-let.js
new file mode 100644
index 0000000000..7713849014
--- /dev/null
+++ b/deps/v8/test/message/fail/param-arrow-redeclaration-as-let.js
@@ -0,0 +1,7 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+({x}) => {
+ let x
+}
diff --git a/deps/v8/test/message/fail/param-arrow-redeclaration-as-let.out b/deps/v8/test/message/fail/param-arrow-redeclaration-as-let.out
new file mode 100644
index 0000000000..36eaba3def
--- /dev/null
+++ b/deps/v8/test/message/fail/param-arrow-redeclaration-as-let.out
@@ -0,0 +1,5 @@
+*%(basename)s:6: SyntaxError: Identifier 'x' has already been declared
+ let x
+ ^
+SyntaxError: Identifier 'x' has already been declared
+
diff --git a/deps/v8/test/message/fail/param-async-arrow-redeclaration-as-let.js b/deps/v8/test/message/fail/param-async-arrow-redeclaration-as-let.js
new file mode 100644
index 0000000000..efec34ce68
--- /dev/null
+++ b/deps/v8/test/message/fail/param-async-arrow-redeclaration-as-let.js
@@ -0,0 +1,7 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+async ({x}) => {
+ let x
+}
diff --git a/deps/v8/test/message/fail/param-async-arrow-redeclaration-as-let.out b/deps/v8/test/message/fail/param-async-arrow-redeclaration-as-let.out
new file mode 100644
index 0000000000..36eaba3def
--- /dev/null
+++ b/deps/v8/test/message/fail/param-async-arrow-redeclaration-as-let.out
@@ -0,0 +1,5 @@
+*%(basename)s:6: SyntaxError: Identifier 'x' has already been declared
+ let x
+ ^
+SyntaxError: Identifier 'x' has already been declared
+
diff --git a/deps/v8/test/message/fail/param-async-function-redeclaration-as-let.js b/deps/v8/test/message/fail/param-async-function-redeclaration-as-let.js
new file mode 100644
index 0000000000..fe5b605b65
--- /dev/null
+++ b/deps/v8/test/message/fail/param-async-function-redeclaration-as-let.js
@@ -0,0 +1,7 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+async function f({x}) {
+ let x
+}
diff --git a/deps/v8/test/message/fail/param-async-function-redeclaration-as-let.out b/deps/v8/test/message/fail/param-async-function-redeclaration-as-let.out
new file mode 100644
index 0000000000..36eaba3def
--- /dev/null
+++ b/deps/v8/test/message/fail/param-async-function-redeclaration-as-let.out
@@ -0,0 +1,5 @@
+*%(basename)s:6: SyntaxError: Identifier 'x' has already been declared
+ let x
+ ^
+SyntaxError: Identifier 'x' has already been declared
+
diff --git a/deps/v8/test/message/fail/param-function-redeclaration-as-let.js b/deps/v8/test/message/fail/param-function-redeclaration-as-let.js
new file mode 100644
index 0000000000..11ae7cf6f9
--- /dev/null
+++ b/deps/v8/test/message/fail/param-function-redeclaration-as-let.js
@@ -0,0 +1,7 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function f({x}) {
+ let x
+}
diff --git a/deps/v8/test/message/fail/param-function-redeclaration-as-let.out b/deps/v8/test/message/fail/param-function-redeclaration-as-let.out
new file mode 100644
index 0000000000..36eaba3def
--- /dev/null
+++ b/deps/v8/test/message/fail/param-function-redeclaration-as-let.out
@@ -0,0 +1,5 @@
+*%(basename)s:6: SyntaxError: Identifier 'x' has already been declared
+ let x
+ ^
+SyntaxError: Identifier 'x' has already been declared
+
diff --git a/deps/v8/test/message/fail/try-catch-lexical-conflict-preparser.js b/deps/v8/test/message/fail/try-catch-lexical-conflict-preparser.js
new file mode 100644
index 0000000000..9215afa7cd
--- /dev/null
+++ b/deps/v8/test/message/fail/try-catch-lexical-conflict-preparser.js
@@ -0,0 +1,10 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function f() {
+ try { }
+ catch ({x}) {
+ let x
+ }
+}
diff --git a/deps/v8/test/message/fail/try-catch-lexical-conflict-preparser.out b/deps/v8/test/message/fail/try-catch-lexical-conflict-preparser.out
new file mode 100644
index 0000000000..b30a5945fd
--- /dev/null
+++ b/deps/v8/test/message/fail/try-catch-lexical-conflict-preparser.out
@@ -0,0 +1,4 @@
+*%(basename)s:8: SyntaxError: Identifier 'x' has already been declared
+ let x
+ ^
+SyntaxError: Identifier 'x' has already been declared
diff --git a/deps/v8/test/message/fail/unparenthesized-exponentiation-expression.js b/deps/v8/test/message/fail/unparenthesized-exponentiation-expression.js
new file mode 100644
index 0000000000..9578dce74b
--- /dev/null
+++ b/deps/v8/test/message/fail/unparenthesized-exponentiation-expression.js
@@ -0,0 +1,7 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+//
+
+-5**2
diff --git a/deps/v8/test/message/fail/unparenthesized-exponentiation-expression.out b/deps/v8/test/message/fail/unparenthesized-exponentiation-expression.out
new file mode 100644
index 0000000000..5f293d782f
--- /dev/null
+++ b/deps/v8/test/message/fail/unparenthesized-exponentiation-expression.out
@@ -0,0 +1,4 @@
+*%(basename)s:7: SyntaxError: Unary operator used immediately before exponentiation expression. Parenthesis must be used to disambiguate operator precedence
+-5**2
+^^^^
+SyntaxError: Unary operator used immediately before exponentiation expression. Parenthesis must be used to disambiguate operator precedence
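Editor's note: this test covers a deliberate grammar gap from ES2016: a unary operator directly before `**` is a SyntaxError, because `-5 ** 2` could plausibly mean 25 or -25. Both readings must be written explicitly:

    console.log((-5) ** 2); // 25: negate first, then square
    console.log(-(5 ** 2)); // -25: square first, then negate
    // console.log(-5 ** 2); // SyntaxError: parentheses required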
diff --git a/deps/v8/test/message/fail/weak-refs-makecell1.js b/deps/v8/test/message/fail/weak-refs-makecell1.js
new file mode 100644
index 0000000000..416fcca255
--- /dev/null
+++ b/deps/v8/test/message/fail/weak-refs-makecell1.js
@@ -0,0 +1,8 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-weak-refs
+
+let wf = new WeakFactory(() => {});
+wf.makeCell(1);
diff --git a/deps/v8/test/message/fail/weak-refs-makecell1.out b/deps/v8/test/message/fail/weak-refs-makecell1.out
new file mode 100644
index 0000000000..5c74c1f7fa
--- /dev/null
+++ b/deps/v8/test/message/fail/weak-refs-makecell1.out
@@ -0,0 +1,6 @@
+*%(basename)s:8: TypeError: WeakFactory.prototype.makeCell: target must be an object
+wf.makeCell(1);
+ ^
+TypeError: WeakFactory.prototype.makeCell: target must be an object
+ at WeakFactory.makeCell (<anonymous>)
+ at *%(basename)s:8:4
diff --git a/deps/v8/test/message/fail/weak-refs-makecell2.js b/deps/v8/test/message/fail/weak-refs-makecell2.js
new file mode 100644
index 0000000000..df0352554f
--- /dev/null
+++ b/deps/v8/test/message/fail/weak-refs-makecell2.js
@@ -0,0 +1,9 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-weak-refs
+
+let wf = new WeakFactory(() => {});
+let o = {};
+wf.makeCell(o, o);
diff --git a/deps/v8/test/message/fail/weak-refs-makecell2.out b/deps/v8/test/message/fail/weak-refs-makecell2.out
new file mode 100644
index 0000000000..2ea8033183
--- /dev/null
+++ b/deps/v8/test/message/fail/weak-refs-makecell2.out
@@ -0,0 +1,6 @@
+*%(basename)s:9: TypeError: WeakFactory.prototype.makeCell: target and holdings must not be same
+wf.makeCell(o, o);
+ ^
+TypeError: WeakFactory.prototype.makeCell: target and holdings must not be same
+ at WeakFactory.makeCell (<anonymous>)
+ at *%(basename)s:9:4
diff --git a/deps/v8/test/message/fail/weak-refs-weakfactory1.js b/deps/v8/test/message/fail/weak-refs-weakfactory1.js
new file mode 100644
index 0000000000..5359aee736
--- /dev/null
+++ b/deps/v8/test/message/fail/weak-refs-weakfactory1.js
@@ -0,0 +1,7 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-weak-refs
+
+let wf = new WeakFactory();
diff --git a/deps/v8/test/message/fail/weak-refs-weakfactory1.out b/deps/v8/test/message/fail/weak-refs-weakfactory1.out
new file mode 100644
index 0000000000..e865df3053
--- /dev/null
+++ b/deps/v8/test/message/fail/weak-refs-weakfactory1.out
@@ -0,0 +1,6 @@
+*%(basename)s:7: TypeError: WeakFactory: cleanup must be callable
+let wf = new WeakFactory();
+ ^
+TypeError: WeakFactory: cleanup must be callable
+ at new WeakFactory (<anonymous>)
+ at *%(basename)s:7:10
diff --git a/deps/v8/test/message/fail/weak-refs-weakfactory2.js b/deps/v8/test/message/fail/weak-refs-weakfactory2.js
new file mode 100644
index 0000000000..fabb7f1e41
--- /dev/null
+++ b/deps/v8/test/message/fail/weak-refs-weakfactory2.js
@@ -0,0 +1,7 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-weak-refs
+
+let wf = new WeakFactory({});
diff --git a/deps/v8/test/message/fail/weak-refs-weakfactory2.out b/deps/v8/test/message/fail/weak-refs-weakfactory2.out
new file mode 100644
index 0000000000..7a6ee459b3
--- /dev/null
+++ b/deps/v8/test/message/fail/weak-refs-weakfactory2.out
@@ -0,0 +1,6 @@
+*%(basename)s:7: TypeError: WeakFactory: cleanup must be callable
+let wf = new WeakFactory({});
+ ^
+TypeError: WeakFactory: cleanup must be callable
+ at new WeakFactory (<anonymous>)
+ at *%(basename)s:7:10
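Editor's note: the weak-refs tests above target the WeakFactory/makeCell surface of an early draft of the WeakRefs proposal behind --harmony-weak-refs; the proposal that eventually shipped replaced this API with WeakRef and FinalizationRegistry, so the sketch below only applies to a V8 of this vintage with the flag enabled:

    // d8 --harmony-weak-refs
    let wf = new WeakFactory(() => print("cleanup ran"));  // cleanup must be callable
    let target = {};
    wf.makeCell(target);             // ok: target is an object
    // wf.makeCell(1);               // TypeError: target must be an object
    // wf.makeCell(target, target);  // TypeError: target and holdings must not be same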
diff --git a/deps/v8/test/message/message.status b/deps/v8/test/message/message.status
index d106f51e27..cc604a5a3b 100644
--- a/deps/v8/test/message/message.status
+++ b/deps/v8/test/message/message.status
@@ -49,4 +49,18 @@
'*': [SKIP],
}], # variant == code_serializer

+['no_i18n == True', {
+ 'fail/list-format*': [SKIP],
+}], # no_i18n == True
+
+##############################################################################
+['lite_mode', {
+ # TODO(v8:7777): Re-enable once wasm is supported in jitless mode.
+ 'fail/wasm-*': [SKIP],
+ 'wasm-*': [SKIP],
+
+ # Test output requires --validate-asm, which is disabled in jitless mode.
+ 'asm-*': [SKIP],
+}], # lite_mode
+
]
diff --git a/deps/v8/test/message/mjsunit/fail/assert-promise-result-rejects-with-throw-empty.out b/deps/v8/test/message/mjsunit/fail/assert-promise-result-rejects-with-throw-empty.out
index 605c3712aa..39be5b61fe 100644
--- a/deps/v8/test/message/mjsunit/fail/assert-promise-result-rejects-with-throw-empty.out
+++ b/deps/v8/test/message/mjsunit/fail/assert-promise-result-rejects-with-throw-empty.out
@@ -7,4 +7,4 @@ Error
Error: Reject with callback
at concatenateErrors (test/mjsunit/mjsunit.js:{NUMBER}:{NUMBER})
- at _ (test/mjsunit/mjsunit.js:{NUMBER}:{NUMBER})
+ at test/mjsunit/mjsunit.js:{NUMBER}:{NUMBER}
diff --git a/deps/v8/test/message/mjsunit/fail/assert-promise-result-rejects.out b/deps/v8/test/message/mjsunit/fail/assert-promise-result-rejects.out
index 87bbc0311d..edec4c86e4 100644
--- a/deps/v8/test/message/mjsunit/fail/assert-promise-result-rejects.out
+++ b/deps/v8/test/message/mjsunit/fail/assert-promise-result-rejects.out
@@ -6,7 +6,7 @@ Error
at *%(basename)s:9:1
Error: Error in reject handler
- at _ (*%(basename)s:11:17)
- at promise.then.result (test/mjsunit/mjsunit.js:{NUMBER}:{NUMBER})
+ at *%(basename)s:11:17
+ at test/mjsunit/mjsunit.js:{NUMBER}:{NUMBER}
diff --git a/deps/v8/test/message/mjsunit/fail/assert-promise-result-resolves.out b/deps/v8/test/message/mjsunit/fail/assert-promise-result-resolves.out
index 2cd2a1cf7c..6aba852b07 100644
--- a/deps/v8/test/message/mjsunit/fail/assert-promise-result-resolves.out
+++ b/deps/v8/test/message/mjsunit/fail/assert-promise-result-resolves.out
@@ -6,8 +6,8 @@ Error
at *%(basename)s:9:1
Error: Error in resolve handler
- at _ (*%(basename)s:10:39)
- at promise.then.result (test/mjsunit/mjsunit.js:{NUMBER}:{NUMBER})
+ at *%(basename)s:10:39
+ at test/mjsunit/mjsunit.js:{NUMBER}:{NUMBER}
diff --git a/deps/v8/test/message/regress/fail/regress-8409.js b/deps/v8/test/message/regress/fail/regress-8409.js
new file mode 100644
index 0000000000..d6db7cbf1c
--- /dev/null
+++ b/deps/v8/test/message/regress/fail/regress-8409.js
@@ -0,0 +1,5 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+[().x] = 1
diff --git a/deps/v8/test/message/regress/fail/regress-8409.out b/deps/v8/test/message/regress/fail/regress-8409.out
new file mode 100644
index 0000000000..3ca7690366
--- /dev/null
+++ b/deps/v8/test/message/regress/fail/regress-8409.out
@@ -0,0 +1,4 @@
+*%(basename)s:5: SyntaxError: Unexpected token )
+[().x] = 1
+ ^
+SyntaxError: Unexpected token )
diff --git a/deps/v8/test/message/regress/fail/regress-900383.js b/deps/v8/test/message/regress/fail/regress-900383.js
new file mode 100644
index 0000000000..b35bc32bb1
--- /dev/null
+++ b/deps/v8/test/message/regress/fail/regress-900383.js
@@ -0,0 +1,8 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// MODULE
+
+export default x = 1;
+export default x = 1;
diff --git a/deps/v8/test/message/regress/fail/regress-900383.out b/deps/v8/test/message/regress/fail/regress-900383.out
new file mode 100644
index 0000000000..490ca03a80
--- /dev/null
+++ b/deps/v8/test/message/regress/fail/regress-900383.out
@@ -0,0 +1,4 @@
+*%(basename)s:8: SyntaxError: Identifier '*default*' has already been declared
+export default x = 1;
+ ^
+SyntaxError: Identifier '*default*' has already been declared
diff --git a/deps/v8/test/message/wasm-function-name-async.out b/deps/v8/test/message/wasm-function-name-async.out
index b8af10f3a7..4627c7fcf3 100644
--- a/deps/v8/test/message/wasm-function-name-async.out
+++ b/deps/v8/test/message/wasm-function-name-async.out
@@ -1,4 +1,4 @@
RuntimeError: unreachable
at main (wasm-function[0]:1)
- at pair (*%(basename)s:16:27)
- at promise.then.result (test/mjsunit/mjsunit.js:*)
+ at *%(basename)s:16:27
+ at test/mjsunit/mjsunit.js:*
diff --git a/deps/v8/test/message/wasm-function-name-streaming.out b/deps/v8/test/message/wasm-function-name-streaming.out
index 545abb1343..2e33b0808b 100644
--- a/deps/v8/test/message/wasm-function-name-streaming.out
+++ b/deps/v8/test/message/wasm-function-name-streaming.out
@@ -1,4 +1,4 @@
RuntimeError: unreachable
at main (wasm-function[0]:1)
- at pair (test/message/wasm-function-name-async.js:16:27)
- at promise.then.result (test/mjsunit/mjsunit.js:*)
+ at test/message/wasm-function-name-async.js:16:27
+ at test/mjsunit/mjsunit.js:*
diff --git a/deps/v8/test/message/wasm-module-and-function-name-async.out b/deps/v8/test/message/wasm-module-and-function-name-async.out
index a88baa9aec..d1d62dd5be 100644
--- a/deps/v8/test/message/wasm-module-and-function-name-async.out
+++ b/deps/v8/test/message/wasm-module-and-function-name-async.out
@@ -1,4 +1,4 @@
RuntimeError: unreachable
at test-module.main (wasm-function[0]:1)
- at pair (*%(basename)s:17:27)
- at promise.then.result (test/mjsunit/mjsunit.js:*)
+ at *%(basename)s:17:27
+ at test/mjsunit/mjsunit.js:*
diff --git a/deps/v8/test/message/wasm-module-and-function-name-streaming.out b/deps/v8/test/message/wasm-module-and-function-name-streaming.out
index 5dcff0ce9f..fc7360383a 100644
--- a/deps/v8/test/message/wasm-module-and-function-name-streaming.out
+++ b/deps/v8/test/message/wasm-module-and-function-name-streaming.out
@@ -1,4 +1,4 @@
RuntimeError: unreachable
at test-module.main (wasm-function[0]:1)
- at pair (test/message/wasm-module-and-function-name-async.js:17:27)
- at promise.then.result (test/mjsunit/mjsunit.js:*)
+ at test/message/wasm-module-and-function-name-async.js:17:27
+ at test/mjsunit/mjsunit.js:*
diff --git a/deps/v8/test/message/wasm-module-name-async.out b/deps/v8/test/message/wasm-module-name-async.out
index d1c95136f3..6301dba480 100644
--- a/deps/v8/test/message/wasm-module-name-async.out
+++ b/deps/v8/test/message/wasm-module-name-async.out
@@ -1,4 +1,4 @@
RuntimeError: unreachable
at test-module (wasm-function[0]:1)
- at pair (*%(basename)s:19:27)
- at promise.then.result (test/mjsunit/mjsunit.js:*)
+ at *%(basename)s:19:27
+ at test/mjsunit/mjsunit.js:*
diff --git a/deps/v8/test/message/wasm-module-name-streaming.out b/deps/v8/test/message/wasm-module-name-streaming.out
index 11ef814896..e16c7ad373 100644
--- a/deps/v8/test/message/wasm-module-name-streaming.out
+++ b/deps/v8/test/message/wasm-module-name-streaming.out
@@ -1,4 +1,4 @@
RuntimeError: unreachable
at test-module (wasm-function[0]:1)
- at pair (test/message/wasm-module-name-async.js:19:27)
- at promise.then.result (test/mjsunit/mjsunit.js:*)
+ at test/message/wasm-module-name-async.js:19:27
+ at test/mjsunit/mjsunit.js:*
diff --git a/deps/v8/test/message/wasm-no-name-async.out b/deps/v8/test/message/wasm-no-name-async.out
index 9eedba5885..0a299aaaed 100644
--- a/deps/v8/test/message/wasm-no-name-async.out
+++ b/deps/v8/test/message/wasm-no-name-async.out
@@ -1,4 +1,4 @@
RuntimeError: unreachable
at wasm-function[0]:1
- at pair (*%(basename)s:18:27)
- at promise.then.result (test/mjsunit/mjsunit.js:*)
+ at *%(basename)s:18:27
+ at test/mjsunit/mjsunit.js:*
diff --git a/deps/v8/test/message/wasm-no-name-streaming.out b/deps/v8/test/message/wasm-no-name-streaming.out
index cf6ab3d4a5..e4bcb2f48e 100644
--- a/deps/v8/test/message/wasm-no-name-streaming.out
+++ b/deps/v8/test/message/wasm-no-name-streaming.out
@@ -1,4 +1,4 @@
RuntimeError: unreachable
at wasm-function[0]:1
- at pair (test/message/wasm-no-name-async.js:18:27)
- at promise.then.result (test/mjsunit/mjsunit.js:*)
+ at test/message/wasm-no-name-async.js:18:27
+ at test/mjsunit/mjsunit.js:*
diff --git a/deps/v8/test/mjsunit/BUILD.gn b/deps/v8/test/mjsunit/BUILD.gn
index 7823d8f3aa..847565f91a 100644
--- a/deps/v8/test/mjsunit/BUILD.gn
+++ b/deps/v8/test/mjsunit/BUILD.gn
@@ -23,8 +23,5 @@ group("v8_mjsunit") {
"../../tools/splaytree.js",
"../../tools/tickprocessor.js",
"../../tools/dumpcpp.js",
- "../wasm-js/test/harness/wasm-constants.js",
- "../wasm-js/test/harness/wasm-module-builder.js",
- "../wasm-js/test/js-api/jsapi.js",
]
}
diff --git a/deps/v8/test/mjsunit/array-from-large-set.js b/deps/v8/test/mjsunit/array-from-large-set.js
new file mode 100644
index 0000000000..cff3b5a774
--- /dev/null
+++ b/deps/v8/test/mjsunit/array-from-large-set.js
@@ -0,0 +1,11 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+const n = 130000;
+
+{
+ let x = new Set();
+ for (let i = 0; i < n; ++i) x.add(i);
+ let a = Array.from(x);
+}
diff --git a/deps/v8/test/mjsunit/array-functions-prototype-misc.js b/deps/v8/test/mjsunit/array-functions-prototype-misc.js
index dd95e2d266..0aff0a2304 100644
--- a/deps/v8/test/mjsunit/array-functions-prototype-misc.js
+++ b/deps/v8/test/mjsunit/array-functions-prototype-misc.js
@@ -170,20 +170,6 @@ for (var use_real_arrays = 0; use_real_arrays <= 1; use_real_arrays++) {
assertEquals("concat", join);
join = ba.join('');
assertEquals("catcon", join);
-
- var sparse = [];
- sparse[pos + 1000] = 'is ';
- sparse[pos + 271828] = 'time ';
- sparse[pos + 31415] = 'the ';
- sparse[pos + 012260199] = 'all ';
- sparse[-1] = 'foo';
- sparse[pos + 22591927] = 'good ';
- sparse[pos + 1618033] = 'for ';
- sparse[pos + 91] = ': Now ';
- sparse[pos + 86720199] = 'men.';
- sparse.hest = 'fisk';
-
- assertEquals("baz: Now is the time for all good men.", sparse.join(''));
}
a = new_function(pos);
diff --git a/deps/v8/test/mjsunit/array-indexing-receiver.js b/deps/v8/test/mjsunit/array-indexing-receiver.js
index ea8d02b218..4c47a39216 100644
--- a/deps/v8/test/mjsunit/array-indexing-receiver.js
+++ b/deps/v8/test/mjsunit/array-indexing-receiver.js
@@ -356,7 +356,7 @@ var kTests = {
Detached_Int8Array() {
var array = new Int8Array(10);
- %ArrayBufferNeuter(array.buffer);
+ %ArrayBufferDetach(array.buffer);
assertEquals(Array.prototype.indexOf.call(array, 0), -1);
assertEquals(Array.prototype.indexOf.call(array, 0, 10), -1);
},
@@ -391,7 +391,7 @@ var kTests = {
Detached_Uint8Array() {
var array = new Uint8Array(10);
- %ArrayBufferNeuter(array.buffer);
+ %ArrayBufferDetach(array.buffer);
assertEquals(Array.prototype.indexOf.call(array, 0), -1);
assertEquals(Array.prototype.indexOf.call(array, 0, 10), -1);
},
@@ -421,7 +421,7 @@ var kTests = {
Detached_Uint8ClampedArray() {
var array = new Uint8ClampedArray(10);
- %ArrayBufferNeuter(array.buffer);
+ %ArrayBufferDetach(array.buffer);
assertEquals(Array.prototype.indexOf.call(array, 0), -1);
assertEquals(Array.prototype.indexOf.call(array, 0, 10), -1);
},
@@ -453,7 +453,7 @@ var kTests = {
Detached_Int16Array() {
var array = new Int16Array(10);
- %ArrayBufferNeuter(array.buffer);
+ %ArrayBufferDetach(array.buffer);
assertEquals(Array.prototype.indexOf.call(array, 0), -1);
assertEquals(Array.prototype.indexOf.call(array, 0, 10), -1);
},
@@ -485,7 +485,7 @@ var kTests = {
Detached_Uint16Array() {
var array = new Uint16Array(10);
- %ArrayBufferNeuter(array.buffer);
+ %ArrayBufferDetach(array.buffer);
assertEquals(Array.prototype.indexOf.call(array, 0), -1);
assertEquals(Array.prototype.indexOf.call(array, 0, 10), -1);
},
@@ -517,7 +517,7 @@ var kTests = {
Detached_Int32Array() {
var array = new Int32Array(10);
- %ArrayBufferNeuter(array.buffer);
+ %ArrayBufferDetach(array.buffer);
assertEquals(Array.prototype.indexOf.call(array, 0), -1);
assertEquals(Array.prototype.indexOf.call(array, 0, 10), -1);
},
@@ -550,7 +550,7 @@ var kTests = {
Detached_Uint32Array() {
var array = new Uint32Array(10);
- %ArrayBufferNeuter(array.buffer);
+ %ArrayBufferDetach(array.buffer);
assertEquals(Array.prototype.indexOf.call(array, 0), -1);
assertEquals(Array.prototype.indexOf.call(array, 0, 10), -1);
},
@@ -583,7 +583,7 @@ var kTests = {
Detached_Float32Array() {
var array = new Float32Array(10);
- %ArrayBufferNeuter(array.buffer);
+ %ArrayBufferDetach(array.buffer);
assertEquals(Array.prototype.indexOf.call(array, 0), -1);
assertEquals(Array.prototype.indexOf.call(array, 0, 10), -1);
},
@@ -616,7 +616,7 @@ var kTests = {
Detached_Float64Array() {
var array = new Float32Array(10);
- %ArrayBufferNeuter(array.buffer);
+ %ArrayBufferDetach(array.buffer);
assertEquals(Array.prototype.indexOf.call(array, 0), -1);
assertEquals(Array.prototype.indexOf.call(array, 0, 10), -1);
},
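Editor's note: the mechanical rename in this hunk tracks V8's terminology change from "neutering" to "detaching" ArrayBuffers; %ArrayBufferDetach is a test-only intrinsic available under --allow-natives-syntax. Once the backing buffer is detached, a typed array behaves as if empty, so indexOf finds nothing:

    // d8 --allow-natives-syntax
    var array = new Int8Array(10);
    %ArrayBufferDetach(array.buffer);
    console.log(array.length);                            // 0 after detaching
    console.log(Array.prototype.indexOf.call(array, 0));  // -1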
diff --git a/deps/v8/test/mjsunit/array-join-element-tostring-prototype-side-effects.js b/deps/v8/test/mjsunit/array-join-element-tostring-prototype-side-effects.js
new file mode 100644
index 0000000000..a5ddebb9b9
--- /dev/null
+++ b/deps/v8/test/mjsunit/array-join-element-tostring-prototype-side-effects.js
@@ -0,0 +1,14 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+(function ArrayPrototypeChanged() {
+ const el = {
+ toString() {
+ Array.prototype[1] = '2';
+ return '1';
+ }
+ };
+ const a = [el, ,3];
+ assertSame("123", a.join(''));
+})();
diff --git a/deps/v8/test/mjsunit/array-join-element-tostring-side-effects.js b/deps/v8/test/mjsunit/array-join-element-tostring-side-effects.js
new file mode 100644
index 0000000000..1da72a7970
--- /dev/null
+++ b/deps/v8/test/mjsunit/array-join-element-tostring-side-effects.js
@@ -0,0 +1,152 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+const MIN_DICTIONARY_INDEX = 8192;
+
+function ArrayTests() {
+ (function ToStringThrows() {
+ function TestError() {}
+
+ let callCount = 0;
+ const toStringThrows = {
+ toString() {
+ callCount++;
+ throw new TestError;
+ }
+ };
+ const a = [toStringThrows];
+ assertThrows(() => a.join(), TestError);
+ assertSame(1, callCount);
+
+ // Verifies cycle detection still works properly after thrown error.
+ a[0] = 1;
+ a[1] = 2;
+ assertSame('1,2', a.join());
+ })();
+
+ (function ArrayLengthIncreased() {
+ let callCount = 0;
+ const a = [
+ {
+ toString() {
+ callCount++;
+ a.push(2);
+ return '1';
+ }
+ }
+ ];
+ assertSame('1', a.join());
+ assertSame(1, callCount);
+ assertSame('1,2', a.join());
+ })();
+
+ (function ArrayLengthDecreased() {
+ let callCount = 0;
+ const a = [
+ {
+ toString() {
+ callCount++;
+ a.pop();
+ return '1';
+ }
+ },
+ '2'
+ ];
+ assertSame('1,', a.join());
+ assertSame(1, callCount);
+ assertSame('1', a.join());
+ })();
+
+ (function ElementsKindChangedToHoley() {
+ let callCount = 0;
+ const a = [
+ {
+ toString() {
+ callCount++;
+ a.length = 4;
+ a[1] = 777;
+ a[2] = 7.7;
+ return '1';
+ }
+ },
+ 2,
+ 3
+ ];
+ assertSame('1,777,7.7', a.join());
+ assertSame(1, callCount);
+ assertSame('1,777,7.7,', a.join());
+ })();
+
+ (function ElementsKindChangedToHoleyThroughDeletion() {
+ let callCount = 0;
+ const a = [
+ {
+ toString() {
+ callCount++;
+ delete a[1];
+ a[2] = 7.7;
+ return '1';
+ }
+ },
+ 2,
+ 3
+ ];
+ assertSame('1,,7.7', a.join());
+ assertSame(1, callCount);
+ assertSame('1,,7.7', a.join());
+ })();
+
+ (function NumberDictionaryChanged() {
+ let callCount = 0;
+ const a = [];
+ a[MIN_DICTIONARY_INDEX - 1] = {
+ toString() {
+ callCount++;
+ a[MIN_DICTIONARY_INDEX] = '2';
+ return '1';
+ }
+ };
+ a[MIN_DICTIONARY_INDEX] = 'NOPE';
+ assertTrue(%HasDictionaryElements(a));
+ assertSame('12', a.join(''));
+ assertSame(1, callCount);
+ assertSame('12', a.join(''));
+ })();
+
+ (function NumberDictionaryLengthChange() {
+ let callCount = 0;
+ const a = [];
+ a[MIN_DICTIONARY_INDEX - 1] = {
+ toString() {
+ callCount++;
+ a.length = MIN_DICTIONARY_INDEX;
+ return '1';
+ }
+ };
+ a[MIN_DICTIONARY_INDEX] = '2';
+ assertTrue(%HasDictionaryElements(a));
+ assertSame('1', a.join(''));
+ assertSame(1, callCount);
+ assertSame('1', a.join(''));
+ })();
+}
+
+(function NonArrayCycleDetection() {
+ const a = {
+ length: 3,
+ toString() { return Array.prototype.join.call(this); }
+ };
+ a[0] = '1';
+ a[1] = a;
+ a[2] = '3';
+ assertSame("1,,3", Array.prototype.join.call(a));
+});
+
+ArrayTests();
+
+%SetForceSlowPath(true);
+
+ArrayTests();
diff --git a/deps/v8/test/mjsunit/array-join-index-getter-side-effects.js b/deps/v8/test/mjsunit/array-join-index-getter-side-effects.js
new file mode 100644
index 0000000000..4b843464df
--- /dev/null
+++ b/deps/v8/test/mjsunit/array-join-index-getter-side-effects.js
@@ -0,0 +1,108 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+(function Throws() {
+ function TestError() {}
+
+ let callCount = 0;
+ const a = [0, 1];
+ Object.defineProperty(a, '0', {
+ configurable: true,
+ get() {
+ callCount++;
+ throw new TestError();
+ }
+ });
+ assertTrue(%HasDictionaryElements(a));
+ assertThrows(() => a.join(), TestError);
+ assertSame(1, callCount);
+
+ // Verifies cycle detection still works properly after thrown error.
+ Object.defineProperty(a, '0', {
+ configurable: true,
+ get() {
+ callCount++;
+ return 777;
+ }
+ });
+ assertSame('777,1', a.join());
+ assertSame(2, callCount);
+})();
+
+(function ArrayLengthIncreased() {
+ let callCount = 0;
+ const a = [1];
+ Object.defineProperty(a, '0', {
+ configurable: true,
+ get() {
+ callCount++;
+ a.push(2);
+ return 9;
+ }
+ });
+ assertSame('9', a.join());
+ assertSame(1, callCount);
+
+ // Verifies cycle detection still works properly after continuation.
+ assertSame('9,2', a.join());
+ assertSame(2, callCount);
+})();
+
+(function ArrayLengthIncreasedWithHole() {
+ let callCount = 0;
+ const a = [1, , 2];
+ Object.defineProperty(a, '1', {
+ configurable: true,
+ get() {
+ callCount++;
+ a.push(3);
+ }
+ });
+ assertSame('1,,2', a.join());
+ assertSame(1, callCount);
+
+ // Verifies cycle detection still works properly after continuation.
+ assertSame('1,,2,3', a.join());
+ assertSame(2, callCount);
+})();
+
+(function ArrayLengthDecreased() {
+ let callCount = 0;
+ const a = [0, 1];
+ Object.defineProperty(a, '0', {
+ configurable: true,
+ get() {
+ callCount++;
+ a.length = 1;
+ return 9;
+ }
+ });
+ assertSame('9,', a.join());
+ assertSame(1, callCount);
+
+ // Verifies cycle detection still works properly after continuation.
+ assertSame('9', a.join());
+ assertSame(2, callCount);
+})();
+
+(function ElementsKindChangedToHoley() {
+ let callCount = 0;
+ const a = [0, 1];
+ Object.defineProperty(a, '0', {
+ configurable: true,
+ get() {
+ callCount++;
+ a.length = 3;
+ return 9;
+ }
+ });
+ assertSame('9,1', a.join());
+ assertSame(1, callCount);
+
+ // Verifies cycle detection still works properly after continuation.
+ assertSame('9,1,', a.join());
+ assertSame(2, callCount);
+})();
diff --git a/deps/v8/test/mjsunit/array-join-invalid-string-length.js b/deps/v8/test/mjsunit/array-join-invalid-string-length.js
new file mode 100644
index 0000000000..cab8f00bd4
--- /dev/null
+++ b/deps/v8/test/mjsunit/array-join-invalid-string-length.js
@@ -0,0 +1,63 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+(function DictionaryStringRepeatFastPath() {
+ const a = new Array(%StringMaxLength());
+ assertTrue(%HasDictionaryElements(a));
+ const sep = '12';
+ assertThrows(() => a.join(sep), RangeError);
+
+ // Verifies cycle detection still works properly after thrown error.
+ assertThrows(() => a.join(sep), RangeError);
+
+ a.length = 3;
+ a[0] = 'a';
+ a[1] = 'b';
+ a[2] = 'c';
+ assertSame('a,b,c', a.join());
+})();
+
+(function SeparatorOverflow() {
+ const a = ['a',,,,,'b'];
+
+ const sep = ','.repeat(%StringMaxLength());
+ assertThrows(() => a.join(sep), RangeError);
+
+ // Verifies cycle detection still works properly after thrown error.
+ assertThrows(() => a.join(sep), RangeError);
+ assertSame('a,,,,,b', a.join());
+})();
+
+(function ElementOverflow() {
+ const el = ','.repeat(%StringMaxLength());
+ const a = [el, el, el, el, el];
+
+ assertThrows(() => a.join(), RangeError);
+
+ // Verifies cycle detection still works properly after thrown error.
+ assertThrows(() => a.join(), RangeError);
+ a[0] = 'a';
+ a[1] = 'b';
+ a[2] = 'c';
+ a[3] = 'd';
+ a[4] = 'e';
+ assertSame('a,b,c,d,e', a.join());
+})();
+
+(function ElementSeparatorOverflow() {
+ const el = ','.repeat(%StringMaxLength());
+ const a = [el, el, el, el];
+
+ assertThrows(() => a.join(el), RangeError);
+
+ // Verifies cycle detection still works properly after thrown error.
+ assertThrows(() => a.join(el), RangeError);
+ a[0] = 'a';
+ a[1] = 'b';
+ a[2] = 'c';
+ a[3] = 'd';
+ assertSame('a,b,c,d', a.join());
+})();
diff --git a/deps/v8/test/mjsunit/array-join-nesting.js b/deps/v8/test/mjsunit/array-join-nesting.js
new file mode 100644
index 0000000000..d1e75fb512
--- /dev/null
+++ b/deps/v8/test/mjsunit/array-join-nesting.js
@@ -0,0 +1,16 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+const DEPTH = 128;
+
+function makeNestedArray(depth, value) {
+ return depth > 0 ? [value, makeNestedArray(depth - 1, value)] : [value];
+}
+
+const array = makeNestedArray(DEPTH, 'a');
+const expected = 'a' + ',a'.repeat(DEPTH);
+assertSame(expected, array.join());
+
+// Verify cycle detection is still working.
+assertSame(expected, array.join());
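Editor's note: array-join-nesting.js stresses the bookkeeping behind this: Array.prototype.join keeps track of the arrays currently being joined, and a re-entrant visit renders as the empty string instead of recursing forever. The classic observable case:

    const a = [1, 2];
    a.push(a);                // the array now contains itself
    console.log(a.join(',')); // "1,2,": the cyclic element joins as ""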
diff --git a/deps/v8/test/mjsunit/array-join-nonarray-length-getter-side-effects.js b/deps/v8/test/mjsunit/array-join-nonarray-length-getter-side-effects.js
new file mode 100644
index 0000000000..3f8d2aa029
--- /dev/null
+++ b/deps/v8/test/mjsunit/array-join-nonarray-length-getter-side-effects.js
@@ -0,0 +1,29 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+(function Throws() {
+ function TestError() {}
+
+ let callCount = 0;
+ const a = {
+ 0: 1,
+ 1: 2,
+ get length() {
+ callCount++;
+ throw new TestError();
+ }
+ };
+ assertThrows(() => Array.prototype.join.call(a), TestError);
+ assertSame(1, callCount);
+
+ // Verifies cycle detection still works properly after thrown error.
+ Object.defineProperty(a, 'length', {
+ get() {
+ callCount++;
+ return 2;
+ }
+ });
+ assertSame('1,2', Array.prototype.join.call(a));
+ assertSame(2, callCount);
+})();
diff --git a/deps/v8/test/mjsunit/array-join-separator-tostring-side-effects.js b/deps/v8/test/mjsunit/array-join-separator-tostring-side-effects.js
new file mode 100644
index 0000000000..d9e85152ca
--- /dev/null
+++ b/deps/v8/test/mjsunit/array-join-separator-tostring-side-effects.js
@@ -0,0 +1,197 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+const MIN_DICTIONARY_INDEX = 8192;
+
+(function ToStringThrows() {
+ function TestError() {}
+
+ let callCount = 0;
+ const a = [1, 2];
+ assertThrows(() => a.join({
+ toString() {
+ callCount++;
+ throw new TestError;
+ }
+ }), TestError);
+ assertSame(1, callCount);
+
+ // Verifies cycle detection still works properly after thrown error.
+ assertSame('1,2', a.join());
+})();
+
+(function RecursiveJoinCall() {
+ const a = [1,2,3];
+ let callCount = 0;
+ const sep = {
+ toString() {
+ callCount++;
+ return a.join('-');
+ }
+ };
+ assertSame('11-2-321-2-33', a.join(sep));
+ assertSame(1, callCount);
+
+ // Verify cycle detection works properly after nested call
+ assertSame('1,2,3', a.join());
+})();
+
+
+(function ArrayLengthIncreased() {
+ const a = [1,2,3];
+ let callCount = 0;
+ assertSame('1,2,3', a.join({
+ toString() {
+ callCount++;
+ a.push(4);
+ return ',';
+ }
+ }));
+ assertSame(1, callCount);
+ assertSame('1,2,3,4', a.join());
+})();
+
+(function ArrayLengthDecreased() {
+ const a = [1,2,3];
+ let callCount = 0;
+ assertSame('1,2,', a.join({
+ toString() {
+ callCount++;
+ a.pop();
+ return ',';
+ }
+ }));
+ assertSame(1, callCount);
+ assertSame('1,2', a.join());
+})();
+
+(function ArrayEmptied() {
+ const a = [1,2,3];
+ let callCount = 0;
+ assertSame(',,', a.join({
+ toString() {
+ callCount++;
+ a.length = 0;
+ return ',';
+ }
+ }));
+ assertSame(1, callCount);
+})();
+
+(function NumberDictionaryEmptied() {
+ const a = [];
+ a[0] = 1;
+ a[MIN_DICTIONARY_INDEX] = 2;
+ assertTrue(%HasDictionaryElements(a));
+
+ let callCount = 0;
+ assertSame('-'.repeat(MIN_DICTIONARY_INDEX), a.join({
+ toString() {
+ callCount++;
+ a.length = 0;
+ return '-';
+ }
+ }));
+ assertSame(1, callCount);
+})();
+
+(function NumberDictionaryEmptiedEmptySeparator() {
+ const a = [];
+ a[0] = 1;
+ a[MIN_DICTIONARY_INDEX] = 2;
+ assertTrue(%HasDictionaryElements(a));
+
+ let callCount = 0;
+ assertSame(''.repeat(MIN_DICTIONARY_INDEX), a.join({
+ toString() {
+ callCount++;
+ a.length = 0;
+ return '';
+ }
+ }));
+ assertSame(1, callCount);
+})();
+
+(function ElementsKindSmiToDoubles() {
+ const a = [1,2,3];
+ let callCount = 0;
+ assertTrue(%HasSmiElements(a));
+ assertSame('1.5,2,3', a.join({
+ toString() {
+ callCount++;
+ a[0] = 1.5;
+ assertTrue(%HasDoubleElements(a));
+ return ',';
+ }
+ }));
+ assertSame(1, callCount);
+ assertSame('1.5,2,3', a.join());
+})();
+
+(function ElementsKindDoublesToObjects() {
+ const a = [1.5, 2.5, 3.5];
+ let callCount = 0;
+ assertTrue(%HasDoubleElements(a));
+ assertSame('one,2.5,3.5', a.join({
+ toString() {
+ callCount++;
+ a[0] = 'one';
+ assertTrue(%HasObjectElements(a));
+ return ',';
+ }
+ }));
+ assertSame(1, callCount);
+ assertSame('one,2.5,3.5', a.join());
+})();
+
+(function ArrayIsNoLongerFast() {
+ const a = [1,2,3];
+ let callCount = 0;
+ assertSame('666,2,3', a.join({
+ toString() {
+ callCount++;
+ Object.defineProperty(a, '0', {
+ get(){ return 666; }
+ });
+ return ',';
+ }
+ }));
+ assertSame(1, callCount);
+ assertSame('666,2,3', a.join());
+})();
+
+(function ArrayPrototypeUnset() {
+ const a = [1,2];
+ a.length = 3;
+ let callCount = 0;
+ assertSame('1,2,4', a.join({
+ toString() {
+ callCount++;
+ a.__proto__ = { '2': 4 };
+ return ',';
+ }
+ }));
+ assertSame(1, callCount);
+ a.__proto__ = Array.prototype;
+ assertSame('1,2,', a.join());
+})();
+
+(function ArrayPrototypeIsNoLongerFast() {
+ const a = [1,2,3];
+ let callCount = 0;
+ assertSame('1,2,777', a.join({
+ toString() {
+ callCount++;
+ a.pop();
+ Object.defineProperty(Array.prototype, '2', {
+ get(){ return 777; }
+ });
+ return ',';
+ }
+ }));
+ assertSame(1, callCount);
+ assertSame('1,2', a.join());
+})();
diff --git a/deps/v8/test/mjsunit/array-join.js b/deps/v8/test/mjsunit/array-join.js
index 0c949e769a..1d5ab30ebc 100644
--- a/deps/v8/test/mjsunit/array-join.js
+++ b/deps/v8/test/mjsunit/array-join.js
@@ -25,6 +25,17 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+assertSame(',', [null, undefined].join());
+assertSame('1.5,2.5', [1.5, 2.5].join());
+assertSame(',1.5,', [,1.5,,].join());
+
+var obj = {
+ toString() {
+ return 'a';
+ }
+};
+assertSame('1,1.5,a,', [1, 1.5, obj, ,].join());
+
// Test that array join calls toString on subarrays.
var a = [[1,2],3,4,[5,6]];
assertEquals('1,2345,6', a.join(''));
@@ -82,9 +93,6 @@ assertEquals(246244, a.join("oo").length);
a = new Array(Math.pow(2,32) - 1); // Max length.
assertEquals("", a.join(""));
-a[123123123] = "o";
-a[1255215215] = "p";
-assertEquals("op", a.join(""));
a = new Array(100001);
for (var i = 0; i < a.length; i++) a[i] = undefined;
diff --git a/deps/v8/test/mjsunit/array-natives-elements.js b/deps/v8/test/mjsunit/array-natives-elements.js
index 8ab2148b91..aa3bea49d0 100644
--- a/deps/v8/test/mjsunit/array-natives-elements.js
+++ b/deps/v8/test/mjsunit/array-natives-elements.js
@@ -112,15 +112,19 @@ function array_natives_test() {
assertEquals([2], a2.slice(1,2));
a2 = [1.1,2,3];
assertTrue(%HasDoubleElements(a2.slice()));
- assertTrue(%HasDoubleElements(a2.slice(1)));
- assertTrue(%HasDoubleElements(a2.slice(1, 2)));
+ assertTrue(%HasDoubleElements(a2.slice(1)) ||
+ %HasSmiElements(a2.slice(1)));
+ assertTrue(%HasDoubleElements(a2.slice(1, 2)) ||
+ %HasSmiElements(a2.slice(1, 2)));
assertEquals([1.1,2,3], a2.slice());
assertEquals([2,3], a2.slice(1));
assertEquals([2], a2.slice(1,2));
a2 = [{},2,3];
assertTrue(%HasObjectElements(a2.slice()));
- assertTrue(%HasObjectElements(a2.slice(1)));
- assertTrue(%HasObjectElements(a2.slice(1, 2)));
+ assertTrue(%HasObjectElements(a2.slice(1)) ||
+ %HasSmiElements(a2.slice(1)));
+ assertTrue(%HasObjectElements(a2.slice(1, 2)) ||
+ %HasSmiElements(a2.slice(1, 2)));
assertEquals([{},2,3], a2.slice());
assertEquals([2,3], a2.slice(1));
assertEquals([2], a2.slice(1,2));
diff --git a/deps/v8/test/mjsunit/array-sort.js b/deps/v8/test/mjsunit/array-sort.js
index 8c2c6fca63..ca0daadf04 100644
--- a/deps/v8/test/mjsunit/array-sort.js
+++ b/deps/v8/test/mjsunit/array-sort.js
@@ -567,6 +567,27 @@ function TestPrototypeHoles() {
}
TestPrototypeHoles();
+// The following test ensures that elements on the prototype are also copied
+// for JSArrays and not only JSObjects.
+function TestArrayPrototypeHasElements() {
+ let array = [1, 2, 3, 4, 5];
+ for (let i = 0; i < array.length; i++) {
+ delete array[i];
+ Object.prototype[i] = 42;
+ }
+
+ let comparator_called = false;
+ array.sort(function (a, b) {
+ if (a === 42 || b === 42) {
+ comparator_called = true;
+ }
+ return a - b;
+ });
+
+ assertTrue(comparator_called);
+}
+TestArrayPrototypeHasElements();
+
// The following tests make sure that there is no crash when the element kind
// or the array length changes. Since comparison functions like this are not
// consistent, we do not have to make sure that the array is actually sorted
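// A plain-JS sketch of the lookup rule that makes the comparator above see
// 42: a read of a holey element falls through to the prototype chain, so
// sort()'s copy phase picks up inherited indexed properties.
Object.prototype[0] = 'inherited';
const holey = [ , 'own'];
console.log(holey[0]);       // 'inherited'
delete Object.prototype[0];  // restore the prototype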
diff --git a/deps/v8/test/mjsunit/array-tolocalestring.js b/deps/v8/test/mjsunit/array-tolocalestring.js
new file mode 100644
index 0000000000..a5f856907a
--- /dev/null
+++ b/deps/v8/test/mjsunit/array-tolocalestring.js
@@ -0,0 +1,72 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+(function CycleDetection() {
+ const arr = [
+ {
+ toLocaleString() {
+ return [1, arr];
+ }
+ }
+ ];
+ assertSame('1,', arr.toLocaleString());
+ assertSame('1,', arr.toLocaleString());
+})();
+
+(function ThrowsError(){
+ function TestError() {}
+ const arr = [];
+ const obj = {
+ toLocaleString(){
+ throw new TestError();
+ }
+ };
+ arr[0] = obj;
+ assertThrows(() => arr.toLocaleString(), TestError);
+
+ // Verifies that cycle detection still works properly after a thrown error.
+ arr[0] = {
+ toLocaleString() {
+ return 1;
+ }
+ };
+ assertSame('1', arr.toLocaleString());
+})();
+
+(function AccessThrowsError(){
+ function TestError() {}
+ const arr = [];
+ const obj = {
+ get toLocaleString(){
+ throw new TestError();
+ }
+ };
+ arr[0] = obj;
+ assertThrows(() => arr.toLocaleString(), TestError);
+
+ // Verifies that cycle detection still works properly after a thrown error.
+ arr[0] = {
+ toLocaleString() {
+ return 1;
+ }
+ };
+ assertSame('1', arr.toLocaleString());
+})();
+
+(function NotCallable(){
+ const arr = [];
+ const obj = {
+ toLocaleString: 7
+ }
+ arr[0] = obj;
+ assertThrows(() => arr.toLocaleString(), TypeError, '7 is not a function');
+
+ // Verifies that cycle detection still works properly after a thrown error.
+ arr[0] = {
+ toLocaleString() {
+ return 1;
+ }
+ };
+ assertSame('1', arr.toLocaleString());
+})();
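// Sketch of the cycle-detection contract from the first case above:
// toLocaleString() on a self-referential array must terminate, and the
// revisited cycle renders as the empty string — on every invocation, since
// the detection state is reset between calls.
const selfRef = [1];
selfRef.push(selfRef);
console.log(selfRef.toLocaleString());  // '1,'
console.log(selfRef.toLocaleString());  // '1,'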
diff --git a/deps/v8/test/mjsunit/arrow-with.js b/deps/v8/test/mjsunit/arrow-with.js
new file mode 100644
index 0000000000..a1f97afa38
--- /dev/null
+++ b/deps/v8/test/mjsunit/arrow-with.js
@@ -0,0 +1,7 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function f() { with ({}) { return (()=>this)() } }
+var o = {}
+assertEquals(o, f.call(o))
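// The one-liner above in slow motion: an arrow resolves `this` lexically at
// creation time, so the object placed on the scope chain by `with` never
// becomes the receiver.
function arrowWith() { with ({ marker: 1 }) { return () => this; } }
const receiver = { tag: 'receiver' };
console.log(arrowWith.call(receiver)().tag);  // 'receiver'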
diff --git a/deps/v8/test/mjsunit/asm/regress-913822.js b/deps/v8/test/mjsunit/asm/regress-913822.js
new file mode 100644
index 0000000000..d6ee74637a
--- /dev/null
+++ b/deps/v8/test/mjsunit/asm/regress-913822.js
@@ -0,0 +1,25 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+(function TestNewlineInCPPComment() {
+ function Module() {
+ "use asm" // Crash by comment!
+ function f() {}
+ return f
+ }
+ Module();
+ assertTrue(%IsAsmWasmCode(Module));
+})();
+
+(function TestNewlineInCComment() {
+ function Module() {
+ "use asm" /* Crash by
+ comment! */ function f() {}
+ return f
+ }
+ Module();
+ assertTrue(%IsAsmWasmCode(Module));
+})();
diff --git a/deps/v8/test/mjsunit/asm/regress-920076.js b/deps/v8/test/mjsunit/asm/regress-920076.js
new file mode 100644
index 0000000000..754b931cb8
--- /dev/null
+++ b/deps/v8/test/mjsunit/asm/regress-920076.js
@@ -0,0 +1,13 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function Module() {
+ "use asm";
+ function f() {}
+ return f
+}
+eval("(" + Module.toString().replace(/;/, String.fromCharCode(8233)) + ")();");
+assertFalse(%IsAsmWasmCode(Module)); // Valid asm.js, but we reject Unicode.
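// Background for the test above: code point 8233 is U+2029 PARAGRAPH
// SEPARATOR, a legal ECMAScript LineTerminator, so the rewritten module
// still parses as plain JavaScript — but the asm.js validator rejects the
// non-ASCII line terminator, hence %IsAsmWasmCode must report false rather
// than the engine crashing.
console.log(String.fromCharCode(8233) === '\u2029');  // true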
diff --git a/deps/v8/test/mjsunit/async-hooks/async-await-tree.js b/deps/v8/test/mjsunit/async-hooks/async-await-tree.js
index 955355cf31..230c40e4e0 100644
--- a/deps/v8/test/mjsunit/async-hooks/async-await-tree.js
+++ b/deps/v8/test/mjsunit/async-hooks/async-await-tree.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --expose-async-hooks
+// Flags: --expose-async-hooks --harmony-await-optimization
// Check for async/await asyncIds relation
(function() {
@@ -41,8 +41,9 @@
});
ah.enable();
- // Simplified version of Node.js util.promisify(setTimeout)
- function sleep(callback, timeout) {
+ // Simplified version of Node.js util.promisify(setTimeout),
+ // but d8 ignores the timeout argument of setTimeout.
+ function sleep0() {
const promise = new Promise(function(resolve, reject) {
try {
setTimeout((err, ...values) => {
@@ -51,7 +52,7 @@
} else {
resolve(values[0]);
}
- }, timeout);
+ }, 0);
} catch (err) {
reject(err);
}
@@ -60,15 +61,14 @@
}
async function foo() {
- await sleep(10);
+ await sleep0();
}
- foo().then(function() {
- assertEquals(asyncIds.length, 6);
- assertEquals(triggerIds.length, 6);
- assertEquals(triggerIds[2], asyncIds[0]);
- assertEquals(triggerIds[3], asyncIds[2]);
- assertEquals(triggerIds[4], asyncIds[0]);
- assertEquals(triggerIds[5], asyncIds[1]);
- });
+ assertPromiseResult(
+ foo().then(function() {
+ assertEquals(triggerIds[2], asyncIds[1]);
+ assertEquals(triggerIds[3], asyncIds[0]);
+ assertEquals(triggerIds[4], asyncIds[3]);
+ assertEquals(triggerIds[6], asyncIds[5]);
+ }));
})();
diff --git a/deps/v8/test/mjsunit/async-hooks/chained-promises.js b/deps/v8/test/mjsunit/async-hooks/chained-promises.js
index 8b346530eb..a2746dcf0d 100644
--- a/deps/v8/test/mjsunit/async-hooks/chained-promises.js
+++ b/deps/v8/test/mjsunit/async-hooks/chained-promises.js
@@ -40,9 +40,13 @@
let createdPromise = new Promise(function(resolve) {
resolve(42);
}).then(function() {
- assertEquals(asyncIds.length, 2, 'Exactly 2 promises should be inited');
- assertEquals(triggerIds.length, 2, 'Exactly 2 promises should be inited');
+ assertEquals(3, asyncIds.length, 'Exactly 3 promises should be inited');
+ assertEquals(3, triggerIds.length, 'Exactly 3 promises should be inited');
assertEquals(triggerIds[1], asyncIds[0],
"Parent promise asyncId doesn't correspond to child triggerAsyncId");
+ }).catch((err) => {
+ setTimeout(() => {
+ throw err;
+ }, 0);
});
})();
diff --git a/deps/v8/test/mjsunit/async-hooks/execution-order.js b/deps/v8/test/mjsunit/async-hooks/execution-order.js
index f63ecf0032..7eb055e708 100644
--- a/deps/v8/test/mjsunit/async-hooks/execution-order.js
+++ b/deps/v8/test/mjsunit/async-hooks/execution-order.js
@@ -29,27 +29,27 @@
// Check for correct execution of available hooks and asyncIds
(function() {
- let inited = false, resolved = false, before = false, after = false;
- let storedAsyncId;
+ let calledHooks = [];
+ let rootAsyncId = 0;
+
let ah = async_hooks.createHook({
- init(asyncId, type, triggerAsyncId, resource) {
+ init: function init(asyncId, type, triggerAsyncId, resource) {
if (type !== 'PROMISE') {
return;
}
- inited = true;
- storedAsyncId = asyncId;
+ if (triggerAsyncId === 0) {
+ rootAsyncId = asyncId;
+ }
+ calledHooks.push(['init', asyncId]);
},
- promiseResolve(asyncId) {
- assertEquals(asyncId, storedAsyncId, 'AsyncId mismatch in resolve hook');
- resolved = true;
+ promiseResolve: function promiseResolve(asyncId) {
+ calledHooks.push(['resolve', asyncId]);
},
- before(asyncId) {
- assertEquals(asyncId, storedAsyncId, 'AsyncId mismatch in before hook');
- before = true;
+ before: function before(asyncId) {
+ calledHooks.push(['before', asyncId]);
},
- after(asyncId) {
- assertEquals(asyncId, storedAsyncId, 'AsyncId mismatch in after hook');
- after = true;
+ after: function after(asyncId) {
+ calledHooks.push(['after', asyncId]);
},
});
ah.enable();
@@ -57,9 +57,21 @@
new Promise(function(resolve) {
resolve(42);
}).then(function() {
- assertTrue(inited, "Didn't call init hook");
- assertTrue(resolved, "Didn't call resolve hook");
- assertTrue(before, "Didn't call before hook before the callback");
- assertFalse(after, "Called after hook before the callback");
+ // [hook type, async Id]
+ const expectedHooks = [
+ ['init', rootAsyncId], // the promise that we create initially
+ ['resolve', rootAsyncId],
+ ['init', rootAsyncId + 1], // the chained promise with the assertions
+ ['init', rootAsyncId + 2], // the chained promise from the catch block
+ ['before', rootAsyncId + 1],
+ // ['after', rootAsyncId + 1] will get called after the assertions
+ ];
+
+ assertArrayEquals(expectedHooks, calledHooks,
+ 'Mismatch in async hooks execution order');
+ }).catch((err) => {
+ setTimeout(() => {
+ throw err;
+ }, 0);
});
})();
diff --git a/deps/v8/test/mjsunit/async-hooks/promises-async-await.js b/deps/v8/test/mjsunit/async-hooks/promises-async-await.js
index 2eba6ba6c5..27148e7cd8 100644
--- a/deps/v8/test/mjsunit/async-hooks/promises-async-await.js
+++ b/deps/v8/test/mjsunit/async-hooks/promises-async-await.js
@@ -56,6 +56,10 @@
}).then(() => {
assertNotEquals(outerExecutionAsyncId, async_hooks.executionAsyncId());
assertNotEquals(outerTriggerAsyncId, async_hooks.triggerAsyncId());
+ }).catch((err) => {
+ setTimeout(() => {
+ throw err;
+ }, 0);
});
});
diff --git a/deps/v8/test/mjsunit/async-stack-traces-prepare-stacktrace-4.js b/deps/v8/test/mjsunit/async-stack-traces-prepare-stacktrace-4.js
new file mode 100644
index 0000000000..a4484cdc89
--- /dev/null
+++ b/deps/v8/test/mjsunit/async-stack-traces-prepare-stacktrace-4.js
@@ -0,0 +1,39 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --async-stack-traces
+
+// Check that Error.prepareStackTrace properly exposes async
+// stack frames and special Promise.all() stack frames.
+Error.prepareStackTrace = (e, frames) => {
+ assertEquals(two, frames[0].getFunction());
+ assertEquals(two.name, frames[0].getFunctionName());
+ assertEquals(null, frames[0].getPromiseIndex());
+ assertFalse(frames[0].isAsync());
+ assertEquals(Promise.all, frames[1].getFunction());
+ assertEquals(0, frames[1].getPromiseIndex());
+ assertTrue(frames[1].isAsync());
+ assertTrue(frames[1].isPromiseAll());
+ assertEquals(one, frames[2].getFunction());
+ assertEquals(one.name, frames[2].getFunctionName());
+ assertEquals(null, frames[2].getPromiseIndex());
+ assertTrue(frames[2].isAsync());
+ assertFalse(frames[2].isPromiseAll());
+ return frames;
+};
+
+async function one(x) {
+ return await Promise.all([two(x)]);
+}
+
+async function two(x) {
+ try {
+ x = await x;
+ throw new Error();
+ } catch (e) {
+ return e.stack;
+ }
+}
+
+one(1).catch(e => setTimeout(_ => {throw e}, 0));
diff --git a/deps/v8/test/mjsunit/async-stack-traces-promise-all.js b/deps/v8/test/mjsunit/async-stack-traces-promise-all.js
new file mode 100644
index 0000000000..7f8457c961
--- /dev/null
+++ b/deps/v8/test/mjsunit/async-stack-traces-promise-all.js
@@ -0,0 +1,38 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --async-stack-traces
+
+// Basic test with Promise.all().
+(function() {
+ async function fine() { }
+
+ async function thrower() {
+ await fine();
+ throw new Error();
+ }
+
+ async function driver() {
+ await Promise.all([fine(), fine(), thrower(), thrower()]);
+ }
+
+ async function test(f) {
+ try {
+ await f();
+ assertUnreachable();
+ } catch (e) {
+ assertInstanceof(e, Error);
+ assertMatches(/Error.+at thrower.+at async Promise.all \(index 2\).+at async driver.+at async test/ms, e.stack);
+ }
+ }
+
+ assertPromiseResult((async () => {
+ await test(driver);
+ await test(driver);
+ %OptimizeFunctionOnNextCall(thrower);
+ await test(driver);
+ %OptimizeFunctionOnNextCall(driver);
+ await test(driver);
+ })());
+})();
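// For reference, the stack shape the regex above accepts looks roughly like
// this in d8 with --async-stack-traces (file/line positions elided):
//
//   Error
//       at thrower (...)
//       at async Promise.all (index 2)
//       at async driver (...)
//       at async test (...)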
diff --git a/deps/v8/test/mjsunit/async-stack-traces.js b/deps/v8/test/mjsunit/async-stack-traces.js
index 05cf8a095f..c945f4e37b 100644
--- a/deps/v8/test/mjsunit/async-stack-traces.js
+++ b/deps/v8/test/mjsunit/async-stack-traces.js
@@ -268,3 +268,34 @@
await test(one);
})());
})();
+
+// Basic test to check that we also follow initial
+// promise chains created via Promise#then().
+(function() {
+ async function one(p) {
+ return await p.then(two);
+ }
+
+ function two() {
+ throw new Error();
+ }
+
+ async function test(f) {
+ try {
+ await f(Promise.resolve());
+ assertUnreachable();
+ } catch (e) {
+ assertInstanceof(e, Error);
+ assertMatches(/Error.+at two.+at async one.+at async test/ms, e.stack);
+ }
+ }
+
+ assertPromiseResult((async () => {
+ await test(one);
+ await test(one);
+ %OptimizeFunctionOnNextCall(two);
+ await test(one);
+ %OptimizeFunctionOnNextCall(one);
+ await test(one);
+ })());
+})();
diff --git a/deps/v8/test/mjsunit/code-coverage-ad-hoc.js b/deps/v8/test/mjsunit/code-coverage-ad-hoc.js
index 75f513c099..184c7d52b7 100644
--- a/deps/v8/test/mjsunit/code-coverage-ad-hoc.js
+++ b/deps/v8/test/mjsunit/code-coverage-ad-hoc.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --no-always-opt
+// Flags: --allow-natives-syntax --no-always-opt --no-stress-flush-bytecode
// Files: test/mjsunit/code-coverage-utils.js
// Test code coverage without explicitly activating it upfront.
diff --git a/deps/v8/test/mjsunit/code-coverage-block-noopt.js b/deps/v8/test/mjsunit/code-coverage-block-noopt.js
index ef68e0394d..9865e6ee27 100644
--- a/deps/v8/test/mjsunit/code-coverage-block-noopt.js
+++ b/deps/v8/test/mjsunit/code-coverage-block-noopt.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --no-always-opt
+// Flags: --allow-natives-syntax --no-always-opt --no-stress-flush-bytecode
// Flags: --no-opt
// Files: test/mjsunit/code-coverage-utils.js
diff --git a/deps/v8/test/mjsunit/code-coverage-block-opt.js b/deps/v8/test/mjsunit/code-coverage-block-opt.js
index 5477a18dd5..ee21ff6a80 100644
--- a/deps/v8/test/mjsunit/code-coverage-block-opt.js
+++ b/deps/v8/test/mjsunit/code-coverage-block-opt.js
@@ -3,8 +3,14 @@
// found in the LICENSE file.
// Flags: --allow-natives-syntax --no-always-opt --opt
+// Flags: --no-stress-flush-bytecode
// Files: test/mjsunit/code-coverage-utils.js
+if (isNeverOptimizeLiteMode()) {
+ print("Warning: skipping test that requires optimization in Lite mode.");
+ quit(0);
+}
+
%DebugToggleBlockCoverage(true);
TestCoverage(
diff --git a/deps/v8/test/mjsunit/code-coverage-block.js b/deps/v8/test/mjsunit/code-coverage-block.js
index 6df2ca5a56..0547d54a42 100644
--- a/deps/v8/test/mjsunit/code-coverage-block.js
+++ b/deps/v8/test/mjsunit/code-coverage-block.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --no-always-opt
+// Flags: --allow-natives-syntax --no-always-opt --no-stress-flush-bytecode
// Files: test/mjsunit/code-coverage-utils.js
%DebugToggleBlockCoverage(true);
@@ -213,7 +213,7 @@ TestCoverage(
nop(); // 0100
} // 0150
}(); // 0200
-%RunMicrotasks(); // 0250
+%PerformMicrotaskCheckpoint(); // 0250
`,
[{"start":0,"end":299,"count":1},
{"start":1,"end":201,"count":6}, // TODO(jgruber): Invocation count is off.
@@ -353,11 +353,11 @@ TestCoverage(
[{"start":0,"end":849,"count":1},
{"start":1,"end":801,"count":1},
{"start":67,"end":87,"count":0},
- {"start":221,"end":222,"count":0},
+ {"start":219,"end":222,"count":0},
{"start":254,"end":274,"count":0},
- {"start":371,"end":372,"count":0},
+ {"start":369,"end":372,"count":0},
{"start":403,"end":404,"count":0},
- {"start":553,"end":554,"count":0}]
+ {"start":513,"end":554,"count":0}]
);
TestCoverage("try/catch/finally statements with early return",
@@ -374,10 +374,10 @@ TestCoverage("try/catch/finally statements with early return",
`,
[{"start":0,"end":449,"count":1},
{"start":1,"end":151,"count":1},
- {"start":69,"end":70,"count":0},
+ {"start":67,"end":70,"count":0},
{"start":91,"end":150,"count":0},
{"start":201,"end":401,"count":1},
- {"start":269,"end":270,"count":0},
+ {"start":267,"end":270,"count":0},
{"start":321,"end":400,"count":0}]
);
@@ -409,7 +409,7 @@ TestCoverage(
`,
[{"start":0,"end":1099,"count":1},
{"start":1,"end":151,"count":1},
- {"start":69,"end":70,"count":0},
+ {"start":67,"end":70,"count":0},
{"start":91,"end":150,"count":0},
{"start":201,"end":351,"count":1},
{"start":286,"end":350,"count":0},
@@ -417,7 +417,7 @@ TestCoverage(
{"start":603,"end":700,"count":0},
{"start":561,"end":568,"count":0}, // TODO(jgruber): Sorting.
{"start":751,"end":1051,"count":1},
- {"start":819,"end":820,"count":0},
+ {"start":817,"end":820,"count":0},
{"start":861,"end":1050,"count":0}]
);
@@ -656,7 +656,7 @@ async function f() { // 0000
await 42; // 0100
}; // 0150
f(); // 0200
-%RunMicrotasks(); // 0250
+%PerformMicrotaskCheckpoint(); // 0250
`,
[{"start":0,"end":299,"count":1},
{"start":0,"end":151,"count":3},
@@ -1004,41 +1004,4 @@ c(true); d(true); // 1650
{"start":1403,"end":1503,"count":0}]
);
-TestCoverage(
-"https://crbug.com/927464",
-`
-!function f() { // 0000
- function unused() { nop(); } // 0050
- nop(); // 0100
-}(); // 0150
-`,
-[{"start":0,"end":199,"count":1},
- {"start":1,"end":151,"count":1},
- {"start":52,"end":80,"count":0}]
-);
-
-TestCoverage(
-"https://crbug.com/v8/8691",
-`
-function f(shouldThrow) { // 0000
- if (shouldThrow) { // 0050
- throw Error('threw') // 0100
- } // 0150
-} // 0200
-try { // 0250
- f(true) // 0300
-} catch (err) { // 0350
- // 0400
-} // 0450
-try { // 0500
- f(false) // 0550
-} catch (err) {} // 0600
-`,
-[{"start":0,"end":649,"count":1},
- {"start":351,"end":352,"count":0},
- {"start":602,"end":616,"count":0},
- {"start":0,"end":201,"count":2},
- {"start":69,"end":153,"count":1}]
-);
-
%DebugToggleBlockCoverage(false);
diff --git a/deps/v8/test/mjsunit/code-coverage-class-fields.js b/deps/v8/test/mjsunit/code-coverage-class-fields.js
index a91c25824f..8db45d142b 100644
--- a/deps/v8/test/mjsunit/code-coverage-class-fields.js
+++ b/deps/v8/test/mjsunit/code-coverage-class-fields.js
@@ -2,7 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --no-always-opt --harmony-public-fields --harmony-static-fields
+// Flags: --allow-natives-syntax --no-always-opt --harmony-public-fields
+// Flags: --harmony-static-fields --no-stress-flush-bytecode
// Files: test/mjsunit/code-coverage-utils.js
%DebugToggleBlockCoverage(true);
diff --git a/deps/v8/test/mjsunit/code-coverage-precise.js b/deps/v8/test/mjsunit/code-coverage-precise.js
index c5569cf010..2593ed64a0 100644
--- a/deps/v8/test/mjsunit/code-coverage-precise.js
+++ b/deps/v8/test/mjsunit/code-coverage-precise.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --no-always-opt
+// Flags: --allow-natives-syntax --no-always-opt --no-stress-flush-bytecode
// Flags: --no-stress-incremental-marking
// Files: test/mjsunit/code-coverage-utils.js
diff --git a/deps/v8/test/mjsunit/compiler/abstract-equal-receiver.js b/deps/v8/test/mjsunit/compiler/abstract-equal-receiver.js
new file mode 100644
index 0000000000..1026b68342
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/abstract-equal-receiver.js
@@ -0,0 +1,177 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --opt --noalways-opt
+
+// Known receivers abstract equality.
+(function() {
+ const a = {};
+ const b = {};
+
+ function foo() { return a == b; }
+
+ assertFalse(foo());
+ assertFalse(foo());
+ %OptimizeFunctionOnNextCall(foo);
+ assertFalse(foo());
+})();
+
+// Known receiver/null abstract equality.
+(function() {
+ const a = {};
+ const b = null;
+
+ function foo() { return a == b; }
+
+ assertFalse(foo());
+ assertFalse(foo());
+ %OptimizeFunctionOnNextCall(foo);
+ assertFalse(foo());
+})();
+
+// Known null/receiver abstract equality.
+(function() {
+ const a = null;
+ const b = {};
+
+ function foo() { return a == b; }
+
+ assertFalse(foo());
+ assertFalse(foo());
+ %OptimizeFunctionOnNextCall(foo);
+ assertFalse(foo());
+})();
+
+// Known receiver/undefined abstract equality.
+(function() {
+ const a = {};
+ const b = undefined;
+
+ function foo() { return a == b; }
+
+ assertFalse(foo());
+ assertFalse(foo());
+ %OptimizeFunctionOnNextCall(foo);
+ assertFalse(foo());
+})();
+
+// Known undefined/receiver abstract equality.
+(function() {
+ const a = undefined;
+ const b = {};
+
+ function foo() { return a == b; }
+
+ assertFalse(foo());
+ assertFalse(foo());
+ %OptimizeFunctionOnNextCall(foo);
+ assertFalse(foo());
+})();
+
+// Known receiver on one side abstract equality.
+(function() {
+ const a = {};
+ const b = {};
+
+ function foo(a) { return a == b; }
+
+ assertTrue(foo(b));
+ assertFalse(foo(a));
+ assertTrue(foo(b));
+ assertFalse(foo(a));
+ %OptimizeFunctionOnNextCall(foo);
+ assertTrue(foo(b));
+ assertFalse(foo(a));
+
+ // TurboFan bakes in feedback for the (unknown) left hand side.
+ assertFalse(foo(null));
+ assertUnoptimized(foo);
+})();
+
+// Known receiver on one side abstract equality with null.
+(function() {
+ const a = null;
+ const b = {};
+
+ function foo(a) { return a == b; }
+
+ assertTrue(foo(b));
+ assertFalse(foo(a));
+ assertTrue(foo(b));
+ assertFalse(foo(a));
+ %OptimizeFunctionOnNextCall(foo);
+ assertTrue(foo(b));
+ assertFalse(foo(a));
+
+ // TurboFan bakes in feedback for the (unknown) left hand side.
+ assertFalse(foo(1));
+ assertUnoptimized(foo);
+})();
+
+// Known receiver on one side abstract equality with undefined.
+(function() {
+ const a = undefined;
+ const b = {};
+
+ function foo(a) { return a == b; }
+
+ assertTrue(foo(b));
+ assertFalse(foo(a));
+ assertTrue(foo(b));
+ assertFalse(foo(a));
+ %OptimizeFunctionOnNextCall(foo);
+ assertTrue(foo(b));
+ assertFalse(foo(a));
+
+ // TurboFan bakes in feedback for the (unknown) left hand side.
+ assertFalse(foo(1));
+ assertUnoptimized(foo);
+})();
+
+// Known null on one side abstract equality with receiver.
+(function() {
+ const a = {};
+ const b = null;
+
+ function foo(a) { return a == b; }
+
+ assertTrue(foo(b));
+ assertFalse(foo(a));
+ assertTrue(foo(b));
+ assertFalse(foo(a));
+ %OptimizeFunctionOnNextCall(foo);
+ assertTrue(foo(b));
+ assertFalse(foo(a));
+ assertTrue(foo(null));
+ assertTrue(foo(undefined));
+ assertOptimized(foo);
+
+ // TurboFan doesn't need to bake in feedback, since it sees the null.
+ assertFalse(foo(1));
+ assertOptimized(foo);
+})();
+
+// Known undefined on one side abstract equality with receiver.
+(function() {
+ const a = {};
+ const b = undefined;
+
+ function foo(a) { return a == b; }
+
+ assertTrue(foo(b));
+ assertFalse(foo(a));
+ assertTrue(foo(b));
+ assertFalse(foo(a));
+ %OptimizeFunctionOnNextCall(foo);
+ assertTrue(foo(b));
+ assertFalse(foo(a));
+ assertTrue(foo(null));
+ assertTrue(foo(undefined));
+ assertOptimized(foo);
+
+ // TurboFan needs to bake in feedback, since undefined cannot
+ // be context specialized.
+ assertFalse(foo(1));
+ assertUnoptimized(foo);
+})();
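// Quick reference for the abstract-equality rules these cases bake into
// optimized code: receivers compare by identity, a receiver is never
// loosely equal to null or undefined, and null == undefined holds.
const refObj = {};
console.log(refObj == {});         // false — distinct identities
console.log(refObj == refObj);     // true
console.log(refObj == null);       // false
console.log(refObj == undefined);  // false
console.log(null == undefined);    // true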
diff --git a/deps/v8/test/mjsunit/compiler/abstract-equal-undetectable.js b/deps/v8/test/mjsunit/compiler/abstract-equal-undetectable.js
new file mode 100644
index 0000000000..1e1bb6ba2d
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/abstract-equal-undetectable.js
@@ -0,0 +1,119 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --opt --noalways-opt
+
+const undetectable = %GetUndetectable();
+
+// Known undetectable abstract equality.
+(function() {
+ const a = undetectable;
+ const b = {};
+
+ function foo() { return a == b; }
+
+ assertFalse(foo());
+ assertFalse(foo());
+ %OptimizeFunctionOnNextCall(foo);
+ assertFalse(foo());
+})();
+
+// Known undetectable/null abstract equality.
+(function() {
+ const a = undetectable;
+ const b = null;
+
+ function foo() { return a == b; }
+
+ assertTrue(foo());
+ assertTrue(foo());
+ %OptimizeFunctionOnNextCall(foo);
+ assertTrue(foo());
+})();
+
+// Known null/undetectable abstract equality.
+(function() {
+ const a = null;
+ const b = undetectable;
+
+ function foo() { return a == b; }
+
+ assertTrue(foo());
+ assertTrue(foo());
+ %OptimizeFunctionOnNextCall(foo);
+ assertTrue(foo());
+})();
+
+// Known undetectable/undefined abstract equality.
+(function() {
+ const a = undetectable;
+ const b = undefined;
+
+ function foo() { return a == b; }
+
+ assertTrue(foo());
+ assertTrue(foo());
+ %OptimizeFunctionOnNextCall(foo);
+ assertTrue(foo());
+})();
+
+// Known undefined/undetectable abstract equality.
+(function() {
+ const a = undefined;
+ const b = undetectable;
+
+ function foo() { return a == b; }
+
+ assertTrue(foo());
+ assertTrue(foo());
+ %OptimizeFunctionOnNextCall(foo);
+ assertTrue(foo());
+})();
+
+// Known undetectable on one side abstract equality with receiver.
+(function() {
+ const a = {};
+ const b = undetectable;
+
+ function foo(a) { return a == b; }
+
+ assertTrue(foo(b));
+ assertFalse(foo(a));
+ assertTrue(foo(b));
+ assertFalse(foo(a));
+ %OptimizeFunctionOnNextCall(foo);
+ assertTrue(foo(b));
+ assertFalse(foo(a));
+
+ // TurboFan doesn't need to bake in feedback, since it sees the undetectable.
+ assertFalse(foo(1));
+ assertOptimized(foo);
+})();
+
+// Unknown undetectable on one side abstract equality with receiver.
+(function() {
+ const a = undetectable;
+ const b = {};
+
+ function foo(a, b) { return a == b; }
+
+ assertTrue(foo(b, b));
+ assertFalse(foo(a, b));
+ assertTrue(foo(a, a));
+ assertFalse(foo(b, a));
+ assertTrue(foo(a, null));
+ assertFalse(foo(b, null));
+ %OptimizeFunctionOnNextCall(foo);
+ assertTrue(foo(b, b));
+ assertFalse(foo(a, b));
+ assertTrue(foo(a, a));
+ assertFalse(foo(b, a));
+ assertTrue(foo(a, null));
+ assertFalse(foo(b, null));
+ assertOptimized(foo);
+
+ // TurboFan bakes in feedback on the inputs.
+ assertFalse(foo(1));
+ assertUnoptimized(foo);
+})();
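// %GetUndetectable() models document.all-style objects: unlike ordinary
// receivers, they compare loosely equal to null and undefined. A sketch,
// assuming a browser host where document.all exists:
//
//   console.log(document.all == null);       // true
//   console.log(document.all == undefined);  // true
//   console.log({} == null);                 // false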
diff --git a/deps/v8/test/mjsunit/compiler/array-every.js b/deps/v8/test/mjsunit/compiler/array-every.js
new file mode 100644
index 0000000000..5064bd557d
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/array-every.js
@@ -0,0 +1,18 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+// Basic loop peeling test case with Array.prototype.every().
+(function() {
+ function foo(a, o) {
+ return a.every(x => x === o.x);
+ }
+
+ assertTrue(foo([3, 3, 3], {x:3}));
+ assertFalse(foo([3, 3, 2], {x:3}));
+ %OptimizeFunctionOnNextCall(foo);
+ assertTrue(foo([3, 3, 3], {x:3}));
+ assertFalse(foo([3, 3, 2], {x:3}));
+})();
diff --git a/deps/v8/test/mjsunit/compiler/array-find.js b/deps/v8/test/mjsunit/compiler/array-find.js
new file mode 100644
index 0000000000..419a758ac7
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/array-find.js
@@ -0,0 +1,18 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+// Basic loop peeling test case with Array.prototype.find().
+(function() {
+ function foo(a, o) {
+ return a.find(x => x === o.x);
+ }
+
+ assertEquals(3, foo([1, 2, 3], {x:3}));
+ assertEquals(undefined, foo([0, 1, 2], {x:3}));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(3, foo([1, 2, 3], {x:3}));
+ assertEquals(undefined, foo([0, 1, 2], {x:3}));
+})();
diff --git a/deps/v8/test/mjsunit/compiler/array-findindex.js b/deps/v8/test/mjsunit/compiler/array-findindex.js
new file mode 100644
index 0000000000..583f553ce4
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/array-findindex.js
@@ -0,0 +1,18 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+// Basic loop peeling test case with Array.prototype.findIndex().
+(function() {
+ function foo(a, o) {
+ return a.findIndex(x => x === o.x);
+ }
+
+ assertEquals(2, foo([1, 2, 3], {x:3}));
+ assertEquals(-1, foo([0, 1, 2], {x:3}));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(2, foo([1, 2, 3], {x:3}));
+ assertEquals(-1, foo([0, 1, 2], {x:3}));
+})();
diff --git a/deps/v8/test/mjsunit/compiler/array-multiple-receiver-maps.js b/deps/v8/test/mjsunit/compiler/array-multiple-receiver-maps.js
index 3ddff992f7..c26aeda7dc 100644
--- a/deps/v8/test/mjsunit/compiler/array-multiple-receiver-maps.js
+++ b/deps/v8/test/mjsunit/compiler/array-multiple-receiver-maps.js
@@ -3,7 +3,7 @@
// found in the LICENSE file.
// Flags: --allow-natives-syntax --opt --no-always-opt
-// Flags: --no-stress-background-compile
+// Flags: --no-stress-background-compile --trace-opt --trace-deopt
let id = 0;
diff --git a/deps/v8/test/mjsunit/compiler/array-some.js b/deps/v8/test/mjsunit/compiler/array-some.js
new file mode 100644
index 0000000000..411a5881de
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/array-some.js
@@ -0,0 +1,18 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+// Basic loop peeling test case with Array.prototype.some().
+(function() {
+ function foo(a, o) {
+ return a.some(x => x === o.x);
+ }
+
+ assertTrue(foo([1, 2, 3], {x:3}));
+ assertFalse(foo([0, 1, 2], {x:3}));
+ %OptimizeFunctionOnNextCall(foo);
+ assertTrue(foo([1, 2, 3], {x:3}));
+ assertFalse(foo([0, 1, 2], {x:3}));
+})();
diff --git a/deps/v8/test/mjsunit/compiler/dataview-get.js b/deps/v8/test/mjsunit/compiler/dataview-get.js
index 78c6bdf4ac..09094399df 100644
--- a/deps/v8/test/mjsunit/compiler/dataview-get.js
+++ b/deps/v8/test/mjsunit/compiler/dataview-get.js
@@ -173,14 +173,14 @@ assertUnoptimized(readFloat64);
assertUnoptimized(readUint8);
})();
-// TurboFan neutered buffer deopts.
+// TurboFan detached buffer deopts.
(function() {
function readInt8Handled(offset) {
try { return dataview.getInt8(offset); } catch (e) { return e; }
}
warmup(readInt8Handled);
assertOptimized(readInt8Handled);
- %ArrayBufferNeuter(buffer);
+ %ArrayBufferDetach(buffer);
assertInstanceof(readInt8Handled(0), TypeError);
assertUnoptimized(readInt8Handled);
})();
diff --git a/deps/v8/test/mjsunit/compiler/dataview-neutered.js b/deps/v8/test/mjsunit/compiler/dataview-neutered.js
index 54b35f73c8..ef485c69db 100644
--- a/deps/v8/test/mjsunit/compiler/dataview-neutered.js
+++ b/deps/v8/test/mjsunit/compiler/dataview-neutered.js
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --opt --noalways-opt
+// Flags: --allow-natives-syntax --opt --noalways-opt --no-stress-flush-bytecode
-// Invalidate the neutering protector.
-%ArrayBufferNeuter(new ArrayBuffer(1));
+// Invalidate the detaching protector.
+%ArrayBufferDetach(new ArrayBuffer(1));
// Check DataView.prototype.getInt8() optimization.
(function() {
@@ -21,7 +21,7 @@
%OptimizeFunctionOnNextCall(foo);
assertEquals(0, foo(dv));
assertOptimized(foo);
- %ArrayBufferNeuter(ab);
+ %ArrayBufferDetach(ab);
assertThrows(() => foo(dv), TypeError);
assertUnoptimized(foo);
%OptimizeFunctionOnNextCall(foo);
@@ -43,7 +43,7 @@
%OptimizeFunctionOnNextCall(foo);
assertEquals(0, foo(dv));
assertOptimized(foo);
- %ArrayBufferNeuter(ab);
+ %ArrayBufferDetach(ab);
assertThrows(() => foo(dv), TypeError);
assertUnoptimized(foo);
%OptimizeFunctionOnNextCall(foo);
@@ -65,7 +65,7 @@
%OptimizeFunctionOnNextCall(foo);
assertEquals(0, foo(dv));
assertOptimized(foo);
- %ArrayBufferNeuter(ab);
+ %ArrayBufferDetach(ab);
assertThrows(() => foo(dv), TypeError);
assertUnoptimized(foo);
%OptimizeFunctionOnNextCall(foo);
@@ -87,7 +87,7 @@
%OptimizeFunctionOnNextCall(foo);
assertEquals(0, foo(dv));
assertOptimized(foo);
- %ArrayBufferNeuter(ab);
+ %ArrayBufferDetach(ab);
assertThrows(() => foo(dv), TypeError);
assertUnoptimized(foo);
%OptimizeFunctionOnNextCall(foo);
@@ -109,7 +109,7 @@
%OptimizeFunctionOnNextCall(foo);
assertEquals(0, foo(dv));
assertOptimized(foo);
- %ArrayBufferNeuter(ab);
+ %ArrayBufferDetach(ab);
assertThrows(() => foo(dv), TypeError);
assertUnoptimized(foo);
%OptimizeFunctionOnNextCall(foo);
@@ -131,7 +131,7 @@
%OptimizeFunctionOnNextCall(foo);
assertEquals(0, foo(dv));
assertOptimized(foo);
- %ArrayBufferNeuter(ab);
+ %ArrayBufferDetach(ab);
assertThrows(() => foo(dv), TypeError);
assertUnoptimized(foo);
%OptimizeFunctionOnNextCall(foo);
@@ -153,7 +153,7 @@
%OptimizeFunctionOnNextCall(foo);
assertEquals(0, foo(dv));
assertOptimized(foo);
- %ArrayBufferNeuter(ab);
+ %ArrayBufferDetach(ab);
assertThrows(() => foo(dv), TypeError);
assertUnoptimized(foo);
%OptimizeFunctionOnNextCall(foo);
@@ -175,7 +175,7 @@
%OptimizeFunctionOnNextCall(foo);
assertEquals(0, foo(dv));
assertOptimized(foo);
- %ArrayBufferNeuter(ab);
+ %ArrayBufferDetach(ab);
assertThrows(() => foo(dv), TypeError);
assertUnoptimized(foo);
%OptimizeFunctionOnNextCall(foo);
@@ -199,7 +199,7 @@
%OptimizeFunctionOnNextCall(foo);
assertEquals(undefined, foo(dv, 3));
assertOptimized(foo);
- %ArrayBufferNeuter(ab);
+ %ArrayBufferDetach(ab);
assertThrows(() => foo(dv, 4), TypeError);
assertUnoptimized(foo);
%OptimizeFunctionOnNextCall(foo);
@@ -223,7 +223,7 @@
%OptimizeFunctionOnNextCall(foo);
assertEquals(undefined, foo(dv, 3));
assertOptimized(foo);
- %ArrayBufferNeuter(ab);
+ %ArrayBufferDetach(ab);
assertThrows(() => foo(dv, 4), TypeError);
assertUnoptimized(foo);
%OptimizeFunctionOnNextCall(foo);
@@ -247,7 +247,7 @@
%OptimizeFunctionOnNextCall(foo);
assertEquals(undefined, foo(dv, 3));
assertOptimized(foo);
- %ArrayBufferNeuter(ab);
+ %ArrayBufferDetach(ab);
assertThrows(() => foo(dv, 4), TypeError);
assertUnoptimized(foo);
%OptimizeFunctionOnNextCall(foo);
@@ -271,7 +271,7 @@
%OptimizeFunctionOnNextCall(foo);
assertEquals(undefined, foo(dv, 3));
assertOptimized(foo);
- %ArrayBufferNeuter(ab);
+ %ArrayBufferDetach(ab);
assertThrows(() => foo(dv, 4), TypeError);
assertUnoptimized(foo);
%OptimizeFunctionOnNextCall(foo);
@@ -295,7 +295,7 @@
%OptimizeFunctionOnNextCall(foo);
assertEquals(undefined, foo(dv, 3));
assertOptimized(foo);
- %ArrayBufferNeuter(ab);
+ %ArrayBufferDetach(ab);
assertThrows(() => foo(dv, 4), TypeError);
assertUnoptimized(foo);
%OptimizeFunctionOnNextCall(foo);
@@ -319,7 +319,7 @@
%OptimizeFunctionOnNextCall(foo);
assertEquals(undefined, foo(dv, 3));
assertOptimized(foo);
- %ArrayBufferNeuter(ab);
+ %ArrayBufferDetach(ab);
assertThrows(() => foo(dv, 4), TypeError);
assertUnoptimized(foo);
%OptimizeFunctionOnNextCall(foo);
@@ -343,7 +343,7 @@
%OptimizeFunctionOnNextCall(foo);
assertEquals(undefined, foo(dv, 3));
assertOptimized(foo);
- %ArrayBufferNeuter(ab);
+ %ArrayBufferDetach(ab);
assertThrows(() => foo(dv, 4), TypeError);
assertUnoptimized(foo);
%OptimizeFunctionOnNextCall(foo);
@@ -367,7 +367,7 @@
%OptimizeFunctionOnNextCall(foo);
assertEquals(undefined, foo(dv, 3));
assertOptimized(foo);
- %ArrayBufferNeuter(ab);
+ %ArrayBufferDetach(ab);
assertThrows(() => foo(dv, 4), TypeError);
assertUnoptimized(foo);
%OptimizeFunctionOnNextCall(foo);
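// Outside of natives syntax, the same detach-then-throw behavior can be
// reproduced with ArrayBuffer.prototype.transfer() (ES2024) — a minimal
// sketch, assuming transfer() is available in the host:
const detachBuf = new ArrayBuffer(8);
const detachView = new DataView(detachBuf);
console.log(detachView.getInt8(0));  // 0
detachBuf.transfer();                // detaches detachBuf, like %ArrayBufferDetach
try {
  detachView.getInt8(0);
} catch (e) {
  console.log(e instanceof TypeError);  // true — any access after detach throws
}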
diff --git a/deps/v8/test/mjsunit/compiler/deopt-inlined-smi.js b/deps/v8/test/mjsunit/compiler/deopt-inlined-smi.js
index dda083e5b4..9c0dc99bcb 100644
--- a/deps/v8/test/mjsunit/compiler/deopt-inlined-smi.js
+++ b/deps/v8/test/mjsunit/compiler/deopt-inlined-smi.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --always-opt --always-inline-smi-code
+// Flags: --always-opt
// Test deoptimization into inlined smi code.
diff --git a/deps/v8/test/mjsunit/compiler/instance-of-overridden-has-instance.js b/deps/v8/test/mjsunit/compiler/instance-of-overridden-has-instance.js
new file mode 100644
index 0000000000..49c8899e69
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/instance-of-overridden-has-instance.js
@@ -0,0 +1,106 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+(function NonConstHasInstance() {
+ var C = {
+ [Symbol.hasInstance] : () => true
+ };
+
+ function f() {
+ return {} instanceof C;
+ }
+
+ assertTrue(f());
+ assertTrue(f());
+ %OptimizeFunctionOnNextCall(f);
+ assertTrue(f());
+ C[Symbol.hasInstance] = () => false;
+ assertFalse(f());
+})();
+
+(function NumberHasInstance() {
+ var C = {
+ [Symbol.hasInstance] : 0.1
+ };
+
+ function f(b, C) {
+ if (b) return {} instanceof C;
+ return false;
+ }
+
+ function g(b) {
+ return f(b, C);
+ }
+
+ assertFalse(f(true, Number));
+ assertFalse(f(true, Number));
+ assertFalse(g(false));
+ assertFalse(g(false));
+ %OptimizeFunctionOnNextCall(g);
+ assertThrows(() => g(true));
+})();
+
+(function NonFunctionHasInstance() {
+ var C = {
+ [Symbol.hasInstance] : {}
+ };
+
+ function f(b, C) {
+ if (b) return {} instanceof C;
+ return false;
+ }
+
+ function g(b) {
+ return f(b, C);
+ }
+
+ assertFalse(f(true, Number));
+ assertFalse(f(true, Number));
+ assertFalse(g(false));
+ assertFalse(g(false));
+ %OptimizeFunctionOnNextCall(g);
+ assertThrows(() => g(true));
+})();
+
+(function NonConstHasInstanceProto() {
+ var B = {
+ [Symbol.hasInstance]() { return true; }
+ };
+
+ var C = { __proto__ : B };
+
+ function f() {
+ return {} instanceof C;
+ }
+
+ assertTrue(f());
+ assertTrue(f());
+ %OptimizeFunctionOnNextCall(f);
+ assertTrue(f());
+ B[Symbol.hasInstance] = () => { return false; };
+ assertFalse(f());
+})();
+
+(function HasInstanceOverwriteOnProto() {
+ var A = {
+ [Symbol.hasInstance] : () => false
+ }
+
+ var B = { __proto__ : A };
+
+ var C = { __proto__ : B };
+
+ function f() {
+ return {} instanceof C;
+ }
+
+ assertFalse(f());
+ assertFalse(f());
+ %OptimizeFunctionOnNextCall(f);
+ assertFalse(f());
+ B[Symbol.hasInstance] = () => { return true; };
+ assertTrue(f());
+})();
diff --git a/deps/v8/test/mjsunit/compiler/int64.js b/deps/v8/test/mjsunit/compiler/int64.js
index 0a88a95895..b2c53913da 100644
--- a/deps/v8/test/mjsunit/compiler/int64.js
+++ b/deps/v8/test/mjsunit/compiler/int64.js
@@ -89,3 +89,43 @@
%OptimizeFunctionOnNextCall(foo);
assertEquals(0, foo(0xFFFFFFFF));
})();
+
+// Test checked Float32->Word64 conversions.
+(function() {
+ function foo(dv, i) {
+ i = dv.getFloat32(i, true);
+ return dv.getInt8(i, true);
+ }
+
+ const dv = new DataView(new ArrayBuffer(10));
+ dv.setFloat32(0, 8, true);
+ dv.setFloat32(4, 9, true);
+ dv.setInt8(8, 42);
+ dv.setInt8(9, 24);
+
+ assertEquals(42, foo(dv, 0));
+ assertEquals(24, foo(dv, 4));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(42, foo(dv, 0));
+ assertEquals(24, foo(dv, 4));
+})();
+
+// Test checked Float64->Word64 conversions.
+(function() {
+ function foo(dv, i) {
+ i = dv.getFloat64(i, true);
+ return dv.getInt8(i, true);
+ }
+
+ const dv = new DataView(new ArrayBuffer(18));
+ dv.setFloat64(0, 16, true);
+ dv.setFloat64(8, 17, true);
+ dv.setInt8(16, 42);
+ dv.setInt8(17, 24);
+
+ assertEquals(42, foo(dv, 0));
+ assertEquals(24, foo(dv, 8));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(42, foo(dv, 0));
+ assertEquals(24, foo(dv, 8));
+})();
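// Trace of the Float32 case above: foo(dv, 0) loads the float 8.0 stored at
// byte offset 0 and then uses it as the getInt8 offset, returning the 42
// written at byte 8; foo(dv, 4) loads 9.0 and returns the 24 at byte 9. The
// interesting part for TurboFan is the checked Float32→Word64 conversion of
// that computed index.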
diff --git a/deps/v8/test/mjsunit/compiler/lazy-deopt-async-function-resolve.js b/deps/v8/test/mjsunit/compiler/lazy-deopt-async-function-resolve.js
new file mode 100644
index 0000000000..faa5e63239
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/lazy-deopt-async-function-resolve.js
@@ -0,0 +1,27 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+// Test that the lazy deoptimization point for JSAsyncFunctionResolve
+// works correctly, i.e. that we return the promise and not the result
+// of the JSResolvePromise operation.
+(function() {
+ async function foo(x) {
+ return x;
+ }
+
+ assertPromiseResult((async () => {
+ await foo(1);
+ await foo(2);
+ %OptimizeFunctionOnNextCall(foo);
+ const p = new Proxy({}, {
+ get(...args) {
+ %DeoptimizeFunction(foo);
+ return Reflect.get(...args);
+ }
+ });
+ assertEquals(p, await foo(p));
+ })());
+})();
diff --git a/deps/v8/test/mjsunit/compiler/native-context-specialization-hole-check.js b/deps/v8/test/mjsunit/compiler/native-context-specialization-hole-check.js
index 1256f453eb..7f4db56483 100644
--- a/deps/v8/test/mjsunit/compiler/native-context-specialization-hole-check.js
+++ b/deps/v8/test/mjsunit/compiler/native-context-specialization-hole-check.js
@@ -27,6 +27,11 @@
// Flags: --allow-natives-syntax --opt --no-always-opt
+if (isNeverOptimizeLiteMode()) {
+ print("Warning: skipping test that requires optimization in Lite mode.");
+ quit(0);
+}
+
function f() {
Array.prototype[10] = 2;
var arr = new Array();
diff --git a/deps/v8/test/mjsunit/compiler/number-max.js b/deps/v8/test/mjsunit/compiler/number-max.js
index 7e5a4a4ad1..0e9b84fb39 100644
--- a/deps/v8/test/mjsunit/compiler/number-max.js
+++ b/deps/v8/test/mjsunit/compiler/number-max.js
@@ -21,3 +21,17 @@
assertEquals(0, foo(0));
assertOptimized(foo);
})();
+
+// Test that NumberMax properly handles 64-bit comparisons.
+(function() {
+ function foo(x) {
+ x = x|0;
+ return Math.max(x - 1, x + 1);
+ }
+
+ assertEquals(-Math.pow(2, 31) + 1, foo(-Math.pow(2, 31)));
+ assertEquals(Math.pow(2, 31), foo(Math.pow(2, 31) - 1));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(-Math.pow(2, 31) + 1, foo(-Math.pow(2, 31)));
+ assertEquals(Math.pow(2, 31), foo(Math.pow(2, 31) - 1));
+})();
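// Why these particular inputs: with x clamped by `x|0`, x-1 and x+1 can
// step outside the int32 range at both extremes, so the comparison inside
// Math.max must happen on full-width values:
//   x = -2**31     -> max(-2**31 - 1, -2**31 + 1) = -2**31 + 1
//   x =  2**31 - 1 -> max( 2**31 - 2,  2**31    ) =  2**31
// The NumberMin test in the next file mirrors this at the same boundaries.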
diff --git a/deps/v8/test/mjsunit/compiler/number-min.js b/deps/v8/test/mjsunit/compiler/number-min.js
index 72bff78686..6c7c62d773 100644
--- a/deps/v8/test/mjsunit/compiler/number-min.js
+++ b/deps/v8/test/mjsunit/compiler/number-min.js
@@ -21,3 +21,17 @@
assertEquals(1, foo(0));
assertOptimized(foo);
})();
+
+// Test that NumberMin properly handles 64-bit comparisons.
+(function() {
+ function foo(x) {
+ x = x|0;
+ return Math.min(x - 1, x + 1);
+ }
+
+ assertEquals(-Math.pow(2, 31) - 1, foo(-Math.pow(2, 31)));
+ assertEquals(Math.pow(2, 31) - 2, foo(Math.pow(2, 31) - 1));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(-Math.pow(2, 31) - 1, foo(-Math.pow(2, 31)));
+ assertEquals(Math.pow(2, 31) - 2, foo(Math.pow(2, 31) - 1));
+})();
diff --git a/deps/v8/test/mjsunit/compiler/number-modulus.js b/deps/v8/test/mjsunit/compiler/number-modulus.js
index 5f695d1ee5..0925aa0da3 100644
--- a/deps/v8/test/mjsunit/compiler/number-modulus.js
+++ b/deps/v8/test/mjsunit/compiler/number-modulus.js
@@ -4,178 +4,6 @@
// Flags: --allow-natives-syntax --opt --noalways-opt
-// Test that NumberModulus with Number feedback works if only in the
-// end SimplifiedLowering figures out that the inputs to this operation
-// are actually Unsigned32.
-(function() {
- // We need a separately polluted % with NumberOrOddball feedback.
- function bar(x) { return x % 2; }
- bar(undefined); // The % feedback is now NumberOrOddball.
-
- // Now just use the gadget above in a way that only after RETYPE
- // in SimplifiedLowering we find out that the `x` is actually in
- // Unsigned32 range (based on taking the SignedSmall feedback on
- // the + operator).
- function foo(x) {
- x = (x >>> 0) + 1;
- return bar(x) | 0;
- }
-
- assertEquals(0, foo(1));
- assertEquals(1, foo(2));
- assertEquals(0, foo(3));
- assertEquals(1, foo(4));
- %OptimizeFunctionOnNextCall(foo);
- assertEquals(0, foo(1));
- assertEquals(1, foo(2));
- assertEquals(0, foo(3));
- assertEquals(1, foo(4));
- assertOptimized(foo);
-})();
-
-// Test that NumberModulus with Number feedback works if only in the
-// end SimplifiedLowering figures out that the inputs to this operation
-// are actually Signed32.
-(function() {
- // We need a separately polluted % with NumberOrOddball feedback.
- function bar(x) { return x % 2; }
- bar(undefined); // The % feedback is now NumberOrOddball.
-
- // Now just use the gadget above in a way that only after RETYPE
- // in SimplifiedLowering we find out that the `x` is actually in
- // Signed32 range (based on taking the SignedSmall feedback on
- // the + operator).
- function foo(x) {
- x = (x | 0) + 1;
- return bar(x) | 0;
- }
-
- assertEquals(0, foo(1));
- assertEquals(1, foo(2));
- assertEquals(0, foo(3));
- assertEquals(1, foo(4));
- %OptimizeFunctionOnNextCall(foo);
- assertEquals(0, foo(1));
- assertEquals(1, foo(2));
- assertEquals(0, foo(3));
- assertEquals(1, foo(4));
- assertOptimized(foo);
-})();
-
-// Test that SpeculativeNumberModulus with Number feedback works if
-// only in the end SimplifiedLowering figures out that the inputs to
-// this operation are actually Unsigned32.
-(function() {
- // We need to use an object literal here to make sure that the
- // SpeculativeNumberModulus is not turned into a NumberModulus
- // early during JSTypedLowering.
- function bar(x) { return {x}.x % 2; }
- bar(undefined); // The % feedback is now NumberOrOddball.
-
- // Now just use the gadget above in a way that only after RETYPE
- // in SimplifiedLowering we find out that the `x` is actually in
- // Unsigned32 range (based on taking the SignedSmall feedback on
- // the + operator).
- function foo(x) {
- x = (x >>> 0) + 1;
- return bar(x) | 0;
- }
-
- assertEquals(0, foo(1));
- assertEquals(1, foo(2));
- assertEquals(0, foo(3));
- assertEquals(1, foo(4));
- %OptimizeFunctionOnNextCall(foo);
- assertEquals(0, foo(1));
- assertEquals(1, foo(2));
- assertEquals(0, foo(3));
- assertEquals(1, foo(4));
- assertOptimized(foo);
-})();
-
-// Test that SpeculativeNumberModulus with Number feedback works if
-// only in the end SimplifiedLowering figures out that the inputs to
-// this operation are actually Signed32.
-(function() {
- // We need to use an object literal here to make sure that the
- // SpeculativeNumberModulus is not turned into a NumberModulus
- // early during JSTypedLowering.
- function bar(x) { return {x}.x % 2; }
- bar(undefined); // The % feedback is now NumberOrOddball.
-
- // Now just use the gadget above in a way that only after RETYPE
- // in SimplifiedLowering we find out that the `x` is actually in
- // Signed32 range (based on taking the SignedSmall feedback on
- // the + operator).
- function foo(x) {
- x = (x | 0) + 1;
- return bar(x) | 0;
- }
-
- assertEquals(0, foo(1));
- assertEquals(1, foo(2));
- assertEquals(0, foo(3));
- assertEquals(1, foo(4));
- %OptimizeFunctionOnNextCall(foo);
- assertEquals(0, foo(1));
- assertEquals(1, foo(2));
- assertEquals(0, foo(3));
- assertEquals(1, foo(4));
- assertOptimized(foo);
-})();
-
-// Test that NumberModulus works in the case where TurboFan
-// can infer that the output is Signed32 \/ MinusZero, and
-// there's a truncation on the result that identifies zeros
-// (via the SpeculativeNumberEqual).
-(function() {
- // We need a separately polluted % with NumberOrOddball feedback.
- function bar(x) { return x % 2; }
- bar(undefined); // The % feedback is now NumberOrOddball.
-
- // Now we just use the gadget above on an `x` that is known
- // to be in Signed32 range and compare it to 0, which passes
- // a truncation that identifies zeros.
- function foo(x) {
- if (bar(x | 0) == 0) return 0;
- return 1;
- }
-
- assertEquals(0, foo(2));
- assertEquals(1, foo(1));
- %OptimizeFunctionOnNextCall(foo);
- assertEquals(0, foo(2));
- assertEquals(1, foo(1));
- assertOptimized(foo);
-
- // Now `foo` should stay optimized even if `x % 2` would
- // produce -0, aka when we pass a negative value for `x`.
- assertEquals(0, foo(-2));
- assertEquals(1, foo(-1));
- assertOptimized(foo);
-})();
-
-// Test that CheckedInt32Mod handles the slow-path (when
-// the left hand side is negative) correctly.
-(function() {
- // We need a SpeculativeNumberModulus with SignedSmall feedback.
- function foo(x, y) {
- return x % y;
- }
-
- assertEquals(0, foo(2, 1));
- assertEquals(0, foo(2, 2));
- assertEquals(-1, foo(-3, 2));
- %OptimizeFunctionOnNextCall(foo);
- assertEquals(0, foo(2, 1));
- assertEquals(0, foo(2, 2));
- assertEquals(-1, foo(-3, 2));
- assertOptimized(foo);
-
- // Now `foo` should deoptimize if the result is -0.
- assertEquals(-0, foo(-2, 2));
- assertUnoptimized(foo);
-})();
// Test that NumberModulus passes kIdentifiesZero to the
// left hand side input when the result doesn't care about
@@ -196,61 +24,3 @@
assertTrue(foo(0));
assertOptimized(foo);
})();
-
-// Test that NumberModulus passes kIdentifiesZero to the
-// right hand side input, even when the inputs are outside
-// the Signed32 range.
-(function() {
- function foo(x) {
- return (2 ** 32) % (x * -2);
- }
-
- assertEquals(0, foo(1));
- assertEquals(0, foo(1));
- %OptimizeFunctionOnNextCall(foo);
- assertEquals(0, foo(1));
-
- // Now `foo` should stay optimized even if `x * -2` would
- // produce -0, aka when we pass a zero value for `x`.
- assertEquals(NaN, foo(0));
- assertOptimized(foo);
-})();
-
-// Test that SpeculativeNumberModulus passes kIdentifiesZero
-// to the right hand side input, even when feedback is consumed.
-(function() {
- function foo(x, y) {
- return (x % (y * -2)) | 0;
- }
-
- assertEquals(0, foo(2, 1));
- assertEquals(-1, foo(-3, 1));
- %OptimizeFunctionOnNextCall(foo);
- assertEquals(0, foo(2, 1));
- assertEquals(-1, foo(-3, 1));
- assertOptimized(foo);
-
- // Now `foo` should stay optimized even if `y * -2` would
- // produce -0, aka when we pass a zero value for `y`.
- assertEquals(0, foo(2, 0));
- assertOptimized(foo);
-})();
-
-// Test that SpeculativeNumberModulus passes kIdentifiesZero
-// to the left hand side input, even when feedback is consumed.
-(function() {
- function foo(x, y) {
- return ((x * -2) % y) | 0;
- }
-
- assertEquals(-2, foo(1, 3));
- assertEquals(-2, foo(1, 3));
- %OptimizeFunctionOnNextCall(foo);
- assertEquals(-2, foo(1, 3));
- assertOptimized(foo);
-
- // Now `foo` should stay optimized even if `x * -2` would
- // produce -0, aka when we pass a zero value for `x`.
- assertEquals(0, foo(0, 2));
- assertOptimized(foo);
-})();
diff --git a/deps/v8/test/mjsunit/compiler/number-multiply.js b/deps/v8/test/mjsunit/compiler/number-multiply.js
new file mode 100644
index 0000000000..5b644974ec
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/number-multiply.js
@@ -0,0 +1,59 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --opt
+
+// Test the extreme case where -0 is produced by rounding errors.
+(function() {
+ function bar(x) {
+ return 1e-308 * x;
+ }
+ bar(1);
+
+ function foo() {
+ return Object.is(-0, bar(-1e-308));
+ }
+
+ assertTrue(foo());
+ assertTrue(foo());
+ %OptimizeFunctionOnNextCall(foo);
+ assertTrue(foo());
+})();
+
+// Test that multiplying an integer by 0 produces the correct results.
+(function() {
+ function foo(x) {
+ return 0 * Math.round(x);
+ }
+
+ assertEquals(0, foo(0.1));
+ assertEquals(-0, foo(-0.1));
+ assertEquals(NaN, foo(NaN));
+ assertEquals(NaN, foo(Infinity));
+ assertEquals(NaN, foo(-Infinity));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(0, foo(0.1));
+ assertEquals(-0, foo(-0.1));
+ assertEquals(NaN, foo(NaN));
+ assertEquals(NaN, foo(Infinity));
+ assertEquals(NaN, foo(-Infinity));
+})();
+
+// Test that multiplication properly preserves -0 and NaN, and doesn't
+// get incorrectly short-circuited.
+(function() {
+ function foo(x, y) {
+ x = Math.sign(x);
+ y = Math.sign(y);
+ return Math.min(x * y, 0);
+ }
+
+ assertEquals(0, foo(1, 0));
+ assertEquals(-0, foo(1, -0));
+ assertEquals(NaN, foo(NaN, -0));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(0, foo(1, 0));
+ assertEquals(-0, foo(1, -0));
+ assertEquals(NaN, foo(NaN, -0));
+})();
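// Worked numbers for the first case above: 1e-308 * -1e-308 is roughly
// -1e-616, far below the smallest subnormal (about 5e-324), so IEEE 754
// rounds the product to -0 — distinguishable via Object.is but not via ===.
console.log(Object.is(1e-308 * -1e-308, -0));  // true
console.log(1e-308 * -1e-308 === 0);           // true (=== ignores the sign of zero)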
diff --git a/deps/v8/test/mjsunit/compiler/osr-assert.js b/deps/v8/test/mjsunit/compiler/osr-assert.js
index 94b901fd4f..c67ad536ad 100644
--- a/deps/v8/test/mjsunit/compiler/osr-assert.js
+++ b/deps/v8/test/mjsunit/compiler/osr-assert.js
@@ -25,17 +25,20 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --use-osr
+// Flags: --use-osr --allow-natives-syntax
function f(x, b, c) {
- var outer = 1000000;
+ var outer = 10;
var a = 1;
while (outer > 0) {
a = a + 5;
assertEquals(b + 1, c);
outer--;
+ if (outer === 5) {
+ %OptimizeOsr();
+ }
}
return a + 4;
}
-assertEquals(5000005, f(5, "122", "1221"));
+assertEquals(55, f(5, "122", "1221"));
diff --git a/deps/v8/test/mjsunit/compiler/promise-resolve-stable-maps.js b/deps/v8/test/mjsunit/compiler/promise-resolve-stable-maps.js
new file mode 100644
index 0000000000..7acd891b9b
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/promise-resolve-stable-maps.js
@@ -0,0 +1,61 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --opt --noalways-opt
+
+// Test that JSResolvePromise takes a proper stability dependency
+// on the resolution's map if the inferred receiver maps are unreliable
+// (as is the case for HeapConstants).
+(function() {
+ // We need an object literal which gets a stable map initially.
+ function makeObjectWithStableMap() {
+ return {a:1, b:1, c:1};
+ }
+ const a = makeObjectWithStableMap();
+
+ function foo() {
+ return Promise.resolve(a);
+ }
+
+ assertInstanceof(foo(), Promise);
+ assertInstanceof(foo(), Promise);
+ %OptimizeFunctionOnNextCall(foo);
+ assertInstanceof(foo(), Promise);
+ assertOptimized(foo);
+
+ // Now invalidate the stability of a's map.
+ const b = makeObjectWithStableMap();
+ b.d = 1;
+
+ // This should deoptimize foo.
+ assertUnoptimized(foo);
+})();
+
+// Same test with async functions.
+(function() {
+ // We need an object literal which gets a stable map initially,
+ // it needs to be different from the above, otherwise the map
+ // is already not stable when we get here.
+ function makeObjectWithStableMap() {
+ return {x:1, y:1};
+ }
+ const a = makeObjectWithStableMap();
+
+ async function foo() {
+ return a;
+ }
+
+ assertInstanceof(foo(), Promise);
+ assertInstanceof(foo(), Promise);
+ %OptimizeFunctionOnNextCall(foo);
+ assertInstanceof(foo(), Promise);
+ assertOptimized(foo);
+
+ // Now invalidate the stability of a's map.
+ const b = makeObjectWithStableMap();
+ b.z = 1;
+
+ // This should deoptimize foo.
+ assertUnoptimized(foo);
+})();
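// A d8 sketch (run with --allow-natives-syntax) of the map mechanics the
// tests above rely on: objects built from the same literal shape share a
// hidden map, and extending one of them transitions it to a new map — the
// compiler's stability dependency on the original map is what triggers the
// deopt.
function makeShape() { return { a: 1, b: 1, c: 1 }; }
const shapeX = makeShape();
const shapeY = makeShape();
console.log(%HaveSameMap(shapeX, shapeY));  // true — shared map
shapeY.d = 1;                               // transition to a new map
console.log(%HaveSameMap(shapeX, shapeY));  // false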
diff --git a/deps/v8/test/mjsunit/compiler/regress-8380.js b/deps/v8/test/mjsunit/compiler/regress-8380.js
new file mode 100644
index 0000000000..d0bf28571e
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-8380.js
@@ -0,0 +1,32 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function reduceLHS() {
+ for (var i = 0; i < 2 ;i++) {
+ let [q, r] = [1n, 1n];
+ r = r - 1n;
+ q += 1n;
+ q = r;
+ }
+}
+
+reduceLHS();
+%OptimizeFunctionOnNextCall(reduceLHS);
+reduceLHS();
+
+
+function reduceRHS() {
+ for (var i = 0; i < 2 ;i++) {
+ let [q, r] = [1n, 1n];
+ r = 1n - r;
+ q += 1n;
+ q = r;
+ }
+}
+
+reduceRHS();
+%OptimizeFunctionOnNextCall(reduceRHS);
+reduceRHS();
diff --git a/deps/v8/test/mjsunit/compiler/regress-902608.js b/deps/v8/test/mjsunit/compiler/regress-902608.js
new file mode 100644
index 0000000000..faa9ec49df
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-902608.js
@@ -0,0 +1,16 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+async function f() {
+ var a = [...new Int8Array([, ...new Uint8Array(65536)])];
+ var p = new Proxy([f], {
+ set: function () { },
+ done: undefined.prototype
+ });
+}
+
+f()
+f();
diff --git a/deps/v8/test/mjsunit/compiler/regress-905555-2.js b/deps/v8/test/mjsunit/compiler/regress-905555-2.js
new file mode 100644
index 0000000000..5852c6dd43
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-905555-2.js
@@ -0,0 +1,25 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --block-concurrent-recompilation --noalways-opt
+
+global = 1;
+
+function boom(value) {
+ return global;
+}
+
+assertEquals(1, boom());
+assertEquals(1, boom());
+%OptimizeFunctionOnNextCall(boom, "concurrent");
+assertEquals(1, boom());
+
+delete this.global;
+
+%UnblockConcurrentRecompilation();
+
+// boom should be deoptimized because the global property cell has changed.
+assertUnoptimized(boom, "sync");
+
+assertThrows(boom);
diff --git a/deps/v8/test/mjsunit/compiler/regress-905555.js b/deps/v8/test/mjsunit/compiler/regress-905555.js
new file mode 100644
index 0000000000..bc7ba7428e
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-905555.js
@@ -0,0 +1,25 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --block-concurrent-recompilation --noalways-opt
+
+global = 1;
+
+function boom(value) {
+ return global;
+}
+
+assertEquals(1, boom());
+assertEquals(1, boom());
+%OptimizeFunctionOnNextCall(boom, "concurrent");
+assertEquals(1, boom());
+
+this.__defineGetter__("global", () => 42);
+
+%UnblockConcurrentRecompilation();
+
+// boom should be deoptimized because the global property cell has changed.
+assertUnoptimized(boom, "sync");
+
+assertEquals(42, boom());
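
Both regress-905555 variants exercise the same mechanism: a load of an unqualified global compiles against the global object's property cell, and the optimized code depends on that cell staying as-is. Deleting the property or redefining it as an accessor changes the cell, so the dependency fires and the code deopts rather than serving a stale constant. The observable behavior, sketched without natives syntax:

    globalThis.counter = 1;
    function read() { return counter; }
    console.log(read());  // 1; an optimizer may specialize on the cell
    delete globalThis.counter;
    try {
      read();             // must throw now, not return a cached 1
    } catch (e) {
      console.log(e instanceof ReferenceError);  // true
    }
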
diff --git a/deps/v8/test/mjsunit/compiler/regress-910838.js b/deps/v8/test/mjsunit/compiler/regress-910838.js
new file mode 100644
index 0000000000..6e62a453e0
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-910838.js
@@ -0,0 +1,20 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function f(b, s, x) {
+ if (!b) {
+ return (x ? b : s * undefined) ? 1 : 42;
+ }
+}
+
+function g(b, x) {
+ return f(b, 'abc', x);
+}
+
+f(false, 0, 0);
+g(true, 0);
+%OptimizeFunctionOnNextCall(g);
+assertEquals(42, g(false, 0));
diff --git a/deps/v8/test/mjsunit/compiler/regress-913232.js b/deps/v8/test/mjsunit/compiler/regress-913232.js
new file mode 100644
index 0000000000..efd7fb8e5f
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-913232.js
@@ -0,0 +1,14 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function* E(b) {
+ while (true) {
+ for (yield* 0; b; yield* 0) {}
+ }
+}
+
+%OptimizeFunctionOnNextCall(E);
+E();
diff --git a/deps/v8/test/mjsunit/compiler/regress-919754.js b/deps/v8/test/mjsunit/compiler/regress-919754.js
new file mode 100644
index 0000000000..5f20aad928
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-919754.js
@@ -0,0 +1,15 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+
+function f(get, ...a) {
+ for (let i = 0; i < 1000; i++) {
+ if (i === 999) %OptimizeOsr();
+ a.map(f);
+ }
+ return get();
+}
+assertThrows(f);
diff --git a/deps/v8/test/mjsunit/compiler/strict-equal-number.js b/deps/v8/test/mjsunit/compiler/strict-equal-number.js
new file mode 100644
index 0000000000..18cd52aa01
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/strict-equal-number.js
@@ -0,0 +1,16 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+// Make sure that we don't incorrectly truncate Oddball
+// to Number for strict equality comparisons.
+(function() {
+ function foo(x, y) { return x === y; }
+
+ assertTrue(foo(0.1, 0.1));
+ assertTrue(foo(undefined, undefined));
+ %OptimizeFunctionOnNextCall(foo);
+ assertTrue(foo(undefined, undefined));
+})();
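
The bug class guarded against here: undefined is represented internally by an Oddball whose cached number value is NaN, so a strict equality wrongly lowered to a Float64 comparison would evaluate NaN === NaN and return false. The observable difference, in plain JS:

    console.log(Number(undefined));        // NaN -- the truncated value
    console.log(NaN === NaN);              // false -- what bad lowering yields
    console.log(undefined === undefined);  // true -- what === must return
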
diff --git a/deps/v8/test/mjsunit/compiler/strict-equal-receiver.js b/deps/v8/test/mjsunit/compiler/strict-equal-receiver.js
new file mode 100644
index 0000000000..1f38d79dfa
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/strict-equal-receiver.js
@@ -0,0 +1,152 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --opt --noalways-opt
+
+// Known receivers strict equality.
+(function() {
+ const a = {};
+ const b = {};
+
+ function foo() { return a === b; }
+
+ assertFalse(foo());
+ assertFalse(foo());
+ %OptimizeFunctionOnNextCall(foo);
+ assertFalse(foo());
+})();
+
+// Known receiver/null strict equality.
+(function() {
+ const a = {};
+ const b = null;
+
+ function foo() { return a === b; }
+
+ assertFalse(foo());
+ assertFalse(foo());
+ %OptimizeFunctionOnNextCall(foo);
+ assertFalse(foo());
+})();
+
+// Known receiver/undefined strict equality.
+(function() {
+ const a = {};
+ const b = undefined;
+
+ function foo() { return a === b; }
+
+ assertFalse(foo());
+ assertFalse(foo());
+ %OptimizeFunctionOnNextCall(foo);
+ assertFalse(foo());
+})();
+
+// Known receiver on one side strict equality.
+(function() {
+ const a = {};
+ const b = {};
+
+ function foo(a) { return a === b; }
+
+ assertTrue(foo(b));
+ assertFalse(foo(a));
+ assertTrue(foo(b));
+ assertFalse(foo(a));
+ %OptimizeFunctionOnNextCall(foo);
+ assertTrue(foo(b));
+ assertFalse(foo(a));
+})();
+
+// Known receiver on one side strict equality.
+(function() {
+ const a = {};
+ const b = null;
+
+ function foo(a) { return a === b; }
+
+ assertTrue(foo(b));
+ assertFalse(foo(a));
+ assertTrue(foo(b));
+ assertFalse(foo(a));
+ %OptimizeFunctionOnNextCall(foo);
+ assertTrue(foo(b));
+ assertFalse(foo(a));
+})();
+
+// Known receiver on one side strict equality.
+(function() {
+ const a = {};
+ const b = undefined;
+
+ function foo(a) { return a === b; }
+
+ assertTrue(foo(b));
+ assertFalse(foo(a));
+ assertTrue(foo(b));
+ assertFalse(foo(a));
+ %OptimizeFunctionOnNextCall(foo);
+ assertTrue(foo(b));
+ assertFalse(foo(a));
+})();
+
+// Feedback based receiver strict equality.
+(function() {
+ const a = {};
+ const b = {};
+
+ function foo(a, b) { return a === b; }
+
+ assertTrue(foo(b, b));
+ assertFalse(foo(a, b));
+ assertTrue(foo(a, a));
+ assertFalse(foo(b, a));
+ %OptimizeFunctionOnNextCall(foo);
+ assertTrue(foo(a, a));
+ assertFalse(foo(b, a));
+
+ // TurboFan bakes in feedback for the left hand side.
+ assertFalse(foo(null, b));
+ assertUnoptimized(foo);
+})();
+
+// Feedback based receiver/null strict equality.
+(function() {
+ const a = {};
+ const b = null;
+
+ function foo(a, b) { return a === b; }
+
+ assertTrue(foo(b, b));
+ assertFalse(foo(a, b));
+ assertTrue(foo(a, a));
+ assertFalse(foo(b, a));
+ %OptimizeFunctionOnNextCall(foo);
+ assertTrue(foo(a, a));
+ assertFalse(foo(b, a));
+
+ // TurboFan bakes in feedback for the left hand side.
+ assertFalse(foo(1, b));
+ assertUnoptimized(foo);
+})();
+
+// Feedback based receiver/undefined strict equality.
+(function() {
+ const a = {};
+ const b = undefined;
+
+ function foo(a, b) { return a === b; }
+
+ assertTrue(foo(b, b));
+ assertFalse(foo(a, b));
+ assertTrue(foo(a, a));
+ assertFalse(foo(b, a));
+ %OptimizeFunctionOnNextCall(foo);
+ assertTrue(foo(a, a));
+ assertFalse(foo(b, a));
+
+ // TurboFan bakes in feedback for the left hand side.
+ assertFalse(foo(1, b));
+ assertUnoptimized(foo);
+})();
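
The three feedback-based blocks all end the same way on purpose: TurboFan speculates from the collected feedback that the left-hand side is a receiver (or null/undefined), guards on that, and the first call violating the guard deopts, which assertUnoptimized then observes. A conceptual sketch of the emitted comparison (hypothetical shape, not V8's actual internals):

    // What `a === b` compiles to when feedback says "a is a receiver":
    function speculativeStrictEqual(a, b) {
      const t = typeof a;
      if (t !== 'object' && t !== 'function') {
        throw 'deopt';  // guard failed: resume in the interpreter,
                        // record new feedback, discard this code
      }
      return a === b;   // receivers compare by identity (pointer compare)
    }
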
diff --git a/deps/v8/test/mjsunit/d8/d8-arguments.js b/deps/v8/test/mjsunit/d8/d8-arguments.js
new file mode 100644
index 0000000000..5e44ea0e6e
--- /dev/null
+++ b/deps/v8/test/mjsunit/d8/d8-arguments.js
@@ -0,0 +1,7 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: -- 1 2 3
+
+assertEquals(["1", "2", "3"], arguments);
diff --git a/deps/v8/test/mjsunit/date.js b/deps/v8/test/mjsunit/date.js
index d847ce1e44..2b12e8d2f6 100644
--- a/deps/v8/test/mjsunit/date.js
+++ b/deps/v8/test/mjsunit/date.js
@@ -358,3 +358,13 @@ delete Date.prototype.getUTCMilliseconds;
assertTrue(delete Date.prototype.toString);
assertTrue('[object Date]' !== Date());
})();
+
+// Test minimum and maximum date range according to ES6 section 20.3.1.1:
+// "The actual range of times supported by ECMAScript Date objects is slightly
+// smaller: exactly -100,000,000 days to 100,000,000 days measured relative to
+// midnight at the beginning of 01 January, 1970 UTC. This gives a range of
+// 8,640,000,000,000,000 milliseconds to either side of 01 January, 1970 UTC."
+assertEquals(-8640000000000000, Date.parse("-271821-04-20T00:00:00.000Z"));
+assertEquals(8640000000000000, Date.parse("+275760-09-13T00:00:00.000Z"));
+assertTrue(isNaN(Date.parse("-271821-04-19T00:00:00.000Z")));
+assertTrue(isNaN(Date.parse("+275760-09-14T00:00:00.000Z")));
diff --git a/deps/v8/test/mjsunit/elements-kind.js b/deps/v8/test/mjsunit/elements-kind.js
index e220f16533..3ffdbba2a8 100644
--- a/deps/v8/test/mjsunit/elements-kind.js
+++ b/deps/v8/test/mjsunit/elements-kind.js
@@ -241,12 +241,17 @@ for (var i = 0; i < 3; i++) {
}
convert_mixed(construct_smis(), "three", elements_kind.fast);
convert_mixed(construct_doubles(), "three", elements_kind.fast);
+
+if (%ICsAreEnabled()) {
+ // Test that allocation sites allocate correct elements kind initially based
+ // on previous transitions.
%OptimizeFunctionOnNextCall(convert_mixed);
-smis = construct_smis();
-doubles = construct_doubles();
-convert_mixed(smis, 1, elements_kind.fast);
-convert_mixed(doubles, 1, elements_kind.fast);
-assertTrue(%HaveSameMap(smis, doubles));
+ smis = construct_smis();
+ doubles = construct_doubles();
+ convert_mixed(smis, 1, elements_kind.fast);
+ convert_mixed(doubles, 1, elements_kind.fast);
+ assertTrue(%HaveSameMap(smis, doubles));
+}
// Crankshaft support for smi-only elements in dynamic array literals.
function get(foo) { return foo; } // Used to generate dynamic values.
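
For context, elements-kind transitions form a one-way lattice, and allocation sites remember how far a previous array from the same literal site had to generalize. A simplified illustration:

    const a = [1, 2, 3];  // PACKED_SMI_ELEMENTS: small integers only
    a.push(3.5);          // -> PACKED_DOUBLE_ELEMENTS
    a.push("three");      // -> PACKED_ELEMENTS (arbitrary tagged values)
    // There is no transition back: storing 1 into `a` keeps PACKED_ELEMENTS.
    // The allocation site records the generalization, so the next array
    // created from the same site starts out in the wider kind -- which is
    // what the %ICsAreEnabled() block above asserts via %HaveSameMap.
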
diff --git a/deps/v8/test/mjsunit/es6/array-iterator-detached.js b/deps/v8/test/mjsunit/es6/array-iterator-detached.js
index e9a940191b..2a92ee4ff9 100644
--- a/deps/v8/test/mjsunit/es6/array-iterator-detached.js
+++ b/deps/v8/test/mjsunit/es6/array-iterator-detached.js
@@ -11,13 +11,13 @@ function Baseline() {
assertEquals(0, it.next().value);
assertEquals(1, it.next().value);
assertEquals(2, it.next().value);
- %ArrayBufferNeuter(array.buffer);
+ %ArrayBufferDetach(array.buffer);
it.next();
};
%NeverOptimizeFunction(Baseline);
assertThrows(Baseline, TypeError,
- "Cannot perform Array Iterator.prototype.next on a detached ArrayBuffer");
+ "Cannot perform Array Iterator.prototype.next on a neutered ArrayBuffer");
function Turbo(count = 10000) {
let array = Array(10000);
@@ -32,7 +32,7 @@ function Turbo(count = 10000) {
for (let i = 0; i < count; ++i) {
let result = it.next();
if (result.value === 255) {
- %ArrayBufferNeuter(array.buffer);
+ %ArrayBufferDetach(array.buffer);
}
sum += result.value;
}
@@ -44,4 +44,4 @@ Turbo(10);
%OptimizeFunctionOnNextCall(Turbo);
assertThrows(Turbo, TypeError,
- "Cannot perform Array Iterator.prototype.next on a detached ArrayBuffer");
+ "Cannot perform Array Iterator.prototype.next on a neutered ArrayBuffer");
diff --git a/deps/v8/test/mjsunit/es6/array-iterator-turbo.js b/deps/v8/test/mjsunit/es6/array-iterator-turbo.js
index 489a53dbc7..7dcdbe10fa 100644
--- a/deps/v8/test/mjsunit/es6/array-iterator-turbo.js
+++ b/deps/v8/test/mjsunit/es6/array-iterator-turbo.js
@@ -217,7 +217,7 @@ let tests = {
// Throw when detached
let clone = new array.constructor(array);
- %ArrayBufferNeuter(clone.buffer);
+ %ArrayBufferDetach(clone.buffer);
assertThrows(() => sum(clone), TypeError);
// Clear the slate for the next iteration.
diff --git a/deps/v8/test/mjsunit/es6/array-prototype-values.js b/deps/v8/test/mjsunit/es6/array-prototype-values.js
index b7c4e78e33..4b4c04342b 100644
--- a/deps/v8/test/mjsunit/es6/array-prototype-values.js
+++ b/deps/v8/test/mjsunit/es6/array-prototype-values.js
@@ -1,8 +1,6 @@
// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-//
-// Flags: --harmony-array-prototype-values
// Functionality of the values iterator is tested elsewhere; this test
// merely verifies that the 'values' property is set up correctly.
diff --git a/deps/v8/test/mjsunit/es6/array-spread-large-holey.js b/deps/v8/test/mjsunit/es6/array-spread-large-holey.js
new file mode 100644
index 0000000000..c319c187df
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/array-spread-large-holey.js
@@ -0,0 +1,17 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Test spreading of large holey arrays, which are expected to be allocated
+// in LargeObjectSpace. Holes should be replaced with undefined.
+
+var arr = new Array(2e5);
+
+for (var i = 0; i < 10; i++) {
+ arr[i] = i;
+}
+
+var arr2 = [...arr];
+assertTrue(arr2.hasOwnProperty(10));
+assertEquals(undefined, arr2[10]);
+assertEquals(9, arr2[9]);
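
The same semantics at a glance, small enough not to involve LargeObjectSpace:

    const holey = new Array(3);            // three holes, no own elements
    holey[0] = 0;
    console.log(holey.hasOwnProperty(1));  // false -- index 1 is a hole
    const spread = [...holey];             // spread goes through the iterator,
    console.log(spread.hasOwnProperty(1)); // true  -- which yields undefined
    console.log(spread[1]);                // undefined
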
diff --git a/deps/v8/test/mjsunit/es6/block-eval-var-over-let.js b/deps/v8/test/mjsunit/es6/block-eval-var-over-let.js
index e16d7a02a6..784f5d2f42 100644
--- a/deps/v8/test/mjsunit/es6/block-eval-var-over-let.js
+++ b/deps/v8/test/mjsunit/es6/block-eval-var-over-let.js
@@ -141,15 +141,15 @@ try {
}
assertTrue(caught);
-caught = false
-try {
- (function() {
- {
- let x = 1;
- eval('{ function x() {} }');
- }
- })();
-} catch (e) {
- caught = true;
-}
-assertFalse(caught);
+// See ES#sec-web-compat-evaldeclarationinstantiation. Sloppy block functions
+// inside blocks in eval behave similarly to regular sloppy block function
+// hoisting: the var declaration on the function level is only created if
+// it would not cause a syntax error. A masking let would cause a conflicting
+// var declaration syntax error, and hence the var isn't introduced.
+(function() {
+ {
+ let x = 1;
+ eval('{ function x() {} }');
+ assertEquals(1, x);
+ }
+})();
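
For background, the unmasked case shows the other half of the Annex-B rule the comment describes:

    (function() {
      eval('{ function g() {} }');
      // No conflicting lexical binding, so the function-level var exists:
      console.log(typeof g);  // "function"
    })();
    (function() {
      let h = 1;
      eval('{ function h() {} }');
      // The var would conflict with the let, so it is never introduced
      // and the lexical binding stays visible:
      console.log(h);  // 1
    })();
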
diff --git a/deps/v8/test/mjsunit/es6/block-sloppy-function.js b/deps/v8/test/mjsunit/es6/block-sloppy-function.js
index d527a7debb..5d22cd85c0 100644
--- a/deps/v8/test/mjsunit/es6/block-sloppy-function.js
+++ b/deps/v8/test/mjsunit/es6/block-sloppy-function.js
@@ -620,15 +620,12 @@ eval(`
return 4;
} }`);
- // assertEquals(0, f);
- assertEquals(4, f());
+ assertEquals(0, f);
}
- // assertEquals(4, f());
- assertEquals(undefined, f);
+ assertEquals(4, f());
})();
-// This test is incorrect BUG(v8:5168). The commented assertions are correct.
(function evalHoistingThroughWith() {
with ({f: 0}) {
eval(`{ function f() {
diff --git a/deps/v8/test/mjsunit/es6/classes.js b/deps/v8/test/mjsunit/es6/classes.js
index a123dadc52..27121ec007 100644
--- a/deps/v8/test/mjsunit/es6/classes.js
+++ b/deps/v8/test/mjsunit/es6/classes.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --harmony-do-expressions
+// Flags: --allow-natives-syntax
(function TestBasics() {
var C = class C {}
@@ -23,10 +23,41 @@
assertEquals('D2', D2.name);
var E = class {}
- assertEquals('E', E.name); // Should be 'E'.
+ assertEquals('E', E.name);
var F = class { constructor() {} };
- assertEquals('F', F.name); // Should be 'F'.
+ assertEquals('F', F.name);
+
+ var literal = { E: class {} };
+ assertEquals('E', literal.E.name);
+
+ literal = { E: class F {} };
+ assertEquals('F', literal.E.name);
+
+ literal = { __proto__: class {} };
+ assertEquals('', literal.__proto__.name);
+ assertEquals(
+ undefined, Object.getOwnPropertyDescriptor(literal.__proto__, 'name'));
+
+ literal = { __proto__: class F {} };
+ assertEquals('F', literal.__proto__.name);
+ assertNotEquals(
+ undefined, Object.getOwnPropertyDescriptor(literal.__proto__, 'name'));
+
+ class G {};
+ literal = { __proto__: G };
+ assertEquals('G', literal.__proto__.name);
+
+ var H = class { static name() { return 'A'; } };
+ literal = { __proto__ : H };
+ assertEquals('A', literal.__proto__.name());
+
+ literal = {
+ __proto__: class {
+ static name() { return 'A'; }
+ }
+ };
+ assertEquals('A', literal.__proto__.name());
})();
@@ -1038,31 +1069,6 @@ function testClassRestrictedProperties(C) {
(function testReturnFromClassLiteral() {
- function usingDoExpressionInBody() {
- let x = 42;
- let dummy = function() {x};
- try {
- class C {
- dummy() {C}
- [do {return}]() {}
- };
- } finally {
- return x;
- }
- }
- assertEquals(42, usingDoExpressionInBody());
-
- function usingDoExpressionInExtends() {
- let x = 42;
- let dummy = function() {x};
- try {
- class C extends (do {return}) { dummy() {C} };
- } finally {
- return x;
- }
- }
- assertEquals(42, usingDoExpressionInExtends());
-
function usingYieldInBody() {
function* foo() {
class C {
@@ -1186,3 +1192,95 @@ function testClassRestrictedProperties(C) {
assertEquals(instance[key], value);
}
})();
+
+var b = 'b';
+
+(function TestOverwritingInstanceAccessors() {
+ var C, desc;
+ C = class {
+ [b]() { return 'B'; };
+ get b() { return 'get B'; };
+ };
+ desc = Object.getOwnPropertyDescriptor(C.prototype, 'b');
+ assertFalse(desc.enumerable);
+ assertTrue(desc.configurable);
+ assertEquals('get B', desc.get());
+ assertEquals(undefined, desc.set);
+
+ C = class {
+ [b]() { return 'B'; };
+ set b(v) { return 'set B'; };
+ };
+ desc = Object.getOwnPropertyDescriptor(C.prototype, 'b');
+ assertFalse(desc.enumerable);
+ assertTrue(desc.configurable);
+ assertEquals(undefined, desc.get);
+ assertEquals('set B', desc.set());
+
+ C = class {
+ set b(v) { return 'get B'; };
+ [b]() { return 'B'; };
+ get b() { return 'get B'; };
+ };
+ desc = Object.getOwnPropertyDescriptor(C.prototype, 'b');
+ assertFalse(desc.enumerable);
+ assertTrue(desc.configurable);
+ assertEquals('get B', desc.get());
+ assertEquals(undefined, desc.set);
+
+ C = class {
+ get b() { return 'get B'; };
+ [b]() { return 'B'; };
+ set b(v) { return 'set B'; };
+ };
+ desc = Object.getOwnPropertyDescriptor(C.prototype, 'b');
+ assertFalse(desc.enumerable);
+ assertTrue(desc.configurable);
+ assertEquals(undefined, desc.get);
+ assertEquals('set B', desc.set());
+})();
+
+(function TestOverwritingStaticAccessors() {
+ var C, desc;
+ C = class {
+ static [b]() { return 'B'; };
+ static get b() { return 'get B'; };
+ };
+ desc = Object.getOwnPropertyDescriptor(C, 'b');
+ assertFalse(desc.enumerable);
+ assertTrue(desc.configurable);
+ assertEquals('get B', desc.get());
+ assertEquals(undefined, desc.set);
+
+ C = class {
+ static [b]() { return 'B'; };
+ static set b(v) { return 'set B'; };
+ };
+ desc = Object.getOwnPropertyDescriptor(C, 'b');
+ assertFalse(desc.enumerable);
+ assertTrue(desc.configurable);
+ assertEquals(undefined, desc.get);
+ assertEquals('set B', desc.set());
+
+ C = class {
+ static set b(v) { return 'get B'; };
+ static [b]() { return 'B'; };
+ static get b() { return 'get B'; };
+ };
+ desc = Object.getOwnPropertyDescriptor(C, 'b');
+ assertFalse(desc.enumerable);
+ assertTrue(desc.configurable);
+ assertEquals('get B', desc.get());
+ assertEquals(undefined, desc.set);
+
+ C = class {
+ static get b() { return 'get B'; };
+ static [b]() { return 'B'; };
+ static set b(v) { return 'set B'; };
+ };
+ desc = Object.getOwnPropertyDescriptor(C, 'b');
+ assertFalse(desc.enumerable);
+ assertTrue(desc.configurable);
+ assertEquals(undefined, desc.get);
+ assertEquals('set B', desc.set());
+})();
diff --git a/deps/v8/test/mjsunit/es6/collections-constructor-custom-iterator.js b/deps/v8/test/mjsunit/es6/collections-constructor-custom-iterator.js
index e4b52bc5c5..d6fa548179 100644
--- a/deps/v8/test/mjsunit/es6/collections-constructor-custom-iterator.js
+++ b/deps/v8/test/mjsunit/es6/collections-constructor-custom-iterator.js
@@ -4,6 +4,8 @@
// Flags: --allow-natives-syntax --opt
+var global;
+
function TestSetWithCustomIterator(ctor) {
const k1 = {};
const k2 = {};
@@ -19,6 +21,9 @@ function TestSetWithCustomIterator(ctor) {
assertFalse(set.has(k1));
assertTrue(set.has(k2));
assertEquals(2, callCount);
+  // Keep entries alive to avoid collection of the map held weakly by the
+  // optimized code, which would cause that code to deopt.
+ global = entries;
}
TestSetWithCustomIterator(Set);
TestSetWithCustomIterator(Set);
@@ -49,6 +54,9 @@ function TestMapWithCustomIterator(ctor) {
assertFalse(map.has(k1));
assertEquals(2, map.get(k2));
assertEquals(2, callCount);
+  // Keep entries alive to avoid collection of the map held weakly by the
+  // optimized code, which would cause that code to deopt.
+ global = entries;
}
TestMapWithCustomIterator(Map);
TestMapWithCustomIterator(Map);
diff --git a/deps/v8/test/mjsunit/es6/destructuring-assignment.js b/deps/v8/test/mjsunit/es6/destructuring-assignment.js
index dee7a0b16d..7f61e023fc 100644
--- a/deps/v8/test/mjsunit/es6/destructuring-assignment.js
+++ b/deps/v8/test/mjsunit/es6/destructuring-assignment.js
@@ -574,3 +574,58 @@ assertEquals(oz, [1, 2, 3, 4, 5]);
assertEquals(1, ext("let x; ({x} = { x: super() })").x);
assertEquals(1, ext("let x, y; ({ x: y } = { x } = { x: super() })").x);
})();
+
+(function testInvalidReturn() {
+ function* g() { yield 1; }
+
+ let executed_x_setter;
+ let executed_return;
+ var a = {
+ set x(val) {
+ executed_x_setter = true;
+ throw 3;
+ }
+ };
+
+ // The exception from the execution of g().return() should be suppressed by
+ // the setter error.
+ executed_x_setter = false;
+ executed_return = false;
+ g.prototype.return = function() {
+ executed_return = true;
+ throw 4;
+ };
+ assertThrowsEquals("[a.x] = g()", 3);
+ assertTrue(executed_x_setter);
+ assertTrue(executed_return);
+
+ // The exception from g().return() not returning an object should be
+ // suppressed by the setter error.
+ executed_x_setter = false;
+ executed_return = false;
+ g.prototype.return = function() {
+    executed_return = true;
+ return null;
+ };
+ assertThrowsEquals("[a.x] = g()", 3);
+ assertTrue(executed_x_setter);
+ assertTrue(executed_return);
+
+ // The TypeError from g().return not being a method should suppress the setter
+ // error.
+ executed_x_setter = false;
+ g.prototype.return = "not a method";
+ assertThrows("[a.x] = g()", TypeError);
+ assertTrue(executed_x_setter);
+
+ // The exception from the access of g().return should suppress the setter
+ // error.
+ executed_x_setter = false;
+ Object.setPrototypeOf(g.prototype, {
+ get return() {
+ throw 4;
+ }
+ });
+ assertThrowsEquals("[a.x] = g()", 4);
+ assertTrue(executed_x_setter);
+})();
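
Condensed, the precedence rule testInvalidReturn pins down: an error thrown by the assignment target wins over errors thrown while closing the iterator (only a failure to even look up .return surfaces instead). The first rule, standalone:

    function* gen() { yield 1; }
    gen.prototype.return = function() { throw "from return"; };
    const target = { set x(v) { throw "from setter"; } };
    try {
      [target.x] = gen();
    } catch (e) {
      console.log(e);  // "from setter" -- the close error is suppressed
    }
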
diff --git a/deps/v8/test/mjsunit/es6/destructuring.js b/deps/v8/test/mjsunit/es6/destructuring.js
index f09165a24e..30df8c63bf 100644
--- a/deps/v8/test/mjsunit/es6/destructuring.js
+++ b/deps/v8/test/mjsunit/es6/destructuring.js
@@ -1190,3 +1190,13 @@
assertEquals(undefined, eval('try {throw {foo: 1, bar: 2}} catch({foo}) {}'));
assertEquals(undefined, eval('try {throw [1, 2, 3]} catch([x]) {}'));
})();
+
+// Property access as declaration target.
+assertThrows("let [o.x=1]=[]", SyntaxError);
+assertThrows("let {x:o.f=1}={x:1}", SyntaxError);
+assertThrows("(o.f=1)=>0", SyntaxError);
+
+// Invalidly parenthesized declaration targets.
+assertThrows("for (({x}) of [{x:1}]) {}", SyntaxError);
+assertThrows("for (var ({x}) of [{x:1}]) {}", SyntaxError);
+assertThrows("for await (({x}) of [{x:1}]) {}", SyntaxError);
diff --git a/deps/v8/test/mjsunit/es6/map-iterator-1.js b/deps/v8/test/mjsunit/es6/map-iterator-1.js
new file mode 100644
index 0000000000..50d37726a9
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/map-iterator-1.js
@@ -0,0 +1,23 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --no-stress-opt
+
+var map = new Map([[1,2], [2,3], [3,4]]);
+
+assertEquals([[1,2], [2,3], [3,4]], [...map]);
+assertEquals([[1,2], [2,3], [3,4]], [...map.entries()]);
+assertEquals([1,2,3], [...map.keys()]);
+assertEquals([2,3,4], [...map.values()]);
+assertTrue(%MapIteratorProtector());
+assertTrue(%SetIteratorProtector());
+
+map[Symbol.iterator] = () => ({next: () => ({done: true})});
+
+assertTrue(%MapIteratorProtector());
+assertTrue(%SetIteratorProtector());
+assertEquals([], [...map]);
+assertEquals([[1,2], [2,3], [3,4]], [...map.entries()]);
+assertEquals([1,2,3], [...map.keys()]);
+assertEquals([2,3,4], [...map.values()]);
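
These map-iterator-*.js and set-iterator-*.js tests all probe the same mechanism: a protector is a one-shot engine-global flag consulted by the fast paths; it starts out valid and is permanently invalidated the first time user code patches the relevant object or prototype chain. As an illustrative model (not V8's actual implementation):

    class Protector {
      constructor() { this.valid = true; }
      invalidate() { this.valid = false; }  // one-way: never re-validated
    }
    const mapIteratorProtector = new Protector();

    function spreadMap(map) {
      if (mapIteratorProtector.valid) {
        // Fast path: copy the backing store directly, skipping the
        // observable Symbol.iterator/next() protocol. Sound only while
        // nobody has patched the iteration machinery.
      }
      return [...map];  // generic, fully observable protocol
    }
    console.log(spreadMap(new Map([[1, 2]])));  // [[1, 2]]
    // Patching e.g. map.keys().__proto__.next makes V8 run the moral
    // equivalent of mapIteratorProtector.invalidate(), disabling the fast
    // path for the rest of the process -- which is exactly the state
    // %MapIteratorProtector() exposes to these tests.
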
diff --git a/deps/v8/test/mjsunit/es6/map-iterator-10.js b/deps/v8/test/mjsunit/es6/map-iterator-10.js
new file mode 100644
index 0000000000..d8d20ee9ca
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/map-iterator-10.js
@@ -0,0 +1,34 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --no-stress-opt
+
+// This tests the interaction between the MapIterator protector and SetIterator
+// protector.
+
+var map = new Map([[1,2], [2,3], [3,4]]);
+assertTrue(%MapIteratorProtector());
+
+var set = new Set([1,2,3]);
+assertTrue(%SetIteratorProtector());
+
+// This changes %IteratorPrototype%. No more tests should be run after this in
+// the same instance.
+var iterator = map.keys();
+// iterator object --> %MapIteratorPrototype% --> %IteratorPrototype%
+iterator.__proto__.__proto__[Symbol.iterator] =
+ () => ({next: () => ({done: true})});
+
+assertFalse(%MapIteratorProtector());
+assertEquals([[1,2], [2,3], [3,4]], [...map]);
+assertEquals([], [...map.entries()]);
+assertEquals([], [...map.keys()]);
+assertEquals([], [...map.values()]);
+assertEquals([], [...iterator]);
+
+assertFalse(%SetIteratorProtector());
+assertEquals([1,2,3], [...set]);
+assertEquals([], [...set.entries()]);
+assertEquals([], [...set.keys()]);
+assertEquals([], [...set.values()]);
diff --git a/deps/v8/test/mjsunit/es6/map-iterator-11.js b/deps/v8/test/mjsunit/es6/map-iterator-11.js
new file mode 100644
index 0000000000..02eada9cc1
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/map-iterator-11.js
@@ -0,0 +1,35 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var map = new Map([[1,2], [2,3], [3,4]]);
+
+var iterator = map.keys();
+assertEquals([1,2,3], [...map.keys()]);
+assertEquals([1,2,3], [...iterator]);
+assertEquals([], [...iterator]);
+
+iterator = map.values();
+assertEquals([2,3,4], [...iterator]);
+assertEquals([], [...iterator]);
+
+iterator = map.keys();
+iterator.next();
+assertEquals([2,3], [...iterator]);
+assertEquals([], [...iterator]);
+
+iterator = map.values();
+var iterator2 = map.values();
+
+map.delete(1);
+assertEquals([3,4], [...iterator]);
+assertEquals([], [...iterator]);
+
+map.set(1,5);
+assertEquals([3,4,5], [...iterator2]);
+assertEquals([], [...iterator2]);
+
+iterator = map.keys();
+map.set(4,6);
+assertEquals([2,3,1,4], [...iterator]);
+assertEquals([], [...iterator]);
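
The live-iterator stanzas rely on Map's ordering contract: iteration follows insertion order, deletion removes the entry, and re-inserting a deleted key appends it at the back, which is why the final iterator above sees [2,3,1,4]. In isolation:

    const m = new Map([['a', 1], ['b', 2]]);
    m.delete('a');
    m.set('a', 3);               // 'a' re-enters at the back
    console.log([...m.keys()]);  // ["b", "a"]
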
diff --git a/deps/v8/test/mjsunit/es6/map-iterator-2.js b/deps/v8/test/mjsunit/es6/map-iterator-2.js
new file mode 100644
index 0000000000..7adf058fb4
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/map-iterator-2.js
@@ -0,0 +1,20 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --no-stress-opt
+
+var map = new Map([[1,2], [2,3], [3,4]]);
+assertTrue(%MapIteratorProtector());
+assertTrue(%SetIteratorProtector());
+
+// This changes %MapPrototype%. No more tests should be run after this in the
+// same instance.
+map.__proto__[Symbol.iterator] = () => ({next: () => ({done: true})});
+
+assertTrue(%MapIteratorProtector());
+assertTrue(%SetIteratorProtector());
+assertEquals([], [...map]);
+assertEquals([[1,2], [2,3], [3,4]], [...map.entries()]);
+assertEquals([1,2,3], [...map.keys()]);
+assertEquals([2,3,4], [...map.values()]);
diff --git a/deps/v8/test/mjsunit/es6/map-iterator-3.js b/deps/v8/test/mjsunit/es6/map-iterator-3.js
new file mode 100644
index 0000000000..ca0dc9cac2
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/map-iterator-3.js
@@ -0,0 +1,22 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --no-stress-opt
+
+var map = new Map([[1,2], [2,3], [3,4]]);
+assertTrue(%MapIteratorProtector());
+assertTrue(%SetIteratorProtector());
+
+// This changes %MapIteratorPrototype%. No more tests should be run after this
+// in the same instance.
+var iterator = map[Symbol.iterator]();
+iterator.__proto__.next = () => ({done: true});
+
+assertFalse(%MapIteratorProtector());
+assertTrue(%SetIteratorProtector());
+assertEquals([], [...map]);
+assertEquals([], [...map.entries()]);
+assertEquals([], [...map.keys()]);
+assertEquals([], [...map.values()]);
+assertEquals([], [...iterator]);
diff --git a/deps/v8/test/mjsunit/es6/map-iterator-4.js b/deps/v8/test/mjsunit/es6/map-iterator-4.js
new file mode 100644
index 0000000000..a43282a69c
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/map-iterator-4.js
@@ -0,0 +1,22 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --no-stress-opt
+
+var map = new Map([[1,2], [2,3], [3,4]]);
+assertTrue(%MapIteratorProtector());
+assertTrue(%SetIteratorProtector());
+
+// This changes %MapIteratorPrototype%. No more tests should be run after this
+// in the same instance.
+var iterator = map.keys();
+iterator.__proto__.next = () => ({done: true});
+
+assertFalse(%MapIteratorProtector());
+assertTrue(%SetIteratorProtector());
+assertEquals([], [...map]);
+assertEquals([], [...map.entries()]);
+assertEquals([], [...map.keys()]);
+assertEquals([], [...map.values()]);
+assertEquals([], [...iterator]);
diff --git a/deps/v8/test/mjsunit/es6/map-iterator-5.js b/deps/v8/test/mjsunit/es6/map-iterator-5.js
new file mode 100644
index 0000000000..0af32b58ba
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/map-iterator-5.js
@@ -0,0 +1,22 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --no-stress-opt
+
+var map = new Map([[1,2], [2,3], [3,4]]);
+assertTrue(%MapIteratorProtector());
+assertTrue(%SetIteratorProtector());
+
+// This changes %MapIteratorPrototype%. No more tests should be run after this
+// in the same instance.
+var iterator = map.values();
+iterator.__proto__.next = () => ({done: true});
+
+assertFalse(%MapIteratorProtector());
+assertTrue(%SetIteratorProtector());
+assertEquals([], [...map]);
+assertEquals([], [...map.entries()]);
+assertEquals([], [...map.keys()]);
+assertEquals([], [...map.values()]);
+assertEquals([], [...iterator]);
diff --git a/deps/v8/test/mjsunit/es6/map-iterator-6.js b/deps/v8/test/mjsunit/es6/map-iterator-6.js
new file mode 100644
index 0000000000..6611e7aca0
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/map-iterator-6.js
@@ -0,0 +1,20 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --no-stress-opt
+
+var map = new Map([[1,2], [2,3], [3,4]]);
+assertTrue(%MapIteratorProtector());
+assertTrue(%SetIteratorProtector());
+
+var iterator = map.values();
+iterator.next = () => ({done: true});
+
+assertFalse(%MapIteratorProtector());
+assertTrue(%SetIteratorProtector());
+assertEquals([[1,2], [2,3], [3,4]], [...map]);
+assertEquals([[1,2], [2,3], [3,4]], [...map.entries()]);
+assertEquals([1,2,3], [...map.keys()]);
+assertEquals([2,3,4], [...map.values()]);
+assertEquals([], [...iterator]);
diff --git a/deps/v8/test/mjsunit/es6/map-iterator-7.js b/deps/v8/test/mjsunit/es6/map-iterator-7.js
new file mode 100644
index 0000000000..b5a2345bd8
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/map-iterator-7.js
@@ -0,0 +1,22 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --no-stress-opt
+
+var map = new Map([[1,2], [2,3], [3,4]]);
+assertTrue(%MapIteratorProtector());
+assertTrue(%SetIteratorProtector());
+
+// This changes %MapIteratorPrototype%. No more tests should be run after this
+// in the same instance.
+var iterator = map.entries();
+iterator.__proto__.next = () => ({done: true});
+
+assertFalse(%MapIteratorProtector());
+assertTrue(%SetIteratorProtector());
+assertEquals([], [...map]);
+assertEquals([], [...map.entries()]);
+assertEquals([], [...map.keys()]);
+assertEquals([], [...map.values()]);
+assertEquals([], [...iterator]);
diff --git a/deps/v8/test/mjsunit/es6/map-iterator-8.js b/deps/v8/test/mjsunit/es6/map-iterator-8.js
new file mode 100644
index 0000000000..01dacfb72e
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/map-iterator-8.js
@@ -0,0 +1,32 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --no-stress-opt
+
+// This tests the interaction between the MapIterator protector and SetIterator
+// protector.
+
+var map = new Map([[1,2], [2,3], [3,4]]);
+assertTrue(%MapIteratorProtector());
+
+var set = new Set([1,2,3]);
+assertTrue(%SetIteratorProtector());
+
+// This changes %MapIteratorPrototype%. No more tests should be run after this
+// in the same instance.
+var iterator = map.keys();
+iterator.__proto__[Symbol.iterator] = () => ({next: () => ({done: true})});
+
+assertFalse(%MapIteratorProtector());
+assertEquals([[1,2], [2,3], [3,4]], [...map]);
+assertEquals([], [...map.entries()]);
+assertEquals([], [...map.keys()]);
+assertEquals([], [...map.values()]);
+assertEquals([], [...iterator]);
+
+assertFalse(%SetIteratorProtector());
+assertEquals([1,2,3], [...set]);
+assertEquals([[1,1],[2,2],[3,3]], [...set.entries()]);
+assertEquals([1,2,3], [...set.keys()]);
+assertEquals([1,2,3], [...set.values()]);
diff --git a/deps/v8/test/mjsunit/es6/map-iterator-9.js b/deps/v8/test/mjsunit/es6/map-iterator-9.js
new file mode 100644
index 0000000000..2db159d80e
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/map-iterator-9.js
@@ -0,0 +1,30 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --no-stress-opt
+
+// This tests the interaction between the MapIterator protector and SetIterator
+// protector.
+
+var map = new Map([[1,2], [2,3], [3,4]]);
+assertTrue(%MapIteratorProtector());
+
+var set = new Set([1,2,3]);
+assertTrue(%SetIteratorProtector());
+
+var iterator = map.keys();
+iterator[Symbol.iterator] = () => ({next: () => ({done: true})});
+
+assertFalse(%MapIteratorProtector());
+assertEquals([[1,2], [2,3], [3,4]], [...map]);
+assertEquals([[1,2], [2,3], [3,4]], [...map.entries()]);
+assertEquals([1,2,3], [...map.keys()]);
+assertEquals([2,3,4], [...map.values()]);
+assertEquals([], [...iterator]);
+
+assertFalse(%SetIteratorProtector());
+assertEquals([1,2,3], [...set]);
+assertEquals([[1,1],[2,2],[3,3]], [...set.entries()]);
+assertEquals([1,2,3], [...set.keys()]);
+assertEquals([1,2,3], [...set.values()]);
diff --git a/deps/v8/test/mjsunit/es6/microtask-delivery.js b/deps/v8/test/mjsunit/es6/microtask-delivery.js
index 6b239bea47..96035164f2 100644
--- a/deps/v8/test/mjsunit/es6/microtask-delivery.js
+++ b/deps/v8/test/mjsunit/es6/microtask-delivery.js
@@ -40,7 +40,7 @@ function assertArrayValues(expected, actual) {
}
function assertOrdering(expected) {
- %RunMicrotasks();
+ %PerformMicrotaskCheckpoint();
assertArrayValues(expected, ordering);
}
diff --git a/deps/v8/test/mjsunit/es6/regress/regress-5929-1.js b/deps/v8/test/mjsunit/es6/regress/regress-5929-1.js
index 94e143fa77..5f361f3a78 100644
--- a/deps/v8/test/mjsunit/es6/regress/regress-5929-1.js
+++ b/deps/v8/test/mjsunit/es6/regress/regress-5929-1.js
@@ -8,7 +8,7 @@ var buf = new ArrayBuffer(0x10000);
var arr = new Uint8Array(buf).fill(55);
var tmp = {};
tmp[Symbol.toPrimitive] = function () {
- %ArrayBufferNeuter(arr.buffer);
+ %ArrayBufferDetach(arr.buffer);
return 50;
}
arr.copyWithin(tmp);
diff --git a/deps/v8/test/mjsunit/es6/regress/regress-7706.js b/deps/v8/test/mjsunit/es6/regress/regress-7706.js
index 78ba4c9dee..7401a731d4 100644
--- a/deps/v8/test/mjsunit/es6/regress/regress-7706.js
+++ b/deps/v8/test/mjsunit/es6/regress/regress-7706.js
@@ -4,13 +4,37 @@
// Flags: --allow-natives-syntax
-class Test extends Number {}
-Test.prototype[Symbol.toStringTag] = "Test";
-
function toString(o) {
%ToFastProperties(o.__proto__);
return Object.prototype.toString.call(o);
}
-assertEquals("[object Test]", toString(new Test), "Try #1");
-assertEquals("[object Test]", toString(new Test), "Try #2");
+class TestNumber extends Number {}
+TestNumber.prototype[Symbol.toStringTag] = "TestNumber";
+assertEquals("[object TestNumber]", toString(new TestNumber), "Try #1");
+assertEquals("[object TestNumber]", toString(new TestNumber), "Try #2");
+
+class TestBoolean extends Boolean {}
+TestBoolean.prototype[Symbol.toStringTag] = "TestBoolean";
+assertEquals("[object TestBoolean]", toString(new TestBoolean), "Try #1");
+assertEquals("[object TestBoolean]", toString(new TestBoolean), "Try #2");
+
+class TestString extends String {}
+TestString.prototype[Symbol.toStringTag] = "TestString";
+assertEquals("[object TestString]", toString(new TestString), "Try #1");
+assertEquals("[object TestString]", toString(new TestString), "Try #2");
+
+class base {}
+class TestBigInt extends base {}
+TestBigInt.prototype[Symbol.toStringTag] = 'TestBigInt';
+var b = new TestBigInt();
+b.__proto__.__proto__ = BigInt.prototype;
+assertEquals("[object TestBigInt]", toString(b), "Try #1");
+assertEquals("[object TestBigInt]", toString(b), "Try #2");
+
+class TestSymbol extends base {}
+TestSymbol.prototype[Symbol.toStringTag] = 'TestSymbol';
+var sym = new TestSymbol();
+sym.__proto__.__proto__ = Symbol.prototype;
+assertEquals("[object TestSymbol]", toString(sym), "Try #1");
+assertEquals("[object TestSymbol]", toString(sym), "Try #2");
diff --git a/deps/v8/test/mjsunit/es6/regress/regress-cr895860.js b/deps/v8/test/mjsunit/es6/regress/regress-cr895860.js
new file mode 100644
index 0000000000..1ecd1f7730
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/regress/regress-cr895860.js
@@ -0,0 +1,14 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+(function() {
+ var s = "f";
+
+ // 2^18 length, enough to ensure an array (of pointers) bigger than 500KB.
+ for (var i = 0; i < 18; i++) {
+ s += s;
+ }
+
+ var ss = [...s];
+})();
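
The size comment works out as follows, assuming 8-byte pointers on a 64-bit build:

    // Doubling "f" eighteen times yields a string of 2**18 code units:
    console.log(2 ** 18);      // 262144
    // Spread into an array of one-character strings, that is 262144
    // pointers, about 2 MiB of elements -- well past the ~500KB regular
    // object size limit, so the backing store lands in LargeObjectSpace:
    console.log(2 ** 18 * 8);  // 2097152
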
diff --git a/deps/v8/test/mjsunit/es6/set-iterator-1.js b/deps/v8/test/mjsunit/es6/set-iterator-1.js
new file mode 100644
index 0000000000..2e4447de68
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/set-iterator-1.js
@@ -0,0 +1,23 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --no-stress-opt
+
+var set = new Set([1,2,3]);
+
+assertEquals([1,2,3], [...set]);
+assertEquals([[1,1],[2,2],[3,3]], [...set.entries()]);
+assertEquals([1,2,3], [...set.keys()]);
+assertEquals([1,2,3], [...set.values()]);
+assertTrue(%SetIteratorProtector());
+assertTrue(%MapIteratorProtector());
+
+set[Symbol.iterator] = () => ({next: () => ({done: true})});
+
+assertFalse(%SetIteratorProtector());
+assertTrue(%MapIteratorProtector());
+assertEquals([], [...set]);
+assertEquals([[1,1],[2,2],[3,3]], [...set.entries()]);
+assertEquals([1,2,3], [...set.keys()]);
+assertEquals([1,2,3], [...set.values()]);
diff --git a/deps/v8/test/mjsunit/es6/set-iterator-10.js b/deps/v8/test/mjsunit/es6/set-iterator-10.js
new file mode 100644
index 0000000000..ec094d20d0
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/set-iterator-10.js
@@ -0,0 +1,34 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --no-stress-opt
+
+// This tests the interaction between the MapIterator protector and SetIterator
+// protector.
+
+var map = new Map([[1,2], [2,3], [3,4]]);
+assertTrue(%MapIteratorProtector());
+
+var set = new Set([1,2,3]);
+assertTrue(%SetIteratorProtector());
+
+// This changes %IteratorPrototype%. No more tests should be run after this in
+// the same instance.
+var iterator = set.keys();
+// iterator object --> %SetIteratorPrototype% --> %IteratorPrototype%
+iterator.__proto__.__proto__[Symbol.iterator] =
+ () => ({next: () => ({done: true})});
+
+assertFalse(%MapIteratorProtector());
+assertEquals([[1,2], [2,3], [3,4]], [...map]);
+assertEquals([], [...map.entries()]);
+assertEquals([], [...map.keys()]);
+assertEquals([], [...map.values()]);
+
+assertFalse(%SetIteratorProtector());
+assertEquals([], [...set.entries()]);
+assertEquals([1,2,3], [...set]);
+assertEquals([], [...set.keys()]);
+assertEquals([], [...set.values()]);
+assertEquals([], [...iterator]);
diff --git a/deps/v8/test/mjsunit/es6/set-iterator-11.js b/deps/v8/test/mjsunit/es6/set-iterator-11.js
new file mode 100644
index 0000000000..dff71c8d91
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/set-iterator-11.js
@@ -0,0 +1,33 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var set = new Set([1,2,3]);
+
+var iterator = set.keys();
+
+assertEquals([1,2,3], [...set.keys()]);
+assertEquals([1,2,3], [...iterator]);
+assertEquals([], [...iterator]);
+
+iterator = set.values();
+assertEquals([1,2,3], [...iterator]);
+assertEquals([], [...iterator]);
+
+iterator = set.keys();
+iterator.next();
+assertEquals([2,3], [...iterator]);
+assertEquals([], [...iterator]);
+
+iterator = set.values();
+var iterator2 = set.values();
+
+set.delete(1);
+assertEquals([2,3], [...iterator]);
+
+set.add(4);
+assertEquals([2,3,4], [...iterator2]);
+
+iterator = set.keys();
+set.add(1);
+assertEquals([2,3,4,1], [...iterator]);
diff --git a/deps/v8/test/mjsunit/es6/set-iterator-2.js b/deps/v8/test/mjsunit/es6/set-iterator-2.js
new file mode 100644
index 0000000000..b1fc6bbfea
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/set-iterator-2.js
@@ -0,0 +1,21 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --no-stress-opt
+
+var set = new Set([1,2,3]);
+
+assertTrue(%SetIteratorProtector());
+assertTrue(%MapIteratorProtector());
+
+// This changes %SetPrototype%. No more tests should be run after this in the
+// same instance.
+set.__proto__[Symbol.iterator] = () => ({next: () => ({done: true})});
+
+assertFalse(%SetIteratorProtector());
+assertTrue(%MapIteratorProtector());
+assertEquals([], [...set]);
+assertEquals([[1,1],[2,2],[3,3]], [...set.entries()]);
+assertEquals([1,2,3], [...set.keys()]);
+assertEquals([1,2,3], [...set.values()]);
diff --git a/deps/v8/test/mjsunit/es6/set-iterator-3.js b/deps/v8/test/mjsunit/es6/set-iterator-3.js
new file mode 100644
index 0000000000..b727f3280c
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/set-iterator-3.js
@@ -0,0 +1,23 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --no-stress-opt
+
+var set = new Set([1,2,3]);
+
+assertTrue(%SetIteratorProtector());
+assertTrue(%MapIteratorProtector());
+
+// This changes %SetIteratorPrototype%. No more tests should be run after this
+// in the same instance.
+var iterator = set[Symbol.iterator]();
+iterator.__proto__.next = () => ({done: true});
+
+assertFalse(%SetIteratorProtector());
+assertTrue(%MapIteratorProtector());
+assertEquals([], [...set]);
+assertEquals([], [...set.entries()]);
+assertEquals([], [...set.keys()]);
+assertEquals([], [...set.values()]);
+assertEquals([], [...iterator]);
diff --git a/deps/v8/test/mjsunit/es6/set-iterator-4.js b/deps/v8/test/mjsunit/es6/set-iterator-4.js
new file mode 100644
index 0000000000..69a18893e8
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/set-iterator-4.js
@@ -0,0 +1,23 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --no-stress-opt
+
+var set = new Set([1,2,3]);
+
+assertTrue(%SetIteratorProtector());
+assertTrue(%MapIteratorProtector());
+
+// This changes %SetIteratorPrototype%. No more tests should be run after this
+// in the same instance.
+var iterator = set.keys();
+iterator.__proto__.next = () => ({done: true});
+
+assertFalse(%SetIteratorProtector());
+assertTrue(%MapIteratorProtector());
+assertEquals([], [...set]);
+assertEquals([], [...set.entries()]);
+assertEquals([], [...set.keys()]);
+assertEquals([], [...set.values()]);
+assertEquals([], [...iterator]);
diff --git a/deps/v8/test/mjsunit/es6/set-iterator-5.js b/deps/v8/test/mjsunit/es6/set-iterator-5.js
new file mode 100644
index 0000000000..ec8a653b69
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/set-iterator-5.js
@@ -0,0 +1,23 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --no-stress-opt
+
+var set = new Set([1,2,3]);
+
+assertTrue(%SetIteratorProtector());
+assertTrue(%MapIteratorProtector());
+
+// This changes %SetIteratorPrototype%. No more tests should be run after this
+// in the same instance.
+var iterator = set.values();
+iterator.__proto__.next = () => ({done: true});
+
+assertFalse(%SetIteratorProtector());
+assertTrue(%MapIteratorProtector());
+assertEquals([], [...set]);
+assertEquals([], [...set.entries()]);
+assertEquals([], [...set.keys()]);
+assertEquals([], [...set.values()]);
+assertEquals([], [...iterator]);
diff --git a/deps/v8/test/mjsunit/es6/set-iterator-6.js b/deps/v8/test/mjsunit/es6/set-iterator-6.js
new file mode 100644
index 0000000000..c5a2a7b09d
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/set-iterator-6.js
@@ -0,0 +1,21 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --no-stress-opt
+
+var set = new Set([1,2,3]);
+
+assertTrue(%SetIteratorProtector());
+assertTrue(%MapIteratorProtector());
+
+var iterator = set.values();
+iterator.next = () => ({done: true});
+
+assertFalse(%SetIteratorProtector());
+assertTrue(%MapIteratorProtector());
+assertEquals([1,2,3], [...set]);
+assertEquals([[1,1],[2,2],[3,3]], [...set.entries()]);
+assertEquals([1,2,3], [...set.keys()]);
+assertEquals([1,2,3], [...set.values()]);
+assertEquals([], [...iterator]);
diff --git a/deps/v8/test/mjsunit/es6/set-iterator-7.js b/deps/v8/test/mjsunit/es6/set-iterator-7.js
new file mode 100644
index 0000000000..a244b1e47f
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/set-iterator-7.js
@@ -0,0 +1,23 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --no-stress-opt
+
+var set = new Set([1,2,3]);
+
+assertTrue(%SetIteratorProtector());
+assertTrue(%MapIteratorProtector());
+
+// This changes %SetIteratorPrototype%. No more tests should be run after this
+// in the same instance.
+var iterator = set.entries();
+iterator.__proto__.next = () => ({done: true});
+
+assertFalse(%SetIteratorProtector());
+assertTrue(%MapIteratorProtector());
+assertEquals([], [...set]);
+assertEquals([], [...set.entries()]);
+assertEquals([], [...set.keys()]);
+assertEquals([], [...set.values()]);
+assertEquals([], [...iterator]);
diff --git a/deps/v8/test/mjsunit/es6/set-iterator-8.js b/deps/v8/test/mjsunit/es6/set-iterator-8.js
new file mode 100644
index 0000000000..2328a7b737
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/set-iterator-8.js
@@ -0,0 +1,31 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --no-stress-opt
+
+// This tests the interaction between the MapIterator protector and SetIterator
+// protector.
+
+var map = new Map([[1,2], [2,3], [3,4]]);
+assertTrue(%MapIteratorProtector());
+
+var set = new Set([1,2,3]);
+assertTrue(%SetIteratorProtector());
+
+// This changes %SetIteratorPrototype%. No more tests should be run after this
+// in the same instance.
+var iterator = set.keys();
+iterator.__proto__[Symbol.iterator] = () => ({next: () => ({done: true})});
+
+assertFalse(%MapIteratorProtector());
+assertEquals([[1,2], [2,3], [3,4]], [...map]);
+assertEquals([[1,2], [2,3], [3,4]], [...map.entries()]);
+assertEquals([1,2,3], [...map.keys()]);
+assertEquals([2,3,4], [...map.values()]);
+
+assertFalse(%SetIteratorProtector());
+assertEquals([], [...set.entries()]);
+assertEquals([1,2,3], [...set]);
+assertEquals([], [...set.keys()]);
+assertEquals([], [...set.values()]);
diff --git a/deps/v8/test/mjsunit/es6/set-iterator-9.js b/deps/v8/test/mjsunit/es6/set-iterator-9.js
new file mode 100644
index 0000000000..42cbf3077a
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/set-iterator-9.js
@@ -0,0 +1,30 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --no-stress-opt
+
+// This tests the interaction between the MapIterator protector and SetIterator
+// protector.
+
+var map = new Map([[1,2], [2,3], [3,4]]);
+assertTrue(%MapIteratorProtector());
+
+var set = new Set([1,2,3]);
+assertTrue(%SetIteratorProtector());
+
+var iterator = set.keys();
+iterator[Symbol.iterator] = () => ({next: () => ({done: true})});
+
+assertFalse(%MapIteratorProtector());
+assertEquals([[1,2], [2,3], [3,4]], [...map]);
+assertEquals([[1,2], [2,3], [3,4]], [...map.entries()]);
+assertEquals([1,2,3], [...map.keys()]);
+assertEquals([2,3,4], [...map.values()]);
+
+assertFalse(%SetIteratorProtector());
+assertEquals([[1,1],[2,2],[3,3]], [...set.entries()]);
+assertEquals([1,2,3], [...set]);
+assertEquals([1,2,3], [...set.keys()]);
+assertEquals([1,2,3], [...set.values()]);
+assertEquals([], [...iterator]);
diff --git a/deps/v8/test/mjsunit/es6/string-iterator.js b/deps/v8/test/mjsunit/es6/string-iterator.js
index b63de36fc5..d47a03fbd6 100644
--- a/deps/v8/test/mjsunit/es6/string-iterator.js
+++ b/deps/v8/test/mjsunit/es6/string-iterator.js
@@ -100,3 +100,12 @@ function TestSlicedStringRegression() {
var iterator = sliced_string[Symbol.iterator]();
}
TestSlicedStringRegression();
+
+
+(function(){
+ var str = "\uD83C\uDF1F\u5FCD\u8005\u306E\u653B\u6483\uD83C\uDF1F";
+ var arr = [...str];
+ assertEquals(["\uD83C\uDF1F", "\u5FCD", "\u8005", "\u306E", "\u653B",
+ "\u6483", "\uD83C\uDF1F"], arr);
+ assertEquals(7, arr.length);
+})();
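
Reduced to a single code point, the new case checks that the string iterator walks code points while .length counts UTF-16 code units, so the glowing-star emoji (U+1F31F, stored as the surrogate pair D83C DF1F) spreads to one element:

    const star = "\uD83C\uDF1F";
    console.log(star.length);                       // 2 code units
    console.log([...star].length);                  // 1 code point
    console.log(star.codePointAt(0).toString(16));  // "1f31f"
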
diff --git a/deps/v8/test/mjsunit/es6/typedarray-construct-by-array-like.js b/deps/v8/test/mjsunit/es6/typedarray-construct-by-array-like.js
index 0a55fccf5c..e6be924d8e 100644
--- a/deps/v8/test/mjsunit/es6/typedarray-construct-by-array-like.js
+++ b/deps/v8/test/mjsunit/es6/typedarray-construct-by-array-like.js
@@ -194,7 +194,7 @@ tests.push(function TestFromTypedArraySpecies(constr) {
assertEquals(1, constructor_read);
});
-tests.push(function TestFromTypedArraySpeciesNeutersBuffer(constr) {
+tests.push(function TestFromTypedArraySpeciesDetachesBuffer(constr) {
var b = new ArrayBuffer(16);
var a1 = new constr(b);
@@ -203,7 +203,7 @@ tests.push(function TestFromTypedArraySpeciesNeutersBuffer(constr) {
Object.defineProperty(b, 'constructor', {
get: function() {
- %ArrayBufferNeuter(b);
+ %ArrayBufferDetach(b);
return cons;
}
});
diff --git a/deps/v8/test/mjsunit/es6/typedarray-copywithin.js b/deps/v8/test/mjsunit/es6/typedarray-copywithin.js
index c52a38625b..e60c88f6e7 100644
--- a/deps/v8/test/mjsunit/es6/typedarray-copywithin.js
+++ b/deps/v8/test/mjsunit/es6/typedarray-copywithin.js
@@ -232,13 +232,13 @@ CheckEachTypedArray(function parametersNotCalledIfDetached(constructor) {
var tmp = {
[Symbol.toPrimitive]() {
assertUnreachable("Parameter should not be processed when " +
- "array.[[ViewedArrayBuffer]] is neutered.");
+ "array.[[ViewedArrayBuffer]] is detached.");
return 0;
}
};
var array = new constructor([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]);
- %ArrayBufferNeuter(array.buffer);
+ %ArrayBufferDetach(array.buffer);
assertThrows(() => array.copyWithin(tmp, tmp, tmp), TypeError);
assertEquals(0, array.length, "array.[[ViewedArrayBuffer]] is detached");
diff --git a/deps/v8/test/mjsunit/es6/typedarray-every.js b/deps/v8/test/mjsunit/es6/typedarray-every.js
index 968078988f..feb7cc90cc 100644
--- a/deps/v8/test/mjsunit/es6/typedarray-every.js
+++ b/deps/v8/test/mjsunit/es6/typedarray-every.js
@@ -15,7 +15,7 @@ var typedArrayConstructors = [
Float32Array,
Float64Array];
-function CheckTypedArrayIsNeutered(array) {
+function CheckTypedArrayIsDetached(array) {
assertEquals(0, array.byteLength);
assertEquals(0, array.byteOffset);
assertEquals(0, array.length);
@@ -81,21 +81,21 @@ function TestTypedArrayForEach(constructor) {
CheckWrapping(3.14, Number);
CheckWrapping({}, Object);
- // Neutering the buffer backing the typed array mid-way should
+ // Detaching the buffer backing the typed array mid-way should
// still make .forEach() finish, and the array should keep being
- // empty after neutering it.
+ // empty after detaching it.
count = 0;
a = new constructor(3);
result = a.every(function (n, index, array) {
- assertFalse(array[index] === undefined); // don't get here if neutered
- if (count > 0) %ArrayBufferNeuter(array.buffer);
+ assertFalse(array[index] === undefined); // don't get here if detached
+ if (count > 0) %ArrayBufferDetach(array.buffer);
array[index] = n + 1;
count++;
return count > 1 ? array[index] === undefined : true;
});
assertEquals(2, count);
assertEquals(true, result);
- CheckTypedArrayIsNeutered(a);
+ CheckTypedArrayIsDetached(a);
assertEquals(undefined, a[0]);
// Calling array.buffer midway can change the backing store.
@@ -161,7 +161,7 @@ function TestTypedArrayForEach(constructor) {
// Detached Operation
var array = new constructor([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]);
- %ArrayBufferNeuter(array.buffer);
+ %ArrayBufferDetach(array.buffer);
assertThrows(() => array.every(() => true), TypeError);
}
diff --git a/deps/v8/test/mjsunit/es6/typedarray-fill.js b/deps/v8/test/mjsunit/es6/typedarray-fill.js
index 9ed220373b..791b214734 100644
--- a/deps/v8/test/mjsunit/es6/typedarray-fill.js
+++ b/deps/v8/test/mjsunit/es6/typedarray-fill.js
@@ -74,12 +74,12 @@ for (var constructor of typedArrayConstructors) {
var tmp = {
[Symbol.toPrimitive]() {
assertUnreachable("Parameter should not be processed when " +
- "array.[[ViewedArrayBuffer]] is neutered.");
+ "array.[[ViewedArrayBuffer]] is detached.");
return 0;
}
};
var array = new constructor([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]);
- %ArrayBufferNeuter(array.buffer);
+ %ArrayBufferDetach(array.buffer);
assertThrows(() => array.fill(tmp), TypeError);
}
diff --git a/deps/v8/test/mjsunit/es6/typedarray-filter.js b/deps/v8/test/mjsunit/es6/typedarray-filter.js
index 0f25c362ec..077016a26c 100644
--- a/deps/v8/test/mjsunit/es6/typedarray-filter.js
+++ b/deps/v8/test/mjsunit/es6/typedarray-filter.js
@@ -21,7 +21,7 @@ function TestTypedArrayFilter(constructor) {
// Throw type error if source array is detached while executing a callback
let ta1 = new constructor(10);
assertThrows(() =>
- ta1.filter(() => %ArrayBufferNeuter(ta1.buffer))
+ ta1.filter(() => %ArrayBufferDetach(ta1.buffer))
, TypeError);
// A new typed array should be created after finishing callbacks
diff --git a/deps/v8/test/mjsunit/es6/typedarray-find.js b/deps/v8/test/mjsunit/es6/typedarray-find.js
index 6f646e5c80..f33e4c5cb9 100644
--- a/deps/v8/test/mjsunit/es6/typedarray-find.js
+++ b/deps/v8/test/mjsunit/es6/typedarray-find.js
@@ -190,13 +190,13 @@ assertEquals(x, 4);
var tmp = {
[Symbol.toPrimitive]() {
assertUnreachable("Parameter should not be processed when " +
- "array.[[ViewedArrayBuffer]] is neutered.");
+ "array.[[ViewedArrayBuffer]] is detached.");
return 0;
}
};
var array = new constructor([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]);
-%ArrayBufferNeuter(array.buffer);
+%ArrayBufferDetach(array.buffer);
assertThrows(() => array.find(tmp), TypeError);
}
diff --git a/deps/v8/test/mjsunit/es6/typedarray-findindex.js b/deps/v8/test/mjsunit/es6/typedarray-findindex.js
index 7447395e77..e31e17401a 100644
--- a/deps/v8/test/mjsunit/es6/typedarray-findindex.js
+++ b/deps/v8/test/mjsunit/es6/typedarray-findindex.js
@@ -190,11 +190,11 @@ assertEquals(x, 4);
var tmp = {
[Symbol.toPrimitive]() {
assertUnreachable("Parameter should not be processed when " +
- "array.[[ViewedArrayBuffer]] is neutered.");
+ "array.[[ViewedArrayBuffer]] is detached.");
return 0;
}
};
var array = new constructor([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]);
- %ArrayBufferNeuter(array.buffer);
+ %ArrayBufferDetach(array.buffer);
assertThrows(() => array.findIndex(tmp), TypeError);
}
diff --git a/deps/v8/test/mjsunit/es6/typedarray-foreach.js b/deps/v8/test/mjsunit/es6/typedarray-foreach.js
index 252706a9b5..81c212046a 100644
--- a/deps/v8/test/mjsunit/es6/typedarray-foreach.js
+++ b/deps/v8/test/mjsunit/es6/typedarray-foreach.js
@@ -15,7 +15,7 @@ var typedArrayConstructors = [
Float32Array,
Float64Array];
-function CheckTypedArrayIsNeutered(array) {
+function CheckTypedArrayIsDetached(array) {
assertEquals(0, array.byteLength);
assertEquals(0, array.byteOffset);
assertEquals(0, array.length);
@@ -84,7 +84,7 @@ function TestTypedArrayForEach(constructor) {
assertEquals(43, a[0]);
assertEquals(42, a[1]);
- // Neutering the buffer backing the typed array mid-way should
+ // Detaching the buffer backing the typed array mid-way should
   // still let .forEach() finish, exiting early due to the missing
   // elements, and the array should remain empty after detaching it.
// TODO(dehrenberg): According to the ES6 spec, accessing or testing
@@ -94,12 +94,12 @@ function TestTypedArrayForEach(constructor) {
a = new constructor(3);
count = 0;
a.forEach(function (n, index, array) {
- if (count > 0) %ArrayBufferNeuter(array.buffer);
+ if (count > 0) %ArrayBufferDetach(array.buffer);
array[index] = n + 1;
count++;
});
assertEquals(2, count);
- CheckTypedArrayIsNeutered(a);
+ CheckTypedArrayIsDetached(a);
assertEquals(undefined, a[0]);
// The method must work for typed arrays created from ArrayBuffer.
@@ -150,7 +150,7 @@ function TestTypedArrayForEach(constructor) {
// Detached Operation
var array = new constructor([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]);
- %ArrayBufferNeuter(array.buffer);
+ %ArrayBufferDetach(array.buffer);
assertThrows(() => array.forEach(() => true), TypeError);
}
diff --git a/deps/v8/test/mjsunit/es6/typedarray-from-detached-typedarray.js b/deps/v8/test/mjsunit/es6/typedarray-from-detached-typedarray.js
new file mode 100644
index 0000000000..6850571bc4
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/typedarray-from-detached-typedarray.js
@@ -0,0 +1,23 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+var typedArrayConstructors = [
+ Uint8Array,
+ Int8Array,
+ Uint16Array,
+ Int16Array,
+ Uint32Array,
+ Int32Array,
+ Uint8ClampedArray,
+ Float32Array,
+ Float64Array
+];
+
+for (constructor of typedArrayConstructors) {
+ var ta = new constructor(10);
+ %ArrayBufferDetach(ta.buffer);
+ assertThrows(() => constructor.from(ta), TypeError);
+}
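
A minimal sketch of the detached-buffer semantics this new test depends on
(%ArrayBufferDetach is a V8 test intrinsic, not standard JS, so this needs d8
or node run with --allow-natives-syntax, as the Flags line above says):

    const ta = new Uint8Array([1, 2, 3]);
    %ArrayBufferDetach(ta.buffer);                 // test-only intrinsic
    // Every length accessor now reports 0 and indexed reads yield undefined.
    console.log(ta.length, ta.byteLength, ta[0]);  // 0 0 undefined
    try { Uint8Array.from(ta); } catch (e) {
      console.log(e instanceof TypeError);         // true: detached source throws
    }
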
diff --git a/deps/v8/test/mjsunit/es6/typedarray-from-next-overridden.js b/deps/v8/test/mjsunit/es6/typedarray-from-next-overridden.js
new file mode 100644
index 0000000000..4d918a4a07
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/typedarray-from-next-overridden.js
@@ -0,0 +1,29 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var typedArrayConstructors = [
+ Uint8Array,
+ Int8Array,
+ Uint16Array,
+ Int16Array,
+ Uint32Array,
+ Int32Array,
+ Uint8ClampedArray,
+ Float32Array,
+ Float64Array
+];
+
+for (var constructor of typedArrayConstructors) {
+ let ta = new constructor([1, 2, 3]);
+ let it = ta[Symbol.iterator]();
+ let original_next = it.__proto__["next"];
+ Object.defineProperty(it.__proto__, "next", {
+ value: function() {
+ return {value: undefined, done: true};
+ },
+ configurable: true
+ });
+ assertEquals(0, constructor.from(ta).length);
+  Object.defineProperty(it.__proto__, "next", {value: original_next, configurable: true});
+}
diff --git a/deps/v8/test/mjsunit/es6/typedarray-from-nonfunction-iterator.js b/deps/v8/test/mjsunit/es6/typedarray-from-nonfunction-iterator.js
new file mode 100644
index 0000000000..ff7d7bc075
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/typedarray-from-nonfunction-iterator.js
@@ -0,0 +1,11 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+let ta = new Uint8Array([1, 2, 3]);
+
+ta[Symbol.iterator] = 1;
+assertThrows(function() { Uint8Array.from(ta); }, TypeError);
+
+ta[Symbol.iterator] = "bad";
+assertThrows(function() { Uint8Array.from(ta); }, TypeError);
diff --git a/deps/v8/test/mjsunit/es6/typedarray-from.js b/deps/v8/test/mjsunit/es6/typedarray-from.js
index 8157658249..4bc17ca260 100644
--- a/deps/v8/test/mjsunit/es6/typedarray-from.js
+++ b/deps/v8/test/mjsunit/es6/typedarray-from.js
@@ -208,7 +208,7 @@ for (var constructor of typedArrayConstructors) {
let ta2 = new constructor(3).fill(1);
Object.defineProperty(ta2, "length", {get: function() {
- %ArrayBufferNeuter(ta2.buffer);
+ %ArrayBufferDetach(ta2.buffer);
return 6;
}});
assertArrayLikeEquals(constructor.from(ta2), [d, d, d, d, d, d], constructor);
diff --git a/deps/v8/test/mjsunit/es6/typedarray-indexing.js b/deps/v8/test/mjsunit/es6/typedarray-indexing.js
index d12a1570c2..cb88068d76 100644
--- a/deps/v8/test/mjsunit/es6/typedarray-indexing.js
+++ b/deps/v8/test/mjsunit/es6/typedarray-indexing.js
@@ -19,7 +19,7 @@ var typedArrayConstructors = [
var tmp = {
[Symbol.toPrimitive]() {
assertUnreachable("Parameter should not be processed when " +
- "array.[[ViewedArrayBuffer]] is neutered.");
+ "array.[[ViewedArrayBuffer]] is detached.");
return 0;
}
};
@@ -65,7 +65,7 @@ for (var constructor of typedArrayConstructors) {
// Detached Operation
var array = new constructor([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]);
- %ArrayBufferNeuter(array.buffer);
+ %ArrayBufferDetach(array.buffer);
assertThrows(() => array.indexOf(tmp), TypeError);
// ----------------------------------------------------------------------
@@ -107,6 +107,6 @@ for (var constructor of typedArrayConstructors) {
// Detached Operation
var array = new constructor([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]);
- %ArrayBufferNeuter(array.buffer);
+ %ArrayBufferDetach(array.buffer);
assertThrows(() => array.lastIndexOf(tmp), TypeError);
}
diff --git a/deps/v8/test/mjsunit/es6/typedarray-iteration.js b/deps/v8/test/mjsunit/es6/typedarray-iteration.js
index b423ed0f04..30b09ab08a 100644
--- a/deps/v8/test/mjsunit/es6/typedarray-iteration.js
+++ b/deps/v8/test/mjsunit/es6/typedarray-iteration.js
@@ -82,7 +82,7 @@ for (var constructor of typedArrayConstructors) {
// Detached Operation
var array = new constructor([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]);
- %ArrayBufferNeuter(array.buffer);
+ %ArrayBufferDetach(array.buffer);
assertThrows(() => array.filter(() => false), TypeError);
})();
@@ -140,7 +140,7 @@ for (var constructor of typedArrayConstructors) {
// Detached Operation
var array = new constructor([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]);
- %ArrayBufferNeuter(array.buffer);
+ %ArrayBufferDetach(array.buffer);
assertThrows(() => array.map((v) => v), TypeError);
})();
@@ -204,7 +204,7 @@ for (var constructor of typedArrayConstructors) {
// Detached Operation
var array = new constructor([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]);
- %ArrayBufferNeuter(array.buffer);
+ %ArrayBufferDetach(array.buffer);
assertThrows(() => array.some((v) => false), TypeError);
})();
diff --git a/deps/v8/test/mjsunit/es6/typedarray-map.js b/deps/v8/test/mjsunit/es6/typedarray-map.js
index 54b535fd30..e8d97879b6 100644
--- a/deps/v8/test/mjsunit/es6/typedarray-map.js
+++ b/deps/v8/test/mjsunit/es6/typedarray-map.js
@@ -37,7 +37,7 @@ function TestTypedArrayMap(constructor) {
new DetachingArray(5).map(function(v,i,a){
print(i);
if (i == 1) {
- %ArrayBufferNeuter(target.buffer);
+ %ArrayBufferDetach(target.buffer);
}
})
}, TypeError);
diff --git a/deps/v8/test/mjsunit/es6/typedarray-neutered.js b/deps/v8/test/mjsunit/es6/typedarray-neutered.js
index f272f4f084..55a76cdc4d 100644
--- a/deps/v8/test/mjsunit/es6/typedarray-neutered.js
+++ b/deps/v8/test/mjsunit/es6/typedarray-neutered.js
@@ -4,8 +4,8 @@
// Flags: --allow-natives-syntax
-// Disable the neutering protector.
-%ArrayBufferNeuter(new ArrayBuffer(1024));
+// Disable the detaching protector.
+%ArrayBufferDetach(new ArrayBuffer(1024));
// ArrayBuffer
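
The throwaway detach at the top of the file exists because V8 keeps a one-shot
"detaching protector": optimized code may assume no ArrayBuffer has ever been
detached, until the first detach invalidates that assumption process-wide.
This is engine-internal behavior rather than spec semantics, but the pattern
it motivates looks like:

    %ArrayBufferDetach(new ArrayBuffer(1024));  // invalidate the protector once
    // Later detaches in the test now cannot trigger a mid-test
    // deoptimization that would change which code paths get exercised.
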
diff --git a/deps/v8/test/mjsunit/es6/typedarray-reduce.js b/deps/v8/test/mjsunit/es6/typedarray-reduce.js
index ba5d7f7a20..2746c57dff 100644
--- a/deps/v8/test/mjsunit/es6/typedarray-reduce.js
+++ b/deps/v8/test/mjsunit/es6/typedarray-reduce.js
@@ -254,13 +254,13 @@ for (var constructor of typedArrayConstructors) {
var tmp = {
[Symbol.toPrimitive]() {
assertUnreachable("Parameter should not be processed when " +
- "array.[[ViewedArrayBuffer]] is neutered.");
+ "array.[[ViewedArrayBuffer]] is detached.");
return 0;
}
};
var array = new constructor([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]);
- %ArrayBufferNeuter(array.buffer);
+ %ArrayBufferDetach(array.buffer);
assertThrows(() => array.reduce(sum, tmp), TypeError);
assertThrows(() => array.reduceRight(sum, tmp), TypeError);
}
diff --git a/deps/v8/test/mjsunit/es6/typedarray-reverse.js b/deps/v8/test/mjsunit/es6/typedarray-reverse.js
index bfeb227c5c..d7133718c3 100644
--- a/deps/v8/test/mjsunit/es6/typedarray-reverse.js
+++ b/deps/v8/test/mjsunit/es6/typedarray-reverse.js
@@ -57,7 +57,7 @@ for (var constructor of arrayConstructors) {
// Detached Operation
if (constructor != ArrayMaker) {
var array = new constructor([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]);
- %ArrayBufferNeuter(array.buffer);
+ %ArrayBufferDetach(array.buffer);
assertThrows(() => array.reverse(), TypeError);
}
}
diff --git a/deps/v8/test/mjsunit/es6/typedarray-slice.js b/deps/v8/test/mjsunit/es6/typedarray-slice.js
index 2f40fe5425..bd89f86e74 100644
--- a/deps/v8/test/mjsunit/es6/typedarray-slice.js
+++ b/deps/v8/test/mjsunit/es6/typedarray-slice.js
@@ -73,12 +73,12 @@ for (var constructor of typedArrayConstructors) {
var tmp = {
[Symbol.toPrimitive]() {
assertUnreachable("Parameter should not be processed when " +
- "array.[[ViewedArrayBuffer]] is neutered.");
+ "array.[[ViewedArrayBuffer]] is detached.");
return 0;
}
};
var array = new constructor([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]);
- %ArrayBufferNeuter(array.buffer);
+ %ArrayBufferDetach(array.buffer);
assertThrows(() => array.slice(tmp, tmp), TypeError);
// Check that the species array must be a typed array
diff --git a/deps/v8/test/mjsunit/es6/typedarray-sort.js b/deps/v8/test/mjsunit/es6/typedarray-sort.js
index e2618ade6b..c5c4ff079a 100644
--- a/deps/v8/test/mjsunit/es6/typedarray-sort.js
+++ b/deps/v8/test/mjsunit/es6/typedarray-sort.js
@@ -65,7 +65,7 @@ for (var constructor of typedArrayConstructors) {
// Detached Operation
var array = new constructor([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]);
- %ArrayBufferNeuter(array.buffer);
+ %ArrayBufferDetach(array.buffer);
assertThrows(() => array.sort(), TypeError);
}
diff --git a/deps/v8/test/mjsunit/es6/typedarray-tostring.js b/deps/v8/test/mjsunit/es6/typedarray-tostring.js
index a1fa9c7665..16c6319b7a 100644
--- a/deps/v8/test/mjsunit/es6/typedarray-tostring.js
+++ b/deps/v8/test/mjsunit/es6/typedarray-tostring.js
@@ -101,7 +101,7 @@ for (var constructor of typedArrayConstructors) {
// Detached Operation
var array = new constructor([1, 2, 3]);
- %ArrayBufferNeuter(array.buffer);
+ %ArrayBufferDetach(array.buffer);
assertThrows(() => array.join(), TypeError);
  assertThrows(() => array.toLocaleString(), TypeError);
assertThrows(() => array.toString(), TypeError);
diff --git a/deps/v8/test/mjsunit/es6/typedarray.js b/deps/v8/test/mjsunit/es6/typedarray.js
index 02bd91c1e5..aab12341ac 100644
--- a/deps/v8/test/mjsunit/es6/typedarray.js
+++ b/deps/v8/test/mjsunit/es6/typedarray.js
@@ -636,7 +636,7 @@ function TestTypedArraySet() {
var detached = false;
evilarr[1] = {
[Symbol.toPrimitive]() {
- %ArrayBufferNeuter(a111.buffer);
+ %ArrayBufferDetach(a111.buffer);
detached = true;
return 1;
}
@@ -648,7 +648,7 @@ function TestTypedArraySet() {
var tmp = {
[Symbol.toPrimitive]() {
assertUnreachable("Parameter should not be processed when " +
- "array.[[ViewedArrayBuffer]] is neutered.");
+ "array.[[ViewedArrayBuffer]] is detached.");
return 1;
}
};
@@ -662,7 +662,7 @@ function TestTypedArraySet() {
let detached = false;
const offset = {
[Symbol.toPrimitive]() {
- %ArrayBufferNeuter(xs.buffer);
+ %ArrayBufferDetach(xs.buffer);
detached = true;
return 0;
}
@@ -677,7 +677,7 @@ function TestTypedArraySet() {
for (const klass of typedArrayConstructors) {
const a = new klass(2);
for (let i = 0; i < a.length; i++) a[i] = i;
- %ArrayBufferNeuter(a.buffer);
+ %ArrayBufferDetach(a.buffer);
const b = new klass(2);
assertThrows(() => b.set(a), TypeError);
@@ -1022,3 +1022,29 @@ assertThrows(function LargeSourceArray() {
a.set(v0);
});
+
+function TestMapCustomSpeciesConstructor(constructor) {
+ const sample = new constructor([40, 42, 42]);
+ let result, ctorThis;
+
+ sample.constructor = {};
+ sample.constructor[Symbol.species] = function(count) {
+ result = arguments;
+ ctorThis = this;
+ return new constructor(count);
+ };
+
+ sample.map(function(v) { return v; });
+
+ assertSame(result.length, 1, "called with 1 argument");
+ assertSame(result[0], 3, "[0] is the new captured length");
+
+ assertTrue(
+ ctorThis instanceof sample.constructor[Symbol.species],
+ "`this` value in the @@species fn is an instance of the function itself"
+ );
+};
+
+for (var i = 0; i < typedArrayConstructors.length; i++) {
+  TestMapCustomSpeciesConstructor(typedArrayConstructors[i]);
+}
diff --git a/deps/v8/test/mjsunit/es6/unscopables.js b/deps/v8/test/mjsunit/es6/unscopables.js
index 782dd2d7a3..d60abc4b4e 100644
--- a/deps/v8/test/mjsunit/es6/unscopables.js
+++ b/deps/v8/test/mjsunit/es6/unscopables.js
@@ -96,7 +96,10 @@ function TestArrayPrototypeUnscopables() {
var fill = 'local fill';
var find = 'local find';
var findIndex = 'local findIndex';
+ var flat = 'local flat';
+ var flatMap = 'local flatMap';
var keys = 'local keys';
+ var includes = 'local includes';
var values = 'local values';
var array = [];
@@ -108,6 +111,9 @@ function TestArrayPrototypeUnscopables() {
assertEquals('local fill', fill);
assertEquals('local find', find);
assertEquals('local findIndex', findIndex);
+ assertEquals('local flat', flat);
+ assertEquals('local flatMap', flatMap);
+ assertEquals('local includes', includes);
assertEquals('local keys', keys);
assertEquals('local values', values);
assertEquals(42, toString);
diff --git a/deps/v8/test/mjsunit/es7/array-includes-receiver.js b/deps/v8/test/mjsunit/es7/array-includes-receiver.js
index edeae88631..91916aad54 100644
--- a/deps/v8/test/mjsunit/es7/array-includes-receiver.js
+++ b/deps/v8/test/mjsunit/es7/array-includes-receiver.js
@@ -356,7 +356,7 @@ var kTests = {
Detached_Int8Array() {
var array = new Int8Array(10);
- %ArrayBufferNeuter(array.buffer);
+ %ArrayBufferDetach(array.buffer);
assertFalse(Array.prototype.includes.call(array, 0));
assertFalse(Array.prototype.includes.call(array, 0, 10));
},
@@ -391,7 +391,7 @@ var kTests = {
Detached_Uint8Array() {
var array = new Uint8Array(10);
- %ArrayBufferNeuter(array.buffer);
+ %ArrayBufferDetach(array.buffer);
assertFalse(Array.prototype.includes.call(array, 0));
assertFalse(Array.prototype.includes.call(array, 0, 10));
},
@@ -421,7 +421,7 @@ var kTests = {
Detached_Uint8ClampedArray() {
var array = new Uint8ClampedArray(10);
- %ArrayBufferNeuter(array.buffer);
+ %ArrayBufferDetach(array.buffer);
assertFalse(Array.prototype.includes.call(array, 0));
assertFalse(Array.prototype.includes.call(array, 0, 10));
},
@@ -453,7 +453,7 @@ var kTests = {
Detached_Int16Array() {
var array = new Int16Array(10);
- %ArrayBufferNeuter(array.buffer);
+ %ArrayBufferDetach(array.buffer);
assertFalse(Array.prototype.includes.call(array, 0));
assertFalse(Array.prototype.includes.call(array, 0, 10));
},
@@ -485,7 +485,7 @@ var kTests = {
Detached_Uint16Array() {
var array = new Uint16Array(10);
- %ArrayBufferNeuter(array.buffer);
+ %ArrayBufferDetach(array.buffer);
assertFalse(Array.prototype.includes.call(array, 0));
assertFalse(Array.prototype.includes.call(array, 0, 10));
},
@@ -517,7 +517,7 @@ var kTests = {
Detached_Int32Array() {
var array = new Int32Array(10);
- %ArrayBufferNeuter(array.buffer);
+ %ArrayBufferDetach(array.buffer);
assertFalse(Array.prototype.includes.call(array, 0));
assertFalse(Array.prototype.includes.call(array, 0, 10));
},
@@ -550,7 +550,7 @@ var kTests = {
Detached_Uint32Array() {
var array = new Uint32Array(10);
- %ArrayBufferNeuter(array.buffer);
+ %ArrayBufferDetach(array.buffer);
assertFalse(Array.prototype.includes.call(array, 0));
assertFalse(Array.prototype.includes.call(array, 0, 10));
},
@@ -583,7 +583,7 @@ var kTests = {
Detached_Float32Array() {
var array = new Float32Array(10);
- %ArrayBufferNeuter(array.buffer);
+ %ArrayBufferDetach(array.buffer);
assertFalse(Array.prototype.includes.call(array, 0));
assertFalse(Array.prototype.includes.call(array, 0, 10));
},
@@ -616,7 +616,7 @@ var kTests = {
Detached_Float64Array() {
     var array = new Float64Array(10);
- %ArrayBufferNeuter(array.buffer);
+ %ArrayBufferDetach(array.buffer);
assertFalse(Array.prototype.includes.call(array, 0));
assertFalse(Array.prototype.includes.call(array, 0, 10));
},
diff --git a/deps/v8/test/mjsunit/es7/array-includes.js b/deps/v8/test/mjsunit/es7/array-includes.js
index 4170a1cf31..ac0f34106a 100644
--- a/deps/v8/test/mjsunit/es7/array-includes.js
+++ b/deps/v8/test/mjsunit/es7/array-includes.js
@@ -630,6 +630,34 @@
})();
+// Array.prototype.includes accesses out-of-bounds if length is changed late.
+(function () {
+ let arr = [1, 2, 3];
+ assertTrue(arr.includes(undefined, {
+ toString: function () {
+ arr.length = 0;
+ return 0;
+ }
+ }));
+
+ arr = [1, 2, 3];
+ assertFalse(arr.includes(undefined, {
+ toString: function () {
+ arr.length = 0;
+ return 10;
+ }
+ }));
+
+ arr = [1, 2, 3];
+ assertFalse(arr.includes(4, {
+ toString: function () {
+ arr.push(4);
+ return 0;
+ }
+ }));
+})();
+
+
// Array.prototype.includes should use the SameValueZero algorithm to compare
(function() {
assertTrue([1, 2, 3].includes(2));
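
SameValueZero differs from strict equality only in how it treats NaN (and it
folds +0 and -0 together just as === does), which is exactly what separates
includes from indexOf:

    console.log([NaN].includes(NaN));  // true: SameValueZero matches NaN with NaN
    console.log([NaN].indexOf(NaN));   // -1: strict equality, NaN !== NaN
    console.log([0].includes(-0));     // true: +0 and -0 are not distinguished
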
diff --git a/deps/v8/test/mjsunit/es8/async-arrow-default-function-await.js b/deps/v8/test/mjsunit/es8/async-arrow-default-function-await.js
new file mode 100644
index 0000000000..6c4d00e61a
--- /dev/null
+++ b/deps/v8/test/mjsunit/es8/async-arrow-default-function-await.js
@@ -0,0 +1,5 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+async(o = (function(await) {})) => 0
diff --git a/deps/v8/test/mjsunit/es8/async-arrow-lexical-arguments.js b/deps/v8/test/mjsunit/es8/async-arrow-lexical-arguments.js
index b29f17fce0..720770ef49 100644
--- a/deps/v8/test/mjsunit/es8/async-arrow-lexical-arguments.js
+++ b/deps/v8/test/mjsunit/es8/async-arrow-lexical-arguments.js
@@ -21,7 +21,7 @@ function assertEqualsAsync(expected, run, msg) {
assertFalse(hadValue || hadError);
- %RunMicrotasks();
+ %PerformMicrotaskCheckpoint();
if (hadError) throw actual;
diff --git a/deps/v8/test/mjsunit/es8/async-arrow-lexical-new.target.js b/deps/v8/test/mjsunit/es8/async-arrow-lexical-new.target.js
index 943267e9d8..7d016281f8 100644
--- a/deps/v8/test/mjsunit/es8/async-arrow-lexical-new.target.js
+++ b/deps/v8/test/mjsunit/es8/async-arrow-lexical-new.target.js
@@ -21,7 +21,7 @@ function assertEqualsAsync(expected, run, msg) {
assertFalse(hadValue || hadError);
- %RunMicrotasks();
+ %PerformMicrotaskCheckpoint();
if (hadError) throw actual;
diff --git a/deps/v8/test/mjsunit/es8/async-arrow-lexical-super.js b/deps/v8/test/mjsunit/es8/async-arrow-lexical-super.js
index b15a3834db..b175ac5ae1 100644
--- a/deps/v8/test/mjsunit/es8/async-arrow-lexical-super.js
+++ b/deps/v8/test/mjsunit/es8/async-arrow-lexical-super.js
@@ -21,7 +21,7 @@ function assertEqualsAsync(expected, run, msg) {
assertFalse(hadValue || hadError);
- %RunMicrotasks();
+ %PerformMicrotaskCheckpoint();
if (hadError) throw actual;
diff --git a/deps/v8/test/mjsunit/es8/async-arrow-lexical-this.js b/deps/v8/test/mjsunit/es8/async-arrow-lexical-this.js
index 38bdddc2c7..a21978d1a0 100644
--- a/deps/v8/test/mjsunit/es8/async-arrow-lexical-this.js
+++ b/deps/v8/test/mjsunit/es8/async-arrow-lexical-this.js
@@ -21,7 +21,7 @@ function assertEqualsAsync(expected, run, msg) {
assertFalse(hadValue || hadError);
- %RunMicrotasks();
+ %PerformMicrotaskCheckpoint();
if (hadError) throw actual;
diff --git a/deps/v8/test/mjsunit/es8/async-await-basic.js b/deps/v8/test/mjsunit/es8/async-await-basic.js
index 6e9ee02ffe..1c7b2ac601 100644
--- a/deps/v8/test/mjsunit/es8/async-await-basic.js
+++ b/deps/v8/test/mjsunit/es8/async-await-basic.js
@@ -23,7 +23,7 @@ function assertThrowsAsync(run, errorType, message) {
assertFalse(hadValue || hadError);
- %RunMicrotasks();
+ %PerformMicrotaskCheckpoint();
if (!hadError) {
throw new MjsUnitAssertionError(
@@ -57,7 +57,7 @@ function assertEqualsAsync(expected, run, msg) {
assertFalse(hadValue || hadError);
- %RunMicrotasks();
+ %PerformMicrotaskCheckpoint();
if (hadError) throw actual;
@@ -402,7 +402,7 @@ assertDoesNotThrow(gaga);
}
}
foo().then(() => log.push("c"));
- %RunMicrotasks();
+ %PerformMicrotaskCheckpoint();
assertEquals(["b", "a", "c"], log);
}
@@ -416,7 +416,7 @@ assertDoesNotThrow(gaga);
}
}
foo().then(() => log.push("c"));
- %RunMicrotasks();
+ %PerformMicrotaskCheckpoint();
assertEquals(["b", "a", "c"], log);
}
@@ -430,7 +430,7 @@ assertDoesNotThrow(gaga);
}
}
foo().then(() => log.push("c"));
- %RunMicrotasks();
+ %PerformMicrotaskCheckpoint();
assertEquals(["a", "b", "c"], log);
}
@@ -445,7 +445,7 @@ assertDoesNotThrow(gaga);
}
}
foo().then(() => log.push("c"));
- %RunMicrotasks();
+ %PerformMicrotaskCheckpoint();
assertEquals(["b", "c", "a"], log);
}
@@ -459,7 +459,7 @@ assertDoesNotThrow(gaga);
}
}
foo().then(() => log.push("c"));
- %RunMicrotasks();
+ %PerformMicrotaskCheckpoint();
assertEquals(["b", "a", "c"], log);
}
@@ -473,7 +473,7 @@ assertDoesNotThrow(gaga);
}
}
foo().then(() => log.push("c"));
- %RunMicrotasks();
+ %PerformMicrotaskCheckpoint();
assertEquals(["a", "b", "c"], log);
}
@@ -488,7 +488,7 @@ assertDoesNotThrow(gaga);
}
}
foo().then(() => log.push("c"));
- %RunMicrotasks();
+ %PerformMicrotaskCheckpoint();
assertEquals(["b", "a", "c"], log);
}
@@ -502,7 +502,7 @@ assertDoesNotThrow(gaga);
}
}
foo().then(() => log.push("c"));
- %RunMicrotasks();
+ %PerformMicrotaskCheckpoint();
assertEquals(["b", "a", "c"], log);
}
@@ -516,7 +516,7 @@ assertDoesNotThrow(gaga);
}
}
foo().then(() => log.push("c"));
- %RunMicrotasks();
+ %PerformMicrotaskCheckpoint();
assertEquals(["a", "b", "c"], log);
}
@@ -531,7 +531,7 @@ assertDoesNotThrow(gaga);
}
}
foo().then(() => log.push("c"));
- %RunMicrotasks();
+ %PerformMicrotaskCheckpoint();
assertEquals(["b", "c", "a"], log);
}
@@ -545,7 +545,7 @@ assertDoesNotThrow(gaga);
}
}
foo().then(() => log.push("c"));
- %RunMicrotasks();
+ %PerformMicrotaskCheckpoint();
assertEquals(["b", "c", "a"], log);
}
@@ -559,7 +559,7 @@ assertDoesNotThrow(gaga);
}
}
foo().then(() => log.push("c"));
- %RunMicrotasks();
+ %PerformMicrotaskCheckpoint();
assertEquals(["a", "b", "c"], log);
}
@@ -585,7 +585,7 @@ assertDoesNotThrow(gaga);
var ans;
f2().then(x => ans = x).catch(e => ans = e);
- %RunMicrotasks();
+ %PerformMicrotaskCheckpoint();
assertEquals([0], ans);
}
diff --git a/deps/v8/test/mjsunit/es8/async-await-no-constructor.js b/deps/v8/test/mjsunit/es8/async-await-no-constructor.js
index e954e2ac57..5e4780a6ef 100644
--- a/deps/v8/test/mjsunit/es8/async-await-no-constructor.js
+++ b/deps/v8/test/mjsunit/es8/async-await-no-constructor.js
@@ -23,5 +23,5 @@ async function bar() {
foo();
bar();
-%RunMicrotasks();
+%PerformMicrotaskCheckpoint();
assertEquals(2, count);
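
The %RunMicrotasks to %PerformMicrotaskCheckpoint renames throughout these
tests track the runtime function's new name; the behavior is unchanged. As a
sketch of why the tests call it at all (again test-only, behind
--allow-natives-syntax): promise reactions are microtasks, so nothing can
observe them until the queue is drained:

    let settled = false;
    Promise.resolve().then(() => { settled = true; });
    console.log(settled);            // false: the reaction is only queued
    %PerformMicrotaskCheckpoint();   // synchronously drain the microtask queue
    console.log(settled);            // true
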
diff --git a/deps/v8/test/mjsunit/es8/async-await-species.js b/deps/v8/test/mjsunit/es8/async-await-species.js
index b3e925433a..3fc46fd230 100644
--- a/deps/v8/test/mjsunit/es8/async-await-species.js
+++ b/deps/v8/test/mjsunit/es8/async-await-species.js
@@ -21,7 +21,7 @@ function assertEqualsAsync(expected, run, msg) {
assertFalse(hadValue || hadError);
- %RunMicrotasks();
+ %PerformMicrotaskCheckpoint();
if (hadError) throw actual;
diff --git a/deps/v8/test/mjsunit/es8/async-destructuring.js b/deps/v8/test/mjsunit/es8/async-destructuring.js
index 1fbac5a072..d5e90eb057 100644
--- a/deps/v8/test/mjsunit/es8/async-destructuring.js
+++ b/deps/v8/test/mjsunit/es8/async-destructuring.js
@@ -21,7 +21,7 @@ function assertThrowsAsync(run, errorType, message) {
assertFalse(hadValue || hadError);
- %RunMicrotasks();
+ %PerformMicrotaskCheckpoint();
if (!hadError) {
throw new MjsUnitAssertionError(
@@ -55,7 +55,7 @@ function assertEqualsAsync(expected, run, msg) {
assertFalse(hadValue || hadError);
- %RunMicrotasks();
+ %PerformMicrotaskCheckpoint();
if (hadError) throw actual;
@@ -77,7 +77,7 @@ function assertEqualsAsync(expected, run, msg) {
assertEquals(1, y);
assertEquals(1, z);
assertEquals(0, w);
- %RunMicrotasks();
+ %PerformMicrotaskCheckpoint();
assertEquals(1, y);
assertEquals(1, z);
assertEquals(1, w);
diff --git a/deps/v8/test/mjsunit/es8/async-function-stacktrace.js b/deps/v8/test/mjsunit/es8/async-function-stacktrace.js
index ab6dd2633b..e9358c2679 100644
--- a/deps/v8/test/mjsunit/es8/async-function-stacktrace.js
+++ b/deps/v8/test/mjsunit/es8/async-function-stacktrace.js
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+// Flags: --noasync-stack-traces
+
async function test(func, funcs) {
try {
await func();
@@ -81,21 +83,19 @@ async function runTests() {
try { await reject(); } catch (e) { throw new Error("FAIL"); }
} }).c4, ["c4"]);
- // TODO(caitp): We should infer anonymous async functions as the empty
- // string, not as the name of a function they're passed as a parameter to.
await test(async x => { throw new Error("FAIL") },
- ["test", "test", "runTests"]);
+ ["test", "runTests"]);
await test(async() => { throw new Error("FAIL") },
- ["test", "test", "runTests"]);
+ ["test", "runTests"]);
await test(async(a) => { throw new Error("FAIL") },
- ["test", "test", "runTests"]);
+ ["test", "runTests"]);
await test(async(a, b) => { throw new Error("FAIL") },
- ["test", "test", "runTests"]);
+ ["test", "runTests"]);
- await test(async x => { await 1; throw new Error("FAIL") }, ["test"]);
- await test(async() => { await 1; throw new Error("FAIL") }, ["test"]);
- await test(async(a) => { await 1; throw new Error("FAIL") }, ["test"]);
- await test(async(a, b) => { await 1; throw new Error("FAIL") }, ["test"]);
+ await test(async x => { await 1; throw new Error("FAIL") }, []);
+ await test(async() => { await 1; throw new Error("FAIL") }, []);
+ await test(async(a) => { await 1; throw new Error("FAIL") }, []);
+ await test(async(a, b) => { await 1; throw new Error("FAIL") }, []);
await test(async x => {
await 1;
@@ -104,7 +104,7 @@ async function runTests() {
} catch (e) {
throw new Error("FAIL");
}
- }, ["test"]);
+ }, []);
await test(async() => {
await 1;
@@ -113,7 +113,7 @@ async function runTests() {
} catch (e) {
throw new Error("FAIL");
}
- }, ["test"]);
+ }, []);
await test(async(a) => {
await 1;
@@ -122,7 +122,7 @@ async function runTests() {
} catch (e) {
throw new Error("FAIL");
}
- }, ["test"]);
+ }, []);
await test(async(a, b) => {
await 1;
@@ -131,7 +131,7 @@ async function runTests() {
} catch (e) {
throw new Error("FAIL");
}
- }, ["test"]);
+ }, []);
await test(async x => {
await 1;
@@ -140,7 +140,7 @@ async function runTests() {
} catch (e) {
throw new Error("FAIL");
}
- }, ["test"]);
+ }, []);
await test(async() => {
await 1;
@@ -149,7 +149,7 @@ async function runTests() {
} catch (e) {
throw new Error("FAIL");
}
- }, ["test"]);
+ }, []);
await test(async(a) => {
await 1;
@@ -158,7 +158,7 @@ async function runTests() {
} catch (e) {
throw new Error("FAIL");
}
- }, ["test"]);
+ }, []);
await test(async(a, b) => {
await 1;
@@ -167,7 +167,7 @@ async function runTests() {
} catch (e) {
throw new Error("FAIL");
}
- }, ["test"]);
+ }, []);
}
runTests().catch(e => {
diff --git a/deps/v8/test/mjsunit/es8/async-function-try-finally.js b/deps/v8/test/mjsunit/es8/async-function-try-finally.js
index 9ba07eb427..43badc480a 100644
--- a/deps/v8/test/mjsunit/es8/async-function-try-finally.js
+++ b/deps/v8/test/mjsunit/es8/async-function-try-finally.js
@@ -21,7 +21,7 @@ function assertThrowsAsync(run, errorType, message) {
assertFalse(hadValue || hadError);
- %RunMicrotasks();
+ %PerformMicrotaskCheckpoint();
if (!hadError) {
throw new MjsUnitAssertionError(
@@ -55,7 +55,7 @@ function assertEqualsAsync(expected, run, msg) {
assertFalse(hadValue || hadError);
- %RunMicrotasks();
+ %PerformMicrotaskCheckpoint();
if (hadError) throw actual;
diff --git a/deps/v8/test/mjsunit/es9/regress/regress-904167.js b/deps/v8/test/mjsunit/es9/regress/regress-904167.js
new file mode 100644
index 0000000000..8986972a8f
--- /dev/null
+++ b/deps/v8/test/mjsunit/es9/regress/regress-904167.js
@@ -0,0 +1,14 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Previously, spreading in-object properties would always treat double fields
+// as tagged, potentially dereferencing a Float64.
+
+// Ensure that we don't fail an assert from --verify-heap when cloning a
+// MutableHeapNumber in the CloneObjectIC handler case.
+var src, clone;
+for (var i = 0; i < 40000; i++) {
+ src = { ...i, x: -9007199254740991 };
+ clone = { ...src };
+}
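
At the language level the test just spreads own properties into fresh objects
in a hot loop; the bug was in how the engine's CloneObjectIC copied unboxed
double fields. What spread must guarantee, regardless of storage
representation, is a by-value copy with no aliasing:

    const src = { x: -9007199254740991 };  // outside Smi range, stored as a heap number
    const clone = { ...src };              // own-property copy by value
    clone.x = 1;
    console.log(src.x);                    // -9007199254740991: clone did not alias src
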
diff --git a/deps/v8/test/mjsunit/for-of-in-catch-duplicate-decl.js b/deps/v8/test/mjsunit/for-of-in-catch-duplicate-decl.js
new file mode 100644
index 0000000000..ee0cda66d8
--- /dev/null
+++ b/deps/v8/test/mjsunit/for-of-in-catch-duplicate-decl.js
@@ -0,0 +1,5 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+assertThrows("try { } catch (e) { var e; for (var e of []) {} }")
diff --git a/deps/v8/test/mjsunit/harmony/async-from-sync-iterator.js b/deps/v8/test/mjsunit/harmony/async-from-sync-iterator.js
index 2d6be098a2..e9dfe5d322 100644
--- a/deps/v8/test/mjsunit/harmony/async-from-sync-iterator.js
+++ b/deps/v8/test/mjsunit/harmony/async-from-sync-iterator.js
@@ -24,7 +24,7 @@ function assertThrowsAsync(run, errorType, message) {
assertFalse(hadValue || hadError);
- %RunMicrotasks();
+ %PerformMicrotaskCheckpoint();
if (!hadError) {
throw new MjsUnitAssertionError(
@@ -185,7 +185,7 @@ class MyError extends Error {};
testFailure = error;
});
-%RunMicrotasks();
+%PerformMicrotaskCheckpoint();
if (testFailed) {
throw testFailure;
}
@@ -619,7 +619,7 @@ if (testFailed) {
testFailure = error;
});
-%RunMicrotasks();
+%PerformMicrotaskCheckpoint();
if (testFailed) {
throw testFailure;
}
@@ -663,7 +663,7 @@ if (testFailed) {
// Cycle through `f` to extract iterator methods
f().catch(function() { %AbortJS("No error should have occurred"); });
- %RunMicrotasks();
+ %PerformMicrotaskCheckpoint();
assertEquals(typeof extractedNext, "function");
assertThrowsAsync(() => extractedNext.call(undefined), TypeError);
diff --git a/deps/v8/test/mjsunit/harmony/async-generators-basic.js b/deps/v8/test/mjsunit/harmony/async-generators-basic.js
index cf21d9a8c8..5ff7d25eea 100644
--- a/deps/v8/test/mjsunit/harmony/async-generators-basic.js
+++ b/deps/v8/test/mjsunit/harmony/async-generators-basic.js
@@ -21,7 +21,7 @@ function assertThrowsAsync(run, errorType, message) {
assertFalse(hadValue || hadError);
- %RunMicrotasks();
+ %PerformMicrotaskCheckpoint();
if (!hadError) {
throw new MjsUnitAssertionError(
@@ -55,7 +55,7 @@ function assertEqualsAsync(expected, run, msg) {
assertFalse(hadValue || hadError);
- %RunMicrotasks();
+ %PerformMicrotaskCheckpoint();
if (hadError) throw actual;
@@ -448,7 +448,7 @@ async function* asyncGeneratorForNestedResumeNext() {
}
it = asyncGeneratorForNestedResumeNext();
it.next().then(logIterResult, AbortUnreachable);
-%RunMicrotasks();
+%PerformMicrotaskCheckpoint();
assertEquals([
{ value: "rootbeer", done: false },
{ value: "float", done: false },
@@ -464,7 +464,7 @@ let asyncGeneratorExprForNestedResumeNext = async function*() {
};
it = asyncGeneratorExprForNestedResumeNext();
it.next().then(logIterResult, AbortUnreachable);
-%RunMicrotasks();
+%PerformMicrotaskCheckpoint();
assertEquals([
{ value: "first", done: false },
{ value: "second", done: false },
@@ -482,7 +482,7 @@ let asyncGeneratorMethodForNestedResumeNext = ({
}).method;
it = asyncGeneratorMethodForNestedResumeNext();
it.next().then(logIterResult, AbortUnreachable);
-%RunMicrotasks();
+%PerformMicrotaskCheckpoint();
assertEquals([
{ value: "remember", done: false },
{ value: "the cant!", done: false },
@@ -498,7 +498,7 @@ let asyncGeneratorCallEvalForNestedResumeNext =
yield await Resolver("rainbow!");`);
it = asyncGeneratorCallEvalForNestedResumeNext();
it.next().then(logIterResult, AbortUnreachable);
-%RunMicrotasks();
+%PerformMicrotaskCheckpoint();
assertEquals([
{ value: "reading", done: false },
{ value: "rainbow!", done: false },
@@ -514,7 +514,7 @@ let asyncGeneratorNewEvalForNestedResumeNext =
yield await Resolver("BB!");`);
it = asyncGeneratorNewEvalForNestedResumeNext();
it.next().then(logIterResult, AbortUnreachable);
-%RunMicrotasks();
+%PerformMicrotaskCheckpoint();
assertEquals([
{ value: 731, done: false },
{ value: "BB!", done: false },
@@ -536,7 +536,7 @@ async function* asyncGeneratorForNestedResumeThrow() {
}
it = asyncGeneratorForNestedResumeThrow();
it.next().then(logIterResult, logError);
-%RunMicrotasks();
+%PerformMicrotaskCheckpoint();
assertEquals([
"throw1",
"throw2",
@@ -556,7 +556,7 @@ let asyncGeneratorExprForNestedResumeThrow = async function*() {
};
it = asyncGeneratorExprForNestedResumeThrow();
it.next().then(logIterResult, logError);
-%RunMicrotasks();
+%PerformMicrotaskCheckpoint();
assertEquals([
"throw3",
"throw4",
@@ -578,7 +578,7 @@ let asyncGeneratorMethodForNestedResumeThrow = ({
}).method;
it = asyncGeneratorMethodForNestedResumeThrow();
it.next().then(logIterResult, logError);
-%RunMicrotasks();
+%PerformMicrotaskCheckpoint();
assertEquals([
"throw5",
"throw6",
@@ -598,7 +598,7 @@ let asyncGeneratorCallEvalForNestedResumeThrow =
AbortUnreachable();`);
it = asyncGeneratorCallEvalForNestedResumeThrow();
it.next().then(logIterResult, logError);
-%RunMicrotasks();
+%PerformMicrotaskCheckpoint();
assertEquals([
"throw7",
"throw8",
@@ -618,7 +618,7 @@ let asyncGeneratorNewEvalForNestedResumeThrow =
AbortUnreachable();`);
it = asyncGeneratorNewEvalForNestedResumeThrow();
it.next().then(logIterResult, logError);
-%RunMicrotasks();
+%PerformMicrotaskCheckpoint();
assertEquals([
"throw9",
"throw10",
@@ -636,7 +636,7 @@ async function* asyncGeneratorForNestedResumeReturn() {
}
it = asyncGeneratorForNestedResumeReturn();
it.next().then(logIterResult, logError);
-%RunMicrotasks();
+%PerformMicrotaskCheckpoint();
assertEquals([
{ value: "step1", done: false },
{ value: "step2", done: true },
@@ -651,7 +651,7 @@ let asyncGeneratorExprForNestedResumeReturn = async function*() {
};
it = asyncGeneratorExprForNestedResumeReturn();
it.next().then(logIterResult, logError);
-%RunMicrotasks();
+%PerformMicrotaskCheckpoint();
assertEquals([
{ value: "step3", done: false },
{ value: "step4", done: true },
@@ -668,7 +668,7 @@ let asyncGeneratorMethodForNestedResumeReturn = ({
}).method;
it = asyncGeneratorMethodForNestedResumeReturn();
it.next().then(logIterResult, logError);
-%RunMicrotasks();
+%PerformMicrotaskCheckpoint();
assertEquals([
{ value: "step5", done: false },
{ value: "step6", done: true },
@@ -683,7 +683,7 @@ let asyncGeneratorCallEvalForNestedResumeReturn =
yield "step7";`);
it = asyncGeneratorCallEvalForNestedResumeReturn();
it.next().then(logIterResult, logError);
-%RunMicrotasks();
+%PerformMicrotaskCheckpoint();
assertEquals([
{ value: "step7", done: false },
{ value: "step8", done: true },
@@ -698,7 +698,7 @@ let asyncGeneratorNewEvalForNestedResumeReturn =
yield "step9";`);
it = asyncGeneratorNewEvalForNestedResumeReturn();
it.next().then(logIterResult, logError);
-%RunMicrotasks();
+%PerformMicrotaskCheckpoint();
assertEquals([
{ value: "step9", done: false },
{ value: "step10", done: true },
diff --git a/deps/v8/test/mjsunit/harmony/bigint/as-int-n.js b/deps/v8/test/mjsunit/harmony/bigint/as-int-n.js
index 154a0929e5..ef48cadcc6 100644
--- a/deps/v8/test/mjsunit/harmony/bigint/as-int-n.js
+++ b/deps/v8/test/mjsunit/harmony/bigint/as-int-n.js
@@ -145,6 +145,10 @@
assertEquals(-4n, BigInt.asIntN(3, "12"));
assertEquals(0x123456789abcdefn,
BigInt.asIntN(64, 0xabcdef0123456789abcdefn));
+}{
+ // Regression test for crbug.com/v8/8426.
+ assertEquals(-0x8000000000000000n,
+ BigInt.asIntN(64, -0x8000000000000000n));
}
// BigInt.asUintN
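
The regression value is the 64-bit two's-complement minimum, the one boundary
input where wrapping maps the magnitude onto itself with the sign bit set. For
context, the wrap-around behavior of BigInt.asIntN at that boundary:

    console.log(BigInt.asIntN(64, 2n ** 63n));       // -9223372036854775808n: wraps to INT64_MIN
    console.log(BigInt.asIntN(64, -(2n ** 63n)));    // -9223372036854775808n: already representable
    console.log(BigInt.asIntN(64, 2n ** 63n - 1n));  // 9223372036854775807n: INT64_MAX, unchanged
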
diff --git a/deps/v8/test/mjsunit/harmony/bigint/misc.js b/deps/v8/test/mjsunit/harmony/bigint/misc.js
new file mode 100644
index 0000000000..1a11547353
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/bigint/misc.js
@@ -0,0 +1,8 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Create BigInt in large object space for which MakeImmutable reduces the
+// length.
+const x = 2n ** (2n ** 22n);
+assertEquals(1n, x - (x - 1n));
diff --git a/deps/v8/test/mjsunit/harmony/bigint/regressions.js b/deps/v8/test/mjsunit/harmony/bigint/regressions.js
index 8e13622eab..b4a55c1ffd 100644
--- a/deps/v8/test/mjsunit/harmony/bigint/regressions.js
+++ b/deps/v8/test/mjsunit/harmony/bigint/regressions.js
@@ -19,8 +19,8 @@ assertTrue(0n === 0n);
// crbug.com/818277: Must throw without DCHECK failures.
// In order to run acceptably fast in Debug mode, this test assumes that
-// we allow at least 2 billion bits in a BigInt.
-var close_to_limit = 2n ** 2000000000n;
+// we allow at least 1 billion bits in a BigInt.
+var close_to_limit = 2n ** 1000000000n;
assertThrows(() => close_to_limit ** 100n, RangeError);
// Check boundary conditions of the power-of-two fast path.
diff --git a/deps/v8/test/mjsunit/harmony/default-parameter-do-expression.js b/deps/v8/test/mjsunit/harmony/default-parameter-do-expression.js
deleted file mode 100644
index cb80d246bc..0000000000
--- a/deps/v8/test/mjsunit/harmony/default-parameter-do-expression.js
+++ /dev/null
@@ -1,21 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --harmony-do-expressions --nolazy
-
-function hoist_unique_do_var() {
- var f = (y = do { var unique = 3 }) => unique;
- assertEquals(3, f());
- assertThrows(() => unique, ReferenceError);
-}
-hoist_unique_do_var();
-
-function hoist_duplicate_do_var() {
- var duplicate = 100;
- var f = (y = do { var duplicate = 3 }) => duplicate;
- assertEquals(3, f());
- // TODO(verwaest): The {duplicate} declarations were invalidly merged.
- assertEquals(3, duplicate);
-}
-hoist_duplicate_do_var();
diff --git a/deps/v8/test/mjsunit/harmony/do-expressions-arrow-param-scope.js b/deps/v8/test/mjsunit/harmony/do-expressions-arrow-param-scope.js
deleted file mode 100644
index f5d5097995..0000000000
--- a/deps/v8/test/mjsunit/harmony/do-expressions-arrow-param-scope.js
+++ /dev/null
@@ -1,95 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --harmony-do-expressions
-
-(function testBasic() {
- let f = (x = eval("var z = 42; z"), y = do { 43 }) => x + y;
- assertEquals(85, f());
-})();
-
-(function testReturnParam() {
- let f = (x = eval("var z = 42; z"), y = do { x }) => x + y;
- assertEquals(84, f());
-})();
-
-(function testCaptureParam() {
- let f = (x = eval("var z = 42; z"), y = do { () => x }) => x + y();
- assertEquals(84, f());
-})();
-
-(function testScoped() {
- let f = (x = eval("var z = 42; z"), y = do { let z; x }) => x + y;
- assertEquals(84, f());
-})();
-
-(function testCaptureScoped() {
- let f = (x = eval("var z = 42; z"), y = do { let z; () => x }) => x + y();
- assertEquals(84, f());
-})();
-
-(function testCaptureOuter() {
- let z = 44;
- let f = (x = eval("var z = 42; z"), y = do { () => z }) => x + y();
- assertEquals(86, f())
-})();
-
-(function testCaptureOuterScoped() {
- let z = 44;
- let f = (x = eval("var z = 42; z"), y = do { let q; () => z }) => x + y();
- assertEquals(86, f())
-})();
-
-(function testWith() {
- let f = (x = eval("var z = 42; z"),
- y = do {
- with ({foo: "bar"}) {
- () => x }
- }) => x + y();
- assertEquals(84, f())
-})();
-
-(function testTry() {
- let f = (x = eval("var z = 42; z"),
- y = do {
- try { () => x }
- catch (e) { }
- }) => x + y();
- assertEquals(84, f())
-})();
-
-(function testCatch() {
- let f = (x = eval("var z = 42; z"),
- y = do {
- try { throw 42 }
- catch (e) { () => x }
- }) => x + y();
- assertEquals(84, f())
-})();
-
-(function testFinally() {
- let z = 44;
- let q;
- let f = (x = eval("var z = 42; z"),
- y = do {
- try { }
- catch (e) { }
- finally { q = () => z }
- q;
- }) => x + y();
- assertEquals(86, f())
-})();
-
-(function testFinallyThrow() {
- let z = 44;
- let q;
- let f = (x = eval("var z = 42; z"),
- y = do {
- try { throw 42; }
- catch (e) { }
- finally { q = () => z }
- q;
- }) => x + y();
- assertEquals(86, f())
-})();
diff --git a/deps/v8/test/mjsunit/harmony/do-expressions-control.js b/deps/v8/test/mjsunit/harmony/do-expressions-control.js
deleted file mode 100644
index 12c54295cc..0000000000
--- a/deps/v8/test/mjsunit/harmony/do-expressions-control.js
+++ /dev/null
@@ -1,109 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --allow-natives-syntax --harmony-do-expressions
-
-(function TestDoForInDoBreak() {
- function f(o, i) {
- var a = "result@" + do {
- var r = "(";
- for (var x in o) {
- var b = "end@" + do {
- if (x == i) { break } else { r += o[x]; x }
- }
- }
- r + ")";
- }
- return a + "," + b;
- }
- assertEquals("result@(3),end@0", f([3], 2));
- assertEquals("result@(35),end@1", f([3,5], 2));
- assertEquals("result@(35),end@1", f([3,5,7], 2));
- assertEquals("result@(35),end@1", f([3,5,7,9], 2));
- %OptimizeFunctionOnNextCall(f);
- assertEquals("result@(3),end@0", f([3], 2));
- assertEquals("result@(35),end@1", f([3,5], 2));
- assertEquals("result@(35),end@1", f([3,5,7], 2));
- assertEquals("result@(35),end@1", f([3,5,7,9], 2));
-})();
-
-(function TestDoForInDoContinue() {
- function f(o, i) {
- var a = "result@" + do {
- var r = "("
- for (var x in o) {
- var b = "end@" + do {
- if (x == i) { continue } else { r += o[x]; x }
- }
- }
- r + ")"
- }
- return a + "," + b
- }
- assertEquals("result@(3),end@0", f([3], 2));
- assertEquals("result@(35),end@1", f([3,5], 2));
- assertEquals("result@(35),end@1", f([3,5,7], 2));
- assertEquals("result@(359),end@3", f([3,5,7,9], 2));
- %OptimizeFunctionOnNextCall(f);
- assertEquals("result@(3),end@0", f([3], 2));
- assertEquals("result@(35),end@1", f([3,5], 2));
- assertEquals("result@(35),end@1", f([3,5,7], 2));
- assertEquals("result@(359),end@3", f([3,5,7,9], 2));
-})();
-
-(function TestDoForNestedWithTargetLabels() {
- function f(mode) {
- var loop = true;
- var head = "<";
- var tail = ">";
- var middle =
- "1" + do { loop1: for(; loop; head += "A") {
- "2" + do { loop2: for(; loop; head += "B") {
- "3" + do { loop3: for(; loop; head += "C") {
- "4" + do { loop4: for(; loop; head += "D") {
- "5" + do { loop5: for(; loop; head += "E") {
- "6" + do { loop6: for(; loop; head += "F") {
- loop = false;
- switch (mode) {
- case "b1": break loop1;
- case "b2": break loop2;
- case "b3": break loop3;
- case "b4": break loop4;
- case "b5": break loop5;
- case "b6": break loop6;
- case "c1": continue loop1;
- case "c2": continue loop2;
- case "c3": continue loop3;
- case "c4": continue loop4;
- case "c5": continue loop5;
- case "c6": continue loop6;
- default: "7";
- }
- }}
- }}
- }}
- }}
- }}
- }}
- return head + middle + tail;
- }
- function test() {
- assertEquals( "<1undefined>", f("b1"));
- assertEquals( "<A1undefined>", f("c1"));
- assertEquals( "<A12undefined>", f("b2"));
- assertEquals( "<BA12undefined>", f("c2"));
- assertEquals( "<BA123undefined>", f("b3"));
- assertEquals( "<CBA123undefined>", f("c3"));
- assertEquals( "<CBA1234undefined>", f("b4"));
- assertEquals( "<DCBA1234undefined>", f("c4"));
- assertEquals( "<DCBA12345undefined>", f("b5"));
- assertEquals( "<EDCBA12345undefined>", f("c5"));
- assertEquals( "<EDCBA123456undefined>", f("b6"));
- assertEquals("<FEDCBA123456undefined>", f("c6"));
- assertEquals("<FEDCBA1234567>", f("xx"));
- }
- test();
- %OptimizeFunctionOnNextCall(f);
- test();
-})();
diff --git a/deps/v8/test/mjsunit/harmony/do-expressions.js b/deps/v8/test/mjsunit/harmony/do-expressions.js
deleted file mode 100644
index f66c71eb83..0000000000
--- a/deps/v8/test/mjsunit/harmony/do-expressions.js
+++ /dev/null
@@ -1,305 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --harmony-do-expressions --allow-natives-syntax --no-always-opt --opt
-
-function returnValue(v) { return v; }
-function MyError() {}
-var global = this;
-
-function TestBasic() {
- // Looping and lexical declarations
- assertEquals(512, returnValue(do {
- let n = 2;
- for (let i = 0; i < 4; i++) n <<= 2;
- }));
-
- // Strings do the right thing
- assertEquals("spooky halloween", returnValue(do {
- "happy halloween".replace('happy', 'spooky');
- }));
-
- // Do expressions with no completion produce an undefined value
- assertEquals(undefined, returnValue(do {}));
- assertEquals(undefined, returnValue(do { var x = 99; }));
- assertEquals(undefined, returnValue(do { function f() {}; }));
- assertEquals(undefined, returnValue(do { let z = 33; }));
-
- // Propagation of exception
- assertThrows(function() {
- (do {
- throw new MyError();
- "potatoes";
- });
- }, MyError);
-
- assertThrows(function() {
- return do {
- throw new MyError();
- "potatoes";
- };
- }, MyError);
-
- // Return value within do-block overrides `return |do-expression|`
- assertEquals("inner-return", (function() {
- return "outer-return" + do {
- return "inner-return";
- "";
- };
- })());
-
- var count = 0, n = 1;
- // Breaking out |do-expression|
- assertEquals(3, (function() {
- for (var i = 0; i < 10; ++i) (count += 2 * do { if (i === 3) break; ++n });
- return i;
- })());
- // (2 * 2) + (2 * 3) + (2 * 4)
- assertEquals(18, count);
-
- // Continue in |do-expression|
- count = 0, n = 1;
- assertEquals([1, 3, 5, 7, 9], (function() {
- var values = [];
- for (var i = 0; i < 10; ++i) {
- count += 2 * (do {
- if ((i & 1) === 0) continue;
- values.push(i);
- ++n;
- }) + 1;
- }
- // (2*2) + 1 + (2*3) + 1 + (2*4) + 1 + (2*5) + 1 + (2*6) + 1
- return values;
- })());
- assertEquals(count, 45);
-
- assertThrows("(do { break; });", SyntaxError);
- assertThrows("(do { continue; });", SyntaxError);
-
- // Real-world use case for desugaring
- var array = [1, 2, 3, 4, 5], iterable = [6, 7, 8,9];
- assertEquals([1, 2, 3, 4, 5, 6, 7, 8, 9], do {
- for (var element of iterable) array.push(element);
- array;
- });
-
- // Nested do-expressions
- assertEquals(125, do { (do { (do { 5 * 5 * 5 }) }) });
-
- // Directives are not honoured
- (do {
- "use strict";
- foo = 80;
- assertEquals(foo, 80);
- });
-
- // Non-empty operand stack testing
- var O = {
- method1() {
- let x = 256;
- return x + do {
- for (var i = 0; i < 4; ++i) x += i;
- } + 17;
- },
- method2() {
- let x = 256;
- this.reset();
- return x + do {
- for (var i = 0; i < this.length(); ++i) x += this.index() * 2;
- };
- },
- _index: 0,
- index() {
- return ++this._index;
- },
- _length: 4,
- length() { return this._length; },
- reset() { this._index = 0; }
- };
- assertEquals(535, O["method" + do { 1 } + ""]());
- assertEquals(532, O["method" + do { ({ valueOf() { return "2"; } }); }]());
- assertEquals(532, O[
- do { let s = ""; for (let c of "method") s += c; } + "2"]());
-}
-TestBasic();
-
-
-function TestDeoptimization1() {
- function f(v) {
- return 88 + do {
- v.a * v.b + v.c;
- };
- }
-
- var o1 = {};
- o1.a = 10;
- o1.b = 5;
- o1.c = 50;
-
- var o2 = {};
- o2.c = 100;
- o2.a = 10;
- o2.b = 10;
-
- assertEquals(188, f(o1));
- assertEquals(188, f(o1));
- %OptimizeFunctionOnNextCall(f);
- assertEquals(188, f(o1));
- assertOptimized(f);
- assertEquals(288, f(o2));
- assertUnoptimized(f);
- assertEquals(288, f(o2));
-}
-TestDeoptimization1();
-
-
-function TestInParameterInitializers() {
- var first_name = "George";
- var last_name = "Jetson";
- function fn1(name = do { first_name + " " + last_name }) {
- return name;
- }
- assertEquals("George Jetson", fn1());
-
- var _items = [1, 2, 3, NaN, 4, 5];
- function fn2(items = do {
- let items = [];
- for (var el of _items) {
- if (el !== el) {
- items;
- break;
- }
- items.push(el), items;
- }
- }) {
- return items;
- }
- assertEquals([1, 2, 3], fn2());
-
- function thrower() { throw new MyError(); }
- function fn3(exception = do { try { thrower(); } catch (e) { e } }) {
- return exception;
- }
- assertDoesNotThrow(fn3);
- assertInstanceof(fn3(), MyError);
-
- function fn4(exception = do { throw new MyError() }) {}
- function catcher(fn) {
- try {
- fn();
- assertUnreachable("fn() initializer should throw");
- } catch (e) {
- assertInstanceof(e, MyError);
- }
- }
- catcher(fn4);
-}
-TestInParameterInitializers();
-
-
-function TestWithEval() {
- (function sloppy1() {
- assertEquals(do { eval("var x = 5"), x }, 5);
- assertEquals(x, 5);
- })();
-
- assertThrows(function strict1() {
- "use strict";
- (do { eval("var x = 5"), x }, 5);
- }, ReferenceError);
-
- assertThrows(function strict2() {
- (do { eval("'use strict'; var x = 5"), x }, 5);
- }, ReferenceError);
-}
-TestWithEval();
-
-
-function TestHoisting() {
- (do { var a = 1; });
- assertEquals(a, 1);
- assertEquals(global.a, undefined);
-
- (do {
- for (let it of [1, 2, 3, 4, 5]) {
- var b = it;
- }
- });
- assertEquals(b, 5);
- assertEquals(global.b, undefined);
-
- {
- let x = 1
-
- // TODO(caitp): ensure VariableStatements in |do-expressions| in parameter
- // initializers, are evaluated in the same VariableEnvironment as they would
- // be for eval().
- // function f1(a = do { var x = 2 }, b = x) { return b }
- // assertEquals(1, f1())
-
- // function f2(a = x, b = do { var x = 2 }) { return a }
- // assertEquals(1, f2())
-
- function f3({a = do { var x = 2 }, b = x}) { return b }
- assertEquals(2, f3({}))
-
- function f4({a = x, b = do { var x = 2 }}) { return b }
- assertEquals(undefined, f4({}))
-
- function f5(a = do { var y = 0 }) {}
- assertThrows(() => y, ReferenceError)
- }
-
- // TODO(caitp): Always block-scope function declarations in |do| expressions
- //(do {
- // assertEquals(true, inner_func());
- // function inner_func() { return true; }
- //});
- //assertThrows(function() { return innerFunc(); }, ReferenceError);
-}
-TestHoisting();
-
-
-// v8:4661
-
-function tryFinallySimple() { (do { try {} finally {} }); }
-tryFinallySimple();
-tryFinallySimple();
-tryFinallySimple();
-tryFinallySimple();
-
-var finallyRanCount = 0;
-function tryFinallyDoExpr() {
- return (do {
- try {
- throw "BOO";
- } catch (e) {
- "Caught: " + e + " (" + finallyRanCount + ")"
- } finally {
- ++finallyRanCount;
- }
- });
-}
-assertEquals("Caught: BOO (0)", tryFinallyDoExpr());
-assertEquals(1, finallyRanCount);
-assertEquals("Caught: BOO (1)", tryFinallyDoExpr());
-assertEquals(2, finallyRanCount);
-assertEquals("Caught: BOO (2)", tryFinallyDoExpr());
-assertEquals(3, finallyRanCount);
-assertEquals("Caught: BOO (3)", tryFinallyDoExpr());
-assertEquals(4, finallyRanCount);
-
-
-function TestOSR() {
- var numbers = do {
- let nums = [];
- for (let i = 0; i < 1000; ++i) {
- let value = (Math.random() * 100) | 0;
- nums.push(value === 0 ? 1 : value), nums;
- }
- };
- assertEquals(numbers.length, 1000);
-}
-
-for (var i = 0; i < 64; ++i) TestOSR();
diff --git a/deps/v8/test/mjsunit/harmony/for-await-of.js b/deps/v8/test/mjsunit/harmony/for-await-of.js
index e23758a5e1..1b4fcd701a 100644
--- a/deps/v8/test/mjsunit/harmony/for-await-of.js
+++ b/deps/v8/test/mjsunit/harmony/for-await-of.js
@@ -1257,7 +1257,7 @@ let testFailure;
testFailure = error;
});
-%RunMicrotasks();
+%PerformMicrotaskCheckpoint();
if (testFailed) {
throw testFailure;
diff --git a/deps/v8/test/mjsunit/harmony/generators.js b/deps/v8/test/mjsunit/harmony/generators.js
index a4fc1c4aa4..b98164c135 100644
--- a/deps/v8/test/mjsunit/harmony/generators.js
+++ b/deps/v8/test/mjsunit/harmony/generators.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-do-expressions --allow-natives-syntax
+// Flags: --allow-natives-syntax
function MaybeOptimizeOrDeoptimize(f) {
@@ -429,14 +429,6 @@ function Throw(generator, ...args) {
}
{
- function* foo() { yield 2; (do {yield 3}) + 42; yield 4 }
- g = foo();
- assertEquals({value: 2, done: false}, Next(g));
- assertEquals({value: 3, done: false}, Next(g));
- assertEquals({value: 4, done: false}, Next(g));
-}
-
-{
function* foo() { yield 2; return (yield 3) + 42; yield 4 }
g = foo();
assertEquals({value: 2, done: false}, Next(g));
diff --git a/deps/v8/test/mjsunit/harmony/hashbang-eval.js b/deps/v8/test/mjsunit/harmony/hashbang-eval.js
new file mode 100644
index 0000000000..c5040f7bee
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/hashbang-eval.js
@@ -0,0 +1,11 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-hashbang
+
+// Hashbang syntax is not allowed in eval.
+assertThrows("#!", SyntaxError);
+assertThrows("#!\n", SyntaxError);
+assertThrows("#!---IGNORED---", SyntaxError);
+assertThrows("#!---IGNORED---\n", SyntaxError);
diff --git a/deps/v8/test/mjsunit/harmony/import-from-compilation-errored.js b/deps/v8/test/mjsunit/harmony/import-from-compilation-errored.js
index 3c99498d0e..49570b51de 100644
--- a/deps/v8/test/mjsunit/harmony/import-from-compilation-errored.js
+++ b/deps/v8/test/mjsunit/harmony/import-from-compilation-errored.js
@@ -7,7 +7,7 @@
var error1, error2;
import('modules-skip-12.js').catch(e => error1 = e);
import('modules-skip-12.js').catch(e => error2 = e);
-%RunMicrotasks();
+%PerformMicrotaskCheckpoint();
assertEquals(error1, error2);
assertInstanceof(error1, SyntaxError);
diff --git a/deps/v8/test/mjsunit/harmony/import-from-evaluation-errored.js b/deps/v8/test/mjsunit/harmony/import-from-evaluation-errored.js
index 3623091777..87dbc0a6aa 100644
--- a/deps/v8/test/mjsunit/harmony/import-from-evaluation-errored.js
+++ b/deps/v8/test/mjsunit/harmony/import-from-evaluation-errored.js
@@ -7,7 +7,7 @@
var error1, error2;
import('modules-skip-11.js').catch(e => error1 = e);
import('modules-skip-11.js').catch(e => error2 = e);
-%RunMicrotasks();
+%PerformMicrotaskCheckpoint();
assertEquals(error1, error2);
assertEquals(typeof error1, "symbol");
diff --git a/deps/v8/test/mjsunit/harmony/import-from-fetch-errored.js b/deps/v8/test/mjsunit/harmony/import-from-fetch-errored.js
index f3db881eb2..6d6510fcde 100644
--- a/deps/v8/test/mjsunit/harmony/import-from-fetch-errored.js
+++ b/deps/v8/test/mjsunit/harmony/import-from-fetch-errored.js
@@ -7,7 +7,7 @@
var error1, error2;
import('no-such-file').catch(e => error1 = e);
import('no-such-file').catch(e => error2 = e);
-%RunMicrotasks();
+%PerformMicrotaskCheckpoint();
assertEquals(error1, error2);
assertEquals(typeof error1, "string");
diff --git a/deps/v8/test/mjsunit/harmony/import-from-instantiation-errored.js b/deps/v8/test/mjsunit/harmony/import-from-instantiation-errored.js
index 2a481d5965..2cdbaaea32 100644
--- a/deps/v8/test/mjsunit/harmony/import-from-instantiation-errored.js
+++ b/deps/v8/test/mjsunit/harmony/import-from-instantiation-errored.js
@@ -7,7 +7,7 @@
var error1, error2;
import('modules-skip-10.js').catch(e => error1 = e);
import('modules-skip-10.js').catch(e => error2 = e);
-%RunMicrotasks();
+%PerformMicrotaskCheckpoint();
assertEquals(error1, error2);
assertInstanceof(error1, SyntaxError);
diff --git a/deps/v8/test/mjsunit/harmony/modules-import-1.js b/deps/v8/test/mjsunit/harmony/modules-import-1.js
index 7fd567f56f..f62d4d7b32 100644
--- a/deps/v8/test/mjsunit/harmony/modules-import-1.js
+++ b/deps/v8/test/mjsunit/harmony/modules-import-1.js
@@ -7,6 +7,6 @@
var life;
import('modules-skip-1.js').then(namespace => life = namespace.life());
-%RunMicrotasks();
+%PerformMicrotaskCheckpoint();
assertEquals(42, life);
diff --git a/deps/v8/test/mjsunit/harmony/modules-import-10.js b/deps/v8/test/mjsunit/harmony/modules-import-10.js
index 68e000a131..eda4aaf7f9 100644
--- a/deps/v8/test/mjsunit/harmony/modules-import-10.js
+++ b/deps/v8/test/mjsunit/harmony/modules-import-10.js
@@ -9,7 +9,7 @@ import('modules-skip-6.js').then(namespace => life = namespace.life);
assertEquals(undefined, Object.life);
-%RunMicrotasks();
+%PerformMicrotaskCheckpoint();
assertEquals(42, Object.life);
assertEquals("42", life);
diff --git a/deps/v8/test/mjsunit/harmony/modules-import-11.js b/deps/v8/test/mjsunit/harmony/modules-import-11.js
index a5afa10048..ffba6a0722 100644
--- a/deps/v8/test/mjsunit/harmony/modules-import-11.js
+++ b/deps/v8/test/mjsunit/harmony/modules-import-11.js
@@ -18,5 +18,5 @@ async function test() {
}
test();
-%RunMicrotasks();
+%PerformMicrotaskCheckpoint();
assertTrue(ran);
diff --git a/deps/v8/test/mjsunit/harmony/modules-import-12.js b/deps/v8/test/mjsunit/harmony/modules-import-12.js
index bcb8569221..d898c984ad 100644
--- a/deps/v8/test/mjsunit/harmony/modules-import-12.js
+++ b/deps/v8/test/mjsunit/harmony/modules-import-12.js
@@ -15,5 +15,5 @@ async function test() {
}
test();
-%RunMicrotasks();
+%PerformMicrotaskCheckpoint();
assertTrue(ran);
diff --git a/deps/v8/test/mjsunit/harmony/modules-import-13.js b/deps/v8/test/mjsunit/harmony/modules-import-13.js
index 1cec1cce61..52518350ba 100644
--- a/deps/v8/test/mjsunit/harmony/modules-import-13.js
+++ b/deps/v8/test/mjsunit/harmony/modules-import-13.js
@@ -19,7 +19,7 @@ async function test1() {
}
test1();
-%RunMicrotasks();
+%PerformMicrotaskCheckpoint();
assertTrue(ran);
ran = false;
@@ -36,5 +36,5 @@ async function test2() {
}
test2();
-%RunMicrotasks();
+%PerformMicrotaskCheckpoint();
assertTrue(ran);
diff --git a/deps/v8/test/mjsunit/harmony/modules-import-14.js b/deps/v8/test/mjsunit/harmony/modules-import-14.js
index 3849c54c59..32b307eb3b 100644
--- a/deps/v8/test/mjsunit/harmony/modules-import-14.js
+++ b/deps/v8/test/mjsunit/harmony/modules-import-14.js
@@ -22,5 +22,5 @@ async function test() {
}
test();
-%RunMicrotasks();
+%PerformMicrotaskCheckpoint();
assertTrue(ran);
diff --git a/deps/v8/test/mjsunit/harmony/modules-import-15.js b/deps/v8/test/mjsunit/harmony/modules-import-15.js
index 32255ce980..d041add3db 100644
--- a/deps/v8/test/mjsunit/harmony/modules-import-15.js
+++ b/deps/v8/test/mjsunit/harmony/modules-import-15.js
@@ -17,7 +17,7 @@ async function test1() {
}
test1();
-%RunMicrotasks();
+%PerformMicrotaskCheckpoint();
assertTrue(ran);
ran = false;
@@ -37,7 +37,7 @@ async function test2() {
}
test2();
-%RunMicrotasks();
+%PerformMicrotaskCheckpoint();
assertTrue(ran);
ran = false;
@@ -53,5 +53,5 @@ async function test3() {
}
test3();
-%RunMicrotasks();
+%PerformMicrotaskCheckpoint();
assertTrue(ran);
diff --git a/deps/v8/test/mjsunit/harmony/modules-import-16.js b/deps/v8/test/mjsunit/harmony/modules-import-16.js
index 94510d48fc..18ad445a84 100644
--- a/deps/v8/test/mjsunit/harmony/modules-import-16.js
+++ b/deps/v8/test/mjsunit/harmony/modules-import-16.js
@@ -12,7 +12,7 @@ var body = "import('modules-skip-1.js').then(ns => { x = ns.life();" +
var func = new Function(body);
func();
-%RunMicrotasks();
+%PerformMicrotaskCheckpoint();
assertEquals(42, x);
assertTrue(ran);
@@ -21,7 +21,7 @@ var body = "import('modules-skip-1.js').then(ns => { x = ns.life();" +
" ran = true;} ).catch(err => %AbortJS(err))"
eval("var func = new Function(body); func();");
-%RunMicrotasks();
+%PerformMicrotaskCheckpoint();
assertEquals(42, x);
assertTrue(ran);
@@ -31,6 +31,6 @@ var body = "eval(import('modules-skip-1.js').then(ns => { x = ns.life();" +
var func = new Function(body);
func();
-%RunMicrotasks();
+%PerformMicrotaskCheckpoint();
assertEquals(42, x);
assertTrue(ran);
diff --git a/deps/v8/test/mjsunit/harmony/modules-import-17.js b/deps/v8/test/mjsunit/harmony/modules-import-17.js
new file mode 100644
index 0000000000..606ebcd385
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/modules-import-17.js
@@ -0,0 +1,11 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --harmony-namespace-exports
+
+var ns;
+import('modules-skip-13.js').then(x => ns = x);
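+// %PerformMicrotaskCheckpoint() drains the pending microtasks, so the import's
+// then-callback has populated ns before the assertions below run.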
+%PerformMicrotaskCheckpoint();
+assertEquals(42, ns.default);
+assertEquals(ns, ns.self);
diff --git a/deps/v8/test/mjsunit/harmony/modules-import-2.js b/deps/v8/test/mjsunit/harmony/modules-import-2.js
index f50a5c8c53..a3fe0bc601 100644
--- a/deps/v8/test/mjsunit/harmony/modules-import-2.js
+++ b/deps/v8/test/mjsunit/harmony/modules-import-2.js
@@ -12,7 +12,7 @@ import('modules-skip-2.js').catch(err => msg = err.message);
assertEquals(undefined, life);
assertEquals(undefined, msg);
-%RunMicrotasks();
+%PerformMicrotaskCheckpoint();
assertEquals(42, life);
assertEquals('42 is not the answer', msg);
diff --git a/deps/v8/test/mjsunit/harmony/modules-import-3.js b/deps/v8/test/mjsunit/harmony/modules-import-3.js
index 669f820fd7..d8cbe2a228 100644
--- a/deps/v8/test/mjsunit/harmony/modules-import-3.js
+++ b/deps/v8/test/mjsunit/harmony/modules-import-3.js
@@ -17,6 +17,6 @@ async function foo () {
foo();
-%RunMicrotasks();
+%PerformMicrotaskCheckpoint();
assertTrue(ran);
diff --git a/deps/v8/test/mjsunit/harmony/modules-import-5.js b/deps/v8/test/mjsunit/harmony/modules-import-5.js
index d9237828fe..c868a0c63f 100644
--- a/deps/v8/test/mjsunit/harmony/modules-import-5.js
+++ b/deps/v8/test/mjsunit/harmony/modules-import-5.js
@@ -9,5 +9,5 @@ let x = 'modules-skip-1.js';
import(x).then(namespace => life = namespace.life());
x = 'modules-skip-2.js';
-%RunMicrotasks();
+%PerformMicrotaskCheckpoint();
assertEquals(42, life);
diff --git a/deps/v8/test/mjsunit/harmony/modules-import-6.js b/deps/v8/test/mjsunit/harmony/modules-import-6.js
index 6a5b7c8b5b..02fdf1b5fa 100644
--- a/deps/v8/test/mjsunit/harmony/modules-import-6.js
+++ b/deps/v8/test/mjsunit/harmony/modules-import-6.js
@@ -25,5 +25,5 @@ async function test() {
test();
-%RunMicrotasks();
+%PerformMicrotaskCheckpoint();
assertTrue(ran);
diff --git a/deps/v8/test/mjsunit/harmony/modules-import-7.js b/deps/v8/test/mjsunit/harmony/modules-import-7.js
index d0105112f4..8df8ddcdb2 100644
--- a/deps/v8/test/mjsunit/harmony/modules-import-7.js
+++ b/deps/v8/test/mjsunit/harmony/modules-import-7.js
@@ -19,6 +19,6 @@ async function test() {
test();
-%RunMicrotasks();
+%PerformMicrotaskCheckpoint();
assertTrue(ran);
diff --git a/deps/v8/test/mjsunit/harmony/modules-import-8.js b/deps/v8/test/mjsunit/harmony/modules-import-8.js
index 4417f0eb78..ac21a8c9e9 100644
--- a/deps/v8/test/mjsunit/harmony/modules-import-8.js
+++ b/deps/v8/test/mjsunit/harmony/modules-import-8.js
@@ -43,7 +43,7 @@ async function test1() {
test1();
-%RunMicrotasks();
+%PerformMicrotaskCheckpoint();
assertTrue(ran);
@@ -66,7 +66,7 @@ async function test2() {
test2();
-%RunMicrotasks();
+%PerformMicrotaskCheckpoint();
assertTrue(ran);
@@ -86,6 +86,6 @@ async function test3() {
test3();
-%RunMicrotasks();
+%PerformMicrotaskCheckpoint();
assertTrue(ran);
diff --git a/deps/v8/test/mjsunit/harmony/modules-import-9.js b/deps/v8/test/mjsunit/harmony/modules-import-9.js
index 6794311305..664416f0eb 100644
--- a/deps/v8/test/mjsunit/harmony/modules-import-9.js
+++ b/deps/v8/test/mjsunit/harmony/modules-import-9.js
@@ -18,5 +18,5 @@ async function test() {
}
test();
-%RunMicrotasks();
+%PerformMicrotaskCheckpoint();
assertTrue(ran);
diff --git a/deps/v8/test/mjsunit/harmony/modules-skip-13.js b/deps/v8/test/mjsunit/harmony/modules-skip-13.js
new file mode 100644
index 0000000000..d823a283f8
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/modules-skip-13.js
@@ -0,0 +1,6 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
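+// "export * as self" re-exports this module's own namespace object under the
+// name "self"; modules-import-17.js asserts that ns.self === ns.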
+export * as self from "./modules-skip-13.js";
+export default 42;
diff --git a/deps/v8/test/mjsunit/harmony/object-fromentries.js b/deps/v8/test/mjsunit/harmony/object-fromentries.js
new file mode 100644
index 0000000000..8bbd6317c6
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/object-fromentries.js
@@ -0,0 +1,439 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-object-from-entries
+
+const fromEntries = Object.fromEntries;
+const ObjectPrototype = Object.prototype;
+const ObjectPrototypeHasOwnProperty = ObjectPrototype.hasOwnProperty;
+function hasOwnProperty(O, Name) {
+ if (O === undefined || O === null) return false;
+ return ObjectPrototypeHasOwnProperty.call(O, Name);
+}
+
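+// Several tests below distinguish V8's fast path for Object.fromEntries (an
+// array of simple entry pairs) from the generic iterator-based slow path.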
+let test = {
+ methodExists() {
+ assertTrue(hasOwnProperty(Object, "fromEntries"));
+ assertEquals("function", typeof Object.fromEntries);
+ },
+
+ methodLength() {
+ assertEquals(1, Object.fromEntries.length);
+ },
+
+ methodName() {
+ assertEquals("fromEntries", Object.fromEntries.name);
+ },
+
+ methodPropertyDescriptor() {
+ let descriptor = Object.getOwnPropertyDescriptor(Object, "fromEntries");
+ assertFalse(descriptor.enumerable);
+ assertTrue(descriptor.configurable);
+ assertTrue(descriptor.writable);
+ assertEquals(descriptor.value, Object.fromEntries);
+ },
+
+ exceptionIfNotCoercible() {
+ assertThrows(() => fromEntries(null), TypeError);
+ assertThrows(() => fromEntries(undefined), TypeError);
+ },
+
+ exceptionIfNotIterable() {
+ let nonIterable = [1, 2, 3, 4, 5];
+ Object.defineProperty(nonIterable, Symbol.iterator, { value: undefined });
+ assertThrows(() => fromEntries(nonIterable), TypeError);
+ },
+
+ exceptionIfGetIteratorThrows() {
+ let iterable = [1, 2, 3, 4, 5];
+ class ThrewDuringGet {};
+ Object.defineProperty(iterable, Symbol.iterator, {
+ get() { throw new ThrewDuringGet(); }
+ });
+ assertThrows(() => fromEntries(iterable), ThrewDuringGet);
+ },
+
+ exceptionIfCallIteratorThrows() {
+ let iterable = [1, 2, 3, 4, 5];
+ class ThrewDuringCall {};
+ iterable[Symbol.iterator] = function() {
+ throw new ThrewDuringCall();
+ }
+ assertThrows(() => fromEntries(iterable), ThrewDuringCall);
+ },
+
+ exceptionIfIteratorNextThrows() {
+ let iterable = [1, 2, 3, 4, 5];
+ class ThrewDuringIteratorNext {}
+ iterable[Symbol.iterator] = function() {
+ return {
+ next() { throw new ThrewDuringIteratorNext; },
+ return() {
+ throw new Error(
+ "IteratorClose must not be performed if IteratorStep throws");
+ },
+ }
+ }
+ assertThrows(() => fromEntries(iterable), ThrewDuringIteratorNext);
+ },
+
+ exceptionIfIteratorCompleteThrows() {
+ let iterable = [1, 2, 3, 4, 5];
+ class ThrewDuringIteratorComplete {}
+ iterable[Symbol.iterator] = function() {
+ return {
+ next() {
+ return {
+ get value() { throw new Error(
+ "IteratorValue must not be performed before IteratorComplete");
+ },
+ get done() {
+ throw new ThrewDuringIteratorComplete();
+ }
+ }
+ },
+ return() {
+ throw new Error(
+ "IteratorClose must not be performed if IteratorStep throws");
+ },
+ }
+ }
+ assertThrows(() => fromEntries(iterable), ThrewDuringIteratorComplete);
+ },
+
+ exceptionIfEntryIsNotObject() {
+ {
+ // Fast path (Objects/Smis)
+ let iterables = [[null], [undefined], [1], [NaN], [false], [Symbol()],
+ [""]];
+ for (let iterable of iterables) {
+ assertThrows(() => fromEntries(iterable), TypeError);
+ }
+ }
+ {
+ // Fast path (Doubles)
+ let iterable = [3.7, , , 3.6, 1.1, -0.4];
+ assertThrows(() => fromEntries(iterable), TypeError);
+ }
+ {
+ // Slow path
+ let i = 0;
+ let values = [null, undefined, 1, NaN, false, Symbol(), ""];
+ let iterable = {
+ [Symbol.iterator]() { return this; },
+ next() {
+ return {
+ done: i >= values.length,
+ value: values[i++],
+ }
+ },
+ };
+ for (let k = 0; k < values.length; ++k) {
+ assertThrows(() => fromEntries(iterable), TypeError);
+ }
+ assertEquals({}, fromEntries(iterable));
+ }
+ },
+
+ returnIfEntryIsNotObject() {
+ // Only observable/verifiable in the slow path :(
+ let i = 0;
+ let didCallReturn = false;
+ let values = [null, undefined, 1, NaN, false, Symbol(), ""];
+ let iterable = {
+ [Symbol.iterator]() { return this; },
+ next() {
+ return {
+ done: i >= values.length,
+ value: values[i++],
+ }
+ },
+ return() { didCallReturn = true; throw new Error("Unused!"); }
+ };
+ for (let k = 0; k < values.length; ++k) {
+ didCallReturn = false;
+ assertThrows(() => fromEntries(iterable), TypeError);
+ assertTrue(didCallReturn);
+ }
+ assertEquals({}, fromEntries(iterable));
+ },
+
+ returnIfEntryKeyAccessorThrows() {
+ class ThrewDuringKeyAccessor {};
+ let entries = [{ get 0() { throw new ThrewDuringKeyAccessor(); },
+ get 1() { throw new Error("Unreachable!"); } }];
+ let didCallReturn = false;
+ let iterator = entries[Symbol.iterator]();
+ iterator.return = function() {
+ didCallReturn = true;
+ throw new Error("Unused!");
+ }
+ assertThrows(() => fromEntries(iterator), ThrewDuringKeyAccessor);
+ assertTrue(didCallReturn);
+ },
+
+ returnIfEntryValueAccessorThrows() {
+ class ThrewDuringValueAccessor {};
+ let entries = [{ get 1() { throw new ThrewDuringValueAccessor(); },
+ 0: "key",
+ }];
+ let didCallReturn = false;
+ let iterator = entries[Symbol.iterator]();
+ iterator.return = function() {
+ didCallReturn = true;
+ throw new Error("Unused!");
+ };
+ assertThrows(() => fromEntries(iterator), ThrewDuringValueAccessor);
+ assertTrue(didCallReturn);
+ },
+
+ returnIfKeyToStringThrows() {
+ class ThrewDuringKeyToString {};
+ let operations = [];
+ let entries = [{
+ get 0() {
+ operations.push("[[Get]] key");
+ return {
+ toString() {
+ operations.push("toString(key)");
+ throw new ThrewDuringKeyToString();
+ },
+ valueOf() {
+ operations.push("valueOf(key)");
+ }
+ };
+ },
+ get 1() {
+ operations.push("[[Get]] value");
+ return "value";
+ },
+ }];
+
+ let iterator = entries[Symbol.iterator]();
+ iterator.return = function() {
+ operations.push("IteratorClose");
+ throw new Error("Unused!");
+ };
+ assertThrows(() => fromEntries(iterator), ThrewDuringKeyToString);
+ assertEquals([
+ "[[Get]] key",
+ "[[Get]] value",
+ "toString(key)",
+ "IteratorClose",
+ ], operations);
+ },
+
+ throwsIfIteratorValueThrows() {
+ let iterable = [1, 2, 3, 4, 5];
+ class ThrewDuringIteratorValue {}
+ iterable[Symbol.iterator] = function() {
+ return {
+ next() {
+ return {
+ get value() { throw new ThrewDuringIteratorValue(); },
+ get done() { return false; }
+ }
+ },
+ return() {
+ throw new Error(
+ "IteratorClose must not be performed if IteratorStep throws");
+ },
+ }
+ }
+ assertThrows(() => fromEntries(iterable), ThrewDuringIteratorValue);
+ },
+
+ emptyIterable() {
+ let iterables = [[], new Set(), new Map()];
+ for (let iterable of iterables) {
+ let result = fromEntries(iterable);
+ assertEquals({}, result);
+ assertEquals(ObjectPrototype, result.__proto__);
+ }
+ },
+
+ keyOrderFastPath() {
+ let entries = [
+ ["z", 1],
+ ["y", 2],
+ ["x", 3],
+ ["y", 4],
+ [100, 0],
+ ];
+ let result = fromEntries(entries);
+ assertEquals({
+ 100: 0,
+ z: 1,
+ y: 4,
+ x: 3,
+ }, result);
+ assertEquals(["100", "z", "y", "x"], Object.keys(result));
+ },
+
+ keyOrderSlowPath() {
+ let entries = [
+ ["z", 1],
+ ["y", 2],
+ ["x", 3],
+ ["y", 4],
+ [100, 0],
+ ];
+ let i = 0;
+ let iterable = {
+ [Symbol.iterator]() { return this; },
+ next() {
+ return {
+ done: i >= entries.length,
+ value: entries[i++]
+ }
+ },
+ return() { throw new Error("Unreachable!"); }
+ };
+ let result = fromEntries(iterable);
+ assertEquals({
+ 100: 0,
+ z: 1,
+ y: 4,
+ x: 3,
+ }, result);
+ assertEquals(["100", "z", "y", "x"], Object.keys(result));
+ },
+
+ doesNotUseIteratorForKeyValuePairFastCase() {
+ class Entry {
+ constructor(k, v) {
+ this[0] = k;
+ this[1] = v;
+ }
+ get [Symbol.iterator]() {
+ throw new Error("Should not load Symbol.iterator from Entry!");
+ }
+ }
+ function e(k, v) { return new Entry(k, v); }
+ let entries = [e(100, 0), e('z', 1), e('y', 2), e('x', 3), e('y', 4)];
+ let result = fromEntries(entries);
+ assertEquals({
+ 100: 0,
+ z: 1,
+ y: 4,
+ x: 3,
+ }, result);
+ },
+
+ doesNotUseIteratorForKeyValuePairSlowCase() {
+ class Entry {
+ constructor(k, v) {
+ this[0] = k;
+ this[1] = v;
+ }
+ get [Symbol.iterator]() {
+ throw new Error("Should not load Symbol.iterator from Entry!");
+ }
+ }
+ function e(k, v) { return new Entry(k, v); }
+ let entries = new Set(
+ [e(100, 0), e('z', 1), e('y', 2), e('x', 3), e('y', 4)]);
+ let result = fromEntries(entries);
+ assertEquals({
+ 100: 0,
+ z: 1,
+ y: 4,
+ x: 3,
+ }, result);
+ },
+
+ createDataPropertyFastCase() {
+ Object.defineProperty(ObjectPrototype, "property", {
+ configurable: true,
+ get() { throw new Error("Should not invoke getter on prototype!"); },
+ set() { throw new Error("Should not invoke setter on prototype!"); },
+ });
+
+ let entries = [["property", "value"]];
+ let result = fromEntries(entries);
+ assertEquals(result.property, "value");
+ delete ObjectPrototype.property;
+ },
+
+ createDataPropertySlowCase() {
+ Object.defineProperty(ObjectPrototype, "property", {
+ configurable: true,
+ get() { throw new Error("Should not invoke getter on prototype!"); },
+ set() { throw new Error("Should not invoke setter on prototype!"); },
+ });
+
+ let entries = new Set([["property", "value"]]);
+ let result = fromEntries(entries);
+ assertEquals(result.property, "value");
+ delete ObjectPrototype.property;
+ },
+
+ keyToPrimitiveMutatesArrayInFastCase() {
+ let mySymbol = Symbol();
+ let entries = [[0, 1], ["a", 2], [{
+ [Symbol.toPrimitive]() {
+ // The fast path should bail out if a key is a JSReceiver, otherwise
+ // assumptions about the structure of the iterable can change. If the
+ // fast path doesn't bail out, the 4th key would be "undefined".
+ delete entries[3][0];
+ entries[3].__proto__ = { 0: "shfifty", };
+ return mySymbol;
+ },
+ }, 3], [3, 4]];
+ let result = fromEntries(entries);
+ assertEquals({
+ 0: 1,
+ "a": 2,
+ [mySymbol]: 3,
+ "shfifty": 4,
+ }, result);
+ assertEquals(["0", "a", "shfifty", mySymbol], Reflect.ownKeys(result));
+ },
+
+ keyToStringMutatesArrayInFastCase() {
+ let mySymbol = Symbol();
+ let entries = [[mySymbol, 1], [0, 2], [{
+ toString() {
+ delete entries[3][0];
+ entries[3].__proto__ = { 0: "shfifty", };
+ return "z";
+ },
+ valueOf() { throw new Error("Unused!"); }
+ }, 3], [3, 4]];
+ let result = fromEntries(entries);
+ assertEquals({
+ [mySymbol]: 1,
+ 0: 2,
+ "z": 3,
+ "shfifty": 4,
+ }, result);
+ assertEquals(["0", "z", "shfifty", mySymbol], Reflect.ownKeys(result));
+ },
+
+ keyValueOfMutatesArrayInFastCase() {
+ let mySymbol = Symbol();
+ let entries = [[mySymbol, 1], ["z", 2], [{
+ toString: undefined,
+ valueOf() {
+ delete entries[3][0];
+ entries[3].__proto__ = { 0: "shfifty", };
+ return 0;
+ },
+ }, 3], [3, 4]];
+ let result = fromEntries(entries);
+ assertEquals({
+ [mySymbol]: 1,
+ "z": 2,
+ 0: 3,
+ "shfifty": 4,
+ }, result);
+ assertEquals(["0", "z", "shfifty", mySymbol], Reflect.ownKeys(result));
+ },
+}
+
+for (let t of Reflect.ownKeys(test)) {
+ test[t]();
+}
diff --git a/deps/v8/test/mjsunit/harmony/private-fields-static.js b/deps/v8/test/mjsunit/harmony/private-fields-static.js
new file mode 100644
index 0000000000..e4019cc32e
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/private-fields-static.js
@@ -0,0 +1,356 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-private-fields --allow-natives-syntax
+
+
+"use strict";
+
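+// Static private fields are installed on the class constructor itself; reading
+// this.#a with a receiver that lacks the private name throws a TypeError.
+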
+{
+ class C {
+ static #a;
+ static getA() { return this.#a; }
+ }
+
+ assertEquals(undefined, C.a);
+ assertEquals(undefined, C.getA());
+
+ let c = new C;
+ assertEquals(undefined, c.a);
+}
+
+{
+ class C {
+ static #a = 1;
+ static getA() { return this.#a; }
+ }
+
+ assertEquals(undefined, C.a);
+ assertEquals(1, C.getA());
+
+ let c = new C;
+ assertEquals(undefined, c.a);
+}
+
+{
+ class C {
+ static #a = 1;
+ static #b = this.#a;
+ static getB() { return this.#b; }
+ }
+
+ assertEquals(1, C.getB());
+
+ let c = new C;
+ assertEquals(undefined, c.getB);
+}
+
+{
+ class C {
+ static #a = 1;
+ static getA() { return this.#a; }
+ constructor() {
+ assertThrows(() => this.#a, TypeError);
+ C.#a = 2;
+ }
+ }
+
+ assertEquals(1, C.getA());
+
+ let c = new C;
+ assertThrows(() => C.getA.call(c), TypeError);
+ assertEquals(2, C.getA());
+}
+
+{
+ class C {
+ static #a = this;
+ static #b = () => this;
+ static getA() { return this.#a; }
+ static getB() { return this.#b; }
+ }
+
+ assertSame(C, C.getA());
+ assertSame(C, C.getB()());
+}
+
+{
+ class C {
+ static #a = this;
+ static #b = function() { return this; };
+ static getA() { return this.#a; }
+ static getB() { return this.#b; }
+ }
+
+ assertSame(C, C.getA());
+ assertSame(C, C.getB().call(C));
+ assertSame(undefined, C.getB()());
+}
+
+
+{
+ class C {
+ static #a = function() { return 1 };
+ static getA() {return this.#a;}
+ }
+
+ assertEquals('#a', C.getA().name);
+}
+
+{
+ let d = function() { return new.target; }
+ class C {
+ static #c = d;
+ static getC() { return this.#c; }
+ }
+
+ assertEquals(undefined, C.getC()());
+ assertSame(new d, new (C.getC()));
+}
+
+{
+ class C {
+ static #a = 1;
+ static getA(instance) { return instance.#a; }
+ }
+
+ class B { }
+
+ assertEquals(undefined, C.a);
+ assertEquals(1, C.getA(C));
+ assertThrows(() => C.getA(B), TypeError);
+}
+
+{
+ class A {
+ static #a = 1;
+ static getA() { return this.#a; }
+ }
+
+ class B extends A {}
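+ // this.#a evaluates with this === B, which never had the private name #a.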
+ assertThrows(() => B.getA(), TypeError);
+}
+
+{
+ class A {
+ static #a = 1;
+ static getA() { return A.#a; }
+ }
+
+ class B extends A {}
+ assertSame(1, B.getA());
+}
+
+{
+ let prototypeLookup = false;
+ class A {
+ static set a(val) {
+ prototypeLookup = true;
+ }
+
+ static get a() { return undefined; }
+ }
+
+ class C extends A {
+ static #a = 1;
+ static getA() { return this.#a; }
+ }
+
+ assertEquals(1, C.getA());
+ assertEquals(false, prototypeLookup);
+}
+
+{
+ class A {
+ static a = 1;
+ }
+
+ class B extends A {
+ static #b = this.a;
+ static getB() { return this.#b; }
+ }
+
+ assertEquals(1, B.getB());
+}
+
+{
+ class A {
+ static #a = 1;
+ static getA() { return this.#a; }
+ }
+
+ class B extends A {
+ static getA() { return super.getA(); }
+ }
+
+ assertThrows(() => B.getA(), TypeError);
+}
+
+{
+ class A {
+ static #a = 1;
+ static getA() { return this.#a;}
+ }
+
+ class B extends A {
+ static #a = 2;
+ static get_A() { return this.#a;}
+ }
+
+ assertEquals(1, A.getA());
+ assertThrows(() => B.getA(), TypeError);
+ assertEquals(2, B.get_A());
+}
+
+{
+ let foo = undefined;
+ class A {
+ static #a = (function() { foo = 1; })();
+ }
+
+ assertEquals(1, foo);
+}
+
+{
+ let foo = undefined;
+ class A extends class {} {
+ static #a = (function() { foo = 1; })();
+ }
+
+ assertEquals(1, foo);
+}
+
+{
+ function makeClass() {
+ return class {
+ static #a;
+ static setA(val) { this.#a = val; }
+ static getA() { return this.#a; }
+ }
+ }
+
+ let classA = makeClass();
+ let classB = makeClass();
+
+ assertEquals(undefined, classA.getA());
+ assertEquals(undefined, classB.getA());
+
+ classA.setA(3);
+ assertEquals(3, classA.getA());
+ assertEquals(undefined, classB.getA());
+
+ classB.setA(5);
+ assertEquals(3, classA.getA());
+ assertEquals(5, classB.getA());
+
+ assertThrows(() => classA.getA.call(classB), TypeError);
+ assertThrows(() => classB.getA.call(classA), TypeError);
+}
+
+{
+ let value = undefined;
+
+ new class {
+ static #a = 1;
+ static getA() { return this.#a; }
+
+ constructor() {
+ new class C {
+ static #a = 2;
+ constructor() {
+ value = C.#a;
+ }
+ }
+ }
+ }
+
+ assertEquals(2, value);
+}
+
+{
+ class A {
+ static #a = 1;
+ static b = class {
+ static getA() { return this.#a; }
+ static get_A(val) { return val.#a; }
+ }
+ }
+
+ assertEquals(1, A.b.getA.call(A));
+ assertEquals(1, A.b.get_A(A));
+}
+
+{
+ assertThrows(() => class { static b = this.#a; static #a = 1 }, TypeError);
+}
+
+{
+ let symbol = Symbol();
+
+ class C {
+ static #a = 1;
+ static [symbol] = 1;
+ static getA() { return this.#a; }
+ static setA(val) { this.#a = val; }
+ }
+
+ var p = new Proxy(C, {
+ get: function(target, name) {
+ if (typeof(name) === 'symbol') {
+ assertFalse(%SymbolIsPrivate(name));
+ }
+ return target[name];
+ }
+ });
+
+ assertThrows(() => p.getA(), TypeError);
+ assertThrows(() => p.setA(1), TypeError);
+ assertEquals(1, p[symbol]);
+}
+
+{
+ class C {
+ static #b = Object.freeze(this);
+ static getA() { return this.#a; }
+ static #a = 1;
+ }
+
+ assertEquals(1, C.getA());
+}
+
+{
+ class C {
+ static #a = 1;
+ static getA() { return eval('this.#a'); }
+ }
+
+ assertEquals(1, C.getA());
+}
+
+{
+ var C;
+ eval('C = class { static #a = 1; static getA() { return eval(\'this.#a\'); }}');
+
+ assertEquals(1, C.getA());
+}
+
+{
+ class C {
+ static #a = 1;
+ static getA() { return this.#a; }
+ static setA() { eval('this.#a = 4'); }
+ }
+
+ assertEquals(1, C.getA());
+ C.setA();
+ assertEquals(4, C.getA());
+}
+
+{
+ class C {
+ static getA() { return eval('this.#a'); }
+ }
+
+ assertThrows(() => C.getA(), SyntaxError);
+}
diff --git a/deps/v8/test/mjsunit/harmony/public-static-class-fields.js b/deps/v8/test/mjsunit/harmony/public-static-class-fields.js
index 3de3e2d9d2..0334a87786 100644
--- a/deps/v8/test/mjsunit/harmony/public-static-class-fields.js
+++ b/deps/v8/test/mjsunit/harmony/public-static-class-fields.js
@@ -457,3 +457,16 @@ y()();
assertEquals(1, X.p);
}
+
+{
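+ // In a static field initializer, `this` is the class constructor, so
+ // `this.name` observes the class's inferred name.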
+ let p = { z: class { static y = this.name } }
+ assertEquals(p.z.y, 'z');
+
+ let q = { ["z"]: class { static y = this.name } }
+ assertEquals(q.z.y, 'z');
+
+ const C = class {
+ static x = this.name;
+ }
+ assertEquals(C.x, 'C');
+}
diff --git a/deps/v8/test/mjsunit/harmony/regress/regress-4658.js b/deps/v8/test/mjsunit/harmony/regress/regress-4658.js
deleted file mode 100644
index 35bea12adc..0000000000
--- a/deps/v8/test/mjsunit/harmony/regress/regress-4658.js
+++ /dev/null
@@ -1,21 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --harmony-do-expressions
-
-(function testWithSimpleLoopVariable() {
- var f = (x, y = (do { var s=0; for (var e of x) s += e; s; })) => y*(y+1);
- var result = f([1,2,3]); // core dump here, if not fixed.
- assertEquals(result, 42);
-})();
-
-(function testWithComplexLoopVariable() {
- var f = (x, i=x[0]-1, a=[],
- y = (do { var s=0;
- for (a[i] of x) s += a[i++];
- s;
- })) => y*(a[0]+a[1]*a[2]);
- var result = f([1,2,3]); // core dump here, if not fixed.
- assertEquals(result, 42);
-})();
diff --git a/deps/v8/test/mjsunit/harmony/regress/regress-4755.js b/deps/v8/test/mjsunit/harmony/regress/regress-4755.js
deleted file mode 100644
index 2a0df9dba4..0000000000
--- a/deps/v8/test/mjsunit/harmony/regress/regress-4755.js
+++ /dev/null
@@ -1,45 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --allow-natives-syntax --harmony-do-expressions
-
-(function DoTryCatchInsideBinop() {
- function f(a, b) {
- return a + do { try { throw "boom" } catch(e) { b } }
- }
- assertEquals(3, f(1, 2));
- assertEquals(3, f(1, 2));
- %OptimizeFunctionOnNextCall(f);
- assertEquals(3, f(1, 2));
-})();
-
-(function DoTryCatchInsideCall() {
- function f(a, b) {
- return Math.max(a, do { try { throw a } catch(e) { e + b } })
- }
- assertEquals(3, f(1, 2));
- assertEquals(3, f(1, 2));
- %OptimizeFunctionOnNextCall(f);
- assertEquals(3, f(1, 2));
-})();
-
-(function DoTryCatchInsideTry() {
- function f(a, b) {
- try { return do { try { throw a } catch(e) { e + b } } } catch(e) {}
- }
- assertEquals(3, f(1, 2));
- assertEquals(3, f(1, 2));
- %OptimizeFunctionOnNextCall(f);
- assertEquals(3, f(1, 2));
-})();
-
-(function DoTryCatchInsideFinally() {
- function f(a, b) {
- try {} finally { return do { try { throw a } catch(e) { e + b } } }
- }
- assertEquals(3, f(1, 2));
- assertEquals(3, f(1, 2));
- %OptimizeFunctionOnNextCall(f);
- assertEquals(3, f(1, 2));
-})();
diff --git a/deps/v8/test/mjsunit/harmony/regress/regress-4904.js b/deps/v8/test/mjsunit/harmony/regress/regress-4904.js
deleted file mode 100644
index a57d246b6f..0000000000
--- a/deps/v8/test/mjsunit/harmony/regress/regress-4904.js
+++ /dev/null
@@ -1,24 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --harmony-do-expressions
-
-(function testCatchScopeInDoExpression() {
- var f = (s = 17, y = do { try { throw 25; } catch(e) { s += e; }; }) => s;
- var result = f();
- assertEquals(result, 42);
-})();
-
-(function testCatchScopeInDoExpression() {
- var f = (s = 17, y = do { let t; try { throw 25; } catch(e) { s += e; }; }) => s;
- var result = f();
- assertEquals(result, 42);
-})();
-
-(function testCatchScopeInDoExpression() {
- let t1;
- var f = (s = 17, y = do { let t2; try { throw 25; } catch(e) { s += e; }; }) => s;
- var result = f();
- assertEquals(result, 42);
-})();
diff --git a/deps/v8/test/mjsunit/harmony/regress/regress-546967.js b/deps/v8/test/mjsunit/harmony/regress/regress-546967.js
deleted file mode 100644
index 0315c43634..0000000000
--- a/deps/v8/test/mjsunit/harmony/regress/regress-546967.js
+++ /dev/null
@@ -1,16 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-//
-// Flags: --harmony-do-expressions --allow-natives-syntax
-
-function func1() {
- for (var i = 0; i < 64; ++i) func2();
-}
-
-%OptimizeFunctionOnNextCall(func1);
-func1();
-
-function func2() {
- var v = do {};
-}
diff --git a/deps/v8/test/mjsunit/harmony/regress/regress-897436.js b/deps/v8/test/mjsunit/harmony/regress/regress-897436.js
new file mode 100644
index 0000000000..c0cf41661c
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/regress/regress-897436.js
@@ -0,0 +1,12 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --enable-slow-asserts
+
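+// An async generator awaits each value it yields; replacing "then" on a
+// pending promise with a non-callable value must not break that resolution
+// path (exercised here under slow asserts).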
+async function* gen() {
+ const alwaysPending = new Promise(() => {});
+ alwaysPending.then = "non-callable then";
+ yield alwaysPending;
+}
+gen().next();
diff --git a/deps/v8/test/mjsunit/harmony/regress/regress-912504.js b/deps/v8/test/mjsunit/harmony/regress/regress-912504.js
new file mode 100644
index 0000000000..78b1992b14
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/regress/regress-912504.js
@@ -0,0 +1,11 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --always-opt --harmony-object-from-entries --allow-natives-syntax
+
+function test() {
+ Object.fromEntries([[]]);
+ %DeoptimizeNow();
+}
+test();
diff --git a/deps/v8/test/mjsunit/harmony/regress/regress-crbug-578038.js b/deps/v8/test/mjsunit/harmony/regress/regress-crbug-578038.js
deleted file mode 100644
index 42774b84ed..0000000000
--- a/deps/v8/test/mjsunit/harmony/regress/regress-crbug-578038.js
+++ /dev/null
@@ -1,16 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --harmony-do-expressions
-
-(function testWithoutOtherLiteral() {
- var result = ((x = [...[42]]) => x)();
- assertEquals(result, [42]);
-})();
-
-(function testWithSomeOtherLiteral() {
- []; // important: an array literal before the arrow function
- var result = ((x = [...[42]]) => x)(); // will core dump, if not fixed.
- assertEquals(result, [42]);
-})();
diff --git a/deps/v8/test/mjsunit/harmony/string-matchAll-deleted-matchAll.js b/deps/v8/test/mjsunit/harmony/string-matchAll-deleted-matchAll.js
new file mode 100644
index 0000000000..5d2985f318
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/string-matchAll-deleted-matchAll.js
@@ -0,0 +1,9 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-string-matchall
+
+delete RegExp.prototype[Symbol.matchAll];
+const str = 'a';
+assertThrows(() => str.matchAll(/\w/g), TypeError);
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/basics.js b/deps/v8/test/mjsunit/harmony/weakrefs/basics.js
new file mode 100644
index 0000000000..88fb020101
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/basics.js
@@ -0,0 +1,214 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-weak-refs
+
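+// Note: these tests exercise the WeakFactory/WeakCell API from an early draft
+// of the WeakRefs proposal, the precursor of the later FinalizationRegistry.
+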
+(function TestConstructWeakFactory() {
+ let wf = new WeakFactory(() => {});
+ assertEquals(wf.toString(), "[object WeakFactory]");
+ assertNotSame(wf.__proto__, Object.prototype);
+ assertSame(wf.__proto__.__proto__, Object.prototype);
+})();
+
+(function TestWeakFactoryConstructorCallAsFunction() {
+ let caught = false;
+ let message = "";
+ try {
+ let f = WeakFactory(() => {});
+ } catch (e) {
+ message = e.message;
+ caught = true;
+ } finally {
+ assertTrue(caught);
+ assertEquals(message, "Constructor WeakFactory requires 'new'");
+ }
+})();
+
+(function TestConstructWeakFactoryCleanupNotCallable() {
+ let message = "WeakFactory: cleanup must be callable";
+ assertThrows(() => { let wf = new WeakFactory(); }, TypeError, message);
+ assertThrows(() => { let wf = new WeakFactory(1); }, TypeError, message);
+ assertThrows(() => { let wf = new WeakFactory(null); }, TypeError, message);
+})();
+
+(function TestConstructWeakFactoryWithCallableProxyAsCleanup() {
+ let handler = {};
+ let obj = () => {};
+ let proxy = new Proxy(obj, handler);
+ let wf = new WeakFactory(proxy);
+})();
+
+(function TestConstructWeakFactoryWithNonCallableProxyAsCleanup() {
+ let message = "WeakFactory: cleanup must be callable";
+ let handler = {};
+ let obj = {};
+ let proxy = new Proxy(obj, handler);
+ assertThrows(() => { let wf = new WeakFactory(proxy); }, TypeError, message);
+})();
+
+(function TestMakeCell() {
+ let wf = new WeakFactory(() => {});
+ let wc = wf.makeCell({});
+ assertEquals(wc.toString(), "[object WeakCell]");
+ assertNotSame(wc.__proto__, Object.prototype);
+ assertSame(wc.__proto__.__proto__, Object.prototype);
+ assertEquals(wc.holdings, undefined);
+
+ let holdings_desc = Object.getOwnPropertyDescriptor(wc.__proto__, "holdings");
+ assertEquals(true, holdings_desc.configurable);
+ assertEquals(false, holdings_desc.enumerable);
+ assertEquals("function", typeof holdings_desc.get);
+ assertEquals(undefined, holdings_desc.set);
+
+ let clear_desc = Object.getOwnPropertyDescriptor(wc.__proto__, "clear");
+ assertEquals(true, clear_desc.configurable);
+ assertEquals(false, clear_desc.enumerable);
+ assertEquals("function", typeof clear_desc.value);
+})();
+
+(function TestMakeCellWithHoldings() {
+ let wf = new WeakFactory(() => {});
+ let obj = {a: 1};
+ let holdings = {b: 2};
+ let wc = wf.makeCell(obj, holdings);
+ assertSame(wc.holdings, holdings);
+})();
+
+(function TestMakeCellWithHoldingsSetHoldings() {
+ let wf = new WeakFactory(() => {});
+ let obj = {a: 1};
+ let holdings = {b: 2};
+ let wc = wf.makeCell(obj, holdings);
+ assertSame(wc.holdings, holdings);
+ wc.holdings = 5;
+ assertSame(wc.holdings, holdings);
+})();
+
+(function TestMakeCellWithHoldingsSetHoldingsStrict() {
+ "use strict";
+ let wf = new WeakFactory(() => {});
+ let obj = {a: 1};
+ let holdings = {b: 2};
+ let wc = wf.makeCell(obj, holdings);
+ assertSame(wc.holdings, holdings);
+ assertThrows(() => { wc.holdings = 5; }, TypeError);
+ assertSame(wc.holdings, holdings);
+})();
+
+(function TestMakeCellWithNonObject() {
+ let wf = new WeakFactory(() => {});
+ let message = "WeakFactory.prototype.makeCell: target must be an object";
+ assertThrows(() => wf.makeCell(), TypeError, message);
+ assertThrows(() => wf.makeCell(1), TypeError, message);
+ assertThrows(() => wf.makeCell(false), TypeError, message);
+ assertThrows(() => wf.makeCell("foo"), TypeError, message);
+ assertThrows(() => wf.makeCell(Symbol()), TypeError, message);
+ assertThrows(() => wf.makeCell(null), TypeError, message);
+ assertThrows(() => wf.makeCell(undefined), TypeError, message);
+})();
+
+(function TestMakeCellWithProxy() {
+ let handler = {};
+ let obj = {};
+ let proxy = new Proxy(obj, handler);
+ let wf = new WeakFactory(() => {});
+ let wc = wf.makeCell(proxy);
+})();
+
+(function TestMakeCellTargetAndHoldingsSameValue() {
+ let wf = new WeakFactory(() => {});
+ let obj = {a: 1};
+ // SameValue(target, holdings) not ok
+ assertThrows(() => wf.makeCell(obj, obj), TypeError,
+ "WeakFactory.prototype.makeCell: target and holdings must not be same");
+ let holdings = {a: 1};
+ let wc = wf.makeCell(obj, holdings);
+})();
+
+(function TestMakeCellWithoutWeakFactory() {
+ assertThrows(() => WeakFactory.prototype.makeCell.call({}, {}), TypeError);
+ // Does not throw:
+ let wf = new WeakFactory(() => {});
+ WeakFactory.prototype.makeCell.call(wf, {});
+})();
+
+(function TestHoldingsWithoutWeakCell() {
+ let wf = new WeakFactory(() => {});
+ let wc = wf.makeCell({});
+ let holdings_getter = Object.getOwnPropertyDescriptor(wc.__proto__, "holdings").get;
+ assertThrows(() => holdings_getter.call({}), TypeError);
+ // Does not throw:
+ holdings_getter.call(wc);
+})();
+
+(function TestClearWithoutWeakCell() {
+ let wf = new WeakFactory(() => {});
+ let wc = wf.makeCell({});
+ let clear = Object.getOwnPropertyDescriptor(wc.__proto__, "clear").value;
+ assertThrows(() => clear.call({}), TypeError);
+ // Does not throw:
+ clear.call(wc);
+})();
+
+(function TestWeakRefConstructor() {
+ let wr = new WeakRef({});
+ assertEquals(wr.toString(), "[object WeakRef]");
+ assertNotSame(wr.__proto__, Object.prototype);
+
+ let deref_desc = Object.getOwnPropertyDescriptor(wr.__proto__, "deref");
+ assertEquals(true, deref_desc.configurable);
+ assertEquals(false, deref_desc.enumerable);
+ assertEquals("function", typeof deref_desc.value);
+})();
+
+(function TestWeakRefConstructorWithNonObject() {
+ let message = "WeakRef: target must be an object";
+ assertThrows(() => new WeakRef(), TypeError, message);
+ assertThrows(() => new WeakRef(1), TypeError, message);
+ assertThrows(() => new WeakRef(false), TypeError, message);
+ assertThrows(() => new WeakRef("foo"), TypeError, message);
+ assertThrows(() => new WeakRef(Symbol()), TypeError, message);
+ assertThrows(() => new WeakRef(null), TypeError, message);
+ assertThrows(() => new WeakRef(undefined), TypeError, message);
+})();
+
+(function TestWeakRefConstructorCallAsFunction() {
+ let caught = false;
+ let message = "";
+ try {
+ let f = WeakRef({});
+ } catch (e) {
+ message = e.message;
+ caught = true;
+ } finally {
+ assertTrue(caught);
+ assertEquals(message, "Constructor WeakRef requires 'new'");
+ }
+})();
+
+(function TestWeakRefWithProxy() {
+ let handler = {};
+ let obj = {};
+ let proxy = new Proxy(obj, handler);
+ let wr = new WeakRef(proxy);
+})();
+
+(function TestCleanupSomeWithoutWeakFactory() {
+ assertThrows(() => WeakFactory.prototype.cleanupSome.call({}), TypeError);
+ // Does not throw:
+ let wf = new WeakFactory(() => {});
+ let rv = WeakFactory.prototype.cleanupSome.call(wf);
+ assertEquals(undefined, rv);
+})();
+
+(function TestDerefWithoutWeakRef() {
+ let wf = new WeakFactory(() => {});
+ let wc = wf.makeCell({});
+ let wr = new WeakRef({});
+ let deref = Object.getOwnPropertyDescriptor(wr.__proto__, "deref").value;
+ assertThrows(() => deref.call({}), TypeError);
+ assertThrows(() => deref.call(wc), TypeError);
+ // Does not throw:
+ deref.call(wr);
+})();
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/cleanup-doesnt-iterate-all-cells.js b/deps/v8/test/mjsunit/harmony/weakrefs/cleanup-doesnt-iterate-all-cells.js
new file mode 100644
index 0000000000..f8e44c355c
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/cleanup-doesnt-iterate-all-cells.js
@@ -0,0 +1,89 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-weak-refs --expose-gc --noincremental-marking
+
+let cleanup_call_count = 0;
+let cleanup = function(iter) {
+ if (cleanup_call_count == 0) {
+ // First call: iterate 2 of the 3 cells
+ let cells = [];
+ for (wc of iter) {
+ cells.push(wc);
+ // Don't iterate the rest of the cells
+ if (cells.length == 2) {
+ break;
+ }
+ }
+ assertEquals(cells.length, 2);
+ assertTrue(cells[0].holdings < 3);
+ assertTrue(cells[1].holdings < 3);
+ // Update call count only after the asserts; this ensures that the test
+ // fails even if the exceptions inside the cleanup function are swallowed.
+ cleanup_call_count++;
+ } else {
+ // Second call: iterate one leftover cell and one new cell.
+ assertEquals(1, cleanup_call_count);
+ let cells = [];
+ for (wc of iter) {
+ cells.push(wc);
+ }
+ assertEquals(cells.length, 2);
+ assertTrue((cells[0].holdings < 3 && cells[1].holdings == 100) ||
+ (cells[1].holdings < 3 && cells[0].holdings == 100));
+ // Update call count only after the asserts; this ensures that the test
+ // fails even if the exceptions inside the cleanup function are swallowed.
+ cleanup_call_count++;
+ }
+}
+
+let wf = new WeakFactory(cleanup);
+// Create 3 objects and WeakCells pointing to them. The objects need to be
+// inside a closure so that we can reliably kill them!
+let weak_cells = [];
+
+(function() {
+ let objects = [];
+
+ for (let i = 0; i < 3; ++i) {
+ objects[i] = {a: i};
+ weak_cells[i] = wf.makeCell(objects[i], i);
+ }
+
+ gc();
+ assertEquals(0, cleanup_call_count);
+
+ // Drop the references to the objects.
+ objects = [];
+})();
+
+// This GC will discover dirty WeakCells.
+gc();
+assertEquals(0, cleanup_call_count);
+
+let timeout_func_1 = function() {
+ assertEquals(1, cleanup_call_count);
+
+ // Assert that the cleanup function won't be called unless new WeakCells appear.
+ setTimeout(timeout_func_2, 0);
+}
+
+setTimeout(timeout_func_1, 0);
+
+let timeout_func_2 = function() {
+ assertEquals(1, cleanup_call_count);
+
+ // Create a new WeakCell to be cleaned up.
+ let obj = {};
+ let wc = wf.makeCell(obj, 100);
+ obj = null;
+
+ gc();
+
+ setTimeout(timeout_func_3, 0);
+}
+
+let timeout_func_3 = function() {
+ assertEquals(2, cleanup_call_count);
+}
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/cleanup-from-different-realm.js b/deps/v8/test/mjsunit/harmony/weakrefs/cleanup-from-different-realm.js
new file mode 100644
index 0000000000..02f05ac8e2
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/cleanup-from-different-realm.js
@@ -0,0 +1,34 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-weak-refs --expose-gc --noincremental-marking
+
+let r = Realm.create();
+
+let cleanup = Realm.eval(r, "var stored_global; function cleanup() { stored_global = globalThis; } cleanup");
+let realm_global_this = Realm.eval(r, "globalThis");
+
+let wf = new WeakFactory(cleanup);
+
+// Create an object and a WeakCell pointing to it. The object needs to be inside
+// a closure so that we can reliably kill it!
+let weak_cell;
+
+(function() {
+ let object = {};
+ weak_cell = wf.makeCell(object);
+
+ // object goes out of scope.
+})();
+
+gc();
+
+// Assert that the cleanup function was called in its Realm.
+let timeout_func = function() {
+ let stored_global = Realm.eval(r, "stored_global;");
+ assertNotEquals(stored_global, globalThis);
+ assertEquals(stored_global, realm_global_this);
+}
+
+setTimeout(timeout_func, 0);
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/cleanup-is-a-microtask.js b/deps/v8/test/mjsunit/harmony/weakrefs/cleanup-is-a-microtask.js
new file mode 100644
index 0000000000..6a5bcfa821
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/cleanup-is-a-microtask.js
@@ -0,0 +1,56 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-weak-refs --expose-gc --noincremental-marking
+
+// This test asserts that the cleanup function call, scheduled by GC, is a
+// microtask and not a normal task.
+
+// Inside a microtask, cause GC (which should schedule the cleanup as
+// microtask). Also schedule another microtask. Assert that the cleanup
+// function ran before the other microtask.
+
+function scheduleMicrotask(func) {
+ Promise.resolve().then(func);
+}
+
+let log = [];
+
+let cleanup = (iter) => {
+ log.push("cleanup");
+ for (wc of iter) { }
+}
+
+let wf = new WeakFactory(cleanup);
+let o = null;
+
+(function() {
+ // Use a closure here to avoid other references to o which might keep it alive
+ // (e.g., stack frames pointing to it).
+ o = {};
+ wf.makeCell(o);
+})();
+
+let microtask_after_cleanup = () => {
+ log.push("microtask_after_cleanup");
+}
+
+let first_microtask = function() {
+ log.push("first_microtask");
+
+ // This schedules the cleanup function as microtask.
+ o = null;
+ gc();
+
+ // Schedule a microtask which should run after the cleanup microtask.
+ scheduleMicrotask(microtask_after_cleanup);
+}
+
+scheduleMicrotask(first_microtask);
+
+setTimeout(() => {
+ // Assert that the functions got called in the right order.
+ let wanted_log = ["first_microtask", "cleanup", "microtask_after_cleanup"];
+ assertEquals(wanted_log, log);
+}, 0);
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/cleanup-proxy-from-different-realm.js b/deps/v8/test/mjsunit/harmony/weakrefs/cleanup-proxy-from-different-realm.js
new file mode 100644
index 0000000000..2e46830093
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/cleanup-proxy-from-different-realm.js
@@ -0,0 +1,34 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-weak-refs --expose-gc --noincremental-marking
+
+let r = Realm.create();
+
+let cleanup = Realm.eval(r, "var stored_global; let cleanup = new Proxy(function() { stored_global = globalThis;}, {}); cleanup");
+let realm_global_this = Realm.eval(r, "globalThis");
+
+let wf = new WeakFactory(cleanup);
+
+// Create an object and a WeakCell pointing to it. The object needs to be inside
+// a closure so that we can reliably kill it!
+let weak_cell;
+
+(function() {
+ let object = {};
+ weak_cell = wf.makeCell(object);
+
+ // object goes out of scope.
+})();
+
+gc();
+
+// Assert that the cleanup function was called in its Realm.
+let timeout_func = function() {
+ let stored_global = Realm.eval(r, "stored_global;");
+ assertNotEquals(stored_global, globalThis);
+ assertEquals(stored_global, realm_global_this);
+}
+
+setTimeout(timeout_func, 0);
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/cleanupsome-cleared-weakcell.js b/deps/v8/test/mjsunit/harmony/weakrefs/cleanupsome-cleared-weakcell.js
new file mode 100644
index 0000000000..631f43c012
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/cleanupsome-cleared-weakcell.js
@@ -0,0 +1,35 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-weak-refs --expose-gc --noincremental-marking
+
+let cleanup_count = 0;
+let cleanup_cells = [];
+let cleanup = function(iter) {
+ for (wc of iter) {
+ cleanup_cells.push(wc);
+ }
+ ++cleanup_count;
+}
+
+let wf = new WeakFactory(cleanup);
+let weak_cell;
+(function() {
+ let o = {};
+ weak_cell = wf.makeCell(o);
+
+ // cleanupSome won't do anything since there are no dirty WeakCells.
+ wf.cleanupSome();
+ assertEquals(0, cleanup_count);
+})();
+
+// GC will detect the WeakCell as dirty.
+gc();
+
+// Clear the WeakCell just before we would've called cleanupSome.
+weak_cell.clear();
+
+wf.cleanupSome();
+
+assertEquals(0, cleanup_count);
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/cleanupsome-weakcell.js b/deps/v8/test/mjsunit/harmony/weakrefs/cleanupsome-weakcell.js
new file mode 100644
index 0000000000..84a946d390
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/cleanupsome-weakcell.js
@@ -0,0 +1,33 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-weak-refs --expose-gc --noincremental-marking
+
+let cleanup_count = 0;
+let cleanup_cells = [];
+let cleanup = function(iter) {
+ for (wc of iter) {
+ cleanup_cells.push(wc);
+ }
+ ++cleanup_count;
+}
+
+let wf = new WeakFactory(cleanup);
+let weak_cell;
+(function() {
+ let o = {};
+ weak_cell = wf.makeCell(o);
+
+ // cleanupSome won't do anything since there are no dirty WeakCells.
+ wf.cleanupSome();
+ assertEquals(0, cleanup_count);
+})();
+
+// GC will detect the WeakCell as dirty.
+gc();
+
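+// cleanupSome() runs the cleanup callback synchronously for the now-dirty
+// WeakCell instead of waiting for a scheduled microtask.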
+wf.cleanupSome();
+assertEquals(1, cleanup_count);
+assertEquals(1, cleanup_cells.length);
+assertEquals(weak_cell, cleanup_cells[0]);
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/clear-after-cleanup.js b/deps/v8/test/mjsunit/harmony/weakrefs/clear-after-cleanup.js
new file mode 100644
index 0000000000..3392d7fbb9
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/clear-after-cleanup.js
@@ -0,0 +1,46 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-weak-refs --expose-gc --noincremental-marking
+
+let cleanup_call_count = 0;
+let cleanup_weak_cell_count = 0;
+let cleanup = function(iter) {
+ for (wc of iter) {
+ assertSame(wc, weak_cell);
+ ++cleanup_weak_cell_count;
+ }
+ ++cleanup_call_count;
+}
+
+let wf = new WeakFactory(cleanup);
+// Create an object and a WeakCell pointing to it. The object needs to be inside
+// a closure so that we can reliably kill it!
+let weak_cell;
+
+(function() {
+ let object = {};
+ weak_cell = wf.makeCell(object);
+
+ // object goes out of scope.
+})();
+
+// This GC will discover dirty WeakCells and schedule cleanup.
+gc();
+assertEquals(0, cleanup_call_count);
+
+// Assert that the cleanup function was called and iterated the WeakCell.
+let timeout_func = function() {
+ assertEquals(1, cleanup_call_count);
+ assertEquals(1, cleanup_weak_cell_count);
+
+ // Clear an already iterated over WeakCell.
+ weak_cell.clear();
+
+ // Assert that it didn't do anything.
+ setTimeout(() => { assertEquals(1, cleanup_call_count); }, 0);
+ setTimeout(() => { assertEquals(1, cleanup_weak_cell_count); }, 0);
+}
+
+setTimeout(timeout_func, 0);
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/clear-before-cleanup.js b/deps/v8/test/mjsunit/harmony/weakrefs/clear-before-cleanup.js
new file mode 100644
index 0000000000..1fd0fbf3b0
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/clear-before-cleanup.js
@@ -0,0 +1,40 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-weak-refs --expose-gc --noincremental-marking
+
+let cleanup_call_count = 0;
+let cleanup = function(iter) {
+ ++cleanup_call_count;
+}
+
+let wf = new WeakFactory(cleanup);
+// Create an object and a WeakCell pointing to it. The object needs to be inside
+// a closure so that we can reliably kill it!
+let weak_cell;
+
+(function() {
+ let object = {};
+ weak_cell = wf.makeCell(object, "my holdings");
+
+ // Clear the WeakCell before the GC has a chance to discover it.
+ let return_value = weak_cell.clear();
+ assertEquals(undefined, return_value);
+
+ // Assert holdings got cleared too.
+ assertEquals(undefined, weak_cell.holdings);
+
+ // object goes out of scope.
+})();
+
+// This GC will discover dirty WeakCells.
+gc();
+assertEquals(0, cleanup_call_count);
+
+// Assert that the cleanup function won't be called, since the WeakCell was cleared.
+let timeout_func = function() {
+ assertEquals(0, cleanup_call_count);
+}
+
+setTimeout(timeout_func, 0);
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/clear-called-twice.js b/deps/v8/test/mjsunit/harmony/weakrefs/clear-called-twice.js
new file mode 100644
index 0000000000..a5aa537ff2
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/clear-called-twice.js
@@ -0,0 +1,39 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-weak-refs --expose-gc --noincremental-marking
+
+let cleanup_call_count = 0;
+let cleanup = function(iter) {
+ ++cleanup_call_count;
+}
+
+let wf = new WeakFactory(cleanup);
+// Create an object and a WeakCell pointing to it. The object needs to be inside
+// a closure so that we can reliably kill it!
+let weak_cell;
+
+(function() {
+ let object = {};
+ weak_cell = wf.makeCell(object);
+
+ // Clear the WeakCell before the GC has a chance to discover it.
+ weak_cell.clear();
+
+ // Call clear again (just to assert we handle this gracefully).
+ weak_cell.clear();
+
+ // object goes out of scope.
+})();
+
+// This GC will discover dirty WeakCells.
+gc();
+assertEquals(0, cleanup_call_count);
+
+// Assert that the cleanup function won't be called, since the WeakCell was cleared.
+let timeout_func = function() {
+ assertEquals(0, cleanup_call_count);
+}
+
+setTimeout(timeout_func, 0);
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/clear-clears-factory-pointer.js b/deps/v8/test/mjsunit/harmony/weakrefs/clear-clears-factory-pointer.js
new file mode 100644
index 0000000000..98410d5d0e
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/clear-clears-factory-pointer.js
@@ -0,0 +1,49 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-weak-refs --expose-gc --noincremental-marking
+
+// Test that WeakCell.prototype.clear() also clears the WeakFactory pointer of
+// WeakCell. The only way to observe this is to assert that the WeakCell no
+// longer keeps its WeakFactory alive after clear() has been called.
+
+let weak_cell;
+let weak_cell_pointing_to_factory;
+
+let cleanup1_call_count = 0;
+let cleanup2_call_count = 0;
+
+let cleanup1 = function() {
+ ++cleanup1_call_count;
+}
+
+let cleanup2 = function() {
+ ++cleanup2_call_count;
+}
+
+let wf1 = new WeakFactory(cleanup1);
+
+(function(){
+ let wf2 = new WeakFactory(cleanup2);
+
+ (function() {
+ let object = {};
+ weak_cell = wf2.makeCell(object);
+ // object goes out of scope.
+ })();
+
+ weak_cell_pointing_to_factory = wf1.makeCell(wf2);
+ // wf2 goes out of scope.
+})();
+
+weak_cell.clear();
+gc();
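+// Clearing weak_cell also dropped its pointer to wf2, so nothing keeps wf2
+// alive anymore and weak_cell_pointing_to_factory goes dirty.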
+
+// Assert that weak_cell_pointing_to_factory went dirty and its cleanup ran.
+let timeout_func = function() {
+ assertEquals(1, cleanup1_call_count);
+ assertEquals(0, cleanup2_call_count);
+}
+
+setTimeout(timeout_func, 0);
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/clear-inside-cleanup1.js b/deps/v8/test/mjsunit/harmony/weakrefs/clear-inside-cleanup1.js
new file mode 100644
index 0000000000..6c06d7af74
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/clear-inside-cleanup1.js
@@ -0,0 +1,41 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-weak-refs --expose-gc --noincremental-marking
+
+let cleanup_call_count = 0;
+let cleanup_weak_cell_count = 0;
+let cleanup = function(iter) {
+ // Clear the WeakCell before we've iterated through it.
+ weak_cell.clear();
+
+ for (wc of iter) {
+ ++cleanup_weak_cell_count;
+ }
+ ++cleanup_call_count;
+}
+
+let wf = new WeakFactory(cleanup);
+// Create an object and a WeakCell pointing to it. The object needs to be inside
+// a closure so that we can reliably kill it.
+let weak_cell;
+
+(function() {
+ let object = {};
+ weak_cell = wf.makeCell(object);
+
+ // object goes out of scope.
+})();
+
+// This GC will discover dirty WeakCells and schedule cleanup.
+gc();
+assertEquals(0, cleanup_call_count);
+
+// Assert that the cleanup function was called, but didn't iterate any weak cells.
+let timeout_func = function() {
+ assertEquals(1, cleanup_call_count);
+ assertEquals(0, cleanup_weak_cell_count);
+}
+
+setTimeout(timeout_func, 0);
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/clear-inside-cleanup2.js b/deps/v8/test/mjsunit/harmony/weakrefs/clear-inside-cleanup2.js
new file mode 100644
index 0000000000..0aab366f97
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/clear-inside-cleanup2.js
@@ -0,0 +1,40 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-weak-refs --expose-gc --noincremental-marking
+
+let cleanup_call_count = 0;
+let cleanup_weak_cell_count = 0;
+let cleanup = function(iter) {
+ for (wc of iter) {
+ assertSame(wc, weak_cell);
+ wc.clear();
+ ++cleanup_weak_cell_count;
+ }
+ ++cleanup_call_count;
+}
+
+let wf = new WeakFactory(cleanup);
+// Create an object and a WeakCell pointing to it. The object needs to be inside
+// a closure so that we can reliably kill it.
+let weak_cell;
+
+(function() {
+ let object = {};
+ weak_cell = wf.makeCell(object);
+
+ // object goes out of scope.
+})();
+
+// This GC will discover dirty WeakCells and schedule cleanup.
+gc();
+assertEquals(0, cleanup_call_count);
+
+// Assert that the cleanup function was called and iterated the WeakCell.
+let timeout_func = function() {
+ assertEquals(1, cleanup_call_count);
+ assertEquals(1, cleanup_weak_cell_count);
+}
+
+setTimeout(timeout_func, 0);
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/clear-inside-cleanup3.js b/deps/v8/test/mjsunit/harmony/weakrefs/clear-inside-cleanup3.js
new file mode 100644
index 0000000000..9dcea5ded5
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/clear-inside-cleanup3.js
@@ -0,0 +1,41 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-weak-refs --expose-gc --noincremental-marking
+
+let cleanup_call_count = 0;
+let cleanup_weak_cell_count = 0;
+let cleanup = function(iter) {
+ for (wc of iter) {
+ assertSame(wc, weak_cell);
+ ++cleanup_weak_cell_count;
+ }
+ // Clear a WeakCell that has already been iterated over.
+ weak_cell.clear();
+ ++cleanup_call_count;
+}
+
+let wf = new WeakFactory(cleanup);
+// Create an object and a WeakCell pointing to it. The object needs to be inside
+// a closure so that we can reliably kill it.
+let weak_cell;
+
+(function() {
+ let object = {};
+ weak_cell = wf.makeCell(object);
+
+ // object goes out of scope.
+})();
+
+// This GC will discover dirty WeakCells and schedule cleanup.
+gc();
+assertEquals(0, cleanup_call_count);
+
+// Assert that the cleanup function was called and iterated the WeakCell.
+let timeout_func = function() {
+ assertEquals(1, cleanup_call_count);
+ assertEquals(1, cleanup_weak_cell_count);
+}
+
+setTimeout(timeout_func, 0);
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/clear-inside-cleanup4.js b/deps/v8/test/mjsunit/harmony/weakrefs/clear-inside-cleanup4.js
new file mode 100644
index 0000000000..794f356119
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/clear-inside-cleanup4.js
@@ -0,0 +1,48 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-weak-refs --expose-gc --noincremental-marking
+
+let cleanup_call_count = 0;
+let cleanup_weak_cell_count = 0;
+let cleanup = function(iter) {
+ for (wc of iter) {
+ // See which WeakCell we're iterating over and clear the other one.
+ if (wc == weak_cell1) {
+ weak_cell2.clear();
+ } else {
+ assertSame(wc, weak_cell2);
+ weak_cell1.clear();
+ }
+ ++cleanup_weak_cell_count;
+ }
+ ++cleanup_call_count;
+}
+
+let wf = new WeakFactory(cleanup);
+// Create two objects and WeakCells pointing to them. The objects need to be
+// inside a closure so that we can reliably kill them.
+let weak_cell1;
+let weak_cell2;
+
+(function() {
+ let object1 = {};
+ weak_cell1 = wf.makeCell(object1);
+ let object2 = {};
+ weak_cell2 = wf.makeCell(object2);
+
+ // object1 and object2 go out of scope.
+})();
+
+// This GC will discover dirty WeakCells and schedule cleanup.
+gc();
+assertEquals(0, cleanup_call_count);
+
+// Assert that the cleanup function was called and iterated one WeakCell (but not the other one).
+let timeout_func = function() {
+ assertEquals(1, cleanup_call_count);
+ assertEquals(1, cleanup_weak_cell_count);
+}
+
+setTimeout(timeout_func, 0);
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/clear-when-cleanup-already-scheduled.js b/deps/v8/test/mjsunit/harmony/weakrefs/clear-when-cleanup-already-scheduled.js
new file mode 100644
index 0000000000..159fb0b140
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/clear-when-cleanup-already-scheduled.js
@@ -0,0 +1,36 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-weak-refs --expose-gc --noincremental-marking
+
+let cleanup_call_count = 0;
+let cleanup = function(iter) {
+ ++cleanup_call_count;
+}
+
+let wf = new WeakFactory(cleanup);
+// Create an object and a WeakCell pointing to it. The object needs to be inside
+// a closure so that we can reliably kill it.
+let weak_cell;
+
+(function() {
+ let object = {};
+ weak_cell = wf.makeCell(object);
+
+ // object goes out of scope.
+})();
+
+// This GC will discover dirty WeakCells and schedule cleanup.
+gc();
+assertEquals(0, cleanup_call_count);
+
+// Clear the WeakCell before the cleanup task has run.
+weak_cell.clear();
+
+// Assert that the cleanup function won't be called, since the WeakCell was cleared.
+let timeout_func = function() {
+ assertEquals(0, cleanup_call_count);
+}
+
+setTimeout(timeout_func, 0);
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/factory-scheduled-for-cleanup-multiple-times.js b/deps/v8/test/mjsunit/harmony/weakrefs/factory-scheduled-for-cleanup-multiple-times.js
new file mode 100644
index 0000000000..2f3915478e
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/factory-scheduled-for-cleanup-multiple-times.js
@@ -0,0 +1,71 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-weak-refs --expose-gc --noincremental-marking
+// Flags: --no-stress-flush-bytecode
+
+let cleanup0_call_count = 0;
+let cleanup0_weak_cell_count = 0;
+
+let cleanup1_call_count = 0;
+let cleanup1_weak_cell_count = 0;
+
+let cleanup0 = function(iter) {
+ for (wc of iter) {
+ ++cleanup0_weak_cell_count;
+ }
+ ++cleanup0_call_count;
+}
+
+let cleanup1 = function(iter) {
+ for (wc of iter) {
+ ++cleanup1_weak_cell_count;
+ }
+ ++cleanup1_call_count;
+}
+
+let wf0 = new WeakFactory(cleanup0);
+let wf1 = new WeakFactory(cleanup1);
+
+// Create 1 WeakCell for each WeakFactory and kill the objects they point to.
+(function() {
+ // The objects need to be inside a closure so that we can reliably kill them.
+ let objects = [];
+ objects[0] = {};
+ objects[1] = {};
+
+ wf0.makeCell(objects[0]);
+ wf1.makeCell(objects[1]);
+
+ // Drop the references to the objects.
+ objects = [];
+
+ // Will schedule both wf0 and wf1 for cleanup.
+ gc();
+})();
+
+// Before the cleanup task has a chance to run, do the same thing again, so both
+// factories are (again) scheduled for cleanup. This has to be an IIFE (so that
+// we can reliably kill the objects), so we cannot reuse the same function as
+// before.
+(function() {
+ let objects = [];
+ objects[0] = {};
+ objects[1] = {};
+ wf0.makeCell(objects[0]);
+ wf1.makeCell(objects[1]);
+ objects = [];
+ gc();
+})();
+
+let timeout_func = function() {
+ assertEquals(1, cleanup0_call_count);
+ assertEquals(2, cleanup0_weak_cell_count);
+ assertEquals(1, cleanup1_call_count);
+ assertEquals(2, cleanup1_weak_cell_count);
+}
+
+// Give the cleanup task a chance to run. All WeakCells to cleanup will be
+// available during the same invocation of the cleanup function.
+setTimeout(timeout_func, 0);
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/iterating-weak-cells.js b/deps/v8/test/mjsunit/harmony/weakrefs/iterating-weak-cells.js
new file mode 100644
index 0000000000..9fef051122
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/iterating-weak-cells.js
@@ -0,0 +1,49 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-weak-refs --expose-gc --noincremental-marking
+
+let cleanup_called = false;
+let cleanup = function(iter) {
+ assertFalse(cleanup_called);
+ let cells = [];
+ for (wc of iter) {
+ cells.push(wc);
+ }
+ assertEquals(cells.length, 2);
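+ // The iteration order of dirty WeakCells is unspecified, so accept both.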
+ if (cells[0] == wc1) {
+ assertEquals(cells[0].holdings, 1);
+ assertEquals(cells[1], wc2);
+ assertEquals(cells[1].holdings, 2);
+ } else {
+ assertEquals(cells[0], wc2);
+ assertEquals(cells[0].holdings, 2);
+ assertEquals(cells[1], wc1);
+ assertEquals(cells[1].holdings, 1);
+ }
+ cleanup_called = true;
+}
+
+let wf = new WeakFactory(cleanup);
+let o1 = {};
+let o2 = {};
+let wc1 = wf.makeCell(o1, 1);
+let wc2 = wf.makeCell(o2, 2);
+
+gc();
+assertFalse(cleanup_called);
+
+// Drop the last references to o1 and o2.
+o1 = null;
+o2 = null;
+// GC will clear the WeakCells; the cleanup function will be called the next time
+// we enter the event loop.
+gc();
+assertFalse(cleanup_called);
+
+let timeout_func = function() {
+ assertTrue(cleanup_called);
+}
+
+setTimeout(timeout_func, 0);
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/multiple-dirty-weak-factories.js b/deps/v8/test/mjsunit/harmony/weakrefs/multiple-dirty-weak-factories.js
new file mode 100644
index 0000000000..98a33df240
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/multiple-dirty-weak-factories.js
@@ -0,0 +1,44 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-weak-refs --expose-gc --noincremental-marking
+
+let cleanup_call_count = 0;
+let cleanup_weak_cell_count = 0;
+let cleanup = function(iter) {
+ for (wc of iter) {
+ ++cleanup_weak_cell_count;
+ }
+ ++cleanup_call_count;
+}
+
+let wf1 = new WeakFactory(cleanup);
+let wf2 = new WeakFactory(cleanup);
+
+// Create two objects and WeakCells pointing to them. The objects need to be inside
+// a closure so that we can reliably kill them!
+let weak_cell1;
+let weak_cell2;
+
+(function() {
+ let object1 = {};
+ weak_cell1 = wf1.makeCell(object1);
+
+ let object2 = {};
+ weak_cell2 = wf2.makeCell(object2);
+
+ // object1 and object2 go out of scope.
+})();
+
+// This GC will discover dirty WeakCells and schedule cleanup.
+gc();
+assertEquals(0, cleanup_call_count);
+
+// Assert that the cleanup function was called and iterated the WeakCells.
+let timeout_func = function() {
+ assertEquals(2, cleanup_call_count);
+ assertEquals(2, cleanup_weak_cell_count);
+}
+
+setTimeout(timeout_func, 0);
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/two-weakrefs.js b/deps/v8/test/mjsunit/harmony/weakrefs/two-weakrefs.js
new file mode 100644
index 0000000000..c3fc9f741c
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/two-weakrefs.js
@@ -0,0 +1,48 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-weak-refs --expose-gc --noincremental-marking --allow-natives-syntax
+
+let o1 = {};
+let o2 = {};
+let wr1;
+let wr2;
+(function() {
+ wr1 = new WeakRef(o1);
+ wr2 = new WeakRef(o2);
+})();
+
+// Since the WeakRefs were created during this turn, they're not cleared by GC.
+gc();
+
+(function() {
+ assertNotEquals(undefined, wr1.deref());
+ assertNotEquals(undefined, wr2.deref());
+})();
+
+%PerformMicrotaskCheckpoint();
+// New turn.
+
+wr1.deref();
+o1 = null;
+gc(); // deref makes sure we don't clean up wr1
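+// (A WeakRef that was dereferenced during the current turn keeps its target
+// alive until the turn ends, so this gc() must not clear wr1.)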
+
+%PerformMicrotaskCheckpoint();
+// New turn.
+
+wr2.deref();
+o2 = null;
+gc(); // deref makes sure we don't clean up wr2
+
+%PerformMicrotaskCheckpoint();
+// New turn.
+
+assertEquals(undefined, wr1.deref());
+
+gc();
+
+%PerformMicrotaskCheckpoint();
+// New turn.
+
+assertEquals(undefined, wr2.deref());
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/weak-cell-basics.js b/deps/v8/test/mjsunit/harmony/weakrefs/weak-cell-basics.js
new file mode 100644
index 0000000000..eb365986d7
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/weak-cell-basics.js
@@ -0,0 +1,36 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-weak-refs --expose-gc --noincremental-marking
+
+let cleanup_called = false;
+let cleanup = function(iter) {
+ assertFalse(cleanup_called);
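+ // The iterator should yield the single dirty WeakCell and then be done.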
+ let result = iter.next();
+ assertEquals(result.value, wc);
+ assertFalse(result.done);
+ result = iter.next();
+ assertTrue(result.done);
+ cleanup_called = true;
+}
+
+let wf = new WeakFactory(cleanup);
+let o = {};
+let wc = wf.makeCell(o);
+
+gc();
+assertFalse(cleanup_called);
+
+// Drop the last reference to o.
+o = null;
+// GC will clear the WeakCell; the cleanup function will be called the next time
+// we enter the event loop.
+gc();
+assertFalse(cleanup_called);
+
+let timeout_func = function() {
+ assertTrue(cleanup_called);
+}
+
+setTimeout(timeout_func, 0);
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/weak-factory-keeps-weak-cells-alive.js b/deps/v8/test/mjsunit/harmony/weakrefs/weak-factory-keeps-weak-cells-alive.js
new file mode 100644
index 0000000000..367cd9a9c0
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/weak-factory-keeps-weak-cells-alive.js
@@ -0,0 +1,39 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-weak-refs --expose-gc --noincremental-marking
+
+let cleanup_called = false;
+let cleanup = function(iter) {
+ assertFalse(cleanup_called);
+ let cells = [];
+ for (wc of iter) {
+ cells.push(wc);
+ }
+ assertEquals(cells.length, 1);
+ assertEquals(cells[0].holdings, "this is my cell");
+ cleanup_called = true;
+}
+
+let wf = new WeakFactory(cleanup);
+let o1 = {};
+let wc1 = wf.makeCell(o1, "this is my cell");
+
+gc();
+assertFalse(cleanup_called);
+
+// Drop the last reference to o1.
+o1 = null;
+
+// Drop the last reference to the WeakCell. The WeakFactory keeps it alive, so
+// the cleanup function will be called as normal.
+wc1 = null;
+gc();
+assertFalse(cleanup_called);
+
+let timeout_func = function() {
+ assertTrue(cleanup_called);
+}
+
+setTimeout(timeout_func, 0);
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/weakcell-and-weakref.js b/deps/v8/test/mjsunit/harmony/weakrefs/weakcell-and-weakref.js
new file mode 100644
index 0000000000..f6627be19e
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/weakcell-and-weakref.js
@@ -0,0 +1,45 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-weak-refs --expose-gc --noincremental-marking --allow-natives-syntax
+
+let cleanup_called = false;
+let cleanup = function(iter) {
+ assertFalse(cleanup_called);
+ let cells = [];
+ for (wc of iter) {
+ cells.push(wc);
+ }
+ assertEquals(1, cells.length);
+ assertEquals(weak_cell, cells[0]);
+ cleanup_called = true;
+}
+
+let wf = new WeakFactory(cleanup);
+let weak_ref;
+let weak_cell;
+(function() {
+ let o = {};
+ weak_ref = new WeakRef(o);
+ weak_cell = wf.makeCell(o);
+})();
+
+// Since the WeakRef was created during this turn, it is not cleared by GC. The
+// WeakCell is not cleared either, since the WeakRef keeps the target object
+// alive.
+gc();
+(function() {
+ assertNotEquals(undefined, weak_ref.deref());
+})();
+
+%PerformMicrotaskCheckpoint();
+// Next turn.
+
+gc();
+
+%PerformMicrotaskCheckpoint();
+// Next turn.
+
+assertTrue(cleanup_called);
+assertEquals(undefined, weak_ref.deref());
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/weakref-creation-keeps-alive.js b/deps/v8/test/mjsunit/harmony/weakrefs/weakref-creation-keeps-alive.js
new file mode 100644
index 0000000000..18e3af26ce
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/weakref-creation-keeps-alive.js
@@ -0,0 +1,27 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-weak-refs --expose-gc --noincremental-marking --allow-natives-syntax
+
+let wr;
+(function() {
+ let o = {};
+ wr = new WeakRef(o);
+ // Don't deref here; we want to test that creating the WeakRef is enough to
+ // keep its target alive until the end of the turn.
+})();
+
+gc();
+
+// Since the WeakRef was created during this turn, it is not cleared by GC.
+(function() {
+ assertNotEquals(undefined, wr.deref());
+})();
+
+%PerformMicrotaskCheckpoint();
+// Next turn.
+
+gc();
+
+assertEquals(undefined, wr.deref());
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/weakref-deref-keeps-alive.js b/deps/v8/test/mjsunit/harmony/weakrefs/weakref-deref-keeps-alive.js
new file mode 100644
index 0000000000..c17f060713
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/weakref-deref-keeps-alive.js
@@ -0,0 +1,45 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-weak-refs --expose-gc --noincremental-marking --allow-natives-syntax
+
+let wr;
+let wr_control; // control WeakRef for testing what happens without deref
+(function() {
+ let o1 = {};
+ wr = new WeakRef(o1);
+ let o2 = {};
+ wr_control = new WeakRef(o2);
+})();
+
+let strong = {a: wr.deref(), b: wr_control.deref()};
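+// Keep both targets strongly reachable across the first GC.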
+
+gc();
+
+%PerformMicrotaskCheckpoint();
+// Next turn.
+
+// Call deref inside a closure, to avoid accidentally keeping a strong
+// reference to the object in the stack frame.
+(function() {
+ wr.deref();
+})();
+
+strong = null;
+
+// This GC will clear wr_control.
+gc();
+
+(function() {
+ assertNotEquals(undefined, wr.deref());
+ // Now the control WeakRef got cleared, since nothing was keeping it alive.
+ assertEquals(undefined, wr_control.deref());
+})();
+
+%PerformMicrotaskCheckpoint();
+// Next turn.
+
+gc();
+
+assertEquals(undefined, wr.deref());
diff --git a/deps/v8/test/mjsunit/json.js b/deps/v8/test/mjsunit/json.js
index 04754577ff..05997b3a84 100644
--- a/deps/v8/test/mjsunit/json.js
+++ b/deps/v8/test/mjsunit/json.js
@@ -390,11 +390,9 @@ for (var i = 0x0000; i <= 0xFFFF; i++) {
else if (string == '\n') expected = '\\n';
else if (string == '\f') expected = '\\f';
else if (string == '\r') expected = '\\r';
- } else if (i < 0x20) {
+ } else if (i < 0x20 || (i >= 0xD800 && i <= 0xDFFF)) {
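+ // Lone surrogates are now escaped as well (well-formed JSON.stringify).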
// Step 2.c
expected = '\\u' + i.toString(16).padStart(4, '0');
- // TODO(mathias): Add i >= 0xD800 && i <= 0xDFFF case once
- // --harmony-json-stringify is enabled by default.
} else {
expected = string;
}
diff --git a/deps/v8/test/mjsunit/messages.js b/deps/v8/test/mjsunit/messages.js
index 1dd9a05879..d5c796228c 100644
--- a/deps/v8/test/mjsunit/messages.js
+++ b/deps/v8/test/mjsunit/messages.js
@@ -171,15 +171,15 @@ test(function() {
for (constructor of typedArrayConstructors) {
test(() => {
const ta = new constructor([1]);
- %ArrayBufferNeuter(ta.buffer);
+ %ArrayBufferDetach(ta.buffer);
ta.find(() => {});
- }, "Cannot perform %TypedArray%.prototype.find on a detached ArrayBuffer", TypeError);
+ }, "Cannot perform %TypedArray%.prototype.find on a neutered ArrayBuffer", TypeError);
test(() => {
const ta = new constructor([1]);
- %ArrayBufferNeuter(ta.buffer);
+ %ArrayBufferDetach(ta.buffer);
ta.findIndex(() => {});
- }, "Cannot perform %TypedArray%.prototype.findIndex on a detached ArrayBuffer", TypeError);
+ }, "Cannot perform %TypedArray%.prototype.findIndex on a neutered ArrayBuffer", TypeError);
}
// kFirstArgumentNotRegExp
@@ -570,6 +570,10 @@ test(function() {
"a".repeat(1 << 30);
}, "Invalid string length", RangeError);
+test(function() {
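+ // 2**30 - 1 separators alone exceed the maximum string length.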
+ new Array(1 << 30).join();
+}, "Invalid string length", RangeError);
+
// kNormalizationForm
test(function() {
"".normalize("ABC");
diff --git a/deps/v8/test/mjsunit/mjsunit.js b/deps/v8/test/mjsunit/mjsunit.js
index 59923a4247..41f2caee7a 100644
--- a/deps/v8/test/mjsunit/mjsunit.js
+++ b/deps/v8/test/mjsunit/mjsunit.js
@@ -166,8 +166,12 @@ var V8OptimizationStatus = {
kOptimizingConcurrently: 1 << 9,
kIsExecuting: 1 << 10,
kTopmostFrameIsTurboFanned: 1 << 11,
+ kLiteMode: 1 << 12,
};
+// Returns true if --lite-mode is on and we can't ever turn on optimization.
+var isNeverOptimizeLiteMode;
+
// Returns true if --no-opt mode is on.
var isNeverOptimize;
@@ -653,6 +657,12 @@ var prettyPrinted;
fun, sync_opt, name_opt, skip_if_maybe_deopted = true) {
if (sync_opt === undefined) sync_opt = "";
var opt_status = OptimizationStatus(fun, sync_opt);
+ // Tests that use assertOptimized() do not make sense for Lite mode, where
+ // optimization is always disabled; explicitly exit the test with a warning.
+ if (opt_status & V8OptimizationStatus.kLiteMode) {
+ print("Warning: Test uses assertOptimized in Lite mode, skipping test.");
+ quit(0);
+ }
// Tests that use assertOptimized() do not make sense if --no-opt
// option is provided. Such tests must add --opt to flags comment.
assertFalse((opt_status & V8OptimizationStatus.kNeverOptimize) !== 0,
@@ -668,6 +678,11 @@ var prettyPrinted;
assertTrue((opt_status & V8OptimizationStatus.kOptimized) !== 0, name_opt);
}
+ isNeverOptimizeLiteMode = function isNeverOptimizeLiteMode() {
+ var opt_status = OptimizationStatus(undefined, "");
+ return (opt_status & V8OptimizationStatus.kLiteMode) !== 0;
+ }
+
isNeverOptimize = function isNeverOptimize() {
var opt_status = OptimizationStatus(undefined, "");
return (opt_status & V8OptimizationStatus.kNeverOptimize) !== 0;
diff --git a/deps/v8/test/mjsunit/mjsunit.status b/deps/v8/test/mjsunit/mjsunit.status
index 16a17189e0..884c7cca92 100644
--- a/deps/v8/test/mjsunit/mjsunit.status
+++ b/deps/v8/test/mjsunit/mjsunit.status
@@ -32,6 +32,9 @@
'modules-skip*': [SKIP],
'harmony/modules-skip*': [SKIP],
'regress/modules-skip*': [SKIP],
+ 'wasm/exceptions-utils': [SKIP],
+ 'wasm/wasm-constants': [SKIP],
+ 'wasm/wasm-module-builder': [SKIP],
# All tests in the bug directory are expected to fail.
'bugs/*': [FAIL],
@@ -55,6 +58,9 @@
# Issue 5495: enable the test when the constant field tracking in enabled.
'const-field-tracking': [SKIP],
+ # Issue 8505: Math.pow is incorrect for asm.js
+ 'regress/wasm/regress-8505': [SKIP],
+
##############################################################################
# Too slow in debug mode with --stress-opt mode.
'regress/regress-create-exception': [PASS, ['mode == debug', SKIP]],
@@ -84,7 +90,7 @@
# Issue 488: this test sometimes times out.
# TODO(arm): This seems to flush out a bug on arm with simulator.
- 'array-constructor': [PASS, SLOW, ['arch == arm and simulator == True', SKIP]],
+ 'array-constructor': [PASS, SLOW, ['arch == arm and simulator_run == True', SKIP]],
# Very slow test
'regress/regress-crbug-808192' : [PASS, SLOW, NO_VARIANTS, ['mode == debug or arch == arm or arch == arm64 or arch == android_arm or arch == android_arm64 or arch == mipsel or arch == mips64el or arch == mips64 or arch == mips or arch == s390 or arch == s390x or arch == ppc or arch == ppc64', SKIP]],
@@ -105,7 +111,8 @@
'migrations': [SKIP],
'array-functions-prototype-misc': [PASS, SLOW, ['mode == debug', SKIP]],
'compiler/regress-808472': [PASS, ['mode == debug', SKIP]],
- 'es6/promise-all-overflow-*': [PASS, SLOW, ['mode == debug or arch != x64', SKIP]],
+ 'es6/promise-all-overflow-1': [SKIP],
+ 'es6/promise-all-overflow-2': [PASS, SLOW, ['mode == debug or arch != x64', SKIP]],
##############################################################################
# This test sets the umask on a per-process basis and hence cannot be
@@ -133,7 +140,7 @@
##############################################################################
# Tests verifying CHECK and ASSERT.
'verify-check-false': [FAIL, NO_VARIANTS],
- 'verify-assert-false': [NO_VARIANTS, ['mode == release and dcheck_always_on == False', PASS], ['mode == debug or dcheck_always_on == True', FAIL]],
+ 'verify-assert-false': [NO_VARIANTS, ['mode == release and dcheck_always_on == False', PASS], ['mode == debug', FAIL]],
##############################################################################
# Tests with different versions for release and debug.
@@ -142,7 +149,7 @@
'regress/regress-634-debug': [PASS, ['mode == release', SKIP]],
# BUG(v8:2989).
- 'regress/regress-2989': [FAIL, NO_VARIANTS],
+ 'regress/regress-2989': [FAIL, NO_VARIANTS, ['lite_mode == True', SKIP]],
# This test variant makes only sense on arm.
'math-floor-of-div-nosudiv': [PASS, SLOW, ['arch not in [arm, arm64, android_arm, android_arm64]', SKIP]],
@@ -173,6 +180,8 @@
'wasm/embenchen/*': [PASS, SLOW],
'wasm/grow-memory': [PASS, SLOW],
'wasm/unreachable-validation': [PASS, SLOW],
+ 'wasm/atomics-stress': [PASS, SLOW, NO_VARIANTS, ['mode != release or dcheck_always_on', SKIP], ['(arch == arm or arch == arm64) and simulator_run', SKIP], ['tsan', SKIP]],
+ 'wasm/atomics64-stress': [PASS, SLOW, NO_VARIANTS, ['mode != release or dcheck_always_on', SKIP], ['(arch == arm or arch == arm64) and simulator_run', SKIP], ['tsan', SKIP]],
'wasm/compare-exchange-stress': [PASS, SLOW, NO_VARIANTS],
'wasm/compare-exchange64-stress': [PASS, SLOW, NO_VARIANTS],
@@ -259,6 +268,7 @@
'regress/regress-trap-allocation-memento': [SKIP],
'regress/regress-2249': [SKIP],
'regress/regress-4121': [SKIP],
+ 'regress/regress-6989': [SKIP],
'compare-known-objects-slow': [SKIP],
'compiler/array-multiple-receiver-maps': [SKIP],
# Tests taking too long
@@ -313,6 +323,66 @@
}], # 'gc_stress == True'
##############################################################################
+['lite_mode', {
+ # Skip tests not suitable for lite_mode.
+
+ # TODO(8596): We cache the templates in the feedback vector. In lite mode
+ # without feedback vectors we need to implement some other mechanism to cache
+ # them. Enable this test after fixing it.
+ 'es6/templates': [SKIP],
+
+ # code coverage needs feedback vectors
+ 'code-coverage-ad-hoc': [SKIP],
+ 'code-coverage-class-fields': [SKIP],
+ 'code-coverage-block-noopt': [SKIP],
+ 'code-coverage-block': [SKIP],
+ 'code-coverage-precise': [SKIP],
+
+ # Needs feedback vector - tests for allocation sites
+ 'array-constructor-feedback': [SKIP],
+ 'regress/regress-trap-allocation-memento': [SKIP],
+ 'regress/regress-4121': [SKIP],
+
+ # Slow tests without feedback vectors
+ # TODO(mythria): Investigate why they are slow and either fix them if
+ # possible or update the reason why they are slow.
+ 'spread-large-string': [SKIP],
+ 'spread-large-array': [SKIP],
+
+ # TODO(v8:7777): Re-enable once wasm is supported in jitless mode.
+ 'regress/wasm/*': [SKIP],
+ 'tools/compiler-trace-flags': [SKIP],
+ 'wasm/*': [SKIP],
+
+ # Other tests that use asm / wasm / optimized code.
+ 'asm/asm-heap': [SKIP],
+ 'asm/asm-validation': [SKIP],
+ 'asm/call-stdlib': [SKIP],
+ 'asm/call-annotation': [SKIP],
+ 'asm/global-imports': [SKIP],
+ 'asm/regress-913822': [SKIP],
+ 'asm/return-types': [SKIP],
+ 'regress/regress-599719': [SKIP],
+ 'regress/regress-6196': [SKIP],
+ 'regress/regress-6700': [SKIP],
+ 'regress/regress-6838-2': [SKIP],
+ 'regress/regress-6838-3': [SKIP],
+
+ # Timeouts in lite / jitless mode.
+ 'asm/embenchen/*': [SKIP],
+
+ # Tests that generate code at runtime.
+ 'code-comments': [SKIP],
+ 'regress/regress-617526': [SKIP],
+ 'regress/regress-7893': [SKIP],
+ 'regress/regress-8377': [SKIP],
+ 'regress/regress-863810': [SKIP],
+ 'regress/regress-crbug-721835': [SKIP],
+ 'regress/regress-crbug-759327': [SKIP],
+ 'regress/regress-crbug-898974': [SKIP],
+}], # 'lite_mode'
+
+##############################################################################
['byteorder == big', {
# Emscripten requires little-endian, skip all tests on big endian platforms.
'asm/embenchen/*': [SKIP],
@@ -363,7 +433,6 @@
'array-splice': [PASS, SLOW],
'bit-not': [PASS, SLOW],
'compiler/alloc-number': [PASS, SLOW],
- 'compiler/osr-assert': [PASS, SLOW],
'compiler/osr-with-args': [PASS, SLOW],
'generated-transition-stub': [PASS, SLOW],
'json2': [PASS, SLOW],
@@ -562,6 +631,10 @@
'regress/regress-752764': [SKIP],
'regress/regress-779407': [SKIP],
'harmony/bigint/regressions': [SKIP],
+
+ # Pre-r6 MIPS32 doesn't have instructions needed to properly handle 64-bit
+ # atomic instructions.
+ 'wasm/atomics64-stress': [PASS, ['mips_arch_variant != r6', SKIP]],
}], # 'arch == mipsel or arch == mips'
##############################################################################
@@ -616,6 +689,11 @@
'regress/regress-779407': [SKIP],
}], # 'arch == mips64el or arch == mips64'
+['(arch == mips64el or arch == mips64) and simulator_run', {
+ # Slow tests which have flaky timeout on simulator.
+ 'wasm/atomics64-stress': [SKIP],
+}], # '(arch == mips64el or arch == mips64) and simulator_run'
+
##############################################################################
['system == windows', {
# TODO(mstarzinger): Too slow with turbo fan.
@@ -701,20 +779,30 @@
'code-coverage-ad-hoc': [SKIP],
'code-coverage-precise': [SKIP],
+ # Passes incompatible arguments.
+ 'd8/d8-arguments': [SKIP],
+
# Fails allocation on tsan.
'es6/classes': [PASS, ['tsan', SKIP]],
# Tests that fail some assertions due to checking internal state sensitive
# to GC. We mark PASS,FAIL to not skip those tests on the endurance fuzzer.
'array-literal-feedback': [PASS, FAIL],
+ 'compiler/dataview-neutered': [PASS, FAIL],
'compiler/native-context-specialization-hole-check': [PASS, FAIL],
'elements-transition-hoisting': [PASS, FAIL],
'es6/collections-constructor-custom-iterator': [PASS, FAIL],
+ 'harmony/weakrefs/clear-clears-factory-pointer': [PASS, FAIL],
+ 'ignition/throw-if-not-hole': [PASS, FAIL],
'keyed-load-with-symbol-key': [PASS, FAIL],
'object-seal': [PASS, FAIL],
'regress/regress-3709': [PASS, FAIL],
+ 'regress/regress-385565': [PASS, FAIL],
'regress/regress-6948': [PASS, FAIL],
+ 'regress/regress-7014-1': [PASS, FAIL],
+ 'regress/regress-7014-2': [PASS, FAIL],
'regress/regress-7510': [PASS, FAIL],
+ 'regress/regress-crbug-882233-2': [PASS, FAIL],
'regress/regress-trap-allocation-memento': [PASS, FAIL],
'regress/regress-unlink-closures-on-deopt': [PASS, FAIL],
'shared-function-tier-up-turbo': [PASS, FAIL],
@@ -750,6 +838,10 @@
'd8/enable-tracing': [SKIP],
# Relies on async compilation which requires background tasks.
'wasm/streaming-error-position': [SKIP],
+ # Intentionally non-deterministic using shared arraybuffers.
+ 'wasm/atomics-stress': [SKIP],
+ 'wasm/atomics64-stress': [SKIP],
+ 'wasm/futex': [SKIP],
}], # 'predictable == True'
##############################################################################
@@ -762,6 +854,8 @@
'regress/regress-91008': [PASS, SLOW],
'harmony/regexp-property-lu-ui': [PASS, SLOW],
'whitespaces': [PASS, SLOW],
+ 'wasm/atomics-stress': [SKIP],
+ 'wasm/atomics64-stress': [SKIP],
}], # 'simulator_run and (arch == ppc or arch == ppc64 or arch == s390 or arch == s390x)'
##############################################################################
@@ -783,9 +877,10 @@
##############################################################################
['variant == stress', {
- 'es6/array-iterator-turbo': [SKIP],
-
+ # Slow tests.
'array-natives-elements': [SKIP],
+ 'big-object-literal': [SKIP],
+ 'es6/array-iterator-turbo': [SKIP],
'ignition/regress-599001-verifyheap': [SKIP],
'unicode-test': [SKIP],
@@ -796,20 +891,21 @@
'es6/classes': [PASS, ['tsan', SKIP]],
'regress/regress-1122': [PASS, ['tsan', SKIP]],
+ # Too slow with gc_stress on arm64.
+ 'messages': [PASS, ['gc_stress and arch == arm64', SKIP]],
+
# Slow on arm64 simulator: https://crbug.com/v8/7783
'string-replace-gc': [PASS, ['arch == arm64 and simulator_run', SKIP]],
# Too memory hungry on Odroid devices.
'regress/regress-678917': [PASS, ['arch == arm and not simulator_run', SKIP]],
-
- # https://crbug.com/v8/8164
- 'wasm/compare-exchange-stress': [SKIP],
}], # variant == stress
##############################################################################
['variant == stress and (arch == arm or arch == arm64) and simulator_run', {
# Slow tests: https://crbug.com/v8/7783
'generated-transition-stub': [SKIP],
+ 'regress/regress-336820': [SKIP],
'wasm/grow-memory': [SKIP],
}], # variant == stress and (arch == arm or arch == arm64) and simulator_run
@@ -818,6 +914,7 @@
# Slow tests: https://crbug.com/v8/7783
'md5': [SKIP],
'packed-elements': [SKIP],
+ 'regress/regress-crbug-319860': [SKIP],
'wasm/asm-wasm-f32': [SKIP],
'wasm/asm-wasm-f64': [SKIP],
'wasm/grow-memory': [SKIP],
@@ -871,12 +968,13 @@
}], # arch != x64 or deopt_fuzzer
##############################################################################
-# Liftoff is currently only sufficiently implemented on x64, ia32 and arm64.
+# Liftoff is currently only sufficiently implemented on x64, ia32, arm64 and
+# arm.
# TODO(clemensh): Implement on all other platforms (crbug.com/v8/6600).
-['arch != x64 and arch != ia32 and arch != arm64', {
+['arch != x64 and arch != ia32 and arch != arm64 and arch != arm', {
'wasm/liftoff': [SKIP],
'wasm/tier-up-testing-flag': [SKIP],
-}], # arch != x64 and arch != ia32 and arch != arm64
+}], # arch != x64 and arch != ia32 and arch != arm64 and arch != arm
##############################################################################
['variant == slow_path and gc_stress', {
@@ -892,4 +990,17 @@
'wasm/asm-wasm-f64': [SKIP],
}], # arch == x64
+##############################################################################
+['arch in [arm, android_arm, android_ia32, ia32, ppc, s390, s390x, mipsel, mips]', {
+ # TODO(ssauleau): implement BigInt<>Wasm conversion for other arch -
+ # crbug.com/v8/7741
+ 'wasm/bigint': [SKIP],
+}], # arch in [arm, android_arm, android_ia32, ia32, ppc, s390, s390x, mipsel, mips]
+
+##############################################################################
+['arch not in [x64, arm, arm64] or system != linux', {
+ # Unwinding info writer is only supported on x64, arm, and arm64 Linux
+ 'regress/regress-913844': [SKIP],
+}],
+
]
diff --git a/deps/v8/test/mjsunit/modules-export-star-as1.js b/deps/v8/test/mjsunit/modules-export-star-as1.js
new file mode 100644
index 0000000000..1696c1c84d
--- /dev/null
+++ b/deps/v8/test/mjsunit/modules-export-star-as1.js
@@ -0,0 +1,10 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// MODULE
+// Flags: --harmony-namespace-exports
+
+import {foo} from "./modules-skip-8.js";
+assertEquals(42, foo.default);
+assertEquals(1, foo.get_a());
diff --git a/deps/v8/test/mjsunit/modules-export-star-as2.js b/deps/v8/test/mjsunit/modules-export-star-as2.js
new file mode 100644
index 0000000000..57828ebd67
--- /dev/null
+++ b/deps/v8/test/mjsunit/modules-export-star-as2.js
@@ -0,0 +1,19 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// MODULE
+// Flags: --harmony-namespace-exports
+
+export * as self from "./modules-export-star-as2.js";
+export * as self_again from "./modules-export-star-as2.js";
+import {self as myself} from "./modules-export-star-as2.js";
+import {self_again as myself_again} from "./modules-export-star-as2.js";
+
+assertEquals(["self", "self_again"], Object.keys(myself));
+assertEquals(myself, myself.self);
+assertEquals(myself_again, myself.self_again);
+assertEquals(myself, myself_again);
+
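+// "export * as" creates no local bindings, so direct references throw.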
+assertThrows(_ => self, ReferenceError);
+assertThrows(_ => self_again, ReferenceError);
diff --git a/deps/v8/test/mjsunit/modules-export-star-as3.js b/deps/v8/test/mjsunit/modules-export-star-as3.js
new file mode 100644
index 0000000000..4077cbd9c6
--- /dev/null
+++ b/deps/v8/test/mjsunit/modules-export-star-as3.js
@@ -0,0 +1,15 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// MODULE
+// Flags: --harmony-namespace-exports
+
+let self = 42;
+export * as self from "./modules-export-star-as3.js";
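+// "export * as" only creates an export entry, so the exported name "self"
+// does not clash with the local binding above.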
+import {self as myself} from "./modules-export-star-as3.js";
+assertEquals(["self"], Object.keys(myself));
+assertEquals(myself, myself.self);
+assertEquals(42, self);
+self++;
+assertEquals(43, self);
diff --git a/deps/v8/test/mjsunit/modules-imports8.js b/deps/v8/test/mjsunit/modules-imports8.js
new file mode 100644
index 0000000000..56ea60f4c3
--- /dev/null
+++ b/deps/v8/test/mjsunit/modules-imports8.js
@@ -0,0 +1,11 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// MODULE
+// Flags: --harmony-namespace-exports
+
+import {a, b} from "./modules-skip-9.js";
+assertSame(a, b);
+assertEquals(42, a.default);
+assertEquals(1, a.a);
diff --git a/deps/v8/test/mjsunit/modules-namespace1.js b/deps/v8/test/mjsunit/modules-namespace1.js
index 9c2ce93504..82b1e528ad 100644
--- a/deps/v8/test/mjsunit/modules-namespace1.js
+++ b/deps/v8/test/mjsunit/modules-namespace1.js
@@ -53,9 +53,9 @@ assertEquals(
{value: "Module", configurable: false, writable: false, enumerable: false},
Reflect.getOwnPropertyDescriptor(foo, Symbol.toStringTag));
-// Nonexistant properties.
-let nonexistant = ["gaga", 123, Symbol('')];
-for (let key of nonexistant) {
+// Nonexistent properties.
+let nonexistent = ["gaga", 123, Symbol('')];
+for (let key of nonexistent) {
assertSame(undefined, Reflect.getOwnPropertyDescriptor(foo, key));
assertTrue(Reflect.deleteProperty(foo, key));
assertFalse(Reflect.set(foo, key, true));
diff --git a/deps/v8/test/mjsunit/modules-skip-8.js b/deps/v8/test/mjsunit/modules-skip-8.js
new file mode 100644
index 0000000000..376788e283
--- /dev/null
+++ b/deps/v8/test/mjsunit/modules-skip-8.js
@@ -0,0 +1,5 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+export * as foo from "./modules-skip-1.js";
diff --git a/deps/v8/test/mjsunit/modules-skip-9.js b/deps/v8/test/mjsunit/modules-skip-9.js
new file mode 100644
index 0000000000..c0afcdf99e
--- /dev/null
+++ b/deps/v8/test/mjsunit/modules-skip-9.js
@@ -0,0 +1,7 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+import * as b from "./modules-skip-1.js";
+export {b};
+export * as a from "./modules-skip-1.js";
diff --git a/deps/v8/test/mjsunit/neuter-twice.js b/deps/v8/test/mjsunit/neuter-twice.js
index 3501cee433..1bf0fa9405 100644
--- a/deps/v8/test/mjsunit/neuter-twice.js
+++ b/deps/v8/test/mjsunit/neuter-twice.js
@@ -5,5 +5,5 @@
// Flags: --allow-natives-syntax
var ab = new ArrayBuffer(100);
-%ArrayBufferNeuter(ab);
-%ArrayBufferNeuter(ab);
+%ArrayBufferDetach(ab);
+%ArrayBufferDetach(ab);
diff --git a/deps/v8/test/mjsunit/object-seal.js b/deps/v8/test/mjsunit/object-seal.js
index f685b41927..265e50abc3 100644
--- a/deps/v8/test/mjsunit/object-seal.js
+++ b/deps/v8/test/mjsunit/object-seal.js
@@ -389,3 +389,7 @@ Object.seal(Sealed);
assertDoesNotThrow(function() { return new Sealed(); });
Sealed.prototype.prototypeExists = true;
assertTrue((new Sealed()).prototypeExists);
+
+obj = new Int32Array(10);
+Object.seal(obj);
+assertTrue(Object.isSealed(obj));
diff --git a/deps/v8/test/mjsunit/opt-elements-kind.js b/deps/v8/test/mjsunit/opt-elements-kind.js
index 8634366a7c..19e3981d44 100644
--- a/deps/v8/test/mjsunit/opt-elements-kind.js
+++ b/deps/v8/test/mjsunit/opt-elements-kind.js
@@ -134,11 +134,15 @@ function test1() {
convert_mixed(construct_smis(), "three", elements_kind.fast);
convert_mixed(construct_doubles(), "three", elements_kind.fast);
- smis = construct_smis();
- doubles = construct_doubles();
- convert_mixed(smis, 1, elements_kind.fast);
- convert_mixed(doubles, 1, elements_kind.fast);
- assertTrue(%HaveSameMap(smis, doubles));
+ if (%ICsAreEnabled()) {
+ // Test that allocation sites allocate correct elements kind initially based
+ // on previous transitions.
+ smis = construct_smis();
+ doubles = construct_doubles();
+ convert_mixed(smis, 1, elements_kind.fast);
+ convert_mixed(doubles, 1, elements_kind.fast);
+ assertTrue(%HaveSameMap(smis, doubles));
+ }
}
function clear_ic_state() {
diff --git a/deps/v8/test/mjsunit/osr-elements-kind.js b/deps/v8/test/mjsunit/osr-elements-kind.js
index d68da9b61f..2440f5c8ad 100644
--- a/deps/v8/test/mjsunit/osr-elements-kind.js
+++ b/deps/v8/test/mjsunit/osr-elements-kind.js
@@ -132,11 +132,15 @@ convert_mixed(doubles, "three", elements_kind.fast);
convert_mixed(construct_smis(), "three", elements_kind.fast);
convert_mixed(construct_doubles(), "three", elements_kind.fast);
-smis = construct_smis();
-doubles = construct_doubles();
-convert_mixed(smis, 1, elements_kind.fast);
-convert_mixed(doubles, 1, elements_kind.fast);
-assertTrue(%HaveSameMap(smis, doubles));
+if (%ICsAreEnabled()) {
+ // Test that allocation sites allocate correct elements kind initially based
+ // on previous transitions.
+ smis = construct_smis();
+ doubles = construct_doubles();
+ convert_mixed(smis, 1, elements_kind.fast);
+ convert_mixed(doubles, 1, elements_kind.fast);
+ assertTrue(%HaveSameMap(smis, doubles));
+}
// Throw away type information in the ICs for next stress run.
gc();
diff --git a/deps/v8/test/mjsunit/parallel-compile-tasks.js b/deps/v8/test/mjsunit/parallel-compile-tasks.js
new file mode 100644
index 0000000000..fbde569556
--- /dev/null
+++ b/deps/v8/test/mjsunit/parallel-compile-tasks.js
@@ -0,0 +1,70 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --compiler-dispatcher --parallel-compile-tasks --use-external-strings
+
+(function(a) {
+ assertEquals(a, "IIFE");
+})("IIFE");
+
+(function(a, ...rest) {
+ assertEquals(a, 1);
+ assertEquals(rest.length, 2);
+ assertEquals(rest[0], 2);
+ assertEquals(rest[1], 3);
+})(1,2,3);
+
+var outer_var = 42;
+
+function lazy_outer() {
+ return 42;
+}
+
+var eager_outer = (function() { return 42; });
+
+(function() {
+ assertEquals(outer_var, 42);
+ assertEquals(lazy_outer(), 42);
+ assertEquals(eager_outer(), 42);
+})();
+
+var gen = (function*() {
+ yield 1;
+ yield 2;
+})();
+
+assertEquals(gen.next().value, 1);
+assertEquals(gen.next().value, 2);
+
+var result = (function recursive(a=0) {
+ if (a == 1) {
+ return 42;
+ }
+ return recursive(1);
+})();
+
+assertEquals(result, 42);
+
+var a = 42;
+var b;
+var c = (a, b = (function z(){ return a+1; })());
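+// The comma expression evaluates a, then assigns b = z() = 43; c gets 43.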
+assertEquals(b, 43);
+assertEquals(c, 43);
+var c = (a, b = (function z(){ return a+1; })()) => { return b; };
+assertEquals(c(314), 315);
+
+// http://crbug.com/898076
+(function() {
+ class foo {};
+}); // Don't call IIFE so that it is compiled during idle time
+
+// http://crbug.com/900535
+(function() {
+ "use asm";
+ function bar(i, j) {
+ i = i|0;
+ j = j|0;
+ }
+ return {bar: bar};
+}); // Don't call IIFE so that it is compiled during idle time
diff --git a/deps/v8/test/mjsunit/regexp-override-exec.js b/deps/v8/test/mjsunit/regexp-override-exec.js
new file mode 100644
index 0000000000..66dbf1349f
--- /dev/null
+++ b/deps/v8/test/mjsunit/regexp-override-exec.js
@@ -0,0 +1,19 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var s = "baa";
+
+assertEquals(1, s.search(/a/));
+assertEquals(["aa"], s.match(/a./));
+assertEquals(["b", "", ""], s.split(/a/));
+
+let o = { index : 3, 0 : "x" };
+
+RegExp.prototype.exec = () => { return o; }
+assertEquals(3, s.search(/a/));
+assertEquals(o, s.match(/a./));
+assertEquals("baar", s.replace(/a./, "r"));
+
+RegExp.prototype.exec = () => { return null; }
+assertEquals(["baa"], s.split(/a/));
diff --git a/deps/v8/test/mjsunit/regexp-override-symbol-match-all.js b/deps/v8/test/mjsunit/regexp-override-symbol-match-all.js
new file mode 100644
index 0000000000..b5b99f232d
--- /dev/null
+++ b/deps/v8/test/mjsunit/regexp-override-symbol-match-all.js
@@ -0,0 +1,12 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-string-matchall
+
+var s = "baa";
+
+assertEquals([["b"]], [...s.matchAll(/./)]);
+
+RegExp.prototype[Symbol.matchAll] = () => 42;
+assertEquals(42, s.matchAll(/a./));
diff --git a/deps/v8/test/mjsunit/regexp-override-symbol-match.js b/deps/v8/test/mjsunit/regexp-override-symbol-match.js
new file mode 100644
index 0000000000..da9b6f5aff
--- /dev/null
+++ b/deps/v8/test/mjsunit/regexp-override-symbol-match.js
@@ -0,0 +1,10 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var s = "baa";
+
+assertEquals(["aa"], s.match(/a./));
+
+RegExp.prototype[Symbol.match] = () => 42;
+assertEquals(42, s.match(/a./));
diff --git a/deps/v8/test/mjsunit/regexp-override-symbol-replace.js b/deps/v8/test/mjsunit/regexp-override-symbol-replace.js
new file mode 100644
index 0000000000..8f3e5c1620
--- /dev/null
+++ b/deps/v8/test/mjsunit/regexp-override-symbol-replace.js
@@ -0,0 +1,10 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var s = "baa";
+
+assertEquals("bca", s.replace(/a/, "c"));
+
+RegExp.prototype[Symbol.replace] = () => 42;
+assertEquals(42, s.replace(/a./));
diff --git a/deps/v8/test/mjsunit/regexp-override-symbol-search.js b/deps/v8/test/mjsunit/regexp-override-symbol-search.js
new file mode 100644
index 0000000000..2daf25a65a
--- /dev/null
+++ b/deps/v8/test/mjsunit/regexp-override-symbol-search.js
@@ -0,0 +1,10 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var s = "baa";
+
+assertEquals(1, s.search(/a/));
+
+RegExp.prototype[Symbol.search] = () => 42;
+assertEquals(42, s.search(/a/));
diff --git a/deps/v8/test/mjsunit/regexp-override-symbol-split.js b/deps/v8/test/mjsunit/regexp-override-symbol-split.js
new file mode 100644
index 0000000000..f5d35b1862
--- /dev/null
+++ b/deps/v8/test/mjsunit/regexp-override-symbol-split.js
@@ -0,0 +1,10 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var s = "baa";
+
+assertEquals(["b", "", ""], s.split(/a/));
+
+RegExp.prototype[Symbol.split] = () => 42;
+assertEquals(42, s.split(/a./));
diff --git a/deps/v8/test/mjsunit/regexp.js b/deps/v8/test/mjsunit/regexp.js
index dd4832b567..aabac1ed9f 100644
--- a/deps/v8/test/mjsunit/regexp.js
+++ b/deps/v8/test/mjsunit/regexp.js
@@ -808,3 +808,19 @@ assertFalse(/^[\d-X-Z]*$/.test("234-XYZ-432"));
assertFalse(/\uDB88|\uDBEC|aa/.test(""));
assertFalse(/\uDB88|\uDBEC|aa/u.test(""));
+
+// EscapeRegExpPattern
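+// The source getter escapes line terminators so that the pattern stays valid
+// when embedded between slashes.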
+assertEquals("\\n", /\n/.source);
+assertEquals("\\n", new RegExp("\n").source);
+assertEquals("\\n", new RegExp("\\n").source);
+assertEquals("\\\\n", /\\n/.source);
+assertEquals("\\r", /\r/.source);
+assertEquals("\\r", new RegExp("\r").source);
+assertEquals("\\r", new RegExp("\\r").source);
+assertEquals("\\\\r", /\\r/.source);
+assertEquals("\\u2028", /\u2028/.source);
+assertEquals("\\u2028", new RegExp("\u2028").source);
+assertEquals("\\u2028", new RegExp("\\u2028").source);
+assertEquals("\\u2029", /\u2029/.source);
+assertEquals("\\u2029", new RegExp("\u2029").source);
+assertEquals("\\u2029", new RegExp("\\u2029").source);
diff --git a/deps/v8/test/mjsunit/regress-906893.js b/deps/v8/test/mjsunit/regress-906893.js
new file mode 100644
index 0000000000..4b4942d665
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress-906893.js
@@ -0,0 +1,21 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+const r = /x/;
+let counter = 0;
+
+r.exec = () => { counter++; return null; }
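+// RegExp.prototype.test must call the overridden exec, also from optimized code.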
+
+function f() {
+ r.test("ABcd");
+}
+
+f();
+assertEquals(1, counter);
+%OptimizeFunctionOnNextCall(f);
+
+f();
+assertEquals(2, counter);
diff --git a/deps/v8/test/mjsunit/regress-918763.js b/deps/v8/test/mjsunit/regress-918763.js
new file mode 100644
index 0000000000..45916f015a
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress-918763.js
@@ -0,0 +1,14 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function C() {}
+C.__proto__ = null;
+
+function f(c) { return 0 instanceof c; }
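+// Optimized code must still throw when the right-hand side is not callable.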
+
+f(C);
+%OptimizeFunctionOnNextCall(f);
+assertThrows(() => f(0));
diff --git a/deps/v8/test/mjsunit/regress-regexp-functional-replace-slow.js b/deps/v8/test/mjsunit/regress-regexp-functional-replace-slow.js
new file mode 100644
index 0000000000..033bcee1ef
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress-regexp-functional-replace-slow.js
@@ -0,0 +1,7 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+/a/.constructor = "";
+
+assertEquals("b", "a".replace(/a/, () => "b"));
diff --git a/deps/v8/test/mjsunit/regress-v8-8445-2.js b/deps/v8/test/mjsunit/regress-v8-8445-2.js
new file mode 100644
index 0000000000..828b877d0c
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress-v8-8445-2.js
@@ -0,0 +1,27 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --harmony-string-matchall
+
+class MyRegExp {
+ exec() { return null; }
+}
+
+var r = /c/;
+
+assertEquals(["ab", ""], "abc".split(r));
+assertEquals([["c"]], [..."c".matchAll(r)]);
+
+r.constructor = { [Symbol.species] : MyRegExp };
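+// With a species whose exec returns null, split and matchAll find no matches.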
+
+assertEquals(["abc"], "abc".split(r));
+assertEquals([], [..."c".matchAll(r)]);
+
+assertEquals(["ab", ""], "abc".split(/c/));
+assertEquals([["c"]], [..."c".matchAll(/c/)]);
+
+RegExp.prototype.constructor = { [Symbol.species] : MyRegExp };
+
+assertEquals(["abc"], "abc".split(/c/));
+assertEquals([], [..."c".matchAll(/c/)]);
diff --git a/deps/v8/test/mjsunit/regress-v8-8445.js b/deps/v8/test/mjsunit/regress-v8-8445.js
new file mode 100644
index 0000000000..7641416ba0
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress-v8-8445.js
@@ -0,0 +1,17 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --harmony-string-matchall
+
+class MyRegExp {
+ exec() { return null; }
+}
+
+assertEquals(["ab", ""], "abc".split(/c/));
+assertEquals([["a"]], [..."a".matchAll(/a/)]);
+
+Object.defineProperty(RegExp, Symbol.species, { get() { return MyRegExp; }});
+
+assertEquals(["abc"], "abc".split(/c/));
+assertEquals([], [..."a".matchAll(/a/)]);
diff --git a/deps/v8/test/mjsunit/regress/regress-1199637.js b/deps/v8/test/mjsunit/regress/regress-1199637.js
index ae7c5e03f0..763484d850 100644
--- a/deps/v8/test/mjsunit/regress/regress-1199637.js
+++ b/deps/v8/test/mjsunit/regress/regress-1199637.js
@@ -32,44 +32,53 @@
var NONE = 0;
var READ_ONLY = 1;
+function AddNamedProperty(object, name, value, attrs) {
+ Object.defineProperty(object, name, {
+ value,
+ configurable: true,
+ enumerable: true,
+ writable: (attrs & READ_ONLY) === 0
+ });
+}
+
// Use DeclareGlobal...
-%AddNamedProperty(this.__proto__, "a", 1234, NONE);
+AddNamedProperty(this.__proto__, "a", 1234, NONE);
assertEquals(1234, a);
eval("var a = 5678;");
assertEquals(5678, a);
-%AddNamedProperty(this.__proto__, "b", 1234, NONE);
+AddNamedProperty(this.__proto__, "b", 1234, NONE);
assertEquals(1234, b);
eval("var b = 5678;");
assertEquals(5678, b);
-%AddNamedProperty(this.__proto__, "c", 1234, READ_ONLY);
+AddNamedProperty(this.__proto__, "c", 1234, READ_ONLY);
assertEquals(1234, c);
eval("var c = 5678;");
assertEquals(5678, c);
-%AddNamedProperty(this.__proto__, "d", 1234, READ_ONLY);
+AddNamedProperty(this.__proto__, "d", 1234, READ_ONLY);
assertEquals(1234, d);
eval("var d = 5678;");
assertEquals(5678, d);
// Use DeclareContextSlot...
-%AddNamedProperty(this.__proto__, "x", 1234, NONE);
+AddNamedProperty(this.__proto__, "x", 1234, NONE);
assertEquals(1234, x);
eval("with({}) { var x = 5678; }");
assertEquals(5678, x);
-%AddNamedProperty(this.__proto__, "y", 1234, NONE);
+AddNamedProperty(this.__proto__, "y", 1234, NONE);
assertEquals(1234, y);
eval("with({}) { var y = 5678; }");
assertEquals(5678, y);
-%AddNamedProperty(this.__proto__, "z", 1234, READ_ONLY);
+AddNamedProperty(this.__proto__, "z", 1234, READ_ONLY);
assertEquals(1234, z);
eval("with({}) { var z = 5678; }");
assertEquals(5678, z);
-%AddNamedProperty(this.__proto__, "w", 1234, READ_ONLY);
+AddNamedProperty(this.__proto__, "w", 1234, READ_ONLY);
assertEquals(1234, w);
eval("with({}) { var w = 5678; }");
assertEquals(5678, w);
diff --git a/deps/v8/test/mjsunit/regress/regress-2618.js b/deps/v8/test/mjsunit/regress/regress-2618.js
index 8e539fffa4..9feb911c81 100644
--- a/deps/v8/test/mjsunit/regress/regress-2618.js
+++ b/deps/v8/test/mjsunit/regress/regress-2618.js
@@ -28,7 +28,11 @@
// Flags: --use-osr --allow-natives-syntax --ignition-osr --opt
// Flags: --no-always-opt
-// Can't OSR with always-opt.
+// Can't OSR with always-opt or in Lite mode.
+if (isNeverOptimizeLiteMode()) {
+ print("Warning: skipping test that requires optimization in Lite mode.");
+ quit(0);
+}
assertFalse(isAlwaysOptimize());
function f() {
diff --git a/deps/v8/test/mjsunit/regress/regress-2989.js b/deps/v8/test/mjsunit/regress/regress-2989.js
index 49c4a1cb03..d11e352105 100644
--- a/deps/v8/test/mjsunit/regress/regress-2989.js
+++ b/deps/v8/test/mjsunit/regress/regress-2989.js
@@ -21,7 +21,12 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --allow-natives-syntax
+// Flags: --allow-natives-syntax --opt
+
+if (isNeverOptimizeLiteMode()) {
+ print("Warning: skipping test that requires optimization in Lite mode.");
+ quit(0);
+}
(function ArgumentsObjectChange() {
function f(x) {
diff --git a/deps/v8/test/mjsunit/regress/regress-334.js b/deps/v8/test/mjsunit/regress/regress-334.js
index c52c72aa90..9a20c6ae76 100644
--- a/deps/v8/test/mjsunit/regress/regress-334.js
+++ b/deps/v8/test/mjsunit/regress/regress-334.js
@@ -33,14 +33,23 @@ var READ_ONLY = 1;
var DONT_ENUM = 2;
var DONT_DELETE = 4;
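+// Emulates the %AddNamedProperty runtime call used previously by mapping
+// the attribute bits onto a property descriptor.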
+function AddNamedProperty(object, name, value, attrs) {
+ Object.defineProperty(object, name, {
+ value,
+ configurable: (attrs & DONT_DELETE) === 0,
+ enumerable: (attrs & DONT_ENUM) === 0,
+ writable: (attrs & READ_ONLY) === 0
+ });
+}
+
function func1(){}
function func2(){}
var object = {__proto__:{}};
-%AddNamedProperty(object, "foo", func1, DONT_ENUM | DONT_DELETE);
-%AddNamedProperty(object, "bar", func1, DONT_ENUM | READ_ONLY);
-%AddNamedProperty(object, "baz", func1, DONT_DELETE | READ_ONLY);
-%AddNamedProperty(object.__proto__, "bif", func1, DONT_ENUM | DONT_DELETE);
+AddNamedProperty(object, "foo", func1, DONT_ENUM | DONT_DELETE);
+AddNamedProperty(object, "bar", func1, DONT_ENUM | READ_ONLY);
+AddNamedProperty(object, "baz", func1, DONT_DELETE | READ_ONLY);
+AddNamedProperty(object.__proto__, "bif", func1, DONT_ENUM | DONT_DELETE);
object.bif = func2;
function enumerable(obj) {
diff --git a/deps/v8/test/mjsunit/regress/regress-336820.js b/deps/v8/test/mjsunit/regress/regress-336820.js
index 56d88747fb..660a8fc314 100644
--- a/deps/v8/test/mjsunit/regress/regress-336820.js
+++ b/deps/v8/test/mjsunit/regress/regress-336820.js
@@ -28,11 +28,10 @@
// Flags: --max-old-space-size=1600
assertThrows((function() {
- s = "Hello World!\n";
- while (true) {
- x = new Array();
- x[0] = s;
- x[1000] = s;
- x[1000000] = s;
- s = x.join("::");
- }}), RangeError);
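+  // Joining ~200 copies of a 1e7-character string overflows the maximum
+  // string length, so join() throws a RangeError.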
+ let str = "a".repeat(1e7);
+ let arr = new Array(2000);
+ for (let i = 0; i < 200; ++i) {
+ arr[i*10] = str;
+ }
+ let res = arr.join(':');
+}), RangeError);
diff --git a/deps/v8/test/mjsunit/regress/regress-353004.js b/deps/v8/test/mjsunit/regress/regress-353004.js
index fe19354d8b..f5430c6df4 100644
--- a/deps/v8/test/mjsunit/regress/regress-353004.js
+++ b/deps/v8/test/mjsunit/regress/regress-353004.js
@@ -8,7 +8,7 @@ var buffer1 = new ArrayBuffer(100 * 1024);
assertThrows(function() {
var array1 = new Uint8Array(buffer1, {valueOf : function() {
- %ArrayBufferNeuter(buffer1);
+ %ArrayBufferDetach(buffer1);
return 0;
}});
}, TypeError);
@@ -17,7 +17,7 @@ var buffer2 = new ArrayBuffer(100 * 1024);
assertThrows(function() {
var array2 = new Uint8Array(buffer2, 0, {valueOf : function() {
- %ArrayBufferNeuter(buffer2);
+ %ArrayBufferDetach(buffer2);
return 100 * 1024;
}});
}, TypeError);
@@ -30,7 +30,7 @@ assertThrows(() =>
return 0;
}}, {valueOf : function() {
convertedLength = true;
- %ArrayBufferNeuter(buffer1);
+ %ArrayBufferDetach(buffer1);
return 0;
}}), TypeError);
assertTrue(convertedOffset);
@@ -38,7 +38,7 @@ assertTrue(convertedLength);
var buffer3 = new ArrayBuffer(100 * 1024 * 1024);
var dataView1 = new DataView(buffer3, {valueOf : function() {
- %ArrayBufferNeuter(buffer3);
+ %ArrayBufferDetach(buffer3);
return 0;
}});
@@ -47,7 +47,7 @@ assertEquals(0, dataView1.byteLength);
var buffer4 = new ArrayBuffer(100 * 1024);
assertThrows(function() {
var dataView2 = new DataView(buffer4, 0, {valueOf : function() {
- %ArrayBufferNeuter(buffer4);
+ %ArrayBufferDetach(buffer4);
return 100 * 1024 * 1024;
}});
}, RangeError);
@@ -56,7 +56,7 @@ assertThrows(function() {
var buffer5 = new ArrayBuffer(100 * 1024);
assertThrows(function() {
buffer5.slice({valueOf : function() {
- %ArrayBufferNeuter(buffer5);
+ %ArrayBufferDetach(buffer5);
return 0;
}}, 100 * 1024 * 1024);
}, TypeError);
@@ -65,7 +65,7 @@ assertThrows(function() {
var buffer7 = new ArrayBuffer(100 * 1024 * 1024);
assertThrows(function() {
buffer7.slice(0, {valueOf : function() {
- %ArrayBufferNeuter(buffer7);
+ %ArrayBufferDetach(buffer7);
return 100 * 1024 * 1024;
}});
}, TypeError);
@@ -74,7 +74,7 @@ var buffer9 = new ArrayBuffer(1024);
var array9 = new Uint8Array(buffer9);
assertThrows(() =>
array9.subarray({valueOf : function() {
- %ArrayBufferNeuter(buffer9);
+ %ArrayBufferDetach(buffer9);
return 0;
}}, 1024), TypeError);
assertEquals(0, array9.length);
@@ -83,7 +83,7 @@ var buffer11 = new ArrayBuffer(1024);
var array11 = new Uint8Array(buffer11);
assertThrows(() =>
array11.subarray(0, {valueOf : function() {
- %ArrayBufferNeuter(buffer11);
+ %ArrayBufferDetach(buffer11);
return 1024;
}}), TypeError);
assertEquals(0, array11.length);
diff --git a/deps/v8/test/mjsunit/regress/regress-4964.js b/deps/v8/test/mjsunit/regress/regress-4964.js
index d834708667..ad259dca54 100644
--- a/deps/v8/test/mjsunit/regress/regress-4964.js
+++ b/deps/v8/test/mjsunit/regress/regress-4964.js
@@ -4,19 +4,19 @@
// Flags: --allow-natives-syntax
-// Neutered source
+// Detached source
var ab = new ArrayBuffer(10);
-ab.constructor = { get [Symbol.species]() { %ArrayBufferNeuter(ab); return ArrayBuffer; } };
+ab.constructor = { get [Symbol.species]() { %ArrayBufferDetach(ab); return ArrayBuffer; } };
assertThrows(() => ab.slice(0), TypeError);
-// Neutered target
-class NeuteredArrayBuffer extends ArrayBuffer {
+// Detached target
+class DetachedArrayBuffer extends ArrayBuffer {
constructor(...args) {
super(...args);
- %ArrayBufferNeuter(this);
+ %ArrayBufferDetach(this);
}
}
var ab2 = new ArrayBuffer(10);
-ab2.constructor = NeuteredArrayBuffer;
+ab2.constructor = DetachedArrayBuffer;
assertThrows(() => ab2.slice(0), TypeError);
diff --git a/deps/v8/test/mjsunit/regress/regress-5405.js b/deps/v8/test/mjsunit/regress/regress-5405.js
index eeab479384..e21818c880 100644
--- a/deps/v8/test/mjsunit/regress/regress-5405.js
+++ b/deps/v8/test/mjsunit/regress/regress-5405.js
@@ -11,7 +11,7 @@ let log = [];
return 10;
}
})();
-%RunMicrotasks();
+%PerformMicrotaskCheckpoint();
(function() {
with ({get ['.new.target']() { log.push('new.target') }}) {
diff --git a/deps/v8/test/mjsunit/regress/regress-5691.js b/deps/v8/test/mjsunit/regress/regress-5691.js
index 6cda92ca79..b460ac4b99 100644
--- a/deps/v8/test/mjsunit/regress/regress-5691.js
+++ b/deps/v8/test/mjsunit/regress/regress-5691.js
@@ -18,6 +18,6 @@ Promise.resolve(Promise.resolve()).then(() => log += "|fast-resolve");
(class extends Promise {}).resolve(Promise.resolve()).then(() => log += "|slow-resolve");
log += "|start";
- %RunMicrotasks();
+%PerformMicrotaskCheckpoint();
assertEquals("|start|turn1|fast-resolve|turn2|turn3|slow-resolve|turn4\n\
|start|turn1|fast-resolve|turn2|turn3|slow-resolve|turn4", result);
diff --git a/deps/v8/test/mjsunit/regress/regress-6711.js b/deps/v8/test/mjsunit/regress/regress-6711.js
new file mode 100644
index 0000000000..c1b61c72a1
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-6711.js
@@ -0,0 +1,21 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Ensure `delete this` throws before `super` is called.
+assertThrows(()=>{
+ new class extends Object {
+ constructor() {
+ delete this;
+ super();
+ }
+ }
+}, ReferenceError);
+
+// Ensure `delete this` doesn't throw after `super` is called.
+new class extends Object {
+ constructor() {
+ super();
+ delete this;
+ }
+}
diff --git a/deps/v8/test/mjsunit/regress/regress-682349.js b/deps/v8/test/mjsunit/regress/regress-682349.js
index d94e0691d6..f82f242f03 100644
--- a/deps/v8/test/mjsunit/regress/regress-682349.js
+++ b/deps/v8/test/mjsunit/regress/regress-682349.js
@@ -9,5 +9,5 @@ function f() {
success = (f.caller === null);
}
Promise.resolve().then(f);
-%RunMicrotasks();
+%PerformMicrotaskCheckpoint();
assertTrue(success);
diff --git a/deps/v8/test/mjsunit/regress/regress-707410.js b/deps/v8/test/mjsunit/regress/regress-707410.js
index a6a0aa52bb..cc3f58cdb5 100644
--- a/deps/v8/test/mjsunit/regress/regress-707410.js
+++ b/deps/v8/test/mjsunit/regress/regress-707410.js
@@ -5,5 +5,5 @@
// Flags: --allow-natives-syntax
var a = new Uint8Array(1024*1024);
-%ArrayBufferNeuter(a.buffer);
+%ArrayBufferDetach(a.buffer);
assertThrows(() => new Uint8Array(a));
diff --git a/deps/v8/test/mjsunit/regress/regress-740694.js b/deps/v8/test/mjsunit/regress/regress-740694.js
index f07eb1b3a7..6f31fef0c7 100644
--- a/deps/v8/test/mjsunit/regress/regress-740694.js
+++ b/deps/v8/test/mjsunit/regress/regress-740694.js
@@ -17,6 +17,6 @@ var error;
var promise = __f_0();
promise.then(assertUnreachable,
err => { done = true; error = err });
-%RunMicrotasks();
+%PerformMicrotaskCheckpoint();
assertTrue(error.startsWith('Error reading'));
assertTrue(done);
diff --git a/deps/v8/test/mjsunit/regress/regress-748069.js b/deps/v8/test/mjsunit/regress/regress-748069.js
index e652cca672..86c2f8f5ec 100644
--- a/deps/v8/test/mjsunit/regress/regress-748069.js
+++ b/deps/v8/test/mjsunit/regress/regress-748069.js
@@ -2,6 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+// This test gets very slow with slow asserts.
+// Flags: --noenable-slow-asserts
+
try {
var a = 'a'.repeat(1 << 28);
} catch (e) {
diff --git a/deps/v8/test/mjsunit/regress/regress-7773.js b/deps/v8/test/mjsunit/regress/regress-7773.js
new file mode 100644
index 0000000000..7930ae9106
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-7773.js
@@ -0,0 +1,71 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+(function testFunctionNames() {
+ let descriptor = {
+ value: '',
+ writable: false,
+ enumerable: false,
+ configurable: true
+ };
+ // Functions have a "name" property by default.
+ assertEquals(
+ descriptor, Object.getOwnPropertyDescriptor(function(){}, 'name'));
+ let a = { fn: function(){} };
+ assertSame('fn', a.fn.name);
+ descriptor.value = 'fn';
+ assertEquals(descriptor, Object.getOwnPropertyDescriptor(a.fn, 'name'));
+
+ let b = { __proto__: function(){} };
+ assertSame('', b.__proto__.name);
+ descriptor.value = '';
+ assertEquals(
+ descriptor, Object.getOwnPropertyDescriptor(b.__proto__, 'name'));
+
+ let c = { fn: function F(){} };
+ assertSame('F', c.fn.name);
+ descriptor.value = 'F';
+ assertEquals(descriptor, Object.getOwnPropertyDescriptor(c.fn, 'name'));
+
+ let d = { __proto__: function E(){} };
+ assertSame('E', d.__proto__.name);
+ descriptor.value = 'E';
+ assertEquals(
+ descriptor, Object.getOwnPropertyDescriptor(d.__proto__, 'name'));
+})();
+
+(function testClassNames() {
+ let descriptor = {
+ value: '',
+ writable: false,
+ enumerable: false,
+ configurable: true
+ };
+
+ // Anonymous classes do not have a "name" property by default.
+ assertSame(undefined, Object.getOwnPropertyDescriptor(class {}, 'name'));
+ descriptor.value = 'C';
+ assertEquals(descriptor, Object.getOwnPropertyDescriptor(class C {}, 'name'));
+
+ let a = { fn: class {} };
+ assertSame('fn', a.fn.name);
+ descriptor.value = 'fn';
+ assertEquals(descriptor, Object.getOwnPropertyDescriptor(a.fn, 'name'));
+
+ let b = { __proto__: class {} };
+ assertSame('', b.__proto__.name);
+ assertSame(
+ undefined, Object.getOwnPropertyDescriptor(b.__proto__, 'name'));
+
+ let c = { fn: class F {} };
+ assertSame('F', c.fn.name);
+ descriptor.value = 'F';
+ assertEquals(descriptor, Object.getOwnPropertyDescriptor(c.fn, 'name'));
+
+ let d = { __proto__: class F {} };
+ assertSame('F', d.__proto__.name);
+ descriptor.value = 'F';
+ assertEquals(
+ descriptor, Object.getOwnPropertyDescriptor(d.__proto__, 'name'));
+})();
diff --git a/deps/v8/test/mjsunit/regress/regress-778668.js b/deps/v8/test/mjsunit/regress/regress-778668.js
index cb6a359fd9..93bde12222 100644
--- a/deps/v8/test/mjsunit/regress/regress-778668.js
+++ b/deps/v8/test/mjsunit/regress/regress-778668.js
@@ -3,6 +3,22 @@
// found in the LICENSE file.
(function () {
+ function f() {
+ arguments.length = -5;
+ Array.prototype.slice.call(arguments);
+ }
+ f('a')
+})();
+
+(function () {
+ function f() {
+ arguments.length = 2.3;
+ Array.prototype.slice.call(arguments);
+ }
+ f('a')
+})();
+
+(function () {
function f( __v_59960) {
arguments.length = -5;
Array.prototype.slice.call(arguments);
@@ -13,7 +29,6 @@
(function () {
function f( __v_59960) {
arguments.length = 2.3;
- print(arguments.length);
Array.prototype.slice.call(arguments);
}
f('a')
diff --git a/deps/v8/test/mjsunit/regress/regress-797581.js b/deps/v8/test/mjsunit/regress/regress-797581.js
index eb87e67128..3dfad4c463 100644
--- a/deps/v8/test/mjsunit/regress/regress-797581.js
+++ b/deps/v8/test/mjsunit/regress/regress-797581.js
@@ -17,7 +17,7 @@ function TryToLoadModule(filename, expect_error, token) {
}
import(filename).catch(SetError);
- %RunMicrotasks();
+ %PerformMicrotaskCheckpoint();
if (expect_error) {
assertTrue(caught_error instanceof SyntaxError);
diff --git a/deps/v8/test/mjsunit/regress/regress-800651.js b/deps/v8/test/mjsunit/regress/regress-800651.js
index c6410f702e..05e31fe4ca 100644
--- a/deps/v8/test/mjsunit/regress/regress-800651.js
+++ b/deps/v8/test/mjsunit/regress/regress-800651.js
@@ -27,5 +27,5 @@ function g() {
let namespace = Promise.resolve().then(importUndefined);
}
g();
- %RunMicrotasks();
+%PerformMicrotaskCheckpoint();
assertEquals(list, [1,2]);
diff --git a/deps/v8/test/mjsunit/regress/regress-813440.js b/deps/v8/test/mjsunit/regress/regress-813440.js
index d2aef2528b..8fcb695f5a 100644
--- a/deps/v8/test/mjsunit/regress/regress-813440.js
+++ b/deps/v8/test/mjsunit/regress/regress-813440.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --invoke-weak-callbacks --omit-quit --wasm-async-compilation --expose-wasm --allow-natives-syntax
+// Flags: --invoke-weak-callbacks --omit-quit --expose-wasm --allow-natives-syntax
load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
diff --git a/deps/v8/test/mjsunit/regress/regress-8241.js b/deps/v8/test/mjsunit/regress/regress-8241.js
new file mode 100644
index 0000000000..fb9d5475cb
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-8241.js
@@ -0,0 +1,6 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function f(x) { }
+f(x=>x, [x,y] = [1,2]);
diff --git a/deps/v8/test/mjsunit/regress/regress-8377.js b/deps/v8/test/mjsunit/regress/regress-8377.js
new file mode 100644
index 0000000000..32d2eb74fb
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-8377.js
@@ -0,0 +1,24 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
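+// Both spellings of -1 / 1 | 0 must fold to -1, and the module must still
+// validate as asm.js (%IsAsmWasmCode below).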
+function Module(global, env, buffer) {
+ "use asm";
+ function test1() {
+ var x = 0;
+ x = -1 / 1 | 0;
+ return x | 0;
+ }
+ function test2() {
+ var x = 0;
+ x = (-1 / 1) | 0;
+ return x | 0;
+ }
+ return { test1: test1, test2: test2 };
+};
+let module = Module(this);
+assertEquals(-1, module.test1());
+assertEquals(-1, module.test2());
+assertTrue(%IsAsmWasmCode(Module));
diff --git a/deps/v8/test/mjsunit/regress/regress-8384.js b/deps/v8/test/mjsunit/regress/regress-8384.js
new file mode 100644
index 0000000000..bbb0d575e4
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-8384.js
@@ -0,0 +1,64 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function assert(cond) { if (!cond) throw "Assert"; }
+
+function Constructor() {
+ this.padding1 = null;
+ this.padding2 = null;
+ this.padding3 = null;
+ this.padding4 = null;
+ this.padding5 = null;
+ this.padding6 = null;
+ this.padding7 = null;
+ this.padding8 = null;
+ this.padding9 = null;
+ this.padding10 = null;
+ this.padding11 = null;
+ this.padding12 = null;
+ this.padding13 = null;
+ this.padding14 = null;
+ this.padding15 = null;
+ this.padding16 = null;
+ this.padding17 = null;
+ this.padding18 = null;
+ this.padding19 = null;
+ this.padding20 = null;
+ this.padding21 = null;
+ this.padding22 = null;
+ this.padding23 = null;
+ this.padding24 = null;
+ this.padding25 = null;
+ this.padding26 = null;
+ this.padding27 = null;
+ this.padding28 = null;
+ this.padding29 = null;
+ this.array = null;
+ this.accumulator = 0;
+}
+
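+// The padding fields are presumably there to push the interesting fields
+// into out-of-object property storage.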
+function f(k) {
+ var c = k.accumulator | 0;
+ k.accumulator = k.array[(k.accumulator + 1 | 0)] | 0;
+ k.array[c + 1 | 0] = (-1);
+ var head = k.accumulator;
+ assert((head + c) & 1);
+ while (head >= 0) {
+ head = k.array[head + 1 | 0];
+ }
+ return;
+}
+
+const tmp = new Constructor();
+tmp.array = new Int32Array(5);
+for (var i = 1; i < 5; i++)
+ tmp.array[i] = i | 0;
+tmp.accumulator = 0;
+
+f(tmp);
+f(tmp);
+%OptimizeFunctionOnNextCall(f);
+f(tmp); // This must not trigger the {assert}.
diff --git a/deps/v8/test/mjsunit/regress/regress-840106.js b/deps/v8/test/mjsunit/regress/regress-840106.js
index b49464dce5..568ab75479 100644
--- a/deps/v8/test/mjsunit/regress/regress-840106.js
+++ b/deps/v8/test/mjsunit/regress/regress-840106.js
@@ -8,7 +8,7 @@ var buffer = new ArrayBuffer(1024 * 1024);
buffer.constructor = {
[Symbol.species]: new Proxy(function() {}, {
get: _ => {
- %ArrayBufferNeuter(buffer);
+ %ArrayBufferDetach(buffer);
}
})
};
diff --git a/deps/v8/test/mjsunit/regress/regress-852765.js b/deps/v8/test/mjsunit/regress/regress-852765.js
index bdd3a9ddb2..393adf2079 100644
--- a/deps/v8/test/mjsunit/regress/regress-852765.js
+++ b/deps/v8/test/mjsunit/regress/regress-852765.js
@@ -3,14 +3,14 @@
// found in the LICENSE file.
// The actual regression test
-assertThrows("(import(foo)) =>", undefined, "Unexpected token import");
+assertThrows("(import(foo)) =>", undefined, "Invalid destructuring assignment target");
// Other related tests
-assertThrows("import(foo) =>", undefined, "Unexpected token import");
-assertThrows("(a, import(foo)) =>", undefined, "Unexpected token import");
-assertThrows("(1, import(foo)) =>", undefined, "Unexpected number");
+assertThrows("import(foo) =>", undefined, "Malformed arrow function parameter list");
+assertThrows("(a, import(foo)) =>", undefined, "Invalid destructuring assignment target");
+assertThrows("(1, import(foo)) =>", undefined, "Invalid destructuring assignment target");
assertThrows("(super(foo)) =>", undefined, "'super' keyword unexpected here");
-assertThrows("(bar(foo)) =>", undefined, "Unexpected token (");
+assertThrows("(bar(foo)) =>", undefined, "Invalid destructuring assignment target");
// No syntax errors
assertThrows("[import(foo).then] = [1];", undefined, "foo is not defined");
diff --git a/deps/v8/test/mjsunit/regress/regress-8607.js b/deps/v8/test/mjsunit/regress/regress-8607.js
new file mode 100644
index 0000000000..9f77bc5d79
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-8607.js
@@ -0,0 +1,8 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+assertThrows("[({ p: this }), [][0]] = x", SyntaxError);
+assertThrows("[...a, [][0]] = []", SyntaxError);
+assertThrows("[...o=1,[][0]] = []", SyntaxError);
+assertThrows("({x(){},y:[][0]} = {})", SyntaxError);
diff --git a/deps/v8/test/mjsunit/regress/regress-8630.js b/deps/v8/test/mjsunit/regress/regress-8630.js
new file mode 100644
index 0000000000..f51807b4a5
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-8630.js
@@ -0,0 +1,32 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+// Parameters can't have parentheses (both patterns and identifiers)
+assertThrows("( ({x: 1}) ) => {};", SyntaxError);
+assertThrows("( (x) ) => {}", SyntaxError);
+assertThrows("( ({x: 1}) = y ) => {}", SyntaxError);
+assertThrows("( (x) = y ) => {}", SyntaxError);
+
+// Declarations can't have parentheses (both patterns and identifiers)
+assertThrows("let [({x: 1})] = [];", SyntaxError);
+assertThrows("let [(x)] = [];", SyntaxError);
+assertThrows("let [({x: 1}) = y] = [];", SyntaxError);
+assertThrows("let [(x) = y] = [];", SyntaxError);
+assertThrows("var [({x: 1})] = [];", SyntaxError);
+assertThrows("var [(x)] = [];", SyntaxError);
+assertThrows("var [({x: 1}) = y] = [];", SyntaxError);
+assertThrows("var [(x) = y] = [];", SyntaxError);
+
+// Patterns can't have parentheses in assignments either
+assertThrows("[({x: 1}) = y] = [];", SyntaxError);
+
+// Parentheses are fine around identifiers in assignments though, even inside a
+// pattern
+var x;
+[(x)] = [2];
+assertEquals(x, 2);
+[(x) = 3] = [];
+assertEquals(x, 3);
diff --git a/deps/v8/test/mjsunit/regress/regress-8659.js b/deps/v8/test/mjsunit/regress/regress-8659.js
new file mode 100644
index 0000000000..636c667c79
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-8659.js
@@ -0,0 +1,5 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+assertThrows("const [(x)] = []", SyntaxError);
diff --git a/deps/v8/test/mjsunit/regress/regress-8708.js b/deps/v8/test/mjsunit/regress/regress-8708.js
new file mode 100644
index 0000000000..4faff3324c
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-8708.js
@@ -0,0 +1,10 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --stack-size=100
+
+let array = new Array(1);
+array.splice(1, 0, array);
+
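+// The array contains itself, so flat(Infinity) recurses until the
+// deliberately small stack overflows and a RangeError is thrown.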
+assertThrows(() => array.flat(Infinity), RangeError);
diff --git a/deps/v8/test/mjsunit/regress/regress-896326.js b/deps/v8/test/mjsunit/regress/regress-896326.js
new file mode 100644
index 0000000000..88c2e56e2c
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-896326.js
@@ -0,0 +1,11 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --stack-size=100
+
+function f() {
+}
+
+var large_array = Array(150 * 1024);
+assertThrows('new f(... large_array)', RangeError);
diff --git a/deps/v8/test/mjsunit/regress/regress-897512.js b/deps/v8/test/mjsunit/regress/regress-897512.js
index 0e676a06c2..649ee2b922 100644
--- a/deps/v8/test/mjsunit/regress/regress-897512.js
+++ b/deps/v8/test/mjsunit/regress/regress-897512.js
@@ -20,5 +20,4 @@ assertEquals(o51.length, 39);
// Sort triggers the bug.
o51.sort();
-// TODO(chromium:897512): The length should be 39.
-assertEquals(o51.length, 101);
+assertEquals(o51.length, 39);
diff --git a/deps/v8/test/mjsunit/regress/regress-897815.js b/deps/v8/test/mjsunit/regress/regress-897815.js
new file mode 100644
index 0000000000..40a8c5e1ef
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-897815.js
@@ -0,0 +1,16 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+(function __f_19350() {
+ function __f_19351() {
+ function __f_19352() {
+ }
+ }
+ try {
+ __f_19350();
+ } catch (e) {}
+ %OptimizeFunctionOnNextCall(__f_19351)
+})();
diff --git a/deps/v8/test/mjsunit/regress/regress-898812.js b/deps/v8/test/mjsunit/regress/regress-898812.js
new file mode 100644
index 0000000000..889bd53d3a
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-898812.js
@@ -0,0 +1,5 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+assertThrows("(async)(a)=>{}", SyntaxError);
diff --git a/deps/v8/test/mjsunit/regress/regress-898936.js b/deps/v8/test/mjsunit/regress/regress-898936.js
new file mode 100644
index 0000000000..b32c4691c4
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-898936.js
@@ -0,0 +1,5 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+assertThrows("async(...x=e)()=>");
diff --git a/deps/v8/test/mjsunit/regress/regress-899115.js b/deps/v8/test/mjsunit/regress/regress-899115.js
new file mode 100644
index 0000000000..5b4099792f
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-899115.js
@@ -0,0 +1,13 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function foo() {
+ Object.getPrototypeOf([]).includes();
+}
+
+foo();
+%OptimizeFunctionOnNextCall(foo);
+foo();
diff --git a/deps/v8/test/mjsunit/regress/regress-899133.js b/deps/v8/test/mjsunit/regress/regress-899133.js
new file mode 100644
index 0000000000..4e11d49160
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-899133.js
@@ -0,0 +1,5 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+assertThrows("let fun = ({a} = {a: 30}) => {", SyntaxError);
diff --git a/deps/v8/test/mjsunit/regress/regress-899474.js b/deps/v8/test/mjsunit/regress/regress-899474.js
new file mode 100644
index 0000000000..ea16bb0098
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-899474.js
@@ -0,0 +1,5 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+assertThrows("class A {...", SyntaxError);
diff --git a/deps/v8/test/mjsunit/regress/regress-899537.js b/deps/v8/test/mjsunit/regress/regress-899537.js
new file mode 100644
index 0000000000..3cd772cc65
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-899537.js
@@ -0,0 +1,5 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+[ class { c = [ c ] = c } ]
diff --git a/deps/v8/test/mjsunit/regress/regress-900085.js b/deps/v8/test/mjsunit/regress/regress-900085.js
new file mode 100644
index 0000000000..5efaf7e80e
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-900085.js
@@ -0,0 +1,7 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+assertThrows(
+ "async function f() { let v = 1; for await (var v of {}) { }",
+ SyntaxError);
diff --git a/deps/v8/test/mjsunit/regress/regress-900585.js b/deps/v8/test/mjsunit/regress/regress-900585.js
new file mode 100644
index 0000000000..8969644f95
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-900585.js
@@ -0,0 +1,5 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+assertThrows("/*for..in*/for(var [x5, functional] = this = function(id) { return id } in false) var x2, x;", ReferenceError);
diff --git a/deps/v8/test/mjsunit/regress/regress-900786.js b/deps/v8/test/mjsunit/regress/regress-900786.js
new file mode 100644
index 0000000000..c012e3fcd8
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-900786.js
@@ -0,0 +1,5 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+assertThrows("{function g(){}function g(){+", SyntaxError);
diff --git a/deps/v8/test/mjsunit/regress/regress-901633.js b/deps/v8/test/mjsunit/regress/regress-901633.js
new file mode 100644
index 0000000000..220926c854
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-901633.js
@@ -0,0 +1,24 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+const magic0 = 2396;
+const magic1 = 1972;
+
+// Fill xs with float arrays.
+const xs = [];
+for (let j = 0; j < magic0; ++j) {
+ xs[j] = [j + 0.1];
+}
+
+// Sort, but trim the array at some point.
+let cmp_calls = 0;
+xs.sort((lhs, rhs) => {
+ lhs = lhs || [0];
+ rhs = rhs || [0];
+ if (cmp_calls++ == magic1) xs.length = 1;
+ return lhs[0] - rhs[0];
+});
+
+// The final shape of the array is unspecified since the comparison function is
+// inconsistent.
diff --git a/deps/v8/test/mjsunit/regress/regress-901798.js b/deps/v8/test/mjsunit/regress/regress-901798.js
new file mode 100644
index 0000000000..67022a70e8
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-901798.js
@@ -0,0 +1,14 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function f(a) {
+ return (a >>> 1073741824) + -3;
+}
+
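+// The shift count is taken mod 32, so a >>> 1073741824 is a >>> 0; for
+// a = -2 that is 4294967294, hence f(-2) === 4294967291.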
+assertEquals(-3, f(0));
+assertEquals(-2, f(1));
+%OptimizeFunctionOnNextCall(f);
+assertEquals(4294967291, f(-2));
diff --git a/deps/v8/test/mjsunit/regress/regress-902552.js b/deps/v8/test/mjsunit/regress/regress-902552.js
new file mode 100644
index 0000000000..081df058e2
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-902552.js
@@ -0,0 +1,11 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+var C = class {};
+for (var i = 0; i < 4; ++i) {
+ if (i == 2) %OptimizeOsr();
+ C.prototype.foo = 42;
+}
diff --git a/deps/v8/test/mjsunit/regress/regress-902810.js b/deps/v8/test/mjsunit/regress/regress-902810.js
new file mode 100644
index 0000000000..76ea7d9443
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-902810.js
@@ -0,0 +1,5 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+assertThrows("((__v_4 = __v_4, __v_0) => eval(__v_4))()", ReferenceError)
diff --git a/deps/v8/test/mjsunit/regress/regress-903527.js b/deps/v8/test/mjsunit/regress/regress-903527.js
new file mode 100644
index 0000000000..fe56d8c216
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-903527.js
@@ -0,0 +1,5 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+assertThrows("e*!`\\2`", SyntaxError)
diff --git a/deps/v8/test/mjsunit/regress/regress-903697.js b/deps/v8/test/mjsunit/regress/regress-903697.js
new file mode 100644
index 0000000000..85f970fe8e
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-903697.js
@@ -0,0 +1,12 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --expose-gc --verify-heap
+
+C = class {};
+for (var i = 0; i < 5; ++i) {
+ gc();
+ if (i == 2) %OptimizeOsr();
+ C.prototype.foo = i + 9000000000000000;
+}
diff --git a/deps/v8/test/mjsunit/regress/regress-903874.js b/deps/v8/test/mjsunit/regress/regress-903874.js
new file mode 100644
index 0000000000..c1301eb2aa
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-903874.js
@@ -0,0 +1,6 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var code = "function f(" + ("{o(".repeat(10000));
+assertThrows(code, SyntaxError);
diff --git a/deps/v8/test/mjsunit/regress/regress-904255.js b/deps/v8/test/mjsunit/regress/regress-904255.js
new file mode 100644
index 0000000000..dd24dbdc48
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-904255.js
@@ -0,0 +1,5 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+assertThrows("((__v_0 = __v_0.replace(...new Array(), '0').slice(...new Int32Array(), '0')) => print())()", ReferenceError);
diff --git a/deps/v8/test/mjsunit/regress/regress-904275.js b/deps/v8/test/mjsunit/regress/regress-904275.js
new file mode 100644
index 0000000000..10233d9629
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-904275.js
@@ -0,0 +1,30 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function __isPropertyOfType(obj, name) {
+ Object.getOwnPropertyDescriptor(obj, name)
+}
+function __getProperties(obj, type) {
+ for (let name of Object.getOwnPropertyNames(obj)) {
+ __isPropertyOfType(obj, name);
+ }
+}
+function __getRandomProperty(obj) {
+ let properties = __getProperties(obj);
+}
+function __f_6776(__v_33890, __v_33891) {
+ var __v_33896 = __v_33891();
+ __getRandomProperty([])
+}
+(function __f_6777() {
+ var __v_33906 = async () => { };
+ __f_6776(1, () => __v_33906())
+})();
+(function __f_6822() {
+ try {
+ __f_6776(1, () => __f_6822());
+ } catch (e) {}
+ var __v_34059 = async (__v_34079 = () => eval()) => { };
+ delete __v_34059[__getRandomProperty(__v_34059)];
+})();
diff --git a/deps/v8/test/mjsunit/regress/regress-904417.js b/deps/v8/test/mjsunit/regress/regress-904417.js
new file mode 100644
index 0000000000..dc469cca08
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-904417.js
@@ -0,0 +1,18 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function bar(o) {
+ return o.hello, Object.getPrototypeOf(o);
+}
+
+var y = { __proto__: {}, hello: 44 };
+var z = { hello: 45 };
+
+bar(y);
+bar(z);
+bar(y);
+%OptimizeFunctionOnNextCall(bar);
+bar(y);
diff --git a/deps/v8/test/mjsunit/regress/regress-904707.js b/deps/v8/test/mjsunit/regress/regress-904707.js
new file mode 100644
index 0000000000..fdf89c2b55
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-904707.js
@@ -0,0 +1,14 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
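+// Deleting the iterator forces Float64Array.from onto the array-like path;
+// the length getter below then detaches the buffer mid-copy, which must
+// not crash.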
+delete Float64Array.prototype.__proto__[Symbol.iterator];
+
+let a = new Float64Array(9);
+Object.defineProperty(a, "length", {
+ get: function () { %ArrayBufferDetach(a.buffer); return 6; }
+});
+
+Float64Array.from(a);
diff --git a/deps/v8/test/mjsunit/regress/regress-905587.js b/deps/v8/test/mjsunit/regress/regress-905587.js
new file mode 100644
index 0000000000..297846d02b
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-905587.js
@@ -0,0 +1,5 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+assertThrows("function test() { '\\u`''\\u' }", SyntaxError)
diff --git a/deps/v8/test/mjsunit/regress/regress-905907.js b/deps/v8/test/mjsunit/regress/regress-905907.js
new file mode 100644
index 0000000000..06bbb51f56
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-905907.js
@@ -0,0 +1,10 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
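+// f recurses unconditionally through inner(), so calling g must exhaust
+// the stack and throw a RangeError rather than crash.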
+var g = function f(a = 3) {
+ var context_allocated = undefined;
+ function inner() { f(); f(context_allocated) };
+ inner();
+};
+assertThrows("g()", RangeError);
diff --git a/deps/v8/test/mjsunit/regress/regress-906406.js b/deps/v8/test/mjsunit/regress/regress-906406.js
new file mode 100644
index 0000000000..eb79ff0a0c
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-906406.js
@@ -0,0 +1,7 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+for (x = 0; x < 10000; ++x) {
+ [(x) => x, [, 4294967295].find((x) => x), , 2].includes('x', -0);
+}
diff --git a/deps/v8/test/mjsunit/regress/regress-907479.js b/deps/v8/test/mjsunit/regress/regress-907479.js
new file mode 100644
index 0000000000..e3e1d0926a
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-907479.js
@@ -0,0 +1,23 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+'use strict';
+
+{
+ const x = [42];
+ x.splice(0, 0, 23);
+ assertEquals([23, 42], x);
+ x.length++;
+ assertEquals([23, 42, ,], x);
+ assertFalse(x.hasOwnProperty(2));
+}
+
+{
+ const x = [4.2];
+ x.splice(0, 0, 23);
+ assertEquals([23, 4.2], x);
+ x.length++;
+ assertEquals([23, 4.2, ,], x);
+ assertFalse(x.hasOwnProperty(2));
+}
diff --git a/deps/v8/test/mjsunit/regress/regress-907575.js b/deps/v8/test/mjsunit/regress/regress-907575.js
new file mode 100644
index 0000000000..2f63c3d870
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-907575.js
@@ -0,0 +1,6 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+assertThrows("0 || () =>", SyntaxError);
+assertThrows("++(a) =>", SyntaxError);
diff --git a/deps/v8/test/mjsunit/regress/regress-907669.js b/deps/v8/test/mjsunit/regress/regress-907669.js
new file mode 100644
index 0000000000..155ff060d3
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-907669.js
@@ -0,0 +1,5 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+assertThrows("function f() { function g() { (); ", SyntaxError);
diff --git a/deps/v8/test/mjsunit/regress/regress-908231.js b/deps/v8/test/mjsunit/regress/regress-908231.js
new file mode 100644
index 0000000000..8af5f30591
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-908231.js
@@ -0,0 +1,8 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+assertThrows(`
+ class C {
+ get [(function() { function lazy() { Syntax Error } })()]() {}
+ }`, SyntaxError)
diff --git a/deps/v8/test/mjsunit/regress/regress-908250.js b/deps/v8/test/mjsunit/regress/regress-908250.js
new file mode 100644
index 0000000000..78f071d230
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-908250.js
@@ -0,0 +1,5 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+assertThrows("(al,al,e={}=e)=>l", SyntaxError);
diff --git a/deps/v8/test/mjsunit/regress/regress-908975.js b/deps/v8/test/mjsunit/regress/regress-908975.js
new file mode 100644
index 0000000000..b80f4ce9c4
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-908975.js
@@ -0,0 +1,6 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+[] = [];
+a => 0
diff --git a/deps/v8/test/mjsunit/regress/regress-913844.js b/deps/v8/test/mjsunit/regress/regress-913844.js
new file mode 100644
index 0000000000..3516f35126
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-913844.js
@@ -0,0 +1,7 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --disable-in-process-stack-traces --perf-prof-unwinding-info --turbo-loop-rotation
+for (var x = 0; x < 1000000; x++)
+;
diff --git a/deps/v8/test/mjsunit/regress/regress-917215.js b/deps/v8/test/mjsunit/regress/regress-917215.js
new file mode 100644
index 0000000000..e8d7e10462
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-917215.js
@@ -0,0 +1,6 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+a: if (true) b: { break a; break b; }
+else b: { break a; break b; }
diff --git a/deps/v8/test/mjsunit/regress/regress-917755.js b/deps/v8/test/mjsunit/regress/regress-917755.js
new file mode 100644
index 0000000000..49803ae2d3
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-917755.js
@@ -0,0 +1,12 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+{
+ function a() {}
+}
+
+{
+ let a;
+ function a() {};
+}
diff --git a/deps/v8/test/mjsunit/regress/regress-917988.js b/deps/v8/test/mjsunit/regress/regress-917988.js
new file mode 100644
index 0000000000..e9a8458edd
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-917988.js
@@ -0,0 +1,31 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Tests that class declarations in parameter lists are handled correctly.
+function v_2(
+v_3 = class v_4 {
+ get [[] = ';']() { }
+}
+) { }
+v_2();
+
+// Test object inside a class in a parameter list
+(function f(
+v_3 = class v_4 {
+ get [{} = ';']() { }
+}
+) { })();
+
+// Test destructuring of class in parameters
+(function f( {p, q} = class C { get [[] = ';']() {} } ) {})();
+
+// Test array destructuring of class in parameters
+class C {};
+C[Symbol.iterator] = function() {
+ return {
+ next: function() { return { done: true }; },
+ _first: true
+ };
+};
+(function f1([p, q] = class D extends C { get [[]]() {} }) { })();
diff --git a/deps/v8/test/mjsunit/regress/regress-919340.js b/deps/v8/test/mjsunit/regress/regress-919340.js
new file mode 100644
index 0000000000..900bf6fde2
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-919340.js
@@ -0,0 +1,17 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --opt
+
+var E = 'Σ';
+var PI = 123;
+function f() {
+ print(E = 2, /b/.test(E) || /b/.test(E = 2));
+ ((E = 3) * PI);
+}
+
+f();
+f();
+%OptimizeFunctionOnNextCall(f);
+f();
diff --git a/deps/v8/test/mjsunit/regress/regress-919710.js b/deps/v8/test/mjsunit/regress/regress-919710.js
new file mode 100644
index 0000000000..11422958af
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-919710.js
@@ -0,0 +1,5 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+assertThrows("( let ) => { 'use strict'; let }", SyntaxError)
diff --git a/deps/v8/test/mjsunit/regress/regress-921382.js b/deps/v8/test/mjsunit/regress/regress-921382.js
new file mode 100644
index 0000000000..d7cce2b723
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-921382.js
@@ -0,0 +1,5 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+assertThrows("(d * f * g) * e => 0", SyntaxError)
diff --git a/deps/v8/test/mjsunit/regress/regress-923723.js b/deps/v8/test/mjsunit/regress/regress-923723.js
new file mode 100644
index 0000000000..5a838e558f
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-923723.js
@@ -0,0 +1,14 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --stack-size=50
+
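+// Recurse until the small stack overflows, then eval a snippet containing
+// a syntax error inside the catch handler; __f_3 must still throw.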
+function __f_3() {
+ try {
+ __f_3();
+ } catch(e) {
+ eval("let fun = ({a} = {a: 30}) => {");
+ }
+}
+assertThrows(__f_3);
diff --git a/deps/v8/test/mjsunit/regress/regress-cntl-descriptors-enum.js b/deps/v8/test/mjsunit/regress/regress-cntl-descriptors-enum.js
index fd4ac6d6c0..ee30d071db 100644
--- a/deps/v8/test/mjsunit/regress/regress-cntl-descriptors-enum.js
+++ b/deps/v8/test/mjsunit/regress/regress-cntl-descriptors-enum.js
@@ -27,13 +27,16 @@
// Flags: --allow-natives-syntax --expose-gc
-DontEnum = 2;
-
var o = {};
-%AddNamedProperty(o, "a", 0, DontEnum);
+Object.defineProperty(o, "a", {
+ value: 0, configurable: true, writable: true, enumerable: false
+});
var o2 = {};
-%AddNamedProperty(o2, "a", 0, DontEnum);
+Object.defineProperty(o2, "a", {
+ value: 0, configurable: true, writable: true, enumerable: false
+});
+
assertTrue(%HaveSameMap(o, o2));
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-178790.js b/deps/v8/test/mjsunit/regress/regress-crbug-178790.js
index 25cc96b852..f04b6068a8 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-178790.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-178790.js
@@ -25,6 +25,8 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Flags: --stack-size=1200
+
// Create a regexp in the form of a?a?...a? so that fully
// traversing the entire graph would be prohibitively expensive.
// This should not cause time out.
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-546968.js b/deps/v8/test/mjsunit/regress/regress-crbug-546968.js
deleted file mode 100644
index 51f20c4204..0000000000
--- a/deps/v8/test/mjsunit/regress/regress-crbug-546968.js
+++ /dev/null
@@ -1,14 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --allow-natives-syntax --harmony-do-expressions
-
-function f() {
- print(
- do {
- for (var i = 0; i < 10; i++) { if (i == 5) %OptimizeOsr(); }
- }
- );
-}
-f();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-691323.js b/deps/v8/test/mjsunit/regress/regress-crbug-691323.js
index d786875d76..5002ceaf20 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-691323.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-691323.js
@@ -7,7 +7,7 @@ var buffer = new ArrayBuffer(0x100);
var array = new Uint8Array(buffer).fill(55);
var tmp = {};
tmp[Symbol.toPrimitive] = function () {
- %ArrayBufferNeuter(array.buffer)
+ %ArrayBufferDetach(array.buffer)
return 0;
};
@@ -18,7 +18,7 @@ buffer = new ArrayBuffer(0x100);
array = new Uint8Array(buffer).fill(55);
tmp = {};
tmp[Symbol.toPrimitive] = function () {
- %ArrayBufferNeuter(array.buffer)
+ %ArrayBufferDetach(array.buffer)
return 0;
};
@@ -29,7 +29,7 @@ buffer = new ArrayBuffer(0x100);
array = new Uint8Array(buffer).fill(55);
tmp = {};
tmp[Symbol.toPrimitive] = function () {
- %ArrayBufferNeuter(array.buffer)
+ %ArrayBufferDetach(array.buffer)
return 0;
};
assertEquals(true, Array.prototype.includes.call(array, undefined, tmp));
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-772056.js b/deps/v8/test/mjsunit/regress/regress-crbug-772056.js
index 380bae356d..380f968560 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-772056.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-772056.js
@@ -14,4 +14,4 @@ let table = new WebAssembly.Table({element: "anyfunc",
initial: 1, maximum:1000000});
let instance = new WebAssembly.Instance(module, {x: {table:table}});
-assertThrows(() => table.grow(Infinity), RangeError);
+assertThrows(() => table.grow(Infinity), TypeError);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-781116-1.js b/deps/v8/test/mjsunit/regress/regress-crbug-781116-1.js
index 83af7a8b98..fb3f7da54e 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-781116-1.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-781116-1.js
@@ -9,7 +9,6 @@ function baz(obj, store) {
}
function bar(store) {
baz(Array.prototype, store);
- baz(this.arguments, true);
}
bar(false);
bar(false);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-781116-2.js b/deps/v8/test/mjsunit/regress/regress-crbug-781116-2.js
index f8ffbe8ff5..0af8d6f1a8 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-781116-2.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-781116-2.js
@@ -9,7 +9,6 @@ function baz(obj, store) {
}
function bar(store) {
baz(Object.prototype, store);
- baz(this.arguments, true);
}
bar(false);
bar(false);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-800032.js b/deps/v8/test/mjsunit/regress/regress-crbug-800032.js
index 7c9206c7f6..2b4bc067fb 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-800032.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-800032.js
@@ -6,10 +6,7 @@
class Derived extends RegExp {
- constructor(a) {
- // Syntax Error
- const a = 1;
- }
+ constructor(a) { throw "error" }
}
let o = Reflect.construct(RegExp, [], Derived);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-806388.js b/deps/v8/test/mjsunit/regress/regress-crbug-806388.js
index b55b50107e..138ae18dae 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-806388.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-806388.js
@@ -5,10 +5,7 @@
// Flags: --allow-natives-syntax --enable-slow-asserts --expose-gc
class Derived extends Array {
- constructor(a) {
- // Syntax Error.
- const a = 1;
- }
+ constructor(a) { throw "error" }
}
// Derived is not a subclass of RegExp
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-860788.js b/deps/v8/test/mjsunit/regress/regress-crbug-860788.js
index 55d243eac6..fe0dcb83af 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-860788.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-860788.js
@@ -26,5 +26,5 @@ try {
} catch(e) { print("Caught: " + e); }
try {
var obj = {prop: 7};
- assertThrows("nonexistant(obj)");
+ assertThrows("nonexistent(obj)");
} catch(e) { print("Caught: " + e); }
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-867776.js b/deps/v8/test/mjsunit/regress/regress-crbug-867776.js
index f108f2acc4..76d2121640 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-867776.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-867776.js
@@ -8,7 +8,7 @@ for (var i = 0; i < 3; i++) {
var array = new BigInt64Array(200);
function evil_callback() {
- %ArrayBufferNeuter(array.buffer);
+ %ArrayBufferDetach(array.buffer);
gc();
return 1094795585n;
}
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-895199.js b/deps/v8/test/mjsunit/regress/regress-crbug-895199.js
new file mode 100644
index 0000000000..7975ffc699
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-895199.js
@@ -0,0 +1,17 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function foo() {
+ var a = new Array(2);
+ a[0] = 23.1234;
+ a[1] = 25.1234;
+ %DeoptimizeNow();
+ return a[2];
+}
+foo();
+foo();
+%OptimizeFunctionOnNextCall(foo);
+foo()
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-896181.js b/deps/v8/test/mjsunit/regress/regress-crbug-896181.js
new file mode 100644
index 0000000000..9aef4fc1d5
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-896181.js
@@ -0,0 +1,14 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var a = new Array();
+a[0] = 0.1;
+a[2] = 0.2;
+Object.defineProperty(a, 1, {
+ get: function() {
+ a[4] = 0.3;
+ },
+});
+
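+// join caches the length (3) up front, so the getter's side effect of
+// adding a[4] must not show up in the result.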
+assertSame('0.1,,0.2', a.join());
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-896700.js b/deps/v8/test/mjsunit/regress/regress-crbug-896700.js
new file mode 100644
index 0000000000..3e6232a96f
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-896700.js
@@ -0,0 +1,10 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --async-stack-traces --expose-async-hooks
+
+async_hooks.createHook({
+ after() { throw new Error(); }
+}).enable();
+Promise.resolve().then();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-897098.js b/deps/v8/test/mjsunit/regress/regress-crbug-897098.js
new file mode 100644
index 0000000000..fe11aa17f6
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-897098.js
@@ -0,0 +1,8 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+const arr = [1.1,2.2,3.3];
+arr.pop();
+const start = {toString: function() {arr.pop();}}
+arr.includes(0, start);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-897404.js b/deps/v8/test/mjsunit/regress/regress-crbug-897404.js
new file mode 100644
index 0000000000..7e8b48de85
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-897404.js
@@ -0,0 +1,16 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function TestError() {}
+
+const a = new Array(2**32 - 1);
+
+// Force early exit to avoid an unreasonably long test.
+a[0] = {
+ toString() { throw new TestError(); }
+};
+
+// Verify that join throws the TestError and does not fail due to asserts
+// (negative-length fixed array allocation).
+assertThrows(() => a.join(), TestError);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-897406.js b/deps/v8/test/mjsunit/regress/regress-crbug-897406.js
new file mode 100644
index 0000000000..62eeeebbef
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-897406.js
@@ -0,0 +1,14 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --async-stack-traces --expose-async-hooks
+
+async_hooks.createHook({
+ after() { throw new Error(); }
+}).enable();
+
+(async function() {
+ await 1;
+ await 1;
+})();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-898785.js b/deps/v8/test/mjsunit/regress/regress-crbug-898785.js
new file mode 100644
index 0000000000..668b095c6e
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-898785.js
@@ -0,0 +1,11 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+var a = [0, 1];
+var o = { [Symbol.toPrimitive]() { a.length = 1; return 2; } };
+
+a.push(2);
+a.lastIndexOf(5, o);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-898974.js b/deps/v8/test/mjsunit/regress/regress-crbug-898974.js
new file mode 100644
index 0000000000..1b9b07ab74
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-898974.js
@@ -0,0 +1,36 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
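+// HEAPF32[4096] and HEAPF64[4096] read past the 4096-byte buffer; validated
+// asm.js must yield NaN for such loads (asserted below).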
+function Module(global, env, buffer) {
+ "use asm";
+ var HEAPF64 = new global.Float64Array(buffer);
+ var HEAPF32 = new global.Float32Array(buffer);
+ var Math_fround = global.Math.fround;
+ function main_d_f() {
+ HEAPF64[0] = Math_fround(+HEAPF64[0]);
+ }
+ function main_d_fq() {
+ HEAPF64[1] = HEAPF32[4096];
+ }
+ function main_f_dq() {
+ HEAPF32[4] = HEAPF64[4096];
+ }
+ return {main_d_f: main_d_f, main_d_fq: main_d_fq, main_f_dq: main_f_dq};
+};
+let buffer = new ArrayBuffer(4096);
+let module = Module(this, undefined, buffer);
+let view64 = new Float64Array(buffer);
+let view32 = new Float32Array(buffer);
+assertEquals(view64[0] = 2.3, view64[0]);
+module.main_d_f();
+module.main_d_fq();
+module.main_f_dq();
+assertTrue(%IsAsmWasmCode(Module));
+assertEquals(Math.fround(2.3), view64[0]);
+assertTrue(isNaN(view64[1]));
+assertTrue(isNaN(view32[4]));
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-899464.js b/deps/v8/test/mjsunit/regress/regress-crbug-899464.js
new file mode 100644
index 0000000000..1deaa30c2d
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-899464.js
@@ -0,0 +1,7 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-string-matchall
+
+''.matchAll(/./u);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-899535.js b/deps/v8/test/mjsunit/regress/regress-crbug-899535.js
new file mode 100644
index 0000000000..aab112bb7d
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-899535.js
@@ -0,0 +1,6 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+let a = [1.1, 2.2, 3.3];
+a.includes(4.4, { toString: () => a.length = 0 });
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-900674.js b/deps/v8/test/mjsunit/regress/regress-crbug-900674.js
new file mode 100644
index 0000000000..7549b36a4e
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-900674.js
@@ -0,0 +1,12 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function foo() {
+ let val = Promise.resolve().then();
+}
+foo();
+%OptimizeFunctionOnNextCall(foo);
+foo();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-902395.js b/deps/v8/test/mjsunit/regress/regress-crbug-902395.js
new file mode 100644
index 0000000000..79aaecf6fa
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-902395.js
@@ -0,0 +1,37 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function opt() {
+ try{
+ Object.seal({})
+ }finally{
+ try{
+ // Carefully crafted by clusterfuzz to alias the temporary object literal
+    // register with the context register of the dead try block below.
+ (
+ {
+ toString(){
+ }
+ }
+ ).apply(-1).x( )
+ }
+ finally{
+ if(2.2)
+ {
+ return
+ }
+ // This code should be dead.
+ try{
+ Reflect.construct
+ }finally{
+ }
+ }
+ }
+}
+
+opt();
+%OptimizeFunctionOnNextCall(opt);
+opt();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-902610.js b/deps/v8/test/mjsunit/regress/regress-crbug-902610.js
new file mode 100644
index 0000000000..11b88f288b
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-902610.js
@@ -0,0 +1,11 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+assertThrows(() => {
+  // Make a function with 65535 args. This should throw a SyntaxError, since
+  // 65535 (-1 as a 16-bit value) is reserved for the "don't adapt arguments" sentinel.
+ var f_with_65535_args =
+ eval("(function(" + Array(65535).fill("x").join(",") + "){})");
+ f_with_65535_args();
+}, SyntaxError);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-902672.js b/deps/v8/test/mjsunit/regress/regress-crbug-902672.js
new file mode 100644
index 0000000000..4073b554e9
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-902672.js
@@ -0,0 +1,8 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var a = this;
+var b = {};
+a.length = 4294967296; // 2 ^ 32 (max array length + 1)
+assertThrows(() => Array.prototype.join.call(a,b), TypeError);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-905457.js b/deps/v8/test/mjsunit/regress/regress-crbug-905457.js
new file mode 100644
index 0000000000..3a97a87520
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-905457.js
@@ -0,0 +1,50 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
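+// Math.abs composed with Math.min/max of 0 must still propagate NaN in optimized code.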
+(function() {
+ function foo(x) {
+ return Math.abs(Math.min(+x, 0));
+ }
+
+ assertEquals(NaN, foo());
+ assertEquals(NaN, foo());
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(NaN, foo());
+})();
+
+(function() {
+ function foo(x) {
+ return Math.abs(Math.min(-x, 0));
+ }
+
+ assertEquals(NaN, foo());
+ assertEquals(NaN, foo());
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(NaN, foo());
+})();
+
+(function() {
+ function foo(x) {
+ return Math.abs(Math.max(0, +x));
+ }
+
+ assertEquals(NaN, foo());
+ assertEquals(NaN, foo());
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(NaN, foo());
+})();
+
+(function() {
+ function foo(x) {
+ return Math.abs(Math.max(0, -x));
+ }
+
+ assertEquals(NaN, foo());
+ assertEquals(NaN, foo());
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(NaN, foo());
+})();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-906043.js b/deps/v8/test/mjsunit/regress/regress-crbug-906043.js
new file mode 100644
index 0000000000..dbc283fa9f
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-906043.js
@@ -0,0 +1,34 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function fun(arg) {
+ let x = arguments.length;
+ a1 = new Array(0x10);
+ a1[0] = 1.1;
+ a2 = new Array(0x10);
+ a2[0] = 1.1;
+ a1[(x >> 16) * 21] = 1.39064994160909e-309; // 0xffff00000000
+ a1[(x >> 16) * 41] = 8.91238232205e-313; // 0x2a00000000
+}
+
+var a1, a2;
+var a3 = [1.1, 2.2];
+a3.length = 0x11000;
+a3.fill(3.3);
+
+var a4 = [1.1];
+
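+// Warm up and optimize with a one-element spread, then call with the large array.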
+for (let i = 0; i < 3; i++) fun(...a4);
+%OptimizeFunctionOnNextCall(fun);
+fun(...a4);
+
+res = fun(...a3);
+
+assertEquals(16, a2.length);
+for (let i = 8; i < 32; i++) {
+ assertEquals(undefined, a2[i]);
+}
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-906220.js b/deps/v8/test/mjsunit/regress/regress-crbug-906220.js
new file mode 100644
index 0000000000..580ff59bdd
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-906220.js
@@ -0,0 +1,12 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function foo() { new Array().pop(); }
+
+assertEquals(undefined, foo());
+assertEquals(undefined, foo());
+%OptimizeFunctionOnNextCall(foo);
+assertEquals(undefined, foo());
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-906870.js b/deps/v8/test/mjsunit/regress/regress-crbug-906870.js
new file mode 100644
index 0000000000..d94ee67a4c
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-906870.js
@@ -0,0 +1,50 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
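+// Math.min/max must preserve the sign of zero: Infinity / max(-0, +0) is +Infinity.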
+(function() {
+ function foo() {
+ return Infinity / Math.max(-0, +0);
+ }
+
+ assertEquals(+Infinity, foo());
+ assertEquals(+Infinity, foo());
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(+Infinity, foo());
+})();
+
+(function() {
+ function foo() {
+ return Infinity / Math.max(+0, -0);
+ }
+
+ assertEquals(+Infinity, foo());
+ assertEquals(+Infinity, foo());
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(+Infinity, foo());
+})();
+
+(function() {
+ function foo() {
+ return Infinity / Math.min(-0, +0);
+ }
+
+ assertEquals(-Infinity, foo());
+ assertEquals(-Infinity, foo());
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(-Infinity, foo());
+})();
+
+(function() {
+ function foo() {
+ return Infinity / Math.min(+0, -0);
+ }
+
+ assertEquals(-Infinity, foo());
+ assertEquals(-Infinity, foo());
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(-Infinity, foo());
+})();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-908309.js b/deps/v8/test/mjsunit/regress/regress-crbug-908309.js
new file mode 100644
index 0000000000..c2d939001d
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-908309.js
@@ -0,0 +1,28 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+const p = Object.defineProperty(Promise.resolve(), 'then', {
+ value() { return 0; }
+});
+
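+// The overridden "then" makes catch() and finally() return 0, so the chained call throws.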
+(function() {
+ function foo() { return p.catch().catch(); }
+
+ assertThrows(foo, TypeError);
+ assertThrows(foo, TypeError);
+ %OptimizeFunctionOnNextCall(foo);
+ assertThrows(foo, TypeError);
+})();
+
+(function() {
+ function foo() { return p.finally().finally(); }
+
+ assertThrows(foo, TypeError);
+ assertThrows(foo, TypeError);
+ %OptimizeFunctionOnNextCall(foo);
+ assertThrows(foo, TypeError);
+})();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-909614.js b/deps/v8/test/mjsunit/regress/regress-crbug-909614.js
new file mode 100644
index 0000000000..4070c9b821
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-909614.js
@@ -0,0 +1,9 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+let just_under = 2n ** 30n - 1n;
+let just_above = 2n ** 30n;
+
+assertDoesNotThrow(() => { var dummy = 2n ** just_under; });
+assertThrows(() => { var dummy = 2n ** just_above; });
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-911416.js b/deps/v8/test/mjsunit/regress/regress-crbug-911416.js
new file mode 100644
index 0000000000..60d7ae892a
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-911416.js
@@ -0,0 +1,5 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+assertEquals(7, ({[Symbol.hasInstance.description]:7})["Symbol.hasInstance"]);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-913212.js b/deps/v8/test/mjsunit/regress/regress-crbug-913212.js
new file mode 100644
index 0000000000..2de99d6efc
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-913212.js
@@ -0,0 +1,11 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+const globalThis = this;
+Object.setPrototypeOf(this, new Proxy({}, {
+ get(target, prop, receiver) {
+ assertTrue(receiver === globalThis);
+ }
+}));
+undefined_name_access
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-913296.js b/deps/v8/test/mjsunit/regress/regress-crbug-913296.js
new file mode 100644
index 0000000000..3fab06607f
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-913296.js
@@ -0,0 +1,14 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function foo(trigger) {
+ return Object.is((trigger ? -0 : 0) - 0, -0);
+}
+
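+// Warm up on the 0 - 0 path only; optimized code must still produce -0 for (-0) - 0.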
+assertFalse(foo(false));
+%OptimizeFunctionOnNextCall(foo);
+assertTrue(foo(true));
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-915783.js b/deps/v8/test/mjsunit/regress/regress-crbug-915783.js
new file mode 100644
index 0000000000..9220b4aa54
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-915783.js
@@ -0,0 +1,36 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+const constructors = [
+ [Uint8Array, [0, 1]],
+ [Int8Array, [0, 1]],
+ [Uint16Array, [0, 1]],
+ [Int16Array, [0, 1]],
+ [Uint32Array, [0, 1]],
+ [Int32Array, [0, 1]],
+ [Float32Array, [0, 1]],
+ [Float64Array, [0, 1]],
+ [Uint8ClampedArray, [0, 1]],
+ [BigInt64Array, [0n, 1n]],
+ [BigUint64Array, [0n, 1n]]
+];
+
+let typedArray;
+function detachBuffer() {
+ %ArrayBufferDetach(typedArray.buffer);
+ return 'a';
+}
+Number.prototype.toString = detachBuffer;
+BigInt.prototype.toString = detachBuffer;
+Number.prototype.toLocaleString = detachBuffer;
+BigInt.prototype.toLocaleString = detachBuffer;
+
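+// Converting an element detaches the buffer mid-operation; join and toLocaleString must not crash.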
+constructors.forEach(([constructor, arr]) => {
+ typedArray = new constructor(arr);
+ assertSame(typedArray.join(), '0,1');
+ assertSame(typedArray.toLocaleString(), 'a,');
+});
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-916288.js b/deps/v8/test/mjsunit/regress/regress-crbug-916288.js
new file mode 100644
index 0000000000..aa170c9383
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-916288.js
@@ -0,0 +1,5 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+assertThrows("(a()=0)=>0", SyntaxError)
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-917076.js b/deps/v8/test/mjsunit/regress/regress-crbug-917076.js
new file mode 100644
index 0000000000..7c19c02204
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-917076.js
@@ -0,0 +1,20 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+let speciesCounter = 0;
+
+Object.defineProperty(Promise, Symbol.species, {
+ value: function(...args) {
+ speciesCounter++;
+ return new Promise(...args);
+ }
+});
+
+async function foo() { }
+
+assertPromiseResult(Promise.all([foo()]), () => {
+ assertEquals(3, speciesCounter);
+});
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-917980.js b/deps/v8/test/mjsunit/regress/regress-crbug-917980.js
new file mode 100644
index 0000000000..18dc782400
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-917980.js
@@ -0,0 +1,34 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+const constructors = [
+ [Uint8Array, [0, 1]],
+ [Int8Array, [0, 1]],
+ [Uint16Array, [0, 1]],
+ [Int16Array, [0, 1]],
+ [Uint32Array, [0, 1]],
+ [Int32Array, [0, 1]],
+ [Float32Array, [0, 1]],
+ [Float64Array, [0, 1]],
+ [Uint8ClampedArray, [0, 1]],
+ [BigInt64Array, [0n, 1n]],
+ [BigUint64Array, [0n, 1n]]
+];
+
+let typedArray;
+
+const separator = {
+ toString() {
+ %ArrayBufferDetach(typedArray.buffer);
+ return '*';
+ }
+};
+
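+// The separator's toString detaches the buffer, so only the separator survives in the result.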
+constructors.forEach(([constructor, arr]) => {
+ typedArray = new constructor(arr);
+ assertSame(typedArray.join(separator), '*');
+});
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-920184.js b/deps/v8/test/mjsunit/regress/regress-crbug-920184.js
new file mode 100644
index 0000000000..c38f181750
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-920184.js
@@ -0,0 +1,14 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --verify-heap --enable-slow-asserts
+
+var Ctor = function() {
+ return [];
+};
+var a = ["one", "two", "three"];
+a.constructor = {};
+a.constructor[Symbol.species] = Ctor;
+
+a.filter(function() { return true; });
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-923264.js b/deps/v8/test/mjsunit/regress/regress-crbug-923264.js
new file mode 100644
index 0000000000..e8c0d43022
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-923264.js
@@ -0,0 +1,27 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --verify-heap --expose-gc
+
+let paramName = '';
+for (let i=0; i < 2**10; i++) {
+ paramName += 'a';
+}
+
+let params = '';
+for (let i = 0; i < 2**10; i++) {
+ params += paramName + i + ',';
+}
+
+let fn = eval(`(
+ class A {
+ constructor (${params}) {
+ function lazy() {
+ return function lazier() { return ${paramName+1} }
+ };
+ return lazy;
+ }
+})`);
+
+gc()
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-923265.js b/deps/v8/test/mjsunit/regress/regress-crbug-923265.js
new file mode 100644
index 0000000000..8e6125b34b
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-923265.js
@@ -0,0 +1,9 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+let a = {0: 5, 1: 4, 2: 3, length: 2};
+Object.freeze(a);
+
+assertThrows(() => Array.prototype.sort.call(a));
+assertPropertiesEqual({0: 5, 1: 4, 2: 3, length: 2}, a);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-923705.js b/deps/v8/test/mjsunit/regress/regress-crbug-923705.js
new file mode 100644
index 0000000000..9cdb98b15b
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-923705.js
@@ -0,0 +1,15 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --verify-heap
+
+function __f_5() {
+ function __f_1() {
+ function __f_0() {
+ ({y = eval()}) => assertEquals()();
+ }
+ }
+}
+
+__f_5();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-926651.js b/deps/v8/test/mjsunit/regress/regress-crbug-926651.js
new file mode 100644
index 0000000000..8ec3c60af5
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-926651.js
@@ -0,0 +1,12 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var asdf = false;
+
+const f =
+ (v1 = (function g() {
+ if (asdf) { return; } else { return; }
+ (function h() {});
+ })()) => 1;
+f();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-930580.js b/deps/v8/test/mjsunit/regress/regress-crbug-930580.js
new file mode 100644
index 0000000000..e7f49875f6
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-930580.js
@@ -0,0 +1,9 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+(function outer() {
+ (arg = (function inner() {
+ return this
+ })()) => 0;
+})();
diff --git a/deps/v8/test/mjsunit/regress/regress-directive.js b/deps/v8/test/mjsunit/regress/regress-directive.js
new file mode 100644
index 0000000000..ff38f86469
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-directive.js
@@ -0,0 +1,10 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function f() {
+ 'use strict'
+ in Number
+}
+
+f.arguments
diff --git a/deps/v8/test/mjsunit/regress/regress-loop-var-assign-without-block-scope.js b/deps/v8/test/mjsunit/regress/regress-loop-var-assign-without-block-scope.js
new file mode 100644
index 0000000000..8c85c1380f
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-loop-var-assign-without-block-scope.js
@@ -0,0 +1,14 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+function f() {
+ // Loop with a body that's not wrapped in a block.
+ for (i = 0; i < 2; i++)
+ var x = i, // var x that's assigned on each iteration
+ y = y||(()=>x), // single arrow function that returns x
+ z = (%OptimizeFunctionOnNextCall(y), y()); // optimize y on first iteration
+ return y()
+};
+assertEquals(1, f())
diff --git a/deps/v8/test/mjsunit/regress/regress-osr-in-case-label.js b/deps/v8/test/mjsunit/regress/regress-osr-in-case-label.js
deleted file mode 100644
index 3ad9e33646..0000000000
--- a/deps/v8/test/mjsunit/regress/regress-osr-in-case-label.js
+++ /dev/null
@@ -1,18 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --allow-natives-syntax --harmony-do-expressions
-
-function f(x) {
- switch (x) {
- case 1: return "one";
- case 2: return "two";
- case do { for (var i = 0; i < 10; i++) { if (i == 5) %OptimizeOsr(); } }:
- case 3: return "WAT";
- }
-}
-
-assertEquals("one", f(1));
-assertEquals("two", f(2));
-assertEquals("WAT", f(3));
diff --git a/deps/v8/test/mjsunit/regress/regress-osr-in-literal.js b/deps/v8/test/mjsunit/regress/regress-osr-in-literal.js
deleted file mode 100644
index 7553b9c725..0000000000
--- a/deps/v8/test/mjsunit/regress/regress-osr-in-literal.js
+++ /dev/null
@@ -1,30 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --allow-natives-syntax --harmony-do-expressions
-
-"use strict";
-
-var p = {};
-var testCases = [
- { s:"[1, do { _OSR_ 2 }, 3]", r:[1, 2, 3] },
- { s:"[1, ...[2], do { _OSR_ 3 }, 4]", r:[1, 2, 3, 4] },
- { s:"[1, ...do { _OSR_ [2,3] }, 4]", r:[1, 2, 3, 4] },
- { s:"{ a:do { _OSR_ 1 } }", r:{ a:1 } },
- { s:"{ a:do { _OSR_ 2 }, __proto__:p }", r:{ a:2, __proto__:p } },
- { s:"{ a:do { _OSR_ 3 }, get b() { return 4; } }", r:{ a:3, b:4 } },
- { s:"{ [do { _OSR_ 'b' }]: 3 }", r:{ b:3 } },
- { s:"{ [do { _OSR_ 'b' }]: 3, c: 4 }", r:{ b:3, c:4 } },
- { s:"{ [do { _OSR_ 'b' }]: 3, __proto__:p }", r:{ b:3, __proto__:p } },
- { s:"{ get [do { _OSR_ 'c' }]() { return 4; } }", r:{ c:4 } },
- { s:"class { [do { _OSR_ 'f' }]() {} }" },
- { s:"class { [do { _OSR_ 'f' }]() {}; g() {} }" },
-];
-
-for (var i = 0; i < testCases.length; ++i) {
- var source = "(function f" + i + "(x) { return " + testCases[i].s + "})";
- var osr = "for (var i = 0; i < 10; i++) { if (i == 5) %OptimizeOsr(); }";
- var result = eval(source.replace("_OSR_", osr))();
- if (testCases[i].r) assertEquals(testCases[i].r, result);
-}
diff --git a/deps/v8/test/mjsunit/regress/regress-preparse-inner-arrow-duplicate-parameter.js b/deps/v8/test/mjsunit/regress/regress-preparse-inner-arrow-duplicate-parameter.js
new file mode 100644
index 0000000000..cff5fcc666
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-preparse-inner-arrow-duplicate-parameter.js
@@ -0,0 +1,5 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+assertThrows("()=>{ (x,x)=>1 }", SyntaxError)
diff --git a/deps/v8/test/mjsunit/regress/regress-sloppy-block-function-hoisting-dynamic.js b/deps/v8/test/mjsunit/regress/regress-sloppy-block-function-hoisting-dynamic.js
new file mode 100644
index 0000000000..59758eda5e
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-sloppy-block-function-hoisting-dynamic.js
@@ -0,0 +1,6 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+with({}) { eval("{function f(){f}}") };
+f()
diff --git a/deps/v8/test/mjsunit/regress/regress-v8-8357.js b/deps/v8/test/mjsunit/regress/regress-v8-8357.js
new file mode 100644
index 0000000000..6f27e84abb
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-v8-8357.js
@@ -0,0 +1,32 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
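+// String.prototype.replace/search/match must dispatch through the patched Symbol methods.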
+const s = "Umbridge has been reading your mail, Harry."
+
+{
+ let monkey_called = false;
+ s.__proto__.__proto__[Symbol.replace] =
+ () => { monkey_called = true; };
+ s.replace(s);
+ assertTrue(monkey_called);
+}
+
+{
+ let monkey_called = false;
+ s.__proto__.__proto__[Symbol.search] =
+ () => { monkey_called = true; };
+ s.search(s);
+ assertTrue(monkey_called);
+}
+
+{
+ let monkey_called = false;
+ s.__proto__.__proto__[Symbol.match] =
+ () => { monkey_called = true; };
+ s.match(s);
+ assertTrue(monkey_called);
+}
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-02256.js b/deps/v8/test/mjsunit/regress/wasm/regress-02256.js
index 3b9b76b5a6..d1dae276d3 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-02256.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-02256.js
@@ -274,7 +274,7 @@ try {
function __f_16() {
var __v_1 = new WasmModuleBuilder();
__v_1.addFunction("grow_memory", kSig_i_i)
- .addBody([kExprGetLocal, 0, kExprGrowMemory])
+ .addBody([kExprGetLocal, 0, kExprMemoryGrow])
.exportFunc();
__v_1.addFunction("load", kSig_i_i)
.addBody([kExprGetLocal, 0, kExprI32LoadMem, 0, 0])
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-651961.js b/deps/v8/test/mjsunit/regress/wasm/regress-651961.js
index 30f6565d32..bf08200d30 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-651961.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-651961.js
@@ -14,7 +14,7 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
.addBody([
kExprMemorySize, kMemoryZero,
kExprI32Const, 0x10,
- kExprGrowMemory, kMemoryZero,
+ kExprMemoryGrow, kMemoryZero,
kExprI32Mul,
])
.exportFunc();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-680938.js b/deps/v8/test/mjsunit/regress/wasm/regress-680938.js
index 75c8a457bb..5471f60a71 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-680938.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-680938.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-var v17 = {};
-var v32 = {};
+var v17 = 42;
+var v32 = { initial: 1 };
v39 = new WebAssembly.Memory(v32);
v49 = v39.grow(v17);
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-688876.js b/deps/v8/test/mjsunit/regress/wasm/regress-688876.js
index 83bebbb802..02932b4812 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-688876.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-688876.js
@@ -26,7 +26,7 @@ load('test/mjsunit/wasm/wasm-module-builder.js');
kExprI32And,
kExprI32Eqz,
kExprI32And,
-kExprGrowMemory, 0x00,
+kExprMemoryGrow, 0x00,
kExprI32Const, 0x55,
kExprI32LoadMem8S, 0x00, 0x3a,
kExprI32LoadMem16U, 0x00, 0x71,
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-699485.js b/deps/v8/test/mjsunit/regress/wasm/regress-699485.js
index 7f4560789e..a44b14b031 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-699485.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-699485.js
@@ -15,7 +15,7 @@ builder.addFunction("regression_699485", kSig_i_v)
.addBody([
kExprI32Const, 0x04,
kExprNop,
- kExprGrowMemory, 0x00,
+ kExprMemoryGrow, 0x00,
]).exportFunc();
let module = builder.instantiate();
assertEquals(0, module.exports.regression_699485());
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-702460.js b/deps/v8/test/mjsunit/regress/wasm/regress-702460.js
index 73c01e13a0..44e60330b4 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-702460.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-702460.js
@@ -18,34 +18,34 @@ let kExprS128LoadMem = 0xc0;
kExprI32Const, 0x41,
kExprI32Const, 0x3c,
kExprI32Const, 0xdc, 0x01,
- kExprGrowMemory, 0x00,
- kExprGrowMemory, 0x00,
- kExprGrowMemory, 0x00,
+ kExprMemoryGrow, 0x00,
+ kExprMemoryGrow, 0x00,
+ kExprMemoryGrow, 0x00,
kExprSetLocal, 0x00,
- kExprGrowMemory, 0x00,
- kExprGrowMemory, 0x00,
- kExprGrowMemory, 0x00,
- kExprGrowMemory, 0x00,
- kExprGrowMemory, 0x00,
- kExprGrowMemory, 0x00,
- kExprGrowMemory, 0x00,
- kExprGrowMemory, 0x00,
- kExprGrowMemory, 0x00,
- kExprGrowMemory, 0x00,
- kExprGrowMemory, 0x00,
- kExprGrowMemory, 0x00,
- kExprGrowMemory, 0x00,
- kExprGrowMemory, 0x00,
- kExprGrowMemory, 0x00,
- kExprGrowMemory, 0x00,
- kExprGrowMemory, 0x00,
- kExprGrowMemory, 0x00,
- kExprGrowMemory, 0x00,
- kExprGrowMemory, 0x00,
- kExprGrowMemory, 0x00,
+ kExprMemoryGrow, 0x00,
+ kExprMemoryGrow, 0x00,
+ kExprMemoryGrow, 0x00,
+ kExprMemoryGrow, 0x00,
+ kExprMemoryGrow, 0x00,
+ kExprMemoryGrow, 0x00,
+ kExprMemoryGrow, 0x00,
+ kExprMemoryGrow, 0x00,
+ kExprMemoryGrow, 0x00,
+ kExprMemoryGrow, 0x00,
+ kExprMemoryGrow, 0x00,
+ kExprMemoryGrow, 0x00,
+ kExprMemoryGrow, 0x00,
+ kExprMemoryGrow, 0x00,
+ kExprMemoryGrow, 0x00,
+ kExprMemoryGrow, 0x00,
+ kExprMemoryGrow, 0x00,
+ kExprMemoryGrow, 0x00,
+ kExprMemoryGrow, 0x00,
+ kExprMemoryGrow, 0x00,
+ kExprMemoryGrow, 0x00,
kExprS128LoadMem, 0x00, 0x40,
kExprUnreachable,
- kExprGrowMemory, 0x00
+ kExprMemoryGrow, 0x00
]).exportFunc();
assertThrows(() => builder.instantiate());
})();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-710844.js b/deps/v8/test/mjsunit/regress/wasm/regress-710844.js
index a45e953574..20c8154e4a 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-710844.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-710844.js
@@ -13,7 +13,7 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
.addBody([
kExprI32Const, 0x03,
kExprNop,
- kExprGrowMemory, 0x00,
+ kExprMemoryGrow, 0x00,
kExprI32Const, 0x13,
kExprNop,
kExprI32StoreMem8, 0x00, 0x10
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-734108.js b/deps/v8/test/mjsunit/regress/wasm/regress-734108.js
index d8774f4a84..74a1717b8d 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-734108.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-734108.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --wasm-async-compilation
-
__v_0 = new Uint8Array([
0x00, 0x61, 0x73, 0x6d, 0x01, 0x00, 0x00, 0x00, 0x01, 0x05, 0x01,
0x60, 0x00, 0x01, 0x7f, 0x03, 0x02, 0x01, 0x00, 0x05, 0x03, 0x01,
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-7353.js b/deps/v8/test/mjsunit/regress/wasm/regress-7353.js
index d41cbabf36..748c74139f 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-7353.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-7353.js
@@ -11,7 +11,7 @@ const builder = new WasmModuleBuilder();
builder.addMemory(16, 32);
builder.addFunction('grow', kSig_i_i).addBody([
kExprGetLocal, 0,
- kExprGrowMemory, 0,
+ kExprMemoryGrow, 0,
]).exportFunc();
builder.addFunction('main', kSig_i_i).addBody([
...wasmI32Const(0x41),
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-736584.js b/deps/v8/test/mjsunit/regress/wasm/regress-736584.js
index 39f03c1072..033732f368 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-736584.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-736584.js
@@ -7,7 +7,7 @@
load('test/mjsunit/wasm/wasm-constants.js');
load('test/mjsunit/wasm/wasm-module-builder.js');
-let mem = new WebAssembly.Memory({});
+let mem = new WebAssembly.Memory({initial: 0});
let builder = new WasmModuleBuilder();
builder.addImportedMemory("mod", "imported_mem");
builder.addFunction('mem_size', kSig_i_v)
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-739768.js b/deps/v8/test/mjsunit/regress/wasm/regress-739768.js
index bcf3ceeca2..52985c3297 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-739768.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-739768.js
@@ -16,7 +16,7 @@ builder0.addFunction('main', kSig_i_i)
kExprCallIndirect, sig_index, kTableZero
]) // --
.exportAs('main');
-builder0.setFunctionTableBounds(3, 3);
+builder0.setTableBounds(3, 3);
builder0.addExportOfKind('table', kExternalTable);
let module0 = new WebAssembly.Module(builder0.toBuffer());
let instance0 = new WebAssembly.Instance(module0);
@@ -25,7 +25,7 @@ let builder1 = new WasmModuleBuilder();
builder1.setName('module_1');
builder1.addFunction('main', kSig_i_v).addBody([kExprUnreachable]);
builder1.addImportedTable('z', 'table');
-builder1.addFunctionTableInit(0, false, [0], true);
+builder1.addElementSegment(0, false, [0], true);
let module1 = new WebAssembly.Module(builder1.toBuffer());
let instance1 =
new WebAssembly.Instance(module1, {z: {table: instance0.exports.table}});
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-763439.js b/deps/v8/test/mjsunit/regress/wasm/regress-763439.js
index 0f9d2b24d8..1f90e0a017 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-763439.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-763439.js
@@ -10,7 +10,7 @@ builder.addMemory(0, 1234, false);
builder.addFunction('f', kSig_i_v)
.addBody([
kExprI32Const, 0x1d, // --
- kExprGrowMemory, 0x00, // --
+ kExprMemoryGrow, 0x00, // --
kExprI32LoadMem, 0x00, 0xff, 0xff, 0x45, // --
])
.exportFunc();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-7914.js b/deps/v8/test/mjsunit/regress/wasm/regress-7914.js
index 731005a872..ede4668d08 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-7914.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-7914.js
@@ -10,7 +10,7 @@ builder.addMemory(16, 32, false);
builder.addFunction('main', kSig_i_v)
.addBody([
...wasmI32Const(10000), // i32.const 10000
- kExprGrowMemory, 0, // grow_memory --> -1
+    kExprMemoryGrow, 0,              // memory.grow --> -1
kExprI32Popcnt, // i32.popcnt --> 32
])
.exportFunc();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-803788.js b/deps/v8/test/mjsunit/regress/wasm/regress-803788.js
index 8edec7c464..e7fa3aaa8f 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-803788.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-803788.js
@@ -12,7 +12,7 @@ let q_table = builder.addImportedTable("q", "table")
let q_base = builder.addImportedGlobal("q", "base", kWasmI32);
let q_fun = builder.addImport("q", "fun", kSig_v_v);
builder.addType(kSig_i_ii);
-builder.addFunctionTableInit(q_base, true, [ q_fun ])
+builder.addElementSegment(q_base, true, [ q_fun ])
let module = new WebAssembly.Module(builder.toBuffer());
let table = new WebAssembly.Table({
element: "anyfunc",
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-808980.js b/deps/v8/test/mjsunit/regress/wasm/regress-808980.js
index 884572b895..ecf6476c37 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-808980.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-808980.js
@@ -17,7 +17,7 @@ builder.addFunction('main', kSig_i_ii).addBody([
sig_index1,
kTableZero
]).exportAs('main');
-builder.setFunctionTableBounds(kTableSize, kTableSize);
+builder.setTableBounds(kTableSize, kTableSize);
var m1_bytes = builder.toBuffer();
var m1 = new WebAssembly.Module(m1_bytes);
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-816226.js b/deps/v8/test/mjsunit/regress/wasm/regress-816226.js
index a9cb715570..1fca02fe82 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-816226.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-816226.js
@@ -2,4 +2,4 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-(new Int8Array((new WebAssembly.Memory({})).buffer)).buffer;
+(new Int8Array((new WebAssembly.Memory({initial: 0})).buffer)).buffer;
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-817380.js b/deps/v8/test/mjsunit/regress/wasm/regress-817380.js
index e6047ea231..2cf50892fc 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-817380.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-817380.js
@@ -20,6 +20,6 @@ const builder2 = new WasmModuleBuilder();
const mul_import = builder2.addImport('q', 'wasm_mul', kSig_i_ii);
builder2.addImportedTable('q', 'table');
const glob_import = builder2.addImportedGlobal('q', 'glob', kWasmI32);
-builder2.addFunctionTableInit(glob_import, true, [mul_import]);
+builder2.addElementSegment(glob_import, true, [mul_import]);
builder2.instantiate(
{q: {glob: 0, js_div: i => i, wasm_mul: mul, table: table}});
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-825087a.js b/deps/v8/test/mjsunit/regress/wasm/regress-825087a.js
index a1f9a1bea8..9a986d0839 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-825087a.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-825087a.js
@@ -5,5 +5,5 @@
PAGES = 10;
memory = new WebAssembly.Memory({initial: PAGES});
buffer = memory.buffer;
-memory.grow();
+memory.grow(0);
WebAssembly.validate(buffer);
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-825087b.js b/deps/v8/test/mjsunit/regress/wasm/regress-825087b.js
index 699549d429..266fd53219 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-825087b.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-825087b.js
@@ -6,5 +6,5 @@ PAGES = 10;
memory = new WebAssembly.Memory({initial: PAGES});
buffer = memory.buffer;
buffer = new Uint8Array(buffer);
-memory.grow();
+memory.grow(0);
WebAssembly.validate(buffer);
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-834619.js b/deps/v8/test/mjsunit/regress/wasm/regress-834619.js
index 5ddc9dd9c4..378e38e03c 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-834619.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-834619.js
@@ -33,7 +33,7 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
kExprCallIndirect, 0, kTableZero
])
.exportFunc();
- builder.addFunctionTableInit(0, false, [0, 1, 1, 0]);
+ builder.addElementSegment(0, false, [0, 1, 1, 0]);
return builder.instantiate({q: {f2: i1.exports.f2, f1: i1.exports.f1}});
})();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-834693.js b/deps/v8/test/mjsunit/regress/wasm/regress-834693.js
index a4073e968c..dac0e8578d 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-834693.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-834693.js
@@ -13,7 +13,7 @@ module.addFunction("main", kSig_v_v)
.addBody([
kExprI32Const, 20,
kExprI32Const, 29,
- kExprGrowMemory, kMemoryZero,
+ kExprMemoryGrow, kMemoryZero,
kExprI32StoreMem, 0, 0xFF, 0xFF, 0x7A])
.exportAs("main");
var instance = module.instantiate();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-8505.js b/deps/v8/test/mjsunit/regress/wasm/regress-8505.js
new file mode 100644
index 0000000000..ebc97a95b4
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-8505.js
@@ -0,0 +1,205 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-wasm --wasm-math-intrinsics --validate-asm --allow-natives-syntax
+
+load('test/mjsunit/wasm/wasm-constants.js');
+load('test/mjsunit/wasm/wasm-module-builder.js');
+
+function verbose(...args) {
+ // print(...args);
+}
+
+//=============================================
+// Global count of failures
+//=============================================
+let numFailures = 0;
+
+function reportFailure(name, vals, m, w) {
+ print(" error: " + name + "(" + vals + ") == " + w + ", expected " + m);
+ numFailures++;
+}
+
+let global_imports = {Math: Math};
+
+let inputs = [
+ 1 / 0,
+ -1 / 0,
+ 0 / 0,
+ -2.70497e+38,
+ -1.4698e+37,
+ -1.22813e+35,
+ -1.34584e+34,
+ -1.0079e+32,
+ -6.49364e+26,
+ -3.06077e+25,
+ -1.46821e+25,
+ -1.17658e+23,
+ -1.9617e+22,
+ -2.7357e+20,
+ -9223372036854775808.0, // INT64_MIN
+ -1.48708e+13,
+ -1.89633e+12,
+ -4.66622e+11,
+ -2.22581e+11,
+ -1.45381e+10,
+ -2147483904.0, // First float32 after INT32_MIN
+ -2147483648.0, // INT32_MIN
+ -2147483520.0, // Last float32 before INT32_MIN
+ -1.3956e+09,
+ -1.32951e+09,
+ -1.30721e+09,
+ -1.19756e+09,
+ -9.26822e+08,
+ -5.09256e+07,
+ -964300.0,
+ -192446.0,
+ -28455.0,
+ -27194.0,
+ -20575.0,
+ -17069.0,
+ -9167.0,
+ -960.178,
+ -113.0,
+ -62.0,
+ -15.0,
+ -7.0,
+ -1.0,
+ -0.0256635,
+ -4.60374e-07,
+ -3.63759e-10,
+ -4.30175e-14,
+ -5.27385e-15,
+ -1.5707963267948966,
+ -1.48084e-15,
+ -2.220446049250313e-16,
+ -1.05755e-19,
+ -3.2995e-21,
+ -1.67354e-23,
+ -1.11885e-23,
+ -1.78506e-30,
+ -1.43718e-34,
+ -1.27126e-38,
+ -0.0,
+ 3e-88,
+ -2e66,
+ 0.0,
+ 2e66,
+ 1.17549e-38,
+ 1.56657e-37,
+ 4.08512e-29,
+ 6.25073e-22,
+ 4.1723e-13,
+ 1.44343e-09,
+ 1.5707963267948966,
+ 5.27004e-08,
+ 9.48298e-08,
+ 5.57888e-07,
+ 4.89988e-05,
+ 0.244326,
+ 1.0,
+ 12.4895,
+ 19.0,
+ 47.0,
+ 106.0,
+ 538.324,
+ 564.536,
+ 819.124,
+ 7048.0,
+ 12611.0,
+ 19878.0,
+ 20309.0,
+ 797056.0,
+ 1.77219e+09,
+ 2147483648.0, // INT32_MAX + 1
+ 4294967296.0, // UINT32_MAX + 1
+ 1.51116e+11,
+ 4.18193e+13,
+ 3.59167e+16,
+ 9223372036854775808.0, // INT64_MAX + 1
+ 18446744073709551616.0, // UINT64_MAX + 1
+ 3.38211e+19,
+ 2.67488e+20,
+ 1.78831e+21,
+ 9.20914e+21,
+ 8.35654e+23,
+ 1.4495e+24,
+ 5.94015e+25,
+ 4.43608e+30,
+ 2.44502e+33,
+ 1.38178e+37,
+ 1.71306e+37,
+ 3.31899e+38,
+ 3.40282e+38,
+];
+
+function assertBinop(name, math_func, wasm_func) {
+ let inputs2 = [ 1, 0.5, -1, -0.5, 0, -0, 1/0, -1/0, 0/0 ];
+ for (val of inputs) {
+ verbose(" ", val);
+ for (val2 of inputs2) {
+ verbose(" ", val2);
+ let m = math_func(val, val2);
+ let w = wasm_func(val, val2);
+ if (!deepEquals(m, w)) reportFailure(name, [val, val2], m, w);
+ m = math_func(val2, val);
+ w = wasm_func(val2, val);
+ if (!deepEquals(m, w)) reportFailure(name, [val2, val], m, w);
+ }
+ }
+}
+
+let stdlib = this;
+function Module_exp(stdlib) {
+ "use asm";
+
+ var Stdlib = stdlib.Math.exp;
+
+  function exp(a, b) {
+ a = +a;
+ b = +b;
+ return +Stdlib(a, b);
+ }
+
+ return {exp: exp};
+}
+
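+// Builds a wasm module that imports Math[name] and forwards both of its arguments to it.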
+function wasmBinop(name, sig) {
+ var builder = new WasmModuleBuilder();
+
+ var sig_index = builder.addType(sig);
+ builder.addImport('Math', name, sig_index);
+ builder.addFunction('main', sig_index)
+ .addBody([
+ kExprGetLocal, 0, // --
+ kExprGetLocal, 1, // --
+ kExprCallFunction, 0
+ ]) // --
+ .exportAs('main');
+
+ return builder.instantiate(global_imports).exports.main;
+}
+
+function asmBinop(name) {
+ let instance = Module_exp(stdlib);
+ assertTrue(%IsAsmWasmCode(Module_exp));
+
+ let asm_func = instance[name];
+  if (typeof asm_func != "function") throw "asm[" + name + "] not found";
+ return asm_func;
+}
+
+(function TestF64() {
+ let name = 'exp';
+ let math_func = Math[name];
+
+ let wasm_func = wasmBinop(name, kSig_d_dd);
+ assertBinop("(f64)" + name, math_func, wasm_func);
+
+ let asm_func = asmBinop(name);
+ assertBinop("(f64)" + name, math_func, asm_func);
+})();
+
+assertEquals(0, numFailures);
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-8533.js b/deps/v8/test/mjsunit/regress/wasm/regress-8533.js
new file mode 100644
index 0000000000..5d782b747c
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-8533.js
@@ -0,0 +1,85 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --wasm-shared-engine --no-wasm-disable-structured-cloning --allow-natives-syntax --experimental-wasm-threads
+
+load('test/mjsunit/wasm/wasm-constants.js');
+load('test/mjsunit/wasm/wasm-module-builder.js');
+
+
+// In this test we start a worker which enters wasm and stays there in a loop.
+// The main thread stays in JS and checks that its thread-in-wasm flag is not
+// set. The main thread calls setTimeout after every check to give the worker a
+// chance to be scheduled.
+const sync_address = 12;
+(function TestPostModule() {
+ let builder = new WasmModuleBuilder();
+ let sig_index = builder.addType(kSig_v_v);
+ let import_id = builder.addImport('m', 'func', sig_index);
+ builder.addFunction('wait', kSig_v_v)
+ .addBody([
+ // Calling the imported function sets the thread-in-wasm flag of the
+ // main thread.
+ kExprCallFunction, import_id, // --
+ kExprLoop, kWasmStmt, // --
+ kExprI32Const, sync_address, // --
+ kExprI32LoadMem, 0, 0, // --
+ kExprI32Eqz,
+ kExprBrIf, 0, // --
+ kExprEnd,
+ ])
+ .exportFunc();
+
+ builder.addFunction('signal', kSig_v_v)
+ .addBody([
+ kExprI32Const, sync_address, // --
+ kExprI32Const, 1, // --
+ kExprI32StoreMem, 0, 0, // --
+ ])
+ .exportFunc();
+ builder.addImportedMemory("m", "imported_mem", 0, 1, "shared");
+
+ let module = builder.toModule();
+ let memory = new WebAssembly.Memory({initial: 1, maximum: 1, shared: true});
+
+ let workerScript = `
+ onmessage = function(msg) {
+ try {
+ let worker_instance = new WebAssembly.Instance(msg.module,
+ {m: {imported_mem: msg.memory,
+ func: _ => 5}});
+ postMessage("start running");
+ worker_instance.exports.wait();
+ postMessage("finished");
+ } catch(e) {
+ postMessage('ERROR: ' + e);
+ }
+ }
+ `;
+
+ let worker = new Worker(workerScript, {type: 'string'});
+ worker.postMessage({module: module, memory: memory});
+
+ let main_instance = new WebAssembly.Instance(
+ module, {m: {imported_mem: memory, func: _ => 7}});
+
+ let counter = 0;
+ function CheckThreadNotInWasm() {
+ // We check the thread-in-wasm flag many times and reschedule ourselves in
+ // between to increase the chance that we read the flag set by the worker.
+ assertFalse(%IsThreadInWasm());
+ counter++;
+ if (counter < 100) {
+ setTimeout(CheckThreadNotInWasm, 0);
+ } else {
+ main_instance.exports.signal(sync_address);
+ assertEquals('finished', worker.getMessage());
+ worker.terminate();
+ }
+ }
+
+ assertFalse(%IsThreadInWasm());
+ assertEquals('start running', worker.getMessage());
+ CheckThreadNotInWasm();
+})();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-894307.js b/deps/v8/test/mjsunit/regress/wasm/regress-894307.js
new file mode 100644
index 0000000000..5aef9eba86
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-894307.js
@@ -0,0 +1,16 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+load('test/mjsunit/wasm/wasm-constants.js');
+load('test/mjsunit/wasm/wasm-module-builder.js');
+
+const builder = new WasmModuleBuilder();
+const sig = makeSig([kWasmI32, kWasmI64, kWasmI64], [kWasmI64]);
+builder.addFunction(undefined, sig)
+ .addBody([
+ kExprGetLocal, 2,
+ kExprGetLocal, 1,
+ kExprI64Shl,
+]);
+builder.instantiate();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-894374.js b/deps/v8/test/mjsunit/regress/wasm/regress-894374.js
new file mode 100644
index 0000000000..fb9cb3b4fe
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-894374.js
@@ -0,0 +1,20 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+load('test/mjsunit/wasm/wasm-constants.js');
+load('test/mjsunit/wasm/wasm-module-builder.js');
+
+const builder = new WasmModuleBuilder();
+builder.addMemory(16, 32, false);
+const sig = makeSig([kWasmI32, kWasmI32, kWasmI32], [kWasmI32]);
+builder.addFunction(undefined, sig)
+ .addBodyWithEnd([
+ kExprMemorySize, 0,
+ kExprI32Const, 0,
+ kExprI64Const, 0,
+ kExprI64StoreMem8, 0, 0,
+ kExprEnd,
+ ]);
+builder.addExport('main', 0);
+builder.instantiate(); // shouldn't crash
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-898932.js b/deps/v8/test/mjsunit/regress/wasm/regress-898932.js
new file mode 100644
index 0000000000..e3d1bbf378
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-898932.js
@@ -0,0 +1,13 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --wasm-max-mem-pages=49152
+
+let mem = new WebAssembly.Memory({initial: 1});
+try {
+ mem.grow(49151);
+} catch (e) {
+ // This can fail on 32-bit systems if we cannot make such a big reservation.
+ if (!(e instanceof RangeError)) throw e;
+}
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-905815.js b/deps/v8/test/mjsunit/regress/wasm/regress-905815.js
new file mode 100644
index 0000000000..7967d99756
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-905815.js
@@ -0,0 +1,27 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+load('test/mjsunit/wasm/wasm-constants.js');
+load('test/mjsunit/wasm/wasm-module-builder.js');
+
+(function() {
+ const builder = new WasmModuleBuilder();
+ builder.addType(makeSig([], []));
+ builder.addType(makeSig([kWasmI32], [kWasmI32]));
+ builder.addFunction(undefined, 0 /* sig */)
+ .addBodyWithEnd([
+ kExprEnd, // @1
+ ]);
+ builder.addFunction(undefined, 1 /* sig */)
+ .addLocals({i32_count: 65})
+ .addBodyWithEnd([
+ kExprLoop, kWasmStmt, // @3
+ kSimdPrefix,
+ kExprF32x4Min,
+ kExprI64UConvertI32,
+ kExprI64RemS,
+ kExprUnreachable,
+ kExprLoop, 0x02, // @10
+ ]);
+})();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-910824.js b/deps/v8/test/mjsunit/regress/wasm/regress-910824.js
new file mode 100644
index 0000000000..7c8f154496
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-910824.js
@@ -0,0 +1,37 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+load('test/mjsunit/wasm/wasm-constants.js');
+load('test/mjsunit/wasm/wasm-module-builder.js');
+
+const builder = new WasmModuleBuilder();
+builder.addGlobal(kWasmI32, 1);
+builder.addGlobal(kWasmF32, 1);
+builder.addType(makeSig([kWasmI32, kWasmF32, kWasmF32, kWasmF64], [kWasmI32]));
+builder.addFunction(undefined, 0 /* sig */)
+ .addLocals({i32_count: 504})
+ .addBody([
+kExprGetGlobal, 0x00,
+kExprSetLocal, 0x04,
+kExprGetLocal, 0x04,
+kExprI32Const, 0x01,
+kExprI32Sub,
+kExprGetGlobal, 0x00,
+kExprI32Const, 0x00,
+kExprI32Eqz,
+kExprGetGlobal, 0x00,
+kExprI32Const, 0x01,
+kExprI32Const, 0x01,
+kExprI32Sub,
+kExprGetGlobal, 0x00,
+kExprI32Const, 0x00,
+kExprI32Eqz,
+kExprGetGlobal, 0x00,
+kExprI32Const, 0x00,
+kExprI32Const, 0x01,
+kExprI32Sub,
+kExprGetGlobal, 0x01,
+kExprUnreachable,
+]);
+builder.instantiate();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-913804.js b/deps/v8/test/mjsunit/regress/wasm/regress-913804.js
new file mode 100644
index 0000000000..c12013c9f8
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-913804.js
@@ -0,0 +1,17 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+load('test/mjsunit/wasm/wasm-constants.js');
+load('test/mjsunit/wasm/wasm-module-builder.js');
+
+const builder = new WasmModuleBuilder();
+builder.addFunction('main', kSig_v_v).addBody([
+ kExprLoop, kWasmStmt, // loop
+ /**/ kExprBr, 0x01, // br depth=1
+ /**/ kExprBlock, kWasmStmt, // block
+ /**/ /**/ kExprBr, 0x02, // br depth=2
+ /**/ /**/ kExprEnd, // end [block]
+ /**/ kExprEnd // end [loop]
+]);
+builder.instantiate();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-916869.js b/deps/v8/test/mjsunit/regress/wasm/regress-916869.js
new file mode 100644
index 0000000000..6acd5d68d7
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-916869.js
@@ -0,0 +1,14 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+load('test/mjsunit/wasm/wasm-constants.js');
+load('test/mjsunit/wasm/wasm-module-builder.js');
+
+const builder = new WasmModuleBuilder();
+const sig = builder.addType(makeSig([kWasmI32, kWasmI32, kWasmI32], [kWasmI32]));
+builder.addFunction('main', sig)
+ .addBody([kExprI32Const, 0x01, kExprI32SExtendI8])
+ .exportFunc();
+const instance = builder.instantiate();
+assertEquals(1, instance.exports.main());
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-917412.js b/deps/v8/test/mjsunit/regress/wasm/regress-917412.js
new file mode 100644
index 0000000000..fd7ab99020
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-917412.js
@@ -0,0 +1,34 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+load('test/mjsunit/wasm/wasm-constants.js');
+load('test/mjsunit/wasm/wasm-module-builder.js');
+
+const builder = new WasmModuleBuilder();
+const sig = builder.addType(makeSig([kWasmI32, kWasmI64], []));
+builder.addFunction(undefined, sig)
+ .addBody([
+kExprI32Const, 0,
+kExprIf, kWasmI32,
+ kExprI32Const, 0,
+kExprElse,
+ kExprI32Const, 1,
+ kExprEnd,
+kExprTeeLocal, 0,
+kExprGetLocal, 0,
+kExprLoop, kWasmStmt,
+ kExprI64Const, 0x80, 0x80, 0x80, 0x70,
+ kExprSetLocal, 0x01,
+ kExprI32Const, 0x00,
+ kExprIf, kWasmI32,
+ kExprI32Const, 0x00,
+ kExprElse,
+ kExprI32Const, 0x00,
+ kExprEnd,
+ kExprBrIf, 0x00,
+ kExprUnreachable,
+ kExprEnd,
+kExprUnreachable,
+]);
+builder.instantiate();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-917588.js b/deps/v8/test/mjsunit/regress/wasm/regress-917588.js
new file mode 100644
index 0000000000..cb07bb5280
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-917588.js
@@ -0,0 +1,26 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+load('test/mjsunit/wasm/wasm-constants.js');
+load('test/mjsunit/wasm/wasm-module-builder.js');
+
+const builder = new WasmModuleBuilder();
+const sig = builder.addType(makeSig([], [kWasmF64]));
+builder.addFunction(undefined, sig)
+ .addLocals({f32_count: 5}).addLocals({f64_count: 3})
+ .addBody([
+kExprBlock, kWasmF64,
+ kExprF64Const, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x3f,
+ kExprF64Const, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ kExprI32Const, 0,
+ kExprIf, kWasmI32,
+ kExprI32Const, 0,
+ kExprElse,
+ kExprI32Const, 1,
+ kExprEnd,
+ kExprBrIf, 0,
+ kExprUnreachable,
+kExprEnd
+]);
+builder.instantiate();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-917588b.js b/deps/v8/test/mjsunit/regress/wasm/regress-917588b.js
new file mode 100644
index 0000000000..9d461cfd84
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-917588b.js
@@ -0,0 +1,55 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+load('test/mjsunit/wasm/wasm-constants.js');
+load('test/mjsunit/wasm/wasm-module-builder.js');
+
+const builder = new WasmModuleBuilder();
+const sig0 = builder.addType(makeSig([kWasmF32], [kWasmI32]));
+const sig1 = builder.addType(makeSig([kWasmI64, kWasmI32, kWasmI64, kWasmF32, kWasmI64], [kWasmF32]));
+const sig2 = builder.addType(makeSig([kWasmF32], [kWasmF32]));
+// Generate function 1 (out of 3).
+builder.addFunction(undefined, sig0).addBody([kExprI32Const, 0x00]);
+// Generate function 2 (out of 3).
+builder.addFunction(undefined, sig1)
+ .addBody([
+ // signature: f_lilfl
+ kExprBlock, kWasmF32, // @1 f32
+ kExprI32Const, 0x00,
+ kExprIf, kWasmStmt, // @5
+ kExprLoop, kWasmStmt, // @7
+ kExprBlock, kWasmI32, // @9 i32
+ kExprF32Const, 0x00, 0x00, 0x80, 0xc1,
+ kExprF32Const, 0x00, 0x00, 0x80, 0x45,
+ kExprCallFunction, 0x00, // function #0: i_f
+ kExprBrIf, 0x03, // depth=3
+ kExprDrop,
+ kExprI32Const, 0xd8, 0x00,
+ kExprEnd, // @29
+ kExprBrIf, 0x00, // depth=0
+ kExprEnd, // @32
+ kExprF32Const, 0x00, 0x00, 0x80, 0x3f,
+ kExprF32Const, 0x00, 0x00, 0x80, 0xc6,
+ kExprBlock, kWasmI32, // @43 i32
+ kExprF32Const, 0x00, 0x00, 0x80, 0x3f,
+ kExprCallFunction, 0x02, // function #2: f_f
+ kExprDrop,
+ kExprI32Const, 0x68,
+ kExprEnd, // @55
+ kExprBrIf, 0x01, // depth=1
+ kExprI32Const, 0x00,
+ kExprSelect,
+ kExprDrop,
+ kExprUnreachable,
+ kExprElse, // @63
+ kExprNop,
+ kExprEnd, // @65
+ kExprF32Const, 0x00, 0x00, 0x69, 0x43,
+ kExprEnd // @71
+]);
+// Generate function 3 (out of 3).
+builder.addFunction(undefined, sig2).addBody([
+ kExprF32Const, 0x00, 0x00, 0x80, 0x3f
+]);
+builder.instantiate();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-918149.js b/deps/v8/test/mjsunit/regress/wasm/regress-918149.js
new file mode 100644
index 0000000000..f19a26d2a3
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-918149.js
@@ -0,0 +1,12 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+load('test/mjsunit/wasm/wasm-constants.js');
+load('test/mjsunit/wasm/wasm-module-builder.js');
+
+const builder = new WasmModuleBuilder();
+const sig =
+ builder.addType(makeSig([kWasmI32, kWasmI32, kWasmI32], [kWasmI64]));
+builder.addFunction('main', sig).addBody([kExprI64Const, 1, kExprI64SExtendI8]);
+builder.instantiate();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-918284.js b/deps/v8/test/mjsunit/regress/wasm/regress-918284.js
new file mode 100644
index 0000000000..05614edf3c
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-918284.js
@@ -0,0 +1,21 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+load('test/mjsunit/wasm/wasm-constants.js');
+load('test/mjsunit/wasm/wasm-module-builder.js');
+
+const builder = new WasmModuleBuilder();
+builder.addFunction(undefined, kSig_i_i)
+ .addLocals({i32_count: 7})
+ .addBody([
+ kExprI32Const, 0,
+ kExprIf, kWasmI32, // @11 i32
+ kExprI32Const, 0,
+ kExprElse, // @15
+ kExprI32Const, 1,
+ kExprEnd, // @18
+ kExprTeeLocal, 0,
+ kExprI32Popcnt
+]);
+builder.instantiate();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-918917.js b/deps/v8/test/mjsunit/regress/wasm/regress-918917.js
new file mode 100644
index 0000000000..725287ae74
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-918917.js
@@ -0,0 +1,22 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+load('test/mjsunit/wasm/wasm-constants.js');
+load('test/mjsunit/wasm/wasm-module-builder.js');
+
+const builder = new WasmModuleBuilder();
+builder.addFunction(undefined, kSig_v_v)
+ .addLocals({i32_count: 1}).addLocals({f32_count: 1}).addLocals({f64_count: 1})
+ .addBody([
+kExprGetLocal, 1,
+kExprGetLocal, 2,
+kExprGetLocal, 0,
+kExprIf, kWasmI32,
+ kExprI32Const, 1,
+kExprElse,
+ kExprUnreachable,
+ kExprEnd,
+kExprUnreachable
+]);
+builder.instantiate();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-919308.js b/deps/v8/test/mjsunit/regress/wasm/regress-919308.js
new file mode 100644
index 0000000000..cb10662290
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-919308.js
@@ -0,0 +1,37 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+load('test/mjsunit/wasm/wasm-constants.js');
+load('test/mjsunit/wasm/wasm-module-builder.js');
+
+const builder = new WasmModuleBuilder();
+builder.addFunction(undefined, kSig_i_i)
+ .addLocals({i32_count: 5})
+ .addBody([
+ kExprGetLocal, 0, // --> 1
+ kExprIf, kWasmI32,
+ kExprGetLocal, 0, // --> 1
+ kExprElse,
+ kExprUnreachable,
+ kExprEnd,
+ kExprIf, kWasmI32,
+ kExprGetLocal, 0, // --> 1
+ kExprElse,
+ kExprUnreachable,
+ kExprEnd,
+ kExprIf, kWasmI32,
+ kExprI32Const, 0,
+ kExprGetLocal, 0,
+ kExprI32Sub, // --> -1
+ kExprGetLocal, 0,
+ kExprGetLocal, 0,
+ kExprI32Sub, // --> 0
+ kExprI32Sub, // --> -1
+ kExprElse,
+ kExprUnreachable,
+ kExprEnd
+]);
+builder.addExport('main', 0);
+const instance = builder.instantiate();
+assertEquals(-1, instance.exports.main(1));
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-919533.js b/deps/v8/test/mjsunit/regress/wasm/regress-919533.js
new file mode 100644
index 0000000000..58273f666b
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-919533.js
@@ -0,0 +1,25 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+load('test/mjsunit/wasm/wasm-constants.js');
+load('test/mjsunit/wasm/wasm-module-builder.js');
+
+const builder = new WasmModuleBuilder();
+builder.addFunction(undefined, kSig_v_v).addBody([]);
+builder.addFunction(undefined, kSig_i_i)
+ .addBody([
+ kExprGetLocal, 0,
+ kExprGetLocal, 0,
+ // Stack now contains two copies of the first param register.
+ // Start a loop to create a merge point (values still in registers).
+ kExprLoop, kWasmStmt,
+ // The call spills all values.
+ kExprCallFunction, 0,
+ // Break to the loop. Now the spilled values need to be loaded back *into
+ // the same register*.
+ kExprBr, 0,
+ kExprEnd,
+ kExprDrop
+]);
+builder.instantiate();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-922432.js b/deps/v8/test/mjsunit/regress/wasm/regress-922432.js
new file mode 100644
index 0000000000..8f1ad11ebc
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-922432.js
@@ -0,0 +1,21 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --experimental-wasm-eh
+
+load("test/mjsunit/wasm/wasm-constants.js");
+load("test/mjsunit/wasm/wasm-module-builder.js");
+
+(function TestTruncatedBrOnExnInLoop() {
+ let builder = new WasmModuleBuilder();
+ let fun = builder.addFunction(undefined, kSig_v_v)
+ .addLocals({except_count: 1})
+ .addBody([
+ kExprLoop, kWasmStmt,
+ kExprGetLocal, 0,
+ kExprBrOnExn // Bytecode truncated here.
+ ]).exportFunc();
+ fun.body.pop(); // Pop implicitly added kExprEnd from body.
+ assertThrows(() => builder.instantiate(), WebAssembly.CompileError);
+})();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-922670.js b/deps/v8/test/mjsunit/regress/wasm/regress-922670.js
new file mode 100644
index 0000000000..d5617df238
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-922670.js
@@ -0,0 +1,32 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+load('test/mjsunit/wasm/wasm-constants.js');
+load('test/mjsunit/wasm/wasm-module-builder.js');
+
+const builder = new WasmModuleBuilder();
+const sig = builder.addType(makeSig([kWasmI32], []));
+builder.addFunction(undefined, sig)
+ .addLocals({i64_count: 1})
+ .addBody([
+ kExprLoop, kWasmI32,
+ kExprGetLocal, 1,
+ kExprI64Const, 1,
+ kExprLoop, kWasmI32,
+ kExprGetLocal, 0,
+ kExprI32Const, 1,
+ kExprI32Const, 1,
+ kExprIf, kWasmI32,
+ kExprI32Const, 1,
+ kExprElse,
+ kExprUnreachable,
+ kExprEnd,
+ kExprSelect,
+ kExprUnreachable,
+ kExprEnd,
+ kExprUnreachable,
+ kExprEnd,
+ kExprUnreachable
+]);
+builder.instantiate();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-922933.js b/deps/v8/test/mjsunit/regress/wasm/regress-922933.js
new file mode 100644
index 0000000000..3af0e86e1e
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-922933.js
@@ -0,0 +1,52 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+load('test/mjsunit/wasm/wasm-constants.js');
+load('test/mjsunit/wasm/wasm-module-builder.js');
+
+const builder = new WasmModuleBuilder();
+const sig = builder.addType(makeSig([kWasmI64], [kWasmI64]));
+builder.addFunction(undefined, sig)
+ .addLocals({i32_count: 14}).addLocals({i64_count: 17}).addLocals({f32_count: 14})
+ .addBody([
+ kExprBlock, kWasmStmt,
+ kExprBr, 0x00,
+ kExprEnd,
+ kExprBlock, kWasmStmt,
+ kExprI32Const, 0x00,
+ kExprSetLocal, 0x09,
+ kExprI32Const, 0x00,
+ kExprIf, kWasmStmt,
+ kExprBlock, kWasmStmt,
+ kExprI32Const, 0x00,
+ kExprSetLocal, 0x0a,
+ kExprBr, 0x00,
+ kExprEnd,
+ kExprBlock, kWasmStmt,
+ kExprBlock, kWasmStmt,
+ kExprGetLocal, 0x00,
+ kExprSetLocal, 0x12,
+ kExprBr, 0x00,
+ kExprEnd,
+ kExprGetLocal, 0x16,
+ kExprSetLocal, 0x0f,
+ kExprGetLocal, 0x0f,
+ kExprSetLocal, 0x17,
+ kExprGetLocal, 0x0f,
+ kExprSetLocal, 0x18,
+ kExprGetLocal, 0x17,
+ kExprGetLocal, 0x18,
+ kExprI64ShrS,
+ kExprSetLocal, 0x19,
+ kExprUnreachable,
+ kExprEnd,
+ kExprUnreachable,
+ kExprElse,
+ kExprUnreachable,
+ kExprEnd,
+ kExprUnreachable,
+ kExprEnd,
+ kExprUnreachable
+]);
+builder.instantiate();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-924905.js b/deps/v8/test/mjsunit/regress/wasm/regress-924905.js
new file mode 100644
index 0000000000..5db3583e4c
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-924905.js
@@ -0,0 +1,18 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+load('test/mjsunit/wasm/wasm-constants.js');
+load('test/mjsunit/wasm/wasm-module-builder.js');
+
+let builder = new WasmModuleBuilder();
+builder.addFunction("kaboom", kSig_i_v)
+ .addBody([
+ kExprI32Const, 0,
+ kExprI32Const, 0,
+ kExprI32And,
+ kExprI32Const, 0,
+ kExprI32ShrU,
+ ]).exportFunc();
+let instance = builder.instantiate();
+assertEquals(0, instance.exports.kaboom());
diff --git a/deps/v8/test/mjsunit/samevalue.js b/deps/v8/test/mjsunit/samevalue.js
index 1e5384d73d..30cce35bcc 100644
--- a/deps/v8/test/mjsunit/samevalue.js
+++ b/deps/v8/test/mjsunit/samevalue.js
@@ -26,7 +26,7 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --expose-natives-as natives --allow-natives-syntax
+// Flags: --allow-natives-syntax
// Test the SameValue and SameValueZero internal methods.
var obj1 = {x: 10, y: 11, z: "test"};
diff --git a/deps/v8/test/mjsunit/shared-function-tier-up-turbo.js b/deps/v8/test/mjsunit/shared-function-tier-up-turbo.js
index f8e9117785..0d23b4f843 100644
--- a/deps/v8/test/mjsunit/shared-function-tier-up-turbo.js
+++ b/deps/v8/test/mjsunit/shared-function-tier-up-turbo.js
@@ -6,6 +6,10 @@
// Flags: --opt --no-always-opt --turbo-filter=*
// If we are always or never optimizing it is useless.
+if (isNeverOptimizeLiteMode()) {
+ print("Warning: skipping test that requires optimization in Lite mode.");
+ quit(0);
+}
assertFalse(isAlwaysOptimize());
assertFalse(isNeverOptimize());
diff --git a/deps/v8/test/mjsunit/smi-ops-inlined.js b/deps/v8/test/mjsunit/smi-ops-inlined.js
index afc6cc0765..dd753d17b0 100644
--- a/deps/v8/test/mjsunit/smi-ops-inlined.js
+++ b/deps/v8/test/mjsunit/smi-ops-inlined.js
@@ -25,8 +25,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --always-inline-smi-code
-
const SMI_MAX = (1 << 30) - 1;
const SMI_MIN = -(1 << 30);
const ONE = 1;
diff --git a/deps/v8/test/mjsunit/spread-large-array.js b/deps/v8/test/mjsunit/spread-large-array.js
new file mode 100644
index 0000000000..5ec7a82e38
--- /dev/null
+++ b/deps/v8/test/mjsunit/spread-large-array.js
@@ -0,0 +1,13 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Test that spread can create arrays in large object space.
+
+const n = 130000;
+
+{
+ let x = new Array(n);
+ for (let i = 0; i < n; ++i) x[i] = i;
+ let a = [...x];
+}
diff --git a/deps/v8/test/mjsunit/spread-large-map.js b/deps/v8/test/mjsunit/spread-large-map.js
new file mode 100644
index 0000000000..f70770d028
--- /dev/null
+++ b/deps/v8/test/mjsunit/spread-large-map.js
@@ -0,0 +1,17 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Test that spread can create arrays in large object space.
+
+const n = 130000;
+
+{
+ let x = new Map();
+ for (let i = 0; i < n; ++i) x.set(i, String(i));
+ let a = [...x.values()];
+}{
+ let x = new Map();
+ for (let i = 0; i < n; ++i) x.set(i, String(i));
+ let a = [...x.keys()];
+}
diff --git a/deps/v8/test/mjsunit/spread-large-set.js b/deps/v8/test/mjsunit/spread-large-set.js
new file mode 100644
index 0000000000..e9f406ea02
--- /dev/null
+++ b/deps/v8/test/mjsunit/spread-large-set.js
@@ -0,0 +1,21 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Test that spread can create arrays in large object space.
+
+const n = 130000;
+
+{
+ let x = new Set();
+ for (let i = 0; i < n; ++i) x.add(i);
+ let a = [...x];
+}{
+ let x = new Set();
+ for (let i = 0; i < n; ++i) x.add(i);
+ let a = [...x.values()];
+}{
+ let x = new Set();
+ for (let i = 0; i < n; ++i) x.add(i);
+ let a = [...x.keys()];
+}
diff --git a/deps/v8/test/mjsunit/spread-large-string.js b/deps/v8/test/mjsunit/spread-large-string.js
new file mode 100644
index 0000000000..14320ee5d6
--- /dev/null
+++ b/deps/v8/test/mjsunit/spread-large-string.js
@@ -0,0 +1,13 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Test that spread can create arrays in large object space.
+
+const n = 130000;
+
+{
+ let x = new Array(n);
+ for (let i = 0; i < n; ++i) x[i] = i;
+ let a = [...String(x)];
+}
diff --git a/deps/v8/test/mjsunit/stack-traces-class-fields.js b/deps/v8/test/mjsunit/stack-traces-class-fields.js
index 84d7e8a843..d40abbab9a 100644
--- a/deps/v8/test/mjsunit/stack-traces-class-fields.js
+++ b/deps/v8/test/mjsunit/stack-traces-class-fields.js
@@ -75,14 +75,14 @@ function testClassInstantiation() {
// ReferenceError: FAIL is not defined
// at thrower
-// at X.<instance_fields_initializer>
+// at X.<instance_members_initializer>
// at new X
// at testClassInstantiation
// at testTrace
testTrace(
"during class instantiation",
testClassInstantiation,
- ["thrower", "X.<instance_fields_initializer>", "new X"],
+ ["thrower", "X.<instance_members_initializer>", "new X"],
["anonymous"]
);
@@ -98,14 +98,14 @@ function testClassInstantiationWithSuper() {
// ReferenceError: FAIL is not defined
// at thrower
-// at X.<instance_fields_initializer>
+// at X.<instance_members_initializer>
// at new X
// at testClassInstantiation
// at testTrace
testTrace(
"during class instantiation with super",
testClassInstantiationWithSuper,
- ["thrower", "X.<instance_fields_initializer>", "new X"],
+ ["thrower", "X.<instance_members_initializer>", "new X"],
["Base", "anonymous"]
);
@@ -124,14 +124,14 @@ function testClassInstantiationWithSuper2() {
// ReferenceError: FAIL is not defined
// at thrower
-// at X.<instance_fields_initializer>
+// at X.<instance_members_initializer>
// at new X
// at testClassInstantiation
// at testTrace
testTrace(
"during class instantiation with super2",
testClassInstantiationWithSuper2,
- ["thrower", "X.<instance_fields_initializer>", "new X"],
+ ["thrower", "X.<instance_members_initializer>", "new X"],
["Base", "anonymous"]
);
@@ -151,7 +151,7 @@ function testClassInstantiationWithSuper3() {
// ReferenceError: FAIL is not defined
// at thrower
-// at X.<instance_fields_initializer>
+// at X.<instance_members_initializer>
// at new Base
// at new X
// at testClassInstantiationWithSuper3
@@ -159,7 +159,7 @@ function testClassInstantiationWithSuper3() {
testTrace(
"during class instantiation with super3",
testClassInstantiationWithSuper3,
- ["thrower", "X.<instance_fields_initializer>", "new Base", "new X"],
+ ["thrower", "X.<instance_members_initializer>", "new Base", "new X"],
["anonymous"]
);
diff --git a/deps/v8/test/mjsunit/test-async.js b/deps/v8/test/mjsunit/test-async.js
index d4fee9bfb9..8f7b553988 100644
--- a/deps/v8/test/mjsunit/test-async.js
+++ b/deps/v8/test/mjsunit/test-async.js
@@ -80,7 +80,7 @@ var testAsync;
}
drainMicrotasks() {
- %RunMicrotasks();
+ %PerformMicrotaskCheckpoint();
}
done_() {
@@ -111,7 +111,7 @@ var testAsync;
testAsync = function(test, name) {
let assert = new AsyncAssertion(test, name);
test(assert);
- %RunMicrotasks();
+ %PerformMicrotaskCheckpoint();
assert.done_();
}
})();
diff --git a/deps/v8/test/mjsunit/testcfg.py b/deps/v8/test/mjsunit/testcfg.py
index 422210365e..901d8e90a4 100644
--- a/deps/v8/test/mjsunit/testcfg.py
+++ b/deps/v8/test/mjsunit/testcfg.py
@@ -81,9 +81,6 @@ class TestSuite(testsuite.TestSuite):
def _test_class(self):
return TestCase
- def _suppressed_test_class(self):
- return SuppressedTestCase
-
class TestCase(testcase.D8TestCase):
def __init__(self, *args, **kwargs):
@@ -282,27 +279,5 @@ class CombinedTest(testcase.D8TestCase):
test._get_statusfile_flags() for test in self._tests)
-class SuppressedTestCase(TestCase):
- """The same as a standard mjsunit test case with all asserts as no-ops."""
- def __init__(self, *args, **kwargs):
- super(SuppressedTestCase, self).__init__(*args, **kwargs)
- self._mjsunit_files.append(
- os.path.join(self.suite.root, "mjsunit_suppressions.js"))
-
- def _prepare_outcomes(self, *args, **kwargs):
- super(SuppressedTestCase, self)._prepare_outcomes(*args, **kwargs)
- # Skip tests expected to fail. We suppress all asserts anyways, but some
- # tests are expected to fail with type errors or even dchecks, and we
- # can't differentiate that.
- if statusfile.FAIL in self._statusfile_outcomes:
- self._statusfile_outcomes = [statusfile.SKIP]
-
- def _get_extra_flags(self, *args, **kwargs):
- return (
- super(SuppressedTestCase, self)._get_extra_flags(*args, **kwargs) +
- ['--disable-abortjs']
- )
-
-
def GetSuite(*args, **kwargs):
return TestSuite(*args, **kwargs)
diff --git a/deps/v8/test/mjsunit/try-catch-default-destructuring.js b/deps/v8/test/mjsunit/try-catch-default-destructuring.js
new file mode 100644
index 0000000000..c254a25855
--- /dev/null
+++ b/deps/v8/test/mjsunit/try-catch-default-destructuring.js
@@ -0,0 +1,24 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function f1() {
+ let y = 200;
+ try {
+ throw {}
+ } catch ({x=()=>y, y=300}) {
+ return x()
+ }
+}
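+// Note: x's default closure is created in the catch-parameter scope, where y is
+// also being bound; the thrown object has no y, so y defaults to 300 and x()
+// returns it.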
+assertEquals(300, f1());
+
+function f2() {
+ let y = 200;
+ try {
+ throw {}
+ } catch ({x=()=>y}) {
+ let y = 300;
+ return x()
+ }
+}
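+// Note: here the default for x is evaluated in the catch-parameter scope, which
+// cannot see the 'let y = 300' declared in the catch block, so x() returns the
+// outer y (200).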
+assertEquals(200, f2());
diff --git a/deps/v8/test/mjsunit/type-profile/regress-707223.js b/deps/v8/test/mjsunit/type-profile/regress-707223.js
index 078b687c51..ea0e84bf68 100644
--- a/deps/v8/test/mjsunit/type-profile/regress-707223.js
+++ b/deps/v8/test/mjsunit/type-profile/regress-707223.js
@@ -2,7 +2,5 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --type-profile
-
let e;
eval("e");
diff --git a/deps/v8/test/mjsunit/unicodelctest-no-optimization.js b/deps/v8/test/mjsunit/unicodelctest-no-optimization.js
index 3bcb5bf256..0b31c560e0 100644
--- a/deps/v8/test/mjsunit/unicodelctest-no-optimization.js
+++ b/deps/v8/test/mjsunit/unicodelctest-no-optimization.js
@@ -60,6 +60,7 @@ function rand() {
// To make the test results predictable, we use a 100% deterministic
// alternative.
// Robert Jenkins' 32 bit integer hash function.
+ seed = seed & 0xffffffff;
seed = ((seed + 0x7ed55d16) + (seed << 12)) & 0xffffffff;
seed = ((seed ^ 0xc761c23c) ^ (seed >>> 19)) & 0xffffffff;
seed = ((seed + 0x165667b1) + (seed << 5)) & 0xffffffff;
diff --git a/deps/v8/test/mjsunit/unicodelctest.js b/deps/v8/test/mjsunit/unicodelctest.js
index 2caaabdcbe..95d6dac78f 100644
--- a/deps/v8/test/mjsunit/unicodelctest.js
+++ b/deps/v8/test/mjsunit/unicodelctest.js
@@ -59,6 +59,7 @@ function rand() {
// To make the test results predictable, we use a 100% deterministic
// alternative.
// Robert Jenkins' 32 bit integer hash function.
+ seed = seed & 0xffffffff;
seed = ((seed + 0x7ed55d16) + (seed << 12)) & 0xffffffff;
seed = ((seed ^ 0xc761c23c) ^ (seed >>> 19)) & 0xffffffff;
seed = ((seed + 0x165667b1) + (seed << 5)) & 0xffffffff;
diff --git a/deps/v8/test/mjsunit/wasm/OWNERS b/deps/v8/test/mjsunit/wasm/OWNERS
index d9195d8e54..b6d75023d7 100644
--- a/deps/v8/test/mjsunit/wasm/OWNERS
+++ b/deps/v8/test/mjsunit/wasm/OWNERS
@@ -1,5 +1,3 @@
ahaas@chromium.org
-bradnelson@chromium.org
clemensh@chromium.org
-eholk@chromium.org
titzer@chromium.org
diff --git a/deps/v8/test/mjsunit/wasm/anyfunc.js b/deps/v8/test/mjsunit/wasm/anyfunc.js
new file mode 100644
index 0000000000..30faef12a7
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/anyfunc.js
@@ -0,0 +1,43 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-wasm --experimental-wasm-anyref --expose-gc
+
+load("test/mjsunit/wasm/wasm-constants.js");
+load("test/mjsunit/wasm/wasm-module-builder.js");
+
+(function testAnyRefIdentityFunction() {
+ print(arguments.callee.name);
+ const builder = new WasmModuleBuilder();
+ builder.addFunction('main', kSig_a_a)
+ .addBody([kExprGetLocal, 0])
+ .exportFunc();
+
+
+ const instance = builder.instantiate();
+
+ assertThrows(() => instance.exports.main(print), TypeError);
+ assertThrows(() => instance.exports.main({'hello' : 'world'}), TypeError);
+ assertSame(
+ instance.exports.main, instance.exports.main(instance.exports.main));
+})();
+
+(function testPassAnyRefToImportedFunction() {
+ print(arguments.callee.name);
+ const builder = new WasmModuleBuilder();
+ const sig_index = builder.addType(kSig_v_a);
+ const imp_index = builder.addImport("q", "func", sig_index);
+ builder.addFunction('main', sig_index)
+ .addBody([kExprGetLocal, 0,
+ kExprCallFunction, imp_index])
+ .exportFunc();
+
+ const main = builder.instantiate({q: {func: checkFunction}}).exports.main;
+
+ function checkFunction(value) {
+ assertSame(main, value);
+ }
+
+ main(main);
+})();
diff --git a/deps/v8/test/mjsunit/wasm/anyref-globals.js b/deps/v8/test/mjsunit/wasm/anyref-globals.js
new file mode 100644
index 0000000000..64f56366e0
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/anyref-globals.js
@@ -0,0 +1,313 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --experimental-wasm-anyref --expose-gc
+
+load("test/mjsunit/wasm/wasm-constants.js");
+load("test/mjsunit/wasm/wasm-module-builder.js");
+
+(function TestDefaultValue() {
+ print(arguments.callee.name);
+ let builder = new WasmModuleBuilder();
+ const g_nullref = builder.addGlobal(kWasmAnyRef, true);
+ builder.addFunction("main", kSig_r_v)
+ .addBody([kExprGetGlobal, g_nullref.index])
+ .exportAs("main");
+
+ const instance = builder.instantiate();
+ assertNull(instance.exports.main());
+})();
+
+(function TestDefaultValueSecondGlobal() {
+ print(arguments.callee.name);
+ let builder = new WasmModuleBuilder();
+ const g_setref = builder.addGlobal(kWasmAnyRef, true);
+ const g_nullref = builder.addGlobal(kWasmAnyRef, true);
+ builder.addFunction("main", kSig_r_r)
+ .addBody([
+ kExprGetLocal, 0,
+ kExprSetGlobal, g_setref.index,
+ kExprGetGlobal, g_nullref.index
+ ])
+ .exportAs("main");
+
+ const instance = builder.instantiate();
+ assertNull(instance.exports.main({}));
+})();
+
+(function TestGlobalChangeValue() {
+ print(arguments.callee.name);
+ let builder = new WasmModuleBuilder();
+ // Dummy global for offset.
+ builder.addGlobal(kWasmAnyRef, true);
+ const g = builder.addGlobal(kWasmAnyRef, true);
+ builder.addFunction("main", kSig_r_r)
+ .addBody([
+ kExprGetLocal, 0,
+ kExprSetGlobal, g.index,
+ kExprGetGlobal, g.index
+ ])
+ .exportAs("main");
+
+ const instance = builder.instantiate();
+
+ const test_value = {hello: 'world'};
+ assertSame(test_value, instance.exports.main(test_value));
+})();
+
+(function TestGlobalChangeValueWithGC() {
+ print(arguments.callee.name);
+ let builder = new WasmModuleBuilder();
+ const gc_index = builder.addImport("q", "gc", kSig_v_v);
+ // Dummy global for offset.
+ builder.addGlobal(kWasmAnyRef, true);
+ const g = builder.addGlobal(kWasmAnyRef, true);
+ builder.addFunction("main", kSig_r_r)
+ .addBody([
+ kExprGetLocal, 0,
+ kExprSetGlobal, g.index,
+ kExprCallFunction, gc_index, // call gc
+ kExprGetGlobal, g.index
+ ])
+ .exportAs("main");
+
+ const instance = builder.instantiate({q: {gc: gc}});
+
+ const test_value = {hello: 'world'};
+ assertSame(test_value, instance.exports.main(test_value));
+ assertSame(5, instance.exports.main(5));
+ assertSame("Hello", instance.exports.main("Hello"));
+})();
+
+(function TestGlobalAsRoot() {
+ print(arguments.callee.name);
+ let builder = new WasmModuleBuilder();
+ const g = builder.addGlobal(kWasmAnyRef, true);
+ builder.addFunction("get_global", kSig_r_v)
+ .addBody([
+ kExprGetGlobal, g.index
+ ])
+ .exportAs("get_global");
+
+ builder.addFunction("set_global", kSig_v_r)
+ .addBody([
+ kExprGetLocal, 0,
+ kExprSetGlobal, g.index
+ ])
+ .exportAs("set_global");
+
+ const instance = builder.instantiate();
+
+ let test_value = {hello: 'world'};
+ instance.exports.set_global(test_value);
+ test_value = null;
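+  // The wasm global now holds the only reference and must keep the object alive.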
+ gc();
+
+ const result = instance.exports.get_global();
+
+ assertEquals('world', result.hello);
+})();
+
+(function TestImported() {
+ print(arguments.callee.name);
+ function Test(obj) {
+ let builder = new WasmModuleBuilder();
+ const g = builder.addImportedGlobal('m', 'val', kWasmAnyRef);
+ builder.addFunction('main', kSig_r_v)
+ .addBody([kExprGetGlobal, g])
+ .exportAs('main');
+
+ const instance = builder.instantiate({m: {val: obj}});
+ assertSame(obj, instance.exports.main());
+ }
+ Test(null);
+ Test(undefined);
+ Test(1653);
+ Test("mystring");
+ Test({q: 14});
+ Test(print);
+})();
+
+(function TestAnyRefGlobalObjectDefaultValue() {
+ print(arguments.callee.name);
+ let default_init = new WebAssembly.Global({value: 'anyref', mutable: true});
+ assertSame(null, default_init.value);
+ assertSame(null, default_init.valueOf());
+})();
+
+
+(function TestAnyRefGlobalObject() {
+ print(arguments.callee.name);
+ function TestGlobal(obj) {
+ const global = new WebAssembly.Global({value: 'anyref'}, obj);
+ assertSame(obj, global.value);
+ assertSame(obj, global.valueOf());
+ }
+
+ TestGlobal(null);
+ TestGlobal(undefined);
+ TestGlobal(1663);
+ TestGlobal("testmyglobal");
+ TestGlobal({a: 11});
+ TestGlobal(print);
+})();
+
+(function TestAnyRefGlobalObjectSetValue() {
+ print(arguments.callee.name);
+ let global = new WebAssembly.Global({value: 'anyref', mutable: true});
+
+ function TestGlobal(obj) {
+ global.value = obj;
+ assertSame(obj, global.value);
+ assertSame(obj, global.valueOf());
+ }
+
+ TestGlobal(null);
+ assertThrows(() => TestGlobal(undefined), TypeError);
+ TestGlobal(1663);
+ TestGlobal("testmyglobal");
+ TestGlobal({a: 11});
+ TestGlobal(print);
+})();
+
+(function TestExportMutableAnyRefGlobal() {
+ print(arguments.callee.name);
+ let builder = new WasmModuleBuilder();
+ const g1 = builder.addGlobal(kWasmAnyRef, true).exportAs("global1");
+ builder.addGlobal(kWasmI32, true); // Dummy.
+ builder.addGlobal(kWasmAnyRef, true); // Dummy.
+ const g2 = builder.addGlobal(kWasmAnyRef, true).exportAs("global2");
+ builder.addFunction("main", kSig_v_rr)
+ .addBody([
+ kExprGetLocal, 0,
+ kExprSetGlobal, g1.index,
+ kExprGetLocal, 1,
+ kExprSetGlobal, g2.index
+ ])
+ .exportAs("main");
+
+ const instance = builder.instantiate();
+ const obj1 = {x: 221};
+ const obj2 = print;
+ instance.exports.main(obj1, obj2);
+ assertSame(obj1, instance.exports.global1.value);
+ assertSame(obj2, instance.exports.global2.value);
+})();
+
+(function TestImportMutableAnyRefGlobal() {
+ print(arguments.callee.name);
+ function Test(obj) {
+ let builder = new WasmModuleBuilder();
+ const g = builder.addImportedGlobal('m', 'val', kWasmAnyRef, true);
+ builder.addFunction('main', kSig_r_v)
+ .addBody([kExprGetGlobal, g])
+ .exportAs('main');
+
+    const global = new WebAssembly.Global({value: 'anyref', mutable: true}, obj);
+ const instance = builder.instantiate({m: {val: global}});
+ assertSame(obj, instance.exports.main());
+ }
+ Test(null);
+ Test(undefined);
+ Test(1653);
+ Test("mystring");
+ Test({q: 14});
+ Test(print);
+})();
+
+(function TestImportMutableAnyRefGlobalFromOtherInstance() {
+ print(arguments.callee.name);
+
+ // Create an instance which exports globals.
+ let builder1 = new WasmModuleBuilder();
+ const g3 = builder1.addGlobal(kWasmAnyRef, true).exportAs("e3");
+ builder1.addGlobal(kWasmI32, true).exportAs("e1"); // Dummy.
+ builder1.addGlobal(kWasmAnyRef, true).exportAs("e4"); // Dummy.
+ const g2 = builder1.addGlobal(kWasmAnyRef, true).exportAs("e2");
+
+ builder1.addFunction("set_globals", kSig_v_rr)
+ .addBody([
+ kExprGetLocal, 0,
+ kExprSetGlobal, g2.index,
+ kExprGetLocal, 1,
+ kExprSetGlobal, g3.index,
+ ])
+ .exportAs("set_globals");
+
+ builder1.addFunction('get_global2', kSig_r_v)
+ .addBody([kExprGetGlobal, g2.index])
+ .exportAs('get_global2');
+
+ builder1.addFunction('get_global3', kSig_r_v)
+ .addBody([kExprGetGlobal, g3.index])
+ .exportAs('get_global3');
+
+ const instance1 = builder1.instantiate();
+ const obj2 = {x: 221};
+ const obj3 = print;
+ instance1.exports.set_globals(obj2, obj3);
+
+ // Create an instance which imports the globals of the other instance.
+ let builder2 = new WasmModuleBuilder();
+ const i1 = builder2.addImportedGlobal('exports', 'e1', kWasmI32, true);
+ const i2 = builder2.addImportedGlobal('exports', 'e2', kWasmAnyRef, true);
+ const i3 = builder2.addImportedGlobal('exports', 'e3', kWasmAnyRef, true);
+ const i4 = builder2.addImportedGlobal('exports', 'e4', kWasmAnyRef, true);
+
+ builder2.addExportOfKind("reexport1", kExternalGlobal, i1);
+ builder2.addExportOfKind("reexport2", kExternalGlobal, i2);
+ builder2.addExportOfKind("reexport3", kExternalGlobal, i3);
+ builder2.addExportOfKind("reexport4", kExternalGlobal, i4);
+
+ builder2.addFunction("set_globals", kSig_v_rr)
+ .addBody([
+ kExprGetLocal, 0,
+ kExprSetGlobal, i2,
+ kExprGetLocal, 1,
+ kExprSetGlobal, i3,
+ ])
+ .exportAs("set_globals");
+
+ builder2.addFunction('get_global2', kSig_r_v)
+ .addBody([kExprGetGlobal, i2])
+ .exportAs('get_global2');
+
+ builder2.addFunction('get_global3', kSig_r_v)
+ .addBody([kExprGetGlobal, i3])
+ .exportAs('get_global3');
+
+ const instance2 = builder2.instantiate(instance1);
+ // Check if the globals were imported correctly.
+ assertSame(obj2, instance2.exports.get_global2());
+ assertSame(obj3, instance2.exports.get_global3());
+
+ assertSame(obj2, instance2.exports.reexport2.value);
+ assertSame(obj3, instance2.exports.reexport3.value);
+
+ // Check if instance2 can make changes visible for instance1.
+ instance2.exports.set_globals(null, undefined);
+ assertEquals(null, instance1.exports.get_global2());
+ assertEquals(undefined, instance1.exports.get_global3());
+
+ assertEquals(null, instance2.exports.reexport2.value);
+ assertEquals(undefined, instance2.exports.reexport3.value);
+
+ // Check if instance1 can make changes visible for instance2.
+ instance1.exports.set_globals("foo", 66343);
+ assertEquals("foo", instance2.exports.get_global2());
+ assertEquals(66343, instance2.exports.get_global3());
+
+ assertEquals("foo", instance2.exports.reexport2.value);
+ assertEquals(66343, instance2.exports.reexport3.value);
+
+ const bar2 = {f: "oo"};
+ const bar3 = {b: "ar"};
+ instance2.exports.reexport2.value = bar2;
+ instance2.exports.reexport3.value = bar3;
+
+ assertSame(bar2, instance1.exports.get_global2());
+ assertSame(bar3, instance1.exports.get_global3());
+ assertSame(bar2, instance2.exports.get_global2());
+ assertSame(bar3, instance2.exports.get_global3());
+})();
diff --git a/deps/v8/test/mjsunit/wasm/anyref.js b/deps/v8/test/mjsunit/wasm/anyref.js
index e6306b6a9e..2c045712f1 100644
--- a/deps/v8/test/mjsunit/wasm/anyref.js
+++ b/deps/v8/test/mjsunit/wasm/anyref.js
@@ -45,6 +45,41 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
instance.exports.main({hello: 'world'});
})();
+(function testPassAnyRefWithGCWithLocals() {
+ print(arguments.callee.name);
+ const builder = new WasmModuleBuilder();
+ const ref_sig = builder.addType(kSig_v_r);
+ const void_sig = builder.addType(kSig_v_v);
+ const imp_index = builder.addImport("q", "func", ref_sig);
+ const gc_index = builder.addImport("q", "gc", void_sig);
+ // First call the gc, then check if the object still exists.
+ builder.addFunction('main', ref_sig)
+ .addLocals({anyref_count: 10})
+ .addBody([
+ kExprGetLocal, 0, kExprSetLocal, 1, // Set local
+ kExprGetLocal, 0, kExprSetLocal, 2, // Set local
+ kExprGetLocal, 0, kExprSetLocal, 3, // Set local
+ kExprGetLocal, 0, kExprSetLocal, 4, // Set local
+ kExprGetLocal, 0, kExprSetLocal, 5, // Set local
+ kExprGetLocal, 0, kExprSetLocal, 6, // Set local
+ kExprGetLocal, 0, kExprSetLocal, 7, // Set local
+ kExprGetLocal, 0, kExprSetLocal, 8, // Set local
+ kExprGetLocal, 0, kExprSetLocal, 9, // Set local
+ kExprGetLocal, 0, kExprSetLocal, 10, // Set local
+ kExprCallFunction, gc_index, // call gc
+ kExprGetLocal, 9, kExprCallFunction, imp_index // call import
+ ])
+ .exportFunc();
+
+ function checkFunction(value) {
+ assertEquals('world', value.hello);
+ }
+
+ const instance = builder.instantiate({q: {func: checkFunction, gc: gc}});
+
+ instance.exports.main({hello: 'world'});
+})();
+
(function testPassAnyRefWithGC() {
print(arguments.callee.name);
const builder = new WasmModuleBuilder();
@@ -69,6 +104,28 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
instance.exports.main({hello: 'world'});
})();
+(function testPassAnyRefWithGCInWrapper() {
+ print(arguments.callee.name);
+ const builder = new WasmModuleBuilder();
+ const kSig_r_iri = makeSig([kWasmI32, kWasmAnyRef, kWasmI32], [kWasmAnyRef]);
+ const sig_index = builder.addType(kSig_r_iri);
+ builder.addFunction('main', sig_index)
+ .addBody([kExprGetLocal, 1])
+ .exportFunc();
+
+ const instance = builder.instantiate();
+
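+  // valueOf runs while the JS-to-wasm wrapper converts the i32 arguments, so a
+  // GC is forced mid-call and the anyref argument must survive it.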
+ const triggerGCParam = {
+ valueOf: () => {
+ gc();
+ return 17;
+ }
+ };
+
+ const result = instance.exports.main(triggerGCParam, {hello: 'world'}, triggerGCParam);
+ assertEquals('world', result.hello);
+})();
+
(function testAnyRefNull() {
print(arguments.callee.name);
const builder = new WasmModuleBuilder();
@@ -99,6 +156,19 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
assertEquals(0, instance.exports.main(print));
})();
+(function testAnyRefNullIsNull() {
+ print(arguments.callee.name);
+
+ const builder = new WasmModuleBuilder();
+ builder.addFunction('main', kSig_i_v)
+ .addBody([kExprRefNull, kExprRefIsNull])
+ .exportFunc();
+
+ const instance = builder.instantiate();
+
+ assertEquals(1, instance.exports.main());
+})();
+
(function testAnyRefLocalDefaultValue() {
print(arguments.callee.name);
const builder = new WasmModuleBuilder();
diff --git a/deps/v8/test/mjsunit/wasm/asm-wasm-math-intrinsic.js b/deps/v8/test/mjsunit/wasm/asm-wasm-math-intrinsic.js
new file mode 100644
index 0000000000..f683436246
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/asm-wasm-math-intrinsic.js
@@ -0,0 +1,295 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --validate-asm --allow-natives-syntax
+
+function verbose(...args) {
+ // print(...args);
+}
+
+//=============================================
+// Global count of failures
+//=============================================
+let numFailures = 0;
+
+function reportFailure(name, vals, m, w) {
+ print(' error: ' + name + '(' + vals + ') == ' + w + ', expected ' + m);
+ numFailures++;
+}
+
+let inputs = [
+ 1 / 0,
+ -1 / 0,
+ 0 / 0,
+ -2.70497e+38,
+ -1.4698e+37,
+ -1.22813e+35,
+ -1.34584e+34,
+ -1.0079e+32,
+ -6.49364e+26,
+ -3.06077e+25,
+ -1.46821e+25,
+ -1.17658e+23,
+ -1.9617e+22,
+ -2.7357e+20,
+ -9223372036854775808.0, // INT64_MIN
+ -1.48708e+13,
+ -1.89633e+12,
+ -4.66622e+11,
+ -2.22581e+11,
+ -1.45381e+10,
+ -2147483904.0, // First float32 after INT32_MIN
+ -2147483648.0, // INT32_MIN
+ -2147483520.0, // Last float32 before INT32_MIN
+ -1.3956e+09,
+ -1.32951e+09,
+ -1.30721e+09,
+ -1.19756e+09,
+ -9.26822e+08,
+ -5.09256e+07,
+ -964300.0,
+ -192446.0,
+ -28455.0,
+ -27194.0,
+ -20575.0,
+ -17069.0,
+ -9167.0,
+ -960.178,
+ -113.0,
+ -62.0,
+ -15.0,
+ -7.0,
+ -1.0,
+ -0.0256635,
+ -4.60374e-07,
+ -3.63759e-10,
+ -4.30175e-14,
+ -5.27385e-15,
+ -1.5707963267948966,
+ -1.48084e-15,
+ -2.220446049250313e-16,
+ -1.05755e-19,
+ -3.2995e-21,
+ -1.67354e-23,
+ -1.11885e-23,
+ -1.78506e-30,
+ -1.43718e-34,
+ -1.27126e-38,
+ -0.0,
+ 3e-88,
+ -2e66,
+ 0.0,
+ 2e66,
+ 1.17549e-38,
+ 1.56657e-37,
+ 4.08512e-29,
+ 6.25073e-22,
+ 4.1723e-13,
+ 1.44343e-09,
+ 1.5707963267948966,
+ 5.27004e-08,
+ 9.48298e-08,
+ 5.57888e-07,
+ 4.89988e-05,
+ 0.244326,
+ 1.0,
+ 12.4895,
+ 19.0,
+ 47.0,
+ 106.0,
+ 538.324,
+ 564.536,
+ 819.124,
+ 7048.0,
+ 12611.0,
+ 19878.0,
+ 20309.0,
+ 797056.0,
+ 1.77219e+09,
+ 2147483648.0, // INT32_MAX + 1
+ 4294967296.0, // UINT32_MAX + 1
+ 1.51116e+11,
+ 4.18193e+13,
+ 3.59167e+16,
+ 9223372036854775808.0, // INT64_MAX + 1
+ 18446744073709551616.0, // UINT64_MAX + 1
+ 3.38211e+19,
+ 2.67488e+20,
+ 1.78831e+21,
+ 9.20914e+21,
+ 8.35654e+23,
+ 1.4495e+24,
+ 5.94015e+25,
+ 4.43608e+30,
+ 2.44502e+33,
+ 1.38178e+37,
+ 1.71306e+37,
+ 3.31899e+38,
+ 3.40282e+38,
+];
+
+let stdlib = this;
+
+// Module template for generating f64 unop functions.
+function ModuleTemplate_f64_unop(stdlib) {
+ 'use asm';
+
+ var Stdlib = stdlib.Math.NAME;
+
+ function NAME(a) {
+ a = +a;
+ return +Stdlib(a);
+ }
+
+ return {NAME: NAME};
+}
+
+// Module template for generating f64 binop functions.
+function ModuleTemplate_f64_binop(stdlib) {
+ 'use asm';
+
+ var Stdlib = stdlib.Math.NAME;
+
+ function NAME(a, b) {
+ a = +a;
+ b = +b;
+ return +Stdlib(a, b);
+ }
+
+ return {NAME: NAME};
+}
+
+// Module template for generating f32 unop functions.
+function ModuleTemplate_f32_unop(stdlib) {
+ 'use asm';
+
+ var Stdlib = stdlib.Math.NAME;
+ var fround = stdlib.Math.fround;
+
+ function NAME(a) {
+ a = fround(a);
+ return fround(Stdlib(a));
+ }
+
+ return {NAME: NAME};
+}
+
+// Module template for generating f32 binop functions.
+function ModuleTemplate_f32_binop(stdlib) {
+ 'use asm';
+
+ var Stdlib = stdlib.Math.NAME;
+ var fround = stdlib.Math.fround;
+
+ function NAME(a, b) {
+ a = fround(a);
+ b = fround(b);
+ return fround(Stdlib(a, b));
+ }
+
+ return {NAME: NAME};
+}
+
+function instantiateTemplate(func, name) {
+ let src = func.toString();
+ src = src.replace(/NAME/g, name);
+ let module = eval('(' + src + ')');
+ let instance = module(stdlib);
+ assertTrue(%IsAsmWasmCode(module));
+
+ let asm_func = instance[name];
+  if (typeof asm_func != 'function') throw 'asm[' + name + '] not found';
+ return asm_func;
+}
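+// For example, instantiateTemplate(ModuleTemplate_f64_unop, 'sin') compiles an
+// asm.js module wrapping Math.sin and returns its exported 'sin' function.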
+
+function genUnop(name, f32) {
+ return instantiateTemplate(
+ f32 ? ModuleTemplate_f32_unop : ModuleTemplate_f64_unop, name);
+}
+
+function genBinop(name, f32) {
+ return instantiateTemplate(
+ f32 ? ModuleTemplate_f32_binop : ModuleTemplate_f64_binop, name);
+}
+
+function assertUnop(name, math_func, asm_func) {
+ for (val of inputs) {
+ verbose(' ', val);
+ let m = math_func(val);
+ let w = asm_func(val);
+ if (!deepEquals(m, w)) reportFailure(name, [val], m, w);
+ }
+}
+
+function assertBinop(name, math_func, asm_func) {
+ let inputs2 = [1, 0.5, -1, -0.5, 0, -0, 1 / 0, -1 / 0, 0 / 0];
+ for (val of inputs) {
+ verbose(' ', val);
+ for (val2 of inputs2) {
+ verbose(' ', val2);
+ let m = math_func(val, val2);
+ let w = asm_func(val, val2);
+ if (!deepEquals(m, w)) reportFailure(name, [val, val2], m, w);
+ m = math_func(val2, val);
+ w = asm_func(val2, val);
+ if (!deepEquals(m, w)) reportFailure(name, [val2, val], m, w);
+ }
+ }
+}
+
+(function TestF64() {
+ let f64_intrinsics = [
+ 'acos', 'asin', 'atan', 'cos', 'sin', 'tan', 'exp', 'log',
+ 'atan2', 'pow', 'ceil', 'floor', 'sqrt', 'min', 'max', 'abs',
+ 'min', 'max', 'abs', 'ceil', 'floor', 'sqrt',
+ ];
+
+ for (name of f64_intrinsics) {
+ if (name == 'pow') continue; // TODO(8505): asm.js correctness
+ let math_func = Math[name];
+ let f32 = false;
+ print('Testing (f64) Math.' + name);
+ switch (math_func.length) {
+ case 1: {
+ let asm_func = genUnop(name, false);
+ assertUnop('(f64)' + name, math_func, asm_func);
+ break;
+ }
+ case 2: {
+ let asm_func = genBinop(name, false);
+ assertBinop('(f64)' + name, math_func, asm_func);
+ break;
+ }
+ default:
+        throw 'Unexpected param count: ' + math_func.length;
+ }
+ }
+})();
+
+(function TestF32() {
+ let f32_intrinsics = ['min', 'max', 'abs', 'ceil', 'floor', 'sqrt'];
+
+ for (name of f32_intrinsics) {
+ let r = Math.fround, f = Math[name];
+ print('Testing (f32) Math.' + name);
+ switch (f.length) {
+ case 1: {
+ let asm_func = genUnop(name, true);
+ let math_func = (val) => r(f(r(val)));
+ assertUnop('(f32)' + name, math_func, asm_func);
+ break;
+ }
+ case 2: {
+ let asm_func = genBinop(name, true);
+ let math_func = (v1, v2) => r(f(r(v1), r(v2)));
+ assertBinop('(f32)' + name, math_func, asm_func);
+ break;
+ }
+ default:
+        throw 'Unexpected param count: ' + f.length;
+ }
+ }
+})();
+
+assertEquals(0, numFailures);
diff --git a/deps/v8/test/mjsunit/wasm/async-compile.js b/deps/v8/test/mjsunit/wasm/async-compile.js
index 39a339aae6..122eccbe96 100644
--- a/deps/v8/test/mjsunit/wasm/async-compile.js
+++ b/deps/v8/test/mjsunit/wasm/async-compile.js
@@ -2,22 +2,23 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --wasm-async-compilation --expose-wasm --allow-natives-syntax
+// Flags: --expose-wasm --allow-natives-syntax
load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
-function assertCompiles(buffer) {
- return assertPromiseResult(
- WebAssembly.compile(buffer),
- module => assertTrue(module instanceof WebAssembly.Module),
- ex => assertUnreachable());
+async function assertCompiles(buffer) {
+ var module = await WebAssembly.compile(buffer);
+ assertInstanceof(module, WebAssembly.Module);
}
-function assertCompileError(buffer) {
- return assertPromiseResult(
- WebAssembly.compile(buffer), module => assertUnreachable(),
- ex => assertTrue(ex instanceof WebAssembly.CompileError));
+async function assertCompileError(buffer) {
+ try {
+ await WebAssembly.compile(buffer);
+ assertUnreachable();
+ } catch (e) {
+ if (!(e instanceof WebAssembly.CompileError)) throw e;
+ }
}
assertPromiseResult(async function basicCompile() {
diff --git a/deps/v8/test/mjsunit/wasm/atomics-stress.js b/deps/v8/test/mjsunit/wasm/atomics-stress.js
new file mode 100644
index 0000000000..b832fad8fe
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/atomics-stress.js
@@ -0,0 +1,554 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --experimental-wasm-threads
+
+// This test might time out if the search space for a sequential
+// interleaving becomes too large. However, it should never fail.
+// Note that results of this test are flaky by design. While the test is
+// deterministic with a fixed seed, bugs may introduce non-determinism.
+
+load("test/mjsunit/wasm/wasm-constants.js");
+load("test/mjsunit/wasm/wasm-module-builder.js");
+
+const kDebug = false;
+
+const kSequenceLength = 256;
+const kNumberOfWorker = 4;
+const kNumberOfSteps = 10000000;
+
+const kFirstOpcodeWithInput = 3;
+const kFirstOpcodeWithoutOutput = 3;
+const kLastOpcodeWithoutOutput = 5;
+
+const opCodes = [
+ kExprI32AtomicLoad,
+ kExprI32AtomicLoad8U,
+ kExprI32AtomicLoad16U,
+ kExprI32AtomicStore,
+ kExprI32AtomicStore8U,
+ kExprI32AtomicStore16U,
+ kExprI32AtomicAdd,
+ kExprI32AtomicAdd8U,
+ kExprI32AtomicAdd16U,
+ kExprI32AtomicSub,
+ kExprI32AtomicSub8U,
+ kExprI32AtomicSub16U,
+ kExprI32AtomicAnd,
+ kExprI32AtomicAnd8U,
+ kExprI32AtomicAnd16U,
+ kExprI32AtomicOr,
+ kExprI32AtomicOr8U,
+ kExprI32AtomicOr16U,
+ kExprI32AtomicXor,
+ kExprI32AtomicXor8U,
+ kExprI32AtomicXor16U,
+ kExprI32AtomicExchange,
+ kExprI32AtomicExchange8U,
+ kExprI32AtomicExchange16U
+];
+
+const opCodeNames = [
+ "kExprI32AtomicLoad",
+ "kExprI32AtomicLoad8U",
+ "kExprI32AtomicLoad16U",
+ "kExprI32AtomicStore",
+ "kExprI32AtomicStore8U",
+ "kExprI32AtomicStore16U",
+ "kExprI32AtomicAdd",
+ "kExprI32AtomicAdd8U",
+ "kExprI32AtomicAdd16U",
+ "kExprI32AtomicSub",
+ "kExprI32AtomicSub8U",
+ "kExprI32AtomicSub16U",
+ "kExprI32AtomicAnd",
+ "kExprI32AtomicAnd8U",
+ "kExprI32AtomicAnd16U",
+ "kExprI32AtomicOr",
+ "kExprI32AtomicOr8U",
+ "kExprI32AtomicOr16U",
+ "kExprI32AtomicXor",
+ "kExprI32AtomicXor8U",
+ "kExprI32AtomicXor16U",
+ "kExprI32AtomicExchange",
+ "kExprI32AtomicExchange8U",
+ "kExprI32AtomicExchange16U"
+];
+
+class Operation {
+ constructor(opcode, input, offset) {
+ this.opcode = opcode != undefined ? opcode : Operation.nextOpcode();
+ this.size = Operation.opcodeToSize(this.opcode);
+ this.input = input != undefined ? input : Operation.inputForSize(
+ this.size);
+ this.offset = offset != undefined ? offset : Operation.offsetForSize(
+ this.size);
+ }
+
+ static nextOpcode() {
+ let random = Math.random();
+ return Math.floor(random * opCodes.length);
+ }
+
+ static opcodeToSize(opcode) {
+    // Instructions are ordered by size: 32, 8, 16 bits.
+ return [32, 8, 16][opcode % 3];
+ }
+
+ static opcodeToAlignment(opcode) {
+    // Instructions are ordered by size: 32, 8, 16 bits.
+ return [2, 0, 1][opcode % 3];
+ }
+
+ static inputForSize(size) {
+ let random = Math.random();
+ // Avoid 32 bit overflow for integer here :(
+ return Math.floor(random * (1 << (size - 1)) * 2);
+ }
+
+ static offsetForSize(size) {
+ // Pick an offset in bytes between 0 and 7.
+ let offset = Math.floor(Math.random() * 8);
+ // Make sure the offset matches the required alignment by masking out the lower bits.
+ let size_in_bytes = size / 8;
+ let mask = ~(size_in_bytes - 1);
+ return offset & mask;
+ }
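+  // For example, with size 16: size_in_bytes is 2, mask is ~1, so the random
+  // offset is rounded down to one of {0, 2, 4, 6}.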
+
+ get wasmOpcode() {
+ // [opcode, alignment, offset]
+ return [opCodes[this.opcode], Operation.opcodeToAlignment(this.opcode), this.offset];
+ }
+
+ get hasInput() {
+ return this.opcode >= kFirstOpcodeWithInput;
+ }
+
+ get hasOutput() {
+ return this.opcode < kFirstOpcodeWithoutOutput || this.opcode >
+ kLastOpcodeWithoutOutput;
+ }
+
+ truncateResultBits(low, high) {
+ // Shift the lower part. For offsets greater four it drops out of the visible window.
+ let shiftedL = this.offset >= 4 ? 0 : low >>> (this.offset * 8);
+ // The higher part is zero for offset 0, left shifted for [1..3] and right shifted
+ // for [4..7].
+ let shiftedH = this.offset == 0 ? 0 :
+ this.offset >= 4 ? high >>> (this.offset - 4) * 8 : high << ((4 -
+ this.offset) * 8);
+ let value = shiftedL | shiftedH;
+
+ switch (this.size) {
+ case 8:
+ return value & 0xFF;
+ case 16:
+ return value & 0xFFFF;
+ case 32:
+ return value;
+ default:
+ throw "Unexpected size: " + this.size;
+ }
+ }
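+  // Worked example: for offset 4, shiftedL is 0 and shiftedH is high >>> 0, so
+  // the window shows the high word; with size 8 only its lowest byte remains.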
+
+ static get builder() {
+ if (!Operation.__builder) {
+ let builder = new WasmModuleBuilder();
+ builder.addMemory(1, 1, 1, false);
+ builder.exportMemoryAs("mem");
+ Operation.__builder = builder;
+ }
+ return Operation.__builder;
+ }
+
+ static get exports() {
+ if (!Operation.__instance) {
+ return {};
+ }
+ return Operation.__instance.exports;
+ }
+
+ static get memory() {
+ return Operation.exports.mem;
+ }
+
+ static set instance(instance) {
+ Operation.__instance = instance;
+ }
+
+ compute(state) {
+ let evalFun = Operation.exports[this.key];
+ if (!evalFun) {
+ let builder = Operation.builder;
+ let body = [
+ // Load address of low 32 bits.
+ kExprI32Const, 0,
+ // Load expected value.
+ kExprGetLocal, 0,
+ kExprI32StoreMem, 2, 0,
+ // Load address of high 32 bits.
+ kExprI32Const, 4,
+ // Load expected value.
+ kExprGetLocal, 1,
+ kExprI32StoreMem, 2, 0,
+ // Load address of where our window starts.
+ kExprI32Const, 0,
+ // Load input if there is one.
+ ...(this.hasInput ? [kExprGetLocal, 2] : []),
+ // Perform operation.
+ kAtomicPrefix, ...this.wasmOpcode,
+ // Drop output if it had any.
+ ...(this.hasOutput ? [kExprDrop] : []),
+ // Load resulting value.
+ kExprI32Const, 0,
+ kExprI32LoadMem, 2, 0,
+ // Return.
+ kExprReturn
+ ]
+ builder.addFunction(this.key, kSig_i_iii)
+ .addBody(body)
+ .exportAs(this.key);
+ // Instantiate module, get function exports.
+ let module = new WebAssembly.Module(builder.toBuffer());
+ Operation.instance = new WebAssembly.Instance(module);
+ evalFun = Operation.exports[this.key];
+ }
+ let result = evalFun(state.low, state.high, this.input);
+ let ta = new Int32Array(Operation.memory.buffer);
+ if (kDebug) {
+ print(state.high + ":" + state.low + " " + this.toString() +
+ " -> " + ta[1] + ":" + ta[0]);
+ }
+ if (result != ta[0]) throw "!";
+ return {
+ low: ta[0],
+ high: ta[1]
+ };
+ }
+
+ toString() {
+ return opCodeNames[this.opcode] + "[+" + this.offset + "] " + this.input;
+ }
+
+ get key() {
+ return this.opcode + "-" + this.offset;
+ }
+}
+
+class State {
+ constructor(low, high, indices, count) {
+ this.low = low;
+ this.high = high;
+ this.indices = indices;
+ this.count = count;
+ }
+
+ isFinal() {
+ return (this.count == kNumberOfWorker * kSequenceLength);
+ }
+
+ toString() {
+ return this.high + ":" + this.low + " @ " + this.indices;
+ }
+}
+
+function makeSequenceOfOperations(size) {
+ let result = new Array(size);
+ for (let i = 0; i < size; i++) {
+ result[i] = new Operation();
+ }
+ return result;
+}
+
+function toSLeb128(val) {
+ let result = [];
+ while (true) {
+ let v = val & 0x7f;
+ val = val >> 7;
+ let msbIsSet = (v & 0x40) || false;
+ if (((val == 0) && !msbIsSet) || ((val == -1) && msbIsSet)) {
+ result.push(v);
+ break;
+ }
+ result.push(v | 0x80);
+ }
+ return result;
+}
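+// A few expected encodings (standard signed LEB128), for reference:
+//   toSLeb128(0)    -> [0x00]
+//   toSLeb128(64)   -> [0xc0, 0x00]  (bit 6 set, so a second byte carries the sign)
+//   toSLeb128(-1)   -> [0x7f]
+//   toSLeb128(-128) -> [0x80, 0x7f]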
+
+function generateFunctionBodyForSequence(sequence) {
+  // We expect the int32* to perform ops on as arg 0, the int32* for our
+  // value log as arg 1, and an int32* used to count down spinning workers
+  // as arg 2.
+ let body = [];
+ // Initially, we spin until all workers start running.
+ if (!kDebug) {
+ body.push(
+ // Decrement the wait count.
+ kExprGetLocal, 2,
+ kExprI32Const, 1,
+ kAtomicPrefix, kExprI32AtomicSub, 2, 0,
+ // Spin until zero.
+ kExprLoop, kWasmStmt,
+ kExprGetLocal, 2,
+ kAtomicPrefix, kExprI32AtomicLoad, 2, 0,
+ kExprI32Const, 0,
+ kExprI32GtU,
+ kExprBrIf, 0,
+ kExprEnd
+ );
+ }
+ for (let operation of sequence) {
+ body.push(
+ // Pre-load address of results sequence pointer for later.
+ kExprGetLocal, 1,
+ // Load address where atomic pointers are stored.
+ kExprGetLocal, 0,
+ // Load the second argument if it had any.
+ ...(operation.hasInput ? [kExprI32Const, ...toSLeb128(operation
+ .input)] : []),
+ // Perform operation
+ kAtomicPrefix, ...operation.wasmOpcode,
+      // Generate fake output if needed.
+ ...(operation.hasOutput ? [] : [kExprI32Const, 0]),
+      // Store the value read (or the fake output) to the result sequence.
+ kExprI32StoreMem, 2, 0,
+ // Increment result sequence pointer.
+ kExprGetLocal, 1,
+ kExprI32Const, 4,
+ kExprI32Add,
+ kExprSetLocal, 1
+ );
+ }
+ // Return end of sequence index.
+ body.push(
+ kExprGetLocal, 1,
+ kExprReturn);
+ return body;
+}
+
+function getSequence(start, end) {
+ return new Int32Array(memory.buffer, start, (end - start) / Int32Array.BYTES_PER_ELEMENT);
+}
+
+function spawnWorkers() {
+ let workers = [];
+ for (let i = 0; i < kNumberOfWorker; i++) {
+ let worker = new Worker(
+ `onmessage = function(msg) {
+ if (msg.module) {
+ let module = msg.module;
+ let mem = msg.mem;
+ this.instance = new WebAssembly.Instance(module, {m: {imported_mem: mem}});
+ postMessage({instantiated: true});
+ } else {
+ let address = msg.address;
+ let sequence = msg.sequence;
+ let index = msg.index;
+ let spin = msg.spin;
+ let result = instance.exports["worker" + index](address, sequence, spin);
+ postMessage({index: index, sequence: sequence, result: result});
+ }
+ }`, {type: 'string'}
+ );
+ workers.push(worker);
+ }
+ return workers;
+}
+
+function instantiateModuleInWorkers(workers) {
+ for (let worker of workers) {
+ worker.postMessage({
+ module: module,
+ mem: memory
+ });
+ let msg = worker.getMessage();
+ if (!msg.instantiated) throw "Worker failed to instantiate";
+ }
+}
+
+function executeSequenceInWorkers(workers) {
+ for (i = 0; i < workers.length; i++) {
+ let worker = workers[i];
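+    // Memory layout: the 8-byte atomics window at byte 0, the spin counter at
+    // byte 16, and padded per-worker result logs starting at byte 32.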
+ worker.postMessage({
+ index: i,
+ address: 0,
+ spin: 16,
+ sequence: 32 + ((kSequenceLength * 4) + 32) * i
+ });
+ // In debug mode, keep execution sequential.
+ if (kDebug) {
+ let msg = worker.getMessage();
+ results[msg.index] = getSequence(msg.sequence, msg.result);
+ }
+ }
+}
+
+function selectMatchingWorkers(state) {
+ let matching = [];
+ let indices = state.indices;
+ for (let i = 0; i < indices.length; i++) {
+ let index = indices[i];
+ if (index >= kSequenceLength) continue;
+ // We need to project the expected value to the number of bits this
+ // operation will read at runtime.
+ let expected = sequences[i][index].truncateResultBits(state.low, state.high);
+ let hasOutput = sequences[i][index].hasOutput;
+ if (!hasOutput || (results[i][index] == expected)) {
+ matching.push(i);
+ }
+ }
+ return matching;
+}
+
+function computeNextState(state, advanceIdx) {
+ let newIndices = state.indices.slice();
+ let sequence = sequences[advanceIdx];
+ let operation = sequence[state.indices[advanceIdx]];
+ newIndices[advanceIdx]++;
+ let {
+ low,
+ high
+ } = operation.compute(state);
+ return new State(low, high, newIndices, state.count + 1);
+}
+
+function findSequentialOrdering() {
+ let startIndices = new Array(results.length);
+ let steps = 0;
+ startIndices.fill(0);
+ let matchingStates = [new State(0, 0, startIndices, 0)];
+ while (matchingStates.length > 0) {
+ let current = matchingStates.pop();
+ if (kDebug) {
+ print(current);
+ }
+ let matchingResults = selectMatchingWorkers(current);
+ if (matchingResults.length == 0) {
+ continue;
+ }
+ for (let match of matchingResults) {
+ let newState = computeNextState(current, match);
+ if (newState.isFinal()) {
+ return true;
+ }
+ matchingStates.push(newState);
+ }
+ if (steps++ > kNumberOfSteps) {
+ print("Search timed out, aborting...");
+ return true;
+ }
+ }
+ // We have no options left.
+ return false;
+}
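+// The search above is a depth-first enumeration of per-worker sequence indices:
+// from each state it advances any worker whose next logged result is consistent
+// with the current value, and succeeds once every sequence is fully consumed.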
+
+// Helpful for debugging failed tests.
+function loadSequencesFromStrings(inputs) {
+ let reverseOpcodes = {};
+ for (let i = 0; i < opCodeNames.length; i++) {
+ reverseOpcodes[opCodeNames[i]] = i;
+ }
+ let sequences = [];
+ let parseRE = /([a-zA-Z0-9]*)\[\+([0-9])\] ([\-0-9]*)/;
+ for (let input of inputs) {
+ let parts = input.split(",");
+ let sequence = [];
+ for (let part of parts) {
+ let parsed = parseRE.exec(part);
+ sequence.push(new Operation(reverseOpcodes[parsed[1]], parsed[3],
+ parsed[2] | 0));
+ }
+ sequences.push(sequence);
+ }
+ return sequences;
+}
+
+// Helpful for debugging failed tests.
+function loadResultsFromStrings(inputs) {
+ let results = [];
+ for (let input of inputs) {
+ let parts = input.split(",");
+ let result = [];
+ for (let number of parts) {
+ result.push(number | 0);
+ }
+ results.push(result);
+ }
+ return results;
+}
+
+let maxSize = 10;
+let memory = new WebAssembly.Memory({
+ initial: 1,
+ maximum: maxSize,
+ shared: true
+});
+let memory_view = new Int32Array(memory.buffer);
+
+let sequences = [];
+let results = [];
+
+let builder = new WasmModuleBuilder();
+builder.addImportedMemory("m", "imported_mem", 0, maxSize, "shared");
+
+for (let i = 0; i < kNumberOfWorker; i++) {
+ sequences[i] = makeSequenceOfOperations(kSequenceLength);
+ builder.addFunction("worker" + i, kSig_i_iii)
+ .addBody(generateFunctionBodyForSequence(sequences[i]))
+ .exportAs("worker" + i);
+}
+
+// Instantiate module, get function exports.
+let module = new WebAssembly.Module(builder.toBuffer());
+let instance = new WebAssembly.Instance(module, {
+ m: {
+ imported_mem: memory
+ }
+});
+
+// Spawn off the workers and run the sequences.
+let workers = spawnWorkers();
+// Set spin count.
+memory_view[4] = kNumberOfWorker;
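+// (Int32Array index 4 is byte offset 16, the 'spin' address the workers poll.)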
+instantiateModuleInWorkers(workers);
+executeSequenceInWorkers(workers);
+
+if (!kDebug) {
+ // Collect results, d8 style.
+ for (let worker of workers) {
+ let msg = worker.getMessage();
+ results[msg.index] = getSequence(msg.sequence, msg.result);
+ }
+}
+
+// Terminate all workers.
+for (let worker of workers) {
+ worker.terminate();
+}
+
+// In debug mode, print sequences and results.
+if (kDebug) {
+ for (let result of results) {
+ print(result);
+ }
+
+ for (let sequence of sequences) {
+ print(sequence);
+ }
+}
+
+// Try to reconstruct a sequential ordering.
+let passed = findSequentialOrdering();
+
+if (passed) {
+ print("PASS");
+} else {
+ for (let i = 0; i < kNumberOfWorker; i++) {
+ print("Worker " + i);
+ print(sequences[i]);
+ print(results[i]);
+ }
+ print("FAIL");
+ quit(-1);
+}
diff --git a/deps/v8/test/mjsunit/wasm/atomics64-stress.js b/deps/v8/test/mjsunit/wasm/atomics64-stress.js
new file mode 100644
index 0000000000..e0ce2324ea
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/atomics64-stress.js
@@ -0,0 +1,596 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --experimental-wasm-threads
+
+// This test might time out if the search space for a sequential
+// interleaving becomes too large. However, it should never fail.
+// Note that results of this test are flaky by design. While the test is
+// deterministic with a fixed seed, bugs may introduce non-determinism.
+
+load("test/mjsunit/wasm/wasm-constants.js");
+load("test/mjsunit/wasm/wasm-module-builder.js");
+
+const kDebug = false;
+
+const kSequenceLength = 256;
+const kNumberOfWorker = 4;
+const kNumberOfSteps = 10000000;
+
+const kFirstOpcodeWithInput = 4;
+const kFirstOpcodeWithoutOutput = 4;
+const kLastOpcodeWithoutOutput = 7;
+
+const opCodes = [
+ kExprI64AtomicLoad,
+ kExprI64AtomicLoad8U,
+ kExprI64AtomicLoad16U,
+ kExprI64AtomicLoad32U,
+ kExprI64AtomicStore,
+ kExprI64AtomicStore8U,
+ kExprI64AtomicStore16U,
+ kExprI64AtomicStore32U,
+ kExprI64AtomicAdd,
+ kExprI64AtomicAdd8U,
+ kExprI64AtomicAdd16U,
+ kExprI64AtomicAdd32U,
+ kExprI64AtomicSub,
+ kExprI64AtomicSub8U,
+ kExprI64AtomicSub16U,
+ kExprI64AtomicSub32U,
+ kExprI64AtomicAnd,
+ kExprI64AtomicAnd8U,
+ kExprI64AtomicAnd16U,
+ kExprI64AtomicAnd32U,
+ kExprI64AtomicOr,
+ kExprI64AtomicOr8U,
+ kExprI64AtomicOr16U,
+ kExprI64AtomicOr32U,
+ kExprI64AtomicXor,
+ kExprI64AtomicXor8U,
+ kExprI64AtomicXor16U,
+ kExprI64AtomicXor32U,
+ kExprI64AtomicExchange,
+ kExprI64AtomicExchange8U,
+ kExprI64AtomicExchange16U,
+ kExprI64AtomicExchange32U
+];
+
+const opCodeNames = [
+ "kExprI64AtomicLoad",
+ "kExprI64AtomicLoad8U",
+ "kExprI64AtomicLoad16U",
+ "kExprI64AtomicLoad32U",
+ "kExprI64AtomicStore",
+ "kExprI64AtomicStore8U",
+ "kExprI64AtomicStore16U",
+ "kExprI64AtomicStore32U",
+ "kExprI64AtomicAdd",
+ "kExprI64AtomicAdd8U",
+ "kExprI64AtomicAdd16U",
+ "kExprI64AtomicAdd32U",
+ "kExprI64AtomicSub",
+ "kExprI64AtomicSub8U",
+ "kExprI64AtomicSub16U",
+ "kExprI64AtomicSub32U",
+ "kExprI64AtomicAnd",
+ "kExprI64AtomicAnd8U",
+ "kExprI64AtomicAnd16U",
+ "kExprI64AtomicAnd32U",
+ "kExprI64AtomicOr",
+ "kExprI64AtomicOr8U",
+ "kExprI64AtomicOr16U",
+ "kExprI64AtomicOr32U",
+ "kExprI64AtomicXor",
+ "kExprI64AtomicXor8U",
+ "kExprI64AtomicXor16U",
+ "kExprI64AtomicXor32U",
+ "kExprI64AtomicExchange",
+ "kExprI64AtomicExchange8U",
+ "kExprI64AtomicExchange16U",
+ "kExprI64AtomicExchange32U"
+];
+
+// Exclusive upper bound for random 32-bit words; written as 2 ** 32 because
+// (1 << 31) * 2 would be negative (JS shifts yield signed 32-bit results).
+const kMaxInt32 = 2 ** 32;
+
+class Operation {
+ constructor(opcode, low_input, high_input, offset) {
+ this.opcode = opcode != undefined ? opcode : Operation.nextOpcode();
+ this.size = Operation.opcodeToSize(this.opcode);
+ if (low_input == undefined) {
+ [low_input, high_input] = Operation.inputForSize(this.size);
+ }
+ this.low_input = low_input;
+ this.high_input = high_input;
+ this.offset = offset != undefined ? offset : Operation.offsetForSize(
+ this.size);
+ }
+
+ static nextOpcode() {
+ let random = Math.random();
+ return Math.floor(random * opCodes.length);
+ }
+
+ static opcodeToSize(opcode) {
+ // Instructions are ordered by access size: 64, 8, 16, 32 bits.
+ return [64, 8, 16, 32][opcode % 4];
+ }
+
+ static opcodeToAlignment(opcode) {
+ // Instructions are ordered by access size: 64, 8, 16, 32 bits.
+ return [3, 0, 1, 2][opcode % 4];
+ }
+
+ static inputForSize(size) {
+ if (size <= 32) {
+ let random = Math.random();
+ // Use 2 ** size rather than shifts: 1 << 32 wraps to 1, and
+ // (1 << 31) * 2 goes negative, since JS shifts yield signed 32-bit values.
+ return [Math.floor(random * 2 ** size), 0];
+ }
+ return [Math.floor(Math.random() * kMaxInt32), Math.floor(Math.random() *
+ kMaxInt32)];
+ }
+
+ static offsetForSize(size) {
+ // Pick an offset in bytes between 0 and 8.
+ let offset = Math.floor(Math.random() * 8);
+ // Make sure the offset matches the required alignment by masking out the lower bits.
+ let size_in_bytes = size / 8;
+ let mask = ~(size_in_bytes - 1);
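+ // E.g. for size 32: size_in_bytes is 4, mask is ~3, and the offset snaps
+ // to 0 or 4.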
+ return offset & mask;
+ }
+
+ get wasmOpcode() {
+ // [opcode, alignment, offset]
+ return [opCodes[this.opcode], Operation.opcodeToAlignment(this.opcode), this.offset];
+ }
+
+ get hasInput() {
+ return this.opcode >= kFirstOpcodeWithInput;
+ }
+
+ get hasOutput() {
+ return this.opcode < kFirstOpcodeWithoutOutput || this.opcode >
+ kLastOpcodeWithoutOutput;
+ }
+
+ truncateResultBits(low, high) {
+ if (this.size == 64) return [low, high];
+
+ // Shift the low word. For offsets of four or more it drops out of the
+ // visible window entirely.
+ let shiftedL = this.offset >= 4 ? 0 : low >>> (this.offset * 8);
+ // The high word contributes nothing at offset 0, is left-shifted for
+ // offsets [1..3], and right-shifted for offsets [4..7].
+ let shiftedH = this.offset == 0 ? 0 :
+ this.offset >= 4 ? high >>> (this.offset - 4) * 8 : high << ((4 -
+ this.offset) * 8);
+ let value = shiftedL | shiftedH;
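+ // E.g. an 8-bit access at offset 5 selects byte 5 of the i64: shiftedL is
+ // 0, shiftedH is high >>> 8, and the switch below masks the result to 8
+ // bits.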
+
+ switch (this.size) {
+ case 8:
+ return [value & 0xFF, 0];
+ case 16:
+ return [value & 0xFFFF, 0];
+ case 32:
+ return [value, 0];
+ default:
+ throw "Unexpected size: " + this.size;
+ }
+ }
+
+ static get builder() {
+ if (!Operation.__builder) {
+ let builder = new WasmModuleBuilder();
+ builder.addMemory(1, 1, 1, false);
+ builder.exportMemoryAs("mem");
+ Operation.__builder = builder;
+ }
+ return Operation.__builder;
+ }
+
+ static get exports() {
+ if (!Operation.__instance) {
+ return {};
+ }
+ return Operation.__instance.exports;
+ }
+
+ static get memory() {
+ return Operation.exports.mem;
+ }
+
+ static set instance(instance) {
+ Operation.__instance = instance;
+ }
+
+ compute(state) {
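+ // Lazily build and cache (keyed by opcode and offset) a helper module that
+ // stores the expected i64 state at address 0, applies this operation to it,
+ // and lets us read the resulting i64 back from the helper's memory.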
+ let evalFun = Operation.exports[this.key];
+ if (!evalFun) {
+ let builder = Operation.builder;
+ let body = [
+ // Load address of low 32 bits.
+ kExprI32Const, 0,
+ // Load expected value.
+ kExprGetLocal, 0,
+ kExprI32StoreMem, 2, 0,
+ // Load address of high 32 bits.
+ kExprI32Const, 4,
+ // Load expected value.
+ kExprGetLocal, 1,
+ kExprI32StoreMem, 2, 0,
+ // Load address of where our window starts.
+ kExprI32Const, 0,
+ // Load input if there is one.
+ ...(this.hasInput ? [kExprGetLocal, 3,
+ kExprI64UConvertI32,
+ kExprI64Const, 32,
+ kExprI64Shl,
+ kExprGetLocal, 2,
+ kExprI64UConvertI32,
+ kExprI64Ior
+ ] : []),
+ // Perform operation.
+ kAtomicPrefix, ...this.wasmOpcode,
+ // Drop the output if the operation produced one.
+ ...(this.hasOutput ? [kExprDrop] : []),
+ // Return.
+ kExprReturn
+ ];
+ builder.addFunction(this.key, kSig_v_iiii)
+ .addBody(body)
+ .exportAs(this.key);
+ // Instantiate module, get function exports.
+ let module = new WebAssembly.Module(builder.toBuffer());
+ Operation.instance = new WebAssembly.Instance(module);
+ evalFun = Operation.exports[this.key];
+ }
+ evalFun(state.low, state.high, this.low_input, this.high_input);
+ let ta = new Int32Array(Operation.memory.buffer);
+ if (kDebug) {
+ print(state.high + ":" + state.low + " " + this.toString() +
+ " -> " + ta[1] + ":" + ta[0]);
+ }
+ return {
+ low: ta[0],
+ high: ta[1]
+ };
+ }
+
+ toString() {
+ return opCodeNames[this.opcode] + "[+" + this.offset + "] " + this.high_input +
+ ":" + this.low_input;
+ }
+
+ get key() {
+ return this.opcode + "-" + this.offset;
+ }
+}
+
+class State {
+ constructor(low, high, indices, count) {
+ this.low = low;
+ this.high = high;
+ this.indices = indices;
+ this.count = count;
+ }
+
+ isFinal() {
+ return (this.count == kNumberOfWorker * kSequenceLength);
+ }
+
+ toString() {
+ return this.high + ":" + this.low + " @ " + this.indices;
+ }
+}
+
+function makeSequenceOfOperations(size) {
+ let result = new Array(size);
+ for (let i = 0; i < size; i++) {
+ result[i] = new Operation();
+ }
+ return result;
+}
+
+function toSLeb128(low, high) {
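+ // Encode the 64-bit value given as (low, high) words in signed LEB128,
+ // e.g. toSLeb128(-1, -1) yields the single byte [0x7f].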
+ let result = [];
+ while (true) {
+ let v = low & 0x7f;
+ // For low, the logical shift fills with zeros; high supplies the extra bits.
+ low = low >>> 7;
+ if (high != 0) {
+ let shiftIn = high << (32 - 7);
+ low = low | shiftIn;
+ // For high, the arithmetic shift fills with ones so that negative values
+ // keep their sign bits.
+ high = high >> 7;
+ }
+ let msbIsSet = (v & 0x40) || false;
+ if (((low == 0) && (high == 0) && !msbIsSet) || ((low == -1) && (high ==
+ -1) && msbIsSet)) {
+ result.push(v);
+ break;
+ }
+ result.push(v | 0x80);
+ }
+ return result;
+}
+
+function generateFunctionBodyForSequence(sequence) {
+ // We expect the address of the int64 to perform ops on as arg 0, the
+ // address of our value log as arg 1, and arg 2 gives the address of an
+ // int32 used to count down spinning workers.
+ let body = [];
+ // Initially, we spin until all workers start running.
+ if (!kDebug) {
+ body.push(
+ // Decrement the wait count.
+ kExprGetLocal, 2,
+ kExprI32Const, 1,
+ kAtomicPrefix, kExprI32AtomicSub, 2, 0,
+ // Spin until zero.
+ kExprLoop, kWasmStmt,
+ kExprGetLocal, 2,
+ kAtomicPrefix, kExprI32AtomicLoad, 2, 0,
+ kExprI32Const, 0,
+ kExprI32GtU,
+ kExprBrIf, 0,
+ kExprEnd
+ );
+ }
+ for (let operation of sequence) {
+ body.push(
+ // Pre-load address of results sequence pointer for later.
+ kExprGetLocal, 1,
+ // Load address where atomic pointers are stored.
+ kExprGetLocal, 0,
+ // Load the input value if the operation has one.
+ ...(operation.hasInput ? [kExprI64Const, ...toSLeb128(operation
+ .low_input, operation.high_input)] : []),
+ // Perform operation
+ kAtomicPrefix, ...operation.wasmOpcode,
+ // Generate fake output if needed.
+ ...(operation.hasOutput ? [] : [kExprI64Const, 0]),
+ // Store the observed intermediate value to the result sequence.
+ kExprI64StoreMem, 3, 0,
+ // Increment result sequence pointer.
+ kExprGetLocal, 1,
+ kExprI32Const, 8,
+ kExprI32Add,
+ kExprSetLocal, 1
+ );
+ }
+ // Return end of sequence index.
+ body.push(
+ kExprGetLocal, 1,
+ kExprReturn);
+ return body;
+}
+
+function getSequence(start, end) {
+ return new Int32Array(memory.buffer, start, (end - start) / Int32Array.BYTES_PER_ELEMENT);
+}
+
+function spawnWorkers() {
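+ // d8's Worker takes the worker script as a string ({type: 'string'}) and
+ // provides synchronous postMessage/getMessage for communication.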
+ let workers = [];
+ for (let i = 0; i < kNumberOfWorker; i++) {
+ let worker = new Worker(
+ `onmessage = function(msg) {
+ if (msg.module) {
+ let module = msg.module;
+ let mem = msg.mem;
+ this.instance = new WebAssembly.Instance(module, {m: {imported_mem: mem}});
+ postMessage({instantiated: true});
+ } else {
+ let address = msg.address;
+ let sequence = msg.sequence;
+ let index = msg.index;
+ let spin = msg.spin;
+ let result = instance.exports["worker" + index](address, sequence, spin);
+ postMessage({index: index, sequence: sequence, result: result});
+ }
+ }`, {type: 'string'}
+ );
+ workers.push(worker);
+ }
+ return workers;
+}
+
+function instantiateModuleInWorkers(workers) {
+ for (let worker of workers) {
+ worker.postMessage({
+ module: module,
+ mem: memory
+ });
+ let msg = worker.getMessage();
+ if (!msg.instantiated) throw "Worker failed to instantiate";
+ }
+}
+
+function executeSequenceInWorkers(workers) {
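+ // Shared-memory layout: the i64 under test lives at byte 0, the spin
+ // counter at byte 16 (memory_view[4]), and worker i logs kSequenceLength
+ // i64 results starting at byte 32 + ((kSequenceLength * 8) + 32) * i.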
+ for (let i = 0; i < workers.length; i++) {
+ let worker = workers[i];
+ worker.postMessage({
+ index: i,
+ address: 0,
+ spin: 16,
+ sequence: 32 + ((kSequenceLength * 8) + 32) * i
+ });
+ // In debug mode, keep execution sequential.
+ if (kDebug) {
+ let msg = worker.getMessage();
+ results[msg.index] = getSequence(msg.sequence, msg.result);
+ }
+ }
+}
+
+function selectMatchingWorkers(state) {
+ let matching = [];
+ let indices = state.indices;
+ for (let i = 0; i < indices.length; i++) {
+ let index = indices[i];
+ if (index >= kSequenceLength) continue;
+ // We need to project the expected value to the number of bits this
+ // operation will read at runtime.
+ let [expected_low, expected_high] = sequences[i][index].truncateResultBits(
+ state.low, state.high);
+ let hasOutput = sequences[i][index].hasOutput;
+ if (!hasOutput || ((results[i][index * 2] == expected_low) && (results[
+ i][index * 2 + 1] == expected_high))) {
+ matching.push(i);
+ }
+ }
+ return matching;
+}
+
+function computeNextState(state, advanceIdx) {
+ let newIndices = state.indices.slice();
+ let sequence = sequences[advanceIdx];
+ let operation = sequence[state.indices[advanceIdx]];
+ newIndices[advanceIdx]++;
+ let {
+ low,
+ high
+ } = operation.compute(state);
+
+ return new State(low, high, newIndices, state.count + 1);
+}
+
+function findSequentialOrdering() {
+ let startIndices = new Array(results.length);
+ let steps = 0;
+ startIndices.fill(0);
+ let matchingStates = [new State(0, 0, startIndices, 0)];
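+ // Depth-first search over interleavings: each state tracks the current
+ // memory value (low/high words) plus every worker's position in its result
+ // log, and is extended by any worker whose next logged result matches.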
+ while (matchingStates.length > 0) {
+ let current = matchingStates.pop();
+ if (kDebug) {
+ print(current);
+ }
+ let matchingResults = selectMatchingWorkers(current);
+ if (matchingResults.length == 0) {
+ continue;
+ }
+ for (let match of matchingResults) {
+ let newState = computeNextState(current, match);
+ if (newState.isFinal()) {
+ return true;
+ }
+ matchingStates.push(newState);
+ }
+ if (steps++ > kNumberOfSteps) {
+ print("Search timed out, aborting...");
+ return true;
+ }
+ }
+ // We have no options left.
+ return false;
+}
+
+// Helpful for debugging failed tests.
+function loadSequencesFromStrings(inputs) {
+ let reverseOpcodes = {};
+ for (let i = 0; i < opCodeNames.length; i++) {
+ reverseOpcodes[opCodeNames[i]] = i;
+ }
+ let sequences = [];
+ // toString() prints "name[+offset] high:low", so capture all four parts
+ // and pass them to the (opcode, low_input, high_input, offset) constructor.
+ let parseRE = /([a-zA-Z0-9]*)\[\+([0-9])\] ([\-0-9]*):([\-0-9]*)/;
+ for (let input of inputs) {
+ let parts = input.split(",");
+ let sequence = [];
+ for (let part of parts) {
+ let parsed = parseRE.exec(part);
+ sequence.push(new Operation(reverseOpcodes[parsed[1]], parsed[4] | 0,
+ parsed[3] | 0, parsed[2] | 0));
+ }
+ sequences.push(sequence);
+ }
+ return sequences;
+}
+
+// Helpful for debugging failed tests.
+function loadResultsFromStrings(inputs) {
+ let results = [];
+ for (let input of inputs) {
+ let parts = input.split(",");
+ let result = [];
+ for (let number of parts) {
+ result.push(number | 0);
+ }
+ results.push(result);
+ }
+ return results;
+}
+
+let maxSize = 10;
+let memory = new WebAssembly.Memory({
+ initial: 1,
+ maximum: maxSize,
+ shared: true
+});
+let memory_view = new Int32Array(memory.buffer);
+
+let sequences = [];
+let results = [];
+
+let builder = new WasmModuleBuilder();
+builder.addImportedMemory("m", "imported_mem", 0, maxSize, "shared");
+
+for (let i = 0; i < kNumberOfWorker; i++) {
+ sequences[i] = makeSequenceOfOperations(kSequenceLength);
+ builder.addFunction("worker" + i, kSig_i_iii)
+ .addBody(generateFunctionBodyForSequence(sequences[i]))
+ .exportAs("worker" + i);
+}
+
+// Instantiate module, get function exports.
+let module = new WebAssembly.Module(builder.toBuffer());
+let instance = new WebAssembly.Instance(module, {
+ m: {
+ imported_mem: memory
+ }
+});
+
+// Spawn off the workers and run the sequences.
+let workers = spawnWorkers();
+// Initialize the spin counter (int32 at byte offset 16) to the worker count.
+memory_view[4] = kNumberOfWorker;
+instantiateModuleInWorkers(workers);
+executeSequenceInWorkers(workers);
+
+if (!kDebug) {
+ // Collect results, d8 style.
+ for (let worker of workers) {
+ let msg = worker.getMessage();
+ results[msg.index] = getSequence(msg.sequence, msg.result);
+ }
+}
+
+// Terminate all workers.
+for (let worker of workers) {
+ worker.terminate();
+}
+
+// In debug mode, print sequences and results.
+if (kDebug) {
+ for (let result of results) {
+ print(result);
+ }
+
+ for (let sequence of sequences) {
+ print(sequence);
+ }
+}
+
+// Try to reconstruct a sequential ordering.
+let passed = findSequentialOrdering();
+
+if (passed) {
+ print("PASS");
+} else {
+ for (let i = 0; i < kNumberOfWorker; i++) {
+ print("Worker " + i);
+ print(sequences[i]);
+ print(results[i]);
+ }
+ print("FAIL");
+ quit(-1);
+}
diff --git a/deps/v8/test/mjsunit/wasm/bigint.js b/deps/v8/test/mjsunit/wasm/bigint.js
new file mode 100644
index 0000000000..cb761acab1
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/bigint.js
@@ -0,0 +1,207 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --experimental-wasm-bigint
+
+load("test/mjsunit/wasm/wasm-constants.js");
+load("test/mjsunit/wasm/wasm-module-builder.js");
+
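+// With --experimental-wasm-bigint, wasm i64 values cross the JS API boundary
+// as BigInts; the tests below exercise both directions of the conversion.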
+(function TestWasmI64ToJSBigInt() {
+ var builder = new WasmModuleBuilder();
+
+ builder
+ .addFunction("fn", kSig_l_v) // () -> i64
+ .addBody([
+ kExprI64Const, 0x3,
+ ])
+ .exportFunc();
+
+ var module = builder.instantiate();
+
+ assertEquals(typeof module.exports.fn(), "bigint");
+ assertEquals(module.exports.fn(), 3n);
+})();
+
+(function TestWasmI64ToJSBigIntImportedFunc() {
+ var builder = new WasmModuleBuilder();
+
+ var a_index = builder
+ .addImport("a", "a", kSig_l_l) // i64 -> i64
+
+ builder
+ .addFunction("fn", kSig_l_v) // () -> i64
+ .addBody([
+ kExprI64Const, 0x7,
+ kExprCallFunction, a_index
+ ])
+ .exportFunc();
+
+ a_was_called = false;
+
+ var module = builder.instantiate({
+ a: {
+ a(param) {
+ assertEquals(typeof param, "bigint");
+ assertEquals(param, 7n);
+ a_was_called = true;
+ return 12n;
+ },
+ }
+ });
+
+ assertEquals(module.exports.fn(), 12n);
+
+ assertTrue(a_was_called);
+})();
+
+(function TestJSBigIntToWasmI64Global() {
+ var builder = new WasmModuleBuilder();
+
+ var a_global_index = builder
+ .addImportedGlobal("mod", "a", kWasmI64)
+
+ var b_global_index = builder
+ .addImportedGlobal("mod", "b", kWasmI64);
+
+ var c_global_index = builder
+ .addImportedGlobal("mod", "c", kWasmI64);
+
+ builder
+ .addExportOfKind('a', kExternalGlobal, a_global_index)
+ .addExportOfKind('b', kExternalGlobal, b_global_index)
+ .addExportOfKind('c', kExternalGlobal, c_global_index);
+
+ var module = builder.instantiate({
+ mod: {
+ a: 1n,
+ b: 2n ** 63n,
+ c: "123",
+ }
+ });
+
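+ // i64 globals hold values mod 2^64: 2n ** 63n wraps to -(2n ** 63n), and
+ // the string "123" is converted via ToBigInt.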
+ assertEquals(module.exports.a.value, 1n);
+ assertEquals(module.exports.b.value, - (2n ** 63n));
+ assertEquals(module.exports.c.value, 123n);
+})();
+
+(function TestJSBigIntToWasmI64MutableGlobal() {
+ var builder = new WasmModuleBuilder();
+
+ var a_global_index = builder
+ .addImportedGlobal("mod", "a", kWasmI64, /* mutable = */ true)
+
+ builder
+ .addExportOfKind('a', kExternalGlobal, a_global_index);
+
+ // as a plain value, not a WebAssembly.Global object
+ var fn = () => builder.instantiate({
+ mod: {
+ a: 1n,
+ }
+ });
+
+ assertThrows(fn, WebAssembly.LinkError);
+
+ // as WebAssembly.Global object
+ var module = builder.instantiate({
+ mod: {
+ a: new WebAssembly.Global({ value: "i64", mutable: true }, 1n),
+ }
+ });
+
+ assertEquals(module.exports.a.value, 1n);
+})();
+
+(function TestJSBigIntToWasmI64Identity() {
+ var builder = new WasmModuleBuilder();
+
+ builder
+ .addFunction("f", kSig_l_l) // i64 -> i64
+ .addBody([
+ kExprGetLocal, 0x0,
+ ])
+ .exportFunc();
+
+ var module = builder.instantiate();
+ var f = module.exports.f;
+
+ assertEquals(f(0n), 0n);
+ assertEquals(f(-0n), -0n);
+ assertEquals(f(123n), 123n);
+ assertEquals(f(-123n), -123n);
+
+ assertEquals(f("5"), 5n);
+
+ assertThrows(() => f(5), TypeError);
+})();
+
+(function TestI64Global() {
+ var argument = { "value": "i64", "mutable": true };
+ var global = new WebAssembly.Global(argument);
+
+ assertEquals(global.value, 0n); // initial value
+
+ global.value = 123n;
+ assertEquals(global.valueOf(), 123n);
+
+ global.value = 2n ** 63n;
+ assertEquals(global.valueOf(), - (2n ** 63n));
+})();
+
+(function TestI64GlobalValueOf() {
+ var argument = { "value": "i64" };
+
+ // valueOf returns a BigInt
+ var global = new WebAssembly.Global(argument, {
+ valueOf() {
+ return 123n;
+ }
+ });
+ assertEquals(global.value, 123n);
+
+ // valueOf returns a string, converted via ToBigInt
+ var global2 = new WebAssembly.Global(argument, {
+ valueOf() {
+ return "321";
+ }
+ });
+ assertEquals(global2.value, 321n);
+})();
+
+(function TestInvalidValtypeGlobalErrorMessage() {
+ var argument = { "value": "some string" };
+ assertThrows(() => new WebAssembly.Global(argument), TypeError);
+
+ try {
+ new WebAssembly.Global(argument);
+ } catch (e) {
+ assertContains("'value' must be", e.message);
+ assertContains("i64", e.message);
+ }
+})();
+
+(function TestGlobalI64ValueWrongType() {
+ var argument = { "value": "i64" };
+ assertThrows(() => new WebAssembly.Global(argument, 666), TypeError);
+})();
+
+(function TestGlobalI64SetWrongType() {
+ var argument = { "value": "i64", "mutable": true };
+ var global = new WebAssembly.Global(argument);
+
+ assertThrows(() => global.value = 1, TypeError);
+})();
+
+(function TestFuncParamF64PassingBigInt() {
+ var builder = new WasmModuleBuilder();
+
+ builder
+ .addFunction("f", kSig_v_d) // f64 -> ()
+ .addBody([])
+ .exportFunc();
+
+ var module = builder.instantiate();
+
+ assertThrows(() => module.exports.f(123n), TypeError);
+})();
diff --git a/deps/v8/test/mjsunit/wasm/bounds-check-turbofan.js b/deps/v8/test/mjsunit/wasm/bounds-check-turbofan.js
new file mode 100644
index 0000000000..7e260eab08
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/bounds-check-turbofan.js
@@ -0,0 +1,24 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+load("test/mjsunit/wasm/wasm-constants.js");
+load("test/mjsunit/wasm/wasm-module-builder.js");
+
+const builder = new WasmModuleBuilder();
+builder.addMemory(1, undefined, false);
+builder.addFunction('load', kSig_i_i)
+ .addBody([
+ kExprGetLocal, 0,
+ kExprI32LoadMem, 0, 100])
+ .exportFunc();
+
+const module = builder.instantiate();
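+// With --allow-natives-syntax, %WasmTierUpFunction forces the function at
+// the given index to be compiled by TurboFan before it is called.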
+%WasmTierUpFunction(module, 0);
+// 100 is added as part of the load instruction above
+// Last valid address (64k - 100 - 4)
+assertEquals(0, module.exports.load(0x10000 - 100 - 4));
+// First invalid address (64k - 100)
+assertTraps(kTrapMemOutOfBounds, _ => { module.exports.load(0x10000 - 100);});
diff --git a/deps/v8/test/mjsunit/wasm/bulk-memory.js b/deps/v8/test/mjsunit/wasm/bulk-memory.js
new file mode 100644
index 0000000000..c9aefb774c
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/bulk-memory.js
@@ -0,0 +1,375 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --experimental-wasm-bulk-memory
+
+load("test/mjsunit/wasm/wasm-constants.js");
+load("test/mjsunit/wasm/wasm-module-builder.js");
+
+(function TestPassiveDataSegment() {
+ const builder = new WasmModuleBuilder();
+ builder.addMemory(1, 1, false);
+ builder.addPassiveDataSegment([0, 1, 2]);
+ builder.addPassiveDataSegment([3, 4]);
+
+ // Should not throw.
+ builder.instantiate();
+})();
+
+(function TestPassiveElementSegment() {
+ const builder = new WasmModuleBuilder();
+ builder.addFunction('f', kSig_v_v).addBody([]);
+ builder.setTableBounds(1, 1);
+ builder.addPassiveElementSegment([0, 0, 0]);
+ builder.addPassiveElementSegment([0, 0]);
+
+ // Should not throw.
+ builder.instantiate();
+})();
+
+function assertBufferContents(buf, expected) {
+ for (let i = 0; i < expected.length; ++i) {
+ assertEquals(expected[i], buf[i]);
+ }
+ for (let i = expected.length; i < buf.length; ++i) {
+ assertEquals(0, buf[i]);
+ }
+}
+
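+// Builds a module with one passive data segment and returns its exported
+// wrapper around memory.init(dest, src, size).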
+function getMemoryInit(mem, segment_data) {
+ const builder = new WasmModuleBuilder();
+ builder.addImportedMemory("", "mem", 0);
+ builder.addPassiveDataSegment(segment_data);
+ builder.addFunction('init', kSig_v_iii)
+ .addBody([
+ kExprGetLocal, 0, // Dest.
+ kExprGetLocal, 1, // Source.
+ kExprGetLocal, 2, // Size in bytes.
+ kNumericPrefix, kExprMemoryInit,
+ 0, // Memory index.
+ 0, // Data segment index.
+ ])
+ .exportAs('init');
+ return builder.instantiate({'': {mem}}).exports.init;
+}
+
+(function TestMemoryInit() {
+ const mem = new WebAssembly.Memory({initial: 1});
+ const memoryInit = getMemoryInit(mem, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]);
+
+ const u8a = new Uint8Array(mem.buffer);
+
+ // All zeroes.
+ assertBufferContents(u8a, []);
+
+ // Copy all bytes from data segment 0, to memory at [10, 20).
+ memoryInit(10, 0, 10);
+ assertBufferContents(u8a, [0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9]);
+
+ // Copy bytes in range [5, 10) from data segment 0, to memory at [0, 5).
+ memoryInit(0, 5, 5);
+ assertBufferContents(u8a, [5, 6, 7, 8, 9, 0, 0, 0, 0, 0,
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9]);
+})();
+
+(function TestMemoryInitOutOfBounds() {
+ const mem = new WebAssembly.Memory({initial: 1});
+ // Create a data segment that has a length of kPageSize.
+ const memoryInit = getMemoryInit(mem, new Array(kPageSize));
+
+ // OK, copy the full data segment to memory.
+ memoryInit(0, 0, kPageSize);
+
+ // Source range must not be out of bounds.
+ assertTraps(kTrapMemOutOfBounds, () => memoryInit(0, 1, kPageSize));
+ assertTraps(kTrapMemOutOfBounds, () => memoryInit(0, 1000, kPageSize));
+ assertTraps(kTrapMemOutOfBounds, () => memoryInit(0, kPageSize, 1));
+
+ // Destination range must not be out of bounds.
+ assertTraps(kTrapMemOutOfBounds, () => memoryInit(1, 0, kPageSize));
+ assertTraps(kTrapMemOutOfBounds, () => memoryInit(1000, 0, kPageSize));
+ assertTraps(kTrapMemOutOfBounds, () => memoryInit(kPageSize, 0, 1));
+
+ // Make sure bounds aren't checked with 32-bit wrapping.
+ assertTraps(kTrapMemOutOfBounds, () => memoryInit(1, 1, -1));
+
+ mem.grow(1);
+
+ // Works properly after grow.
+ memoryInit(kPageSize, 0, 1000);
+
+ // Traps at new boundary.
+ assertTraps(
+ kTrapMemOutOfBounds, () => memoryInit(kPageSize + 1, 0, kPageSize));
+})();
+
+(function TestMemoryInitOnActiveSegment() {
+ const builder = new WasmModuleBuilder();
+ builder.addMemory(1);
+ builder.addPassiveDataSegment([1, 2, 3]);
+ builder.addDataSegment(0, [4, 5, 6]);
+ builder.addFunction('init', kSig_v_v)
+ .addBody([
+ kExprI32Const, 0, // Dest.
+ kExprI32Const, 0, // Source.
+ kExprI32Const, 0, // Size in bytes.
+ kNumericPrefix, kExprMemoryInit,
+ 0, // Memory index.
+ 1, // Data segment index.
+ ])
+ .exportAs('init');
+
+ // Instantiation succeeds, because using memory.init with an active segment
+ // is a trap, not a validation error.
+ const instance = builder.instantiate();
+
+ assertTraps(kTrapDataSegmentDropped, () => instance.exports.init());
+})();
+
+(function TestMemoryInitOnDroppedSegment() {
+ const builder = new WasmModuleBuilder();
+ builder.addMemory(1);
+ builder.addPassiveDataSegment([1, 2, 3]);
+ builder.addFunction('init', kSig_v_v)
+ .addBody([
+ kExprI32Const, 0, // Dest.
+ kExprI32Const, 0, // Source.
+ kExprI32Const, 0, // Size in bytes.
+ kNumericPrefix, kExprMemoryInit,
+ 0, // Memory index.
+ 0, // Data segment index.
+ ])
+ .exportAs('init');
+ builder.addFunction('drop', kSig_v_v)
+ .addBody([
+ kNumericPrefix, kExprMemoryDrop,
+ 0, // Data segment index.
+ ])
+ .exportAs('drop');
+
+ // Instantiation succeeds; using memory.init on a dropped segment is a
+ // runtime trap, not a validation error.
+ const instance = builder.instantiate();
+
+ // OK, segment hasn't been dropped.
+ instance.exports.init();
+
+ instance.exports.drop();
+
+ // After segment has been dropped, memory.init and memory.drop fail.
+ assertTraps(kTrapDataSegmentDropped, () => instance.exports.init());
+ assertTraps(kTrapDataSegmentDropped, () => instance.exports.drop());
+})();
+
+(function TestMemoryDropOnActiveSegment() {
+ const builder = new WasmModuleBuilder();
+ builder.addMemory(1);
+ builder.addPassiveDataSegment([1, 2, 3]);
+ builder.addDataSegment(0, [4, 5, 6]);
+ builder.addFunction('drop', kSig_v_v)
+ .addBody([
+ kNumericPrefix, kExprMemoryDrop,
+ 1, // Data segment index.
+ ])
+ .exportAs('drop');
+
+ const instance = builder.instantiate();
+ assertTraps(kTrapDataSegmentDropped, () => instance.exports.drop());
+})();
+
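+// Returns an exported wrapper around memory.copy(dest, src, size) operating
+// on the imported memory.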
+function getMemoryCopy(mem) {
+ const builder = new WasmModuleBuilder();
+ builder.addImportedMemory("", "mem", 0);
+ builder.addFunction("copy", kSig_v_iii).addBody([
+ kExprGetLocal, 0, // Dest.
+ kExprGetLocal, 1, // Source.
+ kExprGetLocal, 2, // Size in bytes.
+ kNumericPrefix, kExprMemoryCopy, 0,
+ ]).exportAs("copy");
+ return builder.instantiate({'': {mem}}).exports.copy;
+}
+
+(function TestMemoryCopy() {
+ const mem = new WebAssembly.Memory({initial: 1});
+ const memoryCopy = getMemoryCopy(mem);
+
+ const u8a = new Uint8Array(mem.buffer);
+ u8a.set([0, 11, 22, 33, 44, 55, 66, 77]);
+
+ memoryCopy(10, 1, 8);
+
+ assertBufferContents(u8a, [0, 11, 22, 33, 44, 55, 66, 77, 0, 0,
+ 11, 22, 33, 44, 55, 66, 77]);
+
+ // Copy 0 bytes does nothing.
+ memoryCopy(10, 1, 0);
+ assertBufferContents(u8a, [0, 11, 22, 33, 44, 55, 66, 77, 0, 0,
+ 11, 22, 33, 44, 55, 66, 77]);
+})();
+
+(function TestMemoryCopyOverlapping() {
+ const mem = new WebAssembly.Memory({initial: 1});
+ const memoryCopy = getMemoryCopy(mem);
+
+ const u8a = new Uint8Array(mem.buffer);
+ u8a.set([10, 20, 30]);
+
+ // Copy from [0, 3] -> [2, 5]. The copy must not overwrite 30 before copying
+ // it (i.e. cannot copy forward in this case).
+ memoryCopy(2, 0, 3);
+ assertBufferContents(u8a, [10, 20, 10, 20, 30]);
+
+ // Copy from [2, 5] -> [0, 3]. The copy must not write the first 10 (i.e.
+ // cannot copy backward in this case).
+ memoryCopy(0, 2, 3);
+ assertBufferContents(u8a, [10, 20, 30, 20, 30]);
+})();
+
+(function TestMemoryCopyOutOfBounds() {
+ const mem = new WebAssembly.Memory({initial: 1});
+ const memoryCopy = getMemoryCopy(mem);
+
+ memoryCopy(0, 0, kPageSize);
+
+ // Source range must not be out of bounds.
+ assertTraps(kTrapMemOutOfBounds, () => memoryCopy(0, 1, kPageSize));
+ assertTraps(kTrapMemOutOfBounds, () => memoryCopy(0, 1000, kPageSize));
+ assertTraps(kTrapMemOutOfBounds, () => memoryCopy(0, kPageSize, 1));
+
+ // Destination range must not be out of bounds.
+ assertTraps(kTrapMemOutOfBounds, () => memoryCopy(1, 0, kPageSize));
+ assertTraps(kTrapMemOutOfBounds, () => memoryCopy(1000, 0, kPageSize));
+ assertTraps(kTrapMemOutOfBounds, () => memoryCopy(kPageSize, 0, 1));
+
+ // Make sure bounds aren't checked with 32-bit wrapping.
+ assertTraps(kTrapMemOutOfBounds, () => memoryCopy(1, 1, -1));
+
+ mem.grow(1);
+
+ // Works properly after grow.
+ memoryCopy(0, kPageSize, 1000);
+
+ // Traps at new boundary.
+ assertTraps(
+ kTrapMemOutOfBounds, () => memoryCopy(0, kPageSize + 1, kPageSize));
+})();
+
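+// Returns an exported wrapper around memory.fill(dest, value, size); only
+// the low byte of 'value' is written.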
+function getMemoryFill(mem) {
+ const builder = new WasmModuleBuilder();
+ builder.addImportedMemory("", "mem", 0);
+ builder.addFunction("fill", kSig_v_iii).addBody([
+ kExprGetLocal, 0, // Dest.
+ kExprGetLocal, 1, // Byte value.
+ kExprGetLocal, 2, // Size.
+ kNumericPrefix, kExprMemoryFill, 0,
+ ]).exportAs("fill");
+ return builder.instantiate({'': {mem}}).exports.fill;
+}
+
+(function TestMemoryFill() {
+ const mem = new WebAssembly.Memory({initial: 1});
+ const memoryFill = getMemoryFill(mem);
+
+ const u8a = new Uint8Array(mem.buffer);
+
+ memoryFill(1, 33, 5);
+ assertBufferContents(u8a, [0, 33, 33, 33, 33, 33]);
+
+ memoryFill(4, 66, 4);
+ assertBufferContents(u8a, [0, 33, 33, 33, 66, 66, 66, 66]);
+
+ // Fill 0 bytes does nothing.
+ memoryFill(4, 66, 0);
+ assertBufferContents(u8a, [0, 33, 33, 33, 66, 66, 66, 66]);
+})();
+
+(function TestMemoryFillValueWrapsToByte() {
+ const mem = new WebAssembly.Memory({initial: 1});
+ const memoryFill = getMemoryFill(mem);
+
+ const u8a = new Uint8Array(mem.buffer);
+
+ memoryFill(0, 1000, 3);
+ const expected = 1000 & 255;
+ assertBufferContents(u8a, [expected, expected, expected]);
+})();
+
+(function TestMemoryFillOutOfBounds() {
+ const mem = new WebAssembly.Memory({initial: 1});
+ const memoryFill = getMemoryFill(mem);
+ const v = 123;
+
+ memoryFill(0, 0, kPageSize);
+
+ // Destination range must not be out of bounds.
+ assertTraps(kTrapMemOutOfBounds, () => memoryFill(1, v, kPageSize));
+ assertTraps(kTrapMemOutOfBounds, () => memoryFill(1000, v, kPageSize));
+ assertTraps(kTrapMemOutOfBounds, () => memoryFill(kPageSize, v, 1));
+
+ // Make sure bounds aren't checked with 32-bit wrapping.
+ assertTraps(kTrapMemOutOfBounds, () => memoryFill(1, v, -1));
+
+ mem.grow(1);
+
+ // Works properly after grow.
+ memoryFill(kPageSize, v, 1000);
+
+ // Traps at new boundary.
+ assertTraps(
+ kTrapMemOutOfBounds, () => memoryFill(kPageSize + 1, v, kPageSize));
+})();
+
+(function TestTableInit0() {
+ let builder = new WasmModuleBuilder();
+ let sig_v_iii = builder.addType(kSig_v_iii);
+
+ builder.setTableBounds(5, 5);
+ builder.addElementSegment(0, false, []);
+ builder.addElementSegment(0, false, []);
+
+ builder.addFunction("init0", sig_v_iii)
+ .addBody([
+ kExprGetLocal, 0,
+ kExprGetLocal, 1,
+ kExprGetLocal, 2,
+ kNumericPrefix, kExprTableInit, kTableZero, kSegmentZero])
+ .exportAs("init0");
+
+ let instance = builder.instantiate();
+ let init = instance.exports.init0;
+ // TODO(titzer): we only check that a function containing TableInit can be compiled.
+ // init(1, 2, 3);
+})();
+
+(function TestTableDropActive() {
+ const builder = new WasmModuleBuilder();
+ builder.setTableBounds(5, 5);
+ builder.addElementSegment(0, false, [0, 0, 0]);
+ builder.addFunction('drop', kSig_v_v)
+ .addBody([
+ kNumericPrefix, kExprTableDrop,
+ 0, // Element segment index.
+ ])
+ .exportAs('drop');
+
+ const instance = builder.instantiate();
+ assertTraps(kTrapElemSegmentDropped, () => instance.exports.drop());
+})();
+
+(function TestTableDropTwice() {
+ const builder = new WasmModuleBuilder();
+ builder.setTableBounds(5, 5);
+ builder.addPassiveElementSegment([0, 0, 0]);
+ builder.addFunction('drop', kSig_v_v)
+ .addBody([
+ kNumericPrefix, kExprTableDrop,
+ 0, // Element segment index.
+ ])
+ .exportAs('drop');
+
+ const instance = builder.instantiate();
+ instance.exports.drop();
+ assertTraps(kTrapElemSegmentDropped, () => instance.exports.drop());
+})();
diff --git a/deps/v8/test/mjsunit/wasm/code-space-exhaustion.js b/deps/v8/test/mjsunit/wasm/code-space-exhaustion.js
new file mode 100644
index 0000000000..7e8bcde6af
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/code-space-exhaustion.js
@@ -0,0 +1,18 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --wasm-max-code-space=1
+
+load('test/mjsunit/wasm/wasm-constants.js');
+load('test/mjsunit/wasm/wasm-module-builder.js');
+
+// We only have 1 MB of code space. This is enough for the code below, but
+// compiling all 1000 modules requires several GCs to reclaim the old code.
+const builder = new WasmModuleBuilder();
+builder.addFunction('main', kSig_i_i).addBody([kExprGetLocal, 0]);
+const buffer = builder.toBuffer();
+
+for (let i = 0; i < 1000; ++i) {
+ new WebAssembly.Module(buffer);
+}
diff --git a/deps/v8/test/mjsunit/wasm/compiled-module-serialization.js b/deps/v8/test/mjsunit/wasm/compiled-module-serialization.js
index a5d1bc5414..8a9a0cbd10 100644
--- a/deps/v8/test/mjsunit/wasm/compiled-module-serialization.js
+++ b/deps/v8/test/mjsunit/wasm/compiled-module-serialization.js
@@ -217,8 +217,8 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
kExprCallIndirect, sig_index1, kTableZero]) // --
.exportAs("main");
- builder.setFunctionTableBounds(kTableSize, kTableSize);
- builder.addFunctionTableInit(0, false, [f1.index]);
+ builder.setTableBounds(kTableSize, kTableSize);
+ builder.addElementSegment(0, false, [f1.index]);
builder.addExportOfKind("table", kExternalTable, 0);
return new WebAssembly.Module(builder.toBuffer());
@@ -239,7 +239,7 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
.exportAs("main");
builder.addImportedTable("z", "table", kTableSize, kTableSize);
- builder.addFunctionTableInit(1, false, [f2.index], true);
+ builder.addElementSegment(1, false, [f2.index], true);
var m2_bytes = builder.toBuffer();
var m2 = new WebAssembly.Module(m2_bytes);
diff --git a/deps/v8/test/mjsunit/wasm/errors.js b/deps/v8/test/mjsunit/wasm/errors.js
index 0d4893c18a..744ba9bbdb 100644
--- a/deps/v8/test/mjsunit/wasm/errors.js
+++ b/deps/v8/test/mjsunit/wasm/errors.js
@@ -72,9 +72,10 @@ function assertConversionError(bytes, imports, msg) {
assertCompileError(
builder().addFunction("f", kSig_i_v).end().toBuffer(),
/function body must end with "end" opcode @/);
- assertCompileError(builder().addFunction("f", kSig_i_v).addBody([
- kExprReturn
- ]).end().toBuffer(), /return found empty stack @/);
+ assertCompileError(
+ builder().addFunction('f', kSig_i_v).addBody([kExprReturn])
+ .end().toBuffer(),
+ /expected 1 elements on the stack for return, found 0 @/);
assertCompileError(builder().addFunction("f", kSig_v_v).addBody([
kExprGetLocal, 0
]).end().toBuffer(), /invalid local index: 0 @/);
diff --git a/deps/v8/test/mjsunit/wasm/exceptions-anyref.js b/deps/v8/test/mjsunit/wasm/exceptions-anyref.js
new file mode 100644
index 0000000000..8d43610ff8
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/exceptions-anyref.js
@@ -0,0 +1,101 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --experimental-wasm-eh --experimental-wasm-anyref --allow-natives-syntax
+
+load("test/mjsunit/wasm/wasm-constants.js");
+load("test/mjsunit/wasm/wasm-module-builder.js");
+load("test/mjsunit/wasm/exceptions-utils.js");
+
+// Test the encoding of a thrown exception with a null-ref value.
+(function TestThrowRefNull() {
+ print(arguments.callee.name);
+ let builder = new WasmModuleBuilder();
+ let except = builder.addException(kSig_v_r);
+ builder.addFunction("throw_null", kSig_v_v)
+ .addBody([
+ kExprRefNull,
+ kExprThrow, except,
+ ]).exportFunc();
+ let instance = builder.instantiate();
+
+ assertWasmThrows(instance, except, [null], () => instance.exports.throw_null());
+})();
+
+// Test throwing/catching the null-ref value.
+(function TestThrowCatchRefNull() {
+ print(arguments.callee.name);
+ let builder = new WasmModuleBuilder();
+ let except = builder.addException(kSig_v_r);
+ builder.addFunction("throw_catch_null", kSig_i_i)
+ .addBody([
+ kExprTry, kWasmAnyRef,
+ kExprGetLocal, 0,
+ kExprI32Eqz,
+ kExprIf, kWasmAnyRef,
+ kExprRefNull,
+ kExprThrow, except,
+ kExprElse,
+ kExprI32Const, 42,
+ kExprReturn,
+ kExprEnd,
+ kExprCatch,
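+ // br_on_exn branches to the given depth with the exception payload when
+ // the tag matches 'except'; otherwise we fall through and rethrow.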
+ kExprBrOnExn, 0, except,
+ kExprRethrow,
+ kExprEnd,
+ kExprRefIsNull,
+ kExprIf, kWasmI32,
+ kExprI32Const, 23,
+ kExprElse,
+ kExprUnreachable,
+ kExprEnd,
+ ]).exportFunc();
+ let instance = builder.instantiate();
+
+ assertEquals(23, instance.exports.throw_catch_null(0));
+ assertEquals(42, instance.exports.throw_catch_null(1));
+})();
+
+// Test the encoding of a thrown exception with a reference type value.
+(function TestThrowRefParam() {
+ print(arguments.callee.name);
+ let builder = new WasmModuleBuilder();
+ let except = builder.addException(kSig_v_r);
+ builder.addFunction("throw_param", kSig_v_r)
+ .addBody([
+ kExprGetLocal, 0,
+ kExprThrow, except,
+ ]).exportFunc();
+ let instance = builder.instantiate();
+ let o = new Object();
+
+ assertWasmThrows(instance, except, [o], () => instance.exports.throw_param(o));
+ assertWasmThrows(instance, except, [1], () => instance.exports.throw_param(1));
+ assertWasmThrows(instance, except, [2.3], () => instance.exports.throw_param(2.3));
+ assertWasmThrows(instance, except, ["str"], () => instance.exports.throw_param("str"));
+})();
+
+// Test throwing/catching the reference type value.
+(function TestThrowCatchRefParam() {
+ print(arguments.callee.name);
+ let builder = new WasmModuleBuilder();
+ let except = builder.addException(kSig_v_r);
+ builder.addFunction("throw_catch_param", kSig_r_r)
+ .addBody([
+ kExprTry, kWasmAnyRef,
+ kExprGetLocal, 0,
+ kExprThrow, except,
+ kExprCatch,
+ kExprBrOnExn, 0, except,
+ kExprRethrow,
+ kExprEnd,
+ ]).exportFunc();
+ let instance = builder.instantiate();
+ let o = new Object();
+
+ assertEquals(o, instance.exports.throw_catch_param(o));
+ assertEquals(1, instance.exports.throw_catch_param(1));
+ assertEquals(2.3, instance.exports.throw_catch_param(2.3));
+ assertEquals("str", instance.exports.throw_catch_param("str"));
+})();
diff --git a/deps/v8/test/mjsunit/wasm/exceptions-rethrow.js b/deps/v8/test/mjsunit/wasm/exceptions-rethrow.js
new file mode 100644
index 0000000000..43041ca9e2
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/exceptions-rethrow.js
@@ -0,0 +1,121 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-wasm --experimental-wasm-eh --allow-natives-syntax
+
+load("test/mjsunit/wasm/wasm-constants.js");
+load("test/mjsunit/wasm/wasm-module-builder.js");
+load("test/mjsunit/wasm/exceptions-utils.js");
+
+// Test that rethrow expressions work inside catch blocks.
+(function TestRethrowInCatch() {
+ print(arguments.callee.name);
+ let builder = new WasmModuleBuilder();
+ let except = builder.addException(kSig_v_v);
+ builder.addFunction("rethrow0", kSig_v_v)
+ .addBody([
+ kExprTry, kWasmStmt,
+ kExprThrow, except,
+ kExprCatch,
+ kExprRethrow,
+ kExprEnd,
+ ]).exportFunc();
+ builder.addFunction("rethrow1", kSig_i_i)
+ .addLocals({except_count: 1})
+ .addBody([
+ kExprTry, kWasmI32,
+ kExprThrow, except,
+ kExprCatch,
+ kExprSetLocal, 1,
+ kExprGetLocal, 0,
+ kExprI32Eqz,
+ kExprIf, kWasmStmt,
+ kExprGetLocal, 1,
+ kExprRethrow,
+ kExprEnd,
+ kExprI32Const, 23,
+ kExprEnd
+ ]).exportFunc();
+ let instance = builder.instantiate();
+
+ assertWasmThrows(instance, except, [], () => instance.exports.rethrow0());
+ assertWasmThrows(instance, except, [], () => instance.exports.rethrow1(0));
+ assertEquals(23, instance.exports.rethrow1(1));
+})();
+
+// Test that rethrow expressions work properly even in the presence of multiple
+// nested handlers being involved.
+(function TestRethrowNested() {
+ print(arguments.callee.name);
+ let builder = new WasmModuleBuilder();
+ let except1 = builder.addException(kSig_v_v);
+ let except2 = builder.addException(kSig_v_v);
+ builder.addFunction("rethrow_nested", kSig_i_i)
+ .addLocals({except_count: 2})
+ .addBody([
+ kExprTry, kWasmI32,
+ kExprThrow, except2,
+ kExprCatch,
+ kExprSetLocal, 2,
+ kExprTry, kWasmI32,
+ kExprThrow, except1,
+ kExprCatch,
+ kExprSetLocal, 1,
+ kExprGetLocal, 0,
+ kExprI32Const, 0,
+ kExprI32Eq,
+ kExprIf, kWasmStmt,
+ kExprGetLocal, 1,
+ kExprRethrow,
+ kExprEnd,
+ kExprGetLocal, 0,
+ kExprI32Const, 1,
+ kExprI32Eq,
+ kExprIf, kWasmStmt,
+ kExprGetLocal, 2,
+ kExprRethrow,
+ kExprEnd,
+ kExprI32Const, 23,
+ kExprEnd,
+ kExprEnd,
+ ]).exportFunc();
+ let instance = builder.instantiate();
+
+ assertWasmThrows(instance, except1, [], () => instance.exports.rethrow_nested(0));
+ assertWasmThrows(instance, except2, [], () => instance.exports.rethrow_nested(1));
+ assertEquals(23, instance.exports.rethrow_nested(2));
+})();
+
+// Test that an exception being rethrown can be caught by another local catch
+// block in the same function without ever unwinding the activation.
+(function TestRethrowRecatch() {
+ print(arguments.callee.name);
+ let builder = new WasmModuleBuilder();
+ let except = builder.addException(kSig_v_v);
+ builder.addFunction("rethrow_recatch", kSig_i_i)
+ .addLocals({except_count: 1})
+ .addBody([
+ kExprTry, kWasmI32,
+ kExprThrow, except,
+ kExprCatch,
+ kExprSetLocal, 1,
+ kExprTry, kWasmI32,
+ kExprGetLocal, 0,
+ kExprI32Eqz,
+ kExprIf, kWasmStmt,
+ kExprGetLocal, 1,
+ kExprRethrow,
+ kExprEnd,
+ kExprI32Const, 42,
+ kExprCatch,
+ kExprDrop,
+ kExprI32Const, 23,
+ kExprEnd,
+ kExprEnd,
+ ]).exportFunc();
+ let instance = builder.instantiate();
+
+ assertEquals(23, instance.exports.rethrow_recatch(0));
+ assertEquals(42, instance.exports.rethrow_recatch(1));
+})();
diff --git a/deps/v8/test/mjsunit/wasm/exceptions-shared.js b/deps/v8/test/mjsunit/wasm/exceptions-shared.js
index f2a5b56e9a..adfbded0c7 100644
--- a/deps/v8/test/mjsunit/wasm/exceptions-shared.js
+++ b/deps/v8/test/mjsunit/wasm/exceptions-shared.js
@@ -33,7 +33,9 @@ function NewExportedException() {
.addBody([
kExprTry, kWasmStmt,
kExprCallFunction, fun,
- kExprCatch, except,
+ kExprCatch,
+ kExprBrOnExn, 0, except,
+ kExprRethrow,
kExprEnd,
]).exportFunc();
let ex_obj = new Error("my exception");
@@ -65,7 +67,9 @@ function NewExportedException() {
.addBody([
kExprTry, kWasmStmt,
kExprCallFunction, fun,
- kExprCatch, except,
+ kExprCatch,
+ kExprBrOnExn, 0, except,
+ kExprRethrow,
kExprEnd,
]).exportFunc();
let ex_obj = new Error("my exception");
@@ -100,7 +104,9 @@ function NewExportedException() {
.addBody([
kExprTry, kWasmStmt,
kExprCallFunction, fun,
- kExprCatch, except1,
+ kExprCatch,
+ kExprBrOnExn, 0, except1,
+ kExprRethrow,
kExprEnd,
]).exportFunc();
let ex_obj = new Error("my exception");
@@ -139,7 +145,9 @@ function NewExportedException() {
.addBody([
kExprTry, kWasmStmt,
kExprCallFunction, fun,
- kExprCatch, except,
+ kExprCatch,
+ kExprBrOnExn, 0, except,
+ kExprRethrow,
kExprEnd,
]).exportFunc();
let ex_obj = new Error("my exception");
diff --git a/deps/v8/test/mjsunit/wasm/exceptions-simd.js b/deps/v8/test/mjsunit/wasm/exceptions-simd.js
new file mode 100644
index 0000000000..fc21156b18
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/exceptions-simd.js
@@ -0,0 +1,51 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-wasm --experimental-wasm-eh --experimental-wasm-simd --allow-natives-syntax
+
+load("test/mjsunit/wasm/wasm-constants.js");
+load("test/mjsunit/wasm/wasm-module-builder.js");
+load("test/mjsunit/wasm/exceptions-utils.js");
+
+(function TestThrowS128Default() {
+ var builder = new WasmModuleBuilder();
+ var kSig_v_s = makeSig([kWasmS128], []);
+ var except = builder.addException(kSig_v_s);
+ builder.addFunction("throw_simd", kSig_v_v)
+ .addLocals({s128_count: 1})
+ .addBody([
+ kExprGetLocal, 0,
+ kExprThrow, 0,
+ ])
+ .exportFunc();
+ var instance = builder.instantiate();
+
+ assertWasmThrows(instance, except, [0, 0, 0, 0, 0, 0, 0, 0],
+ () => instance.exports.throw_simd());
+})();
+
+(function TestThrowCatchS128Default() {
+ var builder = new WasmModuleBuilder();
+ var kSig_v_s = makeSig([kWasmS128], []);
+ var except = builder.addException(kSig_v_s);
+ builder.addFunction("throw_catch_simd", kSig_i_v)
+ .addLocals({s128_count: 1})
+ .addBody([
+ kExprTry, kWasmS128,
+ kExprGetLocal, 0,
+ kExprThrow, 0,
+ kExprCatch,
+ kExprBrOnExn, 0, except,
+ kExprRethrow,
+ kExprEnd,
+ // TODO(mstarzinger): Actually return some compressed form of the s128
+ // value here to make sure it is extracted properly from the exception.
+ kExprDrop,
+ kExprI32Const, 1,
+ ])
+ .exportFunc();
+ var instance = builder.instantiate();
+
+ assertEquals(1, instance.exports.throw_catch_simd());
+})();
diff --git a/deps/v8/test/mjsunit/wasm/exceptions-utils.js b/deps/v8/test/mjsunit/wasm/exceptions-utils.js
new file mode 100644
index 0000000000..344ca64da1
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/exceptions-utils.js
@@ -0,0 +1,28 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+// This file is intended to be loaded by other tests to provide utility methods
+// requiring natives syntax (and hence not suited for the mjsunit.js file).
+
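+// Asserts that running 'code' throws a WebAssembly.RuntimeError whose tag
+// matches 'runtime_id' for the given instance and whose payload equals
+// 'values'.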
+function assertWasmThrows(instance, runtime_id, values, code) {
+ try {
+ if (typeof code === 'function') {
+ code();
+ } else {
+ eval(code);
+ }
+ } catch (e) {
+ assertInstanceof(e, WebAssembly.RuntimeError);
+ var e_runtime_id = %GetWasmExceptionId(e, instance);
+ assertTrue(Number.isInteger(e_runtime_id));
+ assertEquals(e_runtime_id, runtime_id);
+ var e_values = %GetWasmExceptionValues(e);
+ assertArrayEquals(values, e_values);
+ return; // Success.
+ }
+ throw new MjsUnitAssertionError('Did not throw expected <' + runtime_id +
+ '> with values: ' + values);
+}
diff --git a/deps/v8/test/mjsunit/wasm/exceptions.js b/deps/v8/test/mjsunit/wasm/exceptions.js
index d165c8742d..6aafd0c087 100644
--- a/deps/v8/test/mjsunit/wasm/exceptions.js
+++ b/deps/v8/test/mjsunit/wasm/exceptions.js
@@ -6,35 +6,18 @@
load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
-
-function assertWasmThrows(instance, runtime_id, values, code) {
- try {
- if (typeof code === 'function') {
- code();
- } else {
- eval(code);
- }
- } catch (e) {
- assertInstanceof(e, WebAssembly.RuntimeError);
- var e_runtime_id = %GetWasmExceptionId(e, instance);
- assertTrue(Number.isInteger(e_runtime_id));
- assertEquals(e_runtime_id, runtime_id);
- var e_values = %GetWasmExceptionValues(e);
- assertArrayEquals(values, e_values);
- return; // Success.
- }
- throw new MjsUnitAssertionError('Did not throw expected <' + runtime_id +
- '> with values: ' + values);
-}
+load("test/mjsunit/wasm/exceptions-utils.js");
// First we just test that "except_ref" local variables are allowed.
(function TestLocalExceptRef() {
+ print(arguments.callee.name);
let builder = new WasmModuleBuilder();
builder.addFunction("push_and_drop_except_ref", kSig_v_v)
+ .addLocals({except_count: 1})
.addBody([
kExprGetLocal, 0,
kExprDrop,
- ]).addLocals({except_count: 1}).exportFunc();
+ ]).exportFunc();
let instance = builder.instantiate();
assertDoesNotThrow(instance.exports.push_and_drop_except_ref);
@@ -42,6 +25,7 @@ function assertWasmThrows(instance, runtime_id, values, code) {
// The following method doesn't attempt to catch a raised exception.
(function TestThrowSimple() {
+ print(arguments.callee.name);
let builder = new WasmModuleBuilder();
let except = builder.addException(kSig_v_v);
builder.addFunction("throw_if_param_not_zero", kSig_i_i)
@@ -63,12 +47,13 @@ function assertWasmThrows(instance, runtime_id, values, code) {
// Test that empty try/catch blocks work.
(function TestCatchEmptyBlocks() {
+ print(arguments.callee.name);
let builder = new WasmModuleBuilder();
- let except = builder.addException(kSig_v_v);
builder.addFunction("catch_empty_try", kSig_v_v)
.addBody([
kExprTry, kWasmStmt,
- kExprCatch, except,
+ kExprCatch,
+ kExprDrop,
kExprEnd,
]).exportFunc();
let instance = builder.instantiate();
@@ -78,6 +63,7 @@ function assertWasmThrows(instance, runtime_id, values, code) {
// Now that we know throwing works, we test catching the exceptions we raise.
(function TestCatchSimple() {
+ print(arguments.callee.name);
let builder = new WasmModuleBuilder();
let except = builder.addException(kSig_v_v);
builder.addFunction("simple_throw_catch_to_0_1", kSig_i_i)
@@ -89,7 +75,8 @@ function assertWasmThrows(instance, runtime_id, values, code) {
kExprThrow, except,
kExprEnd,
kExprI32Const, 42,
- kExprCatch, except,
+ kExprCatch,
+ kExprDrop,
kExprI32Const, 23,
kExprEnd
]).exportFunc();
@@ -99,47 +86,139 @@ function assertWasmThrows(instance, runtime_id, values, code) {
assertEquals(42, instance.exports.simple_throw_catch_to_0_1(1));
})();
-// Test that we can distinguish which exception was thrown.
-(function TestCatchComplex() {
+// Test that we can distinguish which exception was thrown by using a cascaded
+// sequence of nested try blocks with a single handler in each catch block.
+(function TestCatchComplex1() {
+ print(arguments.callee.name);
let builder = new WasmModuleBuilder();
let except1 = builder.addException(kSig_v_v);
let except2 = builder.addException(kSig_v_v);
let except3 = builder.addException(kSig_v_v);
- builder.addFunction("catch_different_exceptions", kSig_i_i)
+ builder.addFunction("catch_complex", kSig_i_i)
.addBody([
- kExprTry, kWasmI32,
- kExprTry, kWasmI32,
- kExprGetLocal, 0,
- kExprI32Eqz,
- kExprIf, kWasmStmt,
- kExprThrow, except1,
- kExprElse,
+ kExprBlock, kWasmStmt,
+ kExprBlock, kWasmStmt,
+ kExprTry, kWasmStmt,
+ kExprTry, kWasmStmt,
+ kExprGetLocal, 0,
+ kExprI32Eqz,
+ kExprIf, kWasmStmt,
+ kExprThrow, except1,
+ kExprElse,
+ kExprGetLocal, 0,
+ kExprI32Const, 1,
+ kExprI32Eq,
+ kExprIf, kWasmStmt,
+ kExprThrow, except2,
+ kExprElse,
+ kExprThrow, except3,
+ kExprEnd,
+ kExprEnd,
+ kExprI32Const, 2,
+ kExprReturn,
+ kExprCatch,
+ kExprBrOnExn, 2, except1,
+ kExprRethrow,
+ kExprEnd,
+ kExprCatch,
+ kExprBrOnExn, 2, except2,
+ kExprRethrow,
+ kExprEnd,
+ kExprEnd,
+ kExprI32Const, 3,
+ kExprReturn,
+ kExprEnd,
+ kExprI32Const, 4,
+ ]).exportFunc();
+ let instance = builder.instantiate();
+
+ assertEquals(3, instance.exports.catch_complex(0));
+ assertEquals(4, instance.exports.catch_complex(1));
+ assertWasmThrows(instance, except3, [], () => instance.exports.catch_complex(2));
+})();
+
+// Test that we can distinguish which exception was thrown by using a single
+// try block with multiple handlers in the associated catch block.
+(function TestCatchComplex2() {
+ print(arguments.callee.name);
+ let builder = new WasmModuleBuilder();
+ let except1 = builder.addException(kSig_v_v);
+ let except2 = builder.addException(kSig_v_v);
+ let except3 = builder.addException(kSig_v_v);
+ builder.addFunction("catch_complex", kSig_i_i)
+ .addBody([
+ kExprBlock, kWasmStmt,
+ kExprBlock, kWasmStmt,
+ kExprTry, kWasmStmt,
kExprGetLocal, 0,
- kExprI32Const, 1,
- kExprI32Eq,
+ kExprI32Eqz,
kExprIf, kWasmStmt,
- kExprThrow, except2,
+ kExprThrow, except1,
kExprElse,
- kExprThrow, except3,
+ kExprGetLocal, 0,
+ kExprI32Const, 1,
+ kExprI32Eq,
+ kExprIf, kWasmStmt,
+ kExprThrow, except2,
+ kExprElse,
+ kExprThrow, except3,
+ kExprEnd,
kExprEnd,
+ kExprI32Const, 2,
+ kExprReturn,
+ kExprCatch,
+ kExprBrOnExn, 1, except1,
+ kExprBrOnExn, 2, except2,
+ kExprRethrow,
kExprEnd,
- kExprI32Const, 2,
- kExprCatch, except1,
- kExprI32Const, 3,
kExprEnd,
- kExprCatch, except2,
- kExprI32Const, 4,
- kExprEnd
+ kExprI32Const, 3,
+ kExprReturn,
+ kExprEnd,
+ kExprI32Const, 4,
]).exportFunc();
let instance = builder.instantiate();
- assertEquals(3, instance.exports.catch_different_exceptions(0));
- assertEquals(4, instance.exports.catch_different_exceptions(1));
- assertWasmThrows(instance, except3, [], () => instance.exports.catch_different_exceptions(2));
+ assertEquals(3, instance.exports.catch_complex(0));
+ assertEquals(4, instance.exports.catch_complex(1));
+ assertWasmThrows(instance, except3, [], () => instance.exports.catch_complex(2));
+})();
+
+// Test that br-on-exn is also allowed to consume values already present on
+// the operand stack, not solely values pushed by the branch itself.
+(function TestCatchBranchWithValueOnStack() {
+ print(arguments.callee.name);
+ let builder = new WasmModuleBuilder();
+ let except = builder.addException(kSig_v_v);
+ builder.addFunction("catch_complex", kSig_i_i)
+ .addLocals({except_count: 1})
+ .addBody([
+ kExprBlock, kWasmI32,
+ kExprTry, kWasmStmt,
+ kExprGetLocal, 0,
+ kExprI32Eqz,
+ kExprIf, kWasmStmt,
+ kExprThrow, except,
+ kExprEnd,
+ kExprCatch,
+ kExprSetLocal, 1,
+ kExprI32Const, 23,
+ kExprGetLocal, 1,
+ kExprBrOnExn, 1, except,
+ kExprRethrow,
+ kExprEnd,
+ kExprI32Const, 42,
+ kExprEnd,
+ ]).exportFunc();
+ let instance = builder.instantiate();
+
+ assertEquals(23, instance.exports.catch_complex(0));
+ assertEquals(42, instance.exports.catch_complex(1));
})();
// Test throwing an exception with multiple values.
(function TestThrowMultipleValues() {
+ print(arguments.callee.name);
let builder = new WasmModuleBuilder();
let except = builder.addException(kSig_v_ii);
builder.addFunction("throw_1_2", kSig_v_v)
@@ -155,6 +234,7 @@ function assertWasmThrows(instance, runtime_id, values, code) {
// Test throwing/catching the i32 parameter value.
(function TestThrowCatchParamI() {
+ print(arguments.callee.name);
let builder = new WasmModuleBuilder();
let except = builder.addException(kSig_v_i);
builder.addFunction("throw_catch_param", kSig_i_i)
@@ -163,8 +243,9 @@ function assertWasmThrows(instance, runtime_id, values, code) {
kExprGetLocal, 0,
kExprThrow, except,
kExprI32Const, 2,
- kExprCatch, except,
- kExprReturn,
+ kExprCatch,
+ kExprBrOnExn, 0, except,
+ kExprRethrow,
kExprEnd,
]).exportFunc();
let instance = builder.instantiate();
@@ -176,6 +257,7 @@ function assertWasmThrows(instance, runtime_id, values, code) {
// Test the encoding of a thrown exception with an integer exception.
(function TestThrowParamI() {
+ print(arguments.callee.name);
let builder = new WasmModuleBuilder();
let except = builder.addException(kSig_v_i);
builder.addFunction("throw_param", kSig_v_i)
@@ -191,6 +273,7 @@ function assertWasmThrows(instance, runtime_id, values, code) {
// Test throwing/catching the f32 parameter value.
(function TestThrowCatchParamF() {
+ print(arguments.callee.name);
let builder = new WasmModuleBuilder();
let except = builder.addException(kSig_v_f);
builder.addFunction("throw_catch_param", kSig_f_f)
@@ -199,8 +282,9 @@ function assertWasmThrows(instance, runtime_id, values, code) {
kExprGetLocal, 0,
kExprThrow, except,
kExprF32Const, 0, 0, 0, 0,
- kExprCatch, except,
- kExprReturn,
+ kExprCatch,
+ kExprBrOnExn, 0, except,
+ kExprRethrow,
kExprEnd,
]).exportFunc();
let instance = builder.instantiate();
@@ -211,6 +295,7 @@ function assertWasmThrows(instance, runtime_id, values, code) {
// Test the encoding of a thrown exception with a float value.
(function TestThrowParamF() {
+ print(arguments.callee.name);
let builder = new WasmModuleBuilder();
let except = builder.addException(kSig_v_f);
builder.addFunction("throw_param", kSig_v_f)
@@ -226,29 +311,26 @@ function assertWasmThrows(instance, runtime_id, values, code) {
// Test throwing/catching an I64 value
(function TestThrowCatchParamL() {
+ print(arguments.callee.name);
let builder = new WasmModuleBuilder();
let except = builder.addException(kSig_v_l);
builder.addFunction("throw_catch_param", kSig_i_i)
+ .addLocals({i64_count: 1})
.addBody([
kExprGetLocal, 0,
kExprI64UConvertI32,
kExprSetLocal, 1,
- kExprTry, kWasmI32,
+ kExprTry, kWasmI64,
kExprGetLocal, 1,
kExprThrow, except,
- kExprI32Const, 2,
- kExprCatch, except,
- kExprGetLocal, 1,
- kExprI64Eq,
- kExprIf, kWasmI32,
- kExprI32Const, 1,
- kExprElse,
- kExprI32Const, 0,
- kExprEnd,
- // TODO(kschimpf): Why is this return necessary?
- kExprReturn,
+ kExprI64Const, 23,
+ kExprCatch,
+ kExprBrOnExn, 0, except,
+ kExprRethrow,
kExprEnd,
- ]).addLocals({i64_count: 1}).exportFunc();
+ kExprGetLocal, 1,
+ kExprI64Eq,
+ ]).exportFunc();
let instance = builder.instantiate();
assertEquals(1, instance.exports.throw_catch_param(5));
@@ -258,6 +340,7 @@ function assertWasmThrows(instance, runtime_id, values, code) {
// Test the encoding of a thrown exception with an I64 value.
(function TestThrowParamL() {
+ print(arguments.callee.name);
let builder = new WasmModuleBuilder();
let except = builder.addException(kSig_v_l);
builder.addFunction("throw_param", kSig_v_ii)
@@ -279,6 +362,7 @@ function assertWasmThrows(instance, runtime_id, values, code) {
// Test throwing/catching the F64 parameter value
(function TestThrowCatchParamD() {
+ print(arguments.callee.name);
let builder = new WasmModuleBuilder();
let except = builder.addException(kSig_v_d);
builder.addFunction("throw_catch_param", kSig_d_d)
@@ -287,8 +371,9 @@ function assertWasmThrows(instance, runtime_id, values, code) {
kExprGetLocal, 0,
kExprThrow, except,
kExprF64Const, 0, 0, 0, 0, 0, 0, 0, 0,
- kExprCatch, except,
- kExprReturn,
+ kExprCatch,
+ kExprBrOnExn, 0, except,
+ kExprRethrow,
kExprEnd,
]).exportFunc();
let instance = builder.instantiate();
@@ -299,6 +384,7 @@ function assertWasmThrows(instance, runtime_id, values, code) {
// Test the encoding of a thrown exception with an f64 value.
(function TestThrowParamD() {
+ print(arguments.callee.name);
let builder = new WasmModuleBuilder();
let except = builder.addException(kSig_v_d);
builder.addFunction("throw_param", kSig_v_f)
@@ -315,6 +401,7 @@ function assertWasmThrows(instance, runtime_id, values, code) {
// Test the encoding of a computed parameter value.
(function TestThrowParamComputed() {
+ print(arguments.callee.name);
let builder = new WasmModuleBuilder();
let except = builder.addException(kSig_v_i);
builder.addFunction("throw_expr_with_params", kSig_v_ddi)
@@ -341,6 +428,7 @@ function assertWasmThrows(instance, runtime_id, values, code) {
// Now that we know catching works locally, we test catching exceptions that
// cross function boundaries and/or raised by JavaScript.
(function TestCatchCrossFunctions() {
+ print(arguments.callee.name);
let builder = new WasmModuleBuilder();
let except = builder.addException(kSig_v_i);
@@ -400,7 +488,9 @@ function assertWasmThrows(instance, runtime_id, values, code) {
kExprUnreachable,
kExprEnd,
kExprI32Const, 63,
- kExprCatch, except,
+ kExprCatch,
+ kExprBrOnExn, 0, except,
+ kExprRethrow,
kExprEnd
])
.exportFunc();
@@ -411,12 +501,15 @@ function assertWasmThrows(instance, runtime_id, values, code) {
kExprGetLocal, 0,
kExprThrow, except,
kExprUnreachable,
- kExprCatch, except,
+ kExprCatch,
+ kExprBrOnExn, 0, except,
+ kExprRethrow,
kExprEnd,
])
.exportFunc();
builder.addFunction("same_scope_multiple", kSig_i_i)
+ .addLocals({i32_count: 1, except_count: 1})
// path = 0;
//
// try {
@@ -462,7 +555,13 @@ function assertWasmThrows(instance, runtime_id, values, code) {
kExprUnreachable,
kExprEnd,
kExprI32Const, 2,
- kExprCatch, except,
+ kExprCatch,
+ kExprSetLocal, 2,
+ kExprBlock, kWasmI32,
+ kExprGetLocal, 2,
+ kExprBrOnExn, 0, except,
+ kExprRethrow,
+ kExprEnd,
kExprI32Const, 4,
kExprI32Ior,
kExprThrow, except,
@@ -481,7 +580,13 @@ function assertWasmThrows(instance, runtime_id, values, code) {
kExprEnd,
kExprI32Const, 16,
kExprI32Ior,
- kExprCatch, except,
+ kExprCatch,
+ kExprSetLocal, 2,
+ kExprBlock, kWasmI32,
+ kExprGetLocal, 2,
+ kExprBrOnExn, 0, except,
+ kExprRethrow,
+ kExprEnd,
kExprI32Const, 32,
kExprI32Ior,
kExprThrow, except,
@@ -500,12 +605,17 @@ function assertWasmThrows(instance, runtime_id, values, code) {
kExprEnd,
kExprI32Const, /*128=*/ 128, 1,
kExprI32Ior,
- kExprCatch, except,
+ kExprCatch,
+ kExprSetLocal, 2,
+ kExprBlock, kWasmI32,
+ kExprGetLocal, 2,
+ kExprBrOnExn, 0, except,
+ kExprRethrow,
+ kExprEnd,
kExprI32Const, /*256=*/ 128, 2,
kExprI32Ior,
kExprEnd,
])
- .addLocals({i32_count: 1})
.exportFunc();
// Scenario 2: Catches an exception raised from the direct callee.
@@ -515,7 +625,9 @@ function assertWasmThrows(instance, runtime_id, values, code) {
kExprGetLocal, 0,
kExprCallFunction, kWasmThrowFunction,
kExprUnreachable,
- kExprCatch, except,
+ kExprCatch,
+ kExprBrOnExn, 0, except,
+ kExprRethrow,
kExprEnd,
])
.exportFunc();
@@ -530,7 +642,9 @@ function assertWasmThrows(instance, runtime_id, values, code) {
kExprGetLocal, 1,
kExprCallIndirect, sig_v_i, kTableZero,
kExprUnreachable,
- kExprCatch, except,
+ kExprCatch,
+ kExprBrOnExn, 0, except,
+ kExprRethrow,
kExprEnd
])
.exportFunc();
@@ -543,9 +657,11 @@ function assertWasmThrows(instance, runtime_id, values, code) {
kExprGetLocal, 0,
kExprCallFunction, kJSThrowI,
kExprUnreachable,
- kExprCatch, except,
- kExprUnreachable,
+ kExprCatch,
+ kExprBrOnExn, 0, except,
+ kExprRethrow,
kExprEnd,
+ kExprUnreachable,
])
.exportFunc();
@@ -553,9 +669,11 @@ function assertWasmThrows(instance, runtime_id, values, code) {
.addBody([
kExprTry, kWasmStmt,
kExprCallFunction, kJSThrowString,
- kExprCatch, except,
- kExprUnreachable,
+ kExprCatch,
+ kExprBrOnExn, 0, except,
+ kExprRethrow,
kExprEnd,
+ kExprUnreachable,
])
.exportFunc();
@@ -563,9 +681,11 @@ function assertWasmThrows(instance, runtime_id, values, code) {
.addBody([
kExprTry, kWasmStmt,
kExprCallFunction, kJSThrowFP,
- kExprCatch, except,
- kExprUnreachable,
+ kExprCatch,
+ kExprBrOnExn, 0, except,
+ kExprRethrow,
kExprEnd,
+ kExprUnreachable,
])
.exportFunc();
@@ -573,9 +693,11 @@ function assertWasmThrows(instance, runtime_id, values, code) {
.addBody([
kExprTry, kWasmStmt,
kExprCallFunction, kJSThrowLarge,
- kExprCatch, except,
- kExprUnreachable,
+ kExprCatch,
+ kExprBrOnExn, 0, except,
+ kExprRethrow,
kExprEnd,
+ kExprUnreachable,
])
.exportFunc();
@@ -583,9 +705,11 @@ function assertWasmThrows(instance, runtime_id, values, code) {
.addBody([
kExprTry, kWasmStmt,
kExprCallFunction, kJSThrowUndefined,
- kExprCatch, except,
- kExprUnreachable,
+ kExprCatch,
+ kExprBrOnExn, 0, except,
+ kExprRethrow,
kExprEnd,
+ kExprUnreachable,
])
.exportFunc();
diff --git a/deps/v8/test/mjsunit/wasm/export-mutable-global.js b/deps/v8/test/mjsunit/wasm/export-mutable-global.js
index 25dca3f211..8e7d34a8b5 100644
--- a/deps/v8/test/mjsunit/wasm/export-mutable-global.js
+++ b/deps/v8/test/mjsunit/wasm/export-mutable-global.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --experimental-wasm-mut-global
-
load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
diff --git a/deps/v8/test/mjsunit/wasm/futex.js b/deps/v8/test/mjsunit/wasm/futex.js
new file mode 100644
index 0000000000..1ebb3f65ce
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/futex.js
@@ -0,0 +1,310 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --harmony-sharedarraybuffer
+// Flags: --experimental-wasm-threads
+
+'use strict';
+
+load("test/mjsunit/wasm/wasm-constants.js");
+load("test/mjsunit/wasm/wasm-module-builder.js");
+
+function WasmAtomicWake(memory, offset, index, num) {
+ let builder = new WasmModuleBuilder();
+ builder.addImportedMemory("m", "memory", 0, 20, "shared");
+ builder.addFunction("main", kSig_i_ii)
+ .addBody([
+ kExprGetLocal, 0,
+ kExprGetLocal, 1,
+ kAtomicPrefix,
+ kExprAtomicWake, /* alignment */ 0, offset])
+ .exportAs("main");
+
+ // Instantiate module, get function exports
+ let module = new WebAssembly.Module(builder.toBuffer());
+ let instance = new WebAssembly.Instance(module, {m: {memory}});
+ return instance.exports.main(index, num);
+}
+
+function WasmI32AtomicWait(memory, offset, index, val, timeout) {
+ let builder = new WasmModuleBuilder();
+ builder.addImportedMemory("m", "memory", 0, 20, "shared");
+ builder.addFunction("main",
+ makeSig([kWasmI32, kWasmI32, kWasmF64], [kWasmI32]))
+ .addBody([
+ kExprGetLocal, 0,
+ kExprGetLocal, 1,
+ kExprGetLocal, 2,
+ kExprI64SConvertF64,
+ kAtomicPrefix,
+ kExprI32AtomicWait, /* alignment */ 0, offset])
+ .exportAs("main");
+
+ // Instantiate module, get function exports
+ let module = new WebAssembly.Module(builder.toBuffer());
+ let instance = new WebAssembly.Instance(module, {m: {memory}});
+ return instance.exports.main(index, val, timeout);
+}
+
+function WasmI64AtomicWait(memory, offset, index, val_low,
+ val_high, timeout) {
+ let builder = new WasmModuleBuilder();
+ builder.addImportedMemory("m", "memory", 0, 20, "shared");
+ // Wrapper for I64AtomicWait that takes two I32 values and combines them
+ // into an I64 for the instruction's value parameter.
+ builder.addFunction("main",
+ makeSig([kWasmI32, kWasmI32, kWasmI32, kWasmF64], [kWasmI32]))
+ .addLocals({i64_count: 1}) // local that is passed as value param to wait
+ .addBody([
+ kExprGetLocal, 1,
+ kExprI64UConvertI32,
+ kExprI64Const, 32,
+ kExprI64Shl,
+ kExprGetLocal, 2,
+ kExprI64UConvertI32,
+ kExprI64Ior,
+ kExprSetLocal, 4, // Store the created I64 value in local
+ kExprGetLocal, 0,
+ kExprGetLocal, 4,
+ kExprGetLocal, 3,
+ kExprI64SConvertF64,
+ kAtomicPrefix,
+ kExprI64AtomicWait, /* alignment */ 0, offset])
+ .exportAs("main");
+
+ // Instantiate module, get function exports
+ let module = new WebAssembly.Module(builder.toBuffer());
+ let instance = new WebAssembly.Instance(module, {m: {memory}});
+ return instance.exports.main(index, val_high, val_low, timeout);
+}
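+
+// A sketch of the high/low combination performed in the wrapper above, in
+// plain JS BigInt arithmetic (values illustrative, not used by the tests):
+//   (BigInt(val_high) << 32n) | BigInt(val_low)
+//   e.g. val_high = 1, val_low = 2  =>  0x100000002n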
+
+(function TestInvalidIndex() {
+ let memory = new WebAssembly.Memory({initial: 1, maximum: 1, shared: true});
+
+ // Valid indexes are 0-65535 (1 page).
+ [-2, 65536, 0xffffffff].forEach(function(invalidIndex) {
+ assertThrows(function() {
+ WasmAtomicWake(memory, 0, invalidIndex, -1);
+ }, Error);
+ assertThrows(function() {
+ WasmI32AtomicWait(memory, 0, invalidIndex, 0, -1);
+ }, Error);
+ assertThrows(function() {
+ WasmI64AtomicWait(memory, 0, invalidIndex, 0, 0, -1);
+ }, Error);
+ assertThrows(function() {
+ WasmAtomicWake(memory, invalidIndex, 0, -1);
+ }, Error);
+ assertThrows(function() {
+ WasmI32AtomicWait(memory, invalidIndex, 0, 0, -1);
+ }, Error);
+ assertThrows(function() {
+ WasmI64AtomicWait(memory, invalidIndex, 0, 0, 0, -1);
+ }, Error);
+ assertThrows(function() {
+ WasmAtomicWake(memory, invalidIndex/2, invalidIndex/2, -1);
+ }, Error);
+ assertThrows(function() {
+ WasmI32AtomicWait(memory, invalidIndex/2, invalidIndex/2, 0, -1);
+ }, Error);
+ assertThrows(function() {
+ WasmI64AtomicWait(memory, invalidIndex/2, invalidIndex/2, 0, 0, -1);
+ }, Error);
+ });
+})();
+
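+// The numeric results asserted below follow the wait/wake return encoding:
+// wait returns 0 ("ok": the waiter was woken), 1 ("not-equal": the memory
+// value differed), or 2 ("timed-out"); wake returns the number of waiters
+// actually woken.
+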
+(function TestI32WaitTimeout() {
+ let memory = new WebAssembly.Memory({initial: 1, maximum: 1, shared: true});
+ var waitMs = 100;
+ var startTime = new Date();
+ assertEquals(2, WasmI32AtomicWait(memory, 0, 0, 0, waitMs*1000000));
+ var endTime = new Date();
+ assertTrue(endTime - startTime >= waitMs);
+})();
+
+(function TestI64WaitTimeout() {
+ let memory = new WebAssembly.Memory({initial: 1, maximum: 1, shared: true});
+ var waitMs = 100;
+ var startTime = new Date();
+ assertEquals(2, WasmI64AtomicWait(memory, 0, 0, 0, 0, waitMs*1000000));
+ var endTime = new Date();
+ assertTrue(endTime - startTime >= waitMs);
+})();
+
+(function TestI32WaitNotEqual() {
+ let memory = new WebAssembly.Memory({initial: 1, maximum: 1, shared: true});
+ assertEquals(1, WasmI32AtomicWait(memory, 0, 0, 42, -1));
+
+ assertEquals(2, WasmI32AtomicWait(memory, 0, 0, 0, 0));
+
+ let i32a = new Int32Array(memory.buffer);
+ i32a[0] = 1;
+ assertEquals(1, WasmI32AtomicWait(memory, 0, 0, 0, -1));
+ assertEquals(2, WasmI32AtomicWait(memory, 0, 0, 1, 0));
+})();
+
+(function TestI64WaitNotEqual() {
+ let memory = new WebAssembly.Memory({initial: 1, maximum: 1, shared: true});
+ assertEquals(1, WasmI64AtomicWait(memory, 0, 0, 42, 0, -1));
+
+ assertEquals(2, WasmI64AtomicWait(memory, 0, 0, 0, 0, 0));
+
+ let i32a = new Int32Array(memory.buffer);
+ i32a[0] = 1;
+ i32a[1] = 2;
+ assertEquals(1, WasmI64AtomicWait(memory, 0, 0, 0, 0, -1));
+ assertEquals(2, WasmI64AtomicWait(memory, 0, 0, 1, 2, 0));
+})();
+
+(function TestWakeCounts() {
+ let memory = new WebAssembly.Memory({initial: 1, maximum: 1, shared: true});
+
+ [-1, 0, 4, 100, 0xffffffff].forEach(function(count) {
+ WasmAtomicWake(memory, 0, 0, count);
+ });
+})();
+
+//// WORKER ONLY TESTS
+
+if (this.Worker) {
+
+ // This test creates 4 workers that wait on consecutive memory locations
+ // (8 bytes apart, to satisfy alignment for every kind of wait) to test
+ // various wait/wake combinations. For each combination, each thread waits
+ // 3 times: all 4 threads are woken with wake(4) in the first iteration,
+ // all 4 with wake(5) in the second, and 3 then the remaining 1 in the
+ // third iteration.
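+ // Each wait below uses its own 8-byte-aligned slot (offsets 0, 8, ...,
+ // 112), so both the i32 and i64 wait variants are naturally aligned.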
+
+ let memory = new WebAssembly.Memory({initial: 1, maximum: 1, shared: true});
+ let i32a = new Int32Array(memory.buffer);
+ const numWorkers = 4;
+
+ let workerScript = `onmessage = function(msg) {
+ load("test/mjsunit/wasm/wasm-constants.js");
+ load("test/mjsunit/wasm/wasm-module-builder.js");
+ ${WasmI32AtomicWait.toString()}
+ ${WasmI64AtomicWait.toString()}
+ let id = msg.id;
+ let memory = msg.memory;
+ let i32a = new Int32Array(memory.buffer);
+ // Indices are right-shifted by 2 for Atomics.wait to convert them to
+ // indices into the Int32Array.
+ // for wasm-wake numWorkers threads
+ let result = Atomics.wait(i32a, 0>>>2, 0);
+ postMessage(result);
+ // for wasm-wake numWorkers + 1 threads
+ result = Atomics.wait(i32a, 8>>>2, 0);
+ postMessage(result);
+ // for wasm-wake numWorkers - 1 threads
+ result = Atomics.wait(i32a, 16>>>2, 0);
+ postMessage(result);
+ // for js-wake numWorkers threads
+ result = WasmI32AtomicWait(memory, 0, 24, 0, -1);
+ postMessage(result);
+ // for js-wake numWorkers + 1 threads
+ result = WasmI32AtomicWait(memory, 0, 32, 0, -1);
+ postMessage(result);
+ // for js-wake numWorkers - 1 threads
+ result = WasmI32AtomicWait(memory, 0, 40, 0, -1);
+ postMessage(result);
+ // for wasm-wake numWorkers threads
+ result = WasmI32AtomicWait(memory, 0, 48, 0, -1);
+ postMessage(result);
+ // for wasm-wake numWorkers + 1 threads
+ result = WasmI32AtomicWait(memory, 0, 56, 0, -1);
+ postMessage(result);
+ // for wasm-wake numWorkers - 1 threads
+ result = WasmI32AtomicWait(memory, 0, 64, 0, -1);
+ postMessage(result);
+ // for js-wake numWorkers threads
+ result = WasmI64AtomicWait(memory, 0, 72, 0, 0, -1);
+ postMessage(result);
+ // for js-wake numWorkers + 1 threads
+ result = WasmI64AtomicWait(memory, 0, 80, 0, 0, -1);
+ postMessage(result);
+ // for js-wake numWorkers - 1 threads
+ result = WasmI64AtomicWait(memory, 0, 88, 0, 0, -1);
+ postMessage(result);
+ // for wasm-wake numWorkers threads
+ result = WasmI64AtomicWait(memory, 0, 96, 0, 0, -1);
+ postMessage(result);
+ // for wasm-wake numWorkers + 1 threads
+ result = WasmI64AtomicWait(memory, 0, 104, 0, 0, -1);
+ postMessage(result);
+ // for wasm-wake numWorkers - 1 threads
+ result = WasmI64AtomicWait(memory, 0, 112, 0, 0, -1);
+ postMessage(result);
+ };`;
+
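+ // %AtomicsNumWaitersForTesting (used below) is a natives-syntax test
+ // helper; it is available because of the --allow-natives-syntax flag above.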
+ let waitForAllWorkers = function(index) {
+ // index is right-shifted by 2 to convert it to an Int32Array index
+ while (%AtomicsNumWaitersForTesting(i32a, index>>>2) != numWorkers) {}
+ }
+
+ let jsWakeCheck = function(index, num, workers, msg) {
+ waitForAllWorkers(index);
+ let indexJs = index>>>2; // convert to index in Int32Array
+ if (num >= numWorkers) {
+ // if numWorkers or more is passed to wake, numWorkers workers should be
+ // woken.
+ assertEquals(numWorkers, Atomics.wake(i32a, indexJs, num));
+ } else {
+ // if num < numWorkers is passed to wake, num workers should be woken.
+ // Then the remaining workers are woken for the next part.
+ assertEquals(num, Atomics.wake(i32a, indexJs, num));
+ assertEquals(numWorkers-num, Atomics.wake(i32a, indexJs, numWorkers));
+ }
+ for (let id = 0; id < numWorkers; id++) {
+ assertEquals(msg, workers[id].getMessage());
+ }
+ };
+
+ let wasmWakeCheck = function(index, num, workers, msg) {
+ waitForAllWorkers(index);
+ if (num >= numWorkers) {
+ // if numWorkers or more is passed to wake, numWorkers workers should be
+ // woken.
+ assertEquals(numWorkers, WasmAtomicWake(memory, 0, index, num));
+ } else {
+ // if num < numWorkers is passed to wake, num workers should be woken.
+ // Then the remaining workers are woken for the next part.
+ assertEquals(num, WasmAtomicWake(memory, 0, index, num));
+ assertEquals(numWorkers-num,
+ WasmAtomicWake(memory, 0, index, numWorkers));
+ }
+ for (let id = 0; id < numWorkers; id++) {
+ assertEquals(msg, workers[id].getMessage());
+ }
+ };
+
+ let workers = [];
+ for (let id = 0; id < numWorkers; id++) {
+ workers[id] = new Worker(workerScript, {type: 'string'});
+ workers[id].postMessage({id, memory});
+ }
+
+ wasmWakeCheck(0, numWorkers, workers, "ok");
+ wasmWakeCheck(8, numWorkers + 1, workers, "ok");
+ wasmWakeCheck(16, numWorkers - 1, workers, "ok");
+
+ jsWakeCheck(24, numWorkers, workers, 0);
+ jsWakeCheck(32, numWorkers + 1, workers, 0);
+ jsWakeCheck(40, numWorkers - 1, workers, 0);
+
+ wasmWakeCheck(48, numWorkers, workers, 0);
+ wasmWakeCheck(56, numWorkers + 1, workers, 0);
+ wasmWakeCheck(64, numWorkers - 1, workers, 0);
+
+ jsWakeCheck(72, numWorkers, workers, 0);
+ jsWakeCheck(80, numWorkers + 1, workers, 0);
+ jsWakeCheck(88, numWorkers - 1, workers, 0);
+
+ wasmWakeCheck(96, numWorkers, workers, 0);
+ wasmWakeCheck(104, numWorkers + 1, workers, 0);
+ wasmWakeCheck(112, numWorkers - 1, workers, 0);
+
+ for (let id = 0; id < numWorkers; id++) {
+ workers[id].terminate();
+ }
+}
diff --git a/deps/v8/test/mjsunit/wasm/graceful_shutdown.js b/deps/v8/test/mjsunit/wasm/graceful_shutdown.js
index a3091075ea..1bc90fd417 100644
--- a/deps/v8/test/mjsunit/wasm/graceful_shutdown.js
+++ b/deps/v8/test/mjsunit/wasm/graceful_shutdown.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-wasm --wasm-async-compilation --no-wait-for-wasm
+// Flags: --expose-wasm --no-wait-for-wasm
load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
diff --git a/deps/v8/test/mjsunit/wasm/grow-memory-detaching.js b/deps/v8/test/mjsunit/wasm/grow-memory-detaching.js
index 69273d9184..3581f47202 100644
--- a/deps/v8/test/mjsunit/wasm/grow-memory-detaching.js
+++ b/deps/v8/test/mjsunit/wasm/grow-memory-detaching.js
@@ -11,7 +11,7 @@ let module = (() => {
let builder = new WasmModuleBuilder();
builder.addMemory(1, undefined, false);
builder.addFunction("grow_memory", kSig_i_i)
- .addBody([kExprGetLocal, 0, kExprGrowMemory, kMemoryZero])
+ .addBody([kExprGetLocal, 0, kExprMemoryGrow, kMemoryZero])
.exportFunc();
builder.exportMemoryAs("memory");
return builder.toModule();
diff --git a/deps/v8/test/mjsunit/wasm/grow-memory-in-branch.js b/deps/v8/test/mjsunit/wasm/grow-memory-in-branch.js
index f0e526fcc9..c73f0762ec 100644
--- a/deps/v8/test/mjsunit/wasm/grow-memory-in-branch.js
+++ b/deps/v8/test/mjsunit/wasm/grow-memory-in-branch.js
@@ -27,8 +27,8 @@ function generateBuilder() {
// This test verifies that the effects of growing memory in an if branch
// affect the result of current_memory when the branch is merged.
-(function TestGrowMemoryInIfBranchNoElse() {
- print('TestGrowMemoryInIfBranchNoElse ...');
+(function TestMemoryGrowInIfBranchNoElse() {
+ print('TestMemoryGrowInIfBranchNoElse ...');
let deltaPages = 4;
let builder = generateBuilder();
builder.addFunction('main', kSig_i_i)
@@ -36,7 +36,7 @@ function generateBuilder() {
kExprGetLocal, 0, // get condition parameter
kExprIf, kWasmStmt, // if it's 1 then enter if
kExprI32Const, deltaPages, // put deltaPages on stack
- kExprGrowMemory, kMemoryZero, // grow memory
+ kExprMemoryGrow, kMemoryZero, // grow memory
kExprDrop, // drop the result of grow
kExprEnd,
kExprMemorySize, kMemoryZero // get the memory size
@@ -51,8 +51,8 @@ function generateBuilder() {
// This test verifies that the effects of growing memory in an if branch are
// retained when the branch is merged even when an else branch exists.
-(function TestGrowMemoryInIfBranchWithElse() {
- print('TestGrowMemoryInIfBranchWithElse ...');
+(function TestMemoryGrowInIfBranchWithElse() {
+ print('TestMemoryGrowInIfBranchWithElse ...');
let index = 0;
let oldValue = 21;
let newValue = 42;
@@ -63,7 +63,7 @@ function generateBuilder() {
kExprGetLocal, 0, // get condition parameter
kExprIf, kWasmStmt, // if it's 1 then enter if
kExprI32Const, deltaPages, // put deltaPages on stack
- kExprGrowMemory, kMemoryZero, // grow memory
+ kExprMemoryGrow, kMemoryZero, // grow memory
kExprDrop, // drop the result of grow
kExprElse,
kExprI32Const, index, // put index on stack
@@ -86,8 +86,8 @@ function generateBuilder() {
// This test verifies that the effects of growing memory in an else branch
// affect the result of current_memory when the branch is merged.
-(function TestGrowMemoryInElseBranch() {
- print('TestGrowMemoryInElseBranch ...');
+(function TestMemoryGrowInElseBranch() {
+ print('TestMemoryGrowInElseBranch ...');
let index = 0;
let oldValue = 21;
let newValue = 42;
@@ -102,7 +102,7 @@ function generateBuilder() {
kExprI32StoreMem, 0, 0, // store
kExprElse,
kExprI32Const, deltaPages, // put deltaPages on stack
- kExprGrowMemory, kMemoryZero, // grow memory
+ kExprMemoryGrow, kMemoryZero, // grow memory
kExprDrop, // drop the result of grow
kExprEnd,
kExprMemorySize, kMemoryZero // get the memory size
@@ -121,8 +121,8 @@ function generateBuilder() {
// This test verifies that the effects of growing memory in an if/else
// branch affect the result of current_memory when the branches are merged.
-(function TestGrowMemoryInBothIfAndElse() {
- print('TestGrowMemoryInBothIfAndElse ...');
+(function TestMemoryGrowInBothIfAndElse() {
+ print('TestMemoryGrowInBothIfAndElse ...');
let deltaPagesIf = 1;
let deltaPagesElse = 2;
let builder = generateBuilder();
@@ -131,11 +131,11 @@ function generateBuilder() {
kExprGetLocal, 0, // get condition parameter
kExprIf, kWasmStmt, // if it's 1 then enter if
kExprI32Const, deltaPagesIf, // put deltaPagesIf on stack
- kExprGrowMemory, kMemoryZero, // grow memory
+ kExprMemoryGrow, kMemoryZero, // grow memory
kExprDrop, // drop the result of grow
kExprElse,
kExprI32Const, deltaPagesElse, // put deltaPagesElse on stack
- kExprGrowMemory, kMemoryZero, // grow memory
+ kExprMemoryGrow, kMemoryZero, // grow memory
kExprDrop, // drop the result of grow
kExprEnd,
kExprMemorySize, kMemoryZero // get the memory size
@@ -152,8 +152,8 @@ function generateBuilder() {
// This test verifies that the effects of growing memory in an if branch are
// retained when the branch is merged.
-(function TestGrowMemoryAndStoreInIfBranchNoElse() {
- print('TestGrowMemoryAndStoreInIfBranchNoElse ...');
+(function TestMemoryGrowAndStoreInIfBranchNoElse() {
+ print('TestMemoryGrowAndStoreInIfBranchNoElse ...');
let index = 2 * kPageSize - 4;
let value = 42;
let deltaPages = 1;
@@ -163,7 +163,7 @@ function generateBuilder() {
kExprGetLocal, 0, // get condition parameter
kExprIf, kWasmStmt, // if it's 1 then enter if
kExprI32Const, deltaPages, // put deltaPages on stack
- kExprGrowMemory, kMemoryZero, // grow memory
+ kExprMemoryGrow, kMemoryZero, // grow memory
kExprDrop, // drop the result of grow
kExprGetLocal, 1, // get index parameter
kExprI32Const, value, // put the value on stack
@@ -184,8 +184,8 @@ function generateBuilder() {
// This test verifies that the effects of growing memory in an if branch are
// retained when the branch is merged even when an else branch exists.
-(function TestGrowMemoryAndStoreInIfBranchWithElse() {
- print('TestGrowMemoryAndStoreInIfBranchWithElse ...');
+(function TestMemoryGrowAndStoreInIfBranchWithElse() {
+ print('TestMemoryGrowAndStoreInIfBranchWithElse ...');
let index = 2 * kPageSize - 4;
let value = 42;
let deltaPages = 1;
@@ -195,7 +195,7 @@ function generateBuilder() {
kExprGetLocal, 0, // get condition parameter
kExprIf, kWasmStmt, // if it's 1 then enter if
kExprI32Const, deltaPages, // put deltaPages on stack
- kExprGrowMemory, kMemoryZero, // grow memory
+ kExprMemoryGrow, kMemoryZero, // grow memory
kExprDrop, // drop the result of grow
kExprGetLocal, 1, // get index parameter
kExprI32Const, value, // put the value on stack
@@ -219,8 +219,8 @@ function generateBuilder() {
// This test verifies that the effects of growing memory in an else branch are
// retained when the branch is merged.
-(function TestGrowMemoryAndStoreInElseBranch() {
- print('TestGrowMemoryAndStoreInElseBranch ...');
+(function TestMemoryGrowAndStoreInElseBranch() {
+ print('TestMemoryGrowAndStoreInElseBranch ...');
let index = 2 * kPageSize - 4;
let value = 42;
let deltaPages = 1;
@@ -234,7 +234,7 @@ function generateBuilder() {
kExprI32StoreMem, 0, 0, // store
kExprElse,
kExprI32Const, deltaPages, // put deltaPages on stack
- kExprGrowMemory, kMemoryZero, // grow memory
+ kExprMemoryGrow, kMemoryZero, // grow memory
kExprDrop, // drop the result of grow
kExprGetLocal, 1, // get index parameter
kExprI32Const, value, // put the value on stack
@@ -254,8 +254,8 @@ function generateBuilder() {
// This test verifies that the effects of growing memory in an if/else branch
// are retained when the branch is merged.
-(function TestGrowMemoryAndStoreInBothIfAndElse() {
- print('TestGrowMemoryAndStoreInBothIfAndElse ...');
+(function TestMemoryGrowAndStoreInBothIfAndElse() {
+ print('TestMemoryGrowAndStoreInBothIfAndElse ...');
let index = 0;
let valueIf = 21;
let valueElse = 42;
@@ -267,14 +267,14 @@ function generateBuilder() {
kExprGetLocal, 0, // get condition parameter
kExprIf, kWasmStmt, // if it's 1 then enter if
kExprI32Const, deltaPagesIf, // put deltaPagesIf on stack
- kExprGrowMemory, kMemoryZero, // grow memory
+ kExprMemoryGrow, kMemoryZero, // grow memory
kExprDrop, // drop the result of grow
kExprGetLocal, 1, // get index parameter
kExprI32Const, valueIf, // put valueIf on stack
kExprI32StoreMem, 0, 0, // store
kExprElse,
kExprI32Const, deltaPagesElse, // put deltaPagesElse on stack
- kExprGrowMemory, kMemoryZero, // grow memory
+ kExprMemoryGrow, kMemoryZero, // grow memory
kExprDrop, // drop the result of grow
kExprGetLocal, 1, // get index parameter
kExprI32Const, valueElse, // put valueElse on stack
diff --git a/deps/v8/test/mjsunit/wasm/grow-memory-in-call.js b/deps/v8/test/mjsunit/wasm/grow-memory-in-call.js
index 750e76905a..a87e123501 100644
--- a/deps/v8/test/mjsunit/wasm/grow-memory-in-call.js
+++ b/deps/v8/test/mjsunit/wasm/grow-memory-in-call.js
@@ -15,13 +15,13 @@ print('=== grow_memory in direct calls ===');
// This test verifies that the current_memory instruction returns the correct
// value after returning from a function (direct call) that grew memory.
-(function TestGrowMemoryInFunction() {
- print('TestGrowMemoryInFunction ...');
+(function TestMemoryGrowInFunction() {
+ print('TestMemoryGrowInFunction ...');
let builder = new WasmModuleBuilder();
builder.addMemory(initialMemoryPages, maximumMemoryPages, true);
let kGrowFunction =
builder.addFunction('grow', kSig_i_i)
- .addBody([kExprGetLocal, 0, kExprGrowMemory, kMemoryZero])
+ .addBody([kExprGetLocal, 0, kExprMemoryGrow, kMemoryZero])
.exportFunc()
.index;
builder.addFunction('main', kSig_i_i)
@@ -41,14 +41,14 @@ print('=== grow_memory in direct calls ===');
// This test verifies that accessing a memory page that has been created inside
// a function (direct call) does not trap in the caller.
-(function TestGrowMemoryAndAccessInFunction() {
- print('TestGrowMemoryAndAccessInFunction ...');
+(function TestMemoryGrowAndAccessInFunction() {
+ print('TestMemoryGrowAndAccessInFunction ...');
let index = 2 * kPageSize - 4;
let builder = new WasmModuleBuilder();
builder.addMemory(initialMemoryPages, maximumMemoryPages, true);
let kGrowFunction =
builder.addFunction('grow', kSig_i_i)
- .addBody([kExprGetLocal, 0, kExprGrowMemory, kMemoryZero])
+ .addBody([kExprGetLocal, 0, kExprMemoryGrow, kMemoryZero])
.exportFunc()
.index;
builder.addFunction('load', kSig_i_i)
@@ -75,8 +75,8 @@ print('=== grow_memory in direct calls ===');
// This test verifies that when a function (direct call) grows and stores
// something in the grown memory, the caller always reads from the grown
// memory. This checks that the memory start address gets updated in the caller.
-(function TestGrowMemoryAndStoreInFunction() {
- print('TestGrowMemoryAndStoreInFunction ...');
+(function TestMemoryGrowAndStoreInFunction() {
+ print('TestMemoryGrowAndStoreInFunction ...');
let index = 0;
let oldValue = 21;
let newValue = 42;
@@ -87,7 +87,7 @@ print('=== grow_memory in direct calls ===');
builder.addFunction('grow', kSig_v_v)
.addBody([
kExprI32Const, deltaPages, // always grow memory by deltaPages
- kExprGrowMemory, kMemoryZero, // grow memory
+ kExprMemoryGrow, kMemoryZero, // grow memory
kExprDrop, // drop the result of grow
kExprI32Const, index, // put index on stack
kExprI32Const, newValue, // put new value on stack
@@ -113,13 +113,13 @@ print('=== grow_memory in direct calls ===');
// This test verifies that the effects of growing memory in a directly
// called function inside a loop affect the result of current_memory when
// the loop is over.
-(function TestGrowMemoryInFunctionInsideLoop() {
- print('TestGrowMemoryInFunctionInsideLoop ...');
+(function TestMemoryGrowInFunctionInsideLoop() {
+ print('TestMemoryGrowInFunctionInsideLoop ...');
let builder = new WasmModuleBuilder();
builder.addMemory(initialMemoryPages, maximumMemoryPages, true);
let kGrowFunction =
builder.addFunction('grow', kSig_i_i)
- .addBody([kExprGetLocal, 0, kExprGrowMemory, kMemoryZero])
+ .addBody([kExprGetLocal, 0, kExprMemoryGrow, kMemoryZero])
.exportFunc()
.index;
builder.addFunction('main', kSig_i_ii)
@@ -156,8 +156,8 @@ print('=== grow_memory in direct calls ===');
// This test verifies that the effects of writing to memory grown in a
// directly called function inside a loop are retained when the loop is over.
-(function TestGrowMemoryAndStoreInFunctionInsideLoop() {
- print('TestGrowMemoryAndStoreInFunctionInsideLoop ...');
+(function TestMemoryGrowAndStoreInFunctionInsideLoop() {
+ print('TestMemoryGrowAndStoreInFunctionInsideLoop ...');
let builder = new WasmModuleBuilder();
builder.addMemory(initialMemoryPages, maximumMemoryPages, true);
builder.addFunction('store', kSig_i_ii)
@@ -168,7 +168,7 @@ print('=== grow_memory in direct calls ===');
.exportFunc();
let kGrowFunction =
builder.addFunction('grow', kSig_i_i)
- .addBody([kExprGetLocal, 0, kExprGrowMemory, kMemoryZero])
+ .addBody([kExprGetLocal, 0, kExprMemoryGrow, kMemoryZero])
.exportFunc()
.index;
// parameters: iterations, deltaPages, index
@@ -220,13 +220,13 @@ print('\n=== grow_memory in indirect calls ===');
// This test verifies that the current_memory instruction returns the correct
// value after returning from a function (indirect call) that grew memory.
-(function TestGrowMemoryInIndirectCall() {
- print('TestGrowMemoryInIndirectCall ...');
+(function TestMemoryGrowInIndirectCall() {
+ print('TestMemoryGrowInIndirectCall ...');
let builder = new WasmModuleBuilder();
builder.addMemory(initialMemoryPages, maximumMemoryPages, true);
let kGrowFunction =
builder.addFunction('grow', kSig_i_i)
- .addBody([kExprGetLocal, 0, kExprGrowMemory, kMemoryZero])
+ .addBody([kExprGetLocal, 0, kExprMemoryGrow, kMemoryZero])
.exportFunc()
.index;
builder.addFunction('main', kSig_i_ii)
@@ -249,14 +249,14 @@ print('\n=== grow_memory in indirect calls ===');
// This test verifies that accessing a memory page that has been created inside
// a function (indirect call) does not trap in the caller.
-(function TestGrowMemoryAndAccessInIndirectCall() {
- print('TestGrowMemoryAndAccessInIndirectCall ...');
+(function TestMemoryGrowAndAccessInIndirectCall() {
+ print('TestMemoryGrowAndAccessInIndirectCall ...');
let index = 2 * kPageSize - 4;
let builder = new WasmModuleBuilder();
builder.addMemory(initialMemoryPages, maximumMemoryPages, true);
let kGrowFunction =
builder.addFunction('grow', kSig_i_i)
- .addBody([kExprGetLocal, 0, kExprGrowMemory, kMemoryZero])
+ .addBody([kExprGetLocal, 0, kExprMemoryGrow, kMemoryZero])
.exportFunc()
.index;
builder.addFunction('load', kSig_i_i)
@@ -287,8 +287,8 @@ print('\n=== grow_memory in indirect calls ===');
// This test verifies that when a function (indirect call) grows and stores
// something in the grown memory, the caller always reads from the grown
// memory. This checks that the memory start address gets updated in the caller.
-(function TestGrowMemoryAndStoreInIndirectCall() {
- print('TestGrowMemoryAndStoreInIndirectCall ...');
+(function TestMemoryGrowAndStoreInIndirectCall() {
+ print('TestMemoryGrowAndStoreInIndirectCall ...');
let index = 0;
let oldValue = 21;
let newValue = 42;
@@ -299,7 +299,7 @@ print('\n=== grow_memory in indirect calls ===');
builder.addFunction('grow', kSig_v_v)
.addBody([
kExprI32Const, deltaPages, // always grow memory by deltaPages
- kExprGrowMemory, kMemoryZero, // grow memory
+ kExprMemoryGrow, kMemoryZero, // grow memory
kExprDrop, // drop the result of grow
kExprI32Const, index, // put index on stack
kExprI32Const, newValue, // put new value on stack
@@ -327,13 +327,13 @@ print('\n=== grow_memory in indirect calls ===');
// This test verifies that the effects of growing memory in an indirectly
// called function inside a loop affect the result of current_memory when
// the loop is over.
-(function TestGrowMemoryInIndirectCallInsideLoop() {
- print('TestGrowMemoryInIndirectCallInsideLoop ...');
+(function TestMemoryGrowInIndirectCallInsideLoop() {
+ print('TestMemoryGrowInIndirectCallInsideLoop ...');
let builder = new WasmModuleBuilder();
builder.addMemory(initialMemoryPages, maximumMemoryPages, true);
let kGrowFunction =
builder.addFunction('grow', kSig_i_i)
- .addBody([kExprGetLocal, 0, kExprGrowMemory, kMemoryZero])
+ .addBody([kExprGetLocal, 0, kExprMemoryGrow, kMemoryZero])
.exportFunc()
.index;
builder.addFunction('main', kSig_i_iii)
@@ -372,14 +372,14 @@ print('\n=== grow_memory in indirect calls ===');
// This test verifies that the effects of writing to memory grown in an
// indirectly called function inside a loop are retained when the loop is over.
-(function TestGrowMemoryAndStoreInIndirectCallInsideLoop() {
- print('TestGrowMemoryAndStoreInIndirectCallInsideLoop ...');
+(function TestMemoryGrowAndStoreInIndirectCallInsideLoop() {
+ print('TestMemoryGrowAndStoreInIndirectCallInsideLoop ...');
let builder = new WasmModuleBuilder();
let deltaPages = 1;
builder.addMemory(initialMemoryPages, maximumMemoryPages, true);
let kGrowFunction =
builder.addFunction('grow', kSig_i_i)
- .addBody([kExprGetLocal, 0, kExprGrowMemory, kMemoryZero])
+ .addBody([kExprGetLocal, 0, kExprMemoryGrow, kMemoryZero])
.exportFunc()
.index;
builder.addFunction('store', kSig_i_ii)
diff --git a/deps/v8/test/mjsunit/wasm/grow-memory-in-loop.js b/deps/v8/test/mjsunit/wasm/grow-memory-in-loop.js
index 8bb1018825..908f966017 100644
--- a/deps/v8/test/mjsunit/wasm/grow-memory-in-loop.js
+++ b/deps/v8/test/mjsunit/wasm/grow-memory-in-loop.js
@@ -24,8 +24,8 @@ function generateBuilder() {
// This test verifies that the effects of growing memory inside a loop
// affect the result of current_memory when the loop is over.
-(function TestGrowMemoryInsideLoop() {
- print('TestGrowMemoryInsideLoop ...');
+(function TestMemoryGrowInsideLoop() {
+ print('TestMemoryGrowInsideLoop ...');
let deltaPages = 1;
let builder = generateBuilder();
builder.addFunction('main', kSig_i_i)
@@ -36,7 +36,7 @@ function generateBuilder() {
kExprIf, kWasmStmt, // if <param0> != 0
// Grow memory.
kExprI32Const, deltaPages, // -
- kExprGrowMemory, kMemoryZero, // grow memory
+ kExprMemoryGrow, kMemoryZero, // grow memory
kExprDrop, // drop the result of grow
// Decrease loop variable.
kExprGetLocal, 0, // -
@@ -71,8 +71,8 @@ function generateBuilder() {
// This test verifies that a loop does not affect the result of current_memory
// when the memory is grown both inside and outside the loop.
-(function TestGrowMemoryInsideAndOutsideLoop() {
- print('TestGrowMemoryInsideAndOutsideLoop ...');
+(function TestMemoryGrowInsideAndOutsideLoop() {
+ print('TestMemoryGrowInsideAndOutsideLoop ...');
let deltaPagesIn = 1;
let deltaPagesOut = 2;
let builder = generateBuilder();
@@ -81,14 +81,14 @@ function generateBuilder() {
// clang-format off
// Grow memory.
kExprI32Const, deltaPagesOut, // -
- kExprGrowMemory, kMemoryZero, // grow memory
+ kExprMemoryGrow, kMemoryZero, // grow memory
kExprDrop, // drop the result of grow
kExprLoop, kWasmStmt, // while
kExprGetLocal, 0, // -
kExprIf, kWasmStmt, // if <param0> != 0
// Grow memory.
kExprI32Const, deltaPagesIn, // -
- kExprGrowMemory, kMemoryZero, // grow memory
+ kExprMemoryGrow, kMemoryZero, // grow memory
kExprDrop, // drop the result of grow
// Decrease loop variable.
kExprGetLocal, 0, // -
@@ -125,8 +125,8 @@ function generateBuilder() {
// This test verifies that the effects of writing to memory grown inside a loop
// are retained when the loop is over.
-(function TestGrowMemoryAndStoreInsideLoop() {
- print('TestGrowMemoryAndStoreInsideLoop ...');
+(function TestMemoryGrowAndStoreInsideLoop() {
+ print('TestMemoryGrowAndStoreInsideLoop ...');
let deltaPages = 1;
let builder = generateBuilder();
builder.addFunction('main', kSig_i_ii)
@@ -137,7 +137,7 @@ function generateBuilder() {
kExprIf, kWasmStmt, // if <param0> != 0
// Grow memory.
kExprI32Const, deltaPages, // -
- kExprGrowMemory, kMemoryZero, // grow memory
+ kExprMemoryGrow, kMemoryZero, // grow memory
kExprDrop, // drop the result of grow
// Increase counter in memory.
kExprGetLocal, 1, // put index (for store)
@@ -183,8 +183,8 @@ function generateBuilder() {
// This test verifies that a loop does not affect the memory when the
// memory is grown both inside and outside the loop.
-(function TestGrowMemoryAndStoreInsideAndOutsideLoop() {
- print('TestGrowMemoryAndStoreInsideAndOutsideLoop ...');
+(function TestMemoryGrowAndStoreInsideAndOutsideLoop() {
+ print('TestMemoryGrowAndStoreInsideAndOutsideLoop ...');
let deltaPagesIn = 1;
let deltaPagesOut = 2;
let builder = generateBuilder();
@@ -193,7 +193,7 @@ function generateBuilder() {
// clang-format off
// Grow memory.
kExprI32Const, deltaPagesOut, // -
- kExprGrowMemory, kMemoryZero, // grow memory
+ kExprMemoryGrow, kMemoryZero, // grow memory
kExprDrop, // drop the result of grow
// Increase counter in memory.
kExprGetLocal, 1, // put index (for store)
@@ -208,7 +208,7 @@ function generateBuilder() {
kExprIf, kWasmStmt, // if <param0> != 0
// Grow memory.
kExprI32Const, deltaPagesIn, // -
- kExprGrowMemory, kMemoryZero, // grow memory
+ kExprMemoryGrow, kMemoryZero, // grow memory
kExprDrop, // drop the result of grow
// Increase counter in memory.
kExprGetLocal, 1, // put index (for store)
diff --git a/deps/v8/test/mjsunit/wasm/grow-memory.js b/deps/v8/test/mjsunit/wasm/grow-memory.js
index 48e3252d08..3ecdb9aa1e 100644
--- a/deps/v8/test/mjsunit/wasm/grow-memory.js
+++ b/deps/v8/test/mjsunit/wasm/grow-memory.js
@@ -8,10 +8,10 @@ load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
-function genGrowMemoryBuilder() {
+function genMemoryGrowBuilder() {
var builder = new WasmModuleBuilder();
builder.addFunction("grow_memory", kSig_i_i)
- .addBody([kExprGetLocal, 0, kExprGrowMemory, kMemoryZero])
+ .addBody([kExprGetLocal, 0, kExprMemoryGrow, kMemoryZero])
.exportFunc();
builder.addFunction("load", kSig_i_i)
.addBody([kExprGetLocal, 0, kExprI32LoadMem, 0, 0])
@@ -43,8 +43,8 @@ var kV8MaxPages = 32767;
// TODO(gdeepti): Generate tests programmatically for all the sizes instead of
// current implementation.
-function testGrowMemoryReadWrite32() {
- var builder = genGrowMemoryBuilder();
+function testMemoryGrowReadWrite32() {
+ var builder = genMemoryGrowBuilder();
builder.addMemory(1, undefined, false);
var module = builder.instantiate();
var offset;
@@ -88,10 +88,10 @@ function testGrowMemoryReadWrite32() {
}
}
-testGrowMemoryReadWrite32();
+testMemoryGrowReadWrite32();
-function testGrowMemoryReadWrite16() {
- var builder = genGrowMemoryBuilder();
+function testMemoryGrowReadWrite16() {
+ var builder = genMemoryGrowBuilder();
builder.addMemory(1, undefined, false);
var module = builder.instantiate();
var offset;
@@ -135,10 +135,10 @@ function testGrowMemoryReadWrite16() {
}
}
-testGrowMemoryReadWrite16();
+testMemoryGrowReadWrite16();
-function testGrowMemoryReadWrite8() {
- var builder = genGrowMemoryBuilder();
+function testMemoryGrowReadWrite8() {
+ var builder = genMemoryGrowBuilder();
builder.addMemory(1, undefined, false);
var module = builder.instantiate();
var offset;
@@ -182,10 +182,10 @@ function testGrowMemoryReadWrite8() {
}
}
-testGrowMemoryReadWrite8();
+testMemoryGrowReadWrite8();
-function testGrowMemoryZeroInitialSize() {
- var builder = genGrowMemoryBuilder();
+function testMemoryGrowZeroInitialSize() {
+ var builder = genMemoryGrowBuilder();
builder.addMemory(0, undefined, false);
var module = builder.instantiate();
var offset;
@@ -216,10 +216,10 @@ function testGrowMemoryZeroInitialSize() {
assertEquals(20, peek());
}
-testGrowMemoryZeroInitialSize();
+testMemoryGrowZeroInitialSize();
-function testGrowMemoryZeroInitialSize32() {
- var builder = genGrowMemoryBuilder();
+function testMemoryGrowZeroInitialSize32() {
+ var builder = genMemoryGrowBuilder();
builder.addMemory(0, undefined, false);
var module = builder.instantiate();
var offset;
@@ -242,10 +242,10 @@ function testGrowMemoryZeroInitialSize32() {
}
}
-testGrowMemoryZeroInitialSize32();
+testMemoryGrowZeroInitialSize32();
-function testGrowMemoryZeroInitialSize16() {
- var builder = genGrowMemoryBuilder();
+function testMemoryGrowZeroInitialSize16() {
+ var builder = genMemoryGrowBuilder();
builder.addMemory(0, undefined, false);
var module = builder.instantiate();
var offset;
@@ -268,10 +268,10 @@ function testGrowMemoryZeroInitialSize16() {
}
}
-testGrowMemoryZeroInitialSize16();
+testMemoryGrowZeroInitialSize16();
-function testGrowMemoryZeroInitialSize8() {
- var builder = genGrowMemoryBuilder();
+function testMemoryGrowZeroInitialSize8() {
+ var builder = genMemoryGrowBuilder();
builder.addMemory(0, undefined, false);
var module = builder.instantiate();
var offset;
@@ -294,30 +294,30 @@ function testGrowMemoryZeroInitialSize8() {
}
}
-testGrowMemoryZeroInitialSize8();
+testMemoryGrowZeroInitialSize8();
-function testGrowMemoryTrapMaxPagesZeroInitialMemory() {
- var builder = genGrowMemoryBuilder();
+function testMemoryGrowTrapMaxPagesZeroInitialMemory() {
+ var builder = genMemoryGrowBuilder();
builder.addMemory(0, undefined, false);
var module = builder.instantiate();
function growMem(pages) { return module.exports.grow_memory(pages); }
assertEquals(-1, growMem(kV8MaxPages + 1));
}
-testGrowMemoryTrapMaxPagesZeroInitialMemory();
+testMemoryGrowTrapMaxPagesZeroInitialMemory();
-function testGrowMemoryTrapMaxPages() {
- var builder = genGrowMemoryBuilder();
+function testMemoryGrowTrapMaxPages() {
+ var builder = genMemoryGrowBuilder();
builder.addMemory(1, 1, false);
var module = builder.instantiate();
function growMem(pages) { return module.exports.grow_memory(pages); }
assertEquals(-1, growMem(kV8MaxPages));
}
-testGrowMemoryTrapMaxPages();
+testMemoryGrowTrapMaxPages();
-function testGrowMemoryTrapsWithNonSmiInput() {
- var builder = genGrowMemoryBuilder();
+function testMemoryGrowTrapsWithNonSmiInput() {
+ var builder = genMemoryGrowBuilder();
builder.addMemory(0, undefined, false);
var module = builder.instantiate();
function growMem(pages) { return module.exports.grow_memory(pages); }
@@ -326,10 +326,10 @@ function testGrowMemoryTrapsWithNonSmiInput() {
assertEquals(-1, growMem(-1));
};
-testGrowMemoryTrapsWithNonSmiInput();
+testMemoryGrowTrapsWithNonSmiInput();
-function testGrowMemoryCurrentMemory() {
- var builder = genGrowMemoryBuilder();
+function testMemoryGrowCurrentMemory() {
+ var builder = genMemoryGrowBuilder();
builder.addMemory(1, undefined, false);
builder.addFunction("memory_size", kSig_i_v)
.addBody([kExprMemorySize, kMemoryZero])
@@ -342,10 +342,10 @@ function testGrowMemoryCurrentMemory() {
assertEquals(2, MemSize());
}
-testGrowMemoryCurrentMemory();
+testMemoryGrowCurrentMemory();
-function testGrowMemoryPreservesDataMemOp32() {
- var builder = genGrowMemoryBuilder();
+function testMemoryGrowPreservesDataMemOp32() {
+ var builder = genMemoryGrowBuilder();
builder.addMemory(1, undefined, false);
var module = builder.instantiate();
var offset, val;
@@ -365,10 +365,10 @@ function testGrowMemoryPreservesDataMemOp32() {
}
}
-testGrowMemoryPreservesDataMemOp32();
+testMemoryGrowPreservesDataMemOp32();
-function testGrowMemoryPreservesDataMemOp16() {
- var builder = genGrowMemoryBuilder();
+function testMemoryGrowPreservesDataMemOp16() {
+ var builder = genMemoryGrowBuilder();
builder.addMemory(1, undefined, false);
var module = builder.instantiate();
var offset, val;
@@ -388,10 +388,10 @@ function testGrowMemoryPreservesDataMemOp16() {
}
}
-testGrowMemoryPreservesDataMemOp16();
+testMemoryGrowPreservesDataMemOp16();
-function testGrowMemoryPreservesDataMemOp8() {
- var builder = genGrowMemoryBuilder();
+function testMemoryGrowPreservesDataMemOp8() {
+ var builder = genMemoryGrowBuilder();
builder.addMemory(1, undefined, false);
var module = builder.instantiate();
var offset, val = 0;
@@ -415,10 +415,10 @@ function testGrowMemoryPreservesDataMemOp8() {
}
}
-testGrowMemoryPreservesDataMemOp8();
+testMemoryGrowPreservesDataMemOp8();
-function testGrowMemoryOutOfBoundsOffset() {
- var builder = genGrowMemoryBuilder();
+function testMemoryGrowOutOfBoundsOffset() {
+ var builder = genMemoryGrowBuilder();
builder.addMemory(1, undefined, false);
var module = builder.instantiate();
var offset, val;
@@ -447,16 +447,16 @@ function testGrowMemoryOutOfBoundsOffset() {
}
}
-testGrowMemoryOutOfBoundsOffset();
+testMemoryGrowOutOfBoundsOffset();
-function testGrowMemoryOutOfBoundsOffset2() {
+function testMemoryGrowOutOfBoundsOffset2() {
var builder = new WasmModuleBuilder();
builder.addMemory(16, 128, false);
builder.addFunction("main", kSig_v_v)
.addBody([
kExprI32Const, 20,
kExprI32Const, 29,
- kExprGrowMemory, kMemoryZero,
+ kExprMemoryGrow, kMemoryZero,
kExprI32StoreMem, 0, 0xFF, 0xFF, 0xFF, 0x3a
])
.exportAs("main");
@@ -464,10 +464,10 @@ function testGrowMemoryOutOfBoundsOffset2() {
assertTraps(kTrapMemOutOfBounds, module.exports.main);
}
-testGrowMemoryOutOfBoundsOffset2();
+testMemoryGrowOutOfBoundsOffset2();
-function testGrowMemoryDeclaredMaxTraps() {
- var builder = genGrowMemoryBuilder();
+function testMemoryGrowDeclaredMaxTraps() {
+ var builder = genMemoryGrowBuilder();
builder.addMemory(1, 16, false);
var module = builder.instantiate();
function growMem(pages) { return module.exports.grow_memory(pages); }
@@ -476,13 +476,13 @@ function testGrowMemoryDeclaredMaxTraps() {
assertEquals(-1, growMem(6));
}
-testGrowMemoryDeclaredMaxTraps();
+testMemoryGrowDeclaredMaxTraps();
-function testGrowMemoryDeclaredSpecMaxTraps() {
+function testMemoryGrowDeclaredSpecMaxTraps() {
// The spec maximum is higher than the internal V8 maximum. This test only
// checks that grow_memory does not grow past the internally defined maximum,
// reflecting the current implementation.
- var builder = genGrowMemoryBuilder();
+ var builder = genMemoryGrowBuilder();
builder.addMemory(1, kSpecMaxPages, false);
var module = builder.instantiate();
function poke(value) { return module.exports.store(offset, value); }
@@ -491,11 +491,11 @@ function testGrowMemoryDeclaredSpecMaxTraps() {
assertEquals(-1, growMem(kV8MaxPages - 20));
}
-testGrowMemoryDeclaredSpecMaxTraps();
+testMemoryGrowDeclaredSpecMaxTraps();
-function testGrowMemory2Gb() {
- print("testGrowMemory2Gb");
- var builder = genGrowMemoryBuilder();
+function testMemoryGrow2Gb() {
+ print("testMemoryGrow2Gb");
+ var builder = genMemoryGrowBuilder();
builder.addMemory(1, undefined, false);
var module = builder.instantiate();
var offset, val;
@@ -542,4 +542,4 @@ function testGrowMemory2Gb() {
}
}
-testGrowMemory2Gb();
+testMemoryGrow2Gb();
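+
+// Note: kExprGrowMemory is renamed kExprMemoryGrow throughout this change,
+// following the spec rename of grow_memory to memory.grow; the instruction
+// itself is unchanged. A minimal use, as in genMemoryGrowBuilder above:
+//   .addBody([kExprGetLocal, 0, kExprMemoryGrow, kMemoryZero])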
diff --git a/deps/v8/test/mjsunit/wasm/huge-memory.js b/deps/v8/test/mjsunit/wasm/huge-memory.js
index 9719ad4a28..188805bb5f 100644
--- a/deps/v8/test/mjsunit/wasm/huge-memory.js
+++ b/deps/v8/test/mjsunit/wasm/huge-memory.js
@@ -8,7 +8,8 @@
load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
-function testHugeMemory() {
+(function testHugeMemory() {
+ print(arguments.callee.name);
var builder = new WasmModuleBuilder();
const num_pages = 49152; // 3GB
@@ -30,10 +31,10 @@ function testHugeMemory() {
assertEquals(0, geti(2500, 1 << 20));
print("Out of bounds");
assertTraps(kTrapMemOutOfBounds, () => geti(3500, 1 << 20));
-}
-testHugeMemory();
+})();
-function testHugeMemoryConstInBounds() {
+(function testHugeMemoryConstInBounds() {
+ print(arguments.callee.name);
var builder = new WasmModuleBuilder();
const num_pages = 49152; // 3GB
@@ -51,10 +52,10 @@ function testHugeMemoryConstInBounds() {
print("In bounds");
assertEquals(0, geti());
-}
-testHugeMemoryConstInBounds();
+})();
-function testHugeMemoryConstOutOfBounds() {
+(function testHugeMemoryConstOutOfBounds() {
+ print(arguments.callee.name);
var builder = new WasmModuleBuilder();
const num_pages = 49152; // 3GB
@@ -72,5 +73,11 @@ function testHugeMemoryConstOutOfBounds() {
print("Out of bounds");
assertTraps(kTrapMemOutOfBounds, geti);
-}
-testHugeMemoryConstOutOfBounds();
+})();
+
+(function testGrowHugeMemory() {
+ print(arguments.callee.name);
+
+ let mem = new WebAssembly.Memory({initial: 1});
+ mem.grow(49151);
+})();
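+
+// At 64KiB per wasm page, 49152 pages is exactly 3GiB, so growing the
+// 1-page memory by 49151 pages reaches the same 3GB size used above.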
diff --git a/deps/v8/test/mjsunit/wasm/import-memory.js b/deps/v8/test/mjsunit/wasm/import-memory.js
index ca91c698c8..f099af56b4 100644
--- a/deps/v8/test/mjsunit/wasm/import-memory.js
+++ b/deps/v8/test/mjsunit/wasm/import-memory.js
@@ -112,7 +112,7 @@ var kV8MaxPages = 32767;
}
})();
-(function TestGrowMemoryMaxDesc() {
+(function TestMemoryGrowMaxDesc() {
print("MaximumDescriptor");
let memory = new WebAssembly.Memory({initial: 1, maximum: 5});
assertEquals(kPageSize, memory.buffer.byteLength);
@@ -150,7 +150,7 @@ var kV8MaxPages = 32767;
assertThrows(() => memory.grow(1));
})();
-(function TestGrowMemoryZeroInitialMemory() {
+(function TestMemoryGrowZeroInitialMemory() {
print("ZeroInitialMemory");
let memory = new WebAssembly.Memory({initial: 0});
assertEquals(0, memory.buffer.byteLength);
@@ -188,7 +188,7 @@ var kV8MaxPages = 32767;
assertEquals(2*kPageSize, memory.buffer.byteLength);
let builder = new WasmModuleBuilder();
builder.addFunction("grow", kSig_i_i)
- .addBody([kExprGetLocal, 0, kExprGrowMemory, kMemoryZero])
+ .addBody([kExprGetLocal, 0, kExprMemoryGrow, kMemoryZero])
.exportFunc();
builder.addImportedMemory("cat", "mine");
let instance = builder.instantiate({cat: {mine: memory}});
@@ -200,8 +200,8 @@ var kV8MaxPages = 32767;
assertThrows(() => memory.grow(1));
})();
-(function TestGrowMemoryExportedMaximum() {
- print("TestGrowMemoryExportedMaximum");
+(function TestMemoryGrowExportedMaximum() {
+ print("TestMemoryGrowExportedMaximum");
let initial_size = 1, maximum_size = 10;
var exp_instance;
{
@@ -218,7 +218,7 @@ var kV8MaxPages = 32767;
.addBody([kExprMemorySize, kMemoryZero])
.exportFunc();
builder.addFunction("grow", kSig_i_i)
- .addBody([kExprGetLocal, 0, kExprGrowMemory, kMemoryZero])
+ .addBody([kExprGetLocal, 0, kExprMemoryGrow, kMemoryZero])
.exportFunc();
instance = builder.instantiate({fur: {
imported_mem: exp_instance.exports.exported_mem}});
@@ -239,7 +239,7 @@ var kV8MaxPages = 32767;
.addBody([kExprMemorySize, kMemoryZero])
.exportAs("mem_size");
builder.addFunction("grow", kSig_i_i)
- .addBody([kExprGetLocal, 0, kExprGrowMemory, kMemoryZero])
+ .addBody([kExprGetLocal, 0, kExprMemoryGrow, kMemoryZero])
.exportFunc();
var module = new WebAssembly.Module(builder.toBuffer());
var instances = [];
@@ -258,7 +258,7 @@ var kV8MaxPages = 32767;
verify_mem_size(1);
// Verify memory size with interleaving calls to Memory.grow,
- // GrowMemory opcode.
+ // MemoryGrow opcode.
var current_mem_size = 1;
for (var i = 0; i < 5; i++) {
function grow(pages) { return instances[i].exports.grow(pages); }
@@ -280,7 +280,7 @@ var kV8MaxPages = 32767;
.addBody([kExprMemorySize, kMemoryZero])
.exportFunc();
builder.addFunction("grow", kSig_i_i)
- .addBody([kExprGetLocal, 0, kExprGrowMemory, kMemoryZero])
+ .addBody([kExprGetLocal, 0, kExprMemoryGrow, kMemoryZero])
.exportFunc();
var instances = [];
for (var i = 0; i < 5; i++) {
@@ -345,7 +345,7 @@ var kV8MaxPages = 32767;
.addBody([kExprMemorySize, kMemoryZero])
.exportFunc();
builder.addFunction("grow", kSig_i_i)
- .addBody([kExprGetLocal, 0, kExprGrowMemory, kMemoryZero])
+ .addBody([kExprGetLocal, 0, kExprMemoryGrow, kMemoryZero])
.exportFunc();
var instances = [];
for (var i = 0; i < 10; i++) {
@@ -380,7 +380,7 @@ var kV8MaxPages = 32767;
builder.addMemory(1, kSpecMaxPages, true);
builder.exportMemoryAs("exported_mem");
builder.addFunction("grow", kSig_i_i)
- .addBody([kExprGetLocal, 0, kExprGrowMemory, kMemoryZero])
+ .addBody([kExprGetLocal, 0, kExprMemoryGrow, kMemoryZero])
.exportFunc();
instance_1 = builder.instantiate();
}
@@ -388,7 +388,7 @@ var kV8MaxPages = 32767;
let builder = new WasmModuleBuilder();
builder.addImportedMemory("doo", "imported_mem");
builder.addFunction("grow", kSig_i_i)
- .addBody([kExprGetLocal, 0, kExprGrowMemory, kMemoryZero])
+ .addBody([kExprGetLocal, 0, kExprMemoryGrow, kMemoryZero])
.exportFunc();
instance_2 = builder.instantiate({
doo: {imported_mem: instance_1.exports.exported_mem}});
@@ -408,7 +408,7 @@ var kV8MaxPages = 32767;
.addBody([kExprMemorySize, kMemoryZero])
.exportFunc();
builder.addFunction("grow", kSig_i_i)
- .addBody([kExprGetLocal, 0, kExprGrowMemory, kMemoryZero])
+ .addBody([kExprGetLocal, 0, kExprMemoryGrow, kMemoryZero])
.exportFunc();
instance = builder.instantiate();
assertEquals(kPageSize, instance.exports.exported_mem.buffer.byteLength);
diff --git a/deps/v8/test/mjsunit/wasm/import-mutable-global.js b/deps/v8/test/mjsunit/wasm/import-mutable-global.js
index 46b9ca5f5a..a1133ee3f7 100644
--- a/deps/v8/test/mjsunit/wasm/import-mutable-global.js
+++ b/deps/v8/test/mjsunit/wasm/import-mutable-global.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --experimental-wasm-mut-global --expose-gc
+// Flags: --expose-gc
load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
diff --git a/deps/v8/test/mjsunit/wasm/import-table.js b/deps/v8/test/mjsunit/wasm/import-table.js
index bb8bf7807b..881f0b3b2c 100644
--- a/deps/v8/test/mjsunit/wasm/import-table.js
+++ b/deps/v8/test/mjsunit/wasm/import-table.js
@@ -42,8 +42,8 @@ let kTableSize = 50;
let f17 = addConstFunc(builder, 17);
builder.addExport("f15", f15);
builder.addExport("f17", f17);
- builder.addFunctionTableInit(15, false, [f15], true);
- builder.addFunctionTableInit(1, false, [call.index], true);
+ builder.addElementSegment(15, false, [f15], true);
+ builder.addElementSegment(1, false, [call.index], true);
var mod1 = builder.toModule();
}
@@ -63,10 +63,10 @@ let kTableSize = 50;
])
.exportAs("call");
let f26 = addConstFunc(builder, 26);
- builder.addFunctionTableInit(17, false, [f17], true);
- builder.addFunctionTableInit(21, false, [f21], true);
- builder.addFunctionTableInit(26, false, [f26], true);
- builder.addFunctionTableInit(5, false, [call.index], true);
+ builder.addElementSegment(17, false, [f17], true);
+ builder.addElementSegment(21, false, [f21], true);
+ builder.addElementSegment(26, false, [f26], true);
+ builder.addElementSegment(5, false, [call.index], true);
var mod2 = builder.toModule();
}
@@ -114,8 +114,8 @@ function addConstFuncUsingGlobal(builder, val) {
let f18 = addConstFuncUsingGlobal(builder, 18);
builder.addExport("f14", f14);
builder.addExport("f18", f18);
- builder.addFunctionTableInit(14, false, [f14], true);
- builder.addFunctionTableInit(1, false, [call.index], true);
+ builder.addElementSegment(14, false, [f14], true);
+ builder.addElementSegment(1, false, [call.index], true);
var mod1 = builder.toModule();
}
@@ -135,10 +135,10 @@ function addConstFuncUsingGlobal(builder, val) {
])
.exportAs("call");
let f28 = addConstFuncUsingGlobal(builder, 28);
- builder.addFunctionTableInit(18, false, [f18], true);
- builder.addFunctionTableInit(22, false, [f22], true);
- builder.addFunctionTableInit(28, false, [f28], true);
- builder.addFunctionTableInit(5, false, [call.index], true);
+ builder.addElementSegment(18, false, [f18], true);
+ builder.addElementSegment(22, false, [f22], true);
+ builder.addElementSegment(28, false, [f28], true);
+ builder.addElementSegment(5, false, [call.index], true);
var mod2 = builder.toModule();
}
@@ -194,8 +194,8 @@ function addConstFuncUsingMemory(builder, val) {
let f19 = addConstFuncUsingMemory(builder, 19);
builder.addExport("f13", f13);
builder.addExport("f19", f19);
- builder.addFunctionTableInit(13, false, [f13], true);
- builder.addFunctionTableInit(1, false, [call.index], true);
+ builder.addElementSegment(13, false, [f13], true);
+ builder.addElementSegment(1, false, [call.index], true);
var mod1 = builder.toModule();
}
@@ -217,10 +217,10 @@ function addConstFuncUsingMemory(builder, val) {
])
.exportAs("call");
let f29 = addConstFuncUsingMemory(builder, 29);
- builder.addFunctionTableInit(19, false, [f19], true);
- builder.addFunctionTableInit(23, false, [f23], true);
- builder.addFunctionTableInit(29, false, [f29], true);
- builder.addFunctionTableInit(5, false, [call.index], true);
+ builder.addElementSegment(19, false, [f19], true);
+ builder.addElementSegment(23, false, [f23], true);
+ builder.addElementSegment(29, false, [f29], true);
+ builder.addElementSegment(5, false, [call.index], true);
var mod2 = builder.toModule();
}
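Likewise, addFunctionTableInit was renamed to addElementSegment throughout, matching the spec's name for the table-initializer sections these calls emit. A sketch of the builder API as used above, under the same helper assumptions:

  const builder = new WasmModuleBuilder();
  const sig_i_v = builder.addType(kSig_i_v);
  const f = builder.addFunction("f", sig_i_v).addBody([kExprI32Const, 42]);
  builder.setTableBounds(10, 10);
  // Element segment: place f at constant offset 3. The second argument selects
  // the base (false = constant, true = taken from an imported global).
  builder.addElementSegment(3, false, [f.index]);
  builder.addFunction("main", kSig_i_i)
      .addBody([kExprGetLocal, 0, kExprCallIndirect, sig_i_v, kTableZero])
      .exportFunc();
  assertEquals(42, builder.instantiate().exports.main(3));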
diff --git a/deps/v8/test/mjsunit/wasm/indirect-calls.js b/deps/v8/test/mjsunit/wasm/indirect-calls.js
index bcb7acd2ba..5abb346879 100644
--- a/deps/v8/test/mjsunit/wasm/indirect-calls.js
+++ b/deps/v8/test/mjsunit/wasm/indirect-calls.js
@@ -7,83 +7,102 @@
load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
-var module = (function () {
- var builder = new WasmModuleBuilder();
+(function Test1() {
+ print("Test1...");
+ var module = (function () {
+ var builder = new WasmModuleBuilder();
- var sig_index = builder.addType(kSig_i_ii);
- builder.addImport("q", "add", sig_index);
- builder.addFunction("add", sig_index)
- .addBody([
- kExprGetLocal, 0, kExprGetLocal, 1, kExprCallFunction, 0
- ]);
- builder.addFunction("sub", sig_index)
- .addBody([
- kExprGetLocal, 0, // --
- kExprGetLocal, 1, // --
- kExprI32Sub, // --
- ]);
- builder.addFunction("main", kSig_i_iii)
- .addBody([
- kExprGetLocal, 1,
- kExprGetLocal, 2,
- kExprGetLocal, 0,
- kExprCallIndirect, sig_index, kTableZero
- ])
- .exportFunc()
- builder.appendToTable([1, 2, 3]);
-
- return builder.instantiate({q: {add: function(a, b) { return a + b | 0; }}});
+ var sig_index = builder.addType(kSig_i_ii);
+ builder.addImport("q", "add", sig_index);
+ var f = builder.addFunction("add", sig_index)
+ .addBody([
+ kExprGetLocal, 0, kExprGetLocal, 1, kExprCallFunction, 0
+ ]);
+ print("internal add index = " + f.index);
+ builder.addFunction("sub", sig_index)
+ .addBody([
+ kExprGetLocal, 0, // --
+ kExprGetLocal, 1, // --
+ kExprI32Sub, // --
+ ]);
+ builder.addFunction("main", kSig_i_iii)
+ .addBody([
+ kExprGetLocal, 1,
+ kExprGetLocal, 2,
+ kExprGetLocal, 0,
+ kExprCallIndirect, sig_index, kTableZero
+ ])
+ .exportFunc()
+ builder.appendToTable([1, 2, 3]);
+
+ return builder.instantiate({q: {add: function(a, b) {
+ print(" --extadd");
+ return a + b | 0;
+ }}});
+ })();
+
+ // Check the module exists.
+ assertFalse(module === undefined);
+ assertFalse(module === null);
+ assertFalse(module === 0);
+
+ assertEquals("object", typeof module.exports);
+ assertEquals("function", typeof module.exports.main);
+
+ print(" --x1--");
+ assertEquals(19, module.exports.main(0, 12, 7));
+ print(" --y1--");
+ assertEquals(5, module.exports.main(1, 12, 7));
+ print(" --z1--");
+ assertTraps(kTrapFuncSigMismatch, () => module.exports.main(2, 12, 33));
+ print(" --w1--");
+ assertTraps(kTrapFuncInvalid, () => module.exports.main(3, 12, 33));
})();
-// Check the module exists.
-assertFalse(module === undefined);
-assertFalse(module === null);
-assertFalse(module === 0);
-assertEquals("object", typeof module.exports);
-assertEquals("function", typeof module.exports.main);
-
-assertEquals(5, module.exports.main(1, 12, 7));
-assertEquals(19, module.exports.main(0, 12, 7));
-
-assertTraps(kTrapFuncSigMismatch, "module.exports.main(2, 12, 33)");
-assertTraps(kTrapFuncInvalid, "module.exports.main(3, 12, 33)");
-
-
-module = (function () {
- var builder = new WasmModuleBuilder();
+(function Test2() {
+ print("Test2...");
+ var module = (function () {
+ var builder = new WasmModuleBuilder();
- var sig_i_ii = builder.addType(kSig_i_ii);
- var sig_i_i = builder.addType(kSig_i_i);
- var mul = builder.addImport("q", "mul", sig_i_ii);
- var add = builder.addFunction("add", sig_i_ii)
- .addBody([
- kExprGetLocal, 0, // --
- kExprGetLocal, 1, // --
- kExprI32Add // --
- ]);
- var popcnt = builder.addFunction("popcnt", sig_i_i)
- .addBody([
- kExprGetLocal, 0, // --
- kExprI32Popcnt // --
- ]);
- var main = builder.addFunction("main", kSig_i_iii)
- .addBody([
- kExprGetLocal, 1,
- kExprGetLocal, 2,
- kExprGetLocal, 0,
- kExprCallIndirect, sig_i_ii, kTableZero
- ])
- .exportFunc();
- builder.appendToTable([mul, add.index, popcnt.index, main.index]);
-
- return builder.instantiate({q: {mul: function(a, b) { return a * b | 0; }}});
+ var sig_i_ii = builder.addType(kSig_i_ii);
+ var sig_i_i = builder.addType(kSig_i_i);
+ var mul = builder.addImport("q", "mul", sig_i_ii);
+ var add = builder.addFunction("add", sig_i_ii)
+ .addBody([
+ kExprGetLocal, 0, // --
+ kExprGetLocal, 1, // --
+ kExprI32Add // --
+ ]);
+ var popcnt = builder.addFunction("popcnt", sig_i_i)
+ .addBody([
+ kExprGetLocal, 0, // --
+ kExprI32Popcnt // --
+ ]);
+ var main = builder.addFunction("main", kSig_i_iii)
+ .addBody([
+ kExprGetLocal, 1,
+ kExprGetLocal, 2,
+ kExprGetLocal, 0,
+ kExprCallIndirect, sig_i_ii, kTableZero
+ ])
+ .exportFunc();
+ builder.appendToTable([mul, add.index, popcnt.index, main.index]);
+
+ return builder.instantiate({q: {mul: function(a, b) { return a * b | 0; }}});
+ })();
+
+ print(" --x2--");
+ assertEquals(-6, module.exports.main(0, -2, 3));
+ print(" --y2--");
+ assertEquals(99, module.exports.main(1, 22, 77));
+ print(" --z2--");
+ assertTraps(kTrapFuncSigMismatch, () => module.exports.main(2, 12, 33));
+ print(" --q2--");
+ assertTraps(kTrapFuncSigMismatch, () => module.exports.main(3, 12, 33));
+ print(" --t2--");
+ assertTraps(kTrapFuncInvalid, () => module.exports.main(4, 12, 33));
})();
-assertEquals(-6, module.exports.main(0, -2, 3));
-assertEquals(99, module.exports.main(1, 22, 77));
-assertTraps(kTrapFuncSigMismatch, "module.exports.main(2, 12, 33)");
-assertTraps(kTrapFuncSigMismatch, "module.exports.main(3, 12, 33)");
-assertTraps(kTrapFuncInvalid, "module.exports.main(4, 12, 33)");
function AddFunctions(builder) {
var mul = builder.addFunction("mul", kSig_i_ii)
@@ -108,31 +127,34 @@ function AddFunctions(builder) {
}
-module = (function () {
- var builder = new WasmModuleBuilder();
+(function Test3() {
+ print("Test3...");
+ var module = (function () {
+ var builder = new WasmModuleBuilder();
- var f = AddFunctions(builder);
- builder.addFunction("main", kSig_i_ii)
- .addBody([
- kExprI32Const, 33, // --
- kExprGetLocal, 0, // --
- kExprGetLocal, 1, // --
- kExprCallIndirect, 0, kTableZero]) // --
- .exportAs("main");
+ var f = AddFunctions(builder);
+ builder.addFunction("main", kSig_i_ii)
+ .addBody([
+ kExprI32Const, 33, // --
+ kExprGetLocal, 0, // --
+ kExprGetLocal, 1, // --
+ kExprCallIndirect, 0, kTableZero]) // --
+ .exportAs("main");
- builder.appendToTable([f.mul.index, f.add.index, f.sub.index]);
+ builder.appendToTable([f.mul.index, f.add.index, f.sub.index]);
- return builder.instantiate();
+ return builder.instantiate();
+ })();
+
+ assertEquals(33, module.exports.main(1, 0));
+ assertEquals(66, module.exports.main(2, 0));
+ assertEquals(34, module.exports.main(1, 1));
+ assertEquals(35, module.exports.main(2, 1));
+ assertEquals(32, module.exports.main(1, 2));
+ assertEquals(31, module.exports.main(2, 2));
+ assertTraps(kTrapFuncInvalid, () => module.exports.main(12, 3));
})();
-assertEquals(33, module.exports.main(1, 0));
-assertEquals(66, module.exports.main(2, 0));
-assertEquals(34, module.exports.main(1, 1));
-assertEquals(35, module.exports.main(2, 1));
-assertEquals(32, module.exports.main(1, 2));
-assertEquals(31, module.exports.main(2, 2));
-assertTraps(kTrapFuncInvalid, "module.exports.main(12, 3)");
-
(function ConstBaseTest() {
print("ConstBaseTest...");
function instanceWithTable(base, length) {
@@ -147,8 +169,8 @@ assertTraps(kTrapFuncInvalid, "module.exports.main(12, 3)");
kExprCallIndirect, 0, kTableZero]) // --
.exportAs("main");
- builder.setFunctionTableBounds(length, length);
- builder.addFunctionTableInit(base, false, [f.add.index, f.sub.index, f.mul.index]);
+ builder.setTableBounds(length, length);
+ builder.addElementSegment(base, false, [f.add.index, f.sub.index, f.mul.index]);
return builder.instantiate();
}
@@ -166,7 +188,7 @@ assertTraps(kTrapFuncInvalid, "module.exports.main(12, 3)");
assertEquals(31, main(2, i + 1));
assertEquals(33, main(1, i + 2));
assertEquals(66, main(2, i + 2));
- assertTraps(kTrapFuncInvalid, "main(12, 10)");
+ assertTraps(kTrapFuncInvalid, () => main(12, 10));
}
})();
@@ -184,9 +206,9 @@ assertTraps(kTrapFuncInvalid, "module.exports.main(12, 3)");
kExprCallIndirect, 0, kTableZero]) // --
.exportAs("main");
- builder.setFunctionTableBounds(10, 10);
+ builder.setTableBounds(10, 10);
var g = builder.addImportedGlobal("fff", "base", kWasmI32);
- builder.addFunctionTableInit(g, true, [f.mul.index, f.add.index, f.sub.index]);
+ builder.addElementSegment(g, true, [f.mul.index, f.add.index, f.sub.index]);
var module = new WebAssembly.Module(builder.toBuffer());
@@ -203,6 +225,6 @@ assertTraps(kTrapFuncInvalid, "module.exports.main(12, 3)");
assertEquals(35, main(2, i + 1));
assertEquals(32, main(1, i + 2));
assertEquals(31, main(2, i + 2));
- assertTraps(kTrapFuncInvalid, "main(12, 10)");
+ assertTraps(kTrapFuncInvalid, () => main(12, 10));
}
})();
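Besides wrapping each case in its own IIFE with progress prints, the rewrite switches assertTraps from a code string (which the harness had to evaluate in its own scope) to a closure, so the call under test stays lexically scoped:

  // Before: evaluated as a string in the harness scope.
  assertTraps(kTrapFuncInvalid, "module.exports.main(12, 3)");
  // After: the closure captures `module` directly.
  assertTraps(kTrapFuncInvalid, () => module.exports.main(12, 3));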
diff --git a/deps/v8/test/mjsunit/wasm/indirect-tables.js b/deps/v8/test/mjsunit/wasm/indirect-tables.js
index 247d67a41d..642bd953bf 100644
--- a/deps/v8/test/mjsunit/wasm/indirect-tables.js
+++ b/deps/v8/test/mjsunit/wasm/indirect-tables.js
@@ -49,11 +49,10 @@ function js_div(a, b) { return (a / b) | 0; }
f.add.exportAs("blarg");
- builder.setFunctionTableBounds(10, 10);
+ builder.setTableBounds(10, 10);
let g = builder.addImportedGlobal("q", "base", kWasmI32);
- builder.addFunctionTableInit(g, true, [f.mul.index, f.add.index,
- f.sub.index,
- d]);
+ builder.addElementSegment(
+ g, true, [f.mul.index, f.add.index, f.sub.index, d]);
builder.addExportOfKind("table", kExternalTable, 0);
let module = new WebAssembly.Module(builder.toBuffer());
@@ -109,11 +108,10 @@ function js_div(a, b) { return (a / b) | 0; }
let d = builder.addImport("q", "js_div", kSig_i_ii);
let f = AddFunctions(builder);
- builder.setFunctionTableBounds(kTableSize, kTableSize);
+ builder.setTableBounds(kTableSize, kTableSize);
let g = builder.addImportedGlobal("q", "base", kWasmI32);
- builder.addFunctionTableInit(g, true, [f.mul.index, f.add.index,
- f.sub.index,
- d]);
+ builder.addElementSegment(
+ g, true, [f.mul.index, f.add.index, f.sub.index, d]);
builder.addExportOfKind("table", kExternalTable, 0);
let m1 = new WebAssembly.Module(builder.toBuffer());
@@ -182,9 +180,8 @@ function js_div(a, b) { return (a / b) | 0; }
builder.addImportedTable("q", "table", kTableSize, kTableSize);
let g = builder.addImportedGlobal("q", "base", kWasmI32);
let f = AddFunctions(builder);
- builder.addFunctionTableInit(g, true, [f.mul.index, f.add.index,
- f.sub.index,
- d]);
+ builder.addElementSegment(
+ g, true, [f.mul.index, f.add.index, f.sub.index, d]);
builder.addFunction("main", kSig_i_ii)
.addBody([
kExprI32Const, 55, // --
@@ -260,7 +257,7 @@ function js_div(a, b) { return (a / b) | 0; }
kExprGetLocal, 0,
kExprCallIndirect, sig_index, kTableZero]) // --
.exportAs("main");
- builder.addFunctionTableInit(g, true, [f.index]);
+ builder.addElementSegment(g, true, [f.index]);
let module = new WebAssembly.Module(builder.toBuffer());
@@ -300,8 +297,8 @@ function js_div(a, b) { return (a / b) | 0; }
kExprCallIndirect, sig_index1, kTableZero]) // --
.exportAs("main");
- builder.setFunctionTableBounds(kTableSize, kTableSize);
- builder.addFunctionTableInit(0, false, [f1.index]);
+ builder.setTableBounds(kTableSize, kTableSize);
+ builder.addElementSegment(0, false, [f1.index]);
builder.addExportOfKind("table", kExternalTable, 0);
var m1 = new WebAssembly.Module(builder.toBuffer());
@@ -320,7 +317,7 @@ function js_div(a, b) { return (a / b) | 0; }
.exportAs("main");
builder.addImportedTable("z", "table", kTableSize, kTableSize);
- builder.addFunctionTableInit(1, false, [f2.index], true);
+ builder.addElementSegment(1, false, [f2.index], true);
var m2 = new WebAssembly.Module(builder.toBuffer());
@@ -349,7 +346,7 @@ function js_div(a, b) { return (a / b) | 0; }
for (var impsize = 1; impsize < 4; impsize++) {
print(" expsize = " + expsize + ", impsize = " + impsize);
var builder = new WasmModuleBuilder();
- builder.setFunctionTableBounds(expsize, expsize);
+ builder.setTableBounds(expsize, expsize);
builder.addExportOfKind("expfoo", kExternalTable, 0);
let m1 = new WebAssembly.Module(builder.toBuffer());
@@ -415,7 +412,7 @@ function js_div(a, b) { return (a / b) | 0; }
kExprGetLocal, 0,
kExprCallIndirect, sig_index, kTableZero]) // --
.exportAs("main");
- builder.addFunctionTableInit(g, true, [g]);
+ builder.addElementSegment(g, true, [g]);
let module = new WebAssembly.Module(builder.toBuffer());
var instances = [];
@@ -538,7 +535,7 @@ function js_div(a, b) { return (a / b) | 0; }
for (let num_segments = 1; num_segments < 4; ++num_segments) {
var builder = new WasmModuleBuilder();
- builder.setFunctionTableBounds(kTableSize, kTableSize);
+ builder.setTableBounds(kTableSize, kTableSize);
builder.addExportOfKind("table", kExternalTable, 0);
let f = AddFunctions(builder);
let indexes = [f.mul.index, f.add.index, f.sub.index];
@@ -546,7 +543,7 @@ function js_div(a, b) { return (a / b) | 0; }
let offset = i + 1;
let len = i + 2;
let index = indexes[i];
- builder.addFunctionTableInit(offset, false, new Array(len).fill(index));
+ builder.addElementSegment(offset, false, new Array(len).fill(index));
}
let instance = builder.instantiate();
@@ -580,7 +577,7 @@ function js_div(a, b) { return (a / b) | 0; }
kExprCallIndirect, sig_index, kTableZero
])
.exportAs('main');
- builder0.setFunctionTableBounds(3, 3);
+ builder0.setTableBounds(3, 3);
builder0.addExportOfKind('table', kExternalTable);
let module0 = new WebAssembly.Module(builder0.toBuffer());
let instance0 = new WebAssembly.Instance(module0);
@@ -590,7 +587,7 @@ function js_div(a, b) { return (a / b) | 0; }
builder1.setName('module_1');
builder1.addFunction('f', kSig_i_i).addBody([kExprGetLocal, 0]);
builder1.addImportedTable('z', 'table');
- builder1.addFunctionTableInit(0, false, [0], true);
+ builder1.addElementSegment(0, false, [0], true);
let module1 = new WebAssembly.Module(builder1.toBuffer());
let instance1 =
new WebAssembly.Instance(module1, {z: {table: instance0.exports.table}});
@@ -619,7 +616,7 @@ function js_div(a, b) { return (a / b) | 0; }
.exportAs('main');
builder.addImportedMemory('', 'memory', 1);
- builder.setFunctionTableBounds(1, 1);
+ builder.setTableBounds(1, 1);
builder.addExportOfKind('table', kExternalTable);
let module1 = new WebAssembly.Module(builder.toBuffer());
@@ -628,7 +625,7 @@ function js_div(a, b) { return (a / b) | 0; }
builder = new WasmModuleBuilder();
builder.addFunction('main', kSig_i_v).addBody([kExprI32Const, 0, kExprI32LoadMem, 0, 0]);
builder.addImportedTable('', 'table');
- builder.addFunctionTableInit(0, false, [0], true);
+ builder.addElementSegment(0, false, [0], true);
builder.addImportedMemory('', 'memory', 1);
@@ -734,9 +731,9 @@ function js_div(a, b) { return (a / b) | 0; }
kExprCallIndirect, 0, kTableZero]) // --
.exportAs("main");
- builder.setFunctionTableBounds(10, 10);
+ builder.setTableBounds(10, 10);
let g = builder.addImportedGlobal("q", "base", kWasmI32);
- builder.addFunctionTableInit(g, true, [j, w]);
+ builder.addElementSegment(g, true, [j, w]);
let module = new WebAssembly.Module(builder.toBuffer());
for (var i = 0; i < 5; i++) {
@@ -785,7 +782,7 @@ function js_div(a, b) { return (a / b) | 0; }
.exportAs("main");
let g = builder.addImportedGlobal("q", "base", kWasmI32);
- builder.addFunctionTableInit(g, true, [j, w]);
+ builder.addElementSegment(g, true, [j, w]);
let module = new WebAssembly.Module(builder.toBuffer());
for (var i = 0; i < 5; i++) {
@@ -824,7 +821,7 @@ function js_div(a, b) { return (a / b) | 0; }
kExprCallIndirect, 0, kTableZero
])
.exportFunc();
- builder.addFunctionTableInit(0, false, [0, 1, 1, 0]);
+ builder.addElementSegment(0, false, [0, 1, 1, 0]);
return builder.instantiate({q: {f2: i1.exports.f2, f1: i1.exports.f1}});
})();
@@ -886,7 +883,7 @@ function js_div(a, b) { return (a / b) | 0; }
])
.exportFunc();
builder.exportMemoryAs("memory");
- builder.addFunctionTableInit(0, false, [0, 1, 2, 3]);
+ builder.addElementSegment(0, false, [0, 1, 2, 3]);
var instance = builder.instantiate({q: {f1: f100, f2: f200, f3: f300}});
setMemI32(instance, 0, 5000000);
setMemI32(instance, 4, 6000000);
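Several of these cases share one function table between modules: one module exports the table, another imports it and fills slots through element segments. A condensed sketch of that pattern with the same helpers (module names are illustrative):

  // Module A owns and exports a 2-slot table.
  const a = new WasmModuleBuilder();
  a.setTableBounds(2, 2);
  a.addExportOfKind("table", kExternalTable, 0);
  const instA = a.instantiate();

  // Module B imports it and initializes slot 0 with its function index 0.
  const b = new WasmModuleBuilder();
  b.addFunction("f", kSig_i_v).addBody([kExprI32Const, 7]);
  b.addImportedTable("z", "table", 2, 2);
  b.addElementSegment(0, false, [0], true);  // trailing true: imported table
  b.instantiate({z: {table: instA.exports.table}});

  assertEquals(7, instA.exports.table.get(0)());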
diff --git a/deps/v8/test/mjsunit/wasm/instance-memory-gc-stress.js b/deps/v8/test/mjsunit/wasm/instance-memory-gc-stress.js
index 64909eb9df..dc1ca24a85 100644
--- a/deps/v8/test/mjsunit/wasm/instance-memory-gc-stress.js
+++ b/deps/v8/test/mjsunit/wasm/instance-memory-gc-stress.js
@@ -18,7 +18,7 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
.addBody([kExprMemorySize, kMemoryZero])
.exportFunc();
builder.addFunction("grow", kSig_i_i)
- .addBody([kExprGetLocal, 0, kExprGrowMemory, kMemoryZero])
+ .addBody([kExprGetLocal, 0, kExprMemoryGrow, kMemoryZero])
.exportFunc();
var instances = [];
for (var i = 0; i < 5; i++) {
diff --git a/deps/v8/test/mjsunit/wasm/interpreter-mixed.js b/deps/v8/test/mjsunit/wasm/interpreter-mixed.js
index 6225220c8d..4a84e13414 100644
--- a/deps/v8/test/mjsunit/wasm/interpreter-mixed.js
+++ b/deps/v8/test/mjsunit/wasm/interpreter-mixed.js
@@ -26,11 +26,11 @@ function checkStack(stack, expected_lines) {
}
}
-(function testGrowMemoryBetweenInterpretedAndCompiled() {
+(function testMemoryGrowBetweenInterpretedAndCompiled() {
// grow_memory can be called from interpreted or compiled code, and changes
// should be reflected in either execution.
var builder = new WasmModuleBuilder();
- var grow_body = [kExprGetLocal, 0, kExprGrowMemory, kMemoryZero];
+ var grow_body = [kExprGetLocal, 0, kExprMemoryGrow, kMemoryZero];
var load_body = [kExprGetLocal, 0, kExprI32LoadMem, 0, 0];
var store_body = [kExprGetLocal, 0, kExprGetLocal, 1, kExprI32StoreMem, 0, 0];
builder.addFunction('grow_memory', kSig_i_i).addBody(grow_body).exportFunc();
diff --git a/deps/v8/test/mjsunit/wasm/interpreter.js b/deps/v8/test/mjsunit/wasm/interpreter.js
index b887b40918..d9a1751408 100644
--- a/deps/v8/test/mjsunit/wasm/interpreter.js
+++ b/deps/v8/test/mjsunit/wasm/interpreter.js
@@ -530,7 +530,7 @@ function checkStack(stack, expected_lines) {
kExprCallIndirect, sig_index, kTableZero
]) // --
.exportAs('main');
- builder0.setFunctionTableBounds(3, 3);
+ builder0.setTableBounds(3, 3);
builder0.addExportOfKind('table', kExternalTable);
const module0 = new WebAssembly.Module(builder0.toBuffer());
const instance0 = new WebAssembly.Instance(module0);
@@ -538,10 +538,33 @@ function checkStack(stack, expected_lines) {
const builder1 = new WasmModuleBuilder();
builder1.addFunction('main', kSig_i_v).addBody([kExprUnreachable]);
builder1.addImportedTable('z', 'table');
- builder1.addFunctionTableInit(0, false, [0], true);
+ builder1.addElementSegment(0, false, [0], true);
const module1 = new WebAssembly.Module(builder1.toBuffer());
const instance1 =
new WebAssembly.Instance(module1, {z: {table: instance0.exports.table}});
assertThrows(
() => instance0.exports.main(0), WebAssembly.RuntimeError, 'unreachable');
})();
+
+(function testSerializeInterpreted() {
+ print(arguments.callee.name);
+ const builder = new WasmModuleBuilder();
+ builder.addFunction('main', kSig_i_i)
+ .addBody([kExprGetLocal, 0, kExprI32Const, 7, kExprI32Add])
+ .exportFunc();
+
+ const wire_bytes = builder.toBuffer();
+ var module = new WebAssembly.Module(wire_bytes);
+ const i1 = new WebAssembly.Instance(module);
+
+ assertEquals(11, i1.exports.main(4));
+
+ const buff = %SerializeWasmModule(module);
+ module = null;
+ gc();
+
+ module = %DeserializeWasmModule(buff, wire_bytes);
+ const i2 = new WebAssembly.Instance(module);
+
+ assertEquals(11, i2.exports.main(4));
+})();
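The new testSerializeInterpreted case round-trips a module through %SerializeWasmModule and %DeserializeWasmModule, test-only intrinsics gated on --allow-natives-syntax. Deserialization is also handed the original wire bytes, presumably so V8 can fall back to recompiling them if the serialized code cannot be reused. The shape of the round trip:

  const bytes = builder.toBuffer();
  let module = new WebAssembly.Module(bytes);
  const blob = %SerializeWasmModule(module);     // opaque compiled-code blob
  module = %DeserializeWasmModule(blob, bytes);  // wire bytes kept alongside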
diff --git a/deps/v8/test/mjsunit/wasm/js-api.js b/deps/v8/test/mjsunit/wasm/js-api.js
index f27fa7a361..5054fd73be 100644
--- a/deps/v8/test/mjsunit/wasm/js-api.js
+++ b/deps/v8/test/mjsunit/wasm/js-api.js
@@ -194,7 +194,7 @@ assertEq(Object.getPrototypeOf(emptyModule), moduleProto);
let moduleImportsDesc = Object.getOwnPropertyDescriptor(Module, 'imports');
assertEq(typeof moduleImportsDesc.value, 'function');
assertTrue(moduleImportsDesc.writable);
-assertFalse(moduleImportsDesc.enumerable);
+assertTrue(moduleImportsDesc.enumerable);
assertTrue(moduleImportsDesc.configurable);
// 'WebAssembly.Module.imports' method
@@ -241,7 +241,7 @@ assertEq(arr[3].name, 'x');
let moduleExportsDesc = Object.getOwnPropertyDescriptor(Module, 'exports');
assertEq(typeof moduleExportsDesc.value, 'function');
assertTrue(moduleExportsDesc.writable);
-assertFalse(moduleExportsDesc.enumerable);
+assertTrue(moduleExportsDesc.enumerable);
assertTrue(moduleExportsDesc.configurable);
// 'WebAssembly.Module.exports' method
@@ -265,7 +265,7 @@ let exportingModuleBinary2 = (() => {
builder.addFunction('foo', kSig_v_v).addBody([]).exportAs('a');
builder.addMemory(1, 1, false);
builder.exportMemoryAs('b');
- builder.setFunctionTableBounds(1, 1);
+ builder.setTableBounds(1, 1);
builder.addExportOfKind('c', kExternalTable, 0);
var o = builder.addGlobal(kWasmI32, false).exportAs('x');
return new Int8Array(builder.toBuffer());
@@ -286,9 +286,9 @@ assertEq(arr[3].name, 'x');
let moduleCustomSectionsDesc =
Object.getOwnPropertyDescriptor(Module, 'customSections');
assertEq(typeof moduleCustomSectionsDesc.value, 'function');
-assertEq(moduleCustomSectionsDesc.writable, true);
-assertEq(moduleCustomSectionsDesc.enumerable, false);
-assertEq(moduleCustomSectionsDesc.configurable, true);
+assertTrue(moduleCustomSectionsDesc.writable);
+assertTrue(moduleCustomSectionsDesc.enumerable);
+assertTrue(moduleCustomSectionsDesc.configurable);
let moduleCustomSections = moduleCustomSectionsDesc.value;
assertEq(moduleCustomSections.length, 2);
@@ -397,7 +397,7 @@ let instanceExportsDesc =
Object.getOwnPropertyDescriptor(instanceProto, 'exports');
assertEq(typeof instanceExportsDesc.get, 'function');
assertEq(instanceExportsDesc.set, undefined);
-assertFalse(instanceExportsDesc.enumerable);
+assertTrue(instanceExportsDesc.enumerable);
assertTrue(instanceExportsDesc.configurable);
exportsObj = exportingInstance.exports;
@@ -435,9 +435,9 @@ assertErrorMessage(
() => new Memory({initial: {valueOf() { throw new Error('here') }}}), Error,
'here');
assertErrorMessage(
- () => new Memory({initial: -1}), RangeError, /bad Memory initial size/);
+ () => new Memory({initial: -1}), TypeError, /bad Memory initial size/);
assertErrorMessage(
- () => new Memory({initial: Math.pow(2, 32)}), RangeError,
+ () => new Memory({initial: Math.pow(2, 32)}), TypeError,
/bad Memory initial size/);
assertErrorMessage(
() => new Memory({initial: 1, maximum: Math.pow(2, 32) / Math.pow(2, 14)}),
@@ -446,7 +446,7 @@ assertErrorMessage(
() => new Memory({initial: 2, maximum: 1}), RangeError,
/bad Memory maximum size/);
assertErrorMessage(
- () => new Memory({maximum: -1}), RangeError, /bad Memory maximum size/);
+ () => new Memory({maximum: -1}), TypeError, /bad Memory maximum size/);
assertTrue(new Memory({initial: 1}) instanceof Memory);
assertEq(new Memory({initial: 1.5}).buffer.byteLength, kPageSize);
@@ -473,7 +473,7 @@ assertEq(Object.getPrototypeOf(mem1), memoryProto);
let bufferDesc = Object.getOwnPropertyDescriptor(memoryProto, 'buffer');
assertEq(typeof bufferDesc.get, 'function');
assertEq(bufferDesc.set, undefined);
-assertFalse(bufferDesc.enumerable);
+assertTrue(bufferDesc.enumerable);
assertTrue(bufferDesc.configurable);
// 'WebAssembly.Memory.prototype.buffer' getter
@@ -488,7 +488,7 @@ assertEq(bufferGetter.call(mem1).byteLength, kPageSize);
// 'WebAssembly.Memory.prototype.grow' data property
let memGrowDesc = Object.getOwnPropertyDescriptor(memoryProto, 'grow');
assertEq(typeof memGrowDesc.value, 'function');
-assertFalse(memGrowDesc.enumerable);
+assertTrue(memGrowDesc.enumerable);
assertTrue(memGrowDesc.configurable);
// 'WebAssembly.Memory.prototype.grow' method
@@ -500,10 +500,10 @@ assertErrorMessage(
assertErrorMessage(
() => memGrow.call({}), TypeError, /called on incompatible Object/);
assertErrorMessage(
- () => memGrow.call(mem1, -1), RangeError, /bad Memory grow delta/);
+ () => memGrow.call(mem1, -1), TypeError, /must be non-negative/);
assertErrorMessage(
- () => memGrow.call(mem1, Math.pow(2, 32)), RangeError,
- /bad Memory grow delta/);
+ () => memGrow.call(mem1, Math.pow(2, 32)), TypeError,
+ /must be in the unsigned long range/);
var mem = new Memory({initial: 1, maximum: 2});
var buf = mem.buffer;
assertEq(buf.byteLength, kPageSize);
@@ -517,7 +517,7 @@ assertTrue(buf !== mem.buffer);
assertEq(buf.byteLength, 0);
buf = mem.buffer;
assertEq(buf.byteLength, 2 * kPageSize);
-assertEq(mem.grow(), 2);
+assertEq(mem.grow(0), 2);
assertTrue(buf !== mem.buffer);
assertEq(buf.byteLength, 0);
buf = mem.buffer;
@@ -583,17 +583,17 @@ assertErrorMessage(
{initial: {valueOf() { throw new Error('here') }}, element: 'anyfunc'}),
Error, 'here');
assertErrorMessage(
- () => new Table({initial: -1, element: 'anyfunc'}), RangeError,
+ () => new Table({initial: -1, element: 'anyfunc'}), TypeError,
/bad Table initial size/);
assertErrorMessage(
- () => new Table({initial: Math.pow(2, 32), element: 'anyfunc'}), RangeError,
+ () => new Table({initial: Math.pow(2, 32), element: 'anyfunc'}), TypeError,
/bad Table initial size/);
assertErrorMessage(
() => new Table({initial: 2, maximum: 1, element: 'anyfunc'}), RangeError,
/bad Table maximum size/);
assertErrorMessage(
() => new Table({initial: 2, maximum: Math.pow(2, 32), element: 'anyfunc'}),
- RangeError, /bad Table maximum size/);
+ TypeError, /bad Table maximum size/);
assertTrue(new Table({initial: 1, element: 'anyfunc'}) instanceof Table);
assertTrue(new Table({initial: 1.5, element: 'anyfunc'}) instanceof Table);
assertTrue(
@@ -625,7 +625,7 @@ assertEq(Object.getPrototypeOf(tbl1), tableProto);
let lengthDesc = Object.getOwnPropertyDescriptor(tableProto, 'length');
assertEq(typeof lengthDesc.get, 'function');
assertEq(lengthDesc.set, undefined);
-assertFalse(lengthDesc.enumerable);
+assertTrue(lengthDesc.enumerable);
assertTrue(lengthDesc.configurable);
// 'WebAssembly.Table.prototype.length' getter
@@ -641,7 +641,7 @@ assertEq(lengthGetter.call(tbl1), 2);
// 'WebAssembly.Table.prototype.get' data property
let getDesc = Object.getOwnPropertyDescriptor(tableProto, 'get');
assertEq(typeof getDesc.value, 'function');
-assertFalse(getDesc.enumerable);
+assertTrue(getDesc.enumerable);
assertTrue(getDesc.configurable);
// 'WebAssembly.Table.prototype.get' method
@@ -651,7 +651,8 @@ assertErrorMessage(
() => get.call(), TypeError, /called on incompatible undefined/);
assertErrorMessage(
() => get.call({}), TypeError, /called on incompatible Object/);
-assertEq(get.call(tbl1), null);
+assertErrorMessage(
+ () => get.call(tbl1), TypeError, /must be convertible to a valid number/);
assertEq(get.call(tbl1, 0), null);
assertEq(get.call(tbl1, 0, Infinity), null);
assertEq(get.call(tbl1, 1), null);
@@ -659,16 +660,16 @@ assertEq(get.call(tbl1, 1.5), null);
assertErrorMessage(() => get.call(tbl1, 2), RangeError, /bad Table get index/);
assertErrorMessage(
() => get.call(tbl1, 2.5), RangeError, /bad Table get index/);
-assertErrorMessage(() => get.call(tbl1, -1), RangeError, /bad Table get index/);
+assertErrorMessage(() => get.call(tbl1, -1), TypeError, /bad Table get index/);
assertErrorMessage(
- () => get.call(tbl1, Math.pow(2, 33)), RangeError, /bad Table get index/);
+ () => get.call(tbl1, Math.pow(2, 33)), TypeError, /bad Table get index/);
assertErrorMessage(
() => get.call(tbl1, {valueOf() { throw new Error('hi') }}), Error, 'hi');
// 'WebAssembly.Table.prototype.set' data property
let setDesc = Object.getOwnPropertyDescriptor(tableProto, 'set');
assertEq(typeof setDesc.value, 'function');
-assertFalse(setDesc.enumerable);
+assertTrue(setDesc.enumerable);
assertTrue(setDesc.configurable);
// 'WebAssembly.Table.prototype.set' method
@@ -686,14 +687,14 @@ assertErrorMessage(
assertErrorMessage(
() => set.call(tbl1, 2, null), RangeError, /bad Table set index/);
assertErrorMessage(
- () => set.call(tbl1, -1, null), RangeError, /bad Table set index/);
+ () => set.call(tbl1, -1, null), TypeError, /bad Table set index/);
assertErrorMessage(
- () => set.call(tbl1, Math.pow(2, 33), null), RangeError,
+ () => set.call(tbl1, Math.pow(2, 33), null), TypeError,
/bad Table set index/);
assertErrorMessage(
- () => set.call(tbl1, Infinity, null), RangeError, /bad Table set index/);
+ () => set.call(tbl1, Infinity, null), TypeError, /bad Table set index/);
assertErrorMessage(
- () => set.call(tbl1, -Infinity, null), RangeError, /bad Table set index/);
+ () => set.call(tbl1, -Infinity, null), TypeError, /bad Table set index/);
assertErrorMessage(
() => set.call(tbl1, 0, undefined), TypeError,
/can only assign WebAssembly exported functions to Table/);
@@ -713,12 +714,14 @@ assertErrorMessage(
'hai');
assertEq(set.call(tbl1, 0, null), undefined);
assertEq(set.call(tbl1, 1, null), undefined);
-assertEq(set.call(tbl1, undefined, null), undefined);
+assertErrorMessage(
+ () => set.call(tbl1, undefined, null), TypeError,
+ /must be convertible to a valid number/);
// 'WebAssembly.Table.prototype.grow' data property
let tblGrowDesc = Object.getOwnPropertyDescriptor(tableProto, 'grow');
assertEq(typeof tblGrowDesc.value, 'function');
-assertFalse(tblGrowDesc.enumerable);
+assertTrue(tblGrowDesc.enumerable);
assertTrue(tblGrowDesc.configurable);
// 'WebAssembly.Table.prototype.grow' method
@@ -729,27 +732,26 @@ assertErrorMessage(
assertErrorMessage(
() => tblGrow.call({}), TypeError, /called on incompatible Object/);
assertErrorMessage(
- () => tblGrow.call(tbl1, -1), RangeError, /bad Table grow delta/);
+ () => tblGrow.call(tbl1, -1), TypeError, /bad Table grow delta/);
assertErrorMessage(
- () => tblGrow.call(tbl1, Math.pow(2, 32)), RangeError,
+ () => tblGrow.call(tbl1, Math.pow(2, 32)), TypeError,
/bad Table grow delta/);
var tbl = new Table({element: 'anyfunc', initial: 1, maximum: 2});
assertEq(tbl.length, 1);
assertErrorMessage(
- () => tbl.grow(Infinity), RangeError, /failed to grow table/);
+ () => tbl.grow(Infinity), TypeError, /failed to grow table/);
assertErrorMessage(
- () => tbl.grow(-Infinity), RangeError, /failed to grow table/);
+ () => tbl.grow(-Infinity), TypeError, /failed to grow table/);
assertEq(tbl.grow(0), 1);
assertEq(tbl.length, 1);
assertEq(tbl.grow(1, 4), 1);
assertEq(tbl.length, 2);
-assertEq(tbl.grow(), 2);
assertEq(tbl.length, 2);
assertErrorMessage(() => tbl.grow(1), Error, /failed to grow table/);
assertErrorMessage(
- () => tbl.grow(Infinity), RangeError, /failed to grow table/);
+ () => tbl.grow(Infinity), TypeError, /failed to grow table/);
assertErrorMessage(
- () => tbl.grow(-Infinity), RangeError, /failed to grow table/);
+ () => tbl.grow(-Infinity), TypeError, /failed to grow table/);
// 'WebAssembly.validate' function
assertErrorMessage(() => WebAssembly.validate(), TypeError);
@@ -763,7 +765,7 @@ assertFalse(WebAssembly.validate(moduleBinaryWithMemSectionAndMemImport));
let compileDesc = Object.getOwnPropertyDescriptor(WebAssembly, 'compile');
assertEq(typeof compileDesc.value, 'function');
assertTrue(compileDesc.writable);
-assertFalse(compileDesc.enumerable);
+assertTrue(compileDesc.enumerable);
assertTrue(compileDesc.configurable);
// 'WebAssembly.compile' function
@@ -809,7 +811,7 @@ let instantiateDesc =
Object.getOwnPropertyDescriptor(WebAssembly, 'instantiate');
assertEq(typeof instantiateDesc.value, 'function');
assertTrue(instantiateDesc.writable);
-assertFalse(instantiateDesc.enumerable);
+assertTrue(instantiateDesc.enumerable);
assertTrue(instantiateDesc.configurable);
// 'WebAssembly.instantiate' function
@@ -824,7 +826,7 @@ function assertInstantiateError(args, err, msg) {
// TODO assertTrue(Boolean(error.message.match(msg)));
});
}
-var scratch_memory = new WebAssembly.Memory(new ArrayBuffer(10));
+var scratch_memory = new WebAssembly.Memory({ initial: 0 });
assertInstantiateError([], TypeError, /requires more than 0 arguments/);
assertInstantiateError(
[undefined], TypeError, /first argument must be a BufferSource/);
@@ -925,3 +927,10 @@ assertInstantiateSuccess(
var instance = new WebAssembly.Instance(module);
assertTrue(instance instanceof Instance);
})();
+
+(function TestPassBigIntInGlobalWhenNotEnabled() {
+ assertThrows(() => new WebAssembly.Global({ value: "i64" }, 1), TypeError,
+ /Can't set the value/);
+ assertThrows(() => new WebAssembly.Global({ value: "i64" }, 1n), TypeError,
+ /Can't set the value/);
+})();
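The js-api.js expectations above track the Web IDL-based JS API spec: interface members are defined as enumerable, and size arguments are [EnforceRange] unsigned long, so negative or >= 2**32 values fail the conversion with a TypeError rather than a later RangeError. For example:

  const d = Object.getOwnPropertyDescriptor(WebAssembly.Memory.prototype, 'grow');
  assertTrue(d.enumerable);  // Web IDL operations are enumerable

  // EnforceRange rejects out-of-range sizes up front with TypeError.
  assertThrows(() => new WebAssembly.Memory({initial: -1}), TypeError);
  assertThrows(() => new WebAssembly.Memory({initial: Math.pow(2, 32)}), TypeError);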
diff --git a/deps/v8/test/mjsunit/wasm/jsapi-harness.js b/deps/v8/test/mjsunit/wasm/jsapi-harness.js
deleted file mode 100644
index d827b67570..0000000000
--- a/deps/v8/test/mjsunit/wasm/jsapi-harness.js
+++ /dev/null
@@ -1,139 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-//
-// TODO(eholk): Once we have stable test IDs, use those as the key instead.
-// See https://github.com/WebAssembly/spec/issues/415
-//
-// Flags: --expose-wasm --allow-natives-syntax
-
-const known_failures = {
- // Enter failing tests like follows:
- // "'WebAssembly.Instance.prototype.exports' accessor property":
- // 'https://bugs.chromium.org/p/v8/issues/detail?id=5507',
-};
-
-let failures = [];
-let unexpected_successes = [];
-
-let last_promise = new Promise((resolve, reject) => { resolve(); });
-
-function test(func, description) {
- let maybeErr;
- try { func(); }
- catch(e) { maybeErr = e; }
- if (typeof maybeErr !== 'undefined') {
- var known = "";
- if (known_failures[description]) {
- known = " (known)";
- }
- print(`${description}: FAIL${known}. ${maybeErr}`);
- failures.push(description);
- } else {
- if (known_failures[description]) {
- unexpected_successes.push(description);
- }
- print(`${description}: PASS.`);
- }
-}
-
-function promise_test(func, description) {
- last_promise = last_promise.then(func)
- .then(_ => {
- if (known_failures[description]) {
- unexpected_successes.push(description);
- }
- print(`${description}: PASS.`);
- })
- .catch(err => {
- var known = "";
- if (known_failures[description]) {
- known = " (known)";
- }
- print(`${description}: FAIL${known}. ${err}`);
- failures.push(description);
- });
-}
-
-let assert_true = assertEquals.bind(null, true);
-let assert_false = assertEquals.bind(null, false);
-
-function same_value(x, y) {
- if (y !== y) {
- // NaN case
- return x!==x;
- }
- if (x === 0 && y === 0) {
- // Distinguish +0 and -0
- return 1/x === 1/y;
- }
- return x === y;
-}
-
-let assert_equals = function(expected, found, description) {
- if (typeof found != typeof expected) {
- assert_true(false, "assert_equals", description,
- "expected (" + typeof expected + ") ${expected} but got (" +
- typeof found + ") ${found}", {expected:expected, found:found});
- }
- assert_true(same_value(found, expected), "assert_equals", description,
- "expected ${expected} but got ${found}",
- {expected:expected, found:found});
-}
-
-let assert_not_equals = function(expected, found, description) {
- assert_true(!same_value(found, expected), "assert_not_equals", description,
- "got disallowed value ${found}", {found:found});
-}
-
-function assert_unreached(description) {
- throw new Error(`unreachable:\n${description}`);
-}
-
-function assertErrorMessage(f, ctor, test) {
- try { f(); }
- catch (e) {
- assert_true(e instanceof ctor, "expected exception " + ctor.name + ", got " + e);
- return;
- }
- assert_true(false, "expected exception " + ctor.name + ", no exception thrown");
-};
-
-load("test/wasm-js/test/harness/wasm-constants.js");
-load("test/wasm-js/test/harness/wasm-module-builder.js");
-load("test/wasm-js/test/js-api/jsapi.js");
-
-assertPromiseResult(last_promise, _ => {
- if (failures.length > 0) {
- let unexpected = false;
- print("Some tests FAILED:");
- for (let i in failures) {
- if (known_failures[failures[i]]) {
- print(` ${failures[i]} [KNOWN: ${known_failures[failures[i]]}]`);
- } else {
- print(` ${failures[i]}`);
- unexpected = true;
- }
- }
- if (unexpected_successes.length > 0) {
- unexpected = true;
- print("");
- print("Unexpected successes:");
- for(let i in unexpected_successes) {
- print(` ${unexpected_successes[i]}`);
- }
- print("Some tests SUCCEEDED but were known failures. If you've fixed " +
- "the bug, please remove the test from the known failures list.")
- }
- if (unexpected) {
- print("\n");
- print(" #############################################################");
- print(" # #");
- print(" # Unexpected outcome. Did you forget to run 'gclient sync'? #");
- print(" # #");
- print(" #############################################################");
- print("\n");
- assertUnreachable("Unexpected outcome");
- }
- }
-});
diff --git a/deps/v8/test/mjsunit/wasm/large-offset.js b/deps/v8/test/mjsunit/wasm/large-offset.js
index 7c458974d4..653194c159 100644
--- a/deps/v8/test/mjsunit/wasm/large-offset.js
+++ b/deps/v8/test/mjsunit/wasm/large-offset.js
@@ -5,15 +5,15 @@
load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
-function testGrowMemoryOutOfBoundsOffset() {
- print("testGrowMemoryOutOfBoundsOffset2");
+function testMemoryGrowOutOfBoundsOffset() {
+ print("testMemoryGrowOutOfBoundsOffset2");
var builder = new WasmModuleBuilder();
builder.addMemory(16, 128, false);
builder.addFunction("main", kSig_v_v)
.addBody([
kExprI32Const, 20,
kExprI32Const, 29,
- kExprGrowMemory, kMemoryZero,
+ kExprMemoryGrow, kMemoryZero,
// Assembly equivalent Move <reg>,0xf5fffff
// with wasm memory reference relocation information
kExprI32StoreMem, 0, 0xFF, 0xFF, 0xFF, 0x7A
@@ -23,4 +23,4 @@ function testGrowMemoryOutOfBoundsOffset() {
assertTraps(kTrapMemOutOfBounds, module.exports.main);
}
-testGrowMemoryOutOfBoundsOffset();
+testMemoryGrowOutOfBoundsOffset();
diff --git a/deps/v8/test/mjsunit/wasm/liftoff.js b/deps/v8/test/mjsunit/wasm/liftoff.js
index 8570d3d523..66fa5b70ef 100644
--- a/deps/v8/test/mjsunit/wasm/liftoff.js
+++ b/deps/v8/test/mjsunit/wasm/liftoff.js
@@ -2,8 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --liftoff --wasm-async-compilation
-// Flags: --no-future --no-wasm-tier-up
+// Flags: --allow-natives-syntax --liftoff --no-future --no-wasm-tier-up
load('test/mjsunit/wasm/wasm-constants.js');
load('test/mjsunit/wasm/wasm-module-builder.js');
diff --git a/deps/v8/test/mjsunit/wasm/loop-rotation.js b/deps/v8/test/mjsunit/wasm/loop-rotation.js
new file mode 100644
index 0000000000..da7a45c4d0
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/loop-rotation.js
@@ -0,0 +1,77 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --turbo-loop-rotation --noliftoff --nowasm-tier-up
+
+load("test/mjsunit/wasm/wasm-constants.js");
+load("test/mjsunit/wasm/wasm-module-builder.js");
+
+(function TestTrivialLoop1() {
+ print(arguments.callee.name);
+ var builder = new WasmModuleBuilder();
+ builder.addFunction("main", kSig_v_i)
+ .addBody([
+ kExprLoop, kWasmStmt,
+ kExprGetLocal, 0,
+ kExprI32Const, 1,
+ kExprI32Sub,
+ kExprTeeLocal, 0,
+ kExprBrIf, 0,
+ kExprEnd,
+ ])
+ .exportFunc();
+ let module = new WebAssembly.Module(builder.toBuffer());
+ let instance = new WebAssembly.Instance(module);
+ instance.exports.main(1);
+ instance.exports.main(10);
+ instance.exports.main(100);
+})();
+
+(function TestTrivialLoop2() {
+ print(arguments.callee.name);
+ var builder = new WasmModuleBuilder();
+ builder.addFunction("main", kSig_v_i)
+ .addBody([
+ kExprLoop, kWasmStmt,
+ kExprGetLocal, 0,
+ kExprI32Const, 1,
+ kExprI32Sub,
+ kExprTeeLocal, 0,
+ kExprBrIf, 1,
+ kExprBr, 0,
+ kExprEnd,
+ ])
+ .exportFunc();
+ let module = new WebAssembly.Module(builder.toBuffer());
+ let instance = new WebAssembly.Instance(module);
+ instance.exports.main(1);
+ instance.exports.main(10);
+ instance.exports.main(100);
+})();
+
+(function TestNonRotatedLoopWithStore() {
+ print(arguments.callee.name);
+ var builder = new WasmModuleBuilder();
+ builder.addMemory(1, undefined, false);
+ builder.addFunction("main", kSig_v_i)
+ .addBody([
+ kExprLoop, kWasmStmt,
+ kExprGetLocal, 0,
+ kExprI32Const, 1,
+ kExprI32Sub,
+ kExprTeeLocal, 0,
+ kExprBrIf, 1,
+ kExprI32Const, 0,
+ kExprI32Const, 0,
+ kExprI32StoreMem, 0, 0,
+ kExprBr, 0,
+ kExprEnd,
+ ])
+ .exportFunc();
+ let module = new WebAssembly.Module(builder.toBuffer());
+ let instance = new WebAssembly.Instance(module);
+ instance.exports.main(1);
+ instance.exports.main(10);
+ instance.exports.main(100);
+})();
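loop-rotation.js pins TurboFan's loop-rotation pass (--turbo-loop-rotation, with Liftoff and tier-up disabled so TurboFan compiles the functions directly). Rotation moves the loop's exit test to the bottom so each iteration runs a single conditional branch; TestTrivialLoop1 above is, roughly, this counting loop (assuming i > 0 on entry):

  function main(i) {
    do {
      i = (i - 1) | 0;
    } while (i != 0);
  }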
diff --git a/deps/v8/test/mjsunit/wasm/memory-instance-validation.js b/deps/v8/test/mjsunit/wasm/memory-instance-validation.js
index 9828cd2a8b..c7aa32e4d5 100644
--- a/deps/v8/test/mjsunit/wasm/memory-instance-validation.js
+++ b/deps/v8/test/mjsunit/wasm/memory-instance-validation.js
@@ -18,7 +18,7 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
.addBody([kExprMemorySize, kMemoryZero])
.exportFunc();
builder.addFunction("grow", kSig_i_i)
- .addBody([kExprGetLocal, 0, kExprGrowMemory, kMemoryZero])
+ .addBody([kExprGetLocal, 0, kExprMemoryGrow, kMemoryZero])
.exportFunc();
var instances = [];
for (var i = 0; i < 5; i++) {
diff --git a/deps/v8/test/mjsunit/wasm/memory.js b/deps/v8/test/mjsunit/wasm/memory.js
index fbf4b18a27..3bfb052808 100644
--- a/deps/v8/test/mjsunit/wasm/memory.js
+++ b/deps/v8/test/mjsunit/wasm/memory.js
@@ -29,11 +29,11 @@ function assertMemoryIsValid(memory) {
assertThrows(() => new WebAssembly.Memory(1), TypeError);
assertThrows(() => new WebAssembly.Memory(""), TypeError);
- assertThrows(() => new WebAssembly.Memory({initial: -1}), RangeError);
- assertThrows(() => new WebAssembly.Memory({initial: outOfUint32RangeValue}), RangeError);
+ assertThrows(() => new WebAssembly.Memory({initial: -1}), TypeError);
+ assertThrows(() => new WebAssembly.Memory({initial: outOfUint32RangeValue}), TypeError);
- assertThrows(() => new WebAssembly.Memory({initial: 10, maximum: -1}), RangeError);
- assertThrows(() => new WebAssembly.Memory({initial: 10, maximum: outOfUint32RangeValue}), RangeError);
+ assertThrows(() => new WebAssembly.Memory({initial: 10, maximum: -1}), TypeError);
+ assertThrows(() => new WebAssembly.Memory({initial: 10, maximum: outOfUint32RangeValue}), TypeError);
assertThrows(() => new WebAssembly.Memory({initial: 10, maximum: 9}), RangeError);
let memory = new WebAssembly.Memory({initial: 1});
@@ -45,12 +45,6 @@ function assertMemoryIsValid(memory) {
assertMemoryIsValid(memory);
})();
-(function TestInitialIsUndefined() {
- // New memory with initial = undefined, which means initial = 0.
- let memory = new WebAssembly.Memory({initial: undefined});
- assertMemoryIsValid(memory);
-})();
-
(function TestMaximumIsUndefined() {
// New memory with maximum = undefined, which means maximum = 0.
let memory = new WebAssembly.Memory({initial: 0, maximum: undefined});
@@ -74,7 +68,7 @@ function assertMemoryIsValid(memory) {
assertMemoryIsValid(memory);
})();
-(function TestMaximumDoesHasProperty() {
+(function TestMaximumDoesNotHasProperty() {
var hasPropertyWasCalled = false;
var desc = {initial: 10};
var proxy = new Proxy({maximum: 16}, {
@@ -83,7 +77,7 @@ function assertMemoryIsValid(memory) {
Object.setPrototypeOf(desc, proxy);
let memory = new WebAssembly.Memory(desc);
assertMemoryIsValid(memory);
- assertTrue(hasPropertyWasCalled);
+ assertFalse(hasPropertyWasCalled);
})();
(function TestBuffer() {
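In memory.js, initial is now a required member (the TestInitialIsUndefined case is gone: passing undefined counts as missing and throws), out-of-range sizes throw TypeError, and maximum is read with a plain [[Get]], so a Proxy's has trap is no longer consulted. A sketch of the new contract, with the missing-initial case inferred from the deleted test:

  assertThrows(() => new WebAssembly.Memory({}), TypeError);         // initial required
  assertThrows(() => new WebAssembly.Memory({initial: -1}), TypeError);
  const memory = new WebAssembly.Memory({initial: 1, maximum: 2});   // still fine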
diff --git a/deps/v8/test/mjsunit/wasm/mutable-globals.js b/deps/v8/test/mjsunit/wasm/mutable-globals.js
index 7a94520a21..3ba22818d5 100644
--- a/deps/v8/test/mjsunit/wasm/mutable-globals.js
+++ b/deps/v8/test/mjsunit/wasm/mutable-globals.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --experimental-wasm-mut-global
-
function assertGlobalIsValid(global) {
assertSame(WebAssembly.Global.prototype, global.__proto__);
assertSame(WebAssembly.Global, global.constructor);
@@ -22,9 +20,9 @@ function assertGlobalIsValid(global) {
assertThrows(() => new WebAssembly.Global({}), TypeError);
assertThrows(() => new WebAssembly.Global({value: 'foo'}), TypeError);
- assertThrows(() => new WebAssembly.Global({value: 'i64'}), TypeError);
+ assertThrows(() => new WebAssembly.Global({value: 'i128'}), TypeError);
- for (let type of ['i32', 'f32', 'f64']) {
+ for (let type of ['i32', 'f32', 'f64', 'i64']) {
assertGlobalIsValid(new WebAssembly.Global({value: type}));
}
})();
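Mutable globals shipped, so the --experimental-wasm-mut-global flag is gone, and 'i64' joins the constructible WebAssembly.Global types (the invalid-type probe moves to 'i128'). Note the earlier js-api.js hunk: an i64 global can be constructed zero-initialized, but passing an initial value still throws while BigInt integration is disabled:

  const g = new WebAssembly.Global({value: 'i64', mutable: true});  // ok
  assertThrows(() => new WebAssembly.Global({value: 'i128'}), TypeError);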
diff --git a/deps/v8/test/mjsunit/wasm/streaming-compile.js b/deps/v8/test/mjsunit/wasm/streaming-compile.js
index 5f2ca6b9fa..74f107bf28 100644
--- a/deps/v8/test/mjsunit/wasm/streaming-compile.js
+++ b/deps/v8/test/mjsunit/wasm/streaming-compile.js
@@ -2,6 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --wasm-test-streaming --wasm-async-compilation --expose-wasm --allow-natives-syntax
+// Flags: --wasm-test-streaming --expose-wasm --allow-natives-syntax
load("test/mjsunit/wasm/async-compile.js");
diff --git a/deps/v8/test/mjsunit/wasm/streaming-error-position.js b/deps/v8/test/mjsunit/wasm/streaming-error-position.js
index c2b86c03fa..fe9f9a1410 100644
--- a/deps/v8/test/mjsunit/wasm/streaming-error-position.js
+++ b/deps/v8/test/mjsunit/wasm/streaming-error-position.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --wasm-test-streaming --wasm-async-compilation --expose-wasm --allow-natives-syntax
+// Flags: --wasm-test-streaming --expose-wasm --allow-natives-syntax
'use strict';
diff --git a/deps/v8/test/mjsunit/wasm/streaming-trap-location.js b/deps/v8/test/mjsunit/wasm/streaming-trap-location.js
index 1607ca76f2..5d693aeae7 100644
--- a/deps/v8/test/mjsunit/wasm/streaming-trap-location.js
+++ b/deps/v8/test/mjsunit/wasm/streaming-trap-location.js
@@ -2,6 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --wasm-test-streaming --wasm-async-compilation --expose-wasm
+// Flags: --wasm-test-streaming --expose-wasm
load("test/mjsunit/wasm/trap-location.js");
diff --git a/deps/v8/test/mjsunit/wasm/table-copy.js b/deps/v8/test/mjsunit/wasm/table-copy.js
new file mode 100644
index 0000000000..89572fa41e
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/table-copy.js
@@ -0,0 +1,275 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --experimental-wasm-bulk-memory
+
+load("test/mjsunit/wasm/wasm-constants.js");
+load("test/mjsunit/wasm/wasm-module-builder.js");
+
+(function TestTableCopyInbounds() {
+ print(arguments.callee.name);
+ let builder = new WasmModuleBuilder();
+ let sig_v_iii = builder.addType(kSig_v_iii);
+ let kTableSize = 5;
+
+ builder.setTableBounds(kTableSize, kTableSize);
+
+ builder.addFunction("copy", sig_v_iii)
+ .addBody([
+ kExprGetLocal, 0,
+ kExprGetLocal, 1,
+ kExprGetLocal, 2,
+ kNumericPrefix, kExprTableCopy, kTableZero])
+ .exportAs("copy");
+
+ let instance = builder.instantiate();
+ let copy = instance.exports.copy;
+ for (let i = 0; i < kTableSize; i++) {
+ copy(0, 0, i); // nop
+ copy(0, i, kTableSize - i);
+ copy(i, 0, kTableSize - i);
+ }
+ let big = 1000000;
+ copy(big, 0, 0); // nop
+ copy(0, big, 0); // nop
+})();
+
+function addFunction(builder, k) {
+ let m = builder.addFunction("", kSig_i_v)
+ .addBody([...wasmI32Const(k)]);
+ return m;
+}
+
+function addFunctions(builder, count) {
+ let o = {};
+ for (var i = 0; i < count; i++) {
+ o[`f${i}`] = addFunction(builder, i);
+ }
+ return o;
+}
+
+function assertTable(obj, ...elems) {
+ for (var i = 0; i < elems.length; i++) {
+ assertEquals(elems[i], obj.get(i));
+ }
+}
+
+(function TestTableCopyElems() {
+ print(arguments.callee.name);
+
+ let builder = new WasmModuleBuilder();
+ let sig_v_iii = builder.addType(kSig_v_iii);
+ let kTableSize = 5;
+
+ builder.setTableBounds(kTableSize, kTableSize);
+
+ {
+ let o = addFunctions(builder, kTableSize);
+ builder.addElementSegment(0, false,
+ [o.f0.index, o.f1.index, o.f2.index, o.f3.index, o.f4.index]);
+ }
+
+ builder.addFunction("copy", sig_v_iii)
+ .addBody([
+ kExprGetLocal, 0,
+ kExprGetLocal, 1,
+ kExprGetLocal, 2,
+ kNumericPrefix, kExprTableCopy, kTableZero])
+ .exportAs("copy");
+
+ builder.addExportOfKind("table", kExternalTable, 0);
+
+ let instance = builder.instantiate();
+ let table = instance.exports.table;
+ let f0 = table.get(0), f1 = table.get(1), f2 = table.get(2),
+ f3 = table.get(3), f4 = table.get(4);
+ let copy = instance.exports.copy;
+
+ assertEquals(0, f0());
+ assertEquals(1, f1());
+ assertEquals(2, f2());
+
+ assertTable(table, f0, f1, f2, f3, f4);
+ copy(0, 1, 1);
+ assertTable(table, f1, f1, f2, f3, f4);
+ copy(0, 1, 2);
+ assertTable(table, f1, f2, f2, f3, f4);
+ copy(3, 0, 2);
+ assertTable(table, f1, f2, f2, f1, f2);
+ copy(1, 0, 2);
+ assertTable(table, f1, f1, f2, f1, f2);
+})();
+
+function assertCall(call, ...elems) {
+ for (var i = 0; i < elems.length; i++) {
+ assertEquals(elems[i], call(i));
+ }
+}
+
+(function TestTableCopyCalls() {
+ print(arguments.callee.name);
+
+ let builder = new WasmModuleBuilder();
+ let sig_v_iii = builder.addType(kSig_v_iii);
+ let sig_i_i = builder.addType(kSig_i_i);
+ let sig_i_v = builder.addType(kSig_i_v);
+ let kTableSize = 5;
+
+ builder.setTableBounds(kTableSize, kTableSize);
+
+ {
+ let o = addFunctions(builder, 5);
+ builder.addElementSegment(0, false,
+ [o.f0.index, o.f1.index, o.f2.index, o.f3.index, o.f4.index]);
+ }
+
+ builder.addFunction("copy", sig_v_iii)
+ .addBody([
+ kExprGetLocal, 0,
+ kExprGetLocal, 1,
+ kExprGetLocal, 2,
+ kNumericPrefix, kExprTableCopy, kTableZero])
+ .exportAs("copy");
+
+ builder.addFunction("call", sig_i_i)
+ .addBody([
+ kExprGetLocal, 0,
+ kExprCallIndirect, sig_i_v, kTableZero])
+ .exportAs("call");
+
+ let instance = builder.instantiate();
+ let copy = instance.exports.copy;
+ let call = instance.exports.call;
+
+ assertCall(call, 0, 1, 2, 3, 4);
+ copy(0, 1, 1);
+ assertCall(call, 1, 1, 2, 3, 4);
+ copy(0, 1, 2);
+ assertCall(call, 1, 2, 2, 3, 4);
+ copy(3, 0, 2);
+ assertCall(call, 1, 2, 2, 1, 2);
+})();
+
+(function TestTableCopyOob1() {
+ print(arguments.callee.name);
+
+ let builder = new WasmModuleBuilder();
+ let sig_v_iii = builder.addType(kSig_v_iii);
+ let kTableSize = 5;
+
+ builder.setTableBounds(kTableSize, kTableSize);
+
+ builder.addFunction("copy", sig_v_iii)
+ .addBody([
+ kExprGetLocal, 0,
+ kExprGetLocal, 1,
+ kExprGetLocal, 2,
+ kNumericPrefix, kExprTableCopy, kTableZero])
+ .exportAs("copy");
+
+ let instance = builder.instantiate();
+ let copy = instance.exports.copy;
+ copy(0, 0, 1); // nop
+ copy(0, 0, kTableSize); // nop
+ assertThrows(() => copy(0, 0, kTableSize+1));
+ assertThrows(() => copy(1, 0, kTableSize));
+ assertThrows(() => copy(0, 1, kTableSize));
+
+ for (let big = 4294967295; big > 1000; big >>>= 1) {
+ assertThrows(() => copy(big, 0, 1));
+ assertThrows(() => copy(0, big, 1));
+ assertThrows(() => copy(0, 0, big));
+ }
+
+ for (let big = -1000; big != 0; big <<= 1) {
+ assertThrows(() => copy(big, 0, 1));
+ assertThrows(() => copy(0, big, 1));
+ assertThrows(() => copy(0, 0, big));
+ }
+})();
+
+(function TestTableCopyShared() {
+ print(arguments.callee.name);
+ let kTableSize = 5;
+
+ let table = new WebAssembly.Table({element: "anyfunc",
+ initial: kTableSize,
+ maximum: kTableSize});
+
+ let module = (() => {
+ let builder = new WasmModuleBuilder();
+ let sig_v_iii = builder.addType(kSig_v_iii);
+ let sig_i_i = builder.addType(kSig_i_i);
+ let sig_i_v = builder.addType(kSig_i_v);
+
+ builder.addImportedTable("m", "table", kTableSize, kTableSize);
+ var g = builder.addImportedGlobal("m", "g", kWasmI32);
+
+ for (let i = 0; i < kTableSize; i++) {
+ let f = builder.addFunction("", kSig_i_v)
+ .addBody([
+ kExprGetGlobal, g,
+ ...wasmI32Const(i),
+ kExprI32Add
+ ]);
+ f.exportAs(`f${i}`);
+ }
+
+ builder.addFunction("copy", sig_v_iii)
+ .addBody([
+ kExprGetLocal, 0,
+ kExprGetLocal, 1,
+ kExprGetLocal, 2,
+ kNumericPrefix, kExprTableCopy, kTableZero])
+ .exportAs("copy");
+
+ builder.addFunction("call", sig_i_i)
+ .addBody([
+ kExprGetLocal, 0,
+ kExprCallIndirect, sig_i_v, kTableZero])
+ .exportAs("call");
+
+ return builder.toModule();
+ })();
+
+ // Two different instances with different globals, to verify that
+ // dispatch tables get updated with the right instance.
+ let x = new WebAssembly.Instance(module, {m: {g: 1000, table: table}});
+ let y = new WebAssembly.Instance(module, {m: {g: 2000, table: table}});
+
+ let x_call = x.exports.call;
+ let y_call = y.exports.call;
+
+ assertNotEquals(x.exports.f3, y.exports.f3);
+
+ table.set(0, x.exports.f0);
+ table.set(1, x.exports.f1);
+ table.set(2, x.exports.f2);
+ table.set(3, y.exports.f3);
+ table.set(4, y.exports.f4);
+
+ assertEquals(2003, table.get(3)(3));
+ assertEquals(2003, x_call(3));
+ assertEquals(2003, y_call(3));
+
+ // Check that calling copy on either of them updates the dispatch table
+ // on both of them.
+ assertCall(x_call, 1000, 1001, 1002, 2003, 2004);
+ assertCall(y_call, 1000, 1001, 1002, 2003, 2004);
+
+ x.exports.copy(0, 1, 1);
+
+ assertCall(x_call, 1001, 1001, 1002, 2003, 2004);
+ assertCall(y_call, 1001, 1001, 1002, 2003, 2004);
+
+ y.exports.copy(0, 1, 2);
+
+ assertCall(x_call, 1001, 1002, 1002, 2003, 2004);
+ assertCall(y_call, 1001, 1002, 1002, 2003, 2004);
+
+ x.exports.copy(3, 0, 2);
+
+ assertCall(x_call, 1001, 1002, 1002, 1001, 1002);
+ assertCall(y_call, 1001, 1002, 1002, 1001, 1002);
+})();
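table-copy.js covers the bulk-memory table.copy instruction (encoded with kNumericPrefix, behind --experimental-wasm-bulk-memory). It copies a run of table slots with memmove-style overlap handling and traps when either range is out of bounds; indices are unsigned 32-bit, so negative JS arguments become huge values and trap too. A JS model of what copy(dst, src, len) does to the exported table:

  function tableCopy(table, dst, src, len) {
    if (src + len > table.length || dst + len > table.length) {
      throw new RangeError('out of bounds');  // the wasm instruction traps here
    }
    // Read the source run first so overlapping ranges copy like memmove.
    const run = [];
    for (let i = 0; i < len; i++) run.push(table.get(src + i));
    for (let i = 0; i < len; i++) table.set(dst + i, run[i]);
  }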
diff --git a/deps/v8/test/mjsunit/wasm/table-grow.js b/deps/v8/test/mjsunit/wasm/table-grow.js
index fbf49c5b47..8d3c717522 100644
--- a/deps/v8/test/mjsunit/wasm/table-grow.js
+++ b/deps/v8/test/mjsunit/wasm/table-grow.js
@@ -133,7 +133,7 @@ let id = (() => { // identity exported function
builder.addImportedTable("q", "table", 5, 32);
let g = builder.addImportedGlobal("q", "base", kWasmI32);
- builder.addFunctionTableInit(g, true,
+ builder.addElementSegment(g, true,
[funcs.mul.index, funcs.add.index, funcs.sub.index]);
builder.addExportOfKind("table", kExternalTable, 0);
let module = new WebAssembly.Module(builder.toBuffer());
@@ -179,10 +179,10 @@ let id = (() => { // identity exported function
let i = builder.addImport("q", "exp_inc", sig_i_i);
let t = builder.addImport("q", "exp_ten", sig_i_v);
- builder.setFunctionTableBounds(7, 35);
- // builder.addFunctionTableInit(g1, true,
+ builder.setTableBounds(7, 35);
+ // builder.addElementSegment(g1, true,
// [funcs.mul.index, funcs.add.index, funcs.sub.index]);
- builder.addFunctionTableInit(g1, true, [a, i, t]);
+ builder.addElementSegment(g1, true, [a, i, t]);
builder.addExportOfKind("table", kExternalTable, 0);
let module = new WebAssembly.Module(builder.toBuffer());
@@ -215,7 +215,7 @@ let id = (() => { // identity exported function
let funcs = addFunctions(builder1);
builder1.addImportedTable("q", "table", 6, 36);
- builder1.addFunctionTableInit(g, true,
+ builder1.addElementSegment(g, true,
[funcs.mul.index, funcs.add.index, funcs.sub.index]);
let module1 = new WebAssembly.Module(builder1.toBuffer());
@@ -267,7 +267,7 @@ let id = (() => { // identity exported function
kExprGetLocal, 0,
kExprCallIndirect, index_i_ii, kTableZero])
.exportAs("main");
- builder.addFunctionTableInit(0, false, [0], true);
+ builder.addElementSegment(0, false, [0], true);
return new WebAssembly.Module(builder.toBuffer());
}
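
The rename above (addFunctionTableInit to addElementSegment, setFunctionTableBounds to setTableBounds) tracks the builder changes to wasm-module-builder.js later in this patch; behavior is unchanged. A minimal sketch of the renamed API (same helpers as above; 'f42' is illustrative):

let builder = new WasmModuleBuilder();
let f = builder.addFunction('f42', kSig_i_v)
    .addBody([...wasmI32Const(42)])
    .exportFunc();
builder.setTableBounds(1, 10);                   // was setFunctionTableBounds
builder.addElementSegment(0, false, [f.index]);  // was addFunctionTableInit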
diff --git a/deps/v8/test/mjsunit/wasm/table.js b/deps/v8/test/mjsunit/wasm/table.js
index 32bdecad66..0f4a63396e 100644
--- a/deps/v8/test/mjsunit/wasm/table.js
+++ b/deps/v8/test/mjsunit/wasm/table.js
@@ -46,14 +46,14 @@ function assertTableIsValid(table, length) {
assertThrows(() => new WebAssembly.Table({element: "any", initial: 10}), TypeError);
assertThrows(() => new WebAssembly.Table(
- {element: "anyfunc", initial: -1}), RangeError);
+ {element: "anyfunc", initial: -1}), TypeError);
assertThrows(() => new WebAssembly.Table(
- {element: "anyfunc", initial: outOfUint32RangeValue}), RangeError);
+ {element: "anyfunc", initial: outOfUint32RangeValue}), TypeError);
assertThrows(() => new WebAssembly.Table(
- {element: "anyfunc", initial: 10, maximum: -1}), RangeError);
+ {element: "anyfunc", initial: 10, maximum: -1}), TypeError);
assertThrows(() => new WebAssembly.Table(
- {element: "anyfunc", initial: 10, maximum: outOfUint32RangeValue}), RangeError);
+ {element: "anyfunc", initial: 10, maximum: outOfUint32RangeValue}), TypeError);
assertThrows(() => new WebAssembly.Table(
{element: "anyfunc", initial: 10, maximum: 9}), RangeError);
@@ -75,31 +75,25 @@ function assertTableIsValid(table, length) {
assertEquals(null, table.get(0));
assertEquals(undefined, table[0]);
- table = new WebAssembly.Table({element: "anyfunc", initial: undefined});
+ table = new WebAssembly.Table({element: "anyfunc", initial: 0, maximum: 10});
assertTableIsValid(table, 0);
- table = new WebAssembly.Table({element: "anyfunc"});
+ table = new WebAssembly.Table({element: "anyfunc", initial: 0, maximum: "10"});
assertTableIsValid(table, 0);
- table = new WebAssembly.Table({element: "anyfunc", maximum: 10});
- assertTableIsValid(table, 0);
-
- table = new WebAssembly.Table({element: "anyfunc", maximum: "10"});
- assertTableIsValid(table, 0);
-
- table = new WebAssembly.Table({element: "anyfunc", maximum: {valueOf() { return "10" }}});
+ table = new WebAssembly.Table({element: "anyfunc", initial: 0, maximum: {valueOf() { return "10" }}});
assertTableIsValid(table, 0);
table = new WebAssembly.Table({element: "anyfunc", initial: 0, maximum: undefined});
assertTableIsValid(table, 0);
- table = new WebAssembly.Table({element: "anyfunc", maximum: kMaxUint31});
+ table = new WebAssembly.Table({element: "anyfunc", initial: 0, maximum: kMaxUint31});
assertTableIsValid(table, 0);
- table = new WebAssembly.Table({element: "anyfunc", maximum: kMaxUint32});
+ table = new WebAssembly.Table({element: "anyfunc", initial: 0, maximum: kMaxUint32});
assertTableIsValid(table, 0);
- table = new WebAssembly.Table({element: "anyfunc", maximum: kV8MaxWasmTableSize + 1});
+ table = new WebAssembly.Table({element: "anyfunc", initial: 0, maximum: kV8MaxWasmTableSize + 1});
assertTableIsValid(table, 0);
})();
@@ -147,10 +141,13 @@ function assertTableIsValid(table, length) {
assertEquals(null, table.get(i));
assertEquals(null, table.get(String(i)));
}
- for (let key of [0.4, "", NaN, {}, [], () => {}]) {
+ for (let key of [0.4, "", []]) {
assertEquals(null, table.get(key));
}
- for (let key of [-1, table.length, table.length * 10]) {
+ for (let key of [-1, NaN, {}, () => {}]) {
+ assertThrows(() => table.get(key), TypeError);
+ }
+ for (let key of [table.length, table.length * 10]) {
assertThrows(() => table.get(key), RangeError);
}
assertThrows(() => table.get(Symbol()), TypeError);
@@ -182,14 +179,19 @@ function assertTableIsValid(table, length) {
assertSame(undefined, table[i]);
}
- for (let key of [0.4, "", NaN, {}, [], () => {}]) {
+ for (let key of [0.4, "", []]) {
assertSame(undefined, table.set(0, null));
assertSame(undefined, table.set(key, f));
assertSame(f, table.get(0));
assertSame(undefined, table[key]);
}
+ for (let key of [NaN, {}, () => {}]) {
+ assertSame(undefined, table[key]);
+ assertThrows(() => table.set(key, f), TypeError);
+ }
- for (let key of [-1, table.length, table.length * 10]) {
+ assertThrows(() => table.set(-1, f), TypeError);
+ for (let key of [table.length, table.length * 10]) {
assertThrows(() => table.set(key, f), RangeError);
}
@@ -220,7 +222,12 @@ function assertTableIsValid(table, length) {
assertSame(f, table[i]);
}
- for (let key of [0.4, "", NaN, {}, [], () => {}]) {
+ for (let key of [NaN, {}, () => {}]) {
+ assertSame(f, table[key] = f);
+ assertSame(f, table[key]);
+ assertThrows(() => table.get(key), TypeError);
+ }
+ for (let key of [0.4, "", []]) {
assertSame(f, table[key] = f);
assertSame(f, table[key]);
assertSame(null, table.get(key));
@@ -252,7 +259,7 @@ function assertTableIsValid(table, length) {
check(table);
table.grow(10);
check(table);
- assertThrows(() => table.grow(-10), RangeError);
+ assertThrows(() => table.grow(-10), TypeError);
table = new WebAssembly.Table({element: "anyfunc", initial: 20, maximum: 25});
init(table);
@@ -264,7 +271,7 @@ function assertTableIsValid(table, length) {
table.grow(0);
check(table);
assertThrows(() => table.grow(1), RangeError);
- assertThrows(() => table.grow(-10), RangeError);
+ assertThrows(() => table.grow(-10), TypeError);
assertThrows(() => WebAssembly.Table.prototype.grow.call([], 0), TypeError);
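
The reworked expectations above follow the JS-API's integer conversion of table sizes and indices: a value that fails the conversion outright (negative, NaN, or an object coercing to NaN) now raises TypeError, while a value that converts cleanly but lies outside the table's bounds still raises RangeError. A condensed sketch of the split:

let t = new WebAssembly.Table({element: 'anyfunc', initial: 1});
assertThrows(() => new WebAssembly.Table(
    {element: 'anyfunc', initial: -1}), TypeError);  // fails conversion
assertThrows(() => t.get(-1), TypeError);            // fails conversion
assertThrows(() => t.get(t.length), RangeError);     // converts, out of bounds
assertThrows(() => t.grow(-10), TypeError);          // fails conversion
assertEquals(null, t.get(0.4));                      // truncates to index 0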
diff --git a/deps/v8/test/mjsunit/wasm/wasm-constants.js b/deps/v8/test/mjsunit/wasm/wasm-constants.js
index cc10e9953c..7583d39a9b 100644
--- a/deps/v8/test/mjsunit/wasm/wasm-constants.js
+++ b/deps/v8/test/mjsunit/wasm/wasm-constants.js
@@ -64,8 +64,8 @@ let kStartSectionCode = 8; // Start function declaration
let kElementSectionCode = 9; // Elements section
let kCodeSectionCode = 10; // Function code
let kDataSectionCode = 11; // Data segments
-let kNameSectionCode = 12; // Name section (encoded as string)
-let kExceptionSectionCode = 13; // Exception section (must appear before code section)
+let kExceptionSectionCode = 12; // Exception section (between Global & Export)
+let kDataCountSectionCode = 13; // Number of data segments (between Element & Code)
// Name section types
let kModuleNameCode = 0;
@@ -77,6 +77,11 @@ let kWasmAnyFunctionTypeForm = 0x70;
let kHasMaximumFlag = 1;
+// Segment flags
+let kActiveNoIndex = 0;
+let kPassive = 1;
+let kActiveWithIndex = 2;
+
// Function declaration flags
let kDeclFunctionName = 0x01;
let kDeclFunctionImport = 0x02;
@@ -91,6 +96,7 @@ let kWasmF32 = 0x7d;
let kWasmF64 = 0x7c;
let kWasmS128 = 0x7b;
let kWasmAnyRef = 0x6f;
+let kWasmAnyFunc = 0x70;
let kWasmExceptRef = 0x68;
let kExternalFunction = 0;
@@ -101,6 +107,9 @@ let kExternalException = 4;
let kTableZero = 0;
let kMemoryZero = 0;
+let kSegmentZero = 0;
+
+let kExceptionAttribute = 0;
// Useful signatures
let kSig_i_i = makeSig([kWasmI32], [kWasmI32]);
@@ -108,6 +117,8 @@ let kSig_l_l = makeSig([kWasmI64], [kWasmI64]);
let kSig_i_l = makeSig([kWasmI64], [kWasmI32]);
let kSig_i_ii = makeSig([kWasmI32, kWasmI32], [kWasmI32]);
let kSig_i_iii = makeSig([kWasmI32, kWasmI32, kWasmI32], [kWasmI32]);
+let kSig_v_iiii = makeSig([kWasmI32, kWasmI32, kWasmI32, kWasmI32], []);
+let kSig_f_ff = makeSig([kWasmF32, kWasmF32], [kWasmF32]);
let kSig_d_dd = makeSig([kWasmF64, kWasmF64], [kWasmF64]);
let kSig_l_ll = makeSig([kWasmI64, kWasmI64], [kWasmI64]);
let kSig_i_dd = makeSig([kWasmF64, kWasmF64], [kWasmI32]);
@@ -132,10 +143,14 @@ let kSig_iii_ii = makeSig([kWasmI32, kWasmI32], [kWasmI32, kWasmI32, kWasmI32]);
let kSig_v_f = makeSig([kWasmF32], []);
let kSig_f_f = makeSig([kWasmF32], [kWasmF32]);
+let kSig_f_d = makeSig([kWasmF64], [kWasmF32]);
let kSig_d_d = makeSig([kWasmF64], [kWasmF64]);
let kSig_r_r = makeSig([kWasmAnyRef], [kWasmAnyRef]);
+let kSig_a_a = makeSig([kWasmAnyFunc], [kWasmAnyFunc]);
let kSig_i_r = makeSig([kWasmAnyRef], [kWasmI32]);
let kSig_v_r = makeSig([kWasmAnyRef], []);
+let kSig_v_a = makeSig([kWasmAnyFunc], []);
+let kSig_v_rr = makeSig([kWasmAnyRef, kWasmAnyRef], []);
let kSig_r_v = makeSig([], [kWasmAnyRef]);
function makeSig(params, results) {
@@ -172,6 +187,8 @@ let kExprElse = 0x05;
let kExprTry = 0x06;
let kExprCatch = 0x07;
let kExprThrow = 0x08;
+let kExprRethrow = 0x09;
+let kExprBrOnExn = 0x0a;
let kExprEnd = 0x0b;
let kExprBr = 0x0c;
let kExprBrIf = 0x0d;
@@ -215,7 +232,7 @@ let kExprI64StoreMem8 = 0x3c;
let kExprI64StoreMem16 = 0x3d;
let kExprI64StoreMem32 = 0x3e;
let kExprMemorySize = 0x3f;
-let kExprGrowMemory = 0x40;
+let kExprMemoryGrow = 0x40;
let kExprI32Eqz = 0x45;
let kExprI32Eq = 0x46;
let kExprI32Ne = 0x47;
@@ -340,10 +357,30 @@ let kExprI32ReinterpretF32 = 0xbc;
let kExprI64ReinterpretF64 = 0xbd;
let kExprF32ReinterpretI32 = 0xbe;
let kExprF64ReinterpretI64 = 0xbf;
+let kExprI32SExtendI8 = 0xc0;
+let kExprI32SExtendI16 = 0xc1;
+let kExprI64SExtendI8 = 0xc2;
+let kExprI64SExtendI16 = 0xc3;
+let kExprI64SExtendI32 = 0xc4;
// Prefix opcodes
+let kNumericPrefix = 0xfc;
+let kSimdPrefix = 0xfd;
let kAtomicPrefix = 0xfe;
+// Numeric opcodes.
+let kExprMemoryInit = 0x08;
+let kExprMemoryDrop = 0x09;
+let kExprMemoryCopy = 0x0a;
+let kExprMemoryFill = 0x0b;
+let kExprTableInit = 0x0c;
+let kExprTableDrop = 0x0d;
+let kExprTableCopy = 0x0e;
+
+// Atomic opcodes.
+let kExprAtomicWake = 0x00;
+let kExprI32AtomicWait = 0x01;
+let kExprI64AtomicWait = 0x02;
let kExprI32AtomicLoad = 0x10;
let kExprI32AtomicLoad8U = 0x12;
let kExprI32AtomicLoad16U = 0x13;
@@ -368,9 +405,9 @@ let kExprI32AtomicXor16U = 0x3d;
let kExprI32AtomicExchange = 0x41;
let kExprI32AtomicExchange8U = 0x43;
let kExprI32AtomicExchange16U = 0x44;
-let kExprI32AtomicCompareExchange = 0x48
-let kExprI32AtomicCompareExchange8U = 0x4a
-let kExprI32AtomicCompareExchange16U = 0x4b
+let kExprI32AtomicCompareExchange = 0x48;
+let kExprI32AtomicCompareExchange8U = 0x4a;
+let kExprI32AtomicCompareExchange16U = 0x4b;
let kExprI64AtomicLoad = 0x11;
let kExprI64AtomicLoad8U = 0x14;
@@ -409,6 +446,9 @@ let kExprI64AtomicCompareExchange8U = 0x4c;
let kExprI64AtomicCompareExchange16U = 0x4d;
let kExprI64AtomicCompareExchange32U = 0x4e;
+// Simd opcodes.
+let kExprF32x4Min = 0x9e;
+
let kTrapUnreachable = 0;
let kTrapMemOutOfBounds = 1;
let kTrapDivByZero = 2;
@@ -419,6 +459,8 @@ let kTrapFuncInvalid = 6;
let kTrapFuncSigMismatch = 7;
let kTrapTypeError = 8;
let kTrapUnalignedAccess = 9;
+let kTrapDataSegmentDropped = 10;
+let kTrapElemSegmentDropped = 11;
let kTrapMsgs = [
"unreachable",
@@ -430,7 +472,9 @@ let kTrapMsgs = [
"invalid index into function table",
"function signature mismatch",
"wasm function signature contains illegal type",
- "operation does not support unaligned accesses"
+ "operation does not support unaligned accesses",
+ "data segment has been dropped",
+ "element segment has been dropped"
];
function assertTraps(trap, code) {
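
Among the opcode updates above, kExprGrowMemory is renamed kExprMemoryGrow; the encoding (0x40 plus a reserved memory-index immediate) is unchanged. A minimal sketch with the renamed constant (builder helpers as elsewhere in this patch; addMemory comes from wasm-module-builder.js):

let builder = new WasmModuleBuilder();
builder.addMemory(1, 2, false);  // 1 initial page, maximum 2
builder.addFunction('grow', kSig_i_i)
    .addBody([kExprGetLocal, 0, kExprMemoryGrow, kMemoryZero])
    .exportFunc();
let instance = builder.instantiate();
assertEquals(1, instance.exports.grow(1));   // returns the old size in pages
assertEquals(-1, instance.exports.grow(5));  // beyond the maximum: fails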
diff --git a/deps/v8/test/mjsunit/wasm/wasm-math-intrinsic.js b/deps/v8/test/mjsunit/wasm/wasm-math-intrinsic.js
new file mode 100644
index 0000000000..44bfedbfdb
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/wasm-math-intrinsic.js
@@ -0,0 +1,257 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-wasm --wasm-math-intrinsics
+
+load('test/mjsunit/wasm/wasm-constants.js');
+load('test/mjsunit/wasm/wasm-module-builder.js');
+
+function verbose(...args) {
+ // print(...args);
+}
+
+//=============================================
+// Global count of failures
+//=============================================
+let numFailures = 0;
+
+function reportFailure(name, vals, m, w) {
+ print(' error: ' + name + '(' + vals + ') == ' + w + ', expected ' + m);
+ numFailures++;
+}
+
+let global_imports = {Math: Math};
+
+let inputs = [
+ 1 / 0,
+ -1 / 0,
+ 0 / 0,
+ -2.70497e+38,
+ -1.4698e+37,
+ -1.22813e+35,
+ -1.34584e+34,
+ -1.0079e+32,
+ -6.49364e+26,
+ -3.06077e+25,
+ -1.46821e+25,
+ -1.17658e+23,
+ -1.9617e+22,
+ -2.7357e+20,
+ -9223372036854775808.0, // INT64_MIN
+ -1.48708e+13,
+ -1.89633e+12,
+ -4.66622e+11,
+ -2.22581e+11,
+ -1.45381e+10,
+ -2147483904.0, // First float32 after INT32_MIN
+ -2147483648.0, // INT32_MIN
+ -2147483520.0, // Last float32 before INT32_MIN
+ -1.3956e+09,
+ -1.32951e+09,
+ -1.30721e+09,
+ -1.19756e+09,
+ -9.26822e+08,
+ -5.09256e+07,
+ -964300.0,
+ -192446.0,
+ -28455.0,
+ -27194.0,
+ -20575.0,
+ -17069.0,
+ -9167.0,
+ -960.178,
+ -113.0,
+ -62.0,
+ -15.0,
+ -7.0,
+ -1.0,
+ -0.0256635,
+ -4.60374e-07,
+ -3.63759e-10,
+ -4.30175e-14,
+ -5.27385e-15,
+ -1.5707963267948966,
+ -1.48084e-15,
+ -2.220446049250313e-16,
+ -1.05755e-19,
+ -3.2995e-21,
+ -1.67354e-23,
+ -1.11885e-23,
+ -1.78506e-30,
+ -1.43718e-34,
+ -1.27126e-38,
+ -0.0,
+ 3e-88,
+ -2e66,
+ 0.0,
+ 2e66,
+ 1.17549e-38,
+ 1.56657e-37,
+ 4.08512e-29,
+ 6.25073e-22,
+ 4.1723e-13,
+ 1.44343e-09,
+ 1.5707963267948966,
+ 5.27004e-08,
+ 9.48298e-08,
+ 5.57888e-07,
+ 4.89988e-05,
+ 0.244326,
+ 1.0,
+ 12.4895,
+ 19.0,
+ 47.0,
+ 106.0,
+ 538.324,
+ 564.536,
+ 819.124,
+ 7048.0,
+ 12611.0,
+ 19878.0,
+ 20309.0,
+ 797056.0,
+ 1.77219e+09,
+ 2147483648.0, // INT32_MAX + 1
+ 4294967296.0, // UINT32_MAX + 1
+ 1.51116e+11,
+ 4.18193e+13,
+ 3.59167e+16,
+ 9223372036854775808.0, // INT64_MAX + 1
+ 18446744073709551616.0, // UINT64_MAX + 1
+ 3.38211e+19,
+ 2.67488e+20,
+ 1.78831e+21,
+ 9.20914e+21,
+ 8.35654e+23,
+ 1.4495e+24,
+ 5.94015e+25,
+ 4.43608e+30,
+ 2.44502e+33,
+ 1.38178e+37,
+ 1.71306e+37,
+ 3.31899e+38,
+ 3.40282e+38,
+];
+
+function genUnop(name, sig) {
+ var builder = new WasmModuleBuilder();
+
+ var sig_index = builder.addType(sig);
+ builder.addImport('Math', name, sig_index);
+ builder.addFunction('main', sig_index)
+ .addBody([
+ kExprGetLocal, 0, // --
+ kExprCallFunction, 0
+ ]) // --
+ .exportAs('main');
+
+ return builder.instantiate(global_imports).exports.main;
+}
+
+function genBinop(name, sig) {
+ var builder = new WasmModuleBuilder();
+
+ var sig_index = builder.addType(sig);
+ builder.addImport('Math', name, sig_index);
+ builder.addFunction('main', sig_index)
+ .addBody([
+ kExprGetLocal, 0, // --
+ kExprGetLocal, 1, // --
+ kExprCallFunction, 0
+ ]) // --
+ .exportAs('main');
+
+ return builder.instantiate(global_imports).exports.main;
+}
+
+function assertUnop(name, math_func, wasm_func) {
+ for (val of inputs) {
+ verbose(' ', val);
+ let m = math_func(val);
+ let w = wasm_func(val);
+ if (!deepEquals(m, w)) reportFailure(name, [val], m, w);
+ }
+}
+
+function assertBinop(name, math_func, wasm_func) {
+ let inputs2 = [1, 0.5, -1, -0.5, 0, -0, 1 / 0, -1 / 0, 0 / 0];
+ for (val of inputs) {
+ verbose(' ', val);
+ for (val2 of inputs2) {
+ verbose(' ', val2);
+ let m = math_func(val, val2);
+ let w = wasm_func(val, val2);
+ if (!deepEquals(m, w)) reportFailure(name, [val, val2], m, w);
+ m = math_func(val2, val);
+ w = wasm_func(val2, val);
+ if (!deepEquals(m, w)) reportFailure(name, [val2, val], m, w);
+ }
+ }
+}
+
+(function TestF64() {
+ let f64_intrinsics = [
+ 'acos', 'asin', 'atan', 'cos', 'sin', 'tan', 'exp', 'log',
+ 'atan2', 'pow', 'ceil', 'floor', 'sqrt', 'min', 'max', 'abs',
+ ];
+
+ for (name of f64_intrinsics) {
+ let math_func = Math[name];
+ let f32 = false;
+ print('Testing (f64) Math.' + name);
+ switch (math_func.length) {
+ case 1: {
+ let wasm_func = genUnop(name, kSig_d_d);
+ assertUnop('(f64)' + name, math_func, wasm_func);
+ break;
+ }
+ case 2: {
+ let wasm_func = genBinop(name, kSig_d_dd);
+ assertBinop('(f64)' + name, math_func, wasm_func);
+ break;
+ }
+ default:
+ throw 'Unexpected param count: ' + math_func.length;
+ }
+ }
+})();
+
+(function TestF32() {
+ let f32_intrinsics = ['min', 'max', 'abs', 'ceil', 'floor', 'sqrt'];
+
+ for (name of f32_intrinsics) {
+ let r = Math.fround, f = Math[name];
+ print('Testing (f32) Math.' + name);
+ switch (f.length) {
+ case 1: {
+ let wasm_func = genUnop(name, kSig_f_f);
+ let math_func = (val) => r(f(r(val)));
+ assertUnop('(f32)' + name, math_func, wasm_func);
+ break;
+ }
+ case 2: {
+ let wasm_func = genBinop(name, kSig_f_ff);
+ let math_func = (v1, v2) => r(f(r(v1), r(v2)));
+ assertBinop('(f32)' + name, math_func, wasm_func);
+ break;
+ }
+ default:
+ throw 'Unexpected param count: ' + f.length;
+ }
+ }
+})();
+
+(function TestFround() {
+ let name = 'fround';
+ print('Testing (f32) Math.' + name);
+
+ let wasm_func = genUnop(name, kSig_f_d); // fround has a special signature.
+ let f = Math[name];
+ let r = Math.fround;
+ let math_func = (val) => r(f(r(val)));
+ assertUnop(name, math_func, wasm_func);
+})();
+
+assertEquals(0, numFailures);
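
For reference, the harness above boils down to cases like the following: genUnop('sqrt', kSig_d_d) builds a one-function module whose body just calls the imported Math.sqrt, which --wasm-math-intrinsics allows V8 to lower to the corresponding machine operation; the test then checks it still agrees with Math. An illustrative use:

let wasm_sqrt = genUnop('sqrt', kSig_d_d);
assertEquals(3, wasm_sqrt(9));
assertEquals(NaN, wasm_sqrt(-1));  // NaN behavior must match Math.sqrt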
diff --git a/deps/v8/test/mjsunit/wasm/wasm-module-builder.js b/deps/v8/test/mjsunit/wasm/wasm-module-builder.js
index 38b4a0e308..b6959be23e 100644
--- a/deps/v8/test/mjsunit/wasm/wasm-module-builder.js
+++ b/deps/v8/test/mjsunit/wasm/wasm-module-builder.js
@@ -69,12 +69,13 @@ class Binary extends Array {
// Emit section name.
this.emit_u8(section_code);
// Emit the section to a temporary buffer: its full length isn't known yet.
- let section = new Binary;
+ const section = new Binary;
content_generator(section);
// Emit section length.
this.emit_u32v(section.length);
// Copy the temporary buffer.
- for (var i = 0; i < section.length; i++) this.push(section[i]);
+ // Avoid spread because {section} can be huge.
+ for (let b of section) this.push(b);
}
}
@@ -84,10 +85,11 @@ class WasmFunctionBuilder {
this.name = name;
this.type_index = type_index;
this.body = [];
+ this.locals = [];
+ this.local_names = [];
}
numLocalNames() {
- if (this.local_names === undefined) return 0;
let num_local_names = 0;
for (let loc_name of this.local_names) {
if (loc_name !== undefined) ++num_local_names;
@@ -123,7 +125,7 @@ class WasmFunctionBuilder {
getNumLocals() {
let total_locals = 0;
- for (let l of this.locals || []) {
+ for (let l of this.locals) {
for (let type of ["i32", "i64", "f32", "f64", "s128"]) {
total_locals += l[type + "_count"] || 0;
}
@@ -133,10 +135,8 @@ class WasmFunctionBuilder {
addLocals(locals, names) {
const old_num_locals = this.getNumLocals();
- if (!this.locals) this.locals = []
this.locals.push(locals);
if (names) {
- if (!this.local_names) this.local_names = [];
const missing_names = old_num_locals - this.local_names.length;
this.local_names.push(...new Array(missing_names), ...names);
}
@@ -171,11 +171,10 @@ class WasmModuleBuilder {
this.globals = [];
this.exceptions = [];
this.functions = [];
- this.function_table = [];
- this.function_table_length_min = 0;
- this.function_table_length_max = 0;
- this.function_table_inits = [];
- this.segments = [];
+ this.table_length_min = 0;
+ this.table_length_max = undefined;
+ this.element_segments = [];
+ this.data_segments = [];
this.explicit = [];
this.num_imported_funcs = 0;
this.num_imported_globals = 0;
@@ -211,7 +210,10 @@ class WasmModuleBuilder {
name = this.stringToBytes(name);
var length = new Binary();
length.emit_u32v(name.length + bytes.length);
- this.explicit.push([0, ...length, ...name, ...bytes]);
+ var section = [0, ...length, ...name];
+ // Avoid spread because {bytes} can be huge.
+ for (var b of bytes) section.push(b);
+ this.explicit.push(section);
}
addType(type) {
@@ -229,11 +231,9 @@ class WasmModuleBuilder {
}
addException(type) {
- if (type.results.length != 0) {
- throw new Error('Exception signature must have void result: ' + type);
- }
+ let type_index = (typeof type) == "number" ? type : this.addType(type);
let except_index = this.exceptions.length + this.num_imported_exceptions;
- this.exceptions.push(type);
+ this.exceptions.push(type_index);
return except_index;
}
@@ -245,7 +245,7 @@ class WasmModuleBuilder {
return func;
}
- addImport(module = "", name, type) {
+ addImport(module, name, type) {
if (this.functions.length != 0) {
throw new Error('Imported functions must be declared before local ones');
}
@@ -255,7 +255,7 @@ class WasmModuleBuilder {
return this.num_imported_funcs++;
}
- addImportedGlobal(module = "", name, type, mutable = false) {
+ addImportedGlobal(module, name, type, mutable = false) {
if (this.globals.length != 0) {
throw new Error('Imported globals must be declared before local ones');
}
@@ -265,27 +265,25 @@ class WasmModuleBuilder {
return this.num_imported_globals++;
}
- addImportedMemory(module = "", name, initial = 0, maximum, shared) {
+ addImportedMemory(module, name, initial = 0, maximum, shared) {
let o = {module: module, name: name, kind: kExternalMemory,
initial: initial, maximum: maximum, shared: shared};
this.imports.push(o);
return this;
}
- addImportedTable(module = "", name, initial, maximum) {
+ addImportedTable(module, name, initial, maximum) {
let o = {module: module, name: name, kind: kExternalTable, initial: initial,
maximum: maximum};
this.imports.push(o);
}
- addImportedException(module = "", name, type) {
- if (type.results.length != 0) {
- throw new Error('Exception signature must have void result: ' + type);
- }
+ addImportedException(module, name, type) {
if (this.exceptions.length != 0) {
throw new Error('Imported exceptions must be declared before local ones');
}
- let o = {module: module, name: name, kind: kExternalException, type: type};
+ let type_index = (typeof type) == "number" ? type : this.addType(type);
+ let o = {module: module, name: name, kind: kExternalException, type: type_index};
this.imports.push(o);
return this.num_imported_exceptions++;
}
@@ -301,40 +299,51 @@ class WasmModuleBuilder {
}
addDataSegment(addr, data, is_global = false) {
- this.segments.push({addr: addr, data: data, is_global: is_global});
- return this.segments.length - 1;
+ this.data_segments.push(
+ {addr: addr, data: data, is_global: is_global, is_active: true});
+ return this.data_segments.length - 1;
+ }
+
+ addPassiveDataSegment(data) {
+ this.data_segments.push({data: data, is_active: false});
+ return this.data_segments.length - 1;
}
exportMemoryAs(name) {
this.exports.push({name: name, kind: kExternalMemory, index: 0});
}
- addFunctionTableInit(base, is_global, array, is_import = false) {
- this.function_table_inits.push({base: base, is_global: is_global,
- array: array});
+ addElementSegment(base, is_global, array, is_import = false) {
+ this.element_segments.push({base: base, is_global: is_global,
+ array: array, is_active: true});
if (!is_global) {
var length = base + array.length;
- if (length > this.function_table_length_min && !is_import) {
- this.function_table_length_min = length;
+ if (length > this.table_length_min && !is_import) {
+ this.table_length_min = length;
}
- if (length > this.function_table_length_max && !is_import) {
- this.function_table_length_max = length;
+ if (length > this.table_length_max && !is_import) {
+ this.table_length_max = length;
}
}
return this;
}
+ addPassiveElementSegment(array, is_import = false) {
+ this.element_segments.push({array: array, is_active: false});
+ return this;
+ }
+
appendToTable(array) {
for (let n of array) {
if (typeof n != 'number')
throw new Error('invalid table (entries have to be numbers): ' + array);
}
- return this.addFunctionTableInit(this.function_table.length, false, array);
+ return this.addElementSegment(this.table_length_min, false, array);
}
- setFunctionTableBounds(min, max) {
- this.function_table_length_min = min;
- this.function_table_length_max = max;
+ setTableBounds(min, max = undefined) {
+ this.table_length_min = min;
+ this.table_length_max = max;
return this;
}
@@ -400,10 +409,8 @@ class WasmModuleBuilder {
section.emit_u32v(imp.initial); // initial
if (has_max) section.emit_u32v(imp.maximum); // maximum
} else if (imp.kind == kExternalException) {
- section.emit_u32v(imp.type.params.length);
- for (let param of imp.type.params) {
- section.emit_u8(param);
- }
+ section.emit_u32v(kExceptionAttribute);
+ section.emit_u32v(imp.type);
} else {
throw new Error("unknown/unsupported import kind " + imp.kind);
}
@@ -422,17 +429,17 @@ class WasmModuleBuilder {
});
}
- // Add function_table.
- if (wasm.function_table_length_min > 0) {
+ // Add table section
+ if (wasm.table_length_min > 0) {
if (debug) print("emitting table @ " + binary.length);
binary.emit_section(kTableSectionCode, section => {
section.emit_u8(1); // one table entry
section.emit_u8(kWasmAnyFunctionTypeForm);
- // TODO(gdeepti): Cleanup to use optional max flag,
- // fix up tests to set correct initial/maximum values
- section.emit_u32v(1);
- section.emit_u32v(wasm.function_table_length_min);
- section.emit_u32v(wasm.function_table_length_max);
+ const max = wasm.table_length_max;
+ const has_max = max !== undefined;
+ section.emit_u8(has_max ? kHasMaximumFlag : 0);
+ section.emit_u32v(wasm.table_length_min);
+ if (has_max) section.emit_u32v(max);
});
}
@@ -493,6 +500,9 @@ class WasmModuleBuilder {
section.emit_u8(byte_view[6]);
section.emit_u8(byte_view[7]);
break;
+ case kWasmAnyRef:
+ section.emit_u8(kExprRefNull);
+ break;
}
} else {
// Emit a global-index initializer.
@@ -510,10 +520,8 @@ class WasmModuleBuilder {
binary.emit_section(kExceptionSectionCode, section => {
section.emit_u32v(wasm.exceptions.length);
for (let type of wasm.exceptions) {
- section.emit_u32v(type.params.length);
- for (let param of type.params) {
- section.emit_u8(param);
- }
+ section.emit_u32v(kExceptionAttribute);
+ section.emit_u32v(type);
}
});
}
@@ -546,22 +554,26 @@ class WasmModuleBuilder {
});
}
- // Add table elements.
- if (wasm.function_table_inits.length > 0) {
- if (debug) print("emitting table @ " + binary.length);
+ // Add element segments
+ if (wasm.element_segments.length > 0) {
+ if (debug) print("emitting element segments @ " + binary.length);
binary.emit_section(kElementSectionCode, section => {
- var inits = wasm.function_table_inits;
+ var inits = wasm.element_segments;
section.emit_u32v(inits.length);
for (let init of inits) {
- section.emit_u8(0); // table index
- if (init.is_global) {
- section.emit_u8(kExprGetGlobal);
+ if (init.is_active) {
+ section.emit_u8(0); // table index / flags
+ if (init.is_global) {
+ section.emit_u8(kExprGetGlobal);
+ } else {
+ section.emit_u8(kExprI32Const);
+ }
+ section.emit_u32v(init.base);
+ section.emit_u8(kExprEnd);
} else {
- section.emit_u8(kExprI32Const);
+ section.emit_u8(kPassive); // flags
}
- section.emit_u32v(init.base);
- section.emit_u8(kExprEnd);
section.emit_u32v(init.array.length);
for (let index of init.array) {
section.emit_u32v(index);
@@ -570,6 +582,13 @@ class WasmModuleBuilder {
});
}
+ // If there are any passive data segments, add the DataCount section.
+ if (wasm.data_segments.some(seg => !seg.is_active)) {
+ binary.emit_section(kDataCountSectionCode, section => {
+ section.emit_u32v(wasm.data_segments.length);
+ });
+ }
+
// Add function bodies.
if (wasm.functions.length > 0) {
// emit function bodies
@@ -618,22 +637,26 @@ class WasmModuleBuilder {
}
// Add data segments.
- if (wasm.segments.length > 0) {
+ if (wasm.data_segments.length > 0) {
if (debug) print("emitting data segments @ " + binary.length);
binary.emit_section(kDataSectionCode, section => {
- section.emit_u32v(wasm.segments.length);
- for (let seg of wasm.segments) {
- section.emit_u8(0); // linear memory index 0
- if (seg.is_global) {
- // initializer is a global variable
- section.emit_u8(kExprGetGlobal);
- section.emit_u32v(seg.addr);
+ section.emit_u32v(wasm.data_segments.length);
+ for (let seg of wasm.data_segments) {
+ if (seg.is_active) {
+ section.emit_u8(0); // linear memory index 0 / flags
+ if (seg.is_global) {
+ // initializer is a global variable
+ section.emit_u8(kExprGetGlobal);
+ section.emit_u32v(seg.addr);
+ } else {
+ // initializer is a constant
+ section.emit_u8(kExprI32Const);
+ section.emit_u32v(seg.addr);
+ }
+ section.emit_u8(kExprEnd);
} else {
- // initializer is a constant
- section.emit_u8(kExprI32Const);
- section.emit_u32v(seg.addr);
+ section.emit_u8(kPassive); // flags
}
- section.emit_u8(kExprEnd);
section.emit_u32v(seg.data.length);
section.emit_bytes(seg.data);
}
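
Putting the new pieces together: a passive data segment is emitted with just the kPassive flag and its bytes (no init expression), its presence triggers the DataCount section above, and its contents are materialized at runtime via memory.init. A sketch under the same assumptions as the mjsunit tests in this patch (bulk-memory enabled; the memory.init immediates, segment index then memory index, follow this patch's numeric-opcode encoding):

let builder = new WasmModuleBuilder();
builder.addMemory(1, 1, false);
builder.exportMemoryAs('mem');
let seg = builder.addPassiveDataSegment([0xaa, 0xbb, 0xcc]);
builder.addFunction('init', kSig_v_iii)
    .addBody([
      kExprGetLocal, 0,  // dst offset in memory
      kExprGetLocal, 1,  // src offset in the segment
      kExprGetLocal, 2,  // size in bytes
      kNumericPrefix, kExprMemoryInit, seg, kMemoryZero
    ])
    .exportAs('init');
let instance = builder.instantiate();
instance.exports.init(16, 0, 3);  // copy all three segment bytes to address 16
let view = new Uint8Array(instance.exports.mem.buffer);
assertEquals(0xbb, view[17]);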
diff --git a/deps/v8/test/mkgrokdump/mkgrokdump.cc b/deps/v8/test/mkgrokdump/mkgrokdump.cc
index 387d064974..5582fd2b75 100644
--- a/deps/v8/test/mkgrokdump/mkgrokdump.cc
+++ b/deps/v8/test/mkgrokdump/mkgrokdump.cc
@@ -48,11 +48,11 @@ class MockArrayBufferAllocator : public v8::ArrayBuffer::Allocator {
static void DumpMaps(i::PagedSpace* space) {
i::HeapObjectIterator it(space);
i::ReadOnlyRoots roots(space->heap());
- for (i::Object* o = it.Next(); o != nullptr; o = it.Next()) {
+ for (i::HeapObject o = it.Next(); !o.is_null(); o = it.Next()) {
if (!o->IsMap()) continue;
- i::Map* m = i::Map::cast(o);
+ i::Map m = i::Map::cast(o);
const char* n = nullptr;
- intptr_t p = reinterpret_cast<intptr_t>(m) & 0x7FFFF;
+ intptr_t p = static_cast<intptr_t>(m.ptr()) & 0x7FFFF;
int t = m->instance_type();
READ_ONLY_ROOT_LIST(RO_ROOT_LIST_CASE)
MUTABLE_ROOT_LIST(MUTABLE_ROOT_LIST_CASE)
@@ -113,16 +113,16 @@ static int DumpHeapConstants(const char* argv0) {
if (s->identity() == i::CODE_SPACE || s->identity() == i::MAP_SPACE)
continue;
const char* sname = s->name();
- for (i::Object* o = it.Next(); o != nullptr; o = it.Next()) {
+ for (i::HeapObject o = it.Next(); !o.is_null(); o = it.Next()) {
// Skip maps in RO_SPACE since they will be reported elsewhere.
if (o->IsMap()) continue;
const char* n = nullptr;
i::RootIndex i = i::RootIndex::kFirstSmiRoot;
- intptr_t p = reinterpret_cast<intptr_t>(o) & 0x7FFFF;
+ intptr_t p = o.ptr() & 0x7FFFF;
STRONG_READ_ONLY_ROOT_LIST(RO_ROOT_LIST_CASE)
MUTABLE_ROOT_LIST(ROOT_LIST_CASE)
if (n == nullptr) continue;
- if (!i::Heap::RootIsImmortalImmovable(i)) continue;
+ if (!i::RootsTable::IsImmortalImmovable(i)) continue;
i::PrintF(" (\"%s\", 0x%05" V8PRIxPTR "): \"%s\",\n", sname, p, n);
}
}
diff --git a/deps/v8/test/mkgrokdump/mkgrokdump.status b/deps/v8/test/mkgrokdump/mkgrokdump.status
index 8fd6a0417a..110cf6d15e 100644
--- a/deps/v8/test/mkgrokdump/mkgrokdump.status
+++ b/deps/v8/test/mkgrokdump/mkgrokdump.status
@@ -3,8 +3,10 @@
# found in the LICENSE file.
[
+
# Only test for default mode x64.
-['variant != default or arch != x64', {
+['variant != default or arch != x64 or lite_mode', {
'*': [SKIP],
-}], # variant != default or arch != x64
+}], # variant != default or arch != x64 or lite_mode
+
]
diff --git a/deps/v8/test/test262/BUILD.gn b/deps/v8/test/test262/BUILD.gn
index 334511168d..093e489df0 100644
--- a/deps/v8/test/test262/BUILD.gn
+++ b/deps/v8/test/test262/BUILD.gn
@@ -15,9 +15,10 @@ group("v8_test262") {
"detachArrayBuffer.js",
"harness/",
"harness-adapt.js",
+ "harness-adapt-donotevaluate.js",
"harness-agent.js",
"test262.status",
"testcfg.py",
"local-tests/",
]
-}
\ No newline at end of file
+}
diff --git a/deps/v8/test/test262/detachArrayBuffer.js b/deps/v8/test/test262/detachArrayBuffer.js
index 959773b5ca..d9ca415c2b 100644
--- a/deps/v8/test/test262/detachArrayBuffer.js
+++ b/deps/v8/test/test262/detachArrayBuffer.js
@@ -3,7 +3,7 @@
// found in the LICENSE file.
function $DETACHBUFFER(buffer) {
- %ArrayBufferNeuter(buffer);
+ %ArrayBufferDetach(buffer);
}
$262.detachArrayBuffer = $DETACHBUFFER;
diff --git a/deps/v8/test/test262/harness-adapt-donotevaluate.js b/deps/v8/test/test262/harness-adapt-donotevaluate.js
new file mode 100644
index 0000000000..d489c0008c
--- /dev/null
+++ b/deps/v8/test/test262/harness-adapt-donotevaluate.js
@@ -0,0 +1,12 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// V8 has several long-standing bugs where "early errors", i.e. errors that are
+// supposed to be thrown at parse time, end up being thrown at runtime instead.
+// This file is used to implement the FAIL_PHASE_ONLY outcome as used in
+// test/test262/test262.status. Tests marked with this outcome are run in a
+// special mode that verifies a) that V8 throws an exception at all, and b) that
+// the exception has the correct type, but ignores the fact that it is thrown
+// after parsing is done. See crbug.com/v8/8467 for details.
+$DONOTEVALUATE = () => {};
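
For context, this is the shape of a test262 early-error test the override targets (an illustrative sketch, not a real test file): the default harness's $DONOTEVALUATE throws if control ever reaches it, so under FAIL_PHASE_ONLY the no-op override lets execution continue to the buggy late throw, and only the exception's presence and type are checked.

// negative:
//   phase: parse
//   type: SyntaxError
$DONOTEVALUATE();
/{1}/u;  // nothing to repeat: an early SyntaxError, which V8 throws late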
diff --git a/deps/v8/test/test262/test262.status b/deps/v8/test/test262/test262.status
index 4210263d40..d547824b12 100644
--- a/deps/v8/test/test262/test262.status
+++ b/deps/v8/test/test262/test262.status
@@ -142,216 +142,237 @@
'language/eval-code/direct/var-env-lower-lex-catch-non-strict': [FAIL],
# https://bugs.chromium.org/p/v8/issues/detail?id=4951
- 'language/expressions/assignment/dstr-array-elem-iter-rtrn-close-err': [FAIL],
- 'language/expressions/assignment/dstr-array-elem-iter-thrw-close': [FAIL],
- 'language/expressions/assignment/dstr-array-elem-iter-thrw-close-err': [FAIL],
- 'language/expressions/assignment/dstr-array-elem-trlg-iter-list-thrw-close': [FAIL],
- 'language/expressions/assignment/dstr-array-elem-trlg-iter-list-thrw-close-err': [FAIL],
- 'language/expressions/assignment/dstr-array-elem-trlg-iter-rest-rtrn-close': [FAIL],
- 'language/expressions/assignment/dstr-array-elem-trlg-iter-rest-rtrn-close-err': [FAIL],
- 'language/expressions/assignment/dstr-array-elem-trlg-iter-rest-rtrn-close-null': [FAIL],
- 'language/expressions/assignment/dstr-array-elem-trlg-iter-rest-thrw-close': [FAIL],
- 'language/expressions/assignment/dstr-array-elem-trlg-iter-rest-thrw-close-err': [FAIL],
- 'language/expressions/assignment/dstr-array-rest-iter-rtrn-close': [FAIL],
- 'language/expressions/assignment/dstr-array-rest-iter-rtrn-close-err': [FAIL],
- 'language/expressions/assignment/dstr-array-rest-iter-rtrn-close-null': [FAIL],
- 'language/expressions/assignment/dstr-array-rest-iter-thrw-close': [FAIL],
- 'language/expressions/assignment/dstr-array-rest-iter-thrw-close-err': [FAIL],
- 'language/expressions/assignment/dstr-array-rest-lref-err': [FAIL],
- 'language/statements/for-of/dstr-array-elem-iter-rtrn-close-err': [FAIL],
- 'language/statements/for-of/dstr-array-elem-iter-thrw-close': [FAIL],
- 'language/statements/for-of/dstr-array-elem-iter-thrw-close-err': [FAIL],
- 'language/statements/for-of/dstr-array-elem-trlg-iter-list-thrw-close': [FAIL],
- 'language/statements/for-of/dstr-array-elem-trlg-iter-list-thrw-close-err': [FAIL],
- 'language/statements/for-of/dstr-array-elem-trlg-iter-rest-rtrn-close': [FAIL],
- 'language/statements/for-of/dstr-array-elem-trlg-iter-rest-rtrn-close-err': [FAIL],
- 'language/statements/for-of/dstr-array-elem-trlg-iter-rest-rtrn-close-null': [FAIL],
- 'language/statements/for-of/dstr-array-elem-trlg-iter-rest-thrw-close': [FAIL],
- 'language/statements/for-of/dstr-array-elem-trlg-iter-rest-thrw-close-err': [FAIL],
- 'language/statements/for-of/dstr-array-rest-iter-rtrn-close': [FAIL],
- 'language/statements/for-of/dstr-array-rest-iter-rtrn-close-err': [FAIL],
- 'language/statements/for-of/dstr-array-rest-iter-rtrn-close-null': [FAIL],
- 'language/statements/for-of/dstr-array-rest-iter-thrw-close': [FAIL],
- 'language/statements/for-of/dstr-array-rest-iter-thrw-close-err': [FAIL],
- 'language/statements/for-of/dstr-array-rest-lref-err': [FAIL],
'language/expressions/assignment/destructuring/iterator-destructuring-property-reference-target-evaluation-order': [FAIL],
'language/expressions/assignment/destructuring/keyed-destructuring-property-reference-target-evaluation-order': [FAIL],
# https://bugs.chromium.org/p/v8/issues/detail?id=896
- 'built-ins/RegExp/property-escapes/binary-property-with-value-ASCII_-_F': [FAIL],
- 'built-ins/RegExp/property-escapes/binary-property-with-value-ASCII_-_F-negated': [FAIL],
- 'built-ins/RegExp/property-escapes/binary-property-with-value-ASCII_-_Invalid': [FAIL],
- 'built-ins/RegExp/property-escapes/binary-property-with-value-ASCII_-_Invalid-negated': [FAIL],
- 'built-ins/RegExp/property-escapes/binary-property-with-value-ASCII_-_N': [FAIL],
- 'built-ins/RegExp/property-escapes/binary-property-with-value-ASCII_-_N-negated': [FAIL],
- 'built-ins/RegExp/property-escapes/binary-property-with-value-ASCII_-_No': [FAIL],
- 'built-ins/RegExp/property-escapes/binary-property-with-value-ASCII_-_No-negated': [FAIL],
- 'built-ins/RegExp/property-escapes/binary-property-with-value-ASCII_-_T': [FAIL],
- 'built-ins/RegExp/property-escapes/binary-property-with-value-ASCII_-_T-negated': [FAIL],
- 'built-ins/RegExp/property-escapes/binary-property-with-value-ASCII_-_Y': [FAIL],
- 'built-ins/RegExp/property-escapes/binary-property-with-value-ASCII_-_Y-negated': [FAIL],
- 'built-ins/RegExp/property-escapes/binary-property-with-value-ASCII_-_Yes': [FAIL],
- 'built-ins/RegExp/property-escapes/binary-property-with-value-ASCII_-_Yes-negated': [FAIL],
- 'built-ins/RegExp/property-escapes/character-class-range-end': [FAIL],
- 'built-ins/RegExp/property-escapes/character-class-range-no-dash-end': [FAIL],
- 'built-ins/RegExp/property-escapes/character-class-range-no-dash-start': [FAIL],
- 'built-ins/RegExp/property-escapes/character-class-range-start': [FAIL],
- 'built-ins/RegExp/property-escapes/grammar-extension-In-prefix-Block-implicit': [FAIL],
- 'built-ins/RegExp/property-escapes/grammar-extension-In-prefix-Block-implicit-negated': [FAIL],
- 'built-ins/RegExp/property-escapes/grammar-extension-In-prefix-Script': [FAIL],
- 'built-ins/RegExp/property-escapes/grammar-extension-In-prefix-Script-implicit': [FAIL],
- 'built-ins/RegExp/property-escapes/grammar-extension-In-prefix-Script-implicit-negated': [FAIL],
- 'built-ins/RegExp/property-escapes/grammar-extension-In-prefix-Script-negated': [FAIL],
- 'built-ins/RegExp/property-escapes/grammar-extension-Is-prefix-Script': [FAIL],
- 'built-ins/RegExp/property-escapes/grammar-extension-Is-prefix-Script-negated': [FAIL],
- 'built-ins/RegExp/property-escapes/grammar-extension-circumflex-negation': [FAIL],
- 'built-ins/RegExp/property-escapes/grammar-extension-circumflex-negation-negated': [FAIL],
- 'built-ins/RegExp/property-escapes/grammar-extension-empty': [FAIL],
- 'built-ins/RegExp/property-escapes/grammar-extension-empty-negated': [FAIL],
- 'built-ins/RegExp/property-escapes/grammar-extension-invalid': [FAIL],
- 'built-ins/RegExp/property-escapes/grammar-extension-invalid-negated': [FAIL],
- 'built-ins/RegExp/property-escapes/grammar-extension-no-braces': [FAIL],
- 'built-ins/RegExp/property-escapes/grammar-extension-no-braces-negated': [FAIL],
- 'built-ins/RegExp/property-escapes/grammar-extension-no-braces-value': [FAIL],
- 'built-ins/RegExp/property-escapes/grammar-extension-no-braces-value-negated': [FAIL],
- 'built-ins/RegExp/property-escapes/grammar-extension-separator': [FAIL],
- 'built-ins/RegExp/property-escapes/grammar-extension-separator-and-value-only': [FAIL],
- 'built-ins/RegExp/property-escapes/grammar-extension-separator-and-value-only-negated': [FAIL],
- 'built-ins/RegExp/property-escapes/grammar-extension-separator-negated': [FAIL],
- 'built-ins/RegExp/property-escapes/grammar-extension-separator-only': [FAIL],
- 'built-ins/RegExp/property-escapes/grammar-extension-separator-only-negated': [FAIL],
- 'built-ins/RegExp/property-escapes/grammar-extension-unclosed': [FAIL],
- 'built-ins/RegExp/property-escapes/grammar-extension-unclosed-negated': [FAIL],
- 'built-ins/RegExp/property-escapes/grammar-extension-unopened': [FAIL],
- 'built-ins/RegExp/property-escapes/grammar-extension-unopened-negated': [FAIL],
- 'built-ins/RegExp/property-escapes/loose-matching-01': [FAIL],
- 'built-ins/RegExp/property-escapes/loose-matching-01-negated': [FAIL],
- 'built-ins/RegExp/property-escapes/loose-matching-02': [FAIL],
- 'built-ins/RegExp/property-escapes/loose-matching-02-negated': [FAIL],
- 'built-ins/RegExp/property-escapes/loose-matching-03': [FAIL],
- 'built-ins/RegExp/property-escapes/loose-matching-03-negated': [FAIL],
- 'built-ins/RegExp/property-escapes/loose-matching-04': [FAIL],
- 'built-ins/RegExp/property-escapes/loose-matching-04-negated': [FAIL],
- 'built-ins/RegExp/property-escapes/loose-matching-05': [FAIL],
- 'built-ins/RegExp/property-escapes/loose-matching-05-negated': [FAIL],
- 'built-ins/RegExp/property-escapes/loose-matching-06': [FAIL],
- 'built-ins/RegExp/property-escapes/loose-matching-06-negated': [FAIL],
- 'built-ins/RegExp/property-escapes/loose-matching-07': [FAIL],
- 'built-ins/RegExp/property-escapes/loose-matching-07-negated': [FAIL],
- 'built-ins/RegExp/property-escapes/loose-matching-08': [FAIL],
- 'built-ins/RegExp/property-escapes/loose-matching-08-negated': [FAIL],
- 'built-ins/RegExp/property-escapes/loose-matching-09': [FAIL],
- 'built-ins/RegExp/property-escapes/loose-matching-09-negated': [FAIL],
- 'built-ins/RegExp/property-escapes/loose-matching-10': [FAIL],
- 'built-ins/RegExp/property-escapes/loose-matching-10-negated': [FAIL],
- 'built-ins/RegExp/property-escapes/loose-matching-11': [FAIL],
- 'built-ins/RegExp/property-escapes/loose-matching-11-negated': [FAIL],
- 'built-ins/RegExp/property-escapes/loose-matching-12': [FAIL],
- 'built-ins/RegExp/property-escapes/loose-matching-12-negated': [FAIL],
- 'built-ins/RegExp/property-escapes/loose-matching-13': [FAIL],
- 'built-ins/RegExp/property-escapes/loose-matching-13-negated': [FAIL],
- 'built-ins/RegExp/property-escapes/loose-matching-14': [FAIL],
- 'built-ins/RegExp/property-escapes/loose-matching-14-negated': [FAIL],
- 'built-ins/RegExp/property-escapes/non-binary-property-without-value-General_Category': [FAIL],
- 'built-ins/RegExp/property-escapes/non-binary-property-without-value-General_Category-equals': [FAIL],
- 'built-ins/RegExp/property-escapes/non-binary-property-without-value-General_Category-equals-negated': [FAIL],
- 'built-ins/RegExp/property-escapes/non-binary-property-without-value-General_Category-negated': [FAIL],
- 'built-ins/RegExp/property-escapes/non-binary-property-without-value-Script': [FAIL],
- 'built-ins/RegExp/property-escapes/non-binary-property-without-value-Script-equals': [FAIL],
- 'built-ins/RegExp/property-escapes/non-binary-property-without-value-Script-equals-negated': [FAIL],
- 'built-ins/RegExp/property-escapes/non-binary-property-without-value-Script-negated': [FAIL],
- 'built-ins/RegExp/property-escapes/non-binary-property-without-value-Script_Extensions': [FAIL],
- 'built-ins/RegExp/property-escapes/non-binary-property-without-value-Script_Extensions-equals': [FAIL],
- 'built-ins/RegExp/property-escapes/non-binary-property-without-value-Script_Extensions-equals-negated': [FAIL],
- 'built-ins/RegExp/property-escapes/non-binary-property-without-value-Script_Extensions-negated': [FAIL],
- 'built-ins/RegExp/property-escapes/non-existent-binary-property': [FAIL],
- 'built-ins/RegExp/property-escapes/non-existent-binary-property-negated': [FAIL],
- 'built-ins/RegExp/property-escapes/non-existent-property-and-value': [FAIL],
- 'built-ins/RegExp/property-escapes/non-existent-property-and-value-negated': [FAIL],
- 'built-ins/RegExp/property-escapes/non-existent-property-existing-value': [FAIL],
- 'built-ins/RegExp/property-escapes/non-existent-property-existing-value-negated': [FAIL],
- 'built-ins/RegExp/property-escapes/non-existent-property-value-General_Category-negated': [FAIL],
- 'built-ins/RegExp/property-escapes/non-existent-property-value-Script': [FAIL],
- 'built-ins/RegExp/property-escapes/non-existent-property-value-Script-negated': [FAIL],
- 'built-ins/RegExp/property-escapes/non-existent-property-value-Script_Extensions': [FAIL],
- 'built-ins/RegExp/property-escapes/non-existent-property-value-Script_Extensions-negated': [FAIL],
- 'built-ins/RegExp/property-escapes/non-existent-property-value-general-category': [FAIL],
- 'built-ins/RegExp/property-escapes/unsupported-binary-property-Composition_Exclusion': [FAIL],
- 'built-ins/RegExp/property-escapes/unsupported-binary-property-Composition_Exclusion-negated': [FAIL],
- 'built-ins/RegExp/property-escapes/unsupported-binary-property-Expands_On_NFC': [FAIL],
- 'built-ins/RegExp/property-escapes/unsupported-binary-property-Expands_On_NFC-negated': [FAIL],
- 'built-ins/RegExp/property-escapes/unsupported-binary-property-Expands_On_NFD': [FAIL],
- 'built-ins/RegExp/property-escapes/unsupported-binary-property-Expands_On_NFD-negated': [FAIL],
- 'built-ins/RegExp/property-escapes/unsupported-binary-property-Expands_On_NFKC': [FAIL],
- 'built-ins/RegExp/property-escapes/unsupported-binary-property-Expands_On_NFKC-negated': [FAIL],
- 'built-ins/RegExp/property-escapes/unsupported-binary-property-Expands_On_NFKD': [FAIL],
- 'built-ins/RegExp/property-escapes/unsupported-binary-property-Expands_On_NFKD-negated': [FAIL],
- 'built-ins/RegExp/property-escapes/unsupported-binary-property-FC_NFKC_Closure': [FAIL],
- 'built-ins/RegExp/property-escapes/unsupported-binary-property-FC_NFKC_Closure-negated': [FAIL],
- 'built-ins/RegExp/property-escapes/unsupported-binary-property-Full_Composition_Exclusion': [FAIL],
- 'built-ins/RegExp/property-escapes/unsupported-binary-property-Full_Composition_Exclusion-negated': [FAIL],
- 'built-ins/RegExp/property-escapes/unsupported-binary-property-Grapheme_Link': [FAIL],
- 'built-ins/RegExp/property-escapes/unsupported-binary-property-Grapheme_Link-negated': [FAIL],
- 'built-ins/RegExp/property-escapes/unsupported-binary-property-Hyphen': [FAIL],
- 'built-ins/RegExp/property-escapes/unsupported-binary-property-Hyphen-negated': [FAIL],
- 'built-ins/RegExp/property-escapes/unsupported-binary-property-Other_Alphabetic': [FAIL],
- 'built-ins/RegExp/property-escapes/unsupported-binary-property-Other_Alphabetic-negated': [FAIL],
- 'built-ins/RegExp/property-escapes/unsupported-binary-property-Other_Default_Ignorable_Code_Point': [FAIL],
- 'built-ins/RegExp/property-escapes/unsupported-binary-property-Other_Default_Ignorable_Code_Point-negated': [FAIL],
- 'built-ins/RegExp/property-escapes/unsupported-binary-property-Other_Grapheme_Extend': [FAIL],
- 'built-ins/RegExp/property-escapes/unsupported-binary-property-Other_Grapheme_Extend-negated': [FAIL],
- 'built-ins/RegExp/property-escapes/unsupported-binary-property-Other_ID_Continue': [FAIL],
- 'built-ins/RegExp/property-escapes/unsupported-binary-property-Other_ID_Continue-negated': [FAIL],
- 'built-ins/RegExp/property-escapes/unsupported-binary-property-Other_ID_Start': [FAIL],
- 'built-ins/RegExp/property-escapes/unsupported-binary-property-Other_ID_Start-negated': [FAIL],
- 'built-ins/RegExp/property-escapes/unsupported-binary-property-Other_Lowercase': [FAIL],
- 'built-ins/RegExp/property-escapes/unsupported-binary-property-Other_Lowercase-negated': [FAIL],
- 'built-ins/RegExp/property-escapes/unsupported-binary-property-Other_Math': [FAIL],
- 'built-ins/RegExp/property-escapes/unsupported-binary-property-Other_Math-negated': [FAIL],
- 'built-ins/RegExp/property-escapes/unsupported-binary-property-Other_Uppercase': [FAIL],
- 'built-ins/RegExp/property-escapes/unsupported-binary-property-Other_Uppercase-negated': [FAIL],
- 'built-ins/RegExp/property-escapes/unsupported-binary-property-Prepended_Concatenation_Mark': [FAIL],
- 'built-ins/RegExp/property-escapes/unsupported-binary-property-Prepended_Concatenation_Mark-negated': [FAIL],
- 'built-ins/RegExp/property-escapes/unsupported-property-Block-with-value': [FAIL],
- 'built-ins/RegExp/property-escapes/unsupported-property-Block-with-value-negated': [FAIL],
- 'built-ins/RegExp/property-escapes/unsupported-property-FC_NFKC_Closure': [FAIL],
- 'built-ins/RegExp/property-escapes/unsupported-property-FC_NFKC_Closure-negated': [FAIL],
- 'built-ins/RegExp/property-escapes/unsupported-property-Line_Break': [FAIL],
- 'built-ins/RegExp/property-escapes/unsupported-property-Line_Break-negated': [FAIL],
- 'built-ins/RegExp/property-escapes/unsupported-property-Line_Break-with-value': [FAIL],
- 'built-ins/RegExp/property-escapes/unsupported-property-Line_Break-with-value-negated': [FAIL],
- 'language/literals/regexp/early-err-pattern': [FAIL],
- 'language/literals/regexp/invalid-braced-quantifier-exact': [FAIL],
- 'language/literals/regexp/invalid-braced-quantifier-lower': [FAIL],
- 'language/literals/regexp/invalid-braced-quantifier-range': [FAIL],
- 'language/literals/regexp/invalid-optional-lookbehind': [FAIL],
- 'language/literals/regexp/invalid-optional-negative-lookbehind': [FAIL],
- 'language/literals/regexp/invalid-range-lookbehind': [FAIL],
- 'language/literals/regexp/invalid-range-negative-lookbehind': [FAIL],
- 'language/literals/regexp/u-dec-esc': [FAIL],
- 'language/literals/regexp/u-invalid-class-escape': [FAIL],
- 'language/literals/regexp/u-invalid-extended-pattern-char': [FAIL],
- 'language/literals/regexp/u-invalid-identity-escape': [FAIL],
- 'language/literals/regexp/u-invalid-legacy-octal-escape': [FAIL],
- 'language/literals/regexp/u-invalid-non-empty-class-ranges': [FAIL],
- 'language/literals/regexp/u-invalid-non-empty-class-ranges-no-dash-a': [FAIL],
- 'language/literals/regexp/u-invalid-non-empty-class-ranges-no-dash-ab': [FAIL],
- 'language/literals/regexp/u-invalid-non-empty-class-ranges-no-dash-b': [FAIL],
- 'language/literals/regexp/u-invalid-oob-decimal-escape': [FAIL],
- 'language/literals/regexp/u-invalid-optional-lookahead': [FAIL],
- 'language/literals/regexp/u-invalid-optional-lookbehind': [FAIL],
- 'language/literals/regexp/u-invalid-optional-negative-lookahead': [FAIL],
- 'language/literals/regexp/u-invalid-optional-negative-lookbehind': [FAIL],
- 'language/literals/regexp/u-invalid-range-lookahead': [FAIL],
- 'language/literals/regexp/u-invalid-range-lookbehind': [FAIL],
- 'language/literals/regexp/u-invalid-range-negative-lookahead': [FAIL],
- 'language/literals/regexp/u-invalid-range-negative-lookbehind': [FAIL],
- 'language/literals/regexp/u-unicode-esc-bounds': [FAIL],
- 'language/literals/regexp/u-unicode-esc-non-hex': [FAIL],
- 'language/literals/regexp/unicode-escape-nls-err': [FAIL],
-
- # https://bugs.chromium.org/p/v8/issues/detail?id=7828
- 'language/statements/try/early-catch-function': [FAIL],
+ 'built-ins/RegExp/property-escapes/binary-property-with-value-ASCII_-_F': [FAIL_PHASE_ONLY],
+ 'built-ins/RegExp/property-escapes/binary-property-with-value-ASCII_-_F-negated': [FAIL_PHASE_ONLY],
+ 'built-ins/RegExp/property-escapes/binary-property-with-value-ASCII_-_Invalid': [FAIL_PHASE_ONLY],
+ 'built-ins/RegExp/property-escapes/binary-property-with-value-ASCII_-_Invalid-negated': [FAIL_PHASE_ONLY],
+ 'built-ins/RegExp/property-escapes/binary-property-with-value-ASCII_-_N': [FAIL_PHASE_ONLY],
+ 'built-ins/RegExp/property-escapes/binary-property-with-value-ASCII_-_N-negated': [FAIL_PHASE_ONLY],
+ 'built-ins/RegExp/property-escapes/binary-property-with-value-ASCII_-_No': [FAIL_PHASE_ONLY],
+ 'built-ins/RegExp/property-escapes/binary-property-with-value-ASCII_-_No-negated': [FAIL_PHASE_ONLY],
+ 'built-ins/RegExp/property-escapes/binary-property-with-value-ASCII_-_T': [FAIL_PHASE_ONLY],
+ 'built-ins/RegExp/property-escapes/binary-property-with-value-ASCII_-_T-negated': [FAIL_PHASE_ONLY],
+ 'built-ins/RegExp/property-escapes/binary-property-with-value-ASCII_-_Y': [FAIL_PHASE_ONLY],
+ 'built-ins/RegExp/property-escapes/binary-property-with-value-ASCII_-_Y-negated': [FAIL_PHASE_ONLY],
+ 'built-ins/RegExp/property-escapes/binary-property-with-value-ASCII_-_Yes': [FAIL_PHASE_ONLY],
+ 'built-ins/RegExp/property-escapes/binary-property-with-value-ASCII_-_Yes-negated': [FAIL_PHASE_ONLY],
+ 'built-ins/RegExp/property-escapes/character-class-range-end': [FAIL_PHASE_ONLY],
+ 'built-ins/RegExp/property-escapes/character-class-range-no-dash-end': [FAIL_PHASE_ONLY],
+ 'built-ins/RegExp/property-escapes/character-class-range-no-dash-start': [FAIL_PHASE_ONLY],
+ 'built-ins/RegExp/property-escapes/character-class-range-start': [FAIL_PHASE_ONLY],
+ 'built-ins/RegExp/property-escapes/grammar-extension-In-prefix-Block-implicit': [FAIL_PHASE_ONLY],
+ 'built-ins/RegExp/property-escapes/grammar-extension-In-prefix-Block-implicit-negated': [FAIL_PHASE_ONLY],
+ 'built-ins/RegExp/property-escapes/grammar-extension-In-prefix-Script': [FAIL_PHASE_ONLY],
+ 'built-ins/RegExp/property-escapes/grammar-extension-In-prefix-Script-implicit': [FAIL_PHASE_ONLY],
+ 'built-ins/RegExp/property-escapes/grammar-extension-In-prefix-Script-implicit-negated': [FAIL_PHASE_ONLY],
+ 'built-ins/RegExp/property-escapes/grammar-extension-In-prefix-Script-negated': [FAIL_PHASE_ONLY],
+ 'built-ins/RegExp/property-escapes/grammar-extension-Is-prefix-Script': [FAIL_PHASE_ONLY],
+ 'built-ins/RegExp/property-escapes/grammar-extension-Is-prefix-Script-negated': [FAIL_PHASE_ONLY],
+ 'built-ins/RegExp/property-escapes/grammar-extension-circumflex-negation': [FAIL_PHASE_ONLY],
+ 'built-ins/RegExp/property-escapes/grammar-extension-circumflex-negation-negated': [FAIL_PHASE_ONLY],
+ 'built-ins/RegExp/property-escapes/grammar-extension-empty': [FAIL_PHASE_ONLY],
+ 'built-ins/RegExp/property-escapes/grammar-extension-empty-negated': [FAIL_PHASE_ONLY],
+ 'built-ins/RegExp/property-escapes/grammar-extension-invalid': [FAIL_PHASE_ONLY],
+ 'built-ins/RegExp/property-escapes/grammar-extension-invalid-negated': [FAIL_PHASE_ONLY],
+ 'built-ins/RegExp/property-escapes/grammar-extension-no-braces': [FAIL_PHASE_ONLY],
+ 'built-ins/RegExp/property-escapes/grammar-extension-no-braces-negated': [FAIL_PHASE_ONLY],
+ 'built-ins/RegExp/property-escapes/grammar-extension-no-braces-value': [FAIL_PHASE_ONLY],
+ 'built-ins/RegExp/property-escapes/grammar-extension-no-braces-value-negated': [FAIL_PHASE_ONLY],
+ 'built-ins/RegExp/property-escapes/grammar-extension-separator': [FAIL_PHASE_ONLY],
+ 'built-ins/RegExp/property-escapes/grammar-extension-separator-and-value-only': [FAIL_PHASE_ONLY],
+ 'built-ins/RegExp/property-escapes/grammar-extension-separator-and-value-only-negated': [FAIL_PHASE_ONLY],
+ 'built-ins/RegExp/property-escapes/grammar-extension-separator-negated': [FAIL_PHASE_ONLY],
+ 'built-ins/RegExp/property-escapes/grammar-extension-separator-only': [FAIL_PHASE_ONLY],
+ 'built-ins/RegExp/property-escapes/grammar-extension-separator-only-negated': [FAIL_PHASE_ONLY],
+ 'built-ins/RegExp/property-escapes/grammar-extension-unclosed': [FAIL_PHASE_ONLY],
+ 'built-ins/RegExp/property-escapes/grammar-extension-unclosed-negated': [FAIL_PHASE_ONLY],
+ 'built-ins/RegExp/property-escapes/grammar-extension-unopened': [FAIL_PHASE_ONLY],
+ 'built-ins/RegExp/property-escapes/grammar-extension-unopened-negated': [FAIL_PHASE_ONLY],
+ 'built-ins/RegExp/property-escapes/loose-matching-01': [FAIL_PHASE_ONLY],
+ 'built-ins/RegExp/property-escapes/loose-matching-01-negated': [FAIL_PHASE_ONLY],
+ 'built-ins/RegExp/property-escapes/loose-matching-02': [FAIL_PHASE_ONLY],
+ 'built-ins/RegExp/property-escapes/loose-matching-02-negated': [FAIL_PHASE_ONLY],
+ 'built-ins/RegExp/property-escapes/loose-matching-03': [FAIL_PHASE_ONLY],
+ 'built-ins/RegExp/property-escapes/loose-matching-03-negated': [FAIL_PHASE_ONLY],
+ 'built-ins/RegExp/property-escapes/loose-matching-04': [FAIL_PHASE_ONLY],
+ 'built-ins/RegExp/property-escapes/loose-matching-04-negated': [FAIL_PHASE_ONLY],
+ 'built-ins/RegExp/property-escapes/loose-matching-05': [FAIL_PHASE_ONLY],
+ 'built-ins/RegExp/property-escapes/loose-matching-05-negated': [FAIL_PHASE_ONLY],
+ 'built-ins/RegExp/property-escapes/loose-matching-06': [FAIL_PHASE_ONLY],
+ 'built-ins/RegExp/property-escapes/loose-matching-06-negated': [FAIL_PHASE_ONLY],
+ 'built-ins/RegExp/property-escapes/loose-matching-07': [FAIL_PHASE_ONLY],
+ 'built-ins/RegExp/property-escapes/loose-matching-07-negated': [FAIL_PHASE_ONLY],
+ 'built-ins/RegExp/property-escapes/loose-matching-08': [FAIL_PHASE_ONLY],
+ 'built-ins/RegExp/property-escapes/loose-matching-08-negated': [FAIL_PHASE_ONLY],
+ 'built-ins/RegExp/property-escapes/loose-matching-09': [FAIL_PHASE_ONLY],
+ 'built-ins/RegExp/property-escapes/loose-matching-09-negated': [FAIL_PHASE_ONLY],
+ 'built-ins/RegExp/property-escapes/loose-matching-10': [FAIL_PHASE_ONLY],
+ 'built-ins/RegExp/property-escapes/loose-matching-10-negated': [FAIL_PHASE_ONLY],
+ 'built-ins/RegExp/property-escapes/loose-matching-11': [FAIL_PHASE_ONLY],
+ 'built-ins/RegExp/property-escapes/loose-matching-11-negated': [FAIL_PHASE_ONLY],
+ 'built-ins/RegExp/property-escapes/loose-matching-12': [FAIL_PHASE_ONLY],
+ 'built-ins/RegExp/property-escapes/loose-matching-12-negated': [FAIL_PHASE_ONLY],
+ 'built-ins/RegExp/property-escapes/loose-matching-13': [FAIL_PHASE_ONLY],
+ 'built-ins/RegExp/property-escapes/loose-matching-13-negated': [FAIL_PHASE_ONLY],
+ 'built-ins/RegExp/property-escapes/loose-matching-14': [FAIL_PHASE_ONLY],
+ 'built-ins/RegExp/property-escapes/loose-matching-14-negated': [FAIL_PHASE_ONLY],
+ 'built-ins/RegExp/property-escapes/non-binary-property-without-value-General_Category': [FAIL_PHASE_ONLY],
+ 'built-ins/RegExp/property-escapes/non-binary-property-without-value-General_Category-equals': [FAIL_PHASE_ONLY],
+ 'built-ins/RegExp/property-escapes/non-binary-property-without-value-General_Category-equals-negated': [FAIL_PHASE_ONLY],
+ 'built-ins/RegExp/property-escapes/non-binary-property-without-value-General_Category-negated': [FAIL_PHASE_ONLY],
+ 'built-ins/RegExp/property-escapes/non-binary-property-without-value-Script': [FAIL_PHASE_ONLY],
+ 'built-ins/RegExp/property-escapes/non-binary-property-without-value-Script-equals': [FAIL_PHASE_ONLY],
+ 'built-ins/RegExp/property-escapes/non-binary-property-without-value-Script-equals-negated': [FAIL_PHASE_ONLY],
+ 'built-ins/RegExp/property-escapes/non-binary-property-without-value-Script-negated': [FAIL_PHASE_ONLY],
+ 'built-ins/RegExp/property-escapes/non-binary-property-without-value-Script_Extensions': [FAIL_PHASE_ONLY],
+ 'built-ins/RegExp/property-escapes/non-binary-property-without-value-Script_Extensions-equals': [FAIL_PHASE_ONLY],
+ 'built-ins/RegExp/property-escapes/non-binary-property-without-value-Script_Extensions-equals-negated': [FAIL_PHASE_ONLY],
+ 'built-ins/RegExp/property-escapes/non-binary-property-without-value-Script_Extensions-negated': [FAIL_PHASE_ONLY],
+ 'built-ins/RegExp/property-escapes/non-existent-binary-property': [FAIL_PHASE_ONLY],
+ 'built-ins/RegExp/property-escapes/non-existent-binary-property-negated': [FAIL_PHASE_ONLY],
+ 'built-ins/RegExp/property-escapes/non-existent-property-and-value': [FAIL_PHASE_ONLY],
+ 'built-ins/RegExp/property-escapes/non-existent-property-and-value-negated': [FAIL_PHASE_ONLY],
+ 'built-ins/RegExp/property-escapes/non-existent-property-existing-value': [FAIL_PHASE_ONLY],
+ 'built-ins/RegExp/property-escapes/non-existent-property-existing-value-negated': [FAIL_PHASE_ONLY],
+ 'built-ins/RegExp/property-escapes/non-existent-property-value-General_Category-negated': [FAIL_PHASE_ONLY],
+ 'built-ins/RegExp/property-escapes/non-existent-property-value-Script': [FAIL_PHASE_ONLY],
+ 'built-ins/RegExp/property-escapes/non-existent-property-value-Script-negated': [FAIL_PHASE_ONLY],
+ 'built-ins/RegExp/property-escapes/non-existent-property-value-Script_Extensions': [FAIL_PHASE_ONLY],
+ 'built-ins/RegExp/property-escapes/non-existent-property-value-Script_Extensions-negated': [FAIL_PHASE_ONLY],
+ 'built-ins/RegExp/property-escapes/non-existent-property-value-general-category': [FAIL_PHASE_ONLY],
+ 'built-ins/RegExp/property-escapes/unsupported-binary-property-Composition_Exclusion': [FAIL_PHASE_ONLY],
+ 'built-ins/RegExp/property-escapes/unsupported-binary-property-Composition_Exclusion-negated': [FAIL_PHASE_ONLY],
+ 'built-ins/RegExp/property-escapes/unsupported-binary-property-Expands_On_NFC': [FAIL_PHASE_ONLY],
+ 'built-ins/RegExp/property-escapes/unsupported-binary-property-Expands_On_NFC-negated': [FAIL_PHASE_ONLY],
+ 'built-ins/RegExp/property-escapes/unsupported-binary-property-Expands_On_NFD': [FAIL_PHASE_ONLY],
+ 'built-ins/RegExp/property-escapes/unsupported-binary-property-Expands_On_NFD-negated': [FAIL_PHASE_ONLY],
+ 'built-ins/RegExp/property-escapes/unsupported-binary-property-Expands_On_NFKC': [FAIL_PHASE_ONLY],
+ 'built-ins/RegExp/property-escapes/unsupported-binary-property-Expands_On_NFKC-negated': [FAIL_PHASE_ONLY],
+ 'built-ins/RegExp/property-escapes/unsupported-binary-property-Expands_On_NFKD': [FAIL_PHASE_ONLY],
+ 'built-ins/RegExp/property-escapes/unsupported-binary-property-Expands_On_NFKD-negated': [FAIL_PHASE_ONLY],
+ 'built-ins/RegExp/property-escapes/unsupported-binary-property-FC_NFKC_Closure': [FAIL_PHASE_ONLY],
+ 'built-ins/RegExp/property-escapes/unsupported-binary-property-FC_NFKC_Closure-negated': [FAIL_PHASE_ONLY],
+ 'built-ins/RegExp/property-escapes/unsupported-binary-property-Full_Composition_Exclusion': [FAIL_PHASE_ONLY],
+ 'built-ins/RegExp/property-escapes/unsupported-binary-property-Full_Composition_Exclusion-negated': [FAIL_PHASE_ONLY],
+ 'built-ins/RegExp/property-escapes/unsupported-binary-property-Grapheme_Link': [FAIL_PHASE_ONLY],
+ 'built-ins/RegExp/property-escapes/unsupported-binary-property-Grapheme_Link-negated': [FAIL_PHASE_ONLY],
+ 'built-ins/RegExp/property-escapes/unsupported-binary-property-Hyphen': [FAIL_PHASE_ONLY],
+ 'built-ins/RegExp/property-escapes/unsupported-binary-property-Hyphen-negated': [FAIL_PHASE_ONLY],
+ 'built-ins/RegExp/property-escapes/unsupported-binary-property-Other_Alphabetic': [FAIL_PHASE_ONLY],
+ 'built-ins/RegExp/property-escapes/unsupported-binary-property-Other_Alphabetic-negated': [FAIL_PHASE_ONLY],
+ 'built-ins/RegExp/property-escapes/unsupported-binary-property-Other_Default_Ignorable_Code_Point': [FAIL_PHASE_ONLY],
+ 'built-ins/RegExp/property-escapes/unsupported-binary-property-Other_Default_Ignorable_Code_Point-negated': [FAIL_PHASE_ONLY],
+ 'built-ins/RegExp/property-escapes/unsupported-binary-property-Other_Grapheme_Extend': [FAIL_PHASE_ONLY],
+ 'built-ins/RegExp/property-escapes/unsupported-binary-property-Other_Grapheme_Extend-negated': [FAIL_PHASE_ONLY],
+ 'built-ins/RegExp/property-escapes/unsupported-binary-property-Other_ID_Continue': [FAIL_PHASE_ONLY],
+ 'built-ins/RegExp/property-escapes/unsupported-binary-property-Other_ID_Continue-negated': [FAIL_PHASE_ONLY],
+ 'built-ins/RegExp/property-escapes/unsupported-binary-property-Other_ID_Start': [FAIL_PHASE_ONLY],
+ 'built-ins/RegExp/property-escapes/unsupported-binary-property-Other_ID_Start-negated': [FAIL_PHASE_ONLY],
+ 'built-ins/RegExp/property-escapes/unsupported-binary-property-Other_Lowercase': [FAIL_PHASE_ONLY],
+ 'built-ins/RegExp/property-escapes/unsupported-binary-property-Other_Lowercase-negated': [FAIL_PHASE_ONLY],
+ 'built-ins/RegExp/property-escapes/unsupported-binary-property-Other_Math': [FAIL_PHASE_ONLY],
+ 'built-ins/RegExp/property-escapes/unsupported-binary-property-Other_Math-negated': [FAIL_PHASE_ONLY],
+ 'built-ins/RegExp/property-escapes/unsupported-binary-property-Other_Uppercase': [FAIL_PHASE_ONLY],
+ 'built-ins/RegExp/property-escapes/unsupported-binary-property-Other_Uppercase-negated': [FAIL_PHASE_ONLY],
+ 'built-ins/RegExp/property-escapes/unsupported-binary-property-Prepended_Concatenation_Mark': [FAIL_PHASE_ONLY],
+ 'built-ins/RegExp/property-escapes/unsupported-binary-property-Prepended_Concatenation_Mark-negated': [FAIL_PHASE_ONLY],
+ 'built-ins/RegExp/property-escapes/unsupported-property-Block-with-value': [FAIL_PHASE_ONLY],
+ 'built-ins/RegExp/property-escapes/unsupported-property-Block-with-value-negated': [FAIL_PHASE_ONLY],
+ 'built-ins/RegExp/property-escapes/unsupported-property-FC_NFKC_Closure': [FAIL_PHASE_ONLY],
+ 'built-ins/RegExp/property-escapes/unsupported-property-FC_NFKC_Closure-negated': [FAIL_PHASE_ONLY],
+ 'built-ins/RegExp/property-escapes/unsupported-property-Line_Break': [FAIL_PHASE_ONLY],
+ 'built-ins/RegExp/property-escapes/unsupported-property-Line_Break-negated': [FAIL_PHASE_ONLY],
+ 'built-ins/RegExp/property-escapes/unsupported-property-Line_Break-with-value': [FAIL_PHASE_ONLY],
+ 'built-ins/RegExp/property-escapes/unsupported-property-Line_Break-with-value-negated': [FAIL_PHASE_ONLY],
+ 'language/literals/regexp/early-err-pattern': [FAIL_PHASE_ONLY],
+ 'language/literals/regexp/invalid-braced-quantifier-exact': [FAIL_PHASE_ONLY],
+ 'language/literals/regexp/invalid-braced-quantifier-lower': [FAIL_PHASE_ONLY],
+ 'language/literals/regexp/invalid-braced-quantifier-range': [FAIL_PHASE_ONLY],
+ 'language/literals/regexp/invalid-optional-lookbehind': [FAIL_PHASE_ONLY],
+ 'language/literals/regexp/invalid-optional-negative-lookbehind': [FAIL_PHASE_ONLY],
+ 'language/literals/regexp/invalid-range-lookbehind': [FAIL_PHASE_ONLY],
+ 'language/literals/regexp/invalid-range-negative-lookbehind': [FAIL_PHASE_ONLY],
+ 'language/literals/regexp/named-groups/invalid-dangling-groupname': [FAIL_PHASE_ONLY],
+ 'language/literals/regexp/named-groups/invalid-dangling-groupname-2': [FAIL_PHASE_ONLY],
+ 'language/literals/regexp/named-groups/invalid-dangling-groupname-2-u': [FAIL_PHASE_ONLY],
+ 'language/literals/regexp/named-groups/invalid-dangling-groupname-3': [FAIL_PHASE_ONLY],
+ 'language/literals/regexp/named-groups/invalid-dangling-groupname-3-u': [FAIL_PHASE_ONLY],
+ 'language/literals/regexp/named-groups/invalid-dangling-groupname-4': [FAIL_PHASE_ONLY],
+ 'language/literals/regexp/named-groups/invalid-dangling-groupname-4-u': [FAIL_PHASE_ONLY],
+ 'language/literals/regexp/named-groups/invalid-dangling-groupname-5': [FAIL_PHASE_ONLY],
+ 'language/literals/regexp/named-groups/invalid-dangling-groupname-u': [FAIL_PHASE_ONLY],
+ 'language/literals/regexp/named-groups/invalid-dangling-groupname-without-group-u': [FAIL_PHASE_ONLY],
+ 'language/literals/regexp/named-groups/invalid-duplicate-groupspecifier': [FAIL_PHASE_ONLY],
+ 'language/literals/regexp/named-groups/invalid-duplicate-groupspecifier-2': [FAIL_PHASE_ONLY],
+ 'language/literals/regexp/named-groups/invalid-duplicate-groupspecifier-2-u': [FAIL_PHASE_ONLY],
+ 'language/literals/regexp/named-groups/invalid-duplicate-groupspecifier-u': [FAIL_PHASE_ONLY],
+ 'language/literals/regexp/named-groups/invalid-empty-groupspecifier': [FAIL_PHASE_ONLY],
+ 'language/literals/regexp/named-groups/invalid-empty-groupspecifier-u': [FAIL_PHASE_ONLY],
+ 'language/literals/regexp/named-groups/invalid-identity-escape-in-capture-u': [FAIL_PHASE_ONLY],
+ 'language/literals/regexp/named-groups/invalid-incomplete-groupname': [FAIL_PHASE_ONLY],
+ 'language/literals/regexp/named-groups/invalid-incomplete-groupname-2': [FAIL_PHASE_ONLY],
+ 'language/literals/regexp/named-groups/invalid-incomplete-groupname-2-u': [FAIL_PHASE_ONLY],
+ 'language/literals/regexp/named-groups/invalid-incomplete-groupname-3': [FAIL_PHASE_ONLY],
+ 'language/literals/regexp/named-groups/invalid-incomplete-groupname-3-u': [FAIL_PHASE_ONLY],
+ 'language/literals/regexp/named-groups/invalid-incomplete-groupname-4': [FAIL_PHASE_ONLY],
+ 'language/literals/regexp/named-groups/invalid-incomplete-groupname-5': [FAIL_PHASE_ONLY],
+ 'language/literals/regexp/named-groups/invalid-incomplete-groupname-6': [FAIL_PHASE_ONLY],
+ 'language/literals/regexp/named-groups/invalid-incomplete-groupname-u': [FAIL_PHASE_ONLY],
+ 'language/literals/regexp/named-groups/invalid-incomplete-groupname-without-group-2-u': [FAIL_PHASE_ONLY],
+ 'language/literals/regexp/named-groups/invalid-incomplete-groupname-without-group-3-u': [FAIL_PHASE_ONLY],
+ 'language/literals/regexp/named-groups/invalid-incomplete-groupname-without-group-u': [FAIL_PHASE_ONLY],
+ 'language/literals/regexp/named-groups/invalid-non-id-continue-groupspecifier': [FAIL_PHASE_ONLY],
+ 'language/literals/regexp/named-groups/invalid-non-id-continue-groupspecifier-4': [FAIL_PHASE_ONLY],
+ 'language/literals/regexp/named-groups/invalid-non-id-continue-groupspecifier-4-u': [FAIL_PHASE_ONLY],
+ 'language/literals/regexp/named-groups/invalid-non-id-start-groupspecifier': [FAIL_PHASE_ONLY],
+ 'language/literals/regexp/named-groups/invalid-non-id-start-groupspecifier-2': [FAIL_PHASE_ONLY],
+ 'language/literals/regexp/named-groups/invalid-non-id-start-groupspecifier-2-u': [FAIL_PHASE_ONLY],
+ 'language/literals/regexp/named-groups/invalid-non-id-start-groupspecifier-3': [FAIL_PHASE_ONLY],
+ 'language/literals/regexp/named-groups/invalid-non-id-start-groupspecifier-4': [FAIL_PHASE_ONLY],
+ 'language/literals/regexp/named-groups/invalid-non-id-start-groupspecifier-4-u': [FAIL_PHASE_ONLY],
+ 'language/literals/regexp/named-groups/invalid-non-id-start-groupspecifier-5': [FAIL_PHASE_ONLY],
+ 'language/literals/regexp/named-groups/invalid-non-id-start-groupspecifier-5-u': [FAIL_PHASE_ONLY],
+ 'language/literals/regexp/named-groups/invalid-non-id-start-groupspecifier-6': [FAIL_PHASE_ONLY],
+ 'language/literals/regexp/named-groups/invalid-non-id-start-groupspecifier-7': [FAIL_PHASE_ONLY],
+ 'language/literals/regexp/named-groups/invalid-non-id-start-groupspecifier-8': [FAIL_PHASE_ONLY],
+ 'language/literals/regexp/named-groups/invalid-non-id-start-groupspecifier-8-u': [FAIL_PHASE_ONLY],
+ 'language/literals/regexp/named-groups/invalid-non-id-start-groupspecifier-9-u': [FAIL_PHASE_ONLY],
+ 'language/literals/regexp/named-groups/invalid-non-id-start-groupspecifier-u': [FAIL_PHASE_ONLY],
+ 'language/literals/regexp/named-groups/invalid-numeric-groupspecifier': [FAIL_PHASE_ONLY],
+ 'language/literals/regexp/named-groups/invalid-numeric-groupspecifier-u': [FAIL_PHASE_ONLY],
+ 'language/literals/regexp/named-groups/invalid-punctuator-starting-groupspecifier': [FAIL_PHASE_ONLY],
+ 'language/literals/regexp/named-groups/invalid-punctuator-starting-groupspecifier-u': [FAIL_PHASE_ONLY],
+ 'language/literals/regexp/named-groups/invalid-punctuator-within-groupspecifier': [FAIL_PHASE_ONLY],
+ 'language/literals/regexp/named-groups/invalid-punctuator-within-groupspecifier-u': [FAIL_PHASE_ONLY],
+ 'language/literals/regexp/named-groups/invalid-u-escape-in-groupspecifier': [FAIL_PHASE_ONLY],
+ 'language/literals/regexp/named-groups/invalid-u-escape-in-groupspecifier-2': [FAIL_PHASE_ONLY],
+ 'language/literals/regexp/named-groups/invalid-unterminated-groupspecifier': [FAIL_PHASE_ONLY],
+ 'language/literals/regexp/named-groups/invalid-unterminated-groupspecifier-u': [FAIL_PHASE_ONLY],
+ 'language/literals/regexp/u-dec-esc': [FAIL_PHASE_ONLY],
+ 'language/literals/regexp/u-invalid-class-escape': [FAIL_PHASE_ONLY],
+ 'language/literals/regexp/u-invalid-extended-pattern-char': [FAIL_PHASE_ONLY],
+ 'language/literals/regexp/u-invalid-identity-escape': [FAIL_PHASE_ONLY],
+ 'language/literals/regexp/u-invalid-legacy-octal-escape': [FAIL_PHASE_ONLY],
+ 'language/literals/regexp/u-invalid-non-empty-class-ranges': [FAIL_PHASE_ONLY],
+ 'language/literals/regexp/u-invalid-non-empty-class-ranges-no-dash-a': [FAIL_PHASE_ONLY],
+ 'language/literals/regexp/u-invalid-non-empty-class-ranges-no-dash-ab': [FAIL_PHASE_ONLY],
+ 'language/literals/regexp/u-invalid-non-empty-class-ranges-no-dash-b': [FAIL_PHASE_ONLY],
+ 'language/literals/regexp/u-invalid-oob-decimal-escape': [FAIL_PHASE_ONLY],
+ 'language/literals/regexp/u-invalid-optional-lookahead': [FAIL_PHASE_ONLY],
+ 'language/literals/regexp/u-invalid-optional-lookbehind': [FAIL_PHASE_ONLY],
+ 'language/literals/regexp/u-invalid-optional-negative-lookahead': [FAIL_PHASE_ONLY],
+ 'language/literals/regexp/u-invalid-optional-negative-lookbehind': [FAIL_PHASE_ONLY],
+ 'language/literals/regexp/u-invalid-range-lookahead': [FAIL_PHASE_ONLY],
+ 'language/literals/regexp/u-invalid-range-lookbehind': [FAIL_PHASE_ONLY],
+ 'language/literals/regexp/u-invalid-range-negative-lookahead': [FAIL_PHASE_ONLY],
+ 'language/literals/regexp/u-invalid-range-negative-lookbehind': [FAIL_PHASE_ONLY],
+ 'language/literals/regexp/u-unicode-esc-bounds': [FAIL_PHASE_ONLY],
+ 'language/literals/regexp/u-unicode-esc-non-hex': [FAIL_PHASE_ONLY],
+ 'language/literals/regexp/unicode-escape-nls-err': [FAIL_PHASE_ONLY],
# https://bugs.chromium.org/p/v8/issues/detail?id=7829
'language/block-scope/syntax/redeclaration/function-declaration-attempt-to-redeclare-with-var-declaration-nested-in-function': [FAIL],
@@ -367,19 +388,6 @@
# https://bugs.chromium.org/p/v8/issues/detail?id=5116
'built-ins/TypedArray/prototype/fill/fill-values-conversion-operations-consistent-nan': [PASS, FAIL],
- # https://bugs.chromium.org/p/v8/issues/detail?id=5135
- 'annexB/language/eval-code/direct/func-block-decl-eval-func-block-scoping': [FAIL],
- 'annexB/language/eval-code/direct/func-if-decl-else-decl-a-eval-func-block-scoping': [FAIL],
- 'annexB/language/eval-code/direct/func-if-decl-else-decl-b-eval-func-block-scoping': [FAIL],
- 'annexB/language/eval-code/direct/func-if-decl-else-stmt-eval-func-block-scoping': [FAIL],
- 'annexB/language/eval-code/direct/func-if-decl-no-else-eval-func-block-scoping': [FAIL],
- 'annexB/language/eval-code/direct/func-if-stmt-else-decl-eval-func-block-scoping': [FAIL],
- 'annexB/language/eval-code/direct/func-switch-case-eval-func-block-scoping': [FAIL],
- 'annexB/language/eval-code/direct/func-switch-dflt-eval-func-block-scoping': [FAIL],
-
- # https://bugs.chromium.org/p/v8/issues/detail?id=5139
- 'annexB/built-ins/Date/prototype/setYear/year-number-relative': [FAIL],
-
# https://bugs.chromium.org/p/v8/issues/detail?id=4698
'language/expressions/call/tco-call-args': [SKIP],
'language/expressions/call/tco-cross-realm-class-construct': [SKIP],
@@ -418,10 +426,6 @@
'language/statements/try/tco-finally': [SKIP],
'language/statements/while/tco-body': [SKIP],
- # https://bugs.chromium.org/p/v8/issues/detail?id=5064
- 'language/expressions/arrow-function/dflt-params-duplicates': [FAIL],
- 'language/expressions/async-arrow-function/dflt-params-duplicates': [FAIL],
-
# https://bugs.chromium.org/p/v8/issues/detail?id=5327
'built-ins/TypedArrayConstructors/internals/Set/key-is-minus-zero': [FAIL],
'built-ins/TypedArrayConstructors/internals/Set/BigInt/key-is-minus-zero': [FAIL],
@@ -430,27 +434,6 @@
'built-ins/TypedArrayConstructors/internals/Set/key-is-out-of-bounds': [FAIL],
'built-ins/TypedArrayConstructors/internals/Set/BigInt/key-is-out-of-bounds': [FAIL],
- # https://bugs.chromium.org/p/v8/issues/detail?id=5329
- 'built-ins/RegExp/prototype/source/value-line-terminator': [FAIL],
-
- # https://bugs.chromium.org/p/v8/issues/detail?id=5112
- 'annexB/language/eval-code/direct/func-block-decl-eval-func-no-skip-try': [FAIL],
- 'annexB/language/eval-code/direct/func-if-decl-else-decl-a-eval-func-no-skip-try': [FAIL],
- 'annexB/language/eval-code/direct/func-if-decl-else-decl-b-eval-func-no-skip-try': [FAIL],
- 'annexB/language/eval-code/direct/func-if-decl-else-stmt-eval-func-no-skip-try': [FAIL],
- 'annexB/language/eval-code/direct/func-if-decl-no-else-eval-func-no-skip-try': [FAIL],
- 'annexB/language/eval-code/direct/func-if-stmt-else-decl-eval-func-no-skip-try': [FAIL],
- 'annexB/language/eval-code/direct/func-switch-case-eval-func-no-skip-try': [FAIL],
- 'annexB/language/eval-code/direct/func-switch-dflt-eval-func-no-skip-try': [FAIL],
-
- # PreParser doesn't produce early errors
- # https://bugs.chromium.org/p/v8/issues/detail?id=2728
- 'language/expressions/async-arrow-function/early-errors-arrow-formals-body-duplicate': [FAIL],
- 'language/expressions/object/method-definition/generator-param-redecl-const': [FAIL],
- 'language/expressions/object/method-definition/generator-param-redecl-let': [FAIL],
- 'language/expressions/object/method-definition/name-param-redecl': [FAIL],
- 'language/statements/async-function/early-errors-declaration-formals-body-duplicate': [FAIL],
-
# SharedArrayBuffer tests that require flags
'built-ins/SharedArrayBuffer/*': ['--harmony-sharedarraybuffer'],
'built-ins/Atomics/*': ['--harmony-sharedarraybuffer'],
@@ -506,9 +489,6 @@
'built-ins/Object/internals/DefineOwnProperty/consistent-value-function-caller': [FAIL_SLOPPY],
'built-ins/Object/internals/DefineOwnProperty/consistent-value-function-arguments': [FAIL_SLOPPY],
- # https://bugs.chromium.org/p/v8/issues/detail?id=7874
- 'built-ins/Reflect/ownKeys/return-on-corresponding-order-large-index': [FAIL],
-
# https://bugs.chromium.org/p/v8/issues/detail?id=6776
'built-ins/Proxy/ownKeys/return-duplicate-entries-throws': [FAIL],
'built-ins/Proxy/ownKeys/return-duplicate-symbol-entries-throws': [FAIL],
@@ -518,48 +498,41 @@
'annexB/language/statements/for-await-of/iterator-close-return-emulates-undefined-throws-when-called': [FAIL],
'annexB/language/statements/for-of/iterator-close-return-emulates-undefined-throws-when-called': [FAIL],
- # https://bugs.chromium.org/p/v8/issues/detail?id=7186
- 'language/statements/class/fields-indirect-eval-err-contains-arguments': [FAIL],
- 'language/expressions/class/fields-indirect-eval-err-contains-arguments': [FAIL],
-
# https://bugs.chromium.org/p/v8/issues/detail?id=7468
- 'language/statements/class/privatename-not-valid-earlyerr-script-8': [FAIL],
+ 'language/statements/class/elements/privatename-not-valid-earlyerr-script-8': [SKIP],
# https://bugs.chromium.org/p/v8/issues/detail?id=5690
'language/expressions/call/eval-spread': [FAIL],
'language/expressions/call/eval-spread-empty-leading': [FAIL],
'language/expressions/call/eval-spread-empty-trailing': [FAIL],
+ # https://bugs.chromium.org/p/v8/issues/detail?id=8717
+ 'intl402/Segmenter/constructor/constructor/options-granularity-valid': [FAIL],
+ 'intl402/Segmenter/constructor/constructor/options-lineBreakStyle-invalid': [FAIL],
+ 'intl402/Segmenter/constructor/constructor/options-lineBreakStyle-valid': [FAIL],
+ 'intl402/Segmenter/constructor/constructor/options-order': [FAIL],
+ 'intl402/Segmenter/constructor/constructor/options-throwing-getters': [FAIL],
+ 'intl402/Segmenter/constructor/constructor/options-toobject-prototype': [FAIL],
+ 'intl402/Segmenter/constructor/constructor/options-valid-combinations': [FAIL],
+ 'intl402/Segmenter/iterator/granularity': [FAIL],
+ 'intl402/Segmenter/prototype/resolvedOptions/order': [FAIL],
+ 'intl402/Segmenter/prototype/resolvedOptions/type-with-lbs': [FAIL],
+ 'intl402/Segmenter/prototype/segment/segment-line': [FAIL],
+ 'intl402/Segmenter/prototype/segment/segment-line-following': [FAIL],
+ 'intl402/Segmenter/prototype/segment/segment-line-following-modes': [FAIL],
+ 'intl402/Segmenter/prototype/segment/segment-line-iterable': [FAIL],
+ 'intl402/Segmenter/prototype/segment/segment-line-next': [FAIL],
+ 'intl402/Segmenter/prototype/segment/segment-line-preceding': [FAIL],
+
# https://bugs.chromium.org/p/v8/issues/detail?id=7472
'intl402/NumberFormat/currency-digits': [FAIL],
- # https://bugs.chromium.org/p/v8/issues/detail?id=7474
- 'intl402/NumberFormat/prototype/format/format-fraction-digits': [FAIL],
- 'intl402/NumberFormat/prototype/format/format-significant-digits': [FAIL],
-
- # https://bugs.chromium.org/p/v8/issues/detail?id=7481
- 'intl402/NumberFormat/ignore-invalid-unicode-ext-values': [FAIL],
- 'intl402/DateTimeFormat/ignore-invalid-unicode-ext-values': [FAIL],
-
# https://bugs.chromium.org/p/v8/issues/detail?id=7482
'intl402/DateTimeFormat/prototype/resolvedOptions/resolved-locale-with-hc-unicode': [FAIL],
# https://bugs.chromium.org/p/v8/issues/detail?id=7483
'annexB/built-ins/Function/createdynfn-html-close-comment-params': [FAIL],
- # https://bugs.chromium.org/p/v8/issues/detail?id=7669
- 'intl402/Intl/getCanonicalLocales/canonicalized-tags': [FAIL],
-
- # https://bugs.chromium.org/p/v8/issues/detail?id=8051
- 'intl402/Collator/unicode-ext-seq-in-private-tag': [FAIL],
-
- # Tests assume that the sort order of "same elements" (comparator returns 0)
- # is deterministic.
- # https://crbug.com/v8/7808
- 'intl402/String/prototype/localeCompare/returns-same-results-as-Collator': [SKIP],
- 'intl402/Collator/prototype/compare/bound-to-collator-instance': [SKIP],
- 'intl402/Collator/ignore-invalid-unicode-ext-values': [SKIP],
-
# https://bugs.chromium.org/p/v8/issues/detail?id=8260
'intl402/Locale/constructor-non-iana-canon': [FAIL],
@@ -572,17 +545,6 @@
# https://bugs.chromium.org/p/v8/issues/detail?id=8246
'intl402/Locale/constructor-tag': [FAIL],
- # https://bugs.chromium.org/p/v8/issues/detail?id=8244
- 'intl402/Locale/constructor-getter-order': [FAIL],
- 'intl402/Locale/constructor-locale-object': [FAIL],
- 'intl402/Locale/constructor-options-language-grandfathered': [FAIL],
- 'intl402/Locale/constructor-options-language-invalid': [FAIL],
- 'intl402/Locale/constructor-options-region-invalid': [FAIL],
- 'intl402/Locale/constructor-options-region-valid': [FAIL],
- 'intl402/Locale/constructor-options-script-invalid': [FAIL],
- 'intl402/Locale/constructor-options-script-valid': [FAIL],
- 'intl402/Locale/getters': [FAIL],
-
# https://bugs.chromium.org/p/v8/issues/detail?id=8243
'intl402/Locale/extensions-private': [FAIL],
'intl402/Locale/getters-privateuse': [FAIL],
@@ -595,26 +557,86 @@
'intl402/Locale/getters-grandfathered': [FAIL],
'intl402/Locale/likely-subtags-grandfathered': [FAIL],
- # Wrong test see https://github.com/tc39/test262/pull/1835
- 'intl402/Locale/constructor-options-numeric-valid': [FAIL],
+ # https://bugs.chromium.org/p/v8/issues/detail?id=8613
+ 'intl402/RelativeTimeFormat/prototype/resolvedOptions/order': [FAIL],
# https://bugs.chromium.org/p/v8/issues/detail?id=6705
'built-ins/Object/assign/strings-and-symbol-order': [FAIL],
- # https://bugs.chromium.org/p/v8/issues/detail?id=7781
- 'built-ins/Date/parse/time-value-maximum-range': [FAIL],
-
# https://bugs.chromium.org/p/v8/issues/detail?id=7831
'language/statements/generators/generator-created-after-decl-inst': [FAIL],
'language/expressions/generators/generator-created-after-decl-inst': [FAIL],
'language/expressions/async-generator/generator-created-after-decl-inst': [FAIL],
'language/statements/async-generator/generator-created-after-decl-inst': [FAIL],
- # https://bugs.chromium.org/p/v8/issues/detail?id=8099
- 'intl402/NumberFormat/prototype/format/format-negative-numbers': [FAIL],
-
- # https://bugs.chromium.org/p/v8/issues/detail?id=7871
- 'intl402/ListFormat/prototype/formatToParts/en-us-disjunction': [FAIL],
+ # await tests that require flags
+ 'language/expressions/await/async-generator-interleaved': ['--harmony-await-optimization'],
+ 'language/expressions/await/await-monkey-patched-promise': ['--harmony-await-optimization'],
+ 'language/expressions/await/for-await-of-interleaved': ['--harmony-await-optimization'],
+ 'language/expressions/await/async-await-interleaved': ['--harmony-await-optimization'],
+
+ # https://bugs.chromium.org/p/v8/issues/detail?id=8706
+ 'language/block-scope/syntax/redeclaration/fn-scope-var-name-redeclaration-attempt-with-async-function': [FAIL],
+ 'language/block-scope/syntax/redeclaration/fn-scope-var-name-redeclaration-attempt-with-async-generator': [FAIL],
+ 'language/block-scope/syntax/redeclaration/fn-scope-var-name-redeclaration-attempt-with-class': [FAIL],
+ 'language/block-scope/syntax/redeclaration/fn-scope-var-name-redeclaration-attempt-with-const': [FAIL],
+ 'language/block-scope/syntax/redeclaration/fn-scope-var-name-redeclaration-attempt-with-function': [FAIL],
+ 'language/block-scope/syntax/redeclaration/fn-scope-var-name-redeclaration-attempt-with-generator': [FAIL],
+ 'language/block-scope/syntax/redeclaration/fn-scope-var-name-redeclaration-attempt-with-let': [FAIL],
+
+ # https://github.com/tc39/test262/issues/2033
+ 'language/expressions/class/elements/private-derived-cls-direct-eval-err-contains-supercall': [SKIP],
+ 'language/expressions/class/elements/private-derived-cls-direct-eval-err-contains-supercall-1': [SKIP],
+ 'language/expressions/class/elements/private-derived-cls-direct-eval-err-contains-supercall-2': [SKIP],
+ 'language/expressions/class/elements/private-derived-cls-indirect-eval-err-contains-supercall': [SKIP],
+ 'language/expressions/class/elements/private-derived-cls-indirect-eval-err-contains-supercall-1': [SKIP],
+ 'language/expressions/class/elements/private-derived-cls-indirect-eval-err-contains-supercall-2': [SKIP],
+ 'language/statements/class/elements/private-derived-cls-direct-eval-err-contains-supercall': [SKIP],
+ 'language/statements/class/elements/private-derived-cls-direct-eval-err-contains-supercall-1': [SKIP],
+ 'language/statements/class/elements/private-derived-cls-direct-eval-err-contains-supercall-2': [SKIP],
+ 'language/statements/class/elements/private-derived-cls-indirect-eval-err-contains-supercall': [SKIP],
+ 'language/statements/class/elements/private-derived-cls-indirect-eval-err-contains-supercall-1': [SKIP],
+ 'language/statements/class/elements/private-derived-cls-indirect-eval-err-contains-supercall-2': [SKIP],
+
+ # https://github.com/tc39/test262/issues/2034
+ 'language/expressions/postfix-decrement/arguments': [SKIP],
+ 'language/expressions/postfix-decrement/arguments-nostrict': [SKIP],
+ 'language/expressions/postfix-decrement/eval': [SKIP],
+ 'language/expressions/postfix-decrement/eval-nostrict': [SKIP],
+ 'language/expressions/postfix-increment/arguments': [SKIP],
+ 'language/expressions/postfix-increment/arguments-nostrict': [SKIP],
+ 'language/expressions/postfix-increment/eval': [SKIP],
+ 'language/expressions/postfix-increment/eval-nostrict': [SKIP],
+ 'language/expressions/prefix-decrement/arguments': [SKIP],
+ 'language/expressions/prefix-decrement/arguments-nostrict': [SKIP],
+ 'language/expressions/prefix-decrement/eval': [SKIP],
+ 'language/expressions/prefix-decrement/eval-nostrict': [SKIP],
+ 'language/expressions/prefix-increment/arguments': [SKIP],
+ 'language/expressions/prefix-increment/arguments-nostrict': [SKIP],
+ 'language/expressions/prefix-increment/eval': [SKIP],
+ 'language/expressions/prefix-increment/eval-nostrict': [SKIP],
+
+ # https://bugs.chromium.org/p/v8/issues/detail?id=8707
+ 'language/line-terminators/invalid-string-ls': [SKIP],
+ 'language/line-terminators/invalid-string-ps': [SKIP],
+
+ # https://github.com/tc39/proposal-class-fields/issues/215
+ 'language/expressions/class/elements/syntax/early-errors/invalid-names/method-inner-call-expression-bad-reference': [FAIL],
+ 'language/expressions/class/elements/syntax/early-errors/invalid-names/method-inner-call-expression-this': [FAIL],
+ 'language/expressions/class/elements/syntax/early-errors/invalid-names/method-inner-member-expression-bad-reference': [FAIL],
+ 'language/expressions/class/elements/syntax/early-errors/invalid-names/method-inner-member-expression-this': [FAIL],
+ 'language/statements/class/elements/syntax/early-errors/invalid-names/method-inner-call-expression-bad-reference': [FAIL],
+ 'language/statements/class/elements/syntax/early-errors/invalid-names/method-inner-call-expression-this': [FAIL],
+ 'language/statements/class/elements/syntax/early-errors/invalid-names/method-inner-member-expression-bad-reference': [FAIL],
+ 'language/statements/class/elements/syntax/early-errors/invalid-names/method-inner-member-expression-this': [FAIL],
+ 'language/expressions/function/early-errors/invalid-names-call-expression-bad-reference': [FAIL],
+ 'language/expressions/function/early-errors/invalid-names-call-expression-this': [FAIL],
+ 'language/expressions/function/early-errors/invalid-names-member-expression-bad-reference': [FAIL],
+ 'language/expressions/function/early-errors/invalid-names-member-expression-this': [FAIL],
+ 'language/statements/function/early-errors/invalid-names-call-expression-bad-reference': [FAIL],
+ 'language/statements/function/early-errors/invalid-names-call-expression-this': [FAIL],
+ 'language/statements/function/early-errors/invalid-names-member-expression-bad-reference': [FAIL],
+ 'language/statements/function/early-errors/invalid-names-member-expression-this': [FAIL],
######################## NEEDS INVESTIGATION ###########################
@@ -622,26 +644,15 @@
# to be either marked as bugs with issues filed for them or as deliberate
# incompatibilities if the test cases turn out to be broken or ambiguous.
# Some of these are related to v8:4361 in being visible side effects from Intl.
- 'intl402/DateTimeFormat/prototype/resolvedOptions/hourCycle': [FAIL],
# https://bugs.chromium.org/p/v8/issues/detail?id=7833
'built-ins/Atomics/wait/cannot-suspend-throws': [SKIP],
'built-ins/Atomics/wait/undefined-index-defaults-to-zero': [SKIP],
- # https://bugs.chromium.org/p/v8/issues/detail?id=6890#c12
- 'built-ins/RegExp/prototype/Symbol.matchAll/isregexp-called-once': [FAIL],
- 'built-ins/RegExp/prototype/Symbol.matchAll/species-constructor': [FAIL],
- 'built-ins/RegExp/prototype/Symbol.matchAll/species-regexp-get-global-throws': [FAIL],
- 'built-ins/RegExp/prototype/Symbol.matchAll/species-regexp-get-unicode-throws': [FAIL],
- 'built-ins/String/prototype/matchAll/regexp-prototype-has-no-matchAll': [FAIL],
-
# https://bugs.chromium.org/p/v8/issues/detail?id=8258
'intl402/Locale/constructor-options-language-valid-undefined': [FAIL],
- 'intl402/Locale/constructor-options-throwing-getters': [FAIL],
- 'intl402/Locale/constructor-tag-tostring': [FAIL],
'intl402/NumberFormat/prototype/format/format-fraction-digits-precision': [FAIL],
'intl402/NumberFormat/prototype/format/format-significant-digits-precision': [FAIL],
- 'intl402/NumberFormat/prototype/formatToParts/value-tonumber': [FAIL],
##################### DELIBERATE INCOMPATIBILITIES #####################
@@ -707,18 +718,6 @@
# https://bugs.chromium.org/p/v8/issues/detail?id=7187
'built-ins/Function/prototype/toString/line-terminator-normalisation-CR': [SKIP],
- 'language/expressions/class/fields-private-derived-cls-direct-eval-err-contains-supercall': [FAIL],
- 'language/expressions/class/fields-private-derived-cls-direct-eval-err-contains-supercall-1': [FAIL],
- 'language/expressions/class/fields-private-derived-cls-direct-eval-err-contains-supercall-2': [FAIL],
- 'language/expressions/class/fields-private-derived-cls-indirect-eval-err-contains-supercall': [FAIL],
- 'language/expressions/class/fields-private-derived-cls-indirect-eval-err-contains-supercall-1': [FAIL],
- 'language/expressions/class/fields-private-derived-cls-indirect-eval-err-contains-supercall-2': [FAIL],
- 'language/statements/class/fields-private-derived-cls-direct-eval-err-contains-supercall': [FAIL],
- 'language/statements/class/fields-private-derived-cls-direct-eval-err-contains-supercall-1': [FAIL],
- 'language/statements/class/fields-private-derived-cls-direct-eval-err-contains-supercall-2': [FAIL],
- 'language/statements/class/fields-private-derived-cls-indirect-eval-err-contains-supercall': [FAIL],
- 'language/statements/class/fields-private-derived-cls-indirect-eval-err-contains-supercall-1': [FAIL],
- 'language/statements/class/fields-private-derived-cls-indirect-eval-err-contains-supercall-2': [FAIL],
############################ SLOW TESTS #############################
@@ -791,59 +790,541 @@
# BUG(v8:4653): Test262 tests which rely on quit() are not compatible with
# asan's --omit-quit flag.
'built-ins/Promise/prototype/then/deferred-is-resolved-value': [SKIP],
- 'language/module-code/dynamic-import/eval-export-dflt-cls-anon': [SKIP],
- 'language/module-code/dynamic-import/eval-export-dflt-cls-named': [SKIP],
- 'language/module-code/dynamic-import/eval-export-dflt-cls-name-meth': [SKIP],
- 'language/module-code/dynamic-import/eval-export-dflt-expr-cls-anon': [SKIP],
- 'language/module-code/dynamic-import/eval-export-dflt-expr-cls-named': [SKIP],
- 'language/module-code/dynamic-import/eval-export-dflt-expr-cls-name-meth': [SKIP],
- 'language/module-code/dynamic-import/eval-export-dflt-expr-fn-anon': [SKIP],
- 'language/module-code/dynamic-import/eval-export-dflt-expr-fn-named': [SKIP],
- 'language/module-code/dynamic-import/eval-export-dflt-expr-gen-anon': [SKIP],
- 'language/module-code/dynamic-import/eval-export-dflt-expr-gen-named': [SKIP],
- 'language/module-code/dynamic-import/eval-export-dflt-expr-in': [SKIP],
- 'language/module-code/dynamic-import/usage/nested-arrow-assignment-expression-eval-gtbndng-indirect-update-dflt': [SKIP],
- 'language/module-code/dynamic-import/usage/nested-arrow-assignment-expression-eval-gtbndng-indirect-update': [SKIP],
- 'language/module-code/dynamic-import/usage/nested-arrow-assignment-expression-returns-promise': [SKIP],
- 'language/module-code/dynamic-import/usage/nested-arrow-import-then-eval-gtbndng-indirect-update-dflt': [SKIP],
- 'language/module-code/dynamic-import/usage/nested-arrow-import-then-eval-gtbndng-indirect-update': [SKIP],
- 'language/module-code/dynamic-import/usage/nested-arrow-import-then-returns-promise': [SKIP],
- 'language/module-code/dynamic-import/usage/nested-async-function-await-eval-gtbndng-indirect-update-dflt': [SKIP],
- 'language/module-code/dynamic-import/usage/nested-async-function-await-eval-gtbndng-indirect-update': [SKIP],
- 'language/module-code/dynamic-import/usage/nested-async-function-await-returns-promise': [SKIP],
- 'language/module-code/dynamic-import/usage/nested-async-function-eval-gtbndng-indirect-update-dflt': [SKIP],
- 'language/module-code/dynamic-import/usage/nested-async-function-eval-gtbndng-indirect-update': [SKIP],
- 'language/module-code/dynamic-import/usage/nested-async-function-return-await-eval-gtbndng-indirect-update-dflt': [SKIP],
- 'language/module-code/dynamic-import/usage/nested-async-function-return-await-eval-gtbndng-indirect-update': [SKIP],
- 'language/module-code/dynamic-import/usage/nested-async-function-return-await-returns-promise': [SKIP],
- 'language/module-code/dynamic-import/usage/nested-async-function-returns-promise': [SKIP],
- 'language/module-code/dynamic-import/usage/nested-block-import-then-eval-gtbndng-indirect-update-dflt': [SKIP],
- 'language/module-code/dynamic-import/usage/nested-block-import-then-eval-gtbndng-indirect-update': [SKIP],
- 'language/module-code/dynamic-import/usage/nested-block-import-then-returns-promise': [SKIP],
- 'language/module-code/dynamic-import/usage/nested-do-while-eval-gtbndng-indirect-update-dflt': [SKIP],
- 'language/module-code/dynamic-import/usage/nested-do-while-eval-gtbndng-indirect-update': [SKIP],
- 'language/module-code/dynamic-import/usage/nested-do-while-returns-promise': [SKIP],
- 'language/module-code/dynamic-import/usage/nested-else-import-then-eval-gtbndng-indirect-update-dflt': [SKIP],
- 'language/module-code/dynamic-import/usage/nested-else-import-then-eval-gtbndng-indirect-update': [SKIP],
- 'language/module-code/dynamic-import/usage/nested-else-import-then-returns-promise': [SKIP],
- 'language/module-code/dynamic-import/usage/nested-function-import-then-eval-gtbndng-indirect-update-dflt': [SKIP],
- 'language/module-code/dynamic-import/usage/nested-function-import-then-eval-gtbndng-indirect-update': [SKIP],
- 'language/module-code/dynamic-import/usage/nested-function-import-then-returns-promise': [SKIP],
- 'language/module-code/dynamic-import/usage/nested-if-braceless-eval-gtbndng-indirect-update-dflt': [SKIP],
- 'language/module-code/dynamic-import/usage/nested-if-braceless-eval-gtbndng-indirect-update': [SKIP],
- 'language/module-code/dynamic-import/usage/nested-if-braceless-returns-promise': [SKIP],
- 'language/module-code/dynamic-import/usage/nested-if-import-then-eval-gtbndng-indirect-update-dflt': [SKIP],
- 'language/module-code/dynamic-import/usage/nested-if-import-then-eval-gtbndng-indirect-update': [SKIP],
- 'language/module-code/dynamic-import/usage/nested-if-import-then-returns-promise': [SKIP],
- 'language/module-code/dynamic-import/usage/nested-while-import-then-eval-gtbndng-indirect-update-dflt': [SKIP],
- 'language/module-code/dynamic-import/usage/nested-while-import-then-eval-gtbndng-indirect-update': [SKIP],
- 'language/module-code/dynamic-import/usage/nested-while-import-then-returns-promise': [SKIP],
- 'language/module-code/dynamic-import/usage/syntax-nested-block-labeled-eval-gtbndng-indirect-update-dflt': [SKIP],
- 'language/module-code/dynamic-import/usage/syntax-nested-block-labeled-eval-gtbndng-indirect-update': [SKIP],
- 'language/module-code/dynamic-import/usage/syntax-nested-block-labeled-returns-promise': [SKIP],
- 'language/module-code/dynamic-import/usage/top-level-import-then-eval-gtbndng-indirect-update-dflt': [SKIP],
- 'language/module-code/dynamic-import/usage/top-level-import-then-eval-gtbndng-indirect-update': [SKIP],
- 'language/module-code/dynamic-import/usage/top-level-import-then-returns-promise': [SKIP],
+ 'language/expressions/dynamic-import/always-create-new-promise': [SKIP],
+ 'language/expressions/dynamic-import/assign-expr-get-value-abrupt-throws': [SKIP],
+ 'language/expressions/dynamic-import/assignment-expression/additive-expr': [SKIP],
+ 'language/expressions/dynamic-import/assignment-expression/array-literal': [SKIP],
+ 'language/expressions/dynamic-import/assignment-expression/arrow-function': [SKIP],
+ 'language/expressions/dynamic-import/assignment-expression/await-expr': [SKIP],
+ 'language/expressions/dynamic-import/assignment-expression/await-identifier': [SKIP],
+ 'language/expressions/dynamic-import/assignment-expression/call-expr-arguments': [SKIP],
+ 'language/expressions/dynamic-import/assignment-expression/call-expr-expr': [SKIP],
+ 'language/expressions/dynamic-import/assignment-expression/call-expr-identifier': [SKIP],
+ 'language/expressions/dynamic-import/assignment-expression/cover-call-expr': [SKIP],
+ 'language/expressions/dynamic-import/assignment-expression/cover-parenthesized-expr': [SKIP],
+ 'language/expressions/dynamic-import/assignment-expression/identifier': [SKIP],
+ 'language/expressions/dynamic-import/assignment-expression/import-meta': [SKIP],
+ 'language/expressions/dynamic-import/assignment-expression/lhs-assign-operator-assign-expr': [SKIP],
+ 'language/expressions/dynamic-import/assignment-expression/lhs-eq-assign-expr': [SKIP],
+ 'language/expressions/dynamic-import/assignment-expression/lhs-eq-assign-expr-nostrict': [SKIP],
+ 'language/expressions/dynamic-import/assignment-expression/logical-and-expr': [SKIP],
+ 'language/expressions/dynamic-import/assignment-expression/logical-or-expr': [SKIP],
+ 'language/expressions/dynamic-import/assignment-expression/member-expr': [SKIP],
+ 'language/expressions/dynamic-import/assignment-expression/new-target': [SKIP],
+ 'language/expressions/dynamic-import/assignment-expression/object-literal': [SKIP],
+ 'language/expressions/dynamic-import/assignment-expression/tagged-function-call': [SKIP],
+ 'language/expressions/dynamic-import/assignment-expression/ternary': [SKIP],
+ 'language/expressions/dynamic-import/assignment-expression/this': [SKIP],
+ 'language/expressions/dynamic-import/assignment-expression/unary-expr': [SKIP],
+ 'language/expressions/dynamic-import/assignment-expression/yield-assign-expr': [SKIP],
+ 'language/expressions/dynamic-import/assignment-expression/yield-expr': [SKIP],
+ 'language/expressions/dynamic-import/assignment-expression/yield-identifier': [SKIP],
+ 'language/expressions/dynamic-import/assignment-expression/yield-star': [SKIP],
+ 'language/expressions/dynamic-import/await-import-evaluation': [SKIP],
+ 'language/expressions/dynamic-import/catch/nested-arrow-import-catch-eval-rqstd-abrupt-typeerror': [SKIP],
+ 'language/expressions/dynamic-import/catch/nested-arrow-import-catch-eval-rqstd-abrupt-urierror': [SKIP],
+ 'language/expressions/dynamic-import/catch/nested-arrow-import-catch-eval-script-code-target': [SKIP],
+ 'language/expressions/dynamic-import/catch/nested-arrow-import-catch-file-does-not-exist': [SKIP],
+ 'language/expressions/dynamic-import/catch/nested-arrow-import-catch-instn-iee-err-ambiguous-import': [SKIP],
+ 'language/expressions/dynamic-import/catch/nested-arrow-import-catch-instn-iee-err-circular': [SKIP],
+ 'language/expressions/dynamic-import/catch/nested-arrow-import-catch-specifier-tostring-abrupt-rejects': [SKIP],
+ 'language/expressions/dynamic-import/catch/nested-async-arrow-function-await-eval-rqstd-abrupt-typeerror': [SKIP],
+ 'language/expressions/dynamic-import/catch/nested-async-arrow-function-await-eval-rqstd-abrupt-urierror': [SKIP],
+ 'language/expressions/dynamic-import/catch/nested-async-arrow-function-await-eval-script-code-target': [SKIP],
+ 'language/expressions/dynamic-import/catch/nested-async-arrow-function-await-file-does-not-exist': [SKIP],
+ 'language/expressions/dynamic-import/catch/nested-async-arrow-function-await-instn-iee-err-ambiguous-import': [SKIP],
+ 'language/expressions/dynamic-import/catch/nested-async-arrow-function-await-instn-iee-err-circular': [SKIP],
+ 'language/expressions/dynamic-import/catch/nested-async-arrow-function-await-specifier-tostring-abrupt-rejects': [SKIP],
+ 'language/expressions/dynamic-import/catch/nested-async-arrow-function-return-await-eval-rqstd-abrupt-typeerror': [SKIP],
+ 'language/expressions/dynamic-import/catch/nested-async-arrow-function-return-await-eval-rqstd-abrupt-urierror': [SKIP],
+ 'language/expressions/dynamic-import/catch/nested-async-arrow-function-return-await-eval-script-code-target': [SKIP],
+ 'language/expressions/dynamic-import/catch/nested-async-arrow-function-return-await-file-does-not-exist': [SKIP],
+ 'language/expressions/dynamic-import/catch/nested-async-arrow-function-return-await-instn-iee-err-ambiguous-import': [SKIP],
+ 'language/expressions/dynamic-import/catch/nested-async-arrow-function-return-await-instn-iee-err-circular': [SKIP],
+ 'language/expressions/dynamic-import/catch/nested-async-arrow-function-return-await-specifier-tostring-abrupt-rejects': [SKIP],
+ 'language/expressions/dynamic-import/catch/nested-async-function-await-eval-rqstd-abrupt-typeerror': [SKIP],
+ 'language/expressions/dynamic-import/catch/nested-async-function-await-eval-rqstd-abrupt-urierror': [SKIP],
+ 'language/expressions/dynamic-import/catch/nested-async-function-await-eval-script-code-target': [SKIP],
+ 'language/expressions/dynamic-import/catch/nested-async-function-await-file-does-not-exist': [SKIP],
+ 'language/expressions/dynamic-import/catch/nested-async-function-await-instn-iee-err-ambiguous-import': [SKIP],
+ 'language/expressions/dynamic-import/catch/nested-async-function-await-instn-iee-err-circular': [SKIP],
+ 'language/expressions/dynamic-import/catch/nested-async-function-await-specifier-tostring-abrupt-rejects': [SKIP],
+ 'language/expressions/dynamic-import/catch/nested-async-function-eval-rqstd-abrupt-typeerror': [SKIP],
+ 'language/expressions/dynamic-import/catch/nested-async-function-eval-rqstd-abrupt-urierror': [SKIP],
+ 'language/expressions/dynamic-import/catch/nested-async-function-eval-script-code-target': [SKIP],
+ 'language/expressions/dynamic-import/catch/nested-async-function-file-does-not-exist': [SKIP],
+ 'language/expressions/dynamic-import/catch/nested-async-function-instn-iee-err-ambiguous-import': [SKIP],
+ 'language/expressions/dynamic-import/catch/nested-async-function-instn-iee-err-circular': [SKIP],
+ 'language/expressions/dynamic-import/catch/nested-async-function-return-await-eval-rqstd-abrupt-typeerror': [SKIP],
+ 'language/expressions/dynamic-import/catch/nested-async-function-return-await-eval-rqstd-abrupt-urierror': [SKIP],
+ 'language/expressions/dynamic-import/catch/nested-async-function-return-await-eval-script-code-target': [SKIP],
+ 'language/expressions/dynamic-import/catch/nested-async-function-return-await-file-does-not-exist': [SKIP],
+ 'language/expressions/dynamic-import/catch/nested-async-function-return-await-instn-iee-err-ambiguous-import': [SKIP],
+ 'language/expressions/dynamic-import/catch/nested-async-function-return-await-instn-iee-err-circular': [SKIP],
+ 'language/expressions/dynamic-import/catch/nested-async-function-return-await-specifier-tostring-abrupt-rejects': [SKIP],
+ 'language/expressions/dynamic-import/catch/nested-async-function-specifier-tostring-abrupt-rejects': [SKIP],
+ 'language/expressions/dynamic-import/catch/nested-async-gen-await-eval-rqstd-abrupt-typeerror': [SKIP],
+ 'language/expressions/dynamic-import/catch/nested-async-gen-await-eval-rqstd-abrupt-urierror': [SKIP],
+ 'language/expressions/dynamic-import/catch/nested-async-gen-await-eval-script-code-target': [SKIP],
+ 'language/expressions/dynamic-import/catch/nested-async-gen-await-file-does-not-exist': [SKIP],
+ 'language/expressions/dynamic-import/catch/nested-async-gen-await-instn-iee-err-ambiguous-import': [SKIP],
+ 'language/expressions/dynamic-import/catch/nested-async-gen-await-instn-iee-err-circular': [SKIP],
+ 'language/expressions/dynamic-import/catch/nested-async-gen-await-specifier-tostring-abrupt-rejects': [SKIP],
+ 'language/expressions/dynamic-import/catch/nested-async-gen-return-await-eval-rqstd-abrupt-typeerror': [SKIP],
+ 'language/expressions/dynamic-import/catch/nested-async-gen-return-await-eval-rqstd-abrupt-urierror': [SKIP],
+ 'language/expressions/dynamic-import/catch/nested-async-gen-return-await-eval-script-code-target': [SKIP],
+ 'language/expressions/dynamic-import/catch/nested-async-gen-return-await-file-does-not-exist': [SKIP],
+ 'language/expressions/dynamic-import/catch/nested-async-gen-return-await-instn-iee-err-ambiguous-import': [SKIP],
+ 'language/expressions/dynamic-import/catch/nested-async-gen-return-await-instn-iee-err-circular': [SKIP],
+ 'language/expressions/dynamic-import/catch/nested-async-gen-return-await-specifier-tostring-abrupt-rejects': [SKIP],
+ 'language/expressions/dynamic-import/catch/nested-block-import-catch-eval-rqstd-abrupt-typeerror': [SKIP],
+ 'language/expressions/dynamic-import/catch/nested-block-import-catch-eval-rqstd-abrupt-urierror': [SKIP],
+ 'language/expressions/dynamic-import/catch/nested-block-import-catch-eval-script-code-target': [SKIP],
+ 'language/expressions/dynamic-import/catch/nested-block-import-catch-file-does-not-exist': [SKIP],
+ 'language/expressions/dynamic-import/catch/nested-block-import-catch-instn-iee-err-ambiguous-import': [SKIP],
+ 'language/expressions/dynamic-import/catch/nested-block-import-catch-instn-iee-err-circular': [SKIP],
+ 'language/expressions/dynamic-import/catch/nested-block-import-catch-specifier-tostring-abrupt-rejects': [SKIP],
+ 'language/expressions/dynamic-import/catch/nested-block-labeled-eval-rqstd-abrupt-typeerror': [SKIP],
+ 'language/expressions/dynamic-import/catch/nested-block-labeled-eval-rqstd-abrupt-urierror': [SKIP],
+ 'language/expressions/dynamic-import/catch/nested-block-labeled-eval-script-code-target': [SKIP],
+ 'language/expressions/dynamic-import/catch/nested-block-labeled-file-does-not-exist': [SKIP],
+ 'language/expressions/dynamic-import/catch/nested-block-labeled-instn-iee-err-ambiguous-import': [SKIP],
+ 'language/expressions/dynamic-import/catch/nested-block-labeled-instn-iee-err-circular': [SKIP],
+ 'language/expressions/dynamic-import/catch/nested-block-labeled-specifier-tostring-abrupt-rejects': [SKIP],
+ 'language/expressions/dynamic-import/catch/nested-do-while-eval-rqstd-abrupt-typeerror': [SKIP],
+ 'language/expressions/dynamic-import/catch/nested-do-while-eval-rqstd-abrupt-urierror': [SKIP],
+ 'language/expressions/dynamic-import/catch/nested-do-while-eval-script-code-target': [SKIP],
+ 'language/expressions/dynamic-import/catch/nested-do-while-file-does-not-exist': [SKIP],
+ 'language/expressions/dynamic-import/catch/nested-do-while-instn-iee-err-ambiguous-import': [SKIP],
+ 'language/expressions/dynamic-import/catch/nested-do-while-instn-iee-err-circular': [SKIP],
+ 'language/expressions/dynamic-import/catch/nested-do-while-specifier-tostring-abrupt-rejects': [SKIP],
+ 'language/expressions/dynamic-import/catch/nested-else-import-catch-eval-rqstd-abrupt-typeerror': [SKIP],
+ 'language/expressions/dynamic-import/catch/nested-else-import-catch-eval-rqstd-abrupt-urierror': [SKIP],
+ 'language/expressions/dynamic-import/catch/nested-else-import-catch-eval-script-code-target': [SKIP],
+ 'language/expressions/dynamic-import/catch/nested-else-import-catch-file-does-not-exist': [SKIP],
+ 'language/expressions/dynamic-import/catch/nested-else-import-catch-instn-iee-err-ambiguous-import': [SKIP],
+ 'language/expressions/dynamic-import/catch/nested-else-import-catch-instn-iee-err-circular': [SKIP],
+ 'language/expressions/dynamic-import/catch/nested-else-import-catch-specifier-tostring-abrupt-rejects': [SKIP],
+ 'language/expressions/dynamic-import/catch/nested-function-import-catch-eval-rqstd-abrupt-typeerror': [SKIP],
+ 'language/expressions/dynamic-import/catch/nested-function-import-catch-eval-rqstd-abrupt-urierror': [SKIP],
+ 'language/expressions/dynamic-import/catch/nested-function-import-catch-eval-script-code-target': [SKIP],
+ 'language/expressions/dynamic-import/catch/nested-function-import-catch-file-does-not-exist': [SKIP],
+ 'language/expressions/dynamic-import/catch/nested-function-import-catch-instn-iee-err-ambiguous-import': [SKIP],
+ 'language/expressions/dynamic-import/catch/nested-function-import-catch-instn-iee-err-circular': [SKIP],
+ 'language/expressions/dynamic-import/catch/nested-function-import-catch-specifier-tostring-abrupt-rejects': [SKIP],
+ 'language/expressions/dynamic-import/catch/nested-if-import-catch-eval-rqstd-abrupt-typeerror': [SKIP],
+ 'language/expressions/dynamic-import/catch/nested-if-import-catch-eval-rqstd-abrupt-urierror': [SKIP],
+ 'language/expressions/dynamic-import/catch/nested-if-import-catch-eval-script-code-target': [SKIP],
+ 'language/expressions/dynamic-import/catch/nested-if-import-catch-file-does-not-exist': [SKIP],
+ 'language/expressions/dynamic-import/catch/nested-if-import-catch-instn-iee-err-ambiguous-import': [SKIP],
+ 'language/expressions/dynamic-import/catch/nested-if-import-catch-instn-iee-err-circular': [SKIP],
+ 'language/expressions/dynamic-import/catch/nested-if-import-catch-specifier-tostring-abrupt-rejects': [SKIP],
+ 'language/expressions/dynamic-import/catch/nested-while-import-catch-eval-rqstd-abrupt-typeerror': [SKIP],
+ 'language/expressions/dynamic-import/catch/nested-while-import-catch-eval-rqstd-abrupt-urierror': [SKIP],
+ 'language/expressions/dynamic-import/catch/nested-while-import-catch-eval-script-code-target': [SKIP],
+ 'language/expressions/dynamic-import/catch/nested-while-import-catch-file-does-not-exist': [SKIP],
+ 'language/expressions/dynamic-import/catch/nested-while-import-catch-instn-iee-err-ambiguous-import': [SKIP],
+ 'language/expressions/dynamic-import/catch/nested-while-import-catch-instn-iee-err-circular': [SKIP],
+ 'language/expressions/dynamic-import/catch/nested-while-import-catch-specifier-tostring-abrupt-rejects': [SKIP],
+ 'language/expressions/dynamic-import/catch/top-level-import-catch-eval-rqstd-abrupt-typeerror': [SKIP],
+ 'language/expressions/dynamic-import/catch/top-level-import-catch-eval-rqstd-abrupt-urierror': [SKIP],
+ 'language/expressions/dynamic-import/catch/top-level-import-catch-eval-script-code-target': [SKIP],
+ 'language/expressions/dynamic-import/catch/top-level-import-catch-file-does-not-exist': [SKIP],
+ 'language/expressions/dynamic-import/catch/top-level-import-catch-instn-iee-err-ambiguous-import': [SKIP],
+ 'language/expressions/dynamic-import/catch/top-level-import-catch-instn-iee-err-circular': [SKIP],
+ 'language/expressions/dynamic-import/catch/top-level-import-catch-specifier-tostring-abrupt-rejects': [SKIP],
+ 'language/expressions/dynamic-import/custom-primitive': [SKIP],
+ 'language/expressions/dynamic-import/double-error-resolution': [SKIP],
+ 'language/expressions/dynamic-import/double-error-resolution-promise': [SKIP],
+ 'language/expressions/dynamic-import/escape-sequence-import': [SKIP],
+ 'language/expressions/dynamic-import/eval-export-dflt-cls-anon': [SKIP],
+ 'language/expressions/dynamic-import/eval-export-dflt-cls-named': [SKIP],
+ 'language/expressions/dynamic-import/eval-export-dflt-cls-name-meth': [SKIP],
+ 'language/expressions/dynamic-import/eval-export-dflt-expr-cls-anon': [SKIP],
+ 'language/expressions/dynamic-import/eval-export-dflt-expr-cls-named': [SKIP],
+ 'language/expressions/dynamic-import/eval-export-dflt-expr-cls-name-meth': [SKIP],
+ 'language/expressions/dynamic-import/eval-export-dflt-expr-fn-anon': [SKIP],
+ 'language/expressions/dynamic-import/eval-export-dflt-expr-fn-named': [SKIP],
+ 'language/expressions/dynamic-import/eval-export-dflt-expr-gen-anon': [SKIP],
+ 'language/expressions/dynamic-import/eval-export-dflt-expr-gen-named': [SKIP],
+ 'language/expressions/dynamic-import/eval-export-dflt-expr-in': [SKIP],
+ 'language/expressions/dynamic-import/eval-rqstd-once': [SKIP],
+ 'language/expressions/dynamic-import/eval-self-once-module': [SKIP],
+ 'language/expressions/dynamic-import/eval-self-once-script': [SKIP],
+ 'language/expressions/dynamic-import/for-await-resolution-and-error-agen': [SKIP],
+ 'language/expressions/dynamic-import/for-await-resolution-and-error-agen-yield': [SKIP],
+ 'language/expressions/dynamic-import/for-await-resolution-and-error': [SKIP],
+ 'language/expressions/dynamic-import/imported-self-update': [SKIP],
+ 'language/expressions/dynamic-import/indirect-resolution': [SKIP],
+ 'language/expressions/dynamic-import/namespace/await-ns-define-own-property': [SKIP],
+ 'language/expressions/dynamic-import/namespace/await-ns-delete-exported-init-no-strict': [SKIP],
+ 'language/expressions/dynamic-import/namespace/await-ns-delete-exported-init-strict': [SKIP],
+ 'language/expressions/dynamic-import/namespace/await-ns-delete-non-exported-no-strict': [SKIP],
+ 'language/expressions/dynamic-import/namespace/await-ns-delete-non-exported-strict': [SKIP],
+ 'language/expressions/dynamic-import/namespace/await-ns-extensible': [SKIP],
+ 'language/expressions/dynamic-import/namespace/await-ns-get-nested-namespace-dflt-direct': [SKIP],
+ 'language/expressions/dynamic-import/namespace/await-ns-get-nested-namespace-dflt-indirect': [SKIP],
+ 'language/expressions/dynamic-import/namespace/await-ns-get-nested-namespace-props-nrml': [SKIP],
+ 'language/expressions/dynamic-import/namespace/await-ns-get-own-property-str-found-init': [SKIP],
+ 'language/expressions/dynamic-import/namespace/await-ns-get-own-property-str-not-found': [SKIP],
+ 'language/expressions/dynamic-import/namespace/await-ns-get-own-property-sym': [SKIP],
+ 'language/expressions/dynamic-import/namespace/await-ns-get-str-found': [SKIP],
+ 'language/expressions/dynamic-import/namespace/await-ns-get-str-not-found': [SKIP],
+ 'language/expressions/dynamic-import/namespace/await-ns-get-sym-found': [SKIP],
+ 'language/expressions/dynamic-import/namespace/await-ns-get-sym-not-found': [SKIP],
+ 'language/expressions/dynamic-import/namespace/await-ns-has-property-str-found-init': [SKIP],
+ 'language/expressions/dynamic-import/namespace/await-ns-has-property-str-not-found': [SKIP],
+ 'language/expressions/dynamic-import/namespace/await-ns-has-property-sym-found': [SKIP],
+ 'language/expressions/dynamic-import/namespace/await-ns-has-property-sym-not-found': [SKIP],
+ 'language/expressions/dynamic-import/namespace/await-ns-no-iterator': [SKIP],
+ 'language/expressions/dynamic-import/namespace/await-ns-own-property-keys-sort': [SKIP],
+ 'language/expressions/dynamic-import/namespace/await-ns-prevent-extensions-object': [SKIP],
+ 'language/expressions/dynamic-import/namespace/await-ns-prevent-extensions-reflect': [SKIP],
+ 'language/expressions/dynamic-import/namespace/await-ns-prop-descs': [SKIP],
+ 'language/expressions/dynamic-import/namespace/await-ns-prototype': [SKIP],
+ 'language/expressions/dynamic-import/namespace/await-ns-set-no-strict': [SKIP],
+ 'language/expressions/dynamic-import/namespace/await-ns-set-prototype-of': [SKIP],
+ 'language/expressions/dynamic-import/namespace/await-ns-set-prototype-of-null': [SKIP],
+ 'language/expressions/dynamic-import/namespace/await-ns-set-same-values-no-strict': [SKIP],
+ 'language/expressions/dynamic-import/namespace/await-ns-set-same-values-strict': [SKIP],
+ 'language/expressions/dynamic-import/namespace/await-ns-set-strict': [SKIP],
+ 'language/expressions/dynamic-import/namespace/await-ns-Symbol-toStringTag': [SKIP],
+ 'language/expressions/dynamic-import/namespace/default-property-not-set-own': [SKIP],
+ 'language/expressions/dynamic-import/namespace/promise-then-ns-define-own-property': [SKIP],
+ 'language/expressions/dynamic-import/namespace/promise-then-ns-delete-exported-init-no-strict': [SKIP],
+ 'language/expressions/dynamic-import/namespace/promise-then-ns-delete-exported-init-strict': [SKIP],
+ 'language/expressions/dynamic-import/namespace/promise-then-ns-delete-non-exported-no-strict': [SKIP],
+ 'language/expressions/dynamic-import/namespace/promise-then-ns-delete-non-exported-strict': [SKIP],
+ 'language/expressions/dynamic-import/namespace/promise-then-ns-extensible': [SKIP],
+ 'language/expressions/dynamic-import/namespace/promise-then-ns-get-nested-namespace-dflt-direct': [SKIP],
+ 'language/expressions/dynamic-import/namespace/promise-then-ns-get-nested-namespace-dflt-indirect': [SKIP],
+ 'language/expressions/dynamic-import/namespace/promise-then-ns-get-nested-namespace-props-nrml': [SKIP],
+ 'language/expressions/dynamic-import/namespace/promise-then-ns-get-own-property-str-found-init': [SKIP],
+ 'language/expressions/dynamic-import/namespace/promise-then-ns-get-own-property-str-not-found': [SKIP],
+ 'language/expressions/dynamic-import/namespace/promise-then-ns-get-own-property-sym': [SKIP],
+ 'language/expressions/dynamic-import/namespace/promise-then-ns-get-str-found': [SKIP],
+ 'language/expressions/dynamic-import/namespace/promise-then-ns-get-str-not-found': [SKIP],
+ 'language/expressions/dynamic-import/namespace/promise-then-ns-get-sym-found': [SKIP],
+ 'language/expressions/dynamic-import/namespace/promise-then-ns-get-sym-not-found': [SKIP],
+ 'language/expressions/dynamic-import/namespace/promise-then-ns-has-property-str-found-init': [SKIP],
+ 'language/expressions/dynamic-import/namespace/promise-then-ns-has-property-str-not-found': [SKIP],
+ 'language/expressions/dynamic-import/namespace/promise-then-ns-has-property-sym-found': [SKIP],
+ 'language/expressions/dynamic-import/namespace/promise-then-ns-has-property-sym-not-found': [SKIP],
+ 'language/expressions/dynamic-import/namespace/promise-then-ns-no-iterator': [SKIP],
+ 'language/expressions/dynamic-import/namespace/promise-then-ns-own-property-keys-sort': [SKIP],
+ 'language/expressions/dynamic-import/namespace/promise-then-ns-prevent-extensions-object': [SKIP],
+ 'language/expressions/dynamic-import/namespace/promise-then-ns-prevent-extensions-reflect': [SKIP],
+ 'language/expressions/dynamic-import/namespace/promise-then-ns-prop-descs': [SKIP],
+ 'language/expressions/dynamic-import/namespace/promise-then-ns-prototype': [SKIP],
+ 'language/expressions/dynamic-import/namespace/promise-then-ns-set-no-strict': [SKIP],
+ 'language/expressions/dynamic-import/namespace/promise-then-ns-set-prototype-of': [SKIP],
+ 'language/expressions/dynamic-import/namespace/promise-then-ns-set-prototype-of-null': [SKIP],
+ 'language/expressions/dynamic-import/namespace/promise-then-ns-set-same-values-no-strict': [SKIP],
+ 'language/expressions/dynamic-import/namespace/promise-then-ns-set-same-values-strict': [SKIP],
+ 'language/expressions/dynamic-import/namespace/promise-then-ns-set-strict': [SKIP],
+ 'language/expressions/dynamic-import/namespace/promise-then-ns-Symbol-toStringTag': [SKIP],
+ 'language/expressions/dynamic-import/returns-promise': [SKIP],
+ 'language/expressions/dynamic-import/reuse-namespace-object': [SKIP],
+ 'language/expressions/dynamic-import/reuse-namespace-object-from-import': [SKIP],
+ 'language/expressions/dynamic-import/reuse-namespace-object-from-script': [SKIP],
+ 'language/expressions/dynamic-import/syntax/invalid/invalid-asssignmenttargettype-reference-error-10-lhs-assignment-operator-assignment-expression': [SKIP],
+ 'language/expressions/dynamic-import/syntax/invalid/invalid-asssignmenttargettype-reference-error-11-lhs-assignment-operator-assignment-expression': [SKIP],
+ 'language/expressions/dynamic-import/syntax/invalid/invalid-asssignmenttargettype-reference-error-12-lhs-assignment-operator-assignment-expression': [SKIP],
+ 'language/expressions/dynamic-import/syntax/invalid/invalid-asssignmenttargettype-reference-error-13-lhs-assignment-operator-assignment-expression': [SKIP],
+ 'language/expressions/dynamic-import/syntax/invalid/invalid-asssignmenttargettype-reference-error-14-lhs-assignment-operator-assignment-expression': [SKIP],
+ 'language/expressions/dynamic-import/syntax/invalid/invalid-asssignmenttargettype-reference-error-15-lhs-assignment-operator-assignment-expression': [SKIP],
+ 'language/expressions/dynamic-import/syntax/invalid/invalid-asssignmenttargettype-reference-error-16-lhs-assignment-operator-assignment-expression': [SKIP],
+ 'language/expressions/dynamic-import/syntax/invalid/invalid-asssignmenttargettype-reference-error-17-lhs-assignment-operator-assignment-expression': [SKIP],
+ 'language/expressions/dynamic-import/syntax/invalid/invalid-asssignmenttargettype-reference-error-1-update-expression': [SKIP],
+ 'language/expressions/dynamic-import/syntax/invalid/invalid-asssignmenttargettype-reference-error-2-update-expression': [SKIP],
+ 'language/expressions/dynamic-import/syntax/invalid/invalid-asssignmenttargettype-reference-error-3-update-expression': [SKIP],
+ 'language/expressions/dynamic-import/syntax/invalid/invalid-asssignmenttargettype-reference-error-4-update-expression': [SKIP],
+ 'language/expressions/dynamic-import/syntax/invalid/invalid-asssignmenttargettype-reference-error-5-lhs-equals-assignment-expression': [SKIP],
+ 'language/expressions/dynamic-import/syntax/invalid/invalid-asssignmenttargettype-reference-error-6-lhs-assignment-operator-assignment-expression': [SKIP],
+ 'language/expressions/dynamic-import/syntax/invalid/invalid-asssignmenttargettype-reference-error-7-lhs-assignment-operator-assignment-expression': [SKIP],
+ 'language/expressions/dynamic-import/syntax/invalid/invalid-asssignmenttargettype-reference-error-8-lhs-assignment-operator-assignment-expression': [SKIP],
+ 'language/expressions/dynamic-import/syntax/invalid/invalid-asssignmenttargettype-reference-error-9-lhs-assignment-operator-assignment-expression': [SKIP],
+ 'language/expressions/dynamic-import/syntax/invalid/nested-arrow-assignment-expression-assignment-expr-not-optional': [SKIP],
+ 'language/expressions/dynamic-import/syntax/invalid/nested-arrow-assignment-expression-no-new-call-expression': [SKIP],
+ 'language/expressions/dynamic-import/syntax/invalid/nested-arrow-assignment-expression-no-rest-param': [SKIP],
+ 'language/expressions/dynamic-import/syntax/invalid/nested-arrow-assignment-expression-not-extensible-args': [SKIP],
+ 'language/expressions/dynamic-import/syntax/invalid/nested-arrow-assignment-expression-not-extensible-no-trailing-comma': [SKIP],
+ 'language/expressions/dynamic-import/syntax/invalid/nested-arrow-assignment-expr-not-optional': [SKIP],
+ 'language/expressions/dynamic-import/syntax/invalid/nested-arrow-no-new-call-expression': [SKIP],
+ 'language/expressions/dynamic-import/syntax/invalid/nested-arrow-no-rest-param': [SKIP],
+ 'language/expressions/dynamic-import/syntax/invalid/nested-arrow-not-extensible-args': [SKIP],
+ 'language/expressions/dynamic-import/syntax/invalid/nested-arrow-not-extensible-no-trailing-comma': [SKIP],
+ 'language/expressions/dynamic-import/syntax/invalid/nested-async-arrow-function-await-assignment-expr-not-optional': [SKIP],
+ 'language/expressions/dynamic-import/syntax/invalid/nested-async-arrow-function-await-no-new-call-expression': [SKIP],
+ 'language/expressions/dynamic-import/syntax/invalid/nested-async-arrow-function-await-no-rest-param': [SKIP],
+ 'language/expressions/dynamic-import/syntax/invalid/nested-async-arrow-function-await-not-extensible-args': [SKIP],
+ 'language/expressions/dynamic-import/syntax/invalid/nested-async-arrow-function-await-not-extensible-no-trailing-comma': [SKIP],
+ 'language/expressions/dynamic-import/syntax/invalid/nested-async-arrow-function-return-await-assignment-expr-not-optional': [SKIP],
+ 'language/expressions/dynamic-import/syntax/invalid/nested-async-arrow-function-return-await-no-new-call-expression': [SKIP],
+ 'language/expressions/dynamic-import/syntax/invalid/nested-async-arrow-function-return-await-no-rest-param': [SKIP],
+ 'language/expressions/dynamic-import/syntax/invalid/nested-async-arrow-function-return-await-not-extensible-args': [SKIP],
+ 'language/expressions/dynamic-import/syntax/invalid/nested-async-arrow-function-return-await-not-extensible-no-trailing-comma': [SKIP],
+ 'language/expressions/dynamic-import/syntax/invalid/nested-async-function-assignment-expr-not-optional': [SKIP],
+ 'language/expressions/dynamic-import/syntax/invalid/nested-async-function-await-assignment-expr-not-optional': [SKIP],
+ 'language/expressions/dynamic-import/syntax/invalid/nested-async-function-await-no-new-call-expression': [SKIP],
+ 'language/expressions/dynamic-import/syntax/invalid/nested-async-function-await-no-rest-param': [SKIP],
+ 'language/expressions/dynamic-import/syntax/invalid/nested-async-function-await-not-extensible-args': [SKIP],
+ 'language/expressions/dynamic-import/syntax/invalid/nested-async-function-await-not-extensible-no-trailing-comma': [SKIP],
+ 'language/expressions/dynamic-import/syntax/invalid/nested-async-function-no-new-call-expression': [SKIP],
+ 'language/expressions/dynamic-import/syntax/invalid/nested-async-function-no-rest-param': [SKIP],
+ 'language/expressions/dynamic-import/syntax/invalid/nested-async-function-not-extensible-args': [SKIP],
+ 'language/expressions/dynamic-import/syntax/invalid/nested-async-function-not-extensible-no-trailing-comma': [SKIP],
+ 'language/expressions/dynamic-import/syntax/invalid/nested-async-function-return-await-assignment-expr-not-optional': [SKIP],
+ 'language/expressions/dynamic-import/syntax/invalid/nested-async-function-return-await-no-new-call-expression': [SKIP],
+ 'language/expressions/dynamic-import/syntax/invalid/nested-async-function-return-await-no-rest-param': [SKIP],
+ 'language/expressions/dynamic-import/syntax/invalid/nested-async-function-return-await-not-extensible-args': [SKIP],
+ 'language/expressions/dynamic-import/syntax/invalid/nested-async-function-return-await-not-extensible-no-trailing-comma': [SKIP],
+ 'language/expressions/dynamic-import/syntax/invalid/nested-async-gen-await-assignment-expr-not-optional': [SKIP],
+ 'language/expressions/dynamic-import/syntax/invalid/nested-async-gen-await-no-new-call-expression': [SKIP],
+ 'language/expressions/dynamic-import/syntax/invalid/nested-async-gen-await-no-rest-param': [SKIP],
+ 'language/expressions/dynamic-import/syntax/invalid/nested-async-gen-await-not-extensible-args': [SKIP],
+ 'language/expressions/dynamic-import/syntax/invalid/nested-async-gen-await-not-extensible-no-trailing-comma': [SKIP],
+ 'language/expressions/dynamic-import/syntax/invalid/nested-block-assignment-expr-not-optional': [SKIP],
+ 'language/expressions/dynamic-import/syntax/invalid/nested-block-labeled-assignment-expr-not-optional': [SKIP],
+ 'language/expressions/dynamic-import/syntax/invalid/nested-block-labeled-no-new-call-expression': [SKIP],
+ 'language/expressions/dynamic-import/syntax/invalid/nested-block-labeled-no-rest-param': [SKIP],
+ 'language/expressions/dynamic-import/syntax/invalid/nested-block-labeled-not-extensible-args': [SKIP],
+ 'language/expressions/dynamic-import/syntax/invalid/nested-block-labeled-not-extensible-no-trailing-comma': [SKIP],
+ 'language/expressions/dynamic-import/syntax/invalid/nested-block-no-new-call-expression': [SKIP],
+ 'language/expressions/dynamic-import/syntax/invalid/nested-block-no-rest-param': [SKIP],
+ 'language/expressions/dynamic-import/syntax/invalid/nested-block-not-extensible-args': [SKIP],
+ 'language/expressions/dynamic-import/syntax/invalid/nested-block-not-extensible-no-trailing-comma': [SKIP],
+ 'language/expressions/dynamic-import/syntax/invalid/nested-do-while-assignment-expr-not-optional': [SKIP],
+ 'language/expressions/dynamic-import/syntax/invalid/nested-do-while-no-new-call-expression': [SKIP],
+ 'language/expressions/dynamic-import/syntax/invalid/nested-do-while-no-rest-param': [SKIP],
+ 'language/expressions/dynamic-import/syntax/invalid/nested-do-while-not-extensible-args': [SKIP],
+ 'language/expressions/dynamic-import/syntax/invalid/nested-do-while-not-extensible-no-trailing-comma': [SKIP],
+ 'language/expressions/dynamic-import/syntax/invalid/nested-else-assignment-expr-not-optional': [SKIP],
+ 'language/expressions/dynamic-import/syntax/invalid/nested-else-braceless-assignment-expr-not-optional': [SKIP],
+ 'language/expressions/dynamic-import/syntax/invalid/nested-else-braceless-no-new-call-expression': [SKIP],
+ 'language/expressions/dynamic-import/syntax/invalid/nested-else-braceless-no-rest-param': [SKIP],
+ 'language/expressions/dynamic-import/syntax/invalid/nested-else-braceless-not-extensible-args': [SKIP],
+ 'language/expressions/dynamic-import/syntax/invalid/nested-else-braceless-not-extensible-no-trailing-comma': [SKIP],
+ 'language/expressions/dynamic-import/syntax/invalid/nested-else-no-new-call-expression': [SKIP],
+ 'language/expressions/dynamic-import/syntax/invalid/nested-else-no-rest-param': [SKIP],
+ 'language/expressions/dynamic-import/syntax/invalid/nested-else-not-extensible-args': [SKIP],
+ 'language/expressions/dynamic-import/syntax/invalid/nested-else-not-extensible-no-trailing-comma': [SKIP],
+ 'language/expressions/dynamic-import/syntax/invalid/nested-function-assignment-expr-not-optional': [SKIP],
+ 'language/expressions/dynamic-import/syntax/invalid/nested-function-no-new-call-expression': [SKIP],
+ 'language/expressions/dynamic-import/syntax/invalid/nested-function-no-rest-param': [SKIP],
+ 'language/expressions/dynamic-import/syntax/invalid/nested-function-not-extensible-args': [SKIP],
+ 'language/expressions/dynamic-import/syntax/invalid/nested-function-not-extensible-no-trailing-comma': [SKIP],
+ 'language/expressions/dynamic-import/syntax/invalid/nested-function-return-assignment-expr-not-optional': [SKIP],
+ 'language/expressions/dynamic-import/syntax/invalid/nested-function-return-no-new-call-expression': [SKIP],
+ 'language/expressions/dynamic-import/syntax/invalid/nested-function-return-no-rest-param': [SKIP],
+ 'language/expressions/dynamic-import/syntax/invalid/nested-function-return-not-extensible-args': [SKIP],
+ 'language/expressions/dynamic-import/syntax/invalid/nested-function-return-not-extensible-no-trailing-comma': [SKIP],
+ 'language/expressions/dynamic-import/syntax/invalid/nested-if-assignment-expr-not-optional': [SKIP],
+ 'language/expressions/dynamic-import/syntax/invalid/nested-if-braceless-assignment-expr-not-optional': [SKIP],
+ 'language/expressions/dynamic-import/syntax/invalid/nested-if-braceless-no-new-call-expression': [SKIP],
+ 'language/expressions/dynamic-import/syntax/invalid/nested-if-braceless-no-rest-param': [SKIP],
+ 'language/expressions/dynamic-import/syntax/invalid/nested-if-braceless-not-extensible-args': [SKIP],
+ 'language/expressions/dynamic-import/syntax/invalid/nested-if-braceless-not-extensible-no-trailing-comma': [SKIP],
+ 'language/expressions/dynamic-import/syntax/invalid/nested-if-no-new-call-expression': [SKIP],
+ 'language/expressions/dynamic-import/syntax/invalid/nested-if-no-rest-param': [SKIP],
+ 'language/expressions/dynamic-import/syntax/invalid/nested-if-not-extensible-args': [SKIP],
+ 'language/expressions/dynamic-import/syntax/invalid/nested-if-not-extensible-no-trailing-comma': [SKIP],
+ 'language/expressions/dynamic-import/syntax/invalid/nested-while-assignment-expr-not-optional': [SKIP],
+ 'language/expressions/dynamic-import/syntax/invalid/nested-while-no-new-call-expression': [SKIP],
+ 'language/expressions/dynamic-import/syntax/invalid/nested-while-no-rest-param': [SKIP],
+ 'language/expressions/dynamic-import/syntax/invalid/nested-while-not-extensible-args': [SKIP],
+ 'language/expressions/dynamic-import/syntax/invalid/nested-while-not-extensible-no-trailing-comma': [SKIP],
+ 'language/expressions/dynamic-import/syntax/invalid/nested-with-assignment-expr-not-optional': [SKIP],
+ 'language/expressions/dynamic-import/syntax/invalid/nested-with-expression-assignment-expr-not-optional': [SKIP],
+ 'language/expressions/dynamic-import/syntax/invalid/nested-with-expression-no-new-call-expression': [SKIP],
+ 'language/expressions/dynamic-import/syntax/invalid/nested-with-expression-no-rest-param': [SKIP],
+ 'language/expressions/dynamic-import/syntax/invalid/nested-with-expression-not-extensible-args': [SKIP],
+ 'language/expressions/dynamic-import/syntax/invalid/nested-with-expression-not-extensible-no-trailing-comma': [SKIP],
+ 'language/expressions/dynamic-import/syntax/invalid/nested-with-no-new-call-expression': [SKIP],
+ 'language/expressions/dynamic-import/syntax/invalid/nested-with-no-rest-param': [SKIP],
+ 'language/expressions/dynamic-import/syntax/invalid/nested-with-not-extensible-args': [SKIP],
+ 'language/expressions/dynamic-import/syntax/invalid/nested-with-not-extensible-no-trailing-comma': [SKIP],
+ 'language/expressions/dynamic-import/syntax/invalid/top-level-assignment-expr-not-optional': [SKIP],
+ 'language/expressions/dynamic-import/syntax/invalid/top-level-no-new-call-expression': [SKIP],
+ 'language/expressions/dynamic-import/syntax/invalid/top-level-no-rest-param': [SKIP],
+ 'language/expressions/dynamic-import/syntax/invalid/top-level-not-extensible-args': [SKIP],
+ 'language/expressions/dynamic-import/syntax/invalid/top-level-not-extensible-no-trailing-comma': [SKIP],
+ 'language/expressions/dynamic-import/syntax/valid/callexpression-arguments': [SKIP],
+ 'language/expressions/dynamic-import/syntax/valid/callexpression-templateliteral': [SKIP],
+ 'language/expressions/dynamic-import/syntax/valid/nested-arrow-assignment-expression-empty-str-is-valid-assign-expr': [SKIP],
+ 'language/expressions/dynamic-import/syntax/valid/nested-arrow-assignment-expression-nested-imports': [SKIP],
+ 'language/expressions/dynamic-import/syntax/valid/nested-arrow-assignment-expression-script-code-valid': [SKIP],
+ 'language/expressions/dynamic-import/syntax/valid/nested-arrow-empty-str-is-valid-assign-expr': [SKIP],
+ 'language/expressions/dynamic-import/syntax/valid/nested-arrow-nested-imports': [SKIP],
+ 'language/expressions/dynamic-import/syntax/valid/nested-arrow-script-code-valid': [SKIP],
+ 'language/expressions/dynamic-import/syntax/valid/nested-async-arrow-function-await-empty-str-is-valid-assign-expr': [SKIP],
+ 'language/expressions/dynamic-import/syntax/valid/nested-async-arrow-function-await-nested-imports': [SKIP],
+ 'language/expressions/dynamic-import/syntax/valid/nested-async-arrow-function-await-script-code-valid': [SKIP],
+ 'language/expressions/dynamic-import/syntax/valid/nested-async-arrow-function-return-await-empty-str-is-valid-assign-expr': [SKIP],
+ 'language/expressions/dynamic-import/syntax/valid/nested-async-arrow-function-return-await-nested-imports': [SKIP],
+ 'language/expressions/dynamic-import/syntax/valid/nested-async-arrow-function-return-await-script-code-valid': [SKIP],
+ 'language/expressions/dynamic-import/syntax/valid/nested-async-function-await-empty-str-is-valid-assign-expr': [SKIP],
+ 'language/expressions/dynamic-import/syntax/valid/nested-async-function-await-nested-imports': [SKIP],
+ 'language/expressions/dynamic-import/syntax/valid/nested-async-function-await-script-code-valid': [SKIP],
+ 'language/expressions/dynamic-import/syntax/valid/nested-async-function-empty-str-is-valid-assign-expr': [SKIP],
+ 'language/expressions/dynamic-import/syntax/valid/nested-async-function-nested-imports': [SKIP],
+ 'language/expressions/dynamic-import/syntax/valid/nested-async-function-return-await-empty-str-is-valid-assign-expr': [SKIP],
+ 'language/expressions/dynamic-import/syntax/valid/nested-async-function-return-await-nested-imports': [SKIP],
+ 'language/expressions/dynamic-import/syntax/valid/nested-async-function-return-await-script-code-valid': [SKIP],
+ 'language/expressions/dynamic-import/syntax/valid/nested-async-function-script-code-valid': [SKIP],
+ 'language/expressions/dynamic-import/syntax/valid/nested-async-gen-await-empty-str-is-valid-assign-expr': [SKIP],
+ 'language/expressions/dynamic-import/syntax/valid/nested-async-gen-await-nested-imports': [SKIP],
+ 'language/expressions/dynamic-import/syntax/valid/nested-async-gen-await-script-code-valid': [SKIP],
+ 'language/expressions/dynamic-import/syntax/valid/nested-block-empty-str-is-valid-assign-expr': [SKIP],
+ 'language/expressions/dynamic-import/syntax/valid/nested-block-labeled-empty-str-is-valid-assign-expr': [SKIP],
+ 'language/expressions/dynamic-import/syntax/valid/nested-block-labeled-nested-imports': [SKIP],
+ 'language/expressions/dynamic-import/syntax/valid/nested-block-labeled-script-code-valid': [SKIP],
+ 'language/expressions/dynamic-import/syntax/valid/nested-block-nested-imports': [SKIP],
+ 'language/expressions/dynamic-import/syntax/valid/nested-block-script-code-valid': [SKIP],
+ 'language/expressions/dynamic-import/syntax/valid/nested-do-while-empty-str-is-valid-assign-expr': [SKIP],
+ 'language/expressions/dynamic-import/syntax/valid/nested-do-while-nested-imports': [SKIP],
+ 'language/expressions/dynamic-import/syntax/valid/nested-do-while-script-code-valid': [SKIP],
+ 'language/expressions/dynamic-import/syntax/valid/nested-else-braceless-empty-str-is-valid-assign-expr': [SKIP],
+ 'language/expressions/dynamic-import/syntax/valid/nested-else-braceless-nested-imports': [SKIP],
+ 'language/expressions/dynamic-import/syntax/valid/nested-else-braceless-script-code-valid': [SKIP],
+ 'language/expressions/dynamic-import/syntax/valid/nested-else-empty-str-is-valid-assign-expr': [SKIP],
+ 'language/expressions/dynamic-import/syntax/valid/nested-else-nested-imports': [SKIP],
+ 'language/expressions/dynamic-import/syntax/valid/nested-else-script-code-valid': [SKIP],
+ 'language/expressions/dynamic-import/syntax/valid/nested-function-empty-str-is-valid-assign-expr': [SKIP],
+ 'language/expressions/dynamic-import/syntax/valid/nested-function-nested-imports': [SKIP],
+ 'language/expressions/dynamic-import/syntax/valid/nested-function-return-empty-str-is-valid-assign-expr': [SKIP],
+ 'language/expressions/dynamic-import/syntax/valid/nested-function-return-nested-imports': [SKIP],
+ 'language/expressions/dynamic-import/syntax/valid/nested-function-return-script-code-valid': [SKIP],
+ 'language/expressions/dynamic-import/syntax/valid/nested-function-script-code-valid': [SKIP],
+ 'language/expressions/dynamic-import/syntax/valid/nested-if-braceless-empty-str-is-valid-assign-expr': [SKIP],
+ 'language/expressions/dynamic-import/syntax/valid/nested-if-braceless-nested-imports': [SKIP],
+ 'language/expressions/dynamic-import/syntax/valid/nested-if-braceless-script-code-valid': [SKIP],
+ 'language/expressions/dynamic-import/syntax/valid/nested-if-empty-str-is-valid-assign-expr': [SKIP],
+ 'language/expressions/dynamic-import/syntax/valid/nested-if-nested-imports': [SKIP],
+ 'language/expressions/dynamic-import/syntax/valid/nested-if-script-code-valid': [SKIP],
+ 'language/expressions/dynamic-import/syntax/valid/nested-while-empty-str-is-valid-assign-expr': [SKIP],
+ 'language/expressions/dynamic-import/syntax/valid/nested-while-nested-imports': [SKIP],
+ 'language/expressions/dynamic-import/syntax/valid/nested-while-script-code-valid': [SKIP],
+ 'language/expressions/dynamic-import/syntax/valid/nested-with-empty-str-is-valid-assign-expr': [SKIP],
+ 'language/expressions/dynamic-import/syntax/valid/nested-with-expression-empty-str-is-valid-assign-expr': [SKIP],
+ 'language/expressions/dynamic-import/syntax/valid/nested-with-expression-nested-imports': [SKIP],
+ 'language/expressions/dynamic-import/syntax/valid/nested-with-expression-script-code-valid': [SKIP],
+ 'language/expressions/dynamic-import/syntax/valid/nested-with-nested-imports': [SKIP],
+ 'language/expressions/dynamic-import/syntax/valid/nested-with-script-code-valid': [SKIP],
+ 'language/expressions/dynamic-import/syntax/valid/new-covered-expression-is-valid': [SKIP],
+ 'language/expressions/dynamic-import/syntax/valid/top-level-empty-str-is-valid-assign-expr': [SKIP],
+ 'language/expressions/dynamic-import/syntax/valid/top-level-nested-imports': [SKIP],
+ 'language/expressions/dynamic-import/syntax/valid/top-level-script-code-valid': [SKIP],
+ 'language/expressions/dynamic-import/update-to-dynamic-import': [SKIP],
+ 'language/expressions/dynamic-import/usage-from-eval': [SKIP],
+ 'language/expressions/dynamic-import/usage/nested-arrow-assignment-expression-eval-gtbndng-indirect-update-dflt': [SKIP],
+ 'language/expressions/dynamic-import/usage/nested-arrow-assignment-expression-eval-gtbndng-indirect-update': [SKIP],
+ 'language/expressions/dynamic-import/usage/nested-arrow-assignment-expression-eval-script-code-host-resolves-module-code': [SKIP],
+ 'language/expressions/dynamic-import/usage/nested-arrow-assignment-expression-is-call-expression-square-brackets': [SKIP],
+ 'language/expressions/dynamic-import/usage/nested-arrow-assignment-expression-returns-thenable': [SKIP],
+ 'language/expressions/dynamic-import/usage/nested-arrow-assignment-expression-specifier-tostring': [SKIP],
+ 'language/expressions/dynamic-import/usage/nested-arrow-import-then-eval-gtbndng-indirect-update-dflt': [SKIP],
+ 'language/expressions/dynamic-import/usage/nested-arrow-import-then-eval-gtbndng-indirect-update': [SKIP],
+ 'language/expressions/dynamic-import/usage/nested-arrow-import-then-eval-script-code-host-resolves-module-code': [SKIP],
+ 'language/expressions/dynamic-import/usage/nested-arrow-import-then-is-call-expression-square-brackets': [SKIP],
+ 'language/expressions/dynamic-import/usage/nested-arrow-import-then-returns-thenable': [SKIP],
+ 'language/expressions/dynamic-import/usage/nested-arrow-import-then-specifier-tostring': [SKIP],
+ 'language/expressions/dynamic-import/usage/nested-async-arrow-function-await-eval-gtbndng-indirect-update-dflt': [SKIP],
+ 'language/expressions/dynamic-import/usage/nested-async-arrow-function-await-eval-gtbndng-indirect-update': [SKIP],
+ 'language/expressions/dynamic-import/usage/nested-async-arrow-function-await-eval-script-code-host-resolves-module-code': [SKIP],
+ 'language/expressions/dynamic-import/usage/nested-async-arrow-function-await-is-call-expression-square-brackets': [SKIP],
+ 'language/expressions/dynamic-import/usage/nested-async-arrow-function-await-returns-thenable': [SKIP],
+ 'language/expressions/dynamic-import/usage/nested-async-arrow-function-await-specifier-tostring': [SKIP],
+ 'language/expressions/dynamic-import/usage/nested-async-arrow-function-return-await-eval-gtbndng-indirect-update-dflt': [SKIP],
+ 'language/expressions/dynamic-import/usage/nested-async-arrow-function-return-await-eval-gtbndng-indirect-update': [SKIP],
+ 'language/expressions/dynamic-import/usage/nested-async-arrow-function-return-await-eval-script-code-host-resolves-module-code': [SKIP],
+ 'language/expressions/dynamic-import/usage/nested-async-arrow-function-return-await-is-call-expression-square-brackets': [SKIP],
+ 'language/expressions/dynamic-import/usage/nested-async-arrow-function-return-await-returns-thenable': [SKIP],
+ 'language/expressions/dynamic-import/usage/nested-async-arrow-function-return-await-specifier-tostring': [SKIP],
+ 'language/expressions/dynamic-import/usage/nested-async-function-await-eval-gtbndng-indirect-update-dflt': [SKIP],
+ 'language/expressions/dynamic-import/usage/nested-async-function-await-eval-gtbndng-indirect-update': [SKIP],
+ 'language/expressions/dynamic-import/usage/nested-async-function-await-eval-script-code-host-resolves-module-code': [SKIP],
+ 'language/expressions/dynamic-import/usage/nested-async-function-await-is-call-expression-square-brackets': [SKIP],
+ 'language/expressions/dynamic-import/usage/nested-async-function-await-returns-thenable': [SKIP],
+ 'language/expressions/dynamic-import/usage/nested-async-function-await-specifier-tostring': [SKIP],
+ 'language/expressions/dynamic-import/usage/nested-async-function-eval-gtbndng-indirect-update-dflt': [SKIP],
+ 'language/expressions/dynamic-import/usage/nested-async-function-eval-gtbndng-indirect-update': [SKIP],
+ 'language/expressions/dynamic-import/usage/nested-async-function-eval-script-code-host-resolves-module-code': [SKIP],
+ 'language/expressions/dynamic-import/usage/nested-async-function-is-call-expression-square-brackets': [SKIP],
+ 'language/expressions/dynamic-import/usage/nested-async-function-return-await-eval-gtbndng-indirect-update-dflt': [SKIP],
+ 'language/expressions/dynamic-import/usage/nested-async-function-return-await-eval-gtbndng-indirect-update': [SKIP],
+ 'language/expressions/dynamic-import/usage/nested-async-function-return-await-eval-script-code-host-resolves-module-code': [SKIP],
+ 'language/expressions/dynamic-import/usage/nested-async-function-return-await-is-call-expression-square-brackets': [SKIP],
+ 'language/expressions/dynamic-import/usage/nested-async-function-return-await-returns-thenable': [SKIP],
+ 'language/expressions/dynamic-import/usage/nested-async-function-return-await-specifier-tostring': [SKIP],
+ 'language/expressions/dynamic-import/usage/nested-async-function-returns-thenable': [SKIP],
+ 'language/expressions/dynamic-import/usage/nested-async-function-specifier-tostring': [SKIP],
+ 'language/expressions/dynamic-import/usage/nested-async-gen-await-eval-gtbndng-indirect-update-dflt': [SKIP],
+ 'language/expressions/dynamic-import/usage/nested-async-gen-await-eval-gtbndng-indirect-update': [SKIP],
+ 'language/expressions/dynamic-import/usage/nested-async-gen-await-eval-script-code-host-resolves-module-code': [SKIP],
+ 'language/expressions/dynamic-import/usage/nested-async-gen-await-is-call-expression-square-brackets': [SKIP],
+ 'language/expressions/dynamic-import/usage/nested-async-gen-await-returns-thenable': [SKIP],
+ 'language/expressions/dynamic-import/usage/nested-async-gen-await-specifier-tostring': [SKIP],
+ 'language/expressions/dynamic-import/usage/nested-async-gen-return-await-eval-gtbndng-indirect-update-dflt': [SKIP],
+ 'language/expressions/dynamic-import/usage/nested-async-gen-return-await-eval-gtbndng-indirect-update': [SKIP],
+ 'language/expressions/dynamic-import/usage/nested-async-gen-return-await-eval-script-code-host-resolves-module-code': [SKIP],
+ 'language/expressions/dynamic-import/usage/nested-async-gen-return-await-is-call-expression-square-brackets': [SKIP],
+ 'language/expressions/dynamic-import/usage/nested-async-gen-return-await-returns-thenable': [SKIP],
+ 'language/expressions/dynamic-import/usage/nested-async-gen-return-await-specifier-tostring': [SKIP],
+ 'language/expressions/dynamic-import/usage/nested-block-import-then-eval-gtbndng-indirect-update-dflt': [SKIP],
+ 'language/expressions/dynamic-import/usage/nested-block-import-then-eval-gtbndng-indirect-update': [SKIP],
+ 'language/expressions/dynamic-import/usage/nested-block-import-then-eval-script-code-host-resolves-module-code': [SKIP],
+ 'language/expressions/dynamic-import/usage/nested-block-import-then-is-call-expression-square-brackets': [SKIP],
+ 'language/expressions/dynamic-import/usage/nested-block-import-then-returns-thenable': [SKIP],
+ 'language/expressions/dynamic-import/usage/nested-block-import-then-specifier-tostring': [SKIP],
+ 'language/expressions/dynamic-import/usage/nested-do-while-eval-gtbndng-indirect-update-dflt': [SKIP],
+ 'language/expressions/dynamic-import/usage/nested-do-while-eval-gtbndng-indirect-update': [SKIP],
+ 'language/expressions/dynamic-import/usage/nested-do-while-eval-script-code-host-resolves-module-code': [SKIP],
+ 'language/expressions/dynamic-import/usage/nested-do-while-is-call-expression-square-brackets': [SKIP],
+ 'language/expressions/dynamic-import/usage/nested-do-while-returns-thenable': [SKIP],
+ 'language/expressions/dynamic-import/usage/nested-do-while-specifier-tostring': [SKIP],
+ 'language/expressions/dynamic-import/usage/nested-else-import-then-eval-gtbndng-indirect-update-dflt': [SKIP],
+ 'language/expressions/dynamic-import/usage/nested-else-import-then-eval-gtbndng-indirect-update': [SKIP],
+ 'language/expressions/dynamic-import/usage/nested-else-import-then-eval-script-code-host-resolves-module-code': [SKIP],
+ 'language/expressions/dynamic-import/usage/nested-else-import-then-is-call-expression-square-brackets': [SKIP],
+ 'language/expressions/dynamic-import/usage/nested-else-import-then-returns-thenable': [SKIP],
+ 'language/expressions/dynamic-import/usage/nested-else-import-then-specifier-tostring': [SKIP],
+ 'language/expressions/dynamic-import/usage/nested-function-import-then-eval-gtbndng-indirect-update-dflt': [SKIP],
+ 'language/expressions/dynamic-import/usage/nested-function-import-then-eval-gtbndng-indirect-update': [SKIP],
+ 'language/expressions/dynamic-import/usage/nested-function-import-then-eval-script-code-host-resolves-module-code': [SKIP],
+ 'language/expressions/dynamic-import/usage/nested-function-import-then-is-call-expression-square-brackets': [SKIP],
+ 'language/expressions/dynamic-import/usage/nested-function-import-then-returns-thenable': [SKIP],
+ 'language/expressions/dynamic-import/usage/nested-function-import-then-specifier-tostring': [SKIP],
+ 'language/expressions/dynamic-import/usage/nested-if-braceless-eval-gtbndng-indirect-update-dflt': [SKIP],
+ 'language/expressions/dynamic-import/usage/nested-if-braceless-eval-gtbndng-indirect-update': [SKIP],
+ 'language/expressions/dynamic-import/usage/nested-if-braceless-eval-script-code-host-resolves-module-code': [SKIP],
+ 'language/expressions/dynamic-import/usage/nested-if-braceless-is-call-expression-square-brackets': [SKIP],
+ 'language/expressions/dynamic-import/usage/nested-if-braceless-returns-thenable': [SKIP],
+ 'language/expressions/dynamic-import/usage/nested-if-braceless-specifier-tostring': [SKIP],
+ 'language/expressions/dynamic-import/usage/nested-if-import-then-eval-gtbndng-indirect-update-dflt': [SKIP],
+ 'language/expressions/dynamic-import/usage/nested-if-import-then-eval-gtbndng-indirect-update': [SKIP],
+ 'language/expressions/dynamic-import/usage/nested-if-import-then-eval-script-code-host-resolves-module-code': [SKIP],
+ 'language/expressions/dynamic-import/usage/nested-if-import-then-is-call-expression-square-brackets': [SKIP],
+ 'language/expressions/dynamic-import/usage/nested-if-import-then-returns-thenable': [SKIP],
+ 'language/expressions/dynamic-import/usage/nested-if-import-then-specifier-tostring': [SKIP],
+ 'language/expressions/dynamic-import/usage/nested-while-import-then-eval-gtbndng-indirect-update-dflt': [SKIP],
+ 'language/expressions/dynamic-import/usage/nested-while-import-then-eval-gtbndng-indirect-update': [SKIP],
+ 'language/expressions/dynamic-import/usage/nested-while-import-then-eval-script-code-host-resolves-module-code': [SKIP],
+ 'language/expressions/dynamic-import/usage/nested-while-import-then-is-call-expression-square-brackets': [SKIP],
+ 'language/expressions/dynamic-import/usage/nested-while-import-then-returns-thenable': [SKIP],
+ 'language/expressions/dynamic-import/usage/nested-while-import-then-specifier-tostring': [SKIP],
+ 'language/expressions/dynamic-import/usage/syntax-nested-block-labeled-eval-gtbndng-indirect-update-dflt': [SKIP],
+ 'language/expressions/dynamic-import/usage/syntax-nested-block-labeled-eval-gtbndng-indirect-update': [SKIP],
+ 'language/expressions/dynamic-import/usage/syntax-nested-block-labeled-eval-script-code-host-resolves-module-code': [SKIP],
+ 'language/expressions/dynamic-import/usage/syntax-nested-block-labeled-is-call-expression-square-brackets': [SKIP],
+ 'language/expressions/dynamic-import/usage/syntax-nested-block-labeled-returns-thenable': [SKIP],
+ 'language/expressions/dynamic-import/usage/syntax-nested-block-labeled-specifier-tostring': [SKIP],
+ 'language/expressions/dynamic-import/usage/top-level-import-then-eval-gtbndng-indirect-update-dflt': [SKIP],
+ 'language/expressions/dynamic-import/usage/top-level-import-then-eval-gtbndng-indirect-update': [SKIP],
+ 'language/expressions/dynamic-import/usage/top-level-import-then-eval-script-code-host-resolves-module-code': [SKIP],
+ 'language/expressions/dynamic-import/usage/top-level-import-then-is-call-expression-square-brackets': [SKIP],
+ 'language/expressions/dynamic-import/usage/top-level-import-then-returns-thenable': [SKIP],
+ 'language/expressions/dynamic-import/usage/top-level-import-then-specifier-tostring': [SKIP],
}], # asan == True
['asan == True or msan == True or tsan == True', {
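The entries above follow the status-file pattern used throughout this test262 diff: a guard expression over build variables, followed by test-path to outcome rules. A minimal Python sketch of how such a section is consulted — hypothetical helper only, not V8's real parser (which lives in tools/testrunner/local/statusfile.py):

    # Sketch only: a status-file section pairs a guard expression with
    # test-path -> outcome rules (not V8's actual statusfile logic).
    SKIP = 'SKIP'

    def outcomes_for(test_path, sections, variables):
        """Collect outcomes from every section whose guard evaluates true."""
        outcomes = []
        for guard, rules in sections:
            # Guards such as 'asan == True' are expressions over build flags.
            if eval(guard, {}, dict(variables)):
                outcomes.extend(rules.get(test_path, []))
        return outcomes

    sections = [('asan == True', {
        'language/expressions/dynamic-import/returns-promise': [SKIP],
    })]
    assert outcomes_for('language/expressions/dynamic-import/returns-promise',
                        sections, {'asan': True}) == [SKIP]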
diff --git a/deps/v8/test/test262/testcfg.py b/deps/v8/test/test262/testcfg.py
index 105f6713f2..6674abbfce 100644
--- a/deps/v8/test/test262/testcfg.py
+++ b/deps/v8/test/test262/testcfg.py
@@ -44,6 +44,8 @@ from testrunner.outproc import test262
FEATURE_FLAGS = {
'class-fields-public': '--harmony-public-fields',
'class-static-fields-public': '--harmony-class-fields',
+ 'class-fields-private': '--harmony-private-fields',
+ 'class-static-fields-private': '--harmony-private-fields',
'Array.prototype.flat': '--harmony-array-flat',
'Array.prototype.flatMap': '--harmony-array-flat',
'String.prototype.matchAll': '--harmony-string-matchall',
@@ -56,14 +58,13 @@ FEATURE_FLAGS = {
'Symbol.prototype.description': '--harmony-symbol-description',
'globalThis': '--harmony-global',
'well-formed-json-stringify': '--harmony-json-stringify',
+ 'export-star-as-namespace-from-module': '--harmony-namespace-exports',
+ 'Object.fromEntries': '--harmony-object-from-entries',
}
-SKIPPED_FEATURES = set(['Object.fromEntries',
- 'export-star-as-namespace-from-module',
- 'class-fields-private',
- 'class-static-fields-private',
- 'class-methods-private',
- 'class-static-methods-private'])
+SKIPPED_FEATURES = set(['class-methods-private',
+ 'class-static-methods-private',
+ 'Intl.NumberFormat-unified'])
DATA = os.path.join(os.path.dirname(os.path.abspath(__file__)), "data")
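The FEATURE_FLAGS change above moves private fields, namespace exports, and Object.fromEntries from the skipped set to flag-gated features. Each test262 front-matter `features` entry is turned into the d8 flag that enables it; a minimal self-contained sketch of that lookup (stand-in data, mirroring the comprehension applied to FEATURE_FLAGS later in this file):

    # Sketch: map test262 'features' metadata to the d8 flags enabling them.
    FEATURE_FLAGS = {
        'class-fields-private': '--harmony-private-fields',
        'Object.fromEntries': '--harmony-object-from-entries',
    }

    def flags_for(test_record):
        return [flag for feature, flag in FEATURE_FLAGS.items()
                if feature in test_record.get('features', [])]

    assert flags_for({'features': ['Object.fromEntries']}) == [
        '--harmony-object-from-entries']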
@@ -86,15 +87,23 @@ class VariantsGenerator(testsuite.VariantsGenerator):
def gen(self, test):
flags_set = self._get_flags_set(test)
test_record = test.test_record
- for n, variant in enumerate(self._get_variants(test)):
- flags = flags_set[variant][0]
- if 'noStrict' in test_record:
- yield (variant, flags, str(n))
- elif 'onlyStrict' in test_record:
- yield (variant, flags + ['--use-strict'], 'strict-%d' % n)
- else:
- yield (variant, flags, str(n))
- yield (variant, flags + ['--use-strict'], 'strict-%d' % n)
+
+    # Add a reverse test variant ensuring that FAIL_PHASE_ONLY is only used
+    # for tests that really do throw an exception at the wrong phase.
+ phase_variants = ['']
+ if test.fail_phase_only:
+ phase_variants.append('-fail-phase-reverse')
+
+ for phase_var in phase_variants:
+ for n, variant in enumerate(self._get_variants(test)):
+ flags = flags_set[variant][0]
+ if 'noStrict' in test_record:
+ yield (variant, flags, str(n) + phase_var)
+ elif 'onlyStrict' in test_record:
+ yield (variant, flags + ['--use-strict'], 'strict-%d' % n + phase_var)
+ else:
+ yield (variant, flags, str(n))
+ yield (variant, flags + ['--use-strict'], 'strict-%d' % n + phase_var)
class TestSuite(testsuite.TestSuite):
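The rewritten generator above is easier to follow outside the diff. A simplified, runnable sketch (plain lists stand in for the testrunner's variants and flag sets) of how the '-fail-phase-reverse' suffix doubles the variants of a FAIL_PHASE_ONLY test:

    # Simplified sketch of the variant generation; note that, as in the
    # patch, the plain non-strict variant id carries no phase suffix.
    def gen(variants, flags_set, test_record, fail_phase_only):
        phase_variants = ['']
        if fail_phase_only:
            phase_variants.append('-fail-phase-reverse')
        for phase_var in phase_variants:
            for n, variant in enumerate(variants):
                flags = flags_set[variant]
                if 'noStrict' in test_record:
                    yield (variant, flags, str(n) + phase_var)
                elif 'onlyStrict' in test_record:
                    yield (variant, flags + ['--use-strict'],
                           'strict-%d' % n + phase_var)
                else:
                    yield (variant, flags, str(n))
                    yield (variant, flags + ['--use-strict'],
                           'strict-%d' % n + phase_var)

    # A FAIL_PHASE_ONLY test yields each variant once per phase mode.
    print(list(gen(['default'], {'default': []}, {}, True)))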
@@ -168,11 +177,34 @@ class TestCase(testcase.D8TestCase):
.get('type', None)
)
+ # We disallow combining FAIL_PHASE_ONLY with any other fail outcome types.
+ # Outcome parsing logic in the base class converts all outcomes specified in
+ # the status file into either FAIL, CRASH or PASS, thus we do not need to
+ # handle FAIL_OK, FAIL_SLOPPY and various other outcomes.
+ if self.fail_phase_only:
+ assert (
+ statusfile.FAIL not in self.expected_outcomes and
+ statusfile.CRASH not in self.expected_outcomes), self.name
+
+ @property
+ def fail_phase_only(self):
+    # The FAIL_PHASE_ONLY outcome is defined in
+    # tools/testrunner/local/statusfile.py and can be used in status files to
+    # mark tests that throw an exception at the wrong phase, e.g. a
+    # SyntaxError thrown at execution phase instead of at parsing phase.
+    # See https://crbug.com/v8/8467 for more details.
+ return statusfile.FAIL_PHASE_ONLY in self._statusfile_outcomes
+
+ @property
+ def _fail_phase_reverse(self):
+ return 'fail-phase-reverse' in self.procid
+
def _get_files_params(self):
return (
list(self.suite.harness) +
([os.path.join(self.suite.root, "harness-agent.js")]
if self.path.startswith('built-ins/Atomics') else []) +
+ ([os.path.join(self.suite.root, "harness-adapt-donotevaluate.js")]
+ if self.fail_phase_only and not self._fail_phase_reverse else []) +
self._get_includes() +
(["--module"] if "module" in self.test_record else []) +
[self._get_source_path()]
@@ -185,7 +217,8 @@ class TestCase(testcase.D8TestCase):
if "detachArrayBuffer.js" in self.test_record.get("includes", [])
else []) +
[flag for (feature, flag) in FEATURE_FLAGS.items()
- if feature in self.test_record.get("features", [])]
+ if feature in self.test_record.get("features", [])] +
+ ["--no-arguments"] # disable top-level arguments in d8
)
def _get_includes(self):
@@ -209,7 +242,12 @@ class TestCase(testcase.D8TestCase):
def output_proc(self):
if self._expected_exception is not None:
return test262.ExceptionOutProc(self.expected_outcomes,
- self._expected_exception)
+ self._expected_exception,
+ self._fail_phase_reverse)
+ else:
+ # We only support fail phase reverse on tests that expect an exception.
+ assert not self._fail_phase_reverse
+
if self.expected_outcomes == outproc.OUTCOMES_PASS:
return test262.PASS_NO_EXCEPTION
return test262.NoExceptionOutProc(self.expected_outcomes)
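A condensed sketch of the decision this last hunk implements — the reverse phase variant is only meaningful when the test expects an exception (string tags stand in for the real outproc objects):

    # Condensed sketch of the output_proc selection above.
    def choose_output_proc(expected_exception, fail_phase_reverse,
                           expected_outcomes, pass_outcomes):
        if expected_exception is not None:
            return ('ExceptionOutProc', expected_exception, fail_phase_reverse)
        # Fail-phase-reverse is unsupported without an expected exception.
        assert not fail_phase_reverse
        if expected_outcomes == pass_outcomes:
            return 'PASS_NO_EXCEPTION'
        return ('NoExceptionOutProc', expected_outcomes)

    assert choose_output_proc('SyntaxError', True, ['FAIL'], ['PASS'])[2] is True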
diff --git a/deps/v8/test/torque/test-torque.tq b/deps/v8/test/torque/test-torque.tq
index ee0cba9c5a..59b1c3895f 100644
--- a/deps/v8/test/torque/test-torque.tq
+++ b/deps/v8/test/torque/test-torque.tq
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-module test {
+namespace test {
macro ElementsKindTestHelper1(kind: constexpr ElementsKind): bool {
if constexpr ((kind == UINT8_ELEMENTS) || (kind == UINT16_ELEMENTS)) {
return true;
@@ -174,7 +174,7 @@ module test {
return x + 2;
}
- macro TestFunctionPointers(context: Context): Boolean {
+ macro TestFunctionPointers(implicit context: Context)(): Boolean {
let fptr: builtin(Context, Smi) => Smi = TestHelperPlus1;
check(fptr(context, 42) == 43);
fptr = TestHelperPlus2;
@@ -182,7 +182,7 @@ module test {
return True;
}
- macro TestVariableRedeclaration(context: Context): Boolean {
+ macro TestVariableRedeclaration(implicit context: Context)(): Boolean {
let var1: int31 = FromConstexpr<bool>(42 == 0) ? 0 : 1;
let var2: int31 = FromConstexpr<bool>(42 == 0) ? 1 : 0;
return True;
@@ -203,16 +203,16 @@ module test {
check(fptr2(c, Undefined) == Undefined);
}
- type SmiToSmi = builtin(Smi) => Smi;
- macro TestTypeAlias(x: SmiToSmi): Code {
+ type ObjectToObject = builtin(Context, Object) => Object;
+ macro TestTypeAlias(x: ObjectToObject): BuiltinPtr {
return x;
}
- macro TestUnsafeCast(c: Context, n: Number): Boolean {
+ macro TestUnsafeCast(implicit context: Context)(n: Number): Boolean {
if (TaggedIsSmi(n)) {
let m: Smi = UnsafeCast<Smi>(n);
- check(TestHelperPlus1(c, m) == 11);
+ check(TestHelperPlus1(context, m) == 11);
return True;
}
return False;
@@ -223,7 +223,7 @@ module test {
check(Convert<intptr>(-0xffff) == -65535);
}
- macro TestLargeIntegerLiterals(c: Context) {
+ macro TestLargeIntegerLiterals(implicit c: Context)() {
let x: int32 = 0x40000000;
let y: int32 = 0x7fffffff;
}
@@ -278,11 +278,11 @@ module test {
return i.i;
}
- macro TestStruct2(): TestStructA {
+ macro TestStruct2(implicit context: Context)(): TestStructA {
return TestStructA{UnsafeCast<FixedArray>(kEmptyFixedArray), 27, 31};
}
- macro TestStruct3(): TestStructA {
+ macro TestStruct3(implicit context: Context)(): TestStructA {
let a: TestStructA =
TestStructA{UnsafeCast<FixedArray>(kEmptyFixedArray), 13, 5};
let b: TestStructA = a;
@@ -304,10 +304,21 @@ module test {
y: TestStructA;
}
- macro TestStruct4(): TestStructC {
+ macro TestStruct4(implicit context: Context)(): TestStructC {
return TestStructC{TestStruct2(), TestStruct2()};
}
+ macro TestStructInLabel(implicit context: Context)(): never
+ labels Foo(TestStructA) {
+ goto Foo(TestStruct2());
+ }
+ macro CallTestStructInLabel(implicit context: Context)() {
+ try {
+ TestStructInLabel() otherwise Foo;
+ }
+ label Foo(s: TestStructA) {}
+ }
+
// This macro tests different versions of the for-loop where some parts
// are (not) present.
macro TestForLoop() {
@@ -392,6 +403,16 @@ module test {
j++;
}
check(sum == 7);
+
+ j = 0;
+ try {
+ for (;;) {
+ if (++j == 10) goto Exit;
+ }
+ }
+ label Exit {
+ check(j == 10);
+ }
}
macro TestSubtyping(x: Smi) {
@@ -409,9 +430,11 @@ module test {
}
}
- macro TypeswitchExample(x: Number | FixedArray): int32 {
+ type NumberOrFixedArray = Number | FixedArray;
+ macro TypeswitchExample(implicit context: Context)(x: NumberOrFixedArray):
+ int32 {
let result: int32 = 0;
- typeswitch (IncrementIfSmi<(Number | FixedArray)>(x)) {
+ typeswitch (IncrementIfSmi(x)) {
case (x: FixedArray): {
result = result + 1;
}
@@ -422,7 +445,7 @@ module test {
result = result * 10;
- typeswitch (IncrementIfSmi<(Number | FixedArray)>(x)) {
+ typeswitch (IncrementIfSmi(x)) {
case (x: Smi): {
result = result + Convert<int32>(x);
}
@@ -437,13 +460,26 @@ module test {
return result;
}
- macro TestTypeswitch() {
+ macro TestTypeswitch(implicit context: Context)() {
check(TypeswitchExample(FromConstexpr<Smi>(5)) == 26);
const a: FixedArray = AllocateZeroedFixedArray(3);
check(TypeswitchExample(a) == 13);
check(TypeswitchExample(FromConstexpr<Number>(0.5)) == 27);
}
+ macro TestTypeswitchAsanLsanFailure(implicit context: Context)(obj: Object) {
+ typeswitch (obj) {
+ case (o: Smi): {
+ }
+ case (o: JSTypedArray): {
+ }
+ case (o: JSReceiver): {
+ }
+ case (o: HeapObject): {
+ }
+ }
+ }
+
macro ExampleGenericOverload<A: type>(o: Object): A {
return o;
}
@@ -451,7 +487,7 @@ module test {
return o + 1;
}
- macro TestGenericOverload() {
+ macro TestGenericOverload(implicit context: Context)() {
const xSmi: Smi = 5;
const xObject: Object = xSmi;
check(ExampleGenericOverload<Smi>(xSmi) == 6);
@@ -588,4 +624,148 @@ module test {
assert(b == 5);
}
}
+
+ macro TestQualifiedAccess(implicit context: Context)() {
+ let s: Smi = 0;
+ check(!array::IsJSArray(s));
+ }
+
+ macro TestCatch1(implicit context: Context)(): Smi {
+ let r: Smi = 0;
+ try {
+ ThrowTypeError(context, kInvalidArrayLength);
+ } catch (e) {
+ r = 1;
+ return r;
+ }
+ }
+
+ macro TestCatch2Wrapper(implicit context: Context)(): never {
+ ThrowTypeError(context, kInvalidArrayLength);
+ }
+
+ macro TestCatch2(implicit context: Context)(): Smi {
+ let r: Smi = 0;
+ try {
+ TestCatch2Wrapper();
+ } catch (e) {
+ r = 2;
+ return r;
+ }
+ }
+
+ macro TestCatch3WrapperWithLabel(implicit context: Context)(): never
+ labels Abort {
+ ThrowTypeError(context, kInvalidArrayLength);
+ }
+
+ macro TestCatch3(implicit context: Context)(): Smi {
+ let r: Smi = 0;
+ try {
+ TestCatch3WrapperWithLabel() otherwise Abort;
+ }
+ label Abort {
+ return -1;
+ }
+ catch (e) {
+ r = 2;
+ return r;
+ }
+ }
+
+  // This test doesn't actually test the functionality of iterators;
+  // its only purpose is to make sure that the CSA macros in the
+ // IteratorBuiltinsAssembler match the signatures provided in
+ // iterator.tq.
+ macro TestIterator(implicit context: Context)(o: Object, map: Map) {
+ try {
+ const t1: Object = iterator::GetIteratorMethod(o);
+ const t2: iterator::IteratorRecord = iterator::GetIterator(o);
+
+ const t3: Object = iterator::IteratorStep(t2) otherwise Fail;
+ const t4: Object = iterator::IteratorStep(t2, map) otherwise Fail;
+
+ const t5: Object = iterator::IteratorValue(t4);
+ const t6: Object = iterator::IteratorValue(t4, map);
+
+ const t7: JSArray = iterator::IterableToList(t1, t1);
+
+ iterator::IteratorCloseOnException(t2, t5);
+ }
+ label Fail {}
+ }
+
+ macro TestFrame1(implicit context: Context)() {
+ const f: Frame = LoadFramePointer();
+ const frameType: FrameType =
+ Cast<FrameType>(f.context_or_frame_type) otherwise unreachable;
+ assert(frameType == STUB_FRAME);
+ assert(f.caller == LoadParentFramePointer());
+ typeswitch (f) {
+ case (f: StandardFrame): {
+ unreachable;
+ }
+ case (f: ArgumentsAdaptorFrame): {
+ unreachable;
+ }
+ case (f: StubFrame): {
+ }
+ }
+ }
+
+ macro TestNew(implicit context: Context)() {
+ const f: JSArray = new JSArray{};
+ assert(f.IsEmpty());
+ f.length = 0;
+ }
+
+ struct TestInner {
+ SetX(newValue: int32) {
+ this.x = newValue;
+ }
+ GetX(): int32 {
+ return this.x;
+ }
+ x: int32;
+ y: int32;
+ }
+
+ struct TestOuter {
+ a: int32;
+ b: TestInner;
+ c: int32;
+ }
+
+ struct TestCustomStructConstructor {
+ constructor(x: int32, y: Smi) {
+ this.a = x;
+ this.c = x;
+ this.b = y;
+ this.d = y;
+ }
+ a: int32;
+ b: Smi;
+ c: int32;
+ d: Smi;
+ }
+
+ macro TestStructConstructor(implicit context: Context)() {
+ // Test default constructor
+ let a: TestOuter = TestOuter{5, TestInner{6, 7}, 8};
+ assert(a.a == 5);
+ assert(a.b.x == 6);
+ assert(a.b.y == 7);
+ assert(a.c == 8);
+ a.b.x = 1;
+ assert(a.b.x == 1);
+ a.b.SetX(2);
+ assert(a.b.x == 2);
+ assert(a.b.GetX() == 2);
+ // Test custom constructor
+ let w: TestCustomStructConstructor = TestCustomStructConstructor{1, 2};
+ assert(w.a == 1);
+ assert(w.b == 2);
+ assert(w.c == 1);
+ assert(w.d == 2);
+ }
}
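For readers who don't write Torque: the typeswitch construct used heavily above dispatches on the runtime type of a value, trying the most specific case first. A loose Python analogy — illustrative only, since Torque switches over V8 heap-object types, not Python classes:

    # Loose analogy only: dispatch on runtime type, most specific case first
    # (bool is a subclass of int, so it must be tested before int).
    def typeswitch_example(x):
        if isinstance(x, bool):
            return 'bool'
        if isinstance(x, int):            # like `case (o: Smi)`
            return 'int'
        if isinstance(x, (list, tuple)):  # like `case (o: FixedArray)`
            return 'array-like'
        return 'object'                   # like a final `case (o: HeapObject)`

    assert typeswitch_example(3) == 'int'
    assert typeswitch_example([1, 2]) == 'array-like'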
diff --git a/deps/v8/test/unittests/BUILD.gn b/deps/v8/test/unittests/BUILD.gn
index f63e2af197..77d503c7d4 100644
--- a/deps/v8/test/unittests/BUILD.gn
+++ b/deps/v8/test/unittests/BUILD.gn
@@ -54,7 +54,7 @@ v8_source_set("unittests_sources") {
"api/v8-object-unittest.cc",
"asmjs/asm-scanner-unittest.cc",
"asmjs/asm-types-unittest.cc",
- "asmjs/switch-logic-unittest.cc",
+ "background-compile-task-unittest.cc",
"base/address-region-unittest.cc",
"base/atomic-utils-unittest.cc",
"base/bits-unittest.cc",
@@ -83,10 +83,13 @@ v8_source_set("unittests_sources") {
"char-predicates-unittest.cc",
"code-stub-assembler-unittest.cc",
"code-stub-assembler-unittest.h",
- "compiler-dispatcher/compiler-dispatcher-tracer-unittest.cc",
"compiler-dispatcher/compiler-dispatcher-unittest.cc",
"compiler-dispatcher/optimizing-compile-dispatcher-unittest.cc",
- "compiler-dispatcher/unoptimized-compile-job-unittest.cc",
+ "compiler/backend/instruction-selector-unittest.cc",
+ "compiler/backend/instruction-selector-unittest.h",
+ "compiler/backend/instruction-sequence-unittest.cc",
+ "compiler/backend/instruction-sequence-unittest.h",
+ "compiler/backend/instruction-unittest.cc",
"compiler/branch-elimination-unittest.cc",
"compiler/bytecode-analysis-unittest.cc",
"compiler/checkpoint-elimination-unittest.cc",
@@ -106,11 +109,6 @@ v8_source_set("unittests_sources") {
"compiler/graph-trimmer-unittest.cc",
"compiler/graph-unittest.cc",
"compiler/graph-unittest.h",
- "compiler/instruction-selector-unittest.cc",
- "compiler/instruction-selector-unittest.h",
- "compiler/instruction-sequence-unittest.cc",
- "compiler/instruction-sequence-unittest.h",
- "compiler/instruction-unittest.cc",
"compiler/int64-lowering-unittest.cc",
"compiler/js-call-reducer-unittest.cc",
"compiler/js-create-lowering-unittest.cc",
@@ -119,7 +117,6 @@ v8_source_set("unittests_sources") {
"compiler/js-operator-unittest.cc",
"compiler/js-typed-lowering-unittest.cc",
"compiler/linkage-tail-call-unittest.cc",
- "compiler/live-range-builder.h",
"compiler/load-elimination-unittest.cc",
"compiler/loop-peeling-unittest.cc",
"compiler/machine-operator-reducer-unittest.cc",
@@ -147,6 +144,7 @@ v8_source_set("unittests_sources") {
"compiler/typer-unittest.cc",
"compiler/value-numbering-reducer-unittest.cc",
"compiler/zone-stats-unittest.cc",
+ "conversions-unittest.cc",
"counters-unittest.cc",
"detachable-vector-unittest.cc",
"eh-frame-iterator-unittest.cc",
@@ -186,11 +184,12 @@ v8_source_set("unittests_sources") {
"libplatform/task-queue-unittest.cc",
"libplatform/worker-thread-unittest.cc",
"locked-queue-unittest.cc",
+ "microtask-queue-unittest.cc",
"object-unittest.cc",
- "objects/microtask-queue-unittest.cc",
"parser/ast-value-unittest.cc",
"parser/preparser-unittest.cc",
"register-configuration-unittest.cc",
+ "regress/regress-crbug-938251-unittest.cc",
"run-all-unittests.cc",
"source-position-table-unittest.cc",
"strings-storage-unittest.cc",
@@ -199,6 +198,7 @@ v8_source_set("unittests_sources") {
"test-utils.cc",
"test-utils.h",
"torque/earley-parser-unittest.cc",
+ "torque/torque-unittest.cc",
"unicode-unittest.cc",
"utils-unittest.cc",
"value-serializer-unittest.cc",
@@ -209,8 +209,8 @@ v8_source_set("unittests_sources") {
"wasm/loop-assignment-analysis-unittest.cc",
"wasm/module-decoder-unittest.cc",
"wasm/streaming-decoder-unittest.cc",
- "wasm/trap-handler-unittest.cc",
"wasm/wasm-code-manager-unittest.cc",
+ "wasm/wasm-compiler-unittest.cc",
"wasm/wasm-macro-gen-unittest.cc",
"wasm/wasm-module-builder-unittest.cc",
"wasm/wasm-opcodes-unittest.cc",
@@ -258,6 +258,7 @@ v8_source_set("unittests_sources") {
sources += [
"assembler/turbo-assembler-x64-unittest.cc",
"compiler/x64/instruction-selector-x64-unittest.cc",
+ "wasm/trap-handler-x64-unittest.cc",
]
} else if (v8_current_cpu == "ppc" || v8_current_cpu == "ppc64") {
sources += [
@@ -271,6 +272,14 @@ v8_source_set("unittests_sources") {
]
}
+ if (is_posix) {
+ sources += [ "wasm/trap-handler-posix-unittest.cc" ]
+ }
+
+ if (is_win) {
+ sources += [ "wasm/trap-handler-win-unittest.cc" ]
+ }
+
configs = [
"../..:external_config",
"../..:internal_config_base",
diff --git a/deps/v8/test/unittests/api/isolate-unittest.cc b/deps/v8/test/unittests/api/isolate-unittest.cc
index 8ddf8a29c8..10fa7bba22 100644
--- a/deps/v8/test/unittests/api/isolate-unittest.cc
+++ b/deps/v8/test/unittests/api/isolate-unittest.cc
@@ -74,8 +74,7 @@ using IncumbentContextTest = TestWithIsolate;
// Check that Isolate::GetIncumbentContext() returns the correct one in basic
// scenarios.
-#if !defined(V8_USE_ADDRESS_SANITIZER)
-TEST_F(IncumbentContextTest, MAYBE_Basic) {
+TEST_F(IncumbentContextTest, Basic) {
auto Str = [&](const char* s) {
return String::NewFromUtf8(isolate(), s, NewStringType::kNormal)
.ToLocalChecked();
@@ -137,6 +136,5 @@ TEST_F(IncumbentContextTest, MAYBE_Basic) {
EXPECT_EQ(global_c, Run(context_a, "funcA()"));
}
}
-#endif // !defined(V8_USE_ADDRESS_SANITIZER)
} // namespace v8
diff --git a/deps/v8/test/unittests/asmjs/asm-types-unittest.cc b/deps/v8/test/unittests/asmjs/asm-types-unittest.cc
index db5ed2ba52..afc1be0991 100644
--- a/deps/v8/test/unittests/asmjs/asm-types-unittest.cc
+++ b/deps/v8/test/unittests/asmjs/asm-types-unittest.cc
@@ -250,7 +250,7 @@ TEST_F(AsmTypeTest, IsExactly) {
for (size_t ii = 0; ii < arraysize(test_types); ++ii) {
for (size_t jj = 0; jj < arraysize(test_types); ++jj) {
- EXPECT_EQ(ii == jj, test_types[ii]->IsExactly(test_types[jj]))
+ EXPECT_EQ(ii == jj, AsmType::IsExactly(test_types[ii], test_types[jj]))
<< test_types[ii]->Name()
<< ((ii == jj) ? " is not exactly " : " is exactly ")
<< test_types[jj]->Name();
diff --git a/deps/v8/test/unittests/asmjs/switch-logic-unittest.cc b/deps/v8/test/unittests/asmjs/switch-logic-unittest.cc
deleted file mode 100644
index cc3fbb05cc..0000000000
--- a/deps/v8/test/unittests/asmjs/switch-logic-unittest.cc
+++ /dev/null
@@ -1,89 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/asmjs/switch-logic.h"
-#include "test/unittests/test-utils.h"
-
-namespace v8 {
-namespace internal {
-namespace wasm {
-class SwitchLogicTest : public TestWithZone {};
-
-void CheckNodeValues(CaseNode* node, int begin, int end) {
- CHECK_EQ(node->begin, begin);
- CHECK_EQ(node->end, end);
-}
-
-TEST_F(SwitchLogicTest, Single_Table_Test) {
- ZoneVector<int> values(zone());
- values.push_back(14);
- values.push_back(12);
- values.push_back(15);
- values.push_back(19);
- values.push_back(18);
- values.push_back(16);
- CaseNode* root = OrderCases(&values, zone());
- CHECK_NULL(root->left);
- CHECK_NULL(root->right);
- CheckNodeValues(root, 12, 19);
-}
-
-TEST_F(SwitchLogicTest, Balanced_Tree_Test) {
- ZoneVector<int> values(zone());
- values.push_back(5);
- values.push_back(1);
- values.push_back(6);
- values.push_back(9);
- values.push_back(-4);
- CaseNode* root = OrderCases(&values, zone());
- CheckNodeValues(root, 5, 5);
- CheckNodeValues(root->left, -4, -4);
- CHECK_NULL(root->left->left);
- CheckNodeValues(root->left->right, 1, 1);
- CHECK_NULL(root->left->right->left);
- CHECK_NULL(root->left->right->right);
- CheckNodeValues(root->right, 6, 6);
- CHECK_NULL(root->right->left);
- CheckNodeValues(root->right->right, 9, 9);
- CHECK_NULL(root->right->right->left);
- CHECK_NULL(root->right->right->right);
-}
-
-TEST_F(SwitchLogicTest, Hybrid_Test) {
- ZoneVector<int> values(zone());
- values.push_back(1);
- values.push_back(2);
- values.push_back(3);
- values.push_back(4);
- values.push_back(7);
- values.push_back(10);
- values.push_back(11);
- values.push_back(12);
- values.push_back(13);
- values.push_back(16);
- CaseNode* root = OrderCases(&values, zone());
- CheckNodeValues(root, 7, 7);
- CheckNodeValues(root->left, 1, 4);
- CheckNodeValues(root->right, 10, 13);
- CheckNodeValues(root->right->right, 16, 16);
-}
-
-TEST_F(SwitchLogicTest, Single_Case) {
- ZoneVector<int> values(zone());
- values.push_back(3);
- CaseNode* root = OrderCases(&values, zone());
- CheckNodeValues(root, 3, 3);
- CHECK_NULL(root->left);
- CHECK_NULL(root->right);
-}
-
-TEST_F(SwitchLogicTest, Empty_Case) {
- ZoneVector<int> values(zone());
- CaseNode* root = OrderCases(&values, zone());
- CHECK_NULL(root);
-}
-
-} // namespace wasm
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/test/unittests/assembler/turbo-assembler-arm-unittest.cc b/deps/v8/test/unittests/assembler/turbo-assembler-arm-unittest.cc
index 056bd1c2c6..63c68ff48f 100644
--- a/deps/v8/test/unittests/assembler/turbo-assembler-arm-unittest.cc
+++ b/deps/v8/test/unittests/assembler/turbo-assembler-arm-unittest.cc
@@ -30,28 +30,26 @@ namespace internal {
class TurboAssemblerTest : public TestWithIsolate {};
TEST_F(TurboAssemblerTest, TestHardAbort) {
- size_t allocated;
- byte* buffer = AllocateAssemblerBuffer(&allocated);
- TurboAssembler tasm(nullptr, AssemblerOptions{}, buffer,
- static_cast<int>(allocated), CodeObjectRequired::kNo);
+ auto buffer = AllocateAssemblerBuffer();
+ TurboAssembler tasm(nullptr, AssemblerOptions{}, CodeObjectRequired::kNo,
+ buffer->CreateView());
__ set_abort_hard(true);
__ Abort(AbortReason::kNoReason);
CodeDesc desc;
tasm.GetCode(nullptr, &desc);
- MakeAssemblerBufferExecutable(buffer, allocated);
+ buffer->MakeExecutable();
// We need an isolate here to execute in the simulator.
- auto f = GeneratedCode<void>::FromBuffer(isolate(), buffer);
+ auto f = GeneratedCode<void>::FromBuffer(isolate(), buffer->start());
ASSERT_DEATH_IF_SUPPORTED({ f.Call(); }, ERROR_MESSAGE("abort: no reason"));
}
TEST_F(TurboAssemblerTest, TestCheck) {
- size_t allocated;
- byte* buffer = AllocateAssemblerBuffer(&allocated);
- TurboAssembler tasm(nullptr, AssemblerOptions{}, buffer,
- static_cast<int>(allocated), CodeObjectRequired::kNo);
+ auto buffer = AllocateAssemblerBuffer();
+ TurboAssembler tasm(nullptr, AssemblerOptions{}, CodeObjectRequired::kNo,
+ buffer->CreateView());
__ set_abort_hard(true);
// Fail if the first parameter is 17.
@@ -62,9 +60,9 @@ TEST_F(TurboAssemblerTest, TestCheck) {
CodeDesc desc;
tasm.GetCode(nullptr, &desc);
- MakeAssemblerBufferExecutable(buffer, allocated);
+ buffer->MakeExecutable();
// We need an isolate here to execute in the simulator.
- auto f = GeneratedCode<void, int>::FromBuffer(isolate(), buffer);
+ auto f = GeneratedCode<void, int>::FromBuffer(isolate(), buffer->start());
f.Call(0);
f.Call(18);
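This file and the six sibling files below (arm64, ia32, mips, mips64, ppc, s390, x64) receive the same mechanical rewrite: AllocateAssemblerBuffer() now returns an owning buffer object instead of a raw byte* plus an out-parameter size, the TurboAssembler constructor takes a view of that buffer, and making the memory executable becomes a method. A sketch of the interface this implies; the member names come from the call sites in these tests, everything else is an assumption:

// Hedged interface sketch, inferred from usage in these tests only.
#include <memory>

class TestingAssemblerBuffer {
 public:
  std::unique_ptr<AssemblerBuffer> CreateView();  // non-owning view handed to the assembler
  void MakeExecutable();                          // flips page protection to allow execution
  byte* start();                                  // base address, passed to GeneratedCode::FromBuffer
};

std::unique_ptr<TestingAssemblerBuffer> AllocateAssemblerBuffer();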
diff --git a/deps/v8/test/unittests/assembler/turbo-assembler-arm64-unittest.cc b/deps/v8/test/unittests/assembler/turbo-assembler-arm64-unittest.cc
index e354fb91d9..57e82ecde3 100644
--- a/deps/v8/test/unittests/assembler/turbo-assembler-arm64-unittest.cc
+++ b/deps/v8/test/unittests/assembler/turbo-assembler-arm64-unittest.cc
@@ -30,28 +30,26 @@ namespace internal {
class TurboAssemblerTest : public TestWithIsolate {};
TEST_F(TurboAssemblerTest, TestHardAbort) {
- size_t allocated;
- byte* buffer = AllocateAssemblerBuffer(&allocated);
- TurboAssembler tasm(nullptr, AssemblerOptions{}, buffer,
- static_cast<int>(allocated), CodeObjectRequired::kNo);
+ auto buffer = AllocateAssemblerBuffer();
+ TurboAssembler tasm(nullptr, AssemblerOptions{}, CodeObjectRequired::kNo,
+ buffer->CreateView());
__ set_abort_hard(true);
__ Abort(AbortReason::kNoReason);
CodeDesc desc;
tasm.GetCode(nullptr, &desc);
- MakeAssemblerBufferExecutable(buffer, allocated);
+ buffer->MakeExecutable();
// We need an isolate here to execute in the simulator.
- auto f = GeneratedCode<void>::FromBuffer(isolate(), buffer);
+ auto f = GeneratedCode<void>::FromBuffer(isolate(), buffer->start());
ASSERT_DEATH_IF_SUPPORTED({ f.Call(); }, ERROR_MESSAGE("abort: no reason"));
}
TEST_F(TurboAssemblerTest, TestCheck) {
- size_t allocated;
- byte* buffer = AllocateAssemblerBuffer(&allocated);
- TurboAssembler tasm(nullptr, AssemblerOptions{}, buffer,
- static_cast<int>(allocated), CodeObjectRequired::kNo);
+ auto buffer = AllocateAssemblerBuffer();
+ TurboAssembler tasm(nullptr, AssemblerOptions{}, CodeObjectRequired::kNo,
+ buffer->CreateView());
__ set_abort_hard(true);
// Fail if the first parameter is 17.
@@ -62,9 +60,9 @@ TEST_F(TurboAssemblerTest, TestCheck) {
CodeDesc desc;
tasm.GetCode(nullptr, &desc);
- MakeAssemblerBufferExecutable(buffer, allocated);
+ buffer->MakeExecutable();
// We need an isolate here to execute in the simulator.
- auto f = GeneratedCode<void, int>::FromBuffer(isolate(), buffer);
+ auto f = GeneratedCode<void, int>::FromBuffer(isolate(), buffer->start());
f.Call(0);
f.Call(18);
diff --git a/deps/v8/test/unittests/assembler/turbo-assembler-ia32-unittest.cc b/deps/v8/test/unittests/assembler/turbo-assembler-ia32-unittest.cc
index ba3634314f..3ef812e07a 100644
--- a/deps/v8/test/unittests/assembler/turbo-assembler-ia32-unittest.cc
+++ b/deps/v8/test/unittests/assembler/turbo-assembler-ia32-unittest.cc
@@ -17,27 +17,25 @@ namespace internal {
// V8 library, create a context, or use any V8 objects.
TEST(TurboAssemblerTest, TestHardAbort) {
- size_t allocated;
- byte* buffer = AllocateAssemblerBuffer(&allocated);
- TurboAssembler tasm(nullptr, AssemblerOptions{}, buffer,
- static_cast<int>(allocated), CodeObjectRequired::kNo);
+ auto buffer = AllocateAssemblerBuffer();
+ TurboAssembler tasm(nullptr, AssemblerOptions{}, CodeObjectRequired::kNo,
+ buffer->CreateView());
__ set_abort_hard(true);
__ Abort(AbortReason::kNoReason);
CodeDesc desc;
tasm.GetCode(nullptr, &desc);
- MakeAssemblerBufferExecutable(buffer, allocated);
- auto f = GeneratedCode<void>::FromBuffer(nullptr, buffer);
+ buffer->MakeExecutable();
+ auto f = GeneratedCode<void>::FromBuffer(nullptr, buffer->start());
ASSERT_DEATH_IF_SUPPORTED({ f.Call(); }, "abort: no reason");
}
TEST(TurboAssemblerTest, TestCheck) {
- size_t allocated;
- byte* buffer = AllocateAssemblerBuffer(&allocated);
- TurboAssembler tasm(nullptr, AssemblerOptions{}, buffer,
- static_cast<int>(allocated), CodeObjectRequired::kNo);
+ auto buffer = AllocateAssemblerBuffer();
+ TurboAssembler tasm(nullptr, AssemblerOptions{}, CodeObjectRequired::kNo,
+ buffer->CreateView());
__ set_abort_hard(true);
// Fail if the first parameter is 17.
@@ -48,8 +46,8 @@ TEST(TurboAssemblerTest, TestCheck) {
CodeDesc desc;
tasm.GetCode(nullptr, &desc);
- MakeAssemblerBufferExecutable(buffer, allocated);
- auto f = GeneratedCode<void, int>::FromBuffer(nullptr, buffer);
+ buffer->MakeExecutable();
+ auto f = GeneratedCode<void, int>::FromBuffer(nullptr, buffer->start());
f.Call(0);
f.Call(18);
diff --git a/deps/v8/test/unittests/assembler/turbo-assembler-mips-unittest.cc b/deps/v8/test/unittests/assembler/turbo-assembler-mips-unittest.cc
index abba0ff30b..6da112c5dd 100644
--- a/deps/v8/test/unittests/assembler/turbo-assembler-mips-unittest.cc
+++ b/deps/v8/test/unittests/assembler/turbo-assembler-mips-unittest.cc
@@ -21,28 +21,26 @@ namespace internal {
class TurboAssemblerTest : public TestWithIsolate {};
TEST_F(TurboAssemblerTest, TestHardAbort) {
- size_t allocated;
- byte* buffer = AllocateAssemblerBuffer(&allocated);
- TurboAssembler tasm(nullptr, AssemblerOptions{}, buffer,
- static_cast<int>(allocated), CodeObjectRequired::kNo);
+ auto buffer = AllocateAssemblerBuffer();
+ TurboAssembler tasm(nullptr, AssemblerOptions{}, CodeObjectRequired::kNo,
+ buffer->CreateView());
__ set_abort_hard(true);
__ Abort(AbortReason::kNoReason);
CodeDesc desc;
tasm.GetCode(nullptr, &desc);
- MakeAssemblerBufferExecutable(buffer, allocated);
+ buffer->MakeExecutable();
// We need an isolate here to execute in the simulator.
- auto f = GeneratedCode<void>::FromBuffer(isolate(), buffer);
+ auto f = GeneratedCode<void>::FromBuffer(isolate(), buffer->start());
ASSERT_DEATH_IF_SUPPORTED({ f.Call(); }, "abort: no reason");
}
TEST_F(TurboAssemblerTest, TestCheck) {
- size_t allocated;
- byte* buffer = AllocateAssemblerBuffer(&allocated);
- TurboAssembler tasm(nullptr, AssemblerOptions{}, buffer,
- static_cast<int>(allocated), CodeObjectRequired::kNo);
+ auto buffer = AllocateAssemblerBuffer();
+ TurboAssembler tasm(nullptr, AssemblerOptions{}, CodeObjectRequired::kNo,
+ buffer->CreateView());
__ set_abort_hard(true);
// Fail if the first parameter (in {a0}) is 17.
@@ -51,9 +49,9 @@ TEST_F(TurboAssemblerTest, TestCheck) {
CodeDesc desc;
tasm.GetCode(nullptr, &desc);
- MakeAssemblerBufferExecutable(buffer, allocated);
+ buffer->MakeExecutable();
// We need an isolate here to execute in the simulator.
- auto f = GeneratedCode<void, int>::FromBuffer(isolate(), buffer);
+ auto f = GeneratedCode<void, int>::FromBuffer(isolate(), buffer->start());
f.Call(0);
f.Call(18);
diff --git a/deps/v8/test/unittests/assembler/turbo-assembler-mips64-unittest.cc b/deps/v8/test/unittests/assembler/turbo-assembler-mips64-unittest.cc
index 8d8bc0756c..5b798b8e02 100644
--- a/deps/v8/test/unittests/assembler/turbo-assembler-mips64-unittest.cc
+++ b/deps/v8/test/unittests/assembler/turbo-assembler-mips64-unittest.cc
@@ -21,28 +21,26 @@ namespace internal {
class TurboAssemblerTest : public TestWithIsolate {};
TEST_F(TurboAssemblerTest, TestHardAbort) {
- size_t allocated;
- byte* buffer = AllocateAssemblerBuffer(&allocated);
- TurboAssembler tasm(nullptr, AssemblerOptions{}, buffer,
- static_cast<int>(allocated), CodeObjectRequired::kNo);
+ auto buffer = AllocateAssemblerBuffer();
+ TurboAssembler tasm(nullptr, AssemblerOptions{}, CodeObjectRequired::kNo,
+ buffer->CreateView());
__ set_abort_hard(true);
__ Abort(AbortReason::kNoReason);
CodeDesc desc;
tasm.GetCode(nullptr, &desc);
- MakeAssemblerBufferExecutable(buffer, allocated);
+ buffer->MakeExecutable();
// We need an isolate here to execute in the simulator.
- auto f = GeneratedCode<void>::FromBuffer(isolate(), buffer);
+ auto f = GeneratedCode<void>::FromBuffer(isolate(), buffer->start());
ASSERT_DEATH_IF_SUPPORTED({ f.Call(); }, "abort: no reason");
}
TEST_F(TurboAssemblerTest, TestCheck) {
- size_t allocated;
- byte* buffer = AllocateAssemblerBuffer(&allocated);
- TurboAssembler tasm(nullptr, AssemblerOptions{}, buffer,
- static_cast<int>(allocated), CodeObjectRequired::kNo);
+ auto buffer = AllocateAssemblerBuffer();
+ TurboAssembler tasm(nullptr, AssemblerOptions{}, CodeObjectRequired::kNo,
+ buffer->CreateView());
__ set_abort_hard(true);
// Fail if the first parameter (in {a0}) is 17.
@@ -51,9 +49,9 @@ TEST_F(TurboAssemblerTest, TestCheck) {
CodeDesc desc;
tasm.GetCode(nullptr, &desc);
- MakeAssemblerBufferExecutable(buffer, allocated);
+ buffer->MakeExecutable();
// We need an isolate here to execute in the simulator.
- auto f = GeneratedCode<void, int>::FromBuffer(isolate(), buffer);
+ auto f = GeneratedCode<void, int>::FromBuffer(isolate(), buffer->start());
f.Call(0);
f.Call(18);
diff --git a/deps/v8/test/unittests/assembler/turbo-assembler-ppc-unittest.cc b/deps/v8/test/unittests/assembler/turbo-assembler-ppc-unittest.cc
index dcc138fce1..24e2e71fd8 100644
--- a/deps/v8/test/unittests/assembler/turbo-assembler-ppc-unittest.cc
+++ b/deps/v8/test/unittests/assembler/turbo-assembler-ppc-unittest.cc
@@ -21,12 +21,9 @@ namespace internal {
class TurboAssemblerTest : public TestWithIsolate {};
TEST_F(TurboAssemblerTest, TestHardAbort) {
- size_t allocated;
- byte* buffer = AllocateAssemblerBuffer(&allocated);
- TurboAssembler tasm(nullptr, AssemblerOptions{}, buffer,
- static_cast<int>(allocated), CodeObjectRequired::kNo);
- // Called from C
- __ function_descriptor();
+ auto buffer = AllocateAssemblerBuffer();
+ TurboAssembler tasm(nullptr, AssemblerOptions{}, CodeObjectRequired::kNo,
+ buffer->CreateView());
__ set_abort_hard(true);
@@ -34,20 +31,17 @@ TEST_F(TurboAssemblerTest, TestHardAbort) {
CodeDesc desc;
tasm.GetCode(nullptr, &desc);
- MakeAssemblerBufferExecutable(buffer, allocated);
+ buffer->MakeExecutable();
// We need an isolate here to execute in the simulator.
- auto f = GeneratedCode<void>::FromBuffer(isolate(), buffer);
+ auto f = GeneratedCode<void>::FromBuffer(isolate(), buffer->start());
ASSERT_DEATH_IF_SUPPORTED({ f.Call(); }, "abort: no reason");
}
TEST_F(TurboAssemblerTest, TestCheck) {
- size_t allocated;
- byte* buffer = AllocateAssemblerBuffer(&allocated);
- TurboAssembler tasm(nullptr, AssemblerOptions{}, buffer,
- static_cast<int>(allocated), CodeObjectRequired::kNo);
- // Called from C
- __ function_descriptor();
+ auto buffer = AllocateAssemblerBuffer();
+ TurboAssembler tasm(nullptr, AssemblerOptions{}, CodeObjectRequired::kNo,
+ buffer->CreateView());
__ set_abort_hard(true);
@@ -59,9 +53,9 @@ TEST_F(TurboAssemblerTest, TestCheck) {
CodeDesc desc;
tasm.GetCode(nullptr, &desc);
- MakeAssemblerBufferExecutable(buffer, allocated);
+ buffer->MakeExecutable();
// We need an isolate here to execute in the simulator.
- auto f = GeneratedCode<void, int>::FromBuffer(isolate(), buffer);
+ auto f = GeneratedCode<void, int>::FromBuffer(isolate(), buffer->start());
f.Call(0);
f.Call(18);
diff --git a/deps/v8/test/unittests/assembler/turbo-assembler-s390-unittest.cc b/deps/v8/test/unittests/assembler/turbo-assembler-s390-unittest.cc
index 7d45ec907f..f3f0a532d6 100644
--- a/deps/v8/test/unittests/assembler/turbo-assembler-s390-unittest.cc
+++ b/deps/v8/test/unittests/assembler/turbo-assembler-s390-unittest.cc
@@ -21,28 +21,26 @@ namespace internal {
class TurboAssemblerTest : public TestWithIsolate {};
TEST_F(TurboAssemblerTest, TestHardAbort) {
- size_t allocated;
- byte* buffer = AllocateAssemblerBuffer(&allocated);
- TurboAssembler tasm(nullptr, AssemblerOptions{}, buffer,
- static_cast<int>(allocated), CodeObjectRequired::kNo);
+ auto buffer = AllocateAssemblerBuffer();
+ TurboAssembler tasm(nullptr, AssemblerOptions{}, CodeObjectRequired::kNo,
+ buffer->CreateView());
__ set_abort_hard(true);
__ Abort(AbortReason::kNoReason);
CodeDesc desc;
tasm.GetCode(nullptr, &desc);
- MakeAssemblerBufferExecutable(buffer, allocated);
+ buffer->MakeExecutable();
// We need an isolate here to execute in the simulator.
- auto f = GeneratedCode<void>::FromBuffer(isolate(), buffer);
+ auto f = GeneratedCode<void>::FromBuffer(isolate(), buffer->start());
ASSERT_DEATH_IF_SUPPORTED({ f.Call(); }, "abort: no reason");
}
TEST_F(TurboAssemblerTest, TestCheck) {
- size_t allocated;
- byte* buffer = AllocateAssemblerBuffer(&allocated);
- TurboAssembler tasm(nullptr, AssemblerOptions{}, buffer,
- static_cast<int>(allocated), CodeObjectRequired::kNo);
+ auto buffer = AllocateAssemblerBuffer();
+ TurboAssembler tasm(nullptr, AssemblerOptions{}, CodeObjectRequired::kNo,
+ buffer->CreateView());
__ set_abort_hard(true);
// Fail if the first parameter is 17.
@@ -53,9 +51,9 @@ TEST_F(TurboAssemblerTest, TestCheck) {
CodeDesc desc;
tasm.GetCode(nullptr, &desc);
- MakeAssemblerBufferExecutable(buffer, allocated);
+ buffer->MakeExecutable();
// We need an isolate here to execute in the simulator.
- auto f = GeneratedCode<void, int>::FromBuffer(isolate(), buffer);
+ auto f = GeneratedCode<void, int>::FromBuffer(isolate(), buffer->start());
f.Call(0);
f.Call(18);
diff --git a/deps/v8/test/unittests/assembler/turbo-assembler-x64-unittest.cc b/deps/v8/test/unittests/assembler/turbo-assembler-x64-unittest.cc
index 060060c762..8142cbc274 100644
--- a/deps/v8/test/unittests/assembler/turbo-assembler-x64-unittest.cc
+++ b/deps/v8/test/unittests/assembler/turbo-assembler-x64-unittest.cc
@@ -17,27 +17,25 @@ namespace internal {
// V8 library, create a context, or use any V8 objects.
TEST(TurboAssemblerTest, TestHardAbort) {
- size_t allocated;
- byte* buffer = AllocateAssemblerBuffer(&allocated);
- TurboAssembler tasm(nullptr, AssemblerOptions{}, buffer,
- static_cast<int>(allocated), CodeObjectRequired::kNo);
+ auto buffer = AllocateAssemblerBuffer();
+ TurboAssembler tasm(nullptr, AssemblerOptions{}, CodeObjectRequired::kNo,
+ buffer->CreateView());
__ set_abort_hard(true);
__ Abort(AbortReason::kNoReason);
CodeDesc desc;
tasm.GetCode(nullptr, &desc);
- MakeAssemblerBufferExecutable(buffer, allocated);
- auto f = GeneratedCode<void>::FromBuffer(nullptr, buffer);
+ buffer->MakeExecutable();
+ auto f = GeneratedCode<void>::FromBuffer(nullptr, buffer->start());
ASSERT_DEATH_IF_SUPPORTED({ f.Call(); }, "abort: no reason");
}
TEST(TurboAssemblerTest, TestCheck) {
- size_t allocated;
- byte* buffer = AllocateAssemblerBuffer(&allocated);
- TurboAssembler tasm(nullptr, AssemblerOptions{}, buffer,
- static_cast<int>(allocated), CodeObjectRequired::kNo);
+ auto buffer = AllocateAssemblerBuffer();
+ TurboAssembler tasm(nullptr, AssemblerOptions{}, CodeObjectRequired::kNo,
+ buffer->CreateView());
__ set_abort_hard(true);
// Fail if the first parameter is 17.
@@ -48,8 +46,8 @@ TEST(TurboAssemblerTest, TestCheck) {
CodeDesc desc;
tasm.GetCode(nullptr, &desc);
- MakeAssemblerBufferExecutable(buffer, allocated);
- auto f = GeneratedCode<void, int>::FromBuffer(nullptr, buffer);
+ buffer->MakeExecutable();
+ auto f = GeneratedCode<void, int>::FromBuffer(nullptr, buffer->start());
f.Call(0);
f.Call(18);
diff --git a/deps/v8/test/unittests/compiler-dispatcher/unoptimized-compile-job-unittest.cc b/deps/v8/test/unittests/background-compile-task-unittest.cc
index e3d4ae078b..5bb6b68285 100644
--- a/deps/v8/test/unittests/compiler-dispatcher/unoptimized-compile-job-unittest.cc
+++ b/deps/v8/test/unittests/background-compile-task-unittest.cc
@@ -1,4 +1,4 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
+// Copyright 2018 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
@@ -10,13 +10,13 @@
#include "src/ast/scopes.h"
#include "src/base/platform/semaphore.h"
#include "src/base/template-utils.h"
-#include "src/compiler-dispatcher/compiler-dispatcher-job.h"
-#include "src/compiler-dispatcher/compiler-dispatcher-tracer.h"
-#include "src/compiler-dispatcher/unoptimized-compile-job.h"
+#include "src/compiler.h"
#include "src/flags.h"
#include "src/isolate-inl.h"
+#include "src/objects/smi.h"
#include "src/parsing/parse-info.h"
-#include "src/parsing/preparsed-scope-data.h"
+#include "src/parsing/parser.h"
+#include "src/parsing/preparse-data.h"
#include "src/v8.h"
#include "test/unittests/test-helpers.h"
#include "test/unittests/test-utils.h"
@@ -25,14 +25,12 @@
namespace v8 {
namespace internal {
-class UnoptimizedCompileJobTest : public TestWithNativeContext {
+class BackgroundCompileTaskTest : public TestWithNativeContext {
public:
- UnoptimizedCompileJobTest()
- : tracer_(isolate()), allocator_(isolate()->allocator()) {}
- ~UnoptimizedCompileJobTest() override = default;
+ BackgroundCompileTaskTest() : allocator_(isolate()->allocator()) {}
+ ~BackgroundCompileTaskTest() override = default;
AccountingAllocator* allocator() { return allocator_; }
- CompilerDispatcherTracer* tracer() { return &tracer_; }
static void SetUpTestCase() {
CHECK_NULL(save_flags_);
@@ -47,7 +45,7 @@ class UnoptimizedCompileJobTest : public TestWithNativeContext {
save_flags_ = nullptr;
}
- UnoptimizedCompileJob* NewUnoptimizedCompileJob(
+ BackgroundCompileTask* NewBackgroundCompileTask(
Isolate* isolate, Handle<SharedFunctionInfo> shared,
size_t stack_size = FLAG_stack_size) {
std::unique_ptr<ParseInfo> outer_parse_info =
@@ -66,82 +64,55 @@ class UnoptimizedCompileJobTest : public TestWithNativeContext {
outer_parse_info->zone(), script_scope, FUNCTION_SCOPE);
function_scope->set_start_position(shared->StartPosition());
function_scope->set_end_position(shared->EndPosition());
+ std::vector<void*> buffer;
+ ScopedPtrList<Statement> statements(&buffer);
const FunctionLiteral* function_literal =
ast_node_factory.NewFunctionLiteral(
- function_name, function_scope, nullptr, -1, -1, -1,
+ function_name, function_scope, statements, -1, -1, -1,
FunctionLiteral::kNoDuplicateParameters,
FunctionLiteral::kAnonymousExpression,
FunctionLiteral::kShouldEagerCompile, shared->StartPosition(), true,
shared->FunctionLiteralId(isolate), nullptr);
- return new UnoptimizedCompileJob(
- tracer(), allocator(), outer_parse_info.get(), function_name,
- function_literal,
+ return new BackgroundCompileTask(
+ allocator(), outer_parse_info.get(), function_name, function_literal,
isolate->counters()->worker_thread_runtime_call_stats(),
isolate->counters()->compile_function_on_background(), FLAG_stack_size);
}
private:
- CompilerDispatcherTracer tracer_;
AccountingAllocator* allocator_;
static SaveFlags* save_flags_;
- DISALLOW_COPY_AND_ASSIGN(UnoptimizedCompileJobTest);
+ DISALLOW_COPY_AND_ASSIGN(BackgroundCompileTaskTest);
};
-SaveFlags* UnoptimizedCompileJobTest::save_flags_ = nullptr;
+SaveFlags* BackgroundCompileTaskTest::save_flags_ = nullptr;
-#define ASSERT_JOB_STATUS(STATUS, JOB) ASSERT_EQ(STATUS, JOB->status())
-
-TEST_F(UnoptimizedCompileJobTest, Construct) {
- Handle<SharedFunctionInfo> shared =
- test::CreateSharedFunctionInfo(isolate(), nullptr);
- ASSERT_FALSE(shared->is_compiled());
- std::unique_ptr<UnoptimizedCompileJob> job(
- NewUnoptimizedCompileJob(isolate(), shared));
-}
-
-TEST_F(UnoptimizedCompileJobTest, StateTransitions) {
+TEST_F(BackgroundCompileTaskTest, Construct) {
Handle<SharedFunctionInfo> shared =
test::CreateSharedFunctionInfo(isolate(), nullptr);
ASSERT_FALSE(shared->is_compiled());
- std::unique_ptr<UnoptimizedCompileJob> job(
- NewUnoptimizedCompileJob(isolate(), shared));
-
- ASSERT_JOB_STATUS(CompilerDispatcherJob::Status::kInitial, job);
- job->Compile(false);
- ASSERT_FALSE(job->IsFailed());
- ASSERT_JOB_STATUS(CompilerDispatcherJob::Status::kReadyToFinalize, job);
- job->FinalizeOnMainThread(isolate(), shared);
- ASSERT_FALSE(job->IsFailed());
- ASSERT_JOB_STATUS(CompilerDispatcherJob::Status::kDone, job);
- job->ResetOnMainThread(isolate());
- ASSERT_JOB_STATUS(CompilerDispatcherJob::Status::kInitial, job);
+ std::unique_ptr<BackgroundCompileTask> task(
+ NewBackgroundCompileTask(isolate(), shared));
}
-TEST_F(UnoptimizedCompileJobTest, SyntaxError) {
+TEST_F(BackgroundCompileTaskTest, SyntaxError) {
test::ScriptResource* script = new test::ScriptResource("^^^", strlen("^^^"));
Handle<SharedFunctionInfo> shared =
test::CreateSharedFunctionInfo(isolate(), script);
- std::unique_ptr<UnoptimizedCompileJob> job(
- NewUnoptimizedCompileJob(isolate(), shared));
+ std::unique_ptr<BackgroundCompileTask> task(
+ NewBackgroundCompileTask(isolate(), shared));
- job->Compile(false);
- ASSERT_FALSE(job->IsFailed());
- ASSERT_JOB_STATUS(CompilerDispatcherJob::Status::kReadyToFinalize, job);
-
- job->FinalizeOnMainThread(isolate(), shared);
- ASSERT_TRUE(job->IsFailed());
- ASSERT_JOB_STATUS(CompilerDispatcherJob::Status::kFailed, job);
+ task->Run();
+ ASSERT_FALSE(Compiler::FinalizeBackgroundCompileTask(
+ task.get(), shared, isolate(), Compiler::KEEP_EXCEPTION));
ASSERT_TRUE(isolate()->has_pending_exception());
isolate()->clear_pending_exception();
-
- job->ResetOnMainThread(isolate());
- ASSERT_JOB_STATUS(CompilerDispatcherJob::Status::kInitial, job);
}
-TEST_F(UnoptimizedCompileJobTest, CompileAndRun) {
+TEST_F(BackgroundCompileTaskTest, CompileAndRun) {
const char raw_script[] =
"function g() {\n"
" f = function(a) {\n"
@@ -156,21 +127,19 @@ TEST_F(UnoptimizedCompileJobTest, CompileAndRun) {
Handle<JSFunction> f = RunJS<JSFunction>(script);
Handle<SharedFunctionInfo> shared = handle(f->shared(), isolate());
ASSERT_FALSE(shared->is_compiled());
- std::unique_ptr<UnoptimizedCompileJob> job(
- NewUnoptimizedCompileJob(isolate(), shared));
+ std::unique_ptr<BackgroundCompileTask> task(
+ NewBackgroundCompileTask(isolate(), shared));
- job->Compile(false);
- job->FinalizeOnMainThread(isolate(), shared);
- ASSERT_JOB_STATUS(CompilerDispatcherJob::Status::kDone, job);
+ task->Run();
+ ASSERT_TRUE(Compiler::FinalizeBackgroundCompileTask(
+ task.get(), shared, isolate(), Compiler::KEEP_EXCEPTION));
ASSERT_TRUE(shared->is_compiled());
- job->ResetOnMainThread(isolate());
- ASSERT_JOB_STATUS(CompilerDispatcherJob::Status::kInitial, job);
- Smi* value = Smi::cast(*RunJS("f(100);"));
+ Smi value = Smi::cast(*RunJS("f(100);"));
ASSERT_TRUE(value == Smi::FromInt(160));
}
-TEST_F(UnoptimizedCompileJobTest, CompileFailure) {
+TEST_F(BackgroundCompileTaskTest, CompileFailure) {
std::string raw_script("() { var a = ");
for (int i = 0; i < 10000; i++) {
// TODO(leszeks): Figure out a more "unit-test-y" way of forcing an analysis
@@ -184,42 +153,35 @@ TEST_F(UnoptimizedCompileJobTest, CompileFailure) {
new test::ScriptResource(raw_script.c_str(), strlen(raw_script.c_str()));
Handle<SharedFunctionInfo> shared =
test::CreateSharedFunctionInfo(isolate(), script);
- std::unique_ptr<UnoptimizedCompileJob> job(
- NewUnoptimizedCompileJob(isolate(), shared, 100));
+ std::unique_ptr<BackgroundCompileTask> task(
+ NewBackgroundCompileTask(isolate(), shared, 100));
- job->Compile(false);
- ASSERT_FALSE(job->IsFailed());
- ASSERT_JOB_STATUS(CompilerDispatcherJob::Status::kReadyToFinalize, job);
-
- job->FinalizeOnMainThread(isolate(), shared);
- ASSERT_TRUE(job->IsFailed());
- ASSERT_JOB_STATUS(CompilerDispatcherJob::Status::kFailed, job);
+ task->Run();
+ ASSERT_FALSE(Compiler::FinalizeBackgroundCompileTask(
+ task.get(), shared, isolate(), Compiler::KEEP_EXCEPTION));
ASSERT_TRUE(isolate()->has_pending_exception());
isolate()->clear_pending_exception();
- job->ResetOnMainThread(isolate());
- ASSERT_JOB_STATUS(CompilerDispatcherJob::Status::kInitial, job);
}
class CompileTask : public Task {
public:
- CompileTask(UnoptimizedCompileJob* job, base::Semaphore* semaphore)
- : job_(job), semaphore_(semaphore) {}
+ CompileTask(BackgroundCompileTask* task, base::Semaphore* semaphore)
+ : task_(task), semaphore_(semaphore) {}
~CompileTask() override = default;
void Run() override {
- job_->Compile(true);
- ASSERT_FALSE(job_->IsFailed());
+ task_->Run();
semaphore_->Signal();
}
private:
- UnoptimizedCompileJob* job_;
+ BackgroundCompileTask* task_;
base::Semaphore* semaphore_;
DISALLOW_COPY_AND_ASSIGN(CompileTask);
};
-TEST_F(UnoptimizedCompileJobTest, CompileOnBackgroundThread) {
+TEST_F(BackgroundCompileTaskTest, CompileOnBackgroundThread) {
const char* raw_script =
"(a, b) {\n"
" var c = a + b;\n"
@@ -231,24 +193,20 @@ TEST_F(UnoptimizedCompileJobTest, CompileOnBackgroundThread) {
new test::ScriptResource(raw_script, strlen(raw_script));
Handle<SharedFunctionInfo> shared =
test::CreateSharedFunctionInfo(isolate(), script);
- std::unique_ptr<UnoptimizedCompileJob> job(
- NewUnoptimizedCompileJob(isolate(), shared));
+ std::unique_ptr<BackgroundCompileTask> task(
+ NewBackgroundCompileTask(isolate(), shared));
base::Semaphore semaphore(0);
- auto background_task = base::make_unique<CompileTask>(job.get(), &semaphore);
- ASSERT_JOB_STATUS(CompilerDispatcherJob::Status::kInitial, job);
+ auto background_task = base::make_unique<CompileTask>(task.get(), &semaphore);
V8::GetCurrentPlatform()->CallOnWorkerThread(std::move(background_task));
semaphore.Wait();
- job->FinalizeOnMainThread(isolate(), shared);
- ASSERT_FALSE(job->IsFailed());
- ASSERT_JOB_STATUS(CompilerDispatcherJob::Status::kDone, job);
-
- job->ResetOnMainThread(isolate());
- ASSERT_JOB_STATUS(CompilerDispatcherJob::Status::kInitial, job);
+ ASSERT_TRUE(Compiler::FinalizeBackgroundCompileTask(
+ task.get(), shared, isolate(), Compiler::KEEP_EXCEPTION));
+ ASSERT_TRUE(shared->is_compiled());
}
-TEST_F(UnoptimizedCompileJobTest, EagerInnerFunctions) {
+TEST_F(BackgroundCompileTaskTest, EagerInnerFunctions) {
const char raw_script[] =
"function g() {\n"
" f = function() {\n"
@@ -264,25 +222,20 @@ TEST_F(UnoptimizedCompileJobTest, EagerInnerFunctions) {
Handle<JSFunction> f = RunJS<JSFunction>(script);
Handle<SharedFunctionInfo> shared = handle(f->shared(), isolate());
ASSERT_FALSE(shared->is_compiled());
- std::unique_ptr<UnoptimizedCompileJob> job(
- NewUnoptimizedCompileJob(isolate(), shared));
-
- job->Compile(false);
- ASSERT_FALSE(job->IsFailed());
- job->FinalizeOnMainThread(isolate(), shared);
- ASSERT_FALSE(job->IsFailed());
- ASSERT_JOB_STATUS(CompilerDispatcherJob::Status::kDone, job);
+ std::unique_ptr<BackgroundCompileTask> task(
+ NewBackgroundCompileTask(isolate(), shared));
+
+ task->Run();
+ ASSERT_TRUE(Compiler::FinalizeBackgroundCompileTask(
+ task.get(), shared, isolate(), Compiler::KEEP_EXCEPTION));
ASSERT_TRUE(shared->is_compiled());
Handle<JSFunction> e = RunJS<JSFunction>("f();");
ASSERT_TRUE(e->shared()->is_compiled());
-
- job->ResetOnMainThread(isolate());
- ASSERT_JOB_STATUS(CompilerDispatcherJob::Status::kInitial, job);
}
-TEST_F(UnoptimizedCompileJobTest, LazyInnerFunctions) {
+TEST_F(BackgroundCompileTaskTest, LazyInnerFunctions) {
const char raw_script[] =
"function g() {\n"
" f = function() {\n"
@@ -297,25 +250,18 @@ TEST_F(UnoptimizedCompileJobTest, LazyInnerFunctions) {
Handle<JSFunction> f = RunJS<JSFunction>(script);
Handle<SharedFunctionInfo> shared = handle(f->shared(), isolate());
ASSERT_FALSE(shared->is_compiled());
- std::unique_ptr<UnoptimizedCompileJob> job(
- NewUnoptimizedCompileJob(isolate(), shared));
-
- job->Compile(false);
- ASSERT_FALSE(job->IsFailed());
- job->FinalizeOnMainThread(isolate(), shared);
- ASSERT_FALSE(job->IsFailed());
- ASSERT_JOB_STATUS(CompilerDispatcherJob::Status::kDone, job);
+ std::unique_ptr<BackgroundCompileTask> task(
+ NewBackgroundCompileTask(isolate(), shared));
+
+ task->Run();
+ ASSERT_TRUE(Compiler::FinalizeBackgroundCompileTask(
+ task.get(), shared, isolate(), Compiler::KEEP_EXCEPTION));
ASSERT_TRUE(shared->is_compiled());
Handle<JSFunction> e = RunJS<JSFunction>("f();");
ASSERT_FALSE(e->shared()->is_compiled());
-
- job->ResetOnMainThread(isolate());
- ASSERT_JOB_STATUS(CompilerDispatcherJob::Status::kInitial, job);
}
-#undef ASSERT_JOB_STATUS
-
} // namespace internal
} // namespace v8
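The rename from unoptimized-compile-job-unittest.cc to background-compile-task-unittest.cc tracks a compiler-dispatcher refactor: the stateful UnoptimizedCompileJob, with its kInitial → kReadyToFinalize → kDone status machine, its Compile/FinalizeOnMainThread/ResetOnMainThread steps, and the CompilerDispatcherTracer, collapses into a BackgroundCompileTask that is simply run off-thread and finalized on the main thread. (Note in passing the `Smi* value` → `Smi value` change: Smi is now used as a value type.) The whole lifecycle, as the rewritten tests exercise it:

// Minimal usage sketch built only from calls that appear in the tests above.
std::unique_ptr<BackgroundCompileTask> task(
    NewBackgroundCompileTask(isolate(), shared));
task->Run();  // safe on a worker thread; see CompileOnBackgroundThread
bool success = Compiler::FinalizeBackgroundCompileTask(
    task.get(), shared, isolate(), Compiler::KEEP_EXCEPTION);
// On failure, KEEP_EXCEPTION leaves a pending exception on the isolate.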
diff --git a/deps/v8/test/unittests/base/atomic-utils-unittest.cc b/deps/v8/test/unittests/base/atomic-utils-unittest.cc
index 1d8e71c9aa..442257eff8 100644
--- a/deps/v8/test/unittests/base/atomic-utils-unittest.cc
+++ b/deps/v8/test/unittests/base/atomic-utils-unittest.cc
@@ -12,11 +12,7 @@ namespace v8 {
namespace base {
namespace {
-enum TestFlag : base::AtomicWord {
- kA,
- kB,
- kC,
-};
+enum TestFlag : base::AtomicWord { kA, kB, kC };
} // namespace
@@ -26,15 +22,6 @@ TEST(AtomicValue, Initial) {
EXPECT_EQ(TestFlag::kA, a.Value());
}
-
-TEST(AtomicValue, TrySetValue) {
- AtomicValue<TestFlag> a(kA);
- EXPECT_FALSE(a.TrySetValue(kB, kC));
- EXPECT_TRUE(a.TrySetValue(kA, kC));
- EXPECT_EQ(TestFlag::kC, a.Value());
-}
-
-
TEST(AtomicValue, SetValue) {
AtomicValue<TestFlag> a(kB);
a.SetValue(kC);
@@ -48,9 +35,6 @@ TEST(AtomicValue, WithVoidStar) {
EXPECT_EQ(nullptr, a.Value());
a.SetValue(&a);
EXPECT_EQ(&a, a.Value());
- EXPECT_FALSE(a.TrySetValue(nullptr, &dummy));
- EXPECT_TRUE(a.TrySetValue(&a, &dummy));
- EXPECT_EQ(&dummy, a.Value());
}
TEST(AsAtomic8, CompareAndSwap_Sequential) {
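AtomicValue loses its TrySetValue compare-and-swap; the remaining tests only cover Value/SetValue. Code that still needs a CAS can express it with std::atomic directly; a minimal equivalent of the deleted call, assuming nothing about the removed implementation:

#include <atomic>

// Equivalent of the deleted a.TrySetValue(expected, desired): stores
// |desired| and returns true only if the current value equals |expected|.
template <typename T>
bool TrySetValue(std::atomic<T>* a, T expected, T desired) {
  return a->compare_exchange_strong(expected, desired);
}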
diff --git a/deps/v8/test/unittests/base/ieee754-unittest.cc b/deps/v8/test/unittests/base/ieee754-unittest.cc
index 2110b63976..56f1812a9e 100644
--- a/deps/v8/test/unittests/base/ieee754-unittest.cc
+++ b/deps/v8/test/unittests/base/ieee754-unittest.cc
@@ -6,6 +6,7 @@
#include "src/base/ieee754.h"
#include "src/base/macros.h"
+#include "src/base/overflowing-math.h"
#include "testing/gmock-support.h"
#include "testing/gtest-support.h"
@@ -314,8 +315,8 @@ TEST(Ieee754, Sin) {
EXPECT_THAT(sin(-kInfinity), IsNaN());
// Tests for sin for |x| < pi/4
- EXPECT_EQ(-kInfinity, 1 / sin(-0.0));
- EXPECT_EQ(kInfinity, 1 / sin(0.0));
+ EXPECT_EQ(-kInfinity, Divide(1.0, sin(-0.0)));
+ EXPECT_EQ(kInfinity, Divide(1.0, sin(0.0)));
// sin(x) = x for x < 2^-27
EXPECT_EQ(2.3283064365386963e-10, sin(2.3283064365386963e-10));
EXPECT_EQ(-2.3283064365386963e-10, sin(-2.3283064365386963e-10));
@@ -361,8 +362,8 @@ TEST(Ieee754, Tan) {
EXPECT_THAT(tan(-kInfinity), IsNaN());
// Tests for tan for |x| < pi/4
- EXPECT_EQ(kInfinity, 1 / tan(0.0));
- EXPECT_EQ(-kInfinity, 1 / tan(-0.0));
+ EXPECT_EQ(kInfinity, Divide(1.0, tan(0.0)));
+ EXPECT_EQ(-kInfinity, Divide(1.0, tan(-0.0)));
// tan(x) = x for |x| < 2^-28
EXPECT_EQ(2.3283064365386963e-10, tan(2.3283064365386963e-10));
EXPECT_EQ(-2.3283064365386963e-10, tan(-2.3283064365386963e-10));
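The sin and tan checks divide by a signed zero on purpose: IEEE 754 division preserves the zero's sign, so the sign of the resulting infinity is the cheapest way to verify that sin(-0.0) returned -0.0 rather than +0.0. Routing the division through Divide from the newly included src/base/overflowing-math.h presumably keeps that check while avoiding compiler diagnostics about literal division by zero (the rationale is an inference; only the call sites are visible). The underlying identity in plain C++:

#include <cmath>
#include <limits>

static_assert(std::numeric_limits<double>::is_iec559, "IEEE 754 semantics assumed");
// 1.0 / +0.0 == +infinity and 1.0 / -0.0 == -infinity, so the sign of the
// quotient exposes the sign of an otherwise invisible zero.
const double probe = 1.0 / std::sin(-0.0);  // expected: -infinity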
diff --git a/deps/v8/test/unittests/base/logging-unittest.cc b/deps/v8/test/unittests/base/logging-unittest.cc
index d73845c373..b720331c9e 100644
--- a/deps/v8/test/unittests/base/logging-unittest.cc
+++ b/deps/v8/test/unittests/base/logging-unittest.cc
@@ -6,6 +6,7 @@
#include "src/base/logging.h"
#include "src/objects.h"
+#include "src/objects/smi.h"
#include "testing/gtest-support.h"
namespace v8 {
@@ -64,7 +65,7 @@ TEST(LoggingTest, CompareSignedMismatch) {
TEST(LoggingTest, CompareAgainstStaticConstPointer) {
// These used to produce link errors before http://crrev.com/2524093002.
- CHECK_FAIL(EQ, v8::internal::Smi::kZero, v8::internal::Smi::FromInt(17));
+ CHECK_FAIL(EQ, v8::internal::Smi::zero(), v8::internal::Smi::FromInt(17));
CHECK_SUCCEED(GT, 0, v8::internal::Smi::kMinValue);
}
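Smi::kZero becomes Smi::zero(), in line with Smi turning into a value type elsewhere in this update; a static data member of the class's own, now non-pointer, type is awkward to define, so presumably the constant became an accessor. Assumed shape, from the call site only:

// Assumed declaration; only the call site above is visible in this diff.
class Smi : public Object {
 public:
  static Smi zero();          // replaces the old kZero constant
  static Smi FromInt(int v);  // unchanged
};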
diff --git a/deps/v8/test/unittests/base/platform/condition-variable-unittest.cc b/deps/v8/test/unittests/base/platform/condition-variable-unittest.cc
index b32863f4b2..6206569433 100644
--- a/deps/v8/test/unittests/base/platform/condition-variable-unittest.cc
+++ b/deps/v8/test/unittests/base/platform/condition-variable-unittest.cc
@@ -16,7 +16,7 @@ TEST(ConditionVariable, WaitForAfterNofityOnSameThread) {
Mutex mutex;
ConditionVariable cv;
- LockGuard<Mutex> lock_guard(&mutex);
+ MutexGuard lock_guard(&mutex);
cv.NotifyOne();
EXPECT_FALSE(cv.WaitFor(&mutex, TimeDelta::FromMicroseconds(n)));
@@ -37,7 +37,7 @@ class ThreadWithMutexAndConditionVariable final : public Thread {
finished_(false) {}
void Run() override {
- LockGuard<Mutex> lock_guard(&mutex_);
+ MutexGuard lock_guard(&mutex_);
running_ = true;
cv_.NotifyOne();
while (running_) {
@@ -61,7 +61,7 @@ TEST(ConditionVariable, MultipleThreadsWithSeparateConditionVariables) {
ThreadWithMutexAndConditionVariable threads[kThreadCount];
for (int n = 0; n < kThreadCount; ++n) {
- LockGuard<Mutex> lock_guard(&threads[n].mutex_);
+ MutexGuard lock_guard(&threads[n].mutex_);
EXPECT_FALSE(threads[n].running_);
EXPECT_FALSE(threads[n].finished_);
threads[n].Start();
@@ -72,13 +72,13 @@ TEST(ConditionVariable, MultipleThreadsWithSeparateConditionVariables) {
}
for (int n = kThreadCount - 1; n >= 0; --n) {
- LockGuard<Mutex> lock_guard(&threads[n].mutex_);
+ MutexGuard lock_guard(&threads[n].mutex_);
EXPECT_TRUE(threads[n].running_);
EXPECT_FALSE(threads[n].finished_);
}
for (int n = 0; n < kThreadCount; ++n) {
- LockGuard<Mutex> lock_guard(&threads[n].mutex_);
+ MutexGuard lock_guard(&threads[n].mutex_);
EXPECT_TRUE(threads[n].running_);
EXPECT_FALSE(threads[n].finished_);
// Tell the nth thread to quit.
@@ -88,7 +88,7 @@ TEST(ConditionVariable, MultipleThreadsWithSeparateConditionVariables) {
for (int n = kThreadCount - 1; n >= 0; --n) {
// Wait for nth thread to quit.
- LockGuard<Mutex> lock_guard(&threads[n].mutex_);
+ MutexGuard lock_guard(&threads[n].mutex_);
while (!threads[n].finished_) {
threads[n].cv_.Wait(&threads[n].mutex_);
}
@@ -98,7 +98,7 @@ TEST(ConditionVariable, MultipleThreadsWithSeparateConditionVariables) {
for (int n = 0; n < kThreadCount; ++n) {
threads[n].Join();
- LockGuard<Mutex> lock_guard(&threads[n].mutex_);
+ MutexGuard lock_guard(&threads[n].mutex_);
EXPECT_FALSE(threads[n].running_);
EXPECT_TRUE(threads[n].finished_);
}
@@ -117,7 +117,7 @@ class ThreadWithSharedMutexAndConditionVariable final : public Thread {
mutex_(nullptr) {}
void Run() override {
- LockGuard<Mutex> lock_guard(mutex_);
+ MutexGuard lock_guard(mutex_);
running_ = true;
cv_->NotifyAll();
while (running_) {
@@ -149,7 +149,7 @@ TEST(ConditionVariable, MultipleThreadsWithSharedSeparateConditionVariables) {
// Start all threads.
{
- LockGuard<Mutex> lock_guard(&mutex);
+ MutexGuard lock_guard(&mutex);
for (int n = 0; n < kThreadCount; ++n) {
EXPECT_FALSE(threads[n].running_);
EXPECT_FALSE(threads[n].finished_);
@@ -159,7 +159,7 @@ TEST(ConditionVariable, MultipleThreadsWithSharedSeparateConditionVariables) {
// Wait for all threads to start.
{
- LockGuard<Mutex> lock_guard(&mutex);
+ MutexGuard lock_guard(&mutex);
for (int n = kThreadCount - 1; n >= 0; --n) {
while (!threads[n].running_) {
cv.Wait(&mutex);
@@ -169,7 +169,7 @@ TEST(ConditionVariable, MultipleThreadsWithSharedSeparateConditionVariables) {
// Make sure that all threads are running.
{
- LockGuard<Mutex> lock_guard(&mutex);
+ MutexGuard lock_guard(&mutex);
for (int n = 0; n < kThreadCount; ++n) {
EXPECT_TRUE(threads[n].running_);
EXPECT_FALSE(threads[n].finished_);
@@ -178,7 +178,7 @@ TEST(ConditionVariable, MultipleThreadsWithSharedSeparateConditionVariables) {
// Tell all threads to quit.
{
- LockGuard<Mutex> lock_guard(&mutex);
+ MutexGuard lock_guard(&mutex);
for (int n = kThreadCount - 1; n >= 0; --n) {
EXPECT_TRUE(threads[n].running_);
EXPECT_FALSE(threads[n].finished_);
@@ -190,7 +190,7 @@ TEST(ConditionVariable, MultipleThreadsWithSharedSeparateConditionVariables) {
// Wait for all threads to quit.
{
- LockGuard<Mutex> lock_guard(&mutex);
+ MutexGuard lock_guard(&mutex);
for (int n = 0; n < kThreadCount; ++n) {
while (!threads[n].finished_) {
cv.Wait(&mutex);
@@ -200,7 +200,7 @@ TEST(ConditionVariable, MultipleThreadsWithSharedSeparateConditionVariables) {
// Make sure all threads are finished.
{
- LockGuard<Mutex> lock_guard(&mutex);
+ MutexGuard lock_guard(&mutex);
for (int n = kThreadCount - 1; n >= 0; --n) {
EXPECT_FALSE(threads[n].running_);
EXPECT_TRUE(threads[n].finished_);
@@ -234,7 +234,7 @@ class LoopIncrementThread final : public Thread {
void Run() override {
int last_count = -1;
while (true) {
- LockGuard<Mutex> lock_guard(mutex_);
+ MutexGuard lock_guard(mutex_);
int count = *counter_;
while (count % thread_count_ != rem_ && count < limit_) {
cv_->Wait(mutex_);
diff --git a/deps/v8/test/unittests/base/platform/mutex-unittest.cc b/deps/v8/test/unittests/base/platform/mutex-unittest.cc
index 5af5efb5a9..7eb3973e51 100644
--- a/deps/v8/test/unittests/base/platform/mutex-unittest.cc
+++ b/deps/v8/test/unittests/base/platform/mutex-unittest.cc
@@ -11,8 +11,8 @@ namespace base {
TEST(Mutex, LockGuardMutex) {
Mutex mutex;
- { LockGuard<Mutex> lock_guard(&mutex); }
- { LockGuard<Mutex> lock_guard(&mutex); }
+ { MutexGuard lock_guard(&mutex); }
+ { MutexGuard lock_guard(&mutex); }
}
@@ -28,8 +28,8 @@ TEST(Mutex, LockGuardRecursiveMutex) {
TEST(Mutex, LockGuardLazyMutex) {
LazyMutex lazy_mutex = LAZY_MUTEX_INITIALIZER;
- { LockGuard<Mutex> lock_guard(lazy_mutex.Pointer()); }
- { LockGuard<Mutex> lock_guard(lazy_mutex.Pointer()); }
+ { MutexGuard lock_guard(lazy_mutex.Pointer()); }
+ { MutexGuard lock_guard(lazy_mutex.Pointer()); }
}
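Both files above replace every LockGuard<Mutex> with MutexGuard as a drop-in rename; the alias is presumably declared once in the platform headers (its definition is not part of this diff, so this is an assumption):

// Assumed definition, consistent with the mechanical replacement above:
using MutexGuard = LockGuard<Mutex>;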
diff --git a/deps/v8/test/unittests/base/threaded-list-unittest.cc b/deps/v8/test/unittests/base/threaded-list-unittest.cc
index 96a730370b..effe9b08f7 100644
--- a/deps/v8/test/unittests/base/threaded-list-unittest.cc
+++ b/deps/v8/test/unittests/base/threaded-list-unittest.cc
@@ -20,6 +20,10 @@ struct ThreadedListTestNode {
ThreadedListTestNode* next_;
struct OtherTraits {
+ static ThreadedListTestNode** start(ThreadedListTestNode** h) { return h; }
+ static ThreadedListTestNode* const* start(ThreadedListTestNode* const* h) {
+ return h;
+ }
static ThreadedListTestNode** next(ThreadedListTestNode* t) {
return t->other_next();
}
@@ -134,16 +138,6 @@ TEST_F(ThreadedListTest, AddFront) {
CHECK_EQ(list.first(), &new_node);
}
-TEST_F(ThreadedListTest, ReinitializeHead) {
- CHECK_EQ(list.LengthForTest(), 5);
- CHECK_NE(extra_test_list.first(), list.first());
- list.ReinitializeHead(&extra_test_node_0);
- list.Verify();
- CHECK_EQ(extra_test_list.first(), list.first());
- CHECK_EQ(extra_test_list.end(), list.end());
- CHECK_EQ(extra_test_list.LengthForTest(), 3);
-}
-
TEST_F(ThreadedListTest, DropHead) {
CHECK_EQ(extra_test_list.LengthForTest(), 3);
CHECK_EQ(extra_test_list.first(), &extra_test_node_0);
@@ -166,6 +160,23 @@ TEST_F(ThreadedListTest, Append) {
CHECK_EQ(list.end(), initial_extra_list_end);
}
+TEST_F(ThreadedListTest, AppendOutOfScope) {
+ ThreadedListTestNode local_extra_test_node_0;
+ CHECK_EQ(list.LengthForTest(), 5);
+ {
+ ThreadedList<ThreadedListTestNode, ThreadedListTestNode::OtherTraits>
+ scoped_extra_test_list;
+
+ list.Append(std::move(scoped_extra_test_list));
+ }
+ list.Add(&local_extra_test_node_0);
+
+ list.Verify();
+ CHECK_EQ(list.LengthForTest(), 6);
+ CHECK_EQ(list.AtForTest(4), &nodes[4]);
+ CHECK_EQ(list.AtForTest(5), &local_extra_test_node_0);
+}
+
TEST_F(ThreadedListTest, Prepend) {
CHECK_EQ(list.LengthForTest(), 5);
list.Prepend(std::move(extra_test_list));
diff --git a/deps/v8/test/unittests/cancelable-tasks-unittest.cc b/deps/v8/test/unittests/cancelable-tasks-unittest.cc
index 97ac4d4b7d..05048136aa 100644
--- a/deps/v8/test/unittests/cancelable-tasks-unittest.cc
+++ b/deps/v8/test/unittests/cancelable-tasks-unittest.cc
@@ -5,258 +5,262 @@
#include "src/base/atomicops.h"
#include "src/base/platform/platform.h"
#include "src/cancelable-task.h"
+#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
-
namespace v8 {
namespace internal {
namespace {
+using ResultType = std::atomic<CancelableTaskManager::Id>;
+
+class CancelableTaskManagerTest;
+
class TestTask : public Task, public Cancelable {
public:
- enum Mode { kDoNothing, kWaitTillCanceledAgain, kCheckNotRun };
+ enum Mode { kDoNothing, kWaitTillCancelTriggered, kCheckNotRun };
- TestTask(CancelableTaskManager* parent, base::AtomicWord* result,
- Mode mode = kDoNothing)
- : Cancelable(parent), result_(result), mode_(mode) {}
+ TestTask(CancelableTaskManagerTest* test, ResultType* result, Mode mode);
- // Task overrides.
- void Run() final {
- if (TryRun()) {
- RunInternal();
- }
- }
+ // Task override.
+ void Run() final;
private:
- void RunInternal() {
- base::Release_Store(result_, id());
-
- switch (mode_) {
- case kWaitTillCanceledAgain:
- // Simple busy wait until the main thread tried to cancel.
- while (CancelAttempts() == 0) {
- }
- break;
- case kCheckNotRun:
- // Check that we never execute {RunInternal}.
- EXPECT_TRUE(false);
- break;
- default:
- break;
- }
- }
-
- base::AtomicWord* result_;
- Mode mode_;
+ ResultType* const result_;
+ const Mode mode_;
+ CancelableTaskManagerTest* const test_;
};
-
class SequentialRunner {
public:
- explicit SequentialRunner(TestTask* task) : task_(task) {}
+ explicit SequentialRunner(std::unique_ptr<TestTask> task)
+ : task_(std::move(task)), task_id_(task_->id()) {}
void Run() {
task_->Run();
- delete task_;
+ task_.reset();
}
+ CancelableTaskManager::Id task_id() const { return task_id_; }
+
private:
- TestTask* task_;
+ std::unique_ptr<TestTask> task_;
+ const CancelableTaskManager::Id task_id_;
};
-
class ThreadedRunner final : public base::Thread {
public:
- explicit ThreadedRunner(TestTask* task)
- : Thread(Options("runner thread")), task_(task) {}
+ explicit ThreadedRunner(std::unique_ptr<TestTask> task)
+ : Thread(Options("runner thread")),
+ task_(std::move(task)),
+ task_id_(task_->id()) {}
void Run() override {
task_->Run();
- delete task_;
+ task_.reset();
}
+ CancelableTaskManager::Id task_id() const { return task_id_; }
+
private:
- TestTask* task_;
+ std::unique_ptr<TestTask> task_;
+ const CancelableTaskManager::Id task_id_;
};
+class CancelableTaskManagerTest : public ::testing::Test {
+ public:
+ CancelableTaskManager* manager() { return &manager_; }
-typedef base::AtomicWord ResultType;
+ std::unique_ptr<TestTask> NewTask(
+ ResultType* result, TestTask::Mode mode = TestTask::kDoNothing) {
+ return base::make_unique<TestTask>(this, result, mode);
+ }
+ void CancelAndWait() {
+ cancel_triggered_.store(true);
+ manager_.CancelAndWait();
+ }
-intptr_t GetValue(ResultType* result) { return base::Acquire_Load(result); }
+ TryAbortResult TryAbortAll() {
+ cancel_triggered_.store(true);
+ return manager_.TryAbortAll();
+ }
-} // namespace
+ bool cancel_triggered() const { return cancel_triggered_.load(); }
+ private:
+ CancelableTaskManager manager_;
+ std::atomic<bool> cancel_triggered_{false};
+};
-TEST(CancelableTask, EmptyCancelableTaskManager) {
- CancelableTaskManager manager;
- manager.CancelAndWait();
+TestTask::TestTask(CancelableTaskManagerTest* test, ResultType* result,
+ Mode mode)
+ : Cancelable(test->manager()), result_(result), mode_(mode), test_(test) {}
+
+void TestTask::Run() {
+ if (!TryRun()) return;
+
+ result_->store(id());
+
+ switch (mode_) {
+ case kWaitTillCancelTriggered:
+      // Simple busy wait until the main thread has triggered the cancel.
+ while (!test_->cancel_triggered()) {
+ }
+ break;
+ case kCheckNotRun:
+      // Check that the task body is never executed.
+ EXPECT_TRUE(false);
+ break;
+ default:
+ break;
+ }
}
+} // namespace
-TEST(CancelableTask, SequentialCancelAndWait) {
- CancelableTaskManager manager;
- ResultType result1 = 0;
- SequentialRunner runner1(
- new TestTask(&manager, &result1, TestTask::kCheckNotRun));
- EXPECT_EQ(GetValue(&result1), 0);
- manager.CancelAndWait();
- EXPECT_EQ(GetValue(&result1), 0);
- runner1.Run(); // Run to avoid leaking the Task.
- EXPECT_EQ(GetValue(&result1), 0);
+TEST_F(CancelableTaskManagerTest, EmptyCancelableTaskManager) {
+ CancelAndWait();
}
-
-TEST(CancelableTask, SequentialMultipleTasks) {
- CancelableTaskManager manager;
- ResultType result1 = 0;
- ResultType result2 = 0;
- TestTask* task1 = new TestTask(&manager, &result1);
- TestTask* task2 = new TestTask(&manager, &result2);
- SequentialRunner runner1(task1);
- SequentialRunner runner2(task2);
- EXPECT_EQ(task1->id(), 1u);
- EXPECT_EQ(task2->id(), 2u);
-
- EXPECT_EQ(GetValue(&result1), 0);
- runner1.Run(); // Don't touch task1 after running it.
- EXPECT_EQ(GetValue(&result1), 1);
-
- EXPECT_EQ(GetValue(&result2), 0);
- runner2.Run(); // Don't touch task2 after running it.
- EXPECT_EQ(GetValue(&result2), 2);
-
- manager.CancelAndWait();
- EXPECT_FALSE(manager.TryAbort(1));
- EXPECT_FALSE(manager.TryAbort(2));
+TEST_F(CancelableTaskManagerTest, SequentialCancelAndWait) {
+ ResultType result1{0};
+ SequentialRunner runner1(NewTask(&result1, TestTask::kCheckNotRun));
+ EXPECT_EQ(0u, result1);
+ CancelAndWait();
+ EXPECT_EQ(0u, result1);
+ runner1.Run();
+ EXPECT_EQ(0u, result1);
}
+TEST_F(CancelableTaskManagerTest, SequentialMultipleTasks) {
+ ResultType result1{0};
+ ResultType result2{0};
+ SequentialRunner runner1(NewTask(&result1));
+ SequentialRunner runner2(NewTask(&result2));
+ EXPECT_EQ(1u, runner1.task_id());
+ EXPECT_EQ(2u, runner2.task_id());
+
+ EXPECT_EQ(0u, result1);
+ runner1.Run();
+ EXPECT_EQ(1u, result1);
+
+ EXPECT_EQ(0u, result2);
+ runner2.Run();
+ EXPECT_EQ(2u, result2);
+
+ CancelAndWait();
+ EXPECT_EQ(TryAbortResult::kTaskRemoved, manager()->TryAbort(1));
+ EXPECT_EQ(TryAbortResult::kTaskRemoved, manager()->TryAbort(2));
+}
-TEST(CancelableTask, ThreadedMultipleTasksStarted) {
- CancelableTaskManager manager;
- ResultType result1 = 0;
- ResultType result2 = 0;
- TestTask* task1 =
- new TestTask(&manager, &result1, TestTask::kWaitTillCanceledAgain);
- TestTask* task2 =
- new TestTask(&manager, &result2, TestTask::kWaitTillCanceledAgain);
- ThreadedRunner runner1(task1);
- ThreadedRunner runner2(task2);
+TEST_F(CancelableTaskManagerTest, ThreadedMultipleTasksStarted) {
+ ResultType result1{0};
+ ResultType result2{0};
+ ThreadedRunner runner1(NewTask(&result1, TestTask::kWaitTillCancelTriggered));
+ ThreadedRunner runner2(NewTask(&result2, TestTask::kWaitTillCancelTriggered));
runner1.Start();
runner2.Start();
// Busy wait on result to make sure both tasks are done.
- while ((GetValue(&result1) == 0) || (GetValue(&result2) == 0)) {
+ while (result1.load() == 0 || result2.load() == 0) {
}
- manager.CancelAndWait();
+ CancelAndWait();
runner1.Join();
runner2.Join();
- EXPECT_EQ(GetValue(&result1), 1);
- EXPECT_EQ(GetValue(&result2), 2);
+ EXPECT_EQ(1u, result1);
+ EXPECT_EQ(2u, result2);
}
-
-TEST(CancelableTask, ThreadedMultipleTasksNotRun) {
- CancelableTaskManager manager;
- ResultType result1 = 0;
- ResultType result2 = 0;
- TestTask* task1 = new TestTask(&manager, &result1, TestTask::kCheckNotRun);
- TestTask* task2 = new TestTask(&manager, &result2, TestTask::kCheckNotRun);
- ThreadedRunner runner1(task1);
- ThreadedRunner runner2(task2);
- manager.CancelAndWait();
+TEST_F(CancelableTaskManagerTest, ThreadedMultipleTasksNotRun) {
+ ResultType result1{0};
+ ResultType result2{0};
+ ThreadedRunner runner1(NewTask(&result1, TestTask::kCheckNotRun));
+ ThreadedRunner runner2(NewTask(&result2, TestTask::kCheckNotRun));
+ CancelAndWait();
// Tasks are canceled, hence the runner will bail out and not update result.
runner1.Start();
runner2.Start();
runner1.Join();
runner2.Join();
- EXPECT_EQ(GetValue(&result1), 0);
- EXPECT_EQ(GetValue(&result2), 0);
+ EXPECT_EQ(0u, result1);
+ EXPECT_EQ(0u, result2);
}
-
-TEST(CancelableTask, RemoveBeforeCancelAndWait) {
- CancelableTaskManager manager;
- ResultType result1 = 0;
- TestTask* task1 = new TestTask(&manager, &result1, TestTask::kCheckNotRun);
- ThreadedRunner runner1(task1);
- CancelableTaskManager::Id id = task1->id();
- EXPECT_EQ(id, 1u);
- EXPECT_TRUE(manager.TryAbort(id));
+TEST_F(CancelableTaskManagerTest, RemoveBeforeCancelAndWait) {
+ ResultType result1{0};
+ ThreadedRunner runner1(NewTask(&result1, TestTask::kCheckNotRun));
+ CancelableTaskManager::Id id = runner1.task_id();
+ EXPECT_EQ(1u, id);
+ EXPECT_EQ(TryAbortResult::kTaskAborted, manager()->TryAbort(id));
runner1.Start();
runner1.Join();
- manager.CancelAndWait();
- EXPECT_EQ(GetValue(&result1), 0);
+ CancelAndWait();
+ EXPECT_EQ(0u, result1);
}
-
-TEST(CancelableTask, RemoveAfterCancelAndWait) {
- CancelableTaskManager manager;
- ResultType result1 = 0;
- TestTask* task1 = new TestTask(&manager, &result1);
- ThreadedRunner runner1(task1);
- CancelableTaskManager::Id id = task1->id();
- EXPECT_EQ(id, 1u);
+TEST_F(CancelableTaskManagerTest, RemoveAfterCancelAndWait) {
+ ResultType result1{0};
+ ThreadedRunner runner1(NewTask(&result1));
+ CancelableTaskManager::Id id = runner1.task_id();
+ EXPECT_EQ(1u, id);
runner1.Start();
runner1.Join();
- manager.CancelAndWait();
- EXPECT_FALSE(manager.TryAbort(id));
- EXPECT_EQ(GetValue(&result1), 1);
+ CancelAndWait();
+ EXPECT_EQ(TryAbortResult::kTaskRemoved, manager()->TryAbort(id));
+ EXPECT_EQ(1u, result1);
}
-
-TEST(CancelableTask, RemoveUnmanagedId) {
- CancelableTaskManager manager;
- EXPECT_FALSE(manager.TryAbort(1));
- EXPECT_FALSE(manager.TryAbort(2));
- manager.CancelAndWait();
- EXPECT_FALSE(manager.TryAbort(1));
- EXPECT_FALSE(manager.TryAbort(3));
+TEST_F(CancelableTaskManagerTest, RemoveUnmanagedId) {
+ EXPECT_EQ(TryAbortResult::kTaskRemoved, manager()->TryAbort(1));
+ EXPECT_EQ(TryAbortResult::kTaskRemoved, manager()->TryAbort(2));
+ CancelAndWait();
+ EXPECT_EQ(TryAbortResult::kTaskRemoved, manager()->TryAbort(1));
+ EXPECT_EQ(TryAbortResult::kTaskRemoved, manager()->TryAbort(3));
}
-TEST(CancelableTask, EmptyTryAbortAll) {
- CancelableTaskManager manager;
- EXPECT_EQ(manager.TryAbortAll(), CancelableTaskManager::kTaskRemoved);
+TEST_F(CancelableTaskManagerTest, EmptyTryAbortAll) {
+ EXPECT_EQ(TryAbortResult::kTaskRemoved, TryAbortAll());
+ CancelAndWait();
}
-TEST(CancelableTask, ThreadedMultipleTasksNotRunTryAbortAll) {
- CancelableTaskManager manager;
- ResultType result1 = 0;
- ResultType result2 = 0;
- TestTask* task1 = new TestTask(&manager, &result1, TestTask::kCheckNotRun);
- TestTask* task2 = new TestTask(&manager, &result2, TestTask::kCheckNotRun);
- ThreadedRunner runner1(task1);
- ThreadedRunner runner2(task2);
- EXPECT_EQ(manager.TryAbortAll(), CancelableTaskManager::kTaskAborted);
+TEST_F(CancelableTaskManagerTest, ThreadedMultipleTasksNotRunTryAbortAll) {
+ ResultType result1{0};
+ ResultType result2{0};
+ ThreadedRunner runner1(NewTask(&result1, TestTask::kCheckNotRun));
+ ThreadedRunner runner2(NewTask(&result2, TestTask::kCheckNotRun));
+ EXPECT_EQ(TryAbortResult::kTaskAborted, TryAbortAll());
// Tasks are canceled, hence the runner will bail out and not update result.
runner1.Start();
runner2.Start();
runner1.Join();
runner2.Join();
- EXPECT_EQ(GetValue(&result1), 0);
- EXPECT_EQ(GetValue(&result2), 0);
+ EXPECT_EQ(0u, result1);
+ EXPECT_EQ(0u, result2);
+ CancelAndWait();
}
-TEST(CancelableTask, ThreadedMultipleTasksStartedTryAbortAll) {
- CancelableTaskManager manager;
- ResultType result1 = 0;
- ResultType result2 = 0;
- TestTask* task1 =
- new TestTask(&manager, &result1, TestTask::kWaitTillCanceledAgain);
- TestTask* task2 =
- new TestTask(&manager, &result2, TestTask::kWaitTillCanceledAgain);
- ThreadedRunner runner1(task1);
- ThreadedRunner runner2(task2);
+TEST_F(CancelableTaskManagerTest, ThreadedMultipleTasksStartedTryAbortAll) {
+ ResultType result1{0};
+ ResultType result2{0};
+ ThreadedRunner runner1(NewTask(&result1, TestTask::kWaitTillCancelTriggered));
+ ThreadedRunner runner2(NewTask(&result2, TestTask::kWaitTillCancelTriggered));
runner1.Start();
// Busy wait on result to make sure task1 is done.
- while (GetValue(&result1) == 0) {
+ while (result1.load() == 0) {
}
- EXPECT_EQ(manager.TryAbortAll(), CancelableTaskManager::kTaskRunning);
+ // If the task saw that we triggered the cancel and finished *before* the
+ // actual cancel happened, we get {kTaskAborted}. Otherwise, we get
+ // {kTaskRunning}.
+ EXPECT_THAT(TryAbortAll(),
+ testing::AnyOf(testing::Eq(TryAbortResult::kTaskAborted),
+ testing::Eq(TryAbortResult::kTaskRunning)));
runner2.Start();
runner1.Join();
runner2.Join();
- EXPECT_EQ(GetValue(&result1), 1);
- EXPECT_EQ(GetValue(&result2), 0);
+ EXPECT_EQ(1u, result1);
+ EXPECT_EQ(0u, result2);
+ CancelAndWait();
}
} // namespace internal
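The rewrite above turns the free-standing CancelableTask tests into a fixture that owns the manager, replaces raw new/delete of tasks with unique_ptr, swaps the hand-rolled base::AtomicWord plumbing for std::atomic, and, most visibly, has TryAbort/TryAbortAll return a three-state result instead of a bool; that is what lets the final racy case accept either outcome. The enum's values, reconstructed from what the tests compare against (the declaration itself is not in this hunk):

// Reconstructed from the comparisons above; the real declaration may differ.
enum class TryAbortResult {
  kTaskRemoved,  // id unknown, already finished, or already removed
  kTaskRunning,  // task is currently executing and cannot be aborted
  kTaskAborted   // task was removed before it ever ran
};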
diff --git a/deps/v8/test/unittests/char-predicates-unittest.cc b/deps/v8/test/unittests/char-predicates-unittest.cc
index 3c6cf5d6a6..85c550a7e2 100644
--- a/deps/v8/test/unittests/char-predicates-unittest.cc
+++ b/deps/v8/test/unittests/char-predicates-unittest.cc
@@ -3,6 +3,7 @@
// found in the LICENSE file.
#include "src/char-predicates.h"
+#include "src/char-predicates-inl.h"
#include "src/unicode.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -10,122 +11,122 @@ namespace v8 {
namespace internal {
TEST(CharPredicatesTest, WhiteSpace) {
- EXPECT_TRUE(WhiteSpace::Is(0x0009));
- EXPECT_TRUE(WhiteSpace::Is(0x000B));
- EXPECT_TRUE(WhiteSpace::Is(0x000C));
- EXPECT_TRUE(WhiteSpace::Is(' '));
- EXPECT_TRUE(WhiteSpace::Is(0x00A0));
- EXPECT_TRUE(WhiteSpace::Is(0x1680));
- EXPECT_TRUE(WhiteSpace::Is(0x2000));
- EXPECT_TRUE(WhiteSpace::Is(0x2007));
- EXPECT_TRUE(WhiteSpace::Is(0x202F));
- EXPECT_TRUE(WhiteSpace::Is(0x205F));
- EXPECT_TRUE(WhiteSpace::Is(0x3000));
- EXPECT_TRUE(WhiteSpace::Is(0xFEFF));
- EXPECT_FALSE(WhiteSpace::Is(0x180E));
+ EXPECT_TRUE(IsWhiteSpace(0x0009));
+ EXPECT_TRUE(IsWhiteSpace(0x000B));
+ EXPECT_TRUE(IsWhiteSpace(0x000C));
+ EXPECT_TRUE(IsWhiteSpace(' '));
+ EXPECT_TRUE(IsWhiteSpace(0x00A0));
+ EXPECT_TRUE(IsWhiteSpace(0x1680));
+ EXPECT_TRUE(IsWhiteSpace(0x2000));
+ EXPECT_TRUE(IsWhiteSpace(0x2007));
+ EXPECT_TRUE(IsWhiteSpace(0x202F));
+ EXPECT_TRUE(IsWhiteSpace(0x205F));
+ EXPECT_TRUE(IsWhiteSpace(0x3000));
+ EXPECT_TRUE(IsWhiteSpace(0xFEFF));
+ EXPECT_FALSE(IsWhiteSpace(0x180E));
}
TEST(CharPredicatesTest, WhiteSpaceOrLineTerminator) {
- EXPECT_TRUE(WhiteSpaceOrLineTerminator::Is(0x0009));
- EXPECT_TRUE(WhiteSpaceOrLineTerminator::Is(0x000B));
- EXPECT_TRUE(WhiteSpaceOrLineTerminator::Is(0x000C));
- EXPECT_TRUE(WhiteSpaceOrLineTerminator::Is(' '));
- EXPECT_TRUE(WhiteSpaceOrLineTerminator::Is(0x00A0));
- EXPECT_TRUE(WhiteSpaceOrLineTerminator::Is(0x1680));
- EXPECT_TRUE(WhiteSpaceOrLineTerminator::Is(0x2000));
- EXPECT_TRUE(WhiteSpaceOrLineTerminator::Is(0x2007));
- EXPECT_TRUE(WhiteSpaceOrLineTerminator::Is(0x202F));
- EXPECT_TRUE(WhiteSpaceOrLineTerminator::Is(0x205F));
- EXPECT_TRUE(WhiteSpaceOrLineTerminator::Is(0xFEFF));
+ EXPECT_TRUE(IsWhiteSpaceOrLineTerminator(0x0009));
+ EXPECT_TRUE(IsWhiteSpaceOrLineTerminator(0x000B));
+ EXPECT_TRUE(IsWhiteSpaceOrLineTerminator(0x000C));
+ EXPECT_TRUE(IsWhiteSpaceOrLineTerminator(' '));
+ EXPECT_TRUE(IsWhiteSpaceOrLineTerminator(0x00A0));
+ EXPECT_TRUE(IsWhiteSpaceOrLineTerminator(0x1680));
+ EXPECT_TRUE(IsWhiteSpaceOrLineTerminator(0x2000));
+ EXPECT_TRUE(IsWhiteSpaceOrLineTerminator(0x2007));
+ EXPECT_TRUE(IsWhiteSpaceOrLineTerminator(0x202F));
+ EXPECT_TRUE(IsWhiteSpaceOrLineTerminator(0x205F));
+ EXPECT_TRUE(IsWhiteSpaceOrLineTerminator(0xFEFF));
// Line terminators
- EXPECT_TRUE(WhiteSpaceOrLineTerminator::Is(0x000A));
- EXPECT_TRUE(WhiteSpaceOrLineTerminator::Is(0x000D));
- EXPECT_TRUE(WhiteSpaceOrLineTerminator::Is(0x2028));
- EXPECT_TRUE(WhiteSpaceOrLineTerminator::Is(0x2029));
- EXPECT_FALSE(WhiteSpaceOrLineTerminator::Is(0x180E));
+ EXPECT_TRUE(IsWhiteSpaceOrLineTerminator(0x000A));
+ EXPECT_TRUE(IsWhiteSpaceOrLineTerminator(0x000D));
+ EXPECT_TRUE(IsWhiteSpaceOrLineTerminator(0x2028));
+ EXPECT_TRUE(IsWhiteSpaceOrLineTerminator(0x2029));
+ EXPECT_FALSE(IsWhiteSpaceOrLineTerminator(0x180E));
}
TEST(CharPredicatesTest, IdentifierStart) {
- EXPECT_TRUE(IdentifierStart::Is('$'));
- EXPECT_TRUE(IdentifierStart::Is('_'));
- EXPECT_TRUE(IdentifierStart::Is('\\'));
+ EXPECT_TRUE(IsIdentifierStart('$'));
+ EXPECT_TRUE(IsIdentifierStart('_'));
+ EXPECT_TRUE(IsIdentifierStart('\\'));
// http://www.unicode.org/reports/tr31/
// curl http://www.unicode.org/Public/UCD/latest/ucd/PropList.txt |
// grep 'Other_ID_Start'
// Other_ID_Start
- EXPECT_TRUE(IdentifierStart::Is(0x1885));
- EXPECT_TRUE(IdentifierStart::Is(0x1886));
- EXPECT_TRUE(IdentifierStart::Is(0x2118));
- EXPECT_TRUE(IdentifierStart::Is(0x212E));
- EXPECT_TRUE(IdentifierStart::Is(0x309B));
- EXPECT_TRUE(IdentifierStart::Is(0x309C));
+ EXPECT_TRUE(IsIdentifierStart(0x1885));
+ EXPECT_TRUE(IsIdentifierStart(0x1886));
+ EXPECT_TRUE(IsIdentifierStart(0x2118));
+ EXPECT_TRUE(IsIdentifierStart(0x212E));
+ EXPECT_TRUE(IsIdentifierStart(0x309B));
+ EXPECT_TRUE(IsIdentifierStart(0x309C));
// Issue 2892:
// \u2E2F has the Pattern_Syntax property, excluding it from ID_Start.
- EXPECT_FALSE(IdentifierStart::Is(0x2E2F));
+ EXPECT_FALSE(IsIdentifierStart(0x2E2F));
#ifdef V8_INTL_SUPPORT
// New in Unicode 8.0 (6,847 code points)
// [:ID_Start:] & [[:Age=8.0:] - [:Age=7.0:]]
- EXPECT_TRUE(IdentifierStart::Is(0x08B3));
- EXPECT_TRUE(IdentifierStart::Is(0x0AF9));
- EXPECT_TRUE(IdentifierStart::Is(0x13F8));
- EXPECT_TRUE(IdentifierStart::Is(0x9FCD));
- EXPECT_TRUE(IdentifierStart::Is(0xAB60));
- EXPECT_TRUE(IdentifierStart::Is(0x10CC0));
- EXPECT_TRUE(IdentifierStart::Is(0x108E0));
- EXPECT_TRUE(IdentifierStart::Is(0x2B820));
+ EXPECT_TRUE(IsIdentifierStart(0x08B3));
+ EXPECT_TRUE(IsIdentifierStart(0x0AF9));
+ EXPECT_TRUE(IsIdentifierStart(0x13F8));
+ EXPECT_TRUE(IsIdentifierStart(0x9FCD));
+ EXPECT_TRUE(IsIdentifierStart(0xAB60));
+ EXPECT_TRUE(IsIdentifierStart(0x10CC0));
+ EXPECT_TRUE(IsIdentifierStart(0x108E0));
+ EXPECT_TRUE(IsIdentifierStart(0x2B820));
// New in Unicode 9.0 (7,177 code points)
// [:ID_Start:] & [[:Age=9.0:] - [:Age=8.0:]]
- EXPECT_TRUE(IdentifierStart::Is(0x1C80));
- EXPECT_TRUE(IdentifierStart::Is(0x104DB));
- EXPECT_TRUE(IdentifierStart::Is(0x1E922));
+ EXPECT_TRUE(IsIdentifierStart(0x1C80));
+ EXPECT_TRUE(IsIdentifierStart(0x104DB));
+ EXPECT_TRUE(IsIdentifierStart(0x1E922));
#endif
}
TEST(CharPredicatesTest, IdentifierPart) {
- EXPECT_TRUE(IdentifierPart::Is('$'));
- EXPECT_TRUE(IdentifierPart::Is('_'));
- EXPECT_TRUE(IdentifierPart::Is('\\'));
- EXPECT_TRUE(IdentifierPart::Is(0x200C));
- EXPECT_TRUE(IdentifierPart::Is(0x200D));
+ EXPECT_TRUE(IsIdentifierPart('$'));
+ EXPECT_TRUE(IsIdentifierPart('_'));
+ EXPECT_TRUE(IsIdentifierPart('\\'));
+ EXPECT_TRUE(IsIdentifierPart(0x200C));
+ EXPECT_TRUE(IsIdentifierPart(0x200D));
#ifdef V8_INTL_SUPPORT
// New in Unicode 8.0 (6,847 code points)
// [:ID_Start:] & [[:Age=8.0:] - [:Age=7.0:]]
- EXPECT_TRUE(IdentifierPart::Is(0x08B3));
- EXPECT_TRUE(IdentifierPart::Is(0x0AF9));
- EXPECT_TRUE(IdentifierPart::Is(0x13F8));
- EXPECT_TRUE(IdentifierPart::Is(0x9FCD));
- EXPECT_TRUE(IdentifierPart::Is(0xAB60));
- EXPECT_TRUE(IdentifierPart::Is(0x10CC0));
- EXPECT_TRUE(IdentifierPart::Is(0x108E0));
- EXPECT_TRUE(IdentifierPart::Is(0x2B820));
+ EXPECT_TRUE(IsIdentifierPart(0x08B3));
+ EXPECT_TRUE(IsIdentifierPart(0x0AF9));
+ EXPECT_TRUE(IsIdentifierPart(0x13F8));
+ EXPECT_TRUE(IsIdentifierPart(0x9FCD));
+ EXPECT_TRUE(IsIdentifierPart(0xAB60));
+ EXPECT_TRUE(IsIdentifierPart(0x10CC0));
+ EXPECT_TRUE(IsIdentifierPart(0x108E0));
+ EXPECT_TRUE(IsIdentifierPart(0x2B820));
// [[:ID_Continue:]-[:ID_Start:]] & [[:Age=8.0:]-[:Age=7.0:]]
// 162 code points
- EXPECT_TRUE(IdentifierPart::Is(0x08E3));
- EXPECT_TRUE(IdentifierPart::Is(0xA69E));
- EXPECT_TRUE(IdentifierPart::Is(0x11730));
+ EXPECT_TRUE(IsIdentifierPart(0x08E3));
+ EXPECT_TRUE(IsIdentifierPart(0xA69E));
+ EXPECT_TRUE(IsIdentifierPart(0x11730));
// New in Unicode 9.0 (7,177 code points)
// [:ID_Start:] & [[:Age=9.0:] - [:Age=8.0:]]
- EXPECT_TRUE(IdentifierPart::Is(0x1C80));
- EXPECT_TRUE(IdentifierPart::Is(0x104DB));
- EXPECT_TRUE(IdentifierPart::Is(0x1E922));
+ EXPECT_TRUE(IsIdentifierPart(0x1C80));
+ EXPECT_TRUE(IsIdentifierPart(0x104DB));
+ EXPECT_TRUE(IsIdentifierPart(0x1E922));
// [[:ID_Continue:]-[:ID_Start:]] & [[:Age=9.0:]-[:Age=8.0:]]
// 162 code points
- EXPECT_TRUE(IdentifierPart::Is(0x08D4));
- EXPECT_TRUE(IdentifierPart::Is(0x1DFB));
- EXPECT_TRUE(IdentifierPart::Is(0xA8C5));
- EXPECT_TRUE(IdentifierPart::Is(0x11450));
+ EXPECT_TRUE(IsIdentifierPart(0x08D4));
+ EXPECT_TRUE(IsIdentifierPart(0x1DFB));
+ EXPECT_TRUE(IsIdentifierPart(0xA8C5));
+ EXPECT_TRUE(IsIdentifierPart(0x11450));
#endif
// http://www.unicode.org/reports/tr31/
@@ -133,58 +134,58 @@ TEST(CharPredicatesTest, IdentifierPart) {
// grep 'Other_ID_(Continue|Start)'
// Other_ID_Start
- EXPECT_TRUE(IdentifierPart::Is(0x1885));
- EXPECT_TRUE(IdentifierPart::Is(0x1886));
- EXPECT_TRUE(IdentifierPart::Is(0x2118));
- EXPECT_TRUE(IdentifierPart::Is(0x212E));
- EXPECT_TRUE(IdentifierPart::Is(0x309B));
- EXPECT_TRUE(IdentifierPart::Is(0x309C));
+ EXPECT_TRUE(IsIdentifierPart(0x1885));
+ EXPECT_TRUE(IsIdentifierPart(0x1886));
+ EXPECT_TRUE(IsIdentifierPart(0x2118));
+ EXPECT_TRUE(IsIdentifierPart(0x212E));
+ EXPECT_TRUE(IsIdentifierPart(0x309B));
+ EXPECT_TRUE(IsIdentifierPart(0x309C));
// Other_ID_Continue
- EXPECT_TRUE(IdentifierPart::Is(0x00B7));
- EXPECT_TRUE(IdentifierPart::Is(0x0387));
- EXPECT_TRUE(IdentifierPart::Is(0x1369));
- EXPECT_TRUE(IdentifierPart::Is(0x1370));
- EXPECT_TRUE(IdentifierPart::Is(0x1371));
- EXPECT_TRUE(IdentifierPart::Is(0x19DA));
+ EXPECT_TRUE(IsIdentifierPart(0x00B7));
+ EXPECT_TRUE(IsIdentifierPart(0x0387));
+ EXPECT_TRUE(IsIdentifierPart(0x1369));
+ EXPECT_TRUE(IsIdentifierPart(0x1370));
+ EXPECT_TRUE(IsIdentifierPart(0x1371));
+ EXPECT_TRUE(IsIdentifierPart(0x19DA));
// Issue 2892:
// \u2E2F has the Pattern_Syntax property, excluding it from ID_Start.
- EXPECT_FALSE(IdentifierPart::Is(0x2E2F));
+ EXPECT_FALSE(IsIdentifierPart(0x2E2F));
}
#ifdef V8_INTL_SUPPORT
TEST(CharPredicatesTest, SupplementaryPlaneIdentifiers) {
// Both ID_Start and ID_Continue.
- EXPECT_TRUE(IdentifierStart::Is(0x10403)); // Category Lu
- EXPECT_TRUE(IdentifierPart::Is(0x10403));
- EXPECT_TRUE(IdentifierStart::Is(0x1043C)); // Category Ll
- EXPECT_TRUE(IdentifierPart::Is(0x1043C));
- EXPECT_TRUE(IdentifierStart::Is(0x16F9C)); // Category Lm
- EXPECT_TRUE(IdentifierPart::Is(0x16F9C));
- EXPECT_TRUE(IdentifierStart::Is(0x10048)); // Category Lo
- EXPECT_TRUE(IdentifierPart::Is(0x10048));
- EXPECT_TRUE(IdentifierStart::Is(0x1014D)); // Category Nl
- EXPECT_TRUE(IdentifierPart::Is(0x1014D));
+ EXPECT_TRUE(IsIdentifierStart(0x10403)); // Category Lu
+ EXPECT_TRUE(IsIdentifierPart(0x10403));
+ EXPECT_TRUE(IsIdentifierStart(0x1043C)); // Category Ll
+ EXPECT_TRUE(IsIdentifierPart(0x1043C));
+ EXPECT_TRUE(IsIdentifierStart(0x16F9C)); // Category Lm
+ EXPECT_TRUE(IsIdentifierPart(0x16F9C));
+ EXPECT_TRUE(IsIdentifierStart(0x10048)); // Category Lo
+ EXPECT_TRUE(IsIdentifierPart(0x10048));
+ EXPECT_TRUE(IsIdentifierStart(0x1014D)); // Category Nl
+ EXPECT_TRUE(IsIdentifierPart(0x1014D));
// New in Unicode 8.0
// [ [:ID_Start=Yes:] & [:Age=8.0:]] - [:Age=7.0:]
- EXPECT_TRUE(IdentifierStart::Is(0x108E0));
- EXPECT_TRUE(IdentifierStart::Is(0x10C80));
+ EXPECT_TRUE(IsIdentifierStart(0x108E0));
+ EXPECT_TRUE(IsIdentifierStart(0x10C80));
// Only ID_Continue.
- EXPECT_FALSE(IdentifierStart::Is(0x101FD)); // Category Mn
- EXPECT_TRUE(IdentifierPart::Is(0x101FD));
- EXPECT_FALSE(IdentifierStart::Is(0x11002)); // Category Mc
- EXPECT_TRUE(IdentifierPart::Is(0x11002));
- EXPECT_FALSE(IdentifierStart::Is(0x104A9)); // Category Nd
- EXPECT_TRUE(IdentifierPart::Is(0x104A9));
+ EXPECT_FALSE(IsIdentifierStart(0x101FD)); // Category Mn
+ EXPECT_TRUE(IsIdentifierPart(0x101FD));
+ EXPECT_FALSE(IsIdentifierStart(0x11002)); // Category Mc
+ EXPECT_TRUE(IsIdentifierPart(0x11002));
+ EXPECT_FALSE(IsIdentifierStart(0x104A9)); // Category Nd
+ EXPECT_TRUE(IsIdentifierPart(0x104A9));
// Neither.
- EXPECT_FALSE(IdentifierStart::Is(0x10111)); // Category No
- EXPECT_FALSE(IdentifierPart::Is(0x10111));
- EXPECT_FALSE(IdentifierStart::Is(0x1F4A9)); // Category So
- EXPECT_FALSE(IdentifierPart::Is(0x1F4A9));
+ EXPECT_FALSE(IsIdentifierStart(0x10111)); // Category No
+ EXPECT_FALSE(IsIdentifierPart(0x10111));
+ EXPECT_FALSE(IsIdentifierStart(0x1F4A9)); // Category So
+ EXPECT_FALSE(IsIdentifierPart(0x1F4A9));
}
#endif // V8_INTL_SUPPORT
diff --git a/deps/v8/test/unittests/code-stub-assembler-unittest.cc b/deps/v8/test/unittests/code-stub-assembler-unittest.cc
index dab7e3e3a6..836a18e2e6 100644
--- a/deps/v8/test/unittests/code-stub-assembler-unittest.cc
+++ b/deps/v8/test/unittests/code-stub-assembler-unittest.cc
@@ -20,14 +20,6 @@ namespace c = v8::internal::compiler;
namespace v8 {
namespace internal {
-#ifdef ENABLE_VERIFY_CSA
-#define IS_BITCAST_WORD_TO_TAGGED_SIGNED(x) IsBitcastWordToTaggedSigned(x)
-#define IS_BITCAST_TAGGED_TO_WORD(x) IsBitcastTaggedToWord(x)
-#else
-#define IS_BITCAST_WORD_TO_TAGGED_SIGNED(x) (x)
-#define IS_BITCAST_TAGGED_TO_WORD(x) (x)
-#endif
-
CodeStubAssemblerTestState::CodeStubAssemblerTestState(
CodeStubAssemblerTest* test)
: compiler::CodeAssemblerState(
@@ -39,7 +31,7 @@ TARGET_TEST_F(CodeStubAssemblerTest, SmiTag) {
CodeStubAssemblerForTest m(&state);
Node* value = m.Int32Constant(44);
EXPECT_THAT(m.SmiTag(value),
- IS_BITCAST_WORD_TO_TAGGED_SIGNED(c::IsIntPtrConstant(
+ IsBitcastWordToTaggedSigned(c::IsIntPtrConstant(
static_cast<intptr_t>(44) << (kSmiShiftSize + kSmiTagSize))));
EXPECT_THAT(m.SmiUntag(value),
c::IsIntPtrConstant(static_cast<intptr_t>(44) >>
diff --git a/deps/v8/test/unittests/compiler-dispatcher/compiler-dispatcher-tracer-unittest.cc b/deps/v8/test/unittests/compiler-dispatcher/compiler-dispatcher-tracer-unittest.cc
deleted file mode 100644
index 6ae5c7bd6c..0000000000
--- a/deps/v8/test/unittests/compiler-dispatcher/compiler-dispatcher-tracer-unittest.cc
+++ /dev/null
@@ -1,47 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/compiler-dispatcher/compiler-dispatcher-tracer.h"
-#include "testing/gtest-support.h"
-
-namespace v8 {
-namespace internal {
-
-TEST(CompilerDispatcherTracerTest, EstimateWithoutSamples) {
- CompilerDispatcherTracer tracer(nullptr);
-
- EXPECT_EQ(0.0, tracer.EstimatePrepareInMs());
- EXPECT_EQ(1.0, tracer.EstimateCompileInMs(1));
- EXPECT_EQ(1.0, tracer.EstimateCompileInMs(42));
- EXPECT_EQ(0.0, tracer.EstimateFinalizeInMs());
-}
-
-TEST(CompilerDispatcherTracerTest, Average) {
- CompilerDispatcherTracer tracer(nullptr);
-
- EXPECT_EQ(0.0, tracer.EstimatePrepareInMs());
-
- tracer.RecordPrepare(1.0);
- tracer.RecordPrepare(2.0);
- tracer.RecordPrepare(3.0);
-
- EXPECT_EQ((1.0 + 2.0 + 3.0) / 3, tracer.EstimatePrepareInMs());
-}
-
-TEST(CompilerDispatcherTracerTest, SizeBasedAverage) {
- CompilerDispatcherTracer tracer(nullptr);
-
- EXPECT_EQ(1.0, tracer.EstimateCompileInMs(100));
-
- // All three samples parse 100 units/ms.
- tracer.RecordCompile(1.0, 100);
- tracer.RecordCompile(2.0, 200);
- tracer.RecordCompile(3.0, 300);
-
- EXPECT_EQ(1.0, tracer.EstimateCompileInMs(100));
- EXPECT_EQ(5.0, tracer.EstimateCompileInMs(500));
-}
-
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/test/unittests/compiler-dispatcher/compiler-dispatcher-unittest.cc b/deps/v8/test/unittests/compiler-dispatcher/compiler-dispatcher-unittest.cc
index bfc111aed5..0f918e3a07 100644
--- a/deps/v8/test/unittests/compiler-dispatcher/compiler-dispatcher-unittest.cc
+++ b/deps/v8/test/unittests/compiler-dispatcher/compiler-dispatcher-unittest.cc
@@ -13,9 +13,6 @@
#include "src/ast/scopes.h"
#include "src/base/platform/semaphore.h"
#include "src/base/template-utils.h"
-#include "src/compiler-dispatcher/compiler-dispatcher-job.h"
-#include "src/compiler-dispatcher/compiler-dispatcher-tracer.h"
-#include "src/compiler-dispatcher/unoptimized-compile-job.h"
#include "src/compiler.h"
#include "src/flags.h"
#include "src/handles.h"
@@ -88,9 +85,11 @@ class CompilerDispatcherTest : public TestWithNativeContext {
outer_parse_info->zone(), script_scope, FUNCTION_SCOPE);
function_scope->set_start_position(shared->StartPosition());
function_scope->set_end_position(shared->EndPosition());
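+ // NewFunctionLiteral now takes the body statements as a ScopedPtrList, so
+ // provide an empty list backed by a scratch buffer.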
+ std::vector<void*> pointer_buffer;
+ ScopedPtrList<Statement> statements(&pointer_buffer);
const FunctionLiteral* function_literal =
ast_node_factory.NewFunctionLiteral(
- function_name, function_scope, nullptr, -1, -1, -1,
+ function_name, function_scope, statements, -1, -1, -1,
FunctionLiteral::kNoDuplicateParameters,
FunctionLiteral::kAnonymousExpression,
FunctionLiteral::kShouldEagerCompile, shared->StartPosition(), true,
@@ -115,7 +114,7 @@ class MockPlatform : public v8::Platform {
sem_(0),
tracing_controller_(V8::GetCurrentPlatform()->GetTracingController()) {}
~MockPlatform() override {
- base::LockGuard<base::Mutex> lock(&mutex_);
+ base::MutexGuard lock(&mutex_);
EXPECT_TRUE(foreground_tasks_.empty());
EXPECT_TRUE(worker_tasks_.empty());
EXPECT_TRUE(idle_task_ == nullptr);
@@ -129,7 +128,7 @@ class MockPlatform : public v8::Platform {
}
void CallOnWorkerThread(std::unique_ptr<Task> task) override {
- base::LockGuard<base::Mutex> lock(&mutex_);
+ base::MutexGuard lock(&mutex_);
worker_tasks_.push_back(std::move(task));
}
@@ -139,7 +138,7 @@ class MockPlatform : public v8::Platform {
}
void CallOnForegroundThread(v8::Isolate* isolate, Task* task) override {
- base::LockGuard<base::Mutex> lock(&mutex_);
+ base::MutexGuard lock(&mutex_);
foreground_tasks_.push_back(std::unique_ptr<Task>(task));
}
@@ -150,7 +149,7 @@ class MockPlatform : public v8::Platform {
void CallIdleOnForegroundThread(v8::Isolate* isolate,
IdleTask* task) override {
- base::LockGuard<base::Mutex> lock(&mutex_);
+ base::MutexGuard lock(&mutex_);
ASSERT_TRUE(idle_task_ == nullptr);
idle_task_ = task;
}
@@ -174,7 +173,7 @@ class MockPlatform : public v8::Platform {
time_step_ = time_step;
IdleTask* task;
{
- base::LockGuard<base::Mutex> lock(&mutex_);
+ base::MutexGuard lock(&mutex_);
task = idle_task_;
ASSERT_TRUE(idle_task_ != nullptr);
idle_task_ = nullptr;
@@ -184,24 +183,24 @@ class MockPlatform : public v8::Platform {
}
bool IdleTaskPending() {
- base::LockGuard<base::Mutex> lock(&mutex_);
+ base::MutexGuard lock(&mutex_);
return idle_task_;
}
bool WorkerTasksPending() {
- base::LockGuard<base::Mutex> lock(&mutex_);
+ base::MutexGuard lock(&mutex_);
return !worker_tasks_.empty();
}
bool ForegroundTasksPending() {
- base::LockGuard<base::Mutex> lock(&mutex_);
+ base::MutexGuard lock(&mutex_);
return !foreground_tasks_.empty();
}
void RunWorkerTasksAndBlock(Platform* platform) {
std::vector<std::unique_ptr<Task>> tasks;
{
- base::LockGuard<base::Mutex> lock(&mutex_);
+ base::MutexGuard lock(&mutex_);
tasks.swap(worker_tasks_);
}
platform->CallOnWorkerThread(
@@ -212,7 +211,7 @@ class MockPlatform : public v8::Platform {
void RunWorkerTasks(Platform* platform) {
std::vector<std::unique_ptr<Task>> tasks;
{
- base::LockGuard<base::Mutex> lock(&mutex_);
+ base::MutexGuard lock(&mutex_);
tasks.swap(worker_tasks_);
}
platform->CallOnWorkerThread(
@@ -222,7 +221,7 @@ class MockPlatform : public v8::Platform {
void RunForegroundTasks() {
std::vector<std::unique_ptr<Task>> tasks;
{
- base::LockGuard<base::Mutex> lock(&mutex_);
+ base::MutexGuard lock(&mutex_);
tasks.swap(foreground_tasks_);
}
for (auto& task : tasks) {
@@ -235,7 +234,7 @@ class MockPlatform : public v8::Platform {
void ClearWorkerTasks() {
std::vector<std::unique_ptr<Task>> tasks;
{
- base::LockGuard<base::Mutex> lock(&mutex_);
+ base::MutexGuard lock(&mutex_);
tasks.swap(worker_tasks_);
}
}
@@ -243,13 +242,13 @@ class MockPlatform : public v8::Platform {
void ClearForegroundTasks() {
std::vector<std::unique_ptr<Task>> tasks;
{
- base::LockGuard<base::Mutex> lock(&mutex_);
+ base::MutexGuard lock(&mutex_);
tasks.swap(foreground_tasks_);
}
}
void ClearIdleTask() {
- base::LockGuard<base::Mutex> lock(&mutex_);
+ base::MutexGuard lock(&mutex_);
ASSERT_TRUE(idle_task_ != nullptr);
delete idle_task_;
idle_task_ = nullptr;
@@ -286,7 +285,7 @@ class MockPlatform : public v8::Platform {
: platform_(platform) {}
void PostTask(std::unique_ptr<v8::Task> task) override {
- base::LockGuard<base::Mutex> lock(&platform_->mutex_);
+ base::MutexGuard lock(&platform_->mutex_);
platform_->foreground_tasks_.push_back(std::move(task));
}
@@ -297,7 +296,7 @@ class MockPlatform : public v8::Platform {
void PostIdleTask(std::unique_ptr<IdleTask> task) override {
DCHECK(IdleTasksEnabled());
- base::LockGuard<base::Mutex> lock(&platform_->mutex_);
+ base::MutexGuard lock(&platform_->mutex_);
ASSERT_TRUE(platform_->idle_task_ == nullptr);
platform_->idle_task_ = task.release();
}
@@ -330,6 +329,7 @@ class MockPlatform : public v8::Platform {
TEST_F(CompilerDispatcherTest, Construct) {
MockPlatform platform;
CompilerDispatcher dispatcher(i_isolate(), &platform, FLAG_stack_size);
+ dispatcher.AbortAll();
}
TEST_F(CompilerDispatcherTest, IsEnqueued) {
@@ -352,12 +352,13 @@ TEST_F(CompilerDispatcherTest, IsEnqueued) {
ASSERT_TRUE(dispatcher.IsEnqueued(*job_id));
ASSERT_TRUE(dispatcher.IsEnqueued(shared));
- dispatcher.AbortAll(BlockingBehavior::kBlock);
+ dispatcher.AbortAll();
ASSERT_FALSE(dispatcher.IsEnqueued(*job_id));
ASSERT_FALSE(dispatcher.IsEnqueued(shared));
- ASSERT_TRUE(platform.IdleTaskPending());
+
+ ASSERT_FALSE(platform.IdleTaskPending());
+ ASSERT_TRUE(platform.WorkerTasksPending());
platform.ClearWorkerTasks();
- platform.ClearIdleTask();
}
TEST_F(CompilerDispatcherTest, FinishNow) {
@@ -377,12 +378,13 @@ TEST_F(CompilerDispatcherTest, FinishNow) {
ASSERT_FALSE(dispatcher.IsEnqueued(*job_id));
ASSERT_FALSE(dispatcher.IsEnqueued(shared));
ASSERT_TRUE(shared->is_compiled());
- ASSERT_TRUE(platform.IdleTaskPending());
+
platform.ClearWorkerTasks();
- platform.ClearIdleTask();
+ ASSERT_FALSE(platform.IdleTaskPending());
+ dispatcher.AbortAll();
}
-TEST_F(CompilerDispatcherTest, IdleTask) {
+TEST_F(CompilerDispatcherTest, CompileAndFinalize) {
MockPlatform platform;
CompilerDispatcher dispatcher(i_isolate(), &platform, FLAG_stack_size);
@@ -393,11 +395,10 @@ TEST_F(CompilerDispatcherTest, IdleTask) {
base::Optional<CompilerDispatcher::JobId> job_id =
EnqueueUnoptimizedCompileJob(&dispatcher, i_isolate(), shared);
- ASSERT_TRUE(platform.IdleTaskPending());
+ ASSERT_TRUE(platform.WorkerTasksPending());
- // Since time doesn't progress on the MockPlatform, this is enough idle time
- // to finish compiling the function.
- platform.RunIdleTask(1000.0, 0.0);
+ // Run compile steps.
+ platform.RunWorkerTasksAndBlock(V8::GetCurrentPlatform());
// Since we haven't yet registered the SFI for the job, it should still be
// enqueued and waiting.
@@ -405,7 +406,7 @@ TEST_F(CompilerDispatcherTest, IdleTask) {
ASSERT_FALSE(shared->is_compiled());
ASSERT_FALSE(platform.IdleTaskPending());
- // Register SFI, which should schedule another idle task to complete the
+ // Register SFI, which should schedule another idle task to finalize the
// compilation.
dispatcher.RegisterSharedFunctionInfo(*job_id, *shared);
ASSERT_TRUE(platform.IdleTaskPending());
@@ -413,10 +414,12 @@ TEST_F(CompilerDispatcherTest, IdleTask) {
ASSERT_FALSE(dispatcher.IsEnqueued(shared));
ASSERT_TRUE(shared->is_compiled());
- platform.ClearWorkerTasks();
+ ASSERT_FALSE(platform.WorkerTasksPending());
+ ASSERT_FALSE(platform.IdleTaskPending());
+ dispatcher.AbortAll();
}
-TEST_F(CompilerDispatcherTest, IdleTaskSmallIdleTime) {
+TEST_F(CompilerDispatcherTest, IdleTaskNoIdleTime) {
MockPlatform platform;
CompilerDispatcher dispatcher(i_isolate(), &platform, FLAG_stack_size);
@@ -429,25 +432,24 @@ TEST_F(CompilerDispatcherTest, IdleTaskSmallIdleTime) {
EnqueueUnoptimizedCompileJob(&dispatcher, i_isolate(), shared);
dispatcher.RegisterSharedFunctionInfo(*job_id, *shared);
- ASSERT_TRUE(platform.IdleTaskPending());
+ // Run compile steps.
+ platform.RunWorkerTasksAndBlock(V8::GetCurrentPlatform());
- // The job should be scheduled for the main thread.
+ // Job should be ready to finalize.
ASSERT_EQ(dispatcher.jobs_.size(), 1u);
- ASSERT_EQ(UnoptimizedCompileJob::Status::kInitial,
- dispatcher.jobs_.begin()->second->status());
+ ASSERT_TRUE(dispatcher.jobs_.begin()->second->has_run);
+ ASSERT_TRUE(platform.IdleTaskPending());
- // Only grant a little idle time and have time advance beyond it in one step.
- platform.RunIdleTask(2.0, 1.0);
+ // Grant no idle time and have time advance beyond it in one step.
+ platform.RunIdleTask(0.0, 1.0);
ASSERT_TRUE(dispatcher.IsEnqueued(shared));
ASSERT_FALSE(shared->is_compiled());
ASSERT_TRUE(platform.IdleTaskPending());
- // The job should be still scheduled for the main thread, but ready for
- // finalization.
+ // Job should be ready to finalize.
ASSERT_EQ(dispatcher.jobs_.size(), 1u);
- ASSERT_EQ(UnoptimizedCompileJob::Status::kReadyToFinalize,
- dispatcher.jobs_.begin()->second->status());
+ ASSERT_TRUE(dispatcher.jobs_.begin()->second->has_run);
// Now grant a lot of idle time and freeze time.
platform.RunIdleTask(1000.0, 0.0);
@@ -455,7 +457,58 @@ TEST_F(CompilerDispatcherTest, IdleTaskSmallIdleTime) {
ASSERT_FALSE(dispatcher.IsEnqueued(shared));
ASSERT_TRUE(shared->is_compiled());
ASSERT_FALSE(platform.IdleTaskPending());
- platform.ClearWorkerTasks();
+ ASSERT_FALSE(platform.WorkerTasksPending());
+ dispatcher.AbortAll();
+}
+
+TEST_F(CompilerDispatcherTest, IdleTaskSmallIdleTime) {
+ MockPlatform platform;
+ CompilerDispatcher dispatcher(i_isolate(), &platform, FLAG_stack_size);
+
+ Handle<SharedFunctionInfo> shared_1 =
+ test::CreateSharedFunctionInfo(i_isolate(), nullptr);
+ ASSERT_FALSE(shared_1->is_compiled());
+ Handle<SharedFunctionInfo> shared_2 =
+ test::CreateSharedFunctionInfo(i_isolate(), nullptr);
+ ASSERT_FALSE(shared_2->is_compiled());
+
+ base::Optional<CompilerDispatcher::JobId> job_id_1 =
+ EnqueueUnoptimizedCompileJob(&dispatcher, i_isolate(), shared_1);
+ base::Optional<CompilerDispatcher::JobId> job_id_2 =
+ EnqueueUnoptimizedCompileJob(&dispatcher, i_isolate(), shared_2);
+
+ dispatcher.RegisterSharedFunctionInfo(*job_id_1, *shared_1);
+ dispatcher.RegisterSharedFunctionInfo(*job_id_2, *shared_2);
+
+ // Run compile steps.
+ platform.RunWorkerTasksAndBlock(V8::GetCurrentPlatform());
+
+ // Both jobs should be ready to finalize.
+ ASSERT_EQ(dispatcher.jobs_.size(), 2u);
+ ASSERT_TRUE(dispatcher.jobs_.begin()->second->has_run);
+ ASSERT_TRUE((++dispatcher.jobs_.begin())->second->has_run);
+ ASSERT_TRUE(platform.IdleTaskPending());
+
+ // Grant a small amount of idle time and have time advance beyond it in one
+ // step.
+ platform.RunIdleTask(2.0, 1.0);
+
+ // Only one of the jobs should be finalized.
+ ASSERT_EQ(dispatcher.jobs_.size(), 1u);
+ ASSERT_TRUE(dispatcher.jobs_.begin()->second->has_run);
+ ASSERT_NE(dispatcher.IsEnqueued(shared_1), dispatcher.IsEnqueued(shared_2));
+ ASSERT_NE(shared_1->is_compiled(), shared_2->is_compiled());
+ ASSERT_TRUE(platform.IdleTaskPending());
+
+ // Now grant a lot of idle time and freeze time.
+ platform.RunIdleTask(1000.0, 0.0);
+
+ ASSERT_FALSE(dispatcher.IsEnqueued(shared_1) ||
+ dispatcher.IsEnqueued(shared_2));
+ ASSERT_TRUE(shared_1->is_compiled() && shared_2->is_compiled());
+ ASSERT_FALSE(platform.IdleTaskPending());
+ ASSERT_FALSE(platform.WorkerTasksPending());
+ dispatcher.AbortAll();
}
TEST_F(CompilerDispatcherTest, IdleTaskException) {
@@ -478,49 +531,15 @@ TEST_F(CompilerDispatcherTest, IdleTaskException) {
EnqueueUnoptimizedCompileJob(&dispatcher, i_isolate(), shared);
dispatcher.RegisterSharedFunctionInfo(*job_id, *shared);
- // Since time doesn't progress on the MockPlatform, this is enough idle time
- // to finish compiling the function.
+ // Run compile steps and finalize.
+ platform.RunWorkerTasksAndBlock(V8::GetCurrentPlatform());
platform.RunIdleTask(1000.0, 0.0);
ASSERT_FALSE(dispatcher.IsEnqueued(shared));
ASSERT_FALSE(shared->is_compiled());
ASSERT_FALSE(i_isolate()->has_pending_exception());
platform.ClearWorkerTasks();
-}
-
-TEST_F(CompilerDispatcherTest, CompileOnBackgroundThread) {
- MockPlatform platform;
- CompilerDispatcher dispatcher(i_isolate(), &platform, FLAG_stack_size);
-
- Handle<SharedFunctionInfo> shared =
- test::CreateSharedFunctionInfo(i_isolate(), nullptr);
- ASSERT_FALSE(shared->is_compiled());
-
- base::Optional<CompilerDispatcher::JobId> job_id =
- EnqueueUnoptimizedCompileJob(&dispatcher, i_isolate(), shared);
- dispatcher.RegisterSharedFunctionInfo(*job_id, *shared);
-
- ASSERT_TRUE(dispatcher.IsEnqueued(shared));
- ASSERT_FALSE(shared->is_compiled());
- ASSERT_EQ(dispatcher.jobs_.size(), 1u);
- ASSERT_EQ(UnoptimizedCompileJob::Status::kInitial,
- dispatcher.jobs_.begin()->second->status());
- ASSERT_TRUE(platform.WorkerTasksPending());
-
- platform.RunWorkerTasksAndBlock(V8::GetCurrentPlatform());
-
- ASSERT_TRUE(platform.IdleTaskPending());
- ASSERT_FALSE(platform.WorkerTasksPending());
- ASSERT_EQ(UnoptimizedCompileJob::Status::kReadyToFinalize,
- dispatcher.jobs_.begin()->second->status());
-
- // Now grant a lot of idle time and freeze time.
- platform.RunIdleTask(1000.0, 0.0);
-
- ASSERT_FALSE(dispatcher.IsEnqueued(shared));
- ASSERT_TRUE(shared->is_compiled());
- ASSERT_FALSE(platform.IdleTaskPending());
- ASSERT_FALSE(platform.WorkerTasksPending());
+ dispatcher.AbortAll();
}
TEST_F(CompilerDispatcherTest, FinishNowWithWorkerTask) {
@@ -536,14 +555,12 @@ TEST_F(CompilerDispatcherTest, FinishNowWithWorkerTask) {
dispatcher.RegisterSharedFunctionInfo(*job_id, *shared);
ASSERT_EQ(dispatcher.jobs_.size(), 1u);
- ASSERT_EQ(UnoptimizedCompileJob::Status::kInitial,
- dispatcher.jobs_.begin()->second->status());
+ ASSERT_FALSE(dispatcher.jobs_.begin()->second->has_run);
ASSERT_TRUE(dispatcher.IsEnqueued(shared));
ASSERT_FALSE(shared->is_compiled());
ASSERT_EQ(dispatcher.jobs_.size(), 1u);
- ASSERT_EQ(UnoptimizedCompileJob::Status::kInitial,
- dispatcher.jobs_.begin()->second->status());
+ ASSERT_FALSE(dispatcher.jobs_.begin()->second->has_run);
ASSERT_TRUE(platform.WorkerTasksPending());
// This does not block, but races with the FinishNow() call below.
@@ -555,6 +572,7 @@ TEST_F(CompilerDispatcherTest, FinishNowWithWorkerTask) {
ASSERT_TRUE(shared->is_compiled());
if (platform.IdleTaskPending()) platform.ClearIdleTask();
ASSERT_FALSE(platform.WorkerTasksPending());
+ dispatcher.AbortAll();
}
TEST_F(CompilerDispatcherTest, IdleTaskMultipleJobs) {
@@ -579,15 +597,17 @@ TEST_F(CompilerDispatcherTest, IdleTaskMultipleJobs) {
ASSERT_TRUE(dispatcher.IsEnqueued(shared_1));
ASSERT_TRUE(dispatcher.IsEnqueued(shared_2));
- // Since time doesn't progress on the MockPlatform, this is enough idle time
- // to finish compiling the function.
+ // Run compile steps and finalize.
+ platform.RunWorkerTasksAndBlock(V8::GetCurrentPlatform());
platform.RunIdleTask(1000.0, 0.0);
ASSERT_FALSE(dispatcher.IsEnqueued(shared_1));
ASSERT_FALSE(dispatcher.IsEnqueued(shared_2));
ASSERT_TRUE(shared_1->is_compiled());
ASSERT_TRUE(shared_2->is_compiled());
- platform.ClearWorkerTasks();
+ ASSERT_FALSE(platform.IdleTaskPending());
+ ASSERT_FALSE(platform.WorkerTasksPending());
+ dispatcher.AbortAll();
}
TEST_F(CompilerDispatcherTest, FinishNowException) {
@@ -617,11 +637,12 @@ TEST_F(CompilerDispatcherTest, FinishNowException) {
ASSERT_TRUE(i_isolate()->has_pending_exception());
i_isolate()->clear_pending_exception();
- platform.ClearIdleTask();
+ ASSERT_FALSE(platform.IdleTaskPending());
platform.ClearWorkerTasks();
+ dispatcher.AbortAll();
}
-TEST_F(CompilerDispatcherTest, AsyncAbortAllPendingWorkerTask) {
+TEST_F(CompilerDispatcherTest, AbortJobNotStarted) {
MockPlatform platform;
CompilerDispatcher dispatcher(i_isolate(), &platform, FLAG_stack_size);
@@ -631,107 +652,27 @@ TEST_F(CompilerDispatcherTest, AsyncAbortAllPendingWorkerTask) {
base::Optional<CompilerDispatcher::JobId> job_id =
EnqueueUnoptimizedCompileJob(&dispatcher, i_isolate(), shared);
- dispatcher.RegisterSharedFunctionInfo(*job_id, *shared);
ASSERT_EQ(dispatcher.jobs_.size(), 1u);
- ASSERT_EQ(UnoptimizedCompileJob::Status::kInitial,
- dispatcher.jobs_.begin()->second->status());
- ASSERT_TRUE(dispatcher.IsEnqueued(shared));
- ASSERT_FALSE(shared->is_compiled());
- ASSERT_TRUE(platform.WorkerTasksPending());
+ ASSERT_FALSE(dispatcher.jobs_.begin()->second->has_run);
- // The background task hasn't yet started, so we can just cancel it.
- dispatcher.AbortAll(BlockingBehavior::kDontBlock);
- ASSERT_FALSE(platform.ForegroundTasksPending());
-
- ASSERT_FALSE(dispatcher.IsEnqueued(shared));
+ ASSERT_TRUE(dispatcher.IsEnqueued(*job_id));
ASSERT_FALSE(shared->is_compiled());
-
- platform.RunWorkerTasksAndBlock(V8::GetCurrentPlatform());
-
- if (platform.IdleTaskPending()) platform.ClearIdleTask();
- ASSERT_FALSE(platform.WorkerTasksPending());
- ASSERT_FALSE(platform.ForegroundTasksPending());
-}
-
-TEST_F(CompilerDispatcherTest, AsyncAbortAllRunningWorkerTask) {
- MockPlatform platform;
- CompilerDispatcher dispatcher(i_isolate(), &platform, FLAG_stack_size);
-
- Handle<SharedFunctionInfo> shared_1 =
- test::CreateSharedFunctionInfo(i_isolate(), nullptr);
- ASSERT_FALSE(shared_1->is_compiled());
- Handle<SharedFunctionInfo> shared_2 =
- test::CreateSharedFunctionInfo(i_isolate(), nullptr);
- ASSERT_FALSE(shared_2->is_compiled());
-
- base::Optional<CompilerDispatcher::JobId> job_id_1 =
- EnqueueUnoptimizedCompileJob(&dispatcher, i_isolate(), shared_1);
- dispatcher.RegisterSharedFunctionInfo(*job_id_1, *shared_1);
-
ASSERT_EQ(dispatcher.jobs_.size(), 1u);
- ASSERT_EQ(UnoptimizedCompileJob::Status::kInitial,
- dispatcher.jobs_.begin()->second->status());
- ASSERT_TRUE(dispatcher.IsEnqueued(shared_1));
- ASSERT_FALSE(shared_1->is_compiled());
- ASSERT_TRUE(platform.IdleTaskPending());
+ ASSERT_FALSE(dispatcher.jobs_.begin()->second->has_run);
ASSERT_TRUE(platform.WorkerTasksPending());
- // Kick off background tasks and freeze them.
- dispatcher.block_for_testing_.SetValue(true);
- platform.RunWorkerTasks(V8::GetCurrentPlatform());
+ dispatcher.AbortJob(*job_id);
- // Busy loop until the background task started running.
- while (dispatcher.block_for_testing_.Value()) {
- }
- dispatcher.AbortAll(BlockingBehavior::kDontBlock);
- ASSERT_TRUE(platform.ForegroundTasksPending());
-
- // We can't schedule new tasks while we're aborting.
- base::Optional<CompilerDispatcher::JobId> job_id_2 =
- EnqueueUnoptimizedCompileJob(&dispatcher, i_isolate(), shared_2);
- ASSERT_FALSE(job_id_2);
-
- // Run the first AbortTask. Since the background job is still pending, it
- // can't do anything.
- platform.RunForegroundTasks();
- {
- base::LockGuard<base::Mutex> lock(&dispatcher.mutex_);
- ASSERT_TRUE(dispatcher.abort_);
- }
-
- // Release background task.
- dispatcher.semaphore_for_testing_.Signal();
-
- // Busy loop until the background task scheduled another AbortTask task.
- while (!platform.ForegroundTasksPending()) {
- }
-
- platform.RunForegroundTasks();
- ASSERT_TRUE(dispatcher.jobs_.empty());
- {
- base::LockGuard<base::Mutex> lock(&dispatcher.mutex_);
- ASSERT_FALSE(dispatcher.abort_);
- }
-
- ASSERT_TRUE(platform.IdleTaskPending());
- platform.RunIdleTask(5.0, 1.0);
- ASSERT_FALSE(platform.WorkerTasksPending());
- ASSERT_FALSE(platform.ForegroundTasksPending());
-
- // Now it's possible to enqueue new functions again.
- job_id_2 = EnqueueUnoptimizedCompileJob(&dispatcher, i_isolate(), shared_2);
- ASSERT_TRUE(job_id_2);
- ASSERT_TRUE(platform.IdleTaskPending());
- ASSERT_TRUE(platform.WorkerTasksPending());
- ASSERT_FALSE(platform.ForegroundTasksPending());
-
- dispatcher.AbortAll(BlockingBehavior::kBlock);
+ // Aborting removes the job from the queue.
+ ASSERT_FALSE(dispatcher.IsEnqueued(*job_id));
+ ASSERT_FALSE(shared->is_compiled());
+ ASSERT_FALSE(platform.IdleTaskPending());
platform.ClearWorkerTasks();
- platform.ClearIdleTask();
+ dispatcher.AbortAll();
}
-TEST_F(CompilerDispatcherTest, FinishNowDuringAbortAll) {
+TEST_F(CompilerDispatcherTest, AbortJobAlreadyStarted) {
MockPlatform platform;
CompilerDispatcher dispatcher(i_isolate(), &platform, FLAG_stack_size);
@@ -741,154 +682,59 @@ TEST_F(CompilerDispatcherTest, FinishNowDuringAbortAll) {
base::Optional<CompilerDispatcher::JobId> job_id =
EnqueueUnoptimizedCompileJob(&dispatcher, i_isolate(), shared);
- dispatcher.RegisterSharedFunctionInfo(*job_id, *shared);
- ASSERT_TRUE(dispatcher.IsEnqueued(shared));
+ ASSERT_EQ(dispatcher.jobs_.size(), 1u);
+ ASSERT_FALSE(dispatcher.jobs_.begin()->second->has_run);
+
+ ASSERT_TRUE(dispatcher.IsEnqueued(*job_id));
ASSERT_FALSE(shared->is_compiled());
ASSERT_EQ(dispatcher.jobs_.size(), 1u);
- ASSERT_EQ(UnoptimizedCompileJob::Status::kInitial,
- dispatcher.jobs_.begin()->second->status());
- ASSERT_TRUE(platform.IdleTaskPending());
+ ASSERT_FALSE(dispatcher.jobs_.begin()->second->has_run);
ASSERT_TRUE(platform.WorkerTasksPending());
- // Kick off background tasks and freeze them.
- dispatcher.block_for_testing_.SetValue(true);
- platform.RunWorkerTasks(V8::GetCurrentPlatform());
-
- // Busy loop until the background task started running.
- while (dispatcher.block_for_testing_.Value()) {
- }
- dispatcher.AbortAll(BlockingBehavior::kDontBlock);
- ASSERT_TRUE(platform.ForegroundTasksPending());
-
- // Run the first AbortTask. Since the background job is still pending, it
- // can't do anything.
- platform.RunForegroundTasks();
+ // Have dispatcher block on the background thread when running the job.
{
base::LockGuard<base::Mutex> lock(&dispatcher.mutex_);
- ASSERT_TRUE(dispatcher.abort_);
+ dispatcher.block_for_testing_.SetValue(true);
}
- // Run the idle task, which should have already been canceled and won't do
- // anything.
- ASSERT_TRUE(platform.IdleTaskPending());
- platform.RunIdleTask(5.0, 1.0);
-
- // While the background thread holds on to a job, it is still enqueued.
- ASSERT_TRUE(dispatcher.IsEnqueued(shared));
-
- // Release background task.
- dispatcher.semaphore_for_testing_.Signal();
-
- // Force the compilation to finish, even while aborting.
- ASSERT_TRUE(dispatcher.FinishNow(shared));
- ASSERT_TRUE(dispatcher.jobs_.empty());
-
- // Busy wait for the background task to finish.
- for (;;) {
- base::LockGuard<base::Mutex> lock(&dispatcher.mutex_);
- if (dispatcher.num_worker_tasks_ == 0) {
- break;
- }
+ // Start background thread and wait until it is about to run the job.
+ platform.RunWorkerTasks(V8::GetCurrentPlatform());
+ while (dispatcher.block_for_testing_.Value()) {
}
- ASSERT_TRUE(platform.ForegroundTasksPending());
- ASSERT_FALSE(platform.IdleTaskPending());
- ASSERT_FALSE(platform.WorkerTasksPending());
+ // Now abort while dispatcher is in the middle of running the job.
+ dispatcher.AbortJob(*job_id);
- platform.RunForegroundTasks();
+ // Unblock background thread, and wait for job to complete.
{
base::LockGuard<base::Mutex> lock(&dispatcher.mutex_);
- ASSERT_FALSE(dispatcher.abort_);
+ dispatcher.main_thread_blocking_on_job_ =
+ dispatcher.jobs_.begin()->second.get();
+ dispatcher.semaphore_for_testing_.Signal();
+ while (dispatcher.main_thread_blocking_on_job_ != nullptr) {
+ dispatcher.main_thread_blocking_signal_.Wait(&dispatcher.mutex_);
+ }
}
-}
-TEST_F(CompilerDispatcherTest, MemoryPressure) {
- MockPlatform platform;
- CompilerDispatcher dispatcher(i_isolate(), &platform, FLAG_stack_size);
-
- Handle<SharedFunctionInfo> shared =
- test::CreateSharedFunctionInfo(i_isolate(), nullptr);
+ // Job should have finished running and then been aborted.
+ ASSERT_TRUE(dispatcher.IsEnqueued(*job_id));
ASSERT_FALSE(shared->is_compiled());
+ ASSERT_EQ(dispatcher.jobs_.size(), 1u);
+ ASSERT_TRUE(dispatcher.jobs_.begin()->second->has_run);
+ ASSERT_TRUE(dispatcher.jobs_.begin()->second->aborted);
+ ASSERT_FALSE(platform.WorkerTasksPending());
+ ASSERT_TRUE(platform.IdleTaskPending());
- // Can't enqueue tasks under memory pressure.
- dispatcher.MemoryPressureNotification(v8::MemoryPressureLevel::kCritical,
- true);
- base::Optional<CompilerDispatcher::JobId> job_id =
- EnqueueUnoptimizedCompileJob(&dispatcher, i_isolate(), shared);
- ASSERT_FALSE(job_id);
-
- dispatcher.MemoryPressureNotification(v8::MemoryPressureLevel::kNone, true);
-
- job_id = EnqueueUnoptimizedCompileJob(&dispatcher, i_isolate(), shared);
- ASSERT_TRUE(job_id);
+ // Run the pending idle task.
+ platform.RunIdleTask(1000.0, 0.0);
- // Memory pressure cancels current jobs.
- dispatcher.MemoryPressureNotification(v8::MemoryPressureLevel::kCritical,
- true);
+ // Aborting removes the SFI from the queue.
ASSERT_FALSE(dispatcher.IsEnqueued(*job_id));
- platform.ClearIdleTask();
- platform.ClearWorkerTasks();
-}
-
-namespace {
-
-class PressureNotificationTask : public CancelableTask {
- public:
- PressureNotificationTask(Isolate* isolate, CompilerDispatcher* dispatcher,
- base::Semaphore* sem)
- : CancelableTask(isolate), dispatcher_(dispatcher), sem_(sem) {}
- ~PressureNotificationTask() override = default;
-
- void RunInternal() override {
- dispatcher_->MemoryPressureNotification(v8::MemoryPressureLevel::kCritical,
- false);
- sem_->Signal();
- }
-
- private:
- CompilerDispatcher* dispatcher_;
- base::Semaphore* sem_;
-
- DISALLOW_COPY_AND_ASSIGN(PressureNotificationTask);
-};
-
-} // namespace
-
-TEST_F(CompilerDispatcherTest, MemoryPressureFromBackground) {
- MockPlatform platform;
- CompilerDispatcher dispatcher(i_isolate(), &platform, FLAG_stack_size);
-
- Handle<SharedFunctionInfo> shared =
- test::CreateSharedFunctionInfo(i_isolate(), nullptr);
- ASSERT_FALSE(shared->is_compiled());
-
- base::Optional<CompilerDispatcher::JobId> job_id =
- EnqueueUnoptimizedCompileJob(&dispatcher, i_isolate(), shared);
- dispatcher.RegisterSharedFunctionInfo(*job_id, *shared);
-
- base::Semaphore sem(0);
- V8::GetCurrentPlatform()->CallOnWorkerThread(
- base::make_unique<PressureNotificationTask>(i_isolate(), &dispatcher,
- &sem));
-
- sem.Wait();
-
- // A memory pressure task is pending, and running it will cancel the job.
- ASSERT_TRUE(platform.ForegroundTasksPending());
- ASSERT_TRUE(dispatcher.IsEnqueued(shared));
- platform.RunForegroundTasks();
- ASSERT_FALSE(dispatcher.IsEnqueued(shared));
ASSERT_FALSE(shared->is_compiled());
-
- // Since the AbortAll() call is made from a task, AbortAll thinks that there
- // is at least one task running, and fires of an AbortTask to be safe.
- ASSERT_TRUE(platform.ForegroundTasksPending());
- platform.RunForegroundTasks();
- ASSERT_FALSE(platform.ForegroundTasksPending());
-
- platform.ClearIdleTask();
- platform.ClearWorkerTasks();
+ ASSERT_FALSE(platform.IdleTaskPending());
+ ASSERT_FALSE(platform.WorkerTasksPending());
+ dispatcher.AbortAll();
}
TEST_F(CompilerDispatcherTest, CompileLazyFinishesDispatcherJob) {
@@ -972,16 +818,14 @@ TEST_F(CompilerDispatcherTest, CompileMultipleOnBackgroundThread) {
dispatcher.RegisterSharedFunctionInfo(*job_id_2, *shared_2);
ASSERT_EQ(dispatcher.jobs_.size(), 2u);
- ASSERT_EQ(UnoptimizedCompileJob::Status::kInitial,
- dispatcher.jobs_.begin()->second->status());
- ASSERT_EQ(UnoptimizedCompileJob::Status::kInitial,
- (++dispatcher.jobs_.begin())->second->status());
+ ASSERT_FALSE(dispatcher.jobs_.begin()->second->has_run);
+ ASSERT_FALSE((++dispatcher.jobs_.begin())->second->has_run);
ASSERT_TRUE(dispatcher.IsEnqueued(shared_1));
ASSERT_TRUE(dispatcher.IsEnqueued(shared_2));
ASSERT_FALSE(shared_1->is_compiled());
ASSERT_FALSE(shared_2->is_compiled());
- ASSERT_TRUE(platform.IdleTaskPending());
+ ASSERT_FALSE(platform.IdleTaskPending());
ASSERT_TRUE(platform.WorkerTasksPending());
platform.RunWorkerTasksAndBlock(V8::GetCurrentPlatform());
@@ -989,10 +833,8 @@ TEST_F(CompilerDispatcherTest, CompileMultipleOnBackgroundThread) {
ASSERT_TRUE(platform.IdleTaskPending());
ASSERT_FALSE(platform.WorkerTasksPending());
ASSERT_EQ(dispatcher.jobs_.size(), 2u);
- ASSERT_EQ(UnoptimizedCompileJob::Status::kReadyToFinalize,
- dispatcher.jobs_.begin()->second->status());
- ASSERT_EQ(UnoptimizedCompileJob::Status::kReadyToFinalize,
- (++dispatcher.jobs_.begin())->second->status());
+ ASSERT_TRUE(dispatcher.jobs_.begin()->second->has_run);
+ ASSERT_TRUE((++dispatcher.jobs_.begin())->second->has_run);
// Now grant a lot of idle time and freeze time.
platform.RunIdleTask(1000.0, 0.0);
@@ -1002,6 +844,7 @@ TEST_F(CompilerDispatcherTest, CompileMultipleOnBackgroundThread) {
ASSERT_TRUE(shared_1->is_compiled());
ASSERT_TRUE(shared_2->is_compiled());
ASSERT_FALSE(platform.IdleTaskPending());
+ dispatcher.AbortAll();
}
} // namespace internal
diff --git a/deps/v8/test/unittests/compiler-dispatcher/optimizing-compile-dispatcher-unittest.cc b/deps/v8/test/unittests/compiler-dispatcher/optimizing-compile-dispatcher-unittest.cc
index b796e457d4..d9e1731dc1 100644
--- a/deps/v8/test/unittests/compiler-dispatcher/optimizing-compile-dispatcher-unittest.cc
+++ b/deps/v8/test/unittests/compiler-dispatcher/optimizing-compile-dispatcher-unittest.cc
@@ -73,6 +73,9 @@ TEST_F(OptimizingCompileDispatcherTest, Construct) {
TEST_F(OptimizingCompileDispatcherTest, NonBlockingFlush) {
Handle<JSFunction> fun =
RunJS<JSFunction>("function f() { function g() {}; return g;}; f();");
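+ // Ensure the function is already compiled before enqueueing the blocking
+ // optimizing compile job below.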
+ IsCompiledScope is_compiled_scope;
+ ASSERT_TRUE(
+ Compiler::Compile(fun, Compiler::CLEAR_EXCEPTION, &is_compiled_scope));
BlockingCompilationJob* job = new BlockingCompilationJob(i_isolate(), fun);
OptimizingCompileDispatcher dispatcher(i_isolate());
diff --git a/deps/v8/test/unittests/compiler/arm/instruction-selector-arm-unittest.cc b/deps/v8/test/unittests/compiler/arm/instruction-selector-arm-unittest.cc
index 011cc67c81..77a1587f0b 100644
--- a/deps/v8/test/unittests/compiler/arm/instruction-selector-arm-unittest.cc
+++ b/deps/v8/test/unittests/compiler/arm/instruction-selector-arm-unittest.cc
@@ -4,7 +4,7 @@
#include <limits>
-#include "test/unittests/compiler/instruction-selector-unittest.h"
+#include "test/unittests/compiler/backend/instruction-selector-unittest.h"
#include "src/objects-inl.h"
diff --git a/deps/v8/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc b/deps/v8/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc
index aa54abe320..bca04a5cf3 100644
--- a/deps/v8/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc
+++ b/deps/v8/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc
@@ -3,7 +3,7 @@
// found in the LICENSE file.
#include "src/objects-inl.h"
-#include "test/unittests/compiler/instruction-selector-unittest.h"
+#include "test/unittests/compiler/backend/instruction-selector-unittest.h"
namespace v8 {
namespace internal {
@@ -1288,7 +1288,7 @@ INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
TEST_F(InstructionSelectorTest, Word64AndBranchWithOneBitMaskOnRight) {
TRACED_FORRANGE(int, bit, 0, 63) {
- uint64_t mask = 1L << bit;
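+ // long may be only 32 bits (LLP64 targets), so use 1LL to guarantee a
+ // 64-bit shift.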
+ uint64_t mask = 1LL << bit;
StreamBuilder m(this, MachineType::Int64(), MachineType::Int64());
RawMachineLabel a, b;
m.Branch(m.Word64And(m.Parameter(0), m.Int64Constant(mask)), &a, &b);
@@ -1309,7 +1309,7 @@ TEST_F(InstructionSelectorTest, Word64AndBranchWithOneBitMaskOnRight) {
TEST_F(InstructionSelectorTest, Word64AndBranchWithOneBitMaskOnLeft) {
TRACED_FORRANGE(int, bit, 0, 63) {
- uint64_t mask = 1L << bit;
+ uint64_t mask = 1LL << bit;
StreamBuilder m(this, MachineType::Int64(), MachineType::Int64());
RawMachineLabel a, b;
m.Branch(m.Word64And(m.Int64Constant(mask), m.Parameter(0)), &a, &b);
@@ -4542,9 +4542,8 @@ TEST_F(InstructionSelectorTest, ExternalReferenceLoad1) {
const int64_t kOffsets[] = {0, 1, 4, INT32_MIN, INT32_MAX};
TRACED_FOREACH(int64_t, offset, kOffsets) {
StreamBuilder m(this, MachineType::Int64());
- ExternalReference reference = bit_cast<ExternalReference>(
- reinterpret_cast<intptr_t>(isolate()->heap()->roots_array_start()) +
- offset + kRootRegisterBias);
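+ // The external reference is now computed relative to the isolate root
+ // instead of the heap's roots array.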
+ ExternalReference reference =
+ bit_cast<ExternalReference>(isolate()->isolate_root() + offset);
Node* const value =
m.Load(MachineType::Int64(), m.ExternalConstant(reference));
m.Return(value);
@@ -4564,9 +4563,8 @@ TEST_F(InstructionSelectorTest, ExternalReferenceLoad2) {
// Offset too large, we cannot use kMode_Root.
StreamBuilder m(this, MachineType::Int64());
int64_t offset = 0x100000000;
- ExternalReference reference = bit_cast<ExternalReference>(
- reinterpret_cast<intptr_t>(isolate()->heap()->roots_array_start()) +
- offset + kRootRegisterBias);
+ ExternalReference reference =
+ bit_cast<ExternalReference>(isolate()->isolate_root() + offset);
Node* const value =
m.Load(MachineType::Int64(), m.ExternalConstant(reference));
m.Return(value);
diff --git a/deps/v8/test/unittests/compiler/instruction-selector-unittest.cc b/deps/v8/test/unittests/compiler/backend/instruction-selector-unittest.cc
index 2d59393f9d..59d5dccd06 100644
--- a/deps/v8/test/unittests/compiler/instruction-selector-unittest.cc
+++ b/deps/v8/test/unittests/compiler/backend/instruction-selector-unittest.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "test/unittests/compiler/instruction-selector-unittest.h"
+#include "test/unittests/compiler/backend/instruction-selector-unittest.h"
#include "src/code-factory.h"
#include "src/compiler/compiler-source-position-table.h"
@@ -16,13 +16,10 @@ namespace v8 {
namespace internal {
namespace compiler {
-
InstructionSelectorTest::InstructionSelectorTest() : rng_(FLAG_random_seed) {}
-
InstructionSelectorTest::~InstructionSelectorTest() = default;
-
InstructionSelectorTest::Stream InstructionSelectorTest::StreamBuilder::Build(
InstructionSelector::Features features,
InstructionSelectorTest::StreamBuilderMode mode,
@@ -50,11 +47,9 @@ InstructionSelectorTest::Stream InstructionSelectorTest::StreamBuilder::Build(
PoisoningMitigationLevel::kPoisonAll);
selector.SelectInstructions();
if (FLAG_trace_turbo) {
- PrintableInstructionSequence printable = {RegisterConfiguration::Default(),
- &sequence};
StdoutStream{} << "=== Code sequence after instruction selection ==="
<< std::endl
- << printable;
+ << sequence;
}
Stream s;
s.virtual_registers_ = selector.GetVirtualRegistersForTesting();
@@ -115,14 +110,12 @@ InstructionSelectorTest::Stream InstructionSelectorTest::StreamBuilder::Build(
return s;
}
-
int InstructionSelectorTest::Stream::ToVreg(const Node* node) const {
VirtualRegisters::const_iterator i = virtual_registers_.find(node->id());
CHECK(i != virtual_registers_.end());
return i->second;
}
-
bool InstructionSelectorTest::Stream::IsFixed(const InstructionOperand* operand,
Register reg) const {
if (!operand->IsUnallocated()) return false;
@@ -131,7 +124,6 @@ bool InstructionSelectorTest::Stream::IsFixed(const InstructionOperand* operand,
return unallocated->fixed_register_index() == reg.code();
}
-
bool InstructionSelectorTest::Stream::IsSameAsFirst(
const InstructionOperand* operand) const {
if (!operand->IsUnallocated()) return false;
@@ -139,7 +131,6 @@ bool InstructionSelectorTest::Stream::IsSameAsFirst(
return unallocated->HasSameAsInputPolicy();
}
-
bool InstructionSelectorTest::Stream::IsUsedAtStart(
const InstructionOperand* operand) const {
if (!operand->IsUnallocated()) return false;
@@ -147,7 +138,6 @@ bool InstructionSelectorTest::Stream::IsUsedAtStart(
return unallocated->IsUsedAtStart();
}
-
const FrameStateFunctionInfo*
InstructionSelectorTest::StreamBuilder::GetFrameStateFunctionInfo(
int parameter_count, int local_count) {
@@ -156,11 +146,9 @@ InstructionSelectorTest::StreamBuilder::GetFrameStateFunctionInfo(
Handle<SharedFunctionInfo>());
}
-
// -----------------------------------------------------------------------------
// Return.
-
TARGET_TEST_F(InstructionSelectorTest, ReturnFloat32Constant) {
const float kValue = 4.2f;
StreamBuilder m(this, MachineType::Float32());
@@ -174,7 +162,6 @@ TARGET_TEST_F(InstructionSelectorTest, ReturnFloat32Constant) {
EXPECT_EQ(2U, s[1]->InputCount());
}
-
TARGET_TEST_F(InstructionSelectorTest, ReturnParameter) {
StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
m.Return(m.Parameter(0));
@@ -186,7 +173,6 @@ TARGET_TEST_F(InstructionSelectorTest, ReturnParameter) {
EXPECT_EQ(2U, s[1]->InputCount());
}
-
TARGET_TEST_F(InstructionSelectorTest, ReturnZero) {
StreamBuilder m(this, MachineType::Int32());
m.Return(m.Int32Constant(0));
@@ -200,7 +186,6 @@ TARGET_TEST_F(InstructionSelectorTest, ReturnZero) {
EXPECT_EQ(2U, s[1]->InputCount());
}
-
// -----------------------------------------------------------------------------
// Conversions.
@@ -216,11 +201,9 @@ TARGET_TEST_F(InstructionSelectorTest, TruncateFloat64ToWord32WithParameter) {
EXPECT_EQ(kArchRet, s[2]->arch_opcode());
}
-
// -----------------------------------------------------------------------------
// Parameters.
-
TARGET_TEST_F(InstructionSelectorTest, DoubleParameter) {
StreamBuilder m(this, MachineType::Float64(), MachineType::Float64());
Node* param = m.Parameter(0);
@@ -229,7 +212,6 @@ TARGET_TEST_F(InstructionSelectorTest, DoubleParameter) {
EXPECT_TRUE(s.IsDouble(param));
}
-
TARGET_TEST_F(InstructionSelectorTest, ReferenceParameter) {
StreamBuilder m(this, MachineType::AnyTagged(), MachineType::AnyTagged());
Node* param = m.Parameter(0);
@@ -238,11 +220,9 @@ TARGET_TEST_F(InstructionSelectorTest, ReferenceParameter) {
EXPECT_TRUE(s.IsReference(param));
}
-
// -----------------------------------------------------------------------------
// FinishRegion.
-
TARGET_TEST_F(InstructionSelectorTest, FinishRegion) {
StreamBuilder m(this, MachineType::AnyTagged(), MachineType::AnyTagged());
Node* param = m.Parameter(0);
@@ -260,15 +240,12 @@ TARGET_TEST_F(InstructionSelectorTest, FinishRegion) {
EXPECT_TRUE(s.IsReference(finish));
}
-
// -----------------------------------------------------------------------------
// Phi.
-
typedef InstructionSelectorTestWithParam<MachineType>
InstructionSelectorPhiTest;
-
TARGET_TEST_P(InstructionSelectorPhiTest, Doubleness) {
const MachineType type = GetParam();
StreamBuilder m(this, type, type, type);
@@ -288,7 +265,6 @@ TARGET_TEST_P(InstructionSelectorPhiTest, Doubleness) {
EXPECT_EQ(s.IsDouble(phi), s.IsDouble(param1));
}
-
TARGET_TEST_P(InstructionSelectorPhiTest, Referenceness) {
const MachineType type = GetParam();
StreamBuilder m(this, type, type, type);
@@ -308,7 +284,6 @@ TARGET_TEST_P(InstructionSelectorPhiTest, Referenceness) {
EXPECT_EQ(s.IsReference(phi), s.IsReference(param1));
}
-
INSTANTIATE_TEST_CASE_P(
InstructionSelectorTest, InstructionSelectorPhiTest,
::testing::Values(MachineType::Float64(), MachineType::Int8(),
@@ -318,11 +293,9 @@ INSTANTIATE_TEST_CASE_P(
MachineType::Uint64(), MachineType::Pointer(),
MachineType::AnyTagged()));
-
// -----------------------------------------------------------------------------
// ValueEffect.
-
TARGET_TEST_F(InstructionSelectorTest, ValueEffect) {
StreamBuilder m1(this, MachineType::Int32(), MachineType::Pointer());
Node* p1 = m1.Parameter(0);
@@ -346,11 +319,9 @@ TARGET_TEST_F(InstructionSelectorTest, ValueEffect) {
}
}
-
// -----------------------------------------------------------------------------
// Calls with deoptimization.
-
TARGET_TEST_F(InstructionSelectorTest, CallJSFunctionWithDeopt) {
StreamBuilder m(this, MachineType::AnyTagged(), MachineType::AnyTagged(),
MachineType::AnyTagged(), MachineType::AnyTagged());
@@ -405,7 +376,6 @@ TARGET_TEST_F(InstructionSelectorTest, CallJSFunctionWithDeopt) {
// TODO(jarin) Check deoptimization table.
}
-
TARGET_TEST_F(InstructionSelectorTest, CallStubWithDeopt) {
StreamBuilder m(this, MachineType::AnyTagged(), MachineType::AnyTagged(),
MachineType::AnyTagged(), MachineType::AnyTagged());
@@ -498,7 +468,6 @@ TARGET_TEST_F(InstructionSelectorTest, CallStubWithDeopt) {
EXPECT_EQ(index, s.size());
}
-
TARGET_TEST_F(InstructionSelectorTest, CallStubWithDeoptRecursiveFrameState) {
StreamBuilder m(this, MachineType::AnyTagged(), MachineType::AnyTagged(),
MachineType::AnyTagged(), MachineType::AnyTagged());
diff --git a/deps/v8/test/unittests/compiler/instruction-selector-unittest.h b/deps/v8/test/unittests/compiler/backend/instruction-selector-unittest.h
index 19f85ff1ff..3c4374101c 100644
--- a/deps/v8/test/unittests/compiler/instruction-selector-unittest.h
+++ b/deps/v8/test/unittests/compiler/backend/instruction-selector-unittest.h
@@ -9,7 +9,7 @@
#include <set>
#include "src/base/utils/random-number-generator.h"
-#include "src/compiler/instruction-selector.h"
+#include "src/compiler/backend/instruction-selector.h"
#include "src/compiler/raw-machine-assembler.h"
#include "src/macro-assembler.h"
#include "test/unittests/test-utils.h"
@@ -18,8 +18,7 @@ namespace v8 {
namespace internal {
namespace compiler {
-class InstructionSelectorTest : public TestWithContext,
- public TestWithIsolateAndZone {
+class InstructionSelectorTest : public TestWithNativeContextAndZone {
public:
InstructionSelectorTest();
~InstructionSelectorTest() override;
@@ -288,7 +287,6 @@ class InstructionSelectorTest : public TestWithContext,
base::RandomNumberGenerator rng_;
};
-
template <typename T>
class InstructionSelectorTestWithParam
: public InstructionSelectorTest,
diff --git a/deps/v8/test/unittests/compiler/instruction-sequence-unittest.cc b/deps/v8/test/unittests/compiler/backend/instruction-sequence-unittest.cc
index 2dd910aafb..f5fe72d5be 100644
--- a/deps/v8/test/unittests/compiler/instruction-sequence-unittest.cc
+++ b/deps/v8/test/unittests/compiler/backend/instruction-sequence-unittest.cc
@@ -2,9 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "test/unittests/compiler/backend/instruction-sequence-unittest.h"
#include "src/base/utils/random-number-generator.h"
#include "src/compiler/pipeline.h"
-#include "test/unittests/compiler/instruction-sequence-unittest.h"
#include "test/unittests/test-utils.h"
#include "testing/gmock/include/gmock/gmock.h"
@@ -12,48 +12,29 @@ namespace v8 {
namespace internal {
namespace compiler {
-static const char*
- general_register_names_[RegisterConfiguration::kMaxGeneralRegisters];
-static const char*
- double_register_names_[RegisterConfiguration::kMaxFPRegisters];
-static char register_names_[10 * (RegisterConfiguration::kMaxGeneralRegisters +
- RegisterConfiguration::kMaxFPRegisters)];
-
namespace {
-static int allocatable_codes[InstructionSequenceTest::kDefaultNRegs] = {
- 0, 1, 2, 3, 4, 5, 6, 7};
-}
-
-static void InitializeRegisterNames() {
- char* loc = register_names_;
- for (int i = 0; i < RegisterConfiguration::kMaxGeneralRegisters; ++i) {
- general_register_names_[i] = loc;
- loc += base::OS::SNPrintF(loc, 100, "gp_%d", i);
- *loc++ = 0;
- }
- for (int i = 0; i < RegisterConfiguration::kMaxFPRegisters; ++i) {
- double_register_names_[i] = loc;
- loc += base::OS::SNPrintF(loc, 100, "fp_%d", i) + 1;
- *loc++ = 0;
- }
+constexpr int kMaxNumAllocatable =
+ Max(Register::kNumRegisters, DoubleRegister::kNumRegisters);
+static std::array<int, kMaxNumAllocatable> kAllocatableCodes =
+ base::make_array<kMaxNumAllocatable>(
+ [](size_t i) { return static_cast<int>(i); });
}
InstructionSequenceTest::InstructionSequenceTest()
: sequence_(nullptr),
- num_general_registers_(kDefaultNRegs),
- num_double_registers_(kDefaultNRegs),
+ num_general_registers_(Register::kNumRegisters),
+ num_double_registers_(DoubleRegister::kNumRegisters),
instruction_blocks_(zone()),
current_block_(nullptr),
- block_returns_(false) {
- InitializeRegisterNames();
-}
-
+ block_returns_(false) {}
void InstructionSequenceTest::SetNumRegs(int num_general_registers,
int num_double_registers) {
CHECK(!config_);
CHECK(instructions_.empty());
CHECK(instruction_blocks_.empty());
+ CHECK_GE(Register::kNumRegisters, num_general_registers);
+ CHECK_GE(DoubleRegister::kNumRegisters, num_double_registers);
num_general_registers_ = num_general_registers;
num_double_registers_ = num_double_registers;
}
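The replacement above trades mutable global name buffers for a constexpr array built by base::make_array. A minimal sketch of that helper under C++17 (V8's own version lives in its base library; this is illustrative, not the shipped code):

#include <array>
#include <cstddef>
#include <utility>

template <typename Function, std::size_t... Indexes>
constexpr auto make_array_helper(Function f, std::index_sequence<Indexes...>)
    -> std::array<decltype(f(std::size_t{0})), sizeof...(Indexes)> {
  return {{f(Indexes)...}};
}

template <std::size_t Size, typename Function>
constexpr auto make_array(Function f) {
  // Invoke f once per index at compile time and collect the results.
  return make_array_helper(f, std::make_index_sequence<Size>{});
}

// Mirrors kAllocatableCodes: each code is its own index.
constexpr auto kCodes =
    make_array<8>([](std::size_t i) { return static_cast<int>(i); });
static_assert(kCodes[3] == 3, "codes are their own indexes");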
@@ -89,18 +70,14 @@ const RegisterConfiguration* InstructionSequenceTest::config() {
if (!config_) {
config_.reset(new RegisterConfiguration(
num_general_registers_, num_double_registers_, num_general_registers_,
- num_double_registers_, allocatable_codes, allocatable_codes,
+ num_double_registers_, kAllocatableCodes.data(),
+ kAllocatableCodes.data(),
kSimpleFPAliasing ? RegisterConfiguration::OVERLAP
- : RegisterConfiguration::COMBINE,
- general_register_names_,
- double_register_names_, // float register names
- double_register_names_,
- double_register_names_)); // SIMD 128 register names
+ : RegisterConfiguration::COMBINE));
}
return config_.get();
}
-
InstructionSequence* InstructionSequenceTest::sequence() {
if (sequence_ == nullptr) {
sequence_ = new (zone())
@@ -111,7 +88,6 @@ InstructionSequence* InstructionSequenceTest::sequence() {
return sequence_;
}
-
void InstructionSequenceTest::StartLoop(int loop_blocks) {
CHECK_NULL(current_block_);
if (!loop_blocks_.empty()) {
@@ -121,7 +97,6 @@ void InstructionSequenceTest::StartLoop(int loop_blocks) {
loop_blocks_.push_back(loop_data);
}
-
void InstructionSequenceTest::EndLoop() {
CHECK_NULL(current_block_);
CHECK(!loop_blocks_.empty());
@@ -129,13 +104,11 @@ void InstructionSequenceTest::EndLoop() {
loop_blocks_.pop_back();
}
-
void InstructionSequenceTest::StartBlock(bool deferred) {
block_returns_ = false;
NewBlock(deferred);
}
-
Instruction* InstructionSequenceTest::EndBlock(BlockCompletion completion) {
Instruction* result = nullptr;
if (block_returns_) {
@@ -168,12 +141,10 @@ Instruction* InstructionSequenceTest::EndBlock(BlockCompletion completion) {
return result;
}
-
InstructionSequenceTest::TestOperand InstructionSequenceTest::Imm(int32_t imm) {
return TestOperand(kImmediate, imm);
}
-
InstructionSequenceTest::VReg InstructionSequenceTest::Define(
TestOperand output_op) {
VReg vreg = NewReg(output_op);
@@ -188,7 +159,6 @@ Instruction* InstructionSequenceTest::Return(TestOperand input_op_0) {
return Emit(kArchRet, 0, nullptr, 1, inputs);
}
-
PhiInstruction* InstructionSequenceTest::Phi(VReg incoming_vreg_0,
VReg incoming_vreg_1,
VReg incoming_vreg_2,
@@ -208,7 +178,6 @@ PhiInstruction* InstructionSequenceTest::Phi(VReg incoming_vreg_0,
return phi;
}
-
PhiInstruction* InstructionSequenceTest::Phi(VReg incoming_vreg_0,
size_t input_count) {
auto phi = new (zone()) PhiInstruction(zone(), NewReg().value_, input_count);
@@ -217,14 +186,12 @@ PhiInstruction* InstructionSequenceTest::Phi(VReg incoming_vreg_0,
return phi;
}
-
void InstructionSequenceTest::SetInput(PhiInstruction* phi, size_t input,
VReg vreg) {
CHECK_NE(kNoValue, vreg.value_);
phi->SetInput(input, vreg.value_);
}
-
InstructionSequenceTest::VReg InstructionSequenceTest::DefineConstant(
int32_t imm) {
VReg vreg = NewReg();
@@ -234,10 +201,8 @@ InstructionSequenceTest::VReg InstructionSequenceTest::DefineConstant(
return vreg;
}
-
Instruction* InstructionSequenceTest::EmitNop() { return Emit(kArchNop); }
-
static size_t CountInputs(size_t size,
InstructionSequenceTest::TestOperand* inputs) {
size_t i = 0;
@@ -247,14 +212,12 @@ static size_t CountInputs(size_t size,
return i;
}
-
Instruction* InstructionSequenceTest::EmitI(size_t input_size,
TestOperand* inputs) {
InstructionOperand* mapped_inputs = ConvertInputs(input_size, inputs);
return Emit(kArchNop, 0, nullptr, input_size, mapped_inputs);
}
-
Instruction* InstructionSequenceTest::EmitI(TestOperand input_op_0,
TestOperand input_op_1,
TestOperand input_op_2,
@@ -263,7 +226,6 @@ Instruction* InstructionSequenceTest::EmitI(TestOperand input_op_0,
return EmitI(CountInputs(arraysize(inputs), inputs), inputs);
}
-
InstructionSequenceTest::VReg InstructionSequenceTest::EmitOI(
TestOperand output_op, size_t input_size, TestOperand* inputs) {
VReg output_vreg = NewReg(output_op);
@@ -273,7 +235,6 @@ InstructionSequenceTest::VReg InstructionSequenceTest::EmitOI(
return output_vreg;
}
-
InstructionSequenceTest::VReg InstructionSequenceTest::EmitOI(
TestOperand output_op, TestOperand input_op_0, TestOperand input_op_1,
TestOperand input_op_2, TestOperand input_op_3) {
@@ -281,7 +242,6 @@ InstructionSequenceTest::VReg InstructionSequenceTest::EmitOI(
return EmitOI(output_op, CountInputs(arraysize(inputs), inputs), inputs);
}
-
InstructionSequenceTest::VRegPair InstructionSequenceTest::EmitOOI(
TestOperand output_op_0, TestOperand output_op_1, size_t input_size,
TestOperand* inputs) {
@@ -295,7 +255,6 @@ InstructionSequenceTest::VRegPair InstructionSequenceTest::EmitOOI(
return output_vregs;
}
-
InstructionSequenceTest::VRegPair InstructionSequenceTest::EmitOOI(
TestOperand output_op_0, TestOperand output_op_1, TestOperand input_op_0,
TestOperand input_op_1, TestOperand input_op_2, TestOperand input_op_3) {
@@ -304,7 +263,6 @@ InstructionSequenceTest::VRegPair InstructionSequenceTest::EmitOOI(
CountInputs(arraysize(inputs), inputs), inputs);
}
-
InstructionSequenceTest::VReg InstructionSequenceTest::EmitCall(
TestOperand output_op, size_t input_size, TestOperand* inputs) {
VReg output_vreg = NewReg(output_op);
@@ -316,7 +274,6 @@ InstructionSequenceTest::VReg InstructionSequenceTest::EmitCall(
return output_vreg;
}
-
InstructionSequenceTest::VReg InstructionSequenceTest::EmitCall(
TestOperand output_op, TestOperand input_op_0, TestOperand input_op_1,
TestOperand input_op_2, TestOperand input_op_3) {
@@ -324,7 +281,6 @@ InstructionSequenceTest::VReg InstructionSequenceTest::EmitCall(
return EmitCall(output_op, CountInputs(arraysize(inputs), inputs), inputs);
}
-
Instruction* InstructionSequenceTest::EmitBranch(TestOperand input_op) {
InstructionOperand inputs[4]{ConvertInputOp(input_op), ConvertInputOp(Imm()),
ConvertInputOp(Imm()), ConvertInputOp(Imm())};
@@ -334,20 +290,17 @@ Instruction* InstructionSequenceTest::EmitBranch(TestOperand input_op) {
return AddInstruction(instruction);
}
-
Instruction* InstructionSequenceTest::EmitFallThrough() {
auto instruction = NewInstruction(kArchNop, 0, nullptr);
return AddInstruction(instruction);
}
-
Instruction* InstructionSequenceTest::EmitJump() {
InstructionOperand inputs[1]{ConvertInputOp(Imm())};
auto instruction = NewInstruction(kArchJmp, 0, nullptr, 1, inputs);
return AddInstruction(instruction);
}
-
Instruction* InstructionSequenceTest::NewInstruction(
InstructionCode code, size_t outputs_size, InstructionOperand* outputs,
size_t inputs_size, InstructionOperand* inputs, size_t temps_size,
@@ -357,32 +310,27 @@ Instruction* InstructionSequenceTest::NewInstruction(
inputs, temps_size, temps);
}
-
InstructionOperand InstructionSequenceTest::Unallocated(
TestOperand op, UnallocatedOperand::ExtendedPolicy policy) {
return UnallocatedOperand(policy, op.vreg_.value_);
}
-
InstructionOperand InstructionSequenceTest::Unallocated(
TestOperand op, UnallocatedOperand::ExtendedPolicy policy,
UnallocatedOperand::Lifetime lifetime) {
return UnallocatedOperand(policy, lifetime, op.vreg_.value_);
}
-
InstructionOperand InstructionSequenceTest::Unallocated(
TestOperand op, UnallocatedOperand::ExtendedPolicy policy, int index) {
return UnallocatedOperand(policy, index, op.vreg_.value_);
}
-
InstructionOperand InstructionSequenceTest::Unallocated(
TestOperand op, UnallocatedOperand::BasicPolicy policy, int index) {
return UnallocatedOperand(policy, index, op.vreg_.value_);
}
-
InstructionOperand* InstructionSequenceTest::ConvertInputs(
size_t input_size, TestOperand* inputs) {
InstructionOperand* mapped_inputs =
@@ -393,7 +341,6 @@ InstructionOperand* InstructionSequenceTest::ConvertInputs(
return mapped_inputs;
}
-
InstructionOperand InstructionSequenceTest::ConvertInputOp(TestOperand op) {
if (op.type_ == kImmediate) {
CHECK_EQ(op.vreg_.value_, kNoValue);
@@ -439,7 +386,6 @@ InstructionOperand InstructionSequenceTest::ConvertInputOp(TestOperand op) {
UNREACHABLE();
}
-
InstructionOperand InstructionSequenceTest::ConvertOutputOp(VReg vreg,
TestOperand op) {
CHECK_EQ(op.vreg_.value_, kNoValue);
@@ -474,7 +420,6 @@ InstructionOperand InstructionSequenceTest::ConvertOutputOp(VReg vreg,
UNREACHABLE();
}
-
InstructionBlock* InstructionSequenceTest::NewBlock(bool deferred) {
CHECK_NULL(current_block_);
Rpo rpo = Rpo::FromInt(static_cast<int>(instruction_blocks_.size()));
@@ -504,7 +449,6 @@ InstructionBlock* InstructionSequenceTest::NewBlock(bool deferred) {
return instruction_block;
}
-
void InstructionSequenceTest::WireBlocks() {
CHECK(!current_block());
CHECK(instruction_blocks_.size() == completions_.size());
@@ -536,7 +480,6 @@ void InstructionSequenceTest::WireBlocks() {
}
}
-
void InstructionSequenceTest::WireBlock(size_t block_offset, int jump_offset) {
size_t target_block_offset = block_offset + static_cast<size_t>(jump_offset);
CHECK(block_offset < instruction_blocks_.size());
@@ -547,7 +490,6 @@ void InstructionSequenceTest::WireBlock(size_t block_offset, int jump_offset) {
target->predecessors().push_back(block->rpo_number());
}
-
Instruction* InstructionSequenceTest::Emit(
InstructionCode code, size_t outputs_size, InstructionOperand* outputs,
size_t inputs_size, InstructionOperand* inputs, size_t temps_size,
@@ -558,7 +500,6 @@ Instruction* InstructionSequenceTest::Emit(
return AddInstruction(instruction);
}
-
Instruction* InstructionSequenceTest::AddInstruction(Instruction* instruction) {
sequence()->AddInstruction(instruction);
return instruction;
diff --git a/deps/v8/test/unittests/compiler/instruction-sequence-unittest.h b/deps/v8/test/unittests/compiler/backend/instruction-sequence-unittest.h
index 2c4df038fb..55dbe167c1 100644
--- a/deps/v8/test/unittests/compiler/instruction-sequence-unittest.h
+++ b/deps/v8/test/unittests/compiler/backend/instruction-sequence-unittest.h
@@ -7,7 +7,8 @@
#include <memory>
-#include "src/compiler/instruction.h"
+#include "src/compiler/backend/instruction.h"
+#include "src/register-configuration.h"
#include "test/unittests/test-utils.h"
#include "testing/gmock/include/gmock/gmock.h"
@@ -17,12 +18,14 @@ namespace compiler {
class InstructionSequenceTest : public TestWithIsolateAndZone {
public:
- static const int kDefaultNRegs = 8;
- static const int kNoValue = kMinInt;
- static const MachineRepresentation kNoRep = MachineRepresentation::kNone;
- static const MachineRepresentation kFloat32 = MachineRepresentation::kFloat32;
- static const MachineRepresentation kFloat64 = MachineRepresentation::kFloat64;
- static const MachineRepresentation kSimd128 = MachineRepresentation::kSimd128;
+ static constexpr int kNoValue = kMinInt;
+ static constexpr MachineRepresentation kNoRep = MachineRepresentation::kNone;
+ static constexpr MachineRepresentation kFloat32 =
+ MachineRepresentation::kFloat32;
+ static constexpr MachineRepresentation kFloat64 =
+ MachineRepresentation::kFloat64;
+ static constexpr MachineRepresentation kSimd128 =
+ MachineRepresentation::kSimd128;
typedef RpoNumber Rpo;
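Two things happen in this hunk: the hard-coded kDefaultNRegs disappears in favour of the real Register/DoubleRegister counts, and the remaining constants become static constexpr. A short sketch of why the latter matters (assuming C++17, illustrative type): constexpr static data members are implicitly inline, so odr-using them needs no out-of-line definition, unlike the old static const members.

struct Constants {
  static constexpr int kNoValue = -1;  // illustrative value, not kMinInt
};

// Taking the address odr-uses the member; fine in C++17 without a
// separate `const int Constants::kNoValue;` definition.
const int* p = &Constants::kNoValue;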
diff --git a/deps/v8/test/unittests/compiler/instruction-unittest.cc b/deps/v8/test/unittests/compiler/backend/instruction-unittest.cc
index 72deb12d02..09b4ea9295 100644
--- a/deps/v8/test/unittests/compiler/instruction-unittest.cc
+++ b/deps/v8/test/unittests/compiler/backend/instruction-unittest.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/compiler/instruction.h"
+#include "src/compiler/backend/instruction.h"
#include "src/register-configuration.h"
#include "test/unittests/test-utils.h"
#include "testing/gtest-support.h"
diff --git a/deps/v8/test/unittests/compiler/code-assembler-unittest.cc b/deps/v8/test/unittests/compiler/code-assembler-unittest.cc
index 8a28ef389a..68a701136f 100644
--- a/deps/v8/test/unittests/compiler/code-assembler-unittest.cc
+++ b/deps/v8/test/unittests/compiler/code-assembler-unittest.cc
@@ -126,6 +126,44 @@ TARGET_TEST_F(CodeAssemblerTest, IntPtrMul) {
}
}
+TARGET_TEST_F(CodeAssemblerTest, IntPtrDiv) {
+ CodeAssemblerTestState state(this);
+ CodeAssemblerForTest m(&state);
+ {
+ TNode<IntPtrT> a = m.UncheckedCast<IntPtrT>(m.Parameter(0));
+ TNode<IntPtrT> b = m.IntPtrConstant(100);
+ TNode<IntPtrT> div = m.IntPtrDiv(a, b);
+ EXPECT_THAT(div, IsIntPtrDiv(Matcher<Node*>(a), Matcher<Node*>(b)));
+ }
+ // x / 1 => x
+ {
+ TNode<IntPtrT> a = m.UncheckedCast<IntPtrT>(m.Parameter(0));
+ TNode<IntPtrT> b = m.IntPtrConstant(1);
+ TNode<IntPtrT> div = m.IntPtrDiv(a, b);
+ EXPECT_THAT(div, a);
+ }
+ // CONST_a / CONST_b => CONST_c
+ {
+ TNode<IntPtrT> a = m.IntPtrConstant(100);
+ TNode<IntPtrT> b = m.IntPtrConstant(5);
+ TNode<IntPtrT> div = m.IntPtrDiv(a, b);
+ EXPECT_THAT(div, IsIntPtrConstant(20));
+ }
+ {
+ TNode<IntPtrT> a = m.IntPtrConstant(100);
+ TNode<IntPtrT> b = m.IntPtrConstant(5);
+ TNode<IntPtrT> div = m.IntPtrDiv(a, b);
+ EXPECT_THAT(div, IsIntPtrConstant(20));
+ }
+ // x / 2^CONST => x >> CONST
+ {
+ TNode<IntPtrT> a = m.UncheckedCast<IntPtrT>(m.Parameter(0));
+ TNode<IntPtrT> b = m.IntPtrConstant(1 << 3);
+ TNode<IntPtrT> div = m.IntPtrDiv(a, b);
+ EXPECT_THAT(div, IsWordSar(Matcher<Node*>(a), IsIntPtrConstant(3)));
+ }
+}
+
TARGET_TEST_F(CodeAssemblerTest, WordShl) {
CodeAssemblerTestState state(this);
CodeAssemblerForTest m(&state);
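The new IntPtrDiv test checks three classic division folds: divide-by-one is the identity, two constant operands fold to a constant, and dividing by a power of two becomes an arithmetic shift. The same reductions in plain C++ (a sketch of the idea, not TurboFan's reducer; the shift form is only exact for non-negative dividends, which the sketch guards):

#include <cassert>
#include <cstdint>

int64_t DivStrengthReduced(int64_t x, int64_t divisor) {
  if (divisor == 1) return x;  // x / 1 => x
  if (divisor > 0 && (divisor & (divisor - 1)) == 0 && x >= 0) {
    int shift = 0;
    while ((int64_t{1} << shift) < divisor) ++shift;
    return x >> shift;  // x / 2^CONST => x >> CONST
  }
  return x / divisor;  // constant operands fold at compile time
}

int main() {
  assert(DivStrengthReduced(100, 5) == 20);  // CONST_a / CONST_b => CONST_c
  assert(DivStrengthReduced(42, 1) == 42);
  assert(DivStrengthReduced(96, 1 << 3) == 12);
}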
diff --git a/deps/v8/test/unittests/compiler/constant-folding-reducer-unittest.cc b/deps/v8/test/unittests/compiler/constant-folding-reducer-unittest.cc
index fd0845159f..640526bc90 100644
--- a/deps/v8/test/unittests/compiler/constant-folding-reducer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/constant-folding-reducer-unittest.cc
@@ -63,7 +63,7 @@ class ConstantFoldingReducerTest : public TypedGraphTest {
public:
ConstantFoldingReducerTest()
: TypedGraphTest(3),
- js_heap_broker_(isolate(), zone()),
+ broker_(isolate(), zone()),
simplified_(zone()),
deps_(isolate(), zone()) {}
~ConstantFoldingReducerTest() override = default;
@@ -76,23 +76,23 @@ class ConstantFoldingReducerTest : public TypedGraphTest {
&machine);
// TODO(titzer): mock the GraphReducer here for better unit testing.
GraphReducer graph_reducer(zone(), graph());
- ConstantFoldingReducer reducer(&graph_reducer, &jsgraph, js_heap_broker());
+ ConstantFoldingReducer reducer(&graph_reducer, &jsgraph, broker());
return reducer.Reduce(node);
}
SimplifiedOperatorBuilder* simplified() { return &simplified_; }
- JSHeapBroker* js_heap_broker() { return &js_heap_broker_; }
+ JSHeapBroker* broker() { return &broker_; }
private:
- JSHeapBroker js_heap_broker_;
+ JSHeapBroker broker_;
SimplifiedOperatorBuilder simplified_;
CompilationDependencies deps_;
};
TEST_F(ConstantFoldingReducerTest, ParameterWithMinusZero) {
{
- Reduction r = Reduce(Parameter(Type::NewConstant(
- js_heap_broker(), factory()->minus_zero_value(), zone())));
+ Reduction r = Reduce(Parameter(
+ Type::NewConstant(broker(), factory()->minus_zero_value(), zone())));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(), IsNumberConstant(-0.0));
}
@@ -104,8 +104,7 @@ TEST_F(ConstantFoldingReducerTest, ParameterWithMinusZero) {
{
Reduction r = Reduce(Parameter(Type::Union(
Type::MinusZero(),
- Type::NewConstant(js_heap_broker(), factory()->NewNumber(0), zone()),
- zone())));
+ Type::NewConstant(broker(), factory()->NewNumber(0), zone()), zone())));
EXPECT_FALSE(r.Changed());
}
}
@@ -113,8 +112,7 @@ TEST_F(ConstantFoldingReducerTest, ParameterWithMinusZero) {
TEST_F(ConstantFoldingReducerTest, ParameterWithNull) {
Handle<HeapObject> null = factory()->null_value();
{
- Reduction r =
- Reduce(Parameter(Type::NewConstant(js_heap_broker(), null, zone())));
+ Reduction r = Reduce(Parameter(Type::NewConstant(broker(), null, zone())));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(), IsHeapConstant(null));
}
@@ -131,14 +129,14 @@ TEST_F(ConstantFoldingReducerTest, ParameterWithNaN) {
std::numeric_limits<double>::signaling_NaN()};
TRACED_FOREACH(double, nan, kNaNs) {
Handle<Object> constant = factory()->NewNumber(nan);
- Reduction r = Reduce(
- Parameter(Type::NewConstant(js_heap_broker(), constant, zone())));
+ Reduction r =
+ Reduce(Parameter(Type::NewConstant(broker(), constant, zone())));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(), IsNumberConstant(IsNaN()));
}
{
- Reduction r = Reduce(Parameter(
- Type::NewConstant(js_heap_broker(), factory()->nan_value(), zone())));
+ Reduction r = Reduce(
+ Parameter(Type::NewConstant(broker(), factory()->nan_value(), zone())));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(), IsNumberConstant(IsNaN()));
}
@@ -152,8 +150,8 @@ TEST_F(ConstantFoldingReducerTest, ParameterWithNaN) {
TEST_F(ConstantFoldingReducerTest, ParameterWithPlainNumber) {
TRACED_FOREACH(double, value, kFloat64Values) {
Handle<Object> constant = factory()->NewNumber(value);
- Reduction r = Reduce(
- Parameter(Type::NewConstant(js_heap_broker(), constant, zone())));
+ Reduction r =
+ Reduce(Parameter(Type::NewConstant(broker(), constant, zone())));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(), IsNumberConstant(value));
}
@@ -172,8 +170,8 @@ TEST_F(ConstantFoldingReducerTest, ParameterWithUndefined) {
EXPECT_THAT(r.replacement(), IsHeapConstant(undefined));
}
{
- Reduction r = Reduce(
- Parameter(Type::NewConstant(js_heap_broker(), undefined, zone())));
+ Reduction r =
+ Reduce(Parameter(Type::NewConstant(broker(), undefined, zone())));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(), IsHeapConstant(undefined));
}
@@ -194,10 +192,10 @@ TEST_F(ConstantFoldingReducerTest, ToBooleanWithFalsish) {
Type::Undefined(),
Type::Union(
Type::Undetectable(),
- Type::Union(Type::NewConstant(
- js_heap_broker(),
- factory()->false_value(), zone()),
- Type::Range(0.0, 0.0, zone()), zone()),
+ Type::Union(
+ Type::NewConstant(
+ broker(), factory()->false_value(), zone()),
+ Type::Range(0.0, 0.0, zone()), zone()),
zone()),
zone()),
zone()),
@@ -212,7 +210,7 @@ TEST_F(ConstantFoldingReducerTest, ToBooleanWithFalsish) {
TEST_F(ConstantFoldingReducerTest, ToBooleanWithTruish) {
Node* input = Parameter(
Type::Union(
- Type::NewConstant(js_heap_broker(), factory()->true_value(), zone()),
+ Type::NewConstant(broker(), factory()->true_value(), zone()),
Type::Union(Type::DetectableReceiver(), Type::Symbol(), zone()),
zone()),
0);
diff --git a/deps/v8/test/unittests/compiler/graph-unittest.cc b/deps/v8/test/unittests/compiler/graph-unittest.cc
index 4736ddefa2..051aa68e64 100644
--- a/deps/v8/test/unittests/compiler/graph-unittest.cc
+++ b/deps/v8/test/unittests/compiler/graph-unittest.cc
@@ -15,17 +15,15 @@ namespace internal {
namespace compiler {
GraphTest::GraphTest(int num_parameters)
- : TestWithNativeContext(),
- TestWithIsolateAndZone(),
- canonical_(isolate()),
+ : canonical_(isolate()),
common_(zone()),
graph_(zone()),
- js_heap_broker_(isolate(), zone()),
+ broker_(isolate(), zone()),
source_positions_(&graph_),
node_origins_(&graph_) {
graph()->SetStart(graph()->NewNode(common()->Start(num_parameters)));
graph()->SetEnd(graph()->NewNode(common()->End(1), graph()->start()));
- js_heap_broker()->SetNativeContextRef();
+ broker()->SetNativeContextRef();
}
@@ -69,7 +67,7 @@ Node* GraphTest::NumberConstant(volatile double value) {
Node* GraphTest::HeapConstant(const Handle<HeapObject>& value) {
Node* node = graph()->NewNode(common()->HeapConstant(value));
- Type type = Type::NewConstant(js_heap_broker(), value, zone());
+ Type type = Type::NewConstant(broker(), value, zone());
NodeProperties::SetType(node, type);
return node;
}
@@ -119,8 +117,7 @@ Matcher<Node*> GraphTest::IsUndefinedConstant() {
}
TypedGraphTest::TypedGraphTest(int num_parameters)
- : GraphTest(num_parameters),
- typer_(js_heap_broker(), Typer::kNoFlags, graph()) {}
+ : GraphTest(num_parameters), typer_(broker(), Typer::kNoFlags, graph()) {}
TypedGraphTest::~TypedGraphTest() = default;
diff --git a/deps/v8/test/unittests/compiler/graph-unittest.h b/deps/v8/test/unittests/compiler/graph-unittest.h
index 8317ebf279..a4b719fe6b 100644
--- a/deps/v8/test/unittests/compiler/graph-unittest.h
+++ b/deps/v8/test/unittests/compiler/graph-unittest.h
@@ -24,8 +24,7 @@ namespace compiler {
using ::testing::Matcher;
-class GraphTest : public virtual TestWithNativeContext,
- public virtual TestWithIsolateAndZone {
+class GraphTest : public TestWithNativeContextAndZone {
public:
explicit GraphTest(int num_parameters = 1);
~GraphTest() override;
@@ -62,13 +61,13 @@ class GraphTest : public virtual TestWithNativeContext,
Graph* graph() { return &graph_; }
SourcePositionTable* source_positions() { return &source_positions_; }
NodeOriginTable* node_origins() { return &node_origins_; }
- JSHeapBroker* js_heap_broker() { return &js_heap_broker_; }
+ JSHeapBroker* broker() { return &broker_; }
private:
CanonicalHandleScope canonical_;
CommonOperatorBuilder common_;
Graph graph_;
- JSHeapBroker js_heap_broker_;
+ JSHeapBroker broker_;
SourcePositionTable source_positions_;
NodeOriginTable node_origins_;
};
diff --git a/deps/v8/test/unittests/compiler/ia32/instruction-selector-ia32-unittest.cc b/deps/v8/test/unittests/compiler/ia32/instruction-selector-ia32-unittest.cc
index 5b808eab25..65903506ad 100644
--- a/deps/v8/test/unittests/compiler/ia32/instruction-selector-ia32-unittest.cc
+++ b/deps/v8/test/unittests/compiler/ia32/instruction-selector-ia32-unittest.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "test/unittests/compiler/instruction-selector-unittest.h"
+#include "test/unittests/compiler/backend/instruction-selector-unittest.h"
#include "src/objects-inl.h"
diff --git a/deps/v8/test/unittests/compiler/js-call-reducer-unittest.cc b/deps/v8/test/unittests/compiler/js-call-reducer-unittest.cc
index 7660f5851e..171658d830 100644
--- a/deps/v8/test/unittests/compiler/js-call-reducer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/js-call-reducer-unittest.cc
@@ -22,9 +22,7 @@ class JSCallReducerTest : public TypedGraphTest {
public:
JSCallReducerTest()
: TypedGraphTest(3), javascript_(zone()), deps_(isolate(), zone()) {
- if (FLAG_concurrent_compiler_frontend) {
- js_heap_broker()->SerializeStandardObjects();
- }
+ broker()->SerializeStandardObjects();
}
~JSCallReducerTest() override = default;
@@ -37,24 +35,13 @@ class JSCallReducerTest : public TypedGraphTest {
// TODO(titzer): mock the GraphReducer here for better unit testing.
GraphReducer graph_reducer(zone(), graph());
- JSCallReducer reducer(&graph_reducer, &jsgraph, js_heap_broker(),
- JSCallReducer::kNoFlags, native_context(), &deps_);
+ JSCallReducer reducer(&graph_reducer, &jsgraph, broker(),
+ JSCallReducer::kNoFlags, &deps_);
return reducer.Reduce(node);
}
JSOperatorBuilder* javascript() { return &javascript_; }
- static void SetUpTestCase() {
- old_flag_lazy_ = i::FLAG_lazy_deserialization;
- i::FLAG_lazy_deserialization = false;
- TypedGraphTest::SetUpTestCase();
- }
-
- static void TearDownTestCase() {
- TypedGraphTest::TearDownTestCase();
- i::FLAG_lazy_deserialization = old_flag_lazy_;
- }
-
Node* GlobalFunction(const char* name) {
Handle<JSFunction> f = Handle<JSFunction>::cast(
Object::GetProperty(
@@ -131,9 +118,6 @@ class JSCallReducerTest : public TypedGraphTest {
private:
JSOperatorBuilder javascript_;
CompilationDependencies deps_;
-
- static bool old_flag_lazy_;
- static bool old_flag_lazy_handler_;
};
TEST_F(JSCallReducerTest, PromiseConstructorNoArgs) {
@@ -217,9 +201,6 @@ TEST_F(JSCallReducerTest, PromiseConstructorWithHook) {
ASSERT_FALSE(r.Changed());
}
-bool JSCallReducerTest::old_flag_lazy_;
-bool JSCallReducerTest::old_flag_lazy_handler_;
-
// -----------------------------------------------------------------------------
// Math unaries
diff --git a/deps/v8/test/unittests/compiler/js-create-lowering-unittest.cc b/deps/v8/test/unittests/compiler/js-create-lowering-unittest.cc
index eafd7fa35e..41f0c180e6 100644
--- a/deps/v8/test/unittests/compiler/js-create-lowering-unittest.cc
+++ b/deps/v8/test/unittests/compiler/js-create-lowering-unittest.cc
@@ -45,7 +45,7 @@ class JSCreateLoweringTest : public TypedGraphTest {
&machine);
// TODO(titzer): mock the GraphReducer here for better unit testing.
GraphReducer graph_reducer(zone(), graph());
- JSCreateLowering reducer(&graph_reducer, &deps_, &jsgraph, js_heap_broker(),
+ JSCreateLowering reducer(&graph_reducer, &deps_, &jsgraph, broker(),
zone());
return reducer.Reduce(node);
}
@@ -75,8 +75,8 @@ class JSCreateLoweringTest : public TypedGraphTest {
TEST_F(JSCreateLoweringTest, JSCreate) {
Handle<JSFunction> function = isolate()->object_function();
- Node* const target = Parameter(
- Type::HeapConstant(js_heap_broker(), function, graph()->zone()));
+ Node* const target =
+ Parameter(Type::HeapConstant(broker(), function, graph()->zone()));
Node* const context = Parameter(Type::Any());
Node* const effect = graph()->start();
Node* const control = graph()->start();
diff --git a/deps/v8/test/unittests/compiler/js-native-context-specialization-unittest.cc b/deps/v8/test/unittests/compiler/js-native-context-specialization-unittest.cc
index fdc87904c4..bf9a144fab 100644
--- a/deps/v8/test/unittests/compiler/js-native-context-specialization-unittest.cc
+++ b/deps/v8/test/unittests/compiler/js-native-context-specialization-unittest.cc
@@ -34,14 +34,14 @@ TEST_F(JSNativeContextSpecializationTest, GetMaxStringLengthOfString) {
Node* const str_node = graph()->NewNode(
common()->HeapConstant(factory()->InternalizeUtf8String("str")));
- EXPECT_EQ(JSNativeContextSpecialization::GetMaxStringLength(js_heap_broker(),
- str_node),
- str_len);
+ EXPECT_EQ(
+ JSNativeContextSpecialization::GetMaxStringLength(broker(), str_node),
+ str_len);
Node* const num_node = graph()->NewNode(common()->NumberConstant(10.0 / 3));
- EXPECT_EQ(JSNativeContextSpecialization::GetMaxStringLength(js_heap_broker(),
- num_node),
- num_len);
+ EXPECT_EQ(
+ JSNativeContextSpecialization::GetMaxStringLength(broker(), num_node),
+ num_len);
}
} // namespace js_native_context_specialization_unittest
diff --git a/deps/v8/test/unittests/compiler/js-typed-lowering-unittest.cc b/deps/v8/test/unittests/compiler/js-typed-lowering-unittest.cc
index 43998824d2..b3326b0ad4 100644
--- a/deps/v8/test/unittests/compiler/js-typed-lowering-unittest.cc
+++ b/deps/v8/test/unittests/compiler/js-typed-lowering-unittest.cc
@@ -48,12 +48,13 @@ class JSTypedLoweringTest : public TypedGraphTest {
&machine);
// TODO(titzer): mock the GraphReducer here for better unit testing.
GraphReducer graph_reducer(zone(), graph());
- JSTypedLowering reducer(&graph_reducer, &jsgraph, js_heap_broker(), zone());
+ JSTypedLowering reducer(&graph_reducer, &jsgraph, broker(), zone());
return reducer.Reduce(node);
}
Handle<JSArrayBuffer> NewArrayBuffer(void* bytes, size_t byte_length) {
- Handle<JSArrayBuffer> buffer = factory()->NewJSArrayBuffer();
+ Handle<JSArrayBuffer> buffer =
+ factory()->NewJSArrayBuffer(SharedFlag::kNotShared);
JSArrayBuffer::Setup(buffer, isolate(), true, bytes, byte_length);
return buffer;
}
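NewJSArrayBuffer now requires an explicit SharedFlag instead of relying on a default. The general idiom, sketched with illustrative names: an enum-class parameter keeps the call site self-documenting where a bare bool would be opaque.

enum class SharedFlag { kNotShared, kShared };

bool IsShared(SharedFlag flag) { return flag == SharedFlag::kShared; }

// Reads unambiguously, unlike a hypothetical NewJSArrayBuffer(false):
//   factory()->NewJSArrayBuffer(SharedFlag::kNotShared);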
diff --git a/deps/v8/test/unittests/compiler/live-range-builder.h b/deps/v8/test/unittests/compiler/live-range-builder.h
deleted file mode 100644
index 4a5621fab7..0000000000
--- a/deps/v8/test/unittests/compiler/live-range-builder.h
+++ /dev/null
@@ -1,78 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_LIVE_RANGE_BUILDER_H_
-#define V8_LIVE_RANGE_BUILDER_H_
-
-#include "src/compiler/register-allocator.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-
-// Utility offering shorthand syntax for building up a range by providing its ID
-// and pairs (start, end) specifying intervals. Circumvents current incomplete
-// support for C++ features such as instantiation lists, on OS X and Android.
-class TestRangeBuilder {
- public:
- explicit TestRangeBuilder(Zone* zone)
- : id_(-1), pairs_(), uses_(), zone_(zone) {}
-
- TestRangeBuilder& Id(int id) {
- id_ = id;
- return *this;
- }
- TestRangeBuilder& Add(int start, int end) {
- pairs_.push_back({start, end});
- return *this;
- }
-
- TestRangeBuilder& AddUse(int pos) {
- uses_.insert(pos);
- return *this;
- }
-
- TopLevelLiveRange* Build(int start, int end) {
- return Add(start, end).Build();
- }
-
- TopLevelLiveRange* Build() {
- TopLevelLiveRange* range =
- new (zone_) TopLevelLiveRange(id_, MachineRepresentation::kTagged);
- // Traverse the provided interval specifications backwards, because that is
- // what LiveRange expects.
- for (int i = static_cast<int>(pairs_.size()) - 1; i >= 0; --i) {
- Interval pair = pairs_[i];
- LifetimePosition start = LifetimePosition::FromInt(pair.first);
- LifetimePosition end = LifetimePosition::FromInt(pair.second);
- CHECK(start < end);
- range->AddUseInterval(start, end, zone_);
- }
- for (int pos : uses_) {
- UsePosition* use_position =
- new (zone_) UsePosition(LifetimePosition::FromInt(pos), nullptr,
- nullptr, UsePositionHintType::kNone);
- range->AddUsePosition(use_position);
- }
-
- pairs_.clear();
- return range;
- }
-
- private:
- typedef std::pair<int, int> Interval;
- typedef std::vector<Interval> IntervalList;
- int id_;
- IntervalList pairs_;
- std::set<int> uses_;
- Zone* zone_;
-};
-
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8
-
-#endif // V8_LIVE_RANGE_BUILDER_H_
diff --git a/deps/v8/test/unittests/compiler/machine-operator-reducer-unittest.cc b/deps/v8/test/unittests/compiler/machine-operator-reducer-unittest.cc
index b8b0c9004f..cd70b3bc41 100644
--- a/deps/v8/test/unittests/compiler/machine-operator-reducer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/machine-operator-reducer-unittest.cc
@@ -6,6 +6,7 @@
#include "src/base/bits.h"
#include "src/base/division-by-constant.h"
#include "src/base/ieee754.h"
+#include "src/base/overflowing-math.h"
#include "src/compiler/js-graph.h"
#include "src/compiler/typer.h"
#include "src/conversions-inl.h"
@@ -26,14 +27,20 @@ namespace compiler {
class MachineOperatorReducerTest : public GraphTest {
public:
explicit MachineOperatorReducerTest(int num_parameters = 2)
- : GraphTest(num_parameters), machine_(zone()) {}
+ : GraphTest(num_parameters),
+ machine_(zone()),
+ common_(zone()),
+ javascript_(zone()),
+ jsgraph_(isolate(), graph(), &common_, &javascript_, nullptr,
+ &machine_),
+ graph_reducer_(zone(), graph(), jsgraph_.Dead()) {}
protected:
Reduction Reduce(Node* node) {
JSOperatorBuilder javascript(zone());
JSGraph jsgraph(isolate(), graph(), common(), &javascript, nullptr,
&machine_);
- MachineOperatorReducer reducer(&jsgraph);
+ MachineOperatorReducer reducer(&graph_reducer_, &jsgraph);
return reducer.Reduce(node);
}
@@ -61,6 +68,10 @@ class MachineOperatorReducerTest : public GraphTest {
private:
MachineOperatorBuilder machine_;
+ CommonOperatorBuilder common_;
+ JSOperatorBuilder javascript_;
+ JSGraph jsgraph_;
+ GraphReducer graph_reducer_;
};
@@ -968,7 +979,8 @@ TEST_F(MachineOperatorReducerTest, Word32SarWithWord32ShlAndLoad) {
TEST_F(MachineOperatorReducerTest, Word32ShrWithWord32And) {
Node* const p0 = Parameter(0);
TRACED_FORRANGE(int32_t, shift, 1, 31) {
- uint32_t mask = (1 << shift) - 1;
+ uint32_t mask =
+ base::SubWithWraparound(base::ShlWithWraparound(1, shift), 1);
Node* node = graph()->NewNode(
machine()->Word32Shr(),
graph()->NewNode(machine()->Word32And(), p0, Int32Constant(mask)),
@@ -1057,7 +1069,9 @@ TEST_F(MachineOperatorReducerTest, Int32SubWithConstant) {
if (k == 0) {
EXPECT_EQ(p0, r.replacement());
} else {
- EXPECT_THAT(r.replacement(), IsInt32Add(p0, IsInt32Constant(-k)));
+ EXPECT_THAT(
+ r.replacement(),
+ IsInt32Add(p0, IsInt32Constant(base::NegateWithWraparound(k))));
}
}
}
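The base/overflowing-math.h helpers replace expressions like (1 << shift) - 1 and -k, which overflow signed int for shift == 31 or k == INT_MIN and are therefore undefined behaviour. A minimal sketch of the technique (not V8's actual implementation): do the arithmetic on the unsigned counterpart, where wraparound is defined, then cast back.

#include <cstdint>

int32_t SubWithWraparound(int32_t a, int32_t b) {
  // Unsigned subtraction wraps modulo 2^32; the cast back is
  // two's-complement in practice (implementation-defined pre-C++20).
  return static_cast<int32_t>(static_cast<uint32_t>(a) -
                              static_cast<uint32_t>(b));
}

int32_t ShlWithWraparound(int32_t a, int32_t shift) {
  return static_cast<int32_t>(static_cast<uint32_t>(a) << (shift & 31));
}

int32_t NegateWithWraparound(int32_t a) { return SubWithWraparound(0, a); }

// The mask from the test, UB-free even for shift == 31:
// uint32_t mask = SubWithWraparound(ShlWithWraparound(1, shift), 1);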
diff --git a/deps/v8/test/unittests/compiler/machine-operator-unittest.cc b/deps/v8/test/unittests/compiler/machine-operator-unittest.cc
index eea7276f6f..9eddb1d311 100644
--- a/deps/v8/test/unittests/compiler/machine-operator-unittest.cc
+++ b/deps/v8/test/unittests/compiler/machine-operator-unittest.cc
@@ -13,8 +13,6 @@ namespace internal {
namespace compiler {
namespace machine_operator_unittest {
-#if GTEST_HAS_COMBINE
-
template <typename T>
class MachineOperatorTestWithParam
: public TestWithZone,
@@ -159,7 +157,6 @@ INSTANTIATE_TEST_CASE_P(
::testing::Combine(::testing::ValuesIn(kRepresentationsForStore),
::testing::Values(kNoWriteBarrier,
kFullWriteBarrier))));
-#endif
// -----------------------------------------------------------------------------
// Pure operators.
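The deleted GTEST_HAS_COMBINE guard dates from toolchains whose gtest lacked ::testing::Combine; all supported configurations now provide it. A minimal combinatorial parameter test of the kind the guard protected (illustrative names):

#include <tuple>
#include "gtest/gtest.h"

class ComboTest : public ::testing::TestWithParam<std::tuple<int, bool>> {};

TEST_P(ComboTest, Smoke) {
  int n = std::get<0>(GetParam());
  bool flag = std::get<1>(GetParam());
  EXPECT_GE(n, 0);
  (void)flag;
}

// Runs Smoke once per (n, flag) pair: 3 values x 2 booleans = 6 cases.
INSTANTIATE_TEST_CASE_P(All, ComboTest,
                        ::testing::Combine(::testing::Values(0, 1, 2),
                                           ::testing::Bool()));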
diff --git a/deps/v8/test/unittests/compiler/mips/OWNERS b/deps/v8/test/unittests/compiler/mips/OWNERS
index 8bbcab4c2d..b455d9ef29 100644
--- a/deps/v8/test/unittests/compiler/mips/OWNERS
+++ b/deps/v8/test/unittests/compiler/mips/OWNERS
@@ -1,2 +1,3 @@
-ibogosavljevic@wavecomp.com
-skovacevic@wavecomp.com
\ No newline at end of file
+arikalo@wavecomp.com
+prudic@wavecomp.com
+skovacevic@wavecomp.com
diff --git a/deps/v8/test/unittests/compiler/mips/instruction-selector-mips-unittest.cc b/deps/v8/test/unittests/compiler/mips/instruction-selector-mips-unittest.cc
index 15f5de7b2f..83edb6a21e 100644
--- a/deps/v8/test/unittests/compiler/mips/instruction-selector-mips-unittest.cc
+++ b/deps/v8/test/unittests/compiler/mips/instruction-selector-mips-unittest.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file
-#include "test/unittests/compiler/instruction-selector-unittest.h"
+#include "test/unittests/compiler/backend/instruction-selector-unittest.h"
#include "src/objects-inl.h"
diff --git a/deps/v8/test/unittests/compiler/mips64/OWNERS b/deps/v8/test/unittests/compiler/mips64/OWNERS
index 8bbcab4c2d..b455d9ef29 100644
--- a/deps/v8/test/unittests/compiler/mips64/OWNERS
+++ b/deps/v8/test/unittests/compiler/mips64/OWNERS
@@ -1,2 +1,3 @@
-ibogosavljevic@wavecomp.com
-skovacevic@wavecomp.com
\ No newline at end of file
+arikalo@wavecomp.com
+prudic@wavecomp.com
+skovacevic@wavecomp.com
diff --git a/deps/v8/test/unittests/compiler/mips64/instruction-selector-mips64-unittest.cc b/deps/v8/test/unittests/compiler/mips64/instruction-selector-mips64-unittest.cc
index c090e29321..c6c1ff3ee8 100644
--- a/deps/v8/test/unittests/compiler/mips64/instruction-selector-mips64-unittest.cc
+++ b/deps/v8/test/unittests/compiler/mips64/instruction-selector-mips64-unittest.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file
-#include "test/unittests/compiler/instruction-selector-unittest.h"
+#include "test/unittests/compiler/backend/instruction-selector-unittest.h"
#include "src/objects-inl.h"
diff --git a/deps/v8/test/unittests/compiler/node-test-utils.cc b/deps/v8/test/unittests/compiler/node-test-utils.cc
index 0b3d8786f8..f23265e8e4 100644
--- a/deps/v8/test/unittests/compiler/node-test-utils.cc
+++ b/deps/v8/test/unittests/compiler/node-test-utils.cc
@@ -6,7 +6,6 @@
#include <vector>
-#include "src/assembler.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/js-operator.h"
#include "src/compiler/node-properties.h"
@@ -2152,6 +2151,7 @@ IS_BINOP_MATCHER(Word64Equal)
IS_BINOP_MATCHER(Int32AddWithOverflow)
IS_BINOP_MATCHER(Int32SubWithOverflow)
IS_BINOP_MATCHER(Int32Add)
+IS_BINOP_MATCHER(Int32Div)
IS_BINOP_MATCHER(Int32Sub)
IS_BINOP_MATCHER(Int32Mul)
IS_BINOP_MATCHER(Int32MulHigh)
@@ -2159,6 +2159,7 @@ IS_BINOP_MATCHER(Int32LessThan)
IS_BINOP_MATCHER(Uint32LessThan)
IS_BINOP_MATCHER(Uint32LessThanOrEqual)
IS_BINOP_MATCHER(Int64Add)
+IS_BINOP_MATCHER(Int64Div)
IS_BINOP_MATCHER(Int64Sub)
IS_BINOP_MATCHER(Int64Mul)
IS_BINOP_MATCHER(JSAdd)
@@ -2255,22 +2256,14 @@ IS_UNOP_MATCHER(TaggedPoisonOnSpeculation)
// Special-case Bitcast operators which are disabled when ENABLE_VERIFY_CSA is
// not enabled.
Matcher<Node*> IsBitcastTaggedToWord(const Matcher<Node*>& input_matcher) {
-#ifdef ENABLE_VERIFY_CSA
return MakeMatcher(
new IsUnopMatcher(IrOpcode::kBitcastTaggedToWord, input_matcher));
-#else
- return input_matcher;
-#endif
}
Matcher<Node*> IsBitcastWordToTaggedSigned(
const Matcher<Node*>& input_matcher) {
-#ifdef ENABLE_VERIFY_CSA
return MakeMatcher(
new IsUnopMatcher(IrOpcode::kBitcastWordToTaggedSigned, input_matcher));
-#else
- return input_matcher;
-#endif
}
#undef LOAD_MATCHER
diff --git a/deps/v8/test/unittests/compiler/node-test-utils.h b/deps/v8/test/unittests/compiler/node-test-utils.h
index 4e9c32e6d6..96bdbdf3be 100644
--- a/deps/v8/test/unittests/compiler/node-test-utils.h
+++ b/deps/v8/test/unittests/compiler/node-test-utils.h
@@ -387,6 +387,8 @@ Matcher<Node*> IsInt32Add(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher);
Matcher<Node*> IsInt32Sub(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher);
+Matcher<Node*> IsInt32Div(const Matcher<Node*>& lhs_matcher,
+ const Matcher<Node*>& rhs_matcher);
Matcher<Node*> IsInt32Mul(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher);
Matcher<Node*> IsInt32MulHigh(const Matcher<Node*>& lhs_matcher,
@@ -403,6 +405,8 @@ Matcher<Node*> IsInt64Sub(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher);
Matcher<Node*> IsInt64Mul(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher);
+Matcher<Node*> IsInt64Div(const Matcher<Node*>& lhs_matcher,
+ const Matcher<Node*>& rhs_matcher);
Matcher<Node*> IsJSAdd(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher);
Matcher<Node*> IsJSParseInt(const Matcher<Node*>& lhs_matcher,
@@ -517,6 +521,12 @@ static inline Matcher<Node*> IsIntPtrMul(const Matcher<Node*>& lhs_matcher,
: IsInt32Mul(lhs_matcher, rhs_matcher);
}
+static inline Matcher<Node*> IsIntPtrDiv(const Matcher<Node*>& lhs_matcher,
+ const Matcher<Node*>& rhs_matcher) {
+ return kPointerSize == 8 ? IsInt64Div(lhs_matcher, rhs_matcher)
+ : IsInt32Div(lhs_matcher, rhs_matcher);
+}
+
static inline Matcher<Node*> IsWordShl(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher) {
return kPointerSize == 8 ? IsWord64Shl(lhs_matcher, rhs_matcher)
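IsIntPtrDiv follows the surrounding pattern: IntPtr operations lower to the 64-bit or 32-bit machine operator depending on the target's pointer width. The same compile-time dispatch in plain C++ (a sketch, illustrative names):

#include <cstdint>

constexpr int kPtrSize = sizeof(void*);

int64_t IntPtrDivide(int64_t lhs, int64_t rhs) {
  return kPtrSize == 8
             ? lhs / rhs  // Int64Div on 64-bit targets
             : static_cast<int32_t>(lhs) /
                   static_cast<int32_t>(rhs);  // Int32Div on 32-bit targets
}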
diff --git a/deps/v8/test/unittests/compiler/persistent-unittest.cc b/deps/v8/test/unittests/compiler/persistent-unittest.cc
index 8454aeaeb1..4c5a1974c7 100644
--- a/deps/v8/test/unittests/compiler/persistent-unittest.cc
+++ b/deps/v8/test/unittests/compiler/persistent-unittest.cc
@@ -83,19 +83,19 @@ TEST(PersistentMap, Zip) {
// Provoke hash collisions to stress the iterator.
struct bad_hash {
- size_t operator()(int key) {
+ size_t operator()(uint32_t key) {
return base::hash_value(static_cast<size_t>(key) % 1000);
}
};
- PersistentMap<int, int, bad_hash> a(&zone);
- PersistentMap<int, int, bad_hash> b(&zone);
+ PersistentMap<int, uint32_t, bad_hash> a(&zone);
+ PersistentMap<int, uint32_t, bad_hash> b(&zone);
- int sum_a = 0;
- int sum_b = 0;
+ uint32_t sum_a = 0;
+ uint32_t sum_b = 0;
for (int i = 0; i < 30000; ++i) {
int key = small_big_distr(&rand);
- int value = small_big_distr(&rand);
+ uint32_t value = small_big_distr(&rand);
if (rand.NextBool()) {
sum_a += value;
a.Set(key, a.Get(key) + value);
@@ -105,28 +105,28 @@ TEST(PersistentMap, Zip) {
}
}
- int sum = sum_a + sum_b;
+ uint32_t sum = sum_a + sum_b;
for (auto pair : a) {
sum_a -= pair.second;
}
- ASSERT_EQ(0, sum_a);
+ ASSERT_EQ(0u, sum_a);
for (auto pair : b) {
sum_b -= pair.second;
}
- ASSERT_EQ(0, sum_b);
+ ASSERT_EQ(0u, sum_b);
for (auto triple : a.Zip(b)) {
int key = std::get<0>(triple);
- int value_a = std::get<1>(triple);
- int value_b = std::get<2>(triple);
+ uint32_t value_a = std::get<1>(triple);
+ uint32_t value_b = std::get<2>(triple);
ASSERT_EQ(value_a, a.Get(key));
ASSERT_EQ(value_b, b.Get(key));
sum -= value_a;
sum -= value_b;
}
- ASSERT_EQ(0, sum);
+ ASSERT_EQ(0u, sum);
}
} // namespace compiler
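The switch from int to uint32_t in this test is a correctness fix, not a style one: the sums accumulate ~30000 random values and only balance out modulo 2^32 once each entry is subtracted again. Unsigned arithmetic wraps by definition; the old signed sums could overflow, which is undefined behaviour. A compact illustration:

#include <cassert>
#include <cstdint>

int main() {
  uint32_t sum = 0;
  uint32_t values[] = {0x90000000u, 0x90000000u, 5u};  // sum exceeds INT32_MAX
  for (uint32_t v : values) sum += v;  // well-defined wraparound
  for (uint32_t v : values) sum -= v;  // unwinds exactly
  assert(sum == 0u);
}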
diff --git a/deps/v8/test/unittests/compiler/ppc/OWNERS b/deps/v8/test/unittests/compiler/ppc/OWNERS
index eb007cb908..6d1a8fc472 100644
--- a/deps/v8/test/unittests/compiler/ppc/OWNERS
+++ b/deps/v8/test/unittests/compiler/ppc/OWNERS
@@ -1,5 +1,4 @@
jyan@ca.ibm.com
-dstence@us.ibm.com
joransiu@ca.ibm.com
-mbrandy@us.ibm.com
michael_dawson@ca.ibm.com
+miladfar@ca.ibm.com
\ No newline at end of file
diff --git a/deps/v8/test/unittests/compiler/ppc/instruction-selector-ppc-unittest.cc b/deps/v8/test/unittests/compiler/ppc/instruction-selector-ppc-unittest.cc
index 86f7d69ec9..611e766edb 100644
--- a/deps/v8/test/unittests/compiler/ppc/instruction-selector-ppc-unittest.cc
+++ b/deps/v8/test/unittests/compiler/ppc/instruction-selector-ppc-unittest.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "test/unittests/compiler/instruction-selector-unittest.h"
+#include "test/unittests/compiler/backend/instruction-selector-unittest.h"
#include "src/assembler-inl.h"
diff --git a/deps/v8/test/unittests/compiler/redundancy-elimination-unittest.cc b/deps/v8/test/unittests/compiler/redundancy-elimination-unittest.cc
index f3ecd228a5..079cc4b99a 100644
--- a/deps/v8/test/unittests/compiler/redundancy-elimination-unittest.cc
+++ b/deps/v8/test/unittests/compiler/redundancy-elimination-unittest.cc
@@ -133,6 +133,67 @@ TEST_F(RedundancyEliminationTest, CheckNumberSubsumedByCheckSmi) {
}
// -----------------------------------------------------------------------------
+// CheckReceiver
+
+TEST_F(RedundancyEliminationTest, CheckReceiver) {
+ Node* value = Parameter(0);
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+
+ Node* check1 = effect =
+ graph()->NewNode(simplified()->CheckReceiver(), value, effect, control);
+ Reduction r1 = Reduce(check1);
+ ASSERT_TRUE(r1.Changed());
+ EXPECT_EQ(r1.replacement(), check1);
+
+ Node* check2 = effect =
+ graph()->NewNode(simplified()->CheckReceiver(), value, effect, control);
+ Reduction r2 = Reduce(check2);
+ ASSERT_TRUE(r2.Changed());
+ EXPECT_EQ(r2.replacement(), check1);
+}
+
+// -----------------------------------------------------------------------------
+// CheckReceiverOrNullOrUndefined
+
+TEST_F(RedundancyEliminationTest, CheckReceiverOrNullOrUndefined) {
+ Node* value = Parameter(0);
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+
+ Node* check1 = effect = graph()->NewNode(
+ simplified()->CheckReceiverOrNullOrUndefined(), value, effect, control);
+ Reduction r1 = Reduce(check1);
+ ASSERT_TRUE(r1.Changed());
+ EXPECT_EQ(r1.replacement(), check1);
+
+ Node* check2 = effect = graph()->NewNode(
+ simplified()->CheckReceiverOrNullOrUndefined(), value, effect, control);
+ Reduction r2 = Reduce(check2);
+ ASSERT_TRUE(r2.Changed());
+ EXPECT_EQ(r2.replacement(), check1);
+}
+
+TEST_F(RedundancyEliminationTest,
+ CheckReceiverOrNullOrUndefinedSubsumedByCheckReceiver) {
+ Node* value = Parameter(0);
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+
+ Node* check1 = effect =
+ graph()->NewNode(simplified()->CheckReceiver(), value, effect, control);
+ Reduction r1 = Reduce(check1);
+ ASSERT_TRUE(r1.Changed());
+ EXPECT_EQ(r1.replacement(), check1);
+
+ Node* check2 = effect = graph()->NewNode(
+ simplified()->CheckReceiverOrNullOrUndefined(), value, effect, control);
+ Reduction r2 = Reduce(check2);
+ ASSERT_TRUE(r2.Changed());
+ EXPECT_EQ(r2.replacement(), check1);
+}
+
+// -----------------------------------------------------------------------------
// CheckString
TEST_F(RedundancyEliminationTest,
@@ -207,6 +268,35 @@ TEST_F(RedundancyEliminationTest, CheckedFloat64ToInt32) {
}
// -----------------------------------------------------------------------------
+// CheckedFloat64ToInt64
+
+TEST_F(RedundancyEliminationTest, CheckedFloat64ToInt64) {
+ TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
+ TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
+ TRACED_FOREACH(CheckForMinusZeroMode, mode, kCheckForMinusZeroModes) {
+ Node* value = Parameter(0);
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+
+ Node* check1 = effect = graph()->NewNode(
+ simplified()->CheckedFloat64ToInt64(mode, feedback1), value, effect,
+ control);
+ Reduction r1 = Reduce(check1);
+ ASSERT_TRUE(r1.Changed());
+ EXPECT_EQ(r1.replacement(), check1);
+
+ Node* check2 = effect = graph()->NewNode(
+ simplified()->CheckedFloat64ToInt64(mode, feedback2), value, effect,
+ control);
+ Reduction r2 = Reduce(check2);
+ ASSERT_TRUE(r2.Changed());
+ EXPECT_EQ(r2.replacement(), check1);
+ }
+ }
+ }
+}
+
+// -----------------------------------------------------------------------------
// CheckedInt32ToTaggedSigned
TEST_F(RedundancyEliminationTest, CheckedInt32ToTaggedSigned) {
@@ -427,6 +517,35 @@ TEST_F(RedundancyEliminationTest,
}
// -----------------------------------------------------------------------------
+// CheckedTaggedToInt64
+
+TEST_F(RedundancyEliminationTest, CheckedTaggedToInt64) {
+ TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
+ TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
+ TRACED_FOREACH(CheckForMinusZeroMode, mode, kCheckForMinusZeroModes) {
+ Node* value = Parameter(0);
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+
+ Node* check1 = effect = graph()->NewNode(
+ simplified()->CheckedTaggedToInt64(mode, feedback1), value, effect,
+ control);
+ Reduction r1 = Reduce(check1);
+ ASSERT_TRUE(r1.Changed());
+ EXPECT_EQ(r1.replacement(), check1);
+
+ Node* check2 = effect = graph()->NewNode(
+ simplified()->CheckedTaggedToInt64(mode, feedback2), value, effect,
+ control);
+ Reduction r2 = Reduce(check2);
+ ASSERT_TRUE(r2.Changed());
+ EXPECT_EQ(r2.replacement(), check1);
+ }
+ }
+ }
+}
+
+// -----------------------------------------------------------------------------
// CheckedTaggedToTaggedPointer
TEST_F(RedundancyEliminationTest, CheckedTaggedToTaggedPointer) {
@@ -539,6 +658,34 @@ TEST_F(RedundancyEliminationTest,
}
// -----------------------------------------------------------------------------
+// CheckedUint32Bounds
+
+TEST_F(RedundancyEliminationTest, CheckedUint32Bounds) {
+ TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
+ TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
+ Node* index = Parameter(0);
+ Node* length = Parameter(1);
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+
+ Node* check1 = effect =
+ graph()->NewNode(simplified()->CheckedUint32Bounds(feedback1), index,
+ length, effect, control);
+ Reduction r1 = Reduce(check1);
+ ASSERT_TRUE(r1.Changed());
+ EXPECT_EQ(r1.replacement(), check1);
+
+ Node* check2 = effect =
+ graph()->NewNode(simplified()->CheckedUint32Bounds(feedback2), index,
+ length, effect, control);
+ Reduction r2 = Reduce(check2);
+ ASSERT_TRUE(r2.Changed());
+ EXPECT_EQ(r2.replacement(), check1);
+ }
+ }
+}
+
+// -----------------------------------------------------------------------------
// CheckedUint32ToInt32
TEST_F(RedundancyEliminationTest, CheckedUint32ToInt32) {
@@ -593,6 +740,34 @@ TEST_F(RedundancyEliminationTest, CheckedUint32ToTaggedSigned) {
}
// -----------------------------------------------------------------------------
+// CheckedUint64Bounds
+
+TEST_F(RedundancyEliminationTest, CheckedUint64Bounds) {
+ TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
+ TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
+ Node* index = Parameter(0);
+ Node* length = Parameter(1);
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+
+ Node* check1 = effect =
+ graph()->NewNode(simplified()->CheckedUint64Bounds(feedback1), index,
+ length, effect, control);
+ Reduction r1 = Reduce(check1);
+ ASSERT_TRUE(r1.Changed());
+ EXPECT_EQ(r1.replacement(), check1);
+
+ Node* check2 = effect =
+ graph()->NewNode(simplified()->CheckedUint64Bounds(feedback2), index,
+ length, effect, control);
+ Reduction r2 = Reduce(check2);
+ ASSERT_TRUE(r2.Changed());
+ EXPECT_EQ(r2.replacement(), check1);
+ }
+ }
+}
+
+// -----------------------------------------------------------------------------
// CheckedUint64ToInt32
TEST_F(RedundancyEliminationTest, CheckedUint64ToInt32) {
@@ -651,7 +826,7 @@ TEST_F(RedundancyEliminationTest, CheckedUint64ToTaggedSigned) {
TEST_F(RedundancyEliminationTest,
SpeculativeNumberEqualWithCheckBoundsBetterType) {
- Typer typer(js_heap_broker(), Typer::kNoFlags, graph());
+ Typer typer(broker(), Typer::kNoFlags, graph());
TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
Node* lhs = Parameter(Type::Any(), 0);
@@ -687,7 +862,7 @@ TEST_F(RedundancyEliminationTest,
TEST_F(RedundancyEliminationTest,
SpeculativeNumberEqualWithCheckBoundsSameType) {
- Typer typer(js_heap_broker(), Typer::kNoFlags, graph());
+ Typer typer(broker(), Typer::kNoFlags, graph());
TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
Node* lhs = Parameter(Type::UnsignedSmall(), 0);
@@ -726,7 +901,7 @@ TEST_F(RedundancyEliminationTest,
TEST_F(RedundancyEliminationTest,
SpeculativeNumberLessThanWithCheckBoundsBetterType) {
- Typer typer(js_heap_broker(), Typer::kNoFlags, graph());
+ Typer typer(broker(), Typer::kNoFlags, graph());
TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
Node* lhs = Parameter(Type::Any(), 0);
@@ -762,7 +937,7 @@ TEST_F(RedundancyEliminationTest,
TEST_F(RedundancyEliminationTest,
SpeculativeNumberLessThanWithCheckBoundsSameType) {
- Typer typer(js_heap_broker(), Typer::kNoFlags, graph());
+ Typer typer(broker(), Typer::kNoFlags, graph());
TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
Node* lhs = Parameter(Type::UnsignedSmall(), 0);
@@ -801,7 +976,7 @@ TEST_F(RedundancyEliminationTest,
TEST_F(RedundancyEliminationTest,
SpeculativeNumberLessThanOrEqualWithCheckBoundsBetterType) {
- Typer typer(js_heap_broker(), Typer::kNoFlags, graph());
+ Typer typer(broker(), Typer::kNoFlags, graph());
TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
Node* lhs = Parameter(Type::Any(), 0);
@@ -837,7 +1012,7 @@ TEST_F(RedundancyEliminationTest,
TEST_F(RedundancyEliminationTest,
SpeculativeNumberLessThanOrEqualWithCheckBoundsSameType) {
- Typer typer(js_heap_broker(), Typer::kNoFlags, graph());
+ Typer typer(broker(), Typer::kNoFlags, graph());
TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
Node* lhs = Parameter(Type::UnsignedSmall(), 0);
@@ -876,7 +1051,7 @@ TEST_F(RedundancyEliminationTest,
TEST_F(RedundancyEliminationTest,
SpeculativeNumberAddWithCheckBoundsBetterType) {
- Typer typer(js_heap_broker(), Typer::kNoFlags, graph());
+ Typer typer(broker(), Typer::kNoFlags, graph());
TRACED_FOREACH(VectorSlotPair, feedback, vector_slot_pairs()) {
TRACED_FOREACH(NumberOperationHint, hint, kNumberOperationHints) {
Node* lhs = Parameter(Type::Any(), 0);
@@ -902,7 +1077,7 @@ TEST_F(RedundancyEliminationTest,
}
TEST_F(RedundancyEliminationTest, SpeculativeNumberAddWithCheckBoundsSameType) {
- Typer typer(js_heap_broker(), Typer::kNoFlags, graph());
+ Typer typer(broker(), Typer::kNoFlags, graph());
TRACED_FOREACH(VectorSlotPair, feedback, vector_slot_pairs()) {
TRACED_FOREACH(NumberOperationHint, hint, kNumberOperationHints) {
Node* lhs = Parameter(Type::Range(42.0, 42.0, zone()), 0);
@@ -932,7 +1107,7 @@ TEST_F(RedundancyEliminationTest, SpeculativeNumberAddWithCheckBoundsSameType) {
TEST_F(RedundancyEliminationTest,
SpeculativeNumberSubtractWithCheckBoundsBetterType) {
- Typer typer(js_heap_broker(), Typer::kNoFlags, graph());
+ Typer typer(broker(), Typer::kNoFlags, graph());
TRACED_FOREACH(VectorSlotPair, feedback, vector_slot_pairs()) {
TRACED_FOREACH(NumberOperationHint, hint, kNumberOperationHints) {
Node* lhs = Parameter(Type::Any(), 0);
@@ -960,7 +1135,7 @@ TEST_F(RedundancyEliminationTest,
TEST_F(RedundancyEliminationTest,
SpeculativeNumberSubtractWithCheckBoundsSameType) {
- Typer typer(js_heap_broker(), Typer::kNoFlags, graph());
+ Typer typer(broker(), Typer::kNoFlags, graph());
TRACED_FOREACH(VectorSlotPair, feedback, vector_slot_pairs()) {
TRACED_FOREACH(NumberOperationHint, hint, kNumberOperationHints) {
Node* lhs = Parameter(Type::Range(42.0, 42.0, zone()), 0);
@@ -991,7 +1166,7 @@ TEST_F(RedundancyEliminationTest,
TEST_F(RedundancyEliminationTest,
SpeculativeSafeIntegerAddWithCheckBoundsBetterType) {
- Typer typer(js_heap_broker(), Typer::kNoFlags, graph());
+ Typer typer(broker(), Typer::kNoFlags, graph());
TRACED_FOREACH(VectorSlotPair, feedback, vector_slot_pairs()) {
TRACED_FOREACH(NumberOperationHint, hint, kNumberOperationHints) {
Node* lhs = Parameter(Type::Any(), 0);
@@ -1019,7 +1194,7 @@ TEST_F(RedundancyEliminationTest,
TEST_F(RedundancyEliminationTest,
SpeculativeSafeIntegerAddWithCheckBoundsSameType) {
- Typer typer(js_heap_broker(), Typer::kNoFlags, graph());
+ Typer typer(broker(), Typer::kNoFlags, graph());
TRACED_FOREACH(VectorSlotPair, feedback, vector_slot_pairs()) {
TRACED_FOREACH(NumberOperationHint, hint, kNumberOperationHints) {
Node* lhs = Parameter(Type::Range(42.0, 42.0, zone()), 0);
@@ -1050,7 +1225,7 @@ TEST_F(RedundancyEliminationTest,
TEST_F(RedundancyEliminationTest,
SpeculativeSafeIntegerSubtractWithCheckBoundsBetterType) {
- Typer typer(js_heap_broker(), Typer::kNoFlags, graph());
+ Typer typer(broker(), Typer::kNoFlags, graph());
TRACED_FOREACH(VectorSlotPair, feedback, vector_slot_pairs()) {
TRACED_FOREACH(NumberOperationHint, hint, kNumberOperationHints) {
Node* lhs = Parameter(Type::Any(), 0);
@@ -1078,7 +1253,7 @@ TEST_F(RedundancyEliminationTest,
TEST_F(RedundancyEliminationTest,
SpeculativeSafeIntegerSubtractWithCheckBoundsSameType) {
- Typer typer(js_heap_broker(), Typer::kNoFlags, graph());
+ Typer typer(broker(), Typer::kNoFlags, graph());
TRACED_FOREACH(VectorSlotPair, feedback, vector_slot_pairs()) {
TRACED_FOREACH(NumberOperationHint, hint, kNumberOperationHints) {
Node* lhs = Parameter(Type::Range(42.0, 42.0, zone()), 0);
@@ -1109,7 +1284,7 @@ TEST_F(RedundancyEliminationTest,
TEST_F(RedundancyEliminationTest,
SpeculativeToNumberWithCheckBoundsBetterType) {
- Typer typer(js_heap_broker(), Typer::kNoFlags, graph());
+ Typer typer(broker(), Typer::kNoFlags, graph());
TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
TRACED_FOREACH(NumberOperationHint, hint, kNumberOperationHints) {
@@ -1137,7 +1312,7 @@ TEST_F(RedundancyEliminationTest,
}
TEST_F(RedundancyEliminationTest, SpeculativeToNumberWithCheckBoundsSameType) {
- Typer typer(js_heap_broker(), Typer::kNoFlags, graph());
+ Typer typer(broker(), Typer::kNoFlags, graph());
TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
TRACED_FOREACH(NumberOperationHint, hint, kNumberOperationHints) {
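Every new Check* test in this file asserts the same invariant: the first check node is its own replacement, and an identical later check on the same effect chain is replaced by the first one (a strictly weaker later check, such as CheckReceiverOrNullOrUndefined after CheckReceiver, folds the same way by subsumption, which this sketch omits). A minimal model of that caching logic, illustrative rather than V8's EffectPathChecks:

#include <cassert>
#include <map>
#include <string>

struct Node {
  std::string op;
  int id;
};

struct RedundancyCache {
  std::map<std::string, Node*> seen;  // operator -> first equivalent check

  Node* Reduce(Node* check) {
    auto it = seen.find(check->op);
    if (it != seen.end()) return it->second;  // redundant: reuse first check
    seen[check->op] = check;                  // first occurrence: keep it
    return check;
  }
};

int main() {
  RedundancyCache cache;
  Node c1{"CheckReceiver", 1}, c2{"CheckReceiver", 2};
  assert(cache.Reduce(&c1) == &c1);  // r1.replacement() == check1
  assert(cache.Reduce(&c2) == &c1);  // r2.replacement() == check1
}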
diff --git a/deps/v8/test/unittests/compiler/regalloc/live-range-unittest.cc b/deps/v8/test/unittests/compiler/regalloc/live-range-unittest.cc
index fc7b268b44..9ac6ca8810 100644
--- a/deps/v8/test/unittests/compiler/regalloc/live-range-unittest.cc
+++ b/deps/v8/test/unittests/compiler/regalloc/live-range-unittest.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "test/unittests/compiler/live-range-builder.h"
+#include "src/compiler/backend/register-allocator.h"
#include "test/unittests/test-utils.h"
// TODO(mtrofin): would we want to centralize this definition?
@@ -20,6 +20,64 @@ namespace v8 {
namespace internal {
namespace compiler {
+// Utility offering shorthand syntax for building up a range by providing its ID
+// and pairs (start, end) specifying intervals. Circumvents currently
+// incomplete support for C++ features such as initializer lists on OS X and
+// Android.
+class TestRangeBuilder {
+ public:
+ explicit TestRangeBuilder(Zone* zone)
+ : id_(-1), pairs_(), uses_(), zone_(zone) {}
+
+ TestRangeBuilder& Id(int id) {
+ id_ = id;
+ return *this;
+ }
+ TestRangeBuilder& Add(int start, int end) {
+ pairs_.push_back({start, end});
+ return *this;
+ }
+
+ TestRangeBuilder& AddUse(int pos) {
+ uses_.insert(pos);
+ return *this;
+ }
+
+ TopLevelLiveRange* Build(int start, int end) {
+ return Add(start, end).Build();
+ }
+
+ TopLevelLiveRange* Build() {
+ TopLevelLiveRange* range =
+ new (zone_) TopLevelLiveRange(id_, MachineRepresentation::kTagged);
+ // Traverse the provided interval specifications backwards, because that is
+ // what LiveRange expects.
+ for (int i = static_cast<int>(pairs_.size()) - 1; i >= 0; --i) {
+ Interval pair = pairs_[i];
+ LifetimePosition start = LifetimePosition::FromInt(pair.first);
+ LifetimePosition end = LifetimePosition::FromInt(pair.second);
+ CHECK(start < end);
+ range->AddUseInterval(start, end, zone_);
+ }
+ for (int pos : uses_) {
+ UsePosition* use_position =
+ new (zone_) UsePosition(LifetimePosition::FromInt(pos), nullptr,
+ nullptr, UsePositionHintType::kNone);
+ range->AddUsePosition(use_position);
+ }
+
+ pairs_.clear();
+ return range;
+ }
+
+ private:
+ typedef std::pair<int, int> Interval;
+ typedef std::vector<Interval> IntervalList;
+ int id_;
+ IntervalList pairs_;
+ std::set<int> uses_;
+ Zone* zone_;
+};
+
class LiveRangeUnitTest : public TestWithZone {
public:
// Split helper, to avoid int->LifetimePosition conversion nuisance.
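For orientation, a minimal usage sketch of the TestRangeBuilder defined above
(illustrative only; the values are hypothetical and zone() is assumed to come
from the test fixture):

    // Live range #1 covering [0, 4) and [6, 10), with a use position at 2.
    TopLevelLiveRange* range =
        TestRangeBuilder(zone()).Id(1).Add(0, 4).Add(6, 10).AddUse(2).Build();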
diff --git a/deps/v8/test/unittests/compiler/regalloc/move-optimizer-unittest.cc b/deps/v8/test/unittests/compiler/regalloc/move-optimizer-unittest.cc
index 71767a964e..23f17b2b6c 100644
--- a/deps/v8/test/unittests/compiler/regalloc/move-optimizer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/regalloc/move-optimizer-unittest.cc
@@ -2,10 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/compiler/move-optimizer.h"
-#include "src/compiler/pipeline.h"
+#include "src/compiler/backend/move-optimizer.h"
#include "src/ostreams.h"
-#include "test/unittests/compiler/instruction-sequence-unittest.h"
+#include "test/unittests/compiler/backend/instruction-sequence-unittest.h"
namespace v8 {
namespace internal {
@@ -54,18 +53,16 @@ class MoveOptimizerTest : public InstructionSequenceTest {
void Optimize() {
WireBlocks();
if (FLAG_trace_turbo) {
- PrintableInstructionSequence printable = {config(), sequence()};
StdoutStream{}
<< "----- Instruction sequence before move optimization -----\n"
- << printable;
+ << *sequence();
}
MoveOptimizer move_optimizer(zone(), sequence());
move_optimizer.Run();
if (FLAG_trace_turbo) {
- PrintableInstructionSequence printable = {config(), sequence()};
StdoutStream{}
<< "----- Instruction sequence after move optimization -----\n"
- << printable;
+ << *sequence();
}
}
diff --git a/deps/v8/test/unittests/compiler/regalloc/register-allocator-unittest.cc b/deps/v8/test/unittests/compiler/regalloc/register-allocator-unittest.cc
index e12d53caf5..d77f424ef7 100644
--- a/deps/v8/test/unittests/compiler/regalloc/register-allocator-unittest.cc
+++ b/deps/v8/test/unittests/compiler/regalloc/register-allocator-unittest.cc
@@ -4,7 +4,7 @@
#include "src/assembler-inl.h"
#include "src/compiler/pipeline.h"
-#include "test/unittests/compiler/instruction-sequence-unittest.h"
+#include "test/unittests/compiler/backend/instruction-sequence-unittest.h"
namespace v8 {
namespace internal {
@@ -187,7 +187,7 @@ TEST_F(RegisterAllocatorTest, SimpleDiamondPhi) {
}
TEST_F(RegisterAllocatorTest, DiamondManyPhis) {
- const int kPhis = kDefaultNRegs * 2;
+ constexpr int kPhis = Register::kNumRegisters * 2;
StartBlock();
EndBlock(Branch(Reg(DefineConstant()), 1, 2));
@@ -218,7 +218,7 @@ TEST_F(RegisterAllocatorTest, DiamondManyPhis) {
}
TEST_F(RegisterAllocatorTest, DoubleDiamondManyRedundantPhis) {
- const int kPhis = kDefaultNRegs * 2;
+ constexpr int kPhis = Register::kNumRegisters * 2;
// First diamond.
StartBlock();
@@ -326,16 +326,16 @@ TEST_F(RegisterAllocatorTest, SpillPhi) {
TEST_F(RegisterAllocatorTest, MoveLotsOfConstants) {
StartBlock();
- VReg constants[kDefaultNRegs];
+ VReg constants[Register::kNumRegisters];
for (size_t i = 0; i < arraysize(constants); ++i) {
constants[i] = DefineConstant();
}
- TestOperand call_ops[kDefaultNRegs * 2];
- for (int i = 0; i < kDefaultNRegs; ++i) {
+ TestOperand call_ops[Register::kNumRegisters * 2];
+ for (int i = 0; i < Register::kNumRegisters; ++i) {
call_ops[i] = Reg(constants[i], i);
}
- for (int i = 0; i < kDefaultNRegs; ++i) {
- call_ops[i + kDefaultNRegs] = Slot(constants[i], i);
+ for (int i = 0; i < Register::kNumRegisters; ++i) {
+ call_ops[i + Register::kNumRegisters] = Slot(constants[i], i);
}
EmitCall(Slot(-1), arraysize(call_ops), call_ops);
EndBlock(Last());
@@ -488,7 +488,7 @@ TEST_F(RegisterAllocatorTest, RegressionSplitBeforeAndMove) {
StartBlock();
// Fill registers.
- VReg values[kDefaultNRegs];
+ VReg values[Register::kNumRegisters];
for (size_t i = 0; i < arraysize(values); ++i) {
if (i == 0 || i == 1) continue; // Leave a hole for c_1 to take.
values[i] = Define(Reg(static_cast<int>(i)));
@@ -522,7 +522,7 @@ TEST_F(RegisterAllocatorTest, RegressionSpillTwice) {
TEST_F(RegisterAllocatorTest, RegressionLoadConstantBeforeSpill) {
StartBlock();
// Fill registers.
- VReg values[kDefaultNRegs];
+ VReg values[Register::kNumRegisters];
for (size_t i = arraysize(values); i > 0; --i) {
values[i - 1] = Define(Reg(static_cast<int>(i - 1)));
}
@@ -711,8 +711,6 @@ class SlotConstraintTest : public RegisterAllocatorTest,
} // namespace
-#if GTEST_HAS_COMBINE
-
TEST_P(SlotConstraintTest, SlotConstraint) {
StartBlock();
VReg p_0;
@@ -762,8 +760,6 @@ INSTANTIATE_TEST_CASE_P(
::testing::Combine(::testing::ValuesIn(kParameterTypes),
::testing::Range(0, SlotConstraintTest::kMaxVariant)));
-#endif // GTEST_HAS_COMBINE
-
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/compiler/s390/OWNERS b/deps/v8/test/unittests/compiler/s390/OWNERS
index eb007cb908..6d1a8fc472 100644
--- a/deps/v8/test/unittests/compiler/s390/OWNERS
+++ b/deps/v8/test/unittests/compiler/s390/OWNERS
@@ -1,5 +1,4 @@
jyan@ca.ibm.com
-dstence@us.ibm.com
joransiu@ca.ibm.com
-mbrandy@us.ibm.com
michael_dawson@ca.ibm.com
+miladfar@ca.ibm.com
\ No newline at end of file
diff --git a/deps/v8/test/unittests/compiler/s390/instruction-selector-s390-unittest.cc b/deps/v8/test/unittests/compiler/s390/instruction-selector-s390-unittest.cc
index 86f7d69ec9..611e766edb 100644
--- a/deps/v8/test/unittests/compiler/s390/instruction-selector-s390-unittest.cc
+++ b/deps/v8/test/unittests/compiler/s390/instruction-selector-s390-unittest.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "test/unittests/compiler/instruction-selector-unittest.h"
+#include "test/unittests/compiler/backend/instruction-selector-unittest.h"
#include "src/assembler-inl.h"
diff --git a/deps/v8/test/unittests/compiler/simplified-lowering-unittest.cc b/deps/v8/test/unittests/compiler/simplified-lowering-unittest.cc
index 82bcda6e9f..722384da5b 100644
--- a/deps/v8/test/unittests/compiler/simplified-lowering-unittest.cc
+++ b/deps/v8/test/unittests/compiler/simplified-lowering-unittest.cc
@@ -42,12 +42,12 @@ class SimplifiedLoweringTest : public GraphTest {
{
// Simplified lowering needs to run w/o the typer decorator so make sure
// the object is not live at the same time.
- Typer typer(js_heap_broker(), Typer::kNoFlags, graph());
+ Typer typer(broker(), Typer::kNoFlags, graph());
typer.Run();
}
- SimplifiedLowering lowering(jsgraph(), js_heap_broker(), zone(),
- source_positions(), node_origins(),
+ SimplifiedLowering lowering(jsgraph(), broker(), zone(), source_positions(),
+ node_origins(),
PoisoningMitigationLevel::kDontPoison);
lowering.LowerAllNodes();
}
diff --git a/deps/v8/test/unittests/compiler/simplified-operator-reducer-unittest.cc b/deps/v8/test/unittests/compiler/simplified-operator-reducer-unittest.cc
index 5e2f8f15cc..f3573d6379 100644
--- a/deps/v8/test/unittests/compiler/simplified-operator-reducer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/simplified-operator-reducer-unittest.cc
@@ -29,14 +29,13 @@ class SimplifiedOperatorReducerTest : public GraphTest {
protected:
Reduction Reduce(Node* node) {
- JSHeapBroker js_heap_broker(isolate(), zone());
+ JSHeapBroker broker(isolate(), zone());
MachineOperatorBuilder machine(zone());
JSOperatorBuilder javascript(zone());
JSGraph jsgraph(isolate(), graph(), common(), &javascript, simplified(),
&machine);
GraphReducer graph_reducer(zone(), graph());
- SimplifiedOperatorReducer reducer(&graph_reducer, &jsgraph,
- &js_heap_broker);
+ SimplifiedOperatorReducer reducer(&graph_reducer, &jsgraph, &broker);
return reducer.Reduce(node);
}
diff --git a/deps/v8/test/unittests/compiler/typed-optimization-unittest.cc b/deps/v8/test/unittests/compiler/typed-optimization-unittest.cc
index 51426a5f85..da1f3941f0 100644
--- a/deps/v8/test/unittests/compiler/typed-optimization-unittest.cc
+++ b/deps/v8/test/unittests/compiler/typed-optimization-unittest.cc
@@ -38,8 +38,7 @@ class TypedOptimizationTest : public TypedGraphTest {
&machine);
// TODO(titzer): mock the GraphReducer here for better unit testing.
GraphReducer graph_reducer(zone(), graph());
- TypedOptimization reducer(&graph_reducer, &deps_, &jsgraph,
- js_heap_broker());
+ TypedOptimization reducer(&graph_reducer, &deps_, &jsgraph, broker());
return reducer.Reduce(node);
}
diff --git a/deps/v8/test/unittests/compiler/typer-unittest.cc b/deps/v8/test/unittests/compiler/typer-unittest.cc
index b827088336..5954dbc638 100644
--- a/deps/v8/test/unittests/compiler/typer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/typer-unittest.cc
@@ -4,7 +4,7 @@
#include <functional>
-#include "src/codegen.h"
+#include "src/base/overflowing-math.h"
#include "src/compiler/js-operator.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/operator-properties.h"
@@ -22,8 +22,8 @@ class TyperTest : public TypedGraphTest {
public:
TyperTest()
: TypedGraphTest(3),
- js_heap_broker_(isolate(), zone()),
- operation_typer_(&js_heap_broker_, zone()),
+ broker_(isolate(), zone()),
+ operation_typer_(&broker_, zone()),
types_(zone(), isolate(), random_number_generator()),
javascript_(zone()),
simplified_(zone()) {
@@ -56,7 +56,7 @@ class TyperTest : public TypedGraphTest {
const int kRepetitions = 50;
- JSHeapBroker js_heap_broker_;
+ JSHeapBroker broker_;
OperationTyper operation_typer_;
Types types_;
JSOperatorBuilder javascript_;
@@ -176,8 +176,8 @@ class TyperTest : public TypedGraphTest {
for (int x2 = rmin; x2 < rmin + width; x2++) {
double result_value = opfun(x1, x2);
Type result_type = Type::NewConstant(
- &js_heap_broker_,
- isolate()->factory()->NewNumber(result_value), zone());
+ &broker_, isolate()->factory()->NewNumber(result_value),
+ zone());
EXPECT_TRUE(result_type.Is(expected_type));
}
}
@@ -198,23 +198,21 @@ class TyperTest : public TypedGraphTest {
double x2 = RandomInt(r2.AsRange());
double result_value = opfun(x1, x2);
Type result_type = Type::NewConstant(
- &js_heap_broker_, isolate()->factory()->NewNumber(result_value),
- zone());
+ &broker_, isolate()->factory()->NewNumber(result_value), zone());
EXPECT_TRUE(result_type.Is(expected_type));
}
}
// Test extreme cases.
double x1 = +1e-308;
double x2 = -1e-308;
- Type r1 = Type::NewConstant(&js_heap_broker_,
- isolate()->factory()->NewNumber(x1), zone());
- Type r2 = Type::NewConstant(&js_heap_broker_,
- isolate()->factory()->NewNumber(x2), zone());
+ Type r1 = Type::NewConstant(&broker_, isolate()->factory()->NewNumber(x1),
+ zone());
+ Type r2 = Type::NewConstant(&broker_, isolate()->factory()->NewNumber(x2),
+ zone());
Type expected_type = TypeBinaryOp(op, r1, r2);
double result_value = opfun(x1, x2);
Type result_type = Type::NewConstant(
- &js_heap_broker_, isolate()->factory()->NewNumber(result_value),
- zone());
+ &broker_, isolate()->factory()->NewNumber(result_value), zone());
EXPECT_TRUE(result_type.Is(expected_type));
}
@@ -229,7 +227,7 @@ class TyperTest : public TypedGraphTest {
double x2 = RandomInt(r2.AsRange());
bool result_value = opfun(x1, x2);
Type result_type = Type::NewConstant(
- &js_heap_broker_,
+ &broker_,
result_value ? isolate()->factory()->true_value()
: isolate()->factory()->false_value(),
zone());
@@ -249,8 +247,7 @@ class TyperTest : public TypedGraphTest {
int32_t x2 = static_cast<int32_t>(RandomInt(r2.AsRange()));
double result_value = opfun(x1, x2);
Type result_type = Type::NewConstant(
- &js_heap_broker_, isolate()->factory()->NewNumber(result_value),
- zone());
+ &broker_, isolate()->factory()->NewNumber(result_value), zone());
EXPECT_TRUE(result_type.Is(expected_type));
}
}
@@ -311,6 +308,7 @@ int32_t shift_right(int32_t x, int32_t y) { return x >> (y & 0x1F); }
int32_t bit_or(int32_t x, int32_t y) { return x | y; }
int32_t bit_and(int32_t x, int32_t y) { return x & y; }
int32_t bit_xor(int32_t x, int32_t y) { return x ^ y; }
+double divide_double_double(double x, double y) { return base::Divide(x, y); }
double modulo_double_double(double x, double y) { return Modulo(x, y); }
} // namespace
@@ -335,7 +333,7 @@ TEST_F(TyperTest, TypeJSMultiply) {
}
TEST_F(TyperTest, TypeJSDivide) {
- TestBinaryArithOp(javascript_.Divide(), std::divides<double>());
+ TestBinaryArithOp(javascript_.Divide(), divide_double_double);
}
TEST_F(TyperTest, TypeJSModulus) {
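Note on the TypeJSDivide change above: base::Divide comes from the newly
included src/base/overflowing-math.h. A sketch of the plausible shape of such
a helper, assuming it merely centralizes IEEE-754 division so the y == 0 case
is handled in one sanctioned place (assumed shape, not necessarily V8's exact
definition):

    // Assumed shape: IEEE-754 division, where x / 0.0 yields +/-infinity
    // (or NaN for 0.0 / 0.0) instead of going through a generic functor.
    inline double Divide(double x, double y) { return x / y; }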
diff --git a/deps/v8/test/unittests/compiler/x64/instruction-selector-x64-unittest.cc b/deps/v8/test/unittests/compiler/x64/instruction-selector-x64-unittest.cc
index fb7caa4bf9..f174b92731 100644
--- a/deps/v8/test/unittests/compiler/x64/instruction-selector-x64-unittest.cc
+++ b/deps/v8/test/unittests/compiler/x64/instruction-selector-x64-unittest.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "test/unittests/compiler/instruction-selector-unittest.h"
+#include "test/unittests/compiler/backend/instruction-selector-unittest.h"
#include "src/compiler/node-matchers.h"
#include "src/objects-inl.h"
@@ -1205,10 +1205,114 @@ TEST_F(InstructionSelectorTest, Int32Shl4BecomesLea) {
EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
}
-
// -----------------------------------------------------------------------------
// Binops with a memory operand.
+TEST_F(InstructionSelectorTest, LoadCmp32) {
+ {
+ // Word32Equal(Load[Int8](p0, p1), Int32Constant(0)) -> cmpb [p0,p1], 0
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int64(),
+ MachineType::Int64());
+ Node* const p0 = m.Parameter(0);
+ Node* const p1 = m.Parameter(1);
+ m.Return(
+ m.Word32Equal(m.Load(MachineType::Int8(), p0, p1), m.Int32Constant(0)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kX64Cmp8, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_MR1, s[0]->addressing_mode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
+ EXPECT_TRUE(s[0]->InputAt(2)->IsImmediate());
+ }
+ {
+ // Word32Equal(Load[Uint8](p0, p1), Int32Constant(0)) -> cmpb [p0,p1], 0
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int64(),
+ MachineType::Int64());
+ Node* const p0 = m.Parameter(0);
+ Node* const p1 = m.Parameter(1);
+ m.Return(m.Word32Equal(m.Load(MachineType::Uint8(), p0, p1),
+ m.Int32Constant(0)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kX64Cmp8, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_MR1, s[0]->addressing_mode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
+ EXPECT_TRUE(s[0]->InputAt(2)->IsImmediate());
+ }
+ {
+ // Word32Equal(Load[Int16](p0, p1), Int32Constant(0)) -> cmpw [p0,p1], 0
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int64(),
+ MachineType::Int64());
+ Node* const p0 = m.Parameter(0);
+ Node* const p1 = m.Parameter(1);
+ m.Return(m.Word32Equal(m.Load(MachineType::Int16(), p0, p1),
+ m.Int32Constant(0)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kX64Cmp16, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_MR1, s[0]->addressing_mode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
+ EXPECT_TRUE(s[0]->InputAt(2)->IsImmediate());
+ }
+ {
+ // Word32Equal(Load[Uint16](p0, p1), Int32Constant(0)) -> cmpw [p0,p1], 0
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int64(),
+ MachineType::Int64());
+ Node* const p0 = m.Parameter(0);
+ Node* const p1 = m.Parameter(1);
+ m.Return(m.Word32Equal(m.Load(MachineType::Uint16(), p0, p1),
+ m.Int32Constant(0)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kX64Cmp16, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_MR1, s[0]->addressing_mode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
+ EXPECT_TRUE(s[0]->InputAt(2)->IsImmediate());
+ }
+ {
+ // Word32Equal(Load[Int32](p0, p1), Int32Constant(0)) -> cmpl [p0,p1], 0
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int64(),
+ MachineType::Int64());
+ Node* const p0 = m.Parameter(0);
+ Node* const p1 = m.Parameter(1);
+ m.Return(m.Word32Equal(m.Load(MachineType::Int32(), p0, p1),
+ m.Int32Constant(0)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kX64Cmp32, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_MR1, s[0]->addressing_mode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
+ EXPECT_TRUE(s[0]->InputAt(2)->IsImmediate());
+ }
+ {
+ // Word32Equal(Load[Uint32](p0, p1), Int32Constant(0)) -> cmpl [p0,p1], 0
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int64(),
+ MachineType::Int64());
+ Node* const p0 = m.Parameter(0);
+ Node* const p1 = m.Parameter(1);
+ m.Return(m.Word32Equal(m.Load(MachineType::Uint32(), p0, p1),
+ m.Int32Constant(0)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kX64Cmp32, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_MR1, s[0]->addressing_mode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
+ EXPECT_TRUE(s[0]->InputAt(2)->IsImmediate());
+ }
+}
+
TEST_F(InstructionSelectorTest, LoadAnd32) {
StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
MachineType::Int32());
diff --git a/deps/v8/test/unittests/compiler/zone-stats-unittest.cc b/deps/v8/test/unittests/compiler/zone-stats-unittest.cc
index a643d1480c..9f3bd3493f 100644
--- a/deps/v8/test/unittests/compiler/zone-stats-unittest.cc
+++ b/deps/v8/test/unittests/compiler/zone-stats-unittest.cc
@@ -10,7 +10,7 @@ namespace v8 {
namespace internal {
namespace compiler {
-class ZoneStatsTest : public TestWithIsolate {
+class ZoneStatsTest : public ::testing::Test {
public:
ZoneStatsTest() : zone_stats_(&allocator_) {}
diff --git a/deps/v8/test/unittests/conversions-unittest.cc b/deps/v8/test/unittests/conversions-unittest.cc
new file mode 100644
index 0000000000..7c4bd96a6f
--- /dev/null
+++ b/deps/v8/test/unittests/conversions-unittest.cc
@@ -0,0 +1,76 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/conversions.h"
+#include "test/unittests/test-utils.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+class ConversionsTest : public ::testing::Test {
+ public:
+ ConversionsTest() = default;
+ ~ConversionsTest() override = default;
+
+ SourcePosition toPos(int offset) {
+ return SourcePosition(offset, offset % 10 - 1);
+ }
+};
+
+// Some random values, mostly at 'suspicious' bit boundaries.
+
+struct IntStringPair {
+ int integer;
+ std::string string;
+};
+
+static IntStringPair int_pairs[] = {{0, "0"},
+ {101, "101"},
+ {-1, "-1"},
+ {1024, "1024"},
+ {200000, "200000"},
+ {-1024, "-1024"},
+ {-200000, "-200000"},
+ {kMinInt, "-2147483648"},
+ {kMaxInt, "2147483647"}};
+
+TEST_F(ConversionsTest, IntToCString) {
+ std::unique_ptr<char[]> buf(new char[4096]);
+
+ for (size_t i = 0; i < arraysize(int_pairs); i++) {
+ ASSERT_STREQ(IntToCString(int_pairs[i].integer, {buf.get(), 4096}),
+ int_pairs[i].string.c_str());
+ }
+}
+
+struct DoubleStringPair {
+ double number;
+ std::string string;
+};
+
+static DoubleStringPair double_pairs[] = {
+ {0.0, "0"},
+ {kMinInt, "-2147483648"},
+ {kMaxInt, "2147483647"},
+ // ES section 7.1.12.1 #sec-tostring-applied-to-the-number-type:
+ // -0.0 is stringified to "0".
+ {-0.0, "0"},
+ {1.1, "1.1"},
+ {0.1, "0.1"}};
+
+TEST_F(ConversionsTest, DoubleToCString) {
+ std::unique_ptr<char[]> buf(new char[4096]);
+
+ for (size_t i = 0; i < arraysize(double_pairs); i++) {
+ ASSERT_STREQ(DoubleToCString(double_pairs[i].number, {buf.get(), 4096}),
+ double_pairs[i].string.c_str());
+ }
+}
+
+} // namespace interpreter
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/unittests/detachable-vector-unittest.cc b/deps/v8/test/unittests/detachable-vector-unittest.cc
index f9c846df22..b805352a7e 100644
--- a/deps/v8/test/unittests/detachable-vector-unittest.cc
+++ b/deps/v8/test/unittests/detachable-vector-unittest.cc
@@ -63,5 +63,65 @@ TEST(DetachableVector, DetachLeaksBackingStore) {
// The destructor of v2 will release the backing store.
}
+TEST(DetachableVector, PushAndPopWithReallocation) {
+ DetachableVector<size_t> v;
+ const size_t kMinimumCapacity = DetachableVector<size_t>::kMinimumCapacity;
+
+ EXPECT_EQ(0u, v.capacity());
+ EXPECT_EQ(0u, v.size());
+ v.push_back(0);
+ EXPECT_EQ(kMinimumCapacity, v.capacity());
+ EXPECT_EQ(1u, v.size());
+
+ // Push values until the reallocation happens.
+ for (size_t i = 1; i <= kMinimumCapacity; ++i) {
+ v.push_back(i);
+ }
+ EXPECT_EQ(2 * kMinimumCapacity, v.capacity());
+ EXPECT_EQ(kMinimumCapacity + 1, v.size());
+
+ EXPECT_EQ(kMinimumCapacity, v.back());
+ v.pop_back();
+
+ v.push_back(100);
+ EXPECT_EQ(100u, v.back());
+ v.pop_back();
+ EXPECT_EQ(kMinimumCapacity - 1, v.back());
+}
+
+TEST(DetachableVector, ShrinkToFit) {
+ DetachableVector<size_t> v;
+ const size_t kMinimumCapacity = DetachableVector<size_t>::kMinimumCapacity;
+
+ // shrink_to_fit does not affect an empty DetachableVector.
+ EXPECT_EQ(0u, v.capacity());
+ v.shrink_to_fit();
+ EXPECT_EQ(0u, v.capacity());
+
+ // Do not shrink the buffer if it's smaller than kMinimumCapacity.
+ v.push_back(0);
+ EXPECT_EQ(kMinimumCapacity, v.capacity());
+ v.shrink_to_fit();
+ EXPECT_EQ(kMinimumCapacity, v.capacity());
+
+ // Push items into |v| until the buffer has grown twice.
+ for (size_t i = 0; i < 2 * kMinimumCapacity; ++i) {
+ v.push_back(i);
+ }
+ EXPECT_EQ(2 * kMinimumCapacity + 1, v.size());
+ EXPECT_EQ(4 * kMinimumCapacity, v.capacity());
+
+ // Do not shrink the buffer if the number of unused slots is not large enough.
+ v.shrink_to_fit();
+ EXPECT_EQ(2 * kMinimumCapacity + 1, v.size());
+ EXPECT_EQ(4 * kMinimumCapacity, v.capacity());
+
+ v.pop_back();
+ v.pop_back();
+ v.shrink_to_fit();
+ EXPECT_EQ(2 * kMinimumCapacity - 1, v.size());
+ EXPECT_EQ(2 * kMinimumCapacity - 1, v.capacity());
+}
+
} // namespace internal
} // namespace v8
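Taken together, the two tests above pin down the vector's growth policy. A
standalone sketch of the inferred capacity rule (illustrative; not V8's
implementation):

    #include <cstddef>
    // Capacity after an overflowing push_back, as implied by the
    // expectations above: the first allocation reserves kMinimumCapacity,
    // and every later overflow doubles the buffer.
    size_t NextCapacity(size_t current, size_t minimum_capacity) {
      return current == 0 ? minimum_capacity : 2 * current;
    }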
diff --git a/deps/v8/test/unittests/heap/barrier-unittest.cc b/deps/v8/test/unittests/heap/barrier-unittest.cc
index 1d42f97a4f..07906b20c1 100644
--- a/deps/v8/test/unittests/heap/barrier-unittest.cc
+++ b/deps/v8/test/unittests/heap/barrier-unittest.cc
@@ -4,19 +4,27 @@
#include "src/heap/barrier.h"
#include "src/base/platform/platform.h"
+#include "src/base/platform/time.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace v8 {
namespace internal {
namespace heap {
+namespace {
+
+// Large timeout that will not trigger in tests.
+constexpr base::TimeDelta test_timeout = base::TimeDelta::FromHours(3);
+
+} // namespace
+
TEST(OneshotBarrier, InitializeNotDone) {
- OneshotBarrier barrier;
+ OneshotBarrier barrier(test_timeout);
EXPECT_FALSE(barrier.DoneForTesting());
}
TEST(OneshotBarrier, DoneAfterWait_Sequential) {
- OneshotBarrier barrier;
+ OneshotBarrier barrier(test_timeout);
barrier.Start();
barrier.Wait();
EXPECT_TRUE(barrier.DoneForTesting());
@@ -41,7 +49,7 @@ class ThreadWaitingOnBarrier final : public base::Thread {
TEST(OneshotBarrier, DoneAfterWait_Concurrent) {
const int kThreadCount = 2;
- OneshotBarrier barrier;
+ OneshotBarrier barrier(test_timeout);
ThreadWaitingOnBarrier threads[kThreadCount];
for (int i = 0; i < kThreadCount; i++) {
threads[i].Initialize(&barrier);
@@ -59,7 +67,7 @@ TEST(OneshotBarrier, DoneAfterWait_Concurrent) {
TEST(OneshotBarrier, EarlyFinish_Concurrent) {
const int kThreadCount = 2;
- OneshotBarrier barrier;
+ OneshotBarrier barrier(test_timeout);
ThreadWaitingOnBarrier threads[kThreadCount];
// Test that one thread that actually finishes processing work before other
// threads call Start() will move the barrier into the Done state.
@@ -103,7 +111,7 @@ class CountingThread final : public base::Thread {
private:
void ProcessWork() {
- base::LockGuard<base::Mutex> guard(mutex_);
+ base::MutexGuard guard(mutex_);
processed_work_ += *work_;
*work_ = 0;
}
@@ -118,7 +126,7 @@ class CountingThread final : public base::Thread {
TEST(OneshotBarrier, Processing_Concurrent) {
const size_t kWorkCounter = 173173;
- OneshotBarrier barrier;
+ OneshotBarrier barrier(test_timeout);
base::Mutex mutex;
size_t work = 0;
CountingThread counting_thread(&barrier, &mutex, &work);
@@ -129,7 +137,7 @@ TEST(OneshotBarrier, Processing_Concurrent) {
for (size_t i = 0; i < kWorkCounter; i++) {
{
- base::LockGuard<base::Mutex> guard(&mutex);
+ base::MutexGuard guard(&mutex);
work++;
}
barrier.NotifyAll();
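For context on the new timeout parameter: a minimal sketch of how a
coordinating thread would drive the barrier, inferred from the calls exercised
in the tests above (the protocol details are assumptions, not a definitive
usage guide):

    // A generous timeout only bounds how long Wait() may block.
    OneshotBarrier barrier(base::TimeDelta::FromSeconds(30));
    barrier.Start();      // register the coordinator as a participant
    // ... workers call Start(), process work, then Wait() ...
    barrier.NotifyAll();  // wake waiting workers when new work arrives
    barrier.Wait();       // returns once every participant is done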
diff --git a/deps/v8/test/unittests/heap/embedder-tracing-unittest.cc b/deps/v8/test/unittests/heap/embedder-tracing-unittest.cc
index 33cc05e692..5bbbaceb3c 100644
--- a/deps/v8/test/unittests/heap/embedder-tracing-unittest.cc
+++ b/deps/v8/test/unittests/heap/embedder-tracing-unittest.cc
@@ -83,6 +83,14 @@ TEST(LocalEmbedderHeapTracer, EnterFinalPauseForwards) {
local_tracer.EnterFinalPause();
}
+TEST(LocalEmbedderHeapTracer, IsRemoteTracingDoneForwards) {
+ StrictMock<MockEmbedderHeapTracer> remote_tracer;
+ LocalEmbedderHeapTracer local_tracer(nullptr);
+ local_tracer.SetRemoteTracer(&remote_tracer);
+ EXPECT_CALL(remote_tracer, IsTracingDone());
+ local_tracer.IsRemoteTracingDone();
+}
+
TEST(LocalEmbedderHeapTracer, EnterFinalPauseDefaultStackStateUnkown) {
StrictMock<MockEmbedderHeapTracer> remote_tracer;
LocalEmbedderHeapTracer local_tracer(nullptr);
@@ -102,6 +110,36 @@ TEST(LocalEmbedderHeapTracer, EnterFinalPauseStackStateIsForwarded) {
local_tracer.EnterFinalPause();
}
+TEST(LocalEmbedderHeapTracer, TemporaryEmbedderStackState) {
+ StrictMock<MockEmbedderHeapTracer> remote_tracer;
+ LocalEmbedderHeapTracer local_tracer(nullptr);
+ local_tracer.SetRemoteTracer(&remote_tracer);
+ // Default is unknown, see above.
+ {
+ EmbedderStackStateScope scope(&local_tracer, EmbedderHeapTracer::kEmpty);
+ EXPECT_CALL(remote_tracer, EnterFinalPause(EmbedderHeapTracer::kEmpty));
+ local_tracer.EnterFinalPause();
+ }
+}
+
+TEST(LocalEmbedderHeapTracer, TemporaryEmbedderStackStateRestores) {
+ StrictMock<MockEmbedderHeapTracer> remote_tracer;
+ LocalEmbedderHeapTracer local_tracer(nullptr);
+ local_tracer.SetRemoteTracer(&remote_tracer);
+ // Default is unknown, see above.
+ {
+ EmbedderStackStateScope scope(&local_tracer, EmbedderHeapTracer::kEmpty);
+ {
+ EmbedderStackStateScope scope(&local_tracer,
+ EmbedderHeapTracer::kUnknown);
+ EXPECT_CALL(remote_tracer, EnterFinalPause(EmbedderHeapTracer::kUnknown));
+ local_tracer.EnterFinalPause();
+ }
+ EXPECT_CALL(remote_tracer, EnterFinalPause(EmbedderHeapTracer::kEmpty));
+ local_tracer.EnterFinalPause();
+ }
+}
+
TEST(LocalEmbedderHeapTracer, EnterFinalPauseStackStateResets) {
StrictMock<MockEmbedderHeapTracer> remote_tracer;
LocalEmbedderHeapTracer local_tracer(nullptr);
@@ -122,52 +160,19 @@ TEST(LocalEmbedderHeapTracer, IsRemoteTracingDoneIncludesRemote) {
local_tracer.IsRemoteTracingDone();
}
-TEST(LocalEmbedderHeapTracer, NumberOfCachedWrappersToTraceExcludesRemote) {
- LocalEmbedderHeapTracer local_tracer(nullptr);
- StrictMock<MockEmbedderHeapTracer> remote_tracer;
- local_tracer.SetRemoteTracer(&remote_tracer);
- local_tracer.NumberOfCachedWrappersToTrace();
-}
-
-TEST(LocalEmbedderHeapTracer, RegisterWrappersWithRemoteTracer) {
+TEST(LocalEmbedderHeapTracer, RegisterV8ReferencesWithRemoteTracer) {
StrictMock<MockEmbedderHeapTracer> remote_tracer;
LocalEmbedderHeapTracer local_tracer(nullptr);
local_tracer.SetRemoteTracer(&remote_tracer);
- local_tracer.AddWrapperToTrace(CreateWrapperInfo());
- EXPECT_EQ(1u, local_tracer.NumberOfCachedWrappersToTrace());
- EXPECT_CALL(remote_tracer, RegisterV8References(_));
- local_tracer.RegisterWrappersWithRemoteTracer();
- EXPECT_EQ(0u, local_tracer.NumberOfCachedWrappersToTrace());
+ {
+ LocalEmbedderHeapTracer::ProcessingScope scope(&local_tracer);
+ scope.AddWrapperInfoForTesting(CreateWrapperInfo());
+ EXPECT_CALL(remote_tracer, RegisterV8References(_));
+ }
EXPECT_CALL(remote_tracer, IsTracingDone()).WillOnce(Return(false));
EXPECT_FALSE(local_tracer.IsRemoteTracingDone());
}
-TEST(LocalEmbedderHeapTracer, TraceFinishes) {
- StrictMock<MockEmbedderHeapTracer> remote_tracer;
- LocalEmbedderHeapTracer local_tracer(nullptr);
- local_tracer.SetRemoteTracer(&remote_tracer);
- local_tracer.AddWrapperToTrace(CreateWrapperInfo());
- EXPECT_EQ(1u, local_tracer.NumberOfCachedWrappersToTrace());
- EXPECT_CALL(remote_tracer, RegisterV8References(_));
- local_tracer.RegisterWrappersWithRemoteTracer();
- EXPECT_CALL(remote_tracer, AdvanceTracing(_)).WillOnce(Return(true));
- EXPECT_TRUE(local_tracer.Trace(std::numeric_limits<double>::infinity()));
- EXPECT_EQ(0u, local_tracer.NumberOfCachedWrappersToTrace());
-}
-
-TEST(LocalEmbedderHeapTracer, TraceDoesNotFinish) {
- StrictMock<MockEmbedderHeapTracer> remote_tracer;
- LocalEmbedderHeapTracer local_tracer(nullptr);
- local_tracer.SetRemoteTracer(&remote_tracer);
- local_tracer.AddWrapperToTrace(CreateWrapperInfo());
- EXPECT_EQ(1u, local_tracer.NumberOfCachedWrappersToTrace());
- EXPECT_CALL(remote_tracer, RegisterV8References(_));
- local_tracer.RegisterWrappersWithRemoteTracer();
- EXPECT_CALL(remote_tracer, AdvanceTracing(_)).WillOnce(Return(false));
- EXPECT_FALSE(local_tracer.Trace(1.0));
- EXPECT_EQ(0u, local_tracer.NumberOfCachedWrappersToTrace());
-}
-
TEST_F(LocalEmbedderHeapTracerWithIsolate, SetRemoteTracerSetsIsolate) {
StrictMock<MockEmbedderHeapTracer> remote_tracer;
LocalEmbedderHeapTracer local_tracer(isolate());
diff --git a/deps/v8/test/unittests/heap/gc-tracer-unittest.cc b/deps/v8/test/unittests/heap/gc-tracer-unittest.cc
index ac18e1817b..b1e646fa2c 100644
--- a/deps/v8/test/unittests/heap/gc-tracer-unittest.cc
+++ b/deps/v8/test/unittests/heap/gc-tracer-unittest.cc
@@ -468,7 +468,7 @@ class GcHistogram {
static void CleanUp() { histograms_.clear(); }
- int Total() {
+ int Total() const {
int result = 0;
for (int i : samples_) {
result += i;
@@ -476,7 +476,7 @@ class GcHistogram {
return result;
}
- int Count() { return static_cast<int>(samples_.size()); }
+ int Count() const { return static_cast<int>(samples_.size()); }
private:
std::vector<int> samples_;
@@ -524,5 +524,27 @@ TEST_F(GCTracerTest, RecordScavengerHistograms) {
GcHistogram::CleanUp();
}
+TEST_F(GCTracerTest, RecordGCSumHistograms) {
+ if (FLAG_stress_incremental_marking) return;
+ isolate()->SetCreateHistogramFunction(&GcHistogram::CreateHistogram);
+ isolate()->SetAddHistogramSampleFunction(&GcHistogram::AddHistogramSample);
+ GCTracer* tracer = i_isolate()->heap()->tracer();
+ tracer->ResetForTesting();
+ tracer->current_
+ .incremental_marking_scopes[GCTracer::Scope::MC_INCREMENTAL_START]
+ .duration = 1;
+ tracer->current_
+ .incremental_marking_scopes[GCTracer::Scope::MC_INCREMENTAL_SWEEPING]
+ .duration = 2;
+ tracer->AddIncrementalMarkingStep(3.0, 1024);
+ tracer->current_
+ .incremental_marking_scopes[GCTracer::Scope::MC_INCREMENTAL_FINALIZE]
+ .duration = 4;
+ const double atomic_pause_duration = 5.0;
+ tracer->RecordGCSumCounters(atomic_pause_duration);
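+ // Expected total: 1 + 2 (incremental scopes) + 3 (marking step) +
+ // 4 (finalize scope) + 5 (atomic pause) = 15.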
+ EXPECT_EQ(15, GcHistogram::Get("V8.GCMarkCompactor")->Total());
+ GcHistogram::CleanUp();
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/heap/heap-unittest.cc b/deps/v8/test/unittests/heap/heap-unittest.cc
index dd14e22d54..53954d8178 100644
--- a/deps/v8/test/unittests/heap/heap-unittest.cc
+++ b/deps/v8/test/unittests/heap/heap-unittest.cc
@@ -20,6 +20,7 @@ namespace v8 {
namespace internal {
typedef TestWithIsolate HeapTest;
+typedef TestWithIsolateAndPointerCompression HeapWithPointerCompressionTest;
TEST(Heap, SemiSpaceSize) {
const size_t KB = static_cast<size_t>(i::KB);
@@ -61,15 +62,46 @@ TEST_F(HeapTest, ASLR) {
TEST_F(HeapTest, ExternalLimitDefault) {
Heap* heap = i_isolate()->heap();
- EXPECT_EQ(kExternalAllocationSoftLimit, heap->external_memory_limit_);
+ EXPECT_EQ(kExternalAllocationSoftLimit,
+ heap->isolate()->isolate_data()->external_memory_limit_);
}
TEST_F(HeapTest, ExternalLimitStaysAboveDefaultForExplicitHandling) {
v8_isolate()->AdjustAmountOfExternalAllocatedMemory(+10 * MB);
v8_isolate()->AdjustAmountOfExternalAllocatedMemory(-10 * MB);
Heap* heap = i_isolate()->heap();
- EXPECT_GE(heap->external_memory_limit_, kExternalAllocationSoftLimit);
+ EXPECT_GE(heap->isolate()->isolate_data()->external_memory_limit_,
+ kExternalAllocationSoftLimit);
}
+#if V8_TARGET_ARCH_64_BIT
+TEST_F(HeapWithPointerCompressionTest, HeapLayout) {
+ // Produce some garbage.
+ RunJS(
+ "let ar = [];"
+ "for (let i = 0; i < 100; i++) {"
+ " ar.push(Array(i));"
+ "}"
+ "ar.push(Array(32 * 1024 * 1024));");
+
+ Address isolate_root = i_isolate()->isolate_root();
+ EXPECT_TRUE(IsAligned(isolate_root, size_t{4} * GB));
+
+ // Check that all memory chunks belong this region.
+ base::AddressRegion heap_reservation(isolate_root - size_t{2} * GB,
+ size_t{4} * GB);
+
+ OldGenerationMemoryChunkIterator iter(i_isolate()->heap());
+ for (;;) {
+ MemoryChunk* chunk = iter.next();
+ if (chunk == nullptr) break;
+
+ Address address = chunk->address();
+ size_t size = chunk->area_end() - address;
+ EXPECT_TRUE(heap_reservation.contains(address, size));
+ }
+}
+#endif // V8_TARGET_ARCH_64_BIT
+
} // namespace internal
} // namespace v8
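Spelled out, the containment check in HeapLayout above is plain address
arithmetic over the 4 GB pointer-compression cage (a sketch under the same
assumptions as the test):

    // heap_reservation covers [isolate_root - 2 GB, isolate_root + 2 GB), so
    // a chunk of |size| bytes at |address| is inside the cage iff:
    bool contains = address >= isolate_root - size_t{2} * GB &&
                    address + size <= isolate_root + size_t{2} * GB;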
diff --git a/deps/v8/test/unittests/heap/marking-unittest.cc b/deps/v8/test/unittests/heap/marking-unittest.cc
index 0553dc0ea5..be26d4eebd 100644
--- a/deps/v8/test/unittests/heap/marking-unittest.cc
+++ b/deps/v8/test/unittests/heap/marking-unittest.cc
@@ -14,7 +14,7 @@ namespace internal {
TEST(Marking, TransitionWhiteBlackWhite) {
Bitmap* bitmap = reinterpret_cast<Bitmap*>(
- calloc(Bitmap::kSize / kPointerSize, kPointerSize));
+ calloc(Bitmap::kSize / kTaggedSize, kTaggedSize));
const int kLocationsSize = 3;
int position[kLocationsSize] = {
Bitmap::kBitsPerCell - 2, Bitmap::kBitsPerCell - 1, Bitmap::kBitsPerCell};
@@ -34,7 +34,7 @@ TEST(Marking, TransitionWhiteBlackWhite) {
TEST(Marking, TransitionWhiteGreyBlack) {
Bitmap* bitmap = reinterpret_cast<Bitmap*>(
- calloc(Bitmap::kSize / kPointerSize, kPointerSize));
+ calloc(Bitmap::kSize / kTaggedSize, kTaggedSize));
const int kLocationsSize = 3;
int position[kLocationsSize] = {
Bitmap::kBitsPerCell - 2, Bitmap::kBitsPerCell - 1, Bitmap::kBitsPerCell};
@@ -60,7 +60,7 @@ TEST(Marking, TransitionWhiteGreyBlack) {
TEST(Marking, SetAndClearRange) {
Bitmap* bitmap = reinterpret_cast<Bitmap*>(
- calloc(Bitmap::kSize / kPointerSize, kPointerSize));
+ calloc(Bitmap::kSize / kTaggedSize, kTaggedSize));
for (int i = 0; i < 3; i++) {
bitmap->SetRange(i, Bitmap::kBitsPerCell + i);
CHECK_EQ(reinterpret_cast<uint32_t*>(bitmap)[0], 0xFFFFFFFFu << i);
@@ -74,7 +74,7 @@ TEST(Marking, SetAndClearRange) {
TEST(Marking, ClearMultipleRanges) {
Bitmap* bitmap = reinterpret_cast<Bitmap*>(
- calloc(Bitmap::kSize / kPointerSize, kPointerSize));
+ calloc(Bitmap::kSize / kTaggedSize, kTaggedSize));
CHECK(bitmap->AllBitsClearInRange(0, Bitmap::kBitsPerCell * 3));
bitmap->SetRange(0, Bitmap::kBitsPerCell * 3);
CHECK_EQ(reinterpret_cast<uint32_t*>(bitmap)[0], 0xFFFFFFFFu);
diff --git a/deps/v8/test/unittests/heap/slot-set-unittest.cc b/deps/v8/test/unittests/heap/slot-set-unittest.cc
index aff6f02130..168bc9c7bc 100644
--- a/deps/v8/test/unittests/heap/slot-set-unittest.cc
+++ b/deps/v8/test/unittests/heap/slot-set-unittest.cc
@@ -8,6 +8,7 @@
#include "src/globals.h"
#include "src/heap/slot-set.h"
#include "src/heap/spaces.h"
+#include "src/objects/slots.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace v8 {
@@ -16,13 +17,13 @@ namespace internal {
TEST(SlotSet, InsertAndLookup1) {
SlotSet set;
set.SetPageStart(0);
- for (int i = 0; i < Page::kPageSize; i += kPointerSize) {
+ for (int i = 0; i < Page::kPageSize; i += kTaggedSize) {
EXPECT_FALSE(set.Lookup(i));
}
- for (int i = 0; i < Page::kPageSize; i += kPointerSize) {
+ for (int i = 0; i < Page::kPageSize; i += kTaggedSize) {
set.Insert(i);
}
- for (int i = 0; i < Page::kPageSize; i += kPointerSize) {
+ for (int i = 0; i < Page::kPageSize; i += kTaggedSize) {
EXPECT_TRUE(set.Lookup(i));
}
}
@@ -30,12 +31,12 @@ TEST(SlotSet, InsertAndLookup1) {
TEST(SlotSet, InsertAndLookup2) {
SlotSet set;
set.SetPageStart(0);
- for (int i = 0; i < Page::kPageSize; i += kPointerSize) {
+ for (int i = 0; i < Page::kPageSize; i += kTaggedSize) {
if (i % 7 == 0) {
set.Insert(i);
}
}
- for (int i = 0; i < Page::kPageSize; i += kPointerSize) {
+ for (int i = 0; i < Page::kPageSize; i += kTaggedSize) {
if (i % 7 == 0) {
EXPECT_TRUE(set.Lookup(i));
} else {
@@ -47,15 +48,15 @@ TEST(SlotSet, InsertAndLookup2) {
TEST(SlotSet, Iterate) {
SlotSet set;
set.SetPageStart(0);
- for (int i = 0; i < Page::kPageSize; i += kPointerSize) {
+ for (int i = 0; i < Page::kPageSize; i += kTaggedSize) {
if (i % 7 == 0) {
set.Insert(i);
}
}
set.Iterate(
- [](Address slot_address) {
- if (slot_address % 3 == 0) {
+ [](MaybeObjectSlot slot) {
+ if (slot.address() % 3 == 0) {
return KEEP_SLOT;
} else {
return REMOVE_SLOT;
@@ -63,7 +64,7 @@ TEST(SlotSet, Iterate) {
},
SlotSet::KEEP_EMPTY_BUCKETS);
- for (int i = 0; i < Page::kPageSize; i += kPointerSize) {
+ for (int i = 0; i < Page::kPageSize; i += kTaggedSize) {
if (i % 21 == 0) {
EXPECT_TRUE(set.Lookup(i));
} else {
@@ -75,19 +76,19 @@ TEST(SlotSet, Iterate) {
TEST(SlotSet, Remove) {
SlotSet set;
set.SetPageStart(0);
- for (int i = 0; i < Page::kPageSize; i += kPointerSize) {
+ for (int i = 0; i < Page::kPageSize; i += kTaggedSize) {
if (i % 7 == 0) {
set.Insert(i);
}
}
- for (int i = 0; i < Page::kPageSize; i += kPointerSize) {
+ for (int i = 0; i < Page::kPageSize; i += kTaggedSize) {
if (i % 3 != 0) {
set.Remove(i);
}
}
- for (int i = 0; i < Page::kPageSize; i += kPointerSize) {
+ for (int i = 0; i < Page::kPageSize; i += kTaggedSize) {
if (i % 21 == 0) {
EXPECT_TRUE(set.Lookup(i));
} else {
@@ -99,11 +100,11 @@ TEST(SlotSet, Remove) {
void CheckRemoveRangeOn(uint32_t start, uint32_t end) {
SlotSet set;
set.SetPageStart(0);
- uint32_t first = start == 0 ? 0 : start - kPointerSize;
- uint32_t last = end == Page::kPageSize ? end - kPointerSize : end;
+ uint32_t first = start == 0 ? 0 : start - kTaggedSize;
+ uint32_t last = end == Page::kPageSize ? end - kTaggedSize : end;
for (const auto mode :
{SlotSet::FREE_EMPTY_BUCKETS, SlotSet::KEEP_EMPTY_BUCKETS}) {
- for (uint32_t i = first; i <= last; i += kPointerSize) {
+ for (uint32_t i = first; i <= last; i += kTaggedSize) {
set.Insert(i);
}
set.RemoveRange(start, end, mode);
@@ -113,7 +114,7 @@ void CheckRemoveRangeOn(uint32_t start, uint32_t end) {
if (last == end) {
EXPECT_TRUE(set.Lookup(last));
}
- for (uint32_t i = start; i < end; i += kPointerSize) {
+ for (uint32_t i = start; i < end; i += kTaggedSize) {
EXPECT_FALSE(set.Lookup(i));
}
}
@@ -121,16 +122,16 @@ void CheckRemoveRangeOn(uint32_t start, uint32_t end) {
TEST(SlotSet, RemoveRange) {
CheckRemoveRangeOn(0, Page::kPageSize);
- CheckRemoveRangeOn(1 * kPointerSize, 1023 * kPointerSize);
+ CheckRemoveRangeOn(1 * kTaggedSize, 1023 * kTaggedSize);
for (uint32_t start = 0; start <= 32; start++) {
- CheckRemoveRangeOn(start * kPointerSize, (start + 1) * kPointerSize);
- CheckRemoveRangeOn(start * kPointerSize, (start + 2) * kPointerSize);
+ CheckRemoveRangeOn(start * kTaggedSize, (start + 1) * kTaggedSize);
+ CheckRemoveRangeOn(start * kTaggedSize, (start + 2) * kTaggedSize);
const uint32_t kEnds[] = {32, 64, 100, 128, 1024, 1500, 2048};
for (size_t i = 0; i < sizeof(kEnds) / sizeof(uint32_t); i++) {
for (int k = -3; k <= 3; k++) {
uint32_t end = (kEnds[i] + k);
if (start < end) {
- CheckRemoveRangeOn(start * kPointerSize, end * kPointerSize);
+ CheckRemoveRangeOn(start * kTaggedSize, end * kTaggedSize);
}
}
}
@@ -141,7 +142,7 @@ TEST(SlotSet, RemoveRange) {
{SlotSet::FREE_EMPTY_BUCKETS, SlotSet::KEEP_EMPTY_BUCKETS}) {
set.Insert(Page::kPageSize / 2);
set.RemoveRange(0, Page::kPageSize, mode);
- for (uint32_t i = 0; i < Page::kPageSize; i += kPointerSize) {
+ for (uint32_t i = 0; i < Page::kPageSize; i += kTaggedSize) {
EXPECT_FALSE(set.Lookup(i));
}
}
@@ -153,23 +154,18 @@ TEST(TypedSlotSet, Iterate) {
// for a MSVC++ bug about lambda captures, see the discussion at
// https://social.msdn.microsoft.com/Forums/SqlServer/4abf18bd-4ae4-4c72-ba3e-3b13e7909d5f
static const int kDelta = 10000001;
- static const int kHostDelta = 50001;
int added = 0;
- uint32_t j = 0;
- for (uint32_t i = 0; i < TypedSlotSet::kMaxOffset;
- i += kDelta, j += kHostDelta) {
+ for (uint32_t i = 0; i < TypedSlotSet::kMaxOffset; i += kDelta) {
SlotType type = static_cast<SlotType>(i % CLEARED_SLOT);
- set.Insert(type, j, i);
+ set.Insert(type, i);
++added;
}
int iterated = 0;
set.Iterate(
- [&iterated](SlotType type, Address host_addr, Address addr) {
+ [&iterated](SlotType type, Address addr) {
uint32_t i = static_cast<uint32_t>(addr);
- uint32_t j = static_cast<uint32_t>(host_addr);
EXPECT_EQ(i % CLEARED_SLOT, static_cast<uint32_t>(type));
EXPECT_EQ(0u, i % kDelta);
- EXPECT_EQ(0u, j % kHostDelta);
++iterated;
return i % 2 == 0 ? KEEP_SLOT : REMOVE_SLOT;
},
@@ -177,7 +173,7 @@ TEST(TypedSlotSet, Iterate) {
EXPECT_EQ(added, iterated);
iterated = 0;
set.Iterate(
- [&iterated](SlotType type, Address host_addr, Address addr) {
+ [&iterated](SlotType type, Address addr) {
uint32_t i = static_cast<uint32_t>(addr);
EXPECT_EQ(0u, i % 2);
++iterated;
@@ -187,13 +183,13 @@ TEST(TypedSlotSet, Iterate) {
EXPECT_EQ(added / 2, iterated);
}
-TEST(TypedSlotSet, RemoveInvalidSlots) {
+TEST(TypedSlotSet, ClearInvalidSlots) {
TypedSlotSet set(0);
const int kHostDelta = 100;
uint32_t entries = 10;
for (uint32_t i = 0; i < entries; i++) {
SlotType type = static_cast<SlotType>(i % CLEARED_SLOT);
- set.Insert(type, i * kHostDelta, i * kHostDelta);
+ set.Insert(type, i * kHostDelta);
}
std::map<uint32_t, uint32_t> invalid_ranges;
@@ -202,19 +198,48 @@ TEST(TypedSlotSet, RemoveInvalidSlots) {
std::pair<uint32_t, uint32_t>(i * kHostDelta, i * kHostDelta + 1));
}
- set.RemoveInvaldSlots(invalid_ranges);
+ set.ClearInvalidSlots(invalid_ranges);
for (std::map<uint32_t, uint32_t>::iterator it = invalid_ranges.begin();
it != invalid_ranges.end(); ++it) {
uint32_t start = it->first;
uint32_t end = it->second;
set.Iterate(
- [start, end](SlotType slot_type, Address host_addr, Address slot_addr) {
- CHECK(host_addr < start || host_addr >= end);
+ [=](SlotType slot_type, Address slot_addr) {
+ CHECK(slot_addr < start || slot_addr >= end);
return KEEP_SLOT;
},
TypedSlotSet::KEEP_EMPTY_CHUNKS);
}
}
+TEST(TypedSlotSet, Merge) {
+ TypedSlotSet set0(0), set1(0);
+ static const uint32_t kEntries = 10000;
+ for (uint32_t i = 0; i < kEntries; i++) {
+ set0.Insert(EMBEDDED_OBJECT_SLOT, 2 * i);
+ set1.Insert(EMBEDDED_OBJECT_SLOT, 2 * i + 1);
+ }
+ uint32_t count = 0;
+ set0.Merge(&set1);
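+ // After the merge set0 visits its own (even) slots first, then set1's
+ // (odd) slots, and set1 is left empty, as checked below.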
+ set0.Iterate(
+ [&count](SlotType slot_type, Address slot_addr) {
+ if (count < kEntries) {
+ CHECK_EQ(slot_addr % 2, 0);
+ } else {
+ CHECK_EQ(slot_addr % 2, 1);
+ }
+ ++count;
+ return KEEP_SLOT;
+ },
+ TypedSlotSet::KEEP_EMPTY_CHUNKS);
+ CHECK_EQ(2 * kEntries, count);
+ set1.Iterate(
+ [](SlotType slot_type, Address slot_addr) {
+ CHECK(false); // Unreachable.
+ return KEEP_SLOT;
+ },
+ TypedSlotSet::KEEP_EMPTY_CHUNKS);
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/heap/spaces-unittest.cc b/deps/v8/test/unittests/heap/spaces-unittest.cc
index 5266e54e09..de4bd39e1e 100644
--- a/deps/v8/test/unittests/heap/spaces-unittest.cc
+++ b/deps/v8/test/unittests/heap/spaces-unittest.cc
@@ -36,7 +36,7 @@ TEST_F(SpacesTest, CompactionSpaceMerge) {
const int kExpectedPages =
(kNumObjects + kNumObjectsPerPage - 1) / kNumObjectsPerPage;
for (int i = 0; i < kNumObjects; i++) {
- HeapObject* object =
+ HeapObject object =
compaction_space->AllocateRawUnaligned(kMaxRegularHeapObjectSize)
.ToObjectChecked();
heap->CreateFillerObjectAt(object->address(), kMaxRegularHeapObjectSize,
@@ -54,13 +54,13 @@ TEST_F(SpacesTest, CompactionSpaceMerge) {
TEST_F(SpacesTest, WriteBarrierFromHeapObject) {
constexpr Address address1 = Page::kPageSize;
- HeapObject* object1 = reinterpret_cast<HeapObject*>(address1);
+ HeapObject object1 = HeapObject::unchecked_cast(Object(address1));
MemoryChunk* chunk1 = MemoryChunk::FromHeapObject(object1);
heap_internals::MemoryChunk* slim_chunk1 =
heap_internals::MemoryChunk::FromHeapObject(object1);
EXPECT_EQ(static_cast<void*>(chunk1), static_cast<void*>(slim_chunk1));
constexpr Address address2 = 2 * Page::kPageSize - 1;
- HeapObject* object2 = reinterpret_cast<HeapObject*>(address2);
+ HeapObject object2 = HeapObject::unchecked_cast(Object(address2));
MemoryChunk* chunk2 = MemoryChunk::FromHeapObject(object2);
heap_internals::MemoryChunk* slim_chunk2 =
heap_internals::MemoryChunk::FromHeapObject(object2);
@@ -68,8 +68,9 @@ TEST_F(SpacesTest, WriteBarrierFromHeapObject) {
}
TEST_F(SpacesTest, WriteBarrierIsMarking) {
- char memory[256];
- memset(&memory, 0, sizeof(memory));
+ const size_t kSizeOfMemoryChunk = sizeof(MemoryChunk);
+ char memory[kSizeOfMemoryChunk];
+ memset(&memory, 0, kSizeOfMemoryChunk);
MemoryChunk* chunk = reinterpret_cast<MemoryChunk*>(&memory);
heap_internals::MemoryChunk* slim_chunk =
reinterpret_cast<heap_internals::MemoryChunk*>(&memory);
@@ -84,8 +85,9 @@ TEST_F(SpacesTest, WriteBarrierIsMarking) {
}
TEST_F(SpacesTest, WriteBarrierInNewSpaceToSpace) {
- char memory[256];
- memset(&memory, 0, sizeof(memory));
+ const size_t kSizeOfMemoryChunk = sizeof(MemoryChunk);
+ char memory[kSizeOfMemoryChunk];
+ memset(&memory, 0, kSizeOfMemoryChunk);
MemoryChunk* chunk = reinterpret_cast<MemoryChunk*>(&memory);
heap_internals::MemoryChunk* slim_chunk =
reinterpret_cast<heap_internals::MemoryChunk*>(&memory);
@@ -100,8 +102,9 @@ TEST_F(SpacesTest, WriteBarrierInNewSpaceToSpace) {
}
TEST_F(SpacesTest, WriteBarrierInNewSpaceFromSpace) {
- char memory[256];
- memset(&memory, 0, sizeof(memory));
+ const size_t kSizeOfMemoryChunk = sizeof(MemoryChunk);
+ char memory[kSizeOfMemoryChunk];
+ memset(&memory, 0, kSizeOfMemoryChunk);
MemoryChunk* chunk = reinterpret_cast<MemoryChunk*>(&memory);
heap_internals::MemoryChunk* slim_chunk =
reinterpret_cast<heap_internals::MemoryChunk*>(&memory);
diff --git a/deps/v8/test/unittests/heap/unmapper-unittest.cc b/deps/v8/test/unittests/heap/unmapper-unittest.cc
index e28e50e3b7..5b783c42b5 100644
--- a/deps/v8/test/unittests/heap/unmapper-unittest.cc
+++ b/deps/v8/test/unittests/heap/unmapper-unittest.cc
@@ -2,26 +2,237 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifdef __linux__
-#include <sys/mman.h>
-#undef MAP_TYPE
-#endif // __linux__
+#include <map>
+#include "src/base/region-allocator.h"
#include "src/heap/heap-inl.h"
#include "src/heap/spaces-inl.h"
#include "src/isolate.h"
+#include "src/ostreams.h"
#include "test/unittests/test-utils.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace v8 {
namespace internal {
+// This is a v8::PageAllocator implementation that decorates the provided page
+// allocator with page-tracking functionality.
+class TrackingPageAllocator : public ::v8::PageAllocator {
+ public:
+ explicit TrackingPageAllocator(v8::PageAllocator* page_allocator)
+ : page_allocator_(page_allocator),
+ allocate_page_size_(page_allocator_->AllocatePageSize()),
+ commit_page_size_(page_allocator_->CommitPageSize()),
+ region_allocator_(kNullAddress, size_t{0} - commit_page_size_,
+ commit_page_size_) {
+ CHECK_NOT_NULL(page_allocator);
+ CHECK(IsAligned(allocate_page_size_, commit_page_size_));
+ }
+ ~TrackingPageAllocator() override = default;
+
+ size_t AllocatePageSize() override { return allocate_page_size_; }
+
+ size_t CommitPageSize() override { return commit_page_size_; }
+
+ void SetRandomMmapSeed(int64_t seed) override {
+ return page_allocator_->SetRandomMmapSeed(seed);
+ }
+
+ void* GetRandomMmapAddr() override {
+ return page_allocator_->GetRandomMmapAddr();
+ }
+
+ void* AllocatePages(void* address, size_t size, size_t alignment,
+ PageAllocator::Permission access) override {
+ void* result =
+ page_allocator_->AllocatePages(address, size, alignment, access);
+ if (result) {
+ // Mark pages as used.
+ Address current_page = reinterpret_cast<Address>(result);
+ CHECK(IsAligned(current_page, allocate_page_size_));
+ CHECK(IsAligned(size, allocate_page_size_));
+ CHECK(region_allocator_.AllocateRegionAt(current_page, size));
+ Address end = current_page + size;
+ while (current_page < end) {
+ page_permissions_.insert({current_page, access});
+ current_page += commit_page_size_;
+ }
+ }
+ return result;
+ }
+
+ bool FreePages(void* address, size_t size) override {
+ bool result = page_allocator_->FreePages(address, size);
+ if (result) {
+ // Mark pages as free.
+ Address start = reinterpret_cast<Address>(address);
+ CHECK(IsAligned(start, allocate_page_size_));
+ CHECK(IsAligned(size, allocate_page_size_));
+ size_t freed_size = region_allocator_.FreeRegion(start);
+ CHECK(IsAligned(freed_size, commit_page_size_));
+ CHECK_EQ(RoundUp(freed_size, allocate_page_size_), size);
+ auto start_iter = page_permissions_.find(start);
+ CHECK_NE(start_iter, page_permissions_.end());
+ auto end_iter = page_permissions_.lower_bound(start + size);
+ page_permissions_.erase(start_iter, end_iter);
+ }
+ return result;
+ }
+
+ bool ReleasePages(void* address, size_t size, size_t new_size) override {
+ bool result = page_allocator_->ReleasePages(address, size, new_size);
+ if (result) {
+ Address start = reinterpret_cast<Address>(address);
+ CHECK(IsAligned(start, allocate_page_size_));
+ CHECK(IsAligned(size, commit_page_size_));
+ CHECK(IsAligned(new_size, commit_page_size_));
+ CHECK_LT(new_size, size);
+ CHECK_EQ(region_allocator_.TrimRegion(start, new_size), size - new_size);
+ auto start_iter = page_permissions_.find(start + new_size);
+ CHECK_NE(start_iter, page_permissions_.end());
+ auto end_iter = page_permissions_.lower_bound(start + size);
+ page_permissions_.erase(start_iter, end_iter);
+ }
+ return result;
+ }
+
+ bool SetPermissions(void* address, size_t size,
+ PageAllocator::Permission access) override {
+ bool result = page_allocator_->SetPermissions(address, size, access);
+ if (result) {
+ UpdatePagePermissions(reinterpret_cast<Address>(address), size, access);
+ }
+ return result;
+ }
+
+ // Returns true if all the allocated pages were freed.
+ bool IsEmpty() { return page_permissions_.empty(); }
+
+ void CheckIsFree(Address address, size_t size) {
+ CHECK(IsAligned(address, allocate_page_size_));
+ CHECK(IsAligned(size, allocate_page_size_));
+ EXPECT_TRUE(region_allocator_.IsFree(address, size));
+ }
+
+ void CheckPagePermissions(Address address, size_t size,
+ PageAllocator::Permission access) {
+ ForEachPage(address, size, [=](PagePermissionsMap::value_type* value) {
+ EXPECT_EQ(access, value->second);
+ });
+ }
+
+ void Print(const char* comment) const {
+ i::StdoutStream os;
+ os << "\n========================================="
+ << "\nTracingPageAllocator state: ";
+ if (comment) os << comment;
+ os << "\n-----------------------------------------\n";
+ region_allocator_.Print(os);
+ os << "-----------------------------------------"
+ << "\nPage permissions:";
+ if (page_permissions_.empty()) {
+ os << " empty\n";
+ return;
+ }
+ os << "\n" << std::hex << std::showbase;
+
+ Address contiguous_region_start = static_cast<Address>(-1);
+ Address contiguous_region_end = contiguous_region_start;
+ PageAllocator::Permission contiguous_region_access =
+ PageAllocator::kNoAccess;
+ for (auto& pair : page_permissions_) {
+ if (contiguous_region_end == pair.first &&
+ pair.second == contiguous_region_access) {
+ contiguous_region_end += commit_page_size_;
+ continue;
+ }
+ if (contiguous_region_start != contiguous_region_end) {
+ PrintRegion(os, contiguous_region_start, contiguous_region_end,
+ contiguous_region_access);
+ }
+ contiguous_region_start = pair.first;
+ contiguous_region_end = pair.first + commit_page_size_;
+ contiguous_region_access = pair.second;
+ }
+ if (contiguous_region_start != contiguous_region_end) {
+ PrintRegion(os, contiguous_region_start, contiguous_region_end,
+ contiguous_region_access);
+ }
+ }
+
+ private:
+ typedef std::map<Address, PageAllocator::Permission> PagePermissionsMap;
+ typedef std::function<void(PagePermissionsMap::value_type*)> ForEachFn;
+
+ static void PrintRegion(std::ostream& os, Address start, Address end,
+ PageAllocator::Permission access) {
+ os << " page: [" << start << ", " << end << "), access: ";
+ switch (access) {
+ case PageAllocator::kNoAccess:
+ os << "--";
+ break;
+ case PageAllocator::kRead:
+ os << "R";
+ break;
+ case PageAllocator::kReadWrite:
+ os << "RW";
+ break;
+ case PageAllocator::kReadWriteExecute:
+ os << "RWX";
+ break;
+ case PageAllocator::kReadExecute:
+ os << "RX";
+ break;
+ }
+ os << "\n";
+ }
+
+ void ForEachPage(Address address, size_t size, const ForEachFn& fn) {
+ CHECK(IsAligned(address, commit_page_size_));
+ CHECK(IsAligned(size, commit_page_size_));
+ auto start_iter = page_permissions_.find(address);
+ // Start page must exist in page_permissions_.
+ CHECK_NE(start_iter, page_permissions_.end());
+ auto end_iter = page_permissions_.find(address + size - commit_page_size_);
+ // Ensure that the last page of the range exists in page_permissions_.
+ CHECK_NE(end_iter, page_permissions_.end());
+ // Now make it point to the next element so that the following for loop
+ // also processes it.
+ ++end_iter;
+ for (auto iter = start_iter; iter != end_iter; ++iter) {
+ PagePermissionsMap::value_type& pair = *iter;
+ fn(&pair);
+ }
+ }
+
+ void UpdatePagePermissions(Address address, size_t size,
+ PageAllocator::Permission access) {
+ ForEachPage(address, size, [=](PagePermissionsMap::value_type* value) {
+ value->second = access;
+ });
+ }
+
+ v8::PageAllocator* const page_allocator_;
+ const size_t allocate_page_size_;
+ const size_t commit_page_size_;
+ // Region allocator tracks page allocation/deallocation requests.
+ base::RegionAllocator region_allocator_;
+ // This map keeps track of allocated pages' permissions.
+ PagePermissionsMap page_permissions_;
+};
+
class SequentialUnmapperTest : public TestWithIsolate {
public:
SequentialUnmapperTest() = default;
~SequentialUnmapperTest() override = default;
static void SetUpTestCase() {
+ CHECK_NULL(tracking_page_allocator_);
+ old_page_allocator_ = GetPlatformPageAllocator();
+ tracking_page_allocator_ = new TrackingPageAllocator(old_page_allocator_);
+ CHECK(tracking_page_allocator_->IsEmpty());
+ CHECK_EQ(old_page_allocator_,
+ SetPlatformPageAllocatorForTesting(tracking_page_allocator_));
old_flag_ = i::FLAG_concurrent_sweeping;
i::FLAG_concurrent_sweeping = false;
TestWithIsolate::SetUpTestCase();
@@ -30,57 +241,91 @@ class SequentialUnmapperTest : public TestWithIsolate {
static void TearDownTestCase() {
TestWithIsolate::TearDownTestCase();
i::FLAG_concurrent_sweeping = old_flag_;
+ CHECK(tracking_page_allocator_->IsEmpty());
+ delete tracking_page_allocator_;
+ tracking_page_allocator_ = nullptr;
}
Heap* heap() { return isolate()->heap(); }
MemoryAllocator* allocator() { return heap()->memory_allocator(); }
MemoryAllocator::Unmapper* unmapper() { return allocator()->unmapper(); }
+ TrackingPageAllocator* tracking_page_allocator() {
+ return tracking_page_allocator_;
+ }
+
private:
+ static TrackingPageAllocator* tracking_page_allocator_;
+ static v8::PageAllocator* old_page_allocator_;
static bool old_flag_;
DISALLOW_COPY_AND_ASSIGN(SequentialUnmapperTest);
};
+TrackingPageAllocator* SequentialUnmapperTest::tracking_page_allocator_ =
+ nullptr;
+v8::PageAllocator* SequentialUnmapperTest::old_page_allocator_ = nullptr;
bool SequentialUnmapperTest::old_flag_;
-#ifdef __linux__
-
// See v8:5945.
TEST_F(SequentialUnmapperTest, UnmapOnTeardownAfterAlreadyFreeingPooled) {
- Page* page =
- allocator()->AllocatePage(MemoryAllocator::PageAreaSize(OLD_SPACE),
- static_cast<PagedSpace*>(heap()->old_space()),
- Executability::NOT_EXECUTABLE);
+ Page* page = allocator()->AllocatePage(
+ MemoryChunkLayout::AllocatableMemoryInDataPage(),
+ static_cast<PagedSpace*>(heap()->old_space()),
+ Executability::NOT_EXECUTABLE);
EXPECT_NE(nullptr, page);
- const int page_size = getpagesize();
- void* start_address = reinterpret_cast<void*>(page->address());
- EXPECT_EQ(0, msync(start_address, page_size, MS_SYNC));
+ const size_t page_size = tracking_page_allocator()->AllocatePageSize();
+ tracking_page_allocator()->CheckPagePermissions(page->address(), page_size,
+ PageAllocator::kReadWrite);
allocator()->Free<MemoryAllocator::kPooledAndQueue>(page);
- EXPECT_EQ(0, msync(start_address, page_size, MS_SYNC));
+ tracking_page_allocator()->CheckPagePermissions(page->address(), page_size,
+ PageAllocator::kReadWrite);
unmapper()->FreeQueuedChunks();
- EXPECT_EQ(0, msync(start_address, page_size, MS_SYNC));
+ tracking_page_allocator()->CheckPagePermissions(page->address(), page_size,
+ PageAllocator::kNoAccess);
unmapper()->TearDown();
- EXPECT_EQ(-1, msync(start_address, page_size, MS_SYNC));
+ if (i_isolate()->isolate_allocation_mode() ==
+ IsolateAllocationMode::kInV8Heap) {
+ // In this mode the Isolate uses a bounded page allocator that allocates
+ // pages inside a pre-reserved region, so these pages stay reserved until
+ // the Isolate dies.
+ tracking_page_allocator()->CheckPagePermissions(page->address(), page_size,
+ PageAllocator::kNoAccess);
+ } else {
+ CHECK_EQ(IsolateAllocationMode::kInCppHeap,
+ i_isolate()->isolate_allocation_mode());
+ tracking_page_allocator()->CheckIsFree(page->address(), page_size);
+ }
}
// See v8:5945.
TEST_F(SequentialUnmapperTest, UnmapOnTeardown) {
- Page* page =
- allocator()->AllocatePage(MemoryAllocator::PageAreaSize(OLD_SPACE),
- static_cast<PagedSpace*>(heap()->old_space()),
- Executability::NOT_EXECUTABLE);
+ Page* page = allocator()->AllocatePage(
+ MemoryChunkLayout::AllocatableMemoryInDataPage(),
+ static_cast<PagedSpace*>(heap()->old_space()),
+ Executability::NOT_EXECUTABLE);
EXPECT_NE(nullptr, page);
- const int page_size = getpagesize();
- void* start_address = reinterpret_cast<void*>(page->address());
- EXPECT_EQ(0, msync(start_address, page_size, MS_SYNC));
+ const size_t page_size = tracking_page_allocator()->AllocatePageSize();
+ tracking_page_allocator()->CheckPagePermissions(page->address(), page_size,
+ PageAllocator::kReadWrite);
+
allocator()->Free<MemoryAllocator::kPooledAndQueue>(page);
- EXPECT_EQ(0, msync(start_address, page_size, MS_SYNC));
+ tracking_page_allocator()->CheckPagePermissions(page->address(), page_size,
+ PageAllocator::kReadWrite);
unmapper()->TearDown();
- EXPECT_EQ(-1, msync(start_address, page_size, MS_SYNC));
+ if (i_isolate()->isolate_allocation_mode() ==
+ IsolateAllocationMode::kInV8Heap) {
+ // In this mode the Isolate uses a bounded page allocator that allocates
+ // pages inside a pre-reserved region, so these pages stay reserved until
+ // the Isolate dies.
+ tracking_page_allocator()->CheckPagePermissions(page->address(), page_size,
+ PageAllocator::kNoAccess);
+ } else {
+ CHECK_EQ(IsolateAllocationMode::kInCppHeap,
+ i_isolate()->isolate_allocation_mode());
+ tracking_page_allocator()->CheckIsFree(page->address(), page_size);
+ }
}
-#endif // __linux__
-
} // namespace internal
} // namespace v8
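
Taken together, the two rewritten tests pin down the pooled-page lifecycle: AllocatePage() leaves the page kReadWrite, Free<kPooledAndQueue>() keeps it committed, FreeQueuedChunks() drops it to kNoAccess, and TearDown() either releases the reservation or, when the Isolate lives in the pre-reserved V8-heap region, leaves it reserved but inaccessible. A toy model of that state machine, with invented names, might look like:

    #include <cstdint>
    #include <map>
    #include <vector>

    enum class PageState { kReadWrite, kNoAccess, kUnreserved };

    // Toy unmapper: freed pages stay committed while queued, a later sweep
    // uncommits them, and teardown finally releases the reservation.
    class ToyUnmapper {
     public:
      explicit ToyUnmapper(std::map<uintptr_t, PageState>* states)
          : states_(states) {}

      // Free<kPooledAndQueue> analogue: the page is only queued, so it
      // keeps its kReadWrite mapping for now.
      void FreePooledAndQueue(uintptr_t page) { queued_.push_back(page); }

      // FreeQueuedChunks() analogue: uncommit the queued pages.
      void FreeQueuedChunks() {
        for (uintptr_t p : queued_) (*states_)[p] = PageState::kNoAccess;
        uncommitted_.insert(uncommitted_.end(), queued_.begin(), queued_.end());
        queued_.clear();
      }

      // TearDown() analogue: give the reservation back to the OS. Under the
      // kInV8Heap mode modelled in the tests, this step would instead keep
      // the pages reserved (still kNoAccess) until the Isolate dies.
      void TearDown() {
        for (uintptr_t p : uncommitted_) (*states_)[p] = PageState::kUnreserved;
        uncommitted_.clear();
      }

     private:
      std::map<uintptr_t, PageState>* states_;
      std::vector<uintptr_t> queued_;
      std::vector<uintptr_t> uncommitted_;
    };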
diff --git a/deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc b/deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc
index a2c8d94793..ed53b8b0d2 100644
--- a/deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc
@@ -13,6 +13,7 @@
#include "src/interpreter/bytecode-label.h"
#include "src/interpreter/bytecode-register-allocator.h"
#include "src/objects-inl.h"
+#include "src/objects/smi.h"
#include "test/unittests/interpreter/bytecode-utils.h"
#include "test/unittests/test-utils.h"
@@ -54,7 +55,7 @@ TEST_F(BytecodeArrayBuilderTest, AllBytecodesGenerated) {
.CreateArguments(CreateArgumentsType::kRestParameter);
// Emit constant loads.
- builder.LoadLiteral(Smi::kZero)
+ builder.LoadLiteral(Smi::zero())
.StoreAccumulatorInRegister(reg)
.LoadLiteral(Smi::FromInt(8))
.CompareOperation(Token::Value::EQ, reg,
@@ -179,7 +180,7 @@ TEST_F(BytecodeArrayBuilderTest, AllBytecodesGenerated) {
// Emit literal creation operations.
builder.CreateRegExpLiteral(ast_factory.GetOneByteString("a"), 0, 0);
builder.CreateArrayLiteral(0, 0, 0);
- builder.CreateObjectLiteral(0, 0, 0, reg);
+ builder.CreateObjectLiteral(0, 0, 0);
// Emit tagged template operations.
builder.GetTemplateObject(0, 0);
@@ -380,7 +381,7 @@ TEST_F(BytecodeArrayBuilderTest, AllBytecodesGenerated) {
.CreateArrayLiteral(0, 0, 0)
.CreateEmptyArrayLiteral(0)
.CreateArrayFromIterable()
- .CreateObjectLiteral(0, 0, 0, reg)
+ .CreateObjectLiteral(0, 0, 0)
.CreateEmptyObjectLiteral()
.CloneObject(reg, 0, 0);
@@ -488,12 +489,12 @@ TEST_F(BytecodeArrayBuilderTest, FrameSizesLookGood) {
BytecodeArrayBuilder builder(zone(), 1, locals);
BytecodeRegisterAllocator* allocator(builder.register_allocator());
for (int i = 0; i < locals; i++) {
- builder.LoadLiteral(Smi::kZero);
+ builder.LoadLiteral(Smi::zero());
builder.StoreAccumulatorInRegister(Register(i));
}
for (int i = 0; i < temps; i++) {
Register temp = allocator->NewRegister();
- builder.LoadLiteral(Smi::kZero);
+ builder.LoadLiteral(Smi::zero());
builder.StoreAccumulatorInRegister(temp);
// Ensure temporaries are used so not optimized away by the
// register optimizer.
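
The mechanical Smi* -> Smi and Smi::kZero -> Smi::zero() substitutions in this and the following test files reflect Smi becoming a small value type that wraps the tagged word itself rather than an object reached through a pointer. A rough sketch of the shape such a type can take (illustrative only, not V8's actual tagging scheme):

    #include <cstdint>

    // Sketch of a tagged small integer as a value type: the object *is* the
    // tagged word, copied by value and compared by bit pattern, with no
    // pointer indirection.
    class Smi {
     public:
      static constexpr Smi zero() { return FromInt(0); }
      static constexpr Smi FromInt(int value) {
        // Widen through the unsigned type so the shift is well defined for
        // negative values too.
        return Smi(static_cast<intptr_t>(static_cast<uintptr_t>(value) << 1));
      }

      constexpr bool operator==(Smi other) const { return bits_ == other.bits_; }
      int value() const {
        return static_cast<int>(bits_ >> 1);  // arithmetic shift assumed
      }

     private:
      explicit constexpr Smi(intptr_t bits) : bits_(bits) {}
      intptr_t bits_;  // low bit 0 marks "this word is a Smi"
    };

    static_assert(Smi::zero() == Smi::FromInt(0), "zero() is just FromInt(0)");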
diff --git a/deps/v8/test/unittests/interpreter/bytecode-array-iterator-unittest.cc b/deps/v8/test/unittests/interpreter/bytecode-array-iterator-unittest.cc
index 69d0e96507..ec70605dde 100644
--- a/deps/v8/test/unittests/interpreter/bytecode-array-iterator-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/bytecode-array-iterator-unittest.cc
@@ -7,6 +7,7 @@
#include "src/interpreter/bytecode-array-builder.h"
#include "src/interpreter/bytecode-array-iterator.h"
#include "src/objects-inl.h"
+#include "src/objects/smi.h"
#include "test/unittests/interpreter/bytecode-utils.h"
#include "test/unittests/test-utils.h"
@@ -29,9 +30,9 @@ TEST_F(BytecodeArrayIteratorTest, IteratesBytecodeArray) {
isolate()->heap()->HashSeed());
double heap_num_0 = 2.718;
double heap_num_1 = 2.0 * Smi::kMaxValue;
- Smi* zero = Smi::kZero;
- Smi* smi_0 = Smi::FromInt(64);
- Smi* smi_1 = Smi::FromInt(-65536);
+ Smi zero = Smi::zero();
+ Smi smi_0 = Smi::FromInt(64);
+ Smi smi_1 = Smi::FromInt(-65536);
Register reg_0(0);
Register reg_1(1);
RegisterList pair = BytecodeUtils::NewRegisterList(0, 2);
diff --git a/deps/v8/test/unittests/interpreter/bytecode-array-random-iterator-unittest.cc b/deps/v8/test/unittests/interpreter/bytecode-array-random-iterator-unittest.cc
index 71c79300f3..2e2d92628f 100644
--- a/deps/v8/test/unittests/interpreter/bytecode-array-random-iterator-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/bytecode-array-random-iterator-unittest.cc
@@ -7,6 +7,7 @@
#include "src/interpreter/bytecode-array-builder.h"
#include "src/interpreter/bytecode-array-random-iterator.h"
#include "src/objects-inl.h"
+#include "src/objects/smi.h"
#include "test/unittests/interpreter/bytecode-utils.h"
#include "test/unittests/test-utils.h"
@@ -29,9 +30,9 @@ TEST_F(BytecodeArrayRandomIteratorTest, InvalidBeforeStart) {
isolate()->heap()->HashSeed());
double heap_num_0 = 2.718;
double heap_num_1 = 2.0 * Smi::kMaxValue;
- Smi* zero = Smi::kZero;
- Smi* smi_0 = Smi::FromInt(64);
- Smi* smi_1 = Smi::FromInt(-65536);
+ Smi zero = Smi::zero();
+ Smi smi_0 = Smi::FromInt(64);
+ Smi smi_1 = Smi::FromInt(-65536);
Register reg_0(0);
Register reg_1(1);
RegisterList pair = BytecodeUtils::NewRegisterList(0, 2);
@@ -83,9 +84,9 @@ TEST_F(BytecodeArrayRandomIteratorTest, InvalidAfterEnd) {
isolate()->heap()->HashSeed());
double heap_num_0 = 2.718;
double heap_num_1 = 2.0 * Smi::kMaxValue;
- Smi* zero = Smi::kZero;
- Smi* smi_0 = Smi::FromInt(64);
- Smi* smi_1 = Smi::FromInt(-65536);
+ Smi zero = Smi::zero();
+ Smi smi_0 = Smi::FromInt(64);
+ Smi smi_1 = Smi::FromInt(-65536);
Register reg_0(0);
Register reg_1(1);
RegisterList pair = BytecodeUtils::NewRegisterList(0, 2);
@@ -137,9 +138,9 @@ TEST_F(BytecodeArrayRandomIteratorTest, AccessesFirst) {
isolate()->heap()->HashSeed());
double heap_num_0 = 2.718;
double heap_num_1 = 2.0 * Smi::kMaxValue;
- Smi* zero = Smi::kZero;
- Smi* smi_0 = Smi::FromInt(64);
- Smi* smi_1 = Smi::FromInt(-65536);
+ Smi zero = Smi::zero();
+ Smi smi_0 = Smi::FromInt(64);
+ Smi smi_1 = Smi::FromInt(-65536);
Register reg_0(0);
Register reg_1(1);
RegisterList pair = BytecodeUtils::NewRegisterList(0, 2);
@@ -195,9 +196,9 @@ TEST_F(BytecodeArrayRandomIteratorTest, AccessesLast) {
isolate()->heap()->HashSeed());
double heap_num_0 = 2.718;
double heap_num_1 = 2.0 * Smi::kMaxValue;
- Smi* zero = Smi::kZero;
- Smi* smi_0 = Smi::FromInt(64);
- Smi* smi_1 = Smi::FromInt(-65536);
+ Smi zero = Smi::zero();
+ Smi smi_0 = Smi::FromInt(64);
+ Smi smi_1 = Smi::FromInt(-65536);
Register reg_0(0);
Register reg_1(1);
RegisterList pair = BytecodeUtils::NewRegisterList(0, 2);
@@ -254,9 +255,9 @@ TEST_F(BytecodeArrayRandomIteratorTest, RandomAccessValid) {
isolate()->heap()->HashSeed());
double heap_num_0 = 2.718;
double heap_num_1 = 2.0 * Smi::kMaxValue;
- Smi* zero = Smi::kZero;
- Smi* smi_0 = Smi::FromInt(64);
- Smi* smi_1 = Smi::FromInt(-65536);
+ Smi zero = Smi::zero();
+ Smi smi_0 = Smi::FromInt(64);
+ Smi smi_1 = Smi::FromInt(-65536);
Register reg_0(0);
Register reg_1(1);
RegisterList pair = BytecodeUtils::NewRegisterList(0, 2);
@@ -439,9 +440,9 @@ TEST_F(BytecodeArrayRandomIteratorTest, IteratesBytecodeArray) {
isolate()->heap()->HashSeed());
double heap_num_0 = 2.718;
double heap_num_1 = 2.0 * Smi::kMaxValue;
- Smi* zero = Smi::kZero;
- Smi* smi_0 = Smi::FromInt(64);
- Smi* smi_1 = Smi::FromInt(-65536);
+ Smi zero = Smi::zero();
+ Smi smi_0 = Smi::FromInt(64);
+ Smi smi_1 = Smi::FromInt(-65536);
Register reg_0(0);
Register reg_1(1);
RegisterList pair = BytecodeUtils::NewRegisterList(0, 2);
@@ -718,9 +719,9 @@ TEST_F(BytecodeArrayRandomIteratorTest, IteratesBytecodeArrayBackwards) {
isolate()->heap()->HashSeed());
double heap_num_0 = 2.718;
double heap_num_1 = 2.0 * Smi::kMaxValue;
- Smi* zero = Smi::kZero;
- Smi* smi_0 = Smi::FromInt(64);
- Smi* smi_1 = Smi::FromInt(-65536);
+ Smi zero = Smi::zero();
+ Smi smi_0 = Smi::FromInt(64);
+ Smi smi_1 = Smi::FromInt(-65536);
Register reg_0(0);
Register reg_1(1);
RegisterList pair = BytecodeUtils::NewRegisterList(0, 2);
diff --git a/deps/v8/test/unittests/interpreter/bytecode-decoder-unittest.cc b/deps/v8/test/unittests/interpreter/bytecode-decoder-unittest.cc
index eb4fdbb745..9c010f25e3 100644
--- a/deps/v8/test/unittests/interpreter/bytecode-decoder-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/bytecode-decoder-unittest.cc
@@ -58,11 +58,10 @@ TEST(BytecodeDecoder, DecodeBytecodeAndOperands) {
2,
3,
" Ldar a1"},
- {{B(Wide), B(CreateObjectLiteral), U16(513), U16(1027), U8(165),
- R16(137)},
- 9,
+ {{B(Wide), B(CreateObjectLiteral), U16(513), U16(1027), U8(165)},
+ 7,
0,
- "CreateObjectLiteral.Wide [513], [1027], #165, r137"},
+ "CreateObjectLiteral.Wide [513], [1027], #165"},
{{B(ExtraWide), B(JumpIfNull), U32(123456789)},
6,
0,
diff --git a/deps/v8/test/unittests/interpreter/bytecodes-unittest.cc b/deps/v8/test/unittests/interpreter/bytecodes-unittest.cc
index ce6ba81fc1..16b4e80489 100644
--- a/deps/v8/test/unittests/interpreter/bytecodes-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/bytecodes-unittest.cc
@@ -88,7 +88,7 @@ TEST(OperandScaling, ScalableAndNonScalable) {
CHECK_EQ(Bytecodes::Size(Bytecode::kCallRuntime, operand_scale),
1 + 2 + 2 * scale);
CHECK_EQ(Bytecodes::Size(Bytecode::kCreateObjectLiteral, operand_scale),
- 1 + 2 * scale + 1 + 1 * scale);
+ 1 + 2 * scale + 1);
CHECK_EQ(Bytecodes::Size(Bytecode::kTestIn, operand_scale), 1 + scale);
}
}
diff --git a/deps/v8/test/unittests/interpreter/constant-array-builder-unittest.cc b/deps/v8/test/unittests/interpreter/constant-array-builder-unittest.cc
index 46bbb900c0..55f1cacf56 100644
--- a/deps/v8/test/unittests/interpreter/constant-array-builder-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/constant-array-builder-unittest.cc
@@ -138,7 +138,7 @@ TEST_F(ConstantArrayBuilderTest, AllocateEntriesWithIdx8Reservations) {
CHECK_EQ(operand_size, OperandSize::kByte);
}
for (size_t i = 0; i < duplicates_in_idx8_space; i++) {
- Smi* value = Smi::FromInt(static_cast<int>(2 * k8BitCapacity + i));
+ Smi value = Smi::FromInt(static_cast<int>(2 * k8BitCapacity + i));
size_t index = builder.CommitReservedEntry(OperandSize::kByte, value);
CHECK_EQ(index, k8BitCapacity - reserved + i);
}
@@ -155,13 +155,13 @@ TEST_F(ConstantArrayBuilderTest, AllocateEntriesWithIdx8Reservations) {
// Check all committed values match expected
for (size_t i = 0; i < k8BitCapacity - reserved; i++) {
- Object* value = constant_array->get(static_cast<int>(i));
- Smi* smi = Smi::FromInt(static_cast<int>(i));
+ Object value = constant_array->get(static_cast<int>(i));
+ Smi smi = Smi::FromInt(static_cast<int>(i));
CHECK(value->SameValue(smi));
}
for (size_t i = k8BitCapacity; i < 2 * k8BitCapacity + reserved; i++) {
- Object* value = constant_array->get(static_cast<int>(i));
- Smi* smi = Smi::FromInt(static_cast<int>(i - reserved));
+ Object value = constant_array->get(static_cast<int>(i));
+ Smi smi = Smi::FromInt(static_cast<int>(i - reserved));
CHECK(value->SameValue(smi));
}
}
@@ -207,7 +207,7 @@ TEST_F(ConstantArrayBuilderTest, AllocateEntriesWithWideReservations) {
CHECK_EQ(constant_array->length(),
static_cast<int>(k8BitCapacity + reserved));
for (size_t i = 0; i < k8BitCapacity + reserved; i++) {
- Object* value = constant_array->get(static_cast<int>(i));
+ Object value = constant_array->get(static_cast<int>(i));
CHECK(value->SameValue(*isolate()->factory()->NewNumberFromSize(i)));
}
}
@@ -237,8 +237,8 @@ TEST_F(ConstantArrayBuilderTest, GapFilledWhenLowReservationCommitted) {
Handle<FixedArray> constant_array = builder.ToFixedArray(isolate());
CHECK_EQ(constant_array->length(), static_cast<int>(2 * k8BitCapacity));
for (size_t i = 0; i < k8BitCapacity; i++) {
- Object* original = constant_array->get(static_cast<int>(k8BitCapacity + i));
- Object* duplicate = constant_array->get(static_cast<int>(i));
+ Object original = constant_array->get(static_cast<int>(k8BitCapacity + i));
+ Object duplicate = constant_array->get(static_cast<int>(i));
CHECK(original->SameValue(duplicate));
Handle<Object> reference = isolate()->factory()->NewNumberFromSize(i);
CHECK(original->SameValue(*reference));
diff --git a/deps/v8/test/unittests/microtask-queue-unittest.cc b/deps/v8/test/unittests/microtask-queue-unittest.cc
new file mode 100644
index 0000000000..cc2c7f0de7
--- /dev/null
+++ b/deps/v8/test/unittests/microtask-queue-unittest.cc
@@ -0,0 +1,187 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/microtask-queue.h"
+
+#include <algorithm>
+#include <functional>
+#include <memory>
+#include <vector>
+
+#include "src/heap/factory.h"
+#include "src/objects/foreign.h"
+#include "src/visitors.h"
+#include "test/unittests/test-utils.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace v8 {
+namespace internal {
+
+using Closure = std::function<void()>;
+
+void RunStdFunction(void* data) {
+ std::unique_ptr<Closure> f(static_cast<Closure*>(data));
+ (*f)();
+}
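+
+The function above is a trampoline: the callback-task machinery only
+understands a plain function pointer plus a void* payload, so each test
+closure is heap-allocated, smuggled through the data pointer, and reclaimed
+by unique_ptr inside the trampoline so it is freed exactly once. The same
+pattern in isolation (standalone sketch; InvokeLater stands in for the real
+dispatch):
+
+    #include <cstdio>
+    #include <functional>
+    #include <memory>
+
+    using Callback = void (*)(void* data);
+
+    // Trampoline: take ownership back from the void* and run the closure.
+    void RunClosure(void* data) {
+      std::unique_ptr<std::function<void()>> f(
+          static_cast<std::function<void()>*>(data));
+      (*f)();
+    }  // unique_ptr deletes the closure here
+
+    // A C-style API that only understands (function pointer, void*) pairs.
+    void InvokeLater(Callback cb, void* data) { cb(data); }
+
+    int main() {
+      int counter = 0;
+      // Heap-allocate the closure so it outlives this scope until the call.
+      auto* closure = new std::function<void()>([&counter] { ++counter; });
+      InvokeLater(&RunClosure, closure);
+      std::printf("counter = %d\n", counter);  // prints 1
+    }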
+
+class MicrotaskQueueTest : public TestWithNativeContext {
+ public:
+ template <typename F>
+ Handle<Microtask> NewMicrotask(F&& f) {
+ Handle<Foreign> runner =
+ factory()->NewForeign(reinterpret_cast<Address>(&RunStdFunction));
+ Handle<Foreign> data = factory()->NewForeign(
+ reinterpret_cast<Address>(new Closure(std::forward<F>(f))));
+ return factory()->NewCallbackTask(runner, data);
+ }
+
+ void SetUp() override {
+ microtask_queue_ = MicrotaskQueue::New(isolate());
+ native_context()->set_microtask_queue(microtask_queue());
+ }
+
+ void TearDown() override {
+ if (microtask_queue()) {
+ microtask_queue()->RunMicrotasks(isolate());
+ context()->DetachGlobal();
+ }
+ }
+
+ MicrotaskQueue* microtask_queue() const { return microtask_queue_.get(); }
+
+ void ClearTestMicrotaskQueue() {
+ context()->DetachGlobal();
+ microtask_queue_ = nullptr;
+ }
+
+ private:
+ std::unique_ptr<MicrotaskQueue> microtask_queue_;
+};
+
+class RecordingVisitor : public RootVisitor {
+ public:
+ RecordingVisitor() = default;
+ ~RecordingVisitor() override = default;
+
+ void VisitRootPointers(Root root, const char* description,
+ FullObjectSlot start, FullObjectSlot end) override {
+ for (FullObjectSlot current = start; current != end; ++current) {
+ visited_.push_back(*current);
+ }
+ }
+
+ const std::vector<Object>& visited() const { return visited_; }
+
+ private:
+ std::vector<Object> visited_;
+};
+
+// Sanity check. Ensure a microtask is stored in a queue and run.
+TEST_F(MicrotaskQueueTest, EnqueueAndRun) {
+ bool ran = false;
+ EXPECT_EQ(0, microtask_queue()->capacity());
+ EXPECT_EQ(0, microtask_queue()->size());
+ microtask_queue()->EnqueueMicrotask(*NewMicrotask([&ran] {
+ EXPECT_FALSE(ran);
+ ran = true;
+ }));
+ EXPECT_EQ(MicrotaskQueue::kMinimumCapacity, microtask_queue()->capacity());
+ EXPECT_EQ(1, microtask_queue()->size());
+ microtask_queue()->RunMicrotasks(isolate());
+ EXPECT_TRUE(ran);
+ EXPECT_EQ(0, microtask_queue()->size());
+}
+
+// Check for buffer growth.
+TEST_F(MicrotaskQueueTest, BufferGrowth) {
+ int count = 0;
+
+ // Enqueue and flush the queue first to have non-zero |start_|.
+ microtask_queue()->EnqueueMicrotask(
+ *NewMicrotask([&count] { EXPECT_EQ(0, count++); }));
+ microtask_queue()->RunMicrotasks(isolate());
+
+ EXPECT_LT(0, microtask_queue()->capacity());
+ EXPECT_EQ(0, microtask_queue()->size());
+ EXPECT_EQ(1, microtask_queue()->start());
+
+ // Fill the queue with Microtasks.
+ for (int i = 1; i <= MicrotaskQueue::kMinimumCapacity; ++i) {
+ microtask_queue()->EnqueueMicrotask(
+ *NewMicrotask([&count, i] { EXPECT_EQ(i, count++); }));
+ }
+ EXPECT_EQ(MicrotaskQueue::kMinimumCapacity, microtask_queue()->capacity());
+ EXPECT_EQ(MicrotaskQueue::kMinimumCapacity, microtask_queue()->size());
+
+ // Add another to grow the ring buffer.
+ microtask_queue()->EnqueueMicrotask(*NewMicrotask(
+ [&] { EXPECT_EQ(MicrotaskQueue::kMinimumCapacity + 1, count++); }));
+
+ EXPECT_LT(MicrotaskQueue::kMinimumCapacity, microtask_queue()->capacity());
+ EXPECT_EQ(MicrotaskQueue::kMinimumCapacity + 1, microtask_queue()->size());
+
+ // Run all pending Microtasks to ensure they run in the proper order.
+ microtask_queue()->RunMicrotasks(isolate());
+ EXPECT_EQ(MicrotaskQueue::kMinimumCapacity + 2, count);
+}
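+
+This test depends on the queue being a ring buffer: the initial
+enqueue-and-run makes start() non-zero, so the in-use region can wrap around
+the end of the backing store, and growth has to copy it back out in logical
+order. A minimal sketch of such a buffer (an assumed layout, not the real
+MicrotaskQueue internals):
+
+    #include <cstddef>
+    #include <vector>
+
+    // Growable ring buffer; on growth, elements are copied out in logical
+    // order so the new buffer starts unwrapped at index 0.
+    template <typename T>
+    class Ring {
+     public:
+      void push(T value) {
+        if (size_ == buf_.size()) grow();
+        buf_[(start_ + size_) % buf_.size()] = value;
+        ++size_;
+      }
+
+      T pop() {
+        T value = buf_[start_];
+        start_ = (start_ + 1) % buf_.size();
+        --size_;
+        return value;
+      }
+
+      size_t size() const { return size_; }
+
+     private:
+      void grow() {
+        // 8 plays the role of kMinimumCapacity here.
+        size_t new_cap = buf_.empty() ? 8 : buf_.size() * 2;
+        std::vector<T> bigger(new_cap);
+        for (size_t i = 0; i < size_; ++i)
+          bigger[i] = buf_[(start_ + i) % buf_.size()];  // un-wrap on copy
+        buf_ = std::move(bigger);
+        start_ = 0;
+      }
+
+      std::vector<T> buf_;
+      size_t start_ = 0;
+      size_t size_ = 0;
+    };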
+
+// MicrotaskQueue instances form a doubly linked list.
+TEST_F(MicrotaskQueueTest, InstanceChain) {
+ ClearTestMicrotaskQueue();
+
+ MicrotaskQueue* default_mtq = isolate()->default_microtask_queue();
+ ASSERT_TRUE(default_mtq);
+ EXPECT_EQ(default_mtq, default_mtq->next());
+ EXPECT_EQ(default_mtq, default_mtq->prev());
+
+ // Create two instances, and check their connection.
+ // The list contains all instances in the creation order, and the next of the
+ // last instance is the first instance:
+ // default_mtq -> mtq1 -> mtq2 -> default_mtq.
+ std::unique_ptr<MicrotaskQueue> mtq1 = MicrotaskQueue::New(isolate());
+ std::unique_ptr<MicrotaskQueue> mtq2 = MicrotaskQueue::New(isolate());
+ EXPECT_EQ(default_mtq->next(), mtq1.get());
+ EXPECT_EQ(mtq1->next(), mtq2.get());
+ EXPECT_EQ(mtq2->next(), default_mtq);
+ EXPECT_EQ(default_mtq, mtq1->prev());
+ EXPECT_EQ(mtq1.get(), mtq2->prev());
+ EXPECT_EQ(mtq2.get(), default_mtq->prev());
+
+ // A deleted instance should also be removed from the list.
+ mtq1 = nullptr;
+ EXPECT_EQ(default_mtq->next(), mtq2.get());
+ EXPECT_EQ(mtq2->next(), default_mtq);
+ EXPECT_EQ(default_mtq, mtq2->prev());
+ EXPECT_EQ(mtq2.get(), default_mtq->prev());
+}
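+
+InstanceChain exercises intrusive circular doubly-linked-list maintenance:
+each MicrotaskQueue links itself in on creation and unlinks itself on
+destruction, with the default queue acting as the sentinel that is its own
+neighbor when alone. The underlying pointer surgery, as a self-contained
+sketch:
+
+    #include <cassert>
+
+    // Intrusive circular doubly-linked list node; a sentinel node stands in
+    // for the default queue and is its own neighbor when the list is empty.
+    struct Node {
+      Node* next = this;
+      Node* prev = this;
+
+      // Insert *this just before the sentinel, i.e. at the tail.
+      void LinkBefore(Node* sentinel) {
+        prev = sentinel->prev;
+        next = sentinel;
+        prev->next = this;
+        sentinel->prev = this;
+      }
+
+      // Unlink on destruction, as ~MicrotaskQueue() would.
+      ~Node() {
+        next->prev = prev;
+        prev->next = next;
+      }
+    };
+
+    int main() {
+      Node sentinel;  // "default queue": initially its own next/prev
+      {
+        Node a, b;
+        a.LinkBefore(&sentinel);
+        b.LinkBefore(&sentinel);
+        assert(sentinel.next == &a && a.next == &b && b.next == &sentinel);
+      }  // b, then a, unlink themselves here
+      assert(sentinel.next == &sentinel && sentinel.prev == &sentinel);
+    }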
+
+// Pending Microtasks in MicrotaskQueues are strong roots. Ensure they are
+// visited exactly once.
+TEST_F(MicrotaskQueueTest, VisitRoot) {
+ // Ensure that the ring buffer has a separate in-use region.
+ for (int i = 0; i < MicrotaskQueue::kMinimumCapacity / 2 + 1; ++i) {
+ microtask_queue()->EnqueueMicrotask(*NewMicrotask([] {}));
+ }
+ microtask_queue()->RunMicrotasks(isolate());
+
+ std::vector<Object> expected;
+ for (int i = 0; i < MicrotaskQueue::kMinimumCapacity / 2 + 1; ++i) {
+ Handle<Microtask> microtask = NewMicrotask([] {});
+ expected.push_back(*microtask);
+ microtask_queue()->EnqueueMicrotask(*microtask);
+ }
+ EXPECT_GT(microtask_queue()->start() + microtask_queue()->size(),
+ microtask_queue()->capacity());
+
+ RecordingVisitor visitor;
+ microtask_queue()->IterateMicrotasks(&visitor);
+
+ std::vector<Object> actual = visitor.visited();
+ std::sort(expected.begin(), expected.end());
+ std::sort(actual.begin(), actual.end());
+ EXPECT_EQ(expected, actual);
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/unittests/objects/microtask-queue-unittest.cc b/deps/v8/test/unittests/objects/microtask-queue-unittest.cc
deleted file mode 100644
index 2b237ebc50..0000000000
--- a/deps/v8/test/unittests/objects/microtask-queue-unittest.cc
+++ /dev/null
@@ -1,55 +0,0 @@
-// Copyright 2018 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/objects/microtask-queue-inl.h"
-
-#include "test/unittests/test-utils.h"
-
-namespace v8 {
-namespace internal {
-
-void NoopCallback(void*) {}
-
-class MicrotaskQueueTest : public TestWithIsolate {
- public:
- Handle<Microtask> NewMicrotask() {
- MicrotaskCallback callback = &NoopCallback;
- void* data = nullptr;
- return factory()->NewCallbackTask(
- factory()->NewForeign(reinterpret_cast<Address>(callback)),
- factory()->NewForeign(reinterpret_cast<Address>(data)));
- }
-};
-
-TEST_F(MicrotaskQueueTest, EnqueueMicrotask) {
- Handle<MicrotaskQueue> microtask_queue = factory()->NewMicrotaskQueue();
- Handle<Microtask> microtask = NewMicrotask();
-
- EXPECT_EQ(0, microtask_queue->pending_microtask_count());
- MicrotaskQueue::EnqueueMicrotask(isolate(), microtask_queue, microtask);
- EXPECT_EQ(1, microtask_queue->pending_microtask_count());
- ASSERT_LE(1, microtask_queue->queue()->length());
- EXPECT_EQ(*microtask, microtask_queue->queue()->get(0));
-
- std::vector<Handle<Microtask>> microtasks;
- microtasks.push_back(microtask);
-
- // Queue microtasks until the reallocation happens.
- int queue_capacity = microtask_queue->queue()->length();
- for (int i = 0; i < queue_capacity; ++i) {
- microtask = NewMicrotask();
- MicrotaskQueue::EnqueueMicrotask(isolate(), microtask_queue, microtask);
- microtasks.push_back(microtask);
- }
-
- int num_tasks = static_cast<int>(microtasks.size());
- EXPECT_EQ(num_tasks, microtask_queue->pending_microtask_count());
- ASSERT_LE(num_tasks, microtask_queue->queue()->length());
- for (int i = 0; i < num_tasks; ++i) {
- EXPECT_EQ(*microtasks[i], microtask_queue->queue()->get(i));
- }
-}
-
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/test/unittests/register-configuration-unittest.cc b/deps/v8/test/unittests/register-configuration-unittest.cc
index f0da8a5b93..15873dd69c 100644
--- a/deps/v8/test/unittests/register-configuration-unittest.cc
+++ b/deps/v8/test/unittests/register-configuration-unittest.cc
@@ -26,10 +26,10 @@ TEST_F(RegisterConfigurationUnitTest, BasicProperties) {
int general_codes[kNumAllocatableGeneralRegs] = {1, 2};
int double_codes[kNumAllocatableDoubleRegs] = {2, 3};
- RegisterConfiguration test(
- kNumGeneralRegs, kNumDoubleRegs, kNumAllocatableGeneralRegs,
- kNumAllocatableDoubleRegs, general_codes, double_codes,
- RegisterConfiguration::OVERLAP, nullptr, nullptr, nullptr, nullptr);
+ RegisterConfiguration test(kNumGeneralRegs, kNumDoubleRegs,
+ kNumAllocatableGeneralRegs,
+ kNumAllocatableDoubleRegs, general_codes,
+ double_codes, RegisterConfiguration::OVERLAP);
EXPECT_EQ(test.num_general_registers(), kNumGeneralRegs);
EXPECT_EQ(test.num_double_registers(), kNumDoubleRegs);
@@ -62,10 +62,10 @@ TEST_F(RegisterConfigurationUnitTest, CombineAliasing) {
int general_codes[] = {1, 2};
int double_codes[] = {2, 3, 16}; // reg 16 should not alias registers 32, 33.
- RegisterConfiguration test(
- kNumGeneralRegs, kNumDoubleRegs, kNumAllocatableGeneralRegs,
- kNumAllocatableDoubleRegs, general_codes, double_codes,
- RegisterConfiguration::COMBINE, nullptr, nullptr, nullptr, nullptr);
+ RegisterConfiguration test(kNumGeneralRegs, kNumDoubleRegs,
+ kNumAllocatableGeneralRegs,
+ kNumAllocatableDoubleRegs, general_codes,
+ double_codes, RegisterConfiguration::COMBINE);
// There are 3 allocatable double regs, but only 2 can alias float regs.
EXPECT_EQ(test.num_allocatable_float_registers(), 4);
diff --git a/deps/v8/test/unittests/regress/regress-crbug-938251-unittest.cc b/deps/v8/test/unittests/regress/regress-crbug-938251-unittest.cc
new file mode 100644
index 0000000000..3a04fc46e6
--- /dev/null
+++ b/deps/v8/test/unittests/regress/regress-crbug-938251-unittest.cc
@@ -0,0 +1,20 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/factory.h"
+#include "src/isolate.h"
+#include "test/unittests/test-utils.h"
+
+namespace v8 {
+namespace internal {
+
+using NewFixedDoubleArrayTest = TestWithIsolateAndZone;
+
+TEST_F(NewFixedDoubleArrayTest, ThrowOnNegativeLength) {
+ ASSERT_DEATH_IF_SUPPORTED({ factory()->NewFixedDoubleArray(-1); },
+ "Fatal javascript OOM in invalid array length");
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/unittests/test-helpers.cc b/deps/v8/test/unittests/test-helpers.cc
index 1ff25337e4..53b07da23b 100644
--- a/deps/v8/test/unittests/test-helpers.cc
+++ b/deps/v8/test/unittests/test-helpers.cc
@@ -44,7 +44,7 @@ Handle<SharedFunctionInfo> CreateSharedFunctionInfo(
int function_literal_id = 1;
// Ensure that the function can be compiled lazily.
shared->set_uncompiled_data(
- *isolate->factory()->NewUncompiledDataWithoutPreParsedScope(
+ *isolate->factory()->NewUncompiledDataWithoutPreparseData(
ReadOnlyRoots(isolate).empty_string_handle(), 0, source->length(),
function_literal_id));
// Make sure we have an outer scope info, even though it's empty
diff --git a/deps/v8/test/unittests/test-helpers.h b/deps/v8/test/unittests/test-helpers.h
index fadc0c3e2b..8a4d9f02ce 100644
--- a/deps/v8/test/unittests/test-helpers.h
+++ b/deps/v8/test/unittests/test-helpers.h
@@ -8,7 +8,6 @@
#include <memory>
#include "include/v8.h"
-#include "src/compiler-dispatcher/compiler-dispatcher-job.h"
#include "src/parsing/parse-info.h"
namespace v8 {
diff --git a/deps/v8/test/unittests/test-utils.cc b/deps/v8/test/unittests/test-utils.cc
index 32f405764d..8a31b9ce02 100644
--- a/deps/v8/test/unittests/test-utils.cc
+++ b/deps/v8/test/unittests/test-utils.cc
@@ -15,109 +15,34 @@
namespace v8 {
-// static
-v8::ArrayBuffer::Allocator* TestWithIsolate::array_buffer_allocator_ = nullptr;
-
-// static
-Isolate* TestWithIsolate::isolate_ = nullptr;
-
-TestWithIsolate::TestWithIsolate()
- : isolate_scope_(isolate()), handle_scope_(isolate()) {}
-
-TestWithIsolate::~TestWithIsolate() = default;
-
-// static
-void TestWithIsolate::SetUpTestCase() {
- Test::SetUpTestCase();
- EXPECT_EQ(nullptr, isolate_);
+IsolateWrapper::IsolateWrapper(bool enforce_pointer_compression)
+ : array_buffer_allocator_(
+ v8::ArrayBuffer::Allocator::NewDefaultAllocator()) {
v8::Isolate::CreateParams create_params;
- array_buffer_allocator_ = v8::ArrayBuffer::Allocator::NewDefaultAllocator();
create_params.array_buffer_allocator = array_buffer_allocator_;
- isolate_ = v8::Isolate::New(create_params);
- EXPECT_TRUE(isolate_ != nullptr);
+ if (enforce_pointer_compression) {
+ isolate_ = reinterpret_cast<v8::Isolate*>(
+ i::Isolate::New(i::IsolateAllocationMode::kInV8Heap));
+ v8::Isolate::Initialize(isolate_, create_params);
+ } else {
+ isolate_ = v8::Isolate::New(create_params);
+ }
+ CHECK_NOT_NULL(isolate_);
}
-
-// static
-void TestWithIsolate::TearDownTestCase() {
- ASSERT_TRUE(isolate_ != nullptr);
+IsolateWrapper::~IsolateWrapper() {
v8::Platform* platform = internal::V8::GetCurrentPlatform();
- ASSERT_TRUE(platform != nullptr);
+ CHECK_NOT_NULL(platform);
while (platform::PumpMessageLoop(platform, isolate_)) continue;
isolate_->Dispose();
- isolate_ = nullptr;
delete array_buffer_allocator_;
- Test::TearDownTestCase();
-}
-
-Local<Value> TestWithIsolate::RunJS(const char* source) {
- Local<Script> script =
- v8::Script::Compile(
- isolate()->GetCurrentContext(),
- v8::String::NewFromUtf8(isolate(), source, v8::NewStringType::kNormal)
- .ToLocalChecked())
- .ToLocalChecked();
- return script->Run(isolate()->GetCurrentContext()).ToLocalChecked();
-}
-
-Local<Value> TestWithIsolate::RunJS(
- String::ExternalOneByteStringResource* source) {
- Local<Script> script =
- v8::Script::Compile(
- isolate()->GetCurrentContext(),
- v8::String::NewExternalOneByte(isolate(), source).ToLocalChecked())
- .ToLocalChecked();
- return script->Run(isolate()->GetCurrentContext()).ToLocalChecked();
}
-TestWithContext::TestWithContext()
- : context_(Context::New(isolate())), context_scope_(context_) {}
-
-TestWithContext::~TestWithContext() = default;
-
-v8::Local<v8::String> TestWithContext::NewString(const char* string) {
- return v8::String::NewFromUtf8(v8_isolate(), string,
- v8::NewStringType::kNormal)
- .ToLocalChecked();
-}
-
-void TestWithContext::SetGlobalProperty(const char* name,
- v8::Local<v8::Value> value) {
- CHECK(v8_context()
- ->Global()
- ->Set(v8_context(), NewString(name), value)
- .FromJust());
-}
+// static
+v8::IsolateWrapper* SharedIsolateHolder::isolate_wrapper_ = nullptr;
namespace internal {
-TestWithIsolate::~TestWithIsolate() = default;
-
-TestWithIsolateAndZone::~TestWithIsolateAndZone() = default;
-
-Factory* TestWithIsolate::factory() const { return isolate()->factory(); }
-
-Handle<Object> TestWithIsolate::RunJSInternal(const char* source) {
- return Utils::OpenHandle(*::v8::TestWithIsolate::RunJS(source));
-}
-
-Handle<Object> TestWithIsolate::RunJSInternal(
- ::v8::String::ExternalOneByteStringResource* source) {
- return Utils::OpenHandle(*::v8::TestWithIsolate::RunJS(source));
-}
-
-base::RandomNumberGenerator* TestWithIsolate::random_number_generator() const {
- return isolate()->random_number_generator();
-}
-
-TestWithZone::~TestWithZone() = default;
-
-TestWithNativeContext::~TestWithNativeContext() = default;
-
-Handle<Context> TestWithNativeContext::native_context() const {
- return isolate()->native_context();
-}
-
SaveFlags::SaveFlags() { non_default_flags_ = FlagList::argv(); }
SaveFlags::~SaveFlags() {
diff --git a/deps/v8/test/unittests/test-utils.h b/deps/v8/test/unittests/test-utils.h
index 289ef5edf2..57db1a39e2 100644
--- a/deps/v8/test/unittests/test-utils.h
+++ b/deps/v8/test/unittests/test-utils.h
@@ -8,6 +8,7 @@
#include <vector>
#include "include/v8.h"
+#include "src/api-inl.h"
#include "src/base/macros.h"
#include "src/base/utils/random-number-generator.h"
#include "src/handles.h"
@@ -21,126 +22,286 @@ namespace v8 {
class ArrayBufferAllocator;
-// Use v8::internal::TestWithIsolate if you are testing internals,
-// aka. directly work with Handles.
-class TestWithIsolate : public virtual ::testing::Test {
+// RAII-like Isolate instance wrapper.
+class IsolateWrapper final {
public:
- TestWithIsolate();
- ~TestWithIsolate() override;
+ // When enforce_pointer_compression is true, the Isolate is created with
+ // pointer compression enabled. When it is false, the Isolate is created
+ // with the default pointer compression state for the current build.
+ explicit IsolateWrapper(bool enforce_pointer_compression = false);
+ ~IsolateWrapper();
- v8::Isolate* isolate() const { return v8_isolate(); }
+ v8::Isolate* isolate() const { return isolate_; }
+
+ private:
+ v8::ArrayBuffer::Allocator* array_buffer_allocator_;
+ v8::Isolate* isolate_;
- v8::Isolate* v8_isolate() const { return isolate_; }
+ DISALLOW_COPY_AND_ASSIGN(IsolateWrapper);
+};
- v8::internal::Isolate* i_isolate() const {
- return reinterpret_cast<v8::internal::Isolate*>(isolate());
+class SharedIsolateHolder final {
+ public:
+ static v8::Isolate* isolate() { return isolate_wrapper_->isolate(); }
+
+ static void CreateIsolate() {
+ CHECK_NULL(isolate_wrapper_);
+ isolate_wrapper_ = new IsolateWrapper();
+ }
+
+ static void DeleteIsolate() {
+ CHECK_NOT_NULL(isolate_wrapper_);
+ delete isolate_wrapper_;
+ isolate_wrapper_ = nullptr;
+ }
+
+ private:
+ static v8::IsolateWrapper* isolate_wrapper_;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(SharedIsolateHolder);
+};
+
+//
+// A set of mixins from which the test fixtures will be constructed.
+//
+template <typename TMixin>
+class WithPrivateIsolateMixin : public TMixin {
+ public:
+ explicit WithPrivateIsolateMixin(bool enforce_pointer_compression = false)
+ : isolate_wrapper_(enforce_pointer_compression) {}
+
+ v8::Isolate* v8_isolate() const { return isolate_wrapper_.isolate(); }
+
+ static void SetUpTestCase() { TMixin::SetUpTestCase(); }
+ static void TearDownTestCase() { TMixin::TearDownTestCase(); }
+
+ private:
+ v8::IsolateWrapper isolate_wrapper_;
+
+ DISALLOW_COPY_AND_ASSIGN(WithPrivateIsolateMixin);
+};
+
+template <typename TMixin>
+class WithSharedIsolateMixin : public TMixin {
+ public:
+ WithSharedIsolateMixin() = default;
+
+ v8::Isolate* v8_isolate() const { return SharedIsolateHolder::isolate(); }
+
+ static void SetUpTestCase() {
+ TMixin::SetUpTestCase();
+ SharedIsolateHolder::CreateIsolate();
}
- Local<Value> RunJS(const char* source);
- Local<Value> RunJS(String::ExternalOneByteStringResource* source);
+ static void TearDownTestCase() {
+ SharedIsolateHolder::DeleteIsolate();
+ TMixin::TearDownTestCase();
+ }
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(WithSharedIsolateMixin);
+};
+
+template <typename TMixin>
+class WithPointerCompressionIsolateMixin
+ : public WithPrivateIsolateMixin<TMixin> {
+ public:
+ WithPointerCompressionIsolateMixin()
+ : WithPrivateIsolateMixin<TMixin>(true) {}
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(WithPointerCompressionIsolateMixin);
+};
+
+template <typename TMixin>
+class WithIsolateScopeMixin : public TMixin {
+ public:
+ WithIsolateScopeMixin()
+ : isolate_scope_(v8_isolate()), handle_scope_(v8_isolate()) {}
- static void SetUpTestCase();
- static void TearDownTestCase();
+ v8::Isolate* isolate() const { return v8_isolate(); }
+ v8::Isolate* v8_isolate() const { return TMixin::v8_isolate(); }
+
+ v8::internal::Isolate* i_isolate() const {
+ return reinterpret_cast<v8::internal::Isolate*>(v8_isolate());
+ }
+
+ static void SetUpTestCase() { TMixin::SetUpTestCase(); }
+ static void TearDownTestCase() { TMixin::TearDownTestCase(); }
private:
- static v8::ArrayBuffer::Allocator* array_buffer_allocator_;
- static v8::Isolate* isolate_;
v8::Isolate::Scope isolate_scope_;
v8::HandleScope handle_scope_;
- DISALLOW_COPY_AND_ASSIGN(TestWithIsolate);
+ DISALLOW_COPY_AND_ASSIGN(WithIsolateScopeMixin);
};
-// Use v8::internal::TestWithNativeContext if you are testing internals,
-// aka. directly work with Handles.
-class TestWithContext : public virtual v8::TestWithIsolate {
+template <typename TMixin>
+class WithContextMixin : public TMixin {
public:
- TestWithContext();
- ~TestWithContext() override;
+ WithContextMixin()
+ : context_(Context::New(v8_isolate())), context_scope_(context_) {}
+
+ v8::Isolate* v8_isolate() const { return TMixin::v8_isolate(); }
const Local<Context>& context() const { return v8_context(); }
const Local<Context>& v8_context() const { return context_; }
- v8::Local<v8::String> NewString(const char* string);
- void SetGlobalProperty(const char* name, v8::Local<v8::Value> value);
+ Local<Value> RunJS(const char* source) {
+ return RunJS(v8::String::NewFromUtf8(v8_isolate(), source,
+ v8::NewStringType::kNormal)
+ .ToLocalChecked());
+ }
+
+ Local<Value> RunJS(v8::String::ExternalOneByteStringResource* source) {
+ return RunJS(
+ v8::String::NewExternalOneByte(v8_isolate(), source).ToLocalChecked());
+ }
+
+ v8::Local<v8::String> NewString(const char* string) {
+ return v8::String::NewFromUtf8(v8_isolate(), string,
+ v8::NewStringType::kNormal)
+ .ToLocalChecked();
+ }
+
+ void SetGlobalProperty(const char* name, v8::Local<v8::Value> value) {
+ CHECK(v8_context()
+ ->Global()
+ ->Set(v8_context(), NewString(name), value)
+ .FromJust());
+ }
+
+ static void SetUpTestCase() { TMixin::SetUpTestCase(); }
+ static void TearDownTestCase() { TMixin::TearDownTestCase(); }
private:
- Local<Context> context_;
+ Local<Value> RunJS(Local<String> source) {
+ auto context = v8_isolate()->GetCurrentContext();
+ Local<Script> script =
+ v8::Script::Compile(context, source).ToLocalChecked();
+ return script->Run(context).ToLocalChecked();
+ }
+
+ v8::Local<v8::Context> context_;
v8::Context::Scope context_scope_;
- DISALLOW_COPY_AND_ASSIGN(TestWithContext);
+ DISALLOW_COPY_AND_ASSIGN(WithContextMixin);
};
+// Use v8::internal::TestWithIsolate if you are testing internals,
+// i.e. if you work directly with Handles.
+using TestWithIsolate = //
+ WithIsolateScopeMixin< //
+ WithSharedIsolateMixin< //
+ ::testing::Test>>;
+
+// Use v8::internal::TestWithNativeContext if you are testing internals,
+// i.e. if you work directly with Handles.
+using TestWithContext = //
+ WithContextMixin< //
+ WithIsolateScopeMixin< //
+ WithSharedIsolateMixin< //
+ ::testing::Test>>>;
+
+using TestWithIsolateAndPointerCompression = //
+ WithContextMixin< //
+ WithIsolateScopeMixin< //
+ WithPointerCompressionIsolateMixin< //
+ ::testing::Test>>>;
+
namespace internal {
// Forward declarations.
class Factory;
-
-class TestWithIsolate : public virtual ::v8::TestWithIsolate {
+template <typename TMixin>
+class WithInternalIsolateMixin : public TMixin {
public:
- TestWithIsolate() = default;
- ~TestWithIsolate() override;
+ WithInternalIsolateMixin() = default;
+
+ Factory* factory() const { return isolate()->factory(); }
+ Isolate* isolate() const { return TMixin::i_isolate(); }
+
+ Handle<NativeContext> native_context() const {
+ return isolate()->native_context();
+ }
- Factory* factory() const;
- Isolate* isolate() const { return i_isolate(); }
template <typename T = Object>
Handle<T> RunJS(const char* source) {
return Handle<T>::cast(RunJSInternal(source));
}
- Handle<Object> RunJSInternal(const char* source);
+
+ Handle<Object> RunJSInternal(const char* source) {
+ return Utils::OpenHandle(*TMixin::RunJS(source));
+ }
+
template <typename T = Object>
Handle<T> RunJS(::v8::String::ExternalOneByteStringResource* source) {
return Handle<T>::cast(RunJSInternal(source));
}
- Handle<Object> RunJSInternal(
- ::v8::String::ExternalOneByteStringResource* source);
- base::RandomNumberGenerator* random_number_generator() const;
-
- private:
- DISALLOW_COPY_AND_ASSIGN(TestWithIsolate);
-};
+ Handle<Object> RunJSInternal(
+ ::v8::String::ExternalOneByteStringResource* source) {
+ return Utils::OpenHandle(*TMixin::RunJS(source));
+ }
-class TestWithZone : public virtual ::testing::Test {
- public:
- TestWithZone() : zone_(&allocator_, ZONE_NAME) {}
- ~TestWithZone() override;
+ base::RandomNumberGenerator* random_number_generator() const {
+ return isolate()->random_number_generator();
+ }
- Zone* zone() { return &zone_; }
+ static void SetUpTestCase() { TMixin::SetUpTestCase(); }
+ static void TearDownTestCase() { TMixin::TearDownTestCase(); }
private:
- v8::internal::AccountingAllocator allocator_;
- Zone zone_;
-
- DISALLOW_COPY_AND_ASSIGN(TestWithZone);
+ DISALLOW_COPY_AND_ASSIGN(WithInternalIsolateMixin);
};
-class TestWithIsolateAndZone : public virtual TestWithIsolate {
+template <typename TMixin>
+class WithZoneMixin : public TMixin {
public:
- TestWithIsolateAndZone() : zone_(&allocator_, ZONE_NAME) {}
- ~TestWithIsolateAndZone() override;
+ WithZoneMixin() : zone_(&allocator_, ZONE_NAME) {}
Zone* zone() { return &zone_; }
+ static void SetUpTestCase() { TMixin::SetUpTestCase(); }
+ static void TearDownTestCase() { TMixin::TearDownTestCase(); }
+
private:
v8::internal::AccountingAllocator allocator_;
Zone zone_;
- DISALLOW_COPY_AND_ASSIGN(TestWithIsolateAndZone);
+ DISALLOW_COPY_AND_ASSIGN(WithZoneMixin);
};
-class TestWithNativeContext : public virtual ::v8::TestWithContext,
- public virtual TestWithIsolate {
- public:
- TestWithNativeContext() = default;
- ~TestWithNativeContext() override;
-
- Handle<Context> native_context() const;
-
- private:
- DISALLOW_COPY_AND_ASSIGN(TestWithNativeContext);
-};
+using TestWithIsolate = //
+ WithInternalIsolateMixin< //
+ WithIsolateScopeMixin< //
+ WithSharedIsolateMixin< //
+ ::testing::Test>>>;
+
+using TestWithZone = WithZoneMixin<::testing::Test>;
+
+using TestWithIsolateAndZone = //
+ WithInternalIsolateMixin< //
+ WithIsolateScopeMixin< //
+ WithSharedIsolateMixin< //
+ WithZoneMixin< //
+ ::testing::Test>>>>;
+
+using TestWithNativeContext = //
+ WithInternalIsolateMixin< //
+ WithContextMixin< //
+ WithIsolateScopeMixin< //
+ WithSharedIsolateMixin< //
+ ::testing::Test>>>>;
+
+using TestWithNativeContextAndZone = //
+ WithZoneMixin< //
+ WithInternalIsolateMixin< //
+ WithContextMixin< //
+ WithIsolateScopeMixin< //
+ WithSharedIsolateMixin< //
+ ::testing::Test>>>>>;
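+
+The diamond of virtual base classes is replaced here by linearly stacked
+mixins: each WithXMixin<TMixin> layer adds one capability and statically
+forwards SetUpTestCase()/TearDownTestCase() to the layer beneath it, and each
+using-alias fixes one concrete stacking order. The technique in miniature
+(hypothetical capabilities, same chaining rule):
+
+    #include <iostream>
+
+    struct TestBase {
+      static void SetUpTestCase() {}
+    };
+
+    // Each mixin layer adds one capability and chains the static hooks to
+    // the layer beneath it, mirroring the WithXMixin classes above.
+    template <typename TMixin>
+    struct WithLogging : TMixin {
+      static void SetUpTestCase() {
+        TMixin::SetUpTestCase();  // inner layers first
+        std::cout << "logging ready\n";
+      }
+    };
+
+    template <typename TMixin>
+    struct WithFakeClock : TMixin {
+      static void SetUpTestCase() {
+        TMixin::SetUpTestCase();
+        std::cout << "clock ready\n";
+      }
+      long now() const { return 42; }  // the added capability
+    };
+
+    // The order is chosen at the alias, exactly like the Test* aliases.
+    using MyFixture = WithFakeClock<WithLogging<TestBase>>;
+
+    int main() {
+      MyFixture::SetUpTestCase();  // prints: logging ready, clock ready
+      std::cout << MyFixture().now() << "\n";
+    }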
class SaveFlags {
public:
@@ -153,6 +314,14 @@ class SaveFlags {
DISALLOW_COPY_AND_ASSIGN(SaveFlags);
};
+// For GTest.
+inline void PrintTo(Object o, ::std::ostream* os) {
+ *os << reinterpret_cast<void*>(o.ptr());
+}
+inline void PrintTo(Smi o, ::std::ostream* os) {
+ *os << reinterpret_cast<void*>(o.ptr());
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/torque/torque-unittest.cc b/deps/v8/test/unittests/torque/torque-unittest.cc
new file mode 100644
index 0000000000..eca4e6fda2
--- /dev/null
+++ b/deps/v8/test/unittests/torque/torque-unittest.cc
@@ -0,0 +1,21 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/torque/utils.h"
+#include "test/unittests/test-utils.h"
+
+namespace v8 {
+namespace internal {
+namespace torque {
+
+TEST(Torque, StackDeleteRange) {
+ Stack<int> stack = {1, 2, 3, 4, 5, 6, 7};
+ stack.DeleteRange(StackRange{BottomOffset{2}, BottomOffset{4}});
+ Stack<int> result = {1, 2, 5, 6, 7};
+ ASSERT_TRUE(stack == result);
+}
+
+} // namespace torque
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/unittests/unicode-unittest.cc b/deps/v8/test/unittests/unicode-unittest.cc
index 068052a3fc..1bede08343 100644
--- a/deps/v8/test/unittests/unicode-unittest.cc
+++ b/deps/v8/test/unittests/unicode-unittest.cc
@@ -29,8 +29,7 @@ template <size_t kBufferSize>
void DecodeUtf16(unibrow::Utf8Decoder<kBufferSize>* decoder,
const std::vector<byte>& bytes,
std::vector<unibrow::uchar>* output) {
- const char* bytes_begin = reinterpret_cast<const char*>(&(*bytes.begin()));
- auto vector = Vector<const char>(bytes_begin, bytes.size());
+ auto vector = Vector<const char>::cast(VectorOf(bytes));
decoder->Reset(vector);
std::vector<uint16_t> utf16(decoder->Utf16Length());
diff --git a/deps/v8/test/unittests/unittests.status b/deps/v8/test/unittests/unittests.status
index f0eef446d1..7582deaedd 100644
--- a/deps/v8/test/unittests/unittests.status
+++ b/deps/v8/test/unittests/unittests.status
@@ -20,4 +20,11 @@
# Uses too much memory.
'Parameterized/WasmCodeManagerTest.GrowingVsFixedModule/Fixed': [SKIP]
}], # '(arch == arm or arch == mips) and not simulator_run'
+
+##############################################################################
+['lite_mode', {
+ # TODO(v8:7777): Re-enable once wasm is supported in jitless mode.
+ 'ValueSerializerTestWithWasm.*': [SKIP],
+}], # lite_mode
+
]
diff --git a/deps/v8/test/unittests/utils-unittest.cc b/deps/v8/test/unittests/utils-unittest.cc
index 0a37e84ab5..c8032d187d 100644
--- a/deps/v8/test/unittests/utils-unittest.cc
+++ b/deps/v8/test/unittests/utils-unittest.cc
@@ -132,5 +132,73 @@ TYPED_TEST(UtilsTest, PassesFilterTest) {
EXPECT_FALSE(PassesFilter(CStrVector(""), CStrVector("a")));
}
+TEST(UtilsTest, IsInBounds) {
+// for column consistency and terseness
+#define INB(x, y, z) EXPECT_TRUE(IsInBounds(x, y, z))
+#define OOB(x, y, z) EXPECT_FALSE(IsInBounds(x, y, z))
+ INB(0, 0, 1);
+ INB(0, 1, 1);
+ INB(1, 0, 1);
+
+ OOB(0, 2, 1);
+ OOB(2, 0, 1);
+
+ INB(0, 0, 2);
+ INB(0, 1, 2);
+ INB(0, 2, 2);
+
+ INB(0, 0, 2);
+ INB(1, 0, 2);
+ INB(2, 0, 2);
+
+ OOB(0, 3, 2);
+ OOB(3, 0, 2);
+
+ INB(0, 1, 2);
+ INB(1, 1, 2);
+
+ OOB(1, 2, 2);
+ OOB(2, 1, 2);
+
+ const size_t max = std::numeric_limits<size_t>::max();
+ const size_t half = max / 2;
+
+ // limit cases.
+ INB(0, 0, max);
+ INB(0, 1, max);
+ INB(1, 0, max);
+ INB(max, 0, max);
+ INB(0, max, max);
+ INB(max - 1, 0, max);
+ INB(0, max - 1, max);
+ INB(max - 1, 1, max);
+ INB(1, max - 1, max);
+
+ INB(half, half, max);
+ INB(half + 1, half, max);
+ INB(half, half + 1, max);
+
+ OOB(max, 0, 0);
+ OOB(0, max, 0);
+ OOB(max, 0, 1);
+ OOB(0, max, 1);
+ OOB(max, 0, 2);
+ OOB(0, max, 2);
+
+ OOB(max, 0, max - 1);
+ OOB(0, max, max - 1);
+
+ // wraparound cases.
+ OOB(max, 1, max);
+ OOB(1, max, max);
+ OOB(max - 1, 2, max);
+ OOB(2, max - 1, max);
+ OOB(half + 1, half + 1, max);
+
+#undef INB
+#undef OOB
+}
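+
+Every one of the wraparound cases fails precisely because a naive
+index + size <= bound comparison would overflow and appear to pass. One
+standard overflow-safe formulation, which reproduces each INB/OOB
+expectation above (offered as an assumption, since the IsInBounds
+implementation itself is not part of this diff):
+
+    #include <cassert>
+    #include <cstddef>
+    #include <limits>
+
+    // Overflow-safe bounds check: [index, index + size) must fit within
+    // bound. Rearranged as a subtraction so the sum can never wrap.
+    constexpr bool IsInBounds(size_t index, size_t size, size_t bound) {
+      return index <= bound && size <= bound - index;
+    }
+
+    int main() {
+      constexpr size_t max = std::numeric_limits<size_t>::max();
+      static_assert(IsInBounds(0, max, max), "whole range is in bounds");
+      static_assert(!IsInBounds(max, 1, max), "would wrap around");
+      static_assert(!IsInBounds(1, max, max), "would wrap around");
+      assert(IsInBounds(max - 1, 1, max));
+      return 0;
+    }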
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/value-serializer-unittest.cc b/deps/v8/test/unittests/value-serializer-unittest.cc
index 2cc0bdc8a6..3e6cac2175 100644
--- a/deps/v8/test/unittests/value-serializer-unittest.cc
+++ b/deps/v8/test/unittests/value-serializer-unittest.cc
@@ -119,7 +119,10 @@ class ValueSerializerTest : public TestWithIsolate {
}
std::pair<uint8_t*, size_t> buffer = serializer.Release();
std::vector<uint8_t> result(buffer.first, buffer.first + buffer.second);
- free(buffer.first);
+ if (auto* delegate = GetSerializerDelegate())
+ delegate->FreeBufferMemory(buffer.first);
+ else
+ free(buffer.first);
return Just(std::move(result));
}
@@ -138,6 +141,10 @@ class ValueSerializerTest : public TestWithIsolate {
return buffer;
}
+ std::vector<uint8_t> EncodeTest(const char* source) {
+ return EncodeTest(EvaluateScriptForInput(source));
+ }
+
v8::Local<v8::Message> InvalidEncodeTest(Local<Value> input_value) {
Context::Scope scope(serialization_context());
TryCatch try_catch(isolate());
@@ -318,6 +325,89 @@ TEST_F(ValueSerializerTest, DecodeOddball) {
EXPECT_TRUE(value->IsNull());
}
+TEST_F(ValueSerializerTest, EncodeArrayStackOverflow) {
+ InvalidEncodeTest("var a = []; for (var i = 0; i < 1E5; i++) a = [a]; a");
+}
+
+TEST_F(ValueSerializerTest, EncodeObjectStackOverflow) {
+ InvalidEncodeTest("var a = {}; for (var i = 0; i < 1E5; i++) a = {a}; a");
+}
+
+TEST_F(ValueSerializerTest, DecodeArrayStackOverflow) {
+ static const int nesting_level = 1E5;
+ std::vector<uint8_t> payload;
+ // Header.
+ payload.push_back(0xFF);
+ payload.push_back(0x0D);
+
+ // Nested arrays, each with one element.
+ for (int i = 0; i < nesting_level; i++) {
+ payload.push_back(0x41);
+ payload.push_back(0x01);
+ }
+
+ // Innermost array is empty.
+ payload.push_back(0x41);
+ payload.push_back(0x00);
+ payload.push_back(0x24);
+ payload.push_back(0x00);
+ payload.push_back(0x00);
+
+ // Close nesting.
+ for (int i = 0; i < nesting_level; i++) {
+ payload.push_back(0x24);
+ payload.push_back(0x00);
+ payload.push_back(0x01);
+ }
+
+ InvalidDecodeTest(payload);
+}
+
+TEST_F(ValueSerializerTest, DecodeObjectStackOverflow) {
+ static const int nesting_level = 1E5;
+ std::vector<uint8_t> payload;
+ // Header.
+ payload.push_back(0xFF);
+ payload.push_back(0x0D);
+
+ // Nested objects, each with one property 'a'.
+ for (int i = 0; i < nesting_level; i++) {
+ payload.push_back(0x6F);
+ payload.push_back(0x22);
+ payload.push_back(0x01);
+ payload.push_back(0x61);
+ }
+
+ // Innermost object is empty.
+ payload.push_back(0x6F);
+ payload.push_back(0x7B);
+ payload.push_back(0x00);
+
+ // Close nesting.
+ for (int i = 0; i < nesting_level; i++) {
+ payload.push_back(0x7B);
+ payload.push_back(0x01);
+ }
+
+ InvalidDecodeTest(payload);
+}
+
+TEST_F(ValueSerializerTest, DecodeVerifyObjectCount) {
+ static const int nesting_level = 1E5;
+ std::vector<uint8_t> payload;
+ // Header.
+ payload.push_back(0xFF);
+ payload.push_back(0x0D);
+
+ // Repeat SerializationTag::kVerifyObjectCount. This leads to stack overflow.
+ for (int i = 0; i < nesting_level; i++) {
+ payload.push_back(0x3F);
+ payload.push_back(0x01);
+ }
+
+ InvalidDecodeTest(payload);
+}
+
TEST_F(ValueSerializerTest, RoundTripNumber) {
Local<Value> value = RoundTripTest(Integer::New(isolate(), 42));
ASSERT_TRUE(value->IsInt32());
@@ -1871,19 +1961,15 @@ TEST_F(ValueSerializerTest, DecodeDataView) {
}
TEST_F(ValueSerializerTest, DecodeArrayWithLengthProperty1) {
- ASSERT_DEATH_IF_SUPPORTED(
- DecodeTest({0xff, 0x0d, 0x41, 0x03, 0x49, 0x02, 0x49, 0x04,
- 0x49, 0x06, 0x22, 0x06, 0x6c, 0x65, 0x6e, 0x67,
- 0x74, 0x68, 0x49, 0x02, 0x24, 0x01, 0x03}),
- ".*LookupIterator::NOT_FOUND == it.state\\(\\).*");
+ InvalidDecodeTest({0xff, 0x0d, 0x41, 0x03, 0x49, 0x02, 0x49, 0x04,
+ 0x49, 0x06, 0x22, 0x06, 0x6c, 0x65, 0x6e, 0x67,
+ 0x74, 0x68, 0x49, 0x02, 0x24, 0x01, 0x03});
}
TEST_F(ValueSerializerTest, DecodeArrayWithLengthProperty2) {
- ASSERT_DEATH_IF_SUPPORTED(
- DecodeTest({0xff, 0x0d, 0x41, 0x03, 0x49, 0x02, 0x49, 0x04,
- 0x49, 0x06, 0x22, 0x06, 0x6c, 0x65, 0x6e, 0x67,
- 0x74, 0x68, 0x6f, 0x7b, 0x00, 0x24, 0x01, 0x03}),
- ".*LookupIterator::NOT_FOUND == it.state\\(\\).*");
+ InvalidDecodeTest({0xff, 0x0d, 0x41, 0x03, 0x49, 0x02, 0x49, 0x04,
+ 0x49, 0x06, 0x22, 0x06, 0x6c, 0x65, 0x6e, 0x67,
+ 0x74, 0x68, 0x6f, 0x7b, 0x00, 0x24, 0x01, 0x03});
}
TEST_F(ValueSerializerTest, DecodeInvalidDataView) {
@@ -2386,7 +2472,7 @@ class ValueSerializerTestWithWasm : public ValueSerializerTest {
class ThrowingSerializer : public ValueSerializer::Delegate {
public:
Maybe<uint32_t> GetWasmModuleTransferId(
- Isolate* isolate, Local<WasmCompiledModule> module) override {
+ Isolate* isolate, Local<WasmModuleObject> module) override {
isolate->ThrowException(Exception::Error(
String::NewFromOneByte(
isolate,
@@ -2402,10 +2488,10 @@ class ValueSerializerTestWithWasm : public ValueSerializerTest {
class SerializeToTransfer : public ValueSerializer::Delegate {
public:
SerializeToTransfer(
- std::vector<WasmCompiledModule::TransferrableModule>* modules)
+ std::vector<WasmModuleObject::TransferrableModule>* modules)
: modules_(modules) {}
Maybe<uint32_t> GetWasmModuleTransferId(
- Isolate* isolate, Local<WasmCompiledModule> module) override {
+ Isolate* isolate, Local<WasmModuleObject> module) override {
modules_->push_back(module->GetTransferrableModule());
return Just(static_cast<uint32_t>(modules_->size()) - 1);
}
@@ -2413,23 +2499,23 @@ class ValueSerializerTestWithWasm : public ValueSerializerTest {
void ThrowDataCloneError(Local<String> message) override { UNREACHABLE(); }
private:
- std::vector<WasmCompiledModule::TransferrableModule>* modules_;
+ std::vector<WasmModuleObject::TransferrableModule>* modules_;
};
class DeserializeFromTransfer : public ValueDeserializer::Delegate {
public:
DeserializeFromTransfer(
- std::vector<WasmCompiledModule::TransferrableModule>* modules)
+ std::vector<WasmModuleObject::TransferrableModule>* modules)
: modules_(modules) {}
- MaybeLocal<WasmCompiledModule> GetWasmModuleFromId(Isolate* isolate,
- uint32_t id) override {
- return WasmCompiledModule::FromTransferrableModule(isolate,
- modules_->at(id));
+ MaybeLocal<WasmModuleObject> GetWasmModuleFromId(Isolate* isolate,
+ uint32_t id) override {
+ return WasmModuleObject::FromTransferrableModule(isolate,
+ modules_->at(id));
}
private:
- std::vector<WasmCompiledModule::TransferrableModule>* modules_;
+ std::vector<WasmModuleObject::TransferrableModule>* modules_;
};
ValueSerializer::Delegate* GetSerializerDelegate() override {
@@ -2440,9 +2526,9 @@ class ValueSerializerTestWithWasm : public ValueSerializerTest {
return current_deserializer_delegate_;
}
- Local<WasmCompiledModule> MakeWasm() {
+ Local<WasmModuleObject> MakeWasm() {
Context::Scope scope(serialization_context());
- return WasmCompiledModule::DeserializeOrCompile(
+ return WasmModuleObject::DeserializeOrCompile(
isolate(), {nullptr, 0},
{kIncrementerWasm, sizeof(kIncrementerWasm)})
.ToLocalChecked();
@@ -2509,7 +2595,7 @@ class ValueSerializerTestWithWasm : public ValueSerializerTest {
private:
static bool g_saved_flag;
- std::vector<WasmCompiledModule::TransferrableModule> transfer_modules_;
+ std::vector<WasmModuleObject::TransferrableModule> transfer_modules_;
SerializeToTransfer serialize_delegate_;
DeserializeFromTransfer deserialize_delegate_;
ValueSerializer::Delegate* current_serializer_delegate_ = nullptr;
@@ -2715,5 +2801,89 @@ TEST_F(ValueSerializerTestWithWasm, DecodeWasmModuleWithInvalidDataLength) {
InvalidDecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x57, 0x79, 0x00, 0x7F});
}
+class ValueSerializerTestWithLimitedMemory : public ValueSerializerTest {
+ protected:
+// GMock doesn't use the "override" keyword.
+#if __clang__
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Winconsistent-missing-override"
+#endif
+
+ class SerializerDelegate : public ValueSerializer::Delegate {
+ public:
+ explicit SerializerDelegate(ValueSerializerTestWithLimitedMemory* test)
+ : test_(test) {}
+
+ ~SerializerDelegate() { EXPECT_EQ(nullptr, last_buffer_); }
+
+ void SetMemoryLimit(size_t limit) { memory_limit_ = limit; }
+
+ void* ReallocateBufferMemory(void* old_buffer, size_t size,
+ size_t* actual_size) override {
+ EXPECT_EQ(old_buffer, last_buffer_);
+ if (size > memory_limit_) return nullptr;
+ *actual_size = size;
+ last_buffer_ = realloc(old_buffer, size);
+ return last_buffer_;
+ }
+
+ void FreeBufferMemory(void* buffer) override {
+ EXPECT_EQ(buffer, last_buffer_);
+ last_buffer_ = nullptr;
+ free(buffer);
+ }
+
+ void ThrowDataCloneError(Local<String> message) override {
+ test_->isolate()->ThrowException(Exception::Error(message));
+ }
+
+ MOCK_METHOD2(WriteHostObject,
+ Maybe<bool>(Isolate* isolate, Local<Object> object));
+
+ private:
+ ValueSerializerTestWithLimitedMemory* test_;
+ void* last_buffer_ = nullptr;
+ size_t memory_limit_ = 0;
+ };
+
+#if __clang__
+#pragma clang diagnostic pop
+#endif
+
+ ValueSerializer::Delegate* GetSerializerDelegate() override {
+ return &serializer_delegate_;
+ }
+
+ void BeforeEncode(ValueSerializer* serializer) override {
+ serializer_ = serializer;
+ }
+
+ SerializerDelegate serializer_delegate_{this};
+ ValueSerializer* serializer_ = nullptr;
+};
+
+TEST_F(ValueSerializerTestWithLimitedMemory, FailIfNoMemoryInWriteHostObject) {
+ EXPECT_CALL(serializer_delegate_, WriteHostObject(isolate(), _))
+ .WillRepeatedly(Invoke([this](Isolate*, Local<Object>) {
+ static const char kDummyData[1024] = {};
+ serializer_->WriteRawBytes(&kDummyData, sizeof(kDummyData));
+ return Just(true);
+ }));
+
+ // If there is enough memory, things work.
+ serializer_delegate_.SetMemoryLimit(2048);
+ EncodeTest("new ExampleHostObject()");
+
+ // If not, we get a graceful failure, rather than silent misbehavior.
+ serializer_delegate_.SetMemoryLimit(1024);
+ InvalidEncodeTest("new ExampleHostObject()");
+
+ // And we definitely don't continue to serialize other things.
+ serializer_delegate_.SetMemoryLimit(1024);
+ EvaluateScriptForInput("gotA = false");
+ InvalidEncodeTest("[new ExampleHostObject, {get a() { gotA = true; }}]");
+ EXPECT_TRUE(EvaluateScriptForInput("gotA")->IsFalse());
+}
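+
+The delegate owns the serializer's backing buffer, so every growth request
+funnels through ReallocateBufferMemory, and returning nullptr there turns an
+over-budget serialization into a catchable data-clone error rather than
+silent truncation. The budget-enforcing core, as a standalone sketch:
+
+    #include <cstddef>
+    #include <cstdlib>
+
+    // Budgeted reallocation in the spirit of the SerializerDelegate above:
+    // refuse growth past the cap instead of letting the buffer balloon.
+    class CappedBuffer {
+     public:
+      explicit CappedBuffer(size_t cap) : cap_(cap) {}
+      ~CappedBuffer() { std::free(buf_); }
+
+      // Returns nullptr (and keeps the old buffer valid) when over budget,
+      // matching the ReallocateBufferMemory contract used in the test.
+      void* Grow(size_t size) {
+        if (size > cap_) return nullptr;
+        void* grown = std::realloc(buf_, size);
+        if (grown != nullptr) buf_ = grown;
+        return grown;
+      }
+
+     private:
+      size_t cap_;
+      void* buf_ = nullptr;
+    };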
+
} // namespace
} // namespace v8
diff --git a/deps/v8/test/unittests/wasm/OWNERS b/deps/v8/test/unittests/wasm/OWNERS
index 3972f0dd99..dc68b39733 100644
--- a/deps/v8/test/unittests/wasm/OWNERS
+++ b/deps/v8/test/unittests/wasm/OWNERS
@@ -1,7 +1,5 @@
ahaas@chromium.org
-bradnelson@chromium.org
clemensh@chromium.org
-eholk@chromium.org
titzer@chromium.org
# COMPONENT: Blink>JavaScript>WebAssembly
diff --git a/deps/v8/test/unittests/wasm/decoder-unittest.cc b/deps/v8/test/unittests/wasm/decoder-unittest.cc
index e2a7bcc388..1d9d0ea15c 100644
--- a/deps/v8/test/unittests/wasm/decoder-unittest.cc
+++ b/deps/v8/test/unittests/wasm/decoder-unittest.cc
@@ -4,6 +4,7 @@
#include "test/unittests/test-utils.h"
+#include "src/base/overflowing-math.h"
#include "src/objects-inl.h"
#include "src/wasm/decoder.h"
#include "test/common/wasm/wasm-macro-gen.h"
@@ -475,7 +476,8 @@ TEST_F(DecoderTest, ReadU32v_Bits) {
// foreach length 1...32
for (int i = 1; i <= 32; i++) {
uint32_t val = kVals[v];
- if (i < 32) val &= ((1 << i) - 1);
+ if (i < 32)
+ val &= base::SubWithWraparound(base::ShlWithWraparound(1, i), 1);
unsigned length = 1 + i / 7;
for (unsigned j = 0; j < kMaxSize; j++) {
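The new base::SubWithWraparound / base::ShlWithWraparound calls avoid undefined behavior: for i == 31 the old expression (1 << i) shifts into the sign bit of a signed int. A minimal sketch of what such wraparound helpers typically boil down to, assuming the usual strategy of doing the arithmetic in the unsigned domain (the names mirror src/base/overflowing-math.h, but the bodies here are illustrative):

#include <type_traits>

// Overflow is well-defined on unsigned types, so convert, operate, and cast
// back (two's-complement wraparound on every platform V8 supports).
template <typename T>
T SubWithWraparound(T a, T b) {
  using U = typename std::make_unsigned<T>::type;
  return static_cast<T>(static_cast<U>(a) - static_cast<U>(b));
}

template <typename T>
T ShlWithWraparound(T a, int shift) {
  using U = typename std::make_unsigned<T>::type;
  return static_cast<T>(static_cast<U>(a)
                        << (shift & (8 * static_cast<int>(sizeof(T)) - 1)));
}

With these, the test's mask computation ShlWithWraparound(1, 31) yields 0x80000000 wrapped to INT32_MIN instead of tripping UBSan, and subtracting 1 recovers the intended 31-bit mask.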
diff --git a/deps/v8/test/unittests/wasm/function-body-decoder-unittest.cc b/deps/v8/test/unittests/wasm/function-body-decoder-unittest.cc
index 31e4a12ae7..e13816744f 100644
--- a/deps/v8/test/unittests/wasm/function-body-decoder-unittest.cc
+++ b/deps/v8/test/unittests/wasm/function-body-decoder-unittest.cc
@@ -18,6 +18,7 @@
#include "test/common/wasm/flag-utils.h"
#include "test/common/wasm/test-signatures.h"
#include "test/common/wasm/wasm-macro-gen.h"
+#include "testing/gmock-support.h"
namespace v8 {
namespace internal {
@@ -54,39 +55,37 @@ static const WasmOpcode kInt32BinopOpcodes[] = {
val, WASM_ZERO, kExprBrIf, static_cast<byte>(depth)
#define EXPECT_VERIFIES_C(sig, x) \
- Verify(true, sigs.sig(), x, x + arraysize(x), kAppendEnd)
+ Verify(true, sigs.sig(), ArrayVector(x), kAppendEnd)
-#define EXPECT_FAILURE_C(sig, x) \
- Verify(false, sigs.sig(), x, x + arraysize(x), kAppendEnd)
+#define EXPECT_FAILURE_C(sig, x, ...) \
+ Verify(false, sigs.sig(), ArrayVector(x), kAppendEnd, ##__VA_ARGS__)
-#define EXPECT_VERIFIES_SC(sig, x) \
- Verify(true, sig, x, x + arraysize(x), kAppendEnd)
+#define EXPECT_VERIFIES_SC(sig, x) Verify(true, sig, ArrayVector(x), kAppendEnd)
-#define EXPECT_FAILURE_SC(sig, x) \
- Verify(false, sig, x, x + arraysize(x), kAppendEnd)
+#define EXPECT_FAILURE_SC(sig, x) Verify(false, sig, ArrayVector(x), kAppendEnd)
-#define EXPECT_VERIFIES_S(env, ...) \
- do { \
- static byte code[] = {__VA_ARGS__}; \
- Verify(true, env, code, code + arraysize(code), kAppendEnd); \
+#define EXPECT_VERIFIES_S(env, ...) \
+ do { \
+ static byte code[] = {__VA_ARGS__}; \
+ Verify(true, env, ArrayVector(code), kAppendEnd); \
} while (false)
-#define EXPECT_FAILURE_S(env, ...) \
- do { \
- static byte code[] = {__VA_ARGS__}; \
- Verify(false, env, code, code + arraysize(code), kAppendEnd); \
+#define EXPECT_FAILURE_S(env, ...) \
+ do { \
+ static byte code[] = {__VA_ARGS__}; \
+ Verify(false, env, ArrayVector(code), kAppendEnd); \
} while (false)
-#define EXPECT_VERIFIES(sig, ...) \
- do { \
- static const byte code[] = {__VA_ARGS__}; \
- Verify(true, sigs.sig(), code, code + sizeof(code), kAppendEnd); \
+#define EXPECT_VERIFIES(sig, ...) \
+ do { \
+ static const byte code[] = {__VA_ARGS__}; \
+ EXPECT_VERIFIES_C(sig, code); \
} while (false)
-#define EXPECT_FAILURE(sig, ...) \
- do { \
- static const byte code[] = {__VA_ARGS__}; \
- Verify(false, sigs.sig(), code, code + sizeof(code), kAppendEnd); \
+#define EXPECT_FAILURE(sig, ...) \
+ do { \
+ static const byte code[] = {__VA_ARGS__}; \
+ EXPECT_FAILURE_C(sig, code); \
} while (false)
class FunctionBodyDecoderTest : public TestWithZone {
@@ -108,49 +107,48 @@ class FunctionBodyDecoderTest : public TestWithZone {
enum AppendEnd : bool { kAppendEnd, kOmitEnd };
- void PrepareBytecode(const byte** startp, const byte** endp,
- AppendEnd append_end) {
- const byte* start = *startp;
- const byte* end = *endp;
+ Vector<const byte> PrepareBytecode(Vector<const byte> code,
+ AppendEnd append_end) {
size_t locals_size = local_decls.Size();
- size_t total_size = end - start + locals_size;
- if (append_end == kAppendEnd) ++total_size;
+ size_t total_size =
+ code.size() + locals_size + (append_end == kAppendEnd ? 1 : 0);
byte* buffer = static_cast<byte*>(zone()->New(total_size));
// Prepend the local decls to the code.
local_decls.Emit(buffer);
// Emit the code.
- memcpy(buffer + locals_size, start, end - start);
+ memcpy(buffer + locals_size, code.start(), code.size());
if (append_end == kAppendEnd) {
// Append an extra end opcode.
buffer[total_size - 1] = kExprEnd;
}
- *startp = buffer;
- *endp = buffer + total_size;
+ return {buffer, total_size};
}
// Prepends local variable declarations and renders nice error messages for
// verification failures.
- void Verify(bool expected_success, FunctionSig* sig, const byte* start,
- const byte* end, AppendEnd append_end) {
- PrepareBytecode(&start, &end, append_end);
+ void Verify(bool expected_success, FunctionSig* sig, Vector<const byte> code,
+ AppendEnd append_end, const char* message = nullptr) {
+ code = PrepareBytecode(code, append_end);
// Verify the code.
- FunctionBody body(sig, 0, start, end);
+ FunctionBody body(sig, 0, code.start(), code.end());
WasmFeatures unused_detected_features;
DecodeResult result =
VerifyWasmCode(zone()->allocator(), enabled_features_, module,
&unused_detected_features, body);
- uint32_t pc = result.error_offset();
std::ostringstream str;
- if (expected_success) {
- str << "Verification failed: pc = +" << pc
- << ", msg = " << result.error_msg();
+ if (result.failed()) {
+ str << "Verification failed: pc = +" << result.error().offset()
+ << ", msg = " << result.error().message();
} else {
- str << "Verification successed, expected failure; pc = +" << pc;
+ str << "Verification successed, expected failure";
}
EXPECT_EQ(result.ok(), expected_success) << str.str();
+ if (result.failed() && message) {
+ EXPECT_THAT(result.error().message(), ::testing::HasSubstr(message));
+ }
}
void TestBinop(WasmOpcode opcode, FunctionSig* success) {
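With the Vector-based Verify signature, call sites collapse to a single expression and the length bookkeeping disappears. Roughly, the ArrayVector helper (declared in V8's vector utilities; this reduction is illustrative) just deduces the array length as a template parameter:

// Wraps a C array as a (pointer, length) pair; the length is deduced, so
// there is no sizeof arithmetic at the call site.
template <typename T, size_t N>
Vector<T> ArrayVector(T (&array)[N]) {
  return Vector<T>(array, N);
}

// A call site then reads:
//   static byte code[] = {kExprCallIndirect};
//   Verify(false, sig, ArrayVector(code), kOmitEnd);
// instead of passing code and code + arraysize(code) separately.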
@@ -264,6 +262,18 @@ class TestModuleBuilder {
void InitializeTable() { mod.tables.emplace_back(); }
+ byte AddPassiveElementSegment() {
+ mod.elem_segments.emplace_back();
+ return static_cast<byte>(mod.elem_segments.size() - 1);
+ }
+
+ // Set the number of data segments as declared by the DataCount section.
+ void SetDataSegmentCount(uint32_t data_segment_count) {
+ // The Data section occurs after the Code section, so we don't need to
+ // update mod.data_segments, as it is always empty.
+ mod.num_declared_data_segments = data_segment_count;
+ }
+
WasmModule* module() { return &mod; }
private:
@@ -286,9 +296,8 @@ TEST_F(FunctionBodyDecoderTest, RefNull) {
}
TEST_F(FunctionBodyDecoderTest, EmptyFunction) {
- byte code[] = {0};
- Verify(true, sigs.v_v(), code, code, kAppendEnd);
- Verify(false, sigs.i_i(), code, code, kAppendEnd);
+ Verify(true, sigs.v_v(), {}, kAppendEnd);
+ Verify(false, sigs.i_i(), {}, kAppendEnd);
}
TEST_F(FunctionBodyDecoderTest, IncompleteIf1) {
@@ -343,10 +352,10 @@ TEST_F(FunctionBodyDecoderTest, Float64Const) {
TEST_F(FunctionBodyDecoderTest, Int32Const_off_end) {
byte code[] = {kExprI32Const, 0xAA, 0xBB, 0xCC, 0x44};
- for (int size = 1; size <= 4; size++) {
- Verify(false, sigs.i_i(), code, code + size, kAppendEnd);
+ for (size_t size = 1; size <= 4; ++size) {
+ Verify(false, sigs.i_i(), {code, size}, kAppendEnd);
// Should also fail without the trailing 'end' opcode.
- Verify(false, sigs.i_i(), code, code + size, kOmitEnd);
+ Verify(false, sigs.i_i(), {code, size}, kOmitEnd);
}
}
@@ -524,15 +533,15 @@ TEST_F(FunctionBodyDecoderTest, TeeLocalN_local) {
}
TEST_F(FunctionBodyDecoderTest, BlockN) {
- const int kMaxSize = 200;
+ constexpr size_t kMaxSize = 200;
byte buffer[kMaxSize + 3];
- for (int i = 0; i <= kMaxSize; i++) {
+ for (size_t i = 0; i <= kMaxSize; i++) {
memset(buffer, kExprNop, sizeof(buffer));
buffer[0] = kExprBlock;
buffer[1] = kLocalVoid;
buffer[i + 2] = kExprEnd;
- Verify(true, sigs.v_i(), buffer, buffer + i + 3, kAppendEnd);
+ Verify(true, sigs.v_i(), {buffer, i + 3}, kAppendEnd);
}
}
@@ -679,8 +688,8 @@ TEST_F(FunctionBodyDecoderTest, BlockN_off_end) {
byte code[] = {WASM_BLOCK(kExprNop, kExprNop, kExprNop, kExprNop)};
EXPECT_VERIFIES_C(v_v, code);
for (size_t i = 1; i < arraysize(code); i++) {
- Verify(false, sigs.v_v(), code, code + i, kAppendEnd);
- Verify(false, sigs.v_v(), code, code + i, kOmitEnd);
+ Verify(false, sigs.v_v(), {code, i}, kAppendEnd);
+ Verify(false, sigs.v_v(), {code, i}, kOmitEnd);
}
}
@@ -787,6 +796,13 @@ TEST_F(FunctionBodyDecoderTest, IfElseUnreachable2) {
}
}
+TEST_F(FunctionBodyDecoderTest, OneArmedIfWithArity) {
+ static const byte code[] = {WASM_ZERO, kExprIf, kLocalI32, WASM_ONE,
+ kExprEnd};
+ EXPECT_FAILURE_C(i_v, code,
+ "start-arity and end-arity of one-armed if must match");
+}
+
TEST_F(FunctionBodyDecoderTest, IfBreak) {
EXPECT_VERIFIES(v_i, WASM_IF(WASM_GET_LOCAL(0), WASM_BR(0)));
EXPECT_VERIFIES(v_i, WASM_IF(WASM_GET_LOCAL(0), WASM_BR(1)));
@@ -1011,8 +1027,8 @@ TEST_F(FunctionBodyDecoderTest, If_off_end) {
static const byte kCode[] = {
WASM_IF_ELSE(WASM_GET_LOCAL(0), WASM_GET_LOCAL(0), WASM_GET_LOCAL(0))};
for (size_t len = 3; len < arraysize(kCode); len++) {
- Verify(false, sigs.i_i(), kCode, kCode + len, kAppendEnd);
- Verify(false, sigs.i_i(), kCode, kCode + len, kOmitEnd);
+ Verify(false, sigs.i_i(), {kCode, len}, kAppendEnd);
+ Verify(false, sigs.i_i(), {kCode, len}, kOmitEnd);
}
}
@@ -1614,7 +1630,7 @@ TEST_F(FunctionBodyDecoderTest, IncompleteIndirectCall) {
module = builder.module();
static byte code[] = {kExprCallIndirect};
- Verify(false, sig, code, code + arraysize(code), kOmitEnd);
+ Verify(false, sig, ArrayVector(code), kOmitEnd);
}
TEST_F(FunctionBodyDecoderTest, IncompleteStore) {
@@ -1625,7 +1641,7 @@ TEST_F(FunctionBodyDecoderTest, IncompleteStore) {
module = builder.module();
static byte code[] = {kExprI32StoreMem};
- Verify(false, sig, code, code + arraysize(code), kOmitEnd);
+ Verify(false, sig, ArrayVector(code), kOmitEnd);
}
TEST_F(FunctionBodyDecoderTest, IncompleteS8x16Shuffle) {
@@ -1638,7 +1654,7 @@ TEST_F(FunctionBodyDecoderTest, IncompleteS8x16Shuffle) {
static byte code[] = {kSimdPrefix,
static_cast<byte>(kExprS8x16Shuffle & 0xff)};
- Verify(false, sig, code, code + arraysize(code), kOmitEnd);
+ Verify(false, sig, ArrayVector(code), kOmitEnd);
}
TEST_F(FunctionBodyDecoderTest, SimpleImportCalls) {
@@ -1798,22 +1814,22 @@ TEST_F(FunctionBodyDecoderTest, AllSetGlobalCombinations) {
}
}
-TEST_F(FunctionBodyDecoderTest, WasmGrowMemory) {
+TEST_F(FunctionBodyDecoderTest, WasmMemoryGrow) {
TestModuleBuilder builder;
module = builder.module();
builder.InitializeMemory();
- byte code[] = {WASM_GET_LOCAL(0), kExprGrowMemory, 0};
+ byte code[] = {WASM_GET_LOCAL(0), kExprMemoryGrow, 0};
EXPECT_VERIFIES_C(i_i, code);
EXPECT_FAILURE_C(i_d, code);
}
-TEST_F(FunctionBodyDecoderTest, AsmJsGrowMemory) {
+TEST_F(FunctionBodyDecoderTest, AsmJsMemoryGrow) {
TestModuleBuilder builder(kAsmJsOrigin);
module = builder.module();
builder.InitializeMemory();
- byte code[] = {WASM_GET_LOCAL(0), kExprGrowMemory, 0};
+ byte code[] = {WASM_GET_LOCAL(0), kExprMemoryGrow, 0};
EXPECT_FAILURE_C(i_i, code);
}
@@ -2214,8 +2230,8 @@ TEST_F(FunctionBodyDecoderTest, BrTable2b) {
TEST_F(FunctionBodyDecoderTest, BrTable_off_end) {
static byte code[] = {B1(WASM_BR_TABLE(WASM_GET_LOCAL(0), 0, BR_TARGET(0)))};
for (size_t len = 1; len < sizeof(code); len++) {
- Verify(false, sigs.i_i(), code, code + len, kAppendEnd);
- Verify(false, sigs.i_i(), code, code + len, kOmitEnd);
+ Verify(false, sigs.i_i(), {code, len}, kAppendEnd);
+ Verify(false, sigs.i_i(), {code, len}, kOmitEnd);
}
}
@@ -2429,38 +2445,57 @@ TEST_F(FunctionBodyDecoderTest, ThrowUnreachable) {
}
#define WASM_TRY_OP kExprTry, kLocalVoid
-#define WASM_CATCH(index) kExprCatch, static_cast<byte>(index)
+#define WASM_BR_ON_EXN(depth, index) \
+ kExprBrOnExn, static_cast<byte>(depth), static_cast<byte>(index)
TEST_F(FunctionBodyDecoderTest, TryCatch) {
WASM_FEATURE_SCOPE(eh);
TestModuleBuilder builder;
module = builder.module();
- byte ex1 = builder.AddException(sigs.v_v());
- byte ex2 = builder.AddException(sigs.v_v());
- EXPECT_VERIFIES(v_v, WASM_TRY_OP, WASM_CATCH(ex1), kExprEnd);
- EXPECT_FAILURE(v_v, WASM_TRY_OP, kExprEnd); // Missing catch.
- EXPECT_FAILURE(v_v, WASM_TRY_OP, WASM_CATCH(ex1)); // Missing end.
- EXPECT_FAILURE(v_v, WASM_CATCH(ex1), kExprEnd); // Missing try.
-
- // TODO(mstarzinger): Double catch. Fix this to verify.
- EXPECT_FAILURE(v_v, WASM_TRY_OP, WASM_CATCH(ex1), WASM_CATCH(ex2), kExprEnd);
+ EXPECT_VERIFIES(v_v, WASM_TRY_OP, kExprCatch, kExprDrop, kExprEnd);
+ EXPECT_FAILURE(v_v, WASM_TRY_OP, kExprCatch, kExprCatch, kExprEnd);
+ EXPECT_FAILURE(v_v, WASM_TRY_OP, kExprEnd); // Missing catch.
+ EXPECT_FAILURE(v_v, WASM_TRY_OP, kExprCatch); // Missing end.
+ EXPECT_FAILURE(v_v, kExprCatch, kExprEnd); // Missing try.
}
-TEST_F(FunctionBodyDecoderTest, TryCatchAll) {
+TEST_F(FunctionBodyDecoderTest, Rethrow) {
WASM_FEATURE_SCOPE(eh);
TestModuleBuilder builder;
module = builder.module();
- byte ex1 = builder.AddException(sigs.v_v());
- EXPECT_VERIFIES(v_v, WASM_TRY_OP, kExprCatchAll, kExprEnd);
- EXPECT_VERIFIES(v_v, WASM_TRY_OP, WASM_CATCH(ex1), kExprCatchAll, kExprEnd);
- EXPECT_FAILURE(v_v, WASM_TRY_OP, kExprCatchAll, kExprCatchAll, kExprEnd);
- EXPECT_FAILURE(v_v, WASM_TRY_OP, kExprCatchAll, WASM_CATCH(ex1), kExprEnd);
- EXPECT_FAILURE(v_v, WASM_TRY_OP, kExprCatchAll); // Missing end.
- EXPECT_FAILURE(v_v, kExprCatchAll, kExprEnd); // Missing try.
+ EXPECT_VERIFIES(v_v, WASM_TRY_OP, kExprCatch, kExprRethrow, kExprEnd);
+ EXPECT_FAILURE(v_v, WASM_TRY_OP, kExprRethrow, kExprCatch, kExprEnd);
+ EXPECT_FAILURE(v_v, WASM_BLOCK(kExprRethrow));
+ EXPECT_FAILURE(v_v, kExprRethrow);
}
+TEST_F(FunctionBodyDecoderTest, BrOnExn) {
+ WASM_FEATURE_SCOPE(eh);
+ TestModuleBuilder builder;
+ module = builder.module();
+ byte ex1 = builder.AddException(sigs.v_v());
+ byte ex2 = builder.AddException(sigs.v_i());
+ EXPECT_VERIFIES(v_v, WASM_TRY_OP, kExprCatch, WASM_BR_ON_EXN(0, ex1),
+ kExprDrop, kExprEnd);
+ EXPECT_VERIFIES(v_v, WASM_TRY_OP, kExprCatch, WASM_BR_ON_EXN(1, ex1),
+ kExprDrop, kExprEnd);
+ EXPECT_VERIFIES(v_v, WASM_TRY_OP, kExprCatch, WASM_BR_ON_EXN(0, ex1),
+ WASM_BR_ON_EXN(0, ex1), kExprDrop, kExprEnd);
+ EXPECT_VERIFIES(v_v, WASM_BLOCK(WASM_TRY_OP, kExprCatch,
+ WASM_BR_ON_EXN(1, ex1), kExprDrop, kExprEnd));
+ EXPECT_VERIFIES(i_v,
+ WASM_BLOCK_I(WASM_TRY_OP, kExprCatch, WASM_BR_ON_EXN(1, ex2),
+ kExprDrop, kExprEnd, kExprI32Const, 0));
+ EXPECT_FAILURE(v_v, WASM_TRY_OP, kExprCatch, WASM_BR_ON_EXN(2, ex1),
+ kExprDrop, kExprEnd);
+ EXPECT_FAILURE(v_v, WASM_TRY_OP, kExprCatch, kExprDrop,
+ WASM_BR_ON_EXN(0, ex1), kExprEnd);
+ EXPECT_FAILURE(v_v, WASM_TRY_OP, kExprCatch, WASM_BR_ON_EXN(0, ex1),
+ kExprEnd);
+}
+
+#undef WASM_BR_ON_EXN
#undef WASM_TRY_OP
-#undef WASM_CATCH
TEST_F(FunctionBodyDecoderTest, MultiValBlock1) {
WASM_FEATURE_SCOPE(mv);
@@ -2690,13 +2725,10 @@ TEST_F(FunctionBodyDecoderTest, IfParam) {
TEST_F(FunctionBodyDecoderTest, Regression709741) {
AddLocals(kWasmI32, kV8MaxWasmFunctionLocals - 1);
EXPECT_VERIFIES(v_v, WASM_NOP);
- byte code[] = {WASM_NOP};
- const byte* start = code;
- const byte* end = code + sizeof(code);
- PrepareBytecode(&start, &end, kAppendEnd);
+ byte code[] = {WASM_NOP, WASM_END};
- for (const byte* i = start; i < end; i++) {
- FunctionBody body(sigs.v_v(), 0, start, i);
+ for (size_t i = 0; i < arraysize(code); ++i) {
+ FunctionBody body(sigs.v_v(), 0, code, code + i);
WasmFeatures unused_detected_features;
DecodeResult result =
VerifyWasmCode(zone()->allocator(), kAllWasmFeatures, nullptr,
@@ -2708,6 +2740,129 @@ TEST_F(FunctionBodyDecoderTest, Regression709741) {
}
}
+TEST_F(FunctionBodyDecoderTest, MemoryInit) {
+ TestModuleBuilder builder;
+ builder.InitializeMemory();
+ builder.SetDataSegmentCount(1);
+ module = builder.module();
+
+ EXPECT_FAILURE(v_v, WASM_MEMORY_INIT(0, WASM_ZERO, WASM_ZERO, WASM_ZERO));
+ WASM_FEATURE_SCOPE(bulk_memory);
+ EXPECT_VERIFIES(v_v, WASM_MEMORY_INIT(0, WASM_ZERO, WASM_ZERO, WASM_ZERO));
+ EXPECT_FAILURE(v_v, WASM_TABLE_INIT(1, WASM_ZERO, WASM_ZERO, WASM_ZERO));
+}
+
+TEST_F(FunctionBodyDecoderTest, MemoryInitInvalid) {
+ TestModuleBuilder builder;
+ builder.InitializeMemory();
+ builder.SetDataSegmentCount(1);
+ module = builder.module();
+
+ WASM_FEATURE_SCOPE(bulk_memory);
+ byte code[] = {WASM_MEMORY_INIT(0, WASM_ZERO, WASM_ZERO, WASM_ZERO),
+ WASM_END};
+ for (size_t i = 0; i <= arraysize(code); ++i) {
+ Verify(i == arraysize(code), sigs.v_v(), {code, i}, kOmitEnd);
+ }
+}
+
+TEST_F(FunctionBodyDecoderTest, MemoryDrop) {
+ TestModuleBuilder builder;
+ builder.InitializeMemory();
+ builder.SetDataSegmentCount(1);
+ module = builder.module();
+
+ EXPECT_FAILURE(v_v, WASM_MEMORY_DROP(0));
+ WASM_FEATURE_SCOPE(bulk_memory);
+ EXPECT_VERIFIES(v_v, WASM_MEMORY_DROP(0));
+ EXPECT_FAILURE(v_v, WASM_MEMORY_DROP(1));
+}
+
+TEST_F(FunctionBodyDecoderTest, MemoryCopy) {
+ TestModuleBuilder builder;
+ builder.InitializeMemory();
+ module = builder.module();
+
+ EXPECT_FAILURE(v_v, WASM_MEMORY_COPY(WASM_ZERO, WASM_ZERO, WASM_ZERO));
+ WASM_FEATURE_SCOPE(bulk_memory);
+ EXPECT_VERIFIES(v_v, WASM_MEMORY_COPY(WASM_ZERO, WASM_ZERO, WASM_ZERO));
+}
+
+TEST_F(FunctionBodyDecoderTest, MemoryFill) {
+ TestModuleBuilder builder;
+ builder.InitializeMemory();
+ module = builder.module();
+
+ EXPECT_FAILURE(v_v, WASM_MEMORY_FILL(WASM_ZERO, WASM_ZERO, WASM_ZERO));
+ WASM_FEATURE_SCOPE(bulk_memory);
+ EXPECT_VERIFIES(v_v, WASM_MEMORY_FILL(WASM_ZERO, WASM_ZERO, WASM_ZERO));
+}
+
+TEST_F(FunctionBodyDecoderTest, BulkMemoryOpsWithoutMemory) {
+ WASM_FEATURE_SCOPE(bulk_memory);
+ EXPECT_FAILURE(v_v, WASM_MEMORY_INIT(0, WASM_ZERO, WASM_ZERO, WASM_ZERO));
+ EXPECT_FAILURE(v_v, WASM_MEMORY_COPY(WASM_ZERO, WASM_ZERO, WASM_ZERO));
+ EXPECT_FAILURE(v_v, WASM_MEMORY_FILL(WASM_ZERO, WASM_ZERO, WASM_ZERO));
+}
+
+TEST_F(FunctionBodyDecoderTest, TableInit) {
+ TestModuleBuilder builder;
+ builder.InitializeTable();
+ builder.AddPassiveElementSegment();
+ module = builder.module();
+
+ EXPECT_FAILURE(v_v, WASM_TABLE_INIT(0, WASM_ZERO, WASM_ZERO, WASM_ZERO));
+ WASM_FEATURE_SCOPE(bulk_memory);
+ EXPECT_VERIFIES(v_v, WASM_TABLE_INIT(0, WASM_ZERO, WASM_ZERO, WASM_ZERO));
+ EXPECT_FAILURE(v_v, WASM_TABLE_INIT(1, WASM_ZERO, WASM_ZERO, WASM_ZERO));
+}
+
+TEST_F(FunctionBodyDecoderTest, TableInitInvalid) {
+ TestModuleBuilder builder;
+ builder.InitializeTable();
+ builder.AddPassiveElementSegment();
+ module = builder.module();
+
+ WASM_FEATURE_SCOPE(bulk_memory);
+ byte code[] = {WASM_TABLE_INIT(0, WASM_ZERO, WASM_ZERO, WASM_ZERO), WASM_END};
+ for (size_t i = 0; i <= arraysize(code); ++i) {
+ Verify(i == arraysize(code), sigs.v_v(), {code, i}, kOmitEnd);
+ }
+}
+
+TEST_F(FunctionBodyDecoderTest, TableDrop) {
+ TestModuleBuilder builder;
+ builder.InitializeTable();
+ builder.AddPassiveElementSegment();
+ module = builder.module();
+
+ EXPECT_FAILURE(v_v, WASM_TABLE_DROP(0));
+ WASM_FEATURE_SCOPE(bulk_memory);
+ EXPECT_VERIFIES(v_v, WASM_TABLE_DROP(0));
+ EXPECT_FAILURE(v_v, WASM_TABLE_DROP(1));
+}
+
+TEST_F(FunctionBodyDecoderTest, TableCopy) {
+ TestModuleBuilder builder;
+ builder.InitializeTable();
+ module = builder.module();
+
+ EXPECT_FAILURE(v_v, WASM_TABLE_COPY(WASM_ZERO, WASM_ZERO, WASM_ZERO));
+ WASM_FEATURE_SCOPE(bulk_memory);
+ EXPECT_VERIFIES(v_v, WASM_TABLE_COPY(WASM_ZERO, WASM_ZERO, WASM_ZERO));
+}
+
+TEST_F(FunctionBodyDecoderTest, BulkTableOpsWithoutTable) {
+ TestModuleBuilder builder;
+ builder.InitializeTable();
+ builder.AddPassiveElementSegment();
+
+ WASM_FEATURE_SCOPE(bulk_memory);
+ EXPECT_FAILURE(v_v, WASM_TABLE_INIT(0, WASM_ZERO, WASM_ZERO, WASM_ZERO));
+ EXPECT_FAILURE(v_v, WASM_TABLE_DROP(0));
+ EXPECT_FAILURE(v_v, WASM_TABLE_COPY(WASM_ZERO, WASM_ZERO, WASM_ZERO));
+}
+
class BranchTableIteratorTest : public TestWithZone {
public:
BranchTableIteratorTest() : TestWithZone() {}
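All of the bulk-memory tests above rely on statement ordering: an EXPECT_FAILURE placed before WASM_FEATURE_SCOPE(bulk_memory) checks that the opcode is rejected while the flag is off, and everything after it runs with the flag enabled until the end of the test body. A hypothetical sketch of the RAII idiom behind such a scope macro (the real one lives in test/common/wasm/flag-utils.h; this class name and shape are assumptions):

// Flips a boolean feature flag for the lifetime of the scope object and
// restores the saved value on destruction, so position within the test body
// decides which assertions see the feature enabled.
class ScopedBoolFlag {
 public:
  explicit ScopedBoolFlag(bool* flag) : flag_(flag), saved_(*flag) {
    *flag = true;
  }
  ~ScopedBoolFlag() { *flag_ = saved_; }

 private:
  bool* flag_;
  bool saved_;
};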
@@ -2795,10 +2950,12 @@ TEST_F(WasmOpcodeLengthTest, Statements) {
EXPECT_LENGTH(1, kExprElse);
EXPECT_LENGTH(1, kExprEnd);
EXPECT_LENGTH(1, kExprSelect);
+ EXPECT_LENGTH(1, kExprCatch);
+ EXPECT_LENGTH(1, kExprRethrow);
EXPECT_LENGTH(2, kExprBr);
EXPECT_LENGTH(2, kExprBrIf);
EXPECT_LENGTH(2, kExprThrow);
- EXPECT_LENGTH(2, kExprCatch);
+ EXPECT_LENGTH(3, kExprBrOnExn);
EXPECT_LENGTH_N(2, kExprBlock, kLocalI32);
EXPECT_LENGTH_N(2, kExprLoop, kLocalI32);
EXPECT_LENGTH_N(2, kExprIf, kLocalI32);
@@ -2874,7 +3031,7 @@ TEST_F(WasmOpcodeLengthTest, LoadsAndStores) {
TEST_F(WasmOpcodeLengthTest, MiscMemExpressions) {
EXPECT_LENGTH(2, kExprMemorySize);
- EXPECT_LENGTH(2, kExprGrowMemory);
+ EXPECT_LENGTH(2, kExprMemoryGrow);
}
TEST_F(WasmOpcodeLengthTest, SimpleExpressions) {
diff --git a/deps/v8/test/unittests/wasm/module-decoder-unittest.cc b/deps/v8/test/unittests/wasm/module-decoder-unittest.cc
index 83876b3e0f..3d99dffa72 100644
--- a/deps/v8/test/unittests/wasm/module-decoder-unittest.cc
+++ b/deps/v8/test/unittests/wasm/module-decoder-unittest.cc
@@ -12,6 +12,9 @@
#include "src/wasm/wasm-opcodes.h"
#include "test/common/wasm/flag-utils.h"
#include "test/common/wasm/wasm-macro-gen.h"
+#include "testing/gmock-support.h"
+
+using testing::HasSubstr;
namespace v8 {
namespace internal {
@@ -29,38 +32,50 @@ namespace module_decoder_unittest {
#define WASM_INIT_EXPR_ANYREF WASM_REF_NULL, kExprEnd
#define WASM_INIT_EXPR_GLOBAL(index) WASM_GET_GLOBAL(index), kExprEnd
-#define SIZEOF_EMPTY_FUNCTION ((size_t)5)
#define EMPTY_BODY 0
-#define SIZEOF_EMPTY_BODY ((size_t)1)
#define NOP_BODY 2, 0, kExprNop
-#define SIZEOF_NOP_BODY ((size_t)3)
#define SIG_ENTRY_i_i SIG_ENTRY_x_x(kLocalI32, kLocalI32)
-#define UNKNOWN_SECTION(size) 0, U32V_1(size + 5), 4, 'l', 'u', 'l', 'z'
+#define UNKNOWN_SECTION(size) 0, U32V_1(size + 5), ADD_COUNT('l', 'u', 'l', 'z')
+
+template <typename... Args>
+std::integral_constant<size_t, sizeof...(Args)> CountArgsHelper(Args...);
+#define COUNT_ARGS(...) (decltype(CountArgsHelper(__VA_ARGS__))::value)
+
+template <size_t num>
+struct CheckLEB1 : std::integral_constant<size_t, num> {
+ static_assert(num <= I32V_MAX(1), "LEB range check");
+};
+#define CHECK_LEB1(num) CheckLEB1<num>::value
-#define SECTION(name, size) k##name##SectionCode, U32V_1(size)
+#define ADD_COUNT(...) CHECK_LEB1(COUNT_ARGS(__VA_ARGS__)), __VA_ARGS__
-#define SIGNATURES_SECTION(count, ...) \
- SECTION(Type, 1 + 3 * (count)), U32V_1(count), __VA_ARGS__
+#define SECTION(name, ...) k##name##SectionCode, ADD_COUNT(__VA_ARGS__)
+
+#define SIGNATURES_SECTION(count, ...) SECTION(Type, U32V_1(count), __VA_ARGS__)
#define FUNCTION_SIGNATURES_SECTION(count, ...) \
- SECTION(Function, 1 + (count)), U32V_1(count), __VA_ARGS__
+ SECTION(Function, U32V_1(count), __VA_ARGS__)
-#define FOO_STRING 3, 'f', 'o', 'o'
+#define FOO_STRING ADD_COUNT('f', 'o', 'o')
#define NO_LOCAL_NAMES 0
-#define EMPTY_SIGNATURES_SECTION SECTION(Type, 1), 0
-#define EMPTY_FUNCTION_SIGNATURES_SECTION SECTION(Function, 1), 0
-#define EMPTY_FUNCTION_BODIES_SECTION SECTION(Code, 1), 0
-#define SECTION_NAMES(size) SECTION(Unknown, size + 5), 4, 'n', 'a', 'm', 'e'
-#define SECTION_EXCEPTIONS(size) SECTION(Exception, size)
-#define EMPTY_NAMES_SECTION SECTION_NAMES(1), 0
+#define EMPTY_SIGNATURES_SECTION SECTION(Type, ENTRY_COUNT(0))
+#define EMPTY_FUNCTION_SIGNATURES_SECTION SECTION(Function, ENTRY_COUNT(0))
+#define EMPTY_FUNCTION_BODIES_SECTION SECTION(Code, ENTRY_COUNT(0))
+#define SECTION_NAMES(...) \
+ SECTION(Unknown, ADD_COUNT('n', 'a', 'm', 'e'), ##__VA_ARGS__)
+#define EMPTY_NAMES_SECTION SECTION_NAMES()
+#define SECTION_SRC_MAP(...) \
+ SECTION(Unknown, \
+ ADD_COUNT('s', 'o', 'u', 'r', 'c', 'e', 'M', 'a', 'p', 'p', 'i', \
+ 'n', 'g', 'U', 'R', 'L'), \
+ ADD_COUNT(__VA_ARGS__))
#define FAIL_IF_NO_EXPERIMENTAL_EH(data) \
do { \
ModuleResult result = DecodeModule((data), (data) + sizeof((data))); \
EXPECT_FALSE(result.ok()); \
- EXPECT_EQ(0u, result.val->exceptions.size()); \
} while (false)
#define X1(...) __VA_ARGS__
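The ADD_COUNT machinery above replaces hand-maintained section byte counts with a compile-time computation: CountArgsHelper is declared but never defined, so decltype inspects its sizeof...-based return type without generating any code, and CheckLEB1 statically rejects counts too large for a single LEB128 byte. Two illustrative expansions (these static_asserts are examples added for this note, not part of the patch):

// COUNT_ARGS evaluates to the number of macro arguments at compile time.
static_assert(COUNT_ARGS('f', 'o', 'o') == 3, "three arguments");

// Hence ADD_COUNT('f', 'o', 'o') expands to the byte sequence
//   3, 'f', 'o', 'o'
// i.e. a length-prefixed string, which is exactly what FOO_STRING encodes.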
@@ -68,35 +83,30 @@ namespace module_decoder_unittest {
#define X3(...) __VA_ARGS__, __VA_ARGS__, __VA_ARGS__
#define X4(...) __VA_ARGS__, __VA_ARGS__, __VA_ARGS__, __VA_ARGS__
-#define ONE_EMPTY_FUNCTION SECTION(Function, 1 + 1 * 1), 1, X1(0)
-
-#define TWO_EMPTY_FUNCTIONS SECTION(Function, 1 + 2 * 1), 2, X2(0)
-
-#define THREE_EMPTY_FUNCTIONS SECTION(Function, 1 + 3 * 1), 3, X3(0)
+#define ONE_EMPTY_FUNCTION(sig_index) \
+ SECTION(Function, ENTRY_COUNT(1), X1(sig_index))
-#define FOUR_EMPTY_FUNCTIONS SECTION(Function, 1 + 4 * 1), 4, X4(0)
+#define TWO_EMPTY_FUNCTIONS(sig_index) \
+ SECTION(Function, ENTRY_COUNT(2), X2(sig_index))
-#define ONE_EMPTY_BODY \
- SECTION(Code, 1 + 1 * (1 + SIZEOF_EMPTY_BODY)) \
- , 1, X1(SIZEOF_EMPTY_BODY, EMPTY_BODY)
+#define THREE_EMPTY_FUNCTIONS(sig_index) \
+ SECTION(Function, ENTRY_COUNT(3), X3(sig_index))
-#define TWO_EMPTY_BODIES \
- SECTION(Code, 1 + 2 * (1 + SIZEOF_EMPTY_BODY)) \
- , 2, X2(SIZEOF_EMPTY_BODY, EMPTY_BODY)
+#define FOUR_EMPTY_FUNCTIONS(sig_index) \
+ SECTION(Function, ENTRY_COUNT(4), X4(sig_index))
-#define THREE_EMPTY_BODIES \
- SECTION(Code, 1 + 3 * (1 + SIZEOF_EMPTY_BODY)) \
- , 3, X3(SIZEOF_EMPTY_BODY, EMPTY_BODY)
-
-#define FOUR_EMPTY_BODIES \
- SECTION(Code, 1 + 4 * (1 + SIZEOF_EMPTY_BODY)) \
- , 4, X4(SIZEOF_EMPTY_BODY, EMPTY_BODY)
+#define ONE_EMPTY_BODY SECTION(Code, ENTRY_COUNT(1), X1(EMPTY_BODY))
+#define TWO_EMPTY_BODIES SECTION(Code, ENTRY_COUNT(2), X2(EMPTY_BODY))
+#define THREE_EMPTY_BODIES SECTION(Code, ENTRY_COUNT(3), X3(EMPTY_BODY))
+#define FOUR_EMPTY_BODIES SECTION(Code, ENTRY_COUNT(4), X4(EMPTY_BODY))
#define SIGNATURES_SECTION_VOID_VOID \
- SECTION(Type, 1 + SIZEOF_SIG_ENTRY_v_v), 1, SIG_ENTRY_v_v
+ SECTION(Type, ENTRY_COUNT(1), SIG_ENTRY_v_v)
#define LINEAR_MEMORY_INDEX_0 0
+#define EXCEPTION_ENTRY(sig_index) U32V_1(kExceptionAttribute), sig_index
+
#define EXPECT_VERIFIES(data) \
do { \
ModuleResult result = DecodeModule(data, data + sizeof(data)); \
@@ -111,11 +121,12 @@ namespace module_decoder_unittest {
#define EXPECT_FAILURE(data) EXPECT_FAILURE_LEN(data, sizeof(data))
-#define EXPECT_OFF_END_FAILURE(data, min, max) \
- do { \
- for (size_t length = min; length < max; length++) { \
- EXPECT_FAILURE_LEN(data, length); \
- } \
+#define EXPECT_OFF_END_FAILURE(data, min) \
+ do { \
+ STATIC_ASSERT(min < arraysize(data)); \
+ for (size_t length = min; length < arraysize(data); length++) { \
+ EXPECT_FAILURE_LEN(data, length); \
+ } \
} while (false)
#define EXPECT_OK(result) \
@@ -124,6 +135,12 @@ namespace module_decoder_unittest {
if (!result.ok()) return; \
} while (false)
+#define EXPECT_NOT_OK(result, msg) \
+ do { \
+ EXPECT_FALSE(result.ok()); \
+ EXPECT_THAT(result.error().message(), HasSubstr(msg)); \
+ } while (false)
+
static size_t SizeOfVarInt(size_t value) {
size_t size = 0;
do {
@@ -213,22 +230,22 @@ TEST_F(WasmModuleVerifyTest, DecodeEmpty) {
TEST_F(WasmModuleVerifyTest, OneGlobal) {
static const byte data[] = {
- SECTION(Global, 6), // --
- 1,
- kLocalI32, // local type
- 0, // immutable
- WASM_INIT_EXPR_I32V_1(13) // init
+ SECTION(Global, // --
+ ENTRY_COUNT(1), // --
+ kLocalI32, // local type
+ 0, // immutable
+ WASM_INIT_EXPR_I32V_1(13)) // init
};
{
// Should decode to exactly one global.
ModuleResult result = DecodeModule(data, data + sizeof(data));
EXPECT_OK(result);
- EXPECT_EQ(1u, result.val->globals.size());
- EXPECT_EQ(0u, result.val->functions.size());
- EXPECT_EQ(0u, result.val->data_segments.size());
+ EXPECT_EQ(1u, result.value()->globals.size());
+ EXPECT_EQ(0u, result.value()->functions.size());
+ EXPECT_EQ(0u, result.value()->data_segments.size());
- const WasmGlobal* global = &result.val->globals.back();
+ const WasmGlobal* global = &result.value()->globals.back();
EXPECT_EQ(kWasmI32, global->type);
EXPECT_EQ(0u, global->offset);
@@ -237,28 +254,28 @@ TEST_F(WasmModuleVerifyTest, OneGlobal) {
EXPECT_EQ(13, global->init.val.i32_const);
}
- EXPECT_OFF_END_FAILURE(data, 1, sizeof(data));
+ EXPECT_OFF_END_FAILURE(data, 1);
}
TEST_F(WasmModuleVerifyTest, AnyRefGlobal) {
WASM_FEATURE_SCOPE(anyref);
static const byte data[] = {
- SECTION(Global, 5), // --
- 1,
- kLocalAnyRef, // local type
- 0, // immutable
- WASM_INIT_EXPR_ANYREF // init
+ SECTION(Global, // --
+ ENTRY_COUNT(1), // --
+ kLocalAnyRef, // local type
+ 0, // immutable
+ WASM_INIT_EXPR_ANYREF) // init
};
{
// Should decode to exactly one global.
ModuleResult result = DecodeModule(data, data + sizeof(data));
EXPECT_OK(result);
- EXPECT_EQ(1u, result.val->globals.size());
- EXPECT_EQ(0u, result.val->functions.size());
- EXPECT_EQ(0u, result.val->data_segments.size());
+ EXPECT_EQ(1u, result.value()->globals.size());
+ EXPECT_EQ(0u, result.value()->functions.size());
+ EXPECT_EQ(0u, result.value()->data_segments.size());
- const WasmGlobal* global = &result.val->globals.back();
+ const WasmGlobal* global = &result.value()->globals.back();
EXPECT_EQ(kWasmAnyRef, global->type);
EXPECT_FALSE(global->mutability);
@@ -269,31 +286,29 @@ TEST_F(WasmModuleVerifyTest, AnyRefGlobal) {
TEST_F(WasmModuleVerifyTest, AnyRefGlobalWithGlobalInit) {
WASM_FEATURE_SCOPE(anyref);
static const byte data[] = {
- SECTION(Import, 8), // section header
- 1, // number of imports
- NAME_LENGTH(1), // --
- 'm', // module name
- NAME_LENGTH(1), // --
- 'f', // global name
- kExternalGlobal, // import kind
- kLocalAnyRef, // type
- 0, // mutability
- SECTION(Global, 6), // --
- 1,
- kLocalAnyRef, // local type
- 0, // immutable
- WASM_INIT_EXPR_GLOBAL(0),
+ SECTION(Import, // --
+ ENTRY_COUNT(1), // number of imports
+ ADD_COUNT('m'), // module name
+ ADD_COUNT('f'), // global name
+ kExternalGlobal, // import kind
+ kLocalAnyRef, // type
+ 0), // mutability
+ SECTION(Global, // --
+ ENTRY_COUNT(1),
+ kLocalAnyRef, // local type
+ 0, // immutable
+ WASM_INIT_EXPR_GLOBAL(0)),
};
{
// Should decode to exactly one global.
ModuleResult result = DecodeModule(data, data + sizeof(data));
EXPECT_OK(result);
- EXPECT_EQ(2u, result.val->globals.size());
- EXPECT_EQ(0u, result.val->functions.size());
- EXPECT_EQ(0u, result.val->data_segments.size());
+ EXPECT_EQ(2u, result.value()->globals.size());
+ EXPECT_EQ(0u, result.value()->functions.size());
+ EXPECT_EQ(0u, result.value()->data_segments.size());
- const WasmGlobal* global = &result.val->globals.back();
+ const WasmGlobal* global = &result.value()->globals.back();
EXPECT_EQ(kWasmAnyRef, global->type);
EXPECT_FALSE(global->mutability);
@@ -303,76 +318,62 @@ TEST_F(WasmModuleVerifyTest, AnyRefGlobalWithGlobalInit) {
TEST_F(WasmModuleVerifyTest, Global_invalid_type) {
static const byte data[] = {
- SECTION(Global, 6), // --
- 1,
- 64, // invalid memory type
- 1, // mutable
- WASM_INIT_EXPR_I32V_1(33), // init
+ SECTION(Global, // --
+ ENTRY_COUNT(1), // --
+ 64, // invalid memory type
+ 1, // mutable
+ WASM_INIT_EXPR_I32V_1(33)), // init
};
- ModuleResult result = DecodeModule(data, data + sizeof(data));
- EXPECT_FALSE(result.ok());
+ EXPECT_FAILURE(data);
}
TEST_F(WasmModuleVerifyTest, Global_invalid_type2) {
static const byte data[] = {
- SECTION(Global, 6), // --
- 1,
- kLocalVoid, // invalid memory type
- 1, // mutable
- WASM_INIT_EXPR_I32V_1(33), // init
+ SECTION(Global, // --
+ ENTRY_COUNT(1), // --
+ kLocalVoid, // invalid memory type
+ 1, // mutable
+ WASM_INIT_EXPR_I32V_1(33)), // init
};
- ModuleResult result = DecodeModule(data, data + sizeof(data));
- EXPECT_FALSE(result.ok());
+ EXPECT_FAILURE(data);
}
TEST_F(WasmModuleVerifyTest, ZeroGlobals) {
- static const byte data[] = {
- SECTION(Global, 1), // --
- 0, // declare 0 globals
- };
+ static const byte data[] = {SECTION(Global, ENTRY_COUNT(0))};
ModuleResult result = DecodeModule(data, data + sizeof(data));
EXPECT_OK(result);
}
TEST_F(WasmModuleVerifyTest, ExportMutableGlobal) {
- WASM_FEATURE_SCOPE(mut_global);
{
static const byte data[] = {
- SECTION(Global, 6), // --
- 1,
- kLocalI32, // local type
- 0, // immutable
- WASM_INIT_EXPR_I32V_1(13), // init
- SECTION(Export, 8), // --
- 1, // Export count
- 4, // name length
- 'n', // --
- 'a', // --
- 'm', // --
- 'e', // --
- kExternalGlobal, // global
- 0, // global index
+ SECTION(Global, // --
+ ENTRY_COUNT(1), // --
+ kLocalI32, // local type
+ 0, // immutable
+ WASM_INIT_EXPR_I32V_1(13)), // init
+ SECTION(Export, // --
+ ENTRY_COUNT(1), // export count
+ ADD_COUNT('n', 'a', 'm', 'e'), // name
+ kExternalGlobal, // global
+ 0), // global index
};
EXPECT_VERIFIES(data);
}
{
static const byte data[] = {
- SECTION(Global, 6), // --
- 1, // --
- kLocalI32, // local type
- 1, // mutable
- WASM_INIT_EXPR_I32V_1(13), // init
- SECTION(Export, 8), // --
- 1, // Export count
- 4, // name length
- 'n', // --
- 'a', // --
- 'm', // --
- 'e', // --
- kExternalGlobal, // global
- 0, // global index
+ SECTION(Global, // --
+ ENTRY_COUNT(1), // --
+ kLocalI32, // local type
+ 1, // mutable
+ WASM_INIT_EXPR_I32V_1(13)), // init
+ SECTION(Export, // --
+ ENTRY_COUNT(1), // export count
+ ADD_COUNT('n', 'a', 'm', 'e'), // name
+ kExternalGlobal, // global
+ 0), // global index
};
EXPECT_VERIFIES(data);
}
@@ -417,42 +418,41 @@ TEST_F(WasmModuleVerifyTest, NGlobals) {
}
TEST_F(WasmModuleVerifyTest, GlobalWithInvalidMemoryType) {
- static const byte data[] = {SECTION(Global, 7),
- 33, // memory type
- 0, // exported
- WASM_INIT_EXPR_I32V_1(1)};
+ static const byte data[] = {SECTION(Global, // --
+ ENTRY_COUNT(1), // --
+ 33, // memory type
+ 0, // exported
+ WASM_INIT_EXPR_I32V_1(1))};
EXPECT_FAILURE(data);
}
TEST_F(WasmModuleVerifyTest, TwoGlobals) {
- static const byte data[] = {
- SECTION(Global, 21),
- 2,
- kLocalF32, // type
- 0, // immutable
- WASM_INIT_EXPR_F32(22.0),
- kLocalF64, // type
- 1, // mutable
- WASM_INIT_EXPR_F64(23.0),
- };
+ static const byte data[] = {SECTION(Global, // --
+ ENTRY_COUNT(2), // --
+ kLocalF32, // type
+ 0, // immutable
+ WASM_INIT_EXPR_F32(22.0), // --
+ kLocalF64, // type
+ 1, // mutable
+ WASM_INIT_EXPR_F64(23.0))}; // --
{
// Should decode to exactly two globals.
ModuleResult result = DecodeModule(data, data + sizeof(data));
EXPECT_OK(result);
- EXPECT_EQ(2u, result.val->globals.size());
- EXPECT_EQ(0u, result.val->functions.size());
- EXPECT_EQ(0u, result.val->data_segments.size());
+ EXPECT_EQ(2u, result.value()->globals.size());
+ EXPECT_EQ(0u, result.value()->functions.size());
+ EXPECT_EQ(0u, result.value()->data_segments.size());
- const WasmGlobal* g0 = &result.val->globals[0];
+ const WasmGlobal* g0 = &result.value()->globals[0];
EXPECT_EQ(kWasmF32, g0->type);
EXPECT_EQ(0u, g0->offset);
EXPECT_FALSE(g0->mutability);
EXPECT_EQ(WasmInitExpr::kF32Const, g0->init.kind);
- const WasmGlobal* g1 = &result.val->globals[1];
+ const WasmGlobal* g1 = &result.value()->globals[1];
EXPECT_EQ(kWasmF64, g1->type);
EXPECT_EQ(8u, g1->offset);
@@ -460,72 +460,103 @@ TEST_F(WasmModuleVerifyTest, TwoGlobals) {
EXPECT_EQ(WasmInitExpr::kF64Const, g1->init.kind);
}
- EXPECT_OFF_END_FAILURE(data, 1, sizeof(data));
+ EXPECT_OFF_END_FAILURE(data, 1);
}
TEST_F(WasmModuleVerifyTest, ZeroExceptions) {
- static const byte data[] = {
- SECTION_EXCEPTIONS(1), 0,
- };
+ static const byte data[] = {SECTION(Exception, ENTRY_COUNT(0))};
FAIL_IF_NO_EXPERIMENTAL_EH(data);
WASM_FEATURE_SCOPE(eh);
ModuleResult result = DecodeModule(data, data + sizeof(data));
EXPECT_OK(result);
- EXPECT_EQ(0u, result.val->exceptions.size());
+ EXPECT_EQ(0u, result.value()->exceptions.size());
}
TEST_F(WasmModuleVerifyTest, OneI32Exception) {
- static const byte data[] = {SECTION_EXCEPTIONS(3), 1,
- // except[0] (i32)
- 1, kLocalI32};
+ static const byte data[] = {
+ SECTION(Type, ENTRY_COUNT(1), SIG_ENTRY_v_x(kLocalI32)), // sig#0 (i32)
+ SECTION(Exception, ENTRY_COUNT(1),
+ EXCEPTION_ENTRY(SIG_INDEX(0)))}; // except[0] (sig#0)
FAIL_IF_NO_EXPERIMENTAL_EH(data);
WASM_FEATURE_SCOPE(eh);
ModuleResult result = DecodeModule(data, data + sizeof(data));
EXPECT_OK(result);
- EXPECT_EQ(1u, result.val->exceptions.size());
+ EXPECT_EQ(1u, result.value()->exceptions.size());
- const WasmException& e0 = result.val->exceptions.front();
+ const WasmException& e0 = result.value()->exceptions.front();
EXPECT_EQ(1u, e0.sig->parameter_count());
EXPECT_EQ(kWasmI32, e0.sig->GetParam(0));
}
TEST_F(WasmModuleVerifyTest, TwoExceptions) {
- static const byte data[] = {SECTION_EXCEPTIONS(6), 2,
- // except[0] (f32, i64)
- 2, kLocalF32, kLocalI64,
- // except[1] (i32)
- 1, kLocalI32};
+ static const byte data[] = {
+ SECTION(Type, ENTRY_COUNT(2),
+ SIG_ENTRY_v_x(kLocalI32), // sig#0 (i32)
+ SIG_ENTRY_v_xx(kLocalF32, kLocalI64)), // sig#1 (f32, i64)
+ SECTION(Exception, ENTRY_COUNT(2),
+ EXCEPTION_ENTRY(SIG_INDEX(1)), // except[0] (sig#1)
+ EXCEPTION_ENTRY(SIG_INDEX(0)))}; // except[1] (sig#0)
FAIL_IF_NO_EXPERIMENTAL_EH(data);
WASM_FEATURE_SCOPE(eh);
ModuleResult result = DecodeModule(data, data + sizeof(data));
EXPECT_OK(result);
- EXPECT_EQ(2u, result.val->exceptions.size());
- const WasmException& e0 = result.val->exceptions.front();
+ EXPECT_EQ(2u, result.value()->exceptions.size());
+ const WasmException& e0 = result.value()->exceptions.front();
EXPECT_EQ(2u, e0.sig->parameter_count());
EXPECT_EQ(kWasmF32, e0.sig->GetParam(0));
EXPECT_EQ(kWasmI64, e0.sig->GetParam(1));
- const WasmException& e1 = result.val->exceptions.back();
+ const WasmException& e1 = result.value()->exceptions.back();
EXPECT_EQ(kWasmI32, e1.sig->GetParam(0));
}
-TEST_F(WasmModuleVerifyTest, Exception_invalid_type) {
- static const byte data[] = {SECTION_EXCEPTIONS(3), 1,
- // except[0] (?)
- 1, 64};
+TEST_F(WasmModuleVerifyTest, Exception_invalid_sig_index) {
+ static const byte data[] = {
+ SIGNATURES_SECTION_VOID_VOID,
+ SECTION(Exception, ENTRY_COUNT(1),
+ EXCEPTION_ENTRY(
+ SIG_INDEX(23)))}; // except[0] (sig#23 [out-of-bounds])
+ FAIL_IF_NO_EXPERIMENTAL_EH(data);
+
+ // Should fail decoding exception section.
+ WASM_FEATURE_SCOPE(eh);
+ ModuleResult result = DecodeModule(data, data + sizeof(data));
+ EXPECT_NOT_OK(result, "signature index 23 out of bounds");
+}
+
+TEST_F(WasmModuleVerifyTest, Exception_invalid_sig_return) {
+ static const byte data[] = {
+ SECTION(Type, ENTRY_COUNT(1), SIG_ENTRY_i_i),
+ SECTION(Exception, ENTRY_COUNT(1),
+ EXCEPTION_ENTRY(
+ SIG_INDEX(0)))}; // except[0] (sig#0 [invalid-return-type])
+ FAIL_IF_NO_EXPERIMENTAL_EH(data);
+
+ // Should fail decoding exception section.
+ WASM_FEATURE_SCOPE(eh);
+ ModuleResult result = DecodeModule(data, data + sizeof(data));
+ EXPECT_NOT_OK(result, "exception signature 0 has non-void return");
+}
+
+TEST_F(WasmModuleVerifyTest, Exception_invalid_attribute) {
+ static const byte data[] = {
+ SECTION(Type, ENTRY_COUNT(1), SIG_ENTRY_i_i),
+ SECTION(Exception, ENTRY_COUNT(1), 23,
+ SIG_INDEX(0))}; // except[0] (sig#0) [invalid-attribute]
FAIL_IF_NO_EXPERIMENTAL_EH(data);
// Should fail decoding exception section.
WASM_FEATURE_SCOPE(eh);
ModuleResult result = DecodeModule(data, data + sizeof(data));
- EXPECT_FALSE(result.ok());
+ EXPECT_NOT_OK(result, "exception attribute 23 not supported");
}
TEST_F(WasmModuleVerifyTest, ExceptionSectionCorrectPlacement) {
- static const byte data[] = {SECTION(Import, 1), 0, SECTION_EXCEPTIONS(1), 0,
- SECTION(Export, 1), 0};
+ static const byte data[] = {SECTION(Import, ENTRY_COUNT(0)),
+ SECTION(Exception, ENTRY_COUNT(0)),
+ SECTION(Export, ENTRY_COUNT(0))};
FAIL_IF_NO_EXPERIMENTAL_EH(data);
WASM_FEATURE_SCOPE(eh);
@@ -534,57 +565,72 @@ TEST_F(WasmModuleVerifyTest, ExceptionSectionCorrectPlacement) {
}
TEST_F(WasmModuleVerifyTest, ExceptionSectionAfterExport) {
- static const byte data[] = {SECTION(Export, 1), 0, SECTION_EXCEPTIONS(1), 0};
+ static const byte data[] = {SECTION(Export, ENTRY_COUNT(0)),
+ SECTION(Exception, ENTRY_COUNT(0))};
FAIL_IF_NO_EXPERIMENTAL_EH(data);
WASM_FEATURE_SCOPE(eh);
ModuleResult result = DecodeModule(data, data + sizeof(data));
- EXPECT_FALSE(result.ok());
+ EXPECT_NOT_OK(result,
+ "The Exception section must appear before the Export section");
}
-TEST_F(WasmModuleVerifyTest, ExceptionSectionBeforeImport) {
- static const byte data[] = {SECTION_EXCEPTIONS(1), 0, SECTION(Import, 1), 0};
+TEST_F(WasmModuleVerifyTest, ExceptionSectionBeforeGlobal) {
+ static const byte data[] = {SECTION(Exception, ENTRY_COUNT(0)),
+ SECTION(Global, ENTRY_COUNT(0))};
FAIL_IF_NO_EXPERIMENTAL_EH(data);
WASM_FEATURE_SCOPE(eh);
ModuleResult result = DecodeModule(data, data + sizeof(data));
- EXPECT_FALSE(result.ok());
+ EXPECT_NOT_OK(result, "unexpected section: Global");
+}
+
+TEST_F(WasmModuleVerifyTest, ExceptionSectionAfterMemoryBeforeGlobal) {
+ STATIC_ASSERT(kMemorySectionCode + 1 == kGlobalSectionCode);
+ static const byte data[] = {SECTION(Memory, ENTRY_COUNT(0)),
+ SECTION(Exception, ENTRY_COUNT(0)),
+ SECTION(Global, ENTRY_COUNT(0))};
+ FAIL_IF_NO_EXPERIMENTAL_EH(data);
+
+ WASM_FEATURE_SCOPE(eh);
+ ModuleResult result = DecodeModule(data, data + sizeof(data));
+ EXPECT_NOT_OK(result, "unexpected section: Global");
}
TEST_F(WasmModuleVerifyTest, ExceptionImport) {
- static const byte data[] = {SECTION(Import, 9), // section header
- 1, // number of imports
- NAME_LENGTH(1), // --
- 'm', // module name
- NAME_LENGTH(2), // --
- 'e', 'x', // exception name
- kExternalException, // import kind
- // except[0] (i32)
- 1, kLocalI32};
+ static const byte data[] = {
+ SIGNATURES_SECTION_VOID_VOID,
+ SECTION(Import, // section header
+ ENTRY_COUNT(1), // number of imports
+ ADD_COUNT('m'), // module name
+ ADD_COUNT('e', 'x'), // exception name
+ kExternalException, // import kind
+ EXCEPTION_ENTRY(SIG_INDEX(0)))}; // except[0] (sig#0)
FAIL_IF_NO_EXPERIMENTAL_EH(data);
WASM_FEATURE_SCOPE(eh);
ModuleResult result = DecodeModule(data, data + sizeof(data));
EXPECT_OK(result);
- EXPECT_EQ(1u, result.val->exceptions.size());
- EXPECT_EQ(1u, result.val->import_table.size());
+ EXPECT_EQ(1u, result.value()->exceptions.size());
+ EXPECT_EQ(1u, result.value()->import_table.size());
}
TEST_F(WasmModuleVerifyTest, ExceptionExport) {
- static const byte data[] = {SECTION_EXCEPTIONS(3), 1,
- // except[0] (i32)
- 1, kLocalI32, SECTION(Export, 4),
- 1, // exports
- NO_NAME, // --
- kExternalException, // --
- EXCEPTION_INDEX(0)};
+ static const byte data[] = {
+ SIGNATURES_SECTION_VOID_VOID,
+ SECTION(Exception, ENTRY_COUNT(1),
+ EXCEPTION_ENTRY(SIG_INDEX(0))), // except[0] (sig#0)
+ SECTION(Export, ENTRY_COUNT(1), // --
+ NO_NAME, // --
+ kExternalException, // --
+ EXCEPTION_INDEX(0))};
FAIL_IF_NO_EXPERIMENTAL_EH(data);
WASM_FEATURE_SCOPE(eh);
ModuleResult result = DecodeModule(data, data + sizeof(data));
EXPECT_OK(result);
- EXPECT_EQ(1u, result.val->exceptions.size());
- EXPECT_EQ(1u, result.val->export_table.size());
+ EXPECT_EQ(1u, result.value()->exceptions.size());
+ EXPECT_EQ(1u, result.value()->export_table.size());
}
TEST_F(WasmModuleVerifyTest, OneSignature) {
@@ -594,74 +640,61 @@ TEST_F(WasmModuleVerifyTest, OneSignature) {
}
{
- static const byte data[] = {SECTION(Type, 1 + SIZEOF_SIG_ENTRY_x_x), 1,
- SIG_ENTRY_i_i};
+ static const byte data[] = {SECTION(Type, ENTRY_COUNT(1), SIG_ENTRY_i_i)};
EXPECT_VERIFIES(data);
}
}
TEST_F(WasmModuleVerifyTest, MultipleSignatures) {
static const byte data[] = {
- SECTION(Type, 1 + SIZEOF_SIG_ENTRY_v_v + SIZEOF_SIG_ENTRY_x_x +
- SIZEOF_SIG_ENTRY_x_xx), // --
- 3, // --
- SIG_ENTRY_v_v, // void -> void
- SIG_ENTRY_x_x(kLocalI32, kLocalF32), // f32 -> i32
- SIG_ENTRY_x_xx(kLocalI32, kLocalF64, kLocalF64), // f64,f64 -> i32
+ SECTION(
+ Type, // --
+ ENTRY_COUNT(3), // --
+ SIG_ENTRY_v_v, // void -> void
+ SIG_ENTRY_x_x(kLocalI32, kLocalF32), // f32 -> i32
+ SIG_ENTRY_x_xx(kLocalI32, kLocalF64, kLocalF64)), // f64,f64 -> i32
};
ModuleResult result = DecodeModule(data, data + sizeof(data));
EXPECT_OK(result);
- EXPECT_EQ(3u, result.val->signatures.size());
- if (result.val->signatures.size() == 3) {
- EXPECT_EQ(0u, result.val->signatures[0]->return_count());
- EXPECT_EQ(1u, result.val->signatures[1]->return_count());
- EXPECT_EQ(1u, result.val->signatures[2]->return_count());
-
- EXPECT_EQ(0u, result.val->signatures[0]->parameter_count());
- EXPECT_EQ(1u, result.val->signatures[1]->parameter_count());
- EXPECT_EQ(2u, result.val->signatures[2]->parameter_count());
+ EXPECT_EQ(3u, result.value()->signatures.size());
+ if (result.value()->signatures.size() == 3) {
+ EXPECT_EQ(0u, result.value()->signatures[0]->return_count());
+ EXPECT_EQ(1u, result.value()->signatures[1]->return_count());
+ EXPECT_EQ(1u, result.value()->signatures[2]->return_count());
+
+ EXPECT_EQ(0u, result.value()->signatures[0]->parameter_count());
+ EXPECT_EQ(1u, result.value()->signatures[1]->parameter_count());
+ EXPECT_EQ(2u, result.value()->signatures[2]->parameter_count());
}
- EXPECT_OFF_END_FAILURE(data, 1, sizeof(data));
+ EXPECT_OFF_END_FAILURE(data, 1);
}
TEST_F(WasmModuleVerifyTest, DataSegmentWithImmutableImportedGlobal) {
// Import 2 globals so that we can initialize data with a global index != 0.
const byte data[] = {
- SECTION(Import, 15), // section header
- 2, // number of imports
- NAME_LENGTH(1), // --
- 'm', // module name
- NAME_LENGTH(1), // --
- 'f', // global name
- kExternalGlobal, // import kind
- kLocalI32, // type
- 0, // mutability
- NAME_LENGTH(1), // --
- 'n', // module name
- NAME_LENGTH(1), // --
- 'g', // global name
- kExternalGlobal, // import kind
- kLocalI32, // type
- 0, // mutability
- SECTION(Memory, 4),
- ENTRY_COUNT(1),
- kHasMaximumFlag,
- 28,
- 28,
- SECTION(Data, 9),
- ENTRY_COUNT(1),
- LINEAR_MEMORY_INDEX_0,
- WASM_INIT_EXPR_GLOBAL(1), // dest addr
- U32V_1(3), // source size
- 'a',
- 'b',
- 'c' // data bytes
+ SECTION(Import, // section header
+ ENTRY_COUNT(2), // number of imports
+ ADD_COUNT('m'), // module name
+ ADD_COUNT('f'), // global name
+ kExternalGlobal, // import kind
+ kLocalI32, // type
+ 0, // mutability
+ ADD_COUNT('n'), // module name
+ ADD_COUNT('g'), // global name
+ kExternalGlobal, // import kind
+ kLocalI32, // type
+ 0), // mutability
+ SECTION(Memory, ENTRY_COUNT(1), kHasMaximumFlag, 28, 28),
+ SECTION(Data, ENTRY_COUNT(1), LINEAR_MEMORY_INDEX_0,
+ WASM_INIT_EXPR_GLOBAL(1), // dest addr
+ U32V_1(3), // source size
+ 'a', 'b', 'c') // data bytes
};
ModuleResult result = DecodeModule(data, data + sizeof(data));
EXPECT_OK(result);
- WasmInitExpr expr = result.val->data_segments.back().dest_addr;
+ WasmInitExpr expr = result.value()->data_segments.back().dest_addr;
EXPECT_EQ(WasmInitExpr::kGlobalIndex, expr.kind);
EXPECT_EQ(1u, expr.val.global_index);
}
@@ -669,52 +702,33 @@ TEST_F(WasmModuleVerifyTest, DataSegmentWithImmutableImportedGlobal) {
TEST_F(WasmModuleVerifyTest, DataSegmentWithMutableImportedGlobal) {
// Only an immutable imported global can be used as an init_expr.
const byte data[] = {
- SECTION(Import, 8), // section header
- 1, // number of imports
- NAME_LENGTH(1), // --
- 'm', // module name
- NAME_LENGTH(1), // --
- 'f', // global name
- kExternalGlobal, // import kind
- kLocalI32, // type
- 1, // mutability
- SECTION(Memory, 4),
- ENTRY_COUNT(1),
- kHasMaximumFlag,
- 28,
- 28,
- SECTION(Data, 9),
- ENTRY_COUNT(1),
- LINEAR_MEMORY_INDEX_0,
- WASM_INIT_EXPR_GLOBAL(0), // dest addr
- U32V_1(3), // source size
- 'a',
- 'b',
- 'c' // data bytes
+ SECTION(Import, // section header
+ ENTRY_COUNT(1), // number of imports
+ ADD_COUNT('m'), // module name
+ ADD_COUNT('f'), // global name
+ kExternalGlobal, // import kind
+ kLocalI32, // type
+ 1), // mutability
+ SECTION(Memory, ENTRY_COUNT(1), kHasMaximumFlag, 28, 28),
+ SECTION(Data, ENTRY_COUNT(1), LINEAR_MEMORY_INDEX_0,
+ WASM_INIT_EXPR_GLOBAL(0), // dest addr
+ U32V_1(3), // source size
+ 'a', 'b', 'c') // data bytes
};
EXPECT_FAILURE(data);
}
TEST_F(WasmModuleVerifyTest, DataSegmentWithImmutableGlobal) {
// Only an immutable imported global can be used as an init_expr.
const byte data[] = {
- SECTION(Memory, 4),
- ENTRY_COUNT(1),
- kHasMaximumFlag,
- 28,
- 28,
- SECTION(Global, 8), // --
- 1,
- kLocalI32, // local type
- 0, // immutable
- WASM_INIT_EXPR_I32V_3(0x9BBAA), // init
- SECTION(Data, 9),
- ENTRY_COUNT(1),
- LINEAR_MEMORY_INDEX_0,
- WASM_INIT_EXPR_GLOBAL(0), // dest addr
- U32V_1(3), // source size
- 'a',
- 'b',
- 'c' // data bytes
+ SECTION(Memory, ENTRY_COUNT(1), kHasMaximumFlag, 28, 28),
+ SECTION(Global, ENTRY_COUNT(1),
+ kLocalI32, // local type
+ 0, // immutable
+ WASM_INIT_EXPR_I32V_3(0x9BBAA)), // init
+ SECTION(Data, ENTRY_COUNT(1), LINEAR_MEMORY_INDEX_0,
+ WASM_INIT_EXPR_GLOBAL(0), // dest addr
+ U32V_1(3), // source size
+ 'a', 'b', 'c') // data bytes
};
EXPECT_FAILURE(data);
}
@@ -722,30 +736,22 @@ TEST_F(WasmModuleVerifyTest, DataSegmentWithImmutableGlobal) {
TEST_F(WasmModuleVerifyTest, OneDataSegment) {
const byte kDataSegmentSourceOffset = 24;
const byte data[] = {
- SECTION(Memory, 4),
- ENTRY_COUNT(1),
- kHasMaximumFlag,
- 28,
- 28,
- SECTION(Data, 11),
- ENTRY_COUNT(1),
- LINEAR_MEMORY_INDEX_0,
- WASM_INIT_EXPR_I32V_3(0x9BBAA), // dest addr
- U32V_1(3), // source size
- 'a',
- 'b',
- 'c' // data bytes
+ SECTION(Memory, ENTRY_COUNT(1), kHasMaximumFlag, 28, 28),
+ SECTION(Data, ENTRY_COUNT(1), LINEAR_MEMORY_INDEX_0,
+ WASM_INIT_EXPR_I32V_3(0x9BBAA), // dest addr
+ U32V_1(3), // source size
+ 'a', 'b', 'c') // data bytes
};
{
EXPECT_VERIFIES(data);
ModuleResult result = DecodeModule(data, data + sizeof(data));
EXPECT_OK(result);
- EXPECT_EQ(0u, result.val->globals.size());
- EXPECT_EQ(0u, result.val->functions.size());
- EXPECT_EQ(1u, result.val->data_segments.size());
+ EXPECT_EQ(0u, result.value()->globals.size());
+ EXPECT_EQ(0u, result.value()->functions.size());
+ EXPECT_EQ(1u, result.value()->data_segments.size());
- const WasmDataSegment* segment = &result.val->data_segments.back();
+ const WasmDataSegment* segment = &result.value()->data_segments.back();
EXPECT_EQ(WasmInitExpr::kI32Const, segment->dest_addr.kind);
EXPECT_EQ(0x9BBAA, segment->dest_addr.val.i32_const);
@@ -753,7 +759,7 @@ TEST_F(WasmModuleVerifyTest, OneDataSegment) {
EXPECT_EQ(3u, segment->source.length());
}
- EXPECT_OFF_END_FAILURE(data, 14, sizeof(data));
+ EXPECT_OFF_END_FAILURE(data, 14);
}
TEST_F(WasmModuleVerifyTest, TwoDataSegments) {
@@ -761,44 +767,28 @@ TEST_F(WasmModuleVerifyTest, TwoDataSegments) {
const byte kDataSegment1SourceOffset = kDataSegment0SourceOffset + 11;
const byte data[] = {
- SECTION(Memory, 4),
- ENTRY_COUNT(1),
- kHasMaximumFlag,
- 28,
- 28,
- SECTION(Data, 29),
- ENTRY_COUNT(2), // segment count
- LINEAR_MEMORY_INDEX_0,
- WASM_INIT_EXPR_I32V_3(0x7FFEE), // #0: dest addr
- U32V_1(4), // source size
- 1,
- 2,
- 3,
- 4, // data bytes
- LINEAR_MEMORY_INDEX_0,
- WASM_INIT_EXPR_I32V_3(0x6DDCC), // #1: dest addr
- U32V_1(10), // source size
- 1,
- 2,
- 3,
- 4,
- 5,
- 6,
- 7,
- 8,
- 9,
- 10 // data bytes
+ SECTION(Memory, ENTRY_COUNT(1), kHasMaximumFlag, 28, 28),
+ SECTION(Data,
+ ENTRY_COUNT(2), // segment count
+ LINEAR_MEMORY_INDEX_0,
+ WASM_INIT_EXPR_I32V_3(0x7FFEE), // #0: dest addr
+ U32V_1(4), // source size
+ 1, 2, 3, 4, // data bytes
+ LINEAR_MEMORY_INDEX_0,
+ WASM_INIT_EXPR_I32V_3(0x6DDCC), // #1: dest addr
+ U32V_1(10), // source size
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10) // data bytes
};
{
ModuleResult result = DecodeModule(data, data + sizeof(data));
EXPECT_OK(result);
- EXPECT_EQ(0u, result.val->globals.size());
- EXPECT_EQ(0u, result.val->functions.size());
- EXPECT_EQ(2u, result.val->data_segments.size());
+ EXPECT_EQ(0u, result.value()->globals.size());
+ EXPECT_EQ(0u, result.value()->functions.size());
+ EXPECT_EQ(2u, result.value()->data_segments.size());
- const WasmDataSegment* s0 = &result.val->data_segments[0];
- const WasmDataSegment* s1 = &result.val->data_segments[1];
+ const WasmDataSegment* s0 = &result.value()->data_segments[0];
+ const WasmDataSegment* s1 = &result.value()->data_segments[1];
EXPECT_EQ(WasmInitExpr::kI32Const, s0->dest_addr.kind);
EXPECT_EQ(0x7FFEE, s0->dest_addr.val.i32_const);
@@ -811,19 +801,15 @@ TEST_F(WasmModuleVerifyTest, TwoDataSegments) {
EXPECT_EQ(10u, s1->source.length());
}
- EXPECT_OFF_END_FAILURE(data, 14, sizeof(data));
+ EXPECT_OFF_END_FAILURE(data, 14);
}
TEST_F(WasmModuleVerifyTest, DataWithoutMemory) {
const byte data[] = {
- SECTION(Data, 11),
- ENTRY_COUNT(1),
- LINEAR_MEMORY_INDEX_0,
- WASM_INIT_EXPR_I32V_3(0x9BBAA), // dest addr
- U32V_1(3), // source size
- 'a',
- 'b',
- 'c' // data bytes
+ SECTION(Data, ENTRY_COUNT(1), LINEAR_MEMORY_INDEX_0,
+ WASM_INIT_EXPR_I32V_3(0x9BBAA), // dest addr
+ U32V_1(3), // source size
+ 'a', 'b', 'c') // data bytes
};
EXPECT_FAILURE(data);
}
@@ -831,33 +817,23 @@ TEST_F(WasmModuleVerifyTest, DataWithoutMemory) {
TEST_F(WasmModuleVerifyTest, MaxMaximumMemorySize) {
{
const byte data[] = {
- SECTION(Memory, 6), ENTRY_COUNT(1), kHasMaximumFlag, 0, U32V_3(65536),
- };
+ SECTION(Memory, ENTRY_COUNT(1), kHasMaximumFlag, 0, U32V_3(65536))};
EXPECT_VERIFIES(data);
}
{
const byte data[] = {
- SECTION(Memory, 6), ENTRY_COUNT(1), kHasMaximumFlag, 0, U32V_3(65537),
- };
+ SECTION(Memory, ENTRY_COUNT(1), kHasMaximumFlag, 0, U32V_3(65537))};
EXPECT_FAILURE(data);
}
}
TEST_F(WasmModuleVerifyTest, DataSegment_wrong_init_type) {
const byte data[] = {
- SECTION(Memory, 4),
- ENTRY_COUNT(1),
- kHasMaximumFlag,
- 28,
- 28,
- SECTION(Data, 11),
- ENTRY_COUNT(1),
- LINEAR_MEMORY_INDEX_0,
- WASM_INIT_EXPR_F64(9.9), // dest addr
- U32V_1(3), // source size
- 'a',
- 'b',
- 'c' // data bytes
+ SECTION(Memory, ENTRY_COUNT(1), kHasMaximumFlag, 28, 28),
+ SECTION(Data, ENTRY_COUNT(1), LINEAR_MEMORY_INDEX_0,
+ WASM_INIT_EXPR_F64(9.9), // dest addr
+ U32V_1(3), // source size
+ 'a', 'b', 'c') // data bytes
};
EXPECT_FAILURE(data);
@@ -865,13 +841,13 @@ TEST_F(WasmModuleVerifyTest, DataSegment_wrong_init_type) {
TEST_F(WasmModuleVerifyTest, DataSegmentEndOverflow) {
const byte data[] = {
- SECTION(Memory, 4), // memory section
- ENTRY_COUNT(1), kHasMaximumFlag, 28, 28,
- SECTION(Data, 10), // data section
- ENTRY_COUNT(1), // one entry
- LINEAR_MEMORY_INDEX_0, // mem index
- WASM_INIT_EXPR_I32V_1(0), // offset
- U32V_5(0xFFFFFFFF) // size
+ SECTION(Memory, // memory section
+ ENTRY_COUNT(1), kHasMaximumFlag, 28, 28),
+ SECTION(Data, // data section
+ ENTRY_COUNT(1), // one entry
+ LINEAR_MEMORY_INDEX_0, // mem index
+ WASM_INIT_EXPR_I32V_1(0), // offset
+ U32V_5(0xFFFFFFFF)) // size
};
EXPECT_FAILURE(data);
@@ -882,28 +858,28 @@ TEST_F(WasmModuleVerifyTest, OneIndirectFunction) {
// sig#0 ---------------------------------------------------------------
SIGNATURES_SECTION_VOID_VOID,
// funcs ---------------------------------------------------------------
- ONE_EMPTY_FUNCTION,
+ ONE_EMPTY_FUNCTION(SIG_INDEX(0)),
// table declaration ---------------------------------------------------
- SECTION(Table, 4), ENTRY_COUNT(1), kLocalAnyFunc, 0, 1};
+ SECTION(Table, ENTRY_COUNT(1), kLocalAnyFunc, 0, 1),
+ // code ----------------------------------------------------------------
+ ONE_EMPTY_BODY};
ModuleResult result = DecodeModule(data, data + sizeof(data));
EXPECT_OK(result);
if (result.ok()) {
- EXPECT_EQ(1u, result.val->signatures.size());
- EXPECT_EQ(1u, result.val->functions.size());
- EXPECT_EQ(1u, result.val->tables.size());
- EXPECT_EQ(1u, result.val->tables[0].initial_size);
+ EXPECT_EQ(1u, result.value()->signatures.size());
+ EXPECT_EQ(1u, result.value()->functions.size());
+ EXPECT_EQ(1u, result.value()->tables.size());
+ EXPECT_EQ(1u, result.value()->tables[0].initial_size);
}
}
TEST_F(WasmModuleVerifyTest, ElementSectionWithInternalTable) {
static const byte data[] = {
// table ---------------------------------------------------------------
- SECTION(Table, 4), ENTRY_COUNT(1), kLocalAnyFunc, 0, 1,
+ SECTION(Table, ENTRY_COUNT(1), kLocalAnyFunc, 0, 1),
// elements ------------------------------------------------------------
- SECTION(Element, 1),
- 0 // entry count
- };
+ SECTION(Element, ENTRY_COUNT(0))};
EXPECT_VERIFIES(data);
}
@@ -911,19 +887,15 @@ TEST_F(WasmModuleVerifyTest, ElementSectionWithInternalTable) {
TEST_F(WasmModuleVerifyTest, ElementSectionWithImportedTable) {
static const byte data[] = {
// imports -------------------------------------------------------------
- SECTION(Import, 9), ENTRY_COUNT(1),
- NAME_LENGTH(1), // --
- 'm', // module name
- NAME_LENGTH(1), // --
- 't', // table name
- kExternalTable, // import kind
- kLocalAnyFunc, // elem_type
- 0, // no maximum field
- 1, // initial size
+ SECTION(Import, ENTRY_COUNT(1),
+ ADD_COUNT('m'), // module name
+ ADD_COUNT('t'), // table name
+ kExternalTable, // import kind
+ kLocalAnyFunc, // elem_type
+ 0, // no maximum field
+ 1), // initial size
// elements ------------------------------------------------------------
- SECTION(Element, 1),
- 0 // entry count
- };
+ SECTION(Element, ENTRY_COUNT(0))};
EXPECT_VERIFIES(data);
}
@@ -932,11 +904,11 @@ TEST_F(WasmModuleVerifyTest, ElementSectionWithoutTable) {
// Test that an element section without a table causes a validation error.
static const byte data[] = {
// elements ------------------------------------------------------------
- SECTION(Element, 4),
- 1, // entry count
- 0, // table index
- 0, // offset
- 0 // number of elements
+ SECTION(Element,
+ ENTRY_COUNT(1), // entry count
+ 0, // table index
+ 0, // offset
+ 0) // number of elements
};
EXPECT_FAILURE(data);
@@ -948,15 +920,15 @@ TEST_F(WasmModuleVerifyTest, Regression_735887) {
// sig#0 ---------------------------------------------------------------
SIGNATURES_SECTION_VOID_VOID,
// funcs ---------------------------------------------------------------
- ONE_EMPTY_FUNCTION,
+ ONE_EMPTY_FUNCTION(SIG_INDEX(0)),
// table declaration ---------------------------------------------------
- SECTION(Table, 4), ENTRY_COUNT(1), kLocalAnyFunc, 0, 1,
+ SECTION(Table, ENTRY_COUNT(1), kLocalAnyFunc, 0, 1),
// elements ------------------------------------------------------------
- SECTION(Element, 7),
- 1, // entry count
- TABLE_INDEX(0), WASM_INIT_EXPR_I32V_1(0),
- 1, // elements count
- 0x9A // invalid I32V as function index
+ SECTION(Element,
+ ENTRY_COUNT(1), // entry count
+ TABLE_INDEX0, WASM_INIT_EXPR_I32V_1(0),
+ 1, // elements count
+ 0x9A) // invalid I32V as function index
};
EXPECT_FAILURE(data);
@@ -967,88 +939,82 @@ TEST_F(WasmModuleVerifyTest, OneIndirectFunction_one_entry) {
// sig#0 ---------------------------------------------------------------
SIGNATURES_SECTION_VOID_VOID,
// funcs ---------------------------------------------------------------
- ONE_EMPTY_FUNCTION,
+ ONE_EMPTY_FUNCTION(SIG_INDEX(0)),
// table declaration ---------------------------------------------------
- SECTION(Table, 4), ENTRY_COUNT(1), kLocalAnyFunc, 0, 1,
+ SECTION(Table, ENTRY_COUNT(1), kLocalAnyFunc, 0, 1),
// elements ------------------------------------------------------------
- SECTION(Element, 7),
- 1, // entry count
- TABLE_INDEX(0), WASM_INIT_EXPR_I32V_1(0),
- 1, // elements count
- FUNC_INDEX(0)};
+ SECTION(Element,
+ ENTRY_COUNT(1), // entry count
+ TABLE_INDEX0, WASM_INIT_EXPR_I32V_1(0),
+ 1, // elements count
+ FUNC_INDEX(0)),
+ // code ----------------------------------------------------------------
+ ONE_EMPTY_BODY};
ModuleResult result = DecodeModule(data, data + sizeof(data));
EXPECT_OK(result);
- if (result.ok()) {
- EXPECT_EQ(1u, result.val->signatures.size());
- EXPECT_EQ(1u, result.val->functions.size());
- EXPECT_EQ(1u, result.val->tables.size());
- EXPECT_EQ(1u, result.val->tables[0].initial_size);
- }
+ EXPECT_EQ(1u, result.value()->signatures.size());
+ EXPECT_EQ(1u, result.value()->functions.size());
+ EXPECT_EQ(1u, result.value()->tables.size());
+ EXPECT_EQ(1u, result.value()->tables[0].initial_size);
}
TEST_F(WasmModuleVerifyTest, MultipleIndirectFunctions) {
static const byte data[] = {
// sig#0 -------------------------------------------------------
- SECTION(Type, 1 + SIZEOF_SIG_ENTRY_v_v + SIZEOF_SIG_ENTRY_v_x),
- 2, // --
- SIG_ENTRY_v_v, // void -> void
- SIG_ENTRY_v_x(kLocalI32), // void -> i32
+ SECTION(Type,
+ ENTRY_COUNT(2), // --
+ SIG_ENTRY_v_v, // void -> void
+ SIG_ENTRY_v_x(kLocalI32)), // void -> i32
// funcs ------------------------------------------------------
- FOUR_EMPTY_FUNCTIONS,
+ FOUR_EMPTY_FUNCTIONS(SIG_INDEX(0)),
// table declaration -------------------------------------------
- SECTION(Table, 4), ENTRY_COUNT(1), kLocalAnyFunc, 0, 8,
+ SECTION(Table, ENTRY_COUNT(1), kLocalAnyFunc, 0, 8),
// table elements ----------------------------------------------
- SECTION(Element, 14),
- 1, // entry count
- TABLE_INDEX(0), WASM_INIT_EXPR_I32V_1(0),
- 8, // elements count
- FUNC_INDEX(0), // --
- FUNC_INDEX(1), // --
- FUNC_INDEX(2), // --
- FUNC_INDEX(3), // --
- FUNC_INDEX(0), // --
- FUNC_INDEX(1), // --
- FUNC_INDEX(2), // --
- FUNC_INDEX(3), // --
+ SECTION(Element,
+ ENTRY_COUNT(1), // entry count
+ TABLE_INDEX0, WASM_INIT_EXPR_I32V_1(0),
+ ADD_COUNT(FUNC_INDEX(0), FUNC_INDEX(1), FUNC_INDEX(2),
+ FUNC_INDEX(3), FUNC_INDEX(0), FUNC_INDEX(1),
+ FUNC_INDEX(2), FUNC_INDEX(3))),
FOUR_EMPTY_BODIES};
ModuleResult result = DecodeModule(data, data + sizeof(data));
EXPECT_OK(result);
- if (result.ok()) {
- EXPECT_EQ(2u, result.val->signatures.size());
- EXPECT_EQ(4u, result.val->functions.size());
- EXPECT_EQ(1u, result.val->tables.size());
- EXPECT_EQ(8u, result.val->tables[0].initial_size);
- }
+ EXPECT_EQ(2u, result.value()->signatures.size());
+ EXPECT_EQ(4u, result.value()->functions.size());
+ EXPECT_EQ(1u, result.value()->tables.size());
+ EXPECT_EQ(8u, result.value()->tables[0].initial_size);
}
TEST_F(WasmModuleVerifyTest, ElementSectionMultipleTables) {
  // Test that, when a module has multiple tables, the element section can
  // target and initialize all of them.
WASM_FEATURE_SCOPE(anyref);
+ WASM_FEATURE_SCOPE(bulk_memory);
static const byte data[] = {
// sig#0 ---------------------------------------------------------------
SIGNATURES_SECTION_VOID_VOID,
// funcs ---------------------------------------------------------------
- ONE_EMPTY_FUNCTION,
+ ONE_EMPTY_FUNCTION(SIG_INDEX(0)),
// table declaration ---------------------------------------------------
- SECTION(Table, 7), ENTRY_COUNT(2), // section header
- kLocalAnyFunc, 0, 5, // table 0
- kLocalAnyFunc, 0, 9, // table 1
+ SECTION(Table, ENTRY_COUNT(2), // section header
+ kLocalAnyFunc, 0, 5, // table 0
+ kLocalAnyFunc, 0, 9), // table 1
// elements ------------------------------------------------------------
- SECTION(Element, 14),
- 2, // entry count
- TABLE_INDEX(0), // element for table 0
- WASM_INIT_EXPR_I32V_1(0), // index
- 1, // elements count
- FUNC_INDEX(0), // function
- TABLE_INDEX(1), // element for table 1
- WASM_INIT_EXPR_I32V_1(7), // index
- 2, // elements count
- FUNC_INDEX(0), // entry 0
- FUNC_INDEX(0), // entry 1
- };
+ SECTION(Element,
+ ENTRY_COUNT(2), // entry count
+ TABLE_INDEX0, // element for table 0
+ WASM_INIT_EXPR_I32V_1(0), // index
+ 1, // elements count
+ FUNC_INDEX(0), // function
+ TABLE_INDEX(1), // element for table 1
+ WASM_INIT_EXPR_I32V_1(7), // index
+ 2, // elements count
+ FUNC_INDEX(0), // entry 0
+ FUNC_INDEX(0)), // entry 1
+ // code ----------------------------------------------------------------
+ ONE_EMPTY_BODY};
EXPECT_VERIFIES(data);
}
@@ -1057,55 +1023,53 @@ TEST_F(WasmModuleVerifyTest, ElementSectionMixedTables) {
  // Test that, when a module has multiple tables, both imported and
  // module-defined, the element section can target and initialize all of them.
WASM_FEATURE_SCOPE(anyref);
+ WASM_FEATURE_SCOPE(bulk_memory);
static const byte data[] = {
// sig#0 ---------------------------------------------------------------
SIGNATURES_SECTION_VOID_VOID,
// imports -------------------------------------------------------------
- SECTION(Import, 17), ENTRY_COUNT(2),
- NAME_LENGTH(1), // --
- 'm', // module name
- NAME_LENGTH(1), // --
- 't', // table name
- kExternalTable, // import kind
- kLocalAnyFunc, // elem_type
- 0, // no maximum field
- 5, // initial size
- NAME_LENGTH(1), // --
- 'm', // module name
- NAME_LENGTH(1), // --
- 's', // table name
- kExternalTable, // import kind
- kLocalAnyFunc, // elem_type
- 0, // no maximum field
- 10, // initial size
+ SECTION(Import, ENTRY_COUNT(2),
+ ADD_COUNT('m'), // module name
+ ADD_COUNT('t'), // table name
+ kExternalTable, // import kind
+ kLocalAnyFunc, // elem_type
+ 0, // no maximum field
+ 5, // initial size
+ ADD_COUNT('m'), // module name
+ ADD_COUNT('s'), // table name
+ kExternalTable, // import kind
+ kLocalAnyFunc, // elem_type
+ 0, // no maximum field
+ 10), // initial size
// funcs ---------------------------------------------------------------
- ONE_EMPTY_FUNCTION,
+ ONE_EMPTY_FUNCTION(SIG_INDEX(0)),
// table declaration ---------------------------------------------------
- SECTION(Table, 7), ENTRY_COUNT(2), // section header
- kLocalAnyFunc, 0, 15, // table 0
- kLocalAnyFunc, 0, 19, // table 1
+ SECTION(Table, ENTRY_COUNT(2), // section header
+ kLocalAnyFunc, 0, 15, // table 0
+ kLocalAnyFunc, 0, 19), // table 1
// elements ------------------------------------------------------------
- SECTION(Element, 27),
- 4, // entry count
- TABLE_INDEX(0), // element for table 0
- WASM_INIT_EXPR_I32V_1(0), // index
- 1, // elements count
- FUNC_INDEX(0), // function
- TABLE_INDEX(1), // element for table 1
- WASM_INIT_EXPR_I32V_1(7), // index
- 2, // elements count
- FUNC_INDEX(0), // entry 0
- FUNC_INDEX(0), // entry 1
- TABLE_INDEX(2), // element for table 2
- WASM_INIT_EXPR_I32V_1(12), // index
- 1, // elements count
- FUNC_INDEX(0), // function
- TABLE_INDEX(3), // element for table 1
- WASM_INIT_EXPR_I32V_1(17), // index
- 2, // elements count
- FUNC_INDEX(0), // entry 0
- FUNC_INDEX(0), // entry 1
- };
+ SECTION(Element,
+ 4, // entry count
+ TABLE_INDEX0, // element for table 0
+ WASM_INIT_EXPR_I32V_1(0), // index
+ 1, // elements count
+ FUNC_INDEX(0), // function
+ TABLE_INDEX(1), // element for table 1
+ WASM_INIT_EXPR_I32V_1(7), // index
+ 2, // elements count
+ FUNC_INDEX(0), // entry 0
+ FUNC_INDEX(0), // entry 1
+ TABLE_INDEX(2), // element for table 2
+ WASM_INIT_EXPR_I32V_1(12), // index
+ 1, // elements count
+ FUNC_INDEX(0), // function
+ TABLE_INDEX(3), // element for table 1
+ WASM_INIT_EXPR_I32V_1(17), // index
+ 2, // elements count
+ FUNC_INDEX(0), // entry 0
+ FUNC_INDEX(0)), // entry 1
+ // code ----------------------------------------------------------------
+ ONE_EMPTY_BODY};
EXPECT_VERIFIES(data);
}
@@ -1114,32 +1078,34 @@ TEST_F(WasmModuleVerifyTest, ElementSectionMultipleTablesArbitraryOrder) {
  // Test that the order in which tables are targeted in the element section
  // can be arbitrary.
WASM_FEATURE_SCOPE(anyref);
+ WASM_FEATURE_SCOPE(bulk_memory);
static const byte data[] = {
// sig#0 ---------------------------------------------------------------
SIGNATURES_SECTION_VOID_VOID,
// funcs ---------------------------------------------------------------
- ONE_EMPTY_FUNCTION,
+ ONE_EMPTY_FUNCTION(SIG_INDEX(0)),
// table declaration ---------------------------------------------------
- SECTION(Table, 7), ENTRY_COUNT(2), // section header
- kLocalAnyFunc, 0, 5, // table 0
- kLocalAnyFunc, 0, 9, // table 1
+ SECTION(Table, ENTRY_COUNT(2), // section header
+ kLocalAnyFunc, 0, 5, // table 0
+ kLocalAnyFunc, 0, 9), // table 1
// elements ------------------------------------------------------------
- SECTION(Element, 20),
- 3, // entry count
- TABLE_INDEX(0), // element for table 1
- WASM_INIT_EXPR_I32V_1(0), // index
- 1, // elements count
- FUNC_INDEX(0), // function
- TABLE_INDEX(1), // element for table 0
- WASM_INIT_EXPR_I32V_1(7), // index
- 2, // elements count
- FUNC_INDEX(0), // entry 0
- FUNC_INDEX(0), // entry 1
- TABLE_INDEX(0), // element for table 1
- WASM_INIT_EXPR_I32V_1(3), // index
- 1, // elements count
- FUNC_INDEX(0), // function
- };
+ SECTION(Element,
+ ENTRY_COUNT(3), // entry count
+ TABLE_INDEX0, // element for table 1
+ WASM_INIT_EXPR_I32V_1(0), // index
+ 1, // elements count
+ FUNC_INDEX(0), // function
+ TABLE_INDEX(1), // element for table 0
+ WASM_INIT_EXPR_I32V_1(7), // index
+ 2, // elements count
+ FUNC_INDEX(0), // entry 0
+ FUNC_INDEX(0), // entry 1
+ TABLE_INDEX0, // element for table 1
+ WASM_INIT_EXPR_I32V_1(3), // index
+ 1, // elements count
+ FUNC_INDEX(0)), // function
+ // code ----------------------------------------------------------------
+ ONE_EMPTY_BODY};
EXPECT_VERIFIES(data);
}
@@ -1148,55 +1114,53 @@ TEST_F(WasmModuleVerifyTest, ElementSectionMixedTablesArbitraryOrder) {
  // Test that the order in which tables are targeted in the element section
  // can be arbitrary. In this test, tables can be both imported and
  // module-defined.
WASM_FEATURE_SCOPE(anyref);
+ WASM_FEATURE_SCOPE(bulk_memory);
static const byte data[] = {
// sig#0 ---------------------------------------------------------------
SIGNATURES_SECTION_VOID_VOID,
// imports -------------------------------------------------------------
- SECTION(Import, 17), ENTRY_COUNT(2),
- NAME_LENGTH(1), // --
- 'm', // module name
- NAME_LENGTH(1), // --
- 't', // table name
- kExternalTable, // import kind
- kLocalAnyFunc, // elem_type
- 0, // no maximum field
- 5, // initial size
- NAME_LENGTH(1), // --
- 'm', // module name
- NAME_LENGTH(1), // --
- 's', // table name
- kExternalTable, // import kind
- kLocalAnyFunc, // elem_type
- 0, // no maximum field
- 10, // initial size
+ SECTION(Import, ENTRY_COUNT(2),
+ ADD_COUNT('m'), // module name
+ ADD_COUNT('t'), // table name
+ kExternalTable, // import kind
+ kLocalAnyFunc, // elem_type
+ 0, // no maximum field
+ 5, // initial size
+ ADD_COUNT('m'), // module name
+ ADD_COUNT('s'), // table name
+ kExternalTable, // import kind
+ kLocalAnyFunc, // elem_type
+ 0, // no maximum field
+ 10), // initial size
// funcs ---------------------------------------------------------------
- ONE_EMPTY_FUNCTION,
+ ONE_EMPTY_FUNCTION(SIG_INDEX(0)),
// table declaration ---------------------------------------------------
- SECTION(Table, 7), ENTRY_COUNT(2), // section header
- kLocalAnyFunc, 0, 15, // table 0
- kLocalAnyFunc, 0, 19, // table 1
+ SECTION(Table, ENTRY_COUNT(2), // section header
+ kLocalAnyFunc, 0, 15, // table 0
+ kLocalAnyFunc, 0, 19), // table 1
// elements ------------------------------------------------------------
- SECTION(Element, 27),
- 4, // entry count
- TABLE_INDEX(2), // element for table 0
- WASM_INIT_EXPR_I32V_1(10), // index
- 1, // elements count
- FUNC_INDEX(0), // function
- TABLE_INDEX(3), // element for table 1
- WASM_INIT_EXPR_I32V_1(17), // index
- 2, // elements count
- FUNC_INDEX(0), // entry 0
- FUNC_INDEX(0), // entry 1
- TABLE_INDEX(0), // element for table 2
- WASM_INIT_EXPR_I32V_1(2), // index
- 1, // elements count
- FUNC_INDEX(0), // function
- TABLE_INDEX(1), // element for table 1
- WASM_INIT_EXPR_I32V_1(7), // index
- 2, // elements count
- FUNC_INDEX(0), // entry 0
- FUNC_INDEX(0), // entry 1
- };
+ SECTION(Element,
+ 4, // entry count
+ TABLE_INDEX(2), // element for table 0
+ WASM_INIT_EXPR_I32V_1(10), // index
+ 1, // elements count
+ FUNC_INDEX(0), // function
+ TABLE_INDEX(3), // element for table 1
+ WASM_INIT_EXPR_I32V_1(17), // index
+ 2, // elements count
+ FUNC_INDEX(0), // entry 0
+ FUNC_INDEX(0), // entry 1
+ TABLE_INDEX0, // element for table 2
+ WASM_INIT_EXPR_I32V_1(2), // index
+ 1, // elements count
+ FUNC_INDEX(0), // function
+ TABLE_INDEX(1), // element for table 1
+ WASM_INIT_EXPR_I32V_1(7), // index
+ 2, // elements count
+ FUNC_INDEX(0), // entry 0
+ FUNC_INDEX(0)), // entry 1
+ // code ----------------------------------------------------------------
+ ONE_EMPTY_BODY};
EXPECT_VERIFIES(data);
}
@@ -1205,27 +1169,28 @@ TEST_F(WasmModuleVerifyTest, ElementSectionDontInitAnyRefTable) {
// Test that tables of type 'AnyRef' cannot be initialized by the element
// section.
WASM_FEATURE_SCOPE(anyref);
+ WASM_FEATURE_SCOPE(bulk_memory);
static const byte data[] = {
// sig#0 ---------------------------------------------------------------
SIGNATURES_SECTION_VOID_VOID,
// funcs ---------------------------------------------------------------
- ONE_EMPTY_FUNCTION,
+ ONE_EMPTY_FUNCTION(SIG_INDEX(0)),
// table declaration ---------------------------------------------------
- SECTION(Table, 7), ENTRY_COUNT(2), // section header
- kLocalAnyRef, 0, 5, // table 0
- kLocalAnyFunc, 0, 9, // table 1
+ SECTION(Table, ENTRY_COUNT(2), // section header
+ kLocalAnyRef, 0, 5, // table 0
+ kLocalAnyFunc, 0, 9), // table 1
// elements ------------------------------------------------------------
- SECTION(Element, 14),
- 2, // entry count
- TABLE_INDEX(0), // element for table 0
- WASM_INIT_EXPR_I32V_1(0), // index
- 1, // elements count
- FUNC_INDEX(0), // function
- TABLE_INDEX(1), // element for table 1
- WASM_INIT_EXPR_I32V_1(7), // index
- 2, // elements count
- FUNC_INDEX(0), // entry 0
- FUNC_INDEX(0), // entry 1
+ SECTION(Element,
+ ENTRY_COUNT(2), // entry count
+ TABLE_INDEX0, // element for table 0
+ WASM_INIT_EXPR_I32V_1(0), // index
+ 1, // elements count
+ FUNC_INDEX(0), // function
+ TABLE_INDEX(1), // element for table 1
+ WASM_INIT_EXPR_I32V_1(7), // index
+ 2, // elements count
+ FUNC_INDEX(0), // entry 0
+ FUNC_INDEX(0)), // entry 1
};
EXPECT_FAILURE(data);
@@ -1235,45 +1200,42 @@ TEST_F(WasmModuleVerifyTest, ElementSectionDontInitAnyRefImportedTable) {
// Test that imported tables of type AnyRef cannot be initialized in the
// elements section.
WASM_FEATURE_SCOPE(anyref);
+ WASM_FEATURE_SCOPE(bulk_memory);
static const byte data[] = {
// sig#0 ---------------------------------------------------------------
SIGNATURES_SECTION_VOID_VOID,
// imports -------------------------------------------------------------
- SECTION(Import, 17), ENTRY_COUNT(2),
- NAME_LENGTH(1), // --
- 'm', // module name
- NAME_LENGTH(1), // --
- 't', // table name
- kExternalTable, // import kind
- kLocalAnyFunc, // elem_type
- 0, // no maximum field
- 5, // initial size
- NAME_LENGTH(1), // --
- 'm', // module name
- NAME_LENGTH(1), // --
- 's', // table name
- kExternalTable, // import kind
- kLocalAnyRef, // elem_type
- 0, // no maximum field
- 10, // initial size
+ SECTION(Import, ENTRY_COUNT(2),
+ ADD_COUNT('m'), // module name
+ ADD_COUNT('t'), // table name
+ kExternalTable, // import kind
+ kLocalAnyFunc, // elem_type
+ 0, // no maximum field
+ 5, // initial size
+ ADD_COUNT('m'), // module name
+ ADD_COUNT('s'), // table name
+ kExternalTable, // import kind
+ kLocalAnyRef, // elem_type
+ 0, // no maximum field
+ 10), // initial size
// funcs ---------------------------------------------------------------
- ONE_EMPTY_FUNCTION,
+ ONE_EMPTY_FUNCTION(SIG_INDEX(0)),
// table declaration ---------------------------------------------------
- SECTION(Table, 7), ENTRY_COUNT(2), // section header
- kLocalAnyFunc, 0, 15, // table 0
- kLocalAnyFunc, 0, 19, // table 1
+ SECTION(Table, ENTRY_COUNT(2), // section header
+ kLocalAnyFunc, 0, 15, // table 0
+ kLocalAnyFunc, 0, 19), // table 1
// elements ------------------------------------------------------------
- SECTION(Element, 14),
- 4, // entry count
- TABLE_INDEX(0), // element for table 0
- WASM_INIT_EXPR_I32V_1(10), // index
- 1, // elements count
- FUNC_INDEX(0), // function
- TABLE_INDEX(1), // element for table 1
- WASM_INIT_EXPR_I32V_1(17), // index
- 2, // elements count
- FUNC_INDEX(0), // entry 0
- FUNC_INDEX(0), // entry 1
+ SECTION(Element,
+ ENTRY_COUNT(4), // entry count
+ TABLE_INDEX0, // element for table 0
+ WASM_INIT_EXPR_I32V_1(10), // index
+ 1, // elements count
+ FUNC_INDEX(0), // function
+ TABLE_INDEX(1), // element for table 1
+ WASM_INIT_EXPR_I32V_1(17), // index
+ 2, // elements count
+ FUNC_INDEX(0), // entry 0
+ FUNC_INDEX(0)), // entry 1
};
EXPECT_FAILURE(data);
@@ -1284,8 +1246,7 @@ TEST_F(WasmModuleVerifyTest, IndirectFunctionNoFunctions) {
// sig#0 -------------------------------------------------------
SIGNATURES_SECTION_VOID_VOID,
// indirect table ----------------------------------------------
- SECTION(Table, 4), ENTRY_COUNT(1), 1, 0, 0,
- };
+ SECTION(Table, ENTRY_COUNT(1), 1, 0, 0)};
EXPECT_FAILURE(data);
}
@@ -1295,24 +1256,23 @@ TEST_F(WasmModuleVerifyTest, IndirectFunctionInvalidIndex) {
// sig#0 -------------------------------------------------------
SIGNATURES_SECTION_VOID_VOID,
// functions ---------------------------------------------------
- ONE_EMPTY_FUNCTION,
+ ONE_EMPTY_FUNCTION(SIG_INDEX(0)),
// indirect table ----------------------------------------------
- SECTION(Table, 4), ENTRY_COUNT(1), 1, 1, 0,
- };
+ SECTION(Table, ENTRY_COUNT(1), 1, 1, 0)};
EXPECT_FAILURE(data);
}
TEST_F(WasmModuleVerifyTest, MultipleTablesWithoutFlag) {
static const byte data[] = {
- SECTION(Table, 7), // table section
- ENTRY_COUNT(2), // 2 tables
- kLocalAnyFunc, // table 1: type
- 0, // table 1: no maximum
- 10, // table 1: minimum size
- kLocalAnyFunc, // table 2: type
- 0, // table 2: no maximum
- 10, // table 2: minimum size
+ SECTION(Table, // table section
+ ENTRY_COUNT(2), // 2 tables
+ kLocalAnyFunc, // table 1: type
+ 0, // table 1: no maximum
+ 10, // table 1: minimum size
+ kLocalAnyFunc, // table 2: type
+ 0, // table 2: no maximum
+ 10), // table 2: minimum size
};
EXPECT_FAILURE(data);
}
@@ -1320,26 +1280,26 @@ TEST_F(WasmModuleVerifyTest, MultipleTablesWithoutFlag) {
TEST_F(WasmModuleVerifyTest, MultipleTablesWithFlag) {
WASM_FEATURE_SCOPE(anyref);
static const byte data[] = {
- SECTION(Table, 7), // table section
- ENTRY_COUNT(2), // 2 tables
- kLocalAnyFunc, // table 1: type
- 0, // table 1: no maximum
- 10, // table 1: minimum size
- kLocalAnyRef, // table 2: type
- 0, // table 2: no maximum
- 11, // table 2: minimum size
+ SECTION(Table, // table section
+ ENTRY_COUNT(2), // 2 tables
+ kLocalAnyFunc, // table 1: type
+ 0, // table 1: no maximum
+ 10, // table 1: minimum size
+ kLocalAnyRef, // table 2: type
+ 0, // table 2: no maximum
+ 11), // table 2: minimum size
};
ModuleResult result = DecodeModule(data, data + sizeof(data));
EXPECT_OK(result);
- EXPECT_EQ(2u, result.val->tables.size());
+ EXPECT_EQ(2u, result.value()->tables.size());
- EXPECT_EQ(10u, result.val->tables[0].initial_size);
- EXPECT_EQ(kWasmAnyFunc, result.val->tables[0].type);
+ EXPECT_EQ(10u, result.value()->tables[0].initial_size);
+ EXPECT_EQ(kWasmAnyFunc, result.value()->tables[0].type);
- EXPECT_EQ(11u, result.val->tables[1].initial_size);
- EXPECT_EQ(kWasmAnyRef, result.val->tables[1].type);
+ EXPECT_EQ(11u, result.value()->tables[1].initial_size);
+ EXPECT_EQ(kWasmAnyRef, result.value()->tables[1].type);
}
class WasmSignatureDecodeTest : public TestWithZone {
@@ -1490,8 +1450,9 @@ TEST_F(WasmSignatureDecodeTest, Fail_anyref_without_flag) {
WASM_FEATURE_SCOPE_VAL(anyref, false);
byte ref_types[] = {kLocalAnyFunc, kLocalAnyRef};
for (byte invalid_type : ref_types) {
- for (size_t i = 0; i < SIZEOF_SIG_ENTRY_x_xx; i++) {
+ for (size_t i = 0;; i++) {
byte data[] = {SIG_ENTRY_x_xx(kLocalI32, kLocalI32, kLocalI32)};
+ if (i >= arraysize(data)) break;
data[i] = invalid_type;
FunctionSig* sig = DecodeSig(data, data + sizeof(data));
EXPECT_EQ(nullptr, sig);
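// Editor's note: the loop bound moves into the body so it can use
// arraysize(data) on the per-iteration array, which tracks the actual
// encoded size, instead of the hand-maintained SIZEOF_SIG_ENTRY_x_xx
// constant the old bound relied on.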
@@ -1501,8 +1462,9 @@ TEST_F(WasmSignatureDecodeTest, Fail_anyref_without_flag) {
TEST_F(WasmSignatureDecodeTest, Fail_invalid_type) {
byte kInvalidType = 76;
- for (size_t i = 0; i < SIZEOF_SIG_ENTRY_x_xx; i++) {
+ for (size_t i = 0;; i++) {
byte data[] = {SIG_ENTRY_x_xx(kLocalI32, kLocalI32, kLocalI32)};
+ if (i >= arraysize(data)) break;
data[i] = kInvalidType;
FunctionSig* sig = DecodeSig(data, data + sizeof(data));
EXPECT_EQ(nullptr, sig);
@@ -1529,15 +1491,12 @@ TEST_F(WasmSignatureDecodeTest, Fail_invalid_param_type2) {
class WasmFunctionVerifyTest : public TestWithIsolateAndZone {
public:
- WasmFeatures enabled_features_;
- WasmModule module;
- Vector<const byte> bytes;
-
FunctionResult DecodeWasmFunction(const ModuleWireBytes& wire_bytes,
const WasmModule* module,
const byte* function_start,
const byte* function_end) {
- return DecodeWasmFunctionForTesting(enabled_features_, zone(), wire_bytes,
+ WasmFeatures enabled_features;
+ return DecodeWasmFunctionForTesting(enabled_features, zone(), wire_bytes,
module, function_start, function_end,
isolate()->counters());
}
@@ -1558,16 +1517,16 @@ TEST_F(WasmFunctionVerifyTest, Ok_v_v_empty) {
kExprEnd // body
};
- FunctionResult result =
- DecodeWasmFunction(bytes, &module, data, data + sizeof(data));
+ WasmModule module;
+ FunctionResult result = DecodeWasmFunction(ModuleWireBytes({}), &module, data,
+ data + sizeof(data));
EXPECT_OK(result);
- if (result.val && result.ok()) {
- WasmFunction* function = result.val.get();
+ if (result.value() && result.ok()) {
+ WasmFunction* function = result.value().get();
EXPECT_EQ(0u, function->sig->parameter_count());
EXPECT_EQ(0u, function->sig->return_count());
- EXPECT_EQ(static_cast<uint32_t>(SIZEOF_SIG_ENTRY_v_v),
- function->code.offset());
+ EXPECT_EQ(COUNT_ARGS(SIG_ENTRY_v_v), function->code.offset());
EXPECT_EQ(sizeof(data), function->code.end_offset());
// TODO(titzer): verify encoding of local declarations
}
@@ -1658,102 +1617,94 @@ TEST_F(WasmModuleVerifyTest, UnknownSectionSkipped) {
static const byte data[] = {
UNKNOWN_SECTION(1),
0, // one byte section
- SECTION(Global, 6),
- 1,
- kLocalI32, // memory type
- 0, // exported
- WASM_INIT_EXPR_I32V_1(33), // init
+ SECTION(Global, ENTRY_COUNT(1),
+ kLocalI32, // memory type
+ 0, // exported
+ WASM_INIT_EXPR_I32V_1(33)), // init
};
ModuleResult result = DecodeModule(data, data + sizeof(data));
EXPECT_OK(result);
- EXPECT_EQ(1u, result.val->globals.size());
- EXPECT_EQ(0u, result.val->functions.size());
- EXPECT_EQ(0u, result.val->data_segments.size());
+ EXPECT_EQ(1u, result.value()->globals.size());
+ EXPECT_EQ(0u, result.value()->functions.size());
+ EXPECT_EQ(0u, result.value()->data_segments.size());
- const WasmGlobal* global = &result.val->globals.back();
+ const WasmGlobal* global = &result.value()->globals.back();
EXPECT_EQ(kWasmI32, global->type);
EXPECT_EQ(0u, global->offset);
}
TEST_F(WasmModuleVerifyTest, ImportTable_empty) {
- static const byte data[] = {SECTION(Type, 1), 0, SECTION(Import, 1), 0};
+ static const byte data[] = {SECTION(Type, ENTRY_COUNT(0)),
+ SECTION(Import, ENTRY_COUNT(0))};
EXPECT_VERIFIES(data);
}
TEST_F(WasmModuleVerifyTest, ImportTable_nosigs1) {
- static const byte data[] = {SECTION(Import, 1), 0};
+ static const byte data[] = {SECTION(Import, ENTRY_COUNT(0))};
EXPECT_VERIFIES(data);
}
TEST_F(WasmModuleVerifyTest, ImportTable_mutable_global) {
- WASM_FEATURE_SCOPE(mut_global);
{
static const byte data[] = {
- SECTION(Import, 8), // section header
- 1, // number of imports
- NAME_LENGTH(1), // --
- 'm', // module name
- NAME_LENGTH(1), // --
- 'f', // global name
- kExternalGlobal, // import kind
- kLocalI32, // type
- 0, // mutability
+ SECTION(Import, // section header
+ ENTRY_COUNT(1), // number of imports
+ ADD_COUNT('m'), // module name
+ ADD_COUNT('f'), // global name
+ kExternalGlobal, // import kind
+ kLocalI32, // type
+ 0), // mutability
};
EXPECT_VERIFIES(data);
}
{
static const byte data[] = {
- SECTION(Import, 8), // section header
- 1, // sig table
- NAME_LENGTH(1), // --
- 'm', // module name
- NAME_LENGTH(1), // --
- 'f', // global name
- kExternalGlobal, // import kind
- kLocalI32, // type
- 1, // mutability
+ SECTION(Import, // section header
+ ENTRY_COUNT(1), // sig table
+ ADD_COUNT('m'), // module name
+ ADD_COUNT('f'), // global name
+ kExternalGlobal, // import kind
+ kLocalI32, // type
+ 1), // mutability
};
EXPECT_VERIFIES(data);
}
}
TEST_F(WasmModuleVerifyTest, ImportTable_mutability_malformed) {
- WASM_FEATURE_SCOPE(mut_global);
- static const byte data[] = {
- SECTION(Import, 8),
- 1, // --
- NAME_LENGTH(1), // --
- 'm', // module name
- NAME_LENGTH(1), // --
- 'g', // global name
- kExternalGlobal, // import kind
- kLocalI32, // type
- 2, // invalid mutability
+ static const byte data[] = {
+ SECTION(Import,
+ ENTRY_COUNT(1), // --
+ ADD_COUNT('m'), // module name
+ ADD_COUNT('g'), // global name
+ kExternalGlobal, // import kind
+ kLocalI32, // type
+ 2), // invalid mutability
};
EXPECT_FAILURE(data);
}
TEST_F(WasmModuleVerifyTest, ImportTable_nosigs2) {
static const byte data[] = {
- SECTION(Import, 6), 1, // sig table
- NAME_LENGTH(1), 'm', // module name
- NAME_LENGTH(1), 'f', // function name
- kExternalFunction, // import kind
- IMPORT_SIG_INDEX(0), // sig index
+ SECTION(Import, ENTRY_COUNT(1), // sig table
+ ADD_COUNT('m'), // module name
+ ADD_COUNT('f'), // function name
+ kExternalFunction, // import kind
+ SIG_INDEX(0)), // sig index
};
EXPECT_FAILURE(data);
}
TEST_F(WasmModuleVerifyTest, ImportTable_invalid_sig) {
static const byte data[] = {
- SECTION(Type, 1), 0, // --
- SECTION(Import, 6), 1, // --
- NAME_LENGTH(1), 'm', // module name
- NAME_LENGTH(1), 'f', // function name
- kExternalFunction, // import kind
- IMPORT_SIG_INDEX(0), // sig index
+ SECTION(Type, ENTRY_COUNT(0)), // --
+ SECTION(Import, ENTRY_COUNT(1), // --
+ ADD_COUNT('m'), // module name
+ ADD_COUNT('f'), // function name
+ kExternalFunction, // import kind
+ SIG_INDEX(0)), // sig index
};
EXPECT_FAILURE(data);
}
@@ -1762,14 +1713,12 @@ TEST_F(WasmModuleVerifyTest, ImportTable_one_sig) {
static const byte data[] = {
// signatures
SIGNATURES_SECTION_VOID_VOID,
- SECTION(Import, 7),
- 1, // --
- NAME_LENGTH(1),
- 'm', // module name
- NAME_LENGTH(1),
- 'f', // function name
- kExternalFunction, // import kind
- IMPORT_SIG_INDEX(0), // sig index
+ SECTION(Import,
+ ENTRY_COUNT(1), // --
+ ADD_COUNT('m'), // module name
+ ADD_COUNT('f'), // function name
+ kExternalFunction, // import kind
+ SIG_INDEX(0)), // sig index
};
EXPECT_VERIFIES(data);
}
@@ -1778,13 +1727,13 @@ TEST_F(WasmModuleVerifyTest, ImportTable_invalid_module) {
static const byte data[] = {
// signatures
SIGNATURES_SECTION_VOID_VOID, // --
- SECTION(Import, 7), // --
- 1, // --
- NO_NAME, // module name
- NAME_LENGTH(1), // --
- 'f', // function name
- kExternalFunction, // import kind
- IMPORT_SIG_INDEX(0), // sig index
+ SECTION(Import, // --
+ ENTRY_COUNT(1), // --
+ NO_NAME, // module name
+ ADD_COUNT('f'), // function name
+ kExternalFunction, // import kind
+ SIG_INDEX(0), // sig index
+ 0), // auxiliary data
};
EXPECT_FAILURE(data);
}
@@ -1793,143 +1742,129 @@ TEST_F(WasmModuleVerifyTest, ImportTable_off_end) {
static const byte data[] = {
// signatures
SIGNATURES_SECTION_VOID_VOID,
- SECTION(Import, 6),
- 1,
- NAME_LENGTH(1),
- 'm', // module name
- NAME_LENGTH(1),
- 'f', // function name
- kExternalFunction, // import kind
- IMPORT_SIG_INDEX(0), // sig index
+ SECTION(Import, ENTRY_COUNT(1),
+ ADD_COUNT('m'), // module name
+ ADD_COUNT('f'), // function name
+ kExternalFunction), // import kind
+ SIG_INDEX(0), // sig index (outside import section!)
};
- EXPECT_OFF_END_FAILURE(data, 16, sizeof(data));
+ EXPECT_OFF_END_FAILURE(data, arraysize(data) - 3);
}
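// Editor's note: EXPECT_OFF_END_FAILURE now takes only a minimum length; the
// upper bound is implicitly the full array. Its assumed semantics, matching
// the explicit loop it replaces later in this file:
//   for (size_t len = min_length; len < arraysize(data); ++len) {
//     EXPECT_FAILURE_LEN(data, len);  // every truncated prefix must fail
//   }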
TEST_F(WasmModuleVerifyTest, ExportTable_empty1) {
static const byte data[] = { // signatures
SIGNATURES_SECTION_VOID_VOID, // --
- ONE_EMPTY_FUNCTION, SECTION(Export, 1), // --
- 0, // --
+ ONE_EMPTY_FUNCTION(SIG_INDEX(0)),
+ SECTION(Export, ENTRY_COUNT(0)), // --
ONE_EMPTY_BODY};
ModuleResult result = DecodeModule(data, data + sizeof(data));
EXPECT_OK(result);
- EXPECT_EQ(1u, result.val->functions.size());
- EXPECT_EQ(0u, result.val->export_table.size());
+ EXPECT_EQ(1u, result.value()->functions.size());
+ EXPECT_EQ(0u, result.value()->export_table.size());
}
TEST_F(WasmModuleVerifyTest, ExportTable_empty2) {
- static const byte data[] = {
- SECTION(Type, 1), 0, SECTION(Export, 1), 0 // --
- };
+ static const byte data[] = {SECTION(Type, ENTRY_COUNT(0)),
+ SECTION(Export, ENTRY_COUNT(0))};
EXPECT_VERIFIES(data);
}
TEST_F(WasmModuleVerifyTest, ExportTable_NoFunctions2) {
- static const byte data[] = {SECTION(Export, 1), 0};
+ static const byte data[] = {SECTION(Export, ENTRY_COUNT(0))};
EXPECT_VERIFIES(data);
}
TEST_F(WasmModuleVerifyTest, ExportTableOne) {
- static const byte data[] = {// signatures
- SIGNATURES_SECTION_VOID_VOID,
- ONE_EMPTY_FUNCTION,
- SECTION(Export, 4),
- 1, // exports
- NO_NAME, // --
- kExternalFunction, // --
- FUNC_INDEX(0), // --
- ONE_EMPTY_BODY};
+ static const byte data[] = {
+ // signatures
+ SIGNATURES_SECTION_VOID_VOID, ONE_EMPTY_FUNCTION(SIG_INDEX(0)),
+ SECTION(Export,
+ ENTRY_COUNT(1), // exports
+ NO_NAME, // --
+ kExternalFunction, // --
+ FUNC_INDEX(0)), // --
+ ONE_EMPTY_BODY};
ModuleResult result = DecodeModule(data, data + sizeof(data));
EXPECT_OK(result);
- EXPECT_EQ(1u, result.val->functions.size());
- EXPECT_EQ(1u, result.val->export_table.size());
+ EXPECT_EQ(1u, result.value()->functions.size());
+ EXPECT_EQ(1u, result.value()->export_table.size());
}
TEST_F(WasmModuleVerifyTest, ExportNameWithInvalidStringLength) {
- static const byte data[] = {// signatures
- SIGNATURES_SECTION_VOID_VOID,
- ONE_EMPTY_FUNCTION,
- SECTION(Export, 12),
- 1, // exports
- NAME_LENGTH(84), // invalid string length
- 'e', // --
- kExternalFunction, // --
- FUNC_INDEX(0)};
+ static const byte data[] = {
+ // signatures
+ SIGNATURES_SECTION_VOID_VOID, ONE_EMPTY_FUNCTION(SIG_INDEX(0)),
+ SECTION(Export,
+ ENTRY_COUNT(1), // exports
+ U32V_1(84), // invalid string length
+ 'e', // --
+ kExternalFunction, // --
+ FUNC_INDEX(0), // --
+ 0, 0, 0) // auxiliary data
+ };
EXPECT_FAILURE(data);
}
TEST_F(WasmModuleVerifyTest, ExportTableTwo) {
- static const byte data[] = {// signatures
- SIGNATURES_SECTION_VOID_VOID,
- ONE_EMPTY_FUNCTION,
- SECTION(Export, 14),
- 2, // exports
- NAME_LENGTH(4),
- 'n',
- 'a',
- 'm',
- 'e', // --
- kExternalFunction,
- FUNC_INDEX(0), // --
- NAME_LENGTH(3),
- 'n',
- 'o',
- 'm', // --
- kExternalFunction, // --
- FUNC_INDEX(0), // --
- ONE_EMPTY_BODY};
+ static const byte data[] = {
+ // signatures
+ SIGNATURES_SECTION_VOID_VOID, ONE_EMPTY_FUNCTION(SIG_INDEX(0)),
+ SECTION(Export,
+ ENTRY_COUNT(2), // exports
+ ADD_COUNT('n', 'a', 'm', 'e'), // --
+ kExternalFunction, // --
+ FUNC_INDEX(0), // --
+ ADD_COUNT('n', 'o', 'm'), // --
+ kExternalFunction, // --
+ FUNC_INDEX(0)), // --
+ ONE_EMPTY_BODY};
ModuleResult result = DecodeModule(data, data + sizeof(data));
EXPECT_OK(result);
- EXPECT_EQ(1u, result.val->functions.size());
- EXPECT_EQ(2u, result.val->export_table.size());
+ EXPECT_EQ(1u, result.value()->functions.size());
+ EXPECT_EQ(2u, result.value()->export_table.size());
}
TEST_F(WasmModuleVerifyTest, ExportTableThree) {
- static const byte data[] = {// signatures
- SIGNATURES_SECTION_VOID_VOID,
- THREE_EMPTY_FUNCTIONS,
- SECTION(Export, 13),
- 3, // exports
- NAME_LENGTH(1),
- 'a', // --
- kExternalFunction,
- FUNC_INDEX(0), // --
- NAME_LENGTH(1),
- 'b', // --
- kExternalFunction,
- FUNC_INDEX(1), // --
- NAME_LENGTH(1),
- 'c', // --
- kExternalFunction,
- FUNC_INDEX(2), // --
- THREE_EMPTY_BODIES};
+ static const byte data[] = {
+ // signatures
+ SIGNATURES_SECTION_VOID_VOID, THREE_EMPTY_FUNCTIONS(SIG_INDEX(0)),
+ SECTION(Export,
+ ENTRY_COUNT(3), // exports
+ ADD_COUNT('a'), // --
+ kExternalFunction,
+ FUNC_INDEX(0), // --
+ ADD_COUNT('b'), // --
+ kExternalFunction,
+ FUNC_INDEX(1), // --
+ ADD_COUNT('c'), // --
+ kExternalFunction,
+ FUNC_INDEX(2)), // --
+ THREE_EMPTY_BODIES};
ModuleResult result = DecodeModule(data, data + sizeof(data));
EXPECT_OK(result);
- EXPECT_EQ(3u, result.val->functions.size());
- EXPECT_EQ(3u, result.val->export_table.size());
+ EXPECT_EQ(3u, result.value()->functions.size());
+ EXPECT_EQ(3u, result.value()->export_table.size());
}
TEST_F(WasmModuleVerifyTest, ExportTableThreeOne) {
for (int i = 0; i < 6; i++) {
- const byte data[] = {// signatures
- SIGNATURES_SECTION_VOID_VOID,
- THREE_EMPTY_FUNCTIONS,
- SECTION(Export, 6),
- 1, // exports
- NAME_LENGTH(2),
- 'e',
- 'x', // --
- kExternalFunction,
- FUNC_INDEX(i), // --
- THREE_EMPTY_BODIES};
+ const byte data[] = {
+ // signatures
+ SIGNATURES_SECTION_VOID_VOID, THREE_EMPTY_FUNCTIONS(SIG_INDEX(0)),
+ SECTION(Export,
+ ENTRY_COUNT(1), // exports
+ ADD_COUNT('e', 'x'), // --
+ kExternalFunction,
+ FUNC_INDEX(i)), // --
+ THREE_EMPTY_BODIES};
if (i < 3) {
EXPECT_VERIFIES(data);
@@ -1942,43 +1877,23 @@ TEST_F(WasmModuleVerifyTest, ExportTableThreeOne) {
TEST_F(WasmModuleVerifyTest, ExportTableOne_off_end) {
static const byte data[] = {
// signatures
- SIGNATURES_SECTION_VOID_VOID,
- ONE_EMPTY_FUNCTION,
- SECTION(Export, 1 + 6),
- 1, // exports
- NO_NAME, // --
- kExternalFunction,
- FUNC_INDEX(0), // --
+ SIGNATURES_SECTION_VOID_VOID, ONE_EMPTY_FUNCTION(SIG_INDEX(0)),
+ SECTION(Export,
+ ENTRY_COUNT(1), // exports
+ NO_NAME, // --
+ kExternalFunction,
+ FUNC_INDEX(0), // --
+ 0, 0, 0) // auxiliary data
};
- for (size_t length = 33; length < sizeof(data); length++) {
- ModuleResult result = DecodeModule(data, data + length);
- EXPECT_FALSE(result.ok());
- }
-}
-
-TEST_F(WasmModuleVerifyTest, FunctionSignatures_empty) {
- static const byte data[] = {
- SECTION(Type, 1), 0, // --
- SECTION(Function, 1), 0 // --
- }; // --
- EXPECT_VERIFIES(data);
-}
-
-TEST_F(WasmModuleVerifyTest, FunctionSignatures_one) {
- static const byte data[] = {
- SIGNATURES_SECTION(1, SIG_ENTRY_v_v), // --
- FUNCTION_SIGNATURES_SECTION(1, 0) // --
- };
- EXPECT_VERIFIES(data);
+ EXPECT_OFF_END_FAILURE(data, arraysize(data) - 3);
}
TEST_F(WasmModuleVerifyTest, Regression_648070) {
static const byte data[] = {
- SECTION(Type, 1), 0, // --
- SECTION(Function, 5), // --
- U32V_5(3500228624) // function count = 3500228624
- }; // --
+ SECTION(Type, ENTRY_COUNT(0)), // --
+ SECTION(Function, U32V_5(3500228624)) // function count = 3500228624
+ }; // --
EXPECT_FAILURE(data);
}
@@ -1987,10 +1902,10 @@ TEST_F(WasmModuleVerifyTest, Regression_738097) {
static const byte data[] = {
SIGNATURES_SECTION(1, SIG_ENTRY_v_v), // --
FUNCTION_SIGNATURES_SECTION(1, 0), // --
- SECTION(Code, 1 + 5 + 1), // --
- 1, // --
- U32V_5(0xFFFFFFFF), // function size,
- 0 // No real body
+ SECTION(Code, // --
+ ENTRY_COUNT(1), // --
+ U32V_5(0xFFFFFFFF), // function size,
+ 0) // No real body
};
EXPECT_FAILURE(data);
}
@@ -2031,40 +1946,36 @@ TEST_F(WasmModuleVerifyTest, FunctionBodies_empty) {
TEST_F(WasmModuleVerifyTest, FunctionBodies_one_empty) {
static const byte data[] = {
- SIGNATURES_SECTION(1, SIG_ENTRY_v_v), // --
- FUNCTION_SIGNATURES_SECTION(1, 0), // --
- SECTION(Code, 1 + SIZEOF_EMPTY_BODY), 1, EMPTY_BODY // --
+ SIGNATURES_SECTION(1, SIG_ENTRY_v_v), // --
+ FUNCTION_SIGNATURES_SECTION(1, 0), // --
+ ONE_EMPTY_BODY // --
};
EXPECT_VERIFIES(data);
}
TEST_F(WasmModuleVerifyTest, FunctionBodies_one_nop) {
static const byte data[] = {
- SIGNATURES_SECTION(1, SIG_ENTRY_v_v), // --
- FUNCTION_SIGNATURES_SECTION(1, 0), // --
- SECTION(Code, 1 + SIZEOF_NOP_BODY), 1, NOP_BODY // --
+ SIGNATURES_SECTION(1, SIG_ENTRY_v_v), // --
+ FUNCTION_SIGNATURES_SECTION(1, 0), // --
+ SECTION(Code, ENTRY_COUNT(1), NOP_BODY) // --
};
EXPECT_VERIFIES(data);
}
TEST_F(WasmModuleVerifyTest, FunctionBodies_count_mismatch1) {
static const byte data[] = {
- SIGNATURES_SECTION(1, SIG_ENTRY_v_v), // --
- FUNCTION_SIGNATURES_SECTION(2, 0, 0), // --
- SECTION(Code, 1 + SIZEOF_EMPTY_BODY), 1, // --
- EMPTY_BODY // --
+ SIGNATURES_SECTION(1, SIG_ENTRY_v_v), // --
+ FUNCTION_SIGNATURES_SECTION(2, 0, 0), // --
+ ONE_EMPTY_BODY // --
};
EXPECT_FAILURE(data);
}
TEST_F(WasmModuleVerifyTest, FunctionBodies_count_mismatch2) {
static const byte data[] = {
- SIGNATURES_SECTION(1, SIG_ENTRY_v_v), // --
- FUNCTION_SIGNATURES_SECTION(1, 0), // --
- SECTION(Code, 1 + 2 * SIZEOF_NOP_BODY), // --
- ENTRY_COUNT(2), // --
- NOP_BODY, // --
- NOP_BODY // --
+ SIGNATURES_SECTION(1, SIG_ENTRY_v_v), // --
+ FUNCTION_SIGNATURES_SECTION(1, 0), // --
+ SECTION(Code, ENTRY_COUNT(2), NOP_BODY, NOP_BODY) // --
};
EXPECT_FAILURE(data);
}
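// Editor's note: ONE_EMPTY_BODY (and its TWO_/THREE_/FOUR_ siblings) now
// stands in for a complete Code section -- it is even used alone as a whole
// module in CodeSectionWithoutFunctionSection below. An assumed expansion
// consistent with that usage:
//   #define ONE_EMPTY_BODY SECTION(Code, ENTRY_COUNT(1), EMPTY_BODY)
// where EMPTY_BODY is a zero-length function body.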
@@ -2077,54 +1988,71 @@ TEST_F(WasmModuleVerifyTest, Names_empty) {
}
TEST_F(WasmModuleVerifyTest, Names_one_empty) {
+ // TODO(wasm): This test does not test anything (corrupt name section does not
+ // fail validation).
static const byte data[] = {
- SIGNATURES_SECTION(1, SIG_ENTRY_v_v), // --
- FUNCTION_SIGNATURES_SECTION(1, 0), // --
- SECTION(Code, 1 + SIZEOF_EMPTY_BODY),
- ENTRY_COUNT(1),
- EMPTY_BODY, // --
- SECTION_NAMES(1 + 5),
- ENTRY_COUNT(1),
- FOO_STRING,
- NO_LOCAL_NAMES // --
+ SIGNATURES_SECTION(1, SIG_ENTRY_v_v), // --
+ FUNCTION_SIGNATURES_SECTION(1, 0), // --
+ ONE_EMPTY_BODY, // --
+ SECTION_NAMES(ENTRY_COUNT(1), FOO_STRING, NO_LOCAL_NAMES) // --
};
EXPECT_VERIFIES(data);
}
TEST_F(WasmModuleVerifyTest, Names_two_empty) {
+ // TODO(wasm): This test does not test anything (corrupt name section does not
+ // fail validation).
static const byte data[] = {
- SIGNATURES_SECTION(1, SIG_ENTRY_v_v), // --
- FUNCTION_SIGNATURES_SECTION(2, 0, 0), // --
- SECTION(Code, 1 + 2 * SIZEOF_EMPTY_BODY), // --
- ENTRY_COUNT(2),
- EMPTY_BODY,
- EMPTY_BODY, // --
- SECTION_NAMES(1 + 10),
- ENTRY_COUNT(2), // --
- FOO_STRING,
- NO_LOCAL_NAMES, // --
- FOO_STRING,
- NO_LOCAL_NAMES, // --
+ SIGNATURES_SECTION(1, SIG_ENTRY_v_v), // --
+ FUNCTION_SIGNATURES_SECTION(2, 0, 0), // --
+ TWO_EMPTY_BODIES, // --
+ SECTION_NAMES(ENTRY_COUNT(2), // --
+ FOO_STRING, NO_LOCAL_NAMES, // --
+ FOO_STRING, NO_LOCAL_NAMES), // --
};
EXPECT_VERIFIES(data);
}
TEST_F(WasmModuleVerifyTest, Regression684855) {
static const byte data[] = {
- SECTION_NAMES(12),
- 0xFB, // functions count
- 0x27, // |
- 0x00, // function name length
- 0xFF, // local names count
- 0xFF, // |
- 0xFF, // |
- 0xFF, // |
- 0xFF, // |
- 0xFF, // error: "varint too large"
- 0xFF, // |
- 0x00, // --
- 0x00 // --
+ SECTION_NAMES(0xFB, // functions count
+ 0x27, // |
+ 0x00, // function name length
+ 0xFF, // local names count
+ 0xFF, // |
+ 0xFF, // |
+ 0xFF, // |
+ 0xFF, // |
+ 0xFF, // error: "varint too large"
+ 0xFF, // |
+ 0x00, // --
+ 0x00) // --
+ };
+ EXPECT_VERIFIES(data);
+}
+
+TEST_F(WasmModuleVerifyTest, FunctionSectionWithoutCodeSection) {
+ static const byte data[] = {
+ SIGNATURES_SECTION(1, SIG_ENTRY_v_v), // Type section.
+ FUNCTION_SIGNATURES_SECTION(1, 0), // Function section.
};
+ ModuleResult result = DecodeModule(data, data + sizeof(data));
+ EXPECT_NOT_OK(result, "function count is 1, but code section is absent");
+}
+
+TEST_F(WasmModuleVerifyTest, CodeSectionWithoutFunctionSection) {
+ static const byte data[] = {ONE_EMPTY_BODY};
+ ModuleResult result = DecodeModule(data, data + sizeof(data));
+ EXPECT_NOT_OK(result, "function body count 1 mismatch (0 expected)");
+}
+
+TEST_F(WasmModuleVerifyTest, EmptyFunctionSectionWithoutCodeSection) {
+ static const byte data[] = {SECTION(Function, ENTRY_COUNT(0))};
+ EXPECT_VERIFIES(data);
+}
+
+TEST_F(WasmModuleVerifyTest, EmptyCodeSectionWithoutFunctionSection) {
+ static const byte data[] = {SECTION(Code, ENTRY_COUNT(0))};
EXPECT_VERIFIES(data);
}
@@ -2199,15 +2127,15 @@ TEST_F(WasmInitExprDecodeTest, InitExpr_illegal) {
TEST_F(WasmModuleVerifyTest, Multiple_Named_Sections) {
static const byte data[] = {
- SECTION(Unknown, 4), 1, 'X', 17, 18, // --
- SECTION(Unknown, 9), 3, 'f', 'o', 'o', 5, 6, 7, 8, 9, // --
- SECTION(Unknown, 8), 5, 'o', 't', 'h', 'e', 'r', 7, 8, // --
+ SECTION(Unknown, ADD_COUNT('X'), 17, 18), // --
+ SECTION(Unknown, ADD_COUNT('f', 'o', 'o'), 5, 6, 7, 8, 9), // --
+ SECTION(Unknown, ADD_COUNT('o', 't', 'h', 'e', 'r'), 7, 8), // --
};
EXPECT_VERIFIES(data);
}
TEST_F(WasmModuleVerifyTest, Section_Name_No_UTF8) {
- static const byte data[] = {SECTION(Unknown, 4), 1, 0xFF, 17, 18};
+ static const byte data[] = {SECTION(Unknown, 1, 0xFF, 17, 18)};
EXPECT_FAILURE(data);
}
@@ -2237,11 +2165,11 @@ class WasmModuleCustomSectionTest : public TestWithIsolateAndZone {
TEST_F(WasmModuleCustomSectionTest, ThreeUnknownSections) {
static constexpr byte data[] = {
- U32_LE(kWasmMagic), // --
- U32_LE(kWasmVersion), // --
- SECTION(Unknown, 4), 1, 'X', 17, 18, // --
- SECTION(Unknown, 9), 3, 'f', 'o', 'o', 5, 6, 7, 8, 9, // --
- SECTION(Unknown, 8), 5, 'o', 't', 'h', 'e', 'r', 7, 8, // --
+ U32_LE(kWasmMagic), // --
+ U32_LE(kWasmVersion), // --
+ SECTION(Unknown, 1, 'X', 17, 18), // --
+ SECTION(Unknown, 3, 'f', 'o', 'o', 5, 6, 7, 8, 9), // --
+ SECTION(Unknown, 5, 'o', 't', 'h', 'e', 'r', 7, 8), // --
};
static const CustomSectionOffset expected[] = {
@@ -2256,24 +2184,12 @@ TEST_F(WasmModuleCustomSectionTest, ThreeUnknownSections) {
TEST_F(WasmModuleCustomSectionTest, TwoKnownTwoUnknownSections) {
static const byte data[] = {
- U32_LE(kWasmMagic), // --
- U32_LE(kWasmVersion), // --
- SIGNATURES_SECTION(2, SIG_ENTRY_v_v, SIG_ENTRY_v_v), // --
- SECTION(Unknown, 4),
- 1,
- 'X',
- 17,
- 18, // --
- ONE_EMPTY_FUNCTION,
- SECTION(Unknown, 8),
- 5,
- 'o',
- 't',
- 'h',
- 'e',
- 'r',
- 7,
- 8, // --
+ U32_LE(kWasmMagic), // --
+ U32_LE(kWasmVersion), // --
+ SIGNATURES_SECTION(2, SIG_ENTRY_v_v, SIG_ENTRY_v_v), // --
+ SECTION(Unknown, ADD_COUNT('X'), 17, 18), // --
+ ONE_EMPTY_FUNCTION(SIG_INDEX(0)), // --
+ SECTION(Unknown, ADD_COUNT('o', 't', 'h', 'e', 'r'), 7, 8), // --
};
static const CustomSectionOffset expected[] = {
@@ -2285,79 +2201,162 @@ TEST_F(WasmModuleCustomSectionTest, TwoKnownTwoUnknownSections) {
CheckSections(data, data + sizeof(data), expected, arraysize(expected));
}
-#define SRC_MAP \
- 16, 's', 'o', 'u', 'r', 'c', 'e', 'M', 'a', 'p', 'p', 'i', 'n', 'g', 'U', \
- 'R', 'L'
TEST_F(WasmModuleVerifyTest, SourceMappingURLSection) {
-#define SRC 's', 'r', 'c', '/', 'x', 'y', 'z', '.', 'c'
- static const byte data[] = {SECTION(Unknown, 27), SRC_MAP, 9, SRC};
+ static const byte data[] = {
+ SECTION_SRC_MAP('s', 'r', 'c', '/', 'x', 'y', 'z', '.', 'c')};
ModuleResult result = DecodeModule(data, data + sizeof(data));
EXPECT_TRUE(result.ok());
- EXPECT_EQ(9u, result.val->source_map_url.size());
- const char src[] = {SRC};
- EXPECT_EQ(
- 0,
- strncmp(reinterpret_cast<const char*>(result.val->source_map_url.data()),
- src, 9));
-#undef SRC
+ EXPECT_EQ("src/xyz.c", result.value()->source_map_url);
}
TEST_F(WasmModuleVerifyTest, BadSourceMappingURLSection) {
-#define BAD_SRC 's', 'r', 'c', '/', 'x', 0xff, 'z', '.', 'c'
- static const byte data[] = {SECTION(Unknown, 27), SRC_MAP, 9, BAD_SRC};
+ static const byte data[] = {
+ SECTION_SRC_MAP('s', 'r', 'c', '/', 'x', 0xff, 'z', '.', 'c')};
ModuleResult result = DecodeModule(data, data + sizeof(data));
EXPECT_TRUE(result.ok());
- EXPECT_EQ(0u, result.val->source_map_url.size());
-#undef BAD_SRC
+ EXPECT_EQ(0u, result.value()->source_map_url.size());
}
TEST_F(WasmModuleVerifyTest, MultipleSourceMappingURLSections) {
-#define SRC 'a', 'b', 'c'
- static const byte data[] = {SECTION(Unknown, 21),
- SRC_MAP,
- 3,
- SRC,
- SECTION(Unknown, 21),
- SRC_MAP,
- 3,
- 'p',
- 'q',
- 'r'};
+ static const byte data[] = {SECTION_SRC_MAP('a', 'b', 'c'),
+ SECTION_SRC_MAP('p', 'q', 'r')};
ModuleResult result = DecodeModule(data, data + sizeof(data));
EXPECT_TRUE(result.ok());
- EXPECT_EQ(3u, result.val->source_map_url.size());
- const char src[] = {SRC};
- EXPECT_EQ(
- 0,
- strncmp(reinterpret_cast<const char*>(result.val->source_map_url.data()),
- src, 3));
-#undef SRC
+ EXPECT_EQ("abc", result.value()->source_map_url);
}
-#undef SRC_MAP
TEST_F(WasmModuleVerifyTest, MultipleNameSections) {
-#define NAME_SECTION 4, 'n', 'a', 'm', 'e'
- static const byte data[] = {SECTION(Unknown, 11),
- NAME_SECTION,
- 0,
- 4,
- 3,
- 'a',
- 'b',
- 'c',
- SECTION(Unknown, 12),
- NAME_SECTION,
- 0,
- 5,
- 4,
- 'p',
- 'q',
- 'r',
- 's'};
+ static const byte data[] = {
+ SECTION_NAMES(0, ADD_COUNT(ADD_COUNT('a', 'b', 'c'))),
+ SECTION_NAMES(0, ADD_COUNT(ADD_COUNT('p', 'q', 'r', 's')))};
ModuleResult result = DecodeModule(data, data + sizeof(data));
EXPECT_TRUE(result.ok());
- EXPECT_EQ(3u, result.val->name.length());
-#undef NAME_SECTION
+ EXPECT_EQ(3u, result.value()->name.length());
+}
+
+TEST_F(WasmModuleVerifyTest, PassiveDataSegment) {
+ static const byte data[] = {
+ // memory declaration ----------------------------------------------------
+ SECTION(Memory, ENTRY_COUNT(1), 0, 1),
+ // data segments --------------------------------------------------------
+ SECTION(Data, ENTRY_COUNT(1), PASSIVE, ADD_COUNT('h', 'i')),
+ };
+ EXPECT_FAILURE(data);
+ WASM_FEATURE_SCOPE(bulk_memory);
+ EXPECT_VERIFIES(data);
+ EXPECT_OFF_END_FAILURE(data, arraysize(data) - 5);
+}
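// Editor's note: these bulk-memory tests decode the same bytes twice -- the
// first EXPECT_FAILURE runs with the feature disabled, then
// WASM_FEATURE_SCOPE(bulk_memory) flips the flag for the remainder of the
// test body, so the identical module must verify. The trailing
// EXPECT_OFF_END_FAILURE additionally checks that every truncated prefix of
// the new section still fails to decode.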
+
+TEST_F(WasmModuleVerifyTest, PassiveElementSegment) {
+ static const byte data[] = {
+ // sig#0 -----------------------------------------------------------------
+ SIGNATURES_SECTION_VOID_VOID,
+ // funcs -----------------------------------------------------------------
+ ONE_EMPTY_FUNCTION(SIG_INDEX(0)),
+ // table declaration -----------------------------------------------------
+ SECTION(Table, ENTRY_COUNT(1), kLocalAnyFunc, 0, 1),
+ // element segments -----------------------------------------------------
+ SECTION(Element, ENTRY_COUNT(1), PASSIVE,
+ ADD_COUNT(FUNC_INDEX(0), FUNC_INDEX(0))),
+ // code ------------------------------------------------------------------
+ ONE_EMPTY_BODY};
+ EXPECT_FAILURE(data);
+ WASM_FEATURE_SCOPE(bulk_memory);
+ EXPECT_VERIFIES(data);
+ EXPECT_OFF_END_FAILURE(data, arraysize(data) - 5);
+}
+
+TEST_F(WasmModuleVerifyTest, DataCountSectionCorrectPlacement) {
+ static const byte data[] = {SECTION(Element, ENTRY_COUNT(0)),
+ SECTION(DataCount, ENTRY_COUNT(0)),
+ SECTION(Code, ENTRY_COUNT(0))};
+ EXPECT_FAILURE(data);
+ WASM_FEATURE_SCOPE(bulk_memory);
+ EXPECT_VERIFIES(data);
+}
+
+TEST_F(WasmModuleVerifyTest, DataCountSectionAfterCode) {
+ static const byte data[] = {SECTION(Code, ENTRY_COUNT(0)),
+ SECTION(DataCount, ENTRY_COUNT(0))};
+ WASM_FEATURE_SCOPE(bulk_memory);
+ ModuleResult result = DecodeModule(data, data + sizeof(data));
+ EXPECT_NOT_OK(result,
+ "The DataCount section must appear before the Code section");
+}
+
+TEST_F(WasmModuleVerifyTest, DataCountSectionBeforeElement) {
+ static const byte data[] = {SECTION(DataCount, ENTRY_COUNT(0)),
+ SECTION(Element, ENTRY_COUNT(0))};
+ WASM_FEATURE_SCOPE(bulk_memory);
+ ModuleResult result = DecodeModule(data, data + sizeof(data));
+ EXPECT_NOT_OK(result, "unexpected section: Element");
+}
+
+TEST_F(WasmModuleVerifyTest, DataCountSectionAfterStartBeforeElement) {
+ STATIC_ASSERT(kStartSectionCode + 1 == kElementSectionCode);
+ static const byte data[] = {
+ // We need the start section for this test, but the start section must
+ // reference a valid function, which requires the type and function
+ // sections too.
+ SIGNATURES_SECTION(1, SIG_ENTRY_v_v), // Type section.
+ FUNCTION_SIGNATURES_SECTION(1, 0), // Function section.
+
+ SECTION(Start, U32V_1(0)), // Start section.
+ SECTION(DataCount, ENTRY_COUNT(0)), // DataCount section.
+ SECTION(Element, ENTRY_COUNT(0)) // Element section.
+ };
+ WASM_FEATURE_SCOPE(bulk_memory);
+ ModuleResult result = DecodeModule(data, data + sizeof(data));
+ EXPECT_NOT_OK(result, "unexpected section: Element");
+}
+
+TEST_F(WasmModuleVerifyTest, MultipleDataCountSections) {
+ static const byte data[] = {SECTION(DataCount, ENTRY_COUNT(0)),
+ SECTION(DataCount, ENTRY_COUNT(0))};
+ WASM_FEATURE_SCOPE(bulk_memory);
+ ModuleResult result = DecodeModule(data, data + sizeof(data));
+ EXPECT_NOT_OK(result, "Multiple DataCount sections not allowed");
+}
+
+TEST_F(WasmModuleVerifyTest, DataCountSegmentCountMatch) {
+ static const byte data[] = {
+ SECTION(Memory, ENTRY_COUNT(1), 0, 1), // Memory section.
+ SECTION(DataCount, ENTRY_COUNT(1)), // DataCount section.
+ SECTION(Data, ENTRY_COUNT(1), LINEAR_MEMORY_INDEX_0, // Data section.
+ WASM_INIT_EXPR_I32V_1(12), ADD_COUNT('h', 'i'))};
+
+ EXPECT_FAILURE(data);
+ WASM_FEATURE_SCOPE(bulk_memory);
+ EXPECT_VERIFIES(data);
+}
+
+TEST_F(WasmModuleVerifyTest, DataCountSegmentCount_greater) {
+ static const byte data[] = {
+ SECTION(Memory, ENTRY_COUNT(1), 0, 1), // Memory section.
+ SECTION(DataCount, ENTRY_COUNT(3)), // DataCount section.
+ SECTION(Data, ENTRY_COUNT(0))}; // Data section.
+ WASM_FEATURE_SCOPE(bulk_memory);
+ ModuleResult result = DecodeModule(data, data + sizeof(data));
+ EXPECT_NOT_OK(result, "data segments count 0 mismatch (3 expected)");
+}
+
+TEST_F(WasmModuleVerifyTest, DataCountSegmentCount_less) {
+ static const byte data[] = {
+ SECTION(Memory, ENTRY_COUNT(1), 0, 1), // Memory section.
+ SECTION(DataCount, ENTRY_COUNT(0)), // DataCount section.
+ SECTION(Data, ENTRY_COUNT(1), LINEAR_MEMORY_INDEX_0, // Data section.
+ WASM_INIT_EXPR_I32V_1(12), ADD_COUNT('a', 'b', 'c'))};
+ WASM_FEATURE_SCOPE(bulk_memory);
+ ModuleResult result = DecodeModule(data, data + sizeof(data));
+ EXPECT_NOT_OK(result, "data segments count 1 mismatch (0 expected)");
+}
+
+TEST_F(WasmModuleVerifyTest, DataCountSegmentCount_omitted) {
+ static const byte data[] = {SECTION(Memory, ENTRY_COUNT(1), 0, 1),
+ SECTION(DataCount, ENTRY_COUNT(1))};
+ WASM_FEATURE_SCOPE(bulk_memory);
+ ModuleResult result = DecodeModule(data, data + sizeof(data));
+ EXPECT_NOT_OK(result, "data segments count 0 mismatch (1 expected)");
}
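// Editor's note: taken together, the DataCount tests pin down the
// bulk-memory rules -- the section appears at most once, strictly between
// the Element and Code sections, and its count must equal the number of
// Data segments. As a rough byte-level sketch (section codes per the
// bulk-memory proposal; the exact values are an assumption here), the module
// in DataCountSegmentCountMatch is approximately:
//   0x05, 0x03, 0x01, 0x00, 0x01,     // Memory: one memory, limits {0,1}
//   0x0C, 0x01, 0x01,                 // DataCount: one segment expected
//   0x0B, 0x08, 0x01, 0x00,           // Data: one segment, memory 0,
//   0x41, 0x0C, 0x0B, 0x02, 'h', 'i'  //   offset i32.const 12, bytes "hi"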
#undef WASM_FEATURE_SCOPE
@@ -2374,13 +2373,13 @@ TEST_F(WasmModuleVerifyTest, MultipleNameSections) {
#undef WASM_INIT_EXPR_F64
#undef WASM_INIT_EXPR_ANYREF
#undef WASM_INIT_EXPR_GLOBAL
-#undef SIZEOF_EMPTY_FUNCTION
#undef EMPTY_BODY
-#undef SIZEOF_EMPTY_BODY
#undef NOP_BODY
-#undef SIZEOF_NOP_BODY
#undef SIG_ENTRY_i_i
#undef UNKNOWN_SECTION
+#undef COUNT_ARGS
+#undef CHECK_LEB1
+#undef ADD_COUNT
#undef SECTION
#undef SIGNATURES_SECTION
#undef FUNCTION_SIGNATURES_SECTION
@@ -2390,8 +2389,8 @@ TEST_F(WasmModuleVerifyTest, MultipleNameSections) {
#undef EMPTY_FUNCTION_SIGNATURES_SECTION
#undef EMPTY_FUNCTION_BODIES_SECTION
#undef SECTION_NAMES
-#undef SECTION_EXCEPTIONS
#undef EMPTY_NAMES_SECTION
+#undef SECTION_SRC_MAP
#undef FAIL_IF_NO_EXPERIMENTAL_EH
#undef X1
#undef X2
@@ -2407,11 +2406,13 @@ TEST_F(WasmModuleVerifyTest, MultipleNameSections) {
#undef FOUR_EMPTY_BODIES
#undef SIGNATURES_SECTION_VOID_VOID
#undef LINEAR_MEMORY_INDEX_0
+#undef EXCEPTION_ENTRY
#undef EXPECT_VERIFIES
#undef EXPECT_FAILURE_LEN
#undef EXPECT_FAILURE
#undef EXPECT_OFF_END_FAILURE
#undef EXPECT_OK
+#undef EXPECT_NOT_OK
} // namespace module_decoder_unittest
} // namespace wasm
diff --git a/deps/v8/test/unittests/wasm/streaming-decoder-unittest.cc b/deps/v8/test/unittests/wasm/streaming-decoder-unittest.cc
index 4159117c26..a5b89762ad 100644
--- a/deps/v8/test/unittests/wasm/streaming-decoder-unittest.cc
+++ b/deps/v8/test/unittests/wasm/streaming-decoder-unittest.cc
@@ -17,20 +17,32 @@ namespace v8 {
namespace internal {
namespace wasm {
+struct MockStreamingResult {
+ size_t num_sections = 0;
+ size_t num_functions = 0;
+ bool ok = true;
+ OwnedVector<uint8_t> received_bytes;
+
+ MockStreamingResult() = default;
+};
+
class MockStreamingProcessor : public StreamingProcessor {
public:
+ explicit MockStreamingProcessor(MockStreamingResult* result)
+ : result_(result) {}
+
bool ProcessModuleHeader(Vector<const uint8_t> bytes,
uint32_t offset) override {
// TODO(ahaas): Share code with the module-decoder.
Decoder decoder(bytes.begin(), bytes.end());
uint32_t magic_word = decoder.consume_u32("wasm magic");
if (decoder.failed() || magic_word != kWasmMagic) {
- ok_ = false;
+ result_->ok = false;
return false;
}
uint32_t magic_version = decoder.consume_u32("wasm version");
if (decoder.failed() || magic_version != kWasmVersion) {
- ok_ = false;
+ result_->ok = false;
return false;
}
return true;
@@ -38,19 +50,19 @@ class MockStreamingProcessor : public StreamingProcessor {
// Process all sections but the code section.
bool ProcessSection(SectionCode section_code, Vector<const uint8_t> bytes,
uint32_t offset) override {
- ++num_sections_;
+ ++result_->num_sections;
return true;
}
- bool ProcessCodeSectionHeader(size_t num_functions,
- uint32_t offset) override {
+ bool ProcessCodeSectionHeader(size_t num_functions, uint32_t offset,
+ std::shared_ptr<WireBytesStorage>) override {
return true;
}
// Process a function body.
bool ProcessFunctionBody(Vector<const uint8_t> bytes,
uint32_t offset) override {
- ++num_functions_;
+ ++result_->num_functions;
return true;
}
@@ -58,26 +70,21 @@ class MockStreamingProcessor : public StreamingProcessor {
// Finish the processing of the stream.
void OnFinishedStream(OwnedVector<uint8_t> bytes) override {
- received_bytes_ = std::move(bytes);
+ result_->received_bytes = std::move(bytes);
}
// Report an error detected in the StreamingDecoder.
- void OnError(DecodeResult result) override { ok_ = false; }
+ void OnError(const WasmError&) override { result_->ok = false; }
void OnAbort() override {}
- size_t num_sections() const { return num_sections_; }
- size_t num_functions() const { return num_functions_; }
- bool ok() const { return ok_; }
- Vector<const uint8_t> received_bytes() const {
- return received_bytes_.as_vector();
- }
+ bool Deserialize(Vector<const uint8_t> module_bytes,
+ Vector<const uint8_t> wire_bytes) override {
+ return false;
+ };
private:
- size_t num_sections_ = 0;
- size_t num_functions_ = 0;
- bool ok_ = true;
- OwnedVector<uint8_t> received_bytes_;
+ MockStreamingResult* const result_;
};
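// Editor's note: the refactor above routes all observations into a
// caller-owned MockStreamingResult instead of fields on the processor.
// Since the StreamingDecoder takes ownership of its processor, the old
// keep-a-raw-pointer-to-the-processor pattern tied result lifetime to the
// stream; the new pattern, used throughout the tests below, keeps results
// alive independently:
//   MockStreamingResult result;
//   StreamingDecoder stream(base::make_unique<MockStreamingProcessor>(&result));
//   stream.OnBytesReceived(bytes);
//   stream.Finish();
//   EXPECT_TRUE(result.ok);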
class WasmStreamingDecoderTest : public ::testing::Test {
@@ -85,50 +92,49 @@ class WasmStreamingDecoderTest : public ::testing::Test {
void ExpectVerifies(Vector<const uint8_t> data, size_t expected_sections,
size_t expected_functions) {
for (int split = 0; split <= data.length(); ++split) {
- // Use a unique_ptr so that the StreamingDecoder can own the processor.
- std::unique_ptr<MockStreamingProcessor> p(new MockStreamingProcessor());
- MockStreamingProcessor* processor = p.get();
- StreamingDecoder stream(std::move(p));
+ MockStreamingResult result;
+ StreamingDecoder stream(
+ base::make_unique<MockStreamingProcessor>(&result));
stream.OnBytesReceived(data.SubVector(0, split));
stream.OnBytesReceived(data.SubVector(split, data.length()));
stream.Finish();
- EXPECT_TRUE(processor->ok());
- EXPECT_EQ(expected_sections, processor->num_sections());
- EXPECT_EQ(expected_functions, processor->num_functions());
- EXPECT_EQ(data, processor->received_bytes());
+ EXPECT_TRUE(result.ok);
+ EXPECT_EQ(expected_sections, result.num_sections);
+ EXPECT_EQ(expected_functions, result.num_functions);
+ EXPECT_EQ(data, result.received_bytes.as_vector());
}
}
void ExpectFailure(Vector<const uint8_t> data) {
for (int split = 0; split <= data.length(); ++split) {
- std::unique_ptr<MockStreamingProcessor> p(new MockStreamingProcessor());
- MockStreamingProcessor* processor = p.get();
- StreamingDecoder stream(std::move(p));
+ MockStreamingResult result;
+ StreamingDecoder stream(
+ base::make_unique<MockStreamingProcessor>(&result));
stream.OnBytesReceived(data.SubVector(0, split));
stream.OnBytesReceived(data.SubVector(split, data.length()));
stream.Finish();
- EXPECT_FALSE(processor->ok());
+ EXPECT_FALSE(result.ok);
}
}
+
+ MockStreamingResult result;
};
TEST_F(WasmStreamingDecoderTest, EmptyStream) {
- std::unique_ptr<MockStreamingProcessor> p(new MockStreamingProcessor());
- MockStreamingProcessor* processor = p.get();
- StreamingDecoder stream(std::move(p));
+ MockStreamingResult result;
+ StreamingDecoder stream(base::make_unique<MockStreamingProcessor>(&result));
stream.Finish();
- EXPECT_FALSE(processor->ok());
+ EXPECT_FALSE(result.ok);
}
TEST_F(WasmStreamingDecoderTest, IncompleteModuleHeader) {
const uint8_t data[] = {U32_LE(kWasmMagic), U32_LE(kWasmVersion)};
{
- std::unique_ptr<MockStreamingProcessor> p(new MockStreamingProcessor());
- MockStreamingProcessor* processor = p.get();
- StreamingDecoder stream(std::move(p));
+ MockStreamingResult result;
+ StreamingDecoder stream(base::make_unique<MockStreamingProcessor>(&result));
stream.OnBytesReceived(Vector<const uint8_t>(data, 1));
stream.Finish();
- EXPECT_FALSE(processor->ok());
+ EXPECT_FALSE(result.ok);
}
for (int length = 1; length < static_cast<int>(arraysize(data)); ++length) {
ExpectFailure(Vector<const uint8_t>(data, length));
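The refactoring above moves every observable outcome out of MockStreamingProcessor into a caller-owned MockStreamingResult, so the StreamingDecoder can take sole ownership of the processor while the test still reads the results after the decoder is gone. A minimal, self-contained sketch of this out-parameter pattern (illustrative names, not the V8 API):

    #include <cstddef>
    #include <memory>

    // Caller-owned results; outlives the processor.
    struct Result {
      size_t num_sections = 0;
      bool ok = true;
    };

    class Processor {
     public:
      explicit Processor(Result* result) : result_(result) {}
      void OnSection() { ++result_->num_sections; }
      void OnError() { result_->ok = false; }

     private:
      Result* const result_;  // Borrowed pointer into the test's stack frame.
    };

    class Decoder {
     public:
      explicit Decoder(std::unique_ptr<Processor> processor)
          : processor_(std::move(processor)) {}
      void Feed() { processor_->OnSection(); }

     private:
      std::unique_ptr<Processor> processor_;  // The decoder owns the processor.
    };

    // Usage:
    //   Result result;
    //   Decoder decoder(std::make_unique<Processor>(&result));
    //   decoder.Feed();
    //   // result.num_sections == 1, readable even after decoder is destroyed.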
diff --git a/deps/v8/test/unittests/wasm/trap-handler-unittest.cc b/deps/v8/test/unittests/wasm/trap-handler-posix-unittest.cc
index 07e3ca888d..604d2adfb2 100644
--- a/deps/v8/test/unittests/wasm/trap-handler-unittest.cc
+++ b/deps/v8/test/unittests/wasm/trap-handler-posix-unittest.cc
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/trap-handler/trap-handler.h"
#include "include/v8.h"
+#include "src/trap-handler/trap-handler.h"
#include "testing/gtest/include/gtest/gtest.h"
#if V8_OS_POSIX
@@ -58,7 +58,7 @@ TEST_F(SignalHandlerFallbackTest, DoTest) {
FAIL();
} else {
// Our signal handler ran.
- v8::internal::trap_handler::RestoreOriginalSignalHandler();
+ v8::internal::trap_handler::RemoveTrapHandler();
SUCCEED();
return;
}
diff --git a/deps/v8/test/unittests/wasm/trap-handler-win-unittest.cc b/deps/v8/test/unittests/wasm/trap-handler-win-unittest.cc
new file mode 100644
index 0000000000..58302bad74
--- /dev/null
+++ b/deps/v8/test/unittests/wasm/trap-handler-win-unittest.cc
@@ -0,0 +1,93 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <windows.h>
+
+#include "include/v8.h"
+#include "src/allocation.h"
+#include "src/base/page-allocator.h"
+#include "src/trap-handler/trap-handler.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace {
+
+#if V8_TRAP_HANDLER_SUPPORTED
+
+bool g_handler_got_executed = false;
+// The start address of the virtual memory we use to cause an exception.
+i::Address g_start_address;
+
+// When using V8::EnableWebAssemblyTrapHandler, we save the old one to fall back
+// on if V8 doesn't handle the exception. This allows tools like ASan to
+// register a handler early on during the process startup and still generate
+// stack traces on failures.
+class ExceptionHandlerFallbackTest : public ::testing::Test {
+ protected:
+ void SetUp() override {
+ // Register this handler as the last handler.
+ registered_handler_ = AddVectoredExceptionHandler(/*first=*/0, TestHandler);
+ CHECK_NOT_NULL(registered_handler_);
+
+ v8::PageAllocator* page_allocator = i::GetPlatformPageAllocator();
+ // We only need a single page.
+ size_t size = page_allocator->AllocatePageSize();
+ void* hint = page_allocator->GetRandomMmapAddr();
+ i::VirtualMemory mem(page_allocator, size, hint, size);
+ g_start_address = mem.address();
+ // Set the permissions of the memory to no-access.
+ CHECK(mem.SetPermissions(g_start_address, size,
+ v8::PageAllocator::kNoAccess));
+ mem_ = std::move(mem);
+ }
+
+ void WriteToTestMemory(int value) {
+ *reinterpret_cast<volatile int*>(g_start_address) = value;
+ }
+
+ int ReadFromTestMemory() {
+ return *reinterpret_cast<volatile int*>(g_start_address);
+ }
+
+ void TearDown() override {
+    // Be a good citizen and remove the exception handler.
+ ULONG result = RemoveVectoredExceptionHandler(registered_handler_);
+ CHECK(result);
+ }
+
+ private:
+ static LONG WINAPI TestHandler(EXCEPTION_POINTERS* exception) {
+ g_handler_got_executed = true;
+ v8::PageAllocator* page_allocator = i::GetPlatformPageAllocator();
+ // Make the allocated memory accessible so that from now on memory accesses
+ // do not cause an exception anymore.
+ CHECK(i::SetPermissions(page_allocator, g_start_address,
+ page_allocator->AllocatePageSize(),
+ v8::PageAllocator::kReadWrite));
+ // The memory access should work now, we can continue execution.
+ return EXCEPTION_CONTINUE_EXECUTION;
+ }
+
+ i::VirtualMemory mem_;
+ void* registered_handler_;
+};
+
+TEST_F(ExceptionHandlerFallbackTest, DoTest) {
+ constexpr bool use_default_handler = true;
+ CHECK(v8::V8::EnableWebAssemblyTrapHandler(use_default_handler));
+  // In the test setup the test memory is protected against any kind of
+  // access. Therefore the access here causes an access violation exception,
+  // which should be caught by the exception handler we installed above. In
+  // the exception handler we change the permissions of the test memory to
+  // make it accessible, and then return from the exception handler to execute
+  // the memory access again. This time we expect the memory access to work.
+ constexpr int test_value = 42;
+ WriteToTestMemory(test_value);
+ CHECK_EQ(test_value, ReadFromTestMemory());
+ CHECK(g_handler_got_executed);
+ v8::internal::trap_handler::RemoveTrapHandler();
+}
+
+#endif
+
+} // namespace
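The fallback behaviour exercised here hinges on handler ordering: a vectored exception handler registered with first == 0 is appended to the chain, so it only sees exceptions that earlier handlers (here, V8's trap handler, which is presumably prepended by EnableWebAssemblyTrapHandler) did not consume. A minimal Windows-only sketch of that registration pattern, with error handling elided:

    #include <windows.h>

    static LONG WINAPI FallbackHandler(EXCEPTION_POINTERS*) {
      // Runs only for exceptions earlier handlers (e.g. V8's) did not consume.
      // A tool such as ASan would record a stack trace here before giving up.
      return EXCEPTION_CONTINUE_SEARCH;
    }

    void* InstallFallback() {
      // first == 0: append to the handler chain; first == 1 would prepend.
      return AddVectoredExceptionHandler(/*first=*/0, FallbackHandler);
    }

    void UninstallFallback(void* handle) {
      RemoveVectoredExceptionHandler(handle);
    }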
diff --git a/deps/v8/test/unittests/wasm/trap-handler-x64-unittest.cc b/deps/v8/test/unittests/wasm/trap-handler-x64-unittest.cc
new file mode 100644
index 0000000000..8c42b1735c
--- /dev/null
+++ b/deps/v8/test/unittests/wasm/trap-handler-x64-unittest.cc
@@ -0,0 +1,478 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "include/v8config.h"
+
+#if V8_OS_LINUX
+#include <signal.h>
+#include <ucontext.h>
+#elif V8_OS_MACOSX
+#include <signal.h>
+#include <sys/ucontext.h>
+#elif V8_OS_WIN
+#include <windows.h>
+#endif
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+#if V8_OS_POSIX
+#include "include/v8-wasm-trap-handler-posix.h"
+#elif V8_OS_WIN
+#include "include/v8-wasm-trap-handler-win.h"
+#endif
+#include "src/allocation.h"
+#include "src/assembler-inl.h"
+#include "src/base/page-allocator.h"
+#include "src/macro-assembler-inl.h"
+#include "src/simulator.h"
+#include "src/trap-handler/trap-handler.h"
+#include "src/vector.h"
+#include "src/wasm/wasm-engine.h"
+#include "src/wasm/wasm-memory.h"
+
+#include "test/common/assembler-tester.h"
+#include "test/unittests/test-utils.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+namespace {
+constexpr Register scratch = r10;
+bool g_test_handler_executed = false;
+#if V8_OS_LINUX || V8_OS_MACOSX
+struct sigaction g_old_segv_action;
+struct sigaction g_old_fpe_action;
+struct sigaction g_old_bus_action; // We get SIGBUS on Mac sometimes.
+#elif V8_OS_WIN
+void* g_registered_handler = nullptr;
+#endif
+
+// The recovery address allows us to recover from an intentional crash.
+Address g_recovery_address;
+// Flag to indicate if the test handler should call the trap handler as a first
+// chance handler.
+bool g_use_as_first_chance_handler = false;
+} // namespace
+
+#define __ masm.
+
+enum TrapHandlerStyle : int {
+ // The test uses the default trap handler of V8.
+ kDefault = 0,
+ // The test installs the trap handler callback in its own test handler.
+ kCallback = 1
+};
+
+std::string PrintTrapHandlerTestParam(
+ ::testing::TestParamInfo<TrapHandlerStyle> info) {
+ switch (info.param) {
+ case kDefault:
+ return "DefaultTrapHandler";
+ case kCallback:
+ return "Callback";
+ }
+ UNREACHABLE();
+}
+
+class TrapHandlerTest : public TestWithIsolate,
+ public ::testing::WithParamInterface<TrapHandlerStyle> {
+ protected:
+ void SetUp() override {
+ void* base = nullptr;
+ size_t length = 0;
+ accessible_memory_start_ =
+ i_isolate()
+ ->wasm_engine()
+ ->memory_tracker()
+ ->TryAllocateBackingStoreForTesting(
+ i_isolate()->heap(), 1 * kWasmPageSize, &base, &length);
+ memory_buffer_ =
+ base::AddressRegion(reinterpret_cast<Address>(base), length);
+
+ // The allocated memory buffer ends with a guard page.
+ crash_address_ = memory_buffer_.end() - 32;
+ // Allocate a buffer for the generated code.
+ buffer_ = AllocateAssemblerBuffer(AssemblerBase::kMinimalBufferSize,
+ GetRandomMmapAddr());
+
+ InitRecoveryCode();
+
+#if V8_OS_LINUX || V8_OS_MACOSX
+ // Set up a signal handler to recover from the expected crash.
+ struct sigaction action;
+ action.sa_sigaction = SignalHandler;
+ sigemptyset(&action.sa_mask);
+ action.sa_flags = SA_SIGINFO;
+ // SIGSEGV happens for wasm oob memory accesses on Linux.
+ CHECK_EQ(0, sigaction(SIGSEGV, &action, &g_old_segv_action));
+ // SIGBUS happens for wasm oob memory accesses on macOS.
+ CHECK_EQ(0, sigaction(SIGBUS, &action, &g_old_bus_action));
+ // SIGFPE to simulate crashes which are not handled by the trap handler.
+ CHECK_EQ(0, sigaction(SIGFPE, &action, &g_old_fpe_action));
+#elif V8_OS_WIN
+ g_registered_handler =
+ AddVectoredExceptionHandler(/*first=*/0, TestHandler);
+#endif
+ }
+
+ void TearDown() override {
+ // We should always have left wasm code.
+ CHECK(!GetThreadInWasmFlag());
+ buffer_.reset();
+ recovery_buffer_.reset();
+
+ // Free the allocated backing store.
+ i_isolate()->wasm_engine()->memory_tracker()->FreeBackingStoreForTesting(
+ memory_buffer_, accessible_memory_start_);
+
+    // Clean up the trap handler.
+ trap_handler::RemoveTrapHandler();
+ if (!g_test_handler_executed) {
+#if V8_OS_LINUX || V8_OS_MACOSX
+ // The test handler cleans up the signal handler setup in the test. If the
+ // test handler was not called, we have to do the cleanup ourselves.
+ CHECK_EQ(0, sigaction(SIGSEGV, &g_old_segv_action, nullptr));
+ CHECK_EQ(0, sigaction(SIGFPE, &g_old_fpe_action, nullptr));
+ CHECK_EQ(0, sigaction(SIGBUS, &g_old_bus_action, nullptr));
+#elif V8_OS_WIN
+ RemoveVectoredExceptionHandler(g_registered_handler);
+ g_registered_handler = nullptr;
+#endif
+ }
+ }
+
+ void InitRecoveryCode() {
+    // Create a code snippet we can jump to in order to recover from a signal
+    // or exception. The code snippet only consists of a return statement.
+ recovery_buffer_ = AllocateAssemblerBuffer(
+ AssemblerBase::kMinimalBufferSize, GetRandomMmapAddr());
+
+ MacroAssembler masm(nullptr, AssemblerOptions{}, CodeObjectRequired::kNo,
+ recovery_buffer_->CreateView());
+ int recovery_offset = __ pc_offset();
+ __ Pop(scratch);
+ __ Ret();
+ CodeDesc desc;
+ masm.GetCode(nullptr, &desc);
+ recovery_buffer_->MakeExecutable();
+ g_recovery_address =
+ reinterpret_cast<Address>(desc.buffer + recovery_offset);
+ }
+
+#if V8_OS_LINUX || V8_OS_MACOSX
+ static void SignalHandler(int signal, siginfo_t* info, void* context) {
+ if (g_use_as_first_chance_handler) {
+ if (v8::TryHandleWebAssemblyTrapPosix(signal, info, context)) {
+ return;
+ }
+ }
+
+    // Reset the signal handlers so that this signal handler is not called
+    // repeatedly.
+ sigaction(SIGSEGV, &g_old_segv_action, nullptr);
+ sigaction(SIGFPE, &g_old_fpe_action, nullptr);
+ sigaction(SIGBUS, &g_old_bus_action, nullptr);
+
+ g_test_handler_executed = true;
+    // Set $rip to the recovery code.
+ ucontext_t* uc = reinterpret_cast<ucontext_t*>(context);
+#if V8_OS_LINUX
+ uc->uc_mcontext.gregs[REG_RIP] = g_recovery_address;
+#else // V8_OS_MACOSX
+ uc->uc_mcontext->__ss.__rip = g_recovery_address;
+#endif
+ }
+#endif
+
+#if V8_OS_WIN
+ static LONG WINAPI TestHandler(EXCEPTION_POINTERS* exception) {
+ if (g_use_as_first_chance_handler) {
+ if (v8::TryHandleWebAssemblyTrapWindows(exception)) {
+ return EXCEPTION_CONTINUE_EXECUTION;
+ }
+ }
+ RemoveVectoredExceptionHandler(g_registered_handler);
+ g_registered_handler = nullptr;
+ g_test_handler_executed = true;
+ exception->ContextRecord->Rip = g_recovery_address;
+ return EXCEPTION_CONTINUE_EXECUTION;
+ }
+#endif
+
+ public:
+ void SetupTrapHandler(TrapHandlerStyle style) {
+ bool use_default_handler = style == kDefault;
+ g_use_as_first_chance_handler = !use_default_handler;
+ CHECK(v8::V8::EnableWebAssemblyTrapHandler(use_default_handler));
+ }
+
+ void GenerateSetThreadInWasmFlagCode(MacroAssembler* masm) {
+ masm->Move(scratch,
+ i_isolate()->thread_local_top()->thread_in_wasm_flag_address_,
+ RelocInfo::NONE);
+ masm->movl(MemOperand(scratch, 0), Immediate(1));
+ }
+
+ void GenerateResetThreadInWasmFlagCode(MacroAssembler* masm) {
+ masm->Move(scratch,
+ i_isolate()->thread_local_top()->thread_in_wasm_flag_address_,
+ RelocInfo::NONE);
+ masm->movl(MemOperand(scratch, 0), Immediate(0));
+ }
+
+ bool GetThreadInWasmFlag() {
+ return *reinterpret_cast<int*>(
+ trap_handler::GetThreadInWasmThreadLocalAddress());
+ }
+
+  // Execute the code in the buffer.
+ void ExecuteBuffer() {
+ buffer_->MakeExecutable();
+ GeneratedCode<void>::FromAddress(
+ i_isolate(), reinterpret_cast<Address>(buffer_->start()))
+ .Call();
+ CHECK(!g_test_handler_executed);
+ }
+
+  // Execute the code in the buffer. We expect a crash, which we recover from
+  // in the test handler.
+ void ExecuteExpectCrash(TestingAssemblerBuffer* buffer,
+ bool check_wasm_flag = true) {
+ CHECK(!g_test_handler_executed);
+ buffer->MakeExecutable();
+ GeneratedCode<void>::FromAddress(i_isolate(),
+ reinterpret_cast<Address>(buffer->start()))
+ .Call();
+ CHECK(g_test_handler_executed);
+ g_test_handler_executed = false;
+ if (check_wasm_flag) CHECK(!GetThreadInWasmFlag());
+ }
+
+ bool test_handler_executed() { return g_test_handler_executed; }
+
+ // Allocated memory which corresponds to wasm memory with guard regions.
+ base::AddressRegion memory_buffer_;
+ // Address within the guard region of the wasm memory. Accessing this memory
+ // address causes a signal or exception.
+ Address crash_address_;
+  // The start of the accessible region in the allocated memory. This pointer
+  // is needed to deregister the memory from the wasm memory tracker later.
+ void* accessible_memory_start_;
+
+ // Buffer for generated code.
+ std::unique_ptr<TestingAssemblerBuffer> buffer_;
+ // Buffer for the code for the landing pad of the test handler.
+ std::unique_ptr<TestingAssemblerBuffer> recovery_buffer_;
+};
+
+TEST_P(TrapHandlerTest, TestTrapHandlerRecovery) {
+  // Test that the wasm trap handler can recover from a memory access
+  // violation in wasm code (we fake the wasm code and the access violation).
+ MacroAssembler masm(nullptr, AssemblerOptions{}, CodeObjectRequired::kNo,
+ buffer_->CreateView());
+ __ Push(scratch);
+ GenerateSetThreadInWasmFlagCode(&masm);
+ __ Move(scratch, crash_address_, RelocInfo::NONE);
+ int crash_offset = __ pc_offset();
+ __ testl(MemOperand(scratch, 0), Immediate(1));
+ int recovery_offset = __ pc_offset();
+ GenerateResetThreadInWasmFlagCode(&masm);
+ __ Pop(scratch);
+ __ Ret();
+ CodeDesc desc;
+ masm.GetCode(nullptr, &desc);
+
+ SetupTrapHandler(GetParam());
+ trap_handler::ProtectedInstructionData protected_instruction{crash_offset,
+ recovery_offset};
+ trap_handler::RegisterHandlerData(reinterpret_cast<Address>(desc.buffer),
+ desc.instr_size, 1, &protected_instruction);
+
+ ExecuteBuffer();
+}
+
+TEST_P(TrapHandlerTest, TestReleaseHandlerData) {
+ // Test that after we release handler data in the trap handler, it cannot
+ // recover from the specific memory access violation anymore.
+ MacroAssembler masm(nullptr, AssemblerOptions{}, CodeObjectRequired::kNo,
+ buffer_->CreateView());
+ __ Push(scratch);
+ GenerateSetThreadInWasmFlagCode(&masm);
+ __ Move(scratch, crash_address_, RelocInfo::NONE);
+ int crash_offset = __ pc_offset();
+ __ testl(MemOperand(scratch, 0), Immediate(1));
+ int recovery_offset = __ pc_offset();
+ GenerateResetThreadInWasmFlagCode(&masm);
+ __ Pop(scratch);
+ __ Ret();
+ CodeDesc desc;
+ masm.GetCode(nullptr, &desc);
+
+ trap_handler::ProtectedInstructionData protected_instruction{crash_offset,
+ recovery_offset};
+ int handler_id = trap_handler::RegisterHandlerData(
+ reinterpret_cast<Address>(desc.buffer), desc.instr_size, 1,
+ &protected_instruction);
+
+ SetupTrapHandler(GetParam());
+
+ ExecuteBuffer();
+
+ // Deregister from the trap handler. The trap handler should not do the
+ // recovery now.
+ trap_handler::ReleaseHandlerData(handler_id);
+
+ ExecuteExpectCrash(buffer_.get());
+}
+
+TEST_P(TrapHandlerTest, TestNoThreadInWasmFlag) {
+  // Test that if the thread_in_wasm flag is not set, the trap handler does
+  // not become active.
+ MacroAssembler masm(nullptr, AssemblerOptions{}, CodeObjectRequired::kNo,
+ buffer_->CreateView());
+ __ Push(scratch);
+ __ Move(scratch, crash_address_, RelocInfo::NONE);
+ int crash_offset = __ pc_offset();
+ __ testl(MemOperand(scratch, 0), Immediate(1));
+ int recovery_offset = __ pc_offset();
+ __ Pop(scratch);
+ __ Ret();
+ CodeDesc desc;
+ masm.GetCode(nullptr, &desc);
+
+ trap_handler::ProtectedInstructionData protected_instruction{crash_offset,
+ recovery_offset};
+ trap_handler::RegisterHandlerData(reinterpret_cast<Address>(desc.buffer),
+ desc.instr_size, 1, &protected_instruction);
+
+ SetupTrapHandler(GetParam());
+
+ ExecuteExpectCrash(buffer_.get());
+}
+
+TEST_P(TrapHandlerTest, TestCrashInWasmNoProtectedInstruction) {
+  // Test that if the crash in wasm happens at an instruction which is not
+  // protected, then the trap handler does not handle it.
+ MacroAssembler masm(nullptr, AssemblerOptions{}, CodeObjectRequired::kNo,
+ buffer_->CreateView());
+ __ Push(scratch);
+ GenerateSetThreadInWasmFlagCode(&masm);
+ int no_crash_offset = __ pc_offset();
+ __ Move(scratch, crash_address_, RelocInfo::NONE);
+ __ testl(MemOperand(scratch, 0), Immediate(1));
+ // Offset where the crash is not happening.
+ int recovery_offset = __ pc_offset();
+ GenerateResetThreadInWasmFlagCode(&masm);
+ __ Pop(scratch);
+ __ Ret();
+ CodeDesc desc;
+ masm.GetCode(nullptr, &desc);
+
+ trap_handler::ProtectedInstructionData protected_instruction{no_crash_offset,
+ recovery_offset};
+ trap_handler::RegisterHandlerData(reinterpret_cast<Address>(desc.buffer),
+ desc.instr_size, 1, &protected_instruction);
+
+ SetupTrapHandler(GetParam());
+
+ ExecuteExpectCrash(buffer_.get());
+}
+
+TEST_P(TrapHandlerTest, TestCrashInWasmWrongCrashType) {
+ // Test that if the crash reason is not a memory access violation, then the
+ // wasm trap handler does not handle it.
+ MacroAssembler masm(nullptr, AssemblerOptions{}, CodeObjectRequired::kNo,
+ buffer_->CreateView());
+ __ Push(scratch);
+ GenerateSetThreadInWasmFlagCode(&masm);
+ __ xorq(scratch, scratch);
+ int crash_offset = __ pc_offset();
+ __ divq(scratch);
+ // Offset where the crash is not happening.
+ int recovery_offset = __ pc_offset();
+ GenerateResetThreadInWasmFlagCode(&masm);
+ __ Pop(scratch);
+ __ Ret();
+ CodeDesc desc;
+ masm.GetCode(nullptr, &desc);
+
+ trap_handler::ProtectedInstructionData protected_instruction{crash_offset,
+ recovery_offset};
+ trap_handler::RegisterHandlerData(reinterpret_cast<Address>(desc.buffer),
+ desc.instr_size, 1, &protected_instruction);
+
+ SetupTrapHandler(GetParam());
+
+#if V8_OS_POSIX
+  // The V8 default trap handler does not register for SIGFPE, so the
+  // thread-in-wasm flag is never reset in this test. We therefore do not
+  // check the value of this flag.
+ bool check_wasm_flag = GetParam() != kDefault;
+#else
+ bool check_wasm_flag = true;
+#endif
+ ExecuteExpectCrash(buffer_.get(), check_wasm_flag);
+ if (!check_wasm_flag) {
+ // Reset the thread-in-wasm flag because it was probably not reset in the
+ // trap handler.
+ *trap_handler::GetThreadInWasmThreadLocalAddress() = 0;
+ }
+}
+
+class CodeRunner : public v8::base::Thread {
+ public:
+ CodeRunner(TrapHandlerTest* test, TestingAssemblerBuffer* buffer)
+ : Thread(Options("CodeRunner")), test_(test), buffer_(buffer) {}
+
+ void Run() override { test_->ExecuteExpectCrash(buffer_); }
+
+ private:
+ TrapHandlerTest* test_;
+ TestingAssemblerBuffer* buffer_;
+};
+
+TEST_P(TrapHandlerTest, TestCrashInOtherThread) {
+  // Test setup:
+  // The current thread enters wasm land (sets the thread_in_wasm flag).
+  // A second thread crashes at a protected instruction without having the
+  // flag set.
+ MacroAssembler masm(nullptr, AssemblerOptions{}, CodeObjectRequired::kNo,
+ buffer_->CreateView());
+ __ Push(scratch);
+ __ Move(scratch, crash_address_, RelocInfo::NONE);
+ int crash_offset = __ pc_offset();
+ __ testl(MemOperand(scratch, 0), Immediate(1));
+ int recovery_offset = __ pc_offset();
+ __ Pop(scratch);
+ __ Ret();
+ CodeDesc desc;
+ masm.GetCode(nullptr, &desc);
+
+ trap_handler::ProtectedInstructionData protected_instruction{crash_offset,
+ recovery_offset};
+ trap_handler::RegisterHandlerData(reinterpret_cast<Address>(desc.buffer),
+ desc.instr_size, 1, &protected_instruction);
+
+ SetupTrapHandler(GetParam());
+
+ CodeRunner runner(this, buffer_.get());
+ CHECK(!GetThreadInWasmFlag());
+ // Set the thread-in-wasm flag manually in this thread.
+ *trap_handler::GetThreadInWasmThreadLocalAddress() = 1;
+ runner.Start();
+ runner.Join();
+ CHECK(GetThreadInWasmFlag());
+ // Reset the thread-in-wasm flag.
+ *trap_handler::GetThreadInWasmThreadLocalAddress() = 0;
+}
+
+INSTANTIATE_TEST_CASE_P(/* no prefix */, TrapHandlerTest,
+ ::testing::Values(kDefault, kCallback),
+ PrintTrapHandlerTestParam);
+
+#undef __
+} // namespace wasm
+} // namespace internal
+} // namespace v8
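All of these tests recover from the intentional crash the same way: the last-chance handler rewrites the saved program counter to g_recovery_address before resuming the thread. A stripped-down sketch of that mechanism, assuming Linux x64 only (the macOS and Windows variants above differ); illustrative, not portable code:

    #include <signal.h>
    #include <stdint.h>
    #include <ucontext.h>

    // Assumed to point at executable code that safely returns (e.g. pop; ret).
    static uintptr_t g_recovery_address;

    static void RecoveryHandler(int, siginfo_t*, void* raw_context) {
      ucontext_t* uc = static_cast<ucontext_t*>(raw_context);
      // Redirect the faulting thread: execution resumes at the recovery stub.
      uc->uc_mcontext.gregs[REG_RIP] = static_cast<greg_t>(g_recovery_address);
    }

    void InstallRecoveryHandler() {
      struct sigaction action;
      action.sa_sigaction = RecoveryHandler;
      sigemptyset(&action.sa_mask);
      action.sa_flags = SA_SIGINFO;
      sigaction(SIGSEGV, &action, nullptr);
    }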
diff --git a/deps/v8/test/unittests/wasm/wasm-code-manager-unittest.cc b/deps/v8/test/unittests/wasm/wasm-code-manager-unittest.cc
index 5d695c8275..e90c97f3a1 100644
--- a/deps/v8/test/unittests/wasm/wasm-code-manager-unittest.cc
+++ b/deps/v8/test/unittests/wasm/wasm-code-manager-unittest.cc
@@ -8,6 +8,7 @@
#include "src/wasm/function-compiler.h"
#include "src/wasm/jump-table-assembler.h"
#include "src/wasm/wasm-code-manager.h"
+#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-memory.h"
namespace v8 {
@@ -160,15 +161,12 @@ class WasmCodeManagerTest : public TestWithContext,
using NativeModulePtr = std::unique_ptr<NativeModule>;
- NativeModulePtr AllocModule(WasmCodeManager* manager, size_t size,
- ModuleStyle style) {
+ NativeModulePtr AllocModule(size_t size, ModuleStyle style) {
std::shared_ptr<WasmModule> module(new WasmModule);
module->num_declared_functions = kNumFunctions;
bool can_request_more = style == Growable;
- ModuleEnv env(module.get(), UseTrapHandler::kNoTrapHandler,
- RuntimeExceptionSupport::kNoRuntimeExceptionSupport);
- return manager->NewNativeModule(i_isolate(), kAllWasmFeatures, size,
- can_request_more, std::move(module), env);
+ return manager()->NewNativeModule(i_isolate(), kAllWasmFeatures, size,
+ can_request_more, std::move(module));
}
WasmCode* AddCode(NativeModule* native_module, uint32_t index, size_t size) {
@@ -178,15 +176,18 @@ class WasmCodeManagerTest : public TestWithContext,
desc.buffer = exec_buff.get();
desc.instr_size = static_cast<int>(size);
return native_module->AddCode(index, desc, 0, 0, 0, {}, OwnedVector<byte>(),
- WasmCode::kOther);
+ WasmCode::kFunction, WasmCode::kOther);
}
size_t page() const { return AllocatePageSize(); }
- WasmMemoryTracker* memory_tracker() { return &memory_tracker_; }
+ WasmCodeManager* manager() {
+ return i_isolate()->wasm_engine()->code_manager();
+ }
- private:
- WasmMemoryTracker memory_tracker_;
+ void SetMaxCommittedMemory(size_t limit) {
+ manager()->SetMaxCommittedMemoryForTesting(limit);
+ }
};
INSTANTIATE_TEST_CASE_P(Parameterized, WasmCodeManagerTest,
@@ -194,32 +195,32 @@ INSTANTIATE_TEST_CASE_P(Parameterized, WasmCodeManagerTest,
PrintWasmCodeManageTestParam);
TEST_P(WasmCodeManagerTest, EmptyCase) {
- WasmCodeManager manager(memory_tracker(), 0 * page());
- CHECK_EQ(0, manager.remaining_uncommitted_code_space());
+ SetMaxCommittedMemory(0 * page());
+ CHECK_EQ(0, manager()->remaining_uncommitted_code_space());
- ASSERT_DEATH_IF_SUPPORTED(AllocModule(&manager, 1 * page(), GetParam()),
+ ASSERT_DEATH_IF_SUPPORTED(AllocModule(1 * page(), GetParam()),
"OOM in NativeModule::AllocateForCode commit");
}
TEST_P(WasmCodeManagerTest, AllocateAndGoOverLimit) {
- WasmCodeManager manager(memory_tracker(), 1 * page());
- CHECK_EQ(1 * page(), manager.remaining_uncommitted_code_space());
- NativeModulePtr native_module = AllocModule(&manager, 1 * page(), GetParam());
+ SetMaxCommittedMemory(1 * page());
+ CHECK_EQ(1 * page(), manager()->remaining_uncommitted_code_space());
+ NativeModulePtr native_module = AllocModule(1 * page(), GetParam());
CHECK(native_module);
- CHECK_EQ(0, manager.remaining_uncommitted_code_space());
+ CHECK_EQ(0, manager()->remaining_uncommitted_code_space());
uint32_t index = 0;
WasmCode* code = AddCode(native_module.get(), index++, 1 * kCodeAlignment);
CHECK_NOT_NULL(code);
- CHECK_EQ(0, manager.remaining_uncommitted_code_space());
+ CHECK_EQ(0, manager()->remaining_uncommitted_code_space());
code = AddCode(native_module.get(), index++, 3 * kCodeAlignment);
CHECK_NOT_NULL(code);
- CHECK_EQ(0, manager.remaining_uncommitted_code_space());
+ CHECK_EQ(0, manager()->remaining_uncommitted_code_space());
code = AddCode(native_module.get(), index++,
page() - 4 * kCodeAlignment - kJumpTableSize);
CHECK_NOT_NULL(code);
- CHECK_EQ(0, manager.remaining_uncommitted_code_space());
+ CHECK_EQ(0, manager()->remaining_uncommitted_code_space());
// This fails in "reservation" if we cannot extend the code space, or in
// "commit" it we can (since we hit the allocation limit in the
@@ -230,9 +231,9 @@ TEST_P(WasmCodeManagerTest, AllocateAndGoOverLimit) {
}
TEST_P(WasmCodeManagerTest, TotalLimitIrrespectiveOfModuleCount) {
- WasmCodeManager manager(memory_tracker(), 3 * page());
- NativeModulePtr nm1 = AllocModule(&manager, 2 * page(), GetParam());
- NativeModulePtr nm2 = AllocModule(&manager, 2 * page(), GetParam());
+ SetMaxCommittedMemory(3 * page());
+ NativeModulePtr nm1 = AllocModule(2 * page(), GetParam());
+ NativeModulePtr nm2 = AllocModule(2 * page(), GetParam());
CHECK(nm1);
CHECK(nm2);
WasmCode* code = AddCode(nm1.get(), 0, 2 * page() - kJumpTableSize);
@@ -241,23 +242,9 @@ TEST_P(WasmCodeManagerTest, TotalLimitIrrespectiveOfModuleCount) {
"OOM in NativeModule::AllocateForCode commit");
}
-TEST_P(WasmCodeManagerTest, DifferentHeapsApplyLimitsIndependently) {
- WasmCodeManager manager1(memory_tracker(), 1 * page());
- WasmCodeManager manager2(memory_tracker(), 2 * page());
- NativeModulePtr nm1 = AllocModule(&manager1, 1 * page(), GetParam());
- NativeModulePtr nm2 = AllocModule(&manager2, 1 * page(), GetParam());
- CHECK(nm1);
- CHECK(nm2);
- WasmCode* code = AddCode(nm1.get(), 0, 1 * page() - kJumpTableSize);
- CHECK_NOT_NULL(code);
- CHECK_EQ(0, manager1.remaining_uncommitted_code_space());
- code = AddCode(nm2.get(), 0, 1 * page() - kJumpTableSize);
- CHECK_NOT_NULL(code);
-}
-
TEST_P(WasmCodeManagerTest, GrowingVsFixedModule) {
- WasmCodeManager manager(memory_tracker(), 3 * page());
- NativeModulePtr nm = AllocModule(&manager, 1 * page(), GetParam());
+ SetMaxCommittedMemory(3 * page());
+ NativeModulePtr nm = AllocModule(1 * page(), GetParam());
size_t module_size = GetParam() == Fixed ? kMaxWasmCodeMemory : 1 * page();
size_t remaining_space_in_module = module_size - kJumpTableSize;
if (GetParam() == Fixed) {
@@ -270,29 +257,29 @@ TEST_P(WasmCodeManagerTest, GrowingVsFixedModule) {
// The module grows by one page. One page remains uncommitted.
CHECK_NOT_NULL(
AddCode(nm.get(), 0, remaining_space_in_module + kCodeAlignment));
- CHECK_EQ(manager.remaining_uncommitted_code_space(), 1 * page());
+ CHECK_EQ(manager()->remaining_uncommitted_code_space(), 1 * page());
}
}
TEST_P(WasmCodeManagerTest, CommitIncrements) {
- WasmCodeManager manager(memory_tracker(), 10 * page());
- NativeModulePtr nm = AllocModule(&manager, 3 * page(), GetParam());
+ SetMaxCommittedMemory(10 * page());
+ NativeModulePtr nm = AllocModule(3 * page(), GetParam());
WasmCode* code = AddCode(nm.get(), 0, kCodeAlignment);
CHECK_NOT_NULL(code);
- CHECK_EQ(manager.remaining_uncommitted_code_space(), 9 * page());
+ CHECK_EQ(manager()->remaining_uncommitted_code_space(), 9 * page());
code = AddCode(nm.get(), 1, 2 * page());
CHECK_NOT_NULL(code);
- CHECK_EQ(manager.remaining_uncommitted_code_space(), 7 * page());
+ CHECK_EQ(manager()->remaining_uncommitted_code_space(), 7 * page());
code = AddCode(nm.get(), 2, page() - kCodeAlignment - kJumpTableSize);
CHECK_NOT_NULL(code);
- CHECK_EQ(manager.remaining_uncommitted_code_space(), 7 * page());
+ CHECK_EQ(manager()->remaining_uncommitted_code_space(), 7 * page());
}
TEST_P(WasmCodeManagerTest, Lookup) {
- WasmCodeManager manager(memory_tracker(), 2 * page());
+ SetMaxCommittedMemory(2 * page());
- NativeModulePtr nm1 = AllocModule(&manager, 1 * page(), GetParam());
- NativeModulePtr nm2 = AllocModule(&manager, 1 * page(), GetParam());
+ NativeModulePtr nm1 = AllocModule(1 * page(), GetParam());
+ NativeModulePtr nm2 = AllocModule(1 * page(), GetParam());
WasmCode* code1_0 = AddCode(nm1.get(), 0, kCodeAlignment);
CHECK_EQ(nm1.get(), code1_0->native_module());
WasmCode* code1_1 = AddCode(nm1.get(), 1, kCodeAlignment);
@@ -307,63 +294,41 @@ TEST_P(WasmCodeManagerTest, Lookup) {
// we know the manager object is allocated here, so we shouldn't
// find any WasmCode* associated with that ptr.
- WasmCode* not_found = manager.LookupCode(reinterpret_cast<Address>(&manager));
+ WasmCode* not_found =
+ manager()->LookupCode(reinterpret_cast<Address>(manager()));
CHECK_NULL(not_found);
- WasmCode* found = manager.LookupCode(code1_0->instruction_start());
+ WasmCode* found = manager()->LookupCode(code1_0->instruction_start());
CHECK_EQ(found, code1_0);
- found = manager.LookupCode(code2_1->instruction_start() +
- (code2_1->instructions().size() / 2));
+ found = manager()->LookupCode(code2_1->instruction_start() +
+ (code2_1->instructions().size() / 2));
CHECK_EQ(found, code2_1);
- found = manager.LookupCode(code2_1->instruction_start() +
- code2_1->instructions().size() - 1);
+ found = manager()->LookupCode(code2_1->instruction_start() +
+ code2_1->instructions().size() - 1);
CHECK_EQ(found, code2_1);
- found = manager.LookupCode(code2_1->instruction_start() +
- code2_1->instructions().size());
+ found = manager()->LookupCode(code2_1->instruction_start() +
+ code2_1->instructions().size());
CHECK_NULL(found);
Address mid_code1_1 =
code1_1->instruction_start() + (code1_1->instructions().size() / 2);
- CHECK_EQ(code1_1, manager.LookupCode(mid_code1_1));
+ CHECK_EQ(code1_1, manager()->LookupCode(mid_code1_1));
nm1.reset();
- CHECK_NULL(manager.LookupCode(mid_code1_1));
-}
-
-TEST_P(WasmCodeManagerTest, MultiManagerLookup) {
- WasmCodeManager manager1(memory_tracker(), 2 * page());
- WasmCodeManager manager2(memory_tracker(), 2 * page());
-
- NativeModulePtr nm1 = AllocModule(&manager1, 1 * page(), GetParam());
- NativeModulePtr nm2 = AllocModule(&manager2, 1 * page(), GetParam());
-
- WasmCode* code1_0 = AddCode(nm1.get(), 0, kCodeAlignment);
- CHECK_EQ(nm1.get(), code1_0->native_module());
- WasmCode* code1_1 = AddCode(nm1.get(), 1, kCodeAlignment);
- WasmCode* code2_0 = AddCode(nm2.get(), 0, kCodeAlignment);
- WasmCode* code2_1 = AddCode(nm2.get(), 1, kCodeAlignment);
- CHECK_EQ(nm2.get(), code2_1->native_module());
-
- CHECK_EQ(0, code1_0->index());
- CHECK_EQ(1, code1_1->index());
- CHECK_EQ(0, code2_0->index());
- CHECK_EQ(1, code2_1->index());
-
- CHECK_EQ(code1_0, manager1.LookupCode(code1_0->instruction_start()));
- CHECK_NULL(manager2.LookupCode(code1_0->instruction_start()));
+ CHECK_NULL(manager()->LookupCode(mid_code1_1));
}
TEST_P(WasmCodeManagerTest, LookupWorksAfterRewrite) {
- WasmCodeManager manager(memory_tracker(), 2 * page());
+ SetMaxCommittedMemory(2 * page());
- NativeModulePtr nm1 = AllocModule(&manager, 1 * page(), GetParam());
+ NativeModulePtr nm1 = AllocModule(1 * page(), GetParam());
WasmCode* code0 = AddCode(nm1.get(), 0, kCodeAlignment);
WasmCode* code1 = AddCode(nm1.get(), 1, kCodeAlignment);
CHECK_EQ(0, code0->index());
CHECK_EQ(1, code1->index());
- CHECK_EQ(code1, manager.LookupCode(code1->instruction_start()));
+ CHECK_EQ(code1, manager()->LookupCode(code1->instruction_start()));
WasmCode* code1_1 = AddCode(nm1.get(), 1, kCodeAlignment);
CHECK_EQ(1, code1_1->index());
- CHECK_EQ(code1, manager.LookupCode(code1->instruction_start()));
- CHECK_EQ(code1_1, manager.LookupCode(code1_1->instruction_start()));
+ CHECK_EQ(code1, manager()->LookupCode(code1->instruction_start()));
+ CHECK_EQ(code1_1, manager()->LookupCode(code1_1->instruction_start()));
}
} // namespace wasm_heap_unittest
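The mechanical change running through this file: WasmCodeManager is no longer constructed per test with its own budget but fetched from the engine, and SetMaxCommittedMemoryForTesting() stands in for the old constructor argument; that single shared manager is also why the two multi-manager tests are deleted above. A toy sketch of that shape, with illustrative names:

    #include <cstddef>

    class CodeManager {
     public:
      // Test-only replacement for the old per-instance constructor limit.
      void SetMaxCommittedMemoryForTesting(size_t limit) { remaining_ = limit; }
      size_t remaining_uncommitted_code_space() const { return remaining_; }
      bool Commit(size_t size) {
        if (size > remaining_) return false;  // The real manager OOMs here.
        remaining_ -= size;
        return true;
      }

     private:
      size_t remaining_ = 0;
    };

    CodeManager* manager() {
      // One shared instance, analogous to engine()->code_manager(); with a
      // single manager there are no independent per-manager limits to test.
      static CodeManager instance;
      return &instance;
    }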
diff --git a/deps/v8/test/unittests/wasm/wasm-compiler-unittest.cc b/deps/v8/test/unittests/wasm/wasm-compiler-unittest.cc
new file mode 100644
index 0000000000..3cca4bc55c
--- /dev/null
+++ b/deps/v8/test/unittests/wasm/wasm-compiler-unittest.cc
@@ -0,0 +1,71 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "test/unittests/test-utils.h"
+
+#include "src/compiler/linkage.h"
+#include "src/compiler/wasm-compiler.h"
+#include "src/machine-type.h"
+#include "src/signature.h"
+#include "src/wasm/value-type.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+class WasmCallDescriptorTest : public TestWithZone {};
+
+TEST_F(WasmCallDescriptorTest, TestAnyRefIsGrouped) {
+ constexpr size_t kMaxCount = 30;
+ ValueType params[kMaxCount];
+
+ for (size_t i = 0; i < kMaxCount; i += 2) {
+ params[i] = ValueType::kWasmAnyRef;
+ CHECK_LT(i + 1, kMaxCount);
+ params[i + 1] = ValueType::kWasmI32;
+ }
+
+ for (size_t count = 1; count <= kMaxCount; ++count) {
+ FunctionSig sig(/*return_count=*/0, count, params);
+ compiler::CallDescriptor* desc =
+ compiler::GetWasmCallDescriptor(zone(), &sig);
+
+ // The WasmInstance is the implicit first parameter.
+ CHECK_EQ(count + 1, desc->ParameterCount());
+
+ bool has_untagged_stack_param = false;
+ bool has_tagged_register_param = false;
+ int max_tagged_stack_location = std::numeric_limits<int>::min();
+ int min_untagged_stack_location = std::numeric_limits<int>::max();
+ for (size_t i = 1; i < desc->ParameterCount(); ++i) {
+ // InputLocation i + 1, because target is the first input.
+ compiler::LinkageLocation location = desc->GetInputLocation(i + 1);
+ if (desc->GetParameterType(i).IsTagged()) {
+ if (location.IsRegister()) {
+ has_tagged_register_param = true;
+ } else {
+ CHECK(location.IsCallerFrameSlot());
+ max_tagged_stack_location =
+ std::max(max_tagged_stack_location, location.AsCallerFrameSlot());
+ }
+    } else {  // !IsTagged()
+ if (location.IsCallerFrameSlot()) {
+ has_untagged_stack_param = true;
+ min_untagged_stack_location = std::min(min_untagged_stack_location,
+ location.AsCallerFrameSlot());
+ } else {
+ CHECK(location.IsRegister());
+ }
+ }
+ }
+ // There should never be a tagged parameter in a register and an untagged
+ // parameter on the stack at the same time.
+ CHECK_EQ(false, has_tagged_register_param && has_untagged_stack_param);
+ CHECK_LT(max_tagged_stack_location, min_untagged_stack_location);
+ }
+}
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
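The invariant the new test enforces is that, on the stack, every tagged (GC-visible) parameter slot precedes every untagged one, so the GC can scan a single contiguous region. Phrased as a standalone predicate, with Param as a simplified stand-in for V8's LinkageLocation and MachineType:

    #include <algorithm>
    #include <limits>
    #include <vector>

    struct Param {
      bool tagged;    // Needs to be scanned by the GC.
      bool on_stack;  // Otherwise passed in a register.
      int slot;       // Caller frame slot; meaningful only when on_stack.
    };

    bool TaggedStackSlotsAreGrouped(const std::vector<Param>& params) {
      int max_tagged = std::numeric_limits<int>::min();
      int min_untagged = std::numeric_limits<int>::max();
      for (const Param& p : params) {
        if (!p.on_stack) continue;
        if (p.tagged) {
          max_tagged = std::max(max_tagged, p.slot);
        } else {
          min_untagged = std::min(min_untagged, p.slot);
        }
      }
      // Grouping lets the GC treat one contiguous stack region as tagged.
      return max_tagged < min_untagged;
    }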
diff --git a/deps/v8/test/wasm-js/BUILD.gn b/deps/v8/test/wasm-js/BUILD.gn
new file mode 100644
index 0000000000..2f7b8c31fb
--- /dev/null
+++ b/deps/v8/test/wasm-js/BUILD.gn
@@ -0,0 +1,17 @@
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+group("v8_wasm_js") {
+ testonly = true
+
+ data_deps = [
+ "../..:d8",
+ "../../tools:v8_testrunner",
+ ]
+
+ data = [
+ "./",
+ "../mjsunit/mjsunit.js",
+ ]
+}
diff --git a/deps/v8/test/wasm-js/LICENSE.testharness b/deps/v8/test/wasm-js/LICENSE.testharness
new file mode 100644
index 0000000000..45896e6be2
--- /dev/null
+++ b/deps/v8/test/wasm-js/LICENSE.testharness
@@ -0,0 +1,30 @@
+W3C 3-clause BSD License
+
+http://www.w3.org/Consortium/Legal/2008/03-bsd-license.html
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+* Redistributions of works must retain the original copyright notice,
+ this list of conditions and the following disclaimer.
+
+* Redistributions in binary form must reproduce the original copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+* Neither the name of the W3C nor the names of its contributors may be
+ used to endorse or promote products derived from this work without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/deps/v8/test/wasm-js/testcfg.py b/deps/v8/test/wasm-js/testcfg.py
new file mode 100644
index 0000000000..b0763e008a
--- /dev/null
+++ b/deps/v8/test/wasm-js/testcfg.py
@@ -0,0 +1,74 @@
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+import re
+
+from testrunner.local import testsuite
+from testrunner.objects import testcase
+
+ANY_JS = ".any.js"
+WPT_ROOT = "/wasm/jsapi/"
+META_SCRIPT_REGEXP = re.compile(r"META:\s*script=(.*)")
+
+class TestSuite(testsuite.TestSuite):
+ def __init__(self, *args, **kwargs):
+ super(TestSuite, self).__init__(*args, **kwargs)
+ self.testroot = os.path.join(self.root, "data", "test", "js-api")
+ self.mjsunit_js = os.path.join(os.path.dirname(self.root), "mjsunit",
+ "mjsunit.js")
+
+ def ListTests(self):
+ tests = []
+ for dirname, dirs, files in os.walk(self.testroot):
+ for dotted in [x for x in dirs if x.startswith(".")]:
+ dirs.remove(dotted)
+ dirs.sort()
+ files.sort()
+ for filename in files:
+ if (filename.endswith(ANY_JS)):
+ fullpath = os.path.join(dirname, filename)
+ relpath = fullpath[len(self.testroot) + 1 : -len(ANY_JS)]
+ testname = relpath.replace(os.path.sep, "/")
+ test = self._create_test(testname)
+ tests.append(test)
+ return tests
+
+ def _test_class(self):
+ return TestCase
+
+
+class TestCase(testcase.D8TestCase):
+ def _get_files_params(self):
+ files = [os.path.join(self.suite.mjsunit_js),
+ os.path.join(self.suite.root, "testharness.js")]
+
+ source = self.get_source()
+ for script in META_SCRIPT_REGEXP.findall(source):
+ if script.startswith(WPT_ROOT):
+ # Matched an absolute path, strip the root and replace it with our
+ # local root.
+ script = os.path.join(self.suite.testroot, script[len(WPT_ROOT):])
+ elif not script.startswith("/"):
+ # Matched a relative path, prepend this test's directory.
+ thisdir = os.path.dirname(self._get_source_path())
+ script = os.path.join(thisdir, script)
+ else:
+ raise Exception("Unexpected absolute path for script: \"%s\"" % script);
+
+ files.append(script)
+
+ files.extend([
+ self._get_source_path(),
+ os.path.join(self.suite.root, "testharness-after.js")
+ ])
+ return files
+
+ def _get_source_path(self):
+ # All tests are named `path/name.any.js`
+ return os.path.join(self.suite.testroot, self.path + ANY_JS)
+
+
+def GetSuite(*args, **kwargs):
+ return TestSuite(*args, **kwargs)
diff --git a/deps/v8/test/wasm-js/testharness-after.js b/deps/v8/test/wasm-js/testharness-after.js
new file mode 100644
index 0000000000..9520be1c9b
--- /dev/null
+++ b/deps/v8/test/wasm-js/testharness-after.js
@@ -0,0 +1,16 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Uses lastPromise defined in testharness.js
+
+assertPromiseResult(lastPromise, _ => {
+ if (failures.length > 0) {
+ let message = 'Some tests FAILED:\n';
+ for (const failure of failures) {
+ message += ` ${failure}\n`;
+ }
+
+ failWithMessage(message);
+ }
+});
diff --git a/deps/v8/test/wasm-js/testharness.js b/deps/v8/test/wasm-js/testharness.js
new file mode 100644
index 0000000000..904a973519
--- /dev/null
+++ b/deps/v8/test/wasm-js/testharness.js
@@ -0,0 +1,148 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Implementations of some functions from testharness.js
+// See https://github.com/web-platform-tests/wpt/blob/master/resources/testharness.js
+// Licensed as follows:
+//
+// Distributed under both the W3C Test Suite License [1] and the W3C
+// 3-clause BSD License [2]. To contribute to a W3C Test Suite, see the
+// policies and contribution forms [3].
+// [1] http://www.w3.org/Consortium/Legal/2008/04-testsuite-license
+// [2] http://www.w3.org/Consortium/Legal/2008/03-bsd-license
+// [3] http://www.w3.org/2004/10/27-testcases
+
+const failures = [];
+
+let lastPromise = Promise.resolve();
+
+function test(func, description) {
+ let maybeErr;
+ try { func({unreached_func: unreached_func}); }
+ catch(e) { maybeErr = e; }
+ if (typeof maybeErr !== 'undefined') {
+ console.log(`${description}: FAIL. ${maybeErr}`);
+ failures.push(description);
+ } else {
+ console.log(`${description}: PASS.`);
+ }
+}
+
+function promise_test(func, description) {
+ lastPromise = lastPromise.then(func)
+ .then(_ => {
+ console.log(`${description}: PASS.`);
+ })
+ .catch(err => {
+ console.log(`${description}: FAIL. ${err}`);
+ failures.push(description);
+ });
+}
+
+const assert_true = assertEquals.bind(null, true);
+const assert_false = assertEquals.bind(null, false);
+
+function same_value(x, y) {
+ if (y !== y) {
+ // NaN case
+    return x !== x;
+ }
+ if (x === 0 && y === 0) {
+ // Distinguish +0 and -0
+ return 1/x === 1/y;
+ }
+ return x === y;
+}
+
+function assert_equals(expected, found, description) {
+ if (typeof found != typeof expected) {
+ assert_true(false, "assert_equals", description,
+ "expected (" + typeof expected + ") ${expected} but got (" +
+ typeof found + ") ${found}", {expected:expected, found:found});
+ }
+ assert_true(same_value(found, expected), "assert_equals", description,
+ "expected ${expected} but got ${found}",
+ {expected:expected, found:found});
+}
+
+function assert_not_equals(expected, found, description) {
+ assert_true(!same_value(found, expected), "assert_not_equals", description,
+ "got disallowed value ${found}", {found:found});
+}
+
+function assert_array_equals(actual, expected, description) {
+ assert_true(
+ typeof actual === 'object' && actual !== null && 'length' in actual,
+ 'assert_array_equals', description, 'value is ${actual}, expected array',
+ {actual: actual});
+ assert_true(
+ actual.length === expected.length, 'assert_array_equals', description,
+ 'lengths differ, expected ${expected} got ${actual}',
+ {expected: expected.length, actual: actual.length});
+
+ for (let i = 0; i < actual.length; i++) {
+ assert_true(
+ actual.hasOwnProperty(i) === expected.hasOwnProperty(i),
+ 'assert_array_equals', description,
+ 'property ${i}, property expected to be ${expected} but was ${actual}',
+ {
+ i: i,
+ expected: expected.hasOwnProperty(i) ? 'present' : 'missing',
+ actual: actual.hasOwnProperty(i) ? 'present' : 'missing'
+ });
+ assert_true(
+ same_value(expected[i], actual[i]), 'assert_array_equals', description,
+ 'property ${i}, expected ${expected} but got ${actual}',
+ {i: i, expected: expected[i], actual: actual[i]});
+ }
+}
+
+function unreached_func(msg) {
+ return function trap() {
+ assert_unreached(msg);
+ };
+}
+
+function assert_unreached(description) {
+ throw new Error(`unreachable:\n${description}`);
+}
+
+function format_value(s) {
+ // TODO
+ try {
+ return String(s);
+ } catch(e) {
+    return `<String(s) for type ${typeof s} threw>`;
+ }
+}
+
+function promise_rejects(test, expected, promise, description) {
+ return promise
+ .then(() => assert_unreached('Should have rejected: ' + description))
+ .catch(function(e) {
+ assert_throws(expected, function() {
+ throw e;
+ }, description);
+ });
+}
+
+function assert_class_string(object, class_string, description) {
+ assert_equals(
+ {}.toString.call(object), '[object ' + class_string + ']', description);
+}
+
+function assert_throws(code, func, description) {
+ try {
+ func();
+ } catch (e) {
+ assert_true(e.name === code.name, "expected exception " + code.name + ", got " + e.name);
+ return;
+ }
+ assert_true(false, "expected exception " + code.name + ", no exception thrown");
+}
+
+function setup(func) {
+ // TODO need to do anything fancier here?
+ func();
+}
diff --git a/deps/v8/test/wasm-js/wasm-js.status b/deps/v8/test/wasm-js/wasm-js.status
new file mode 100644
index 0000000000..22e5457ae1
--- /dev/null
+++ b/deps/v8/test/wasm-js/wasm-js.status
@@ -0,0 +1,28 @@
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+[
+[ALWAYS, {
+ # https://bugs.chromium.org/p/v8/issues/detail?id=8319
+ 'module/customSections': [FAIL],
+}], # ALWAYS
+
+[ALWAYS, {
+ # https://bugs.chromium.org/p/v8/issues/detail?id=8633
+ 'limits': [SKIP],
+}], # ALWAYS
+
+['arch == s390 or arch == s390x or system == aix', {
+ # https://bugs.chromium.org/p/v8/issues/detail?id=8402
+ 'instance/constructor': [SKIP],
+ 'constructor/instantiate': [SKIP],
+}], # 'arch == s390 or arch == s390x or system == aix'
+
+##############################################################################
+['lite_mode', {
+ # TODO(v8:7777): Re-enable once wasm is supported in jitless mode.
+ '*': [SKIP],
+}], # lite_mode
+
+]
diff --git a/deps/v8/test/wasm-spec-tests/tests.tar.gz.sha1 b/deps/v8/test/wasm-spec-tests/tests.tar.gz.sha1
index 6bf33e90fb..b676370dad 100644
--- a/deps/v8/test/wasm-spec-tests/tests.tar.gz.sha1
+++ b/deps/v8/test/wasm-spec-tests/tests.tar.gz.sha1
@@ -1 +1 @@
-0df32ecb1ad4141cc082f2b5575d8f88f3ae8c53 \ No newline at end of file
+6a2c7fb6e2a4ead5d261c9fdac77d3129268848e \ No newline at end of file
diff --git a/deps/v8/test/wasm-spec-tests/wasm-spec-tests.status b/deps/v8/test/wasm-spec-tests/wasm-spec-tests.status
index c97ac33824..e61171b6b2 100644
--- a/deps/v8/test/wasm-spec-tests/wasm-spec-tests.status
+++ b/deps/v8/test/wasm-spec-tests/wasm-spec-tests.status
@@ -43,4 +43,11 @@
'tests/f64': [SKIP],
}], # 'arch == s390 or arch == s390x'
+##############################################################################
+['lite_mode', {
+ # TODO(v8:7777): Re-enable once wasm is supported in jitless mode.
+ '*': [SKIP],
+}], # lite_mode
+
+
]
diff --git a/deps/v8/test/webkit/array-holes-expected.txt b/deps/v8/test/webkit/array-holes-expected.txt
index 05e095f386..992667f9ad 100644
--- a/deps/v8/test/webkit/array-holes-expected.txt
+++ b/deps/v8/test/webkit/array-holes-expected.txt
@@ -59,7 +59,7 @@ PASS showHoles([0, , 2].concat([3, , 5])) is '[0, peekaboo, 2, 3, peekaboo, 5]'
PASS showHoles([0, , 2, 3].reverse()) is '[3, 2, peekaboo, 0]'
PASS a = [0, , 2, 3]; a.shift(); showHoles(a) is '[peekaboo, 2, 3]'
PASS showHoles([0, , 2, 3].slice(0, 3)) is '[0, peekaboo, 2]'
-PASS showHoles([0, , 2, 3].sort()) is '[0, 2, 3, hole]'
+PASS showHoles([0, , 2, 3].sort()) is '[0, 2, 3, peekaboo]'
PASS showHoles([0, undefined, 2, 3].sort()) is '[0, 2, 3, undefined]'
PASS a = [0, , 2, 3]; a.splice(2, 3, 5, 6); showHoles(a) is '[0, hole, 5, 6]'
PASS a = [0, , 2, 3]; a.unshift(4); showHoles(a) is '[4, 0, peekaboo, 2, 3]'
diff --git a/deps/v8/test/webkit/array-holes.js b/deps/v8/test/webkit/array-holes.js
index 007626b847..182492c1c8 100644
--- a/deps/v8/test/webkit/array-holes.js
+++ b/deps/v8/test/webkit/array-holes.js
@@ -110,7 +110,7 @@ shouldBe("showHoles([0, , 2].concat([3, , 5]))", "'[0, peekaboo, 2, 3, peekaboo,
shouldBe("showHoles([0, , 2, 3].reverse())", "'[3, 2, peekaboo, 0]'");
shouldBe("a = [0, , 2, 3]; a.shift(); showHoles(a)", "'[peekaboo, 2, 3]'");
shouldBe("showHoles([0, , 2, 3].slice(0, 3))", "'[0, peekaboo, 2]'");
-shouldBe("showHoles([0, , 2, 3].sort())", "'[0, 2, 3, hole]'");
+shouldBe("showHoles([0, , 2, 3].sort())", "'[0, 2, 3, peekaboo]'");
shouldBe("showHoles([0, undefined, 2, 3].sort())", "'[0, 2, 3, undefined]'");
shouldBe("a = [0, , 2, 3]; a.splice(2, 3, 5, 6); showHoles(a)", "'[0, hole, 5, 6]'");
shouldBe("a = [0, , 2, 3]; a.unshift(4); showHoles(a)", "'[4, 0, peekaboo, 2, 3]'");
diff --git a/deps/v8/test/webkit/class-syntax-semicolon-expected.txt b/deps/v8/test/webkit/class-syntax-semicolon-expected.txt
index 488054a582..c45eabac9c 100644
--- a/deps/v8/test/webkit/class-syntax-semicolon-expected.txt
+++ b/deps/v8/test/webkit/class-syntax-semicolon-expected.txt
@@ -3,12 +3,9 @@ Tests for ES6 class syntax containing semicolon in the class body
On success, you will see a series of "PASS" messages, followed by "TEST COMPLETE".
-PASS class A { foo;() { } } threw exception SyntaxError: Unexpected token ;.
PASS class A { foo() ; { } } threw exception SyntaxError: Unexpected token ;.
-PASS class A { get ; foo() { } } threw exception SyntaxError: Unexpected token ;.
PASS class A { get foo;() { } } threw exception SyntaxError: Unexpected token ;.
PASS class A { get foo() ; { } } threw exception SyntaxError: Unexpected token ;.
-PASS class A { set ; foo(x) { } } threw exception SyntaxError: Unexpected token ;.
PASS class A { set foo;(x) { } } threw exception SyntaxError: Unexpected token ;.
PASS class A { set foo(x) ; { } } threw exception SyntaxError: Unexpected token ;.
PASS class A { ; } did not throw exception.
diff --git a/deps/v8/test/webkit/class-syntax-semicolon.js b/deps/v8/test/webkit/class-syntax-semicolon.js
index d4a0d9bd83..88609dcbc7 100644
--- a/deps/v8/test/webkit/class-syntax-semicolon.js
+++ b/deps/v8/test/webkit/class-syntax-semicolon.js
@@ -23,12 +23,9 @@
description('Tests for ES6 class syntax containing semicolon in the class body');
-shouldThrow("class A { foo;() { } }", "'SyntaxError: Unexpected token ;'");
shouldThrow("class A { foo() ; { } }", "'SyntaxError: Unexpected token ;'");
-shouldThrow("class A { get ; foo() { } }", "'SyntaxError: Unexpected token ;'");
shouldThrow("class A { get foo;() { } }", "'SyntaxError: Unexpected token ;'");
shouldThrow("class A { get foo() ; { } }", "'SyntaxError: Unexpected token ;'");
-shouldThrow("class A { set ; foo(x) { } }", "'SyntaxError: Unexpected token ;'");
shouldThrow("class A { set foo;(x) { } }", "'SyntaxError: Unexpected token ;'");
shouldThrow("class A { set foo(x) ; { } }", "'SyntaxError: Unexpected token ;'");
diff --git a/deps/v8/test/webkit/fast/js/kde/delete-expected.txt b/deps/v8/test/webkit/fast/js/kde/delete-expected.txt
index 0c87f6ddab..f59ae9fed2 100644
--- a/deps/v8/test/webkit/fast/js/kde/delete-expected.txt
+++ b/deps/v8/test/webkit/fast/js/kde/delete-expected.txt
@@ -27,7 +27,7 @@ On success, you will see a series of "PASS" messages, followed by "TEST COMPLETE
PASS a = 1; delete a; is true
-PASS delete nonexistant; is true
+PASS delete nonexistent; is true
PASS delete NaN is false
PASS successfullyParsed is true
diff --git a/deps/v8/test/webkit/fast/js/kde/delete.js b/deps/v8/test/webkit/fast/js/kde/delete.js
index 8aeb0d09e8..3224f2aa7b 100644
--- a/deps/v8/test/webkit/fast/js/kde/delete.js
+++ b/deps/v8/test/webkit/fast/js/kde/delete.js
@@ -23,5 +23,5 @@
description("KDE JS Test");
shouldBe("a = 1; delete a;", "true");
-shouldBe("delete nonexistant;", "true");
+shouldBe("delete nonexistent;", "true");
shouldBe("delete NaN", "false");
diff --git a/deps/v8/test/webkit/fast/js/kde/exceptions.js b/deps/v8/test/webkit/fast/js/kde/exceptions.js
index 73b7b57fb4..a96011e86e 100644
--- a/deps/v8/test/webkit/fast/js/kde/exceptions.js
+++ b/deps/v8/test/webkit/fast/js/kde/exceptions.js
@@ -58,7 +58,7 @@ function testReferenceError()
var err = "noerror";
var caught = false;
try {
- var dummy = nonexistant; // throws reference error
+ var dummy = nonexistent; // throws reference error
} catch (e) {
caught = true;
err = e.name;
diff --git a/deps/v8/test/webkit/fast/js/toString-overrides-expected.txt b/deps/v8/test/webkit/fast/js/toString-overrides-expected.txt
index 16706e43dd..4dfe4f18e0 100644
--- a/deps/v8/test/webkit/fast/js/toString-overrides-expected.txt
+++ b/deps/v8/test/webkit/fast/js/toString-overrides-expected.txt
@@ -28,10 +28,10 @@ On success, you will see a series of "PASS" messages, followed by "TEST COMPLETE
PASS [1].toString() is '1'
PASS [1].toLocaleString() is 'toLocaleString'
-FAIL [1].toLocaleString() should be 1. Threw exception TypeError: string "invalid" is not a function
+FAIL [1].toLocaleString() should be 1. Threw exception TypeError: invalid is not a function
PASS [/r/].toString() is 'toString2'
PASS [/r/].toLocaleString() is 'toLocaleString2'
-FAIL [/r/].toLocaleString() should be toString2. Threw exception TypeError: string "invalid" is not a function
+FAIL [/r/].toLocaleString() should be toString2. Threw exception TypeError: invalid is not a function
PASS caught is true
PASS successfullyParsed is true
diff --git a/deps/v8/test/webkit/fast/regex/toString-expected.txt b/deps/v8/test/webkit/fast/regex/toString-expected.txt
index 2001708838..745e9ee6f8 100644
--- a/deps/v8/test/webkit/fast/regex/toString-expected.txt
+++ b/deps/v8/test/webkit/fast/regex/toString-expected.txt
@@ -43,13 +43,13 @@ PASS testForwardSlash("x/x/x", "x\/x\/x"); is true
PASS testForwardSlash("x\/x/x", "x\/x\/x"); is true
PASS testForwardSlash("x/x\/x", "x\/x\/x"); is true
PASS testForwardSlash("x\/x\/x", "x\/x\/x"); is true
-FAIL testLineTerminator("\n"); should be false. Was true.
+PASS testLineTerminator("\n"); is false
PASS testLineTerminator("\\n"); is false
-FAIL testLineTerminator("\r"); should be false. Was true.
+PASS testLineTerminator("\r"); is false
PASS testLineTerminator("\\r"); is false
-FAIL testLineTerminator("\u2028"); should be false. Was true.
+PASS testLineTerminator("\u2028"); is false
PASS testLineTerminator("\\u2028"); is false
-FAIL testLineTerminator("\u2029"); should be false. Was true.
+PASS testLineTerminator("\u2029"); is false
PASS testLineTerminator("\\u2029"); is false
FAIL RegExp('[/]').source should be [/]. Was [\/].
PASS RegExp('\\[/]').source is '\\[\\/]'
diff --git a/deps/v8/test/webkit/nested-functions.js b/deps/v8/test/webkit/nested-functions.js
index b421143cf8..511e242e87 100644
--- a/deps/v8/test/webkit/nested-functions.js
+++ b/deps/v8/test/webkit/nested-functions.js
@@ -32,7 +32,7 @@ var passed4 = false;
// Test cases deliberately nested!
function runTests() {
- // Formating of these functions is significant for regression
+ // Formatting of these functions is significant for regression
// testing; functions with small bodies are not cached!
function test1() { return this; }
function test2() { "use strict"; return this; }
diff --git a/deps/v8/test/webkit/propertyIsEnumerable-expected.txt b/deps/v8/test/webkit/propertyIsEnumerable-expected.txt
index e3fd215a08..086697c3b7 100644
--- a/deps/v8/test/webkit/propertyIsEnumerable-expected.txt
+++ b/deps/v8/test/webkit/propertyIsEnumerable-expected.txt
@@ -28,7 +28,7 @@ On success, you will see a series of "PASS" messages, followed by "TEST COMPLETE
PASS a.propertyIsEnumerable('length') is false
PASS a.propertyIsEnumerable ('foo') is true
-PASS a.propertyIsEnumerable ('non-existant') is false
+PASS a.propertyIsEnumerable ('non-existent') is false
PASS global.propertyIsEnumerable ('aVarDecl') is true
PASS global.propertyIsEnumerable ('aFunctionDecl') is true
PASS global.propertyIsEnumerable ('Math') is false
diff --git a/deps/v8/test/webkit/propertyIsEnumerable.js b/deps/v8/test/webkit/propertyIsEnumerable.js
index e6dc22f64b..eecdba6b5d 100644
--- a/deps/v8/test/webkit/propertyIsEnumerable.js
+++ b/deps/v8/test/webkit/propertyIsEnumerable.js
@@ -33,7 +33,7 @@ function aFunctionDecl(){}
var global = this;
shouldBeFalse("a.propertyIsEnumerable('length')");
shouldBeTrue("a.propertyIsEnumerable ('foo')");
-shouldBeFalse("a.propertyIsEnumerable ('non-existant')");
+shouldBeFalse("a.propertyIsEnumerable ('non-existent')");
shouldBeTrue("global.propertyIsEnumerable ('aVarDecl')");
shouldBeTrue("global.propertyIsEnumerable ('aFunctionDecl')");
diff --git a/deps/v8/test/webkit/resources/JSON-stringify.js b/deps/v8/test/webkit/resources/JSON-stringify.js
index 8ea24c4fd9..aa4e9c8eaa 100644
--- a/deps/v8/test/webkit/resources/JSON-stringify.js
+++ b/deps/v8/test/webkit/resources/JSON-stringify.js
@@ -82,19 +82,19 @@ function createTests() {
});
result.push(function (jsonObject){
var value = new Number(1);
- value.valueOf = function() { return 2; }
+ value.valueOf = function() { return 2; };
return jsonObject.stringify(value);
});
result[result.length - 1].expected = '2';
result.push(function (jsonObject){
var value = new Boolean(true);
- value.valueOf = function() { return 2; }
+ value.valueOf = function() { return false; };
return jsonObject.stringify(value);
});
- result[result.length - 1].expected = '2';
+ result[result.length - 1].expected = 'true';
result.push(function (jsonObject){
var value = new String("fail");
- value.toString = function() { return "converted string"; }
+ value.toString = function() { return "converted string"; };
return jsonObject.stringify(value);
});
result[result.length - 1].expected = '"converted string"';
@@ -114,10 +114,13 @@ function createTests() {
result.push(function (jsonObject){
return jsonObject.stringify({toJSON: Date.prototype.toJSON, toISOString: function(){ return "custom toISOString"; }});
});
+ // Note: JSC fails the following test. Every other engine matches the spec.
+ // This is also covered by the test in
+ // https://github.com/v8/v8/blob/fc664bda1725de0412f6d197fda9503d6e6e122e/test/mjsunit/json.js#L78-L80
result.push(function (jsonObject){
return jsonObject.stringify({toJSON: Date.prototype.toJSON, toISOString: function(){ return {}; }});
});
- result[result.length - 1].throws = true;
+ result[result.length - 1].expected = '{}';
result.push(function (jsonObject){
return jsonObject.stringify({toJSON: Date.prototype.toJSON, toISOString: function(){ throw "An exception"; }});
});
@@ -162,6 +165,7 @@ function createTests() {
jsonObject.stringify([1,2,3,4,5], function(k,v){allString = allString && (typeof k == "string"); return v});
return allString;
});
+ result[result.length - 1].expected = true;
result.push(function (jsonObject){
var allString = true;
var array = [];
@@ -332,9 +336,11 @@ function createTests() {
result.push(function (jsonObject){
return jsonObject.stringify(objectWithSideEffectGetter);
});
+ result[result.length - 1].expected = '{}';
result.push(function (jsonObject){
return jsonObject.stringify(objectWithSideEffectGetterAndProto);
});
+ result[result.length - 1].expected = '{}';
result.push(function (jsonObject){
return jsonObject.stringify(arrayWithSideEffectGetter);
});
@@ -351,7 +357,7 @@ function createTests() {
jsonObject.stringify([1,2,3,,,,4,5,6], replaceFunc);
return replaceTracker;
});
- result[result.length - 1].expected = '(string)[1,2,3,null,null,null,4,5,6];0(number)1;1(number)2;2(number)3;3(number)undefined;4(number)undefined;5(number)undefined;6(number)4;7(number)5;8(number)6;'
+ result[result.length - 1].expected = '(string)[1,2,3,null,null,null,4,5,6];0(string)1;1(string)2;2(string)3;3(string)undefined;4(string)undefined;5(string)undefined;6(string)4;7(string)5;8(string)6;';
result.push(function (jsonObject){
replaceTracker = "";
jsonObject.stringify({a:"a", b:"b", c:"c", 3: "d", 2: "e", 1: "f"}, replaceFunc);
@@ -429,10 +435,10 @@ function createTests() {
result[result.length - 1].throws = true;
result.push(function (jsonObject){
cycleTracker = "";
- try { jsonObject.stringify(cyclicArray); } catch(e) { cycleTracker += " -> exception" }
+ try { jsonObject.stringify(cyclicArray); } catch { cycleTracker += " -> exception" }
return cycleTracker;
});
- result[result.length - 1].expected = "0(number):[object Object]first, -> exception";
+ result[result.length - 1].expected = "0(string):[object Object]first, -> exception";
function createArray(len, o) { var r = []; for (var i = 0; i < len; i++) r[i] = o; return r; }
var getterCalls;
var magicObject = createArray(10, {abcdefg: [1,2,5,"ab", null, undefined, true, false,,],
@@ -475,20 +481,20 @@ function createTests() {
});
result.push(function (jsonObject){
var deepObject = {};
- for (var i = 0; i < 1000; i++)
+ for (var i = 0; i < 700; i++)
deepObject = {next:deepObject};
return jsonObject.stringify(deepObject);
});
result.push(function (jsonObject){
var deepArray = [];
- for (var i = 0; i < 1024; i++)
+ for (var i = 0; i < 800; i++)
deepArray = [deepArray];
return jsonObject.stringify(deepArray);
});
result.push(function (jsonObject){
var depth = 0;
function toDeepVirtualJSONObject() {
- if (++depth >= 1000)
+ if (++depth >= 700)
return {};
var r = {};
r.toJSON = toDeepVirtualJSONObject;
@@ -508,7 +514,7 @@ function createTests() {
return jsonObject.stringify(toDeepVirtualJSONArray());
});
var fullCharsetString = "";
- for (var i = 0; i < 65536; i++)
+ for (let i = 0; i <= 0xFFFF; i++)
fullCharsetString += String.fromCharCode(i);
result.push(function (jsonObject){
return jsonObject.stringify(fullCharsetString);
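The updated expectations track the JSON.stringify spec on three points: array indices reach the replacer as strings, Boolean wrappers serialize from their internal [[BooleanData]] slot rather than via valueOf, and Date.prototype.toJSON returns whatever toISOString produces, which is then serialized normally. A small sketch of those behaviors:

    var keys = [];
    JSON.stringify([1, 2], function (k, v) { keys.push(typeof k); return v; });
    keys; // ["string", "string", "string"] -- the root "" plus indices "0", "1"

    var b = new Boolean(true);
    b.valueOf = function () { return false; };
    JSON.stringify(b); // "true" -- [[BooleanData]] wins; valueOf is ignored

    JSON.stringify({ toJSON: Date.prototype.toJSON,
                     toISOString: function () { return {}; } }); // "{}"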
diff --git a/deps/v8/test/webkit/resources/json2-es5-compat.js b/deps/v8/test/webkit/resources/json2-es5-compat.js
index b71656f007..82146ca6a4 100644
--- a/deps/v8/test/webkit/resources/json2-es5-compat.js
+++ b/deps/v8/test/webkit/resources/json2-es5-compat.js
@@ -186,7 +186,7 @@ if (!this.JSON) {
}
var cx = /[\u0000\u00ad\u0600-\u0604\u070f\u17b4\u17b5\u200c-\u200f\u2028-\u202f\u2060-\u206f\ufeff\ufff0-\uffff]/g,
- escapable = /[\\\"\x00-\x1f]/g,
+ escapable = /[\\\"\x00-\x1F]|[\uD800-\uDBFF](?![\uDC00-\uDFFF])|(?<![\uD800-\uDBFF])[\uDC00-\uDFFF]/g,
gap,
indent,
meta = { // table of character substitutions
@@ -203,17 +203,17 @@ if (!this.JSON) {
function quote(string) {
-// If the string contains no control characters, no quote characters, and no
-// backslash characters, then we can safely slap some quotes around it.
-// Otherwise we must also replace the offending characters with safe escape
-// sequences.
+// If the string contains no control characters, no quote characters, no
+// backslash characters, and no lone surrogates, then we can safely
+// slap some quotes around it. Otherwise we must also replace the
+// offending characters with safe escape sequences.
escapable.lastIndex = 0;
return escapable.test(string) ?
'"' + string.replace(escapable, function (a) {
var c = meta[a];
return typeof c === 'string' ? c :
- '\\u' + ('0000' + a.charCodeAt(0).toString(16)).slice(-4);
+ '\\u' + a.charCodeAt(0).toString(16).padStart(4, '0');
}) + '"' :
'"' + string + '"';
}
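Widening the escapable regex keeps this ES5 shim aligned with the Well-formed JSON.stringify proposal, which the V8 in this update implements: lone surrogates are emitted as \uXXXX escapes so the output is always valid Unicode, while properly paired surrogates pass through. A quick sketch, assuming an engine with that proposal:

    JSON.stringify("\uD800");       // '"\\ud800"' -- lone surrogate escaped
    JSON.stringify("\uD834\uDD1E"); // '"𝄞"' -- a valid pair passes through
    // padStart is the modern spelling of the old ('0000' + hex).slice(-4):
    "\\u" + (0x1F).toString(16).padStart(4, "0"); // "\\u001f"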
diff --git a/deps/v8/test/webkit/run-json-stringify-expected.txt b/deps/v8/test/webkit/run-json-stringify-expected.txt
index f62adb07b2..437897efa2 100644
--- a/deps/v8/test/webkit/run-json-stringify-expected.txt
+++ b/deps/v8/test/webkit/run-json-stringify-expected.txt
@@ -52,19 +52,19 @@ function (jsonObject){
PASS tests[i](nativeJSON) is tests[i](JSON)
function (jsonObject){
var value = new Number(1);
- value.valueOf = function() { return 2; }
+ value.valueOf = function() { return 2; };
return jsonObject.stringify(value);
}
PASS tests[i](nativeJSON) is tests[i].expected
function (jsonObject){
var value = new Boolean(true);
- value.valueOf = function() { return 2; }
+ value.valueOf = function() { return false; };
return jsonObject.stringify(value);
}
-FAIL tests[i](nativeJSON) should be 2. Was true.
+PASS tests[i](nativeJSON) is tests[i].expected
function (jsonObject){
var value = new String("fail");
- value.toString = function() { return "converted string"; }
+ value.toString = function() { return "converted string"; };
return jsonObject.stringify(value);
}
PASS tests[i](nativeJSON) is tests[i].expected
@@ -91,7 +91,7 @@ PASS tests[i](nativeJSON) is tests[i](JSON)
function (jsonObject){
return jsonObject.stringify({toJSON: Date.prototype.toJSON, toISOString: function(){ return {}; }});
}
-FAIL tests[i](nativeJSON) should throw an exception. Was {}.
+PASS tests[i](nativeJSON) is tests[i].expected
function (jsonObject){
return jsonObject.stringify({toJSON: Date.prototype.toJSON, toISOString: function(){ throw "An exception"; }});
}
@@ -143,7 +143,7 @@ function (jsonObject){
jsonObject.stringify([1,2,3,4,5], function(k,v){allString = allString && (typeof k == "string"); return v});
return allString;
}
-FAIL tests[i](nativeJSON) should be false. Was true.
+PASS tests[i](nativeJSON) is tests[i].expected
function (jsonObject){
var allString = true;
var array = [];
@@ -355,11 +355,11 @@ PASS tests[i](nativeJSON) is tests[i](JSON)
function (jsonObject){
return jsonObject.stringify(objectWithSideEffectGetter);
}
-FAIL tests[i](nativeJSON) should be {"foo":1}. Was {}.
+PASS tests[i](nativeJSON) is tests[i].expected
function (jsonObject){
return jsonObject.stringify(objectWithSideEffectGetterAndProto);
}
-FAIL tests[i](nativeJSON) should be {"foo":1}. Was {}.
+PASS tests[i](nativeJSON) is tests[i].expected
function (jsonObject){
return jsonObject.stringify(arrayWithSideEffectGetter);
}
@@ -373,7 +373,7 @@ function (jsonObject){
jsonObject.stringify([1,2,3,,,,4,5,6], replaceFunc);
return replaceTracker;
}
-FAIL tests[i](nativeJSON) should be (string)[1,2,3,null,null,null,4,5,6];0(number)1;1(number)2;2(number)3;3(number)undefined;4(number)undefined;5(number)undefined;6(number)4;7(number)5;8(number)6;. Was (string)[1,2,3,null,null,null,4,5,6];0(string)1;1(string)2;2(string)3;3(string)undefined;4(string)undefined;5(string)undefined;6(string)4;7(string)5;8(string)6;.
+PASS tests[i](nativeJSON) is tests[i].expected
function (jsonObject){
replaceTracker = "";
jsonObject.stringify({a:"a", b:"b", c:"c", 3: "d", 2: "e", 1: "f"}, replaceFunc);
@@ -448,10 +448,10 @@ function (jsonObject){
PASS tests[i](nativeJSON) threw exception TypeError: Converting circular structure to JSON.
function (jsonObject){
cycleTracker = "";
- try { jsonObject.stringify(cyclicArray); } catch(e) { cycleTracker += " -> exception" }
+ try { jsonObject.stringify(cyclicArray); } catch { cycleTracker += " -> exception" }
return cycleTracker;
}
-FAIL tests[i](nativeJSON) should be 0(number):[object Object]first, -> exception. Was 0(string):[object Object]first, -> exception.
+PASS tests[i](nativeJSON) is tests[i].expected
function (jsonObject){
getterCalls = 0;
return jsonObject.stringify(magicObject) + " :: getter calls = " + getterCalls;
@@ -499,14 +499,14 @@ function (jsonObject){
PASS tests[i](nativeJSON) is tests[i](JSON)
function (jsonObject){
var deepObject = {};
- for (var i = 0; i < 1000; i++)
+ for (var i = 0; i < 700; i++)
deepObject = {next:deepObject};
return jsonObject.stringify(deepObject);
}
PASS tests[i](nativeJSON) is tests[i](JSON)
function (jsonObject){
var deepArray = [];
- for (var i = 0; i < 1024; i++)
+ for (var i = 0; i < 800; i++)
deepArray = [deepArray];
return jsonObject.stringify(deepArray);
}
@@ -514,7 +514,7 @@ PASS tests[i](nativeJSON) is tests[i](JSON)
function (jsonObject){
var depth = 0;
function toDeepVirtualJSONObject() {
- if (++depth >= 1000)
+ if (++depth >= 700)
return {};
var r = {};
r.toJSON = toDeepVirtualJSONObject;
@@ -541,4 +541,3 @@ PASS tests[i](nativeJSON) is tests[i](JSON)
PASS successfullyParsed is true
TEST COMPLETE
-
diff --git a/deps/v8/test/webkit/webkit.status b/deps/v8/test/webkit/webkit.status
index cc856007e2..d5dac2ba9d 100644
--- a/deps/v8/test/webkit/webkit.status
+++ b/deps/v8/test/webkit/webkit.status
@@ -52,17 +52,18 @@
# Too slow with --enable-slow-asserts.
'array-iterate-backwards': [SKIP],
}], # 'mode == debug'
-['simulator', {
+['simulator_run', {
# Skip tests that timeout with turbofan.
'array-iterate-backwards': [PASS, NO_VARIANTS],
'function-apply-aliased': [SKIP],
# Skip tests that are too slow for simulators.
'dfg-int-overflow-in-loop': [SKIP],
-}], # 'simulator'
-['arch == arm64 and simulator_run', {
- 'dfg-int-overflow-in-loop': [SKIP],
-}], # 'arch == arm64 and simulator_run'
+}], # 'simulator_run'
+['mode == debug and simulator_run', {
+ # Too slow on debug simulators.
+ 'dfg-inline-function-dot-caller': [SKIP],
+}], # 'mode == debug and simulator_run'
['dcheck_always_on == True and (arch == arm or arch == arm64)', {
# Doesn't work with gcc 4.6 on arm or arm64 for some reason.
'reentrant-caching': [SKIP],
@@ -71,14 +72,6 @@
# Too slow for mips big-endian boards on bots (no FPU).
'dfg-int-overflow-in-loop': [SKIP],
}], # 'arch == mips'
-['(arch == mips64 or arch == mips64el) and simulator_run', {
- # Too slow.
- 'dfg-int-overflow-in-loop': [SKIP],
-}], # '(arch == mips64 or arch == mips64el) and simulator_run'
-['(arch == ppc or arch == ppc64) and simulator_run', {
- # Too slow.
- 'dfg-int-overflow-in-loop': [SKIP],
-}], # 'arch == ppc or arch == ppc64'
['arch == s390 or arch == s390x', {
# Too slow.
'dfg-int-overflow-in-loop': [SKIP],
diff --git a/deps/v8/third_party/binutils/download.py b/deps/v8/third_party/binutils/download.py
index 99f43708f5..b3be6414e2 100755
--- a/deps/v8/third_party/binutils/download.py
+++ b/deps/v8/third_party/binutils/download.py
@@ -36,19 +36,6 @@ def WriteFile(filename, content):
f.write('\n')
-def GetArch():
- gyp_host_arch = re.search(
- 'host_arch=(\S*)', os.environ.get('GYP_DEFINES', ''))
- if gyp_host_arch:
- arch = gyp_host_arch.group(1)
- # This matches detect_host_arch.py.
- if arch == 'x86_64':
- return 'x64'
- return arch
-
- return DetectHostArch()
-
-
def FetchAndExtract(arch):
archdir = os.path.join(BINUTILS_DIR, 'Linux_' + arch)
tarball = os.path.join(archdir, BINUTILS_FILE)
@@ -99,7 +86,7 @@ def main(args):
if not sys.platform.startswith('linux'):
return 0
- arch = GetArch()
+ arch = DetectHostArch()
if arch == 'x64':
return FetchAndExtract(arch)
if arch == 'ia32':
diff --git a/deps/v8/third_party/googletest/BUILD.gn b/deps/v8/third_party/googletest/BUILD.gn
index 1c761a7620..f105d1a6b4 100644
--- a/deps/v8/third_party/googletest/BUILD.gn
+++ b/deps/v8/third_party/googletest/BUILD.gn
@@ -52,6 +52,7 @@ source_set("gtest") {
# TODO(crbug.com/829773): Remove this after transitioning off <tr1/tuple>.
"custom/gmock/internal/custom/gmock-port.h",
"src/googletest/include/gtest/gtest-death-test.h",
+ "src/googletest/include/gtest/gtest-matchers.h",
"src/googletest/include/gtest/gtest-message.h",
"src/googletest/include/gtest/gtest-param-test.h",
"src/googletest/include/gtest/gtest-printers.h",
@@ -75,6 +76,7 @@ source_set("gtest") {
"src/googletest/src/gtest-death-test.cc",
"src/googletest/src/gtest-filepath.cc",
"src/googletest/src/gtest-internal-inl.h",
+ "src/googletest/src/gtest-matchers.cc",
"src/googletest/src/gtest-port.cc",
"src/googletest/src/gtest-printers.cc",
"src/googletest/src/gtest-test-part.cc",
@@ -93,7 +95,10 @@ source_set("gtest") {
deps = []
if (is_fuchsia) {
- deps += [ "//third_party/fuchsia-sdk:fdio" ]
+ deps += [
+ "//third_party/fuchsia-sdk/sdk:fdio",
+ "//third_party/fuchsia-sdk/sdk:zx",
+ ]
}
}
@@ -116,16 +121,21 @@ source_set("gmock") {
sources = [
"src/googlemock/include/gmock/gmock-actions.h",
"src/googlemock/include/gmock/gmock-cardinalities.h",
+ "src/googlemock/include/gmock/gmock-function-mocker.h",
"src/googlemock/include/gmock/gmock-generated-actions.h",
"src/googlemock/include/gmock/gmock-generated-function-mockers.h",
"src/googlemock/include/gmock/gmock-generated-matchers.h",
"src/googlemock/include/gmock/gmock-generated-nice-strict.h",
"src/googlemock/include/gmock/gmock-matchers.h",
+ "src/googlemock/include/gmock/gmock-more-actions.h",
+ "src/googlemock/include/gmock/gmock-more-matchers.h",
+ "src/googlemock/include/gmock/gmock-nice-strict.h",
"src/googlemock/include/gmock/gmock-spec-builders.h",
"src/googlemock/include/gmock/gmock.h",
"src/googlemock/include/gmock/internal/gmock-generated-internal-utils.h",
"src/googlemock/include/gmock/internal/gmock-internal-utils.h",
"src/googlemock/include/gmock/internal/gmock-port.h",
+ "src/googlemock/include/gmock/internal/gmock-pp.h",
# gmock helpers.
"custom/gmock/internal/custom/gmock-port.h",
diff --git a/deps/v8/third_party/googletest/README.chromium b/deps/v8/third_party/googletest/README.chromium
index 170ae3ef27..95dc49af85 100644
--- a/deps/v8/third_party/googletest/README.chromium
+++ b/deps/v8/third_party/googletest/README.chromium
@@ -1,7 +1,7 @@
Name: Google Test: Google's C++ Testing Framework
Short Name: googletest
URL: https://github.com/google/googletest.git
-Version: 1.8.0.git-7d15497f7538fb531d0f025929d080743af421ee
+Version: 1.8.0.git-9518a57428ae0a7ed450c1361768e84a2a38af5a
License: BSD
Security critical: no
diff --git a/deps/v8/third_party/inspector_protocol/CodeGenerator.py b/deps/v8/third_party/inspector_protocol/CodeGenerator.py
deleted file mode 100755
index 4882ed9da5..0000000000
--- a/deps/v8/third_party/inspector_protocol/CodeGenerator.py
+++ /dev/null
@@ -1,6 +0,0 @@
-#!/usr/bin/env python
-# Copyright 2016 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-import code_generator
diff --git a/deps/v8/third_party/inspector_protocol/ConvertProtocolToJSON.py b/deps/v8/third_party/inspector_protocol/ConvertProtocolToJSON.py
deleted file mode 100755
index 6cfdc7fa07..0000000000
--- a/deps/v8/third_party/inspector_protocol/ConvertProtocolToJSON.py
+++ /dev/null
@@ -1,10 +0,0 @@
-#!/usr/bin/env python
-# Copyright 2018 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-import convert_protocol_to_json
-
-
-def main():
- convert_protocol_to_json.main()
diff --git a/deps/v8/third_party/inspector_protocol/README.md b/deps/v8/third_party/inspector_protocol/README.md
index 54b7c12929..4eff4338ff 100644
--- a/deps/v8/third_party/inspector_protocol/README.md
+++ b/deps/v8/third_party/inspector_protocol/README.md
@@ -3,8 +3,26 @@
This package contains code generators and templates for the Chromium
inspector protocol.
+The canonical location of this package is at
+https://chromium.googlesource.com/deps/inspector_protocol/
+
In the Chromium tree, it's rolled into
https://cs.chromium.org/chromium/src/third_party/inspector_protocol/
In the V8 tree, it's rolled into
https://cs.chromium.org/chromium/src/v8/third_party/inspector_protocol/
+
+See also [Contributing to Chrome DevTools Protocol](https://docs.google.com/document/d/1c-COD2kaK__5iMM5SEx-PzNA7HFmgttcYfOHHX0HaOM/edit).
+
+We're working on enabling standalone builds for parts of this package for
+testing and development; please feel free to ignore this for now.
+But if you're familiar with
+[Chromium's development process](https://www.chromium.org/developers/contributing-code)
+and have the depot_tools installed, you may use these commands
+to fetch the package (and dependencies) and build and run the tests:
+
+ fetch inspector_protocol
+ cd src
+ gn gen out/Release
+ ninja -C out/Release json_parser_test
+ out/Release/json_parser_test
diff --git a/deps/v8/third_party/inspector_protocol/README.v8 b/deps/v8/third_party/inspector_protocol/README.v8
index 79fc685c88..cdca51a9bf 100644
--- a/deps/v8/third_party/inspector_protocol/README.v8
+++ b/deps/v8/third_party/inspector_protocol/README.v8
@@ -2,7 +2,7 @@ Name: inspector protocol
Short Name: inspector_protocol
URL: https://chromium.googlesource.com/deps/inspector_protocol/
Version: 0
-Revision: 5768a449acc0407bf55aef535b18d710df2a14f2
+Revision: fdbdb154336fc1f15a0a6775349dd90243b8d3fc
License: BSD
License File: LICENSE
Security Critical: no
diff --git a/deps/v8/third_party/inspector_protocol/check_protocol_compatibility.py b/deps/v8/third_party/inspector_protocol/check_protocol_compatibility.py
index 7e7d9af3d6..e23bd70213 100755
--- a/deps/v8/third_party/inspector_protocol/check_protocol_compatibility.py
+++ b/deps/v8/third_party/inspector_protocol/check_protocol_compatibility.py
@@ -168,6 +168,11 @@ def compare_types(context, kind, type_1, type_2, types_map_1, types_map_2, depth
base_type_1 = type_1["type"]
base_type_2 = type_2["type"]
+ # Binary and string have the same wire representation in JSON.
+ if ((base_type_1 == "string" and base_type_2 == "binary") or
+ (base_type_2 == "string" and base_type_1 == "binary")):
+ return
+
if base_type_1 != base_type_2:
errors.append("%s: %s base type mismatch, '%s' vs '%s'" % (context, kind, base_type_1, base_type_2))
elif base_type_1 == "object":
diff --git a/deps/v8/third_party/inspector_protocol/code_generator.py b/deps/v8/third_party/inspector_protocol/code_generator.py
index da220fdbea..edf8c4de21 100755
--- a/deps/v8/third_party/inspector_protocol/code_generator.py
+++ b/deps/v8/third_party/inspector_protocol/code_generator.py
@@ -266,6 +266,21 @@ def create_string_type_definition():
}
+def create_binary_type_definition():
+ # pylint: disable=W0622
+ return {
+ "return_type": "Binary",
+ "pass_type": "const Binary&",
+ "to_pass_type": "%s",
+ "to_raw_type": "%s",
+ "to_rvalue": "%s",
+ "type": "Binary",
+ "raw_type": "Binary",
+ "raw_pass_type": "const Binary&",
+ "raw_return_type": "Binary",
+ }
+
+
def create_primitive_type_definition(type):
# pylint: disable=W0622
typedefs = {
@@ -443,8 +458,10 @@ class Protocol(object):
self.type_definitions["boolean"] = create_primitive_type_definition("boolean")
self.type_definitions["object"] = create_object_type_definition()
self.type_definitions["any"] = create_any_type_definition()
+ self.type_definitions["binary"] = create_binary_type_definition()
for domain in self.json_api["domains"]:
self.type_definitions[domain["domain"] + ".string"] = create_string_type_definition()
+ self.type_definitions[domain["domain"] + ".binary"] = create_binary_type_definition()
if not ("types" in domain):
continue
for type in domain["types"]:
@@ -457,6 +474,8 @@ class Protocol(object):
self.type_definitions[type_name] = self.resolve_type(type)
elif type["type"] == domain["domain"] + ".string":
self.type_definitions[type_name] = create_string_type_definition()
+ elif type["type"] == domain["domain"] + ".binary":
+ self.type_definitions[type_name] = create_binary_type_definition()
else:
self.type_definitions[type_name] = create_primitive_type_definition(type["type"])
diff --git a/deps/v8/third_party/inspector_protocol/convert_protocol_to_json.py b/deps/v8/third_party/inspector_protocol/convert_protocol_to_json.py
index 2dd6f501a6..96048f793d 100755
--- a/deps/v8/third_party/inspector_protocol/convert_protocol_to_json.py
+++ b/deps/v8/third_party/inspector_protocol/convert_protocol_to_json.py
@@ -3,6 +3,7 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
+import argparse
import collections
import json
import os.path
@@ -12,19 +13,22 @@ import sys
import pdl
def main(argv):
- if len(argv) < 2:
- sys.stderr.write("Usage: %s <protocol.pdl> <protocol.json>\n" % sys.argv[0])
- return 1
- file_name = os.path.normpath(argv[0])
+ parser = argparse.ArgumentParser(description=(
+ "Converts from .pdl to .json by invoking the pdl Python module."))
+ parser.add_argument('--map_binary_to_string', type=bool,
+ help=('If set, binary in the .pdl is mapped to a '
+ 'string in .json. Client code will have to '
+ 'base64 decode the string to get the payload.'))
+ parser.add_argument("pdl_file", help="The .pdl input file to parse.")
+ parser.add_argument("json_file", help="The .json output file write.")
+ args = parser.parse_args(argv)
+ file_name = os.path.normpath(args.pdl_file)
input_file = open(file_name, "r")
pdl_string = input_file.read()
- protocol = pdl.loads(pdl_string, file_name)
+ protocol = pdl.loads(pdl_string, file_name, args.map_binary_to_string)
input_file.close()
- output_file = open(argv[0].replace('.pdl', '.json'), 'wb')
- json.dump(protocol, output_file, indent=4, separators=(',', ': '))
- output_file.close()
- output_file = open(os.path.normpath(argv[1]), 'wb')
+ output_file = open(os.path.normpath(args.json_file), 'wb')
json.dump(protocol, output_file, indent=4, separators=(',', ': '))
output_file.close()
diff --git a/deps/v8/third_party/inspector_protocol/lib/Allocator_h.template b/deps/v8/third_party/inspector_protocol/lib/Allocator_h.template
index 8f8109d695..d05ddaea7e 100644
--- a/deps/v8/third_party/inspector_protocol/lib/Allocator_h.template
+++ b/deps/v8/third_party/inspector_protocol/lib/Allocator_h.template
@@ -11,13 +11,6 @@ namespace {{namespace}} {
enum NotNullTagEnum { NotNullLiteral };
-#define PROTOCOL_DISALLOW_NEW() \
- private: \
- void* operator new(size_t) = delete; \
- void* operator new(size_t, NotNullTagEnum, void*) = delete; \
- void* operator new(size_t, void*) = delete; \
- public:
-
#define PROTOCOL_DISALLOW_COPY(ClassName) \
private: \
ClassName(const ClassName&) = delete; \
diff --git a/deps/v8/third_party/inspector_protocol/lib/Collections_h.template b/deps/v8/third_party/inspector_protocol/lib/Collections_h.template
deleted file mode 100644
index e69de29bb2..0000000000
--- a/deps/v8/third_party/inspector_protocol/lib/Collections_h.template
+++ /dev/null
diff --git a/deps/v8/third_party/inspector_protocol/lib/DispatcherBase_cpp.template b/deps/v8/third_party/inspector_protocol/lib/DispatcherBase_cpp.template
index 9ad3eba91a..4b1b2b8148 100644
--- a/deps/v8/third_party/inspector_protocol/lib/DispatcherBase_cpp.template
+++ b/deps/v8/third_party/inspector_protocol/lib/DispatcherBase_cpp.template
@@ -246,10 +246,6 @@ bool UberDispatcher::parseCommand(Value* parsedMessage, int* outCallId, String*
reportProtocolErrorTo(m_frontendChannel, callId, DispatchResponse::kInvalidRequest, "Message must have string 'method' property", nullptr);
return false;
}
-
- std::unordered_map<String, String>::iterator redirectIt = m_redirects.find(method);
- if (redirectIt != m_redirects.end())
- method = redirectIt->second;
if (outMethod)
*outMethod = method;
return true;
@@ -268,13 +264,21 @@ protocol::DispatcherBase* UberDispatcher::findDispatcher(const String& method) {
return it->second.get();
}
-bool UberDispatcher::canDispatch(const String& method)
+bool UberDispatcher::canDispatch(const String& in_method)
{
+ String method = in_method;
+ auto redirectIt = m_redirects.find(method);
+ if (redirectIt != m_redirects.end())
+ method = redirectIt->second;
return !!findDispatcher(method);
}
-void UberDispatcher::dispatch(int callId, const String& method, std::unique_ptr<Value> parsedMessage, const String& rawMessage)
+void UberDispatcher::dispatch(int callId, const String& in_method, std::unique_ptr<Value> parsedMessage, const String& rawMessage)
{
+ String method = in_method;
+ auto redirectIt = m_redirects.find(method);
+ if (redirectIt != m_redirects.end())
+ method = redirectIt->second;
protocol::DispatcherBase* dispatcher = findDispatcher(method);
if (!dispatcher) {
reportProtocolErrorTo(m_frontendChannel, callId, DispatchResponse::kMethodNotFound, "'" + method + "' wasn't found", nullptr);
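The redirect lookup moves out of parseCommand and into both canDispatch and dispatch, so parsing no longer rewrites the method name and each entry point resolves redirects for itself. In illustrative pseudo-JS (names mirror the template, not a real API):

    // m_redirects maps an old method name to its replacement.
    function resolveMethod(method, redirects) {
      return redirects.has(method) ? redirects.get(method) : method;
    }
    // canDispatch(m) and dispatch(m) now both start with resolveMethod(m).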
diff --git a/deps/v8/third_party/inspector_protocol/lib/Forward_h.template b/deps/v8/third_party/inspector_protocol/lib/Forward_h.template
index a3f7c2f0ba..ac792e0837 100644
--- a/deps/v8/third_party/inspector_protocol/lib/Forward_h.template
+++ b/deps/v8/third_party/inspector_protocol/lib/Forward_h.template
@@ -11,6 +11,7 @@
#include {{format_include(config.lib.string_header)}}
#include <cstddef>
+#include <memory>
#include <vector>
#include <unordered_map>
#include <unordered_set>
diff --git a/deps/v8/third_party/inspector_protocol/lib/Maybe_h.template b/deps/v8/third_party/inspector_protocol/lib/Maybe_h.template
index 5af6960a7b..15626ab350 100644
--- a/deps/v8/third_party/inspector_protocol/lib/Maybe_h.template
+++ b/deps/v8/third_party/inspector_protocol/lib/Maybe_h.template
@@ -116,6 +116,15 @@ public:
using MaybeBase::operator=;
};
+template<>
+class Maybe<Binary> : public MaybeBase<Binary> {
+public:
+ Maybe() { }
+ Maybe(Binary value) : MaybeBase(value) { }
+ Maybe(Maybe&& other) IP_NOEXCEPT : MaybeBase(std::move(other)) {}
+ using MaybeBase::operator=;
+};
+
{% for namespace in config.protocol.namespace %}
} // namespace {{namespace}}
{% endfor %}
diff --git a/deps/v8/third_party/inspector_protocol/lib/Object_h.template b/deps/v8/third_party/inspector_protocol/lib/Object_h.template
index f6ffc57659..9efed8e033 100644
--- a/deps/v8/third_party/inspector_protocol/lib/Object_h.template
+++ b/deps/v8/third_party/inspector_protocol/lib/Object_h.template
@@ -16,12 +16,12 @@ namespace {{namespace}} {
class {{config.lib.export_macro}} Object {
public:
static std::unique_ptr<Object> fromValue(protocol::Value*, ErrorSupport*);
+ explicit Object(std::unique_ptr<protocol::DictionaryValue>);
~Object();
std::unique_ptr<protocol::DictionaryValue> toValue() const;
std::unique_ptr<Object> clone() const;
private:
- explicit Object(std::unique_ptr<protocol::DictionaryValue>);
std::unique_ptr<protocol::DictionaryValue> m_object;
};
diff --git a/deps/v8/third_party/inspector_protocol/lib/ValueConversions_h.template b/deps/v8/third_party/inspector_protocol/lib/ValueConversions_h.template
index 4d64ec9091..6549e5ab39 100644
--- a/deps/v8/third_party/inspector_protocol/lib/ValueConversions_h.template
+++ b/deps/v8/third_party/inspector_protocol/lib/ValueConversions_h.template
@@ -100,6 +100,28 @@ struct ValueConversions<String> {
};
template<>
+struct ValueConversions<Binary> {
+ static Binary fromValue(protocol::Value* value, ErrorSupport* errors)
+ {
+ String result;
+ bool success = value ? value->asString(&result) : false;
+ if (!success) {
+ errors->addError("string value expected");
+ return Binary();
+ }
+ Binary out = Binary::fromBase64(result, &success);
+ if (!success)
+ errors->addError("base64 decoding error");
+ return out;
+ }
+
+ static std::unique_ptr<protocol::Value> toValue(const Binary& value)
+ {
+ return StringValue::create(value.toBase64());
+ }
+};
+
+template<>
struct ValueConversions<Value> {
static std::unique_ptr<Value> fromValue(protocol::Value* value, ErrorSupport* errors)
{
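With the new ValueConversions<Binary> specialization, binary protocol values travel as base64 strings on the JSON wire and are decoded on receipt. A rough Node-side sketch of the same round-trip (illustrative only; the real conversion lives in the generated C++):

    const toValue = (bytes) => Buffer.from(bytes).toString("base64");
    const fromValue = (str) => Buffer.from(str, "base64");
    fromValue(toValue([0xCA, 0xFE])); // <Buffer ca fe>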
diff --git a/deps/v8/third_party/inspector_protocol/pdl.py b/deps/v8/third_party/inspector_protocol/pdl.py
index c8a5c6f310..652e99c6db 100644
--- a/deps/v8/third_party/inspector_protocol/pdl.py
+++ b/deps/v8/third_party/inspector_protocol/pdl.py
@@ -10,18 +10,21 @@ import sys
description = ''
-primitiveTypes = ['integer', 'number', 'boolean', 'string', 'object', 'any', 'array']
+primitiveTypes = ['integer', 'number', 'boolean', 'string', 'object', 'any', 'array', 'binary']
-def assignType(item, type, isArray=False):
- if isArray:
+
+def assignType(item, type, is_array=False, map_binary_to_string=False):
+ if is_array:
item['type'] = 'array'
item['items'] = collections.OrderedDict()
- assignType(item['items'], type)
+ assignType(item['items'], type, False, map_binary_to_string)
return
if type == 'enum':
type = 'string'
+ if map_binary_to_string and type == 'binary':
+ type = 'string'
if type in primitiveTypes:
item['type'] = type
else:
@@ -42,7 +45,7 @@ def createItem(d, experimental, deprecated, name=None):
return result
-def parse(data, file_name):
+def parse(data, file_name, map_binary_to_string=False):
protocol = collections.OrderedDict()
protocol['version'] = collections.OrderedDict()
protocol['domains'] = []
@@ -88,7 +91,7 @@ def parse(data, file_name):
if 'types' not in domain:
domain['types'] = []
item = createItem({'id': match.group(3)}, match.group(1), match.group(2))
- assignType(item, match.group(5), match.group(4))
+ assignType(item, match.group(5), match.group(4), map_binary_to_string)
domain['types'].append(item)
continue
@@ -115,7 +118,7 @@ def parse(data, file_name):
param = createItem({}, match.group(1), match.group(2), match.group(6))
if match.group(3):
param['optional'] = True
- assignType(param, match.group(5), match.group(4))
+ assignType(param, match.group(5), match.group(4), map_binary_to_string)
if match.group(5) == 'enum':
enumliterals = param['enum'] = []
subitems.append(param)
@@ -161,7 +164,7 @@ def parse(data, file_name):
return protocol
-def loads(data, file_name):
+def loads(data, file_name, map_binary_to_string=False):
if file_name.endswith(".pdl"):
- return parse(data, file_name)
+ return parse(data, file_name, map_binary_to_string)
return json.loads(data)
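The map_binary_to_string flag threads down into assignType so that a .pdl 'binary' degrades to 'string' in the emitted .json, leaving clients to base64-decode the payload. The effect, restated as an illustrative JS mirror of the Python:

    function assignType(item, type, mapBinaryToString) {
      if (type === "enum") type = "string";
      if (mapBinaryToString && type === "binary") type = "string";
      item.type = type;
    }
    var item = {};
    assignType(item, "binary", true);
    item.type; // "string"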
diff --git a/deps/v8/third_party/v8/builtins/array-sort.tq b/deps/v8/third_party/v8/builtins/array-sort.tq
index 46848e3f49..791f88e009 100644
--- a/deps/v8/third_party/v8/builtins/array-sort.tq
+++ b/deps/v8/third_party/v8/builtins/array-sort.tq
@@ -13,7 +13,7 @@
//
// https://github.com/python/cpython/blob/master/Objects/listsort.txt
-module array {
+namespace array {
// All accessors bail to the GenericElementsAccessor if assumptions checked
// by "CanUseSameAccessor<>" are violated:
// Generic <- FastPackedSmi
@@ -124,10 +124,10 @@ module array {
// copied values from the prototype chain to the receiver if they were visible
// through a hole.
- builtin Load<ElementsAccessor: type>(
+ transitioning builtin Load<ElementsAccessor: type>(
context: Context, sortState: FixedArray, elements: HeapObject,
index: Smi): Object {
- return GetProperty(context, elements, index);
+ return GetProperty(elements, index);
}
Load<FastPackedSmiElements>(
@@ -193,10 +193,10 @@ module array {
return elems[index];
}
- builtin Store<ElementsAccessor: type>(
+ transitioning builtin Store<ElementsAccessor: type>(
context: Context, sortState: FixedArray, elements: HeapObject, index: Smi,
value: Object): Smi {
- SetProperty(context, elements, index, value);
+ SetProperty(elements, index, value);
return kSuccess;
}
@@ -257,25 +257,19 @@ module array {
return kSuccess;
}
- extern macro UnsafeCastObjectToCompareBuiltinFn(Object): CompareBuiltinFn;
- UnsafeCast<CompareBuiltinFn>(o: Object): CompareBuiltinFn {
- return UnsafeCastObjectToCompareBuiltinFn(o);
+ UnsafeCast<CompareBuiltinFn>(implicit context: Context)(o: Object):
+ CompareBuiltinFn {
+ return %RawObjectCast<CompareBuiltinFn>(o);
}
-
- extern macro UnsafeCastObjectToLoadFn(Object): LoadFn;
- UnsafeCast<LoadFn>(o: Object): LoadFn {
- return UnsafeCastObjectToLoadFn(o);
+ UnsafeCast<LoadFn>(implicit context: Context)(o: Object): LoadFn {
+ return %RawObjectCast<LoadFn>(o);
}
-
- extern macro UnsafeCastObjectToStoreFn(Object): StoreFn;
- UnsafeCast<StoreFn>(o: Object): StoreFn {
- return UnsafeCastObjectToStoreFn(o);
+ UnsafeCast<StoreFn>(implicit context: Context)(o: Object): StoreFn {
+ return %RawObjectCast<StoreFn>(o);
}
-
- extern macro UnsafeCastObjectToCanUseSameAccessorFn(Object):
- CanUseSameAccessorFn;
- UnsafeCast<CanUseSameAccessorFn>(o: Object): CanUseSameAccessorFn {
- return UnsafeCastObjectToCanUseSameAccessorFn(o);
+ UnsafeCast<CanUseSameAccessorFn>(implicit context: Context)(o: Object):
+ CanUseSameAccessorFn {
+ return %RawObjectCast<CanUseSameAccessorFn>(o);
}
builtin SortCompareDefault(
@@ -306,7 +300,7 @@ module array {
return 0;
}
- builtin SortCompareUserFn(
+ transitioning builtin SortCompareUserFn(
context: Context, comparefn: Object, x: Object, y: Object): Number {
assert(comparefn != Undefined);
const cmpfn: Callable = UnsafeCast<Callable>(comparefn);
@@ -325,14 +319,12 @@ module array {
builtin CanUseSameAccessor<ElementsAccessor: type>(
context: Context, receiver: JSReceiver, initialReceiverMap: Object,
initialReceiverLength: Number): Boolean {
- assert(IsJSArray(receiver));
-
- let a: JSArray = UnsafeCast<JSArray>(receiver);
+ const a: JSArray = UnsafeCast<JSArray>(receiver);
if (a.map != initialReceiverMap) return False;
assert(TaggedIsSmi(initialReceiverLength));
let originalLength: Smi = UnsafeCast<Smi>(initialReceiverLength);
- if (a.length_fast != originalLength) return False;
+ if (UnsafeCast<Smi>(a.length) != originalLength) return False;
return True;
}
@@ -362,8 +354,7 @@ module array {
const receiver: JSReceiver = GetReceiver(sortState);
const initialReceiverMap: Object = sortState[kInitialReceiverMapIdx];
- const initialReceiverLength: Number =
- UnsafeCast<Number>(sortState[kInitialReceiverLengthIdx]);
+ const initialReceiverLength: Number = GetInitialReceiverLength(sortState);
const canUseSameAccessorFn: CanUseSameAccessorFn =
GetCanUseSameAccessorFn(sortState);
@@ -378,7 +369,8 @@ module array {
// might have occurred. This means we cannot leave any pointer to the elements
// backing store on the stack (since it would point to the filler object).
// TODO(v8:7995): Remove reloading once left-trimming is removed.
- macro ReloadElements(sortState: FixedArray): HeapObject {
+ macro ReloadElements(implicit context: Context)(sortState: FixedArray):
+ HeapObject {
const receiver: JSReceiver = GetReceiver(sortState);
if (sortState[kAccessorIdx] == kGenericElementsAccessorId) return receiver;
@@ -386,31 +378,40 @@ module array {
return object.elements;
}
- macro GetLoadFn(sortState: FixedArray): LoadFn {
+ macro GetInitialReceiverLength(implicit context:
+ Context)(sortState: FixedArray): Number {
+ return UnsafeCast<Number>(sortState[kInitialReceiverLengthIdx]);
+ }
+
+ macro GetLoadFn(implicit context: Context)(sortState: FixedArray): LoadFn {
return UnsafeCast<LoadFn>(sortState[kLoadFnIdx]);
}
- macro GetStoreFn(sortState: FixedArray): StoreFn {
+ macro GetStoreFn(implicit context: Context)(sortState: FixedArray): StoreFn {
return UnsafeCast<StoreFn>(sortState[kStoreFnIdx]);
}
- macro GetCanUseSameAccessorFn(sortState: FixedArray): CanUseSameAccessorFn {
+ macro GetCanUseSameAccessorFn(implicit context: Context)(
+ sortState: FixedArray): CanUseSameAccessorFn {
return UnsafeCast<CanUseSameAccessorFn>(
sortState[kCanUseSameAccessorFnIdx]);
}
- macro GetReceiver(sortState: FixedArray): JSReceiver {
+ macro GetReceiver(implicit context: Context)(sortState: FixedArray):
+ JSReceiver {
return UnsafeCast<JSReceiver>(sortState[kReceiverIdx]);
}
// Returns the temporary array without changing its size.
- macro GetTempArray(sortState: FixedArray): FixedArray {
+ macro GetTempArray(implicit context: Context)(sortState: FixedArray):
+ FixedArray {
return UnsafeCast<FixedArray>(sortState[kTempArrayIdx]);
}
// Re-loading the stack-size is done in a few places. The small macro allows
// for easier invariant checks at all use sites.
- macro GetPendingRunsSize(sortState: FixedArray): Smi {
+ macro GetPendingRunsSize(implicit context: Context)(sortState: FixedArray):
+ Smi {
assert(TaggedIsSmi(sortState[kPendingRunsSizeIdx]));
const stackSize: Smi = UnsafeCast<Smi>(sortState[kPendingRunsSizeIdx]);
@@ -422,7 +423,8 @@ module array {
sortState[kPendingRunsSizeIdx] = value;
}
- macro GetPendingRunBase(pendingRuns: FixedArray, run: Smi): Smi {
+ macro GetPendingRunBase(implicit context:
+ Context)(pendingRuns: FixedArray, run: Smi): Smi {
return UnsafeCast<Smi>(pendingRuns[run << 1]);
}
@@ -430,7 +432,8 @@ module array {
pendingRuns[run << 1] = value;
}
- macro GetPendingRunLength(pendingRuns: FixedArray, run: Smi): Smi {
+ macro GetPendingRunLength(implicit context: Context)(
+ pendingRuns: FixedArray, run: Smi): Smi {
return UnsafeCast<Smi>(pendingRuns[(run << 1) + 1]);
}
@@ -438,7 +441,8 @@ module array {
pendingRuns[(run << 1) + 1] = value;
}
- macro PushRun(sortState: FixedArray, base: Smi, length: Smi) {
+ macro PushRun(implicit context:
+ Context)(sortState: FixedArray, base: Smi, length: Smi) {
assert(GetPendingRunsSize(sortState) < kMaxMergePending);
const stackSize: Smi = GetPendingRunsSize(sortState);
@@ -453,7 +457,8 @@ module array {
// Returns the temporary array and makes sure that it is big enough.
// TODO(szuend): Implement a better re-size strategy.
- macro GetTempArray(sortState: FixedArray, requestedSize: Smi): FixedArray {
+ macro GetTempArray(implicit context: Context)(
+ sortState: FixedArray, requestedSize: Smi): FixedArray {
const minSize: Smi = SmiMax(kSortStateTempSize, requestedSize);
const currentSize: Smi = UnsafeCast<Smi>(sortState[kTempArraySizeIdx]);
@@ -470,7 +475,8 @@ module array {
}
// This macro jumps to the Bailout label iff kBailoutStatus is kFailure.
- macro EnsureSuccess(sortState: FixedArray) labels Bailout {
+ macro EnsureSuccess(implicit context:
+ Context)(sortState: FixedArray) labels Bailout {
const status: Smi = UnsafeCast<Smi>(sortState[kBailoutStatusIdx]);
if (status == kFailure) goto Bailout;
}
@@ -501,19 +507,19 @@ module array {
EnsureSuccess(sortState) otherwise Bailout;
}
- macro CallCopyFromTempArray(
+ transitioning macro CallCopyFromTempArray(
context: Context, sortState: FixedArray, dstElements: HeapObject,
dstPos: Smi, tempArray: FixedArray, srcPos: Smi, length: Smi)
- labels Bailout {
+ labels Bailout {
CopyFromTempArray(
context, sortState, dstElements, dstPos, tempArray, srcPos, length);
EnsureSuccess(sortState) otherwise Bailout;
}
- macro CallCopyWithinSortArray(
+ transitioning macro CallCopyWithinSortArray(
context: Context, sortState: FixedArray, elements: HeapObject,
srcPos: Smi, dstPos: Smi, length: Smi)
- labels Bailout {
+ labels Bailout {
CopyWithinSortArray(context, sortState, elements, srcPos, dstPos, length);
EnsureSuccess(sortState) otherwise Bailout;
}
@@ -538,32 +544,21 @@ module array {
return result;
}
- macro CallMergeAt(context: Context, sortState: FixedArray, i: Smi)
- labels Bailout {
+ transitioning macro
+ CallMergeAt(context: Context, sortState: FixedArray, i: Smi)
+ labels Bailout {
MergeAt(context, sortState, i);
EnsureSuccess(sortState) otherwise Bailout;
}
- // Used for OOB asserts in Copy* builtins.
- macro GetReceiverLengthProperty(
- context: Context, sortState: FixedArray): Smi {
- const receiver: JSReceiver = GetReceiver(sortState);
-
- if (IsJSArray(receiver)) return UnsafeCast<JSArray>(receiver).length_fast;
-
- const len: Number =
- ToLength_Inline(context, GetProperty(context, receiver, kLengthString));
- return UnsafeCast<Smi>(len);
- }
-
- macro CopyToTempArray(
+ transitioning macro CopyToTempArray(
context: Context, sortState: FixedArray, load: LoadFn,
srcElements: HeapObject, srcPos: Smi, tempArray: FixedArray, dstPos: Smi,
length: Smi)
- labels Bailout {
+ labels Bailout {
assert(srcPos >= 0);
assert(dstPos >= 0);
- assert(srcPos <= GetReceiverLengthProperty(context, sortState) - length);
+ assert(srcPos <= GetInitialReceiverLength(sortState) - length);
assert(dstPos <= tempArray.length - length);
let srcIdx: Smi = srcPos;
@@ -578,13 +573,13 @@ module array {
}
}
- builtin CopyFromTempArray(
+ transitioning builtin CopyFromTempArray(
context: Context, sortState: FixedArray, dstElements: HeapObject,
dstPos: Smi, tempArray: FixedArray, srcPos: Smi, length: Smi): Smi {
assert(srcPos >= 0);
assert(dstPos >= 0);
assert(srcPos <= tempArray.length - length);
- assert(dstPos <= GetReceiverLengthProperty(context, sortState) - length);
+ assert(dstPos <= GetInitialReceiverLength(sortState) - length);
let store: StoreFn = GetStoreFn(sortState);
@@ -605,13 +600,13 @@ module array {
}
}
- builtin CopyWithinSortArray(
+ transitioning builtin CopyWithinSortArray(
context: Context, sortState: FixedArray, elements: HeapObject,
srcPos: Smi, dstPos: Smi, length: Smi): Smi {
assert(srcPos >= 0);
assert(dstPos >= 0);
- assert(srcPos <= GetReceiverLengthProperty(context, sortState) - length);
- assert(dstPos <= GetReceiverLengthProperty(context, sortState) - length);
+ assert(srcPos <= GetInitialReceiverLength(sortState) - length);
+ assert(dstPos <= GetInitialReceiverLength(sortState) - length);
try {
let load: LoadFn = GetLoadFn(sortState);
@@ -681,7 +676,7 @@ module array {
// Find pivot insertion point.
while (left < right) {
- const mid: Smi = left + ((right - left) >>> 1);
+ const mid: Smi = left + ((right - left) >> 1);
const midElement: Object =
CallLoad(context, sortState, load, elements, mid)
otherwise Bailout;
@@ -795,7 +790,7 @@ module array {
macro ReverseRange(
context: Context, sortState: FixedArray, load: LoadFn, store: StoreFn,
elements: HeapObject, from: Smi, to: Smi)
- labels Bailout {
+ labels Bailout {
let low: Smi = from;
let high: Smi = to - 1;
@@ -813,7 +808,8 @@ module array {
// Merges the two runs at stack indices i and i + 1.
// Returns kFailure if we need to bailout, kSuccess otherwise.
- builtin MergeAt(context: Context, sortState: FixedArray, i: Smi): Smi {
+ transitioning builtin
+ MergeAt(context: Context, sortState: FixedArray, i: Smi): Smi {
const stackSize: Smi = GetPendingRunsSize(sortState);
// We are only allowed to either merge the two top-most runs, or leave
@@ -891,7 +887,7 @@ module array {
}
}
- macro LoadElementsOrTempArray(
+ macro LoadElementsOrTempArray(implicit context: Context)(
useTempArray: Boolean, sortState: FixedArray): HeapObject {
return useTempArray == True ? GetTempArray(sortState) :
ReloadElements(sortState);
@@ -1003,7 +999,7 @@ module array {
// a[base + lastOfs - 1] < key <= a[base + offset].
lastOfs++;
while (lastOfs < offset) {
- const m: Smi = lastOfs + ((offset - lastOfs) >>> 1);
+ const m: Smi = lastOfs + ((offset - lastOfs) >> 1);
const baseMElement: Object = CallLoad(
context, sortState, load,
@@ -1124,7 +1120,7 @@ module array {
// a[base + lastOfs - 1] < key <= a[base + ofs].
lastOfs++;
while (lastOfs < offset) {
- const m: Smi = lastOfs + ((offset - lastOfs) >>> 1);
+ const m: Smi = lastOfs + ((offset - lastOfs) >> 1);
const baseMElement: Object = CallLoad(
context, sortState, load,
@@ -1153,7 +1149,7 @@ module array {
macro CopyElement(
context: Context, sortState: FixedArray, load: LoadFn, store: StoreFn,
elements: HeapObject, from: Smi, to: Smi)
- labels Bailout {
+ labels Bailout {
const element: Object = CallLoad(context, sortState, load, elements, from)
otherwise Bailout;
CallStore(context, sortState, store, elements, to, element)
@@ -1166,10 +1162,10 @@ module array {
// array[baseB] < array[baseA],
// that array[baseA + lengthA - 1] belongs at the end of the merge,
// and should have lengthA <= lengthB.
- macro MergeLow(
+ transitioning macro MergeLow(
context: Context, sortState: FixedArray, baseA: Smi, lengthAArg: Smi,
baseB: Smi, lengthBArg: Smi)
- labels Bailout {
+ labels Bailout {
assert(0 < lengthAArg && 0 < lengthBArg);
assert(0 <= baseA && 0 < baseB);
assert(baseA + lengthAArg == baseB);
@@ -1341,10 +1337,10 @@ module array {
// starting at baseB in a stable way, in-place. lengthA and lengthB must
// be > 0. Must also have that array[baseA + lengthA - 1] belongs at the
// end of the merge and should have lengthA >= lengthB.
- macro MergeHigh(
+ transitioning macro MergeHigh(
context: Context, sortState: FixedArray, baseA: Smi, lengthAArg: Smi,
baseB: Smi, lengthBArg: Smi)
- labels Bailout {
+ labels Bailout {
assert(0 < lengthAArg && 0 < lengthBArg);
assert(0 <= baseA && 0 < baseB);
assert(baseA + lengthAArg == baseB);
@@ -1535,7 +1531,7 @@ module array {
assert(n >= 0);
while (n >= 64) {
r = r | (n & 1);
- n = n >>> 1;
+ n = n >> 1;
}
const minRunLength: Smi = n + r;
@@ -1544,7 +1540,8 @@ module array {
}
// Returns true iff run_length(n - 2) > run_length(n - 1) + run_length(n).
- macro RunInvariantEstablished(pendingRuns: FixedArray, n: Smi): bool {
+ macro RunInvariantEstablished(implicit context: Context)(
+ pendingRuns: FixedArray, n: Smi): bool {
if (n < 2) return true;
const runLengthN: Smi = GetPendingRunLength(pendingRuns, n);
@@ -1563,8 +1560,8 @@ module array {
// TODO(szuend): Remove unnecessary loads. This macro was refactored to
// improve readability, introducing unnecessary loads in the
// process. Determine if all these extra loads are ok.
- macro MergeCollapse(context: Context, sortState: FixedArray)
- labels Bailout {
+ transitioning macro MergeCollapse(context: Context, sortState: FixedArray)
+ labels Bailout {
const pendingRuns: FixedArray =
UnsafeCast<FixedArray>(sortState[kPendingRunsIdx]);
@@ -1592,8 +1589,9 @@ module array {
// Regardless of invariants, merge all runs on the stack until only one
// remains. This is used at the end of the mergesort.
- macro MergeForceCollapse(context: Context, sortState: FixedArray)
- labels Bailout {
+ transitioning macro
+ MergeForceCollapse(context: Context, sortState: FixedArray)
+ labels Bailout {
let pendingRuns: FixedArray =
UnsafeCast<FixedArray>(sortState[kPendingRunsIdx]);
@@ -1635,8 +1633,9 @@ module array {
CanUseSameAccessor<GenericElementsAccessor>;
}
- macro ArrayTimSortImpl(context: Context, sortState: FixedArray, length: Smi)
- labels Bailout {
+ transitioning macro
+ ArrayTimSortImpl(context: Context, sortState: FixedArray, length: Smi)
+ labels Bailout {
InitializeSortState(sortState);
if (length < 2) return;
@@ -1678,8 +1677,8 @@ module array {
UnsafeCast<FixedArray>(sortState[kPendingRunsIdx]), 0) == length);
}
- builtin ArrayTimSort(
- context: Context, sortState: FixedArray, length: Smi): Object {
+ transitioning builtin
+ ArrayTimSort(context: Context, sortState: FixedArray, length: Smi): Object {
try {
ArrayTimSortImpl(context, sortState, length)
otherwise Slow;
@@ -1712,8 +1711,8 @@ module array {
extern runtime PrepareElementsForSort(Context, Object, Number): Smi;
// https://tc39.github.io/ecma262/#sec-array.prototype.sort
- javascript builtin ArrayPrototypeSort(
- context: Context, receiver: Object, ...arguments): Object {
+ transitioning javascript builtin
+ ArrayPrototypeSort(context: Context, receiver: Object, ...arguments): Object {
// 1. If comparefn is not undefined and IsCallable(comparefn) is false,
// throw a TypeError exception.
const comparefnObj: Object = arguments[0];
@@ -1733,8 +1732,8 @@ module array {
sortState[kBailoutStatusIdx] = kSuccess;
// 3. Let len be ? ToLength(? Get(obj, "length")).
- const len: Number =
- ToLength_Inline(context, GetProperty(context, obj, 'length'));
+ const len: Number = GetLengthProperty(obj);
+
if (len < 2) return receiver;
// TODO(szuend): Investigate performance tradeoff of skipping this step
@@ -1747,10 +1746,9 @@ module array {
sortState[kInitialReceiverLengthIdx] = len;
try {
- const a: JSArray = Cast<JSArray>(obj) otherwise Slow;
- const elementsKind: ElementsKind = map.elements_kind;
- if (!IsFastElementsKind(elementsKind)) goto Slow;
+ let a: FastJSArray = Cast<FastJSArray>(receiver) otherwise Slow;
+ const elementsKind: ElementsKind = map.elements_kind;
if (IsDoubleElementsKind(elementsKind)) {
InitializeSortStateAccessor<FastDoubleElements>(sortState);
} else if (elementsKind == PACKED_SMI_ELEMENTS) {
@@ -1773,4 +1771,3 @@ module array {
return receiver;
}
}
-
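Besides the module -> namespace rename and the transitioning annotations, the patch switches >>> to >> in the binary-search midpoints and in the minrun computation; both shifts operate on non-negative Smis (the macro asserts n >= 0), so the signed shift is equivalent. The TimSort minrun logic, sketched in JS:

    // Keep the six most significant bits of n, adding one if any of the
    // discarded low bits were set (mirrors the Torque while-loop above).
    function minRunLength(n) {
      let r = 0;
      while (n >= 64) {
        r |= n & 1;
        n >>= 1;
      }
      return n + r;
    }
    minRunLength(65);   // 33
    minRunLength(4096); // 32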
diff --git a/deps/v8/tools/BUILD.gn b/deps/v8/tools/BUILD.gn
index 3ae98078f1..7b019ad0b2 100644
--- a/deps/v8/tools/BUILD.gn
+++ b/deps/v8/tools/BUILD.gn
@@ -50,6 +50,7 @@ group("v8_testrunner") {
data = [
# Also add the num-fuzzer wrapper script in order to be able to run the
# num-fuzzer on all existing isolated V8 test suites.
+ "predictable_wrapper.py",
"run-num-fuzzer.py",
"run-tests.py",
"testrunner/",
diff --git a/deps/v8/tools/unittests/PRESUBMIT.py b/deps/v8/tools/PRESUBMIT.py
index d428813e13..f719c75eed 100644
--- a/deps/v8/tools/unittests/PRESUBMIT.py
+++ b/deps/v8/tools/PRESUBMIT.py
@@ -3,7 +3,6 @@
# found in the LICENSE file.
def CheckChangeOnCommit(input_api, output_api):
- # TODO(machenbach): Run all unittests.
tests = input_api.canned_checks.GetUnitTestsInDirectory(
- input_api, output_api, '.', whitelist=['run_tests_test.py$'])
+ input_api, output_api, 'unittests')
return input_api.RunTests(tests)
diff --git a/deps/v8/tools/blink_tests/TestExpectations b/deps/v8/tools/blink_tests/TestExpectations
index e6cc3d274f..e69de29bb2 100644
--- a/deps/v8/tools/blink_tests/TestExpectations
+++ b/deps/v8/tools/blink_tests/TestExpectations
@@ -1,7 +0,0 @@
-[ Linux ] virtual/pointerevent/fast/events/mouse-cursor-style-change-iframe.html [ Skip ]
-
-# Turn off Slimming Paint tests on linux.
-[ Linux ] virtual/slimmingpaint/ [ Skip ]
-
-# Several failures since https://crrev.com/c/1196547
-crbug.com/879604 external/wpt/cookies/samesite/ [ Skip ]
diff --git a/deps/v8/tools/callstats.html b/deps/v8/tools/callstats.html
index 2afd0602d8..1ceca83db0 100644
--- a/deps/v8/tools/callstats.html
+++ b/deps/v8/tools/callstats.html
@@ -1,3 +1,4 @@
+<!DOCTYPE html>
<html>
<!--
Copyright 2016 the V8 project authors. All rights reserved. Use of this source
@@ -5,7 +6,8 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
-->
<head>
- <meta charset="UTF-8">
+ <meta charset="utf-8">
+ <title>V8 Runtime Stats Komparator</title>
<style>
body {
font-family: arial;
@@ -228,8 +230,8 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
display: none;
}
</style>
- <script type="text/javascript" src="https://www.gstatic.com/charts/loader.js"></script>
- <script type="text/javascript">
+ <script src="https://www.gstatic.com/charts/loader.js"></script>
+ <script>
"use strict"
google.charts.load('current', {packages: ['corechart']});
@@ -957,7 +959,7 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
}
}
</script>
- <script type="text/javascript">
+ <script>
"use strict"
// =========================================================================
// Helpers
@@ -1058,7 +1060,7 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
}
</script>
- <script type="text/javascript">
+ <script>
"use strict"
// =========================================================================
// EventHandlers
@@ -1305,7 +1307,7 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
window.open(url,'_blank');
}
</script>
- <script type="text/javascript">
+ <script>
"use strict"
// =========================================================================
class Versions {
diff --git a/deps/v8/tools/callstats.py b/deps/v8/tools/callstats.py
index 91f8637acd..709aade30f 100755
--- a/deps/v8/tools/callstats.py
+++ b/deps/v8/tools/callstats.py
@@ -140,6 +140,8 @@ def get_chrome_flags(js_flags, user_data_dir, arg_delimiter=""):
"--no-first-run",
"--user-data-dir={}{}{}".format(arg_delimiter, user_data_dir,
arg_delimiter),
+ "--data-path={}{}{}".format(arg_delimiter,
+ os.path.join(user_data_dir, 'content-shell-data'), arg_delimiter),
]
def get_chrome_replay_flags(args, arg_delimiter=""):
diff --git a/deps/v8/tools/check-unused-symbols.sh b/deps/v8/tools/check-unused-symbols.sh
new file mode 100755
index 0000000000..03489389b5
--- /dev/null
+++ b/deps/v8/tools/check-unused-symbols.sh
@@ -0,0 +1,24 @@
+#!/bin/bash
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+v8_root=$(readlink -f $(dirname $BASH_SOURCE)/../)
+symbols=$(
+ grep \
+ --only-matching \
+ --perl-regexp 'V\(_, \K([^,\)]*)' \
+ -- "$v8_root/src/heap-symbols.h")
+
+# Find symbols which appear exactly once (in heap-symbols.h)
+grep \
+ --only-matching \
+ --no-filename \
+ --recursive \
+ --fixed-strings "$symbols" \
+ -- "$v8_root/src" "$v8_root/test/cctest" \
+| sort \
+| uniq -u \
+| sed -e 's/.*/Heap symbol "&" seems to be unused./'
+
+echo "Kthxbye."
diff --git a/deps/v8/tools/clusterfuzz/v8_mock_archs.js b/deps/v8/tools/clusterfuzz/v8_mock_archs.js
index 507f31a3a2..acbaef821f 100644
--- a/deps/v8/tools/clusterfuzz/v8_mock_archs.js
+++ b/deps/v8/tools/clusterfuzz/v8_mock_archs.js
@@ -15,15 +15,10 @@
var mock = function(arrayType) {
var handler = {
construct: function(target, args) {
- var arrayLength = args[0]
- if (args.length > 0 &&
- Number.isInteger(args[0]) &&
- args[0] > 1048576) {
- args[0] = 1048576
- } else if (args.length > 2 &&
- Number.isInteger(args[2]) &&
- args[2] > 1048576) {
- args[2] = 1048576
+ for (let i = 0; i < args.length; i++) {
+ if (typeof args[i] != "object") {
+ args[i] = Math.min(1048576, args[i]);
+ }
}
return new (
Function.prototype.bind.apply(arrayType, [null].concat(args)));
@@ -40,6 +35,8 @@
Uint16Array = mock(Uint16Array);
Int32Array = mock(Int32Array);
Uint32Array = mock(Uint32Array);
+ BigInt64Array = mock(BigInt64Array);
+ BigUint64Array = mock(BigUint64Array);
Float32Array = mock(Float32Array);
Float64Array = mock(Float64Array);
})();
@@ -54,6 +51,8 @@
Uint16Array,
Int32Array,
Uint32Array,
+ BigInt64Array,
+ BigUint64Array,
Float32Array,
Float64Array,
];
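The rewritten trap clamps every non-object constructor argument (length, byteOffset, and so on) instead of special-casing positions 0 and 2, and the mock now also covers BigInt64Array and BigUint64Array. A standalone sketch of the same idea, modernized with spread where the original keeps the bind/apply form:

    const clamp = (T) => new Proxy(T, {
      construct(target, args) {
        for (let i = 0; i < args.length; i++) {
          if (typeof args[i] !== "object") args[i] = Math.min(1048576, args[i]);
        }
        return new target(...args);
      }
    });
    new (clamp(Uint8Array))(2 ** 30).length; // 1048576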
diff --git a/deps/v8/tools/deprecation_stats.py b/deps/v8/tools/deprecation_stats.py
new file mode 100755
index 0000000000..780832e681
--- /dev/null
+++ b/deps/v8/tools/deprecation_stats.py
@@ -0,0 +1,105 @@
+#!/usr/bin/env python
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import argparse
+from datetime import datetime
+import re
+import subprocess
+import sys
+
+RE_GITHASH = re.compile(r"^[0-9a-f]{40}")
+RE_AUTHOR_TIME = re.compile(r"^author-time (\d+)$")
+RE_FILENAME = re.compile(r"^filename (.+)$")
+
+def GetBlame(file_path):
+ result = subprocess.check_output(
+ ['git', 'blame', '-t', '--line-porcelain', file_path])
+ line_iter = iter(result.splitlines())
+ blame_list = list()
+ current_blame = None
+ while True:
+ line = next(line_iter, None)
+ if line is None:
+ break
+ if RE_GITHASH.match(line):
+ if current_blame is not None:
+ blame_list.append(current_blame)
+ current_blame = {'time': 0, 'filename': None, 'content': None}
+ continue
+ match = RE_AUTHOR_TIME.match(line)
+ if match:
+ current_blame['time'] = datetime.fromtimestamp(int(match.groups()[0]))
+ continue
+ match = RE_FILENAME.match(line)
+ if match:
+ current_blame['filename'] = match.groups()[0]
+ current_blame['content'] = next(line_iter).strip()
+ continue
+ blame_list.append(current_blame)
+ return blame_list
+
+RE_MACRO_END = re.compile(r"\);")
+RE_DEPRECATE_MACRO = re.compile(r"\(.*?,(.*)\);", re.MULTILINE)
+
+def FilterAndPrint(blame_list, macro, before):
+ index = 0
+ re_macro = re.compile(macro)
+ deprecated = list()
+ while index < len(blame_list):
+ blame = blame_list[index]
+ match = re_macro.search(blame['content'])
+ if match and blame['time'] < before:
+ line = blame['content']
+ time = blame['time']
+ pos = match.end()
+ start = -1
+ parens = 0
+ quotes = 0
+ while True:
+ if pos >= len(line):
+ # extend to next line
+ index = index + 1
+ blame = blame_list[index]
+ if line.endswith(','):
+ # add whitespace when breaking line due to comma
+ line = line + ' '
+ line = line + blame['content']
+ if line[pos] == '(':
+ parens = parens + 1
+ elif line[pos] == ')':
+ parens = parens - 1
+ if parens == 0:
+ break
+ elif line[pos] == '"':
+ quotes = quotes + 1
+ elif line[pos] == ',' and quotes % 2 == 0 and start == -1:
+ start = pos + 1
+ pos = pos + 1
+ deprecated.append([index + 1, time, line[start:pos].strip()])
+ index = index + 1
+ print("Marked as " + macro + ": " + str(len(deprecated)))
+ for linenumber, time, content in deprecated:
+ print(str(linenumber).rjust(8) + " : " + str(time) + " : " + content)
+ return len(deprecated)
+
+def ParseOptions(args):
+ parser = argparse.ArgumentParser(description="Collect deprecation statistics")
+ parser.add_argument("file_path", help="Path to v8.h")
+  parser.add_argument("--before", help="Filter by date (YYYY-MM-DD)")
+ options = parser.parse_args(args)
+ if options.before:
+ options.before = datetime.strptime(options.before, '%Y-%m-%d')
+ else:
+ options.before = datetime.now()
+ return options
+
+def Main(args):
+ options = ParseOptions(args)
+ blame_list = GetBlame(options.file_path)
+ FilterAndPrint(blame_list, "V8_DEPRECATE_SOON", options.before)
+ FilterAndPrint(blame_list, "V8_DEPRECATED", options.before)
+
+if __name__ == "__main__":
+ Main(sys.argv[1:])
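Note on deprecation_stats.py above: GetBlame() walks `git blame -t --line-porcelain` output, matching the 40-hex commit header, the author-time record, and the filename record; FilterAndPrint() then reports every V8_DEPRECATE_SOON / V8_DEPRECATED use blamed before the --before cut-off, balancing parentheses across line breaks. A small sketch of the author-time record it consumes (timestamp made up; the script itself uses local time rather than UTC):

    import re
    from datetime import datetime, timezone

    RE_AUTHOR_TIME = re.compile(r"^author-time (\d+)$")  # as in the script

    match = RE_AUTHOR_TIME.match("author-time 1514764800")
    when = datetime.fromtimestamp(int(match.group(1)), tz=timezone.utc)
    assert when.date().isoformat() == "2018-01-01"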
diff --git a/deps/v8/tools/dev/gm.py b/deps/v8/tools/dev/gm.py
index 178ffdb964..e7a4a239e0 100755
--- a/deps/v8/tools/dev/gm.py
+++ b/deps/v8/tools/dev/gm.py
@@ -10,10 +10,11 @@ Uses Goma by default if it is detected (at output directory setup time).
Expects to be run from the root of a V8 checkout.
Usage:
- gm.py [<arch>].[<mode>].[<target>] [testname...]
+ gm.py [<arch>].[<mode>[-<suffix>]].[<target>] [testname...]
All arguments are optional. Most combinations should work, e.g.:
- gm.py ia32.debug x64.release d8
+ gm.py ia32.debug x64.release x64.release-my-custom-opts d8
+ gm.py android_arm.release.check
gm.py x64 mjsunit/foo cctest/test-bar/*
"""
# See HELP below for additional documentation.
@@ -21,17 +22,20 @@ All arguments are optional. Most combinations should work, e.g.:
from __future__ import print_function
import errno
import os
-import pty
import re
import subprocess
import sys
+USE_PTY = "linux" in sys.platform
+if USE_PTY:
+ import pty
+
BUILD_TARGETS_TEST = ["d8", "cctest", "unittests"]
BUILD_TARGETS_ALL = ["all"]
# All arches that this script understands.
ARCHES = ["ia32", "x64", "arm", "arm64", "mipsel", "mips64el", "ppc", "ppc64",
- "s390", "s390x"]
+ "s390", "s390x", "android_arm", "android_arm64"]
# Arches that get built/run when you don't specify any.
DEFAULT_ARCHES = ["ia32", "x64", "arm", "arm64"]
# Modes that this script understands.
@@ -202,6 +206,16 @@ def GetPath(arch, mode):
subdir = "%s.%s" % (arch, mode)
return os.path.join(OUTDIR, subdir)
+def PrepareMksnapshotCmdline(orig_cmdline, path):
+ result = "gdb --args %s/mksnapshot " % path
+ for w in orig_cmdline.split(" "):
+ if w.startswith("gen/") or w.startswith("snapshot_blob"):
+ result += ("%(path)s%(sep)s%(arg)s " %
+ {"path": path, "sep": os.sep, "arg": w})
+ else:
+ result += "%s " % w
+ return result
+
class Config(object):
def __init__(self, arch, mode, targets, tests=[]):
self.arch = arch
@@ -214,51 +228,64 @@ class Config(object):
self.tests.update(tests)
def GetTargetCpu(self):
+ if self.arch == "android_arm": return "target_cpu = \"arm\""
+ if self.arch == "android_arm64": return "target_cpu = \"arm64\""
cpu = "x86"
if "64" in self.arch or self.arch == "s390x":
cpu = "x64"
return "target_cpu = \"%s\"" % cpu
def GetV8TargetCpu(self):
+ if self.arch == "android_arm": return "\nv8_target_cpu = \"arm\""
+ if self.arch == "android_arm64": return "\nv8_target_cpu = \"arm64\""
if self.arch in ("arm", "arm64", "mipsel", "mips64el", "ppc", "ppc64",
"s390", "s390x"):
return "\nv8_target_cpu = \"%s\"" % self.arch
return ""
+ def GetTargetOS(self):
+ if self.arch in ("android_arm", "android_arm64"):
+ return "\ntarget_os = \"android\""
+ return ""
+
def GetGnArgs(self):
- template = ARGS_TEMPLATES[self.mode]
- arch_specific = self.GetTargetCpu() + self.GetV8TargetCpu()
+ # Use only substring before first '-' as the actual mode
+ mode = re.match("([^-]+)", self.mode).group(1)
+ template = ARGS_TEMPLATES[mode]
+ arch_specific = (self.GetTargetCpu() + self.GetV8TargetCpu() +
+ self.GetTargetOS())
return template % arch_specific
def Build(self):
path = GetPath(self.arch, self.mode)
args_gn = os.path.join(path, "args.gn")
+ build_ninja = os.path.join(path, "build.ninja")
if not os.path.exists(path):
print("# mkdir -p %s" % path)
os.makedirs(path)
if not os.path.exists(args_gn):
_Write(args_gn, self.GetGnArgs())
+ if not os.path.exists(build_ninja):
code = _Call("gn gen %s" % path)
if code != 0: return code
targets = " ".join(self.targets)
# The implementation of mksnapshot failure detection relies on
# the "pty" module and GDB presence, so skip it on non-Linux.
- if "linux" not in sys.platform:
+ if not USE_PTY:
return _Call("autoninja -C %s %s" % (path, targets))
return_code, output = _CallWithOutput("autoninja -C %s %s" %
(path, targets))
- if return_code != 0 and "FAILED: snapshot_blob.bin" in output:
+ if return_code != 0 and "FAILED:" in output and "snapshot_blob" in output:
csa_trap = re.compile("Specify option( --csa-trap-on-node=[^ ]*)")
match = csa_trap.search(output)
extra_opt = match.group(1) if match else ""
+ cmdline = re.compile("python ../../tools/run.py ./mksnapshot (.*)")
+ match = cmdline.search(output)
+ cmdline = PrepareMksnapshotCmdline(match.group(1), path) + extra_opt
_Notify("V8 build requires your attention",
"Detected mksnapshot failure, re-running in GDB...")
- _Call("gdb -args %(path)s/mksnapshot "
- "--startup_src %(path)s/gen/snapshot.cc "
- "--random-seed 314159265 "
- "--startup-blob %(path)s/snapshot_blob.bin"
- "%(extra)s"% {"path": path, "extra": extra_opt})
+ _Call(cmdline)
return return_code
def RunTests(self):
@@ -267,8 +294,8 @@ class Config(object):
tests = ""
else:
tests = " ".join(self.tests)
- return _Call("tools/run-tests.py --arch=%s --mode=%s %s" %
- (self.arch, self.mode, tests))
+ return _Call("tools/run-tests.py --outdir=%s %s" %
+ (GetPath(self.arch, self.mode), tests))
def GetTestBinary(argstring):
for suite in TESTSUITES_TARGETS:
@@ -336,6 +363,8 @@ class ArgumentParser(object):
targets.append(word)
elif word in ACTIONS:
actions.append(word)
+ elif any(map(lambda x: word.startswith(x + "-"), MODES)):
+ modes.append(word)
else:
print("Didn't understand: %s" % word)
sys.exit(1)
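Note on the gm.py changes above: a mode may now carry a custom suffix (e.g. release-my-custom-opts); only the part before the first '-' selects the GN args template, while the full string still names the output directory, so differently-configured builds of the same mode can coexist. A sketch of the suffix-stripping rule:

    import re

    def base_mode(mode):
        # "release-my-custom-opts" -> "release"; plain "release" is unchanged.
        return re.match("([^-]+)", mode).group(1)

    assert base_mode("release-my-custom-opts") == "release"
    assert base_mode("debug") == "debug"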
diff --git a/deps/v8/tools/gdbinit b/deps/v8/tools/gdbinit
index cea0f07b1e..5e98d92d6f 100644
--- a/deps/v8/tools/gdbinit
+++ b/deps/v8/tools/gdbinit
@@ -2,7 +2,7 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-# Print HeapObjects.
+# Print tagged object.
define job
call _v8_internal_Print_Object((void*)($arg0))
end
@@ -11,7 +11,16 @@ Print a v8 JavaScript object
Usage: job tagged_ptr
end
-# Print v8::Local handle value.
+# Print content of v8::internal::Handle.
+define jh
+call _v8_internal_Print_Object(*((v8::internal::Object**)($arg0).location_))
+end
+document jh
+Print content of a v8::internal::Handle
+Usage: jh internal_handle
+end
+
+# Print content of v8::Local handle.
define jlh
call _v8_internal_Print_Object(*((v8::internal::Object**)($arg0).val_))
end
@@ -128,17 +137,29 @@ set disable-randomization off
# immediately at the line of code that triggered the DCHECK.
python
def dcheck_stop_handler(event):
- orig_frame = gdb.selected_frame()
- frame = orig_frame
+ frame = gdb.selected_frame()
select_frame = None
- while frame is not None:
- if frame.name() in ('V8_Dcheck', 'V8_Fatal'):
+ message = None
+ count = 0
+  # Limit the scan: the frames we want are usually near the top, and
+  # walking deep stacks (e.g. after a stack overflow) can be very slow.
+ while frame is not None and count < 5:
+ count += 1
+ if frame.name() == 'V8_Dcheck':
+ frame_message = gdb.lookup_symbol('message', frame.block())[0]
+ if frame_message:
+ message = frame_message.value(frame).string()
+ select_frame = frame.older()
+ break
+ if frame.name() is not None and frame.name().startswith('V8_Fatal'):
select_frame = frame.older()
frame = frame.older()
if select_frame is not None:
select_frame.select()
gdb.execute('frame')
+ if message:
+ print('DCHECK error: {}'.format(message))
gdb.events.stop.connect(dcheck_stop_handler)
end
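Note on the gdbinit change above: the handler scans at most five frames; a V8_Dcheck frame wins and supplies its local `message` string, otherwise the last V8_Fatal* frame seen decides which caller frame is selected. A standalone sketch of that selection order (plain dicts stand in for gdb frames; illustration only):

    def select_after_abort(frames, limit=5):
        message, selected = None, None
        for frame in frames[:limit]:
            name = frame.get("name")
            if name == "V8_Dcheck":
                return frame.get("message"), frame.get("older")
            if name is not None and name.startswith("V8_Fatal"):
                selected = frame.get("older")
        return message, selected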
diff --git a/deps/v8/tools/gen-keywords-gen-h.py b/deps/v8/tools/gen-keywords-gen-h.py
new file mode 100755
index 0000000000..02750dc109
--- /dev/null
+++ b/deps/v8/tools/gen-keywords-gen-h.py
@@ -0,0 +1,252 @@
+#!/usr/bin/env python
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+import sys
+import subprocess
+import re
+import math
+
+INPUT_PATH = "src/parsing/keywords.txt"
+OUTPUT_PATH = "src/parsing/keywords-gen.h"
+
+# TODO(leszeks): Trimming seems to regress performance, investigate.
+TRIM_CHAR_TABLE = False
+
+
+def next_power_of_2(x):
+ return 1 if x == 0 else 2**int(math.ceil(math.log(x, 2)))
+
+
+def call_with_input(cmd, input_string=""):
+ p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
+ stdout, _ = p.communicate(input_string)
+ retcode = p.wait()
+ if retcode != 0:
+ raise subprocess.CalledProcessError(retcode, cmd)
+ return stdout
+
+
+def checked_sub(pattern, sub, out, count=1, flags=0):
+ out, n = re.subn(pattern, sub, out, flags=flags)
+ if n != count:
+ raise Exception("Didn't get exactly %d replacement(s) for pattern: %s" %
+ (count, pattern))
+ return out
+
+
+def change_sizet_to_int(out):
+ # Literal buffer lengths are given as ints, not size_t
+ return checked_sub(r'\bsize_t\b', 'int', out, count=4)
+
+
+def drop_line_directives(out):
+  # '#line' directives cause gcov issues, so drop them.
+ return re.sub(r'^#\s*line .*$\n', '', out, flags=re.MULTILINE)
+
+
+def trim_and_dcheck_char_table(out):
+ # Potential keyword strings are known to be lowercase ascii, so chop off the
+ # rest of the table and mask out the char
+
+ reads_re = re.compile(
+ r'asso_values\[static_cast<unsigned char>\(str\[(\d+)\]\)\]')
+
+ dchecks = []
+ for str_read in reads_re.finditer(out):
+ dchecks.append("DCHECK_LT(str[%d], 128);" % int(str_read.group(1)))
+
+ if TRIM_CHAR_TABLE:
+ out = checked_sub(
+ r'static const unsigned char asso_values\[\]\s*=\s*\{(\s*\d+\s*,){96}',
+ "".join(dchecks) + r'static const unsigned char asso_values[32] = {',
+ out,
+ flags=re.MULTILINE)
+ out = checked_sub(
+ reads_re.pattern,
+ r'asso_values[static_cast<unsigned char>(str[(\1)]&31)]',
+ out,
+ count=len(dchecks),
+ flags=re.MULTILINE)
+ else:
+ out = checked_sub(
+ r'static const unsigned char asso_values\[\]\s*=\s*\{',
+ "".join(dchecks) + r'static const unsigned char asso_values[128] = {',
+ out,
+ flags=re.MULTILINE)
+
+ return out
+
+
+def use_isinrange(out):
+ # Our IsInRange method is more efficient than checking for min/max length
+ return checked_sub(r'if \(len <= MAX_WORD_LENGTH && len >= MIN_WORD_LENGTH\)',
+ r'if (IsInRange(len, MIN_WORD_LENGTH, MAX_WORD_LENGTH))',
+ out)
+
+
+def pad_tables(out):
+ # We don't want to compare against the max hash value, so pad the tables up
+ # to a power of two and mask the hash.
+
+ # First get the new size
+ max_hash_value = int(re.search(r'MAX_HASH_VALUE\s*=\s*(\d+)', out).group(1))
+ old_table_length = max_hash_value + 1
+ new_table_length = next_power_of_2(old_table_length)
+ table_padding_len = new_table_length - old_table_length
+
+ # Pad the length table.
+ single_lengthtable_entry = r'\d+'
+ out = checked_sub(
+ r"""
+ static\ const\ unsigned\ char\ kPerfectKeywordLengthTable\[\]\s*=\s*\{
+ (
+ \s*%(single_lengthtable_entry)s\s*
+ (?:,\s*%(single_lengthtable_entry)s\s*)*
+ )
+ \}
+ """ % {'single_lengthtable_entry': single_lengthtable_entry},
+ r'static const unsigned char kPerfectKeywordLengthTable[%d] = { \1 %s }'
+ % (new_table_length, "".join([',0'] * table_padding_len)),
+ out,
+ flags=re.MULTILINE | re.VERBOSE)
+
+ # Pad the word list.
+ single_wordlist_entry = r"""
+ (?:\#line\ \d+\ ".*"$\s*)?
+ \{\s*"[a-z]*"\s*,\s*Token::[A-Z_]+\}
+ """
+ out = checked_sub(
+ r"""
+ static\ const\ struct\ PerfectKeywordHashTableEntry\ kPerfectKeywordHashTable\[\]\s*=\s*\{
+ (
+ \s*%(single_wordlist_entry)s\s*
+ (?:,\s*%(single_wordlist_entry)s\s*)*
+ )
+ \}
+ """ % {'single_wordlist_entry': single_wordlist_entry},
+ r'static const struct PerfectKeywordHashTableEntry kPerfectKeywordHashTable[%d] = {\1 %s }'
+ % (new_table_length, "".join(
+ [',{"",Token::IDENTIFIER}'] * table_padding_len)),
+ out,
+ flags=re.MULTILINE | re.VERBOSE)
+
+ # Mask the hash and replace the range check with DCHECKs.
+ out = checked_sub(r'Hash\s*\(\s*str,\s*len\s*\)',
+ r'Hash(str, len)&0x%x' % (new_table_length - 1), out)
+ out = checked_sub(
+ r'if \(key <= MAX_HASH_VALUE\)',
+ r'DCHECK_LT(key, arraysize(kPerfectKeywordLengthTable));DCHECK_LT(key, arraysize(kPerfectKeywordHashTable));',
+ out)
+
+ return out
+
+
+def return_token(out):
+ # We want to return the actual token rather than the table entry.
+
+ # Change the return type of the function. Make it inline too.
+ out = checked_sub(
+ r'const\s*struct\s*PerfectKeywordHashTableEntry\s*\*\s*((?:PerfectKeywordHash::)?GetToken)',
+ r'inline Token::Value \1',
+ out,
+ count=2)
+
+ # Change the return value when the keyword is found
+ out = checked_sub(r'return &kPerfectKeywordHashTable\[key\];',
+ r'return kPerfectKeywordHashTable[key].value;', out)
+
+ # Change the return value when the keyword is not found
+ out = checked_sub(r'return 0;', r'return Token::IDENTIFIER;', out)
+
+ return out
+
+
+def memcmp_to_while(out):
+ # It's faster to loop over the keyword with a while loop than calling memcmp.
+  # Careful: this replacement is somewhat fragile, because a more robust
+  # regex would be unreadable.
+ return checked_sub(
+ re.escape("if (*str == *s && !memcmp (str + 1, s + 1, len - 1))") + r"\s*"
+ + re.escape("return kPerfectKeywordHashTable[key].value;"),
+ """
+ while(*s!=0) {
+ if (*s++ != *str++) return Token::IDENTIFIER;
+ }
+ return kPerfectKeywordHashTable[key].value;
+ """,
+ out,
+ flags=re.MULTILINE)
+
+
+def wrap_namespace(out):
+ return """// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is automatically generated by gen-keywords-gen-h.py and should not
+// be modified manually.
+
+#ifndef V8_PARSING_KEYWORDS_GEN_H_
+#define V8_PARSING_KEYWORDS_GEN_H_
+
+#include "src/parsing/token.h"
+
+namespace v8 {
+namespace internal {
+
+%s
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_PARSING_KEYWORDS_GEN_H_
+""" % (out)
+
+
+def trim_character_set_warning(out):
+  # gperf generates an error message that is too long; trim it.
+
+ return out.replace(
+ '"gperf generated tables don\'t work with this execution character set. Please report a bug to <bug-gperf@gnu.org>."',
+ '"gperf generated tables don\'t work with this execution character set."\\\n// If you see this error, please report a bug to <bug-gperf@gnu.org>.'
+ )
+
+
+def main():
+ try:
+ script_dir = os.path.dirname(sys.argv[0])
+ root_dir = os.path.join(script_dir, '..')
+
+ out = subprocess.check_output(["gperf", "-m100", INPUT_PATH], cwd=root_dir)
+
+ # And now some munging of the generated file.
+ out = change_sizet_to_int(out)
+ out = drop_line_directives(out)
+ out = trim_and_dcheck_char_table(out)
+ out = use_isinrange(out)
+ out = pad_tables(out)
+ out = return_token(out)
+ out = memcmp_to_while(out)
+ out = wrap_namespace(out)
+ out = trim_character_set_warning(out)
+
+ # Final formatting.
+ clang_format_path = os.path.join(root_dir,
+ 'third_party/depot_tools/clang-format')
+ out = call_with_input([clang_format_path], out)
+
+ with open(os.path.join(root_dir, OUTPUT_PATH), 'w') as f:
+ f.write(out)
+
+ return 0
+
+ except subprocess.CalledProcessError as e:
+ sys.stderr.write("Error calling '{}'\n".format(" ".join(e.cmd)))
+ return e.returncode
+
+
+if __name__ == '__main__':
+ sys.exit(main())
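Note on gen-keywords-gen-h.py above: the most interesting munging step is pad_tables(), which grows both gperf tables to a power-of-two length so the bounds check `key <= MAX_HASH_VALUE` can become a cheap bit mask on the hash. A sketch of that idea (max_hash_value is a hypothetical gperf output):

    import math

    def next_power_of_2(x):  # as defined in the script
        return 1 if x == 0 else 2**int(math.ceil(math.log(x, 2)))

    max_hash_value = 52                               # hypothetical
    table_len = next_power_of_2(max_hash_value + 1)   # 64
    mask = table_len - 1                              # 0x3f
    assert (52 & mask) == 52  # every valid key stays in range after masking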
diff --git a/deps/v8/tools/gen-postmortem-metadata.py b/deps/v8/tools/gen-postmortem-metadata.py
index b98a92d266..af6e2f3cb4 100644
--- a/deps/v8/tools/gen-postmortem-metadata.py
+++ b/deps/v8/tools/gen-postmortem-metadata.py
@@ -94,8 +94,6 @@ consts_misc = [
{ 'name': 'OddballOther', 'value': 'Oddball::kOther' },
{ 'name': 'OddballException', 'value': 'Oddball::kException' },
- { 'name': 'prop_idx_first',
- 'value': 'DescriptorArray::kFirstIndex' },
{ 'name': 'prop_kind_Data',
'value': 'kData' },
{ 'name': 'prop_kind_Accessor',
@@ -192,10 +190,10 @@ consts_misc = [
{ 'name': 'scopeinfo_idx_first_vars',
'value': 'ScopeInfo::kVariablePartIndex' },
- { 'name': 'jsarray_buffer_was_neutered_mask',
- 'value': 'JSArrayBuffer::WasNeuteredBit::kMask' },
- { 'name': 'jsarray_buffer_was_neutered_shift',
- 'value': 'JSArrayBuffer::WasNeuteredBit::kShift' },
+ { 'name': 'jsarray_buffer_was_detached_mask',
+ 'value': 'JSArrayBuffer::WasDetachedBit::kMask' },
+ { 'name': 'jsarray_buffer_was_detached_shift',
+ 'value': 'JSArrayBuffer::WasDetachedBit::kShift' },
{ 'name': 'context_idx_scope_info',
'value': 'Context::SCOPE_INFO_INDEX' },
@@ -207,8 +205,8 @@ consts_misc = [
'value': 'Context::EXTENSION_INDEX' },
{ 'name': 'context_min_slots',
'value': 'Context::MIN_CONTEXT_SLOTS' },
- { 'name': 'context_idx_embedder_data',
- 'value': 'Internals::kContextEmbedderDataIndex' },
+ { 'name': 'native_context_embedder_data_offset',
+ 'value': 'Internals::kNativeContextEmbedderDataOffset' },
{ 'name': 'namedictionaryshape_prefix_size',
@@ -232,6 +230,9 @@ consts_misc = [
'value': 'SimpleNumberDictionaryShape::kEntrySize' },
{ 'name': 'type_JSError__JS_ERROR_TYPE', 'value': 'JS_ERROR_TYPE' },
+
+ { 'name': 'class_SharedFunctionInfo__function_data__Object',
+ 'value': 'SharedFunctionInfo::kFunctionDataOffset' },
];
#
@@ -244,6 +245,7 @@ consts_misc = [
#
extras_accessors = [
'JSFunction, context, Context, kContextOffset',
+ 'JSFunction, shared, SharedFunctionInfo, kSharedFunctionInfoOffset',
'HeapObject, map, Map, kMapOffset',
'JSObject, elements, Object, kElementsOffset',
'JSObject, internal_fields, uintptr_t, kHeaderSize',
@@ -288,7 +290,7 @@ extras_accessors = [
expected_classes = [
'ConsString', 'FixedArray', 'HeapNumber', 'JSArray', 'JSFunction',
'JSObject', 'JSRegExp', 'JSValue', 'Map', 'Oddball', 'Script',
- 'SeqOneByteString', 'SharedFunctionInfo', 'ScopeInfo'
+ 'SeqOneByteString', 'SharedFunctionInfo', 'ScopeInfo', 'JSPromise'
];
@@ -311,6 +313,7 @@ header = '''
#include "src/frames-inl.h" /* for architecture-specific frame constants */
#include "src/contexts.h"
#include "src/objects.h"
+#include "src/objects/js-promise.h"
#include "src/objects/js-regexp-string-iterator.h"
using namespace v8::internal;
@@ -517,7 +520,8 @@ def parse_field(call):
consts = [];
- if (kind == 'ACCESSORS' or kind == 'ACCESSORS_GCSAFE'):
+ if (kind == 'ACCESSORS' or kind == 'ACCESSORS2' or
+ kind == 'ACCESSORS_GCSAFE'):
klass = args[0];
field = args[1];
dtype = args[2].replace('<', '_').replace('>', '_')
@@ -560,7 +564,7 @@ def load_fields_from_file(filename):
# may span multiple lines and may contain nested parentheses. We also
# call parse_field() to pick apart the invocation.
#
- prefixes = [ 'ACCESSORS', 'ACCESSORS_GCSAFE',
+ prefixes = [ 'ACCESSORS', 'ACCESSORS2', 'ACCESSORS_GCSAFE',
'SMI_ACCESSORS', 'ACCESSORS_TO_SMI' ];
current = '';
opens = 0;
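Note on gen-postmortem-metadata.py above: besides the neutered-to-detached rename, the parser now also recognizes ACCESSORS2 macro invocations, so fields declared with the newer macro still end up in the postmortem constants. A simplified sketch of the prefix match (not the generator's actual scanner; the sample line mirrors the JSFunction entry added above):

    import re

    prefixes = ['ACCESSORS', 'ACCESSORS2', 'ACCESSORS_GCSAFE',
                'SMI_ACCESSORS', 'ACCESSORS_TO_SMI']
    pattern = re.compile(r'\b(%s)\(' % '|'.join(prefixes))

    line = ('ACCESSORS2(JSFunction, shared, SharedFunctionInfo, '
            'kSharedFunctionInfoOffset)')
    assert pattern.search(line).group(1) == 'ACCESSORS2'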
diff --git a/deps/v8/tools/generate-header-include-checks.py b/deps/v8/tools/generate-header-include-checks.py
index 7ff52dd740..511d03c7ba 100755
--- a/deps/v8/tools/generate-header-include-checks.py
+++ b/deps/v8/tools/generate-header-include-checks.py
@@ -28,26 +28,14 @@ OUT_DIR = os.path.join(V8_DIR, 'check-header-includes')
AUTO_EXCLUDE = [
# flag-definitions.h needs a mode set for being included.
'src/flag-definitions.h',
- # blacklist of headers we need to fix (https://crbug.com/v8/7965).
- 'src/allocation-site-scopes.h',
- 'src/compiler/allocation-builder.h',
- 'src/compiler/js-context-specialization.h',
- 'src/compiler/raw-machine-assembler.h',
- 'src/dateparser-inl.h',
- 'src/ic/ic.h',
- 'src/lookup.h',
- 'src/parsing/parser.h',
- 'src/parsing/preparser.h',
- 'src/regexp/jsregexp.h',
- 'src/snapshot/object-deserializer.h',
- 'src/transitions.h',
]
AUTO_EXCLUDE_PATTERNS = [
'src/base/atomicops_internals_.*',
] + [
# platform-specific headers
'\\b{}\\b'.format(p) for p in
- ('win32', 'ia32', 'x64', 'arm', 'arm64', 'mips', 'mips64', 's390', 'ppc')]
+ ('win', 'win32', 'ia32', 'x64', 'arm', 'arm64', 'mips', 'mips64', 's390',
+ 'ppc')]
args = None
def parse_args():
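Note on generate-header-include-checks.py above: the per-file exclusion list is gone (those headers were presumably fixed under https://crbug.com/v8/7965), and 'win' joins the platform tokens, so any header path containing it as a whole word is skipped. A small sketch of the whole-word match (the path is hypothetical):

    import re

    patterns = [r"\b{}\b".format(p) for p in ("win", "win32", "ia32")]
    path = "src/foo/bar-win.h"  # hypothetical platform-specific header
    assert any(re.search(p, path) for p in patterns)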
diff --git a/deps/v8/tools/heap-stats/categories.js b/deps/v8/tools/heap-stats/categories.js
index 63b99aae7e..b94a534896 100644
--- a/deps/v8/tools/heap-stats/categories.js
+++ b/deps/v8/tools/heap-stats/categories.js
@@ -34,6 +34,7 @@ const CATEGORIES = new Map([
'INTERNALIZED_STRING_TYPE',
'JS_ARGUMENTS_TYPE',
'JS_ARRAY_BUFFER_TYPE',
+ 'JS_ARRAY_ITERATOR_TYPE',
'JS_ARRAY_TYPE',
'JS_BOUND_FUNCTION_TYPE',
'JS_DATE_TYPE',
@@ -49,13 +50,26 @@ const CATEGORIES = new Map([
'JS_GENERIC_ARRAY_VALUE_ITERATOR_TYPE',
'JS_GLOBAL_OBJECT_TYPE',
'JS_GLOBAL_PROXY_TYPE',
+ 'JS_INTL_COLLATOR_TYPE',
+ 'JS_INTL_DATE_TIME_FORMAT_TYPE',
+ 'JS_INTL_LIST_FORMAT_TYPE',
+ 'JS_INTL_LOCALE_TYPE',
+ 'JS_INTL_NUMBER_FORMAT_TYPE',
+ 'JS_INTL_PLURAL_RULES_TYPE',
+ 'JS_INTL_RELATIVE_TIME_FORMAT_TYPE',
+ 'JS_INTL_SEGMENT_ITERATOR_TYPE',
+ 'JS_INTL_SEGMENTER_TYPE',
+ 'JS_INTL_V8_BREAK_ITERATOR_TYPE',
+ 'JS_MAP_KEY_ITERATOR_TYPE',
'JS_MAP_KEY_VALUE_ITERATOR_TYPE',
'JS_MAP_TYPE',
'JS_MAP_VALUE_ITERATOR_TYPE',
'JS_MESSAGE_OBJECT_TYPE',
'JS_OBJECT_TYPE',
'JS_PROMISE_TYPE',
+ 'JS_PROXY_TYPE',
'JS_REGEXP_TYPE',
+ 'JS_SET_KEY_VALUE_ITERATOR_TYPE',
'JS_SET_TYPE',
'JS_SET_VALUE_ITERATOR_TYPE',
'JS_STRING_ITERATOR_TYPE',
@@ -70,10 +84,6 @@ const CATEGORIES = new Map([
'ONE_BYTE_STRING_TYPE',
'OTHER_CONTEXT_TYPE',
'PROPERTY_ARRAY_TYPE',
- 'UNCACHED_EXTERNAL_INTERNALIZED_STRING_TYPE',
- 'UNCACHED_EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE',
- 'UNCACHED_EXTERNAL_ONE_BYTE_STRING_TYPE',
- 'UNCACHED_EXTERNAL_STRING_TYPE',
'SLICED_ONE_BYTE_STRING_TYPE',
'SLICED_STRING_TYPE',
'STRING_EXTERNAL_RESOURCE_ONE_BYTE_TYPE',
@@ -82,6 +92,10 @@ const CATEGORIES = new Map([
'SYMBOL_TYPE',
'THIN_ONE_BYTE_STRING_TYPE',
'THIN_STRING_TYPE',
+ 'UNCACHED_EXTERNAL_INTERNALIZED_STRING_TYPE',
+ 'UNCACHED_EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE',
+ 'UNCACHED_EXTERNAL_ONE_BYTE_STRING_TYPE',
+ 'UNCACHED_EXTERNAL_STRING_TYPE',
'WASM_INSTANCE_TYPE',
'WASM_MEMORY_TYPE',
'WASM_MODULE_TYPE',
@@ -166,7 +180,7 @@ const CATEGORIES = new Map([
'NOSCRIPT_SHARED_FUNCTION_INFOS_TYPE',
'OPTIMIZED_CODE_LITERALS_TYPE',
'OPTIMIZED_FUNCTION',
- 'PRE_PARSED_SCOPE_DATA_TYPE',
+ 'PREPARSE_DATA_TYPE',
'REGEXP',
'RELOC_INFO_TYPE',
'SCRIPT_SOURCE_EXTERNAL_ONE_BYTE_TYPE',
@@ -179,8 +193,8 @@ const CATEGORIES = new Map([
'SOURCE_POSITION_TABLE_TYPE',
'STORE_HANDLER_TYPE',
'STUB',
- 'UNCOMPILED_DATA_WITHOUT_PRE_PARSED_SCOPE_TYPE',
- 'UNCOMPILED_DATA_WITH_PRE_PARSED_SCOPE_TYPE',
+ 'UNCOMPILED_DATA_WITHOUT_PREPARSE_DATA_TYPE',
+ 'UNCOMPILED_DATA_WITH_PREPARSE_DATA_TYPE',
'UNCOMPILED_JS_FUNCTION_TYPE',
'UNCOMPILED_SHARED_FUNCTION_INFO_TYPE'
])
diff --git a/deps/v8/tools/heap-stats/index.html b/deps/v8/tools/heap-stats/index.html
index 648a38c798..11fac21a3d 100644
--- a/deps/v8/tools/heap-stats/index.html
+++ b/deps/v8/tools/heap-stats/index.html
@@ -32,7 +32,6 @@ body {
</style>
<script>
-
'use strict';
google.charts.load('current', {'packages':['line', 'corechart', 'bar']});
diff --git a/deps/v8/tools/heap-stats/trace-file-reader.html b/deps/v8/tools/heap-stats/trace-file-reader.html
index 649d32bb40..c5e5c6f04a 100644
--- a/deps/v8/tools/heap-stats/trace-file-reader.html
+++ b/deps/v8/tools/heap-stats/trace-file-reader.html
@@ -60,12 +60,12 @@ found in the LICENSE file. -->
}
@keyframes spin {
- 0% {
+ 0% {
transform: rotate(0deg);
- };
- 100% {
+ }
+ 100% {
transform: rotate(360deg);
- };
+ }
}
</style>
diff --git a/deps/v8/tools/heap-stats/trace-file-reader.js b/deps/v8/tools/heap-stats/trace-file-reader.js
index 5c244a5e92..4fec9a1cb9 100644
--- a/deps/v8/tools/heap-stats/trace-file-reader.js
+++ b/deps/v8/tools/heap-stats/trace-file-reader.js
@@ -78,7 +78,7 @@ class TraceFileReader extends HTMLElement {
}
};
// Delay the loading a bit to allow for CSS animations to happen.
- setTimeout(() => reader.readAsArrayBuffer(file), 10);
+ setTimeout(() => reader.readAsArrayBuffer(file), 0);
} else {
reader.onload = (e) => {
try {
@@ -90,7 +90,8 @@ class TraceFileReader extends HTMLElement {
this.section.className = 'failure';
}
};
- setTimeout(() => reader.readAsText(file), 10);
+ // Delay the loading a bit to allow for CSS animations to happen.
+ setTimeout(() => reader.readAsText(file), 0);
}
}
diff --git a/deps/v8/tools/ic-explorer.html b/deps/v8/tools/ic-explorer.html
index f60a356dd4..aede91e0d0 100644
--- a/deps/v8/tools/ic-explorer.html
+++ b/deps/v8/tools/ic-explorer.html
@@ -1,3 +1,4 @@
+<!DOCTYPE html>
<html>
<!--
Copyright 2016 the V8 project authors. All rights reserved. Use of this source
@@ -5,6 +6,8 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
-->
<head>
+ <meta charset="utf-8">
+ <title>V8 IC explorer</title>
<style>
html {
font-family: monospace;
@@ -46,16 +49,16 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
padding: 0.5em 0 0.2em 0;
}
</style>
- <script src="./splaytree.js" type="text/javascript"></script>
- <script src="./codemap.js" type="text/javascript"></script>
- <script src="./csvparser.js" type="text/javascript"></script>
- <script src="./consarray.js" type="text/javascript"></script>
- <script src="./profile.js" type="text/javascript"></script>
- <script src="./profile_view.js" type="text/javascript"></script>
- <script src="./logreader.js" type="text/javascript"></script>
- <script src="./arguments.js" type="text/javascript"></script>
- <script src="./ic-processor.js" type="text/javascript"></script>
- <script src="./SourceMap.js" type="text/javascript"></script>
+ <script src="./splaytree.js"></script>
+ <script src="./codemap.js"></script>
+ <script src="./csvparser.js"></script>
+ <script src="./consarray.js"></script>
+ <script src="./profile.js"></script>
+ <script src="./profile_view.js"></script>
+ <script src="./logreader.js"></script>
+ <script src="./arguments.js"></script>
+ <script src="./ic-processor.js"></script>
+ <script src="./SourceMap.js"></script>
<script>
"use strict"
diff --git a/deps/v8/tools/ic-processor.js b/deps/v8/tools/ic-processor.js
index db1eef4295..a97fe0efff 100644
--- a/deps/v8/tools/ic-processor.js
+++ b/deps/v8/tools/ic-processor.js
@@ -45,6 +45,12 @@ function IcProcessor() {
processor: this.processCodeDelete },
'sfi-move': { parsers: [parseInt, parseInt],
processor: this.processFunctionMove },
+ 'LoadGlobalIC': {
+ parsers : propertyICParser,
+ processor: this.processPropertyIC.bind(this, "LoadGlobalIC") },
+ 'StoreGlobalIC': {
+ parsers : propertyICParser,
+ processor: this.processPropertyIC.bind(this, "StoreGlobalIC") },
'LoadIC': {
parsers : propertyICParser,
processor: this.processPropertyIC.bind(this, "LoadIC") },
@@ -63,6 +69,8 @@ function IcProcessor() {
});
this.profile_ = new Profile();
+ this.LoadGlobalIC = 0;
+ this.StoreGlobalIC = 0;
this.LoadIC = 0;
this.StoreIC = 0;
this.KeyedLoadIC = 0;
@@ -104,6 +112,8 @@ IcProcessor.prototype.processLogFile = function(fileName) {
}
print();
print("=====================");
+ print("LoadGlobal: " + this.LoadGlobalIC);
+ print("StoreGlobal: " + this.StoreGlobalIC);
print("Load: " + this.LoadIC);
print("Store: " + this.StoreIC);
print("KeyedLoad: " + this.KeyedLoadIC);
diff --git a/deps/v8/tools/js2c.py b/deps/v8/tools/js2c.py
index 0107436df6..d03151805d 100755
--- a/deps/v8/tools/js2c.py
+++ b/deps/v8/tools/js2c.py
@@ -33,7 +33,6 @@
import os, re
import optparse
-import jsmin
import textwrap
@@ -96,161 +95,6 @@ def ExpandConstants(lines, constants):
return lines
-def ExpandMacroDefinition(lines, pos, name_pattern, macro, expander):
- pattern_match = name_pattern.search(lines, pos)
- while pattern_match is not None:
- # Scan over the arguments
- height = 1
- start = pattern_match.start()
- end = pattern_match.end()
- assert lines[end - 1] == '('
- last_match = end
- arg_index = [0] # Wrap state into array, to work around Python "scoping"
- mapping = { }
- def add_arg(str):
- # Remember to expand recursively in the arguments
- if arg_index[0] >= len(macro.args):
- lineno = lines.count(os.linesep, 0, start) + 1
- raise Error('line %s: Too many arguments for macro "%s"' % (lineno, name_pattern.pattern))
- replacement = expander(str.strip())
- mapping[macro.args[arg_index[0]]] = replacement
- arg_index[0] += 1
- while end < len(lines) and height > 0:
- # We don't count commas at higher nesting levels.
- if lines[end] == ',' and height == 1:
- add_arg(lines[last_match:end])
- last_match = end + 1
- elif lines[end] in ['(', '{', '[']:
- height = height + 1
- elif lines[end] in [')', '}', ']']:
- height = height - 1
- end = end + 1
- # Remember to add the last match.
- add_arg(lines[last_match:end-1])
- if arg_index[0] < len(macro.args) -1:
- lineno = lines.count(os.linesep, 0, start) + 1
- raise Error('line %s: Too few arguments for macro "%s"' % (lineno, name_pattern.pattern))
- result = macro.expand(mapping)
- # Replace the occurrence of the macro with the expansion
- lines = lines[:start] + result + lines[end:]
- pattern_match = name_pattern.search(lines, start + len(result))
- return lines
-
-def ExpandMacros(lines, macros):
- # We allow macros to depend on the previously declared macros, but
-  # we don't allow self-dependencies or recursion.
- for name_pattern, macro in reversed(macros):
- def expander(s):
- return ExpandMacros(s, macros)
- lines = ExpandMacroDefinition(lines, 0, name_pattern, macro, expander)
- return lines
-
-class TextMacro:
- def __init__(self, args, body):
- self.args = args
- self.body = body
- def expand(self, mapping):
- # Keys could be substrings of earlier values. To avoid unintended
- # clobbering, apply all replacements simultaneously.
- any_key_pattern = "|".join(re.escape(k) for k in mapping.iterkeys())
- def replace(match):
- return mapping[match.group(0)]
- return re.sub(any_key_pattern, replace, self.body)
-
-CONST_PATTERN = re.compile(r'^define\s+([a-zA-Z0-9_]+)\s*=\s*([^;]*);$')
-MACRO_PATTERN = re.compile(r'^macro\s+([a-zA-Z0-9_]+)\s*\(([^)]*)\)\s*=\s*([^;]*);$')
-
-
-def ReadMacros(lines):
- constants = []
- macros = []
- for line in lines.split('\n'):
- hash = line.find('#')
- if hash != -1: line = line[:hash]
- line = line.strip()
- if len(line) is 0: continue
- const_match = CONST_PATTERN.match(line)
- if const_match:
- name = const_match.group(1)
- value = const_match.group(2).strip()
- constants.append((re.compile("\\b%s\\b" % name), value))
- else:
- macro_match = MACRO_PATTERN.match(line)
- if macro_match:
- name = macro_match.group(1)
- args = [match.strip() for match in macro_match.group(2).split(',')]
- body = macro_match.group(3).strip()
- macros.append((re.compile("\\b%s\\(" % name), TextMacro(args, body)))
- else:
- raise Error("Illegal line: " + line)
- return (constants, macros)
-
-
-TEMPLATE_PATTERN = re.compile(r'^\s+T\(([A-Z][a-zA-Z0-9]*),')
-
-def ReadMessageTemplates(lines):
- templates = []
- index = 0
- for line in lines.split('\n'):
- template_match = TEMPLATE_PATTERN.match(line)
- if template_match:
- name = "k%s" % template_match.group(1)
- value = index
- index = index + 1
- templates.append((re.compile("\\b%s\\b" % name), value))
- return templates
-
-INLINE_MACRO_PATTERN = re.compile(r'macro\s+([a-zA-Z0-9_]+)\s*\(([^)]*)\)\s*\n')
-INLINE_MACRO_END_PATTERN = re.compile(r'endmacro\s*\n')
-
-def ExpandInlineMacros(lines):
- pos = 0
- while True:
- macro_match = INLINE_MACRO_PATTERN.search(lines, pos)
- if macro_match is None:
- # no more macros
- return lines
- name = macro_match.group(1)
- args = [match.strip() for match in macro_match.group(2).split(',')]
- end_macro_match = INLINE_MACRO_END_PATTERN.search(lines, macro_match.end());
- if end_macro_match is None:
- raise Error("Macro %s unclosed" % name)
- body = lines[macro_match.end():end_macro_match.start()]
-
- # remove macro definition
- lines = lines[:macro_match.start()] + lines[end_macro_match.end():]
- name_pattern = re.compile("\\b%s\\(" % name)
- macro = TextMacro(args, body)
-
- # advance position to where the macro definition was
- pos = macro_match.start()
-
- def non_expander(s):
- return s
- lines = ExpandMacroDefinition(lines, pos, name_pattern, macro, non_expander)
-
-
-INLINE_CONSTANT_PATTERN = re.compile(r'define\s+([a-zA-Z0-9_]+)\s*=\s*([^;\n]+);\n')
-
-def ExpandInlineConstants(lines):
- pos = 0
- while True:
- const_match = INLINE_CONSTANT_PATTERN.search(lines, pos)
- if const_match is None:
- # no more constants
- return lines
- name = const_match.group(1)
- replacement = const_match.group(2)
- name_pattern = re.compile("\\b%s\\b" % name)
-
- # remove constant definition and replace
- lines = (lines[:const_match.start()] +
- re.sub(name_pattern, replacement, lines[const_match.end():]))
-
- # advance position to where the constant definition was
- pos = const_match.start()
-
-
HEADER_TEMPLATE = """\
// Copyright 2011 Google Inc. All Rights Reserved.
@@ -273,11 +117,6 @@ namespace internal {
}
template <>
- int NativesCollection<%(type)s>::GetDebuggerCount() {
- return %(debugger_count)i;
- }
-
- template <>
int NativesCollection<%(type)s>::GetIndex(const char* name) {
%(get_index_cases)s\
return -1;
@@ -323,33 +162,16 @@ GET_SCRIPT_NAME_CASE = """\
"""
-def BuildFilterChain(macro_filename, message_template_file):
+def BuildFilterChain():
"""Build the chain of filter functions to be applied to the sources.
- Args:
- macro_filename: Name of the macro file, if any.
-
Returns:
A function (string -> string) that processes a source file.
"""
- filter_chain = []
-
- if macro_filename:
- (consts, macros) = ReadMacros(ReadFile(macro_filename))
- filter_chain.append(lambda l: ExpandMacros(l, macros))
- filter_chain.append(lambda l: ExpandConstants(l, consts))
-
- if message_template_file:
- message_templates = ReadMessageTemplates(ReadFile(message_template_file))
- filter_chain.append(lambda l: ExpandConstants(l, message_templates))
-
- filter_chain.extend([
+ filter_chain = [
RemoveCommentsEmptyLinesAndWhitespace,
- ExpandInlineMacros,
- ExpandInlineConstants,
Validate,
- jsmin.JavaScriptMinifier().JSMinify
- ])
+ ]
def chain(f1, f2):
return lambda x: f2(f1(x))
@@ -363,25 +185,12 @@ class Sources:
def __init__(self):
self.names = []
self.modules = []
- self.is_debugger_id = []
-
-
-def IsDebuggerFile(filename):
- return os.path.basename(os.path.dirname(filename)) == "debug"
-
-def IsMacroFile(filename):
- return filename.endswith("macros.py")
-
-def IsMessageTemplateFile(filename):
- return filename.endswith("messages.h")
-
def PrepareSources(source_files, native_type, emit_js):
"""Read, prepare and assemble the list of source files.
Args:
- source_files: List of JavaScript-ish source files. A file named macros.py
- will be treated as a list of macros.
+ source_files: List of JavaScript-ish source files.
native_type: String corresponding to a NativeType enum value, allowing us
to treat different types of sources differently.
emit_js: True if we should skip the byte conversion and just leave the
@@ -390,29 +199,7 @@ def PrepareSources(source_files, native_type, emit_js):
Returns:
An instance of Sources.
"""
- macro_file = None
- macro_files = filter(IsMacroFile, source_files)
- assert len(macro_files) in [0, 1]
- if macro_files:
- source_files.remove(macro_files[0])
- macro_file = macro_files[0]
-
- message_template_file = None
- message_template_files = filter(IsMessageTemplateFile, source_files)
- assert len(message_template_files) in [0, 1]
- if message_template_files:
- source_files.remove(message_template_files[0])
- message_template_file = message_template_files[0]
-
- filters = None
- if native_type in ("EXTRAS", "EXPERIMENTAL_EXTRAS"):
- filters = BuildExtraFilterChain()
- else:
- filters = BuildFilterChain(macro_file, message_template_file)
-
- # Sort 'debugger' sources first.
- source_files = sorted(source_files,
- lambda l,r: IsDebuggerFile(r) - IsDebuggerFile(l))
+ filters = BuildFilterChain()
source_files_and_contents = [(f, ReadFile(f)) for f in source_files]
@@ -433,9 +220,6 @@ def PrepareSources(source_files, native_type, emit_js):
result.modules.append(lines)
- is_debugger = IsDebuggerFile(source)
- result.is_debugger_id.append(is_debugger)
-
name = os.path.basename(source)[:-3]
result.names.append(name)
@@ -483,7 +267,6 @@ def BuildMetadata(sources, source_bytes, native_type):
metadata = {
"builtin_count": len(sources.modules),
- "debugger_count": sum(sources.is_debugger_id),
"sources_declaration": SOURCES_DECLARATION % ToCArray(source_bytes),
"total_length": total_length,
"get_index_cases": "".join(get_index_cases),
@@ -528,14 +311,8 @@ def WriteStartupBlob(sources, startup_blob):
"""
output = open(startup_blob, "wb")
- debug_sources = sum(sources.is_debugger_id);
- PutInt(output, debug_sources)
- for i in xrange(debug_sources):
- PutStr(output, sources.names[i]);
- PutStr(output, sources.modules[i]);
-
- PutInt(output, len(sources.names) - debug_sources)
- for i in xrange(debug_sources, len(sources.names)):
+ PutInt(output, len(sources.names))
+ for i in xrange(len(sources.names)):
PutStr(output, sources.names[i]);
PutStr(output, sources.modules[i]);
@@ -578,7 +355,7 @@ def main():
parser.set_usage("""js2c out.cc type sources.js ...
out.cc: C code to be generated.
type: type parameter for NativesCollection template.
- sources.js: JS internal sources or macros.py.""")
+ sources.js: JS internal sources.""")
(options, args) = parser.parse_args()
JS2C(args[2:],
args[0],
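Note on the js2c.py changes above: with macro expansion, message-template substitution, debugger-source ordering, and jsmin minification all removed, the filter chain reduces to comment/whitespace stripping plus validation, and the startup blob becomes a single count-then-entries sequence. A sketch of the reduced chain composition (pass bodies are stand-ins):

    from functools import reduce

    def strip_comments(src):  # stand-in for the real pass
        return src

    def validate(src):        # stand-in for the real pass
        return src

    def build_filter_chain():
        def chain(f1, f2):
            return lambda x: f2(f1(x))
        return reduce(chain, [strip_comments, validate])

    pipeline = build_filter_chain()
    assert pipeline("source") == "source"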
diff --git a/deps/v8/tools/jsfunfuzz/fuzz-harness.sh b/deps/v8/tools/jsfunfuzz/fuzz-harness.sh
index 8d064b286e..fa4f9d9127 100755
--- a/deps/v8/tools/jsfunfuzz/fuzz-harness.sh
+++ b/deps/v8/tools/jsfunfuzz/fuzz-harness.sh
@@ -51,8 +51,17 @@ if [ "$3" == "--download" ]; then
cat << EOF | patch -s -p0 -d "$v8_root"
--- tools/jsfunfuzz/jsfunfuzz/multi_timed_run.py~
+++ tools/jsfunfuzz/jsfunfuzz/multi_timed_run.py
-@@ -125,7 +125,7 @@
-
+@@ -118,19 +118,19 @@
+-def showtail(logfilename):
++def showtail(logfilename, method="tail"):
+- cmd = "tail -n 20 %s" % logfilename
++ cmd = "%s -n 20 %s" % (method, logfilename)
+ print cmd
+ print ""
+ os.system(cmd)
+ print ""
+ print ""
+
def many_timed_runs():
iteration = 0
- while True:
@@ -60,6 +69,12 @@ if [ "$3" == "--download" ]; then
iteration += 1
logfilename = "w%d" % iteration
one_timed_run(logfilename)
+ if not succeeded(logfilename):
+ showtail(logfilename)
+- showtail("err-" + logfilename)
++ showtail("err-" + logfilename, method="head")
+
+ many_timed_runs()
EOF
fi
diff --git a/deps/v8/tools/jsfunfuzz/jsfunfuzz.tar.gz.sha1 b/deps/v8/tools/jsfunfuzz/jsfunfuzz.tar.gz.sha1
index 449996007d..d12877e3b8 100644
--- a/deps/v8/tools/jsfunfuzz/jsfunfuzz.tar.gz.sha1
+++ b/deps/v8/tools/jsfunfuzz/jsfunfuzz.tar.gz.sha1
@@ -1 +1 @@
-d92e66273ea2a0da89456a977edd0224a8e837e9
\ No newline at end of file
+936f3baf5a24313da5eb98195d5e01d76fe602fb
\ No newline at end of file
diff --git a/deps/v8/tools/jsmin.py b/deps/v8/tools/jsmin.py
deleted file mode 100644
index 236f511d44..0000000000
--- a/deps/v8/tools/jsmin.py
+++ /dev/null
@@ -1,298 +0,0 @@
-#!/usr/bin/python2.4
-
-# Copyright 2012 the V8 project authors. All rights reserved.
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following
-# disclaimer in the documentation and/or other materials provided
-# with the distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-"""A JavaScript minifier.
-
-It is far from being a complete JS parser, so there are many valid
-JavaScript programs that will be ruined by it. Another strangeness is that
-it accepts $ and % as parts of identifiers. It doesn't merge lines or strip
-out blank lines in order to ease debugging. Variables at the top scope are
-properties of the global object so we can't rename them. It is assumed that
-you introduce variables with var as if JavaScript followed C++ scope rules
-around curly braces, so the declaration must be above the first use.
-
-Use as:
-import jsmin
-minifier = JavaScriptMinifier()
-program1 = minifier.JSMinify(program1)
-program2 = minifier.JSMinify(program2)
-"""
-
-import re
-
-
-class JavaScriptMinifier(object):
- """An object that you can feed code snippets to to get them minified."""
-
- def __init__(self):
- # We prepopulate the list of identifiers that shouldn't be used. These
- # short language keywords could otherwise be used by the script as variable
- # names.
- self.seen_identifiers = {"do": True, "in": True}
- self.identifier_counter = 0
- self.in_comment = False
- self.map = {}
- self.nesting = 0
-
- def LookAtIdentifier(self, m):
- """Records identifiers or keywords that we see in use.
-
- (So we can avoid renaming variables to these strings.)
- Args:
- m: The match object returned by re.search.
-
- Returns:
- Nothing.
- """
- identifier = m.group(1)
- self.seen_identifiers[identifier] = True
-
- def Push(self):
- """Called when we encounter a '{'."""
- self.nesting += 1
-
- def Pop(self):
- """Called when we encounter a '}'."""
- self.nesting -= 1
- # We treat each top-level opening brace as a single scope that can span
- # several sets of nested braces.
- if self.nesting == 0:
- self.map = {}
- self.identifier_counter = 0
-
- def Declaration(self, m):
- """Rewrites bits of the program selected by a regexp.
-
- These can be curly braces, literal strings, function declarations and var
- declarations. (These last two must be on one line including the opening
- curly brace of the function for their variables to be renamed).
-
- Args:
- m: The match object returned by re.search.
-
- Returns:
- The string that should replace the match in the rewritten program.
- """
- matched_text = m.group(0)
-
- if matched_text.startswith("`") and matched_text.endswith("`"):
- return re.sub(r"\$\{([\w$%]+)\}",
- lambda m: '${' + self.FindNewName(m.group(1)) + '}',
- matched_text)
-
- if matched_text == "{":
- self.Push()
- return matched_text
- if matched_text == "}":
- self.Pop()
- return matched_text
- if re.match("[\"'/]", matched_text):
- return matched_text
- m = re.match(r"var ", matched_text)
- if m:
- var_names = matched_text[m.end():]
- var_names = re.split(r",", var_names)
- return "var " + ",".join(map(self.FindNewName, var_names))
- m = re.match(r"(function\b[^(]*)\((.*)\)\{$", matched_text)
- if m:
- up_to_args = m.group(1)
- args = m.group(2)
- args = re.split(r",", args)
- self.Push()
- return up_to_args + "(" + ",".join(map(self.FindNewName, args)) + "){"
-
- if matched_text in self.map:
- return self.map[matched_text]
-
- return matched_text
-
- def CharFromNumber(self, number):
- """A single-digit base-52 encoding using a-zA-Z."""
- if number < 26:
- return chr(number + 97)
- number -= 26
- return chr(number + 65)
-
- def FindNewName(self, var_name):
- """Finds a new 1-character or 2-character name for a variable.
-
- Enters it into the mapping table for this scope.
-
- Args:
- var_name: The name of the variable before renaming.
-
- Returns:
- The new name of the variable.
- """
- new_identifier = ""
- # Variable names that end in _ are member variables of the global object,
- # so they can be visible from code in a different scope. We leave them
- # alone.
- if var_name in self.map:
- return self.map[var_name]
- if self.nesting == 0:
- return var_name
- # Do not rename arguments object.
- if var_name == 'arguments':
- return 'arguments'
- while True:
- identifier_first_char = self.identifier_counter % 52
- identifier_second_char = self.identifier_counter // 52
- new_identifier = self.CharFromNumber(identifier_first_char)
- if identifier_second_char != 0:
- new_identifier = (
- self.CharFromNumber(identifier_second_char - 1) + new_identifier)
- self.identifier_counter += 1
- if not new_identifier in self.seen_identifiers:
- break
-
- self.map[var_name] = new_identifier
- return new_identifier
-
- def RemoveSpaces(self, m):
- """Returns literal strings unchanged, replaces other inputs with group 2.
-
- Other inputs are replaced with the contents of capture 1. This is either
- a single space or an empty string.
-
- Args:
- m: The match object returned by re.search.
-
- Returns:
- The string that should be inserted instead of the matched text.
- """
- entire_match = m.group(0)
- replacement = m.group(1)
- if re.match(r"'.*'$", entire_match):
- return entire_match
- if re.match(r'".*"$', entire_match):
- return entire_match
- if re.match(r"`.*`$", entire_match):
- return entire_match
- if re.match(r"/.+/$", entire_match):
- return entire_match
- return replacement
-
- def JSMinify(self, text):
- """The main entry point. Takes a text and returns a compressed version.
-
- The compressed version hopefully does the same thing. Line breaks are
- preserved.
-
- Args:
- text: The text of the code snippet as a multiline string.
-
- Returns:
- The compressed text of the code snippet as a multiline string.
- """
- new_lines = []
- for line in re.split(r"\n", text):
- line = line.replace("\t", " ")
- if self.in_comment:
- m = re.search(r"\*/", line)
- if m:
- line = line[m.end():]
- self.in_comment = False
- else:
- new_lines.append("")
- continue
-
- if not self.in_comment:
- line = re.sub(r"/\*.*?\*/", " ", line)
- line = re.sub(r"//.*", "", line)
- m = re.search(r"/\*", line)
- if m:
- line = line[:m.start()]
- self.in_comment = True
-
- # Strip leading and trailing spaces.
- line = re.sub(r"^ +", "", line)
- line = re.sub(r" +$", "", line)
- # A regexp that matches a literal string surrounded by "double quotes".
- # This regexp can handle embedded backslash-escaped characters including
- # embedded backslash-escaped double quotes.
- double_quoted_string = r'"(?:[^"\\]|\\.)*"'
- # A regexp that matches a literal string surrounded by 'single quotes'.
- single_quoted_string = r"'(?:[^'\\]|\\.)*'"
- # A regexp that matches a template string
- template_string = r"`(?:[^`\\]|\\.)*`"
- # A regexp that matches a regexp literal surrounded by /slashes/.
- # Don't allow a regexp to have a ) before the first ( since that's a
- # syntax error and it's probably just two unrelated slashes.
- # Also don't allow it to come after anything that can only be the
- # end of a primary expression.
- slash_quoted_regexp = r"(?<![\w$'\")\]])/(?:(?=\()|(?:[^()/\\]|\\.)+)(?:\([^/\\]|\\.)*/"
- # Replace multiple spaces with a single space.
- line = re.sub("|".join([double_quoted_string,
- single_quoted_string,
- template_string,
- slash_quoted_regexp,
- "( )+"]),
- self.RemoveSpaces,
- line)
- # Strip single spaces unless they have an identifier character both before
- # and after the space. % and $ are counted as identifier characters.
- line = re.sub("|".join([double_quoted_string,
- single_quoted_string,
- template_string,
- slash_quoted_regexp,
- r"(?<![a-zA-Z_0-9$%]) | (?![a-zA-Z_0-9$%])()"]),
- self.RemoveSpaces,
- line)
- # Collect keywords and identifiers that are already in use.
- if self.nesting == 0:
- re.sub(r"([a-zA-Z0-9_$%]+)", self.LookAtIdentifier, line)
- function_declaration_regexp = (
- r"\bfunction" # Function definition keyword...
- r"( [\w$%]+)?" # ...optional function name...
- r"\([\w$%,]+\)\{") # ...argument declarations.
- # Unfortunately the keyword-value syntax { key:value } makes the key look
- # like a variable where in fact it is a literal string. We use the
- # presence or absence of a question mark to try to distinguish between
- # this case and the ternary operator: "condition ? iftrue : iffalse".
- if re.search(r"\?", line):
- block_trailing_colon = r""
- else:
- block_trailing_colon = r"(?![:\w$%])"
-      # Variable use. Cannot follow a period or precede a colon.
- variable_use_regexp = r"(?<![.\w$%])[\w$%]+" + block_trailing_colon
- line = re.sub("|".join([double_quoted_string,
- single_quoted_string,
- template_string,
- slash_quoted_regexp,
- r"\{", # Curly braces.
- r"\}",
- r"\bvar [\w$%,]+", # var declarations.
- function_declaration_regexp,
- variable_use_regexp]),
- self.Declaration,
- line)
- new_lines.append(line)
-
- return "\n".join(new_lines) + "\n"
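Note on the jsmin.py removal above: among other tricks, the deleted minifier generated fresh short identifiers in base 52, encoding each digit as a-z for values 0-25 and A-Z for 26-51. A sketch of that encoding, kept here for reference:

    def char_from_number(number):
        # 0-25 -> 'a'-'z'; 26-51 -> 'A'-'Z', as in the deleted CharFromNumber.
        if number < 26:
            return chr(number + 97)
        return chr(number - 26 + 65)

    assert char_from_number(0) == "a"
    assert char_from_number(26) == "A"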
diff --git a/deps/v8/tools/linux-tick-processor b/deps/v8/tools/linux-tick-processor
index 705e07d514..8b856caa9c 100755
--- a/deps/v8/tools/linux-tick-processor
+++ b/deps/v8/tools/linux-tick-processor
@@ -27,8 +27,8 @@ if [ ! -x "$d8_exec" ]; then
fi
if [ ! -x "$d8_exec" ]; then
- echo "d8 shell not found in $D8_PATH"
- echo "To build, execute 'make native' from the V8 directory"
+ echo "d8 shell not found in $D8_PATH" >&2
+ echo "Please provide path to d8 as env var in D8_PATH" >&2
exit 1
fi
diff --git a/deps/v8/tools/locs.py b/deps/v8/tools/locs.py
new file mode 100755
index 0000000000..6773d1a76a
--- /dev/null
+++ b/deps/v8/tools/locs.py
@@ -0,0 +1,376 @@
+#!/usr/bin/env python3
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+""" locs.py - Count lines of code before and after preprocessor expansion
+ Consult --help for more information.
+"""
+
+import argparse
+import json
+import os
+import re
+import subprocess
+import sys
+import tempfile
+import time
+from pathlib import Path
+
+ARGPARSE = argparse.ArgumentParser(
+    description=("A script that computes LoC for a build dir or from a "
+ "compile_commands.json file"),
+ epilog="""Examples:
+ Count with default settings for build in out/Default:
+ locs.py --build-dir out/Default
+      Count with default settings according to a given compile_commands file:
+ locs.py --compile-commands compile_commands.json
+      Count only a custom group of files for a build in out/Default:
+ tools/locs.py --build-dir out/Default
+ --group src-compiler '\.\./\.\./src/compiler'
+ --only src-compiler
+ Report the 10 files with the worst expansion:
+ tools/locs.py --build-dir out/Default --worst 10
+ Report the 10 files with the worst expansion in src/compiler:
+ tools/locs.py --build-dir out/Default --worst 10
+ --group src-compiler '\.\./\.\./src/compiler'
+ --only src-compiler
+ Report the 10 largest files after preprocessing:
+ tools/locs.py --build-dir out/Default --largest 10
+ Report the 10 smallest input files:
+ tools/locs.py --build-dir out/Default --smallest 10""",
+ formatter_class=argparse.RawTextHelpFormatter
+)
+
+ARGPARSE.add_argument(
+ '--json',
+ action='store_true',
+ default=False,
+ help="output json instead of short summary")
+ARGPARSE.add_argument(
+ '--build-dir',
+ type=str,
+ default="",
+ help="Use specified build dir and generate necessary files")
+ARGPARSE.add_argument(
+ '--echocmd',
+ action='store_true',
+ default=False,
+ help="output command used to compute LoC")
+ARGPARSE.add_argument(
+ '--compile-commands',
+ type=str,
+ default='compile_commands.json',
+ help="Use specified compile_commands.json file")
+ARGPARSE.add_argument(
+ '--only',
+ action='append',
+ default=[],
+ help="Restrict counting to report group (can be passed multiple times)")
+ARGPARSE.add_argument(
+ '--not',
+ action='append',
+ default=[],
+ help="Exclude specific group (can be passed multiple times)")
+ARGPARSE.add_argument(
+ '--list-groups',
+ action='store_true',
+ default=False,
+ help="List groups and associated regular expressions")
+ARGPARSE.add_argument(
+ '--group',
+ nargs=2,
+ action='append',
+ default=[],
+ help="Add a report group (can be passed multiple times)")
+ARGPARSE.add_argument(
+ '--largest',
+ type=int,
+ nargs='?',
+ default=0,
+ const=3,
+ help="Output the n largest files after preprocessing")
+ARGPARSE.add_argument(
+ '--worst',
+ type=int,
+ nargs='?',
+ default=0,
+ const=3,
+ help="Output the n files with worst expansion by preprocessing")
+ARGPARSE.add_argument(
+ '--smallest',
+ type=int,
+ nargs='?',
+ default=0,
+ const=3,
+ help="Output the n smallest input files")
+ARGPARSE.add_argument(
+ '--files',
+ type=int,
+ nargs='?',
+ default=0,
+ const=3,
+ help="Output results for each file separately")
+
+ARGS = vars(ARGPARSE.parse_args())
+
+
+def MaxWidth(strings):
+ max_width = 0
+ for s in strings:
+ max_width = max(max_width, len(s))
+ return max_width
+
+
+def GenerateCompileCommandsAndBuild(build_dir, compile_commands_file, out):
+ if not os.path.isdir(build_dir):
+ print("Error: Specified build dir {} is not a directory.".format(
+ build_dir), file=sys.stderr)
+ exit(1)
+ compile_commands_file = "{}/compile_commands.json".format(build_dir)
+
+ print("Generating compile commands in {}.".format(
+ compile_commands_file), file=out)
+
+ ninja = "ninja -C {} -t compdb cxx cc > {}".format(
+ build_dir, compile_commands_file)
+ if subprocess.call(ninja, shell=True, stdout=out) != 0:
+    print("Error: Could not generate {} for {}.".format(
+ compile_commands_file, build_dir), file=sys.stderr)
+ exit(1)
+
+ autoninja = "autoninja -C {} v8_generated_cc_files".format(build_dir)
+ if subprocess.call(autoninja, shell=True, stdout=out) != 0:
+ print("Error: Building target 'v8_generated_cc_files'"
+ " failed for {}.".format(build_dir), file=sys.stderr)
+ exit(1)
+
+ return compile_commands_file
+
+
+class CompilationData:
+ def __init__(self, loc, expanded):
+ self.loc = loc
+ self.expanded = expanded
+
+ def ratio(self):
+ return self.expanded / (self.loc+1)
+
+ def to_string(self):
+ return "{:>9,} to {:>12,} ({:>5.0f}x)".format(
+ self.loc, self.expanded, self.ratio())
+
+
+class File(CompilationData):
+ def __init__(self, file, loc, expanded):
+ super().__init__(loc, expanded)
+ self.file = file
+
+ def to_string(self):
+ return "{} {}".format(super().to_string(), self.file)
+
+
+class Group(CompilationData):
+ def __init__(self, name, regexp_string):
+ super().__init__(0, 0)
+ self.name = name
+ self.count = 0
+ self.regexp = re.compile(regexp_string)
+
+ def account(self, unit):
+ if (self.regexp.match(unit.file)):
+ self.loc += unit.loc
+ self.expanded += unit.expanded
+ self.count += 1
+
+ def to_string(self, name_width):
+ return "{:<{}} ({:>5} files): {}".format(
+ self.name, name_width, self.count, super().to_string())
+
+
+def SetupReportGroups():
+ default_report_groups = {"total": '.*',
+ "src": '\\.\\./\\.\\./src',
+ "test": '\\.\\./\\.\\./test',
+ "third_party": '\\.\\./\\.\\./third_party',
+ "gen": 'gen'}
+
+ report_groups = {**default_report_groups, **dict(ARGS['group'])}
+
+ if ARGS['only']:
+ for only_arg in ARGS['only']:
+      if only_arg not in report_groups:
+ print("Error: specified report group '{}' is not defined.".format(
+ ARGS['only']))
+ exit(1)
+ else:
+ report_groups = {
+ k: v for (k, v) in report_groups.items() if k in ARGS['only']}
+
+ if ARGS['not']:
+ report_groups = {
+ k: v for (k, v) in report_groups.items() if k not in ARGS['not']}
+
+ if ARGS['list_groups']:
+ print_cat_max_width = MaxWidth(list(report_groups.keys()) + ["Category"])
+ print(" {:<{}} {}".format("Category",
+ print_cat_max_width, "Regular expression"))
+ for cat, regexp_string in report_groups.items():
+ print(" {:<{}}: {}".format(
+ cat, print_cat_max_width, regexp_string))
+
+ report_groups = {k: Group(k, v) for (k, v) in report_groups.items()}
+
+ return report_groups
+
+
+class Results:
+ def __init__(self):
+ self.groups = SetupReportGroups()
+ self.units = {}
+
+ def track(self, filename):
+ is_tracked = False
+ for group in self.groups.values():
+ if group.regexp.match(filename):
+ is_tracked = True
+ return is_tracked
+
+ def recordFile(self, filename, loc, expanded):
+ unit = File(filename, loc, expanded)
+ self.units[filename] = unit
+ for group in self.groups.values():
+ group.account(unit)
+
+ def maxGroupWidth(self):
+ return MaxWidth([v.name for v in self.groups.values()])
+
+ def printGroupResults(self, file):
+ for key in sorted(self.groups.keys()):
+ print(self.groups[key].to_string(self.maxGroupWidth()), file=file)
+
+ def printSorted(self, key, count, reverse, out):
+ for unit in sorted(list(self.units.values()), key=key, reverse=reverse)[:count]:
+ print(unit.to_string(), file=out)
+
+
+class LocsEncoder(json.JSONEncoder):
+ def default(self, o):
+ if isinstance(o, File):
+ return {"file": o.file, "loc": o.loc, "expanded": o.expanded}
+ if isinstance(o, Group):
+ return {"name": o.name, "loc": o.loc, "expanded": o.expanded}
+ if isinstance(o, Results):
+ return {"groups": o.groups, "units": o.units}
+ return json.JSONEncoder.default(self, o)
+
+
+class StatusLine:
+ def __init__(self):
+ self.max_width = 0
+
+ def print(self, statusline, end="\r", file=sys.stdout):
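+    # Track the widest line printed so far so that, with the default end="\r",
+    # a shorter status line fully overwrites the previous one.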
+ self.max_width = max(self.max_width, len(statusline))
+ print("{0:<{1}}".format(statusline, self.max_width), end=end, file=file, flush=True)
+
+
+class CommandSplitter:
+ def __init__(self):
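+    # Splits a compile command shaped like "<wrapper> clang... -c <in> -o <out>"
+    # into the clang invocation, input file and output file; the optional first
+    # group absorbs a launcher prefix such as ccache (named here only as an
+    # example).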
+ self.cmd_pattern = re.compile(
+ "([^\\s]*\\s+)?(?P<clangcmd>[^\\s]*clang.*)"
+ " -c (?P<infile>.*) -o (?P<outfile>.*)")
+
+ def process(self, compilation_unit, temp_file_name):
+ cmd = self.cmd_pattern.match(compilation_unit['command'])
+ outfilename = cmd.group('outfile') + ".cc"
+ infilename = cmd.group('infile')
+ infile = Path(compilation_unit['directory']).joinpath(infilename)
+ outfile = Path(str(temp_file_name)).joinpath(outfilename)
+ return [cmd.group('clangcmd'), infilename, infile, outfile]
+
+
+def Main():
+ compile_commands_file = ARGS['compile_commands']
+ out = sys.stdout
+ if ARGS['json']:
+ out = sys.stderr
+
+ if ARGS['build_dir']:
+ compile_commands_file = GenerateCompileCommandsAndBuild(
+ ARGS['build_dir'], compile_commands_file, out)
+
+ try:
+ with open(compile_commands_file) as file:
+ data = json.load(file)
+ except FileNotFoundError:
+ print("Error: Cannot read '{}'. Consult --help to get started.")
+ exit(1)
+
+ result = Results()
+ status = StatusLine()
+
+ with tempfile.TemporaryDirectory(dir='/tmp/', prefix="locs.") as temp:
+ processes = []
+ start = time.time()
+ cmd_splitter = CommandSplitter()
+
+ for i, key in enumerate(data):
+ if not result.track(key['file']):
+ continue
+ if not ARGS['json']:
+ status.print(
+ "[{}/{}] Counting LoCs of {}".format(i, len(data), key['file']))
+ clangcmd, infilename, infile, outfile = cmd_splitter.process(key, temp)
+ outfile.parent.mkdir(parents=True, exist_ok=True)
+ if infile.is_file():
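+      # Preprocess only (-E -P), drop blank lines, and count the result.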
+ clangcmd = clangcmd + " -E -P " + \
+ str(infile) + " -o /dev/stdout | sed '/^\\s*$/d' | wc -l"
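+      # Raw LoC: drop whole-line //, /* and * comment lines plus blank lines
+      # before counting.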
+ loccmd = ("cat {} | sed '\\;^\\s*//;d' | sed '\\;^/\\*;d'"
+ " | sed '/^\\*/d' | sed '/^\\s*$/d' | wc -l").format(
+ infile)
+ runcmd = " {} ; {}".format(clangcmd, loccmd)
+ if ARGS['echocmd']:
+ print(runcmd)
+ p = subprocess.Popen(
+ runcmd, shell=True, cwd=key['directory'], stdout=subprocess.PIPE)
+ processes.append({'process': p, 'infile': infilename})
+
+ for i, p in enumerate(processes):
+ status.print("[{}/{}] Summing up {}".format(
+ i, len(processes), p['infile']), file=out)
+ output, err = p['process'].communicate()
+ expanded, loc = list(map(int, output.split()))
+ result.recordFile(p['infile'], loc, expanded)
+
+ end = time.time()
+ if ARGS['json']:
+ print(json.dumps(result, ensure_ascii=False, cls=LocsEncoder))
+ status.print("Processed {:,} files in {:,.2f} sec.".format(
+ len(processes), end-start), end="\n", file=out)
+ result.printGroupResults(file=out)
+
+ if ARGS['largest']:
+ print("Largest {} files after expansion:".format(ARGS['largest']))
+ result.printSorted(
+ lambda v: v.expanded, ARGS['largest'], reverse=True, out=out)
+
+ if ARGS['worst']:
+ print("Worst expansion ({} files):".format(ARGS['worst']))
+ result.printSorted(
+ lambda v: v.ratio(), ARGS['worst'], reverse=True, out=out)
+
+ if ARGS['smallest']:
+ print("Smallest {} input files:".format(ARGS['smallest']))
+ result.printSorted(
+ lambda v: v.loc, ARGS['smallest'], reverse=False, out=out)
+
+ if ARGS['files']:
+ print("List of input files:")
+ result.printSorted(
+ lambda v: v.file, ARGS['files'], reverse=False, out=out)
+
+ return 0
+
+
+if __name__ == '__main__':
+ sys.exit(Main())
diff --git a/deps/v8/tools/map-processor b/deps/v8/tools/map-processor
index c0713bdf13..cf18c31a8e 100755
--- a/deps/v8/tools/map-processor
+++ b/deps/v8/tools/map-processor
@@ -28,7 +28,7 @@ fi
if [ ! -x "$d8_exec" ]; then
echo "d8 shell not found in $D8_PATH"
- echo "To build, execute 'make native' from the V8 directory"
+ echo "Please provide path to d8 as env var in D8_PATH"
exit 1
fi
diff --git a/deps/v8/tools/map-processor.html b/deps/v8/tools/map-processor.html
index 4029e96343..70c205c771 100644
--- a/deps/v8/tools/map-processor.html
+++ b/deps/v8/tools/map-processor.html
@@ -5,7 +5,7 @@
code is governed by a BSD-style license that can be found in the LICENSE file.
-->
<head>
-<meta charset="UTF-8">
+<meta charset="utf-8">
<style>
html, body {
font-family: sans-serif;
@@ -15,10 +15,100 @@ html, body {
h1, h2, h3, section {
padding-left: 15px;
}
+
+#content {
+ opacity: 0.0;
+ height: 0px;
+ transition: all 0.5s ease-in-out;
+}
+
+.success #content {
+ height: auto;
+ opacity: 1.0;
+}
+
+#fileReader {
+ width: 100%;
+ height: 100px;
+ line-height: 100px;
+ text-align: center;
+ border: solid 1px #000000;
+ border-radius: 5px;
+ cursor: pointer;
+ transition: all 0.5s ease-in-out;
+}
+
+.failure #fileReader {
+ background-color: #FFAAAA;
+}
+
+.success #fileReader {
+ height: 20px;
+ line-height: 20px;
+}
+
+#fileReader:hover {
+ background-color: #e0edfe;
+}
+
+.loading #fileReader {
+ cursor: wait;
+}
+
+#fileReader > input {
+ display: none;
+}
+
+
+#loader {
+ display: none;
+}
+
+.loading #loader {
+ display: block;
+ position: fixed;
+ top: 0px;
+ left: 0px;
+ width: 100%;
+ height: 100%;
+ background-color: rgba(255, 255, 255, 0.5);
+}
+
+#spinner {
+ position: absolute;
+ width: 100px;
+ height: 100px;
+ top: 40%;
+ left: 50%;
+ margin-left: -50px;
+ border: 30px solid #000;
+ border-top: 30px solid #36E;
+ border-radius: 50%;
+ animation: spin 1s ease-in-out infinite;
+}
+
+@keyframes spin {
+ 0% {
+ transform: rotate(0deg);
+ }
+ 100% {
+ transform: rotate(360deg);
+ }
+}
+
+.colorbox {
+ width: 10px;
+ height: 10px;
+ border: 1px black solid;
+}
+
#stats table {
display: inline-block;
padding-right: 50px;
}
+#stats table td {
+ cursor: pointer;
+}
#stats .transitionTable {
max-height: 200px;
overflow-y: scroll;
@@ -30,6 +120,16 @@ h1, h2, h3, section {
overflow-x: scroll;
user-select: none;
}
+#timelineLabel {
+ transform: rotate(90deg);
+ transform-origin: left bottom 0;
+ position: absolute;
+ left: 0;
+ width: 250px;
+ text-align: center;
+ font-size: 10px;
+ opacity: 0.5;
+}
#timelineChunks {
height: 250px;
position: absolute;
@@ -179,6 +279,9 @@ h1, h2, h3, section {
word-break: break-all;
background-color: rgba(255,255,255,0.5);
}
+.black{
+ background-color: black;
+}
.red {
background-color: red;
}
@@ -308,7 +411,7 @@ function selectOption(select, match) {
function div(classes) {
let node = document.createElement('div');
if (classes !== void 0) {
- if (typeof classes == "string") {
+ if (typeof classes === "string") {
node.classList.add(classes);
} else {
classes.forEach(cls => node.classList.add(cls));
@@ -322,11 +425,17 @@ function table(className) {
if (className) node.classList.add(className)
return node;
}
-function td(text) {
+
+function td(textOrNode) {
let node = document.createElement("td");
- node.innerText = text;
+ if (typeof textOrNode === "object") {
+ node.appendChild(textOrNode);
+ } else {
+ node.innerText = textOrNode;
+ }
return node;
}
+
function tr() {
let node = document.createElement("tr");
return node;
@@ -369,9 +478,14 @@ define(Array.prototype, "last", function() { return this[this.length - 1] });
// =========================================================================
// EventHandlers
function handleBodyLoad() {
- let upload = $('uploadInput');
- upload.onclick = (e) => { e.target.value = null };
- upload.onchange = (e) => { handleLoadFile(e.target) };
+ let upload = $('fileReader');
+ upload.onclick = (e) => $("file").click();
+ upload.ondragover = (e) => e.preventDefault();
+ upload.ondrop = (e) => handleLoadFile(e);
+ $('file').onchange = (e) => handleLoadFile(e);
+ upload.onkeydown = (e) => {
+    if (e.key === "Enter") $("file").click();
+ };
upload.focus();
document.state = new State();
@@ -381,21 +495,32 @@ function handleBodyLoad() {
tooltip.style.top = e.pageY + "px";
let map = e.target.map;
if (map) {
- $("tooltipContents").innerText = map.description.join("\n");
+ $("tooltipContents").innerText = map.description;
}
});
-}
-function handleLoadFile(upload) {
- let files = upload.files;
- let file = files[0];
- let reader = new FileReader();
- reader.onload = function(evt) {
- handleLoadText(this.result);
+ function handleLoadFile(event) {
+ // Used for drop and file change.
+ event.preventDefault();
+ let host = event.dataTransfer ? event.dataTransfer : event.target;
+ let file = host.files[0];
+ let reader = new FileReader();
+ document.body.className = 'loading';
+ reader.onload = function(evt) {
+ try {
+ handleLoadText(this.result);
+ document.body.className = 'success';
+ } catch(e) {
+ document.body.className = 'failure';
+ console.error(e);
+ }
+ }
+ // Defer the reading to allow spinner CSS animation.
+ setTimeout(() => reader.readAsText(file), 0);
}
- reader.readAsText(file);
}
+
function handleLoadText(text) {
let mapProcessor = new MapProcessor();
document.state.timeline = mapProcessor.processString(text);
@@ -540,6 +665,7 @@ class View {
let details = "";
if (this.map) {
details += "ID: " + this.map.id;
+ details += "\nSource location: " + this.map.filePosition;
details += "\n" + this.map.description;
}
$("mapDetails").innerText = details;
@@ -591,7 +717,6 @@ class View {
time += interval;
}
this.drawOverview();
- this.drawHistograms();
this.redraw();
}
@@ -686,47 +811,6 @@ class View {
$("timelineOverview").style.backgroundImage = "url(" + imageData + ")";
}
- drawHistograms() {
- $("mapsDepthHistogram").histogram = this.timeline.depthHistogram();
- $("mapsFanOutHistogram").histogram = this.timeline.fanOutHistogram();
- }
-
- drawMapsDepthHistogram() {
- let canvas = $("mapsDepthCanvas");
- let histogram = this.timeline.depthHistogram();
- this.drawHistogram(canvas, histogram, true);
- }
-
- drawMapsFanOutHistogram() {
- let canvas = $("mapsFanOutCanvas");
- let histogram = this.timeline.fanOutHistogram();
- this.drawHistogram(canvas, histogram, true, true);
- }
-
- drawHistogram(canvas, histogram, logScaleX=false, logScaleY=false) {
- let ctx = canvas.getContext("2d");
- let yMax = histogram.max(each => each.length);
- if (logScaleY) yMax = Math.log(yMax);
- let xMax = histogram.length;
- if (logScaleX) xMax = Math.log(xMax);
- ctx.clearRect(0, 0, canvas.width, canvas.height);
- ctx.beginPath();
- ctx.moveTo(0,canvas.height);
- for (let i = 0; i < histogram.length; i++) {
- let x = i;
- if (logScaleX) x = Math.log(x);
- x = x / xMax * canvas.width;
- let bucketLength = histogram[i].length;
- if (logScaleY) bucketLength = Math.log(bucketLength);
- let y = (1 - bucketLength / yMax) * canvas.height;
- ctx.lineTo(x, y);
- }
- ctx.lineTo(canvas.width, canvas.height);
- ctx.closePath;
- ctx.stroke();
- ctx.fill();
- }
-
redraw() {
let canvas= $("timelineCanvas");
canvas.width = (this.chunks.length+1) * kChunkWidth;
@@ -1006,26 +1090,32 @@ class StatsView {
}
updateGeneralStats() {
let pairs = [
- ["Maps", e => true],
- ["Transitions", e => e.edge && e.edge.isTransition()],
- ["Fast to Slow", e => e.edge && e.edge.isFastToSlow()],
- ["Slow to Fast", e => e.edge && e.edge.isSlowToFast()],
- ["Initial Map", e => e.edge && e.edge.isInitial()],
- ["Replace Descriptors", e => e.edge && e.edge.isReplaceDescriptors()],
- ["Copy as Prototype", e => e.edge && e.edge.isCopyAsPrototype()],
- ["Optimize as Prototype", e => e.edge && e.edge.isOptimizeAsPrototype()],
- ["Deprecated", e => e.isDeprecated()],
+ ["Maps", null, e => true],
+ ["Transitions", 'black', e => e.edge && e.edge.isTransition()],
+ ["Fast to Slow", 'violet', e => e.edge && e.edge.isFastToSlow()],
+ ["Slow to Fast", 'orange', e => e.edge && e.edge.isSlowToFast()],
+ ["Initial Map", 'yellow', e => e.edge && e.edge.isInitial()],
+ ["Replace Descriptors", 'red', e => e.edge && e.edge.isReplaceDescriptors()],
+ ["Copy as Prototype", 'red', e => e.edge && e.edge.isCopyAsPrototype()],
+ ["Optimize as Prototype", null, e => e.edge && e.edge.isOptimizeAsPrototype()],
+ ["Deprecated", null, e => e.isDeprecated()],
+ ["Bootstrapped", 'green', e => e.isBootstrapped()],
];
let text = "";
let tableNode = table();
let name, filter;
let total = this.timeline.size();
- pairs.forEach(([name, filter]) => {
+ pairs.forEach(([name, color, filter]) => {
let row = tr();
+ if (color !== null) {
+ row.appendChild(td(div(['colorbox', color])));
+ } else {
+ row.appendChild(td(""));
+ }
row.maps = this.timeline.filterUniqueTransitions(filter);
- row.addEventListener("click",
- e => this.transitionView.showMaps(e.target.parentNode.maps));
+ row.onclick =
+ (e) => this.transitionView.showMaps(e.target.parentNode.maps);
row.appendChild(td(name));
let count = this.timeline.count(filter);
row.appendChild(td(count));
@@ -1060,7 +1150,7 @@ function transitionTypeToColor(type) {
switch(type) {
case "new": return "green";
case "Normalize": return "violet";
- case "map=SlowToFast": return "orange";
+ case "SlowToFast": return "orange";
case "InitialMap": return "yellow";
case "Transition": return "black";
case "ReplaceDescriptors": return "red";
@@ -1069,183 +1159,53 @@ function transitionTypeToColor(type) {
}
// ShadowDom elements =========================================================
-customElements.define('x-histogram', class extends HTMLElement {
- constructor() {
- super();
- let shadowRoot = this.attachShadow({mode: 'open'});
- const t = document.querySelector('#x-histogram-template');
- const instance = t.content.cloneNode(true);
- shadowRoot.appendChild(instance);
- this._histogram = undefined;
- this.mouseX = 0;
- this.mouseY = 0;
- this.canvas.addEventListener('mousemove', event => this.handleCanvasMove(event));
- }
- setBoolAttribute(name, value) {
- if (value) {
- this.setAttribute(name, "");
- } else {
- this.deleteAttribute(name);
- }
- }
- static get observedAttributes() {
- return ['title', 'xlog', 'ylog', 'xlabel', 'ylabel'];
- }
- $(query) { return this.shadowRoot.querySelector(query) }
- get h1() { return this.$("h2") }
- get canvas() { return this.$("canvas") }
- get xLabelDiv() { return this.$("#xLabel") }
- get yLabelDiv() { return this.$("#yLabel") }
-
- get histogram() {
- return this._histogram;
- }
- set histogram(array) {
- this._histogram = array;
- if (this._histogram) {
- this.yMax = this._histogram.max(each => each.length);
- this.xMax = this._histogram.length;
- }
- this.draw();
- }
-
- get title() { return this.getAttribute("title") }
- set title(string) { this.setAttribute("title", string) }
- get xLabel() { return this.getAttribute("xlabel") }
- set xLabel(string) { this.setAttribute("xlabel", string)}
- get yLabel() { return this.getAttribute("ylabel") }
- set yLabel(string) { this.setAttribute("ylabel", string)}
- get xLog() { return this.hasAttribute("xlog") }
- set xLog(value) { this.setBoolAttribute("xlog", value) }
- get yLog() { return this.hasAttribute("ylog") }
- set yLog(value) { this.setBoolAttribute("ylog", value) }
-
- attributeChangedCallback(name, oldValue, newValue) {
- if (name == "title") {
- this.h1.innerText = newValue;
- return;
- }
- if (name == "ylabel") {
- this.yLabelDiv.innerText = newValue;
- return;
- }
- if (name == "xlabel") {
- this.xLabelDiv.innerText = newValue;
- return;
- }
- this.draw();
- }
-
- handleCanvasMove(event) {
- this.mouseX = event.offsetX;
- this.mouseY = event.offsetY;
- this.draw();
- }
- xPosition(i) {
- let x = i;
- if (this.xLog) x = Math.log(x);
- return x / this.xMax * this.canvas.width;
- }
- yPosition(i) {
- let bucketLength = this.histogram[i].length;
- if (this.yLog) {
- return (1 - Math.log(bucketLength) / Math.log(this.yMax)) * this.drawHeight + 10;
- } else {
- return (1 - bucketLength / this.yMax) * this.drawHeight + 10;
- }
- }
-
- get drawHeight() { return this.canvas.height - 10 }
-
- draw() {
- if (!this.histogram) return;
- let width = this.canvas.width;
- let height = this.drawHeight;
- let ctx = this.canvas.getContext("2d");
- if (this.xLog) yMax = Math.log(yMax);
- let xMax = this.histogram.length;
- if (this.yLog) xMax = Math.log(xMax);
- ctx.clearRect(0, 0, this.canvas.width, this.canvas.height);
- ctx.beginPath();
- ctx.moveTo(0, height);
- for (let i = 0; i < this.histogram.length; i++) {
- ctx.lineTo(this.xPosition(i), this.yPosition(i));
- }
- ctx.lineTo(width, height);
- ctx.closePath;
- ctx.stroke();
- ctx.fill();
- if (!this.mouseX) return;
- ctx.beginPath();
- let index = Math.round(this.mouseX);
- let yBucket = this.histogram[index];
- let y = this.yPosition(index);
- if (this.yLog) y = Math.log(y);
- ctx.moveTo(0, y);
- ctx.lineTo(width-40, y);
- ctx.moveTo(this.mouseX, 0);
- ctx.lineTo(this.mouseX, height);
- ctx.stroke();
- ctx.textAlign = "left";
- ctx.fillText(yBucket.length, width-30, y);
- }
-});
</script>
</head>
-<template id="x-histogram-template">
- <style>
- #yLabel {
- transform: rotate(90deg);
- }
- canvas, #yLabel, #info { float: left; }
- #xLabel { clear: both }
- </style>
- <h2></h2>
- <div id="yLabel"></div>
- <canvas height=50></canvas>
- <div id="info">
- </div>
- <div id="xLabel"></div>
-</template>
-
<body onload="handleBodyLoad(event)" onkeypress="handleKeyDown(event)">
- <h2>Data</h2>
+ <h1>V8 Map Explorer</h1>
<section>
- <form name="fileForm">
- <p>
- <input id="uploadInput" type="file" name="files">
- </p>
- </form>
+ <div id="fileReader" tabindex=1 >
+ <span id="label">
+ Drag and drop a v8.log file into this area, or click to choose from disk.
+ </span>
+ <input id="file" type="file" name="files">
+ </div>
+ <div id="loader">
+ <div id="spinner"></div>
+ </div>
</section>
- <h2>Stats</h2>
- <section id="stats"></section>
+ <div id="content">
+ <h2>Stats</h2>
+ <section id="stats"></section>
- <h2>Timeline</h2>
- <div id="timeline">
- <div id=timelineChunks></div>
- <canvas id="timelineCanvas" ></canvas>
- </div>
- <div id="timelineOverview"
- onmousemove="handleTimelineIndicatorMove(event)" >
- <div id="timelineOverviewIndicator">
- <div class="leftMask"></div>
- <div class="rightMask"></div>
+ <h2>Timeline</h2>
+ <div id="timeline">
+ <div id="timelineLabel">Frequency</div>
+ <div id="timelineChunks"></div>
+ <canvas id="timelineCanvas"></canvas>
+ </div>
+ <div id="timelineOverview"
+ onmousemove="handleTimelineIndicatorMove(event)" >
+ <div id="timelineOverviewIndicator">
+ <div class="leftMask"></div>
+ <div class="rightMask"></div>
+ </div>
</div>
- </div>
- <h2>Transitions</h2>
- <section id="transitionView"></section>
- <br/>
+ <h2>Transitions</h2>
+ <section id="transitionView"></section>
+ <br/>
- <h2>Selected Map</h2>
- <section id="mapDetails"></section>
+ <h2>Selected Map</h2>
+ <section id="mapDetails"></section>
+ </div>
- <x-histogram id="mapsDepthHistogram"
- title="Maps Depth" xlabel="depth" ylabel="nof"></x-histogram>
- <x-histogram id="mapsFanOutHistogram" xlabel="fan-out"
- title="Maps Fan-out" ylabel="nof"></x-histogram>
+ <h2>Instructions</h2>
+ <section>
+      <p>Visualize Map trees that have been gathered using <code>--trace-maps</code>.</p>
+ </section>
<div id="tooltip">
<div id="tooltipContents"></div>
diff --git a/deps/v8/tools/map-processor.js b/deps/v8/tools/map-processor.js
index c0731e8555..7e8572af8c 100644
--- a/deps/v8/tools/map-processor.js
+++ b/deps/v8/tools/map-processor.js
@@ -151,7 +151,7 @@ class MapProcessor extends LogReader {
from = this.getExistingMap(from, time);
to = this.getExistingMap(to, time);
let edge = new Edge(type, name, reason, time, from, to);
- edge.filePosition = this.formatPC(pc, line, column);
+ to.filePosition = this.formatPC(pc, line, column);
edge.finishSetup();
}
@@ -209,6 +209,7 @@ class V8Map {
V8Map.set(id, this);
this.leftId = 0;
this.rightId = 0;
+ this.filePosition = "";
}
finalize(id) {
@@ -284,6 +285,10 @@ class V8Map {
return this.edge === void 0 ? "new" : this.edge.type;
}
+ isBootstrapped() {
+ return this.edge === void 0;
+ }
+
getParents() {
let parents = [];
let current = this.parent();
@@ -315,7 +320,6 @@ class Edge {
this.time = time;
this.from = from;
this.to = to;
- this.filePosition = "";
}
finishSetup() {
@@ -363,31 +367,35 @@ class Edge {
}
isTransition() {
- return this.type == "Transition"
+ return this.type === "Transition"
}
isFastToSlow() {
- return this.type == "Normalize"
+ return this.type === "Normalize"
}
isSlowToFast() {
- return this.type == "SlowToFast"
+ return this.type === "SlowToFast"
}
isInitial() {
- return this.type == "InitialMap"
+ return this.type === "InitialMap"
+ }
+
+ isBootstrapped() {
+ return this.type === "new"
}
isReplaceDescriptors() {
- return this.type == "ReplaceDescriptors"
+ return this.type === "ReplaceDescriptors"
}
isCopyAsPrototype() {
- return this.reason == "CopyAsPrototype"
+ return this.reason === "CopyAsPrototype"
}
isOptimizeAsPrototype() {
- return this.reason == "OptimizeAsPrototype"
+ return this.reason === "OptimizeAsPrototype"
}
symbol() {
diff --git a/deps/v8/tools/mb/docs/design_spec.md b/deps/v8/tools/mb/docs/design_spec.md
index fb202da74e..c119e65e90 100644
--- a/deps/v8/tools/mb/docs/design_spec.md
+++ b/deps/v8/tools/mb/docs/design_spec.md
@@ -169,7 +169,7 @@ We can then return two lists as output:
* `compile_targets`, which is a list of pruned targets to be
passed to Ninja to build. It is acceptable to replace a list of
pruned targets by a meta target if it turns out that all of the
- dependendencies of the target are affected by the patch (i.e.,
+ dependencies of the target are affected by the patch (i.e.,
all ten binaries that blink_tests depends on), but doing so is
not required.
* `test_targets`, which is a list of unpruned targets to be mapped
diff --git a/deps/v8/tools/mb/docs/user_guide.md b/deps/v8/tools/mb/docs/user_guide.md
index a7d72c8839..75c195a949 100644
--- a/deps/v8/tools/mb/docs/user_guide.md
+++ b/deps/v8/tools/mb/docs/user_guide.md
@@ -20,7 +20,7 @@ For more discussion of MB, see also [the design spec](design_spec.md).
### `mb analyze`
-`mb analyze` is reponsible for determining what targets are affected by
+`mb analyze` is responsible for determining what targets are affected by
a list of files (e.g., the list of files in a patch on a trybot):
```
@@ -229,7 +229,7 @@ The `configs` key points to a dictionary of named build configurations.
 There should be a key in this dict for every supported configuration
of Chromium, meaning every configuration we have a bot for, and every
-configuration commonly used by develpers but that we may not have a bot
+configuration commonly used by developers but that we may not have a bot
for.
The value of each key is a list of "mixins" that will define what that
diff --git a/deps/v8/tools/node/README.md b/deps/v8/tools/node/README.md
new file mode 100644
index 0000000000..dc16c914fd
--- /dev/null
+++ b/deps/v8/tools/node/README.md
@@ -0,0 +1,12 @@
+# Node.js Backports
+
+We no longer maintain our own backport script.
+
+For backporting V8 changes to Node.js, there is a useful script in
+[node-core-utils][1]. You can use the `git node v8 backport` command, which will
+bump the necessary V8 version numbers depending on the specific branch.
+
+See the [Node.js documentation][2] on V8 backports for a guide.
+
+[1]: https://github.com/nodejs/node-core-utils
+[2]: https://github.com/nodejs/node/blob/master/doc/guides/maintaining-V8.md
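+
+Example invocation (the commit hash is a placeholder):
+
+```bash
+git node v8 backport <sha>
+```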
diff --git a/deps/v8/tools/node/backport_node.py b/deps/v8/tools/node/backport_node.py
deleted file mode 100755
index 50b0b077fa..0000000000
--- a/deps/v8/tools/node/backport_node.py
+++ /dev/null
@@ -1,126 +0,0 @@
-#!/usr/bin/env python
-# Copyright 2017 the V8 project authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-"""
-Use this script to cherry-pick a V8 commit to backport to a Node.js checkout.
-
-Requirements:
- - Node.js checkout to backport to.
- - V8 checkout that contains the commit to cherry-pick.
-
-Usage:
- $ backport_node.py <path_to_v8> <path_to_node> <commit-hash>
-
- This will apply the commit to <path_to_node>/deps/v8 and create a commit in
- the Node.js checkout, increment patch level, and copy over the original
- commit message.
-
-Optional flags:
- --no-review Run `gclient sync` on the V8 checkout before updating.
-"""
-
-import argparse
-import os
-import subprocess
-import re
-import sys
-
-TARGET_SUBDIR = os.path.join("deps", "v8")
-VERSION_FILE = os.path.join("include", "v8-version.h")
-VERSION_PATTERN = r'(?<=#define V8_PATCH_LEVEL )\d+'
-
-def FileToText(file_name):
- with open(file_name) as f:
- return f.read()
-
-def TextToFile(text, file_name):
- with open(file_name, "w") as f:
- f.write(text)
-
-
-def Clean(options):
- print ">> Cleaning target directory."
- subprocess.check_call(["git", "clean", "-fd"],
- cwd = os.path.join(options.node_path, TARGET_SUBDIR))
-
-def CherryPick(options):
- print ">> Apply patch."
- patch = subprocess.Popen(["git", "diff-tree", "-p", options.commit],
- stdout=subprocess.PIPE, cwd=options.v8_path)
- patch.wait()
- try:
- subprocess.check_output(["git", "apply", "-3", "--directory=%s" % TARGET_SUBDIR],
- stdin=patch.stdout, cwd=options.node_path)
- except:
- print ">> In another shell, please resolve patch conflicts"
- print ">> and `git add` affected files."
- print ">> Finally continue by entering RESOLVED."
- while raw_input("[RESOLVED]") != "RESOLVED":
- print ">> You need to type RESOLVED"
-
-def UpdateVersion(options):
- print ">> Increment patch level."
- version_file = os.path.join(options.node_path, TARGET_SUBDIR, VERSION_FILE)
- text = FileToText(version_file)
- def increment(match):
- patch = int(match.group(0))
- return str(patch + 1)
- text = re.sub(VERSION_PATTERN, increment, text, flags=re.MULTILINE)
- TextToFile(text, version_file)
-
-def CreateCommit(options):
- print ">> Creating commit."
- # Find short hash from source.
- shorthash = subprocess.check_output(
- ["git", "rev-parse", "--short", options.commit],
- cwd=options.v8_path).strip()
-
- # Commit message
- title = "deps: backport %s from upstream V8" % shorthash
- body = subprocess.check_output(
- ["git", "log", options.commit, "-1", "--format=%B"],
- cwd=options.v8_path).strip()
- body = '\n'.join(" " + line for line in body.splitlines())
-
- message = title + "\n\nOriginal commit message:\n\n" + body
-
- # Create commit at target.
- review_message = "--no-edit" if options.no_review else "--edit"
- git_commands = [
- ["git", "checkout", "-b", "backport_%s" % shorthash], # new branch
- ["git", "add", TARGET_SUBDIR], # add files
- ["git", "commit", "-m", message, review_message] # new commit
- ]
- for command in git_commands:
- subprocess.check_call(command, cwd=options.node_path)
-
-def ParseOptions(args):
- parser = argparse.ArgumentParser(description="Backport V8 commit to Node.js")
- parser.add_argument("v8_path", help="Path to V8 checkout")
- parser.add_argument("node_path", help="Path to Node.js checkout")
- parser.add_argument("commit", help="Commit to backport")
- parser.add_argument("--no-review", action="store_true",
- help="Skip editing commit message")
- options = parser.parse_args(args)
- options.v8_path = os.path.abspath(options.v8_path)
- assert os.path.isdir(options.v8_path)
- options.node_path = os.path.abspath(options.node_path)
- assert os.path.isdir(options.node_path)
- return options
-
-def Main(args):
- options = ParseOptions(args)
- Clean(options)
- try:
- CherryPick(options)
- UpdateVersion(options)
- CreateCommit(options)
- except:
- print ">> Failed. Resetting."
- subprocess.check_output(["git", "reset", "--hard"], cwd=options.node_path)
- raise
-
-if __name__ == "__main__":
- Main(sys.argv[1:])
diff --git a/deps/v8/tools/node/fetch_deps.py b/deps/v8/tools/node/fetch_deps.py
index 332c6e2d7d..872263f627 100755
--- a/deps/v8/tools/node/fetch_deps.py
+++ b/deps/v8/tools/node/fetch_deps.py
@@ -24,8 +24,6 @@ GCLIENT_SOLUTION = [
# These deps are already part of Node.js.
"v8/base/trace_event/common" : None,
"v8/third_party/googletest/src" : None,
- "v8/third_party/jinja2" : None,
- "v8/third_party/markupsafe" : None,
# These deps are unnecessary for building.
"v8/test/benchmarks/data" : None,
"v8/testing/gmock" : None,
diff --git a/deps/v8/tools/node/test_backport_node.py b/deps/v8/tools/node/test_backport_node.py
deleted file mode 100755
index 3c61a402c4..0000000000
--- a/deps/v8/tools/node/test_backport_node.py
+++ /dev/null
@@ -1,70 +0,0 @@
-#!/usr/bin/env python
-# Copyright 2017 the V8 project authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-import os
-import shutil
-import subprocess
-import sys
-import tempfile
-import unittest
-
-import backport_node
-
-# Base paths.
-BASE_DIR = os.path.dirname(os.path.abspath(__file__))
-TEST_DATA = os.path.join(BASE_DIR, 'testdata')
-
-def gitify(path):
- files = os.listdir(path)
- subprocess.check_call(['git', 'init'], cwd=path)
- subprocess.check_call(['git', 'add'] + files, cwd=path)
- subprocess.check_call(['git', 'commit', '-m', 'Initial'], cwd=path)
-
-class TestUpdateNode(unittest.TestCase):
- def setUp(self):
- self.workdir = tempfile.mkdtemp(prefix='tmp_test_node_')
-
- def tearDown(self):
- shutil.rmtree(self.workdir)
-
- def testUpdate(self):
- v8_cwd = os.path.join(self.workdir, 'v8')
- node_cwd = os.path.join(self.workdir, 'node')
-
- # Set up V8 test fixture.
- shutil.copytree(src=os.path.join(TEST_DATA, 'v8'), dst=v8_cwd)
- gitify(v8_cwd)
-
- # Set up node test fixture.
- shutil.copytree(src=os.path.join(TEST_DATA, 'node'), dst=node_cwd)
- gitify(os.path.join(node_cwd))
-
- # Add a patch.
- with open(os.path.join(v8_cwd, 'v8_foo'), 'w') as f:
- f.write('zonk')
- subprocess.check_call(['git', 'add', 'v8_foo'], cwd=v8_cwd)
- subprocess.check_call(['git', 'commit', '-m', "Title\n\nBody"], cwd=v8_cwd)
- commit = subprocess.check_output(['git', 'rev-parse', 'HEAD'], cwd=v8_cwd).strip()
-
- # Run update script.
- backport_node.Main([v8_cwd, node_cwd, commit, "--no-review"])
-
- # Check message.
- message = subprocess.check_output(['git', 'log', '-1', '--format=%B'], cwd=node_cwd)
- self.assertIn('Original commit message:\n\n Title\n\n Body', message)
-
- # Check patch.
- gitlog = subprocess.check_output(
- ['git', 'diff', 'master', '--cached', '--', 'deps/v8/v8_foo'],
- cwd=node_cwd,
- )
- self.assertIn('+zonk', gitlog.strip())
-
- # Check version.
- version_file = os.path.join(node_cwd, "deps", "v8", "include", "v8-version.h")
- self.assertIn('#define V8_PATCH_LEVEL 4322', backport_node.FileToText(version_file))
-
-if __name__ == "__main__":
- unittest.main()
diff --git a/deps/v8/tools/node/update_node.py b/deps/v8/tools/node/update_node.py
index fb3c2a0aec..c480a69a9b 100755
--- a/deps/v8/tools/node/update_node.py
+++ b/deps/v8/tools/node/update_node.py
@@ -91,10 +91,11 @@ def UpdateTarget(repository, options, files_to_keep):
git_args.append(["add"] + files_to_keep) # add and commit
git_args.append(["commit", "-m", "keep files"]) # files we want to keep
+ git_args.append(["clean", "-fxd"]) # nuke everything else
git_args.append(["remote", "add", "source", source]) # point to source repo
git_args.append(["fetch", "source", "HEAD"]) # sync to current branch
git_args.append(["checkout", "-f", "FETCH_HEAD"]) # switch to that branch
- git_args.append(["clean", "-fd"]) # delete removed files
+ git_args.append(["clean", "-fxd"]) # delete removed files
if files_to_keep:
git_args.append(["cherry-pick", "master"]) # restore kept files
diff --git a/deps/v8/tools/parse-processor b/deps/v8/tools/parse-processor
index 85a82d5479..1c7175257d 100755
--- a/deps/v8/tools/parse-processor
+++ b/deps/v8/tools/parse-processor
@@ -28,7 +28,7 @@ fi
if [ ! -x "$d8_exec" ]; then
echo "d8 shell not found in $D8_PATH"
- echo "To build, execute 'make native' from the V8 directory"
+ echo "Please provide path to d8 as env var in D8_PATH"
exit 1
fi
diff --git a/deps/v8/tools/parse-processor.html b/deps/v8/tools/parse-processor.html
index 1e67e4a281..0f5818eaf0 100644
--- a/deps/v8/tools/parse-processor.html
+++ b/deps/v8/tools/parse-processor.html
@@ -1,3 +1,4 @@
+<!DOCTYPE html>
<html>
<!--
Copyright 2016 the V8 project authors. All rights reserved. Use of this source
@@ -5,6 +6,8 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
-->
<head>
+<meta charset="utf-8">
+<title>V8 Parse Processor</title>
<style>
html {
font-family: monospace;
@@ -100,18 +103,18 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
text-align: right;
}
</style>
-<script src="./splaytree.js" type="text/javascript"></script>
-<script src="./codemap.js" type="text/javascript"></script>
-<script src="./csvparser.js" type="text/javascript"></script>
-<script src="./consarray.js" type="text/javascript"></script>
-<script src="./profile.js" type="text/javascript"></script>
-<script src="./profile_view.js" type="text/javascript"></script>
-<script src="./logreader.js" type="text/javascript"></script>
-<script src="./arguments.js" type="text/javascript"></script>
-<script src="./parse-processor.js" type="text/javascript"></script>
-<script src="./SourceMap.js" type="text/javascript"></script>
-<script type="text/javascript" src="https://www.gstatic.com/charts/loader.js"></script>
-<script type="text/javascript">
+<script src="./splaytree.js"></script>
+<script src="./codemap.js"></script>
+<script src="./csvparser.js"></script>
+<script src="./consarray.js"></script>
+<script src="./profile.js"></script>
+<script src="./profile_view.js"></script>
+<script src="./logreader.js"></script>
+<script src="./arguments.js"></script>
+<script src="./parse-processor.js"></script>
+<script src="./SourceMap.js"></script>
+<script src="https://www.gstatic.com/charts/loader.js"></script>
+<script>
"use strict";
google.charts.load('current', {packages: ['corechart']});
diff --git a/deps/v8/tools/plot-timer-events b/deps/v8/tools/plot-timer-events
index 3294e85862..02176320a5 100755
--- a/deps/v8/tools/plot-timer-events
+++ b/deps/v8/tools/plot-timer-events
@@ -32,7 +32,7 @@ fi
if test ! -x "$d8_exec"; then
echo "d8 shell not found in $D8_PATH"
- echo "To build, execute 'make native' from the V8 directory"
+ echo "Please provide path to d8 as env var in D8_PATH"
exit 1
fi
diff --git a/deps/v8/tools/profview/profview.js b/deps/v8/tools/profview/profview.js
index 5bd64a49bd..210cec7618 100644
--- a/deps/v8/tools/profview/profview.js
+++ b/deps/v8/tools/profview/profview.js
@@ -55,6 +55,7 @@ function setCallTreeState(state, callTreeState) {
let main = {
currentState : emptyState(),
+ renderPending : false,
setMode(mode) {
if (mode !== main.currentState.mode) {
@@ -197,7 +198,11 @@ let main = {
},
delayRender() {
- Promise.resolve().then(() => {
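+    // Coalesce render requests: at most one render per animation frame.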
+ if (main.renderPending) return;
+ main.renderPending = true;
+
+ window.requestAnimationFrame(() => {
+ main.renderPending = false;
for (let c of components) {
c.render(main.currentState);
}
@@ -496,7 +501,9 @@ class CallTreeView {
nameCell.appendChild(createTypeNode(node.type));
nameCell.appendChild(createFunctionNode(node.name, node.codeId));
if (main.currentState.sourceData &&
- main.currentState.sourceData.hasSource(node.name)) {
+ node.codeId >= 0 &&
+ main.currentState.sourceData.hasSource(
+ this.currentState.file.code[node.codeId].func)) {
nameCell.appendChild(createViewSourceNode(node.codeId));
}
@@ -1369,7 +1376,7 @@ class SourceData {
this.functions = new Map();
for (let codeId = 0; codeId < file.code.length; ++codeId) {
let codeBlock = file.code[codeId];
- if (codeBlock.source) {
+ if (codeBlock.source && codeBlock.func !== undefined) {
let data = this.functions.get(codeBlock.func);
if (!data) {
data = new FunctionSourceData(codeBlock.source.script,
@@ -1386,7 +1393,7 @@ class SourceData {
for (let i = 0; i < stack.length; i += 2) {
let codeId = stack[i];
if (codeId < 0) continue;
- let functionid = file.code[codeId].func;
+ let functionId = file.code[codeId].func;
if (this.functions.has(functionId)) {
let codeOffset = stack[i + 1];
this.functions.get(functionId).addOffsetSample(codeId, codeOffset);
diff --git a/deps/v8/tools/release/auto_roll.py b/deps/v8/tools/release/auto_roll.py
index 83c3d343aa..dd60d5dff7 100755
--- a/deps/v8/tools/release/auto_roll.py
+++ b/deps/v8/tools/release/auto_roll.py
@@ -20,7 +20,7 @@ Please close rolling in case of a roll revert:
https://v8-roll.appspot.com/
This only works with a Google account.
-CQ_INCLUDE_TRYBOTS=master.tryserver.blink:linux_trusty_blink_rel;luci.chromium.try:linux_optional_gpu_tests_rel;luci.chromium.try:mac_optional_gpu_tests_rel;luci.chromium.try:win_optional_gpu_tests_rel;luci.chromium.try:android_optional_gpu_tests_rel""")
+CQ_INCLUDE_TRYBOTS=luci.chromium.try:linux-blink-rel;luci.chromium.try:linux_optional_gpu_tests_rel;luci.chromium.try:mac_optional_gpu_tests_rel;luci.chromium.try:win_optional_gpu_tests_rel;luci.chromium.try:android_optional_gpu_tests_rel""")
class Preparation(Step):
MESSAGE = "Preparation."
@@ -155,8 +155,7 @@ class UploadCL(Step):
message.append("TBR=%s" % self._options.reviewer)
self.GitCommit("\n\n".join(message), author=self._options.author, cwd=cwd)
if not self._options.dry_run:
- self.GitUpload(author=self._options.author,
- force=True,
+ self.GitUpload(force=True,
bypass_hooks=True,
cq=self._options.use_commit_queue,
cq_dry_run=self._options.use_dry_run,
diff --git a/deps/v8/tools/release/common_includes.py b/deps/v8/tools/release/common_includes.py
index 0f12d910da..bd28fe3aa7 100644
--- a/deps/v8/tools/release/common_includes.py
+++ b/deps/v8/tools/release/common_includes.py
@@ -782,7 +782,7 @@ class UploadStep(Step):
self.DieNoManualMode("A reviewer must be specified in forced mode.")
reviewer = self.ReadLine()
- self.GitUpload(reviewer, self._options.author, self._options.force_upload,
+ self.GitUpload(reviewer, self._options.force_upload,
bypass_hooks=self._options.bypass_upload_hooks,
cc=self._options.cc, tbr_reviewer=tbr_reviewer)
diff --git a/deps/v8/tools/release/create_release.py b/deps/v8/tools/release/create_release.py
index ffa5c2a0ca..f030ac804e 100755
--- a/deps/v8/tools/release/create_release.py
+++ b/deps/v8/tools/release/create_release.py
@@ -207,8 +207,7 @@ class CommitBranch(Step):
self["commit_title"] = text.splitlines()[0]
TextToFile(text, self.Config("COMMITMSG_FILE"))
- self.GitCommit(file_name = self.Config("COMMITMSG_FILE"))
- os.remove(self.Config("COMMITMSG_FILE"))
+ self.GitCommit(file_name=self.Config("COMMITMSG_FILE"))
os.remove(self.Config("CHANGELOG_ENTRY_FILE"))
@@ -219,16 +218,18 @@ class LandBranch(Step):
if self._options.dry_run:
print "Dry run - upload CL."
else:
- self.GitUpload(author=self._options.author,
- force=True,
+ self.GitUpload(force=True,
bypass_hooks=True,
- no_autocc=True)
+ no_autocc=True,
+ message_file=self.Config("COMMITMSG_FILE"))
cmd = "cl land --bypass-hooks -f"
if self._options.dry_run:
print "Dry run. Command:\ngit %s" % cmd
else:
self.Git(cmd)
+ os.remove(self.Config("COMMITMSG_FILE"))
+
class TagRevision(Step):
MESSAGE = "Tag the new revision."
diff --git a/deps/v8/tools/release/filter_build_files.py b/deps/v8/tools/release/filter_build_files.py
index 9cc6607108..032848e3cc 100755
--- a/deps/v8/tools/release/filter_build_files.py
+++ b/deps/v8/tools/release/filter_build_files.py
@@ -23,6 +23,7 @@ import sys
EXECUTABLE_FILES = [
'd8',
+ 'cctest',
]
SUPPLEMENTARY_FILES = [
diff --git a/deps/v8/tools/release/git_recipes.py b/deps/v8/tools/release/git_recipes.py
index a002f4211c..0997e0bb89 100644
--- a/deps/v8/tools/release/git_recipes.py
+++ b/deps/v8/tools/release/git_recipes.py
@@ -205,12 +205,10 @@ class GitRecipesMixin(object):
args.append(Quoted(patch_file))
self.Git(MakeArgs(args), **kwargs)
- def GitUpload(self, reviewer="", author="", force=False, cq=False,
+ def GitUpload(self, reviewer="", force=False, cq=False,
cq_dry_run=False, bypass_hooks=False, cc="", tbr_reviewer="",
- no_autocc=False, **kwargs):
+ no_autocc=False, message_file=None, **kwargs):
args = ["cl upload --send-mail"]
- if author:
- args += ["--email", Quoted(author)]
if reviewer:
args += ["-r", Quoted(reviewer)]
if tbr_reviewer:
@@ -227,6 +225,8 @@ class GitRecipesMixin(object):
args.append("--no-autocc")
if cc:
args += ["--cc", Quoted(cc)]
+ if message_file:
+ args += ["--message-file", Quoted(message_file)]
args += ["--gerrit"]
# TODO(machenbach): Check output in forced mode. Verify that all required
# base files were uploaded, if not retry.
diff --git a/deps/v8/tools/release/test_scripts.py b/deps/v8/tools/release/test_scripts.py
index f3dc400e58..e454f542ae 100755
--- a/deps/v8/tools/release/test_scripts.py
+++ b/deps/v8/tools/release/test_scripts.py
@@ -933,8 +933,9 @@ TBR=reviewer@chromium.org"""
cb=self.WriteFakeWatchlistsFile),
Cmd("git commit -aF \"%s\"" % TEST_CONFIG["COMMITMSG_FILE"], "",
cb=CheckVersionCommit),
- Cmd("git cl upload --send-mail --email \"author@chromium.org\" "
- "-f --bypass-hooks --no-autocc --gerrit", ""),
+ Cmd("git cl upload --send-mail "
+ "-f --bypass-hooks --no-autocc --message-file "
+ "\"%s\" --gerrit" % TEST_CONFIG["COMMITMSG_FILE"], ""),
Cmd("git cl land --bypass-hooks -f", ""),
Cmd("git fetch", ""),
Cmd("git log -1 --format=%H --grep="
@@ -1003,7 +1004,7 @@ Please close rolling in case of a roll revert:
https://v8-roll.appspot.com/
This only works with a Google account.
-CQ_INCLUDE_TRYBOTS=master.tryserver.blink:linux_trusty_blink_rel;luci.chromium.try:linux_optional_gpu_tests_rel;luci.chromium.try:mac_optional_gpu_tests_rel;luci.chromium.try:win_optional_gpu_tests_rel;luci.chromium.try:android_optional_gpu_tests_rel
+CQ_INCLUDE_TRYBOTS=luci.chromium.try:linux-blink-rel;luci.chromium.try:linux_optional_gpu_tests_rel;luci.chromium.try:mac_optional_gpu_tests_rel;luci.chromium.try:win_optional_gpu_tests_rel;luci.chromium.try:android_optional_gpu_tests_rel
TBR=reviewer@chromium.org"""
@@ -1085,7 +1086,7 @@ deps = {
"--author \"author@chromium.org <author@chromium.org>\"" %
self.ROLL_COMMIT_MSG),
"", cwd=chrome_dir),
- Cmd("git cl upload --send-mail --email \"author@chromium.org\" -f "
+ Cmd("git cl upload --send-mail -f "
"--cq-dry-run --bypass-hooks --gerrit", "",
cwd=chrome_dir),
Cmd("git checkout -f master", "", cwd=chrome_dir),
diff --git a/deps/v8/tools/run-clang-tidy.py b/deps/v8/tools/run-clang-tidy.py
index 11826f19b1..bf08a65fcd 100755
--- a/deps/v8/tools/run-clang-tidy.py
+++ b/deps/v8/tools/run-clang-tidy.py
@@ -74,6 +74,7 @@ def GenerateCompileCommands(build_folder):
line = line.replace('-Wno-ignored-pragma-optimize', '')
line = line.replace('-Wno-null-pointer-arithmetic', '')
line = line.replace('-Wno-unused-lambda-capture', '')
+ line = line.replace('-Wno-defaulted-function-deleted', '')
cc_file.write(line)
@@ -179,7 +180,7 @@ def ClangTidyRunDiff(build_folder, diff_branch, auto_fix):
"""
The script `clang-tidy-diff` does not provide support to add header-
filters. To still analyze headers we use the build path option `-path` to
- inject out header-filter option. This works because the script just adds
+ inject our header-filter option. This works because the script just adds
the passed path string to the commandline of clang-tidy.
"""
modified_build_folder = build_folder
diff --git a/deps/v8/tools/run-tests.py.vpython b/deps/v8/tools/run-tests.py.vpython
deleted file mode 100644
index 6a12277f6b..0000000000
--- a/deps/v8/tools/run-tests.py.vpython
+++ /dev/null
@@ -1,32 +0,0 @@
-# This is a vpython "spec" file.
-#
-# It describes patterns for python wheel dependencies of the python scripts in
-# the chromium repo, particularly for dependencies that have compiled components
-# (since pure-python dependencies can be easily vendored into third_party).
-#
-# When vpython is invoked, it finds this file and builds a python VirtualEnv,
-# containing all of the dependencies described in this file, fetching them from
-# CIPD (the "Chrome Infrastructure Package Deployer" service). Unlike `pip`,
-# this never requires the end-user machine to have a working python extension
-# compilation environment. All of these packages are built using:
-# https://chromium.googlesource.com/infra/infra/+/master/infra/tools/dockerbuild/
-#
-# All python scripts in the repo share this same spec, to avoid dependency
-# fragmentation.
-#
-# If you have depot_tools installed in your $PATH, you can invoke python scripts
-# in this repo by running them as you normally would run them, except
-# substituting `vpython` instead of `python` on the command line, e.g.:
-# vpython path/to/script.py some --arguments
-#
-# Read more about `vpython` and how to modify this file here:
-# https://chromium.googlesource.com/infra/infra/+/master/doc/users/vpython.md
-
-python_version: "2.7"
-
-# Used by:
-# test/test262
-wheel: <
- name: "infra/python/wheels/pyyaml/${vpython_platform}"
- version: "version:3.12"
->
diff --git a/deps/v8/tools/run_perf.py b/deps/v8/tools/run_perf.py
index 67861db3ea..46afbdedce 100755
--- a/deps/v8/tools/run_perf.py
+++ b/deps/v8/tools/run_perf.py
@@ -108,6 +108,7 @@ import os
import re
import subprocess
import sys
+import traceback
from testrunner.local import android
from testrunner.local import command
@@ -125,6 +126,7 @@ GENERIC_RESULTS_RE = re.compile(r"^RESULT ([^:]+): ([^=]+)= ([^ ]+) ([^ ]*)$")
RESULT_STDDEV_RE = re.compile(r"^\{([^\}]+)\}$")
RESULT_LIST_RE = re.compile(r"^\[([^\]]+)\]$")
TOOLS_BASE = os.path.abspath(os.path.dirname(__file__))
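+# Exit code used to signal an infrastructure failure to the caller, as opposed
+# to the plain exit code 1 used for ordinary test failures.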
+INFRA_FAILURE_RETCODE = 87
def GeometricMean(values):
@@ -136,6 +138,11 @@ def GeometricMean(values):
return str(math.exp(sum(map(math.log, values)) / len(values)))
+class TestFailedError(Exception):
+ """Error raised when a test has failed due to a non-infra issue."""
+ pass
+
+
class Results(object):
"""Place holder for result traces."""
def __init__(self, traces=None, errors=None):
@@ -690,7 +697,7 @@ class DesktopPlatform(Platform):
output = cmd.execute()
except OSError: # pragma: no cover
logging.exception(title % "OSError")
- return ""
+ raise
logging.info(title % "Stdout" + "\n%s", output.stdout)
if output.stderr: # pragma: no cover
@@ -698,6 +705,10 @@ class DesktopPlatform(Platform):
logging.info(title % "Stderr" + "\n%s", output.stderr)
if output.timed_out:
logging.warning(">>> Test timed out after %ss.", runnable.timeout)
+ raise TestFailedError()
+ if output.exit_code != 0:
+ logging.warning(">>> Test crashed.")
+ raise TestFailedError()
if '--prof' in self.extra_flags:
os_prefix = {"linux": "linux", "macos": "mac"}.get(utils.GuessOS())
if os_prefix:
@@ -779,12 +790,13 @@ class AndroidPlatform(Platform): # pragma: no cover
logging.info(title % "Stdout" + "\n%s", stdout)
except android.CommandFailedException as e:
logging.info(title % "Stdout" + "\n%s", e.output)
- raise
+ logging.warning('>>> Test crashed.')
+ raise TestFailedError()
except android.TimeoutException as e:
- if e.output:
+ if e.output is not None:
logging.info(title % "Stdout" + "\n%s", e.output)
logging.warning(">>> Test timed out after %ss.", runnable.timeout)
- stdout = ""
+ raise TestFailedError()
if runnable.process_size:
return stdout + "MaxMemory: Unsupported"
return stdout
@@ -962,28 +974,24 @@ def Main(args):
(options, args) = parser.parse_args(args)
- if options.buildbot:
- logging.basicConfig(
- level=logging.INFO, format="%(asctime)s %(levelname)s %(message)s")
- else:
- logging.basicConfig(level=logging.INFO, format="%(message)s")
+ logging.basicConfig(level=logging.INFO, format="%(levelname)-8s %(message)s")
if len(args) == 0: # pragma: no cover
parser.print_help()
- return 1
+ return INFRA_FAILURE_RETCODE
if options.arch in ["auto", "native"]: # pragma: no cover
options.arch = ARCH_GUESS
if not options.arch in SUPPORTED_ARCHS: # pragma: no cover
logging.error("Unknown architecture %s", options.arch)
- return 1
+ return INFRA_FAILURE_RETCODE
if (options.json_test_results_secondary and
not options.outdir_secondary): # pragma: no cover
logging.error("For writing secondary json test results, a secondary outdir "
"patch must be specified.")
- return 1
+ return INFRA_FAILURE_RETCODE
workspace = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
@@ -998,10 +1006,10 @@ def Main(args):
else:
if not os.path.isfile(options.binary_override_path):
logging.error("binary-override-path must be a file name")
- return 1
+ return INFRA_FAILURE_RETCODE
if options.outdir_secondary:
logging.error("specify either binary-override-path or outdir-secondary")
- return 1
+ return INFRA_FAILURE_RETCODE
options.shell_dir = os.path.abspath(
os.path.dirname(options.binary_override_path))
default_binary_name = os.path.basename(options.binary_override_path)
@@ -1029,6 +1037,8 @@ def Main(args):
results = Results()
results_secondary = Results()
+ # We use list here to allow modification in nested function below.
+ have_failed_tests = [False]
with CustomMachineConfiguration(governor = options.cpu_governor,
disable_aslr = options.noaslr) as conf:
for path in args:
@@ -1067,7 +1077,10 @@ def Main(args):
for i in xrange(0, max(1, total_runs)):
# TODO(machenbach): Allow timeout per arch like with run_count per
# arch.
- yield platform.Run(runnable, i)
+ try:
+ yield platform.Run(runnable, i)
+ except TestFailedError:
+ have_failed_tests[0] = True
# Let runnable iterate over all runs and handle output.
result, result_secondary = runnable.Run(
@@ -1086,7 +1099,20 @@ def Main(args):
else: # pragma: no cover
print results_secondary
- return min(1, len(results.errors))
+ if results.errors or have_failed_tests[0]:
+ return 1
+
+ return 0
+
+
+def MainWrapper():
+ try:
+ return Main(sys.argv[1:])
+ except:
+ # Log uncaptured exceptions and report infra failure to the caller.
+ traceback.print_exc()
+ return INFRA_FAILURE_RETCODE
+
if __name__ == "__main__": # pragma: no cover
- sys.exit(Main(sys.argv[1:]))
+ sys.exit(MainWrapper())
diff --git a/deps/v8/tools/sanitizers/tsan_suppressions.txt b/deps/v8/tools/sanitizers/tsan_suppressions.txt
index 839636c8ce..270340e484 100644
--- a/deps/v8/tools/sanitizers/tsan_suppressions.txt
+++ b/deps/v8/tools/sanitizers/tsan_suppressions.txt
@@ -4,7 +4,3 @@
# Incorrectly detected lock cycles in test-lockers
# https://code.google.com/p/thread-sanitizer/issues/detail?id=81
deadlock:LockAndUnlockDifferentIsolatesThread::Run
-
-# Data race in a third party lib
-# https://bugs.chromium.org/p/v8/issues/detail?id=8110
-race:IndianCalendar::fgSystemDefaultCenturyStartYear
diff --git a/deps/v8/tools/snapshot/asm_to_inline_asm.py b/deps/v8/tools/snapshot/asm_to_inline_asm.py
new file mode 100644
index 0000000000..ad8fdcb0fe
--- /dev/null
+++ b/deps/v8/tools/snapshot/asm_to_inline_asm.py
@@ -0,0 +1,31 @@
+#!/usr/bin/env python
+
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+'''
+Converts a given file in clang assembly syntax to a corresponding
+representation in inline assembly. Specifically, this is used to convert
+embedded.S to embedded.cc for Windows clang builds.
+'''
+
+import argparse
+import sys
+
+def asm_to_inl_asm(in_filename, out_filename):
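+  # For example, an input line of `movl %eax, %ebx` (illustrative) is emitted
+  # as  "movl %eax, %ebx\n"  within a single __asm__(...); block.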
+ with open(in_filename, 'r') as infile, open(out_filename, 'wb') as outfile:
+ outfile.write('__asm__(\n')
+ for line in infile:
+ # Escape " in .S file before outputing it to inline asm file.
+ line = line.replace('"', '\\"')
+ outfile.write(' "%s\\n"\n' % line.rstrip())
+ outfile.write(');\n')
+ return 0
+
+if __name__ == '__main__':
+ parser = argparse.ArgumentParser(description=__doc__)
+ parser.add_argument('input', help='Name of the input assembly file')
+ parser.add_argument('output', help='Name of the target CC file')
+ args = parser.parse_args()
+ sys.exit(asm_to_inl_asm(args.input, args.output))
diff --git a/deps/v8/tools/testrunner/base_runner.py b/deps/v8/tools/testrunner/base_runner.py
index fcb2202f8a..8c5f7ad205 100644
--- a/deps/v8/tools/testrunner/base_runner.py
+++ b/deps/v8/tools/testrunner/base_runner.py
@@ -51,6 +51,7 @@ TEST_MAP = {
"inspector",
"webkit",
"mkgrokdump",
+ "wasm-js",
"fuzzer",
"message",
"preparser",
@@ -65,6 +66,7 @@ TEST_MAP = {
"wasm-spec-tests",
"inspector",
"mkgrokdump",
+ "wasm-js",
"fuzzer",
"message",
"preparser",
@@ -183,6 +185,10 @@ class BuildConfig(object):
self.predictable = build_config['v8_enable_verify_predictable']
self.tsan = build_config['is_tsan']
self.ubsan_vptr = build_config['is_ubsan_vptr']
+ self.embedded_builtins = build_config['v8_enable_embedded_builtins']
+ self.verify_csa = build_config['v8_enable_verify_csa']
+ self.lite_mode = build_config['v8_enable_lite_mode']
+ self.pointer_compression = build_config['v8_enable_pointer_compression']
# Export only for MIPS target
if self.arch in ['mips', 'mipsel', 'mips64', 'mips64el']:
self.mips_arch_variant = build_config['mips_arch_variant']
@@ -211,6 +217,14 @@ class BuildConfig(object):
detected_options.append('tsan')
if self.ubsan_vptr:
detected_options.append('ubsan_vptr')
+ if self.embedded_builtins:
+ detected_options.append('embedded_builtins')
+ if self.verify_csa:
+ detected_options.append('verify_csa')
+ if self.lite_mode:
+ detected_options.append('lite_mode')
+ if self.pointer_compression:
+ detected_options.append('pointer_compression')
return '\n'.join(detected_options)
@@ -246,14 +260,10 @@ class BaseTestRunner(object):
raise
args = self._parse_test_args(args)
- suites = self._get_suites(args, options)
- self._prepare_suites(suites, options)
-
+ tests = self._load_testsuite_generators(args, options)
self._setup_env()
-
print(">>> Running tests for %s.%s" % (self.build_config.arch,
self.mode_name))
- tests = [t for s in suites for t in s.tests]
return self._do_execute(tests, args, options)
except TestRunnerError:
return utils.EXIT_CODE_INTERNAL_ERROR
@@ -334,6 +344,8 @@ class BaseTestRunner(object):
help="Run without test harness of a given suite")
parser.add_option("--random-seed", default=0, type=int,
help="Default seed for initializing random generator")
+ parser.add_option("--run-skipped", help="Also run skipped tests.",
+ default=False, action="store_true")
parser.add_option("-t", "--timeout", default=60, type=int,
help="Timeout for single test in seconds")
parser.add_option("-v", "--verbose", default=False, action="store_true",
@@ -580,10 +592,6 @@ class BaseTestRunner(object):
return reduce(list.__add__, map(expand_test_group, args), [])
- def _get_suites(self, args, options):
- names = self._args_to_suite_names(args, options.test_root)
- return self._load_suites(names, options)
-
def _args_to_suite_names(self, args, test_root):
# Use default tests if no test configuration was provided at the cmd line.
all_names = set(utils.GetSuitePaths(test_root))
@@ -593,26 +601,34 @@ class BaseTestRunner(object):
def _get_default_suite_names(self):
return []
- def _load_suites(self, names, options):
+ def _load_testsuite_generators(self, args, options):
+ names = self._args_to_suite_names(args, options.test_root)
test_config = self._create_test_config(options)
- def load_suite(name):
+ variables = self._get_statusfile_variables(options)
+ slow_chain, fast_chain = [], []
+ for name in names:
if options.verbose:
print '>>> Loading test suite: %s' % name
- return testsuite.TestSuite.LoadTestSuite(
- os.path.join(options.test_root, name),
- test_config)
- return map(load_suite, names)
-
- def _prepare_suites(self, suites, options):
- self._load_status_files(suites, options)
- for s in suites:
- s.ReadTestCases()
-
- def _load_status_files(self, suites, options):
- # simd_mips is true if SIMD is fully supported on MIPS
- variables = self._get_statusfile_variables(options)
- for s in suites:
- s.ReadStatusFile(variables)
+ suite = testsuite.TestSuite.Load(
+ os.path.join(options.test_root, name), test_config)
+
+ if self._is_testsuite_supported(suite, options):
+ slow_tests, fast_tests = suite.load_tests_from_disk(variables)
+ slow_chain.append(slow_tests)
+ fast_chain.append(fast_tests)
+
+ for tests in slow_chain:
+ for test in tests:
+ yield test
+
+ for tests in fast_chain:
+ for test in tests:
+ yield test
+
+ def _is_testsuite_supported(self, suite, options):
+ """A predicate that can be overridden to filter out unsupported TestSuite
+ instances (see NumFuzzer for usage)."""
+ return True
def _get_statusfile_variables(self, options):
simd_mips = (
@@ -624,7 +640,6 @@ class BaseTestRunner(object):
self.build_config.arch in ['mipsel', 'mips', 'mips64', 'mips64el'] and
self.build_config.mips_arch_variant)
- # TODO(all): Combine "simulator" and "simulator_run".
# TODO(machenbach): In GN we can derive simulator run from
# target_arch != v8_target_arch in the dumped build config.
return {
@@ -639,19 +654,25 @@ class BaseTestRunner(object):
"gcov_coverage": self.build_config.gcov_coverage,
"isolates": options.isolates,
"mips_arch_variant": mips_arch_variant,
- "mode": self.mode_options.status_mode,
+ "mode": self.mode_options.status_mode
+ if not self.build_config.dcheck_always_on
+ else "debug",
"msan": self.build_config.msan,
"no_harness": options.no_harness,
"no_i18n": self.build_config.no_i18n,
"no_snap": self.build_config.no_snap,
"novfp3": False,
+ "optimize_for_size": "--optimize-for-size" in options.extra_flags,
"predictable": self.build_config.predictable,
"simd_mips": simd_mips,
- "simulator": utils.UseSimulator(self.build_config.arch),
"simulator_run": False,
"system": self.target_os,
"tsan": self.build_config.tsan,
"ubsan_vptr": self.build_config.ubsan_vptr,
+ "embedded_builtins": self.build_config.embedded_builtins,
+ "verify_csa": self.build_config.verify_csa,
+ "lite_mode": self.build_config.lite_mode,
+ "pointer_compression": self.build_config.pointer_compression,
}
def _create_test_config(self, options):
@@ -664,6 +685,7 @@ class BaseTestRunner(object):
no_harness=options.no_harness,
noi18n=self.build_config.no_i18n,
random_seed=options.random_seed,
+ run_skipped=options.run_skipped,
shell_dir=self.outdir,
timeout=timeout,
verbose=options.verbose,
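
For illustration, the slow-first scheduling that `_load_testsuite_generators` implements above is equivalent to chaining the per-suite generators; a minimal sketch under that assumption (`chain_slow_first` is a hypothetical helper, not part of the runner):

    import itertools

    def chain_slow_first(slow_chain, fast_chain):
        # Slow tests are yielded before fast ones so long-running tests
        # start early instead of dominating the tail of the run; nothing
        # is materialized because the per-suite generators stay lazy.
        return itertools.chain(
            itertools.chain.from_iterable(slow_chain),
            itertools.chain.from_iterable(fast_chain))
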
diff --git a/deps/v8/tools/testrunner/local/android.py b/deps/v8/tools/testrunner/local/android.py
index 707614f095..5724f9ee2a 100644
--- a/deps/v8/tools/testrunner/local/android.py
+++ b/deps/v8/tools/testrunner/local/android.py
@@ -42,21 +42,16 @@ class _Driver(object):
from devil.android import device_utils # pylint: disable=import-error
from devil.android.perf import cache_control # pylint: disable=import-error
from devil.android.perf import perf_control # pylint: disable=import-error
- from devil.android.sdk import adb_wrapper # pylint: disable=import-error
global cache_control
global device_errors
global perf_control
devil_chromium.Initialize()
- if not device:
- # Detect attached device if not specified.
- devices = adb_wrapper.AdbWrapper.Devices()
- assert devices, 'No devices detected'
- assert len(devices) == 1, 'Multiple devices detected.'
- device = str(devices[0])
- self.adb_wrapper = adb_wrapper.AdbWrapper(device)
- self.device = device_utils.DeviceUtils(self.adb_wrapper)
+    # Find the specified device, or the single attached device if none was given.
+    # If no device or multiple devices are attached, this raises an exception.
+ self.device = device_utils.DeviceUtils.HealthyDevices(
+ retries=5, enable_usb_resets=True, device_arg=device)[0]
# This remembers what we have already pushed to the device.
self.pushed = set()
@@ -77,6 +72,8 @@ class _Driver(object):
skip_if_missing: Keeps silent about missing files when set. Otherwise logs
error.
"""
+ # TODO(sergiyb): Implement this method using self.device.PushChangedFiles to
+ # avoid accessing low-level self.device.adb.
file_on_host = os.path.join(host_dir, file_name)
# Only push files not yet pushed in one execution.
@@ -95,13 +92,13 @@ class _Driver(object):
# Work-around for 'text file busy' errors. Push the files to a temporary
# location and then copy them with a shell command.
- output = self.adb_wrapper.Push(file_on_host, file_on_device_tmp)
+ output = self.device.adb.Push(file_on_host, file_on_device_tmp)
# Success looks like this: '3035 KB/s (12512056 bytes in 4.025s)'.
# Errors look like this: 'failed to copy ... '.
if output and not re.search('^[0-9]', output.splitlines()[-1]):
logging.critical('PUSH FAILED: ' + output)
- self.adb_wrapper.Shell('mkdir -p %s' % folder_on_device)
- self.adb_wrapper.Shell('cp %s %s' % (file_on_device_tmp, file_on_device))
+ self.device.adb.Shell('mkdir -p %s' % folder_on_device)
+ self.device.adb.Shell('cp %s %s' % (file_on_device_tmp, file_on_device))
self.pushed.add(file_on_host)
def push_executable(self, shell_dir, target_dir, binary):
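
The 'text file busy' work-around referenced in the hunk above pushes to a temporary location first and then copies the file into place via a shell command; a hedged sketch of that pattern (`adb` stands for any object exposing `Push` and `Shell`; paths are hypothetical):

    import posixpath

    def push_via_tmp(adb, host_path, device_path, tmp_dir='/data/local/tmp'):
        # Overwriting a running binary in place fails with 'text file
        # busy'; pushing elsewhere and cp-ing sidesteps the open handle.
        tmp_path = posixpath.join(tmp_dir, posixpath.basename(host_path))
        adb.Push(host_path, tmp_path)
        adb.Shell('mkdir -p %s' % posixpath.dirname(device_path))
        adb.Shell('cp %s %s' % (tmp_path, device_path))
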
diff --git a/deps/v8/tools/testrunner/local/fake_testsuite/fake_testsuite.status b/deps/v8/tools/testrunner/local/fake_testsuite/fake_testsuite.status
new file mode 100644
index 0000000000..b5ebc84474
--- /dev/null
+++ b/deps/v8/tools/testrunner/local/fake_testsuite/fake_testsuite.status
@@ -0,0 +1,5 @@
+# Copyright 2019 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+[]
diff --git a/deps/v8/tools/testrunner/local/fake_testsuite/testcfg.py b/deps/v8/tools/testrunner/local/fake_testsuite/testcfg.py
new file mode 100644
index 0000000000..61d75fb991
--- /dev/null
+++ b/deps/v8/tools/testrunner/local/fake_testsuite/testcfg.py
@@ -0,0 +1,23 @@
+#!/usr/bin/env python
+# Copyright 2019 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import sys
+
+from testrunner.local import testsuite, statusfile
+
+
+class TestSuite(testsuite.TestSuite):
+ def _test_class(self):
+ return testsuite.TestCase
+
+ def ListTests(self):
+ fast = self._create_test("fast")
+ slow = self._create_test("slow")
+ slow._statusfile_outcomes.append(statusfile.SLOW)
+ yield fast
+ yield slow
+
+def GetSuite(*args, **kwargs):
+ return TestSuite(*args, **kwargs)
diff --git a/deps/v8/tools/testrunner/local/statusfile.py b/deps/v8/tools/testrunner/local/statusfile.py
index 4742e84caf..5d05e23cc3 100644
--- a/deps/v8/tools/testrunner/local/statusfile.py
+++ b/deps/v8/tools/testrunner/local/statusfile.py
@@ -45,12 +45,13 @@ FAIL_SLOPPY = "FAIL_SLOPPY"
SKIP = "SKIP"
SLOW = "SLOW"
NO_VARIANTS = "NO_VARIANTS"
+FAIL_PHASE_ONLY = "FAIL_PHASE_ONLY"
ALWAYS = "ALWAYS"
KEYWORDS = {}
for key in [SKIP, FAIL, PASS, CRASH, SLOW, FAIL_OK, NO_VARIANTS, FAIL_SLOPPY,
- ALWAYS]:
+ ALWAYS, FAIL_PHASE_ONLY]:
KEYWORDS[key] = key
# Support arches, modes to be written as keywords instead of strings.
diff --git a/deps/v8/tools/testrunner/local/testsuite.py b/deps/v8/tools/testrunner/local/testsuite.py
index 4bdfd008fe..9d00d7bc12 100644
--- a/deps/v8/tools/testrunner/local/testsuite.py
+++ b/deps/v8/tools/testrunner/local/testsuite.py
@@ -29,6 +29,7 @@
import fnmatch
import imp
import os
+from contextlib import contextmanager
from . import command
from . import statusfile
@@ -77,19 +78,22 @@ class TestCombiner(object):
def _combined_test_class(self):
raise NotImplementedError()
+@contextmanager
+def _load_testsuite_module(name, root):
+ f = None
+ try:
+ (f, pathname, description) = imp.find_module("testcfg", [root])
+ yield imp.load_module(name + "_testcfg", f, pathname, description)
+ finally:
+ if f:
+ f.close()
class TestSuite(object):
@staticmethod
- def LoadTestSuite(root, test_config):
+ def Load(root, test_config):
name = root.split(os.path.sep)[-1]
- f = None
- try:
- (f, pathname, description) = imp.find_module("testcfg", [root])
- module = imp.load_module(name + "_testcfg", f, pathname, description)
+ with _load_testsuite_module(name, root) as module:
return module.GetSuite(name, root, test_config)
- finally:
- if f:
- f.close()
def __init__(self, name, root, test_config):
self.name = name # string
@@ -97,22 +101,22 @@ class TestSuite(object):
self.test_config = test_config
self.tests = None # list of TestCase objects
self.statusfile = None
- self.suppress_internals = False
def status_file(self):
return "%s/%s.status" % (self.root, self.name)
- def do_suppress_internals(self):
- """Specifies if this test suite should suppress asserts based on internals.
-
- Internals are e.g. testing against the outcome of native runtime functions.
- This is switched off on some fuzzers that violate these contracts.
- """
- self.suppress_internals = True
-
def ListTests(self):
raise NotImplementedError
+ def load_tests_from_disk(self, statusfile_variables):
+ self.statusfile = statusfile.StatusFile(
+ self.status_file(), statusfile_variables)
+
+ slow_tests = (test for test in self.ListTests() if test.is_slow)
+ fast_tests = (test for test in self.ListTests() if not test.is_slow)
+
+ return slow_tests, fast_tests
+
def get_variants_gen(self, variants):
return self._variants_gen_class()(variants)
@@ -134,90 +138,11 @@ class TestSuite(object):
"""
return None
- def ReadStatusFile(self, variables):
- self.statusfile = statusfile.StatusFile(self.status_file(), variables)
-
- def ReadTestCases(self):
- self.tests = self.ListTests()
-
-
- def FilterTestCasesByStatus(self,
- slow_tests_mode=None,
- pass_fail_tests_mode=None):
- """Filters tests by outcomes from status file.
-
- Status file has to be loaded before using this function.
-
- Args:
- slow_tests_mode: What to do with slow tests.
- pass_fail_tests_mode: What to do with pass or fail tests.
-
- Mode options:
- None (default) - don't skip
- "skip" - skip if slow/pass_fail
- "run" - skip if not slow/pass_fail
- """
- def _skip_slow(is_slow, mode):
- return (
- (mode == 'run' and not is_slow) or
- (mode == 'skip' and is_slow))
-
- def _skip_pass_fail(pass_fail, mode):
- return (
- (mode == 'run' and not pass_fail) or
- (mode == 'skip' and pass_fail))
-
- def _compliant(test):
- if test.do_skip:
- return False
- if _skip_slow(test.is_slow, slow_tests_mode):
- return False
- if _skip_pass_fail(test.is_pass_or_fail, pass_fail_tests_mode):
- return False
- return True
-
- self.tests = filter(_compliant, self.tests)
-
- def FilterTestCasesByArgs(self, args):
- """Filter test cases based on command-line arguments.
-
- args can be a glob: asterisks in any position of the argument
- represent zero or more characters. Without asterisks, only exact matches
- will be used with the exeption of the test-suite name as argument.
- """
- filtered = []
- globs = []
- for a in args:
- argpath = a.split('/')
- if argpath[0] != self.name:
- continue
- if len(argpath) == 1 or (len(argpath) == 2 and argpath[1] == '*'):
- return # Don't filter, run all tests in this suite.
- path = '/'.join(argpath[1:])
- globs.append(path)
-
- for t in self.tests:
- for g in globs:
- if fnmatch.fnmatch(t.path, g):
- filtered.append(t)
- break
- self.tests = filtered
-
def _create_test(self, path, **kwargs):
- if self.suppress_internals:
- test_class = self._suppressed_test_class()
- else:
- test_class = self._test_class()
+ test_class = self._test_class()
return test_class(self, path, self._path_to_name(path), self.test_config,
**kwargs)
- def _suppressed_test_class(self):
- """Optional testcase that suppresses assertions. Used by fuzzers that are
- only interested in dchecks or tsan and that might violate the assertions
- through fuzzing.
- """
- return self._test_class()
-
def _test_class(self):
raise NotImplementedError
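
Taken together, the renamed entry points are used roughly as follows; a hedged usage sketch (the suite path and `run` are hypothetical stand-ins):

    # Load() imports <root>/testcfg.py through the context manager above
    # and asks it for a suite; tests only hit the disk on demand.
    suite = TestSuite.Load('/path/to/test/mjsunit', test_config)
    slow, fast = suite.load_tests_from_disk(statusfile_variables={})
    for test in slow:   # generators are consumed lazily, slow tests first
        run(test)
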
diff --git a/deps/v8/tools/testrunner/local/testsuite_unittest.py b/deps/v8/tools/testrunner/local/testsuite_unittest.py
index efefe4c533..efc9fdacf0 100755
--- a/deps/v8/tools/testrunner/local/testsuite_unittest.py
+++ b/deps/v8/tools/testrunner/local/testsuite_unittest.py
@@ -5,6 +5,7 @@
import os
import sys
+import tempfile
import unittest
# Needed because the test runner contains relative imports.
@@ -14,107 +15,52 @@ sys.path.append(TOOLS_PATH)
from testrunner.local.testsuite import TestSuite
from testrunner.objects.testcase import TestCase
+from testrunner.test_config import TestConfig
class TestSuiteTest(unittest.TestCase):
- def test_filter_testcases_by_status_first_pass(self):
- suite = TestSuite('foo', 'bar')
- suite.rules = {
- '': {
- 'foo/bar': set(['PASS', 'SKIP']),
- 'baz/bar': set(['PASS', 'FAIL']),
- },
- }
- suite.prefix_rules = {
- '': {
- 'baz/': set(['PASS', 'SLOW']),
- },
- }
- suite.tests = [
- TestCase(suite, 'foo/bar', 'foo/bar'),
- TestCase(suite, 'baz/bar', 'baz/bar'),
- ]
- suite.FilterTestCasesByStatus()
- self.assertEquals(
- [TestCase(suite, 'baz/bar', 'baz/bar')],
- suite.tests,
+ def setUp(self):
+ test_dir = os.path.dirname(__file__)
+ self.test_root = os.path.join(test_dir, "fake_testsuite")
+ self.test_config = TestConfig(
+ command_prefix=[],
+ extra_flags=[],
+ isolates=False,
+ mode_flags=[],
+ no_harness=False,
+ noi18n=False,
+ random_seed=0,
+ run_skipped=False,
+ shell_dir='fake_testsuite/fake_d8',
+ timeout=10,
+ verbose=False,
)
- outcomes = suite.GetStatusFileOutcomes(suite.tests[0].name,
- suite.tests[0].variant)
- self.assertEquals(set(['PASS', 'FAIL', 'SLOW']), outcomes)
- def test_filter_testcases_by_status_second_pass(self):
- suite = TestSuite('foo', 'bar')
+ self.suite = TestSuite.Load(self.test_root, self.test_config)
- suite.rules = {
- '': {
- 'foo/bar': set(['PREV']),
- },
- 'default': {
- 'foo/bar': set(['PASS', 'SKIP']),
- 'baz/bar': set(['PASS', 'FAIL']),
- },
- 'stress': {
- 'baz/bar': set(['SKIP']),
- },
- }
- suite.prefix_rules = {
- '': {
- 'baz/': set(['PREV']),
- },
- 'default': {
- 'baz/': set(['PASS', 'SLOW']),
- },
- 'stress': {
- 'foo/': set(['PASS', 'SLOW']),
- },
- }
+ def testLoadingTestSuites(self):
+ self.assertEquals(self.suite.name, "fake_testsuite")
+ self.assertEquals(self.suite.test_config, self.test_config)
- test1 = TestCase(suite, 'foo/bar', 'foo/bar')
- test2 = TestCase(suite, 'baz/bar', 'baz/bar')
- suite.tests = [
- test1.create_variant(variant='default', flags=[]),
- test1.create_variant(variant='stress', flags=['-v']),
- test2.create_variant(variant='default', flags=[]),
- test2.create_variant(variant='stress', flags=['-v']),
- ]
+ # Verify that the components of the TestSuite aren't loaded yet.
+ self.assertIsNone(self.suite.tests)
+ self.assertIsNone(self.suite.statusfile)
- suite.FilterTestCasesByStatus()
- self.assertEquals(
- [
- TestCase(suite, 'foo/bar', 'foo/bar').create_variant(None, ['-v']),
- TestCase(suite, 'baz/bar', 'baz/bar'),
- ],
- suite.tests,
- )
-
- self.assertEquals(
- set(['PREV', 'PASS', 'SLOW']),
- suite.GetStatusFileOutcomes(suite.tests[0].name,
- suite.tests[0].variant),
- )
- self.assertEquals(
- set(['PREV', 'PASS', 'FAIL', 'SLOW']),
- suite.GetStatusFileOutcomes(suite.tests[1].name,
- suite.tests[1].variant),
- )
+ def testLoadingTestsFromDisk(self):
+ slow_tests, fast_tests = self.suite.load_tests_from_disk(
+ statusfile_variables={})
+ def is_generator(iterator):
+ return iterator == iter(iterator)
- def test_fail_ok_outcome(self):
- suite = TestSuite('foo', 'bar')
- suite.rules = {
- '': {
- 'foo/bar': set(['FAIL_OK']),
- 'baz/bar': set(['FAIL']),
- },
- }
- suite.prefix_rules = {}
- suite.tests = [
- TestCase(suite, 'foo/bar', 'foo/bar'),
- TestCase(suite, 'baz/bar', 'baz/bar'),
- ]
+ self.assertTrue(is_generator(slow_tests))
+ self.assertTrue(is_generator(fast_tests))
- for t in suite.tests:
- self.assertEquals(['FAIL'], t.expected_outcomes)
+ slow_tests, fast_tests = list(slow_tests), list(fast_tests)
+ # Verify that the components of the TestSuite are loaded.
+ self.assertTrue(len(slow_tests) == len(fast_tests) == 1)
+ self.assertTrue(all(test.is_slow for test in slow_tests))
+ self.assertFalse(any(test.is_slow for test in fast_tests))
+ self.assertIsNotNone(self.suite.statusfile)
if __name__ == '__main__':
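
The `is_generator` check above relies on iterators returning themselves from `iter()`; a quick illustration:

    gen = (x for x in [1, 2])
    lst = [1, 2]
    assert iter(gen) is gen       # a generator is its own iterator
    assert iter(lst) is not lst   # a list hands out a fresh iterator object
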
diff --git a/deps/v8/tools/testrunner/num_fuzzer.py b/deps/v8/tools/testrunner/num_fuzzer.py
index 3b76541604..d5e399626c 100755
--- a/deps/v8/tools/testrunner/num_fuzzer.py
+++ b/deps/v8/tools/testrunner/num_fuzzer.py
@@ -20,7 +20,7 @@ from testrunner.testproc.execution import ExecutionProc
from testrunner.testproc.expectation import ForgiveTimeoutProc
from testrunner.testproc.filter import StatusFileFilterProc, NameFilterProc
from testrunner.testproc.loader import LoadProc
-from testrunner.testproc.progress import ResultsTracker, TestsCounter
+from testrunner.testproc.progress import ResultsTracker
from testrunner.utils import random_utils
@@ -55,6 +55,11 @@ class NumFuzzer(base_runner.BaseTestRunner):
parser.add_option("--stress-gc", default=0, type="int",
help="probability [0-10] of adding --random-gc-interval "
"flag to the test")
+
+ # Stress tasks
+ parser.add_option("--stress-delay-tasks", default=0, type="int",
+ help="probability [0-10] of adding --stress-delay-tasks "
+ "flag to the test")
parser.add_option("--stress-thread-pool-size", default=0, type="int",
help="probability [0-10] of adding --thread-pool-size "
"flag to the test")
@@ -67,11 +72,6 @@ class NumFuzzer(base_runner.BaseTestRunner):
help="extends --stress-deopt to have minimum interval "
"between deopt points")
- # Stress interrupt budget
- parser.add_option("--stress-interrupt-budget", default=0, type="int",
- help="probability [0-10] of adding --interrupt-budget "
- "flag to the test")
-
# Combine multiple tests
parser.add_option("--combine-tests", default=False, action="store_true",
help="Combine multiple tests as one and run with "
@@ -110,14 +110,6 @@ class NumFuzzer(base_runner.BaseTestRunner):
def _get_default_suite_names(self):
return DEFAULT_SUITES
- def _timeout_scalefactor(self, options):
- factor = super(NumFuzzer, self)._timeout_scalefactor(options)
- if options.stress_interrupt_budget:
- # TODO(machenbach): This should be moved to a more generic config.
- # Fuzzers have too much timeout in debug mode.
- factor = max(int(factor * 0.25), 1)
- return factor
-
def _get_statusfile_variables(self, options):
variables = (
super(NumFuzzer, self)._get_statusfile_variables(options))
@@ -129,6 +121,7 @@ class NumFuzzer(base_runner.BaseTestRunner):
options.stress_scavenge,
options.stress_compaction,
options.stress_gc,
+ options.stress_delay_tasks,
options.stress_thread_pool_size])),
})
return variables
@@ -180,15 +173,8 @@ class NumFuzzer(base_runner.BaseTestRunner):
# Indicate if a SIGINT or SIGTERM happened.
return sigproc.exit_code
- def _load_suites(self, names, options):
- suites = super(NumFuzzer, self)._load_suites(names, options)
- if options.combine_tests:
- suites = [s for s in suites if s.test_combiner_available()]
- if options.stress_interrupt_budget:
- # Changing interrupt budget forces us to suppress certain test assertions.
- for suite in suites:
- suite.do_suppress_internals()
- return suites
+ def _is_testsuite_supported(self, suite, options):
+ return not options.combine_tests or suite.test_combiner_available()
def _create_combiner(self, rng, options):
if not options.combine_tests:
@@ -211,7 +197,7 @@ class NumFuzzer(base_runner.BaseTestRunner):
def _disable_analysis(self, options):
"""Disable analysis phase when options are used that don't support it."""
- return options.combine_tests or options.stress_interrupt_budget
+ return options.combine_tests
def _create_fuzzer_configs(self, options):
fuzzers = []
@@ -224,7 +210,7 @@ class NumFuzzer(base_runner.BaseTestRunner):
add('scavenge', options.stress_scavenge)
add('gc_interval', options.stress_gc)
add('threads', options.stress_thread_pool_size)
- add('interrupt_budget', options.stress_interrupt_budget)
+ add('delay', options.stress_delay_tasks)
add('deopt', options.stress_deopt, options.stress_deopt_min)
return fuzzers
diff --git a/deps/v8/tools/testrunner/objects/testcase.py b/deps/v8/tools/testrunner/objects/testcase.py
index de8bc561eb..c8f7bdbfb0 100644
--- a/deps/v8/tools/testrunner/objects/testcase.py
+++ b/deps/v8/tools/testrunner/objects/testcase.py
@@ -43,15 +43,15 @@ RESOURCES_PATTERN = re.compile(r"//\s+Resources:(.*)")
# Pattern to auto-detect files to push on Android for statements like:
# load("path/to/file.js")
LOAD_PATTERN = re.compile(
- r"(?:load|readbuffer|read)\((?:'|\")([^'\"]*)(?:'|\")\)")
+ r"(?:load|readbuffer|read)\((?:'|\")([^'\"]+)(?:'|\")\)")
# Pattern to auto-detect files to push on Android for statements like:
# import "path/to/file.js"
MODULE_RESOURCES_PATTERN_1 = re.compile(
- r"(?:import|export)(?:\(| )(?:'|\")([^'\"]*)(?:'|\")")
+ r"(?:import|export)(?:\(| )(?:'|\")([^'\"]+)(?:'|\")")
# Pattern to auto-detect files to push on Android for statements like:
# import foobar from "path/to/file.js"
MODULE_RESOURCES_PATTERN_2 = re.compile(
- r"(?:import|export).*from (?:'|\")([^'\"]*)(?:'|\")")
+ r"(?:import|export).*from (?:'|\")([^'\"]+)(?:'|\")")
class TestCase(object):
@@ -135,7 +135,8 @@ class TestCase(object):
@property
def do_skip(self):
- return statusfile.SKIP in self._statusfile_outcomes
+ return (statusfile.SKIP in self._statusfile_outcomes and
+ not self.suite.test_config.run_skipped)
@property
def is_slow(self):
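
The quantifier change from `*` to `+` in the resource patterns above means an empty path such as `load("")` no longer produces a bogus resource match; a quick check:

    import re

    LOAD_PATTERN = re.compile(
        r"(?:load|readbuffer|read)\((?:'|\")([^'\"]+)(?:'|\")\)")

    assert LOAD_PATTERN.findall('load("path/to/file.js")') == ['path/to/file.js']
    assert LOAD_PATTERN.findall('load("")') == []  # empty path: no match
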
diff --git a/deps/v8/tools/testrunner/outproc/test262.py b/deps/v8/tools/testrunner/outproc/test262.py
index b5eb5547c3..bf3bc05809 100644
--- a/deps/v8/tools/testrunner/outproc/test262.py
+++ b/deps/v8/tools/testrunner/outproc/test262.py
@@ -7,18 +7,29 @@ import re
from . import base
+def _is_failure_output(output):
+ return (
+ output.exit_code != 0 or
+ 'FAILED!' in output.stdout
+ )
+
+
class ExceptionOutProc(base.OutProc):
"""Output processor for tests with expected exception."""
- def __init__(self, expected_outcomes, expected_exception=None):
+ def __init__(
+ self, expected_outcomes, expected_exception=None, negative=False):
super(ExceptionOutProc, self).__init__(expected_outcomes)
self._expected_exception = expected_exception
+ self._negative = negative
+
+ @property
+ def negative(self):
+ return self._negative
def _is_failure_output(self, output):
- if output.exit_code != 0:
- return True
if self._expected_exception != self._parse_exception(output.stdout):
return True
- return 'FAILED!' in output.stdout
+ return _is_failure_output(output)
def _parse_exception(self, string):
# somefile:somelinenumber: someerror[: sometext]
@@ -31,16 +42,13 @@ class ExceptionOutProc(base.OutProc):
return None
-def _is_failure_output(self, output):
- return (
- output.exit_code != 0 or
- 'FAILED!' in output.stdout
- )
-
-
class NoExceptionOutProc(base.OutProc):
"""Output processor optimized for tests without expected exception."""
-NoExceptionOutProc._is_failure_output = _is_failure_output
+ def __init__(self, expected_outcomes):
+ super(NoExceptionOutProc, self).__init__(expected_outcomes)
+
+ def _is_failure_output(self, output):
+ return _is_failure_output(output)
class PassNoExceptionOutProc(base.PassOutProc):
@@ -48,7 +56,8 @@ class PassNoExceptionOutProc(base.PassOutProc):
Output processor optimized for tests expected to PASS without expected
exception.
"""
-PassNoExceptionOutProc._is_failure_output = _is_failure_output
+ def _is_failure_output(self, output):
+ return _is_failure_output(output)
PASS_NO_EXCEPTION = PassNoExceptionOutProc()
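
All three processors now share one module-level failure predicate instead of patching class attributes after the fact; a minimal behavioral sketch (`Output` here is a stand-in for the runner's real output object):

    class Output(object):
        def __init__(self, exit_code, stdout):
            self.exit_code = exit_code
            self.stdout = stdout

    def _is_failure_output(output):
        return output.exit_code != 0 or 'FAILED!' in output.stdout

    assert not _is_failure_output(Output(0, 'PASS'))
    assert _is_failure_output(Output(0, 'FAILED! bad result'))
    assert _is_failure_output(Output(1, ''))
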
diff --git a/deps/v8/tools/testrunner/standard_runner.py b/deps/v8/tools/testrunner/standard_runner.py
index bf7d3f133d..c84260c0a6 100755
--- a/deps/v8/tools/testrunner/standard_runner.py
+++ b/deps/v8/tools/testrunner/standard_runner.py
@@ -18,7 +18,7 @@ from testrunner.objects import predictable
from testrunner.testproc.execution import ExecutionProc
from testrunner.testproc.filter import StatusFileFilterProc, NameFilterProc
from testrunner.testproc.loader import LoadProc
-from testrunner.testproc.progress import ResultsTracker, TestsCounter
+from testrunner.testproc.progress import ResultsTracker
from testrunner.testproc.seed import SeedProc
from testrunner.testproc.variant import VariantProc
from testrunner.utils import random_utils
@@ -49,7 +49,8 @@ VARIANT_ALIASES = {
GC_STRESS_FLAGS = ["--gc-interval=500", "--stress-compaction",
"--concurrent-recompilation-queue-length=64",
"--concurrent-recompilation-delay=500",
- "--concurrent-recompilation"]
+ "--concurrent-recompilation",
+ "--stress-flush-bytecode"]
RANDOM_GC_STRESS_FLAGS = ["--random-gc-interval=5000",
"--stress-compaction-random"]
@@ -281,7 +282,6 @@ class StandardTestRunner(base_runner.BaseTestRunner):
print '>>> Running with test processors'
loader = LoadProc()
- tests_counter = TestsCounter()
results = self._create_result_tracker(options)
indicators = self._create_progress_indicators(options)
@@ -296,7 +296,6 @@ class StandardTestRunner(base_runner.BaseTestRunner):
NameFilterProc(args) if args else None,
StatusFileFilterProc(options.slow_tests, options.pass_fail_tests),
self._create_shard_proc(options),
- tests_counter,
VariantProc(self._variants),
StatusFileFilterProc(options.slow_tests, options.pass_fail_tests),
self._create_predictable_filter(),
@@ -310,13 +309,9 @@ class StandardTestRunner(base_runner.BaseTestRunner):
]
self._prepare_procs(procs)
- tests.sort(key=lambda t: t.is_slow, reverse=True)
loader.load_tests(tests)
- print '>>> Running %d base tests' % tests_counter.total
- tests_counter.remove_from_chain()
-
# This starts up worker processes and blocks until all tests are
# processed.
execproc.run()
diff --git a/deps/v8/tools/testrunner/test_config.py b/deps/v8/tools/testrunner/test_config.py
index d9418fe9ac..27ac72bf6c 100644
--- a/deps/v8/tools/testrunner/test_config.py
+++ b/deps/v8/tools/testrunner/test_config.py
@@ -16,6 +16,7 @@ class TestConfig(object):
no_harness,
noi18n,
random_seed,
+ run_skipped,
shell_dir,
timeout,
verbose):
@@ -27,6 +28,7 @@ class TestConfig(object):
self.noi18n = noi18n
# random_seed is always not None.
self.random_seed = random_seed or random_utils.random_seed()
+ self.run_skipped = run_skipped
self.shell_dir = shell_dir
self.timeout = timeout
self.verbose = verbose
diff --git a/deps/v8/tools/testrunner/testproc/fuzzer.py b/deps/v8/tools/testrunner/testproc/fuzzer.py
index 624b9aac04..799b4bfb5e 100644
--- a/deps/v8/tools/testrunner/testproc/fuzzer.py
+++ b/deps/v8/tools/testrunner/testproc/fuzzer.py
@@ -217,17 +217,16 @@ class CompactionFuzzer(Fuzzer):
yield ['--stress-compaction-random']
-class ThreadPoolSizeFuzzer(Fuzzer):
+class TaskDelayFuzzer(Fuzzer):
def create_flags_generator(self, rng, test, analysis_value):
while True:
- yield ['--thread-pool-size=%d' % rng.randint(1, 8)]
+ yield ['--stress-delay-tasks']
-class InterruptBudgetFuzzer(Fuzzer):
+class ThreadPoolSizeFuzzer(Fuzzer):
def create_flags_generator(self, rng, test, analysis_value):
while True:
- limit = 1 + int(rng.random() * 144)
- yield ['--interrupt-budget=%d' % rng.randint(1, limit * 1024)]
+ yield ['--thread-pool-size=%d' % rng.randint(1, 8)]
class DeoptAnalyzer(Analyzer):
@@ -269,9 +268,9 @@ class DeoptFuzzer(Fuzzer):
FUZZERS = {
'compaction': (None, CompactionFuzzer),
+ 'delay': (None, TaskDelayFuzzer),
'deopt': (DeoptAnalyzer, DeoptFuzzer),
'gc_interval': (GcIntervalAnalyzer, GcIntervalFuzzer),
- 'interrupt_budget': (None, InterruptBudgetFuzzer),
'marking': (MarkingAnalyzer, MarkingFuzzer),
'scavenge': (ScavengeAnalyzer, ScavengeFuzzer),
'threads': (None, ThreadPoolSizeFuzzer),
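
Each entry in FUZZERS maps to an endless flag generator that the runner draws from per test; a hedged sketch mirroring `ThreadPoolSizeFuzzer` above (standalone, outside the Fuzzer class):

    import random

    def thread_pool_flags(rng):
        # Endless generator: every test drawn from the loader gets a
        # fresh random pool size between 1 and 8.
        while True:
            yield ['--thread-pool-size=%d' % rng.randint(1, 8)]

    gen = thread_pool_flags(random.Random(0))
    print(next(gen))  # e.g. ['--thread-pool-size=5']
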
diff --git a/deps/v8/tools/testrunner/testproc/progress.py b/deps/v8/tools/testrunner/testproc/progress.py
index 50b7307e1c..096228dc35 100644
--- a/deps/v8/tools/testrunner/testproc/progress.py
+++ b/deps/v8/tools/testrunner/testproc/progress.py
@@ -22,15 +22,6 @@ def print_failure_header(test):
}
-class TestsCounter(base.TestProcObserver):
- def __init__(self):
- super(TestsCounter, self).__init__()
- self.total = 0
-
- def _on_next_test(self, test):
- self.total += 1
-
-
class ResultsTracker(base.TestProcObserver):
"""Tracks number of results and stops to run tests if max_failures reached."""
def __init__(self, max_failures):
@@ -66,10 +57,6 @@ class SimpleProgressIndicator(ProgressIndicator):
self._requirement = base.DROP_PASS_OUTPUT
self._failed = []
- self._total = 0
-
- def _on_next_test(self, test):
- self._total += 1
def _on_result_for(self, test, result):
# TODO(majeski): Support for dummy/grouped results
@@ -170,13 +157,9 @@ class CompactProgressIndicator(ProgressIndicator):
self._last_status_length = 0
self._start_time = time.time()
- self._total = 0
self._passed = 0
self._failed = 0
- def _on_next_test(self, test):
- self._total += 1
-
def _on_result_for(self, test, result):
# TODO(majeski): Support for dummy/grouped results
if result.has_unexpected_output:
@@ -210,13 +193,8 @@ class CompactProgressIndicator(ProgressIndicator):
def _print_progress(self, name):
self._clear_line(self._last_status_length)
elapsed = time.time() - self._start_time
- if not self._total:
- progress = 0
- else:
- progress = (self._passed + self._failed) * 100 // self._total
status = self._templates['status_line'] % {
'passed': self._passed,
- 'progress': progress,
'failed': self._failed,
'test': name,
'mins': int(elapsed) / 60,
@@ -241,7 +219,6 @@ class ColorProgressIndicator(CompactProgressIndicator):
def __init__(self):
templates = {
'status_line': ("[%(mins)02i:%(secs)02i|"
- "\033[34m%%%(progress) 4d\033[0m|"
"\033[32m+%(passed) 4d\033[0m|"
"\033[31m-%(failed) 4d\033[0m]: %(test)s"),
'stdout': "\033[1m%s\033[0m",
@@ -256,7 +233,7 @@ class ColorProgressIndicator(CompactProgressIndicator):
class MonochromeProgressIndicator(CompactProgressIndicator):
def __init__(self):
templates = {
- 'status_line': ("[%(mins)02i:%(secs)02i|%%%(progress) 4d|"
+ 'status_line': ("[%(mins)02i:%(secs)02i|"
"+%(passed) 4d|-%(failed) 4d]: %(test)s"),
'stdout': '%s',
'stderr': '%s',
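
With the progress-percent field removed, the monochrome status line renders as sketched below (values hypothetical):

    template = '[%(mins)02i:%(secs)02i|+%(passed) 4d|-%(failed) 4d]: %(test)s'
    print(template % {'mins': 1, 'secs': 5, 'passed': 120, 'failed': 2,
                      'test': 'mjsunit/array-sort'})
    # -> [01:05|+ 120|-   2]: mjsunit/array-sort
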
diff --git a/deps/v8/tools/tick-processor.html b/deps/v8/tools/tick-processor.html
index b841cc0bd3..bfebfc9e6a 100644
--- a/deps/v8/tools/tick-processor.html
+++ b/deps/v8/tools/tick-processor.html
@@ -27,10 +27,10 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -->
<html lang="en">
<head>
- <meta charset="utf-8"/>
+ <meta charset="utf-8">
<title>V8 Tick Processor</title>
- <style type="text/css">
+ <style>
body {
font-family: Verdana, Arial, Helvetica, sans-serif;
font-size: 10pt;
@@ -53,7 +53,7 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -->
<script src="arguments.js"></script>
<script src="tickprocessor.js"></script>
- <script type="text/javascript">
+ <script>
var v8log_content;
var textout;
diff --git a/deps/v8/tools/torque/format-torque.py b/deps/v8/tools/torque/format-torque.py
index aac432ef41..148e9af4c5 100755
--- a/deps/v8/tools/torque/format-torque.py
+++ b/deps/v8/tools/torque/format-torque.py
@@ -13,17 +13,31 @@ from subprocess import Popen, PIPE
def preprocess(input):
input = re.sub(r'(if\s+)constexpr(\s*\()', r'\1/*COxp*/\2', input)
- input = re.sub(r'(\)\s*\:\s*\S+\s+)labels\s+',
- r'\1,\n/*_LABELS_HOLD_*/ ', input)
input = re.sub(r'(\s+)operator\s*(\'[^\']+\')', r'\1/*_OPE \2*/', input)
+
+ # Mangle typeswitches to look like switch statements with the extra type
+ # information and syntax encoded in comments.
input = re.sub(r'(\s+)typeswitch\s*\(', r'\1/*_TYPE*/switch (', input)
- input = re.sub(r'(\s+)case\s*\(([^\s]+)\s+\:\s*([^\:]+)\)(\s*)\:',
- r'\1case \3: /*_TSV\2:*/', input)
- input = re.sub(r'(\s+)case\s*\(([^\:]+)\)(\s*)\:',
+ input = re.sub(r'(\s+)case\s*\(\s*([^\:]+)\s*\)(\s*)\:\s*deferred',
+ r'\1case \2: /*_TSXDEFERRED_*/', input)
+ input = re.sub(r'(\s+)case\s*\(\s*([^\:]+)\s*\)(\s*)\:',
r'\1case \2: /*_TSX*/', input)
- input = re.sub(r'\sgenerates\s+\'([^\']+)\'\s*',
+ input = re.sub(r'(\s+)case\s*\(\s*([^\s]+)\s*\:\s*([^\:]+)\s*\)(\s*)\:\s*deferred',
+ r'\1case \3: /*_TSVDEFERRED_\2:*/', input)
+ input = re.sub(r'(\s+)case\s*\(\s*([^\s]+)\s*\:\s*([^\:]+)\s*\)(\s*)\:',
+ r'\1case \3: /*_TSV\2:*/', input)
+
+ # Add extra space around | operators to fix union types later.
+ while True:
+ old = input
+ input = re.sub(r'(\w+\s*)\|(\s*\w+)',
+ r'\1|/**/\2', input)
+ if old == input:
+      break
+
+ input = re.sub(r'\bgenerates\s+\'([^\']+)\'\s*',
r' _GeNeRaTeS00_/*\1@*/', input)
- input = re.sub(r'\sconstexpr\s+\'([^\']+)\'\s*',
+ input = re.sub(r'\bconstexpr\s+\'([^\']+)\'\s*',
r' _CoNsExP_/*\1@*/', input)
input = re.sub(r'\notherwise',
r'\n otherwise', input)
@@ -32,14 +46,23 @@ def preprocess(input):
return input
def postprocess(output):
+ output = re.sub(r'% RawObjectCast', r'%RawObjectCast', output)
+ output = re.sub(r'% RawPointerCast', r'%RawPointerCast', output)
+ output = re.sub(r'% RawConstexprCast', r'%RawConstexprCast', output)
+ output = re.sub(r'% FromConstexpr', r'%FromConstexpr', output)
+ output = re.sub(r'% Allocate', r'%Allocate', output)
output = re.sub(r'\/\*COxp\*\/', r'constexpr', output)
output = re.sub(r'(\S+)\s*: type([,>])', r'\1: type\2', output)
- output = re.sub(r',([\n ]*)\/\*_LABELS_HOLD_\*\/', r'\1labels', output)
+ output = re.sub(r'(\n\s*)labels( [A-Z])', r'\1 labels\2', output)
output = re.sub(r'\/\*_OPE \'([^\']+)\'\*\/', r"operator '\1'", output)
output = re.sub(r'\/\*_TYPE\*\/(\s*)switch', r'typeswitch', output)
- output = re.sub(r'case ([^\:]+)\:\s*\/\*_TSX\*\/',
+ output = re.sub(r'case (\w+)\:\s*\/\*_TSXDEFERRED_\*\/',
+ r'case (\1): deferred', output)
+ output = re.sub(r'case (\w+)\:\s*\/\*_TSX\*\/',
r'case (\1):', output)
- output = re.sub(r'case ([^\:]+)\:\s*\/\*_TSV([^\:]+)\:\*\/',
+ output = re.sub(r'case (\w+)\:\s*\/\*_TSVDEFERRED_([^\:]+)\:\*\/',
+ r'case (\2: \1): deferred', output)
+ output = re.sub(r'case (\w+)\:\s*\/\*_TSV([^\:]+)\:\*\/',
r'case (\2: \1):', output)
output = re.sub(r'\n_GeNeRaTeS00_\s*\/\*([^@]+)@\*\/',
r"\n generates '\1'", output)
@@ -53,37 +76,68 @@ def postprocess(output):
r"\n\1otherwise", output)
output = re.sub(r'_OtheSaLi',
r"otherwise", output)
+
+ while True:
+ old = output
+ output = re.sub(r'(\w+)\s{0,1}\|\s{0,1}/\*\*/(\s*\w+)',
+ r'\1 |\2', output)
+ if old == output:
+      break
+
return output
-if len(sys.argv) < 2 or len(sys.argv) > 3:
- print "invalid number of arguments"
- sys.exit(-1)
+def process(filename, only_lint, use_stdout):
+ with open(filename, 'r') as content_file:
+ content = content_file.read()
+
+ original_input = content
+
+ p = Popen(['clang-format', '-assume-filename=.ts'], stdin=PIPE, stdout=PIPE, stderr=PIPE)
+ output, err = p.communicate(preprocess(content))
+ output = postprocess(output)
+ rc = p.returncode
+  if rc != 0:
+    print "error code " + str(rc) + " running clang-format. Exiting..."
+    sys.exit(rc)
+
+ if only_lint:
+ if (output != original_input):
+ print >>sys.stderr, filename + ' requires formatting'
+ elif use_stdout:
+ print output
+ else:
+ output_file = open(filename, 'w')
+    output_file.write(output)
+ output_file.close()
+
+def print_usage():
+ print 'format-torque -i file1[, file2[, ...]]'
+ print ' format and overwrite input files'
+ print 'format-torque -l file1[, file2[, ...]]'
+ print ' merely indicate which files need formatting'
+
+def Main():
+ if len(sys.argv) < 3:
+ print "error: at least 2 arguments required"
+    print_usage()
+ sys.exit(-1)
+
+ use_stdout = True
+ lint = False
-use_stdout = True
-lint = False
-if len(sys.argv) == 3:
if sys.argv[1] == '-i':
use_stdout = False
- if sys.argv[1] == '-l':
+ elif sys.argv[1] == '-l':
lint = True
+ else:
+ print "error: -i or -l must be specified as the first argument"
+    print_usage()
+    sys.exit(-1)
+
+ for filename in sys.argv[2:]:
+ process(filename, lint, use_stdout)
+
+ return 0
-filename = sys.argv[len(sys.argv) - 1]
-
-with open(filename, 'r') as content_file:
- content = content_file.read()
-original_input = content
-p = Popen(['clang-format', '-assume-filename=.ts'], stdin=PIPE, stdout=PIPE, stderr=PIPE)
-output, err = p.communicate(preprocess(content))
-output = postprocess(output)
-rc = p.returncode
-if (rc <> 0):
- sys.exit(rc);
-if lint:
- if (output != original_input):
- print >>sys.stderr, filename + ' requires formatting'
-elif use_stdout:
- print output
-else:
- output_file = open(filename, 'w')
- output_file.write(output);
- output_file.close()
+if __name__ == '__main__':
+  sys.exit(Main())
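
The new fixed-point loops around the `|` operator protect Torque union types across the clang-format round trip; a minimal standalone sketch of the preprocess side:

    import re

    def mark_unions(src):
        # A single re.sub pass only rewrites non-overlapping matches
        # ('A|B|C' becomes 'A|/**/B|C'), hence the loop to a fixed point.
        while True:
            old = src
            src = re.sub(r'(\w+\s*)\|(\s*\w+)', r'\1|/**/\2', src)
            if old == src:
                return src

    print(mark_unions('type Number = Smi|HeapNumber;'))
    # -> type Number = Smi|/**/HeapNumber;
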
diff --git a/deps/v8/tools/torque/vim-torque/syntax/torque.vim b/deps/v8/tools/torque/vim-torque/syntax/torque.vim
index 17713c7213..c2e4ba0f7a 100644
--- a/deps/v8/tools/torque/vim-torque/syntax/torque.vim
+++ b/deps/v8/tools/torque/vim-torque/syntax/torque.vim
@@ -26,18 +26,18 @@ syn keyword torqueBranch break continue goto
syn keyword torqueConditional if else typeswitch otherwise
syn match torqueConstant /\v<[A-Z][A-Z0-9_]+>/
syn match torqueConstant /\v<k[A-Z][A-Za-z0-9]*>/
-syn keyword torqueFunction macro builtin runtime
+syn keyword torqueFunction macro builtin runtime intrinsic
syn keyword torqueKeyword cast convert from_constexpr min max unsafe_cast
syn keyword torqueLabel case
-syn keyword torqueMatching try label
-syn keyword torqueModifier extern javascript constexpr
+syn keyword torqueMatching try label catch
+syn keyword torqueModifier extern javascript constexpr transitioning transient weak
syn match torqueNumber /\v<[0-9]+(\.[0-9]*)?>/
syn match torqueNumber /\v<0x[0-9a-fA-F]+>/
syn keyword torqueOperator operator
syn keyword torqueRel extends generates labels
syn keyword torqueRepeat while for of
syn keyword torqueStatement return tail
-syn keyword torqueStructure module struct type
+syn keyword torqueStructure module struct type class
syn keyword torqueVariable const let
syn match torqueType /\v(\<)@<=([A-Za-z][0-9A-Za-z_]*)(>)@=/
diff --git a/deps/v8/tools/torque/vscode-torque/syntaxes/torque.tmLanguage.json b/deps/v8/tools/torque/vscode-torque/syntaxes/torque.tmLanguage.json
index d1d43e5dcb..39b994ec25 100644
--- a/deps/v8/tools/torque/vscode-torque/syntaxes/torque.tmLanguage.json
+++ b/deps/v8/tools/torque/vscode-torque/syntaxes/torque.tmLanguage.json
@@ -61,11 +61,11 @@
"keywords": {
"patterns": [{
"name": "keyword.control.torque",
- "match": "\\b(if|else|while|for|return|continue|break|goto|otherwise|try|catch)\\b"
+ "match": "\\b(if|else|while|for|return|continue|break|goto|otherwise|try|label|catch)\\b"
},
{
"name": "keyword.other.torque",
- "match": "\\b(constexpr|module|macro|builtin|runtime|javascript|implicit|deferred|label|labels|tail|let|generates|type|extends|extern|const|typeswitch|case)\\b"
+ "match": "\\b(constexpr|module|macro|builtin|runtime|intrinsic|javascript|implicit|deferred|label|labels|tail|let|generates|type|struct|class|weak|extends|extern|const|typeswitch|case|transient|transitioning)\\b"
},
{
"name": "keyword.operator.torque",
diff --git a/deps/v8/tools/try_perf.py b/deps/v8/tools/try_perf.py
index c6dc394389..a0a98ee752 100755
--- a/deps/v8/tools/try_perf.py
+++ b/deps/v8/tools/try_perf.py
@@ -9,38 +9,28 @@ import subprocess
import sys
BOTS = {
- '--arm32': 'v8_arm32_perf_try',
+ '--chromebook': 'v8_chromebook_perf_try',
'--linux32': 'v8_linux32_perf_try',
'--linux64': 'v8_linux64_perf_try',
'--linux64_atom': 'v8_linux64_atom_perf_try',
- '--linux64_haswell': 'v8_linux64_haswell_perf_try',
'--nexus5': 'v8_nexus5_perf_try',
'--nexus7': 'v8_nexus7_perf_try',
- '--nexus10': 'v8_nexus10_perf_try',
- '--pixel2': 'v8_pixel2_perf_try',
'--nokia1': 'v8_nokia1_perf_try',
+ '--odroid32': 'v8_odroid32_perf_try',
+ '--pixel2': 'v8_pixel2_perf_try',
}
-# This list will contain builder names that should be triggered on an internal
-# swarming bucket instead of internal Buildbot master.
-SWARMING_BOTS = [
- 'v8_linux64_perf_try',
- 'v8_pixel2_perf_try',
- 'v8_nokia1_perf_try',
-]
-
DEFAULT_BOTS = [
- 'v8_arm32_perf_try',
+ 'v8_chromebook_perf_try',
'v8_linux32_perf_try',
- 'v8_linux64_haswell_perf_try',
- 'v8_nexus10_perf_try',
+ 'v8_linux64_perf_try',
]
PUBLIC_BENCHMARKS = [
'arewefastyet',
+ 'compile',
'embenchen',
'emscripten',
- 'compile',
'jetstream',
'jsbench',
'jstests',
@@ -60,17 +50,6 @@ PUBLIC_BENCHMARKS = [
V8_BASE = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
-def _trigger_bots(bucket, bots, options):
- cmd = ['git cl try']
- cmd += ['-B', bucket]
- cmd += ['-b %s' % bot for bot in bots]
- if options.revision: cmd += ['-r %s' % options.revision]
- benchmarks = ['"%s"' % benchmark for benchmark in options.benchmarks]
- cmd += ['-p \'testfilter=[%s]\'' % ','.join(benchmarks)]
- if options.extra_flags:
- cmd += ['-p \'extra_flags="%s"\'' % options.extra_flags]
- subprocess.check_call(' '.join(cmd), shell=True, cwd=V8_BASE)
-
def main():
parser = argparse.ArgumentParser(description='')
parser.add_argument('benchmarks', nargs='+', help='The benchmarks to run.')
@@ -80,6 +59,8 @@ def main():
help='Revision (use full hash!) to use for the try job; '
'default: the revision will be determined by the '
'try server; see its waterfall for more info')
+ parser.add_argument('-v', '--verbose', action='store_true',
+ help='Print debug information')
for option in sorted(BOTS):
parser.add_argument(
option, dest='bots', action='append_const', const=BOTS[option],
@@ -110,14 +91,17 @@ def main():
subprocess.check_output(
'update_depot_tools', shell=True, stderr=subprocess.STDOUT, cwd=V8_BASE)
- buildbot_bots = [bot for bot in options.bots if bot not in SWARMING_BOTS]
- if buildbot_bots:
- _trigger_bots('master.internal.client.v8', buildbot_bots, options)
-
- swarming_bots = [bot for bot in options.bots if bot in SWARMING_BOTS]
- if swarming_bots:
- _trigger_bots('luci.v8-internal.try', swarming_bots, options)
-
+ cmd = ['git cl try', '-B', 'luci.v8-internal.try']
+ cmd += ['-b %s' % bot for bot in options.bots]
+ if options.revision: cmd += ['-r %s' % options.revision]
+ benchmarks = ['"%s"' % benchmark for benchmark in options.benchmarks]
+ cmd += ['-p \'testfilter=[%s]\'' % ','.join(benchmarks)]
+ if options.extra_flags:
+ cmd += ['-p \'extra_flags="%s"\'' % options.extra_flags]
+ if options.verbose:
+ cmd.append('-vv')
+ print 'Running %s' % ' '.join(cmd)
+ subprocess.check_call(' '.join(cmd), shell=True, cwd=V8_BASE)
if __name__ == '__main__': # pragma: no cover
sys.exit(main())
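
With the Buildbot path gone, every bot is triggered through a single `git cl try` invocation against the internal LUCI bucket; a hedged sketch of the assembled command line (bot and benchmark values hypothetical):

    cmd = ['git cl try', '-B', 'luci.v8-internal.try']
    cmd += ['-b %s' % bot for bot in ['v8_linux64_perf_try']]
    cmd += ['-p \'testfilter=[%s]\'' % ','.join('"%s"' % b for b in ['jetstream'])]
    print(' '.join(cmd))
    # -> git cl try -B luci.v8-internal.try -b v8_linux64_perf_try -p 'testfilter=["jetstream"]'
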
diff --git a/deps/v8/tools/turbolizer/README.md b/deps/v8/tools/turbolizer/README.md
index 293f4a20a6..c5ee729d64 100644
--- a/deps/v8/tools/turbolizer/README.md
+++ b/deps/v8/tools/turbolizer/README.md
@@ -10,15 +10,20 @@ the '--trace-turbo' command-line flag.
Turbolizer is built using npm:
+ cd tools/turbolizer
npm i
npm run-script build
Afterwards, turbolizer can be hosted locally by starting a web server that serves
the contents of the turbolizer directory, e.g.:
- cd src/tools/turbolizer
python -m SimpleHTTPServer 8000
+To deploy to a directory that can be hosted, the script `deploy` can be used. The
+following command will deploy to the directory /www/turbolizer:
+
+ npm run deploy -- /www/turbolizer
+
Optionally, profiling data generated by the perf tools in linux can be merged
with the .json files using the turbolizer-perf.py file included. The following
command is an example of using the perf script:
diff --git a/deps/v8/tools/turbolizer/deploy.sh b/deps/v8/tools/turbolizer/deploy.sh
index db76dca490..ae069762d9 100755
--- a/deps/v8/tools/turbolizer/deploy.sh
+++ b/deps/v8/tools/turbolizer/deploy.sh
@@ -7,12 +7,18 @@ if [ ! -d "$DEST" ]; then
exit 1
fi
-echo "Deploying..."
+function copy() {
+ echo -n "."
+ cp "$@"
+}
-cp *.jpg $DEST/
-cp *.png $DEST/
-cp *.css $DEST/
-cp index.html $DEST/
-cp -R build $DEST/
+echo -n "Deploying..."
+copy *.jpg $DEST/
+copy *.png $DEST/
+copy *.css $DEST/
+copy index.html $DEST/
+copy info-view.html $DEST/
+copy -R build $DEST/
+echo "done!"
echo "Deployed to $DEST/."
diff --git a/deps/v8/tools/turbolizer/expand-all.jpg b/deps/v8/tools/turbolizer/expand-all.jpg
deleted file mode 100644
index df64a2c4aa..0000000000
--- a/deps/v8/tools/turbolizer/expand-all.jpg
+++ /dev/null
Binary files differ
diff --git a/deps/v8/tools/turbolizer/hide-selected.png b/deps/v8/tools/turbolizer/img/hide-selected-icon.png
index 207cdbb89a..207cdbb89a 100644
--- a/deps/v8/tools/turbolizer/hide-selected.png
+++ b/deps/v8/tools/turbolizer/img/hide-selected-icon.png
Binary files differ
diff --git a/deps/v8/tools/turbolizer/hide-unselected.png b/deps/v8/tools/turbolizer/img/hide-unselected-icon.png
index 15617b0939..15617b0939 100644
--- a/deps/v8/tools/turbolizer/hide-unselected.png
+++ b/deps/v8/tools/turbolizer/img/hide-unselected-icon.png
Binary files differ
diff --git a/deps/v8/tools/turbolizer/layout-icon.png b/deps/v8/tools/turbolizer/img/layout-icon.png
index 95a517afa6..95a517afa6 100644
--- a/deps/v8/tools/turbolizer/layout-icon.png
+++ b/deps/v8/tools/turbolizer/img/layout-icon.png
Binary files differ
diff --git a/deps/v8/tools/turbolizer/img/show-all-icon.png b/deps/v8/tools/turbolizer/img/show-all-icon.png
new file mode 100644
index 0000000000..50fc845f01
--- /dev/null
+++ b/deps/v8/tools/turbolizer/img/show-all-icon.png
Binary files differ
diff --git a/deps/v8/tools/turbolizer/img/show-control-icon.png b/deps/v8/tools/turbolizer/img/show-control-icon.png
new file mode 100644
index 0000000000..4238bee9cc
--- /dev/null
+++ b/deps/v8/tools/turbolizer/img/show-control-icon.png
Binary files differ
diff --git a/deps/v8/tools/turbolizer/live.png b/deps/v8/tools/turbolizer/img/toggle-hide-dead-icon.png
index ac72bb93e8..ac72bb93e8 100644
--- a/deps/v8/tools/turbolizer/live.png
+++ b/deps/v8/tools/turbolizer/img/toggle-hide-dead-icon.png
Binary files differ
diff --git a/deps/v8/tools/turbolizer/types.png b/deps/v8/tools/turbolizer/img/toggle-types-icon.png
index 8fead8f079..8fead8f079 100644
--- a/deps/v8/tools/turbolizer/types.png
+++ b/deps/v8/tools/turbolizer/img/toggle-types-icon.png
Binary files differ
diff --git a/deps/v8/tools/turbolizer/search.png b/deps/v8/tools/turbolizer/img/zoom-selection-icon.png
index 12dc3e3469..12dc3e3469 100644
--- a/deps/v8/tools/turbolizer/search.png
+++ b/deps/v8/tools/turbolizer/img/zoom-selection-icon.png
Binary files differ
diff --git a/deps/v8/tools/turbolizer/index.html b/deps/v8/tools/turbolizer/index.html
index ea89a61261..f970a6df04 100644
--- a/deps/v8/tools/turbolizer/index.html
+++ b/deps/v8/tools/turbolizer/index.html
@@ -1,63 +1,46 @@
-<!DOCTYPE HTML>
+<!DOCTYPE html>
<html>
- <head>
- <title>Turbolizer</title>
- <link rel="stylesheet" href="turbo-visualizer.css" />
- <link rel="stylesheet" href="prettify.css" />
- <link rel="icon" type="image/png" href="turbolizer.png">
- </head>
- <body>
- <div id="left" class="viewpane scrollable"></div>
- <div class="resizer-left"></div>
- <div id="middle" class="viewpane">
- <div id="graph-toolbox-anchor">
- <span id="graph-toolbox">
- <input id="layout" type="image" title="layout graph" src="layout-icon.png"
- alt="layout graph" class="button-input">
- <input id="show-all" type="image" title="show all nodes" src="expand-all.jpg"
- alt="show all nodes" class="button-input">
- <input id="toggle-hide-dead" type="image" title="show only live nodes" src="live.png"
- alt="only live nodes" class="button-input">
- <input id="hide-unselected" type="image" title="hide unselected nodes"
- src="hide-unselected.png" alt="hide unselected nodes" class="button-input">
- <input id="hide-selected" type="image" title="hide selected nodes"
- src="hide-selected.png" alt="hide selected nodes" class="button-input">
- <input id="zoom-selection" type="image" title="zoom to selection"
- src="search.png" alt="zoom to selection" class="button-input">
- <input id="toggle-types" type="image" title="show/hide types"
- src="types.png" alt="show/hide types" class="button-input">
- <input id="search-input" type="text" title="search nodes for regex"
- alt="search node for regex" class="search-input"
- placeholder="find with regexp&hellip;">
- <select id="display-selector">
- <option disabled selected>(please open a file)</option>
- </select>
- </span>
- </div>
- <div id="load-file">
- <input id="upload-helper" type="file">
- <input id="upload" type="image" title="load graph" class="button-input"
- src="upload-icon.png" alt="upload graph">
- </div>
- <div id='text-placeholder' width="0px" height="0px" style="position: absolute; top:100000px;" ><svg><text text-anchor="right">
- <tspan white-space="inherit" id="text-measure"/>
- </text></svg></div>
+<!--
+Copyright 2019 the V8 project authors. All rights reserved. Use of this source
+code is governed by a BSD-style license that can be found in the LICENSE file.
+-->
+<head>
+ <meta charset="utf-8">
+ <title>V8 Turbolizer</title>
+ <link rel="stylesheet" href="turbo-visualizer.css">
+ <link rel="stylesheet" href="tabs.css">
+ <link rel="stylesheet" href="prettify.css">
+ <link rel="icon" type="image/png" href="turbolizer.png">
+</head>
+
+<body>
+ <div id="left" class="content"></div>
+ <div id="resizer-left" class="resizer"></div>
+ <div id="middle">
+
+ <div id="load-file">
+ <input id="upload-helper" type="file">
+ <input id="upload" type="image" title="load graph" class="button-input" src="upload-icon.png" alt="upload graph">
</div>
- <div class="resizer-right"></div>
- <div id="right" class="viewpane scrollable"></div>
- <div id="source-collapse" class="collapse-pane">
- <input id="source-expand" type="image" title="show source"
- src="right-arrow.png" class="button-input invisible">
- <input id="source-shrink" type="image" title="hide source"
- src="left-arrow.png" class="button-input">
- </div>
- <div id="disassembly-collapse" class="collapse-pane">
- <input id="disassembly-expand" type="image" title="show disassembly"
- src="left-arrow.png" class="button-input invisible">
- <input id="disassembly-shrink" type="image" title="hide disassembly"
- src="right-arrow.png" class="button-input">
- </div>
- <script src="https://cdn.rawgit.com/google/code-prettify/master/loader/run_prettify.js"></script>
- <script src="build/turbolizer.js"></script>
- </body>
+ </div>
+ <div id="resizer-right" class="resizer"></div>
+ <div id="right" class="content"></div>
+ <div id="source-collapse" class="collapse-pane">
+ <input id="source-expand" type="image" title="show source" src="right-arrow.png" class="button-input invisible">
+ <input id="source-shrink" type="image" title="hide source" src="left-arrow.png" class="button-input">
+ </div>
+ <div id="disassembly-collapse" class="collapse-pane">
+ <input id="disassembly-expand" type="image" title="show disassembly" src="left-arrow.png" class="button-input invisible">
+ <input id="disassembly-shrink" type="image" title="hide disassembly" src="right-arrow.png" class="button-input">
+ </div>
+ <div id="text-placeholder" width="0" height="0" style="position: absolute; top:100000px;">
+ <svg>
+ <text text-anchor="right">
+ <tspan white-space="inherit" id="text-measure">
+ </text>
+ </svg>
+ </div>
+ <script src="https://cdn.rawgit.com/google/code-prettify/master/loader/run_prettify.js"></script>
+ <script src="build/turbolizer.js"></script>
+</body>
</html>
diff --git a/deps/v8/tools/turbolizer/info-view.html b/deps/v8/tools/turbolizer/info-view.html
new file mode 100644
index 0000000000..7c714c3f9f
--- /dev/null
+++ b/deps/v8/tools/turbolizer/info-view.html
@@ -0,0 +1,119 @@
+<div>This view contains hints about available keyboard shortcuts.</div>
+<div class="info-topic" id="info-global">
+ <div class="info-topic-header">Global shortcuts</div>
+ <div class="info-topic-content">
+ <table>
+ <tr>
+ <td>CTRL+L</td>
+ <td>Open load file dialog.</td>
+ </tr>
+ <tr>
+ <td>CTRL+R</td>
+ <td>Reload turbolizer (Chrome shortcut)</td>
+ </tr>
+ </table>
+ </div>
+</div>
+<div class="info-topic" id="info-graph-view">
+ <div class="info-topic-header">Graph view</div>
+ <div class="info-topic-content">
+ <table>
+ <tr>
+ <td>r</td>
+ <td>Relayout graph</td>
+ </tr>
+ <tr>
+ <td>a</td>
+ <td>Select all nodes</td>
+ </tr>
+ <tr>
+ <td>/</td>
+ <td>Select search box</td>
+ </tr>
+ </table>
+ </div>
+</div>
+<div class="info-topic" id="info-graph-nodes">
+ <div class="info-topic-header">TurboFan graph nodes</div>
+ <div class="info-topic-content">
+ <div>The following commands transform node selections, i.e. each operation will be applied
+ to each node in the current selection and the union of the resulting nodes will become the
+ new selection.</div>
+ <table>
+ <tr>
+ <td>UP</td>
+ <td>Select all input nodes</td>
+ </tr>
+ <tr>
+ <td>DOWN</td>
+ <td>Select all output nodes</td>
+ </tr>
+ <tr>
+ <td>1-9</td>
+ <td>Select input node 1-9</td>
+ </tr>
+ <tr>
+ <td>CTRL+1-9</td>
+ <td>Toggle input edge 1-9</td>
+ </tr>
+ <tr>
+ <td>c</td>
+ <td>Select control output node</td>
+ </tr>
+ <tr>
+ <td>e</td>
+ <td>Select effect output node</td>
+ </tr>
+ <tr>
+ <td>o</td>
+ <td>Reveal node's input nodes</td>
+ </tr>
+ <tr>
+ <td>i</td>
+ <td>Reveal node's output nodes</td>
+ </tr>
+ <tr>
+ <td>s</td>
+ <td>Select node's origin node</td>
+ </tr>
+ <tr>
+ <td>/</td>
+ <td>Select search box</td>
+ </tr>
+ </table>
+ </div>
+</div>
+<div class="info-topic" id="info-graph-search">
+ <div class="info-topic-header">Graph search</div>
+ <div class="info-topic-content">
+ <table>
+ <tr>
+ <td>ENTER</td>
+ <td>Select nodes according to regular expression. Invisible nodes are included depending on the state of the
+ checkbox "only visible".</td>
+ </tr>
+ <tr>
+ <td>CTRL+ENTER</td>
+ <td>Select nodes according to regular expression, always including invisible nodes regardless of checkbox.</td>
+ </tr>
+ </table>
+ <div style="font-weight: bold">
+ Useful patterns
+ </div>
+ <table>
+ <tr>
+ <td>IfTrue</td>
+ <td>Select nodes which have 'IfTrue' in title or description.</td>
+ </tr>
+ <tr>
+ <td>^42:</td>
+      <td>Select exactly the node with id 42.</td>
+ </tr>
+ <tr>
+ <td>Origin:&nbsp;#42&nbsp;</td>
+ <td>Select nodes which were created while node with id 42 was reduced. This is inaccurate if the node was
+ changed in-place.</td>
+ </tr>
+ </table>
+ </div>
+</div>
diff --git a/deps/v8/tools/turbolizer/package-lock.json b/deps/v8/tools/turbolizer/package-lock.json
index fc9557f76d..34dea91004 100644
--- a/deps/v8/tools/turbolizer/package-lock.json
+++ b/deps/v8/tools/turbolizer/package-lock.json
@@ -4,19 +4,16 @@
"lockfileVersion": 1,
"requires": true,
"dependencies": {
- "@types/commander": {
- "version": "2.12.2",
- "resolved": "https://registry.npmjs.org/@types/commander/-/commander-2.12.2.tgz",
- "integrity": "sha512-0QEFiR8ljcHp9bAbWxecjVRuAMr16ivPiGOw6KFQBVrVd0RQIcM3xKdRisH2EDWgVWujiYtHwhSkSUoAAGzH7Q==",
- "dev": true,
- "requires": {
- "commander": "*"
- }
+ "@koa/cors": {
+ "version": "2.2.2",
+ "resolved": "https://registry.npmjs.org/@koa/cors/-/cors-2.2.2.tgz",
+ "integrity": "sha512-Ollvsy3wB8+7R9w6hPVzlj3wekF6nK+IHpHj7faSPVXCkahqCwNEPp9+0C4b51RDkdpHjevLEGLOKuVjqtXgSQ==",
+ "dev": true
},
"@types/d3": {
- "version": "5.0.0",
- "resolved": "https://registry.npmjs.org/@types/d3/-/d3-5.0.0.tgz",
- "integrity": "sha512-BVfPw7ha+UgsG24v6ymerMY4+pJgQ/6p+hJA4loCeaaqV9snGS/G6ReVaQEn8Himn67dWn/Je9WhRbnDO7MzLw==",
+ "version": "5.5.0",
+ "resolved": "https://registry.npmjs.org/@types/d3/-/d3-5.5.0.tgz",
+ "integrity": "sha512-Bz9EAhWnaO93jLYSAT13blgzwP5Z0grO5THBOXSMeWHIIFHA7ntJSLpHSCr1kDtQunEZKCYT9OfE+4lYY/PwlA==",
"requires": {
"@types/d3-array": "*",
"@types/d3-axis": "*",
@@ -52,30 +49,30 @@
}
},
"@types/d3-array": {
- "version": "1.2.1",
- "resolved": "https://registry.npmjs.org/@types/d3-array/-/d3-array-1.2.1.tgz",
- "integrity": "sha512-YBaAfimGdWE4nDuoGVKsH89/dkz2hWZ0i8qC+xxqmqi+XJ/aXiRF0jPtzXmN7VdkpVjy1xuDmM5/m1FNuB6VWA=="
+ "version": "1.2.4",
+ "resolved": "https://registry.npmjs.org/@types/d3-array/-/d3-array-1.2.4.tgz",
+ "integrity": "sha512-3r1fOAAb+SGfcOGXty/LGvoP0ovMec4UtGNUyHOSzYyvSGpmt+eNMxLowol/3HryusevznSfcHZebEShXMwsZA=="
},
"@types/d3-axis": {
- "version": "1.0.10",
- "resolved": "https://registry.npmjs.org/@types/d3-axis/-/d3-axis-1.0.10.tgz",
- "integrity": "sha512-5YF0wfdQMPKw01VAAupLIlg/T4pn5M3/vL9u0KZjiemnVnnKBEWE24na4X1iW+TfZiYJ8j+BgK2KFYnAAT54Ug==",
+ "version": "1.0.11",
+ "resolved": "https://registry.npmjs.org/@types/d3-axis/-/d3-axis-1.0.11.tgz",
+ "integrity": "sha512-cuigApCyCwYJxaQPghj+BqaxzbdRdT/lpZBMtF7EuEIJ61NMQ8yvGnqFvHCIgJEmUu2Wb2wiZqy9kiHi3Ddftg==",
"requires": {
"@types/d3-selection": "*"
}
},
"@types/d3-brush": {
- "version": "1.0.8",
- "resolved": "https://registry.npmjs.org/@types/d3-brush/-/d3-brush-1.0.8.tgz",
- "integrity": "sha512-9Thv09jvolu9T1BE3fHmIeYSgbwSpdxtF6/A5HZEDjSTfgtA0mtaXRk5AiWOo0KjuLsI+/7ggD3ZGN5Ye8KXPQ==",
+ "version": "1.0.9",
+ "resolved": "https://registry.npmjs.org/@types/d3-brush/-/d3-brush-1.0.9.tgz",
+ "integrity": "sha512-mAx8IVc0luUHfk51pl0UN1vzybnAzLMUsvIwLt3fbsqqPkSXr+Pu1AxOPPeyNc27LhHJnfH/LCV7Jlv+Yzqu1A==",
"requires": {
"@types/d3-selection": "*"
}
},
"@types/d3-chord": {
- "version": "1.0.7",
- "resolved": "https://registry.npmjs.org/@types/d3-chord/-/d3-chord-1.0.7.tgz",
- "integrity": "sha512-WbCN7SxhZMpQQw46oSjAovAmvl3IdjhLuQ4r7AXCzNKyxtXXBWuihSPZ4bVwFQF3+S2z37i9d4hfUBatcSJpog=="
+ "version": "1.0.8",
+ "resolved": "https://registry.npmjs.org/@types/d3-chord/-/d3-chord-1.0.8.tgz",
+ "integrity": "sha512-F0ftYOo7FenAIxsRjXLt8vbij0NLDuVcL+xaGY7R9jUmF2Mrpj1T5XukBI9Cad+Ei7YSxEWREIO+CYcaKCl2qQ=="
},
"@types/d3-collection": {
"version": "1.0.7",
@@ -88,9 +85,9 @@
"integrity": "sha512-xwb1tqvYNWllbHuhMFhiXk63Imf+QNq/dJdmbXmr2wQVnwGenCuj3/0IWJ9hdIFQIqzvhT7T37cvx93jtAsDbQ=="
},
"@types/d3-contour": {
- "version": "1.2.1",
- "resolved": "https://registry.npmjs.org/@types/d3-contour/-/d3-contour-1.2.1.tgz",
- "integrity": "sha512-p8iC4KeVFyT3qRTGQRj0Jf5QDdPsDUevBEnma7gEsY1yDolVSLanG2eFAiLV+xj8/5DK7oU7Ey8z0drs3pbsug==",
+ "version": "1.3.0",
+ "resolved": "https://registry.npmjs.org/@types/d3-contour/-/d3-contour-1.3.0.tgz",
+ "integrity": "sha512-AUCUIjEnC5lCGBM9hS+MryRaFLIrPls4Rbv6ktqbd+TK/RXZPwOy9rtBWmGpbeXcSOYCJTUDwNJuEnmYPJRxHQ==",
"requires": {
"@types/d3-array": "*",
"@types/geojson": "*"
@@ -102,17 +99,17 @@
"integrity": "sha512-xyWJQMr832vqhu6fD/YqX+MSFBWnkxasNhcStvlhqygXxj0cKqPft0wuGoH5TIq5ADXgP83qeNVa4R7bEYN3uA=="
},
"@types/d3-drag": {
- "version": "1.2.1",
- "resolved": "https://registry.npmjs.org/@types/d3-drag/-/d3-drag-1.2.1.tgz",
- "integrity": "sha512-J9liJ4NNeV0oN40MzPiqwWjqNi3YHCRtHNfNMZ1d3uL9yh1+vDuo346LBEr8yyBm30WHvrHssAkExVZrGCswtA==",
+ "version": "1.2.2",
+ "resolved": "https://registry.npmjs.org/@types/d3-drag/-/d3-drag-1.2.2.tgz",
+ "integrity": "sha512-+UKFeaMVTfSQvMO0PTzOyLXSr7OZbF2Rx1iNVwo2XsyiOsd4MSuLyJKUwRmGn67044QpbNzr+VD6/8iBBLExWw==",
"requires": {
"@types/d3-selection": "*"
}
},
"@types/d3-dsv": {
- "version": "1.0.33",
- "resolved": "https://registry.npmjs.org/@types/d3-dsv/-/d3-dsv-1.0.33.tgz",
- "integrity": "sha512-jx5YvaVC3Wfh6LobaiWTeU1NkvL2wPmmpmajk618bD+xVz98yNWzmZMvmlPHGK0HXbMeHmW/6oVX48V9AH1bRQ=="
+ "version": "1.0.34",
+ "resolved": "https://registry.npmjs.org/@types/d3-dsv/-/d3-dsv-1.0.34.tgz",
+ "integrity": "sha512-/grhPLPFJ17GxH18EB8OSOlqcsLahz1xlKb08cVUu3OP83wBPxfoX2otVvLJDTL6BEP0kyTNsA2SdGrRhWwSBQ=="
},
"@types/d3-ease": {
"version": "1.0.7",
@@ -120,9 +117,9 @@
"integrity": "sha1-k6MBhovp4VBh89RDQ7GrP4rLbwk="
},
"@types/d3-fetch": {
- "version": "1.1.2",
- "resolved": "https://registry.npmjs.org/@types/d3-fetch/-/d3-fetch-1.1.2.tgz",
- "integrity": "sha512-w6ANZv/mUh+6IV3drT22zgPWMRobzuGXhzOZC8JPD+ygce0/Vx6vTci3m3dizkocnQQCOwNbrWWWPYqpWiKzRQ==",
+ "version": "1.1.4",
+ "resolved": "https://registry.npmjs.org/@types/d3-fetch/-/d3-fetch-1.1.4.tgz",
+ "integrity": "sha512-POR6AHGEjUk8VjHhU2HfcKxVKnZUIhhHjU65greJs34NlfmWfaDxE+6+ABeMsRCAWa/DRTRNe+1ExuMPBwb7/Q==",
"requires": {
"@types/d3-dsv": "*"
}
@@ -146,14 +143,14 @@
}
},
"@types/d3-hierarchy": {
- "version": "1.1.2",
- "resolved": "https://registry.npmjs.org/@types/d3-hierarchy/-/d3-hierarchy-1.1.2.tgz",
- "integrity": "sha512-L+Ht4doqlCIH8jYN2AC1mYIOj13OxlRhdWNWXv2pc3o5A9i3YmQ0kz6A7w8c+Ujylfusi/FO+zVlVnQoOHc2Qw=="
+ "version": "1.1.5",
+ "resolved": "https://registry.npmjs.org/@types/d3-hierarchy/-/d3-hierarchy-1.1.5.tgz",
+ "integrity": "sha512-DKhqURrURt2c7MsF9sHiF2wrWf2+yZR4Q9oIG026t/ZY4VWoM0Yd7UonaR+rygyReWcFSEjKC/+5A27TgD8R8g=="
},
"@types/d3-interpolate": {
- "version": "1.2.0",
- "resolved": "https://registry.npmjs.org/@types/d3-interpolate/-/d3-interpolate-1.2.0.tgz",
- "integrity": "sha512-qM9KlUrqbwIhBRtw9OtAEbkis1AxsOJEun2uxaX/vEsBp3vyNBmhPz9boXXEqic9ZRi7fCpUNRwyZvxa0PioIw==",
+ "version": "1.3.0",
+ "resolved": "https://registry.npmjs.org/@types/d3-interpolate/-/d3-interpolate-1.3.0.tgz",
+ "integrity": "sha512-Ng4ds7kPSvP/c3W3J5PPUQlgewif1tGBqCeh5lgY+UG82Y7H9zQ8c2gILsEFDLg7wRGOwnuKZ940Q/LSN14w9w==",
"requires": {
"@types/d3-color": "*"
}
@@ -169,9 +166,9 @@
"integrity": "sha512-E6Kyodn9JThgLq20nxSbEce9ow5/ePgm9PX2EO6W1INIL4DayM7cFaiG10DStuamjYAd0X4rntW2q+GRjiIktw=="
},
"@types/d3-quadtree": {
- "version": "1.0.5",
- "resolved": "https://registry.npmjs.org/@types/d3-quadtree/-/d3-quadtree-1.0.5.tgz",
- "integrity": "sha1-HOHmWerkUw3wyxJ/KX8XQaNnqC4="
+ "version": "1.0.6",
+ "resolved": "https://registry.npmjs.org/@types/d3-quadtree/-/d3-quadtree-1.0.6.tgz",
+ "integrity": "sha512-sphVuDdiSIaxLt9kQgebJW98pTktQ/xuN7Ysd8X68Rnjeg/q8+c36/ShlqU52qoKg9nob/JEHH1uQMdxURZidQ=="
},
"@types/d3-random": {
"version": "1.1.1",
@@ -179,35 +176,35 @@
"integrity": "sha512-jUPeBq1XKK9/5XasTvy5QAUwFeMsjma2yt/nP02yC2Tijovx7i/W5776U/HZugxc5SSmtpx4Z3g9KFVon0QrjQ=="
},
"@types/d3-scale": {
- "version": "2.0.1",
- "resolved": "https://registry.npmjs.org/@types/d3-scale/-/d3-scale-2.0.1.tgz",
- "integrity": "sha512-D5ZWv8ToLvqacE7XkdMNHMiiVDULdDxT7FMMGU0YJC3/nVzBmApjyTyxracUWOQyY3KK7YhZ05on8pOcNi0dfQ==",
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/@types/d3-scale/-/d3-scale-2.1.0.tgz",
+ "integrity": "sha512-vLzRDF5lRxZdCLUOvmw90pkiuSsZdgroBQaat0Ov7Z7OnO9iJsPSm/TZw3wW6m2z/NhIn1E4N0RLNfEi1k4kAA==",
"requires": {
"@types/d3-time": "*"
}
},
"@types/d3-scale-chromatic": {
- "version": "1.2.0",
- "resolved": "https://registry.npmjs.org/@types/d3-scale-chromatic/-/d3-scale-chromatic-1.2.0.tgz",
- "integrity": "sha512-bhS2SVzUzRtrxp1REhGCfHmj8pyDv9oDmsonYiPvBl8KCxPJTxnfXBF39PzAJrYnRKM41TR0kQzsJvL+NmcDtg=="
+ "version": "1.3.0",
+ "resolved": "https://registry.npmjs.org/@types/d3-scale-chromatic/-/d3-scale-chromatic-1.3.0.tgz",
+ "integrity": "sha512-JqQH5uu1kmdQEa6XSu7NYzQM71lL1YreBPS5o8SnmEDcBRKL6ooykXa8iFPPOEUiTah25ydi+cTrbsogBSMNSQ=="
},
"@types/d3-selection": {
- "version": "1.3.1",
- "resolved": "https://registry.npmjs.org/@types/d3-selection/-/d3-selection-1.3.1.tgz",
- "integrity": "sha512-G+eO+2G1iW3GNrROxhoU+ar+bIJbQq1QkxcfhwjQ19xA20n3T31j5pSJqAOWvPSoFTz4Ets/DQgYhmgT4jepDg=="
+ "version": "1.3.4",
+ "resolved": "https://registry.npmjs.org/@types/d3-selection/-/d3-selection-1.3.4.tgz",
+ "integrity": "sha512-WQ6Ivy7VuUlZ/Grqc8493ZxC+y/fpvZLy5+8ELvmCr2hll8eJPUqC05l6fgRRA7kjqlpbH7lbmvY6pRKf6yzxw=="
},
"@types/d3-shape": {
- "version": "1.2.3",
- "resolved": "https://registry.npmjs.org/@types/d3-shape/-/d3-shape-1.2.3.tgz",
- "integrity": "sha512-iP9TcX0EVi+LlX+jK9ceS+yhEz5abTitF+JaO2ugpRE/J+bccaYLe/0/3LETMmdaEkYarIyboZW8OF67Mpnj1w==",
+ "version": "1.2.7",
+ "resolved": "https://registry.npmjs.org/@types/d3-shape/-/d3-shape-1.2.7.tgz",
+ "integrity": "sha512-b2jpGcddOseeNxchaR1SNLqA5xZAbgKix3cXiFeuGeYIEAEUu91UbtelCxOHIUTbNURFnjcbkf4plRbejNzVaQ==",
"requires": {
"@types/d3-path": "*"
}
},
"@types/d3-time": {
- "version": "1.0.8",
- "resolved": "https://registry.npmjs.org/@types/d3-time/-/d3-time-1.0.8.tgz",
- "integrity": "sha512-/UCphyyw97YAq4zKsuXH33R3UNB4jDSza0fLvMubWr/ONh9IePi1NbgFP222blhiCe724ebJs8U87+aDuAq/jA=="
+ "version": "1.0.9",
+ "resolved": "https://registry.npmjs.org/@types/d3-time/-/d3-time-1.0.9.tgz",
+ "integrity": "sha512-m+D4NbQdDlTVaO7QgXAnatR3IDxQYDMBtRhgSCi5rs9R1LPq1y7/2aqa1FJ2IWjFm1mOV63swDxonnCDlHgHMA=="
},
"@types/d3-time-format": {
"version": "2.1.0",
@@ -215,27 +212,27 @@
"integrity": "sha512-/myT3I7EwlukNOX2xVdMzb8FRgNzRMpsZddwst9Ld/VFe6LyJyRp0s32l/V9XoUzk+Gqu56F/oGk6507+8BxrA=="
},
"@types/d3-timer": {
- "version": "1.0.7",
- "resolved": "https://registry.npmjs.org/@types/d3-timer/-/d3-timer-1.0.7.tgz",
- "integrity": "sha512-830pT+aYZrgbA91AuynP3KldfB1A1s60d0gKiV+L7JcSKSJapUzUffAm8VZod7RQOxF5SzoItV6cvrTzjbmrJQ=="
+ "version": "1.0.8",
+ "resolved": "https://registry.npmjs.org/@types/d3-timer/-/d3-timer-1.0.8.tgz",
+ "integrity": "sha512-AKUgQ/nljUFcUO2P3gK24weVI5XwUTdJvjoh8gJ0yxT4aJ+d7t2Or3TB+k9dEYl14BAjoj32D0ky+YzQSVszfg=="
},
"@types/d3-transition": {
- "version": "1.1.1",
- "resolved": "https://registry.npmjs.org/@types/d3-transition/-/d3-transition-1.1.1.tgz",
- "integrity": "sha512-GHTghl0YYB8gGgbyKxVLHyAp9Na0HqsX2U7M0u0lGw4IdfEaslooykweZ8fDHW13T+KZeZAuzhbmqBZVFO+6kg==",
+ "version": "1.1.3",
+ "resolved": "https://registry.npmjs.org/@types/d3-transition/-/d3-transition-1.1.3.tgz",
+ "integrity": "sha512-1EukXNuVu/z2G1GZpZagzFJnie9C5zze17ox/vhTgGXNy46rYAm4UkhLLlUeeZ1ndq88k95SOeC8898RpKMLOQ==",
"requires": {
"@types/d3-selection": "*"
}
},
"@types/d3-voronoi": {
- "version": "1.1.7",
- "resolved": "https://registry.npmjs.org/@types/d3-voronoi/-/d3-voronoi-1.1.7.tgz",
- "integrity": "sha512-/dHFLK5jhXTb/W4XEQcFydVk8qlIAo85G3r7+N2fkBFw190l0R1GQ8C1VPeXBb2GfSU5GbT2hjlnE7i7UY5Gvg=="
+ "version": "1.1.8",
+ "resolved": "https://registry.npmjs.org/@types/d3-voronoi/-/d3-voronoi-1.1.8.tgz",
+ "integrity": "sha512-zqNhW7QsYQGlfOdrwPNPG3Wk64zUa4epKRurkJ/dVc6oeXrB+iTDt8sRZ0KZKOOXvvfa1dcdB0e45TZeLBiodQ=="
},
"@types/d3-zoom": {
- "version": "1.7.1",
- "resolved": "https://registry.npmjs.org/@types/d3-zoom/-/d3-zoom-1.7.1.tgz",
- "integrity": "sha512-Ofjwz6Pt53tRef9TAwwayN+JThNVYC/vFOepa/H4KtwjhsqkmEseHvc2jpJM7vye5PQ5XHtTSOpdY4Y/6xZWEg==",
+ "version": "1.7.3",
+ "resolved": "https://registry.npmjs.org/@types/d3-zoom/-/d3-zoom-1.7.3.tgz",
+ "integrity": "sha512-Tz7+z4+Id0MxERw/ozinC5QHJmGLARs9Mpi/7VVfiR+9AHcFGe9q+fjQa30/oPNY8WPuCh5p5uuXmBYAJ3y91Q==",
"requires": {
"@types/d3-interpolate": "*",
"@types/d3-selection": "*"
@@ -247,21 +244,91 @@
"integrity": "sha512-EYNwp3bU+98cpU4lAWYYL7Zz+2gryWH1qbdDTidVd6hkiR6weksdbMadyXKXNPEkQFhXM+hVO9ZygomHXp+AIw=="
},
"@types/geojson": {
- "version": "7946.0.3",
- "resolved": "https://registry.npmjs.org/@types/geojson/-/geojson-7946.0.3.tgz",
- "integrity": "sha512-BYHiG1vQJ7T93uswzuXZ0OBPWqj5tsAPtaMDQADV8sn2InllXarwg9llr6uaW22q1QCwBZ81gVajOpYWzjesug=="
+ "version": "7946.0.4",
+ "resolved": "https://registry.npmjs.org/@types/geojson/-/geojson-7946.0.4.tgz",
+ "integrity": "sha512-MHmwBtCb7OCv1DSivz2UNJXPGU/1btAWRKlqJ2saEhVJkpkvqHMMaOpKg0v4sAbDWSQekHGvPVMM8nQ+Jen03Q=="
+ },
+ "@types/json5": {
+ "version": "0.0.29",
+ "resolved": "https://registry.npmjs.org/@types/json5/-/json5-0.0.29.tgz",
+ "integrity": "sha1-7ihweulOEdK4J7y+UnC86n8+ce4=",
+ "dev": true,
+ "optional": true
},
"@types/node": {
- "version": "10.5.2",
- "resolved": "https://registry.npmjs.org/@types/node/-/node-10.5.2.tgz",
- "integrity": "sha512-m9zXmifkZsMHZBOyxZWilMwmTlpC8x5Ty360JKTiXvlXZfBWYpsg9ZZvP/Ye+iZUh+Q+MxDLjItVTWIsfwz+8Q=="
+ "version": "10.12.18",
+ "resolved": "https://registry.npmjs.org/@types/node/-/node-10.12.18.tgz",
+ "integrity": "sha512-fh+pAqt4xRzPfqA6eh3Z2y6fyZavRIumvjhaCL753+TVkGKGhpPeyrJG2JftD0T9q4GF00KjefsQ+PQNDdWQaQ=="
+ },
+ "JSONStream": {
+ "version": "1.3.5",
+ "resolved": "https://registry.npmjs.org/JSONStream/-/JSONStream-1.3.5.tgz",
+ "integrity": "sha512-E+iruNOY8VV9s4JEbe1aNEm6MiszPRr/UfcHMz0TQh1BXSxHK+ASV1R6W4HpjBhSeS+54PIsAMCBmwD06LLsqQ==",
+ "dev": true,
+ "requires": {
+ "jsonparse": "^1.2.0",
+ "through": ">=2.2.7 <3"
+ }
},
- "@types/semver": {
- "version": "5.5.0",
- "resolved": "https://registry.npmjs.org/@types/semver/-/semver-5.5.0.tgz",
- "integrity": "sha512-41qEJgBH/TWgo5NFSvBCJ1qkoi3Q6ONSF2avrHq1LVEZfYpdHmj0y9SuTK+u9ZhG1sYQKBL1AWXKyLWP4RaUoQ==",
+ "accepts": {
+ "version": "1.3.5",
+ "resolved": "https://registry.npmjs.org/accepts/-/accepts-1.3.5.tgz",
+ "integrity": "sha1-63d99gEXI6OxTopywIBcjoZ0a9I=",
+ "dev": true,
+ "requires": {
+ "mime-types": "~2.1.18",
+ "negotiator": "0.6.1"
+ }
+ },
+ "ansi-escape-sequences": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/ansi-escape-sequences/-/ansi-escape-sequences-4.0.0.tgz",
+ "integrity": "sha512-v+0wW9Wezwsyb0uF4aBVCjmSqit3Ru7PZFziGF0o2KwTvN2zWfTi3BRLq9EkJFdg3eBbyERXGTntVpBxH1J68Q==",
+ "dev": true,
+ "requires": {
+ "array-back": "^2.0.0"
+ }
+ },
+ "ansi-regex": {
+ "version": "2.1.1",
+ "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-2.1.1.tgz",
+ "integrity": "sha1-w7M6te42DYbg5ijwRorn7yfWVN8=",
+ "dev": true
+ },
+ "ansi-styles": {
+ "version": "3.2.1",
+ "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz",
+ "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==",
+ "dev": true,
+ "requires": {
+ "color-convert": "^1.9.0"
+ }
+ },
+ "any-promise": {
+ "version": "1.3.0",
+ "resolved": "https://registry.npmjs.org/any-promise/-/any-promise-1.3.0.tgz",
+ "integrity": "sha1-q8av7tzqUugJzcA3au0845Y10X8=",
"dev": true
},
+ "argparse": {
+ "version": "1.0.10",
+ "resolved": "https://registry.npmjs.org/argparse/-/argparse-1.0.10.tgz",
+ "integrity": "sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==",
+ "dev": true,
+ "requires": {
+ "sprintf-js": "~1.0.2"
+ }
+ },
+ "argv-tools": {
+ "version": "0.1.1",
+ "resolved": "https://registry.npmjs.org/argv-tools/-/argv-tools-0.1.1.tgz",
+ "integrity": "sha512-Cc0dBvx4dvrjjKpyDA6w8RlNAw8Su30NvZbWl/Tv9ZALEVlLVkWQiHMi84Q0xNfpVuSaiQbYkdmWK8g1PLGhKw==",
+ "dev": true,
+ "requires": {
+ "array-back": "^2.0.0",
+ "find-replace": "^2.0.1"
+ }
+ },
"arr-diff": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/arr-diff/-/arr-diff-2.0.0.tgz",
@@ -275,11 +342,112 @@
"resolved": "https://registry.npmjs.org/arr-flatten/-/arr-flatten-1.1.0.tgz",
"integrity": "sha512-L3hKV5R/p5o81R7O02IGnwpDmkp6E982XhtbuwSe3O4qOtMMMtodicASA1Cny2U+aCXcNpml+m4dPsvsJ3jatg=="
},
+ "array-back": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/array-back/-/array-back-2.0.0.tgz",
+ "integrity": "sha512-eJv4pLLufP3g5kcZry0j6WXpIbzYw9GUB4mVJZno9wfwiBxbizTnHCw3VJb07cBihbFX48Y7oSrW9y+gt4glyw==",
+ "dev": true,
+ "requires": {
+ "typical": "^2.6.1"
+ },
+ "dependencies": {
+ "typical": {
+ "version": "2.6.1",
+ "resolved": "https://registry.npmjs.org/typical/-/typical-2.6.1.tgz",
+ "integrity": "sha1-XAgOXWYcu+OCWdLnCjxyU+hziB0=",
+ "dev": true
+ }
+ }
+ },
"array-unique": {
"version": "0.2.1",
"resolved": "https://registry.npmjs.org/array-unique/-/array-unique-0.2.1.tgz",
"integrity": "sha1-odl8yvy8JiXMcPrc6zalDFiwGlM="
},
+ "arrify": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/arrify/-/arrify-1.0.1.tgz",
+ "integrity": "sha1-iYUI2iIm84DfkEcoRWhJwVAaSw0=",
+ "dev": true
+ },
+ "assertion-error": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/assertion-error/-/assertion-error-1.1.0.tgz",
+ "integrity": "sha512-jgsaNduz+ndvGyFt3uSuWqvy4lCnIJiovtouQN5JZHOKCS2QuhEdbcQHFhVksz2N2U9hXJo8odG7ETyWlEeuDw==",
+ "dev": true
+ },
+ "async-limiter": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/async-limiter/-/async-limiter-1.0.0.tgz",
+ "integrity": "sha512-jp/uFnooOiO+L211eZOoSyzpOITMXx1rBITauYykG3BRYPu8h0UcxsPNB04RR5vo4Tyz3+ay17tR6JVf9qzYWg==",
+ "dev": true
+ },
+ "babel-code-frame": {
+ "version": "6.26.0",
+ "resolved": "https://registry.npmjs.org/babel-code-frame/-/babel-code-frame-6.26.0.tgz",
+ "integrity": "sha1-Y/1D99weO7fONZR9uP42mj9Yx0s=",
+ "dev": true,
+ "requires": {
+ "chalk": "^1.1.3",
+ "esutils": "^2.0.2",
+ "js-tokens": "^3.0.2"
+ },
+ "dependencies": {
+ "ansi-styles": {
+ "version": "2.2.1",
+ "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-2.2.1.tgz",
+ "integrity": "sha1-tDLdM1i2NM914eRmQ2gkBTPB3b4=",
+ "dev": true
+ },
+ "chalk": {
+ "version": "1.1.3",
+ "resolved": "https://registry.npmjs.org/chalk/-/chalk-1.1.3.tgz",
+ "integrity": "sha1-qBFcVeSnAv5NFQq9OHKCKn4J/Jg=",
+ "dev": true,
+ "requires": {
+ "ansi-styles": "^2.2.1",
+ "escape-string-regexp": "^1.0.2",
+ "has-ansi": "^2.0.0",
+ "strip-ansi": "^3.0.0",
+ "supports-color": "^2.0.0"
+ }
+ },
+ "supports-color": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-2.0.0.tgz",
+ "integrity": "sha1-U10EXOa2Nj+kARcIRimZXp3zJMc=",
+ "dev": true
+ }
+ }
+ },
+ "balanced-match": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.0.tgz",
+ "integrity": "sha1-ibTRmasr7kneFk6gK4nORi1xt2c=",
+ "dev": true
+ },
+ "basic-auth": {
+ "version": "1.1.0",
+ "resolved": "http://registry.npmjs.org/basic-auth/-/basic-auth-1.1.0.tgz",
+ "integrity": "sha1-RSIe5Cn37h5QNb4/UVM/HN/SmIQ=",
+ "dev": true
+ },
+ "batch": {
+ "version": "0.6.1",
+ "resolved": "https://registry.npmjs.org/batch/-/batch-0.6.1.tgz",
+ "integrity": "sha1-3DQxT05nkxgJP8dgJyUl+UvyXBY=",
+ "dev": true
+ },
+ "brace-expansion": {
+ "version": "1.1.11",
+ "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz",
+ "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==",
+ "dev": true,
+ "requires": {
+ "balanced-match": "^1.0.0",
+ "concat-map": "0.0.1"
+ }
+ },
"braces": {
"version": "1.8.5",
"resolved": "https://registry.npmjs.org/braces/-/braces-1.8.5.tgz",
@@ -290,26 +458,233 @@
"repeat-element": "^1.1.2"
}
},
+ "browser-stdout": {
+ "version": "1.3.1",
+ "resolved": "https://registry.npmjs.org/browser-stdout/-/browser-stdout-1.3.1.tgz",
+ "integrity": "sha512-qhAVI1+Av2X7qelOfAIYwXONood6XlZE/fXaBSmW/T5SzLAmCgzi+eiWE7fUvbHaeNBQH13UftjpXxsfLkMpgw==",
+ "dev": true
+ },
+ "buffer-from": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/buffer-from/-/buffer-from-1.1.1.tgz",
+ "integrity": "sha512-MQcXEUbCKtEo7bhqEs6560Hyd4XaovZlO/k9V3hjVUF/zwW7KBVdSK4gIt/bzwS9MbR5qob+F5jusZsb0YQK2A==",
+ "dev": true
+ },
"builtin-modules": {
- "version": "2.0.0",
- "resolved": "https://registry.npmjs.org/builtin-modules/-/builtin-modules-2.0.0.tgz",
- "integrity": "sha512-3U5kUA5VPsRUA3nofm/BXX7GVHKfxz0hOBAPxXrIvHzlDRkQVqEn6yi8QJegxl4LzOHLdvb7XF5dVawa/VVYBg=="
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/builtin-modules/-/builtin-modules-3.0.0.tgz",
+ "integrity": "sha512-hMIeU4K2ilbXV6Uv93ZZ0Avg/M91RaKXucQ+4me2Do1txxBDyDZWCBa5bJSLqoNTRpXTLwEzIk1KmloenDDjhg=="
+ },
+ "byte-size": {
+ "version": "4.0.4",
+ "resolved": "https://registry.npmjs.org/byte-size/-/byte-size-4.0.4.tgz",
+ "integrity": "sha512-82RPeneC6nqCdSwCX2hZUz3JPOvN5at/nTEw/CMf05Smu3Hrpo9Psb7LjN+k+XndNArG1EY8L4+BM3aTM4BCvw==",
+ "dev": true
+ },
+ "bytes": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.0.0.tgz",
+ "integrity": "sha1-0ygVQE1olpn4Wk6k+odV3ROpYEg=",
+ "dev": true
+ },
+ "cache-content-type": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/cache-content-type/-/cache-content-type-1.0.1.tgz",
+ "integrity": "sha512-IKufZ1o4Ut42YUrZSo8+qnMTrFuKkvyoLXUywKz9GJ5BrhOFGhLdkx9sG4KAnVvbY6kEcSFjLQul+DVmBm2bgA==",
+ "dev": true,
+ "requires": {
+ "mime-types": "^2.1.18",
+ "ylru": "^1.2.0"
+ }
+ },
+ "chai": {
+ "version": "4.2.0",
+ "resolved": "https://registry.npmjs.org/chai/-/chai-4.2.0.tgz",
+ "integrity": "sha512-XQU3bhBukrOsQCuwZndwGcCVQHyZi53fQ6Ys1Fym7E4olpIqqZZhhoFJoaKVvV17lWQoXYwgWN2nF5crA8J2jw==",
+ "dev": true,
+ "requires": {
+ "assertion-error": "^1.1.0",
+ "check-error": "^1.0.2",
+ "deep-eql": "^3.0.1",
+ "get-func-name": "^2.0.0",
+ "pathval": "^1.1.0",
+ "type-detect": "^4.0.5"
+ }
+ },
+ "chalk": {
+ "version": "2.4.1",
+ "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.1.tgz",
+ "integrity": "sha512-ObN6h1v2fTJSmUXoS3nMQ92LbDK9be4TV+6G+omQlGJFdcUX5heKi1LZ1YnRMIgwTLEj3E24bT6tYni50rlCfQ==",
+ "dev": true,
+ "requires": {
+ "ansi-styles": "^3.2.1",
+ "escape-string-regexp": "^1.0.5",
+ "supports-color": "^5.3.0"
+ }
+ },
+ "check-error": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/check-error/-/check-error-1.0.2.tgz",
+ "integrity": "sha1-V00xLt2Iu13YkS6Sht1sCu1KrII=",
+ "dev": true
+ },
+ "cli-commands": {
+ "version": "0.4.0",
+ "resolved": "https://registry.npmjs.org/cli-commands/-/cli-commands-0.4.0.tgz",
+ "integrity": "sha512-zAvJlR7roeMgpUIhMDYATYL90vz+9ffuyPr0+qq4LzcZ0Jq+gM+H1KdYKxerc6U2nhitiDEx79YiJlXdrooEOA==",
+ "dev": true,
+ "requires": {
+ "command-line-args": "^5.0.2",
+ "command-line-commands": "^2.0.1"
+ }
+ },
+ "co": {
+ "version": "4.6.0",
+ "resolved": "https://registry.npmjs.org/co/-/co-4.6.0.tgz",
+ "integrity": "sha1-bqa989hTrlTMuOR7+gvz+QMfsYQ=",
+ "dev": true
+ },
+ "co-body": {
+ "version": "6.0.0",
+ "resolved": "https://registry.npmjs.org/co-body/-/co-body-6.0.0.tgz",
+ "integrity": "sha512-9ZIcixguuuKIptnY8yemEOuhb71L/lLf+Rl5JfJEUiDNJk0e02MBt7BPxR2GEh5mw8dPthQYR4jPI/BnS1MQgw==",
+ "dev": true,
+ "requires": {
+ "inflation": "^2.0.0",
+ "qs": "^6.5.2",
+ "raw-body": "^2.3.3",
+ "type-is": "^1.6.16"
+ }
+ },
+ "color-convert": {
+ "version": "1.9.3",
+ "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz",
+ "integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==",
+ "dev": true,
+ "requires": {
+ "color-name": "1.1.3"
+ }
+ },
+ "color-name": {
+ "version": "1.1.3",
+ "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz",
+ "integrity": "sha1-p9BVi9icQveV3UIyj3QIMcpTvCU=",
+ "dev": true
+ },
+ "command-line-args": {
+ "version": "5.0.2",
+ "resolved": "https://registry.npmjs.org/command-line-args/-/command-line-args-5.0.2.tgz",
+ "integrity": "sha512-/qPcbL8zpqg53x4rAaqMFlRV4opN3pbla7I7k9x8kyOBMQoGT6WltjN6sXZuxOXw6DgdK7Ad+ijYS5gjcr7vlA==",
+ "dev": true,
+ "requires": {
+ "argv-tools": "^0.1.1",
+ "array-back": "^2.0.0",
+ "find-replace": "^2.0.1",
+ "lodash.camelcase": "^4.3.0",
+ "typical": "^2.6.1"
+ },
+ "dependencies": {
+ "typical": {
+ "version": "2.6.1",
+ "resolved": "https://registry.npmjs.org/typical/-/typical-2.6.1.tgz",
+ "integrity": "sha1-XAgOXWYcu+OCWdLnCjxyU+hziB0=",
+ "dev": true
+ }
+ }
+ },
+ "command-line-commands": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/command-line-commands/-/command-line-commands-2.0.1.tgz",
+ "integrity": "sha512-m8c2p1DrNd2ruIAggxd/y6DgygQayf6r8RHwchhXryaLF8I6koYjoYroVP+emeROE9DXN5b9sP1Gh+WtvTTdtQ==",
+ "dev": true,
+ "requires": {
+ "array-back": "^2.0.0"
+ }
+ },
+ "command-line-usage": {
+ "version": "5.0.5",
+ "resolved": "https://registry.npmjs.org/command-line-usage/-/command-line-usage-5.0.5.tgz",
+ "integrity": "sha512-d8NrGylA5oCXSbGoKz05FkehDAzSmIm4K03S5VDh4d5lZAtTWfc3D1RuETtuQCn8129nYfJfDdF7P/lwcz1BlA==",
+ "dev": true,
+ "requires": {
+ "array-back": "^2.0.0",
+ "chalk": "^2.4.1",
+ "table-layout": "^0.4.3",
+ "typical": "^2.6.1"
+ },
+ "dependencies": {
+ "typical": {
+ "version": "2.6.1",
+ "resolved": "https://registry.npmjs.org/typical/-/typical-2.6.1.tgz",
+ "integrity": "sha1-XAgOXWYcu+OCWdLnCjxyU+hziB0=",
+ "dev": true
+ }
+ }
},
"commander": {
"version": "2.15.1",
"resolved": "https://registry.npmjs.org/commander/-/commander-2.15.1.tgz",
"integrity": "sha512-VlfT9F3V0v+jr4yxPc5gg9s62/fIVWsd2Bk2iD435um1NlGMYdVCq+MjcXnhYq2icNOizHr1kK+5TI6H0Hy0ag=="
},
- "commandpost": {
- "version": "1.3.0",
- "resolved": "https://registry.npmjs.org/commandpost/-/commandpost-1.3.0.tgz",
- "integrity": "sha512-T62tyrmYTkaRDbV2z1k2yXTyxk0cFptXYwo1cUbnfHtp7ThLgQ9/90jG1Ym5WLZgFhvOTaHA5VSARWJ9URpLDw==",
+ "common-log-format": {
+ "version": "0.1.4",
+ "resolved": "https://registry.npmjs.org/common-log-format/-/common-log-format-0.1.4.tgz",
+ "integrity": "sha512-BXcgq+wzr2htmBmnT7cL7YHzPAWketWbr4kozjoM9kWe4sk3+zMgjcH0HO+EddjDlEw2LZysqLpVRwbF318tDw==",
+ "dev": true
+ },
+ "compressible": {
+ "version": "2.0.15",
+ "resolved": "https://registry.npmjs.org/compressible/-/compressible-2.0.15.tgz",
+ "integrity": "sha512-4aE67DL33dSW9gw4CI2H/yTxqHLNcxp0yS6jB+4h+wr3e43+1z7vm0HU9qXOH8j+qjKuL8+UtkOxYQSMq60Ylw==",
+ "dev": true,
+ "requires": {
+ "mime-db": ">= 1.36.0 < 2"
+ }
+ },
+ "concat-map": {
+ "version": "0.0.1",
+ "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz",
+ "integrity": "sha1-2Klr13/Wjfd5OnMDajug1UBdR3s=",
+ "dev": true
+ },
+ "content-disposition": {
+ "version": "0.5.2",
+ "resolved": "https://registry.npmjs.org/content-disposition/-/content-disposition-0.5.2.tgz",
+ "integrity": "sha1-DPaLud318r55YcOoUXjLhdunjLQ=",
+ "dev": true
+ },
+ "content-type": {
+ "version": "1.0.4",
+ "resolved": "https://registry.npmjs.org/content-type/-/content-type-1.0.4.tgz",
+ "integrity": "sha512-hIP3EEPs8tB9AT1L+NUqtwOAps4mk2Zob89MWXMHjHWg9milF/j4osnnQLXBCBFBk/tvIG/tUc9mOUJiPBhPXA==",
+ "dev": true
+ },
+ "cookies": {
+ "version": "0.7.2",
+ "resolved": "https://registry.npmjs.org/cookies/-/cookies-0.7.2.tgz",
+ "integrity": "sha512-J2JjH9T3PUNKPHknprxgCrCaZshIfxW2j49gq1E1CP5Micj1LppWAR2y9EHSQAzEiX84zOsScWNwUZ0b/ChlMw==",
+ "dev": true,
+ "requires": {
+ "depd": "~1.1.2",
+ "keygrip": "~1.0.2"
+ }
+ },
+ "copy-to": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/copy-to/-/copy-to-2.0.1.tgz",
+ "integrity": "sha1-JoD7uAaKSNCGVrYJgJK9r8kG9KU=",
+ "dev": true
+ },
+ "core-util-is": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/core-util-is/-/core-util-is-1.0.2.tgz",
+ "integrity": "sha1-tf1UIgqivFq1eqtxQMlAdUUDwac=",
"dev": true
},
"d3": {
- "version": "5.5.0",
- "resolved": "https://registry.npmjs.org/d3/-/d3-5.5.0.tgz",
- "integrity": "sha512-HRDSYvT3n7kMvJH7Avp7iR0Xsz97bkCFka9aOg04EdyXyiAP8yQzUpLH3712y9R7ffVo1g94t1OYFHBB0yI9vQ==",
+ "version": "5.7.0",
+ "resolved": "https://registry.npmjs.org/d3/-/d3-5.7.0.tgz",
+ "integrity": "sha512-8KEIfx+dFm8PlbJN9PI0suazrZ41QcaAufsKE9PRcqYPWLngHIyWJZX96n6IQKePGgeSu0l7rtlueSSNq8Zc3g==",
"requires": {
"d3-array": "1",
"d3-axis": "1",
@@ -345,19 +720,19 @@
}
},
"d3-array": {
- "version": "1.2.1",
- "resolved": "https://registry.npmjs.org/d3-array/-/d3-array-1.2.1.tgz",
- "integrity": "sha512-CyINJQ0SOUHojDdFDH4JEM0552vCR1utGyLHegJHyYH0JyCpSeTPxi4OBqHMA2jJZq4NH782LtaJWBImqI/HBw=="
+ "version": "1.2.4",
+ "resolved": "https://registry.npmjs.org/d3-array/-/d3-array-1.2.4.tgz",
+ "integrity": "sha512-KHW6M86R+FUPYGb3R5XiYjXPq7VzwxZ22buHhAEVG5ztoEcZZMLov530mmccaqA1GghZArjQV46fuc8kUqhhHw=="
},
"d3-axis": {
- "version": "1.0.8",
- "resolved": "https://registry.npmjs.org/d3-axis/-/d3-axis-1.0.8.tgz",
- "integrity": "sha1-MacFoLU15ldZ3hQXOjGTMTfxjvo="
+ "version": "1.0.12",
+ "resolved": "https://registry.npmjs.org/d3-axis/-/d3-axis-1.0.12.tgz",
+ "integrity": "sha512-ejINPfPSNdGFKEOAtnBtdkpr24c4d4jsei6Lg98mxf424ivoDP2956/5HDpIAtmHo85lqT4pruy+zEgvRUBqaQ=="
},
"d3-brush": {
- "version": "1.0.4",
- "resolved": "https://registry.npmjs.org/d3-brush/-/d3-brush-1.0.4.tgz",
- "integrity": "sha1-AMLyOAGfJPbAoZSibUGhUw/+e8Q=",
+ "version": "1.0.6",
+ "resolved": "https://registry.npmjs.org/d3-brush/-/d3-brush-1.0.6.tgz",
+ "integrity": "sha512-lGSiF5SoSqO5/mYGD5FAeGKKS62JdA1EV7HPrU2b5rTX4qEJJtpjaGLJngjnkewQy7UnGstnFd3168wpf5z76w==",
"requires": {
"d3-dispatch": "1",
"d3-drag": "1",
@@ -367,50 +742,50 @@
}
},
"d3-chord": {
- "version": "1.0.4",
- "resolved": "https://registry.npmjs.org/d3-chord/-/d3-chord-1.0.4.tgz",
- "integrity": "sha1-fexPC6iG9xP+ERxF92NBT290yiw=",
+ "version": "1.0.6",
+ "resolved": "https://registry.npmjs.org/d3-chord/-/d3-chord-1.0.6.tgz",
+ "integrity": "sha512-JXA2Dro1Fxw9rJe33Uv+Ckr5IrAa74TlfDEhE/jfLOaXegMQFQTAgAw9WnZL8+HxVBRXaRGCkrNU7pJeylRIuA==",
"requires": {
"d3-array": "1",
"d3-path": "1"
}
},
"d3-collection": {
- "version": "1.0.4",
- "resolved": "https://registry.npmjs.org/d3-collection/-/d3-collection-1.0.4.tgz",
- "integrity": "sha1-NC39EoN8kJdPM/HMCnha6lcNzcI="
+ "version": "1.0.7",
+ "resolved": "https://registry.npmjs.org/d3-collection/-/d3-collection-1.0.7.tgz",
+ "integrity": "sha512-ii0/r5f4sjKNTfh84Di+DpztYwqKhEyUlKoPrzUFfeSkWxjW49xU2QzO9qrPrNkpdI0XJkfzvmTu8V2Zylln6A=="
},
"d3-color": {
- "version": "1.2.0",
- "resolved": "https://registry.npmjs.org/d3-color/-/d3-color-1.2.0.tgz",
- "integrity": "sha512-dmL9Zr/v39aSSMnLOTd58in2RbregCg4UtGyUArvEKTTN6S3HKEy+ziBWVYo9PTzRyVW+pUBHUtRKz0HYX+SQg=="
+ "version": "1.2.3",
+ "resolved": "https://registry.npmjs.org/d3-color/-/d3-color-1.2.3.tgz",
+ "integrity": "sha512-x37qq3ChOTLd26hnps36lexMRhNXEtVxZ4B25rL0DVdDsGQIJGB18S7y9XDwlDD6MD/ZBzITCf4JjGMM10TZkw=="
},
"d3-contour": {
- "version": "1.3.0",
- "resolved": "https://registry.npmjs.org/d3-contour/-/d3-contour-1.3.0.tgz",
- "integrity": "sha512-6zccxidQRtcydx0lWqHawdW1UcBzKZTxv0cW90Dlx98pY/L7GjQJmftH1tWopYFDaLCoXU0ECg9x/z2EuFT8tg==",
+ "version": "1.3.2",
+ "resolved": "https://registry.npmjs.org/d3-contour/-/d3-contour-1.3.2.tgz",
+ "integrity": "sha512-hoPp4K/rJCu0ladiH6zmJUEz6+u3lgR+GSm/QdM2BBvDraU39Vr7YdDCicJcxP1z8i9B/2dJLgDC1NcvlF8WCg==",
"requires": {
"d3-array": "^1.1.1"
}
},
"d3-dispatch": {
- "version": "1.0.3",
- "resolved": "https://registry.npmjs.org/d3-dispatch/-/d3-dispatch-1.0.3.tgz",
- "integrity": "sha1-RuFJHqqbWMNY/OW+TovtYm54cfg="
+ "version": "1.0.5",
+ "resolved": "https://registry.npmjs.org/d3-dispatch/-/d3-dispatch-1.0.5.tgz",
+ "integrity": "sha512-vwKx+lAqB1UuCeklr6Jh1bvC4SZgbSqbkGBLClItFBIYH4vqDJCA7qfoy14lXmJdnBOdxndAMxjCbImJYW7e6g=="
},
"d3-drag": {
- "version": "1.2.1",
- "resolved": "https://registry.npmjs.org/d3-drag/-/d3-drag-1.2.1.tgz",
- "integrity": "sha512-Cg8/K2rTtzxzrb0fmnYOUeZHvwa4PHzwXOLZZPwtEs2SKLLKLXeYwZKBB+DlOxUvFmarOnmt//cU4+3US2lyyQ==",
+ "version": "1.2.3",
+ "resolved": "https://registry.npmjs.org/d3-drag/-/d3-drag-1.2.3.tgz",
+ "integrity": "sha512-8S3HWCAg+ilzjJsNtWW1Mutl74Nmzhb9yU6igspilaJzeZVFktmY6oO9xOh5TDk+BM2KrNFjttZNoJJmDnkjkg==",
"requires": {
"d3-dispatch": "1",
"d3-selection": "1"
}
},
"d3-dsv": {
- "version": "1.0.8",
- "resolved": "https://registry.npmjs.org/d3-dsv/-/d3-dsv-1.0.8.tgz",
- "integrity": "sha512-IVCJpQ+YGe3qu6odkPQI0KPqfxkhbP/oM1XhhE/DFiYmcXKfCRub4KXyiuehV1d4drjWVXHUWx4gHqhdZb6n/A==",
+ "version": "1.0.10",
+ "resolved": "https://registry.npmjs.org/d3-dsv/-/d3-dsv-1.0.10.tgz",
+ "integrity": "sha512-vqklfpxmtO2ZER3fq/B33R/BIz3A1PV0FaZRuFM8w6jLo7sUX1BZDh73fPlr0s327rzq4H6EN1q9U+eCBCSN8g==",
"requires": {
"commander": "2",
"iconv-lite": "0.4",
@@ -418,22 +793,22 @@
}
},
"d3-ease": {
- "version": "1.0.3",
- "resolved": "https://registry.npmjs.org/d3-ease/-/d3-ease-1.0.3.tgz",
- "integrity": "sha1-aL+8NJM4o4DETYrMT7wzBKotjA4="
+ "version": "1.0.5",
+ "resolved": "https://registry.npmjs.org/d3-ease/-/d3-ease-1.0.5.tgz",
+ "integrity": "sha512-Ct1O//ly5y5lFM9YTdu+ygq7LleSgSE4oj7vUt9tPLHUi8VCV7QoizGpdWRWAwCO9LdYzIrQDg97+hGVdsSGPQ=="
},
"d3-fetch": {
- "version": "1.1.0",
- "resolved": "https://registry.npmjs.org/d3-fetch/-/d3-fetch-1.1.0.tgz",
- "integrity": "sha512-j+V4vtT6dceQbcKYLtpTueB8Zvc+wb9I93WaFtEQIYNADXl0c1ZJMN3qQo0CssiTsAqK8pePwc7f4qiW+b0WOg==",
+ "version": "1.1.2",
+ "resolved": "https://registry.npmjs.org/d3-fetch/-/d3-fetch-1.1.2.tgz",
+ "integrity": "sha512-S2loaQCV/ZeyTyIF2oP8D1K9Z4QizUzW7cWeAOAS4U88qOt3Ucf6GsmgthuYSdyB2HyEm4CeGvkQxWsmInsIVA==",
"requires": {
"d3-dsv": "1"
}
},
"d3-force": {
- "version": "1.1.0",
- "resolved": "https://registry.npmjs.org/d3-force/-/d3-force-1.1.0.tgz",
- "integrity": "sha512-2HVQz3/VCQs0QeRNZTYb7GxoUCeb6bOzMp/cGcLa87awY9ZsPvXOGeZm0iaGBjXic6I1ysKwMn+g+5jSAdzwcg==",
+ "version": "1.1.2",
+ "resolved": "https://registry.npmjs.org/d3-force/-/d3-force-1.1.2.tgz",
+ "integrity": "sha512-p1vcHAUF1qH7yR+e8ip7Bs61AHjLeKkIn8Z2gzwU2lwEf2wkSpWdjXG0axudTHsVFnYGlMkFaEsVy2l8tAg1Gw==",
"requires": {
"d3-collection": "1",
"d3-dispatch": "1",
@@ -442,55 +817,55 @@
}
},
"d3-format": {
- "version": "1.3.0",
- "resolved": "https://registry.npmjs.org/d3-format/-/d3-format-1.3.0.tgz",
- "integrity": "sha512-ycfLEIzHVZC3rOvuBOKVyQXSiUyCDjeAPIj9n/wugrr+s5AcTQC2Bz6aKkubG7rQaQF0SGW/OV4UEJB9nfioFg=="
+ "version": "1.3.2",
+ "resolved": "https://registry.npmjs.org/d3-format/-/d3-format-1.3.2.tgz",
+ "integrity": "sha512-Z18Dprj96ExragQ0DeGi+SYPQ7pPfRMtUXtsg/ChVIKNBCzjO8XYJvRTC1usblx52lqge56V5ect+frYTQc8WQ=="
},
"d3-geo": {
- "version": "1.10.0",
- "resolved": "https://registry.npmjs.org/d3-geo/-/d3-geo-1.10.0.tgz",
- "integrity": "sha512-VK/buVGgexthTTqGRNXQ/LSo3EbOFu4p2Pjud5drSIaEnOaF2moc8A3P7WEljEO1JEBEwbpAJjFWMuJiUtoBcw==",
+ "version": "1.11.3",
+ "resolved": "https://registry.npmjs.org/d3-geo/-/d3-geo-1.11.3.tgz",
+ "integrity": "sha512-n30yN9qSKREvV2fxcrhmHUdXP9TNH7ZZj3C/qnaoU0cVf/Ea85+yT7HY7i8ySPwkwjCNYtmKqQFTvLFngfkItQ==",
"requires": {
"d3-array": "1"
}
},
"d3-hierarchy": {
- "version": "1.1.6",
- "resolved": "https://registry.npmjs.org/d3-hierarchy/-/d3-hierarchy-1.1.6.tgz",
- "integrity": "sha512-nn4bhBnwWnMSoZgkBXD7vRyZ0xVUsNMQRKytWYHhP1I4qHw+qzApCTgSQTZqMdf4XXZbTMqA59hFusga+THA/g=="
+ "version": "1.1.8",
+ "resolved": "https://registry.npmjs.org/d3-hierarchy/-/d3-hierarchy-1.1.8.tgz",
+ "integrity": "sha512-L+GHMSZNwTpiq4rt9GEsNcpLa4M96lXMR8M/nMG9p5hBE0jy6C+3hWtyZMenPQdwla249iJy7Nx0uKt3n+u9+w=="
},
"d3-interpolate": {
- "version": "1.2.0",
- "resolved": "https://registry.npmjs.org/d3-interpolate/-/d3-interpolate-1.2.0.tgz",
- "integrity": "sha512-zLvTk8CREPFfc/2XglPQriAsXkzoRDAyBzndtKJWrZmHw7kmOWHNS11e40kPTd/oGk8P5mFJW5uBbcFQ+ybxyA==",
+ "version": "1.3.2",
+ "resolved": "https://registry.npmjs.org/d3-interpolate/-/d3-interpolate-1.3.2.tgz",
+ "integrity": "sha512-NlNKGopqaz9qM1PXh9gBF1KSCVh+jSFErrSlD/4hybwoNX/gt1d8CDbDW+3i+5UOHhjC6s6nMvRxcuoMVNgL2w==",
"requires": {
"d3-color": "1"
}
},
"d3-path": {
- "version": "1.0.5",
- "resolved": "https://registry.npmjs.org/d3-path/-/d3-path-1.0.5.tgz",
- "integrity": "sha1-JB6xhJvZ6egCHA0KeZ+KDo5EF2Q="
+ "version": "1.0.7",
+ "resolved": "https://registry.npmjs.org/d3-path/-/d3-path-1.0.7.tgz",
+ "integrity": "sha512-q0cW1RpvA5c5ma2rch62mX8AYaiLX0+bdaSM2wxSU9tXjU4DNvkx9qiUvjkuWCj3p22UO/hlPivujqMiR9PDzA=="
},
"d3-polygon": {
- "version": "1.0.3",
- "resolved": "https://registry.npmjs.org/d3-polygon/-/d3-polygon-1.0.3.tgz",
- "integrity": "sha1-FoiOkCZGCTPysXllKtN4Ik04LGI="
+ "version": "1.0.5",
+ "resolved": "https://registry.npmjs.org/d3-polygon/-/d3-polygon-1.0.5.tgz",
+ "integrity": "sha512-RHhh1ZUJZfhgoqzWWuRhzQJvO7LavchhitSTHGu9oj6uuLFzYZVeBzaWTQ2qSO6bz2w55RMoOCf0MsLCDB6e0w=="
},
"d3-quadtree": {
- "version": "1.0.3",
- "resolved": "https://registry.npmjs.org/d3-quadtree/-/d3-quadtree-1.0.3.tgz",
- "integrity": "sha1-rHmH4+I/6AWpkPKOG1DTj8uCJDg="
+ "version": "1.0.5",
+ "resolved": "https://registry.npmjs.org/d3-quadtree/-/d3-quadtree-1.0.5.tgz",
+ "integrity": "sha512-U2tjwDFbZ75JRAg8A+cqMvqPg1G3BE7UTJn3h8DHjY/pnsAfWdbJKgyfcy7zKjqGtLAmI0q8aDSeG1TVIKRaHQ=="
},
"d3-random": {
- "version": "1.1.0",
- "resolved": "https://registry.npmjs.org/d3-random/-/d3-random-1.1.0.tgz",
- "integrity": "sha1-ZkLlBsb6OmSFldKyRpeIqNElKdM="
+ "version": "1.1.2",
+ "resolved": "https://registry.npmjs.org/d3-random/-/d3-random-1.1.2.tgz",
+ "integrity": "sha512-6AK5BNpIFqP+cx/sreKzNjWbwZQCSUatxq+pPRmFIQaWuoD+NrbVWw7YWpHiXpCQ/NanKdtGDuB+VQcZDaEmYQ=="
},
"d3-scale": {
- "version": "2.1.0",
- "resolved": "https://registry.npmjs.org/d3-scale/-/d3-scale-2.1.0.tgz",
- "integrity": "sha512-Bb2N3ZgzPdKVEoWGkt8lPV6R7YdpSBWI70Xf26NQHOVjs77a6gLUmBOOPt9d9nB8JiQhwXY1RHCa+eSyWCJZIQ==",
+ "version": "2.1.2",
+ "resolved": "https://registry.npmjs.org/d3-scale/-/d3-scale-2.1.2.tgz",
+ "integrity": "sha512-bESpd64ylaKzCDzvULcmHKZTlzA/6DGSVwx7QSDj/EnX9cpSevsdiwdHFYI9ouo9tNBbV3v5xztHS2uFeOzh8Q==",
"requires": {
"d3-array": "^1.2.0",
"d3-collection": "1",
@@ -501,49 +876,49 @@
}
},
"d3-scale-chromatic": {
- "version": "1.3.0",
- "resolved": "https://registry.npmjs.org/d3-scale-chromatic/-/d3-scale-chromatic-1.3.0.tgz",
- "integrity": "sha512-YwMbiaW2bStWvQFByK8hA6hk7ToWflspIo2TRukCqERd8isiafEMBXmwfh8c7/0Z94mVvIzIveRLVC6RAjhgeA==",
+ "version": "1.3.3",
+ "resolved": "https://registry.npmjs.org/d3-scale-chromatic/-/d3-scale-chromatic-1.3.3.tgz",
+ "integrity": "sha512-BWTipif1CimXcYfT02LKjAyItX5gKiwxuPRgr4xM58JwlLocWbjPLI7aMEjkcoOQXMkYsmNsvv3d2yl/OKuHHw==",
"requires": {
"d3-color": "1",
"d3-interpolate": "1"
}
},
"d3-selection": {
- "version": "1.3.0",
- "resolved": "https://registry.npmjs.org/d3-selection/-/d3-selection-1.3.0.tgz",
- "integrity": "sha512-qgpUOg9tl5CirdqESUAu0t9MU/t3O9klYfGfyKsXEmhyxyzLpzpeh08gaxBUTQw1uXIOkr/30Ut2YRjSSxlmHA=="
+ "version": "1.3.2",
+ "resolved": "https://registry.npmjs.org/d3-selection/-/d3-selection-1.3.2.tgz",
+ "integrity": "sha512-OoXdv1nZ7h2aKMVg3kaUFbLLK5jXUFAMLD/Tu5JA96mjf8f2a9ZUESGY+C36t8R1WFeWk/e55hy54Ml2I62CRQ=="
},
"d3-shape": {
- "version": "1.2.0",
- "resolved": "https://registry.npmjs.org/d3-shape/-/d3-shape-1.2.0.tgz",
- "integrity": "sha1-RdAVOPBkuv0F6j1tLLdI/YxB93c=",
+ "version": "1.2.2",
+ "resolved": "https://registry.npmjs.org/d3-shape/-/d3-shape-1.2.2.tgz",
+ "integrity": "sha512-hUGEozlKecFZ2bOSNt7ENex+4Tk9uc/m0TtTEHBvitCBxUNjhzm5hS2GrrVRD/ae4IylSmxGeqX5tWC2rASMlQ==",
"requires": {
"d3-path": "1"
}
},
"d3-time": {
- "version": "1.0.8",
- "resolved": "https://registry.npmjs.org/d3-time/-/d3-time-1.0.8.tgz",
- "integrity": "sha512-YRZkNhphZh3KcnBfitvF3c6E0JOFGikHZ4YqD+Lzv83ZHn1/u6yGenRU1m+KAk9J1GnZMnKcrtfvSktlA1DXNQ=="
+ "version": "1.0.10",
+ "resolved": "https://registry.npmjs.org/d3-time/-/d3-time-1.0.10.tgz",
+ "integrity": "sha512-hF+NTLCaJHF/JqHN5hE8HVGAXPStEq6/omumPE/SxyHVrR7/qQxusFDo0t0c/44+sCGHthC7yNGFZIEgju0P8g=="
},
"d3-time-format": {
- "version": "2.1.1",
- "resolved": "https://registry.npmjs.org/d3-time-format/-/d3-time-format-2.1.1.tgz",
- "integrity": "sha512-8kAkymq2WMfzW7e+s/IUNAtN/y3gZXGRrdGfo6R8NKPAA85UBTxZg5E61bR6nLwjPjj4d3zywSQe1CkYLPFyrw==",
+ "version": "2.1.3",
+ "resolved": "https://registry.npmjs.org/d3-time-format/-/d3-time-format-2.1.3.tgz",
+ "integrity": "sha512-6k0a2rZryzGm5Ihx+aFMuO1GgelgIz+7HhB4PH4OEndD5q2zGn1mDfRdNrulspOfR6JXkb2sThhDK41CSK85QA==",
"requires": {
"d3-time": "1"
}
},
"d3-timer": {
- "version": "1.0.7",
- "resolved": "https://registry.npmjs.org/d3-timer/-/d3-timer-1.0.7.tgz",
- "integrity": "sha512-vMZXR88XujmG/L5oB96NNKH5lCWwiLM/S2HyyAQLcjWJCloK5shxta4CwOFYLZoY3AWX73v8Lgv4cCAdWtRmOA=="
+ "version": "1.0.9",
+ "resolved": "https://registry.npmjs.org/d3-timer/-/d3-timer-1.0.9.tgz",
+ "integrity": "sha512-rT34J5HnQUHhcLvhSB9GjCkN0Ddd5Y8nCwDBG2u6wQEeYxT/Lf51fTFFkldeib/sE/J0clIe0pnCfs6g/lRbyg=="
},
"d3-transition": {
- "version": "1.1.1",
- "resolved": "https://registry.npmjs.org/d3-transition/-/d3-transition-1.1.1.tgz",
- "integrity": "sha512-xeg8oggyQ+y5eb4J13iDgKIjUcEfIOZs2BqV/eEmXm2twx80wTzJ4tB4vaZ5BKfz7XsI/DFmQL5me6O27/5ykQ==",
+ "version": "1.1.3",
+ "resolved": "https://registry.npmjs.org/d3-transition/-/d3-transition-1.1.3.tgz",
+ "integrity": "sha512-tEvo3qOXL6pZ1EzcXxFcPNxC/Ygivu5NoBY6mbzidATAeML86da+JfVIUzon3dNM6UX6zjDx+xbYDmMVtTSjuA==",
"requires": {
"d3-color": "1",
"d3-dispatch": "1",
@@ -554,14 +929,14 @@
}
},
"d3-voronoi": {
- "version": "1.1.2",
- "resolved": "https://registry.npmjs.org/d3-voronoi/-/d3-voronoi-1.1.2.tgz",
- "integrity": "sha1-Fodmfo8TotFYyAwUgMWinLDYlzw="
+ "version": "1.1.4",
+ "resolved": "https://registry.npmjs.org/d3-voronoi/-/d3-voronoi-1.1.4.tgz",
+ "integrity": "sha512-dArJ32hchFsrQ8uMiTBLq256MpnZjeuBtdHpaDlYuQyjU0CVzCJl/BVW+SkszaAeH95D/8gxqAhgx0ouAWAfRg=="
},
"d3-zoom": {
- "version": "1.7.1",
- "resolved": "https://registry.npmjs.org/d3-zoom/-/d3-zoom-1.7.1.tgz",
- "integrity": "sha512-sZHQ55DGq5BZBFGnRshUT8tm2sfhPHFnOlmPbbwTkAoPeVdRTkB4Xsf9GCY0TSHrTD8PeJPZGmP/TpGicwJDJQ==",
+ "version": "1.7.3",
+ "resolved": "https://registry.npmjs.org/d3-zoom/-/d3-zoom-1.7.3.tgz",
+ "integrity": "sha512-xEBSwFx5Z9T3/VrwDkMt+mr0HCzv7XjpGURJ8lWmIC8wxe32L39eWHIasEe/e7Ox8MPU4p1hvH8PKN2olLzIBg==",
"requires": {
"d3-dispatch": "1",
"d3-drag": "1",
@@ -570,25 +945,120 @@
"d3-transition": "1"
}
},
- "editorconfig": {
- "version": "0.15.0",
- "resolved": "https://registry.npmjs.org/editorconfig/-/editorconfig-0.15.0.tgz",
- "integrity": "sha512-j7JBoj/bpNzvoTQylfRZSc85MlLNKWQiq5y6gwKhmqD2h1eZ+tH4AXbkhEJD468gjDna/XMx2YtSkCxBRX9OGg==",
+ "debug": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/debug/-/debug-3.1.0.tgz",
+ "integrity": "sha512-OX8XqP7/1a9cqkxYw2yXss15f26NKWBpDXQd0/uK/KPqdQhxbPa994hnzjcE2VqQpDslf55723cKPUOGSmMY3g==",
"dev": true,
"requires": {
- "@types/commander": "^2.11.0",
- "@types/semver": "^5.4.0",
- "commander": "^2.11.0",
- "lru-cache": "^4.1.1",
- "semver": "^5.4.1",
- "sigmund": "^1.0.1"
+ "ms": "2.0.0"
}
},
+ "deep-eql": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/deep-eql/-/deep-eql-3.0.1.tgz",
+ "integrity": "sha512-+QeIQyN5ZuO+3Uk5DYh6/1eKO0m0YmJFGNmFHGACpf1ClL1nmlV/p4gNgbl2pJGxgXb4faqo6UE+M5ACEMyVcw==",
+ "dev": true,
+ "requires": {
+ "type-detect": "^4.0.0"
+ }
+ },
+ "deep-equal": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/deep-equal/-/deep-equal-1.0.1.tgz",
+ "integrity": "sha1-9dJgKStmDghO/0zbyfCK0yR0SLU=",
+ "dev": true
+ },
+ "deep-extend": {
+ "version": "0.6.0",
+ "resolved": "https://registry.npmjs.org/deep-extend/-/deep-extend-0.6.0.tgz",
+ "integrity": "sha512-LOHxIOaPYdHlJRtCQfDIVZtfw/ufM8+rVj649RIHzcm/vGwQRXFt6OPqIFWsm2XEMrNIEtWR64sY1LEKD2vAOA==",
+ "dev": true
+ },
+ "deepmerge": {
+ "version": "2.2.1",
+ "resolved": "https://registry.npmjs.org/deepmerge/-/deepmerge-2.2.1.tgz",
+ "integrity": "sha512-R9hc1Xa/NOBi9WRVUWg19rl1UB7Tt4kuPd+thNJgFZoxXsTz7ncaPaeIm+40oSGuP33DfMb4sZt1QIGiJzC4EA==",
+ "dev": true,
+ "optional": true
+ },
+ "defer-promise": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/defer-promise/-/defer-promise-1.0.1.tgz",
+ "integrity": "sha1-HKb/7dvO8XFd16riXHYW+a4iky8=",
+ "dev": true
+ },
+ "delegates": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/delegates/-/delegates-1.0.0.tgz",
+ "integrity": "sha1-hMbhWbgZBP3KWaDvRM2HDTElD5o=",
+ "dev": true
+ },
+ "depd": {
+ "version": "1.1.2",
+ "resolved": "https://registry.npmjs.org/depd/-/depd-1.1.2.tgz",
+ "integrity": "sha1-m81S4UwJd2PnSbJ0xDRu0uVgtak=",
+ "dev": true
+ },
+ "destroy": {
+ "version": "1.0.4",
+ "resolved": "https://registry.npmjs.org/destroy/-/destroy-1.0.4.tgz",
+ "integrity": "sha1-l4hXRCxEdJ5CBmE+N5RiBYJqvYA=",
+ "dev": true
+ },
+ "diff": {
+ "version": "3.5.0",
+ "resolved": "https://registry.npmjs.org/diff/-/diff-3.5.0.tgz",
+ "integrity": "sha512-A46qtFgd+g7pDZinpnwiRJtxbC1hpgf0uzP3iG89scHk0AUC7A1TGxf5OiiOUv/JMZR8GOt8hL900hV0bOy5xA==",
+ "dev": true
+ },
+ "ee-first": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/ee-first/-/ee-first-1.1.1.tgz",
+ "integrity": "sha1-WQxhFWsK4vTwJVcyoViyZrxWsh0=",
+ "dev": true
+ },
+ "error-inject": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/error-inject/-/error-inject-1.0.0.tgz",
+ "integrity": "sha1-4rPZG1Su1nLzCdlQ0VSFD6EdTzc=",
+ "dev": true
+ },
+ "escape-html": {
+ "version": "1.0.3",
+ "resolved": "https://registry.npmjs.org/escape-html/-/escape-html-1.0.3.tgz",
+ "integrity": "sha1-Aljq5NPQwJdN4cFpGI7wBR0dGYg=",
+ "dev": true
+ },
+ "escape-string-regexp": {
+ "version": "1.0.5",
+ "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz",
+ "integrity": "sha1-G2HAViGQqN/2rjuyzwIAyhMLhtQ=",
+ "dev": true
+ },
+ "esprima": {
+ "version": "4.0.1",
+ "resolved": "https://registry.npmjs.org/esprima/-/esprima-4.0.1.tgz",
+ "integrity": "sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A==",
+ "dev": true
+ },
"estree-walker": {
"version": "0.5.2",
"resolved": "https://registry.npmjs.org/estree-walker/-/estree-walker-0.5.2.tgz",
"integrity": "sha512-XpCnW/AE10ws/kDAs37cngSkvgIR8aN3G0MS85m7dUpuK2EREo9VJ00uvw6Dg/hXEpfsE1I1TvJOJr+Z+TL+ig=="
},
+ "esutils": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/esutils/-/esutils-2.0.2.tgz",
+ "integrity": "sha1-Cr9PHKpbyx96nYrMbepPqqBLrJs=",
+ "dev": true
+ },
+ "etag": {
+ "version": "1.8.1",
+ "resolved": "https://registry.npmjs.org/etag/-/etag-1.8.1.tgz",
+ "integrity": "sha1-Qa4u62XvpiJorr/qg6x9eSmbCIc=",
+ "dev": true
+ },
"expand-brackets": {
"version": "0.1.5",
"resolved": "https://registry.npmjs.org/expand-brackets/-/expand-brackets-0.1.5.tgz",
@@ -630,6 +1100,16 @@
"repeat-string": "^1.5.2"
}
},
+ "find-replace": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/find-replace/-/find-replace-2.0.1.tgz",
+ "integrity": "sha512-LzDo3Fpa30FLIBsh6DCDnMN1KW2g4QKkqKmejlImgWY67dDFPX/x9Kh/op/GK522DchQXEvDi/wD48HKW49XOQ==",
+ "dev": true,
+ "requires": {
+ "array-back": "^2.0.0",
+ "test-value": "^3.0.0"
+ }
+ },
"for-in": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/for-in/-/for-in-1.0.2.tgz",
@@ -643,16 +1123,48 @@
"for-in": "^1.0.1"
}
},
+ "fresh": {
+ "version": "0.5.2",
+ "resolved": "https://registry.npmjs.org/fresh/-/fresh-0.5.2.tgz",
+ "integrity": "sha1-PYyt2Q2XZWn6g1qx+OSyOhBWBac=",
+ "dev": true
+ },
"fs-extra": {
- "version": "5.0.0",
- "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-5.0.0.tgz",
- "integrity": "sha512-66Pm4RYbjzdyeuqudYqhFiNBbCIuI9kgRqLPSHIlXHidW8NIQtVdkM1yeZ4lXwuhbTETv3EUGMNHAAw6hiundQ==",
+ "version": "7.0.0",
+ "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-7.0.0.tgz",
+ "integrity": "sha512-EglNDLRpmaTWiD/qraZn6HREAEAHJcJOmxNEYwq6xeMKnVMAy3GUcFB+wXt2C6k4CNvB/mP1y/U3dzvKKj5OtQ==",
"requires": {
"graceful-fs": "^4.1.2",
"jsonfile": "^4.0.0",
"universalify": "^0.1.0"
}
},
+ "fs.realpath": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz",
+ "integrity": "sha1-FQStJSMVjKpA20onh8sBQRmU6k8=",
+ "dev": true
+ },
+ "get-func-name": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/get-func-name/-/get-func-name-2.0.0.tgz",
+ "integrity": "sha1-6td0q+5y4gQJQzoGY2YCPdaIekE=",
+ "dev": true
+ },
+ "glob": {
+ "version": "7.1.2",
+ "resolved": "https://registry.npmjs.org/glob/-/glob-7.1.2.tgz",
+ "integrity": "sha512-MJTUg1kjuLeQCJ+ccE4Vpa6kKVXkPYJ2mOCQyUuKLcLQsdrMCpBPUi8qVE6+YuaJkozeA9NusTAw3hLr8Xe5EQ==",
+ "dev": true,
+ "requires": {
+ "fs.realpath": "^1.0.0",
+ "inflight": "^1.0.4",
+ "inherits": "2",
+ "minimatch": "^3.0.4",
+ "once": "^1.3.0",
+ "path-is-absolute": "^1.0.0"
+ }
+ },
"glob-base": {
"version": "0.3.0",
"resolved": "https://registry.npmjs.org/glob-base/-/glob-base-0.3.0.tgz",
@@ -671,9 +1183,59 @@
}
},
"graceful-fs": {
- "version": "4.1.11",
- "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.1.11.tgz",
- "integrity": "sha1-Dovf5NHduIVNZOBOp8AOKgJuVlg="
+ "version": "4.1.15",
+ "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.1.15.tgz",
+ "integrity": "sha512-6uHUhOPEBgQ24HM+r6b/QwWfZq+yiFcipKFrOFiBEnWdy5sdzYoi+pJeQaPI5qOLRFqWmAXUPQNsielzdLoecA=="
+ },
+ "growl": {
+ "version": "1.10.5",
+ "resolved": "https://registry.npmjs.org/growl/-/growl-1.10.5.tgz",
+ "integrity": "sha512-qBr4OuELkhPenW6goKVXiv47US3clb3/IbuWF9KNKEijAy9oeHxU9IgzjvJhHkUzhaj7rOUD7+YGWqUjLp5oSA==",
+ "dev": true
+ },
+ "has-ansi": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/has-ansi/-/has-ansi-2.0.0.tgz",
+ "integrity": "sha1-NPUEnOHs3ysGSa8+8k5F7TVBbZE=",
+ "dev": true,
+ "requires": {
+ "ansi-regex": "^2.0.0"
+ }
+ },
+ "has-flag": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz",
+ "integrity": "sha1-tdRU3CGZriJWmfNGfloH87lVuv0=",
+ "dev": true
+ },
+ "he": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/he/-/he-1.1.1.tgz",
+ "integrity": "sha1-k0EP0hsAlzUVH4howvJx80J+I/0=",
+ "dev": true
+ },
+ "http-assert": {
+ "version": "1.4.0",
+ "resolved": "https://registry.npmjs.org/http-assert/-/http-assert-1.4.0.tgz",
+ "integrity": "sha512-tPVv62a6l3BbQoM/N5qo969l0OFxqpnQzNUPeYfTP6Spo4zkgWeDBD1D5thI7sDLg7jCCihXTLB0X8UtdyAy8A==",
+ "dev": true,
+ "requires": {
+ "deep-equal": "~1.0.1",
+ "http-errors": "~1.7.1"
+ }
+ },
+ "http-errors": {
+ "version": "1.7.1",
+ "resolved": "https://registry.npmjs.org/http-errors/-/http-errors-1.7.1.tgz",
+ "integrity": "sha512-jWEUgtZWGSMba9I1N3gc1HmvpBUaNC9vDdA46yScAdp+C5rdEuKWUBLWTQpW9FwSWSbYYs++b6SDCxf9UEJzfw==",
+ "dev": true,
+ "requires": {
+ "depd": "~1.1.2",
+ "inherits": "2.0.3",
+ "setprototypeof": "1.1.0",
+ "statuses": ">= 1.5.0 < 2",
+ "toidentifier": "1.0.0"
+ }
},
"iconv-lite": {
"version": "0.4.23",
@@ -683,6 +1245,28 @@
"safer-buffer": ">= 2.1.2 < 3"
}
},
+ "inflation": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/inflation/-/inflation-2.0.0.tgz",
+ "integrity": "sha1-i0F+R8KPklpFEz2RTKH9OJEH8w8=",
+ "dev": true
+ },
+ "inflight": {
+ "version": "1.0.6",
+ "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz",
+ "integrity": "sha1-Sb1jMdfQLQwJvJEKEHW6gWW1bfk=",
+ "dev": true,
+ "requires": {
+ "once": "^1.3.0",
+ "wrappy": "1"
+ }
+ },
+ "inherits": {
+ "version": "2.0.3",
+ "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.3.tgz",
+ "integrity": "sha1-Yzwsg+PaQqUC9SRmAiSA9CCCYd4=",
+ "dev": true
+ },
"is-buffer": {
"version": "1.1.6",
"resolved": "https://registry.npmjs.org/is-buffer/-/is-buffer-1.1.6.tgz",
@@ -711,6 +1295,12 @@
"resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-1.0.0.tgz",
"integrity": "sha1-rEaBd8SUNAWgkvyPKXYMb/xiBsA="
},
+ "is-generator-function": {
+ "version": "1.0.7",
+ "resolved": "https://registry.npmjs.org/is-generator-function/-/is-generator-function-1.0.7.tgz",
+ "integrity": "sha512-YZc5EwyO4f2kWCax7oegfuSr9mFz1ZvieNYBEjmukLxgXfBUbxAWGVF7GZf0zidYtoBl3WvC07YK0wT76a+Rtw==",
+ "dev": true
+ },
"is-glob": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/is-glob/-/is-glob-2.0.1.tgz",
@@ -742,6 +1332,12 @@
"resolved": "https://registry.npmjs.org/is-primitive/-/is-primitive-2.0.0.tgz",
"integrity": "sha1-IHurkWOEmcB7Kt8kCkGochADRXU="
},
+ "is-wsl": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/is-wsl/-/is-wsl-1.1.0.tgz",
+ "integrity": "sha1-HxbkqiKwTRM2tmGIpmrzxgDDpm0=",
+ "dev": true
+ },
"isarray": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/isarray/-/isarray-1.0.0.tgz",
@@ -755,6 +1351,38 @@
"isarray": "1.0.0"
}
},
+ "js-tokens": {
+ "version": "3.0.2",
+ "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-3.0.2.tgz",
+ "integrity": "sha1-mGbfOVECEw449/mWvOtlRDIJwls=",
+ "dev": true
+ },
+ "js-yaml": {
+ "version": "3.12.1",
+ "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.12.1.tgz",
+ "integrity": "sha512-um46hB9wNOKlwkHgiuyEVAybXBjwFUV0Z/RaHJblRd9DXltue9FTYvzCr9ErQrK9Adz5MU4gHWVaNUfdmrC8qA==",
+ "dev": true,
+ "requires": {
+ "argparse": "^1.0.7",
+ "esprima": "^4.0.0"
+ }
+ },
+ "json-stringify-safe": {
+ "version": "5.0.1",
+ "resolved": "https://registry.npmjs.org/json-stringify-safe/-/json-stringify-safe-5.0.1.tgz",
+ "integrity": "sha1-Epai1Y/UXxmg9s4B1lcB4sc1tus=",
+ "dev": true
+ },
+ "json5": {
+ "version": "1.0.1",
+ "resolved": "http://registry.npmjs.org/json5/-/json5-1.0.1.tgz",
+ "integrity": "sha512-aKS4WQjPenRxiQsC93MNfjx+nbF4PAdYzmd/1JIj8HYzqfbu86beTuNgXDzPknWk0n0uARlyewZo4s++ES36Ow==",
+ "dev": true,
+ "optional": true,
+ "requires": {
+ "minimist": "^1.2.0"
+ }
+ },
"jsonfile": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-4.0.0.tgz",
@@ -763,6 +1391,18 @@
"graceful-fs": "^4.1.6"
}
},
+ "jsonparse": {
+ "version": "1.3.1",
+ "resolved": "https://registry.npmjs.org/jsonparse/-/jsonparse-1.3.1.tgz",
+ "integrity": "sha1-P02uSpH6wxX3EGL4UhzCOfE2YoA=",
+ "dev": true
+ },
+ "keygrip": {
+ "version": "1.0.3",
+ "resolved": "https://registry.npmjs.org/keygrip/-/keygrip-1.0.3.tgz",
+ "integrity": "sha512-/PpesirAIfaklxUzp4Yb7xBper9MwP6hNRA6BGGUFCgbJ+BM5CKBtsoxinNXkLHAr+GXS1/lSlF2rP7cv5Fl+g==",
+ "dev": true
+ },
"kind-of": {
"version": "3.2.2",
"resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz",
@@ -771,21 +1411,554 @@
"is-buffer": "^1.1.5"
}
},
- "lru-cache": {
+ "koa": {
+ "version": "2.6.1",
+ "resolved": "https://registry.npmjs.org/koa/-/koa-2.6.1.tgz",
+ "integrity": "sha512-n9R5Eex4y0drUeqFTeCIeXyz8wjr2AxBo2Cq8LvmiXbJl4yDA5KIrecMPkhnmgACZnPXMRyCLbJoyLmpM9aFAw==",
+ "dev": true,
+ "requires": {
+ "accepts": "^1.3.5",
+ "cache-content-type": "^1.0.0",
+ "content-disposition": "~0.5.2",
+ "content-type": "^1.0.4",
+ "cookies": "~0.7.1",
+ "debug": "~3.1.0",
+ "delegates": "^1.0.0",
+ "depd": "^1.1.2",
+ "destroy": "^1.0.4",
+ "error-inject": "^1.0.0",
+ "escape-html": "^1.0.3",
+ "fresh": "~0.5.2",
+ "http-assert": "^1.3.0",
+ "http-errors": "^1.6.3",
+ "is-generator-function": "^1.0.7",
+ "koa-compose": "^4.1.0",
+ "koa-convert": "^1.2.0",
+ "koa-is-json": "^1.0.0",
+ "on-finished": "^2.3.0",
+ "only": "~0.0.2",
+ "parseurl": "^1.3.2",
+ "statuses": "^1.5.0",
+ "type-is": "^1.6.16",
+ "vary": "^1.1.2"
+ }
+ },
+ "koa-bodyparser": {
+ "version": "4.2.1",
+ "resolved": "https://registry.npmjs.org/koa-bodyparser/-/koa-bodyparser-4.2.1.tgz",
+ "integrity": "sha512-UIjPAlMZfNYDDe+4zBaOAUKYqkwAGcIU6r2ARf1UOXPAlfennQys5IiShaVeNf7KkVBlf88f2LeLvBFvKylttw==",
+ "dev": true,
+ "requires": {
+ "co-body": "^6.0.0",
+ "copy-to": "^2.0.1"
+ }
+ },
+ "koa-compose": {
+ "version": "4.1.0",
+ "resolved": "https://registry.npmjs.org/koa-compose/-/koa-compose-4.1.0.tgz",
+ "integrity": "sha512-8ODW8TrDuMYvXRwra/Kh7/rJo9BtOfPc6qO8eAfC80CnCvSjSl0bkRM24X6/XBBEyj0v1nRUQ1LyOy3dbqOWXw==",
+ "dev": true
+ },
+ "koa-compress": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/koa-compress/-/koa-compress-2.0.0.tgz",
+ "integrity": "sha1-e36ykhuEd0a14SK6n1zYpnHo6jo=",
+ "dev": true,
+ "requires": {
+ "bytes": "^2.3.0",
+ "compressible": "^2.0.0",
+ "koa-is-json": "^1.0.0",
+ "statuses": "^1.0.0"
+ },
+ "dependencies": {
+ "bytes": {
+ "version": "2.5.0",
+ "resolved": "https://registry.npmjs.org/bytes/-/bytes-2.5.0.tgz",
+ "integrity": "sha1-TJQj6i0lLCcMQbK97+/5u2tiwGo=",
+ "dev": true
+ }
+ }
+ },
+ "koa-conditional-get": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/koa-conditional-get/-/koa-conditional-get-2.0.0.tgz",
+ "integrity": "sha1-pD83I8HQFLcwo07Oit8wuTyCM/I=",
+ "dev": true
+ },
+ "koa-convert": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/koa-convert/-/koa-convert-1.2.0.tgz",
+ "integrity": "sha1-2kCHXfSd4FOQmNFwC1CCDOvNIdA=",
+ "dev": true,
+ "requires": {
+ "co": "^4.6.0",
+ "koa-compose": "^3.0.0"
+ },
+ "dependencies": {
+ "koa-compose": {
+ "version": "3.2.1",
+ "resolved": "https://registry.npmjs.org/koa-compose/-/koa-compose-3.2.1.tgz",
+ "integrity": "sha1-qFzLQLfZhtjlo0Wzoazo6rz1Tec=",
+ "dev": true,
+ "requires": {
+ "any-promise": "^1.1.0"
+ }
+ }
+ }
+ },
+ "koa-etag": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/koa-etag/-/koa-etag-3.0.0.tgz",
+ "integrity": "sha1-nvc4Ld1agqsN6xU0FckVg293HT8=",
+ "dev": true,
+ "requires": {
+ "etag": "^1.3.0",
+ "mz": "^2.1.0"
+ }
+ },
+ "koa-is-json": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/koa-is-json/-/koa-is-json-1.0.0.tgz",
+ "integrity": "sha1-JzwH7c3Ljfaiwat9We52SRRR7BQ=",
+ "dev": true
+ },
+ "koa-json": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/koa-json/-/koa-json-2.0.2.tgz",
+ "integrity": "sha1-Nq8U5uofXWRtfESihXAcb4Wk/eQ=",
+ "dev": true,
+ "requires": {
+ "koa-is-json": "1",
+ "streaming-json-stringify": "3"
+ }
+ },
+ "koa-mock-response": {
+ "version": "0.2.0",
+ "resolved": "https://registry.npmjs.org/koa-mock-response/-/koa-mock-response-0.2.0.tgz",
+ "integrity": "sha512-HmybRN1a3WqcSFvf7tycu2YhBIEHeqzm8bwcsShNWGsTgP86coZOpdI8aqYm/1DFsAQMctnpdWrva4rDr1Pibg==",
+ "dev": true,
+ "requires": {
+ "array-back": "^2.0.0",
+ "path-to-regexp": "^1.7.0",
+ "typical": "^2.6.1"
+ },
+ "dependencies": {
+ "isarray": {
+ "version": "0.0.1",
+ "resolved": "https://registry.npmjs.org/isarray/-/isarray-0.0.1.tgz",
+ "integrity": "sha1-ihis/Kmo9Bd+Cav8YDiTmwXR7t8=",
+ "dev": true
+ },
+ "path-to-regexp": {
+ "version": "1.7.0",
+ "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-1.7.0.tgz",
+ "integrity": "sha1-Wf3g9DW62suhA6hOnTvGTpa5k30=",
+ "dev": true,
+ "requires": {
+ "isarray": "0.0.1"
+ }
+ },
+ "typical": {
+ "version": "2.6.1",
+ "resolved": "https://registry.npmjs.org/typical/-/typical-2.6.1.tgz",
+ "integrity": "sha1-XAgOXWYcu+OCWdLnCjxyU+hziB0=",
+ "dev": true
+ }
+ }
+ },
+ "koa-morgan": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/koa-morgan/-/koa-morgan-1.0.1.tgz",
+ "integrity": "sha1-CAUuDODYOdPEMXi5CluzQkvvH5k=",
+ "dev": true,
+ "requires": {
+ "morgan": "^1.6.1"
+ }
+ },
+ "koa-range": {
+ "version": "0.3.0",
+ "resolved": "https://registry.npmjs.org/koa-range/-/koa-range-0.3.0.tgz",
+ "integrity": "sha1-NYjjSWRzqDmhvSZNKkKx2FvX/qw=",
+ "dev": true,
+ "requires": {
+ "stream-slice": "^0.1.2"
+ }
+ },
+ "koa-rewrite-75lb": {
+ "version": "2.1.1",
+ "resolved": "https://registry.npmjs.org/koa-rewrite-75lb/-/koa-rewrite-75lb-2.1.1.tgz",
+ "integrity": "sha512-i9ofDKLs0xNCb2PW7wKGFzBFX6+Ce3aKoZzNKPh0fkejeUOTWkkDqnjXrgqrJEP2ifX6WWsHp6VtGuXzSYLSWQ==",
+ "dev": true,
+ "requires": {
+ "path-to-regexp": "1.7.0"
+ },
+ "dependencies": {
+ "isarray": {
+ "version": "0.0.1",
+ "resolved": "https://registry.npmjs.org/isarray/-/isarray-0.0.1.tgz",
+ "integrity": "sha1-ihis/Kmo9Bd+Cav8YDiTmwXR7t8=",
+ "dev": true
+ },
+ "path-to-regexp": {
+ "version": "1.7.0",
+ "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-1.7.0.tgz",
+ "integrity": "sha1-Wf3g9DW62suhA6hOnTvGTpa5k30=",
+ "dev": true,
+ "requires": {
+ "isarray": "0.0.1"
+ }
+ }
+ }
+ },
+ "koa-route": {
+ "version": "3.2.0",
+ "resolved": "https://registry.npmjs.org/koa-route/-/koa-route-3.2.0.tgz",
+ "integrity": "sha1-dimLmaa8+p44yrb+XHmocz51i84=",
+ "dev": true,
+ "requires": {
+ "debug": "*",
+ "methods": "~1.1.0",
+ "path-to-regexp": "^1.2.0"
+ },
+ "dependencies": {
+ "isarray": {
+ "version": "0.0.1",
+ "resolved": "https://registry.npmjs.org/isarray/-/isarray-0.0.1.tgz",
+ "integrity": "sha1-ihis/Kmo9Bd+Cav8YDiTmwXR7t8=",
+ "dev": true
+ },
+ "path-to-regexp": {
+ "version": "1.7.0",
+ "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-1.7.0.tgz",
+ "integrity": "sha1-Wf3g9DW62suhA6hOnTvGTpa5k30=",
+ "dev": true,
+ "requires": {
+ "isarray": "0.0.1"
+ }
+ }
+ }
+ },
+ "koa-send": {
"version": "4.1.3",
- "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-4.1.3.tgz",
- "integrity": "sha512-fFEhvcgzuIoJVUF8fYr5KR0YqxD238zgObTps31YdADwPPAp82a4M8TrckkWyx7ekNlf9aBcVn81cFwwXngrJA==",
+ "resolved": "http://registry.npmjs.org/koa-send/-/koa-send-4.1.3.tgz",
+ "integrity": "sha512-3UetMBdaXSiw24qM2Mx5mKmxLKw5ZTPRjACjfhK6Haca55RKm9hr/uHDrkrxhSl5/S1CKI/RivZVIopiatZuTA==",
+ "dev": true,
+ "requires": {
+ "debug": "^2.6.3",
+ "http-errors": "^1.6.1",
+ "mz": "^2.6.0",
+ "resolve-path": "^1.4.0"
+ },
+ "dependencies": {
+ "debug": {
+ "version": "2.6.9",
+ "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz",
+ "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==",
+ "dev": true,
+ "requires": {
+ "ms": "2.0.0"
+ }
+ }
+ }
+ },
+ "koa-static": {
+ "version": "4.0.3",
+ "resolved": "https://registry.npmjs.org/koa-static/-/koa-static-4.0.3.tgz",
+ "integrity": "sha512-JGmxTuPWy4bH7bt6gD/OMWkhprawvRmzJSr8TWKmTL4N7+IMv3s0SedeQi5S4ilxM9Bo6ptkCyXj/7wf+VS5tg==",
+ "dev": true,
+ "requires": {
+ "debug": "^3.1.0",
+ "koa-send": "^4.1.3"
+ }
+ },
+ "load-module": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/load-module/-/load-module-1.0.0.tgz",
+ "integrity": "sha512-FmoAJI/RM4vmvIRk65g/SFCnGQC9BbALY3zy38Z0cMllNnra1+iCdxAf051LVymzE60/FweOo9or9XJiGgFshg==",
+ "dev": true,
+ "requires": {
+ "array-back": "^2.0.0"
+ }
+ },
+ "local-web-server": {
+ "version": "2.6.0",
+ "resolved": "https://registry.npmjs.org/local-web-server/-/local-web-server-2.6.0.tgz",
+ "integrity": "sha512-m7Z5zlzZFxMyiK1W8xR5TJMh00Fy9z7Po8vilSQCpeU4LG2VMK667xCkASBUepFR9fPj6heUMBHu9P/TrwDqFw==",
+ "dev": true,
+ "requires": {
+ "lws": "^1.3.0",
+ "lws-basic-auth": "^0.1.1",
+ "lws-blacklist": "^0.3.0",
+ "lws-body-parser": "^0.2.4",
+ "lws-compress": "^0.2.1",
+ "lws-conditional-get": "^0.3.4",
+ "lws-cors": "^1.0.0",
+ "lws-index": "^0.4.0",
+ "lws-json": "^0.3.2",
+ "lws-log": "^0.3.2",
+ "lws-mime": "^0.2.2",
+ "lws-mock-response": "^0.5.1",
+ "lws-range": "^1.1.0",
+ "lws-request-monitor": "^0.1.5",
+ "lws-rewrite": "^0.4.1",
+ "lws-spa": "^0.3.0",
+ "lws-static": "^0.5.0",
+ "node-version-matches": "^1.0.0"
+ }
+ },
+ "lodash.assignwith": {
+ "version": "4.2.0",
+ "resolved": "https://registry.npmjs.org/lodash.assignwith/-/lodash.assignwith-4.2.0.tgz",
+ "integrity": "sha1-EnqX8CrcQXUalU0ksN4X4QDgOOs=",
+ "dev": true
+ },
+ "lodash.camelcase": {
+ "version": "4.3.0",
+ "resolved": "https://registry.npmjs.org/lodash.camelcase/-/lodash.camelcase-4.3.0.tgz",
+ "integrity": "sha1-soqmKIorn8ZRA1x3EfZathkDMaY=",
+ "dev": true
+ },
+ "lodash.padend": {
+ "version": "4.6.1",
+ "resolved": "https://registry.npmjs.org/lodash.padend/-/lodash.padend-4.6.1.tgz",
+ "integrity": "sha1-U8y6BH0G4VjTEfRdpiX05J5vFm4=",
+ "dev": true
+ },
+ "lodash.pick": {
+ "version": "4.4.0",
+ "resolved": "https://registry.npmjs.org/lodash.pick/-/lodash.pick-4.4.0.tgz",
+ "integrity": "sha1-UvBWEP/53tQiYRRB7R/BI6AwAbM=",
+ "dev": true
+ },
+ "lodash.throttle": {
+ "version": "4.1.1",
+ "resolved": "https://registry.npmjs.org/lodash.throttle/-/lodash.throttle-4.1.1.tgz",
+ "integrity": "sha1-wj6RtxAkKscMN/HhzaknTMOb8vQ=",
+ "dev": true
+ },
+ "lws": {
+ "version": "1.3.0",
+ "resolved": "https://registry.npmjs.org/lws/-/lws-1.3.0.tgz",
+ "integrity": "sha512-2gOJzVtgjg4mA1cyWnzkICR/NLuMD24sbRSwQeVZeVkadp0VOKTlpmnjvA1tQpkb1TGrcOS+N+3vKMJST8tt2w==",
+ "dev": true,
+ "requires": {
+ "ansi-escape-sequences": "^4.0.0",
+ "array-back": "^2.0.0",
+ "byte-size": "^4.0.3",
+ "cli-commands": "^0.4.0",
+ "command-line-args": "^5.0.2",
+ "command-line-usage": "^5.0.5",
+ "koa": "^2.5.2",
+ "load-module": "^1.0.0",
+ "lodash.assignwith": "^4.2.0",
+ "node-version-matches": "^1.0.0",
+ "opn": "^5.3.0",
+ "reduce-flatten": "^2.0.0",
+ "typical": "^3.0.0",
+ "walk-back": "^3.0.0",
+ "ws": "^5.2.1"
+ }
+ },
+ "lws-basic-auth": {
+ "version": "0.1.1",
+ "resolved": "https://registry.npmjs.org/lws-basic-auth/-/lws-basic-auth-0.1.1.tgz",
+ "integrity": "sha512-npPpqkOFzJzB9yJ2pGXmiYOswH+0n86ro75WhromeGuNo0GfE18ZLI/VCOVWmBbeXp2pcnPIMUAdkNSgukpAww==",
+ "dev": true,
+ "requires": {
+ "basic-auth": "^1.1.0"
+ }
+ },
+ "lws-blacklist": {
+ "version": "0.3.0",
+ "resolved": "https://registry.npmjs.org/lws-blacklist/-/lws-blacklist-0.3.0.tgz",
+ "integrity": "sha512-ZA8dujYaZwRNMBhgP+oGsZi9tum44Ba6VHsA3JrV1JVrjZ8c65kLaO/41rLBqQDKP3SDPu7dLity4YLwe1FuNQ==",
+ "dev": true,
+ "requires": {
+ "array-back": "^2.0.0",
+ "path-to-regexp": "^2.2.0"
+ }
+ },
+ "lws-body-parser": {
+ "version": "0.2.4",
+ "resolved": "https://registry.npmjs.org/lws-body-parser/-/lws-body-parser-0.2.4.tgz",
+ "integrity": "sha512-XKJzbzK97TUsewIPA5J2RpEk7kRoJcL+/Du6JlwzqIq84tWuXMfiT2a4Ncj12+tRWrdY2avV6d8uLhqlHLz1yg==",
+ "dev": true,
+ "requires": {
+ "koa-bodyparser": "^4.2.0"
+ }
+ },
+ "lws-compress": {
+ "version": "0.2.1",
+ "resolved": "https://registry.npmjs.org/lws-compress/-/lws-compress-0.2.1.tgz",
+ "integrity": "sha512-14++1o6U8upi3DLx9J2O2sFELsijEJF9utoFxSH4Stoo9SdU2Cxw6BtqQTrb9SEA6O6IsApzstdMYnq8floLSg==",
+ "dev": true,
+ "requires": {
+ "koa-compress": "^2.0.0"
+ }
+ },
+ "lws-conditional-get": {
+ "version": "0.3.4",
+ "resolved": "https://registry.npmjs.org/lws-conditional-get/-/lws-conditional-get-0.3.4.tgz",
+ "integrity": "sha512-6asZSfM747snhdz4xexRllm09pebz8pjYeg2d5khLR53D/OJznZWHsIqW0JGiScJObri2D7+H4z7yRLBjokT7g==",
+ "dev": true,
+ "requires": {
+ "koa-conditional-get": "^2.0.0",
+ "koa-etag": "^3.0.0"
+ }
+ },
+ "lws-cors": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/lws-cors/-/lws-cors-1.0.0.tgz",
+ "integrity": "sha512-4C0m4lvYdAnpAa03tr9AqziB4d8SRPh4beQBuzPiefv7N9/tpVdrl9kgXrUe1hLHhISnVJ5MoOZuZ6wFeMiU4g==",
+ "dev": true,
+ "requires": {
+ "@koa/cors": "^2.2.1"
+ }
+ },
+ "lws-index": {
+ "version": "0.4.0",
+ "resolved": "https://registry.npmjs.org/lws-index/-/lws-index-0.4.0.tgz",
+ "integrity": "sha512-k+mkqgMSzx1ipzVpaxsAJU4Qe7R1kp1B/u+qC+d1Y3l+auBz+bLcIxL4dYKfaxLqiz0IFwg1dZwGzVm/dd7FFw==",
+ "dev": true,
+ "requires": {
+ "serve-index-75lb": "^2.0.0"
+ }
+ },
+ "lws-json": {
+ "version": "0.3.2",
+ "resolved": "https://registry.npmjs.org/lws-json/-/lws-json-0.3.2.tgz",
+ "integrity": "sha512-ElmCA8hi3GPMfxbtiI015PDHuJovhhcbXX/qTTTifXhopedAzIBzn/rF5dHZHE4k7HQDYfbiaPgPMbmpv9dMvQ==",
+ "dev": true,
+ "requires": {
+ "koa-json": "^2.0.2"
+ }
+ },
+ "lws-log": {
+ "version": "0.3.2",
+ "resolved": "https://registry.npmjs.org/lws-log/-/lws-log-0.3.2.tgz",
+ "integrity": "sha512-DRp4bFl4a7hjwR/RjARjhFLEXs8pIeqKbUvojaAl1hhfRBuW2JsDxRSKC+ViQN06CW4Qypg3ZsztMMR8dRO8dA==",
+ "dev": true,
+ "requires": {
+ "koa-morgan": "^1.0.1",
+ "stream-log-stats": "^2.0.2"
+ }
+ },
+ "lws-mime": {
+ "version": "0.2.2",
+ "resolved": "https://registry.npmjs.org/lws-mime/-/lws-mime-0.2.2.tgz",
+ "integrity": "sha512-cWBj9CuuSvvaqdYMPiXRid0QhzJmr+5gWAA96pEDOiW8tMCMoxl7CIgTpHXZwhJzCqdI84RZDVm+FswByATS5w==",
+ "dev": true
+ },
+ "lws-mock-response": {
+ "version": "0.5.1",
+ "resolved": "https://registry.npmjs.org/lws-mock-response/-/lws-mock-response-0.5.1.tgz",
+ "integrity": "sha512-4R5Q1RmRglC0pqEwywrS5g62aKaLQsteMnShGmWU9aQ/737Bq0/3qbQ3mb8VbMk3lLzo3ZaNZ1DUsPgVvZaXNQ==",
"dev": true,
"requires": {
- "pseudomap": "^1.0.2",
- "yallist": "^2.1.2"
+ "array-back": "^2.0.0",
+ "koa-mock-response": "0.2.0",
+ "load-module": "^1.0.0",
+ "reduce-flatten": "^2.0.0"
}
},
+ "lws-range": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/lws-range/-/lws-range-1.1.0.tgz",
+ "integrity": "sha512-Mpx6FdO58Z4l6DAXlATsC2zm10QvyGYElQvFd7P1xqUSTPoYG0wAxfjlpqI+Qdb2O7W4Ah21yESVnPEwae3SIw==",
+ "dev": true,
+ "requires": {
+ "koa-range": "^0.3.0"
+ }
+ },
+ "lws-request-monitor": {
+ "version": "0.1.5",
+ "resolved": "https://registry.npmjs.org/lws-request-monitor/-/lws-request-monitor-0.1.5.tgz",
+ "integrity": "sha512-u9eczHPowH17ftUjQ8ysutGDADNZdDD6k8wgFMzOB7/rRq1Is12lTYA4u8pfKZ8C2oyoy+HYsDSrOzTwespTlA==",
+ "dev": true,
+ "requires": {
+ "byte-size": "^4.0.2"
+ }
+ },
+ "lws-rewrite": {
+ "version": "0.4.1",
+ "resolved": "https://registry.npmjs.org/lws-rewrite/-/lws-rewrite-0.4.1.tgz",
+ "integrity": "sha512-EHUdbqfdwc4Baa7iXOdG2y815WC040Cing1GwhM9VsBL7lHtZ7zl3EHzjWFv3styoO3qNqZ4W0xCey4hoo/aYg==",
+ "dev": true,
+ "requires": {
+ "array-back": "^2.0.0",
+ "koa-rewrite-75lb": "^2.1.1",
+ "koa-route": "^3.2.0",
+ "path-to-regexp": "^1.7.0",
+ "req-then": "^0.6.4",
+ "stream-read-all": "^0.1.2",
+ "typical": "^2.6.1"
+ },
+ "dependencies": {
+ "isarray": {
+ "version": "0.0.1",
+ "resolved": "https://registry.npmjs.org/isarray/-/isarray-0.0.1.tgz",
+ "integrity": "sha1-ihis/Kmo9Bd+Cav8YDiTmwXR7t8=",
+ "dev": true
+ },
+ "path-to-regexp": {
+ "version": "1.7.0",
+ "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-1.7.0.tgz",
+ "integrity": "sha1-Wf3g9DW62suhA6hOnTvGTpa5k30=",
+ "dev": true,
+ "requires": {
+ "isarray": "0.0.1"
+ }
+ },
+ "typical": {
+ "version": "2.6.1",
+ "resolved": "https://registry.npmjs.org/typical/-/typical-2.6.1.tgz",
+ "integrity": "sha1-XAgOXWYcu+OCWdLnCjxyU+hziB0=",
+ "dev": true
+ }
+ }
+ },
+ "lws-spa": {
+ "version": "0.3.0",
+ "resolved": "https://registry.npmjs.org/lws-spa/-/lws-spa-0.3.0.tgz",
+ "integrity": "sha512-8wxZl5dOI/CQsJ6oOG8Y7B4khjlQXfB7GlVkjYFPuOYM+JIw/QzMvezKjKweG0qGePmHJVHWa38+CyololV4aw==",
+ "dev": true,
+ "requires": {
+ "koa-route": "^3.2.0",
+ "koa-send": "^4.1.3"
+ }
+ },
+ "lws-static": {
+ "version": "0.5.0",
+ "resolved": "https://registry.npmjs.org/lws-static/-/lws-static-0.5.0.tgz",
+ "integrity": "sha512-r3QIeJfBox/hSJLSL7TPhNSZsTKE0r4mWYHbGZ+DwrBcKbLt1ljsh5NAtmJpsqCcjYpyOuD/DlsZ0yQY9VI8bA==",
+ "dev": true,
+ "requires": {
+ "koa-static": "^4.0.2"
+ }
+ },
+ "make-error": {
+ "version": "1.3.5",
+ "resolved": "https://registry.npmjs.org/make-error/-/make-error-1.3.5.tgz",
+ "integrity": "sha512-c3sIjNUow0+8swNwVpqoH4YCShKNFkMaw6oH1mNS2haDZQqkeZFlHS3dhoeEbKKmJB4vXpJucU6oH75aDYeE9g==",
+ "dev": true
+ },
"math-random": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/math-random/-/math-random-1.0.1.tgz",
"integrity": "sha1-izqsWIuKZuSXXjzepn97sylgH6w="
},
+ "media-typer": {
+ "version": "0.3.0",
+ "resolved": "http://registry.npmjs.org/media-typer/-/media-typer-0.3.0.tgz",
+ "integrity": "sha1-hxDXrwqmJvj/+hzgAWhUUmMlV0g=",
+ "dev": true
+ },
+ "methods": {
+ "version": "1.1.2",
+ "resolved": "https://registry.npmjs.org/methods/-/methods-1.1.2.tgz",
+ "integrity": "sha1-VSmk1nZUE07cxSZmVoNbD4Ua/O4=",
+ "dev": true
+ },
"micromatch": {
"version": "2.3.11",
"resolved": "https://registry.npmjs.org/micromatch/-/micromatch-2.3.11.tgz",
@@ -806,6 +1979,137 @@
"regex-cache": "^0.4.2"
}
},
+ "mime-db": {
+ "version": "1.37.0",
+ "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.37.0.tgz",
+ "integrity": "sha512-R3C4db6bgQhlIhPU48fUtdVmKnflq+hRdad7IyKhtFj06VPNVdk2RhiYL3UjQIlso8L+YxAtFkobT0VK+S/ybg==",
+ "dev": true
+ },
+ "mime-types": {
+ "version": "2.1.21",
+ "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.21.tgz",
+ "integrity": "sha512-3iL6DbwpyLzjR3xHSFNFeb9Nz/M8WDkX33t1GFQnFOllWk8pOrh/LSrB5OXlnlW5P9LH73X6loW/eogc+F5lJg==",
+ "dev": true,
+ "requires": {
+ "mime-db": "~1.37.0"
+ }
+ },
+ "minimatch": {
+ "version": "3.0.4",
+ "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.0.4.tgz",
+ "integrity": "sha512-yJHVQEhyqPLUTgt9B83PXu6W3rx4MvvHvSUvToogpwoGDOUQ+yDrR0HRot+yOCdCO7u4hX3pWft6kWBBcqh0UA==",
+ "dev": true,
+ "requires": {
+ "brace-expansion": "^1.1.7"
+ }
+ },
+ "minimist": {
+ "version": "1.2.0",
+ "resolved": "http://registry.npmjs.org/minimist/-/minimist-1.2.0.tgz",
+ "integrity": "sha1-o1AIsg9BOD7sH7kU9M1d95omQoQ=",
+ "dev": true
+ },
+ "mkdirp": {
+ "version": "0.5.1",
+ "resolved": "http://registry.npmjs.org/mkdirp/-/mkdirp-0.5.1.tgz",
+ "integrity": "sha1-MAV0OOrGz3+MR2fzhkjWaX11yQM=",
+ "dev": true,
+ "requires": {
+ "minimist": "0.0.8"
+ },
+ "dependencies": {
+ "minimist": {
+ "version": "0.0.8",
+ "resolved": "http://registry.npmjs.org/minimist/-/minimist-0.0.8.tgz",
+ "integrity": "sha1-hX/Kv8M5fSYluCKCYuhqp6ARsF0=",
+ "dev": true
+ }
+ }
+ },
+ "mocha": {
+ "version": "5.2.0",
+ "resolved": "https://registry.npmjs.org/mocha/-/mocha-5.2.0.tgz",
+ "integrity": "sha512-2IUgKDhc3J7Uug+FxMXuqIyYzH7gJjXECKe/w43IGgQHTSj3InJi+yAA7T24L9bQMRKiUEHxEX37G5JpVUGLcQ==",
+ "dev": true,
+ "requires": {
+ "browser-stdout": "1.3.1",
+ "commander": "2.15.1",
+ "debug": "3.1.0",
+ "diff": "3.5.0",
+ "escape-string-regexp": "1.0.5",
+ "glob": "7.1.2",
+ "growl": "1.10.5",
+ "he": "1.1.1",
+ "minimatch": "3.0.4",
+ "mkdirp": "0.5.1",
+ "supports-color": "5.4.0"
+ }
+ },
+ "morgan": {
+ "version": "1.9.1",
+ "resolved": "https://registry.npmjs.org/morgan/-/morgan-1.9.1.tgz",
+ "integrity": "sha512-HQStPIV4y3afTiCYVxirakhlCfGkI161c76kKFca7Fk1JusM//Qeo1ej2XaMniiNeaZklMVrh3vTtIzpzwbpmA==",
+ "dev": true,
+ "requires": {
+ "basic-auth": "~2.0.0",
+ "debug": "2.6.9",
+ "depd": "~1.1.2",
+ "on-finished": "~2.3.0",
+ "on-headers": "~1.0.1"
+ },
+ "dependencies": {
+ "basic-auth": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/basic-auth/-/basic-auth-2.0.1.tgz",
+ "integrity": "sha512-NF+epuEdnUYVlGuhaxbbq+dvJttwLnGY+YixlXlME5KpQ5W3CnXA5cVTneY3SPbPDRkcjMbifrwmFYcClgOZeg==",
+ "dev": true,
+ "requires": {
+ "safe-buffer": "5.1.2"
+ }
+ },
+ "debug": {
+ "version": "2.6.9",
+ "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz",
+ "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==",
+ "dev": true,
+ "requires": {
+ "ms": "2.0.0"
+ }
+ }
+ }
+ },
+ "ms": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz",
+ "integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g=",
+ "dev": true
+ },
+ "mz": {
+ "version": "2.7.0",
+ "resolved": "https://registry.npmjs.org/mz/-/mz-2.7.0.tgz",
+ "integrity": "sha512-z81GNO7nnYMEhrGh9LeymoE4+Yr0Wn5McHIZMK5cfQCl+NDX08sCZgUc9/6MHni9IWuFLm1Z3HTCXu2z9fN62Q==",
+ "dev": true,
+ "requires": {
+ "any-promise": "^1.0.0",
+ "object-assign": "^4.0.1",
+ "thenify-all": "^1.0.0"
+ }
+ },
+ "negotiator": {
+ "version": "0.6.1",
+ "resolved": "https://registry.npmjs.org/negotiator/-/negotiator-0.6.1.tgz",
+ "integrity": "sha1-KzJxhOiZIQEXeyhWP7XnECrNDKk=",
+ "dev": true
+ },
+ "node-version-matches": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/node-version-matches/-/node-version-matches-1.0.0.tgz",
+ "integrity": "sha512-E1OQnAUB+BvEyNTXTWpUUMAWXYCa7yjiS64djOuTJEkm20yaQfNmWTfx/kvN6nC7fc0GQS182IaefOPxQvpxXg==",
+ "dev": true,
+ "requires": {
+ "semver": "^5.5.0"
+ }
+ },
"normalize-path": {
"version": "2.1.1",
"resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-2.1.1.tgz",
@@ -814,6 +2118,12 @@
"remove-trailing-separator": "^1.0.1"
}
},
+ "object-assign": {
+ "version": "4.1.1",
+ "resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz",
+ "integrity": "sha1-IQmtx5ZYh8/AXLvUQsrIv7s2CGM=",
+ "dev": true
+ },
"object.omit": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/object.omit/-/object.omit-2.0.1.tgz",
@@ -823,6 +2133,45 @@
"is-extendable": "^0.1.1"
}
},
+ "on-finished": {
+ "version": "2.3.0",
+ "resolved": "https://registry.npmjs.org/on-finished/-/on-finished-2.3.0.tgz",
+ "integrity": "sha1-IPEzZIGwg811M3mSoWlxqi2QaUc=",
+ "dev": true,
+ "requires": {
+ "ee-first": "1.1.1"
+ }
+ },
+ "on-headers": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/on-headers/-/on-headers-1.0.1.tgz",
+ "integrity": "sha1-ko9dD0cNSTQmUepnlLCFfBAGk/c=",
+ "dev": true
+ },
+ "once": {
+ "version": "1.4.0",
+ "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz",
+ "integrity": "sha1-WDsap3WWHUsROsF9nFC6753Xa9E=",
+ "dev": true,
+ "requires": {
+ "wrappy": "1"
+ }
+ },
+ "only": {
+ "version": "0.0.2",
+ "resolved": "https://registry.npmjs.org/only/-/only-0.0.2.tgz",
+ "integrity": "sha1-Kv3oTQPlC5qO3EROMGEKcCle37Q=",
+ "dev": true
+ },
+ "opn": {
+ "version": "5.4.0",
+ "resolved": "https://registry.npmjs.org/opn/-/opn-5.4.0.tgz",
+ "integrity": "sha512-YF9MNdVy/0qvJvDtunAOzFw9iasOQHpVthTCvGzxt61Il64AYSGdK+rYwld7NAfk9qJ7dt+hymBNSc9LNYS+Sw==",
+ "dev": true,
+ "requires": {
+ "is-wsl": "^1.1.0"
+ }
+ },
"parse-glob": {
"version": "3.0.4",
"resolved": "https://registry.npmjs.org/parse-glob/-/parse-glob-3.0.4.tgz",
@@ -834,31 +2183,56 @@
"is-glob": "^2.0.0"
}
},
+ "parseurl": {
+ "version": "1.3.2",
+ "resolved": "https://registry.npmjs.org/parseurl/-/parseurl-1.3.2.tgz",
+ "integrity": "sha1-/CidTtiZMRlGDBViUyYs3I3mW/M=",
+ "dev": true
+ },
+ "path-is-absolute": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz",
+ "integrity": "sha1-F0uSaHNVNP+8es5r9TpanhtcX18=",
+ "dev": true
+ },
"path-parse": {
"version": "1.0.5",
"resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.5.tgz",
"integrity": "sha1-PBrfhx6pzWyUMbbqK9dKD/BVxME="
},
- "pegjs": {
- "version": "0.10.0",
- "resolved": "https://registry.npmjs.org/pegjs/-/pegjs-0.10.0.tgz",
- "integrity": "sha1-z4uvrm7d/0tafvsYUmnqr0YQ3b0="
+ "path-to-regexp": {
+ "version": "2.4.0",
+ "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-2.4.0.tgz",
+ "integrity": "sha512-G6zHoVqC6GGTQkZwF4lkuEyMbVOjoBKAEybQUypI1WTkqinCOrq2x6U2+phkJ1XsEMTy4LjtwPI7HW+NVrRR2w==",
+ "dev": true
+ },
+ "pathval": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/pathval/-/pathval-1.1.0.tgz",
+ "integrity": "sha1-uULm1L3mUwBe9rcTYd74cn0GReA=",
+ "dev": true
},
"preserve": {
"version": "0.2.0",
"resolved": "https://registry.npmjs.org/preserve/-/preserve-0.2.0.tgz",
"integrity": "sha1-gV7R9uvGWSb4ZbMQwHE7yzMVzks="
},
- "pseudomap": {
- "version": "1.0.2",
- "resolved": "https://registry.npmjs.org/pseudomap/-/pseudomap-1.0.2.tgz",
- "integrity": "sha1-8FKijacOYYkX7wqKw0wa5aaChrM=",
+ "process-nextick-args": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/process-nextick-args/-/process-nextick-args-2.0.0.tgz",
+ "integrity": "sha512-MtEC1TqN0EU5nephaJ4rAtThHtC86dNN9qCuEhtshvpVBkAW5ZO7BASN9REnF9eoXGcRub+pFuKEpOHE+HbEMw==",
+ "dev": true
+ },
+ "qs": {
+ "version": "6.5.2",
+ "resolved": "https://registry.npmjs.org/qs/-/qs-6.5.2.tgz",
+ "integrity": "sha512-N5ZAX4/LxJmF+7wN74pUD6qAh9/wnvdQcjq9TZjevvXzSUo7bfmw91saqMjzGS2xq91/odN2dW/WOl7qQHNDGA==",
"dev": true
},
"randomatic": {
- "version": "3.0.0",
- "resolved": "https://registry.npmjs.org/randomatic/-/randomatic-3.0.0.tgz",
- "integrity": "sha512-VdxFOIEY3mNO5PtSRkkle/hPJDHvQhK21oa73K4yAc9qmp6N429gAyF1gZMOTMeS0/AYzaV/2Trcef+NaIonSA==",
+ "version": "3.1.1",
+ "resolved": "https://registry.npmjs.org/randomatic/-/randomatic-3.1.1.tgz",
+ "integrity": "sha512-TuDE5KxZ0J461RVjrJZCJc+J+zCkTb1MbH9AQUq68sMhOMcy9jLcb3BrZKgp9q9Ncltdg4QVqWrH02W2EFFVYw==",
"requires": {
"is-number": "^4.0.0",
"kind-of": "^6.0.0",
@@ -877,6 +2251,53 @@
}
}
},
+ "raw-body": {
+ "version": "2.3.3",
+ "resolved": "https://registry.npmjs.org/raw-body/-/raw-body-2.3.3.tgz",
+ "integrity": "sha512-9esiElv1BrZoI3rCDuOuKCBRbuApGGaDPQfjSflGxdy4oyzqghxu6klEkkVIvBje+FF0BX9coEv8KqW6X/7njw==",
+ "dev": true,
+ "requires": {
+ "bytes": "3.0.0",
+ "http-errors": "1.6.3",
+ "iconv-lite": "0.4.23",
+ "unpipe": "1.0.0"
+ },
+ "dependencies": {
+ "http-errors": {
+ "version": "1.6.3",
+ "resolved": "http://registry.npmjs.org/http-errors/-/http-errors-1.6.3.tgz",
+ "integrity": "sha1-i1VoC7S+KDoLW/TqLjhYC+HZMg0=",
+ "dev": true,
+ "requires": {
+ "depd": "~1.1.2",
+ "inherits": "2.0.3",
+ "setprototypeof": "1.1.0",
+ "statuses": ">= 1.4.0 < 2"
+ }
+ }
+ }
+ },
+ "readable-stream": {
+ "version": "2.3.6",
+ "resolved": "http://registry.npmjs.org/readable-stream/-/readable-stream-2.3.6.tgz",
+ "integrity": "sha512-tQtKA9WIAhBF3+VLAseyMqZeBjW0AHJoxOtYqSUZNJxauErmLbVm2FW1y+J/YA9dUrAC39ITejlZWhVIwawkKw==",
+ "dev": true,
+ "requires": {
+ "core-util-is": "~1.0.0",
+ "inherits": "~2.0.3",
+ "isarray": "~1.0.0",
+ "process-nextick-args": "~2.0.0",
+ "safe-buffer": "~5.1.1",
+ "string_decoder": "~1.1.1",
+ "util-deprecate": "~1.0.1"
+ }
+ },
+ "reduce-flatten": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/reduce-flatten/-/reduce-flatten-2.0.0.tgz",
+ "integrity": "sha512-EJ4UNY/U1t2P/2k6oqotuX2Cc3T6nxJwsM0N0asT7dhrtH1ltUxDn4NalSYmPE2rCkVpcf/X6R0wDwcFpzhd4w==",
+ "dev": true
+ },
"regex-cache": {
"version": "0.4.4",
"resolved": "https://registry.npmjs.org/regex-cache/-/regex-cache-0.4.4.tgz",
@@ -891,15 +2312,36 @@
"integrity": "sha1-wkvOKig62tW8P1jg1IJJuSN52O8="
},
"repeat-element": {
- "version": "1.1.2",
- "resolved": "https://registry.npmjs.org/repeat-element/-/repeat-element-1.1.2.tgz",
- "integrity": "sha1-7wiaF40Ug7quTZPrmLT55OEdmQo="
+ "version": "1.1.3",
+ "resolved": "https://registry.npmjs.org/repeat-element/-/repeat-element-1.1.3.tgz",
+ "integrity": "sha512-ahGq0ZnV5m5XtZLMb+vP76kcAM5nkLqk0lpqAuojSKGgQtn4eRi4ZZGm2olo2zKFH+sMsWaqOCW1dqAnOru72g=="
},
"repeat-string": {
"version": "1.6.1",
"resolved": "https://registry.npmjs.org/repeat-string/-/repeat-string-1.6.1.tgz",
"integrity": "sha1-jcrkcOHIirwtYA//Sndihtp15jc="
},
+ "req-then": {
+ "version": "0.6.4",
+ "resolved": "https://registry.npmjs.org/req-then/-/req-then-0.6.4.tgz",
+ "integrity": "sha512-Uf7xsK1qPqPUetESHemNQ7nGtgOxngSFtlcAOOkx0lDAo+XRZpEA9QDrGBdyOfGq4b+a0z/D5gR2VJ+pp/dzBA==",
+ "dev": true,
+ "requires": {
+ "array-back": "^2.0.0",
+ "defer-promise": "^1.0.1",
+ "lodash.pick": "^4.4.0",
+ "stream-read-all": "^0.1.0",
+ "typical": "^2.6.1"
+ },
+ "dependencies": {
+ "typical": {
+ "version": "2.6.1",
+ "resolved": "https://registry.npmjs.org/typical/-/typical-2.6.1.tgz",
+ "integrity": "sha1-XAgOXWYcu+OCWdLnCjxyU+hziB0=",
+ "dev": true
+ }
+ }
+ },
"resolve": {
"version": "1.8.1",
"resolved": "https://registry.npmjs.org/resolve/-/resolve-1.8.1.tgz",
@@ -908,40 +2350,64 @@
"path-parse": "^1.0.5"
}
},
+ "resolve-path": {
+ "version": "1.4.0",
+ "resolved": "https://registry.npmjs.org/resolve-path/-/resolve-path-1.4.0.tgz",
+ "integrity": "sha1-xL2p9e+y/OZSR4c6s2u02DT+Fvc=",
+ "dev": true,
+ "requires": {
+ "http-errors": "~1.6.2",
+ "path-is-absolute": "1.0.1"
+ },
+ "dependencies": {
+ "http-errors": {
+ "version": "1.6.3",
+ "resolved": "http://registry.npmjs.org/http-errors/-/http-errors-1.6.3.tgz",
+ "integrity": "sha1-i1VoC7S+KDoLW/TqLjhYC+HZMg0=",
+ "dev": true,
+ "requires": {
+ "depd": "~1.1.2",
+ "inherits": "2.0.3",
+ "setprototypeof": "1.1.0",
+ "statuses": ">= 1.4.0 < 2"
+ }
+ }
+ }
+ },
"rollup": {
- "version": "0.62.0",
- "resolved": "https://registry.npmjs.org/rollup/-/rollup-0.62.0.tgz",
- "integrity": "sha512-mZS0aIGfYzuJySJD78znu9/hCJsNfBzg4lDuZGMj0hFVcYHt2evNRHv8aqiu9/w6z6Qn8AQoVl4iyEjDmisGeA==",
+ "version": "0.68.2",
+ "resolved": "https://registry.npmjs.org/rollup/-/rollup-0.68.2.tgz",
+ "integrity": "sha512-WgjNCXYv7ZbtStIap1+tz4pd2zwz0XYN//OILwEY6dINIFLVizK1iWdu+ZtUURL/OKnp8Lv2w8FBds8YihzX7Q==",
"requires": {
"@types/estree": "0.0.39",
"@types/node": "*"
}
},
"rollup-plugin-node-resolve": {
- "version": "3.3.0",
- "resolved": "https://registry.npmjs.org/rollup-plugin-node-resolve/-/rollup-plugin-node-resolve-3.3.0.tgz",
- "integrity": "sha512-9zHGr3oUJq6G+X0oRMYlzid9fXicBdiydhwGChdyeNRGPcN/majtegApRKHLR5drboUvEWU+QeUmGTyEZQs3WA==",
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/rollup-plugin-node-resolve/-/rollup-plugin-node-resolve-4.0.0.tgz",
+ "integrity": "sha512-7Ni+/M5RPSUBfUaP9alwYQiIKnKeXCOHiqBpKUl9kwp3jX5ZJtgXAait1cne6pGEVUUztPD6skIKH9Kq9sNtfw==",
"requires": {
- "builtin-modules": "^2.0.0",
+ "builtin-modules": "^3.0.0",
"is-module": "^1.0.0",
- "resolve": "^1.1.6"
+ "resolve": "^1.8.1"
}
},
"rollup-plugin-typescript2": {
- "version": "0.15.1",
- "resolved": "https://registry.npmjs.org/rollup-plugin-typescript2/-/rollup-plugin-typescript2-0.15.1.tgz",
- "integrity": "sha512-lJ/yfIj1fmp0KyfgPmd2QFeRpLgXlc58fS3Ha9Loc7/p3qByDL7CRndcI9MflE/pUSrfUdDjZMR0mHSKvqrZ+g==",
+ "version": "0.18.1",
+ "resolved": "https://registry.npmjs.org/rollup-plugin-typescript2/-/rollup-plugin-typescript2-0.18.1.tgz",
+ "integrity": "sha512-aR2m5NCCAUV/KpcKgCWX6Giy8rTko9z92b5t0NX9eZyjOftCvcdDFa1C9Ze/9yp590hnRymr5hG0O9SAXi1oUg==",
"requires": {
- "fs-extra": "^5.0.0",
- "resolve": "^1.7.1",
- "rollup-pluginutils": "^2.0.1",
- "tslib": "1.9.2"
+ "fs-extra": "7.0.0",
+ "resolve": "1.8.1",
+ "rollup-pluginutils": "2.3.3",
+ "tslib": "1.9.3"
}
},
"rollup-pluginutils": {
- "version": "2.3.0",
- "resolved": "https://registry.npmjs.org/rollup-pluginutils/-/rollup-pluginutils-2.3.0.tgz",
- "integrity": "sha512-xB6hsRsjdJdIYWEyYUJy/3ki5g69wrf0luHPGNK3ZSocV6HLNfio59l3dZ3TL4xUwEKgROhFi9jOCt6c5gfUWw==",
+ "version": "2.3.3",
+ "resolved": "https://registry.npmjs.org/rollup-pluginutils/-/rollup-pluginutils-2.3.3.tgz",
+ "integrity": "sha512-2XZwja7b6P5q4RZ5FhyX1+f46xi1Z3qBKigLRZ6VTZjwbN0K1IFGMlwm06Uu0Emcre2Z63l77nq/pzn+KxIEoA==",
"requires": {
"estree-walker": "^0.5.2",
"micromatch": "^2.3.11"
@@ -952,6 +2418,12 @@
"resolved": "https://registry.npmjs.org/rw/-/rw-1.3.3.tgz",
"integrity": "sha1-P4Yt+pGrdmsUiF700BEkv9oHT7Q="
},
+ "safe-buffer": {
+ "version": "5.1.2",
+ "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz",
+ "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==",
+ "dev": true
+ },
"safer-buffer": {
"version": "2.1.2",
"resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz",
@@ -963,42 +2435,444 @@
"integrity": "sha512-4SJ3dm0WAwWy/NVeioZh5AntkdJoWKxHxcmyP622fOkgHa4z3R0TdBJICINyaSDE6uNwVc8gZr+ZinwZAH4xIA==",
"dev": true
},
- "sigmund": {
- "version": "1.0.1",
- "resolved": "https://registry.npmjs.org/sigmund/-/sigmund-1.0.1.tgz",
- "integrity": "sha1-P/IfGYytIXX587eBhT/ZTQ0ZtZA=",
+ "serve-index-75lb": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/serve-index-75lb/-/serve-index-75lb-2.0.1.tgz",
+ "integrity": "sha512-/d9r8bqJlFQcwy0a0nb1KnWAA+Mno+V+VaoKocdkbW5aXKRQd/+4bfnRhQRQr6uEoYwTRJ4xgztOyCJvWcpBpQ==",
+ "dev": true,
+ "requires": {
+ "accepts": "~1.3.4",
+ "batch": "0.6.1",
+ "debug": "2.6.9",
+ "escape-html": "~1.0.3",
+ "http-errors": "~1.6.2",
+ "mime-types": "~2.1.18",
+ "parseurl": "~1.3.2"
+ },
+ "dependencies": {
+ "debug": {
+ "version": "2.6.9",
+ "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz",
+ "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==",
+ "dev": true,
+ "requires": {
+ "ms": "2.0.0"
+ }
+ },
+ "http-errors": {
+ "version": "1.6.3",
+ "resolved": "http://registry.npmjs.org/http-errors/-/http-errors-1.6.3.tgz",
+ "integrity": "sha1-i1VoC7S+KDoLW/TqLjhYC+HZMg0=",
+ "dev": true,
+ "requires": {
+ "depd": "~1.1.2",
+ "inherits": "2.0.3",
+ "setprototypeof": "1.1.0",
+ "statuses": ">= 1.4.0 < 2"
+ }
+ }
+ }
+ },
+ "setprototypeof": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/setprototypeof/-/setprototypeof-1.1.0.tgz",
+ "integrity": "sha512-BvE/TwpZX4FXExxOxZyRGQQv651MSwmWKZGqvmPcRIjDqWub67kTKuIMx43cZZrS/cBBzwBcNDWoFxt2XEFIpQ==",
+ "dev": true
+ },
+ "source-map": {
+ "version": "0.6.1",
+ "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz",
+ "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==",
+ "dev": true
+ },
+ "source-map-support": {
+ "version": "0.5.9",
+ "resolved": "https://registry.npmjs.org/source-map-support/-/source-map-support-0.5.9.tgz",
+ "integrity": "sha512-gR6Rw4MvUlYy83vP0vxoVNzM6t8MUXqNuRsuBmBHQDu1Fh6X015FrLdgoDKcNdkwGubozq0P4N0Q37UyFVr1EA==",
+ "dev": true,
+ "requires": {
+ "buffer-from": "^1.0.0",
+ "source-map": "^0.6.0"
+ }
+ },
+ "sprintf-js": {
+ "version": "1.0.3",
+ "resolved": "https://registry.npmjs.org/sprintf-js/-/sprintf-js-1.0.3.tgz",
+ "integrity": "sha1-BOaSb2YolTVPPdAVIDYzuFcpfiw=",
+ "dev": true
+ },
+ "statuses": {
+ "version": "1.5.0",
+ "resolved": "https://registry.npmjs.org/statuses/-/statuses-1.5.0.tgz",
+ "integrity": "sha1-Fhx9rBd2Wf2YEfQ3cfqZOBR4Yow=",
+ "dev": true
+ },
+ "stream-log-stats": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/stream-log-stats/-/stream-log-stats-2.0.2.tgz",
+ "integrity": "sha512-b1LccxXhMlOQQrzSqapQHyZ3UI00QTAv+8VecFgsJz//sGB5LFl/+mkFeWBVVI2/E4DlCT4sGgvLExB/VTVFfA==",
+ "dev": true,
+ "requires": {
+ "JSONStream": "^1.3.1",
+ "ansi-escape-sequences": "^3.0.0",
+ "byte-size": "^3.0.0",
+ "common-log-format": "~0.1.3",
+ "lodash.throttle": "^4.1.1",
+ "stream-via": "^1.0.3",
+ "table-layout": "~0.4.0"
+ },
+ "dependencies": {
+ "ansi-escape-sequences": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/ansi-escape-sequences/-/ansi-escape-sequences-3.0.0.tgz",
+ "integrity": "sha1-HBg5S2r5t2/5pjUJ+kl2af0s5T4=",
+ "dev": true,
+ "requires": {
+ "array-back": "^1.0.3"
+ }
+ },
+ "array-back": {
+ "version": "1.0.4",
+ "resolved": "https://registry.npmjs.org/array-back/-/array-back-1.0.4.tgz",
+ "integrity": "sha1-ZEun8JX3/898Q7Xw3DnTwfA8Bjs=",
+ "dev": true,
+ "requires": {
+ "typical": "^2.6.0"
+ }
+ },
+ "byte-size": {
+ "version": "3.0.0",
+ "resolved": "http://registry.npmjs.org/byte-size/-/byte-size-3.0.0.tgz",
+ "integrity": "sha1-QG+eI2aqXav2NnLrKR17sJSV2nU=",
+ "dev": true
+ },
+ "typical": {
+ "version": "2.6.1",
+ "resolved": "https://registry.npmjs.org/typical/-/typical-2.6.1.tgz",
+ "integrity": "sha1-XAgOXWYcu+OCWdLnCjxyU+hziB0=",
+ "dev": true
+ }
+ }
+ },
+ "stream-read-all": {
+ "version": "0.1.2",
+ "resolved": "https://registry.npmjs.org/stream-read-all/-/stream-read-all-0.1.2.tgz",
+ "integrity": "sha512-KX42xBg853m+KnwRtwCKT95ShopAbY/MNKs2dBQ0WkNeuJdqgQYRtGRbTlxdx0L6t979h3z/wMq2eMSAu7Tygw==",
+ "dev": true
+ },
+ "stream-slice": {
+ "version": "0.1.2",
+ "resolved": "https://registry.npmjs.org/stream-slice/-/stream-slice-0.1.2.tgz",
+ "integrity": "sha1-LcT04bk2+xPz6zmi3vGTJ5jQeks=",
+ "dev": true
+ },
+ "stream-via": {
+ "version": "1.0.4",
+ "resolved": "https://registry.npmjs.org/stream-via/-/stream-via-1.0.4.tgz",
+ "integrity": "sha512-DBp0lSvX5G9KGRDTkR/R+a29H+Wk2xItOF+MpZLLNDWbEV9tGPnqLPxHEYjmiz8xGtJHRIqmI+hCjmNzqoA4nQ==",
+ "dev": true
+ },
+ "streaming-json-stringify": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/streaming-json-stringify/-/streaming-json-stringify-3.1.0.tgz",
+ "integrity": "sha1-gCAEN6mTzDnE/gAmO3s7kDrIevU=",
+ "dev": true,
+ "requires": {
+ "json-stringify-safe": "5",
+ "readable-stream": "2"
+ }
+ },
+ "string_decoder": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz",
+ "integrity": "sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==",
+ "dev": true,
+ "requires": {
+ "safe-buffer": "~5.1.0"
+ }
+ },
+ "strip-ansi": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-3.0.1.tgz",
+ "integrity": "sha1-ajhfuIU9lS1f8F0Oiq+UJ43GPc8=",
+ "dev": true,
+ "requires": {
+ "ansi-regex": "^2.0.0"
+ }
+ },
+ "strip-bom": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/strip-bom/-/strip-bom-3.0.0.tgz",
+ "integrity": "sha1-IzTBjpx1n3vdVv3vfprj1YjmjtM=",
+ "dev": true,
+ "optional": true
+ },
+ "supports-color": {
+ "version": "5.4.0",
+ "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.4.0.tgz",
+ "integrity": "sha512-zjaXglF5nnWpsq470jSv6P9DwPvgLkuapYmfDm3JWOm0vkNTVF2tI4UrN2r6jH1qM/uc/WtxYY1hYoA2dOKj5w==",
+ "dev": true,
+ "requires": {
+ "has-flag": "^3.0.0"
+ }
+ },
+ "table-layout": {
+ "version": "0.4.4",
+ "resolved": "https://registry.npmjs.org/table-layout/-/table-layout-0.4.4.tgz",
+ "integrity": "sha512-uNaR3SRMJwfdp9OUr36eyEi6LLsbcTqTO/hfTsNviKsNeyMBPICJCC7QXRF3+07bAP6FRwA8rczJPBqXDc0CkQ==",
+ "dev": true,
+ "requires": {
+ "array-back": "^2.0.0",
+ "deep-extend": "~0.6.0",
+ "lodash.padend": "^4.6.1",
+ "typical": "^2.6.1",
+ "wordwrapjs": "^3.0.0"
+ },
+ "dependencies": {
+ "typical": {
+ "version": "2.6.1",
+ "resolved": "https://registry.npmjs.org/typical/-/typical-2.6.1.tgz",
+ "integrity": "sha1-XAgOXWYcu+OCWdLnCjxyU+hziB0=",
+ "dev": true
+ }
+ }
+ },
+ "test-value": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/test-value/-/test-value-3.0.0.tgz",
+ "integrity": "sha512-sVACdAWcZkSU9x7AOmJo5TqE+GyNJknHaHsMrR6ZnhjVlVN9Yx6FjHrsKZ3BjIpPCT68zYesPWkakrNupwfOTQ==",
+ "dev": true,
+ "requires": {
+ "array-back": "^2.0.0",
+ "typical": "^2.6.1"
+ },
+ "dependencies": {
+ "typical": {
+ "version": "2.6.1",
+ "resolved": "https://registry.npmjs.org/typical/-/typical-2.6.1.tgz",
+ "integrity": "sha1-XAgOXWYcu+OCWdLnCjxyU+hziB0=",
+ "dev": true
+ }
+ }
+ },
+ "thenify": {
+ "version": "3.3.0",
+ "resolved": "https://registry.npmjs.org/thenify/-/thenify-3.3.0.tgz",
+ "integrity": "sha1-5p44obq+lpsBCCB5eLn2K4hgSDk=",
+ "dev": true,
+ "requires": {
+ "any-promise": "^1.0.0"
+ }
+ },
+ "thenify-all": {
+ "version": "1.6.0",
+ "resolved": "https://registry.npmjs.org/thenify-all/-/thenify-all-1.6.0.tgz",
+ "integrity": "sha1-GhkY1ALY/D+Y+/I02wvMjMEOlyY=",
+ "dev": true,
+ "requires": {
+ "thenify": ">= 3.1.0 < 4"
+ }
+ },
+ "through": {
+ "version": "2.3.8",
+ "resolved": "http://registry.npmjs.org/through/-/through-2.3.8.tgz",
+ "integrity": "sha1-DdTJ/6q8NXlgsbckEV1+Doai4fU=",
+ "dev": true
+ },
+ "toidentifier": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/toidentifier/-/toidentifier-1.0.0.tgz",
+ "integrity": "sha512-yaOH/Pk/VEhBWWTlhI+qXxDFXlejDGcQipMlyxda9nthulaxLZUNcUqFxokp0vcYnvteJln5FNQDRrxj3YcbVw==",
"dev": true
},
+ "ts-mocha": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/ts-mocha/-/ts-mocha-2.0.0.tgz",
+ "integrity": "sha512-Rj6+vvwKtOTs5GsNO1jLl4DIXUGnyAg5HFt2Yb4SHIRN45clTJkHWpNdTxCSL0u+1oeavSYJah6d1PZ++Ju5pw==",
+ "dev": true,
+ "requires": {
+ "ts-node": "7.0.0",
+ "tsconfig-paths": "^3.5.0"
+ }
+ },
+ "ts-node": {
+ "version": "7.0.0",
+ "resolved": "https://registry.npmjs.org/ts-node/-/ts-node-7.0.0.tgz",
+ "integrity": "sha512-klJsfswHP0FuOLsvBZ/zzCfUvakOSSxds78mVeK7I+qP76YWtxf16hEZsp3U+b0kIo82R5UatGFeblYMqabb2Q==",
+ "dev": true,
+ "requires": {
+ "arrify": "^1.0.0",
+ "buffer-from": "^1.1.0",
+ "diff": "^3.1.0",
+ "make-error": "^1.1.1",
+ "minimist": "^1.2.0",
+ "mkdirp": "^0.5.1",
+ "source-map-support": "^0.5.6",
+ "yn": "^2.0.0"
+ }
+ },
+ "tsconfig-paths": {
+ "version": "3.6.0",
+ "resolved": "https://registry.npmjs.org/tsconfig-paths/-/tsconfig-paths-3.6.0.tgz",
+ "integrity": "sha512-mrqQIP2F4e03aMTCiPdedCIT300//+q0ET53o5WqqtQjmEICxP9yfz/sHTpPqXpssuJEzODsEzJaLRaf5J2X1g==",
+ "dev": true,
+ "optional": true,
+ "requires": {
+ "@types/json5": "^0.0.29",
+ "deepmerge": "^2.0.1",
+ "json5": "^1.0.1",
+ "minimist": "^1.2.0",
+ "strip-bom": "^3.0.0"
+ }
+ },
"tslib": {
- "version": "1.9.2",
- "resolved": "https://registry.npmjs.org/tslib/-/tslib-1.9.2.tgz",
- "integrity": "sha512-AVP5Xol3WivEr7hnssHDsaM+lVrVXWUvd1cfXTRkTj80b//6g2wIFEH6hZG0muGZRnHGrfttpdzRk3YlBkWjKw=="
+ "version": "1.9.3",
+ "resolved": "https://registry.npmjs.org/tslib/-/tslib-1.9.3.tgz",
+ "integrity": "sha512-4krF8scpejhaOgqzBEcGM7yDIEfi0/8+8zDRZhNZZ2kjmHJ4hv3zCbQWxoJGz1iw5U0Jl0nma13xzHXcncMavQ=="
+ },
+ "tslint": {
+ "version": "5.12.0",
+ "resolved": "https://registry.npmjs.org/tslint/-/tslint-5.12.0.tgz",
+ "integrity": "sha512-CKEcH1MHUBhoV43SA/Jmy1l24HJJgI0eyLbBNSRyFlsQvb9v6Zdq+Nz2vEOH00nC5SUx4SneJ59PZUS/ARcokQ==",
+ "dev": true,
+ "requires": {
+ "babel-code-frame": "^6.22.0",
+ "builtin-modules": "^1.1.1",
+ "chalk": "^2.3.0",
+ "commander": "^2.12.1",
+ "diff": "^3.2.0",
+ "glob": "^7.1.1",
+ "js-yaml": "^3.7.0",
+ "minimatch": "^3.0.4",
+ "resolve": "^1.3.2",
+ "semver": "^5.3.0",
+ "tslib": "^1.8.0",
+ "tsutils": "^2.27.2"
+ },
+ "dependencies": {
+ "builtin-modules": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/builtin-modules/-/builtin-modules-1.1.1.tgz",
+ "integrity": "sha1-Jw8HbFpywC9bZaR9+Uxf46J4iS8=",
+ "dev": true
+ }
+ }
},
- "typescript": {
- "version": "2.9.1",
- "resolved": "https://registry.npmjs.org/typescript/-/typescript-2.9.1.tgz",
- "integrity": "sha512-h6pM2f/GDchCFlldnriOhs1QHuwbnmj6/v7499eMHqPeW4V2G0elua2eIc2nu8v2NdHV0Gm+tzX83Hr6nUFjQA==",
+ "tsutils": {
+ "version": "2.29.0",
+ "resolved": "https://registry.npmjs.org/tsutils/-/tsutils-2.29.0.tgz",
+ "integrity": "sha512-g5JVHCIJwzfISaXpXE1qvNalca5Jwob6FjI4AoPlqMusJ6ftFE7IkkFoMhVLRgK+4Kx3gkzb8UZK5t5yTTvEmA==",
+ "dev": true,
+ "requires": {
+ "tslib": "^1.8.1"
+ }
+ },
+ "type-detect": {
+ "version": "4.0.8",
+ "resolved": "https://registry.npmjs.org/type-detect/-/type-detect-4.0.8.tgz",
+ "integrity": "sha512-0fr/mIH1dlO+x7TlcMy+bIDqKPsw/70tVyeHW787goQjhmqaZe10uwLujubK9q9Lg6Fiho1KUKDYz0Z7k7g5/g==",
"dev": true
},
- "typescript-formatter": {
- "version": "7.2.2",
- "resolved": "https://registry.npmjs.org/typescript-formatter/-/typescript-formatter-7.2.2.tgz",
- "integrity": "sha512-V7vfI9XArVhriOTYHPzMU2WUnm5IMdu9X/CPxs8mIMGxmTBFpDABlbkBka64PZJ9/xgQeRpK8KzzAG4MPzxBDQ==",
+ "type-is": {
+ "version": "1.6.16",
+ "resolved": "https://registry.npmjs.org/type-is/-/type-is-1.6.16.tgz",
+ "integrity": "sha512-HRkVv/5qY2G6I8iab9cI7v1bOIdhm94dVjQCPFElW9W+3GeDOSHmy2EBYe4VTApuzolPcmgFTN3ftVJRKR2J9Q==",
"dev": true,
"requires": {
- "commandpost": "^1.0.0",
- "editorconfig": "^0.15.0"
+ "media-typer": "0.3.0",
+ "mime-types": "~2.1.18"
}
},
+ "typescript": {
+ "version": "3.2.2",
+ "resolved": "https://registry.npmjs.org/typescript/-/typescript-3.2.2.tgz",
+ "integrity": "sha512-VCj5UiSyHBjwfYacmDuc/NOk4QQixbE+Wn7MFJuS0nRuPQbof132Pw4u53dm264O8LPc2MVsc7RJNml5szurkg==",
+ "dev": true
+ },
+ "typical": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/typical/-/typical-3.0.0.tgz",
+ "integrity": "sha512-2/pGDQD/q1iJWlrj357aEKGIlRvHirm81x04lsg51hreiohy2snAXoFc9dIHFWEx9LsfOVA5K7lUGM9rcUqwlQ==",
+ "dev": true
+ },
"universalify": {
"version": "0.1.2",
"resolved": "https://registry.npmjs.org/universalify/-/universalify-0.1.2.tgz",
"integrity": "sha512-rBJeI5CXAlmy1pV+617WB9J63U6XcazHHF2f2dbJix4XzpUF0RS3Zbj0FGIOCAva5P/d/GBOYaACQ1w+0azUkg=="
},
- "yallist": {
- "version": "2.1.2",
- "resolved": "https://registry.npmjs.org/yallist/-/yallist-2.1.2.tgz",
- "integrity": "sha1-HBH5IY8HYImkfdUS+TxmmaaoHVI=",
+ "unpipe": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/unpipe/-/unpipe-1.0.0.tgz",
+ "integrity": "sha1-sr9O6FFKrmFltIF4KdIbLvSZBOw=",
+ "dev": true
+ },
+ "util-deprecate": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz",
+ "integrity": "sha1-RQ1Nyfpw3nMnYvvS1KKJgUGaDM8=",
+ "dev": true
+ },
+ "vary": {
+ "version": "1.1.2",
+ "resolved": "https://registry.npmjs.org/vary/-/vary-1.1.2.tgz",
+ "integrity": "sha1-IpnwLG3tMNSllhsLn3RSShj2NPw=",
+ "dev": true
+ },
+ "walk-back": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/walk-back/-/walk-back-3.0.0.tgz",
+ "integrity": "sha1-I1h4ejXakQMtrV6S+AsSNw2HlcU=",
+ "dev": true
+ },
+ "wordwrapjs": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/wordwrapjs/-/wordwrapjs-3.0.0.tgz",
+ "integrity": "sha512-mO8XtqyPvykVCsrwj5MlOVWvSnCdT+C+QVbm6blradR7JExAhbkZ7hZ9A+9NUtwzSqrlUo9a67ws0EiILrvRpw==",
+ "dev": true,
+ "requires": {
+ "reduce-flatten": "^1.0.1",
+ "typical": "^2.6.1"
+ },
+ "dependencies": {
+ "reduce-flatten": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/reduce-flatten/-/reduce-flatten-1.0.1.tgz",
+ "integrity": "sha1-JYx479FT3fk8tWEjf2EYTzaW4yc=",
+ "dev": true
+ },
+ "typical": {
+ "version": "2.6.1",
+ "resolved": "https://registry.npmjs.org/typical/-/typical-2.6.1.tgz",
+ "integrity": "sha1-XAgOXWYcu+OCWdLnCjxyU+hziB0=",
+ "dev": true
+ }
+ }
+ },
+ "wrappy": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz",
+ "integrity": "sha1-tSQ9jz7BqjXxNkYFvA0QNuMKtp8=",
+ "dev": true
+ },
+ "ws": {
+ "version": "5.2.2",
+ "resolved": "https://registry.npmjs.org/ws/-/ws-5.2.2.tgz",
+ "integrity": "sha512-jaHFD6PFv6UgoIVda6qZllptQsMlDEJkTQcybzzXDYM1XO9Y8em691FGMPmM46WGyLU4z9KMgQN+qrux/nhlHA==",
+ "dev": true,
+ "requires": {
+ "async-limiter": "~1.0.0"
+ }
+ },
+ "ylru": {
+ "version": "1.2.1",
+ "resolved": "https://registry.npmjs.org/ylru/-/ylru-1.2.1.tgz",
+ "integrity": "sha512-faQrqNMzcPCHGVC2aaOINk13K+aaBDUPjGWl0teOXywElLjyVAB6Oe2jj62jHYtwsU49jXhScYbvPENK+6zAvQ==",
+ "dev": true
+ },
+ "yn": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/yn/-/yn-2.0.0.tgz",
+ "integrity": "sha1-5a2ryKz0CPY4X8dklWhMiOavaJo=",
"dev": true
}
}
diff --git a/deps/v8/tools/turbolizer/package.json b/deps/v8/tools/turbolizer/package.json
index 1d8efc5286..ae354ba393 100644
--- a/deps/v8/tools/turbolizer/package.json
+++ b/deps/v8/tools/turbolizer/package.json
@@ -4,26 +4,31 @@
"description": "Visualization tool for V8 TurboFan IR graphs",
"scripts": {
"build": "rollup -c",
- "watch": "tsc --watch",
+ "watch": "rollup -c -w",
"deploy": "./deploy.sh",
- "format": "tsfmt -r"
+ "test": "ts-mocha -p tsconfig.test.json test/**/*-test.ts",
+ "dev-server": "ws",
+ "presubmit": "tslint --project ./tslint.json --fix"
},
"author": "The V8 team",
"license": "MIT",
"dependencies": {
- "@types/d3": "^5.0.0",
- "d3": "^5.5.0",
- "pegjs": "^0.10.0",
- "rollup": "^0.62.0",
- "rollup-plugin-node-resolve": "^3.3.0",
- "rollup-plugin-typescript2": "^0.15.1"
+ "@types/d3": "^5.5.0",
+ "d3": "^5.7.0",
+ "rollup": "^0.68.2",
+ "rollup-plugin-node-resolve": "^4.0.0",
+ "rollup-plugin-typescript2": "^0.18.1"
},
"repository": {
"type": "git",
"url": "https://github.com/v8/v8.git"
},
"devDependencies": {
- "typescript": "^2.9.1",
- "typescript-formatter": "^7.2.2"
+ "chai": "^4.2.0",
+ "local-web-server": "^2.6.0",
+ "mocha": "^5.2.0",
+ "ts-mocha": "^2.0.0",
+ "typescript": "^3.2.2",
+ "tslint": "^5.12.0"
}
}
diff --git a/deps/v8/tools/turbolizer/rollup.config.js b/deps/v8/tools/turbolizer/rollup.config.js
index bb34555a7d..05b69b8515 100644
--- a/deps/v8/tools/turbolizer/rollup.config.js
+++ b/deps/v8/tools/turbolizer/rollup.config.js
@@ -5,8 +5,28 @@
import typescript from 'rollup-plugin-typescript2';
import node from 'rollup-plugin-node-resolve';
+import path from 'path'
+
+const onwarn = warning => {
+ // Silence circular dependency warning for moment package
+ const node_modules = path.normalize('node_modules/');
+ if (warning.code === 'CIRCULAR_DEPENDENCY' &&
+ !warning.importer.indexOf(node_modules)) {
+ return
+ }
+
+ console.warn(`(!) ${warning.message}`)
+}
+
export default {
input: "src/turbo-visualizer.ts",
- plugins: [node(), typescript({abortOnError:false})],
- output: {file: "build/turbolizer.js", format: "iife", sourcemap: true}
+ plugins: [node(), typescript({
+ abortOnError: false
+ })],
+ output: {
+ file: "build/turbolizer.js",
+ format: "iife",
+ sourcemap: true
+ },
+ onwarn: onwarn
};
diff --git a/deps/v8/tools/turbolizer/src/code-view.ts b/deps/v8/tools/turbolizer/src/code-view.ts
index 5975ff7a60..298f08b01d 100644
--- a/deps/v8/tools/turbolizer/src/code-view.ts
+++ b/deps/v8/tools/turbolizer/src/code-view.ts
@@ -2,16 +2,17 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-import {Source,SourceResolver,sourcePositionToStringKey} from "./source-resolver.js"
-import {SelectionBroker} from "./selection-broker.js"
-import {View} from "./view.js"
-import {MySelection} from "./selection.js"
-import {anyToString,ViewElements} from "./util.js"
+import { Source, SourceResolver, sourcePositionToStringKey } from "../src/source-resolver";
+import { SelectionBroker } from "../src/selection-broker";
+import { View } from "../src/view";
+import { MySelection } from "../src/selection";
+import { ViewElements } from "../src/util";
+import { SelectionHandler } from "./selection-handler";
export enum CodeMode {
MAIN_SOURCE = "main function",
INLINED_SOURCE = "inlined function"
-};
+}
export class CodeView extends View {
broker: SelectionBroker;
@@ -29,11 +30,10 @@ export class CodeView extends View {
return sourceContainer;
}
- constructor(parentId, broker, sourceResolver, sourceFunction, codeMode: CodeMode) {
- super(parentId);
- let view = this;
+ constructor(parent: HTMLElement, broker: SelectionBroker, sourceResolver: SourceResolver, sourceFunction: Source, codeMode: CodeMode) {
+ super(parent);
+ const view = this;
view.broker = broker;
- view.source = null;
view.sourceResolver = sourceResolver;
view.source = sourceFunction;
view.codeMode = codeMode;
@@ -44,11 +44,11 @@ export class CodeView extends View {
clear: function () {
view.selection.clear();
view.updateSelection();
- broker.broadcastClear(this)
+ broker.broadcastClear(this);
},
select: function (sourcePositions, selected) {
const locations = [];
- for (var sourcePosition of sourcePositions) {
+ for (const sourcePosition of sourcePositions) {
locations.push(sourcePosition);
sourceResolver.addInliningPositions(sourcePosition, locations);
}
@@ -62,7 +62,7 @@ export class CodeView extends View {
for (const location of locations) {
const translated = sourceResolver.translateToSourceId(view.source.sourceId, location);
if (!translated) continue;
- view.selection.select(translated, selected);
+ view.selection.select([translated], selected);
}
view.updateSelection(firstSelect);
},
@@ -100,9 +100,6 @@ export class CodeView extends View {
mkVisible.apply(scrollIntoView);
}
- initializeContent(data, rememberedSelection) {
- }
-
getCodeHtmlElementName() {
return `source-pre-${this.source.sourceId}`;
}
@@ -117,7 +114,6 @@ export class CodeView extends View {
}
onSelectLine(lineNumber: number, doClear: boolean) {
- const key = anyToString(lineNumber);
if (doClear) {
this.selectionHandler.clear();
}
@@ -127,7 +123,7 @@ export class CodeView extends View {
}
}
- onSelectSourcePosition(sourcePosition, doClear) {
+ onSelectSourcePosition(sourcePosition, doClear: boolean) {
if (doClear) {
this.selectionHandler.clear();
}
@@ -135,7 +131,7 @@ export class CodeView extends View {
}
initializeCode() {
- var view = this;
+ const view = this;
const source = this.source;
const sourceText = source.sourceText;
if (!sourceText) return;
@@ -145,14 +141,14 @@ export class CodeView extends View {
} else {
sourceContainer.classList.add("inlined-source");
}
- var codeHeader = document.createElement("div");
+ const codeHeader = document.createElement("div");
codeHeader.setAttribute("id", this.getCodeHeaderHtmlElementName());
codeHeader.classList.add("code-header");
- var codeFileFunction = document.createElement("div");
+ const codeFileFunction = document.createElement("div");
codeFileFunction.classList.add("code-file-function");
codeFileFunction.innerHTML = `${source.sourceName}:${source.functionName}`;
codeHeader.appendChild(codeFileFunction);
- var codeModeDiv = document.createElement("div");
+ const codeModeDiv = document.createElement("div");
codeModeDiv.classList.add("code-mode");
codeModeDiv.innerHTML = `${this.codeMode}`;
codeHeader.appendChild(codeModeDiv);
@@ -160,7 +156,7 @@ export class CodeView extends View {
clearDiv.style.clear = "both";
codeHeader.appendChild(clearDiv);
sourceContainer.appendChild(codeHeader);
- var codePre = document.createElement("pre");
+ const codePre = document.createElement("pre");
codePre.setAttribute("id", this.getCodeHtmlElementName());
codePre.classList.add("prettyprint");
sourceContainer.appendChild(codePre);
@@ -171,7 +167,7 @@ export class CodeView extends View {
} else {
codePre.style.display = "none";
}
- }
+ };
if (sourceText != "") {
codePre.classList.add("linenums");
codePre.textContent = sourceText;
@@ -182,9 +178,17 @@ export class CodeView extends View {
console.log(e);
}
- view.divNode.onclick = function (e) {
- view.selectionHandler.clear();
- }
+ view.divNode.onclick = function (e: MouseEvent) {
+ if (e.target instanceof Element && e.target.tagName == "DIV") {
+ const targetDiv = e.target as HTMLDivElement;
+ if (targetDiv.classList.contains("line-number")) {
+ e.stopPropagation();
+ view.onSelectLine(Number(targetDiv.dataset.lineNumber), !e.shiftKey);
+ }
+ } else {
+ view.selectionHandler.clear();
+ }
+ };
const base: number = source.startPosition;
let current = 0;
@@ -197,13 +201,14 @@ export class CodeView extends View {
currentLineElement.id = "li" + i;
currentLineElement.dataset.lineNumber = "" + lineNumber;
const spans = currentLineElement.childNodes;
- for (let j = 0; j < spans.length; ++j) {
- const currentSpan = spans[j];
- const pos = base + current;
- const end = pos + currentSpan.textContent.length;
- current += currentSpan.textContent.length;
- this.insertSourcePositions(currentSpan, lineNumber, pos, end, newlineAdjust);
- newlineAdjust = 0;
+ for (const currentSpan of spans) {
+ if (currentSpan instanceof HTMLSpanElement) {
+ const pos = base + current;
+ const end = pos + currentSpan.textContent.length;
+ current += currentSpan.textContent.length;
+ this.insertSourcePositions(currentSpan, lineNumber, pos, end, newlineAdjust);
+ newlineAdjust = 0;
+ }
}
this.insertLineNumber(currentLineElement, lineNumber);
@@ -220,44 +225,44 @@ export class CodeView extends View {
insertSourcePositions(currentSpan, lineNumber, pos, end, adjust) {
const view = this;
const sps = this.sourceResolver.sourcePositionsInRange(this.source.sourceId, pos - adjust, end);
+ let offset = 0;
for (const sourcePosition of sps) {
this.sourceResolver.addAnyPositionToLine(lineNumber, sourcePosition);
- const textnode = currentSpan.tagName == 'SPAN' ? currentSpan.firstChild : currentSpan;
- const replacementNode = textnode.splitText(Math.max(0, sourcePosition.scriptOffset - pos));
+ const textnode = currentSpan.tagName == 'SPAN' ? currentSpan.lastChild : currentSpan;
+ if (!(textnode instanceof Text)) continue;
+ const splitLength = Math.max(0, sourcePosition.scriptOffset - pos - offset);
+ offset += splitLength;
+ const replacementNode = textnode.splitText(splitLength);
const span = document.createElement('span');
span.setAttribute("scriptOffset", sourcePosition.scriptOffset);
- span.classList.add("source-position")
+ span.classList.add("source-position");
const marker = document.createElement('span');
- marker.classList.add("marker")
+ marker.classList.add("marker");
span.appendChild(marker);
const inlining = this.sourceResolver.getInliningForPosition(sourcePosition);
if (inlining != undefined && view.showAdditionalInliningPosition) {
const sourceName = this.sourceResolver.getSourceName(inlining.sourceId);
const inliningMarker = document.createElement('span');
- inliningMarker.classList.add("inlining-marker")
- inliningMarker.setAttribute("data-descr", `${sourceName} was inlined here`)
+ inliningMarker.classList.add("inlining-marker");
+ inliningMarker.setAttribute("data-descr", `${sourceName} was inlined here`);
span.appendChild(inliningMarker);
}
span.onclick = function (e) {
e.stopPropagation();
- view.onSelectSourcePosition(sourcePosition, !e.shiftKey)
+ view.onSelectSourcePosition(sourcePosition, !e.shiftKey);
};
view.addHtmlElementToSourcePosition(sourcePosition, span);
textnode.parentNode.insertBefore(span, replacementNode);
}
}
- insertLineNumber(lineElement, lineNumber) {
+ insertLineNumber(lineElement: HTMLElement, lineNumber: number) {
const view = this;
const lineNumberElement = document.createElement("div");
lineNumberElement.classList.add("line-number");
- lineNumberElement.dataset.lineNumber = lineNumber;
- lineNumberElement.innerText = lineNumber;
- lineNumberElement.onclick = function (e) {
- e.stopPropagation();
- view.onSelectLine(lineNumber, !e.shiftKey);
- }
- lineElement.insertBefore(lineNumberElement, lineElement.firstChild)
+ lineNumberElement.dataset.lineNumber = `${lineNumber}`;
+ lineNumberElement.innerText = `${lineNumber}`;
+ lineElement.insertBefore(lineNumberElement, lineElement.firstChild);
// Don't add lines to source positions if not in backwardsCompatibility mode.
if (this.source.backwardsCompatibility === true) {
for (const sourcePosition of this.sourceResolver.linetoSourcePositions(lineNumber - 1)) {
@@ -266,6 +271,4 @@ export class CodeView extends View {
}
}
- deleteContent() { }
- detachSelection() { return null; }
}
diff --git a/deps/v8/tools/turbolizer/src/disassembly-view.ts b/deps/v8/tools/turbolizer/src/disassembly-view.ts
index 8142b2aa0d..4b8fc6ea2d 100644
--- a/deps/v8/tools/turbolizer/src/disassembly-view.ts
+++ b/deps/v8/tools/turbolizer/src/disassembly-view.ts
@@ -2,16 +2,30 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-import {PROF_COLS, UNICODE_BLOCK} from "./constants.js"
-import {SelectionBroker} from "./selection-broker.js"
-import {TextView} from "./text-view.js"
+import { PROF_COLS, UNICODE_BLOCK } from "../src/constants";
+import { SelectionBroker } from "../src/selection-broker";
+import { TextView } from "../src/text-view";
+import { MySelection } from "./selection";
+import { anyToString, interpolate } from "./util";
+import { InstructionSelectionHandler } from "./selection-handler";
+
+const toolboxHTML = `<div id="disassembly-toolbox">
+<form>
+ <label><input id="show-instruction-address" type="checkbox" name="instruction-address">Show addresses</label>
+ <label><input id="show-instruction-binary" type="checkbox" name="instruction-binary">Show binary literal</label>
+</form>
+</div>`;
export class DisassemblyView extends TextView {
SOURCE_POSITION_HEADER_REGEX: any;
- addr_event_counts: any;
- total_event_counts: any;
- max_event_counts: any;
- pos_lines: Array<any>;
+ addrEventCounts: any;
+ totalEventCounts: any;
+ maxEventCounts: any;
+ posLines: Array<any>;
+ instructionSelectionHandler: InstructionSelectionHandler;
+ offsetSelection: MySelection;
+ showInstructionAddressHandler: () => void;
+ showInstructionBinaryHandler: () => void;
createViewElement() {
const pane = document.createElement('div');
@@ -21,222 +35,310 @@ export class DisassemblyView extends TextView {
<ul id='disassembly-list' class='nolinenums noindent'>
</ul>
</pre>`;
+
return pane;
}
constructor(parentId, broker: SelectionBroker) {
- super(parentId, broker, null);
- let view = this;
- const sourceResolver = broker.sourceResolver;
- let ADDRESS_STYLE = {
- css: 'tag',
- linkHandler: function (text, fragment) {
- const matches = text.match(/0x[0-9a-f]{8,16}\s*(?<offset>[0-9a-f]+)/);
+ super(parentId, broker);
+ const view = this;
+ const ADDRESS_STYLE = {
+ associateData: (text, fragment: HTMLElement) => {
+ const matches = text.match(/(?<address>0?x?[0-9a-fA-F]{8,16})(?<addressSpace>\s+)(?<offset>[0-9a-f]+)(?<offsetSpace>\s*)/);
const offset = Number.parseInt(matches.groups["offset"], 16);
+ const addressElement = document.createElement("SPAN");
+ addressElement.className = "instruction-address";
+ addressElement.innerText = matches.groups["address"];
+ const offsetElement = document.createElement("SPAN");
+ offsetElement.innerText = matches.groups["offset"];
+ fragment.appendChild(addressElement);
+ fragment.appendChild(document.createTextNode(matches.groups["addressSpace"]));
+ fragment.appendChild(offsetElement);
+ fragment.appendChild(document.createTextNode(matches.groups["offsetSpace"]));
+ fragment.classList.add('tag');
+
if (!Number.isNaN(offset)) {
- const [nodes, blockId] = sourceResolver.nodesForPCOffset(offset)
- console.log("nodes for", offset, offset.toString(16), " are ", nodes);
- if (nodes.length > 0) {
- for (const nodeId of nodes) {
- view.addHtmlElementForNodeId(nodeId, fragment);
- }
- return (e) => {
- console.log(offset, nodes);
- e.stopPropagation();
- if (!e.shiftKey) {
- view.selectionHandler.clear();
- }
- view.selectionHandler.select(nodes, true);
- };
- }
+ const pcOffset = view.sourceResolver.getKeyPcOffset(offset);
+ fragment.dataset.pcOffset = `${pcOffset}`;
+ addressElement.classList.add('linkable-text');
+ offsetElement.classList.add('linkable-text');
}
- return undefined;
}
};
- let ADDRESS_LINK_STYLE = {
- css: 'tag'
- };
- let UNCLASSIFIED_STYLE = {
+ const UNCLASSIFIED_STYLE = {
css: 'com'
};
- let NUMBER_STYLE = {
- css: 'lit'
+ const NUMBER_STYLE = {
+ css: ['instruction-binary', 'lit']
};
- let COMMENT_STYLE = {
+ const COMMENT_STYLE = {
css: 'com'
};
- let POSITION_STYLE = {
- css: 'com',
+ const OPCODE_ARGS = {
+ associateData: function (text, fragment) {
+ fragment.innerHTML = text;
+ const replacer = (match, hexOffset) => {
+ const offset = Number.parseInt(hexOffset, 16);
+ const keyOffset = view.sourceResolver.getKeyPcOffset(offset);
+ return `<span class="tag linkable-text" data-pc-offset="${keyOffset}">${match}</span>`;
+ };
+ const html = text.replace(/<.0?x?([0-9a-fA-F]+)>/g, replacer);
+ fragment.innerHTML = html;
+ }
};
- let OPCODE_STYLE = {
- css: 'kwd',
+ const OPCODE_STYLE = {
+ css: 'kwd'
};
const BLOCK_HEADER_STYLE = {
- css: ['com', 'block'],
- block_id: null,
- blockId: function (text) {
- let matches = /\d+/.exec(text);
- if (!matches) return undefined;
- BLOCK_HEADER_STYLE.block_id = Number(matches[0]);
- return BLOCK_HEADER_STYLE.block_id;
- },
- linkHandler: function (text) {
- let matches = /\d+/.exec(text);
- if (!matches) return undefined;
+ associateData: function (text, fragment) {
+ const matches = /\d+/.exec(text);
+ if (!matches) return;
const blockId = matches[0];
- return function (e) {
- e.stopPropagation();
- if (!e.shiftKey) {
- view.selectionHandler.clear();
- }
- view.blockSelectionHandler.select([blockId], true);
- };
+ fragment.dataset.blockId = blockId;
+ fragment.innerHTML = text;
+ fragment.className = "com block";
}
};
const SOURCE_POSITION_HEADER_STYLE = {
css: 'com'
};
view.SOURCE_POSITION_HEADER_REGEX = /^\s*--[^<]*<.*(not inlined|inlined\((\d+)\)):(\d+)>\s*--/;
- let patterns = [
+ const patterns = [
[
- [/^0x[0-9a-f]{8,16}\s*[0-9a-f]+\ /, ADDRESS_STYLE, 1],
+ [/^0?x?[0-9a-fA-F]{8,16}\s+[0-9a-f]+\s+/, ADDRESS_STYLE, 1],
[view.SOURCE_POSITION_HEADER_REGEX, SOURCE_POSITION_HEADER_STYLE, -1],
[/^\s+-- B\d+ start.*/, BLOCK_HEADER_STYLE, -1],
[/^.*/, UNCLASSIFIED_STYLE, -1]
],
[
- [/^\s+[0-9a-f]+\s+/, NUMBER_STYLE, 2],
- [/^\s+[0-9a-f]+\s+[0-9a-f]+\s+/, NUMBER_STYLE, 2],
+ [/^\s*[0-9a-f]+\s+/, NUMBER_STYLE, 2],
+ [/^\s*[0-9a-f]+\s+[0-9a-f]+\s+/, NUMBER_STYLE, 2],
[/^.*/, null, -1]
],
[
+ [/^REX.W \S+\s+/, OPCODE_STYLE, 3],
[/^\S+\s+/, OPCODE_STYLE, 3],
[/^\S+$/, OPCODE_STYLE, -1],
[/^.*/, null, -1]
],
[
[/^\s+/, null],
- [/^[^\(;]+$/, null, -1],
- [/^[^\(;]+/, null],
- [/^\(/, null, 4],
+ [/^[^;]+$/, OPCODE_ARGS, -1],
+ [/^[^;]+/, OPCODE_ARGS, 4],
[/^;/, COMMENT_STYLE, 5]
],
[
- [/^0x[0-9a-f]{8,16}/, ADDRESS_LINK_STYLE],
- [/^[^\)]/, null],
- [/^\)$/, null, -1],
- [/^\)/, null, 3]
- ],
- [
- [/^; debug\: position /, COMMENT_STYLE, 6],
[/^.+$/, COMMENT_STYLE, -1]
- ],
- [
- [/^\d+$/, POSITION_STYLE, -1],
]
];
view.setPatterns(patterns);
+
+ const linkHandler = (e: MouseEvent) => {
+ if (!(e.target instanceof HTMLElement)) return;
+ const offsetAsString = e.target.dataset.pcOffset ? e.target.dataset.pcOffset : e.target.parentElement.dataset.pcOffset;
+ const offset = Number.parseInt(offsetAsString, 10);
+ if ((typeof offsetAsString) != "undefined" && !Number.isNaN(offset)) {
+ view.offsetSelection.select([offset], true);
+ const nodes = view.sourceResolver.nodesForPCOffset(offset)[0];
+ if (nodes.length > 0) {
+ e.stopPropagation();
+ if (!e.shiftKey) {
+ view.selectionHandler.clear();
+ }
+ view.selectionHandler.select(nodes, true);
+ } else {
+ view.updateSelection();
+ }
+ }
+ return undefined;
+ };
+ view.divNode.addEventListener('click', linkHandler);
+
+ const linkHandlerBlock = e => {
+ const blockId = e.target.dataset.blockId;
+ if (typeof blockId != "undefined" && !Number.isNaN(blockId)) {
+ e.stopPropagation();
+ if (!e.shiftKey) {
+ view.selectionHandler.clear();
+ }
+ view.blockSelectionHandler.select([blockId], true);
+ }
+ };
+ view.divNode.addEventListener('click', linkHandlerBlock);
+
+ this.offsetSelection = new MySelection(anyToString);
+ const instructionSelectionHandler = {
+ clear: function () {
+ view.offsetSelection.clear();
+ view.updateSelection();
+ broker.broadcastClear(instructionSelectionHandler);
+ },
+ select: function (instructionIds, selected) {
+ view.offsetSelection.select(instructionIds, selected);
+ view.updateSelection();
+ broker.broadcastBlockSelect(instructionSelectionHandler, instructionIds, selected);
+ },
+ brokeredInstructionSelect: function (instructionIds, selected) {
+ const firstSelect = view.offsetSelection.isEmpty();
+ const keyPcOffsets = view.sourceResolver.instructionsToKeyPcOffsets(instructionIds);
+ view.offsetSelection.select(keyPcOffsets, selected);
+ view.updateSelection(firstSelect);
+ },
+ brokeredClear: function () {
+ view.offsetSelection.clear();
+ view.updateSelection();
+ }
+ };
+ this.instructionSelectionHandler = instructionSelectionHandler;
+ broker.addInstructionHandler(instructionSelectionHandler);
+
+ const toolbox = document.createElement("div");
+ toolbox.id = "toolbox-anchor";
+ toolbox.innerHTML = toolboxHTML;
+ view.divNode.insertBefore(toolbox, view.divNode.firstChild);
+ const instructionAddressInput: HTMLInputElement = view.divNode.querySelector("#show-instruction-address");
+ const lastShowInstructionAddress = window.sessionStorage.getItem("show-instruction-address");
+ instructionAddressInput.checked = lastShowInstructionAddress == 'true';
+ const showInstructionAddressHandler = () => {
+ window.sessionStorage.setItem("show-instruction-address", `${instructionAddressInput.checked}`);
+ for (const el of view.divNode.querySelectorAll(".instruction-address")) {
+ el.classList.toggle("invisible", !instructionAddressInput.checked);
+ }
+ };
+ instructionAddressInput.addEventListener("change", showInstructionAddressHandler);
+ this.showInstructionAddressHandler = showInstructionAddressHandler;
+
+ const instructionBinaryInput: HTMLInputElement = view.divNode.querySelector("#show-instruction-binary");
+ const lastShowInstructionBinary = window.sessionStorage.getItem("show-instruction-binary");
+ instructionBinaryInput.checked = lastShowInstructionBinary == 'true';
+ const showInstructionBinaryHandler = () => {
+ window.sessionStorage.setItem("show-instruction-binary", `${instructionBinaryInput.checked}`);
+ for (const el of view.divNode.querySelectorAll(".instruction-binary")) {
+ el.classList.toggle("invisible", !instructionBinaryInput.checked);
+ }
+ };
+ instructionBinaryInput.addEventListener("change", showInstructionBinaryHandler);
+ this.showInstructionBinaryHandler = showInstructionBinaryHandler;
+ }
+
+ updateSelection(scrollIntoView: boolean = false) {
+ super.updateSelection(scrollIntoView);
+ const keyPcOffsets = this.sourceResolver.nodesToKeyPcOffsets(this.selection.selectedKeys());
+ if (this.offsetSelection) {
+ for (const key of this.offsetSelection.selectedKeys()) {
+ keyPcOffsets.push(Number(key));
+ }
+ }
+ for (const keyPcOffset of keyPcOffsets) {
+ const elementsToSelect = this.divNode.querySelectorAll(`[data-pc-offset='${keyPcOffset}']`);
+ for (const el of elementsToSelect) {
+ el.classList.toggle("selected", true);
+ }
+ }
}
- initializeCode(sourceText, sourcePosition) {
- let view = this;
- view.addr_event_counts = null;
- view.total_event_counts = null;
- view.max_event_counts = null;
- view.pos_lines = new Array();
+ initializeCode(sourceText, sourcePosition: number = 0) {
+ const view = this;
+ view.addrEventCounts = null;
+ view.totalEventCounts = null;
+ view.maxEventCounts = null;
+ view.posLines = new Array();
// Comment lines for line 0 include sourcePosition already, only need to
// add sourcePosition for lines > 0.
- view.pos_lines[0] = sourcePosition;
+ view.posLines[0] = sourcePosition;
if (sourceText && sourceText != "") {
- let base = sourcePosition;
+ const base = sourcePosition;
let current = 0;
- let source_lines = sourceText.split("\n");
- for (let i = 1; i < source_lines.length; i++) {
+ const sourceLines = sourceText.split("\n");
+ for (let i = 1; i < sourceLines.length; i++) {
// Add 1 for newline character that is split off.
- current += source_lines[i - 1].length + 1;
- view.pos_lines[i] = base + current;
+ current += sourceLines[i - 1].length + 1;
+ view.posLines[i] = base + current;
}
}
}
initializePerfProfile(eventCounts) {
- let view = this;
+ const view = this;
if (eventCounts !== undefined) {
- view.addr_event_counts = eventCounts;
-
- view.total_event_counts = {};
- view.max_event_counts = {};
- for (let ev_name in view.addr_event_counts) {
- let keys = Object.keys(view.addr_event_counts[ev_name]);
- let values = keys.map(key => view.addr_event_counts[ev_name][key]);
- view.total_event_counts[ev_name] = values.reduce((a, b) => a + b);
- view.max_event_counts[ev_name] = values.reduce((a, b) => Math.max(a, b));
+ view.addrEventCounts = eventCounts;
+
+ view.totalEventCounts = {};
+ view.maxEventCounts = {};
+ for (const evName in view.addrEventCounts) {
+ if (view.addrEventCounts.hasOwnProperty(evName)) {
+ const keys = Object.keys(view.addrEventCounts[evName]);
+ const values = keys.map(key => view.addrEventCounts[evName][key]);
+ view.totalEventCounts[evName] = values.reduce((a, b) => a + b);
+ view.maxEventCounts[evName] = values.reduce((a, b) => Math.max(a, b));
+ }
}
- }
- else {
- view.addr_event_counts = null;
- view.total_event_counts = null;
- view.max_event_counts = null;
+ } else {
+ view.addrEventCounts = null;
+ view.totalEventCounts = null;
+ view.maxEventCounts = null;
}
}
+ showContent(data): void {
+ console.time("disassembly-view");
+ super.initializeContent(data, null);
+ this.showInstructionAddressHandler();
+ this.showInstructionBinaryHandler();
+ console.timeEnd("disassembly-view");
+ }
+
// Shorten decimals and remove trailing zeroes for readability.
humanize(num) {
return num.toFixed(3).replace(/\.?0+$/, "") + "%";
}
- // Interpolate between the given start and end values by a fraction of val/max.
- interpolate(val, max, start, end) {
- return start + (end - start) * (val / max);
- }
-
processLine(line) {
- let view = this;
+ const view = this;
let fragments = super.processLine(line);
// Add profiling data per instruction if available.
- if (view.total_event_counts) {
- let matches = /^(0x[0-9a-fA-F]+)\s+\d+\s+[0-9a-fA-F]+/.exec(line);
+ if (view.totalEventCounts) {
+ const matches = /^(0x[0-9a-fA-F]+)\s+\d+\s+[0-9a-fA-F]+/.exec(line);
if (matches) {
- let newFragments = [];
- for (let event in view.addr_event_counts) {
- let count = view.addr_event_counts[event][matches[1]];
+ const newFragments = [];
+ for (const event in view.addrEventCounts) {
+ if (!view.addrEventCounts.hasOwnProperty(event)) continue;
+ const count = view.addrEventCounts[event][matches[1]];
let str = " ";
- let css_cls = "prof";
+ const cssCls = "prof";
if (count !== undefined) {
- let perc = count / view.total_event_counts[event] * 100;
+ const perc = count / view.totalEventCounts[event] * 100;
let col = { r: 255, g: 255, b: 255 };
for (let i = 0; i < PROF_COLS.length; i++) {
if (perc === PROF_COLS[i].perc) {
col = PROF_COLS[i].col;
break;
- }
- else if (perc > PROF_COLS[i].perc && perc < PROF_COLS[i + 1].perc) {
- let col1 = PROF_COLS[i].col;
- let col2 = PROF_COLS[i + 1].col;
+ } else if (perc > PROF_COLS[i].perc && perc < PROF_COLS[i + 1].perc) {
+ const col1 = PROF_COLS[i].col;
+ const col2 = PROF_COLS[i + 1].col;
- let val = perc - PROF_COLS[i].perc;
- let max = PROF_COLS[i + 1].perc - PROF_COLS[i].perc;
+ const val = perc - PROF_COLS[i].perc;
+ const max = PROF_COLS[i + 1].perc - PROF_COLS[i].perc;
- col.r = Math.round(view.interpolate(val, max, col1.r, col2.r));
- col.g = Math.round(view.interpolate(val, max, col1.g, col2.g));
- col.b = Math.round(view.interpolate(val, max, col1.b, col2.b));
+ col.r = Math.round(interpolate(val, max, col1.r, col2.r));
+ col.g = Math.round(interpolate(val, max, col1.g, col2.g));
+ col.b = Math.round(interpolate(val, max, col1.b, col2.b));
break;
}
}
str = UNICODE_BLOCK;
- let fragment = view.createFragment(str, css_cls);
+ const fragment = view.createFragment(str, cssCls);
fragment.title = event + ": " + view.humanize(perc) + " (" + count + ")";
fragment.style.color = "rgb(" + col.r + ", " + col.g + ", " + col.b + ")";
newFragments.push(fragment);
+ } else {
+ newFragments.push(view.createFragment(str, cssCls));
}
- else
- newFragments.push(view.createFragment(str, css_cls));
-
}
fragments = newFragments.concat(fragments);
}
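
For reference, the heat coloring above is just a piecewise-linear gradient over percentage stops. A self-contained sketch of the same math, with hypothetical stops standing in for the real PROF_COLS table imported elsewhere in turbolizer:

type Color = { r: number, g: number, b: number };

// Hypothetical stops; the real PROF_COLS table lives outside this diff.
const STOPS: Array<{ perc: number, col: Color }> = [
  { perc: 0,   col: { r: 255, g: 255, b: 255 } },
  { perc: 0.5, col: { r: 255, g: 255, b: 128 } },
  { perc: 5,   col: { r: 255, g: 128, b: 0 } },
  { perc: 100, col: { r: 255, g: 0,   b: 0 } }
];

// Interpolate between start and end by a fraction of val/max (the former
// DisassemblyView.interpolate method, used above as a free function).
function interpolate(val: number, max: number, start: number, end: number): number {
  return start + (end - start) * (val / max);
}

// Map an event percentage to a color by blending the two surrounding stops.
function heatColor(perc: number): Color {
  for (let i = 0; i < STOPS.length - 1; i++) {
    if (perc === STOPS[i].perc) return STOPS[i].col;
    if (perc > STOPS[i].perc && perc < STOPS[i + 1].perc) {
      const val = perc - STOPS[i].perc;
      const max = STOPS[i + 1].perc - STOPS[i].perc;
      return {
        r: Math.round(interpolate(val, max, STOPS[i].col.r, STOPS[i + 1].col.r)),
        g: Math.round(interpolate(val, max, STOPS[i].col.g, STOPS[i + 1].col.g)),
        b: Math.round(interpolate(val, max, STOPS[i].col.b, STOPS[i + 1].col.b))
      };
    }
  }
  return STOPS[STOPS.length - 1].col;
}

// heatColor(2.75) sits halfway between the 0.5% and 5% stops,
// giving { r: 255, g: 192, b: 64 }.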
@@ -245,4 +347,8 @@ export class DisassemblyView extends TextView {
}
detachSelection() { return null; }
+
+ public searchInputAction(searchInput: HTMLInputElement, e: Event, onlyVisible: boolean): void {
+ throw new Error("Method not implemented.");
+ }
}
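
The rewritten constructor drops the per-fragment linkHandler closures in favor of data attributes plus a single delegated click listener, which keeps large disassemblies cheap to render. A stripped-down sketch of the pattern (the container id is hypothetical):

// One listener on the container resolves the clicked fragment's pc offset
// from a data attribute instead of retaining a closure per fragment.
const container = document.getElementById("disassembly-list"); // hypothetical id
if (container) {
  container.addEventListener("click", (e: MouseEvent) => {
    if (!(e.target instanceof HTMLElement)) return;
    // The address and offset spans sit inside the tagged fragment, so fall
    // back to the parent element's dataset.
    const offsetAsString = e.target.dataset.pcOffset ||
      (e.target.parentElement ? e.target.parentElement.dataset.pcOffset : undefined);
    if (offsetAsString === undefined) return;
    const offset = Number.parseInt(offsetAsString, 10);
    if (Number.isNaN(offset)) return;
    // The real view hands the offset to sourceResolver to find matching nodes.
    console.log(`clicked pc offset ${offset}`);
  });
}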
diff --git a/deps/v8/tools/turbolizer/src/edge.ts b/deps/v8/tools/turbolizer/src/edge.ts
index 7ca6d9dba0..30d265c561 100644
--- a/deps/v8/tools/turbolizer/src/edge.ts
+++ b/deps/v8/tools/turbolizer/src/edge.ts
@@ -2,19 +2,16 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-import {GNode, DEFAULT_NODE_BUBBLE_RADIUS} from "./node.js"
+import { GNode, DEFAULT_NODE_BUBBLE_RADIUS } from "../src/node";
+import { Graph } from "./graph";
export const MINIMUM_EDGE_SEPARATION = 20;
-export function isEdgeInitiallyVisible(target, index, source, type) {
- return type == "control" && (target.cfg || source.cfg);
-}
-
export class Edge {
target: GNode;
source: GNode;
index: number;
- type: String;
+ type: string;
backEdgeNumber: number;
visible: boolean;
@@ -24,55 +21,54 @@ export class Edge {
this.index = index;
this.type = type;
this.backEdgeNumber = 0;
- this.visible = isEdgeInitiallyVisible(target, index, source, type);
+ this.visible = false;
}
-
stringID() {
return this.source.id + "," + this.index + "," + this.target.id;
- };
+ }
isVisible() {
return this.visible && this.source.visible && this.target.visible;
- };
+ }
- getInputHorizontalPosition(graph) {
+ getInputHorizontalPosition(graph: Graph, showTypes: boolean) {
if (this.backEdgeNumber > 0) {
return graph.maxGraphNodeX + this.backEdgeNumber * MINIMUM_EDGE_SEPARATION;
}
- var source = this.source;
- var target = this.target;
- var index = this.index;
- var input_x = target.x + target.getInputX(index);
- var inputApproach = target.getInputApproach(this.index);
- var outputApproach = source.getOutputApproach(graph);
+ const source = this.source;
+ const target = this.target;
+ const index = this.index;
+ const inputX = target.x + target.getInputX(index);
+ const inputApproach = target.getInputApproach(this.index);
+ const outputApproach = source.getOutputApproach(showTypes);
if (inputApproach > outputApproach) {
- return input_x;
+ return inputX;
} else {
- var inputOffset = MINIMUM_EDGE_SEPARATION * (index + 1);
+ const inputOffset = MINIMUM_EDGE_SEPARATION * (index + 1);
return (target.x < source.x)
? (target.x + target.getTotalNodeWidth() + inputOffset)
- : (target.x - inputOffset)
+ : (target.x - inputOffset);
}
}
- generatePath(graph) {
- var target = this.target;
- var source = this.source;
- var input_x = target.x + target.getInputX(this.index);
- var arrowheadHeight = 7;
- var input_y = target.y - 2 * DEFAULT_NODE_BUBBLE_RADIUS - arrowheadHeight;
- var output_x = source.x + source.getOutputX();
- var output_y = source.y + graph.getNodeHeight(source) + DEFAULT_NODE_BUBBLE_RADIUS;
- var inputApproach = target.getInputApproach(this.index);
- var outputApproach = source.getOutputApproach(graph);
- var horizontalPos = this.getInputHorizontalPosition(graph);
-
- var result = "M" + output_x + "," + output_y +
- "L" + output_x + "," + outputApproach +
+ generatePath(graph: Graph, showTypes: boolean) {
+ const target = this.target;
+ const source = this.source;
+ const inputX = target.x + target.getInputX(this.index);
+ const arrowheadHeight = 7;
+ const inputY = target.y - 2 * DEFAULT_NODE_BUBBLE_RADIUS - arrowheadHeight;
+ const outputX = source.x + source.getOutputX();
+ const outputY = source.y + source.getNodeHeight(showTypes) + DEFAULT_NODE_BUBBLE_RADIUS;
+ let inputApproach = target.getInputApproach(this.index);
+ const outputApproach = source.getOutputApproach(showTypes);
+ const horizontalPos = this.getInputHorizontalPosition(graph, showTypes);
+
+ let result = "M" + outputX + "," + outputY +
+ "L" + outputX + "," + outputApproach +
"L" + horizontalPos + "," + outputApproach;
- if (horizontalPos != input_x) {
+ if (horizontalPos != inputX) {
result += "L" + horizontalPos + "," + inputApproach;
} else {
if (inputApproach < outputApproach) {
@@ -80,8 +76,8 @@ export class Edge {
}
}
- result += "L" + input_x + "," + inputApproach +
- "L" + input_x + "," + input_y;
+ result += "L" + inputX + "," + inputApproach +
+ "L" + inputX + "," + inputY;
return result;
}
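
generatePath above assembles a plain SVG path string: down from the source's output point, across at an approach height, then down into the target's input. A toy version with invented coordinates shows the shape of the result:

// Toy illustration of the path generatePath emits: straight segments from
// the source's output point down into the target's input.
function sketchPath(outputX: number, outputY: number,
                    inputX: number, inputY: number,
                    approachY: number): string {
  return `M${outputX},${outputY}` +   // start at the source output
    `L${outputX},${approachY}` +      // drop to the approach height
    `L${inputX},${approachY}` +       // run horizontally to the input column
    `L${inputX},${inputY}`;           // descend into the target input
}

// sketchPath(100, 40, 260, 180, 150) => "M100,40L100,150L260,150L260,180"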
diff --git a/deps/v8/tools/turbolizer/src/graph-layout.ts b/deps/v8/tools/turbolizer/src/graph-layout.ts
index 302a98aacc..3687c28c86 100644
--- a/deps/v8/tools/turbolizer/src/graph-layout.ts
+++ b/deps/v8/tools/turbolizer/src/graph-layout.ts
@@ -2,23 +2,21 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+import { MAX_RANK_SENTINEL } from "../src/constants";
+import { MINIMUM_EDGE_SEPARATION, Edge } from "../src/edge";
+import { NODE_INPUT_WIDTH, MINIMUM_NODE_OUTPUT_APPROACH, DEFAULT_NODE_BUBBLE_RADIUS, GNode } from "../src/node";
+import { Graph } from "./graph";
-import {MAX_RANK_SENTINEL} from "./constants.js"
-import {MINIMUM_EDGE_SEPARATION} from "./edge.js"
-import {NODE_INPUT_WIDTH, MINIMUM_NODE_OUTPUT_APPROACH, DEFAULT_NODE_BUBBLE_RADIUS} from "./node.js"
+const DEFAULT_NODE_ROW_SEPARATION = 130;
+const traceLayout = false;
+function newGraphOccupation(graph: Graph) {
+ const isSlotFilled = [];
+ let maxSlot = 0;
+ let minSlot = 0;
+ let nodeOccupation: Array<[number, number]> = [];
-const DEFAULT_NODE_ROW_SEPARATION = 130
-
-var traceLayout = false;
-
-function newGraphOccupation(graph) {
- var isSlotFilled = [];
- var maxSlot = 0;
- var minSlot = 0;
- var nodeOccupation = [];
-
- function slotToIndex(slot) {
+ function slotToIndex(slot: number) {
if (slot >= 0) {
return slot * 2;
} else {
@@ -26,42 +24,30 @@ function newGraphOccupation(graph) {
}
}
- function indexToSlot(index) {
- if ((index % 0) == 0) {
- return index / 2;
- } else {
- return -((index - 1) / 2);
- }
- }
-
- function positionToSlot(pos) {
+ function positionToSlot(pos: number) {
return Math.floor(pos / NODE_INPUT_WIDTH);
}
- function slotToLeftPosition(slot) {
- return slot * NODE_INPUT_WIDTH
- }
-
- function slotToRightPosition(slot) {
- return (slot + 1) * NODE_INPUT_WIDTH
+ function slotToLeftPosition(slot: number) {
+ return slot * NODE_INPUT_WIDTH;
}
- function findSpace(pos, width, direction) {
- var widthSlots = Math.floor((width + NODE_INPUT_WIDTH - 1) /
+ function findSpace(pos: number, width: number, direction: number) {
+ const widthSlots = Math.floor((width + NODE_INPUT_WIDTH - 1) /
NODE_INPUT_WIDTH);
- var currentSlot = positionToSlot(pos + width / 2);
- var currentScanSlot = currentSlot;
- var widthSlotsRemainingLeft = widthSlots;
- var widthSlotsRemainingRight = widthSlots;
- var slotsChecked = 0;
+ const currentSlot = positionToSlot(pos + width / 2);
+ let currentScanSlot = currentSlot;
+ let widthSlotsRemainingLeft = widthSlots;
+ let widthSlotsRemainingRight = widthSlots;
+ let slotsChecked = 0;
while (true) {
- var mod = slotsChecked++ % 2;
+ const mod = slotsChecked++ % 2;
currentScanSlot = currentSlot + (mod ? -1 : 1) * (slotsChecked >> 1);
if (!isSlotFilled[slotToIndex(currentScanSlot)]) {
if (mod) {
- if (direction <= 0)--widthSlotsRemainingLeft
+ if (direction <= 0) --widthSlotsRemainingLeft;
} else {
- if (direction >= 0)--widthSlotsRemainingRight
+ if (direction >= 0) --widthSlotsRemainingRight;
}
if (widthSlotsRemainingLeft == 0 ||
widthSlotsRemainingRight == 0 ||
@@ -83,7 +69,7 @@ function newGraphOccupation(graph) {
}
}
- function setIndexRange(from, to, value) {
+ function setIndexRange(from: number, to: number, value: boolean) {
if (to < from) {
throw ("illegal slot range");
}
@@ -98,48 +84,47 @@ function newGraphOccupation(graph) {
}
}
- function occupySlotRange(from, to) {
+ function occupySlotRange(from: number, to: number) {
if (traceLayout) {
console.log("Occupied [" + slotToLeftPosition(from) + " " + slotToLeftPosition(to + 1) + ")");
}
setIndexRange(from, to, true);
}
- function clearSlotRange(from, to) {
+ function clearSlotRange(from: number, to: number) {
if (traceLayout) {
console.log("Cleared [" + slotToLeftPosition(from) + " " + slotToLeftPosition(to + 1) + ")");
}
setIndexRange(from, to, false);
}
- function occupyPositionRange(from, to) {
+ function occupyPositionRange(from: number, to: number) {
occupySlotRange(positionToSlot(from), positionToSlot(to - 1));
}
- function clearPositionRange(from, to) {
+ function clearPositionRange(from: number, to: number) {
clearSlotRange(positionToSlot(from), positionToSlot(to - 1));
}
- function occupyPositionRangeWithMargin(from, to, margin) {
- var fromMargin = from - Math.floor(margin);
- var toMargin = to + Math.floor(margin);
+ function occupyPositionRangeWithMargin(from: number, to: number, margin: number) {
+ const fromMargin = from - Math.floor(margin);
+ const toMargin = to + Math.floor(margin);
occupyPositionRange(fromMargin, toMargin);
}
- function clearPositionRangeWithMargin(from, to, margin) {
- var fromMargin = from - Math.floor(margin);
- var toMargin = to + Math.floor(margin);
+ function clearPositionRangeWithMargin(from: number, to: number, margin: number) {
+ const fromMargin = from - Math.floor(margin);
+ const toMargin = to + Math.floor(margin);
clearPositionRange(fromMargin, toMargin);
}
- var occupation = {
- occupyNodeInputs: function (node) {
- for (var i = 0; i < node.inputs.length; ++i) {
+ const occupation = {
+ occupyNodeInputs: function (node: GNode, showTypes: boolean) {
+ for (let i = 0; i < node.inputs.length; ++i) {
if (node.inputs[i].isVisible()) {
- var edge = node.inputs[i];
+ const edge = node.inputs[i];
if (!edge.isBackEdge()) {
- var source = edge.source;
- var horizontalPos = edge.getInputHorizontalPosition(graph);
+ const horizontalPos = edge.getInputHorizontalPosition(graph, showTypes);
if (traceLayout) {
console.log("Occupying input " + i + " of " + node.id + " at " + horizontalPos);
}
@@ -150,19 +135,18 @@ function newGraphOccupation(graph) {
}
}
},
- occupyNode: function (node) {
- var getPlacementHint = function (n) {
- var pos = 0;
- var direction = -1;
- var outputEdges = 0;
- var inputEdges = 0;
- for (var k = 0; k < n.outputs.length; ++k) {
- var outputEdge = n.outputs[k];
+ occupyNode: function (node: GNode) {
+ const getPlacementHint = function (n: GNode) {
+ let pos = 0;
+ let direction = -1;
+ let outputEdges = 0;
+ let inputEdges = 0;
+ for (const outputEdge of n.outputs) {
if (outputEdge.isVisible()) {
- var output = n.outputs[k].target;
- for (var l = 0; l < output.inputs.length; ++l) {
+ const output = outputEdge.target;
+ for (let l = 0; l < output.inputs.length; ++l) {
if (output.rank > n.rank) {
- var inputEdge = output.inputs[l];
+ const inputEdge = output.inputs[l];
if (inputEdge.isVisible()) {
++inputEdges;
}
@@ -184,19 +168,19 @@ function newGraphOccupation(graph) {
direction = 0;
}
return [direction, pos];
- }
- var width = node.getTotalNodeWidth();
- var margin = MINIMUM_EDGE_SEPARATION;
- var paddedWidth = width + 2 * margin;
- var placementHint = getPlacementHint(node);
- var x = placementHint[1] - paddedWidth + margin;
+ };
+ const width = node.getTotalNodeWidth();
+ const margin = MINIMUM_EDGE_SEPARATION;
+ const paddedWidth = width + 2 * margin;
+ const placementHint = getPlacementHint(node);
+ const x = placementHint[1] - paddedWidth + margin;
if (traceLayout) {
console.log("Node " + node.id + " placement hint [" + x + ", " + (x + paddedWidth) + ")");
}
- var placement = findSpace(x, paddedWidth, placementHint[0]);
- var firstSlot = placement[0];
- var slotWidth = placement[1];
- var endSlotExclusive = firstSlot + slotWidth - 1;
+ const placement = findSpace(x, paddedWidth, placementHint[0]);
+ const firstSlot = placement[0];
+ const slotWidth = placement[1];
+ const endSlotExclusive = firstSlot + slotWidth - 1;
occupySlotRange(firstSlot, endSlotExclusive);
nodeOccupation.push([firstSlot, endSlotExclusive]);
if (placementHint[0] < 0) {
@@ -208,18 +192,18 @@ function newGraphOccupation(graph) {
}
},
clearOccupiedNodes: function () {
- nodeOccupation.forEach(function (o) {
- clearSlotRange(o[0], o[1]);
+ nodeOccupation.forEach(([firstSlot, endSlotExclusive]) => {
+ clearSlotRange(firstSlot, endSlotExclusive);
});
nodeOccupation = [];
},
- clearNodeOutputs: function (source) {
+ clearNodeOutputs: function (source: GNode, showTypes: boolean) {
source.outputs.forEach(function (edge) {
if (edge.isVisible()) {
- var target = edge.target;
- for (var i = 0; i < target.inputs.length; ++i) {
- if (target.inputs[i].source === source) {
- var horizontalPos = edge.getInputHorizontalPosition(graph);
+ const target = edge.target;
+ for (const inputEdge of target.inputs) {
+ if (inputEdge.source === source) {
+ const horizontalPos = edge.getInputHorizontalPosition(graph, showTypes);
clearPositionRangeWithMargin(horizontalPos,
horizontalPos,
NODE_INPUT_WIDTH / 2);
@@ -229,8 +213,8 @@ function newGraphOccupation(graph) {
});
},
print: function () {
- var s = "";
- for (var currentSlot = -40; currentSlot < 40; ++currentSlot) {
+ let s = "";
+ for (let currentSlot = -40; currentSlot < 40; ++currentSlot) {
if (currentSlot != 0) {
s += " ";
} else {
@@ -239,7 +223,7 @@ function newGraphOccupation(graph) {
}
console.log(s);
s = "";
- for (var currentSlot2 = -40; currentSlot2 < 40; ++currentSlot2) {
+ for (let currentSlot2 = -40; currentSlot2 < 40; ++currentSlot2) {
if (isSlotFilled[slotToIndex(currentSlot2)]) {
s += "*";
} else {
@@ -248,30 +232,33 @@ function newGraphOccupation(graph) {
}
console.log(s);
}
- }
+ };
return occupation;
}
-export function layoutNodeGraph(graph) {
+export function layoutNodeGraph(graph: Graph, showTypes: boolean): void {
// First determine the set of nodes that have no outputs. Those are the
// basis for bottom-up DFS to determine rank and node placement.
- var endNodesHasNoOutputs = [];
- var startNodesHasNoInputs = [];
- graph.nodes.forEach(function (n, i) {
+
+ const start = performance.now();
+
+ const endNodesHasNoOutputs = [];
+ const startNodesHasNoInputs = [];
+ for (const n of graph.nodes()) {
endNodesHasNoOutputs[n.id] = true;
startNodesHasNoInputs[n.id] = true;
- });
- graph.edges.forEach(function (e, i) {
+ }
+ graph.forEachEdge((e: Edge) => {
endNodesHasNoOutputs[e.source.id] = false;
startNodesHasNoInputs[e.target.id] = false;
});
  // Finalize the list of start and end nodes.
- var endNodes = [];
- var startNodes = [];
- var visited = [];
- var rank = [];
- graph.nodes.forEach(function (n, i) {
+ const endNodes: Array<GNode> = [];
+ const startNodes: Array<GNode> = [];
+ let visited: Array<boolean> = [];
+ const rank: Array<number> = [];
+ for (const n of graph.nodes()) {
if (endNodesHasNoOutputs[n.id]) {
endNodes.push(n);
}
@@ -283,40 +270,44 @@ export function layoutNodeGraph(graph) {
n.rank = 0;
n.visitOrderWithinRank = 0;
n.outputApproach = MINIMUM_NODE_OUTPUT_APPROACH;
- });
+ }
+ if (traceLayout) {
+ console.log(`layoutGraph init ${performance.now() - start}`);
+ }
- var maxRank = 0;
- var visited = [];
- var dfsStack = [];
- var visitOrderWithinRank = 0;
+ let maxRank = 0;
+ visited = [];
+ let visitOrderWithinRank = 0;
- var worklist = startNodes.slice();
+ const worklist: Array<GNode> = startNodes.slice();
while (worklist.length != 0) {
- var n = worklist.pop();
- var changed = false;
+ const n: GNode = worklist.pop();
+ let changed = false;
if (n.rank == MAX_RANK_SENTINEL) {
n.rank = 1;
changed = true;
}
- var begin = 0;
- var end = n.inputs.length;
- if (n.opcode == 'Phi' || n.opcode == 'EffectPhi') {
+ let begin = 0;
+ let end = n.inputs.length;
+ if (n.nodeLabel.opcode == 'Phi' ||
+ n.nodeLabel.opcode == 'EffectPhi' ||
+ n.nodeLabel.opcode == 'InductionVariablePhi') {
// Keep with merge or loop node
begin = n.inputs.length - 1;
} else if (n.hasBackEdges()) {
end = 1;
}
- for (var l = begin; l < end; ++l) {
- var input = n.inputs[l].source;
+ for (let l = begin; l < end; ++l) {
+ const input = n.inputs[l].source;
if (input.visible && input.rank >= n.rank) {
n.rank = input.rank + 1;
changed = true;
}
}
if (changed) {
- var hasBackEdges = n.hasBackEdges();
- for (var l = n.outputs.length - 1; l >= 0; --l) {
+ const hasBackEdges = n.hasBackEdges();
+ for (let l = n.outputs.length - 1; l >= 0; --l) {
if (hasBackEdges && (l != 0)) {
worklist.unshift(n.outputs[l].target);
} else {
@@ -329,24 +320,28 @@ export function layoutNodeGraph(graph) {
}
}
+ if (traceLayout) {
+ console.log(`layoutGraph worklist ${performance.now() - start}`);
+ }
+
visited = [];
- function dfsFindRankLate(n) {
+ function dfsFindRankLate(n: GNode) {
if (visited[n.id]) return;
visited[n.id] = true;
- var originalRank = n.rank;
- var newRank = n.rank;
- var firstInput = true;
- for (var l = 0; l < n.outputs.length; ++l) {
- var output = n.outputs[l].target;
+ const originalRank = n.rank;
+ let newRank = n.rank;
+ let isFirstInput = true;
+ for (const outputEdge of n.outputs) {
+ const output = outputEdge.target;
dfsFindRankLate(output);
- var outputRank = output.rank;
- if (output.visible && (firstInput || outputRank <= newRank) &&
+ const outputRank = output.rank;
+ if (output.visible && (isFirstInput || outputRank <= newRank) &&
(outputRank > originalRank)) {
newRank = outputRank - 1;
}
- firstInput = false;
+ isFirstInput = false;
}
- if (n.opcode != "Start" && n.opcode != "Phi" && n.opcode != "EffectPhi") {
+ if (n.nodeLabel.opcode != "Start" && n.nodeLabel.opcode != "Phi" && n.nodeLabel.opcode != "EffectPhi" && n.nodeLabel.opcode != "InductionVariablePhi") {
n.rank = newRank;
}
}
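
dfsFindRankLate raises a node's rank to one less than the smallest rank among its visible consumers (when that is later than its current rank), so values are placed just before their first use; Start and the phi variants stay pinned. A hypothetical mini-model of the pass (the real GNode carries far more state):

// Mini-model of the late-ranking pass; the real pass also skips Start
// and phi-like opcodes.
interface MiniNode {
  id: number;
  rank: number;
  visible: boolean;
  outputs: MiniNode[];
}

function findRankLate(n: MiniNode, visited: Set<number>): void {
  if (visited.has(n.id)) return;
  visited.add(n.id);
  const originalRank = n.rank;
  let newRank = n.rank;
  let isFirstInput = true;
  for (const out of n.outputs) {
    findRankLate(out, visited);
    // Track the smallest consumer rank that is still later than our own.
    if (out.visible && (isFirstInput || out.rank <= newRank) && out.rank > originalRank) {
      newRank = out.rank - 1;
    }
    isFirstInput = false;
  }
  n.rank = newRank;
}

// For a chain a(rank 1) -> b(rank 2) -> c(rank 5), all visible,
// findRankLate(a, new Set()) leaves c at 5 and moves b to 4 and a to 3:
// each value lands just before its use.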
@@ -354,13 +349,12 @@ export function layoutNodeGraph(graph) {
startNodes.forEach(dfsFindRankLate);
visited = [];
- function dfsRankOrder(n) {
+ function dfsRankOrder(n: GNode) {
if (visited[n.id]) return;
visited[n.id] = true;
- for (var l = 0; l < n.outputs.length; ++l) {
- var edge = n.outputs[l];
- if (edge.isVisible()) {
- var output = edge.target;
+ for (const outputEdge of n.outputs) {
+ if (outputEdge.isVisible()) {
+ const output = outputEdge.target;
dfsRankOrder(output);
}
}
@@ -374,10 +368,10 @@ export function layoutNodeGraph(graph) {
n.rank = maxRank + 1;
});
- var rankSets = [];
+ const rankSets: Array<Array<GNode>> = [];
// Collect sets for each rank.
- graph.nodes.forEach(function (n, i) {
- n.y = n.rank * (DEFAULT_NODE_ROW_SEPARATION + graph.getNodeHeight(n) +
+ for (const n of graph.nodes()) {
+ n.y = n.rank * (DEFAULT_NODE_ROW_SEPARATION + n.getNodeHeight(showTypes) +
2 * DEFAULT_NODE_BUBBLE_RADIUS);
if (n.visible) {
if (rankSets[n.rank] === undefined) {
@@ -386,18 +380,17 @@ export function layoutNodeGraph(graph) {
rankSets[n.rank].push(n);
}
}
- });
+ }
// Iterate backwards from highest to lowest rank, placing nodes so that they
// spread out from the "center" as much as possible while still being
// compact and not overlapping live input lines.
- var occupation = newGraphOccupation(graph);
- var rankCount = 0;
+ const occupation = newGraphOccupation(graph);
- rankSets.reverse().forEach(function (rankSet) {
+ rankSets.reverse().forEach(function (rankSet: Array<GNode>) {
- for (var i = 0; i < rankSet.length; ++i) {
- occupation.clearNodeOutputs(rankSet[i]);
+ for (const node of rankSet) {
+ occupation.clearNodeOutputs(node, showTypes);
}
if (traceLayout) {
@@ -405,19 +398,25 @@ export function layoutNodeGraph(graph) {
occupation.print();
}
- var placedCount = 0;
- rankSet = rankSet.sort(function (a, b) {
- return a.visitOrderWithinRank < b.visitOrderWithinRank;
+ let placedCount = 0;
+ rankSet = rankSet.sort((a: GNode, b: GNode) => {
+ if (a.visitOrderWithinRank < b.visitOrderWithinRank) {
+ return -1;
+ } else if (a.visitOrderWithinRank == b.visitOrderWithinRank) {
+ return 0;
+ } else {
+ return 1;
+ }
});
- for (var i = 0; i < rankSet.length; ++i) {
- var nodeToPlace = rankSet[i];
+
+ for (const nodeToPlace of rankSet) {
if (nodeToPlace.visible) {
nodeToPlace.x = occupation.occupyNode(nodeToPlace);
if (traceLayout) {
console.log("Node " + nodeToPlace.id + " is placed between [" + nodeToPlace.x + ", " + (nodeToPlace.x + nodeToPlace.getTotalNodeWidth()) + ")");
}
- var staggeredFlooredI = Math.floor(placedCount++ % 3);
- var delta = MINIMUM_EDGE_SEPARATION * staggeredFlooredI
+ const staggeredFlooredI = Math.floor(placedCount++ % 3);
+ const delta = MINIMUM_EDGE_SEPARATION * staggeredFlooredI;
nodeToPlace.outputApproach += delta;
} else {
nodeToPlace.x = 0;
@@ -436,9 +435,8 @@ export function layoutNodeGraph(graph) {
occupation.print();
}
- for (var i = 0; i < rankSet.length; ++i) {
- var node = rankSet[i];
- occupation.occupyNodeInputs(node);
+ for (const node of rankSet) {
+ occupation.occupyNodeInputs(node, showTypes);
}
if (traceLayout) {
@@ -453,57 +451,11 @@ export function layoutNodeGraph(graph) {
});
graph.maxBackEdgeNumber = 0;
- graph.visibleEdges.selectAll("path").each(function (e) {
+ graph.forEachEdge((e: Edge) => {
if (e.isBackEdge()) {
e.backEdgeNumber = ++graph.maxBackEdgeNumber;
} else {
e.backEdgeNumber = 0;
}
});
-
- redetermineGraphBoundingBox(graph);
-}
-
-function redetermineGraphBoundingBox(graph) {
- graph.minGraphX = 0;
- graph.maxGraphNodeX = 1;
- graph.maxGraphX = undefined; // see below
- graph.minGraphY = 0;
- graph.maxGraphY = 1;
-
- for (var i = 0; i < graph.nodes.length; ++i) {
- var node = graph.nodes[i];
-
- if (!node.visible) {
- continue;
- }
-
- if (node.x < graph.minGraphX) {
- graph.minGraphX = node.x;
- }
- if ((node.x + node.getTotalNodeWidth()) > graph.maxGraphNodeX) {
- graph.maxGraphNodeX = node.x + node.getTotalNodeWidth();
- }
- if ((node.y - 50) < graph.minGraphY) {
- graph.minGraphY = node.y - 50;
- }
- if ((node.y + graph.getNodeHeight(node) + 50) > graph.maxGraphY) {
- graph.maxGraphY = node.y + graph.getNodeHeight(node) + 50;
- }
- }
-
- graph.maxGraphX = graph.maxGraphNodeX +
- graph.maxBackEdgeNumber * MINIMUM_EDGE_SEPARATION;
-
- const width = (graph.maxGraphX - graph.minGraphX);
- const height = graph.maxGraphY - graph.minGraphY;
- graph.width = width;
- graph.height = height;
-
- const extent = [
- [graph.minGraphX - width / 2, graph.minGraphY - height / 2],
- [graph.maxGraphX + width / 2, graph.maxGraphY + height / 2]
- ];
- graph.panZoom.translateExtent(extent);
- graph.minScale();
}
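
newGraphOccupation backs its occupancy table with a flat array by folding signed slot numbers into non-negative indices: slots 0, 1, 2, ... land on even indices and -1, -2, ... on odd ones. A sketch of the mapping together with a consistent inverse (the indexToSlot this diff deletes tested (index % 0) == 0, which is never true, so its even-index branch was unreachable):

// Fold signed slots into array indices: 0 -> 0, -1 -> 1, 1 -> 2, -2 -> 3, ...
function slotToIndex(slot: number): number {
  return slot >= 0 ? slot * 2 : slot * -2 - 1;
}

// A mathematically consistent inverse, shown for illustration only; the
// layout code no longer needs one.
function indexToSlot(index: number): number {
  return index % 2 === 0 ? index / 2 : -((index + 1) / 2);
}

// Round trip: slotToIndex(-3) === 5 and indexToSlot(5) === -3.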
diff --git a/deps/v8/tools/turbolizer/src/graph-view.ts b/deps/v8/tools/turbolizer/src/graph-view.ts
index 9e136f8225..c46413c400 100644
--- a/deps/v8/tools/turbolizer/src/graph-view.ts
+++ b/deps/v8/tools/turbolizer/src/graph-view.ts
@@ -2,18 +2,20 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-import * as d3 from "d3"
-import {layoutNodeGraph} from "./graph-layout.js"
-import {MAX_RANK_SENTINEL} from "./constants.js"
-import {GNode, nodeToStr, isNodeInitiallyVisible} from "./node.js"
-import {NODE_INPUT_WIDTH, MINIMUM_NODE_OUTPUT_APPROACH} from "./node.js"
-import {DEFAULT_NODE_BUBBLE_RADIUS} from "./node.js"
-import {Edge, edgeToStr} from "./edge.js"
-import {View, PhaseView} from "./view.js"
-import {MySelection} from "./selection.js"
-import {partial, alignUp} from "./util.js"
-
-function nodeToStringKey(n) {
+import * as d3 from "d3";
+import { layoutNodeGraph } from "../src/graph-layout";
+import { GNode, nodeToStr } from "../src/node";
+import { NODE_INPUT_WIDTH } from "../src/node";
+import { DEFAULT_NODE_BUBBLE_RADIUS } from "../src/node";
+import { Edge, edgeToStr } from "../src/edge";
+import { PhaseView } from "../src/view";
+import { MySelection } from "../src/selection";
+import { partial } from "../src/util";
+import { NodeSelectionHandler, ClearableHandler } from "./selection-handler";
+import { Graph } from "./graph";
+import { SelectionBroker } from "./selection-broker";
+
+function nodeToStringKey(n: GNode) {
return "" + n.id;
}
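
nodeToStringKey gives the selection machinery a stable string identity per node, which is what lets a selection survive re-layout and phase switches. A minimal keyed-selection sketch in that spirit (the real MySelection in selection.ts may differ in detail):

// Items are tracked by a caller-supplied string key rather than by object
// identity, so re-created node objects still count as selected.
class KeyedSelection<T> {
  private keyFn: (item: T) => string;
  private selected = new Map<string, T>();

  constructor(keyFn: (item: T) => string) {
    this.keyFn = keyFn;
  }

  select(items: Iterable<T>, isSelected: boolean): void {
    for (const item of items) {
      if (isSelected) this.selected.set(this.keyFn(item), item);
      else this.selected.delete(this.keyFn(item));
    }
  }

  isSelected(item: T): boolean {
    return this.selected.has(this.keyFn(item));
  }

  selectedKeys(): Set<string> {
    return new Set(this.selected.keys());
  }
}

// Usage mirroring the view: const sel = new KeyedSelection<GNode>(n => "" + n.id);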
@@ -21,35 +23,28 @@ interface GraphState {
showTypes: boolean;
selection: MySelection;
mouseDownNode: any;
- justDragged: boolean,
- justScaleTransGraph: boolean,
- lastKeyDown: number,
- hideDead: boolean
+ justDragged: boolean;
+ justScaleTransGraph: boolean;
+ hideDead: boolean;
}
-export class GraphView extends View implements PhaseView {
+export class GraphView extends PhaseView {
divElement: d3.Selection<any, any, any, any>;
svg: d3.Selection<any, any, any, any>;
- showPhaseByName: (string) => void;
+ showPhaseByName: (p: string, s: Set<any>) => void;
state: GraphState;
- nodes: Array<GNode>;
- edges: Array<any>;
- selectionHandler: NodeSelectionHandler;
+ selectionHandler: NodeSelectionHandler & ClearableHandler;
graphElement: d3.Selection<any, any, any, any>;
visibleNodes: d3.Selection<any, GNode, any, any>;
visibleEdges: d3.Selection<any, Edge, any, any>;
- minGraphX: number;
- maxGraphX: number;
- minGraphY: number;
- maxGraphY: number;
- width: number;
- height: number;
- maxGraphNodeX: number;
drag: d3.DragBehavior<any, GNode, GNode>;
panZoom: d3.ZoomBehavior<SVGElement, any>;
- nodeMap: Array<any>;
visibleBubbles: d3.Selection<any, any, any, any>;
transitionTimout: number;
+ graph: Graph;
+ broker: SelectionBroker;
+ phaseName: string;
+ toolbox: HTMLElement;
createViewElement() {
const pane = document.createElement('div');
@@ -57,82 +52,88 @@ export class GraphView extends View implements PhaseView {
return pane;
}
- constructor(id, broker, showPhaseByName: (string) => void) {
- super(id);
- var graph = this;
+ constructor(idOrContainer: string | HTMLElement, broker: SelectionBroker,
+ showPhaseByName: (s: string) => void, toolbox: HTMLElement) {
+ super(idOrContainer);
+ const view = this;
+ this.broker = broker;
this.showPhaseByName = showPhaseByName;
this.divElement = d3.select(this.divNode);
- const svg = this.divElement.append("svg").attr('version', '1.1')
+ this.phaseName = "";
+ this.toolbox = toolbox;
+ const svg = this.divElement.append("svg")
+ .attr('version', '2.0')
.attr("width", "100%")
.attr("height", "100%");
svg.on("click", function (d) {
- graph.selectionHandler.clear();
+ view.selectionHandler.clear();
});
- graph.svg = svg;
+ // Listen for key events. Note that the focus handler seems
+ // to be important even if it does nothing.
+ svg
+ .attr("focusable", false)
+ .on("focus", e => { })
+ .on("keydown", e => { view.svgKeyDown(); });
- graph.nodes = [];
- graph.edges = [];
+ view.svg = svg;
- graph.minGraphX = 0;
- graph.maxGraphX = 1;
- graph.minGraphY = 0;
- graph.maxGraphY = 1;
-
- graph.state = {
+ this.state = {
selection: null,
mouseDownNode: null,
justDragged: false,
justScaleTransGraph: false,
- lastKeyDown: -1,
showTypes: false,
hideDead: false
};
this.selectionHandler = {
clear: function () {
- graph.state.selection.clear();
+ view.state.selection.clear();
broker.broadcastClear(this);
- graph.updateGraphVisibility();
+ view.updateGraphVisibility();
},
- select: function (nodes, selected) {
- let locations = [];
+ select: function (nodes: Array<GNode>, selected: boolean) {
+ const locations = [];
for (const node of nodes) {
- if (node.sourcePosition) {
- locations.push(node.sourcePosition);
+ if (node.nodeLabel.sourcePosition) {
+ locations.push(node.nodeLabel.sourcePosition);
}
- if (node.origin && node.origin.bytecodePosition) {
- locations.push({ bytecodePosition: node.origin.bytecodePosition });
+ if (node.nodeLabel.origin && node.nodeLabel.origin.bytecodePosition) {
+ locations.push({ bytecodePosition: node.nodeLabel.origin.bytecodePosition });
}
}
- graph.state.selection.select(nodes, selected);
+ view.state.selection.select(nodes, selected);
broker.broadcastSourcePositionSelect(this, locations, selected);
- graph.updateGraphVisibility();
+ view.updateGraphVisibility();
},
- brokeredNodeSelect: function (locations, selected) {
- let selection = graph.nodes
- .filter(function (n) {
- return locations.has(nodeToStringKey(n))
- && (!graph.state.hideDead || n.isLive());
- });
- graph.state.selection.select(selection, selected);
- // Update edge visibility based on selection.
- graph.nodes.forEach((n) => {
- if (graph.state.selection.isSelected(n)) n.visible = true;
+ brokeredNodeSelect: function (locations, selected: boolean) {
+ if (!view.graph) return;
+ const selection = view.graph.nodes(n => {
+ return locations.has(nodeToStringKey(n))
+ && (!view.state.hideDead || n.isLive());
});
- graph.edges.forEach(function (e) {
- e.visible = e.visible ||
- (graph.state.selection.isSelected(e.source) && graph.state.selection.isSelected(e.target));
- });
- graph.updateGraphVisibility();
+ view.state.selection.select(selection, selected);
+ // Update edge visibility based on selection.
+ for (const n of view.graph.nodes()) {
+ if (view.state.selection.isSelected(n)) {
+ n.visible = true;
+ n.inputs.forEach(e => {
+ e.visible = e.visible || view.state.selection.isSelected(e.source);
+ });
+ n.outputs.forEach(e => {
+ e.visible = e.visible || view.state.selection.isSelected(e.target);
+ });
+ }
+ }
+ view.updateGraphVisibility();
},
brokeredClear: function () {
- graph.state.selection.clear();
- graph.updateGraphVisibility();
+ view.state.selection.clear();
+ view.updateGraphVisibility();
}
};
- broker.addNodeHandler(this.selectionHandler);
- graph.state.selection = new MySelection(nodeToStringKey);
+ view.state.selection = new MySelection(nodeToStringKey);
const defs = svg.append('svg:defs');
defs.append('svg:marker')
@@ -146,35 +147,20 @@ export class GraphView extends View implements PhaseView {
.attr('d', 'M0,-4L8,0L0,4');
this.graphElement = svg.append("g");
- graph.visibleEdges = this.graphElement.append("g");
- graph.visibleNodes = this.graphElement.append("g");
+ view.visibleEdges = this.graphElement.append("g");
+ view.visibleNodes = this.graphElement.append("g");
- graph.drag = d3.drag<any, GNode, GNode>()
+ view.drag = d3.drag<any, GNode, GNode>()
.on("drag", function (d) {
d.x += d3.event.dx;
d.y += d3.event.dy;
- graph.updateGraphVisibility();
+ view.updateGraphVisibility();
});
-
- d3.select("#layout").on("click", partial(this.layoutAction, graph));
- d3.select("#show-all").on("click", partial(this.showAllAction, graph));
- d3.select("#toggle-hide-dead").on("click", partial(this.toggleHideDead, graph));
- d3.select("#hide-unselected").on("click", partial(this.hideUnselectedAction, graph));
- d3.select("#hide-selected").on("click", partial(this.hideSelectedAction, graph));
- d3.select("#zoom-selection").on("click", partial(this.zoomSelectionAction, graph));
- d3.select("#toggle-types").on("click", partial(this.toggleTypesAction, graph));
-
- // listen for key events
- d3.select(window).on("keydown", function (e) {
- graph.svgKeyDown.call(graph);
- }).on("keyup", function () {
- graph.svgKeyUp.call(graph);
- });
-
function zoomed() {
if (d3.event.shiftKey) return false;
- graph.graphElement.attr("transform", d3.event.transform);
+ view.graphElement.attr("transform", d3.event.transform);
+ return true;
}
const zoomSvg = d3.zoom<SVGElement, any>()
@@ -190,38 +176,17 @@ export class GraphView extends View implements PhaseView {
svg.call(zoomSvg).on("dblclick.zoom", null);
- graph.panZoom = zoomSvg;
-
- }
-
-
- static get selectedClass() {
- return "selected";
- }
- static get rectClass() {
- return "nodeStyle";
- }
- static get activeEditId() {
- return "active-editing";
- }
- static get nodeRadius() {
- return 50;
- }
+ view.panZoom = zoomSvg;
- getNodeHeight(d): number {
- if (this.state.showTypes) {
- return d.normalheight + d.labelbbox.height;
- } else {
- return d.normalheight;
- }
}
- getEdgeFrontier(nodes, inEdges, edgeFilter) {
- let frontier = new Set();
+ getEdgeFrontier(nodes: Iterable<GNode>, inEdges: boolean,
+ edgeFilter: (e: Edge, i: number) => boolean) {
+ const frontier: Set<Edge> = new Set();
for (const n of nodes) {
- var edges = inEdges ? n.inputs : n.outputs;
- var edgeNumber = 0;
- edges.forEach(function (edge) {
+ const edges = inEdges ? n.inputs : n.outputs;
+ let edgeNumber = 0;
+ edges.forEach((edge: Edge) => {
if (edgeFilter == undefined || edgeFilter(edge, edgeNumber)) {
frontier.add(edge);
}
@@ -231,28 +196,29 @@ export class GraphView extends View implements PhaseView {
return frontier;
}
- getNodeFrontier(nodes, inEdges, edgeFilter) {
- let graph = this;
- var frontier = new Set();
- var newState = true;
- var edgeFrontier = graph.getEdgeFrontier(nodes, inEdges, edgeFilter);
+ getNodeFrontier(nodes: Iterable<GNode>, inEdges: boolean,
+ edgeFilter: (e: Edge, i: number) => boolean) {
+ const view = this;
+ const frontier: Set<GNode> = new Set();
+ let newState = true;
+ const edgeFrontier = view.getEdgeFrontier(nodes, inEdges, edgeFilter);
// Control key toggles edges rather than just turning them on
if (d3.event.ctrlKey) {
- edgeFrontier.forEach(function (edge) {
+ edgeFrontier.forEach(function (edge: Edge) {
if (edge.visible) {
newState = false;
}
});
}
- edgeFrontier.forEach(function (edge) {
+ edgeFrontier.forEach(function (edge: Edge) {
edge.visible = newState;
if (newState) {
- var node = inEdges ? edge.source : edge.target;
+ const node = inEdges ? edge.source : edge.target;
node.visible = true;
frontier.add(node);
}
});
- graph.updateGraphVisibility();
+ view.updateGraphVisibility();
if (newState) {
return frontier;
} else {
@@ -261,8 +227,39 @@ export class GraphView extends View implements PhaseView {
}
initializeContent(data, rememberedSelection) {
- this.createGraph(data, rememberedSelection);
- if (rememberedSelection != null) {
+ this.show();
+ function createImgInput(id: string, title: string, onClick): HTMLElement {
+ const input = document.createElement("input");
+ input.setAttribute("id", id);
+ input.setAttribute("type", "image");
+ input.setAttribute("title", title);
+ input.setAttribute("src", `img/${id}-icon.png`);
+ input.className = "button-input graph-toolbox-item";
+ input.addEventListener("click", onClick);
+ return input;
+ }
+ this.toolbox.appendChild(createImgInput("layout", "layout graph",
+ partial(this.layoutAction, this)));
+ this.toolbox.appendChild(createImgInput("show-all", "show all nodes",
+ partial(this.showAllAction, this)));
+ this.toolbox.appendChild(createImgInput("show-control", "show all nodes",
+ partial(this.showControlAction, this)));
+ this.toolbox.appendChild(createImgInput("toggle-hide-dead", "show only live nodes",
+ partial(this.toggleHideDead, this)));
+ this.toolbox.appendChild(createImgInput("hide-unselected", "show only live nodes",
+ partial(this.hideUnselectedAction, this)));
+ this.toolbox.appendChild(createImgInput("hide-selected", "show only live nodes",
+ partial(this.hideSelectedAction, this)));
+ this.toolbox.appendChild(createImgInput("zoom-selection", "show only live nodes",
+ partial(this.zoomSelectionAction, this)));
+ this.toolbox.appendChild(createImgInput("toggle-types", "show only live nodes",
+ partial(this.toggleTypesAction, this)));
+
+ this.phaseName = data.name;
+ this.createGraph(data.data, rememberedSelection);
+ this.broker.addNodeHandler(this.selectionHandler);
+
+ if (rememberedSelection != null && rememberedSelection.size > 0) {
this.attachSelection(rememberedSelection);
this.connectVisibleSelectedNodes();
this.viewSelection();
@@ -272,88 +269,50 @@ export class GraphView extends View implements PhaseView {
}
deleteContent() {
- if (this.visibleNodes) {
- this.nodes = [];
- this.edges = [];
- this.nodeMap = [];
- this.updateGraphVisibility();
+ for (const item of this.toolbox.querySelectorAll(".graph-toolbox-item")) {
+ item.parentElement.removeChild(item);
}
- };
-
- measureText(text) {
- const textMeasure = document.getElementById('text-measure') as SVGTSpanElement;
- textMeasure.textContent = text;
- return {
- width: textMeasure.getBBox().width,
- height: textMeasure.getBBox().height,
- };
- }
- createGraph(data, rememberedSelection) {
- var g = this;
- g.nodes = [];
- g.nodeMap = [];
- data.nodes.forEach(function (n, i) {
- n.__proto__ = GNode.prototype;
+ for (const n of this.graph.nodes()) {
n.visible = false;
- n.x = 0;
- n.y = 0;
- if (typeof n.pos === "number") {
- // Backwards compatibility.
- n.sourcePosition = { scriptOffset: n.pos, inliningId: -1 };
- }
- n.rank = MAX_RANK_SENTINEL;
- n.inputs = [];
- n.outputs = [];
- n.rpo = -1;
- n.outputApproach = MINIMUM_NODE_OUTPUT_APPROACH;
- n.cfg = n.control;
- g.nodeMap[n.id] = n;
- n.displayLabel = n.getDisplayLabel();
- n.labelbbox = g.measureText(n.displayLabel);
- n.typebbox = g.measureText(n.getDisplayType());
- var innerwidth = Math.max(n.labelbbox.width, n.typebbox.width);
- n.width = alignUp(innerwidth + NODE_INPUT_WIDTH * 2,
- NODE_INPUT_WIDTH);
- var innerheight = Math.max(n.labelbbox.height, n.typebbox.height);
- n.normalheight = innerheight + 20;
- g.nodes.push(n);
- });
- g.edges = [];
- data.edges.forEach(function (e, i) {
- var t = g.nodeMap[e.target];
- var s = g.nodeMap[e.source];
- var newEdge = new Edge(t, e.index, s, e.type);
- t.inputs.push(newEdge);
- s.outputs.push(newEdge);
- g.edges.push(newEdge);
- if (e.type == 'control') {
- s.cfg = true;
- }
+ }
+ this.graph.forEachEdge((e: Edge) => {
+ e.visible = false;
});
- g.nodes.forEach(function (n, i) {
- n.visible = isNodeInitiallyVisible(n) && (!g.state.hideDead || n.isLive());
- if (rememberedSelection != undefined) {
- if (rememberedSelection.has(nodeToStringKey(n))) {
- n.visible = true;
- }
+ this.updateGraphVisibility();
+ }
+
+ public hide(): void {
+ super.hide();
+ this.deleteContent();
+ }
+
+ createGraph(data, rememberedSelection) {
+ this.graph = new Graph(data);
+
+ this.showControlAction(this);
+
+ if (rememberedSelection != undefined) {
+ for (const n of this.graph.nodes()) {
+ n.visible = n.visible || rememberedSelection.has(nodeToStringKey(n));
}
- });
- g.updateGraphVisibility();
- g.layoutGraph();
- g.updateGraphVisibility();
- g.viewWholeGraph();
+ }
+
+ this.graph.forEachEdge(e => e.visible = e.source.visible && e.target.visible);
+
+ this.layoutGraph();
+ this.updateGraphVisibility();
}
connectVisibleSelectedNodes() {
- var graph = this;
- for (const n of graph.state.selection) {
- n.inputs.forEach(function (edge) {
+ const view = this;
+ for (const n of view.state.selection) {
+ n.inputs.forEach(function (edge: Edge) {
if (edge.source.visible && edge.target.visible) {
edge.visible = true;
}
});
- n.outputs.forEach(function (edge) {
+ n.outputs.forEach(function (edge: Edge) {
if (edge.source.visible && edge.target.visible) {
edge.visible = true;
}
@@ -362,52 +321,51 @@ export class GraphView extends View implements PhaseView {
}
updateInputAndOutputBubbles() {
- var g = this;
- var s = g.visibleBubbles;
+ const view = this;
+ const g = this.graph;
+ const s = this.visibleBubbles;
s.classed("filledBubbleStyle", function (c) {
- var components = this.id.split(',');
+ const components = this.id.split(',');
if (components[0] == "ib") {
- var edge = g.nodeMap[components[3]].inputs[components[2]];
+ const edge = g.nodeMap[components[3]].inputs[components[2]];
return edge.isVisible();
} else {
return g.nodeMap[components[1]].areAnyOutputsVisible() == 2;
}
}).classed("halfFilledBubbleStyle", function (c) {
- var components = this.id.split(',');
+ const components = this.id.split(',');
if (components[0] == "ib") {
- var edge = g.nodeMap[components[3]].inputs[components[2]];
return false;
} else {
return g.nodeMap[components[1]].areAnyOutputsVisible() == 1;
}
}).classed("bubbleStyle", function (c) {
- var components = this.id.split(',');
+ const components = this.id.split(',');
if (components[0] == "ib") {
- var edge = g.nodeMap[components[3]].inputs[components[2]];
+ const edge = g.nodeMap[components[3]].inputs[components[2]];
return !edge.isVisible();
} else {
return g.nodeMap[components[1]].areAnyOutputsVisible() == 0;
}
});
s.each(function (c) {
- var components = this.id.split(',');
+ const components = this.id.split(',');
if (components[0] == "ob") {
- var from = g.nodeMap[components[1]];
- var x = from.getOutputX();
- var y = g.getNodeHeight(from) + DEFAULT_NODE_BUBBLE_RADIUS;
- var transform = "translate(" + x + "," + y + ")";
+ const from = g.nodeMap[components[1]];
+ const x = from.getOutputX();
+ const y = from.getNodeHeight(view.state.showTypes) + DEFAULT_NODE_BUBBLE_RADIUS;
+ const transform = "translate(" + x + "," + y + ")";
this.setAttribute('transform', transform);
}
});
}
attachSelection(s) {
- const graph = this;
if (!(s instanceof Set)) return;
- graph.selectionHandler.clear();
- const selected = graph.nodes.filter((n) =>
- s.has(graph.state.selection.stringKey(n)) && (!graph.state.hideDead || n.isLive()));
- graph.selectionHandler.select(selected, true);
+ this.selectionHandler.clear();
+ const selected = [...this.graph.nodes(n =>
+ s.has(this.state.selection.stringKey(n)) && (!this.state.hideDead || n.isLive()))];
+ this.selectionHandler.select(selected, true);
}
detachSelection() {
@@ -415,134 +373,135 @@ export class GraphView extends View implements PhaseView {
}
selectAllNodes() {
- var graph = this;
if (!d3.event.shiftKey) {
- graph.state.selection.clear();
+ this.state.selection.clear();
}
- const allVisibleNodes = graph.nodes.filter((n) => n.visible);
- graph.state.selection.select(allVisibleNodes, true);
- graph.updateGraphVisibility();
+ const allVisibleNodes = [...this.graph.nodes(n => n.visible)];
+ this.state.selection.select(allVisibleNodes, true);
+ this.updateGraphVisibility();
}
- layoutAction(graph) {
- graph.updateGraphVisibility();
+ layoutAction(graph: GraphView) {
graph.layoutGraph();
graph.updateGraphVisibility();
graph.viewWholeGraph();
}
- showAllAction(graph) {
- graph.nodes.forEach(function (n) {
- n.visible = !graph.state.hideDead || n.isLive();
+ showAllAction(view: GraphView) {
+ for (const n of view.graph.nodes()) {
+ n.visible = !view.state.hideDead || n.isLive();
+ }
+ view.graph.forEachEdge((e: Edge) => {
+ e.visible = e.source.visible || e.target.visible;
});
- graph.edges.forEach(function (e) {
- e.visible = !graph.state.hideDead || (e.source.isLive() && e.target.isLive());
+ view.updateGraphVisibility();
+ view.viewWholeGraph();
+ }
+
+ showControlAction(view: GraphView) {
+ for (const n of view.graph.nodes()) {
+ n.visible = n.cfg && (!view.state.hideDead || n.isLive());
+ }
+ view.graph.forEachEdge((e: Edge) => {
+ e.visible = e.type == 'control' && e.source.visible && e.target.visible;
});
- graph.updateGraphVisibility();
- graph.viewWholeGraph();
+ view.updateGraphVisibility();
+ view.viewWholeGraph();
}
- toggleHideDead(graph) {
- graph.state.hideDead = !graph.state.hideDead;
- if (graph.state.hideDead) graph.hideDead();
- var element = document.getElementById('toggle-hide-dead');
- element.classList.toggle('button-input-toggled', graph.state.hideDead);
+ toggleHideDead(view: GraphView) {
+ view.state.hideDead = !view.state.hideDead;
+ if (view.state.hideDead) view.hideDead();
+ const element = document.getElementById('toggle-hide-dead');
+ element.classList.toggle('button-input-toggled', view.state.hideDead);
}
hideDead() {
- const graph = this;
- graph.nodes.filter(function (n) {
+ for (const n of this.graph.nodes()) {
if (!n.isLive()) {
n.visible = false;
- graph.state.selection.select([n], false);
+ this.state.selection.select([n], false);
}
- })
- graph.updateGraphVisibility();
+ }
+ this.updateGraphVisibility();
}
- hideUnselectedAction(graph) {
- graph.nodes.forEach(function (n) {
- if (!graph.state.selection.isSelected(n)) {
+ hideUnselectedAction(view: GraphView) {
+ for (const n of view.graph.nodes()) {
+ if (!view.state.selection.isSelected(n)) {
n.visible = false;
}
- });
- graph.updateGraphVisibility();
+ }
+ view.updateGraphVisibility();
}
- hideSelectedAction(graph) {
- graph.nodes.forEach(function (n) {
- if (graph.state.selection.isSelected(n)) {
+ hideSelectedAction(view: GraphView) {
+ for (const n of view.graph.nodes()) {
+ if (view.state.selection.isSelected(n)) {
n.visible = false;
}
- });
- graph.selectionHandler.clear();
+ }
+ view.selectionHandler.clear();
}
- zoomSelectionAction(graph) {
- graph.viewSelection();
+ zoomSelectionAction(view: GraphView) {
+ view.viewSelection();
}
- toggleTypesAction(graph) {
- graph.toggleTypes();
+ toggleTypesAction(view: GraphView) {
+ view.toggleTypes();
}
- searchInputAction(searchBar, e: KeyboardEvent) {
- const graph = this;
+ searchInputAction(searchBar: HTMLInputElement, e: KeyboardEvent, onlyVisible: boolean) {
if (e.keyCode == 13) {
- graph.selectionHandler.clear();
- var query = searchBar.value;
+ this.selectionHandler.clear();
+ const query = searchBar.value;
window.sessionStorage.setItem("lastSearch", query);
if (query.length == 0) return;
- var reg = new RegExp(query);
- var filterFunction = function (n) {
+ const reg = new RegExp(query);
+ const filterFunction = (n: GNode) => {
return (reg.exec(n.getDisplayLabel()) != null ||
- (graph.state.showTypes && reg.exec(n.getDisplayType())) ||
+ (this.state.showTypes && reg.exec(n.getDisplayType())) ||
(reg.exec(n.getTitle())) ||
- reg.exec(n.opcode) != null);
+ reg.exec(n.nodeLabel.opcode) != null);
};
- const selection = graph.nodes.filter(
- function (n, i) {
- if ((e.ctrlKey || n.visible) && filterFunction(n)) {
- if (e.ctrlKey) n.visible = true;
- return true;
- }
- return false;
- });
+ const selection = [...this.graph.nodes(n => {
+ if ((e.ctrlKey || n.visible || !onlyVisible) && filterFunction(n)) {
+ if (e.ctrlKey || !onlyVisible) n.visible = true;
+ return true;
+ }
+ return false;
+ })];
- graph.selectionHandler.select(selection, true);
- graph.connectVisibleSelectedNodes();
- graph.updateGraphVisibility();
+ this.selectionHandler.select(selection, true);
+ this.connectVisibleSelectedNodes();
+ this.updateGraphVisibility();
searchBar.blur();
- graph.viewSelection();
+ this.viewSelection();
}
e.stopPropagation();
}
svgKeyDown() {
- var state = this.state;
- var graph = this;
-
- // Don't handle key press repetition
- if (state.lastKeyDown !== -1) return;
+ const view = this;
+ const state = this.state;
- var showSelectionFrontierNodes = function (inEdges, filter, select) {
- var frontier = graph.getNodeFrontier(state.selection, inEdges, filter);
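+ // Fetches the nodes one edge away from the current selection and, when
+ // doSelect is set, selects them before refreshing visibility.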
+ const showSelectionFrontierNodes = (inEdges: boolean, filter: (e: Edge, i: number) => boolean, doSelect: boolean) => {
+ const frontier = view.getNodeFrontier(state.selection, inEdges, filter);
if (frontier != undefined && frontier.size) {
- if (select) {
+ if (doSelect) {
if (!d3.event.shiftKey) {
state.selection.clear();
}
- state.selection.select(frontier, true);
+ state.selection.select([...frontier], true);
}
- graph.updateGraphVisibility();
+ view.updateGraphVisibility();
}
- allowRepetition = false;
- }
+ };
- var allowRepetition = true;
- var eventHandled = true; // unless the below switch defaults
+ let eventHandled = true; // unless the below switch defaults
switch (d3.event.keyCode) {
case 49:
case 50:
@@ -555,8 +514,8 @@ export class GraphView extends View implements PhaseView {
case 57:
// '1'-'9'
showSelectionFrontierNodes(true,
- (edge, index) => { return index == (d3.event.keyCode - 49); },
- false);
+ (edge: Edge, index: number) => index == (d3.event.keyCode - 49),
+ !d3.event.ctrlKey);
break;
case 97:
case 98:
@@ -569,19 +528,19 @@ export class GraphView extends View implements PhaseView {
case 105:
// 'numpad 1'-'numpad 9'
showSelectionFrontierNodes(true,
- (edge, index) => { return index == (d3.event.keyCode - 97); },
- false);
+ (edge, index) => index == (d3.event.keyCode - 97),
+ !d3.event.ctrlKey);
break;
case 67:
// 'c'
showSelectionFrontierNodes(d3.event.altKey,
- (edge, index) => { return edge.type == 'control'; },
+ (edge, index) => edge.type == 'control',
true);
break;
case 69:
// 'e'
showSelectionFrontierNodes(d3.event.altKey,
- (edge, index) => { return edge.type == 'effect'; },
+ (edge, index) => edge.type == 'effect',
true);
break;
case 79:
@@ -590,21 +549,26 @@ export class GraphView extends View implements PhaseView {
break;
case 73:
// 'i'
- showSelectionFrontierNodes(true, undefined, false);
+ if (!d3.event.ctrlKey && !d3.event.shiftKey) {
+ showSelectionFrontierNodes(true, undefined, false);
+ } else {
+ eventHandled = false;
+ }
break;
case 65:
// 'a'
- graph.selectAllNodes();
- allowRepetition = false;
+ view.selectAllNodes();
break;
case 38:
+ // UP
case 40: {
+ // DOWN
showSelectionFrontierNodes(d3.event.keyCode == 38, undefined, true);
break;
}
case 82:
// 'r'
- if (!d3.event.ctrlKey) {
+ if (!d3.event.ctrlKey && !d3.event.shiftKey) {
this.layoutAction(this);
} else {
eventHandled = false;
@@ -612,11 +576,7 @@ export class GraphView extends View implements PhaseView {
break;
case 83:
// 's'
- graph.selectOrigins();
- break;
- case 191:
- // '/'
- document.getElementById("search-input").focus();
+ view.selectOrigins();
break;
default:
eventHandled = false;
@@ -625,93 +585,100 @@ export class GraphView extends View implements PhaseView {
if (eventHandled) {
d3.event.preventDefault();
}
- if (!allowRepetition) {
- state.lastKeyDown = d3.event.keyCode;
- }
}
- svgKeyUp() {
- this.state.lastKeyDown = -1
- };
-
layoutGraph() {
- layoutNodeGraph(this);
+ console.time("layoutGraph");
+ layoutNodeGraph(this.graph, this.state.showTypes);
+ const extent = this.graph.redetermineGraphBoundingBox(this.state.showTypes);
+ this.panZoom.translateExtent(extent);
+ this.minScale();
+ console.timeEnd("layoutGraph");
}
selectOrigins() {
const state = this.state;
const origins = [];
- let phase = null;
+ let phase = this.phaseName;
+ const selection = new Set<any>();
for (const n of state.selection) {
- if (n.origin) {
- const node = this.nodeMap[n.origin.nodeId];
- origins.push(node);
- phase = n.origin.phase;
+ const origin = n.nodeLabel.origin;
+ if (origin) {
+ phase = origin.phase;
+ const node = this.graph.nodeMap[origin.nodeId];
+ if (phase === this.phaseName && node) {
+ origins.push(node);
+ } else {
+ selection.add(`${origin.nodeId}`);
+ }
}
}
- if (origins.length) {
- state.selection.clear();
- state.selection.select(origins, true);
- if (phase) {
- this.showPhaseByName(phase);
- }
+ // Only go through phase reselection if we actually need
+ // to display another phase.
+ if (selection.size > 0 && phase !== this.phaseName) {
+ this.showPhaseByName(phase, selection);
+ } else if (origins.length > 0) {
+ this.selectionHandler.clear();
+ this.selectionHandler.select(origins, true);
}
}
// call to propagate changes to graph
updateGraphVisibility() {
- let graph = this;
- let state = graph.state;
+ const view = this;
+ const graph = this.graph;
+ const state = this.state;
+ if (!graph) return;
- var filteredEdges = graph.edges.filter(function (e) {
- return e.isVisible();
- });
- const selEdges = graph.visibleEdges.selectAll<SVGPathElement, Edge>("path").data(filteredEdges, edgeToStr);
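+ // An edge gets a DOM element whenever both endpoints are visible; edges that
+ // are merely hidden keep their element and receive the 'hidden' class below.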
+ const filteredEdges = [...graph.filteredEdges(function (e) {
+ return e.source.visible && e.target.visible;
+ })];
+ const selEdges = view.visibleEdges.selectAll<SVGPathElement, Edge>("path").data(filteredEdges, edgeToStr);
// remove old links
selEdges.exit().remove();
// add new paths
- selEdges.enter()
- .append('path')
- .style('marker-end', 'url(#end-arrow)')
- .classed('hidden', function (e) {
- return !e.isVisible();
- })
+ const newEdges = selEdges.enter()
+ .append('path');
+
+ newEdges.style('marker-end', 'url(#end-arrow)')
.attr("id", function (edge) { return "e," + edge.stringID(); })
.on("click", function (edge) {
d3.event.stopPropagation();
if (!d3.event.shiftKey) {
- graph.selectionHandler.clear();
+ view.selectionHandler.clear();
}
- graph.selectionHandler.select([edge.source, edge.target], true);
+ view.selectionHandler.select([edge.source, edge.target], true);
})
- .attr("adjacentToHover", "false");
-
- // Set the correct styles on all of the paths
- selEdges.classed('value', function (e) {
- return e.type == 'value' || e.type == 'context';
- }).classed('control', function (e) {
- return e.type == 'control';
- }).classed('effect', function (e) {
- return e.type == 'effect';
- }).classed('frame-state', function (e) {
- return e.type == 'frame-state';
- }).attr('stroke-dasharray', function (e) {
- if (e.type == 'frame-state') return "10,10";
- return (e.type == 'effect') ? "5,5" : "";
- });
+ .attr("adjacentToHover", "false")
+ .classed('value', function (e) {
+ return e.type == 'value' || e.type == 'context';
+ }).classed('control', function (e) {
+ return e.type == 'control';
+ }).classed('effect', function (e) {
+ return e.type == 'effect';
+ }).classed('frame-state', function (e) {
+ return e.type == 'frame-state';
+ }).attr('stroke-dasharray', function (e) {
+ if (e.type == 'frame-state') return "10,10";
+ return (e.type == 'effect') ? "5,5" : "";
+ });
+
+ const newAndOldEdges = newEdges.merge(selEdges);
+
+ newAndOldEdges.classed('hidden', e => !e.isVisible());
// select existing nodes
- const filteredNodes = graph.nodes.filter(n => n.visible);
- const allNodes = graph.visibleNodes.selectAll<SVGGElement, GNode>("g");
+ const filteredNodes = [...graph.nodes(n => n.visible)];
+ const allNodes = view.visibleNodes.selectAll<SVGGElement, GNode>("g");
const selNodes = allNodes.data(filteredNodes, nodeToStr);
// remove old nodes
selNodes.exit().remove();
// add new nodes
- var newGs = selNodes.enter()
+ const newGs = selNodes.enter()
.append("g");
newGs.classed("turbonode", function (n) { return true; })
@@ -723,36 +690,33 @@ export class GraphView extends View implements PhaseView {
.classed("simplified", function (n) { return n.isSimplified(); })
.classed("machine", function (n) { return n.isMachine(); })
.on('mouseenter', function (node) {
- const visibleEdges = graph.visibleEdges.selectAll<SVGPathElement, Edge>('path');
- const adjInputEdges = visibleEdges.filter(e => { return e.target === node; });
- const adjOutputEdges = visibleEdges.filter(e => { return e.source === node; });
+ const visibleEdges = view.visibleEdges.selectAll<SVGPathElement, Edge>('path');
+ const adjInputEdges = visibleEdges.filter(e => e.target === node);
+ const adjOutputEdges = visibleEdges.filter(e => e.source === node);
adjInputEdges.attr('relToHover', "input");
adjOutputEdges.attr('relToHover', "output");
const adjInputNodes = adjInputEdges.data().map(e => e.source);
- const visibleNodes = graph.visibleNodes.selectAll<SVGGElement, GNode>("g");
- const input = visibleNodes.data<GNode>(adjInputNodes, nodeToStr)
- .attr('relToHover', "input");
+ const visibleNodes = view.visibleNodes.selectAll<SVGGElement, GNode>("g");
+ visibleNodes.data<GNode>(adjInputNodes, nodeToStr).attr('relToHover', "input");
const adjOutputNodes = adjOutputEdges.data().map(e => e.target);
- const output = visibleNodes.data<GNode>(adjOutputNodes, nodeToStr)
- .attr('relToHover', "output");
- graph.updateGraphVisibility();
+ visibleNodes.data<GNode>(adjOutputNodes, nodeToStr).attr('relToHover', "output");
+ view.updateGraphVisibility();
})
.on('mouseleave', function (node) {
- const visibleEdges = graph.visibleEdges.selectAll<SVGPathElement, Edge>('path');
- const adjEdges = visibleEdges.filter(e => { return e.target === node || e.source === node; });
+ const visibleEdges = view.visibleEdges.selectAll<SVGPathElement, Edge>('path');
+ const adjEdges = visibleEdges.filter(e => e.target === node || e.source === node);
adjEdges.attr('relToHover', "none");
const adjNodes = adjEdges.data().map(e => e.target).concat(adjEdges.data().map(e => e.source));
- const visibleNodes = graph.visibleNodes.selectAll<SVGPathElement, GNode>("g");
- const nodes = visibleNodes.data(adjNodes, nodeToStr)
- .attr('relToHover', "none");
- graph.updateGraphVisibility();
+ const visibleNodes = view.visibleNodes.selectAll<SVGPathElement, GNode>("g");
+ visibleNodes.data(adjNodes, nodeToStr).attr('relToHover', "none");
+ view.updateGraphVisibility();
})
- .on("click", (d) => {
- if (!d3.event.shiftKey) graph.selectionHandler.clear();
- graph.selectionHandler.select([d], undefined);
+ .on("click", d => {
+ if (!d3.event.shiftKey) view.selectionHandler.clear();
+ view.selectionHandler.select([d], undefined);
d3.event.stopPropagation();
})
- .call(graph.drag)
+ .call(view.drag);
newGs.append("rect")
.attr("rx", 10)
@@ -761,14 +725,14 @@ export class GraphView extends View implements PhaseView {
return d.getTotalNodeWidth();
})
.attr('height', function (d) {
- return graph.getNodeHeight(d);
- })
+ return d.getNodeHeight(view.state.showTypes);
+ });
function appendInputAndOutputBubbles(g, d) {
- for (var i = 0; i < d.inputs.length; ++i) {
- var x = d.getInputX(i);
- var y = -DEFAULT_NODE_BUBBLE_RADIUS;
- var s = g.append('circle')
+ for (let i = 0; i < d.inputs.length; ++i) {
+ const x = d.getInputX(i);
+ const y = -DEFAULT_NODE_BUBBLE_RADIUS;
+ g.append('circle')
.classed("filledBubbleStyle", function (c) {
return d.inputs[i].isVisible();
})
@@ -780,20 +744,20 @@ export class GraphView extends View implements PhaseView {
.attr("transform", function (d) {
return "translate(" + x + "," + y + ")";
})
- .on("click", function (d) {
- var components = this.id.split(',');
- var node = graph.nodeMap[components[3]];
- var edge = node.inputs[components[2]];
- var visible = !edge.isVisible();
+ .on("click", function (this: SVGCircleElement, d) {
+ const components = this.id.split(',');
+ const node = graph.nodeMap[components[3]];
+ const edge = node.inputs[components[2]];
+ const visible = !edge.isVisible();
node.setInputVisibility(components[2], visible);
d3.event.stopPropagation();
- graph.updateGraphVisibility();
+ view.updateGraphVisibility();
});
}
if (d.outputs.length != 0) {
- var x = d.getOutputX();
- var y = graph.getNodeHeight(d) + DEFAULT_NODE_BUBBLE_RADIUS;
- var s = g.append('circle')
+ const x = d.getOutputX();
+ const y = d.getNodeHeight(view.state.showTypes) + DEFAULT_NODE_BUBBLE_RADIUS;
+ g.append('circle')
.classed("filledBubbleStyle", function (c) {
return d.areAnyOutputsVisible() == 2;
})
@@ -811,7 +775,7 @@ export class GraphView extends View implements PhaseView {
.on("click", function (d) {
d.setOutputVisibility(d.areAnyOutputsVisible() == 0);
d3.event.stopPropagation();
- graph.updateGraphVisibility();
+ view.updateGraphVisibility();
});
}
}
@@ -833,8 +797,8 @@ export class GraphView extends View implements PhaseView {
.append("title")
.text(function (l) {
return d.getTitle();
- })
- if (d.type != undefined) {
+ });
+ if (d.nodeLabel.type != undefined) {
d3.select(this).append("text")
.classed("label", true)
.classed("type", true)
@@ -848,14 +812,14 @@ export class GraphView extends View implements PhaseView {
.append("title")
.text(function (l) {
return d.getType();
- })
+ });
}
});
const newAndOldNodes = newGs.merge(selNodes);
newAndOldNodes.select<SVGTextElement>('.type').each(function (d) {
- this.setAttribute('visibility', graph.state.showTypes ? 'visible' : 'hidden');
+ this.setAttribute('visibility', view.state.showTypes ? 'visible' : 'hidden');
});
newAndOldNodes
@@ -865,15 +829,15 @@ export class GraphView extends View implements PhaseView {
})
.attr("transform", function (d) { return "translate(" + d.x + "," + d.y + ")"; })
.select('rect')
- .attr('height', function (d) { return graph.getNodeHeight(d); });
+ .attr('height', function (d) { return d.getNodeHeight(view.state.showTypes); });
- graph.visibleBubbles = d3.selectAll('circle');
+ view.visibleBubbles = d3.selectAll('circle');
- graph.updateInputAndOutputBubbles();
+ view.updateInputAndOutputBubbles();
graph.maxGraphX = graph.maxGraphNodeX;
- selEdges.attr("d", function (edge) {
- return edge.generatePath(graph);
+ newAndOldEdges.attr("d", function (edge) {
+ return edge.generatePath(graph, view.state.showTypes);
});
}
@@ -886,10 +850,9 @@ export class GraphView extends View implements PhaseView {
}
minScale() {
- const graph = this;
const dimensions = this.getSvgViewDimensions();
- const minXScale = dimensions[0] / (2 * graph.width);
- const minYScale = dimensions[1] / (2 * graph.height);
+ const minXScale = dimensions[0] / (2 * this.graph.width);
+ const minYScale = dimensions[1] / (2 * this.graph.height);
const minScale = Math.min(minXScale, minYScale);
this.panZoom.scaleExtent([minScale, 40]);
return minScale;
@@ -897,48 +860,49 @@ export class GraphView extends View implements PhaseView {
onresize() {
const trans = d3.zoomTransform(this.svg.node());
- const ctrans = this.panZoom.constrain()(trans, this.getSvgExtent(), this.panZoom.translateExtent())
- this.panZoom.transform(this.svg, ctrans)
+ const ctrans = this.panZoom.constrain()(trans, this.getSvgExtent(), this.panZoom.translateExtent());
+ this.panZoom.transform(this.svg, ctrans);
}
toggleTypes() {
- var graph = this;
- graph.state.showTypes = !graph.state.showTypes;
- var element = document.getElementById('toggle-types');
- element.classList.toggle('button-input-toggled', graph.state.showTypes);
- graph.updateGraphVisibility();
+ const view = this;
+ view.state.showTypes = !view.state.showTypes;
+ const element = document.getElementById('toggle-types');
+ element.classList.toggle('button-input-toggled', view.state.showTypes);
+ view.updateGraphVisibility();
}
viewSelection() {
- var graph = this;
- var minX, maxX, minY, maxY;
- var hasSelection = false;
- graph.visibleNodes.selectAll<SVGGElement, GNode>("g").each(function (n) {
- if (graph.state.selection.isSelected(n)) {
+ const view = this;
+ let minX;
+ let maxX;
+ let minY;
+ let maxY;
+ let hasSelection = false;
+ view.visibleNodes.selectAll<SVGGElement, GNode>("g").each(function (n) {
+ if (view.state.selection.isSelected(n)) {
hasSelection = true;
minX = minX ? Math.min(minX, n.x) : n.x;
maxX = maxX ? Math.max(maxX, n.x + n.getTotalNodeWidth()) :
n.x + n.getTotalNodeWidth();
minY = minY ? Math.min(minY, n.y) : n.y;
- maxY = maxY ? Math.max(maxY, n.y + graph.getNodeHeight(n)) :
- n.y + graph.getNodeHeight(n);
+ maxY = maxY ? Math.max(maxY, n.y + n.getNodeHeight(view.state.showTypes)) :
+ n.y + n.getNodeHeight(view.state.showTypes);
}
});
if (hasSelection) {
- graph.viewGraphRegion(minX - NODE_INPUT_WIDTH, minY - 60,
- maxX + NODE_INPUT_WIDTH, maxY + 60,
- true);
+ view.viewGraphRegion(minX - NODE_INPUT_WIDTH, minY - 60,
+ maxX + NODE_INPUT_WIDTH, maxY + 60);
}
}
- viewGraphRegion(minX, minY, maxX, maxY, transition) {
+ viewGraphRegion(minX, minY, maxX, maxY) {
const [width, height] = this.getSvgViewDimensions();
const dx = maxX - minX;
const dy = maxY - minY;
const x = (minX + maxX) / 2;
const y = (minY + maxY) / 2;
const scale = Math.min(width / (1.1 * dx), height / (1.1 * dy));
- const transform = d3.zoomIdentity.translate(1500, 100).scale(0.75);
this.svg
.transition().duration(300).call(this.panZoom.translateTo, x, y)
.transition().duration(300).call(this.panZoom.scaleTo, scale)
@@ -947,6 +911,8 @@ export class GraphView extends View implements PhaseView {
viewWholeGraph() {
this.panZoom.scaleTo(this.svg, 0);
- this.panZoom.translateTo(this.svg, this.minGraphX + this.width / 2, this.minGraphY + this.height / 2)
+ this.panZoom.translateTo(this.svg,
+ this.graph.minGraphX + this.graph.width / 2,
+ this.graph.minGraphY + this.graph.height / 2);
}
}
diff --git a/deps/v8/tools/turbolizer/src/graph.ts b/deps/v8/tools/turbolizer/src/graph.ts
new file mode 100644
index 0000000000..0eb2e3e1e6
--- /dev/null
+++ b/deps/v8/tools/turbolizer/src/graph.ts
@@ -0,0 +1,107 @@
+import { GNode } from "./node";
+import { Edge, MINIMUM_EDGE_SEPARATION } from "./edge";
+
+export class Graph {
+ nodeMap: Array<GNode>;
+ minGraphX: number;
+ maxGraphX: number;
+ minGraphY: number;
+ maxGraphY: number;
+ maxGraphNodeX: number;
+ maxBackEdgeNumber: number;
+ width: number;
+ height: number;
+
+ constructor(data: any) {
+ this.nodeMap = [];
+
+ this.minGraphX = 0;
+ this.maxGraphX = 1;
+ this.minGraphY = 0;
+ this.maxGraphY = 1;
+ this.width = 1;
+ this.height = 1;
+
+ data.nodes.forEach((jsonNode: any) => {
+ this.nodeMap[jsonNode.id] = new GNode(jsonNode.nodeLabel);
+ });
+
+ data.edges.forEach((e: any) => {
+ const t = this.nodeMap[e.target];
+ const s = this.nodeMap[e.source];
+ const newEdge = new Edge(t, e.index, s, e.type);
+ t.inputs.push(newEdge);
+ s.outputs.push(newEdge);
+ if (e.type == 'control') {
+ // Every source of a control edge is a CFG node.
+ s.cfg = true;
+ }
+ });
+ }
+
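+ // Generator over all materialized nodes satisfying the predicate p. The
+ // nodeMap array is sparse (indexed by node id), so holes are skipped.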
+ *nodes(p = (n: GNode) => true) {
+ for (const node of this.nodeMap) {
+ if (!node || !p(node)) continue;
+ yield node;
+ }
+ }
+
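+ // Generator over all edges satisfying the predicate p. Each edge lives on
+ // exactly one target node's input list, so every edge is visited once.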
+ *filteredEdges(p: (e: Edge) => boolean) {
+ for (const node of this.nodes()) {
+ for (const edge of node.inputs) {
+ if (p(edge)) yield edge;
+ }
+ }
+ }
+
+ forEachEdge(p: (e: Edge) => void) {
+ for (const node of this.nodeMap) {
+ if (!node) continue;
+ for (const edge of node.inputs) {
+ p(edge);
+ }
+ }
+ }
+
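+ // Recomputes the bounding box from the visible nodes and returns the
+ // pan/zoom translate extent, padded by half the graph size on each side.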
+ redetermineGraphBoundingBox(showTypes: boolean): [[number, number], [number, number]] {
+ this.minGraphX = 0;
+ this.maxGraphNodeX = 1;
+ this.maxGraphX = undefined; // see below
+ this.minGraphY = 0;
+ this.maxGraphY = 1;
+
+ for (const node of this.nodes()) {
+ if (!node.visible) {
+ continue;
+ }
+
+ if (node.x < this.minGraphX) {
+ this.minGraphX = node.x;
+ }
+ if ((node.x + node.getTotalNodeWidth()) > this.maxGraphNodeX) {
+ this.maxGraphNodeX = node.x + node.getTotalNodeWidth();
+ }
+ if ((node.y - 50) < this.minGraphY) {
+ this.minGraphY = node.y - 50;
+ }
+ if ((node.y + node.getNodeHeight(showTypes) + 50) > this.maxGraphY) {
+ this.maxGraphY = node.y + node.getNodeHeight(showTypes) + 50;
+ }
+ }
+
+ this.maxGraphX = this.maxGraphNodeX +
+ this.maxBackEdgeNumber * MINIMUM_EDGE_SEPARATION;
+
+ this.width = this.maxGraphX - this.minGraphX;
+ this.height = this.maxGraphY - this.minGraphY;
+
+ const extent: [[number, number], [number, number]] = [
+ [this.minGraphX - this.width / 2, this.minGraphY - this.height / 2],
+ [this.maxGraphX + this.width / 2, this.maxGraphY + this.height / 2]
+ ];
+
+ return extent;
+ }
+
+}
diff --git a/deps/v8/tools/turbolizer/src/graphmultiview.ts b/deps/v8/tools/turbolizer/src/graphmultiview.ts
index f9e7efb58c..43ec418ee0 100644
--- a/deps/v8/tools/turbolizer/src/graphmultiview.ts
+++ b/deps/v8/tools/turbolizer/src/graphmultiview.ts
@@ -2,12 +2,24 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-import {GraphView} from "./graph-view.js"
-import {ScheduleView} from "./schedule-view.js"
-import {SequenceView} from "./sequence-view.js"
-import {SourceResolver} from "./source-resolver.js"
-import {SelectionBroker} from "./selection-broker.js"
-import {View, PhaseView} from "./view.js"
+import { GraphView } from "../src/graph-view";
+import { ScheduleView } from "../src/schedule-view";
+import { SequenceView } from "../src/sequence-view";
+import { SourceResolver } from "../src/source-resolver";
+import { SelectionBroker } from "../src/selection-broker";
+import { View, PhaseView } from "../src/view";
+
+const multiviewID = "multiview";
+
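+// Toolbox markup injected above the graph: the phase selector plus the
+// regex search controls.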
+const toolboxHTML = `
+<div class="graph-toolbox">
+ <select id="phase-select">
+ <option disabled selected>(please open a file)</option>
+ </select>
+ <input id="search-input" type="text" title="search nodes for regex" alt="search node for regex" class="search-input"
+ placeholder="find with regexp&hellip;">
+ <label><input id="search-only-visible" type="checkbox" name="instruction-address" alt="Apply search to visible nodes only">only visible</label>
+</div>`;
export class GraphMultiView extends View {
sourceResolver: SourceResolver;
@@ -16,11 +28,12 @@ export class GraphMultiView extends View {
schedule: ScheduleView;
sequence: SequenceView;
selectMenu: HTMLSelectElement;
- currentPhaseView: View & PhaseView;
+ currentPhaseView: PhaseView;
createViewElement() {
- const pane = document.createElement('div');
- pane.setAttribute('id', "multiview");
+ const pane = document.createElement("div");
+ pane.setAttribute("id", multiviewID);
+ pane.className = "viewpane";
return pane;
}
@@ -29,35 +42,50 @@ export class GraphMultiView extends View {
const view = this;
view.sourceResolver = sourceResolver;
view.selectionBroker = selectionBroker;
- const searchInput = document.getElementById("search-input") as HTMLInputElement;
+ const toolbox = document.createElement("div");
+ toolbox.className = "toolbox-anchor";
+ toolbox.innerHTML = toolboxHTML;
+ view.divNode.appendChild(toolbox);
+ const searchInput = toolbox.querySelector("#search-input") as HTMLInputElement;
+ const onlyVisibleCheckbox = toolbox.querySelector("#search-only-visible") as HTMLInputElement;
searchInput.addEventListener("keyup", e => {
if (!view.currentPhaseView) return;
- view.currentPhaseView.searchInputAction(searchInput, e)
+ view.currentPhaseView.searchInputAction(searchInput, e, onlyVisibleCheckbox.checked);
+ });
+ view.divNode.addEventListener("keyup", (e: KeyboardEvent) => {
+ if (e.keyCode == 191) { // keyCode == '/'
+ searchInput.focus();
+ }
});
searchInput.setAttribute("value", window.sessionStorage.getItem("lastSearch") || "");
- this.graph = new GraphView(id, selectionBroker,
- (phaseName) => view.displayPhaseByName(phaseName));
- this.schedule = new ScheduleView(id, selectionBroker);
- this.sequence = new SequenceView(id, selectionBroker);
- this.selectMenu = (<HTMLSelectElement>document.getElementById('display-selector'));
+ this.graph = new GraphView(this.divNode, selectionBroker, view.displayPhaseByName.bind(this),
+ toolbox.querySelector(".graph-toolbox"));
+ this.schedule = new ScheduleView(this.divNode, selectionBroker);
+ this.sequence = new SequenceView(this.divNode, selectionBroker);
+ this.selectMenu = toolbox.querySelector("#phase-select") as HTMLSelectElement;
}
initializeSelect() {
const view = this;
- view.selectMenu.innerHTML = '';
- view.sourceResolver.forEachPhase((phase) => {
+ view.selectMenu.innerHTML = "";
+ view.sourceResolver.forEachPhase(phase => {
const optionElement = document.createElement("option");
- optionElement.text = phase.name;
+ let maxNodeId = "";
+ if (phase.type == "graph" && phase.highestNodeId != 0) {
+ maxNodeId = ` ${phase.highestNodeId}`;
+ }
+ optionElement.text = `${phase.name}${maxNodeId}`;
view.selectMenu.add(optionElement);
});
this.selectMenu.onchange = function (this: HTMLSelectElement) {
- window.sessionStorage.setItem("lastSelectedPhase", this.selectedIndex.toString());
- view.displayPhase(view.sourceResolver.getPhase(this.selectedIndex));
- }
+ const phaseIndex = this.selectedIndex;
+ window.sessionStorage.setItem("lastSelectedPhase", phaseIndex.toString());
+ view.displayPhase(view.sourceResolver.getPhase(phaseIndex));
+ };
}
- show(data, rememberedSelection) {
- super.show(data, rememberedSelection);
+ show() {
+ super.show();
this.initializeSelect();
const lastPhaseIndex = +window.sessionStorage.getItem("lastSelectedPhase");
const initialPhaseIndex = this.sourceResolver.repairPhaseId(lastPhaseIndex);
@@ -65,29 +93,27 @@ export class GraphMultiView extends View {
this.displayPhase(this.sourceResolver.getPhase(initialPhaseIndex));
}
- initializeContent() { }
-
- displayPhase(phase) {
- if (phase.type == 'graph') {
- this.displayPhaseView(this.graph, phase.data);
- } else if (phase.type == 'schedule') {
- this.displayPhaseView(this.schedule, phase);
- } else if (phase.type == 'sequence') {
- this.displayPhaseView(this.sequence, phase);
+ displayPhase(phase, selection?: Set<any>) {
+ if (phase.type == "graph") {
+ this.displayPhaseView(this.graph, phase, selection);
+ } else if (phase.type == "schedule") {
+ this.displayPhaseView(this.schedule, phase, selection);
+ } else if (phase.type == "sequence") {
+ this.displayPhaseView(this.sequence, phase, selection);
}
}
- displayPhaseView(view, data) {
- const rememberedSelection = this.hideCurrentPhase();
- view.show(data, rememberedSelection);
- document.getElementById("middle").classList.toggle("scrollable", view.isScrollable());
+ displayPhaseView(view: PhaseView, data, selection?: Set<any>) {
+ const rememberedSelection = selection ? selection : this.hideCurrentPhase();
+ view.initializeContent(data, rememberedSelection);
+ this.divNode.classList.toggle("scrollable", view.isScrollable());
this.currentPhaseView = view;
}
- displayPhaseByName(phaseName) {
+ displayPhaseByName(phaseName, selection?: Set<any>) {
const phaseId = this.sourceResolver.getPhaseIdByName(phaseName);
- this.selectMenu.selectedIndex = phaseId - 1;
- this.displayPhase(this.sourceResolver.getPhase(phaseId));
+ this.selectMenu.selectedIndex = phaseId;
+ this.displayPhase(this.sourceResolver.getPhase(phaseId), selection);
}
hideCurrentPhase() {
@@ -104,10 +130,6 @@ export class GraphMultiView extends View {
if (this.currentPhaseView) this.currentPhaseView.onresize();
}
- deleteContent() {
- this.hideCurrentPhase();
- }
-
detachSelection() {
return null;
}
diff --git a/deps/v8/tools/turbolizer/src/info-view.ts b/deps/v8/tools/turbolizer/src/info-view.ts
new file mode 100644
index 0000000000..38585365ff
--- /dev/null
+++ b/deps/v8/tools/turbolizer/src/info-view.ts
@@ -0,0 +1,17 @@
+import { View } from "./view";
+
+export class InfoView extends View {
+
+ constructor(idOrContainer: HTMLElement | string) {
+ super(idOrContainer);
+ fetch("info-view.html")
+ .then(response => response.text())
+ .then(htmlText => this.divNode.innerHTML = htmlText);
+ }
+
+ createViewElement(): HTMLElement {
+ const infoContainer = document.createElement("div");
+ infoContainer.classList.add("info-container");
+ return infoContainer;
+ }
+}
diff --git a/deps/v8/tools/turbolizer/src/lang-disassembly.ts b/deps/v8/tools/turbolizer/src/lang-disassembly.ts
deleted file mode 100644
index 9312627abd..0000000000
--- a/deps/v8/tools/turbolizer/src/lang-disassembly.ts
+++ /dev/null
@@ -1,14 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-PR.registerLangHandler(
- PR.createSimpleLexer(
- [
- [PR.PR_STRING, /^(?:\'(?:[^\\\'\r\n]|\\.)*(?:\'|$))/, null, '\''],
- [PR.PR_PLAIN, /^\s+/, null, ' \r\n\t\xA0']
- ],
- [ // fallthroughStylePatterns
- [PR.PR_COMMENT, /;; debug: position \d+/, null],
- ]),
- ['disassembly']);
diff --git a/deps/v8/tools/turbolizer/src/node-label.ts b/deps/v8/tools/turbolizer/src/node-label.ts
new file mode 100644
index 0000000000..6e7d41d899
--- /dev/null
+++ b/deps/v8/tools/turbolizer/src/node-label.ts
@@ -0,0 +1,86 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
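+// Formats a node's origin as a human-readable string for node tooltips.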
+function formatOrigin(origin) {
+ if (origin.nodeId) {
+ return `#${origin.nodeId} in phase ${origin.phase}/${origin.reducer}`;
+ }
+ if (origin.bytecodePosition) {
+ return `Bytecode line ${origin.bytecodePosition} in phase ${origin.phase}/${origin.reducer}`;
+ }
+ return "unknown origin";
+}
+
+export class NodeLabel {
+ id: number;
+ label: string;
+ title: string;
+ live: boolean;
+ properties: string;
+ sourcePosition: any;
+ origin: any;
+ opcode: string;
+ control: boolean;
+ opinfo: string;
+ type: string;
+ inplaceUpdatePhase: string;
+
+ constructor(id: number, label: string, title: string, live: boolean, properties: string, sourcePosition: any, origin: any, opcode: string, control: boolean, opinfo: string, type: string) {
+ this.id = id;
+ this.label = label;
+ this.title = title;
+ this.live = live;
+ this.properties = properties;
+ this.sourcePosition = sourcePosition;
+ this.origin = origin;
+ this.opcode = opcode;
+ this.control = control;
+ this.opinfo = opinfo;
+ this.type = type;
+ this.inplaceUpdatePhase = null;
+ }
+
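+ // Field-by-field equality; sourcePosition, origin and inplaceUpdatePhase
+ // are not compared.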
+ equals(that?: NodeLabel) {
+ if (!that) return false;
+ if (this.id != that.id) return false;
+ if (this.label != that.label) return false;
+ if (this.title != that.title) return false;
+ if (this.live != that.live) return false;
+ if (this.properties != that.properties) return false;
+ if (this.opcode != that.opcode) return false;
+ if (this.control != that.control) return false;
+ if (this.opinfo != that.opinfo) return false;
+ if (this.type != that.type) return false;
+ return true;
+ }
+
+ getTitle() {
+ let propsString = "";
+ if (this.properties === "") {
+ propsString = "no properties";
+ } else {
+ propsString = "[" + this.properties + "]";
+ }
+ let title = this.title + "\n" + propsString + "\n" + this.opinfo;
+ if (this.origin) {
+ title += `\nOrigin: ${formatOrigin(this.origin)}`;
+ }
+ if (this.inplaceUpdatePhase) {
+ title += `\nInplace update in phase: ${this.inplaceUpdatePhase}`;
+ }
+ return title;
+ }
+
+ getDisplayLabel() {
+ const result = `${this.id}: ${this.label}`;
+ if (result.length > 40) {
+ return `${this.id}: ${this.opcode}`;
+ }
+ return result;
+ }
+
+ setInplaceUpdatePhase(name: string): void {
+ this.inplaceUpdatePhase = name;
+ }
+}
diff --git a/deps/v8/tools/turbolizer/src/node.ts b/deps/v8/tools/turbolizer/src/node.ts
index 95c47cab20..02906d1204 100644
--- a/deps/v8/tools/turbolizer/src/node.ts
+++ b/deps/v8/tools/turbolizer/src/node.ts
@@ -2,120 +2,111 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-import {NodeOrigin} from "./source-resolver.js"
-import {MINIMUM_EDGE_SEPARATION} from "./edge.js"
+import { MINIMUM_EDGE_SEPARATION, Edge } from "../src/edge";
+import { NodeLabel } from "./node-label";
+import { MAX_RANK_SENTINEL } from "./constants";
+import { alignUp, measureText } from "./util";
export const DEFAULT_NODE_BUBBLE_RADIUS = 12;
export const NODE_INPUT_WIDTH = 50;
export const MINIMUM_NODE_OUTPUT_APPROACH = 15;
const MINIMUM_NODE_INPUT_APPROACH = 15 + 2 * DEFAULT_NODE_BUBBLE_RADIUS;
-export function isNodeInitiallyVisible(node) {
- return node.cfg;
-}
-
-function formatOrigin(origin) {
- if (origin.nodeId) {
- return `#${origin.nodeId} in phase ${origin.phase}/${origin.reducer}`;
- }
- if (origin.bytecodePosition) {
- return `Bytecode line ${origin.bytecodePosition} in phase ${origin.phase}/${origin.reducer}`;
- }
- return "unknown origin";
-}
-
export class GNode {
- control: boolean;
- opcode: string;
- live: boolean;
- inputs: Array<any>;
- width: number;
- properties: string;
- title: string;
- label: string;
- origin: NodeOrigin;
- outputs: Array<any>;
- outputApproach: number;
- type: string;
id: number;
+ nodeLabel: NodeLabel;
+ displayLabel: string;
+ inputs: Array<Edge>;
+ outputs: Array<Edge>;
+ visible: boolean;
x: number;
y: number;
- visible: boolean;
rank: number;
- opinfo: string;
+ outputApproach: number;
+ cfg: boolean;
labelbbox: { width: number, height: number };
+ width: number;
+ normalheight: number;
+ visitOrderWithinRank: number;
+
+ constructor(nodeLabel: NodeLabel) {
+ this.id = nodeLabel.id;
+ this.nodeLabel = nodeLabel;
+ this.displayLabel = nodeLabel.getDisplayLabel();
+ this.inputs = [];
+ this.outputs = [];
+ this.visible = false;
+ this.x = 0;
+ this.y = 0;
+ this.rank = MAX_RANK_SENTINEL;
+ this.outputApproach = MINIMUM_NODE_OUTPUT_APPROACH;
+ // Every control node is a CFG node.
+ this.cfg = nodeLabel.control;
+ this.labelbbox = measureText(this.displayLabel);
+ const typebbox = measureText(this.getDisplayType());
+ const innerwidth = Math.max(this.labelbbox.width, typebbox.width);
+ this.width = alignUp(innerwidth + NODE_INPUT_WIDTH * 2,
+ NODE_INPUT_WIDTH);
+ const innerheight = Math.max(this.labelbbox.height, typebbox.height);
+ this.normalheight = innerheight + 20;
+ this.visitOrderWithinRank = 0;
+ }
isControl() {
- return this.control;
+ return this.nodeLabel.control;
}
isInput() {
- return this.opcode == 'Parameter' || this.opcode.endsWith('Constant');
+ return this.nodeLabel.opcode == 'Parameter' || this.nodeLabel.opcode.endsWith('Constant');
}
isLive() {
- return this.live !== false;
+ return this.nodeLabel.live !== false;
}
isJavaScript() {
- return this.opcode.startsWith('JS');
+ return this.nodeLabel.opcode.startsWith('JS');
}
isSimplified() {
if (this.isJavaScript()) return false;
- return this.opcode.endsWith('Phi') ||
- this.opcode.startsWith('Boolean') ||
- this.opcode.startsWith('Number') ||
- this.opcode.startsWith('String') ||
- this.opcode.startsWith('Change') ||
- this.opcode.startsWith('Object') ||
- this.opcode.startsWith('Reference') ||
- this.opcode.startsWith('Any') ||
- this.opcode.endsWith('ToNumber') ||
- (this.opcode == 'AnyToBoolean') ||
- (this.opcode.startsWith('Load') && this.opcode.length > 4) ||
- (this.opcode.startsWith('Store') && this.opcode.length > 5);
+ const opcode = this.nodeLabel.opcode;
+ return opcode.endsWith('Phi') ||
+ opcode.startsWith('Boolean') ||
+ opcode.startsWith('Number') ||
+ opcode.startsWith('String') ||
+ opcode.startsWith('Change') ||
+ opcode.startsWith('Object') ||
+ opcode.startsWith('Reference') ||
+ opcode.startsWith('Any') ||
+ opcode.endsWith('ToNumber') ||
+ (opcode == 'AnyToBoolean') ||
+ (opcode.startsWith('Load') && opcode.length > 4) ||
+ (opcode.startsWith('Store') && opcode.length > 5);
}
isMachine() {
return !(this.isControl() || this.isInput() ||
this.isJavaScript() || this.isSimplified());
}
getTotalNodeWidth() {
- var inputWidth = this.inputs.length * NODE_INPUT_WIDTH;
+ const inputWidth = this.inputs.length * NODE_INPUT_WIDTH;
return Math.max(inputWidth, this.width);
}
getTitle() {
- var propsString;
- if (this.properties === undefined) {
- propsString = "";
- } else if (this.properties === "") {
- propsString = "no properties";
- } else {
- propsString = "[" + this.properties + "]";
- }
- let title = this.title + "\n" + propsString + "\n" + this.opinfo;
- if (this.origin) {
- title += `\nOrigin: ${formatOrigin(this.origin)}`;
- }
- return title;
+ return this.nodeLabel.getTitle();
}
getDisplayLabel() {
- var result = this.id + ":" + this.label;
- if (result.length > 40) {
- return this.id + ":" + this.opcode;
- } else {
- return result;
- }
+ return this.nodeLabel.getDisplayLabel();
}
getType() {
- return this.type;
+ return this.nodeLabel.type;
}
getDisplayType() {
- var type_string = this.type;
- if (type_string == undefined) return "";
- if (type_string.length > 24) {
- type_string = type_string.substr(0, 25) + "...";
+ let typeString = this.nodeLabel.type;
+ if (typeString == undefined) return "";
+ if (typeString.length > 24) {
+ typeString = typeString.substr(0, 25) + "...";
}
- return type_string;
+ return typeString;
}
deepestInputRank() {
- var deepestRank = 0;
+ let deepestRank = 0;
this.inputs.forEach(function (e) {
if (e.isVisible() && !e.isBackEdge()) {
if (e.source.rank > deepestRank) {
@@ -126,14 +117,14 @@ export class GNode {
return deepestRank;
}
areAnyOutputsVisible() {
- var visibleCount = 0;
+ let visibleCount = 0;
this.outputs.forEach(function (e) { if (e.isVisible())++visibleCount; });
if (this.outputs.length == visibleCount) return 2;
if (visibleCount != 0) return 1;
return 0;
}
setOutputVisibility(v) {
- var result = false;
+ let result = false;
this.outputs.forEach(function (e) {
e.visible = v;
if (v) {
@@ -146,7 +137,7 @@ export class GNode {
return result;
}
setInputVisibility(i, v) {
- var edge = this.inputs[i];
+ const edge = this.inputs[i];
edge.visible = v;
if (v) {
if (!edge.source.visible) {
@@ -158,14 +149,21 @@ export class GNode {
}
getInputApproach(index) {
return this.y - MINIMUM_NODE_INPUT_APPROACH -
- (index % 4) * MINIMUM_EDGE_SEPARATION - DEFAULT_NODE_BUBBLE_RADIUS
+ (index % 4) * MINIMUM_EDGE_SEPARATION - DEFAULT_NODE_BUBBLE_RADIUS;
}
- getOutputApproach(graph) {
- return this.y + this.outputApproach + graph.getNodeHeight(this) +
+ getNodeHeight(showTypes: boolean): number {
+ if (showTypes) {
+ return this.normalheight + this.labelbbox.height;
+ } else {
+ return this.normalheight;
+ }
+ }
+ getOutputApproach(showTypes: boolean) {
+ return this.y + this.outputApproach + this.getNodeHeight(showTypes) +
+ DEFAULT_NODE_BUBBLE_RADIUS;
}
getInputX(index) {
- var result = this.getTotalNodeWidth() - (NODE_INPUT_WIDTH / 2) +
+ const result = this.getTotalNodeWidth() - (NODE_INPUT_WIDTH / 2) +
(index - this.inputs.length + 1) * NODE_INPUT_WIDTH;
return result;
}
@@ -173,10 +171,10 @@ export class GNode {
return this.getTotalNodeWidth() - (NODE_INPUT_WIDTH / 2);
}
hasBackEdges() {
- return (this.opcode == "Loop") ||
- ((this.opcode == "Phi" || this.opcode == "EffectPhi") &&
- this.inputs[this.inputs.length - 1].source.opcode == "Loop");
+ return (this.nodeLabel.opcode == "Loop") ||
+ ((this.nodeLabel.opcode == "Phi" || this.nodeLabel.opcode == "EffectPhi" || this.nodeLabel.opcode == "InductionVariablePhi") &&
+ this.inputs[this.inputs.length - 1].source.nodeLabel.opcode == "Loop");
}
-};
+}
export const nodeToStr = (n: GNode) => "N" + n.id;
diff --git a/deps/v8/tools/turbolizer/src/resizer.ts b/deps/v8/tools/turbolizer/src/resizer.ts
new file mode 100644
index 0000000000..ec2d68c0e2
--- /dev/null
+++ b/deps/v8/tools/turbolizer/src/resizer.ts
@@ -0,0 +1,199 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+import * as d3 from "d3";
+import * as C from "../src/constants";
+
+class Snapper {
+ resizer: Resizer;
+ sourceExpand: HTMLElement;
+ sourceCollapse: HTMLElement;
+ disassemblyExpand: HTMLElement;
+ disassemblyCollapse: HTMLElement;
+
+ constructor(resizer: Resizer) {
+ this.resizer = resizer;
+ this.sourceExpand = document.getElementById(C.SOURCE_EXPAND_ID);
+ this.sourceCollapse = document.getElementById(C.SOURCE_COLLAPSE_ID);
+ this.disassemblyExpand = document.getElementById(C.DISASSEMBLY_EXPAND_ID);
+ this.disassemblyCollapse = document.getElementById(C.DISASSEMBLY_COLLAPSE_ID);
+
+ document.getElementById("source-collapse").addEventListener("click", () => {
+ this.setSourceExpanded(!this.sourceExpand.classList.contains("invisible"));
+ this.resizer.updatePanes();
+ });
+ document.getElementById("disassembly-collapse").addEventListener("click", () => {
+ this.setDisassemblyExpanded(!this.disassemblyExpand.classList.contains("invisible"));
+ this.resizer.updatePanes();
+ });
+ }
+
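+ // Restores the expand/collapse state of the source and disassembly panes
+ // from sessionStorage.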
+ restoreExpandedState(): void {
+ this.setSourceExpanded(this.getLastExpandedState("source", true));
+ this.setDisassemblyExpanded(this.getLastExpandedState("disassembly", false));
+ }
+
+ getLastExpandedState(type: string, defaultState: boolean): boolean {
+ const state = window.sessionStorage.getItem("expandedState-" + type);
+ if (state === null) return defaultState;
+ return state === 'true';
+ }
+
+ sourceExpandUpdate(newState: boolean): void {
+ window.sessionStorage.setItem("expandedState-source", `${newState}`);
+ this.sourceExpand.classList.toggle("invisible", newState);
+ this.sourceCollapse.classList.toggle("invisible", !newState);
+ }
+
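+ // Expanding restores the separator from its remembered position; collapsing
+ // snaps it to the window edge after remembering where it was.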
+ setSourceExpanded(newState: boolean): void {
+ if (this.sourceExpand.classList.contains("invisible") === newState) return;
+ const resizer = this.resizer;
+ this.sourceExpandUpdate(newState);
+ if (newState) {
+ resizer.sepLeft = resizer.sepLeftSnap;
+ resizer.sepLeftSnap = 0;
+ } else {
+ resizer.sepLeftSnap = resizer.sepLeft;
+ resizer.sepLeft = 0;
+ }
+ }
+
+ disassemblyExpandUpdate(newState: boolean): void {
+ window.sessionStorage.setItem("expandedState-disassembly", `${newState}`);
+ this.disassemblyExpand.classList.toggle("invisible", newState);
+ this.disassemblyCollapse.classList.toggle("invisible", !newState);
+ }
+
+ setDisassemblyExpanded(newState: boolean): void {
+ if (this.disassemblyExpand.classList.contains("invisible") === newState) return;
+ const resizer = this.resizer;
+ this.disassemblyExpandUpdate(newState);
+ if (newState) {
+ resizer.sepRight = resizer.sepRightSnap;
+ resizer.sepRightSnap = resizer.clientWidth;
+ } else {
+ resizer.sepRightSnap = resizer.sepRight;
+ resizer.sepRight = resizer.clientWidth;
+ }
+ }
+
+ panesUpdated(): void {
+ this.sourceExpandUpdate(this.resizer.sepLeft > this.resizer.deadWidth);
+ this.disassemblyExpandUpdate(this.resizer.sepRight <
+ (this.resizer.clientWidth - this.resizer.deadWidth));
+ }
+}
+
+export class Resizer {
+ snapper: Snapper;
+ deadWidth: number;
+ clientWidth: number;
+ left: HTMLElement;
+ right: HTMLElement;
+ middle: HTMLElement;
+ sepLeft: number;
+ sepRight: number;
+ sepLeftSnap: number;
+ sepRightSnap: number;
+ sepWidthOffset: number;
+ panesUpdatedCallback: () => void;
+ resizerRight: d3.Selection<HTMLDivElement, any, any, any>;
+ resizerLeft: d3.Selection<HTMLDivElement, any, any, any>;
+
+ constructor(panesUpdatedCallback: () => void, deadWidth: number) {
+ const resizer = this;
+ resizer.panesUpdatedCallback = panesUpdatedCallback;
+ resizer.deadWidth = deadWidth;
+ resizer.left = document.getElementById(C.SOURCE_PANE_ID);
+ resizer.middle = document.getElementById(C.INTERMEDIATE_PANE_ID);
+ resizer.right = document.getElementById(C.GENERATED_PANE_ID);
+ resizer.resizerLeft = d3.select('#resizer-left');
+ resizer.resizerRight = d3.select('#resizer-right');
+ resizer.sepLeftSnap = 0;
+ resizer.sepRightSnap = 0;
+ // Offset to prevent resizers from sliding slightly over one another.
+ resizer.sepWidthOffset = 7;
+ this.updateWidths();
+
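+ // Left separator drag: clamped between the window's left edge and the
+ // right separator minus sepWidthOffset.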
+ const dragResizeLeft = d3.drag()
+ .on('drag', function () {
+ const x = d3.mouse(this.parentElement)[0];
+ resizer.sepLeft = Math.min(Math.max(0, x), resizer.sepRight - resizer.sepWidthOffset);
+ resizer.updatePanes();
+ })
+ .on('start', function () {
+ resizer.resizerLeft.classed("dragged", true);
+ const x = d3.mouse(this.parentElement)[0];
+ if (x > deadWidth) {
+ resizer.sepLeftSnap = resizer.sepLeft;
+ }
+ })
+ .on('end', function () {
+ if (!resizer.isRightSnapped()) {
+ window.sessionStorage.setItem("source-pane-width", `${resizer.sepLeft / resizer.clientWidth}`);
+ }
+ resizer.resizerLeft.classed("dragged", false);
+ });
+ resizer.resizerLeft.call(dragResizeLeft);
+
+ const dragResizeRight = d3.drag()
+ .on('drag', function () {
+ const x = d3.mouse(this.parentElement)[0];
+ resizer.sepRight = Math.max(resizer.sepLeft + resizer.sepWidthOffset, Math.min(x, resizer.clientWidth));
+ resizer.updatePanes();
+ })
+ .on('start', function () {
+ resizer.resizerRight.classed("dragged", true);
+ const x = d3.mouse(this.parentElement)[0];
+ if (x < (resizer.clientWidth - deadWidth)) {
+ resizer.sepRightSnap = resizer.sepRight;
+ }
+ })
+ .on('end', function () {
+ if (!resizer.isRightSnapped()) {
+ console.log(`disassembly-pane-width ${resizer.sepRight}`);
+ window.sessionStorage.setItem("disassembly-pane-width", `${resizer.sepRight / resizer.clientWidth}`);
+ }
+ resizer.resizerRight.classed("dragged", false);
+ });
+ resizer.resizerRight.call(dragResizeRight);
+ window.onresize = function () {
+ resizer.updateWidths();
+ resizer.updatePanes();
+ };
+ resizer.snapper = new Snapper(resizer);
+ resizer.snapper.restoreExpandedState();
+ }
+
+ isLeftSnapped() {
+ return this.sepLeft === 0;
+ }
+
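+ // The right separator counts as snapped when it sits within a pixel of the
+ // right window edge.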
+ isRightSnapped() {
+ return this.sepRight >= this.clientWidth - 1;
+ }
+
+ updatePanes() {
+ const leftSnapped = this.isLeftSnapped();
+ const rightSnapped = this.isRightSnapped();
+ this.resizerLeft.classed("snapped", leftSnapped);
+ this.resizerRight.classed("snapped", rightSnapped);
+ this.left.style.width = this.sepLeft + 'px';
+ this.middle.style.width = (this.sepRight - this.sepLeft) + 'px';
+ this.right.style.width = (this.clientWidth - this.sepRight) + 'px';
+ this.resizerLeft.style('left', this.sepLeft + 'px');
+ this.resizerRight.style('right', (this.clientWidth - this.sepRight - 1) + 'px');
+
+ this.snapper.panesUpdated();
+ this.panesUpdatedCallback();
+ }
+
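+ // Reads the pane split from sessionStorage, defaulting to equal thirds
+ // (separators at 1/3 and 2/3 of the window width).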
+ updateWidths() {
+ this.clientWidth = document.body.getBoundingClientRect().width;
+ const sepLeft = window.sessionStorage.getItem("source-pane-width");
+ this.sepLeft = this.clientWidth * (sepLeft ? Number.parseFloat(sepLeft) : (1 / 3));
+ const sepRight = window.sessionStorage.getItem("disassembly-pane-width");
+ this.sepRight = this.clientWidth * (sepRight ? Number.parseFloat(sepRight) : (2 / 3));
+ }
+}
diff --git a/deps/v8/tools/turbolizer/src/schedule-view.ts b/deps/v8/tools/turbolizer/src/schedule-view.ts
index f62aba0c86..ed36d126fd 100644
--- a/deps/v8/tools/turbolizer/src/schedule-view.ts
+++ b/deps/v8/tools/turbolizer/src/schedule-view.ts
@@ -2,12 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-import {Schedule,SourceResolver} from "./source-resolver.js"
-import {isIterable} from "./util.js"
-import {PhaseView} from "./view.js"
-import {TextView} from "./text-view.js"
+import { Schedule, SourceResolver } from "../src/source-resolver";
+import { TextView } from "../src/text-view";
-export class ScheduleView extends TextView implements PhaseView {
+export class ScheduleView extends TextView {
schedule: Schedule;
sourceResolver: SourceResolver;
@@ -18,7 +16,7 @@ export class ScheduleView extends TextView implements PhaseView {
}
constructor(parentId, broker) {
- super(parentId, broker, null);
+ super(parentId, broker);
this.sourceResolver = broker.sourceResolver;
}
@@ -39,26 +37,23 @@ export class ScheduleView extends TextView implements PhaseView {
initializeContent(data, rememberedSelection) {
this.divNode.innerHTML = '';
- this.schedule = data.schedule
+ this.schedule = data.schedule;
this.addBlocks(data.schedule.blocks);
this.attachSelection(rememberedSelection);
+ this.show();
}
createElementFromString(htmlString) {
- var div = document.createElement('div');
+ const div = document.createElement('div');
div.innerHTML = htmlString.trim();
return div.firstChild;
}
elementForBlock(block) {
const view = this;
- function createElement(tag: string, cls: string | Array<string>, content?: string) {
+ function createElement(tag: string, cls: string, content?: string) {
const el = document.createElement(tag);
- if (isIterable(cls)) {
- for (const c of cls) el.classList.add(c);
- } else {
- el.classList.add(cls);
- }
+ el.className = cls;
if (content != undefined) el.innerHTML = content;
return el;
}
@@ -76,15 +71,15 @@ export class ScheduleView extends TextView implements PhaseView {
function getMarker(start, end) {
if (start != end) {
return ["&#8857;", `This node generated instructions in range [${start},${end}). ` +
- `This is currently unreliable for constants.`];
+ `This is currently unreliable for constants.`];
}
if (start != -1) {
return ["&#183;", `The instruction selector did not generate instructions ` +
- `for this node, but processed the node at instruction ${start}. ` +
- `This usually means that this node was folded into another node; ` +
- `the highlighted machine code is a guess.`];
+ `for this node, but processed the node at instruction ${start}. ` +
+ `This usually means that this node was folded into another node; ` +
+ `the highlighted machine code is a guess.`];
}
- return ["", `This not is not in the final schedule.`]
+ return ["", `This not is not in the final schedule.`];
}
function createElementForNode(node) {
@@ -92,27 +87,26 @@ export class ScheduleView extends TextView implements PhaseView {
const [start, end] = view.sourceResolver.getInstruction(node.id);
const [marker, tooltip] = getMarker(start, end);
- const instrMarker = createElement("div", ["instr-marker", "com"], marker);
+ const instrMarker = createElement("div", "instr-marker com", marker);
instrMarker.setAttribute("title", tooltip);
instrMarker.onclick = mkNodeLinkHandler(node.id);
nodeEl.appendChild(instrMarker);
-
- const node_id = createElement("div", ["node-id", "tag", "clickable"], node.id);
- node_id.onclick = mkNodeLinkHandler(node.id);
- view.addHtmlElementForNodeId(node.id, node_id);
- nodeEl.appendChild(node_id);
- const node_label = createElement("div", "node-label", node.label);
- nodeEl.appendChild(node_label);
+ const nodeId = createElement("div", "node-id tag clickable", node.id);
+ nodeId.onclick = mkNodeLinkHandler(node.id);
+ view.addHtmlElementForNodeId(node.id, nodeId);
+ nodeEl.appendChild(nodeId);
+ const nodeLabel = createElement("div", "node-label", node.label);
+ nodeEl.appendChild(nodeLabel);
if (node.inputs.length > 0) {
- const node_parameters = createElement("div", ["parameter-list", "comma-sep-list"]);
+ const nodeParameters = createElement("div", "parameter-list comma-sep-list");
for (const param of node.inputs) {
- const paramEl = createElement("div", ["parameter", "tag", "clickable"], param);
- node_parameters.appendChild(paramEl);
+ const paramEl = createElement("div", "parameter tag clickable", param);
+ nodeParameters.appendChild(paramEl);
paramEl.onclick = mkNodeLinkHandler(param);
view.addHtmlElementForNodeId(param, paramEl);
}
- nodeEl.appendChild(node_parameters);
+ nodeEl.appendChild(nodeParameters);
}
return nodeEl;
@@ -128,38 +122,38 @@ export class ScheduleView extends TextView implements PhaseView {
};
}
- const schedule_block = createElement("div", "schedule-block");
+ const scheduleBlock = createElement("div", "schedule-block");
const [start, end] = view.sourceResolver.getInstructionRangeForBlock(block.id);
- const instrMarker = createElement("div", ["instr-marker", "com"], "&#8857;");
- instrMarker.setAttribute("title", `Instructions range for this block is [${start}, ${end})`)
+ const instrMarker = createElement("div", "instr-marker com", "&#8857;");
+ instrMarker.setAttribute("title", `Instructions range for this block is [${start}, ${end})`);
instrMarker.onclick = mkBlockLinkHandler(block.id);
- schedule_block.appendChild(instrMarker);
+ scheduleBlock.appendChild(instrMarker);
- const block_id = createElement("div", ["block-id", "com", "clickable"], block.id);
- block_id.onclick = mkBlockLinkHandler(block.id);
- schedule_block.appendChild(block_id);
- const block_pred = createElement("div", ["predecessor-list", "block-list", "comma-sep-list"]);
+ const blockId = createElement("div", "block-id com clickable", block.id);
+ blockId.onclick = mkBlockLinkHandler(block.id);
+ scheduleBlock.appendChild(blockId);
+ const blockPred = createElement("div", "predecessor-list block-list comma-sep-list");
for (const pred of block.pred) {
- const predEl = createElement("div", ["block-id", "com", "clickable"], pred);
+ const predEl = createElement("div", "block-id com clickable", pred);
predEl.onclick = mkBlockLinkHandler(pred);
- block_pred.appendChild(predEl);
+ blockPred.appendChild(predEl);
}
- if (block.pred.length) schedule_block.appendChild(block_pred);
+ if (block.pred.length) scheduleBlock.appendChild(blockPred);
const nodes = createElement("div", "nodes");
for (const node of block.nodes) {
nodes.appendChild(createElementForNode(node));
}
- schedule_block.appendChild(nodes);
- const block_succ = createElement("div", ["successor-list", "block-list", "comma-sep-list"]);
+ scheduleBlock.appendChild(nodes);
+ const blockSucc = createElement("div", "successor-list block-list comma-sep-list");
for (const succ of block.succ) {
- const succEl = createElement("div", ["block-id", "com", "clickable"], succ);
+ const succEl = createElement("div", "block-id com clickable", succ);
succEl.onclick = mkBlockLinkHandler(succ);
- block_succ.appendChild(succEl);
+ blockSucc.appendChild(succEl);
}
- if (block.succ.length) schedule_block.appendChild(block_succ);
- this.addHtmlElementForBlockId(block.id, schedule_block);
- return schedule_block;
+ if (block.succ.length) scheduleBlock.appendChild(blockSucc);
+ this.addHtmlElementForBlockId(block.id, scheduleBlock);
+ return scheduleBlock;
}
addBlocks(blocks) {
@@ -170,10 +164,10 @@ export class ScheduleView extends TextView implements PhaseView {
}
lineString(node) {
- return `${node.id}: ${node.label}(${node.inputs.join(", ")})`
+ return `${node.id}: ${node.label}(${node.inputs.join(", ")})`;
}
- searchInputAction(searchBar, e) {
+ searchInputAction(searchBar, e, onlyVisible) {
e.stopPropagation();
this.selectionHandler.clear();
const query = searchBar.value;
@@ -184,11 +178,9 @@ export class ScheduleView extends TextView implements PhaseView {
for (const node of this.schedule.nodes) {
if (node === undefined) continue;
if (reg.exec(this.lineString(node)) != null) {
- select.push(node.id)
+ select.push(node.id);
}
}
this.selectionHandler.select(select, true);
}
-
- onresize() { }
}
diff --git a/deps/v8/tools/turbolizer/src/selection-broker.ts b/deps/v8/tools/turbolizer/src/selection-broker.ts
index e20fd977d2..7e0c0ddee0 100644
--- a/deps/v8/tools/turbolizer/src/selection-broker.ts
+++ b/deps/v8/tools/turbolizer/src/selection-broker.ts
@@ -2,36 +2,54 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-import {SourceResolver, sourcePositionValid} from "./source-resolver.js"
+import { SourceResolver, sourcePositionValid } from "../src/source-resolver";
+import { ClearableHandler, SelectionHandler, NodeSelectionHandler, BlockSelectionHandler, InstructionSelectionHandler } from "../src/selection-handler";
export class SelectionBroker {
sourceResolver: SourceResolver;
+ allHandlers: Array<ClearableHandler>;
sourcePositionHandlers: Array<SelectionHandler>;
nodeHandlers: Array<NodeSelectionHandler>;
blockHandlers: Array<BlockSelectionHandler>;
+ instructionHandlers: Array<InstructionSelectionHandler>;
constructor(sourceResolver) {
+ this.allHandlers = [];
this.sourcePositionHandlers = [];
this.nodeHandlers = [];
this.blockHandlers = [];
+ this.instructionHandlers = [];
this.sourceResolver = sourceResolver;
- };
+ }
- addSourcePositionHandler(handler) {
+ addSourcePositionHandler(handler: SelectionHandler & ClearableHandler) {
+ this.allHandlers.push(handler);
this.sourcePositionHandlers.push(handler);
}
- addNodeHandler(handler) {
+ addNodeHandler(handler: NodeSelectionHandler & ClearableHandler) {
+ this.allHandlers.push(handler);
this.nodeHandlers.push(handler);
}
- addBlockHandler(handler) {
+ addBlockHandler(handler: BlockSelectionHandler & ClearableHandler) {
+ this.allHandlers.push(handler);
this.blockHandlers.push(handler);
}
+ addInstructionHandler(handler: InstructionSelectionHandler & ClearableHandler) {
+ this.allHandlers.push(handler);
+ this.instructionHandlers.push(handler);
+ }
+
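+ // Notifies every registered instruction handler except the originator.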
+ broadcastInstructionSelect(from, instructionOffsets, selected) {
+ for (const b of this.instructionHandlers) {
+ if (b != from) b.brokeredInstructionSelect(instructionOffsets, selected);
+ }
+ }
+
broadcastSourcePositionSelect(from, sourcePositions, selected) {
- let broker = this;
- sourcePositions = sourcePositions.filter((l) => {
+ sourcePositions = sourcePositions.filter(l => {
if (!sourcePositionValid(l)) {
console.log("Warning: invalid source position");
return false;
@@ -48,7 +66,6 @@ export class SelectionBroker {
}
broadcastNodeSelect(from, nodes, selected) {
- let broker = this;
for (const b of this.nodeHandlers) {
if (b != from) b.brokeredNodeSelect(nodes, selected);
}
@@ -59,20 +76,13 @@ export class SelectionBroker {
}
broadcastBlockSelect(from, blocks, selected) {
- let broker = this;
- for (var b of this.blockHandlers) {
+ for (const b of this.blockHandlers) {
if (b != from) b.brokeredBlockSelect(blocks, selected);
}
}
broadcastClear(from) {
- this.sourcePositionHandlers.forEach(function (b) {
- if (b != from) b.brokeredClear();
- });
- this.nodeHandlers.forEach(function (b) {
- if (b != from) b.brokeredClear();
- });
- this.blockHandlers.forEach(function (b) {
+ this.allHandlers.forEach(function (b) {
if (b != from) b.brokeredClear();
});
}
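
The rewritten broker keeps a single allHandlers list so broadcastClear no longer walks three parallel arrays. A reduced sketch of the register-and-fan-out pattern it uses (the names here are illustrative, not the patch's exact types):

interface Handler {
  brokeredClear(): void;
}

class MiniBroker {
  private readonly allHandlers: Array<Handler> = [];

  add(handler: Handler): void {
    this.allHandlers.push(handler);
  }

  // Notify everyone except the originator, so the view that initiated
  // the clear does not re-process its own broadcast.
  broadcastClear(from: Handler | null): void {
    for (const h of this.allHandlers) {
      if (h != from) h.brokeredClear();
    }
  }
}
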
diff --git a/deps/v8/tools/turbolizer/src/selection-handler.ts b/deps/v8/tools/turbolizer/src/selection-handler.ts
index bf0719c8a6..a605149238 100644
--- a/deps/v8/tools/turbolizer/src/selection-handler.ts
+++ b/deps/v8/tools/turbolizer/src/selection-handler.ts
@@ -2,23 +2,30 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-interface SelectionHandler {
+export interface ClearableHandler {
+ brokeredClear(): void;
+}
+
+export interface SelectionHandler {
clear(): void;
select(nodeIds: any, selected: any): void;
- brokeredClear(): void;
brokeredSourcePositionSelect(sourcePositions: any, selected: any): void;
-};
+}
-interface NodeSelectionHandler {
+export interface NodeSelectionHandler {
clear(): void;
select(nodeIds: any, selected: any): void;
- brokeredClear(): void;
brokeredNodeSelect(nodeIds: any, selected: any): void;
-};
+}
-interface BlockSelectionHandler {
+export interface BlockSelectionHandler {
clear(): void;
select(nodeIds: any, selected: any): void;
- brokeredClear(): void;
brokeredBlockSelect(blockIds: any, selected: any): void;
-};
+}
+
+export interface InstructionSelectionHandler {
+ clear(): void;
+ select(instructionIds: any, selected: any): void;
+ brokeredInstructionSelect(instructionIds: any, selected: any): void;
+}
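
Factoring brokeredClear() out into ClearableHandler is what lets the broker's add* methods demand both capabilities through an intersection type such as NodeSelectionHandler & ClearableHandler. A hypothetical handler satisfying that intersection (the real views build these as inline object literals):

import { NodeSelectionHandler, ClearableHandler } from "../src/selection-handler";

// Hypothetical logging handler; purely illustrative.
const loggingHandler: NodeSelectionHandler & ClearableHandler = {
  clear: () => console.log("local clear"),
  select: (nodeIds, selected) => console.log("select", nodeIds, selected),
  brokeredNodeSelect: (nodeIds, selected) => console.log("brokered select", nodeIds, selected),
  brokeredClear: () => console.log("brokered clear"),
};
// broker.addNodeHandler(loggingHandler);
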
diff --git a/deps/v8/tools/turbolizer/src/selection.ts b/deps/v8/tools/turbolizer/src/selection.ts
index b02a3e9cbb..90fe3bd4bc 100644
--- a/deps/v8/tools/turbolizer/src/selection.ts
+++ b/deps/v8/tools/turbolizer/src/selection.ts
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-import {isIterable} from "./util.js"
-
export class MySelection {
selection: any;
stringKey: (o: any) => string;
@@ -21,8 +19,7 @@ export class MySelection {
this.selection = new Map();
}
- select(s, isSelected) {
- if (!isIterable(s)) { s = [s]; }
+ select(s: Iterable<any>, isSelected?: boolean) {
for (const i of s) {
if (!i) continue;
if (isSelected == undefined) {
@@ -36,7 +33,7 @@ export class MySelection {
}
}
- isSelected(i): boolean {
+ isSelected(i: any): boolean {
return this.selection.has(this.stringKey(i));
}
@@ -45,21 +42,18 @@ export class MySelection {
}
selectedKeys() {
- var result = new Set();
- for (var i of this.selection.keys()) {
+ const result = new Set();
+ for (const i of this.selection.keys()) {
result.add(i);
}
return result;
}
detachSelection() {
- var result = new Set();
- for (var i of this.selection.keys()) {
- result.add(i);
- }
+ const result = this.selectedKeys();
this.clear();
return result;
}
- [Symbol.iterator]() { return this.selection.values() }
+ [Symbol.iterator]() { return this.selection.values(); }
}
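
select() now accepts any Iterable plus an optional flag, and omitting the flag toggles membership per item. A usage sketch, assuming the constructor takes the key function that the stringKey field suggests:

import { MySelection } from "../src/selection";

const sel = new MySelection((o: any) => String(o.id)); // assumed constructor shape
sel.select([{ id: 1 }, { id: 2 }], true); // explicit select
sel.select([{ id: 1 }]);                  // no flag: toggles id 1 back off
console.log(sel.isSelected({ id: 2 }));   // true
console.log(sel.detachSelection());       // snapshot of keys; selection is cleared
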
diff --git a/deps/v8/tools/turbolizer/src/sequence-view.ts b/deps/v8/tools/turbolizer/src/sequence-view.ts
index afddb56649..a796707c74 100644
--- a/deps/v8/tools/turbolizer/src/sequence-view.ts
+++ b/deps/v8/tools/turbolizer/src/sequence-view.ts
@@ -2,14 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-import {Sequence} from "./source-resolver.js"
-import {isIterable} from "./util.js"
-import {PhaseView} from "./view.js"
-import {TextView} from "./text-view.js"
+import { Sequence } from "../src/source-resolver";
+import { isIterable } from "../src/util";
+import { TextView } from "../src/text-view";
-export class SequenceView extends TextView implements PhaseView {
+export class SequenceView extends TextView {
sequence: Sequence;
- search_info: Array<any>;
+ searchInfo: Array<any>;
createViewElement() {
const pane = document.createElement('div');
@@ -18,7 +17,7 @@ export class SequenceView extends TextView implements PhaseView {
}
constructor(parentId, broker) {
- super(parentId, broker, null);
+ super(parentId, broker);
}
attachSelection(s) {
@@ -39,9 +38,17 @@ export class SequenceView extends TextView implements PhaseView {
initializeContent(data, rememberedSelection) {
this.divNode.innerHTML = '';
this.sequence = data.sequence;
- this.search_info = [];
+ this.searchInfo = [];
+ this.divNode.addEventListener('click', (e: MouseEvent) => {
+ if (!(e.target instanceof HTMLElement)) return;
+ const instructionId = Number.parseInt(e.target.dataset.instructionId, 10);
+ if (!instructionId) return;
+ if (!e.shiftKey) this.broker.broadcastClear(null);
+ this.broker.broadcastInstructionSelect(null, [instructionId], true);
+ });
this.addBlocks(this.sequence.blocks);
this.attachSelection(rememberedSelection);
+ this.show();
}
elementForBlock(block) {
@@ -75,23 +82,25 @@ export class SequenceView extends TextView implements PhaseView {
return mkLinkHandler(text, view.selectionHandler);
}
- function elementForOperand(operand, search_info) {
- var text = operand.text;
+ function elementForOperand(operand, searchInfo) {
+ const text = operand.text;
const operandEl = createElement("div", ["parameter", "tag", "clickable", operand.type], text);
if (operand.tooltip) {
operandEl.setAttribute("title", operand.tooltip);
}
operandEl.onclick = mkOperandLinkHandler(text);
- search_info.push(text);
+ searchInfo.push(text);
view.addHtmlElementForNodeId(text, operandEl);
return operandEl;
}
- function elementForInstruction(instruction, search_info) {
+ function elementForInstruction(instruction, searchInfo) {
const instNodeEl = createElement("div", "instruction-node");
- const inst_id = createElement("div", "instruction-id", instruction.id);
- instNodeEl.appendChild(inst_id);
+ const instId = createElement("div", "instruction-id", instruction.id);
+ instId.classList.add("clickable");
+ instId.dataset.instructionId = instruction.id;
+ instNodeEl.appendChild(instId);
const instContentsEl = createElement("div", "instruction-contents");
instNodeEl.appendChild(instContentsEl);
@@ -103,11 +112,11 @@ export class SequenceView extends TextView implements PhaseView {
const moves = createElement("div", ["comma-sep-list", "gap-move"]);
for (const move of gap) {
const moveEl = createElement("div", "move");
- const destinationEl = elementForOperand(move[0], search_info);
+ const destinationEl = elementForOperand(move[0], searchInfo);
moveEl.appendChild(destinationEl);
const assignEl = createElement("div", "assign", "=");
moveEl.appendChild(assignEl);
- const sourceEl = elementForOperand(move[1], search_info);
+ const sourceEl = elementForOperand(move[1], searchInfo);
moveEl.appendChild(sourceEl);
moves.appendChild(moveEl);
}
@@ -120,7 +129,7 @@ export class SequenceView extends TextView implements PhaseView {
if (instruction.outputs.length > 0) {
const outputs = createElement("div", ["comma-sep-list", "input-output-list"]);
for (const output of instruction.outputs) {
- const outputEl = elementForOperand(output, search_info);
+ const outputEl = elementForOperand(output, searchInfo);
outputs.appendChild(outputEl);
}
instEl.appendChild(outputs);
@@ -128,16 +137,16 @@ export class SequenceView extends TextView implements PhaseView {
instEl.appendChild(assignEl);
}
- var text = instruction.opcode + instruction.flags;
- const inst_label = createElement("div", "node-label", text);
- search_info.push(text);
- view.addHtmlElementForNodeId(text, inst_label);
- instEl.appendChild(inst_label);
+ const text = instruction.opcode + instruction.flags;
+ const instLabel = createElement("div", "node-label", text);
+ searchInfo.push(text);
+ view.addHtmlElementForNodeId(text, instLabel);
+ instEl.appendChild(instLabel);
if (instruction.inputs.length > 0) {
const inputs = createElement("div", ["comma-sep-list", "input-output-list"]);
for (const input of instruction.inputs) {
- const inputEl = elementForOperand(input, search_info);
+ const inputEl = elementForOperand(input, searchInfo);
inputs.appendChild(inputEl);
}
instEl.appendChild(inputs);
@@ -146,7 +155,7 @@ export class SequenceView extends TextView implements PhaseView {
if (instruction.temps.length > 0) {
const temps = createElement("div", ["comma-sep-list", "input-output-list", "temps"]);
for (const temp of instruction.temps) {
- const tempEl = elementForOperand(temp, search_info);
+ const tempEl = elementForOperand(temp, searchInfo);
temps.appendChild(tempEl);
}
instEl.appendChild(temps);
@@ -155,20 +164,20 @@ export class SequenceView extends TextView implements PhaseView {
return instNodeEl;
}
- const sequence_block = createElement("div", "schedule-block");
+ const sequenceBlock = createElement("div", "schedule-block");
- const block_id = createElement("div", ["block-id", "com", "clickable"], block.id);
- block_id.onclick = mkBlockLinkHandler(block.id);
- sequence_block.appendChild(block_id);
- const block_pred = createElement("div", ["predecessor-list", "block-list", "comma-sep-list"]);
+ const blockId = createElement("div", ["block-id", "com", "clickable"], block.id);
+ blockId.onclick = mkBlockLinkHandler(block.id);
+ sequenceBlock.appendChild(blockId);
+ const blockPred = createElement("div", ["predecessor-list", "block-list", "comma-sep-list"]);
for (const pred of block.predecessors) {
const predEl = createElement("div", ["block-id", "com", "clickable"], pred);
predEl.onclick = mkBlockLinkHandler(pred);
- block_pred.appendChild(predEl);
+ blockPred.appendChild(predEl);
}
- if (block.predecessors.length > 0) sequence_block.appendChild(block_pred);
+ if (block.predecessors.length > 0) sequenceBlock.appendChild(blockPred);
const phis = createElement("div", "phis");
- sequence_block.appendChild(phis);
+ sequenceBlock.appendChild(phis);
const phiLabel = createElement("div", "phi-label", "phi:");
phis.appendChild(phiLabel);
@@ -180,7 +189,7 @@ export class SequenceView extends TextView implements PhaseView {
const phiEl = createElement("div", "phi");
phiContents.appendChild(phiEl);
- const outputEl = elementForOperand(phi.output, this.search_info);
+ const outputEl = elementForOperand(phi.output, this.searchInfo);
phiEl.appendChild(outputEl);
const assignEl = createElement("div", "assign", "=");
@@ -194,18 +203,18 @@ export class SequenceView extends TextView implements PhaseView {
const instructions = createElement("div", "instructions");
for (const instruction of block.instructions) {
- instructions.appendChild(elementForInstruction(instruction, this.search_info));
+ instructions.appendChild(elementForInstruction(instruction, this.searchInfo));
}
- sequence_block.appendChild(instructions);
- const block_succ = createElement("div", ["successor-list", "block-list", "comma-sep-list"]);
+ sequenceBlock.appendChild(instructions);
+ const blockSucc = createElement("div", ["successor-list", "block-list", "comma-sep-list"]);
for (const succ of block.successors) {
const succEl = createElement("div", ["block-id", "com", "clickable"], succ);
succEl.onclick = mkBlockLinkHandler(succ);
- block_succ.appendChild(succEl);
+ blockSucc.appendChild(succEl);
}
- if (block.successors.length > 0) sequence_block.appendChild(block_succ);
- this.addHtmlElementForBlockId(block.id, sequence_block);
- return sequence_block;
+ if (block.successors.length > 0) sequenceBlock.appendChild(blockSucc);
+ this.addHtmlElementForBlockId(block.id, sequenceBlock);
+ return sequenceBlock;
}
addBlocks(blocks) {
@@ -223,13 +232,11 @@ export class SequenceView extends TextView implements PhaseView {
const select = [];
window.sessionStorage.setItem("lastSearch", query);
const reg = new RegExp(query);
- for (const item of this.search_info) {
+ for (const item of this.searchInfo) {
if (reg.exec(item) != null) {
select.push(item);
}
}
this.selectionHandler.select(select, true);
}
-
- onresize() { }
}
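
The new click wiring in initializeContent is plain event delegation: a single listener on the pane reads data-instruction-id off whatever element was clicked, which is why elementForInstruction stamps instId.dataset.instructionId. The same pattern in isolation:

const pane = document.createElement("div");
const instId = document.createElement("div");
instId.dataset.instructionId = "7"; // stamped on each instruction-id element
pane.appendChild(instId);

pane.addEventListener("click", (e: MouseEvent) => {
  if (!(e.target instanceof HTMLElement)) return;
  const instructionId = Number.parseInt(e.target.dataset.instructionId, 10);
  if (Number.isNaN(instructionId)) return; // click landed outside an id element
  console.log("selected instruction", instructionId);
});
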
diff --git a/deps/v8/tools/turbolizer/src/source-resolver.ts b/deps/v8/tools/turbolizer/src/source-resolver.ts
index 20f1f5070a..67f9c088a2 100644
--- a/deps/v8/tools/turbolizer/src/source-resolver.ts
+++ b/deps/v8/tools/turbolizer/src/source-resolver.ts
@@ -2,7 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-import {sortUnique, anyToString} from "./util.js"
+import { sortUnique, anyToString } from "../src/util";
+import { NodeLabel } from "./node-label";
function sourcePositionLe(a, b) {
if (a.inliningId == b.inliningId) {
@@ -16,12 +17,14 @@ function sourcePositionEq(a, b) {
a.scriptOffset == b.scriptOffset;
}
-export function sourcePositionToStringKey(sourcePosition): string {
+export function sourcePositionToStringKey(sourcePosition: AnyPosition): string {
if (!sourcePosition) return "undefined";
- if (sourcePosition.inliningId && sourcePosition.scriptOffset)
+ if ('inliningId' in sourcePosition && 'scriptOffset' in sourcePosition) {
return "SP:" + sourcePosition.inliningId + ":" + sourcePosition.scriptOffset;
- if (sourcePosition.bytecodePosition)
+ }
+ if (sourcePosition.bytecodePosition) {
return "BCP:" + sourcePosition.bytecodePosition;
+ }
return "undefined";
}
@@ -48,9 +51,9 @@ interface BytecodePosition {
bytecodePosition: number;
}
-type Origin = NodeOrigin | BytecodePosition;
-type TurboFanNodeOrigin = NodeOrigin & TurboFanOrigin;
-type TurboFanBytecodeOrigin = BytecodePosition & TurboFanOrigin;
+export type Origin = NodeOrigin | BytecodePosition;
+export type TurboFanNodeOrigin = NodeOrigin & TurboFanOrigin;
+export type TurboFanBytecodeOrigin = BytecodePosition & TurboFanOrigin;
type AnyPosition = SourcePosition | BytecodePosition;
@@ -61,17 +64,37 @@ export interface Source {
sourceText: string;
sourceId: number;
startPosition?: number;
+ backwardsCompatibility: boolean;
}
interface Inlining {
inliningPosition: SourcePosition;
sourceId: number;
}
-interface Phase {
- type: string;
+interface OtherPhase {
+ type: "disassembly" | "sequence" | "schedule";
+ name: string;
+ data: any;
+}
+
+interface InstructionsPhase {
+ type: "instructions";
+ name: string;
+ data: any;
+ instructionOffsetToPCOffset?: any;
+ blockIdtoInstructionRange?: any;
+ nodeIdToInstructionRange?: any;
+}
+
+interface GraphPhase {
+ type: "graph";
name: string;
data: any;
+ highestNodeId: number;
+ nodeLabelMap: Array<NodeLabel>;
}
+type Phase = GraphPhase | InstructionsPhase | OtherPhase;
+
export interface Schedule {
nodes: Array<any>;
}
@@ -94,7 +117,7 @@ export class SourceResolver {
blockIdToInstructionRange: Array<[number, number]>;
instructionToPCOffset: Array<number>;
pcOffsetToInstructions: Map<number, Array<number>>;
-
+ pcOffsets: Array<number>;
constructor() {
// Maps node ids to source positions.
@@ -123,11 +146,12 @@ export class SourceResolver {
this.instructionToPCOffset = [];
// Maps PC offsets to instructions.
this.pcOffsetToInstructions = new Map();
+ this.pcOffsets = [];
}
setSources(sources, mainBackup) {
if (sources) {
- for (let [sourceId, source] of Object.entries(sources)) {
+ for (const [sourceId, source] of Object.entries(sources)) {
this.sources[sourceId] = source;
this.sources[sourceId].sourcePositions = [];
}
@@ -159,7 +183,7 @@ export class SourceResolver {
alternativeMap[nodeId] = { scriptOffset: scriptOffset, inliningId: -1 };
}
map = alternativeMap;
- };
+ }
for (const [nodeId, sourcePosition] of Object.entries<SourcePosition>(map)) {
if (sourcePosition == undefined) {
@@ -172,13 +196,13 @@ export class SourceResolver {
this.sources[sourceId].sourcePositions.push(sourcePosition);
}
this.nodePositionMap[nodeId] = sourcePosition;
- let key = sourcePositionToStringKey(sourcePosition);
+ const key = sourcePositionToStringKey(sourcePosition);
if (!this.positionToNodes.has(key)) {
this.positionToNodes.set(key, []);
}
this.positionToNodes.get(key).push(nodeId);
}
- for (const [sourceId, source] of Object.entries(this.sources)) {
+ for (const [, source] of Object.entries(this.sources)) {
source.sourcePositions = sortUnique(source.sourcePositions,
sourcePositionLe, sourcePositionEq);
}
@@ -187,8 +211,8 @@ export class SourceResolver {
sourcePositionsToNodeIds(sourcePositions) {
const nodeIds = new Set();
for (const sp of sourcePositions) {
- let key = sourcePositionToStringKey(sp);
- let nodeIdsForPosition = this.positionToNodes.get(key);
+ const key = sourcePositionToStringKey(sp);
+ const nodeIdsForPosition = this.positionToNodes.get(key);
if (!nodeIdsForPosition) continue;
for (const nodeId of nodeIdsForPosition) {
nodeIds.add(nodeId);
@@ -200,8 +224,8 @@ export class SourceResolver {
nodeIdsToSourcePositions(nodeIds): Array<AnyPosition> {
const sourcePositions = new Map();
for (const nodeId of nodeIds) {
- let sp = this.nodePositionMap[nodeId];
- let key = sourcePositionToStringKey(sp);
+ const sp = this.nodePositionMap[nodeId];
+ const key = sourcePositionToStringKey(sp);
sourcePositions.set(key, sp);
}
const sourcePositionArray = [];
@@ -211,13 +235,13 @@ export class SourceResolver {
return sourcePositionArray;
}
- forEachSource(f) {
+ forEachSource(f: (value: Source, index: number, array: Array<Source>) => void) {
this.sources.forEach(f);
}
- translateToSourceId(sourceId, location) {
+ translateToSourceId(sourceId: number, location?: SourcePosition) {
for (const position of this.getInlineStack(location)) {
- let inlining = this.inlinings[position.inliningId];
+ const inlining = this.inlinings[position.inliningId];
if (!inlining) continue;
if (inlining.sourceId == sourceId) {
return position;
@@ -226,10 +250,10 @@ export class SourceResolver {
return location;
}
- addInliningPositions(sourcePosition, locations) {
- let inlining = this.inliningsMap.get(sourcePositionToStringKey(sourcePosition));
+ addInliningPositions(sourcePosition: AnyPosition, locations: Array<SourcePosition>) {
+ const inlining = this.inliningsMap.get(sourcePositionToStringKey(sourcePosition));
if (!inlining) return;
- let sourceId = inlining.sourceId
+ const sourceId = inlining.sourceId;
const source = this.sources[sourceId];
for (const sp of source.sourcePositions) {
locations.push(sp);
@@ -237,26 +261,26 @@ export class SourceResolver {
}
}
- getInliningForPosition(sourcePosition) {
+ getInliningForPosition(sourcePosition: AnyPosition) {
return this.inliningsMap.get(sourcePositionToStringKey(sourcePosition));
}
- getSource(sourceId) {
+ getSource(sourceId: number) {
return this.sources[sourceId];
}
- getSourceName(sourceId) {
+ getSourceName(sourceId: number) {
const source = this.sources[sourceId];
return `${source.sourceName}:${source.functionName}`;
}
- sourcePositionFor(sourceId, scriptOffset) {
+ sourcePositionFor(sourceId: number, scriptOffset: number) {
if (!this.sources[sourceId]) {
return null;
}
const list = this.sources[sourceId].sourcePositions;
for (let i = 0; i < list.length; i++) {
- const sourcePosition = list[i]
+ const sourcePosition = list[i];
const position = sourcePosition.scriptOffset;
const nextPosition = list[Math.min(i + 1, list.length - 1)].scriptOffset;
if ((position <= scriptOffset && scriptOffset < nextPosition)) {
@@ -266,12 +290,11 @@ export class SourceResolver {
return null;
}
- sourcePositionsInRange(sourceId, start, end) {
+ sourcePositionsInRange(sourceId: number, start: number, end: number) {
if (!this.sources[sourceId]) return [];
const res = [];
const list = this.sources[sourceId].sourcePositions;
- for (let i = 0; i < list.length; i++) {
- const sourcePosition = list[i]
+ for (const sourcePosition of list) {
if (start <= sourcePosition.scriptOffset && sourcePosition.scriptOffset < end) {
res.push(sourcePosition);
}
@@ -279,15 +302,14 @@ export class SourceResolver {
return res;
}
- getInlineStack(sourcePosition) {
- if (!sourcePosition) {
- return [];
- }
- let inliningStack = [];
+ getInlineStack(sourcePosition?: SourcePosition) {
+ if (!sourcePosition) return [];
+
+ const inliningStack = [];
let cur = sourcePosition;
while (cur && cur.inliningId != -1) {
inliningStack.push(cur);
- let inlining = this.inlinings[cur.inliningId];
+ const inlining = this.inlinings[cur.inliningId];
if (!inlining) {
break;
}
@@ -299,19 +321,25 @@ export class SourceResolver {
return inliningStack;
}
- recordOrigins(phase) {
+ recordOrigins(phase: GraphPhase) {
if (phase.type != "graph") return;
for (const node of phase.data.nodes) {
+ phase.highestNodeId = Math.max(phase.highestNodeId, node.id);
if (node.origin != undefined &&
node.origin.bytecodePosition != undefined) {
const position = { bytecodePosition: node.origin.bytecodePosition };
this.nodePositionMap[node.id] = position;
- let key = sourcePositionToStringKey(position);
+ const key = sourcePositionToStringKey(position);
if (!this.positionToNodes.has(key)) {
this.positionToNodes.set(key, []);
}
const A = this.positionToNodes.get(key);
- if (!A.includes(node.id)) A.push("" + node.id);
+ if (!A.includes(node.id)) A.push(`${node.id}`);
+ }
+
+ // Backwards compatibility.
+ if (typeof node.pos === "number") {
+ node.sourcePosition = { scriptOffset: node.pos, inliningId: -1 };
}
}
}
@@ -328,13 +356,13 @@ export class SourceResolver {
}
}
- getInstruction(nodeId):[number, number] {
+ getInstruction(nodeId: number): [number, number] {
const X = this.nodeIdToInstructionRange[nodeId];
if (X === undefined) return [-1, -1];
return X;
}
- getInstructionRangeForBlock(blockId):[number, number] {
+ getInstructionRangeForBlock(blockId: number): [number, number] {
const X = this.blockIdToInstructionRange[blockId];
if (X === undefined) return [-1, -1];
return X;
@@ -346,20 +374,51 @@ export class SourceResolver {
if (!this.pcOffsetToInstructions.has(offset)) {
this.pcOffsetToInstructions.set(offset, []);
}
- this.pcOffsetToInstructions.get(offset).push(instruction);
+ this.pcOffsetToInstructions.get(offset).push(Number(instruction));
}
- console.log(this.pcOffsetToInstructions);
+ this.pcOffsets = Array.from(this.pcOffsetToInstructions.keys()).sort((a, b) => b - a);
}
hasPCOffsets() {
return this.pcOffsetToInstructions.size > 0;
}
+ getKeyPcOffset(offset: number): number {
+ if (this.pcOffsets.length === 0) return -1;
+ for (const key of this.pcOffsets) {
+ if (key <= offset) {
+ return key;
+ }
+ }
+ return -1;
+ }
+
+ instructionRangeToKeyPcOffsets([start, end]: [number, number]) {
+ if (start == end) return [this.instructionToPCOffset[start]];
+ return this.instructionToPCOffset.slice(start, end);
+ }
+
+ instructionsToKeyPcOffsets(instructionIds: Iterable<number>) {
+ const keyPcOffsets = [];
+ for (const instructionId of instructionIds) {
+ keyPcOffsets.push(this.instructionToPCOffset[instructionId]);
+ }
+ return keyPcOffsets;
+ }
+
+ nodesToKeyPcOffsets(nodes) {
+ let offsets = [];
+ for (const node of nodes) {
+ const range = this.nodeIdToInstructionRange[node];
+ if (!range) continue;
+ offsets = offsets.concat(this.instructionRangeToKeyPcOffsets(range));
+ }
+ return offsets;
+ }
- nodesForPCOffset(offset): [Array<String>, Array<String>] {
- const keys = Array.from(this.pcOffsetToInstructions.keys()).sort((a, b) => b - a);
- if (keys.length === 0) return [[],[]];
- for (const key of keys) {
+ nodesForPCOffset(offset: number): [Array<string>, Array<string>] {
+ if (this.pcOffsets.length === 0) return [[], []];
+ for (const key of this.pcOffsets) {
if (key <= offset) {
const instrs = this.pcOffsetToInstructions.get(key);
const nodes = [];
@@ -379,54 +438,82 @@ export class SourceResolver {
return [nodes, blocks];
}
}
- return [[],[]];
+ return [[], []];
}
parsePhases(phases) {
- for (const [phaseId, phase] of Object.entries<Phase>(phases)) {
- if (phase.type == 'disassembly') {
- this.disassemblyPhase = phase;
- } else if (phase.type == 'schedule') {
- this.phases.push(this.parseSchedule(phase));
- this.phaseNames.set(phase.name, this.phases.length);
- } else if (phase.type == 'sequence') {
- this.phases.push(this.parseSequence(phase));
- this.phaseNames.set(phase.name, this.phases.length);
- } else if (phase.type == 'instructions') {
- if (phase.nodeIdToInstructionRange) {
- this.readNodeIdToInstructionRange(phase.nodeIdToInstructionRange);
- }
- if (phase.blockIdtoInstructionRange) {
- this.readBlockIdToInstructionRange(phase.blockIdtoInstructionRange);
- }
- if (phase.instructionOffsetToPCOffset) {
- this.readInstructionOffsetToPCOffset(phase.instructionOffsetToPCOffset);
+ const nodeLabelMap = [];
+ for (const [, phase] of Object.entries<Phase>(phases)) {
+ switch (phase.type) {
+ case 'disassembly':
+ this.disassemblyPhase = phase;
+ break;
+ case 'schedule':
+ this.phaseNames.set(phase.name, this.phases.length);
+ this.phases.push(this.parseSchedule(phase));
+ break;
+ case 'sequence':
+ this.phaseNames.set(phase.name, this.phases.length);
+ this.phases.push(this.parseSequence(phase));
+ break;
+ case 'instructions':
+ if (phase.nodeIdToInstructionRange) {
+ this.readNodeIdToInstructionRange(phase.nodeIdToInstructionRange);
+ }
+ if (phase.blockIdtoInstructionRange) {
+ this.readBlockIdToInstructionRange(phase.blockIdtoInstructionRange);
+ }
+ if (phase.instructionOffsetToPCOffset) {
+ this.readInstructionOffsetToPCOffset(phase.instructionOffsetToPCOffset);
+ }
+ break;
+ case 'graph':
+ const graphPhase: GraphPhase = Object.assign(phase, { highestNodeId: 0 });
+ this.phaseNames.set(graphPhase.name, this.phases.length);
+ this.phases.push(graphPhase);
+ this.recordOrigins(graphPhase);
+ this.internNodeLabels(graphPhase, nodeLabelMap);
+ graphPhase.nodeLabelMap = nodeLabelMap.slice();
+ break;
+ default:
+ throw "Unsupported phase type";
+ }
+ }
+ }
+
+ internNodeLabels(phase: GraphPhase, nodeLabelMap: Array<NodeLabel>) {
+ for (const n of phase.data.nodes) {
+ const label = new NodeLabel(n.id, n.label, n.title, n.live,
+ n.properties, n.sourcePosition, n.origin, n.opcode, n.control,
+ n.opinfo, n.type);
+ const previous = nodeLabelMap[label.id];
+ if (!label.equals(previous)) {
+ if (previous != undefined) {
+ label.setInplaceUpdatePhase(phase.name);
}
- } else {
- this.phases.push(phase);
- this.recordOrigins(phase);
- this.phaseNames.set(phase.name, this.phases.length);
+ nodeLabelMap[label.id] = label;
}
+ n.nodeLabel = nodeLabelMap[label.id];
}
}
repairPhaseId(anyPhaseId) {
- return Math.max(0, Math.min(anyPhaseId, this.phases.length - 1))
+ return Math.max(0, Math.min(anyPhaseId | 0, this.phases.length - 1));
}
- getPhase(phaseId) {
+ getPhase(phaseId: number) {
return this.phases[phaseId];
}
- getPhaseIdByName(phaseName) {
+ getPhaseIdByName(phaseName: string) {
return this.phaseNames.get(phaseName);
}
- forEachPhase(f) {
+ forEachPhase(f: (value: Phase, index: number, array: Array<Phase>) => void) {
this.phases.forEach(f);
}
- addAnyPositionToLine(lineNumber: number | String, sourcePosition: AnyPosition) {
+ addAnyPositionToLine(lineNumber: number | string, sourcePosition: AnyPosition) {
const lineNumberString = anyToString(lineNumber);
if (!this.lineToSourcePositions.has(lineNumberString)) {
this.lineToSourcePositions.set(lineNumberString, []);
@@ -442,19 +529,19 @@ export class SourceResolver {
});
}
- linetoSourcePositions(lineNumber: number | String) {
+ linetoSourcePositions(lineNumber: number | string) {
const positions = this.lineToSourcePositions.get(anyToString(lineNumber));
if (positions === undefined) return [];
return positions;
}
parseSchedule(phase) {
- function createNode(state, match) {
+ function createNode(state: any, match) {
let inputs = [];
if (match.groups.args) {
const nodeIdsString = match.groups.args.replace(/\s/g, '');
const nodeIdStrings = nodeIdsString.split(',');
- inputs = nodeIdStrings.map((n) => Number.parseInt(n, 10));
+ inputs = nodeIdStrings.map(n => Number.parseInt(n, 10));
}
const node = {
id: Number.parseInt(match.groups.id, 10),
@@ -464,7 +551,7 @@ export class SourceResolver {
if (match.groups.blocks) {
const nodeIdsString = match.groups.blocks.replace(/\s/g, '').replace(/B/g, '');
const nodeIdStrings = nodeIdsString.split(',');
- const successors = nodeIdStrings.map((n) => Number.parseInt(n, 10));
+ const successors = nodeIdStrings.map(n => Number.parseInt(n, 10));
state.currentBlock.succ = successors;
}
state.nodes[node.id] = node;
@@ -475,7 +562,7 @@ export class SourceResolver {
if (match.groups.in) {
const blockIdsString = match.groups.in.replace(/\s/g, '').replace(/B/g, '');
const blockIdStrings = blockIdsString.split(',');
- predecessors = blockIdStrings.map((n) => Number.parseInt(n, 10));
+ predecessors = blockIdStrings.map(n => Number.parseInt(n, 10));
}
const block = {
id: Number.parseInt(match.groups.id, 10),
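
The Phase rewrite above is a discriminated union keyed on the literal type field, which is what lets the new switch in parsePhases see nodeIdToInstructionRange only inside the 'instructions' arm. A reduced sketch of that narrowing:

interface GraphPhase { type: "graph"; name: string; highestNodeId: number; }
interface InstructionsPhase { type: "instructions"; name: string; nodeIdToInstructionRange?: any; }
type Phase = GraphPhase | InstructionsPhase;

function describePhase(phase: Phase): string {
  switch (phase.type) {
    case "graph":
      // Narrowed to GraphPhase: highestNodeId is visible here.
      return `${phase.name}: max node id ${phase.highestNodeId}`;
    case "instructions":
      // Narrowed to InstructionsPhase.
      return `${phase.name}: ${phase.nodeIdToInstructionRange ? "has" : "no"} instruction ranges`;
  }
}
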
diff --git a/deps/v8/tools/turbolizer/src/tabs.ts b/deps/v8/tools/turbolizer/src/tabs.ts
new file mode 100644
index 0000000000..0416b9ed9d
--- /dev/null
+++ b/deps/v8/tools/turbolizer/src/tabs.ts
@@ -0,0 +1,114 @@
+
+export class Tabs {
+ private container: HTMLElement;
+ private tabBar: HTMLElement;
+ private nextTabId: number;
+
+ private mkTabBar(container: HTMLElement) {
+ container.classList.add("nav-tabs-container");
+ this.tabBar = document.createElement("ul");
+ this.tabBar.id = `tab-bar-${container.id}`;
+ this.tabBar.className = "nav-tabs";
+ this.tabBar.ondrop = this.tabBarOnDrop.bind(this);
+ this.tabBar.ondragover = this.tabBarOnDragover.bind(this);
+ this.tabBar.onclick = this.tabBarOnClick.bind(this);
+
+ const defaultDiv = document.createElement("div");
+ defaultDiv.className = "tab-content tab-default";
+ defaultDiv.id = `tab-content-${container.id}-default`;
+ container.insertBefore(defaultDiv, container.firstChild);
+ container.insertBefore(this.tabBar, container.firstChild);
+ }
+
+ constructor(container: HTMLElement) {
+ this.container = container;
+ this.nextTabId = 0;
+ this.mkTabBar(container);
+ }
+
+ activateTab(tab: HTMLLIElement) {
+ if (typeof tab.dataset.divid !== "string") return;
+ for (const li of this.tabBar.querySelectorAll<HTMLLIElement>("li.active")) {
+ li.classList.remove("active");
+ this.showTab(li, false);
+ }
+ tab.classList.add("active");
+ this.showTab(tab, true);
+ }
+
+ clearTabsAndContent() {
+ for (const tab of this.tabBar.querySelectorAll(".nav-tabs > li")) {
+ if (!(tab instanceof HTMLLIElement)) continue;
+ if (tab.classList.contains("persistent-tab")) continue;
+ const tabDiv = document.getElementById(tab.dataset.divid);
+ tabDiv.parentNode.removeChild(tabDiv);
+ tab.parentNode.removeChild(tab);
+ }
+ }
+
+ private showTab(li: HTMLElement, show: boolean = true) {
+ const tabDiv = document.getElementById(li.dataset.divid);
+ tabDiv.style.display = show ? "block" : "none";
+ }
+
+ public addTab(caption: string): HTMLLIElement {
+ const newTab = document.createElement("li");
+ newTab.innerHTML = caption;
+ newTab.id = `tab-header-${this.container.id}-${this.nextTabId++}`;
+ const lastTab = this.tabBar.querySelector("li.last-tab");
+ this.tabBar.insertBefore(newTab, lastTab);
+ return newTab;
+ }
+
+ public addTabAndContent(caption: string): [HTMLLIElement, HTMLDivElement] {
+ const contentDiv = document.createElement("div");
+ contentDiv.className = "tab-content tab-default";
+ contentDiv.id = `tab-content-${this.container.id}-${this.nextTabId++}`;
+ contentDiv.style.display = "none";
+ this.container.appendChild(contentDiv);
+
+ const newTab = this.addTab(caption);
+ newTab.dataset.divid = contentDiv.id;
+ newTab.draggable = true;
+ newTab.ondragstart = this.tabOnDragStart.bind(this);
+ const lastTab = this.tabBar.querySelector("li.last-tab");
+ this.tabBar.insertBefore(newTab, lastTab);
+ return [newTab, contentDiv];
+ }
+
+ private moveTabDiv(tab: HTMLLIElement) {
+ const tabDiv = document.getElementById(tab.dataset.divid);
+ tabDiv.style.display = "none";
+ tab.classList.remove("active");
+ this.tabBar.parentNode.appendChild(tabDiv);
+ }
+
+ private tabBarOnDrop(e: DragEvent) {
+ if (!(e.target instanceof HTMLElement)) return;
+ e.preventDefault();
+ const tabId = e.dataTransfer.getData("text");
+ const tab = document.getElementById(tabId) as HTMLLIElement;
+ if (tab.parentNode != this.tabBar) {
+ this.moveTabDiv(tab);
+ }
+ const dropTab =
+ e.target.parentNode == this.tabBar
+ ? e.target : this.tabBar.querySelector("li.last-tab");
+ this.tabBar.insertBefore(tab, dropTab);
+ this.activateTab(tab);
+ }
+
+ private tabBarOnDragover(e) {
+ e.preventDefault();
+ }
+
+ private tabOnDragStart(e: DragEvent) {
+ if (!(e.target instanceof HTMLElement)) return;
+ e.dataTransfer.setData("text", e.target.id);
+ }
+
+ private tabBarOnClick(e: MouseEvent) {
+ const li = e.target as HTMLLIElement;
+ this.activateTab(li);
+ }
+}
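
A usage sketch for the new Tabs widget, mirroring how turbo-visualizer.ts drives it further down in this patch (the container element is hypothetical; any element with an id works, styled by the new tabs.css):

import { Tabs } from "../src/tabs";

const container = document.getElementById("left"); // hypothetical pane element
const tabs = new Tabs(container);

// A persistent "+" tab that survives clearTabsAndContent().
tabs.addTab("&#x2b;").classList.add("last-tab", "persistent-tab");

const [tab, content] = tabs.addTabAndContent("Source");
content.classList.add("scrollable");
content.textContent = "function f() {}";
tabs.activateTab(tab); // shows this tab's content div, hides the rest
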
diff --git a/deps/v8/tools/turbolizer/src/text-view.ts b/deps/v8/tools/turbolizer/src/text-view.ts
index a88d31d194..41a06eae77 100644
--- a/deps/v8/tools/turbolizer/src/text-view.ts
+++ b/deps/v8/tools/turbolizer/src/text-view.ts
@@ -2,14 +2,16 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-import {View} from "./view.js"
-import {anyToString, ViewElements, isIterable} from "./util.js"
-import {MySelection} from "./selection.js"
-
-export abstract class TextView extends View {
+import { PhaseView } from "../src/view";
+import { anyToString, ViewElements, isIterable } from "../src/util";
+import { MySelection } from "../src/selection";
+import { SourceResolver } from "./source-resolver";
+import { SelectionBroker } from "./selection-broker";
+import { NodeSelectionHandler, BlockSelectionHandler } from "./selection-handler";
+
+export abstract class TextView extends PhaseView {
selectionHandler: NodeSelectionHandler;
blockSelectionHandler: BlockSelectionHandler;
- nodeSelectionHandler: NodeSelectionHandler;
selection: MySelection;
blockSelection: MySelection;
textListNode: HTMLUListElement;
@@ -18,18 +20,22 @@ export abstract class TextView extends View {
blockIdtoNodeIds: Map<string, Array<string>>;
nodeIdToBlockId: Array<string>;
patterns: any;
+ sourceResolver: SourceResolver;
+ broker: SelectionBroker;
- constructor(id, broker, patterns) {
+ constructor(id, broker) {
super(id);
- let view = this;
+ const view = this;
view.textListNode = view.divNode.getElementsByTagName('ul')[0];
- view.patterns = patterns;
+ view.patterns = null;
view.nodeIdToHtmlElementsMap = new Map();
view.blockIdToHtmlElementsMap = new Map();
view.blockIdtoNodeIds = new Map();
view.nodeIdToBlockId = [];
view.selection = new MySelection(anyToString);
view.blockSelection = new MySelection(anyToString);
+ view.broker = broker;
+ view.sourceResolver = broker.sourceResolver;
const selectionHandler = {
clear: function () {
view.selection.clear();
@@ -53,11 +59,12 @@ export abstract class TextView extends View {
};
this.selectionHandler = selectionHandler;
broker.addNodeHandler(selectionHandler);
- view.divNode.onmouseup = function (e) {
+ view.divNode.addEventListener('click', e => {
if (!e.shiftKey) {
view.selectionHandler.clear();
}
- }
+ e.stopPropagation();
+ });
const blockSelectionHandler = {
clear: function () {
view.blockSelection.clear();
@@ -129,6 +136,10 @@ export abstract class TextView extends View {
element.classList.toggle("selected", isSelected);
}
}
+ const elementsToSelect = view.divNode.querySelectorAll(`[data-pc-offset]`);
+ for (const el of elementsToSelect) {
+ el.classList.toggle("selected", false);
+ }
for (const key of this.nodeIdToHtmlElementsMap.keys()) {
for (const element of this.nodeIdToHtmlElementsMap.get(key)) {
element.classList.toggle("selected", false);
@@ -146,82 +157,47 @@ export abstract class TextView extends View {
}
setPatterns(patterns) {
- let view = this;
- view.patterns = patterns;
+ this.patterns = patterns;
}
clearText() {
- let view = this;
- while (view.textListNode.firstChild) {
- view.textListNode.removeChild(view.textListNode.firstChild);
+ while (this.textListNode.firstChild) {
+ this.textListNode.removeChild(this.textListNode.firstChild);
}
}
createFragment(text, style) {
- let view = this;
- let fragment = document.createElement("SPAN");
-
- if (style.blockId != undefined) {
- const blockId = style.blockId(text);
- if (blockId != undefined) {
- fragment.blockId = blockId;
- this.addHtmlElementForBlockId(blockId, fragment);
- }
- }
-
- if (typeof style.link == 'function') {
- fragment.classList.add('linkable-text');
- fragment.onmouseup = function (e) {
- e.stopPropagation();
- style.link(text)
- };
- }
-
- if (typeof style.nodeId == 'function') {
- const nodeId = style.nodeId(text);
- if (nodeId != undefined) {
- fragment.nodeId = nodeId;
- this.addHtmlElementForNodeId(nodeId, fragment);
- }
- }
-
- if (typeof style.assignBlockId === 'function') {
- fragment.blockId = style.assignBlockId();
- this.addNodeIdToBlockId(fragment.nodeId, fragment.blockId);
- }
-
- if (typeof style.linkHandler == 'function') {
- const handler = style.linkHandler(text, fragment)
- if (handler !== undefined) {
- fragment.classList.add('linkable-text');
- fragment.onmouseup = handler;
+ const fragment = document.createElement("SPAN");
+
+ if (typeof style.associateData == 'function') {
+ style.associateData(text, fragment);
+ } else {
+ if (style.css != undefined) {
+ const css = isIterable(style.css) ? style.css : [style.css];
+ for (const cls of css) {
+ fragment.classList.add(cls);
+ }
}
+ fragment.innerText = text;
}
- if (style.css != undefined) {
- const css = isIterable(style.css) ? style.css : [style.css];
- for (const cls of css) {
- fragment.classList.add(cls);
- }
- }
- fragment.innerHTML = text;
return fragment;
}
processLine(line) {
- let view = this;
- let result = [];
+ const view = this;
+ const result = [];
let patternSet = 0;
while (true) {
- let beforeLine = line;
- for (let pattern of view.patterns[patternSet]) {
- let matches = line.match(pattern[0]);
+ const beforeLine = line;
+ for (const pattern of view.patterns[patternSet]) {
+ const matches = line.match(pattern[0]);
if (matches != null) {
if (matches[0] != '') {
- let style = pattern[1] != null ? pattern[1] : {};
- let text = matches[0];
+ const style = pattern[1] != null ? pattern[1] : {};
+ const text = matches[0];
if (text != '') {
- let fragment = view.createFragment(matches[0], style);
+ const fragment = view.createFragment(matches[0], style);
result.push(fragment);
}
line = line.substr(matches[0].length);
@@ -247,15 +223,15 @@ export abstract class TextView extends View {
}
processText(text) {
- let view = this;
- let textLines = text.split(/[\n]/);
+ const view = this;
+ const textLines = text.split(/[\n]/);
let lineNo = 0;
- for (let line of textLines) {
- let li = document.createElement("LI");
+ for (const line of textLines) {
+ const li = document.createElement("LI");
li.className = "nolinenums";
li.dataset.lineNo = "" + lineNo++;
- let fragments = view.processLine(line);
- for (let fragment of fragments) {
+ const fragments = view.processLine(line);
+ for (const fragment of fragments) {
li.appendChild(fragment);
}
view.textListNode.appendChild(li);
@@ -263,13 +239,12 @@ export abstract class TextView extends View {
}
initializeContent(data, rememberedSelection) {
- let view = this;
- view.clearText();
- view.processText(data);
+ this.clearText();
+ this.processText(data);
+ this.show();
}
- deleteContent() {
- }
+ public onresize(): void {}
isScrollable() {
return true;
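
createFragment now routes all caller-specific decoration through one optional style.associateData(text, fragment) hook instead of the removed blockId/nodeId/linkHandler fields. A pattern-table sketch using it; the [regexp, style] pair shape is inferred from processLine above, so treat the details as illustrative:

// Inferred shape: view.patterns is an array of pattern sets,
// each entry a [regexp, style] pair consumed by processLine.
const patterns = [[
  [/^B\d+/, {
    associateData: (text: string, fragment: HTMLElement) => {
      fragment.dataset.blockId = text.substring(1); // stash the id for click handling
      fragment.classList.add("clickable");
      fragment.innerText = text;
    },
  }],
  [/^./, { css: "com" }], // default arm: plain styled text via the css path
]];
// view.setPatterns(patterns);
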
diff --git a/deps/v8/tools/turbolizer/src/turbo-visualizer.ts b/deps/v8/tools/turbolizer/src/turbo-visualizer.ts
index 4cab92a197..87924b7b96 100644
--- a/deps/v8/tools/turbolizer/src/turbo-visualizer.ts
+++ b/deps/v8/tools/turbolizer/src/turbo-visualizer.ts
@@ -2,220 +2,50 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-import * as C from "./constants.js"
-import {SourceResolver} from "./source-resolver.js"
-import {SelectionBroker} from "./selection-broker.js"
-import {DisassemblyView} from "./disassembly-view.js"
-import {GraphMultiView} from "./graphmultiview.js"
-import {CodeMode, CodeView} from "./code-view.js"
-import * as d3 from "d3"
-
-class Snapper {
- resizer: Resizer;
- sourceExpand: HTMLElement;
- sourceCollapse: HTMLElement;
- disassemblyExpand: HTMLElement;
- disassemblyCollapse: HTMLElement;
-
- constructor(resizer: Resizer) {
- const snapper = this;
- snapper.resizer = resizer;
- snapper.sourceExpand = document.getElementById(C.SOURCE_EXPAND_ID);
- snapper.sourceCollapse = document.getElementById(C.SOURCE_COLLAPSE_ID);
- snapper.disassemblyExpand = document.getElementById(C.DISASSEMBLY_EXPAND_ID);
- snapper.disassemblyCollapse = document.getElementById(C.DISASSEMBLY_COLLAPSE_ID);
-
- document.getElementById("source-collapse").addEventListener("click", function () {
- resizer.snapper.toggleSourceExpanded();
- });
- document.getElementById("disassembly-collapse").addEventListener("click", function () {
- resizer.snapper.toggleDisassemblyExpanded();
- });
- }
-
- getLastExpandedState(type, default_state) {
- var state = window.sessionStorage.getItem("expandedState-" + type);
- if (state === null) return default_state;
- return state === 'true';
- }
-
- setLastExpandedState(type, state) {
- window.sessionStorage.setItem("expandedState-" + type, state);
- }
-
- toggleSourceExpanded(): void {
- this.setSourceExpanded(!this.sourceExpand.classList.contains("invisible"));
- }
-
- sourceExpandUpdate(newState: boolean) {
- this.setLastExpandedState("source", newState);
- this.sourceExpand.classList.toggle("invisible", newState);
- this.sourceCollapse.classList.toggle("invisible", !newState);
- }
-
- setSourceExpanded(newState) {
- if (this.sourceExpand.classList.contains("invisible") === newState) return;
- this.sourceExpandUpdate(newState);
- let resizer = this.resizer;
- if (newState) {
- resizer.sep_left = resizer.sep_left_snap;
- resizer.sep_left_snap = 0;
- } else {
- resizer.sep_left_snap = resizer.sep_left;
- resizer.sep_left = 0;
- }
- resizer.updatePanes();
- }
-
- toggleDisassemblyExpanded() {
- this.setDisassemblyExpanded(!this.disassemblyExpand.classList.contains("invisible"));
- }
-
- disassemblyExpandUpdate(newState) {
- this.setLastExpandedState("disassembly", newState);
- this.disassemblyExpand.classList.toggle("invisible", newState);
- this.disassemblyCollapse.classList.toggle("invisible", !newState);
- }
-
- setDisassemblyExpanded(newState) {
- if (this.disassemblyExpand.classList.contains("invisible") === newState) return;
- this.disassemblyExpandUpdate(newState);
- let resizer = this.resizer;
- if (newState) {
- resizer.sep_right = resizer.sep_right_snap;
- resizer.sep_right_snap = resizer.client_width;
- } else {
- resizer.sep_right_snap = resizer.sep_right;
- resizer.sep_right = resizer.client_width;
- }
- resizer.updatePanes();
- }
-
- panesUpated() {
- this.sourceExpandUpdate(this.resizer.sep_left > this.resizer.dead_width);
- this.disassemblyExpandUpdate(this.resizer.sep_right <
- (this.resizer.client_width - this.resizer.dead_width));
- }
-}
-
-class Resizer {
- snapper: Snapper;
- dead_width: number;
- client_width: number;
- left: HTMLElement;
- right: HTMLElement;
- middle: HTMLElement;
- sep_left: number;
- sep_right: number;
- sep_left_snap: number;
- sep_right_snap: number;
- sep_width_offset: number;
- panes_updated_callback: () => void;
- resizer_right: d3.Selection<HTMLDivElement, any, any, any>;
- resizer_left: d3.Selection<HTMLDivElement, any, any, any>;
-
- constructor(panes_updated_callback: () => void, dead_width: number) {
- let resizer = this;
- resizer.snapper = new Snapper(resizer)
- resizer.panes_updated_callback = panes_updated_callback;
- resizer.dead_width = dead_width
- resizer.client_width = document.body.getBoundingClientRect().width;
- resizer.left = document.getElementById(C.SOURCE_PANE_ID);
- resizer.middle = document.getElementById(C.INTERMEDIATE_PANE_ID);
- resizer.right = document.getElementById(C.GENERATED_PANE_ID);
- resizer.resizer_left = d3.select('.resizer-left');
- resizer.resizer_right = d3.select('.resizer-right');
- resizer.sep_left = resizer.client_width / 3;
- resizer.sep_right = resizer.client_width / 3 * 2;
- resizer.sep_left_snap = 0;
- resizer.sep_right_snap = 0;
- // Offset to prevent resizers from sliding slightly over one another.
- resizer.sep_width_offset = 7;
-
- let dragResizeLeft = d3.drag()
- .on('drag', function () {
- let x = d3.mouse(this.parentElement)[0];
- resizer.sep_left = Math.min(Math.max(0, x), resizer.sep_right - resizer.sep_width_offset);
- resizer.updatePanes();
- })
- .on('start', function () {
- resizer.resizer_left.classed("dragged", true);
- let x = d3.mouse(this.parentElement)[0];
- if (x > dead_width) {
- resizer.sep_left_snap = resizer.sep_left;
- }
- })
- .on('end', function () {
- resizer.resizer_left.classed("dragged", false);
- });
- resizer.resizer_left.call(dragResizeLeft);
-
- let dragResizeRight = d3.drag()
- .on('drag', function () {
- let x = d3.mouse(this.parentElement)[0];
- resizer.sep_right = Math.max(resizer.sep_left + resizer.sep_width_offset, Math.min(x, resizer.client_width));
- resizer.updatePanes();
- })
- .on('start', function () {
- resizer.resizer_right.classed("dragged", true);
- let x = d3.mouse(this.parentElement)[0];
- if (x < (resizer.client_width - dead_width)) {
- resizer.sep_right_snap = resizer.sep_right;
- }
- })
- .on('end', function () {
- resizer.resizer_right.classed("dragged", false);
- });;
- resizer.resizer_right.call(dragResizeRight);
- window.onresize = function () {
- resizer.updateWidths();
- resizer.updatePanes();
- };
- }
-
- updatePanes() {
- let left_snapped = this.sep_left === 0;
- let right_snapped = this.sep_right >= this.client_width - 1;
- this.resizer_left.classed("snapped", left_snapped);
- this.resizer_right.classed("snapped", right_snapped);
- this.left.style.width = this.sep_left + 'px';
- this.middle.style.width = (this.sep_right - this.sep_left) + 'px';
- this.right.style.width = (this.client_width - this.sep_right) + 'px';
- this.resizer_left.style('left', this.sep_left + 'px');
- this.resizer_right.style('right', (this.client_width - this.sep_right - 1) + 'px');
-
- this.snapper.panesUpated();
- this.panes_updated_callback();
- }
-
- updateWidths() {
- this.client_width = document.body.getBoundingClientRect().width;
- this.sep_right = Math.min(this.sep_right, this.client_width);
- this.sep_left = Math.min(Math.max(0, this.sep_left), this.sep_right);
- }
-}
+import { SourceResolver } from "../src/source-resolver";
+import { SelectionBroker } from "../src/selection-broker";
+import { DisassemblyView } from "../src/disassembly-view";
+import { GraphMultiView } from "../src/graphmultiview";
+import { CodeMode, CodeView } from "../src/code-view";
+import { Tabs } from "../src/tabs";
+import { Resizer } from "../src/resizer";
+import * as C from "../src/constants";
+import { InfoView } from "./info-view";
window.onload = function () {
- var svg = null;
- var multiview = null;
- var disassemblyView = null;
- var sourceViews = [];
- var selectionBroker = null;
- var sourceResolver = null;
- let resizer = new Resizer(panesUpdatedCallback, 100);
+ let multiview: GraphMultiView = null;
+ let disassemblyView: DisassemblyView = null;
+ let sourceViews: Array<CodeView> = [];
+ let selectionBroker: SelectionBroker = null;
+ let sourceResolver: SourceResolver = null;
+ const resizer = new Resizer(panesUpdatedCallback, 100);
+ const sourceTabsContainer = document.getElementById(C.SOURCE_PANE_ID);
+ const sourceTabs = new Tabs(sourceTabsContainer);
+ sourceTabs.addTab("&#x2b;").classList.add("last-tab", "persistent-tab");
+ const disassemblyTabsContainer = document.getElementById(C.GENERATED_PANE_ID);
+ const disassemblyTabs = new Tabs(disassemblyTabsContainer);
+ disassemblyTabs.addTab("&#x2b;").classList.add("last-tab", "persistent-tab");
+ const [infoTab, infoContainer] = sourceTabs.addTabAndContent("Info");
+ infoTab.classList.add("persistent-tab");
+ infoContainer.classList.add("viewpane", "scrollable");
+ const infoView = new InfoView(infoContainer);
+ infoView.show();
+ sourceTabs.activateTab(infoTab);
function panesUpdatedCallback() {
if (multiview) multiview.onresize();
}
- function loadFile(txtRes) {
+ function loadFile(txtRes: string) {
+ sourceTabs.clearTabsAndContent();
+ disassemblyTabs.clearTabsAndContent();
// If the JSON isn't properly terminated, assume compiler crashed and
// add best-guess empty termination
if (txtRes[txtRes.length - 2] == ',') {
txtRes += '{"name":"disassembly","type":"disassembly","data":""}]}';
}
try {
- sourceViews.forEach((sv) => sv.hide());
+ sourceViews.forEach(sv => sv.hide());
if (multiview) multiview.hide();
multiview = null;
if (disassemblyView) disassemblyView.hide();
@@ -225,9 +55,9 @@ window.onload = function () {
const jsonObj = JSON.parse(txtRes);
- let fnc = jsonObj.function;
+ let fnc = null;
// Backwards compatibility.
- if (typeof fnc == 'string') {
+ if (typeof jsonObj.function == 'string') {
fnc = {
functionName: fnc,
sourceId: -1,
@@ -236,33 +66,42 @@ window.onload = function () {
sourceText: jsonObj.source,
backwardsCompatibility: true
};
+ } else {
+ fnc = Object.assign(jsonObj.function, { backwardsCompatibility: false });
}
sourceResolver.setInlinings(jsonObj.inlinings);
sourceResolver.setSourceLineToBytecodePosition(jsonObj.sourceLineToBytecodePosition);
- sourceResolver.setSources(jsonObj.sources, fnc)
+ sourceResolver.setSources(jsonObj.sources, fnc);
sourceResolver.setNodePositionMap(jsonObj.nodePositions);
sourceResolver.parsePhases(jsonObj.phases);
- let sourceView = new CodeView(C.SOURCE_PANE_ID, selectionBroker, sourceResolver, fnc, CodeMode.MAIN_SOURCE);
- sourceView.show(null, null);
+ const [sourceTab, sourceContainer] = sourceTabs.addTabAndContent("Source");
+ sourceContainer.classList.add("viewpane", "scrollable");
+ sourceTabs.activateTab(sourceTab);
+ const sourceView = new CodeView(sourceContainer, selectionBroker, sourceResolver, fnc, CodeMode.MAIN_SOURCE);
+ sourceView.show();
sourceViews.push(sourceView);
- sourceResolver.forEachSource((source) => {
- let sourceView = new CodeView(C.SOURCE_PANE_ID, selectionBroker, sourceResolver, source, CodeMode.INLINED_SOURCE);
- sourceView.show(null, null);
+ sourceResolver.forEachSource(source => {
+ const sourceView = new CodeView(sourceContainer, selectionBroker, sourceResolver, source, CodeMode.INLINED_SOURCE);
+ sourceView.show();
sourceViews.push(sourceView);
});
- disassemblyView = new DisassemblyView(C.GENERATED_PANE_ID, selectionBroker);
+ const [disassemblyTab, disassemblyContainer] = disassemblyTabs.addTabAndContent("Disassembly");
+ disassemblyContainer.classList.add("viewpane", "scrollable");
+ disassemblyTabs.activateTab(disassemblyTab);
+ disassemblyView = new DisassemblyView(disassemblyContainer, selectionBroker);
disassemblyView.initializeCode(fnc.sourceText);
if (sourceResolver.disassemblyPhase) {
disassemblyView.initializePerfProfile(jsonObj.eventCounts);
- disassemblyView.show(sourceResolver.disassemblyPhase.data, null);
+ disassemblyView.showContent(sourceResolver.disassemblyPhase.data);
+ disassemblyView.show();
}
multiview = new GraphMultiView(C.INTERMEDIATE_PANE_ID, selectionBroker, sourceResolver);
- multiview.show(jsonObj);
+ multiview.show();
} catch (err) {
if (window.confirm("Error: Exception during load of TurboFan JSON file:\n" +
"error: " + err.message + "\nDo you want to clear session storage?")) {
@@ -276,26 +115,34 @@ window.onload = function () {
// The <input> form #upload-helper with type file can't be a picture.
// We hence keep it hidden, and forward the click from the picture
// button #upload.
- d3.select("#upload").on("click",
- () => document.getElementById("upload-helper").click());
- d3.select("#upload-helper").on("change", function (this: HTMLInputElement) {
- var uploadFile = this.files && this.files[0];
- var filereader = new FileReader();
- filereader.onload = function (e) {
- var txtRes = e.target.result;
- loadFile(txtRes);
- };
- if (uploadFile)
- filereader.readAsText(uploadFile);
+ document.getElementById("upload").addEventListener("click", e => {
+ document.getElementById("upload-helper").click();
+ e.stopPropagation();
+ });
+ document.getElementById("upload-helper").addEventListener("change",
+ function (this: HTMLInputElement) {
+ const uploadFile = this.files && this.files[0];
+ if (uploadFile) {
+ const filereader = new FileReader();
+ filereader.onload = () => {
+ const txtRes = filereader.result;
+ if (typeof txtRes == 'string') {
+ loadFile(txtRes);
+ }
+ };
+ filereader.readAsText(uploadFile);
+ }
+ }
+ );
+ window.addEventListener("keydown", (e: KeyboardEvent) => {
+ if (e.keyCode == 76 && e.ctrlKey) { // CTRL + L
+ document.getElementById("upload-helper").click();
+ e.stopPropagation();
+ e.preventDefault();
+ }
});
}
initializeUploadHandlers();
-
-
- resizer.snapper.setSourceExpanded(resizer.snapper.getLastExpandedState("source", true));
- resizer.snapper.setDisassemblyExpanded(resizer.snapper.getLastExpandedState("disassembly", false));
-
resizer.updatePanes();
-
};
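
loadFile now normalizes jsonObj.function to one shape in both branches. Pulled out as a standalone helper it would read roughly as below; the legacy-string branch is a sketch, and fields elided by the hunk above are marked rather than guessed:

// Hypothetical helper mirroring the backwards-compatibility branch in loadFile.
function normalizeFunction(jsonObj: any) {
  if (typeof jsonObj.function == 'string') {
    return {
      functionName: jsonObj.function, // legacy files carry only the name
      sourceId: -1,
      // ...position fields elided in the hunk above...
      sourceText: jsonObj.source,
      backwardsCompatibility: true
    };
  }
  return Object.assign(jsonObj.function, { backwardsCompatibility: false });
}
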
diff --git a/deps/v8/tools/turbolizer/src/util.ts b/deps/v8/tools/turbolizer/src/util.ts
index ef877e508c..d9c8dcdce0 100644
--- a/deps/v8/tools/turbolizer/src/util.ts
+++ b/deps/v8/tools/turbolizer/src/util.ts
@@ -32,7 +32,7 @@ export class ViewElements {
if (!doConsider) return;
const newScrollTop = computeScrollTop(this.container, element);
if (isNaN(newScrollTop)) {
- console.log("NOO")
+ console.log("NOO");
}
if (this.scrollTop === undefined) {
this.scrollTop = newScrollTop;
@@ -47,50 +47,11 @@ export class ViewElements {
}
}
-
-function lowerBound(a, value, compare, lookup) {
- let first = 0;
- let count = a.length;
- while (count > 0) {
- let step = Math.floor(count / 2);
- let middle = first + step;
- let middle_value = (lookup === undefined) ? a[middle] : lookup(a, middle);
- let result = (compare === undefined) ? (middle_value < value) : compare(middle_value, value);
- if (result) {
- first = middle + 1;
- count -= step + 1;
- } else {
- count = step;
- }
- }
- return first;
-}
-
-
-function upperBound(a, value, compare, lookup) {
- let first = 0;
- let count = a.length;
- while (count > 0) {
- let step = Math.floor(count / 2);
- let middle = first + step;
- let middle_value = (lookup === undefined) ? a[middle] : lookup(a, middle);
- let result = (compare === undefined) ? (value < middle_value) : compare(value, middle_value);
- if (!result) {
- first = middle + 1;
- count -= step + 1;
- } else {
- count = step;
- }
- }
- return first;
-}
-
-
export function sortUnique<T>(arr: Array<T>, f: (a: T, b: T) => number, equal: (a: T, b: T) => boolean) {
if (arr.length == 0) return arr;
arr = arr.sort(f);
- let ret = [arr[0]];
- for (var i = 1; i < arr.length; i++) {
+ const ret = [arr[0]];
+ for (let i = 1; i < arr.length; i++) {
if (!equal(arr[i - 1], arr[i])) {
ret.push(arr[i]);
}
@@ -99,11 +60,10 @@ export function sortUnique<T>(arr: Array<T>, f: (a: T, b: T) => number, equal: (
}
// Partial application without binding the receiver
-export function partial(f, ...arguments1) {
- return function (...arguments2) {
- var arguments2 = Array.from(arguments);
+export function partial(f: any, ...arguments1: Array<any>) {
+ return function (this: any, ...arguments2: Array<any>) {
f.apply(this, [...arguments1, ...arguments2]);
- }
+ };
}
export function isIterable(obj: any): obj is Iterable<any> {
@@ -111,6 +71,23 @@ export function isIterable(obj: any): obj is Iterable<any> {
&& typeof obj != 'string' && typeof obj[Symbol.iterator] === 'function';
}
-export function alignUp(raw:number, multiple:number):number {
+export function alignUp(raw: number, multiple: number): number {
return Math.floor((raw + multiple - 1) / multiple) * multiple;
}
+
+export function measureText(text: string) {
+ const textMeasure = document.getElementById('text-measure');
+ if (textMeasure instanceof SVGTSpanElement) {
+ textMeasure.textContent = text;
+ return {
+ width: textMeasure.getBBox().width,
+ height: textMeasure.getBBox().height,
+ };
+ }
+ return { width: 0, height: 0 };
+}
+
+// Interpolate between the given start and end values by a fraction of val/max.
+export function interpolate(val: number, max: number, start: number, end: number) {
+ return start + (end - start) * (val / max);
+}
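
The retyped partial still deliberately leaves this unbound: the leading arguments are captured, but the receiver is whichever object the returned function is eventually called on. A quick check of that behavior:

import { partial } from "../src/util";

class Counter {
  total = 0;
  addScaled(scale: number, value: number) { this.total += scale * value; }
}

const c = new Counter();
const addDoubled = partial(c.addScaled, 2); // pre-applies scale, receiver left open
addDoubled.call(c, 5);                      // inside addScaled, this === c
console.log(c.total);                       // 10
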
diff --git a/deps/v8/tools/turbolizer/src/view.ts b/deps/v8/tools/turbolizer/src/view.ts
index dbb8514fe2..a8bb125fc4 100644
--- a/deps/v8/tools/turbolizer/src/view.ts
+++ b/deps/v8/tools/turbolizer/src/view.ts
@@ -3,34 +3,35 @@
// found in the LICENSE file.
export abstract class View {
- container: HTMLElement;
- divNode: HTMLElement;
- abstract initializeContent(data: any, rememberedSelection: Selection): void;
- abstract createViewElement(): HTMLElement;
- abstract deleteContent(): void;
- abstract detachSelection(): Set<string>;
+ protected container: HTMLElement;
+ protected divNode: HTMLElement;
+ protected abstract createViewElement(): HTMLElement;
- constructor(id) {
- this.container = document.getElementById(id);
+ constructor(idOrContainer: string | HTMLElement) {
+ this.container = typeof idOrContainer == "string" ? document.getElementById(idOrContainer) : idOrContainer;
this.divNode = this.createViewElement();
}
- isScrollable(): boolean {
- return false;
- }
-
- show(data, rememberedSelection): void {
+ public show(): void {
this.container.appendChild(this.divNode);
- this.initializeContent(data, rememberedSelection);
}
- hide(): void {
- this.deleteContent();
+ public hide(): void {
this.container.removeChild(this.divNode);
}
}
-export interface PhaseView {
- onresize();
- searchInputAction(searchInput: HTMLInputElement, e: Event);
+export abstract class PhaseView extends View {
+ public abstract initializeContent(data: any, rememberedSelection: Set<any>): void;
+ public abstract detachSelection(): Set<string>;
+ public abstract onresize(): void;
+ public abstract searchInputAction(searchInput: HTMLInputElement, e: Event, onlyVisible: boolean): void;
+
+ constructor(idOrContainer: string | HTMLElement) {
+ super(idOrContainer);
+ }
+
+ public isScrollable(): boolean {
+ return false;
+ }
}
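
To make the refactoring concrete: PhaseView is now an abstract base class rather than an interface, and show()/hide() only attach and detach the root element, with content setup moved out of show() into initializeContent. A hypothetical minimal subclass (the EmptyView name, its trivial bodies, and the element id are illustrative only) might look like:

    import { PhaseView } from './view';

    class EmptyView extends PhaseView {
      protected createViewElement(): HTMLElement {
        return document.createElement('div');
      }
      public initializeContent(data: any, rememberedSelection: Set<any>): void {
        // divNode is protected now, but still visible to subclasses.
        this.divNode.textContent = String(data);
      }
      public detachSelection(): Set<string> {
        return new Set();
      }
      public onresize(): void { }
      public searchInputAction(searchInput: HTMLInputElement, e: Event, onlyVisible: boolean): void { }
    }

    const view = new EmptyView('empty-container'); // accepts an element id or an HTMLElement
    view.show();
    view.initializeContent('(no data)', new Set());
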
diff --git a/deps/v8/tools/turbolizer/tabs.css b/deps/v8/tools/turbolizer/tabs.css
new file mode 100644
index 0000000000..54dba72b70
--- /dev/null
+++ b/deps/v8/tools/turbolizer/tabs.css
@@ -0,0 +1,55 @@
+.content {
+ display: grid;
+ grid-template-areas: "tabs" "window";
+ grid-template-columns: 1fr;
+ grid-template-rows: auto 1fr;
+ min-height: calc(100vh);
+}
+
+.nav-tabs-container {
+ grid-area: tabs;
+ padding: 0px;
+ background-color: #999999;
+ border-bottom: 4px solid #CCCCCC;
+}
+
+.tab-content {
+ grid-area: window;
+ background-color: white;
+ padding: 0px;
+ display:none;
+}
+
+.tab-content.tab-default {
+ display: block;
+}
+
+ul.nav-tabs {
+ padding: 0px;
+ margin: 0px;
+ overflow: auto;
+ display: table-row;
+ min-height: 2ex;
+}
+
+.nav-tabs li {
+ display: inline-block;
+ padding-left: 10px;
+ padding-right: 10px;
+ padding-top: 4px;
+ padding-bottom: 4px;
+ min-width: 20px;
+ text-decoration: none;
+ color: black;
+ text-align: center;
+ user-select: none;
+ cursor: pointer;
+}
+
+.nav-tabs li:hover {
+ background-color: #EEEEEE;
+}
+
+.nav-tabs li.active {
+ background-color: #CCCCCC;
+}
\ No newline at end of file
diff --git a/deps/v8/tools/turbolizer/test/source-resolver-test.ts b/deps/v8/tools/turbolizer/test/source-resolver-test.ts
new file mode 100644
index 0000000000..38d674510f
--- /dev/null
+++ b/deps/v8/tools/turbolizer/test/source-resolver-test.ts
@@ -0,0 +1,10 @@
+import { SourceResolver } from '../src/source-resolver';
+import { expect } from 'chai';
+import { describe, it } from 'mocha';
+
+describe('SourceResolver', () => {
+ it('should be constructible', () => {
+ const a: SourceResolver = new SourceResolver();
+ expect(a.sources.length).to.equal(0);
+ });
+});
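
This first spec compiles against tsconfig.test.json below (module set to commonjs so mocha can require the output). A hypothetical second spec in the same style, exercising a util helper instead of SourceResolver, would read:

    import { sortUnique } from '../src/util';
    import { expect } from 'chai';
    import { describe, it } from 'mocha';

    describe('sortUnique', () => {
      it('drops adjacent duplicates after sorting', () => {
        const result = sortUnique([2, 1, 2], (a, b) => a - b, (a, b) => a === b);
        expect(result).to.deep.equal([1, 2]);
      });
    });
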
diff --git a/deps/v8/tools/turbolizer/tsconfig.json b/deps/v8/tools/turbolizer/tsconfig.json
index c54157280f..cd036ac3ab 100644
--- a/deps/v8/tools/turbolizer/tsconfig.json
+++ b/deps/v8/tools/turbolizer/tsconfig.json
@@ -1,32 +1,39 @@
{
- "compilerOptions": {
- "outDir": "build/",
- "allowJs": false,
- "target": "es2017",
- "module": "es2015",
- "sourceMap": true,
- "experimentalDecorators": true,
- "emitDecoratorMetadata": true,
- "moduleResolution": "node"
- },
- "files": [
- "src/util.ts",
- "src/lang-disassembly.ts",
- "src/node.ts",
- "src/edge.ts",
- "src/source-resolver.ts",
- "src/selection.ts",
- "src/selection-broker.ts",
- "src/selection-handler.ts",
- "src/constants.ts",
- "src/view.ts",
- "src/text-view.ts",
- "src/code-view.ts",
- "src/graph-layout.ts",
- "src/graph-view.ts",
- "src/schedule-view.ts",
- "src/disassembly-view.ts",
- "src/graphmultiview.ts",
- "src/turbo-visualizer.ts"
- ]
+ "compilerOptions": {
+ "outDir": "build/",
+ "allowJs": false,
+ "target": "es2018",
+ "module": "es2015",
+ "sourceMap": true,
+ "experimentalDecorators": true,
+ "emitDecoratorMetadata": true,
+ "moduleResolution": "node",
+ "noUnusedLocals": true,
+ "noImplicitReturns": true,
+ "noImplicitThis": true,
+ "lib": ["dom", "es6", "dom.iterable", "scripthost", "es2018"]
+ },
+ "files": [
+ "src/util.ts",
+ "src/node.ts",
+ "src/edge.ts",
+ "src/graph.ts",
+ "src/node-label.ts",
+ "src/source-resolver.ts",
+ "src/selection.ts",
+ "src/selection-broker.ts",
+ "src/selection-handler.ts",
+ "src/constants.ts",
+ "src/view.ts",
+ "src/text-view.ts",
+ "src/code-view.ts",
+ "src/graph-layout.ts",
+ "src/graph-view.ts",
+ "src/schedule-view.ts",
+ "src/disassembly-view.ts",
+ "src/graphmultiview.ts",
+ "src/turbo-visualizer.ts",
+ "src/resizer.ts",
+ "src/info-view.ts"
+ ]
}
diff --git a/deps/v8/tools/turbolizer/tsconfig.test.json b/deps/v8/tools/turbolizer/tsconfig.test.json
new file mode 100644
index 0000000000..1b7a59159d
--- /dev/null
+++ b/deps/v8/tools/turbolizer/tsconfig.test.json
@@ -0,0 +1,6 @@
+{
+ "extends": "./tsconfig.json",
+ "compilerOptions": {
+ "module": "commonjs"
+ }
+}
diff --git a/deps/v8/tools/turbolizer/tslint.json b/deps/v8/tools/turbolizer/tslint.json
new file mode 100644
index 0000000000..e07e057a62
--- /dev/null
+++ b/deps/v8/tools/turbolizer/tslint.json
@@ -0,0 +1,45 @@
+{
+ "defaultSeverity": "error",
+ "extends": "tslint:recommended",
+ "jsRules": {},
+ "rules": {
+ "curly": [true, "ignore-same-line"],
+ "quotemark": [false, "double", "avoid-escape", "avoid-template"],
+ "only-arrow-functions": [false],
+ "no-var-keyword": true,
+ "prefer-const": [true],
+ "max-line-length": [false, {
+ "limit": 80
+ }],
+ "ordered-imports": false,
+ "array-type": [true, "generic"],
+ "semicolon": true,
+ "member-access": false,
+ "object-literal-shorthand": false,
+ "object-literal-key-quotes": [true, "as-needed"],
+ "object-literal-sort-keys": false,
+ "space-before-function-paren": [true, {
+ "anonymous": "always"
+ }],
+ "triple-equals": false,
+ "no-string-throw": false,
+ "no-empty": [true, "allow-empty-catch", "allow-empty-functions"],
+ "trailing-comma": false,
+ "member-ordering": false,
+ "no-string-literal": false,
+ "arrow-parens": [true, "ban-single-arg-parens"],
+ "no-console": false,
+ "interface-name": false,
+ "no-bitwise": false,
+ "no-shadowed-variable": false,
+ "prefer-for-of": true,
+ "align": true,
+ "arrow-return-shorthand": true,
+ "max-classes-per-file": false,
+ "variable-name": true,
+ "forin": false,
+ "one-variable-per-declaration": true,
+ "no-consecutive-blank-lines": true
+ },
+ "rulesDirectory": []
+}
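
As a rough sketch (not from the patch) of what the stricter rules above enforce, the following would pass the new configuration:

    // no-var-keyword / prefer-const: const (or let), never var.
    const limits: Array<number> = [8, 16, 32]; // array-type "generic": Array<T>, not T[]
    // one-variable-per-declaration: one binding per statement.
    let total = 0;
    // arrow-parens "ban-single-arg-parens": single parameters take no parentheses.
    limits.forEach(n => {
      total += n;
    });
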
diff --git a/deps/v8/tools/turbolizer/turbo-visualizer.css b/deps/v8/tools/turbolizer/turbo-visualizer.css
index c7d45a7ee2..b37dcc498b 100644
--- a/deps/v8/tools/turbolizer/turbo-visualizer.css
+++ b/deps/v8/tools/turbolizer/turbo-visualizer.css
@@ -1,58 +1,69 @@
.visible-transition {
- transition-delay: 0s;
- transition-duration: 1s;
- transition-property: all;
- transition-timing-function: ease;
+ transition-delay: 0s;
+ transition-duration: 1s;
+ transition-property: all;
+ transition-timing-function: ease;
}
.collapse-pane {
- background: #A0A0A0;
- bottom: 0;
- position: absolute;
- margin-bottom: 0.5em;
- margin-right: 0.5em;
- margin-left: 0.5em;
- border-radius: 5px;
- padding: 0.5em;
- z-index: 5;
- opacity: 0.7;
- cursor: pointer;
+ background: #A0A0A0;
+ bottom: 0;
+ position: absolute;
+ margin-bottom: 0.5em;
+ margin-right: 0.5em;
+ margin-left: 0.5em;
+ border-radius: 5px;
+ padding: 0.5em;
+ z-index: 20;
+ opacity: 0.7;
+ cursor: pointer;
}
.search-input {
- vertical-align: middle;
- width: 145px;
- opacity: 1;
+ vertical-align: middle;
+ width: 145px;
+ opacity: 1;
+ box-sizing: border-box;
+ height: 1.5em;
+}
+
+#phase-select {
+ box-sizing: border-box;
+ height: 1.5em;
+}
+
+#search-only-visible {
+ vertical-align: middle;
}
.button-input {
- vertical-align: middle;
- width: 24px;
- opacity: 0.4;
- cursor: pointer;
+ vertical-align: middle;
+ width: 24px;
+ opacity: 0.4;
+ cursor: pointer;
}
.button-input-toggled {
- border-radius: 5px;
- background-color: #505050;
+ border-radius: 5px;
+ background-color: #505050;
}
.button-input:focus {
- outline: none;
+ outline: none;
}
.invisible {
- display: none;
+ display: none;
}
.selected {
- background-color: #FFFF33;
+ background-color: #FFFF33;
}
.selected.block,
.selected.block-id,
.selected.schedule-block {
- background-color: #AAFFAA;
+ background-color: #AAFFAA;
}
ol.linenums {
@@ -60,120 +71,142 @@ ol.linenums {
}
.line-number {
- display:inline-block;
- min-width: 3ex;
- text-align: right;
- color: #444444;
- margin-right: 0.5ex;
- padding-right: 0.5ex;
- background: #EEEEEE;
- /* font-size: 80%; */
- user-select: none;
- height: 120%;
+ display: inline-block;
+ min-width: 3ex;
+ text-align: right;
+ color: #444444;
+ margin-right: 0.5ex;
+ padding-right: 0.5ex;
+ background: #EEEEEE;
+ /* font-size: 80%; */
+ user-select: none;
+ height: 120%;
}
.line-number:hover {
- background-color: #CCCCCC;
+ background-color: #CCCCCC;
}
-.prettyprint ol.linenums > li.selected {
- background-color: #FFFF33 !important;
+.prettyprint ol.linenums>li.selected {
+ background-color: #FFFF33 !important;
}
li.selected .line-number {
- background-color: #FFFF33;
+ background-color: #FFFF33;
}
-.prettyprint ol.linenums > li {
- list-style-type: decimal;
- display: block;
+.prettyprint ol.linenums>li {
+ list-style-type: decimal;
+ display: block;
}
.source-container {
- border-bottom: 2px solid #AAAAAA;
+ border-bottom: 2px solid #AAAAAA;
}
.code-header {
- background-color: #CCCCCC;
- padding-left: 1em;
- padding-right: 1em;
- padding-top: 1ex;
- padding-bottom: 1ex;
- font-family: monospace;
- user-select: none;
+ background-color: #CCCCCC;
+ padding-left: 1em;
+ padding-right: 1em;
+ padding-top: 1ex;
+ padding-bottom: 1ex;
+ font-family: monospace;
+ user-select: none;
}
.main-source .code-header {
- border-top: 2px solid #AAAAAA;
- font-weight: bold;
+ border-top: 2px solid #AAAAAA;
+ font-weight: bold;
}
.code-header .code-file-function {
- font-family: monospace;
- float: left;
- user-select: text;
+ font-family: monospace;
+ float: left;
+ user-select: text;
}
.code-header .code-mode {
- float: right;
- font-family: sans-serif;
- font-size: small;
+ float: right;
+ font-family: sans-serif;
+ font-size: small;
+}
+
+.info-container {
+ font-family: sans-serif;
+ font-size: small;
+}
+
+.info-topic {
+ border: 1px solid lightgray;
+ margin: 2px;
}
-html, body {
- margin: 0;
- padding: 0;
- /*height: 99vh;
+.info-topic-header {
+ background-color: lightgray;
+ padding: 1px;
+}
+
+.info-topic-content {
+ padding: 2px;
+}
+
+
+html,
+body {
+ margin: 0;
+ padding: 0;
+ /*height: 99vh;
width: 99vw;*/
- overflow: hidden;
+ overflow: hidden;
}
p {
- text-align: center;
- overflow: overlay;
- position: relative;
+ text-align: center;
+ overflow: overlay;
+ position: relative;
}
marker {
- fill: #080808;
+ fill: #080808;
}
g rect {
- fill: #F0F0F0;
- stroke: #080808;
- stroke-width: 2px;
+ fill: #F0F0F0;
+ stroke: #080808;
+ stroke-width: 2px;
}
g.dead {
- opacity: .5;
+ opacity: .5;
}
g.unsorted rect {
- opacity: 0.5;
+ opacity: 0.5;
}
div.scrollable {
- overflow-y: auto; overflow-x: hidden;
+ overflow-y: auto;
+ overflow-x: hidden;
}
g.turbonode[relToHover="input"] rect {
- stroke: #67e62c;
- stroke-width: 16px;
+ stroke: #67e62c;
+ stroke-width: 16px;
}
g.turbonode[relToHover="output"] rect {
- stroke: #d23b14;
- stroke-width: 16px;
+ stroke: #d23b14;
+ stroke-width: 16px;
}
path[relToHover="input"] {
- stroke: #67e62c;
- stroke-width: 16px;
+ stroke: #67e62c;
+ stroke-width: 16px;
}
path[relToHover="output"] {
- stroke: #d23b14;
- stroke-width: 16px;
+ stroke: #d23b14;
+ stroke-width: 16px;
}
@@ -183,82 +216,82 @@ g.turbonode:hover rect {
}
g.control rect {
- fill: #EFCC00;
- stroke: #080808;
- stroke-width: 5px;
+ fill: #EFCC00;
+ stroke: #080808;
+ stroke-width: 5px;
}
g.javascript rect {
- fill: #DD7E6B;
+ fill: #DD7E6B;
}
g.simplified rect {
- fill: #3C78D8;
+ fill: #3C78D8;
}
g.machine rect {
- fill: #6AA84F;
+ fill: #6AA84F;
}
g.input rect {
- fill: #CFE2F3;
+ fill: #CFE2F3;
}
g.selected rect {
- fill: #FFFF33;
+ fill: #FFFF33;
}
circle.bubbleStyle {
- fill: #080808;
- fill-opacity: 0.0;
- stroke: #080808;
- stroke-width: 2px;
+ fill: #080808;
+ fill-opacity: 0.0;
+ stroke: #080808;
+ stroke-width: 2px;
}
circle.bubbleStyle:hover {
- stroke-width: 3px;
+ stroke-width: 3px;
}
circle.filledBubbleStyle {
- fill: #080808;
- stroke: #080808;
- stroke-width: 2px;
+ fill: #080808;
+ stroke: #080808;
+ stroke-width: 2px;
}
circle.filledBubbleStyle:hover {
- fill: #080808;
- stroke-width: 3px;
+ fill: #080808;
+ stroke-width: 3px;
}
circle.halfFilledBubbleStyle {
- fill: #808080;
- stroke: #101010;
- stroke-width: 2px;
+ fill: #808080;
+ stroke: #101010;
+ stroke-width: 2px;
}
circle.halfFilledBubbleStyle:hover {
- fill: #808080;
- stroke-width: 3px;
+ fill: #808080;
+ stroke-width: 3px;
}
path {
- fill: none;
- stroke: #080808;
- stroke-width: 4px;
- cursor: default;
+ fill: none;
+ stroke: #080808;
+ stroke-width: 4px;
+ cursor: default;
}
path:hover {
- stroke-width: 6px;
+ stroke-width: 6px;
}
path.hidden {
- fill: none;
- stroke-width: 0;
+ fill: none;
+ stroke-width: 0;
}
path.link.selected {
- stroke: #FFFF33;
+ stroke: #FFFF33;
}
pre.prettyprint {
@@ -271,11 +304,11 @@ li.L3,
li.L5,
li.L7,
li.L9 {
- background: none !important
+ background: none !important
}
li.nolinenums {
- list-style-type:none;
+ list-style-type: none;
}
ul.noindent {
@@ -284,151 +317,142 @@ ul.noindent {
-webkit-margin-after: 0px;
}
-input:hover, .collapse-pane:hover input {
- opacity: 1;
- cursor: pointer;
+input:hover,
+.collapse-pane:hover input {
+ opacity: 1;
+ cursor: pointer;
}
-span.linkable-text {
- text-decoration: underline;
+.linkable-text {
+ text-decoration: underline;
}
-span.linkable-text:hover {
- cursor: pointer;
- font-weight: bold;
+.linkable-text:hover {
+ cursor: pointer;
+ font-weight: bold;
}
#left {
- float: left;
- user-select: none;
+ float: left;
+ user-select: none;
}
#middle {
- float:left;
- background-color: #F8F8F8;
- user-select: none;
+ float: left;
+ background-color: #F8F8F8;
+ user-select: none;
}
#right {
- float: right;
+ float: right;
}
.viewpane {
- height: 100vh;
- background-color: #FFFFFF;
+ height: 100vh;
+ background-color: #FFFFFF;
+}
+
+.multiview {
+ width: 100%;
}
#disassembly-collapse {
- right: 0;
+ right: 0;
}
#source-collapse {
- left: 0;
+ left: 0;
}
#graph {
- width: 100%;
- height: 100%;
+ width: 100%;
+ height: 100%;
}
-#graph-toolbox-anchor {
- height: 0px;
+.toolbox-anchor {
+ height: 0px;
}
-#graph-toolbox {
- position: relative;
- top: 1em;
- left: 25px;
- border: 2px solid #eee8d5;
- border-radius: 5px;
- padding: 0.7em;
- z-index: 5;
- background: rgba(100%, 100%, 100%, 0.7);
+.graph-toolbox {
+ position: relative;
+ border-bottom: 2px solid #eee8d5;
+ z-index: 5;
+ background: rgba(100%, 100%, 100%, 0.7);
+ box-sizing: border-box;
+ padding: 3px;
+ margin-left: 4px;
+ margin-right: 4px;
}
-#disassembly-toolbox {
- position: relative;
- top: 1em;
- left: 0.7em;
- border: 2px solid #eee8d5;
- border-radius: 5px;
- padding: 0.7em;
- z-index: 5;
+.disassembly-toolbox {
+ position: relative;
+ padding-bottom: 3px;
+ z-index: 5;
+ background: rgba(100%, 100%, 100%, 0.7);
+ padding-top: 3px;
+ box-sizing: border-box;
+ margin-left: 4px;
+ margin-right: 4px;
}
#load-file {
- position: absolute;
- top: 0;
- right: 0;
- margin-top: 0.5em;
- margin-right: 0.5em;
- z-index: 5;
- opacity: 0.7;
+ position: absolute;
+ top: 0;
+ right: 0;
+ margin-top: 0.5em;
+ margin-right: 0.5em;
+ z-index: 20;
+ opacity: 0.7;
}
#load-file input {
- background: #A0A0A0;
- border-radius: 5px;
- padding: 0.5em;
+ background: #A0A0A0;
+ border-radius: 5px;
+ padding: 0.5em;
}
#upload-helper {
- display: none;
+ display: none;
}
.prof {
- cursor: default;
+ cursor: default;
}
tspan {
- font-size: 500%;
- font-family: sans-serif;
+ font-size: 500%;
+ font-family: sans-serif;
}
text {
- dominant-baseline: text-before-edge;
-}
-
-.resizer-left {
- position:absolute;
- width: 4px;
- height:100%;
- background: #a0a0a0;
- cursor: pointer;
+ dominant-baseline: text-before-edge;
}
-.resizer-left.snapped {
- width: 12px;
+.resizer {
+ position: absolute;
+ z-index: 10;
+ width: 4px;
+ height: 100%;
+ background: #a0a0a0;
+ cursor: pointer;
}
-.resizer-left:hover {
- background: orange;
+.resizer.snapped {
+ width: 12px;
}
-.resizer-left.dragged {
- background: orange;
+.resizer.snapped:hover {
+ width: 12px;
+ margin-left: 0px;
}
-.resizer-right {
- position:absolute;
- width: 4px;
- height:100%;
- background: #a0a0a0;
- cursor: pointer;
-}
-
-.resizer-right.snapped {
- width: 12px;
-}
-
-.resizer-right:hover {
- background: orange;
-}
-
-.resizer-right.dragged {
- background: orange;
+.resizer:hover,
+.resizer.dragged {
+ width: 10px;
+ margin-left: -4px;
+ background: orange;
}
.source-position {
@@ -438,227 +462,233 @@ text {
}
.source-position .inlining-marker {
- content: "";
- position: relative;
- display: inline-block;
- top: -0.5ex;
- margin-left: -4px;
- margin-right: -4px;
- border-width: 5px;
- border-style: solid;
- border-color: #555 transparent transparent transparent;
+ content: "";
+ position: relative;
+ display: inline-block;
+ top: -0.5ex;
+ margin-left: -4px;
+ margin-right: -4px;
+ border-width: 5px;
+ border-style: solid;
+ border-color: #555 transparent transparent transparent;
}
.source-position .marker {
- content: "";
- display: inline-block;
- position: relative;
- bottom: -1ex;
- width: 0px;
- margin-left: -4px;
- margin-right: -4px;
- border-width: 5px;
- border-style: solid;
- border-color: transparent transparent #555 transparent;
+ content: "";
+ display: inline-block;
+ bottom: -1ex;
+ width: 0px;
+ margin-left: -4px;
+ margin-right: -4px;
+ margin-bottom: -1ex;
+ border-width: 5px;
+ border-style: solid;
+ border-color: transparent transparent #555 transparent;
}
.source-position.selected .marker {
- border-color: transparent transparent #F00 transparent;
+ border-color: transparent transparent #F00 transparent;
}
.source-position .inlining-marker:hover {
- border-color: transparent transparent #AA5 transparent;
+ border-color: transparent transparent #AA5 transparent;
}
.source-position .inlining-marker[data-descr]:hover::after {
- content: attr(data-descr);
- position: absolute;
- font-size: 10px;
- z-index: 1;
- background-color: #555;
- color: #fff;
- text-align: center;
- border-radius: 6px;
- padding: 6px;
- top: 6px;
- left: 50%;
- margin-left: -80px;
+ content: attr(data-descr);
+ position: absolute;
+ font-size: 10px;
+ z-index: 1;
+ background-color: #555;
+ color: #fff;
+ text-align: center;
+ border-radius: 6px;
+ padding: 6px;
+ top: 6px;
+ left: 50%;
+ margin-left: -80px;
}
#sequence {
- font-family: monospace;
- margin-top: 50px;
+ font-family: monospace;
+ margin-top: 50px;
}
#schedule {
- font-family: monospace;
- margin-top: 50px;
+ font-family: monospace;
+ margin-top: 50px;
}
.schedule-block {
- margin: 5px;
- background-color: white;
- padding-left: 5px;
+ margin: 5px;
+ background-color: white;
+ padding-left: 5px;
}
.schedule-block .block-id {
- display: inline-block;
- font-size:large;
- text-decoration: underline;
- padding-left: 1ex;
+ display: inline-block;
+ font-size: large;
+ text-decoration: underline;
+ padding-left: 1ex;
}
.schedule-block .block-id:hover {
- font-weight: bold;
+ font-weight: bold;
}
-.schedule-block > .block-id::before {
- content: "Block B";
+.schedule-block>.block-id::before {
+ content: "Block B";
}
.schedule-block .block-list {
- display: inline-block;
+ display: inline-block;
}
.schedule-block .block-list * {
- display: inline-block;
+ display: inline-block;
}
.schedule-block .block-list .block-id {
- padding-left: 1ex;
+ padding-left: 1ex;
}
.schedule-block .block-list .block-id:before {
- content: "B";
+ content: "B";
}
.schedule-block .predecessor-list::before {
- display: inline-block;
- content: " \2B05 ";
- padding-left: 1ex;
- padding-right: 1ex;
+ display: inline-block;
+ content: " \2B05 ";
+ padding-left: 1ex;
+ padding-right: 1ex;
}
.schedule-block .successor-list::before {
- display: inline-block;
- content: " \2B95 ";
- padding-left: 1ex;
- padding-right: 1ex;
+ display: inline-block;
+ content: " \2B95 ";
+ padding-left: 1ex;
+ padding-right: 1ex;
}
.schedule-block .nodes .node * {
- display:inline-block;
+ display: inline-block;
}
.schedule-block .nodes .node .node-id {
- padding-right: 1ex;
- min-width: 5ex;
- text-align: right;
+ padding-right: 1ex;
+ min-width: 5ex;
+ text-align: right;
}
.schedule-block .nodes .node .node-id:after {
- content: ":";
+ content: ":";
}
.schedule-block .nodes .node .node-label {
- user-select: text;
+ user-select: text;
}
.schedule-block .nodes .node .parameter-list:before {
- content: "(";
+ content: "(";
}
.schedule-block .nodes .node .parameter-list:after {
- content: ")";
+ content: ")";
}
.schedule-block .instr-marker {
- padding-right: .5ex;
- padding-left: .5ex;
- min-width: 1ex;
- background: #EEEEEE;
- /* display: none; */
+ padding-right: .5ex;
+ padding-left: .5ex;
+ min-width: 1em;
+ background: #EEEEEE;
+ /* display: none; */
}
-.schedule-block > .instr-marker {
- display: inline;
+.schedule-block>.instr-marker {
+ display: inline;
}
.instruction * {
- padding-right: .5ex;
+ padding-right: .5ex;
}
-.phi-label, .instruction-id {
- display: inline-block;
- padding-right: .5ex;
- padding-left: .5ex;
- min-width: 1ex;
- vertical-align: top;
+.phi-label,
+.instruction-id {
+ display: inline-block;
+ padding-right: .5ex;
+ padding-left: .5ex;
+ min-width: 1ex;
+ vertical-align: top;
}
.instruction-id:after {
- content: ":";
+ content: ":";
}
-.instruction-node, .gap, .instruction {
- display: block;
+.instruction-node,
+.gap,
+.instruction {
+ display: block;
}
-.phi-contents, .instruction-contents, .gap *, .instruction * {
- display: inline-block;
+.phi-contents,
+.instruction-contents,
+.gap *,
+.instruction * {
+ display: inline-block;
}
.phi * {
- padding-right: 1ex;
- display: inline-block;
+ padding-right: 1ex;
+ display: inline-block;
}
.gap .gap-move {
- padding-left: .5ex;
- padding-right: .5ex;
+ padding-left: .5ex;
+ padding-right: .5ex;
}
-.gap > *:before {
- content: "(";
+.gap>*:before {
+ content: "(";
}
-.gap > *:after {
- content: ")";
+.gap>*:after {
+ content: ")";
}
.parameter.constant {
- outline: 1px dotted red;
+ outline: 1px dotted red;
}
.clickable:hover {
- text-decoration: underline;
+ text-decoration: underline;
}
.clickable:hover {
- font-weight: bold;
+ font-weight: bold;
}
-.comma-sep-list > * {
- padding-right: 1ex;
+.comma-sep-list>* {
+ padding-right: 1ex;
}
-.comma-sep-list > *:after {
- content: ",";
+.comma-sep-list>*:after {
+ content: ",";
}
-.comma-sep-list > *:last-child:after {
- content: "";
+.comma-sep-list>*:last-child:after {
+ content: "";
}
-.comma-sep-list > *:last-child {
- padding-right: 0ex;
+.comma-sep-list>*:last-child {
+ padding-right: 0ex;
}
.temps:before {
- content: "temps: ";
+ content: "temps: ";
}
.temps {
- padding-left: .5ex;
- outline: 1px dotted grey;
+ padding-left: .5ex;
+ outline: 1px dotted grey;
}
diff --git a/deps/v8/tools/ubsan/blacklist.txt b/deps/v8/tools/ubsan/blacklist.txt
index 213f2da600..12504639c5 100644
--- a/deps/v8/tools/ubsan/blacklist.txt
+++ b/deps/v8/tools/ubsan/blacklist.txt
@@ -1,2 +1,6 @@
#############################################################################
-# UBSan blacklist.
\ No newline at end of file
+# UBSan blacklist.
+
+# UBSan bug, fixed in LLVM r350779. Drop this suppression when that
+# revision has rolled into Chromium's bundled Clang.
+fun:*v8*internal*NewArray*
diff --git a/deps/v8/tools/unittests/run_perf_test.py b/deps/v8/tools/unittests/run_perf_test.py
index 6ba18e0b9e..0e22c77c11 100755
--- a/deps/v8/tools/unittests/run_perf_test.py
+++ b/deps/v8/tools/unittests/run_perf_test.py
@@ -85,7 +85,7 @@ V8_GENERIC_JSON = {
"units": "ms",
}
-Output = namedtuple("Output", "stdout, stderr, timed_out")
+Output = namedtuple("Output", "stdout, stderr, timed_out, exit_code")
class PerfTest(unittest.TestCase):
@classmethod
@@ -113,6 +113,7 @@ class PerfTest(unittest.TestCase):
os.makedirs(TEST_WORKSPACE)
def tearDown(self):
+ patch.stopall()
if path.exists(TEST_WORKSPACE):
shutil.rmtree(TEST_WORKSPACE)
@@ -125,7 +126,8 @@ class PerfTest(unittest.TestCase):
# Fake output for each test run.
test_outputs = [Output(stdout=arg,
stderr=None,
- timed_out=kwargs.get("timed_out", False))
+ timed_out=kwargs.get("timed_out", False),
+ exit_code=kwargs.get("exit_code", 0))
for arg in args[1]]
def create_cmd(*args, **kwargs):
cmd = MagicMock()
@@ -134,7 +136,9 @@ class PerfTest(unittest.TestCase):
cmd.execute = MagicMock(side_effect=execute)
return cmd
- command.Command = MagicMock(side_effect=create_cmd)
+ patch.object(
+ run_perf.command, 'PosixCommand',
+ MagicMock(side_effect=create_cmd)).start()
# Check that d8 is called from the correct cwd for each test run.
dirs = [path.join(TEST_WORKSPACE, arg) for arg in args[0]]
@@ -402,6 +406,18 @@ class PerfTest(unittest.TestCase):
self._VerifyErrors(["Found non-numeric in test/Infra/Constant4"])
self._VerifyMock(path.join("out", "x64.release", "cc"), "--flag", "")
+ def testOneRunCrashed(self):
+ self._WriteTestInput(V8_JSON)
+ self._MockCommand(
+ ["."], ["x\nRichards: 1.234\nDeltaBlue: 10657567\ny\n"], exit_code=1)
+ self.assertEquals(1, self._CallMain())
+ self._VerifyResults("test", "score", [
+ {"name": "Richards", "results": [], "stddev": ""},
+ {"name": "DeltaBlue", "results": [], "stddev": ""},
+ ])
+ self._VerifyErrors([])
+ self._VerifyMock(path.join("out", "x64.release", "d7"), "--flag", "run.js")
+
def testOneRunTimingOut(self):
test_input = dict(V8_JSON)
test_input["timeout"] = 70
@@ -412,10 +428,7 @@ class PerfTest(unittest.TestCase):
{"name": "Richards", "results": [], "stddev": ""},
{"name": "DeltaBlue", "results": [], "stddev": ""},
])
- self._VerifyErrors([
- "Regexp \"^Richards: (.+)$\" didn't match for test test/Richards.",
- "Regexp \"^DeltaBlue: (.+)$\" didn't match for test test/DeltaBlue.",
- ])
+ self._VerifyErrors([])
self._VerifyMock(
path.join("out", "x64.release", "d7"), "--flag", "run.js", timeout=70)
diff --git a/deps/v8/tools/unittests/run_tests_test.py b/deps/v8/tools/unittests/run_tests_test.py
index 4eb9feeac6..e136db6b53 100755
--- a/deps/v8/tools/unittests/run_tests_test.py
+++ b/deps/v8/tools/unittests/run_tests_test.py
@@ -103,8 +103,7 @@ def run_tests(basedir, *args, **kwargs):
sys_args.append('--infra-staging')
else:
sys_args.append('--no-infra-staging')
- code = standard_runner.StandardTestRunner(
- basedir=basedir).execute(sys_args)
+ code = standard_runner.StandardTestRunner(basedir=basedir).execute(sys_args)
return Result(stdout.getvalue(), stderr.getvalue(), code)
@@ -174,7 +173,6 @@ class SystemTest(unittest.TestCase):
'sweet/bananas',
'sweet/raspberries',
)
- self.assertIn('Running 2 base tests', result.stdout, result)
self.assertIn('Done running sweet/bananas: pass', result.stdout, result)
# TODO(majeski): Implement for test processors
# self.assertIn('Total time:', result.stderr, result)
@@ -193,10 +191,9 @@ class SystemTest(unittest.TestCase):
'--shard-run=%d' % shard,
'sweet/bananas',
'sweet/raspberries',
- infra_staging=True,
+ infra_staging=False,
)
# One of the shards gets one variant of each test.
- self.assertIn('Running 1 base tests', result.stdout, result)
self.assertIn('2 tests ran', result.stdout, result)
if shard == 1:
self.assertIn('Done running sweet/bananas', result.stdout, result)
@@ -225,10 +222,7 @@ class SystemTest(unittest.TestCase):
self.assertIn('Done running sweet/raspberries', result.stdout, result)
self.assertEqual(0, result.returncode, result)
- def testFailProc(self):
- self.testFail(infra_staging=True)
-
- def testFail(self, infra_staging=True):
+ def testFail(self):
"""Test running only failing tests in two variants."""
with temp_base() as basedir:
result = run_tests(
@@ -237,17 +231,13 @@ class SystemTest(unittest.TestCase):
'--progress=verbose',
'--variants=default,stress',
'sweet/strawberries',
- infra_staging=infra_staging,
+ infra_staging=False,
)
- if not infra_staging:
- self.assertIn('Running 2 tests', result.stdout, result)
- else:
- self.assertIn('Running 1 base tests', result.stdout, result)
- self.assertIn('2 tests ran', result.stdout, result)
self.assertIn('Done running sweet/strawberries: FAIL', result.stdout, result)
self.assertEqual(1, result.returncode, result)
- def check_cleaned_json_output(self, expected_results_name, actual_json):
+ def check_cleaned_json_output(
+ self, expected_results_name, actual_json, basedir):
# Check relevant properties of the json output.
with open(actual_json) as f:
json_output = json.load(f)[0]
@@ -260,6 +250,7 @@ class SystemTest(unittest.TestCase):
data['duration'] = 1
data['command'] = ' '.join(
['/usr/bin/python'] + data['command'].split()[1:])
+ data['command'] = data['command'].replace(basedir + '/', '')
for data in json_output['slowest_tests']:
replace_variable_data(data)
for data in json_output['results']:
@@ -272,10 +263,7 @@ class SystemTest(unittest.TestCase):
msg = None # Set to pretty_json for bootstrapping.
self.assertDictEqual(json_output, expected_test_results, msg)
- def testFailWithRerunAndJSONProc(self):
- self.testFailWithRerunAndJSON(infra_staging=True)
-
- def testFailWithRerunAndJSON(self, infra_staging=True):
+ def testFailWithRerunAndJSON(self):
"""Test re-running a failing test and output to json."""
with temp_base() as basedir:
json_path = os.path.join(basedir, 'out.json')
@@ -288,21 +276,12 @@ class SystemTest(unittest.TestCase):
'--random-seed=123',
'--json-test-results', json_path,
'sweet/strawberries',
- infra_staging=infra_staging,
+ infra_staging=False,
)
- if not infra_staging:
- self.assertIn('Running 1 tests', result.stdout, result)
- else:
- self.assertIn('Running 1 base tests', result.stdout, result)
- self.assertIn('1 tests ran', result.stdout, result)
self.assertIn('Done running sweet/strawberries: FAIL', result.stdout, result)
- if not infra_staging:
- # We run one test, which fails and gets re-run twice.
- self.assertIn('3 tests failed', result.stdout, result)
- else:
- # With test processors we don't count reruns as separated failures.
- # TODO(majeski): fix it?
- self.assertIn('1 tests failed', result.stdout, result)
+ # With test processors we don't count reruns as separate failures.
+ # TODO(majeski): fix it?
+ self.assertIn('1 tests failed', result.stdout, result)
self.assertEqual(0, result.returncode, result)
# TODO(majeski): Previously we only reported the variant flags in the
@@ -310,12 +289,10 @@ class SystemTest(unittest.TestCase):
# After recent changes we report all flags, including the file names.
# This is redundant to the command. Needs investigation.
self.maxDiff = None
- self.check_cleaned_json_output('expected_test_results1.json', json_path)
+ self.check_cleaned_json_output(
+ 'expected_test_results1.json', json_path, basedir)
- def testFlakeWithRerunAndJSONProc(self):
- self.testFlakeWithRerunAndJSON(infra_staging=True)
-
- def testFlakeWithRerunAndJSON(self, infra_staging=True):
+ def testFlakeWithRerunAndJSON(self):
"""Test re-running a failing test and output to json."""
with temp_base(baseroot='testroot2') as basedir:
json_path = os.path.join(basedir, 'out.json')
@@ -328,21 +305,15 @@ class SystemTest(unittest.TestCase):
'--random-seed=123',
'--json-test-results', json_path,
'sweet',
- infra_staging=infra_staging,
+ infra_staging=False,
)
- if not infra_staging:
- self.assertIn('Running 1 tests', result.stdout, result)
- self.assertIn(
- 'Done running sweet/bananaflakes: FAIL', result.stdout, result)
- self.assertIn('1 tests failed', result.stdout, result)
- else:
- self.assertIn('Running 1 base tests', result.stdout, result)
- self.assertIn(
- 'Done running sweet/bananaflakes: pass', result.stdout, result)
- self.assertIn('All tests succeeded', result.stdout, result)
+ self.assertIn(
+ 'Done running sweet/bananaflakes: pass', result.stdout, result)
+ self.assertIn('All tests succeeded', result.stdout, result)
self.assertEqual(0, result.returncode, result)
self.maxDiff = None
- self.check_cleaned_json_output('expected_test_results2.json', json_path)
+ self.check_cleaned_json_output(
+ 'expected_test_results2.json', json_path, basedir)
def testAutoDetect(self):
"""Fake a build with several auto-detected options.
@@ -355,7 +326,9 @@ class SystemTest(unittest.TestCase):
basedir, dcheck_always_on=True, is_asan=True, is_cfi=True,
is_msan=True, is_tsan=True, is_ubsan_vptr=True, target_cpu='x86',
v8_enable_i18n_support=False, v8_target_cpu='x86',
- v8_use_snapshot=False)
+ v8_use_snapshot=False, v8_enable_embedded_builtins=False,
+ v8_enable_verify_csa=False, v8_enable_lite_mode=False,
+ v8_enable_pointer_compression=False)
result = run_tests(
basedir,
'--mode=Release',
@@ -379,10 +352,7 @@ class SystemTest(unittest.TestCase):
# TODO(machenbach): Test some more implications of the auto-detected
# options, e.g. that the right env variables are set.
- def testSkipsProc(self):
- self.testSkips(infra_staging=True)
-
- def testSkips(self, infra_staging=True):
+ def testSkips(self):
"""Test skipping tests in status file for a specific variant."""
with temp_base() as basedir:
result = run_tests(
@@ -391,19 +361,27 @@ class SystemTest(unittest.TestCase):
'--progress=verbose',
'--variants=nooptimization',
'sweet/strawberries',
- infra_staging=infra_staging,
+ infra_staging=False,
)
- if not infra_staging:
- self.assertIn('Running 0 tests', result.stdout, result)
- else:
- self.assertIn('Running 1 base tests', result.stdout, result)
- self.assertIn('0 tests ran', result.stdout, result)
+ self.assertIn('0 tests ran', result.stdout, result)
self.assertEqual(2, result.returncode, result)
- def testDefaultProc(self):
- self.testDefault(infra_staging=True)
+ def testRunSkips(self):
+ """Inverse the above. Test parameter to keep running skipped tests."""
+ with temp_base() as basedir:
+ result = run_tests(
+ basedir,
+ '--mode=Release',
+ '--progress=verbose',
+ '--variants=nooptimization',
+ '--run-skipped',
+ 'sweet/strawberries',
+ )
+ self.assertIn('1 tests failed', result.stdout, result)
+ self.assertIn('1 tests ran', result.stdout, result)
+ self.assertEqual(1, result.returncode, result)
- def testDefault(self, infra_staging=True):
+ def testDefault(self):
"""Test using default test suites, though no tests are run since they don't
exist in a test setting.
"""
@@ -411,13 +389,9 @@ class SystemTest(unittest.TestCase):
result = run_tests(
basedir,
'--mode=Release',
- infra_staging=infra_staging,
+ infra_staging=False,
)
- if not infra_staging:
- self.assertIn('Warning: no tests were run!', result.stdout, result)
- else:
- self.assertIn('Running 0 base tests', result.stdout, result)
- self.assertIn('0 tests ran', result.stdout, result)
+ self.assertIn('0 tests ran', result.stdout, result)
self.assertEqual(2, result.returncode, result)
def testNoBuildConfig(self):
@@ -514,10 +488,7 @@ class SystemTest(unittest.TestCase):
self.assertIn('(no source available)', result.stdout, result)
self.assertEqual(0, result.returncode, result)
- def testPredictableProc(self):
- self.testPredictable(infra_staging=True)
-
- def testPredictable(self, infra_staging=True):
+ def testPredictable(self):
"""Test running a test in verify-predictable mode.
The test will fail because of missing allocation output. We verify that and
@@ -531,13 +502,9 @@ class SystemTest(unittest.TestCase):
'--progress=verbose',
'--variants=default',
'sweet/bananas',
- infra_staging=infra_staging,
+ infra_staging=False,
)
- if not infra_staging:
- self.assertIn('Running 1 tests', result.stdout, result)
- else:
- self.assertIn('Running 1 base tests', result.stdout, result)
- self.assertIn('1 tests ran', result.stdout, result)
+ self.assertIn('1 tests ran', result.stdout, result)
self.assertIn('Done running sweet/bananas: FAIL', result.stdout, result)
self.assertIn('Test had no allocation output', result.stdout, result)
self.assertIn('--predictable --verify_predictable', result.stdout, result)
@@ -558,10 +525,7 @@ class SystemTest(unittest.TestCase):
# timeout was used.
self.assertEqual(0, result.returncode, result)
- def testRandomSeedStressWithDefaultProc(self):
- self.testRandomSeedStressWithDefault(infra_staging=True)
-
- def testRandomSeedStressWithDefault(self, infra_staging=True):
+ def testRandomSeedStressWithDefault(self):
"""Test using random-seed-stress feature has the right number of tests."""
with temp_base() as basedir:
result = run_tests(
@@ -571,13 +535,9 @@ class SystemTest(unittest.TestCase):
'--variants=default',
'--random-seed-stress-count=2',
'sweet/bananas',
- infra_staging=infra_staging,
+ infra_staging=False,
)
- if infra_staging:
- self.assertIn('Running 1 base tests', result.stdout, result)
- self.assertIn('2 tests ran', result.stdout, result)
- else:
- self.assertIn('Running 2 tests', result.stdout, result)
+ self.assertIn('2 tests ran', result.stdout, result)
self.assertEqual(0, result.returncode, result)
def testRandomSeedStressWithSeed(self):
@@ -592,7 +552,6 @@ class SystemTest(unittest.TestCase):
'--random-seed=123',
'sweet/strawberries',
)
- self.assertIn('Running 1 base tests', result.stdout, result)
self.assertIn('2 tests ran', result.stdout, result)
# We use a failing test so that the command is printed and we can verify
# that the right random seed was passed.
@@ -618,7 +577,6 @@ class SystemTest(unittest.TestCase):
)
# Both tests are either marked as running in only default or only
# slow variant.
- self.assertIn('Running 2 base tests', result.stdout, result)
self.assertIn('2 tests ran', result.stdout, result)
self.assertEqual(0, result.returncode, result)
@@ -629,10 +587,7 @@ class SystemTest(unittest.TestCase):
self.assertTrue(statusfile.PresubmitCheck(
os.path.join(basedir, 'test', 'sweet', 'sweet.status')))
- def testDotsProgressProc(self):
- self.testDotsProgress(infra_staging=True)
-
- def testDotsProgress(self, infra_staging=True):
+ def testDotsProgress(self):
with temp_base() as basedir:
result = run_tests(
basedir,
@@ -641,29 +596,19 @@ class SystemTest(unittest.TestCase):
'sweet/cherries',
'sweet/bananas',
'--no-sorting', '-j1', # make results order deterministic
- infra_staging=infra_staging,
+ infra_staging=False,
)
- if not infra_staging:
- self.assertIn('Running 2 tests', result.stdout, result)
- else:
- self.assertIn('Running 2 base tests', result.stdout, result)
- self.assertIn('2 tests ran', result.stdout, result)
+ self.assertIn('2 tests ran', result.stdout, result)
self.assertIn('F.', result.stdout, result)
self.assertEqual(1, result.returncode, result)
- def testMonoProgressProc(self):
- self._testCompactProgress('mono', True)
-
def testMonoProgress(self):
- self._testCompactProgress('mono', False)
-
- def testColorProgressProc(self):
- self._testCompactProgress('color', True)
+ self._testCompactProgress('mono')
def testColorProgress(self):
- self._testCompactProgress('color', False)
+ self._testCompactProgress('color')
- def _testCompactProgress(self, name, infra_staging):
+ def _testCompactProgress(self, name):
with temp_base() as basedir:
result = run_tests(
basedir,
@@ -671,14 +616,13 @@ class SystemTest(unittest.TestCase):
'--progress=%s' % name,
'sweet/cherries',
'sweet/bananas',
- infra_staging=infra_staging,
+ infra_staging=False,
)
if name == 'color':
- expected = ('\033[34m% 100\033[0m|'
- '\033[32m+ 1\033[0m|'
+ expected = ('\033[32m+ 1\033[0m|'
'\033[31m- 1\033[0m]: Done')
else:
- expected = '% 100|+ 1|- 1]: Done'
+ expected = '+ 1|- 1]: Done'
self.assertIn(expected, result.stdout)
self.assertIn('sweet/cherries', result.stdout)
self.assertIn('sweet/bananas', result.stdout)
@@ -697,7 +641,6 @@ class SystemTest(unittest.TestCase):
'sweet/blackberries', # FAIL
'sweet/raspberries', # should not run
)
- self.assertIn('Running 4 base tests', result.stdout, result)
self.assertIn('sweet/mangoes: pass', result.stdout, result)
self.assertIn('sweet/strawberries: FAIL', result.stdout, result)
self.assertIn('Too many failures, exiting...', result.stdout, result)
diff --git a/deps/v8/tools/unittests/testdata/testroot1/v8_build_config.json b/deps/v8/tools/unittests/testdata/testroot1/v8_build_config.json
index eb9f7bafdc..79f9856a47 100644
--- a/deps/v8/tools/unittests/testdata/testroot1/v8_build_config.json
+++ b/deps/v8/tools/unittests/testdata/testroot1/v8_build_config.json
@@ -15,5 +15,9 @@
"v8_enable_i18n_support": true,
"v8_enable_verify_predictable": false,
"v8_target_cpu": "x64",
- "v8_use_snapshot": true
+ "v8_use_snapshot": true,
+ "v8_enable_embedded_builtins": false,
+ "v8_enable_verify_csa": false,
+ "v8_enable_lite_mode": false,
+ "v8_enable_pointer_compression": true
}
diff --git a/deps/v8/tools/unittests/testdata/testroot2/v8_build_config.json b/deps/v8/tools/unittests/testdata/testroot2/v8_build_config.json
index eb9f7bafdc..e4946321c5 100644
--- a/deps/v8/tools/unittests/testdata/testroot2/v8_build_config.json
+++ b/deps/v8/tools/unittests/testdata/testroot2/v8_build_config.json
@@ -15,5 +15,9 @@
"v8_enable_i18n_support": true,
"v8_enable_verify_predictable": false,
"v8_target_cpu": "x64",
- "v8_use_snapshot": true
+ "v8_use_snapshot": true,
+ "v8_enable_embedded_builtins": false,
+ "v8_enable_verify_csa": false,
+ "v8_enable_lite_mode": false,
+ "v8_enable_pointer_compression": false
}
diff --git a/deps/v8/tools/update-object-macros-undef.py b/deps/v8/tools/update-object-macros-undef.py
new file mode 100755
index 0000000000..ecec6239ad
--- /dev/null
+++ b/deps/v8/tools/update-object-macros-undef.py
@@ -0,0 +1,46 @@
+#!/usr/bin/env python3
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+# vim:fenc=utf-8:shiftwidth=2:tabstop=2:softtabstop=2:expandtab
+
+"""
+Generate object-macros-undef.h from object-macros.h.
+"""
+
+import os.path
+import re
+import sys
+
+INPUT = 'src/objects/object-macros.h'
+OUTPUT = 'src/objects/object-macros-undef.h'
+HEADER = """// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Generate this file using the {} script.
+
+// PRESUBMIT_INTENTIONALLY_MISSING_INCLUDE_GUARD
+
+""".format(os.path.basename(__file__))
+
+
+def main():
+ if not os.path.isfile(INPUT):
+ sys.exit("Input file {} does not exist; run this script in a v8 checkout."
+ .format(INPUT))
+ if not os.path.isfile(OUTPUT):
+ sys.exit("Output file {} does not exist; run this script in a v8 checkout."
+ .format(OUTPUT))
+ regexp = re.compile('^#define (\w+)')
+ seen = set()
+ with open(INPUT, 'r') as infile, open(OUTPUT, 'w') as outfile:
+ outfile.write(HEADER)
+ for line in infile:
+ match = regexp.match(line)
+ if match and match.group(1) not in seen:
+ seen.add(match.group(1))
+ outfile.write('#undef {}\n'.format(match.group(1)))
+
+if __name__ == "__main__":
+ main()
diff --git a/deps/v8/tools/v8_presubmit.py b/deps/v8/tools/v8_presubmit.py
index 22d6bf5389..5d775c8cb9 100755
--- a/deps/v8/tools/v8_presubmit.py
+++ b/deps/v8/tools/v8_presubmit.py
@@ -347,14 +347,15 @@ class CppLintProcessor(CacheableSourceFileProcessor):
return None, arguments
-class TorqueFormatProcessor(CacheableSourceFileProcessor):
+class TorqueLintProcessor(CacheableSourceFileProcessor):
"""
Check .tq files to verify they follow the Torque style guide.
"""
def __init__(self, use_cache=True):
- super(TorqueFormatProcessor, self).__init__(
- use_cache=use_cache, cache_file_path='.torquelint-cache', file_type='Torque')
+ super(TorqueLintProcessor, self).__init__(
+ use_cache=use_cache, cache_file_path='.torquelint-cache',
+ file_type='Torque')
def IsRelevant(self, name):
return name.endswith('.tq')
@@ -438,22 +439,15 @@ class SourceProcessor(SourceFileProcessor):
IGNORE_COPYRIGHTS = ['box2d.js',
'cpplint.py',
- 'check_injected_script_source.py',
'copy.js',
'corrections.js',
'crypto.js',
'daemon.py',
- 'debugger-script.js',
'earley-boyer.js',
'fannkuch.js',
'fasta.js',
- 'generate_protocol_externs.py',
'injected-script.cc',
'injected-script.h',
- 'injected-script-source.js',
- 'java-script-call-frame.cc',
- 'java-script-call-frame.h',
- 'jsmin.py',
'libraries.cc',
'libraries-empty.cc',
'lua_binarytrees.js',
@@ -464,14 +458,11 @@ class SourceProcessor(SourceFileProcessor):
'raytrace.js',
'regexp-pcre.js',
'resources-123.js',
- 'rjsmin.py',
'sqlite.js',
'sqlite-change-heap.js',
'sqlite-pointer-masking.js',
'sqlite-safe-heap.js',
'v8-debugger-script.h',
- 'v8-function-call.cc',
- 'v8-function-call.h',
'v8-inspector-impl.cc',
'v8-inspector-impl.h',
'v8-runtime-agent-impl.cc',
@@ -521,7 +512,7 @@ class SourceProcessor(SourceFileProcessor):
print "%s does not end with a single new line." % name
result = False
# Sanitize flags for fuzzer.
- if "mjsunit" in name or "debugger" in name:
+ if ".js" in name and ("mjsunit" in name or "debugger" in name):
match = FLAGS_LINE.search(contents)
if match:
print "%s Flags should use '-' (not '_')" % name
@@ -551,7 +542,7 @@ class SourceProcessor(SourceFileProcessor):
try:
handle = open(file)
contents = handle.read()
- if not self.ProcessContents(file, contents):
+ if len(contents) > 0 and not self.ProcessContents(file, contents):
success = False
violations += 1
finally:
@@ -673,8 +664,8 @@ def GetOptions():
result = optparse.OptionParser()
result.add_option('--no-lint', help="Do not run cpplint", default=False,
action="store_true")
- result.add_option('--no-linter-cache', help="Do not cache linter results", default=False,
- action="store_true")
+ result.add_option('--no-linter-cache', help="Do not cache linter results",
+ default=False, action="store_true")
return result
@@ -692,7 +683,8 @@ def Main():
success &= CppLintProcessor(use_cache=use_linter_cache).RunOnPath(workspace)
print "Running Torque formatting check..."
- success &= TorqueFormatProcessor(use_cache=use_linter_cache).RunOnPath(workspace)
+ success &= TorqueLintProcessor(use_cache=use_linter_cache).RunOnPath(
+ workspace)
print "Running copyright header, trailing whitespaces and " \
"two empty lines between declarations check..."
success &= SourceProcessor().RunOnPath(workspace)
diff --git a/deps/v8/tools/v8heapconst.py b/deps/v8/tools/v8heapconst.py
index f8c8061ff4..fc053b60f7 100644
--- a/deps/v8/tools/v8heapconst.py
+++ b/deps/v8/tools/v8heapconst.py
@@ -59,73 +59,79 @@ INSTANCE_TYPES = {
155: "ACCESSOR_PAIR_TYPE",
156: "ALIASED_ARGUMENTS_ENTRY_TYPE",
157: "ALLOCATION_MEMENTO_TYPE",
- 158: "ASYNC_GENERATOR_REQUEST_TYPE",
- 159: "DEBUG_INFO_TYPE",
- 160: "FUNCTION_TEMPLATE_INFO_TYPE",
- 161: "INTERCEPTOR_INFO_TYPE",
- 162: "INTERPRETER_DATA_TYPE",
- 163: "MODULE_INFO_ENTRY_TYPE",
- 164: "MODULE_TYPE",
- 165: "OBJECT_TEMPLATE_INFO_TYPE",
- 166: "PROMISE_CAPABILITY_TYPE",
- 167: "PROMISE_REACTION_TYPE",
- 168: "PROTOTYPE_INFO_TYPE",
- 169: "SCRIPT_TYPE",
- 170: "STACK_FRAME_INFO_TYPE",
- 171: "TUPLE2_TYPE",
- 172: "TUPLE3_TYPE",
- 173: "ARRAY_BOILERPLATE_DESCRIPTION_TYPE",
- 174: "WASM_DEBUG_INFO_TYPE",
- 175: "WASM_EXPORTED_FUNCTION_DATA_TYPE",
- 176: "CALLABLE_TASK_TYPE",
- 177: "CALLBACK_TASK_TYPE",
- 178: "PROMISE_FULFILL_REACTION_JOB_TASK_TYPE",
- 179: "PROMISE_REJECT_REACTION_JOB_TASK_TYPE",
- 180: "PROMISE_RESOLVE_THENABLE_JOB_TASK_TYPE",
- 181: "MICROTASK_QUEUE_TYPE",
- 182: "ALLOCATION_SITE_TYPE",
- 183: "FIXED_ARRAY_TYPE",
- 184: "OBJECT_BOILERPLATE_DESCRIPTION_TYPE",
- 185: "HASH_TABLE_TYPE",
- 186: "ORDERED_HASH_MAP_TYPE",
- 187: "ORDERED_HASH_SET_TYPE",
- 188: "NAME_DICTIONARY_TYPE",
- 189: "GLOBAL_DICTIONARY_TYPE",
- 190: "NUMBER_DICTIONARY_TYPE",
- 191: "SIMPLE_NUMBER_DICTIONARY_TYPE",
- 192: "STRING_TABLE_TYPE",
- 193: "EPHEMERON_HASH_TABLE_TYPE",
- 194: "SCOPE_INFO_TYPE",
- 195: "SCRIPT_CONTEXT_TABLE_TYPE",
- 196: "AWAIT_CONTEXT_TYPE",
- 197: "BLOCK_CONTEXT_TYPE",
- 198: "CATCH_CONTEXT_TYPE",
- 199: "DEBUG_EVALUATE_CONTEXT_TYPE",
- 200: "EVAL_CONTEXT_TYPE",
- 201: "FUNCTION_CONTEXT_TYPE",
- 202: "MODULE_CONTEXT_TYPE",
- 203: "NATIVE_CONTEXT_TYPE",
- 204: "SCRIPT_CONTEXT_TYPE",
- 205: "WITH_CONTEXT_TYPE",
- 206: "WEAK_FIXED_ARRAY_TYPE",
- 207: "DESCRIPTOR_ARRAY_TYPE",
- 208: "TRANSITION_ARRAY_TYPE",
- 209: "CALL_HANDLER_INFO_TYPE",
- 210: "CELL_TYPE",
- 211: "CODE_DATA_CONTAINER_TYPE",
- 212: "FEEDBACK_CELL_TYPE",
- 213: "FEEDBACK_VECTOR_TYPE",
- 214: "LOAD_HANDLER_TYPE",
- 215: "PRE_PARSED_SCOPE_DATA_TYPE",
- 216: "PROPERTY_ARRAY_TYPE",
- 217: "PROPERTY_CELL_TYPE",
- 218: "SHARED_FUNCTION_INFO_TYPE",
- 219: "SMALL_ORDERED_HASH_MAP_TYPE",
- 220: "SMALL_ORDERED_HASH_SET_TYPE",
- 221: "STORE_HANDLER_TYPE",
- 222: "UNCOMPILED_DATA_WITHOUT_PRE_PARSED_SCOPE_TYPE",
- 223: "UNCOMPILED_DATA_WITH_PRE_PARSED_SCOPE_TYPE",
- 224: "WEAK_ARRAY_LIST_TYPE",
+ 158: "ASM_WASM_DATA_TYPE",
+ 159: "ASYNC_GENERATOR_REQUEST_TYPE",
+ 160: "DEBUG_INFO_TYPE",
+ 161: "FUNCTION_TEMPLATE_INFO_TYPE",
+ 162: "FUNCTION_TEMPLATE_RARE_DATA_TYPE",
+ 163: "INTERCEPTOR_INFO_TYPE",
+ 164: "INTERPRETER_DATA_TYPE",
+ 165: "MODULE_INFO_ENTRY_TYPE",
+ 166: "MODULE_TYPE",
+ 167: "OBJECT_TEMPLATE_INFO_TYPE",
+ 168: "PROMISE_CAPABILITY_TYPE",
+ 169: "PROMISE_REACTION_TYPE",
+ 170: "PROTOTYPE_INFO_TYPE",
+ 171: "SCRIPT_TYPE",
+ 172: "STACK_FRAME_INFO_TYPE",
+ 173: "TUPLE2_TYPE",
+ 174: "TUPLE3_TYPE",
+ 175: "ARRAY_BOILERPLATE_DESCRIPTION_TYPE",
+ 176: "WASM_DEBUG_INFO_TYPE",
+ 177: "WASM_EXCEPTION_TAG_TYPE",
+ 178: "WASM_EXPORTED_FUNCTION_DATA_TYPE",
+ 179: "CALLABLE_TASK_TYPE",
+ 180: "CALLBACK_TASK_TYPE",
+ 181: "PROMISE_FULFILL_REACTION_JOB_TASK_TYPE",
+ 182: "PROMISE_REJECT_REACTION_JOB_TASK_TYPE",
+ 183: "PROMISE_RESOLVE_THENABLE_JOB_TASK_TYPE",
+ 184: "WEAK_FACTORY_CLEANUP_JOB_TASK_TYPE",
+ 185: "ALLOCATION_SITE_TYPE",
+ 186: "EMBEDDER_DATA_ARRAY_TYPE",
+ 187: "FIXED_ARRAY_TYPE",
+ 188: "OBJECT_BOILERPLATE_DESCRIPTION_TYPE",
+ 189: "HASH_TABLE_TYPE",
+ 190: "ORDERED_HASH_MAP_TYPE",
+ 191: "ORDERED_HASH_SET_TYPE",
+ 192: "ORDERED_NAME_DICTIONARY_TYPE",
+ 193: "NAME_DICTIONARY_TYPE",
+ 194: "GLOBAL_DICTIONARY_TYPE",
+ 195: "NUMBER_DICTIONARY_TYPE",
+ 196: "SIMPLE_NUMBER_DICTIONARY_TYPE",
+ 197: "STRING_TABLE_TYPE",
+ 198: "EPHEMERON_HASH_TABLE_TYPE",
+ 199: "SCOPE_INFO_TYPE",
+ 200: "SCRIPT_CONTEXT_TABLE_TYPE",
+ 201: "AWAIT_CONTEXT_TYPE",
+ 202: "BLOCK_CONTEXT_TYPE",
+ 203: "CATCH_CONTEXT_TYPE",
+ 204: "DEBUG_EVALUATE_CONTEXT_TYPE",
+ 205: "EVAL_CONTEXT_TYPE",
+ 206: "FUNCTION_CONTEXT_TYPE",
+ 207: "MODULE_CONTEXT_TYPE",
+ 208: "NATIVE_CONTEXT_TYPE",
+ 209: "SCRIPT_CONTEXT_TYPE",
+ 210: "WITH_CONTEXT_TYPE",
+ 211: "WEAK_FIXED_ARRAY_TYPE",
+ 212: "TRANSITION_ARRAY_TYPE",
+ 213: "CALL_HANDLER_INFO_TYPE",
+ 214: "CELL_TYPE",
+ 215: "CODE_DATA_CONTAINER_TYPE",
+ 216: "DESCRIPTOR_ARRAY_TYPE",
+ 217: "FEEDBACK_CELL_TYPE",
+ 218: "FEEDBACK_VECTOR_TYPE",
+ 219: "LOAD_HANDLER_TYPE",
+ 220: "PREPARSE_DATA_TYPE",
+ 221: "PROPERTY_ARRAY_TYPE",
+ 222: "PROPERTY_CELL_TYPE",
+ 223: "SHARED_FUNCTION_INFO_TYPE",
+ 224: "SMALL_ORDERED_HASH_MAP_TYPE",
+ 225: "SMALL_ORDERED_HASH_SET_TYPE",
+ 226: "SMALL_ORDERED_NAME_DICTIONARY_TYPE",
+ 227: "STORE_HANDLER_TYPE",
+ 228: "UNCOMPILED_DATA_WITHOUT_PREPARSE_DATA_TYPE",
+ 229: "UNCOMPILED_DATA_WITH_PREPARSE_DATA_TYPE",
+ 230: "WEAK_ARRAY_LIST_TYPE",
1024: "JS_PROXY_TYPE",
1025: "JS_GLOBAL_OBJECT_TYPE",
1026: "JS_GLOBAL_PROXY_TYPE",
@@ -139,246 +145,296 @@ INSTANCE_TYPES = {
1060: "JS_ARRAY_ITERATOR_TYPE",
1061: "JS_ARRAY_TYPE",
1062: "JS_ASYNC_FROM_SYNC_ITERATOR_TYPE",
- 1063: "JS_ASYNC_GENERATOR_OBJECT_TYPE",
- 1064: "JS_CONTEXT_EXTENSION_OBJECT_TYPE",
- 1065: "JS_DATE_TYPE",
- 1066: "JS_ERROR_TYPE",
- 1067: "JS_GENERATOR_OBJECT_TYPE",
- 1068: "JS_MAP_TYPE",
- 1069: "JS_MAP_KEY_ITERATOR_TYPE",
- 1070: "JS_MAP_KEY_VALUE_ITERATOR_TYPE",
- 1071: "JS_MAP_VALUE_ITERATOR_TYPE",
- 1072: "JS_MESSAGE_OBJECT_TYPE",
- 1073: "JS_PROMISE_TYPE",
- 1074: "JS_REGEXP_TYPE",
- 1075: "JS_REGEXP_STRING_ITERATOR_TYPE",
- 1076: "JS_SET_TYPE",
- 1077: "JS_SET_KEY_VALUE_ITERATOR_TYPE",
- 1078: "JS_SET_VALUE_ITERATOR_TYPE",
- 1079: "JS_STRING_ITERATOR_TYPE",
- 1080: "JS_WEAK_MAP_TYPE",
- 1081: "JS_WEAK_SET_TYPE",
- 1082: "JS_TYPED_ARRAY_TYPE",
- 1083: "JS_DATA_VIEW_TYPE",
- 1084: "JS_INTL_V8_BREAK_ITERATOR_TYPE",
- 1085: "JS_INTL_COLLATOR_TYPE",
- 1086: "JS_INTL_DATE_TIME_FORMAT_TYPE",
- 1087: "JS_INTL_LIST_FORMAT_TYPE",
- 1088: "JS_INTL_LOCALE_TYPE",
- 1089: "JS_INTL_NUMBER_FORMAT_TYPE",
- 1090: "JS_INTL_PLURAL_RULES_TYPE",
- 1091: "JS_INTL_RELATIVE_TIME_FORMAT_TYPE",
- 1092: "JS_INTL_SEGMENTER_TYPE",
- 1093: "WASM_EXCEPTION_TYPE",
- 1094: "WASM_GLOBAL_TYPE",
- 1095: "WASM_INSTANCE_TYPE",
- 1096: "WASM_MEMORY_TYPE",
- 1097: "WASM_MODULE_TYPE",
- 1098: "WASM_TABLE_TYPE",
- 1099: "JS_BOUND_FUNCTION_TYPE",
- 1100: "JS_FUNCTION_TYPE",
+ 1063: "JS_ASYNC_FUNCTION_OBJECT_TYPE",
+ 1064: "JS_ASYNC_GENERATOR_OBJECT_TYPE",
+ 1065: "JS_CONTEXT_EXTENSION_OBJECT_TYPE",
+ 1066: "JS_DATE_TYPE",
+ 1067: "JS_ERROR_TYPE",
+ 1068: "JS_GENERATOR_OBJECT_TYPE",
+ 1069: "JS_MAP_TYPE",
+ 1070: "JS_MAP_KEY_ITERATOR_TYPE",
+ 1071: "JS_MAP_KEY_VALUE_ITERATOR_TYPE",
+ 1072: "JS_MAP_VALUE_ITERATOR_TYPE",
+ 1073: "JS_MESSAGE_OBJECT_TYPE",
+ 1074: "JS_PROMISE_TYPE",
+ 1075: "JS_REGEXP_TYPE",
+ 1076: "JS_REGEXP_STRING_ITERATOR_TYPE",
+ 1077: "JS_SET_TYPE",
+ 1078: "JS_SET_KEY_VALUE_ITERATOR_TYPE",
+ 1079: "JS_SET_VALUE_ITERATOR_TYPE",
+ 1080: "JS_STRING_ITERATOR_TYPE",
+ 1081: "JS_WEAK_CELL_TYPE",
+ 1082: "JS_WEAK_REF_TYPE",
+ 1083: "JS_WEAK_FACTORY_CLEANUP_ITERATOR_TYPE",
+ 1084: "JS_WEAK_FACTORY_TYPE",
+ 1085: "JS_WEAK_MAP_TYPE",
+ 1086: "JS_WEAK_SET_TYPE",
+ 1087: "JS_TYPED_ARRAY_TYPE",
+ 1088: "JS_DATA_VIEW_TYPE",
+ 1089: "JS_INTL_V8_BREAK_ITERATOR_TYPE",
+ 1090: "JS_INTL_COLLATOR_TYPE",
+ 1091: "JS_INTL_DATE_TIME_FORMAT_TYPE",
+ 1092: "JS_INTL_LIST_FORMAT_TYPE",
+ 1093: "JS_INTL_LOCALE_TYPE",
+ 1094: "JS_INTL_NUMBER_FORMAT_TYPE",
+ 1095: "JS_INTL_PLURAL_RULES_TYPE",
+ 1096: "JS_INTL_RELATIVE_TIME_FORMAT_TYPE",
+ 1097: "JS_INTL_SEGMENT_ITERATOR_TYPE",
+ 1098: "JS_INTL_SEGMENTER_TYPE",
+ 1099: "WASM_EXCEPTION_TYPE",
+ 1100: "WASM_GLOBAL_TYPE",
+ 1101: "WASM_INSTANCE_TYPE",
+ 1102: "WASM_MEMORY_TYPE",
+ 1103: "WASM_MODULE_TYPE",
+ 1104: "WASM_TABLE_TYPE",
+ 1105: "JS_BOUND_FUNCTION_TYPE",
+ 1106: "JS_FUNCTION_TYPE",
}
# List of known V8 maps.
KNOWN_MAPS = {
- ("RO_SPACE", 0x02201): (138, "FreeSpaceMap"),
- ("RO_SPACE", 0x02251): (132, "MetaMap"),
- ("RO_SPACE", 0x022d1): (131, "NullMap"),
- ("RO_SPACE", 0x02341): (207, "DescriptorArrayMap"),
- ("RO_SPACE", 0x023a1): (206, "WeakFixedArrayMap"),
- ("RO_SPACE", 0x023f1): (152, "OnePointerFillerMap"),
- ("RO_SPACE", 0x02441): (152, "TwoPointerFillerMap"),
- ("RO_SPACE", 0x024c1): (131, "UninitializedMap"),
- ("RO_SPACE", 0x02531): (8, "OneByteInternalizedStringMap"),
- ("RO_SPACE", 0x025d1): (131, "UndefinedMap"),
- ("RO_SPACE", 0x02631): (129, "HeapNumberMap"),
- ("RO_SPACE", 0x026b1): (131, "TheHoleMap"),
- ("RO_SPACE", 0x02759): (131, "BooleanMap"),
- ("RO_SPACE", 0x02831): (136, "ByteArrayMap"),
- ("RO_SPACE", 0x02881): (183, "FixedArrayMap"),
- ("RO_SPACE", 0x028d1): (183, "FixedCOWArrayMap"),
- ("RO_SPACE", 0x02921): (185, "HashTableMap"),
- ("RO_SPACE", 0x02971): (128, "SymbolMap"),
- ("RO_SPACE", 0x029c1): (72, "OneByteStringMap"),
- ("RO_SPACE", 0x02a11): (194, "ScopeInfoMap"),
- ("RO_SPACE", 0x02a61): (218, "SharedFunctionInfoMap"),
- ("RO_SPACE", 0x02ab1): (133, "CodeMap"),
- ("RO_SPACE", 0x02b01): (201, "FunctionContextMap"),
- ("RO_SPACE", 0x02b51): (210, "CellMap"),
- ("RO_SPACE", 0x02ba1): (217, "GlobalPropertyCellMap"),
- ("RO_SPACE", 0x02bf1): (135, "ForeignMap"),
- ("RO_SPACE", 0x02c41): (208, "TransitionArrayMap"),
- ("RO_SPACE", 0x02c91): (213, "FeedbackVectorMap"),
- ("RO_SPACE", 0x02d31): (131, "ArgumentsMarkerMap"),
- ("RO_SPACE", 0x02dd1): (131, "ExceptionMap"),
- ("RO_SPACE", 0x02e71): (131, "TerminationExceptionMap"),
- ("RO_SPACE", 0x02f19): (131, "OptimizedOutMap"),
- ("RO_SPACE", 0x02fb9): (131, "StaleRegisterMap"),
- ("RO_SPACE", 0x03029): (203, "NativeContextMap"),
- ("RO_SPACE", 0x03079): (202, "ModuleContextMap"),
- ("RO_SPACE", 0x030c9): (200, "EvalContextMap"),
- ("RO_SPACE", 0x03119): (204, "ScriptContextMap"),
- ("RO_SPACE", 0x03169): (196, "AwaitContextMap"),
- ("RO_SPACE", 0x031b9): (197, "BlockContextMap"),
- ("RO_SPACE", 0x03209): (198, "CatchContextMap"),
- ("RO_SPACE", 0x03259): (205, "WithContextMap"),
- ("RO_SPACE", 0x032a9): (199, "DebugEvaluateContextMap"),
- ("RO_SPACE", 0x032f9): (195, "ScriptContextTableMap"),
- ("RO_SPACE", 0x03349): (151, "FeedbackMetadataArrayMap"),
- ("RO_SPACE", 0x03399): (183, "ArrayListMap"),
- ("RO_SPACE", 0x033e9): (130, "BigIntMap"),
- ("RO_SPACE", 0x03439): (184, "ObjectBoilerplateDescriptionMap"),
- ("RO_SPACE", 0x03489): (137, "BytecodeArrayMap"),
- ("RO_SPACE", 0x034d9): (211, "CodeDataContainerMap"),
- ("RO_SPACE", 0x03529): (150, "FixedDoubleArrayMap"),
- ("RO_SPACE", 0x03579): (189, "GlobalDictionaryMap"),
- ("RO_SPACE", 0x035c9): (212, "ManyClosuresCellMap"),
- ("RO_SPACE", 0x03619): (183, "ModuleInfoMap"),
- ("RO_SPACE", 0x03669): (134, "MutableHeapNumberMap"),
- ("RO_SPACE", 0x036b9): (188, "NameDictionaryMap"),
- ("RO_SPACE", 0x03709): (212, "NoClosuresCellMap"),
- ("RO_SPACE", 0x03759): (190, "NumberDictionaryMap"),
- ("RO_SPACE", 0x037a9): (212, "OneClosureCellMap"),
- ("RO_SPACE", 0x037f9): (186, "OrderedHashMapMap"),
- ("RO_SPACE", 0x03849): (187, "OrderedHashSetMap"),
- ("RO_SPACE", 0x03899): (215, "PreParsedScopeDataMap"),
- ("RO_SPACE", 0x038e9): (216, "PropertyArrayMap"),
- ("RO_SPACE", 0x03939): (209, "SideEffectCallHandlerInfoMap"),
- ("RO_SPACE", 0x03989): (209, "SideEffectFreeCallHandlerInfoMap"),
- ("RO_SPACE", 0x039d9): (209, "NextCallSideEffectFreeCallHandlerInfoMap"),
- ("RO_SPACE", 0x03a29): (191, "SimpleNumberDictionaryMap"),
- ("RO_SPACE", 0x03a79): (183, "SloppyArgumentsElementsMap"),
- ("RO_SPACE", 0x03ac9): (219, "SmallOrderedHashMapMap"),
- ("RO_SPACE", 0x03b19): (220, "SmallOrderedHashSetMap"),
- ("RO_SPACE", 0x03b69): (192, "StringTableMap"),
- ("RO_SPACE", 0x03bb9): (222, "UncompiledDataWithoutPreParsedScopeMap"),
- ("RO_SPACE", 0x03c09): (223, "UncompiledDataWithPreParsedScopeMap"),
- ("RO_SPACE", 0x03c59): (224, "WeakArrayListMap"),
- ("RO_SPACE", 0x03ca9): (193, "EphemeronHashTableMap"),
- ("RO_SPACE", 0x03cf9): (106, "NativeSourceStringMap"),
- ("RO_SPACE", 0x03d49): (64, "StringMap"),
- ("RO_SPACE", 0x03d99): (73, "ConsOneByteStringMap"),
- ("RO_SPACE", 0x03de9): (65, "ConsStringMap"),
- ("RO_SPACE", 0x03e39): (77, "ThinOneByteStringMap"),
- ("RO_SPACE", 0x03e89): (69, "ThinStringMap"),
- ("RO_SPACE", 0x03ed9): (67, "SlicedStringMap"),
- ("RO_SPACE", 0x03f29): (75, "SlicedOneByteStringMap"),
- ("RO_SPACE", 0x03f79): (66, "ExternalStringMap"),
- ("RO_SPACE", 0x03fc9): (82, "ExternalStringWithOneByteDataMap"),
- ("RO_SPACE", 0x04019): (74, "ExternalOneByteStringMap"),
- ("RO_SPACE", 0x04069): (98, "UncachedExternalStringMap"),
- ("RO_SPACE", 0x040b9): (114, "UncachedExternalStringWithOneByteDataMap"),
- ("RO_SPACE", 0x04109): (0, "InternalizedStringMap"),
- ("RO_SPACE", 0x04159): (2, "ExternalInternalizedStringMap"),
- ("RO_SPACE", 0x041a9): (18, "ExternalInternalizedStringWithOneByteDataMap"),
- ("RO_SPACE", 0x041f9): (10, "ExternalOneByteInternalizedStringMap"),
- ("RO_SPACE", 0x04249): (34, "UncachedExternalInternalizedStringMap"),
- ("RO_SPACE", 0x04299): (50, "UncachedExternalInternalizedStringWithOneByteDataMap"),
- ("RO_SPACE", 0x042e9): (42, "UncachedExternalOneByteInternalizedStringMap"),
- ("RO_SPACE", 0x04339): (106, "UncachedExternalOneByteStringMap"),
- ("RO_SPACE", 0x04389): (140, "FixedUint8ArrayMap"),
- ("RO_SPACE", 0x043d9): (139, "FixedInt8ArrayMap"),
- ("RO_SPACE", 0x04429): (142, "FixedUint16ArrayMap"),
- ("RO_SPACE", 0x04479): (141, "FixedInt16ArrayMap"),
- ("RO_SPACE", 0x044c9): (144, "FixedUint32ArrayMap"),
- ("RO_SPACE", 0x04519): (143, "FixedInt32ArrayMap"),
- ("RO_SPACE", 0x04569): (145, "FixedFloat32ArrayMap"),
- ("RO_SPACE", 0x045b9): (146, "FixedFloat64ArrayMap"),
- ("RO_SPACE", 0x04609): (147, "FixedUint8ClampedArrayMap"),
- ("RO_SPACE", 0x04659): (149, "FixedBigUint64ArrayMap"),
- ("RO_SPACE", 0x046a9): (148, "FixedBigInt64ArrayMap"),
- ("RO_SPACE", 0x046f9): (131, "SelfReferenceMarkerMap"),
- ("RO_SPACE", 0x04761): (171, "Tuple2Map"),
- ("RO_SPACE", 0x04801): (173, "ArrayBoilerplateDescriptionMap"),
- ("RO_SPACE", 0x04af1): (161, "InterceptorInfoMap"),
- ("RO_SPACE", 0x06ea9): (153, "AccessCheckInfoMap"),
- ("RO_SPACE", 0x06ef9): (154, "AccessorInfoMap"),
- ("RO_SPACE", 0x06f49): (155, "AccessorPairMap"),
- ("RO_SPACE", 0x06f99): (156, "AliasedArgumentsEntryMap"),
- ("RO_SPACE", 0x06fe9): (157, "AllocationMementoMap"),
- ("RO_SPACE", 0x07039): (158, "AsyncGeneratorRequestMap"),
- ("RO_SPACE", 0x07089): (159, "DebugInfoMap"),
- ("RO_SPACE", 0x070d9): (160, "FunctionTemplateInfoMap"),
- ("RO_SPACE", 0x07129): (162, "InterpreterDataMap"),
- ("RO_SPACE", 0x07179): (163, "ModuleInfoEntryMap"),
- ("RO_SPACE", 0x071c9): (164, "ModuleMap"),
- ("RO_SPACE", 0x07219): (165, "ObjectTemplateInfoMap"),
- ("RO_SPACE", 0x07269): (166, "PromiseCapabilityMap"),
- ("RO_SPACE", 0x072b9): (167, "PromiseReactionMap"),
- ("RO_SPACE", 0x07309): (168, "PrototypeInfoMap"),
- ("RO_SPACE", 0x07359): (169, "ScriptMap"),
- ("RO_SPACE", 0x073a9): (170, "StackFrameInfoMap"),
- ("RO_SPACE", 0x073f9): (172, "Tuple3Map"),
- ("RO_SPACE", 0x07449): (174, "WasmDebugInfoMap"),
- ("RO_SPACE", 0x07499): (175, "WasmExportedFunctionDataMap"),
- ("RO_SPACE", 0x074e9): (176, "CallableTaskMap"),
- ("RO_SPACE", 0x07539): (177, "CallbackTaskMap"),
- ("RO_SPACE", 0x07589): (178, "PromiseFulfillReactionJobTaskMap"),
- ("RO_SPACE", 0x075d9): (179, "PromiseRejectReactionJobTaskMap"),
- ("RO_SPACE", 0x07629): (180, "PromiseResolveThenableJobTaskMap"),
- ("RO_SPACE", 0x07679): (181, "MicrotaskQueueMap"),
- ("RO_SPACE", 0x076c9): (182, "AllocationSiteWithWeakNextMap"),
- ("RO_SPACE", 0x07719): (182, "AllocationSiteWithoutWeakNextMap"),
- ("RO_SPACE", 0x07769): (214, "LoadHandler1Map"),
- ("RO_SPACE", 0x077b9): (214, "LoadHandler2Map"),
- ("RO_SPACE", 0x07809): (214, "LoadHandler3Map"),
- ("RO_SPACE", 0x07859): (221, "StoreHandler0Map"),
- ("RO_SPACE", 0x078a9): (221, "StoreHandler1Map"),
- ("RO_SPACE", 0x078f9): (221, "StoreHandler2Map"),
- ("RO_SPACE", 0x07949): (221, "StoreHandler3Map"),
- ("MAP_SPACE", 0x02201): (1057, "ExternalMap"),
- ("MAP_SPACE", 0x02251): (1072, "JSMessageObjectMap"),
+ ("RO_SPACE", 0x00139): (138, "FreeSpaceMap"),
+ ("RO_SPACE", 0x00189): (132, "MetaMap"),
+ ("RO_SPACE", 0x00209): (131, "NullMap"),
+ ("RO_SPACE", 0x00271): (216, "DescriptorArrayMap"),
+ ("RO_SPACE", 0x002d1): (211, "WeakFixedArrayMap"),
+ ("RO_SPACE", 0x00321): (152, "OnePointerFillerMap"),
+ ("RO_SPACE", 0x00371): (152, "TwoPointerFillerMap"),
+ ("RO_SPACE", 0x003f1): (131, "UninitializedMap"),
+ ("RO_SPACE", 0x00461): (8, "OneByteInternalizedStringMap"),
+ ("RO_SPACE", 0x00501): (131, "UndefinedMap"),
+ ("RO_SPACE", 0x00561): (129, "HeapNumberMap"),
+ ("RO_SPACE", 0x005e1): (131, "TheHoleMap"),
+ ("RO_SPACE", 0x00689): (131, "BooleanMap"),
+ ("RO_SPACE", 0x00761): (136, "ByteArrayMap"),
+ ("RO_SPACE", 0x007b1): (187, "FixedArrayMap"),
+ ("RO_SPACE", 0x00801): (187, "FixedCOWArrayMap"),
+ ("RO_SPACE", 0x00851): (189, "HashTableMap"),
+ ("RO_SPACE", 0x008a1): (128, "SymbolMap"),
+ ("RO_SPACE", 0x008f1): (72, "OneByteStringMap"),
+ ("RO_SPACE", 0x00941): (199, "ScopeInfoMap"),
+ ("RO_SPACE", 0x00991): (223, "SharedFunctionInfoMap"),
+ ("RO_SPACE", 0x009e1): (133, "CodeMap"),
+ ("RO_SPACE", 0x00a31): (206, "FunctionContextMap"),
+ ("RO_SPACE", 0x00a81): (214, "CellMap"),
+ ("RO_SPACE", 0x00ad1): (222, "GlobalPropertyCellMap"),
+ ("RO_SPACE", 0x00b21): (135, "ForeignMap"),
+ ("RO_SPACE", 0x00b71): (212, "TransitionArrayMap"),
+ ("RO_SPACE", 0x00bc1): (218, "FeedbackVectorMap"),
+ ("RO_SPACE", 0x00c61): (131, "ArgumentsMarkerMap"),
+ ("RO_SPACE", 0x00d01): (131, "ExceptionMap"),
+ ("RO_SPACE", 0x00da1): (131, "TerminationExceptionMap"),
+ ("RO_SPACE", 0x00e49): (131, "OptimizedOutMap"),
+ ("RO_SPACE", 0x00ee9): (131, "StaleRegisterMap"),
+ ("RO_SPACE", 0x00f59): (208, "NativeContextMap"),
+ ("RO_SPACE", 0x00fa9): (207, "ModuleContextMap"),
+ ("RO_SPACE", 0x00ff9): (205, "EvalContextMap"),
+ ("RO_SPACE", 0x01049): (209, "ScriptContextMap"),
+ ("RO_SPACE", 0x01099): (201, "AwaitContextMap"),
+ ("RO_SPACE", 0x010e9): (202, "BlockContextMap"),
+ ("RO_SPACE", 0x01139): (203, "CatchContextMap"),
+ ("RO_SPACE", 0x01189): (210, "WithContextMap"),
+ ("RO_SPACE", 0x011d9): (204, "DebugEvaluateContextMap"),
+ ("RO_SPACE", 0x01229): (200, "ScriptContextTableMap"),
+ ("RO_SPACE", 0x01279): (151, "FeedbackMetadataArrayMap"),
+ ("RO_SPACE", 0x012c9): (187, "ArrayListMap"),
+ ("RO_SPACE", 0x01319): (130, "BigIntMap"),
+ ("RO_SPACE", 0x01369): (188, "ObjectBoilerplateDescriptionMap"),
+ ("RO_SPACE", 0x013b9): (137, "BytecodeArrayMap"),
+ ("RO_SPACE", 0x01409): (215, "CodeDataContainerMap"),
+ ("RO_SPACE", 0x01459): (150, "FixedDoubleArrayMap"),
+ ("RO_SPACE", 0x014a9): (194, "GlobalDictionaryMap"),
+ ("RO_SPACE", 0x014f9): (217, "ManyClosuresCellMap"),
+ ("RO_SPACE", 0x01549): (187, "ModuleInfoMap"),
+ ("RO_SPACE", 0x01599): (134, "MutableHeapNumberMap"),
+ ("RO_SPACE", 0x015e9): (193, "NameDictionaryMap"),
+ ("RO_SPACE", 0x01639): (217, "NoClosuresCellMap"),
+ ("RO_SPACE", 0x01689): (217, "NoFeedbackCellMap"),
+ ("RO_SPACE", 0x016d9): (195, "NumberDictionaryMap"),
+ ("RO_SPACE", 0x01729): (217, "OneClosureCellMap"),
+ ("RO_SPACE", 0x01779): (190, "OrderedHashMapMap"),
+ ("RO_SPACE", 0x017c9): (191, "OrderedHashSetMap"),
+ ("RO_SPACE", 0x01819): (192, "OrderedNameDictionaryMap"),
+ ("RO_SPACE", 0x01869): (220, "PreparseDataMap"),
+ ("RO_SPACE", 0x018b9): (221, "PropertyArrayMap"),
+ ("RO_SPACE", 0x01909): (213, "SideEffectCallHandlerInfoMap"),
+ ("RO_SPACE", 0x01959): (213, "SideEffectFreeCallHandlerInfoMap"),
+ ("RO_SPACE", 0x019a9): (213, "NextCallSideEffectFreeCallHandlerInfoMap"),
+ ("RO_SPACE", 0x019f9): (196, "SimpleNumberDictionaryMap"),
+ ("RO_SPACE", 0x01a49): (187, "SloppyArgumentsElementsMap"),
+ ("RO_SPACE", 0x01a99): (224, "SmallOrderedHashMapMap"),
+ ("RO_SPACE", 0x01ae9): (225, "SmallOrderedHashSetMap"),
+ ("RO_SPACE", 0x01b39): (226, "SmallOrderedNameDictionaryMap"),
+ ("RO_SPACE", 0x01b89): (197, "StringTableMap"),
+ ("RO_SPACE", 0x01bd9): (228, "UncompiledDataWithoutPreparseDataMap"),
+ ("RO_SPACE", 0x01c29): (229, "UncompiledDataWithPreparseDataMap"),
+ ("RO_SPACE", 0x01c79): (230, "WeakArrayListMap"),
+ ("RO_SPACE", 0x01cc9): (198, "EphemeronHashTableMap"),
+ ("RO_SPACE", 0x01d19): (186, "EmbedderDataArrayMap"),
+ ("RO_SPACE", 0x01d69): (106, "NativeSourceStringMap"),
+ ("RO_SPACE", 0x01db9): (64, "StringMap"),
+ ("RO_SPACE", 0x01e09): (73, "ConsOneByteStringMap"),
+ ("RO_SPACE", 0x01e59): (65, "ConsStringMap"),
+ ("RO_SPACE", 0x01ea9): (77, "ThinOneByteStringMap"),
+ ("RO_SPACE", 0x01ef9): (69, "ThinStringMap"),
+ ("RO_SPACE", 0x01f49): (67, "SlicedStringMap"),
+ ("RO_SPACE", 0x01f99): (75, "SlicedOneByteStringMap"),
+ ("RO_SPACE", 0x01fe9): (66, "ExternalStringMap"),
+ ("RO_SPACE", 0x02039): (82, "ExternalStringWithOneByteDataMap"),
+ ("RO_SPACE", 0x02089): (74, "ExternalOneByteStringMap"),
+ ("RO_SPACE", 0x020d9): (98, "UncachedExternalStringMap"),
+ ("RO_SPACE", 0x02129): (114, "UncachedExternalStringWithOneByteDataMap"),
+ ("RO_SPACE", 0x02179): (0, "InternalizedStringMap"),
+ ("RO_SPACE", 0x021c9): (2, "ExternalInternalizedStringMap"),
+ ("RO_SPACE", 0x02219): (18, "ExternalInternalizedStringWithOneByteDataMap"),
+ ("RO_SPACE", 0x02269): (10, "ExternalOneByteInternalizedStringMap"),
+ ("RO_SPACE", 0x022b9): (34, "UncachedExternalInternalizedStringMap"),
+ ("RO_SPACE", 0x02309): (50, "UncachedExternalInternalizedStringWithOneByteDataMap"),
+ ("RO_SPACE", 0x02359): (42, "UncachedExternalOneByteInternalizedStringMap"),
+ ("RO_SPACE", 0x023a9): (106, "UncachedExternalOneByteStringMap"),
+ ("RO_SPACE", 0x023f9): (140, "FixedUint8ArrayMap"),
+ ("RO_SPACE", 0x02449): (139, "FixedInt8ArrayMap"),
+ ("RO_SPACE", 0x02499): (142, "FixedUint16ArrayMap"),
+ ("RO_SPACE", 0x024e9): (141, "FixedInt16ArrayMap"),
+ ("RO_SPACE", 0x02539): (144, "FixedUint32ArrayMap"),
+ ("RO_SPACE", 0x02589): (143, "FixedInt32ArrayMap"),
+ ("RO_SPACE", 0x025d9): (145, "FixedFloat32ArrayMap"),
+ ("RO_SPACE", 0x02629): (146, "FixedFloat64ArrayMap"),
+ ("RO_SPACE", 0x02679): (147, "FixedUint8ClampedArrayMap"),
+ ("RO_SPACE", 0x026c9): (149, "FixedBigUint64ArrayMap"),
+ ("RO_SPACE", 0x02719): (148, "FixedBigInt64ArrayMap"),
+ ("RO_SPACE", 0x02769): (131, "SelfReferenceMarkerMap"),
+ ("RO_SPACE", 0x027d1): (173, "Tuple2Map"),
+ ("RO_SPACE", 0x02871): (175, "ArrayBoilerplateDescriptionMap"),
+ ("RO_SPACE", 0x02bb1): (163, "InterceptorInfoMap"),
+ ("RO_SPACE", 0x05081): (153, "AccessCheckInfoMap"),
+ ("RO_SPACE", 0x050d1): (154, "AccessorInfoMap"),
+ ("RO_SPACE", 0x05121): (155, "AccessorPairMap"),
+ ("RO_SPACE", 0x05171): (156, "AliasedArgumentsEntryMap"),
+ ("RO_SPACE", 0x051c1): (157, "AllocationMementoMap"),
+ ("RO_SPACE", 0x05211): (158, "AsmWasmDataMap"),
+ ("RO_SPACE", 0x05261): (159, "AsyncGeneratorRequestMap"),
+ ("RO_SPACE", 0x052b1): (160, "DebugInfoMap"),
+ ("RO_SPACE", 0x05301): (161, "FunctionTemplateInfoMap"),
+ ("RO_SPACE", 0x05351): (162, "FunctionTemplateRareDataMap"),
+ ("RO_SPACE", 0x053a1): (164, "InterpreterDataMap"),
+ ("RO_SPACE", 0x053f1): (165, "ModuleInfoEntryMap"),
+ ("RO_SPACE", 0x05441): (166, "ModuleMap"),
+ ("RO_SPACE", 0x05491): (167, "ObjectTemplateInfoMap"),
+ ("RO_SPACE", 0x054e1): (168, "PromiseCapabilityMap"),
+ ("RO_SPACE", 0x05531): (169, "PromiseReactionMap"),
+ ("RO_SPACE", 0x05581): (170, "PrototypeInfoMap"),
+ ("RO_SPACE", 0x055d1): (171, "ScriptMap"),
+ ("RO_SPACE", 0x05621): (172, "StackFrameInfoMap"),
+ ("RO_SPACE", 0x05671): (174, "Tuple3Map"),
+ ("RO_SPACE", 0x056c1): (176, "WasmDebugInfoMap"),
+ ("RO_SPACE", 0x05711): (177, "WasmExceptionTagMap"),
+ ("RO_SPACE", 0x05761): (178, "WasmExportedFunctionDataMap"),
+ ("RO_SPACE", 0x057b1): (179, "CallableTaskMap"),
+ ("RO_SPACE", 0x05801): (180, "CallbackTaskMap"),
+ ("RO_SPACE", 0x05851): (181, "PromiseFulfillReactionJobTaskMap"),
+ ("RO_SPACE", 0x058a1): (182, "PromiseRejectReactionJobTaskMap"),
+ ("RO_SPACE", 0x058f1): (183, "PromiseResolveThenableJobTaskMap"),
+ ("RO_SPACE", 0x05941): (184, "WeakFactoryCleanupJobTaskMap"),
+ ("RO_SPACE", 0x05991): (185, "AllocationSiteWithWeakNextMap"),
+ ("RO_SPACE", 0x059e1): (185, "AllocationSiteWithoutWeakNextMap"),
+ ("RO_SPACE", 0x05a31): (219, "LoadHandler1Map"),
+ ("RO_SPACE", 0x05a81): (219, "LoadHandler2Map"),
+ ("RO_SPACE", 0x05ad1): (219, "LoadHandler3Map"),
+ ("RO_SPACE", 0x05b21): (227, "StoreHandler0Map"),
+ ("RO_SPACE", 0x05b71): (227, "StoreHandler1Map"),
+ ("RO_SPACE", 0x05bc1): (227, "StoreHandler2Map"),
+ ("RO_SPACE", 0x05c11): (227, "StoreHandler3Map"),
+ ("MAP_SPACE", 0x00139): (1057, "ExternalMap"),
+ ("MAP_SPACE", 0x00189): (1073, "JSMessageObjectMap"),
}
# List of known V8 objects.
KNOWN_OBJECTS = {
- ("RO_SPACE", 0x022a1): "NullValue",
- ("RO_SPACE", 0x02321): "EmptyDescriptorArray",
- ("RO_SPACE", 0x02491): "UninitializedValue",
- ("RO_SPACE", 0x025a1): "UndefinedValue",
- ("RO_SPACE", 0x02621): "NanValue",
- ("RO_SPACE", 0x02681): "TheHoleValue",
- ("RO_SPACE", 0x02719): "HoleNanValue",
- ("RO_SPACE", 0x02729): "TrueValue",
- ("RO_SPACE", 0x027d9): "FalseValue",
- ("RO_SPACE", 0x02821): "empty_string",
- ("RO_SPACE", 0x02ce1): "EmptyScopeInfo",
- ("RO_SPACE", 0x02cf1): "EmptyFixedArray",
- ("RO_SPACE", 0x02d01): "ArgumentsMarker",
- ("RO_SPACE", 0x02da1): "Exception",
- ("RO_SPACE", 0x02e41): "TerminationException",
- ("RO_SPACE", 0x02ee9): "OptimizedOut",
- ("RO_SPACE", 0x02f89): "StaleRegister",
- ("RO_SPACE", 0x047c1): "EmptyByteArray",
- ("RO_SPACE", 0x04851): "EmptyFixedUint8Array",
- ("RO_SPACE", 0x04871): "EmptyFixedInt8Array",
- ("RO_SPACE", 0x04891): "EmptyFixedUint16Array",
- ("RO_SPACE", 0x048b1): "EmptyFixedInt16Array",
- ("RO_SPACE", 0x048d1): "EmptyFixedUint32Array",
- ("RO_SPACE", 0x048f1): "EmptyFixedInt32Array",
- ("RO_SPACE", 0x04911): "EmptyFixedFloat32Array",
- ("RO_SPACE", 0x04931): "EmptyFixedFloat64Array",
- ("RO_SPACE", 0x04951): "EmptyFixedUint8ClampedArray",
- ("RO_SPACE", 0x049b1): "EmptySloppyArgumentsElements",
- ("RO_SPACE", 0x049d1): "EmptySlowElementDictionary",
- ("RO_SPACE", 0x04a19): "EmptyOrderedHashMap",
- ("RO_SPACE", 0x04a41): "EmptyOrderedHashSet",
- ("RO_SPACE", 0x04a79): "EmptyPropertyCell",
- ("RO_SPACE", 0x04b59): "InfinityValue",
- ("RO_SPACE", 0x04b69): "MinusZeroValue",
- ("RO_SPACE", 0x04b79): "MinusInfinityValue",
- ("RO_SPACE", 0x04b89): "SelfReferenceMarker",
- ("OLD_SPACE", 0x02211): "EmptyScript",
- ("OLD_SPACE", 0x02291): "ManyClosuresCell",
- ("OLD_SPACE", 0x022b1): "NoElementsProtector",
- ("OLD_SPACE", 0x022d9): "IsConcatSpreadableProtector",
- ("OLD_SPACE", 0x022e9): "ArraySpeciesProtector",
- ("OLD_SPACE", 0x02311): "TypedArraySpeciesProtector",
- ("OLD_SPACE", 0x02339): "PromiseSpeciesProtector",
- ("OLD_SPACE", 0x02361): "StringLengthProtector",
- ("OLD_SPACE", 0x02371): "ArrayIteratorProtector",
- ("OLD_SPACE", 0x02399): "ArrayBufferNeuteringProtector",
- ("OLD_SPACE", 0x02421): "StringIteratorProtector",
+ ("RO_SPACE", 0x001d9): "NullValue",
+ ("RO_SPACE", 0x00259): "EmptyDescriptorArray",
+ ("RO_SPACE", 0x002c1): "EmptyWeakFixedArray",
+ ("RO_SPACE", 0x003c1): "UninitializedValue",
+ ("RO_SPACE", 0x004d1): "UndefinedValue",
+ ("RO_SPACE", 0x00551): "NanValue",
+ ("RO_SPACE", 0x005b1): "TheHoleValue",
+ ("RO_SPACE", 0x00649): "HoleNanValue",
+ ("RO_SPACE", 0x00659): "TrueValue",
+ ("RO_SPACE", 0x00709): "FalseValue",
+ ("RO_SPACE", 0x00751): "empty_string",
+ ("RO_SPACE", 0x00c11): "EmptyScopeInfo",
+ ("RO_SPACE", 0x00c21): "EmptyFixedArray",
+ ("RO_SPACE", 0x00c31): "ArgumentsMarker",
+ ("RO_SPACE", 0x00cd1): "Exception",
+ ("RO_SPACE", 0x00d71): "TerminationException",
+ ("RO_SPACE", 0x00e19): "OptimizedOut",
+ ("RO_SPACE", 0x00eb9): "StaleRegister",
+ ("RO_SPACE", 0x027b9): "EmptyEnumCache",
+ ("RO_SPACE", 0x02821): "EmptyPropertyArray",
+ ("RO_SPACE", 0x02831): "EmptyByteArray",
+ ("RO_SPACE", 0x02841): "EmptyObjectBoilerplateDescription",
+ ("RO_SPACE", 0x02859): "EmptyArrayBoilerplateDescription",
+ ("RO_SPACE", 0x028c1): "EmptyFixedUint8Array",
+ ("RO_SPACE", 0x028e1): "EmptyFixedInt8Array",
+ ("RO_SPACE", 0x02901): "EmptyFixedUint16Array",
+ ("RO_SPACE", 0x02921): "EmptyFixedInt16Array",
+ ("RO_SPACE", 0x02941): "EmptyFixedUint32Array",
+ ("RO_SPACE", 0x02961): "EmptyFixedInt32Array",
+ ("RO_SPACE", 0x02981): "EmptyFixedFloat32Array",
+ ("RO_SPACE", 0x029a1): "EmptyFixedFloat64Array",
+ ("RO_SPACE", 0x029c1): "EmptyFixedUint8ClampedArray",
+ ("RO_SPACE", 0x029e1): "EmptyFixedBigUint64Array",
+ ("RO_SPACE", 0x02a01): "EmptyFixedBigInt64Array",
+ ("RO_SPACE", 0x02a21): "EmptySloppyArgumentsElements",
+ ("RO_SPACE", 0x02a41): "EmptySlowElementDictionary",
+ ("RO_SPACE", 0x02a89): "EmptyOrderedHashMap",
+ ("RO_SPACE", 0x02ab1): "EmptyOrderedHashSet",
+ ("RO_SPACE", 0x02ad9): "EmptyFeedbackMetadata",
+ ("RO_SPACE", 0x02ae9): "EmptyPropertyCell",
+ ("RO_SPACE", 0x02b11): "EmptyPropertyDictionary",
+ ("RO_SPACE", 0x02b61): "NoOpInterceptorInfo",
+ ("RO_SPACE", 0x02c01): "EmptyWeakArrayList",
+ ("RO_SPACE", 0x02c19): "InfinityValue",
+ ("RO_SPACE", 0x02c29): "MinusZeroValue",
+ ("RO_SPACE", 0x02c39): "MinusInfinityValue",
+ ("RO_SPACE", 0x02c49): "SelfReferenceMarker",
+ ("RO_SPACE", 0x02ca1): "OffHeapTrampolineRelocationInfo",
+ ("RO_SPACE", 0x02cb9): "HashSeed",
+ ("OLD_SPACE", 0x00139): "ArgumentsIteratorAccessor",
+ ("OLD_SPACE", 0x001a9): "ArrayLengthAccessor",
+ ("OLD_SPACE", 0x00219): "BoundFunctionLengthAccessor",
+ ("OLD_SPACE", 0x00289): "BoundFunctionNameAccessor",
+ ("OLD_SPACE", 0x002f9): "ErrorStackAccessor",
+ ("OLD_SPACE", 0x00369): "FunctionArgumentsAccessor",
+ ("OLD_SPACE", 0x003d9): "FunctionCallerAccessor",
+ ("OLD_SPACE", 0x00449): "FunctionNameAccessor",
+ ("OLD_SPACE", 0x004b9): "FunctionLengthAccessor",
+ ("OLD_SPACE", 0x00529): "FunctionPrototypeAccessor",
+ ("OLD_SPACE", 0x00599): "StringLengthAccessor",
+ ("OLD_SPACE", 0x00609): "InvalidPrototypeValidityCell",
+ ("OLD_SPACE", 0x00619): "EmptyScript",
+ ("OLD_SPACE", 0x00699): "ManyClosuresCell",
+ ("OLD_SPACE", 0x006a9): "NoFeedbackCell",
+ ("OLD_SPACE", 0x006b9): "ArrayConstructorProtector",
+ ("OLD_SPACE", 0x006c9): "NoElementsProtector",
+ ("OLD_SPACE", 0x006f1): "IsConcatSpreadableProtector",
+ ("OLD_SPACE", 0x00701): "ArraySpeciesProtector",
+ ("OLD_SPACE", 0x00729): "TypedArraySpeciesProtector",
+ ("OLD_SPACE", 0x00751): "RegExpSpeciesProtector",
+ ("OLD_SPACE", 0x00779): "PromiseSpeciesProtector",
+ ("OLD_SPACE", 0x007a1): "StringLengthProtector",
+ ("OLD_SPACE", 0x007b1): "ArrayIteratorProtector",
+ ("OLD_SPACE", 0x007d9): "ArrayBufferDetachingProtector",
+ ("OLD_SPACE", 0x00801): "PromiseHookProtector",
+ ("OLD_SPACE", 0x00829): "PromiseResolveProtector",
+ ("OLD_SPACE", 0x00839): "MapIteratorProtector",
+ ("OLD_SPACE", 0x00861): "PromiseThenProtector",
+ ("OLD_SPACE", 0x00889): "SetIteratorProtector",
+ ("OLD_SPACE", 0x008b1): "StringIteratorProtector",
+ ("OLD_SPACE", 0x008d9): "SingleCharacterStringCache",
+ ("OLD_SPACE", 0x010e9): "StringSplitCache",
+ ("OLD_SPACE", 0x018f9): "RegExpMultipleCache",
+ ("OLD_SPACE", 0x02109): "BuiltinsConstantsTable",
}
# List of known V8 Frame Markers.
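The map and object offsets above are regenerated by mkgrokdump for every snapshot, which is why the whole table shifts when a V8 update rearranges the read-only heap. A minimal sketch of how a postmortem tool might consume these tables, assuming v8heapconst.py is importable and that you already have a (space, offset) pair for the pointer in question (lookup_map is an illustrative helper, not part of the generated file):

    # Resolve a (space, offset) pair against the autogenerated tables.
    # Offsets are only valid for the exact snapshot this file was built from.
    import v8heapconst

    def lookup_map(space, offset):
        entry = v8heapconst.KNOWN_MAPS.get((space, offset))
        if entry is None:
            return "unknown map"
        instance_type, name = entry
        return "%s (instance type %d)" % (name, instance_type)

    print(lookup_map("RO_SPACE", 0x00189))  # -> MetaMap, per the table above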
diff --git a/deps/v8/tools/wasm/update-wasm-spec-tests.sh b/deps/v8/tools/wasm/update-wasm-spec-tests.sh
index 92aaa8fd3c..9189c814b9 100755
--- a/deps/v8/tools/wasm/update-wasm-spec-tests.sh
+++ b/deps/v8/tools/wasm/update-wasm-spec-tests.sh
@@ -27,16 +27,16 @@ mkdir ${SPEC_TEST_DIR}/tmp
./tools/dev/gm.py x64.release d8
-cd ${V8_DIR}/test/wasm-js/interpreter
+cd ${V8_DIR}/test/wasm-js/data/interpreter
# The next step requires that ocaml is installed. See the README.md in
-# ${V8_DIR}/test/wasm-js/interpreter/.
+# ${V8_DIR}/test/wasm-js/data/interpreter/.
make clean all
-cd ${V8_DIR}/test/wasm-js/test/core
+cd ${V8_DIR}/test/wasm-js/data/test/core
-./run.py --wasm ${V8_DIR}/test/wasm-js/interpreter/wasm --js ${V8_DIR}/out/x64.release/d8 --out ${SPEC_TEST_DIR}/tmp
+./run.py --wasm ${V8_DIR}/test/wasm-js/data/interpreter/wasm --js ${V8_DIR}/out/x64.release/d8 --out ${SPEC_TEST_DIR}/tmp
cp ${SPEC_TEST_DIR}/tmp/*.js ${SPEC_TEST_DIR}/tests/
rm -rf ${SPEC_TEST_DIR}/tmp
diff --git a/deps/v8/tools/wasm/wasm-import-profiler-end.js b/deps/v8/tools/wasm/wasm-import-profiler-end.js
new file mode 100644
index 0000000000..5b5eedda03
--- /dev/null
+++ b/deps/v8/tools/wasm/wasm-import-profiler-end.js
@@ -0,0 +1,6 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Code to run at shutdown: print out the profiles for all instances.
+if (typeof WebAssembly.dumpAllProfiles == "function") WebAssembly.dumpAllProfiles();
diff --git a/deps/v8/tools/wasm/wasm-import-profiler.js b/deps/v8/tools/wasm/wasm-import-profiler.js
new file mode 100644
index 0000000000..cfbb3fb13b
--- /dev/null
+++ b/deps/v8/tools/wasm/wasm-import-profiler.js
@@ -0,0 +1,131 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+(() => {
+ let all_profiles = [];
+ let instanceMap = new WeakMap();
+ let instanceCounter = 0;
+
+ function instrument(imports, profile) {
+ let orig_imports = imports;
+ return new Proxy(imports, {
+ get: (obj, module_name) => {
+ let orig_module = orig_imports[module_name];
+ return new Proxy(orig_module, {
+ get: (obj, item_name) => {
+ let orig_func = orig_module[item_name];
+ let item = orig_func;
+ if (typeof orig_func == "function") {
+ var full_name = module_name + "." + item_name;
+ print("instrumented " + full_name);
+ profile[full_name] = {name: full_name, count: 0, total: 0};
+ item = function profiled_func(...args) {
+ var before = performance.now();
+ var result = orig_func(...args);
+ var delta = performance.now() - before;
+ var data = profile[full_name];
+ data.count++;
+ data.total += delta;
+ return result;
+ }
+ }
+ return item;
+ }
+ })
+ }
+ });
+ }
+
+ function dumpProfile(profile) {
+ let array = [];
+ for (let key in profile) {
+ if (key == "instanceNum") continue;
+ let data = profile[key];
+ if (data.count == 0) continue;
+ array.push(data);
+ }
+ print(`--- Import profile for instance ${profile.instanceNum} ---`);
+ if (array.length == 0) return;
+ array.sort((a, b) => b.total - a.total);
+ for (let data of array) {
+ print(`${padl(data.name, 30)}: ${padr(data.count, 10)} ${padp(data.total, 10)}ms`);
+ }
+ }
+
+ function padl(s, len) {
+ s = s.toString();
+ while (s.length < len) s = s + " ";
+ return s;
+ }
+ function padr(s, len) {
+ s = s.toString();
+ while (s.length < len) s = " " + s;
+ return s;
+ }
+ function padp(s, len) {
+ s = s.toString();
+ var i = s.indexOf(".");
+ if (i == -1) i = s.length;
+ while (i++ < len) s = " " + s;
+ return s;
+ }
+
+ // patch: WebAssembly.instantiate (async)
+ let orig_instantiate = WebAssembly.instantiate;
+ WebAssembly.instantiate = (m, imports, ...args) => {
+ let profile = {};
+ let promise = orig_instantiate(m, instrument(imports, profile), ...args);
+ promise.then((instance) => {
+ instanceMap.set(instance, profile);
+ all_profiles.push(profile);
+ profile.instanceNum = instanceCounter++;
+ });
+ return promise;
+ }
+
+ // patch: new WebAssembly.Instance (sync)
+ let orig_new_instance = WebAssembly.Instance;
+ WebAssembly.Instance = new Proxy(orig_new_instance, {
+ construct: (target, args) => {
+ let profile = {};
+ args[1] = instrument(args[1], profile);
+ let instance = new orig_new_instance(...args);
+ instanceMap.set(instance, profile);
+ all_profiles.push(profile);
+ profile.instanceNum = instanceCounter++;
+ return instance;
+ }
+ });
+
+ // expose: WebAssembly.dumpProfile(instance)
+ WebAssembly.dumpProfile = (instance) => {
+ let profile = instanceMap.get(instance);
+ if (profile === undefined) return;
+ dumpProfile(profile);
+ }
+ // expose: WebAssembly.clearProfile(instance)
+ WebAssembly.clearProfile = (instance) => {
+ let profile = instanceMap.get(instance);
+ if (profile === undefined) return;
+ for (let key in profile) {
+ if (key == "instanceNum") continue;
+ let data = profile[key];
+ data.count = 0;
+ data.total = 0;
+ }
+ }
+ // expose: WebAssembly.dumpAllProfiles()
+ WebAssembly.dumpAllProfiles = () => {
+ for (let profile of all_profiles) dumpProfile(profile);
+ }
+ // expose: WebAssembly.getProfile(instance)
+ // returns: {
+ // func_name1: {name: func_name1, count: <num>, total: <num>}
+ // func_name2: {name: func_name2, count: <num>, total: <num>}
+ // ...
+ // }
+ WebAssembly.getProfile = (instance) => {
+ return instanceMap.get(instance);
+ }
+})();
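Taken together, the two new scripts bracket a workload: wasm-import-profiler.js must run first so the WebAssembly patches are installed before any instantiation, and wasm-import-profiler-end.js dumps all collected profiles at shutdown, e.g. d8 wasm-import-profiler.js workload.js wasm-import-profiler-end.js. Note that the async patch keys the profile on whatever the instantiate promise resolves to, so when instantiating from raw bytes (where the promise resolves to a {module, instance} pair rather than an instance) the per-instance lookups will not find the profile; the synchronous path sketched below avoids that. module_bytes, env.now, and exports.main are placeholders:

    // Usage sketch: profile the imports of a single instance.
    let module = new WebAssembly.Module(module_bytes);  // placeholder bytes
    let instance = new WebAssembly.Instance(module, {
      env: {now: () => performance.now()},              // placeholder import
    });
    instance.exports.main();             // exercise the instrumented imports
    WebAssembly.dumpProfile(instance);   // print this instance's profile
    WebAssembly.clearProfile(instance);  // reset counters for another run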
diff --git a/deps/v8/tools/whitespace.txt b/deps/v8/tools/whitespace.txt
index 11f5364162..1b70f46680 100644
--- a/deps/v8/tools/whitespace.txt
+++ b/deps/v8/tools/whitespace.txt
@@ -6,5 +6,5 @@ A Smi balks into a war and says:
"I'm so deoptimized today!"
The doubles heard this and started to unbox.
The Smi looked at them when a crazy v8-autoroll account showed up...
-The autoroller bought a round of Himbeerbrause. Suddenly.....
-The bartender starts to shake the bottles......
+The autoroller bought a round of Himbeerbrause. Suddenly.......
+The bartender starts to shake the bottles!